Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/accel/amdxdna/aie2_ctx.c65
-rw-r--r--drivers/accel/amdxdna/aie2_error.c8
-rw-r--r--drivers/accel/amdxdna/aie2_message.c10
-rw-r--r--drivers/accel/amdxdna/aie2_pci.h10
-rw-r--r--drivers/accel/amdxdna/aie2_smu.c2
-rw-r--r--drivers/accel/amdxdna/amdxdna_ctx.c2
-rw-r--r--drivers/accel/amdxdna/amdxdna_ctx.h3
-rw-r--r--drivers/accel/amdxdna/amdxdna_mailbox.c23
-rw-r--r--drivers/accel/amdxdna/amdxdna_mailbox.h2
-rw-r--r--drivers/accel/amdxdna/amdxdna_mailbox_helper.c6
-rw-r--r--drivers/accel/amdxdna/amdxdna_mailbox_helper.h2
-rw-r--r--drivers/accel/ivpu/ivpu_debugfs.c89
-rw-r--r--drivers/accel/ivpu/ivpu_drv.c90
-rw-r--r--drivers/accel/ivpu/ivpu_drv.h14
-rw-r--r--drivers/accel/ivpu/ivpu_fw.c9
-rw-r--r--drivers/accel/ivpu/ivpu_gem.c43
-rw-r--r--drivers/accel/ivpu/ivpu_gem.h1
-rw-r--r--drivers/accel/ivpu/ivpu_hw.c110
-rw-r--r--drivers/accel/ivpu/ivpu_hw.h14
-rw-r--r--drivers/accel/ivpu/ivpu_hw_btrs.c10
-rw-r--r--drivers/accel/ivpu/ivpu_hw_btrs.h1
-rw-r--r--drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h1
-rw-r--r--drivers/accel/ivpu/ivpu_hw_ip.c4
-rw-r--r--drivers/accel/ivpu/ivpu_hw_reg_io.h64
-rw-r--r--drivers/accel/ivpu/ivpu_ipc.c7
-rw-r--r--drivers/accel/ivpu/ivpu_ipc.h2
-rw-r--r--drivers/accel/ivpu/ivpu_job.c522
-rw-r--r--drivers/accel/ivpu/ivpu_job.h8
-rw-r--r--drivers/accel/ivpu/ivpu_jsm_msg.c29
-rw-r--r--drivers/accel/ivpu/ivpu_mmu.c121
-rw-r--r--drivers/accel/ivpu/ivpu_mmu.h2
-rw-r--r--drivers/accel/ivpu/ivpu_mmu_context.c13
-rw-r--r--drivers/accel/ivpu/ivpu_mmu_context.h2
-rw-r--r--drivers/accel/ivpu/ivpu_pm.c10
-rw-r--r--drivers/accel/ivpu/ivpu_pm.h2
-rw-r--r--drivers/accel/ivpu/ivpu_sysfs.c35
-rw-r--r--drivers/accel/qaic/mhi_controller.c360
-rw-r--r--drivers/accel/qaic/mhi_controller.h2
-rw-r--r--drivers/accel/qaic/qaic.h14
-rw-r--r--drivers/accel/qaic/qaic_data.c9
-rw-r--r--drivers/accel/qaic/qaic_drv.c97
-rw-r--r--drivers/accel/qaic/qaic_timesync.c2
-rw-r--r--drivers/accel/qaic/sahara.c43
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/Makefile4
-rw-r--r--drivers/acpi/acpi_pnp.c2
-rw-r--r--drivers/acpi/acpi_video.c7
-rw-r--r--drivers/acpi/arm64/dma.c5
-rw-r--r--drivers/acpi/button.c10
-rw-r--r--drivers/acpi/device_pm.c4
-rw-r--r--drivers/acpi/fan.h1
-rw-r--r--drivers/acpi/fan_attr.c37
-rw-r--r--drivers/acpi/fan_core.c25
-rw-r--r--drivers/acpi/fan_hwmon.c8
-rw-r--r--drivers/acpi/hed.c7
-rw-r--r--drivers/acpi/numa/srat.c1
-rw-r--r--drivers/acpi/platform_profile.c45
-rw-r--r--drivers/acpi/power.c5
-rw-r--r--drivers/acpi/processor_idle.c2
-rw-r--r--drivers/acpi/scan.c7
-rw-r--r--drivers/acpi/thermal.c2
-rw-r--r--drivers/amba/bus.c3
-rw-r--r--drivers/ata/ahci.c34
-rw-r--r--drivers/ata/ahci.h1
-rw-r--r--drivers/ata/libahci.c4
-rw-r--r--drivers/ata/libata-core.c56
-rw-r--r--drivers/ata/libata-eh.c11
-rw-r--r--drivers/ata/pata_octeon_cf.c5
-rw-r--r--drivers/ata/sata_via.c3
-rw-r--r--drivers/auxdisplay/Kconfig1
-rw-r--r--drivers/auxdisplay/charlcd.c5
-rw-r--r--drivers/auxdisplay/charlcd.h5
-rw-r--r--drivers/auxdisplay/hd44780.c19
-rw-r--r--drivers/auxdisplay/hd44780_common.c24
-rw-r--r--drivers/auxdisplay/hd44780_common.h4
-rw-r--r--drivers/auxdisplay/lcd2s.c12
-rw-r--r--drivers/auxdisplay/panel.c17
-rw-r--r--drivers/auxdisplay/seg-led-gpio.c3
-rw-r--r--drivers/base/arch_topology.c26
-rw-r--r--drivers/base/component.c17
-rw-r--r--drivers/base/core.c61
-rw-r--r--drivers/base/devres.c12
-rw-r--r--drivers/base/devtmpfs.c153
-rw-r--r--drivers/base/platform.c3
-rw-r--r--drivers/base/power/clock_ops.c73
-rw-r--r--drivers/base/power/generic_ops.c24
-rw-r--r--drivers/base/power/main.c165
-rw-r--r--drivers/base/power/runtime.c91
-rw-r--r--drivers/base/regmap/internal.h12
-rw-r--r--drivers/base/regmap/regcache.c31
-rw-r--r--drivers/base/regmap/regmap-irq.c2
-rw-r--r--drivers/block/loop.c106
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c2
-rw-r--r--drivers/block/null_blk/main.c188
-rw-r--r--drivers/block/null_blk/null_blk.h6
-rw-r--r--drivers/block/null_blk/zoned.c20
-rw-r--r--drivers/block/rnbd/rnbd-clt.c2
-rw-r--r--drivers/block/rnull.rs2
-rw-r--r--drivers/block/sunvdc.c2
-rw-r--r--drivers/block/ublk_drv.c171
-rw-r--r--drivers/block/virtio_blk.c7
-rw-r--r--drivers/block/xen-blkfront.c2
-rw-r--r--drivers/bluetooth/Kconfig12
-rw-r--r--drivers/bluetooth/bfusb.c3
-rw-r--r--drivers/bluetooth/btintel.c341
-rw-r--r--drivers/bluetooth/btintel.h24
-rw-r--r--drivers/bluetooth/btintel_pcie.c582
-rw-r--r--drivers/bluetooth/btintel_pcie.h93
-rw-r--r--drivers/bluetooth/btmtk.c10
-rw-r--r--drivers/bluetooth/btmtksdio.c3
-rw-r--r--drivers/bluetooth/btnxpuart.c407
-rw-r--r--drivers/bluetooth/btqca.c27
-rw-r--r--drivers/bluetooth/btqca.h4
-rw-r--r--drivers/bluetooth/btusb.c77
-rw-r--r--drivers/bluetooth/hci_ldisc.c19
-rw-r--r--drivers/bluetooth/hci_qca.c27
-rw-r--r--drivers/bluetooth/hci_uart.h1
-rw-r--r--drivers/bluetooth/hci_vhci.c5
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-bus.c3
-rw-r--r--drivers/bus/mhi/host/boot.c203
-rw-r--r--drivers/bus/mhi/host/init.c2
-rw-r--r--drivers/bus/mhi/host/internal.h7
-rw-r--r--drivers/bus/qcom-ssc-block-bus.c34
-rw-r--r--drivers/cdx/cdx.c3
-rw-r--r--drivers/char/hw_random/Kconfig21
-rw-r--r--drivers/char/hw_random/imx-rngc.c69
-rw-r--r--drivers/char/hw_random/rockchip-rng.c250
-rw-r--r--drivers/char/hw_random/timeriomem-rng.c3
-rw-r--r--drivers/char/random.c6
-rw-r--r--drivers/char/sonypi.c11
-rw-r--r--drivers/char/tpm/Kconfig9
-rw-r--r--drivers/char/tpm/Makefile1
-rw-r--r--drivers/char/tpm/tpm-chip.c6
-rw-r--r--drivers/char/tpm/tpm-interface.c37
-rw-r--r--drivers/char/tpm/tpm2-cmd.c1
-rw-r--r--drivers/char/tpm/tpm2-sessions.c2
-rw-r--r--drivers/char/tpm/tpm_crb.c105
-rw-r--r--drivers/char/tpm/tpm_crb_ffa.c348
-rw-r--r--drivers/char/tpm/tpm_crb_ffa.h25
-rw-r--r--drivers/char/tpm/tpm_ftpm_tee.c22
-rw-r--r--drivers/char/tpm/tpm_ftpm_tee.h1
-rw-r--r--drivers/char/tpm/tpm_tis_core.c20
-rw-r--r--drivers/char/tpm/tpm_tis_core.h1
-rw-r--r--drivers/clk/clk-stm32f4.c4
-rw-r--r--drivers/clk/clk.c13
-rw-r--r--drivers/clk/davinci/Makefile2
-rw-r--r--drivers/clk/davinci/pll-da830.c71
-rw-r--r--drivers/clk/davinci/pll.c9
-rw-r--r--drivers/clk/davinci/psc-da830.c118
-rw-r--r--drivers/clk/davinci/psc.c8
-rw-r--r--drivers/clk/davinci/psc.h7
-rw-r--r--drivers/clk/imgtec/clk-boston.c6
-rw-r--r--drivers/clk/imx/clk-imx8mp-audiomix.c6
-rw-r--r--drivers/clk/imx/clk-imx8mp.c151
-rw-r--r--drivers/clk/keystone/syscon-clk.c13
-rw-r--r--drivers/clk/mediatek/clk-mt8188-cam.c17
-rw-r--r--drivers/clk/mediatek/clk-mt8188-img.c18
-rw-r--r--drivers/clk/mediatek/clk-mt8188-ipe.c14
-rw-r--r--drivers/clk/mediatek/clk-mt8188-vdo1.c11
-rw-r--r--drivers/clk/meson/a1-pll.c2
-rw-r--r--drivers/clk/meson/g12a.c38
-rw-r--r--drivers/clk/meson/gxbb.c14
-rw-r--r--drivers/clk/mmp/clk-pxa1908-apmu.c4
-rw-r--r--drivers/clk/qcom/Kconfig9
-rw-r--r--drivers/clk/qcom/Makefile1
-rw-r--r--drivers/clk/qcom/camcc-sa8775p.c3
-rw-r--r--drivers/clk/qcom/camcc-sc7180.c2
-rw-r--r--drivers/clk/qcom/camcc-sc7280.c2
-rw-r--r--drivers/clk/qcom/camcc-sc8280xp.c2
-rw-r--r--drivers/clk/qcom/camcc-sdm845.c1
-rw-r--r--drivers/clk/qcom/camcc-sm4450.c3
-rw-r--r--drivers/clk/qcom/camcc-sm6350.c1
-rw-r--r--drivers/clk/qcom/camcc-sm7150.c1
-rw-r--r--drivers/clk/qcom/camcc-sm8150.c4
-rw-r--r--drivers/clk/qcom/camcc-sm8250.c58
-rw-r--r--drivers/clk/qcom/camcc-sm8550.c2
-rw-r--r--drivers/clk/qcom/camcc-sm8650.c2
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c52
-rw-r--r--drivers/clk/qcom/clk-branch.c4
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c32
-rw-r--r--drivers/clk/qcom/common.c6
-rw-r--r--drivers/clk/qcom/dispcc-qcm2290.c3
-rw-r--r--drivers/clk/qcom/dispcc-sc7180.c1
-rw-r--r--drivers/clk/qcom/dispcc-sc7280.c1
-rw-r--r--drivers/clk/qcom/dispcc-sc8280xp.c7
-rw-r--r--drivers/clk/qcom/dispcc-sdm845.c2
-rw-r--r--drivers/clk/qcom/dispcc-sm4450.c3
-rw-r--r--drivers/clk/qcom/dispcc-sm6115.c3
-rw-r--r--drivers/clk/qcom/dispcc-sm6125.c1
-rw-r--r--drivers/clk/qcom/dispcc-sm6350.c1
-rw-r--r--drivers/clk/qcom/dispcc-sm6375.c1
-rw-r--r--drivers/clk/qcom/dispcc-sm7150.c1
-rw-r--r--drivers/clk/qcom/dispcc-sm8250.c2
-rw-r--r--drivers/clk/qcom/dispcc-sm8450.c5
-rw-r--r--drivers/clk/qcom/dispcc-sm8550.c5
-rw-r--r--drivers/clk/qcom/dispcc-sm8750.c6
-rw-r--r--drivers/clk/qcom/dispcc0-sa8775p.c3
-rw-r--r--drivers/clk/qcom/dispcc1-sa8775p.c3
-rw-r--r--drivers/clk/qcom/gcc-ipq5424.c24
-rw-r--r--drivers/clk/qcom/gcc-ipq9574.c15
-rw-r--r--drivers/clk/qcom/gcc-msm8953.c2
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c1
-rw-r--r--drivers/clk/qcom/gcc-msm8974.c1
-rw-r--r--drivers/clk/qcom/gcc-sdm660.c2
-rw-r--r--drivers/clk/qcom/gcc-sm8650.c4
-rw-r--r--drivers/clk/qcom/gcc-x1e80100.c30
-rw-r--r--drivers/clk/qcom/gdsc.c98
-rw-r--r--drivers/clk/qcom/gdsc.h1
-rw-r--r--drivers/clk/qcom/gpucc-msm8998.c3
-rw-r--r--drivers/clk/qcom/gpucc-sa8775p.c49
-rw-r--r--drivers/clk/qcom/gpucc-sar2130p.c1
-rw-r--r--drivers/clk/qcom/gpucc-sc7180.c1
-rw-r--r--drivers/clk/qcom/gpucc-sc7280.c1
-rw-r--r--drivers/clk/qcom/gpucc-sc8280xp.c3
-rw-r--r--drivers/clk/qcom/gpucc-sdm660.c5
-rw-r--r--drivers/clk/qcom/gpucc-sdm845.c1
-rw-r--r--drivers/clk/qcom/gpucc-sm4450.c1
-rw-r--r--drivers/clk/qcom/gpucc-sm6350.c1
-rw-r--r--drivers/clk/qcom/gpucc-sm8150.c1
-rw-r--r--drivers/clk/qcom/gpucc-sm8250.c1
-rw-r--r--drivers/clk/qcom/gpucc-sm8350.c2
-rw-r--r--drivers/clk/qcom/gpucc-x1p42100.c2
-rw-r--r--drivers/clk/qcom/kpss-xcc.c1
-rw-r--r--drivers/clk/qcom/krait-cc.c1
-rw-r--r--drivers/clk/qcom/lpassaudiocc-sc7280.c23
-rw-r--r--drivers/clk/qcom/lpasscc-sdm845.c1
-rw-r--r--drivers/clk/qcom/lpasscorecc-sc7180.c1
-rw-r--r--drivers/clk/qcom/lpasscorecc-sc7280.c1
-rw-r--r--drivers/clk/qcom/mmcc-apq8084.c2
-rw-r--r--drivers/clk/qcom/mmcc-msm8960.c4
-rw-r--r--drivers/clk/qcom/mmcc-msm8974.c2
-rw-r--r--drivers/clk/qcom/mmcc-msm8994.c3
-rw-r--r--drivers/clk/qcom/mmcc-msm8996.c4
-rw-r--r--drivers/clk/qcom/mmcc-msm8998.c3
-rw-r--r--drivers/clk/qcom/mmcc-sdm660.c8
-rw-r--r--drivers/clk/qcom/nsscc-ipq9574.c3110
-rw-r--r--drivers/clk/qcom/videocc-sa8775p.c10
-rw-r--r--drivers/clk/qcom/videocc-sm8350.c2
-rw-r--r--drivers/clk/qcom/videocc-sm8450.c2
-rw-r--r--drivers/clk/qcom/videocc-sm8550.c2
-rw-r--r--drivers/clk/renesas/r7s9210-cpg-mssr.c10
-rw-r--r--drivers/clk/renesas/r8a779a0-cpg-mssr.c8
-rw-r--r--drivers/clk/renesas/r8a779g0-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a779h0-cpg-mssr.c3
-rw-r--r--drivers/clk/renesas/r9a07g043-cpg.c7
-rw-r--r--drivers/clk/renesas/r9a07g044-cpg.c55
-rw-r--r--drivers/clk/renesas/r9a08g045-cpg.c9
-rw-r--r--drivers/clk/renesas/r9a09g047-cpg.c86
-rw-r--r--drivers/clk/renesas/r9a09g057-cpg.c24
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c5
-rw-r--r--drivers/clk/renesas/rzg2l-cpg.c129
-rw-r--r--drivers/clk/renesas/rzg2l-cpg.h12
-rw-r--r--drivers/clk/renesas/rzv2h-cpg.c16
-rw-r--r--drivers/clk/renesas/rzv2h-cpg.h2
-rw-r--r--drivers/clk/rockchip/Kconfig14
-rw-r--r--drivers/clk/rockchip/Makefile2
-rw-r--r--drivers/clk/rockchip/clk-pll.c10
-rw-r--r--drivers/clk/rockchip/clk-rk3188.c4
-rw-r--r--drivers/clk/rockchip/clk-rk3328.c2
-rw-r--r--drivers/clk/rockchip/clk-rk3528.c1116
-rw-r--r--drivers/clk/rockchip/clk-rk3562.c1101
-rw-r--r--drivers/clk/rockchip/clk-rk3568.c1
-rw-r--r--drivers/clk/rockchip/clk.h63
-rw-r--r--drivers/clk/rockchip/rst-rk3528.c306
-rw-r--r--drivers/clk/rockchip/rst-rk3562.c429
-rw-r--r--drivers/clk/samsung/Makefile2
-rw-r--r--drivers/clk/samsung/clk-cpu.c2
-rw-r--r--drivers/clk/samsung/clk-exynos-audss.c1
-rw-r--r--drivers/clk/samsung/clk-exynos-clkout.c1
-rw-r--r--drivers/clk/samsung/clk-exynos2200.c3928
-rw-r--r--drivers/clk/samsung/clk-exynos3250.c4
-rw-r--r--drivers/clk/samsung/clk-exynos4.c2
-rw-r--r--drivers/clk/samsung/clk-exynos4412-isp.c2
-rw-r--r--drivers/clk/samsung/clk-exynos5-subcmu.c1
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c1
-rw-r--r--drivers/clk/samsung/clk-exynos5260.c3
-rw-r--r--drivers/clk/samsung/clk-exynos5410.c2
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c1
-rw-r--r--drivers/clk/samsung/clk-exynos5433.c4
-rw-r--r--drivers/clk/samsung/clk-exynos7.c1
-rw-r--r--drivers/clk/samsung/clk-exynos7870.c1829
-rw-r--r--drivers/clk/samsung/clk-exynos7885.c2
-rw-r--r--drivers/clk/samsung/clk-exynos850.c2
-rw-r--r--drivers/clk/samsung/clk-exynos8895.c2
-rw-r--r--drivers/clk/samsung/clk-exynos990.c182
-rw-r--r--drivers/clk/samsung/clk-exynosautov9.c2
-rw-r--r--drivers/clk/samsung/clk-exynosautov920.c2
-rw-r--r--drivers/clk/samsung/clk-fsd.c2
-rw-r--r--drivers/clk/samsung/clk-gs101.c10
-rw-r--r--drivers/clk/samsung/clk-pll.c8
-rw-r--r--drivers/clk/samsung/clk-pll.h1
-rw-r--r--drivers/clk/samsung/clk-s3c64xx.c1
-rw-r--r--drivers/clk/samsung/clk-s5pv210-audss.c1
-rw-r--r--drivers/clk/samsung/clk-s5pv210.c1
-rw-r--r--drivers/clk/samsung/clk.c4
-rw-r--r--drivers/clk/samsung/clk.h1
-rw-r--r--drivers/clk/sunxi-ng/Kconfig10
-rw-r--r--drivers/clk/sunxi-ng/Makefile4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h616.c60
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h616.h2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c248
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun55i-a523-r.h14
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun55i-a523.c1685
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun55i-a523.h14
-rw-r--r--drivers/clk/sunxi-ng/ccu_common.h5
-rw-r--r--drivers/clk/sunxi-ng/ccu_div.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu_gate.c4
-rw-r--r--drivers/clk/sunxi-ng/ccu_mp.c51
-rw-r--r--drivers/clk/sunxi-ng/ccu_mp.h58
-rw-r--r--drivers/clk/sunxi-ng/ccu_mux.c2
-rw-r--r--drivers/clocksource/arm_arch_timer.c2
-rw-r--r--drivers/clocksource/exynos_mct.c2
-rw-r--r--drivers/clocksource/hyperv_timer.c4
-rw-r--r--drivers/clocksource/mips-gic-timer.c6
-rw-r--r--drivers/clocksource/timer-stm32-lp.c36
-rw-r--r--drivers/cpufreq/Kconfig.arm2
-rw-r--r--drivers/cpufreq/Kconfig.powerpc18
-rw-r--r--drivers/cpufreq/Kconfig.x8612
-rw-r--r--drivers/cpufreq/Makefile3
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c4
-rw-r--r--drivers/cpufreq/amd-pstate-trace.h57
-rw-r--r--drivers/cpufreq/amd-pstate-ut.c211
-rw-r--r--drivers/cpufreq/amd-pstate.c670
-rw-r--r--drivers/cpufreq/amd-pstate.h65
-rw-r--r--drivers/cpufreq/apple-soc-cpufreq.c18
-rw-r--r--drivers/cpufreq/armada-37xx-cpufreq.c6
-rw-r--r--drivers/cpufreq/armada-8k-cpufreq.c2
-rw-r--r--drivers/cpufreq/bmips-cpufreq.c1
-rw-r--r--drivers/cpufreq/brcmstb-avs-cpufreq.c1
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c9
-rw-r--r--drivers/cpufreq/cpufreq-dt.c24
-rw-r--r--drivers/cpufreq/cpufreq.c84
-rw-r--r--drivers/cpufreq/cpufreq_governor.c45
-rw-r--r--drivers/cpufreq/davinci-cpufreq.c1
-rw-r--r--drivers/cpufreq/e_powersaver.c1
-rw-r--r--drivers/cpufreq/elanfreq.c1
-rw-r--r--drivers/cpufreq/freq_table.c15
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c1
-rw-r--r--drivers/cpufreq/intel_pstate.c43
-rw-r--r--drivers/cpufreq/kirkwood-cpufreq.c1
-rw-r--r--drivers/cpufreq/longhaul.c1
-rw-r--r--drivers/cpufreq/loongson2_cpufreq.c1
-rw-r--r--drivers/cpufreq/loongson3_cpufreq.c11
-rw-r--r--drivers/cpufreq/mediatek-cpufreq-hw.c3
-rw-r--r--drivers/cpufreq/mediatek-cpufreq.c3
-rw-r--r--drivers/cpufreq/mvebu-cpufreq.c2
-rw-r--r--drivers/cpufreq/omap-cpufreq.c1
-rw-r--r--drivers/cpufreq/p4-clockmod.c1
-rw-r--r--drivers/cpufreq/pasemi-cpufreq.c1
-rw-r--r--drivers/cpufreq/pmac32-cpufreq.c1
-rw-r--r--drivers/cpufreq/pmac64-cpufreq.c1
-rw-r--r--drivers/cpufreq/powernow-k6.c1
-rw-r--r--drivers/cpufreq/powernow-k7.c1
-rw-r--r--drivers/cpufreq/powernow-k8.c1
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c11
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq.c173
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq.h33
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c102
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq_pmi.c150
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c16
-rw-r--r--drivers/cpufreq/qcom-cpufreq-nvmem.c8
-rw-r--r--drivers/cpufreq/qoriq-cpufreq.c1
-rw-r--r--drivers/cpufreq/sc520_freq.c1
-rw-r--r--drivers/cpufreq/scmi-cpufreq.c21
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c8
-rw-r--r--drivers/cpufreq/sh-cpufreq.c1
-rw-r--r--drivers/cpufreq/spear-cpufreq.c1
-rw-r--r--drivers/cpufreq/speedstep-centrino.c1
-rw-r--r--drivers/cpufreq/speedstep-ich.c1
-rw-r--r--drivers/cpufreq/speedstep-smi.c1
-rw-r--r--drivers/cpufreq/sun50i-cpufreq-nvmem.c6
-rw-r--r--drivers/cpufreq/tegra186-cpufreq.c8
-rw-r--r--drivers/cpufreq/tegra194-cpufreq.c1
-rw-r--r--drivers/cpufreq/vexpress-spc-cpufreq.c1
-rw-r--r--drivers/cpufreq/virtual-cpufreq.c3
-rw-r--r--drivers/cpuidle/Makefile3
-rw-r--r--drivers/cpuidle/cpuidle-arm.c8
-rw-r--r--drivers/cpuidle/cpuidle-big_little.c2
-rw-r--r--drivers/cpuidle/cpuidle-psci.c7
-rw-r--r--drivers/cpuidle/cpuidle-qcom-spm.c2
-rw-r--r--drivers/cpuidle/cpuidle-riscv-sbi.c4
-rw-r--r--drivers/cpuidle/governors/menu.c129
-rw-r--r--drivers/crypto/Kconfig1
-rw-r--r--drivers/crypto/Makefile2
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c4
-rw-r--r--drivers/crypto/bcm/cipher.c27
-rw-r--r--drivers/crypto/bcm/spu2.c3
-rw-r--r--drivers/crypto/caam/caamalg_qi2.c3
-rw-r--r--drivers/crypto/cavium/zip/zip_crypto.c46
-rw-r--r--drivers/crypto/cavium/zip/zip_crypto.h17
-rw-r--r--drivers/crypto/cavium/zip/zip_main.c50
-rw-r--r--drivers/crypto/ccp/sp-pci.c16
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_crypto.c7
-rw-r--r--drivers/crypto/hisilicon/sec2/sec.h1
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.c125
-rw-r--r--drivers/crypto/inside-secure/Makefile1
-rw-r--r--drivers/crypto/inside-secure/eip93/Kconfig20
-rw-r--r--drivers/crypto/inside-secure/eip93/Makefile5
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-aead.c711
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-aead.h38
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-aes.h16
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-cipher.c413
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-cipher.h60
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-common.c822
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-common.h24
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-des.h16
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-hash.c866
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-hash.h82
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-main.c501
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-main.h151
-rw-r--r--drivers/crypto/inside-secure/eip93/eip93-regs.h335
-rw-r--r--drivers/crypto/intel/iaa/iaa_crypto_main.c164
-rw-r--r--drivers/crypto/intel/qat/qat_420xx/Makefile2
-rw-r--r--drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c19
-rw-r--r--drivers/crypto/intel/qat/qat_420xx/adf_drv.c2
-rw-r--r--drivers/crypto/intel/qat/qat_4xxx/Makefile2
-rw-r--r--drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c13
-rw-r--r--drivers/crypto/intel/qat/qat_4xxx/adf_drv.c2
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxx/Makefile2
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c4
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c2
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxxvf/Makefile2
-rw-r--r--drivers/crypto/intel/qat/qat_c62x/Makefile2
-rw-r--r--drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c4
-rw-r--r--drivers/crypto/intel/qat/qat_c62x/adf_drv.c4
-rw-r--r--drivers/crypto/intel/qat/qat_c62xvf/Makefile2
-rw-r--r--drivers/crypto/intel/qat/qat_common/Makefile66
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_accel_devices.h23
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_services.c167
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_services.h26
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h6
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c2
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_config.c15
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c56
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c59
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_sysfs.c22
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h4
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h10
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_bl.c159
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_bl.h6
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_comp_algs.c85
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_comp_req.h10
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_uclo.c38
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xcc/Makefile2
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c6
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c2
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xccvf/Makefile2
-rw-r--r--drivers/crypto/marvell/Kconfig4
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c16
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h2
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c100
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h1
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c7
-rw-r--r--drivers/crypto/mxs-dcp.c8
-rw-r--r--drivers/crypto/nx/nx-842.c33
-rw-r--r--drivers/crypto/nx/nx-842.h15
-rw-r--r--drivers/crypto/nx/nx-aes-ccm.c16
-rw-r--r--drivers/crypto/nx/nx-aes-gcm.c17
-rw-r--r--drivers/crypto/nx/nx-common-powernv.c31
-rw-r--r--drivers/crypto/nx/nx-common-pseries.c70
-rw-r--r--drivers/crypto/nx/nx.c34
-rw-r--r--drivers/crypto/nx/nx.h3
-rw-r--r--drivers/crypto/padlock-sha.c4
-rw-r--r--drivers/crypto/s5p-sss.c38
-rw-r--r--drivers/crypto/stm32/stm32-crc32.c2
-rw-r--r--drivers/crypto/stm32/stm32-cryp.c34
-rw-r--r--drivers/crypto/tegra/tegra-se-aes.c401
-rw-r--r--drivers/crypto/tegra/tegra-se-hash.c287
-rw-r--r--drivers/crypto/tegra/tegra-se-key.c29
-rw-r--r--drivers/crypto/tegra/tegra-se-main.c16
-rw-r--r--drivers/crypto/tegra/tegra-se.h39
-rw-r--r--drivers/crypto/virtio/virtio_crypto_akcipher_algs.c41
-rw-r--r--drivers/crypto/virtio/virtio_crypto_core.c6
-rw-r--r--drivers/crypto/virtio/virtio_crypto_mgr.c2
-rw-r--r--drivers/crypto/virtio/virtio_crypto_skcipher_algs.c17
-rw-r--r--drivers/cxl/Kconfig12
-rw-r--r--drivers/cxl/core/Makefile1
-rw-r--r--drivers/cxl/core/core.h17
-rw-r--r--drivers/cxl/core/features.c708
-rw-r--r--drivers/cxl/core/mbox.c124
-rw-r--r--drivers/cxl/core/memdev.c22
-rw-r--r--drivers/cxl/cxlmem.h47
-rw-r--r--drivers/cxl/pci.c8
-rw-r--r--drivers/devfreq/event/rockchip-dfi.c3
-rw-r--r--drivers/dma-buf/dma-fence-unwrap.c11
-rw-r--r--drivers/dma-buf/st-dma-fence-unwrap.c268
-rw-r--r--drivers/dpll/dpll_core.c7
-rw-r--r--drivers/edac/Kconfig30
-rw-r--r--drivers/edac/Makefile3
-rw-r--r--drivers/edac/amd64_edac.c52
-rw-r--r--drivers/edac/debugfs.c5
-rwxr-xr-xdrivers/edac/ecs.c205
-rw-r--r--drivers/edac/edac_device.c185
-rw-r--r--drivers/edac/i10nm_base.c2
-rw-r--r--drivers/edac/i5400_edac.c3
-rw-r--r--drivers/edac/i7300_edac.c7
-rw-r--r--drivers/edac/ie31200_edac.c641
-rw-r--r--drivers/edac/igen6_edac.c41
-rwxr-xr-xdrivers/edac/mem_repair.c359
-rw-r--r--drivers/edac/pnd2_edac.c4
-rwxr-xr-xdrivers/edac/scrub.c209
-rw-r--r--drivers/edac/skx_common.c33
-rw-r--r--drivers/edac/skx_common.h11
-rw-r--r--drivers/edac/xgene_edac.c17
-rw-r--r--drivers/firmware/Kconfig12
-rw-r--r--drivers/firmware/Makefile2
-rw-r--r--drivers/firmware/arm_ffa/bus.c14
-rw-r--r--drivers/firmware/arm_ffa/driver.c532
-rw-r--r--drivers/firmware/arm_scmi/bus.c69
-rw-r--r--drivers/firmware/arm_scmi/driver.c10
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c13
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c51
-rw-r--r--drivers/firmware/efi/efibc.c2
-rw-r--r--drivers/firmware/efi/libstub/Makefile10
-rw-r--r--drivers/firmware/efi/libstub/efistub.h3
-rw-r--r--drivers/firmware/efi/libstub/intrinsics.c26
-rw-r--r--drivers/firmware/efi/libstub/randomalloc.c4
-rw-r--r--drivers/firmware/efi/libstub/x86-mixed.S253
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c52
-rw-r--r--drivers/firmware/efi/libstub/zboot-decompress-gzip.c68
-rw-r--r--drivers/firmware/efi/libstub/zboot-decompress-zstd.c49
-rw-r--r--drivers/firmware/efi/libstub/zboot.c65
-rw-r--r--drivers/firmware/efi/libstub/zboot.lds1
-rw-r--r--drivers/firmware/imx/imx-scu.c1
-rw-r--r--drivers/firmware/qcom/qcom_qseecom_uefisecapp.c18
-rw-r--r--drivers/firmware/qcom/qcom_scm.c4
-rw-r--r--drivers/firmware/samsung/Kconfig14
-rw-r--r--drivers/firmware/samsung/Makefile4
-rw-r--r--drivers/firmware/samsung/exynos-acpm-pmic.c224
-rw-r--r--drivers/firmware/samsung/exynos-acpm-pmic.h29
-rw-r--r--drivers/firmware/samsung/exynos-acpm.c769
-rw-r--r--drivers/firmware/samsung/exynos-acpm.h23
-rw-r--r--drivers/firmware/smccc/kvm_guest.c66
-rw-r--r--drivers/firmware/smccc/soc_id.c80
-rw-r--r--drivers/firmware/thead,th1520-aon.c250
-rw-r--r--drivers/firmware/xilinx/zynqmp.c6
-rw-r--r--drivers/fwctl/Kconfig33
-rw-r--r--drivers/fwctl/Makefile6
-rw-r--r--drivers/fwctl/main.c421
-rw-r--r--drivers/fwctl/mlx5/Makefile4
-rw-r--r--drivers/fwctl/mlx5/main.c411
-rw-r--r--drivers/fwctl/pds/Makefile4
-rw-r--r--drivers/fwctl/pds/main.c536
-rw-r--r--drivers/gpio/Kconfig9
-rw-r--r--drivers/gpio/Makefile3
-rw-r--r--drivers/gpio/TODO89
-rw-r--r--drivers/gpio/dev-sync-probe.c97
-rw-r--r--drivers/gpio/dev-sync-probe.h25
-rw-r--r--drivers/gpio/gpio-74x164.c92
-rw-r--r--drivers/gpio/gpio-adnp.c138
-rw-r--r--drivers/gpio/gpio-adp5520.c12
-rw-r--r--drivers/gpio/gpio-adp5585.c10
-rw-r--r--drivers/gpio/gpio-aggregator.c38
-rw-r--r--drivers/gpio/gpio-altera-a10sr.c14
-rw-r--r--drivers/gpio/gpio-altera.c6
-rw-r--r--drivers/gpio/gpio-amd-fch.c7
-rw-r--r--drivers/gpio/gpio-amd8111.c6
-rw-r--r--drivers/gpio/gpio-arizona.c9
-rw-r--r--drivers/gpio/gpio-aspeed-sgpio.c86
-rw-r--r--drivers/gpio/gpio-aspeed.c108
-rw-r--r--drivers/gpio/gpio-bcm-kona.c69
-rw-r--r--drivers/gpio/gpio-bd71815.c15
-rw-r--r--drivers/gpio/gpio-bd71828.c15
-rw-r--r--drivers/gpio/gpio-bd9571mwv.c8
-rw-r--r--drivers/gpio/gpio-brcmstb.c3
-rw-r--r--drivers/gpio/gpio-bt8xx.c48
-rw-r--r--drivers/gpio/gpio-cgbc.c24
-rw-r--r--drivers/gpio/gpio-creg-snps.c10
-rw-r--r--drivers/gpio/gpio-cros-ec.c13
-rw-r--r--drivers/gpio/gpio-crystalcove.c15
-rw-r--r--drivers/gpio/gpio-cs5535.c6
-rw-r--r--drivers/gpio/gpio-da9052.c34
-rw-r--r--drivers/gpio/gpio-da9055.c14
-rw-r--r--drivers/gpio/gpio-davinci.c6
-rw-r--r--drivers/gpio/gpio-grgpio.c3
-rw-r--r--drivers/gpio/gpio-latch.c68
-rw-r--r--drivers/gpio/gpio-loongson-64bit.c55
-rw-r--r--drivers/gpio/gpio-max3191x.c18
-rw-r--r--drivers/gpio/gpio-max77650.c14
-rw-r--r--drivers/gpio/gpio-mmio.c37
-rw-r--r--drivers/gpio/gpio-mockup.c14
-rw-r--r--drivers/gpio/gpio-mvebu.c15
-rw-r--r--drivers/gpio/gpio-nomadik.c3
-rw-r--r--drivers/gpio/gpio-pca953x.c17
-rw-r--r--drivers/gpio/gpio-pcf857x.c29
-rw-r--r--drivers/gpio/gpio-rcar.c13
-rw-r--r--drivers/gpio/gpio-regmap.c73
-rw-r--r--drivers/gpio/gpio-sim.c98
-rw-r--r--drivers/gpio/gpio-stmpe.c6
-rw-r--r--drivers/gpio/gpio-vf610.c105
-rw-r--r--drivers/gpio/gpio-virtio.c29
-rw-r--r--drivers/gpio/gpio-virtuser.c73
-rw-r--r--drivers/gpio/gpio-wcove.c3
-rw-r--r--drivers/gpio/gpio-wm831x.c3
-rw-r--r--drivers/gpio/gpio-xilinx.c102
-rw-r--r--drivers/gpio/gpio-xra1403.c3
-rw-r--r--drivers/gpio/gpiolib-acpi.c2
-rw-r--r--drivers/gpio/gpiolib-cdev.c15
-rw-r--r--drivers/gpio/gpiolib-of.c137
-rw-r--r--drivers/gpio/gpiolib.c390
-rw-r--r--drivers/gpio/gpiolib.h44
-rw-r--r--drivers/gpu/Makefile1
-rw-r--r--drivers/gpu/drm/Kconfig24
-rw-r--r--drivers/gpu/drm/Makefile22
-rw-r--r--drivers/gpu/drm/adp/Kconfig17
-rw-r--r--drivers/gpu/drm/adp/Makefile5
-rw-r--r--drivers/gpu/drm/adp/adp-mipi.c276
-rw-r--r--drivers/gpu/drm/adp/adp_drv.c612
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c70
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c124
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c581
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h105
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c289
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c196
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c140
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c65
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c69
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c84
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c87
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c308
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c105
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c124
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c201
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c465
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c490
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c169
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c108
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h70
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_dp.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_dp.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_ih.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cikd.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_ih.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c162
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c104
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm126
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c109
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c97
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c122
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c93
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_ih.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_1.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v7_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v11_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c148
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c346
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c255
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h74
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c86
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v12_0.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nvd.h208
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v14_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c264
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c76
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_enums.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_ih.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sid.h369
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15d.h139
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc21.c46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc24.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ta_ras_if.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v12_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c201
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c221
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c1179
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c675
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c671
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c639
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c664
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c598
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c579
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_sdma_pkt_open.h70
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c49
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Makefile1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h677
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm82
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c38
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c28
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_debug.c26
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c20
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c337
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c75
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c43
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c43
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c43
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c45
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c77
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c43
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c27
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c36
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c88
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pasid.c70
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h45
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c154
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c41
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_queue.c12
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c62
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c73
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h18
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c400
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h31
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c44
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c16
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c78
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h6
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c9
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/dc_common.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c56
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c104
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c104
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c106
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c104
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.c104
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c104
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c50
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c193
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_debug.c120
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c78
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c311
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c139
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h97
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_plane.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_spl_translate.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h94
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h41
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_stream_encoder.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c50
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.h23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services_types.h26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c76
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c134
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c12413
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c354
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h148
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce100/dce100_hwseq.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce100/dce100_hwseq.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce120/dce120_hwseq.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce80/dce80_hwseq.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c109
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c99
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/optc.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/transform.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/Makefile9
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c408
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c71
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c60
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.c32
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_factory.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c141
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c209
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h28
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c43
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h28
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h213
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c71
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/Makefile8
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c42
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c2171
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.h73
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/Makefile (renamed from drivers/gpu/drm/amd/display/dc/spl/Makefile)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c (renamed from drivers/gpu/drm/amd/display/dc/spl/dc_spl.c)102
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl.h27
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_filters.c (renamed from drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_filters.h (renamed from drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_isharp_filters.c (renamed from drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c)452
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_isharp_filters.h (renamed from drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h)18
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_easf_filters.c (renamed from drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.c)1058
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_easf_filters.h (renamed from drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.h)9
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_filters.c (renamed from drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.c)232
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_filters.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h (renamed from drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h)12
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_custom_float.c (renamed from drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_custom_float.h (renamed from drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_debug.h (renamed from drivers/gpu/drm/amd/display/dc/spl/spl_debug.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c (renamed from drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.h (renamed from drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h)4
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_os_types.h (renamed from drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h12
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h131
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/Makefile1
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c85
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c111
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c98
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c97
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn36.c34
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn36.h13
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c144
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h5
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c19
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h1
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_interface.h5
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c137
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h26
-rw-r--r--drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c4
-rw-r--r--drivers/gpu/drm/amd/include/amd_acpi.h4
-rw-r--r--drivers/gpu/drm/amd/include/amd_cper.h269
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h20
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_6_0_offset.h15485
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_6_0_sh_mask.h61940
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_5_0_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h32
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h48
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_offset.h37
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h7
-rw-r--r--drivers/gpu/drm/amd/include/mes_v11_api_def.h3
-rw-r--r--drivers/gpu/drm/amd/include/mes_v12_api_def.h42
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c27
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c670
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h4
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c7
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c10
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c78
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h3
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c71
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h281
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h143
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h115
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h9
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h13
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c12
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c6
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c21
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c490
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c306
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h24
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c11
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c54
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c104
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c3
-rw-r--r--drivers/gpu/drm/arm/Kconfig1
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c2
-rw-r--r--drivers/gpu/drm/arm/malidp_mw.c2
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_drv.c4
-rw-r--r--drivers/gpu/drm/ast/Makefile2
-rw-r--r--drivers/gpu/drm/ast/ast_cursor.c309
-rw-r--r--drivers/gpu/drm/ast/ast_dp.c265
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c8
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h73
-rw-r--r--drivers/gpu/drm/ast/ast_main.c215
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c588
-rw-r--r--drivers/gpu/drm/ast/ast_post.c51
-rw-r--r--drivers/gpu/drm/ast/ast_reg.h31
-rw-r--r--drivers/gpu/drm/ast/ast_tables.h187
-rw-r--r--drivers/gpu/drm/ast/ast_vbios.c241
-rw-r--r--drivers/gpu/drm/ast/ast_vbios.h108
-rw-r--r--drivers/gpu/drm/bridge/Kconfig8
-rw-r--r--drivers/gpu/drm/bridge/Makefile4
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_audio.c5
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c16
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c29
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c8
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c19
-rw-r--r--drivers/gpu/drm/bridge/chipone-icn6211.c7
-rw-r--r--drivers/gpu/drm/bridge/fsl-ldb.c5
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c7
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qm-ldb.c10
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c21
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c5
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c10
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c8
-rw-r--r--drivers/gpu/drm/bridge/ite-it6263.c11
-rw-r--r--drivers/gpu/drm/bridge/ite-it6505.c16
-rw-r--r--drivers/gpu/drm/bridge/ite-it66121.c5
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9211.c5
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611.c18
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c7
-rw-r--r--drivers/gpu/drm/bridge/microchip-lvds.c3
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.c11
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c1
-rw-r--r--drivers/gpu/drm/bridge/panel.c16
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c1
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8640.c5
-rw-r--r--drivers/gpu/drm/bridge/samsung-dsim.c8
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c6
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c494
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c9
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c6
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c6
-rw-r--r--drivers/gpu/drm/bridge/tc358762.c11
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c20
-rw-r--r--drivers/gpu/drm/bridge/tc358775.c1
-rw-r--r--drivers/gpu/drm/bridge/tda998x_drv.c (renamed from drivers/gpu/drm/i2c/tda998x_drv.c)49
-rw-r--r--drivers/gpu/drm/bridge/ti-dlpc3433.c6
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi83.c153
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c19
-rw-r--r--drivers/gpu/drm/bridge/ti-tdp158.c6
-rw-r--r--drivers/gpu/drm/ci/build.sh2
-rw-r--r--drivers/gpu/drm/ci/build.yml104
-rw-r--r--drivers/gpu/drm/ci/container.yml22
-rw-r--r--drivers/gpu/drm/ci/gitlab-ci.yml197
-rwxr-xr-xdrivers/gpu/drm/ci/igt_runner.sh13
-rw-r--r--drivers/gpu/drm/ci/image-tags.yml11
-rwxr-xr-xdrivers/gpu/drm/ci/lava-submit.sh100
-rw-r--r--drivers/gpu/drm/ci/test.yml37
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt8
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt21
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-fails.txt20
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt14
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-fails.txt8
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-fails.txt22
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-fails.txt31
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-skips.txt298
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt18
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt15
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt112
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt55
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt13
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-fails.txt15
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt12
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt3
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt14
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt26
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt13
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt56
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-fails.txt10
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-skips.txt543
-rw-r--r--drivers/gpu/drm/clients/drm_log.c4
-rw-r--r--drivers/gpu/drm/display/drm_bridge_connector.c2
-rw-r--r--drivers/gpu/drm/display/drm_dp_dual_mode_helper.c4
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c119
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c71
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_state_helper.c2
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c333
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c41
-rw-r--r--drivers/gpu/drm/drm_bridge.c79
-rw-r--r--drivers/gpu/drm/drm_buddy.c11
-rw-r--r--drivers/gpu/drm/drm_client_event.c41
-rw-r--r--drivers/gpu/drm/drm_connector.c4
-rw-r--r--drivers/gpu/drm/drm_crtc.c20
-rw-r--r--drivers/gpu/drm/drm_crtc_helper_internal.h2
-rw-r--r--drivers/gpu/drm/drm_damage_helper.c2
-rw-r--r--drivers/gpu/drm/drm_debugfs.c2
-rw-r--r--drivers/gpu/drm/drm_draw.c2
-rw-r--r--drivers/gpu/drm/drm_drv.c68
-rw-r--r--drivers/gpu/drm/drm_fb_dma_helper.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c20
-rw-r--r--drivers/gpu/drm/drm_file.c26
-rw-r--r--drivers/gpu/drm/drm_format_helper.c123
-rw-r--r--drivers/gpu/drm/drm_gem.c4
-rw-r--r--drivers/gpu/drm/drm_gem_dma_helper.c4
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c12
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c30
-rw-r--r--drivers/gpu/drm/drm_gpusvm.c2250
-rw-r--r--drivers/gpu/drm/drm_managed.c8
-rw-r--r--drivers/gpu/drm/drm_mipi_dbi.c2
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c54
-rw-r--r--drivers/gpu/drm/drm_of.c2
-rw-r--r--drivers/gpu/drm/drm_panel.c5
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c46
-rw-r--r--drivers/gpu/drm/drm_panic.c7
-rw-r--r--drivers/gpu/drm/drm_panic_qr.rs137
-rw-r--r--drivers/gpu/drm/drm_prime.c8
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c35
-rw-r--r--drivers/gpu/drm/drm_writeback.c186
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c22
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c2
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c5
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c2
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.c5
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c2
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_drv.c2
-rw-r--r--drivers/gpu/drm/i2c/Kconfig36
-rw-r--r--drivers/gpu/drm/i2c/Makefile10
-rw-r--r--drivers/gpu/drm/i915/Makefile2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7017.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7xxx.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ivch.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ns2501.c4
-rw-r--r--drivers/gpu/drm/i915/display/dvo_sil164.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_tfp410.c2
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c114
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.h14
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.c172
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.h6
-rw-r--r--drivers/gpu/drm/i915/display/hsw_ips.c26
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.c372
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.h10
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.c24
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c40
-rw-r--r--drivers/gpu/drm/i915/display/intel_acpi.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c444
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c500
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c496
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c74
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c204
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_cmtg.c189
-rw-r--r--drivers/gpu/drm/i915/display/intel_cmtg.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_cmtg_regs.h21
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.c180
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy_regs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c46
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c46
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.c78
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc_state_dump.c28
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.c212
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.c297
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h33
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c512
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c58
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c2230
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h67
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_core.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c291
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.c38
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.c789
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_params.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c134
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.h78
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_map.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.c28
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_reset.c75
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_reset.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rps.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rps.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_trace.h150
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h100
-rw-r--r--drivers/gpu/drm/i915/display/intel_dkl_phy.c62
-rw-r--r--drivers/gpu/drm/i915/display/intel_dkl_phy.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc_wl.c78
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c370
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c183
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_hdcp.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c31
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c514
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_test.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_tunnel.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_tunnel.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.c36
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.c41
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c1044
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.h29
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.c29
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.c45
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c126
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo_dev.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c410
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.h31
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_bo.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_bo.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.c33
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c270
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc_regs.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c352
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.h21
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.c483
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.h23
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.c183
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c291
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.h11
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c31
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c31
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug_irq.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_link_bw.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.c118
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.c30
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.h25
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.c74
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_verify.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c28
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_display.c84
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_refclk.c34
-rw-r--r--drivers/gpu/drm/i915/display/intel_pfit.c170
-rw-r--r--drivers/gpu/drm/i915/display/intel_pfit.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_pfit_regs.h79
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane_initial.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane_initial.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_pmdemand.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c34
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c230
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr_regs.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c302
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo_regs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c364
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.h17
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.c114
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c178
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c90
-rw-r--r--drivers/gpu/drm/i915/display/intel_tdf.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.c86
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c249
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c170
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_wm.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_wm.h1
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.c390
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.h7
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c701
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.h14
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c169
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.h7
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c18
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.c14
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.h16
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context_types.h6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_create.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c17
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c26
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_region.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_context.c38
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_context.h3
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c3
-rw-r--r--drivers/gpu/drm/i915/gt/gen2_engine_cs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c89
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_mcr.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_regs.h139
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c47
-rw-r--r--drivers/gpu/drm/i915/gt/intel_migrate.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c52
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset_types.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps_types.h4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sa_media.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.c58
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c21
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_execlists.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rc6.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rps.c15
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_slpc.c17
-rw-r--r--drivers/gpu/drm/i915/gt/shaders/README6
-rw-r--r--drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm2
-rw-r--r--drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm2
-rw-r--r--drivers/gpu/drm/i915/gt/shmem_utils.c8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c11
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c104
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c11
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.c16
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c15
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.c117
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c68
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h14
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c23
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c11
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c6
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c3
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c23
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h1
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h2
-rw-r--r--drivers/gpu/drm/i915/i915_gtt_view_types.h59
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c69
-rw-r--r--drivers/gpu/drm/i915/i915_irq.h4
-rw-r--r--drivers/gpu/drm/i915/i915_module.c4
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c24
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c123
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.h13
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h145
-rw-r--r--drivers/gpu/drm/i915/i915_reg_defs.h8
-rw-r--r--drivers/gpu/drm/i915/i915_request.c5
-rw-r--r--drivers/gpu/drm/i915/i915_request.h4
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c10
-rw-r--r--drivers/gpu/drm/i915/i915_vma_types.h52
-rw-r--r--drivers/gpu/drm/i915/intel_clock_gating.c2
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c2
-rw-r--r--drivers/gpu/drm/i915/intel_gvt_mmio_table.c3
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c2
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.h2
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c5
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.c6
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.h4
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h2
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_types.h2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem.c9
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c21
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_vma.c15
-rw-r--r--drivers/gpu/drm/i915/soc/intel_pch.c44
-rw-r--r--drivers/gpu/drm/i915/soc/intel_pch.h45
-rw-r--r--drivers/gpu/drm/imagination/pvr_job.c12
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.c18
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-tve.c2
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm-drv.c4
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c16
-rw-r--r--drivers/gpu/drm/loongson/lsdc_plane.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_crtc.c7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_color.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_gamma.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_merge.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_rdma.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dp.c12
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c323
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi_regs.h9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c10
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c18
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c117
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mdp_rdma.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_plane.c2
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_cvbs.c5
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_dsi.c5
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_hdmi.c5
-rw-r--r--drivers/gpu/drm/mgag200/Makefile1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h3
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200eh5.c204
-rw-r--r--drivers/gpu/drm/msm/Kconfig2
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_catalog.c29
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c72
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c8
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c15
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h17
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c10
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h19
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c140
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c233
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c468
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c16
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c40
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c30
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h15
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c21
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h13
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c298
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h14
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c3
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c25
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c47
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.c24
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.h6
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c193
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c32
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h5
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c1
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c1
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c1
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c1
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c21
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c120
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h31
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_audio.c124
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c324
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c15
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c1
-rw-r--r--drivers/gpu/drm/msm/msm_dsc_helper.h11
-rw-r--r--drivers/gpu/drm/msm/msm_fence.c3
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c9
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h8
-rw-r--r--drivers/gpu/drm/msm/msm_io_utils.c3
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c54
-rw-r--r--drivers/gpu/drm/msm/msm_kms.c19
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h10
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h4
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c17
-rw-r--r--drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml1
-rw-r--r--drivers/gpu/drm/msm/registers/display/hdmi.xml2
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig18
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c17
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/i2c/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/i2c/ch7006_drv.c (renamed from drivers/gpu/drm/i2c/ch7006_drv.c)32
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/i2c/ch7006_mode.c (renamed from drivers/gpu/drm/i2c/ch7006_mode.c)8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/i2c/ch7006_priv.h (renamed from drivers/gpu/drm/i2c/ch7006_priv.h)11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/i2c/sil164_drv.c (renamed from drivers/gpu/drm/i2c/sil164_drv.c)35
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/nouveau_i2c_encoder.c (renamed from drivers/gpu/drm/drm_encoder_slave.c)95
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c24
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c2
-rw-r--r--drivers/gpu/drm/nouveau/include/dispnv04/i2c/ch7006.h87
-rw-r--r--drivers/gpu/drm/nouveau/include/dispnv04/i2c/encoder_i2c.h220
-rw-r--r--drivers/gpu/drm/nouveau/include/dispnv04/i2c/sil164.h64
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/ioctl.h32
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sched.c16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvif/object.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c539
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c14
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.c17
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.h1
-rw-r--r--drivers/gpu/drm/panel/Kconfig31
-rw-r--r--drivers/gpu/drm/panel/Makefile3
-rw-r--r--drivers/gpu/drm/panel/panel-ebbg-ft8719.c67
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c23
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx83102.c380
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9882t.c2
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm67200.c499
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c91
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c59
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c64
-rw-r--r--drivers/gpu/drm/panel/panel-sony-td4353-jdi.c107
-rw-r--r--drivers/gpu/drm/panel/panel-summit.c132
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-r66451.c181
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-rm692e5.c442
-rw-r--r--drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c178
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c20
-rw-r--r--drivers/gpu/drm/panthor/panthor_device.c22
-rw-r--r--drivers/gpu/drm/panthor/panthor_drv.c14
-rw-r--r--drivers/gpu/drm/panthor/panthor_fw.c10
-rw-r--r--drivers/gpu/drm/panthor/panthor_fw.h6
-rw-r--r--drivers/gpu/drm/panthor/panthor_gem.h5
-rw-r--r--drivers/gpu/drm/panthor/panthor_heap.c54
-rw-r--r--drivers/gpu/drm/panthor/panthor_heap.h2
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.c114
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.h3
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.c115
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.h3
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c2
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c34
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c2
-rw-r--r--drivers/gpu/drm/radeon/si.c2
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c5
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c4
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c5
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c114
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c2
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c16
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c250
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c4
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c6
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.c1592
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.h278
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c80
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop2_reg.c1798
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c36
-rw-r--r--drivers/gpu/drm/scheduler/sched_fence.c2
-rw-r--r--drivers/gpu/drm/scheduler/sched_internal.h91
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c131
-rw-r--r--drivers/gpu/drm/solomon/ssd130x-spi.c7
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.c6
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c2
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c12
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c2
-rw-r--r--drivers/gpu/drm/stm/ltdc.c4
-rw-r--r--drivers/gpu/drm/stm/lvds.c5
-rw-r--r--drivers/gpu/drm/tegra/dc.c3
-rw-r--r--drivers/gpu/drm/tegra/dsi.c2
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c2
-rw-r--r--drivers/gpu/drm/tegra/sor.c2
-rw-r--r--drivers/gpu/drm/tests/Makefile1
-rw-r--r--drivers/gpu/drm/tests/drm_atomic_state_test.c379
-rw-r--r--drivers/gpu/drm/tests/drm_buddy_test.c30
-rw-r--r--drivers/gpu/drm/tests/drm_format_helper_test.c81
-rw-r--r--drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c254
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_helpers.c41
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc_regs.h2
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.c2
-rw-r--r--drivers/gpu/drm/tiny/Kconfig12
-rw-r--r--drivers/gpu/drm/tiny/Makefile1
-rw-r--r--drivers/gpu/drm/tiny/appletbdrm.c840
-rw-r--r--drivers/gpu/drm/tiny/arcpgu.c4
-rw-r--r--drivers/gpu/drm/tiny/repaper.c4
-rw-r--r--drivers/gpu/drm/ttm/Makefile2
-rw-r--r--drivers/gpu/drm/ttm/ttm_backup.c207
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c250
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c718
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c50
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c83
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c25
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h1
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c90
-rw-r--r--drivers/gpu/drm/vboxvideo/hgsmi_base.c37
-rw-r--r--drivers/gpu/drm/vboxvideo/vboxvideo_guest.h2
-rw-r--r--drivers/gpu/drm/vc4/Kconfig1
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c46
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c33
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h27
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c9
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c183
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c111
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c11
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c41
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h11
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c14
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c96
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c36
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c154
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vram.c9
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c39
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c24
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h11
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.c32
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c61
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c1
-rw-r--r--drivers/gpu/drm/vkms/vkms_writeback.c37
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c4
-rw-r--r--drivers/gpu/drm/xe/Kconfig24
-rw-r--r--drivers/gpu/drm/xe/Kconfig.profile1
-rw-r--r--drivers/gpu/drm/xe/Makefile28
-rw-r--r--drivers/gpu/drm/xe/abi/gsc_pxp_commands_abi.h41
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_abi.h1
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h3
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h15
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h5
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h8
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_gtt_view_types.h7
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h13
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h3
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_vma_types.h74
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h2
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h17
-rw-r--r--drivers/gpu/drm/xe/display/ext/i915_irq.c23
-rw-r--r--drivers/gpu/drm/xe/display/intel_bo.c2
-rw-r--r--drivers/gpu/drm/xe/display/intel_fb_bo.c11
-rw-r--r--drivers/gpu/drm/xe/display/intel_fbdev_fb.c4
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.c199
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.h11
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_rps.c2
-rw-r--r--drivers/gpu/drm/xe/display/xe_fb_pin.c68
-rw-r--r--drivers/gpu/drm/xe/display/xe_plane_initial.c21
-rw-r--r--drivers/gpu/drm/xe/display/xe_tdf.c6
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_instr_defs.h1
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_mfx_commands.h28
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_mi_commands.h5
-rw-r--r--drivers/gpu/drm/xe/regs/xe_engine_regs.h1
-rw-r--r--drivers/gpu/drm/xe/regs/xe_eu_stall_regs.h29
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gt_regs.h13
-rw-r--r--drivers/gpu/drm/xe/regs/xe_irq_regs.h8
-rw-r--r--drivers/gpu/drm/xe/regs/xe_mchbar_regs.h3
-rw-r--r--drivers/gpu/drm/xe/regs/xe_pcode_regs.h2
-rw-r--r--drivers/gpu/drm/xe/regs/xe_pxp_regs.h23
-rw-r--r--drivers/gpu/drm/xe/regs/xe_reg_defs.h28
-rw-r--r--drivers/gpu/drm/xe/regs/xe_regs.h4
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo.c6
-rw-r--r--drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c334
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci.c26
-rw-r--r--drivers/gpu/drm/xe/tests/xe_rtp_test.c2
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c431
-rw-r--r--drivers/gpu/drm/xe/xe_bo.h66
-rw-r--r--drivers/gpu/drm/xe/xe_bo_types.h10
-rw-r--r--drivers/gpu/drm/xe/xe_debugfs.c6
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.c8
-rw-r--r--drivers/gpu/drm/xe/xe_device.c247
-rw-r--r--drivers/gpu/drm/xe/xe_device.h5
-rw-r--r--drivers/gpu/drm/xe/xe_device_sysfs.c6
-rw-r--r--drivers/gpu/drm/xe/xe_device_types.h66
-rw-r--r--drivers/gpu/drm/xe/xe_dma_buf.c2
-rw-r--r--drivers/gpu/drm/xe/xe_drm_client.c8
-rw-r--r--drivers/gpu/drm/xe/xe_eu_stall.c960
-rw-r--r--drivers/gpu/drm/xe/xe_eu_stall.h24
-rw-r--r--drivers/gpu/drm/xe/xe_exec.c6
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.c77
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.h5
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue_types.h10
-rw-r--r--drivers/gpu/drm/xe/xe_execlist.c17
-rw-r--r--drivers/gpu/drm/xe/xe_gen_wa_oob.c6
-rw-r--r--drivers/gpu/drm/xe/xe_gpu_scheduler.c17
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.c9
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_proxy.c63
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_proxy.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_types.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c73
-rw-r--r--drivers/gpu/drm/xe/xe_gt.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_clock.c57
-rw-r--r--drivers/gpu/drm/xe/xe_gt_debugfs.c11
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.c23
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle_types.h3
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.c8
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.c29
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.c49
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c161
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c50
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c28
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h10
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.c28
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats.c8
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c22
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.c16
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.h13
-rw-r--r--drivers/gpu/drm/xe/xe_gt_types.h15
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c18
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads.c2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_buf.c176
-rw-r--r--drivers/gpu/drm/xe/xe_guc_buf.h47
-rw-r--r--drivers/gpu/drm/xe/xe_guc_buf_types.h28
-rw-r--r--drivers/gpu/drm/xe/xe_guc_debugfs.c28
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity.c373
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity.h19
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity_types.h92
-rw-r--r--drivers/gpu/drm/xe/xe_guc_fwif.h19
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.c8
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c150
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_relay.c2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c10
-rw-r--r--drivers/gpu/drm/xe/xe_guc_types.h7
-rw-r--r--drivers/gpu/drm/xe/xe_heci_gsc.c40
-rw-r--r--drivers/gpu/drm/xe/xe_heci_gsc.h3
-rw-r--r--drivers/gpu/drm/xe/xe_hmm.c6
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.c6
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_group.c1
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.c91
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.h4
-rw-r--r--drivers/gpu/drm/xe/xe_irq.c20
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt.c4
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.c18
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.h7
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c175
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.h10
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.c77
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.h4
-rw-r--r--drivers/gpu/drm/xe/xe_mocs.c4
-rw-r--r--drivers/gpu/drm/xe/xe_module.c12
-rw-r--r--drivers/gpu/drm/xe/xe_module.h2
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c113
-rw-r--r--drivers/gpu/drm/xe/xe_oa.h4
-rw-r--r--drivers/gpu/drm/xe/xe_observation.c14
-rw-r--r--drivers/gpu/drm/xe/xe_pci.c300
-rw-r--r--drivers/gpu/drm/xe/xe_pci_sriov.c51
-rw-r--r--drivers/gpu/drm/xe/xe_pci_types.h20
-rw-r--r--drivers/gpu/drm/xe/xe_pcode_api.h14
-rw-r--r--drivers/gpu/drm/xe/xe_pm.c54
-rw-r--r--drivers/gpu/drm/xe/xe_pmu.c531
-rw-r--r--drivers/gpu/drm/xe/xe_pmu.h18
-rw-r--r--drivers/gpu/drm/xe/xe_pmu_types.h39
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c399
-rw-r--r--drivers/gpu/drm/xe/xe_pt.h5
-rw-r--r--drivers/gpu/drm/xe/xe_pt_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_pxp.c919
-rw-r--r--drivers/gpu/drm/xe/xe_pxp.h35
-rw-r--r--drivers/gpu/drm/xe/xe_pxp_debugfs.c120
-rw-r--r--drivers/gpu/drm/xe/xe_pxp_debugfs.h13
-rw-r--r--drivers/gpu/drm/xe/xe_pxp_submit.c588
-rw-r--r--drivers/gpu/drm/xe/xe_pxp_submit.h22
-rw-r--r--drivers/gpu/drm/xe/xe_pxp_types.h135
-rw-r--r--drivers/gpu/drm/xe/xe_query.c82
-rw-r--r--drivers/gpu/drm/xe/xe_reg_whitelist.c4
-rw-r--r--drivers/gpu/drm/xe/xe_res_cursor.h123
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops.c34
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.c6
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.h2
-rw-r--r--drivers/gpu/drm/xe/xe_sa.c51
-rw-r--r--drivers/gpu/drm/xe/xe_sa.h30
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_shrinker.c258
-rw-r--r--drivers/gpu/drm/xe/xe_shrinker.h18
-rw-r--r--drivers/gpu/drm/xe/xe_sriov.c2
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode.c237
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode.h17
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode_types.h35
-rw-r--r--drivers/gpu/drm/xe/xe_svm.c946
-rw-r--r--drivers/gpu/drm/xe/xe_svm.h150
-rw-r--r--drivers/gpu/drm/xe/xe_sync.c1
-rw-r--r--drivers/gpu/drm/xe/xe_tile.c23
-rw-r--r--drivers/gpu/drm/xe/xe_tile.h1
-rw-r--r--drivers/gpu/drm/xe/xe_trace.h30
-rw-r--r--drivers/gpu/drm/xe/xe_trace_bo.h19
-rw-r--r--drivers/gpu/drm/xe/xe_trace_guc.h49
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c17
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h2
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_sys_mgr.c3
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr.c3
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_tuning.c84
-rw-r--r--drivers/gpu/drm/xe/xe_tuning.h3
-rw-r--r--drivers/gpu/drm/xe/xe_uc.c16
-rw-r--r--drivers/gpu/drm/xe/xe_uc.h1
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c543
-rw-r--r--drivers/gpu/drm/xe/xe_vm.h23
-rw-r--r--drivers/gpu/drm/xe/xe_vm_doc.h2
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h58
-rw-r--r--drivers/gpu/drm/xe/xe_wa.c39
-rw-r--r--drivers/gpu/drm/xe/xe_wa_oob.rules11
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.c165
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp_audio.c4
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.c2
-rw-r--r--drivers/gpu/host1x/debug.c9
-rw-r--r--drivers/gpu/host1x/debug.h1
-rw-r--r--drivers/gpu/host1x/dev.c6
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c38
-rw-r--r--drivers/gpu/ipu-v3/ipu-cpmem.c23
-rw-r--r--drivers/gpu/ipu-v3/ipu-csi.c108
-rw-r--r--drivers/gpu/ipu-v3/ipu-ic.c73
-rw-r--r--drivers/gpu/ipu-v3/ipu-image-convert.c48
-rw-r--r--drivers/gpu/ipu-v3/ipu-prv.h2
-rw-r--r--drivers/gpu/ipu-v3/ipu-vdi.c11
-rw-r--r--drivers/gpu/nova-core/Kconfig14
-rw-r--r--drivers/gpu/nova-core/Makefile3
-rw-r--r--drivers/gpu/nova-core/driver.rs47
-rw-r--r--drivers/gpu/nova-core/firmware.rs45
-rw-r--r--drivers/gpu/nova-core/gpu.rs199
-rw-r--r--drivers/gpu/nova-core/nova_core.rs20
-rw-r--r--drivers/gpu/nova-core/regs.rs55
-rw-r--r--drivers/gpu/nova-core/util.rs21
-rw-r--r--drivers/hid/Kconfig40
-rw-r--r--drivers/hid/Makefile4
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_common.h1
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.c58
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c50
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.h3
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c2
-rw-r--r--drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c75
-rw-r--r--drivers/hid/bpf/progs/Huion__KeydialK20.bpf.c531
-rw-r--r--drivers/hid/bpf/progs/TUXEDO__Sirius-16-Gen1-and-Gen2.bpf.c47
-rw-r--r--drivers/hid/bpf/progs/XPPen__ACK05.bpf.c330
-rw-r--r--drivers/hid/bpf/progs/XPPen__ArtistPro16Gen2.bpf.c44
-rw-r--r--drivers/hid/bpf/progs/hid_bpf_async.h219
-rw-r--r--drivers/hid/bpf/progs/hid_bpf_helpers.h19
-rw-r--r--drivers/hid/hid-appletb-bl.c204
-rw-r--r--drivers/hid/hid-appletb-kbd.c507
-rw-r--r--drivers/hid/hid-core.c6
-rw-r--r--drivers/hid/hid-google-hammer.c1
-rw-r--r--drivers/hid/hid-ids.h37
-rw-r--r--drivers/hid/hid-lenovo.c8
-rw-r--r--drivers/hid/hid-lg-g15.c146
-rw-r--r--drivers/hid/hid-plantronics.c144
-rw-r--r--drivers/hid/hid-quirks.c24
-rw-r--r--drivers/hid/hid-steam.c7
-rw-r--r--drivers/hid/hid-universal-pidff.c202
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c14
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c16
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h4
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c2
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c2
-rw-r--r--drivers/hid/usbhid/hid-core.c1
-rw-r--r--drivers/hid/usbhid/hid-pidff.c569
-rw-r--r--drivers/hid/usbhid/hid-pidff.h33
-rw-r--r--drivers/hid/usbhid/usbkbd.c2
-rw-r--r--drivers/hid/wacom_sys.c35
-rw-r--r--drivers/hid/wacom_wac.c8
-rw-r--r--drivers/hid/wacom_wac.h7
-rw-r--r--drivers/hsi/clients/ssi_protocol.c1
-rw-r--r--drivers/hv/Kconfig17
-rw-r--r--drivers/hv/Makefile4
-rw-r--r--drivers/hv/hv.c94
-rw-r--r--drivers/hv/hv_common.c198
-rw-r--r--drivers/hv/hv_proc.c195
-rw-r--r--drivers/hv/mshv.h30
-rw-r--r--drivers/hv/mshv_common.c161
-rw-r--r--drivers/hv/mshv_eventfd.c833
-rw-r--r--drivers/hv/mshv_eventfd.h71
-rw-r--r--drivers/hv/mshv_irq.c124
-rw-r--r--drivers/hv/mshv_portid_table.c83
-rw-r--r--drivers/hv/mshv_root.h311
-rw-r--r--drivers/hv/mshv_root_hv_call.c849
-rw-r--r--drivers/hv/mshv_root_main.c2307
-rw-r--r--drivers/hv/mshv_synic.c665
-rw-r--r--drivers/hv/vmbus_drv.c67
-rw-r--r--drivers/hwmon/Kconfig25
-rw-r--r--drivers/hwmon/Makefile2
-rw-r--r--drivers/hwmon/acpi_power_meter.c856
-rw-r--r--drivers/hwmon/asus-ec-sensors.c10
-rw-r--r--drivers/hwmon/cgbc-hwmon.c304
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c5
-rw-r--r--drivers/hwmon/emc2305.c38
-rw-r--r--drivers/hwmon/gpio-fan.c16
-rw-r--r--drivers/hwmon/gsc-hwmon.c1
-rw-r--r--drivers/hwmon/hp-wmi-sensors.c4
-rw-r--r--drivers/hwmon/htu31.c350
-rw-r--r--drivers/hwmon/hwmon.c4
-rw-r--r--drivers/hwmon/ina3221.c9
-rw-r--r--drivers/hwmon/isl28022.c44
-rw-r--r--drivers/hwmon/k10temp.c2
-rw-r--r--drivers/hwmon/lm90.c82
-rw-r--r--drivers/hwmon/ltc4282.c44
-rw-r--r--drivers/hwmon/nct6683.c3
-rw-r--r--drivers/hwmon/nct6775-core.c4
-rw-r--r--drivers/hwmon/ntc_thermistor.c15
-rw-r--r--drivers/hwmon/pmbus/Kconfig15
-rw-r--r--drivers/hwmon/pmbus/Makefile1
-rw-r--r--drivers/hwmon/pmbus/ina233.c191
-rw-r--r--drivers/hwmon/pmbus/ltc2978.c69
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c371
-rw-r--r--drivers/hwmon/pt5161l.c46
-rw-r--r--drivers/hwmon/sg2042-mcu.c42
-rw-r--r--drivers/hwmon/sht3x.c67
-rw-r--r--drivers/hwmon/tps23861.c31
-rw-r--r--drivers/hwmon/xgene-hwmon.c2
-rw-r--r--drivers/hwspinlock/hwspinlock_core.c94
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c2
-rw-r--r--drivers/hwtracing/stm/heartbeat.c6
-rw-r--r--drivers/i2c/busses/i2c-ali1535.c12
-rw-r--r--drivers/i2c/busses/i2c-ali15x3.c12
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2-pci.c5
-rw-r--r--drivers/i2c/busses/i2c-imx.c4
-rw-r--r--drivers/i2c/busses/i2c-omap.c26
-rw-r--r--drivers/i2c/busses/i2c-sis630.c12
-rw-r--r--drivers/idle/Makefile5
-rw-r--r--drivers/idle/intel_idle.c49
-rw-r--r--drivers/iio/adc/ti-tsc2046.c4
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c4
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c2
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c2
-rw-r--r--drivers/iio/trigger/iio-trig-hrtimer.c4
-rw-r--r--drivers/infiniband/core/Makefile3
-rw-r--r--drivers/infiniband/core/cache.c6
-rw-r--r--drivers/infiniband/core/cma.c24
-rw-r--r--drivers/infiniband/core/counters.c52
-rw-r--r--drivers/infiniband/core/device.c20
-rw-r--r--drivers/infiniband/core/iwcm.c4
-rw-r--r--drivers/infiniband/core/mad.c38
-rw-r--r--drivers/infiniband/core/nldev.c18
-rw-r--r--drivers/infiniband/core/sysfs.c15
-rw-r--r--drivers/infiniband/core/ucaps.c267
-rw-r--r--drivers/infiniband/core/ucma.c4
-rw-r--r--drivers/infiniband/core/umem.c36
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c163
-rw-r--r--drivers/infiniband/core/uverbs_main.c2
-rw-r--r--drivers/infiniband/core/uverbs_std_types_device.c4
-rw-r--r--drivers/infiniband/core/verbs.c13
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h12
-rw-r--r--drivers/infiniband/hw/bnxt_re/debugfs.c215
-rw-r--r--drivers/infiniband/hw/bnxt_re/debugfs.h15
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.c92
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c36
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h6
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c2
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c10
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h6
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c9
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h12
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c3
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h3
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cm.c1
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c18
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h1
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c2
-rw-r--r--drivers/infiniband/hw/hfi1/init.c5
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c4
-rw-r--r--drivers/infiniband/hw/hfi1/qsfp.c20
-rw-r--r--drivers/infiniband/hw/hfi1/qsfp.h2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c16
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c22
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c2
-rw-r--r--drivers/infiniband/hw/irdma/Kconfig1
-rw-r--r--drivers/infiniband/hw/irdma/hw.c2
-rw-r--r--drivers/infiniband/hw/irdma/main.c46
-rw-r--r--drivers/infiniband/hw/irdma/main.h4
-rw-r--r--drivers/infiniband/hw/irdma/osdep.h6
-rw-r--r--drivers/infiniband/hw/irdma/puda.c19
-rw-r--r--drivers/infiniband/hw/irdma/puda.h5
-rw-r--r--drivers/infiniband/hw/irdma/utils.c47
-rw-r--r--drivers/infiniband/hw/mana/Makefile2
-rw-r--r--drivers/infiniband/hw/mana/ah.c58
-rw-r--r--drivers/infiniband/hw/mana/counters.c105
-rw-r--r--drivers/infiniband/hw/mana/counters.h44
-rw-r--r--drivers/infiniband/hw/mana/cq.c228
-rw-r--r--drivers/infiniband/hw/mana/device.c82
-rw-r--r--drivers/infiniband/hw/mana/main.c103
-rw-r--r--drivers/infiniband/hw/mana/mana_ib.h210
-rw-r--r--drivers/infiniband/hw/mana/mr.c105
-rw-r--r--drivers/infiniband/hw/mana/qp.c245
-rw-r--r--drivers/infiniband/hw/mana/shadow_queue.h115
-rw-r--r--drivers/infiniband/hw/mana/wr.c168
-rw-r--r--drivers/infiniband/hw/mlx5/Makefile2
-rw-r--r--drivers/infiniband/hw/mlx5/ah.c14
-rw-r--r--drivers/infiniband/hw/mlx5/counters.c195
-rw-r--r--drivers/infiniband/hw/mlx5/counters.h15
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c2
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c41
-rw-r--r--drivers/infiniband/hw/mlx5/devx.h5
-rw-r--r--drivers/infiniband/hw/mlx5/fs.c637
-rw-r--r--drivers/infiniband/hw/mlx5/fs.h17
-rw-r--r--drivers/infiniband/hw/mlx5/main.c77
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h23
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c52
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c10
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c28
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c5
-rw-r--r--drivers/infiniband/sw/rxe/Kconfig3
-rw-r--r--drivers/infiniband/sw/rxe/Makefile2
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c59
-rw-r--r--drivers/infiniband/sw/rxe/rxe.h38
-rw-r--r--drivers/infiniband/sw/rxe/rxe_icrc.c40
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h35
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_odp.c326
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c18
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c24
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h42
-rw-r--r--drivers/infiniband/sw/siw/Kconfig4
-rw-r--r--drivers/infiniband/sw/siw/siw.h41
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c22
-rw-r--r--drivers/infiniband/sw/siw/siw_qp.c54
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_rx.c23
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_tx.c44
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_netlink.c9
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c8
-rw-r--r--drivers/input/joystick/xpad.c39
-rw-r--r--drivers/input/keyboard/ipaq-micro-keys.c5
-rw-r--r--drivers/input/misc/Kconfig6
-rw-r--r--drivers/input/misc/iqs7222.c50
-rw-r--r--drivers/input/misc/max77693-haptic.c13
-rw-r--r--drivers/input/serio/gscps2.c6
-rw-r--r--drivers/input/serio/i8042-acpipnpio.h111
-rw-r--r--drivers/input/sparse-keymap.c3
-rw-r--r--drivers/input/touchscreen/ads7846.c2
-rw-r--r--drivers/input/touchscreen/goodix_berlin_core.c26
-rw-r--r--drivers/input/touchscreen/imagis.c9
-rw-r--r--drivers/input/touchscreen/wdt87xx_i2c.c2
-rw-r--r--drivers/iommu/Kconfig4
-rw-r--r--drivers/iommu/amd/amd_iommu.h8
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h30
-rw-r--r--drivers/iommu/amd/init.c65
-rw-r--r--drivers/iommu/amd/io_pgtable.c7
-rw-r--r--drivers/iommu/amd/io_pgtable_v2.c2
-rw-r--r--drivers/iommu/amd/iommu.c91
-rw-r--r--drivers/iommu/amd/pasid.c2
-rw-r--r--drivers/iommu/apple-dart.c22
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c11
-rw-r--r--drivers/iommu/dma-iommu.c82
-rw-r--r--drivers/iommu/hyperv-iommu.c8
-rw-r--r--drivers/iommu/intel/iommu.c239
-rw-r--r--drivers/iommu/intel/iommu.h28
-rw-r--r--drivers/iommu/intel/irq_remapping.c42
-rw-r--r--drivers/iommu/intel/pasid.c43
-rw-r--r--drivers/iommu/intel/prq.c2
-rw-r--r--drivers/iommu/intel/svm.c43
-rw-r--r--drivers/iommu/io-pgtable-dart.c2
-rw-r--r--drivers/iommu/iommu-priv.h5
-rw-r--r--drivers/iommu/iommu.c220
-rw-r--r--drivers/iommu/iommufd/device.c266
-rw-r--r--drivers/iommu/iommufd/fault.c130
-rw-r--r--drivers/iommu/iommufd/hw_pagetable.c5
-rw-r--r--drivers/iommu/iommufd/iommufd_private.h64
-rw-r--r--drivers/iommu/iommufd/main.c9
-rw-r--r--drivers/iommu/mtk_iommu_v1.c25
-rw-r--r--drivers/iommu/of_iommu.c13
-rw-r--r--drivers/iommu/rockchip-iommu.c61
-rw-r--r--drivers/iommu/s390-iommu.c138
-rw-r--r--drivers/iommu/tegra-smmu.c1
-rw-r--r--drivers/irqchip/Kconfig40
-rw-r--r--drivers/irqchip/Makefile2
-rw-r--r--drivers/irqchip/irq-apple-aic.c8
-rw-r--r--drivers/irqchip/irq-bcm2712-mip.c292
-rw-r--r--drivers/irqchip/irq-davinci-cp-intc.c57
-rw-r--r--drivers/irqchip/irq-gic-v2m.c6
-rw-r--r--drivers/irqchip/irq-gic-v3-its-msi-parent.c1
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c36
-rw-r--r--drivers/irqchip/irq-gic-v3-mbi.c13
-rw-r--r--drivers/irqchip/irq-imx-irqsteer.c14
-rw-r--r--drivers/irqchip/irq-imx-mu-msi.c1
-rw-r--r--drivers/irqchip/irq-loongson-pch-msi.c1
-rw-r--r--drivers/irqchip/irq-ls-scfg-msi.c5
-rw-r--r--drivers/irqchip/irq-meson-gpio.c48
-rw-r--r--drivers/irqchip/irq-msi-lib.c11
-rw-r--r--drivers/irqchip/irq-mvebu-gicp.c1
-rw-r--r--drivers/irqchip/irq-mvebu-odmi.c1
-rw-r--r--drivers/irqchip/irq-mvebu-sei.c1
-rw-r--r--drivers/irqchip/irq-renesas-rzg2l.c53
-rw-r--r--drivers/irqchip/irq-renesas-rzv2h.c198
-rw-r--r--drivers/irqchip/irq-riscv-aplic-direct.c24
-rw-r--r--drivers/irqchip/irq-riscv-imsic-early.c14
-rw-r--r--drivers/irqchip/irq-riscv-imsic-platform.c215
-rw-r--r--drivers/irqchip/irq-riscv-imsic-state.c151
-rw-r--r--drivers/irqchip/irq-riscv-imsic-state.h12
-rw-r--r--drivers/irqchip/irq-sg2042-msi.c249
-rw-r--r--drivers/irqchip/irq-sunxi-nmi.c85
-rw-r--r--drivers/leds/Kconfig16
-rw-r--r--drivers/leds/Makefile5
-rw-r--r--drivers/leds/led-core.c22
-rw-r--r--drivers/leds/leds-aw200xx.c2
-rw-r--r--drivers/leds/leds-lp8860.c2
-rw-r--r--drivers/leds/leds-max77705.c275
-rw-r--r--drivers/leds/leds-mlxcpld.c1
-rw-r--r--drivers/leds/leds-nic78bx.c16
-rw-r--r--drivers/leds/leds-pca955x.c359
-rw-r--r--drivers/leds/leds-st1202.c42
-rw-r--r--drivers/leds/rgb/leds-pwm-multicolor.c5
-rw-r--r--drivers/leds/rgb/leds-qcom-lpg.c23
-rw-r--r--drivers/leds/simatic/Kconfig (renamed from drivers/leds/simple/Kconfig)0
-rw-r--r--drivers/leds/simatic/Makefile (renamed from drivers/leds/simple/Makefile)0
-rw-r--r--drivers/leds/simatic/simatic-ipc-leds-gpio-apollolake.c (renamed from drivers/leds/simple/simatic-ipc-leds-gpio-apollolake.c)0
-rw-r--r--drivers/leds/simatic/simatic-ipc-leds-gpio-core.c (renamed from drivers/leds/simple/simatic-ipc-leds-gpio-core.c)0
-rw-r--r--drivers/leds/simatic/simatic-ipc-leds-gpio-elkhartlake.c (renamed from drivers/leds/simple/simatic-ipc-leds-gpio-elkhartlake.c)0
-rw-r--r--drivers/leds/simatic/simatic-ipc-leds-gpio-f7188x.c (renamed from drivers/leds/simple/simatic-ipc-leds-gpio-f7188x.c)0
-rw-r--r--drivers/leds/simatic/simatic-ipc-leds-gpio.h (renamed from drivers/leds/simple/simatic-ipc-leds-gpio.h)0
-rw-r--r--drivers/leds/simatic/simatic-ipc-leds.c (renamed from drivers/leds/simple/simatic-ipc-leds.c)0
-rw-r--r--drivers/leds/trigger/ledtrig-netdev.c16
-rw-r--r--drivers/leds/trigger/ledtrig-pattern.c4
-rw-r--r--drivers/mailbox/arm_mhu.c2
-rw-r--r--drivers/mailbox/arm_mhu_db.c2
-rw-r--r--drivers/mailbox/arm_mhuv2.c2
-rw-r--r--drivers/mailbox/exynos-mailbox.c2
-rw-r--r--drivers/mailbox/mailbox.c27
-rw-r--r--drivers/mailbox/mailbox.h2
-rw-r--r--drivers/mailbox/pcc.c113
-rw-r--r--drivers/mailbox/pl320-ipc.c14
-rw-r--r--drivers/mailbox/tegra-hsp.c72
-rw-r--r--drivers/md/dm-flakey.c2
-rw-r--r--drivers/md/dm-integrity.c12
-rw-r--r--drivers/md/dm-table.c7
-rw-r--r--drivers/md/md-bitmap.c14
-rw-r--r--drivers/md/md-cluster.c18
-rw-r--r--drivers/md/md-cluster.h6
-rw-r--r--drivers/md/md-linear.c15
-rw-r--r--drivers/md/md.c356
-rw-r--r--drivers/md/md.h62
-rw-r--r--drivers/md/raid0.c18
-rw-r--r--drivers/md/raid1-10.c6
-rw-r--r--drivers/md/raid1.c56
-rw-r--r--drivers/md/raid10.c66
-rw-r--r--drivers/md/raid5-cache.c31
-rw-r--r--drivers/md/raid5-ppl.c16
-rw-r--r--drivers/md/raid5.c91
-rw-r--r--drivers/media/cec/core/cec-api.c2
-rw-r--r--drivers/media/cec/core/cec-pin.c14
-rw-r--r--drivers/media/cec/i2c/Kconfig9
-rw-r--r--drivers/media/cec/i2c/Makefile1
-rw-r--r--drivers/media/cec/i2c/tda9950.c (renamed from drivers/gpu/drm/i2c/tda9950.c)0
-rw-r--r--drivers/media/common/siano/smsdvb-main.c2
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-core.c8
-rw-r--r--drivers/media/dvb-frontends/dibx000_common.c10
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c2
-rw-r--r--drivers/media/dvb-frontends/stv0299.c2
-rw-r--r--drivers/media/dvb-frontends/tda10048.c8
-rw-r--r--drivers/media/i2c/Kconfig12
-rw-r--r--drivers/media/i2c/Makefile1
-rw-r--r--drivers/media/i2c/adv7180.c34
-rw-r--r--drivers/media/i2c/adv748x/adv748x.h2
-rw-r--r--drivers/media/i2c/adv7511-v4l2.c6
-rw-r--r--drivers/media/i2c/ccs-pll.c16
-rw-r--r--drivers/media/i2c/ccs/ccs-core.c6
-rw-r--r--drivers/media/i2c/dw9719.c113
-rw-r--r--drivers/media/i2c/hi556.c46
-rw-r--r--drivers/media/i2c/imx214.c1287
-rw-r--r--drivers/media/i2c/imx219.c320
-rw-r--r--drivers/media/i2c/imx283.c37
-rw-r--r--drivers/media/i2c/imx319.c9
-rw-r--r--drivers/media/i2c/imx335.c21
-rw-r--r--drivers/media/i2c/imx415.c183
-rw-r--r--drivers/media/i2c/lt6911uxe.c707
-rw-r--r--drivers/media/i2c/ov08x40.c168
-rw-r--r--drivers/media/i2c/ov2740.c27
-rw-r--r--drivers/media/i2c/ov7251.c4
-rw-r--r--drivers/media/i2c/ov9282.c23
-rw-r--r--drivers/media/i2c/st-mipid02.c5
-rw-r--r--drivers/media/i2c/tc358746.c235
-rw-r--r--drivers/media/i2c/tda1997x.c7
-rw-r--r--drivers/media/i2c/vgxy61.c4
-rw-r--r--drivers/media/i2c/video-i2c.c12
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c30
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c33
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c3
-rw-r--r--drivers/media/pci/cx23885/cx23885.h1
-rw-r--r--drivers/media/pci/cx23885/netup-eeprom.c29
-rw-r--r--drivers/media/pci/cx23885/netup-eeprom.h1
-rw-r--r--drivers/media/pci/cx88/cx88-input.c3
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c11
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-dma.c33
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-dma.h3
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c12
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-queue.c2
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-video.c1
-rw-r--r--drivers/media/pci/intel/ivsc/mei_csi.c78
-rw-r--r--drivers/media/pci/mgb4/mgb4_cmt.c8
-rw-r--r--drivers/media/pci/mgb4/mgb4_core.c13
-rw-r--r--drivers/media/pci/mgb4/mgb4_core.h8
-rw-r--r--drivers/media/pci/mgb4/mgb4_regs.c1
-rw-r--r--drivers/media/platform/Kconfig1
-rw-r--r--drivers/media/platform/Makefile1
-rw-r--r--drivers/media/platform/allegro-dvt/allegro-core.c1
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c4
-rw-r--r--drivers/media/platform/chips-media/coda/coda-common.c1
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-hw.c2
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c31
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu.c8
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpuapi.c10
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c5
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c6
-rw-r--r--drivers/media/platform/nuvoton/npcm-video.c6
-rw-r--r--drivers/media/platform/nxp/imx-mipi-csis.c5
-rw-r--r--drivers/media/platform/nxp/imx8mq-mipi-csi2.c11
-rw-r--r--drivers/media/platform/qcom/Kconfig1
-rw-r--r--drivers/media/platform/qcom/Makefile1
-rw-r--r--drivers/media/platform/qcom/camss/Makefile2
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-4-1.c19
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-4-7.c42
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-780.c337
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-780.h25
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-gen2.c60
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.c258
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.h54
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c6
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c794
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.c4
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.h8
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-17x.c112
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-1.c9
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-7.c11
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-8.c11
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-480.c274
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-780.c159
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-gen1.c9
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.c275
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.h59
-rw-r--r--drivers/media/platform/qcom/camss/camss.c595
-rw-r--r--drivers/media/platform/qcom/camss/camss.h6
-rw-r--r--drivers/media/platform/qcom/iris/Kconfig13
-rw-r--r--drivers/media/platform/qcom/iris/Makefile31
-rw-r--r--drivers/media/platform/qcom/iris/iris_buffer.c623
-rw-r--r--drivers/media/platform/qcom/iris/iris_buffer.h117
-rw-r--r--drivers/media/platform/qcom/iris/iris_core.c96
-rw-r--r--drivers/media/platform/qcom/iris/iris_core.h111
-rw-r--r--drivers/media/platform/qcom/iris/iris_ctrls.c259
-rw-r--r--drivers/media/platform/qcom/iris/iris_ctrls.h22
-rw-r--r--drivers/media/platform/qcom/iris/iris_firmware.c116
-rw-r--r--drivers/media/platform/qcom/iris/iris_firmware.h15
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_common.c176
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_common.h155
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen1.h16
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c826
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen1_defines.h448
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c666
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2.h41
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_command.c957
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_defines.h161
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_packet.c292
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_packet.h125
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c934
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_queue.c318
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_queue.h182
-rw-r--r--drivers/media/platform/qcom/iris/iris_instance.h77
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_common.h186
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_sm8250.c149
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_sm8550.c266
-rw-r--r--drivers/media/platform/qcom/iris/iris_power.c140
-rw-r--r--drivers/media/platform/qcom/iris/iris_power.h13
-rw-r--r--drivers/media/platform/qcom/iris/iris_probe.c349
-rw-r--r--drivers/media/platform/qcom/iris/iris_resources.c131
-rw-r--r--drivers/media/platform/qcom/iris/iris_resources.h18
-rw-r--r--drivers/media/platform/qcom/iris/iris_state.c276
-rw-r--r--drivers/media/platform/qcom/iris/iris_state.h144
-rw-r--r--drivers/media/platform/qcom/iris/iris_utils.c90
-rw-r--r--drivers/media/platform/qcom/iris/iris_utils.h53
-rw-r--r--drivers/media/platform/qcom/iris/iris_vb2.c335
-rw-r--r--drivers/media/platform/qcom/iris/iris_vb2.h19
-rw-r--r--drivers/media/platform/qcom/iris/iris_vdec.c659
-rw-r--r--drivers/media/platform/qcom/iris/iris_vdec.h25
-rw-r--r--drivers/media/platform/qcom/iris/iris_vidc.c453
-rw-r--r--drivers/media/platform/qcom/iris/iris_vidc.h15
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu2.c38
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu3.c122
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_buffer.c270
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_buffer.h91
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_common.c369
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_common.h28
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_register_defines.h17
-rw-r--r--drivers/media/platform/qcom/venus/Kconfig2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_parser.c100
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c18
-rw-r--r--drivers/media/platform/qcom/venus/venc_ctrls.c9
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/cfe.c6
-rw-r--r--drivers/media/platform/renesas/rcar-csi2.c118
-rw-r--r--drivers/media/platform/renesas/rcar-isp.c162
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-dma.c4
-rw-r--r--drivers/media/platform/rockchip/rga/rga-buf.c7
-rw-r--r--drivers/media/platform/rockchip/rga/rga-hw.c2
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c5
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c3
-rw-r--r--drivers/media/platform/st/stm32/dma2d/dma2d.c3
-rw-r--r--drivers/media/platform/st/stm32/stm32-csi.c106
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c23
-rw-r--r--drivers/media/platform/synopsys/Kconfig3
-rw-r--r--drivers/media/platform/synopsys/Makefile2
-rw-r--r--drivers/media/platform/synopsys/hdmirx/Kconfig35
-rw-r--r--drivers/media/platform/synopsys/hdmirx/Makefile4
-rw-r--r--drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c2746
-rw-r--r--drivers/media/platform/synopsys/hdmirx/snps_hdmirx.h394
-rw-r--r--drivers/media/platform/synopsys/hdmirx/snps_hdmirx_cec.c275
-rw-r--r--drivers/media/platform/synopsys/hdmirx/snps_hdmirx_cec.h43
-rw-r--r--drivers/media/platform/ti/cal/cal-camerarx.c3
-rw-r--r--drivers/media/platform/ti/cal/cal.c4
-rw-r--r--drivers/media/platform/ti/cal/cal.h1
-rw-r--r--drivers/media/platform/ti/omap3isp/isp.c54
-rw-r--r--drivers/media/platform/ti/omap3isp/isp.h2
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c1
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c8
-rw-r--r--drivers/media/platform/xilinx/xilinx-tpg.c2
-rw-r--r--drivers/media/radio/radio-aztech.c2
-rw-r--r--drivers/media/radio/radio-wl1273.c4
-rw-r--r--drivers/media/rc/keymaps/Makefile1
-rw-r--r--drivers/media/rc/keymaps/rc-siemens-gigaset-rc20.c71
-rw-r--r--drivers/media/rc/pwm-ir-tx.c3
-rw-r--r--drivers/media/rc/rc-core-priv.h4
-rw-r--r--drivers/media/rc/streamzap.c70
-rw-r--r--drivers/media/test-drivers/vim2m.c6
-rw-r--r--drivers/media/test-drivers/vimc/vimc-streamer.c6
-rw-r--r--drivers/media/test-drivers/visl/visl-core.c12
-rw-r--r--drivers/media/test-drivers/vivid/Kconfig12
-rw-r--r--drivers/media/test-drivers/vivid/Makefile5
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.c14
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.h2
-rw-r--r--drivers/media/test-drivers/vivid/vivid-ctrls.c36
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-cap.c11
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-out.c11
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-touch.c11
-rw-r--r--drivers/media/test-drivers/vivid/vivid-osd.c24
-rw-r--r--drivers/media/test-drivers/vivid/vivid-osd.h19
-rw-r--r--drivers/media/test-drivers/vivid/vivid-sdr-cap.c11
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-out.c3
-rw-r--r--drivers/media/tuners/tuner-simple.c20
-rw-r--r--drivers/media/tuners/tuner-types.c296
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c17
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-pcb-cfg.h18
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/anysee.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/au6610.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/az6007.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/ce6230.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvbsky.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/ec168.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/gl861.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c2
-rw-r--r--drivers/media/usb/dvb-usb/a800.c2
-rw-r--r--drivers/media/usb/dvb-usb/af9005.c4
-rw-r--r--drivers/media/usb/dvb-usb/az6027.c4
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2-core.c2
-rw-r--r--drivers/media/usb/dvb-usb/cxusb-analog.c4
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c6
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-mb.c2
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-mc.c2
-rw-r--r--drivers/media/usb/dvb-usb/digitv.c4
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u.c2
-rw-r--r--drivers/media/usb/dvb-usb/dtv5100.c4
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb.h6
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c16
-rw-r--r--drivers/media/usb/dvb-usb/gp8psk.c2
-rw-r--r--drivers/media/usb/dvb-usb/m920x.c4
-rw-r--r--drivers/media/usb/dvb-usb/nova-t-usb2.c2
-rw-r--r--drivers/media/usb/dvb-usb/opera1.c4
-rw-r--r--drivers/media/usb/dvb-usb/pctv452e.c6
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c4
-rw-r--r--drivers/media/usb/dvb-usb/ttusb2.c4
-rw-r--r--drivers/media/usb/dvb-usb/umt-010.c2
-rw-r--r--drivers/media/usb/dvb-usb/vp702x.c2
-rw-r--r--drivers/media/usb/dvb-usb/vp7045.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c40
-rw-r--r--drivers/media/usb/pwc/pwc-if.c1
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c799
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c44
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c74
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h32
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c32
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-api.c105
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-core.c169
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c6
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c40
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c43
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c2
-rw-r--r--drivers/memory/mtk-smi.c33
-rw-r--r--drivers/memory/omap-gpmc.c20
-rw-r--r--drivers/memory/tegra/tegra20-emc.c4
-rw-r--r--drivers/memstick/core/ms_block.c2
-rw-r--r--drivers/memstick/core/mspro_block.c4
-rw-r--r--drivers/memstick/host/rtsx_usb_ms.c1
-rw-r--r--drivers/message/fusion/mptsas.c8
-rw-r--r--drivers/message/fusion/mptscsih.c64
-rw-r--r--drivers/message/fusion/mptscsih.h1
-rw-r--r--drivers/mfd/Kconfig47
-rw-r--r--drivers/mfd/Makefile6
-rw-r--r--drivers/mfd/axp20x.c1
-rw-r--r--drivers/mfd/cgbc-core.c10
-rw-r--r--drivers/mfd/ene-kb3930.c2
-rw-r--r--drivers/mfd/ezx-pcap.c33
-rw-r--r--drivers/mfd/intel-lpss.c2
-rw-r--r--drivers/mfd/intel_soc_pmic_chtdc_ti.c1
-rw-r--r--drivers/mfd/intel_soc_pmic_crc.c1
-rw-r--r--drivers/mfd/ipaq-micro.c3
-rw-r--r--drivers/mfd/max77620.c5
-rw-r--r--drivers/mfd/max77705.c182
-rw-r--r--drivers/mfd/max8997-irq.c15
-rw-r--r--drivers/mfd/mt6397-core.c12
-rw-r--r--drivers/mfd/pcf50633-adc.c255
-rw-r--r--drivers/mfd/pcf50633-core.c304
-rw-r--r--drivers/mfd/pcf50633-gpio.c92
-rw-r--r--drivers/mfd/pcf50633-irq.c312
-rw-r--r--drivers/mfd/qnap-mcu.c6
-rw-r--r--drivers/mfd/sec-core.c12
-rw-r--r--drivers/mfd/sec-irq.c34
-rw-r--r--drivers/mfd/simple-mfd-i2c.c11
-rw-r--r--drivers/mfd/sm501.c6
-rw-r--r--drivers/mfd/sta2x11-mfd.c645
-rw-r--r--drivers/mfd/stm32-timers.c31
-rw-r--r--drivers/mfd/syscon.c9
-rw-r--r--drivers/mfd/tps65010.c13
-rw-r--r--drivers/mfd/tps65219.c279
-rw-r--r--drivers/mfd/upboard-fpga.c3
-rw-r--r--drivers/misc/mei/Kconfig2
-rw-r--r--drivers/misc/pci_endpoint_test.c131
-rw-r--r--drivers/misc/vcpu_stall_detector.c3
-rw-r--r--drivers/mmc/core/core.c2
-rw-r--r--drivers/mmc/core/mmc.c6
-rw-r--r--drivers/mmc/core/pwrseq_simple.c3
-rw-r--r--drivers/mmc/core/queue.c2
-rw-r--r--drivers/mmc/core/sd.c4
-rw-r--r--drivers/mmc/core/sdio_uart.c2
-rw-r--r--drivers/mmc/core/slot-gpio.c12
-rw-r--r--drivers/mmc/host/atmel-mci.c4
-rw-r--r--drivers/mmc/host/cqhci-crypto.c8
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c41
-rw-r--r--drivers/mmc/host/dw_mmc.c97
-rw-r--r--drivers/mmc/host/dw_mmc.h27
-rw-r--r--drivers/mmc/host/omap.c19
-rw-r--r--drivers/mmc/host/renesas_sdhi.h1
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c131
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c10
-rw-r--r--drivers/mmc/host/sdhci-msm.c5
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c72
-rw-r--r--drivers/mmc/host/sdhci-omap.c4
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c6
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c1
-rw-r--r--drivers/mmc/host/sdhci.c9
-rw-r--r--drivers/mmc/host/tmio_mmc.h10
-rw-r--r--drivers/mtd/devices/mchp48l640.c9
-rw-r--r--drivers/mtd/mtdcore.c14
-rw-r--r--drivers/mtd/mtdpart.c3
-rw-r--r--drivers/mtd/mtdpstore.c12
-rw-r--r--drivers/mtd/nand/Makefile4
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c2
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c5
-rw-r--r--drivers/mtd/nand/raw/nand_base.c4
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c36
-rw-r--r--drivers/mtd/nand/spi/Makefile3
-rw-r--r--drivers/mtd/nand/spi/core.c85
-rw-r--r--drivers/mtd/nand/spi/esmt.c90
-rw-r--r--drivers/mtd/nand/spi/macronix.c79
-rw-r--r--drivers/mtd/nand/spi/micron.c135
-rw-r--r--drivers/mtd/nand/spi/otp.c362
-rw-r--r--drivers/mtd/spi-nor/core.c77
-rw-r--r--drivers/mtd/spi-nor/macronix.c31
-rw-r--r--drivers/mtd/spi-nor/otp.c1
-rw-r--r--drivers/mtd/spi-nor/swp.c1
-rw-r--r--drivers/mtd/spi-nor/winbond.c88
-rw-r--r--drivers/mtd/ubi/block.c2
-rw-r--r--drivers/mux/gpio.c4
-rw-r--r--drivers/net/Kconfig24
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/amt.c13
-rw-r--r--drivers/net/bareudp.c9
-rw-r--r--drivers/net/bonding/bond_main.c50
-rw-r--r--drivers/net/bonding/bond_netlink.c6
-rw-r--r--drivers/net/bonding/bond_options.c58
-rw-r--r--drivers/net/caif/caif_serial.c14
-rw-r--r--drivers/net/can/c_can/c_can_platform.c51
-rw-r--r--drivers/net/can/dev/netlink.c4
-rw-r--r--drivers/net/can/flexcan/flexcan-core.c80
-rw-r--r--drivers/net/can/flexcan/flexcan.h6
-rw-r--r--drivers/net/can/m_can/m_can.c9
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c28
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-core.c5
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c8
-rw-r--r--drivers/net/can/usb/gs_usb.c5
-rw-r--r--drivers/net/can/usb/ucan.c43
-rw-r--r--drivers/net/can/vxcan.c7
-rw-r--r--drivers/net/dsa/Kconfig1
-rw-r--r--drivers/net/dsa/b53/b53_common.c14
-rw-r--r--drivers/net/dsa/b53/b53_mdio.c1
-rw-r--r--drivers/net/dsa/b53/b53_priv.h2
-rw-r--r--drivers/net/dsa/b53/b53_serdes.c1
-rw-r--r--drivers/net/dsa/microchip/ksz8.c11
-rw-r--r--drivers/net/dsa/microchip/ksz_dcb.c231
-rw-r--r--drivers/net/dsa/mt7530.c310
-rw-r--r--drivers/net/dsa/mt7530.h8
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c103
-rw-r--r--drivers/net/dsa/mv88e6xxx/pcs-6185.c1
-rw-r--r--drivers/net/dsa/mv88e6xxx/pcs-6352.c1
-rw-r--r--drivers/net/dsa/mv88e6xxx/pcs-639x.c4
-rw-r--r--drivers/net/dsa/qca/qca8k-8xxx.c1
-rw-r--r--drivers/net/dsa/rzn1_a5psw.c8
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ethtool.c9
-rw-r--r--drivers/net/dsa/sja1105/sja1105_mdio.c6
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.c20
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.c6
-rw-r--r--drivers/net/dummy.c1
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/actions/owl-emac.c7
-rw-r--r--drivers/net/ethernet/adi/adin1110.c2
-rw-r--r--drivers/net/ethernet/airoha/Kconfig27
-rw-r--r--drivers/net/ethernet/airoha/Makefile9
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.c (renamed from drivers/net/ethernet/mediatek/airoha_eth.c)1370
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.h552
-rw-r--r--drivers/net/ethernet/airoha/airoha_npu.c520
-rw-r--r--drivers/net/ethernet/airoha/airoha_npu.h34
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe.c910
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe_debugfs.c181
-rw-r--r--drivers/net/ethernet/airoha/airoha_regs.h803
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c47
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c2
-rw-r--r--drivers/net/ethernet/amd/pds_core/auxbus.c44
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.c7
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.h8
-rw-r--r--drivers/net/ethernet/amd/pds_core/devlink.c7
-rw-r--r--drivers/net/ethernet/amd/pds_core/main.c25
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/main.c4
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/mdio.c18
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c10
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c14
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c749
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h15
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c112
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c85
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h143
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c16
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c18
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c20
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h3
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c1089
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h52
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c89
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c6
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c1
-rw-r--r--drivers/net/ethernet/cadence/macb.h132
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c231
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c76
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c16
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.h7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c21
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c16
-rw-r--r--drivers/net/ethernet/cisco/enic/Kconfig1
-rw-r--r--drivers/net/ethernet/cisco/enic/Makefile2
-rw-r--r--drivers/net/ethernet/cisco/enic/cq_desc.h25
-rw-r--r--drivers/net/ethernet/cisco/enic/cq_enet_desc.h142
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h17
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c51
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c343
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_res.c87
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_res.h11
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_rq.c436
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_rq.h8
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_wq.c117
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_wq.h7
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_cq.h45
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_devcmd.h19
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_enet.h5
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.h4
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.h2
-rw-r--r--drivers/net/ethernet/cortina/gemini.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c7
-rw-r--r--drivers/net/ethernet/ec_bhf.c3
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c25
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c52
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c5
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.c1
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c14
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.h2
-rw-r--r--drivers/net/ethernet/google/gve/gve.h94
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c70
-rw-r--r--drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c45
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c90
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c384
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c30
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c110
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c41
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c31
-rw-r--r--drivers/net/ethernet/google/gve/gve_utils.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/Makefile2
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h122
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c348
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h11
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c58
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c298
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c10
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c55
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c103
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c22
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h105
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c181
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c24
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c63
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c14
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c7
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c48
-rw-r--r--drivers/net/ethernet/intel/Kconfig3
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c15
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.h10
-rw-r--r--drivers/net/ethernet/intel/iavf/Makefile2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h35
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c245
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ptp.c485
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ptp.h47
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_trace.h6
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c433
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.h24
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_type.h239
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_types.h34
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c203
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.c102
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/health.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h30
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.c35
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c20
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c211
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.c14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c21
-rw-r--r--drivers/net/ethernet/intel/ice/ice_gnss.c29
-rw-r--r--drivers/net/ethernet/intel/ice/ice_gnss.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc.c64
-rw-r--r--drivers/net/ethernet/intel/ice/ice_irq.c275
-rw-r--r--drivers/net/ethernet/intel/ice/ice_irq.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c27
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c84
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c102
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c515
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_consts.h75
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c430
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h63
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c154
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c31
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c26
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c119
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.h8
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c32
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c51
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c38
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.h25
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c6
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c150
-rw-r--r--drivers/net/ethernet/intel/igc/igc_xdp.c19
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c21
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c13
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h3
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.c21
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c6
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c15
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c14
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c15
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c122
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c34
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c201
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c14
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c225
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h24
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c2
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c3
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig8
-rw-r--r--drivers/net/ethernet/mediatek/Makefile1
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c81
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h11
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_offload.c22
-rw-r--r--drivers/net/ethernet/mediatek/mtk_star_emac.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c119
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c120
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dpll.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c119
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tir.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tir.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c56
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c97
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c715
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c154
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c133
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c146
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c178
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/hwmon.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/hwmon.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c582
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h94
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c165
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c231
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h5
-rw-r--r--drivers/net/ethernet/meta/fbnic/Makefile3
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic.h9
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.c1
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.h84
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c174
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c882
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.c109
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.h8
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.c50
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.h9
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_phylink.c1
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.c356
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.h35
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_tlv.c55
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_tlv.h39
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.c269
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.h33
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c2
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c6
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c1
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c1
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_packet.c4
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c82
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c6
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_bpf.c2
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c100
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/ipsec.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_hwmon.c40
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c8
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c9
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c4
-rw-r--r--drivers/net/ethernet/realtek/Kconfig3
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c82
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase_main.c10
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c3
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.c2
-rw-r--r--drivers/net/ethernet/renesas/rswitch.c7
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c2
-rw-r--r--drivers/net/ethernet/sfc/Kconfig5
-rw-r--r--drivers/net/ethernet/sfc/Makefile2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c8
-rw-r--r--drivers/net/ethernet/sfc/ef100_netdev.c1
-rw-r--r--drivers/net/ethernet/sfc/efx.c24
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c1
-rw-r--r--drivers/net/ethernet/sfc/efx_devlink.c13
-rw-r--r--drivers/net/ethernet/sfc/efx_reflash.c522
-rw-r--r--drivers/net/ethernet/sfc/efx_reflash.h20
-rw-r--r--drivers/net/ethernet/sfc/fw_formats.h114
-rw-r--r--drivers/net/ethernet/sfc/mae.c2
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c115
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h22
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h13842
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c59
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port_common.c11
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h11
-rw-r--r--drivers/net/ethernet/sfc/tc.c6
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c1
-rw-r--r--drivers/net/ethernet/socionext/netsec.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c178
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c29
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c233
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h29
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c27
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c33
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c33
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c564
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c75
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c27
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c46
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c33
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c96
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c49
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c344
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c8
-rw-r--r--drivers/net/ethernet/tehuti/tn40.c9
-rw-r--r--drivers/net/ethernet/tehuti/tn40.h33
-rw-r--r--drivers/net/ethernet/tehuti/tn40_mdio.c84
-rw-r--r--drivers/net/ethernet/ti/Kconfig1
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c252
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h8
-rw-r--r--drivers/net/ethernet/ti/cpsw.c6
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c9
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.c63
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_common.c422
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c137
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h49
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c58
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.c4
-rw-r--r--drivers/net/ethernet/toshiba/Kconfig11
-rw-r--r--drivers/net/ethernet/toshiba/Makefile2
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c2556
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.h475
-rw-r--r--drivers/net/ethernet/toshiba/spider_net_ethtool.c174
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig3
-rw-r--r--drivers/net/ethernet/wangxun/libwx/Makefile2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.c105
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.h4
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c236
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.h1
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c142
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ptp.c883
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ptp.h20
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h135
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c2
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c20
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c11
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_type.h5
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c2
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c6
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c7
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c56
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c16
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h14
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig1
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h29
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c315
-rw-r--r--drivers/net/geneve.c62
-rw-r--r--drivers/net/gtp.c10
-rw-r--r--drivers/net/hamradio/baycom_par.c4
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c2
-rw-r--r--drivers/net/hamradio/baycom_ser_hdx.c4
-rw-r--r--drivers/net/hamradio/bpqether.c25
-rw-r--r--drivers/net/hyperv/hyperv_net.h2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c16
-rw-r--r--drivers/net/hyperv/rndis_filter.c13
-rw-r--r--drivers/net/ieee802154/at86rf230.c4
-rw-r--r--drivers/net/ieee802154/ca8210.c78
-rw-r--r--drivers/net/ipvlan/ipvlan.h3
-rw-r--r--drivers/net/ipvlan/ipvlan_l3s.c1
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c9
-rw-r--r--drivers/net/ipvlan/ipvtap.c6
-rw-r--r--drivers/net/loopback.c3
-rw-r--r--drivers/net/macsec.c10
-rw-r--r--drivers/net/macvlan.c22
-rw-r--r--drivers/net/macvtap.c6
-rw-r--r--drivers/net/mctp/Kconfig10
-rw-r--r--drivers/net/mctp/Makefile1
-rw-r--r--drivers/net/mctp/mctp-i2c.c7
-rw-r--r--drivers/net/mctp/mctp-i3c.c5
-rw-r--r--drivers/net/mctp/mctp-usb.c385
-rw-r--r--drivers/net/mdio/mdio-i2c.c79
-rw-r--r--drivers/net/net_failover.c2
-rw-r--r--drivers/net/netconsole.c393
-rw-r--r--drivers/net/netdevsim/bpf.c3
-rw-r--r--drivers/net/netdevsim/ethtool.c2
-rw-r--r--drivers/net/netdevsim/ipsec.c11
-rw-r--r--drivers/net/netdevsim/netdev.c78
-rw-r--r--drivers/net/netdevsim/netdevsim.h2
-rw-r--r--drivers/net/netkit.c15
-rw-r--r--drivers/net/pcs/pcs-lynx.c1
-rw-r--r--drivers/net/pcs/pcs-mtk-lynxi.c1
-rw-r--r--drivers/net/pcs/pcs-rzn1-miic.c22
-rw-r--r--drivers/net/pcs/pcs-xpcs.c105
-rw-r--r--drivers/net/pcs/pcs-xpcs.h26
-rw-r--r--drivers/net/pfcp.c9
-rw-r--r--drivers/net/phy/Kconfig2
-rw-r--r--drivers/net/phy/Makefile3
-rw-r--r--drivers/net/phy/adin1100.c5
-rw-r--r--drivers/net/phy/aquantia/aquantia_firmware.c7
-rw-r--r--drivers/net/phy/aquantia/aquantia_hwmon.c32
-rw-r--r--drivers/net/phy/aquantia/aquantia_main.c240
-rw-r--r--drivers/net/phy/ax88796b_rust.rs2
-rw-r--r--drivers/net/phy/bcm-phy-ptp.c3
-rw-r--r--drivers/net/phy/bcm54140.c1
-rw-r--r--drivers/net/phy/broadcom.c2
-rw-r--r--drivers/net/phy/dp83822.c38
-rw-r--r--drivers/net/phy/dp83867.c5
-rw-r--r--drivers/net/phy/dp83td510.c187
-rw-r--r--drivers/net/phy/dp83tg720.c78
-rw-r--r--drivers/net/phy/fixed_phy.c16
-rw-r--r--drivers/net/phy/marvell-88q2xxx.c280
-rw-r--r--drivers/net/phy/marvell.c92
-rw-r--r--drivers/net/phy/marvell10g.c24
-rw-r--r--drivers/net/phy/mdio_bus.c14
-rw-r--r--drivers/net/phy/mediatek/mtk-ge-soc.c303
-rw-r--r--drivers/net/phy/mediatek/mtk-ge.c78
-rw-r--r--drivers/net/phy/mediatek/mtk-phy-lib.c77
-rw-r--r--drivers/net/phy/mediatek/mtk.h15
-rw-r--r--drivers/net/phy/micrel.c33
-rw-r--r--drivers/net/phy/mscc/mscc_main.c2
-rw-r--r--drivers/net/phy/mscc/mscc_ptp.c14
-rw-r--r--drivers/net/phy/mxl-gpy.c19
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.c162
-rw-r--r--drivers/net/phy/nxp-tja11xx.c57
-rw-r--r--drivers/net/phy/phy-c45.c55
-rw-r--r--drivers/net/phy/phy-caps.h63
-rw-r--r--drivers/net/phy/phy-core.c318
-rw-r--r--drivers/net/phy/phy.c157
-rw-r--r--drivers/net/phy/phy_caps.c359
-rw-r--r--drivers/net/phy/phy_device.c414
-rw-r--r--drivers/net/phy/phy_led_triggers.c2
-rw-r--r--drivers/net/phy/phy_link_topology.c2
-rw-r--r--drivers/net/phy/phy_package.c350
-rw-r--r--drivers/net/phy/phylib-internal.h27
-rw-r--r--drivers/net/phy/phylib.h34
-rw-r--r--drivers/net/phy/phylink.c561
-rw-r--r--drivers/net/phy/qcom/qca807x.c16
-rw-r--r--drivers/net/phy/qt2025.rs4
-rw-r--r--drivers/net/phy/realtek/Kconfig8
-rw-r--r--drivers/net/phy/realtek/realtek_hwmon.c7
-rw-r--r--drivers/net/phy/realtek/realtek_main.c130
-rw-r--r--drivers/net/phy/sfp.c95
-rw-r--r--drivers/net/phy/xilinx_gmii2rgmii.c7
-rw-r--r--drivers/net/ppp/ppp_generic.c14
-rw-r--r--drivers/net/ppp/pppoe.c1
-rw-r--r--drivers/net/ppp/pptp.c1
-rw-r--r--drivers/net/sb1000.c1179
-rw-r--r--drivers/net/tap.c166
-rw-r--r--drivers/net/team/team_core.c9
-rw-r--r--drivers/net/tun.c221
-rw-r--r--drivers/net/tun_vnet.h186
-rw-r--r--drivers/net/usb/asix_devices.c17
-rw-r--r--drivers/net/usb/ax88172a.c12
-rw-r--r--drivers/net/usb/cdc_ether.c7
-rw-r--r--drivers/net/usb/cdc_mbim.c4
-rw-r--r--drivers/net/usb/cdc_ncm.c3
-rw-r--r--drivers/net/usb/lan78xx.c4
-rw-r--r--drivers/net/usb/qmi_wwan.c6
-rw-r--r--drivers/net/usb/r8152.c7
-rw-r--r--drivers/net/usb/r8153_ecm.c6
-rw-r--r--drivers/net/veth.c11
-rw-r--r--drivers/net/virtio_net.c277
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c10
-rw-r--r--drivers/net/vrf.c14
-rw-r--r--drivers/net/vxlan/vxlan_core.c68
-rw-r--r--drivers/net/wireguard/device.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c13
-rw-r--r--drivers/net/wireless/ath/ath11k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c11
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h13
-rw-r--r--drivers/net/wireless/ath/ath11k/coredump.c52
-rw-r--r--drivers/net/wireless/ath/ath11k/coredump.h79
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c35
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h6
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c133
-rw-r--r--drivers/net/wireless/ath/ath11k/fw.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/hif.h7
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c145
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c5
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c195
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c17
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h10
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c107
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/testmode.c80
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c11
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h11
-rw-r--r--drivers/net/wireless/ath/ath12k/Makefile3
-rw-r--r--drivers/net/wireless/ath/ath12k/acpi.c202
-rw-r--r--drivers/net/wireless/ath/ath12k/acpi.h40
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c103
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h139
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.c6
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.h10
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.c1191
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.h115
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c1238
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h453
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_sta.c337
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_sta.h24
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c5
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h82
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c1419
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.h11
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c66
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.h8
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.c253
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.h4
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_desc.h5
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.h442
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_tx.h10
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c8
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c870
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h10
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c22
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c23
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.h5
-rw-r--r--drivers/net/wireless/ath/ath12k/rx_desc.h12
-rw-r--r--drivers/net/wireless/ath/ath12k/testmode.c395
-rw-r--r--drivers/net/wireless/ath/ath12k/testmode.h40
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c1176
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h290
-rw-r--r--drivers/net/wireless/ath/ath12k/wow.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c9
-rw-r--r--drivers/net/wireless/ath/testmode_i.h (renamed from drivers/net/wireless/ath/ath11k/testmode_i.h)54
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw.h2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_rx.c91
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-rs.c18
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/ax210.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/dr.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/sc.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/commands.h154
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/context.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dhc.h226
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h66
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h95
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h52
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c108
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dhc-utils.h75
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.c56
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/Makefile16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/agg.c670
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/agg.h127
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ap.c344
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ap.h45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/coex.c40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/coex.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/constants.h88
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/d3.c1998
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/d3.h51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/debugfs.c1082
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/debugfs.h244
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.c451
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/fw.c536
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/hcmd.h56
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.c671
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.h233
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/key.c358
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/key.h39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/led.c100
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/led.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.c1213
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.h153
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/low_latency.c339
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/low_latency.h68
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mac80211.c2670
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mac80211.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mcc.c329
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mcc.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.c720
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.h582
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.c1076
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.h167
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.c759
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.h35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/phy.c155
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/phy.h55
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/power.c396
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/power.h33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ptp.c321
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ptp.h45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/regulatory.c393
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/regulatory.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/roc.c224
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/roc.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/rx.c2060
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/rx.h72
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/scan.c2008
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/scan.h136
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/session-protect.c222
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/session-protect.h102
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/sta.c1289
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/sta.h266
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/stats.c513
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/stats.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/agg.c663
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c62
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c303
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/link.c110
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/module.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/rx.c353
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c474
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h134
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/thermal.c438
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/thermal.h36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/time_sync.c240
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/time_sync.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tlc.c700
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tlc.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tx.c1374
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tx.h77
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c123
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c86
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c60
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c61
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c261
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/devinfo.c15
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.c143
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.h10
-rw-r--r--drivers/net/wireless/marvell/libertas/cmdresp.c1
-rw-r--r--drivers/net/wireless/marvell/libertas/decl.h4
-rw-r--r--drivers/net/wireless/marvell/libertas/dev.h4
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c88
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfp.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c18
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_event.c16
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/channel.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c53
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/init.c96
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/main.c164
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.c274
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h36
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x.h19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c123
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_core.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c56
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c306
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c976
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c636
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.h47
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h79
-rw-r--r--drivers/net/wireless/mediatek/mt76/scan.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h21
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800mmio.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c4
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8192c.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/core.c17
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c6
-rw-r--r--drivers/net/wireless/realtek/rtw88/Kconfig25
-rw-r--r--drivers/net/wireless/realtek/rtw88/Makefile9
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c57
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c15
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c7
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c58
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h45
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c215
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h20
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h69
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a.c2257
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a.h62
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a_table.c23930
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a_table.h40
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814ae.c31
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814au.c54
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c16
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c16
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822bu.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw88xxa.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.c6
-rw-r--r--drivers/net/wireless/realtek/rtw88/sar.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/sdio.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/util.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/Kconfig2
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c6
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c38
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c2959
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.h18
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c240
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h208
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c2063
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c369
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h103
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c80
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h5
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c282
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac_be.c8
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h56
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci_be.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c789
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h22
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c6
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h44
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c601
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c26
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c26
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c26
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.c6
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c13
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.c26
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c13
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c28
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c74
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.c422
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.h10
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c17
-rw-r--r--drivers/net/wireless/realtek/rtw89/util.c220
-rw-r--r--drivers/net/wireless/realtek/rtw89/util.h13
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c7
-rw-r--r--drivers/net/wireless/silabs/wfx/bus.h1
-rw-r--r--drivers/net/wireless/silabs/wfx/bus_sdio.c54
-rw-r--r--drivers/net/wireless/silabs/wfx/bus_spi.c45
-rw-r--r--drivers/net/wireless/silabs/wfx/main.c14
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.c25
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.h3
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c21
-rw-r--r--drivers/net/wireless/virtual/virt_wifi.c10
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.c2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.c24
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c1
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.c1
-rw-r--r--drivers/net/wwan/wwan_core.c16
-rw-r--r--drivers/ntb/test/ntb_pingpong.c3
-rw-r--r--drivers/nvdimm/badrange.c2
-rw-r--r--drivers/nvdimm/nd.h2
-rw-r--r--drivers/nvdimm/pfn_devs.c7
-rw-r--r--drivers/nvdimm/pmem.c2
-rw-r--r--drivers/nvme/common/Kconfig1
-rw-r--r--drivers/nvme/common/auth.c337
-rw-r--r--drivers/nvme/common/keyring.c65
-rw-r--r--drivers/nvme/host/Kconfig2
-rw-r--r--drivers/nvme/host/apple.c7
-rw-r--r--drivers/nvme/host/auth.c115
-rw-r--r--drivers/nvme/host/core.c15
-rw-r--r--drivers/nvme/host/fabrics.c34
-rw-r--r--drivers/nvme/host/fabrics.h3
-rw-r--r--drivers/nvme/host/fc.c6
-rw-r--r--drivers/nvme/host/ioctl.c12
-rw-r--r--drivers/nvme/host/multipath.c138
-rw-r--r--drivers/nvme/host/nvme.h22
-rw-r--r--drivers/nvme/host/pci.c23
-rw-r--r--drivers/nvme/host/rdma.c3
-rw-r--r--drivers/nvme/host/sysfs.c24
-rw-r--r--drivers/nvme/host/tcp.c67
-rw-r--r--drivers/nvme/host/zns.c10
-rw-r--r--drivers/nvme/target/auth.c72
-rw-r--r--drivers/nvme/target/core.c9
-rw-r--r--drivers/nvme/target/debugfs.c27
-rw-r--r--drivers/nvme/target/fabrics-cmd-auth.c60
-rw-r--r--drivers/nvme/target/fabrics-cmd.c25
-rw-r--r--drivers/nvme/target/fc.c14
-rw-r--r--drivers/nvme/target/loop.c2
-rw-r--r--drivers/nvme/target/nvmet.h40
-rw-r--r--drivers/nvme/target/pci-epf.c40
-rw-r--r--drivers/nvme/target/tcp.c32
-rw-r--r--drivers/nvmem/brcm_nvram.c2
-rw-r--r--drivers/nvmem/layouts/u-boot-env.c2
-rw-r--r--drivers/of/address.c13
-rw-r--r--drivers/of/base.c33
-rw-r--r--drivers/of/device.c7
-rw-r--r--drivers/of/irq.c84
-rw-r--r--drivers/of/of_private.h7
-rw-r--r--drivers/of/overlay.c10
-rw-r--r--drivers/of/platform.c8
-rw-r--r--drivers/of/property.c33
-rw-r--r--drivers/of/resolver.c41
-rw-r--r--drivers/of/unittest-data/tests-interrupts.dtsi13
-rw-r--r--drivers/of/unittest.c67
-rw-r--r--drivers/parisc/led.c4
-rw-r--r--drivers/pci/Kconfig11
-rw-r--r--drivers/pci/bus.c43
-rw-r--r--drivers/pci/controller/cadence/pci-j721e.c5
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-ep.c11
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h2
-rw-r--r--drivers/pci/controller/dwc/Kconfig21
-rw-r--r--drivers/pci/controller/dwc/Makefile2
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c106
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c6
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-amd-mdb.c476
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-debugfs.c677
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c321
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c61
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c142
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h82
-rw-r--r--drivers/pci/controller/dwc/pcie-dw-rockchip.c53
-rw-r--r--drivers/pci/controller/dwc/pcie-histb.c12
-rw-r--r--drivers/pci/controller/dwc/pcie-intel-gw.c8
-rw-r--r--drivers/pci/controller/dwc/pcie-kirin.c50
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-ep.c17
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c12
-rw-r--r--drivers/pci/controller/pci-hyperv.c5
-rw-r--r--drivers/pci/controller/pci-mvebu.c2
-rw-r--r--drivers/pci/controller/pci-tegra.c80
-rw-r--r--drivers/pci/controller/pci-thunder-ecam.c2
-rw-r--r--drivers/pci/controller/pci-xgene-msi.c2
-rw-r--r--drivers/pci/controller/pcie-altera.c257
-rw-r--r--drivers/pci/controller/pcie-apple.c4
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c202
-rw-r--r--drivers/pci/controller/pcie-mediatek-gen3.c64
-rw-r--r--drivers/pci/controller/pcie-mediatek.c15
-rw-r--r--drivers/pci/controller/pcie-mt7621.c15
-rw-r--r--drivers/pci/controller/pcie-rcar-host.c10
-rw-r--r--drivers/pci/controller/pcie-rockchip-host.c2
-rw-r--r--drivers/pci/controller/pcie-rockchip.h1
-rw-r--r--drivers/pci/controller/pcie-xilinx-cpm.c53
-rw-r--r--drivers/pci/controller/vmd.c32
-rw-r--r--drivers/pci/devres.c18
-rw-r--r--drivers/pci/doe.c247
-rw-r--r--drivers/pci/endpoint/Kconfig2
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-mhi.c2
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c142
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c56
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c4
-rw-r--r--drivers/pci/hotplug/Kconfig2
-rw-r--r--drivers/pci/hotplug/cpci_hotplug.h2
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c17
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c142
-rw-r--r--drivers/pci/hotplug/pciehp_core.c5
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c11
-rw-r--r--drivers/pci/hotplug/shpchp.h18
-rw-r--r--drivers/pci/hotplug/shpchp_core.c13
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c2
-rw-r--r--drivers/pci/iomap.c29
-rw-r--r--drivers/pci/iov.c50
-rw-r--r--drivers/pci/msi/api.c2
-rw-r--r--drivers/pci/msi/msi.c35
-rw-r--r--drivers/pci/of.c127
-rw-r--r--drivers/pci/of_property.c115
-rw-r--r--drivers/pci/pci-driver.c9
-rw-r--r--drivers/pci/pci-sysfs.c11
-rw-r--r--drivers/pci/pci.c72
-rw-r--r--drivers/pci/pci.h89
-rw-r--r--drivers/pci/pcie/aer.c79
-rw-r--r--drivers/pci/pcie/aspm.c17
-rw-r--r--drivers/pci/pcie/bwctrl.c6
-rw-r--r--drivers/pci/pcie/dpc.c18
-rw-r--r--drivers/pci/pcie/portdrv.c8
-rw-r--r--drivers/pci/pcie/tlp.c56
-rw-r--r--drivers/pci/probe.c78
-rw-r--r--drivers/pci/proc.c4
-rw-r--r--drivers/pci/pwrctrl/Kconfig11
-rw-r--r--drivers/pci/pwrctrl/Makefile3
-rw-r--r--drivers/pci/pwrctrl/core.c2
-rw-r--r--drivers/pci/pwrctrl/slot.c93
-rw-r--r--drivers/pci/quirks.c4
-rw-r--r--drivers/pci/remove.c5
-rw-r--r--drivers/pci/setup-bus.c575
-rw-r--r--drivers/pci/setup-res.c24
-rw-r--r--drivers/pci/slot.c44
-rw-r--r--drivers/perf/apple_m1_cpu_pmu.c105
-rw-r--r--drivers/perf/arm-ccn.c5
-rw-r--r--drivers/perf/arm-cmn.c5
-rw-r--r--drivers/perf/arm_cspmu/ampere_cspmu.c32
-rw-r--r--drivers/perf/arm_cspmu/arm_cspmu.c81
-rw-r--r--drivers/perf/arm_cspmu/arm_cspmu.h57
-rw-r--r--drivers/perf/arm_cspmu/nvidia_cspmu.c22
-rw-r--r--drivers/perf/arm_pmu.c8
-rw-r--r--drivers/perf/arm_pmuv3.c11
-rw-r--r--drivers/perf/arm_spe_pmu.c4
-rw-r--r--drivers/perf/arm_v7_pmu.c50
-rw-r--r--drivers/perf/dwc_pcie_pmu.c76
-rw-r--r--drivers/perf/marvell_cn10k_ddr_pmu.c4
-rw-r--r--drivers/perf/thunderx2_pmu.c5
-rw-r--r--drivers/phy/motorola/phy-mapphone-mdm6600.c4
-rw-r--r--drivers/pinctrl/Kconfig14
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm281xx.c849
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c14
-rw-r--r--drivers/pinctrl/devicetree.c10
-rw-r--r--drivers/pinctrl/intel/Kconfig2
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c11
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c8
-rw-r--r--drivers/pinctrl/intel/pinctrl-lynxpoint.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-tangier.c5
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.c313
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.h23
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c37
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.h7
-rw-r--r--drivers/pinctrl/meson/Kconfig11
-rw-r--r--drivers/pinctrl/meson/Makefile1
-rw-r--r--drivers/pinctrl/meson/pinctrl-amlogic-a4.c1053
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-ma35.c120
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-ma35d1.c1
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c44
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c42
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-wpcm450.c18
-rw-r--r--drivers/pinctrl/pinconf-generic.c130
-rw-r--r--drivers/pinctrl/pinconf.h4
-rw-r--r--drivers/pinctrl/pinctrl-amdisp.c231
-rw-r--r--drivers/pinctrl/pinctrl-amdisp.h95
-rw-r--r--drivers/pinctrl/pinctrl-cy8c95x0.c195
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c262
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c23
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c6
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c160
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.h1
-rw-r--r--drivers/pinctrl/pxa/pinctrl-pxa2xx.c8
-rw-r--r--drivers/pinctrl/qcom/Kconfig.msm14
-rw-r--r--drivers/pinctrl/qcom/Makefile1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c12
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8917.c8
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sa8775p.c58
-rw-r--r--drivers/pinctrl/qcom/tlmm-test.c663
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rza2.c5
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c22
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzv2m.c2
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm64.c405
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.h41
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c5
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h6
-rw-r--r--drivers/pinctrl/sophgo/Kconfig46
-rw-r--r--drivers/pinctrl/sophgo/Makefile8
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-cv1800b.c27
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-cv1812h.c27
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-cv18xx.c602
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-cv18xx.h66
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-sg2000.c27
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-sg2002.c27
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-sg2042-ops.c296
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-sg2042.c655
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-sg2042.h49
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-sg2044.c718
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-sophgo-common.c451
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-sophgo.h136
-rw-r--r--drivers/pinctrl/spacemit/Kconfig3
-rw-r--r--drivers/pinctrl/spacemit/pinctrl-k1.c8
-rw-r--r--drivers/pinctrl/sunxi/Kconfig10
-rw-r--r--drivers/pinctrl/sunxi/Makefile3
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun20i-d1.c6
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c8
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun55i-a523-r.c54
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun55i-a523.c54
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun5i.c8
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c8
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-v3s.c7
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c374
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c54
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h47
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.c73
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.h34
-rw-r--r--drivers/platform/arm64/Kconfig21
-rw-r--r--drivers/platform/arm64/Makefile1
-rw-r--r--drivers/platform/arm64/huawei-gaokun-ec.c825
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c22
-rw-r--r--drivers/platform/chrome/cros_ec_sysfs.c71
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c28
-rw-r--r--drivers/platform/chrome/cros_ec_typec.h1
-rw-r--r--drivers/platform/mellanox/Kconfig13
-rw-r--r--drivers/platform/mellanox/Makefile1
-rw-r--r--drivers/platform/mellanox/mlx-platform.c (renamed from drivers/platform/x86/mlx-platform.c)17
-rw-r--r--drivers/platform/mellanox/mlxbf-bootctl.c20
-rw-r--r--drivers/platform/mellanox/mlxbf-bootctl.h5
-rw-r--r--drivers/platform/surface/surface_aggregator_registry.c5
-rw-r--r--drivers/platform/x86/Kconfig41
-rw-r--r--drivers/platform/x86/Makefile7
-rw-r--r--drivers/platform/x86/amd/Makefile2
-rw-r--r--drivers/platform/x86/amd/hsmp/Kconfig2
-rw-r--r--drivers/platform/x86/amd/hsmp/Makefile6
-rw-r--r--drivers/platform/x86/amd/hsmp/acpi.c7
-rw-r--r--drivers/platform/x86/amd/hsmp/hsmp.c1
-rw-r--r--drivers/platform/x86/amd/hsmp/hsmp.h3
-rw-r--r--drivers/platform/x86/amd/hsmp/plat.c36
-rw-r--r--drivers/platform/x86/amd/pmc/Makefile6
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.c113
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.h82
-rw-r--r--drivers/platform/x86/amd/pmf/Makefile8
-rw-r--r--drivers/platform/x86/amd/pmf/acpi.c2
-rw-r--r--drivers/platform/x86/amd/pmf/spc.c2
-rw-r--r--drivers/platform/x86/amd/pmf/tee-if.c36
-rw-r--r--drivers/platform/x86/asus-tf103c-dock.c2
-rw-r--r--drivers/platform/x86/compal-laptop.c1
-rw-r--r--drivers/platform/x86/dell/Kconfig30
-rw-r--r--drivers/platform/x86/dell/Makefile45
-rw-r--r--drivers/platform/x86/dell/alienware-wmi-base.c491
-rw-r--r--drivers/platform/x86/dell/alienware-wmi-legacy.c95
-rw-r--r--drivers/platform/x86/dell/alienware-wmi-wmax.c768
-rw-r--r--drivers/platform/x86/dell/alienware-wmi.c1249
-rw-r--r--drivers/platform/x86/dell/alienware-wmi.h117
-rw-r--r--drivers/platform/x86/dell/dell-uart-backlight.c2
-rw-r--r--drivers/platform/x86/dell/dell-wmi-ddv.c84
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/Makefile2
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/Makefile2
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/bioscfg.c15
-rw-r--r--drivers/platform/x86/ideapad-laptop.c23
-rw-r--r--drivers/platform/x86/intel/ifs/Makefile2
-rw-r--r--drivers/platform/x86/intel/int3472/discrete.c48
-rw-r--r--drivers/platform/x86/intel/pmc/Makefile2
-rw-r--r--drivers/platform/x86/intel/pmc/adl.c56
-rw-r--r--drivers/platform/x86/intel/pmc/arl.c137
-rw-r--r--drivers/platform/x86/intel/pmc/cnp.c29
-rw-r--r--drivers/platform/x86/intel/pmc/core.c115
-rw-r--r--drivers/platform/x86/intel/pmc/core.h199
-rw-r--r--drivers/platform/x86/intel/pmc/icl.c24
-rw-r--r--drivers/platform/x86/intel/pmc/lnl.c67
-rw-r--r--drivers/platform/x86/intel/pmc/mtl.c109
-rw-r--r--drivers/platform/x86/intel/pmc/ptl.c550
-rw-r--r--drivers/platform/x86/intel/pmc/spt.c45
-rw-r--r--drivers/platform/x86/intel/pmc/tgl.c59
-rw-r--r--drivers/platform/x86/lenovo-wmi-hotkey-utilities.c212
-rw-r--r--drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c2
-rw-r--r--drivers/platform/x86/samsung-galaxybook.c1425
-rw-r--r--drivers/platform/x86/think-lmi.c51
-rw-r--r--drivers/platform/x86/think-lmi.h2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c181
-rw-r--r--drivers/platform/x86/wmi.c143
-rw-r--r--drivers/platform/x86/x86-android-tablets/Kconfig1
-rw-r--r--drivers/pmdomain/Kconfig1
-rw-r--r--drivers/pmdomain/Makefile1
-rw-r--r--drivers/pmdomain/amlogic/meson-secure-pwrc.c2
-rw-r--r--drivers/pmdomain/arm/scmi_pm_domain.c11
-rw-r--r--drivers/pmdomain/bcm/bcm2835-power.c1
-rw-r--r--drivers/pmdomain/core.c35
-rw-r--r--drivers/pmdomain/imx/gpcv2.c2
-rw-r--r--drivers/pmdomain/renesas/rcar-sysc.c2
-rw-r--r--drivers/pmdomain/rockchip/Kconfig2
-rw-r--r--drivers/pmdomain/rockchip/pm-domains.c205
-rw-r--r--drivers/pmdomain/sunxi/sun20i-ppu.c15
-rw-r--r--drivers/pmdomain/thead/Kconfig12
-rw-r--r--drivers/pmdomain/thead/Makefile2
-rw-r--r--drivers/pmdomain/thead/th1520-pm-domains.c218
-rw-r--r--drivers/pmdomain/ti/omap_prm.c2
-rw-r--r--drivers/pnp/base.h4
-rw-r--r--drivers/pnp/card.c32
-rw-r--r--drivers/pnp/core.c16
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c1
-rw-r--r--drivers/power/reset/ltc2952-poweroff.c8
-rw-r--r--drivers/power/supply/Kconfig12
-rw-r--r--drivers/power/supply/Makefile2
-rw-r--r--drivers/power/supply/ab8500_chargalg.c9
-rw-r--r--drivers/power/supply/ab8500_charger.c4
-rw-r--r--drivers/power/supply/acer_a500_battery.c3
-rw-r--r--drivers/power/supply/act8945a_charger.c2
-rw-r--r--drivers/power/supply/axp20x_ac_power.c2
-rw-r--r--drivers/power/supply/axp20x_battery.c23
-rw-r--r--drivers/power/supply/axp20x_usb_power.c4
-rw-r--r--drivers/power/supply/bd99954-charger.c4
-rw-r--r--drivers/power/supply/bq2415x_charger.c2
-rw-r--r--drivers/power/supply/bq24190_charger.c2
-rw-r--r--drivers/power/supply/bq24257_charger.c2
-rw-r--r--drivers/power/supply/bq24735-charger.c2
-rw-r--r--drivers/power/supply/bq2515x_charger.c6
-rw-r--r--drivers/power/supply/bq256xx_charger.c2
-rw-r--r--drivers/power/supply/bq25890_charger.c2
-rw-r--r--drivers/power/supply/bq25980_charger.c8
-rw-r--r--drivers/power/supply/bq27xxx_battery.c40
-rw-r--r--drivers/power/supply/cpcap-battery.c2
-rw-r--r--drivers/power/supply/cpcap-charger.c2
-rw-r--r--drivers/power/supply/ds2760_battery.c52
-rw-r--r--drivers/power/supply/generic-adc-battery.c2
-rw-r--r--drivers/power/supply/gpio-charger.c2
-rw-r--r--drivers/power/supply/ingenic-battery.c2
-rw-r--r--drivers/power/supply/ip5xxx_power.c2
-rw-r--r--drivers/power/supply/lego_ev3_battery.c3
-rw-r--r--drivers/power/supply/lt3651-charger.c2
-rw-r--r--drivers/power/supply/ltc4162-l-charger.c4
-rw-r--r--drivers/power/supply/max17042_battery.c2
-rw-r--r--drivers/power/supply/max1720x_battery.c51
-rw-r--r--drivers/power/supply/max77650-charger.c2
-rw-r--r--drivers/power/supply/max77693_charger.c2
-rw-r--r--drivers/power/supply/max77705_charger.c581
-rw-r--r--drivers/power/supply/max8903_charger.c2
-rw-r--r--drivers/power/supply/mm8013.c2
-rw-r--r--drivers/power/supply/mt6360_charger.c2
-rw-r--r--drivers/power/supply/mt6370-charger.c3
-rw-r--r--drivers/power/supply/olpc_battery.c4
-rw-r--r--drivers/power/supply/pcf50633-charger.c466
-rw-r--r--drivers/power/supply/pm8916_bms_vm.c2
-rw-r--r--drivers/power/supply/pm8916_lbc.c2
-rw-r--r--drivers/power/supply/power_supply_core.c30
-rw-r--r--drivers/power/supply/qcom_battmgr.c5
-rw-r--r--drivers/power/supply/qcom_pmi8998_charger.c2
-rw-r--r--drivers/power/supply/qcom_smbb.c2
-rw-r--r--drivers/power/supply/rk817_charger.c2
-rw-r--r--drivers/power/supply/rt5033_battery.c2
-rw-r--r--drivers/power/supply/rt5033_charger.c3
-rw-r--r--drivers/power/supply/rt9455_charger.c4
-rw-r--r--drivers/power/supply/rt9467-charger.c2
-rw-r--r--drivers/power/supply/rt9471.c2
-rw-r--r--drivers/power/supply/sbs-battery.c2
-rw-r--r--drivers/power/supply/sbs-charger.c2
-rw-r--r--drivers/power/supply/sbs-manager.c2
-rw-r--r--drivers/power/supply/sc2731_charger.c2
-rw-r--r--drivers/power/supply/sc27xx_fuel_gauge.c8
-rw-r--r--drivers/power/supply/smb347-charger.c4
-rw-r--r--drivers/power/supply/tps65090-charger.c2
-rw-r--r--drivers/power/supply/tps65217_charger.c2
-rw-r--r--drivers/power/supply/ucs1002_power.c2
-rw-r--r--drivers/powercap/Kconfig2
-rw-r--r--drivers/powercap/idle_inject.c3
-rw-r--r--drivers/powercap/intel_rapl_common.c5
-rw-r--r--drivers/pps/generators/pps_gen_parport.c3
-rw-r--r--drivers/ptp/ptp_chardev.c16
-rw-r--r--drivers/ptp/ptp_ocp.c7
-rw-r--r--drivers/pwm/Kconfig14
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/core.c19
-rw-r--r--drivers/pwm/pwm-clps711x.c4
-rw-r--r--drivers/pwm/pwm-gpio.c5
-rw-r--r--drivers/pwm/pwm-lpss.c5
-rw-r--r--drivers/pwm/pwm-lpss.h1
-rw-r--r--drivers/pwm/pwm-pca9685.c9
-rw-r--r--drivers/pwm/pwm-sophgo-sg2042.c194
-rw-r--r--drivers/pwm/pwm-stmpe.c25
-rw-r--r--drivers/regulator/Kconfig18
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/ad5398.c30
-rw-r--r--drivers/regulator/axp20x-regulator.c14
-rw-r--r--drivers/regulator/core.c88
-rw-r--r--drivers/regulator/cros-ec-regulator.c4
-rw-r--r--drivers/regulator/devres.c22
-rw-r--r--drivers/regulator/dummy.c2
-rw-r--r--drivers/regulator/of_regulator.c21
-rw-r--r--drivers/regulator/pca9450-regulator.c91
-rw-r--r--drivers/regulator/pcf50633-regulator.c124
-rw-r--r--drivers/regulator/pf9453-regulator.c879
-rw-r--r--drivers/regulator/rtq2208-regulator.c216
-rw-r--r--drivers/regulator/rtq6752-regulator.c2
-rw-r--r--drivers/regulator/s2mps11.c92
-rw-r--r--drivers/remoteproc/imx_dsp_rproc.c26
-rw-r--r--drivers/remoteproc/imx_rproc.h2
-rw-r--r--drivers/remoteproc/omap_remoteproc.c1
-rw-r--r--drivers/remoteproc/pru_rproc.c2
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c184
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c38
-rw-r--r--drivers/remoteproc/qcom_sysmon.c2
-rw-r--r--drivers/remoteproc/qcom_wcnss.c33
-rw-r--r--drivers/remoteproc/remoteproc_core.c1
-rw-r--r--drivers/reset/Kconfig7
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/reset-imx-scu.c101
-rw-r--r--drivers/reset/reset-imx8mp-audiomix.c78
-rw-r--r--drivers/reset/reset-microchip-sparx5.c19
-rw-r--r--drivers/rtc/class.c3
-rw-r--r--drivers/s390/block/dasd.c3
-rw-r--r--drivers/s390/block/dasd_devmap.c3
-rw-r--r--drivers/s390/block/dasd_diag.c5
-rw-r--r--drivers/s390/block/dasd_eckd.c3
-rw-r--r--drivers/s390/char/con3215.c3
-rw-r--r--drivers/s390/char/con3270.c3
-rw-r--r--drivers/s390/char/diag_ftp.c2
-rw-r--r--drivers/s390/char/hmcdrv_ftp.c6
-rw-r--r--drivers/s390/char/monreader.c3
-rw-r--r--drivers/s390/char/monwriter.c3
-rw-r--r--drivers/s390/char/raw3270.c3
-rw-r--r--drivers/s390/char/sclp.h9
-rw-r--r--drivers/s390/char/sclp_cmd.c3
-rw-r--r--drivers/s390/char/sclp_con.c17
-rw-r--r--drivers/s390/char/sclp_early.c6
-rw-r--r--drivers/s390/char/sclp_early_core.c13
-rw-r--r--drivers/s390/char/sclp_tty.c16
-rw-r--r--drivers/s390/char/vmcp.c5
-rw-r--r--drivers/s390/char/vmlogrdr.c3
-rw-r--r--drivers/s390/char/vmur.c3
-rw-r--r--drivers/s390/cio/crw.c5
-rw-r--r--drivers/s390/cio/device_id.c3
-rw-r--r--drivers/s390/cio/ioasm.c8
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c6
-rw-r--r--drivers/s390/cio/vfio_ccw_private.h2
-rw-r--r--drivers/s390/crypto/ap_bus.c6
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c68
-rw-r--r--drivers/s390/crypto/vfio_ap_private.h4
-rw-r--r--drivers/s390/net/Kconfig11
-rw-r--r--drivers/s390/net/Makefile1
-rw-r--r--drivers/s390/net/ism_drv.c1
-rw-r--r--drivers/s390/net/lcs.c2385
-rw-r--r--drivers/s390/net/lcs.h342
-rw-r--r--drivers/s390/net/qeth_l2_main.c3
-rw-r--r--drivers/s390/net/smsgiucv.c3
-rw-r--r--drivers/s390/net/smsgiucv_app.c3
-rw-r--r--drivers/s390/scsi/zfcp_aux.c2
-rw-r--r--drivers/scsi/Kconfig3
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/aacraid/aachba.c4
-rw-r--r--drivers/scsi/aacraid/linit.c2
-rw-r--r--drivers/scsi/arm/acornscsi.c2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c2
-rw-r--r--drivers/scsi/bfa/bfad.c2
-rw-r--r--drivers/scsi/csiostor/csio_init.c2
-rw-r--r--drivers/scsi/cxlflash/Kconfig15
-rw-r--r--drivers/scsi/cxlflash/Makefile5
-rw-r--r--drivers/scsi/cxlflash/backend.h48
-rw-r--r--drivers/scsi/cxlflash/common.h340
-rw-r--r--drivers/scsi/cxlflash/cxl_hw.c177
-rw-r--r--drivers/scsi/cxlflash/lunmgt.c278
-rw-r--r--drivers/scsi/cxlflash/main.c3970
-rw-r--r--drivers/scsi/cxlflash/main.h129
-rw-r--r--drivers/scsi/cxlflash/ocxl_hw.c1399
-rw-r--r--drivers/scsi/cxlflash/ocxl_hw.h72
-rw-r--r--drivers/scsi/cxlflash/sislite.h560
-rw-r--r--drivers/scsi/cxlflash/superpipe.c2218
-rw-r--r--drivers/scsi/cxlflash/superpipe.h150
-rw-r--r--drivers/scsi/cxlflash/vlun.c1336
-rw-r--r--drivers/scsi/cxlflash/vlun.h82
-rw-r--r--drivers/scsi/elx/efct/efct_driver.c2
-rw-r--r--drivers/scsi/fnic/fdls_disc.c57
-rw-r--r--drivers/scsi/fnic/fnic_main.c5
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h3
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c28
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c6
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c4
-rw-r--r--drivers/scsi/hpsa.c19
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c4
-rw-r--r--drivers/scsi/ips.c8
-rw-r--r--drivers/scsi/isci/init.c14
-rw-r--r--drivers/scsi/isci/isci.h7
-rw-r--r--drivers/scsi/isci/remote_device.h2
-rw-r--r--drivers/scsi/iscsi_tcp.c60
-rw-r--r--drivers/scsi/iscsi_tcp.h4
-rw-r--r--drivers/scsi/libiscsi_tcp.c91
-rw-r--r--drivers/scsi/lpfc/lpfc.h26
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c23
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c35
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c23
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c12
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c43
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c2
-rw-r--r--drivers/scsi/megaraid.c10
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c10
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c10
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h4
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_image.h8
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_init.h11
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_ioc.h21
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_tool.h1
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_transport.h20
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h34
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_app.c129
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c159
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c101
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_transport.c8
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2.h9
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h7
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_ioc.h54
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c23
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h10
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c79
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c279
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.h49
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c8
-rw-r--r--drivers/scsi/mvsas/mv_sas.c10
-rw-r--r--drivers/scsi/mvsas/mv_sas.h1
-rw-r--r--drivers/scsi/qedi/qedi_main.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c4
-rw-r--r--drivers/scsi/scsi.c28
-rw-r--r--drivers/scsi/scsi_debug.c928
-rw-r--r--drivers/scsi/scsi_error.c19
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/scsi_scan.c5
-rw-r--r--drivers/scsi/scsi_sysctl.c4
-rw-r--r--drivers/scsi/st.c80
-rw-r--r--drivers/scsi/st.h6
-rw-r--r--drivers/scsi/storvsc_drv.c4
-rw-r--r--drivers/sh/clk/cpg.c25
-rw-r--r--drivers/soc/apple/rtkit-internal.h1
-rw-r--r--drivers/soc/apple/rtkit.c112
-rw-r--r--drivers/soc/atmel/soc.c5
-rw-r--r--drivers/soc/atmel/soc.h3
-rw-r--r--drivers/soc/hisilicon/kunpeng_hccs.c4
-rw-r--r--drivers/soc/imx/soc-imx8m.c26
-rw-r--r--drivers/soc/mediatek/mt8167-mmsys.h31
-rw-r--r--drivers/soc/mediatek/mt8173-mmsys.h99
-rw-r--r--drivers/soc/mediatek/mt8183-mmsys.h50
-rw-r--r--drivers/soc/mediatek/mt8186-mmsys.h88
-rw-r--r--drivers/soc/mediatek/mt8188-mmsys.h266
-rw-r--r--drivers/soc/mediatek/mt8192-mmsys.h71
-rw-r--r--drivers/soc/mediatek/mt8195-mmsys.h632
-rw-r--r--drivers/soc/mediatek/mt8365-mmsys.h84
-rw-r--r--drivers/soc/mediatek/mtk-mmsys.h14
-rw-r--r--drivers/soc/mediatek/mtk-mutex.c6
-rw-r--r--drivers/soc/mediatek/mtk-socinfo.c22
-rw-r--r--drivers/soc/qcom/ice.c51
-rw-r--r--drivers/soc/qcom/pdr_interface.c8
-rw-r--r--drivers/soc/qcom/pdr_internal.h1
-rw-r--r--drivers/soc/qcom/pmic_glink.c2
-rw-r--r--drivers/soc/qcom/qcom_aoss.c3
-rw-r--r--drivers/soc/qcom/qcom_pd_mapper.c12
-rw-r--r--drivers/soc/qcom/qcom_pdr_msg.c3
-rw-r--r--drivers/soc/renesas/Kconfig18
-rw-r--r--drivers/soc/renesas/Makefile4
-rw-r--r--drivers/soc/renesas/r9a08g045-sysc.c23
-rw-r--r--drivers/soc/renesas/r9a09g047-sys.c67
-rw-r--r--drivers/soc/renesas/r9a09g057-sys.c67
-rw-r--r--drivers/soc/renesas/renesas-soc.c33
-rw-r--r--drivers/soc/renesas/rz-sysc.c137
-rw-r--r--drivers/soc/renesas/rz-sysc.h46
-rw-r--r--drivers/soc/samsung/exynos-asv.c1
-rw-r--r--drivers/soc/samsung/exynos-chipid.c5
-rw-r--r--drivers/soc/samsung/exynos-pmu.c1
-rw-r--r--drivers/soc/samsung/exynos-usi.c108
-rw-r--r--drivers/soc/samsung/exynos3250-pmu.c1
-rw-r--r--drivers/soc/samsung/exynos5250-pmu.c1
-rw-r--r--drivers/soc/samsung/exynos5420-pmu.c1
-rw-r--r--drivers/soc/tegra/pmc.c3
-rw-r--r--drivers/soc/ti/k3-socinfo.c13
-rw-r--r--drivers/soundwire/qcom.c26
-rw-r--r--drivers/spi/Kconfig44
-rw-r--r--drivers/spi/Makefile7
-rw-r--r--drivers/spi/atmel-quadspi.c5
-rw-r--r--drivers/spi/spi-aspeed-smc.c7
-rw-r--r--drivers/spi/spi-axi-spi-engine.c315
-rw-r--r--drivers/spi/spi-cadence-quadspi.c8
-rw-r--r--drivers/spi/spi-fsi.c13
-rw-r--r--drivers/spi/spi-fsl-lpspi.c2
-rw-r--r--drivers/spi/spi-gpio.c45
-rw-r--r--drivers/spi/spi-imx.c2
-rw-r--r--drivers/spi/spi-mem.c11
-rw-r--r--drivers/spi/spi-microchip-core.c41
-rw-r--r--drivers/spi/spi-mt65xx.c17
-rw-r--r--drivers/spi/spi-mtk-snfi.c3
-rw-r--r--drivers/spi/spi-mux.c4
-rw-r--r--drivers/spi/spi-npcm-fiu.c5
-rw-r--r--drivers/spi/spi-offload-trigger-pwm.c169
-rw-r--r--drivers/spi/spi-offload.c468
-rw-r--r--drivers/spi/spi-qpic-snand.c1633
-rw-r--r--drivers/spi/spi-realtek-rtl-snand.c1
-rw-r--r--drivers/spi/spi-s3c64xx.c4
-rw-r--r--drivers/spi/spi-sg2044-nor.c488
-rw-r--r--drivers/spi/spi-stm32-ospi.c1063
-rw-r--r--drivers/spi/spi-stm32-qspi.c5
-rw-r--r--drivers/spi/spi-zynq-qspi.c4
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c173
-rw-r--r--drivers/spi/spi.c117
-rw-r--r--drivers/spi/spidev.c2
-rw-r--r--drivers/staging/fbtft/fbtft-core.c4
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_platform.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.c35
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c9
-rw-r--r--drivers/staging/media/imx/imx-media-vdic.c54
-rw-r--r--drivers/staging/media/ipu3/include/uapi/intel-ipu3.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c6
-rw-r--r--drivers/target/loopback/tcm_loop.c5
-rw-r--r--drivers/target/target_core_configfs.c6
-rw-r--r--drivers/target/target_core_device.c8
-rw-r--r--drivers/target/target_core_iblock.c12
-rw-r--r--drivers/target/target_core_pr.c6
-rw-r--r--drivers/target/target_core_spc.c36
-rw-r--r--drivers/thermal/hisi_thermal.c4
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3402_thermal.c3
-rw-r--r--drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c6
-rw-r--r--drivers/thermal/intel/intel_tcc.c2
-rw-r--r--drivers/thermal/k3_j72xx_bandgap.c4
-rw-r--r--drivers/thermal/thermal_core.c20
-rw-r--r--drivers/thermal/thermal_debugfs.c4
-rw-r--r--drivers/thermal/thermal_of.c2
-rw-r--r--drivers/thunderbolt/ctl.c2
-rw-r--r--drivers/thunderbolt/eeprom.c2
-rw-r--r--drivers/thunderbolt/tunnel.c11
-rw-r--r--drivers/thunderbolt/tunnel.h2
-rw-r--r--drivers/tty/Kconfig19
-rw-r--r--drivers/tty/hvc/hvc_iucv.c7
-rw-r--r--drivers/tty/serial/8250/8250_bcm7271.c3
-rw-r--r--drivers/tty/serial/8250/8250_port.c10
-rw-r--r--drivers/tty/serial/amba-pl011.c9
-rw-r--r--drivers/tty/serial/imx.c8
-rw-r--r--drivers/tty/serial/serial_core.c6
-rw-r--r--drivers/tty/serial/sh-sci.c3
-rw-r--r--drivers/tty/serial/xilinx_uartps.c8
-rw-r--r--drivers/ufs/core/ufs-sysfs.c10
-rw-r--r--drivers/ufs/core/ufs_trace.h135
-rw-r--r--drivers/ufs/core/ufshcd-crypto.c7
-rw-r--r--drivers/ufs/core/ufshcd-priv.h21
-rw-r--r--drivers/ufs/core/ufshcd.c148
-rw-r--r--drivers/ufs/host/Kconfig12
-rw-r--r--drivers/ufs/host/Makefile1
-rw-r--r--drivers/ufs/host/ufs-exynos.c13
-rw-r--r--drivers/ufs/host/ufs-exynos.h2
-rw-r--r--drivers/ufs/host/ufs-hisi.c6
-rw-r--r--drivers/ufs/host/ufs-mediatek.c11
-rw-r--r--drivers/ufs/host/ufs-qcom.c131
-rw-r--r--drivers/ufs/host/ufs-qcom.h39
-rw-r--r--drivers/ufs/host/ufs-renesas.c723
-rw-r--r--drivers/ufs/host/ufs-rockchip.c354
-rw-r--r--drivers/ufs/host/ufs-rockchip.h90
-rw-r--r--drivers/ufs/host/ufs-sprd.c6
-rw-r--r--drivers/ufs/host/ufshcd-pci.c2
-rw-r--r--drivers/usb/chipidea/otg_fsm.c3
-rw-r--r--drivers/usb/dwc2/hcd_queue.c3
-rw-r--r--drivers/usb/fotg210/fotg210-hcd.c3
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c4
-rw-r--r--drivers/usb/gadget/function/f_ncm.c3
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c6
-rw-r--r--drivers/usb/host/ehci-hcd.c3
-rw-r--r--drivers/usb/musb/musb_cppi41.c4
-rw-r--r--drivers/usb/serial/ftdi_sio.c14
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h13
-rw-r--r--drivers/usb/serial/option.c48
-rw-r--r--drivers/usb/storage/debug.c4
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c24
-rw-r--r--drivers/vfio/group.c16
-rw-r--r--drivers/vfio/pci/Kconfig4
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c2
-rw-r--r--drivers/video/Kconfig1
-rw-r--r--drivers/video/backlight/88pm860x_bl.c1
-rw-r--r--drivers/video/backlight/Kconfig20
-rw-r--r--drivers/video/backlight/Makefile2
-rw-r--r--drivers/video/backlight/adp5520_bl.c1
-rw-r--r--drivers/video/backlight/adp8860_bl.c1
-rw-r--r--drivers/video/backlight/adp8870_bl.c1
-rw-r--r--drivers/video/backlight/apple_dwi_bl.c123
-rw-r--r--drivers/video/backlight/as3711_bl.c1
-rw-r--r--drivers/video/backlight/bd6107.c1
-rw-r--r--drivers/video/backlight/da903x_bl.c1
-rw-r--r--drivers/video/backlight/da9052_bl.c1
-rw-r--r--drivers/video/backlight/ep93xx_bl.c1
-rw-r--r--drivers/video/backlight/hp680_bl.c1
-rw-r--r--drivers/video/backlight/led_bl.c5
-rw-r--r--drivers/video/backlight/locomolcd.c1
-rw-r--r--drivers/video/backlight/lv5207lp.c1
-rw-r--r--drivers/video/backlight/max8925_bl.c1
-rw-r--r--drivers/video/backlight/pcf50633-backlight.c154
-rw-r--r--drivers/video/backlight/tps65217_bl.c1
-rw-r--r--drivers/video/backlight/vgg2432a4.c1
-rw-r--r--drivers/video/backlight/wm831x_bl.c1
-rw-r--r--drivers/video/console/Kconfig9
-rw-r--r--drivers/video/fbdev/aty/mach64_cursor.c7
-rw-r--r--drivers/video/fbdev/au1100fb.c4
-rw-r--r--drivers/video/fbdev/core/Kconfig10
-rw-r--r--drivers/video/fbdev/core/bitblit.c5
-rw-r--r--drivers/video/fbdev/core/cfbcopyarea.c428
-rw-r--r--drivers/video/fbdev/core/cfbfillrect.c362
-rw-r--r--drivers/video/fbdev/core/cfbimgblt.c357
-rw-r--r--drivers/video/fbdev/core/cfbmem.h43
-rw-r--r--drivers/video/fbdev/core/fb_copyarea.h405
-rw-r--r--drivers/video/fbdev/core/fb_draw.h274
-rw-r--r--drivers/video/fbdev/core/fb_fillrect.h280
-rw-r--r--drivers/video/fbdev/core/fb_imageblit.h495
-rw-r--r--drivers/video/fbdev/core/fbcon.c79
-rw-r--r--drivers/video/fbdev/core/fbcon.h38
-rw-r--r--drivers/video/fbdev/core/fbcon_ccw.c5
-rw-r--r--drivers/video/fbdev/core/fbcon_cw.c5
-rw-r--r--drivers/video/fbdev/core/fbcon_ud.c5
-rw-r--r--drivers/video/fbdev/core/fbsysfs.c69
-rw-r--r--drivers/video/fbdev/core/syscopyarea.c369
-rw-r--r--drivers/video/fbdev/core/sysfillrect.c324
-rw-r--r--drivers/video/fbdev/core/sysimgblt.c333
-rw-r--r--drivers/video/fbdev/core/sysmem.h39
-rw-r--r--drivers/video/fbdev/core/tileblit.c45
-rw-r--r--drivers/video/fbdev/fsl-diu-fb.c1
-rw-r--r--drivers/video/fbdev/hyperv_fb.c52
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc.c15
-rw-r--r--drivers/video/fbdev/pxafb.c23
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.c29
-rw-r--r--drivers/video/fbdev/sm501fb.c7
-rw-r--r--drivers/video/fbdev/wmt_ge_rops.c30
-rw-r--r--drivers/virt/coco/sev-guest/sev-guest.c1
-rw-r--r--drivers/virtio/virtio_mem.c4
-rw-r--r--drivers/watchdog/diag288_wdt.c7
-rw-r--r--drivers/watchdog/softdog.c8
-rw-r--r--drivers/watchdog/watchdog_dev.c4
-rw-r--r--drivers/watchdog/watchdog_hrtimer_pretimeout.c4
-rw-r--r--drivers/xen/pci.c32
-rw-r--r--drivers/xen/platform-pci.c4
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c20
-rw-r--r--drivers/xen/xen-pciback/pciback.h2
-rw-r--r--drivers/xen/xenfs/xensyms.c4
4443 files changed, 351145 insertions, 112773 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 7bdad836fc62..7c556c5ac4fd 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -21,6 +21,8 @@ source "drivers/connector/Kconfig"
source "drivers/firmware/Kconfig"
+source "drivers/fwctl/Kconfig"
+
source "drivers/gnss/Kconfig"
source "drivers/mtd/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 45d1c3e630f7..b5749cf67044 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -135,6 +135,7 @@ obj-y += ufs/
obj-$(CONFIG_MEMSTICK) += memstick/
obj-$(CONFIG_INFINIBAND) += infiniband/
obj-y += firmware/
+obj-$(CONFIG_FWCTL) += fwctl/
obj-$(CONFIG_CRYPTO) += crypto/
obj-$(CONFIG_SUPERH) += sh/
obj-y += clocksource/
diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
index 5f43db02b240..00d215ac866e 100644
--- a/drivers/accel/amdxdna/aie2_ctx.c
+++ b/drivers/accel/amdxdna/aie2_ctx.c
@@ -34,6 +34,8 @@ static void aie2_job_release(struct kref *ref)
job = container_of(ref, struct amdxdna_sched_job, refcnt);
amdxdna_sched_job_cleanup(job);
+ atomic64_inc(&job->hwctx->job_free_cnt);
+ wake_up(&job->hwctx->priv->job_free_wq);
if (job->out_fence)
dma_fence_put(job->out_fence);
kfree(job);
@@ -134,7 +136,8 @@ static void aie2_hwctx_wait_for_idle(struct amdxdna_hwctx *hwctx)
if (!fence)
return;
- dma_fence_wait(fence, false);
+ /* Wait up to 2 seconds for fw to finish all pending requests */
+ dma_fence_wait_timeout(fence, false, msecs_to_jiffies(2000));
dma_fence_put(fence);
}
@@ -185,7 +188,7 @@ aie2_sched_notify(struct amdxdna_sched_job *job)
}
static int
-aie2_sched_resp_handler(void *handle, const u32 *data, size_t size)
+aie2_sched_resp_handler(void *handle, void __iomem *data, size_t size)
{
struct amdxdna_sched_job *job = handle;
struct amdxdna_gem_obj *cmd_abo;
@@ -203,7 +206,7 @@ aie2_sched_resp_handler(void *handle, const u32 *data, size_t size)
goto out;
}
- status = *data;
+ status = readl(data);
XDNA_DBG(job->hwctx->client->xdna, "Resp status 0x%x", status);
if (status == AIE2_STATUS_SUCCESS)
amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_COMPLETED);
@@ -216,7 +219,7 @@ out:
}
static int
-aie2_sched_nocmd_resp_handler(void *handle, const u32 *data, size_t size)
+aie2_sched_nocmd_resp_handler(void *handle, void __iomem *data, size_t size)
{
struct amdxdna_sched_job *job = handle;
u32 ret = 0;
@@ -230,7 +233,7 @@ aie2_sched_nocmd_resp_handler(void *handle, const u32 *data, size_t size)
goto out;
}
- status = *data;
+ status = readl(data);
XDNA_DBG(job->hwctx->client->xdna, "Resp status 0x%x", status);
out:
@@ -239,14 +242,14 @@ out:
}
static int
-aie2_sched_cmdlist_resp_handler(void *handle, const u32 *data, size_t size)
+aie2_sched_cmdlist_resp_handler(void *handle, void __iomem *data, size_t size)
{
struct amdxdna_sched_job *job = handle;
struct amdxdna_gem_obj *cmd_abo;
- struct cmd_chain_resp *resp;
struct amdxdna_dev *xdna;
u32 fail_cmd_status;
u32 fail_cmd_idx;
+ u32 cmd_status;
u32 ret = 0;
cmd_abo = job->cmd_bo;
@@ -256,17 +259,17 @@ aie2_sched_cmdlist_resp_handler(void *handle, const u32 *data, size_t size)
goto out;
}
- resp = (struct cmd_chain_resp *)data;
+ cmd_status = readl(data + offsetof(struct cmd_chain_resp, status));
xdna = job->hwctx->client->xdna;
- XDNA_DBG(xdna, "Status 0x%x", resp->status);
- if (resp->status == AIE2_STATUS_SUCCESS) {
+ XDNA_DBG(xdna, "Status 0x%x", cmd_status);
+ if (cmd_status == AIE2_STATUS_SUCCESS) {
amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_COMPLETED);
goto out;
}
/* Slow path to handle error, read from ringbuf on BAR */
- fail_cmd_idx = resp->fail_cmd_idx;
- fail_cmd_status = resp->fail_cmd_status;
+ fail_cmd_idx = readl(data + offsetof(struct cmd_chain_resp, fail_cmd_idx));
+ fail_cmd_status = readl(data + offsetof(struct cmd_chain_resp, fail_cmd_status));
XDNA_DBG(xdna, "Failed cmd idx %d, status 0x%x",
fail_cmd_idx, fail_cmd_status);
@@ -361,7 +364,7 @@ aie2_sched_job_timedout(struct drm_sched_job *sched_job)
return DRM_GPU_SCHED_STAT_NOMINAL;
}
-const struct drm_sched_backend_ops sched_ops = {
+static const struct drm_sched_backend_ops sched_ops = {
.run_job = aie2_sched_job_run,
.free_job = aie2_sched_job_free,
.timedout_job = aie2_sched_job_timedout,
@@ -516,6 +519,14 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
{
struct amdxdna_client *client = hwctx->client;
struct amdxdna_dev *xdna = client->xdna;
+ const struct drm_sched_init_args args = {
+ .ops = &sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = HWCTX_MAX_CMDS,
+ .timeout = msecs_to_jiffies(HWCTX_MAX_TIMEOUT),
+ .name = hwctx->name,
+ .dev = xdna->ddev.dev,
+ };
struct drm_gpu_scheduler *sched;
struct amdxdna_hwctx_priv *priv;
struct amdxdna_gem_obj *heap;
@@ -573,9 +584,7 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
might_lock(&priv->io_lock);
fs_reclaim_release(GFP_KERNEL);
- ret = drm_sched_init(sched, &sched_ops, NULL, DRM_SCHED_PRIORITY_COUNT,
- HWCTX_MAX_CMDS, 0, msecs_to_jiffies(HWCTX_MAX_TIMEOUT),
- NULL, NULL, hwctx->name, xdna->ddev.dev);
+ ret = drm_sched_init(sched, &args);
if (ret) {
XDNA_ERR(xdna, "Failed to init DRM scheduler. ret %d", ret);
goto free_cmd_bufs;
@@ -616,6 +625,7 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
hwctx->status = HWCTX_STAT_INIT;
ndev = xdna->dev_handle;
ndev->hwctx_num++;
+ init_waitqueue_head(&priv->job_free_wq);
XDNA_DBG(xdna, "hwctx %s init completed", hwctx->name);
@@ -652,25 +662,23 @@ void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx)
xdna = hwctx->client->xdna;
ndev = xdna->dev_handle;
ndev->hwctx_num--;
- drm_sched_wqueue_stop(&hwctx->priv->sched);
- /* Now, scheduler will not send command to device. */
+ XDNA_DBG(xdna, "%s sequence number %lld", hwctx->name, hwctx->priv->seq);
+ drm_sched_entity_destroy(&hwctx->priv->entity);
+
+ aie2_hwctx_wait_for_idle(hwctx);
+
+ /* Request fw to destroy hwctx and cancel the rest pending requests */
aie2_release_resource(hwctx);
- /*
- * All submitted commands are aborted.
- * Restart scheduler queues to cleanup jobs. The amdxdna_sched_job_run()
- * will return NODEV if it is called.
- */
- drm_sched_wqueue_start(&hwctx->priv->sched);
+ /* Wait for all submitted jobs to be completed or canceled */
+ wait_event(hwctx->priv->job_free_wq,
+ atomic64_read(&hwctx->job_submit_cnt) ==
+ atomic64_read(&hwctx->job_free_cnt));
- aie2_hwctx_wait_for_idle(hwctx);
- drm_sched_entity_destroy(&hwctx->priv->entity);
drm_sched_fini(&hwctx->priv->sched);
aie2_ctx_syncobj_destroy(hwctx);
- XDNA_DBG(xdna, "%s sequence number %lld", hwctx->name, hwctx->priv->seq);
-
for (idx = 0; idx < ARRAY_SIZE(hwctx->priv->cmd_buf); idx++)
drm_gem_object_put(to_gobj(hwctx->priv->cmd_buf[idx]));
amdxdna_gem_unpin(hwctx->priv->heap);
@@ -879,6 +887,7 @@ retry:
drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
aie2_job_put(job);
+ atomic64_inc(&hwctx->job_submit_cnt);
return 0;
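
The aie2_hwctx_fini() rework above drops the scheduler stop/start dance in favour of a counter drain: every submission bumps job_submit_cnt, every job release bumps job_free_cnt and wakes a waitqueue, and teardown sleeps until the two counters match. A minimal sketch of that idiom, using hypothetical my_ctx names rather than the driver's real types, might look like:

#include <linux/atomic.h>
#include <linux/wait.h>

struct my_ctx {
	atomic64_t submit_cnt;
	atomic64_t free_cnt;
	wait_queue_head_t free_wq;
	/* atomic64_set(..., 0) and init_waitqueue_head() at creation time */
};

static void my_job_submitted(struct my_ctx *ctx)
{
	atomic64_inc(&ctx->submit_cnt);
}

static void my_job_freed(struct my_ctx *ctx)
{
	atomic64_inc(&ctx->free_cnt);
	wake_up(&ctx->free_wq);		/* unblock a waiting teardown path */
}

static void my_ctx_drain(struct my_ctx *ctx)
{
	/* sleep until every submitted job has been released */
	wait_event(ctx->free_wq,
		   atomic64_read(&ctx->submit_cnt) ==
		   atomic64_read(&ctx->free_cnt));
}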
diff --git a/drivers/accel/amdxdna/aie2_error.c b/drivers/accel/amdxdna/aie2_error.c
index b1defaa8513b..5ee905632a39 100644
--- a/drivers/accel/amdxdna/aie2_error.c
+++ b/drivers/accel/amdxdna/aie2_error.c
@@ -209,16 +209,14 @@ static u32 aie2_error_backtrack(struct amdxdna_dev_hdl *ndev, void *err_info, u3
return err_col;
}
-static int aie2_error_async_cb(void *handle, const u32 *data, size_t size)
+static int aie2_error_async_cb(void *handle, void __iomem *data, size_t size)
{
- struct async_event_msg_resp *resp;
struct async_event *e = handle;
if (data) {
- resp = (struct async_event_msg_resp *)data;
- e->resp.type = resp->type;
+ e->resp.type = readl(data + offsetof(struct async_event_msg_resp, type));
wmb(); /* Update status in the end, so that no lock for here */
- e->resp.status = resp->status;
+ e->resp.status = readl(data + offsetof(struct async_event_msg_resp, status));
}
queue_work(e->wq, &e->work);
return 0;
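
Both handlers above stop casting the mailbox payload to a struct pointer and instead read each field with readl() at its offsetof() position, since the response now arrives as a void __iomem pointer into the ring buffer on the BAR. A hedged sketch of the access pattern, with a made-up struct demo_resp standing in for the firmware layout:

#include <linux/io.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct demo_resp {
	u32 type;
	u32 status;
};

static void demo_read_resp(void __iomem *data, struct demo_resp *out)
{
	/* MMIO must go through readl(); a plain cast + dereference is not portable */
	out->type   = readl(data + offsetof(struct demo_resp, type));
	out->status = readl(data + offsetof(struct demo_resp, status));
}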
diff --git a/drivers/accel/amdxdna/aie2_message.c b/drivers/accel/amdxdna/aie2_message.c
index 9e2c9a44f76a..bf4219e32cc1 100644
--- a/drivers/accel/amdxdna/aie2_message.c
+++ b/drivers/accel/amdxdna/aie2_message.c
@@ -356,7 +356,7 @@ fail:
}
int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size,
- void *handle, int (*cb)(void*, const u32 *, size_t))
+ void *handle, int (*cb)(void*, void __iomem *, size_t))
{
struct async_event_msg_req req = { 0 };
struct xdna_mailbox_msg msg = {
@@ -435,7 +435,7 @@ int aie2_config_cu(struct amdxdna_hwctx *hwctx)
}
int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
- int (*notify_cb)(void *, const u32 *, size_t))
+ int (*notify_cb)(void *, void __iomem *, size_t))
{
struct mailbox_channel *chann = hwctx->priv->mbox_chann;
struct amdxdna_dev *xdna = hwctx->client->xdna;
@@ -640,7 +640,7 @@ aie2_cmd_op_to_msg_op(u32 op)
int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx,
struct amdxdna_sched_job *job,
- int (*notify_cb)(void *, const u32 *, size_t))
+ int (*notify_cb)(void *, void __iomem *, size_t))
{
struct amdxdna_gem_obj *cmdbuf_abo = aie2_cmdlist_get_cmd_buf(job);
struct mailbox_channel *chann = hwctx->priv->mbox_chann;
@@ -705,7 +705,7 @@ int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx,
int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx,
struct amdxdna_sched_job *job,
- int (*notify_cb)(void *, const u32 *, size_t))
+ int (*notify_cb)(void *, void __iomem *, size_t))
{
struct amdxdna_gem_obj *cmdbuf_abo = aie2_cmdlist_get_cmd_buf(job);
struct mailbox_channel *chann = hwctx->priv->mbox_chann;
@@ -740,7 +740,7 @@ int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx,
}
int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
- int (*notify_cb)(void *, const u32 *, size_t))
+ int (*notify_cb)(void *, void __iomem *, size_t))
{
struct mailbox_channel *chann = hwctx->priv->mbox_chann;
struct amdxdna_gem_obj *abo = to_xdna_obj(job->bos[0]);
diff --git a/drivers/accel/amdxdna/aie2_pci.h b/drivers/accel/amdxdna/aie2_pci.h
index f2d95531ddc2..385914840eaa 100644
--- a/drivers/accel/amdxdna/aie2_pci.h
+++ b/drivers/accel/amdxdna/aie2_pci.h
@@ -271,18 +271,18 @@ int aie2_destroy_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwc
int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u64 size);
int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf, u32 size, u32 *cols_filled);
int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size,
- void *handle, int (*cb)(void*, const u32 *, size_t));
+ void *handle, int (*cb)(void*, void __iomem *, size_t));
int aie2_config_cu(struct amdxdna_hwctx *hwctx);
int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
- int (*notify_cb)(void *, const u32 *, size_t));
+ int (*notify_cb)(void *, void __iomem *, size_t));
int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx,
struct amdxdna_sched_job *job,
- int (*notify_cb)(void *, const u32 *, size_t));
+ int (*notify_cb)(void *, void __iomem *, size_t));
int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx,
struct amdxdna_sched_job *job,
- int (*notify_cb)(void *, const u32 *, size_t));
+ int (*notify_cb)(void *, void __iomem *, size_t));
int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
- int (*notify_cb)(void *, const u32 *, size_t));
+ int (*notify_cb)(void *, void __iomem *, size_t));
/* aie2_hwctx.c */
int aie2_hwctx_init(struct amdxdna_hwctx *hwctx);
diff --git a/drivers/accel/amdxdna/aie2_smu.c b/drivers/accel/amdxdna/aie2_smu.c
index 73388443c676..d303701b0ded 100644
--- a/drivers/accel/amdxdna/aie2_smu.c
+++ b/drivers/accel/amdxdna/aie2_smu.c
@@ -64,6 +64,7 @@ int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
if (ret) {
XDNA_ERR(ndev->xdna, "Set npu clock to %d failed, ret %d\n",
ndev->priv->dpm_clk_tbl[dpm_level].npuclk, ret);
+ return ret;
}
ndev->npuclk_freq = freq;
@@ -72,6 +73,7 @@ int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
if (ret) {
XDNA_ERR(ndev->xdna, "Set h clock to %d failed, ret %d\n",
ndev->priv->dpm_clk_tbl[dpm_level].hclk, ret);
+ return ret;
}
ndev->hclk_freq = freq;
ndev->dpm_level = dpm_level;
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c
index d11b1c83d9c3..43442b9e273b 100644
--- a/drivers/accel/amdxdna/amdxdna_ctx.c
+++ b/drivers/accel/amdxdna/amdxdna_ctx.c
@@ -220,6 +220,8 @@ int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
args->syncobj_handle = hwctx->syncobj_hdl;
mutex_unlock(&xdna->dev_lock);
+ atomic64_set(&hwctx->job_submit_cnt, 0);
+ atomic64_set(&hwctx->job_free_cnt, 0);
XDNA_DBG(xdna, "PID %d create HW context %d, ret %d", client->pid, args->handle, ret);
drm_dev_exit(idx);
return 0;
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.h b/drivers/accel/amdxdna/amdxdna_ctx.h
index 80b0304193ec..f0a4a8586d85 100644
--- a/drivers/accel/amdxdna/amdxdna_ctx.h
+++ b/drivers/accel/amdxdna/amdxdna_ctx.h
@@ -87,6 +87,9 @@ struct amdxdna_hwctx {
struct amdxdna_qos_info qos;
struct amdxdna_hwctx_param_config_cu *cus;
u32 syncobj_hdl;
+
+ atomic64_t job_submit_cnt;
+ atomic64_t job_free_cnt ____cacheline_aligned_in_smp;
};
#define drm_job_to_xdna_job(j) \
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox.c b/drivers/accel/amdxdna/amdxdna_mailbox.c
index e5301fac1397..da1ac89bb78f 100644
--- a/drivers/accel/amdxdna/amdxdna_mailbox.c
+++ b/drivers/accel/amdxdna/amdxdna_mailbox.c
@@ -91,7 +91,7 @@ struct mailbox_pkg {
struct mailbox_msg {
void *handle;
- int (*notify_cb)(void *handle, const u32 *data, size_t size);
+ int (*notify_cb)(void *handle, void __iomem *data, size_t size);
size_t pkg_size; /* package size in bytes */
struct mailbox_pkg pkg;
};
@@ -244,7 +244,7 @@ no_space:
static int
mailbox_get_resp(struct mailbox_channel *mb_chann, struct xdna_msg_header *header,
- void *data)
+ void __iomem *data)
{
struct mailbox_msg *mb_msg;
int msg_id;
@@ -332,7 +332,7 @@ static int mailbox_get_msg(struct mailbox_channel *mb_chann)
memcpy_fromio((u32 *)&header + 1, read_addr, rest);
read_addr += rest;
- ret = mailbox_get_resp(mb_chann, &header, (u32 *)read_addr);
+ ret = mailbox_get_resp(mb_chann, &header, read_addr);
mailbox_set_headptr(mb_chann, head + msg_size);
/* After update head, it can equal to ringbuf_size. This is expected. */
@@ -349,8 +349,6 @@ static irqreturn_t mailbox_irq_handler(int irq, void *p)
trace_mbox_irq_handle(MAILBOX_NAME, irq);
/* Schedule a rx_work to call the callback functions */
queue_work(mb_chann->work_q, &mb_chann->rx_work);
- /* Clear IOHUB register */
- mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0);
return IRQ_HANDLED;
}
@@ -367,6 +365,9 @@ static void mailbox_rx_worker(struct work_struct *rx_work)
return;
}
+again:
+ mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0);
+
while (1) {
/*
* If return is 0, keep consuming next message, until there is
@@ -380,10 +381,18 @@ static void mailbox_rx_worker(struct work_struct *rx_work)
if (unlikely(ret)) {
MB_ERR(mb_chann, "Unexpected ret %d, disable irq", ret);
WRITE_ONCE(mb_chann->bad_state, true);
- disable_irq(mb_chann->msix_irq);
- break;
+ return;
}
}
+
+ /*
+ * The hardware will not generate interrupt if firmware creates a new
+ * response right after driver clears interrupt register. Check
+ * the interrupt register to make sure there is not any new response
+ * before exiting.
+ */
+ if (mailbox_reg_read(mb_chann, mb_chann->iohub_int_addr))
+ goto again;
}
int xdna_mailbox_send_msg(struct mailbox_channel *mb_chann,
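
The rx worker change above moves the interrupt acknowledge out of the hard IRQ handler and into a clear-drain-recheck loop, so a response that firmware posts right after the ack is not lost. A compact sketch of that loop, with demo_chan and demo_get_one_msg() as illustrative stand-ins:

#include <linux/io.h>

struct demo_chan {
	void __iomem *irq_reg;	/* hypothetical IOHUB status register */
};

/* hypothetical ring-buffer drain; returns 0 while messages remain */
int demo_get_one_msg(struct demo_chan *chan);

static void demo_rx_worker(struct demo_chan *chan)
{
again:
	writel(0, chan->irq_reg);		/* ack before draining */

	while (demo_get_one_msg(chan) == 0)
		;				/* consume every queued response */

	/* firmware may post a new response right after the ack above */
	if (readl(chan->irq_reg))
		goto again;
}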
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox.h b/drivers/accel/amdxdna/amdxdna_mailbox.h
index 57954c303bdd..ea367f2fb738 100644
--- a/drivers/accel/amdxdna/amdxdna_mailbox.h
+++ b/drivers/accel/amdxdna/amdxdna_mailbox.h
@@ -25,7 +25,7 @@ struct mailbox_channel;
struct xdna_mailbox_msg {
u32 opcode;
void *handle;
- int (*notify_cb)(void *handle, const u32 *data, size_t size);
+ int (*notify_cb)(void *handle, void __iomem *data, size_t size);
u8 *send_data;
size_t send_size;
};
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox_helper.c b/drivers/accel/amdxdna/amdxdna_mailbox_helper.c
index 5139a9c96a91..6d0c24513476 100644
--- a/drivers/accel/amdxdna/amdxdna_mailbox_helper.c
+++ b/drivers/accel/amdxdna/amdxdna_mailbox_helper.c
@@ -16,7 +16,7 @@
#include "amdxdna_mailbox_helper.h"
#include "amdxdna_pci_drv.h"
-int xdna_msg_cb(void *handle, const u32 *data, size_t size)
+int xdna_msg_cb(void *handle, void __iomem *data, size_t size)
{
struct xdna_notify *cb_arg = handle;
int ret;
@@ -29,9 +29,9 @@ int xdna_msg_cb(void *handle, const u32 *data, size_t size)
goto out;
}
+ memcpy_fromio(cb_arg->data, data, cb_arg->size);
print_hex_dump_debug("resp data: ", DUMP_PREFIX_OFFSET,
- 16, 4, data, cb_arg->size, true);
- memcpy(cb_arg->data, data, cb_arg->size);
+ 16, 4, cb_arg->data, cb_arg->size, true);
out:
ret = cb_arg->error;
complete(&cb_arg->comp);
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox_helper.h b/drivers/accel/amdxdna/amdxdna_mailbox_helper.h
index 23e1317b79fe..710ff8873d61 100644
--- a/drivers/accel/amdxdna/amdxdna_mailbox_helper.h
+++ b/drivers/accel/amdxdna/amdxdna_mailbox_helper.h
@@ -35,7 +35,7 @@ struct xdna_notify {
.notify_cb = xdna_msg_cb, \
}
-int xdna_msg_cb(void *handle, const u32 *data, size_t size);
+int xdna_msg_cb(void *handle, void __iomem *data, size_t size);
int xdna_send_msg_wait(struct amdxdna_dev *xdna, struct mailbox_channel *chann,
struct xdna_mailbox_msg *msg);
diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
index 8180b95ed69d..0825851656a2 100644
--- a/drivers/accel/ivpu/ivpu_debugfs.c
+++ b/drivers/accel/ivpu/ivpu_debugfs.c
@@ -4,6 +4,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/fault-inject.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
@@ -397,6 +398,88 @@ static int dct_active_set(void *data, u64 active_percent)
DEFINE_DEBUGFS_ATTRIBUTE(ivpu_dct_fops, dct_active_get, dct_active_set, "%llu\n");
+static int priority_bands_show(struct seq_file *s, void *v)
+{
+ struct ivpu_device *vdev = s->private;
+ struct ivpu_hw_info *hw = vdev->hw;
+
+ for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE;
+ band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) {
+ switch (band) {
+ case VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE:
+ seq_puts(s, "Idle: ");
+ break;
+
+ case VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL:
+ seq_puts(s, "Normal: ");
+ break;
+
+ case VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS:
+ seq_puts(s, "Focus: ");
+ break;
+
+ case VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME:
+ seq_puts(s, "Realtime: ");
+ break;
+ }
+
+ seq_printf(s, "grace_period %9u process_grace_period %9u process_quantum %9u\n",
+ hw->hws.grace_period[band], hw->hws.process_grace_period[band],
+ hw->hws.process_quantum[band]);
+ }
+
+ return 0;
+}
+
+static int priority_bands_fops_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, priority_bands_show, inode->i_private);
+}
+
+static ssize_t
+priority_bands_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
+{
+ struct seq_file *s = file->private_data;
+ struct ivpu_device *vdev = s->private;
+ char buf[64];
+ u32 grace_period;
+ u32 process_grace_period;
+ u32 process_quantum;
+ u32 band;
+ int ret;
+
+ if (size >= sizeof(buf))
+ return -EINVAL;
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, pos, user_buf, size);
+ if (ret < 0)
+ return ret;
+
+ buf[size] = '\0';
+ ret = sscanf(buf, "%u %u %u %u", &band, &grace_period, &process_grace_period,
+ &process_quantum);
+ if (ret != 4)
+ return -EINVAL;
+
+ if (band >= VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT)
+ return -EINVAL;
+
+ vdev->hw->hws.grace_period[band] = grace_period;
+ vdev->hw->hws.process_grace_period[band] = process_grace_period;
+ vdev->hw->hws.process_quantum[band] = process_quantum;
+
+ return size;
+}
+
+static const struct file_operations ivpu_hws_priority_bands_fops = {
+ .owner = THIS_MODULE,
+ .open = priority_bands_fops_open,
+ .write = priority_bands_fops_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
void ivpu_debugfs_init(struct ivpu_device *vdev)
{
struct dentry *debugfs_root = vdev->drm.debugfs_root;
@@ -419,6 +502,8 @@ void ivpu_debugfs_init(struct ivpu_device *vdev)
&fw_trace_hw_comp_mask_fops);
debugfs_create_file("fw_trace_level", 0200, debugfs_root, vdev,
&fw_trace_level_fops);
+ debugfs_create_file("hws_priority_bands", 0200, debugfs_root, vdev,
+ &ivpu_hws_priority_bands_fops);
debugfs_create_file("reset_engine", 0200, debugfs_root, vdev,
&ivpu_reset_engine_fops);
@@ -430,4 +515,8 @@ void ivpu_debugfs_init(struct ivpu_device *vdev)
debugfs_root, vdev, &fw_profiling_freq_fops);
debugfs_create_file("dct", 0644, debugfs_root, vdev, &ivpu_dct_fops);
}
+
+#ifdef CONFIG_FAULT_INJECTION
+ fault_create_debugfs_attr("fail_hw", debugfs_root, &ivpu_hw_failure);
+#endif
}
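
The new hws_priority_bands file follows the usual debugfs text-write shape: bound the input, copy it with simple_write_to_buffer(), parse with sscanf(), range-check, then store. A generic sketch of that shape (demo_vals[] is a placeholder for the real per-band tables):

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static u32 demo_vals[4];

static ssize_t demo_write(struct file *file, const char __user *ubuf,
			  size_t size, loff_t *pos)
{
	char buf[64];
	u32 idx, val;
	int ret;

	if (size >= sizeof(buf))
		return -EINVAL;

	ret = simple_write_to_buffer(buf, sizeof(buf) - 1, pos, ubuf, size);
	if (ret < 0)
		return ret;

	buf[size] = '\0';
	if (sscanf(buf, "%u %u", &idx, &val) != 2 || idx >= ARRAY_SIZE(demo_vals))
		return -EINVAL;

	demo_vals[idx] = val;
	return size;
}

For the actual file the input is four integers, e.g. echo "1 50000 50000 300000" > <debugfs>/hws_priority_bands for the Normal band (exact debugfs path depends on the accel minor).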
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 38cf1c342c72..4fa73189502e 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
+#include <linux/workqueue.h>
#include <generated/utsrelease.h>
#include <drm/drm_accel.h>
@@ -36,8 +37,6 @@
#define DRIVER_VERSION_STR "1.0.0 " UTS_RELEASE
#endif
-static struct lock_class_key submitted_jobs_xa_lock_class_key;
-
int ivpu_dbg_mask;
module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");
@@ -128,20 +127,18 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link)
kref_put(&file_priv->ref, file_priv_release);
}
-static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
+bool ivpu_is_capable(struct ivpu_device *vdev, u32 capability)
{
- switch (args->index) {
+ switch (capability) {
case DRM_IVPU_CAP_METRIC_STREAMER:
- args->value = 1;
- break;
+ return true;
case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
- args->value = 1;
- break;
+ return true;
+ case DRM_IVPU_CAP_MANAGE_CMDQ:
+ return vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW;
default:
- return -EINVAL;
+ return false;
}
-
- return 0;
}
static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
@@ -201,7 +198,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
args->value = vdev->hw->sku;
break;
case DRM_IVPU_PARAM_CAPABILITIES:
- ret = ivpu_get_capabilities(vdev, args);
+ args->value = ivpu_is_capable(vdev, args->index);
break;
default:
ret = -EINVAL;
@@ -310,6 +307,9 @@ static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_DATA, ivpu_ms_get_data_ioctl, 0),
DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_STOP, ivpu_ms_stop_ioctl, 0),
DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_INFO, ivpu_ms_get_info_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_CMDQ_CREATE, ivpu_cmdq_create_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_CMDQ_DESTROY, ivpu_cmdq_destroy_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_CMDQ_SUBMIT, ivpu_cmdq_submit_ioctl, 0),
};
static int ivpu_wait_for_ready(struct ivpu_device *vdev)
@@ -421,6 +421,9 @@ void ivpu_prepare_for_reset(struct ivpu_device *vdev)
{
ivpu_hw_irq_disable(vdev);
disable_irq(vdev->irq);
+ cancel_work_sync(&vdev->irq_ipc_work);
+ cancel_work_sync(&vdev->irq_dct_work);
+ cancel_work_sync(&vdev->context_abort_work);
ivpu_ipc_disable(vdev);
ivpu_mmu_disable(vdev);
}
@@ -453,7 +456,7 @@ static const struct drm_driver driver = {
.postclose = ivpu_postclose,
.gem_create_object = ivpu_gem_create_object,
- .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
+ .gem_prime_import = ivpu_gem_prime_import,
.ioctls = ivpu_drm_ioctls,
.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
@@ -465,54 +468,6 @@ static const struct drm_driver driver = {
.major = 1,
};
-static void ivpu_context_abort_invalid(struct ivpu_device *vdev)
-{
- struct ivpu_file_priv *file_priv;
- unsigned long ctx_id;
-
- mutex_lock(&vdev->context_list_lock);
-
- xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
- if (!file_priv->has_mmu_faults || file_priv->aborted)
- continue;
-
- mutex_lock(&file_priv->lock);
- ivpu_context_abort_locked(file_priv);
- file_priv->aborted = true;
- mutex_unlock(&file_priv->lock);
- }
-
- mutex_unlock(&vdev->context_list_lock);
-}
-
-static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg)
-{
- struct ivpu_device *vdev = arg;
- u8 irq_src;
-
- if (kfifo_is_empty(&vdev->hw->irq.fifo))
- return IRQ_NONE;
-
- while (kfifo_get(&vdev->hw->irq.fifo, &irq_src)) {
- switch (irq_src) {
- case IVPU_HW_IRQ_SRC_IPC:
- ivpu_ipc_irq_thread_handler(vdev);
- break;
- case IVPU_HW_IRQ_SRC_MMU_EVTQ:
- ivpu_context_abort_invalid(vdev);
- break;
- case IVPU_HW_IRQ_SRC_DCT:
- ivpu_pm_dct_irq_thread_handler(vdev);
- break;
- default:
- ivpu_err_ratelimited(vdev, "Unknown IRQ source: %u\n", irq_src);
- break;
- }
- }
-
- return IRQ_HANDLED;
-}
-
static int ivpu_irq_init(struct ivpu_device *vdev)
{
struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
@@ -524,12 +479,16 @@ static int ivpu_irq_init(struct ivpu_device *vdev)
return ret;
}
+ INIT_WORK(&vdev->irq_ipc_work, ivpu_ipc_irq_work_fn);
+ INIT_WORK(&vdev->irq_dct_work, ivpu_pm_irq_dct_work_fn);
+ INIT_WORK(&vdev->context_abort_work, ivpu_context_abort_work_fn);
+
ivpu_irq_handlers_init(vdev);
vdev->irq = pci_irq_vector(pdev, 0);
- ret = devm_request_threaded_irq(vdev->drm.dev, vdev->irq, ivpu_hw_irq_handler,
- ivpu_irq_thread_handler, IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
+ ret = devm_request_irq(vdev->drm.dev, vdev->irq, ivpu_hw_irq_handler,
+ IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
if (ret)
ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);
@@ -617,7 +576,6 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
- lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
INIT_LIST_HEAD(&vdev->bo_list);
vdev->db_limit.min = IVPU_MIN_DB;
@@ -627,6 +585,10 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
if (ret)
goto err_xa_destroy;
+ ret = drmm_mutex_init(&vdev->drm, &vdev->submitted_jobs_lock);
+ if (ret)
+ goto err_xa_destroy;
+
ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
if (ret)
goto err_xa_destroy;
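
The ivpu_drv.c changes above retire the threaded IRQ plus kfifo dispatch and instead queue per-source work items straight from the hard IRQ handler. A stripped-down sketch of that split, with demo_dev and the handler body as illustrative assumptions:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct demo_dev {
	struct work_struct ipc_work;
};

static void demo_ipc_work_fn(struct work_struct *work)
{
	struct demo_dev *ddev = container_of(work, struct demo_dev, ipc_work);

	/* heavy lifting runs here in process context, not in the hard IRQ */
	(void)ddev;
}

static irqreturn_t demo_irq_handler(int irq, void *ptr)
{
	struct demo_dev *ddev = ptr;

	/* ack the hardware, then defer the rest to the system workqueue */
	queue_work(system_wq, &ddev->ipc_work);
	return IRQ_HANDLED;
}

/* at probe time, roughly: */
/*	INIT_WORK(&ddev->ipc_work, demo_ipc_work_fn);			*/
/*	devm_request_irq(dev, irq, demo_irq_handler, IRQF_NO_AUTOEN,	*/
/*			 "demo", ddev);					*/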
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 3fdff3f6cffd..92753effb1c9 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -58,6 +58,7 @@
#define IVPU_PLATFORM_SILICON 0
#define IVPU_PLATFORM_SIMICS 2
#define IVPU_PLATFORM_FPGA 3
+#define IVPU_PLATFORM_HSLE 4
#define IVPU_PLATFORM_INVALID 8
#define IVPU_SCHED_MODE_AUTO -1
@@ -110,6 +111,7 @@ struct ivpu_wa_table {
bool disable_clock_relinquish;
bool disable_d0i3_msg;
bool wp0_during_power_up;
+ bool disable_d0i2;
};
struct ivpu_hw_info;
@@ -142,9 +144,14 @@ struct ivpu_device {
struct xa_limit db_limit;
u32 db_next;
+ struct work_struct irq_ipc_work;
+ struct work_struct irq_dct_work;
+ struct work_struct context_abort_work;
+
struct mutex bo_list_lock; /* Protects bo_list */
struct list_head bo_list;
+ struct mutex submitted_jobs_lock; /* Protects submitted_jobs */
struct xarray submitted_jobs_xa;
struct ivpu_ipc_consumer job_done_consumer;
@@ -200,6 +207,9 @@ extern bool ivpu_force_snoop;
#define IVPU_TEST_MODE_MIP_DISABLE BIT(6)
#define IVPU_TEST_MODE_DISABLE_TIMEOUTS BIT(8)
#define IVPU_TEST_MODE_TURBO BIT(9)
+#define IVPU_TEST_MODE_CLK_RELINQ_DISABLE BIT(10)
+#define IVPU_TEST_MODE_CLK_RELINQ_ENABLE BIT(11)
+#define IVPU_TEST_MODE_D0I2_DISABLE BIT(12)
extern int ivpu_test_mode;
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv);
@@ -208,6 +218,7 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link);
int ivpu_boot(struct ivpu_device *vdev);
int ivpu_shutdown(struct ivpu_device *vdev);
void ivpu_prepare_for_reset(struct ivpu_device *vdev);
+bool ivpu_is_capable(struct ivpu_device *vdev, u32 capability);
static inline u8 ivpu_revision(struct ivpu_device *vdev)
{
@@ -282,7 +293,8 @@ static inline bool ivpu_is_simics(struct ivpu_device *vdev)
static inline bool ivpu_is_fpga(struct ivpu_device *vdev)
{
- return ivpu_get_platform(vdev) == IVPU_PLATFORM_FPGA;
+ return ivpu_get_platform(vdev) == IVPU_PLATFORM_FPGA ||
+ ivpu_get_platform(vdev) == IVPU_PLATFORM_HSLE;
}
static inline bool ivpu_is_force_snoop_enabled(struct ivpu_device *vdev)
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index 6037ec0b3096..7a1bb92d8c81 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -145,7 +145,10 @@ ivpu_fw_sched_mode_select(struct ivpu_device *vdev, const struct vpu_firmware_he
if (ivpu_sched_mode != IVPU_SCHED_MODE_AUTO)
return ivpu_sched_mode;
- return VPU_SCHEDULING_MODE_OS;
+ if (IVPU_FW_CHECK_API_VER_LT(vdev, fw_hdr, JSM, 3, 24))
+ return VPU_SCHEDULING_MODE_OS;
+
+ return VPU_SCHEDULING_MODE_HW;
}
static int ivpu_fw_parse(struct ivpu_device *vdev)
@@ -531,6 +534,8 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
boot_params->d0i3_entry_vpu_ts);
ivpu_dbg(vdev, FW_BOOT, "boot_params.system_time_us = %llu\n",
boot_params->system_time_us);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.power_profile = %u\n",
+ boot_params->power_profile);
}
void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
@@ -631,6 +636,8 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->d0i3_delayed_entry = 1;
boot_params->d0i3_residency_time_us = 0;
boot_params->d0i3_entry_vpu_ts = 0;
+ if (IVPU_WA(disable_d0i2))
+ boot_params->power_profile = 1;
boot_params->system_time_us = ktime_to_us(ktime_get_real());
wmb(); /* Flush WC buffers after writing bootparams */
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index 16178054e629..8741c73b92ce 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -20,6 +20,8 @@
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
+MODULE_IMPORT_NS("DMA_BUF");
+
static const struct drm_gem_object_funcs ivpu_gem_funcs;
static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, const char *action)
@@ -172,6 +174,47 @@ struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t siz
return &bo->base.base;
}
+struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct device *attach_dev = dev->dev;
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ struct drm_gem_object *obj;
+ int ret;
+
+ attach = dma_buf_attach(dma_buf, attach_dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+ get_dma_buf(dma_buf);
+
+ sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto fail_detach;
+ }
+
+ obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto fail_unmap;
+ }
+
+ obj->import_attach = attach;
+ obj->resv = dma_buf->resv;
+
+ return obj;
+
+fail_unmap:
+ dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+
+ return ERR_PTR(ret);
+}
+
static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags)
{
struct drm_gem_shmem_object *shmem;
diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
index d975000abd78..a222a9ec9d61 100644
--- a/drivers/accel/ivpu/ivpu_gem.h
+++ b/drivers/accel/ivpu/ivpu_gem.h
@@ -28,6 +28,7 @@ int ivpu_bo_pin(struct ivpu_bo *bo);
void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size);
+struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
struct ivpu_bo *ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
struct ivpu_addr_range *range, u64 size, u32 flags);
struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags);
diff --git a/drivers/accel/ivpu/ivpu_hw.c b/drivers/accel/ivpu/ivpu_hw.c
index 4e1054f3466e..ec9a3629da3a 100644
--- a/drivers/accel/ivpu/ivpu_hw.c
+++ b/drivers/accel/ivpu/ivpu_hw.c
@@ -9,6 +9,16 @@
#include "ivpu_hw_ip.h"
#include <linux/dmi.h>
+#include <linux/fault-inject.h>
+#include <linux/pm_runtime.h>
+
+#ifdef CONFIG_FAULT_INJECTION
+DECLARE_FAULT_ATTR(ivpu_hw_failure);
+
+static char *ivpu_fail_hw;
+module_param_named_unsafe(fail_hw, ivpu_fail_hw, charp, 0444);
+MODULE_PARM_DESC(fail_hw, "<interval>,<probability>,<space>,<times>");
+#endif
static char *platform_to_str(u32 platform)
{
@@ -19,43 +29,36 @@ static char *platform_to_str(u32 platform)
return "SIMICS";
case IVPU_PLATFORM_FPGA:
return "FPGA";
+ case IVPU_PLATFORM_HSLE:
+ return "HSLE";
default:
return "Invalid platform";
}
}
-static const struct dmi_system_id dmi_platform_simulation[] = {
- {
- .ident = "Intel Simics",
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "lnlrvp"),
- DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
- DMI_MATCH(DMI_BOARD_SERIAL, "123456789"),
- },
- },
- {
- .ident = "Intel Simics",
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "Simics"),
- },
- },
- { }
-};
-
static void platform_init(struct ivpu_device *vdev)
{
- if (dmi_check_system(dmi_platform_simulation))
- vdev->platform = IVPU_PLATFORM_SIMICS;
- else
- vdev->platform = IVPU_PLATFORM_SILICON;
+ int platform = ivpu_hw_btrs_platform_read(vdev);
+
+ ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n", platform_to_str(platform), platform);
+
+ switch (platform) {
+ case IVPU_PLATFORM_SILICON:
+ case IVPU_PLATFORM_SIMICS:
+ case IVPU_PLATFORM_FPGA:
+ case IVPU_PLATFORM_HSLE:
+ vdev->platform = platform;
+ break;
- ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n",
- platform_to_str(vdev->platform), vdev->platform);
+ default:
+ ivpu_err(vdev, "Invalid platform type: %d\n", platform);
+ break;
+ }
}
static void wa_init(struct ivpu_device *vdev)
{
- vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
+ vdev->wa.punit_disabled = false;
vdev->wa.clear_runtime_mem = false;
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
@@ -65,14 +68,24 @@ static void wa_init(struct ivpu_device *vdev)
ivpu_revision(vdev) < IVPU_HW_IP_REV_LNL_B0)
vdev->wa.disable_clock_relinquish = true;
+ if (ivpu_test_mode & IVPU_TEST_MODE_CLK_RELINQ_ENABLE)
+ vdev->wa.disable_clock_relinquish = false;
+
+ if (ivpu_test_mode & IVPU_TEST_MODE_CLK_RELINQ_DISABLE)
+ vdev->wa.disable_clock_relinquish = true;
+
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
vdev->wa.wp0_during_power_up = true;
+ if (ivpu_test_mode & IVPU_TEST_MODE_D0I2_DISABLE)
+ vdev->wa.disable_d0i2 = true;
+
IVPU_PRINT_WA(punit_disabled);
IVPU_PRINT_WA(clear_runtime_mem);
IVPU_PRINT_WA(interrupt_clear_with_0);
IVPU_PRINT_WA(disable_clock_relinquish);
IVPU_PRINT_WA(wp0_during_power_up);
+ IVPU_PRINT_WA(disable_d0i2);
}
static void timeouts_init(struct ivpu_device *vdev)
@@ -84,12 +97,12 @@ static void timeouts_init(struct ivpu_device *vdev)
vdev->timeout.autosuspend = -1;
vdev->timeout.d0i3_entry_msg = -1;
} else if (ivpu_is_fpga(vdev)) {
- vdev->timeout.boot = 100000;
- vdev->timeout.jsm = 50000;
- vdev->timeout.tdr = 2000000;
+ vdev->timeout.boot = 50;
+ vdev->timeout.jsm = 15000;
+ vdev->timeout.tdr = 30000;
vdev->timeout.autosuspend = -1;
vdev->timeout.d0i3_entry_msg = 500;
- vdev->timeout.state_dump_msg = 10;
+ vdev->timeout.state_dump_msg = 10000;
} else if (ivpu_is_simics(vdev)) {
vdev->timeout.boot = 50;
vdev->timeout.jsm = 500;
@@ -110,6 +123,26 @@ static void timeouts_init(struct ivpu_device *vdev)
}
}
+static void priority_bands_init(struct ivpu_device *vdev)
+{
+ /* Idle */
+ vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 0;
+ vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 50000;
+ vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 160000;
+ /* Normal */
+ vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 50000;
+ vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 50000;
+ vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 300000;
+ /* Focus */
+ vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 50000;
+ vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 50000;
+ vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 200000;
+ /* Realtime */
+ vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 0;
+ vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 50000;
+ vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 200000;
+}
+
static void memory_ranges_init(struct ivpu_device *vdev)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
@@ -248,12 +281,18 @@ int ivpu_hw_init(struct ivpu_device *vdev)
{
ivpu_hw_btrs_info_init(vdev);
ivpu_hw_btrs_freq_ratios_init(vdev);
+ priority_bands_init(vdev);
memory_ranges_init(vdev);
platform_init(vdev);
wa_init(vdev);
timeouts_init(vdev);
atomic_set(&vdev->hw->firewall_irq_counter, 0);
+#ifdef CONFIG_FAULT_INJECTION
+ if (ivpu_fail_hw)
+ setup_fault_attr(&ivpu_hw_failure, ivpu_fail_hw);
+#endif
+
return 0;
}
@@ -285,8 +324,6 @@ void ivpu_hw_profiling_freq_drive(struct ivpu_device *vdev, bool enable)
void ivpu_irq_handlers_init(struct ivpu_device *vdev)
{
- INIT_KFIFO(vdev->hw->irq.fifo);
-
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
vdev->hw->irq.ip_irq_handler = ivpu_hw_ip_irq_handler_37xx;
else
@@ -300,7 +337,6 @@ void ivpu_irq_handlers_init(struct ivpu_device *vdev)
void ivpu_hw_irq_enable(struct ivpu_device *vdev)
{
- kfifo_reset(&vdev->hw->irq.fifo);
ivpu_hw_ip_irq_enable(vdev);
ivpu_hw_btrs_irq_enable(vdev);
}
@@ -327,9 +363,9 @@ irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr)
/* Re-enable global interrupts to re-trigger MSI for pending interrupts */
ivpu_hw_btrs_global_int_enable(vdev);
- if (!kfifo_is_empty(&vdev->hw->irq.fifo))
- return IRQ_WAKE_THREAD;
- if (ip_handled || btrs_handled)
- return IRQ_HANDLED;
- return IRQ_NONE;
+ if (!ip_handled && !btrs_handled)
+ return IRQ_NONE;
+
+ pm_runtime_mark_last_busy(vdev->drm.dev);
+ return IRQ_HANDLED;
}
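
The fault-injection hooks added above are spread over three files (module parameter, should_fail() at the poll site, debugfs attribute); gathered in one place they reduce to the standard CONFIG_FAULT_INJECTION recipe. A sketch with "demo" placeholder names:

#include <linux/fault-inject.h>
#include <linux/module.h>

#ifdef CONFIG_FAULT_INJECTION
DECLARE_FAULT_ATTR(demo_failure);

static char *demo_fail;
module_param_named_unsafe(fail_demo, demo_fail, charp, 0444);
MODULE_PARM_DESC(fail_demo, "<interval>,<probability>,<space>,<times>");
#endif

static int demo_hw_init(void)
{
#ifdef CONFIG_FAULT_INJECTION
	if (demo_fail)
		setup_fault_attr(&demo_failure, demo_fail);
#endif
	return 0;
}

static int demo_poll(void)
{
#ifdef CONFIG_FAULT_INJECTION
	/* pretend the poll timed out, according to the fault_attr settings */
	if (should_fail(&demo_failure, 1))
		return -ETIMEDOUT;
#endif
	return 0;
}

The runtime knobs are then exposed with fault_create_debugfs_attr("fail_demo", debugfs_root, &demo_failure), mirroring the "fail_hw" attribute registered in ivpu_debugfs_init() above.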
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
index fc4dbfc980c8..16435f2756d0 100644
--- a/drivers/accel/ivpu/ivpu_hw.h
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -6,18 +6,10 @@
#ifndef __IVPU_HW_H__
#define __IVPU_HW_H__
-#include <linux/kfifo.h>
-
#include "ivpu_drv.h"
#include "ivpu_hw_btrs.h"
#include "ivpu_hw_ip.h"
-#define IVPU_HW_IRQ_FIFO_LENGTH 1024
-
-#define IVPU_HW_IRQ_SRC_IPC 1
-#define IVPU_HW_IRQ_SRC_MMU_EVTQ 2
-#define IVPU_HW_IRQ_SRC_DCT 3
-
struct ivpu_addr_range {
resource_size_t start;
resource_size_t end;
@@ -27,7 +19,6 @@ struct ivpu_hw_info {
struct {
bool (*btrs_irq_handler)(struct ivpu_device *vdev, int irq);
bool (*ip_irq_handler)(struct ivpu_device *vdev, int irq);
- DECLARE_KFIFO(fifo, u8, IVPU_HW_IRQ_FIFO_LENGTH);
} irq;
struct {
struct ivpu_addr_range global;
@@ -45,6 +36,11 @@ struct ivpu_hw_info {
u8 pn_ratio;
u32 profiling_freq;
} pll;
+ struct {
+ u32 grace_period[VPU_HWS_NUM_PRIORITY_BANDS];
+ u32 process_quantum[VPU_HWS_NUM_PRIORITY_BANDS];
+ u32 process_grace_period[VPU_HWS_NUM_PRIORITY_BANDS];
+ } hws;
u32 tile_fuse;
u32 sku;
u16 config;
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.c b/drivers/accel/ivpu/ivpu_hw_btrs.c
index 3212c99f3682..56c56012b980 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs.c
+++ b/drivers/accel/ivpu/ivpu_hw_btrs.c
@@ -630,8 +630,7 @@ bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq)
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, status)) {
ivpu_dbg(vdev, IRQ, "Survivability IRQ\n");
- if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_DCT))
- ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
+ queue_work(system_wq, &vdev->irq_dct_work);
}
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status))
@@ -888,3 +887,10 @@ void ivpu_hw_btrs_diagnose_failure(struct ivpu_device *vdev)
else
return diagnose_failure_lnl(vdev);
}
+
+int ivpu_hw_btrs_platform_read(struct ivpu_device *vdev)
+{
+ u32 reg = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS);
+
+ return REG_GET_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, PLATFORM, reg);
+}
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.h b/drivers/accel/ivpu/ivpu_hw_btrs.h
index 04f14f50fed6..1fd71b4d4ab0 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs.h
+++ b/drivers/accel/ivpu/ivpu_hw_btrs.h
@@ -46,5 +46,6 @@ void ivpu_hw_btrs_global_int_disable(struct ivpu_device *vdev);
void ivpu_hw_btrs_irq_enable(struct ivpu_device *vdev);
void ivpu_hw_btrs_irq_disable(struct ivpu_device *vdev);
void ivpu_hw_btrs_diagnose_failure(struct ivpu_device *vdev);
+int ivpu_hw_btrs_platform_read(struct ivpu_device *vdev);
#endif /* __IVPU_HW_BTRS_H__ */
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h b/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h
index fc51f3098f97..fff2ef2cada6 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h
+++ b/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h
@@ -86,6 +86,7 @@
#define VPU_HW_BTRS_LNL_VPU_STATUS_POWER_RESOURCE_OWN_ACK_MASK BIT_MASK(7)
#define VPU_HW_BTRS_LNL_VPU_STATUS_PERF_CLK_MASK BIT_MASK(11)
#define VPU_HW_BTRS_LNL_VPU_STATUS_DISABLE_CLK_RELINQUISH_MASK BIT_MASK(12)
+#define VPU_HW_BTRS_LNL_VPU_STATUS_PLATFORM_MASK GENMASK(31, 29)
#define VPU_HW_BTRS_LNL_IP_RESET 0x00000160u
#define VPU_HW_BTRS_LNL_IP_RESET_TRIGGER_MASK BIT_MASK(0)
diff --git a/drivers/accel/ivpu/ivpu_hw_ip.c b/drivers/accel/ivpu/ivpu_hw_ip.c
index 029dd065614b..823f6a57dc54 100644
--- a/drivers/accel/ivpu/ivpu_hw_ip.c
+++ b/drivers/accel/ivpu/ivpu_hw_ip.c
@@ -968,14 +968,14 @@ void ivpu_hw_ip_wdt_disable(struct ivpu_device *vdev)
static u32 ipc_rx_count_get_37xx(struct ivpu_device *vdev)
{
- u32 count = REGV_RD32_SILENT(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT);
+ u32 count = readl(vdev->regv + VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT);
return REG_GET_FLD(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
}
static u32 ipc_rx_count_get_40xx(struct ivpu_device *vdev)
{
- u32 count = REGV_RD32_SILENT(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT);
+ u32 count = readl(vdev->regv + VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT);
return REG_GET_FLD(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
}
diff --git a/drivers/accel/ivpu/ivpu_hw_reg_io.h b/drivers/accel/ivpu/ivpu_hw_reg_io.h
index 79b3f441eac4..66259b0ead02 100644
--- a/drivers/accel/ivpu/ivpu_hw_reg_io.h
+++ b/drivers/accel/ivpu/ivpu_hw_reg_io.h
@@ -7,6 +7,7 @@
#define __IVPU_HW_REG_IO_H__
#include <linux/bitfield.h>
+#include <linux/fault-inject.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -16,13 +17,11 @@
#define REG_IO_ERROR 0xffffffff
#define REGB_RD32(reg) ivpu_hw_reg_rd32(vdev, vdev->regb, (reg), #reg, __func__)
-#define REGB_RD32_SILENT(reg) readl(vdev->regb + (reg))
#define REGB_RD64(reg) ivpu_hw_reg_rd64(vdev, vdev->regb, (reg), #reg, __func__)
#define REGB_WR32(reg, val) ivpu_hw_reg_wr32(vdev, vdev->regb, (reg), (val), #reg, __func__)
#define REGB_WR64(reg, val) ivpu_hw_reg_wr64(vdev, vdev->regb, (reg), (val), #reg, __func__)
#define REGV_RD32(reg) ivpu_hw_reg_rd32(vdev, vdev->regv, (reg), #reg, __func__)
-#define REGV_RD32_SILENT(reg) readl(vdev->regv + (reg))
#define REGV_RD64(reg) ivpu_hw_reg_rd64(vdev, vdev->regv, (reg), #reg, __func__)
#define REGV_WR32(reg, val) ivpu_hw_reg_wr32(vdev, vdev->regv, (reg), (val), #reg, __func__)
#define REGV_WR64(reg, val) ivpu_hw_reg_wr64(vdev, vdev->regv, (reg), (val), #reg, __func__)
@@ -47,31 +46,42 @@
#define REG_TEST_FLD_NUM(REG, FLD, num, val) \
((num) == FIELD_GET(REG##_##FLD##_MASK, val))
-#define REGB_POLL_FLD(reg, fld, val, timeout_us) \
-({ \
- u32 var; \
- int r; \
- ivpu_dbg(vdev, REG, "%s : %s (0x%08x) Polling field %s started (expected 0x%x)\n", \
- __func__, #reg, reg, #fld, val); \
- r = read_poll_timeout(REGB_RD32_SILENT, var, (FIELD_GET(reg##_##fld##_MASK, var) == (val)),\
- REG_POLL_SLEEP_US, timeout_us, false, (reg)); \
- ivpu_dbg(vdev, REG, "%s : %s (0x%08x) Polling field %s %s (reg val 0x%08x)\n", \
- __func__, #reg, reg, #fld, r ? "ETIMEDOUT" : "OK", var); \
- r; \
-})
-
-#define REGV_POLL_FLD(reg, fld, val, timeout_us) \
-({ \
- u32 var; \
- int r; \
- ivpu_dbg(vdev, REG, "%s : %s (0x%08x) Polling field %s started (expected 0x%x)\n", \
- __func__, #reg, reg, #fld, val); \
- r = read_poll_timeout(REGV_RD32_SILENT, var, (FIELD_GET(reg##_##fld##_MASK, var) == (val)),\
- REG_POLL_SLEEP_US, timeout_us, false, (reg)); \
- ivpu_dbg(vdev, REG, "%s : %s (0x%08x) Polling field %s %s (reg val 0x%08x)\n", \
- __func__, #reg, reg, #fld, r ? "ETIMEDOUT" : "OK", var); \
- r; \
-})
+#define REGB_POLL_FLD(reg, fld, exp_fld_val, timeout_us) \
+ ivpu_hw_reg_poll_fld(vdev, vdev->regb, reg, reg##_##fld##_MASK, \
+ FIELD_PREP(reg##_##fld##_MASK, exp_fld_val), timeout_us, \
+ __func__, #reg, #fld)
+
+#define REGV_POLL_FLD(reg, fld, exp_fld_val, timeout_us) \
+ ivpu_hw_reg_poll_fld(vdev, vdev->regv, reg, reg##_##fld##_MASK, \
+ FIELD_PREP(reg##_##fld##_MASK, exp_fld_val), timeout_us, \
+ __func__, #reg, #fld)
+
+extern struct fault_attr ivpu_hw_failure;
+
+static inline int __must_check
+ivpu_hw_reg_poll_fld(struct ivpu_device *vdev, void __iomem *base,
+ u32 reg_offset, u32 reg_mask, u32 exp_masked_val, u32 timeout_us,
+ const char *func_name, const char *reg_name, const char *fld_name)
+{
+ u32 reg_val;
+ int ret;
+
+ ivpu_dbg(vdev, REG, "%s : %s (0x%08x) POLL %s started (exp_val 0x%x)\n",
+ func_name, reg_name, reg_offset, fld_name, exp_masked_val);
+
+ ret = read_poll_timeout(readl, reg_val, (reg_val & reg_mask) == exp_masked_val,
+ REG_POLL_SLEEP_US, timeout_us, false, base + reg_offset);
+
+#ifdef CONFIG_FAULT_INJECTION
+ if (should_fail(&ivpu_hw_failure, 1))
+ ret = -ETIMEDOUT;
+#endif
+
+ ivpu_dbg(vdev, REG, "%s : %s (0x%08x) POLL %s %s (reg_val 0x%08x)\n",
+ func_name, reg_name, reg_offset, fld_name, ret ? "ETIMEDOUT" : "OK", reg_val);
+
+ return ret;
+}
static inline u32
ivpu_hw_reg_rd32(struct ivpu_device *vdev, void __iomem *base, u32 reg,
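
The REGB/REGV_POLL_FLD macros above now collapse into one inline helper built on read_poll_timeout(), comparing the masked register value against a FIELD_PREP()-encoded expectation. A minimal, driver-agnostic version of that masked-field poll (the register name, mask and iomem base are assumptions):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define DEMO_STATUS_READY_MASK	GENMASK(3, 0)

static int demo_poll_ready(void __iomem *reg, u32 exp_fld_val, u32 timeout_us)
{
	u32 exp = FIELD_PREP(DEMO_STATUS_READY_MASK, exp_fld_val);
	u32 val;

	/* sleep 50 us between reads until (val & mask) matches or we time out */
	return read_poll_timeout(readl, val,
				 (val & DEMO_STATUS_READY_MASK) == exp,
				 50, timeout_us, false, reg);
}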
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index 01ebf88fe6ef..0e096fd9b95d 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -459,13 +459,12 @@ void ivpu_ipc_irq_handler(struct ivpu_device *vdev)
}
}
- if (!list_empty(&ipc->cb_msg_list))
- if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_IPC))
- ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
+ queue_work(system_wq, &vdev->irq_ipc_work);
}
-void ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev)
+void ivpu_ipc_irq_work_fn(struct work_struct *work)
{
+ struct ivpu_device *vdev = container_of(work, struct ivpu_device, irq_ipc_work);
struct ivpu_ipc_info *ipc = vdev->ipc;
struct ivpu_ipc_rx_msg *rx_msg, *r;
struct list_head cb_msg_list;
diff --git a/drivers/accel/ivpu/ivpu_ipc.h b/drivers/accel/ivpu/ivpu_ipc.h
index b4dfb504679b..b524a1985b9d 100644
--- a/drivers/accel/ivpu/ivpu_ipc.h
+++ b/drivers/accel/ivpu/ivpu_ipc.h
@@ -90,7 +90,7 @@ void ivpu_ipc_disable(struct ivpu_device *vdev);
void ivpu_ipc_reset(struct ivpu_device *vdev);
void ivpu_ipc_irq_handler(struct ivpu_device *vdev);
-void ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev);
+void ivpu_ipc_irq_work_fn(struct work_struct *work);
void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
u32 channel, ivpu_ipc_rx_callback_t callback);
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index 7149312f16e1..004059e4f1e8 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -8,6 +8,7 @@
#include <linux/bitfield.h>
#include <linux/highmem.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <uapi/drm/ivpu_accel.h>
@@ -17,6 +18,7 @@
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
+#include "ivpu_mmu.h"
#include "ivpu_pm.h"
#include "ivpu_trace.h"
#include "vpu_boot_api.h"
@@ -83,23 +85,9 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
if (!cmdq)
return NULL;
- ret = xa_alloc_cyclic(&vdev->db_xa, &cmdq->db_id, NULL, vdev->db_limit, &vdev->db_next,
- GFP_KERNEL);
- if (ret < 0) {
- ivpu_err(vdev, "Failed to allocate doorbell id: %d\n", ret);
- goto err_free_cmdq;
- }
-
- ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
- &file_priv->cmdq_id_next, GFP_KERNEL);
- if (ret < 0) {
- ivpu_err(vdev, "Failed to allocate command queue id: %d\n", ret);
- goto err_erase_db_xa;
- }
-
cmdq->mem = ivpu_bo_create_global(vdev, SZ_4K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
if (!cmdq->mem)
- goto err_erase_cmdq_xa;
+ goto err_free_cmdq;
ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
if (ret)
@@ -107,10 +95,6 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
return cmdq;
-err_erase_cmdq_xa:
- xa_erase(&file_priv->cmdq_xa, cmdq->id);
-err_erase_db_xa:
- xa_erase(&vdev->db_xa, cmdq->db_id);
err_free_cmdq:
kfree(cmdq);
return NULL;
@@ -118,15 +102,44 @@ err_free_cmdq:
static void ivpu_cmdq_free(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
- if (!cmdq)
- return;
-
ivpu_preemption_buffers_free(file_priv->vdev, file_priv, cmdq);
ivpu_bo_free(cmdq->mem);
- xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
kfree(cmdq);
}
+static struct ivpu_cmdq *ivpu_cmdq_create(struct ivpu_file_priv *file_priv, u8 priority,
+ bool is_legacy)
+{
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct ivpu_cmdq *cmdq = NULL;
+ int ret;
+
+ lockdep_assert_held(&file_priv->lock);
+
+ cmdq = ivpu_cmdq_alloc(file_priv);
+ if (!cmdq) {
+ ivpu_err(vdev, "Failed to allocate command queue\n");
+ return NULL;
+ }
+
+ cmdq->priority = priority;
+ cmdq->is_legacy = is_legacy;
+
+ ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
+ &file_priv->cmdq_id_next, GFP_KERNEL);
+ if (ret < 0) {
+ ivpu_err(vdev, "Failed to allocate command queue ID: %d\n", ret);
+ goto err_free_cmdq;
+ }
+
+ ivpu_dbg(vdev, JOB, "Command queue %d created, ctx %d\n", cmdq->id, file_priv->ctx.id);
+ return cmdq;
+
+err_free_cmdq:
+ ivpu_cmdq_free(file_priv, cmdq);
+ return NULL;
+}
+
static int ivpu_hws_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 engine,
u8 priority)
{
@@ -152,6 +165,13 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *
struct ivpu_device *vdev = file_priv->vdev;
int ret;
+ ret = xa_alloc_cyclic(&vdev->db_xa, &cmdq->db_id, NULL, vdev->db_limit, &vdev->db_next,
+ GFP_KERNEL);
+ if (ret < 0) {
+ ivpu_err(vdev, "Failed to allocate doorbell ID: %d\n", ret);
+ return ret;
+ }
+
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->id, cmdq->db_id,
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
@@ -160,41 +180,52 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
if (!ret)
- ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d\n",
- cmdq->db_id, cmdq->id, file_priv->ctx.id);
+ ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d priority %d\n",
+ cmdq->db_id, cmdq->id, file_priv->ctx.id, cmdq->priority);
+ else
+ xa_erase(&vdev->db_xa, cmdq->db_id);
return ret;
}
-static int
-ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u8 priority)
+static void ivpu_cmdq_jobq_init(struct ivpu_device *vdev, struct vpu_job_queue *jobq)
+{
+ jobq->header.engine_idx = VPU_ENGINE_COMPUTE;
+ jobq->header.head = 0;
+ jobq->header.tail = 0;
+
+ if (ivpu_test_mode & IVPU_TEST_MODE_TURBO) {
+ ivpu_dbg(vdev, JOB, "Turbo mode enabled");
+ jobq->header.flags = VPU_JOB_QUEUE_FLAGS_TURBO_MODE;
+ }
+
+ wmb(); /* Flush WC buffer for jobq->header */
+}
+
+static inline u32 ivpu_cmdq_get_entry_count(struct ivpu_cmdq *cmdq)
+{
+ size_t size = ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header);
+
+ return size / sizeof(struct vpu_job_queue_entry);
+}
+
+static int ivpu_cmdq_register(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
struct ivpu_device *vdev = file_priv->vdev;
- struct vpu_job_queue_header *jobq_header;
int ret;
lockdep_assert_held(&file_priv->lock);
- if (cmdq->db_registered)
+ if (cmdq->db_id)
return 0;
- cmdq->entry_count = (u32)((ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header)) /
- sizeof(struct vpu_job_queue_entry));
-
+ cmdq->entry_count = ivpu_cmdq_get_entry_count(cmdq);
cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
- jobq_header = &cmdq->jobq->header;
- jobq_header->engine_idx = VPU_ENGINE_COMPUTE;
- jobq_header->head = 0;
- jobq_header->tail = 0;
- if (ivpu_test_mode & IVPU_TEST_MODE_TURBO) {
- ivpu_dbg(vdev, JOB, "Turbo mode enabled");
- jobq_header->flags = VPU_JOB_QUEUE_FLAGS_TURBO_MODE;
- }
- wmb(); /* Flush WC buffer for jobq->header */
+ ivpu_cmdq_jobq_init(vdev, cmdq->jobq);
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
- ret = ivpu_hws_cmdq_init(file_priv, cmdq, VPU_ENGINE_COMPUTE, priority);
+ ret = ivpu_hws_cmdq_init(file_priv, cmdq, VPU_ENGINE_COMPUTE, cmdq->priority);
if (ret)
return ret;
}
@@ -203,58 +234,83 @@ ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u8 prio
if (ret)
return ret;
- cmdq->db_registered = true;
-
return 0;
}
-static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
+static int ivpu_cmdq_unregister(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
struct ivpu_device *vdev = file_priv->vdev;
int ret;
lockdep_assert_held(&file_priv->lock);
- if (!cmdq->db_registered)
+ if (!cmdq->db_id)
return 0;
- cmdq->db_registered = false;
-
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->id);
if (!ret)
- ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->id);
+ ivpu_dbg(vdev, JOB, "Command queue %d destroyed, ctx %d\n",
+ cmdq->id, file_priv->ctx.id);
}
ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id);
if (!ret)
ivpu_dbg(vdev, JOB, "DB %d unregistered\n", cmdq->db_id);
+ xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
+ cmdq->db_id = 0;
+
return 0;
}
-static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u8 priority)
+static inline u8 ivpu_job_to_jsm_priority(u8 priority)
+{
+ if (priority == DRM_IVPU_JOB_PRIORITY_DEFAULT)
+ return VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL;
+
+ return priority - 1;
+}
+
+static void ivpu_cmdq_destroy(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
+{
+ ivpu_cmdq_unregister(file_priv, cmdq);
+ xa_erase(&file_priv->cmdq_xa, cmdq->id);
+ ivpu_cmdq_free(file_priv, cmdq);
+}
+
+static struct ivpu_cmdq *ivpu_cmdq_acquire_legacy(struct ivpu_file_priv *file_priv, u8 priority)
{
struct ivpu_cmdq *cmdq;
- unsigned long cmdq_id;
- int ret;
+ unsigned long id;
lockdep_assert_held(&file_priv->lock);
- xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
- if (cmdq->priority == priority)
+ xa_for_each(&file_priv->cmdq_xa, id, cmdq)
+ if (cmdq->is_legacy && cmdq->priority == priority)
break;
if (!cmdq) {
- cmdq = ivpu_cmdq_alloc(file_priv);
+ cmdq = ivpu_cmdq_create(file_priv, priority, true);
if (!cmdq)
return NULL;
- cmdq->priority = priority;
}
- ret = ivpu_cmdq_init(file_priv, cmdq, priority);
- if (ret)
+ return cmdq;
+}
+
+static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u32 cmdq_id)
+{
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct ivpu_cmdq *cmdq;
+
+ lockdep_assert_held(&file_priv->lock);
+
+ cmdq = xa_load(&file_priv->cmdq_xa, cmdq_id);
+ if (!cmdq) {
+ ivpu_warn_ratelimited(vdev, "Failed to find command queue with ID: %u\n", cmdq_id);
return NULL;
+ }
return cmdq;
}
@@ -266,11 +322,8 @@ void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
lockdep_assert_held(&file_priv->lock);
- xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq) {
- xa_erase(&file_priv->cmdq_xa, cmdq_id);
- ivpu_cmdq_fini(file_priv, cmdq);
- ivpu_cmdq_free(file_priv, cmdq);
- }
+ xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
+ ivpu_cmdq_destroy(file_priv, cmdq);
}
/*
@@ -286,8 +339,10 @@ static void ivpu_cmdq_reset(struct ivpu_file_priv *file_priv)
mutex_lock(&file_priv->lock);
- xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
- cmdq->db_registered = false;
+ xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq) {
+ xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
+ cmdq->db_id = 0;
+ }
mutex_unlock(&file_priv->lock);
}
@@ -305,25 +360,24 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
mutex_unlock(&vdev->context_list_lock);
}
-static void ivpu_cmdq_fini_all(struct ivpu_file_priv *file_priv)
-{
- struct ivpu_cmdq *cmdq;
- unsigned long cmdq_id;
-
- xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
- ivpu_cmdq_fini(file_priv, cmdq);
-}
-
void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv)
{
struct ivpu_device *vdev = file_priv->vdev;
+ struct ivpu_cmdq *cmdq;
+ unsigned long cmdq_id;
lockdep_assert_held(&file_priv->lock);
+ ivpu_dbg(vdev, JOB, "Context ID: %u abort\n", file_priv->ctx.id);
- ivpu_cmdq_fini_all(file_priv);
+ xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
+ ivpu_cmdq_unregister(file_priv, cmdq);
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_OS)
ivpu_jsm_context_release(vdev, file_priv->ctx.id);
+
+ ivpu_mmu_disable_ssid_events(vdev, file_priv->ctx.id);
+
+ file_priv->aborted = true;
}
static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
@@ -462,16 +516,14 @@ static struct ivpu_job *ivpu_job_remove_from_submitted_jobs(struct ivpu_device *
{
struct ivpu_job *job;
- xa_lock(&vdev->submitted_jobs_xa);
- job = __xa_erase(&vdev->submitted_jobs_xa, job_id);
+ lockdep_assert_held(&vdev->submitted_jobs_lock);
+ job = xa_erase(&vdev->submitted_jobs_xa, job_id);
if (xa_empty(&vdev->submitted_jobs_xa) && job) {
vdev->busy_time = ktime_add(ktime_sub(ktime_get(), vdev->busy_start_ts),
vdev->busy_time);
}
- xa_unlock(&vdev->submitted_jobs_xa);
-
return job;
}
@@ -479,6 +531,28 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32
{
struct ivpu_job *job;
+ lockdep_assert_held(&vdev->submitted_jobs_lock);
+
+ job = xa_load(&vdev->submitted_jobs_xa, job_id);
+ if (!job)
+ return -ENOENT;
+
+ if (job_status == VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW) {
+ guard(mutex)(&job->file_priv->lock);
+
+ if (job->file_priv->has_mmu_faults)
+ return 0;
+
+ /*
+ * Mark the context as faulty and defer destruction of the job to the context abort
+ * work handler, so that MMU faults and jobs returning a context violation status
+ * are synchronized and handled in the same way
+ */
+ job->file_priv->has_mmu_faults = true;
+ queue_work(system_wq, &vdev->context_abort_work);
+ return 0;
+ }
+
job = ivpu_job_remove_from_submitted_jobs(vdev, job_id);
if (!job)
return -ENOENT;
@@ -497,6 +571,10 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32
ivpu_stop_job_timeout_detection(vdev);
ivpu_rpm_put(vdev);
+
+ if (!xa_empty(&vdev->submitted_jobs_xa))
+ ivpu_start_job_timeout_detection(vdev);
+
return 0;
}
@@ -505,11 +583,29 @@ void ivpu_jobs_abort_all(struct ivpu_device *vdev)
struct ivpu_job *job;
unsigned long id;
+ mutex_lock(&vdev->submitted_jobs_lock);
+
xa_for_each(&vdev->submitted_jobs_xa, id, job)
ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
+
+ mutex_unlock(&vdev->submitted_jobs_lock);
}
-static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
+void ivpu_cmdq_abort_all_jobs(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id)
+{
+ struct ivpu_job *job;
+ unsigned long id;
+
+ mutex_lock(&vdev->submitted_jobs_lock);
+
+ xa_for_each(&vdev->submitted_jobs_xa, id, job)
+ if (job->file_priv->ctx.id == ctx_id && job->cmdq_id == cmdq_id)
+ ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
+
+ mutex_unlock(&vdev->submitted_jobs_lock);
+}
+
+static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id)
{
struct ivpu_file_priv *file_priv = job->file_priv;
struct ivpu_device *vdev = job->vdev;
@@ -521,25 +617,35 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
if (ret < 0)
return ret;
+ mutex_lock(&vdev->submitted_jobs_lock);
mutex_lock(&file_priv->lock);
- cmdq = ivpu_cmdq_acquire(file_priv, priority);
+ if (cmdq_id == 0)
+ cmdq = ivpu_cmdq_acquire_legacy(file_priv, priority);
+ else
+ cmdq = ivpu_cmdq_acquire(file_priv, cmdq_id);
if (!cmdq) {
- ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d engine %d prio %d\n",
- file_priv->ctx.id, job->engine_idx, priority);
+ ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d\n", file_priv->ctx.id);
ret = -EINVAL;
- goto err_unlock_file_priv;
+ goto err_unlock;
}
- xa_lock(&vdev->submitted_jobs_xa);
+ ret = ivpu_cmdq_register(file_priv, cmdq);
+ if (ret) {
+ ivpu_err(vdev, "Failed to register command queue: %d\n", ret);
+ goto err_unlock;
+ }
+
+ job->cmdq_id = cmdq->id;
+
is_first_job = xa_empty(&vdev->submitted_jobs_xa);
- ret = __xa_alloc_cyclic(&vdev->submitted_jobs_xa, &job->job_id, job, file_priv->job_limit,
- &file_priv->job_id_next, GFP_KERNEL);
+ ret = xa_alloc_cyclic(&vdev->submitted_jobs_xa, &job->job_id, job, file_priv->job_limit,
+ &file_priv->job_id_next, GFP_KERNEL);
if (ret < 0) {
ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
file_priv->ctx.id);
ret = -EBUSY;
- goto err_unlock_submitted_jobs_xa;
+ goto err_unlock;
}
ret = ivpu_cmdq_push_job(cmdq, job);
@@ -559,23 +665,23 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
trace_job("submit", job);
ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d prio %d addr 0x%llx next %d\n",
- job->job_id, file_priv->ctx.id, job->engine_idx, priority,
+ job->job_id, file_priv->ctx.id, job->engine_idx, cmdq->priority,
job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);
- xa_unlock(&vdev->submitted_jobs_xa);
-
mutex_unlock(&file_priv->lock);
- if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW))
+ if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW)) {
ivpu_job_signal_and_destroy(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
+ }
+
+ mutex_unlock(&vdev->submitted_jobs_lock);
return 0;
err_erase_xa:
- __xa_erase(&vdev->submitted_jobs_xa, job->job_id);
-err_unlock_submitted_jobs_xa:
- xa_unlock(&vdev->submitted_jobs_xa);
-err_unlock_file_priv:
+ xa_erase(&vdev->submitted_jobs_xa, job->job_id);
+err_unlock:
+ mutex_unlock(&vdev->submitted_jobs_lock);
mutex_unlock(&file_priv->lock);
ivpu_rpm_put(vdev);
return ret;
@@ -585,7 +691,7 @@ static int
ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles,
u32 buf_count, u32 commands_offset)
{
- struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_file_priv *file_priv = job->file_priv;
struct ivpu_device *vdev = file_priv->vdev;
struct ww_acquire_ctx acquire_ctx;
enum dma_resv_usage usage;
@@ -647,49 +753,20 @@ unlock_reservations:
return ret;
}
-static inline u8 ivpu_job_to_hws_priority(struct ivpu_file_priv *file_priv, u8 priority)
-{
- if (priority == DRM_IVPU_JOB_PRIORITY_DEFAULT)
- return DRM_IVPU_JOB_PRIORITY_NORMAL;
-
- return priority - 1;
-}
-
-int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv, u32 cmdq_id,
+ u32 buffer_count, u32 engine, void __user *buffers_ptr, u32 cmds_offset,
+ u8 priority)
{
- struct ivpu_file_priv *file_priv = file->driver_priv;
struct ivpu_device *vdev = file_priv->vdev;
- struct drm_ivpu_submit *params = data;
struct ivpu_job *job;
u32 *buf_handles;
int idx, ret;
- u8 priority;
-
- if (params->engine != DRM_IVPU_ENGINE_COMPUTE)
- return -EINVAL;
-
- if (params->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
- return -EINVAL;
-
- if (params->buffer_count == 0 || params->buffer_count > JOB_MAX_BUFFER_COUNT)
- return -EINVAL;
-
- if (!IS_ALIGNED(params->commands_offset, 8))
- return -EINVAL;
-
- if (!file_priv->ctx.id)
- return -EINVAL;
- if (file_priv->has_mmu_faults)
- return -EBADFD;
-
- buf_handles = kcalloc(params->buffer_count, sizeof(u32), GFP_KERNEL);
+ buf_handles = kcalloc(buffer_count, sizeof(u32), GFP_KERNEL);
if (!buf_handles)
return -ENOMEM;
- ret = copy_from_user(buf_handles,
- (void __user *)params->buffers_ptr,
- params->buffer_count * sizeof(u32));
+ ret = copy_from_user(buf_handles, buffers_ptr, buffer_count * sizeof(u32));
if (ret) {
ret = -EFAULT;
goto err_free_handles;
@@ -700,27 +777,23 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto err_free_handles;
}
- ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n",
- file_priv->ctx.id, params->buffer_count);
+ ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n", file_priv->ctx.id, buffer_count);
- job = ivpu_job_create(file_priv, params->engine, params->buffer_count);
+ job = ivpu_job_create(file_priv, engine, buffer_count);
if (!job) {
ivpu_err(vdev, "Failed to create job\n");
ret = -ENOMEM;
goto err_exit_dev;
}
- ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, params->buffer_count,
- params->commands_offset);
+ ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, buffer_count, cmds_offset);
if (ret) {
ivpu_err(vdev, "Failed to prepare job: %d\n", ret);
goto err_destroy_job;
}
- priority = ivpu_job_to_hws_priority(file_priv, params->priority);
-
down_read(&vdev->pm->reset_lock);
- ret = ivpu_job_submit(job, priority);
+ ret = ivpu_job_submit(job, priority, cmdq_id);
up_read(&vdev->pm->reset_lock);
if (ret)
goto err_signal_fence;
@@ -740,12 +813,122 @@ err_free_handles:
return ret;
}
+int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct drm_ivpu_submit *args = data;
+ u8 priority;
+
+ if (args->engine != DRM_IVPU_ENGINE_COMPUTE)
+ return -EINVAL;
+
+ if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
+ return -EINVAL;
+
+ if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(args->commands_offset, 8))
+ return -EINVAL;
+
+ if (!file_priv->ctx.id)
+ return -EINVAL;
+
+ if (file_priv->has_mmu_faults)
+ return -EBADFD;
+
+ priority = ivpu_job_to_jsm_priority(args->priority);
+
+ return ivpu_submit(file, file_priv, 0, args->buffer_count, args->engine,
+ (void __user *)args->buffers_ptr, args->commands_offset, priority);
+}
+
+int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct drm_ivpu_cmdq_submit *args = data;
+
+ if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+ return -ENODEV;
+
+ if (args->cmdq_id < IVPU_CMDQ_MIN_ID || args->cmdq_id > IVPU_CMDQ_MAX_ID)
+ return -EINVAL;
+
+ if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(args->commands_offset, 8))
+ return -EINVAL;
+
+ if (!file_priv->ctx.id)
+ return -EINVAL;
+
+ if (file_priv->has_mmu_faults)
+ return -EBADFD;
+
+ return ivpu_submit(file, file_priv, args->cmdq_id, args->buffer_count, VPU_ENGINE_COMPUTE,
+ (void __user *)args->buffers_ptr, args->commands_offset, 0);
+}
+
+int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct drm_ivpu_cmdq_create *args = data;
+ struct ivpu_cmdq *cmdq;
+
+ if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+ return -ENODEV;
+
+ if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
+ return -EINVAL;
+
+ mutex_lock(&file_priv->lock);
+
+ cmdq = ivpu_cmdq_create(file_priv, ivpu_job_to_jsm_priority(args->priority), false);
+ if (cmdq)
+ args->cmdq_id = cmdq->id;
+
+ mutex_unlock(&file_priv->lock);
+
+ return cmdq ? 0 : -ENOMEM;
+}
+
+int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct drm_ivpu_cmdq_destroy *args = data;
+ struct ivpu_cmdq *cmdq;
+ u32 cmdq_id;
+ int ret;
+
+ if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+ return -ENODEV;
+
+ mutex_lock(&file_priv->lock);
+
+ cmdq = xa_load(&file_priv->cmdq_xa, args->cmdq_id);
+ if (!cmdq || cmdq->is_legacy) {
+ ret = -ENOENT;
+ goto err_unlock;
+ }
+
+ cmdq_id = cmdq->id;
+ ivpu_cmdq_destroy(file_priv, cmdq);
+ mutex_unlock(&file_priv->lock);
+ ivpu_cmdq_abort_all_jobs(vdev, file_priv->ctx.id, cmdq_id);
+ return 0;
+
+err_unlock:
+ mutex_unlock(&file_priv->lock);
+ return ret;
+}
+
static void
ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
struct vpu_jsm_msg *jsm_msg)
{
struct vpu_ipc_msg_payload_job_done *payload;
- int ret;
if (!jsm_msg) {
ivpu_err(vdev, "IPC message has no JSM payload\n");
@@ -758,9 +941,10 @@ ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
}
payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;
- ret = ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
- if (!ret && !xa_empty(&vdev->submitted_jobs_xa))
- ivpu_start_job_timeout_detection(vdev);
+
+ mutex_lock(&vdev->submitted_jobs_lock);
+ ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
+ mutex_unlock(&vdev->submitted_jobs_lock);
}
void ivpu_job_done_consumer_init(struct ivpu_device *vdev)
@@ -773,3 +957,55 @@ void ivpu_job_done_consumer_fini(struct ivpu_device *vdev)
{
ivpu_ipc_consumer_del(vdev, &vdev->job_done_consumer);
}
+
+void ivpu_context_abort_work_fn(struct work_struct *work)
+{
+ struct ivpu_device *vdev = container_of(work, struct ivpu_device, context_abort_work);
+ struct ivpu_file_priv *file_priv;
+ struct ivpu_job *job;
+ unsigned long ctx_id;
+ unsigned long id;
+
+ if (drm_WARN_ON(&vdev->drm, pm_runtime_get_if_active(vdev->drm.dev) <= 0))
+ return;
+
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
+ ivpu_jsm_reset_engine(vdev, 0);
+
+ mutex_lock(&vdev->context_list_lock);
+ xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
+ if (!file_priv->has_mmu_faults || file_priv->aborted)
+ continue;
+
+ mutex_lock(&file_priv->lock);
+ ivpu_context_abort_locked(file_priv);
+ mutex_unlock(&file_priv->lock);
+ }
+ mutex_unlock(&vdev->context_list_lock);
+
+ /*
+ * We will not receive new MMU event interrupts until the existing events are discarded;
+ * however, we want to discard these events only after aborting the faulty context,
+ * to avoid generating new faults from that context
+ */
+ ivpu_mmu_discard_events(vdev);
+
+ if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
+ goto runtime_put;
+
+ ivpu_jsm_hws_resume_engine(vdev, 0);
+ /*
+ * In hardware scheduling mode the NPU has already stopped processing jobs and won't
+ * send us any further notifications; thus we have to free job-related resources and
+ * notify userspace
+ */
+ mutex_lock(&vdev->submitted_jobs_lock);
+ xa_for_each(&vdev->submitted_jobs_xa, id, job)
+ if (job->file_priv->aborted)
+ ivpu_job_signal_and_destroy(vdev, job->job_id, DRM_IVPU_JOB_STATUS_ABORTED);
+ mutex_unlock(&vdev->submitted_jobs_lock);
+
+runtime_put:
+ pm_runtime_mark_last_busy(vdev->drm.dev);
+ pm_runtime_put_autosuspend(vdev->drm.dev);
+}
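For readers less familiar with the locking change in ivpu_job.c above: the submit path is now serialized by the submitted_jobs_lock mutex, so the driver can call the self-locking xa_alloc_cyclic()/xa_erase() helpers instead of wrapping the __xa_*() variants in xa_lock(). A minimal, self-contained sketch of that allocation pattern; the names job_table, job_lock and the 0..1023 limit are illustrative and not taken from this patch:

    #include <linux/cleanup.h>
    #include <linux/mutex.h>
    #include <linux/xarray.h>

    static DEFINE_XARRAY_ALLOC(job_table);  /* entries indexed by a cyclic id */
    static DEFINE_MUTEX(job_lock);          /* serializes the whole submit path */
    static u32 next_id;

    /* Store @job under a freshly allocated id; returns 0 or a negative errno. */
    static int job_table_add(void *job, u32 *id)
    {
        int ret;

        guard(mutex)(&job_lock);        /* scope-based unlock, as in the patch */

        /* xa_alloc_cyclic() takes the XArray spinlock internally. */
        ret = xa_alloc_cyclic(&job_table, id, job, XA_LIMIT(0, 1023),
                              &next_id, GFP_KERNEL);
        return ret < 0 ? ret : 0;
    }

Note that xa_alloc_cyclic() returns 1 (not an error) when the id space wraps, which is why only negative return values are treated as failure here.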
diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h
index 8b19e3f8b4cf..2e301c2eea7b 100644
--- a/drivers/accel/ivpu/ivpu_job.h
+++ b/drivers/accel/ivpu/ivpu_job.h
@@ -30,8 +30,8 @@ struct ivpu_cmdq {
u32 entry_count;
u32 id;
u32 db_id;
- bool db_registered;
u8 priority;
+ bool is_legacy;
};
/**
@@ -51,6 +51,7 @@ struct ivpu_job {
struct ivpu_file_priv *file_priv;
struct dma_fence *done_fence;
u64 cmd_buf_vpu_addr;
+ u32 cmdq_id;
u32 job_id;
u32 engine_idx;
size_t bo_count;
@@ -58,14 +59,19 @@ struct ivpu_job {
};
int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv);
void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv);
void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev);
+void ivpu_cmdq_abort_all_jobs(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id);
void ivpu_job_done_consumer_init(struct ivpu_device *vdev);
void ivpu_job_done_consumer_fini(struct ivpu_device *vdev);
+void ivpu_context_abort_work_fn(struct work_struct *work);
void ivpu_jobs_abort_all(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c
index 30a40be76930..219ab8afefab 100644
--- a/drivers/accel/ivpu/ivpu_jsm_msg.c
+++ b/drivers/accel/ivpu/ivpu_jsm_msg.c
@@ -7,6 +7,7 @@
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"
+#include "vpu_jsm_api.h"
const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
{
@@ -407,26 +408,18 @@ int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP };
struct vpu_jsm_msg resp;
+ struct ivpu_hw_info *hw = vdev->hw;
+ struct vpu_ipc_msg_payload_hws_priority_band_setup *setup =
+ &req.payload.hws_priority_band_setup;
int ret;
- /* Idle */
- req.payload.hws_priority_band_setup.grace_period[0] = 0;
- req.payload.hws_priority_band_setup.process_grace_period[0] = 50000;
- req.payload.hws_priority_band_setup.process_quantum[0] = 160000;
- /* Normal */
- req.payload.hws_priority_band_setup.grace_period[1] = 50000;
- req.payload.hws_priority_band_setup.process_grace_period[1] = 50000;
- req.payload.hws_priority_band_setup.process_quantum[1] = 300000;
- /* Focus */
- req.payload.hws_priority_band_setup.grace_period[2] = 50000;
- req.payload.hws_priority_band_setup.process_grace_period[2] = 50000;
- req.payload.hws_priority_band_setup.process_quantum[2] = 200000;
- /* Realtime */
- req.payload.hws_priority_band_setup.grace_period[3] = 0;
- req.payload.hws_priority_band_setup.process_grace_period[3] = 50000;
- req.payload.hws_priority_band_setup.process_quantum[3] = 200000;
-
- req.payload.hws_priority_band_setup.normal_band_percentage = 10;
+ for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE;
+ band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) {
+ setup->grace_period[band] = hw->hws.grace_period[band];
+ setup->process_grace_period[band] = hw->hws.process_grace_period[band];
+ setup->process_quantum[band] = hw->hws.process_quantum[band];
+ }
+ setup->normal_band_percentage = 10;
ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP,
&resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c
index 26ef52fbb93e..5ea010568faa 100644
--- a/drivers/accel/ivpu/ivpu_mmu.c
+++ b/drivers/accel/ivpu/ivpu_mmu.c
@@ -20,6 +20,12 @@
#define IVPU_MMU_REG_CR0 0x00200020u
#define IVPU_MMU_REG_CR0ACK 0x00200024u
#define IVPU_MMU_REG_CR0ACK_VAL_MASK GENMASK(31, 0)
+#define IVPU_MMU_REG_CR0_ATSCHK_MASK BIT(4)
+#define IVPU_MMU_REG_CR0_CMDQEN_MASK BIT(3)
+#define IVPU_MMU_REG_CR0_EVTQEN_MASK BIT(2)
+#define IVPU_MMU_REG_CR0_PRIQEN_MASK BIT(1)
+#define IVPU_MMU_REG_CR0_SMMUEN_MASK BIT(0)
+
#define IVPU_MMU_REG_CR1 0x00200028u
#define IVPU_MMU_REG_CR2 0x0020002cu
#define IVPU_MMU_REG_IRQ_CTRL 0x00200050u
@@ -141,12 +147,6 @@
#define IVPU_MMU_IRQ_EVTQ_EN BIT(2)
#define IVPU_MMU_IRQ_GERROR_EN BIT(0)
-#define IVPU_MMU_CR0_ATSCHK BIT(4)
-#define IVPU_MMU_CR0_CMDQEN BIT(3)
-#define IVPU_MMU_CR0_EVTQEN BIT(2)
-#define IVPU_MMU_CR0_PRIQEN BIT(1)
-#define IVPU_MMU_CR0_SMMUEN BIT(0)
-
#define IVPU_MMU_CR1_TABLE_SH GENMASK(11, 10)
#define IVPU_MMU_CR1_TABLE_OC GENMASK(9, 8)
#define IVPU_MMU_CR1_TABLE_IC GENMASK(7, 6)
@@ -596,7 +596,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, 0);
REGV_WR32(IVPU_MMU_REG_CMDQ_CONS, 0);
- val = IVPU_MMU_CR0_CMDQEN;
+ val = REG_SET_FLD(IVPU_MMU_REG_CR0, CMDQEN, 0);
ret = ivpu_mmu_reg_write_cr0(vdev, val);
if (ret)
return ret;
@@ -617,12 +617,12 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
REGV_WR32(IVPU_MMU_REG_EVTQ_PROD_SEC, 0);
REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, 0);
- val |= IVPU_MMU_CR0_EVTQEN;
+ val = REG_SET_FLD(IVPU_MMU_REG_CR0, EVTQEN, val);
ret = ivpu_mmu_reg_write_cr0(vdev, val);
if (ret)
return ret;
- val |= IVPU_MMU_CR0_ATSCHK;
+ val = REG_SET_FLD(IVPU_MMU_REG_CR0, ATSCHK, val);
ret = ivpu_mmu_reg_write_cr0(vdev, val);
if (ret)
return ret;
@@ -631,7 +631,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
if (ret)
return ret;
- val |= IVPU_MMU_CR0_SMMUEN;
+ val = REG_SET_FLD(IVPU_MMU_REG_CR0, SMMUEN, val);
return ivpu_mmu_reg_write_cr0(vdev, val);
}
@@ -725,8 +725,8 @@ static int ivpu_mmu_cdtab_entry_set(struct ivpu_device *vdev, u32 ssid, u64 cd_d
cd[2] = 0;
cd[3] = 0x0000000000007444;
- /* For global context generate memory fault on VPU */
- if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID)
+ /* For global and reserved contexts generate memory fault on VPU */
+ if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID || ssid == IVPU_RESERVED_CONTEXT_MMU_SSID)
cd[0] |= IVPU_MMU_CD_0_A;
if (valid)
@@ -870,28 +870,107 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
return evt;
}
+static int ivpu_mmu_evtq_set(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(IVPU_MMU_REG_CR0);
+
+ if (enable)
+ val = REG_SET_FLD(IVPU_MMU_REG_CR0, EVTQEN, val);
+ else
+ val = REG_CLR_FLD(IVPU_MMU_REG_CR0, EVTQEN, val);
+ REGV_WR32(IVPU_MMU_REG_CR0, val);
+
+ return REGV_POLL_FLD(IVPU_MMU_REG_CR0ACK, VAL, val, IVPU_MMU_REG_TIMEOUT_US);
+}
+
+static int ivpu_mmu_evtq_enable(struct ivpu_device *vdev)
+{
+ return ivpu_mmu_evtq_set(vdev, true);
+}
+
+static int ivpu_mmu_evtq_disable(struct ivpu_device *vdev)
+{
+ return ivpu_mmu_evtq_set(vdev, false);
+}
+
+void ivpu_mmu_discard_events(struct ivpu_device *vdev)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+
+ mutex_lock(&mmu->lock);
+ /*
+ * Disable event queue (stop MMU from updating the producer)
+ * to allow synchronization of consumer and producer indexes
+ */
+ ivpu_mmu_evtq_disable(vdev);
+
+ vdev->mmu->evtq.cons = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
+ REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
+ vdev->mmu->evtq.prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
+
+ ivpu_mmu_evtq_enable(vdev);
+
+ drm_WARN_ON_ONCE(&vdev->drm, vdev->mmu->evtq.cons != vdev->mmu->evtq.prod);
+
+ mutex_unlock(&mmu->lock);
+}
+
+int ivpu_mmu_disable_ssid_events(struct ivpu_device *vdev, u32 ssid)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+ struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
+ u64 *entry;
+ u64 val;
+
+ if (ssid > IVPU_MMU_CDTAB_ENT_COUNT)
+ return -EINVAL;
+
+ mutex_lock(&mmu->lock);
+
+ entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE);
+
+ val = READ_ONCE(entry[0]);
+ val &= ~IVPU_MMU_CD_0_R;
+ WRITE_ONCE(entry[0], val);
+
+ if (!ivpu_is_force_snoop_enabled(vdev))
+ clflush_cache_range(entry, IVPU_MMU_CDTAB_ENT_SIZE);
+
+ ivpu_mmu_cmdq_write_cfgi_all(vdev);
+ ivpu_mmu_cmdq_sync(vdev);
+
+ mutex_unlock(&mmu->lock);
+
+ return 0;
+}
+
void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
{
+ struct ivpu_file_priv *file_priv;
u32 *event;
u32 ssid;
ivpu_dbg(vdev, IRQ, "MMU event queue\n");
- while ((event = ivpu_mmu_get_event(vdev)) != NULL) {
- ivpu_mmu_dump_event(vdev, event);
-
- ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
- if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID) {
+ while ((event = ivpu_mmu_get_event(vdev))) {
+ ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, *event);
+ if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID ||
+ ssid == IVPU_RESERVED_CONTEXT_MMU_SSID) {
+ ivpu_mmu_dump_event(vdev, event);
ivpu_pm_trigger_recovery(vdev, "MMU event");
return;
}
- ivpu_mmu_user_context_mark_invalid(vdev, ssid);
- REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
+ file_priv = xa_load(&vdev->context_xa, ssid);
+ if (file_priv) {
+ if (!READ_ONCE(file_priv->has_mmu_faults)) {
+ ivpu_mmu_dump_event(vdev, event);
+ WRITE_ONCE(file_priv->has_mmu_faults, true);
+ }
+ }
}
- if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_MMU_EVTQ))
- ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
+ queue_work(system_wq, &vdev->context_abort_work);
}
void ivpu_mmu_evtq_dump(struct ivpu_device *vdev)
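The evtq enable/disable helpers added above use the common write-then-poll-for-ack register handshake: write CR0, then poll CR0ACK until it reports the same value. A device-neutral sketch of that handshake built on the generic iopoll helpers; the function and register pointers are illustrative, not the driver's own REGV_* macros:

    #include <linux/io.h>
    #include <linux/iopoll.h>

    /* Write a control register, then wait for its ACK register to mirror it. */
    static int write_and_wait_ack(void __iomem *ctrl, void __iomem *ack,
                                  u32 val, u32 timeout_us)
    {
        u32 seen;

        writel(val, ctrl);
        /* Poll every 10 us until the hardware acknowledges the new value. */
        return readl_poll_timeout(ack, seen, seen == val, 10, timeout_us);
    }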
diff --git a/drivers/accel/ivpu/ivpu_mmu.h b/drivers/accel/ivpu/ivpu_mmu.h
index 7afea9cd8731..1ce7529746ad 100644
--- a/drivers/accel/ivpu/ivpu_mmu.h
+++ b/drivers/accel/ivpu/ivpu_mmu.h
@@ -47,5 +47,7 @@ int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid);
void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev);
void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev);
void ivpu_mmu_evtq_dump(struct ivpu_device *vdev);
+void ivpu_mmu_discard_events(struct ivpu_device *vdev);
+int ivpu_mmu_disable_ssid_events(struct ivpu_device *vdev, u32 ssid);
#endif /* __IVPU_MMU_H__ */
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
index 0af614dfb6f9..f0267efa55aa 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.c
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -635,16 +635,3 @@ void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev)
ivpu_mmu_cd_clear(vdev, vdev->rctx.id);
ivpu_mmu_context_fini(vdev, &vdev->rctx);
}
-
-void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid)
-{
- struct ivpu_file_priv *file_priv;
-
- xa_lock(&vdev->context_xa);
-
- file_priv = xa_load(&vdev->context_xa, ssid);
- if (file_priv)
- file_priv->has_mmu_faults = true;
-
- xa_unlock(&vdev->context_xa);
-}
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.h b/drivers/accel/ivpu/ivpu_mmu_context.h
index 8042fc067062..f255310968cf 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.h
+++ b/drivers/accel/ivpu/ivpu_mmu_context.h
@@ -37,8 +37,6 @@ void ivpu_mmu_global_context_fini(struct ivpu_device *vdev);
int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev);
void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev);
-void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid);
-
int ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
u64 size, struct drm_mm_node *node);
void ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node);
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index 5060c5dd40d1..b5891e91f7ab 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -177,16 +177,11 @@ void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason)
return;
}
- if (ivpu_is_fpga(vdev)) {
- ivpu_err(vdev, "Recovery not available on FPGA\n");
- return;
- }
-
/* Trigger recovery if it's not in progress */
if (atomic_cmpxchg(&vdev->pm->reset_pending, 0, 1) == 0) {
ivpu_hw_diagnose_failure(vdev);
ivpu_hw_irq_disable(vdev); /* Disable IRQ early to protect from IRQ storm */
- queue_work(system_long_wq, &vdev->pm->recovery_work);
+ queue_work(system_unbound_wq, &vdev->pm->recovery_work);
}
}
@@ -462,8 +457,9 @@ int ivpu_pm_dct_disable(struct ivpu_device *vdev)
return 0;
}
-void ivpu_pm_dct_irq_thread_handler(struct ivpu_device *vdev)
+void ivpu_pm_irq_dct_work_fn(struct work_struct *work)
{
+ struct ivpu_device *vdev = container_of(work, struct ivpu_device, irq_dct_work);
bool enable;
int ret;
diff --git a/drivers/accel/ivpu/ivpu_pm.h b/drivers/accel/ivpu/ivpu_pm.h
index b70efe6c36e4..89b264cc0e3e 100644
--- a/drivers/accel/ivpu/ivpu_pm.h
+++ b/drivers/accel/ivpu/ivpu_pm.h
@@ -45,6 +45,6 @@ void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev);
int ivpu_pm_dct_init(struct ivpu_device *vdev);
int ivpu_pm_dct_enable(struct ivpu_device *vdev, u8 active_percent);
int ivpu_pm_dct_disable(struct ivpu_device *vdev);
-void ivpu_pm_dct_irq_thread_handler(struct ivpu_device *vdev);
+void ivpu_pm_irq_dct_work_fn(struct work_struct *work);
#endif /* __IVPU_PM_H__ */
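The ivpu_pm_irq_dct_work_fn() conversion above follows the standard deferred-work shape: the work_struct is embedded in the device structure, INIT_WORK() binds it to the handler, and the handler recovers the device with container_of(). A self-contained sketch of that shape; struct my_dev and the function names are made up for illustration:

    #include <linux/container_of.h>
    #include <linux/workqueue.h>

    struct my_dev {
        struct work_struct irq_work;    /* queued from interrupt context */
        int pending_events;
    };

    static void my_dev_irq_work_fn(struct work_struct *work)
    {
        /* Recover the containing device from the embedded work item. */
        struct my_dev *dev = container_of(work, struct my_dev, irq_work);

        dev->pending_events = 0;        /* do the heavy lifting in process context */
    }

    static void my_dev_init(struct my_dev *dev)
    {
        INIT_WORK(&dev->irq_work, my_dev_irq_work_fn);
    }

    /* Called from the IRQ handler: defer the real work. */
    static void my_dev_irq(struct my_dev *dev)
    {
        queue_work(system_unbound_wq, &dev->irq_work);
    }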
diff --git a/drivers/accel/ivpu/ivpu_sysfs.c b/drivers/accel/ivpu/ivpu_sysfs.c
index 616477fc17fa..97102feaf8dd 100644
--- a/drivers/accel/ivpu/ivpu_sysfs.c
+++ b/drivers/accel/ivpu/ivpu_sysfs.c
@@ -7,11 +7,14 @@
#include <linux/err.h>
#include "ivpu_drv.h"
+#include "ivpu_gem.h"
#include "ivpu_fw.h"
#include "ivpu_hw.h"
#include "ivpu_sysfs.h"
-/*
+/**
+ * DOC: npu_busy_time_us
+ *
* npu_busy_time_us is the time that the device spent executing jobs.
* The time is counted when and only when there are jobs submitted to firmware.
*
@@ -30,11 +33,12 @@ npu_busy_time_us_show(struct device *dev, struct device_attribute *attr, char *b
struct ivpu_device *vdev = to_ivpu_device(drm);
ktime_t total, now = 0;
- xa_lock(&vdev->submitted_jobs_xa);
+ mutex_lock(&vdev->submitted_jobs_lock);
+
total = vdev->busy_time;
if (!xa_empty(&vdev->submitted_jobs_xa))
now = ktime_sub(ktime_get(), vdev->busy_start_ts);
- xa_unlock(&vdev->submitted_jobs_xa);
+ mutex_unlock(&vdev->submitted_jobs_lock);
return sysfs_emit(buf, "%lld\n", ktime_to_us(ktime_add(total, now)));
}
@@ -42,6 +46,30 @@ npu_busy_time_us_show(struct device *dev, struct device_attribute *attr, char *b
static DEVICE_ATTR_RO(npu_busy_time_us);
/**
+ * DOC: npu_memory_utilization
+ *
+ * npu_memory_utilization reports the current NPU memory utilization in bytes.
+ *
+ */
+static ssize_t
+npu_memory_utilization_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct ivpu_device *vdev = to_ivpu_device(drm);
+ struct ivpu_bo *bo;
+ u64 total_npu_memory = 0;
+
+ mutex_lock(&vdev->bo_list_lock);
+ list_for_each_entry(bo, &vdev->bo_list, bo_list_node)
+ total_npu_memory += bo->base.base.size;
+ mutex_unlock(&vdev->bo_list_lock);
+
+ return sysfs_emit(buf, "%lld\n", total_npu_memory);
+}
+
+static DEVICE_ATTR_RO(npu_memory_utilization);
+
+/**
* DOC: sched_mode
*
* The sched_mode is used to report current NPU scheduling mode.
@@ -64,6 +92,7 @@ static DEVICE_ATTR_RO(sched_mode);
static struct attribute *ivpu_dev_attrs[] = {
&dev_attr_npu_busy_time_us.attr,
+ &dev_attr_npu_memory_utilization.attr,
&dev_attr_sched_mode.attr,
NULL,
};
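The npu_memory_utilization attribute added above follows the usual read-only sysfs recipe: a <name>_show() callback that formats the value with sysfs_emit(), DEVICE_ATTR_RO() to generate dev_attr_<name>, and an entry in the attribute array. A minimal sketch of that recipe with an invented attribute name (memory_used) and counter:

    #include <linux/atomic.h>
    #include <linux/device.h>
    #include <linux/sysfs.h>

    static atomic64_t total_bytes;      /* whatever the driver wants to report */

    static ssize_t memory_used_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
    {
        /* sysfs_emit() bounds the output to the PAGE_SIZE sysfs buffer. */
        return sysfs_emit(buf, "%lld\n",
                          (long long)atomic64_read(&total_bytes));
    }
    static DEVICE_ATTR_RO(memory_used); /* generates dev_attr_memory_used */

    static struct attribute *my_dev_attrs[] = {
        &dev_attr_memory_used.attr,
        NULL,
    };
    ATTRIBUTE_GROUPS(my_dev);           /* generates my_dev_group and my_dev_groups */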
diff --git a/drivers/accel/qaic/mhi_controller.c b/drivers/accel/qaic/mhi_controller.c
index 8ab82e78dd94..13a14c6c6168 100644
--- a/drivers/accel/qaic/mhi_controller.c
+++ b/drivers/accel/qaic/mhi_controller.c
@@ -20,6 +20,11 @@ static unsigned int mhi_timeout_ms = 2000; /* 2 sec default */
module_param(mhi_timeout_ms, uint, 0600);
MODULE_PARM_DESC(mhi_timeout_ms, "MHI controller timeout value");
+static const char *fw_image_paths[FAMILY_MAX] = {
+ [FAMILY_AIC100] = "qcom/aic100/sbl.bin",
+ [FAMILY_AIC200] = "qcom/aic200/sbl.bin",
+};
+
static const struct mhi_channel_config aic100_channels[] = {
{
.name = "QAIC_LOOPBACK",
@@ -439,6 +444,297 @@ static const struct mhi_channel_config aic100_channels[] = {
},
};
+static const struct mhi_channel_config aic200_channels[] = {
+ {
+ .name = "QAIC_LOOPBACK",
+ .num = 0,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_LOOPBACK",
+ .num = 1,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_SAHARA",
+ .num = 2,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_SBL,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_SAHARA",
+ .num = 3,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_SBL,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_SSR",
+ .num = 6,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_SSR",
+ .num = 7,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_CONTROL",
+ .num = 10,
+ .num_elements = 128,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_CONTROL",
+ .num = 11,
+ .num_elements = 128,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_LOGGING",
+ .num = 12,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_SBL,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_LOGGING",
+ .num = 13,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_SBL,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_STATUS",
+ .num = 14,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_STATUS",
+ .num = 15,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_TELEMETRY",
+ .num = 16,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_TELEMETRY",
+ .num = 17,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_TIMESYNC_PERIODIC",
+ .num = 22,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_TIMESYNC_PERIODIC",
+ .num = 23,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "IPCR",
+ .num = 24,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "IPCR",
+ .num = 25,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = true,
+ .wake_capable = false,
+ },
+};
+
static struct mhi_event_config aic100_events[] = {
{
.num_elements = 32,
@@ -454,16 +750,44 @@ static struct mhi_event_config aic100_events[] = {
},
};
-static struct mhi_controller_config aic100_config = {
- .max_channels = 128,
- .timeout_ms = 0, /* controlled by mhi_timeout */
- .buf_len = 0,
- .num_channels = ARRAY_SIZE(aic100_channels),
- .ch_cfg = aic100_channels,
- .num_events = ARRAY_SIZE(aic100_events),
- .event_cfg = aic100_events,
- .use_bounce_buf = false,
- .m2_no_db = false,
+static struct mhi_event_config aic200_events[] = {
+ {
+ .num_elements = 32,
+ .irq_moderation_ms = 0,
+ .irq = 0,
+ .channel = U32_MAX,
+ .priority = 1,
+ .mode = MHI_DB_BRST_DISABLE,
+ .data_type = MHI_ER_CTRL,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+};
+
+static struct mhi_controller_config mhi_cntrl_configs[] = {
+ [FAMILY_AIC100] = {
+ .max_channels = 128,
+ .timeout_ms = 0, /* controlled by mhi_timeout */
+ .buf_len = 0,
+ .num_channels = ARRAY_SIZE(aic100_channels),
+ .ch_cfg = aic100_channels,
+ .num_events = ARRAY_SIZE(aic100_events),
+ .event_cfg = aic100_events,
+ .use_bounce_buf = false,
+ .m2_no_db = false,
+ },
+ [FAMILY_AIC200] = {
+ .max_channels = 128,
+ .timeout_ms = 0, /* controlled by mhi_timeout */
+ .buf_len = 0,
+ .num_channels = ARRAY_SIZE(aic200_channels),
+ .ch_cfg = aic200_channels,
+ .num_events = ARRAY_SIZE(aic200_events),
+ .event_cfg = aic200_events,
+ .use_bounce_buf = false,
+ .m2_no_db = false,
+ },
};
static int mhi_read_reg(struct mhi_controller *mhi_cntrl, void __iomem *addr, u32 *out)
@@ -545,8 +869,9 @@ static int mhi_reset_and_async_power_up(struct mhi_controller *mhi_cntrl)
}
struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, void __iomem *mhi_bar,
- int mhi_irq, bool shared_msi)
+ int mhi_irq, bool shared_msi, int family)
{
+ struct mhi_controller_config mhi_config = mhi_cntrl_configs[family];
struct mhi_controller *mhi_cntrl;
int ret;
@@ -581,11 +906,18 @@ struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, voi
if (shared_msi) /* MSI shared with data path, no IRQF_NO_SUSPEND */
mhi_cntrl->irq_flags = IRQF_SHARED;
- mhi_cntrl->fw_image = "qcom/aic100/sbl.bin";
+ mhi_cntrl->fw_image = fw_image_paths[family];
+
+ if (family == FAMILY_AIC200) {
+ mhi_cntrl->name = "AIC200";
+ mhi_cntrl->seg_len = SZ_512K;
+ } else {
+ mhi_cntrl->name = "AIC100";
+ }
/* use latest configured timeout */
- aic100_config.timeout_ms = mhi_timeout_ms;
- ret = mhi_register_controller(mhi_cntrl, &aic100_config);
+ mhi_config.timeout_ms = mhi_timeout_ms;
+ ret = mhi_register_controller(mhi_cntrl, &mhi_config);
if (ret) {
pci_err(pci_dev, "mhi_register_controller failed %d\n", ret);
return ERR_PTR(ret);
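One design note on the hunk above: the per-family template is copied into a local mhi_config before timeout_ms is overridden, so applying the module parameter no longer mutates the shared static table the way the old aic100_config.timeout_ms assignment did. The shape of that copy-then-override pattern, with illustrative types:

    struct tmpl_config {
        unsigned int timeout_ms;
        /* ... other fields shared by all users of the template ... */
    };

    static const struct tmpl_config templates[] = {
        { .timeout_ms = 0 },
        { .timeout_ms = 0 },
    };

    static void register_one(int family, unsigned int timeout_ms)
    {
        struct tmpl_config cfg = templates[family];     /* private copy */

        cfg.timeout_ms = timeout_ms;    /* per-call override, template untouched */
        /* ... hand &cfg to the registration helper ... */
    }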
diff --git a/drivers/accel/qaic/mhi_controller.h b/drivers/accel/qaic/mhi_controller.h
index 500e7f4af2af..8939f6ae185e 100644
--- a/drivers/accel/qaic/mhi_controller.h
+++ b/drivers/accel/qaic/mhi_controller.h
@@ -8,7 +8,7 @@
#define MHICONTROLLERQAIC_H_
struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, void __iomem *mhi_bar,
- int mhi_irq, bool shared_msi);
+ int mhi_irq, bool shared_msi, int family);
void qaic_mhi_free_controller(struct mhi_controller *mhi_cntrl, bool link_up);
void qaic_mhi_start_reset(struct mhi_controller *mhi_cntrl);
void qaic_mhi_reset_done(struct mhi_controller *mhi_cntrl);
diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h
index 02561b6cecc6..0dbb8e32e4b9 100644
--- a/drivers/accel/qaic/qaic.h
+++ b/drivers/accel/qaic/qaic.h
@@ -32,6 +32,12 @@
#define to_accel_kdev(qddev) (to_drm(qddev)->accel->kdev) /* Return Linux device of accel node */
#define to_qaic_device(dev) (to_qaic_drm_device((dev))->qdev)
+enum aic_families {
+ FAMILY_AIC100,
+ FAMILY_AIC200,
+ FAMILY_MAX,
+};
+
enum __packed dev_states {
/* Device is offline or will be very soon */
QAIC_OFFLINE,
@@ -113,10 +119,10 @@ struct qaic_device {
struct pci_dev *pdev;
/* Req. ID of request that will be queued next in MHI control device */
u32 next_seq_num;
- /* Base address of bar 0 */
- void __iomem *bar_0;
- /* Base address of bar 2 */
- void __iomem *bar_2;
+ /* Base address of the MHI bar */
+ void __iomem *bar_mhi;
+ /* Base address of the DBCs bar */
+ void __iomem *bar_dbc;
/* Controller structure for MHI devices */
struct mhi_controller *mhi_cntrl;
/* MHI control channel device */
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index c20eb63750f5..43aba57b48f0 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -172,9 +172,10 @@ static void free_slice(struct kref *kref)
static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_table **sgt_out,
struct sg_table *sgt_in, u64 size, u64 offset)
{
- int total_len, len, nents, offf = 0, offl = 0;
struct scatterlist *sg, *sgn, *sgf, *sgl;
+ unsigned int len, nents, offf, offl;
struct sg_table *sgt;
+ size_t total_len;
int ret, j;
/* find out number of relevant nents needed for this mem */
@@ -182,6 +183,8 @@ static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_tabl
sgf = NULL;
sgl = NULL;
nents = 0;
+ offf = 0;
+ offl = 0;
size = size ? size : PAGE_SIZE;
for_each_sgtable_dma_sg(sgt_in, sg, j) {
@@ -554,6 +557,7 @@ static bool invalid_sem(struct qaic_sem *sem)
static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_entry *slice_ent,
u32 count, u64 total_size)
{
+ u64 total;
int i;
for (i = 0; i < count; i++) {
@@ -563,7 +567,8 @@ static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_
invalid_sem(&slice_ent[i].sem2) || invalid_sem(&slice_ent[i].sem3))
return -EINVAL;
- if (slice_ent[i].offset + slice_ent[i].size > total_size)
+ if (check_add_overflow(slice_ent[i].offset, slice_ent[i].size, &total) ||
+ total > total_size)
return -EINVAL;
}
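The validation fix above leans on check_add_overflow() from <linux/overflow.h>: it stores the sum in its third argument and returns true if the addition wrapped, so a huge offset + size pair cannot wrap around and slip past the total-size check. A small sketch of that helper in isolation; the function name and the way it is called are illustrative:

    #include <linux/overflow.h>
    #include <linux/types.h>

    static bool range_fits(u64 offset, u64 size, u64 total_size)
    {
        u64 end;

        /* A true return means offset + size wrapped around U64_MAX. */
        if (check_add_overflow(offset, size, &end))
            return false;

        return end <= total_size;
    }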
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index 81819b9ef8d4..3b415e2c9431 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -34,13 +34,46 @@
MODULE_IMPORT_NS("DMA_BUF");
-#define PCI_DEV_AIC080 0xa080
-#define PCI_DEV_AIC100 0xa100
+#define PCI_DEVICE_ID_QCOM_AIC080 0xa080
+#define PCI_DEVICE_ID_QCOM_AIC100 0xa100
+#define PCI_DEVICE_ID_QCOM_AIC200 0xa110
#define QAIC_NAME "qaic"
#define QAIC_DESC "Qualcomm Cloud AI Accelerators"
#define CNTL_MAJOR 5
#define CNTL_MINOR 0
+struct qaic_device_config {
+ /* Indicates the AIC family the device belongs to */
+ int family;
+ /* A bitmask representing the available BARs */
+ int bar_mask;
+ /* An index value used to identify the MHI controller BAR */
+ unsigned int mhi_bar_idx;
+ /* An index value used to identify the DBCs BAR */
+ unsigned int dbc_bar_idx;
+};
+
+static const struct qaic_device_config aic080_config = {
+ .family = FAMILY_AIC100,
+ .bar_mask = BIT(0) | BIT(2) | BIT(4),
+ .mhi_bar_idx = 0,
+ .dbc_bar_idx = 2,
+};
+
+static const struct qaic_device_config aic100_config = {
+ .family = FAMILY_AIC100,
+ .bar_mask = BIT(0) | BIT(2) | BIT(4),
+ .mhi_bar_idx = 0,
+ .dbc_bar_idx = 2,
+};
+
+static const struct qaic_device_config aic200_config = {
+ .family = FAMILY_AIC200,
+ .bar_mask = BIT(0) | BIT(1) | BIT(2) | BIT(4),
+ .mhi_bar_idx = 1,
+ .dbc_bar_idx = 2,
+};
+
bool datapath_polling;
module_param(datapath_polling, bool, 0400);
MODULE_PARM_DESC(datapath_polling, "Operate the datapath in polling mode");
@@ -352,7 +385,8 @@ void qaic_dev_reset_clean_local_state(struct qaic_device *qdev)
release_dbc(qdev, i);
}
-static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_device_id *id)
+static struct qaic_device *create_qdev(struct pci_dev *pdev,
+ const struct qaic_device_config *config)
{
struct device *dev = &pdev->dev;
struct qaic_drm_device *qddev;
@@ -365,12 +399,10 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
return NULL;
qdev->dev_state = QAIC_OFFLINE;
- if (id->device == PCI_DEV_AIC080 || id->device == PCI_DEV_AIC100) {
- qdev->num_dbc = 16;
- qdev->dbc = devm_kcalloc(dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
- if (!qdev->dbc)
- return NULL;
- }
+ qdev->num_dbc = 16;
+ qdev->dbc = devm_kcalloc(dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
+ if (!qdev->dbc)
+ return NULL;
qddev = devm_drm_dev_alloc(&pdev->dev, &qaic_accel_driver, struct qaic_drm_device, drm);
if (IS_ERR(qddev))
@@ -426,17 +458,18 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
return qdev;
}
-static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
+static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev,
+ const struct qaic_device_config *config)
{
int bars;
int ret;
- bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ bars = pci_select_bars(pdev, IORESOURCE_MEM) & 0x3f;
/* make sure the device has the expected BARs */
- if (bars != (BIT(0) | BIT(2) | BIT(4))) {
- pci_dbg(pdev, "%s: expected BARs 0, 2, and 4 not found in device. Found 0x%x\n",
- __func__, bars);
+ if (bars != config->bar_mask) {
+ pci_dbg(pdev, "%s: expected BARs %#x not found in device. Found %#x\n",
+ __func__, config->bar_mask, bars);
return -EINVAL;
}
@@ -449,13 +482,13 @@ static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
return ret;
dma_set_max_seg_size(&pdev->dev, UINT_MAX);
- qdev->bar_0 = devm_ioremap_resource(&pdev->dev, &pdev->resource[0]);
- if (IS_ERR(qdev->bar_0))
- return PTR_ERR(qdev->bar_0);
+ qdev->bar_mhi = devm_ioremap_resource(&pdev->dev, &pdev->resource[config->mhi_bar_idx]);
+ if (IS_ERR(qdev->bar_mhi))
+ return PTR_ERR(qdev->bar_mhi);
- qdev->bar_2 = devm_ioremap_resource(&pdev->dev, &pdev->resource[2]);
- if (IS_ERR(qdev->bar_2))
- return PTR_ERR(qdev->bar_2);
+ qdev->bar_dbc = devm_ioremap_resource(&pdev->dev, &pdev->resource[config->dbc_bar_idx]);
+ if (IS_ERR(qdev->bar_dbc))
+ return PTR_ERR(qdev->bar_dbc);
/* Managed release since we use pcim_enable_device above */
pci_set_master(pdev);
@@ -465,14 +498,15 @@ static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
{
+ int irq_count = qdev->num_dbc + 1;
int mhi_irq;
int ret;
int i;
/* Managed release since we use pcim_enable_device */
- ret = pci_alloc_irq_vectors(pdev, 32, 32, PCI_IRQ_MSI);
+ ret = pci_alloc_irq_vectors(pdev, irq_count, irq_count, PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (ret == -ENOSPC) {
- ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (ret < 0)
return ret;
@@ -485,7 +519,8 @@ static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
* interrupted, it shouldn't race with itself.
*/
qdev->single_msi = true;
- pci_info(pdev, "Allocating 32 MSIs failed, operating in 1 MSI mode. Performance may be impacted.\n");
+ pci_info(pdev, "Allocating %d MSIs failed, operating in 1 MSI mode. Performance may be impacted.\n",
+ irq_count);
} else if (ret < 0) {
return ret;
}
@@ -515,21 +550,22 @@ static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
static int qaic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct qaic_device_config *config = (struct qaic_device_config *)id->driver_data;
struct qaic_device *qdev;
int mhi_irq;
int ret;
int i;
- qdev = create_qdev(pdev, id);
+ qdev = create_qdev(pdev, config);
if (!qdev)
return -ENOMEM;
- ret = init_pci(qdev, pdev);
+ ret = init_pci(qdev, pdev, config);
if (ret)
return ret;
for (i = 0; i < qdev->num_dbc; ++i)
- qdev->dbc[i].dbc_base = qdev->bar_2 + QAIC_DBC_OFF(i);
+ qdev->dbc[i].dbc_base = qdev->bar_dbc + QAIC_DBC_OFF(i);
mhi_irq = init_msi(qdev, pdev);
if (mhi_irq < 0)
@@ -539,8 +575,8 @@ static int qaic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
- qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_0, mhi_irq,
- qdev->single_msi);
+ qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_mhi, mhi_irq,
+ qdev->single_msi, config->family);
if (IS_ERR(qdev->mhi_cntrl)) {
ret = PTR_ERR(qdev->mhi_cntrl);
qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
@@ -607,8 +643,9 @@ static struct mhi_driver qaic_mhi_driver = {
};
static const struct pci_device_id qaic_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_QCOM, PCI_DEV_AIC080), },
- { PCI_DEVICE(PCI_VENDOR_ID_QCOM, PCI_DEV_AIC100), },
+ { PCI_DEVICE_DATA(QCOM, AIC080, (kernel_ulong_t)&aic080_config), },
+ { PCI_DEVICE_DATA(QCOM, AIC100, (kernel_ulong_t)&aic100_config), },
+ { PCI_DEVICE_DATA(QCOM, AIC200, (kernel_ulong_t)&aic200_config), },
{ }
};
MODULE_DEVICE_TABLE(pci, qaic_ids);
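The ID-table rework above is what forces the PCI_DEV_AIC* to PCI_DEVICE_ID_QCOM_AIC* renames: PCI_DEVICE_DATA(vend, dev, data) token-pastes PCI_VENDOR_ID_<vend> and PCI_DEVICE_ID_<vend>_<dev>, and stores the data pointer in pci_device_id.driver_data for the probe callback to cast back. A reduced sketch of that pairing with an invented vendor/device and config struct (not real hardware):

    #include <linux/module.h>
    #include <linux/pci.h>

    #define PCI_VENDOR_ID_DEMO      0xabcd  /* illustrative IDs only */
    #define PCI_DEVICE_ID_DEMO_CARD 0x1234

    struct demo_config {
        unsigned int num_queues;
    };

    static const struct demo_config demo_card_config = {
        .num_queues = 16,
    };

    static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
        /* driver_data carries the pointer registered in the ID table. */
        const struct demo_config *cfg = (const struct demo_config *)id->driver_data;

        pci_info(pdev, "probing with %u queues\n", cfg->num_queues);
        return 0;
    }

    static const struct pci_device_id demo_ids[] = {
        { PCI_DEVICE_DATA(DEMO, CARD, (kernel_ulong_t)&demo_card_config) },
        { }
    };
    MODULE_DEVICE_TABLE(pci, demo_ids);

    static struct pci_driver demo_driver = {
        .name = "demo",
        .id_table = demo_ids,
        .probe = demo_probe,
    };
    module_pci_driver(demo_driver);
    MODULE_LICENSE("GPL");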
diff --git a/drivers/accel/qaic/qaic_timesync.c b/drivers/accel/qaic/qaic_timesync.c
index 301f4462d51b..2473c66309d4 100644
--- a/drivers/accel/qaic/qaic_timesync.c
+++ b/drivers/accel/qaic/qaic_timesync.c
@@ -201,7 +201,7 @@ static int qaic_timesync_probe(struct mhi_device *mhi_dev, const struct mhi_devi
goto free_sync_msg;
/* Qtimer register pointer */
- mqtsdev->qtimer_addr = qdev->bar_0 + QTIMER_REG_OFFSET;
+ mqtsdev->qtimer_addr = qdev->bar_mhi + QTIMER_REG_OFFSET;
timer_setup(timer, qaic_timesync_timer, 0);
timer->expires = jiffies + msecs_to_jiffies(timesync_delay_ms);
add_timer(timer);
diff --git a/drivers/accel/qaic/sahara.c b/drivers/accel/qaic/sahara.c
index 21d58aed0deb..3ebcc1f7ff58 100644
--- a/drivers/accel/qaic/sahara.c
+++ b/drivers/accel/qaic/sahara.c
@@ -160,7 +160,7 @@ struct sahara_context {
struct work_struct fw_work;
struct work_struct dump_work;
struct mhi_device *mhi_dev;
- const char **image_table;
+ const char * const *image_table;
u32 table_size;
u32 active_image_id;
const struct firmware *firmware;
@@ -177,7 +177,7 @@ struct sahara_context {
bool is_mem_dump_mode;
};
-static const char *aic100_image_table[] = {
+static const char * const aic100_image_table[] = {
[1] = "qcom/aic100/fw1.bin",
[2] = "qcom/aic100/fw2.bin",
[4] = "qcom/aic100/fw4.bin",
@@ -188,6 +188,34 @@ static const char *aic100_image_table[] = {
[10] = "qcom/aic100/fw10.bin",
};
+static const char * const aic200_image_table[] = {
+ [5] = "qcom/aic200/uefi.elf",
+ [12] = "qcom/aic200/aic200-nsp.bin",
+ [23] = "qcom/aic200/aop.mbn",
+ [32] = "qcom/aic200/tz.mbn",
+ [33] = "qcom/aic200/hypvm.mbn",
+ [39] = "qcom/aic200/aic200_abl.elf",
+ [40] = "qcom/aic200/apdp.mbn",
+ [41] = "qcom/aic200/devcfg.mbn",
+ [42] = "qcom/aic200/sec.elf",
+ [43] = "qcom/aic200/aic200-hlos.elf",
+ [49] = "qcom/aic200/shrm.elf",
+ [50] = "qcom/aic200/cpucp.elf",
+ [51] = "qcom/aic200/aop_devcfg.mbn",
+ [57] = "qcom/aic200/cpucp_dtbs.elf",
+ [62] = "qcom/aic200/uefi_dtbs.elf",
+ [63] = "qcom/aic200/xbl_ac_config.mbn",
+ [64] = "qcom/aic200/tz_ac_config.mbn",
+ [65] = "qcom/aic200/hyp_ac_config.mbn",
+ [66] = "qcom/aic200/pdp.elf",
+ [67] = "qcom/aic200/pdp_cdb.elf",
+ [68] = "qcom/aic200/sdi.mbn",
+ [69] = "qcom/aic200/dcd.mbn",
+ [73] = "qcom/aic200/gearvm.mbn",
+ [74] = "qcom/aic200/sti.bin",
+ [75] = "qcom/aic200/pvs.bin",
+};
+
static int sahara_find_image(struct sahara_context *context, u32 image_id)
{
int ret;
@@ -748,8 +776,15 @@ static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_
context->mhi_dev = mhi_dev;
INIT_WORK(&context->fw_work, sahara_processing);
INIT_WORK(&context->dump_work, sahara_dump_processing);
- context->image_table = aic100_image_table;
- context->table_size = ARRAY_SIZE(aic100_image_table);
+
+ if (!strcmp(mhi_dev->mhi_cntrl->name, "AIC200")) {
+ context->image_table = aic200_image_table;
+ context->table_size = ARRAY_SIZE(aic200_image_table);
+ } else {
+ context->image_table = aic100_image_table;
+ context->table_size = ARRAY_SIZE(aic100_image_table);
+ }
+
context->active_image_id = SAHARA_IMAGE_ID_NONE;
dev_set_drvdata(&mhi_dev->dev, context);
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index d81b55f5068c..7f10aa38269d 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -452,7 +452,7 @@ config ACPI_SBS
the modules will be called sbs and sbshc.
config ACPI_HED
- tristate "Hardware Error Device"
+ bool "Hardware Error Device"
help
This driver supports the Hardware Error Device (PNP0C33),
which is used to report some hardware errors notified via
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 40208a0f5dfb..797070fc9a3f 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -5,6 +5,10 @@
ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
+ifdef CONFIG_TRACE_BRANCH_PROFILING
+CFLAGS_processor_idle.o += -DDISABLE_BRANCH_PROFILING
+endif
+
#
# ACPI Boot-Time Table Parsing
#
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index 01abf26764b0..435ec60a9682 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -120,8 +120,6 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = {
{"IBM0071"},
/* smsc-ircc2 */
{"SMCf010"},
- /* sb1000 */
- {"GIC1000"},
/* parport_pc */
{"PNP0400"}, /* Standard LPT Printer Port */
{"PNP0401"}, /* ECP Printer Port */
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index a972831dbd66..efdadc74e3f4 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -27,6 +27,7 @@
#include <linux/acpi.h>
#include <acpi/video.h>
#include <linux/uaccess.h>
+#include <linux/string_choices.h>
#define ACPI_VIDEO_BUS_NAME "Video Bus"
#define ACPI_VIDEO_DEVICE_NAME "Video Device"
@@ -2039,9 +2040,9 @@ static int acpi_video_bus_add(struct acpi_device *device)
pr_info("%s [%s] (multi-head: %s rom: %s post: %s)\n",
ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
- video->flags.multihead ? "yes" : "no",
- video->flags.rom ? "yes" : "no",
- video->flags.post ? "yes" : "no");
+ str_yes_no(video->flags.multihead),
+ str_yes_no(video->flags.rom),
+ str_yes_no(video->flags.post));
mutex_lock(&video_list_lock);
list_add_tail(&video->entry, &video_bus_head);
mutex_unlock(&video_list_lock);
diff --git a/drivers/acpi/arm64/dma.c b/drivers/acpi/arm64/dma.c
index 52b2abf88689..f30f138352b7 100644
--- a/drivers/acpi/arm64/dma.c
+++ b/drivers/acpi/arm64/dma.c
@@ -26,6 +26,11 @@ void acpi_arch_dma_setup(struct device *dev)
else
end = (1ULL << 32) - 1;
+ if (dev->dma_range_map) {
+ dev_dbg(dev, "dma_range_map already set\n");
+ return;
+ }
+
ret = acpi_dma_get_range(dev, &map);
if (!ret && map) {
end = dma_range_map_max(map);
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 7773e6b860e7..90b09840536d 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -24,6 +24,7 @@
#define ACPI_BUTTON_CLASS "button"
#define ACPI_BUTTON_FILE_STATE "state"
#define ACPI_BUTTON_TYPE_UNKNOWN 0x00
+#define ACPI_BUTTON_NOTIFY_WAKE 0x02
#define ACPI_BUTTON_NOTIFY_STATUS 0x80
#define ACPI_BUTTON_SUBCLASS_POWER "power"
@@ -443,7 +444,12 @@ static void acpi_button_notify(acpi_handle handle, u32 event, void *data)
struct input_dev *input;
int keycode;
- if (event != ACPI_BUTTON_NOTIFY_STATUS) {
+ switch (event) {
+ case ACPI_BUTTON_NOTIFY_STATUS:
+ break;
+ case ACPI_BUTTON_NOTIFY_WAKE:
+ break;
+ default:
acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n",
event);
return;
@@ -629,7 +635,7 @@ static int acpi_button_add(struct acpi_device *device)
break;
default:
status = acpi_install_notify_handler(device->handle,
- ACPI_DEVICE_NOTIFY, handler,
+ ACPI_ALL_NOTIFY, handler,
device);
break;
}
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 3b4d048c4941..dbd4446025ec 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1161,7 +1161,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_complete);
*/
int acpi_subsys_suspend(struct device *dev)
{
- if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
+ if (!dev_pm_smart_suspend(dev) ||
acpi_dev_needs_resume(dev, ACPI_COMPANION(dev)))
pm_runtime_resume(dev);
@@ -1320,7 +1320,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_restore_early);
*/
int acpi_subsys_poweroff(struct device *dev)
{
- if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
+ if (!dev_pm_smart_suspend(dev) ||
acpi_dev_needs_resume(dev, ACPI_COMPANION(dev)))
pm_runtime_resume(dev);
diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h
index 488b51e2cb31..15eba1c70e66 100644
--- a/drivers/acpi/fan.h
+++ b/drivers/acpi/fan.h
@@ -49,6 +49,7 @@ struct acpi_fan_fst {
struct acpi_fan {
bool acpi4;
+ bool has_fst;
struct acpi_fan_fif fif;
struct acpi_fan_fps *fps;
int fps_count;
diff --git a/drivers/acpi/fan_attr.c b/drivers/acpi/fan_attr.c
index f4f6e2381f1d..22d29ac2447c 100644
--- a/drivers/acpi/fan_attr.c
+++ b/drivers/acpi/fan_attr.c
@@ -75,15 +75,6 @@ int acpi_fan_create_attributes(struct acpi_device *device)
struct acpi_fan *fan = acpi_driver_data(device);
int i, status;
- sysfs_attr_init(&fan->fine_grain_control.attr);
- fan->fine_grain_control.show = show_fine_grain_control;
- fan->fine_grain_control.store = NULL;
- fan->fine_grain_control.attr.name = "fine_grain_control";
- fan->fine_grain_control.attr.mode = 0444;
- status = sysfs_create_file(&device->dev.kobj, &fan->fine_grain_control.attr);
- if (status)
- return status;
-
/* _FST is present if we are here */
sysfs_attr_init(&fan->fst_speed.attr);
fan->fst_speed.show = show_fan_speed;
@@ -92,7 +83,19 @@ int acpi_fan_create_attributes(struct acpi_device *device)
fan->fst_speed.attr.mode = 0444;
status = sysfs_create_file(&device->dev.kobj, &fan->fst_speed.attr);
if (status)
- goto rem_fine_grain_attr;
+ return status;
+
+ if (!fan->acpi4)
+ return 0;
+
+ sysfs_attr_init(&fan->fine_grain_control.attr);
+ fan->fine_grain_control.show = show_fine_grain_control;
+ fan->fine_grain_control.store = NULL;
+ fan->fine_grain_control.attr.name = "fine_grain_control";
+ fan->fine_grain_control.attr.mode = 0444;
+ status = sysfs_create_file(&device->dev.kobj, &fan->fine_grain_control.attr);
+ if (status)
+ goto rem_fst_attr;
for (i = 0; i < fan->fps_count; ++i) {
struct acpi_fan_fps *fps = &fan->fps[i];
@@ -109,18 +112,18 @@ int acpi_fan_create_attributes(struct acpi_device *device)
for (j = 0; j < i; ++j)
sysfs_remove_file(&device->dev.kobj, &fan->fps[j].dev_attr.attr);
- goto rem_fst_attr;
+ goto rem_fine_grain_attr;
}
}
return 0;
-rem_fst_attr:
- sysfs_remove_file(&device->dev.kobj, &fan->fst_speed.attr);
-
rem_fine_grain_attr:
sysfs_remove_file(&device->dev.kobj, &fan->fine_grain_control.attr);
+rem_fst_attr:
+ sysfs_remove_file(&device->dev.kobj, &fan->fst_speed.attr);
+
return status;
}
@@ -129,9 +132,13 @@ void acpi_fan_delete_attributes(struct acpi_device *device)
struct acpi_fan *fan = acpi_driver_data(device);
int i;
+ sysfs_remove_file(&device->dev.kobj, &fan->fst_speed.attr);
+
+ if (!fan->acpi4)
+ return;
+
for (i = 0; i < fan->fps_count; ++i)
sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr);
- sysfs_remove_file(&device->dev.kobj, &fan->fst_speed.attr);
sysfs_remove_file(&device->dev.kobj, &fan->fine_grain_control.attr);
}
diff --git a/drivers/acpi/fan_core.c b/drivers/acpi/fan_core.c
index 10016f52f4f4..8ad12ad3aaaf 100644
--- a/drivers/acpi/fan_core.c
+++ b/drivers/acpi/fan_core.c
@@ -203,12 +203,16 @@ static const struct thermal_cooling_device_ops fan_cooling_ops = {
* --------------------------------------------------------------------------
*/
+static bool acpi_fan_has_fst(struct acpi_device *device)
+{
+ return acpi_has_method(device->handle, "_FST");
+}
+
static bool acpi_fan_is_acpi4(struct acpi_device *device)
{
return acpi_has_method(device->handle, "_FIF") &&
acpi_has_method(device->handle, "_FPS") &&
- acpi_has_method(device->handle, "_FSL") &&
- acpi_has_method(device->handle, "_FST");
+ acpi_has_method(device->handle, "_FSL");
}
static int acpi_fan_get_fif(struct acpi_device *device)
@@ -327,7 +331,12 @@ static int acpi_fan_probe(struct platform_device *pdev)
device->driver_data = fan;
platform_set_drvdata(pdev, fan);
- if (acpi_fan_is_acpi4(device)) {
+ if (acpi_fan_has_fst(device)) {
+ fan->has_fst = true;
+ fan->acpi4 = acpi_fan_is_acpi4(device);
+ }
+
+ if (fan->acpi4) {
result = acpi_fan_get_fif(device);
if (result)
return result;
@@ -335,7 +344,9 @@ static int acpi_fan_probe(struct platform_device *pdev)
result = acpi_fan_get_fps(device);
if (result)
return result;
+ }
+ if (fan->has_fst) {
result = devm_acpi_fan_create_hwmon(device);
if (result)
return result;
@@ -343,9 +354,9 @@ static int acpi_fan_probe(struct platform_device *pdev)
result = acpi_fan_create_attributes(device);
if (result)
return result;
+ }
- fan->acpi4 = true;
- } else {
+ if (!fan->acpi4) {
result = acpi_device_update_power(device, NULL);
if (result) {
dev_err(&device->dev, "Failed to set initial power state\n");
@@ -391,7 +402,7 @@ err_remove_link:
err_unregister:
thermal_cooling_device_unregister(cdev);
err_end:
- if (fan->acpi4)
+ if (fan->has_fst)
acpi_fan_delete_attributes(device);
return result;
@@ -401,7 +412,7 @@ static void acpi_fan_remove(struct platform_device *pdev)
{
struct acpi_fan *fan = platform_get_drvdata(pdev);
- if (fan->acpi4) {
+ if (fan->has_fst) {
struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
acpi_fan_delete_attributes(device);
diff --git a/drivers/acpi/fan_hwmon.c b/drivers/acpi/fan_hwmon.c
index bd0d31a398fa..e8d90605106e 100644
--- a/drivers/acpi/fan_hwmon.c
+++ b/drivers/acpi/fan_hwmon.c
@@ -43,6 +43,10 @@ static umode_t acpi_fan_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_
case hwmon_fan_input:
return 0444;
case hwmon_fan_target:
+ /* Only acpi4 fans support fan control. */
+ if (!fan->acpi4)
+ return 0;
+
/*
* When in fine grain control mode, not every fan control value
* has an associated fan performance state.
@@ -57,6 +61,10 @@ static umode_t acpi_fan_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_
case hwmon_power:
switch (attr) {
case hwmon_power_input:
+ /* Only acpi4 fans support fan control. */
+ if (!fan->acpi4)
+ return 0;
+
/*
* When in fine grain control mode, not every fan control value
* has an associated fan performance state.
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c
index 7652515a6be1..3499f86c411e 100644
--- a/drivers/acpi/hed.c
+++ b/drivers/acpi/hed.c
@@ -80,7 +80,12 @@ static struct acpi_driver acpi_hed_driver = {
.remove = acpi_hed_remove,
},
};
-module_acpi_driver(acpi_hed_driver);
+
+static int __init acpi_hed_driver_init(void)
+{
+ return acpi_bus_register_driver(&acpi_hed_driver);
+}
+subsys_initcall(acpi_hed_driver_init);
MODULE_AUTHOR("Huang Ying");
MODULE_DESCRIPTION("ACPI Hardware Error Device Driver");
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index 00ac0d7bb8c9..ce815d7cb8f6 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -51,6 +51,7 @@ int node_to_pxm(int node)
return PXM_INVAL;
return node_to_pxm_map[node];
}
+EXPORT_SYMBOL_GPL(node_to_pxm);
static void __acpi_map_pxm_to_node(int pxm, int node)
{
diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c
index ef9444482db1..671407fc2bd4 100644
--- a/drivers/acpi/platform_profile.c
+++ b/drivers/acpi/platform_profile.c
@@ -289,14 +289,14 @@ static int _remove_hidden_choices(struct device *dev, void *arg)
/**
* platform_profile_choices_show - Show the available profile choices for legacy sysfs interface
- * @dev: The device
+ * @kobj: The kobject
* @attr: The attribute
* @buf: The buffer to write to
*
* Return: The number of bytes written
*/
-static ssize_t platform_profile_choices_show(struct device *dev,
- struct device_attribute *attr,
+static ssize_t platform_profile_choices_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
char *buf)
{
struct aggregate_choices_data data = {
@@ -371,14 +371,14 @@ static int _store_and_notify(struct device *dev, void *data)
/**
* platform_profile_show - Show the current profile for legacy sysfs interface
- * @dev: The device
+ * @kobj: The kobject
* @attr: The attribute
* @buf: The buffer to write to
*
* Return: The number of bytes written
*/
-static ssize_t platform_profile_show(struct device *dev,
- struct device_attribute *attr,
+static ssize_t platform_profile_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
char *buf)
{
enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
@@ -400,15 +400,15 @@ static ssize_t platform_profile_show(struct device *dev,
/**
* platform_profile_store - Set the profile for legacy sysfs interface
- * @dev: The device
+ * @kobj: The kobject
* @attr: The attribute
* @buf: The buffer to read from
* @count: The number of bytes to read
*
* Return: The number of bytes read
*/
-static ssize_t platform_profile_store(struct device *dev,
- struct device_attribute *attr,
+static ssize_t platform_profile_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
const char *buf, size_t count)
{
struct aggregate_choices_data data = {
@@ -442,12 +442,12 @@ static ssize_t platform_profile_store(struct device *dev,
return count;
}
-static DEVICE_ATTR_RO(platform_profile_choices);
-static DEVICE_ATTR_RW(platform_profile);
+static struct kobj_attribute attr_platform_profile_choices = __ATTR_RO(platform_profile_choices);
+static struct kobj_attribute attr_platform_profile = __ATTR_RW(platform_profile);
static struct attribute *platform_profile_attrs[] = {
- &dev_attr_platform_profile_choices.attr,
- &dev_attr_platform_profile.attr,
+ &attr_platform_profile_choices.attr,
+ &attr_platform_profile.attr,
NULL
};
@@ -627,24 +627,23 @@ EXPORT_SYMBOL_GPL(platform_profile_register);
/**
* platform_profile_remove - Unregisters a platform profile class device
* @dev: Class device
- *
- * Return: 0
*/
-int platform_profile_remove(struct device *dev)
+void platform_profile_remove(struct device *dev)
{
- struct platform_profile_handler *pprof = to_pprof_handler(dev);
- int id;
+ struct platform_profile_handler *pprof;
+
+ if (IS_ERR_OR_NULL(dev))
+ return;
+
+ pprof = to_pprof_handler(dev);
+
guard(mutex)(&profile_lock);
- id = pprof->minor;
+ ida_free(&platform_profile_ida, pprof->minor);
device_unregister(&pprof->dev);
- ida_free(&platform_profile_ida, id);
sysfs_notify(acpi_kobj, NULL, "platform_profile");
-
sysfs_update_group(acpi_kobj, &platform_profile_group);
-
- return 0;
}
EXPORT_SYMBOL_GPL(platform_profile_remove);
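The conversion above is needed because these legacy files live directly under acpi_kobj (a bare kobject), not under a struct device, so kobj_attribute callbacks are the matching type. A minimal sketch of the kobj_attribute pattern, with hypothetical names:

static int foo_value;

static ssize_t foo_show(struct kobject *kobj, struct kobj_attribute *attr,
                        char *buf)
{
        return sysfs_emit(buf, "%d\n", foo_value);
}

static ssize_t foo_store(struct kobject *kobj, struct kobj_attribute *attr,
                         const char *buf, size_t count)
{
        int ret = kstrtoint(buf, 0, &foo_value);

        return ret ? ret : count;
}

/* __ATTR_RW() expects <name>_show()/<name>_store() */
static struct kobj_attribute foo_attr = __ATTR_RW(foo);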
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 25174c24d3d7..b7243d7563b1 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -29,6 +29,7 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include <linux/acpi.h>
@@ -197,7 +198,7 @@ static int __get_state(acpi_handle handle, u8 *state)
cur_state = sta & ACPI_POWER_RESOURCE_STATE_ON;
acpi_handle_debug(handle, "Power resource is %s\n",
- cur_state ? "on" : "off");
+ str_on_off(cur_state));
*state = cur_state;
return 0;
@@ -240,7 +241,7 @@ static int acpi_power_get_list_state(struct list_head *list, u8 *state)
break;
}
- pr_debug("Power resource list is %s\n", cur_state ? "on" : "off");
+ pr_debug("Power resource list is %s\n", str_on_off(cur_state));
*state = cur_state;
return 0;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 698897b29de2..586cc7d1d8aa 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -590,6 +590,8 @@ static void acpi_idle_play_dead(struct cpuidle_device *dev, int index)
raw_safe_halt();
else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
io_idle(cx->address);
+ } else if (cx->entry_method == ACPI_CSTATE_FFH) {
+ acpi_processor_ffh_play_dead(cx);
} else
return;
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 9f4efa8f75a6..fb1fe9f3b1a3 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1632,13 +1632,6 @@ static int acpi_iommu_configure_id(struct device *dev, const u32 *id_in)
err = viot_iommu_configure(dev);
mutex_unlock(&iommu_probe_device_lock);
- /*
- * If we have reason to believe the IOMMU driver missed the initial
- * iommu_probe_device() call for dev, replay it to get things in order.
- */
- if (!err && dev->bus)
- err = iommu_probe_device(dev);
-
return err;
}
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 95982c098d5b..0c874186f8ae 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -803,7 +803,7 @@ static int acpi_thermal_add(struct acpi_device *device)
acpi_thermal_aml_dependency_fix(tz);
- /* Get trip points [_CRT, _PSV, etc.] (required). */
+ /* Get trip points [_ACi, _PSV, etc.] (required). */
acpi_thermal_get_trip_points(tz);
crit_temp = acpi_thermal_get_critical_trip(tz);
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 8ef259b4d037..71482d639a6d 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -364,7 +364,8 @@ static int amba_dma_configure(struct device *dev)
ret = acpi_dma_configure(dev, attr);
}
- if (!ret && !drv->driver_managed_dma) {
+ /* @drv may not be valid when we're called from the IOMMU layer */
+ if (!ret && dev->driver && !drv->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index f813dbdc2346..163ac909bd06 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -63,6 +63,7 @@ enum board_ids {
board_ahci_pcs_quirk_no_devslp,
board_ahci_pcs_quirk_no_sntf,
board_ahci_yes_fbs,
+ board_ahci_yes_fbs_atapi_dma,
/* board IDs for specific chipsets in alphabetical order */
board_ahci_al,
@@ -188,6 +189,14 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
+ [board_ahci_yes_fbs_atapi_dma] = {
+ AHCI_HFLAGS (AHCI_HFLAG_YES_FBS |
+ AHCI_HFLAG_ATAPI_DMA_QUIRK),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
/* by chipsets */
[board_ahci_al] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_MSI),
@@ -589,6 +598,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
.driver_data = board_ahci_yes_fbs },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9215),
+ .driver_data = board_ahci_yes_fbs_atapi_dma },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9235),
@@ -1665,13 +1676,15 @@ static int ahci_get_irq_vector(struct ata_host *host, int port)
return pci_irq_vector(to_pci_dev(host->dev), port);
}
-static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
+static void ahci_init_irq(struct pci_dev *pdev, unsigned int n_ports,
struct ahci_host_priv *hpriv)
{
int nvec;
- if (hpriv->flags & AHCI_HFLAG_NO_MSI)
- return -ENODEV;
+ if (hpriv->flags & AHCI_HFLAG_NO_MSI) {
+ pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
+ return;
+ }
/*
* If number of MSIs is less than number of ports then Sharing Last
@@ -1685,7 +1698,7 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) {
hpriv->get_irq_vector = ahci_get_irq_vector;
hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
- return nvec;
+ return;
}
/*
@@ -1700,12 +1713,13 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
/*
* If the host is not capable of supporting per-port vectors, fall
- * back to single MSI before finally attempting single MSI-X.
+ * back to single MSI before finally attempting single MSI-X or
+ * a legacy INTx.
*/
nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
if (nvec == 1)
- return nvec;
- return pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
+ return;
+ pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX | PCI_IRQ_INTX);
}
static void ahci_mark_external_port(struct ata_port *ap)
@@ -1985,10 +1999,8 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
host->private_data = hpriv;
- if (ahci_init_msi(pdev, n_ports, hpriv) < 0) {
- /* legacy intx interrupts */
- pcim_intx(pdev, 1);
- }
+ ahci_init_irq(pdev, n_ports, hpriv);
+
hpriv->irq = pci_irq_vector(pdev, 0);
if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
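For reference, the allocate-with-fallback behaviour that the new ahci_init_irq() implements can be expressed with a single pci_alloc_irq_vectors() call when per-port vectors are not needed; a hedged sketch, not the driver's actual code:

/* Ask for up to one vector per port, accept any single MSI, MSI-X or
 * INTx vector as a fallback, then use vector 0 for the main handler. */
int nvec = pci_alloc_irq_vectors(pdev, 1, n_ports,
                                 PCI_IRQ_MSI | PCI_IRQ_MSIX | PCI_IRQ_INTX);
if (nvec < 0)
        return nvec;

hpriv->irq = pci_irq_vector(pdev, 0);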
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index c842e2de6ef9..2c10c8f440d1 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -246,6 +246,7 @@ enum {
AHCI_HFLAG_NO_SXS = BIT(26), /* SXS not supported */
AHCI_HFLAG_43BIT_ONLY = BIT(27), /* 43bit DMA addr limit */
AHCI_HFLAG_INTEL_PCS_QUIRK = BIT(28), /* apply Intel PCS quirk */
+ AHCI_HFLAG_ATAPI_DMA_QUIRK = BIT(29), /* force ATAPI to use DMA */
/* ap->flags bits */
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index e7ace4b10f15..22afa4ff860d 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1322,6 +1322,10 @@ static void ahci_dev_config(struct ata_device *dev)
{
struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
+ if ((dev->class == ATA_DEV_ATAPI) &&
+ (hpriv->flags & AHCI_HFLAG_ATAPI_DMA_QUIRK))
+ dev->quirks |= ATA_QUIRK_ATAPI_MOD16_DMA;
+
if (hpriv->flags & AHCI_HFLAG_SECT255) {
dev->max_sectors = 255;
ata_dev_info(dev,
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c085dd81ebe7..773799cfd443 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -88,6 +88,7 @@ struct ata_force_param {
unsigned int xfer_mask;
unsigned int quirk_on;
unsigned int quirk_off;
+ unsigned int pflags_on;
u16 lflags_on;
u16 lflags_off;
};
@@ -332,6 +333,35 @@ void ata_force_cbl(struct ata_port *ap)
}
/**
+ * ata_force_pflags - force port flags according to libata.force
+ * @ap: ATA port of interest
+ *
+ * Force port flags according to libata.force and whine about it.
+ *
+ * LOCKING:
+ * EH context.
+ */
+static void ata_force_pflags(struct ata_port *ap)
+{
+ int i;
+
+ for (i = ata_force_tbl_size - 1; i >= 0; i--) {
+ const struct ata_force_ent *fe = &ata_force_tbl[i];
+
+ if (fe->port != -1 && fe->port != ap->print_id)
+ continue;
+
+ /* let pflags stack */
+ if (fe->param.pflags_on) {
+ ap->pflags |= fe->param.pflags_on;
+ ata_port_notice(ap,
+ "FORCE: port flag 0x%x forced -> 0x%x\n",
+ fe->param.pflags_on, ap->pflags);
+ }
+ }
+}
+
+/**
* ata_force_link_limits - force link limits according to libata.force
* @link: ATA link of interest
*
@@ -486,6 +516,7 @@ static void ata_force_quirks(struct ata_device *dev)
}
}
#else
+static inline void ata_force_pflags(struct ata_port *ap) { }
static inline void ata_force_link_limits(struct ata_link *link) { }
static inline void ata_force_xfermask(struct ata_device *dev) { }
static inline void ata_force_quirks(struct ata_device *dev) { }
@@ -2243,7 +2274,7 @@ static void ata_dev_config_ncq_non_data(struct ata_device *dev)
if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
ata_dev_warn(dev,
- "NCQ Send/Recv Log not supported\n");
+ "NCQ Non-Data Log not supported\n");
return;
}
err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
@@ -2845,6 +2876,10 @@ int ata_dev_configure(struct ata_device *dev)
(id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
dev->quirks |= ATA_QUIRK_NOLPM;
+ if (dev->quirks & ATA_QUIRK_NO_LPM_ON_ATI &&
+ ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI))
+ dev->quirks |= ATA_QUIRK_NOLPM;
+
if (ap->flags & ATA_FLAG_NO_LPM)
dev->quirks |= ATA_QUIRK_NOLPM;
@@ -3897,6 +3932,7 @@ static const char * const ata_quirk_names[] = {
[__ATA_QUIRK_MAX_SEC_1024] = "maxsec1024",
[__ATA_QUIRK_MAX_TRIM_128M] = "maxtrim128m",
[__ATA_QUIRK_NO_NCQ_ON_ATI] = "noncqonati",
+ [__ATA_QUIRK_NO_LPM_ON_ATI] = "nolpmonati",
[__ATA_QUIRK_NO_ID_DEV_LOG] = "noiddevlog",
[__ATA_QUIRK_NO_LOG_DIR] = "nologdir",
[__ATA_QUIRK_NO_FUA] = "nofua",
@@ -4142,13 +4178,16 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
ATA_QUIRK_ZERO_AFTER_TRIM },
{ "Samsung SSD 860*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
- ATA_QUIRK_NO_NCQ_ON_ATI },
+ ATA_QUIRK_NO_NCQ_ON_ATI |
+ ATA_QUIRK_NO_LPM_ON_ATI },
{ "Samsung SSD 870*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
- ATA_QUIRK_NO_NCQ_ON_ATI },
+ ATA_QUIRK_NO_NCQ_ON_ATI |
+ ATA_QUIRK_NO_LPM_ON_ATI },
{ "SAMSUNG*MZ7LH*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
- ATA_QUIRK_NO_NCQ_ON_ATI, },
+ ATA_QUIRK_NO_NCQ_ON_ATI |
+ ATA_QUIRK_NO_LPM_ON_ATI },
{ "FCCT*M500*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM },
@@ -4544,7 +4583,7 @@ int atapi_check_dma(struct ata_queued_cmd *qc)
*/
if (!(qc->dev->quirks & ATA_QUIRK_ATAPI_MOD16_DMA) &&
unlikely(qc->nbytes & 15))
- return 1;
+ return -EOPNOTSUPP;
if (ap->ops->check_atapi_dma)
return ap->ops->check_atapi_dma(qc);
@@ -5452,6 +5491,8 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
#endif
ata_sff_port_init(ap);
+ ata_force_pflags(ap);
+
return ap;
}
EXPORT_SYMBOL_GPL(ata_port_alloc);
@@ -6264,6 +6305,9 @@ EXPORT_SYMBOL_GPL(ata_platform_remove_one);
{ "no" #name, .lflags_on = (flags) }, \
{ #name, .lflags_off = (flags) }
+#define force_pflag_on(name, flags) \
+ { #name, .pflags_on = (flags) }
+
#define force_quirk_on(name, flag) \
{ #name, .quirk_on = (flag) }
@@ -6323,6 +6367,8 @@ static const struct ata_force_param force_tbl[] __initconst = {
force_lflag_on(rstonce, ATA_LFLAG_RST_ONCE),
force_lflag_onoff(dbdelay, ATA_LFLAG_NO_DEBOUNCE_DELAY),
+ force_pflag_on(external, ATA_PFLAG_EXTERNAL),
+
force_quirk_onoff(ncq, ATA_QUIRK_NONCQ),
force_quirk_onoff(ncqtrim, ATA_QUIRK_NO_NCQ_TRIM),
force_quirk_onoff(ncqati, ATA_QUIRK_NO_NCQ_ON_ATI),
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 3b303d4ae37a..16cd676eae1f 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1542,8 +1542,15 @@ unsigned int atapi_eh_request_sense(struct ata_device *dev,
tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf.command = ATA_CMD_PACKET;
- /* is it pointless to prefer PIO for "safety reasons"? */
- if (ap->flags & ATA_FLAG_PIO_DMA) {
+ /*
+ * Do not use DMA if the connected device only supports PIO, even if the
+ * port prefers PIO commands via DMA.
+ *
+ * Ideally, we should call atapi_check_dma() to check if it is safe for
+ * the LLD to use DMA for REQUEST_SENSE, but we don't have a qc.
+ * Since we cannot check the command, perhaps we should only use PIO?
+ */
+ if ((ap->flags & ATA_FLAG_PIO_DMA) && !(dev->flags & ATA_DFLAG_PIO)) {
tf.protocol = ATAPI_PROT_DMA;
tf.feature |= ATAPI_PKT_DMA;
} else {
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index dce24806a052..2d32125c16fd 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -935,9 +935,8 @@ static int octeon_cf_probe(struct platform_device *pdev)
ap->mwdma_mask = enable_dma ? ATA_MWDMA4 : 0;
/* True IDE mode needs a timer to poll for not-busy. */
- hrtimer_init(&cf_port->delayed_finish, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- cf_port->delayed_finish.function = octeon_cf_delayed_finish;
+ hrtimer_setup(&cf_port->delayed_finish, octeon_cf_delayed_finish, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
} else {
/* 16 bit but not True IDE */
base = cs0 + 0x800;
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 57cbf2cef618..4ecd8f33b082 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -25,6 +25,7 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
+#include <linux/string_choices.h>
#define DRV_NAME "sata_via"
#define DRV_VERSION "2.6"
@@ -359,7 +360,7 @@ static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
ata_port_info(ap,
"SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
- online ? "up" : "down", sstatus, scontrol);
+ str_up_down(online), sstatus, scontrol);
/* SStatus is read one more time */
svia_scr_read(link, SCR_STATUS, &sstatus);
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 8934e6ad5772..bedc6133f970 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -503,6 +503,7 @@ config HT16K33
config MAX6959
tristate "Maxim MAX6958/6959 7-segment LED controller"
depends on I2C
+ select BITREVERSE
select REGMAP_I2C
select LINEDISP
help
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 19b619376d48..09020bb8ad15 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -595,18 +595,19 @@ static int charlcd_init(struct charlcd *lcd)
return 0;
}
-struct charlcd *charlcd_alloc(void)
+struct charlcd *charlcd_alloc(unsigned int drvdata_size)
{
struct charlcd_priv *priv;
struct charlcd *lcd;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = kzalloc(sizeof(*priv) + drvdata_size, GFP_KERNEL);
if (!priv)
return NULL;
priv->esc_seq.len = -1;
lcd = &priv->lcd;
+ lcd->drvdata = priv->drvdata;
return lcd;
}
diff --git a/drivers/auxdisplay/charlcd.h b/drivers/auxdisplay/charlcd.h
index 4d4287209d04..d10b89740bca 100644
--- a/drivers/auxdisplay/charlcd.h
+++ b/drivers/auxdisplay/charlcd.h
@@ -51,7 +51,7 @@ struct charlcd {
unsigned long y;
} addr;
- void *drvdata;
+ void *drvdata; /* Set by charlcd_alloc() */
};
/**
@@ -95,7 +95,8 @@ struct charlcd_ops {
};
void charlcd_backlight(struct charlcd *lcd, enum charlcd_onoff on);
-struct charlcd *charlcd_alloc(void);
+
+struct charlcd *charlcd_alloc(unsigned int drvdata_size);
void charlcd_free(struct charlcd *lcd);
int charlcd_register(struct charlcd *lcd);
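A minimal sketch of the new allocation contract, assuming a hypothetical driver-private struct foo_lcd: charlcd_alloc() now sizes a single allocation for both objects and points lcd->drvdata at the driver portion.

struct foo_lcd {                        /* hypothetical per-driver data */
        struct i2c_client *i2c;
};

static int foo_probe(struct i2c_client *i2c)
{
        struct charlcd *lcd;
        struct foo_lcd *foo;
        int ret;

        lcd = charlcd_alloc(sizeof(*foo));      /* one allocation for both */
        if (!lcd)
                return -ENOMEM;

        foo = lcd->drvdata;                     /* set by charlcd_alloc() */
        foo->i2c = i2c;

        /* lcd->ops etc. would be set up here */
        ret = charlcd_register(lcd);
        if (ret)
                charlcd_free(lcd);              /* frees the drvdata too */
        return ret;
}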
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index 0526f0d90a79..cef42656c4b0 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -222,20 +222,17 @@ static int hd44780_probe(struct platform_device *pdev)
return -EINVAL;
}
- hdc = hd44780_common_alloc();
- if (!hdc)
- return -ENOMEM;
-
- lcd = charlcd_alloc();
+ lcd = hd44780_common_alloc();
if (!lcd)
- goto fail1;
+ return -ENOMEM;
hd = kzalloc(sizeof(*hd), GFP_KERNEL);
if (!hd)
goto fail2;
+ hdc = lcd->drvdata;
hdc->hd44780 = hd;
- lcd->drvdata = hdc;
+
for (i = 0; i < ifwidth; i++) {
hd->pins[base + i] = devm_gpiod_get_index(dev, "data", i,
GPIOD_OUT_LOW);
@@ -313,9 +310,7 @@ static int hd44780_probe(struct platform_device *pdev)
fail3:
kfree(hd);
fail2:
- kfree(lcd);
-fail1:
- kfree(hdc);
+ hd44780_common_free(lcd);
return ret;
}
@@ -326,9 +321,7 @@ static void hd44780_remove(struct platform_device *pdev)
charlcd_unregister(lcd);
kfree(hdc->hd44780);
- kfree(lcd->drvdata);
-
- kfree(lcd);
+ hd44780_common_free(lcd);
}
static const struct of_device_id hd44780_of_match[] = {
diff --git a/drivers/auxdisplay/hd44780_common.c b/drivers/auxdisplay/hd44780_common.c
index 4ef87c3118c0..1792fe2a4460 100644
--- a/drivers/auxdisplay/hd44780_common.c
+++ b/drivers/auxdisplay/hd44780_common.c
@@ -351,20 +351,28 @@ int hd44780_common_redefine_char(struct charlcd *lcd, char *esc)
}
EXPORT_SYMBOL_GPL(hd44780_common_redefine_char);
-struct hd44780_common *hd44780_common_alloc(void)
+struct charlcd *hd44780_common_alloc(void)
{
- struct hd44780_common *hd;
+ struct hd44780_common *hdc;
+ struct charlcd *lcd;
- hd = kzalloc(sizeof(*hd), GFP_KERNEL);
- if (!hd)
+ lcd = charlcd_alloc(sizeof(*hdc));
+ if (!lcd)
return NULL;
- hd->ifwidth = 8;
- hd->bwidth = DEFAULT_LCD_BWIDTH;
- hd->hwidth = DEFAULT_LCD_HWIDTH;
- return hd;
+ hdc = lcd->drvdata;
+ hdc->ifwidth = 8;
+ hdc->bwidth = DEFAULT_LCD_BWIDTH;
+ hdc->hwidth = DEFAULT_LCD_HWIDTH;
+ return lcd;
}
EXPORT_SYMBOL_GPL(hd44780_common_alloc);
+void hd44780_common_free(struct charlcd *lcd)
+{
+ charlcd_free(lcd);
+}
+EXPORT_SYMBOL_GPL(hd44780_common_free);
+
MODULE_DESCRIPTION("Common functions for HD44780 (and compatibles) LCD displays");
MODULE_LICENSE("GPL");
diff --git a/drivers/auxdisplay/hd44780_common.h b/drivers/auxdisplay/hd44780_common.h
index a16aa8c29c99..4c87f55722b6 100644
--- a/drivers/auxdisplay/hd44780_common.h
+++ b/drivers/auxdisplay/hd44780_common.h
@@ -30,4 +30,6 @@ int hd44780_common_blink(struct charlcd *lcd, enum charlcd_onoff on);
int hd44780_common_fontsize(struct charlcd *lcd, enum charlcd_fontsize size);
int hd44780_common_lines(struct charlcd *lcd, enum charlcd_lines lines);
int hd44780_common_redefine_char(struct charlcd *lcd, char *esc);
-struct hd44780_common *hd44780_common_alloc(void);
+
+struct charlcd *hd44780_common_alloc(void);
+void hd44780_common_free(struct charlcd *lcd);
diff --git a/drivers/auxdisplay/lcd2s.c b/drivers/auxdisplay/lcd2s.c
index a28daa4ffbf7..045dbef49dee 100644
--- a/drivers/auxdisplay/lcd2s.c
+++ b/drivers/auxdisplay/lcd2s.c
@@ -298,20 +298,18 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c)
I2C_FUNC_SMBUS_WRITE_BLOCK_DATA))
return -EIO;
- lcd2s = devm_kzalloc(&i2c->dev, sizeof(*lcd2s), GFP_KERNEL);
- if (!lcd2s)
- return -ENOMEM;
-
/* Test, if the display is responding */
err = lcd2s_i2c_smbus_write_byte(i2c, LCD2S_CMD_DISPLAY_OFF);
if (err < 0)
return err;
- lcd = charlcd_alloc();
+ lcd = charlcd_alloc(sizeof(*lcd2s));
if (!lcd)
return -ENOMEM;
- lcd->drvdata = lcd2s;
+ lcd->ops = &lcd2s_ops;
+
+ lcd2s = lcd->drvdata;
lcd2s->i2c = i2c;
lcd2s->charlcd = lcd;
@@ -326,8 +324,6 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c)
if (err)
goto fail1;
- lcd->ops = &lcd2s_ops;
-
err = charlcd_register(lcd2s->charlcd);
if (err)
goto fail1;
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index a731f28455b4..91ccb9789d43 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -831,18 +831,12 @@ static void lcd_init(void)
struct charlcd *charlcd;
struct hd44780_common *hdc;
- hdc = hd44780_common_alloc();
- if (!hdc)
+ charlcd = hd44780_common_alloc();
+ if (!charlcd)
return;
- charlcd = charlcd_alloc();
- if (!charlcd) {
- kfree(hdc);
- return;
- }
-
+ hdc = charlcd->drvdata;
hdc->hd44780 = &lcd;
- charlcd->drvdata = hdc;
/*
* Init lcd struct with load-time values to preserve exact
@@ -1664,7 +1658,7 @@ err_lcd_unreg:
if (lcd.enabled)
charlcd_unregister(lcd.charlcd);
err_unreg_device:
- kfree(lcd.charlcd);
+ hd44780_common_free(lcd.charlcd);
lcd.charlcd = NULL;
parport_unregister_device(pprt);
pprt = NULL;
@@ -1691,8 +1685,7 @@ static void panel_detach(struct parport *port)
if (lcd.enabled) {
charlcd_unregister(lcd.charlcd);
lcd.initialized = false;
- kfree(lcd.charlcd->drvdata);
- kfree(lcd.charlcd);
+ hd44780_common_free(lcd.charlcd);
lcd.charlcd = NULL;
}
diff --git a/drivers/auxdisplay/seg-led-gpio.c b/drivers/auxdisplay/seg-led-gpio.c
index f10c25e6bf12..dfb62e9ce9b4 100644
--- a/drivers/auxdisplay/seg-led-gpio.c
+++ b/drivers/auxdisplay/seg-led-gpio.c
@@ -36,8 +36,7 @@ static void seg_led_update(struct work_struct *work)
bitmap_set_value8(values, map_to_seg7(&map->map.seg7, linedisp->buf[0]), 0);
- gpiod_set_array_value_cansleep(priv->segment_gpios->ndescs, priv->segment_gpios->desc,
- priv->segment_gpios->info, values);
+ gpiod_multi_set_value_cansleep(priv->segment_gpios, values);
}
static int seg_led_linedisp_get_map_type(struct linedisp *linedisp)
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 3ebe77566788..af0029d30dbe 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -11,6 +11,7 @@
#include <linux/cleanup.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
+#include <linux/cpu_smt.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
@@ -28,7 +29,7 @@
static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
-DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 1;
+DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 0;
EXPORT_PER_CPU_SYMBOL_GPL(capacity_freq_ref);
static bool supports_scale_freq_counters(const struct cpumask *cpus)
@@ -293,13 +294,15 @@ void topology_normalize_cpu_scale(void)
capacity_scale = 1;
for_each_possible_cpu(cpu) {
- capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
+ capacity = raw_capacity[cpu] *
+ (per_cpu(capacity_freq_ref, cpu) ?: 1);
capacity_scale = max(capacity, capacity_scale);
}
pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
for_each_possible_cpu(cpu) {
- capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
+ capacity = raw_capacity[cpu] *
+ (per_cpu(capacity_freq_ref, cpu) ?: 1);
capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
capacity_scale);
topology_set_cpu_scale(cpu, capacity);
@@ -506,6 +509,10 @@ core_initcall(free_raw_capacity);
#endif
#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+
+/* Used to enable the SMT control */
+static unsigned int max_smt_thread_num = 1;
+
/*
* This function returns the logic cpu number of the node.
* There are basically three kinds of return values:
@@ -565,6 +572,8 @@ static int __init parse_core(struct device_node *core, int package_id,
i++;
} while (1);
+ max_smt_thread_num = max_t(unsigned int, max_smt_thread_num, i);
+
cpu = get_cpu_for_node(core);
if (cpu >= 0) {
if (!leaf) {
@@ -677,6 +686,17 @@ static int __init parse_socket(struct device_node *socket)
if (!has_socket)
ret = parse_cluster(socket, 0, -1, 0);
+ /*
+ * Reset max_smt_thread_num to 1 on failure: the framework must be
+ * told that SMT is not supported, even though max_smt_thread_num
+ * may already hold the SMT thread count of the cores that were
+ * parsed successfully.
+ */
+ if (ret)
+ max_smt_thread_num = 1;
+
+ cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
+
return ret;
}
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 741497324d78..a482708566bc 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -569,11 +569,28 @@ void component_master_del(struct device *parent,
}
EXPORT_SYMBOL_GPL(component_master_del);
+bool component_master_is_bound(struct device *parent,
+ const struct component_master_ops *ops)
+{
+ struct aggregate_device *adev;
+
+ guard(mutex)(&component_mutex);
+ adev = __aggregate_find(parent, ops);
+ if (!adev)
+ return 0;
+
+ return adev->bound;
+}
+EXPORT_SYMBOL_GPL(component_master_is_bound);
+
static void component_unbind(struct component *component,
struct aggregate_device *adev, void *data)
{
WARN_ON(!component->bound);
+ dev_dbg(adev->parent, "unbinding %s component %p (ops %ps)\n",
+ dev_name(component->dev), component, component->ops);
+
if (component->ops && component->ops->unbind)
component->ops->unbind(component->dev, adev->parent, data);
component->bound = false;
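A sketch of how the new component_master_is_bound() helper might be consumed; the master ops and parent_dev below are hypothetical, not part of this patch:

static int my_master_bind(struct device *dev)
{
        return component_bind_all(dev, NULL);
}

static void my_master_unbind(struct device *dev)
{
        component_unbind_all(dev, NULL);
}

static const struct component_master_ops my_master_ops = {
        .bind   = my_master_bind,
        .unbind = my_master_unbind,
};

/* In some other code path that must not run before the aggregate
 * device has bound: */
if (!component_master_is_bound(parent_dev, &my_master_ops))
        return -EPROBE_DEFER;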
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 2fde698430df..d2f9d3a59d6b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -5172,6 +5172,67 @@ void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
EXPORT_SYMBOL_GPL(set_secondary_fwnode);
/**
+ * device_remove_of_node - Remove an of_node from a device
+ * @dev: device whose device tree node is being removed
+ */
+void device_remove_of_node(struct device *dev)
+{
+ dev = get_device(dev);
+ if (!dev)
+ return;
+
+ if (!dev->of_node)
+ goto end;
+
+ if (dev->fwnode == of_fwnode_handle(dev->of_node))
+ dev->fwnode = NULL;
+
+ of_node_put(dev->of_node);
+ dev->of_node = NULL;
+
+end:
+ put_device(dev);
+}
+EXPORT_SYMBOL_GPL(device_remove_of_node);
+
+/**
+ * device_add_of_node - Add an of_node to an existing device
+ * @dev: device whose device tree node is being added
+ * @of_node: of_node to add
+ *
+ * Return: 0 on success or error code on failure.
+ */
+int device_add_of_node(struct device *dev, struct device_node *of_node)
+{
+ int ret;
+
+ if (!of_node)
+ return -EINVAL;
+
+ dev = get_device(dev);
+ if (!dev)
+ return -EINVAL;
+
+ if (dev->of_node) {
+ dev_err(dev, "Cannot replace node %pOF with %pOF\n",
+ dev->of_node, of_node);
+ ret = -EBUSY;
+ goto end;
+ }
+
+ dev->of_node = of_node_get(of_node);
+
+ if (!dev->fwnode)
+ dev->fwnode = of_fwnode_handle(of_node);
+
+ ret = 0;
+end:
+ put_device(dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(device_add_of_node);
+
+/**
* device_set_of_node_from_dev - reuse device-tree node of another device
* @dev: device whose device-tree node is being set
* @dev2: device whose device-tree node is being reused
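A hedged usage sketch for the new helpers; the node lookup and surrounding error handling are illustrative, and device_add_of_node() takes its own reference on the node:

struct device_node *np = of_find_node_by_name(NULL, "my-node");
int ret;

if (np) {
        ret = device_add_of_node(dev, np);
        of_node_put(np);        /* the device now holds its own reference */
        if (ret)
                return ret;
}

/* ... and before the device goes away: */
device_remove_of_node(dev);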
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 93e7779ef21e..d8a733ea5e1a 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -576,7 +576,10 @@ void *devres_open_group(struct device *dev, void *id, gfp_t gfp)
}
EXPORT_SYMBOL_GPL(devres_open_group);
-/* Find devres group with ID @id. If @id is NULL, look for the latest. */
+/*
+ * Find devres group with ID @id. If @id is NULL, look for the latest open
+ * group.
+ */
static struct devres_group *find_group(struct device *dev, void *id)
{
struct devres_node *node;
@@ -687,6 +690,13 @@ int devres_release_group(struct device *dev, void *id)
spin_unlock_irqrestore(&dev->devres_lock, flags);
release_nodes(dev, &todo);
+ } else if (list_empty(&dev->devres_head)) {
+ /*
+ * dev is probably dying via devres_release_all(): groups
+ * have already been removed and are on the process of
+ * being released - don't touch and don't warn.
+ */
+ spin_unlock_irqrestore(&dev->devres_lock, flags);
} else {
WARN_ON(1);
spin_unlock_irqrestore(&dev->devres_lock, flags);
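For context, devres_release_group() is typically the rollback side of a devres group "transaction"; a minimal sketch (hypothetical probe fragment, with dev and size coming from the caller):

void *buf;

if (!devres_open_group(dev, NULL, GFP_KERNEL))
        return -ENOMEM;

buf = devm_kzalloc(dev, size, GFP_KERNEL);
if (!buf) {
        devres_release_group(dev, NULL);   /* roll back the whole group */
        return -ENOMEM;
}

devres_close_group(dev, NULL);             /* commit: keep the resources */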
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index b848764ef018..6dd1a8860f1c 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -63,22 +63,6 @@ __setup("devtmpfs.mount=", mount_param);
static struct vfsmount *mnt;
-static struct dentry *public_dev_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
-{
- struct super_block *s = mnt->mnt_sb;
- int err;
-
- atomic_inc(&s->s_active);
- down_write(&s->s_umount);
- err = reconfigure_single(s, flags, data);
- if (err < 0) {
- deactivate_locked_super(s);
- return ERR_PTR(err);
- }
- return dget(s->s_root);
-}
-
static struct file_system_type internal_fs_type = {
.name = "devtmpfs",
#ifdef CONFIG_TMPFS
@@ -89,9 +73,40 @@ static struct file_system_type internal_fs_type = {
.kill_sb = kill_litter_super,
};
+/* Simply take a ref on the existing mount */
+static int devtmpfs_get_tree(struct fs_context *fc)
+{
+ struct super_block *sb = mnt->mnt_sb;
+
+ atomic_inc(&sb->s_active);
+ down_write(&sb->s_umount);
+ fc->root = dget(sb->s_root);
+ return 0;
+}
+
+/* Ops are filled in during init depending on underlying shmem or ramfs type */
+struct fs_context_operations devtmpfs_context_ops = {};
+
+/* Call the underlying initialization and set to our ops */
+static int devtmpfs_init_fs_context(struct fs_context *fc)
+{
+ int ret;
+#ifdef CONFIG_TMPFS
+ ret = shmem_init_fs_context(fc);
+#else
+ ret = ramfs_init_fs_context(fc);
+#endif
+ if (ret < 0)
+ return ret;
+
+ fc->ops = &devtmpfs_context_ops;
+
+ return 0;
+}
+
static struct file_system_type dev_fs_type = {
.name = "devtmpfs",
- .mount = public_dev_mount,
+ .init_fs_context = devtmpfs_init_fs_context,
};
static int devtmpfs_submit_req(struct req *req, const char *tmp)
@@ -160,18 +175,17 @@ static int dev_mkdir(const char *name, umode_t mode)
{
struct dentry *dentry;
struct path path;
- int err;
dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- err = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode);
- if (!err)
+ dentry = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode);
+ if (!IS_ERR(dentry))
/* mark as kernel-created inode */
d_inode(dentry)->i_private = &thread;
done_path_create(&path, dentry);
- return err;
+ return PTR_ERR_OR_ZERO(dentry);
}
static int create_path(const char *nodepath)
@@ -245,15 +259,12 @@ static int dev_rmdir(const char *name)
dentry = kern_path_locked(name, &parent);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (d_really_is_positive(dentry)) {
- if (d_inode(dentry)->i_private == &thread)
- err = vfs_rmdir(&nop_mnt_idmap, d_inode(parent.dentry),
- dentry);
- else
- err = -EPERM;
- } else {
- err = -ENOENT;
- }
+ if (d_inode(dentry)->i_private == &thread)
+ err = vfs_rmdir(&nop_mnt_idmap, d_inode(parent.dentry),
+ dentry);
+ else
+ err = -EPERM;
+
dput(dentry);
inode_unlock(d_inode(parent.dentry));
path_put(&parent);
@@ -310,6 +321,8 @@ static int handle_remove(const char *nodename, struct device *dev)
{
struct path parent;
struct dentry *dentry;
+ struct kstat stat;
+ struct path p;
int deleted = 0;
int err;
@@ -317,32 +330,28 @@ static int handle_remove(const char *nodename, struct device *dev)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (d_really_is_positive(dentry)) {
- struct kstat stat;
- struct path p = {.mnt = parent.mnt, .dentry = dentry};
- err = vfs_getattr(&p, &stat, STATX_TYPE | STATX_MODE,
- AT_STATX_SYNC_AS_STAT);
- if (!err && dev_mynode(dev, d_inode(dentry), &stat)) {
- struct iattr newattrs;
- /*
- * before unlinking this node, reset permissions
- * of possible references like hardlinks
- */
- newattrs.ia_uid = GLOBAL_ROOT_UID;
- newattrs.ia_gid = GLOBAL_ROOT_GID;
- newattrs.ia_mode = stat.mode & ~0777;
- newattrs.ia_valid =
- ATTR_UID|ATTR_GID|ATTR_MODE;
- inode_lock(d_inode(dentry));
- notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL);
- inode_unlock(d_inode(dentry));
- err = vfs_unlink(&nop_mnt_idmap, d_inode(parent.dentry),
- dentry, NULL);
- if (!err || err == -ENOENT)
- deleted = 1;
- }
- } else {
- err = -ENOENT;
+ p.mnt = parent.mnt;
+ p.dentry = dentry;
+ err = vfs_getattr(&p, &stat, STATX_TYPE | STATX_MODE,
+ AT_STATX_SYNC_AS_STAT);
+ if (!err && dev_mynode(dev, d_inode(dentry), &stat)) {
+ struct iattr newattrs;
+ /*
+ * before unlinking this node, reset permissions
+ * of possible references like hardlinks
+ */
+ newattrs.ia_uid = GLOBAL_ROOT_UID;
+ newattrs.ia_gid = GLOBAL_ROOT_GID;
+ newattrs.ia_mode = stat.mode & ~0777;
+ newattrs.ia_valid =
+ ATTR_UID|ATTR_GID|ATTR_MODE;
+ inode_lock(d_inode(dentry));
+ notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL);
+ inode_unlock(d_inode(dentry));
+ err = vfs_unlink(&nop_mnt_idmap, d_inode(parent.dentry),
+ dentry, NULL);
+ if (!err || err == -ENOENT)
+ deleted = 1;
}
dput(dentry);
inode_unlock(d_inode(parent.dentry));
@@ -443,6 +452,31 @@ static int __ref devtmpfsd(void *p)
}
/*
+ * Get the underlying (shmem/ramfs) context ops to build ours
+ */
+static int devtmpfs_configure_context(void)
+{
+ struct fs_context *fc;
+
+ fc = fs_context_for_reconfigure(mnt->mnt_root, mnt->mnt_sb->s_flags,
+ MS_RMT_MASK);
+ if (IS_ERR(fc))
+ return PTR_ERR(fc);
+
+ /* Set up devtmpfs_context_ops based on underlying type */
+ devtmpfs_context_ops.free = fc->ops->free;
+ devtmpfs_context_ops.dup = fc->ops->dup;
+ devtmpfs_context_ops.parse_param = fc->ops->parse_param;
+ devtmpfs_context_ops.parse_monolithic = fc->ops->parse_monolithic;
+ devtmpfs_context_ops.get_tree = &devtmpfs_get_tree;
+ devtmpfs_context_ops.reconfigure = fc->ops->reconfigure;
+
+ put_fs_context(fc);
+
+ return 0;
+}
+
+/*
* Create devtmpfs instance, driver-core devices will add their device
* nodes here.
*/
@@ -456,6 +490,13 @@ int __init devtmpfs_init(void)
pr_err("unable to create devtmpfs %ld\n", PTR_ERR(mnt));
return PTR_ERR(mnt);
}
+
+ err = devtmpfs_configure_context();
+ if (err) {
+ pr_err("unable to configure devtmpfs type %d\n", err);
+ return err;
+ }
+
err = register_filesystem(&dev_fs_type);
if (err) {
pr_err("unable to register devtmpfs type %d\n", err);
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 6f2a33722c52..1813cfd0c4bd 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -1451,7 +1451,8 @@ static int platform_dma_configure(struct device *dev)
attr = acpi_get_dma_attr(to_acpi_device_node(fwnode));
ret = acpi_dma_configure(dev, attr);
}
- if (ret || drv->driver_managed_dma)
+ /* @drv may not be valid when we're called from the IOMMU layer */
+ if (ret || !dev->driver || drv->driver_managed_dma)
return ret;
ret = iommu_device_use_default_domain(dev);
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index e18ba676cdf6..b69bcb37c830 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -259,39 +259,6 @@ int pm_clk_add_clk(struct device *dev, struct clk *clk)
}
EXPORT_SYMBOL_GPL(pm_clk_add_clk);
-
-/**
- * of_pm_clk_add_clk - Start using a device clock for power management.
- * @dev: Device whose clock is going to be used for power management.
- * @name: Name of clock that is going to be used for power management.
- *
- * Add the clock described in the 'clocks' device-tree node that matches
- * with the 'name' provided, to the list of clocks used for the power
- * management of @dev. On success, returns 0. Returns a negative error
- * code if the clock is not found or cannot be added.
- */
-int of_pm_clk_add_clk(struct device *dev, const char *name)
-{
- struct clk *clk;
- int ret;
-
- if (!dev || !dev->of_node || !name)
- return -EINVAL;
-
- clk = of_clk_get_by_name(dev->of_node, name);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
-
- ret = pm_clk_add_clk(dev, clk);
- if (ret) {
- clk_put(clk);
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
-
/**
* of_pm_clk_add_clks - Start using device clock(s) for power management.
* @dev: Device whose clock(s) is going to be used for power management.
@@ -377,46 +344,6 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
}
/**
- * pm_clk_remove - Stop using a device clock for power management.
- * @dev: Device whose clock should not be used for PM any more.
- * @con_id: Connection ID of the clock.
- *
- * Remove the clock represented by @con_id from the list of clocks used for
- * the power management of @dev.
- */
-void pm_clk_remove(struct device *dev, const char *con_id)
-{
- struct pm_subsys_data *psd = dev_to_psd(dev);
- struct pm_clock_entry *ce;
-
- if (!psd)
- return;
-
- pm_clk_list_lock(psd);
-
- list_for_each_entry(ce, &psd->clock_list, node) {
- if (!con_id && !ce->con_id)
- goto remove;
- else if (!con_id || !ce->con_id)
- continue;
- else if (!strcmp(con_id, ce->con_id))
- goto remove;
- }
-
- pm_clk_list_unlock(psd);
- return;
-
- remove:
- list_del(&ce->node);
- if (ce->enabled_when_prepared)
- psd->clock_op_might_sleep--;
- pm_clk_list_unlock(psd);
-
- __pm_clk_remove(ce);
-}
-EXPORT_SYMBOL_GPL(pm_clk_remove);
-
-/**
* pm_clk_remove_clk - Stop using a device clock for power management.
* @dev: Device whose clock should not be used for PM any more.
* @clk: Clock pointer
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 4fa525668cb7..6502720bb564 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -115,18 +115,6 @@ int pm_generic_freeze_noirq(struct device *dev)
EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
/**
- * pm_generic_freeze_late - Generic freeze_late callback for subsystems.
- * @dev: Device to freeze.
- */
-int pm_generic_freeze_late(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->freeze_late ? pm->freeze_late(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_freeze_late);
-
-/**
* pm_generic_freeze - Generic freeze callback for subsystems.
* @dev: Device to freeze.
*/
@@ -187,18 +175,6 @@ int pm_generic_thaw_noirq(struct device *dev)
EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
/**
- * pm_generic_thaw_early - Generic thaw_early callback for subsystems.
- * @dev: Device to thaw.
- */
-int pm_generic_thaw_early(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->thaw_early ? pm->thaw_early(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_thaw_early);
-
-/**
* pm_generic_thaw - Generic thaw callback for subsystems.
* @dev: Device to thaw.
*/
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 40e1d8d8a589..ac2a197c1234 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -249,7 +249,7 @@ static int dpm_wait_fn(struct device *dev, void *async_ptr)
static void dpm_wait_for_children(struct device *dev, bool async)
{
- device_for_each_child(dev, &async, dpm_wait_fn);
+ device_for_each_child(dev, &async, dpm_wait_fn);
}
static void dpm_wait_for_suppliers(struct device *dev, bool async)
@@ -599,27 +599,34 @@ static bool is_async(struct device *dev)
static bool dpm_async_fn(struct device *dev, async_func_t func)
{
- reinit_completion(&dev->power.completion);
+ if (!is_async(dev))
+ return false;
- if (is_async(dev)) {
- dev->power.async_in_progress = true;
+ dev->power.work_in_progress = true;
- get_device(dev);
+ get_device(dev);
+
+ if (async_schedule_dev_nocall(func, dev))
+ return true;
- if (async_schedule_dev_nocall(func, dev))
- return true;
+ put_device(dev);
- put_device(dev);
- }
/*
- * Because async_schedule_dev_nocall() above has returned false or it
- * has not been called at all, func() is not running and it is safe to
- * update the async_in_progress flag without extra synchronization.
+ * async_schedule_dev_nocall() above has returned false, so func() is
+ * not running and it is safe to update power.work_in_progress without
+ * extra synchronization.
*/
- dev->power.async_in_progress = false;
+ dev->power.work_in_progress = false;
+
return false;
}
+static void dpm_clear_async_state(struct device *dev)
+{
+ reinit_completion(&dev->power.completion);
+ dev->power.work_in_progress = false;
+}
+
/**
* device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
@@ -656,15 +663,13 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool asy
* so change its status accordingly.
*
* Otherwise, the device is going to be resumed, so set its PM-runtime
- * status to "active" unless its power.set_active flag is clear, in
+ * status to "active" unless its power.smart_suspend flag is clear, in
* which case it is not necessary to update its PM-runtime status.
*/
- if (skip_resume) {
+ if (skip_resume)
pm_runtime_set_suspended(dev);
- } else if (dev->power.set_active) {
+ else if (dev_pm_smart_suspend(dev))
pm_runtime_set_active(dev);
- dev->power.set_active = false;
- }
if (dev->pm_domain) {
info = "noirq power domain ";
@@ -731,14 +736,16 @@ static void dpm_noirq_resume_devices(pm_message_t state)
* Trigger the resume of "async" devices upfront so they don't have to
* wait for the "non-async" ones they don't depend on.
*/
- list_for_each_entry(dev, &dpm_noirq_list, power.entry)
+ list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
+ dpm_clear_async_state(dev);
dpm_async_fn(dev, async_resume_noirq);
+ }
while (!list_empty(&dpm_noirq_list)) {
dev = to_device(dpm_noirq_list.next);
list_move_tail(&dev->power.entry, &dpm_late_early_list);
- if (!dev->power.async_in_progress) {
+ if (!dev->power.work_in_progress) {
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -871,14 +878,16 @@ void dpm_resume_early(pm_message_t state)
* Trigger the resume of "async" devices upfront so they don't have to
* wait for the "non-async" ones they don't depend on.
*/
- list_for_each_entry(dev, &dpm_late_early_list, power.entry)
+ list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
+ dpm_clear_async_state(dev);
dpm_async_fn(dev, async_resume_early);
+ }
while (!list_empty(&dpm_late_early_list)) {
dev = to_device(dpm_late_early_list.next);
list_move_tail(&dev->power.entry, &dpm_suspended_list);
- if (!dev->power.async_in_progress) {
+ if (!dev->power.work_in_progress) {
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -929,7 +938,17 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
if (dev->power.syscore)
goto Complete;
+ if (!dev->power.is_suspended)
+ goto Complete;
+
if (dev->power.direct_complete) {
+ /*
+ * Allow new children to be added under the device after this
+ * point if it has no PM callbacks.
+ */
+ if (dev->power.no_pm_callbacks)
+ dev->power.is_prepared = false;
+
/* Match the pm_runtime_disable() in device_suspend(). */
pm_runtime_enable(dev);
goto Complete;
@@ -947,9 +966,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
*/
dev->power.is_prepared = false;
- if (!dev->power.is_suspended)
- goto Unlock;
-
if (dev->pm_domain) {
info = "power domain ";
callback = pm_op(&dev->pm_domain->ops, state);
@@ -989,7 +1005,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
error = dpm_run_callback(callback, dev, state, info);
dev->power.is_suspended = false;
- Unlock:
device_unlock(dev);
dpm_watchdog_clear(&wd);
@@ -1037,14 +1052,16 @@ void dpm_resume(pm_message_t state)
* Trigger the resume of "async" devices upfront so they don't have to
* wait for the "non-async" ones they don't depend on.
*/
- list_for_each_entry(dev, &dpm_suspended_list, power.entry)
+ list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
+ dpm_clear_async_state(dev);
dpm_async_fn(dev, async_resume);
+ }
while (!list_empty(&dpm_suspended_list)) {
dev = to_device(dpm_suspended_list.next);
list_move_tail(&dev->power.entry, &dpm_prepared_list);
- if (!dev->power.async_in_progress) {
+ if (!dev->power.work_in_progress) {
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -1109,6 +1126,8 @@ static void device_complete(struct device *dev, pm_message_t state)
device_unlock(dev);
out:
+ /* If enabling runtime PM for the device is blocked, unblock it. */
+ pm_runtime_unblock(dev);
pm_runtime_put(dev);
}
@@ -1270,24 +1289,17 @@ Skip:
dev->power.is_noirq_suspended = true;
/*
- * Skipping the resume of devices that were in use right before the
- * system suspend (as indicated by their PM-runtime usage counters)
- * would be suboptimal. Also resume them if doing that is not allowed
- * to be skipped.
+ * Devices must be resumed unless they are explicitly allowed to be left
+ * in suspend, but even in that case skipping the resume of devices that
+ * were in use right before the system suspend (as indicated by their
+ * runtime PM usage counters and child counters) would be suboptimal.
*/
- if (atomic_read(&dev->power.usage_count) > 1 ||
- !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
- dev->power.may_skip_resume))
+ if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
+ dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
dev->power.must_resume = true;
- if (dev->power.must_resume) {
- if (dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND)) {
- dev->power.set_active = true;
- if (dev->parent && !dev->parent->power.ignore_children)
- dev->parent->power.set_active = true;
- }
+ if (dev->power.must_resume)
dpm_superior_set_must_resume(dev);
- }
Complete:
complete_all(&dev->power.completion);
@@ -1320,6 +1332,7 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
list_move(&dev->power.entry, &dpm_noirq_list);
+ dpm_clear_async_state(dev);
if (dpm_async_fn(dev, async_suspend_noirq))
continue;
@@ -1404,6 +1417,10 @@ static int device_suspend_late(struct device *dev, pm_message_t state, bool asyn
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
+ /*
+ * Disable runtime PM for the device without checking if there is a
+ * pending resume request for it.
+ */
__pm_runtime_disable(dev, false);
dpm_wait_for_subordinate(dev, async);
@@ -1493,6 +1510,7 @@ int dpm_suspend_late(pm_message_t state)
list_move(&dev->power.entry, &dpm_late_early_list);
+ dpm_clear_async_state(dev);
if (dpm_async_fn(dev, async_suspend_late))
continue;
@@ -1650,6 +1668,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
pm_runtime_disable(dev);
if (pm_runtime_status_suspended(dev)) {
pm_dev_dbg(dev, state, "direct-complete ");
+ dev->power.is_suspended = true;
goto Complete;
}
@@ -1760,6 +1779,7 @@ int dpm_suspend(pm_message_t state)
list_move(&dev->power.entry, &dpm_suspended_list);
+ dpm_clear_async_state(dev);
if (dpm_async_fn(dev, async_suspend))
continue;
@@ -1791,6 +1811,46 @@ int dpm_suspend(pm_message_t state)
return error;
}
+static bool device_prepare_smart_suspend(struct device *dev)
+{
+ struct device_link *link;
+ bool ret = true;
+ int idx;
+
+ /*
+ * The "smart suspend" feature is enabled for devices whose drivers ask
+ * for it and for devices without PM callbacks.
+ *
+ * However, if "smart suspend" is not enabled for the device's parent
+ * or any of its suppliers that take runtime PM into account, it cannot
+ * be enabled for the device either.
+ */
+ if (!dev->power.no_pm_callbacks &&
+ !dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
+ return false;
+
+ if (dev->parent && !dev_pm_smart_suspend(dev->parent) &&
+ !dev->parent->power.ignore_children && !pm_runtime_blocked(dev->parent))
+ return false;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+ if (!(link->flags & DL_FLAG_PM_RUNTIME))
+ continue;
+
+ if (!dev_pm_smart_suspend(link->supplier) &&
+ !pm_runtime_blocked(link->supplier)) {
+ ret = false;
+ break;
+ }
+ }
+
+ device_links_read_unlock(idx);
+
+ return ret;
+}
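For context, a rough sketch (not part of this patch) of how a supplier ends up being considered by the walk above: the consumer has to be linked to it with DL_FLAG_PM_RUNTIME. The driver and function names below are hypothetical; only device_link_add() and the flags are existing kernel API.

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    /* Hypothetical consumer driver creating a runtime-PM-aware link. */
    static int example_link_supplier(struct device *consumer,
                                     struct device *supplier)
    {
        struct device_link *link;

        /* Only links carrying DL_FLAG_PM_RUNTIME are walked above. */
        link = device_link_add(consumer, supplier,
                               DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER);
        if (!link)
            return -ENODEV;

        return 0;
    }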
+
/**
* device_prepare - Prepare a device for system power transition.
* @dev: Device to handle.
@@ -1802,6 +1862,7 @@ int dpm_suspend(pm_message_t state)
static int device_prepare(struct device *dev, pm_message_t state)
{
int (*callback)(struct device *) = NULL;
+ bool smart_suspend;
int ret = 0;
/*
@@ -1811,6 +1872,13 @@ static int device_prepare(struct device *dev, pm_message_t state)
* it again during the complete phase.
*/
pm_runtime_get_noresume(dev);
+ /*
+ * If runtime PM is disabled for the device at this point and it has
+ * never been enabled so far, it should not be enabled until this system
+ * suspend-resume cycle is complete, so prepare to trigger a warning on
+ * subsequent attempts to enable it.
+ */
+ smart_suspend = !pm_runtime_block_if_disabled(dev);
if (dev->power.syscore)
return 0;
@@ -1845,6 +1913,13 @@ unlock:
pm_runtime_put(dev);
return ret;
}
+ /* Do not enable "smart suspend" for devices with disabled runtime PM. */
+ if (smart_suspend)
+ smart_suspend = device_prepare_smart_suspend(dev);
+
+ spin_lock_irq(&dev->power.lock);
+
+ dev->power.smart_suspend = smart_suspend;
/*
* A positive return value from ->prepare() means "this device appears
* to be runtime-suspended and its state is fine, so if it really is
@@ -1852,11 +1927,12 @@ unlock:
* will do the same thing with all of its descendants". This only
* applies to suspend transitions, however.
*/
- spin_lock_irq(&dev->power.lock);
dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
(ret > 0 || dev->power.no_pm_callbacks) &&
!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
+
spin_unlock_irq(&dev->power.lock);
+
return 0;
}
@@ -2020,6 +2096,5 @@ void device_pm_check_callbacks(struct device *dev)
bool dev_pm_skip_suspend(struct device *dev)
{
- return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
- pm_runtime_status_suspended(dev);
+ return dev_pm_smart_suspend(dev) && pm_runtime_status_suspended(dev);
}
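As a minimal sketch (not taken from this patch), a driver opts into the machinery that dev_pm_skip_suspend() and device_prepare_smart_suspend() act on by setting the corresponding driver flags, typically at probe time. The platform driver below is hypothetical; the flags and helpers are existing kernel API.

    #include <linux/platform_device.h>
    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    static int example_probe(struct platform_device *pdev)
    {
        /*
         * Let the PM core reuse the runtime PM state across system suspend
         * and leave the device suspended on resume when that is safe.
         */
        dev_pm_set_driver_flags(&pdev->dev,
                                DPM_FLAG_SMART_SUSPEND | DPM_FLAG_MAY_SKIP_RESUME);

        pm_runtime_enable(&pdev->dev);
        return 0;
    }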
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 2ee45841486b..0e127b0329c0 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -448,8 +448,19 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
retval = __rpm_callback(cb, dev);
}
- dev->power.runtime_error = retval;
- return retval != -EACCES ? retval : -EIO;
+ /*
+ * Since -EACCES means that runtime PM is disabled for the given device,
+ * it should not be returned by runtime PM callbacks. If it is returned
+ * nevertheless, assume it to be a transient error and convert it to
+ * -EAGAIN.
+ */
+ if (retval == -EACCES)
+ retval = -EAGAIN;
+
+ if (retval != -EAGAIN && retval != -EBUSY)
+ dev->power.runtime_error = retval;
+
+ return retval;
}
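A hedged illustration of what the new error handling means for drivers: a runtime-suspend callback that returns -EBUSY or -EAGAIN for a transient condition no longer gets latched in power.runtime_error, so later suspend attempts proceed normally. Everything named example_* below is made up.

    #include <linux/device.h>
    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    struct example_priv { bool dma_busy; };

    static int example_runtime_suspend(struct device *dev)
    {
        struct example_priv *priv = dev_get_drvdata(dev);

        if (priv->dma_busy)
            return -EBUSY;  /* transient: retried later, not recorded as an error */

        /* power the hardware down here */
        return 0;
    }

    static int example_runtime_resume(struct device *dev)
    {
        /* power the hardware back up here */
        return 0;
    }

    static const struct dev_pm_ops example_pm_ops = {
        RUNTIME_PM_OPS(example_runtime_suspend, example_runtime_resume, NULL)
    };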
/**
@@ -725,21 +736,18 @@ static int rpm_suspend(struct device *dev, int rpmflags)
dev->power.deferred_resume = false;
wake_up_all(&dev->power.wait_queue);
- if (retval == -EAGAIN || retval == -EBUSY) {
- dev->power.runtime_error = 0;
+ /*
+ * If the callback routine failed an autosuspend with a transient error and
+ * the last_busy time has been updated so that there is a new autosuspend
+ * expiration time, automatically reschedule another autosuspend.
+ */
+ if (!dev->power.runtime_error && (rpmflags & RPM_AUTO) &&
+ pm_runtime_autosuspend_expiration(dev) != 0)
+ goto repeat;
+
+ pm_runtime_cancel_pending(dev);
- /*
- * If the callback routine failed an autosuspend, and
- * if the last_busy time has been updated so that there
- * is a new autosuspend expiration time, automatically
- * reschedule another autosuspend.
- */
- if ((rpmflags & RPM_AUTO) &&
- pm_runtime_autosuspend_expiration(dev) != 0)
- goto repeat;
- } else {
- pm_runtime_cancel_pending(dev);
- }
goto out;
}
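The autosuspend rescheduling described above services the usual driver pattern of marking the device busy and dropping the usage count with an autosuspend put. A minimal, hypothetical example of that pattern using existing runtime PM helpers:

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    static void example_setup_autosuspend(struct device *dev)
    {
        pm_runtime_set_autosuspend_delay(dev, 50);  /* ms of idleness before suspend */
        pm_runtime_use_autosuspend(dev);
        pm_runtime_enable(dev);
    }

    static void example_io_done(struct device *dev)
    {
        /* Refresh last_busy so a failed autosuspend can be rescheduled later. */
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
    }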
@@ -1460,20 +1468,31 @@ int pm_runtime_barrier(struct device *dev)
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
-/**
- * __pm_runtime_disable - Disable runtime PM of a device.
- * @dev: Device to handle.
- * @check_resume: If set, check if there's a resume request for the device.
- *
- * Increment power.disable_depth for the device and if it was zero previously,
- * cancel all pending runtime PM requests for the device and wait for all
- * operations in progress to complete. The device can be either active or
- * suspended after its runtime PM has been disabled.
- *
- * If @check_resume is set and there's a resume request pending when
- * __pm_runtime_disable() is called and power.disable_depth is zero, the
- * function will wake up the device before disabling its runtime PM.
- */
+bool pm_runtime_block_if_disabled(struct device *dev)
+{
+ bool ret;
+
+ spin_lock_irq(&dev->power.lock);
+
+ ret = !pm_runtime_enabled(dev);
+ if (ret && dev->power.last_status == RPM_INVALID)
+ dev->power.last_status = RPM_BLOCKED;
+
+ spin_unlock_irq(&dev->power.lock);
+
+ return ret;
+}
+
+void pm_runtime_unblock(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+
+ if (dev->power.last_status == RPM_BLOCKED)
+ dev->power.last_status = RPM_INVALID;
+
+ spin_unlock_irq(&dev->power.lock);
+}
+
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
spin_lock_irq(&dev->power.lock);
@@ -1532,6 +1551,10 @@ void pm_runtime_enable(struct device *dev)
if (--dev->power.disable_depth > 0)
goto out;
+ if (dev->power.last_status == RPM_BLOCKED) {
+ dev_warn(dev, "Attempt to enable runtime PM when it is blocked\n");
+ dump_stack();
+ }
dev->power.last_status = RPM_INVALID;
dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
@@ -1764,8 +1787,8 @@ void pm_runtime_init(struct device *dev)
INIT_WORK(&dev->power.work, pm_runtime_work);
dev->power.timer_expires = 0;
- hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- dev->power.suspend_timer.function = pm_suspend_timer_fn;
+ hrtimer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS);
init_waitqueue_head(&dev->power.wait_queue);
}
@@ -1874,7 +1897,7 @@ void pm_runtime_drop_link(struct device_link *link)
pm_request_idle(link->supplier);
}
-static bool pm_runtime_need_not_resume(struct device *dev)
+bool pm_runtime_need_not_resume(struct device *dev)
{
return atomic_read(&dev->power.usage_count) <= 1 &&
(atomic_read(&dev->power.child_count) == 0 ||
@@ -1959,7 +1982,7 @@ int pm_runtime_force_resume(struct device *dev)
int (*callback)(struct device *);
int ret = 0;
- if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
+ if (!dev->power.needs_force_resume)
goto out;
/*
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index bdb450436cbc..6f31240ee4a9 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -73,12 +73,12 @@ struct regmap {
void *bus_context;
const char *name;
- bool async;
spinlock_t async_lock;
wait_queue_head_t async_waitq;
struct list_head async_list;
struct list_head async_free;
int async_ret;
+ bool async;
#ifdef CONFIG_DEBUG_FS
bool debugfs_disable;
@@ -117,8 +117,6 @@ struct regmap {
void *val_buf, size_t val_size);
int (*write)(void *context, const void *data, size_t count);
- bool defer_caching;
-
unsigned long read_flag_mask;
unsigned long write_flag_mask;
@@ -127,6 +125,8 @@ struct regmap {
int reg_stride;
int reg_stride_order;
+ bool defer_caching;
+
/* If set, will always write field to HW. */
bool force_write_field;
@@ -161,6 +161,9 @@ struct regmap {
struct reg_sequence *patch;
int patch_regs;
+ /* if set, the regmap core can sleep */
+ bool can_sleep;
+
/* if set, converts bulk read to single read */
bool use_single_read;
/* if set, converts bulk write to single write */
@@ -176,9 +179,6 @@ struct regmap {
void *selector_work_buf; /* Scratch buffer used for selector */
struct hwspinlock *hwlock;
-
- /* if set, the regmap core can sleep */
- bool can_sleep;
};
struct regcache_ops {
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index b1f8508c3966..f7fcf2de1301 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -21,6 +21,37 @@ static const struct regcache_ops *cache_types[] = {
&regcache_flat_ops,
};
+static int regcache_defaults_cmp(const void *a, const void *b)
+{
+ const struct reg_default *x = a;
+ const struct reg_default *y = b;
+
+ if (x->reg > y->reg)
+ return 1;
+ else if (x->reg < y->reg)
+ return -1;
+ else
+ return 0;
+}
+
+static void regcache_defaults_swap(void *a, void *b, int size)
+{
+ struct reg_default *x = a;
+ struct reg_default *y = b;
+ struct reg_default tmp;
+
+ tmp = *x;
+ *x = *y;
+ *y = tmp;
+}
+
+void regcache_sort_defaults(struct reg_default *defaults, unsigned int ndefaults)
+{
+ sort(defaults, ndefaults, sizeof(*defaults),
+ regcache_defaults_cmp, regcache_defaults_swap);
+}
+EXPORT_SYMBOL_GPL(regcache_sort_defaults);
+
static int regcache_hw_init(struct regmap *map)
{
int i, j;
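regcache_sort_defaults() above lets a client driver hand over a reg_default table that is not already sorted by register address. A brief sketch of the intended use, with made-up register values:

    #include <linux/kernel.h>
    #include <linux/regmap.h>

    static struct reg_default example_defaults[] = {
        { .reg = 0x30, .def = 0x01 },
        { .reg = 0x10, .def = 0xff },
        { .reg = 0x20, .def = 0x00 },
    };

    static void example_prepare_defaults(void)
    {
        /* Sort in place by ascending register address before regmap init. */
        regcache_sort_defaults(example_defaults, ARRAY_SIZE(example_defaults));
    }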
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 978613407ea3..6c6869188c31 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -823,7 +823,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
/* Ack masked but set interrupts */
if (d->chip->no_status) {
/* no status register so default to all active */
- d->status_buf[i] = GENMASK(31, 0);
+ d->status_buf[i] = UINT_MAX;
} else {
reg = d->get_irq_reg(d, d->chip->status_base, i);
ret = regmap_read(map, reg, &d->status_buf[i]);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c05fe27a96b6..674527d770dc 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -45,8 +45,6 @@ enum {
Lo_deleting,
};
-struct loop_func_table;
-
struct loop_device {
int lo_number;
loff_t lo_offset;
@@ -54,7 +52,8 @@ struct loop_device {
int lo_flags;
char lo_file_name[LO_NAME_SIZE];
- struct file * lo_backing_file;
+ struct file *lo_backing_file;
+ unsigned int lo_min_dio_size;
struct block_device *lo_device;
gfp_t old_gfp_mask;
@@ -169,29 +168,14 @@ static loff_t get_loop_size(struct loop_device *lo, struct file *file)
* of backing device, and the logical block size of loop is bigger than that of
* the backing device.
*/
-static bool lo_bdev_can_use_dio(struct loop_device *lo,
- struct block_device *backing_bdev)
-{
- unsigned int sb_bsize = bdev_logical_block_size(backing_bdev);
-
- if (queue_logical_block_size(lo->lo_queue) < sb_bsize)
- return false;
- if (lo->lo_offset & (sb_bsize - 1))
- return false;
- return true;
-}
-
static bool lo_can_use_dio(struct loop_device *lo)
{
- struct inode *inode = lo->lo_backing_file->f_mapping->host;
-
if (!(lo->lo_backing_file->f_mode & FMODE_CAN_ODIRECT))
return false;
-
- if (S_ISBLK(inode->i_mode))
- return lo_bdev_can_use_dio(lo, I_BDEV(inode));
- if (inode->i_sb->s_bdev)
- return lo_bdev_can_use_dio(lo, inode->i_sb->s_bdev);
+ if (queue_logical_block_size(lo->lo_queue) < lo->lo_min_dio_size)
+ return false;
+ if (lo->lo_offset & (lo->lo_min_dio_size - 1))
+ return false;
return true;
}
@@ -205,20 +189,12 @@ static bool lo_can_use_dio(struct loop_device *lo)
*/
static inline void loop_update_dio(struct loop_device *lo)
{
- bool dio_in_use = lo->lo_flags & LO_FLAGS_DIRECT_IO;
-
lockdep_assert_held(&lo->lo_mutex);
WARN_ON_ONCE(lo->lo_state == Lo_bound &&
lo->lo_queue->mq_freeze_depth == 0);
- if (lo->lo_backing_file->f_flags & O_DIRECT)
- lo->lo_flags |= LO_FLAGS_DIRECT_IO;
if ((lo->lo_flags & LO_FLAGS_DIRECT_IO) && !lo_can_use_dio(lo))
lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
-
- /* flush dirty pages before starting to issue direct I/O */
- if ((lo->lo_flags & LO_FLAGS_DIRECT_IO) && !dio_in_use)
- vfs_fsync(lo->lo_backing_file, 0);
}
/**
@@ -541,6 +517,28 @@ static void loop_reread_partitions(struct loop_device *lo)
__func__, lo->lo_number, lo->lo_file_name, rc);
}
+static unsigned int loop_query_min_dio_size(struct loop_device *lo)
+{
+ struct file *file = lo->lo_backing_file;
+ struct block_device *sb_bdev = file->f_mapping->host->i_sb->s_bdev;
+ struct kstat st;
+
+ /*
+ * Use the minimal dio alignment of the file system if provided.
+ */
+ if (!vfs_getattr(&file->f_path, &st, STATX_DIOALIGN, 0) &&
+ (st.result_mask & STATX_DIOALIGN))
+ return st.dio_offset_align;
+
+ /*
+ * In a perfect world this wouldn't be needed, but as of Linux 6.13 only
+ * a handful of file systems support the STATX_DIOALIGN flag.
+ */
+ if (sb_bdev)
+ return bdev_logical_block_size(sb_bdev);
+ return SECTOR_SIZE;
+}
+
static inline int is_loop_device(struct file *file)
{
struct inode *i = file->f_mapping->host;
@@ -573,6 +571,17 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
return 0;
}
+static void loop_assign_backing_file(struct loop_device *lo, struct file *file)
+{
+ lo->lo_backing_file = file;
+ lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
+ mapping_set_gfp_mask(file->f_mapping,
+ lo->old_gfp_mask & ~(__GFP_IO | __GFP_FS));
+ if (lo->lo_backing_file->f_flags & O_DIRECT)
+ lo->lo_flags |= LO_FLAGS_DIRECT_IO;
+ lo->lo_min_dio_size = loop_query_min_dio_size(lo);
+}
+
/*
* loop_change_fd switched the backing store of a loopback device to
* a new file. This is useful for operating system installers to free up
@@ -622,14 +631,18 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
goto out_err;
+ /*
+ * We might switch to direct I/O mode for the loop device, so write back
+ * all dirty data in the page cache now so that the individual I/O
+ * operations don't have to do that.
+ */
+ vfs_fsync(file, 0);
+
/* and ... switch */
disk_force_media_change(lo->lo_disk);
memflags = blk_mq_freeze_queue(lo->lo_queue);
mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
- lo->lo_backing_file = file;
- lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
- mapping_set_gfp_mask(file->f_mapping,
- lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
+ loop_assign_backing_file(lo, file);
loop_update_dio(lo);
blk_mq_unfreeze_queue(lo->lo_queue, memflags);
partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
@@ -971,12 +984,11 @@ loop_set_status_from_info(struct loop_device *lo,
return 0;
}
-static unsigned int loop_default_blocksize(struct loop_device *lo,
- struct block_device *backing_bdev)
+static unsigned int loop_default_blocksize(struct loop_device *lo)
{
- /* In case of direct I/O, match underlying block size */
- if ((lo->lo_backing_file->f_flags & O_DIRECT) && backing_bdev)
- return bdev_logical_block_size(backing_bdev);
+ /* In case of direct I/O, match underlying minimum I/O size */
+ if (lo->lo_flags & LO_FLAGS_DIRECT_IO)
+ return lo->lo_min_dio_size;
return SECTOR_SIZE;
}
@@ -994,7 +1006,7 @@ static void loop_update_limits(struct loop_device *lo, struct queue_limits *lim,
backing_bdev = inode->i_sb->s_bdev;
if (!bsize)
- bsize = loop_default_blocksize(lo, backing_bdev);
+ bsize = loop_default_blocksize(lo);
loop_get_discard_config(lo, &granularity, &max_discard_sectors);
@@ -1019,7 +1031,6 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
const struct loop_config *config)
{
struct file *file = fget(config->fd);
- struct address_space *mapping;
struct queue_limits lim;
int error;
loff_t size;
@@ -1055,8 +1066,6 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
if (error)
goto out_unlock;
- mapping = file->f_mapping;
-
if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) {
error = -EINVAL;
goto out_unlock;
@@ -1088,9 +1097,7 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
lo->lo_device = bdev;
- lo->lo_backing_file = file;
- lo->old_gfp_mask = mapping_gfp_mask(mapping);
- mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
+ loop_assign_backing_file(lo, file);
lim = queue_limits_start_update(lo->lo_queue);
loop_update_limits(lo, &lim, config->block_size);
@@ -1099,6 +1106,13 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
if (error)
goto out_unlock;
+ /*
+ * We might switch to direct I/O mode for the loop device, so write back
+ * all dirty data in the page cache now so that the individual I/O
+ * operations don't have to do that.
+ */
+ vfs_fsync(file, 0);
+
loop_update_dio(lo);
loop_sysfs_init(lo);
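loop_query_min_dio_size() above relies on STATX_DIOALIGN when the backing file system reports it; the same information is visible from user space via statx(2). A small stand-alone check, assuming a libc and kernel new enough to expose the STATX_DIOALIGN fields:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>

    int main(int argc, char **argv)
    {
        struct statx stx;

        if (argc < 2)
            return 1;
        if (statx(AT_FDCWD, argv[1], 0, STATX_DIOALIGN, &stx) != 0) {
            perror("statx");
            return 1;
        }

        if (stx.stx_mask & STATX_DIOALIGN)
            printf("dio offset align: %u, dio memory align: %u\n",
                   stx.stx_dio_offset_align, stx.stx_dio_mem_align);
        else
            printf("STATX_DIOALIGN not reported; fall back to the block size\n");
        return 0;
    }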
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 95361099a2dc..0d619df03fa9 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2056,7 +2056,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
unsigned int nents;
/* Map the scatter list for DMA access */
- nents = blk_rq_map_sg(hctx->queue, rq, command->sg);
+ nents = blk_rq_map_sg(rq, command->sg);
nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
prefetch(&port->flags);
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index d94ef37480bd..3bb9cee0a9b5 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -473,6 +473,8 @@ NULLB_DEVICE_ATTR(shared_tags, bool, NULL);
NULLB_DEVICE_ATTR(shared_tag_bitmap, bool, NULL);
NULLB_DEVICE_ATTR(fua, bool, NULL);
NULLB_DEVICE_ATTR(rotational, bool, NULL);
+NULLB_DEVICE_ATTR(badblocks_once, bool, NULL);
+NULLB_DEVICE_ATTR(badblocks_partial_io, bool, NULL);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
@@ -559,14 +561,14 @@ static ssize_t nullb_device_badblocks_store(struct config_item *item,
goto out;
/* enable badblocks */
cmpxchg(&t_dev->badblocks.shift, -1, 0);
- if (buf[0] == '+')
- ret = badblocks_set(&t_dev->badblocks, start,
- end - start + 1, 1);
- else
- ret = badblocks_clear(&t_dev->badblocks, start,
- end - start + 1);
- if (ret == 0)
+ if (buf[0] == '+') {
+ if (badblocks_set(&t_dev->badblocks, start,
+ end - start + 1, 1))
+ ret = count;
+ } else if (badblocks_clear(&t_dev->badblocks, start,
+ end - start + 1)) {
ret = count;
+ }
out:
kfree(orig);
return ret;
@@ -592,41 +594,43 @@ static ssize_t nullb_device_zone_offline_store(struct config_item *item,
CONFIGFS_ATTR_WO(nullb_device_, zone_offline);
static struct configfs_attribute *nullb_device_attrs[] = {
- &nullb_device_attr_size,
+ &nullb_device_attr_badblocks,
+ &nullb_device_attr_badblocks_once,
+ &nullb_device_attr_badblocks_partial_io,
+ &nullb_device_attr_blocking,
+ &nullb_device_attr_blocksize,
+ &nullb_device_attr_cache_size,
&nullb_device_attr_completion_nsec,
- &nullb_device_attr_submit_queues,
- &nullb_device_attr_poll_queues,
+ &nullb_device_attr_discard,
+ &nullb_device_attr_fua,
&nullb_device_attr_home_node,
- &nullb_device_attr_queue_mode,
- &nullb_device_attr_blocksize,
- &nullb_device_attr_max_sectors,
- &nullb_device_attr_irqmode,
&nullb_device_attr_hw_queue_depth,
&nullb_device_attr_index,
- &nullb_device_attr_blocking,
- &nullb_device_attr_use_per_node_hctx,
- &nullb_device_attr_power,
- &nullb_device_attr_memory_backed,
- &nullb_device_attr_discard,
+ &nullb_device_attr_irqmode,
+ &nullb_device_attr_max_sectors,
&nullb_device_attr_mbps,
- &nullb_device_attr_cache_size,
- &nullb_device_attr_badblocks,
- &nullb_device_attr_zoned,
- &nullb_device_attr_zone_size,
+ &nullb_device_attr_memory_backed,
+ &nullb_device_attr_no_sched,
+ &nullb_device_attr_poll_queues,
+ &nullb_device_attr_power,
+ &nullb_device_attr_queue_mode,
+ &nullb_device_attr_rotational,
+ &nullb_device_attr_shared_tag_bitmap,
+ &nullb_device_attr_shared_tags,
+ &nullb_device_attr_size,
+ &nullb_device_attr_submit_queues,
+ &nullb_device_attr_use_per_node_hctx,
+ &nullb_device_attr_virt_boundary,
+ &nullb_device_attr_zone_append_max_sectors,
&nullb_device_attr_zone_capacity,
- &nullb_device_attr_zone_nr_conv,
- &nullb_device_attr_zone_max_open,
+ &nullb_device_attr_zone_full,
&nullb_device_attr_zone_max_active,
- &nullb_device_attr_zone_append_max_sectors,
- &nullb_device_attr_zone_readonly,
+ &nullb_device_attr_zone_max_open,
+ &nullb_device_attr_zone_nr_conv,
&nullb_device_attr_zone_offline,
- &nullb_device_attr_zone_full,
- &nullb_device_attr_virt_boundary,
- &nullb_device_attr_no_sched,
- &nullb_device_attr_shared_tags,
- &nullb_device_attr_shared_tag_bitmap,
- &nullb_device_attr_fua,
- &nullb_device_attr_rotational,
+ &nullb_device_attr_zone_readonly,
+ &nullb_device_attr_zone_size,
+ &nullb_device_attr_zoned,
NULL,
};
@@ -704,16 +708,28 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
- return snprintf(page, PAGE_SIZE,
- "badblocks,blocking,blocksize,cache_size,fua,"
- "completion_nsec,discard,home_node,hw_queue_depth,"
- "irqmode,max_sectors,mbps,memory_backed,no_sched,"
- "poll_queues,power,queue_mode,shared_tag_bitmap,"
- "shared_tags,size,submit_queues,use_per_node_hctx,"
- "virt_boundary,zoned,zone_capacity,zone_max_active,"
- "zone_max_open,zone_nr_conv,zone_offline,zone_readonly,"
- "zone_size,zone_append_max_sectors,zone_full,"
- "rotational\n");
+
+ struct configfs_attribute **entry;
+ char delimiter = ',';
+ size_t left = PAGE_SIZE;
+ size_t written = 0;
+ int ret;
+
+ for (entry = &nullb_device_attrs[0]; *entry && left > 0; entry++) {
+ if (!*(entry + 1))
+ delimiter = '\n';
+ ret = snprintf(page + written, left, "%s%c", (*entry)->ca_name,
+ delimiter);
+ if (ret >= left) {
+ WARN_ONCE(1, "Too many null_blk features to print\n");
+ memzero_explicit(page, PAGE_SIZE);
+ return -ENOBUFS;
+ }
+ left -= ret;
+ written += ret;
+ }
+
+ return written;
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -1249,25 +1265,37 @@ static int null_transfer(struct nullb *nullb, struct page *page,
return err;
}
-static blk_status_t null_handle_rq(struct nullb_cmd *cmd)
+/*
+ * Transfer data for the given request. The transfer size is capped by the
+ * nr_sectors argument.
+ */
+static blk_status_t null_handle_data_transfer(struct nullb_cmd *cmd,
+ sector_t nr_sectors)
{
struct request *rq = blk_mq_rq_from_pdu(cmd);
struct nullb *nullb = cmd->nq->dev->nullb;
int err = 0;
unsigned int len;
sector_t sector = blk_rq_pos(rq);
+ unsigned int max_bytes = nr_sectors << SECTOR_SHIFT;
+ unsigned int transferred_bytes = 0;
struct req_iterator iter;
struct bio_vec bvec;
spin_lock_irq(&nullb->lock);
rq_for_each_segment(bvec, rq, iter) {
len = bvec.bv_len;
+ if (transferred_bytes + len > max_bytes)
+ len = max_bytes - transferred_bytes;
err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
op_is_write(req_op(rq)), sector,
rq->cmd_flags & REQ_FUA);
if (err)
break;
sector += len >> SECTOR_SHIFT;
+ transferred_bytes += len;
+ if (transferred_bytes >= max_bytes)
+ break;
}
spin_unlock_irq(&nullb->lock);
@@ -1295,31 +1323,51 @@ static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd)
return sts;
}
-static inline blk_status_t null_handle_badblocks(struct nullb_cmd *cmd,
- sector_t sector,
- sector_t nr_sectors)
+/*
+ * Check if the command should fail due to badblocks. If so, return
+ * BLK_STS_IOERR and set the number of partial I/O sectors that can still be
+ * written or read, which may be less than the requested number of sectors.
+ *
+ * @cmd: The command to handle.
+ * @sector: The start sector for I/O.
+ * @nr_sectors: Specifies number of sectors to write or read, and returns the
+ * number of sectors to be written or read.
+ */
+blk_status_t null_handle_badblocks(struct nullb_cmd *cmd, sector_t sector,
+ unsigned int *nr_sectors)
{
struct badblocks *bb = &cmd->nq->dev->badblocks;
- sector_t first_bad;
- int bad_sectors;
+ struct nullb_device *dev = cmd->nq->dev;
+ unsigned int block_sectors = dev->blocksize >> SECTOR_SHIFT;
+ sector_t first_bad, bad_sectors;
+ unsigned int partial_io_sectors = 0;
- if (badblocks_check(bb, sector, nr_sectors, &first_bad, &bad_sectors))
- return BLK_STS_IOERR;
+ if (!badblocks_check(bb, sector, *nr_sectors, &first_bad, &bad_sectors))
+ return BLK_STS_OK;
- return BLK_STS_OK;
+ if (cmd->nq->dev->badblocks_once)
+ badblocks_clear(bb, first_bad, bad_sectors);
+
+ if (cmd->nq->dev->badblocks_partial_io) {
+ if (!IS_ALIGNED(first_bad, block_sectors))
+ first_bad = ALIGN_DOWN(first_bad, block_sectors);
+ if (sector < first_bad)
+ partial_io_sectors = first_bad - sector;
+ }
+ *nr_sectors = partial_io_sectors;
+
+ return BLK_STS_IOERR;
}
-static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
- enum req_op op,
- sector_t sector,
- sector_t nr_sectors)
+blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd, enum req_op op,
+ sector_t sector, sector_t nr_sectors)
{
struct nullb_device *dev = cmd->nq->dev;
if (op == REQ_OP_DISCARD)
return null_handle_discard(dev, sector, nr_sectors);
- return null_handle_rq(cmd);
+ return null_handle_data_transfer(cmd, nr_sectors);
}
static void nullb_zero_read_cmd_buffer(struct nullb_cmd *cmd)
@@ -1366,18 +1414,19 @@ blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
sector_t sector, unsigned int nr_sectors)
{
struct nullb_device *dev = cmd->nq->dev;
+ blk_status_t badblocks_ret = BLK_STS_OK;
blk_status_t ret;
- if (dev->badblocks.shift != -1) {
- ret = null_handle_badblocks(cmd, sector, nr_sectors);
+ if (dev->badblocks.shift != -1)
+ badblocks_ret = null_handle_badblocks(cmd, sector, &nr_sectors);
+
+ if (dev->memory_backed && nr_sectors) {
+ ret = null_handle_memory_backed(cmd, op, sector, nr_sectors);
if (ret != BLK_STS_OK)
return ret;
}
- if (dev->memory_backed)
- return null_handle_memory_backed(cmd, op, sector, nr_sectors);
-
- return BLK_STS_OK;
+ return badblocks_ret;
}
static void null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
@@ -1426,8 +1475,7 @@ static void nullb_setup_bwtimer(struct nullb *nullb)
{
ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
- hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- nullb->bw_timer.function = nullb_bwtimer_fn;
+ hrtimer_setup(&nullb->bw_timer, nullb_bwtimer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}
@@ -1549,8 +1597,8 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
cmd = blk_mq_rq_to_pdu(req);
cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),
blk_rq_sectors(req));
- if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error,
- blk_mq_end_request_batch))
+ if (!blk_mq_add_to_batch(req, iob, cmd->error != BLK_STS_OK,
+ blk_mq_end_request_batch))
blk_mq_end_request(req, cmd->error);
nr++;
}
@@ -1604,8 +1652,8 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
if (!is_poll && nq->dev->irqmode == NULL_IRQ_TIMER) {
- hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- cmd->timer.function = null_cmd_timer_expired;
+ hrtimer_setup(&cmd->timer, null_cmd_timer_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
cmd->error = BLK_STS_OK;
cmd->nq = nq;
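For clarity, the partial-I/O arithmetic used by null_handle_badblocks() above, reproduced as a stand-alone function with made-up numbers (8 sectors per 4 KiB block, I/O starting at sector 8, first bad sector at 21):

    #include <stdio.h>

    #define ALIGN_DOWN(x, a)  ((x) / (a) * (a))

    static unsigned int partial_io_sectors(unsigned long long sector,
                                           unsigned long long first_bad,
                                           unsigned int block_sectors)
    {
        /* Round the first bad sector down to a block boundary... */
        first_bad = ALIGN_DOWN(first_bad, block_sectors);
        /* ...and transfer only the sectors in front of it, if any. */
        return sector < first_bad ? (unsigned int)(first_bad - sector) : 0;
    }

    int main(void)
    {
        printf("%u sectors transferred before the error\n",
               partial_io_sectors(8, 21, 8));   /* 21 rounds down to 16 -> 8 */
        return 0;
    }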
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index 6f9fe6171087..7bb6128dbaaf 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -63,6 +63,8 @@ struct nullb_device {
unsigned long flags; /* device flags */
unsigned int curr_cache;
struct badblocks badblocks;
+ bool badblocks_once;
+ bool badblocks_partial_io;
unsigned int nr_zones;
unsigned int nr_zones_imp_open;
@@ -131,6 +133,10 @@ blk_status_t null_handle_discard(struct nullb_device *dev, sector_t sector,
sector_t nr_sectors);
blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
sector_t sector, unsigned int nr_sectors);
+blk_status_t null_handle_badblocks(struct nullb_cmd *cmd, sector_t sector,
+ unsigned int *nr_sectors);
+blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd, enum req_op op,
+ sector_t sector, sector_t nr_sectors);
#ifdef CONFIG_BLK_DEV_ZONED
int null_init_zoned_dev(struct nullb_device *dev, struct queue_limits *lim);
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index 0d5f9bf95229..4e5728f45989 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -353,6 +353,7 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
struct nullb_device *dev = cmd->nq->dev;
unsigned int zno = null_zone_no(dev, sector);
struct nullb_zone *zone = &dev->zones[zno];
+ blk_status_t badblocks_ret = BLK_STS_OK;
blk_status_t ret;
trace_nullb_zone_op(cmd, zno, zone->cond);
@@ -412,9 +413,20 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
zone->cond = BLK_ZONE_COND_IMP_OPEN;
}
- ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
- if (ret != BLK_STS_OK)
- goto unlock_zone;
+ if (dev->badblocks.shift != -1) {
+ badblocks_ret = null_handle_badblocks(cmd, sector, &nr_sectors);
+ if (badblocks_ret != BLK_STS_OK && !nr_sectors) {
+ ret = badblocks_ret;
+ goto unlock_zone;
+ }
+ }
+
+ if (dev->memory_backed) {
+ ret = null_handle_memory_backed(cmd, REQ_OP_WRITE, sector,
+ nr_sectors);
+ if (ret != BLK_STS_OK)
+ goto unlock_zone;
+ }
zone->wp += nr_sectors;
if (zone->wp == zone->start + zone->capacity) {
@@ -429,7 +441,7 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
zone->cond = BLK_ZONE_COND_FULL;
}
- ret = BLK_STS_OK;
+ ret = badblocks_ret;
unlock_zone:
null_unlock_zone(dev, zone);
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index 82467ecde7ec..15627417f12e 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -1010,7 +1010,7 @@ static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
* See queue limits.
*/
if ((req_op(rq) != REQ_OP_DISCARD) && (req_op(rq) != REQ_OP_WRITE_ZEROES))
- sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sgt.sgl);
+ sg_cnt = blk_rq_map_sg(rq, iu->sgt.sgl);
if (sg_cnt == 0)
sg_mark_end(&iu->sgt.sgl[0]);
diff --git a/drivers/block/rnull.rs b/drivers/block/rnull.rs
index ddf3629d8894..d07e76ae2c13 100644
--- a/drivers/block/rnull.rs
+++ b/drivers/block/rnull.rs
@@ -27,7 +27,7 @@ use kernel::{
module! {
type: NullBlkModule,
name: "rnull_mod",
- author: "Andreas Hindborg",
+ authors: ["Andreas Hindborg"],
description: "Rust implementation of the C null block driver",
license: "GPL v2",
}
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 282f81616a78..2b33fb5b949b 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -485,7 +485,7 @@ static int __send_request(struct request *req)
}
sg_init_table(sg, port->ring_cookies);
- nsg = blk_rq_map_sg(req->q, req, sg);
+ nsg = blk_rq_map_sg(req, sg);
len = 0;
for (i = 0; i < nsg; i++)
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index ca9a67b5b537..c060da409ed8 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -51,6 +51,9 @@
/* private ioctl command mirror */
#define UBLK_CMD_DEL_DEV_ASYNC _IOC_NR(UBLK_U_CMD_DEL_DEV_ASYNC)
+#define UBLK_IO_REGISTER_IO_BUF _IOC_NR(UBLK_U_IO_REGISTER_IO_BUF)
+#define UBLK_IO_UNREGISTER_IO_BUF _IOC_NR(UBLK_U_IO_UNREGISTER_IO_BUF)
+
/* All UBLK_F_* have to be included into UBLK_F_ALL */
#define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY \
| UBLK_F_URING_CMD_COMP_IN_TASK \
@@ -70,11 +73,10 @@
/* All UBLK_PARAM_TYPE_* should be included here */
#define UBLK_PARAM_TYPE_ALL \
(UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD | \
- UBLK_PARAM_TYPE_DEVT | UBLK_PARAM_TYPE_ZONED)
+ UBLK_PARAM_TYPE_DEVT | UBLK_PARAM_TYPE_ZONED | \
+ UBLK_PARAM_TYPE_DMA_ALIGN)
struct ublk_rq_data {
- struct llist_node node;
-
struct kref ref;
};
@@ -141,8 +143,6 @@ struct ublk_queue {
struct task_struct *ubq_daemon;
char *io_cmd_buf;
- struct llist_head io_cmds;
-
unsigned long io_addr; /* mapped vm address */
unsigned int max_io_sz;
bool force_abort;
@@ -196,12 +196,14 @@ struct ublk_params_header {
static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq);
+static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
+ struct ublk_queue *ubq, int tag, size_t offset);
static inline unsigned int ublk_req_build_flags(struct request *req);
static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
int tag);
static inline bool ublk_dev_is_user_copy(const struct ublk_device *ub)
{
- return ub->dev_info.flags & UBLK_F_USER_COPY;
+ return ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY);
}
static inline bool ublk_dev_is_zoned(const struct ublk_device *ub)
@@ -489,15 +491,17 @@ static wait_queue_head_t ublk_idr_wq; /* wait until one idr is freed */
static DEFINE_MUTEX(ublk_ctl_mutex);
+
+#define UBLK_MAX_UBLKS UBLK_MINORS
+
/*
- * Max ublk devices allowed to add
+ * Max unprivileged ublk devices allowed to add
*
* It can be extended to one per-user limit in future or even controlled
* by cgroup.
*/
-#define UBLK_MAX_UBLKS UBLK_MINORS
-static unsigned int ublks_max = 64;
-static unsigned int ublks_added; /* protected by ublk_ctl_mutex */
+static unsigned int unprivileged_ublks_max = 64;
+static unsigned int unprivileged_ublks_added; /* protected by ublk_ctl_mutex */
static struct miscdevice ublk_misc;
@@ -568,6 +572,16 @@ static int ublk_validate_params(const struct ublk_device *ub)
else if (ublk_dev_is_zoned(ub))
return -EINVAL;
+ if (ub->params.types & UBLK_PARAM_TYPE_DMA_ALIGN) {
+ const struct ublk_param_dma_align *p = &ub->params.dma;
+
+ if (p->alignment >= PAGE_SIZE)
+ return -EINVAL;
+
+ if (!is_power_of_2(p->alignment + 1))
+ return -EINVAL;
+ }
+
return 0;
}
@@ -581,7 +595,7 @@ static void ublk_apply_params(struct ublk_device *ub)
static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
{
- return ubq->flags & UBLK_F_USER_COPY;
+ return ubq->flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY);
}
static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
@@ -1095,7 +1109,7 @@ static void ublk_complete_rq(struct kref *ref)
}
/*
- * Since __ublk_rq_task_work always fails requests immediately during
+ * Since ublk_rq_task_work_cb always fails requests immediately during
* exiting, __ublk_fail_req() is only called from abort context during
* exiting. So lock is unnecessary.
*
@@ -1141,11 +1155,14 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
blk_mq_end_request(rq, BLK_STS_IOERR);
}
-static inline void __ublk_rq_task_work(struct request *req,
- unsigned issue_flags)
+static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
{
- struct ublk_queue *ubq = req->mq_hctx->driver_data;
- int tag = req->tag;
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+ struct ublk_queue *ubq = pdu->ubq;
+ int tag = pdu->tag;
+ struct request *req = blk_mq_tag_to_rq(
+ ubq->dev->tag_set.tags[ubq->q_id], tag);
struct ublk_io *io = &ubq->ios[tag];
unsigned int mapped_bytes;
@@ -1220,34 +1237,11 @@ static inline void __ublk_rq_task_work(struct request *req,
ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
}
-static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
- unsigned issue_flags)
-{
- struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
- struct ublk_rq_data *data, *tmp;
-
- io_cmds = llist_reverse_order(io_cmds);
- llist_for_each_entry_safe(data, tmp, io_cmds, node)
- __ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
-}
-
-static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
-{
- struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
- struct ublk_queue *ubq = pdu->ubq;
-
- ublk_forward_io_cmds(ubq, issue_flags);
-}
-
static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
{
- struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
-
- if (llist_add(&data->node, &ubq->io_cmds)) {
- struct ublk_io *io = &ubq->ios[rq->tag];
+ struct ublk_io *io = &ubq->ios[rq->tag];
- io_uring_cmd_complete_in_task(io->cmd, ublk_rq_task_work_cb);
- }
+ io_uring_cmd_complete_in_task(io->cmd, ublk_rq_task_work_cb);
}
static enum blk_eh_timer_return ublk_timeout(struct request *rq)
@@ -1440,7 +1434,7 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
struct request *rq;
/*
- * Either we fail the request or ublk_rq_task_work_fn
+ * Either we fail the request or ublk_rq_task_work_cb
* will do it
*/
rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
@@ -1747,6 +1741,42 @@ static inline void ublk_prep_cancel(struct io_uring_cmd *cmd,
io_uring_cmd_mark_cancelable(cmd, issue_flags);
}
+static void ublk_io_release(void *priv)
+{
+ struct request *rq = priv;
+ struct ublk_queue *ubq = rq->mq_hctx->driver_data;
+
+ ublk_put_req_ref(ubq, rq);
+}
+
+static int ublk_register_io_buf(struct io_uring_cmd *cmd,
+ struct ublk_queue *ubq, unsigned int tag,
+ unsigned int index, unsigned int issue_flags)
+{
+ struct ublk_device *ub = cmd->file->private_data;
+ struct request *req;
+ int ret;
+
+ req = __ublk_check_and_get_req(ub, ubq, tag, 0);
+ if (!req)
+ return -EINVAL;
+
+ ret = io_buffer_register_bvec(cmd, req, ublk_io_release, index,
+ issue_flags);
+ if (ret) {
+ ublk_put_req_ref(ubq, req);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ublk_unregister_io_buf(struct io_uring_cmd *cmd,
+ unsigned int index, unsigned int issue_flags)
+{
+ return io_buffer_unregister_bvec(cmd, index, issue_flags);
+}
+
static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
unsigned int issue_flags,
const struct ublksrv_io_cmd *ub_cmd)
@@ -1798,6 +1828,10 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
ret = -EINVAL;
switch (_IOC_NR(cmd_op)) {
+ case UBLK_IO_REGISTER_IO_BUF:
+ return ublk_register_io_buf(cmd, ubq, tag, ub_cmd->addr, issue_flags);
+ case UBLK_IO_UNREGISTER_IO_BUF:
+ return ublk_unregister_io_buf(cmd, ub_cmd->addr, issue_flags);
case UBLK_IO_FETCH_REQ:
/* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
if (ublk_queue_ready(ubq)) {
@@ -1866,10 +1900,9 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
return -EIOCBQUEUED;
out:
- io_uring_cmd_done(cmd, ret, 0, issue_flags);
pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
__func__, cmd_op, tag, ret, io->flags);
- return -EIOCBQUEUED;
+ return ret;
}
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
@@ -1925,7 +1958,10 @@ static inline int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
static void ublk_ch_uring_cmd_cb(struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
- ublk_ch_uring_cmd_local(cmd, issue_flags);
+ int ret = ublk_ch_uring_cmd_local(cmd, issue_flags);
+
+ if (ret != -EIOCBQUEUED)
+ io_uring_cmd_done(cmd, ret, 0, issue_flags);
}
static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
@@ -2190,7 +2226,8 @@ static int ublk_add_chdev(struct ublk_device *ub)
if (ret)
goto fail;
- ublks_added++;
+ if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV)
+ unprivileged_ublks_added++;
return 0;
fail:
put_device(dev);
@@ -2219,11 +2256,16 @@ static int ublk_add_tag_set(struct ublk_device *ub)
static void ublk_remove(struct ublk_device *ub)
{
+ bool unprivileged;
+
ublk_stop_dev(ub);
cancel_work_sync(&ub->nosrv_work);
cdev_device_del(&ub->cdev, &ub->cdev_dev);
+ unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
ublk_put_device(ub);
- ublks_added--;
+
+ if (unprivileged)
+ unprivileged_ublks_added--;
}
static struct ublk_device *ublk_get_device_from_id(int idx)
@@ -2298,6 +2340,9 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
if (ub->params.basic.attrs & UBLK_ATTR_ROTATIONAL)
lim.features |= BLK_FEAT_ROTATIONAL;
+ if (ub->params.types & UBLK_PARAM_TYPE_DMA_ALIGN)
+ lim.dma_alignment = ub->params.dma.alignment;
+
if (wait_for_completion_interruptible(&ub->completion) != 0)
return -EINTR;
@@ -2459,7 +2504,7 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
* buffer by pwrite() to ublk char device, which can't be
* used for unprivileged device
*/
- if (info.flags & UBLK_F_USER_COPY)
+ if (info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY))
return -EINVAL;
}
@@ -2485,7 +2530,8 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
return ret;
ret = -EACCES;
- if (ublks_added >= ublks_max)
+ if ((info.flags & UBLK_F_UNPRIVILEGED_DEV) &&
+ unprivileged_ublks_added >= unprivileged_ublks_max)
goto out_unlock;
ret = -ENOMEM;
@@ -2527,9 +2573,6 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
goto out_free_dev_number;
}
- /* We are not ready to support zero copy */
- ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;
-
ub->dev_info.nr_hw_queues = min_t(unsigned int,
ub->dev_info.nr_hw_queues, nr_cpu_ids);
ublk_align_max_io_size(ub);
@@ -2863,7 +2906,7 @@ static int ublk_ctrl_get_features(struct io_uring_cmd *cmd)
{
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
- u64 features = UBLK_F_ALL & ~UBLK_F_SUPPORT_ZERO_COPY;
+ u64 features = UBLK_F_ALL;
if (header->len != UBLK_FEATURES_LEN || !header->addr)
return -EINVAL;
@@ -3059,10 +3102,9 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
if (ub)
ublk_put_device(ub);
out:
- io_uring_cmd_done(cmd, ret, 0, issue_flags);
pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
__func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
- return -EIOCBQUEUED;
+ return ret;
}
static const struct file_operations ublk_ctl_fops = {
@@ -3126,23 +3168,26 @@ static void __exit ublk_exit(void)
module_init(ublk_init);
module_exit(ublk_exit);
-static int ublk_set_max_ublks(const char *buf, const struct kernel_param *kp)
+static int ublk_set_max_unprivileged_ublks(const char *buf,
+ const struct kernel_param *kp)
{
return param_set_uint_minmax(buf, kp, 0, UBLK_MAX_UBLKS);
}
-static int ublk_get_max_ublks(char *buf, const struct kernel_param *kp)
+static int ublk_get_max_unprivileged_ublks(char *buf,
+ const struct kernel_param *kp)
{
- return sysfs_emit(buf, "%u\n", ublks_max);
+ return sysfs_emit(buf, "%u\n", unprivileged_ublks_max);
}
-static const struct kernel_param_ops ublk_max_ublks_ops = {
- .set = ublk_set_max_ublks,
- .get = ublk_get_max_ublks,
+static const struct kernel_param_ops ublk_max_unprivileged_ublks_ops = {
+ .set = ublk_set_max_unprivileged_ublks,
+ .get = ublk_get_max_unprivileged_ublks,
};
-module_param_cb(ublks_max, &ublk_max_ublks_ops, &ublks_max, 0644);
-MODULE_PARM_DESC(ublks_max, "max number of ublk devices allowed to add(default: 64)");
+module_param_cb(ublks_max, &ublk_max_unprivileged_ublks_ops,
+ &unprivileged_ublks_max, 0644);
+MODULE_PARM_DESC(ublks_max, "max number of unprivileged ublk devices allowed to add (default: 64)");
MODULE_AUTHOR("Ming Lei <ming.lei@redhat.com>");
MODULE_DESCRIPTION("Userspace block device");
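The UBLK_PARAM_TYPE_DMA_ALIGN validation above treats dma.alignment as a mask: it must be a power of two minus one and smaller than the page size, so a hypothetical ublk server wanting 512-byte aligned buffers would pass 511. A small check mirroring that rule:

    #include <stdbool.h>
    #include <stdio.h>

    static bool dma_align_valid(unsigned int alignment, unsigned int page_size)
    {
        unsigned int v = alignment + 1;

        /* alignment + 1 must be a power of two, and the mask below PAGE_SIZE */
        return alignment < page_size && v != 0 && (v & (v - 1)) == 0;
    }

    int main(void)
    {
        printf("511 -> %d, 512 -> %d\n",
               dma_align_valid(511, 4096), dma_align_valid(512, 4096));
        return 0;
    }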
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6a61ec35f426..7cffea01d868 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -226,7 +226,7 @@ static int virtblk_map_data(struct blk_mq_hw_ctx *hctx, struct request *req,
if (unlikely(err))
return -ENOMEM;
- return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl);
+ return blk_rq_map_sg(req, vbr->sg_table.sgl);
}
static void virtblk_cleanup_cmd(struct request *req)
@@ -1207,11 +1207,12 @@ static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
struct request *req = blk_mq_rq_from_pdu(vbr);
+ u8 status = virtblk_vbr_status(vbr);
found++;
if (!blk_mq_complete_request_remote(req) &&
- !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr),
- virtblk_complete_batch))
+ !blk_mq_add_to_batch(req, iob, status != VIRTIO_BLK_S_OK,
+ virtblk_complete_batch))
virtblk_request_done(req);
}
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index edcd08a9dcef..5babe575c288 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -751,7 +751,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
id = blkif_ring_get_request(rinfo, req, &final_ring_req);
ring_req = &rinfo->shadow[id].req;
- num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
+ num_sg = blk_rq_map_sg(req, rinfo->shadow[id].sg);
num_grant = 0;
/* Calculate the number of grant used */
for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i)
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 4ab32abf0f48..7771edf54fb3 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -56,6 +56,18 @@ config BT_HCIBTUSB_POLL_SYNC
Say Y here to enable USB poll_sync for Bluetooth USB devices by
default.
+config BT_HCIBTUSB_AUTO_ISOC_ALT
+ bool "Automatically adjust alternate setting for Isoc endpoints"
+ depends on BT_HCIBTUSB
+ default y if CHROME_PLATFORMS
+ help
+ Say Y here to automatically adjust the alternate setting for
+ HCI_USER_CHANNEL whenever a SCO link is established.
+
+ When enabled, btusb intercepts the HCI_EV_SYNC_CONN_COMPLETE packets
+ and configures the isoc endpoint alternate setting automatically when
+ HCI_USER_CHANNEL is in use.
+
config BT_HCIBTUSB_BCM
bool "Broadcom protocol support"
depends on BT_HCIBTUSB
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index cab93935cc7f..0d6ad50da046 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -365,9 +365,8 @@ static void bfusb_rx_complete(struct urb *urb)
buf += 3;
}
- if (count < len) {
+ if (count < len)
bt_dev_err(data->hdev, "block extends over URB buffer ranges");
- }
if ((hdr & 0xe1) == 0xc1)
bfusb_recv_block(data, hdr, buf, len);
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index d2540b28bc7a..48e2f400957b 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -35,6 +35,11 @@ enum {
DSM_SET_RESET_METHOD = 3,
};
+#define BTINTEL_BT_DOMAIN 0x12
+#define BTINTEL_SAR_LEGACY 0
+#define BTINTEL_SAR_INC_PWR 1
+#define BTINTEL_SAR_INC_PWR_SUPPORTED 0
+
#define CMD_WRITE_BOOT_PARAMS 0xfc0e
struct cmd_write_boot_params {
__le32 boot_addr;
@@ -478,6 +483,7 @@ int btintel_version_info_tlv(struct hci_dev *hdev,
case 0x1c: /* Gale Peak (GaP) */
case 0x1d: /* BlazarU (BzrU) */
case 0x1e: /* BlazarI (Bzr) */
+ case 0x1f: /* Scorpious Peak */
break;
default:
bt_dev_err(hdev, "Unsupported Intel hardware variant (0x%x)",
@@ -2756,6 +2762,7 @@ static int btintel_set_dsbr(struct hci_dev *hdev, struct intel_version_tlv *ver)
/* DSBR command needs to be sent for,
* 1. BlazarI or BlazarIW + B0 step product in IML image.
* 2. Gale Peak2 or BlazarU in OP image.
+ * 3. Scorpious Peak in IML image.
*/
switch (cnvi) {
@@ -2771,6 +2778,10 @@ static int btintel_set_dsbr(struct hci_dev *hdev, struct intel_version_tlv *ver)
hdev->bus == HCI_USB)
break;
return 0;
+ case BTINTEL_CNVI_SCP:
+ if (ver->img_type == BTINTEL_IMG_IML)
+ break;
+ return 0;
default:
return 0;
}
@@ -2800,6 +2811,331 @@ static int btintel_set_dsbr(struct hci_dev *hdev, struct intel_version_tlv *ver)
return 0;
}
+#ifdef CONFIG_ACPI
+static acpi_status btintel_evaluate_acpi_method(struct hci_dev *hdev,
+ acpi_string method,
+ union acpi_object **ptr,
+ u8 pkg_size)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *p;
+ acpi_status status;
+ acpi_handle handle;
+
+ handle = ACPI_HANDLE(GET_HCIDEV_DEV(hdev));
+ if (!handle) {
+ bt_dev_dbg(hdev, "ACPI-BT: No ACPI support for Bluetooth device");
+ return AE_NOT_EXIST;
+ }
+
+ status = acpi_evaluate_object(handle, method, NULL, &buffer);
+
+ if (ACPI_FAILURE(status)) {
+ bt_dev_dbg(hdev, "ACPI-BT: ACPI Failure: %s method: %s",
+ acpi_format_exception(status), method);
+ return status;
+ }
+
+ p = buffer.pointer;
+
+ if (p->type != ACPI_TYPE_PACKAGE || p->package.count < pkg_size) {
+ bt_dev_warn(hdev, "ACPI-BT: Invalid object type: %d or package count: %d",
+ p->type, p->package.count);
+ kfree(buffer.pointer);
+ return AE_ERROR;
+ }
+
+ *ptr = buffer.pointer;
+ return 0;
+}
+
+static union acpi_object *btintel_acpi_get_bt_pkg(union acpi_object *buffer)
+{
+ union acpi_object *domain, *bt_pkg;
+ int i;
+
+ for (i = 1; i < buffer->package.count; i++) {
+ bt_pkg = &buffer->package.elements[i];
+ domain = &bt_pkg->package.elements[0];
+ if (domain->type == ACPI_TYPE_INTEGER &&
+ domain->integer.value == BTINTEL_BT_DOMAIN)
+ return bt_pkg;
+ }
+ return ERR_PTR(-ENOENT);
+}
+
+static int btintel_send_sar_ddc(struct hci_dev *hdev, struct btintel_cp_ddc_write *data, u8 len)
+{
+ struct sk_buff *skb;
+
+ skb = __hci_cmd_sync(hdev, 0xfc8b, len, data, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_warn(hdev, "Failed to send sar ddc id:0x%4.4x (%ld)",
+ le16_to_cpu(data->id), PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+ return 0;
+}
+
+static int btintel_send_edr(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd,
+ int id, struct btintel_sar_inc_pwr *sar)
+{
+ cmd->len = 5;
+ cmd->id = cpu_to_le16(id);
+ cmd->data[0] = sar->br >> 3;
+ cmd->data[1] = sar->edr2 >> 3;
+ cmd->data[2] = sar->edr3 >> 3;
+ return btintel_send_sar_ddc(hdev, cmd, 6);
+}
+
+static int btintel_send_le(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd,
+ int id, struct btintel_sar_inc_pwr *sar)
+{
+ cmd->len = 3;
+ cmd->id = cpu_to_le16(id);
+ cmd->data[0] = min3(sar->le, sar->le_lr, sar->le_2mhz) >> 3;
+ return btintel_send_sar_ddc(hdev, cmd, 4);
+}
+
+static int btintel_send_br(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd,
+ int id, struct btintel_sar_inc_pwr *sar)
+{
+ cmd->len = 3;
+ cmd->id = cpu_to_le16(id);
+ cmd->data[0] = sar->br >> 3;
+ return btintel_send_sar_ddc(hdev, cmd, 4);
+}
+
+static int btintel_send_br_mutual(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd,
+ int id, struct btintel_sar_inc_pwr *sar)
+{
+ cmd->len = 3;
+ cmd->id = cpu_to_le16(id);
+ cmd->data[0] = sar->br;
+ return btintel_send_sar_ddc(hdev, cmd, 4);
+}
+
+static int btintel_send_edr2(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd,
+ int id, struct btintel_sar_inc_pwr *sar)
+{
+ cmd->len = 3;
+ cmd->id = cpu_to_le16(id);
+ cmd->data[0] = sar->edr2;
+ return btintel_send_sar_ddc(hdev, cmd, 4);
+}
+
+static int btintel_send_edr3(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd,
+ int id, struct btintel_sar_inc_pwr *sar)
+{
+ cmd->len = 3;
+ cmd->id = cpu_to_le16(id);
+ cmd->data[0] = sar->edr3;
+ return btintel_send_sar_ddc(hdev, cmd, 4);
+}
+
+static int btintel_set_legacy_sar(struct hci_dev *hdev, struct btintel_sar_inc_pwr *sar)
+{
+ struct btintel_cp_ddc_write *cmd;
+ u8 buffer[64];
+ int ret;
+
+ cmd = (void *)buffer;
+ ret = btintel_send_br(hdev, cmd, 0x0131, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_br(hdev, cmd, 0x0132, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_le(hdev, cmd, 0x0133, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_edr(hdev, cmd, 0x0137, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_edr(hdev, cmd, 0x0138, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_edr(hdev, cmd, 0x013b, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_edr(hdev, cmd, 0x013c, sar);
+
+ return ret;
+}
+
+static int btintel_set_mutual_sar(struct hci_dev *hdev, struct btintel_sar_inc_pwr *sar)
+{
+ struct btintel_cp_ddc_write *cmd;
+ struct sk_buff *skb;
+ u8 buffer[64];
+ bool enable;
+ int ret;
+
+ cmd = (void *)buffer;
+
+ cmd->len = 3;
+ cmd->id = cpu_to_le16(0x019e);
+
+ if (sar->revision == BTINTEL_SAR_INC_PWR &&
+ sar->inc_power_mode == BTINTEL_SAR_INC_PWR_SUPPORTED)
+ cmd->data[0] = 0x01;
+ else
+ cmd->data[0] = 0x00;
+
+ ret = btintel_send_sar_ddc(hdev, cmd, 4);
+ if (ret)
+ return ret;
+
+ if (sar->revision == BTINTEL_SAR_INC_PWR &&
+ sar->inc_power_mode == BTINTEL_SAR_INC_PWR_SUPPORTED) {
+ cmd->len = 3;
+ cmd->id = cpu_to_le16(0x019f);
+ cmd->data[0] = sar->sar_2400_chain_a;
+
+ ret = btintel_send_sar_ddc(hdev, cmd, 4);
+ if (ret)
+ return ret;
+ }
+
+ ret = btintel_send_br_mutual(hdev, cmd, 0x01a0, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_edr2(hdev, cmd, 0x01a1, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_edr3(hdev, cmd, 0x01a2, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_le(hdev, cmd, 0x01a3, sar);
+ if (ret)
+ return ret;
+
+ enable = true;
+ skb = __hci_cmd_sync(hdev, 0xfe25, 1, &enable, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_warn(hdev, "Failed to send Intel SAR Enable (%ld)", PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ kfree_skb(skb);
+ return 0;
+}
+
+static int btintel_sar_send_to_device(struct hci_dev *hdev, struct btintel_sar_inc_pwr *sar,
+ struct intel_version_tlv *ver)
+{
+ u16 cnvi, cnvr;
+ int ret;
+
+ cnvi = ver->cnvi_top & 0xfff;
+ cnvr = ver->cnvr_top & 0xfff;
+
+ if (cnvi < BTINTEL_CNVI_BLAZARI && cnvr < BTINTEL_CNVR_FMP2) {
+ bt_dev_info(hdev, "Applying legacy Bluetooth SAR");
+ ret = btintel_set_legacy_sar(hdev, sar);
+ } else if (cnvi == BTINTEL_CNVI_GAP || cnvr == BTINTEL_CNVR_FMP2) {
+ bt_dev_info(hdev, "Applying mutual Bluetooth SAR");
+ ret = btintel_set_mutual_sar(hdev, sar);
+ } else {
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int btintel_acpi_set_sar(struct hci_dev *hdev, struct intel_version_tlv *ver)
+{
+ union acpi_object *bt_pkg, *buffer = NULL;
+ struct btintel_sar_inc_pwr sar;
+ acpi_status status;
+ u8 revision;
+ int ret;
+
+ status = btintel_evaluate_acpi_method(hdev, "BRDS", &buffer, 2);
+ if (ACPI_FAILURE(status))
+ return -ENOENT;
+
+ bt_pkg = btintel_acpi_get_bt_pkg(buffer);
+
+ if (IS_ERR(bt_pkg)) {
+ ret = PTR_ERR(bt_pkg);
+ goto error;
+ }
+
+ if (!bt_pkg->package.count) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ revision = buffer->package.elements[0].integer.value;
+
+ if (revision > BTINTEL_SAR_INC_PWR) {
+ bt_dev_dbg(hdev, "BT_SAR: revision: 0x%2.2x not supported", revision);
+ ret = -EOPNOTSUPP;
+ goto error;
+ }
+
+ memset(&sar, 0, sizeof(sar));
+
+ if (revision == BTINTEL_SAR_LEGACY && bt_pkg->package.count == 8) {
+ sar.revision = revision;
+ sar.bt_sar_bios = bt_pkg->package.elements[1].integer.value;
+ sar.br = bt_pkg->package.elements[2].integer.value;
+ sar.edr2 = bt_pkg->package.elements[3].integer.value;
+ sar.edr3 = bt_pkg->package.elements[4].integer.value;
+ sar.le = bt_pkg->package.elements[5].integer.value;
+ sar.le_2mhz = bt_pkg->package.elements[6].integer.value;
+ sar.le_lr = bt_pkg->package.elements[7].integer.value;
+
+ } else if (revision == BTINTEL_SAR_INC_PWR && bt_pkg->package.count == 10) {
+ sar.revision = revision;
+ sar.bt_sar_bios = bt_pkg->package.elements[1].integer.value;
+ sar.inc_power_mode = bt_pkg->package.elements[2].integer.value;
+ sar.sar_2400_chain_a = bt_pkg->package.elements[3].integer.value;
+ sar.br = bt_pkg->package.elements[4].integer.value;
+ sar.edr2 = bt_pkg->package.elements[5].integer.value;
+ sar.edr3 = bt_pkg->package.elements[6].integer.value;
+ sar.le = bt_pkg->package.elements[7].integer.value;
+ sar.le_2mhz = bt_pkg->package.elements[8].integer.value;
+ sar.le_lr = bt_pkg->package.elements[9].integer.value;
+ } else {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* Apply only if it is enabled in BIOS */
+ if (sar.bt_sar_bios != 1) {
+ bt_dev_dbg(hdev, "Bluetooth SAR is not enabled");
+ ret = -EOPNOTSUPP;
+ goto error;
+ }
+
+ ret = btintel_sar_send_to_device(hdev, &sar, ver);
+error:
+ kfree(buffer);
+ return ret;
+}
+#endif /* CONFIG_ACPI */
+
+static int btintel_set_specific_absorption_rate(struct hci_dev *hdev,
+ struct intel_version_tlv *ver)
+{
+#ifdef CONFIG_ACPI
+ return btintel_acpi_set_sar(hdev, ver);
+#endif
+ return 0;
+}
+
int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
struct intel_version_tlv *ver)
{
@@ -2877,6 +3213,9 @@ int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
+ /* Send sar values to controller */
+ btintel_set_specific_absorption_rate(hdev, ver);
+
/* Set PPAG feature */
btintel_set_ppag(hdev, ver);
@@ -2919,6 +3258,7 @@ void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
case 0x1c:
case 0x1d:
case 0x1e:
+ case 0x1f:
hci_set_msft_opcode(hdev, 0xFC1E);
break;
default:
@@ -3258,6 +3598,7 @@ static int btintel_setup_combined(struct hci_dev *hdev)
case 0x1b:
case 0x1d:
case 0x1e:
+ case 0x1f:
/* Display version information of TLV type */
btintel_version_info_tlv(hdev, &ver_tlv);
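To make the two BRDS layouts parsed by btintel_acpi_set_sar() above easier to follow, here are the per-domain package element positions as index tables; they simply restate what the code reads (element 0 of the top-level package carries the revision) and are not a definition of the ACPI interface.

    /* Revision 0 (legacy): the Bluetooth domain package has 8 elements. */
    enum brds_legacy_idx {
        BRDS_L_DOMAIN,       /* 0x12 for Bluetooth */
        BRDS_L_BIOS_ENABLE,  /* 1 = SAR control enabled in BIOS */
        BRDS_L_BR,
        BRDS_L_EDR2,
        BRDS_L_EDR3,
        BRDS_L_LE,
        BRDS_L_LE_2MHZ,
        BRDS_L_LE_LR,
    };

    /* Revision 1 (increased power mode): 10 elements. */
    enum brds_inc_pwr_idx {
        BRDS_I_DOMAIN,
        BRDS_I_BIOS_ENABLE,
        BRDS_I_INC_PWR_MODE,
        BRDS_I_SAR_2400_CHAIN_A,
        BRDS_I_BR,
        BRDS_I_EDR2,
        BRDS_I_EDR3,
        BRDS_I_LE,
        BRDS_I_LE_2MHZ,
        BRDS_I_LE_LR,
    };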
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index fa43eb137821..2aece3effa4e 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -56,6 +56,10 @@ struct intel_tlv {
#define BTINTEL_CNVI_BLAZARIW 0x901
#define BTINTEL_CNVI_GAP 0x910
#define BTINTEL_CNVI_BLAZARU 0x930
+#define BTINTEL_CNVI_SCP 0xA00
+
+/* CNVR */
+#define BTINTEL_CNVR_FMP2 0x910
#define BTINTEL_IMG_BOOTLOADER 0x01 /* Bootloader image */
#define BTINTEL_IMG_IML 0x02 /* Intermediate image */
@@ -164,6 +168,26 @@ struct hci_ppag_enable_cmd {
#define INTEL_TLV_DEBUG_EXCEPTION 0x02
#define INTEL_TLV_TEST_EXCEPTION 0xDE
+struct btintel_cp_ddc_write {
+ u8 len;
+ __le16 id;
+ u8 data[];
+} __packed;
+
+/* Bluetooth SAR feature (BRDS), Revision 1 */
+struct btintel_sar_inc_pwr {
+ u8 revision;
+ u32 bt_sar_bios; /* Mode of SAR control to be used, 1: enabled in BIOS */
+ u32 inc_power_mode; /* Increased power mode */
+ u8 sar_2400_chain_a; /* SAR power restriction LB */
+ u8 br;
+ u8 edr2;
+ u8 edr3;
+ u8 le;
+ u8 le_2mhz;
+ u8 le_lr;
+};
+
#define INTEL_HW_PLATFORM(cnvx_bt) ((u8)(((cnvx_bt) & 0x0000ff00) >> 8))
#define INTEL_HW_VARIANT(cnvx_bt) ((u8)(((cnvx_bt) & 0x003f0000) >> 16))
#define INTEL_CNVX_TOP_TYPE(cnvx_top) ((cnvx_top) & 0x00000fff)
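As a quick sanity check of the field extraction these macros perform, a standalone snippet with a hypothetical cnvi_bt value (0x1f is the new hardware variant added in the switch cases above):

    #include <assert.h>
    #include <stdint.h>

    #define INTEL_HW_PLATFORM(cnvx_bt)  ((uint8_t)(((cnvx_bt) & 0x0000ff00) >> 8))
    #define INTEL_HW_VARIANT(cnvx_bt)   ((uint8_t)(((cnvx_bt) & 0x003f0000) >> 16))

    int main(void)
    {
        uint32_t cnvi_bt = 0x001f3700;  /* hypothetical register value */

        assert(INTEL_HW_VARIANT(cnvi_bt) == 0x1f);
        assert(INTEL_HW_PLATFORM(cnvi_bt) == 0x37);
        return 0;
    }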
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index 091ffe3e1495..c1e69fcc9c4f 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -36,6 +36,7 @@
/* Intel Bluetooth PCIe device id table */
static const struct pci_device_id btintel_pcie_table[] = {
{ BTINTEL_PCI_DEVICE(0xA876, PCI_ANY_ID) },
+ { BTINTEL_PCI_DEVICE(0xE476, PCI_ANY_ID) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, btintel_pcie_table);
@@ -48,6 +49,19 @@ MODULE_DEVICE_TABLE(pci, btintel_pcie_table);
#define BTINTEL_PCIE_HCI_EVT_PKT 0x00000004
#define BTINTEL_PCIE_HCI_ISO_PKT 0x00000005
+#define BTINTEL_PCIE_MAGIC_NUM 0xA5A5A5A5
+
+#define BTINTEL_PCIE_BLZR_HWEXP_SIZE 1024
+#define BTINTEL_PCIE_BLZR_HWEXP_DMP_ADDR 0xB00A7C00
+
+#define BTINTEL_PCIE_SCP_HWEXP_SIZE 4096
+#define BTINTEL_PCIE_SCP_HWEXP_DMP_ADDR 0xB030F800
+
+#define BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER 0x17A2
+#define BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT 0x1E61
+
/* Alive interrupt context */
enum {
BTINTEL_PCIE_ROM,
@@ -59,6 +73,83 @@ enum {
BTINTEL_PCIE_D3
};
+/* Structure for dbgc fragment buffer
+ * @buf_addr_lsb: LSB of the buffer's physical address
+ * @buf_addr_msb: MSB of the buffer's physical address
+ * @buf_size: Total size of the buffer
+ */
+struct btintel_pcie_dbgc_ctxt_buf {
+ u32 buf_addr_lsb;
+ u32 buf_addr_msb;
+ u32 buf_size;
+};
+
+/* Structure for dbgc fragment
+ * @magic_num: 0XA5A5A5A5
+ * @ver: For Driver-FW compatibility
+ * @total_size: Total size of the payload debug info
+ * @num_buf: Num of allocated debug bufs
+ * @bufs: All buffer's addresses and sizes
+ */
+struct btintel_pcie_dbgc_ctxt {
+ u32 magic_num;
+ u32 ver;
+ u32 total_size;
+ u32 num_buf;
+ struct btintel_pcie_dbgc_ctxt_buf bufs[BTINTEL_PCIE_DBGC_BUFFER_COUNT];
+};
+
+/* This function initializes the memory for DBGC buffers and formats the
+ * DBGC fragment, which consists of header info and each DBGC buffer's LSB,
+ * MSB and size as the payload.
+ */
+static int btintel_pcie_setup_dbgc(struct btintel_pcie_data *data)
+{
+ struct btintel_pcie_dbgc_ctxt db_frag;
+ struct data_buf *buf;
+ int i;
+
+ data->dbgc.count = BTINTEL_PCIE_DBGC_BUFFER_COUNT;
+ data->dbgc.bufs = devm_kcalloc(&data->pdev->dev, data->dbgc.count,
+ sizeof(*buf), GFP_KERNEL);
+ if (!data->dbgc.bufs)
+ return -ENOMEM;
+
+ data->dbgc.buf_v_addr = dmam_alloc_coherent(&data->pdev->dev,
+ data->dbgc.count *
+ BTINTEL_PCIE_DBGC_BUFFER_SIZE,
+ &data->dbgc.buf_p_addr,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!data->dbgc.buf_v_addr)
+ return -ENOMEM;
+
+ data->dbgc.frag_v_addr = dmam_alloc_coherent(&data->pdev->dev,
+ sizeof(struct btintel_pcie_dbgc_ctxt),
+ &data->dbgc.frag_p_addr,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!data->dbgc.frag_v_addr)
+ return -ENOMEM;
+
+ data->dbgc.frag_size = sizeof(struct btintel_pcie_dbgc_ctxt);
+
+ db_frag.magic_num = BTINTEL_PCIE_MAGIC_NUM;
+ db_frag.ver = BTINTEL_PCIE_DBGC_FRAG_VERSION;
+ db_frag.total_size = BTINTEL_PCIE_DBGC_FRAG_PAYLOAD_SIZE;
+ db_frag.num_buf = BTINTEL_PCIE_DBGC_FRAG_BUFFER_COUNT;
+
+ for (i = 0; i < data->dbgc.count; i++) {
+ buf = &data->dbgc.bufs[i];
+ buf->data_p_addr = data->dbgc.buf_p_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
+ buf->data = data->dbgc.buf_v_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
+ db_frag.bufs[i].buf_addr_lsb = lower_32_bits(buf->data_p_addr);
+ db_frag.bufs[i].buf_addr_msb = upper_32_bits(buf->data_p_addr);
+ db_frag.bufs[i].buf_size = BTINTEL_PCIE_DBGC_BUFFER_SIZE;
+ }
+
+ memcpy(data->dbgc.frag_v_addr, &db_frag, sizeof(db_frag));
+ return 0;
+}
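The constants used here tie together arithmetically; a standalone sketch that mirrors btintel_pcie_dbgc_ctxt and checks the 12-byte header / 196-byte payload split declared in btintel_pcie.h (struct and macro names are local to the sketch):

    #include <assert.h>
    #include <stdint.h>

    #define BUFFER_COUNT 16

    struct dbgc_buf { uint32_t lsb, msb, size; };

    struct dbgc_frag {
        uint32_t magic;       /* 0xA5A5A5A5 */
        uint32_t ver;
        uint32_t total_size;
        uint32_t num_buf;
        struct dbgc_buf bufs[BUFFER_COUNT];
    };

    int main(void)
    {
        /* num_buf (4) + 16 * (lsb + msb + size) = 196, the payload size */
        assert(4 + BUFFER_COUNT * (int)sizeof(struct dbgc_buf) == 196);
        /* magic (4) + ver (4) + total_size (4) = 12, the header size */
        assert((int)sizeof(struct dbgc_frag) == 12 + 196);
        return 0;
    }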
+
static inline void ipc_print_ia_ring(struct hci_dev *hdev, struct ia *ia,
u16 queue_num)
{
@@ -273,6 +364,271 @@ static int btintel_pcie_reset_bt(struct btintel_pcie_data *data)
return reg == 0 ? 0 : -ENODEV;
}
+static void btintel_pcie_mac_init(struct btintel_pcie_data *data)
+{
+ u32 reg;
+
+ /* Set MAC_INIT bit to start primary bootloader */
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+ reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
+ reg |= (BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
+}
+
+static int btintel_pcie_add_dmp_data(struct hci_dev *hdev, const void *data, int size)
+{
+ struct sk_buff *skb;
+ int err;
+
+ skb = alloc_skb(size, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_data(skb, data, size);
+ err = hci_devcd_append(hdev, skb);
+ if (err) {
+ bt_dev_err(hdev, "Failed to append data in the coredump");
+ return err;
+ }
+
+ return 0;
+}
+
+static int btintel_pcie_get_mac_access(struct btintel_pcie_data *data)
+{
+ u32 reg;
+ int retry = 15;
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+
+ reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS;
+ reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ;
+ if ((reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS) == 0)
+ reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ;
+
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
+
+ do {
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+ if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS)
+ return 0;
+ /* Need delay here for Target Access hardware to settle down */
+ usleep_range(1000, 1200);
+
+ } while (--retry > 0);
+
+ return -ETIME;
+}
+
+static void btintel_pcie_release_mac_access(struct btintel_pcie_data *data)
+{
+ u32 reg;
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+
+ if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ)
+ reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ;
+
+ if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS)
+ reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS;
+
+ if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ)
+ reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ;
+
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
+}
+
+static void btintel_pcie_copy_tlv(struct sk_buff *skb, enum btintel_pcie_tlv_type type,
+ void *data, int size)
+{
+ struct intel_tlv *tlv;
+
+ tlv = skb_put(skb, sizeof(*tlv) + size);
+ tlv->type = type;
+ tlv->len = size;
+ memcpy(tlv->val, data, tlv->len);
+}
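The coredump header built below is a stream of intel_tlv records (1-byte type, 1-byte length, value); a minimal host-side walker over such a stream might look like this, with made-up sample bytes and helper names:

    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    /* Walk a type(1)/len(1)/value(len) stream like the one built by
     * btintel_pcie_copy_tlv(). Purely illustrative.
     */
    static void walk_tlvs(const uint8_t *buf, size_t len)
    {
        size_t off = 0;

        while (off + 2 <= len) {
            uint8_t type = buf[off];
            uint8_t vlen = buf[off + 1];

            if (off + 2 + vlen > len)
                break;
            printf("tlv type %u, %u byte(s) of value\n", type, vlen);
            off += 2 + vlen;
        }
    }

    int main(void)
    {
        /* sample data: type 0 with a 4-byte value, type 3 with 2 bytes */
        const uint8_t sample[] = { 0x00, 0x04, 0x00, 0x37, 0x1f, 0x00,
                                   0x03, 0x02, 0x61, 0x1e };

        walk_tlvs(sample, sizeof(sample));
        return 0;
    }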
+
+static int btintel_pcie_read_dram_buffers(struct btintel_pcie_data *data)
+{
+ u32 offset, prev_size, wr_ptr_status, dump_size, i;
+ struct btintel_pcie_dbgc *dbgc = &data->dbgc;
+ u8 buf_idx, dump_time_len, fw_build;
+ struct hci_dev *hdev = data->hdev;
+ struct intel_tlv *tlv;
+ struct timespec64 now;
+ struct sk_buff *skb;
+ struct tm tm_now;
+ char buf[256];
+ u16 hdr_len;
+ int ret;
+
+ wr_ptr_status = btintel_pcie_rd_dev_mem(data, BTINTEL_PCIE_DBGC_CUR_DBGBUFF_STATUS);
+ offset = wr_ptr_status & BTINTEL_PCIE_DBG_OFFSET_BIT_MASK;
+
+ buf_idx = BTINTEL_PCIE_DBGC_DBG_BUF_IDX(wr_ptr_status);
+ if (buf_idx > dbgc->count) {
+ bt_dev_warn(hdev, "Buffer index is invalid");
+ return -EINVAL;
+ }
+
+ prev_size = buf_idx * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
+ if (prev_size + offset >= prev_size)
+ data->dmp_hdr.write_ptr = prev_size + offset;
+ else
+ return -EINVAL;
+
+ ktime_get_real_ts64(&now);
+ time64_to_tm(now.tv_sec, 0, &tm_now);
+ dump_time_len = snprintf(buf, sizeof(buf), "Dump Time: %02d-%02d-%04ld %02d:%02d:%02d",
+ tm_now.tm_mday, tm_now.tm_mon + 1, tm_now.tm_year + 1900,
+ tm_now.tm_hour, tm_now.tm_min, tm_now.tm_sec);
+
+ fw_build = snprintf(buf + dump_time_len, sizeof(buf) - dump_time_len,
+ "Firmware Timestamp: Year %u WW %02u buildtype %u build %u",
+ 2000 + (data->dmp_hdr.fw_timestamp >> 8),
+ data->dmp_hdr.fw_timestamp & 0xff, data->dmp_hdr.fw_build_type,
+ data->dmp_hdr.fw_build_num);
+
+ hdr_len = sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_bt) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.write_ptr) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.wrap_ctr) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.trigger_reason) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.fw_git_sha1) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.cnvr_top) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_top) +
+ sizeof(*tlv) + dump_time_len +
+ sizeof(*tlv) + fw_build;
+
+ dump_size = hdr_len + sizeof(hdr_len);
+
+ skb = alloc_skb(dump_size, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Add debug buffers data length to dump size */
+ dump_size += BTINTEL_PCIE_DBGC_BUFFER_SIZE * dbgc->count;
+
+ ret = hci_devcd_init(hdev, dump_size);
+ if (ret) {
+ bt_dev_err(hdev, "Failed to init devcoredump, err %d", ret);
+ kfree_skb(skb);
+ return ret;
+ }
+
+ skb_put_data(skb, &hdr_len, sizeof(hdr_len));
+
+ btintel_pcie_copy_tlv(skb, BTINTEL_CNVI_BT, &data->dmp_hdr.cnvi_bt,
+ sizeof(data->dmp_hdr.cnvi_bt));
+
+ btintel_pcie_copy_tlv(skb, BTINTEL_WRITE_PTR, &data->dmp_hdr.write_ptr,
+ sizeof(data->dmp_hdr.write_ptr));
+
+ data->dmp_hdr.wrap_ctr = btintel_pcie_rd_dev_mem(data,
+ BTINTEL_PCIE_DBGC_DBGBUFF_WRAP_ARND);
+
+ btintel_pcie_copy_tlv(skb, BTINTEL_WRAP_CTR, &data->dmp_hdr.wrap_ctr,
+ sizeof(data->dmp_hdr.wrap_ctr));
+
+ btintel_pcie_copy_tlv(skb, BTINTEL_TRIGGER_REASON, &data->dmp_hdr.trigger_reason,
+ sizeof(data->dmp_hdr.trigger_reason));
+
+ btintel_pcie_copy_tlv(skb, BTINTEL_FW_SHA, &data->dmp_hdr.fw_git_sha1,
+ sizeof(data->dmp_hdr.fw_git_sha1));
+
+ btintel_pcie_copy_tlv(skb, BTINTEL_CNVR_TOP, &data->dmp_hdr.cnvr_top,
+ sizeof(data->dmp_hdr.cnvr_top));
+
+ btintel_pcie_copy_tlv(skb, BTINTEL_CNVI_TOP, &data->dmp_hdr.cnvi_top,
+ sizeof(data->dmp_hdr.cnvi_top));
+
+ btintel_pcie_copy_tlv(skb, BTINTEL_DUMP_TIME, buf, dump_time_len);
+
+ btintel_pcie_copy_tlv(skb, BTINTEL_FW_BUILD, buf + dump_time_len, fw_build);
+
+ ret = hci_devcd_append(hdev, skb);
+ if (ret)
+ goto exit_err;
+
+ for (i = 0; i < dbgc->count; i++) {
+ ret = btintel_pcie_add_dmp_data(hdev, dbgc->bufs[i].data,
+ BTINTEL_PCIE_DBGC_BUFFER_SIZE);
+ if (ret)
+ break;
+ }
+
+exit_err:
+ hci_devcd_complete(hdev);
+ return ret;
+}
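The write-pointer register decoded at the top of this function packs the active buffer index into bits 27:24 and the byte offset into bits 23:0 (per the masks in btintel_pcie.h); a tiny standalone check with a made-up register value:

    #include <assert.h>
    #include <stdint.h>

    #define DBG_IDX_MASK        0x0F
    #define DBG_BUF_IDX(v)      (((v) >> 24) & DBG_IDX_MASK)
    #define DBG_OFFSET_MASK     0xFFFFFF
    #define DBGC_BUFFER_SIZE    (256 * 1024)

    int main(void)
    {
        uint32_t wr_ptr_status = 0x05012345;  /* made-up register value */
        uint32_t idx = DBG_BUF_IDX(wr_ptr_status);
        uint32_t offset = wr_ptr_status & DBG_OFFSET_MASK;

        assert(idx == 5);
        assert(offset == 0x012345);
        /* absolute write pointer within the contiguous DMA area */
        assert(idx * DBGC_BUFFER_SIZE + offset == 5 * 256 * 1024 + 0x12345);
        return 0;
    }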
+
+static void btintel_pcie_dump_traces(struct hci_dev *hdev)
+{
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+ int ret = 0;
+
+ ret = btintel_pcie_get_mac_access(data);
+ if (ret) {
+ bt_dev_err(hdev, "Failed to get mac access: (%d)", ret);
+ return;
+ }
+
+ ret = btintel_pcie_read_dram_buffers(data);
+
+ btintel_pcie_release_mac_access(data);
+
+ if (ret)
+ bt_dev_err(hdev, "Failed to dump traces: (%d)", ret);
+}
+
+static void btintel_pcie_dump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+ u16 len = skb->len;
+ u16 *hdrlen_ptr;
+ char buf[80];
+
+ hdrlen_ptr = skb_put_zero(skb, sizeof(len));
+
+ snprintf(buf, sizeof(buf), "Controller Name: 0x%X\n",
+ INTEL_HW_VARIANT(data->dmp_hdr.cnvi_bt));
+ skb_put_data(skb, buf, strlen(buf));
+
+ snprintf(buf, sizeof(buf), "Firmware Build Number: %u\n",
+ data->dmp_hdr.fw_build_num);
+ skb_put_data(skb, buf, strlen(buf));
+
+ snprintf(buf, sizeof(buf), "Driver: %s\n", data->dmp_hdr.driver_name);
+ skb_put_data(skb, buf, strlen(buf));
+
+ snprintf(buf, sizeof(buf), "Vendor: Intel\n");
+ skb_put_data(skb, buf, strlen(buf));
+
+ *hdrlen_ptr = skb->len - len;
+}
+
+static void btintel_pcie_dump_notify(struct hci_dev *hdev, int state)
+{
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+
+ switch (state) {
+ case HCI_DEVCOREDUMP_IDLE:
+ data->dmp_hdr.state = HCI_DEVCOREDUMP_IDLE;
+ break;
+ case HCI_DEVCOREDUMP_ACTIVE:
+ data->dmp_hdr.state = HCI_DEVCOREDUMP_ACTIVE;
+ break;
+ case HCI_DEVCOREDUMP_TIMEOUT:
+ case HCI_DEVCOREDUMP_ABORT:
+ case HCI_DEVCOREDUMP_DONE:
+ data->dmp_hdr.state = HCI_DEVCOREDUMP_IDLE;
+ break;
+ }
+}
+
/* This function enables BT function by setting BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT bit in
* BTINTEL_PCIE_CSR_FUNC_CTRL_REG register and wait for MSI-X with
* BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0.
@@ -329,20 +685,6 @@ static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
return 0;
}
-/* BIT(0) - ROM, BIT(1) - IML and BIT(3) - OP
- * Sometimes during firmware image switching from ROM to IML or IML to OP image,
- * the previous image bit is not cleared by firmware when alive interrupt is
- * received. Driver needs to take care of these sticky bits when deciding the
- * current image running on controller.
- * Ex: 0x10 and 0x11 - both represents that controller is running IML
- */
-static inline bool btintel_pcie_in_rom(struct btintel_pcie_data *data)
-{
- return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ROM &&
- !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML) &&
- !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW);
-}
-
static inline bool btintel_pcie_in_op(struct btintel_pcie_data *data)
{
return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW;
@@ -393,6 +735,27 @@ static inline char *btintel_pcie_alivectxt_state2str(u32 alive_intr_ctxt)
}
}
+static int btintel_pcie_read_device_mem(struct btintel_pcie_data *data,
+ void *buf, u32 dev_addr, int len)
+{
+ int err;
+ u32 *val = buf;
+
+ /* Get device mac access */
+ err = btintel_pcie_get_mac_access(data);
+ if (err) {
+ bt_dev_err(data->hdev, "Failed to get mac access %d", err);
+ return err;
+ }
+
+ for (; len > 0; len -= 4, dev_addr += 4, val++)
+ *val = btintel_pcie_rd_dev_mem(data, dev_addr);
+
+ btintel_pcie_release_mac_access(data);
+
+ return 0;
+}
+
/* This function handles the MSI-X interrupt for gp0 cause (bit 0 in
* BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES) which is sent for boot stage and image response.
*/
@@ -714,6 +1077,126 @@ exit_error:
return ret;
}
+static void btintel_pcie_read_hwexp(struct btintel_pcie_data *data)
+{
+ int len, err, offset, pending;
+ struct sk_buff *skb;
+ u8 *buf, prefix[64];
+ u32 addr, val;
+ u16 pkt_len;
+
+ struct tlv {
+ u8 type;
+ __le16 len;
+ u8 val[];
+ } __packed;
+
+ struct tlv *tlv;
+
+ switch (data->dmp_hdr.cnvi_top & 0xfff) {
+ case BTINTEL_CNVI_BLAZARI:
+ case BTINTEL_CNVI_BLAZARIW:
+ /* only from step B0 onwards */
+ if (INTEL_CNVX_TOP_STEP(data->dmp_hdr.cnvi_top) != 0x01)
+ return;
+ len = BTINTEL_PCIE_BLZR_HWEXP_SIZE; /* exception data length */
+ addr = BTINTEL_PCIE_BLZR_HWEXP_DMP_ADDR;
+ break;
+ case BTINTEL_CNVI_SCP:
+ len = BTINTEL_PCIE_SCP_HWEXP_SIZE;
+ addr = BTINTEL_PCIE_SCP_HWEXP_DMP_ADDR;
+ break;
+ default:
+ bt_dev_err(data->hdev, "Unsupported cnvi 0x%8.8x", data->dmp_hdr.cnvi_top);
+ return;
+ }
+
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf)
+ goto exit_on_error;
+
+ btintel_pcie_mac_init(data);
+
+ err = btintel_pcie_read_device_mem(data, buf, addr, len);
+ if (err)
+ goto exit_on_error;
+
+ val = get_unaligned_le32(buf);
+ if (val != BTINTEL_PCIE_MAGIC_NUM) {
+ bt_dev_err(data->hdev, "Invalid exception dump signature: 0x%8.8x",
+ val);
+ goto exit_on_error;
+ }
+
+ snprintf(prefix, sizeof(prefix), "Bluetooth: %s: ", bt_dev_name(data->hdev));
+
+ offset = 4;
+ do {
+ pending = len - offset;
+ if (pending < sizeof(*tlv))
+ break;
+ tlv = (struct tlv *)(buf + offset);
+
+ /* If type == 0, then there are no more TLVs to be parsed */
+ if (!tlv->type) {
+ bt_dev_dbg(data->hdev, "Invalid TLV type 0");
+ break;
+ }
+ pkt_len = le16_to_cpu(tlv->len);
+ offset += sizeof(*tlv);
+ pending = len - offset;
+ if (pkt_len > pending)
+ break;
+
+ offset += pkt_len;
+
+ /* Only TLVs of type == 1 are HCI events, no need to process other
+ * TLVs
+ */
+ if (tlv->type != 1)
+ continue;
+
+ bt_dev_dbg(data->hdev, "TLV packet length: %u", pkt_len);
+ if (pkt_len > HCI_MAX_EVENT_SIZE)
+ break;
+ skb = bt_skb_alloc(pkt_len, GFP_KERNEL);
+ if (!skb)
+ goto exit_on_error;
+ hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
+ skb_put_data(skb, tlv->val, pkt_len);
+
+ /* Copy the Intel-specific PCIe packet type */
+ val = BTINTEL_PCIE_HCI_EVT_PKT;
+ memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &val,
+ BTINTEL_PCIE_HCI_TYPE_LEN);
+
+ print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_OFFSET, 16, 1,
+ tlv->val, pkt_len, false);
+
+ btintel_pcie_recv_frame(data, skb);
+ } while (offset < len);
+
+exit_on_error:
+ kfree(buf);
+}
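The exception dump parsed above is framed as a little-endian 32-bit magic followed by type / 16-bit length / value records, where type 1 wraps an HCI event; a condensed standalone parser over sample bytes (helper names and data are illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    #define HWEXP_MAGIC 0xA5A5A5A5u

    static uint32_t get_le32(const uint8_t *p)
    {
        return p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    static uint16_t get_le16(const uint8_t *p)
    {
        return p[0] | (p[1] << 8);
    }

    static void parse_hwexp(const uint8_t *buf, size_t len)
    {
        size_t off = 4;

        if (len < 4 || get_le32(buf) != HWEXP_MAGIC)
            return;
        while (off + 3 <= len) {
            uint8_t type = buf[off];
            uint16_t plen = get_le16(buf + off + 1);

            if (!type || off + 3 + plen > len)
                break;          /* type 0 ends the list */
            if (type == 1)
                printf("HCI event, %u bytes\n", plen);
            off += 3 + plen;
        }
    }

    int main(void)
    {
        /* magic, then one type-1 TLV wrapping a 3-byte HCI event (sample) */
        const uint8_t sample[] = { 0xA5, 0xA5, 0xA5, 0xA5,
                                   0x01, 0x03, 0x00, 0x10, 0x01, 0x00 };

        parse_hwexp(sample, sizeof(sample));
        return 0;
    }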
+
+static void btintel_pcie_msix_hw_exp_handler(struct btintel_pcie_data *data)
+{
+ bt_dev_err(data->hdev, "Received hw exception interrupt");
+
+ if (test_and_set_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags))
+ return;
+
+ if (test_and_set_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags))
+ return;
+
+ /* Trigger device core dump when there is HW exception */
+ if (!test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags))
+ data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT;
+
+ queue_work(data->workqueue, &data->rx_work);
+}
+
static void btintel_pcie_rx_work(struct work_struct *work)
{
struct btintel_pcie_data *data = container_of(work,
@@ -722,6 +1205,23 @@ static void btintel_pcie_rx_work(struct work_struct *work)
int err;
struct hci_dev *hdev = data->hdev;
+ if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) {
+ /* Unlike USB products, the controller does not send a hardware
+ * exception event on exception. Instead it writes the hardware
+ * exception event to device memory along with optional debug
+ * events, raises an MSI-X interrupt and halts. The driver reads
+ * the exception event from device memory and passes it to the
+ * stack for further processing.
+ */
+ btintel_pcie_read_hwexp(data);
+ clear_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags);
+ }
+
+ if (test_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) {
+ btintel_pcie_dump_traces(data->hdev);
+ clear_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags);
+ }
+
/* Process the sk_buf in queue and send to the HCI layer */
while ((skb = skb_dequeue(&data->rx_skb_q))) {
err = btintel_pcie_recv_frame(data, skb);
@@ -840,6 +1340,10 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
return IRQ_NONE;
}
+ /* This interrupt is raised when there is a hardware exception */
+ if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP)
+ btintel_pcie_msix_hw_exp_handler(data);
+
/* This interrupt is triggered by the firmware after updating
* boot_stage register and image_response register
*/
@@ -920,7 +1424,8 @@ struct btintel_pcie_causes_list {
static struct btintel_pcie_causes_list causes_list[] = {
{ BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, 0x00 },
{ BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, 0x01 },
- { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x20 },
+ { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x20 },
+ { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x23 },
};
/* This function configures the interrupt masks for both HW_INT_CAUSES and
@@ -1007,6 +1512,11 @@ static void btintel_pcie_init_ci(struct btintel_pcie_data *data,
ci->addr_urbdq1 = data->rxq.urbd1s_p_addr;
ci->num_urbdq1 = data->rxq.count;
ci->urbdq_db_vec = BTINTEL_PCIE_RXQ_NUM;
+
+ ci->dbg_output_mode = 0x01;
+ ci->dbgc_addr = data->dbgc.frag_p_addr;
+ ci->dbgc_size = data->dbgc.frag_size;
+ ci->dbg_preset = 0x00;
}
static void btintel_pcie_free_txq_bufs(struct btintel_pcie_data *data,
@@ -1219,6 +1729,11 @@ static int btintel_pcie_alloc(struct btintel_pcie_data *data)
/* Setup Index Array */
btintel_pcie_setup_ia(data, p_addr, v_addr, &data->ia);
+ /* Setup data buffers for dbgc */
+ err = btintel_pcie_setup_dbgc(data);
+ if (err)
+ goto exit_error_txq;
+
/* Setup Context Information */
p_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;
v_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;
@@ -1392,6 +1907,7 @@ static void btintel_pcie_release_hdev(struct btintel_pcie_data *data)
static int btintel_pcie_setup_internal(struct hci_dev *hdev)
{
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
const u8 param[1] = { 0xFF };
struct intel_version_tlv ver_tlv;
struct sk_buff *skb;
@@ -1449,6 +1965,7 @@ static int btintel_pcie_setup_internal(struct hci_dev *hdev)
*/
switch (INTEL_HW_VARIANT(ver_tlv.cnvi_bt)) {
case 0x1e: /* BzrI */
+ case 0x1f: /* ScP */
/* Display version information of TLV type */
btintel_version_info_tlv(hdev, &ver_tlv);
@@ -1474,6 +1991,23 @@ static int btintel_pcie_setup_internal(struct hci_dev *hdev)
break;
}
+ data->dmp_hdr.cnvi_top = ver_tlv.cnvi_top;
+ data->dmp_hdr.cnvr_top = ver_tlv.cnvr_top;
+ data->dmp_hdr.fw_timestamp = ver_tlv.timestamp;
+ data->dmp_hdr.fw_build_type = ver_tlv.build_type;
+ data->dmp_hdr.fw_build_num = ver_tlv.build_num;
+ data->dmp_hdr.cnvi_bt = ver_tlv.cnvi_bt;
+
+ if (ver_tlv.img_type == 0x02 || ver_tlv.img_type == 0x03)
+ data->dmp_hdr.fw_git_sha1 = ver_tlv.git_sha1;
+
+ err = hci_devcd_register(hdev, btintel_pcie_dump_traces, btintel_pcie_dump_hdr,
+ btintel_pcie_dump_notify);
+ if (err) {
+ bt_dev_err(hdev, "Failed to register coredump (%d)", err);
+ goto exit_error;
+ }
+
btintel_print_fseq_info(hdev);
exit_error:
kfree_skb(skb);
@@ -1538,6 +2072,7 @@ static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
goto exit_error;
}
+ data->dmp_hdr.driver_name = KBUILD_MODNAME;
return 0;
exit_error:
@@ -1650,11 +2185,28 @@ static void btintel_pcie_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
}
+#ifdef CONFIG_DEV_COREDUMP
+static void btintel_pcie_coredump(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct btintel_pcie_data *data = pci_get_drvdata(pdev);
+
+ if (test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags))
+ return;
+
+ data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER;
+ queue_work(data->workqueue, &data->rx_work);
+}
+#endif
+
static struct pci_driver btintel_pcie_driver = {
.name = KBUILD_MODNAME,
.id_table = btintel_pcie_table,
.probe = btintel_pcie_probe,
.remove = btintel_pcie_remove,
+#ifdef CONFIG_DEV_COREDUMP
+ .driver.coredump = btintel_pcie_coredump
+#endif
};
module_pci_driver(btintel_pcie_driver);
diff --git a/drivers/bluetooth/btintel_pcie.h b/drivers/bluetooth/btintel_pcie.h
index f9aada0543c4..873178019cad 100644
--- a/drivers/bluetooth/btintel_pcie.h
+++ b/drivers/bluetooth/btintel_pcie.h
@@ -16,6 +16,8 @@
#define BTINTEL_PCIE_CSR_CI_ADDR_LSB_REG (BTINTEL_PCIE_CSR_BASE + 0x118)
#define BTINTEL_PCIE_CSR_CI_ADDR_MSB_REG (BTINTEL_PCIE_CSR_BASE + 0x11C)
#define BTINTEL_PCIE_CSR_IMG_RESPONSE_REG (BTINTEL_PCIE_CSR_BASE + 0x12C)
+#define BTINTEL_PCIE_PRPH_DEV_ADDR_REG (BTINTEL_PCIE_CSR_BASE + 0x440)
+#define BTINTEL_PCIE_PRPH_DEV_RD_REG (BTINTEL_PCIE_CSR_BASE + 0x458)
#define BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR (BTINTEL_PCIE_CSR_BASE + 0x460)
/* BTINTEL_PCIE_CSR Function Control Register */
@@ -23,6 +25,12 @@
#define BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT (BIT(6))
#define BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT (BIT(7))
#define BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS (BIT(20))
+
+#define BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ (BIT(21))
+/* Stop MAC Access disconnection request */
+#define BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS (BIT(22))
+#define BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ (BIT(23))
+
#define BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_STS (BIT(28))
#define BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON (BIT(29))
#define BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET (BIT(31))
@@ -48,6 +56,30 @@
#define BTINTEL_PCIE_CSR_MSIX_IVAR_BASE (BTINTEL_PCIE_CSR_MSIX_BASE + 0x0880)
#define BTINTEL_PCIE_CSR_MSIX_IVAR(cause) (BTINTEL_PCIE_CSR_MSIX_IVAR_BASE + (cause))
+/* IOSF Debug Register */
+#define BTINTEL_PCIE_DBGC_BASE_ADDR (0xf3800300)
+#define BTINTEL_PCIE_DBGC_CUR_DBGBUFF_STATUS (BTINTEL_PCIE_DBGC_BASE_ADDR + 0x1C)
+#define BTINTEL_PCIE_DBGC_DBGBUFF_WRAP_ARND (BTINTEL_PCIE_DBGC_BASE_ADDR + 0x2C)
+
+#define BTINTEL_PCIE_DBG_IDX_BIT_MASK 0x0F
+#define BTINTEL_PCIE_DBGC_DBG_BUF_IDX(data) (((data) >> 24) & BTINTEL_PCIE_DBG_IDX_BIT_MASK)
+#define BTINTEL_PCIE_DBG_OFFSET_BIT_MASK 0xFFFFFF
+
+/* DRAM debug buffer count, size of each buffer, and
+ * fragment buffer size
+ */
+#define BTINTEL_PCIE_DBGC_BUFFER_COUNT 16
+#define BTINTEL_PCIE_DBGC_BUFFER_SIZE (256 * 1024) /* 256 KB */
+
+#define BTINTEL_PCIE_DBGC_FRAG_VERSION 1
+#define BTINTEL_PCIE_DBGC_FRAG_BUFFER_COUNT BTINTEL_PCIE_DBGC_BUFFER_COUNT
+
+/* Magic number (4) + version (4) + payload length (4) */
+#define BTINTEL_PCIE_DBGC_FRAG_HEADER_SIZE 12
+
+/* Number of allocated debug buffers (4) + (LSB (4), MSB (4), size (4)) per buffer */
+#define BTINTEL_PCIE_DBGC_FRAG_PAYLOAD_SIZE 196
+
/* Causes for the FH register interrupts */
enum msix_fh_int_causes {
BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0 = BIT(0), /* cause 0 */
@@ -57,6 +89,7 @@ enum msix_fh_int_causes {
/* Causes for the HW register interrupts */
enum msix_hw_int_causes {
BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0 = BIT(0), /* cause 32 */
+ BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP = BIT(3), /* cause 35 */
};
/* PCIe device states
@@ -69,6 +102,25 @@ enum {
BTINTEL_PCIE_STATE_D3_HOT = 2,
BTINTEL_PCIE_STATE_D3_COLD = 3,
};
+
+enum {
+ BTINTEL_PCIE_CORE_HALTED,
+ BTINTEL_PCIE_HWEXP_INPROGRESS,
+ BTINTEL_PCIE_COREDUMP_INPROGRESS
+};
+
+enum btintel_pcie_tlv_type {
+ BTINTEL_CNVI_BT,
+ BTINTEL_WRITE_PTR,
+ BTINTEL_WRAP_CTR,
+ BTINTEL_TRIGGER_REASON,
+ BTINTEL_FW_SHA,
+ BTINTEL_CNVR_TOP,
+ BTINTEL_CNVI_TOP,
+ BTINTEL_DUMP_TIME,
+ BTINTEL_FW_BUILD,
+};
+
#define BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE BIT(7)
/* Minimum and Maximum number of MSI-X Vector
@@ -325,6 +377,37 @@ struct rxq {
struct data_buf *bufs;
};
+/* Structure for DRAM debug buffers
+ * @count: Number of debug buffers
+ * @frag_v_addr/@frag_p_addr/@frag_size: DBGC fragment virtual/physical address and size
+ * @buf_v_addr/@buf_p_addr: Contiguous buffer area virtual/physical address
+ * @bufs: Array of data_buf entries
+ */
+struct btintel_pcie_dbgc {
+ u16 count;
+
+ void *frag_v_addr;
+ dma_addr_t frag_p_addr;
+ u16 frag_size;
+
+ dma_addr_t buf_p_addr;
+ void *buf_v_addr;
+ struct data_buf *bufs;
+};
+
+struct btintel_pcie_dump_header {
+ const char *driver_name;
+ u32 cnvi_top;
+ u32 cnvr_top;
+ u16 fw_timestamp;
+ u8 fw_build_type;
+ u32 fw_build_num;
+ u32 fw_git_sha1;
+ u32 cnvi_bt;
+ u32 write_ptr;
+ u32 wrap_ctr;
+ u16 trigger_reason;
+ int state;
+};
+
/* struct btintel_pcie_data
* @pdev: pci device
* @hdev: hdev device
@@ -405,6 +488,8 @@ struct btintel_pcie_data {
struct txq txq;
struct rxq rxq;
u32 alive_intr_ctxt;
+ struct btintel_pcie_dbgc dbgc;
+ struct btintel_pcie_dump_header dmp_hdr;
};
static inline u32 btintel_pcie_rd_reg32(struct btintel_pcie_data *data,
@@ -444,3 +529,11 @@ static inline void btintel_pcie_clr_reg_bits(struct btintel_pcie_data *data,
r &= ~bits;
iowrite32(r, data->base_addr + offset);
}
+
+static inline u32 btintel_pcie_rd_dev_mem(struct btintel_pcie_data *data,
+ u32 addr)
+{
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_PRPH_DEV_ADDR_REG, addr);
+ return btintel_pcie_rd_reg32(data, BTINTEL_PCIE_PRPH_DEV_RD_REG);
+}
+
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
index 68846c5bd4f7..4390fd571dbd 100644
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -1330,13 +1330,6 @@ int btmtk_usb_setup(struct hci_dev *hdev)
break;
case 0x7922:
case 0x7925:
- /* Reset the device to ensure it's in the initial state before
- * downloading the firmware to ensure.
- */
-
- if (!test_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags))
- btmtk_usb_subsys_reset(hdev, dev_id);
- fallthrough;
case 0x7961:
btmtk_fw_get_filename(fw_bin_name, sizeof(fw_bin_name), dev_id,
fw_version, fw_flavor);
@@ -1345,12 +1338,9 @@ int btmtk_usb_setup(struct hci_dev *hdev)
btmtk_usb_hci_wmt_sync);
if (err < 0) {
bt_dev_err(hdev, "Failed to set up firmware (%d)", err);
- clear_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags);
return err;
}
- set_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags);
-
/* It's Device EndPoint Reset Option Register */
err = btmtk_usb_uhw_reg_write(hdev, MTK_EP_RST_OPT,
MTK_EP_RST_IN_OUT_OPT);
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index bd5464bde174..edd5eead1e93 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -610,7 +610,8 @@ static void btmtksdio_txrx_work(struct work_struct *work)
} while (int_status || time_is_before_jiffies(txrx_timeout));
/* Enable interrupt */
- sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, NULL);
+ if (bdev->func->irq_handler)
+ sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, NULL);
sdio_release_host(bdev->func);
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index aa5ec1d444a9..5091dea762a0 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* NXP Bluetooth driver
- * Copyright 2023 NXP
+ * Copyright 2023-2025 NXP
*/
#include <linux/module.h>
@@ -31,6 +31,7 @@
#define BTNXPUART_SERDEV_OPEN 4
#define BTNXPUART_IR_IN_PROGRESS 5
#define BTNXPUART_FW_DOWNLOAD_ABORT 6
+#define BTNXPUART_FW_DUMP_IN_PROGRESS 7
/* NXP HW err codes */
#define BTNXPUART_IR_HW_ERR 0xb0
@@ -98,14 +99,19 @@
#define PS_STATE_AWAKE 0
#define PS_STATE_SLEEP 1
-/* Bluetooth vendor command : Sleep mode */
+/* NXP Vendor Commands. Refer to user manual UM11628 on nxp.com */
+/* Set custom BD Address */
+#define HCI_NXP_SET_BD_ADDR 0xfc22
+/* Set Auto-Sleep mode */
#define HCI_NXP_AUTO_SLEEP_MODE 0xfc23
-/* Bluetooth vendor command : Wakeup method */
+/* Set Wakeup method */
#define HCI_NXP_WAKEUP_METHOD 0xfc53
-/* Bluetooth vendor command : Set operational baudrate */
+/* Set operational baudrate */
#define HCI_NXP_SET_OPER_SPEED 0xfc09
-/* Bluetooth vendor command: Independent Reset */
+/* Independent Reset (Soft Reset) */
#define HCI_NXP_IND_RESET 0xfcfc
+/* Bluetooth vendor command: Trigger FW dump */
+#define HCI_NXP_TRIGGER_DUMP 0xfe91
/* Bluetooth Power State : Vendor cmd params */
#define BT_PS_ENABLE 0x02
@@ -162,6 +168,12 @@ struct btnxpuart_data {
const char *fw_name_old;
};
+enum bootloader_param_change {
+ not_changed,
+ cmd_sent,
+ changed
+};
+
struct btnxpuart_dev {
struct hci_dev *hdev;
struct serdev_device *serdev;
@@ -177,6 +189,7 @@ struct btnxpuart_dev {
u32 fw_v1_sent_bytes;
u32 fw_dnld_v3_offset;
u32 fw_v3_offset_correction;
+ u32 fw_v3_prev_sent;
u32 fw_v1_expected_len;
u32 boot_reg_offset;
wait_queue_head_t fw_dnld_done_wait_q;
@@ -185,8 +198,8 @@ struct btnxpuart_dev {
u32 new_baudrate;
u32 current_baudrate;
u32 fw_init_baudrate;
- bool timeout_changed;
- bool baudrate_changed;
+ enum bootloader_param_change timeout_changed;
+ enum bootloader_param_change baudrate_changed;
bool helper_downloaded;
struct ps_data psdata;
@@ -204,10 +217,11 @@ struct btnxpuart_dev {
#define NXP_NAK_V3 0x7b
#define NXP_CRC_ERROR_V3 0x7c
-/* Bootloader signature error codes */
-#define NXP_ACK_RX_TIMEOUT 0x0002 /* ACK not received from host */
-#define NXP_HDR_RX_TIMEOUT 0x0003 /* FW Header chunk not received */
-#define NXP_DATA_RX_TIMEOUT 0x0004 /* FW Data chunk not received */
+/* Bootloader signature error codes: refer to AN12820 from nxp.com */
+#define NXP_CRC_RX_ERROR BIT(0) /* CRC error in previous packet */
+#define NXP_ACK_RX_TIMEOUT BIT(2) /* ACK not received from host */
+#define NXP_HDR_RX_TIMEOUT BIT(3) /* FW Header chunk not received */
+#define NXP_DATA_RX_TIMEOUT BIT(4) /* FW Data chunk not received */
#define HDR_LEN 16
@@ -310,6 +324,35 @@ union nxp_v3_rx_timeout_nak_u {
u8 buf[6];
};
+struct nxp_v3_crc_nak {
+ u8 nak;
+ u8 crc;
+} __packed;
+
+union nxp_v3_crc_nak_u {
+ struct nxp_v3_crc_nak pkt;
+ u8 buf[2];
+};
+
+/* FW dump */
+#define NXP_FW_DUMP_SIZE (1024 * 1000)
+
+struct nxp_fw_dump_hdr {
+ __le16 seq_num;
+ __le16 reserved;
+ __le16 buf_type;
+ __le16 buf_len;
+};
+
+union nxp_set_bd_addr_payload {
+ struct {
+ u8 param_id;
+ u8 param_len;
+ u8 param[6];
+ } __packed data;
+ u8 buf[8];
+};
+
static u8 crc8_table[CRC8_TABLE_SIZE];
/* Default configurations */
@@ -447,8 +490,14 @@ static int ps_setup(struct hci_dev *hdev)
return PTR_ERR(psdata->h2c_ps_gpio);
}
- if (!psdata->h2c_ps_gpio)
+ if (device_property_read_u8(&serdev->dev, "nxp,wakein-pin", &psdata->h2c_wakeup_gpio)) {
+ psdata->h2c_wakeup_gpio = 0xff; /* 0xff: use default pin/gpio */
+ } else if (!psdata->h2c_ps_gpio) {
+ bt_dev_warn(hdev, "nxp,wakein-pin property without device-wakeup GPIO");
psdata->h2c_wakeup_gpio = 0xff;
+ }
+
+ device_property_read_u8(&serdev->dev, "nxp,wakeout-pin", &psdata->c2h_wakeup_gpio);
psdata->hdev = hdev;
INIT_WORK(&psdata->work, ps_work_func);
@@ -540,9 +589,11 @@ static int send_wakeup_method_cmd(struct hci_dev *hdev, void *data)
pcmd.c2h_wakeupmode = psdata->c2h_wakeupmode;
pcmd.c2h_wakeup_gpio = psdata->c2h_wakeup_gpio;
+ pcmd.h2c_wakeup_gpio = 0xff;
switch (psdata->h2c_wakeupmode) {
case WAKEUP_METHOD_GPIO:
pcmd.h2c_wakeupmode = BT_CTRL_WAKEUP_METHOD_GPIO;
+ pcmd.h2c_wakeup_gpio = psdata->h2c_wakeup_gpio;
break;
case WAKEUP_METHOD_DTR:
pcmd.h2c_wakeupmode = BT_CTRL_WAKEUP_METHOD_DSR;
@@ -552,7 +603,6 @@ static int send_wakeup_method_cmd(struct hci_dev *hdev, void *data)
pcmd.h2c_wakeupmode = BT_CTRL_WAKEUP_METHOD_BREAK;
break;
}
- pcmd.h2c_wakeup_gpio = 0xff;
skb = nxp_drv_send_cmd(hdev, HCI_NXP_WAKEUP_METHOD, sizeof(pcmd), &pcmd);
if (IS_ERR(skb)) {
@@ -586,8 +636,13 @@ static void ps_init(struct hci_dev *hdev)
usleep_range(5000, 10000);
psdata->ps_state = PS_STATE_AWAKE;
- psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_NONE;
- psdata->c2h_wakeup_gpio = 0xff;
+
+ if (psdata->c2h_wakeup_gpio) {
+ psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_GPIO;
+ } else {
+ psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_NONE;
+ psdata->c2h_wakeup_gpio = 0xff;
+ }
psdata->cur_h2c_wakeupmode = WAKEUP_METHOD_INVALID;
if (psdata->h2c_ps_gpio)
@@ -618,11 +673,6 @@ static void ps_init(struct hci_dev *hdev)
psdata->cur_psmode = PS_MODE_DISABLE;
psdata->target_ps_mode = DEFAULT_PS_MODE;
-
- if (psdata->cur_h2c_wakeupmode != psdata->h2c_wakeupmode)
- hci_cmd_sync_queue(hdev, send_wakeup_method_cmd, NULL, NULL);
- if (psdata->cur_psmode != psdata->target_ps_mode)
- hci_cmd_sync_queue(hdev, send_ps_cmd, NULL, NULL);
}
/* NXP Firmware Download Feature */
@@ -637,8 +687,8 @@ static int nxp_download_firmware(struct hci_dev *hdev)
nxpdev->boot_reg_offset = 0;
nxpdev->fw_dnld_v3_offset = 0;
nxpdev->fw_v3_offset_correction = 0;
- nxpdev->baudrate_changed = false;
- nxpdev->timeout_changed = false;
+ nxpdev->baudrate_changed = not_changed;
+ nxpdev->timeout_changed = not_changed;
nxpdev->helper_downloaded = false;
serdev_device_set_baudrate(nxpdev->serdev, HCI_NXP_PRI_BAUDRATE);
@@ -651,8 +701,10 @@ static int nxp_download_firmware(struct hci_dev *hdev)
&nxpdev->tx_state),
msecs_to_jiffies(60000));
- release_firmware(nxpdev->fw);
- memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name));
+ if (nxpdev->fw && strlen(nxpdev->fw_name)) {
+ release_firmware(nxpdev->fw);
+ memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name));
+ }
if (err == 0) {
bt_dev_err(hdev, "FW Download Timeout. offset: %d",
@@ -767,6 +819,16 @@ static bool is_fw_downloading(struct btnxpuart_dev *nxpdev)
return test_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
}
+static bool ind_reset_in_progress(struct btnxpuart_dev *nxpdev)
+{
+ return test_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state);
+}
+
+static bool fw_dump_in_progress(struct btnxpuart_dev *nxpdev)
+{
+ return test_bit(BTNXPUART_FW_DUMP_IN_PROGRESS, &nxpdev->tx_state);
+}
+
static bool process_boot_signature(struct btnxpuart_dev *nxpdev)
{
if (test_bit(BTNXPUART_CHECK_BOOT_SIGNATURE, &nxpdev->tx_state)) {
@@ -860,15 +922,14 @@ static int nxp_recv_fw_req_v1(struct hci_dev *hdev, struct sk_buff *skb)
len = __le16_to_cpu(req->len);
if (!nxp_data->helper_fw_name) {
- if (!nxpdev->timeout_changed) {
- nxpdev->timeout_changed = nxp_fw_change_timeout(hdev,
- len);
+ if (nxpdev->timeout_changed != changed) {
+ nxp_fw_change_timeout(hdev, len);
+ nxpdev->timeout_changed = changed;
goto free_skb;
}
- if (!nxpdev->baudrate_changed) {
- nxpdev->baudrate_changed = nxp_fw_change_baudrate(hdev,
- len);
- if (nxpdev->baudrate_changed) {
+ if (nxpdev->baudrate_changed != changed) {
+ if (nxp_fw_change_baudrate(hdev, len)) {
+ nxpdev->baudrate_changed = changed;
serdev_device_set_baudrate(nxpdev->serdev,
HCI_NXP_SEC_BAUDRATE);
serdev_device_set_flow_control(nxpdev->serdev, true);
@@ -1047,32 +1108,35 @@ static void nxp_handle_fw_download_error(struct hci_dev *hdev, struct v3_data_re
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
__u32 offset = __le32_to_cpu(req->offset);
__u16 err = __le16_to_cpu(req->error);
- union nxp_v3_rx_timeout_nak_u nak_tx_buf;
-
- switch (err) {
- case NXP_ACK_RX_TIMEOUT:
- case NXP_HDR_RX_TIMEOUT:
- case NXP_DATA_RX_TIMEOUT:
- nak_tx_buf.pkt.nak = NXP_NAK_V3;
- nak_tx_buf.pkt.offset = __cpu_to_le32(offset);
- nak_tx_buf.pkt.crc = crc8(crc8_table, nak_tx_buf.buf,
- sizeof(nak_tx_buf) - 1, 0xff);
- serdev_device_write_buf(nxpdev->serdev, nak_tx_buf.buf,
- sizeof(nak_tx_buf));
- break;
- default:
- bt_dev_dbg(hdev, "Unknown bootloader error code: %d", err);
- break;
-
+ union nxp_v3_rx_timeout_nak_u timeout_nak_buf;
+ union nxp_v3_crc_nak_u crc_nak_buf;
+
+ if (err & NXP_CRC_RX_ERROR) {
+ crc_nak_buf.pkt.nak = NXP_CRC_ERROR_V3;
+ crc_nak_buf.pkt.crc = crc8(crc8_table, crc_nak_buf.buf,
+ sizeof(crc_nak_buf) - 1, 0xff);
+ serdev_device_write_buf(nxpdev->serdev, crc_nak_buf.buf,
+ sizeof(crc_nak_buf));
+ } else if (err & NXP_ACK_RX_TIMEOUT ||
+ err & NXP_HDR_RX_TIMEOUT ||
+ err & NXP_DATA_RX_TIMEOUT) {
+ timeout_nak_buf.pkt.nak = NXP_NAK_V3;
+ timeout_nak_buf.pkt.offset = __cpu_to_le32(offset);
+ timeout_nak_buf.pkt.crc = crc8(crc8_table, timeout_nak_buf.buf,
+ sizeof(timeout_nak_buf) - 1, 0xff);
+ serdev_device_write_buf(nxpdev->serdev, timeout_nak_buf.buf,
+ sizeof(timeout_nak_buf));
+ } else {
+ bt_dev_err(hdev, "Unknown bootloader error code: %d", err);
}
-
}
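Both NAK frames end with a CRC-8 over the preceding bytes computed through the kernel crc8() helper; a standalone approximation of building the 6-byte timeout NAK, assuming an MSB-first CRC-8 with polynomial 0x07 (the actual polynomial is whatever this driver passes to crc8_populate_msb()):

    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    /* Plain MSB-first CRC-8; the polynomial (0x07) is an assumption here and
     * must match the table the driver builds at init time.
     */
    static uint8_t crc8_msb(const uint8_t *buf, size_t len, uint8_t crc)
    {
        while (len--) {
            crc ^= *buf++;
            for (int i = 0; i < 8; i++)
                crc = (crc & 0x80) ? (uint8_t)((crc << 1) ^ 0x07) : (uint8_t)(crc << 1);
        }
        return crc;
    }

    int main(void)
    {
        /* Timeout NAK: 0x7b, LE32 offset, CRC over the first five bytes */
        uint8_t nak[6] = { 0x7b, 0x00, 0x10, 0x00, 0x00, 0x00 };

        nak[5] = crc8_msb(nak, sizeof(nak) - 1, 0xff);
        printf("crc byte: 0x%02x\n", nak[5]);
        return 0;
    }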
static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
struct v3_data_req *req;
- __u16 len;
+ __u16 len = 0;
+ __u16 err = 0;
__u32 offset;
if (!process_boot_signature(nxpdev))
@@ -1082,23 +1146,40 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb)
if (!req || !nxpdev->fw)
goto free_skb;
- if (!req->error) {
+ err = __le16_to_cpu(req->error);
+
+ if (!err) {
nxp_send_ack(NXP_ACK_V3, hdev);
+ if (nxpdev->timeout_changed == cmd_sent)
+ nxpdev->timeout_changed = changed;
+ if (nxpdev->baudrate_changed == cmd_sent)
+ nxpdev->baudrate_changed = changed;
} else {
nxp_handle_fw_download_error(hdev, req);
+ if (nxpdev->timeout_changed == cmd_sent &&
+ err == NXP_CRC_RX_ERROR) {
+ nxpdev->fw_v3_offset_correction -= nxpdev->fw_v3_prev_sent;
+ nxpdev->timeout_changed = not_changed;
+ }
+ if (nxpdev->baudrate_changed == cmd_sent &&
+ err == NXP_CRC_RX_ERROR) {
+ nxpdev->fw_v3_offset_correction -= nxpdev->fw_v3_prev_sent;
+ nxpdev->baudrate_changed = not_changed;
+ }
goto free_skb;
}
len = __le16_to_cpu(req->len);
- if (!nxpdev->timeout_changed) {
- nxpdev->timeout_changed = nxp_fw_change_timeout(hdev, len);
+ if (nxpdev->timeout_changed != changed) {
+ nxp_fw_change_timeout(hdev, len);
+ nxpdev->timeout_changed = cmd_sent;
goto free_skb;
}
- if (!nxpdev->baudrate_changed) {
- nxpdev->baudrate_changed = nxp_fw_change_baudrate(hdev, len);
- if (nxpdev->baudrate_changed) {
+ if (nxpdev->baudrate_changed != changed) {
+ if (nxp_fw_change_baudrate(hdev, len)) {
+ nxpdev->baudrate_changed = cmd_sent;
serdev_device_set_baudrate(nxpdev->serdev,
HCI_NXP_SEC_BAUDRATE);
serdev_device_set_flow_control(nxpdev->serdev, true);
@@ -1130,6 +1211,7 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb)
nxpdev->fw_dnld_v3_offset, len);
free_skb:
+ nxpdev->fw_v3_prev_sent = len;
kfree_skb(skb);
return 0;
}
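A condensed, purely illustrative model of the not_changed / cmd_sent / changed handshake used above: a change command counts as applied only after the bootloader ACKs it, and a CRC error reported while it is pending sends the state back to not_changed so the command is retransmitted.

    #include <stdio.h>

    enum bootloader_param_change { not_changed, cmd_sent, changed };

    static enum bootloader_param_change step(enum bootloader_param_change s,
                                             int acked, int crc_error)
    {
        if (s == not_changed)
            return cmd_sent;        /* command goes out */
        if (s == cmd_sent && acked)
            return changed;         /* bootloader accepted it */
        if (s == cmd_sent && crc_error)
            return not_changed;     /* retransmit from scratch */
        return s;
    }

    int main(void)
    {
        enum bootloader_param_change s = not_changed;

        s = step(s, 0, 0);  /* send */
        s = step(s, 0, 1);  /* CRC NAK -> back to not_changed */
        s = step(s, 0, 0);  /* send again */
        s = step(s, 1, 0);  /* ACK -> changed */
        printf("final state: %d\n", s);
        return 0;
    }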
@@ -1168,7 +1250,7 @@ static int nxp_set_baudrate_cmd(struct hci_dev *hdev, void *data)
static int nxp_check_boot_sign(struct btnxpuart_dev *nxpdev)
{
serdev_device_set_baudrate(nxpdev->serdev, HCI_NXP_PRI_BAUDRATE);
- if (test_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state))
+ if (ind_reset_in_progress(nxpdev))
serdev_device_set_flow_control(nxpdev->serdev, false);
else
serdev_device_set_flow_control(nxpdev->serdev, true);
@@ -1197,6 +1279,102 @@ static int nxp_set_ind_reset(struct hci_dev *hdev, void *data)
return hci_recv_frame(hdev, skb);
}
+/* Firmware dump */
+static void nxp_coredump(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ u8 pcmd = 2;
+
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_TRIGGER_DUMP, 1, &pcmd);
+ if (!IS_ERR(skb))
+ kfree_skb(skb);
+}
+
+static void nxp_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ /* Nothing to be added in FW dump header */
+}
+
+static int nxp_process_fw_dump(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_acl_hdr *acl_hdr = (struct hci_acl_hdr *)skb_pull_data(skb,
+ sizeof(*acl_hdr));
+ struct nxp_fw_dump_hdr *fw_dump_hdr = (struct nxp_fw_dump_hdr *)skb->data;
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ __u16 seq_num = __le16_to_cpu(fw_dump_hdr->seq_num);
+ __u16 buf_len = __le16_to_cpu(fw_dump_hdr->buf_len);
+ int err;
+
+ if (seq_num == 0x0001) {
+ if (test_and_set_bit(BTNXPUART_FW_DUMP_IN_PROGRESS, &nxpdev->tx_state)) {
+ bt_dev_err(hdev, "FW dump already in progress");
+ goto free_skb;
+ }
+ bt_dev_warn(hdev, "==== Start FW dump ===");
+ err = hci_devcd_init(hdev, NXP_FW_DUMP_SIZE);
+ if (err < 0)
+ goto free_skb;
+
+ schedule_delayed_work(&hdev->dump.dump_timeout,
+ msecs_to_jiffies(20000));
+ }
+
+ err = hci_devcd_append(hdev, skb_clone(skb, GFP_ATOMIC));
+ if (err < 0)
+ goto free_skb;
+
+ if (buf_len == 0) {
+ bt_dev_warn(hdev, "==== FW dump complete ===");
+ clear_bit(BTNXPUART_FW_DUMP_IN_PROGRESS, &nxpdev->tx_state);
+ hci_devcd_complete(hdev);
+ nxp_set_ind_reset(hdev, NULL);
+ }
+
+free_skb:
+ kfree_skb(skb);
+ return 0;
+}
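Each dump chunk carries the 8-byte nxp_fw_dump_hdr right after the ACL header; a standalone parse of a sample chunk (values made up) showing the seq_num == 1 start and buf_len == 0 end conditions used above:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t get_le16(const uint8_t *p)
    {
        return p[0] | (p[1] << 8);
    }

    int main(void)
    {
        /* Sample chunk after the 4-byte ACL header has been pulled:
         * seq_num=1, reserved, buf_type, buf_len=2, then two payload bytes.
         */
        const uint8_t chunk[] = { 0x01, 0x00, 0x00, 0x00,
                                  0x00, 0x00, 0x02, 0x00, 0xde, 0xad };
        uint16_t seq_num = get_le16(chunk);
        uint16_t buf_len = get_le16(chunk + 6);

        if (seq_num == 0x0001)
            printf("first chunk - start a new dump\n");
        if (buf_len == 0)
            printf("zero-length chunk - dump complete\n");
        else
            printf("%u payload byte(s)\n", buf_len);
        return 0;
    }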
+
+static int nxp_recv_acl_pkt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
+
+ /* FW dump chunks are ACL packets with conn handle 0xfff */
+ if ((handle & 0x0FFF) == 0xFFF)
+ return nxp_process_fw_dump(hdev, skb);
+ else
+ return hci_recv_frame(hdev, skb);
+}
+
+static int nxp_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ union nxp_set_bd_addr_payload pcmd;
+ int err;
+
+ pcmd.data.param_id = 0xfe;
+ pcmd.data.param_len = 6;
+ memcpy(pcmd.data.param, bdaddr, 6);
+
+ /* BD address can be assigned only after first reset command. */
+ err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL,
+ HCI_INIT_TIMEOUT);
+ if (err) {
+ bt_dev_err(hdev,
+ "Reset before setting local-bd-addr failed (%d)",
+ err);
+ return err;
+ }
+
+ err = __hci_cmd_sync_status(hdev, HCI_NXP_SET_BD_ADDR, sizeof(pcmd),
+ pcmd.buf, HCI_CMD_TIMEOUT);
+ if (err) {
+ bt_dev_err(hdev, "Changing device address failed (%d)", err);
+ return err;
+ }
+
+ return 0;
+}
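For reference, the 8-byte payload this builds for a sample address; the sketch below assumes bdaddr_t's least-significant-byte-first storage, so AA:BB:CC:DD:EE:FF is copied as ff ee dd cc bb aa after the param_id/param_len bytes:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint8_t bdaddr[6] = { 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa };
        uint8_t payload[8] = { 0xfe, 0x06 };  /* param_id, param_len */
        int i;

        for (i = 0; i < 6; i++)
            payload[2 + i] = bdaddr[i];

        for (i = 0; i < 8; i++)
            printf("%02x ", payload[i]);
        printf("\n");
        return 0;
    }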
+
/* NXP protocol */
static int nxp_setup(struct hci_dev *hdev)
{
@@ -1216,11 +1394,6 @@ static int nxp_setup(struct hci_dev *hdev)
serdev_device_set_baudrate(nxpdev->serdev, nxpdev->fw_init_baudrate);
nxpdev->current_baudrate = nxpdev->fw_init_baudrate;
- if (nxpdev->current_baudrate != HCI_NXP_SEC_BAUDRATE) {
- nxpdev->new_baudrate = HCI_NXP_SEC_BAUDRATE;
- hci_cmd_sync_queue(hdev, nxp_set_baudrate_cmd, NULL, NULL);
- }
-
ps_init(hdev);
if (test_and_clear_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state))
@@ -1229,6 +1402,22 @@ static int nxp_setup(struct hci_dev *hdev)
return 0;
}
+static int nxp_post_init(struct hci_dev *hdev)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct ps_data *psdata = &nxpdev->psdata;
+
+ if (nxpdev->current_baudrate != HCI_NXP_SEC_BAUDRATE) {
+ nxpdev->new_baudrate = HCI_NXP_SEC_BAUDRATE;
+ nxp_set_baudrate_cmd(hdev, NULL);
+ }
+ if (psdata->cur_h2c_wakeupmode != psdata->h2c_wakeupmode)
+ send_wakeup_method_cmd(hdev, NULL);
+ if (psdata->cur_psmode != psdata->target_ps_mode)
+ send_ps_cmd(hdev, NULL);
+ return 0;
+}
+
static void nxp_hw_err(struct hci_dev *hdev, u8 code)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
@@ -1247,25 +1436,44 @@ static int nxp_shutdown(struct hci_dev *hdev)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
struct sk_buff *skb;
- u8 *status;
u8 pcmd = 0;
- if (test_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state)) {
+ if (ind_reset_in_progress(nxpdev)) {
skb = nxp_drv_send_cmd(hdev, HCI_NXP_IND_RESET, 1, &pcmd);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- status = skb_pull_data(skb, 1);
- if (status) {
- serdev_device_set_flow_control(nxpdev->serdev, false);
- set_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
- }
- kfree_skb(skb);
+ serdev_device_set_flow_control(nxpdev->serdev, false);
+ set_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
+ /* HCI_NXP_IND_RESET command may not return any response */
+ if (!IS_ERR(skb))
+ kfree_skb(skb);
+ } else if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) {
+ nxpdev->new_baudrate = nxpdev->fw_init_baudrate;
+ nxp_set_baudrate_cmd(hdev, NULL);
}
return 0;
}
+static bool nxp_wakeup(struct hci_dev *hdev)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct ps_data *psdata = &nxpdev->psdata;
+
+ if (psdata->c2h_wakeupmode != BT_HOST_WAKEUP_METHOD_NONE)
+ return true;
+
+ return false;
+}
+
+static void nxp_reset(struct hci_dev *hdev)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+
+ if (!ind_reset_in_progress(nxpdev) && !fw_dump_in_progress(nxpdev)) {
+ bt_dev_dbg(hdev, "CMD Timeout detected. Resetting.");
+ nxp_set_ind_reset(hdev, NULL);
+ }
+}
+
static int btnxpuart_queue_skb(struct hci_dev *hdev, struct sk_buff *skb)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
@@ -1286,6 +1494,9 @@ static int nxp_enqueue(struct hci_dev *hdev, struct sk_buff *skb)
struct wakeup_cmd_payload wakeup_parm;
__le32 baudrate_parm;
+ if (fw_dump_in_progress(nxpdev))
+ return -EBUSY;
+
/* if vendor commands are received from user space (e.g. hcitool), update
* driver flags accordingly and ask driver to re-send the command to FW.
* In case the payload for any command does not match expected payload
@@ -1454,7 +1665,7 @@ static int btnxpuart_flush(struct hci_dev *hdev)
}
static const struct h4_recv_pkt nxp_recv_pkts[] = {
- { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_ACL, .recv = nxp_recv_acl_pkt },
{ H4_RECV_SCO, .recv = hci_recv_frame },
{ H4_RECV_EVENT, .recv = hci_recv_frame },
{ H4_RECV_ISO, .recv = hci_recv_frame },
@@ -1476,11 +1687,13 @@ static size_t btnxpuart_receive_buf(struct serdev_device *serdev,
if (IS_ERR(nxpdev->rx_skb)) {
int err = PTR_ERR(nxpdev->rx_skb);
/* Safe to ignore out-of-sync bootloader signatures */
- if (!is_fw_downloading(nxpdev))
+ if (!is_fw_downloading(nxpdev) &&
+ !ind_reset_in_progress(nxpdev))
bt_dev_err(nxpdev->hdev, "Frame reassembly failed (%d)", err);
return count;
}
- if (!is_fw_downloading(nxpdev))
+ if (!is_fw_downloading(nxpdev) &&
+ !ind_reset_in_progress(nxpdev))
nxpdev->hdev->stat.byte_rx += count;
return count;
}
@@ -1499,6 +1712,7 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
{
struct hci_dev *hdev;
struct btnxpuart_dev *nxpdev;
+ bdaddr_t ba = {0};
nxpdev = devm_kzalloc(&serdev->dev, sizeof(*nxpdev), GFP_KERNEL);
if (!nxpdev)
@@ -1543,11 +1757,21 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
hdev->close = btnxpuart_close;
hdev->flush = btnxpuart_flush;
hdev->setup = nxp_setup;
+ hdev->post_init = nxp_post_init;
hdev->send = nxp_enqueue;
hdev->hw_error = nxp_hw_err;
hdev->shutdown = nxp_shutdown;
+ hdev->wakeup = nxp_wakeup;
+ hdev->reset = nxp_reset;
+ hdev->set_bdaddr = nxp_set_bdaddr;
SET_HCIDEV_DEV(hdev, &serdev->dev);
+ device_property_read_u8_array(&nxpdev->serdev->dev,
+ "local-bd-address",
+ (u8 *)&ba, sizeof(ba));
+ if (bacmp(&ba, BDADDR_ANY))
+ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+
if (hci_register_dev(hdev) < 0) {
dev_err(&serdev->dev, "Can't register HCI device\n");
goto probe_fail;
@@ -1556,6 +1780,8 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
if (ps_setup(hdev))
goto probe_fail;
+ hci_devcd_register(hdev, nxp_coredump, nxp_coredump_hdr, NULL);
+
return 0;
probe_fail:
@@ -1573,16 +1799,15 @@ static void nxp_serdev_remove(struct serdev_device *serdev)
clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
wake_up_interruptible(&nxpdev->check_boot_sign_wait_q);
wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q);
- } else {
- /* Restore FW baudrate to fw_init_baudrate if changed.
- * This will ensure FW baudrate is in sync with
- * driver baudrate in case this driver is re-inserted.
+ }
+
+ if (test_bit(HCI_RUNNING, &hdev->flags)) {
+ /* Ensure shutdown callback is executed before unregistering, so
+ * that baudrate is reset to initial value.
*/
- if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) {
- nxpdev->new_baudrate = nxpdev->fw_init_baudrate;
- nxp_set_baudrate_cmd(hdev, NULL);
- }
+ nxp_shutdown(hdev);
}
+
ps_cleanup(nxpdev);
hci_unregister_dev(hdev);
hci_free_dev(hdev);
@@ -1608,6 +1833,17 @@ static int nxp_serdev_resume(struct device *dev)
}
#endif
+#ifdef CONFIG_DEV_COREDUMP
+static void nxp_serdev_coredump(struct device *dev)
+{
+ struct btnxpuart_dev *nxpdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = nxpdev->hdev;
+
+ if (hdev->dump.coredump)
+ hdev->dump.coredump(hdev);
+}
+#endif
+
static struct btnxpuart_data w8987_data __maybe_unused = {
.helper_fw_name = NULL,
.fw_name = FIRMWARE_W8987,
@@ -1638,6 +1874,9 @@ static struct serdev_device_driver nxp_serdev_driver = {
.name = "btnxpuart",
.of_match_table = of_match_ptr(nxpuart_of_match_table),
.pm = &nxp_pm_ops,
+#ifdef CONFIG_DEV_COREDUMP
+ .coredump = nxp_serdev_coredump,
+#endif
},
};
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index cdf09d9a9ad2..3d6778b95e00 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -785,6 +785,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
const char *firmware_name, const char *rampatch_name)
{
struct qca_fw_config config = {};
+ const char *variant = "";
int err;
u8 rom_ver = 0;
u32 soc_ver;
@@ -815,6 +816,10 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
snprintf(config.fwname, sizeof(config.fwname), "qca/%s", rampatch_name);
} else {
switch (soc_type) {
+ case QCA_WCN3950:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/cmbtfw%02x.tlv", rom_ver);
+ break;
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
@@ -880,16 +885,23 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
}
} else {
switch (soc_type) {
+ case QCA_WCN3950:
+ if (le32_to_cpu(ver.soc_id) == QCA_WCN3950_SOC_ID_T)
+ variant = "t";
+ else if (le32_to_cpu(ver.soc_id) == QCA_WCN3950_SOC_ID_S)
+ variant = "u";
+
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/cmnv%02x%s.bin", rom_ver, variant);
+ break;
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
- if (le32_to_cpu(ver.soc_id) == QCA_WCN3991_SOC_ID) {
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/crnv%02xu.bin", rom_ver);
- } else {
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/crnv%02x.bin", rom_ver);
- }
+ if (le32_to_cpu(ver.soc_id) == QCA_WCN3991_SOC_ID)
+ variant = "u";
+
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/crnv%02x%s.bin", rom_ver, variant);
break;
case QCA_WCN3988:
snprintf(config.fwname, sizeof(config.fwname),
@@ -948,6 +960,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
* VsMsftOpCode.
*/
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index 9d28c8800225..8f3c1b1c77b3 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -41,6 +41,9 @@
#define QCA_WCN3991_SOC_ID 0x40014320
+#define QCA_WCN3950_SOC_ID_T 0x40074130
+#define QCA_WCN3950_SOC_ID_S 0x40075130
+
/* QCA chipset version can be decided by patch and SoC
* version, combination with upper 2 bytes from SoC
* and lower 2 bytes from patch will be used.
@@ -145,6 +148,7 @@ enum qca_btsoc_type {
QCA_INVALID = -1,
QCA_AR3002,
QCA_ROME,
+ QCA_WCN3950,
QCA_WCN3988,
QCA_WCN3990,
QCA_WCN3998,
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 2a8d91963c63..5012b5ff92c8 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -34,6 +34,7 @@ static bool force_scofix;
static bool enable_autosuspend = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTOSUSPEND);
static bool enable_poll_sync = IS_ENABLED(CONFIG_BT_HCIBTUSB_POLL_SYNC);
static bool reset = true;
+static bool auto_isoc_alt = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTO_ISOC_ALT);
static struct usb_driver btusb_driver;
@@ -375,10 +376,38 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0f3), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe100), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe103), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe10a), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe10d), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe11b), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe11c), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe11f), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe141), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe14a), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe14b), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe14d), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3623), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3624), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x2c7c, 0x0130), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x2c7c, 0x0131), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x2c7c, 0x0132), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
@@ -639,6 +668,10 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe102), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe152), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe153), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x3804), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x38e4), .driver_info = BTUSB_MEDIATEK |
@@ -1085,6 +1118,42 @@ static inline void btusb_free_frags(struct btusb_data *data)
spin_unlock_irqrestore(&data->rxlock, flags);
}
+static void btusb_sco_connected(struct btusb_data *data, struct sk_buff *skb)
+{
+ struct hci_event_hdr *hdr = (void *) skb->data;
+ struct hci_ev_sync_conn_complete *ev =
+ (void *) skb->data + sizeof(*hdr);
+ struct hci_dev *hdev = data->hdev;
+ unsigned int notify_air_mode;
+
+ if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
+ return;
+
+ if (skb->len < sizeof(*hdr) || hdr->evt != HCI_EV_SYNC_CONN_COMPLETE)
+ return;
+
+ if (skb->len != sizeof(*hdr) + sizeof(*ev) || ev->status)
+ return;
+
+ switch (ev->air_mode) {
+ case BT_CODEC_CVSD:
+ notify_air_mode = HCI_NOTIFY_ENABLE_SCO_CVSD;
+ break;
+
+ case BT_CODEC_TRANSPARENT:
+ notify_air_mode = HCI_NOTIFY_ENABLE_SCO_TRANSP;
+ break;
+
+ default:
+ return;
+ }
+
+ bt_dev_info(hdev, "enabling SCO with air mode %u", ev->air_mode);
+ data->sco_num = 1;
+ data->air_mode = notify_air_mode;
+ schedule_work(&data->work);
+}
+
static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb)
{
if (data->intr_interval) {
@@ -1092,6 +1161,10 @@ static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb)
schedule_delayed_work(&data->rx_work, 0);
}
+ /* Configure the altsetting for HCI_USER_CHANNEL when an SCO connection completes */
+ if (auto_isoc_alt && hci_dev_test_flag(data->hdev, HCI_USER_CHANNEL))
+ btusb_sco_connected(data, skb);
+
return data->recv_event(data->hdev, skb);
}
@@ -2436,6 +2509,8 @@ static int btusb_setup_csr(struct hci_dev *hdev)
set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks);
set_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks);
set_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks);
+ set_bit(HCI_QUIRK_BROKEN_READ_VOICE_SETTING, &hdev->quirks);
+ set_bit(HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE, &hdev->quirks);
/* Clear the reset quirk since this is not an actual
* early Bluetooth 1.1 device from CSR.
@@ -2647,7 +2722,7 @@ static void btusb_mtk_claim_iso_intf(struct btusb_data *data)
device_unlock(&btmtk_data->isopkt_intf->dev);
if (err < 0) {
btmtk_data->isopkt_intf = NULL;
- bt_dev_err(data->hdev, "Failed to claim iso interface");
+ bt_dev_err(data->hdev, "Failed to claim iso interface: %d", err);
return;
}
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index d2d6ba8d2f8b..acba83156de9 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -102,7 +102,8 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu)
if (!skb) {
percpu_down_read(&hu->proto_lock);
- if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ if (test_bit(HCI_UART_PROTO_READY, &hu->flags) ||
+ test_bit(HCI_UART_PROTO_INIT, &hu->flags))
skb = hu->proto->dequeue(hu);
percpu_up_read(&hu->proto_lock);
@@ -124,7 +125,8 @@ int hci_uart_tx_wakeup(struct hci_uart *hu)
if (!percpu_down_read_trylock(&hu->proto_lock))
return 0;
- if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) &&
+ !test_bit(HCI_UART_PROTO_INIT, &hu->flags))
goto no_schedule;
set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
@@ -278,7 +280,8 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
percpu_down_read(&hu->proto_lock);
- if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
+ if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) &&
+ !test_bit(HCI_UART_PROTO_INIT, &hu->flags)) {
percpu_up_read(&hu->proto_lock);
return -EUNATCH;
}
@@ -585,7 +588,8 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
if (tty != hu->tty)
return;
- if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ if (test_bit(HCI_UART_PROTO_READY, &hu->flags) ||
+ test_bit(HCI_UART_PROTO_INIT, &hu->flags))
hci_uart_tx_wakeup(hu);
}
@@ -611,7 +615,8 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data,
percpu_down_read(&hu->proto_lock);
- if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
+ if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) &&
+ !test_bit(HCI_UART_PROTO_INIT, &hu->flags)) {
percpu_up_read(&hu->proto_lock);
return;
}
@@ -707,12 +712,16 @@ static int hci_uart_set_proto(struct hci_uart *hu, int id)
hu->proto = p;
+ set_bit(HCI_UART_PROTO_INIT, &hu->flags);
+
err = hci_uart_register_dev(hu);
if (err) {
return err;
}
set_bit(HCI_UART_PROTO_READY, &hu->flags);
+ clear_bit(HCI_UART_PROTO_INIT, &hu->flags);
+
return 0;
}
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 0ac2168f1dc4..f2558506a02c 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -623,6 +623,7 @@ static int qca_open(struct hci_uart *hu)
qcadev = serdev_device_get_drvdata(hu->serdev);
switch (qcadev->btsoc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1366,6 +1367,7 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
/* Give the controller time to process the request */
switch (qca_soc_type(hu)) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1452,6 +1454,7 @@ static unsigned int qca_get_speed(struct hci_uart *hu,
static int qca_check_speeds(struct hci_uart *hu)
{
switch (qca_soc_type(hu)) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1494,6 +1497,7 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
* changing the baudrate of chip and host.
*/
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1528,6 +1532,7 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
error:
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1746,6 +1751,7 @@ static int qca_regulator_init(struct hci_uart *hu)
}
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1776,6 +1782,7 @@ static int qca_regulator_init(struct hci_uart *hu)
qca_set_speed(hu, QCA_INIT_SPEED);
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1807,6 +1814,7 @@ static int qca_power_on(struct hci_dev *hdev)
return 0;
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1891,6 +1899,7 @@ static int qca_setup(struct hci_uart *hu)
soc_name = "qca2066";
break;
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1925,6 +1934,7 @@ retry:
clear_bit(QCA_SSR_TRIGGERED, &qca->flags);
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1958,6 +1968,7 @@ retry:
}
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -2046,6 +2057,17 @@ static const struct hci_uart_proto qca_proto = {
.dequeue = qca_dequeue,
};
+static const struct qca_device_data qca_soc_data_wcn3950 __maybe_unused = {
+ .soc_type = QCA_WCN3950,
+ .vregs = (struct qca_vreg []) {
+ { "vddio", 15000 },
+ { "vddxo", 60000 },
+ { "vddrf", 155000 },
+ { "vddch0", 585000 },
+ },
+ .num_vregs = 4,
+};
+
static const struct qca_device_data qca_soc_data_wcn3988 __maybe_unused = {
.soc_type = QCA_WCN3988,
.vregs = (struct qca_vreg []) {
@@ -2338,6 +2360,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->btsoc_type = QCA_ROME;
switch (qcadev->btsoc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -2359,6 +2382,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
switch (qcadev->btsoc_type) {
case QCA_WCN6855:
case QCA_WCN7850:
+ case QCA_WCN6750:
if (!device_property_present(&serdev->dev, "enable-gpios")) {
/*
* Backward compatibility with old DT sources. If the
@@ -2374,11 +2398,11 @@ static int qca_serdev_probe(struct serdev_device *serdev)
break;
}
fallthrough;
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
- case QCA_WCN6750:
qcadev->bt_power->dev = &serdev->dev;
err = qca_init_regulators(qcadev->bt_power, data->vregs,
data->num_vregs);
@@ -2683,6 +2707,7 @@ static const struct of_device_id qca_bluetooth_of_match[] = {
{ .compatible = "qcom,qca6174-bt" },
{ .compatible = "qcom,qca6390-bt", .data = &qca_soc_data_qca6390},
{ .compatible = "qcom,qca9377-bt" },
+ { .compatible = "qcom,wcn3950-bt", .data = &qca_soc_data_wcn3950},
{ .compatible = "qcom,wcn3988-bt", .data = &qca_soc_data_wcn3988},
{ .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data_wcn3990},
{ .compatible = "qcom,wcn3991-bt", .data = &qca_soc_data_wcn3991},
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index fbf3079b92a5..5ea5dd80e297 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -90,6 +90,7 @@ struct hci_uart {
#define HCI_UART_REGISTERED 1
#define HCI_UART_PROTO_READY 2
#define HCI_UART_NO_SUSPEND_NOTIFIER 3
+#define HCI_UART_PROTO_INIT 4
/* TX states */
#define HCI_UART_SENDING 1
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 7651321d351c..a51935d37e5d 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -316,7 +316,7 @@ static inline void force_devcd_timeout(struct hci_dev *hdev,
unsigned int timeout)
{
#ifdef CONFIG_DEV_COREDUMP
- hdev->dump.timeout = msecs_to_jiffies(timeout * 1000);
+ hdev->dump.timeout = secs_to_jiffies(timeout);
#endif
}
@@ -416,6 +416,7 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
hdev->wakeup = vhci_wakeup;
hdev->setup = vhci_setup;
set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ set_bit(HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED, &hdev->quirks);
/* bit 6 is for external configuration */
if (opcode & 0x40)
@@ -645,7 +646,7 @@ static int vhci_open(struct inode *inode, struct file *file)
file->private_data = data;
nonseekable_open(inode, file);
- schedule_delayed_work(&data->open_timeout, msecs_to_jiffies(1000));
+ schedule_delayed_work(&data->open_timeout, secs_to_jiffies(1));
return 0;
}
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index d1f3d327ddd1..a8be8cf246fb 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -153,7 +153,8 @@ static int fsl_mc_dma_configure(struct device *dev)
else
ret = acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id);
- if (!ret && !mc_drv->driver_managed_dma) {
+ /* @mc_drv may not be valid when we're called from the IOMMU layer */
+ if (!ret && dev->driver && !mc_drv->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c
index 9dcc7184817d..efa3b6dddf4d 100644
--- a/drivers/bus/mhi/host/boot.c
+++ b/drivers/bus/mhi/host/boot.c
@@ -177,6 +177,36 @@ int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic)
}
EXPORT_SYMBOL_GPL(mhi_download_rddm_image);
+static void mhi_fw_load_error_dump(struct mhi_controller *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
+ void __iomem *base = mhi_cntrl->bhi;
+ int ret, i;
+ u32 val;
+ struct {
+ char *name;
+ u32 offset;
+ } error_reg[] = {
+ { "ERROR_CODE", BHI_ERRCODE },
+ { "ERROR_DBG1", BHI_ERRDBG1 },
+ { "ERROR_DBG2", BHI_ERRDBG2 },
+ { "ERROR_DBG3", BHI_ERRDBG3 },
+ { NULL },
+ };
+
+ read_lock_bh(pm_lock);
+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ for (i = 0; error_reg[i].name; i++) {
+ ret = mhi_read_reg(mhi_cntrl, base, error_reg[i].offset, &val);
+ if (ret)
+ break;
+ dev_err(dev, "Reg: %s value: 0x%x\n", error_reg[i].name, val);
+ }
+ }
+ read_unlock_bh(pm_lock);
+}
+
static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl,
const struct mhi_buf *mhi_buf)
{
@@ -226,24 +256,13 @@ static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl,
}
static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl,
- dma_addr_t dma_addr,
- size_t size)
+ const struct mhi_buf *mhi_buf)
{
- u32 tx_status, val, session_id;
- int i, ret;
- void __iomem *base = mhi_cntrl->bhi;
- rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
- struct {
- char *name;
- u32 offset;
- } error_reg[] = {
- { "ERROR_CODE", BHI_ERRCODE },
- { "ERROR_DBG1", BHI_ERRDBG1 },
- { "ERROR_DBG2", BHI_ERRDBG2 },
- { "ERROR_DBG3", BHI_ERRDBG3 },
- { NULL },
- };
+ rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
+ void __iomem *base = mhi_cntrl->bhi;
+ u32 tx_status, session_id;
+ int ret;
read_lock_bh(pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
@@ -255,11 +274,9 @@ static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl,
dev_dbg(dev, "Starting image download via BHI. Session ID: %u\n",
session_id);
mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
- mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
- upper_32_bits(dma_addr));
- mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
- lower_32_bits(dma_addr));
- mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH, upper_32_bits(mhi_buf->dma_addr));
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW, lower_32_bits(mhi_buf->dma_addr));
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, mhi_buf->len);
mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id);
read_unlock_bh(pm_lock);
@@ -274,18 +291,7 @@ static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl,
if (tx_status == BHI_STATUS_ERROR) {
dev_err(dev, "Image transfer failed\n");
- read_lock_bh(pm_lock);
- if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
- for (i = 0; error_reg[i].name; i++) {
- ret = mhi_read_reg(mhi_cntrl, base,
- error_reg[i].offset, &val);
- if (ret)
- break;
- dev_err(dev, "Reg: %s value: 0x%x\n",
- error_reg[i].name, val);
- }
- }
- read_unlock_bh(pm_lock);
+ mhi_fw_load_error_dump(mhi_cntrl);
goto invalid_pm_state;
}
@@ -296,6 +302,16 @@ invalid_pm_state:
return -EIO;
}
+static void mhi_free_bhi_buffer(struct mhi_controller *mhi_cntrl,
+ struct image_info *image_info)
+{
+ struct mhi_buf *mhi_buf = image_info->mhi_buf;
+
+ dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len, mhi_buf->buf, mhi_buf->dma_addr);
+ kfree(image_info->mhi_buf);
+ kfree(image_info);
+}
+
void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
struct image_info *image_info)
{
@@ -310,6 +326,45 @@ void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
kfree(image_info);
}
+static int mhi_alloc_bhi_buffer(struct mhi_controller *mhi_cntrl,
+ struct image_info **image_info,
+ size_t alloc_size)
+{
+ struct image_info *img_info;
+ struct mhi_buf *mhi_buf;
+
+ img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
+ if (!img_info)
+ return -ENOMEM;
+
+ /* Allocate memory for entry */
+ img_info->mhi_buf = kzalloc(sizeof(*img_info->mhi_buf), GFP_KERNEL);
+ if (!img_info->mhi_buf)
+ goto error_alloc_mhi_buf;
+
+ /* Allocate and populate vector table */
+ mhi_buf = img_info->mhi_buf;
+
+ mhi_buf->len = alloc_size;
+ mhi_buf->buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
+ &mhi_buf->dma_addr, GFP_KERNEL);
+ if (!mhi_buf->buf)
+ goto error_alloc_segment;
+
+ img_info->bhi_vec = NULL;
+ img_info->entries = 1;
+ *image_info = img_info;
+
+ return 0;
+
+error_alloc_segment:
+ kfree(mhi_buf);
+error_alloc_mhi_buf:
+ kfree(img_info);
+
+ return -ENOMEM;
+}
+
int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
struct image_info **image_info,
size_t alloc_size)
@@ -365,9 +420,9 @@ error_alloc_mhi_buf:
return -ENOMEM;
}
-static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
- const u8 *buf, size_t remainder,
- struct image_info *img_info)
+static void mhi_firmware_copy_bhie(struct mhi_controller *mhi_cntrl,
+ const u8 *buf, size_t remainder,
+ struct image_info *img_info)
{
size_t to_cpy;
struct mhi_buf *mhi_buf = img_info->mhi_buf;
@@ -386,15 +441,61 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
}
}
+static enum mhi_fw_load_type mhi_fw_load_type_get(const struct mhi_controller *mhi_cntrl)
+{
+ if (mhi_cntrl->fbc_download) {
+ return MHI_FW_LOAD_FBC;
+ } else {
+ if (mhi_cntrl->seg_len)
+ return MHI_FW_LOAD_BHIE;
+ else
+ return MHI_FW_LOAD_BHI;
+ }
+}
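For illustration, a rough sketch of how an MHI controller driver would opt into each load type through the two struct mhi_controller fields this helper inspects; the structure name and values here are hypothetical, only fbc_download and seg_len come from the MHI API:

	#include <linux/mhi.h>

	/* Illustrative only: selecting the firmware load path */
	static struct mhi_controller example_mhi_cntrl = {
		.fbc_download = true,	/* BHI in PBL, then BHIe in SBL -> MHI_FW_LOAD_FBC */
		/* .fbc_download = false, .seg_len = SZ_512K -> MHI_FW_LOAD_BHIE */
		/* .fbc_download = false, .seg_len = 0       -> MHI_FW_LOAD_BHI  */
	};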
+
+static int mhi_load_image_bhi(struct mhi_controller *mhi_cntrl, const u8 *fw_data, size_t size)
+{
+ struct image_info *image;
+ int ret;
+
+ ret = mhi_alloc_bhi_buffer(mhi_cntrl, &image, size);
+ if (ret)
+ return ret;
+
+ /* Load the firmware into BHI vec table */
+ memcpy(image->mhi_buf->buf, fw_data, size);
+
+ ret = mhi_fw_load_bhi(mhi_cntrl, &image->mhi_buf[image->entries - 1]);
+ mhi_free_bhi_buffer(mhi_cntrl, image);
+
+ return ret;
+}
+
+static int mhi_load_image_bhie(struct mhi_controller *mhi_cntrl, const u8 *fw_data, size_t size)
+{
+ struct image_info *image;
+ int ret;
+
+ ret = mhi_alloc_bhie_table(mhi_cntrl, &image, size);
+ if (ret)
+ return ret;
+
+ mhi_firmware_copy_bhie(mhi_cntrl, fw_data, size, image);
+
+ ret = mhi_fw_load_bhie(mhi_cntrl, &image->mhi_buf[image->entries - 1]);
+ mhi_free_bhie_table(mhi_cntrl, image);
+
+ return ret;
+}
+
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
{
const struct firmware *firmware = NULL;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_fw_load_type fw_load_type;
enum mhi_pm_state new_state;
const char *fw_name;
const u8 *fw_data;
- void *buf;
- dma_addr_t dma_addr;
size_t size, fw_sz;
int ret;
@@ -453,21 +554,17 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
fw_sz = firmware->size;
skip_req_fw:
- buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, &dma_addr,
- GFP_KERNEL);
- if (!buf) {
- release_firmware(firmware);
- goto error_fw_load;
- }
-
- /* Download image using BHI */
- memcpy(buf, fw_data, size);
- ret = mhi_fw_load_bhi(mhi_cntrl, dma_addr, size);
- dma_free_coherent(mhi_cntrl->cntrl_dev, size, buf, dma_addr);
+ fw_load_type = mhi_fw_load_type_get(mhi_cntrl);
+ if (fw_load_type == MHI_FW_LOAD_BHIE)
+ ret = mhi_load_image_bhie(mhi_cntrl, fw_data, size);
+ else
+ ret = mhi_load_image_bhi(mhi_cntrl, fw_data, size);
/* Error or in EDL mode, we're done */
if (ret) {
- dev_err(dev, "MHI did not load image over BHI, ret: %d\n", ret);
+ dev_err(dev, "MHI did not load image over BHI%s, ret: %d\n",
+ fw_load_type == MHI_FW_LOAD_BHIE ? "e" : "",
+ ret);
release_firmware(firmware);
goto error_fw_load;
}
@@ -486,7 +583,7 @@ skip_req_fw:
* If we're doing fbc, populate vector tables while
* device transitioning into MHI READY state
*/
- if (mhi_cntrl->fbc_download) {
+ if (fw_load_type == MHI_FW_LOAD_FBC) {
ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image, fw_sz);
if (ret) {
release_firmware(firmware);
@@ -494,7 +591,7 @@ skip_req_fw:
}
/* Load the firmware into BHIE vec table */
- mhi_firmware_copy(mhi_cntrl, fw_data, fw_sz, mhi_cntrl->fbc_image);
+ mhi_firmware_copy_bhie(mhi_cntrl, fw_data, fw_sz, mhi_cntrl->fbc_image);
}
release_firmware(firmware);
@@ -511,7 +608,7 @@ fw_load_ready_state:
return;
error_ready_state:
- if (mhi_cntrl->fbc_download) {
+ if (mhi_cntrl->fbc_image) {
mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
mhi_cntrl->fbc_image = NULL;
}
diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
index a9b1f8beee7b..13e7a55f54ff 100644
--- a/drivers/bus/mhi/host/init.c
+++ b/drivers/bus/mhi/host/init.c
@@ -1144,7 +1144,7 @@ int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
}
mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off;
- if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size) {
+ if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size || mhi_cntrl->seg_len) {
ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
&bhie_off);
if (ret) {
diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h
index 3134f111be35..ce566f7d2e92 100644
--- a/drivers/bus/mhi/host/internal.h
+++ b/drivers/bus/mhi/host/internal.h
@@ -29,6 +29,13 @@ struct bhi_vec_entry {
u64 size;
};
+enum mhi_fw_load_type {
+ MHI_FW_LOAD_BHI, /* BHI only in PBL */
+ MHI_FW_LOAD_BHIE, /* BHIe only in PBL */
+ MHI_FW_LOAD_FBC, /* BHI in PBL followed by BHIe in SBL */
+ MHI_FW_LOAD_MAX,
+};
+
enum mhi_ch_state_type {
MHI_CH_STATE_TYPE_RESET,
MHI_CH_STATE_TYPE_STOP,
diff --git a/drivers/bus/qcom-ssc-block-bus.c b/drivers/bus/qcom-ssc-block-bus.c
index 85d781a32df4..7f5fd4e0940d 100644
--- a/drivers/bus/qcom-ssc-block-bus.c
+++ b/drivers/bus/qcom-ssc-block-bus.c
@@ -264,18 +264,6 @@ static int qcom_ssc_block_bus_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
- data->pd_names = qcom_ssc_block_pd_names;
- data->num_pds = ARRAY_SIZE(qcom_ssc_block_pd_names);
-
- /* power domains */
- ret = qcom_ssc_block_bus_pds_attach(&pdev->dev, data->pds, data->pd_names, data->num_pds);
- if (ret < 0)
- return dev_err_probe(&pdev->dev, ret, "error when attaching power domains\n");
-
- ret = qcom_ssc_block_bus_pds_enable(data->pds, data->num_pds);
- if (ret < 0)
- return dev_err_probe(&pdev->dev, ret, "error when enabling power domains\n");
-
/* low level overrides for when the HW logic doesn't "just work" */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpm_sscaon_config0");
data->reg_mpm_sscaon_config0 = devm_ioremap_resource(&pdev->dev, res);
@@ -343,11 +331,30 @@ static int qcom_ssc_block_bus_probe(struct platform_device *pdev)
data->ssc_axi_halt = halt_args.args[0];
+ /* power domains */
+ data->pd_names = qcom_ssc_block_pd_names;
+ data->num_pds = ARRAY_SIZE(qcom_ssc_block_pd_names);
+
+ ret = qcom_ssc_block_bus_pds_attach(&pdev->dev, data->pds, data->pd_names, data->num_pds);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "error when attaching power domains\n");
+
+ ret = qcom_ssc_block_bus_pds_enable(data->pds, data->num_pds);
+ if (ret < 0) {
+ dev_err_probe(&pdev->dev, ret, "error when enabling power domains\n");
+ goto err_detach_pds_bus;
+ }
+
qcom_ssc_block_bus_init(&pdev->dev);
of_platform_populate(np, NULL, NULL, &pdev->dev);
return 0;
+
+err_detach_pds_bus:
+ qcom_ssc_block_bus_pds_detach(&pdev->dev, data->pds, data->num_pds);
+
+ return ret;
}
static void qcom_ssc_block_bus_remove(struct platform_device *pdev)
@@ -356,9 +363,6 @@ static void qcom_ssc_block_bus_remove(struct platform_device *pdev)
qcom_ssc_block_bus_deinit(&pdev->dev);
- iounmap(data->reg_mpm_sscaon_config0);
- iounmap(data->reg_mpm_sscaon_config1);
-
qcom_ssc_block_bus_pds_disable(data->pds, data->num_pds);
qcom_ssc_block_bus_pds_detach(&pdev->dev, data->pds, data->num_pds);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/cdx/cdx.c b/drivers/cdx/cdx.c
index 7811aa734053..092306ca2541 100644
--- a/drivers/cdx/cdx.c
+++ b/drivers/cdx/cdx.c
@@ -360,7 +360,8 @@ static int cdx_dma_configure(struct device *dev)
return ret;
}
- if (!ret && !cdx_drv->driver_managed_dma) {
+ /* @cdx_drv may not be valid when we're called from the IOMMU layer */
+ if (!ret && dev->driver && !cdx_drv->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 17854f052386..c85827843447 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -534,10 +534,10 @@ config HW_RANDOM_NPCM
If unsure, say Y.
config HW_RANDOM_KEYSTONE
+ tristate "TI Keystone NETCP SA Hardware random number generator"
depends on ARCH_KEYSTONE || COMPILE_TEST
depends on HAS_IOMEM && OF
default HW_RANDOM
- tristate "TI Keystone NETCP SA Hardware random number generator"
help
This option enables Keystone's hardware random generator.
@@ -579,15 +579,15 @@ config HW_RANDOM_ARM_SMCCC_TRNG
module will be called arm_smccc_trng.
config HW_RANDOM_CN10K
- tristate "Marvell CN10K Random Number Generator support"
- depends on HW_RANDOM && PCI && (ARM64 || (64BIT && COMPILE_TEST))
- default HW_RANDOM if ARCH_THUNDER
- help
- This driver provides support for the True Random Number
- generator available in Marvell CN10K SoCs.
+ tristate "Marvell CN10K Random Number Generator support"
+ depends on HW_RANDOM && PCI && (ARM64 || (64BIT && COMPILE_TEST))
+ default HW_RANDOM if ARCH_THUNDER
+ help
+ This driver provides support for the True Random Number
+ generator available in Marvell CN10K SoCs.
- To compile this driver as a module, choose M here.
- The module will be called cn10k_rng. If unsure, say Y.
+ To compile this driver as a module, choose M here.
+ The module will be called cn10k_rng. If unsure, say Y.
config HW_RANDOM_JH7110
tristate "StarFive JH7110 Random Number Generator support"
@@ -606,7 +606,8 @@ config HW_RANDOM_ROCKCHIP
default HW_RANDOM
help
This driver provides kernel-side support for the True Random Number
- Generator hardware found on some Rockchip SoC like RK3566 or RK3568.
+ Generator hardware found on some Rockchip SoCs like RK3566, RK3568
+ or RK3588.
To compile this driver as a module, choose M here: the
module will be called rockchip-rng.
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index 118a72acb99b..241664a9b5d9 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -13,6 +13,8 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/hw_random.h>
#include <linux/completion.h>
@@ -53,6 +55,7 @@
#define RNGC_SELFTEST_TIMEOUT 2500 /* us */
#define RNGC_SEED_TIMEOUT 200 /* ms */
+#define RNGC_PM_TIMEOUT 500 /* ms */
static bool self_test = true;
module_param(self_test, bool, 0);
@@ -123,7 +126,11 @@ static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
unsigned int status;
- int retval = 0;
+ int err, retval = 0;
+
+ err = pm_runtime_resume_and_get(rngc->dev);
+ if (err)
+ return err;
while (max >= sizeof(u32)) {
status = readl(rngc->base + RNGC_STATUS);
@@ -141,6 +148,8 @@ static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait)
max -= sizeof(u32);
}
}
+ pm_runtime_mark_last_busy(rngc->dev);
+ pm_runtime_put(rngc->dev);
return retval ? retval : -EIO;
}
@@ -169,7 +178,11 @@ static int imx_rngc_init(struct hwrng *rng)
{
struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
u32 cmd, ctrl;
- int ret;
+ int ret, err;
+
+ err = pm_runtime_resume_and_get(rngc->dev);
+ if (err)
+ return err;
/* clear error */
cmd = readl(rngc->base + RNGC_COMMAND);
@@ -186,15 +199,15 @@ static int imx_rngc_init(struct hwrng *rng)
ret = wait_for_completion_timeout(&rngc->rng_op_done,
msecs_to_jiffies(RNGC_SEED_TIMEOUT));
if (!ret) {
- ret = -ETIMEDOUT;
- goto err;
+ err = -ETIMEDOUT;
+ goto out;
}
} while (rngc->err_reg == RNGC_ERROR_STATUS_STAT_ERR);
if (rngc->err_reg) {
- ret = -EIO;
- goto err;
+ err = -EIO;
+ goto out;
}
/*
@@ -205,23 +218,29 @@ static int imx_rngc_init(struct hwrng *rng)
ctrl |= RNGC_CTRL_AUTO_SEED;
writel(ctrl, rngc->base + RNGC_CONTROL);
+out:
/*
* if initialisation was successful, we keep the interrupt
* unmasked until imx_rngc_cleanup is called
* we mask the interrupt ourselves if we return an error
*/
- return 0;
+ if (err)
+ imx_rngc_irq_mask_clear(rngc);
-err:
- imx_rngc_irq_mask_clear(rngc);
- return ret;
+ pm_runtime_put(rngc->dev);
+ return err;
}
static void imx_rngc_cleanup(struct hwrng *rng)
{
struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
+ int err;
- imx_rngc_irq_mask_clear(rngc);
+ err = pm_runtime_resume_and_get(rngc->dev);
+ if (!err) {
+ imx_rngc_irq_mask_clear(rngc);
+ pm_runtime_put(rngc->dev);
+ }
}
static int __init imx_rngc_probe(struct platform_device *pdev)
@@ -240,7 +259,7 @@ static int __init imx_rngc_probe(struct platform_device *pdev)
if (IS_ERR(rngc->base))
return PTR_ERR(rngc->base);
- rngc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ rngc->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(rngc->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(rngc->clk), "Cannot get rng_clk\n");
@@ -248,14 +267,18 @@ static int __init imx_rngc_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
+ clk_prepare_enable(rngc->clk);
+
ver_id = readl(rngc->base + RNGC_VER_ID);
rng_type = FIELD_GET(RNG_TYPE, ver_id);
/*
* This driver supports only RNGC and RNGB. (There's a different
* driver for RNGA.)
*/
- if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB)
+ if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB) {
+ clk_disable_unprepare(rngc->clk);
return -ENODEV;
+ }
init_completion(&rngc->rng_op_done);
@@ -272,15 +295,24 @@ static int __init imx_rngc_probe(struct platform_device *pdev)
ret = devm_request_irq(&pdev->dev,
irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(rngc->clk);
return dev_err_probe(&pdev->dev, ret, "Can't get interrupt working.\n");
+ }
if (self_test) {
ret = imx_rngc_self_test(rngc);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(rngc->clk);
return dev_err_probe(&pdev->dev, ret, "self test failed\n");
+ }
}
+ pm_runtime_set_autosuspend_delay(&pdev->dev, RNGC_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ devm_pm_runtime_enable(&pdev->dev);
+
ret = devm_hwrng_register(&pdev->dev, &rngc->rng);
if (ret)
return dev_err_probe(&pdev->dev, ret, "hwrng registration failed\n");
@@ -310,7 +342,10 @@ static int imx_rngc_resume(struct device *dev)
return 0;
}
-static DEFINE_SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume);
+static const struct dev_pm_ops imx_rngc_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ RUNTIME_PM_OPS(imx_rngc_suspend, imx_rngc_resume, NULL)
+};
static const struct of_device_id imx_rngc_dt_ids[] = {
{ .compatible = "fsl,imx25-rngb" },
@@ -321,7 +356,7 @@ MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids);
static struct platform_driver imx_rngc_driver = {
.driver = {
.name = KBUILD_MODNAME,
- .pm = pm_sleep_ptr(&imx_rngc_pm_ops),
+ .pm = pm_ptr(&imx_rngc_pm_ops),
.of_match_table = imx_rngc_dt_ids,
},
};
diff --git a/drivers/char/hw_random/rockchip-rng.c b/drivers/char/hw_random/rockchip-rng.c
index 289b385bbf05..161050591663 100644
--- a/drivers/char/hw_random/rockchip-rng.c
+++ b/drivers/char/hw_random/rockchip-rng.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * rockchip-rng.c True Random Number Generator driver for Rockchip RK3568 SoC
+ * rockchip-rng.c True Random Number Generator driver for Rockchip SoCs
*
* Copyright (c) 2018, Fuzhou Rockchip Electronics Co., Ltd.
* Copyright (c) 2022, Aurelien Jarno
+ * Copyright (c) 2025, Collabora Ltd.
* Authors:
* Lin Jinhan <troy.lin@rock-chips.com>
* Aurelien Jarno <aurelien@aurel32.net>
+ * Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
*/
#include <linux/clk.h>
#include <linux/hw_random.h>
@@ -32,6 +34,9 @@
*/
#define RK_RNG_SAMPLE_CNT 1000
+/* after how many bytes of output TRNGv1 implementations should be reseeded */
+#define RK_TRNG_V1_AUTO_RESEED_CNT 16000
+
/* TRNG registers from RK3568 TRM-Part2, section 5.4.1 */
#define TRNG_RST_CTL 0x0004
#define TRNG_RNG_CTL 0x0400
@@ -49,11 +54,64 @@
#define TRNG_RNG_SAMPLE_CNT 0x0404
#define TRNG_RNG_DOUT 0x0410
+/*
+ * TRNG V1 register definitions
+ * The TRNG V1 IP is a stand-alone TRNG implementation (not part of a crypto IP)
+ * and can be found in the Rockchip RK3588 SoC
+ */
+#define TRNG_V1_CTRL 0x0000
+#define TRNG_V1_CTRL_NOP 0x00
+#define TRNG_V1_CTRL_RAND 0x01
+#define TRNG_V1_CTRL_SEED 0x02
+
+#define TRNG_V1_STAT 0x0004
+#define TRNG_V1_STAT_SEEDED BIT(9)
+#define TRNG_V1_STAT_GENERATING BIT(30)
+#define TRNG_V1_STAT_RESEEDING BIT(31)
+
+#define TRNG_V1_MODE 0x0008
+#define TRNG_V1_MODE_128_BIT (0x00 << 3)
+#define TRNG_V1_MODE_256_BIT (0x01 << 3)
+
+/* Interrupt Enable register; unused because polling is faster */
+#define TRNG_V1_IE 0x0010
+#define TRNG_V1_IE_GLBL_EN BIT(31)
+#define TRNG_V1_IE_SEED_DONE_EN BIT(1)
+#define TRNG_V1_IE_RAND_RDY_EN BIT(0)
+
+#define TRNG_V1_ISTAT 0x0014
+#define TRNG_V1_ISTAT_RAND_RDY BIT(0)
+
+/* RAND0 ~ RAND7 */
+#define TRNG_V1_RAND0 0x0020
+#define TRNG_V1_RAND7 0x003C
+
+/* Auto Reseed Register */
+#define TRNG_V1_AUTO_RQSTS 0x0060
+
+#define TRNG_V1_VERSION 0x00F0
+#define TRNG_v1_VERSION_CODE 0x46bc
+/* end of TRNG_V1 register definitions */
+
+/* Before removing this assert, give rk3588_rng_read an upper bound of 32 */
+static_assert(RK_RNG_MAX_BYTE <= (TRNG_V1_RAND7 + 4 - TRNG_V1_RAND0),
+ "You raised RK_RNG_MAX_BYTE and broke rk3588-rng, congrats.");
+
struct rk_rng {
struct hwrng rng;
void __iomem *base;
int clk_num;
struct clk_bulk_data *clk_bulks;
+ const struct rk_rng_soc_data *soc_data;
+ struct device *dev;
+};
+
+struct rk_rng_soc_data {
+ int (*rk_rng_init)(struct hwrng *rng);
+ int (*rk_rng_read)(struct hwrng *rng, void *buf, size_t max, bool wait);
+ void (*rk_rng_cleanup)(struct hwrng *rng);
+ unsigned short quality;
+ bool reset_optional;
};
/* The mask in the upper 16 bits determines the bits that are updated */
@@ -62,19 +120,38 @@ static void rk_rng_write_ctl(struct rk_rng *rng, u32 val, u32 mask)
writel((mask << 16) | val, rng->base + TRNG_RNG_CTL);
}
-static int rk_rng_init(struct hwrng *rng)
+static inline void rk_rng_writel(struct rk_rng *rng, u32 val, u32 offset)
{
- struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
- int ret;
+ writel(val, rng->base + offset);
+}
+static inline u32 rk_rng_readl(struct rk_rng *rng, u32 offset)
+{
+ return readl(rng->base + offset);
+}
+
+static int rk_rng_enable_clks(struct rk_rng *rk_rng)
+{
+ int ret;
/* start clocks */
ret = clk_bulk_prepare_enable(rk_rng->clk_num, rk_rng->clk_bulks);
if (ret < 0) {
- dev_err((struct device *) rk_rng->rng.priv,
- "Failed to enable clks %d\n", ret);
+ dev_err(rk_rng->dev, "Failed to enable clocks: %d\n", ret);
return ret;
}
+ return 0;
+}
+
+static int rk3568_rng_init(struct hwrng *rng)
+{
+ struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
+ int ret;
+
+ ret = rk_rng_enable_clks(rk_rng);
+ if (ret < 0)
+ return ret;
+
/* set the sample period */
writel(RK_RNG_SAMPLE_CNT, rk_rng->base + TRNG_RNG_SAMPLE_CNT);
@@ -87,7 +164,7 @@ static int rk_rng_init(struct hwrng *rng)
return 0;
}
-static void rk_rng_cleanup(struct hwrng *rng)
+static void rk3568_rng_cleanup(struct hwrng *rng)
{
struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
@@ -98,14 +175,14 @@ static void rk_rng_cleanup(struct hwrng *rng)
clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks);
}
-static int rk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+static int rk3568_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
size_t to_read = min_t(size_t, max, RK_RNG_MAX_BYTE);
u32 reg;
int ret = 0;
- ret = pm_runtime_resume_and_get((struct device *) rk_rng->rng.priv);
+ ret = pm_runtime_resume_and_get(rk_rng->dev);
if (ret < 0)
return ret;
@@ -122,12 +199,120 @@ static int rk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
/* Read random data stored in the registers */
memcpy_fromio(buf, rk_rng->base + TRNG_RNG_DOUT, to_read);
out:
- pm_runtime_mark_last_busy((struct device *) rk_rng->rng.priv);
- pm_runtime_put_sync_autosuspend((struct device *) rk_rng->rng.priv);
+ pm_runtime_mark_last_busy(rk_rng->dev);
+ pm_runtime_put_sync_autosuspend(rk_rng->dev);
+
+ return (ret < 0) ? ret : to_read;
+}
+
+static int rk3588_rng_init(struct hwrng *rng)
+{
+ struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
+ u32 version, status, mask, istat;
+ int ret;
+
+ ret = rk_rng_enable_clks(rk_rng);
+ if (ret < 0)
+ return ret;
+
+ version = rk_rng_readl(rk_rng, TRNG_V1_VERSION);
+ if (version != TRNG_v1_VERSION_CODE) {
+ dev_err(rk_rng->dev,
+ "wrong trng version, expected = %08x, actual = %08x\n",
+ TRNG_v1_VERSION_CODE, version);
+ ret = -EFAULT;
+ goto err_disable_clk;
+ }
+
+ mask = TRNG_V1_STAT_SEEDED | TRNG_V1_STAT_GENERATING |
+ TRNG_V1_STAT_RESEEDING;
+ if (readl_poll_timeout(rk_rng->base + TRNG_V1_STAT, status,
+ (status & mask) == TRNG_V1_STAT_SEEDED,
+ RK_RNG_POLL_PERIOD_US, RK_RNG_POLL_TIMEOUT_US) < 0) {
+ dev_err(rk_rng->dev, "timed out waiting for hwrng to reseed\n");
+ ret = -ETIMEDOUT;
+ goto err_disable_clk;
+ }
+
+ /*
+ * clear ISTAT flag, downstream advises to do this to avoid
+ * auto-reseeding "on power on"
+ */
+ istat = rk_rng_readl(rk_rng, TRNG_V1_ISTAT);
+ rk_rng_writel(rk_rng, istat, TRNG_V1_ISTAT);
+
+ /* auto reseed after RK_TRNG_V1_AUTO_RESEED_CNT bytes */
+ rk_rng_writel(rk_rng, RK_TRNG_V1_AUTO_RESEED_CNT / 16, TRNG_V1_AUTO_RQSTS);
+
+ return 0;
+err_disable_clk:
+ clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks);
+ return ret;
+}
+
+static void rk3588_rng_cleanup(struct hwrng *rng)
+{
+ struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
+
+ clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks);
+}
+
+static int rk3588_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
+ size_t to_read = min_t(size_t, max, RK_RNG_MAX_BYTE);
+ int ret = 0;
+ u32 reg;
+
+ ret = pm_runtime_resume_and_get(rk_rng->dev);
+ if (ret < 0)
+ return ret;
+
+ /* Clear ISTAT; even without interrupts enabled, this will be updated */
+ reg = rk_rng_readl(rk_rng, TRNG_V1_ISTAT);
+ rk_rng_writel(rk_rng, reg, TRNG_V1_ISTAT);
+
+ /* generate 256 bits of random data */
+ rk_rng_writel(rk_rng, TRNG_V1_MODE_256_BIT, TRNG_V1_MODE);
+ rk_rng_writel(rk_rng, TRNG_V1_CTRL_RAND, TRNG_V1_CTRL);
+
+ ret = readl_poll_timeout_atomic(rk_rng->base + TRNG_V1_ISTAT, reg,
+ (reg & TRNG_V1_ISTAT_RAND_RDY), 0,
+ RK_RNG_POLL_TIMEOUT_US);
+ if (ret < 0)
+ goto out;
+
+ /* Read random data that's in registers TRNG_V1_RAND0 through RAND7 */
+ memcpy_fromio(buf, rk_rng->base + TRNG_V1_RAND0, to_read);
+
+out:
+ /* Clear ISTAT */
+ rk_rng_writel(rk_rng, reg, TRNG_V1_ISTAT);
+ /* close the TRNG */
+ rk_rng_writel(rk_rng, TRNG_V1_CTRL_NOP, TRNG_V1_CTRL);
+
+ pm_runtime_mark_last_busy(rk_rng->dev);
+ pm_runtime_put_sync_autosuspend(rk_rng->dev);
return (ret < 0) ? ret : to_read;
}
+static const struct rk_rng_soc_data rk3568_soc_data = {
+ .rk_rng_init = rk3568_rng_init,
+ .rk_rng_read = rk3568_rng_read,
+ .rk_rng_cleanup = rk3568_rng_cleanup,
+ .quality = 900,
+ .reset_optional = false,
+};
+
+static const struct rk_rng_soc_data rk3588_soc_data = {
+ .rk_rng_init = rk3588_rng_init,
+ .rk_rng_read = rk3588_rng_read,
+ .rk_rng_cleanup = rk3588_rng_cleanup,
+ .quality = 999, /* as determined by actual testing */
+ .reset_optional = true,
+};
+
static int rk_rng_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -139,6 +324,7 @@ static int rk_rng_probe(struct platform_device *pdev)
if (!rk_rng)
return -ENOMEM;
+ rk_rng->soc_data = of_device_get_match_data(dev);
rk_rng->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rk_rng->base))
return PTR_ERR(rk_rng->base);
@@ -148,34 +334,40 @@ static int rk_rng_probe(struct platform_device *pdev)
return dev_err_probe(dev, rk_rng->clk_num,
"Failed to get clks property\n");
- rst = devm_reset_control_array_get_exclusive(&pdev->dev);
- if (IS_ERR(rst))
- return dev_err_probe(dev, PTR_ERR(rst), "Failed to get reset property\n");
+ if (rk_rng->soc_data->reset_optional)
+ rst = devm_reset_control_array_get_optional_exclusive(dev);
+ else
+ rst = devm_reset_control_array_get_exclusive(dev);
- reset_control_assert(rst);
- udelay(2);
- reset_control_deassert(rst);
+ if (rst) {
+ if (IS_ERR(rst))
+ return dev_err_probe(dev, PTR_ERR(rst), "Failed to get reset property\n");
+
+ reset_control_assert(rst);
+ udelay(2);
+ reset_control_deassert(rst);
+ }
platform_set_drvdata(pdev, rk_rng);
rk_rng->rng.name = dev_driver_string(dev);
if (!IS_ENABLED(CONFIG_PM)) {
- rk_rng->rng.init = rk_rng_init;
- rk_rng->rng.cleanup = rk_rng_cleanup;
+ rk_rng->rng.init = rk_rng->soc_data->rk_rng_init;
+ rk_rng->rng.cleanup = rk_rng->soc_data->rk_rng_cleanup;
}
- rk_rng->rng.read = rk_rng_read;
- rk_rng->rng.priv = (unsigned long) dev;
- rk_rng->rng.quality = 900;
+ rk_rng->rng.read = rk_rng->soc_data->rk_rng_read;
+ rk_rng->dev = dev;
+ rk_rng->rng.quality = rk_rng->soc_data->quality;
pm_runtime_set_autosuspend_delay(dev, RK_RNG_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(dev);
ret = devm_pm_runtime_enable(dev);
if (ret)
- return dev_err_probe(&pdev->dev, ret, "Runtime pm activation failed.\n");
+ return dev_err_probe(dev, ret, "Runtime pm activation failed.\n");
ret = devm_hwrng_register(dev, &rk_rng->rng);
if (ret)
- return dev_err_probe(&pdev->dev, ret, "Failed to register Rockchip hwrng\n");
+ return dev_err_probe(dev, ret, "Failed to register Rockchip hwrng\n");
return 0;
}
@@ -184,7 +376,7 @@ static int __maybe_unused rk_rng_runtime_suspend(struct device *dev)
{
struct rk_rng *rk_rng = dev_get_drvdata(dev);
- rk_rng_cleanup(&rk_rng->rng);
+ rk_rng->soc_data->rk_rng_cleanup(&rk_rng->rng);
return 0;
}
@@ -193,7 +385,7 @@ static int __maybe_unused rk_rng_runtime_resume(struct device *dev)
{
struct rk_rng *rk_rng = dev_get_drvdata(dev);
- return rk_rng_init(&rk_rng->rng);
+ return rk_rng->soc_data->rk_rng_init(&rk_rng->rng);
}
static const struct dev_pm_ops rk_rng_pm_ops = {
@@ -204,7 +396,8 @@ static const struct dev_pm_ops rk_rng_pm_ops = {
};
static const struct of_device_id rk_rng_dt_match[] = {
- { .compatible = "rockchip,rk3568-rng", },
+ { .compatible = "rockchip,rk3568-rng", .data = (void *)&rk3568_soc_data },
+ { .compatible = "rockchip,rk3588-rng", .data = (void *)&rk3588_soc_data },
{ /* sentinel */ },
};
@@ -221,8 +414,9 @@ static struct platform_driver rk_rng_driver = {
module_platform_driver(rk_rng_driver);
-MODULE_DESCRIPTION("Rockchip RK3568 True Random Number Generator driver");
+MODULE_DESCRIPTION("Rockchip True Random Number Generator driver");
MODULE_AUTHOR("Lin Jinhan <troy.lin@rock-chips.com>");
MODULE_AUTHOR("Aurelien Jarno <aurelien@aurel32.net>");
MODULE_AUTHOR("Daniel Golle <daniel@makrotopia.org>");
+MODULE_AUTHOR("Nicolas Frattaroli <nicolas.frattaroli@collabora.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index 7174bfccc7b3..b95f6d0f17ed 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -152,8 +152,7 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
priv->period = ns_to_ktime(period * NSEC_PER_USEC);
init_completion(&priv->completion);
- hrtimer_init(&priv->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- priv->timer.function = timeriomem_rng_trigger;
+ hrtimer_setup(&priv->timer, timeriomem_rng_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
priv->rng_ops.name = dev_name(&pdev->dev);
priv->rng_ops.read = timeriomem_rng_read;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 2581186fa61b..92cbd24a36d8 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -278,7 +278,7 @@ static void crng_reseed(struct work_struct *work)
WRITE_ONCE(base_crng.generation, next_gen);
#ifdef CONFIG_VDSO_GETRANDOM
/* base_crng.generation's invalid value is ULONG_MAX, while
- * _vdso_rng_data.generation's invalid value is 0, so add one to the
+ * vdso_k_rng_data->generation's invalid value is 0, so add one to the
* former to arrive at the latter. Use smp_store_release so that this
* is ordered with the write above to base_crng.generation. Pairs with
* the smp_rmb() before the syscall in the vDSO code.
@@ -290,7 +290,7 @@ static void crng_reseed(struct work_struct *work)
* because the vDSO side only checks whether the value changed, without
* actually using or interpreting the value.
*/
- smp_store_release((unsigned long *)&__arch_get_k_vdso_rng_data()->generation, next_gen + 1);
+ smp_store_release((unsigned long *)&vdso_k_rng_data->generation, next_gen + 1);
#endif
if (!static_branch_likely(&crng_is_ready))
crng_init = CRNG_READY;
@@ -743,7 +743,7 @@ static void __cold _credit_init_bits(size_t bits)
queue_work(system_unbound_wq, &set_ready);
atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
#ifdef CONFIG_VDSO_GETRANDOM
- WRITE_ONCE(__arch_get_k_vdso_rng_data()->is_ready, true);
+ WRITE_ONCE(vdso_k_rng_data->is_ready, true);
#endif
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index f887569fd3d0..677bb5ac950a 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -37,6 +37,7 @@
#include <linux/kfifo.h>
#include <linux/platform_device.h>
#include <linux/gfp.h>
+#include <linux/string_choices.h>
#include <linux/uaccess.h>
#include <asm/io.h>
@@ -1268,12 +1269,12 @@ static void sonypi_display_info(void)
"compat = %s, mask = 0x%08lx, useinput = %s, acpi = %s\n",
sonypi_device.model,
verbose,
- fnkeyinit ? "on" : "off",
- camera ? "on" : "off",
- compat ? "on" : "off",
+ str_on_off(fnkeyinit),
+ str_on_off(camera),
+ str_on_off(compat),
mask,
- useinput ? "on" : "off",
- SONYPI_ACPI_ACTIVE ? "on" : "off");
+ str_on_off(useinput),
+ str_on_off(SONYPI_ACPI_ACTIVE));
printk(KERN_INFO "sonypi: enabled at irq=%d, port1=0x%x, port2=0x%x\n",
sonypi_device.irq,
sonypi_device.ioport1, sonypi_device.ioport2);
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 0fc9a510e059..fe4f3a609934 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -210,6 +210,15 @@ config TCG_CRB
from within Linux. To compile this driver as a module, choose
M here; the module will be called tpm_crb.
+config TCG_ARM_CRB_FFA
+ tristate "TPM CRB over Arm FF-A Transport"
+ depends on ARM_FFA_TRANSPORT && TCG_CRB
+ default TCG_CRB
+ help
+ If the Arm FF-A transport is used to access the TPM, say Yes.
+ To compile this driver as a module, choose M here; the module
+ will be called tpm_crb_ffa.
+
config TCG_VTPM_PROXY
tristate "VTPM Proxy Interface"
depends on TCG_TPM
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 9bb142c75243..2b004df8c04b 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -42,5 +42,6 @@ obj-$(CONFIG_TCG_IBMVTPM) += tpm_ibmvtpm.o
obj-$(CONFIG_TCG_TIS_ST33ZP24) += st33zp24/
obj-$(CONFIG_TCG_XEN) += xen-tpmfront.o
obj-$(CONFIG_TCG_CRB) += tpm_crb.o
+obj-$(CONFIG_TCG_ARM_CRB_FFA) += tpm_crb_ffa.o
obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o
obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 7df7abaf3e52..e25daf2396d3 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -168,6 +168,11 @@ int tpm_try_get_ops(struct tpm_chip *chip)
goto out_ops;
mutex_lock(&chip->tpm_mutex);
+
+ /* tpm_chip_start may issue IO that is denied while suspended */
+ if (chip->flags & TPM_CHIP_FLAG_SUSPENDED)
+ goto out_lock;
+
rc = tpm_chip_start(chip);
if (rc)
goto out_lock;
@@ -300,6 +305,7 @@ int tpm_class_shutdown(struct device *dev)
down_write(&chip->ops_sem);
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
if (!tpm_chip_start(chip)) {
+ tpm2_end_auth_session(chip);
tpm2_shutdown(chip, TPM2_SU_CLEAR);
tpm_chip_stop(chip);
}
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index b1daa0d7b341..8d7e4da6ed53 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -58,6 +58,30 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal)
}
EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
+static void tpm_chip_cancel(struct tpm_chip *chip)
+{
+ if (!chip->ops->cancel)
+ return;
+
+ chip->ops->cancel(chip);
+}
+
+static u8 tpm_chip_status(struct tpm_chip *chip)
+{
+ if (!chip->ops->status)
+ return 0;
+
+ return chip->ops->status(chip);
+}
+
+static bool tpm_chip_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ if (!chip->ops->req_canceled)
+ return false;
+
+ return chip->ops->req_canceled(chip, status);
+}
+
static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
{
struct tpm_header *header = buf;
@@ -104,12 +128,12 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
stop = jiffies + tpm_calc_ordinal_duration(chip, ordinal);
do {
- u8 status = chip->ops->status(chip);
+ u8 status = tpm_chip_status(chip);
if ((status & chip->ops->req_complete_mask) ==
chip->ops->req_complete_val)
goto out_recv;
- if (chip->ops->req_canceled(chip, status)) {
+ if (tpm_chip_req_canceled(chip, status)) {
dev_err(&chip->dev, "Operation Canceled\n");
return -ECANCELED;
}
@@ -118,7 +142,7 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
rmb();
} while (time_before(jiffies, stop));
- chip->ops->cancel(chip);
+ tpm_chip_cancel(chip);
dev_err(&chip->dev, "Operation Timed out\n");
return -ETIME;
@@ -445,18 +469,11 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
if (!chip)
return -ENODEV;
- /* Give back zero bytes, as TPM chip has not yet fully resumed: */
- if (chip->flags & TPM_CHIP_FLAG_SUSPENDED) {
- rc = 0;
- goto out;
- }
-
if (chip->flags & TPM_CHIP_FLAG_TPM2)
rc = tpm2_get_random(chip, out, max);
else
rc = tpm1_get_random(chip, out, max);
-out:
tpm_put_ops(chip);
return rc;
}
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index dfdcbd009720..524d802ede26 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -359,7 +359,6 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
} while (retries-- && total < max);
tpm_buf_destroy(&buf);
- tpm2_end_auth_session(chip);
return total ? total : -EIO;
out:
diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c
index b70165b588ec..3f89635ba5e8 100644
--- a/drivers/char/tpm/tpm2-sessions.c
+++ b/drivers/char/tpm/tpm2-sessions.c
@@ -982,7 +982,7 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
int rc;
if (chip->auth) {
- dev_warn_once(&chip->dev, "auth session is active\n");
+ dev_dbg_once(&chip->dev, "auth session is active\n");
return 0;
}
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index ea085b14ab7c..876edf2705ab 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -19,6 +19,7 @@
#ifdef CONFIG_ARM64
#include <linux/arm-smccc.h>
#endif
+#include "tpm_crb_ffa.h"
#include "tpm.h"
#define ACPI_SIG_TPM2 "TPM2"
@@ -100,6 +101,8 @@ struct crb_priv {
u32 smc_func_id;
u32 __iomem *pluton_start_addr;
u32 __iomem *pluton_reply_addr;
+ u8 ffa_flags;
+ u8 ffa_attributes;
};
struct tpm2_crb_smc {
@@ -110,11 +113,30 @@ struct tpm2_crb_smc {
u32 smc_func_id;
};
+/* CRB over FFA start method parameters in TCG2 ACPI table */
+struct tpm2_crb_ffa {
+ u8 flags;
+ u8 attributes;
+ u16 partition_id;
+ u8 reserved[8];
+};
+
struct tpm2_crb_pluton {
u64 start_addr;
u64 reply_addr;
};
+/*
+ * Returns true if the start method supports idle.
+ */
+static inline bool tpm_crb_has_idle(u32 start_method)
+{
+ return !(start_method == ACPI_TPM2_START_METHOD ||
+ start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD ||
+ start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC ||
+ start_method == ACPI_TPM2_CRB_WITH_ARM_FFA);
+}
+
static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
unsigned long timeout)
{
@@ -173,9 +195,7 @@ static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
{
int rc;
- if ((priv->sm == ACPI_TPM2_START_METHOD) ||
- (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
- (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
+ if (!tpm_crb_has_idle(priv->sm))
return 0;
iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req);
@@ -222,9 +242,7 @@ static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
{
int rc;
- if ((priv->sm == ACPI_TPM2_START_METHOD) ||
- (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
- (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
+ if (!tpm_crb_has_idle(priv->sm))
return 0;
iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->regs_t->ctrl_req);
@@ -255,13 +273,20 @@ static int crb_cmd_ready(struct tpm_chip *chip)
static int __crb_request_locality(struct device *dev,
struct crb_priv *priv, int loc)
{
- u32 value = CRB_LOC_STATE_LOC_ASSIGNED |
- CRB_LOC_STATE_TPM_REG_VALID_STS;
+ u32 value = CRB_LOC_STATE_LOC_ASSIGNED | CRB_LOC_STATE_TPM_REG_VALID_STS;
+ int rc;
if (!priv->regs_h)
return 0;
iowrite32(CRB_LOC_CTRL_REQUEST_ACCESS, &priv->regs_h->loc_ctrl);
+
+ if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
+ rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_LOCALITY_REQUEST, loc);
+ if (rc)
+ return rc;
+ }
+
if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, value, value,
TPM2_TIMEOUT_C)) {
dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n");
@@ -281,14 +306,21 @@ static int crb_request_locality(struct tpm_chip *chip, int loc)
static int __crb_relinquish_locality(struct device *dev,
struct crb_priv *priv, int loc)
{
- u32 mask = CRB_LOC_STATE_LOC_ASSIGNED |
- CRB_LOC_STATE_TPM_REG_VALID_STS;
+ u32 mask = CRB_LOC_STATE_LOC_ASSIGNED | CRB_LOC_STATE_TPM_REG_VALID_STS;
u32 value = CRB_LOC_STATE_TPM_REG_VALID_STS;
+ int rc;
if (!priv->regs_h)
return 0;
iowrite32(CRB_LOC_CTRL_RELINQUISH, &priv->regs_h->loc_ctrl);
+
+ if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
+ rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_LOCALITY_REQUEST, loc);
+ if (rc)
+ return rc;
+ }
+
if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, mask, value,
TPM2_TIMEOUT_C)) {
dev_warn(dev, "TPM_LOC_STATE_x.Relinquish timed out\n");
@@ -423,13 +455,13 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
* report only ACPI start but in practice seems to require both
* CRB start, hence invoking CRB start method if hid == MSFT0101.
*/
- if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
- (priv->sm == ACPI_TPM2_MEMORY_MAPPED) ||
- (!strcmp(priv->hid, "MSFT0101")))
+ if (priv->sm == ACPI_TPM2_COMMAND_BUFFER ||
+ priv->sm == ACPI_TPM2_MEMORY_MAPPED ||
+ !strcmp(priv->hid, "MSFT0101"))
iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start);
- if ((priv->sm == ACPI_TPM2_START_METHOD) ||
- (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD))
+ if (priv->sm == ACPI_TPM2_START_METHOD ||
+ priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)
rc = crb_do_acpi_start(chip);
if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
@@ -437,6 +469,11 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
rc = tpm_crb_smc_start(&chip->dev, priv->smc_func_id);
}
+ if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
+ iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start);
+ rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, chip->locality);
+ }
+
if (rc)
return rc;
@@ -446,13 +483,20 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
static void crb_cancel(struct tpm_chip *chip)
{
struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+ int rc;
iowrite32(CRB_CANCEL_INVOKE, &priv->regs_t->ctrl_cancel);
- if (((priv->sm == ACPI_TPM2_START_METHOD) ||
- (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)) &&
+ if ((priv->sm == ACPI_TPM2_START_METHOD ||
+ priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) &&
crb_do_acpi_start(chip))
dev_err(&chip->dev, "ACPI Start failed\n");
+
+ if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
+ rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, chip->locality);
+ if (rc)
+ dev_err(&chip->dev, "FF-A Start failed\n");
+ }
}
static bool crb_req_canceled(struct tpm_chip *chip, u8 status)
@@ -609,8 +653,9 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
* the control area, as one nice sane region except for some older
* stuff that puts the control area outside the ACPI IO region.
*/
- if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
- (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) {
+ if (priv->sm == ACPI_TPM2_COMMAND_BUFFER ||
+ priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA ||
+ priv->sm == ACPI_TPM2_MEMORY_MAPPED) {
if (iores &&
buf->control_address == iores->start +
sizeof(*priv->regs_h))
@@ -731,6 +776,7 @@ static int crb_acpi_add(struct acpi_device *device)
struct tpm_chip *chip;
struct device *dev = &device->dev;
struct tpm2_crb_smc *crb_smc;
+ struct tpm2_crb_ffa *crb_ffa;
struct tpm2_crb_pluton *crb_pluton;
acpi_status status;
u32 sm;
@@ -769,6 +815,27 @@ static int crb_acpi_add(struct acpi_device *device)
priv->smc_func_id = crb_smc->smc_func_id;
}
+ if (sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
+ if (buf->header.length < (sizeof(*buf) + sizeof(*crb_ffa))) {
+ dev_err(dev,
+ FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
+ buf->header.length,
+ ACPI_TPM2_CRB_WITH_ARM_FFA);
+ rc = -EINVAL;
+ goto out;
+ }
+ crb_ffa = ACPI_ADD_PTR(struct tpm2_crb_ffa, buf, sizeof(*buf));
+ priv->ffa_flags = crb_ffa->flags;
+ priv->ffa_attributes = crb_ffa->attributes;
+ rc = tpm_crb_ffa_init();
+ if (rc) {
+ /* If FF-A driver is not available yet, request probe retry */
+ if (rc == -ENOENT)
+ rc = -EPROBE_DEFER;
+ goto out;
+ }
+ }
+
if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
if (buf->header.length < (sizeof(*buf) + sizeof(*crb_pluton))) {
dev_err(dev,
diff --git a/drivers/char/tpm/tpm_crb_ffa.c b/drivers/char/tpm/tpm_crb_ffa.c
new file mode 100644
index 000000000000..3169a87a56b6
--- /dev/null
+++ b/drivers/char/tpm/tpm_crb_ffa.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Arm Ltd.
+ *
+ * This device driver implements the TPM CRB start method
+ * as defined in the TPM Service Command Response Buffer
+ * Interface Over FF-A (DEN0138).
+ */
+
+#define pr_fmt(fmt) "CRB_FFA: " fmt
+
+#include <linux/arm_ffa.h>
+#include "tpm_crb_ffa.h"
+
+/* TPM service function status codes */
+#define CRB_FFA_OK 0x05000001
+#define CRB_FFA_OK_RESULTS_RETURNED 0x05000002
+#define CRB_FFA_NOFUNC 0x8e000001
+#define CRB_FFA_NOTSUP 0x8e000002
+#define CRB_FFA_INVARG 0x8e000005
+#define CRB_FFA_INV_CRB_CTRL_DATA 0x8e000006
+#define CRB_FFA_ALREADY 0x8e000009
+#define CRB_FFA_DENIED 0x8e00000a
+#define CRB_FFA_NOMEM 0x8e00000b
+
+#define CRB_FFA_VERSION_MAJOR 1
+#define CRB_FFA_VERSION_MINOR 0
+
+/* version encoding */
+#define CRB_FFA_MAJOR_VERSION_MASK GENMASK(30, 16)
+#define CRB_FFA_MINOR_VERSION_MASK GENMASK(15, 0)
+#define CRB_FFA_MAJOR_VERSION(x) ((u16)(FIELD_GET(CRB_FFA_MAJOR_VERSION_MASK, (x))))
+#define CRB_FFA_MINOR_VERSION(x) ((u16)(FIELD_GET(CRB_FFA_MINOR_VERSION_MASK, (x))))
+
+/*
+ * Normal world sends requests with FFA_MSG_SEND_DIRECT_REQ and
+ * responses are returned with FFA_MSG_SEND_DIRECT_RESP for normal
+ * messages.
+ *
+ * All requests with FFA_MSG_SEND_DIRECT_REQ and FFA_MSG_SEND_DIRECT_RESP
+ * are using the AArch32 SMC calling convention with register usage as
+ * defined in FF-A specification:
+ * w0: Function ID (0x8400006F or 0x84000070)
+ * w1: Source/Destination IDs
+ * w2: Reserved (MBZ)
+ * w3-w7: Implementation defined, free to be used below
+ */
+
+/*
+ * Returns the version of the interface that is available
+ * Call register usage:
+ * w3: Not used (MBZ)
+ * w4: TPM service function ID, CRB_FFA_GET_INTERFACE_VERSION
+ * w5-w7: Reserved (MBZ)
+ *
+ * Return register usage:
+ * w3: Not used (MBZ)
+ * w4: TPM service function status
+ * w5: TPM service interface version
+ * Bits[31:16]: major version
+ * Bits[15:0]: minor version
+ * w6-w7: Reserved (MBZ)
+ *
+ * Possible function status codes in register w4:
+ * CRB_FFA_OK_RESULTS_RETURNED: The version of the interface has been
+ * returned.
+ */
+#define CRB_FFA_GET_INTERFACE_VERSION 0x0f000001
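As an illustration of the register usage documented above, a version query through the driver's direct-message helper (defined further down in this file) could look roughly like the following; the response's w5 arrives in direct_msg_data.data2:

	/* Illustrative sketch only; the driver's own helper does the real work */
	rc = __tpm_crb_ffa_send_recieve(CRB_FFA_GET_INTERFACE_VERSION, 0, 0, 0);
	if (!rc) {
		u32 version = tpm_crb_ffa->direct_msg_data.data2;	/* w5 */
		u16 major = CRB_FFA_MAJOR_VERSION(version);
		u16 minor = CRB_FFA_MINOR_VERSION(version);
	}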
+
+/*
+ * Notify the TPM service that a command or locality request is ready to be processed
+ * Call register usage:
+ * w3: Not used (MBZ)
+ * w4: TPM service function ID, CRB_FFA_START
+ * w5: Start function qualifier
+ * Bits[31:8] (MBZ)
+ * Bits[7:0]
+ * 0: Notifies TPM that a command is ready to be processed
+ * 1: Notifies TPM that a locality request is ready to be processed
+ * w6: TPM locality, one of 0..4
+ * -If the start function qualifier is 0, identifies the locality
+ * from where the command originated.
+ * -If the start function qualifier is 1, identifies the locality
+ * of the locality request
+ * w6-w7: Reserved (MBZ)
+ *
+ * Return register usage:
+ * w3: Not used (MBZ)
+ * w4: TPM service function status
+ * w5-w7: Reserved (MBZ)
+ *
+ * Possible function status codes in register w4:
+ * CRB_FFA_OK: the TPM service has been notified successfully
+ * CRB_FFA_INVARG: one or more arguments are not valid
+ * CRB_FFA_INV_CRB_CTRL_DATA: CRB control data or locality control
+ * data at the given TPM locality is not valid
+ * CRB_FFA_DENIED: the TPM has previously disabled locality requests and
+ * command processing at the given locality
+ */
+#define CRB_FFA_START 0x0f000201
+
+struct tpm_crb_ffa {
+ struct ffa_device *ffa_dev;
+ u16 major_version;
+ u16 minor_version;
+ /* lock to protect sending of FF-A messages: */
+ struct mutex msg_data_lock;
+ struct ffa_send_direct_data direct_msg_data;
+};
+
+static struct tpm_crb_ffa *tpm_crb_ffa;
+
+static int tpm_crb_ffa_to_linux_errno(int errno)
+{
+ int rc;
+
+ switch (errno) {
+ case CRB_FFA_OK:
+ case CRB_FFA_OK_RESULTS_RETURNED:
+ rc = 0;
+ break;
+ case CRB_FFA_NOFUNC:
+ rc = -ENOENT;
+ break;
+ case CRB_FFA_NOTSUP:
+ rc = -EPERM;
+ break;
+ case CRB_FFA_INVARG:
+ rc = -EINVAL;
+ break;
+ case CRB_FFA_INV_CRB_CTRL_DATA:
+ rc = -ENOEXEC;
+ break;
+ case CRB_FFA_ALREADY:
+ rc = -EEXIST;
+ break;
+ case CRB_FFA_DENIED:
+ rc = -EACCES;
+ break;
+ case CRB_FFA_NOMEM:
+ rc = -ENOMEM;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+/**
+ * tpm_crb_ffa_init - called by the CRB driver to do any needed initialization
+ *
+ * This function is called by the tpm_crb driver during its
+ * initialization. If the tpm_crb_ffa driver has not been probed
+ * yet, it returns -ENOENT in order to force a retry. If the
+ * tpm_crb_ffa driver has been probed but failed with an error,
+ * it returns -ENODEV in order to prevent further retries.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int tpm_crb_ffa_init(void)
+{
+ if (!tpm_crb_ffa)
+ return -ENOENT;
+
+ if (IS_ERR_VALUE(tpm_crb_ffa))
+ return -ENODEV;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tpm_crb_ffa_init);
+
+static int __tpm_crb_ffa_send_receive(unsigned long func_id,
+ unsigned long a0,
+ unsigned long a1,
+ unsigned long a2)
+{
+ const struct ffa_msg_ops *msg_ops;
+ int ret;
+
+ if (!tpm_crb_ffa)
+ return -ENOENT;
+
+ msg_ops = tpm_crb_ffa->ffa_dev->ops->msg_ops;
+
+ memset(&tpm_crb_ffa->direct_msg_data, 0x00,
+ sizeof(struct ffa_send_direct_data));
+
+ tpm_crb_ffa->direct_msg_data.data1 = func_id;
+ tpm_crb_ffa->direct_msg_data.data2 = a0;
+ tpm_crb_ffa->direct_msg_data.data3 = a1;
+ tpm_crb_ffa->direct_msg_data.data4 = a2;
+
+ ret = msg_ops->sync_send_receive(tpm_crb_ffa->ffa_dev,
+ &tpm_crb_ffa->direct_msg_data);
+ if (!ret)
+ ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data.data1);
+
+ return ret;
+}
+
+/**
+ * tpm_crb_ffa_get_interface_version() - gets the ABI version of the TPM service
+ * @major: Pointer to a caller-allocated buffer to hold the major version
+ * number of the ABI
+ * @minor: Pointer to a caller-allocated buffer to hold the minor version
+ * number of the ABI
+ *
+ * Returns the major and minor version of the ABI of the FF-A based TPM.
+ * Allows the caller to evaluate its compatibility with the version of
+ * the ABI.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor)
+{
+ int rc;
+
+ if (!tpm_crb_ffa)
+ return -ENOENT;
+
+ if (IS_ERR_VALUE(tpm_crb_ffa))
+ return -ENODEV;
+
+ if (!major || !minor)
+ return -EINVAL;
+
+ guard(mutex)(&tpm_crb_ffa->msg_data_lock);
+
+ rc = __tpm_crb_ffa_send_receive(CRB_FFA_GET_INTERFACE_VERSION, 0x00, 0x00, 0x00);
+ if (!rc) {
+ *major = CRB_FFA_MAJOR_VERSION(tpm_crb_ffa->direct_msg_data.data2);
+ *minor = CRB_FFA_MINOR_VERSION(tpm_crb_ffa->direct_msg_data.data2);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_crb_ffa_get_interface_version);
+
+/**
+ * tpm_crb_ffa_start() - signals the TPM that a field has changed in the CRB
+ * @request_type: Identifies whether the change to the CRB is in the command
+ * fields or locality fields.
+ * @locality: Specifies the locality number.
+ *
+ * Used by the CRB driver to notify the TPM service that either the
+ * command fields or the locality fields of the CRB have been updated
+ * and are ready to be processed at the given locality.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int tpm_crb_ffa_start(int request_type, int locality)
+{
+ if (!tpm_crb_ffa)
+ return -ENOENT;
+
+ if (IS_ERR_VALUE(tpm_crb_ffa))
+ return -ENODEV;
+
+ guard(mutex)(&tpm_crb_ffa->msg_data_lock);
+
+ return __tpm_crb_ffa_send_receive(CRB_FFA_START, request_type, locality, 0x00);
+}
+EXPORT_SYMBOL_GPL(tpm_crb_ffa_start);
+
+static int tpm_crb_ffa_probe(struct ffa_device *ffa_dev)
+{
+ struct tpm_crb_ffa *p;
+ int rc;
+
+ /* only one instance of a TPM partition is supported */
+ if (tpm_crb_ffa && !IS_ERR_VALUE(tpm_crb_ffa))
+ return -EEXIST;
+
+ /* Set tpm_crb_ffa now so a failed probe can be detected later */
+ tpm_crb_ffa = ERR_PTR(-ENODEV);
+
+ if (!ffa_partition_supports_direct_recv(ffa_dev)) {
+ pr_err("TPM partition doesn't support direct message receive.\n");
+ return -EINVAL;
+ }
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+ tpm_crb_ffa = p;
+
+ mutex_init(&tpm_crb_ffa->msg_data_lock);
+ tpm_crb_ffa->ffa_dev = ffa_dev;
+ ffa_dev_set_drvdata(ffa_dev, tpm_crb_ffa);
+
+ /* if TPM is aarch32 use 32-bit SMCs */
+ if (!ffa_partition_check_property(ffa_dev, FFA_PARTITION_AARCH64_EXEC))
+ ffa_dev->ops->msg_ops->mode_32bit_set(ffa_dev);
+
+ /* verify compatibility of TPM service version number */
+ rc = tpm_crb_ffa_get_interface_version(&tpm_crb_ffa->major_version,
+ &tpm_crb_ffa->minor_version);
+ if (rc) {
+ pr_err("failed to get crb interface version. rc:%d", rc);
+ goto out;
+ }
+
+ pr_info("ABI version %u.%u", tpm_crb_ffa->major_version,
+ tpm_crb_ffa->minor_version);
+
+ if (tpm_crb_ffa->major_version != CRB_FFA_VERSION_MAJOR ||
+ (tpm_crb_ffa->minor_version > 0 &&
+ tpm_crb_ffa->minor_version < CRB_FFA_VERSION_MINOR)) {
+ pr_err("Incompatible ABI version");
+ goto out;
+ }
+
+ return 0;
+
+out:
+ kfree(tpm_crb_ffa);
+ tpm_crb_ffa = ERR_PTR(-ENODEV);
+ return -EINVAL;
+}
+
+static void tpm_crb_ffa_remove(struct ffa_device *ffa_dev)
+{
+ kfree(tpm_crb_ffa);
+ tpm_crb_ffa = NULL;
+}
+
+static const struct ffa_device_id tpm_crb_ffa_device_id[] = {
+ /* 17b862a4-1806-4faf-86b3-089a58353861 */
+ { UUID_INIT(0x17b862a4, 0x1806, 0x4faf,
+ 0x86, 0xb3, 0x08, 0x9a, 0x58, 0x35, 0x38, 0x61) },
+ {}
+};
+
+static struct ffa_driver tpm_crb_ffa_driver = {
+ .name = "ffa-crb",
+ .probe = tpm_crb_ffa_probe,
+ .remove = tpm_crb_ffa_remove,
+ .id_table = tpm_crb_ffa_device_id,
+};
+
+module_ffa_driver(tpm_crb_ffa_driver);
+
+MODULE_AUTHOR("Arm");
+MODULE_DESCRIPTION("TPM CRB FFA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_crb_ffa.h b/drivers/char/tpm/tpm_crb_ffa.h
new file mode 100644
index 000000000000..645c41ede10e
--- /dev/null
+++ b/drivers/char/tpm/tpm_crb_ffa.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 Arm Ltd.
+ *
+ * This device driver implements the TPM CRB start method
+ * as defined in the TPM Service Command Response Buffer
+ * Interface Over FF-A (DEN0138).
+ */
+#ifndef _TPM_CRB_FFA_H
+#define _TPM_CRB_FFA_H
+
+#if IS_REACHABLE(CONFIG_TCG_ARM_CRB_FFA)
+int tpm_crb_ffa_init(void);
+int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor);
+int tpm_crb_ffa_start(int request_type, int locality);
+#else
+static inline int tpm_crb_ffa_init(void) { return 0; }
+static inline int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor) { return 0; }
+static inline int tpm_crb_ffa_start(int request_type, int locality) { return 0; }
+#endif
+
+#define CRB_FFA_START_TYPE_COMMAND 0
+#define CRB_FFA_START_TYPE_LOCALITY_REQUEST 1
+
+#endif
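To tie the pieces together, here is a minimal sketch of how a CRB start-method backend could drive this header's API, consistent with the crb_acpi_add() hunk earlier in the series. Everything outside the tpm_crb_ffa_*() calls and the CRB_FFA_START_TYPE_* constants (function name, device handling) is illustrative, not code from the patch.

#include <linux/device.h>
#include <linux/errno.h>
#include "tpm_crb_ffa.h"

/* Illustrative backend glue: defer until the FF-A TPM service is probed,
 * sanity-check the ABI, then signal a pending command at locality 0. */
static int crb_ffa_example_setup(struct device *dev)
{
	u16 major, minor;
	int rc;

	rc = tpm_crb_ffa_init();
	if (rc == -ENOENT)
		return -EPROBE_DEFER;	/* FF-A TPM service not ready yet */
	if (rc)
		return rc;

	rc = tpm_crb_ffa_get_interface_version(&major, &minor);
	if (rc)
		return rc;
	dev_info(dev, "TPM CRB over FF-A ABI %u.%u\n", major, minor);

	return tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, 0);
}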
diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c
index 139556b21cc6..53ba28ccd5d3 100644
--- a/drivers/char/tpm/tpm_ftpm_tee.c
+++ b/drivers/char/tpm/tpm_ftpm_tee.c
@@ -164,30 +164,10 @@ static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
return 0;
}
-static void ftpm_tee_tpm_op_cancel(struct tpm_chip *chip)
-{
- /* not supported */
-}
-
-static u8 ftpm_tee_tpm_op_status(struct tpm_chip *chip)
-{
- return 0;
-}
-
-static bool ftpm_tee_tpm_req_canceled(struct tpm_chip *chip, u8 status)
-{
- return false;
-}
-
static const struct tpm_class_ops ftpm_tee_tpm_ops = {
.flags = TPM_OPS_AUTO_STARTUP,
.recv = ftpm_tee_tpm_op_recv,
.send = ftpm_tee_tpm_op_send,
- .cancel = ftpm_tee_tpm_op_cancel,
- .status = ftpm_tee_tpm_op_status,
- .req_complete_mask = 0,
- .req_complete_val = 0,
- .req_canceled = ftpm_tee_tpm_req_canceled,
};
/*
@@ -362,7 +342,7 @@ MODULE_DEVICE_TABLE(of, of_ftpm_tee_ids);
static struct platform_driver ftpm_tee_plat_driver = {
.driver = {
.name = "ftpm-tee",
- .of_match_table = of_match_ptr(of_ftpm_tee_ids),
+ .of_match_table = of_ftpm_tee_ids,
},
.shutdown = ftpm_plat_tee_shutdown,
.probe = ftpm_plat_tee_probe,
diff --git a/drivers/char/tpm/tpm_ftpm_tee.h b/drivers/char/tpm/tpm_ftpm_tee.h
index f98daa7bf68c..e39903b7ea07 100644
--- a/drivers/char/tpm/tpm_ftpm_tee.h
+++ b/drivers/char/tpm/tpm_ftpm_tee.h
@@ -21,7 +21,6 @@
/**
* struct ftpm_tee_private - fTPM's private data
* @chip: struct tpm_chip instance registered with tpm framework.
- * @state: internal state
* @session: fTPM TA session identifier.
* @resp_len: cached response buffer length.
* @resp_buf: cached response buffer.
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index fdef214b9f6b..ed0d3d8449b3 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -114,11 +114,10 @@ again:
return 0;
/* process status changes without irq support */
do {
+ usleep_range(priv->timeout_min, priv->timeout_max);
status = chip->ops->status(chip);
if ((status & mask) == mask)
return 0;
- usleep_range(priv->timeout_min,
- priv->timeout_max);
} while (time_before(jiffies, stop));
return -ETIME;
}
@@ -464,7 +463,10 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
&priv->int_queue, false) < 0) {
- rc = -ETIME;
+ if (test_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags))
+ rc = -EAGAIN;
+ else
+ rc = -ETIME;
goto out_err;
}
status = tpm_tis_status(chip);
@@ -481,7 +483,10 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
&priv->int_queue, false) < 0) {
- rc = -ETIME;
+ if (test_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags))
+ rc = -EAGAIN;
+ else
+ rc = -ETIME;
goto out_err;
}
status = tpm_tis_status(chip);
@@ -546,9 +551,11 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
if (rc >= 0)
/* Data transfer done successfully */
break;
- else if (rc != -EIO)
+ else if (rc != -EAGAIN && rc != -EIO)
/* Data transfer failed, not recoverable */
return rc;
+
+ usleep_range(priv->timeout_min, priv->timeout_max);
}
/* go and do it */
@@ -1144,6 +1151,9 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
priv->timeout_max = TIS_TIMEOUT_MAX_ATML;
}
+ if (priv->manufacturer_id == TPM_VID_IFX)
+ set_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags);
+
if (is_bsw()) {
priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR,
ILB_REMAP_SIZE);
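The net effect of the hunks above is a bounded retry when a part with TPM_TIS_STATUS_VALID_RETRY set (Infineon) fails the status-valid wait: tpm_tis_send_data() now returns -EAGAIN instead of -ETIME, and tpm_tis_send_main() sleeps and tries again. A compressed sketch of that control flow follows; the names match the driver, but this is an illustration, not the verbatim functions.

/* Sketch of the retry shape introduced by this change. */
for (try = 0; try < TPM_RETRY; try++) {
	rc = tpm_tis_send_data(chip, buf, len);
	if (rc >= 0)
		break;				/* data transferred, go start the command */
	if (rc != -EAGAIN && rc != -EIO)
		return rc;			/* unrecoverable error */
	usleep_range(priv->timeout_min, priv->timeout_max);
}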
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index 690ad8e9b731..970d02c337c7 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -89,6 +89,7 @@ enum tpm_tis_flags {
TPM_TIS_INVALID_STATUS = 1,
TPM_TIS_DEFAULT_CANCELLATION = 2,
TPM_TIS_IRQ_TESTED = 3,
+ TPM_TIS_STATUS_VALID_RETRY = 4,
};
struct tpm_tis_data {
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index f476883bc93b..85e23961ec34 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -888,7 +888,6 @@ static int __init stm32f4_pll_ssc_parse_dt(struct device_node *np,
struct stm32f4_pll_ssc *conf)
{
int ret;
- const char *s;
if (!conf)
return -EINVAL;
@@ -916,7 +915,8 @@ static int __init stm32f4_pll_ssc_parse_dt(struct device_node *np,
conf->mod_type = ret;
pr_debug("%pOF: SSCG settings: mod_freq: %d, mod_depth: %d mod_method: %s [%d]\n",
- np, conf->mod_freq, conf->mod_depth, s, conf->mod_type);
+ np, conf->mod_freq, conf->mod_depth,
+ stm32f4_ssc_mod_methods[ret], conf->mod_type);
return 0;
}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index cf7720b9172f..0565c87656cf 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -2283,7 +2283,7 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *core,
unsigned long min_rate;
unsigned long max_rate;
int p_index = 0;
- long ret;
+ int ret;
/* sanity */
if (IS_ERR_OR_NULL(core))
@@ -4397,6 +4397,13 @@ fail_ops:
fail_name:
kref_put(&core->ref, __clk_release);
fail_out:
+ if (dev) {
+ dev_err_probe(dev, ret, "failed to register clk '%s' (%pS)\n",
+ init->name, hw);
+ } else {
+ pr_err("%pOF: error %pe: failed to register clk '%s' (%pS)\n",
+ np, ERR_PTR(ret), init->name, hw);
+ }
return ERR_PTR(ret);
}
@@ -5258,6 +5265,10 @@ of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
if (!clkspec)
return ERR_PTR(-EINVAL);
+ /* Check if node in clkspec is in disabled/fail state */
+ if (!of_device_is_available(clkspec->np))
+ return ERR_PTR(-ENOENT);
+
mutex_lock(&of_clk_mutex);
list_for_each_entry(provider, &of_clk_providers, link) {
if (provider->node == clkspec->np) {
diff --git a/drivers/clk/davinci/Makefile b/drivers/clk/davinci/Makefile
index 5d0ae1ee72ec..f9d5c9a392e4 100644
--- a/drivers/clk/davinci/Makefile
+++ b/drivers/clk/davinci/Makefile
@@ -4,10 +4,8 @@ ifeq ($(CONFIG_COMMON_CLK), y)
obj-$(CONFIG_ARCH_DAVINCI_DA8XX) += da8xx-cfgchip.o
obj-y += pll.o
-obj-$(CONFIG_ARCH_DAVINCI_DA830) += pll-da830.o
obj-$(CONFIG_ARCH_DAVINCI_DA850) += pll-da850.o
obj-y += psc.o
-obj-$(CONFIG_ARCH_DAVINCI_DA830) += psc-da830.o
obj-$(CONFIG_ARCH_DAVINCI_DA850) += psc-da850.o
endif
diff --git a/drivers/clk/davinci/pll-da830.c b/drivers/clk/davinci/pll-da830.c
deleted file mode 100644
index 0a0d06fb25fd..000000000000
--- a/drivers/clk/davinci/pll-da830.c
+++ /dev/null
@@ -1,71 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PLL clock descriptions for TI DA830/OMAP-L137/AM17XX
- *
- * Copyright (C) 2018 David Lechner <david@lechnology.com>
- */
-
-#include <linux/clkdev.h>
-#include <linux/clk/davinci.h>
-#include <linux/bitops.h>
-#include <linux/init.h>
-#include <linux/types.h>
-
-#include "pll.h"
-
-static const struct davinci_pll_clk_info da830_pll_info = {
- .name = "pll0",
- .pllm_mask = GENMASK(4, 0),
- .pllm_min = 4,
- .pllm_max = 32,
- .pllout_min_rate = 300000000,
- .pllout_max_rate = 600000000,
- .flags = PLL_HAS_CLKMODE | PLL_HAS_PREDIV | PLL_HAS_POSTDIV,
-};
-
-/*
- * NB: Technically, the clocks flagged as SYSCLK_FIXED_DIV are "fixed ratio",
- * meaning that we could change the divider as long as we keep the correct
- * ratio between all of the clocks, but we don't support that because there is
- * currently not a need for it.
- */
-
-SYSCLK(2, pll0_sysclk2, pll0_pllen, 5, SYSCLK_FIXED_DIV);
-SYSCLK(3, pll0_sysclk3, pll0_pllen, 5, 0);
-SYSCLK(4, pll0_sysclk4, pll0_pllen, 5, SYSCLK_FIXED_DIV);
-SYSCLK(5, pll0_sysclk5, pll0_pllen, 5, 0);
-SYSCLK(6, pll0_sysclk6, pll0_pllen, 5, SYSCLK_FIXED_DIV);
-SYSCLK(7, pll0_sysclk7, pll0_pllen, 5, 0);
-
-int da830_pll_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
-{
- struct clk *clk;
-
- davinci_pll_clk_register(dev, &da830_pll_info, "ref_clk", base, cfgchip);
-
- clk = davinci_pll_sysclk_register(dev, &pll0_sysclk2, base);
- clk_register_clkdev(clk, "pll0_sysclk2", "da830-psc0");
- clk_register_clkdev(clk, "pll0_sysclk2", "da830-psc1");
-
- clk = davinci_pll_sysclk_register(dev, &pll0_sysclk3, base);
- clk_register_clkdev(clk, "pll0_sysclk3", "da830-psc0");
-
- clk = davinci_pll_sysclk_register(dev, &pll0_sysclk4, base);
- clk_register_clkdev(clk, "pll0_sysclk4", "da830-psc0");
- clk_register_clkdev(clk, "pll0_sysclk4", "da830-psc1");
-
- clk = davinci_pll_sysclk_register(dev, &pll0_sysclk5, base);
- clk_register_clkdev(clk, "pll0_sysclk5", "da830-psc1");
-
- clk = davinci_pll_sysclk_register(dev, &pll0_sysclk6, base);
- clk_register_clkdev(clk, "pll0_sysclk6", "da830-psc0");
-
- clk = davinci_pll_sysclk_register(dev, &pll0_sysclk7, base);
-
- clk = davinci_pll_auxclk_register(dev, "pll0_auxclk", base);
- clk_register_clkdev(clk, NULL, "i2c_davinci.1");
- clk_register_clkdev(clk, "timer0", NULL);
- clk_register_clkdev(clk, NULL, "davinci-wdt");
-
- return 0;
-}
diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c
index 82727b1fc67a..6807a2efa93b 100644
--- a/drivers/clk/davinci/pll.c
+++ b/drivers/clk/davinci/pll.c
@@ -840,25 +840,16 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
}
/* needed in early boot for clocksource/clockevent */
-#ifdef CONFIG_ARCH_DAVINCI_DA850
CLK_OF_DECLARE(da850_pll0, "ti,da850-pll0", of_da850_pll0_init);
-#endif
static const struct of_device_id davinci_pll_of_match[] = {
-#ifdef CONFIG_ARCH_DAVINCI_DA850
{ .compatible = "ti,da850-pll1", .data = of_da850_pll1_init },
-#endif
{ }
};
static const struct platform_device_id davinci_pll_id_table[] = {
-#ifdef CONFIG_ARCH_DAVINCI_DA830
- { .name = "da830-pll", .driver_data = (kernel_ulong_t)da830_pll_init },
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DA850
{ .name = "da850-pll0", .driver_data = (kernel_ulong_t)da850_pll0_init },
{ .name = "da850-pll1", .driver_data = (kernel_ulong_t)da850_pll1_init },
-#endif
{ }
};
diff --git a/drivers/clk/davinci/psc-da830.c b/drivers/clk/davinci/psc-da830.c
deleted file mode 100644
index 6481337382a6..000000000000
--- a/drivers/clk/davinci/psc-da830.c
+++ /dev/null
@@ -1,118 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PSC clock descriptions for TI DA830/OMAP-L137/AM17XX
- *
- * Copyright (C) 2018 David Lechner <david@lechnology.com>
- */
-
-#include <linux/clk-provider.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#include "psc.h"
-
-LPSC_CLKDEV1(aemif_clkdev, NULL, "ti-aemif");
-LPSC_CLKDEV1(spi0_clkdev, NULL, "spi_davinci.0");
-LPSC_CLKDEV1(mmcsd_clkdev, NULL, "da830-mmc.0");
-LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
-
-static const struct davinci_lpsc_clk_info da830_psc0_info[] = {
- LPSC(0, 0, tpcc, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(1, 0, tptc0, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(2, 0, tptc1, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(3, 0, aemif, pll0_sysclk3, aemif_clkdev, LPSC_ALWAYS_ENABLED),
- LPSC(4, 0, spi0, pll0_sysclk2, spi0_clkdev, 0),
- LPSC(5, 0, mmcsd, pll0_sysclk2, mmcsd_clkdev, 0),
- LPSC(6, 0, aintc, pll0_sysclk4, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(7, 0, arm_rom, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(8, 0, secu_mgr, pll0_sysclk4, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(9, 0, uart0, pll0_sysclk2, uart0_clkdev, 0),
- LPSC(10, 0, scr0_ss, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(11, 0, scr1_ss, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(12, 0, scr2_ss, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(13, 0, pruss, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(14, 0, arm, pll0_sysclk6, NULL, LPSC_ALWAYS_ENABLED),
- { }
-};
-
-static int da830_psc0_init(struct device *dev, void __iomem *base)
-{
- return davinci_psc_register_clocks(dev, da830_psc0_info, 16, base);
-}
-
-static struct clk_bulk_data da830_psc0_parent_clks[] = {
- { .id = "pll0_sysclk2" },
- { .id = "pll0_sysclk3" },
- { .id = "pll0_sysclk4" },
- { .id = "pll0_sysclk6" },
-};
-
-const struct davinci_psc_init_data da830_psc0_init_data = {
- .parent_clks = da830_psc0_parent_clks,
- .num_parent_clks = ARRAY_SIZE(da830_psc0_parent_clks),
- .psc_init = &da830_psc0_init,
-};
-
-LPSC_CLKDEV3(usb0_clkdev, "fck", "da830-usb-phy-clks",
- NULL, "musb-da8xx",
- NULL, "cppi41-dmaengine");
-LPSC_CLKDEV1(usb1_clkdev, NULL, "ohci-da8xx");
-/* REVISIT: gpio-davinci.c should be modified to drop con_id */
-LPSC_CLKDEV1(gpio_clkdev, "gpio", NULL);
-LPSC_CLKDEV2(emac_clkdev, NULL, "davinci_emac.1",
- "fck", "davinci_mdio.0");
-LPSC_CLKDEV1(mcasp0_clkdev, NULL, "davinci-mcasp.0");
-LPSC_CLKDEV1(mcasp1_clkdev, NULL, "davinci-mcasp.1");
-LPSC_CLKDEV1(mcasp2_clkdev, NULL, "davinci-mcasp.2");
-LPSC_CLKDEV1(spi1_clkdev, NULL, "spi_davinci.1");
-LPSC_CLKDEV1(i2c1_clkdev, NULL, "i2c_davinci.2");
-LPSC_CLKDEV1(uart1_clkdev, NULL, "serial8250.1");
-LPSC_CLKDEV1(uart2_clkdev, NULL, "serial8250.2");
-LPSC_CLKDEV1(lcdc_clkdev, "fck", "da8xx_lcdc.0");
-LPSC_CLKDEV2(pwm_clkdev, "fck", "ehrpwm.0",
- "fck", "ehrpwm.1");
-LPSC_CLKDEV3(ecap_clkdev, "fck", "ecap.0",
- "fck", "ecap.1",
- "fck", "ecap.2");
-LPSC_CLKDEV2(eqep_clkdev, NULL, "eqep.0",
- NULL, "eqep.1");
-
-static const struct davinci_lpsc_clk_info da830_psc1_info[] = {
- LPSC(1, 0, usb0, pll0_sysclk2, usb0_clkdev, 0),
- LPSC(2, 0, usb1, pll0_sysclk4, usb1_clkdev, 0),
- LPSC(3, 0, gpio, pll0_sysclk4, gpio_clkdev, 0),
- LPSC(5, 0, emac, pll0_sysclk4, emac_clkdev, 0),
- LPSC(6, 0, emif3, pll0_sysclk5, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(7, 0, mcasp0, pll0_sysclk2, mcasp0_clkdev, 0),
- LPSC(8, 0, mcasp1, pll0_sysclk2, mcasp1_clkdev, 0),
- LPSC(9, 0, mcasp2, pll0_sysclk2, mcasp2_clkdev, 0),
- LPSC(10, 0, spi1, pll0_sysclk2, spi1_clkdev, 0),
- LPSC(11, 0, i2c1, pll0_sysclk4, i2c1_clkdev, 0),
- LPSC(12, 0, uart1, pll0_sysclk2, uart1_clkdev, 0),
- LPSC(13, 0, uart2, pll0_sysclk2, uart2_clkdev, 0),
- LPSC(16, 0, lcdc, pll0_sysclk2, lcdc_clkdev, 0),
- LPSC(17, 0, pwm, pll0_sysclk2, pwm_clkdev, 0),
- LPSC(20, 0, ecap, pll0_sysclk2, ecap_clkdev, 0),
- LPSC(21, 0, eqep, pll0_sysclk2, eqep_clkdev, 0),
- { }
-};
-
-static int da830_psc1_init(struct device *dev, void __iomem *base)
-{
- return davinci_psc_register_clocks(dev, da830_psc1_info, 32, base);
-}
-
-static struct clk_bulk_data da830_psc1_parent_clks[] = {
- { .id = "pll0_sysclk2" },
- { .id = "pll0_sysclk4" },
- { .id = "pll0_sysclk5" },
-};
-
-const struct davinci_psc_init_data da830_psc1_init_data = {
- .parent_clks = da830_psc1_parent_clks,
- .num_parent_clks = ARRAY_SIZE(da830_psc1_parent_clks),
- .psc_init = &da830_psc1_init,
-};
diff --git a/drivers/clk/davinci/psc.c b/drivers/clk/davinci/psc.c
index 355d1be0b5d8..b48322176c21 100644
--- a/drivers/clk/davinci/psc.c
+++ b/drivers/clk/davinci/psc.c
@@ -494,22 +494,14 @@ int of_davinci_psc_clk_init(struct device *dev,
}
static const struct of_device_id davinci_psc_of_match[] = {
-#ifdef CONFIG_ARCH_DAVINCI_DA850
{ .compatible = "ti,da850-psc0", .data = &of_da850_psc0_init_data },
{ .compatible = "ti,da850-psc1", .data = &of_da850_psc1_init_data },
-#endif
{ }
};
static const struct platform_device_id davinci_psc_id_table[] = {
-#ifdef CONFIG_ARCH_DAVINCI_DA830
- { .name = "da830-psc0", .driver_data = (kernel_ulong_t)&da830_psc0_init_data },
- { .name = "da830-psc1", .driver_data = (kernel_ulong_t)&da830_psc1_init_data },
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DA850
{ .name = "da850-psc0", .driver_data = (kernel_ulong_t)&da850_psc0_init_data },
{ .name = "da850-psc1", .driver_data = (kernel_ulong_t)&da850_psc1_init_data },
-#endif
{ }
};
diff --git a/drivers/clk/davinci/psc.h b/drivers/clk/davinci/psc.h
index bd23f6fd56df..742672843776 100644
--- a/drivers/clk/davinci/psc.h
+++ b/drivers/clk/davinci/psc.h
@@ -94,14 +94,9 @@ struct davinci_psc_init_data {
int (*psc_init)(struct device *dev, void __iomem *base);
};
-#ifdef CONFIG_ARCH_DAVINCI_DA830
-extern const struct davinci_psc_init_data da830_psc0_init_data;
-extern const struct davinci_psc_init_data da830_psc1_init_data;
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DA850
extern const struct davinci_psc_init_data da850_psc0_init_data;
extern const struct davinci_psc_init_data da850_psc1_init_data;
extern const struct davinci_psc_init_data of_da850_psc0_init_data;
extern const struct davinci_psc_init_data of_da850_psc1_init_data;
-#endif
+
#endif /* __CLK_DAVINCI_PSC_H__ */
diff --git a/drivers/clk/imgtec/clk-boston.c b/drivers/clk/imgtec/clk-boston.c
index b00cbd045af5..db96f8bea630 100644
--- a/drivers/clk/imgtec/clk-boston.c
+++ b/drivers/clk/imgtec/clk-boston.c
@@ -67,21 +67,21 @@ static void __init clk_boston_setup(struct device_node *np)
hw = clk_hw_register_fixed_rate(NULL, "input", NULL, 0, in_freq);
if (IS_ERR(hw)) {
- pr_err("failed to register input clock: %ld\n", PTR_ERR(hw));
+ pr_err("failed to register input clock: %pe\n", hw);
goto fail_input;
}
onecell->hws[BOSTON_CLK_INPUT] = hw;
hw = clk_hw_register_fixed_rate(NULL, "sys", "input", 0, sys_freq);
if (IS_ERR(hw)) {
- pr_err("failed to register sys clock: %ld\n", PTR_ERR(hw));
+ pr_err("failed to register sys clock: %pe\n", hw);
goto fail_sys;
}
onecell->hws[BOSTON_CLK_SYS] = hw;
hw = clk_hw_register_fixed_rate(NULL, "cpu", "input", 0, cpu_freq);
if (IS_ERR(hw)) {
- pr_err("failed to register cpu clock: %ld\n", PTR_ERR(hw));
+ pr_err("failed to register cpu clock: %pe\n", hw);
goto fail_cpu;
}
onecell->hws[BOSTON_CLK_CPU] = hw;
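The switch from %ld/PTR_ERR() to %pe works because %pe prints the symbolic name of an error carried in an ERR_PTR() (with CONFIG_SYMBOLIC_ERRNAME=y; otherwise the numeric value). A one-line illustration, not taken from the patch:

pr_err("failed to register input clock: %pe\n", ERR_PTR(-ENOMEM));
/* logs "... -ENOMEM" rather than "... -12" when symbolic errnames are enabled */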
diff --git a/drivers/clk/imx/clk-imx8mp-audiomix.c b/drivers/clk/imx/clk-imx8mp-audiomix.c
index c409fc7e0618..775f62dddb11 100644
--- a/drivers/clk/imx/clk-imx8mp-audiomix.c
+++ b/drivers/clk/imx/clk-imx8mp-audiomix.c
@@ -180,14 +180,14 @@ static struct clk_imx8mp_audiomix_sel sels[] = {
CLK_GATE("asrc", ASRC_IPG),
CLK_GATE("pdm", PDM_IPG),
CLK_GATE("earc", EARC_IPG),
- CLK_GATE("ocrama", OCRAMA_IPG),
+ CLK_GATE_PARENT("ocrama", OCRAMA_IPG, "axi"),
CLK_GATE("aud2htx", AUD2HTX_IPG),
CLK_GATE_PARENT("earc_phy", EARC_PHY, "sai_pll_out_div2"),
CLK_GATE("sdma2", SDMA2_ROOT),
CLK_GATE("sdma3", SDMA3_ROOT),
CLK_GATE("spba2", SPBA2_ROOT),
- CLK_GATE("dsp", DSP_ROOT),
- CLK_GATE("dspdbg", DSPDBG_ROOT),
+ CLK_GATE_PARENT("dsp", DSP_ROOT, "axi"),
+ CLK_GATE_PARENT("dspdbg", DSPDBG_ROOT, "axi"),
CLK_GATE("edma", EDMA_ROOT),
CLK_GATE_PARENT("audpll", AUDPLL_ROOT, "osc_24m"),
CLK_GATE("mu2", MU2_ROOT),
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
index fb18f507f121..fe6dac70f1a1 100644
--- a/drivers/clk/imx/clk-imx8mp.c
+++ b/drivers/clk/imx/clk-imx8mp.c
@@ -8,6 +8,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/units.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -406,11 +407,151 @@ static const char * const imx8mp_clkout_sels[] = {"audio_pll1_out", "audio_pll2_
static struct clk_hw **hws;
static struct clk_hw_onecell_data *clk_hw_data;
+struct imx8mp_clock_constraints {
+ unsigned int clkid;
+ u32 maxrate;
+};
+
+/*
+ * The tables below are taken from IMX8MPCEC Rev. 2.1, 07/2023,
+ * Table 13 ("Maximum frequency of modules").
+ * Entries where a probable datasheet typo was corrected are marked with a comment.
+ */
+static const struct imx8mp_clock_constraints imx8mp_clock_common_constraints[] = {
+ { IMX8MP_CLK_A53_DIV, 1000 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ENET_AXI, 266666667 }, /* Datasheet claims 266MHz */
+ { IMX8MP_CLK_NAND_USDHC_BUS, 266666667 }, /* Datasheet claims 266MHz */
+ { IMX8MP_CLK_MEDIA_APB, 200 * HZ_PER_MHZ },
+ { IMX8MP_CLK_HDMI_APB, 133333333 }, /* Datasheet claims 133MHz */
+ { IMX8MP_CLK_ML_AXI, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_AHB, 133333333 },
+ { IMX8MP_CLK_IPG_ROOT, 66666667 },
+ { IMX8MP_CLK_AUDIO_AHB, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_DISP2_PIX, 170 * HZ_PER_MHZ },
+ { IMX8MP_CLK_DRAM_ALT, 666666667 },
+ { IMX8MP_CLK_DRAM_APB, 200 * HZ_PER_MHZ },
+ { IMX8MP_CLK_CAN1, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_CAN2, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_PCIE_AUX, 10 * HZ_PER_MHZ },
+ { IMX8MP_CLK_I2C5, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_I2C6, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_SAI1, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_SAI2, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_SAI3, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_SAI5, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_SAI6, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_ENET_QOS, 125 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ENET_QOS_TIMER, 200 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ENET_REF, 125 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ENET_TIMER, 125 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ENET_PHY_REF, 125 * HZ_PER_MHZ },
+ { IMX8MP_CLK_NAND, 500 * HZ_PER_MHZ },
+ { IMX8MP_CLK_QSPI, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_USDHC1, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_USDHC2, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_I2C1, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_I2C2, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_I2C3, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_I2C4, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_UART1, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_UART2, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_UART3, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_UART4, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ECSPI1, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ECSPI2, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_PWM1, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_PWM2, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_PWM3, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_PWM4, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_GPT1, 100 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPT2, 100 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPT3, 100 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPT4, 100 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPT5, 100 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPT6, 100 * HZ_PER_MHZ },
+ { IMX8MP_CLK_WDOG, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_IPP_DO_CLKO1, 200 * HZ_PER_MHZ },
+ { IMX8MP_CLK_IPP_DO_CLKO2, 200 * HZ_PER_MHZ },
+ { IMX8MP_CLK_HDMI_REF_266M, 266 * HZ_PER_MHZ },
+ { IMX8MP_CLK_USDHC3, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_MIPI_PHY1_REF, 300 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_DISP1_PIX, 250 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_CAM2_PIX, 277 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_LDB, 595 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_MIPI_TEST_BYTE, 200 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ECSPI3, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_PDM, 200 * HZ_PER_MHZ },
+ { IMX8MP_CLK_SAI7, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_MAIN_AXI, 400 * HZ_PER_MHZ },
+ { /* Sentinel */ }
+};
+
+static const struct imx8mp_clock_constraints imx8mp_clock_nominal_constraints[] = {
+ { IMX8MP_CLK_M7_CORE, 600 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ML_CORE, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU3D_CORE, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU3D_SHADER_CORE, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU2D_CORE, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_AUDIO_AXI_SRC, 600 * HZ_PER_MHZ },
+ { IMX8MP_CLK_HSIO_AXI, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_ISP, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_BUS, 600 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_AXI, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_HDMI_AXI, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU_AXI, 600 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU_AHB, 300 * HZ_PER_MHZ },
+ { IMX8MP_CLK_NOC, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_NOC_IO, 600 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ML_AHB, 300 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_G1, 600 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_G2, 500 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_CAM1_PIX, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_VC8000E, 400 * HZ_PER_MHZ }, /* Datasheet claims 500MHz */
+ { IMX8MP_CLK_DRAM_CORE, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GIC, 400 * HZ_PER_MHZ },
+ { /* Sentinel */ }
+};
+
+static const struct imx8mp_clock_constraints imx8mp_clock_overdrive_constraints[] = {
+ { IMX8MP_CLK_M7_CORE, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ML_CORE, 1000 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU3D_CORE, 1000 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU3D_SHADER_CORE, 1000 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU2D_CORE, 1000 * HZ_PER_MHZ },
+ { IMX8MP_CLK_AUDIO_AXI_SRC, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_HSIO_AXI, 500 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_ISP, 500 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_BUS, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_AXI, 500 * HZ_PER_MHZ },
+ { IMX8MP_CLK_HDMI_AXI, 500 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU_AXI, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU_AHB, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_NOC, 1000 * HZ_PER_MHZ },
+ { IMX8MP_CLK_NOC_IO, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ML_AHB, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_G1, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_G2, 700 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_CAM1_PIX, 500 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_VC8000E, 500 * HZ_PER_MHZ }, /* Datasheet claims 400MHz */
+ { IMX8MP_CLK_DRAM_CORE, 1000 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GIC, 500 * HZ_PER_MHZ },
+ { /* Sentinel */ }
+};
+
+static void imx8mp_clocks_apply_constraints(const struct imx8mp_clock_constraints constraints[])
+{
+ const struct imx8mp_clock_constraints *constr;
+
+ for (constr = constraints; constr->clkid; constr++)
+ clk_hw_set_rate_range(hws[constr->clkid], 0, constr->maxrate);
+}
+
static int imx8mp_clocks_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np;
void __iomem *anatop_base, *ccm_base;
+ const char *opmode;
int err;
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mp-anatop");
@@ -715,6 +856,16 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
imx_check_clk_hws(hws, IMX8MP_CLK_END);
+ imx8mp_clocks_apply_constraints(imx8mp_clock_common_constraints);
+
+ err = of_property_read_string(np, "fsl,operating-mode", &opmode);
+ if (!err) {
+ if (!strcmp(opmode, "nominal"))
+ imx8mp_clocks_apply_constraints(imx8mp_clock_nominal_constraints);
+ else if (!strcmp(opmode, "overdrive"))
+ imx8mp_clocks_apply_constraints(imx8mp_clock_overdrive_constraints);
+ }
+
err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
if (err < 0) {
dev_err(dev, "failed to register hws for i.MX8MP\n");
diff --git a/drivers/clk/keystone/syscon-clk.c b/drivers/clk/keystone/syscon-clk.c
index 935d9a2d8c2b..c509929da854 100644
--- a/drivers/clk/keystone/syscon-clk.c
+++ b/drivers/clk/keystone/syscon-clk.c
@@ -105,6 +105,12 @@ static struct clk_hw
return &priv->hw;
}
+static const struct regmap_config ti_syscon_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
static int ti_syscon_gate_clk_probe(struct platform_device *pdev)
{
const struct ti_syscon_gate_clk_data *data, *p;
@@ -113,12 +119,17 @@ static int ti_syscon_gate_clk_probe(struct platform_device *pdev)
int num_clks, num_parents, i;
const char *parent_name;
struct regmap *regmap;
+ void __iomem *base;
data = device_get_match_data(dev);
if (!data)
return -EINVAL;
- regmap = device_node_to_regmap(dev->of_node);
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = regmap_init_mmio(dev, base, &ti_syscon_regmap_cfg);
if (IS_ERR(regmap))
return dev_err_probe(dev, PTR_ERR(regmap),
"failed to get regmap\n");
diff --git a/drivers/clk/mediatek/clk-mt8188-cam.c b/drivers/clk/mediatek/clk-mt8188-cam.c
index 7500bd25387f..9b029fdd584e 100644
--- a/drivers/clk/mediatek/clk-mt8188-cam.c
+++ b/drivers/clk/mediatek/clk-mt8188-cam.c
@@ -20,6 +20,8 @@ static const struct mtk_gate_regs cam_cg_regs = {
#define GATE_CAM(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &cam_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+#define CAM_SYS_SMI_LARB_RST_OFF (0xA0)
+
static const struct mtk_gate cam_main_clks[] = {
GATE_CAM(CLK_CAM_MAIN_LARB13, "cam_main_larb13", "top_cam", 0),
GATE_CAM(CLK_CAM_MAIN_LARB14, "cam_main_larb14", "top_cam", 1),
@@ -72,6 +74,17 @@ static const struct mtk_gate cam_yuvb_clks[] = {
GATE_CAM(CLK_CAM_YUVB_CAMTG, "cam_yuvb_camtg", "top_cam", 2),
};
+/* Reset for SMI larb 16a/16b/17a/17b */
+static u16 cam_sys_rst_ofs[] = {
+ CAM_SYS_SMI_LARB_RST_OFF,
+};
+
+static const struct mtk_clk_rst_desc cam_sys_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = cam_sys_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(cam_sys_rst_ofs),
+};
+
static const struct mtk_clk_desc cam_main_desc = {
.clks = cam_main_clks,
.num_clks = ARRAY_SIZE(cam_main_clks),
@@ -80,21 +93,25 @@ static const struct mtk_clk_desc cam_main_desc = {
static const struct mtk_clk_desc cam_rawa_desc = {
.clks = cam_rawa_clks,
.num_clks = ARRAY_SIZE(cam_rawa_clks),
+ .rst_desc = &cam_sys_rst_desc,
};
static const struct mtk_clk_desc cam_rawb_desc = {
.clks = cam_rawb_clks,
.num_clks = ARRAY_SIZE(cam_rawb_clks),
+ .rst_desc = &cam_sys_rst_desc,
};
static const struct mtk_clk_desc cam_yuva_desc = {
.clks = cam_yuva_clks,
.num_clks = ARRAY_SIZE(cam_yuva_clks),
+ .rst_desc = &cam_sys_rst_desc,
};
static const struct mtk_clk_desc cam_yuvb_desc = {
.clks = cam_yuvb_clks,
.num_clks = ARRAY_SIZE(cam_yuvb_clks),
+ .rst_desc = &cam_sys_rst_desc,
};
static const struct of_device_id of_match_clk_mt8188_cam[] = {
diff --git a/drivers/clk/mediatek/clk-mt8188-img.c b/drivers/clk/mediatek/clk-mt8188-img.c
index cb2fbd4136b9..d44bfbd8308a 100644
--- a/drivers/clk/mediatek/clk-mt8188-img.c
+++ b/drivers/clk/mediatek/clk-mt8188-img.c
@@ -20,6 +20,8 @@ static const struct mtk_gate_regs imgsys_cg_regs = {
#define GATE_IMGSYS(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &imgsys_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+#define IMG_SYS_SMI_LARB_RST_OFF (0xC)
+
static const struct mtk_gate imgsys_main_clks[] = {
GATE_IMGSYS(CLK_IMGSYS_MAIN_LARB9, "imgsys_main_larb9", "top_img", 0),
GATE_IMGSYS(CLK_IMGSYS_MAIN_TRAW0, "imgsys_main_traw0", "top_img", 1),
@@ -58,6 +60,17 @@ static const struct mtk_gate imgsys1_dip_nr_clks[] = {
GATE_IMGSYS(CLK_IMGSYS1_DIP_NR_DIP_NR, "imgsys1_dip_nr_dip_nr", "top_img", 1),
};
+/* Reset for SMI larb 10/11a/11b/11c/15 */
+static u16 img_sys_rst_ofs[] = {
+ IMG_SYS_SMI_LARB_RST_OFF,
+};
+
+static const struct mtk_clk_rst_desc img_sys_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = img_sys_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(img_sys_rst_ofs),
+};
+
static const struct mtk_clk_desc imgsys_main_desc = {
.clks = imgsys_main_clks,
.num_clks = ARRAY_SIZE(imgsys_main_clks),
@@ -66,26 +79,31 @@ static const struct mtk_clk_desc imgsys_main_desc = {
static const struct mtk_clk_desc imgsys_wpe1_desc = {
.clks = imgsys_wpe1_clks,
.num_clks = ARRAY_SIZE(imgsys_wpe1_clks),
+ .rst_desc = &img_sys_rst_desc,
};
static const struct mtk_clk_desc imgsys_wpe2_desc = {
.clks = imgsys_wpe2_clks,
.num_clks = ARRAY_SIZE(imgsys_wpe2_clks),
+ .rst_desc = &img_sys_rst_desc,
};
static const struct mtk_clk_desc imgsys_wpe3_desc = {
.clks = imgsys_wpe3_clks,
.num_clks = ARRAY_SIZE(imgsys_wpe3_clks),
+ .rst_desc = &img_sys_rst_desc,
};
static const struct mtk_clk_desc imgsys1_dip_top_desc = {
.clks = imgsys1_dip_top_clks,
.num_clks = ARRAY_SIZE(imgsys1_dip_top_clks),
+ .rst_desc = &img_sys_rst_desc,
};
static const struct mtk_clk_desc imgsys1_dip_nr_desc = {
.clks = imgsys1_dip_nr_clks,
.num_clks = ARRAY_SIZE(imgsys1_dip_nr_clks),
+ .rst_desc = &img_sys_rst_desc,
};
static const struct of_device_id of_match_clk_mt8188_imgsys_main[] = {
diff --git a/drivers/clk/mediatek/clk-mt8188-ipe.c b/drivers/clk/mediatek/clk-mt8188-ipe.c
index 8f1933b71e28..70a011c1f9ce 100644
--- a/drivers/clk/mediatek/clk-mt8188-ipe.c
+++ b/drivers/clk/mediatek/clk-mt8188-ipe.c
@@ -20,6 +20,8 @@ static const struct mtk_gate_regs ipe_cg_regs = {
#define GATE_IPE(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &ipe_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+#define IPE_SYS_SMI_LARB_RST_OFF (0xC)
+
static const struct mtk_gate ipe_clks[] = {
GATE_IPE(CLK_IPE_DPE, "ipe_dpe", "top_ipe", 0),
GATE_IPE(CLK_IPE_FDVT, "ipe_fdvt", "top_ipe", 1),
@@ -28,9 +30,21 @@ static const struct mtk_gate ipe_clks[] = {
GATE_IPE(CLK_IPE_SMI_LARB12, "ipe_smi_larb12", "top_ipe", 4),
};
+/* Reset for SMI larb 12 */
+static u16 ipe_sys_rst_ofs[] = {
+ IPE_SYS_SMI_LARB_RST_OFF,
+};
+
+static const struct mtk_clk_rst_desc ipe_sys_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = ipe_sys_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(ipe_sys_rst_ofs),
+};
+
static const struct mtk_clk_desc ipe_desc = {
.clks = ipe_clks,
.num_clks = ARRAY_SIZE(ipe_clks),
+ .rst_desc = &ipe_sys_rst_desc,
};
static const struct of_device_id of_match_clk_mt8188_ipe[] = {
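With .rst_desc populated, the cam/img/ipe clock controllers above also register a simple reset controller for their SMI larbs. A hedged sketch of how a consumer (for example an SMI larb driver) would use it through the generic reset API; the device and devicetree wiring are assumed and not shown here.

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/reset.h>

/* Illustrative consumer of the larb reset line exposed above. */
static int example_reset_larb(struct device *dev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_assert(rst);
	usleep_range(10, 20);
	return reset_control_deassert(rst);
}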
diff --git a/drivers/clk/mediatek/clk-mt8188-vdo1.c b/drivers/clk/mediatek/clk-mt8188-vdo1.c
index 4fa355f8f0c2..f715d45e545e 100644
--- a/drivers/clk/mediatek/clk-mt8188-vdo1.c
+++ b/drivers/clk/mediatek/clk-mt8188-vdo1.c
@@ -43,6 +43,12 @@ static const struct mtk_gate_regs vdo1_4_cg_regs = {
.sta_ofs = 0x140,
};
+static const struct mtk_gate_regs vdo1_5_cg_regs = {
+ .set_ofs = 0x400,
+ .clr_ofs = 0x400,
+ .sta_ofs = 0x400,
+};
+
#define GATE_VDO1_0(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &vdo1_0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
@@ -62,6 +68,9 @@ static const struct mtk_gate_regs vdo1_4_cg_regs = {
#define GATE_VDO1_4(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &vdo1_4_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+#define GATE_VDO1_5(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdo1_5_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
static const struct mtk_gate vdo1_clks[] = {
/* VDO1_0 */
GATE_VDO1_0(CLK_VDO1_SMI_LARB2, "vdo1_smi_larb2", "top_vpp", 0),
@@ -129,6 +138,8 @@ static const struct mtk_gate vdo1_clks[] = {
GATE_VDO1_3(CLK_VDO1_DISP_MONITOR_DPINTF, "vdo1_disp_monitor_dpintf_ck", "top_vpp", 17),
/* VDO1_4 */
GATE_VDO1_4(CLK_VDO1_26M_SLOW, "vdo1_26m_slow_ck", "clk26m", 8),
+ /* VDO1_5 */
+ GATE_VDO1_5(CLK_VDO1_DPI1_HDMI, "vdo1_dpi1_hdmi", "hdmi_txpll", 0),
};
static const struct mtk_clk_desc vdo1_desc = {
diff --git a/drivers/clk/meson/a1-pll.c b/drivers/clk/meson/a1-pll.c
index 8d7c7b4493c4..86d8159f3319 100644
--- a/drivers/clk/meson/a1-pll.c
+++ b/drivers/clk/meson/a1-pll.c
@@ -356,7 +356,7 @@ static struct platform_driver a1_pll_clkc_driver = {
};
module_platform_driver(a1_pll_clkc_driver);
-MODULE_DESCRIPTION("Amlogic S4 PLL Clock Controller driver");
+MODULE_DESCRIPTION("Amlogic A1 PLL Clock Controller driver");
MODULE_AUTHOR("Jian Hu <jian.hu@amlogic.com>");
MODULE_AUTHOR("Dmitry Rokosov <ddrokosov@sberdevices.ru>");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index cfffd434e998..ceabebb1863d 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -1137,8 +1137,18 @@ static struct clk_regmap g12a_cpu_clk_div16_en = {
.hw.init = &(struct clk_init_data) {
.name = "cpu_clk_div16_en",
.ops = &clk_regmap_gate_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_cpu_clk.hw
+ .parent_data = &(const struct clk_parent_data) {
+ /*
+ * Note:
+ * G12A and G12B have different cpu clocks (with
+ * different struct clk_hw). We fallback to the global
+ * naming string mechanism so this clock picks
+ * up the appropriate one. Same goes for the other
+ * clock using cpu cluster A clock output and present
+ * on both G12 variant.
+ */
+ .name = "cpu_clk",
+ .index = -1,
},
.num_parents = 1,
/*
@@ -1203,7 +1213,10 @@ static struct clk_regmap g12a_cpu_clk_apb_div = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_apb_div",
.ops = &clk_regmap_divider_ro_ops,
- .parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw },
+ .parent_data = &(const struct clk_parent_data) {
+ .name = "cpu_clk",
+ .index = -1,
+ },
.num_parents = 1,
},
};
@@ -1237,7 +1250,10 @@ static struct clk_regmap g12a_cpu_clk_atb_div = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_atb_div",
.ops = &clk_regmap_divider_ro_ops,
- .parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw },
+ .parent_data = &(const struct clk_parent_data) {
+ .name = "cpu_clk",
+ .index = -1,
+ },
.num_parents = 1,
},
};
@@ -1271,7 +1287,10 @@ static struct clk_regmap g12a_cpu_clk_axi_div = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_axi_div",
.ops = &clk_regmap_divider_ro_ops,
- .parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw },
+ .parent_data = &(const struct clk_parent_data) {
+ .name = "cpu_clk",
+ .index = -1,
+ },
.num_parents = 1,
},
};
@@ -1306,13 +1325,6 @@ static struct clk_regmap g12a_cpu_clk_trace_div = {
.name = "cpu_clk_trace_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_data = &(const struct clk_parent_data) {
- /*
- * Note:
- * G12A and G12B have different cpu_clks (with
- * different struct clk_hw). We fallback to the global
- * naming string mechanism so cpu_clk_trace_div picks
- * up the appropriate one.
- */
.name = "cpu_clk",
.index = -1,
},
@@ -4311,7 +4323,7 @@ static MESON_GATE(g12a_spicc_1, HHI_GCLK_MPEG0, 14);
static MESON_GATE(g12a_hiu_reg, HHI_GCLK_MPEG0, 19);
static MESON_GATE(g12a_mipi_dsi_phy, HHI_GCLK_MPEG0, 20);
static MESON_GATE(g12a_assist_misc, HHI_GCLK_MPEG0, 23);
-static MESON_GATE(g12a_emmc_a, HHI_GCLK_MPEG0, 4);
+static MESON_GATE(g12a_emmc_a, HHI_GCLK_MPEG0, 24);
static MESON_GATE(g12a_emmc_b, HHI_GCLK_MPEG0, 25);
static MESON_GATE(g12a_emmc_c, HHI_GCLK_MPEG0, 26);
static MESON_GATE(g12a_audio_codec, HHI_GCLK_MPEG0, 28);
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 8575b8485385..3abb44a2532b 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -1266,14 +1266,13 @@ static struct clk_regmap gxbb_cts_i958 = {
},
};
+/*
+ * This table skips the clock named 'cts_slow_oscin' in the documentation;
+ * that clock does not exist yet in this controller or in the AO one.
+ */
+static u32 gxbb_32k_clk_parents_val_table[] = { 0, 2, 3 };
static const struct clk_parent_data gxbb_32k_clk_parent_data[] = {
{ .fw_name = "xtal", },
- /*
- * FIXME: This clock is provided by the ao clock controller but the
- * clock is not yet part of the binding of this controller, so string
- * name must be use to set this parent.
- */
- { .name = "cts_slow_oscin", .index = -1 },
{ .hw = &gxbb_fclk_div3.hw },
{ .hw = &gxbb_fclk_div5.hw },
};
@@ -1283,6 +1282,7 @@ static struct clk_regmap gxbb_32k_clk_sel = {
.offset = HHI_32K_CLK_CNTL,
.mask = 0x3,
.shift = 16,
+ .table = gxbb_32k_clk_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "32k_clk_sel",
@@ -1306,7 +1306,7 @@ static struct clk_regmap gxbb_32k_clk_div = {
&gxbb_32k_clk_sel.hw
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT | CLK_DIVIDER_ROUND_CLOSEST,
+ .flags = CLK_SET_RATE_PARENT,
},
};
diff --git a/drivers/clk/mmp/clk-pxa1908-apmu.c b/drivers/clk/mmp/clk-pxa1908-apmu.c
index 8cfb1258202f..d3a070687fc5 100644
--- a/drivers/clk/mmp/clk-pxa1908-apmu.c
+++ b/drivers/clk/mmp/clk-pxa1908-apmu.c
@@ -87,8 +87,8 @@ static int pxa1908_apmu_probe(struct platform_device *pdev)
struct pxa1908_clk_unit *pxa_unit;
pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL);
- if (IS_ERR(pxa_unit))
- return PTR_ERR(pxa_unit);
+ if (!pxa_unit)
+ return -ENOMEM;
pxa_unit->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pxa_unit->base))
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 69bbf62ba3cd..7d5dac26b244 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -217,7 +217,7 @@ config IPQ_GCC_4019
config IPQ_GCC_5018
tristate "IPQ5018 Global Clock Controller"
- depends on ARM64 || COMPILE_TEST
+ depends on ARM || ARM64 || COMPILE_TEST
help
Support for global clock controller on ipq5018 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
@@ -281,6 +281,13 @@ config IPQ_GCC_9574
i2c, USB, SD/eMMC, etc. Select this for the root clock
of ipq9574.
+config IPQ_NSSCC_9574
+ tristate "IPQ9574 NSS Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ depends on IPQ_GCC_9574
+ help
+ Support for NSS clock controller on ipq9574 devices.
+
config IPQ_NSSCC_QCA8K
tristate "QCA8K(QCA8386 or QCA8084) NSS Clock Controller"
depends on MDIO_BUS
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 0db2f98bcb3e..96862e99e5d4 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_IPQ_GCC_6018) += gcc-ipq6018.o
obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
obj-$(CONFIG_IPQ_GCC_8074) += gcc-ipq8074.o
obj-$(CONFIG_IPQ_GCC_9574) += gcc-ipq9574.o
+obj-$(CONFIG_IPQ_NSSCC_9574) += nsscc-ipq9574.o
obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
obj-$(CONFIG_IPQ_NSSCC_QCA8K) += nsscc-qca8k.o
obj-$(CONFIG_MDM_GCC_9607) += gcc-mdm9607.o
diff --git a/drivers/clk/qcom/camcc-sa8775p.c b/drivers/clk/qcom/camcc-sa8775p.c
index c04801a5af35..11bd2e234811 100644
--- a/drivers/clk/qcom/camcc-sa8775p.c
+++ b/drivers/clk/qcom/camcc-sa8775p.c
@@ -6,7 +6,6 @@
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -1801,7 +1800,7 @@ static const struct regmap_config cam_cc_sa8775p_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc cam_cc_sa8775p_desc = {
+static const struct qcom_cc_desc cam_cc_sa8775p_desc = {
.config = &cam_cc_sa8775p_regmap_config,
.clks = cam_cc_sa8775p_clocks,
.num_clks = ARRAY_SIZE(cam_cc_sa8775p_clocks),
diff --git a/drivers/clk/qcom/camcc-sc7180.c b/drivers/clk/qcom/camcc-sc7180.c
index 10e924cd533d..5031df813b4a 100644
--- a/drivers/clk/qcom/camcc-sc7180.c
+++ b/drivers/clk/qcom/camcc-sc7180.c
@@ -5,8 +5,8 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/clk/qcom/camcc-sc7280.c b/drivers/clk/qcom/camcc-sc7280.c
index accd257632df..55545f5fdb98 100644
--- a/drivers/clk/qcom/camcc-sc7280.c
+++ b/drivers/clk/qcom/camcc-sc7280.c
@@ -7,8 +7,8 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/camcc-sc8280xp.c b/drivers/clk/qcom/camcc-sc8280xp.c
index 479964f91608..18f5a3eb313e 100644
--- a/drivers/clk/qcom/camcc-sc8280xp.c
+++ b/drivers/clk/qcom/camcc-sc8280xp.c
@@ -2987,7 +2987,7 @@ static const struct regmap_config camcc_sc8280xp_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc camcc_sc8280xp_desc = {
+static const struct qcom_cc_desc camcc_sc8280xp_desc = {
.config = &camcc_sc8280xp_regmap_config,
.clks = camcc_sc8280xp_clocks,
.num_clks = ARRAY_SIZE(camcc_sc8280xp_clocks),
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index 40022a10f8c0..cf60e8dd292a 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/camcc-sm4450.c b/drivers/clk/qcom/camcc-sm4450.c
index f8503ced3d05..6170d5ad9cbf 100644
--- a/drivers/clk/qcom/camcc-sm4450.c
+++ b/drivers/clk/qcom/camcc-sm4450.c
@@ -6,7 +6,6 @@
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -1641,7 +1640,7 @@ static const struct regmap_config cam_cc_sm4450_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc cam_cc_sm4450_desc = {
+static const struct qcom_cc_desc cam_cc_sm4450_desc = {
.config = &cam_cc_sm4450_regmap_config,
.clks = cam_cc_sm4450_clocks,
.num_clks = ARRAY_SIZE(cam_cc_sm4450_clocks),
diff --git a/drivers/clk/qcom/camcc-sm6350.c b/drivers/clk/qcom/camcc-sm6350.c
index f6634cc8663e..1871970fb046 100644
--- a/drivers/clk/qcom/camcc-sm6350.c
+++ b/drivers/clk/qcom/camcc-sm6350.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/camcc-sm7150.c b/drivers/clk/qcom/camcc-sm7150.c
index 39033a6bb616..4a3baf5d8e85 100644
--- a/drivers/clk/qcom/camcc-sm7150.c
+++ b/drivers/clk/qcom/camcc-sm7150.c
@@ -7,7 +7,6 @@
#include <linux/clk-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/camcc-sm8150.c b/drivers/clk/qcom/camcc-sm8150.c
index bb3009818ad7..62aadb27c50e 100644
--- a/drivers/clk/qcom/camcc-sm8150.c
+++ b/drivers/clk/qcom/camcc-sm8150.c
@@ -6,9 +6,9 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/pm_runtime.h>
@@ -2094,7 +2094,7 @@ static const struct regmap_config cam_cc_sm8150_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc cam_cc_sm8150_desc = {
+static const struct qcom_cc_desc cam_cc_sm8150_desc = {
.config = &cam_cc_sm8150_regmap_config,
.clks = cam_cc_sm8150_clocks,
.num_clks = ARRAY_SIZE(cam_cc_sm8150_clocks),
diff --git a/drivers/clk/qcom/camcc-sm8250.c b/drivers/clk/qcom/camcc-sm8250.c
index 34d2f17520dc..6da89c49ba3d 100644
--- a/drivers/clk/qcom/camcc-sm8250.c
+++ b/drivers/clk/qcom/camcc-sm8250.c
@@ -4,10 +4,10 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
#include <dt-bindings/clock/qcom,camcc-sm8250.h>
@@ -411,7 +411,7 @@ static struct clk_rcg2 cam_cc_bps_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -433,7 +433,7 @@ static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -454,7 +454,7 @@ static struct clk_rcg2 cam_cc_cci_0_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -469,7 +469,7 @@ static struct clk_rcg2 cam_cc_cci_1_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -490,7 +490,7 @@ static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -511,7 +511,7 @@ static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -526,7 +526,7 @@ static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -556,7 +556,7 @@ static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -571,7 +571,7 @@ static struct clk_rcg2 cam_cc_csi4phytimer_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -586,7 +586,7 @@ static struct clk_rcg2 cam_cc_csi5phytimer_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -611,7 +611,7 @@ static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -634,7 +634,7 @@ static struct clk_rcg2 cam_cc_fd_core_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -649,7 +649,7 @@ static struct clk_rcg2 cam_cc_icp_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -673,7 +673,7 @@ static struct clk_rcg2 cam_cc_ife_0_clk_src = {
.parent_data = cam_cc_parent_data_2,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -710,7 +710,7 @@ static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -734,7 +734,7 @@ static struct clk_rcg2 cam_cc_ife_1_clk_src = {
.parent_data = cam_cc_parent_data_3,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -749,7 +749,7 @@ static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -771,7 +771,7 @@ static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -786,7 +786,7 @@ static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -810,7 +810,7 @@ static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
.parent_data = cam_cc_parent_data_4,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -825,7 +825,7 @@ static struct clk_rcg2 cam_cc_jpeg_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -847,7 +847,7 @@ static struct clk_rcg2 cam_cc_mclk0_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -862,7 +862,7 @@ static struct clk_rcg2 cam_cc_mclk1_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -877,7 +877,7 @@ static struct clk_rcg2 cam_cc_mclk2_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -892,7 +892,7 @@ static struct clk_rcg2 cam_cc_mclk3_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -907,7 +907,7 @@ static struct clk_rcg2 cam_cc_mclk4_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -922,7 +922,7 @@ static struct clk_rcg2 cam_cc_mclk5_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -993,7 +993,7 @@ static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
diff --git a/drivers/clk/qcom/camcc-sm8550.c b/drivers/clk/qcom/camcc-sm8550.c
index eac850bb690a..871155783c79 100644
--- a/drivers/clk/qcom/camcc-sm8550.c
+++ b/drivers/clk/qcom/camcc-sm8550.c
@@ -3487,7 +3487,7 @@ static const struct regmap_config cam_cc_sm8550_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc cam_cc_sm8550_desc = {
+static const struct qcom_cc_desc cam_cc_sm8550_desc = {
.config = &cam_cc_sm8550_regmap_config,
.clks = cam_cc_sm8550_clocks,
.num_clks = ARRAY_SIZE(cam_cc_sm8550_clocks),
diff --git a/drivers/clk/qcom/camcc-sm8650.c b/drivers/clk/qcom/camcc-sm8650.c
index a37e52a67ed4..0ccd6de8ba78 100644
--- a/drivers/clk/qcom/camcc-sm8650.c
+++ b/drivers/clk/qcom/camcc-sm8650.c
@@ -3517,7 +3517,7 @@ static const struct regmap_config cam_cc_sm8650_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc cam_cc_sm8650_desc = {
+static const struct qcom_cc_desc cam_cc_sm8650_desc = {
.config = &cam_cc_sm8650_regmap_config,
.clks = cam_cc_sm8650_clocks,
.num_clks = ARRAY_SIZE(cam_cc_sm8650_clocks),
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 9a65d14acf71..cec0afea8e44 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -709,14 +709,19 @@ clk_alpha_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 alpha_width = pll_alpha_width(pll);
- regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
+ if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
+ return 0;
+
+ if (regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl))
+ return 0;
- regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
if (ctl & PLL_ALPHA_EN) {
- regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &low);
+ if (regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &low))
+ return 0;
if (alpha_width > 32) {
- regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
- &high);
+ if (regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
+ &high))
+ return 0;
a = (u64)high << 32 | low;
} else {
a = low & GENMASK(alpha_width - 1, 0);
@@ -942,8 +947,11 @@ alpha_pll_huayra_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l, alpha = 0, ctl, alpha_m, alpha_n;
- regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
- regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
+ if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
+ return 0;
+
+ if (regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl))
+ return 0;
if (ctl & PLL_ALPHA_EN) {
regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &alpha);
@@ -1137,8 +1145,11 @@ clk_trion_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l, frac, alpha_width = pll_alpha_width(pll);
- regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
- regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &frac);
+ if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
+ return 0;
+
+ if (regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &frac))
+ return 0;
return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width);
}
@@ -1196,7 +1207,8 @@ clk_alpha_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
u32 ctl;
- regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
+ if (regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl))
+ return 0;
ctl >>= PLL_POST_DIV_SHIFT;
ctl &= PLL_POST_DIV_MASK(pll);
@@ -1412,8 +1424,11 @@ static unsigned long alpha_pll_fabia_recalc_rate(struct clk_hw *hw,
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l, frac, alpha_width = pll_alpha_width(pll);
- regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
- regmap_read(pll->clkr.regmap, PLL_FRAC(pll), &frac);
+ if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
+ return 0;
+
+ if (regmap_read(pll->clkr.regmap, PLL_FRAC(pll), &frac))
+ return 0;
return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width);
}
@@ -1563,7 +1578,8 @@ clk_trion_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
struct regmap *regmap = pll->clkr.regmap;
u32 i, div = 1, val;
- regmap_read(regmap, PLL_USER_CTL(pll), &val);
+ if (regmap_read(regmap, PLL_USER_CTL(pll), &val))
+ return 0;
val >>= pll->post_div_shift;
val &= PLL_POST_DIV_MASK(pll);
@@ -2484,9 +2500,12 @@ static unsigned long alpha_pll_lucid_evo_recalc_rate(struct clk_hw *hw,
struct regmap *regmap = pll->clkr.regmap;
u32 l, frac;
- regmap_read(regmap, PLL_L_VAL(pll), &l);
+ if (regmap_read(regmap, PLL_L_VAL(pll), &l))
+ return 0;
l &= LUCID_EVO_PLL_L_VAL_MASK;
- regmap_read(regmap, PLL_ALPHA_VAL(pll), &frac);
+
+ if (regmap_read(regmap, PLL_ALPHA_VAL(pll), &frac))
+ return 0;
return alpha_pll_calc_rate(parent_rate, l, frac, pll_alpha_width(pll));
}
@@ -2699,7 +2718,8 @@ static unsigned long clk_rivian_evo_pll_recalc_rate(struct clk_hw *hw,
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l;
- regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
+ if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
+ return 0;
return parent_rate * l;
}
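
The clk-alpha-pll.c hunks above all apply one pattern: every regmap_read() inside a recalc_rate callback is now checked, and the callback returns 0 instead of computing a rate from a local that was never written. A minimal standalone C sketch of that pattern, using a hypothetical read_reg() helper in place of the real regmap API (not the kernel interface, just an illustration):

#include <stdio.h>

/* Hypothetical stand-in for regmap_read(): 0 on success, negative on error. */
static int read_reg(unsigned int reg, unsigned int *val)
{
        if (reg == 0x0) {       /* pretend only register 0x0 is readable */
                *val = 50;      /* L divider value */
                return 0;
        }
        return -5;              /* -EIO-style failure */
}

/* Mirrors the patched recalc_rate flow: any failed read means "report 0". */
static unsigned long recalc_rate(unsigned long parent_rate)
{
        unsigned int l;

        if (read_reg(0x0, &l))
                return 0;

        return parent_rate * l;
}

int main(void)
{
        printf("%lu\n", recalc_rate(19200000));  /* prints 960000000 */
        return 0;
}

Returning 0 on a failed read at least avoids publishing a rate derived from uninitialized stack data.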
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index 229480c5b075..0f10090d4ae6 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -28,7 +28,7 @@ static bool clk_branch_in_hwcg_mode(const struct clk_branch *br)
static bool clk_branch_check_halt(const struct clk_branch *br, bool enabling)
{
- bool invert = (br->halt_check == BRANCH_HALT_ENABLE);
+ bool invert = (br->halt_check & BRANCH_HALT_ENABLE);
u32 val;
regmap_read(br->clkr.regmap, br->halt_reg, &val);
@@ -44,7 +44,7 @@ static bool clk_branch2_check_halt(const struct clk_branch *br, bool enabling)
{
u32 val;
u32 mask;
- bool invert = (br->halt_check == BRANCH_HALT_ENABLE);
+ bool invert = (br->halt_check & BRANCH_HALT_ENABLE);
mask = CBCR_NOC_FSM_STATUS;
mask |= CBCR_CLK_OFF;
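
The clk-branch.c change swaps an equality test for a bitwise AND because, as I read clk-branch.h, halt_check can carry a "voted" flag OR'd into the BRANCH_HALT_ENABLE value, so "== BRANCH_HALT_ENABLE" misses the voted variant while "& BRANCH_HALT_ENABLE" still picks out the polarity bit. A small self-contained C illustration (the BIT(7) flag value is an assumption here, not copied from the header):

#include <stdbool.h>
#include <stdio.h>

#define BRANCH_HALT_ENABLE      1               /* halt bit polarity: 0 = halted */
#define BRANCH_VOTED            (1 << 7)        /* assumed extra flag OR'd into halt_check */

int main(void)
{
        unsigned char halt_check = BRANCH_HALT_ENABLE | BRANCH_VOTED;

        bool invert_eq  = (halt_check == BRANCH_HALT_ENABLE);  /* false: misses the voted form */
        bool invert_and = (halt_check & BRANCH_HALT_ENABLE);   /* true: what the check intends */

        printf("== gives %d, & gives %d\n", invert_eq, invert_and);
        return 0;
}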
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 29ef08a9d50b..3fbaa646286f 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -486,6 +486,7 @@ DEFINE_CLK_SMD_RPM(qup, QCOM_SMD_RPM_QUP_CLK, 0);
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(bb_clk1, 1, 19200000);
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(bb_clk2, 2, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(bb_clk3, 3, 19200000);
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(ln_bb_clk1, 1, 19200000);
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(ln_bb_clk2, 2, 19200000);
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(ln_bb_clk3, 3, 19200000);
@@ -1046,6 +1047,36 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8998 = {
.num_icc_clks = ARRAY_SIZE(msm8998_icc_clks),
};
+static struct clk_smd_rpm *sdm429_clks[] = {
+ [RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
+ [RPM_SMD_QDSS_CLK] = &clk_smd_rpm_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &clk_smd_rpm_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &clk_smd_rpm_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &clk_smd_rpm_bb_clk1_a,
+ [RPM_SMD_BB_CLK2] = &clk_smd_rpm_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &clk_smd_rpm_bb_clk2_a,
+ [RPM_SMD_BB_CLK3] = &clk_smd_rpm_bb_clk3,
+ [RPM_SMD_BB_CLK3_A] = &clk_smd_rpm_bb_clk3_a,
+ [RPM_SMD_RF_CLK2] = &clk_smd_rpm_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &clk_smd_rpm_rf_clk2_a,
+ [RPM_SMD_DIV_CLK2] = &clk_smd_rpm_div_clk2,
+ [RPM_SMD_DIV_A_CLK2] = &clk_smd_rpm_div_clk2_a,
+ [RPM_SMD_BB_CLK1_PIN] = &clk_smd_rpm_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &clk_smd_rpm_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2_PIN] = &clk_smd_rpm_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &clk_smd_rpm_bb_clk2_a_pin,
+ [RPM_SMD_BB_CLK3_PIN] = &clk_smd_rpm_bb_clk3_pin,
+ [RPM_SMD_BB_CLK3_A_PIN] = &clk_smd_rpm_bb_clk3_a_pin,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_sdm429 = {
+ .clks = sdm429_clks,
+ .num_clks = ARRAY_SIZE(sdm429_clks),
+ .icc_clks = bimc_pcnoc_snoc_smmnoc_icc_clks,
+ .num_icc_clks = ARRAY_SIZE(bimc_pcnoc_snoc_smmnoc_icc_clks),
+};
+
static struct clk_smd_rpm *sdm660_clks[] = {
[RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
[RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
@@ -1276,6 +1307,7 @@ static const struct of_device_id rpm_smd_clk_match_table[] = {
{ .compatible = "qcom,rpmcc-msm8998", .data = &rpm_clk_msm8998 },
{ .compatible = "qcom,rpmcc-qcm2290", .data = &rpm_clk_qcm2290 },
{ .compatible = "qcom,rpmcc-qcs404", .data = &rpm_clk_qcs404 },
+ { .compatible = "qcom,rpmcc-sdm429", .data = &rpm_clk_sdm429 },
{ .compatible = "qcom,rpmcc-sdm660", .data = &rpm_clk_sdm660 },
{ .compatible = "qcom,rpmcc-sm6115", .data = &rpm_clk_sm6115 },
{ .compatible = "qcom,rpmcc-sm6125", .data = &rpm_clk_sm6125 },
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index 33cc1f73c69d..9e3380fd7181 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -22,6 +22,7 @@ struct qcom_cc {
struct qcom_reset_controller reset;
struct clk_regmap **rclks;
size_t num_rclks;
+ struct dev_pm_domain_list *pd_list;
};
const
@@ -299,6 +300,10 @@ int qcom_cc_really_probe(struct device *dev,
if (!cc)
return -ENOMEM;
+ ret = devm_pm_domain_attach_list(dev, NULL, &cc->pd_list);
+ if (ret < 0 && ret != -EEXIST)
+ return ret;
+
reset = &cc->reset;
reset->rcdev.of_node = dev->of_node;
reset->rcdev.ops = &qcom_reset_ops;
@@ -318,6 +323,7 @@ int qcom_cc_really_probe(struct device *dev,
scd->dev = dev;
scd->scs = desc->gdscs;
scd->num = desc->num_gdscs;
+ scd->pd_list = cc->pd_list;
ret = gdsc_register(scd, &reset->rcdev, regmap);
if (ret)
return ret;
diff --git a/drivers/clk/qcom/dispcc-qcm2290.c b/drivers/clk/qcom/dispcc-qcm2290.c
index d7bb1399e102..6d88d067337f 100644
--- a/drivers/clk/qcom/dispcc-qcm2290.c
+++ b/drivers/clk/qcom/dispcc-qcm2290.c
@@ -4,10 +4,11 @@
* Copyright (c) 2021, Linaro Ltd.
*/
+#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/dispcc-sc7180.c b/drivers/clk/qcom/dispcc-sc7180.c
index 4710247be530..ab1a8d419863 100644
--- a/drivers/clk/qcom/dispcc-sc7180.c
+++ b/drivers/clk/qcom/dispcc-sc7180.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/dispcc-sc7280.c b/drivers/clk/qcom/dispcc-sc7280.c
index db0745954894..8bdf57734a3d 100644
--- a/drivers/clk/qcom/dispcc-sc7280.c
+++ b/drivers/clk/qcom/dispcc-sc7280.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/dispcc-sc8280xp.c b/drivers/clk/qcom/dispcc-sc8280xp.c
index f1ca9ae0b33f..5903a759d4af 100644
--- a/drivers/clk/qcom/dispcc-sc8280xp.c
+++ b/drivers/clk/qcom/dispcc-sc8280xp.c
@@ -5,13 +5,12 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/property.h>
#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
#include <dt-bindings/clock/qcom,dispcc-sc8280xp.h>
@@ -3114,7 +3113,7 @@ static const struct regmap_config disp_cc_sc8280xp_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc disp0_cc_sc8280xp_desc = {
+static const struct qcom_cc_desc disp0_cc_sc8280xp_desc = {
.config = &disp_cc_sc8280xp_regmap_config,
.clks = disp0_cc_sc8280xp_clocks,
.num_clks = ARRAY_SIZE(disp0_cc_sc8280xp_clocks),
@@ -3124,7 +3123,7 @@ static struct qcom_cc_desc disp0_cc_sc8280xp_desc = {
.num_gdscs = ARRAY_SIZE(disp0_cc_sc8280xp_gdscs),
};
-static struct qcom_cc_desc disp1_cc_sc8280xp_desc = {
+static const struct qcom_cc_desc disp1_cc_sc8280xp_desc = {
.config = &disp_cc_sc8280xp_regmap_config,
.clks = disp1_cc_sc8280xp_clocks,
.num_clks = ARRAY_SIZE(disp1_cc_sc8280xp_clocks),
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
index e6139e8f74dc..2f9e9665d7e9 100644
--- a/drivers/clk/qcom/dispcc-sdm845.c
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -4,10 +4,10 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
diff --git a/drivers/clk/qcom/dispcc-sm4450.c b/drivers/clk/qcom/dispcc-sm4450.c
index 98ba016bc57f..e8752d01c8e6 100644
--- a/drivers/clk/qcom/dispcc-sm4450.c
+++ b/drivers/clk/qcom/dispcc-sm4450.c
@@ -6,7 +6,6 @@
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -722,7 +721,7 @@ static const struct regmap_config disp_cc_sm4450_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc disp_cc_sm4450_desc = {
+static const struct qcom_cc_desc disp_cc_sm4450_desc = {
.config = &disp_cc_sm4450_regmap_config,
.clks = disp_cc_sm4450_clocks,
.num_clks = ARRAY_SIZE(disp_cc_sm4450_clocks),
diff --git a/drivers/clk/qcom/dispcc-sm6115.c b/drivers/clk/qcom/dispcc-sm6115.c
index 2b236d52b29f..8ae25d51db94 100644
--- a/drivers/clk/qcom/dispcc-sm6115.c
+++ b/drivers/clk/qcom/dispcc-sm6115.c
@@ -5,10 +5,11 @@
* Copyright (c) 2021, Linaro Ltd.
*/
+#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/dispcc-sm6125.c b/drivers/clk/qcom/dispcc-sm6125.c
index 51c7492816fb..851d38a487d3 100644
--- a/drivers/clk/qcom/dispcc-sm6125.c
+++ b/drivers/clk/qcom/dispcc-sm6125.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c
index 2bc6b5f99f57..e703ecf00e44 100644
--- a/drivers/clk/qcom/dispcc-sm6350.c
+++ b/drivers/clk/qcom/dispcc-sm6350.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/dispcc-sm6375.c b/drivers/clk/qcom/dispcc-sm6375.c
index 167dd369a794..ec9dbb1f4a7c 100644
--- a/drivers/clk/qcom/dispcc-sm6375.c
+++ b/drivers/clk/qcom/dispcc-sm6375.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/dispcc-sm7150.c b/drivers/clk/qcom/dispcc-sm7150.c
index d32bd7df1433..bdfff246ed3f 100644
--- a/drivers/clk/qcom/dispcc-sm7150.c
+++ b/drivers/clk/qcom/dispcc-sm7150.c
@@ -8,7 +8,6 @@
#include <linux/clk-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/dispcc-sm8250.c b/drivers/clk/qcom/dispcc-sm8250.c
index 884bbd3fb305..8f433e1e7028 100644
--- a/drivers/clk/qcom/dispcc-sm8250.c
+++ b/drivers/clk/qcom/dispcc-sm8250.c
@@ -4,11 +4,11 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
#include <dt-bindings/clock/qcom,dispcc-sm8250.h>
diff --git a/drivers/clk/qcom/dispcc-sm8450.c b/drivers/clk/qcom/dispcc-sm8450.c
index a1f183e6c636..9ce9fd28e55b 100644
--- a/drivers/clk/qcom/dispcc-sm8450.c
+++ b/drivers/clk/qcom/dispcc-sm8450.c
@@ -4,12 +4,11 @@
* Copyright (c) 2022, Linaro Ltd.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/pm_runtime.h>
@@ -1780,7 +1779,7 @@ static const struct regmap_config disp_cc_sm8450_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc disp_cc_sm8450_desc = {
+static const struct qcom_cc_desc disp_cc_sm8450_desc = {
.config = &disp_cc_sm8450_regmap_config,
.clks = disp_cc_sm8450_clocks,
.num_clks = ARRAY_SIZE(disp_cc_sm8450_clocks),
diff --git a/drivers/clk/qcom/dispcc-sm8550.c b/drivers/clk/qcom/dispcc-sm8550.c
index e41d4104d770..f27140c649f5 100644
--- a/drivers/clk/qcom/dispcc-sm8550.c
+++ b/drivers/clk/qcom/dispcc-sm8550.c
@@ -4,12 +4,11 @@
* Copyright (c) 2023, Linaro Ltd.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/pm_runtime.h>
@@ -1746,7 +1745,7 @@ static const struct regmap_config disp_cc_sm8550_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc disp_cc_sm8550_desc = {
+static const struct qcom_cc_desc disp_cc_sm8550_desc = {
.config = &disp_cc_sm8550_regmap_config,
.clks = disp_cc_sm8550_clocks,
.num_clks = ARRAY_SIZE(disp_cc_sm8550_clocks),
diff --git a/drivers/clk/qcom/dispcc-sm8750.c b/drivers/clk/qcom/dispcc-sm8750.c
index 0358dff91da5..877b40d50e6f 100644
--- a/drivers/clk/qcom/dispcc-sm8750.c
+++ b/drivers/clk/qcom/dispcc-sm8750.c
@@ -827,7 +827,6 @@ static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
&disp_cc_mdss_byte0_clk_src.clkr.hw,
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_regmap_div_ops,
},
};
@@ -842,7 +841,6 @@ static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
&disp_cc_mdss_byte1_clk_src.clkr.hw,
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_regmap_div_ops,
},
};
@@ -1883,11 +1881,11 @@ static const struct regmap_config disp_cc_sm8750_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .max_register = 0x11014,
+ .max_register = 0xf004, /* 0x10000, 0x10004 and maybe others are for TZ */
.fast_io = true,
};
-static struct qcom_cc_desc disp_cc_sm8750_desc = {
+static const struct qcom_cc_desc disp_cc_sm8750_desc = {
.config = &disp_cc_sm8750_regmap_config,
.clks = disp_cc_sm8750_clocks,
.num_clks = ARRAY_SIZE(disp_cc_sm8750_clocks),
diff --git a/drivers/clk/qcom/dispcc0-sa8775p.c b/drivers/clk/qcom/dispcc0-sa8775p.c
index 6e399b5f1383..aeda9cf4bfee 100644
--- a/drivers/clk/qcom/dispcc0-sa8775p.c
+++ b/drivers/clk/qcom/dispcc0-sa8775p.c
@@ -6,7 +6,6 @@
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -1418,7 +1417,7 @@ static const struct regmap_config disp_cc_0_sa8775p_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc disp_cc_0_sa8775p_desc = {
+static const struct qcom_cc_desc disp_cc_0_sa8775p_desc = {
.config = &disp_cc_0_sa8775p_regmap_config,
.clks = disp_cc_0_sa8775p_clocks,
.num_clks = ARRAY_SIZE(disp_cc_0_sa8775p_clocks),
diff --git a/drivers/clk/qcom/dispcc1-sa8775p.c b/drivers/clk/qcom/dispcc1-sa8775p.c
index 30ccea59415a..cd55d1c11902 100644
--- a/drivers/clk/qcom/dispcc1-sa8775p.c
+++ b/drivers/clk/qcom/dispcc1-sa8775p.c
@@ -6,7 +6,6 @@
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -1418,7 +1417,7 @@ static const struct regmap_config disp_cc_1_sa8775p_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc disp_cc_1_sa8775p_desc = {
+static const struct qcom_cc_desc disp_cc_1_sa8775p_desc = {
.config = &disp_cc_1_sa8775p_regmap_config,
.clks = disp_cc_1_sa8775p_clocks,
.num_clks = ARRAY_SIZE(disp_cc_1_sa8775p_clocks),
diff --git a/drivers/clk/qcom/gcc-ipq5424.c b/drivers/clk/qcom/gcc-ipq5424.c
index d5b218b76e29..3d42f3d85c7a 100644
--- a/drivers/clk/qcom/gcc-ipq5424.c
+++ b/drivers/clk/qcom/gcc-ipq5424.c
@@ -592,13 +592,19 @@ static struct clk_rcg2 gcc_qupv3_spi1_clk_src = {
};
static const struct freq_tbl ftbl_gcc_qupv3_uart0_clk_src[] = {
- F(960000, P_XO, 10, 2, 5),
- F(4800000, P_XO, 5, 0, 0),
- F(9600000, P_XO, 2, 4, 5),
- F(16000000, P_GPLL0_OUT_MAIN, 10, 1, 5),
+ F(3686400, P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 1, 144, 15625),
+ F(7372800, P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 1, 288, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 1, 576, 15625),
F(24000000, P_XO, 1, 0, 0),
F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2),
- F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(32000000, P_GPLL0_OUT_MAIN, 1, 1, 25),
+ F(40000000, P_GPLL0_OUT_MAIN, 1, 1, 20),
+ F(46400000, P_GPLL0_OUT_MAIN, 1, 29, 500),
+ F(48000000, P_GPLL0_OUT_MAIN, 1, 3, 50),
+ F(51200000, P_GPLL0_OUT_MAIN, 1, 8, 125),
+ F(56000000, P_GPLL0_OUT_MAIN, 1, 7, 100),
+ F(58982400, P_GPLL0_OUT_MAIN, 1, 1152, 15625),
+ F(60000000, P_GPLL0_OUT_MAIN, 1, 3, 40),
F(64000000, P_GPLL0_OUT_MAIN, 12.5, 0, 0),
{ }
};
@@ -634,11 +640,11 @@ static struct clk_rcg2 gcc_qupv3_uart1_clk_src = {
static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
F(144000, P_XO, 16, 12, 125),
F(400000, P_XO, 12, 1, 5),
- F(24000000, P_XO, 1, 0, 0),
- F(48000000, P_GPLL2_OUT_MAIN, 12, 1, 2),
- F(96000000, P_GPLL2_OUT_MAIN, 6, 1, 2),
+ F(24000000, P_GPLL2_OUT_MAIN, 12, 1, 2),
+ F(48000000, P_GPLL2_OUT_MAIN, 12, 0, 0),
+ F(96000000, P_GPLL2_OUT_MAIN, 6, 0, 0),
F(177777778, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
- F(192000000, P_GPLL2_OUT_MAIN, 6, 0, 0),
+ F(192000000, P_GPLL2_OUT_MAIN, 3, 0, 0),
F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
{ }
};
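
The replacement UART and SDCC frequency tables above are plain F(rate, source, pre_div, m, n) entries, where the achieved rate is source_rate / pre_div * m / n and the M/N stage is skipped when n is 0. A short C check of three of the UART rows, assuming GPLL0 runs at 800 MHz on this SoC and its "main div" tap at 400 MHz (those parent rates are assumptions, not stated in this patch):

#include <stdio.h>

/* freq_tbl-style row: rate = (parent / pre_div) * m / n, M/N skipped when n == 0 */
struct f_entry { unsigned long rate, parent; double pre_div; unsigned int m, n; };

int main(void)
{
        /* Assumed parents: 400 MHz GPLL0 main-div tap, 24 MHz XO, 800 MHz GPLL0. */
        struct f_entry t[] = {
                {  3686400, 400000000,    1, 144, 15625 },
                { 24000000,  24000000,    1,   0,     0 },
                { 64000000, 800000000, 12.5,   0,     0 },
        };

        for (unsigned int i = 0; i < sizeof(t) / sizeof(t[0]); i++) {
                double rate = t[i].parent / t[i].pre_div;

                if (t[i].n)
                        rate = rate * t[i].m / t[i].n;
                printf("expected %lu, computed %.0f\n", t[i].rate, rate);
        }
        return 0;
}

The 3.6864, 7.3728 and 14.7456 MHz rows are standard UART baud-reference rates (multiples of 1.8432 MHz).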
diff --git a/drivers/clk/qcom/gcc-ipq9574.c b/drivers/clk/qcom/gcc-ipq9574.c
index 6bb66a7e1fb6..6dc86e686de4 100644
--- a/drivers/clk/qcom/gcc-ipq9574.c
+++ b/drivers/clk/qcom/gcc-ipq9574.c
@@ -108,6 +108,20 @@ static struct clk_alpha_pll_postdiv gpll0 = {
},
};
+static struct clk_alpha_pll_postdiv gpll0_out_aux = {
+ .offset = 0x20000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpll0_out_aux",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll0_main.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
static struct clk_alpha_pll gpll4_main = {
.offset = 0x22000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
@@ -3896,6 +3910,7 @@ static struct clk_regmap *gcc_ipq9574_clks[] = {
[GCC_PCIE1_PIPE_CLK] = &gcc_pcie1_pipe_clk.clkr,
[GCC_PCIE2_PIPE_CLK] = &gcc_pcie2_pipe_clk.clkr,
[GCC_PCIE3_PIPE_CLK] = &gcc_pcie3_pipe_clk.clkr,
+ [GPLL0_OUT_AUX] = &gpll0_out_aux.clkr,
};
static const struct qcom_reset_map gcc_ipq9574_resets[] = {
diff --git a/drivers/clk/qcom/gcc-msm8953.c b/drivers/clk/qcom/gcc-msm8953.c
index 855a61966f3e..8f29ecc74c50 100644
--- a/drivers/clk/qcom/gcc-msm8953.c
+++ b/drivers/clk/qcom/gcc-msm8953.c
@@ -3770,7 +3770,7 @@ static struct clk_branch gcc_venus0_axi_clk = {
static struct clk_branch gcc_venus0_core0_vcodec0_clk = {
.halt_reg = 0x4c02c,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x4c02c,
.enable_mask = BIT(0),
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index 9ddce11db6df..c2e4fa5d63ad 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -7,7 +7,6 @@
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/platform_device.h>
-#include <linux/property.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
index b32e66714951..92ad35cfb75e 100644
--- a/drivers/clk/qcom/gcc-msm8974.c
+++ b/drivers/clk/qcom/gcc-msm8974.c
@@ -7,7 +7,6 @@
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/platform_device.h>
-#include <linux/property.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/clk-provider.h>
diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
index df79298a1a25..01a76f1b5b4c 100644
--- a/drivers/clk/qcom/gcc-sdm660.c
+++ b/drivers/clk/qcom/gcc-sdm660.c
@@ -2420,6 +2420,8 @@ static struct gdsc *gcc_sdm660_gdscs[] = {
static const struct qcom_reset_map gcc_sdm660_resets[] = {
[GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
[GCC_QUSB2PHY_SEC_BCR] = { 0x12004 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+ [GCC_SDCC1_BCR] = { 0x16000 },
[GCC_UFS_BCR] = { 0x75000 },
[GCC_USB3_DP_PHY_BCR] = { 0x50028 },
[GCC_USB3_PHY_BCR] = { 0x50020 },
diff --git a/drivers/clk/qcom/gcc-sm8650.c b/drivers/clk/qcom/gcc-sm8650.c
index 9dd5c48f33be..fa1672c4e7d8 100644
--- a/drivers/clk/qcom/gcc-sm8650.c
+++ b/drivers/clk/qcom/gcc-sm8650.c
@@ -3497,7 +3497,7 @@ static struct gdsc usb30_prim_gdsc = {
.pd = {
.name = "usb30_prim_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3506,7 +3506,7 @@ static struct gdsc usb3_phy_gdsc = {
.pd = {
.name = "usb3_phy_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c
index 7288af845434..009f39139b64 100644
--- a/drivers/clk/qcom/gcc-x1e80100.c
+++ b/drivers/clk/qcom/gcc-x1e80100.c
@@ -2564,19 +2564,6 @@ static struct clk_branch gcc_disp_hf_axi_clk = {
},
};
-static struct clk_branch gcc_disp_xo_clk = {
- .halt_reg = 0x27018,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x27018,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_disp_xo_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_gp1_clk = {
.halt_reg = 0x64000,
.halt_check = BRANCH_HALT,
@@ -2631,21 +2618,6 @@ static struct clk_branch gcc_gp3_clk = {
},
};
-static struct clk_branch gcc_gpu_cfg_ahb_clk = {
- .halt_reg = 0x71004,
- .halt_check = BRANCH_HALT_VOTED,
- .hwcg_reg = 0x71004,
- .hwcg_bit = 1,
- .clkr = {
- .enable_reg = 0x71004,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_gpu_cfg_ahb_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_gpu_gpll0_cph_clk_src = {
.halt_check = BRANCH_HALT_DELAY,
.clkr = {
@@ -6268,7 +6240,6 @@ static struct clk_regmap *gcc_x1e80100_clocks[] = {
[GCC_CNOC_PCIE_TUNNEL_CLK] = &gcc_cnoc_pcie_tunnel_clk.clkr,
[GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
[GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
- [GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
[GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
[GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
[GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
@@ -6281,7 +6252,6 @@ static struct clk_regmap *gcc_x1e80100_clocks[] = {
[GCC_GPLL7] = &gcc_gpll7.clkr,
[GCC_GPLL8] = &gcc_gpll8.clkr,
[GCC_GPLL9] = &gcc_gpll9.clkr,
- [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
[GCC_GPU_GPLL0_CPH_CLK_SRC] = &gcc_gpu_gpll0_cph_clk_src.clkr,
[GCC_GPU_GPLL0_DIV_CPH_CLK_SRC] = &gcc_gpu_gpll0_div_cph_clk_src.clkr,
[GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index fa5fe4c2a2ee..7deabf8400cf 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -46,7 +46,7 @@
#define RETAIN_MEM BIT(14)
#define RETAIN_PERIPH BIT(13)
-#define STATUS_POLL_TIMEOUT_US 1500
+#define STATUS_POLL_TIMEOUT_US 2000
#define TIMEOUT_US 500
#define domain_to_gdsc(domain) container_of(domain, struct gdsc, pd)
@@ -292,6 +292,9 @@ static int gdsc_enable(struct generic_pm_domain *domain)
*/
udelay(1);
+ if (sc->flags & RETAIN_FF_ENABLE)
+ gdsc_retain_ff_on(sc);
+
/* Turn on HW trigger mode if supported */
if (sc->flags & HW_CTRL) {
ret = gdsc_hwctrl(sc, true);
@@ -308,9 +311,6 @@ static int gdsc_enable(struct generic_pm_domain *domain)
udelay(1);
}
- if (sc->flags & RETAIN_FF_ENABLE)
- gdsc_retain_ff_on(sc);
-
return 0;
}
@@ -457,13 +457,6 @@ static int gdsc_init(struct gdsc *sc)
goto err_disable_supply;
}
- /* Turn on HW trigger mode if supported */
- if (sc->flags & HW_CTRL) {
- ret = gdsc_hwctrl(sc, true);
- if (ret < 0)
- goto err_disable_supply;
- }
-
/*
* Make sure the retain bit is set if the GDSC is already on,
* otherwise we end up turning off the GDSC and destroying all
@@ -471,6 +464,14 @@ static int gdsc_init(struct gdsc *sc)
*/
if (sc->flags & RETAIN_FF_ENABLE)
gdsc_retain_ff_on(sc);
+
+ /* Turn on HW trigger mode if supported */
+ if (sc->flags & HW_CTRL) {
+ ret = gdsc_hwctrl(sc, true);
+ if (ret < 0)
+ goto err_disable_supply;
+ }
+
} else if (sc->flags & ALWAYS_ON) {
/* If ALWAYS_ON GDSCs are not ON, turn them ON */
gdsc_enable(&sc->pd);
@@ -506,6 +507,55 @@ err_disable_supply:
return ret;
}
+static int gdsc_add_subdomain_list(struct dev_pm_domain_list *pd_list,
+ struct generic_pm_domain *subdomain)
+{
+ int i, ret;
+
+ for (i = 0; i < pd_list->num_pds; i++) {
+ struct device *dev = pd_list->pd_devs[i];
+ struct generic_pm_domain *genpd = pd_to_genpd(dev->pm_domain);
+
+ ret = pm_genpd_add_subdomain(genpd, subdomain);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void gdsc_remove_subdomain_list(struct dev_pm_domain_list *pd_list,
+ struct generic_pm_domain *subdomain)
+{
+ int i;
+
+ for (i = 0; i < pd_list->num_pds; i++) {
+ struct device *dev = pd_list->pd_devs[i];
+ struct generic_pm_domain *genpd = pd_to_genpd(dev->pm_domain);
+
+ pm_genpd_remove_subdomain(genpd, subdomain);
+ }
+}
+
+static void gdsc_pm_subdomain_remove(struct gdsc_desc *desc, size_t num)
+{
+ struct device *dev = desc->dev;
+ struct gdsc **scs = desc->scs;
+ int i;
+
+ /* Remove subdomains */
+ for (i = num - 1; i >= 0; i--) {
+ if (!scs[i])
+ continue;
+ if (scs[i]->parent)
+ pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd);
+ else if (!IS_ERR_OR_NULL(dev->pm_domain))
+ pm_genpd_remove_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
+ else if (desc->pd_list)
+ gdsc_remove_subdomain_list(desc->pd_list, &scs[i]->pd);
+ }
+}
+
int gdsc_register(struct gdsc_desc *desc,
struct reset_controller_dev *rcdev, struct regmap *regmap)
{
@@ -555,30 +605,30 @@ int gdsc_register(struct gdsc_desc *desc,
if (!scs[i])
continue;
if (scs[i]->parent)
- pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd);
+ ret = pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd);
else if (!IS_ERR_OR_NULL(dev->pm_domain))
- pm_genpd_add_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
+ ret = pm_genpd_add_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
+ else if (desc->pd_list)
+ ret = gdsc_add_subdomain_list(desc->pd_list, &scs[i]->pd);
+
+ if (ret)
+ goto err_pm_subdomain_remove;
}
return of_genpd_add_provider_onecell(dev->of_node, data);
+
+err_pm_subdomain_remove:
+ gdsc_pm_subdomain_remove(desc, i);
+
+ return ret;
}
void gdsc_unregister(struct gdsc_desc *desc)
{
- int i;
struct device *dev = desc->dev;
- struct gdsc **scs = desc->scs;
size_t num = desc->num;
- /* Remove subdomains */
- for (i = 0; i < num; i++) {
- if (!scs[i])
- continue;
- if (scs[i]->parent)
- pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd);
- else if (!IS_ERR_OR_NULL(dev->pm_domain))
- pm_genpd_remove_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
- }
+ gdsc_pm_subdomain_remove(desc, num);
of_genpd_del_provider(dev->of_node);
}
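
The gdsc.c rework above does two related things: pm_genpd_add_subdomain() failures during gdsc_register() are no longer ignored, and the new gdsc_pm_subdomain_remove(desc, i) helper unwinds only the subdomains added before the failure (the same helper now also backs gdsc_unregister()). A compact, self-contained sketch of that unwind-on-partial-failure shape, with hypothetical add_item()/remove_item() stand-ins for the genpd calls:

#include <stdio.h>

/* Hypothetical add/remove pair standing in for pm_genpd_add/remove_subdomain(). */
static int add_item(int i)
{
        if (i == 3)
                return -1;      /* simulate a failure on the fourth entry */
        printf("added %d\n", i);
        return 0;
}

static void remove_item(int i)
{
        printf("removed %d\n", i);
}

static int register_all(int n)
{
        int i, ret;

        for (i = 0; i < n; i++) {
                ret = add_item(i);
                if (ret)
                        goto err_unwind;
        }
        return 0;

err_unwind:
        /* Like gdsc_pm_subdomain_remove(desc, i): undo entries [0, i) in reverse. */
        while (--i >= 0)
                remove_item(i);
        return ret;
}

int main(void)
{
        return register_all(5) ? 1 : 0;
}

Unwinding in reverse over only the successfully added entries avoids touching subdomains that were never registered.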
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
index 1e2779b823d1..dd843e86c05b 100644
--- a/drivers/clk/qcom/gdsc.h
+++ b/drivers/clk/qcom/gdsc.h
@@ -80,6 +80,7 @@ struct gdsc_desc {
struct device *dev;
struct gdsc **scs;
size_t num;
+ struct dev_pm_domain_list *pd_list;
};
#ifdef CONFIG_QCOM_GDSC
diff --git a/drivers/clk/qcom/gpucc-msm8998.c b/drivers/clk/qcom/gpucc-msm8998.c
index 9efeab2691ba..7fce70503141 100644
--- a/drivers/clk/qcom/gpucc-msm8998.c
+++ b/drivers/clk/qcom/gpucc-msm8998.c
@@ -7,11 +7,10 @@
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
#include <dt-bindings/clock/qcom,gpucc-msm8998.h>
diff --git a/drivers/clk/qcom/gpucc-sa8775p.c b/drivers/clk/qcom/gpucc-sa8775p.c
index f8a8ac343d70..78cad622cb5a 100644
--- a/drivers/clk/qcom/gpucc-sa8775p.c
+++ b/drivers/clk/qcom/gpucc-sa8775p.c
@@ -12,7 +12,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <dt-bindings/clock/qcom,sa8775p-gpucc.h>
+#include <dt-bindings/clock/qcom,qcs8300-gpucc.h>
#include "clk-alpha-pll.h"
#include "clk-branch.h"
@@ -317,6 +317,24 @@ static struct clk_branch gpu_cc_crc_ahb_clk = {
},
};
+static struct clk_branch gpu_cc_cx_accu_shift_clk = {
+ .halt_reg = 0x95e8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x95e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_cx_accu_shift_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gpu_cc_cx_ff_clk = {
.halt_reg = 0x914c,
.halt_check = BRANCH_HALT,
@@ -420,6 +438,24 @@ static struct clk_branch gpu_cc_demet_clk = {
},
};
+static struct clk_branch gpu_cc_gx_accu_shift_clk = {
+ .halt_reg = 0x95e4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x95e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_gx_accu_shift_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
.halt_reg = 0x7000,
.halt_check = BRANCH_HALT_VOTED,
@@ -499,6 +535,7 @@ static struct clk_regmap *gpu_cc_sa8775p_clocks[] = {
[GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
[GPU_CC_CB_CLK] = &gpu_cc_cb_clk.clkr,
[GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_ACCU_SHIFT_CLK] = NULL,
[GPU_CC_CX_FF_CLK] = &gpu_cc_cx_ff_clk.clkr,
[GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
[GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
@@ -508,6 +545,7 @@ static struct clk_regmap *gpu_cc_sa8775p_clocks[] = {
[GPU_CC_DEMET_DIV_CLK_SRC] = &gpu_cc_demet_div_clk_src.clkr,
[GPU_CC_FF_CLK_SRC] = &gpu_cc_ff_clk_src.clkr,
[GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_ACCU_SHIFT_CLK] = NULL,
[GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
[GPU_CC_HUB_AHB_DIV_CLK_SRC] = &gpu_cc_hub_ahb_div_clk_src.clkr,
[GPU_CC_HUB_AON_CLK] = &gpu_cc_hub_aon_clk.clkr,
@@ -583,6 +621,7 @@ static const struct qcom_cc_desc gpu_cc_sa8775p_desc = {
};
static const struct of_device_id gpu_cc_sa8775p_match_table[] = {
+ { .compatible = "qcom,qcs8300-gpucc" },
{ .compatible = "qcom,sa8775p-gpucc" },
{ }
};
@@ -596,6 +635,14 @@ static int gpu_cc_sa8775p_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,qcs8300-gpucc")) {
+ gpu_cc_pll0_config.l = 0x31;
+ gpu_cc_pll0_config.alpha = 0xe555;
+
+ gpu_cc_sa8775p_clocks[GPU_CC_CX_ACCU_SHIFT_CLK] = &gpu_cc_cx_accu_shift_clk.clkr;
+ gpu_cc_sa8775p_clocks[GPU_CC_GX_ACCU_SHIFT_CLK] = &gpu_cc_gx_accu_shift_clk.clkr;
+ }
+
clk_lucid_evo_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
clk_lucid_evo_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
diff --git a/drivers/clk/qcom/gpucc-sar2130p.c b/drivers/clk/qcom/gpucc-sar2130p.c
index dd72b2a48c42..c2903179ac85 100644
--- a/drivers/clk/qcom/gpucc-sar2130p.c
+++ b/drivers/clk/qcom/gpucc-sar2130p.c
@@ -6,6 +6,7 @@
#include <linux/clk-provider.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/gpucc-sc7180.c b/drivers/clk/qcom/gpucc-sc7180.c
index 08f3983d016f..a7bf44544b95 100644
--- a/drivers/clk/qcom/gpucc-sc7180.c
+++ b/drivers/clk/qcom/gpucc-sc7180.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/gpucc-sc7280.c b/drivers/clk/qcom/gpucc-sc7280.c
index bd699a624517..f81289fa719d 100644
--- a/drivers/clk/qcom/gpucc-sc7280.c
+++ b/drivers/clk/qcom/gpucc-sc7280.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/gpucc-sc8280xp.c b/drivers/clk/qcom/gpucc-sc8280xp.c
index c96be61e3f47..2645612f1cac 100644
--- a/drivers/clk/qcom/gpucc-sc8280xp.c
+++ b/drivers/clk/qcom/gpucc-sc8280xp.c
@@ -5,6 +5,7 @@
#include <linux/clk-provider.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -415,7 +416,7 @@ static const struct regmap_config gpu_cc_sc8280xp_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc gpu_cc_sc8280xp_desc = {
+static const struct qcom_cc_desc gpu_cc_sc8280xp_desc = {
.config = &gpu_cc_sc8280xp_regmap_config,
.clks = gpu_cc_sc8280xp_clocks,
.num_clks = ARRAY_SIZE(gpu_cc_sc8280xp_clocks),
diff --git a/drivers/clk/qcom/gpucc-sdm660.c b/drivers/clk/qcom/gpucc-sdm660.c
index 3ae1b80e38d9..28db307b6717 100644
--- a/drivers/clk/qcom/gpucc-sdm660.c
+++ b/drivers/clk/qcom/gpucc-sdm660.c
@@ -6,15 +6,14 @@
*/
#include <linux/bitops.h>
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/of.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
+
#include <dt-bindings/clock/qcom,gpucc-sdm660.h>
#include "clk-alpha-pll.h"
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
index ef26690cf504..0d63b110a1fb 100644
--- a/drivers/clk/qcom/gpucc-sdm845.c
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/gpucc-sm4450.c b/drivers/clk/qcom/gpucc-sm4450.c
index a14d0bb031ac..34c7ba0c7d55 100644
--- a/drivers/clk/qcom/gpucc-sm4450.c
+++ b/drivers/clk/qcom/gpucc-sm4450.c
@@ -6,7 +6,6 @@
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/gpucc-sm6350.c b/drivers/clk/qcom/gpucc-sm6350.c
index 1e12ad8948db..35ed0500bc59 100644
--- a/drivers/clk/qcom/gpucc-sm6350.c
+++ b/drivers/clk/qcom/gpucc-sm6350.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/gpucc-sm8150.c b/drivers/clk/qcom/gpucc-sm8150.c
index d711464a71b6..7ce91208c0bc 100644
--- a/drivers/clk/qcom/gpucc-sm8150.c
+++ b/drivers/clk/qcom/gpucc-sm8150.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/gpucc-sm8250.c b/drivers/clk/qcom/gpucc-sm8250.c
index 113b486a6d2f..ca0a1681d352 100644
--- a/drivers/clk/qcom/gpucc-sm8250.c
+++ b/drivers/clk/qcom/gpucc-sm8250.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/gpucc-sm8350.c b/drivers/clk/qcom/gpucc-sm8350.c
index f3b6bdc24485..4025dab0a1ca 100644
--- a/drivers/clk/qcom/gpucc-sm8350.c
+++ b/drivers/clk/qcom/gpucc-sm8350.c
@@ -8,8 +8,8 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/gpucc-x1p42100.c b/drivers/clk/qcom/gpucc-x1p42100.c
index dba783339613..4031d3ff560a 100644
--- a/drivers/clk/qcom/gpucc-x1p42100.c
+++ b/drivers/clk/qcom/gpucc-x1p42100.c
@@ -523,7 +523,7 @@ static const struct regmap_config gpu_cc_x1p42100_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc gpu_cc_x1p42100_desc = {
+static const struct qcom_cc_desc gpu_cc_x1p42100_desc = {
.config = &gpu_cc_x1p42100_regmap_config,
.clks = gpu_cc_x1p42100_clocks,
.num_clks = ARRAY_SIZE(gpu_cc_x1p42100_clocks),
diff --git a/drivers/clk/qcom/kpss-xcc.c b/drivers/clk/qcom/kpss-xcc.c
index e7cfa8d22044..97bfb21a5e5e 100644
--- a/drivers/clk/qcom/kpss-xcc.c
+++ b/drivers/clk/qcom/kpss-xcc.c
@@ -5,7 +5,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/property.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
diff --git a/drivers/clk/qcom/krait-cc.c b/drivers/clk/qcom/krait-cc.c
index ae325f4e1047..f29d6dd1f3ac 100644
--- a/drivers/clk/qcom/krait-cc.c
+++ b/drivers/clk/qcom/krait-cc.c
@@ -5,7 +5,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/property.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
diff --git a/drivers/clk/qcom/lpassaudiocc-sc7280.c b/drivers/clk/qcom/lpassaudiocc-sc7280.c
index 45e726477086..22169da08a51 100644
--- a/drivers/clk/qcom/lpassaudiocc-sc7280.c
+++ b/drivers/clk/qcom/lpassaudiocc-sc7280.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2025, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/clk-provider.h>
@@ -713,14 +714,24 @@ static const struct qcom_reset_map lpass_audio_cc_sc7280_resets[] = {
[LPASS_AUDIO_SWR_WSA_CGCR] = { 0xb0, 1 },
};
+static const struct regmap_config lpass_audio_cc_sc7280_reset_regmap_config = {
+ .name = "lpassaudio_cc_reset",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .max_register = 0xc8,
+};
+
static const struct qcom_cc_desc lpass_audio_cc_reset_sc7280_desc = {
- .config = &lpass_audio_cc_sc7280_regmap_config,
+ .config = &lpass_audio_cc_sc7280_reset_regmap_config,
.resets = lpass_audio_cc_sc7280_resets,
.num_resets = ARRAY_SIZE(lpass_audio_cc_sc7280_resets),
};
static const struct of_device_id lpass_audio_cc_sc7280_match_table[] = {
- { .compatible = "qcom,sc7280-lpassaudiocc" },
+ { .compatible = "qcom,qcm6490-lpassaudiocc", .data = &lpass_audio_cc_reset_sc7280_desc },
+ { .compatible = "qcom,sc7280-lpassaudiocc", .data = &lpass_audio_cc_sc7280_desc },
{ }
};
MODULE_DEVICE_TABLE(of, lpass_audio_cc_sc7280_match_table);
@@ -752,13 +763,17 @@ static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev)
struct regmap *regmap;
int ret;
+ desc = device_get_match_data(&pdev->dev);
+
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,qcm6490-lpassaudiocc"))
+ return qcom_cc_probe_by_index(pdev, 1, desc);
+
ret = lpass_audio_setup_runtime_pm(pdev);
if (ret)
return ret;
lpass_audio_cc_sc7280_regmap_config.name = "lpassaudio_cc";
lpass_audio_cc_sc7280_regmap_config.max_register = 0x2f000;
- desc = &lpass_audio_cc_sc7280_desc;
regmap = qcom_cc_map(pdev, desc);
if (IS_ERR(regmap)) {
@@ -772,7 +787,7 @@ static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev)
regmap_write(regmap, 0x4, 0x3b);
regmap_write(regmap, 0x8, 0xff05);
- ret = qcom_cc_really_probe(&pdev->dev, &lpass_audio_cc_sc7280_desc, regmap);
+ ret = qcom_cc_really_probe(&pdev->dev, desc, regmap);
if (ret) {
dev_err(&pdev->dev, "Failed to register LPASS AUDIO CC clocks\n");
goto exit;
diff --git a/drivers/clk/qcom/lpasscc-sdm845.c b/drivers/clk/qcom/lpasscc-sdm845.c
index 7040da952728..5c1ea75f9ba8 100644
--- a/drivers/clk/qcom/lpasscc-sdm845.c
+++ b/drivers/clk/qcom/lpasscc-sdm845.c
@@ -6,7 +6,6 @@
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <linux/module.h>
-#include <linux/of_address.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,lpass-sdm845.h>
diff --git a/drivers/clk/qcom/lpasscorecc-sc7180.c b/drivers/clk/qcom/lpasscorecc-sc7180.c
index 726c6378752f..605516d03993 100644
--- a/drivers/clk/qcom/lpasscorecc-sc7180.c
+++ b/drivers/clk/qcom/lpasscorecc-sc7180.c
@@ -9,7 +9,6 @@
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>
-#include <linux/of.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,lpasscorecc-sc7180.h>
diff --git a/drivers/clk/qcom/lpasscorecc-sc7280.c b/drivers/clk/qcom/lpasscorecc-sc7280.c
index b0888cd2460b..56882c202376 100644
--- a/drivers/clk/qcom/lpasscorecc-sc7280.c
+++ b/drivers/clk/qcom/lpasscorecc-sc7280.c
@@ -6,7 +6,6 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
index cc03722596a4..2d334977d783 100644
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -6,9 +6,9 @@
#include <linux/clk-provider.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
#include <dt-bindings/clock/qcom,mmcc-apq8084.h>
#include <dt-bindings/reset/qcom,mmcc-apq8084.h>
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
index 20d1c43f35d9..cd3c9f8455e5 100644
--- a/drivers/clk/qcom/mmcc-msm8960.c
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -8,13 +8,11 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
-#include <linux/property.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
#include <dt-bindings/clock/qcom,mmcc-msm8960.h>
#include <dt-bindings/reset/qcom,mmcc-msm8960.h>
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index 169e85f60550..12bbc49c87af 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -7,11 +7,11 @@
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
#include <dt-bindings/clock/qcom,mmcc-msm8974.h>
#include <dt-bindings/reset/qcom,mmcc-msm8974.h>
diff --git a/drivers/clk/qcom/mmcc-msm8994.c b/drivers/clk/qcom/mmcc-msm8994.c
index f70d080bf51c..7c0b959a4aa2 100644
--- a/drivers/clk/qcom/mmcc-msm8994.c
+++ b/drivers/clk/qcom/mmcc-msm8994.c
@@ -7,12 +7,11 @@
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
-#include <linux/clk.h>
#include <dt-bindings/clock/qcom,mmcc-msm8994.h>
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index a742f848e4ee..7d67c6f73fe1 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -7,12 +7,10 @@
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
-#include <linux/clk.h>
#include <dt-bindings/clock/qcom,mmcc-msm8996.h>
diff --git a/drivers/clk/qcom/mmcc-msm8998.c b/drivers/clk/qcom/mmcc-msm8998.c
index 5738445a8656..e2f198213b21 100644
--- a/drivers/clk/qcom/mmcc-msm8998.c
+++ b/drivers/clk/qcom/mmcc-msm8998.c
@@ -7,11 +7,10 @@
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
#include <dt-bindings/clock/qcom,mmcc-msm8998.h>
diff --git a/drivers/clk/qcom/mmcc-sdm660.c b/drivers/clk/qcom/mmcc-sdm660.c
index 98ba5b4518fb..e69fc65b13da 100644
--- a/drivers/clk/qcom/mmcc-sdm660.c
+++ b/drivers/clk/qcom/mmcc-sdm660.c
@@ -9,14 +9,10 @@
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/platform_device.h>
-#include <linux/property.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
-#include <linux/clk.h>
-
#include <dt-bindings/clock/qcom,mmcc-sdm660.h>
@@ -2544,7 +2540,7 @@ static struct clk_branch video_core_clk = {
static struct clk_branch video_subcore0_clk = {
.halt_reg = 0x1048,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x1048,
.enable_mask = BIT(0),
diff --git a/drivers/clk/qcom/nsscc-ipq9574.c b/drivers/clk/qcom/nsscc-ipq9574.c
new file mode 100644
index 000000000000..64c6b05ff066
--- /dev/null
+++ b/drivers/clk/qcom/nsscc-ipq9574.c
@@ -0,0 +1,3110 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, 2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/interconnect-provider.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,ipq9574-nsscc.h>
+#include <dt-bindings/interconnect/qcom,ipq9574.h>
+#include <dt-bindings/reset/qcom,ipq9574-nsscc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "reset.h"
+
+/* Need to match the order of clocks in DT binding */
+enum {
+ DT_XO,
+ DT_BIAS_PLL_CC_CLK,
+ DT_BIAS_PLL_UBI_NC_CLK,
+ DT_GCC_GPLL0_OUT_AUX,
+ DT_UNIPHY0_NSS_RX_CLK,
+ DT_UNIPHY0_NSS_TX_CLK,
+ DT_UNIPHY1_NSS_RX_CLK,
+ DT_UNIPHY1_NSS_TX_CLK,
+ DT_UNIPHY2_NSS_RX_CLK,
+ DT_UNIPHY2_NSS_TX_CLK,
+};
+
+enum {
+ P_XO,
+ P_BIAS_PLL_CC_CLK,
+ P_BIAS_PLL_UBI_NC_CLK,
+ P_GCC_GPLL0_OUT_AUX,
+ P_UBI32_PLL_OUT_MAIN,
+ P_UNIPHY0_NSS_RX_CLK,
+ P_UNIPHY0_NSS_TX_CLK,
+ P_UNIPHY1_NSS_RX_CLK,
+ P_UNIPHY1_NSS_TX_CLK,
+ P_UNIPHY2_NSS_RX_CLK,
+ P_UNIPHY2_NSS_TX_CLK,
+};
+
+static const struct alpha_pll_config ubi32_pll_config = {
+ .l = 0x3e,
+ .alpha = 0x6666,
+ .config_ctl_val = 0x200d4aa8,
+ .config_ctl_hi_val = 0x3c,
+ .main_output_mask = BIT(0),
+ .aux_output_mask = BIT(1),
+ .pre_div_val = 0x0,
+ .pre_div_mask = BIT(12),
+ .post_div_val = 0x0,
+ .post_div_mask = GENMASK(9, 8),
+ .alpha_en_mask = BIT(24),
+ .test_ctl_val = 0x1c0000c0,
+ .test_ctl_hi_val = 0x4000,
+};
+
+static struct clk_alpha_pll ubi32_pll_main = {
+ .offset = 0x28000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_NSS_HUAYRA],
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ubi32_pll_main",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_XO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_huayra_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv ubi32_pll = {
+ .offset = 0x28000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_NSS_HUAYRA],
+ .width = 2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ubi32_pll",
+ .parent_hws = (const struct clk_hw *[]) {
+ &ubi32_pll_main.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct parent_map nss_cc_parent_map_0[] = {
+ { P_XO, 0 },
+ { P_BIAS_PLL_CC_CLK, 1 },
+ { P_UNIPHY0_NSS_RX_CLK, 2 },
+ { P_UNIPHY0_NSS_TX_CLK, 3 },
+ { P_UNIPHY1_NSS_RX_CLK, 4 },
+ { P_UNIPHY1_NSS_TX_CLK, 5 },
+};
+
+static const struct clk_parent_data nss_cc_parent_data_0[] = {
+ { .index = DT_XO },
+ { .index = DT_BIAS_PLL_CC_CLK },
+ { .index = DT_UNIPHY0_NSS_RX_CLK },
+ { .index = DT_UNIPHY0_NSS_TX_CLK },
+ { .index = DT_UNIPHY1_NSS_RX_CLK },
+ { .index = DT_UNIPHY1_NSS_TX_CLK },
+};
+
+static const struct parent_map nss_cc_parent_map_1[] = {
+ { P_XO, 0 },
+ { P_BIAS_PLL_UBI_NC_CLK, 1 },
+ { P_GCC_GPLL0_OUT_AUX, 2 },
+ { P_BIAS_PLL_CC_CLK, 6 },
+};
+
+static const struct clk_parent_data nss_cc_parent_data_1[] = {
+ { .index = DT_XO },
+ { .index = DT_BIAS_PLL_UBI_NC_CLK },
+ { .index = DT_GCC_GPLL0_OUT_AUX },
+ { .index = DT_BIAS_PLL_CC_CLK },
+};
+
+static const struct parent_map nss_cc_parent_map_2[] = {
+ { P_XO, 0 },
+ { P_UBI32_PLL_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_AUX, 2 },
+};
+
+static const struct clk_parent_data nss_cc_parent_data_2[] = {
+ { .index = DT_XO },
+ { .hw = &ubi32_pll.clkr.hw },
+ { .index = DT_GCC_GPLL0_OUT_AUX },
+};
+
+static const struct parent_map nss_cc_parent_map_3[] = {
+ { P_XO, 0 },
+ { P_BIAS_PLL_CC_CLK, 1 },
+ { P_GCC_GPLL0_OUT_AUX, 2 },
+};
+
+static const struct clk_parent_data nss_cc_parent_data_3[] = {
+ { .index = DT_XO },
+ { .index = DT_BIAS_PLL_CC_CLK },
+ { .index = DT_GCC_GPLL0_OUT_AUX },
+};
+
+static const struct parent_map nss_cc_parent_map_4[] = {
+ { P_XO, 0 },
+ { P_BIAS_PLL_CC_CLK, 1 },
+ { P_UNIPHY0_NSS_RX_CLK, 2 },
+ { P_UNIPHY0_NSS_TX_CLK, 3 },
+};
+
+static const struct clk_parent_data nss_cc_parent_data_4[] = {
+ { .index = DT_XO },
+ { .index = DT_BIAS_PLL_CC_CLK },
+ { .index = DT_UNIPHY0_NSS_RX_CLK },
+ { .index = DT_UNIPHY0_NSS_TX_CLK },
+};
+
+static const struct parent_map nss_cc_parent_map_5[] = {
+ { P_XO, 0 },
+ { P_BIAS_PLL_CC_CLK, 1 },
+ { P_UNIPHY2_NSS_RX_CLK, 2 },
+ { P_UNIPHY2_NSS_TX_CLK, 3 },
+};
+
+static const struct clk_parent_data nss_cc_parent_data_5[] = {
+ { .index = DT_XO },
+ { .index = DT_BIAS_PLL_CC_CLK },
+ { .index = DT_UNIPHY2_NSS_RX_CLK },
+ { .index = DT_UNIPHY2_NSS_TX_CLK },
+};
+
+static const struct parent_map nss_cc_parent_map_6[] = {
+ { P_XO, 0 },
+ { P_GCC_GPLL0_OUT_AUX, 2 },
+ { P_BIAS_PLL_CC_CLK, 6 },
+};
+
+static const struct clk_parent_data nss_cc_parent_data_6[] = {
+ { .index = DT_XO },
+ { .index = DT_GCC_GPLL0_OUT_AUX },
+ { .index = DT_BIAS_PLL_CC_CLK },
+};
+
+static const struct parent_map nss_cc_parent_map_7[] = {
+ { P_XO, 0 },
+ { P_UBI32_PLL_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_AUX, 2 },
+ { P_BIAS_PLL_CC_CLK, 6 },
+};
+
+static const struct clk_parent_data nss_cc_parent_data_7[] = {
+ { .index = DT_XO },
+ { .hw = &ubi32_pll.clkr.hw },
+ { .index = DT_GCC_GPLL0_OUT_AUX },
+ { .index = DT_BIAS_PLL_CC_CLK },
+};
+
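+/* Frequency tables: F(rate, source, divider, M, N) entries per RCG */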
+static const struct freq_tbl ftbl_nss_cc_ce_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(353000000, P_BIAS_PLL_UBI_NC_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_ce_clk_src = {
+ .cmd_rcgr = 0x28404,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_1,
+ .freq_tbl = ftbl_nss_cc_ce_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ce_clk_src",
+ .parent_data = nss_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_nss_cc_cfg_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_AUX, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_cfg_clk_src = {
+ .cmd_rcgr = 0x28104,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_3,
+ .freq_tbl = ftbl_nss_cc_cfg_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_cfg_clk_src",
+ .parent_data = nss_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_nss_cc_clc_clk_src[] = {
+ F(533333333, P_GCC_GPLL0_OUT_AUX, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_clc_clk_src = {
+ .cmd_rcgr = 0x28604,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_6,
+ .freq_tbl = ftbl_nss_cc_clc_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_clc_clk_src",
+ .parent_data = nss_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_6),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_nss_cc_crypto_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(300000000, P_BIAS_PLL_CC_CLK, 4, 0, 0),
+ F(600000000, P_BIAS_PLL_CC_CLK, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_crypto_clk_src = {
+ .cmd_rcgr = 0x16008,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_3,
+ .freq_tbl = ftbl_nss_cc_crypto_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_crypto_clk_src",
+ .parent_data = nss_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_haq_clk_src = {
+ .cmd_rcgr = 0x28304,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_1,
+ .freq_tbl = ftbl_nss_cc_ce_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_haq_clk_src",
+ .parent_data = nss_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_imem_clk_src = {
+ .cmd_rcgr = 0xe008,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_1,
+ .freq_tbl = ftbl_nss_cc_ce_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_imem_clk_src",
+ .parent_data = nss_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_nss_cc_int_cfg_clk_src[] = {
+ F(200000000, P_GCC_GPLL0_OUT_AUX, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_int_cfg_clk_src = {
+ .cmd_rcgr = 0x287b4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_3,
+ .freq_tbl = ftbl_nss_cc_int_cfg_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_int_cfg_clk_src",
+ .parent_data = nss_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
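+/*
+ * The PPE port clocks can hit the same rate from different UNIPHY
+ * source rates depending on the negotiated link mode, so these tables
+ * use FM()/C() multi-frequency entries to list the alternative
+ * source/divider combinations and FMS() where only one source applies.
+ */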
+static const struct freq_conf ftbl_nss_cc_port1_rx_clk_src_25[] = {
+ C(P_UNIPHY0_NSS_RX_CLK, 12.5, 0, 0),
+ C(P_UNIPHY0_NSS_RX_CLK, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port1_rx_clk_src_125[] = {
+ C(P_UNIPHY0_NSS_RX_CLK, 2.5, 0, 0),
+ C(P_UNIPHY0_NSS_RX_CLK, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port1_rx_clk_src[] = {
+ FMS(24000000, P_XO, 1, 0, 0),
+ FM(25000000, ftbl_nss_cc_port1_rx_clk_src_25),
+ FMS(78125000, P_UNIPHY0_NSS_RX_CLK, 4, 0, 0),
+ FM(125000000, ftbl_nss_cc_port1_rx_clk_src_125),
+ FMS(312500000, P_UNIPHY0_NSS_RX_CLK, 1, 0, 0),
+ { }
+};
+
+static const struct freq_conf ftbl_nss_cc_port1_tx_clk_src_25[] = {
+ C(P_UNIPHY0_NSS_TX_CLK, 12.5, 0, 0),
+ C(P_UNIPHY0_NSS_TX_CLK, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port1_tx_clk_src_125[] = {
+ C(P_UNIPHY0_NSS_TX_CLK, 2.5, 0, 0),
+ C(P_UNIPHY0_NSS_TX_CLK, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port1_tx_clk_src[] = {
+ FMS(24000000, P_XO, 1, 0, 0),
+ FM(25000000, ftbl_nss_cc_port1_tx_clk_src_25),
+ FMS(78125000, P_UNIPHY0_NSS_TX_CLK, 4, 0, 0),
+ FM(125000000, ftbl_nss_cc_port1_tx_clk_src_125),
+ FMS(312500000, P_UNIPHY0_NSS_TX_CLK, 1, 0, 0),
+ { }
+};
+
+static const struct freq_conf ftbl_nss_cc_port5_rx_clk_src_25[] = {
+ C(P_UNIPHY1_NSS_RX_CLK, 12.5, 0, 0),
+ C(P_UNIPHY0_NSS_RX_CLK, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port5_rx_clk_src_125[] = {
+ C(P_UNIPHY1_NSS_RX_CLK, 2.5, 0, 0),
+ C(P_UNIPHY0_NSS_RX_CLK, 1, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port5_rx_clk_src_312p5[] = {
+ C(P_UNIPHY1_NSS_RX_CLK, 1, 0, 0),
+ C(P_UNIPHY0_NSS_RX_CLK, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port5_rx_clk_src[] = {
+ FMS(24000000, P_XO, 1, 0, 0),
+ FM(25000000, ftbl_nss_cc_port5_rx_clk_src_25),
+ FMS(78125000, P_UNIPHY1_NSS_RX_CLK, 4, 0, 0),
+ FM(125000000, ftbl_nss_cc_port5_rx_clk_src_125),
+ FMS(156250000, P_UNIPHY1_NSS_RX_CLK, 2, 0, 0),
+ FM(312500000, ftbl_nss_cc_port5_rx_clk_src_312p5),
+ { }
+};
+
+static const struct freq_conf ftbl_nss_cc_port5_tx_clk_src_25[] = {
+ C(P_UNIPHY1_NSS_TX_CLK, 12.5, 0, 0),
+ C(P_UNIPHY0_NSS_TX_CLK, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port5_tx_clk_src_125[] = {
+ C(P_UNIPHY1_NSS_TX_CLK, 2.5, 0, 0),
+ C(P_UNIPHY0_NSS_TX_CLK, 1, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port5_tx_clk_src_312p5[] = {
+ C(P_UNIPHY1_NSS_TX_CLK, 1, 0, 0),
+ C(P_UNIPHY0_NSS_TX_CLK, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port5_tx_clk_src[] = {
+ FMS(24000000, P_XO, 1, 0, 0),
+ FM(25000000, ftbl_nss_cc_port5_tx_clk_src_25),
+ FMS(78125000, P_UNIPHY1_NSS_TX_CLK, 4, 0, 0),
+ FM(125000000, ftbl_nss_cc_port5_tx_clk_src_125),
+ FMS(156250000, P_UNIPHY1_NSS_TX_CLK, 2, 0, 0),
+ FM(312500000, ftbl_nss_cc_port5_tx_clk_src_312p5),
+ { }
+};
+
+static const struct freq_conf ftbl_nss_cc_port6_rx_clk_src_25[] = {
+ C(P_UNIPHY2_NSS_RX_CLK, 12.5, 0, 0),
+ C(P_UNIPHY2_NSS_RX_CLK, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port6_rx_clk_src_125[] = {
+ C(P_UNIPHY2_NSS_RX_CLK, 2.5, 0, 0),
+ C(P_UNIPHY2_NSS_RX_CLK, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port6_rx_clk_src[] = {
+ FMS(24000000, P_XO, 1, 0, 0),
+ FM(25000000, ftbl_nss_cc_port6_rx_clk_src_25),
+ FMS(78125000, P_UNIPHY2_NSS_RX_CLK, 4, 0, 0),
+ FM(125000000, ftbl_nss_cc_port6_rx_clk_src_125),
+ FMS(156250000, P_UNIPHY2_NSS_RX_CLK, 2, 0, 0),
+ FMS(312500000, P_UNIPHY2_NSS_RX_CLK, 1, 0, 0),
+ { }
+};
+
+static const struct freq_conf ftbl_nss_cc_port6_tx_clk_src_25[] = {
+ C(P_UNIPHY2_NSS_TX_CLK, 12.5, 0, 0),
+ C(P_UNIPHY2_NSS_TX_CLK, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port6_tx_clk_src_125[] = {
+ C(P_UNIPHY2_NSS_TX_CLK, 2.5, 0, 0),
+ C(P_UNIPHY2_NSS_TX_CLK, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port6_tx_clk_src[] = {
+ FMS(24000000, P_XO, 1, 0, 0),
+ FM(25000000, ftbl_nss_cc_port6_tx_clk_src_25),
+ FMS(78125000, P_UNIPHY2_NSS_TX_CLK, 4, 0, 0),
+ FM(125000000, ftbl_nss_cc_port6_tx_clk_src_125),
+ FMS(156250000, P_UNIPHY2_NSS_TX_CLK, 2, 0, 0),
+ FMS(312500000, P_UNIPHY2_NSS_TX_CLK, 1, 0, 0),
+ { }
+};
+
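+/*
+ * Port RX/TX clock sources: ports 1-4 are fed from UNIPHY0, port 5 can
+ * be muxed between UNIPHY0 and UNIPHY1, and port 6 is fed from UNIPHY2;
+ * all of them use the multi-frequency RCG ops.
+ */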
+static struct clk_rcg2 nss_cc_port1_rx_clk_src = {
+ .cmd_rcgr = 0x28110,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_4,
+ .freq_multi_tbl = ftbl_nss_cc_port1_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port1_rx_clk_src",
+ .parent_data = nss_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_port1_tx_clk_src = {
+ .cmd_rcgr = 0x2811c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_4,
+ .freq_multi_tbl = ftbl_nss_cc_port1_tx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port1_tx_clk_src",
+ .parent_data = nss_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_port2_rx_clk_src = {
+ .cmd_rcgr = 0x28128,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_4,
+ .freq_multi_tbl = ftbl_nss_cc_port1_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port2_rx_clk_src",
+ .parent_data = nss_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_port2_tx_clk_src = {
+ .cmd_rcgr = 0x28134,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_4,
+ .freq_multi_tbl = ftbl_nss_cc_port1_tx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port2_tx_clk_src",
+ .parent_data = nss_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_port3_rx_clk_src = {
+ .cmd_rcgr = 0x28140,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_4,
+ .freq_multi_tbl = ftbl_nss_cc_port1_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port3_rx_clk_src",
+ .parent_data = nss_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_port3_tx_clk_src = {
+ .cmd_rcgr = 0x2814c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_4,
+ .freq_multi_tbl = ftbl_nss_cc_port1_tx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port3_tx_clk_src",
+ .parent_data = nss_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_port4_rx_clk_src = {
+ .cmd_rcgr = 0x28158,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_4,
+ .freq_multi_tbl = ftbl_nss_cc_port1_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port4_rx_clk_src",
+ .parent_data = nss_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_port4_tx_clk_src = {
+ .cmd_rcgr = 0x28164,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_4,
+ .freq_multi_tbl = ftbl_nss_cc_port1_tx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port4_tx_clk_src",
+ .parent_data = nss_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_port5_rx_clk_src = {
+ .cmd_rcgr = 0x28170,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_0,
+ .freq_multi_tbl = ftbl_nss_cc_port5_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port5_rx_clk_src",
+ .parent_data = nss_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_0),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_port5_tx_clk_src = {
+ .cmd_rcgr = 0x2817c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_0,
+ .freq_multi_tbl = ftbl_nss_cc_port5_tx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port5_tx_clk_src",
+ .parent_data = nss_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_0),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_port6_rx_clk_src = {
+ .cmd_rcgr = 0x28188,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_5,
+ .freq_multi_tbl = ftbl_nss_cc_port6_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port6_rx_clk_src",
+ .parent_data = nss_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_5),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_port6_tx_clk_src = {
+ .cmd_rcgr = 0x28194,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_5,
+ .freq_multi_tbl = ftbl_nss_cc_port6_tx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port6_tx_clk_src",
+ .parent_data = nss_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_5),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_ppe_clk_src = {
+ .cmd_rcgr = 0x28204,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_1,
+ .freq_tbl = ftbl_nss_cc_ce_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ppe_clk_src",
+ .parent_data = nss_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
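+/* UBI32 core rates; all non-XO entries are divided down from ubi32_pll */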
+static const struct freq_tbl ftbl_nss_cc_ubi0_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(187200000, P_UBI32_PLL_OUT_MAIN, 8, 0, 0),
+ F(748800000, P_UBI32_PLL_OUT_MAIN, 2, 0, 0),
+ F(1497600000, P_UBI32_PLL_OUT_MAIN, 1, 0, 0),
+ F(1689600000, P_UBI32_PLL_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_ubi0_clk_src = {
+ .cmd_rcgr = 0x28704,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_2,
+ .freq_tbl = ftbl_nss_cc_ubi0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi0_clk_src",
+ .parent_data = nss_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_ubi1_clk_src = {
+ .cmd_rcgr = 0x2870c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_2,
+ .freq_tbl = ftbl_nss_cc_ubi0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi1_clk_src",
+ .parent_data = nss_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_ubi2_clk_src = {
+ .cmd_rcgr = 0x28714,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_2,
+ .freq_tbl = ftbl_nss_cc_ubi0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi2_clk_src",
+ .parent_data = nss_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_ubi3_clk_src = {
+ .cmd_rcgr = 0x2871c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_2,
+ .freq_tbl = ftbl_nss_cc_ubi0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi3_clk_src",
+ .parent_data = nss_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_ubi_axi_clk_src = {
+ .cmd_rcgr = 0x28724,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_7,
+ .freq_tbl = ftbl_nss_cc_clc_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi_axi_clk_src",
+ .parent_data = nss_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_7),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_ubi_nc_axi_bfdcd_clk_src = {
+ .cmd_rcgr = 0x2872c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_1,
+ .freq_tbl = ftbl_nss_cc_ce_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi_nc_axi_bfdcd_clk_src",
+ .parent_data = nss_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
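+/*
+ * Post-RCG dividers: each port RX/TX divider sits behind the matching
+ * RCG above, while the UBI core dividers are read-only as far as this
+ * driver is concerned (clk_regmap_div_ro_ops).
+ */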
+static struct clk_regmap_div nss_cc_port1_rx_div_clk_src = {
+ .reg = 0x28118,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port1_rx_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port1_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port1_tx_div_clk_src = {
+ .reg = 0x28124,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port1_tx_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port1_tx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port2_rx_div_clk_src = {
+ .reg = 0x28130,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port2_rx_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port2_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port2_tx_div_clk_src = {
+ .reg = 0x2813c,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port2_tx_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port2_tx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port3_rx_div_clk_src = {
+ .reg = 0x28148,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port3_rx_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port3_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port3_tx_div_clk_src = {
+ .reg = 0x28154,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port3_tx_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port3_tx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port4_rx_div_clk_src = {
+ .reg = 0x28160,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port4_rx_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port4_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port4_tx_div_clk_src = {
+ .reg = 0x2816c,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port4_tx_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port4_tx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port5_rx_div_clk_src = {
+ .reg = 0x28178,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port5_rx_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port5_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port5_tx_div_clk_src = {
+ .reg = 0x28184,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port5_tx_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port5_tx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port6_rx_div_clk_src = {
+ .reg = 0x28190,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port6_rx_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port6_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port6_tx_div_clk_src = {
+ .reg = 0x2819c,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port6_tx_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port6_tx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_ubi0_div_clk_src = {
+ .reg = 0x287a4,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_ubi1_div_clk_src = {
+ .reg = 0x287a8,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi1_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_ubi2_div_clk_src = {
+ .reg = 0x287ac,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi2_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_ubi3_div_clk_src = {
+ .reg = 0x287b0,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi3_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
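+/* XGMAC PTP reference clocks: per-MAC read-only dividers off the PPE clock */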
+static struct clk_regmap_div nss_cc_xgmac0_ptp_ref_div_clk_src = {
+ .reg = 0x28214,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac0_ptp_ref_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_xgmac1_ptp_ref_div_clk_src = {
+ .reg = 0x28218,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac1_ptp_ref_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_xgmac2_ptp_ref_div_clk_src = {
+ .reg = 0x2821c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac2_ptp_ref_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_xgmac3_ptp_ref_div_clk_src = {
+ .reg = 0x28220,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac3_ptp_ref_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_xgmac4_ptp_ref_div_clk_src = {
+ .reg = 0x28224,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac4_ptp_ref_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_xgmac5_ptp_ref_div_clk_src = {
+ .reg = 0x28228,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac5_ptp_ref_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
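+/*
+ * Branch (gate) clocks: each has a single enable bit and a halt status
+ * check at halt_reg, and is parented to one of the sources above so
+ * rate requests propagate through CLK_SET_RATE_PARENT.
+ */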
+static struct clk_branch nss_cc_ce_apb_clk = {
+ .halt_reg = 0x2840c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2840c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ce_apb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ce_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ce_axi_clk = {
+ .halt_reg = 0x28410,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28410,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ce_axi_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ce_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_clc_axi_clk = {
+ .halt_reg = 0x2860c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2860c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_clc_axi_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_clc_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_crypto_clk = {
+ .halt_reg = 0x1601c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1601c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_crypto_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_crypto_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_crypto_ppe_clk = {
+ .halt_reg = 0x28240,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28240,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_crypto_ppe_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_haq_ahb_clk = {
+ .halt_reg = 0x2830c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2830c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_haq_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_haq_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_haq_axi_clk = {
+ .halt_reg = 0x28310,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28310,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_haq_axi_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_haq_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_imem_ahb_clk = {
+ .halt_reg = 0xe018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_imem_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_imem_qsb_clk = {
+ .halt_reg = 0xe010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_imem_qsb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_imem_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nss_csr_clk = {
+ .halt_reg = 0x281d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x281d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nss_csr_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_ce_apb_clk = {
+ .halt_reg = 0x28414,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28414,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_ce_apb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ce_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_ce_axi_clk = {
+ .halt_reg = 0x28418,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28418,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_ce_axi_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ce_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_clc_axi_clk = {
+ .halt_reg = 0x28610,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28610,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_clc_axi_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_clc_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_crypto_clk = {
+ .halt_reg = 0x16020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_crypto_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_crypto_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_haq_ahb_clk = {
+ .halt_reg = 0x28314,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28314,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_haq_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_haq_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_haq_axi_clk = {
+ .halt_reg = 0x28318,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28318,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_haq_axi_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_haq_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_imem_ahb_clk = {
+ .halt_reg = 0xe01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_imem_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_imem_qsb_clk = {
+ .halt_reg = 0xe014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_imem_qsb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_imem_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_nss_csr_clk = {
+ .halt_reg = 0x281d4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x281d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_nss_csr_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_ppe_cfg_clk = {
+ .halt_reg = 0x28248,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28248,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_ppe_cfg_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_ppe_clk = {
+ .halt_reg = 0x28244,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28244,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_ppe_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_ubi32_ahb0_clk = {
+ .halt_reg = 0x28788,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28788,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_ubi32_ahb0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_ubi32_axi0_clk = {
+ .halt_reg = 0x287a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x287a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_ubi32_axi0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_ubi32_int0_ahb_clk = {
+ .halt_reg = 0x2878c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2878c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_ubi32_int0_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_int_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_ubi32_nc_axi0_1_clk = {
+ .halt_reg = 0x287bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x287bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_ubi32_nc_axi0_1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_ubi32_nc_axi0_clk = {
+ .halt_reg = 0x28764,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28764,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_nssnoc_ubi32_nc_axi0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port1_mac_clk = {
+ .halt_reg = 0x2824c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2824c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port1_mac_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port1_rx_clk = {
+ .halt_reg = 0x281a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x281a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port1_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port1_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port1_tx_clk = {
+ .halt_reg = 0x281a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x281a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port1_tx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port1_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port2_mac_clk = {
+ .halt_reg = 0x28250,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28250,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port2_mac_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port2_rx_clk = {
+ .halt_reg = 0x281a8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x281a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port2_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port2_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port2_tx_clk = {
+ .halt_reg = 0x281ac,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x281ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port2_tx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port2_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port3_mac_clk = {
+ .halt_reg = 0x28254,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28254,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port3_mac_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port3_rx_clk = {
+ .halt_reg = 0x281b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x281b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port3_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port3_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port3_tx_clk = {
+ .halt_reg = 0x281b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x281b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port3_tx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port3_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port4_mac_clk = {
+ .halt_reg = 0x28258,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28258,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port4_mac_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port4_rx_clk = {
+ .halt_reg = 0x281b8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x281b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port4_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port4_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port4_tx_clk = {
+ .halt_reg = 0x281bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x281bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port4_tx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port4_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port5_mac_clk = {
+ .halt_reg = 0x2825c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2825c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port5_mac_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port5_rx_clk = {
+ .halt_reg = 0x281c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x281c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port5_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port5_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port5_tx_clk = {
+ .halt_reg = 0x281c4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x281c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port5_tx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port5_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port6_mac_clk = {
+ .halt_reg = 0x28260,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28260,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port6_mac_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port6_rx_clk = {
+ .halt_reg = 0x281c8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x281c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port6_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port6_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port6_tx_clk = {
+ .halt_reg = 0x281cc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x281cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port6_tx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port6_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ppe_edma_cfg_clk = {
+ .halt_reg = 0x2823c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2823c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ppe_edma_cfg_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ppe_edma_clk = {
+ .halt_reg = 0x28238,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28238,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ppe_edma_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ppe_switch_btq_clk = {
+ .halt_reg = 0x2827c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2827c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ppe_switch_btq_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ppe_switch_cfg_clk = {
+ .halt_reg = 0x28234,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28234,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ppe_switch_cfg_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ppe_switch_clk = {
+ .halt_reg = 0x28230,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28230,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ppe_switch_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ppe_switch_ipe_clk = {
+ .halt_reg = 0x2822c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2822c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ppe_switch_ipe_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_ahb0_clk = {
+ .halt_reg = 0x28768,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28768,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_ahb0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_ahb1_clk = {
+ .halt_reg = 0x28770,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28770,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_ahb1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_ahb2_clk = {
+ .halt_reg = 0x28778,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28778,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_ahb2_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_ahb3_clk = {
+ .halt_reg = 0x28780,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28780,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_ahb3_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_axi0_clk = {
+ .halt_reg = 0x28790,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28790,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_axi0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_axi1_clk = {
+ .halt_reg = 0x28794,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28794,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_axi1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_axi2_clk = {
+ .halt_reg = 0x28798,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28798,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_axi2_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_axi3_clk = {
+ .halt_reg = 0x2879c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2879c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_axi3_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_core0_clk = {
+ .halt_reg = 0x28734,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28734,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_core0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_core1_clk = {
+ .halt_reg = 0x28738,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28738,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_core1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_core2_clk = {
+ .halt_reg = 0x2873c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2873c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_core2_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_core3_clk = {
+ .halt_reg = 0x28740,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28740,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_core3_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi3_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_intr0_ahb_clk = {
+ .halt_reg = 0x2876c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2876c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_intr0_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_int_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_intr1_ahb_clk = {
+ .halt_reg = 0x28774,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28774,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_intr1_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_int_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_intr2_ahb_clk = {
+ .halt_reg = 0x2877c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2877c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_intr2_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_int_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_intr3_ahb_clk = {
+ .halt_reg = 0x28784,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28784,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_intr3_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_int_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_nc_axi0_clk = {
+ .halt_reg = 0x28744,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28744,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_nc_axi0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_nc_axi1_clk = {
+ .halt_reg = 0x2874c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2874c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_nc_axi1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_nc_axi2_clk = {
+ .halt_reg = 0x28754,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28754,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_nc_axi2_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_nc_axi3_clk = {
+ .halt_reg = 0x2875c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2875c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_nc_axi3_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_utcm0_clk = {
+ .halt_reg = 0x28748,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28748,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_utcm0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_utcm1_clk = {
+ .halt_reg = 0x28750,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28750,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_utcm1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_utcm2_clk = {
+ .halt_reg = 0x28758,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28758,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_utcm2_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ubi32_utcm3_clk = {
+ .halt_reg = 0x28760,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28760,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_ubi32_utcm3_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port1_rx_clk = {
+ .halt_reg = 0x28904,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28904,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_uniphy_port1_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port1_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port1_tx_clk = {
+ .halt_reg = 0x28908,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28908,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_uniphy_port1_tx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port1_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port2_rx_clk = {
+ .halt_reg = 0x2890c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2890c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_uniphy_port2_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port2_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port2_tx_clk = {
+ .halt_reg = 0x28910,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28910,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_uniphy_port2_tx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port2_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port3_rx_clk = {
+ .halt_reg = 0x28914,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28914,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_uniphy_port3_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port3_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port3_tx_clk = {
+ .halt_reg = 0x28918,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28918,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_uniphy_port3_tx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port3_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port4_rx_clk = {
+ .halt_reg = 0x2891c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2891c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_uniphy_port4_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port4_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port4_tx_clk = {
+ .halt_reg = 0x28920,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28920,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_uniphy_port4_tx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port4_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port5_rx_clk = {
+ .halt_reg = 0x28924,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28924,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_uniphy_port5_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port5_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port5_tx_clk = {
+ .halt_reg = 0x28928,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28928,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_uniphy_port5_tx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port5_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port6_rx_clk = {
+ .halt_reg = 0x2892c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2892c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_uniphy_port6_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port6_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port6_tx_clk = {
+ .halt_reg = 0x28930,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28930,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_uniphy_port6_tx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_port6_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_xgmac0_ptp_ref_clk = {
+ .halt_reg = 0x28264,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28264,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac0_ptp_ref_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_xgmac0_ptp_ref_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_xgmac1_ptp_ref_clk = {
+ .halt_reg = 0x28268,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28268,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac1_ptp_ref_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_xgmac1_ptp_ref_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_xgmac2_ptp_ref_clk = {
+ .halt_reg = 0x2826c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2826c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac2_ptp_ref_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_xgmac2_ptp_ref_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_xgmac3_ptp_ref_clk = {
+ .halt_reg = 0x28270,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28270,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac3_ptp_ref_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_xgmac3_ptp_ref_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_xgmac4_ptp_ref_clk = {
+ .halt_reg = 0x28274,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28274,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac4_ptp_ref_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_xgmac4_ptp_ref_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_xgmac5_ptp_ref_clk = {
+ .halt_reg = 0x28278,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28278,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac5_ptp_ref_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &nss_cc_xgmac5_ptp_ref_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *nss_cc_ipq9574_clocks[] = {
+ [NSS_CC_CE_APB_CLK] = &nss_cc_ce_apb_clk.clkr,
+ [NSS_CC_CE_AXI_CLK] = &nss_cc_ce_axi_clk.clkr,
+ [NSS_CC_CE_CLK_SRC] = &nss_cc_ce_clk_src.clkr,
+ [NSS_CC_CFG_CLK_SRC] = &nss_cc_cfg_clk_src.clkr,
+ [NSS_CC_CLC_AXI_CLK] = &nss_cc_clc_axi_clk.clkr,
+ [NSS_CC_CLC_CLK_SRC] = &nss_cc_clc_clk_src.clkr,
+ [NSS_CC_CRYPTO_CLK] = &nss_cc_crypto_clk.clkr,
+ [NSS_CC_CRYPTO_CLK_SRC] = &nss_cc_crypto_clk_src.clkr,
+ [NSS_CC_CRYPTO_PPE_CLK] = &nss_cc_crypto_ppe_clk.clkr,
+ [NSS_CC_HAQ_AHB_CLK] = &nss_cc_haq_ahb_clk.clkr,
+ [NSS_CC_HAQ_AXI_CLK] = &nss_cc_haq_axi_clk.clkr,
+ [NSS_CC_HAQ_CLK_SRC] = &nss_cc_haq_clk_src.clkr,
+ [NSS_CC_IMEM_AHB_CLK] = &nss_cc_imem_ahb_clk.clkr,
+ [NSS_CC_IMEM_CLK_SRC] = &nss_cc_imem_clk_src.clkr,
+ [NSS_CC_IMEM_QSB_CLK] = &nss_cc_imem_qsb_clk.clkr,
+ [NSS_CC_INT_CFG_CLK_SRC] = &nss_cc_int_cfg_clk_src.clkr,
+ [NSS_CC_NSS_CSR_CLK] = &nss_cc_nss_csr_clk.clkr,
+ [NSS_CC_NSSNOC_CE_APB_CLK] = &nss_cc_nssnoc_ce_apb_clk.clkr,
+ [NSS_CC_NSSNOC_CE_AXI_CLK] = &nss_cc_nssnoc_ce_axi_clk.clkr,
+ [NSS_CC_NSSNOC_CLC_AXI_CLK] = &nss_cc_nssnoc_clc_axi_clk.clkr,
+ [NSS_CC_NSSNOC_CRYPTO_CLK] = &nss_cc_nssnoc_crypto_clk.clkr,
+ [NSS_CC_NSSNOC_HAQ_AHB_CLK] = &nss_cc_nssnoc_haq_ahb_clk.clkr,
+ [NSS_CC_NSSNOC_HAQ_AXI_CLK] = &nss_cc_nssnoc_haq_axi_clk.clkr,
+ [NSS_CC_NSSNOC_IMEM_AHB_CLK] = &nss_cc_nssnoc_imem_ahb_clk.clkr,
+ [NSS_CC_NSSNOC_IMEM_QSB_CLK] = &nss_cc_nssnoc_imem_qsb_clk.clkr,
+ [NSS_CC_NSSNOC_NSS_CSR_CLK] = &nss_cc_nssnoc_nss_csr_clk.clkr,
+ [NSS_CC_NSSNOC_PPE_CFG_CLK] = &nss_cc_nssnoc_ppe_cfg_clk.clkr,
+ [NSS_CC_NSSNOC_PPE_CLK] = &nss_cc_nssnoc_ppe_clk.clkr,
+ [NSS_CC_NSSNOC_UBI32_AHB0_CLK] = &nss_cc_nssnoc_ubi32_ahb0_clk.clkr,
+ [NSS_CC_NSSNOC_UBI32_AXI0_CLK] = &nss_cc_nssnoc_ubi32_axi0_clk.clkr,
+ [NSS_CC_NSSNOC_UBI32_INT0_AHB_CLK] =
+ &nss_cc_nssnoc_ubi32_int0_ahb_clk.clkr,
+ [NSS_CC_NSSNOC_UBI32_NC_AXI0_1_CLK] =
+ &nss_cc_nssnoc_ubi32_nc_axi0_1_clk.clkr,
+ [NSS_CC_NSSNOC_UBI32_NC_AXI0_CLK] =
+ &nss_cc_nssnoc_ubi32_nc_axi0_clk.clkr,
+ [NSS_CC_PORT1_MAC_CLK] = &nss_cc_port1_mac_clk.clkr,
+ [NSS_CC_PORT1_RX_CLK] = &nss_cc_port1_rx_clk.clkr,
+ [NSS_CC_PORT1_RX_CLK_SRC] = &nss_cc_port1_rx_clk_src.clkr,
+ [NSS_CC_PORT1_RX_DIV_CLK_SRC] = &nss_cc_port1_rx_div_clk_src.clkr,
+ [NSS_CC_PORT1_TX_CLK] = &nss_cc_port1_tx_clk.clkr,
+ [NSS_CC_PORT1_TX_CLK_SRC] = &nss_cc_port1_tx_clk_src.clkr,
+ [NSS_CC_PORT1_TX_DIV_CLK_SRC] = &nss_cc_port1_tx_div_clk_src.clkr,
+ [NSS_CC_PORT2_MAC_CLK] = &nss_cc_port2_mac_clk.clkr,
+ [NSS_CC_PORT2_RX_CLK] = &nss_cc_port2_rx_clk.clkr,
+ [NSS_CC_PORT2_RX_CLK_SRC] = &nss_cc_port2_rx_clk_src.clkr,
+ [NSS_CC_PORT2_RX_DIV_CLK_SRC] = &nss_cc_port2_rx_div_clk_src.clkr,
+ [NSS_CC_PORT2_TX_CLK] = &nss_cc_port2_tx_clk.clkr,
+ [NSS_CC_PORT2_TX_CLK_SRC] = &nss_cc_port2_tx_clk_src.clkr,
+ [NSS_CC_PORT2_TX_DIV_CLK_SRC] = &nss_cc_port2_tx_div_clk_src.clkr,
+ [NSS_CC_PORT3_MAC_CLK] = &nss_cc_port3_mac_clk.clkr,
+ [NSS_CC_PORT3_RX_CLK] = &nss_cc_port3_rx_clk.clkr,
+ [NSS_CC_PORT3_RX_CLK_SRC] = &nss_cc_port3_rx_clk_src.clkr,
+ [NSS_CC_PORT3_RX_DIV_CLK_SRC] = &nss_cc_port3_rx_div_clk_src.clkr,
+ [NSS_CC_PORT3_TX_CLK] = &nss_cc_port3_tx_clk.clkr,
+ [NSS_CC_PORT3_TX_CLK_SRC] = &nss_cc_port3_tx_clk_src.clkr,
+ [NSS_CC_PORT3_TX_DIV_CLK_SRC] = &nss_cc_port3_tx_div_clk_src.clkr,
+ [NSS_CC_PORT4_MAC_CLK] = &nss_cc_port4_mac_clk.clkr,
+ [NSS_CC_PORT4_RX_CLK] = &nss_cc_port4_rx_clk.clkr,
+ [NSS_CC_PORT4_RX_CLK_SRC] = &nss_cc_port4_rx_clk_src.clkr,
+ [NSS_CC_PORT4_RX_DIV_CLK_SRC] = &nss_cc_port4_rx_div_clk_src.clkr,
+ [NSS_CC_PORT4_TX_CLK] = &nss_cc_port4_tx_clk.clkr,
+ [NSS_CC_PORT4_TX_CLK_SRC] = &nss_cc_port4_tx_clk_src.clkr,
+ [NSS_CC_PORT4_TX_DIV_CLK_SRC] = &nss_cc_port4_tx_div_clk_src.clkr,
+ [NSS_CC_PORT5_MAC_CLK] = &nss_cc_port5_mac_clk.clkr,
+ [NSS_CC_PORT5_RX_CLK] = &nss_cc_port5_rx_clk.clkr,
+ [NSS_CC_PORT5_RX_CLK_SRC] = &nss_cc_port5_rx_clk_src.clkr,
+ [NSS_CC_PORT5_RX_DIV_CLK_SRC] = &nss_cc_port5_rx_div_clk_src.clkr,
+ [NSS_CC_PORT5_TX_CLK] = &nss_cc_port5_tx_clk.clkr,
+ [NSS_CC_PORT5_TX_CLK_SRC] = &nss_cc_port5_tx_clk_src.clkr,
+ [NSS_CC_PORT5_TX_DIV_CLK_SRC] = &nss_cc_port5_tx_div_clk_src.clkr,
+ [NSS_CC_PORT6_MAC_CLK] = &nss_cc_port6_mac_clk.clkr,
+ [NSS_CC_PORT6_RX_CLK] = &nss_cc_port6_rx_clk.clkr,
+ [NSS_CC_PORT6_RX_CLK_SRC] = &nss_cc_port6_rx_clk_src.clkr,
+ [NSS_CC_PORT6_RX_DIV_CLK_SRC] = &nss_cc_port6_rx_div_clk_src.clkr,
+ [NSS_CC_PORT6_TX_CLK] = &nss_cc_port6_tx_clk.clkr,
+ [NSS_CC_PORT6_TX_CLK_SRC] = &nss_cc_port6_tx_clk_src.clkr,
+ [NSS_CC_PORT6_TX_DIV_CLK_SRC] = &nss_cc_port6_tx_div_clk_src.clkr,
+ [NSS_CC_PPE_CLK_SRC] = &nss_cc_ppe_clk_src.clkr,
+ [NSS_CC_PPE_EDMA_CFG_CLK] = &nss_cc_ppe_edma_cfg_clk.clkr,
+ [NSS_CC_PPE_EDMA_CLK] = &nss_cc_ppe_edma_clk.clkr,
+ [NSS_CC_PPE_SWITCH_BTQ_CLK] = &nss_cc_ppe_switch_btq_clk.clkr,
+ [NSS_CC_PPE_SWITCH_CFG_CLK] = &nss_cc_ppe_switch_cfg_clk.clkr,
+ [NSS_CC_PPE_SWITCH_CLK] = &nss_cc_ppe_switch_clk.clkr,
+ [NSS_CC_PPE_SWITCH_IPE_CLK] = &nss_cc_ppe_switch_ipe_clk.clkr,
+ [NSS_CC_UBI0_CLK_SRC] = &nss_cc_ubi0_clk_src.clkr,
+ [NSS_CC_UBI0_DIV_CLK_SRC] = &nss_cc_ubi0_div_clk_src.clkr,
+ [NSS_CC_UBI1_CLK_SRC] = &nss_cc_ubi1_clk_src.clkr,
+ [NSS_CC_UBI1_DIV_CLK_SRC] = &nss_cc_ubi1_div_clk_src.clkr,
+ [NSS_CC_UBI2_CLK_SRC] = &nss_cc_ubi2_clk_src.clkr,
+ [NSS_CC_UBI2_DIV_CLK_SRC] = &nss_cc_ubi2_div_clk_src.clkr,
+ [NSS_CC_UBI32_AHB0_CLK] = &nss_cc_ubi32_ahb0_clk.clkr,
+ [NSS_CC_UBI32_AHB1_CLK] = &nss_cc_ubi32_ahb1_clk.clkr,
+ [NSS_CC_UBI32_AHB2_CLK] = &nss_cc_ubi32_ahb2_clk.clkr,
+ [NSS_CC_UBI32_AHB3_CLK] = &nss_cc_ubi32_ahb3_clk.clkr,
+ [NSS_CC_UBI32_AXI0_CLK] = &nss_cc_ubi32_axi0_clk.clkr,
+ [NSS_CC_UBI32_AXI1_CLK] = &nss_cc_ubi32_axi1_clk.clkr,
+ [NSS_CC_UBI32_AXI2_CLK] = &nss_cc_ubi32_axi2_clk.clkr,
+ [NSS_CC_UBI32_AXI3_CLK] = &nss_cc_ubi32_axi3_clk.clkr,
+ [NSS_CC_UBI32_CORE0_CLK] = &nss_cc_ubi32_core0_clk.clkr,
+ [NSS_CC_UBI32_CORE1_CLK] = &nss_cc_ubi32_core1_clk.clkr,
+ [NSS_CC_UBI32_CORE2_CLK] = &nss_cc_ubi32_core2_clk.clkr,
+ [NSS_CC_UBI32_CORE3_CLK] = &nss_cc_ubi32_core3_clk.clkr,
+ [NSS_CC_UBI32_INTR0_AHB_CLK] = &nss_cc_ubi32_intr0_ahb_clk.clkr,
+ [NSS_CC_UBI32_INTR1_AHB_CLK] = &nss_cc_ubi32_intr1_ahb_clk.clkr,
+ [NSS_CC_UBI32_INTR2_AHB_CLK] = &nss_cc_ubi32_intr2_ahb_clk.clkr,
+ [NSS_CC_UBI32_INTR3_AHB_CLK] = &nss_cc_ubi32_intr3_ahb_clk.clkr,
+ [NSS_CC_UBI32_NC_AXI0_CLK] = &nss_cc_ubi32_nc_axi0_clk.clkr,
+ [NSS_CC_UBI32_NC_AXI1_CLK] = &nss_cc_ubi32_nc_axi1_clk.clkr,
+ [NSS_CC_UBI32_NC_AXI2_CLK] = &nss_cc_ubi32_nc_axi2_clk.clkr,
+ [NSS_CC_UBI32_NC_AXI3_CLK] = &nss_cc_ubi32_nc_axi3_clk.clkr,
+ [NSS_CC_UBI32_UTCM0_CLK] = &nss_cc_ubi32_utcm0_clk.clkr,
+ [NSS_CC_UBI32_UTCM1_CLK] = &nss_cc_ubi32_utcm1_clk.clkr,
+ [NSS_CC_UBI32_UTCM2_CLK] = &nss_cc_ubi32_utcm2_clk.clkr,
+ [NSS_CC_UBI32_UTCM3_CLK] = &nss_cc_ubi32_utcm3_clk.clkr,
+ [NSS_CC_UBI3_CLK_SRC] = &nss_cc_ubi3_clk_src.clkr,
+ [NSS_CC_UBI3_DIV_CLK_SRC] = &nss_cc_ubi3_div_clk_src.clkr,
+ [NSS_CC_UBI_AXI_CLK_SRC] = &nss_cc_ubi_axi_clk_src.clkr,
+ [NSS_CC_UBI_NC_AXI_BFDCD_CLK_SRC] =
+ &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr,
+ [NSS_CC_UNIPHY_PORT1_RX_CLK] = &nss_cc_uniphy_port1_rx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT1_TX_CLK] = &nss_cc_uniphy_port1_tx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT2_RX_CLK] = &nss_cc_uniphy_port2_rx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT2_TX_CLK] = &nss_cc_uniphy_port2_tx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT3_RX_CLK] = &nss_cc_uniphy_port3_rx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT3_TX_CLK] = &nss_cc_uniphy_port3_tx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT4_RX_CLK] = &nss_cc_uniphy_port4_rx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT4_TX_CLK] = &nss_cc_uniphy_port4_tx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT5_RX_CLK] = &nss_cc_uniphy_port5_rx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT5_TX_CLK] = &nss_cc_uniphy_port5_tx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT6_RX_CLK] = &nss_cc_uniphy_port6_rx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT6_TX_CLK] = &nss_cc_uniphy_port6_tx_clk.clkr,
+ [NSS_CC_XGMAC0_PTP_REF_CLK] = &nss_cc_xgmac0_ptp_ref_clk.clkr,
+ [NSS_CC_XGMAC0_PTP_REF_DIV_CLK_SRC] =
+ &nss_cc_xgmac0_ptp_ref_div_clk_src.clkr,
+ [NSS_CC_XGMAC1_PTP_REF_CLK] = &nss_cc_xgmac1_ptp_ref_clk.clkr,
+ [NSS_CC_XGMAC1_PTP_REF_DIV_CLK_SRC] =
+ &nss_cc_xgmac1_ptp_ref_div_clk_src.clkr,
+ [NSS_CC_XGMAC2_PTP_REF_CLK] = &nss_cc_xgmac2_ptp_ref_clk.clkr,
+ [NSS_CC_XGMAC2_PTP_REF_DIV_CLK_SRC] =
+ &nss_cc_xgmac2_ptp_ref_div_clk_src.clkr,
+ [NSS_CC_XGMAC3_PTP_REF_CLK] = &nss_cc_xgmac3_ptp_ref_clk.clkr,
+ [NSS_CC_XGMAC3_PTP_REF_DIV_CLK_SRC] =
+ &nss_cc_xgmac3_ptp_ref_div_clk_src.clkr,
+ [NSS_CC_XGMAC4_PTP_REF_CLK] = &nss_cc_xgmac4_ptp_ref_clk.clkr,
+ [NSS_CC_XGMAC4_PTP_REF_DIV_CLK_SRC] =
+ &nss_cc_xgmac4_ptp_ref_div_clk_src.clkr,
+ [NSS_CC_XGMAC5_PTP_REF_CLK] = &nss_cc_xgmac5_ptp_ref_clk.clkr,
+ [NSS_CC_XGMAC5_PTP_REF_DIV_CLK_SRC] =
+ &nss_cc_xgmac5_ptp_ref_div_clk_src.clkr,
+ [UBI32_PLL] = &ubi32_pll.clkr,
+ [UBI32_PLL_MAIN] = &ubi32_pll_main.clkr,
+};
+
+static const struct qcom_reset_map nss_cc_ipq9574_resets[] = {
+ [NSS_CC_CE_BCR] = { 0x28400, 0 },
+ [NSS_CC_CLC_BCR] = { 0x28600, 0 },
+ [NSS_CC_EIP197_BCR] = { 0x16004, 0 },
+ [NSS_CC_HAQ_BCR] = { 0x28300, 0 },
+ [NSS_CC_IMEM_BCR] = { 0xe004, 0 },
+ [NSS_CC_MAC_BCR] = { 0x28100, 0 },
+ [NSS_CC_PPE_BCR] = { 0x28200, 0 },
+ [NSS_CC_UBI_BCR] = { 0x28700, 0 },
+ [NSS_CC_UNIPHY_BCR] = { 0x28900, 0 },
+ [UBI3_CLKRST_CLAMP_ENABLE] = { 0x28a04, 9 },
+ [UBI3_CORE_CLAMP_ENABLE] = { 0x28a04, 8 },
+ [UBI2_CLKRST_CLAMP_ENABLE] = { 0x28a04, 7 },
+ [UBI2_CORE_CLAMP_ENABLE] = { 0x28a04, 6 },
+ [UBI1_CLKRST_CLAMP_ENABLE] = { 0x28a04, 5 },
+ [UBI1_CORE_CLAMP_ENABLE] = { 0x28a04, 4 },
+ [UBI0_CLKRST_CLAMP_ENABLE] = { 0x28a04, 3 },
+ [UBI0_CORE_CLAMP_ENABLE] = { 0x28a04, 2 },
+ [NSSNOC_NSS_CSR_ARES] = { 0x28a04, 1 },
+ [NSS_CSR_ARES] = { 0x28a04, 0 },
+ [PPE_BTQ_ARES] = { 0x28a08, 20 },
+ [PPE_IPE_ARES] = { 0x28a08, 19 },
+ [PPE_ARES] = { 0x28a08, 18 },
+ [PPE_CFG_ARES] = { 0x28a08, 17 },
+ [PPE_EDMA_ARES] = { 0x28a08, 16 },
+ [PPE_EDMA_CFG_ARES] = { 0x28a08, 15 },
+ [CRY_PPE_ARES] = { 0x28a08, 14 },
+ [NSSNOC_PPE_ARES] = { 0x28a08, 13 },
+ [NSSNOC_PPE_CFG_ARES] = { 0x28a08, 12 },
+ [PORT1_MAC_ARES] = { 0x28a08, 11 },
+ [PORT2_MAC_ARES] = { 0x28a08, 10 },
+ [PORT3_MAC_ARES] = { 0x28a08, 9 },
+ [PORT4_MAC_ARES] = { 0x28a08, 8 },
+ [PORT5_MAC_ARES] = { 0x28a08, 7 },
+ [PORT6_MAC_ARES] = { 0x28a08, 6 },
+ [XGMAC0_PTP_REF_ARES] = { 0x28a08, 5 },
+ [XGMAC1_PTP_REF_ARES] = { 0x28a08, 4 },
+ [XGMAC2_PTP_REF_ARES] = { 0x28a08, 3 },
+ [XGMAC3_PTP_REF_ARES] = { 0x28a08, 2 },
+ [XGMAC4_PTP_REF_ARES] = { 0x28a08, 1 },
+ [XGMAC5_PTP_REF_ARES] = { 0x28a08, 0 },
+ [HAQ_AHB_ARES] = { 0x28a0c, 3 },
+ [HAQ_AXI_ARES] = { 0x28a0c, 2 },
+ [NSSNOC_HAQ_AHB_ARES] = { 0x28a0c, 1 },
+ [NSSNOC_HAQ_AXI_ARES] = { 0x28a0c, 0 },
+ [CE_APB_ARES] = { 0x28a10, 3 },
+ [CE_AXI_ARES] = { 0x28a10, 2 },
+ [NSSNOC_CE_APB_ARES] = { 0x28a10, 1 },
+ [NSSNOC_CE_AXI_ARES] = { 0x28a10, 0 },
+ [CRYPTO_ARES] = { 0x28a14, 1 },
+ [NSSNOC_CRYPTO_ARES] = { 0x28a14, 0 },
+ [NSSNOC_NC_AXI0_1_ARES] = { 0x28a1c, 28 },
+ [UBI0_CORE_ARES] = { 0x28a1c, 27 },
+ [UBI1_CORE_ARES] = { 0x28a1c, 26 },
+ [UBI2_CORE_ARES] = { 0x28a1c, 25 },
+ [UBI3_CORE_ARES] = { 0x28a1c, 24 },
+ [NC_AXI0_ARES] = { 0x28a1c, 23 },
+ [UTCM0_ARES] = { 0x28a1c, 22 },
+ [NC_AXI1_ARES] = { 0x28a1c, 21 },
+ [UTCM1_ARES] = { 0x28a1c, 20 },
+ [NC_AXI2_ARES] = { 0x28a1c, 19 },
+ [UTCM2_ARES] = { 0x28a1c, 18 },
+ [NC_AXI3_ARES] = { 0x28a1c, 17 },
+ [UTCM3_ARES] = { 0x28a1c, 16 },
+ [NSSNOC_NC_AXI0_ARES] = { 0x28a1c, 15 },
+ [AHB0_ARES] = { 0x28a1c, 14 },
+ [INTR0_AHB_ARES] = { 0x28a1c, 13 },
+ [AHB1_ARES] = { 0x28a1c, 12 },
+ [INTR1_AHB_ARES] = { 0x28a1c, 11 },
+ [AHB2_ARES] = { 0x28a1c, 10 },
+ [INTR2_AHB_ARES] = { 0x28a1c, 9 },
+ [AHB3_ARES] = { 0x28a1c, 8 },
+ [INTR3_AHB_ARES] = { 0x28a1c, 7 },
+ [NSSNOC_AHB0_ARES] = { 0x28a1c, 6 },
+ [NSSNOC_INT0_AHB_ARES] = { 0x28a1c, 5 },
+ [AXI0_ARES] = { 0x28a1c, 4 },
+ [AXI1_ARES] = { 0x28a1c, 3 },
+ [AXI2_ARES] = { 0x28a1c, 2 },
+ [AXI3_ARES] = { 0x28a1c, 1 },
+ [NSSNOC_AXI0_ARES] = { 0x28a1c, 0 },
+ [IMEM_QSB_ARES] = { 0x28a20, 3 },
+ [NSSNOC_IMEM_QSB_ARES] = { 0x28a20, 2 },
+ [IMEM_AHB_ARES] = { 0x28a20, 1 },
+ [NSSNOC_IMEM_AHB_ARES] = { 0x28a20, 0 },
+ [UNIPHY_PORT1_RX_ARES] = { 0x28a24, 23 },
+ [UNIPHY_PORT1_TX_ARES] = { 0x28a24, 22 },
+ [UNIPHY_PORT2_RX_ARES] = { 0x28a24, 21 },
+ [UNIPHY_PORT2_TX_ARES] = { 0x28a24, 20 },
+ [UNIPHY_PORT3_RX_ARES] = { 0x28a24, 19 },
+ [UNIPHY_PORT3_TX_ARES] = { 0x28a24, 18 },
+ [UNIPHY_PORT4_RX_ARES] = { 0x28a24, 17 },
+ [UNIPHY_PORT4_TX_ARES] = { 0x28a24, 16 },
+ [UNIPHY_PORT5_RX_ARES] = { 0x28a24, 15 },
+ [UNIPHY_PORT5_TX_ARES] = { 0x28a24, 14 },
+ [UNIPHY_PORT6_RX_ARES] = { 0x28a24, 13 },
+ [UNIPHY_PORT6_TX_ARES] = { 0x28a24, 12 },
+ [PORT1_RX_ARES] = { 0x28a24, 11 },
+ [PORT1_TX_ARES] = { 0x28a24, 10 },
+ [PORT2_RX_ARES] = { 0x28a24, 9 },
+ [PORT2_TX_ARES] = { 0x28a24, 8 },
+ [PORT3_RX_ARES] = { 0x28a24, 7 },
+ [PORT3_TX_ARES] = { 0x28a24, 6 },
+ [PORT4_RX_ARES] = { 0x28a24, 5 },
+ [PORT4_TX_ARES] = { 0x28a24, 4 },
+ [PORT5_RX_ARES] = { 0x28a24, 3 },
+ [PORT5_TX_ARES] = { 0x28a24, 2 },
+ [PORT6_RX_ARES] = { 0x28a24, 1 },
+ [PORT6_TX_ARES] = { 0x28a24, 0 },
+ [PPE_FULL_RESET] = { .reg = 0x28a08, .bitmask = GENMASK(20, 17) },
+ [UNIPHY0_SOFT_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(23, 14) },
+ [UNIPHY1_SOFT_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(15, 14) },
+ [UNIPHY2_SOFT_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(13, 12) },
+ [UNIPHY_PORT1_ARES] = { .reg = 0x28a24, .bitmask = GENMASK(23, 22) },
+ [UNIPHY_PORT2_ARES] = { .reg = 0x28a24, .bitmask = GENMASK(21, 20) },
+ [UNIPHY_PORT3_ARES] = { .reg = 0x28a24, .bitmask = GENMASK(19, 18) },
+ [UNIPHY_PORT4_ARES] = { .reg = 0x28a24, .bitmask = GENMASK(17, 16) },
+ [UNIPHY_PORT5_ARES] = { .reg = 0x28a24, .bitmask = GENMASK(15, 14) },
+ [UNIPHY_PORT6_ARES] = { .reg = 0x28a24, .bitmask = GENMASK(13, 12) },
+ [NSSPORT1_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(11, 10) },
+ [NSSPORT2_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(9, 8) },
+ [NSSPORT3_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(7, 6) },
+ [NSSPORT4_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(5, 4) },
+ [NSSPORT5_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(3, 2) },
+ [NSSPORT6_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(1, 0) },
+ [EDMA_HW_RESET] = { .reg = 0x28a08, .bitmask = GENMASK(16, 15) },
+};
+
+static const struct regmap_config nss_cc_ipq9574_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x28a34,
+ .fast_io = true,
+};
+
+static struct qcom_icc_hws_data icc_ipq9574_nss_hws[] = {
+ { MASTER_NSSNOC_PPE, SLAVE_NSSNOC_PPE, NSS_CC_NSSNOC_PPE_CLK },
+ { MASTER_NSSNOC_PPE_CFG, SLAVE_NSSNOC_PPE_CFG, NSS_CC_NSSNOC_PPE_CFG_CLK },
+ { MASTER_NSSNOC_NSS_CSR, SLAVE_NSSNOC_NSS_CSR, NSS_CC_NSSNOC_NSS_CSR_CLK },
+ { MASTER_NSSNOC_IMEM_QSB, SLAVE_NSSNOC_IMEM_QSB, NSS_CC_NSSNOC_IMEM_QSB_CLK },
+ { MASTER_NSSNOC_IMEM_AHB, SLAVE_NSSNOC_IMEM_AHB, NSS_CC_NSSNOC_IMEM_AHB_CLK },
+};
+
+#define IPQ_NSSCC_ID (9574 * 2) /* some unique value */
+
+static const struct qcom_cc_desc nss_cc_ipq9574_desc = {
+ .config = &nss_cc_ipq9574_regmap_config,
+ .clks = nss_cc_ipq9574_clocks,
+ .num_clks = ARRAY_SIZE(nss_cc_ipq9574_clocks),
+ .resets = nss_cc_ipq9574_resets,
+ .num_resets = ARRAY_SIZE(nss_cc_ipq9574_resets),
+ .icc_hws = icc_ipq9574_nss_hws,
+ .num_icc_hws = ARRAY_SIZE(icc_ipq9574_nss_hws),
+ .icc_first_node_id = IPQ_NSSCC_ID,
+};
+
+static const struct dev_pm_ops nss_cc_ipq9574_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
+static const struct of_device_id nss_cc_ipq9574_match_table[] = {
+ { .compatible = "qcom,ipq9574-nsscc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, nss_cc_ipq9574_match_table);
+
+static int nss_cc_ipq9574_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Fail to enable runtime PM\n");
+
+ ret = devm_pm_clk_create(&pdev->dev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Fail to create PM clock\n");
+
+ ret = pm_clk_add(&pdev->dev, "bus");
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Fail to add bus clock\n");
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Fail to resume\n");
+
+ regmap = qcom_cc_map(pdev, &nss_cc_ipq9574_desc);
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
+ return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
+ "Fail to map clock controller registers\n");
+ }
+
+ clk_alpha_pll_configure(&ubi32_pll_main, regmap, &ubi32_pll_config);
+
+ ret = qcom_cc_really_probe(&pdev->dev, &nss_cc_ipq9574_desc, regmap);
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver nss_cc_ipq9574_driver = {
+ .probe = nss_cc_ipq9574_probe,
+ .driver = {
+ .name = "qcom,nsscc-ipq9574",
+ .of_match_table = nss_cc_ipq9574_match_table,
+ .pm = &nss_cc_ipq9574_pm_ops,
+ .sync_state = icc_sync_state,
+ },
+};
+
+module_platform_driver(nss_cc_ipq9574_driver);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. NSSCC IPQ9574 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/videocc-sa8775p.c b/drivers/clk/qcom/videocc-sa8775p.c
index bf5de411fd5d..2476201dcd20 100644
--- a/drivers/clk/qcom/videocc-sa8775p.c
+++ b/drivers/clk/qcom/videocc-sa8775p.c
@@ -512,7 +512,7 @@ static const struct regmap_config video_cc_sa8775p_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc video_cc_sa8775p_desc = {
+static const struct qcom_cc_desc video_cc_sa8775p_desc = {
.config = &video_cc_sa8775p_regmap_config,
.clks = video_cc_sa8775p_clocks,
.num_clks = ARRAY_SIZE(video_cc_sa8775p_clocks),
@@ -523,6 +523,7 @@ static struct qcom_cc_desc video_cc_sa8775p_desc = {
};
static const struct of_device_id video_cc_sa8775p_match_table[] = {
+ { .compatible = "qcom,qcs8300-videocc" },
{ .compatible = "qcom,sa8775p-videocc" },
{ }
};
@@ -550,6 +551,13 @@ static int video_cc_sa8775p_probe(struct platform_device *pdev)
clk_lucid_evo_pll_configure(&video_pll0, regmap, &video_pll0_config);
clk_lucid_evo_pll_configure(&video_pll1, regmap, &video_pll1_config);
+	/*
+	 * Set the mvs0c clock divider to div-3 so that the mvs0 and
+	 * mvs0c clocks run at the same frequency on QCS8300
+	 */
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,qcs8300-videocc"))
+ regmap_write(regmap, video_cc_mvs0c_div2_div_clk_src.reg, 2);
+
/* Keep some clocks always enabled */
qcom_branch_set_clk_en(regmap, 0x80ec); /* VIDEO_CC_AHB_CLK */
qcom_branch_set_clk_en(regmap, 0x8144); /* VIDEO_CC_SLEEP_CLK */
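
A note on the value written above: judging from the comment, the mvs0c divider field is zero-based, so a programmed value of N selects a divide-by-(N + 1) ratio and writing 2 gives the div-3 the comment asks for. A tiny sketch of that relationship, with an illustrative helper name that is not part of the driver:

/* Zero-based divider field: programmed value N means divide by N + 1. */
static inline unsigned int video_cc_div_field(unsigned int ratio)
{
	return ratio - 1;	/* div-3 -> 2, as written for QCS8300 above */
}
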
diff --git a/drivers/clk/qcom/videocc-sm8350.c b/drivers/clk/qcom/videocc-sm8350.c
index 874d4da95ff8..057a9474894a 100644
--- a/drivers/clk/qcom/videocc-sm8350.c
+++ b/drivers/clk/qcom/videocc-sm8350.c
@@ -510,7 +510,7 @@ static const struct regmap_config video_cc_sm8350_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc video_cc_sm8350_desc = {
+static const struct qcom_cc_desc video_cc_sm8350_desc = {
.config = &video_cc_sm8350_regmap_config,
.clks = video_cc_sm8350_clocks,
.num_clks = ARRAY_SIZE(video_cc_sm8350_clocks),
diff --git a/drivers/clk/qcom/videocc-sm8450.c b/drivers/clk/qcom/videocc-sm8450.c
index f26c7eccb62e..2e11dcffb664 100644
--- a/drivers/clk/qcom/videocc-sm8450.c
+++ b/drivers/clk/qcom/videocc-sm8450.c
@@ -415,7 +415,7 @@ static const struct regmap_config video_cc_sm8450_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc video_cc_sm8450_desc = {
+static const struct qcom_cc_desc video_cc_sm8450_desc = {
.config = &video_cc_sm8450_regmap_config,
.clks = video_cc_sm8450_clocks,
.num_clks = ARRAY_SIZE(video_cc_sm8450_clocks),
diff --git a/drivers/clk/qcom/videocc-sm8550.c b/drivers/clk/qcom/videocc-sm8550.c
index 7c25a50cfa97..fcfe0cade6d0 100644
--- a/drivers/clk/qcom/videocc-sm8550.c
+++ b/drivers/clk/qcom/videocc-sm8550.c
@@ -519,7 +519,7 @@ static const struct regmap_config video_cc_sm8550_regmap_config = {
.fast_io = true,
};
-static struct qcom_cc_desc video_cc_sm8550_desc = {
+static const struct qcom_cc_desc video_cc_sm8550_desc = {
.config = &video_cc_sm8550_regmap_config,
.clks = video_cc_sm8550_clocks,
.num_clks = ARRAY_SIZE(video_cc_sm8550_clocks),
diff --git a/drivers/clk/renesas/r7s9210-cpg-mssr.c b/drivers/clk/renesas/r7s9210-cpg-mssr.c
index a85227c248f3..e1812867a6da 100644
--- a/drivers/clk/renesas/r7s9210-cpg-mssr.c
+++ b/drivers/clk/renesas/r7s9210-cpg-mssr.c
@@ -170,11 +170,12 @@ static struct clk * __init rza2_cpg_clk_register(struct device *dev,
if (IS_ERR(parent))
return ERR_CAST(parent);
- switch (core->id) {
- case CLK_MAIN:
+ switch (core->type) {
+ case CLK_TYPE_RZA_MAIN:
+ r7s9210_update_clk_table(parent, base);
break;
- case CLK_PLL:
+ case CLK_TYPE_RZA_PLL:
if (cpg_mode)
mult = 44; /* Divider 1 is 1/2 */
else
@@ -185,9 +186,6 @@ static struct clk * __init rza2_cpg_clk_register(struct device *dev,
return ERR_PTR(-EINVAL);
}
- if (core->id == CLK_MAIN)
- r7s9210_update_clk_table(parent, base);
-
return clk_register_fixed_factor(NULL, core->name,
__clk_get_name(parent), 0, mult, div);
}
diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
index 9c7e4094705c..1be7b9592aa6 100644
--- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
@@ -138,6 +138,10 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
};
static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
+ DEF_MOD("isp0", 16, R8A779A0_CLK_S1D1),
+ DEF_MOD("isp1", 17, R8A779A0_CLK_S1D1),
+ DEF_MOD("isp2", 18, R8A779A0_CLK_S1D1),
+ DEF_MOD("isp3", 19, R8A779A0_CLK_S1D1),
DEF_MOD("avb0", 211, R8A779A0_CLK_S3D2),
DEF_MOD("avb1", 212, R8A779A0_CLK_S3D2),
DEF_MOD("avb2", 213, R8A779A0_CLK_S3D2),
@@ -238,6 +242,10 @@ static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
DEF_MOD("vspx1", 1029, R8A779A0_CLK_S1D1),
DEF_MOD("vspx2", 1030, R8A779A0_CLK_S1D1),
DEF_MOD("vspx3", 1031, R8A779A0_CLK_S1D1),
+ DEF_MOD("fcpvx0", 1100, R8A779A0_CLK_S1D1),
+ DEF_MOD("fcpvx1", 1101, R8A779A0_CLK_S1D1),
+ DEF_MOD("fcpvx2", 1102, R8A779A0_CLK_S1D1),
+ DEF_MOD("fcpvx3", 1103, R8A779A0_CLK_S1D1),
};
static const unsigned int r8a779a0_crit_mod_clks[] __initconst = {
diff --git a/drivers/clk/renesas/r8a779g0-cpg-mssr.c b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
index d45571096b96..015b9773cc55 100644
--- a/drivers/clk/renesas/r8a779g0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
@@ -163,6 +163,8 @@ static const struct cpg_core_clk r8a779g0_core_clks[] __initconst = {
};
static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = {
+ DEF_MOD("isp0", 16, R8A779G0_CLK_S0D2_VIO),
+ DEF_MOD("isp1", 17, R8A779G0_CLK_S0D2_VIO),
DEF_MOD("avb0", 211, R8A779G0_CLK_S0D4_HSC),
DEF_MOD("avb1", 212, R8A779G0_CLK_S0D4_HSC),
DEF_MOD("avb2", 213, R8A779G0_CLK_S0D4_HSC),
diff --git a/drivers/clk/renesas/r8a779h0-cpg-mssr.c b/drivers/clk/renesas/r8a779h0-cpg-mssr.c
index 607fa815b6c1..ffea06d77d5e 100644
--- a/drivers/clk/renesas/r8a779h0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779h0-cpg-mssr.c
@@ -171,6 +171,7 @@ static const struct cpg_core_clk r8a779h0_core_clks[] __initconst = {
};
static const struct mssr_mod_clk r8a779h0_mod_clks[] __initconst = {
+ DEF_MOD("isp0", 16, R8A779H0_CLK_S0D2_VIO),
DEF_MOD("avb0:rgmii0", 211, R8A779H0_CLK_S0D8_HSC),
DEF_MOD("avb1:rgmii1", 212, R8A779H0_CLK_S0D8_HSC),
DEF_MOD("avb2:rgmii2", 213, R8A779H0_CLK_S0D8_HSC),
@@ -238,6 +239,8 @@ static const struct mssr_mod_clk r8a779h0_mod_clks[] __initconst = {
DEF_MOD("pfc1", 916, R8A779H0_CLK_CP),
DEF_MOD("pfc2", 917, R8A779H0_CLK_CP),
DEF_MOD("tsc2:tsc1", 919, R8A779H0_CLK_CL16M),
+ DEF_MOD("vspx0", 1028, R8A779H0_CLK_S0D1_VIO),
+ DEF_MOD("fcpvx0", 1100, R8A779H0_CLK_S0D1_VIO),
DEF_MOD("ssiu", 2926, R8A779H0_CLK_S0D6_PER),
DEF_MOD("ssi", 2927, R8A779H0_CLK_S0D6_PER),
};
diff --git a/drivers/clk/renesas/r9a07g043-cpg.c b/drivers/clk/renesas/r9a07g043-cpg.c
index c3c2b0c43983..fce2eecfa8c0 100644
--- a/drivers/clk/renesas/r9a07g043-cpg.c
+++ b/drivers/clk/renesas/r9a07g043-cpg.c
@@ -89,7 +89,9 @@ static const struct clk_div_table dtable_1_32[] = {
/* Mux clock tables */
static const char * const sel_pll3_3[] = { ".pll3_533", ".pll3_400" };
+#ifdef CONFIG_ARM64
static const char * const sel_pll6_2[] = { ".pll6_250", ".pll5_250" };
+#endif
static const char * const sel_sdhi[] = { ".clk_533", ".clk_400", ".clk_266" };
static const u32 mtable_sdhi[] = { 1, 2, 3 };
@@ -137,7 +139,12 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = {
DEF_DIV("P2", R9A07G043_CLK_P2, CLK_PLL3_DIV2_4_2, DIVPL3A, dtable_1_32),
DEF_FIXED("M0", R9A07G043_CLK_M0, CLK_PLL3_DIV2_4, 1, 1),
DEF_FIXED("ZT", R9A07G043_CLK_ZT, CLK_PLL3_DIV2_4_2, 1, 1),
+#ifdef CONFIG_ARM64
DEF_MUX("HP", R9A07G043_CLK_HP, SEL_PLL6_2, sel_pll6_2),
+#endif
+#ifdef CONFIG_RISCV
+ DEF_FIXED("HP", R9A07G043_CLK_HP, CLK_PLL6_250, 1, 1),
+#endif
DEF_FIXED("SPI0", R9A07G043_CLK_SPI0, CLK_DIV_PLL3_C, 1, 2),
DEF_FIXED("SPI1", R9A07G043_CLK_SPI1, CLK_DIV_PLL3_C, 1, 4),
DEF_SD_MUX("SD0", R9A07G043_CLK_SD0, SEL_SDHI0, SEL_SDHI0_STS, sel_sdhi,
diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c
index f6df3f7a31b5..77ca3a789568 100644
--- a/drivers/clk/renesas/r9a07g044-cpg.c
+++ b/drivers/clk/renesas/r9a07g044-cpg.c
@@ -94,6 +94,41 @@ static const struct clk_div_table dtable_1_32[] = {
{0, 0},
};
+#ifdef CONFIG_CLK_R9A07G054
+static const struct clk_div_table dtable_4_32[] = {
+ {3, 4},
+ {4, 5},
+ {5, 6},
+ {6, 7},
+ {7, 8},
+ {8, 9},
+ {9, 10},
+ {10, 11},
+ {11, 12},
+ {12, 13},
+ {13, 14},
+ {14, 15},
+ {15, 16},
+ {16, 17},
+ {17, 18},
+ {18, 19},
+ {19, 20},
+ {20, 21},
+ {21, 22},
+ {22, 23},
+ {23, 24},
+ {24, 25},
+ {25, 26},
+ {26, 27},
+ {27, 28},
+ {28, 29},
+ {29, 30},
+ {30, 31},
+ {31, 32},
+ {0, 0},
+};
+#endif
+
static const struct clk_div_table dtable_16_128[] = {
{0, 16},
{1, 32},
@@ -114,7 +149,7 @@ static const u32 mtable_sdhi[] = { 1, 2, 3 };
static const struct {
struct cpg_core_clk common[56];
#ifdef CONFIG_CLK_R9A07G054
- struct cpg_core_clk drp[0];
+ struct cpg_core_clk drp[3];
#endif
} core_clks __initconst = {
.common = {
@@ -192,6 +227,9 @@ static const struct {
},
#ifdef CONFIG_CLK_R9A07G054
.drp = {
+ DEF_FIXED("DRP_M", R9A07G054_CLK_DRP_M, CLK_PLL3, 1, 5),
+ DEF_FIXED("DRP_D", R9A07G054_CLK_DRP_D, CLK_PLL3, 1, 2),
+ DEF_DIV("DRP_A", R9A07G054_CLK_DRP_A, CLK_PLL3, DIVPL3E, dtable_4_32),
},
#endif
};
@@ -199,7 +237,7 @@ static const struct {
static const struct {
struct rzg2l_mod_clk common[79];
#ifdef CONFIG_CLK_R9A07G054
- struct rzg2l_mod_clk drp[0];
+ struct rzg2l_mod_clk drp[5];
#endif
} mod_clks = {
.common = {
@@ -364,6 +402,16 @@ static const struct {
},
#ifdef CONFIG_CLK_R9A07G054
.drp = {
+ DEF_MOD("stpai_initclk", R9A07G054_STPAI_INITCLK, R9A07G044_OSCCLK,
+ 0x5e8, 0),
+ DEF_MOD("stpai_aclk", R9A07G054_STPAI_ACLK, R9A07G044_CLK_P1,
+ 0x5e8, 1),
+ DEF_MOD("stpai_mclk", R9A07G054_STPAI_MCLK, R9A07G054_CLK_DRP_M,
+ 0x5e8, 2),
+ DEF_MOD("stpai_dclkin", R9A07G054_STPAI_DCLKIN, R9A07G054_CLK_DRP_D,
+ 0x5e8, 3),
+ DEF_MOD("stpai_aclk_drp", R9A07G054_STPAI_ACLK_DRP, R9A07G054_CLK_DRP_A,
+ 0x5e8, 4),
},
#endif
};
@@ -430,6 +478,9 @@ static const struct rzg2l_reset r9a07g044_resets[] = {
DEF_RST(R9A07G044_ADC_PRESETN, 0x8a8, 0),
DEF_RST(R9A07G044_ADC_ADRST_N, 0x8a8, 1),
DEF_RST(R9A07G044_TSU_PRESETN, 0x8ac, 0),
+#ifdef CONFIG_CLK_R9A07G054
+ DEF_RST(R9A07G054_STPAI_ARESETN, 0x8e8, 0),
+#endif
};
static const unsigned int r9a07g044_crit_mod_clks[] __initconst = {
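
The dtable_4_32 table added for the DRP_A divider above follows the usual RZ/G2L convention of a register value N selecting a divide-by-(N + 1) ratio, here limited to /4 through /32, with the {0, 0} entry terminating the table. A minimal sketch of the lookup performed against such a clk_div_table; the helper name is chosen for illustration:

#include <linux/clk-provider.h>

/* Return the divisor for a register field value, or 0 if it is not listed. */
static unsigned int rzg2l_div_table_to_div(const struct clk_div_table *table,
					   unsigned int val)
{
	const struct clk_div_table *t;

	for (t = table; t->div; t++)
		if (t->val == val)
			return t->div;

	return 0;
}

/* With dtable_4_32, a register value of 3 selects /4 and 31 selects /32. */
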
diff --git a/drivers/clk/renesas/r9a08g045-cpg.c b/drivers/clk/renesas/r9a08g045-cpg.c
index 0e7e3bf05b52..4035f3443598 100644
--- a/drivers/clk/renesas/r9a08g045-cpg.c
+++ b/drivers/clk/renesas/r9a08g045-cpg.c
@@ -51,7 +51,7 @@
#define G3S_SEL_SDHI2 SEL_PLL_PACK(G3S_CPG_SDHI_DSEL, 8, 2)
/* PLL 1/4/6 configuration registers macro. */
-#define G3S_PLL146_CONF(clk1, clk2) ((clk1) << 22 | (clk2) << 12)
+#define G3S_PLL146_CONF(clk1, clk2, setting) ((clk1) << 22 | (clk2) << 12 | (setting))
#define DEF_G3S_MUX(_name, _id, _conf, _parent_names, _mux_flags, _clk_flags) \
DEF_TYPE(_name, _id, CLK_TYPE_MUX, .conf = (_conf), \
@@ -134,7 +134,8 @@ static const struct cpg_core_clk r9a08g045_core_clks[] __initconst = {
/* Internal Core Clocks */
DEF_FIXED(".osc_div1000", CLK_OSC_DIV1000, CLK_EXTAL, 1, 1000),
- DEF_G3S_PLL(".pll1", CLK_PLL1, CLK_EXTAL, G3S_PLL146_CONF(0x4, 0x8)),
+ DEF_G3S_PLL(".pll1", CLK_PLL1, CLK_EXTAL, G3S_PLL146_CONF(0x4, 0x8, 0x100),
+ 1100000000UL),
DEF_FIXED(".pll2", CLK_PLL2, CLK_EXTAL, 200, 3),
DEF_FIXED(".pll3", CLK_PLL3, CLK_EXTAL, 200, 3),
DEF_FIXED(".pll4", CLK_PLL4, CLK_EXTAL, 100, 3),
@@ -241,6 +242,7 @@ static const struct rzg2l_mod_clk r9a08g045_mod_clks[] = {
DEF_MOD("gpio_hclk", R9A08G045_GPIO_HCLK, R9A08G045_OSCCLK, 0x598, 0),
DEF_MOD("adc_adclk", R9A08G045_ADC_ADCLK, R9A08G045_CLK_TSU, 0x5a8, 0),
DEF_MOD("adc_pclk", R9A08G045_ADC_PCLK, R9A08G045_CLK_TSU, 0x5a8, 1),
+ DEF_MOD("tsu_pclk", R9A08G045_TSU_PCLK, R9A08G045_CLK_TSU, 0x5ac, 0),
DEF_MOD("vbat_bclk", R9A08G045_VBAT_BCLK, R9A08G045_OSCCLK, 0x614, 0),
};
@@ -279,6 +281,7 @@ static const struct rzg2l_reset r9a08g045_resets[] = {
DEF_RST(R9A08G045_GPIO_SPARE_RESETN, 0x898, 2),
DEF_RST(R9A08G045_ADC_PRESETN, 0x8a8, 0),
DEF_RST(R9A08G045_ADC_ADRST_N, 0x8a8, 1),
+ DEF_RST(R9A08G045_TSU_PRESETN, 0x8ac, 0),
DEF_RST(R9A08G045_VBAT_BRESETN, 0x914, 0),
};
@@ -353,6 +356,8 @@ static const struct rzg2l_cpg_pm_domain_init_data r9a08g045_pm_domains[] = {
DEF_REG_CONF(CPG_BUS_MCPU3_MSTOP, BIT(4)), 0),
DEF_PD("adc", R9A08G045_PD_ADC,
DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(14)), 0),
+ DEF_PD("tsu", R9A08G045_PD_TSU,
+ DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(15)), 0),
DEF_PD("vbat", R9A08G045_PD_VBAT,
DEF_REG_CONF(CPG_BUS_MCPU3_MSTOP, BIT(8)),
GENPD_FLAG_ALWAYS_ON),
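
The .pll1 entry above relies on the widened G3S_PLL146_CONF() macro: besides the two PLL register offsets it now packs a third "setting" register offset into the low 12 bits, which the RZ/G3S PLL code further down in rzg2l-cpg.c recovers with GET_REG_SAMPLL_SETTING() to decide whether to report the 1.1 GHz default rate. A compile-time check of that round trip for the values used here, as a sketch that duplicates the two macros from this series:

#include <linux/build_bug.h>

#define G3S_PLL146_CONF(clk1, clk2, setting) \
	((clk1) << 22 | (clk2) << 12 | (setting))
#define GET_REG_SAMPLL_SETTING(val)	((val) & 0xfff)

/* .pll1: CLK1 offset 0x4, CLK2 offset 0x8, setting register offset 0x100 */
static_assert(GET_REG_SAMPLL_SETTING(G3S_PLL146_CONF(0x4, 0x8, 0x100)) == 0x100);
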
diff --git a/drivers/clk/renesas/r9a09g047-cpg.c b/drivers/clk/renesas/r9a09g047-cpg.c
index 536d922bed70..e9cf4342d0cf 100644
--- a/drivers/clk/renesas/r9a09g047-cpg.c
+++ b/drivers/clk/renesas/r9a09g047-cpg.c
@@ -28,12 +28,19 @@ enum clk_ids {
CLK_PLLCLN,
CLK_PLLDTY,
CLK_PLLCA55,
+ CLK_PLLVDO,
/* Internal Core Clocks */
CLK_PLLCM33_DIV16,
+ CLK_PLLCLN_DIV2,
+ CLK_PLLCLN_DIV8,
CLK_PLLCLN_DIV16,
+ CLK_PLLCLN_DIV20,
CLK_PLLDTY_ACPU,
+ CLK_PLLDTY_ACPU_DIV2,
CLK_PLLDTY_ACPU_DIV4,
+ CLK_PLLDTY_DIV16,
+ CLK_PLLVDO_CRU0,
/* Module Clocks */
MOD_CLK_BASE,
@@ -47,6 +54,12 @@ static const struct clk_div_table dtable_1_8[] = {
{0, 0},
};
+static const struct clk_div_table dtable_2_4[] = {
+ {0, 2},
+ {1, 4},
+ {0, 0},
+};
+
static const struct clk_div_table dtable_2_64[] = {
{0, 2},
{1, 4},
@@ -67,14 +80,22 @@ static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLL_CONF(0x64)),
+ DEF_FIXED(".pllvdo", CLK_PLLVDO, CLK_QEXTAL, 105, 2),
/* Internal Core Clocks */
DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
+ DEF_FIXED(".pllcln_div2", CLK_PLLCLN_DIV2, CLK_PLLCLN, 1, 2),
+ DEF_FIXED(".pllcln_div8", CLK_PLLCLN_DIV8, CLK_PLLCLN, 1, 8),
DEF_FIXED(".pllcln_div16", CLK_PLLCLN_DIV16, CLK_PLLCLN, 1, 16),
+ DEF_FIXED(".pllcln_div20", CLK_PLLCLN_DIV20, CLK_PLLCLN, 1, 20),
DEF_DDIV(".plldty_acpu", CLK_PLLDTY_ACPU, CLK_PLLDTY, CDDIV0_DIVCTL2, dtable_2_64),
+ DEF_FIXED(".plldty_acpu_div2", CLK_PLLDTY_ACPU_DIV2, CLK_PLLDTY_ACPU, 1, 2),
DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4),
+ DEF_FIXED(".plldty_div16", CLK_PLLDTY_DIV16, CLK_PLLDTY, 1, 16),
+
+ DEF_DDIV(".pllvdo_cru0", CLK_PLLVDO_CRU0, CLK_PLLVDO, CDDIV3_DIVCTL3, dtable_2_4),
/* Core Clocks */
DEF_FIXED("sys_0_pclk", R9A09G047_SYS_0_PCLK, CLK_QEXTAL, 1, 1),
@@ -90,8 +111,22 @@ static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
};
static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
+ DEF_MOD_CRITICAL("icu_0_pclk_i", CLK_PLLCM33_DIV16, 0, 5, 0, 5,
+ BUS_MSTOP_NONE),
DEF_MOD_CRITICAL("gic_0_gicclk", CLK_PLLDTY_ACPU_DIV4, 1, 3, 0, 19,
BUS_MSTOP(3, BIT(5))),
+ DEF_MOD("wdt_1_clkp", CLK_PLLCLN_DIV16, 4, 13, 2, 13,
+ BUS_MSTOP(1, BIT(0))),
+ DEF_MOD("wdt_1_clk_loco", CLK_QEXTAL, 4, 14, 2, 14,
+ BUS_MSTOP(1, BIT(0))),
+ DEF_MOD("wdt_2_clkp", CLK_PLLCLN_DIV16, 4, 15, 2, 15,
+ BUS_MSTOP(5, BIT(12))),
+ DEF_MOD("wdt_2_clk_loco", CLK_QEXTAL, 5, 0, 2, 16,
+ BUS_MSTOP(5, BIT(12))),
+ DEF_MOD("wdt_3_clkp", CLK_PLLCLN_DIV16, 5, 1, 2, 17,
+ BUS_MSTOP(5, BIT(13))),
+ DEF_MOD("wdt_3_clk_loco", CLK_QEXTAL, 5, 2, 2, 18,
+ BUS_MSTOP(5, BIT(13))),
DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15,
BUS_MSTOP(3, BIT(14))),
DEF_MOD("riic_8_ckm", CLK_PLLCM33_DIV16, 9, 3, 4, 19,
@@ -112,12 +147,54 @@ static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
BUS_MSTOP(1, BIT(7))),
DEF_MOD("riic_7_ckm", CLK_PLLCLN_DIV16, 9, 11, 4, 27,
BUS_MSTOP(1, BIT(8))),
+ DEF_MOD("canfd_0_pclk", CLK_PLLCLN_DIV16, 9, 12, 4, 28,
+ BUS_MSTOP(10, BIT(14))),
+ DEF_MOD("canfd_0_clk_ram", CLK_PLLCLN_DIV8, 9, 13, 4, 29,
+ BUS_MSTOP(10, BIT(14))),
+ DEF_MOD("canfd_0_clkc", CLK_PLLCLN_DIV20, 9, 14, 4, 30,
+ BUS_MSTOP(10, BIT(14))),
+ DEF_MOD("sdhi_0_imclk", CLK_PLLCLN_DIV8, 10, 3, 5, 3,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_imclk2", CLK_PLLCLN_DIV8, 10, 4, 5, 4,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_clk_hs", CLK_PLLCLN_DIV2, 10, 5, 5, 5,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 6, 5, 6,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_1_imclk", CLK_PLLCLN_DIV8, 10, 7, 5, 7,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_imclk2", CLK_PLLCLN_DIV8, 10, 8, 5, 8,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_clk_hs", CLK_PLLCLN_DIV2, 10, 9, 5, 9,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 10, 5, 10,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_2_imclk", CLK_PLLCLN_DIV8, 10, 11, 5, 11,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_imclk2", CLK_PLLCLN_DIV8, 10, 12, 5, 12,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_clk_hs", CLK_PLLCLN_DIV2, 10, 13, 5, 13,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("cru_0_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 2, 6, 18,
+ BUS_MSTOP(9, BIT(4))),
+ DEF_MOD_NO_PM("cru_0_vclk", CLK_PLLVDO_CRU0, 13, 3, 6, 19,
+ BUS_MSTOP(9, BIT(4))),
+ DEF_MOD("cru_0_pclk", CLK_PLLDTY_DIV16, 13, 4, 6, 20,
+ BUS_MSTOP(9, BIT(4))),
+ DEF_MOD("tsu_1_pclk", CLK_QEXTAL, 16, 10, 8, 10,
+ BUS_MSTOP(2, BIT(15))),
};
static const struct rzv2h_reset r9a09g047_resets[] __initconst = {
DEF_RST(3, 0, 1, 1), /* SYS_0_PRESETN */
+ DEF_RST(3, 6, 1, 7), /* ICU_0_PRESETN_I */
DEF_RST(3, 8, 1, 9), /* GIC_0_GICRESET_N */
DEF_RST(3, 9, 1, 10), /* GIC_0_DBG_GICRESET_N */
+ DEF_RST(7, 6, 3, 7), /* WDT_1_RESET */
+ DEF_RST(7, 7, 3, 8), /* WDT_2_RESET */
+ DEF_RST(7, 8, 3, 9), /* WDT_3_RESET */
DEF_RST(9, 5, 4, 6), /* SCIF_0_RST_SYSTEM_N */
DEF_RST(9, 8, 4, 9), /* RIIC_0_MRST */
DEF_RST(9, 9, 4, 10), /* RIIC_1_MRST */
@@ -128,6 +205,15 @@ static const struct rzv2h_reset r9a09g047_resets[] __initconst = {
DEF_RST(9, 14, 4, 15), /* RIIC_6_MRST */
DEF_RST(9, 15, 4, 16), /* RIIC_7_MRST */
DEF_RST(10, 0, 4, 17), /* RIIC_8_MRST */
+ DEF_RST(10, 1, 4, 18), /* CANFD_0_RSTP_N */
+ DEF_RST(10, 2, 4, 19), /* CANFD_0_RSTC_N */
+ DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
+ DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
+ DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
+ DEF_RST(12, 5, 5, 22), /* CRU_0_PRESETN */
+ DEF_RST(12, 6, 5, 23), /* CRU_0_ARESETN */
+ DEF_RST(12, 7, 5, 24), /* CRU_0_S_RESETN */
+ DEF_RST(15, 8, 7, 9), /* TSU_1_PRESETN */
};
const struct rzv2h_cpg_info r9a09g047_cpg_info __initconst = {
diff --git a/drivers/clk/renesas/r9a09g057-cpg.c b/drivers/clk/renesas/r9a09g057-cpg.c
index 3705e18f66ad..d63eafbca780 100644
--- a/drivers/clk/renesas/r9a09g057-cpg.c
+++ b/drivers/clk/renesas/r9a09g057-cpg.c
@@ -31,6 +31,8 @@ enum clk_ids {
CLK_PLLVDO,
/* Internal Core Clocks */
+ CLK_PLLCM33_DIV4,
+ CLK_PLLCM33_DIV4_PLLCM33,
CLK_PLLCM33_DIV16,
CLK_PLLCLN_DIV2,
CLK_PLLCLN_DIV8,
@@ -39,6 +41,8 @@ enum clk_ids {
CLK_PLLDTY_ACPU_DIV2,
CLK_PLLDTY_ACPU_DIV4,
CLK_PLLDTY_DIV16,
+ CLK_PLLDTY_RCPU,
+ CLK_PLLDTY_RCPU_DIV4,
CLK_PLLVDO_CRU0,
CLK_PLLVDO_CRU1,
CLK_PLLVDO_CRU2,
@@ -85,6 +89,9 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_FIXED(".pllvdo", CLK_PLLVDO, CLK_QEXTAL, 105, 2),
/* Internal Core Clocks */
+ DEF_FIXED(".pllcm33_div4", CLK_PLLCM33_DIV4, CLK_PLLCM33, 1, 4),
+ DEF_DDIV(".pllcm33_div4_pllcm33", CLK_PLLCM33_DIV4_PLLCM33,
+ CLK_PLLCM33_DIV4, CDDIV0_DIVCTL1, dtable_2_64),
DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
DEF_FIXED(".pllcln_div2", CLK_PLLCLN_DIV2, CLK_PLLCLN, 1, 2),
@@ -95,6 +102,8 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_FIXED(".plldty_acpu_div2", CLK_PLLDTY_ACPU_DIV2, CLK_PLLDTY_ACPU, 1, 2),
DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4),
DEF_FIXED(".plldty_div16", CLK_PLLDTY_DIV16, CLK_PLLDTY, 1, 16),
+ DEF_DDIV(".plldty_rcpu", CLK_PLLDTY_RCPU, CLK_PLLDTY, CDDIV3_DIVCTL2, dtable_2_64),
+ DEF_FIXED(".plldty_rcpu_div4", CLK_PLLDTY_RCPU_DIV4, CLK_PLLDTY_RCPU, 1, 4),
DEF_DDIV(".pllvdo_cru0", CLK_PLLVDO_CRU0, CLK_PLLVDO, CDDIV3_DIVCTL3, dtable_2_4),
DEF_DDIV(".pllvdo_cru1", CLK_PLLVDO_CRU1, CLK_PLLVDO, CDDIV4_DIVCTL0, dtable_2_4),
@@ -115,6 +124,16 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
};
static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
+ DEF_MOD("dmac_0_aclk", CLK_PLLCM33_DIV4_PLLCM33, 0, 0, 0, 0,
+ BUS_MSTOP(5, BIT(9))),
+ DEF_MOD("dmac_1_aclk", CLK_PLLDTY_ACPU_DIV2, 0, 1, 0, 1,
+ BUS_MSTOP(3, BIT(2))),
+ DEF_MOD("dmac_2_aclk", CLK_PLLDTY_ACPU_DIV2, 0, 2, 0, 2,
+ BUS_MSTOP(3, BIT(3))),
+ DEF_MOD("dmac_3_aclk", CLK_PLLDTY_RCPU_DIV4, 0, 3, 0, 3,
+ BUS_MSTOP(10, BIT(11))),
+ DEF_MOD("dmac_4_aclk", CLK_PLLDTY_RCPU_DIV4, 0, 4, 0, 4,
+ BUS_MSTOP(10, BIT(12))),
DEF_MOD_CRITICAL("icu_0_pclk_i", CLK_PLLCM33_DIV16, 0, 5, 0, 5,
BUS_MSTOP_NONE),
DEF_MOD_CRITICAL("gic_0_gicclk", CLK_PLLDTY_ACPU_DIV4, 1, 3, 0, 19,
@@ -223,6 +242,11 @@ static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
DEF_RST(3, 0, 1, 1), /* SYS_0_PRESETN */
+ DEF_RST(3, 1, 1, 2), /* DMAC_0_ARESETN */
+ DEF_RST(3, 2, 1, 3), /* DMAC_1_ARESETN */
+ DEF_RST(3, 3, 1, 4), /* DMAC_2_ARESETN */
+ DEF_RST(3, 4, 1, 5), /* DMAC_3_ARESETN */
+ DEF_RST(3, 5, 1, 6), /* DMAC_4_ARESETN */
DEF_RST(3, 6, 1, 7), /* ICU_0_PRESETN_I */
DEF_RST(3, 8, 1, 9), /* GIC_0_GICRESET_N */
DEF_RST(3, 9, 1, 10), /* GIC_0_DBG_GICRESET_N */
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index bf85501709f0..da021ee446ec 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -338,11 +338,6 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
WARN_DEBUG(id >= priv->num_core_clks);
WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);
- if (!core->name) {
- /* Skip NULLified clock */
- return;
- }
-
switch (core->type) {
case CLK_TYPE_IN:
clk = of_clk_get_by_name(priv->np, core->name);
diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index ddf722ca79eb..b91dfbfb01e3 100644
--- a/drivers/clk/renesas/rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -51,6 +51,7 @@
#define RZG3S_DIV_M GENMASK(25, 22)
#define RZG3S_DIV_NI GENMASK(21, 13)
#define RZG3S_DIV_NF GENMASK(12, 1)
+#define RZG3S_SEL_PLL BIT(0)
#define CLK_ON_R(reg) (reg)
#define CLK_MON_R(reg) (0x180 + (reg))
@@ -60,6 +61,7 @@
#define GET_REG_OFFSET(val) ((val >> 20) & 0xfff)
#define GET_REG_SAMPLL_CLK1(val) ((val >> 22) & 0xfff)
#define GET_REG_SAMPLL_CLK2(val) ((val >> 12) & 0xfff)
+#define GET_REG_SAMPLL_SETTING(val) ((val) & 0xfff)
#define CPG_WEN_BIT BIT(16)
@@ -943,6 +945,7 @@ rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
struct pll_clk {
struct clk_hw hw;
+ unsigned long default_rate;
unsigned int conf;
unsigned int type;
void __iomem *base;
@@ -980,12 +983,19 @@ static unsigned long rzg3s_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
{
struct pll_clk *pll_clk = to_pll(hw);
struct rzg2l_cpg_priv *priv = pll_clk->priv;
- u32 nir, nfr, mr, pr, val;
+ u32 nir, nfr, mr, pr, val, setting;
u64 rate;
if (pll_clk->type != CLK_TYPE_G3S_PLL)
return parent_rate;
+ setting = GET_REG_SAMPLL_SETTING(pll_clk->conf);
+ if (setting) {
+ val = readl(priv->base + setting);
+ if (val & RZG3S_SEL_PLL)
+ return pll_clk->default_rate;
+ }
+
val = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
pr = 1 << FIELD_GET(RZG3S_DIV_P, val);
@@ -1038,6 +1048,7 @@ rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
pll_clk->base = priv->base;
pll_clk->priv = priv;
pll_clk->type = core->type;
+ pll_clk->default_rate = core->default_rate;
ret = devm_clk_hw_register(dev, &pll_clk->hw);
if (ret)
@@ -1105,11 +1116,6 @@ rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
WARN_DEBUG(id >= priv->num_core_clks);
WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);
- if (!core->name) {
- /* Skip NULLified clock */
- return;
- }
-
switch (core->type) {
case CLK_TYPE_IN:
clk = of_clk_get_by_name(priv->dev->of_node, core->name);
@@ -1228,8 +1234,8 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
error = readl_poll_timeout_atomic(priv->base + CLK_MON_R(reg), value,
value & bitmask, 0, 10);
if (error)
- dev_err(dev, "Failed to enable CLK_ON %p\n",
- priv->base + CLK_ON_R(reg));
+ dev_err(dev, "Failed to enable CLK_ON 0x%x/%pC\n",
+ CLK_ON_R(reg), hw->clk);
return error;
}
@@ -1344,11 +1350,6 @@ rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);
- if (!mod->name) {
- /* Skip NULLified clock */
- return;
- }
-
parent = priv->clks[mod->parent];
if (IS_ERR(parent)) {
clk = parent;
@@ -1538,28 +1539,6 @@ static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
return devm_reset_controller_register(priv->dev, &priv->rcdev);
}
-static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv *priv,
- const struct of_phandle_args *clkspec)
-{
- const struct rzg2l_cpg_info *info = priv->info;
- unsigned int id;
- unsigned int i;
-
- if (clkspec->args_count != 2)
- return false;
-
- if (clkspec->args[0] != CPG_MOD)
- return false;
-
- id = clkspec->args[1] + info->num_total_core_clks;
- for (i = 0; i < info->num_no_pm_mod_clks; i++) {
- if (info->no_pm_mod_clks[i] == id)
- return false;
- }
-
- return true;
-}
-
/**
* struct rzg2l_cpg_pm_domains - RZ/G2L PM domains data structure
* @onecell_data: cell data
@@ -1584,45 +1563,73 @@ struct rzg2l_cpg_pd {
u16 id;
};
+static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_pd *pd,
+ const struct of_phandle_args *clkspec)
+{
+ if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2)
+ return false;
+
+ switch (clkspec->args[0]) {
+ case CPG_MOD: {
+ struct rzg2l_cpg_priv *priv = pd->priv;
+ const struct rzg2l_cpg_info *info = priv->info;
+ unsigned int id = clkspec->args[1];
+
+ if (id >= priv->num_mod_clks)
+ return false;
+
+ id += info->num_total_core_clks;
+
+ for (unsigned int i = 0; i < info->num_no_pm_mod_clks; i++) {
+ if (info->no_pm_mod_clks[i] == id)
+ return false;
+ }
+
+ return true;
+ }
+
+ case CPG_CORE:
+ default:
+ return false;
+ }
+}
+
static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
- struct rzg2l_cpg_priv *priv = pd->priv;
struct device_node *np = dev->of_node;
struct of_phandle_args clkspec;
bool once = true;
struct clk *clk;
+ unsigned int i;
int error;
- int i = 0;
-
- while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
- &clkspec)) {
- if (rzg2l_cpg_is_pm_clk(priv, &clkspec)) {
- if (once) {
- once = false;
- error = pm_clk_create(dev);
- if (error) {
- of_node_put(clkspec.np);
- goto err;
- }
- }
- clk = of_clk_get_from_provider(&clkspec);
+
+ for (i = 0; !of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, &clkspec); i++) {
+ if (!rzg2l_cpg_is_pm_clk(pd, &clkspec)) {
of_node_put(clkspec.np);
- if (IS_ERR(clk)) {
- error = PTR_ERR(clk);
- goto fail_destroy;
- }
+ continue;
+ }
- error = pm_clk_add_clk(dev, clk);
+ if (once) {
+ once = false;
+ error = pm_clk_create(dev);
if (error) {
- dev_err(dev, "pm_clk_add_clk failed %d\n",
- error);
- goto fail_put;
+ of_node_put(clkspec.np);
+ goto err;
}
- } else {
- of_node_put(clkspec.np);
}
- i++;
+ clk = of_clk_get_from_provider(&clkspec);
+ of_node_put(clkspec.np);
+ if (IS_ERR(clk)) {
+ error = PTR_ERR(clk);
+ goto fail_destroy;
+ }
+
+ error = pm_clk_add_clk(dev, clk);
+ if (error) {
+ dev_err(dev, "pm_clk_add_clk failed %d\n", error);
+ goto fail_put;
+ }
}
return 0;
diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h
index 881a89b5a710..b6eece5ffa20 100644
--- a/drivers/clk/renesas/rzg2l-cpg.h
+++ b/drivers/clk/renesas/rzg2l-cpg.h
@@ -21,6 +21,7 @@
#define CPG_PL2_DDIV (0x204)
#define CPG_PL3A_DDIV (0x208)
#define CPG_PL6_DDIV (0x210)
+#define CPG_PL3C_SDIV (0x214)
#define CPG_CLKSTATUS (0x280)
#define CPG_PL3_SSEL (0x408)
#define CPG_PL6_SSEL (0x414)
@@ -70,6 +71,7 @@
#define DIVPL3A DDIV_PACK(CPG_PL3A_DDIV, 0, 3)
#define DIVPL3B DDIV_PACK(CPG_PL3A_DDIV, 4, 3)
#define DIVPL3C DDIV_PACK(CPG_PL3A_DDIV, 8, 3)
+#define DIVPL3E DDIV_PACK(CPG_PL3C_SDIV, 8, 5)
#define DIVGPU DDIV_PACK(CPG_PL6_DDIV, 0, 2)
#define SEL_PLL_PACK(offset, bitpos, size) \
@@ -102,7 +104,10 @@ struct cpg_core_clk {
const struct clk_div_table *dtable;
const u32 *mtable;
const unsigned long invalid_rate;
- const unsigned long max_rate;
+ union {
+ const unsigned long max_rate;
+ const unsigned long default_rate;
+ };
const char * const *parent_names;
notifier_fn_t notifier;
u32 flag;
@@ -144,8 +149,9 @@ enum clk_types {
DEF_TYPE(_name, _id, _type, .parent = _parent)
#define DEF_SAMPLL(_name, _id, _parent, _conf) \
DEF_TYPE(_name, _id, CLK_TYPE_SAM_PLL, .parent = _parent, .conf = _conf)
-#define DEF_G3S_PLL(_name, _id, _parent, _conf) \
- DEF_TYPE(_name, _id, CLK_TYPE_G3S_PLL, .parent = _parent, .conf = _conf)
+#define DEF_G3S_PLL(_name, _id, _parent, _conf, _default_rate) \
+ DEF_TYPE(_name, _id, CLK_TYPE_G3S_PLL, .parent = _parent, .conf = _conf, \
+ .default_rate = _default_rate)
#define DEF_INPUT(_name, _id) \
DEF_TYPE(_name, _id, CLK_TYPE_IN)
#define DEF_FIXED(_name, _id, _parent, _mult, _div) \
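
The union added to struct cpg_core_clk above lets the new default_rate share storage with max_rate; this is safe because any given core-clock entry only ever consults one of the two fields (the new G3S PLL type reads default_rate, while the existing max_rate users are unaffected). A stand-alone illustration of the aliasing, with made-up names and values:

struct core_rate_limits {
	union {
		const unsigned long max_rate;		/* existing users */
		const unsigned long default_rate;	/* RZ/G3S PLL */
	};
};

/* Designated initializers may name either member; both occupy the same slot. */
static const struct core_rate_limits div_example = { .max_rate = 200000000UL };
static const struct core_rate_limits pll_example = { .default_rate = 1100000000UL };
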
diff --git a/drivers/clk/renesas/rzv2h-cpg.c b/drivers/clk/renesas/rzv2h-cpg.c
index a4c1e92e1fd7..2b9771ab2b3f 100644
--- a/drivers/clk/renesas/rzv2h-cpg.c
+++ b/drivers/clk/renesas/rzv2h-cpg.c
@@ -447,8 +447,7 @@ static void rzv2h_mod_clock_mstop_enable(struct rzv2h_cpg_priv *priv,
{
unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, mstop_data);
u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, mstop_data);
- unsigned int index = (mstop_index - 1) * 16;
- atomic_t *mstop = &priv->mstop_count[index];
+ atomic_t *mstop = &priv->mstop_count[mstop_index * 16];
unsigned long flags;
unsigned int i;
u32 val = 0;
@@ -469,8 +468,7 @@ static void rzv2h_mod_clock_mstop_disable(struct rzv2h_cpg_priv *priv,
{
unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, mstop_data);
u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, mstop_data);
- unsigned int index = (mstop_index - 1) * 16;
- atomic_t *mstop = &priv->mstop_count[index];
+ atomic_t *mstop = &priv->mstop_count[mstop_index * 16];
unsigned long flags;
unsigned int i;
u32 val = 0;
@@ -541,8 +539,8 @@ static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable)
error = readl_poll_timeout_atomic(priv->base + reg, value,
value & bitmask, 0, 10);
if (error)
- dev_err(dev, "Failed to enable CLK_ON %p\n",
- priv->base + reg);
+ dev_err(dev, "Failed to enable CLK_ON 0x%x/%pC\n",
+ GET_CLK_ON_OFFSET(clock->on_index), hw->clk);
return error;
}
@@ -630,8 +628,7 @@ rzv2h_cpg_register_mod_clk(const struct rzv2h_mod_clk *mod,
} else if (clock->mstop_data != BUS_MSTOP_NONE && mod->critical) {
unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, clock->mstop_data);
u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, clock->mstop_data);
- unsigned int index = (mstop_index - 1) * 16;
- atomic_t *mstop = &priv->mstop_count[index];
+ atomic_t *mstop = &priv->mstop_count[mstop_index * 16];
unsigned long flags;
unsigned int i;
u32 val = 0;
@@ -926,6 +923,9 @@ static int __init rzv2h_cpg_probe(struct platform_device *pdev)
if (!priv->mstop_count)
return -ENOMEM;
+ /* Bias the base so CPG_BUS_m_MSTOP indices (m starts at 1) map straight to mstop_count[m * 16] */
+ priv->mstop_count -= 16;
+
priv->resets = devm_kmemdup(dev, info->resets, sizeof(*info->resets) *
info->num_resets, GFP_KERNEL);
if (!priv->resets)
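
The rzv2h change above trades the per-call "(mstop_index - 1) * 16" arithmetic for a one-time "priv->mstop_count -= 16" bias in probe, so the 1-based CPG_BUS_m_MSTOP index can be used directly at every call site. A small userspace sketch of the same idea; the sizes are hypothetical, and, like the driver, it forms a base pointer one register's worth below the allocation without ever dereferencing that region.

#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_MSTOP_REG	16
#define NUM_MSTOP_REGS		4	/* m = 1..4, hypothetical */

int main(void)
{
	/* One reference counter per MSTOP bit, allocated 0-based as usual */
	int *alloc = calloc(NUM_MSTOP_REGS * BITS_PER_MSTOP_REG, sizeof(*alloc));
	int *mstop_count;

	if (!alloc)
		return 1;

	/* Bias the base once so index (m * 16 + bit) works for m starting at 1 */
	mstop_count = alloc - BITS_PER_MSTOP_REG;

	mstop_count[1 * BITS_PER_MSTOP_REG + 3]++;	/* CPG_BUS_1_MSTOP, bit 3 */
	mstop_count[2 * BITS_PER_MSTOP_REG + 0]++;	/* CPG_BUS_2_MSTOP, bit 0 */

	printf("bus1/bit3 refcount: %d\n", alloc[0 * BITS_PER_MSTOP_REG + 3]);
	printf("bus2/bit0 refcount: %d\n", alloc[1 * BITS_PER_MSTOP_REG + 0]);

	free(alloc);	/* free the original pointer, not the biased one */
	return 0;
}
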
diff --git a/drivers/clk/renesas/rzv2h-cpg.h b/drivers/clk/renesas/rzv2h-cpg.h
index fd8eb985c75b..576a070763cb 100644
--- a/drivers/clk/renesas/rzv2h-cpg.h
+++ b/drivers/clk/renesas/rzv2h-cpg.h
@@ -38,11 +38,13 @@ struct ddiv {
#define CPG_CDDIV3 (0x40C)
#define CPG_CDDIV4 (0x410)
+#define CDDIV0_DIVCTL1 DDIV_PACK(CPG_CDDIV0, 4, 3, 1)
#define CDDIV0_DIVCTL2 DDIV_PACK(CPG_CDDIV0, 8, 3, 2)
#define CDDIV1_DIVCTL0 DDIV_PACK(CPG_CDDIV1, 0, 2, 4)
#define CDDIV1_DIVCTL1 DDIV_PACK(CPG_CDDIV1, 4, 2, 5)
#define CDDIV1_DIVCTL2 DDIV_PACK(CPG_CDDIV1, 8, 2, 6)
#define CDDIV1_DIVCTL3 DDIV_PACK(CPG_CDDIV1, 12, 2, 7)
+#define CDDIV3_DIVCTL2 DDIV_PACK(CPG_CDDIV3, 8, 3, 14)
#define CDDIV3_DIVCTL3 DDIV_PACK(CPG_CDDIV3, 12, 1, 15)
#define CDDIV4_DIVCTL0 DDIV_PACK(CPG_CDDIV4, 0, 1, 16)
#define CDDIV4_DIVCTL1 DDIV_PACK(CPG_CDDIV4, 4, 1, 17)
diff --git a/drivers/clk/rockchip/Kconfig b/drivers/clk/rockchip/Kconfig
index 570ad90835d3..febb7944f34b 100644
--- a/drivers/clk/rockchip/Kconfig
+++ b/drivers/clk/rockchip/Kconfig
@@ -93,6 +93,20 @@ config CLK_RK3399
help
Build the driver for RK3399 Clock Driver.
+config CLK_RK3528
+ bool "Rockchip RK3528 clock controller support"
+ depends on ARM64 || COMPILE_TEST
+ default y
+ help
+ Build the driver for RK3528 Clock Controller.
+
+config CLK_RK3562
+ bool "Rockchip RK3562 clock controller support"
+ depends on ARM64 || COMPILE_TEST
+ default y
+ help
+ Build the driver for RK3562 Clock Controller.
+
config CLK_RK3568
bool "Rockchip RK3568 clock controller support"
depends on ARM64 || COMPILE_TEST
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index 3fe7616f0ebe..e8ece20aebfd 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -28,6 +28,8 @@ obj-$(CONFIG_CLK_RK3308) += clk-rk3308.o
obj-$(CONFIG_CLK_RK3328) += clk-rk3328.o
obj-$(CONFIG_CLK_RK3368) += clk-rk3368.o
obj-$(CONFIG_CLK_RK3399) += clk-rk3399.o
+obj-$(CONFIG_CLK_RK3528) += clk-rk3528.o rst-rk3528.o
+obj-$(CONFIG_CLK_RK3562) += clk-rk3562.o rst-rk3562.o
obj-$(CONFIG_CLK_RK3568) += clk-rk3568.o
obj-$(CONFIG_CLK_RK3576) += clk-rk3576.o rst-rk3576.o
obj-$(CONFIG_CLK_RK3588) += clk-rk3588.o rst-rk3588.o
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index fe76756e592e..2c2abb3b4210 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -204,10 +204,12 @@ static int rockchip_rk3036_pll_set_params(struct rockchip_clk_pll *pll,
rockchip_rk3036_pll_get_params(pll, &cur);
cur.rate = 0;
- cur_parent = pll_mux_ops->get_parent(&pll_mux->hw);
- if (cur_parent == PLL_MODE_NORM) {
- pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_SLOW);
- rate_change_remuxed = 1;
+ if (!(pll->flags & ROCKCHIP_PLL_FIXED_MODE)) {
+ cur_parent = pll_mux_ops->get_parent(&pll_mux->hw);
+ if (cur_parent == PLL_MODE_NORM) {
+ pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_SLOW);
+ rate_change_remuxed = 1;
+ }
}
/* update pll values */
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index 30e670c8afba..318c8ddc8a76 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -337,7 +337,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
GATE(0, "pclkin_cif0", "ext_cif0", 0,
RK2928_CLKGATE_CON(3), 3, GFLAGS),
- INVERTER(0, "pclk_cif0", "pclkin_cif0",
+ INVERTER(PCLK_CIF0, "pclk_cif0", "pclkin_cif0",
RK2928_CLKSEL_CON(30), 8, IFLAGS),
FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
@@ -595,7 +595,7 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
GATE(0, "pclkin_cif1", "ext_cif1", 0,
RK2928_CLKGATE_CON(3), 4, GFLAGS),
- INVERTER(0, "pclk_cif1", "pclkin_cif1",
+ INVERTER(PCLK_CIF1, "pclk_cif1", "pclkin_cif1",
RK2928_CLKSEL_CON(30), 12, IFLAGS),
COMPOSITE(0, "aclk_gpu_src", mux_pll_src_cpll_gpll_p, 0,
diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
index 3bb87b27b662..cf60fcf2fa5c 100644
--- a/drivers/clk/rockchip/clk-rk3328.c
+++ b/drivers/clk/rockchip/clk-rk3328.c
@@ -201,7 +201,7 @@ PNAME(mux_aclk_peri_pre_p) = { "cpll_peri",
"gpll_peri",
"hdmiphy_peri" };
PNAME(mux_ref_usb3otg_src_p) = { "xin24m",
- "clk_usb3otg_ref" };
+ "clk_ref_usb3otg_src" };
PNAME(mux_xin24m_32k_p) = { "xin24m",
"clk_rtc32k" };
PNAME(mux_mac2io_src_p) = { "clk_mac2io_src",
diff --git a/drivers/clk/rockchip/clk-rk3528.c b/drivers/clk/rockchip/clk-rk3528.c
new file mode 100644
index 000000000000..b8b577b902a0
--- /dev/null
+++ b/drivers/clk/rockchip/clk-rk3528.c
@@ -0,0 +1,1116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Rockchip Electronics Co. Ltd.
+ * Copyright (c) 2024 Yao Zi <ziyao@disroot.org>
+ * Author: Joseph Chen <chenjh@rock-chips.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/rockchip,rk3528-cru.h>
+
+#include "clk.h"
+
+#define RK3528_GRF_SOC_STATUS0 0x1a0
+
+enum rk3528_plls {
+ apll, cpll, gpll, ppll, dpll,
+};
+
+static struct rockchip_pll_rate_table rk3528_pll_rates[] = {
+ RK3036_PLL_RATE(1896000000, 1, 79, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1800000000, 1, 75, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1704000000, 1, 71, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1608000000, 1, 67, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1512000000, 1, 63, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1416000000, 1, 59, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1296000000, 1, 54, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1200000000, 1, 50, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1188000000, 1, 99, 2, 1, 1, 0), /* GPLL */
+ RK3036_PLL_RATE(1092000000, 2, 91, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1008000000, 1, 42, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1000000000, 1, 125, 3, 1, 1, 0), /* PPLL */
+ RK3036_PLL_RATE(996000000, 2, 83, 1, 1, 1, 0), /* CPLL */
+ RK3036_PLL_RATE(960000000, 1, 40, 1, 1, 1, 0),
+ RK3036_PLL_RATE(912000000, 1, 76, 2, 1, 1, 0),
+ RK3036_PLL_RATE(816000000, 1, 68, 2, 1, 1, 0),
+ RK3036_PLL_RATE(600000000, 1, 50, 2, 1, 1, 0),
+ RK3036_PLL_RATE(594000000, 2, 99, 2, 1, 1, 0),
+ RK3036_PLL_RATE(408000000, 1, 68, 2, 2, 1, 0),
+ RK3036_PLL_RATE(312000000, 1, 78, 6, 1, 1, 0),
+ RK3036_PLL_RATE(216000000, 1, 72, 4, 2, 1, 0),
+ RK3036_PLL_RATE(96000000, 1, 24, 3, 2, 1, 0),
+ { /* sentinel */ },
+};
+
+#define RK3528_DIV_ACLK_M_CORE_MASK 0x1f
+#define RK3528_DIV_ACLK_M_CORE_SHIFT 11
+#define RK3528_DIV_PCLK_DBG_MASK 0x1f
+#define RK3528_DIV_PCLK_DBG_SHIFT 1
+
+#define RK3528_CLKSEL39(_aclk_m_core) \
+{ \
+ .reg = RK3528_CLKSEL_CON(39), \
+ .val = HIWORD_UPDATE(_aclk_m_core, RK3528_DIV_ACLK_M_CORE_MASK, \
+ RK3528_DIV_ACLK_M_CORE_SHIFT), \
+}
+
+#define RK3528_CLKSEL40(_pclk_dbg) \
+{ \
+ .reg = RK3528_CLKSEL_CON(40), \
+ .val = HIWORD_UPDATE(_pclk_dbg, RK3528_DIV_PCLK_DBG_MASK, \
+ RK3528_DIV_PCLK_DBG_SHIFT), \
+}
+
+#define RK3528_CPUCLK_RATE(_prate, _aclk_m_core, _pclk_dbg) \
+{ \
+ .prate = _prate, \
+ .divs = { \
+ RK3528_CLKSEL39(_aclk_m_core), \
+ RK3528_CLKSEL40(_pclk_dbg), \
+ }, \
+}
+
+static struct rockchip_cpuclk_rate_table rk3528_cpuclk_rates[] __initdata = {
+ RK3528_CPUCLK_RATE(1896000000, 1, 13),
+ RK3528_CPUCLK_RATE(1800000000, 1, 12),
+ RK3528_CPUCLK_RATE(1704000000, 1, 11),
+ RK3528_CPUCLK_RATE(1608000000, 1, 11),
+ RK3528_CPUCLK_RATE(1512000000, 1, 11),
+ RK3528_CPUCLK_RATE(1416000000, 1, 9),
+ RK3528_CPUCLK_RATE(1296000000, 1, 8),
+ RK3528_CPUCLK_RATE(1200000000, 1, 8),
+ RK3528_CPUCLK_RATE(1188000000, 1, 8),
+ RK3528_CPUCLK_RATE(1092000000, 1, 7),
+ RK3528_CPUCLK_RATE(1008000000, 1, 6),
+ RK3528_CPUCLK_RATE(1000000000, 1, 6),
+ RK3528_CPUCLK_RATE(996000000, 1, 6),
+ RK3528_CPUCLK_RATE(960000000, 1, 6),
+ RK3528_CPUCLK_RATE(912000000, 1, 6),
+ RK3528_CPUCLK_RATE(816000000, 1, 5),
+ RK3528_CPUCLK_RATE(600000000, 1, 3),
+ RK3528_CPUCLK_RATE(594000000, 1, 3),
+ RK3528_CPUCLK_RATE(408000000, 1, 2),
+ RK3528_CPUCLK_RATE(312000000, 1, 2),
+ RK3528_CPUCLK_RATE(216000000, 1, 1),
+ RK3528_CPUCLK_RATE(96000000, 1, 0),
+};
+
+static const struct rockchip_cpuclk_reg_data rk3528_cpuclk_data = {
+ .core_reg[0] = RK3528_CLKSEL_CON(39),
+ .div_core_shift[0] = 5,
+ .div_core_mask[0] = 0x1f,
+ .num_cores = 1,
+ .mux_core_alt = 1,
+ .mux_core_main = 0,
+ .mux_core_shift = 10,
+ .mux_core_mask = 0x1,
+};
+
+PNAME(mux_pll_p) = { "xin24m" };
+PNAME(mux_armclk) = { "apll", "gpll" };
+PNAME(mux_24m_32k_p) = { "xin24m", "clk_32k" };
+PNAME(mux_gpll_cpll_p) = { "gpll", "cpll" };
+PNAME(mux_gpll_cpll_xin24m_p) = { "gpll", "cpll", "xin24m" };
+PNAME(mux_100m_50m_24m_p) = { "clk_100m_src", "clk_50m_src",
+ "xin24m" };
+PNAME(mux_150m_100m_24m_p) = { "clk_150m_src", "clk_100m_src",
+ "xin24m" };
+PNAME(mux_200m_100m_24m_p) = { "clk_200m_src", "clk_100m_src",
+ "xin24m" };
+PNAME(mux_200m_100m_50m_24m_p) = { "clk_200m_src", "clk_100m_src",
+ "clk_50m_src", "xin24m" };
+PNAME(mux_300m_200m_100m_24m_p) = { "clk_300m_src", "clk_200m_src",
+ "clk_100m_src", "xin24m" };
+PNAME(mux_339m_200m_100m_24m_p) = { "clk_339m_src", "clk_200m_src",
+ "clk_100m_src", "xin24m" };
+PNAME(mux_500m_200m_100m_24m_p) = { "clk_500m_src", "clk_200m_src",
+ "clk_100m_src", "xin24m" };
+PNAME(mux_500m_300m_100m_24m_p) = { "clk_500m_src", "clk_300m_src",
+ "clk_100m_src", "xin24m" };
+PNAME(mux_600m_300m_200m_24m_p) = { "clk_600m_src", "clk_300m_src",
+ "clk_200m_src", "xin24m" };
+PNAME(aclk_gpu_p) = { "aclk_gpu_root",
+ "clk_gpu_pvtpll_src" };
+PNAME(aclk_rkvdec_pvtmux_root_p) = { "aclk_rkvdec_root",
+ "clk_rkvdec_pvtpll_src" };
+PNAME(clk_i2c2_p) = { "clk_200m_src", "clk_100m_src",
+ "xin24m", "clk_32k" };
+PNAME(clk_ref_pcie_inner_phy_p) = { "clk_ppll_100m_src", "xin24m" };
+PNAME(dclk_vop0_p) = { "dclk_vop_src0",
+ "clk_hdmiphy_pixel_io" };
+PNAME(mclk_i2s0_2ch_sai_src_p) = { "clk_i2s0_2ch_src",
+ "clk_i2s0_2ch_frac", "xin12m" };
+PNAME(mclk_i2s1_8ch_sai_src_p) = { "clk_i2s1_8ch_src",
+ "clk_i2s1_8ch_frac", "xin12m" };
+PNAME(mclk_i2s2_2ch_sai_src_p) = { "clk_i2s2_2ch_src",
+ "clk_i2s2_2ch_frac", "xin12m" };
+PNAME(mclk_i2s3_8ch_sai_src_p) = { "clk_i2s3_8ch_src",
+ "clk_i2s3_8ch_frac", "xin12m" };
+PNAME(mclk_sai_i2s0_p) = { "mclk_i2s0_2ch_sai_src",
+ "i2s0_mclkin" };
+PNAME(mclk_sai_i2s1_p) = { "mclk_i2s1_8ch_sai_src",
+ "i2s1_mclkin" };
+PNAME(mclk_spdif_src_p) = { "clk_spdif_src", "clk_spdif_frac",
+ "xin12m" };
+PNAME(sclk_uart0_src_p) = { "clk_uart0_src", "clk_uart0_frac",
+ "xin24m" };
+PNAME(sclk_uart1_src_p) = { "clk_uart1_src", "clk_uart1_frac",
+ "xin24m" };
+PNAME(sclk_uart2_src_p) = { "clk_uart2_src", "clk_uart2_frac",
+ "xin24m" };
+PNAME(sclk_uart3_src_p) = { "clk_uart3_src", "clk_uart3_frac",
+ "xin24m" };
+PNAME(sclk_uart4_src_p) = { "clk_uart4_src", "clk_uart4_frac",
+ "xin24m" };
+PNAME(sclk_uart5_src_p) = { "clk_uart5_src", "clk_uart5_frac",
+ "xin24m" };
+PNAME(sclk_uart6_src_p) = { "clk_uart6_src", "clk_uart6_frac",
+ "xin24m" };
+PNAME(sclk_uart7_src_p) = { "clk_uart7_src", "clk_uart7_frac",
+ "xin24m" };
+PNAME(clk_32k_p) = { "xin_osc0_div", "clk_pvtm_32k" };
+
+static struct rockchip_pll_clock rk3528_pll_clks[] __initdata = {
+ [apll] = PLL(pll_rk3328, PLL_APLL, "apll", mux_pll_p,
+ CLK_IS_CRITICAL, RK3528_PLL_CON(0),
+ RK3528_MODE_CON, 0, 0, 0, rk3528_pll_rates),
+
+ [cpll] = PLL(pll_rk3328, PLL_CPLL, "cpll", mux_pll_p,
+ CLK_IS_CRITICAL, RK3528_PLL_CON(8),
+ RK3528_MODE_CON, 2, 0, 0, rk3528_pll_rates),
+
+ [gpll] = PLL(pll_rk3328, PLL_GPLL, "gpll", mux_pll_p,
+ CLK_IS_CRITICAL, RK3528_PLL_CON(24),
+ RK3528_MODE_CON, 4, 0, 0, rk3528_pll_rates),
+
+ [ppll] = PLL(pll_rk3328, PLL_PPLL, "ppll", mux_pll_p,
+ CLK_IS_CRITICAL, RK3528_PCIE_PLL_CON(32),
+ RK3528_MODE_CON, 6, 0, ROCKCHIP_PLL_FIXED_MODE, rk3528_pll_rates),
+
+ [dpll] = PLL(pll_rk3328, PLL_DPLL, "dpll", mux_pll_p,
+ CLK_IGNORE_UNUSED, RK3528_DDRPHY_PLL_CON(16),
+ RK3528_DDRPHY_MODE_CON, 0, 0, 0, rk3528_pll_rates),
+};
+
+#define MFLAGS CLK_MUX_HIWORD_MASK
+#define DFLAGS CLK_DIVIDER_HIWORD_MASK
+#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+
+static struct rockchip_clk_branch rk3528_uart0_fracmux __initdata =
+ MUX(CLK_UART0, "clk_uart0", sclk_uart0_src_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(6), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3528_uart1_fracmux __initdata =
+ MUX(CLK_UART1, "clk_uart1", sclk_uart1_src_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(8), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3528_uart2_fracmux __initdata =
+ MUX(CLK_UART2, "clk_uart2", sclk_uart2_src_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(10), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3528_uart3_fracmux __initdata =
+ MUX(CLK_UART3, "clk_uart3", sclk_uart3_src_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(12), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3528_uart4_fracmux __initdata =
+ MUX(CLK_UART4, "clk_uart4", sclk_uart4_src_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(14), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3528_uart5_fracmux __initdata =
+ MUX(CLK_UART5, "clk_uart5", sclk_uart5_src_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(16), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3528_uart6_fracmux __initdata =
+ MUX(CLK_UART6, "clk_uart6", sclk_uart6_src_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(18), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3528_uart7_fracmux __initdata =
+ MUX(CLK_UART7, "clk_uart7", sclk_uart7_src_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(20), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch mclk_i2s0_2ch_sai_src_fracmux __initdata =
+ MUX(MCLK_I2S0_2CH_SAI_SRC_PRE, "mclk_i2s0_2ch_sai_src_pre", mclk_i2s0_2ch_sai_src_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(22), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch mclk_i2s1_8ch_sai_src_fracmux __initdata =
+ MUX(MCLK_I2S1_8CH_SAI_SRC_PRE, "mclk_i2s1_8ch_sai_src_pre", mclk_i2s1_8ch_sai_src_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(26), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch mclk_i2s2_2ch_sai_src_fracmux __initdata =
+ MUX(MCLK_I2S2_2CH_SAI_SRC_PRE, "mclk_i2s2_2ch_sai_src_pre", mclk_i2s2_2ch_sai_src_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(28), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch mclk_i2s3_8ch_sai_src_fracmux __initdata =
+ MUX(MCLK_I2S3_8CH_SAI_SRC_PRE, "mclk_i2s3_8ch_sai_src_pre", mclk_i2s3_8ch_sai_src_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(24), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch mclk_spdif_src_fracmux __initdata =
+ MUX(MCLK_SDPDIF_SRC_PRE, "mclk_spdif_src_pre", mclk_spdif_src_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(32), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3528_clk_branches[] __initdata = {
+ /* top */
+ FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
+ COMPOSITE(CLK_MATRIX_250M_SRC, "clk_250m_src", mux_gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(1), 15, 1, MFLAGS, 10, 5, DFLAGS,
+ RK3528_CLKGATE_CON(0), 5, GFLAGS),
+ COMPOSITE(CLK_MATRIX_500M_SRC, "clk_500m_src", mux_gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(3), 11, 1, MFLAGS, 6, 5, DFLAGS,
+ RK3528_CLKGATE_CON(0), 10, GFLAGS),
+ COMPOSITE_NOMUX(CLK_MATRIX_50M_SRC, "clk_50m_src", "cpll", CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(0), 2, 5, DFLAGS,
+ RK3528_CLKGATE_CON(0), 1, GFLAGS),
+ COMPOSITE_NOMUX(CLK_MATRIX_100M_SRC, "clk_100m_src", "cpll", CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(0), 7, 5, DFLAGS,
+ RK3528_CLKGATE_CON(0), 2, GFLAGS),
+ COMPOSITE_NOMUX(CLK_MATRIX_150M_SRC, "clk_150m_src", "gpll", CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(1), 0, 5, DFLAGS,
+ RK3528_CLKGATE_CON(0), 3, GFLAGS),
+ COMPOSITE_NOMUX(CLK_MATRIX_200M_SRC, "clk_200m_src", "gpll", CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(1), 5, 5, DFLAGS,
+ RK3528_CLKGATE_CON(0), 4, GFLAGS),
+ COMPOSITE_NOMUX(CLK_MATRIX_300M_SRC, "clk_300m_src", "gpll", CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(2), 0, 5, DFLAGS,
+ RK3528_CLKGATE_CON(0), 6, GFLAGS),
+ COMPOSITE_NOMUX_HALFDIV(CLK_MATRIX_339M_SRC, "clk_339m_src", "gpll", CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(2), 5, 5, DFLAGS,
+ RK3528_CLKGATE_CON(0), 7, GFLAGS),
+ COMPOSITE_NOMUX(CLK_MATRIX_400M_SRC, "clk_400m_src", "gpll", CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(2), 10, 5, DFLAGS,
+ RK3528_CLKGATE_CON(0), 8, GFLAGS),
+ COMPOSITE_NOMUX(CLK_MATRIX_600M_SRC, "clk_600m_src", "gpll", CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(4), 0, 5, DFLAGS,
+ RK3528_CLKGATE_CON(0), 11, GFLAGS),
+ COMPOSITE(DCLK_VOP_SRC0, "dclk_vop_src0", mux_gpll_cpll_p, 0,
+ RK3528_CLKSEL_CON(32), 10, 1, MFLAGS, 2, 8, DFLAGS,
+ RK3528_CLKGATE_CON(3), 7, GFLAGS),
+ COMPOSITE(DCLK_VOP_SRC1, "dclk_vop_src1", mux_gpll_cpll_p, 0,
+ RK3528_CLKSEL_CON(33), 8, 1, MFLAGS, 0, 8, DFLAGS,
+ RK3528_CLKGATE_CON(3), 8, GFLAGS),
+ COMPOSITE_NOMUX(CLK_HSM, "clk_hsm", "xin24m", 0,
+ RK3528_CLKSEL_CON(36), 5, 5, DFLAGS,
+ RK3528_CLKGATE_CON(3), 13, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_UART0_SRC, "clk_uart0_src", "gpll", 0,
+ RK3528_CLKSEL_CON(4), 5, 5, DFLAGS,
+ RK3528_CLKGATE_CON(0), 12, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART0_FRAC, "clk_uart0_frac", "clk_uart0_src", CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(5), 0,
+ RK3528_CLKGATE_CON(0), 13, GFLAGS,
+ &rk3528_uart0_fracmux),
+ GATE(SCLK_UART0, "sclk_uart0", "clk_uart0", 0,
+ RK3528_CLKGATE_CON(0), 14, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_UART1_SRC, "clk_uart1_src", "gpll", 0,
+ RK3528_CLKSEL_CON(6), 2, 5, DFLAGS,
+ RK3528_CLKGATE_CON(0), 15, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART1_FRAC, "clk_uart1_frac", "clk_uart1_src", CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(7), 0,
+ RK3528_CLKGATE_CON(1), 0, GFLAGS,
+ &rk3528_uart1_fracmux),
+ GATE(SCLK_UART1, "sclk_uart1", "clk_uart1", 0,
+ RK3528_CLKGATE_CON(1), 1, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_UART2_SRC, "clk_uart2_src", "gpll", 0,
+ RK3528_CLKSEL_CON(8), 2, 5, DFLAGS,
+ RK3528_CLKGATE_CON(1), 2, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART2_FRAC, "clk_uart2_frac", "clk_uart2_src", CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(9), 0,
+ RK3528_CLKGATE_CON(1), 3, GFLAGS,
+ &rk3528_uart2_fracmux),
+ GATE(SCLK_UART2, "sclk_uart2", "clk_uart2", 0,
+ RK3528_CLKGATE_CON(1), 4, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_UART3_SRC, "clk_uart3_src", "gpll", 0,
+ RK3528_CLKSEL_CON(10), 2, 5, DFLAGS,
+ RK3528_CLKGATE_CON(1), 5, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART3_FRAC, "clk_uart3_frac", "clk_uart3_src", CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(11), 0,
+ RK3528_CLKGATE_CON(1), 6, GFLAGS,
+ &rk3528_uart3_fracmux),
+ GATE(SCLK_UART3, "sclk_uart3", "clk_uart3", 0,
+ RK3528_CLKGATE_CON(1), 7, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_UART4_SRC, "clk_uart4_src", "gpll", 0,
+ RK3528_CLKSEL_CON(12), 2, 5, DFLAGS,
+ RK3528_CLKGATE_CON(1), 8, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART4_FRAC, "clk_uart4_frac", "clk_uart4_src", CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(13), 0,
+ RK3528_CLKGATE_CON(1), 9, GFLAGS,
+ &rk3528_uart4_fracmux),
+ GATE(SCLK_UART4, "sclk_uart4", "clk_uart4", 0,
+ RK3528_CLKGATE_CON(1), 10, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_UART5_SRC, "clk_uart5_src", "gpll", 0,
+ RK3528_CLKSEL_CON(14), 2, 5, DFLAGS,
+ RK3528_CLKGATE_CON(1), 11, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART5_FRAC, "clk_uart5_frac", "clk_uart5_src", CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(15), 0,
+ RK3528_CLKGATE_CON(1), 12, GFLAGS,
+ &rk3528_uart5_fracmux),
+ GATE(SCLK_UART5, "sclk_uart5", "clk_uart5", 0,
+ RK3528_CLKGATE_CON(1), 13, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_UART6_SRC, "clk_uart6_src", "gpll", 0,
+ RK3528_CLKSEL_CON(16), 2, 5, DFLAGS,
+ RK3528_CLKGATE_CON(1), 14, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART6_FRAC, "clk_uart6_frac", "clk_uart6_src", CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(17), 0,
+ RK3528_CLKGATE_CON(1), 15, GFLAGS,
+ &rk3528_uart6_fracmux),
+ GATE(SCLK_UART6, "sclk_uart6", "clk_uart6", 0,
+ RK3528_CLKGATE_CON(2), 0, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_UART7_SRC, "clk_uart7_src", "gpll", 0,
+ RK3528_CLKSEL_CON(18), 2, 5, DFLAGS,
+ RK3528_CLKGATE_CON(2), 1, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART7_FRAC, "clk_uart7_frac", "clk_uart7_src", CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(19), 0,
+ RK3528_CLKGATE_CON(2), 2, GFLAGS,
+ &rk3528_uart7_fracmux),
+ GATE(SCLK_UART7, "sclk_uart7", "clk_uart7", 0,
+ RK3528_CLKGATE_CON(2), 3, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_I2S0_2CH_SRC, "clk_i2s0_2ch_src", "gpll", 0,
+ RK3528_CLKSEL_CON(20), 8, 5, DFLAGS,
+ RK3528_CLKGATE_CON(2), 5, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_I2S0_2CH_FRAC, "clk_i2s0_2ch_frac", "clk_i2s0_2ch_src", CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(21), 0,
+ RK3528_CLKGATE_CON(2), 6, GFLAGS,
+ &mclk_i2s0_2ch_sai_src_fracmux),
+ GATE(MCLK_I2S0_2CH_SAI_SRC, "mclk_i2s0_2ch_sai_src", "mclk_i2s0_2ch_sai_src_pre", 0,
+ RK3528_CLKGATE_CON(2), 7, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_I2S1_8CH_SRC, "clk_i2s1_8ch_src", "gpll", 0,
+ RK3528_CLKSEL_CON(24), 3, 5, DFLAGS,
+ RK3528_CLKGATE_CON(2), 11, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_I2S1_8CH_FRAC, "clk_i2s1_8ch_frac", "clk_i2s1_8ch_src", CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(25), 0,
+ RK3528_CLKGATE_CON(2), 12, GFLAGS,
+ &mclk_i2s1_8ch_sai_src_fracmux),
+ GATE(MCLK_I2S1_8CH_SAI_SRC, "mclk_i2s1_8ch_sai_src", "mclk_i2s1_8ch_sai_src_pre", 0,
+ RK3528_CLKGATE_CON(2), 13, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_I2S2_2CH_SRC, "clk_i2s2_2ch_src", "gpll", 0,
+ RK3528_CLKSEL_CON(26), 3, 5, DFLAGS,
+ RK3528_CLKGATE_CON(2), 14, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_I2S2_2CH_FRAC, "clk_i2s2_2ch_frac", "clk_i2s2_2ch_src", CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(27), 0,
+ RK3528_CLKGATE_CON(2), 15, GFLAGS,
+ &mclk_i2s2_2ch_sai_src_fracmux),
+ GATE(MCLK_I2S2_2CH_SAI_SRC, "mclk_i2s2_2ch_sai_src", "mclk_i2s2_2ch_sai_src_pre", 0,
+ RK3528_CLKGATE_CON(3), 0, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_I2S3_8CH_SRC, "clk_i2s3_8ch_src", "gpll", 0,
+ RK3528_CLKSEL_CON(22), 3, 5, DFLAGS,
+ RK3528_CLKGATE_CON(2), 8, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_I2S3_8CH_FRAC, "clk_i2s3_8ch_frac", "clk_i2s3_8ch_src", CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(23), 0,
+ RK3528_CLKGATE_CON(2), 9, GFLAGS,
+ &mclk_i2s3_8ch_sai_src_fracmux),
+ GATE(MCLK_I2S3_8CH_SAI_SRC, "mclk_i2s3_8ch_sai_src", "mclk_i2s3_8ch_sai_src_pre", 0,
+ RK3528_CLKGATE_CON(2), 10, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_SPDIF_SRC, "clk_spdif_src", "gpll", 0,
+ RK3528_CLKSEL_CON(30), 2, 5, DFLAGS,
+ RK3528_CLKGATE_CON(3), 4, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_SPDIF_FRAC, "clk_spdif_frac", "clk_spdif_src", CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(31), 0,
+ RK3528_CLKGATE_CON(3), 5, GFLAGS,
+ &mclk_spdif_src_fracmux),
+ GATE(MCLK_SPDIF_SRC, "mclk_spdif_src", "mclk_spdif_src_pre", 0,
+ RK3528_CLKGATE_CON(3), 6, GFLAGS),
+
+ /* bus */
+ COMPOSITE_NODIV(ACLK_BUS_M_ROOT, "aclk_bus_m_root", mux_300m_200m_100m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(43), 12, 2, MFLAGS,
+ RK3528_CLKGATE_CON(8), 7, GFLAGS),
+ GATE(ACLK_GIC, "aclk_gic", "aclk_bus_m_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(9), 1, GFLAGS),
+
+ COMPOSITE_NODIV(ACLK_BUS_ROOT, "aclk_bus_root", mux_200m_100m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(43), 6, 2, MFLAGS,
+ RK3528_CLKGATE_CON(8), 4, GFLAGS),
+ GATE(ACLK_SPINLOCK, "aclk_spinlock", "aclk_bus_root", 0,
+ RK3528_CLKGATE_CON(9), 2, GFLAGS),
+ GATE(ACLK_DMAC, "aclk_dmac", "aclk_bus_root", 0,
+ RK3528_CLKGATE_CON(9), 4, GFLAGS),
+ GATE(ACLK_DCF, "aclk_dcf", "aclk_bus_root", 0,
+ RK3528_CLKGATE_CON(11), 11, GFLAGS),
+ COMPOSITE(ACLK_BUS_VOPGL_ROOT, "aclk_bus_vopgl_root", mux_gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(43), 3, 1, MFLAGS, 0, 3, DFLAGS,
+ RK3528_CLKGATE_CON(8), 0, GFLAGS),
+ COMPOSITE_NODIV(ACLK_BUS_H_ROOT, "aclk_bus_h_root", mux_500m_200m_100m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(43), 4, 2, MFLAGS,
+ RK3528_CLKGATE_CON(8), 2, GFLAGS),
+ GATE(ACLK_DMA2DDR, "aclk_dma2ddr", "aclk_bus_h_root", 0,
+ RK3528_CLKGATE_CON(10), 14, GFLAGS),
+
+ COMPOSITE_NODIV(HCLK_BUS_ROOT, "hclk_bus_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(43), 8, 2, MFLAGS,
+ RK3528_CLKGATE_CON(8), 5, GFLAGS),
+
+ COMPOSITE_NODIV(PCLK_BUS_ROOT, "pclk_bus_root", mux_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(43), 10, 2, MFLAGS,
+ RK3528_CLKGATE_CON(8), 6, GFLAGS),
+ GATE(PCLK_DFT2APB, "pclk_dft2apb", "pclk_bus_root", 0,
+ RK3528_CLKGATE_CON(8), 13, GFLAGS),
+ GATE(PCLK_BUS_GRF, "pclk_bus_grf", "pclk_bus_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(8), 15, GFLAGS),
+ GATE(PCLK_TIMER, "pclk_timer", "pclk_bus_root", 0,
+ RK3528_CLKGATE_CON(9), 5, GFLAGS),
+ GATE(PCLK_JDBCK_DAP, "pclk_jdbck_dap", "pclk_bus_root", 0,
+ RK3528_CLKGATE_CON(9), 12, GFLAGS),
+ GATE(PCLK_WDT_NS, "pclk_wdt_ns", "pclk_bus_root", 0,
+ RK3528_CLKGATE_CON(9), 15, GFLAGS),
+ GATE(PCLK_UART0, "pclk_uart0", "pclk_bus_root", 0,
+ RK3528_CLKGATE_CON(10), 7, GFLAGS),
+ GATE(PCLK_PWM0, "pclk_pwm0", "pclk_bus_root", 0,
+ RK3528_CLKGATE_CON(11), 4, GFLAGS),
+ GATE(PCLK_PWM1, "pclk_pwm1", "pclk_bus_root", 0,
+ RK3528_CLKGATE_CON(11), 7, GFLAGS),
+ GATE(PCLK_DMA2DDR, "pclk_dma2ddr", "pclk_bus_root", 0,
+ RK3528_CLKGATE_CON(10), 13, GFLAGS),
+ GATE(PCLK_SCR, "pclk_scr", "pclk_bus_root", 0,
+ RK3528_CLKGATE_CON(11), 10, GFLAGS),
+ GATE(PCLK_INTMUX, "pclk_intmux", "pclk_bus_root", CLK_IGNORE_UNUSED,
+ RK3528_CLKGATE_CON(11), 12, GFLAGS),
+
+ COMPOSITE_NODIV(CLK_PWM0, "clk_pwm0", mux_100m_50m_24m_p, 0,
+ RK3528_CLKSEL_CON(44), 6, 2, MFLAGS,
+ RK3528_CLKGATE_CON(11), 5, GFLAGS),
+ COMPOSITE_NODIV(CLK_PWM1, "clk_pwm1", mux_100m_50m_24m_p, 0,
+ RK3528_CLKSEL_CON(44), 8, 2, MFLAGS,
+ RK3528_CLKGATE_CON(11), 8, GFLAGS),
+
+ GATE(CLK_CAPTURE_PWM1, "clk_capture_pwm1", "xin24m", 0,
+ RK3528_CLKGATE_CON(11), 9, GFLAGS),
+ GATE(CLK_CAPTURE_PWM0, "clk_capture_pwm0", "xin24m", 0,
+ RK3528_CLKGATE_CON(11), 6, GFLAGS),
+ GATE(CLK_JDBCK_DAP, "clk_jdbck_dap", "xin24m", 0,
+ RK3528_CLKGATE_CON(9), 13, GFLAGS),
+ GATE(TCLK_WDT_NS, "tclk_wdt_ns", "xin24m", 0,
+ RK3528_CLKGATE_CON(10), 0, GFLAGS),
+
+ GATE(CLK_TIMER_ROOT, "clk_timer_root", "xin24m", 0,
+ RK3528_CLKGATE_CON(8), 9, GFLAGS),
+ GATE(CLK_TIMER0, "clk_timer0", "clk_timer_root", 0,
+ RK3528_CLKGATE_CON(9), 6, GFLAGS),
+ GATE(CLK_TIMER1, "clk_timer1", "clk_timer_root", 0,
+ RK3528_CLKGATE_CON(9), 7, GFLAGS),
+ GATE(CLK_TIMER2, "clk_timer2", "clk_timer_root", 0,
+ RK3528_CLKGATE_CON(9), 8, GFLAGS),
+ GATE(CLK_TIMER3, "clk_timer3", "clk_timer_root", 0,
+ RK3528_CLKGATE_CON(9), 9, GFLAGS),
+ GATE(CLK_TIMER4, "clk_timer4", "clk_timer_root", 0,
+ RK3528_CLKGATE_CON(9), 10, GFLAGS),
+ GATE(CLK_TIMER5, "clk_timer5", "clk_timer_root", 0,
+ RK3528_CLKGATE_CON(9), 11, GFLAGS),
+
+ /* pmu */
+ GATE(HCLK_PMU_ROOT, "hclk_pmu_root", "clk_100m_src", CLK_IGNORE_UNUSED,
+ RK3528_PMU_CLKGATE_CON(0), 1, GFLAGS),
+ GATE(PCLK_PMU_ROOT, "pclk_pmu_root", "clk_100m_src", CLK_IGNORE_UNUSED,
+ RK3528_PMU_CLKGATE_CON(0), 0, GFLAGS),
+
+ GATE(FCLK_MCU, "fclk_mcu", "hclk_pmu_root", 0,
+ RK3528_PMU_CLKGATE_CON(0), 7, GFLAGS),
+ GATE(HCLK_PMU_SRAM, "hclk_pmu_sram", "hclk_pmu_root", CLK_IS_CRITICAL,
+ RK3528_PMU_CLKGATE_CON(5), 4, GFLAGS),
+
+ GATE(PCLK_I2C2, "pclk_i2c2", "pclk_pmu_root", 0,
+ RK3528_PMU_CLKGATE_CON(0), 2, GFLAGS),
+ GATE(PCLK_PMU_HP_TIMER, "pclk_pmu_hp_timer", "pclk_pmu_root", 0,
+ RK3528_PMU_CLKGATE_CON(1), 2, GFLAGS),
+ GATE(PCLK_PMU_IOC, "pclk_pmu_ioc", "pclk_pmu_root", CLK_IS_CRITICAL,
+ RK3528_PMU_CLKGATE_CON(1), 5, GFLAGS),
+ GATE(PCLK_PMU_CRU, "pclk_pmu_cru", "pclk_pmu_root", CLK_IS_CRITICAL,
+ RK3528_PMU_CLKGATE_CON(1), 6, GFLAGS),
+ GATE(PCLK_PMU_GRF, "pclk_pmu_grf", "pclk_pmu_root", CLK_IS_CRITICAL,
+ RK3528_PMU_CLKGATE_CON(1), 7, GFLAGS),
+ GATE(PCLK_PMU_WDT, "pclk_pmu_wdt", "pclk_pmu_root", 0,
+ RK3528_PMU_CLKGATE_CON(1), 10, GFLAGS),
+ GATE(PCLK_PMU, "pclk_pmu", "pclk_pmu_root", CLK_IS_CRITICAL,
+ RK3528_PMU_CLKGATE_CON(0), 13, GFLAGS),
+ GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pmu_root", 0,
+ RK3528_PMU_CLKGATE_CON(0), 14, GFLAGS),
+ GATE(PCLK_OSCCHK, "pclk_oscchk", "pclk_pmu_root", 0,
+ RK3528_PMU_CLKGATE_CON(0), 9, GFLAGS),
+ GATE(PCLK_PMU_MAILBOX, "pclk_pmu_mailbox", "pclk_pmu_root", 0,
+ RK3528_PMU_CLKGATE_CON(1), 12, GFLAGS),
+ GATE(PCLK_SCRKEYGEN, "pclk_scrkeygen", "pclk_pmu_root", 0,
+ RK3528_PMU_CLKGATE_CON(1), 15, GFLAGS),
+ GATE(PCLK_PVTM_PMU, "pclk_pvtm_pmu", "pclk_pmu_root", 0,
+ RK3528_PMU_CLKGATE_CON(5), 1, GFLAGS),
+
+ COMPOSITE_NODIV(CLK_I2C2, "clk_i2c2", clk_i2c2_p, 0,
+ RK3528_PMU_CLKSEL_CON(0), 0, 2, MFLAGS,
+ RK3528_PMU_CLKGATE_CON(0), 3, GFLAGS),
+
+ GATE(CLK_REFOUT, "clk_refout", "xin24m", 0,
+ RK3528_PMU_CLKGATE_CON(2), 4, GFLAGS),
+ COMPOSITE_NOMUX(CLK_PVTM_PMU, "clk_pvtm_pmu", "xin24m", 0,
+ RK3528_PMU_CLKSEL_CON(5), 0, 5, DFLAGS,
+ RK3528_PMU_CLKGATE_CON(5), 0, GFLAGS),
+
+ COMPOSITE_FRAC(XIN_OSC0_DIV, "xin_osc0_div", "xin24m", 0,
+ RK3528_PMU_CLKSEL_CON(1), 0,
+ RK3528_PMU_CLKGATE_CON(1), 0, GFLAGS),
+ /* clk_32k is generated internally; there is no input path for an external 32 kHz oscillator */
+ MUX(CLK_DEEPSLOW, "clk_32k", clk_32k_p, CLK_IS_CRITICAL,
+ RK3528_PMU_CLKSEL_CON(2), 0, 1, MFLAGS),
+ GATE(RTC_CLK_MCU, "rtc_clk_mcu", "clk_32k", 0,
+ RK3528_PMU_CLKGATE_CON(0), 8, GFLAGS),
+ GATE(CLK_DDR_FAIL_SAFE, "clk_ddr_fail_safe", "xin24m", CLK_IGNORE_UNUSED,
+ RK3528_PMU_CLKGATE_CON(1), 1, GFLAGS),
+
+ COMPOSITE_NODIV(DBCLK_GPIO0, "dbclk_gpio0", mux_24m_32k_p, 0,
+ RK3528_PMU_CLKSEL_CON(0), 2, 1, MFLAGS,
+ RK3528_PMU_CLKGATE_CON(0), 15, GFLAGS),
+ COMPOSITE_NODIV(TCLK_PMU_WDT, "tclk_pmu_wdt", mux_24m_32k_p, 0,
+ RK3528_PMU_CLKSEL_CON(2), 1, 1, MFLAGS,
+ RK3528_PMU_CLKGATE_CON(1), 11, GFLAGS),
+
+ /* core */
+ COMPOSITE_NOMUX(ACLK_M_CORE_BIU, "aclk_m_core", "armclk", CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(39), 11, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RK3528_CLKGATE_CON(5), 12, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_DBG, "pclk_dbg", "armclk", CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(40), 1, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RK3528_CLKGATE_CON(5), 13, GFLAGS),
+ GATE(PCLK_CPU_ROOT, "pclk_cpu_root", "pclk_dbg", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(6), 1, GFLAGS),
+ GATE(PCLK_CORE_GRF, "pclk_core_grf", "pclk_cpu_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(6), 2, GFLAGS),
+
+ /* ddr */
+ GATE(CLK_DDRC_SRC, "clk_ddrc_src", "dpll", CLK_IS_CRITICAL,
+ RK3528_DDRPHY_CLKGATE_CON(0), 0, GFLAGS),
+ GATE(CLK_DDR_PHY, "clk_ddr_phy", "dpll", CLK_IS_CRITICAL,
+ RK3528_DDRPHY_CLKGATE_CON(0), 1, GFLAGS),
+
+ COMPOSITE_NODIV(PCLK_DDR_ROOT, "pclk_ddr_root", mux_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(90), 0, 2, MFLAGS,
+ RK3528_CLKGATE_CON(45), 0, GFLAGS),
+ GATE(PCLK_DDRMON, "pclk_ddrmon", "pclk_ddr_root", CLK_IGNORE_UNUSED,
+ RK3528_CLKGATE_CON(45), 3, GFLAGS),
+ GATE(PCLK_DDR_HWLP, "pclk_ddr_hwlp", "pclk_ddr_root", CLK_IGNORE_UNUSED,
+ RK3528_CLKGATE_CON(45), 8, GFLAGS),
+ GATE(CLK_TIMER_DDRMON, "clk_timer_ddrmon", "xin24m", CLK_IGNORE_UNUSED,
+ RK3528_CLKGATE_CON(45), 4, GFLAGS),
+
+ GATE(PCLK_DDRC, "pclk_ddrc", "pclk_ddr_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(45), 2, GFLAGS),
+ GATE(PCLK_DDR_GRF, "pclk_ddr_grf", "pclk_ddr_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(45), 6, GFLAGS),
+ GATE(PCLK_DDRPHY, "pclk_ddrphy", "pclk_ddr_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(45), 9, GFLAGS),
+
+ GATE(ACLK_DDR_UPCTL, "aclk_ddr_upctl", "clk_ddrc_src", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(45), 11, GFLAGS),
+ GATE(CLK_DDR_UPCTL, "clk_ddr_upctl", "clk_ddrc_src", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(45), 12, GFLAGS),
+ GATE(CLK_DDRMON, "clk_ddrmon", "clk_ddrc_src", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(45), 13, GFLAGS),
+ GATE(ACLK_DDR_SCRAMBLE, "aclk_ddr_scramble", "clk_ddrc_src", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(45), 14, GFLAGS),
+ GATE(ACLK_SPLIT, "aclk_split", "clk_ddrc_src", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(45), 15, GFLAGS),
+
+ /* gpu */
+ COMPOSITE_NODIV(ACLK_GPU_ROOT, "aclk_gpu_root", mux_500m_300m_100m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(76), 0, 2, MFLAGS,
+ RK3528_CLKGATE_CON(34), 0, GFLAGS),
+ COMPOSITE_NODIV(ACLK_GPU, "aclk_gpu", aclk_gpu_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(76), 6, 1, MFLAGS,
+ RK3528_CLKGATE_CON(34), 7, GFLAGS),
+ GATE(ACLK_GPU_MALI, "aclk_gpu_mali", "aclk_gpu", 0,
+ RK3528_CLKGATE_CON(34), 8, GFLAGS),
+ COMPOSITE_NODIV(PCLK_GPU_ROOT, "pclk_gpu_root", mux_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(76), 4, 2, MFLAGS,
+ RK3528_CLKGATE_CON(34), 2, GFLAGS),
+
+ /* rkvdec */
+ COMPOSITE_NODIV(ACLK_RKVDEC_ROOT_NDFT, "aclk_rkvdec_root", mux_339m_200m_100m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(88), 6, 2, MFLAGS,
+ RK3528_CLKGATE_CON(44), 3, GFLAGS),
+ COMPOSITE_NODIV(HCLK_RKVDEC_ROOT, "hclk_rkvdec_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(88), 4, 2, MFLAGS,
+ RK3528_CLKGATE_CON(44), 2, GFLAGS),
+ GATE(PCLK_DDRPHY_CRU, "pclk_ddrphy_cru", "hclk_rkvdec_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(44), 4, GFLAGS),
+ GATE(HCLK_RKVDEC, "hclk_rkvdec", "hclk_rkvdec_root", 0,
+ RK3528_CLKGATE_CON(44), 9, GFLAGS),
+ COMPOSITE_NODIV(CLK_HEVC_CA_RKVDEC, "clk_hevc_ca_rkvdec", mux_600m_300m_200m_24m_p, 0,
+ RK3528_CLKSEL_CON(88), 11, 2, MFLAGS,
+ RK3528_CLKGATE_CON(44), 11, GFLAGS),
+ MUX(ACLK_RKVDEC_PVTMUX_ROOT, "aclk_rkvdec_pvtmux_root", aclk_rkvdec_pvtmux_root_p, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(88), 13, 1, MFLAGS),
+ GATE(ACLK_RKVDEC, "aclk_rkvdec", "aclk_rkvdec_pvtmux_root", 0,
+ RK3528_CLKGATE_CON(44), 8, GFLAGS),
+
+ /* rkvenc */
+ COMPOSITE_NODIV(ACLK_RKVENC_ROOT, "aclk_rkvenc_root", mux_300m_200m_100m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(79), 2, 2, MFLAGS,
+ RK3528_CLKGATE_CON(36), 1, GFLAGS),
+ GATE(ACLK_RKVENC, "aclk_rkvenc", "aclk_rkvenc_root", 0,
+ RK3528_CLKGATE_CON(36), 7, GFLAGS),
+
+ COMPOSITE_NODIV(PCLK_RKVENC_ROOT, "pclk_rkvenc_root", mux_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(79), 4, 2, MFLAGS,
+ RK3528_CLKGATE_CON(36), 2, GFLAGS),
+ GATE(PCLK_RKVENC_IOC, "pclk_rkvenc_ioc", "pclk_rkvenc_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(37), 10, GFLAGS),
+ GATE(PCLK_RKVENC_GRF, "pclk_rkvenc_grf", "pclk_rkvenc_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(38), 6, GFLAGS),
+ GATE(PCLK_I2C1, "pclk_i2c1", "pclk_rkvenc_root", 0,
+ RK3528_CLKGATE_CON(36), 11, GFLAGS),
+ GATE(PCLK_I2C0, "pclk_i2c0", "pclk_rkvenc_root", 0,
+ RK3528_CLKGATE_CON(36), 13, GFLAGS),
+ GATE(PCLK_SPI0, "pclk_spi0", "pclk_rkvenc_root", 0,
+ RK3528_CLKGATE_CON(37), 2, GFLAGS),
+ GATE(PCLK_GPIO4, "pclk_gpio4", "pclk_rkvenc_root", 0,
+ RK3528_CLKGATE_CON(37), 8, GFLAGS),
+ GATE(PCLK_UART1, "pclk_uart1", "pclk_rkvenc_root", 0,
+ RK3528_CLKGATE_CON(38), 2, GFLAGS),
+ GATE(PCLK_UART3, "pclk_uart3", "pclk_rkvenc_root", 0,
+ RK3528_CLKGATE_CON(38), 4, GFLAGS),
+ GATE(PCLK_CAN0, "pclk_can0", "pclk_rkvenc_root", 0,
+ RK3528_CLKGATE_CON(38), 7, GFLAGS),
+ GATE(PCLK_CAN1, "pclk_can1", "pclk_rkvenc_root", 0,
+ RK3528_CLKGATE_CON(38), 9, GFLAGS),
+
+ COMPOSITE_NODIV(MCLK_PDM, "mclk_pdm", mux_150m_100m_24m_p, 0,
+ RK3528_CLKSEL_CON(80), 12, 2, MFLAGS,
+ RK3528_CLKGATE_CON(38), 1, GFLAGS),
+ COMPOSITE(CLK_CAN0, "clk_can0", mux_gpll_cpll_p, 0,
+ RK3528_CLKSEL_CON(81), 6, 1, MFLAGS, 0, 6, DFLAGS,
+ RK3528_CLKGATE_CON(38), 8, GFLAGS),
+ COMPOSITE(CLK_CAN1, "clk_can1", mux_gpll_cpll_p, 0,
+ RK3528_CLKSEL_CON(81), 13, 1, MFLAGS, 7, 6, DFLAGS,
+ RK3528_CLKGATE_CON(38), 10, GFLAGS),
+
+ COMPOSITE_NODIV(HCLK_RKVENC_ROOT, "hclk_rkvenc_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(79), 0, 2, MFLAGS,
+ RK3528_CLKGATE_CON(36), 0, GFLAGS),
+ GATE(HCLK_SAI_I2S1, "hclk_sai_i2s1", "hclk_rkvenc_root", 0,
+ RK3528_CLKGATE_CON(36), 9, GFLAGS),
+ GATE(HCLK_SPDIF, "hclk_spdif", "hclk_rkvenc_root", 0,
+ RK3528_CLKGATE_CON(37), 14, GFLAGS),
+ GATE(HCLK_PDM, "hclk_pdm", "hclk_rkvenc_root", 0,
+ RK3528_CLKGATE_CON(38), 0, GFLAGS),
+ GATE(HCLK_RKVENC, "hclk_rkvenc", "hclk_rkvenc_root", 0,
+ RK3528_CLKGATE_CON(36), 6, GFLAGS),
+
+ COMPOSITE_NODIV(CLK_CORE_RKVENC, "clk_core_rkvenc", mux_300m_200m_100m_24m_p, 0,
+ RK3528_CLKSEL_CON(79), 6, 2, MFLAGS,
+ RK3528_CLKGATE_CON(36), 8, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C0, "clk_i2c0", mux_200m_100m_50m_24m_p, 0,
+ RK3528_CLKSEL_CON(79), 11, 2, MFLAGS,
+ RK3528_CLKGATE_CON(36), 14, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C1, "clk_i2c1", mux_200m_100m_50m_24m_p, 0,
+ RK3528_CLKSEL_CON(79), 9, 2, MFLAGS,
+ RK3528_CLKGATE_CON(36), 12, GFLAGS),
+
+ COMPOSITE_NODIV(CLK_SPI0, "clk_spi0", mux_200m_100m_50m_24m_p, 0,
+ RK3528_CLKSEL_CON(79), 13, 2, MFLAGS,
+ RK3528_CLKGATE_CON(37), 3, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI_I2S1, "mclk_sai_i2s1", mclk_sai_i2s1_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(79), 8, 1, MFLAGS,
+ RK3528_CLKGATE_CON(36), 10, GFLAGS),
+ GATE(DBCLK_GPIO4, "dbclk_gpio4", "xin24m", 0,
+ RK3528_CLKGATE_CON(37), 9, GFLAGS),
+
+ /* vo */
+ COMPOSITE_NODIV(HCLK_VO_ROOT, "hclk_vo_root", mux_150m_100m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(83), 2, 2, MFLAGS,
+ RK3528_CLKGATE_CON(39), 1, GFLAGS),
+ GATE(HCLK_VOP, "hclk_vop", "hclk_vo_root", 0,
+ RK3528_CLKGATE_CON(40), 2, GFLAGS),
+ GATE(HCLK_USBHOST, "hclk_usbhost", "hclk_vo_root", 0,
+ RK3528_CLKGATE_CON(43), 3, GFLAGS),
+ GATE(HCLK_JPEG_DECODER, "hclk_jpeg_decoder", "hclk_vo_root", 0,
+ RK3528_CLKGATE_CON(41), 7, GFLAGS),
+ GATE(HCLK_VDPP, "hclk_vdpp", "hclk_vo_root", 0,
+ RK3528_CLKGATE_CON(39), 10, GFLAGS),
+ GATE(HCLK_CVBS, "hclk_cvbs", "hclk_vo_root", 0,
+ RK3528_CLKGATE_CON(41), 3, GFLAGS),
+ GATE(HCLK_USBHOST_ARB, "hclk_usbhost_arb", "hclk_vo_root", 0,
+ RK3528_CLKGATE_CON(43), 4, GFLAGS),
+ GATE(HCLK_SAI_I2S3, "hclk_sai_i2s3", "hclk_vo_root", 0,
+ RK3528_CLKGATE_CON(42), 1, GFLAGS),
+ GATE(HCLK_HDCP, "hclk_hdcp", "hclk_vo_root", 0,
+ RK3528_CLKGATE_CON(41), 1, GFLAGS),
+ GATE(HCLK_RGA2E, "hclk_rga2e", "hclk_vo_root", 0,
+ RK3528_CLKGATE_CON(39), 7, GFLAGS),
+ GATE(HCLK_SDMMC0, "hclk_sdmmc0", "hclk_vo_root", 0,
+ RK3528_CLKGATE_CON(42), 9, GFLAGS),
+ GATE(HCLK_HDCP_KEY, "hclk_hdcp_key", "hclk_vo_root", 0,
+ RK3528_CLKGATE_CON(40), 15, GFLAGS),
+
+ COMPOSITE_NODIV(ACLK_VO_L_ROOT, "aclk_vo_l_root", mux_150m_100m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(84), 1, 2, MFLAGS,
+ RK3528_CLKGATE_CON(41), 8, GFLAGS),
+ GATE(ACLK_MAC_VO, "aclk_gmac0", "aclk_vo_l_root", 0,
+ RK3528_CLKGATE_CON(41), 10, GFLAGS),
+
+ COMPOSITE_NODIV(PCLK_VO_ROOT, "pclk_vo_root", mux_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(83), 4, 2, MFLAGS,
+ RK3528_CLKGATE_CON(39), 2, GFLAGS),
+ GATE(PCLK_MAC_VO, "pclk_gmac0", "pclk_vo_root", 0,
+ RK3528_CLKGATE_CON(41), 11, GFLAGS),
+ GATE(PCLK_VCDCPHY, "pclk_vcdcphy", "pclk_vo_root", 0,
+ RK3528_CLKGATE_CON(42), 4, GFLAGS),
+ GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_vo_root", 0,
+ RK3528_CLKGATE_CON(42), 5, GFLAGS),
+ GATE(PCLK_VO_IOC, "pclk_vo_ioc", "pclk_vo_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(42), 7, GFLAGS),
+ GATE(PCLK_OTPC_NS, "pclk_otpc_ns", "pclk_vo_root", 0,
+ RK3528_CLKGATE_CON(42), 11, GFLAGS),
+ GATE(PCLK_UART4, "pclk_uart4", "pclk_vo_root", 0,
+ RK3528_CLKGATE_CON(43), 7, GFLAGS),
+ GATE(PCLK_I2C4, "pclk_i2c4", "pclk_vo_root", 0,
+ RK3528_CLKGATE_CON(43), 9, GFLAGS),
+ GATE(PCLK_I2C7, "pclk_i2c7", "pclk_vo_root", 0,
+ RK3528_CLKGATE_CON(43), 11, GFLAGS),
+
+ GATE(PCLK_USBPHY, "pclk_usbphy", "pclk_vo_root", 0,
+ RK3528_CLKGATE_CON(43), 13, GFLAGS),
+
+ GATE(PCLK_VO_GRF, "pclk_vo_grf", "pclk_vo_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(39), 13, GFLAGS),
+ GATE(PCLK_CRU, "pclk_cru", "pclk_vo_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(39), 15, GFLAGS),
+ GATE(PCLK_HDMI, "pclk_hdmi", "pclk_vo_root", 0,
+ RK3528_CLKGATE_CON(40), 6, GFLAGS),
+ GATE(PCLK_HDMIPHY, "pclk_hdmiphy", "pclk_vo_root", 0,
+ RK3528_CLKGATE_CON(40), 14, GFLAGS),
+ GATE(PCLK_HDCP, "pclk_hdcp", "pclk_vo_root", 0,
+ RK3528_CLKGATE_CON(41), 2, GFLAGS),
+
+ COMPOSITE_NODIV(CLK_CORE_VDPP, "clk_core_vdpp", mux_339m_200m_100m_24m_p, 0,
+ RK3528_CLKSEL_CON(83), 10, 2, MFLAGS,
+ RK3528_CLKGATE_CON(39), 12, GFLAGS),
+ COMPOSITE_NODIV(CLK_CORE_RGA2E, "clk_core_rga2e", mux_339m_200m_100m_24m_p, 0,
+ RK3528_CLKSEL_CON(83), 8, 2, MFLAGS,
+ RK3528_CLKGATE_CON(39), 9, GFLAGS),
+ COMPOSITE_NODIV(ACLK_JPEG_ROOT, "aclk_jpeg_root", mux_339m_200m_100m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(84), 9, 2, MFLAGS,
+ RK3528_CLKGATE_CON(41), 15, GFLAGS),
+ GATE(ACLK_JPEG_DECODER, "aclk_jpeg_decoder", "aclk_jpeg_root", 0,
+ RK3528_CLKGATE_CON(41), 6, GFLAGS),
+
+ COMPOSITE_NODIV(ACLK_VO_ROOT, "aclk_vo_root", mux_339m_200m_100m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(83), 0, 2, MFLAGS,
+ RK3528_CLKGATE_CON(39), 0, GFLAGS),
+ GATE(ACLK_RGA2E, "aclk_rga2e", "aclk_vo_root", 0,
+ RK3528_CLKGATE_CON(39), 8, GFLAGS),
+ GATE(ACLK_VDPP, "aclk_vdpp", "aclk_vo_root", 0,
+ RK3528_CLKGATE_CON(39), 11, GFLAGS),
+ GATE(ACLK_HDCP, "aclk_hdcp", "aclk_vo_root", 0,
+ RK3528_CLKGATE_CON(41), 0, GFLAGS),
+
+ COMPOSITE(CCLK_SRC_SDMMC0, "cclk_src_sdmmc0", mux_gpll_cpll_xin24m_p, 0,
+ RK3528_CLKSEL_CON(85), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ RK3528_CLKGATE_CON(42), 8, GFLAGS),
+
+ COMPOSITE(ACLK_VOP_ROOT, "aclk_vop_root", mux_gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(83), 15, 1, MFLAGS, 12, 3, DFLAGS,
+ RK3528_CLKGATE_CON(40), 0, GFLAGS),
+ GATE(ACLK_VOP, "aclk_vop", "aclk_vop_root", 0,
+ RK3528_CLKGATE_CON(40), 5, GFLAGS),
+
+ COMPOSITE_NODIV(CLK_I2C4, "clk_i2c4", mux_200m_100m_50m_24m_p, 0,
+ RK3528_CLKSEL_CON(85), 13, 2, MFLAGS,
+ RK3528_CLKGATE_CON(43), 10, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C7, "clk_i2c7", mux_200m_100m_50m_24m_p, 0,
+ RK3528_CLKSEL_CON(86), 0, 2, MFLAGS,
+ RK3528_CLKGATE_CON(43), 12, GFLAGS),
+ GATE(DBCLK_GPIO2, "dbclk_gpio2", "xin24m", 0,
+ RK3528_CLKGATE_CON(42), 6, GFLAGS),
+
+ GATE(CLK_HDMIHDP0, "clk_hdmihdp0", "xin24m", 0,
+ RK3528_CLKGATE_CON(43), 2, GFLAGS),
+ GATE(CLK_MACPHY, "clk_macphy", "xin24m", 0,
+ RK3528_CLKGATE_CON(42), 3, GFLAGS),
+ GATE(CLK_REF_USBPHY, "clk_ref_usbphy", "xin24m", 0,
+ RK3528_CLKGATE_CON(43), 14, GFLAGS),
+ GATE(CLK_SBPI_OTPC_NS, "clk_sbpi_otpc_ns", "xin24m", 0,
+ RK3528_CLKGATE_CON(42), 12, GFLAGS),
+ FACTOR(CLK_USER_OTPC_NS, "clk_user_otpc_ns", "clk_sbpi_otpc_ns",
+ 0, 1, 2),
+
+ GATE(MCLK_SAI_I2S3, "mclk_sai_i2s3", "mclk_i2s3_8ch_sai_src", 0,
+ RK3528_CLKGATE_CON(42), 2, GFLAGS),
+ COMPOSITE_NODIV(DCLK_VOP0, "dclk_vop0", dclk_vop0_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ RK3528_CLKSEL_CON(84), 0, 1, MFLAGS,
+ RK3528_CLKGATE_CON(40), 3, GFLAGS),
+ GATE(DCLK_VOP1, "dclk_vop1", "dclk_vop_src1", CLK_SET_RATE_PARENT,
+ RK3528_CLKGATE_CON(40), 4, GFLAGS),
+ FACTOR_GATE(DCLK_CVBS, "dclk_cvbs", "dclk_vop1", 0, 1, 4,
+ RK3528_CLKGATE_CON(41), 4, GFLAGS),
+ GATE(DCLK_4X_CVBS, "dclk_4x_cvbs", "dclk_vop1", 0,
+ RK3528_CLKGATE_CON(41), 5, GFLAGS),
+
+ FACTOR_GATE(CLK_SFR_HDMI, "clk_sfr_hdmi", "dclk_vop_src1", 0, 1, 4,
+ RK3528_CLKGATE_CON(40), 7, GFLAGS),
+
+ GATE(CLK_SPDIF_HDMI, "clk_spdif_hdmi", "mclk_spdif_src", 0,
+ RK3528_CLKGATE_CON(40), 10, GFLAGS),
+ GATE(MCLK_SPDIF, "mclk_spdif", "mclk_spdif_src", 0,
+ RK3528_CLKGATE_CON(37), 15, GFLAGS),
+ GATE(CLK_CEC_HDMI, "clk_cec_hdmi", "clk_32k", 0,
+ RK3528_CLKGATE_CON(40), 8, GFLAGS),
+
+ /* vpu */
+ GATE(DBCLK_GPIO1, "dbclk_gpio1", "xin24m", 0,
+ RK3528_CLKGATE_CON(26), 5, GFLAGS),
+ GATE(DBCLK_GPIO3, "dbclk_gpio3", "xin24m", 0,
+ RK3528_CLKGATE_CON(27), 1, GFLAGS),
+ GATE(CLK_SUSPEND_USB3OTG, "clk_suspend_usb3otg", "xin24m", 0,
+ RK3528_CLKGATE_CON(33), 4, GFLAGS),
+ GATE(CLK_PCIE_AUX, "clk_pcie_aux", "xin24m", 0,
+ RK3528_CLKGATE_CON(30), 2, GFLAGS),
+ GATE(TCLK_EMMC, "tclk_emmc", "xin24m", 0,
+ RK3528_CLKGATE_CON(26), 3, GFLAGS),
+ GATE(CLK_REF_USB3OTG, "clk_ref_usb3otg", "xin24m", 0,
+ RK3528_CLKGATE_CON(33), 2, GFLAGS),
+ COMPOSITE(CCLK_SRC_SDIO0, "cclk_src_sdio0", mux_gpll_cpll_xin24m_p, 0,
+ RK3528_CLKSEL_CON(72), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ RK3528_CLKGATE_CON(32), 1, GFLAGS),
+
+ COMPOSITE_NODIV(PCLK_VPU_ROOT, "pclk_vpu_root", mux_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(61), 4, 2, MFLAGS,
+ RK3528_CLKGATE_CON(25), 5, GFLAGS),
+ GATE(PCLK_VPU_GRF, "pclk_vpu_grf", "pclk_vpu_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(25), 12, GFLAGS),
+ GATE(PCLK_CRU_PCIE, "pclk_cru_pcie", "pclk_vpu_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(25), 11, GFLAGS),
+ GATE(PCLK_UART6, "pclk_uart6", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(27), 11, GFLAGS),
+ GATE(PCLK_CAN2, "pclk_can2", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(32), 7, GFLAGS),
+ GATE(PCLK_SPI1, "pclk_spi1", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(27), 4, GFLAGS),
+ GATE(PCLK_CAN3, "pclk_can3", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(32), 9, GFLAGS),
+ GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(27), 0, GFLAGS),
+ GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(26), 4, GFLAGS),
+ GATE(PCLK_SARADC, "pclk_saradc", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(32), 11, GFLAGS),
+ GATE(PCLK_ACODEC, "pclk_acodec", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(26), 13, GFLAGS),
+ GATE(PCLK_UART7, "pclk_uart7", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(27), 13, GFLAGS),
+ GATE(PCLK_UART5, "pclk_uart5", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(27), 9, GFLAGS),
+ GATE(PCLK_TSADC, "pclk_tsadc", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(32), 14, GFLAGS),
+ GATE(PCLK_PCIE, "pclk_pcie", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(30), 1, GFLAGS),
+ GATE(PCLK_UART2, "pclk_uart2", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(27), 7, GFLAGS),
+ GATE(PCLK_VPU_IOC, "pclk_vpu_ioc", "pclk_vpu_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(26), 8, GFLAGS),
+ GATE(PCLK_PIPE_GRF, "pclk_pipe_grf", "pclk_vpu_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(30), 7, GFLAGS),
+ GATE(PCLK_I2C5, "pclk_i2c5", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(28), 1, GFLAGS),
+ GATE(PCLK_PCIE_PHY, "pclk_pcie_phy", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(30), 6, GFLAGS),
+ GATE(PCLK_I2C3, "pclk_i2c3", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(27), 15, GFLAGS),
+ GATE(PCLK_MAC_VPU, "pclk_gmac1", "pclk_vpu_root", CLK_IS_CRITICAL,
+ RK3528_CLKGATE_CON(28), 6, GFLAGS),
+ GATE(PCLK_I2C6, "pclk_i2c6", "pclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(28), 3, GFLAGS),
+
+ COMPOSITE_NODIV(ACLK_VPU_L_ROOT, "aclk_vpu_l_root", mux_200m_100m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(60), 0, 2, MFLAGS,
+ RK3528_CLKGATE_CON(25), 0, GFLAGS),
+ GATE(ACLK_EMMC, "aclk_emmc", "aclk_vpu_l_root", 0,
+ RK3528_CLKGATE_CON(26), 1, GFLAGS),
+ GATE(ACLK_MAC_VPU, "aclk_gmac1", "aclk_vpu_l_root", 0,
+ RK3528_CLKGATE_CON(28), 5, GFLAGS),
+ GATE(ACLK_PCIE, "aclk_pcie", "aclk_vpu_l_root", 0,
+ RK3528_CLKGATE_CON(30), 3, GFLAGS),
+
+ GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_vpu_l_root", 0,
+ RK3528_CLKGATE_CON(33), 1, GFLAGS),
+
+ COMPOSITE_NODIV(HCLK_VPU_ROOT, "hclk_vpu_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(61), 2, 2, MFLAGS,
+ RK3528_CLKGATE_CON(25), 4, GFLAGS),
+ GATE(HCLK_VPU, "hclk_vpu", "hclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(25), 10, GFLAGS),
+ GATE(HCLK_SFC, "hclk_sfc", "hclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(25), 13, GFLAGS),
+ GATE(HCLK_EMMC, "hclk_emmc", "hclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(26), 0, GFLAGS),
+ GATE(HCLK_SAI_I2S0, "hclk_sai_i2s0", "hclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(26), 9, GFLAGS),
+ GATE(HCLK_SAI_I2S2, "hclk_sai_i2s2", "hclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(26), 11, GFLAGS),
+
+ GATE(HCLK_PCIE_SLV, "hclk_pcie_slv", "hclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(30), 4, GFLAGS),
+ GATE(HCLK_PCIE_DBI, "hclk_pcie_dbi", "hclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(30), 5, GFLAGS),
+ GATE(HCLK_SDIO0, "hclk_sdio0", "hclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(32), 2, GFLAGS),
+ GATE(HCLK_SDIO1, "hclk_sdio1", "hclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(32), 4, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_GMAC1_VPU_25M, "clk_gmac1_25m", "ppll", 0,
+ RK3528_CLKSEL_CON(60), 2, 8, DFLAGS,
+ RK3528_CLKGATE_CON(25), 1, GFLAGS),
+ COMPOSITE_NOMUX(CLK_PPLL_125M_MATRIX, "clk_ppll_125m_src", "ppll", 0,
+ RK3528_CLKSEL_CON(60), 10, 5, DFLAGS,
+ RK3528_CLKGATE_CON(25), 2, GFLAGS),
+
+ COMPOSITE(CLK_CAN3, "clk_can3", mux_gpll_cpll_p, 0,
+ RK3528_CLKSEL_CON(73), 13, 1, MFLAGS, 7, 6, DFLAGS,
+ RK3528_CLKGATE_CON(32), 10, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C6, "clk_i2c6", mux_200m_100m_50m_24m_p, 0,
+ RK3528_CLKSEL_CON(64), 0, 2, MFLAGS,
+ RK3528_CLKGATE_CON(28), 4, GFLAGS),
+
+ COMPOSITE(SCLK_SFC, "sclk_sfc", mux_gpll_cpll_xin24m_p, 0,
+ RK3528_CLKSEL_CON(61), 12, 2, MFLAGS, 6, 6, DFLAGS,
+ RK3528_CLKGATE_CON(25), 14, GFLAGS),
+ COMPOSITE(CCLK_SRC_EMMC, "cclk_src_emmc", mux_gpll_cpll_xin24m_p, 0,
+ RK3528_CLKSEL_CON(62), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ RK3528_CLKGATE_CON(25), 15, GFLAGS),
+
+ COMPOSITE_NODIV(ACLK_VPU_ROOT, "aclk_vpu_root",
+ mux_300m_200m_100m_24m_p, CLK_IS_CRITICAL,
+ RK3528_CLKSEL_CON(61), 0, 2, MFLAGS,
+ RK3528_CLKGATE_CON(25), 3, GFLAGS),
+ GATE(ACLK_VPU, "aclk_vpu", "aclk_vpu_root", 0,
+ RK3528_CLKGATE_CON(25), 9, GFLAGS),
+
+ COMPOSITE_NODIV(CLK_SPI1, "clk_spi1", mux_200m_100m_50m_24m_p, 0,
+ RK3528_CLKSEL_CON(63), 10, 2, MFLAGS,
+ RK3528_CLKGATE_CON(27), 5, GFLAGS),
+ COMPOSITE(CCLK_SRC_SDIO1, "cclk_src_sdio1", mux_gpll_cpll_xin24m_p, 0,
+ RK3528_CLKSEL_CON(72), 14, 2, MFLAGS, 8, 6, DFLAGS,
+ RK3528_CLKGATE_CON(32), 3, GFLAGS),
+ COMPOSITE(CLK_CAN2, "clk_can2", mux_gpll_cpll_p, 0,
+ RK3528_CLKSEL_CON(73), 6, 1, MFLAGS, 0, 6, DFLAGS,
+ RK3528_CLKGATE_CON(32), 8, GFLAGS),
+ COMPOSITE_NOMUX(CLK_TSADC, "clk_tsadc", "xin24m", 0,
+ RK3528_CLKSEL_CON(74), 3, 5, DFLAGS,
+ RK3528_CLKGATE_CON(32), 15, GFLAGS),
+ COMPOSITE_NOMUX(CLK_SARADC, "clk_saradc", "xin24m", 0,
+ RK3528_CLKSEL_CON(74), 0, 3, DFLAGS,
+ RK3528_CLKGATE_CON(32), 12, GFLAGS),
+ COMPOSITE_NOMUX(CLK_TSADC_TSEN, "clk_tsadc_tsen", "xin24m", 0,
+ RK3528_CLKSEL_CON(74), 8, 5, DFLAGS,
+ RK3528_CLKGATE_CON(33), 0, GFLAGS),
+ COMPOSITE_NODIV(BCLK_EMMC, "bclk_emmc", mux_200m_100m_50m_24m_p, 0,
+ RK3528_CLKSEL_CON(62), 8, 2, MFLAGS,
+ RK3528_CLKGATE_CON(26), 2, GFLAGS),
+ COMPOSITE_NOMUX(MCLK_ACODEC_TX, "mclk_acodec_tx", "mclk_i2s2_2ch_sai_src", 0,
+ RK3528_CLKSEL_CON(63), 0, 8, DFLAGS,
+ RK3528_CLKGATE_CON(26), 14, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C3, "clk_i2c3", mux_200m_100m_50m_24m_p, 0,
+ RK3528_CLKSEL_CON(63), 12, 2, MFLAGS,
+ RK3528_CLKGATE_CON(28), 0, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C5, "clk_i2c5", mux_200m_100m_50m_24m_p, 0,
+ RK3528_CLKSEL_CON(63), 14, 2, MFLAGS,
+ RK3528_CLKGATE_CON(28), 2, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI_I2S0, "mclk_sai_i2s0", mclk_sai_i2s0_p, CLK_SET_RATE_PARENT,
+ RK3528_CLKSEL_CON(62), 10, 1, MFLAGS,
+ RK3528_CLKGATE_CON(26), 10, GFLAGS),
+ GATE(MCLK_SAI_I2S2, "mclk_sai_i2s2", "mclk_i2s2_2ch_sai_src", 0,
+ RK3528_CLKGATE_CON(26), 12, GFLAGS),
+
+ /* pcie */
+ COMPOSITE_NOMUX(CLK_PPLL_100M_MATRIX, "clk_ppll_100m_src", "ppll", CLK_IS_CRITICAL,
+ RK3528_PCIE_CLKSEL_CON(1), 2, 5, DFLAGS,
+ RK3528_PCIE_CLKGATE_CON(0), 1, GFLAGS),
+ COMPOSITE_NOMUX(CLK_PPLL_50M_MATRIX, "clk_ppll_50m_src", "ppll", CLK_IS_CRITICAL,
+ RK3528_PCIE_CLKSEL_CON(1), 7, 5, DFLAGS,
+ RK3528_PCIE_CLKGATE_CON(0), 2, GFLAGS),
+ MUX(CLK_REF_PCIE_INNER_PHY, "clk_ref_pcie_inner_phy", clk_ref_pcie_inner_phy_p, 0,
+ RK3528_PCIE_CLKSEL_CON(1), 13, 1, MFLAGS),
+ FACTOR(CLK_REF_PCIE_100M_PHY, "clk_ref_pcie_100m_phy", "clk_ppll_100m_src",
+ 0, 1, 1),
+
+ /* gmac */
+ DIV(CLK_GMAC0_SRC, "clk_gmac0_src", "gmac0", 0,
+ RK3528_CLKSEL_CON(84), 3, 6, DFLAGS),
+ GATE(CLK_GMAC0_TX, "clk_gmac0_tx", "clk_gmac0_src", 0,
+ RK3528_CLKGATE_CON(41), 13, GFLAGS),
+ GATE(CLK_GMAC0_RX, "clk_gmac0_rx", "clk_gmac0_src", 0,
+ RK3528_CLKGATE_CON(41), 14, GFLAGS),
+ GATE(CLK_GMAC0_RMII_50M, "clk_gmac0_rmii_50m", "gmac0", 0,
+ RK3528_CLKGATE_CON(41), 12, GFLAGS),
+
+ FACTOR(CLK_GMAC1_RMII_VPU, "clk_gmac1_50m", "clk_ppll_50m_src",
+ 0, 1, 1),
+ FACTOR(CLK_GMAC1_SRC_VPU, "clk_gmac1_125m", "clk_ppll_125m_src",
+ 0, 1, 1),
+};
+
+static int __init clk_rk3528_probe(struct platform_device *pdev)
+{
+ struct rockchip_clk_provider *ctx;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ unsigned long nr_branches = ARRAY_SIZE(rk3528_clk_branches);
+ unsigned long nr_clks;
+ void __iomem *reg_base;
+
+ nr_clks = rockchip_clk_find_max_clk_id(rk3528_clk_branches,
+ nr_branches) + 1;
+
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg_base))
+ return dev_err_probe(dev, PTR_ERR(reg_base),
+ "could not map cru region");
+
+ ctx = rockchip_clk_init(np, reg_base, nr_clks);
+ if (IS_ERR(ctx))
+ return dev_err_probe(dev, PTR_ERR(ctx),
+ "rockchip clk init failed");
+
+ rockchip_clk_register_plls(ctx, rk3528_pll_clks,
+ ARRAY_SIZE(rk3528_pll_clks),
+ RK3528_GRF_SOC_STATUS0);
+ rockchip_clk_register_armclk(ctx, ARMCLK, "armclk",
+ mux_armclk, ARRAY_SIZE(mux_armclk),
+ &rk3528_cpuclk_data, rk3528_cpuclk_rates,
+ ARRAY_SIZE(rk3528_cpuclk_rates));
+ rockchip_clk_register_branches(ctx, rk3528_clk_branches, nr_branches);
+
+ rk3528_rst_init(np, reg_base);
+
+ rockchip_register_restart_notifier(ctx, RK3528_GLB_SRST_FST, NULL);
+
+ rockchip_clk_of_add_provider(np, ctx);
+
+ return 0;
+}
+
+static const struct of_device_id clk_rk3528_match_table[] = {
+ { .compatible = "rockchip,rk3528-cru" },
+ { /* end */ }
+};
+
+static struct platform_driver clk_rk3528_driver = {
+ .driver = {
+ .name = "clk-rk3528",
+ .of_match_table = clk_rk3528_match_table,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver_probe(clk_rk3528_driver, clk_rk3528_probe);
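
Almost every branch in the new RK3528 driver relies on the CRU's "hiword mask" register convention, visible in the MFLAGS/DFLAGS/GFLAGS hiword flags and in the HIWORD_UPDATE() values of the cpuclk rate table: the upper 16 bits of a write act as a per-bit write enable for the lower 16, so a field can be changed without a read-modify-write cycle. The standalone sketch below shows that behaviour, using the 5-bit ACLK_M_CORE divider field at bit 11 from the table above as the example; the macro is a local restatement of the idea, not a copy of the kernel helper.

#include <stdint.h>
#include <stdio.h>

/*
 * Write 'val' into the field (mask << shift); only bits whose write-enable
 * bit (the same mask shifted into the upper half-word) is set may change.
 */
#define EX_HIWORD_UPDATE(val, mask, shift) \
	(((uint32_t)(val) << (shift)) | ((uint32_t)(mask) << ((shift) + 16)))

/* Model of how the hardware applies such a write to a CLKSEL/CLKGATE register */
static uint32_t ex_reg_write(uint32_t old, uint32_t wval)
{
	uint32_t wmask = wval >> 16;	/* which low bits are allowed to change */

	return (old & ~wmask) | (wval & wmask & 0xffff);
}

int main(void)
{
	uint32_t clksel = 0x0000ffff;	/* arbitrary starting value */

	/* Set the 5-bit divider field at bit 11 to 3 without touching the rest */
	clksel = ex_reg_write(clksel, EX_HIWORD_UPDATE(3, 0x1f, 11));
	printf("CLKSEL now 0x%08x\n", clksel);	/* 0x00001fff: field is 3 */

	return 0;
}

This is also the reason the hiword variants of the generic mux, divider and gate ops can update shared CLKSEL/CLKGATE words without wrapping a read-modify-write in a lock.
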
diff --git a/drivers/clk/rockchip/clk-rk3562.c b/drivers/clk/rockchip/clk-rk3562.c
new file mode 100644
index 000000000000..b8858e5d5530
--- /dev/null
+++ b/drivers/clk/rockchip/clk-rk3562.c
@@ -0,0 +1,1101 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Rockchip Electronics Co., Ltd.
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ * Author: Finley Xiao <finley.xiao@rock-chips.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/syscore_ops.h>
+#include <dt-bindings/clock/rockchip,rk3562-cru.h>
+#include "clk.h"
+
+#define RK3562_GRF_SOC_STATUS0 0x430
+#define ROCKCHIP_PLL_ALLOW_POWER_DOWN BIT(2)
+
+enum rk3562_plls {
+ apll, gpll, vpll, hpll, cpll, dpll,
+};
+
+static struct rockchip_pll_rate_table rk3562_pll_rates[] = {
+ /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */
+ RK3036_PLL_RATE(2208000000, 1, 92, 1, 1, 1, 0),
+ RK3036_PLL_RATE(2184000000, 1, 91, 1, 1, 1, 0),
+ RK3036_PLL_RATE(2160000000, 1, 90, 1, 1, 1, 0),
+ RK3036_PLL_RATE(2088000000, 1, 87, 1, 1, 1, 0),
+ RK3036_PLL_RATE(2064000000, 1, 86, 1, 1, 1, 0),
+ RK3036_PLL_RATE(2040000000, 1, 85, 1, 1, 1, 0),
+ RK3036_PLL_RATE(2016000000, 1, 84, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1992000000, 1, 83, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1920000000, 1, 80, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1896000000, 1, 79, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1800000000, 1, 75, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1704000000, 1, 71, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1608000000, 1, 67, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1600000000, 3, 200, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1584000000, 1, 132, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1560000000, 1, 130, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1536000000, 1, 128, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1512000000, 1, 126, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1488000000, 1, 124, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1464000000, 1, 122, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1440000000, 1, 120, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1416000000, 1, 118, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1400000000, 3, 350, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1392000000, 1, 116, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1368000000, 1, 114, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1344000000, 1, 112, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1320000000, 1, 110, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1296000000, 1, 108, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1272000000, 1, 106, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1248000000, 1, 104, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1200000000, 1, 100, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1188000000, 1, 99, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1104000000, 1, 92, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1100000000, 3, 275, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1008000000, 1, 84, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1000000000, 3, 250, 2, 1, 1, 0),
+ RK3036_PLL_RATE(912000000, 1, 76, 2, 1, 1, 0),
+ RK3036_PLL_RATE(816000000, 1, 68, 2, 1, 1, 0),
+ RK3036_PLL_RATE(800000000, 3, 200, 2, 1, 1, 0),
+ RK3036_PLL_RATE(700000000, 3, 350, 4, 1, 1, 0),
+ RK3036_PLL_RATE(696000000, 1, 116, 4, 1, 1, 0),
+ RK3036_PLL_RATE(600000000, 1, 100, 4, 1, 1, 0),
+ RK3036_PLL_RATE(594000000, 1, 99, 4, 1, 1, 0),
+ RK3036_PLL_RATE(500000000, 1, 125, 6, 1, 1, 0),
+ RK3036_PLL_RATE(408000000, 1, 68, 2, 2, 1, 0),
+ RK3036_PLL_RATE(312000000, 1, 78, 6, 1, 1, 0),
+ RK3036_PLL_RATE(216000000, 1, 72, 4, 2, 1, 0),
+ RK3036_PLL_RATE(200000000, 1, 100, 3, 4, 1, 0),
+ RK3036_PLL_RATE(148500000, 1, 99, 4, 4, 1, 0),
+ RK3036_PLL_RATE(100000000, 1, 150, 6, 6, 1, 0),
+ RK3036_PLL_RATE(96000000, 1, 96, 6, 4, 1, 0),
+ RK3036_PLL_RATE(74250000, 2, 99, 4, 4, 1, 0),
+ { /* sentinel */ },
+};
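
The rows above follow the usual RK3036/RK3328-style integer PLL relation, rate = 24 MHz * fbdiv / (refdiv * postdiv1 * postdiv2) when dsmpd is 1. A minimal stand-alone check of two rows, assuming that relation holds for this table, could look like:

#include <assert.h>

static unsigned long long rk3036_pll_rate(unsigned int refdiv, unsigned int fbdiv,
					   unsigned int postdiv1, unsigned int postdiv2)
{
	/* Integer mode (dsmpd == 1): the frac field is ignored here. */
	return 24000000ULL * fbdiv / (refdiv * postdiv1 * postdiv2);
}

int main(void)
{
	assert(rk3036_pll_rate(3, 200, 1, 1) == 1600000000);	/* 1600 MHz row */
	assert(rk3036_pll_rate(2, 99, 4, 4) == 74250000);	/* 74.25 MHz row */
	return 0;
}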
+
+PNAME(mux_pll_p) = { "xin24m" };
+PNAME(gpll_cpll_p) = { "gpll", "cpll" };
+PNAME(gpll_cpll_hpll_p) = { "gpll", "cpll", "hpll" };
+PNAME(gpll_cpll_pvtpll_dmyapll_p) = { "gpll", "cpll", "log_pvtpll", "dummy_apll" };
+PNAME(gpll_cpll_hpll_xin24m_p) = { "gpll", "cpll", "hpll", "xin24m" };
+PNAME(gpll_cpll_vpll_dmyhpll_p) = { "gpll", "cpll", "vpll", "dummy_hpll" };
+PNAME(gpll_dmyhpll_vpll_apll_p) = { "gpll", "dummy_hpll", "vpll", "apll" };
+PNAME(gpll_cpll_xin24m_p) = { "gpll", "cpll", "xin24m" };
+PNAME(gpll_cpll_xin24m_dmyapll_p) = { "gpll", "cpll", "xin24m", "dummy_apll" };
+PNAME(gpll_cpll_xin24m_dmyhpll_p) = { "gpll", "cpll", "xin24m", "dummy_hpll" };
+PNAME(vpll_dmyhpll_gpll_cpll_p) = { "vpll", "dummy_hpll", "gpll", "cpll" };
+PNAME(mux_xin24m_32k_p) = { "xin24m", "clk_rtc_32k" };
+PNAME(mux_50m_xin24m_p) = { "clk_matrix_50m_src", "xin24m" };
+PNAME(mux_100m_50m_xin24m_p) = { "clk_matrix_100m_src", "clk_matrix_50m_src", "xin24m" };
+PNAME(mux_125m_xin24m_p) = { "clk_matrix_125m_src", "xin24m" };
+PNAME(mux_200m_xin24m_32k_p) = { "clk_200m_pmu", "xin24m", "clk_rtc_32k" };
+PNAME(mux_200m_100m_p) = { "clk_matrix_200m_src", "clk_matrix_100m_src" };
+PNAME(mux_200m_100m_50m_xin24m_p) = { "clk_matrix_200m_src", "clk_matrix_100m_src", "clk_matrix_50m_src", "xin24m" };
+PNAME(clk_sai0_p) = { "clk_sai0_src", "clk_sai0_frac", "xin_osc0_half", "mclk_sai0_from_io" };
+PNAME(mclk_sai0_out2io_p) = { "mclk_sai0", "xin_osc0_half" };
+PNAME(clk_sai1_p) = { "clk_sai1_src", "clk_sai1_frac", "xin_osc0_half", "mclk_sai1_from_io" };
+PNAME(mclk_sai1_out2io_p) = { "mclk_sai1", "xin_osc0_half" };
+PNAME(clk_sai2_p) = { "clk_sai2_src", "clk_sai2_frac", "xin_osc0_half", "mclk_sai2_from_io" };
+PNAME(mclk_sai2_out2io_p) = { "mclk_sai2", "xin_osc0_half" };
+PNAME(clk_spdif_p) = { "clk_spdif_src", "clk_spdif_frac", "xin_osc0_half" };
+PNAME(clk_uart1_p) = { "clk_uart1_src", "clk_uart1_frac", "xin24m" };
+PNAME(clk_uart2_p) = { "clk_uart2_src", "clk_uart2_frac", "xin24m" };
+PNAME(clk_uart3_p) = { "clk_uart3_src", "clk_uart3_frac", "xin24m" };
+PNAME(clk_uart4_p) = { "clk_uart4_src", "clk_uart4_frac", "xin24m" };
+PNAME(clk_uart5_p) = { "clk_uart5_src", "clk_uart5_frac", "xin24m" };
+PNAME(clk_uart6_p) = { "clk_uart6_src", "clk_uart6_frac", "xin24m" };
+PNAME(clk_uart7_p) = { "clk_uart7_src", "clk_uart7_frac", "xin24m" };
+PNAME(clk_uart8_p) = { "clk_uart8_src", "clk_uart8_frac", "xin24m" };
+PNAME(clk_uart9_p) = { "clk_uart9_src", "clk_uart9_frac", "xin24m" };
+PNAME(clk_rtc32k_pmu_p) = { "clk_rtc32k_frac", "xin32k", "clk_32k_pvtm" };
+PNAME(clk_pmu1_uart0_p) = { "clk_pmu1_uart0_src", "clk_pmu1_uart0_frac", "xin24m" };
+PNAME(clk_pipephy_ref_p) = { "clk_pipephy_div", "clk_pipephy_xin24m" };
+PNAME(clk_usbphy_ref_p) = { "clk_usb2phy_xin24m", "clk_24m_sscsrc" };
+PNAME(clk_mipidsi_ref_p) = { "clk_mipidsiphy_xin24m", "clk_24m_sscsrc" };
+
+static struct rockchip_pll_clock rk3562_pll_clks[] __initdata = {
+ [apll] = PLL(pll_rk3328, PLL_APLL, "apll", mux_pll_p,
+ 0, RK3562_PLL_CON(0),
+ RK3562_MODE_CON, 0, 0,
+ ROCKCHIP_PLL_ALLOW_POWER_DOWN, rk3562_pll_rates),
+ [gpll] = PLL(pll_rk3328, PLL_GPLL, "gpll", mux_pll_p,
+ 0, RK3562_PLL_CON(24),
+ RK3562_MODE_CON, 2, 3, 0, rk3562_pll_rates),
+ [vpll] = PLL(pll_rk3328, PLL_VPLL, "vpll", mux_pll_p,
+ 0, RK3562_PLL_CON(32),
+ RK3562_MODE_CON, 6, 4,
+ ROCKCHIP_PLL_ALLOW_POWER_DOWN, rk3562_pll_rates),
+ [hpll] = PLL(pll_rk3328, PLL_HPLL, "hpll", mux_pll_p,
+ 0, RK3562_PLL_CON(40),
+ RK3562_MODE_CON, 8, 5,
+ ROCKCHIP_PLL_ALLOW_POWER_DOWN, rk3562_pll_rates),
+ [cpll] = PLL(pll_rk3328, PLL_CPLL, "cpll", mux_pll_p,
+ 0, RK3562_PMU1_PLL_CON(0),
+ RK3562_PMU1_MODE_CON, 0, 2, 0, rk3562_pll_rates),
+ [dpll] = PLL(pll_rk3328, PLL_DPLL, "dpll", mux_pll_p,
+ CLK_IS_CRITICAL, RK3562_SUBDDR_PLL_CON(0),
+ RK3562_SUBDDR_MODE_CON, 0, 1, 0, NULL),
+};
+
+#define MFLAGS CLK_MUX_HIWORD_MASK
+#define DFLAGS CLK_DIVIDER_HIWORD_MASK
+#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+
+static struct rockchip_clk_branch rk3562_clk_sai0_fracmux __initdata =
+ MUX(CLK_SAI0, "clk_sai0", clk_sai0_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(3), 6, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_clk_sai1_fracmux __initdata =
+ MUX(CLK_SAI1, "clk_sai1", clk_sai1_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(5), 6, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_clk_sai2_fracmux __initdata =
+ MUX(CLK_SAI2, "clk_sai2", clk_sai2_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(8), 6, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_clk_spdif_fracmux __initdata =
+ MUX(CLK_SPDIF, "clk_spdif", clk_spdif_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(15), 6, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_clk_uart1_fracmux __initdata =
+ MUX(CLK_UART1, "clk_uart1", clk_uart1_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(21), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_clk_uart2_fracmux __initdata =
+ MUX(CLK_UART2, "clk_uart2", clk_uart2_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(23), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_clk_uart3_fracmux __initdata =
+ MUX(CLK_UART3, "clk_uart3", clk_uart3_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(25), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_clk_uart4_fracmux __initdata =
+ MUX(CLK_UART4, "clk_uart4", clk_uart4_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(27), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_clk_uart5_fracmux __initdata =
+ MUX(CLK_UART5, "clk_uart5", clk_uart5_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(29), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_clk_uart6_fracmux __initdata =
+ MUX(CLK_UART6, "clk_uart6", clk_uart6_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(31), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_clk_uart7_fracmux __initdata =
+ MUX(CLK_UART7, "clk_uart7", clk_uart7_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(33), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_clk_uart8_fracmux __initdata =
+ MUX(CLK_UART8, "clk_uart8", clk_uart8_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(35), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_clk_uart9_fracmux __initdata =
+ MUX(CLK_UART9, "clk_uart9", clk_uart9_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(37), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_rtc32k_pmu_fracmux __initdata =
+ MUX(CLK_RTC_32K, "clk_rtc_32k", clk_rtc32k_pmu_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ RK3562_PMU0_CLKSEL_CON(1), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_clk_pmu1_uart0_fracmux __initdata =
+ MUX(CLK_PMU1_UART0, "clk_pmu1_uart0", clk_pmu1_uart0_p, CLK_SET_RATE_PARENT,
+ RK3562_PMU1_CLKSEL_CON(2), 6, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3562_clk_branches[] __initdata = {
+ /*
+ * CRU Clock-Architecture
+ */
+ /* PD_TOP */
+ COMPOSITE(CLK_MATRIX_50M_SRC, "clk_matrix_50m_src", gpll_cpll_p, 0,
+ RK3562_CLKSEL_CON(0), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3562_CLKGATE_CON(0), 0, GFLAGS),
+ COMPOSITE(CLK_MATRIX_100M_SRC, "clk_matrix_100m_src", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3562_CLKSEL_CON(0), 15, 1, MFLAGS, 8, 4, DFLAGS,
+ RK3562_CLKGATE_CON(0), 1, GFLAGS),
+ COMPOSITE(CLK_MATRIX_125M_SRC, "clk_matrix_125m_src", gpll_cpll_p, 0,
+ RK3562_CLKSEL_CON(1), 7, 1, MFLAGS, 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(0), 2, GFLAGS),
+ COMPOSITE(CLK_MATRIX_200M_SRC, "clk_matrix_200m_src", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3562_CLKSEL_CON(2), 7, 1, MFLAGS, 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(0), 4, GFLAGS),
+ COMPOSITE(CLK_MATRIX_300M_SRC, "clk_matrix_300m_src", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3562_CLKSEL_CON(3), 7, 1, MFLAGS, 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(0), 6, GFLAGS),
+ COMPOSITE(ACLK_TOP, "aclk_top", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3562_CLKSEL_CON(5), 7, 1, MFLAGS, 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(1), 0, GFLAGS),
+ COMPOSITE(ACLK_TOP_VIO, "aclk_top_vio", gpll_cpll_p, 0,
+ RK3562_CLKSEL_CON(5), 15, 1, MFLAGS, 8, 4, DFLAGS,
+ RK3562_CLKGATE_CON(1), 1, GFLAGS),
+ COMPOSITE(CLK_24M_SSCSRC, "clk_24m_sscsrc", vpll_dmyhpll_gpll_cpll_p, 0,
+ RK3562_CLKSEL_CON(6), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ RK3562_CLKGATE_CON(1), 9, GFLAGS),
+ COMPOSITE(CLK_CAM0_OUT2IO, "clk_cam0_out2io", gpll_cpll_xin24m_dmyapll_p, 0,
+ RK3562_CLKSEL_CON(8), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ RK3562_CLKGATE_CON(1), 12, GFLAGS),
+ COMPOSITE(CLK_CAM1_OUT2IO, "clk_cam1_out2io", gpll_cpll_xin24m_dmyapll_p, 0,
+ RK3562_CLKSEL_CON(8), 14, 2, MFLAGS, 8, 6, DFLAGS,
+ RK3562_CLKGATE_CON(1), 13, GFLAGS),
+ COMPOSITE(CLK_CAM2_OUT2IO, "clk_cam2_out2io", gpll_cpll_xin24m_dmyapll_p, 0,
+ RK3562_CLKSEL_CON(9), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ RK3562_CLKGATE_CON(1), 14, GFLAGS),
+ COMPOSITE(CLK_CAM3_OUT2IO, "clk_cam3_out2io", gpll_cpll_xin24m_dmyapll_p, 0,
+ RK3562_CLKSEL_CON(9), 14, 2, MFLAGS, 8, 6, DFLAGS,
+ RK3562_CLKGATE_CON(1), 15, GFLAGS),
+ FACTOR(0, "xin_osc0_half", "xin24m", 0, 1, 2),
+
+ /* PD_BUS */
+ COMPOSITE(ACLK_BUS, "aclk_bus", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3562_CLKSEL_CON(40), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3562_CLKGATE_CON(18), 0, GFLAGS),
+ COMPOSITE(HCLK_BUS, "hclk_bus", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3562_CLKSEL_CON(40), 15, 1, MFLAGS, 8, 6, DFLAGS,
+ RK3562_CLKGATE_CON(18), 1, GFLAGS),
+ COMPOSITE(PCLK_BUS, "pclk_bus", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3562_CLKSEL_CON(41), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3562_CLKGATE_CON(18), 2, GFLAGS),
+ GATE(PCLK_I2C1, "pclk_i2c1", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(19), 0, GFLAGS),
+ GATE(PCLK_I2C2, "pclk_i2c2", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(19), 1, GFLAGS),
+ GATE(PCLK_I2C3, "pclk_i2c3", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(19), 2, GFLAGS),
+ GATE(PCLK_I2C4, "pclk_i2c4", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(19), 3, GFLAGS),
+ GATE(PCLK_I2C5, "pclk_i2c5", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(19), 4, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C, "clk_i2c", mux_200m_100m_50m_xin24m_p, 0,
+ RK3562_CLKSEL_CON(41), 8, 2, MFLAGS,
+ RK3562_CLKGATE_CON(19), 5, GFLAGS),
+ GATE(CLK_I2C1, "clk_i2c1", "clk_i2c", 0,
+ RK3562_CLKGATE_CON(19), 6, GFLAGS),
+ GATE(CLK_I2C2, "clk_i2c2", "clk_i2c", 0,
+ RK3562_CLKGATE_CON(19), 7, GFLAGS),
+ GATE(CLK_I2C3, "clk_i2c3", "clk_i2c", 0,
+ RK3562_CLKGATE_CON(19), 8, GFLAGS),
+ GATE(CLK_I2C4, "clk_i2c4", "clk_i2c", 0,
+ RK3562_CLKGATE_CON(19), 9, GFLAGS),
+ GATE(CLK_I2C5, "clk_i2c5", "clk_i2c", 0,
+ RK3562_CLKGATE_CON(19), 10, GFLAGS),
+ COMPOSITE_NODIV(DCLK_BUS_GPIO, "dclk_bus_gpio", mux_xin24m_32k_p, 0,
+ RK3562_CLKSEL_CON(41), 15, 1, MFLAGS,
+ RK3562_CLKGATE_CON(20), 4, GFLAGS),
+ GATE(DCLK_BUS_GPIO3, "dclk_bus_gpio3", "dclk_bus_gpio", 0,
+ RK3562_CLKGATE_CON(20), 5, GFLAGS),
+ GATE(DCLK_BUS_GPIO4, "dclk_bus_gpio4", "dclk_bus_gpio", 0,
+ RK3562_CLKGATE_CON(20), 6, GFLAGS),
+ GATE(PCLK_TIMER, "pclk_timer", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(21), 0, GFLAGS),
+ GATE(CLK_TIMER0, "clk_timer0", "xin24m", 0,
+ RK3562_CLKGATE_CON(21), 1, GFLAGS),
+ GATE(CLK_TIMER1, "clk_timer1", "xin24m", 0,
+ RK3562_CLKGATE_CON(21), 2, GFLAGS),
+ GATE(CLK_TIMER2, "clk_timer2", "xin24m", 0,
+ RK3562_CLKGATE_CON(21), 3, GFLAGS),
+ GATE(CLK_TIMER3, "clk_timer3", "xin24m", 0,
+ RK3562_CLKGATE_CON(21), 4, GFLAGS),
+ GATE(CLK_TIMER4, "clk_timer4", "xin24m", 0,
+ RK3562_CLKGATE_CON(21), 5, GFLAGS),
+ GATE(CLK_TIMER5, "clk_timer5", "xin24m", 0,
+ RK3562_CLKGATE_CON(21), 6, GFLAGS),
+ GATE(PCLK_STIMER, "pclk_stimer", "pclk_bus", CLK_IGNORE_UNUSED,
+ RK3562_CLKGATE_CON(21), 7, GFLAGS),
+ GATE(CLK_STIMER0, "clk_stimer0", "xin24m", CLK_IGNORE_UNUSED,
+ RK3562_CLKGATE_CON(21), 8, GFLAGS),
+ GATE(CLK_STIMER1, "clk_stimer1", "xin24m", CLK_IGNORE_UNUSED,
+ RK3562_CLKGATE_CON(21), 9, GFLAGS),
+ GATE(PCLK_WDTNS, "pclk_wdtns", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(22), 0, GFLAGS),
+ GATE(CLK_WDTNS, "clk_wdtns", "xin24m", 0,
+ RK3562_CLKGATE_CON(22), 1, GFLAGS),
+ GATE(PCLK_GRF, "pclk_grf", "pclk_bus", CLK_IGNORE_UNUSED,
+ RK3562_CLKGATE_CON(22), 2, GFLAGS),
+ GATE(PCLK_SGRF, "pclk_sgrf", "pclk_bus", CLK_IGNORE_UNUSED,
+ RK3562_CLKGATE_CON(22), 3, GFLAGS),
+ GATE(PCLK_MAILBOX, "pclk_mailbox", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(22), 4, GFLAGS),
+ GATE(PCLK_INTC, "pclk_intc", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(22), 5, GFLAGS),
+ GATE(ACLK_BUS_GIC400, "aclk_bus_gic400", "aclk_bus", CLK_IGNORE_UNUSED,
+ RK3562_CLKGATE_CON(22), 6, GFLAGS),
+ GATE(ACLK_BUS_SPINLOCK, "aclk_bus_spinlock", "aclk_bus", 0,
+ RK3562_CLKGATE_CON(23), 0, GFLAGS),
+ GATE(ACLK_DCF, "aclk_dcf", "aclk_bus", CLK_IGNORE_UNUSED,
+ RK3562_CLKGATE_CON(23), 1, GFLAGS),
+ GATE(PCLK_DCF, "pclk_dcf", "pclk_bus", CLK_IGNORE_UNUSED,
+ RK3562_CLKGATE_CON(23), 2, GFLAGS),
+ GATE(FCLK_BUS_CM0_CORE, "fclk_bus_cm0_core", "hclk_bus", 0,
+ RK3562_CLKGATE_CON(23), 3, GFLAGS),
+ GATE(CLK_BUS_CM0_RTC, "clk_bus_cm0_rtc", "clk_rtc_32k", 0,
+ RK3562_CLKGATE_CON(23), 4, GFLAGS),
+ GATE(HCLK_ICACHE, "hclk_icache", "hclk_bus", CLK_IGNORE_UNUSED,
+ RK3562_CLKGATE_CON(23), 8, GFLAGS),
+ GATE(HCLK_DCACHE, "hclk_dcache", "hclk_bus", CLK_IGNORE_UNUSED,
+ RK3562_CLKGATE_CON(23), 9, GFLAGS),
+ GATE(PCLK_TSADC, "pclk_tsadc", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(24), 0, GFLAGS),
+ COMPOSITE_NOMUX(CLK_TSADC, "clk_tsadc", "xin24m", 0,
+ RK3562_CLKSEL_CON(43), 0, 11, DFLAGS,
+ RK3562_CLKGATE_CON(24), 1, GFLAGS),
+ COMPOSITE_NOMUX(CLK_TSADC_TSEN, "clk_tsadc_tsen", "xin24m", 0,
+ RK3562_CLKSEL_CON(43), 11, 5, DFLAGS,
+ RK3562_CLKGATE_CON(24), 3, GFLAGS),
+ GATE(PCLK_DFT2APB, "pclk_dft2apb", "pclk_bus", CLK_IGNORE_UNUSED,
+ RK3562_CLKGATE_CON(24), 4, GFLAGS),
+ COMPOSITE_NOMUX(CLK_SARADC_VCCIO156, "clk_saradc_vccio156", "xin24m", 0,
+ RK3562_CLKSEL_CON(44), 0, 12, DFLAGS,
+ RK3562_CLKGATE_CON(24), 9, GFLAGS),
+ GATE(PCLK_GMAC, "pclk_gmac", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(25), 0, GFLAGS),
+ GATE(ACLK_GMAC, "aclk_gmac", "aclk_bus", 0,
+ RK3562_CLKGATE_CON(25), 1, GFLAGS),
+ COMPOSITE_NODIV(CLK_GMAC_125M_CRU_I, "clk_gmac_125m_cru_i", mux_125m_xin24m_p, 0,
+ RK3562_CLKSEL_CON(45), 8, 1, MFLAGS,
+ RK3562_CLKGATE_CON(25), 2, GFLAGS),
+ COMPOSITE_NODIV(CLK_GMAC_50M_CRU_I, "clk_gmac_50m_cru_i", mux_50m_xin24m_p, 0,
+ RK3562_CLKSEL_CON(45), 7, 1, MFLAGS,
+ RK3562_CLKGATE_CON(25), 3, GFLAGS),
+ COMPOSITE(CLK_GMAC_ETH_OUT2IO, "clk_gmac_eth_out2io", gpll_cpll_p, 0,
+ RK3562_CLKSEL_CON(46), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3562_CLKGATE_CON(25), 4, GFLAGS),
+ GATE(PCLK_APB2ASB_VCCIO156, "pclk_apb2asb_vccio156", "pclk_bus", CLK_IS_CRITICAL,
+ RK3562_CLKGATE_CON(25), 5, GFLAGS),
+ GATE(PCLK_TO_VCCIO156, "pclk_to_vccio156", "pclk_bus", CLK_IS_CRITICAL,
+ RK3562_CLKGATE_CON(25), 6, GFLAGS),
+ GATE(PCLK_DSIPHY, "pclk_dsiphy", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(25), 8, GFLAGS),
+ GATE(PCLK_DSITX, "pclk_dsitx", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(25), 9, GFLAGS),
+ GATE(PCLK_CPU_EMA_DET, "pclk_cpu_ema_det", "pclk_bus", CLK_IGNORE_UNUSED,
+ RK3562_CLKGATE_CON(25), 10, GFLAGS),
+ GATE(PCLK_HASH, "pclk_hash", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(25), 11, GFLAGS),
+ GATE(PCLK_TOPCRU, "pclk_topcru", "pclk_bus", CLK_IGNORE_UNUSED,
+ RK3562_CLKGATE_CON(25), 15, GFLAGS),
+ GATE(PCLK_ASB2APB_VCCIO156, "pclk_asb2apb_vccio156", "pclk_to_vccio156", CLK_IS_CRITICAL,
+ RK3562_CLKGATE_CON(26), 0, GFLAGS),
+ GATE(PCLK_IOC_VCCIO156, "pclk_ioc_vccio156", "pclk_to_vccio156", CLK_IS_CRITICAL,
+ RK3562_CLKGATE_CON(26), 1, GFLAGS),
+ GATE(PCLK_GPIO3_VCCIO156, "pclk_gpio3_vccio156", "pclk_to_vccio156", 0,
+ RK3562_CLKGATE_CON(26), 2, GFLAGS),
+ GATE(PCLK_GPIO4_VCCIO156, "pclk_gpio4_vccio156", "pclk_to_vccio156", 0,
+ RK3562_CLKGATE_CON(26), 3, GFLAGS),
+ GATE(PCLK_SARADC_VCCIO156, "pclk_saradc_vccio156", "pclk_to_vccio156", 0,
+ RK3562_CLKGATE_CON(26), 4, GFLAGS),
+ GATE(PCLK_MAC100, "pclk_mac100", "pclk_bus", 0,
+ RK3562_CLKGATE_CON(27), 0, GFLAGS),
+ GATE(ACLK_MAC100, "aclk_mac100", "aclk_bus", 0,
+ RK3562_CLKGATE_CON(27), 1, GFLAGS),
+ COMPOSITE_NODIV(CLK_MAC100_50M_MATRIX, "clk_mac100_50m_matrix", mux_50m_xin24m_p, 0,
+ RK3562_CLKSEL_CON(47), 7, 1, MFLAGS,
+ RK3562_CLKGATE_CON(27), 2, GFLAGS),
+
+ /* PD_CORE */
+ COMPOSITE_NOMUX(0, "aclk_core_pre", "scmi_clk_cpu", CLK_IGNORE_UNUSED,
+ RK3562_CLKSEL_CON(11), 0, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RK3562_CLKGATE_CON(4), 3, GFLAGS),
+ COMPOSITE_NOMUX(0, "pclk_dbg_pre", "scmi_clk_cpu", CLK_IGNORE_UNUSED,
+ RK3562_CLKSEL_CON(12), 0, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RK3562_CLKGATE_CON(4), 5, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_CORE, "hclk_core", "gpll", CLK_IS_CRITICAL,
+ RK3562_CLKSEL_CON(13), 0, 6, DFLAGS,
+ RK3562_CLKGATE_CON(5), 2, GFLAGS),
+ GATE(0, "pclk_dbg_daplite", "pclk_dbg_pre", CLK_IGNORE_UNUSED,
+ RK3562_CLKGATE_CON(4), 10, GFLAGS),
+
+ /* PD_DDR */
+ FACTOR_GATE(0, "clk_gpll_mux_to_ddr", "gpll", 0, 1, 4,
+ RK3328_CLKGATE_CON(1), 6, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_DDR, "pclk_ddr", "clk_gpll_mux_to_ddr", CLK_IS_CRITICAL,
+ RK3562_DDR_CLKSEL_CON(1), 8, 5, DFLAGS,
+ RK3562_DDR_CLKGATE_CON(0), 3, GFLAGS),
+ COMPOSITE_NOMUX(CLK_MSCH_BRG_BIU, "clk_msch_brg_biu", "clk_gpll_mux_to_ddr", CLK_IS_CRITICAL,
+ RK3562_DDR_CLKSEL_CON(1), 0, 4, DFLAGS,
+ RK3562_DDR_CLKGATE_CON(0), 4, GFLAGS),
+ GATE(PCLK_DDR_HWLP, "pclk_ddr_hwlp", "pclk_ddr", CLK_IGNORE_UNUSED,
+ RK3562_DDR_CLKGATE_CON(0), 6, GFLAGS),
+ GATE(PCLK_DDR_UPCTL, "pclk_ddr_upctl", "pclk_ddr", CLK_IGNORE_UNUSED,
+ RK3562_DDR_CLKGATE_CON(0), 7, GFLAGS),
+ GATE(PCLK_DDR_PHY, "pclk_ddr_phy", "pclk_ddr", CLK_IGNORE_UNUSED,
+ RK3562_DDR_CLKGATE_CON(0), 8, GFLAGS),
+ GATE(PCLK_DDR_DFICTL, "pclk_ddr_dfictl", "pclk_ddr", CLK_IGNORE_UNUSED,
+ RK3562_DDR_CLKGATE_CON(0), 9, GFLAGS),
+ GATE(PCLK_DDR_DMA2DDR, "pclk_ddr_dma2ddr", "pclk_ddr", CLK_IGNORE_UNUSED,
+ RK3562_DDR_CLKGATE_CON(0), 10, GFLAGS),
+ GATE(PCLK_DDR_MON, "pclk_ddr_mon", "pclk_ddr", CLK_IGNORE_UNUSED,
+ RK3562_DDR_CLKGATE_CON(1), 0, GFLAGS),
+ GATE(TMCLK_DDR_MON, "tmclk_ddr_mon", "xin24m", CLK_IGNORE_UNUSED,
+ RK3562_DDR_CLKGATE_CON(1), 1, GFLAGS),
+ GATE(PCLK_DDR_GRF, "pclk_ddr_grf", "pclk_ddr", CLK_IGNORE_UNUSED,
+ RK3562_DDR_CLKGATE_CON(1), 2, GFLAGS),
+ GATE(PCLK_DDR_CRU, "pclk_ddr_cru", "pclk_ddr", CLK_IGNORE_UNUSED,
+ RK3562_DDR_CLKGATE_CON(1), 3, GFLAGS),
+ GATE(PCLK_SUBDDR_CRU, "pclk_subddr_cru", "pclk_ddr", CLK_IGNORE_UNUSED,
+ RK3562_DDR_CLKGATE_CON(1), 4, GFLAGS),
+
+ /* PD_GPU */
+ COMPOSITE(CLK_GPU_PRE, "clk_gpu_pre", gpll_cpll_p, 0,
+ RK3562_CLKSEL_CON(18), 7, 1, MFLAGS, 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(8), 0, GFLAGS),
+ COMPOSITE_NOMUX(ACLK_GPU_PRE, "aclk_gpu_pre", "clk_gpu_pre", 0,
+ RK3562_CLKSEL_CON(19), 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(8), 2, GFLAGS),
+ GATE(CLK_GPU, "clk_gpu", "clk_gpu_pre", 0,
+ RK3562_CLKGATE_CON(8), 4, GFLAGS),
+ COMPOSITE_NODIV(CLK_GPU_BRG, "clk_gpu_brg", mux_200m_100m_p, 0,
+ RK3562_CLKSEL_CON(19), 15, 1, MFLAGS,
+ RK3562_CLKGATE_CON(8), 8, GFLAGS),
+
+ /* PD_NPU */
+ COMPOSITE(CLK_NPU_PRE, "clk_npu_pre", gpll_cpll_p, 0,
+ RK3562_CLKSEL_CON(15), 7, 1, MFLAGS, 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(6), 0, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_NPU_PRE, "hclk_npu_pre", "clk_npu_pre", 0,
+ RK3562_CLKSEL_CON(16), 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(6), 1, GFLAGS),
+ GATE(ACLK_RKNN, "aclk_rknn", "clk_npu_pre", 0,
+ RK3562_CLKGATE_CON(6), 4, GFLAGS),
+ GATE(HCLK_RKNN, "hclk_rknn", "hclk_npu_pre", 0,
+ RK3562_CLKGATE_CON(6), 5, GFLAGS),
+
+ /* PD_PERI */
+ COMPOSITE(ACLK_PERI, "aclk_peri", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3562_PERI_CLKSEL_CON(0), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(1), 0, GFLAGS),
+ COMPOSITE(HCLK_PERI, "hclk_peri", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3562_PERI_CLKSEL_CON(0), 15, 1, MFLAGS, 8, 6, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(1), 1, GFLAGS),
+ COMPOSITE(PCLK_PERI, "pclk_peri", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3562_PERI_CLKSEL_CON(1), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(1), 2, GFLAGS),
+ GATE(PCLK_PERICRU, "pclk_pericru", "pclk_peri", CLK_IGNORE_UNUSED,
+ RK3562_PERI_CLKGATE_CON(1), 6, GFLAGS),
+ GATE(HCLK_SAI0, "hclk_sai0", "hclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(2), 0, GFLAGS),
+ COMPOSITE(CLK_SAI0_SRC, "clk_sai0_src", gpll_cpll_hpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(1), 14, 2, MFLAGS, 8, 6, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(2), 1, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_SAI0_FRAC, "clk_sai0_frac", "clk_sai0_src", CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(2), 0,
+ RK3562_PERI_CLKGATE_CON(2), 2, GFLAGS,
+ &rk3562_clk_sai0_fracmux),
+ GATE(MCLK_SAI0, "mclk_sai0", "clk_sai0", 0,
+ RK3562_PERI_CLKGATE_CON(2), 3, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI0_OUT2IO, "mclk_sai0_out2io", mclk_sai0_out2io_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(3), 5, 1, MFLAGS,
+ RK3562_PERI_CLKGATE_CON(2), 4, GFLAGS),
+ GATE(HCLK_SAI1, "hclk_sai1", "hclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(2), 5, GFLAGS),
+ COMPOSITE(CLK_SAI1_SRC, "clk_sai1_src", gpll_cpll_hpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(3), 14, 2, MFLAGS, 8, 6, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(2), 6, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_SAI1_FRAC, "clk_sai1_frac", "clk_sai1_src", CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(4), 0,
+ RK3562_PERI_CLKGATE_CON(2), 7, GFLAGS,
+ &rk3562_clk_sai1_fracmux),
+ GATE(MCLK_SAI1, "mclk_sai1", "clk_sai1", 0,
+ RK3562_PERI_CLKGATE_CON(2), 8, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI1_OUT2IO, "mclk_sai1_out2io", mclk_sai1_out2io_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(5), 5, 1, MFLAGS,
+ RK3562_PERI_CLKGATE_CON(2), 9, GFLAGS),
+ GATE(HCLK_SAI2, "hclk_sai2", "hclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(2), 10, GFLAGS),
+ COMPOSITE(CLK_SAI2_SRC, "clk_sai2_src", gpll_cpll_hpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(6), 14, 2, MFLAGS, 8, 6, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(2), 11, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_SAI2_FRAC, "clk_sai2_frac", "clk_sai2_src", CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(7), 0,
+ RK3562_PERI_CLKGATE_CON(2), 12, GFLAGS,
+ &rk3562_clk_sai2_fracmux),
+ GATE(MCLK_SAI2, "mclk_sai2", "clk_sai2", 0,
+ RK3562_PERI_CLKGATE_CON(2), 13, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI2_OUT2IO, "mclk_sai2_out2io", mclk_sai2_out2io_p, CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(8), 5, 1, MFLAGS,
+ RK3562_PERI_CLKGATE_CON(2), 14, GFLAGS),
+ GATE(HCLK_DSM, "hclk_dsm", "hclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(3), 1, GFLAGS),
+ GATE(CLK_DSM, "clk_dsm", "mclk_sai1", 0,
+ RK3562_PERI_CLKGATE_CON(3), 2, GFLAGS),
+ GATE(HCLK_PDM, "hclk_pdm", "hclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(3), 4, GFLAGS),
+ COMPOSITE(MCLK_PDM, "mclk_pdm", gpll_cpll_hpll_xin24m_p, 0,
+ RK3562_PERI_CLKSEL_CON(12), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(3), 5, GFLAGS),
+ GATE(HCLK_SPDIF, "hclk_spdif", "hclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(3), 8, GFLAGS),
+ COMPOSITE(CLK_SPDIF_SRC, "clk_spdif_src", gpll_cpll_hpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(13), 14, 2, MFLAGS, 8, 6, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(3), 9, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_SPDIF_FRAC, "clk_spdif_frac", "clk_spdif_src", CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(14), 0,
+ RK3562_PERI_CLKGATE_CON(3), 10, GFLAGS,
+ &rk3562_clk_spdif_fracmux),
+ GATE(MCLK_SPDIF, "mclk_spdif", "clk_spdif", 0,
+ RK3562_PERI_CLKGATE_CON(3), 11, GFLAGS),
+ GATE(HCLK_SDMMC0, "hclk_sdmmc0", "hclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(4), 0, GFLAGS),
+ COMPOSITE(CCLK_SDMMC0, "cclk_sdmmc0", gpll_cpll_xin24m_dmyhpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(16), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(4), 1, GFLAGS),
+ MMC(SCLK_SDMMC0_DRV, "sdmmc0_drv", "cclk_sdmmc0", RK3562_SDMMC0_CON0, 1),
+ MMC(SCLK_SDMMC0_SAMPLE, "sdmmc0_sample", "cclk_sdmmc0", RK3562_SDMMC0_CON1, 1),
+ GATE(HCLK_SDMMC1, "hclk_sdmmc1", "hclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(4), 2, GFLAGS),
+ COMPOSITE(CCLK_SDMMC1, "cclk_sdmmc1", gpll_cpll_xin24m_dmyhpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(17), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(4), 3, GFLAGS),
+ MMC(SCLK_SDMMC1_DRV, "sdmmc1_drv", "cclk_sdmmc1", RK3562_SDMMC1_CON0, 1),
+ MMC(SCLK_SDMMC1_SAMPLE, "sdmmc1_sample", "cclk_sdmmc1", RK3562_SDMMC1_CON1, 1),
+ GATE(HCLK_EMMC, "hclk_emmc", "hclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(4), 8, GFLAGS),
+ GATE(ACLK_EMMC, "aclk_emmc", "aclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(4), 9, GFLAGS),
+ COMPOSITE(CCLK_EMMC, "cclk_emmc", gpll_cpll_xin24m_dmyhpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(18), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(4), 10, GFLAGS),
+ COMPOSITE(BCLK_EMMC, "bclk_emmc", gpll_cpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(19), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(4), 11, GFLAGS),
+ GATE(TMCLK_EMMC, "tmclk_emmc", "xin24m", 0,
+ RK3562_PERI_CLKGATE_CON(4), 12, GFLAGS),
+ COMPOSITE(SCLK_SFC, "sclk_sfc", gpll_cpll_xin24m_p, 0,
+ RK3562_PERI_CLKSEL_CON(20), 8, 2, MFLAGS, 0, 8, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(4), 13, GFLAGS),
+ GATE(HCLK_SFC, "hclk_sfc", "hclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(4), 14, GFLAGS),
+ GATE(HCLK_USB2HOST, "hclk_usb2host", "hclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(5), 0, GFLAGS),
+ GATE(HCLK_USB2HOST_ARB, "hclk_usb2host_arb", "hclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(5), 1, GFLAGS),
+ GATE(PCLK_SPI1, "pclk_spi1", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(6), 0, GFLAGS),
+ COMPOSITE_NODIV(CLK_SPI1, "clk_spi1", mux_200m_100m_50m_xin24m_p, 0,
+ RK3562_PERI_CLKSEL_CON(20), 12, 2, MFLAGS,
+ RK3562_PERI_CLKGATE_CON(6), 1, GFLAGS),
+ GATE(SCLK_IN_SPI1, "sclk_in_spi1", "sclk_in_spi1_io", 0,
+ RK3562_PERI_CLKGATE_CON(6), 2, GFLAGS),
+ GATE(PCLK_SPI2, "pclk_spi2", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(6), 3, GFLAGS),
+ COMPOSITE_NODIV(CLK_SPI2, "clk_spi2", mux_200m_100m_50m_xin24m_p, 0,
+ RK3562_PERI_CLKSEL_CON(20), 14, 2, MFLAGS,
+ RK3562_PERI_CLKGATE_CON(6), 4, GFLAGS),
+ GATE(SCLK_IN_SPI2, "sclk_in_spi2", "sclk_in_spi2_io", 0,
+ RK3562_PERI_CLKGATE_CON(6), 5, GFLAGS),
+ GATE(PCLK_UART1, "pclk_uart1", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(7), 0, GFLAGS),
+ GATE(PCLK_UART2, "pclk_uart2", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(7), 1, GFLAGS),
+ GATE(PCLK_UART3, "pclk_uart3", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(7), 2, GFLAGS),
+ GATE(PCLK_UART4, "pclk_uart4", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(7), 3, GFLAGS),
+ GATE(PCLK_UART5, "pclk_uart5", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(7), 4, GFLAGS),
+ GATE(PCLK_UART6, "pclk_uart6", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(7), 5, GFLAGS),
+ GATE(PCLK_UART7, "pclk_uart7", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(7), 6, GFLAGS),
+ GATE(PCLK_UART8, "pclk_uart8", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(7), 7, GFLAGS),
+ GATE(PCLK_UART9, "pclk_uart9", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(7), 8, GFLAGS),
+ COMPOSITE(CLK_UART1_SRC, "clk_uart1_src", gpll_cpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(21), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(7), 9, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART1_FRAC, "clk_uart1_frac", "clk_uart1_src", CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(22), 0,
+ RK3562_PERI_CLKGATE_CON(7), 10, GFLAGS,
+ &rk3562_clk_uart1_fracmux),
+ GATE(SCLK_UART1, "sclk_uart1", "clk_uart1", 0,
+ RK3562_PERI_CLKGATE_CON(7), 11, GFLAGS),
+ COMPOSITE(CLK_UART2_SRC, "clk_uart2_src", gpll_cpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(23), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(7), 12, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART2_FRAC, "clk_uart2_frac", "clk_uart2_src", CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(24), 0,
+ RK3562_PERI_CLKGATE_CON(7), 13, GFLAGS,
+ &rk3562_clk_uart2_fracmux),
+ GATE(SCLK_UART2, "sclk_uart2", "clk_uart2", 0,
+ RK3562_PERI_CLKGATE_CON(7), 14, GFLAGS),
+ COMPOSITE(CLK_UART3_SRC, "clk_uart3_src", gpll_cpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(25), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(7), 15, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART3_FRAC, "clk_uart3_frac", "clk_uart3_src", CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(26), 0,
+ RK3562_PERI_CLKGATE_CON(8), 0, GFLAGS,
+ &rk3562_clk_uart3_fracmux),
+ GATE(SCLK_UART3, "sclk_uart3", "clk_uart3", 0,
+ RK3562_PERI_CLKGATE_CON(8), 1, GFLAGS),
+ COMPOSITE(CLK_UART4_SRC, "clk_uart4_src", gpll_cpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(27), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(8), 2, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART4_FRAC, "clk_uart4_frac", "clk_uart4_src", CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(28), 0,
+ RK3562_PERI_CLKGATE_CON(8), 3, GFLAGS,
+ &rk3562_clk_uart4_fracmux),
+ GATE(SCLK_UART4, "sclk_uart4", "clk_uart4", 0,
+ RK3562_PERI_CLKGATE_CON(8), 4, GFLAGS),
+ COMPOSITE(CLK_UART5_SRC, "clk_uart5_src", gpll_cpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(29), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(8), 5, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART5_FRAC, "clk_uart5_frac", "clk_uart5_src", CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(30), 0,
+ RK3562_PERI_CLKGATE_CON(8), 6, GFLAGS,
+ &rk3562_clk_uart5_fracmux),
+ GATE(SCLK_UART5, "sclk_uart5", "clk_uart5", 0,
+ RK3562_PERI_CLKGATE_CON(8), 7, GFLAGS),
+ COMPOSITE(CLK_UART6_SRC, "clk_uart6_src", gpll_cpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(31), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(8), 8, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART6_FRAC, "clk_uart6_frac", "clk_uart6_src", CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(32), 0,
+ RK3562_PERI_CLKGATE_CON(8), 9, GFLAGS,
+ &rk3562_clk_uart6_fracmux),
+ GATE(SCLK_UART6, "sclk_uart6", "clk_uart6", 0,
+ RK3562_PERI_CLKGATE_CON(8), 10, GFLAGS),
+ COMPOSITE(CLK_UART7_SRC, "clk_uart7_src", gpll_cpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(33), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(8), 11, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART7_FRAC, "clk_uart7_frac", "clk_uart7_src", CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(34), 0,
+ RK3562_PERI_CLKGATE_CON(8), 12, GFLAGS,
+ &rk3562_clk_uart7_fracmux),
+ GATE(SCLK_UART7, "sclk_uart7", "clk_uart7", 0,
+ RK3562_PERI_CLKGATE_CON(8), 13, GFLAGS),
+ COMPOSITE(CLK_UART8_SRC, "clk_uart8_src", gpll_cpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(35), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(8), 14, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART8_FRAC, "clk_uart8_frac", "clk_uart8_src", CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(36), 0,
+ RK3562_PERI_CLKGATE_CON(8), 15, GFLAGS,
+ &rk3562_clk_uart8_fracmux),
+ GATE(SCLK_UART8, "sclk_uart8", "clk_uart8", 0,
+ RK3562_PERI_CLKGATE_CON(9), 0, GFLAGS),
+ COMPOSITE(CLK_UART9_SRC, "clk_uart9_src", gpll_cpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(37), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(9), 1, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_UART9_FRAC, "clk_uart9_frac", "clk_uart9_src", CLK_SET_RATE_PARENT,
+ RK3562_PERI_CLKSEL_CON(38), 0,
+ RK3562_PERI_CLKGATE_CON(9), 2, GFLAGS,
+ &rk3562_clk_uart9_fracmux),
+ GATE(SCLK_UART9, "sclk_uart9", "clk_uart9", 0,
+ RK3562_PERI_CLKGATE_CON(9), 3, GFLAGS),
+ GATE(PCLK_PWM1_PERI, "pclk_pwm1_peri", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(10), 0, GFLAGS),
+ COMPOSITE_NODIV(CLK_PWM1_PERI, "clk_pwm1_peri", mux_100m_50m_xin24m_p, 0,
+ RK3562_PERI_CLKSEL_CON(40), 0, 2, MFLAGS,
+ RK3562_PERI_CLKGATE_CON(10), 1, GFLAGS),
+ GATE(CLK_CAPTURE_PWM1_PERI, "clk_capture_pwm1_peri", "xin24m", 0,
+ RK3562_PERI_CLKGATE_CON(10), 2, GFLAGS),
+ GATE(PCLK_PWM2_PERI, "pclk_pwm2_peri", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(10), 3, GFLAGS),
+ COMPOSITE_NODIV(CLK_PWM2_PERI, "clk_pwm2_peri", mux_100m_50m_xin24m_p, 0,
+ RK3562_PERI_CLKSEL_CON(40), 6, 2, MFLAGS,
+ RK3562_PERI_CLKGATE_CON(10), 4, GFLAGS),
+ GATE(CLK_CAPTURE_PWM2_PERI, "clk_capture_pwm2_peri", "xin24m", 0,
+ RK3562_PERI_CLKGATE_CON(10), 5, GFLAGS),
+ GATE(PCLK_PWM3_PERI, "pclk_pwm3_peri", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(10), 6, GFLAGS),
+ COMPOSITE_NODIV(CLK_PWM3_PERI, "clk_pwm3_peri", mux_100m_50m_xin24m_p, 0,
+ RK3562_PERI_CLKSEL_CON(40), 8, 2, MFLAGS,
+ RK3562_PERI_CLKGATE_CON(10), 7, GFLAGS),
+ GATE(CLK_CAPTURE_PWM3_PERI, "clk_capture_pwm3_peri", "xin24m", 0,
+ RK3562_PERI_CLKGATE_CON(10), 8, GFLAGS),
+ GATE(PCLK_CAN0, "pclk_can0", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(11), 0, GFLAGS),
+ COMPOSITE(CLK_CAN0, "clk_can0", gpll_cpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(41), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(11), 1, GFLAGS),
+ GATE(PCLK_CAN1, "pclk_can1", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(11), 2, GFLAGS),
+ COMPOSITE(CLK_CAN1, "clk_can1", gpll_cpll_p, 0,
+ RK3562_PERI_CLKSEL_CON(41), 15, 1, MFLAGS, 8, 5, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(11), 3, GFLAGS),
+ GATE(PCLK_PERI_WDT, "pclk_peri_wdt", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(13), 0, GFLAGS),
+ COMPOSITE_NODIV(TCLK_PERI_WDT, "tclk_peri_wdt", mux_xin24m_32k_p, 0,
+ RK3562_PERI_CLKSEL_CON(43), 15, 1, MFLAGS,
+ RK3562_PERI_CLKGATE_CON(13), 1, GFLAGS),
+ GATE(ACLK_SYSMEM, "aclk_sysmem", "aclk_peri", CLK_IGNORE_UNUSED,
+ RK3562_PERI_CLKGATE_CON(13), 2, GFLAGS),
+ GATE(HCLK_BOOTROM, "hclk_bootrom", "hclk_peri", CLK_IGNORE_UNUSED,
+ RK3562_PERI_CLKGATE_CON(13), 3, GFLAGS),
+ GATE(PCLK_PERI_GRF, "pclk_peri_grf", "pclk_peri", CLK_IGNORE_UNUSED,
+ RK3562_PERI_CLKGATE_CON(13), 4, GFLAGS),
+ GATE(ACLK_DMAC, "aclk_dmac", "aclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(13), 5, GFLAGS),
+ GATE(ACLK_RKDMAC, "aclk_rkdmac", "aclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(13), 6, GFLAGS),
+ GATE(PCLK_OTPC_NS, "pclk_otpc_ns", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(14), 0, GFLAGS),
+ GATE(CLK_SBPI_OTPC_NS, "clk_sbpi_otpc_ns", "xin24m", 0,
+ RK3562_PERI_CLKGATE_CON(14), 1, GFLAGS),
+ COMPOSITE_NOMUX(CLK_USER_OTPC_NS, "clk_user_otpc_ns", "xin24m", 0,
+ RK3562_PERI_CLKSEL_CON(44), 0, 8, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(14), 2, GFLAGS),
+ GATE(PCLK_OTPC_S, "pclk_otpc_s", "pclk_peri", CLK_IGNORE_UNUSED,
+ RK3562_PERI_CLKGATE_CON(14), 3, GFLAGS),
+ GATE(CLK_SBPI_OTPC_S, "clk_sbpi_otpc_s", "xin24m", CLK_IGNORE_UNUSED,
+ RK3562_PERI_CLKGATE_CON(14), 4, GFLAGS),
+ COMPOSITE_NOMUX(CLK_USER_OTPC_S, "clk_user_otpc_s", "xin24m", CLK_IGNORE_UNUSED,
+ RK3562_PERI_CLKSEL_CON(44), 8, 8, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(14), 5, GFLAGS),
+ GATE(CLK_OTPC_ARB, "clk_otpc_arb", "xin24m", 0,
+ RK3562_PERI_CLKGATE_CON(14), 6, GFLAGS),
+ GATE(PCLK_OTPPHY, "pclk_otpphy", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(14), 7, GFLAGS),
+ GATE(PCLK_USB2PHY, "pclk_usb2phy", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(15), 0, GFLAGS),
+ GATE(PCLK_PIPEPHY, "pclk_pipephy", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(15), 7, GFLAGS),
+ GATE(PCLK_SARADC, "pclk_saradc", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(16), 4, GFLAGS),
+ COMPOSITE_NOMUX(CLK_SARADC, "clk_saradc", "xin24m", 0,
+ RK3562_PERI_CLKSEL_CON(46), 0, 12, DFLAGS,
+ RK3562_PERI_CLKGATE_CON(16), 5, GFLAGS),
+ GATE(PCLK_IOC_VCCIO234, "pclk_ioc_vccio234", "pclk_peri", CLK_IS_CRITICAL,
+ RK3562_PERI_CLKGATE_CON(16), 12, GFLAGS),
+ GATE(PCLK_PERI_GPIO1, "pclk_peri_gpio1", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(17), 0, GFLAGS),
+ GATE(PCLK_PERI_GPIO2, "pclk_peri_gpio2", "pclk_peri", 0,
+ RK3562_PERI_CLKGATE_CON(17), 1, GFLAGS),
+ COMPOSITE_NODIV(DCLK_PERI_GPIO, "dclk_peri_gpio", mux_xin24m_32k_p, 0,
+ RK3562_PERI_CLKSEL_CON(47), 8, 1, MFLAGS,
+ RK3562_PERI_CLKGATE_CON(17), 4, GFLAGS),
+ GATE(DCLK_PERI_GPIO1, "dclk_peri_gpio1", "dclk_peri_gpio", 0,
+ RK3562_PERI_CLKGATE_CON(17), 2, GFLAGS),
+ GATE(DCLK_PERI_GPIO2, "dclk_peri_gpio2", "dclk_peri_gpio", 0,
+ RK3562_PERI_CLKGATE_CON(17), 3, GFLAGS),
+
+ /* PD_PHP */
+ COMPOSITE(ACLK_PHP, "aclk_php", gpll_cpll_p, 0,
+ RK3562_CLKSEL_CON(36), 7, 1, MFLAGS, 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(16), 0, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_PHP, "pclk_php", "aclk_php", 0,
+ RK3562_CLKSEL_CON(36), 8, 4, DFLAGS,
+ RK3562_CLKGATE_CON(16), 1, GFLAGS),
+ GATE(ACLK_PCIE20_MST, "aclk_pcie20_mst", "aclk_php", 0,
+ RK3562_CLKGATE_CON(16), 4, GFLAGS),
+ GATE(ACLK_PCIE20_SLV, "aclk_pcie20_slv", "aclk_php", 0,
+ RK3562_CLKGATE_CON(16), 5, GFLAGS),
+ GATE(ACLK_PCIE20_DBI, "aclk_pcie20_dbi", "aclk_php", 0,
+ RK3562_CLKGATE_CON(16), 6, GFLAGS),
+ GATE(PCLK_PCIE20, "pclk_pcie20", "pclk_php", 0,
+ RK3562_CLKGATE_CON(16), 7, GFLAGS),
+ GATE(CLK_PCIE20_AUX, "clk_pcie20_aux", "xin24m", 0,
+ RK3562_CLKGATE_CON(16), 8, GFLAGS),
+ GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_php", 0,
+ RK3562_CLKGATE_CON(16), 10, GFLAGS),
+ COMPOSITE_NODIV(CLK_USB3OTG_SUSPEND, "clk_usb3otg_suspend", mux_xin24m_32k_p, 0,
+ RK3562_CLKSEL_CON(36), 15, 1, MFLAGS,
+ RK3562_CLKGATE_CON(16), 11, GFLAGS),
+ GATE(CLK_USB3OTG_REF, "clk_usb3otg_ref", "xin24m", 0,
+ RK3562_CLKGATE_CON(16), 12, GFLAGS),
+ GATE(CLK_PIPEPHY_REF_FUNC, "clk_pipephy_ref_func", "pclk_pcie20", 0,
+ RK3562_CLKGATE_CON(17), 3, GFLAGS),
+
+ /* PD_PMU1 */
+ COMPOSITE_NOMUX(CLK_200M_PMU, "clk_200m_pmu", "cpll", CLK_IS_CRITICAL,
+ RK3562_PMU1_CLKSEL_CON(0), 0, 5, DFLAGS,
+ RK3562_PMU1_CLKGATE_CON(0), 1, GFLAGS),
+ /* PD_PMU0 */
+ COMPOSITE_FRACMUX(CLK_RTC32K_FRAC, "clk_rtc32k_frac", "xin24m", CLK_IS_CRITICAL,
+ RK3562_PMU0_CLKSEL_CON(0), 0,
+ RK3562_PMU0_CLKGATE_CON(0), 15, GFLAGS,
+ &rk3562_rtc32k_pmu_fracmux),
+ COMPOSITE_NOMUX(BUSCLK_PDPMU0, "busclk_pdpmu0", "clk_200m_pmu", CLK_IS_CRITICAL,
+ RK3562_PMU0_CLKSEL_CON(1), 3, 2, DFLAGS,
+ RK3562_PMU0_CLKGATE_CON(0), 14, GFLAGS),
+ GATE(PCLK_PMU0_CRU, "pclk_pmu0_cru", "busclk_pdpmu0", CLK_IGNORE_UNUSED,
+ RK3562_PMU0_CLKGATE_CON(0), 0, GFLAGS),
+ GATE(PCLK_PMU0_PMU, "pclk_pmu0_pmu", "busclk_pdpmu0", CLK_IGNORE_UNUSED,
+ RK3562_PMU0_CLKGATE_CON(0), 1, GFLAGS),
+ GATE(CLK_PMU0_PMU, "clk_pmu0_pmu", "xin24m", CLK_IGNORE_UNUSED,
+ RK3562_PMU0_CLKGATE_CON(0), 2, GFLAGS),
+ GATE(PCLK_PMU0_HP_TIMER, "pclk_pmu0_hp_timer", "busclk_pdpmu0", CLK_IGNORE_UNUSED,
+ RK3562_PMU0_CLKGATE_CON(0), 3, GFLAGS),
+ GATE(CLK_PMU0_HP_TIMER, "clk_pmu0_hp_timer", "xin24m", CLK_IGNORE_UNUSED,
+ RK3562_PMU0_CLKGATE_CON(0), 4, GFLAGS),
+ GATE(CLK_PMU0_32K_HP_TIMER, "clk_pmu0_32k_hp_timer", "clk_rtc_32k", CLK_IGNORE_UNUSED,
+ RK3562_PMU0_CLKGATE_CON(0), 5, GFLAGS),
+ GATE(PCLK_PMU0_PVTM, "pclk_pmu0_pvtm", "busclk_pdpmu0", 0,
+ RK3562_PMU0_CLKGATE_CON(0), 6, GFLAGS),
+ GATE(CLK_PMU0_PVTM, "clk_pmu0_pvtm", "xin24m", 0,
+ RK3562_PMU0_CLKGATE_CON(0), 7, GFLAGS),
+ GATE(PCLK_IOC_PMUIO, "pclk_ioc_pmuio", "busclk_pdpmu0", CLK_IS_CRITICAL,
+ RK3562_PMU0_CLKGATE_CON(0), 8, GFLAGS),
+ GATE(PCLK_PMU0_GPIO0, "pclk_pmu0_gpio0", "busclk_pdpmu0", 0,
+ RK3562_PMU0_CLKGATE_CON(0), 9, GFLAGS),
+ GATE(DBCLK_PMU0_GPIO0, "dbclk_pmu0_gpio0", "xin24m", 0,
+ RK3562_PMU0_CLKGATE_CON(0), 10, GFLAGS),
+ GATE(PCLK_PMU0_GRF, "pclk_pmu0_grf", "busclk_pdpmu0", CLK_IGNORE_UNUSED,
+ RK3562_PMU0_CLKGATE_CON(0), 11, GFLAGS),
+ GATE(PCLK_PMU0_SGRF, "pclk_pmu0_sgrf", "busclk_pdpmu0", CLK_IGNORE_UNUSED,
+ RK3562_PMU0_CLKGATE_CON(0), 12, GFLAGS),
+ GATE(CLK_DDR_FAIL_SAFE, "clk_ddr_fail_safe", "xin24m", CLK_IGNORE_UNUSED,
+ RK3562_PMU0_CLKGATE_CON(1), 0, GFLAGS),
+ GATE(PCLK_PMU0_SCRKEYGEN, "pclk_pmu0_scrkeygen", "busclk_pdpmu0", CLK_IGNORE_UNUSED,
+ RK3562_PMU0_CLKGATE_CON(1), 1, GFLAGS),
+ COMPOSITE_NOMUX(CLK_PIPEPHY_DIV, "clk_pipephy_div", "cpll", 0,
+ RK3562_PMU0_CLKSEL_CON(2), 0, 6, DFLAGS,
+ RK3562_PMU0_CLKGATE_CON(2), 0, GFLAGS),
+ GATE(CLK_PIPEPHY_XIN24M, "clk_pipephy_xin24m", "xin24m", 0,
+ RK3562_PMU0_CLKGATE_CON(2), 1, GFLAGS),
+ COMPOSITE_NODIV(CLK_PIPEPHY_REF, "clk_pipephy_ref", clk_pipephy_ref_p, 0,
+ RK3562_PMU0_CLKSEL_CON(2), 7, 1, MFLAGS,
+ RK3562_PMU0_CLKGATE_CON(2), 2, GFLAGS),
+ GATE(CLK_USB2PHY_XIN24M, "clk_usb2phy_xin24m", "xin24m", 0,
+ RK3562_PMU0_CLKGATE_CON(2), 4, GFLAGS),
+ COMPOSITE_NODIV(CLK_USB2PHY_REF, "clk_usb2phy_ref", clk_usbphy_ref_p, 0,
+ RK3562_PMU0_CLKSEL_CON(2), 8, 1, MFLAGS,
+ RK3562_PMU0_CLKGATE_CON(2), 5, GFLAGS),
+ GATE(CLK_MIPIDSIPHY_XIN24M, "clk_mipidsiphy_xin24m", "xin24m", 0,
+ RK3562_PMU0_CLKGATE_CON(2), 6, GFLAGS),
+ COMPOSITE_NODIV(CLK_MIPIDSIPHY_REF, "clk_mipidsiphy_ref", clk_mipidsi_ref_p, 0,
+ RK3562_PMU0_CLKSEL_CON(2), 15, 1, MFLAGS,
+ RK3562_PMU0_CLKGATE_CON(2), 7, GFLAGS),
+ GATE(PCLK_PMU0_I2C0, "pclk_pmu0_i2c0", "busclk_pdpmu0", 0,
+ RK3562_PMU0_CLKGATE_CON(2), 8, GFLAGS),
+ COMPOSITE(CLK_PMU0_I2C0, "clk_pmu0_i2c0", mux_200m_xin24m_32k_p, 0,
+ RK3562_PMU0_CLKSEL_CON(3), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RK3562_PMU0_CLKGATE_CON(2), 9, GFLAGS),
+ /* PD_PMU1 */
+ GATE(PCLK_PMU1_CRU, "pclk_pmu1_cru", "busclk_pdpmu0", CLK_IGNORE_UNUSED,
+ RK3562_PMU1_CLKGATE_CON(0), 0, GFLAGS),
+ GATE(HCLK_PMU1_MEM, "hclk_pmu1_mem", "busclk_pdpmu0", CLK_IGNORE_UNUSED,
+ RK3562_PMU1_CLKGATE_CON(0), 2, GFLAGS),
+ GATE(PCLK_PMU1_UART0, "pclk_pmu1_uart0", "busclk_pdpmu0", 0,
+ RK3562_PMU1_CLKGATE_CON(0), 7, GFLAGS),
+ COMPOSITE_NOMUX(CLK_PMU1_UART0_SRC, "clk_pmu1_uart0_src", "cpll", 0,
+ RK3562_PMU1_CLKSEL_CON(2), 0, 4, DFLAGS,
+ RK3562_PMU1_CLKGATE_CON(0), 8, GFLAGS),
+ COMPOSITE_FRACMUX(CLK_PMU1_UART0_FRAC, "clk_pmu1_uart0_frac", "clk_pmu1_uart0_src", CLK_SET_RATE_PARENT,
+ RK3562_PMU1_CLKSEL_CON(3), 0,
+ RK3562_PMU1_CLKGATE_CON(0), 9, GFLAGS,
+ &rk3562_clk_pmu1_uart0_fracmux),
+ GATE(SCLK_PMU1_UART0, "sclk_pmu1_uart0", "clk_pmu1_uart0", 0,
+ RK3562_PMU1_CLKGATE_CON(0), 10, GFLAGS),
+ GATE(PCLK_PMU1_SPI0, "pclk_pmu1_spi0", "busclk_pdpmu0", 0,
+ RK3562_PMU1_CLKGATE_CON(1), 0, GFLAGS),
+ COMPOSITE(CLK_PMU1_SPI0, "clk_pmu1_spi0", mux_200m_xin24m_32k_p, 0,
+ RK3562_PMU1_CLKSEL_CON(4), 6, 2, MFLAGS, 0, 2, DFLAGS,
+ RK3562_PMU1_CLKGATE_CON(1), 1, GFLAGS),
+ GATE(SCLK_IN_PMU1_SPI0, "sclk_in_pmu1_spi0", "sclk_in_pmu1_spi0_io", 0,
+ RK3562_PMU1_CLKGATE_CON(1), 2, GFLAGS),
+ GATE(PCLK_PMU1_PWM0, "pclk_pmu1_pwm0", "busclk_pdpmu0", 0,
+ RK3562_PMU1_CLKGATE_CON(1), 3, GFLAGS),
+ COMPOSITE(CLK_PMU1_PWM0, "clk_pmu1_pwm0", mux_200m_xin24m_32k_p, 0,
+ RK3562_PMU1_CLKSEL_CON(4), 14, 2, MFLAGS, 8, 2, DFLAGS,
+ RK3562_PMU1_CLKGATE_CON(1), 4, GFLAGS),
+ GATE(CLK_CAPTURE_PMU1_PWM0, "clk_capture_pmu1_pwm0", "xin24m", 0,
+ RK3562_PMU1_CLKGATE_CON(1), 5, GFLAGS),
+ GATE(CLK_PMU1_WIFI, "clk_pmu1_wifi", "xin24m", 0,
+ RK3562_PMU1_CLKGATE_CON(1), 6, GFLAGS),
+ GATE(FCLK_PMU1_CM0_CORE, "fclk_pmu1_cm0_core", "busclk_pdpmu0", 0,
+ RK3562_PMU1_CLKGATE_CON(2), 0, GFLAGS),
+ GATE(CLK_PMU1_CM0_RTC, "clk_pmu1_cm0_rtc", "clk_rtc_32k", 0,
+ RK3562_PMU1_CLKGATE_CON(2), 1, GFLAGS),
+ GATE(PCLK_PMU1_WDTNS, "pclk_pmu1_wdtns", "busclk_pdpmu0", 0,
+ RK3562_PMU1_CLKGATE_CON(2), 3, GFLAGS),
+ GATE(CLK_PMU1_WDTNS, "clk_pmu1_wdtns", "xin24m", 0,
+ RK3562_PMU1_CLKGATE_CON(2), 4, GFLAGS),
+ GATE(PCLK_PMU1_MAILBOX, "pclk_pmu1_mailbox", "busclk_pdpmu0", 0,
+ RK3562_PMU1_CLKGATE_CON(3), 8, GFLAGS),
+
+ /* PD_RGA */
+ COMPOSITE(ACLK_RGA_PRE, "aclk_rga_pre", gpll_cpll_pvtpll_dmyapll_p, 0,
+ RK3562_CLKSEL_CON(32), 6, 2, MFLAGS, 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(14), 0, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_RGA_PRE, "hclk_rga_pre", "aclk_rga_jdec", 0,
+ RK3562_CLKSEL_CON(32), 8, 3, DFLAGS,
+ RK3562_CLKGATE_CON(14), 1, GFLAGS),
+ GATE(ACLK_RGA, "aclk_rga", "aclk_rga_jdec", 0,
+ RK3562_CLKGATE_CON(14), 6, GFLAGS),
+ GATE(HCLK_RGA, "hclk_rga", "hclk_rga_pre", 0,
+ RK3562_CLKGATE_CON(14), 7, GFLAGS),
+ COMPOSITE(CLK_RGA_CORE, "clk_rga_core", gpll_cpll_pvtpll_dmyapll_p, 0,
+ RK3562_CLKSEL_CON(33), 6, 2, MFLAGS, 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(14), 8, GFLAGS),
+ GATE(ACLK_JDEC, "aclk_jdec", "aclk_rga_jdec", 0,
+ RK3562_CLKGATE_CON(14), 9, GFLAGS),
+ GATE(HCLK_JDEC, "hclk_jdec", "hclk_rga_pre", 0,
+ RK3562_CLKGATE_CON(14), 10, GFLAGS),
+
+ /* PD_VDPU */
+ COMPOSITE(ACLK_VDPU_PRE, "aclk_vdpu_pre", gpll_cpll_pvtpll_dmyapll_p, 0,
+ RK3562_CLKSEL_CON(22), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3562_CLKGATE_CON(10), 0, GFLAGS),
+ COMPOSITE(CLK_RKVDEC_HEVC_CA, "clk_rkvdec_hevc_ca", gpll_cpll_pvtpll_dmyapll_p, 0,
+ RK3562_CLKSEL_CON(23), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RK3562_CLKGATE_CON(10), 3, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_VDPU_PRE, "hclk_vdpu_pre", "aclk_vdpu", 0,
+ RK3562_CLKSEL_CON(24), 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(10), 4, GFLAGS),
+ GATE(ACLK_RKVDEC, "aclk_rkvdec", "aclk_vdpu", 0,
+ RK3562_CLKGATE_CON(10), 7, GFLAGS),
+ GATE(HCLK_RKVDEC, "hclk_rkvdec", "hclk_vdpu_pre", 0,
+ RK3562_CLKGATE_CON(10), 8, GFLAGS),
+
+ /* PD_VEPU */
+ COMPOSITE(CLK_RKVENC_CORE, "clk_rkvenc_core", gpll_cpll_pvtpll_dmyapll_p, 0,
+ RK3562_CLKSEL_CON(20), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3562_CLKGATE_CON(9), 0, GFLAGS),
+ COMPOSITE(ACLK_VEPU_PRE, "aclk_vepu_pre", gpll_cpll_pvtpll_dmyapll_p, 0,
+ RK3562_CLKSEL_CON(20), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RK3562_CLKGATE_CON(9), 1, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_VEPU_PRE, "hclk_vepu_pre", "aclk_vepu", 0,
+ RK3562_CLKSEL_CON(21), 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(9), 2, GFLAGS),
+ GATE(ACLK_RKVENC, "aclk_rkvenc", "aclk_vepu", 0,
+ RK3562_CLKGATE_CON(9), 5, GFLAGS),
+ GATE(HCLK_RKVENC, "hclk_rkvenc", "hclk_vepu", 0,
+ RK3562_CLKGATE_CON(9), 6, GFLAGS),
+
+ /* PD_VI */
+ COMPOSITE(ACLK_VI, "aclk_vi", gpll_cpll_pvtpll_dmyapll_p, 0,
+ RK3562_CLKSEL_CON(25), 6, 2, MFLAGS, 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(11), 0, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_VI, "hclk_vi", "aclk_vi_isp", 0,
+ RK3562_CLKSEL_CON(26), 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(11), 1, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_VI, "pclk_vi", "aclk_vi_isp", 0,
+ RK3562_CLKSEL_CON(26), 8, 4, DFLAGS,
+ RK3562_CLKGATE_CON(11), 2, GFLAGS),
+ GATE(ACLK_ISP, "aclk_isp", "aclk_vi_isp", 0,
+ RK3562_CLKGATE_CON(11), 6, GFLAGS),
+ GATE(HCLK_ISP, "hclk_isp", "hclk_vi", 0,
+ RK3562_CLKGATE_CON(11), 7, GFLAGS),
+ COMPOSITE(CLK_ISP, "clk_isp", gpll_cpll_pvtpll_dmyapll_p, 0,
+ RK3562_CLKSEL_CON(27), 6, 2, MFLAGS, 0, 4, DFLAGS,
+ RK3562_CLKGATE_CON(11), 8, GFLAGS),
+ GATE(ACLK_VICAP, "aclk_vicap", "aclk_vi_isp", 0,
+ RK3562_CLKGATE_CON(11), 9, GFLAGS),
+ GATE(HCLK_VICAP, "hclk_vicap", "hclk_vi", 0,
+ RK3562_CLKGATE_CON(11), 10, GFLAGS),
+ COMPOSITE(DCLK_VICAP, "dclk_vicap", gpll_cpll_pvtpll_dmyapll_p, 0,
+ RK3562_CLKSEL_CON(27), 14, 2, MFLAGS, 8, 4, DFLAGS,
+ RK3562_CLKGATE_CON(11), 11, GFLAGS),
+ GATE(CSIRX0_CLK_DATA, "csirx0_clk_data", "csirx0_clk_data_io", 0,
+ RK3562_CLKGATE_CON(11), 12, GFLAGS),
+ GATE(CSIRX1_CLK_DATA, "csirx1_clk_data", "csirx1_clk_data_io", 0,
+ RK3562_CLKGATE_CON(11), 13, GFLAGS),
+ GATE(CSIRX2_CLK_DATA, "csirx2_clk_data", "csirx2_clk_data_io", 0,
+ RK3562_CLKGATE_CON(11), 14, GFLAGS),
+ GATE(CSIRX3_CLK_DATA, "csirx3_clk_data", "csirx3_clk_data_io", 0,
+ RK3562_CLKGATE_CON(11), 15, GFLAGS),
+ GATE(PCLK_CSIHOST0, "pclk_csihost0", "pclk_vi", 0,
+ RK3562_CLKGATE_CON(12), 0, GFLAGS),
+ GATE(PCLK_CSIHOST1, "pclk_csihost1", "pclk_vi", 0,
+ RK3562_CLKGATE_CON(12), 1, GFLAGS),
+ GATE(PCLK_CSIHOST2, "pclk_csihost2", "pclk_vi", 0,
+ RK3562_CLKGATE_CON(12), 2, GFLAGS),
+ GATE(PCLK_CSIHOST3, "pclk_csihost3", "pclk_vi", 0,
+ RK3562_CLKGATE_CON(12), 3, GFLAGS),
+ GATE(PCLK_CSIPHY0, "pclk_csiphy0", "pclk_vi", 0,
+ RK3562_CLKGATE_CON(12), 4, GFLAGS),
+ GATE(PCLK_CSIPHY1, "pclk_csiphy1", "pclk_vi", 0,
+ RK3562_CLKGATE_CON(12), 5, GFLAGS),
+
+ /* PD_VO */
+ COMPOSITE(ACLK_VO_PRE, "aclk_vo_pre", gpll_cpll_vpll_dmyhpll_p, 0,
+ RK3562_CLKSEL_CON(28), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3562_CLKGATE_CON(13), 0, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_VO_PRE, "hclk_vo_pre", "aclk_vo", 0,
+ RK3562_CLKSEL_CON(29), 0, 5, DFLAGS,
+ RK3562_CLKGATE_CON(13), 1, GFLAGS),
+ GATE(ACLK_VOP, "aclk_vop", "aclk_vo", 0,
+ RK3562_CLKGATE_CON(13), 6, GFLAGS),
+ GATE(HCLK_VOP, "hclk_vop", "hclk_vo_pre", 0,
+ RK3562_CLKGATE_CON(13), 7, GFLAGS),
+ COMPOSITE(DCLK_VOP, "dclk_vop", gpll_dmyhpll_vpll_apll_p, CLK_SET_RATE_NO_REPARENT,
+ RK3562_CLKSEL_CON(30), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ RK3562_CLKGATE_CON(13), 8, GFLAGS),
+ COMPOSITE(DCLK_VOP1, "dclk_vop1", gpll_dmyhpll_vpll_apll_p, CLK_SET_RATE_NO_REPARENT,
+ RK3562_CLKSEL_CON(31), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ RK3562_CLKGATE_CON(13), 9, GFLAGS),
+};
+
+static void __init rk3562_clk_init(struct device_node *np)
+{
+ struct rockchip_clk_provider *ctx;
+ unsigned long clk_nr_clks;
+ void __iomem *reg_base;
+
+ clk_nr_clks = rockchip_clk_find_max_clk_id(rk3562_clk_branches,
+ ARRAY_SIZE(rk3562_clk_branches)) + 1;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("%s: could not map cru region\n", __func__);
+ return;
+ }
+
+ ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
+ if (IS_ERR(ctx)) {
+ pr_err("%s: rockchip clk init failed\n", __func__);
+ iounmap(reg_base);
+ return;
+ }
+
+ rockchip_clk_register_plls(ctx, rk3562_pll_clks,
+ ARRAY_SIZE(rk3562_pll_clks),
+ RK3562_GRF_SOC_STATUS0);
+
+ rockchip_clk_register_branches(ctx, rk3562_clk_branches,
+ ARRAY_SIZE(rk3562_clk_branches));
+
+ rk3562_rst_init(np, reg_base);
+
+ rockchip_register_restart_notifier(ctx, RK3562_GLB_SRST_FST, NULL);
+
+ rockchip_clk_of_add_provider(np, ctx);
+}
+
+CLK_OF_DECLARE(rk3562_cru, "rockchip,rk3562-cru", rk3562_clk_init);
+
+struct clk_rk3562_inits {
+ void (*inits)(struct device_node *np);
+};
+
+static const struct clk_rk3562_inits clk_rk3562_cru_init = {
+ .inits = rk3562_clk_init,
+};
+
+static const struct of_device_id clk_rk3562_match_table[] = {
+ {
+ .compatible = "rockchip,rk3562-cru",
+ .data = &clk_rk3562_cru_init,
+ },
+ { }
+};
+
+static int clk_rk3562_probe(struct platform_device *pdev)
+{
+ const struct clk_rk3562_inits *init_data;
+ struct device *dev = &pdev->dev;
+
+ init_data = device_get_match_data(dev);
+ if (!init_data)
+ return -EINVAL;
+
+ if (init_data->inits)
+ init_data->inits(dev->of_node);
+
+ return 0;
+}
+
+static struct platform_driver clk_rk3562_driver = {
+ .probe = clk_rk3562_probe,
+ .driver = {
+ .name = "clk-rk3562",
+ .of_match_table = clk_rk3562_match_table,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver_probe(clk_rk3562_driver, clk_rk3562_probe);
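
For context, peripheral drivers consume the clocks registered above through the generic clk API. A minimal sketch, assuming a device-tree node that lists <&cru SCLK_UART1> as its first clock (the demo driver and its names are illustrative, not part of this patch):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int demo_uart_probe(struct platform_device *pdev)
{
	struct clk *sclk;

	/* First clock listed in this device's "clocks" property */
	sclk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(sclk))
		return PTR_ERR(sclk);

	/* Ungates "sclk_uart1" via RK3562_PERI_CLKGATE_CON(7), bit 11 */
	return clk_prepare_enable(sclk);
}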
diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c
index 53d10b1c627b..7d9279291e76 100644
--- a/drivers/clk/rockchip/clk-rk3568.c
+++ b/drivers/clk/rockchip/clk-rk3568.c
@@ -1602,6 +1602,7 @@ static const char *const rk3568_cru_critical_clocks[] __initconst = {
"pclk_php",
"hclk_usb",
"pclk_usb",
+ "hclk_vi",
"hclk_vo",
};
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index 9b37d44b9e5d..df2b2d706450 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -207,6 +207,65 @@ struct clk;
#define RK3399_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x100)
#define RK3399_PMU_SOFTRST_CON(x) ((x) * 0x4 + 0x110)
+#define RK3528_PMU_CRU_BASE 0x10000
+#define RK3528_PCIE_CRU_BASE 0x20000
+#define RK3528_DDRPHY_CRU_BASE 0x28000
+#define RK3528_PLL_CON(x) RK2928_PLL_CON(x)
+#define RK3528_PCIE_PLL_CON(x) ((x) * 0x4 + RK3528_PCIE_CRU_BASE)
+#define RK3528_DDRPHY_PLL_CON(x) ((x) * 0x4 + RK3528_DDRPHY_CRU_BASE)
+#define RK3528_MODE_CON 0x280
+#define RK3528_CLKSEL_CON(x) ((x) * 0x4 + 0x300)
+#define RK3528_CLKGATE_CON(x) ((x) * 0x4 + 0x800)
+#define RK3528_SOFTRST_CON(x) ((x) * 0x4 + 0xa00)
+#define RK3528_PMU_CLKSEL_CON(x) ((x) * 0x4 + 0x300 + RK3528_PMU_CRU_BASE)
+#define RK3528_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x800 + RK3528_PMU_CRU_BASE)
+#define RK3528_PCIE_CLKSEL_CON(x) ((x) * 0x4 + 0x300 + RK3528_PCIE_CRU_BASE)
+#define RK3528_PCIE_CLKGATE_CON(x) ((x) * 0x4 + 0x800 + RK3528_PCIE_CRU_BASE)
+#define RK3528_DDRPHY_CLKGATE_CON(x) ((x) * 0x4 + 0x800 + RK3528_DDRPHY_CRU_BASE)
+#define RK3528_DDRPHY_MODE_CON (0x280 + RK3528_DDRPHY_CRU_BASE)
+#define RK3528_GLB_CNT_TH 0xc00
+#define RK3528_GLB_SRST_FST 0xc08
+#define RK3528_GLB_SRST_SND 0xc0c
+
+#define RK3562_PMU0_CRU_BASE 0x10000
+#define RK3562_PMU1_CRU_BASE 0x18000
+#define RK3562_DDR_CRU_BASE 0x20000
+#define RK3562_SUBDDR_CRU_BASE 0x28000
+#define RK3562_PERI_CRU_BASE 0x30000
+
+#define RK3562_PLL_CON(x) RK2928_PLL_CON(x)
+#define RK3562_PMU1_PLL_CON(x) ((x) * 0x4 + RK3562_PMU1_CRU_BASE + 0x40)
+#define RK3562_SUBDDR_PLL_CON(x) ((x) * 0x4 + RK3562_SUBDDR_CRU_BASE + 0x20)
+#define RK3562_MODE_CON 0x600
+#define RK3562_PMU1_MODE_CON (RK3562_PMU1_CRU_BASE + 0x380)
+#define RK3562_SUBDDR_MODE_CON (RK3562_SUBDDR_CRU_BASE + 0x380)
+#define RK3562_CLKSEL_CON(x) ((x) * 0x4 + 0x100)
+#define RK3562_CLKGATE_CON(x) ((x) * 0x4 + 0x300)
+#define RK3562_SOFTRST_CON(x) ((x) * 0x4 + 0x400)
+#define RK3562_DDR_CLKSEL_CON(x) ((x) * 0x4 + RK3562_DDR_CRU_BASE + 0x100)
+#define RK3562_DDR_CLKGATE_CON(x) ((x) * 0x4 + RK3562_DDR_CRU_BASE + 0x180)
+#define RK3562_DDR_SOFTRST_CON(x) ((x) * 0x4 + RK3562_DDR_CRU_BASE + 0x200)
+#define RK3562_SUBDDR_CLKSEL_CON(x) ((x) * 0x4 + RK3562_SUBDDR_CRU_BASE + 0x100)
+#define RK3562_SUBDDR_CLKGATE_CON(x) ((x) * 0x4 + RK3562_SUBDDR_CRU_BASE + 0x180)
+#define RK3562_SUBDDR_SOFTRST_CON(x) ((x) * 0x4 + RK3562_SUBDDR_CRU_BASE + 0x200)
+#define RK3562_PERI_CLKSEL_CON(x) ((x) * 0x4 + RK3562_PERI_CRU_BASE + 0x100)
+#define RK3562_PERI_CLKGATE_CON(x) ((x) * 0x4 + RK3562_PERI_CRU_BASE + 0x300)
+#define RK3562_PERI_SOFTRST_CON(x) ((x) * 0x4 + RK3562_PERI_CRU_BASE + 0x400)
+#define RK3562_PMU0_CLKSEL_CON(x) ((x) * 0x4 + RK3562_PMU0_CRU_BASE + 0x100)
+#define RK3562_PMU0_CLKGATE_CON(x) ((x) * 0x4 + RK3562_PMU0_CRU_BASE + 0x180)
+#define RK3562_PMU0_SOFTRST_CON(x) ((x) * 0x4 + RK3562_PMU0_CRU_BASE + 0x200)
+#define RK3562_PMU1_CLKSEL_CON(x) ((x) * 0x4 + RK3562_PMU1_CRU_BASE + 0x100)
+#define RK3562_PMU1_CLKGATE_CON(x) ((x) * 0x4 + RK3562_PMU1_CRU_BASE + 0x180)
+#define RK3562_PMU1_SOFTRST_CON(x) ((x) * 0x4 + RK3562_PMU1_CRU_BASE + 0x200)
+#define RK3562_GLB_SRST_FST 0x614
+#define RK3562_GLB_SRST_SND 0x618
+#define RK3562_GLB_RST_CON 0x61c
+#define RK3562_GLB_RST_ST 0x620
+#define RK3562_SDMMC0_CON0 0x624
+#define RK3562_SDMMC0_CON1 0x628
+#define RK3562_SDMMC1_CON0 0x62c
+#define RK3562_SDMMC1_CON1 0x630
+
#define RK3568_PLL_CON(x) RK2928_PLL_CON(x)
#define RK3568_MODE_CON0 0xc0
#define RK3568_MISC_CON0 0xc4
@@ -444,6 +503,7 @@ struct rockchip_pll_rate_table {
* Flags:
* ROCKCHIP_PLL_SYNC_RATE - check rate parameters to match against the
 * rate_table parameters and adjust them if necessary.
+ * ROCKCHIP_PLL_FIXED_MODE - the pll operates in normal mode only
*/
struct rockchip_pll_clock {
unsigned int id;
@@ -461,6 +521,7 @@ struct rockchip_pll_clock {
};
#define ROCKCHIP_PLL_SYNC_RATE BIT(0)
+#define ROCKCHIP_PLL_FIXED_MODE BIT(1)
#define PLL(_type, _id, _name, _pnames, _flags, _con, _mode, _mshift, \
_lshift, _pflags, _rtable) \
@@ -1118,6 +1179,8 @@ static inline void rockchip_register_softrst(struct device_node *np,
return rockchip_register_softrst_lut(np, NULL, num_regs, base, flags);
}
+void rk3528_rst_init(struct device_node *np, void __iomem *reg_base);
+void rk3562_rst_init(struct device_node *np, void __iomem *reg_base);
void rk3576_rst_init(struct device_node *np, void __iomem *reg_base);
void rk3588_rst_init(struct device_node *np, void __iomem *reg_base);
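
The per-block bases above mean the whole RK3562 CRU, including its PMU/DDR/PERI sub-CRUs, is addressed through a single mapping of the main CRU. A small stand-alone check of the resulting offsets, assuming the macros exactly as defined in this hunk:

#define RK3562_PMU0_CRU_BASE		0x10000
#define RK3562_PERI_CRU_BASE		0x30000
#define RK3562_PMU0_CLKSEL_CON(x)	((x) * 0x4 + RK3562_PMU0_CRU_BASE + 0x100)
#define RK3562_PERI_CLKGATE_CON(x)	((x) * 0x4 + RK3562_PERI_CRU_BASE + 0x300)

/* e.g. the sclk_uart1 gate (PERI gate con 7) sits at CRU base + 0x3031c */
_Static_assert(RK3562_PERI_CLKGATE_CON(7) == 0x3031c, "PERI gate con 7");
_Static_assert(RK3562_PMU0_CLKSEL_CON(2) == 0x10108, "PMU0 sel con 2");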
diff --git a/drivers/clk/rockchip/rst-rk3528.c b/drivers/clk/rockchip/rst-rk3528.c
new file mode 100644
index 000000000000..b24f2c367929
--- /dev/null
+++ b/drivers/clk/rockchip/rst-rk3528.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2022 Rockchip Electronics Co., Ltd.
+ * Based on Sebastian Reichel's implementation for RK3588
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <dt-bindings/reset/rockchip,rk3528-cru.h>
+#include "clk.h"
+
+/* 0xFF4A0000 + 0x0A00 */
+#define RK3528_CRU_RESET_OFFSET(id, reg, bit) [id] = (0 + reg * 16 + bit)
+
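
The value packed here is reg * 16 + bit, i.e. sixteen reset bits per SOFTRST_CON register. With the RK3528_SOFTRST_CON() layout added to clk.h above (0xa00 plus reg * 4), the mapping back to a register offset and bit can be sketched as follows (illustrative helpers, not part of the patch):

static inline unsigned int rk3528_lut_to_reg(unsigned int lut_val)
{
	/* e.g. SRST_TIMER2 = 9 * 16 + 8 -> CRU_SOFTRST_CON09 at 0xa24 */
	return 0xa00 + (lut_val / 16) * 0x4;
}

static inline unsigned int rk3528_lut_to_bit(unsigned int lut_val)
{
	return lut_val % 16;	/* SRST_TIMER2 -> bit 8 */
}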
+/* mapping table for reset ID to register offset */
+static const int rk3528_register_offset[] = {
+ /* CRU_SOFTRST_CON03 */
+ RK3528_CRU_RESET_OFFSET(SRST_CORE0_PO, 3, 0),
+ RK3528_CRU_RESET_OFFSET(SRST_CORE1_PO, 3, 1),
+ RK3528_CRU_RESET_OFFSET(SRST_CORE2_PO, 3, 2),
+ RK3528_CRU_RESET_OFFSET(SRST_CORE3_PO, 3, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_CORE0, 3, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_CORE1, 3, 5),
+ RK3528_CRU_RESET_OFFSET(SRST_CORE2, 3, 6),
+ RK3528_CRU_RESET_OFFSET(SRST_CORE3, 3, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_NL2, 3, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_CORE_BIU, 3, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_CORE_CRYPTO, 3, 10),
+
+ /* CRU_SOFTRST_CON05 */
+ RK3528_CRU_RESET_OFFSET(SRST_P_DBG, 5, 13),
+ RK3528_CRU_RESET_OFFSET(SRST_POT_DBG, 5, 14),
+ RK3528_CRU_RESET_OFFSET(SRST_NT_DBG, 5, 15),
+
+ /* CRU_SOFTRST_CON06 */
+ RK3528_CRU_RESET_OFFSET(SRST_P_CORE_GRF, 6, 2),
+ RK3528_CRU_RESET_OFFSET(SRST_P_DAPLITE_BIU, 6, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_P_CPU_BIU, 6, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_REF_PVTPLL_CORE, 6, 7),
+
+ /* CRU_SOFTRST_CON08 */
+ RK3528_CRU_RESET_OFFSET(SRST_A_BUS_VOPGL_BIU, 8, 1),
+ RK3528_CRU_RESET_OFFSET(SRST_A_BUS_H_BIU, 8, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_A_SYSMEM_BIU, 8, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_A_BUS_BIU, 8, 10),
+ RK3528_CRU_RESET_OFFSET(SRST_H_BUS_BIU, 8, 11),
+ RK3528_CRU_RESET_OFFSET(SRST_P_BUS_BIU, 8, 12),
+ RK3528_CRU_RESET_OFFSET(SRST_P_DFT2APB, 8, 13),
+ RK3528_CRU_RESET_OFFSET(SRST_P_BUS_GRF, 8, 15),
+
+ /* CRU_SOFTRST_CON09 */
+ RK3528_CRU_RESET_OFFSET(SRST_A_BUS_M_BIU, 9, 0),
+ RK3528_CRU_RESET_OFFSET(SRST_A_GIC, 9, 1),
+ RK3528_CRU_RESET_OFFSET(SRST_A_SPINLOCK, 9, 2),
+ RK3528_CRU_RESET_OFFSET(SRST_A_DMAC, 9, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_P_TIMER, 9, 5),
+ RK3528_CRU_RESET_OFFSET(SRST_TIMER0, 9, 6),
+ RK3528_CRU_RESET_OFFSET(SRST_TIMER1, 9, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_TIMER2, 9, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_TIMER3, 9, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_TIMER4, 9, 10),
+ RK3528_CRU_RESET_OFFSET(SRST_TIMER5, 9, 11),
+ RK3528_CRU_RESET_OFFSET(SRST_P_JDBCK_DAP, 9, 12),
+ RK3528_CRU_RESET_OFFSET(SRST_JDBCK_DAP, 9, 13),
+ RK3528_CRU_RESET_OFFSET(SRST_P_WDT_NS, 9, 15),
+
+ /* CRU_SOFTRST_CON10 */
+ RK3528_CRU_RESET_OFFSET(SRST_T_WDT_NS, 10, 0),
+ RK3528_CRU_RESET_OFFSET(SRST_H_TRNG_NS, 10, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_P_UART0, 10, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_S_UART0, 10, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_PKA_CRYPTO, 10, 10),
+ RK3528_CRU_RESET_OFFSET(SRST_A_CRYPTO, 10, 11),
+ RK3528_CRU_RESET_OFFSET(SRST_H_CRYPTO, 10, 12),
+ RK3528_CRU_RESET_OFFSET(SRST_P_DMA2DDR, 10, 13),
+ RK3528_CRU_RESET_OFFSET(SRST_A_DMA2DDR, 10, 14),
+
+ /* CRU_SOFTRST_CON11 */
+ RK3528_CRU_RESET_OFFSET(SRST_P_PWM0, 11, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_PWM0, 11, 5),
+ RK3528_CRU_RESET_OFFSET(SRST_P_PWM1, 11, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_PWM1, 11, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_P_SCR, 11, 10),
+ RK3528_CRU_RESET_OFFSET(SRST_A_DCF, 11, 11),
+ RK3528_CRU_RESET_OFFSET(SRST_P_INTMUX, 11, 12),
+
+ /* CRU_SOFTRST_CON25 */
+ RK3528_CRU_RESET_OFFSET(SRST_A_VPU_BIU, 25, 6),
+ RK3528_CRU_RESET_OFFSET(SRST_H_VPU_BIU, 25, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_P_VPU_BIU, 25, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_A_VPU, 25, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_H_VPU, 25, 10),
+ RK3528_CRU_RESET_OFFSET(SRST_P_CRU_PCIE, 25, 11),
+ RK3528_CRU_RESET_OFFSET(SRST_P_VPU_GRF, 25, 12),
+ RK3528_CRU_RESET_OFFSET(SRST_H_SFC, 25, 13),
+ RK3528_CRU_RESET_OFFSET(SRST_S_SFC, 25, 14),
+ RK3528_CRU_RESET_OFFSET(SRST_C_EMMC, 25, 15),
+
+ /* CRU_SOFTRST_CON26 */
+ RK3528_CRU_RESET_OFFSET(SRST_H_EMMC, 26, 0),
+ RK3528_CRU_RESET_OFFSET(SRST_A_EMMC, 26, 1),
+ RK3528_CRU_RESET_OFFSET(SRST_B_EMMC, 26, 2),
+ RK3528_CRU_RESET_OFFSET(SRST_T_EMMC, 26, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_P_GPIO1, 26, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_DB_GPIO1, 26, 5),
+ RK3528_CRU_RESET_OFFSET(SRST_A_VPU_L_BIU, 26, 6),
+ RK3528_CRU_RESET_OFFSET(SRST_P_VPU_IOC, 26, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_H_SAI_I2S0, 26, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_M_SAI_I2S0, 26, 10),
+ RK3528_CRU_RESET_OFFSET(SRST_H_SAI_I2S2, 26, 11),
+ RK3528_CRU_RESET_OFFSET(SRST_M_SAI_I2S2, 26, 12),
+ RK3528_CRU_RESET_OFFSET(SRST_P_ACODEC, 26, 13),
+
+ /* CRU_SOFTRST_CON27 */
+ RK3528_CRU_RESET_OFFSET(SRST_P_GPIO3, 27, 0),
+ RK3528_CRU_RESET_OFFSET(SRST_DB_GPIO3, 27, 1),
+ RK3528_CRU_RESET_OFFSET(SRST_P_SPI1, 27, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_SPI1, 27, 5),
+ RK3528_CRU_RESET_OFFSET(SRST_P_UART2, 27, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_S_UART2, 27, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_P_UART5, 27, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_S_UART5, 27, 10),
+ RK3528_CRU_RESET_OFFSET(SRST_P_UART6, 27, 11),
+ RK3528_CRU_RESET_OFFSET(SRST_S_UART6, 27, 12),
+ RK3528_CRU_RESET_OFFSET(SRST_P_UART7, 27, 13),
+ RK3528_CRU_RESET_OFFSET(SRST_S_UART7, 27, 14),
+ RK3528_CRU_RESET_OFFSET(SRST_P_I2C3, 27, 15),
+
+ /* CRU_SOFTRST_CON28 */
+ RK3528_CRU_RESET_OFFSET(SRST_I2C3, 28, 0),
+ RK3528_CRU_RESET_OFFSET(SRST_P_I2C5, 28, 1),
+ RK3528_CRU_RESET_OFFSET(SRST_I2C5, 28, 2),
+ RK3528_CRU_RESET_OFFSET(SRST_P_I2C6, 28, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_I2C6, 28, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_A_MAC, 28, 5),
+
+ /* CRU_SOFTRST_CON30 */
+ RK3528_CRU_RESET_OFFSET(SRST_P_PCIE, 30, 1),
+ RK3528_CRU_RESET_OFFSET(SRST_PCIE_PIPE_PHY, 30, 2),
+ RK3528_CRU_RESET_OFFSET(SRST_PCIE_POWER_UP, 30, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_P_PCIE_PHY, 30, 6),
+ RK3528_CRU_RESET_OFFSET(SRST_P_PIPE_GRF, 30, 7),
+
+ /* CRU_SOFTRST_CON32 */
+ RK3528_CRU_RESET_OFFSET(SRST_H_SDIO0, 32, 2),
+ RK3528_CRU_RESET_OFFSET(SRST_H_SDIO1, 32, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_TS_0, 32, 5),
+ RK3528_CRU_RESET_OFFSET(SRST_TS_1, 32, 6),
+ RK3528_CRU_RESET_OFFSET(SRST_P_CAN2, 32, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_CAN2, 32, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_P_CAN3, 32, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_CAN3, 32, 10),
+ RK3528_CRU_RESET_OFFSET(SRST_P_SARADC, 32, 11),
+ RK3528_CRU_RESET_OFFSET(SRST_SARADC, 32, 12),
+ RK3528_CRU_RESET_OFFSET(SRST_SARADC_PHY, 32, 13),
+ RK3528_CRU_RESET_OFFSET(SRST_P_TSADC, 32, 14),
+ RK3528_CRU_RESET_OFFSET(SRST_TSADC, 32, 15),
+
+ /* CRU_SOFTRST_CON33 */
+ RK3528_CRU_RESET_OFFSET(SRST_A_USB3OTG, 33, 1),
+
+ /* CRU_SOFTRST_CON34 */
+ RK3528_CRU_RESET_OFFSET(SRST_A_GPU_BIU, 34, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_P_GPU_BIU, 34, 5),
+ RK3528_CRU_RESET_OFFSET(SRST_A_GPU, 34, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_REF_PVTPLL_GPU, 34, 9),
+
+ /* CRU_SOFTRST_CON36 */
+ RK3528_CRU_RESET_OFFSET(SRST_H_RKVENC_BIU, 36, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_A_RKVENC_BIU, 36, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_P_RKVENC_BIU, 36, 5),
+ RK3528_CRU_RESET_OFFSET(SRST_H_RKVENC, 36, 6),
+ RK3528_CRU_RESET_OFFSET(SRST_A_RKVENC, 36, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_CORE_RKVENC, 36, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_H_SAI_I2S1, 36, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_M_SAI_I2S1, 36, 10),
+ RK3528_CRU_RESET_OFFSET(SRST_P_I2C1, 36, 11),
+ RK3528_CRU_RESET_OFFSET(SRST_I2C1, 36, 12),
+ RK3528_CRU_RESET_OFFSET(SRST_P_I2C0, 36, 13),
+ RK3528_CRU_RESET_OFFSET(SRST_I2C0, 36, 14),
+
+ /* CRU_SOFTRST_CON37 */
+ RK3528_CRU_RESET_OFFSET(SRST_P_SPI0, 37, 2),
+ RK3528_CRU_RESET_OFFSET(SRST_SPI0, 37, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_P_GPIO4, 37, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_DB_GPIO4, 37, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_P_RKVENC_IOC, 37, 10),
+ RK3528_CRU_RESET_OFFSET(SRST_H_SPDIF, 37, 14),
+ RK3528_CRU_RESET_OFFSET(SRST_M_SPDIF, 37, 15),
+
+ /* CRU_SOFTRST_CON38 */
+ RK3528_CRU_RESET_OFFSET(SRST_H_PDM, 38, 0),
+ RK3528_CRU_RESET_OFFSET(SRST_M_PDM, 38, 1),
+ RK3528_CRU_RESET_OFFSET(SRST_P_UART1, 38, 2),
+ RK3528_CRU_RESET_OFFSET(SRST_S_UART1, 38, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_P_UART3, 38, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_S_UART3, 38, 5),
+ RK3528_CRU_RESET_OFFSET(SRST_P_RKVENC_GRF, 38, 6),
+ RK3528_CRU_RESET_OFFSET(SRST_P_CAN0, 38, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_CAN0, 38, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_P_CAN1, 38, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_CAN1, 38, 10),
+
+ /* CRU_SOFTRST_CON39 */
+ RK3528_CRU_RESET_OFFSET(SRST_A_VO_BIU, 39, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_H_VO_BIU, 39, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_P_VO_BIU, 39, 5),
+ RK3528_CRU_RESET_OFFSET(SRST_H_RGA2E, 39, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_A_RGA2E, 39, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_CORE_RGA2E, 39, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_H_VDPP, 39, 10),
+ RK3528_CRU_RESET_OFFSET(SRST_A_VDPP, 39, 11),
+ RK3528_CRU_RESET_OFFSET(SRST_CORE_VDPP, 39, 12),
+ RK3528_CRU_RESET_OFFSET(SRST_P_VO_GRF, 39, 13),
+ RK3528_CRU_RESET_OFFSET(SRST_P_CRU, 39, 15),
+
+ /* CRU_SOFTRST_CON40 */
+ RK3528_CRU_RESET_OFFSET(SRST_A_VOP_BIU, 40, 1),
+ RK3528_CRU_RESET_OFFSET(SRST_H_VOP, 40, 2),
+ RK3528_CRU_RESET_OFFSET(SRST_D_VOP0, 40, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_D_VOP1, 40, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_A_VOP, 40, 5),
+ RK3528_CRU_RESET_OFFSET(SRST_P_HDMI, 40, 6),
+ RK3528_CRU_RESET_OFFSET(SRST_HDMI, 40, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_P_HDMIPHY, 40, 14),
+ RK3528_CRU_RESET_OFFSET(SRST_H_HDCP_KEY, 40, 15),
+
+ /* CRU_SOFTRST_CON41 */
+ RK3528_CRU_RESET_OFFSET(SRST_A_HDCP, 41, 0),
+ RK3528_CRU_RESET_OFFSET(SRST_H_HDCP, 41, 1),
+ RK3528_CRU_RESET_OFFSET(SRST_P_HDCP, 41, 2),
+ RK3528_CRU_RESET_OFFSET(SRST_H_CVBS, 41, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_D_CVBS_VOP, 41, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_D_4X_CVBS_VOP, 41, 5),
+ RK3528_CRU_RESET_OFFSET(SRST_A_JPEG_DECODER, 41, 6),
+ RK3528_CRU_RESET_OFFSET(SRST_H_JPEG_DECODER, 41, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_A_VO_L_BIU, 41, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_A_MAC_VO, 41, 10),
+
+ /* CRU_SOFTRST_CON42 */
+ RK3528_CRU_RESET_OFFSET(SRST_A_JPEG_BIU, 42, 0),
+ RK3528_CRU_RESET_OFFSET(SRST_H_SAI_I2S3, 42, 1),
+ RK3528_CRU_RESET_OFFSET(SRST_M_SAI_I2S3, 42, 2),
+ RK3528_CRU_RESET_OFFSET(SRST_MACPHY, 42, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_P_VCDCPHY, 42, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_P_GPIO2, 42, 5),
+ RK3528_CRU_RESET_OFFSET(SRST_DB_GPIO2, 42, 6),
+ RK3528_CRU_RESET_OFFSET(SRST_P_VO_IOC, 42, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_H_SDMMC0, 42, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_P_OTPC_NS, 42, 11),
+ RK3528_CRU_RESET_OFFSET(SRST_SBPI_OTPC_NS, 42, 12),
+ RK3528_CRU_RESET_OFFSET(SRST_USER_OTPC_NS, 42, 13),
+
+ /* CRU_SOFTRST_CON43 */
+ RK3528_CRU_RESET_OFFSET(SRST_HDMIHDP0, 43, 2),
+ RK3528_CRU_RESET_OFFSET(SRST_H_USBHOST, 43, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_H_USBHOST_ARB, 43, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_HOST_UTMI, 43, 6),
+ RK3528_CRU_RESET_OFFSET(SRST_P_UART4, 43, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_S_UART4, 43, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_P_I2C4, 43, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_I2C4, 43, 10),
+ RK3528_CRU_RESET_OFFSET(SRST_P_I2C7, 43, 11),
+ RK3528_CRU_RESET_OFFSET(SRST_I2C7, 43, 12),
+ RK3528_CRU_RESET_OFFSET(SRST_P_USBPHY, 43, 13),
+ RK3528_CRU_RESET_OFFSET(SRST_USBPHY_POR, 43, 14),
+ RK3528_CRU_RESET_OFFSET(SRST_USBPHY_OTG, 43, 15),
+
+ /* CRU_SOFTRST_CON44 */
+ RK3528_CRU_RESET_OFFSET(SRST_USBPHY_HOST, 44, 0),
+ RK3528_CRU_RESET_OFFSET(SRST_P_DDRPHY_CRU, 44, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_H_RKVDEC_BIU, 44, 6),
+ RK3528_CRU_RESET_OFFSET(SRST_A_RKVDEC_BIU, 44, 7),
+ RK3528_CRU_RESET_OFFSET(SRST_A_RKVDEC, 44, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_H_RKVDEC, 44, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_HEVC_CA_RKVDEC, 44, 11),
+ RK3528_CRU_RESET_OFFSET(SRST_REF_PVTPLL_RKVDEC, 44, 12),
+
+ /* CRU_SOFTRST_CON45 */
+ RK3528_CRU_RESET_OFFSET(SRST_P_DDR_BIU, 45, 1),
+ RK3528_CRU_RESET_OFFSET(SRST_P_DDRC, 45, 2),
+ RK3528_CRU_RESET_OFFSET(SRST_P_DDRMON, 45, 3),
+ RK3528_CRU_RESET_OFFSET(SRST_TIMER_DDRMON, 45, 4),
+ RK3528_CRU_RESET_OFFSET(SRST_P_MSCH_BIU, 45, 5),
+ RK3528_CRU_RESET_OFFSET(SRST_P_DDR_GRF, 45, 6),
+ RK3528_CRU_RESET_OFFSET(SRST_P_DDR_HWLP, 45, 8),
+ RK3528_CRU_RESET_OFFSET(SRST_P_DDRPHY, 45, 9),
+ RK3528_CRU_RESET_OFFSET(SRST_MSCH_BIU, 45, 10),
+ RK3528_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL, 45, 11),
+ RK3528_CRU_RESET_OFFSET(SRST_DDR_UPCTL, 45, 12),
+ RK3528_CRU_RESET_OFFSET(SRST_DDRMON, 45, 13),
+ RK3528_CRU_RESET_OFFSET(SRST_A_DDR_SCRAMBLE, 45, 14),
+ RK3528_CRU_RESET_OFFSET(SRST_A_SPLIT, 45, 15),
+
+ /* CRU_SOFTRST_CON46 */
+ RK3528_CRU_RESET_OFFSET(SRST_DDR_PHY, 46, 0),
+};
+
+void rk3528_rst_init(struct device_node *np, void __iomem *reg_base)
+{
+ rockchip_register_softrst_lut(np,
+ rk3528_register_offset,
+ ARRAY_SIZE(rk3528_register_offset),
+ reg_base + RK3528_SOFTRST_CON(0),
+ ROCKCHIP_SOFTRST_HIWORD_MASK);
+}
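
As the RK3528_CRU_RESET_OFFSET macro above shows, each lookup-table entry packs a SOFTRST_CON register index and a bit position into a single integer (reg * 16 + bit). Below is a minimal standalone sketch of that encoding, under the assumption that the generic Rockchip softrst helper consumes each entry as register index = value / 16 and bit = value % 16; the decode_softrst() helper is hypothetical and not part of this patch.

#include <stdio.h>

/* Hypothetical decode helper, mirroring the (reg * 16 + bit) packing above. */
static void decode_softrst(unsigned int lut_value, unsigned int *reg,
			   unsigned int *bit)
{
	*reg = lut_value / 16;	/* each SOFTRST_CON register carries 16 resets */
	*bit = lut_value % 16;
}

int main(void)
{
	unsigned int reg, bit;

	/* SRST_CORE_CRYPTO is encoded as (3 * 16 + 10) in the table above */
	decode_softrst(3 * 16 + 10, &reg, &bit);
	printf("CRU_SOFTRST_CON%02u, bit %u\n", reg, bit);	/* CON03, bit 10 */
	return 0;
}
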
diff --git a/drivers/clk/rockchip/rst-rk3562.c b/drivers/clk/rockchip/rst-rk3562.c
new file mode 100644
index 000000000000..a3854eaef3be
--- /dev/null
+++ b/drivers/clk/rockchip/rst-rk3562.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2024 Rockchip Electronics Co., Ltd.
+ * Copyright (c) 2024 Collabora Ltd.
+ * Author: Detlev Casanova <detlev.casanova@collabora.com>
+ * Based on Sebastian Reichel's implementation for RK3588
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <dt-bindings/reset/rockchip,rk3562-cru.h>
+#include "clk.h"
+
+/* 0xff100000 + 0x0A00 */
+#define RK3562_CRU_RESET_OFFSET(id, reg, bit) [id] = (0 + reg * 16 + bit)
+/* 0xff110000 + 0x0A00 */
+#define RK3562_PMU0CRU_RESET_OFFSET(id, reg, bit) [id] = (0x10000*4 + reg * 16 + bit)
+/* 0xff118000 + 0x0A00 */
+#define RK3562_PMU1CRU_RESET_OFFSET(id, reg, bit) [id] = (0x18000*4 + reg * 16 + bit)
+/* 0xff120000 + 0x0A00 */
+#define RK3562_DDRCRU_RESET_OFFSET(id, reg, bit) [id] = (0x20000*4 + reg * 16 + bit)
+/* 0xff128000 + 0x0A00 */
+#define RK3562_SUBDDRCRU_RESET_OFFSET(id, reg, bit) [id] = (0x28000*4 + reg * 16 + bit)
+/* 0xff130000 + 0x0A00 */
+#define RK3562_PERICRU_RESET_OFFSET(id, reg, bit) [id] = (0x30000*4 + reg * 16 + bit)
+
+/* mapping table for reset ID to register offset */
+static const int rk3562_register_offset[] = {
+ /* SOFTRST_CON01 */
+ RK3562_CRU_RESET_OFFSET(SRST_A_TOP_BIU, 1, 0),
+ RK3562_CRU_RESET_OFFSET(SRST_A_TOP_VIO_BIU, 1, 1),
+ RK3562_CRU_RESET_OFFSET(SRST_REF_PVTPLL_LOGIC, 1, 2),
+
+ /* SOFTRST_CON03 */
+ RK3562_CRU_RESET_OFFSET(SRST_NCOREPORESET0, 3, 0),
+ RK3562_CRU_RESET_OFFSET(SRST_NCOREPORESET1, 3, 1),
+ RK3562_CRU_RESET_OFFSET(SRST_NCOREPORESET2, 3, 2),
+ RK3562_CRU_RESET_OFFSET(SRST_NCOREPORESET3, 3, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_NCORESET0, 3, 4),
+ RK3562_CRU_RESET_OFFSET(SRST_NCORESET1, 3, 5),
+ RK3562_CRU_RESET_OFFSET(SRST_NCORESET2, 3, 6),
+ RK3562_CRU_RESET_OFFSET(SRST_NCORESET3, 3, 7),
+ RK3562_CRU_RESET_OFFSET(SRST_NL2RESET, 3, 8),
+
+ /* SOFTRST_CON04 */
+ RK3562_CRU_RESET_OFFSET(SRST_DAP, 4, 9),
+ RK3562_CRU_RESET_OFFSET(SRST_P_DBG_DAPLITE, 4, 10),
+ RK3562_CRU_RESET_OFFSET(SRST_REF_PVTPLL_CORE, 4, 13),
+
+ /* SOFTRST_CON05 */
+ RK3562_CRU_RESET_OFFSET(SRST_A_CORE_BIU, 5, 0),
+ RK3562_CRU_RESET_OFFSET(SRST_P_CORE_BIU, 5, 1),
+ RK3562_CRU_RESET_OFFSET(SRST_H_CORE_BIU, 5, 2),
+
+ /* SOFTRST_CON06 */
+ RK3562_CRU_RESET_OFFSET(SRST_A_NPU_BIU, 6, 2),
+ RK3562_CRU_RESET_OFFSET(SRST_H_NPU_BIU, 6, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_A_RKNN, 6, 4),
+ RK3562_CRU_RESET_OFFSET(SRST_H_RKNN, 6, 5),
+ RK3562_CRU_RESET_OFFSET(SRST_REF_PVTPLL_NPU, 6, 6),
+
+ /* SOFTRST_CON08 */
+ RK3562_CRU_RESET_OFFSET(SRST_A_GPU_BIU, 8, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_GPU, 8, 4),
+ RK3562_CRU_RESET_OFFSET(SRST_REF_PVTPLL_GPU, 8, 5),
+ RK3562_CRU_RESET_OFFSET(SRST_GPU_BRG_BIU, 8, 8),
+
+ /* SOFTRST_CON09 */
+ RK3562_CRU_RESET_OFFSET(SRST_RKVENC_CORE, 9, 0),
+ RK3562_CRU_RESET_OFFSET(SRST_A_VEPU_BIU, 9, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_H_VEPU_BIU, 9, 4),
+ RK3562_CRU_RESET_OFFSET(SRST_A_RKVENC, 9, 5),
+ RK3562_CRU_RESET_OFFSET(SRST_H_RKVENC, 9, 6),
+
+ /* SOFTRST_CON10 */
+ RK3562_CRU_RESET_OFFSET(SRST_RKVDEC_HEVC_CA, 10, 2),
+ RK3562_CRU_RESET_OFFSET(SRST_A_VDPU_BIU, 10, 5),
+ RK3562_CRU_RESET_OFFSET(SRST_H_VDPU_BIU, 10, 6),
+ RK3562_CRU_RESET_OFFSET(SRST_A_RKVDEC, 10, 7),
+ RK3562_CRU_RESET_OFFSET(SRST_H_RKVDEC, 10, 8),
+
+ /* SOFTRST_CON11 */
+ RK3562_CRU_RESET_OFFSET(SRST_A_VI_BIU, 11, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_H_VI_BIU, 11, 4),
+ RK3562_CRU_RESET_OFFSET(SRST_P_VI_BIU, 11, 5),
+ RK3562_CRU_RESET_OFFSET(SRST_ISP, 11, 8),
+ RK3562_CRU_RESET_OFFSET(SRST_A_VICAP, 11, 9),
+ RK3562_CRU_RESET_OFFSET(SRST_H_VICAP, 11, 10),
+ RK3562_CRU_RESET_OFFSET(SRST_D_VICAP, 11, 11),
+ RK3562_CRU_RESET_OFFSET(SRST_I0_VICAP, 11, 12),
+ RK3562_CRU_RESET_OFFSET(SRST_I1_VICAP, 11, 13),
+ RK3562_CRU_RESET_OFFSET(SRST_I2_VICAP, 11, 14),
+ RK3562_CRU_RESET_OFFSET(SRST_I3_VICAP, 11, 15),
+
+ /* SOFTRST_CON12 */
+ RK3562_CRU_RESET_OFFSET(SRST_P_CSIHOST0, 12, 0),
+ RK3562_CRU_RESET_OFFSET(SRST_P_CSIHOST1, 12, 1),
+ RK3562_CRU_RESET_OFFSET(SRST_P_CSIHOST2, 12, 2),
+ RK3562_CRU_RESET_OFFSET(SRST_P_CSIHOST3, 12, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_P_CSIPHY0, 12, 4),
+ RK3562_CRU_RESET_OFFSET(SRST_P_CSIPHY1, 12, 5),
+
+ /* SOFTRST_CON13 */
+ RK3562_CRU_RESET_OFFSET(SRST_A_VO_BIU, 13, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_H_VO_BIU, 13, 4),
+ RK3562_CRU_RESET_OFFSET(SRST_A_VOP, 13, 6),
+ RK3562_CRU_RESET_OFFSET(SRST_H_VOP, 13, 7),
+ RK3562_CRU_RESET_OFFSET(SRST_D_VOP, 13, 8),
+ RK3562_CRU_RESET_OFFSET(SRST_D_VOP1, 13, 9),
+
+ /* SOFTRST_CON14 */
+ RK3562_CRU_RESET_OFFSET(SRST_A_RGA_BIU, 14, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_H_RGA_BIU, 14, 4),
+ RK3562_CRU_RESET_OFFSET(SRST_A_RGA, 14, 6),
+ RK3562_CRU_RESET_OFFSET(SRST_H_RGA, 14, 7),
+ RK3562_CRU_RESET_OFFSET(SRST_RGA_CORE, 14, 8),
+ RK3562_CRU_RESET_OFFSET(SRST_A_JDEC, 14, 9),
+ RK3562_CRU_RESET_OFFSET(SRST_H_JDEC, 14, 10),
+
+ /* SOFTRST_CON15 */
+ RK3562_CRU_RESET_OFFSET(SRST_B_EBK_BIU, 15, 2),
+ RK3562_CRU_RESET_OFFSET(SRST_P_EBK_BIU, 15, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_AHB2AXI_EBC, 15, 4),
+ RK3562_CRU_RESET_OFFSET(SRST_H_EBC, 15, 5),
+ RK3562_CRU_RESET_OFFSET(SRST_D_EBC, 15, 6),
+ RK3562_CRU_RESET_OFFSET(SRST_H_EINK, 15, 7),
+ RK3562_CRU_RESET_OFFSET(SRST_P_EINK, 15, 8),
+
+ /* SOFTRST_CON16 */
+ RK3562_CRU_RESET_OFFSET(SRST_P_PHP_BIU, 16, 2),
+ RK3562_CRU_RESET_OFFSET(SRST_A_PHP_BIU, 16, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_P_PCIE20, 16, 7),
+ RK3562_CRU_RESET_OFFSET(SRST_PCIE20_POWERUP, 16, 8),
+ RK3562_CRU_RESET_OFFSET(SRST_USB3OTG, 16, 10),
+
+ /* SOFTRST_CON17 */
+ RK3562_CRU_RESET_OFFSET(SRST_PIPEPHY, 17, 3),
+
+ /* SOFTRST_CON18 */
+ RK3562_CRU_RESET_OFFSET(SRST_A_BUS_BIU, 18, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_H_BUS_BIU, 18, 4),
+ RK3562_CRU_RESET_OFFSET(SRST_P_BUS_BIU, 18, 5),
+
+ /* SOFTRST_CON19 */
+ RK3562_CRU_RESET_OFFSET(SRST_P_I2C1, 19, 0),
+ RK3562_CRU_RESET_OFFSET(SRST_P_I2C2, 19, 1),
+ RK3562_CRU_RESET_OFFSET(SRST_P_I2C3, 19, 2),
+ RK3562_CRU_RESET_OFFSET(SRST_P_I2C4, 19, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_P_I2C5, 19, 4),
+ RK3562_CRU_RESET_OFFSET(SRST_I2C1, 19, 6),
+ RK3562_CRU_RESET_OFFSET(SRST_I2C2, 19, 7),
+ RK3562_CRU_RESET_OFFSET(SRST_I2C3, 19, 8),
+ RK3562_CRU_RESET_OFFSET(SRST_I2C4, 19, 9),
+ RK3562_CRU_RESET_OFFSET(SRST_I2C5, 19, 10),
+
+ /* SOFTRST_CON20 */
+ RK3562_CRU_RESET_OFFSET(SRST_BUS_GPIO3, 20, 5),
+ RK3562_CRU_RESET_OFFSET(SRST_BUS_GPIO4, 20, 6),
+
+ /* SOFTRST_CON21 */
+ RK3562_CRU_RESET_OFFSET(SRST_P_TIMER, 21, 0),
+ RK3562_CRU_RESET_OFFSET(SRST_TIMER0, 21, 1),
+ RK3562_CRU_RESET_OFFSET(SRST_TIMER1, 21, 2),
+ RK3562_CRU_RESET_OFFSET(SRST_TIMER2, 21, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_TIMER3, 21, 4),
+ RK3562_CRU_RESET_OFFSET(SRST_TIMER4, 21, 5),
+ RK3562_CRU_RESET_OFFSET(SRST_TIMER5, 21, 6),
+ RK3562_CRU_RESET_OFFSET(SRST_P_STIMER, 21, 7),
+ RK3562_CRU_RESET_OFFSET(SRST_STIMER0, 21, 8),
+ RK3562_CRU_RESET_OFFSET(SRST_STIMER1, 21, 9),
+
+ /* SOFTRST_CON22 */
+ RK3562_CRU_RESET_OFFSET(SRST_P_WDTNS, 22, 0),
+ RK3562_CRU_RESET_OFFSET(SRST_WDTNS, 22, 1),
+ RK3562_CRU_RESET_OFFSET(SRST_P_GRF, 22, 2),
+ RK3562_CRU_RESET_OFFSET(SRST_P_SGRF, 22, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_P_MAILBOX, 22, 4),
+ RK3562_CRU_RESET_OFFSET(SRST_P_INTC, 22, 5),
+ RK3562_CRU_RESET_OFFSET(SRST_A_BUS_GIC400, 22, 6),
+ RK3562_CRU_RESET_OFFSET(SRST_A_BUS_GIC400_DEBUG, 22, 7),
+
+ /* SOFTRST_CON23 */
+ RK3562_CRU_RESET_OFFSET(SRST_A_BUS_SPINLOCK, 23, 0),
+ RK3562_CRU_RESET_OFFSET(SRST_A_DCF, 23, 1),
+ RK3562_CRU_RESET_OFFSET(SRST_P_DCF, 23, 2),
+ RK3562_CRU_RESET_OFFSET(SRST_F_BUS_CM0_CORE, 23, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_T_BUS_CM0_JTAG, 23, 5),
+ RK3562_CRU_RESET_OFFSET(SRST_H_ICACHE, 23, 8),
+ RK3562_CRU_RESET_OFFSET(SRST_H_DCACHE, 23, 9),
+
+ /* SOFTRST_CON24 */
+ RK3562_CRU_RESET_OFFSET(SRST_P_TSADC, 24, 0),
+ RK3562_CRU_RESET_OFFSET(SRST_TSADC, 24, 1),
+ RK3562_CRU_RESET_OFFSET(SRST_TSADCPHY, 24, 2),
+ RK3562_CRU_RESET_OFFSET(SRST_P_DFT2APB, 24, 4),
+
+ /* SOFTRST_CON25 */
+ RK3562_CRU_RESET_OFFSET(SRST_A_GMAC, 25, 0),
+ RK3562_CRU_RESET_OFFSET(SRST_P_APB2ASB_VCCIO156, 25, 1),
+ RK3562_CRU_RESET_OFFSET(SRST_P_DSIPHY, 25, 5),
+ RK3562_CRU_RESET_OFFSET(SRST_P_DSITX, 25, 8),
+ RK3562_CRU_RESET_OFFSET(SRST_P_CPU_EMA_DET, 25, 9),
+ RK3562_CRU_RESET_OFFSET(SRST_P_HASH, 25, 10),
+ RK3562_CRU_RESET_OFFSET(SRST_P_TOPCRU, 25, 11),
+
+ /* SOFTRST_CON26 */
+ RK3562_CRU_RESET_OFFSET(SRST_P_ASB2APB_VCCIO156, 26, 0),
+ RK3562_CRU_RESET_OFFSET(SRST_P_IOC_VCCIO156, 26, 1),
+ RK3562_CRU_RESET_OFFSET(SRST_P_GPIO3_VCCIO156, 26, 2),
+ RK3562_CRU_RESET_OFFSET(SRST_P_GPIO4_VCCIO156, 26, 3),
+ RK3562_CRU_RESET_OFFSET(SRST_P_SARADC_VCCIO156, 26, 4),
+ RK3562_CRU_RESET_OFFSET(SRST_SARADC_VCCIO156, 26, 5),
+ RK3562_CRU_RESET_OFFSET(SRST_SARADC_VCCIO156_PHY, 26, 6),
+
+ /* SOFTRST_CON27 */
+ RK3562_CRU_RESET_OFFSET(SRST_A_MAC100, 27, 1),
+
+ /* PMU0_SOFTRST_CON00 */
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_P_PMU0_CRU, 0, 0),
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_P_PMU0_PMU, 0, 1),
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_PMU0_PMU, 0, 2),
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_P_PMU0_HP_TIMER, 0, 3),
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_PMU0_HP_TIMER, 0, 4),
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_PMU0_32K_HP_TIMER, 0, 5),
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_P_PMU0_PVTM, 0, 6),
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_PMU0_PVTM, 0, 7),
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_P_IOC_PMUIO, 0, 8),
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_P_PMU0_GPIO0, 0, 9),
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_PMU0_GPIO0, 0, 10),
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_P_PMU0_GRF, 0, 11),
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_P_PMU0_SGRF, 0, 12),
+
+ /* PMU0_SOFTRST_CON01 */
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_DDR_FAIL_SAFE, 1, 0),
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_P_PMU0_SCRKEYGEN, 1, 1),
+
+ /* PMU0_SOFTRST_CON02 */
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_P_PMU0_I2C0, 2, 8),
+ RK3562_PMU0CRU_RESET_OFFSET(SRST_PMU0_I2C0, 2, 9),
+
+ /* PMU1_SOFTRST_CON00 */
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_P_PMU1_CRU, 0, 0),
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_H_PMU1_MEM, 0, 2),
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_H_PMU1_BIU, 0, 3),
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_P_PMU1_BIU, 0, 4),
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_P_PMU1_UART0, 0, 7),
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_S_PMU1_UART0, 0, 10),
+
+ /* PMU1_SOFTRST_CON01 */
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_P_PMU1_SPI0, 1, 0),
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_PMU1_SPI0, 1, 1),
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_P_PMU1_PWM0, 1, 3),
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_PMU1_PWM0, 1, 4),
+
+ /* PMU1_SOFTRST_CON02 */
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_F_PMU1_CM0_CORE, 2, 0),
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_T_PMU1_CM0_JTAG, 2, 2),
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_P_PMU1_WDTNS, 2, 3),
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_PMU1_WDTNS, 2, 4),
+ RK3562_PMU1CRU_RESET_OFFSET(SRST_PMU1_MAILBOX, 2, 8),
+
+ /* DDR_SOFTRST_CON00 */
+ RK3562_DDRCRU_RESET_OFFSET(SRST_MSCH_BRG_BIU, 0, 4),
+ RK3562_DDRCRU_RESET_OFFSET(SRST_P_MSCH_BIU, 0, 5),
+ RK3562_DDRCRU_RESET_OFFSET(SRST_P_DDR_HWLP, 0, 6),
+ RK3562_DDRCRU_RESET_OFFSET(SRST_P_DDR_PHY, 0, 8),
+ RK3562_DDRCRU_RESET_OFFSET(SRST_P_DDR_DFICTL, 0, 9),
+ RK3562_DDRCRU_RESET_OFFSET(SRST_P_DDR_DMA2DDR, 0, 10),
+
+ /* DDR_SOFTRST_CON01 */
+ RK3562_DDRCRU_RESET_OFFSET(SRST_P_DDR_MON, 1, 0),
+ RK3562_DDRCRU_RESET_OFFSET(SRST_TM_DDR_MON, 1, 1),
+ RK3562_DDRCRU_RESET_OFFSET(SRST_P_DDR_GRF, 1, 2),
+ RK3562_DDRCRU_RESET_OFFSET(SRST_P_DDR_CRU, 1, 3),
+ RK3562_DDRCRU_RESET_OFFSET(SRST_P_SUBDDR_CRU, 1, 4),
+
+ /* SUBDDR_SOFTRST_CON00 */
+ RK3562_SUBDDRCRU_RESET_OFFSET(SRST_MSCH_BIU, 0, 1),
+ RK3562_SUBDDRCRU_RESET_OFFSET(SRST_DDR_PHY, 0, 4),
+ RK3562_SUBDDRCRU_RESET_OFFSET(SRST_DDR_DFICTL, 0, 5),
+ RK3562_SUBDDRCRU_RESET_OFFSET(SRST_DDR_SCRAMBLE, 0, 6),
+ RK3562_SUBDDRCRU_RESET_OFFSET(SRST_DDR_MON, 0, 7),
+ RK3562_SUBDDRCRU_RESET_OFFSET(SRST_A_DDR_SPLIT, 0, 8),
+ RK3562_SUBDDRCRU_RESET_OFFSET(SRST_DDR_DMA2DDR, 0, 9),
+
+ /* PERI_SOFTRST_CON01 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_A_PERI_BIU, 1, 3),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_PERI_BIU, 1, 4),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_PERI_BIU, 1, 5),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_PERICRU, 1, 6),
+
+ /* PERI_SOFTRST_CON02 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_SAI0_8CH, 2, 0),
+ RK3562_PERICRU_RESET_OFFSET(SRST_M_SAI0_8CH, 2, 3),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_SAI1_8CH, 2, 5),
+ RK3562_PERICRU_RESET_OFFSET(SRST_M_SAI1_8CH, 2, 8),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_SAI2_2CH, 2, 10),
+ RK3562_PERICRU_RESET_OFFSET(SRST_M_SAI2_2CH, 2, 13),
+
+ /* PERI_SOFTRST_CON03 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_DSM, 3, 1),
+ RK3562_PERICRU_RESET_OFFSET(SRST_DSM, 3, 2),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_PDM, 3, 4),
+ RK3562_PERICRU_RESET_OFFSET(SRST_M_PDM, 3, 5),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_SPDIF, 3, 8),
+ RK3562_PERICRU_RESET_OFFSET(SRST_M_SPDIF, 3, 11),
+
+ /* PERI_SOFTRST_CON04 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_SDMMC0, 4, 0),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_SDMMC1, 4, 2),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_EMMC, 4, 8),
+ RK3562_PERICRU_RESET_OFFSET(SRST_A_EMMC, 4, 9),
+ RK3562_PERICRU_RESET_OFFSET(SRST_C_EMMC, 4, 10),
+ RK3562_PERICRU_RESET_OFFSET(SRST_B_EMMC, 4, 11),
+ RK3562_PERICRU_RESET_OFFSET(SRST_T_EMMC, 4, 12),
+ RK3562_PERICRU_RESET_OFFSET(SRST_S_SFC, 4, 13),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_SFC, 4, 14),
+
+ /* PERI_SOFTRST_CON05 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_USB2HOST, 5, 0),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_USB2HOST_ARB, 5, 1),
+ RK3562_PERICRU_RESET_OFFSET(SRST_USB2HOST_UTMI, 5, 2),
+
+ /* PERI_SOFTRST_CON06 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_SPI1, 6, 0),
+ RK3562_PERICRU_RESET_OFFSET(SRST_SPI1, 6, 1),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_SPI2, 6, 3),
+ RK3562_PERICRU_RESET_OFFSET(SRST_SPI2, 6, 4),
+
+ /* PERI_SOFTRST_CON07 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_UART1, 7, 0),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_UART2, 7, 1),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_UART3, 7, 2),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_UART4, 7, 3),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_UART5, 7, 4),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_UART6, 7, 5),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_UART7, 7, 6),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_UART8, 7, 7),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_UART9, 7, 8),
+ RK3562_PERICRU_RESET_OFFSET(SRST_S_UART1, 7, 11),
+ RK3562_PERICRU_RESET_OFFSET(SRST_S_UART2, 7, 14),
+
+ /* PERI_SOFTRST_CON08 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_S_UART3, 8, 1),
+ RK3562_PERICRU_RESET_OFFSET(SRST_S_UART4, 8, 4),
+ RK3562_PERICRU_RESET_OFFSET(SRST_S_UART5, 8, 7),
+ RK3562_PERICRU_RESET_OFFSET(SRST_S_UART6, 8, 10),
+ RK3562_PERICRU_RESET_OFFSET(SRST_S_UART7, 8, 13),
+
+ /* PERI_SOFTRST_CON09 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_S_UART8, 9, 0),
+ RK3562_PERICRU_RESET_OFFSET(SRST_S_UART9, 9, 3),
+
+ /* PERI_SOFTRST_CON10 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_PWM1_PERI, 10, 0),
+ RK3562_PERICRU_RESET_OFFSET(SRST_PWM1_PERI, 10, 1),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_PWM2_PERI, 10, 3),
+ RK3562_PERICRU_RESET_OFFSET(SRST_PWM2_PERI, 10, 4),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_PWM3_PERI, 10, 6),
+ RK3562_PERICRU_RESET_OFFSET(SRST_PWM3_PERI, 10, 7),
+
+ /* PERI_SOFTRST_CON11 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_CAN0, 11, 0),
+ RK3562_PERICRU_RESET_OFFSET(SRST_CAN0, 11, 1),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_CAN1, 11, 2),
+ RK3562_PERICRU_RESET_OFFSET(SRST_CAN1, 11, 3),
+
+ /* PERI_SOFTRST_CON12 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_A_CRYPTO, 12, 0),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_CRYPTO, 12, 1),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_CRYPTO, 12, 2),
+ RK3562_PERICRU_RESET_OFFSET(SRST_CORE_CRYPTO, 12, 3),
+ RK3562_PERICRU_RESET_OFFSET(SRST_PKA_CRYPTO, 12, 4),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_KLAD, 12, 5),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_KEY_READER, 12, 6),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_RK_RNG_NS, 12, 7),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_RK_RNG_S, 12, 8),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_TRNG_NS, 12, 9),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_TRNG_S, 12, 10),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_CRYPTO_S, 12, 11),
+
+ /* PERI_SOFTRST_CON13 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_PERI_WDT, 13, 0),
+ RK3562_PERICRU_RESET_OFFSET(SRST_T_PERI_WDT, 13, 1),
+ RK3562_PERICRU_RESET_OFFSET(SRST_A_SYSMEM, 13, 2),
+ RK3562_PERICRU_RESET_OFFSET(SRST_H_BOOTROM, 13, 3),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_PERI_GRF, 13, 4),
+ RK3562_PERICRU_RESET_OFFSET(SRST_A_DMAC, 13, 5),
+ RK3562_PERICRU_RESET_OFFSET(SRST_A_RKDMAC, 13, 6),
+
+ /* PERI_SOFTRST_CON14 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_OTPC_NS, 14, 0),
+ RK3562_PERICRU_RESET_OFFSET(SRST_SBPI_OTPC_NS, 14, 1),
+ RK3562_PERICRU_RESET_OFFSET(SRST_USER_OTPC_NS, 14, 2),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_OTPC_S, 14, 3),
+ RK3562_PERICRU_RESET_OFFSET(SRST_SBPI_OTPC_S, 14, 4),
+ RK3562_PERICRU_RESET_OFFSET(SRST_USER_OTPC_S, 14, 5),
+ RK3562_PERICRU_RESET_OFFSET(SRST_OTPC_ARB, 14, 6),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_OTPPHY, 14, 7),
+ RK3562_PERICRU_RESET_OFFSET(SRST_OTP_NPOR, 14, 8),
+
+ /* PERI_SOFTRST_CON15 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_USB2PHY, 15, 0),
+ RK3562_PERICRU_RESET_OFFSET(SRST_USB2PHY_POR, 15, 4),
+ RK3562_PERICRU_RESET_OFFSET(SRST_USB2PHY_OTG, 15, 5),
+ RK3562_PERICRU_RESET_OFFSET(SRST_USB2PHY_HOST, 15, 6),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_PIPEPHY, 15, 7),
+
+ /* PERI_SOFTRST_CON16 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_SARADC, 16, 4),
+ RK3562_PERICRU_RESET_OFFSET(SRST_SARADC, 16, 5),
+ RK3562_PERICRU_RESET_OFFSET(SRST_SARADC_PHY, 16, 6),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_IOC_VCCIO234, 16, 12),
+
+ /* PERI_SOFTRST_CON17 */
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_PERI_GPIO1, 17, 0),
+ RK3562_PERICRU_RESET_OFFSET(SRST_P_PERI_GPIO2, 17, 1),
+ RK3562_PERICRU_RESET_OFFSET(SRST_PERI_GPIO1, 17, 2),
+ RK3562_PERICRU_RESET_OFFSET(SRST_PERI_GPIO2, 17, 3),
+};
+
+void rk3562_rst_init(struct device_node *np, void __iomem *reg_base)
+{
+ rockchip_register_softrst_lut(np,
+ rk3562_register_offset,
+ ARRAY_SIZE(rk3562_register_offset),
+ reg_base + RK3562_SOFTRST_CON(0),
+ ROCKCHIP_SOFTRST_HIWORD_MASK);
+}
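
The RK3562 table reuses the same (reg * 16 + bit) packing but adds a per-block bias (0x10000 * 4, 0x18000 * 4, and so on) so that resets from the PMU0, PMU1, DDR, SUBDDR and PERI CRUs can all share one lookup table. The short sketch below shows where such an entry lands, assuming the shared softrst code turns an entry into a byte offset of (value / 16) * 4 from SOFTRST_CON(0) and a bit of value % 16; it is illustrative only, not part of this patch.

#include <stdio.h>

int main(void)
{
	/* SRST_PMU0_GPIO0 is encoded as (0x10000 * 4 + 0 * 16 + 10) above */
	unsigned long val = 0x10000UL * 4 + 0 * 16 + 10;
	unsigned long byte_off = (val / 16) * 4;	/* relative to RK3562_SOFTRST_CON(0) */
	unsigned int bit = val % 16;

	/* the 0x10000 bias steers the write into the PMU0 CRU block */
	printf("byte offset 0x%lx, bit %u\n", byte_off, bit);
	return 0;
}
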
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 90e5b114872c..b77fe288e4bb 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -17,7 +17,9 @@ obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos5433.o
obj-$(CONFIG_EXYNOS_AUDSS_CLK_CON) += clk-exynos-audss.o
obj-$(CONFIG_EXYNOS_CLKOUT) += clk-exynos-clkout.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos-arm64.o
+obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos2200.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7.o
+obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7870.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7885.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos850.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos8895.o
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
index dfa149e648aa..97982662e1a6 100644
--- a/drivers/clk/samsung/clk-cpu.c
+++ b/drivers/clk/samsung/clk-cpu.c
@@ -133,7 +133,7 @@ static void wait_until_divider_stable(void __iomem *div_reg, unsigned long mask)
if (!(readl(div_reg) & mask))
return;
- pr_err("%s: timeout in divider stablization\n", __func__);
+ pr_err("%s: timeout in divider stabilization\n", __func__);
}
/*
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index e11ac67819ef..0f5ae3e8d000 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -11,6 +11,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index 2ef5748c139b..5f1a4f5e2e59 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-exynos2200.c b/drivers/clk/samsung/clk-exynos2200.c
new file mode 100644
index 000000000000..eab9f5eecfa3
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos2200.c
@@ -0,0 +1,3928 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025 Ivaylo Ivanov <ivo.ivanov.ivanov1@gmail.com>
+ * Author: Ivaylo Ivanov <ivo.ivanov.ivanov1@gmail.com>
+ *
+ * Common Clock Framework support for Exynos2200 SoC.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/samsung,exynos2200-cmu.h>
+
+#include "clk.h"
+#include "clk-exynos-arm64.h"
+
+/* NOTE: Must be equal to the last clock ID increased by one */
+#define CLKS_NR_TOP (CLK_DOUT_TCXO_DIV4 + 1)
+#define CLKS_NR_ALIVE (CLK_DOUT_ALIVE_DSP_NOC + 1)
+#define CLKS_NR_PERIS (CLK_DOUT_PERIS_DDD_CTRL + 1)
+#define CLKS_NR_CMGP (CLK_DOUT_CMGP_USI6 + 1)
+#define CLKS_NR_HSI0 (CLK_DOUT_DIV_CLK_HSI0_EUSB + 1)
+#define CLKS_NR_PERIC0 (CLK_DOUT_PERIC0_USI04 + 1)
+#define CLKS_NR_PERIC1 (CLK_DOUT_PERIC1_USI10 + 1)
+#define CLKS_NR_PERIC2 (CLK_DOUT_PERIC2_USI11 + 1)
+#define CLKS_NR_UFS (CLK_MOUT_UFS_UFS_EMBD_USER + 1)
+#define CLKS_NR_VTS (CLK_DOUT_CLKVTS_SERIAL_LIF_CORE + 1)
+
+/* ---- CMU_TOP ------------------------------------------------------------ */
+
+/* Register Offset definitions for CMU_TOP (0x1a320000) */
+#define PLL_LOCKTIME_PLL_MMC 0x0
+#define PLL_LOCKTIME_PLL_SHARED0 0x4
+#define PLL_LOCKTIME_PLL_SHARED1 0x8
+#define PLL_LOCKTIME_PLL_SHARED2 0xc
+#define PLL_LOCKTIME_PLL_SHARED3 0x10
+#define PLL_LOCKTIME_PLL_SHARED4 0x14
+#define PLL_LOCKTIME_PLL_SHARED_MIF 0x18
+#define PLL_CON3_PLL_MMC 0x10c
+#define PLL_CON8_PLL_MMC 0x120
+#define PLL_CON3_PLL_SHARED0 0x14c
+#define PLL_CON8_PLL_SHARED0 0x160
+#define PLL_CON3_PLL_SHARED1 0x18c
+#define PLL_CON8_PLL_SHARED1 0x1a0
+#define PLL_CON3_PLL_SHARED2 0x1cc
+#define PLL_CON8_PLL_SHARED2 0x1e0
+#define PLL_CON3_PLL_SHARED3 0x20c
+#define PLL_CON8_PLL_SHARED3 0x220
+#define PLL_CON3_PLL_SHARED4 0x24c
+#define PLL_CON8_PLL_SHARED4 0x260
+#define PLL_CON3_PLL_SHARED_MIF 0x28c
+#define PLL_CON8_PLL_SHARED_MIF 0x2a0
+#define PLL_CON0_MUX_CP_MPLL_CLK_D2_USER 0x600
+#define PLL_CON1_MUX_CP_MPLL_CLK_D2_USER 0x604
+#define PLL_CON0_MUX_CP_MPLL_CLK_USER 0x610
+#define PLL_CON1_MUX_CP_MPLL_CLK_USER 0x614
+#define CLK_CON_MUX_CLKCMU_AUD_AUDIF0 0x1000
+#define CLK_CON_MUX_CLKCMU_AUD_AUDIF1 0x1004
+#define CLK_CON_MUX_CLKCMU_AUD_CPU 0x1008
+#define CLK_CON_MUX_CLKCMU_CPUCL0_DBG_NOC 0x100c
+#define CLK_CON_MUX_CLKCMU_CPUCL0_SWITCH 0x1010
+#define CLK_CON_MUX_CLKCMU_CPUCL1_SWITCH 0x1014
+#define CLK_CON_MUX_CLKCMU_CPUCL2_SWITCH 0x1018
+#define CLK_CON_MUX_CLKCMU_DNC_NOC 0x101c
+#define CLK_CON_MUX_CLKCMU_DPUB_NOC 0x1020
+#define CLK_CON_MUX_CLKCMU_DPUF_NOC 0x1024
+#define CLK_CON_MUX_CLKCMU_DSP_NOC 0x102c
+#define CLK_CON_MUX_CLKCMU_DSU_SWITCH 0x1030
+#define CLK_CON_MUX_CLKCMU_G3D_SWITCH 0x1034
+#define CLK_CON_MUX_CLKCMU_GNPU_NOC 0x103c
+#define CLK_CON_MUX_CLKCMU_UFS_MMC_CARD 0x1040
+#define CLK_CON_MUX_CLKCMU_M2M_NOC 0x1044
+#define CLK_CON_MUX_CLKCMU_NOCL0_NOC 0x1048
+#define CLK_CON_MUX_CLKCMU_NOCL1A_NOC 0x104c
+#define CLK_CON_MUX_CLKCMU_NOCL1B_NOC0 0x1050
+#define CLK_CON_MUX_CLKCMU_NOCL1C_NOC 0x1054
+#define CLK_CON_MUX_CLKCMU_SDMA_NOC 0x1058
+#define CLK_CON_MUX_CP_HISPEEDY_CLK 0x105c
+#define CLK_CON_MUX_CP_SHARED0_CLK 0x1060
+#define CLK_CON_MUX_CP_SHARED2_CLK 0x1064
+#define CLK_CON_MUX_MUX_CLKCMU_ALIVE_NOC 0x1068
+#define CLK_CON_MUX_MUX_CLKCMU_AUD_AUDIF0 0x106c
+#define CLK_CON_MUX_MUX_CLKCMU_AUD_AUDIF1 0x1070
+#define CLK_CON_MUX_MUX_CLKCMU_AUD_CPU 0x1074
+#define CLK_CON_MUX_MUX_CLKCMU_AUD_NOC 0x1078
+#define CLK_CON_MUX_MUX_CLKCMU_BRP_NOC 0x107c
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0 0x1080
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1 0x1084
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2 0x1088
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3 0x108c
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK4 0x1090
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK5 0x1094
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK6 0x1098
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK7 0x109c
+#define CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST 0x10a0
+#define CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_CAM 0x10a4
+#define CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_CPU 0x10a8
+#define CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_MIF 0x10ac
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG_NOC 0x10b0
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_NOCP 0x10b4
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH 0x10b8
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH 0x10bc
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH 0x10c0
+#define CLK_CON_MUX_MUX_CLKCMU_CSIS_DCPHY 0x10c4
+#define CLK_CON_MUX_MUX_CLKCMU_CSIS_NOC 0x10c8
+#define CLK_CON_MUX_MUX_CLKCMU_CSIS_OIS_MCU 0x10cc
+#define CLK_CON_MUX_MUX_CLKCMU_CSTAT_NOC 0x10d0
+#define CLK_CON_MUX_MUX_CLKCMU_DNC_NOC 0x10d4
+#define CLK_CON_MUX_MUX_CLKCMU_DPUB 0x10d8
+#define CLK_CON_MUX_MUX_CLKCMU_DPUB_ALT 0x10dc
+#define CLK_CON_MUX_MUX_CLKCMU_DPUB_DSIM 0x10e0
+#define CLK_CON_MUX_MUX_CLKCMU_DPUF 0x10e4
+#define CLK_CON_MUX_MUX_CLKCMU_DPUF_ALT 0x10e8
+#define CLK_CON_MUX_MUX_CLKCMU_DSP_NOC 0x10f8
+#define CLK_CON_MUX_MUX_CLKCMU_DSU_SWITCH 0x10fc
+#define CLK_CON_MUX_MUX_CLKCMU_G3D_NOCP 0x1100
+#define CLK_CON_MUX_MUX_CLKCMU_G3D_SWITCH 0x1104
+#define CLK_CON_MUX_MUX_CLKCMU_GNPU_NOC 0x110c
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_DPGTC 0x1114
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_DPOSC 0x1118
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_NOC 0x111c
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_USB32DRD 0x1120
+#define CLK_CON_MUX_MUX_CLKCMU_UFS_MMC_CARD 0x1124
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_NOC 0x1128
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_PCIE 0x112c
+#define CLK_CON_MUX_MUX_CLKCMU_UFS_UFS_EMBD 0x1130
+#define CLK_CON_MUX_MUX_CLKCMU_LME_LME 0x1134
+#define CLK_CON_MUX_MUX_CLKCMU_LME_NOC 0x1138
+#define CLK_CON_MUX_MUX_CLKCMU_M2M_NOC 0x1140
+#define CLK_CON_MUX_MUX_CLKCMU_MCSC_MCSC 0x1148
+#define CLK_CON_MUX_MUX_CLKCMU_MCSC_NOC 0x114c
+#define CLK_CON_MUX_MUX_CLKCMU_MFC0_MFC0 0x1150
+#define CLK_CON_MUX_MUX_CLKCMU_MFC0_WFD 0x1154
+#define CLK_CON_MUX_MUX_CLKCMU_MFC1_MFC1 0x1158
+#define CLK_CON_MUX_MUX_CLKCMU_MIF_NOCP 0x115c
+#define CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH 0x1160
+#define CLK_CON_MUX_MUX_CLKCMU_NOCL0_NOC 0x1164
+#define CLK_CON_MUX_MUX_CLKCMU_NOCL1A_NOC 0x1168
+#define CLK_CON_MUX_MUX_CLKCMU_NOCL1B_NOC0 0x116c
+#define CLK_CON_MUX_MUX_CLKCMU_NOCL1B_NOC1 0x1170
+#define CLK_CON_MUX_MUX_CLKCMU_NOCL1C_NOC 0x1174
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP0 0x1178
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP1 0x117c
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_NOC 0x1180
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP0 0x1184
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP1 0x1188
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_NOC 0x118c
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC2_IP0 0x1190
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC2_IP1 0x1194
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC2_NOC 0x1198
+#define CLK_CON_MUX_MUX_CLKCMU_PERIS_GIC 0x119c
+#define CLK_CON_MUX_MUX_CLKCMU_PERIS_NOC 0x11a0
+#define CLK_CON_MUX_MUX_CLKCMU_SDMA_NOC 0x11a8
+#define CLK_CON_MUX_MUX_CLKCMU_SSP_NOC 0x11ac
+#define CLK_CON_MUX_MUX_CLKCMU_VTS_DMIC 0x11b0
+#define CLK_CON_MUX_MUX_CLKCMU_YUVP_NOC 0x11b4
+#define CLK_CON_MUX_MUX_CMU_CMUREF 0x11b8
+#define CLK_CON_MUX_MUX_CP_HISPEEDY_CLK 0x11bc
+#define CLK_CON_MUX_MUX_CP_SHARED0_CLK 0x11c0
+#define CLK_CON_MUX_MUX_CP_SHARED1_CLK 0x11c4
+#define CLK_CON_MUX_MUX_CP_SHARED2_CLK 0x11c8
+#define CLK_CON_MUX_CLKCMU_M2M_FRC 0x11cc
+#define CLK_CON_MUX_CLKCMU_MCSC_MCSC 0x11d0
+#define CLK_CON_MUX_CLKCMU_MCSC_NOC 0x11d4
+#define CLK_CON_MUX_MUX_CLKCMU_M2M_FRC 0x11d8
+#define CLK_CON_MUX_MUX_CLKCMU_UFS_NOC 0x11dc
+#define CLK_CON_DIV_CLKCMU_ALIVE_NOC 0x1800
+#define CLK_CON_DIV_CLKCMU_AUD_NOC 0x1804
+#define CLK_CON_DIV_CLKCMU_BRP_NOC 0x1808
+#define CLK_CON_DIV_CLKCMU_CMU_BOOST 0x180c
+#define CLK_CON_DIV_CLKCMU_CMU_BOOST_CAM 0x1810
+#define CLK_CON_DIV_CLKCMU_CMU_BOOST_CPU 0x1814
+#define CLK_CON_DIV_CLKCMU_CMU_BOOST_MIF 0x1818
+#define CLK_CON_DIV_CLKCMU_CPUCL0_NOCP 0x181c
+#define CLK_CON_DIV_CLKCMU_CSIS_DCPHY 0x1820
+#define CLK_CON_DIV_CLKCMU_CSIS_NOC 0x1824
+#define CLK_CON_DIV_CLKCMU_CSIS_OIS_MCU 0x1828
+#define CLK_CON_DIV_CLKCMU_CSTAT_NOC 0x182c
+#define CLK_CON_DIV_CLKCMU_DPUB_DSIM 0x1830
+#define CLK_CON_DIV_CLKCMU_LME_LME 0x1834
+#define CLK_CON_DIV_CLKCMU_G3D_NOCP 0x1838
+#define CLK_CON_DIV_CLKCMU_HSI0_DPGTC 0x1840
+#define CLK_CON_DIV_CLKCMU_HSI0_DPOSC 0x1844
+#define CLK_CON_DIV_CLKCMU_HSI0_NOC 0x1848
+#define CLK_CON_DIV_CLKCMU_HSI0_USB32DRD 0x184c
+#define CLK_CON_DIV_CLKCMU_HSI1_NOC 0x1850
+#define CLK_CON_DIV_CLKCMU_HSI1_PCIE 0x1854
+#define CLK_CON_DIV_CLKCMU_UFS_UFS_EMBD 0x1858
+#define CLK_CON_DIV_CLKCMU_LME_NOC 0x1860
+#define CLK_CON_DIV_CLKCMU_MFC0_MFC0 0x1874
+#define CLK_CON_DIV_CLKCMU_MFC0_WFD 0x1878
+#define CLK_CON_DIV_CLKCMU_MFC1_MFC1 0x187c
+#define CLK_CON_DIV_CLKCMU_MIF_NOCP 0x1880
+#define CLK_CON_DIV_CLKCMU_NOCL1B_NOC1 0x1884
+#define CLK_CON_DIV_CLKCMU_PERIC0_IP0 0x1888
+#define CLK_CON_DIV_CLKCMU_PERIC0_IP1 0x188c
+#define CLK_CON_DIV_CLKCMU_PERIC0_NOC 0x1890
+#define CLK_CON_DIV_CLKCMU_PERIC1_IP0 0x1894
+#define CLK_CON_DIV_CLKCMU_PERIC1_IP1 0x1898
+#define CLK_CON_DIV_CLKCMU_PERIC1_NOC 0x189c
+#define CLK_CON_DIV_CLKCMU_PERIC2_IP0 0x18a0
+#define CLK_CON_DIV_CLKCMU_PERIC2_IP1 0x18a4
+#define CLK_CON_DIV_CLKCMU_PERIC2_NOC 0x18a8
+#define CLK_CON_DIV_CLKCMU_PERIS_GIC 0x18ac
+#define CLK_CON_DIV_CLKCMU_PERIS_NOC 0x18b0
+#define CLK_CON_DIV_CLKCMU_SSP_NOC 0x18b8
+#define CLK_CON_DIV_CLKCMU_VTS_DMIC 0x18bc
+#define CLK_CON_DIV_CLKCMU_YUVP_NOC 0x18c0
+#define CLK_CON_DIV_CP_SHARED1_CLK 0x18c4
+#define CLK_CON_DIV_DIV_CLKCMU_AUD_AUDIF0 0x18c8
+#define CLK_CON_DIV_DIV_CLKCMU_AUD_AUDIF0_SM 0x18cc
+#define CLK_CON_DIV_DIV_CLKCMU_AUD_AUDIF1 0x18d0
+#define CLK_CON_DIV_DIV_CLKCMU_AUD_AUDIF1_SM 0x18d4
+#define CLK_CON_DIV_DIV_CLKCMU_AUD_CPU 0x18d8
+#define CLK_CON_DIV_DIV_CLKCMU_AUD_CPU_SM 0x18dc
+#define CLK_CON_DIV_DIV_CLKCMU_CIS_CLK0 0x18e0
+#define CLK_CON_DIV_DIV_CLKCMU_CIS_CLK1 0x18e4
+#define CLK_CON_DIV_DIV_CLKCMU_CIS_CLK2 0x18e8
+#define CLK_CON_DIV_DIV_CLKCMU_CIS_CLK3 0x18ec
+#define CLK_CON_DIV_DIV_CLKCMU_CIS_CLK4 0x18f0
+#define CLK_CON_DIV_DIV_CLKCMU_CIS_CLK5 0x18f4
+#define CLK_CON_DIV_DIV_CLKCMU_CIS_CLK6 0x18f8
+#define CLK_CON_DIV_DIV_CLKCMU_CIS_CLK7 0x18fc
+#define CLK_CON_DIV_DIV_CLKCMU_CPUCL0_DBG_NOC 0x1900
+#define CLK_CON_DIV_DIV_CLKCMU_CPUCL0_DBG_NOC_SM 0x1904
+#define CLK_CON_DIV_DIV_CLKCMU_CPUCL0_SWITCH 0x1908
+#define CLK_CON_DIV_DIV_CLKCMU_CPUCL0_SWITCH_SM 0x190c
+#define CLK_CON_DIV_DIV_CLKCMU_CPUCL1_SWITCH 0x1910
+#define CLK_CON_DIV_DIV_CLKCMU_CPUCL1_SWITCH_SM 0x1914
+#define CLK_CON_DIV_DIV_CLKCMU_CPUCL2_SWITCH 0x1918
+#define CLK_CON_DIV_DIV_CLKCMU_CPUCL2_SWITCH_SM 0x191c
+#define CLK_CON_DIV_DIV_CLKCMU_DNC_NOC 0x1920
+#define CLK_CON_DIV_DIV_CLKCMU_DNC_NOC_SM 0x1924
+#define CLK_CON_DIV_DIV_CLKCMU_DPUB 0x1928
+#define CLK_CON_DIV_DIV_CLKCMU_DPUB_ALT 0x192c
+#define CLK_CON_DIV_DIV_CLKCMU_DPUF 0x1930
+#define CLK_CON_DIV_DIV_CLKCMU_DPUF_ALT 0x1934
+#define CLK_CON_DIV_DIV_CLKCMU_DSP_NOC 0x1940
+#define CLK_CON_DIV_DIV_CLKCMU_DSP_NOC_SM 0x1944
+#define CLK_CON_DIV_DIV_CLKCMU_DSU_SWITCH 0x1948
+#define CLK_CON_DIV_DIV_CLKCMU_DSU_SWITCH_SM 0x194c
+#define CLK_CON_DIV_DIV_CLKCMU_G3D_SWITCH 0x1950
+#define CLK_CON_DIV_DIV_CLKCMU_G3D_SWITCH_SM 0x1954
+#define CLK_CON_DIV_DIV_CLKCMU_GNPU_NOC 0x1960
+#define CLK_CON_DIV_DIV_CLKCMU_GNPU_NOC_SM 0x1964
+#define CLK_CON_DIV_DIV_CLKCMU_UFS_MMC_CARD 0x1968
+#define CLK_CON_DIV_DIV_CLKCMU_UFS_MMC_CARD_SM 0x196c
+#define CLK_CON_DIV_DIV_CLKCMU_M2M_NOC 0x1970
+#define CLK_CON_DIV_DIV_CLKCMU_M2M_NOC_SM 0x1974
+#define CLK_CON_DIV_DIV_CLKCMU_NOCL0_NOC 0x1978
+#define CLK_CON_DIV_DIV_CLKCMU_NOCL0_NOC_SM 0x197c
+#define CLK_CON_DIV_DIV_CLKCMU_NOCL1A_NOC 0x1980
+#define CLK_CON_DIV_DIV_CLKCMU_NOCL1A_NOC_SM 0x1984
+#define CLK_CON_DIV_DIV_CLKCMU_NOCL1B_NOC0 0x1988
+#define CLK_CON_DIV_DIV_CLKCMU_NOCL1B_NOC0_SM 0x198c
+#define CLK_CON_DIV_DIV_CLKCMU_NOCL1C_NOC 0x1990
+#define CLK_CON_DIV_DIV_CLKCMU_NOCL1C_NOC_SM 0x1994
+#define CLK_CON_DIV_DIV_CLKCMU_SDMA_NOC 0x1998
+#define CLK_CON_DIV_DIV_CLKCMU_SDMA_NOC_SM 0x199c
+#define CLK_CON_DIV_DIV_CP_HISPEEDY_CLK 0x19a0
+#define CLK_CON_DIV_DIV_CP_HISPEEDY_CLK_SM 0x19a4
+#define CLK_CON_DIV_DIV_CP_SHARED0_CLK 0x19a8
+#define CLK_CON_DIV_DIV_CP_SHARED0_CLK_SM 0x19ac
+#define CLK_CON_DIV_DIV_CP_SHARED2_CLK 0x19b0
+#define CLK_CON_DIV_DIV_CP_SHARED2_CLK_SM 0x19b4
+#define CLK_CON_DIV_CLKCMU_UFS_NOC 0x19b8
+#define CLK_CON_DIV_DIV_CLKCMU_M2M_FRC 0x19bc
+#define CLK_CON_DIV_DIV_CLKCMU_M2M_FRC_SM 0x19c0
+#define CLK_CON_DIV_DIV_CLKCMU_MCSC_MCSC 0x19c4
+#define CLK_CON_DIV_DIV_CLKCMU_MCSC_MCSC_SM 0x19c8
+#define CLK_CON_DIV_DIV_CLKCMU_MCSC_NOC 0x19cc
+#define CLK_CON_DIV_DIV_CLKCMU_MCSC_NOC_SM 0x19d0
+#define CLK_CON_GAT_CLKCMU_MIF01_SWITCH 0x2000
+#define CLK_CON_GAT_CLKCMU_MIF23_SWITCH 0x2004
+#define CLK_CON_GAT_GATE_CLKCMU_ALIVE_NOC 0x200c
+#define CLK_CON_GAT_GATE_CLKCMU_AUD_AUDIF0 0x2010
+#define CLK_CON_GAT_GATE_CLKCMU_AUD_AUDIF0_SM 0x2014
+#define CLK_CON_GAT_GATE_CLKCMU_AUD_AUDIF1 0x2018
+#define CLK_CON_GAT_GATE_CLKCMU_AUD_AUDIF1_SM 0x201c
+#define CLK_CON_GAT_GATE_CLKCMU_AUD_CPU 0x2020
+#define CLK_CON_GAT_GATE_CLKCMU_AUD_CPU_SM 0x2024
+#define CLK_CON_GAT_GATE_CLKCMU_AUD_NOC 0x2028
+#define CLK_CON_GAT_GATE_CLKCMU_BRP_NOC 0x202c
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK0 0x2030
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK1 0x2034
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK2 0x2038
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK3 0x203c
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK4 0x2040
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK5 0x2044
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK6 0x2048
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK7 0x204c
+#define CLK_CON_GAT_GATE_CLKCMU_CMU_BOOST 0x2050
+#define CLK_CON_GAT_GATE_CLKCMU_CMU_BOOST_CAM 0x2054
+#define CLK_CON_GAT_GATE_CLKCMU_CMU_BOOST_CPU 0x2058
+#define CLK_CON_GAT_GATE_CLKCMU_CMU_BOOST_CPU_MIF 0x205c
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG_NOC 0x2060
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG_NOC_SM 0x2064
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_NOCP 0x2068
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH 0x206c
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH_SM 0x2070
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH 0x2074
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH_SM 0x2078
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL2_SWITCH 0x207c
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL2_SWITCH_SM 0x2080
+#define CLK_CON_GAT_GATE_CLKCMU_CSIS_DCPHY 0x2084
+#define CLK_CON_GAT_GATE_CLKCMU_CSIS_NOC 0x2088
+#define CLK_CON_GAT_GATE_CLKCMU_CSIS_OIS_MCU 0x208c
+#define CLK_CON_GAT_GATE_CLKCMU_CSTAT_NOC 0x2090
+#define CLK_CON_GAT_GATE_CLKCMU_DNC_NOC 0x2094
+#define CLK_CON_GAT_GATE_CLKCMU_DNC_NOC_SM 0x2098
+#define CLK_CON_GAT_GATE_CLKCMU_DPUB 0x209c
+#define CLK_CON_GAT_GATE_CLKCMU_DPUB_ALT 0x20a0
+#define CLK_CON_GAT_GATE_CLKCMU_DPUB_DSIM 0x20a4
+#define CLK_CON_GAT_GATE_CLKCMU_DPUF 0x20a8
+#define CLK_CON_GAT_GATE_CLKCMU_DPUF_ALT 0x20ac
+#define CLK_CON_GAT_GATE_CLKCMU_DSP_NOC 0x20bc
+#define CLK_CON_GAT_GATE_CLKCMU_DSP_NOC_SM 0x20c0
+#define CLK_CON_GAT_GATE_CLKCMU_DSU_SWITCH 0x20c4
+#define CLK_CON_GAT_GATE_CLKCMU_DSU_SWITCH_SM 0x20c8
+#define CLK_CON_GAT_GATE_CLKCMU_G3D_NOCP 0x20cc
+#define CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH 0x20d0
+#define CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH_SM 0x20d4
+#define CLK_CON_GAT_GATE_CLKCMU_GNPU_NOC 0x20e0
+#define CLK_CON_GAT_GATE_CLKCMU_GNPU_NOC_SM 0x20e4
+#define CLK_CON_GAT_GATE_CLKCMU_HSI0_DPGTC 0x20ec
+#define CLK_CON_GAT_GATE_CLKCMU_HSI0_DPOSC 0x20f0
+#define CLK_CON_GAT_GATE_CLKCMU_HSI0_NOC 0x20f4
+#define CLK_CON_GAT_GATE_CLKCMU_HSI0_USB32DRD 0x20f8
+#define CLK_CON_GAT_GATE_CLKCMU_UFS_MMC_CARD 0x20fc
+#define CLK_CON_GAT_GATE_CLKCMU_UFS_MMC_CARD_SM 0x2100
+#define CLK_CON_GAT_GATE_CLKCMU_HSI1_NOC 0x2104
+#define CLK_CON_GAT_GATE_CLKCMU_HSI1_PCIE 0x2108
+#define CLK_CON_GAT_GATE_CLKCMU_UFS_UFS_EMBD 0x210c
+#define CLK_CON_GAT_GATE_CLKCMU_LME_LME 0x2110
+#define CLK_CON_GAT_GATE_CLKCMU_LME_NOC 0x2114
+#define CLK_CON_GAT_GATE_CLKCMU_M2M_NOC 0x2118
+#define CLK_CON_GAT_GATE_CLKCMU_M2M_NOC_SM 0x211c
+#define CLK_CON_GAT_GATE_CLKCMU_MFC0_MFC0 0x212c
+#define CLK_CON_GAT_GATE_CLKCMU_MFC0_WFD 0x2130
+#define CLK_CON_GAT_GATE_CLKCMU_MFC1_MFC1 0x2134
+#define CLK_CON_GAT_GATE_CLKCMU_MIF_NOCP 0x2138
+#define CLK_CON_GAT_GATE_CLKCMU_NOCL0_NOC 0x213c
+#define CLK_CON_GAT_GATE_CLKCMU_NOCL0_NOC_SM 0x2140
+#define CLK_CON_GAT_GATE_CLKCMU_NOCL1A_NOC 0x2144
+#define CLK_CON_GAT_GATE_CLKCMU_NOCL1A_NOC_SM 0x2148
+#define CLK_CON_GAT_GATE_CLKCMU_NOCL1B_NOC0 0x214c
+#define CLK_CON_GAT_GATE_CLKCMU_NOCL1B_NOC0_SM 0x2150
+#define CLK_CON_GAT_GATE_CLKCMU_NOCL1B_NOC1 0x2154
+#define CLK_CON_GAT_GATE_CLKCMU_NOCL1C_NOC 0x2158
+#define CLK_CON_GAT_GATE_CLKCMU_NOCL1C_NOC_SM 0x215c
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP0 0x2160
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP1 0x2164
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_NOC 0x2168
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP0 0x216c
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP1 0x2170
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_NOC 0x2174
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC2_IP0 0x2178
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC2_IP1 0x217c
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC2_NOC 0x2180
+#define CLK_CON_GAT_GATE_CLKCMU_PERIS_GIC 0x2184
+#define CLK_CON_GAT_GATE_CLKCMU_PERIS_NOC 0x2188
+#define CLK_CON_GAT_GATE_CLKCMU_SDMA_NOC 0x2190
+#define CLK_CON_GAT_GATE_CLKCMU_SDMA_NOC_SM 0x2194
+#define CLK_CON_GAT_GATE_CLKCMU_SSP_NOC 0x2198
+#define CLK_CON_GAT_GATE_CLKCMU_VTS_DMIC 0x219c
+#define CLK_CON_GAT_GATE_CLKCMU_YUVP_NOC 0x21a0
+#define CLK_CON_GAT_GATE_CP_HISPEEDY_CLK 0x21a4
+#define CLK_CON_GAT_GATE_CP_HISPEEDY_CLK_SM 0x21a8
+#define CLK_CON_GAT_GATE_CP_SHARED0_CLK 0x21ac
+#define CLK_CON_GAT_GATE_CP_SHARED0_CLK_SM 0x21b0
+#define CLK_CON_GAT_GATE_CP_SHARED1_CLK 0x21b4
+#define CLK_CON_GAT_GATE_CP_SHARED2_CLK 0x21b8
+#define CLK_CON_GAT_GATE_CP_SHARED2_CLK_SM 0x21bc
+#define CLK_CON_GAT_GATE_CLKCMU_UFS_NOC 0x21c0
+#define CLK_CON_GAT_GATE_CLKCMU_M2M_FRC 0x21c4
+#define CLK_CON_GAT_GATE_CLKCMU_M2M_FRC_SM 0x21c8
+#define CLK_CON_GAT_GATE_CLKCMU_MCSC_MCSC 0x21cc
+#define CLK_CON_GAT_GATE_CLKCMU_MCSC_MCSC_SM 0x21d0
+#define CLK_CON_GAT_GATE_CLKCMU_MCSC_NOC 0x21d4
+#define CLK_CON_GAT_GATE_CLKCMU_MCSC_NOC_SM 0x21d8
+
+static const unsigned long top_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_MMC,
+ PLL_LOCKTIME_PLL_SHARED0,
+ PLL_LOCKTIME_PLL_SHARED1,
+ PLL_LOCKTIME_PLL_SHARED2,
+ PLL_LOCKTIME_PLL_SHARED3,
+ PLL_LOCKTIME_PLL_SHARED4,
+ PLL_LOCKTIME_PLL_SHARED_MIF,
+ PLL_CON3_PLL_MMC,
+ PLL_CON8_PLL_MMC,
+ PLL_CON3_PLL_SHARED0,
+ PLL_CON8_PLL_SHARED0,
+ PLL_CON3_PLL_SHARED1,
+ PLL_CON8_PLL_SHARED1,
+ PLL_CON3_PLL_SHARED2,
+ PLL_CON8_PLL_SHARED2,
+ PLL_CON3_PLL_SHARED3,
+ PLL_CON8_PLL_SHARED3,
+ PLL_CON3_PLL_SHARED4,
+ PLL_CON8_PLL_SHARED4,
+ PLL_CON3_PLL_SHARED_MIF,
+ PLL_CON8_PLL_SHARED_MIF,
+ PLL_CON0_MUX_CP_MPLL_CLK_D2_USER,
+ PLL_CON1_MUX_CP_MPLL_CLK_D2_USER,
+ PLL_CON0_MUX_CP_MPLL_CLK_USER,
+ PLL_CON1_MUX_CP_MPLL_CLK_USER,
+ CLK_CON_MUX_CLKCMU_AUD_AUDIF0,
+ CLK_CON_MUX_CLKCMU_AUD_AUDIF1,
+ CLK_CON_MUX_CLKCMU_AUD_CPU,
+ CLK_CON_MUX_CLKCMU_CPUCL0_DBG_NOC,
+ CLK_CON_MUX_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_MUX_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_MUX_CLKCMU_CPUCL2_SWITCH,
+ CLK_CON_MUX_CLKCMU_DNC_NOC,
+ CLK_CON_MUX_CLKCMU_DPUB_NOC,
+ CLK_CON_MUX_CLKCMU_DPUF_NOC,
+ CLK_CON_MUX_CLKCMU_DSP_NOC,
+ CLK_CON_MUX_CLKCMU_DSU_SWITCH,
+ CLK_CON_MUX_CLKCMU_G3D_SWITCH,
+ CLK_CON_MUX_CLKCMU_GNPU_NOC,
+ CLK_CON_MUX_CLKCMU_UFS_MMC_CARD,
+ CLK_CON_MUX_CLKCMU_M2M_NOC,
+ CLK_CON_MUX_CLKCMU_NOCL0_NOC,
+ CLK_CON_MUX_CLKCMU_NOCL1A_NOC,
+ CLK_CON_MUX_CLKCMU_NOCL1B_NOC0,
+ CLK_CON_MUX_CLKCMU_NOCL1C_NOC,
+ CLK_CON_MUX_CLKCMU_SDMA_NOC,
+ CLK_CON_MUX_CP_HISPEEDY_CLK,
+ CLK_CON_MUX_CP_SHARED0_CLK,
+ CLK_CON_MUX_CP_SHARED2_CLK,
+ CLK_CON_MUX_MUX_CLKCMU_ALIVE_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_AUD_AUDIF0,
+ CLK_CON_MUX_MUX_CLKCMU_AUD_AUDIF1,
+ CLK_CON_MUX_MUX_CLKCMU_AUD_CPU,
+ CLK_CON_MUX_MUX_CLKCMU_AUD_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_BRP_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK4,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK5,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK6,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK7,
+ CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST,
+ CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_CAM,
+ CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_CPU,
+ CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_MIF,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_NOCP,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CSIS_DCPHY,
+ CLK_CON_MUX_MUX_CLKCMU_CSIS_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_CSIS_OIS_MCU,
+ CLK_CON_MUX_MUX_CLKCMU_CSTAT_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_DNC_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_DPUB,
+ CLK_CON_MUX_MUX_CLKCMU_DPUB_ALT,
+ CLK_CON_MUX_MUX_CLKCMU_DPUB_DSIM,
+ CLK_CON_MUX_MUX_CLKCMU_DPUF,
+ CLK_CON_MUX_MUX_CLKCMU_DPUF_ALT,
+ CLK_CON_MUX_MUX_CLKCMU_DSP_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_DSU_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_G3D_NOCP,
+ CLK_CON_MUX_MUX_CLKCMU_G3D_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_GNPU_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_DPGTC,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_DPOSC,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_USB32DRD,
+ CLK_CON_MUX_MUX_CLKCMU_UFS_MMC_CARD,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_PCIE,
+ CLK_CON_MUX_MUX_CLKCMU_UFS_UFS_EMBD,
+ CLK_CON_MUX_MUX_CLKCMU_LME_LME,
+ CLK_CON_MUX_MUX_CLKCMU_LME_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_M2M_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_MCSC_MCSC,
+ CLK_CON_MUX_MUX_CLKCMU_MCSC_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_MFC0_MFC0,
+ CLK_CON_MUX_MUX_CLKCMU_MFC0_WFD,
+ CLK_CON_MUX_MUX_CLKCMU_MFC1_MFC1,
+ CLK_CON_MUX_MUX_CLKCMU_MIF_NOCP,
+ CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_NOCL0_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_NOCL1A_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_NOCL1B_NOC0,
+ CLK_CON_MUX_MUX_CLKCMU_NOCL1B_NOC1,
+ CLK_CON_MUX_MUX_CLKCMU_NOCL1C_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP0,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP1,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP0,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP1,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC2_IP0,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC2_IP1,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC2_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_PERIS_GIC,
+ CLK_CON_MUX_MUX_CLKCMU_PERIS_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_SDMA_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_SSP_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_VTS_DMIC,
+ CLK_CON_MUX_MUX_CLKCMU_YUVP_NOC,
+ CLK_CON_MUX_MUX_CMU_CMUREF,
+ CLK_CON_MUX_MUX_CP_HISPEEDY_CLK,
+ CLK_CON_MUX_MUX_CP_SHARED0_CLK,
+ CLK_CON_MUX_MUX_CP_SHARED1_CLK,
+ CLK_CON_MUX_MUX_CP_SHARED2_CLK,
+ CLK_CON_MUX_CLKCMU_M2M_FRC,
+ CLK_CON_MUX_CLKCMU_MCSC_MCSC,
+ CLK_CON_MUX_CLKCMU_MCSC_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_M2M_FRC,
+ CLK_CON_MUX_MUX_CLKCMU_UFS_NOC,
+ CLK_CON_DIV_CLKCMU_ALIVE_NOC,
+ CLK_CON_DIV_CLKCMU_AUD_NOC,
+ CLK_CON_DIV_CLKCMU_BRP_NOC,
+ CLK_CON_DIV_CLKCMU_CMU_BOOST,
+ CLK_CON_DIV_CLKCMU_CMU_BOOST_CAM,
+ CLK_CON_DIV_CLKCMU_CMU_BOOST_CPU,
+ CLK_CON_DIV_CLKCMU_CMU_BOOST_MIF,
+ CLK_CON_DIV_CLKCMU_CPUCL0_NOCP,
+ CLK_CON_DIV_CLKCMU_CSIS_DCPHY,
+ CLK_CON_DIV_CLKCMU_CSIS_NOC,
+ CLK_CON_DIV_CLKCMU_CSIS_OIS_MCU,
+ CLK_CON_DIV_CLKCMU_CSTAT_NOC,
+ CLK_CON_DIV_CLKCMU_DPUB_DSIM,
+ CLK_CON_DIV_CLKCMU_LME_LME,
+ CLK_CON_DIV_CLKCMU_G3D_NOCP,
+ CLK_CON_DIV_CLKCMU_HSI0_DPGTC,
+ CLK_CON_DIV_CLKCMU_HSI0_DPOSC,
+ CLK_CON_DIV_CLKCMU_HSI0_NOC,
+ CLK_CON_DIV_CLKCMU_HSI0_USB32DRD,
+ CLK_CON_DIV_CLKCMU_HSI1_NOC,
+ CLK_CON_DIV_CLKCMU_HSI1_PCIE,
+ CLK_CON_DIV_CLKCMU_UFS_UFS_EMBD,
+ CLK_CON_DIV_CLKCMU_LME_NOC,
+ CLK_CON_DIV_CLKCMU_MFC0_MFC0,
+ CLK_CON_DIV_CLKCMU_MFC0_WFD,
+ CLK_CON_DIV_CLKCMU_MFC1_MFC1,
+ CLK_CON_DIV_CLKCMU_MIF_NOCP,
+ CLK_CON_DIV_CLKCMU_NOCL1B_NOC1,
+ CLK_CON_DIV_CLKCMU_PERIC0_IP0,
+ CLK_CON_DIV_CLKCMU_PERIC0_IP1,
+ CLK_CON_DIV_CLKCMU_PERIC0_NOC,
+ CLK_CON_DIV_CLKCMU_PERIC1_IP0,
+ CLK_CON_DIV_CLKCMU_PERIC1_IP1,
+ CLK_CON_DIV_CLKCMU_PERIC1_NOC,
+ CLK_CON_DIV_CLKCMU_PERIC2_IP0,
+ CLK_CON_DIV_CLKCMU_PERIC2_IP1,
+ CLK_CON_DIV_CLKCMU_PERIC2_NOC,
+ CLK_CON_DIV_CLKCMU_PERIS_GIC,
+ CLK_CON_DIV_CLKCMU_PERIS_NOC,
+ CLK_CON_DIV_CLKCMU_SSP_NOC,
+ CLK_CON_DIV_CLKCMU_VTS_DMIC,
+ CLK_CON_DIV_CLKCMU_YUVP_NOC,
+ CLK_CON_DIV_CP_SHARED1_CLK,
+ CLK_CON_DIV_DIV_CLKCMU_AUD_AUDIF0,
+ CLK_CON_DIV_DIV_CLKCMU_AUD_AUDIF0_SM,
+ CLK_CON_DIV_DIV_CLKCMU_AUD_AUDIF1,
+ CLK_CON_DIV_DIV_CLKCMU_AUD_AUDIF1_SM,
+ CLK_CON_DIV_DIV_CLKCMU_AUD_CPU,
+ CLK_CON_DIV_DIV_CLKCMU_AUD_CPU_SM,
+ CLK_CON_DIV_DIV_CLKCMU_CIS_CLK0,
+ CLK_CON_DIV_DIV_CLKCMU_CIS_CLK1,
+ CLK_CON_DIV_DIV_CLKCMU_CIS_CLK2,
+ CLK_CON_DIV_DIV_CLKCMU_CIS_CLK3,
+ CLK_CON_DIV_DIV_CLKCMU_CIS_CLK4,
+ CLK_CON_DIV_DIV_CLKCMU_CIS_CLK5,
+ CLK_CON_DIV_DIV_CLKCMU_CIS_CLK6,
+ CLK_CON_DIV_DIV_CLKCMU_CIS_CLK7,
+ CLK_CON_DIV_DIV_CLKCMU_CPUCL0_DBG_NOC,
+ CLK_CON_DIV_DIV_CLKCMU_CPUCL0_DBG_NOC_SM,
+ CLK_CON_DIV_DIV_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_DIV_DIV_CLKCMU_CPUCL0_SWITCH_SM,
+ CLK_CON_DIV_DIV_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_DIV_DIV_CLKCMU_CPUCL1_SWITCH_SM,
+ CLK_CON_DIV_DIV_CLKCMU_CPUCL2_SWITCH,
+ CLK_CON_DIV_DIV_CLKCMU_CPUCL2_SWITCH_SM,
+ CLK_CON_DIV_DIV_CLKCMU_DNC_NOC,
+ CLK_CON_DIV_DIV_CLKCMU_DNC_NOC_SM,
+ CLK_CON_DIV_DIV_CLKCMU_DPUB,
+ CLK_CON_DIV_DIV_CLKCMU_DPUB_ALT,
+ CLK_CON_DIV_DIV_CLKCMU_DPUF,
+ CLK_CON_DIV_DIV_CLKCMU_DPUF_ALT,
+ CLK_CON_DIV_DIV_CLKCMU_DSP_NOC,
+ CLK_CON_DIV_DIV_CLKCMU_DSP_NOC_SM,
+ CLK_CON_DIV_DIV_CLKCMU_DSU_SWITCH,
+ CLK_CON_DIV_DIV_CLKCMU_DSU_SWITCH_SM,
+ CLK_CON_DIV_DIV_CLKCMU_G3D_SWITCH,
+ CLK_CON_DIV_DIV_CLKCMU_G3D_SWITCH_SM,
+ CLK_CON_DIV_DIV_CLKCMU_GNPU_NOC,
+ CLK_CON_DIV_DIV_CLKCMU_GNPU_NOC_SM,
+ CLK_CON_DIV_DIV_CLKCMU_UFS_MMC_CARD,
+ CLK_CON_DIV_DIV_CLKCMU_UFS_MMC_CARD_SM,
+ CLK_CON_DIV_DIV_CLKCMU_M2M_NOC,
+ CLK_CON_DIV_DIV_CLKCMU_M2M_NOC_SM,
+ CLK_CON_DIV_DIV_CLKCMU_NOCL0_NOC,
+ CLK_CON_DIV_DIV_CLKCMU_NOCL0_NOC_SM,
+ CLK_CON_DIV_DIV_CLKCMU_NOCL1A_NOC,
+ CLK_CON_DIV_DIV_CLKCMU_NOCL1A_NOC_SM,
+ CLK_CON_DIV_DIV_CLKCMU_NOCL1B_NOC0,
+ CLK_CON_DIV_DIV_CLKCMU_NOCL1B_NOC0_SM,
+ CLK_CON_DIV_DIV_CLKCMU_NOCL1C_NOC,
+ CLK_CON_DIV_DIV_CLKCMU_NOCL1C_NOC_SM,
+ CLK_CON_DIV_DIV_CLKCMU_SDMA_NOC,
+ CLK_CON_DIV_DIV_CLKCMU_SDMA_NOC_SM,
+ CLK_CON_DIV_DIV_CP_HISPEEDY_CLK,
+ CLK_CON_DIV_DIV_CP_HISPEEDY_CLK_SM,
+ CLK_CON_DIV_DIV_CP_SHARED0_CLK,
+ CLK_CON_DIV_DIV_CP_SHARED0_CLK_SM,
+ CLK_CON_DIV_DIV_CP_SHARED2_CLK,
+ CLK_CON_DIV_DIV_CP_SHARED2_CLK_SM,
+ CLK_CON_DIV_CLKCMU_UFS_NOC,
+ CLK_CON_DIV_DIV_CLKCMU_M2M_FRC,
+ CLK_CON_DIV_DIV_CLKCMU_M2M_FRC_SM,
+ CLK_CON_DIV_DIV_CLKCMU_MCSC_MCSC,
+ CLK_CON_DIV_DIV_CLKCMU_MCSC_MCSC_SM,
+ CLK_CON_DIV_DIV_CLKCMU_MCSC_NOC,
+ CLK_CON_DIV_DIV_CLKCMU_MCSC_NOC_SM,
+ CLK_CON_GAT_CLKCMU_MIF01_SWITCH,
+ CLK_CON_GAT_CLKCMU_MIF23_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_ALIVE_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_AUD_AUDIF0,
+ CLK_CON_GAT_GATE_CLKCMU_AUD_AUDIF0_SM,
+ CLK_CON_GAT_GATE_CLKCMU_AUD_AUDIF1,
+ CLK_CON_GAT_GATE_CLKCMU_AUD_AUDIF1_SM,
+ CLK_CON_GAT_GATE_CLKCMU_AUD_CPU,
+ CLK_CON_GAT_GATE_CLKCMU_AUD_CPU_SM,
+ CLK_CON_GAT_GATE_CLKCMU_AUD_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_BRP_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK0,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK1,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK2,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK3,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK4,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK5,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK6,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK7,
+ CLK_CON_GAT_GATE_CLKCMU_CMU_BOOST,
+ CLK_CON_GAT_GATE_CLKCMU_CMU_BOOST_CAM,
+ CLK_CON_GAT_GATE_CLKCMU_CMU_BOOST_CPU,
+ CLK_CON_GAT_GATE_CLKCMU_CMU_BOOST_CPU_MIF,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG_NOC_SM,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_NOCP,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH_SM,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH_SM,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL2_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL2_SWITCH_SM,
+ CLK_CON_GAT_GATE_CLKCMU_CSIS_DCPHY,
+ CLK_CON_GAT_GATE_CLKCMU_CSIS_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_CSIS_OIS_MCU,
+ CLK_CON_GAT_GATE_CLKCMU_CSTAT_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_DNC_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_DNC_NOC_SM,
+ CLK_CON_GAT_GATE_CLKCMU_DPUB,
+ CLK_CON_GAT_GATE_CLKCMU_DPUB_ALT,
+ CLK_CON_GAT_GATE_CLKCMU_DPUB_DSIM,
+ CLK_CON_GAT_GATE_CLKCMU_DPUF,
+ CLK_CON_GAT_GATE_CLKCMU_DPUF_ALT,
+ CLK_CON_GAT_GATE_CLKCMU_DSP_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_DSP_NOC_SM,
+ CLK_CON_GAT_GATE_CLKCMU_DSU_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_DSU_SWITCH_SM,
+ CLK_CON_GAT_GATE_CLKCMU_G3D_NOCP,
+ CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH_SM,
+ CLK_CON_GAT_GATE_CLKCMU_GNPU_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_GNPU_NOC_SM,
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_DPGTC,
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_DPOSC,
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_USB32DRD,
+ CLK_CON_GAT_GATE_CLKCMU_UFS_MMC_CARD,
+ CLK_CON_GAT_GATE_CLKCMU_UFS_MMC_CARD_SM,
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_PCIE,
+ CLK_CON_GAT_GATE_CLKCMU_UFS_UFS_EMBD,
+ CLK_CON_GAT_GATE_CLKCMU_LME_LME,
+ CLK_CON_GAT_GATE_CLKCMU_LME_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_M2M_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_M2M_NOC_SM,
+ CLK_CON_GAT_GATE_CLKCMU_MFC0_MFC0,
+ CLK_CON_GAT_GATE_CLKCMU_MFC0_WFD,
+ CLK_CON_GAT_GATE_CLKCMU_MFC1_MFC1,
+ CLK_CON_GAT_GATE_CLKCMU_MIF_NOCP,
+ CLK_CON_GAT_GATE_CLKCMU_NOCL0_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_NOCL0_NOC_SM,
+ CLK_CON_GAT_GATE_CLKCMU_NOCL1A_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_NOCL1A_NOC_SM,
+ CLK_CON_GAT_GATE_CLKCMU_NOCL1B_NOC0,
+ CLK_CON_GAT_GATE_CLKCMU_NOCL1B_NOC0_SM,
+ CLK_CON_GAT_GATE_CLKCMU_NOCL1B_NOC1,
+ CLK_CON_GAT_GATE_CLKCMU_NOCL1C_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_NOCL1C_NOC_SM,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP0,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP1,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP0,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP1,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC2_IP0,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC2_IP1,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC2_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_PERIS_GIC,
+ CLK_CON_GAT_GATE_CLKCMU_PERIS_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_SDMA_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_SDMA_NOC_SM,
+ CLK_CON_GAT_GATE_CLKCMU_SSP_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_VTS_DMIC,
+ CLK_CON_GAT_GATE_CLKCMU_YUVP_NOC,
+ CLK_CON_GAT_GATE_CP_HISPEEDY_CLK,
+ CLK_CON_GAT_GATE_CP_HISPEEDY_CLK_SM,
+ CLK_CON_GAT_GATE_CP_SHARED0_CLK,
+ CLK_CON_GAT_GATE_CP_SHARED0_CLK_SM,
+ CLK_CON_GAT_GATE_CP_SHARED1_CLK,
+ CLK_CON_GAT_GATE_CP_SHARED2_CLK,
+ CLK_CON_GAT_GATE_CP_SHARED2_CLK_SM,
+ CLK_CON_GAT_GATE_CLKCMU_UFS_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_M2M_FRC,
+ CLK_CON_GAT_GATE_CLKCMU_M2M_FRC_SM,
+ CLK_CON_GAT_GATE_CLKCMU_MCSC_MCSC,
+ CLK_CON_GAT_GATE_CLKCMU_MCSC_MCSC_SM,
+ CLK_CON_GAT_GATE_CLKCMU_MCSC_NOC,
+ CLK_CON_GAT_GATE_CLKCMU_MCSC_NOC_SM,
+};
+
+/* List of parent clocks for Muxes in CMU_TOP */
+PNAME(mout_cmu_cp_mpll_clk_d2_user_parents) = { "oscclk" };
+PNAME(mout_cmu_cp_mpll_clk_user_parents) = { "oscclk" };
+PNAME(mout_cmu_aud_audif0_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "mout_cmu_cp_mpll_clk_d2_user",
+ "oscclk", "oscclk",
+ "oscclk" };
+PNAME(mout_cmu_aud_audif1_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "mout_cmu_cp_mpll_clk_d2_user",
+ "oscclk", "oscclk",
+ "oscclk" };
+PNAME(mout_cmu_aud_cpu_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "mout_cmu_cp_mpll_clk_d2_user" };
+PNAME(mout_cmu_cpucl0_dbg_noc_p) = { "dout_shared2_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2" };
+PNAME(mout_cmu_cpucl0_switch_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_cpucl1_switch_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_cpucl2_switch_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_dnc_noc_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "oscclk" };
+PNAME(mout_cmu_dpub_noc_p) = { "dout_cmu_div_dpub",
+ "dout_cmu_div_dpub_alt"};
+PNAME(mout_cmu_dpuf_noc_p) = { "dout_cmu_div_dpuf",
+ "dout_cmu_div_dpuf_alt" };
+PNAME(mout_cmu_dsp_noc_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "oscclk" };
+PNAME(mout_cmu_dsu_switch_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_g3d_switch_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_gnpu_noc_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "oscclk" };
+PNAME(mout_cmu_ufs_mmc_card_p) = { "oscclk",
+ "dout_shared2_div1",
+ "dout_mmc_div1",
+ "dout_shared0_div2" };
+PNAME(mout_cmu_m2m_noc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_nocl0_noc_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "mout_cmu_cp_mpll_clk_d2_user",
+ "dout_shared_mif_div2" };
+PNAME(mout_cmu_nocl1a_noc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_nocl1b_noc0_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_nocl1c_noc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_sdma_noc_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "oscclk" };
+PNAME(mout_cmu_cp_hispeedy_clk_p) = { "dout_shared2_div1",
+ "dout_shared3_div1" };
+PNAME(mout_cmu_cp_shared0_clk_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1" };
+PNAME(mout_cmu_cp_shared2_clk_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared1_div2" };
+PNAME(mout_cmu_mux_alive_noc_p) = { "dout_shared0_div2",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_aud_audif0_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "mout_cmu_cp_mpll_clk_d2_user",
+ "oscclk", "oscclk",
+ "oscclk" };
+PNAME(mout_cmu_mux_aud_audif1_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "mout_cmu_cp_mpll_clk_d2_user",
+ "oscclk", "oscclk",
+ "oscclk" };
+PNAME(mout_cmu_mux_aud_cpu_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "mout_cmu_cp_mpll_clk_d2_user" };
+PNAME(mout_cmu_mux_aud_noc_p) = { "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "mout_cmu_cp_mpll_clk_d2_user",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_mux_brp_noc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_cis_clk0_p) = { "oscclk",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_cis_clk1_p) = { "oscclk",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_cis_clk2_p) = { "oscclk",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_cis_clk3_p) = { "oscclk",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_cis_clk4_p) = { "oscclk",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_cis_clk5_p) = { "oscclk",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_cis_clk6_p) = { "oscclk",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_cis_clk7_p) = { "oscclk",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_cmu_boost_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_cmu_boost_cam_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_cmu_boost_cpu_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_cmu_boost_mif_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_cpucl0_dbg_noc_p) = { "dout_shared2_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2" };
+PNAME(mout_cmu_mux_cpucl0_nocp_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_cpucl0_switch_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_mux_cpucl1_switch_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_mux_cpucl2_switch_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_mux_csis_dcphy_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_csis_noc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_csis_ois_mcu_p) = { "dout_shared0_div2",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_cstat_noc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_dnc_noc_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_dpub_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_mux_dpub_alt_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_mux_dpub_dsim_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2" };
+PNAME(mout_cmu_mux_dpuf_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_mux_dpuf_alt_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_mux_dsp_noc_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_dsu_switch_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_mux_g3d_nocp_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_g3d_switch_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_gnpu_noc_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_hsi0_dpgtc_p) = { "oscclk",
+ "dout_shared0_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_hsi0_dposc_p) = { "oscclk",
+ "dout_shared2_div1" };
+PNAME(mout_cmu_mux_hsi0_noc_p) = { "dout_shared0_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_hsi0_usb32drd_p) = { "oscclk",
+ "dout_shared0_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_ufs_mmc_card_p) = { "oscclk",
+ "dout_shared2_div1",
+ "dout_mmc_div1",
+ "dout_shared0_div2" };
+PNAME(mout_cmu_mux_hsi1_noc_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_hsi1_pcie_p) = { "oscclk",
+ "dout_shared2_div1" };
+PNAME(mout_cmu_mux_ufs_ufs_embd_p) = { "oscclk",
+ "dout_shared0_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_lme_lme_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_lme_noc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_m2m_noc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_mcsc_mcsc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_mcsc_noc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_mfc0_mfc0_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_mfc0_wfd_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "dout_shared4_div2",
+ "oscclk", "oscclk",
+ "oscclk" };
+PNAME(mout_cmu_mux_mfc1_mfc1_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_mif_nocp_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_mif_switch_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "mout_cmu_cp_mpll_clk_user",
+ "dout_shared_mif_div1" };
+PNAME(mout_cmu_mux_nocl0_noc_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "mout_cmu_cp_mpll_clk_d2_user",
+ "dout_shared_mif_div2" };
+PNAME(mout_cmu_mux_nocl1a_noc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_nocl1b_noc0_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_nocl1b_noc1_p) = { "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_nocl1c_noc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_peric0_ip0_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_peric0_ip1_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_peric0_noc_p) = { "dout_shared0_div2",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_peric1_ip0_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_peric1_ip1_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_peric1_noc_p) = { "dout_shared0_div2",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_peric2_ip0_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_peric2_ip1_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_peric2_noc_p) = { "dout_shared0_div2",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_peris_gic_p) = { "dout_shared0_div2",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_peris_noc_p) = { "dout_shared0_div2",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_sdma_noc_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_ssp_noc_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+PNAME(mout_cmu_mux_vts_dmic_p) = { "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2" };
+PNAME(mout_cmu_mux_yuvp_noc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_cmu_cmuref_p) = { "oscclk",
+ "dout_cmu_boost" };
+PNAME(mout_cmu_mux_cp_hispeedy_clk_p) = { "dout_shared2_div1",
+ "dout_shared3_div1" };
+PNAME(mout_cmu_mux_cp_shared0_clk_p) = { "dout_shared0_div1",
+ "dout_shared1_div1",
+ "dout_shared2_div1",
+ "dout_shared3_div1" };
+PNAME(mout_cmu_mux_cp_shared1_clk_p) = { "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2" };
+PNAME(mout_cmu_mux_cp_shared2_clk_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared1_div2" };
+PNAME(mout_cmu_m2m_frc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mcsc_mcsc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mcsc_noc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_m2m_frc_p) = { "dout_shared2_div1",
+ "dout_shared3_div1",
+ "dout_shared4_div1",
+ "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared3_div2",
+ "oscclk" };
+PNAME(mout_cmu_mux_ufs_noc_p) = { "dout_shared0_div2",
+ "dout_shared1_div2",
+ "dout_shared2_div2",
+ "dout_shared4_div2" };
+
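+/*
+ * CMU_TOP PLLs: the five shared PLLs plus the MMC and MIF PLLs, all of
+ * type pll_4311 and sourced from the external "oscclk" reference.
+ */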
+static const struct samsung_pll_clock top_pll_clks[] __initconst = {
+ PLL(pll_4311, CLK_FOUT_SHARED0_PLL, "fout_shared0_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED0, PLL_CON3_PLL_SHARED0, NULL),
+ PLL(pll_4311, CLK_FOUT_SHARED1_PLL, "fout_shared1_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED1, PLL_CON3_PLL_SHARED1, NULL),
+ PLL(pll_4311, CLK_FOUT_SHARED2_PLL, "fout_shared2_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED2, PLL_CON3_PLL_SHARED2, NULL),
+ PLL(pll_4311, CLK_FOUT_SHARED3_PLL, "fout_shared3_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED3, PLL_CON3_PLL_SHARED3, NULL),
+ PLL(pll_4311, CLK_FOUT_SHARED4_PLL, "fout_shared4_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED4, PLL_CON3_PLL_SHARED4, NULL),
+ PLL(pll_4311, CLK_FOUT_MMC_PLL, "fout_mmc_pll", "oscclk",
+ PLL_LOCKTIME_PLL_MMC, PLL_CON3_PLL_MMC, NULL),
+ PLL(pll_4311, CLK_FOUT_SHARED_MIF_PLL, "fout_shared_mif_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED_MIF, PLL_CON3_PLL_SHARED_MIF, NULL),
+};
+
+static const struct samsung_mux_clock top_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_CMU_CP_MPLL_CLK_D2_USER, "mout_cmu_cp_mpll_clk_d2_user",
+ mout_cmu_cp_mpll_clk_d2_user_parents,
+ PLL_CON0_MUX_CP_MPLL_CLK_D2_USER, 4, 1),
+ MUX(CLK_MOUT_CMU_CP_MPLL_CLK_USER, "mout_cmu_cp_mpll_clk_user",
+ mout_cmu_cp_mpll_clk_user_parents, PLL_CON0_MUX_CP_MPLL_CLK_USER,
+ 4, 1),
+ MUX(CLK_MOUT_CMU_AUD_AUDIF0, "mout_cmu_aud_audif0",
+ mout_cmu_aud_audif0_p, CLK_CON_MUX_CLKCMU_AUD_AUDIF0, 0, 3),
+ MUX(CLK_MOUT_CMU_AUD_AUDIF1, "mout_cmu_aud_audif1",
+ mout_cmu_aud_audif1_p, CLK_CON_MUX_CLKCMU_AUD_AUDIF1, 0, 3),
+ MUX(CLK_MOUT_CMU_AUD_CPU, "mout_cmu_aud_cpu", mout_cmu_aud_cpu_p,
+ CLK_CON_MUX_CLKCMU_AUD_CPU, 0, 3),
+ MUX(CLK_MOUT_CMU_CPUCL0_DBG_NOC, "mout_cmu_cpucl0_dbg_noc",
+ mout_cmu_cpucl0_dbg_noc_p, CLK_CON_MUX_CLKCMU_CPUCL0_DBG_NOC,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_CPUCL0_SWITCH, "mout_cmu_cpucl0_switch",
+ mout_cmu_cpucl0_switch_p, CLK_CON_MUX_CLKCMU_CPUCL0_SWITCH, 0, 3),
+ MUX(CLK_MOUT_CMU_CPUCL1_SWITCH, "mout_cmu_cpucl1_switch",
+ mout_cmu_cpucl1_switch_p, CLK_CON_MUX_CLKCMU_CPUCL1_SWITCH, 0, 3),
+ MUX(CLK_MOUT_CMU_CPUCL2_SWITCH, "mout_cmu_cpucl2_switch",
+ mout_cmu_cpucl2_switch_p, CLK_CON_MUX_CLKCMU_CPUCL2_SWITCH, 0, 3),
+ MUX(CLK_MOUT_CMU_DNC_NOC, "mout_cmu_dnc_noc", mout_cmu_dnc_noc_p,
+ CLK_CON_MUX_CLKCMU_DNC_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_DPUB_NOC, "mout_cmu_dpub_noc", mout_cmu_dpub_noc_p,
+ CLK_CON_MUX_CLKCMU_DPUB_NOC, 0, 1),
+ MUX(CLK_MOUT_CMU_DPUF_NOC, "mout_cmu_dpuf_noc", mout_cmu_dpuf_noc_p,
+ CLK_CON_MUX_CLKCMU_DPUF_NOC, 0, 1),
+ MUX(CLK_MOUT_CMU_DSP_NOC, "mout_cmu_dsp_noc", mout_cmu_dsp_noc_p,
+ CLK_CON_MUX_CLKCMU_DSP_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_DSU_SWITCH, "mout_cmu_dsu_switch",
+ mout_cmu_dsu_switch_p, CLK_CON_MUX_CLKCMU_DSU_SWITCH, 0, 3),
+ MUX(CLK_MOUT_CMU_G3D_SWITCH, "mout_cmu_g3d_switch",
+ mout_cmu_g3d_switch_p, CLK_CON_MUX_CLKCMU_G3D_SWITCH, 0, 3),
+ MUX(CLK_MOUT_CMU_GNPU_NOC, "mout_cmu_gnpu_noc", mout_cmu_gnpu_noc_p,
+ CLK_CON_MUX_CLKCMU_GNPU_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_UFS_MMC_CARD, "mout_cmu_ufs_mmc_card",
+ mout_cmu_ufs_mmc_card_p, CLK_CON_MUX_CLKCMU_UFS_MMC_CARD, 0, 2),
+ MUX(CLK_MOUT_CMU_M2M_NOC, "mout_cmu_m2m_noc", mout_cmu_m2m_noc_p,
+ CLK_CON_MUX_CLKCMU_M2M_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_NOCL0_NOC, "mout_cmu_nocl0_noc", mout_cmu_nocl0_noc_p,
+ CLK_CON_MUX_CLKCMU_NOCL0_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_NOCL1A_NOC, "mout_cmu_nocl1a_noc",
+ mout_cmu_nocl1a_noc_p, CLK_CON_MUX_CLKCMU_NOCL1A_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_NOCL1B_NOC0, "mout_cmu_nocl1b_noc0",
+ mout_cmu_nocl1b_noc0_p, CLK_CON_MUX_CLKCMU_NOCL1B_NOC0, 0, 3),
+ MUX(CLK_MOUT_CMU_NOCL1C_NOC, "mout_cmu_nocl1c_noc",
+ mout_cmu_nocl1c_noc_p, CLK_CON_MUX_CLKCMU_NOCL1C_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_SDMA_NOC, "mout_cmu_sdma_noc", mout_cmu_sdma_noc_p,
+ CLK_CON_MUX_CLKCMU_SDMA_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_CP_HISPEEDY_CLK, "mout_cmu_cp_hispeedy_clk",
+ mout_cmu_cp_hispeedy_clk_p, CLK_CON_MUX_CP_HISPEEDY_CLK, 0, 1),
+ MUX(CLK_MOUT_CMU_CP_SHARED0_CLK, "mout_cmu_cp_shared0_clk",
+ mout_cmu_cp_shared0_clk_p, CLK_CON_MUX_CP_SHARED0_CLK, 0, 2),
+ MUX(CLK_MOUT_CMU_CP_SHARED2_CLK, "mout_cmu_cp_shared2_clk",
+ mout_cmu_cp_shared2_clk_p, CLK_CON_MUX_CP_SHARED2_CLK, 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_ALIVE_NOC, "mout_cmu_mux_alive_noc",
+ mout_cmu_mux_alive_noc_p, CLK_CON_MUX_MUX_CLKCMU_ALIVE_NOC, 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_AUD_AUDIF0, "mout_cmu_mux_aud_audif0",
+ mout_cmu_mux_aud_audif0_p, CLK_CON_MUX_MUX_CLKCMU_AUD_AUDIF0,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_AUD_AUDIF1, "mout_cmu_mux_aud_audif1",
+ mout_cmu_mux_aud_audif1_p, CLK_CON_MUX_MUX_CLKCMU_AUD_AUDIF1,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_AUD_CPU, "mout_cmu_mux_aud_cpu",
+ mout_cmu_mux_aud_cpu_p, CLK_CON_MUX_MUX_CLKCMU_AUD_CPU, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_AUD_NOC, "mout_cmu_mux_aud_noc",
+ mout_cmu_mux_aud_noc_p, CLK_CON_MUX_MUX_CLKCMU_AUD_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_BRP_NOC, "mout_cmu_mux_brp_noc",
+ mout_cmu_mux_brp_noc_p, CLK_CON_MUX_MUX_CLKCMU_BRP_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_CIS_CLK0, "mout_cmu_mux_cis_clk0",
+ mout_cmu_mux_cis_clk0_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0, 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_CIS_CLK1, "mout_cmu_mux_cis_clk1",
+ mout_cmu_mux_cis_clk1_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1, 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_CIS_CLK2, "mout_cmu_mux_cis_clk2",
+ mout_cmu_mux_cis_clk2_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2, 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_CIS_CLK3, "mout_cmu_mux_cis_clk3",
+ mout_cmu_mux_cis_clk3_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3, 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_CIS_CLK4, "mout_cmu_mux_cis_clk4",
+ mout_cmu_mux_cis_clk4_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK4, 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_CIS_CLK5, "mout_cmu_mux_cis_clk5",
+ mout_cmu_mux_cis_clk5_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK5, 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_CIS_CLK6, "mout_cmu_mux_cis_clk6",
+ mout_cmu_mux_cis_clk6_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK6, 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_CIS_CLK7, "mout_cmu_mux_cis_clk7",
+ mout_cmu_mux_cis_clk7_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK7, 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_CMU_BOOST, "mout_cmu_mux_cmu_boost",
+ mout_cmu_mux_cmu_boost_p, CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST, 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_CMU_BOOST_CAM, "mout_cmu_mux_cmu_boost_cam",
+ mout_cmu_mux_cmu_boost_cam_p, CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_CAM,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_CMU_BOOST_CPU, "mout_cmu_mux_cmu_boost_cpu",
+ mout_cmu_mux_cmu_boost_cpu_p, CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_CPU,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_CMU_BOOST_MIF, "mout_cmu_mux_cmu_boost_mif",
+ mout_cmu_mux_cmu_boost_mif_p, CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_MIF,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_CPUCL0_DBG_NOC, "mout_cmu_mux_cpucl0_dbg_noc",
+ mout_cmu_mux_cpucl0_dbg_noc_p,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG_NOC, 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_CPUCL0_NOCP, "mout_cmu_mux_cpucl0_nocp",
+ mout_cmu_mux_cpucl0_nocp_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL0_NOCP,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_CPUCL0_SWITCH, "mout_cmu_mux_cpucl0_switch",
+ mout_cmu_mux_cpucl0_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_CPUCL1_SWITCH, "mout_cmu_mux_cpucl1_switch",
+ mout_cmu_mux_cpucl1_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_CPUCL2_SWITCH, "mout_cmu_mux_cpucl2_switch",
+ mout_cmu_mux_cpucl2_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_CSIS_DCPHY, "mout_cmu_mux_csis_dcphy",
+ mout_cmu_mux_csis_dcphy_p, CLK_CON_MUX_MUX_CLKCMU_CSIS_DCPHY,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_CSIS_NOC, "mout_cmu_mux_csis_noc",
+ mout_cmu_mux_csis_noc_p, CLK_CON_MUX_MUX_CLKCMU_CSIS_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_CSIS_OIS_MCU, "mout_cmu_mux_csis_ois_mcu",
+ mout_cmu_mux_csis_ois_mcu_p, CLK_CON_MUX_MUX_CLKCMU_CSIS_OIS_MCU,
+ 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_CSTAT_NOC, "mout_cmu_mux_cstat_noc",
+ mout_cmu_mux_cstat_noc_p, CLK_CON_MUX_MUX_CLKCMU_CSTAT_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_DNC_NOC, "mout_cmu_mux_dnc_noc",
+ mout_cmu_mux_dnc_noc_p, CLK_CON_MUX_MUX_CLKCMU_DNC_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_DPUB, "mout_cmu_mux_dpub", mout_cmu_mux_dpub_p,
+ CLK_CON_MUX_MUX_CLKCMU_DPUB, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_DPUB_ALT, "mout_cmu_mux_dpub_alt",
+ mout_cmu_mux_dpub_alt_p, CLK_CON_MUX_MUX_CLKCMU_DPUB_ALT, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_DPUB_DSIM, "mout_cmu_mux_dpub_dsim",
+ mout_cmu_mux_dpub_dsim_p, CLK_CON_MUX_MUX_CLKCMU_DPUB_DSIM, 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_DPUF, "mout_cmu_mux_dpuf", mout_cmu_mux_dpuf_p,
+ CLK_CON_MUX_MUX_CLKCMU_DPUF, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_DPUF_ALT, "mout_cmu_mux_dpuf_alt",
+ mout_cmu_mux_dpuf_alt_p, CLK_CON_MUX_MUX_CLKCMU_DPUF_ALT, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_DSP_NOC, "mout_cmu_mux_dsp_noc",
+ mout_cmu_mux_dsp_noc_p, CLK_CON_MUX_MUX_CLKCMU_DSP_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_DSU_SWITCH, "mout_cmu_mux_dsu_switch",
+ mout_cmu_mux_dsu_switch_p, CLK_CON_MUX_MUX_CLKCMU_DSU_SWITCH,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_G3D_NOCP, "mout_cmu_mux_g3d_nocp",
+ mout_cmu_mux_g3d_nocp_p, CLK_CON_MUX_MUX_CLKCMU_G3D_NOCP, 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_G3D_SWITCH, "mout_cmu_mux_g3d_switch",
+ mout_cmu_mux_g3d_switch_p, CLK_CON_MUX_MUX_CLKCMU_G3D_SWITCH,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_GNPU_NOC, "mout_cmu_mux_gnpu_noc",
+ mout_cmu_mux_gnpu_noc_p, CLK_CON_MUX_MUX_CLKCMU_GNPU_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_HSI0_DPGTC, "mout_cmu_mux_hsi0_dpgtc",
+ mout_cmu_mux_hsi0_dpgtc_p, CLK_CON_MUX_MUX_CLKCMU_HSI0_DPGTC,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_HSI0_DPOSC, "mout_cmu_mux_hsi0_dposc",
+ mout_cmu_mux_hsi0_dposc_p, CLK_CON_MUX_MUX_CLKCMU_HSI0_DPOSC,
+ 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_HSI0_NOC, "mout_cmu_mux_hsi0_noc",
+ mout_cmu_mux_hsi0_noc_p, CLK_CON_MUX_MUX_CLKCMU_HSI0_NOC, 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_HSI0_USB32DRD, "mout_cmu_mux_hsi0_usb32drd",
+ mout_cmu_mux_hsi0_usb32drd_p, CLK_CON_MUX_MUX_CLKCMU_HSI0_USB32DRD,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_UFS_MMC_CARD, "mout_cmu_mux_ufs_mmc_card",
+ mout_cmu_mux_ufs_mmc_card_p, CLK_CON_MUX_MUX_CLKCMU_UFS_MMC_CARD,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_HSI1_NOC, "mout_cmu_mux_hsi1_noc",
+ mout_cmu_mux_hsi1_noc_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_NOC, 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_HSI1_PCIE, "mout_cmu_mux_hsi1_pcie",
+ mout_cmu_mux_hsi1_pcie_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_PCIE, 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_UFS_UFS_EMBD, "mout_cmu_mux_ufs_ufs_embd",
+ mout_cmu_mux_ufs_ufs_embd_p, CLK_CON_MUX_MUX_CLKCMU_UFS_UFS_EMBD,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_LME_LME, "mout_cmu_mux_lme_lme",
+ mout_cmu_mux_lme_lme_p, CLK_CON_MUX_MUX_CLKCMU_LME_LME, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_LME_NOC, "mout_cmu_mux_lme_noc",
+ mout_cmu_mux_lme_noc_p, CLK_CON_MUX_MUX_CLKCMU_LME_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_M2M_NOC, "mout_cmu_mux_m2m_noc",
+ mout_cmu_mux_m2m_noc_p, CLK_CON_MUX_MUX_CLKCMU_M2M_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_MCSC_MCSC, "mout_cmu_mux_mcsc_mcsc",
+ mout_cmu_mux_mcsc_mcsc_p, CLK_CON_MUX_MUX_CLKCMU_MCSC_MCSC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_MCSC_NOC, "mout_cmu_mux_mcsc_noc",
+ mout_cmu_mux_mcsc_noc_p, CLK_CON_MUX_MUX_CLKCMU_MCSC_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_MFC0_MFC0, "mout_cmu_mux_mfc0_mfc0",
+ mout_cmu_mux_mfc0_mfc0_p, CLK_CON_MUX_MUX_CLKCMU_MFC0_MFC0, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_MFC0_WFD, "mout_cmu_mux_mfc0_wfd",
+ mout_cmu_mux_mfc0_wfd_p, CLK_CON_MUX_MUX_CLKCMU_MFC0_WFD, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_MFC1_MFC1, "mout_cmu_mux_mfc1_mfc1",
+ mout_cmu_mux_mfc1_mfc1_p, CLK_CON_MUX_MUX_CLKCMU_MFC1_MFC1, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_MIF_NOCP, "mout_cmu_mux_mif_nocp",
+ mout_cmu_mux_mif_nocp_p, CLK_CON_MUX_MUX_CLKCMU_MIF_NOCP, 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_MIF_SWITCH, "mout_cmu_mux_mif_switch",
+ mout_cmu_mux_mif_switch_p, CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_NOCL0_NOC, "mout_cmu_mux_nocl0_noc",
+ mout_cmu_mux_nocl0_noc_p, CLK_CON_MUX_MUX_CLKCMU_NOCL0_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_NOCL1A_NOC, "mout_cmu_mux_nocl1a_noc",
+ mout_cmu_mux_nocl1a_noc_p, CLK_CON_MUX_MUX_CLKCMU_NOCL1A_NOC,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_NOCL1B_NOC0, "mout_cmu_mux_nocl1b_noc0",
+ mout_cmu_mux_nocl1b_noc0_p, CLK_CON_MUX_MUX_CLKCMU_NOCL1B_NOC0,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_NOCL1B_NOC1, "mout_cmu_mux_nocl1b_noc1",
+ mout_cmu_mux_nocl1b_noc1_p, CLK_CON_MUX_MUX_CLKCMU_NOCL1B_NOC1,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_NOCL1C_NOC, "mout_cmu_mux_nocl1c_noc",
+ mout_cmu_mux_nocl1c_noc_p, CLK_CON_MUX_MUX_CLKCMU_NOCL1C_NOC,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_PERIC0_IP0, "mout_cmu_mux_peric0_ip0",
+ mout_cmu_mux_peric0_ip0_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP0,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_PERIC0_IP1, "mout_cmu_mux_peric0_ip1",
+ mout_cmu_mux_peric0_ip1_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP1,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_PERIC0_NOC, "mout_cmu_mux_peric0_noc",
+ mout_cmu_mux_peric0_noc_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_NOC,
+ 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_PERIC1_IP0, "mout_cmu_mux_peric1_ip0",
+ mout_cmu_mux_peric1_ip0_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP0,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_PERIC1_IP1, "mout_cmu_mux_peric1_ip1",
+ mout_cmu_mux_peric1_ip1_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP1,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_PERIC1_NOC, "mout_cmu_mux_peric1_noc",
+ mout_cmu_mux_peric1_noc_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_NOC,
+ 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_PERIC2_IP0, "mout_cmu_mux_peric2_ip0",
+ mout_cmu_mux_peric2_ip0_p, CLK_CON_MUX_MUX_CLKCMU_PERIC2_IP0,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_PERIC2_IP1, "mout_cmu_mux_peric2_ip1",
+ mout_cmu_mux_peric2_ip1_p, CLK_CON_MUX_MUX_CLKCMU_PERIC2_IP1,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_PERIC2_NOC, "mout_cmu_mux_peric2_noc",
+ mout_cmu_mux_peric2_noc_p, CLK_CON_MUX_MUX_CLKCMU_PERIC2_NOC,
+ 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_PERIS_GIC, "mout_cmu_mux_peris_gic",
+ mout_cmu_mux_peris_gic_p, CLK_CON_MUX_MUX_CLKCMU_PERIS_GIC, 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_PERIS_NOC, "mout_cmu_mux_peris_noc",
+ mout_cmu_mux_peris_noc_p, CLK_CON_MUX_MUX_CLKCMU_PERIS_NOC, 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_SDMA_NOC, "mout_cmu_mux_sdma_noc",
+ mout_cmu_mux_sdma_noc_p, CLK_CON_MUX_MUX_CLKCMU_SDMA_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_SSP_NOC, "mout_cmu_mux_ssp_noc",
+ mout_cmu_mux_ssp_noc_p, CLK_CON_MUX_MUX_CLKCMU_SSP_NOC, 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_VTS_DMIC, "mout_cmu_mux_vts_dmic",
+ mout_cmu_mux_vts_dmic_p, CLK_CON_MUX_MUX_CLKCMU_VTS_DMIC, 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_YUVP_NOC, "mout_cmu_mux_yuvp_noc",
+ mout_cmu_mux_yuvp_noc_p, CLK_CON_MUX_MUX_CLKCMU_YUVP_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_CMU_CMUREF, "mout_cmu_mux_cmu_cmuref",
+ mout_cmu_mux_cmu_cmuref_p, CLK_CON_MUX_MUX_CMU_CMUREF, 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_CP_HISPEEDY_CLK, "mout_cmu_mux_cp_hispeedy_clk",
+ mout_cmu_mux_cp_hispeedy_clk_p, CLK_CON_MUX_MUX_CP_HISPEEDY_CLK,
+ 0, 1),
+ MUX(CLK_MOUT_CMU_MUX_CP_SHARED0_CLK, "mout_cmu_mux_cp_shared0_clk",
+ mout_cmu_mux_cp_shared0_clk_p, CLK_CON_MUX_MUX_CP_SHARED0_CLK,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_CP_SHARED1_CLK, "mout_cmu_mux_cp_shared1_clk",
+ mout_cmu_mux_cp_shared1_clk_p, CLK_CON_MUX_MUX_CP_SHARED1_CLK,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MUX_CP_SHARED2_CLK, "mout_cmu_mux_cp_shared2_clk",
+ mout_cmu_mux_cp_shared2_clk_p, CLK_CON_MUX_MUX_CP_SHARED2_CLK,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_M2M_FRC, "mout_cmu_m2m_frc", mout_cmu_m2m_frc_p,
+ CLK_CON_MUX_CLKCMU_M2M_FRC, 0, 3),
+ MUX(CLK_MOUT_CMU_MCSC_MCSC, "mout_cmu_mcsc_mcsc", mout_cmu_mcsc_mcsc_p,
+ CLK_CON_MUX_CLKCMU_MCSC_MCSC, 0, 3),
+ MUX(CLK_MOUT_CMU_MCSC_NOC, "mout_cmu_mcsc_noc", mout_cmu_mcsc_noc_p,
+ CLK_CON_MUX_CLKCMU_MCSC_NOC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_M2M_FRC, "mout_cmu_mux_m2m_frc",
+ mout_cmu_mux_m2m_frc_p, CLK_CON_MUX_MUX_CLKCMU_M2M_FRC, 0, 3),
+ MUX(CLK_MOUT_CMU_MUX_UFS_NOC, "mout_cmu_mux_ufs_noc",
+ mout_cmu_mux_ufs_noc_p, CLK_CON_MUX_MUX_CLKCMU_UFS_NOC, 0, 2),
+};
+
+static const struct samsung_div_clock top_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CMU_ALIVE_NOC, "dout_cmu_alive_noc",
+ "mout_cmu_mux_alive_noc", CLK_CON_DIV_CLKCMU_ALIVE_NOC, 0, 2),
+ DIV(CLK_DOUT_CMU_AUD_NOC, "dout_cmu_aud_noc", "mout_cmu_mux_aud_noc",
+ CLK_CON_DIV_CLKCMU_AUD_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_BRP_NOC, "dout_cmu_brp_noc", "mout_cmu_mux_brp_noc",
+ CLK_CON_DIV_CLKCMU_BRP_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_CMU_BOOST, "dout_cmu_cmu_boost",
+ "mout_cmu_mux_cmu_boost", CLK_CON_DIV_CLKCMU_CMU_BOOST, 0, 3),
+ DIV(CLK_DOUT_CMU_CMU_BOOST_CAM, "dout_cmu_cmu_boost_cam",
+ "mout_cmu_mux_cmu_boost_cam", CLK_CON_DIV_CLKCMU_CMU_BOOST_CAM,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_CMU_BOOST_CPU, "dout_cmu_cmu_boost_cpu",
+ "mout_cmu_mux_cmu_boost_cpu", CLK_CON_DIV_CLKCMU_CMU_BOOST_CPU,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_CMU_BOOST_MIF, "dout_cmu_cmu_boost_mif",
+ "mout_cmu_mux_cmu_boost_mif", CLK_CON_DIV_CLKCMU_CMU_BOOST_MIF,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_CPUCL0_NOCP, "dout_cmu_cpucl0_nocp",
+ "mout_cmu_mux_cpucl0_nocp", CLK_CON_DIV_CLKCMU_CPUCL0_NOCP, 0, 4),
+ DIV(CLK_DOUT_CMU_CSIS_DCPHY, "dout_cmu_csis_dcphy",
+ "mout_cmu_mux_csis_dcphy", CLK_CON_DIV_CLKCMU_CSIS_DCPHY, 0, 4),
+ DIV(CLK_DOUT_CMU_CSIS_NOC, "dout_cmu_csis_noc",
+ "mout_cmu_mux_csis_noc", CLK_CON_DIV_CLKCMU_CSIS_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_CSIS_OIS_MCU, "dout_cmu_csis_ois_mcu",
+ "mout_cmu_mux_csis_ois_mcu", CLK_CON_DIV_CLKCMU_CSIS_OIS_MCU,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_CSTAT_NOC, "dout_cmu_cstat_noc",
+ "mout_cmu_mux_cstat_noc", CLK_CON_DIV_CLKCMU_CSTAT_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_DPUB_DSIM, "dout_cmu_dpub_dsim",
+ "mout_cmu_mux_dpub_dsim", CLK_CON_DIV_CLKCMU_DPUB_DSIM, 0, 4),
+ DIV(CLK_DOUT_CMU_LME_LME, "dout_cmu_lme_lme", "mout_cmu_mux_lme_lme",
+ CLK_CON_DIV_CLKCMU_LME_LME, 0, 4),
+ DIV(CLK_DOUT_CMU_G3D_NOCP, "dout_cmu_g3d_nocp",
+ "mout_cmu_mux_g3d_nocp", CLK_CON_DIV_CLKCMU_G3D_NOCP, 0, 3),
+ DIV(CLK_DOUT_CMU_HSI0_DPGTC, "dout_cmu_hsi0_dpgtc",
+ "mout_cmu_mux_hsi0_dpgtc", CLK_CON_DIV_CLKCMU_HSI0_DPGTC, 0, 3),
+ DIV(CLK_DOUT_CMU_HSI0_DPOSC, "dout_cmu_hsi0_dposc",
+ "mout_cmu_mux_hsi0_dposc", CLK_CON_DIV_CLKCMU_HSI0_DPOSC, 0, 5),
+ DIV(CLK_DOUT_CMU_HSI0_NOC, "dout_cmu_hsi0_noc",
+ "mout_cmu_mux_hsi0_noc", CLK_CON_DIV_CLKCMU_HSI0_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_HSI0_USB32DRD, "dout_cmu_hsi0_usb32drd",
+ "mout_cmu_mux_hsi0_usb32drd", CLK_CON_DIV_CLKCMU_HSI0_USB32DRD,
+ 0, 5),
+ DIV(CLK_DOUT_CMU_HSI1_NOC, "dout_cmu_hsi1_noc",
+ "mout_cmu_mux_hsi1_noc", CLK_CON_DIV_CLKCMU_HSI1_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_HSI1_PCIE, "dout_cmu_hsi1_pcie",
+ "mout_cmu_mux_hsi1_pcie", CLK_CON_DIV_CLKCMU_HSI1_PCIE, 0, 8),
+ DIV(CLK_DOUT_CMU_UFS_UFS_EMBD, "dout_cmu_ufs_ufs_embd",
+ "mout_cmu_mux_ufs_ufs_embd", CLK_CON_DIV_CLKCMU_UFS_UFS_EMBD,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_LME_NOC, "dout_cmu_lme_noc", "mout_cmu_mux_lme_noc",
+ CLK_CON_DIV_CLKCMU_LME_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_MFC0_MFC0, "dout_cmu_mfc0_mfc0",
+ "mout_cmu_mux_mfc0_mfc0", CLK_CON_DIV_CLKCMU_MFC0_MFC0, 0, 4),
+ DIV(CLK_DOUT_CMU_MFC0_WFD, "dout_cmu_mfc0_wfd",
+ "mout_cmu_mux_mfc0_wfd", CLK_CON_DIV_CLKCMU_MFC0_WFD, 0, 4),
+ DIV(CLK_DOUT_CMU_MFC1_MFC1, "dout_cmu_mfc1_mfc1",
+ "mout_cmu_mux_mfc1_mfc1", CLK_CON_DIV_CLKCMU_MFC1_MFC1, 0, 4),
+ DIV(CLK_DOUT_CMU_MIF_NOCP, "dout_cmu_mif_nocp",
+ "mout_cmu_mux_mif_nocp", CLK_CON_DIV_CLKCMU_MIF_NOCP, 0, 4),
+ DIV(CLK_DOUT_CMU_NOCL1B_NOC1, "dout_cmu_nocl1b_noc1",
+ "mout_cmu_mux_nocl1b_noc1", CLK_CON_DIV_CLKCMU_NOCL1B_NOC1, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC0_IP0, "dout_cmu_peric0_ip0",
+ "mout_cmu_mux_peric0_ip0", CLK_CON_DIV_CLKCMU_PERIC0_IP0, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC0_IP1, "dout_cmu_peric0_ip1",
+ "mout_cmu_mux_peric0_ip1", CLK_CON_DIV_CLKCMU_PERIC0_IP1, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC0_NOC, "dout_cmu_peric0_noc",
+ "mout_cmu_mux_peric0_noc", CLK_CON_DIV_CLKCMU_PERIC0_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_IP0, "dout_cmu_peric1_ip0",
+ "mout_cmu_mux_peric1_ip0", CLK_CON_DIV_CLKCMU_PERIC1_IP0, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_IP1, "dout_cmu_peric1_ip1",
+ "mout_cmu_mux_peric1_ip1", CLK_CON_DIV_CLKCMU_PERIC1_IP1, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_NOC, "dout_cmu_peric1_noc",
+ "mout_cmu_mux_peric1_noc", CLK_CON_DIV_CLKCMU_PERIC1_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC2_IP0, "dout_cmu_peric2_ip0",
+ "mout_cmu_mux_peric2_ip0", CLK_CON_DIV_CLKCMU_PERIC2_IP0, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC2_IP1, "dout_cmu_peric2_ip1",
+ "mout_cmu_mux_peric2_ip1", CLK_CON_DIV_CLKCMU_PERIC2_IP1, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC2_NOC, "dout_cmu_peric2_noc",
+ "mout_cmu_mux_peric2_noc", CLK_CON_DIV_CLKCMU_PERIC2_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIS_GIC, "dout_cmu_peris_gic",
+ "mout_cmu_mux_peris_gic", CLK_CON_DIV_CLKCMU_PERIS_GIC, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIS_NOC, "dout_cmu_peris_noc",
+ "mout_cmu_mux_peris_noc", CLK_CON_DIV_CLKCMU_PERIS_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_SSP_NOC, "dout_cmu_ssp_noc", "mout_cmu_mux_ssp_noc",
+ CLK_CON_DIV_CLKCMU_SSP_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_VTS_DMIC, "dout_cmu_vts_dmic",
+ "mout_cmu_mux_vts_dmic", CLK_CON_DIV_CLKCMU_VTS_DMIC, 0, 6),
+ DIV(CLK_DOUT_CMU_YUVP_NOC, "dout_cmu_yuvp_noc",
+ "mout_cmu_mux_yuvp_noc", CLK_CON_DIV_CLKCMU_YUVP_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_CP_SHARED1_CLK, "dout_cmu_cp_shared1_clk",
+ "mout_cmu_mux_cp_shared1_clk", CLK_CON_DIV_CP_SHARED1_CLK, 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_AUD_AUDIF0, "dout_cmu_div_aud_audif0",
+ "mout_cmu_mux_aud_audif0", CLK_CON_DIV_DIV_CLKCMU_AUD_AUDIF0,
+ 0, 6),
+ DIV(CLK_DOUT_CMU_DIV_AUD_AUDIF0_SM, "dout_cmu_div_aud_audif0_sm",
+ "mout_cmu_aud_audif0", CLK_CON_DIV_DIV_CLKCMU_AUD_AUDIF0_SM, 0, 6),
+ DIV(CLK_DOUT_CMU_DIV_AUD_AUDIF1, "dout_cmu_div_aud_audif1",
+ "mout_cmu_mux_aud_audif1", CLK_CON_DIV_DIV_CLKCMU_AUD_AUDIF1,
+ 0, 6),
+ DIV(CLK_DOUT_CMU_DIV_AUD_AUDIF1_SM, "dout_cmu_div_aud_audif1_sm",
+ "mout_cmu_aud_audif1", CLK_CON_DIV_DIV_CLKCMU_AUD_AUDIF1_SM, 0, 6),
+ DIV(CLK_DOUT_CMU_DIV_AUD_CPU, "dout_cmu_div_aud_cpu",
+ "mout_cmu_mux_aud_cpu", CLK_CON_DIV_DIV_CLKCMU_AUD_CPU, 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_AUD_CPU_SM, "dout_cmu_div_aud_cpu_sm",
+ "mout_cmu_aud_cpu", CLK_CON_DIV_DIV_CLKCMU_AUD_CPU_SM, 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_CIS_CLK0, "dout_cmu_div_cis_clk0",
+ "mout_cmu_mux_cis_clk0", CLK_CON_DIV_DIV_CLKCMU_CIS_CLK0, 0, 5),
+ DIV(CLK_DOUT_CMU_DIV_CIS_CLK1, "dout_cmu_div_cis_clk1",
+ "mout_cmu_mux_cis_clk1", CLK_CON_DIV_DIV_CLKCMU_CIS_CLK1, 0, 5),
+ DIV(CLK_DOUT_CMU_DIV_CIS_CLK2, "dout_cmu_div_cis_clk2",
+ "mout_cmu_mux_cis_clk2", CLK_CON_DIV_DIV_CLKCMU_CIS_CLK2, 0, 5),
+ DIV(CLK_DOUT_CMU_DIV_CIS_CLK3, "dout_cmu_div_cis_clk3",
+ "mout_cmu_mux_cis_clk3", CLK_CON_DIV_DIV_CLKCMU_CIS_CLK3, 0, 5),
+ DIV(CLK_DOUT_CMU_DIV_CIS_CLK4, "dout_cmu_div_cis_clk4",
+ "mout_cmu_mux_cis_clk4", CLK_CON_DIV_DIV_CLKCMU_CIS_CLK4, 0, 5),
+ DIV(CLK_DOUT_CMU_DIV_CIS_CLK5, "dout_cmu_div_cis_clk5",
+ "mout_cmu_mux_cis_clk5", CLK_CON_DIV_DIV_CLKCMU_CIS_CLK5, 0, 5),
+ DIV(CLK_DOUT_CMU_DIV_CIS_CLK6, "dout_cmu_div_cis_clk6",
+ "mout_cmu_mux_cis_clk6", CLK_CON_DIV_DIV_CLKCMU_CIS_CLK6, 0, 5),
+ DIV(CLK_DOUT_CMU_DIV_CIS_CLK7, "dout_cmu_div_cis_clk7",
+ "mout_cmu_mux_cis_clk7", CLK_CON_DIV_DIV_CLKCMU_CIS_CLK7, 0, 5),
+ DIV(CLK_DOUT_CMU_DIV_CPUCL0_DBG_NOC, "dout_cmu_div_cpucl0_dbg_noc",
+ "mout_cmu_mux_cpucl0_dbg_noc",
+ CLK_CON_DIV_DIV_CLKCMU_CPUCL0_DBG_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_CPUCL0_DBG_NOC_SM,
+ "dout_cmu_div_cpucl0_dbg_noc_sm", "mout_cmu_cpucl0_dbg_noc",
+ CLK_CON_DIV_DIV_CLKCMU_CPUCL0_DBG_NOC_SM, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_CPUCL0_SWITCH, "dout_cmu_div_cpucl0_switch",
+ "mout_cmu_mux_cpucl0_switch", CLK_CON_DIV_DIV_CLKCMU_CPUCL0_SWITCH,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_CPUCL0_SWITCH_SM, "dout_cmu_div_cpucl0_switch_sm",
+ "mout_cmu_cpucl0_switch", CLK_CON_DIV_DIV_CLKCMU_CPUCL0_SWITCH_SM,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_CPUCL1_SWITCH, "dout_cmu_div_cpucl1_switch",
+ "mout_cmu_mux_cpucl1_switch", CLK_CON_DIV_DIV_CLKCMU_CPUCL1_SWITCH,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_CPUCL1_SWITCH_SM, "dout_cmu_div_cpucl1_switch_sm",
+ "mout_cmu_cpucl1_switch", CLK_CON_DIV_DIV_CLKCMU_CPUCL1_SWITCH_SM,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_CPUCL2_SWITCH, "dout_cmu_div_cpucl2_switch",
+ "mout_cmu_mux_cpucl2_switch", CLK_CON_DIV_DIV_CLKCMU_CPUCL2_SWITCH,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_CPUCL2_SWITCH_SM, "dout_cmu_div_cpucl2_switch_sm",
+ "mout_cmu_cpucl2_switch", CLK_CON_DIV_DIV_CLKCMU_CPUCL2_SWITCH_SM,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_DNC_NOC, "dout_cmu_div_dnc_noc",
+ "mout_cmu_mux_dnc_noc", CLK_CON_DIV_DIV_CLKCMU_DNC_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_DNC_NOC_SM, "dout_cmu_div_dnc_noc_sm",
+ "mout_cmu_dnc_noc", CLK_CON_DIV_DIV_CLKCMU_DNC_NOC_SM, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_DPUB, "dout_cmu_div_dpub", "mout_cmu_mux_dpub",
+ CLK_CON_DIV_DIV_CLKCMU_DPUB, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_DPUB_ALT, "dout_cmu_div_dpub_alt",
+ "mout_cmu_mux_dpub_alt", CLK_CON_DIV_DIV_CLKCMU_DPUB_ALT, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_DPUF, "dout_cmu_div_dpuf", "mout_cmu_mux_dpuf",
+ CLK_CON_DIV_DIV_CLKCMU_DPUF, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_DPUF_ALT, "dout_cmu_div_dpuf_alt",
+ "mout_cmu_mux_dpuf_alt", CLK_CON_DIV_DIV_CLKCMU_DPUF_ALT, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_DSP_NOC, "dout_cmu_div_dsp_noc",
+ "mout_cmu_mux_dsp_noc", CLK_CON_DIV_DIV_CLKCMU_DSP_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_DSP_NOC_SM, "dout_cmu_div_dsp_noc_sm",
+ "mout_cmu_dsp_noc", CLK_CON_DIV_DIV_CLKCMU_DSP_NOC_SM, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_DSU_SWITCH, "dout_cmu_div_dsu_switch",
+ "mout_cmu_mux_dsu_switch", CLK_CON_DIV_DIV_CLKCMU_DSU_SWITCH,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_DSU_SWITCH_SM, "dout_cmu_div_dsu_switch_sm",
+ "mout_cmu_dsu_switch", CLK_CON_DIV_DIV_CLKCMU_DSU_SWITCH_SM, 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_G3D_SWITCH, "dout_cmu_div_g3d_switch",
+ "mout_cmu_mux_g3d_switch", CLK_CON_DIV_DIV_CLKCMU_G3D_SWITCH,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_G3D_SWITCH_SM, "dout_cmu_div_g3d_switch_sm",
+ "mout_cmu_g3d_switch", CLK_CON_DIV_DIV_CLKCMU_G3D_SWITCH_SM, 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_GNPU_NOC, "dout_cmu_div_gnpu_noc",
+ "mout_cmu_mux_gnpu_noc", CLK_CON_DIV_DIV_CLKCMU_GNPU_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_GNPU_NOC_SM, "dout_cmu_div_gnpu_noc_sm",
+ "mout_cmu_gnpu_noc", CLK_CON_DIV_DIV_CLKCMU_GNPU_NOC_SM, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_UFS_MMC_CARD, "dout_cmu_div_ufs_mmc_card",
+ "mout_cmu_mux_ufs_mmc_card", CLK_CON_DIV_DIV_CLKCMU_UFS_MMC_CARD,
+ 0, 9),
+ DIV(CLK_DOUT_CMU_DIV_UFS_MMC_CARD_SM, "dout_cmu_div_ufs_mmc_card_sm",
+ "mout_cmu_ufs_mmc_card", CLK_CON_DIV_DIV_CLKCMU_UFS_MMC_CARD_SM,
+ 0, 9),
+ DIV(CLK_DOUT_CMU_DIV_M2M_NOC, "dout_cmu_div_m2m_noc",
+ "mout_cmu_mux_m2m_noc", CLK_CON_DIV_DIV_CLKCMU_M2M_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_M2M_NOC_SM, "dout_cmu_div_m2m_noc_sm",
+ "mout_cmu_m2m_noc", CLK_CON_DIV_DIV_CLKCMU_M2M_NOC_SM, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_NOCL0_NOC, "dout_cmu_div_nocl0_noc",
+ "mout_cmu_mux_nocl0_noc", CLK_CON_DIV_DIV_CLKCMU_NOCL0_NOC,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_NOCL0_NOC_SM, "dout_cmu_div_nocl0_noc_sm",
+ "mout_cmu_nocl0_noc", CLK_CON_DIV_DIV_CLKCMU_NOCL0_NOC_SM, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_NOCL1A_NOC, "dout_cmu_div_nocl1a_noc",
+ "mout_cmu_mux_nocl1a_noc", CLK_CON_DIV_DIV_CLKCMU_NOCL1A_NOC,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_NOCL1A_NOC_SM, "dout_cmu_div_nocl1a_noc_sm",
+ "mout_cmu_nocl1a_noc", CLK_CON_DIV_DIV_CLKCMU_NOCL1A_NOC_SM, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_NOCL1B_NOC0, "dout_cmu_div_nocl1b_noc0",
+ "mout_cmu_mux_nocl1b_noc0", CLK_CON_DIV_DIV_CLKCMU_NOCL1B_NOC0,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_NOCL1B_NOC0_SM, "dout_cmu_div_nocl1b_noc0_sm",
+ "mout_cmu_nocl1b_noc0", CLK_CON_DIV_DIV_CLKCMU_NOCL1B_NOC0_SM,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_NOCL1C_NOC, "dout_cmu_div_nocl1c_noc",
+ "mout_cmu_mux_nocl1c_noc", CLK_CON_DIV_DIV_CLKCMU_NOCL1C_NOC,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_NOCL1C_NOC_SM, "dout_cmu_div_nocl1c_noc_sm",
+ "mout_cmu_nocl1c_noc", CLK_CON_DIV_DIV_CLKCMU_NOCL1C_NOC_SM, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_SDMA_NOC, "dout_cmu_div_sdma_noc",
+ "mout_cmu_mux_sdma_noc", CLK_CON_DIV_DIV_CLKCMU_SDMA_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_SDMA_NOC_SM, "dout_cmu_div_sdma_noc_sm",
+ "mout_cmu_sdma_noc", CLK_CON_DIV_DIV_CLKCMU_SDMA_NOC_SM, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_CP_HISPEEDY_CLK, "dout_cmu_div_cp_hispeedy_clk",
+ "mout_cmu_mux_cp_hispeedy_clk", CLK_CON_DIV_DIV_CP_HISPEEDY_CLK,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_CP_HISPEEDY_CLK_SM,
+ "dout_cmu_div_cp_hispeedy_clk_sm", "mout_cmu_mux_cp_hispeedy_clk",
+ CLK_CON_DIV_DIV_CP_HISPEEDY_CLK_SM, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_CP_SHARED0_CLK, "dout_cmu_div_cp_shared0_clk",
+ "mout_cmu_mux_cp_shared0_clk", CLK_CON_DIV_DIV_CP_SHARED0_CLK,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_CP_SHARED0_CLK_SM,
+ "dout_cmu_div_cp_shared0_clk_sm", "mout_cmu_cp_shared0_clk",
+ CLK_CON_DIV_DIV_CP_SHARED0_CLK_SM, 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_CP_SHARED2_CLK, "dout_cmu_div_cp_shared2_clk",
+ "mout_cmu_mux_cp_shared2_clk", CLK_CON_DIV_DIV_CP_SHARED2_CLK,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_DIV_CP_SHARED2_CLK_SM,
+ "dout_cmu_div_cp_shared2_clk_sm", "mout_cmu_cp_shared2_clk",
+ CLK_CON_DIV_DIV_CP_SHARED2_CLK_SM, 0, 3),
+ DIV(CLK_DOUT_CMU_UFS_NOC, "dout_cmu_ufs_noc", "mout_cmu_mux_ufs_noc",
+ CLK_CON_DIV_CLKCMU_UFS_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_M2M_FRC, "dout_cmu_div_m2m_frc",
+ "mout_cmu_mux_m2m_frc", CLK_CON_DIV_DIV_CLKCMU_M2M_FRC, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_M2M_FRC_SM, "dout_cmu_div_m2m_frc_sm",
+ "mout_cmu_m2m_frc", CLK_CON_DIV_DIV_CLKCMU_M2M_FRC_SM, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_MCSC_MCSC, "dout_cmu_div_mcsc_mcsc",
+ "mout_cmu_mux_mcsc_mcsc", CLK_CON_DIV_DIV_CLKCMU_MCSC_MCSC, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_MCSC_MCSC_SM, "dout_cmu_div_mcsc_mcsc_sm",
+ "mout_cmu_mcsc_mcsc", CLK_CON_DIV_DIV_CLKCMU_MCSC_MCSC_SM, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_MCSC_NOC, "dout_cmu_div_mcsc_noc",
+ "mout_cmu_mux_mcsc_noc", CLK_CON_DIV_DIV_CLKCMU_MCSC_NOC, 0, 4),
+ DIV(CLK_DOUT_CMU_DIV_MCSC_NOC_SM, "dout_cmu_div_mcsc_noc_sm",
+ "mout_cmu_mcsc_noc", CLK_CON_DIV_DIV_CLKCMU_MCSC_NOC_SM, 0, 4),
+};
+
+static const struct samsung_fixed_factor_clock top_fixed_factor_clks[] __initconst = {
+ FFACTOR(CLK_DOUT_SHARED0_DIV1, "dout_shared0_div1",
+ "fout_shared0_pll", 1, 1, 0),
+ FFACTOR(CLK_DOUT_SHARED0_DIV2, "dout_shared0_div2",
+ "fout_shared0_pll", 1, 2, 0),
+ FFACTOR(CLK_DOUT_SHARED0_DIV4, "dout_shared0_div4",
+ "fout_shared0_pll", 1, 4, 0),
+ FFACTOR(CLK_DOUT_SHARED1_DIV1, "dout_shared1_div1",
+ "fout_shared1_pll", 1, 1, 0),
+ FFACTOR(CLK_DOUT_SHARED1_DIV2, "dout_shared1_div2",
+ "fout_shared1_pll", 1, 2, 0),
+ FFACTOR(CLK_DOUT_SHARED1_DIV4, "dout_shared1_div4",
+ "fout_shared1_pll", 1, 4, 0),
+ FFACTOR(CLK_DOUT_SHARED2_DIV1, "dout_shared2_div1",
+ "fout_shared2_pll", 1, 1, 0),
+ FFACTOR(CLK_DOUT_SHARED2_DIV2, "dout_shared2_div2",
+ "fout_shared2_pll", 1, 2, 0),
+ FFACTOR(CLK_DOUT_SHARED2_DIV4, "dout_shared2_div4",
+ "fout_shared2_pll", 1, 4, 0),
+ FFACTOR(CLK_DOUT_SHARED3_DIV1, "dout_shared3_div1",
+ "fout_shared3_pll", 1, 1, 0),
+ FFACTOR(CLK_DOUT_SHARED3_DIV2, "dout_shared3_div2",
+ "fout_shared3_pll", 1, 2, 0),
+ FFACTOR(CLK_DOUT_SHARED3_DIV4, "dout_shared3_div4",
+ "fout_shared3_pll", 1, 4, 0),
+ FFACTOR(CLK_DOUT_SHARED4_DIV1, "dout_shared4_div1",
+ "fout_shared4_pll", 1, 1, 0),
+ FFACTOR(CLK_DOUT_SHARED4_DIV2, "dout_shared4_div2",
+ "fout_shared4_pll", 1, 2, 0),
+ FFACTOR(CLK_DOUT_SHARED4_DIV4, "dout_shared4_div4",
+ "fout_shared4_pll", 1, 4, 0),
+ FFACTOR(CLK_DOUT_SHARED_MIF_DIV1, "dout_shared_mif_div1",
+ "fout_shared_mif_pll", 1, 1, 0),
+ FFACTOR(CLK_DOUT_SHARED_MIF_DIV2, "dout_shared_mif_div2",
+ "fout_shared_mif_pll", 1, 2, 0),
+ FFACTOR(CLK_DOUT_SHARED_MIF_DIV4, "dout_shared_mif_div4",
+ "fout_shared_mif_pll", 1, 4, 0),
+ FFACTOR(CLK_DOUT_SHARED_MIF_DIV4, "dout_mmc_div1",
+ "fout_mmc_pll", 1, 1, 0),
+ FFACTOR(CLK_DOUT_SHARED_MIF_DIV4, "dout_mmc_div2",
+ "fout_mmc_pll", 1, 2, 0),
+ FFACTOR(CLK_DOUT_SHARED_MIF_DIV4, "dout_mmc_div4",
+ "fout_mmc_pll", 1, 4, 0),
+ FFACTOR(CLK_DOUT_TCXO_DIV3, "dout_tcxo_div3",
+ "oscclk", 1, 3, 0),
+ FFACTOR(CLK_DOUT_TCXO_DIV4, "dout_tcxo_div4",
+ "oscclk", 1, 4, 0),
+};
+
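+/*
+ * Tie the PLL, mux, divider and fixed-factor tables together with the
+ * register list (used by the common Samsung CMU code, e.g. for saving and
+ * restoring registers across suspend) so CMU_TOP registers in one pass.
+ */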
+static const struct samsung_cmu_info top_cmu_info __initconst = {
+ .pll_clks = top_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(top_pll_clks),
+ .mux_clks = top_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(top_mux_clks),
+ .div_clks = top_div_clks,
+ .nr_div_clks = ARRAY_SIZE(top_div_clks),
+ .fixed_factor_clks = top_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(top_fixed_factor_clks),
+ .nr_clk_ids = CLKS_NR_TOP,
+ .clk_regs = top_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(top_clk_regs),
+};
+
+static void __init exynos2200_cmu_top_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &top_cmu_info);
+}
+
+/* Register CMU_TOP early, as it's a dependency for other early domains */
+CLK_OF_DECLARE(exynos2200_cmu_top, "samsung,exynos2200-cmu-top",
+ exynos2200_cmu_top_init);
+
+/* ---- CMU_ALIVE ---------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_ALIVE (0x15800000) */
+#define PLL_CON0_MUX_CLKCMU_ALIVE_NOC_USER 0x600
+#define PLL_CON1_MUX_CLKCMU_ALIVE_NOC_USER 0x604
+#define PLL_CON0_MUX_CLKMUX_ALIVE_RCO_SPMI_USER 0x610
+#define PLL_CON1_MUX_CLKMUX_ALIVE_RCO_SPMI_USER 0x614
+#define PLL_CON0_MUX_CLK_RCO_ALIVE_USER 0x620
+#define PLL_CON1_MUX_CLK_RCO_ALIVE_USER 0x624
+#define CLK_CON_MUX_MUX_CLKALIVE_CHUB_PERI 0x1004
+#define CLK_CON_MUX_MUX_CLKALIVE_CMGP_NOC 0x1008
+#define CLK_CON_MUX_MUX_CLKALIVE_CMGP_PERI 0x100c
+#define CLK_CON_MUX_MUX_CLKALIVE_DBGCORE_NOC 0x1010
+#define CLK_CON_MUX_MUX_CLKALIVE_DNC_NOC 0x1014
+#define CLK_CON_MUX_MUX_CLKALIVE_CHUBVTS_NOC 0x1018
+#define CLK_CON_MUX_MUX_CLKALIVE_GNPU_NOC 0x101c
+#define CLK_CON_MUX_MUX_CLKALIVE_GNSS_NOC 0x1020
+#define CLK_CON_MUX_MUX_CLKALIVE_SDMA_NOC 0x1024
+#define CLK_CON_MUX_MUX_CLKALIVE_UFD_NOC 0x1028
+#define CLK_CON_MUX_MUX_CLK_ALIVE_DBGCORE_UART 0x1030
+#define CLK_CON_MUX_MUX_CLK_ALIVE_NOC 0x1034
+#define CLK_CON_MUX_MUX_CLK_ALIVE_PMU_SUB 0x1038
+#define CLK_CON_MUX_MUX_CLK_ALIVE_SPMI 0x103c
+#define CLK_CON_MUX_MUX_CLK_ALIVE_TIMER 0x1040
+#define CLK_CON_MUX_MUX_CLKALIVE_CSIS_NOC 0x1044
+#define CLK_CON_MUX_MUX_CLKALIVE_DSP_NOC 0x1048
+#define CLK_CON_DIV_CLKALIVE_CHUB_PERI 0x1804
+#define CLK_CON_DIV_CLKALIVE_CMGP_NOC 0x1808
+#define CLK_CON_DIV_CLKALIVE_CMGP_PERI 0x180c
+#define CLK_CON_DIV_CLKALIVE_DBGCORE_NOC 0x1810
+#define CLK_CON_DIV_CLKALIVE_DNC_NOC 0x1814
+#define CLK_CON_DIV_CLKALIVE_CHUBVTS_NOC 0x1818
+#define CLK_CON_DIV_CLKALIVE_GNPU_NOC 0x181c
+#define CLK_CON_DIV_CLKALIVE_SDMA_NOC 0x1820
+#define CLK_CON_DIV_CLKALIVE_UFD_NOC 0x1824
+#define CLK_CON_DIV_DIV_CLK_ALIVE_DBGCORE_UART 0x182c
+#define CLK_CON_DIV_DIV_CLK_ALIVE_NOC 0x1830
+#define CLK_CON_DIV_DIV_CLK_ALIVE_PMU_SUB 0x1834
+#define CLK_CON_DIV_DIV_CLK_ALIVE_SPMI 0x1838
+#define CLK_CON_DIV_CLKALIVE_CSIS_NOC 0x183c
+#define CLK_CON_DIV_CLKALIVE_DSP_NOC 0x1840
+#define CLK_CON_GAT_CLKALIVE_CHUBVTS_RCO 0x2000
+#define CLK_CON_GAT_CLKALIVE_DNC_RCO 0x2004
+#define CLK_CON_GAT_CLKALIVE_CSIS_RCO 0x2008
+#define CLK_CON_GAT_CLKALIVE_GNPU_RCO 0x200c
+#define CLK_CON_GAT_CLKALIVE_GNSS_NOC 0x2010
+#define CLK_CON_GAT_CLKALIVE_SDMA_RCO 0x2014
+#define CLK_CON_GAT_CLKALIVE_UFD_RCO 0x2018
+#define CLK_CON_GAT_CLKALIVE_DSP_RCO 0x201c
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_ALIVE_CMU_ALIVE_IPCLKPORT_PCLK 0x2020
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_APBIF_GPIO_ALIVE_IPCLKPORT_PCLK 0x2024
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_APBIF_INTCOMB_VGPIO2APM_IPCLKPORT_PCLK 0x2028
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_APBIF_INTCOMB_VGPIO2AP_IPCLKPORT_PCLK 0x202c
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_APBIF_INTCOMB_VGPIO2PMU_IPCLKPORT_PCLK 0x2030
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_APBIF_PMU_ALIVE_IPCLKPORT_PCLK 0x2034
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_APM_DMA_IPCLKPORT_PCLK 0x2038
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_CHUB_RTC_IPCLKPORT_OSCCLK 0x203c
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_CHUB_RTC_IPCLKPORT_PCLK 0x2040
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_CLKMON_IPCLKPORT_PCLK 0x2044
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_DBGCORE_UART_IPCLKPORT_IPCLK 0x2048
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_DBGCORE_UART_IPCLKPORT_PCLK 0x204c
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_BLK_ALIVE_FRC_OTP_DESERIAL_IPCLKPORT_I_CLK 0x2050
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_DTZPC_ALIVE_IPCLKPORT_PCLK 0x2054
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_GREBEINTEGRATION_IPCLKPORT_HCLK 0x2058
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_HW_SCANDUMP_CLKSTOP_CTRL_IPCLKPORT_ACLK 0x205c
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_INTMEM_IPCLKPORT_I_ACLK 0x2060
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_INTMEM_IPCLKPORT_I_PCLK 0x2064
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_LH_AXI_SI_D_APM_IPCLKPORT_I_CLK 0x2068
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_APM_AP_IPCLKPORT_PCLK 0x206c
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_APM_CHUB_IPCLKPORT_PCLK 0x2070
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_APM_CP_IPCLKPORT_PCLK 0x2074
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_APM_GNSS_IPCLKPORT_PCLK 0x2078
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_APM_VTS_IPCLKPORT_PCLK 0x207c
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_AP_CHUB_IPCLKPORT_PCLK 0x2080
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_AP_CP_IPCLKPORT_PCLK 0x2084
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_AP_CP_S_IPCLKPORT_PCLK 0x2088
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_AP_DBGCORE_IPCLKPORT_PCLK 0x208c
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_AP_GNSS_IPCLKPORT_PCLK 0x2090
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_CP_CHUB_IPCLKPORT_PCLK 0x2094
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_CP_GNSS_IPCLKPORT_PCLK 0x2098
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_GNSS_CHUB_IPCLKPORT_PCLK 0x209c
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_SHARED_SRAM_IPCLKPORT_PCLK 0x20a0
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_APM_AUD_IPCLKPORT_PCLK 0x20a4
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MCT_ALIVE_IPCLKPORT_I_PCLK 0x20a8
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_PMU_INTR_GEN_IPCLKPORT_PCLK 0x20ac
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_PMU_IPCLKPORT_ACLK 0x20b0
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_PMU_IPCLKPORT_CLKIN_PMU_SUB 0x20b4
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_APM_CP_1_IPCLKPORT_PCLK 0x20b8
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_OTP_DESERIAL_ALIVE_IPCLKPORT_I_CLK 0x20bc
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_SR_CLK_ALIVE_DBGCORE_UART_IPCLKPORT_CLK 0x20c0
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_CLK_ALIVE_FREE_OSCCLK_IPCLKPORT_CLK 0x20c4
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_CLK_ALIVE_GREBE_IPCLKPORT_CLK 0x20c8
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_CLK_ALIVE_NOC_IPCLKPORT_CLK 0x20cc
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_CLK_ALIVE_OSCCLK_RCO_IPCLKPORT_CLK 0x20d0
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_CLK_ALIVE_SPMI_IPCLKPORT_CLK 0x20d4
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_CLK_ALIVE_TIMER_IPCLKPORT_CLK 0x20d8
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_SR_CLK_ALIVE_NOC_IPCLKPORT_CLK 0x20dc
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_RTC_IPCLKPORT_PCLK 0x20e0
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_OTP_HCU_DESERIAL_IPCLKPORT_I_CLK 0x20e4
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_MI_LD_GNSS_IPCLKPORT_I_CLK 0x20e8
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_MI_LP_MODEM_IPCLKPORT_I_CLK 0x20ec
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_MI_LD_CHUBVTS_IPCLKPORT_I_CLK 0x20f0
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_MI_ID_DBGCORE_IPCLKPORT_I_CLK 0x20f4
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_MI_P_APM_IPCLKPORT_I_CLK 0x20f8
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_SI_LP_CMGP_IPCLKPORT_I_CLK 0x20fc
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_SI_IP_APM_IPCLKPORT_I_CLK 0x2100
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_SR_CLK_ALIVE_FREE_OSCCLK_IPCLKPORT_CLK 0x2104
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_SI_LP_CHUBVTS_IPCLKPORT_I_CLK 0x2108
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_SI_LP_PPU_IPCLKPORT_I_CLK 0x210c
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_SI_LP_ALIVEDNC_IPCLKPORT_I_CLK 0x2110
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SPC_ALIVE_IPCLKPORT_PCLK 0x2114
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SPMI_MASTER_PMIC_IPCLKPORT_I_IPCLK 0x2118
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SPMI_MASTER_PMIC_IPCLKPORT_I_PCLK 0x211c
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SWEEPER_P_ALIVE_IPCLKPORT_ACLK 0x2120
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_SYSREG_ALIVE_IPCLKPORT_PCLK 0x2124
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_TOP_RTC_IPCLKPORT_OSCCLK 0x2128
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_TOP_RTC_IPCLKPORT_PCLK 0x212c
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_VGEN_LITE_ALIVE_IPCLKPORT_CLK 0x2130
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_WDT_ALIVE_IPCLKPORT_PCLK 0x2134
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_XIU_DP_ALIVE_IPCLKPORT_ACLK 0x2138
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_SR_CLK_ALIVE_OSCCLK_RCO_IPCLKPORT_CLK 0x213c
+#define CLK_CON_GAT_GATE_CLKALIVE_CHUB_PERI 0x2140
+#define CLK_CON_GAT_GATE_CLKALIVE_CMGP_NOC 0x2144
+#define CLK_CON_GAT_GATE_CLKALIVE_CMGP_PERI 0x2148
+#define CLK_CON_GAT_GATE_CLKALIVE_DBGCORE_NOC 0x214c
+#define CLK_CON_GAT_GATE_CLKALIVE_DNC_NOC 0x2150
+#define CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_SR_CLK_ALIVE_SPMI_IPCLKPORT_CLK 0x2154
+#define CLK_CON_GAT_GATE_CLKALIVE_GNPU_NOC 0x2158
+#define CLK_CON_GAT_GATE_CLKALIVE_SDMA_NOC 0x215c
+#define CLK_CON_GAT_GATE_CLKALIVE_UFD_NOC 0x2160
+#define CLK_CON_GAT_GATE_CLKALIVE_CHUBVTS_NOC 0x2164
+#define CLK_CON_GAT_GATE_CLKALIVE_CSIS_NOC 0x2168
+#define CLK_CON_GAT_GATE_CLKALIVE_DSP_NOC 0x216c
+
+static const unsigned long alive_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_ALIVE_NOC_USER,
+ PLL_CON1_MUX_CLKCMU_ALIVE_NOC_USER,
+ PLL_CON0_MUX_CLKMUX_ALIVE_RCO_SPMI_USER,
+ PLL_CON1_MUX_CLKMUX_ALIVE_RCO_SPMI_USER,
+ PLL_CON0_MUX_CLK_RCO_ALIVE_USER,
+ PLL_CON1_MUX_CLK_RCO_ALIVE_USER,
+ CLK_CON_MUX_MUX_CLKALIVE_CHUB_PERI,
+ CLK_CON_MUX_MUX_CLKALIVE_CMGP_NOC,
+ CLK_CON_MUX_MUX_CLKALIVE_CMGP_PERI,
+ CLK_CON_MUX_MUX_CLKALIVE_DBGCORE_NOC,
+ CLK_CON_MUX_MUX_CLKALIVE_DNC_NOC,
+ CLK_CON_MUX_MUX_CLKALIVE_CHUBVTS_NOC,
+ CLK_CON_MUX_MUX_CLKALIVE_GNPU_NOC,
+ CLK_CON_MUX_MUX_CLKALIVE_GNSS_NOC,
+ CLK_CON_MUX_MUX_CLKALIVE_SDMA_NOC,
+ CLK_CON_MUX_MUX_CLKALIVE_UFD_NOC,
+ CLK_CON_MUX_MUX_CLK_ALIVE_DBGCORE_UART,
+ CLK_CON_MUX_MUX_CLK_ALIVE_NOC,
+ CLK_CON_MUX_MUX_CLK_ALIVE_PMU_SUB,
+ CLK_CON_MUX_MUX_CLK_ALIVE_SPMI,
+ CLK_CON_MUX_MUX_CLK_ALIVE_TIMER,
+ CLK_CON_MUX_MUX_CLKALIVE_CSIS_NOC,
+ CLK_CON_MUX_MUX_CLKALIVE_DSP_NOC,
+ CLK_CON_DIV_CLKALIVE_CHUB_PERI,
+ CLK_CON_DIV_CLKALIVE_CMGP_NOC,
+ CLK_CON_DIV_CLKALIVE_CMGP_PERI,
+ CLK_CON_DIV_CLKALIVE_DBGCORE_NOC,
+ CLK_CON_DIV_CLKALIVE_DNC_NOC,
+ CLK_CON_DIV_CLKALIVE_CHUBVTS_NOC,
+ CLK_CON_DIV_CLKALIVE_GNPU_NOC,
+ CLK_CON_DIV_CLKALIVE_SDMA_NOC,
+ CLK_CON_DIV_CLKALIVE_UFD_NOC,
+ CLK_CON_DIV_DIV_CLK_ALIVE_DBGCORE_UART,
+ CLK_CON_DIV_DIV_CLK_ALIVE_NOC,
+ CLK_CON_DIV_DIV_CLK_ALIVE_PMU_SUB,
+ CLK_CON_DIV_DIV_CLK_ALIVE_SPMI,
+ CLK_CON_DIV_CLKALIVE_CSIS_NOC,
+ CLK_CON_DIV_CLKALIVE_DSP_NOC,
+ CLK_CON_GAT_CLKALIVE_CHUBVTS_RCO,
+ CLK_CON_GAT_CLKALIVE_DNC_RCO,
+ CLK_CON_GAT_CLKALIVE_CSIS_RCO,
+ CLK_CON_GAT_CLKALIVE_GNPU_RCO,
+ CLK_CON_GAT_CLKALIVE_GNSS_NOC,
+ CLK_CON_GAT_CLKALIVE_SDMA_RCO,
+ CLK_CON_GAT_CLKALIVE_UFD_RCO,
+ CLK_CON_GAT_CLKALIVE_DSP_RCO,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_ALIVE_CMU_ALIVE_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_APBIF_GPIO_ALIVE_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_APBIF_INTCOMB_VGPIO2APM_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_APBIF_INTCOMB_VGPIO2AP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_APBIF_INTCOMB_VGPIO2PMU_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_APBIF_PMU_ALIVE_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_APM_DMA_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_CHUB_RTC_IPCLKPORT_OSCCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_CHUB_RTC_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_CLKMON_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_DBGCORE_UART_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_DBGCORE_UART_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_BLK_ALIVE_FRC_OTP_DESERIAL_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_DTZPC_ALIVE_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_GREBEINTEGRATION_IPCLKPORT_HCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_HW_SCANDUMP_CLKSTOP_CTRL_IPCLKPORT_ACLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_INTMEM_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_INTMEM_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_LH_AXI_SI_D_APM_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_APM_AP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_APM_CHUB_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_APM_CP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_APM_GNSS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_APM_VTS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_AP_CHUB_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_AP_CP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_AP_CP_S_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_AP_DBGCORE_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_AP_GNSS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_CP_CHUB_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_CP_GNSS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_GNSS_CHUB_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_SHARED_SRAM_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_APM_AUD_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MCT_ALIVE_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_PMU_INTR_GEN_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_PMU_IPCLKPORT_ACLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_PMU_IPCLKPORT_CLKIN_PMU_SUB,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_MAILBOX_APM_CP_1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_OTP_DESERIAL_ALIVE_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_SR_CLK_ALIVE_DBGCORE_UART_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_CLK_ALIVE_FREE_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_CLK_ALIVE_GREBE_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_CLK_ALIVE_NOC_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_CLK_ALIVE_OSCCLK_RCO_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_CLK_ALIVE_SPMI_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_CLK_ALIVE_TIMER_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_SR_CLK_ALIVE_NOC_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_RTC_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_OTP_HCU_DESERIAL_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_MI_LD_GNSS_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_MI_LP_MODEM_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_MI_LD_CHUBVTS_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_MI_ID_DBGCORE_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_MI_P_APM_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_SI_LP_CMGP_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_SI_IP_APM_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_SR_CLK_ALIVE_FREE_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_SI_LP_CHUBVTS_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_SI_LP_PPU_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SLH_AXI_SI_LP_ALIVEDNC_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SPC_ALIVE_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SPMI_MASTER_PMIC_IPCLKPORT_I_IPCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SPMI_MASTER_PMIC_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SWEEPER_P_ALIVE_IPCLKPORT_ACLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_SYSREG_ALIVE_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_TOP_RTC_IPCLKPORT_OSCCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_TOP_RTC_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_VGEN_LITE_ALIVE_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_WDT_ALIVE_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_XIU_DP_ALIVE_IPCLKPORT_ACLK,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_SR_CLK_ALIVE_OSCCLK_RCO_IPCLKPORT_CLK,
+ CLK_CON_GAT_GATE_CLKALIVE_CHUB_PERI,
+ CLK_CON_GAT_GATE_CLKALIVE_CMGP_NOC,
+ CLK_CON_GAT_GATE_CLKALIVE_CMGP_PERI,
+ CLK_CON_GAT_GATE_CLKALIVE_DBGCORE_NOC,
+ CLK_CON_GAT_GATE_CLKALIVE_DNC_NOC,
+ CLK_CON_GAT_CLK_BLK_ALIVE_UID_RSTNSYNC_SR_CLK_ALIVE_SPMI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GATE_CLKALIVE_GNPU_NOC,
+ CLK_CON_GAT_GATE_CLKALIVE_SDMA_NOC,
+ CLK_CON_GAT_GATE_CLKALIVE_UFD_NOC,
+ CLK_CON_GAT_GATE_CLKALIVE_CHUBVTS_NOC,
+ CLK_CON_GAT_GATE_CLKALIVE_CSIS_NOC,
+ CLK_CON_GAT_GATE_CLKALIVE_DSP_NOC,
+};
+
+PNAME(mout_alive_noc_user_p) = { "oscclk", "dout_cmu_alive_noc" };
+PNAME(mout_alive_rco_spmi_user_p) = { "oscclk", "rco_i3c_pmic" };
+PNAME(mout_rco_alive_user_p) = { "oscclk", "rco_alive" };
+PNAME(mout_alive_chub_peri_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_noc_user", "oscclk" };
+PNAME(mout_alive_cmgp_noc_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_noc_user", "oscclk" };
+PNAME(mout_alive_cmgp_peri_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_noc_user", "oscclk" };
+PNAME(mout_alive_dbgcore_noc_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_noc_user", "oscclk" };
+PNAME(mout_alive_dnc_noc_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_noc_user", "oscclk" };
+PNAME(mout_alive_chubvts_noc_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_noc_user", "oscclk" };
+PNAME(mout_alive_gnpu_noc_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_noc_user", "oscclk" };
+PNAME(mout_alive_gnss_noc_p) = { "rco_400", "mout_alive_noc_user" };
+PNAME(mout_alive_sdma_noc_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_noc_user", "oscclk" };
+PNAME(mout_alive_ufd_noc_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_noc_user", "oscclk" };
+PNAME(mout_alive_dbgcore_uart_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_noc_user", "oscclk" };
+PNAME(mout_alive_noc_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_noc_user", "oscclk" };
+PNAME(mout_alive_pmu_sub_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_noc_user", "oscclk" };
+PNAME(mout_alive_spmi_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_rco_spmi_user",
+ "oscclk" };
+PNAME(mout_alive_timer_p) = { "oscclk", "oscclk" };
+PNAME(mout_alive_csis_noc_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_noc_user", "oscclk" };
+PNAME(mout_alive_dsp_noc_p) = { "mout_rco_alive_user", "rco_400",
+ "mout_alive_noc_user", "oscclk" };
+
+static const struct samsung_mux_clock alive_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_ALIVE_NOC_USER, "mout_alive_noc_user",
+ mout_alive_noc_user_p, PLL_CON0_MUX_CLKCMU_ALIVE_NOC_USER, 4, 1),
+ MUX(CLK_MOUT_ALIVE_RCO_SPMI_USER, "mout_alive_rco_spmi_user",
+ mout_alive_rco_spmi_user_p,
+ PLL_CON0_MUX_CLKMUX_ALIVE_RCO_SPMI_USER, 4, 1),
+ MUX(CLK_MOUT_RCO_ALIVE_USER, "mout_rco_alive_user",
+ mout_rco_alive_user_p, PLL_CON0_MUX_CLK_RCO_ALIVE_USER, 4, 1),
+ MUX(CLK_MOUT_ALIVE_CHUB_PERI, "mout_alive_chub_peri",
+ mout_alive_chub_peri_p, CLK_CON_MUX_MUX_CLKALIVE_CHUB_PERI, 0, 2),
+ MUX(CLK_MOUT_ALIVE_CMGP_NOC, "mout_alive_cmgp_noc",
+ mout_alive_cmgp_noc_p, CLK_CON_MUX_MUX_CLKALIVE_CMGP_NOC, 0, 2),
+ MUX(CLK_MOUT_ALIVE_CMGP_PERI, "mout_alive_cmgp_peri",
+ mout_alive_cmgp_peri_p, CLK_CON_MUX_MUX_CLKALIVE_CMGP_PERI, 0, 2),
+ MUX(CLK_MOUT_ALIVE_DBGCORE_NOC, "mout_alive_dbgcore_noc",
+ mout_alive_dbgcore_noc_p, CLK_CON_MUX_MUX_CLKALIVE_DBGCORE_NOC,
+ 0, 2),
+ MUX(CLK_MOUT_ALIVE_DNC_NOC, "mout_alive_dnc_noc", mout_alive_dnc_noc_p,
+ CLK_CON_MUX_MUX_CLKALIVE_DNC_NOC, 0, 2),
+ MUX(CLK_MOUT_ALIVE_CHUBVTS_NOC, "mout_alive_chubvts_noc",
+ mout_alive_chubvts_noc_p, CLK_CON_MUX_MUX_CLKALIVE_CHUBVTS_NOC,
+ 0, 2),
+ MUX(CLK_MOUT_ALIVE_GNPU_NOC, "mout_alive_gnpu_noc",
+ mout_alive_gnpu_noc_p, CLK_CON_MUX_MUX_CLKALIVE_GNPU_NOC, 0, 2),
+ MUX(CLK_MOUT_ALIVE_GNSS_NOC, "mout_alive_gnss_noc",
+ mout_alive_gnss_noc_p, CLK_CON_MUX_MUX_CLKALIVE_GNSS_NOC, 0, 1),
+ MUX(CLK_MOUT_ALIVE_SDMA_NOC, "mout_alive_sdma_noc",
+ mout_alive_sdma_noc_p, CLK_CON_MUX_MUX_CLKALIVE_SDMA_NOC, 0, 2),
+ MUX(CLK_MOUT_ALIVE_UFD_NOC, "mout_alive_ufd_noc", mout_alive_ufd_noc_p,
+ CLK_CON_MUX_MUX_CLKALIVE_UFD_NOC, 0, 2),
+ MUX(CLK_MOUT_ALIVE_DBGCORE_UART, "mout_alive_dbgcore_uart",
+ mout_alive_dbgcore_uart_p, CLK_CON_MUX_MUX_CLK_ALIVE_DBGCORE_UART,
+ 0, 2),
+ MUX(CLK_MOUT_ALIVE_NOC, "mout_alive_noc", mout_alive_noc_p,
+ CLK_CON_MUX_MUX_CLK_ALIVE_NOC, 0, 2),
+ MUX(CLK_MOUT_ALIVE_PMU_SUB, "mout_alive_pmu_sub", mout_alive_pmu_sub_p,
+ CLK_CON_MUX_MUX_CLK_ALIVE_PMU_SUB, 0, 2),
+ MUX(CLK_MOUT_ALIVE_SPMI, "mout_alive_spmi", mout_alive_spmi_p,
+ CLK_CON_MUX_MUX_CLK_ALIVE_SPMI, 0, 2),
+ MUX(CLK_MOUT_ALIVE_TIMER, "mout_alive_timer", mout_alive_timer_p,
+ CLK_CON_MUX_MUX_CLK_ALIVE_TIMER, 0, 1),
+ MUX(CLK_MOUT_ALIVE_CSIS_NOC, "mout_alive_csis_noc",
+ mout_alive_csis_noc_p, CLK_CON_MUX_MUX_CLKALIVE_CSIS_NOC, 0, 2),
+ MUX(CLK_MOUT_ALIVE_DSP_NOC, "mout_alive_dsp_noc", mout_alive_dsp_noc_p,
+ CLK_CON_MUX_MUX_CLKALIVE_DSP_NOC, 0, 2),
+};
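+/*
+ * Each MUX() entry above follows the usual samsung-clk layout of
+ * MUX(id, name, parents, offset, shift, width): for instance "mout_alive_noc"
+ * selects one of the four mout_alive_noc_p parents through a 2-bit field at
+ * bit 0 of CLK_CON_MUX_MUX_CLK_ALIVE_NOC.
+ */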
+
+static const struct samsung_div_clock alive_div_clks[] __initconst = {
+ DIV(CLK_DOUT_ALIVE_CHUB_PERI, "dout_alive_chub_peri",
+ "mout_alive_chub_peri", CLK_CON_DIV_CLKALIVE_CHUB_PERI, 0, 3),
+ DIV(CLK_DOUT_ALIVE_CMGP_NOC, "dout_alive_cmgp_noc",
+ "mout_alive_cmgp_noc", CLK_CON_DIV_CLKALIVE_CMGP_NOC, 0, 3),
+ DIV(CLK_DOUT_ALIVE_CMGP_PERI, "dout_alive_cmgp_peri",
+ "mout_alive_cmgp_peri", CLK_CON_DIV_CLKALIVE_CMGP_PERI, 0, 3),
+ DIV(CLK_DOUT_ALIVE_DBGCORE_NOC, "dout_alive_dbgcore_noc",
+ "mout_alive_dbgcore_noc", CLK_CON_DIV_CLKALIVE_DBGCORE_NOC, 0, 3),
+ DIV(CLK_DOUT_ALIVE_DNC_NOC, "dout_alive_dnc_noc", "mout_alive_dnc_noc",
+ CLK_CON_DIV_CLKALIVE_DNC_NOC, 0, 3),
+ DIV(CLK_DOUT_ALIVE_CHUBVTS_NOC, "dout_alive_chubvts_noc",
+ "mout_alive_chubvts_noc", CLK_CON_DIV_CLKALIVE_CHUBVTS_NOC, 0, 3),
+ DIV(CLK_DOUT_ALIVE_GNPU_NOC, "dout_alive_gnpu_noc",
+ "mout_alive_gnpu_noc", CLK_CON_DIV_CLKALIVE_GNPU_NOC, 0, 3),
+ DIV(CLK_DOUT_ALIVE_SDMA_NOC, "dout_alive_sdma_noc",
+ "mout_alive_sdma_noc", CLK_CON_DIV_CLKALIVE_SDMA_NOC, 0, 3),
+ DIV(CLK_DOUT_ALIVE_UFD_NOC, "dout_alive_ufd_noc", "mout_alive_ufd_noc",
+ CLK_CON_DIV_CLKALIVE_UFD_NOC, 0, 3),
+ DIV(CLK_DOUT_ALIVE_DBGCORE_UART, "dout_alive_dbgcore_uart",
+ "mout_alive_dbgcore_uart", CLK_CON_DIV_DIV_CLK_ALIVE_DBGCORE_UART,
+ 0, 4),
+ DIV(CLK_DOUT_ALIVE_NOC, "dout_alive_noc", "mout_alive_noc",
+ CLK_CON_DIV_DIV_CLK_ALIVE_NOC, 0, 3),
+ DIV(CLK_DOUT_ALIVE_PMU_SUB, "dout_alive_pmu_sub", "mout_alive_pmu_sub",
+ CLK_CON_DIV_DIV_CLK_ALIVE_PMU_SUB, 0, 3),
+ DIV(CLK_DOUT_ALIVE_SPMI, "dout_alive_spmi", "mout_alive_spmi",
+ CLK_CON_DIV_DIV_CLK_ALIVE_SPMI, 0, 5),
+ DIV(CLK_DOUT_ALIVE_CSIS_NOC, "dout_alive_csis_noc",
+ "mout_alive_csis_noc", CLK_CON_DIV_CLKALIVE_CSIS_NOC, 0, 3),
+ DIV(CLK_DOUT_ALIVE_DSP_NOC, "dout_alive_dsp_noc",
+ "mout_alive_dsp_noc", CLK_CON_DIV_CLKALIVE_DSP_NOC, 0, 3),
+};
+
+static const struct samsung_fixed_rate_clock alive_fixed_clks[] __initconst = {
+ FRATE(0, "rco_i3c_pmic", NULL, 0, 49152000),
+ FRATE(0, "rco_alive", NULL, 0, 49152000),
+ FRATE(0, "rco_400", NULL, 0, 393216000),
+};
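+/*
+ * The RCO inputs to CMU_ALIVE (presumably on-chip RC oscillators) are not
+ * controllable from here, so they are modelled as fixed-rate root clocks with
+ * no parent: 49.152 MHz for rco_i3c_pmic/rco_alive and 393.216 MHz for
+ * rco_400.
+ */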
+
+static const struct samsung_cmu_info alive_cmu_info __initconst = {
+ .mux_clks = alive_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(alive_mux_clks),
+ .div_clks = alive_div_clks,
+ .nr_div_clks = ARRAY_SIZE(alive_div_clks),
+ .fixed_clks = alive_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(alive_fixed_clks),
+ .nr_clk_ids = CLKS_NR_ALIVE,
+ .clk_regs = alive_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(alive_clk_regs),
+ .clk_name = "noc",
+};
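+/*
+ * The "noc" entry in .clk_name names the CMU bus clock that
+ * exynos_arm64_register_cmu() is expected to look up in this CMU's device
+ * tree node and enable, so the registers listed in alive_clk_regs stay
+ * accessible.
+ */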
+
+static void __init exynos2200_cmu_alive_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &alive_cmu_info);
+}
+
+/* Register CMU_ALIVE early, as it's a dependency for other early domains */
+CLK_OF_DECLARE(exynos2200_cmu_alive, "samsung,exynos2200-cmu-alive",
+ exynos2200_cmu_alive_init);
+
+/* ---- CMU_PERIS ---------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIS (0x10020000) */
+#define PLL_CON0_MUX_CLKCMU_PERIS_GIC_USER 0x600
+#define PLL_CON1_MUX_CLKCMU_PERIS_GIC_USER 0x604
+#define PLL_CON0_MUX_CLKCMU_PERIS_NOC_USER 0x610
+#define PLL_CON1_MUX_CLKCMU_PERIS_NOC_USER 0x614
+#define CLK_CON_MUX_MUX_CLK_PERIS_GIC 0x1000
+#define CLK_CON_DIV_CLKCMU_OTP 0x1800
+#define CLK_CON_DIV_DIV_CLK_PERIS_DDD_CTRL 0x1804
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_BUSIF_DDD_PERIS_IPCLKPORT_ATCLK 0x2000
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_BUSIF_DDD_PERIS_IPCLKPORT_PCLK 0x2004
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_DDD_PERIS_IPCLKPORT_CK_IN 0x2008
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_D_TZPC_PERIS_IPCLKPORT_PCLK 0x200c
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_GIC_IPCLKPORT_GICCLK 0x2010
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_LH_AST_MI_LD_ICC_CPUGIC_CLUSTER0_IPCLKPORT_I_CLK 0x2014
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_LH_AST_SI_LD_IRI_GICCPU_CLUSTER0_IPCLKPORT_I_CLK 0x2018
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_MCT_IPCLKPORT_PCLK 0x201c
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_I_OSCCLK 0x2020
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_PCLK 0x2024
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_I_OSCCLK 0x2028
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_PCLK 0x202c
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_PERIS_CMU_PERIS_IPCLKPORT_PCLK 0x2030
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_FREE_OSCCLK_IPCLKPORT_CLK 0x2038
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_GIC_IPCLKPORT_CLK 0x203c
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_NOCP_IPCLKPORT_CLK 0x2040
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_SLH_AXI_MI_P_PERISGIC_IPCLKPORT_I_CLK 0x2044
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_SLH_AXI_MI_P_PERIS_IPCLKPORT_I_CLK 0x2048
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_SYSREG_PERIS_IPCLKPORT_PCLK 0x204c
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_TMU_SUB_IPCLKPORT_PCLK 0x2050
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_TMU_TOP_IPCLKPORT_PCLK 0x2054
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_WDT0_IPCLKPORT_PCLK 0x2058
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_WDT1_IPCLKPORT_PCLK 0x205c
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_SR_CLK_PERIS_DDD_CTRL_IPCLKPORT_CLK 0x2060
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_SR_CLK_PERIS_FREE_OSCCLK_IPCLKPORT_CLK 0x2064
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_SR_CLK_PERIS_GIC_IPCLKPORT_CLK 0x2068
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_SR_CLK_PERIS_NOCP_IPCLKPORT_CLK 0x206c
+
+static const unsigned long peris_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIS_GIC_USER,
+ PLL_CON1_MUX_CLKCMU_PERIS_GIC_USER,
+ PLL_CON0_MUX_CLKCMU_PERIS_NOC_USER,
+ PLL_CON1_MUX_CLKCMU_PERIS_NOC_USER,
+ CLK_CON_MUX_MUX_CLK_PERIS_GIC,
+ CLK_CON_DIV_CLKCMU_OTP,
+ CLK_CON_DIV_DIV_CLK_PERIS_DDD_CTRL,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_BUSIF_DDD_PERIS_IPCLKPORT_ATCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_BUSIF_DDD_PERIS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_DDD_PERIS_IPCLKPORT_CK_IN,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_D_TZPC_PERIS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_GIC_IPCLKPORT_GICCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_LH_AST_MI_LD_ICC_CPUGIC_CLUSTER0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_LH_AST_SI_LD_IRI_GICCPU_CLUSTER0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_MCT_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_I_OSCCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_I_OSCCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_PERIS_CMU_PERIS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_FREE_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_GIC_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_NOCP_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_SLH_AXI_MI_P_PERISGIC_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_SLH_AXI_MI_P_PERIS_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_SYSREG_PERIS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_TMU_SUB_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_TMU_TOP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_WDT0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_WDT1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_SR_CLK_PERIS_DDD_CTRL_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_SR_CLK_PERIS_FREE_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_SR_CLK_PERIS_GIC_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_SR_CLK_PERIS_NOCP_IPCLKPORT_CLK,
+};
+
+PNAME(mout_peris_gic_user_p) = { "dout_tcxo_div3",
+ "dout_cmu_peris_gic" };
+PNAME(mout_peris_noc_user_p) = { "dout_tcxo_div3",
+ "dout_cmu_peris_noc" };
+PNAME(mout_peris_gic_p) = { "mout_peris_gic_user", "dout_tcxo_div3" };
+
+static const struct samsung_mux_clock peris_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIS_GIC_USER, "mout_peris_gic_user",
+ mout_peris_gic_user_p, PLL_CON0_MUX_CLKCMU_PERIS_GIC_USER, 4, 1),
+ MUX(CLK_MOUT_PERIS_NOC_USER, "mout_peris_noc_user",
+ mout_peris_noc_user_p, PLL_CON0_MUX_CLKCMU_PERIS_NOC_USER, 4, 1),
+ MUX(CLK_MOUT_PERIS_GIC, "mout_peris_gic", mout_peris_gic_p,
+ CLK_CON_MUX_MUX_CLK_PERIS_GIC, 0, 1),
+};
+
+static const struct samsung_fixed_factor_clock peris_fixed_factor_clks[] __initconst = {
+ FFACTOR(CLK_DOUT_PERIS_OTP, "dout_peris_otp",
+ "dout_tcxo_div3", 1, 8, 0),
+ FFACTOR(CLK_DOUT_PERIS_DDD_CTRL, "dout_peris_ddd_ctrl",
+ "mout_peris_gic", 1, 4, 0),
+};
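+/*
+ * Fixed ratios derived from the table above (FFACTOR takes a
+ * multiplier/divider pair): dout_peris_otp runs at dout_tcxo_div3 / 8 and
+ * dout_peris_ddd_ctrl at mout_peris_gic / 4.
+ */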
+
+static const struct samsung_cmu_info peris_cmu_info __initconst = {
+ .mux_clks = peris_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peris_mux_clks),
+ .fixed_factor_clks = peris_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(peris_fixed_factor_clks),
+ .nr_clk_ids = CLKS_NR_PERIS,
+ .clk_regs = peris_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peris_clk_regs),
+ .clk_name = "noc",
+};
+
+static void __init exynos2200_cmu_peris_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &peris_cmu_info);
+}
+
+/* Register CMU_PERIS early, as it's a dependency for GIC and MCT */
+CLK_OF_DECLARE(exynos2200_cmu_peris, "samsung,exynos2200-cmu-peris",
+ exynos2200_cmu_peris_init);
+
+/* ---- CMU_CMGP ----------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_CMGP (0x14e00000) */
+#define PLL_CON0_MUX_CLKALIVE_CMGP_NOC_USER 0x610
+#define PLL_CON1_MUX_CLKALIVE_CMGP_NOC_USER 0x614
+#define PLL_CON0_MUX_CLKALIVE_CMGP_PERI_USER 0x620
+#define PLL_CON1_MUX_CLKALIVE_CMGP_PERI_USER 0x624
+#define CLK_CON_MUX_MUX_CLK_CMGP_I2C 0x1000
+#define CLK_CON_MUX_MUX_CLK_CMGP_SPI_I2C0 0x1008
+#define CLK_CON_MUX_MUX_CLK_CMGP_SPI_I2C1 0x100c
+#define CLK_CON_MUX_MUX_CLK_CMGP_SPI_MS_CTRL 0x1010
+#define CLK_CON_MUX_MUX_CLK_CMGP_USI0 0x1014
+#define CLK_CON_MUX_MUX_CLK_CMGP_USI1 0x1018
+#define CLK_CON_MUX_MUX_CLK_CMGP_USI2 0x101c
+#define CLK_CON_MUX_MUX_CLK_CMGP_USI3 0x1020
+#define CLK_CON_MUX_MUX_CLK_CMGP_USI4 0x1024
+#define CLK_CON_MUX_MUX_CLK_CMGP_USI5 0x1028
+#define CLK_CON_MUX_MUX_CLK_CMGP_USI6 0x102c
+#define CLK_CON_DIV_DIV_CLK_CMGP_I2C 0x1800
+#define CLK_CON_DIV_DIV_CLK_CMGP_SPI_I2C0 0x1808
+#define CLK_CON_DIV_DIV_CLK_CMGP_SPI_I2C1 0x180c
+#define CLK_CON_DIV_DIV_CLK_CMGP_SPI_MS_CTRL 0x1810
+#define CLK_CON_DIV_DIV_CLK_CMGP_USI0 0x1814
+#define CLK_CON_DIV_DIV_CLK_CMGP_USI1 0x1818
+#define CLK_CON_DIV_DIV_CLK_CMGP_USI2 0x181c
+#define CLK_CON_DIV_DIV_CLK_CMGP_USI3 0x1820
+#define CLK_CON_DIV_DIV_CLK_CMGP_USI4 0x1824
+#define CLK_CON_DIV_DIV_CLK_CMGP_USI5 0x1828
+#define CLK_CON_DIV_DIV_CLK_CMGP_USI6 0x182c
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_APBIF_GPIO_CMGP_IPCLKPORT_PCLK 0x2000
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_CMGP_CMU_CMGP_IPCLKPORT_PCLK 0x2004
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_CMGP_I2C_IPCLKPORT_IPCLK 0x2008
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_CMGP_I2C_IPCLKPORT_PCLK 0x200c
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_D_TZPC_CMGP_IPCLKPORT_PCLK 0x2010
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP2_IPCLKPORT_IPCLK 0x2014
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP2_IPCLKPORT_PCLK 0x2018
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP3_IPCLKPORT_IPCLK 0x201c
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP3_IPCLKPORT_PCLK 0x2020
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP4_IPCLKPORT_IPCLK 0x2024
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP4_IPCLKPORT_PCLK 0x2028
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP5_IPCLKPORT_IPCLK 0x202c
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP5_IPCLKPORT_PCLK 0x2030
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP6_IPCLKPORT_IPCLK 0x2034
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP6_IPCLKPORT_PCLK 0x2038
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_NOC_IPCLKPORT_CLK 0x2040
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_CLK_CMGP_FREE_OSCCLK_IPCLKPORT_CLK 0x2044
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_I2C_IPCLKPORT_CLK 0x2048
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_CLK_CMGP_NOC_IPCLKPORT_CLK 0x2050
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_SPI_I2C0_IPCLKPORT_CLK 0x2054
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_SPI_I2C1_IPCLKPORT_CLK 0x2058
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_USI0_IPCLKPORT_CLK 0x205c
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_USI1_IPCLKPORT_CLK 0x2060
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_USI2_IPCLKPORT_CLK 0x2064
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_USI3_IPCLKPORT_CLK 0x2068
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_USI4_IPCLKPORT_CLK 0x206c
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_USI5_IPCLKPORT_CLK 0x2070
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_USI6_IPCLKPORT_CLK 0x2074
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_SPI_MS_CTRL_IPCLKPORT_CLK 0x2078
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_SLH_AXI_MI_LP_CMGP_IPCLKPORT_I_CLK 0x207c
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_SLH_AXI_SI_LP_CMGPUFD_IPCLKPORT_I_CLK 0x2080
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_SPI_I2C_CMGP0_IPCLKPORT_IPCLK 0x2084
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_SPI_I2C_CMGP0_IPCLKPORT_PCLK 0x2088
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_SPI_I2C_CMGP1_IPCLKPORT_IPCLK 0x208c
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_SPI_I2C_CMGP1_IPCLKPORT_PCLK 0x2090
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_SPI_MULTI_SLV_Q_CTRL_CMGP_IPCLKPORT_CLK 0x2094
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_SYSREG_CMGP2APM_IPCLKPORT_PCLK 0x2098
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_SYSREG_CMGP2CHUB_IPCLKPORT_PCLK 0x209c
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_SYSREG_CMGP2CP_IPCLKPORT_PCLK 0x20a0
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_SYSREG_CMGP2GNSS_IPCLKPORT_PCLK 0x20a4
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_SYSREG_CMGP2PMU_AP_IPCLKPORT_PCLK 0x20a8
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_SYSREG_CMGP_IPCLKPORT_PCLK 0x20ac
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP0_IPCLKPORT_IPCLK 0x20b0
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP0_IPCLKPORT_PCLK 0x20b4
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP1_IPCLKPORT_IPCLK 0x20b8
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP1_IPCLKPORT_PCLK 0x20bc
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP2_IPCLKPORT_IPCLK 0x20c0
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP2_IPCLKPORT_PCLK 0x20c4
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP3_IPCLKPORT_IPCLK 0x20c8
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP3_IPCLKPORT_PCLK 0x20cc
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP4_IPCLKPORT_IPCLK 0x20d0
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP4_IPCLKPORT_PCLK 0x20d4
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP5_IPCLKPORT_IPCLK 0x20d8
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP5_IPCLKPORT_PCLK 0x20dc
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP6_IPCLKPORT_IPCLK 0x20e0
+#define CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP6_IPCLKPORT_PCLK 0x20e4
+
+static const unsigned long cmgp_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKALIVE_CMGP_NOC_USER,
+ PLL_CON1_MUX_CLKALIVE_CMGP_NOC_USER,
+ PLL_CON0_MUX_CLKALIVE_CMGP_PERI_USER,
+ PLL_CON1_MUX_CLKALIVE_CMGP_PERI_USER,
+ CLK_CON_MUX_MUX_CLK_CMGP_I2C,
+ CLK_CON_MUX_MUX_CLK_CMGP_SPI_I2C0,
+ CLK_CON_MUX_MUX_CLK_CMGP_SPI_I2C1,
+ CLK_CON_MUX_MUX_CLK_CMGP_SPI_MS_CTRL,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI0,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI1,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI2,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI3,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI4,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI5,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI6,
+ CLK_CON_DIV_DIV_CLK_CMGP_I2C,
+ CLK_CON_DIV_DIV_CLK_CMGP_SPI_I2C0,
+ CLK_CON_DIV_DIV_CLK_CMGP_SPI_I2C1,
+ CLK_CON_DIV_DIV_CLK_CMGP_SPI_MS_CTRL,
+ CLK_CON_DIV_DIV_CLK_CMGP_USI0,
+ CLK_CON_DIV_DIV_CLK_CMGP_USI1,
+ CLK_CON_DIV_DIV_CLK_CMGP_USI2,
+ CLK_CON_DIV_DIV_CLK_CMGP_USI3,
+ CLK_CON_DIV_DIV_CLK_CMGP_USI4,
+ CLK_CON_DIV_DIV_CLK_CMGP_USI5,
+ CLK_CON_DIV_DIV_CLK_CMGP_USI6,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_APBIF_GPIO_CMGP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_CMGP_CMU_CMGP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_CMGP_I2C_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_CMGP_I2C_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_D_TZPC_CMGP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP2_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP3_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP3_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP4_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP4_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP5_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP5_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP6_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_I2C_CMGP6_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_NOC_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_CLK_CMGP_FREE_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_I2C_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_CLK_CMGP_NOC_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_SPI_I2C0_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_SPI_I2C1_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_USI0_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_USI1_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_USI2_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_USI3_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_USI4_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_USI5_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_USI6_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_RSTNSYNC_SR_CLK_CMGP_SPI_MS_CTRL_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_SLH_AXI_MI_LP_CMGP_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_SLH_AXI_SI_LP_CMGPUFD_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_SPI_I2C_CMGP0_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_SPI_I2C_CMGP0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_SPI_I2C_CMGP1_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_SPI_I2C_CMGP1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_SPI_MULTI_SLV_Q_CTRL_CMGP_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_SYSREG_CMGP2APM_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_SYSREG_CMGP2CHUB_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_SYSREG_CMGP2CP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_SYSREG_CMGP2GNSS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_SYSREG_CMGP2PMU_AP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_SYSREG_CMGP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP0_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP1_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP2_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP3_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP3_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP4_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP4_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP5_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP5_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP6_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_CMGP_UID_USI_CMGP6_IPCLKPORT_PCLK,
+};
+
+PNAME(mout_cmgp_clkalive_noc_user_p) = { "oscclk", "dout_alive_cmgp_noc" };
+PNAME(mout_cmgp_clkalive_peri_user_p) = { "oscclk", "dout_alive_cmgp_peri" };
+PNAME(mout_cmgp_i2c_p) = { "oscclk",
+ "mout_cmgp_clkalive_peri_user" };
+PNAME(mout_cmgp_spi_i2c0_p) = { "oscclk",
+ "mout_cmgp_clkalive_peri_user" };
+PNAME(mout_cmgp_spi_i2c1_p) = { "oscclk",
+ "mout_cmgp_clkalive_peri_user" };
+PNAME(mout_cmgp_spi_ms_ctrl_p) = { "oscclk",
+ "mout_cmgp_clkalive_peri_user" };
+PNAME(mout_cmgp_usi0_p) = { "oscclk",
+ "mout_cmgp_clkalive_peri_user" };
+PNAME(mout_cmgp_usi1_p) = { "oscclk",
+ "mout_cmgp_clkalive_peri_user" };
+PNAME(mout_cmgp_usi2_p) = { "oscclk",
+ "mout_cmgp_clkalive_peri_user" };
+PNAME(mout_cmgp_usi3_p) = { "oscclk",
+ "mout_cmgp_clkalive_peri_user" };
+PNAME(mout_cmgp_usi4_p) = { "oscclk",
+ "mout_cmgp_clkalive_peri_user" };
+PNAME(mout_cmgp_usi5_p) = { "oscclk",
+ "mout_cmgp_clkalive_peri_user" };
+PNAME(mout_cmgp_usi6_p) = { "oscclk",
+ "mout_cmgp_clkalive_peri_user" };
+
+static const struct samsung_mux_clock cmgp_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_CMGP_CLKALIVE_NOC_USER, "mout_cmgp_clkalive_noc_user",
+ mout_cmgp_clkalive_noc_user_p, PLL_CON0_MUX_CLKALIVE_CMGP_NOC_USER,
+ 4, 1),
+ MUX(CLK_MOUT_CMGP_CLKALIVE_PERI_USER, "mout_cmgp_clkalive_peri_user",
+ mout_cmgp_clkalive_peri_user_p,
+ PLL_CON0_MUX_CLKALIVE_CMGP_PERI_USER, 4, 1),
+ MUX(CLK_MOUT_CMGP_I2C, "mout_cmgp_i2c", mout_cmgp_i2c_p,
+ CLK_CON_MUX_MUX_CLK_CMGP_I2C, 0, 1),
+ MUX(CLK_MOUT_CMGP_SPI_I2C0, "mout_cmgp_spi_i2c0", mout_cmgp_spi_i2c0_p,
+ CLK_CON_MUX_MUX_CLK_CMGP_SPI_I2C0, 0, 1),
+ MUX(CLK_MOUT_CMGP_SPI_I2C1, "mout_cmgp_spi_i2c1", mout_cmgp_spi_i2c1_p,
+ CLK_CON_MUX_MUX_CLK_CMGP_SPI_I2C1, 0, 1),
+ MUX(CLK_MOUT_CMGP_SPI_MS_CTRL, "mout_cmgp_spi_ms_ctrl",
+ mout_cmgp_spi_ms_ctrl_p, CLK_CON_MUX_MUX_CLK_CMGP_SPI_MS_CTRL,
+ 0, 1),
+ MUX(CLK_MOUT_CMGP_USI0, "mout_cmgp_usi0", mout_cmgp_usi0_p,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI0, 0, 1),
+ MUX(CLK_MOUT_CMGP_USI1, "mout_cmgp_usi1", mout_cmgp_usi1_p,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI1, 0, 1),
+ MUX(CLK_MOUT_CMGP_USI2, "mout_cmgp_usi2", mout_cmgp_usi2_p,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI2, 0, 1),
+ MUX(CLK_MOUT_CMGP_USI3, "mout_cmgp_usi3", mout_cmgp_usi3_p,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI3, 0, 1),
+ MUX(CLK_MOUT_CMGP_USI4, "mout_cmgp_usi4", mout_cmgp_usi4_p,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI4, 0, 1),
+ MUX(CLK_MOUT_CMGP_USI5, "mout_cmgp_usi5", mout_cmgp_usi5_p,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI5, 0, 1),
+ MUX(CLK_MOUT_CMGP_USI6, "mout_cmgp_usi6", mout_cmgp_usi6_p,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI6, 0, 1),
+};
+
+static const struct samsung_div_clock cmgp_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CMGP_I2C, "dout_cmgp_i2c", "mout_cmgp_i2c",
+ CLK_CON_DIV_DIV_CLK_CMGP_I2C, 0, 4),
+ DIV(CLK_DOUT_CMGP_SPI_I2C0, "dout_cmgp_spi_i2c0", "mout_cmgp_spi_i2c0",
+ CLK_CON_DIV_DIV_CLK_CMGP_SPI_I2C0, 0, 4),
+ DIV(CLK_DOUT_CMGP_SPI_I2C1, "dout_cmgp_spi_i2c1", "mout_cmgp_spi_i2c1",
+ CLK_CON_DIV_DIV_CLK_CMGP_SPI_I2C1, 0, 4),
+ DIV(CLK_DOUT_CMGP_SPI_MS_CTRL, "dout_cmgp_spi_ms_ctrl",
+ "mout_cmgp_spi_ms_ctrl", CLK_CON_DIV_DIV_CLK_CMGP_SPI_MS_CTRL,
+ 0, 4),
+ DIV(CLK_DOUT_CMGP_USI0, "dout_cmgp_usi0", "mout_cmgp_usi0",
+ CLK_CON_DIV_DIV_CLK_CMGP_USI0, 0, 4),
+ DIV(CLK_DOUT_CMGP_USI1, "dout_cmgp_usi1", "mout_cmgp_usi1",
+ CLK_CON_DIV_DIV_CLK_CMGP_USI1, 0, 4),
+ DIV(CLK_DOUT_CMGP_USI2, "dout_cmgp_usi2", "mout_cmgp_usi2",
+ CLK_CON_DIV_DIV_CLK_CMGP_USI2, 0, 4),
+ DIV(CLK_DOUT_CMGP_USI3, "dout_cmgp_usi3", "mout_cmgp_usi3",
+ CLK_CON_DIV_DIV_CLK_CMGP_USI3, 0, 4),
+ DIV(CLK_DOUT_CMGP_USI4, "dout_cmgp_usi4", "mout_cmgp_usi4",
+ CLK_CON_DIV_DIV_CLK_CMGP_USI4, 0, 4),
+ DIV(CLK_DOUT_CMGP_USI5, "dout_cmgp_usi5", "mout_cmgp_usi5",
+ CLK_CON_DIV_DIV_CLK_CMGP_USI5, 0, 4),
+ DIV(CLK_DOUT_CMGP_USI6, "dout_cmgp_usi6", "mout_cmgp_usi6",
+ CLK_CON_DIV_DIV_CLK_CMGP_USI6, 0, 4),
+};
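+/*
+ * All CMGP dividers use 4-bit ratio fields; assuming the standard samsung
+ * divider behaviour (divide by "field value + 1"), each USI/I2C clock can be
+ * slowed down to 1/16th of its selected parent.
+ */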
+
+static const struct samsung_cmu_info cmgp_cmu_info __initconst = {
+ .mux_clks = cmgp_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmgp_mux_clks),
+ .div_clks = cmgp_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmgp_div_clks),
+ .nr_clk_ids = CLKS_NR_CMGP,
+ .clk_regs = cmgp_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmgp_clk_regs),
+ .clk_name = "noc",
+};
+
+/* ---- CMU_HSI0 ----------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_HSI0 (0x10a00000) */
+#define PLL_CON0_MUX_CLKAUD_HSI0_NOC_USER 0x600
+#define PLL_CON1_MUX_CLKAUD_HSI0_NOC_USER 0x604
+#define PLL_CON0_MUX_CLKCMU_HSI0_DPGTC_USER 0x610
+#define PLL_CON1_MUX_CLKCMU_HSI0_DPGTC_USER 0x614
+#define PLL_CON0_MUX_CLKCMU_HSI0_DPOSC_USER 0x620
+#define PLL_CON1_MUX_CLKCMU_HSI0_DPOSC_USER 0x624
+#define PLL_CON0_MUX_CLKCMU_HSI0_NOC_USER 0x630
+#define PLL_CON1_MUX_CLKCMU_HSI0_NOC_USER 0x634
+#define PLL_CON0_MUX_CLKCMU_HSI0_USB32DRD_USER 0x640
+#define PLL_CON1_MUX_CLKCMU_HSI0_USB32DRD_USER 0x644
+#define CLK_CON_MUX_MUX_CLK_HSI0_NOC 0x1000
+#define CLK_CON_MUX_MUX_CLK_HSI0_RTCCLK 0x1004
+#define CLK_CON_MUX_MUX_CLK_HSI0_USB32DRD 0x1008
+#define CLK_CON_DIV_DIV_CLK_HSI0_EUSB 0x1800
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_AS_APB_EUSBPHY_HSI0_IPCLKPORT_PCLKM 0x2000
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_DP_GTC_CLK 0x2004
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_DP_OSC_CLK 0x2008
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_PCLK 0x200c
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_D_TZPC_HSI0_IPCLKPORT_PCLK 0x2010
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_HSI0_CMU_HSI0_IPCLKPORT_PCLK 0x2014
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_ACLK 0x2018
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_PCLK 0x201c
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_RSTNSYNC_CLK_HSI0_EUSB_IPCLKPORT_CLK 0x2020
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_RSTNSYNC_CLK_HSI0_NOC_IPCLKPORT_CLK 0x2024
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_SLH_ACEL_SI_D_HSI0_IPCLKPORT_I_CLK 0x2028
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_SLH_AST_SI_G_PPMU_HSI0_IPCLKPORT_I_CLK 0x202c
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_SLH_AXI_MI_LD_AUDHSI0_IPCLKPORT_I_CLK 0x2030
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_SLH_AXI_MI_P_HSI0_IPCLKPORT_I_CLK 0x2034
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_SLH_AXI_SI_LD_HSI0AUD_IPCLKPORT_I_CLK 0x2038
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_BLK_HSI0_FRC_OTP_DESERIAL_IPCLKPORT_I_CLK 0x203c
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_SYSMMU_D_HSI0_IPCLKPORT_CLK_S2 0x2040
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_SYSREG_HSI0_IPCLKPORT_PCLK 0x2044
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_URAM_IPCLKPORT_ACLK 0x2048
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_USB32DRD_IPCLKPORT_I_EUSB_APB_CLK 0x204c
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_USB32DRD_IPCLKPORT_I_EUSB_CTRL_PCLK 0x2050
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_USB32DRD_IPCLKPORT_I_USB32DRD_REF_CLK_40 0x2054
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_USB32DRD_IPCLKPORT_I_USBDPPHY_CTRL_PCLK 0x2058
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_USB32DRD_IPCLKPORT_I_USBDPPHY_TCA_APB_CLK 0x205c
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_USB32DRD_IPCLKPORT_I_USBLINK_ACLK 0x2060
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_USB32DRD_IPCLKPORT_I_USBSUBCTL_APB_PCLK 0x2064
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_VGEN_LITE_HSI0_IPCLKPORT_CLK 0x2068
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_XIU_D_HSI0_IPCLKPORT_ACLK 0x206c
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_XIU_P0_HSI0_IPCLKPORT_ACLK 0x2070
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_OTP_DESERIAL_DPLINK_HDCP_IPCLKPORT_I_CLK 0x2074
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_RSTNSYNC_SR_CLK_HSI0_FREE_OSCCLK_IPCLKPORT_CLK 0x2078
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_RSTNSYNC_SR_CLK_HSI0_NOC_IPCLKPORT_CLK 0x207c
+#define CLK_CON_GAT_CLK_BLK_HSI0_UID_SPC_HSI0_IPCLKPORT_PCLK 0x2080
+
+static const unsigned long hsi0_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKAUD_HSI0_NOC_USER,
+ PLL_CON1_MUX_CLKAUD_HSI0_NOC_USER,
+ PLL_CON0_MUX_CLKCMU_HSI0_DPGTC_USER,
+ PLL_CON1_MUX_CLKCMU_HSI0_DPGTC_USER,
+ PLL_CON0_MUX_CLKCMU_HSI0_DPOSC_USER,
+ PLL_CON1_MUX_CLKCMU_HSI0_DPOSC_USER,
+ PLL_CON0_MUX_CLKCMU_HSI0_NOC_USER,
+ PLL_CON1_MUX_CLKCMU_HSI0_NOC_USER,
+ PLL_CON0_MUX_CLKCMU_HSI0_USB32DRD_USER,
+ PLL_CON1_MUX_CLKCMU_HSI0_USB32DRD_USER,
+ CLK_CON_MUX_MUX_CLK_HSI0_NOC,
+ CLK_CON_MUX_MUX_CLK_HSI0_RTCCLK,
+ CLK_CON_MUX_MUX_CLK_HSI0_USB32DRD,
+ CLK_CON_DIV_DIV_CLK_HSI0_EUSB,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_AS_APB_EUSBPHY_HSI0_IPCLKPORT_PCLKM,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_DP_GTC_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_DP_OSC_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_D_TZPC_HSI0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_HSI0_CMU_HSI0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_ACLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_RSTNSYNC_CLK_HSI0_EUSB_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_RSTNSYNC_CLK_HSI0_NOC_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_SLH_ACEL_SI_D_HSI0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_SLH_AST_SI_G_PPMU_HSI0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_SLH_AXI_MI_LD_AUDHSI0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_SLH_AXI_MI_P_HSI0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_SLH_AXI_SI_LD_HSI0AUD_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_BLK_HSI0_FRC_OTP_DESERIAL_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_SYSMMU_D_HSI0_IPCLKPORT_CLK_S2,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_SYSREG_HSI0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_URAM_IPCLKPORT_ACLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_USB32DRD_IPCLKPORT_I_EUSB_APB_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_USB32DRD_IPCLKPORT_I_EUSB_CTRL_PCLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_USB32DRD_IPCLKPORT_I_USB32DRD_REF_CLK_40,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_USB32DRD_IPCLKPORT_I_USBDPPHY_CTRL_PCLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_USB32DRD_IPCLKPORT_I_USBDPPHY_TCA_APB_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_USB32DRD_IPCLKPORT_I_USBLINK_ACLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_USB32DRD_IPCLKPORT_I_USBSUBCTL_APB_PCLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_VGEN_LITE_HSI0_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_XIU_D_HSI0_IPCLKPORT_ACLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_XIU_P0_HSI0_IPCLKPORT_ACLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_OTP_DESERIAL_DPLINK_HDCP_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_RSTNSYNC_SR_CLK_HSI0_FREE_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_RSTNSYNC_SR_CLK_HSI0_NOC_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_HSI0_UID_SPC_HSI0_IPCLKPORT_PCLK,
+};
+
+PNAME(mout_clkcmu_hsi0_dpgtc_user_p) = { "oscclk", "dout_cmu_hsi0_dpgtc" };
+PNAME(mout_clkcmu_hsi0_dposc_user_p) = { "oscclk", "dout_cmu_hsi0_dposc" };
+PNAME(mout_clkcmu_hsi0_noc_user_p) = { "oscclk", "dout_cmu_hsi0_noc" };
+PNAME(mout_clkcmu_hsi0_usb32drd_user_p) = { "oscclk",
+ "dout_cmu_hsi0_usb32drd" };
+PNAME(mout_mux_clk_hsi0_noc_p) = { "mout_clkcmu_hsi0_noc_user" };
+PNAME(mout_mux_clk_hsi0_rtcclk_p) = { "rtcclk", "oscclk" };
+PNAME(mout_mux_clk_hsi0_usb32drd_p) = { "dout_tcxo_div4",
+ "mout_clkcmu_hsi0_usb32drd_user" };
+
+static const struct samsung_mux_clock hsi0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_CLKCMU_HSI0_DPGTC_USER, "mout_clkcmu_hsi0_dpgtc_user",
+ mout_clkcmu_hsi0_dpgtc_user_p, PLL_CON0_MUX_CLKCMU_HSI0_DPGTC_USER,
+ 4, 1),
+ MUX(CLK_MOUT_CLKCMU_HSI0_DPOSC_USER, "mout_clkcmu_hsi0_dposc_user",
+ mout_clkcmu_hsi0_dposc_user_p, PLL_CON0_MUX_CLKCMU_HSI0_DPOSC_USER,
+ 4, 1),
+ MUX(CLK_MOUT_CLKCMU_HSI0_NOC_USER, "mout_clkcmu_hsi0_noc_user",
+ mout_clkcmu_hsi0_noc_user_p, PLL_CON0_MUX_CLKCMU_HSI0_NOC_USER,
+ 4, 1),
+ MUX(CLK_MOUT_CLKCMU_HSI0_USB32DRD_USER,
+ "mout_clkcmu_hsi0_usb32drd_user", mout_clkcmu_hsi0_usb32drd_user_p,
+ PLL_CON0_MUX_CLKCMU_HSI0_USB32DRD_USER, 4, 1),
+ MUX(CLK_MOUT_HSI0_NOC, "mout_hsi0_noc", mout_mux_clk_hsi0_noc_p,
+ CLK_CON_MUX_MUX_CLK_HSI0_NOC, 0, 1),
+ MUX(CLK_MOUT_HSI0_RTCCLK, "mout_hsi0_rtcclk",
+ mout_mux_clk_hsi0_rtcclk_p, CLK_CON_MUX_MUX_CLK_HSI0_RTCCLK, 0, 1),
+ MUX(CLK_MOUT_HSI0_USB32DRD, "mout_hsi0_usb32drd",
+ mout_mux_clk_hsi0_usb32drd_p, CLK_CON_MUX_MUX_CLK_HSI0_USB32DRD,
+ 0, 1),
+};
+
+static const struct samsung_div_clock hsi0_div_clks[] __initconst = {
+ DIV(CLK_DOUT_DIV_CLK_HSI0_EUSB, "dout_div_clk_hsi0_eusb",
+ "mout_hsi0_noc", CLK_CON_DIV_DIV_CLK_HSI0_EUSB, 0, 2),
+};
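+/*
+ * dout_div_clk_hsi0_eusb only has a 2-bit ratio field, so (again assuming
+ * divide by "field value + 1") the eUSB clock can at most be the HSI0 NoC
+ * clock divided by four.
+ */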
+
+static const struct samsung_cmu_info hsi0_cmu_info __initconst = {
+ .mux_clks = hsi0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(hsi0_mux_clks),
+ .div_clks = hsi0_div_clks,
+ .nr_div_clks = ARRAY_SIZE(hsi0_div_clks),
+ .nr_clk_ids = CLKS_NR_HSI0,
+ .clk_regs = hsi0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(hsi0_clk_regs),
+ .clk_name = "noc",
+};
+
+/* ---- CMU_PERIC0 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIC0 (0x10400000) */
+#define PLL_CON0_MUX_CLKCMU_PERIC0_IP0_USER 0x600
+#define PLL_CON1_MUX_CLKCMU_PERIC0_IP0_USER 0x604
+#define PLL_CON0_MUX_CLKCMU_PERIC0_IP1_USER 0x610
+#define PLL_CON1_MUX_CLKCMU_PERIC0_IP1_USER 0x614
+#define PLL_CON0_MUX_CLKCMU_PERIC0_NOC_USER 0x620
+#define PLL_CON1_MUX_CLKCMU_PERIC0_NOC_USER 0x624
+#define CLK_CON_MUX_MUX_CLK_PERIC0_I2C 0x1000
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI04 0x1010
+#define CLK_CON_DIV_DIV_CLK_PERIC0_I2C 0x1800
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI04 0x1810
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_D_TZPC_PERIC0_IPCLKPORT_PCLK 0x2000
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_GPIO_PERIC0_IPCLKPORT_PCLK 0x2004
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_PERIC0_CMU_PERIC0_IPCLKPORT_PCLK 0x2008
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_FREE_OSCCLK_IPCLKPORT_CLK 0x200c
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_I2C_IPCLKPORT_CLK 0x2010
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_NOCP_IPCLKPORT_CLK 0x2014
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_SR_CLK_PERIC0_USI04_IPCLKPORT_CLK 0x2018
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_SLH_AXI_MI_P_PERIC0_IPCLKPORT_I_CLK 0x201c
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_SYSREG_PERIC0_IPCLKPORT_PCLK 0x2020
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_USI04_I2C_IPCLKPORT_IPCLK 0x2024
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_USI04_I2C_IPCLKPORT_PCLK 0x2028
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_USI04_USI_IPCLKPORT_IPCLK 0x202c
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_USI04_USI_IPCLKPORT_PCLK 0x2030
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_I3C00_IPCLKPORT_I_PCLK 0x2034
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_I3C00_IPCLKPORT_I_SCLK 0x2038
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_I3C01_IPCLKPORT_I_PCLK 0x203c
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_I3C01_IPCLKPORT_I_SCLK 0x2040
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_I3C02_IPCLKPORT_I_PCLK 0x2044
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_I3C02_IPCLKPORT_I_SCLK 0x2048
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_SR_CLK_PERIC0_I2C_IPCLKPORT_CLK 0x204c
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_SR_CLK_PERIC0_NOCP_IPCLKPORT_CLK 0x2050
+
+static const unsigned long peric0_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIC0_IP0_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_IP0_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_IP1_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_IP1_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_NOC_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_NOC_USER,
+ CLK_CON_MUX_MUX_CLK_PERIC0_I2C,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI04,
+ CLK_CON_DIV_DIV_CLK_PERIC0_I2C,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI04,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_D_TZPC_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_GPIO_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_PERIC0_CMU_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_FREE_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_I2C_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_NOCP_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_SR_CLK_PERIC0_USI04_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_SLH_AXI_MI_P_PERIC0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_SYSREG_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_USI04_I2C_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_USI04_I2C_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_USI04_USI_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_USI04_USI_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_I3C00_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_I3C00_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_I3C01_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_I3C01_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_I3C02_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_I3C02_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_SR_CLK_PERIC0_I2C_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_SR_CLK_PERIC0_NOCP_IPCLKPORT_CLK,
+};
+
+PNAME(mout_peric0_ip0_user_p) = { "oscclk", "dout_cmu_peric0_ip0" };
+PNAME(mout_peric0_ip1_user_p) = { "oscclk", "dout_cmu_peric0_ip1" };
+PNAME(mout_peric0_noc_user_p) = { "oscclk", "dout_cmu_peric0_noc" };
+PNAME(mout_peric0_i2c_p) = { "oscclk", "mout_peric0_ip0_user",
+ "mout_peric0_ip0_user", "oscclk" };
+PNAME(mout_peric0_usi04_p) = { "oscclk", "mout_peric0_ip0_user",
+ "mout_peric0_ip0_user", "oscclk" };
+
+static const struct samsung_mux_clock peric0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIC0_IP0_USER, "mout_peric0_ip0_user",
+ mout_peric0_ip0_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_IP0_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC0_IP1_USER, "mout_peric0_ip1_user",
+ mout_peric0_ip1_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_IP1_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC0_NOC_USER, "mout_peric0_noc_user",
+ mout_peric0_noc_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_NOC_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC0_I2C, "mout_peric0_i2c", mout_peric0_i2c_p,
+ CLK_CON_MUX_MUX_CLK_PERIC0_I2C, 0, 2),
+ MUX(CLK_MOUT_PERIC0_USI04, "mout_peric0_usi04", mout_peric0_usi04_p,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI04, 0, 2),
+};
+
+static const struct samsung_div_clock peric0_div_clks[] __initconst = {
+ DIV(CLK_DOUT_PERIC0_I2C, "dout_peric0_i2c", "mout_peric0_i2c",
+ CLK_CON_DIV_DIV_CLK_PERIC0_I2C, 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI04, "dout_peric0_usi04", "mout_peric0_usi04",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI04, 0, 7),
+};
+
+static const struct samsung_cmu_info peric0_cmu_info __initconst = {
+ .mux_clks = peric0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric0_mux_clks),
+ .div_clks = peric0_div_clks,
+ .nr_div_clks = ARRAY_SIZE(peric0_div_clks),
+ .nr_clk_ids = CLKS_NR_PERIC0,
+ .clk_regs = peric0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric0_clk_regs),
+ .clk_name = "noc",
+};
+
+/* ---- CMU_PERIC1 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIC1 (0x10700000) */
+#define PLL_CON0_MUX_CLKCMU_PERIC1_IP0_USER 0x600
+#define PLL_CON1_MUX_CLKCMU_PERIC1_IP0_USER 0x604
+#define PLL_CON0_MUX_CLKCMU_PERIC1_IP1_USER 0x610
+#define PLL_CON1_MUX_CLKCMU_PERIC1_IP1_USER 0x614
+#define PLL_CON0_MUX_CLKCMU_PERIC1_NOC_USER 0x620
+#define PLL_CON1_MUX_CLKCMU_PERIC1_NOC_USER 0x624
+#define CLK_CON_MUX_MUX_CLK_PERIC1_I2C 0x1000
+#define CLK_CON_MUX_MUX_CLK_PERIC1_SPI_MS_CTRL 0x1004
+#define CLK_CON_MUX_MUX_CLK_PERIC1_UART_BT 0x1008
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI07 0x100c
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI07_SPI_I2C 0x1010
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI08 0x1014
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI08_SPI_I2C 0x1018
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI09 0x101c
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI10 0x1020
+#define CLK_CON_DIV_DIV_CLK_PERIC1_I2C 0x1800
+#define CLK_CON_DIV_DIV_CLK_PERIC1_SPI_MS_CTRL 0x1804
+#define CLK_CON_DIV_DIV_CLK_PERIC1_UART_BT 0x1808
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI07 0x180c
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI07_SPI_I2C 0x1810
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI08 0x1814
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI08_SPI_I2C 0x1818
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI09 0x181c
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI10 0x1820
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_BT_UART_IPCLKPORT_IPCLK 0x2000
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_BT_UART_IPCLKPORT_PCLK 0x2004
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_D_TZPC_PERIC1_IPCLKPORT_PCLK 0x2008
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_GPIO_PERIC1_IPCLKPORT_PCLK 0x200c
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_I2C_IPCLKPORT_CLK 0x2010
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_NOCP_IPCLKPORT_CLK 0x2014
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_PERIC1_CMU_PERIC1_IPCLKPORT_PCLK 0x2028
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_FREE_OSCCLK_IPCLKPORT_CLK 0x202c
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_NOCP_IPCLKPORT_CLK 0x2034
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_UART_BT_IPCLKPORT_CLK 0x2038
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_USI07_IPCLKPORT_CLK 0x203c
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_USI07_SPI_I2C_IPCLKPORT_CLK 0x2040
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_USI08_IPCLKPORT_CLK 0x2044
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_USI08_SPI_I2C_IPCLKPORT_CLK 0x2048
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_USI09_IPCLKPORT_CLK 0x204c
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_USI10_IPCLKPORT_CLK 0x2050
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_SPI_MS_CTRL_IPCLKPORT_CLK 0x2054
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_SLH_AXI_MI_P_PERIC1_IPCLKPORT_I_CLK 0x205c
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_SPI_MULTI_SLV_Q_CTRL_PERIC1_IPCLKPORT_CLK 0x2060
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK 0x2064
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI07_SPI_I2C_IPCLKPORT_IPCLK 0x2068
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI07_SPI_I2C_IPCLKPORT_PCLK 0x206c
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI07_USI_IPCLKPORT_IPCLK 0x2070
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI07_USI_IPCLKPORT_PCLK 0x2074
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI08_SPI_I2C_IPCLKPORT_IPCLK 0x2078
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI08_SPI_I2C_IPCLKPORT_PCLK 0x207c
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI08_USI_IPCLKPORT_IPCLK 0x2080
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI08_USI_IPCLKPORT_PCLK 0x2084
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI09_I2C_IPCLKPORT_IPCLK 0x2088
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI09_I2C_IPCLKPORT_PCLK 0x208c
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI09_USI_IPCLKPORT_IPCLK 0x2090
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI09_USI_IPCLKPORT_PCLK 0x2094
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI10_I2C_IPCLKPORT_IPCLK 0x2098
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI10_I2C_IPCLKPORT_PCLK 0x209c
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI10_USI_IPCLKPORT_IPCLK 0x20a0
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI10_USI_IPCLKPORT_PCLK 0x20a4
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_XIU_P_PERIC1_IPCLKPORT_ACLK 0x20a8
+
+static const unsigned long peric1_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIC1_IP0_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_IP0_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_IP1_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_IP1_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_NOC_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_NOC_USER,
+ CLK_CON_MUX_MUX_CLK_PERIC1_I2C,
+ CLK_CON_MUX_MUX_CLK_PERIC1_SPI_MS_CTRL,
+ CLK_CON_MUX_MUX_CLK_PERIC1_UART_BT,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI07,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI07_SPI_I2C,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI08,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI08_SPI_I2C,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI09,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI10,
+ CLK_CON_DIV_DIV_CLK_PERIC1_I2C,
+ CLK_CON_DIV_DIV_CLK_PERIC1_SPI_MS_CTRL,
+ CLK_CON_DIV_DIV_CLK_PERIC1_UART_BT,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI07,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI07_SPI_I2C,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI08,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI08_SPI_I2C,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI09,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI10,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_BT_UART_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_BT_UART_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_D_TZPC_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_GPIO_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_I2C_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_NOCP_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_PERIC1_CMU_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_FREE_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_NOCP_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_UART_BT_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_USI07_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_USI07_SPI_I2C_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_USI08_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_USI08_SPI_I2C_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_USI09_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_USI10_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_SR_CLK_PERIC1_SPI_MS_CTRL_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_SLH_AXI_MI_P_PERIC1_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_SPI_MULTI_SLV_Q_CTRL_PERIC1_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI07_SPI_I2C_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI07_SPI_I2C_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI07_USI_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI07_USI_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI08_SPI_I2C_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI08_SPI_I2C_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI08_USI_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI08_USI_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI09_I2C_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI09_I2C_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI09_USI_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI09_USI_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI10_I2C_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI10_I2C_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI10_USI_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_USI10_USI_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_XIU_P_PERIC1_IPCLKPORT_ACLK,
+};
+
+PNAME(mout_peric1_ip0_user_p) = { "oscclk", "dout_cmu_peric1_ip0" };
+PNAME(mout_peric1_ip1_user_p) = { "oscclk", "dout_cmu_peric1_ip1" };
+PNAME(mout_peric1_noc_user_p) = { "oscclk", "dout_cmu_peric1_noc" };
+PNAME(mout_peric1_i2c_p) = { "oscclk", "mout_peric1_ip0_user",
+ "mout_peric1_ip1_user", "oscclk" };
+PNAME(mout_peric1_spi_ms_ctrl_p) = { "oscclk", "mout_peric1_ip0_user",
+ "mout_peric1_ip1_user", "oscclk" };
+PNAME(mout_peric1_uart_bt_p) = { "oscclk", "mout_peric1_ip0_user",
+ "mout_peric1_ip1_user", "oscclk" };
+PNAME(mout_peric1_usi07_p) = { "oscclk", "mout_peric1_ip0_user",
+ "mout_peric1_ip1_user", "oscclk" };
+PNAME(mout_peric1_usi07_spi_i2c_p) = { "oscclk", "mout_peric1_ip0_user",
+ "mout_peric1_ip1_user", "oscclk" };
+PNAME(mout_peric1_usi08_p) = { "oscclk", "mout_peric1_ip0_user",
+ "mout_peric1_ip1_user", "oscclk" };
+PNAME(mout_peric1_usi08_spi_i2c_p) = { "oscclk", "mout_peric1_ip0_user",
+ "mout_peric1_ip1_user", "oscclk" };
+PNAME(mout_peric1_usi09_p) = { "oscclk", "mout_peric1_ip0_user",
+ "mout_peric1_ip1_user", "oscclk" };
+PNAME(mout_peric1_usi10_p) = { "oscclk", "mout_peric1_ip0_user",
+ "mout_peric1_ip1_user", "oscclk" };
+
+static const struct samsung_mux_clock peric1_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIC1_IP0_USER, "mout_peric1_ip0_user",
+ mout_peric1_ip0_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_IP0_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC1_IP1_USER, "mout_peric1_ip1_user",
+ mout_peric1_ip1_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_IP1_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC1_NOC_USER, "mout_peric1_noc_user",
+ mout_peric1_noc_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_NOC_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC1_I2C, "mout_peric1_i2c", mout_peric1_i2c_p,
+ CLK_CON_MUX_MUX_CLK_PERIC1_I2C, 0, 2),
+ MUX(CLK_MOUT_PERIC1_SPI_MS_CTRL, "mout_peric1_spi_ms_ctrl",
+ mout_peric1_spi_ms_ctrl_p, CLK_CON_MUX_MUX_CLK_PERIC1_SPI_MS_CTRL,
+ 0, 2),
+ MUX(CLK_MOUT_PERIC1_UART_BT, "mout_peric1_uart_bt",
+ mout_peric1_uart_bt_p, CLK_CON_MUX_MUX_CLK_PERIC1_UART_BT, 0, 2),
+ MUX(CLK_MOUT_PERIC1_USI07, "mout_peric1_usi07", mout_peric1_usi07_p,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI07, 0, 2),
+ MUX(CLK_MOUT_PERIC1_USI07_SPI_I2C, "mout_peric1_usi07_spi_i2c",
+ mout_peric1_usi07_spi_i2c_p,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI07_SPI_I2C, 0, 2),
+ MUX(CLK_MOUT_PERIC1_USI08, "mout_peric1_usi08", mout_peric1_usi08_p,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI08, 0, 2),
+ MUX(CLK_MOUT_PERIC1_USI08_SPI_I2C, "mout_peric1_usi08_spi_i2c",
+ mout_peric1_usi08_spi_i2c_p,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI08_SPI_I2C, 0, 2),
+ MUX(CLK_MOUT_PERIC1_USI09, "mout_peric1_usi09", mout_peric1_usi09_p,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI09, 0, 2),
+ MUX(CLK_MOUT_PERIC1_USI10, "mout_peric1_usi10", mout_peric1_usi10_p,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI10, 0, 2),
+};
+
+static const struct samsung_div_clock peric1_div_clks[] __initconst = {
+ DIV(CLK_DOUT_PERIC1_I2C, "dout_peric1_i2c", "mout_peric1_i2c",
+ CLK_CON_DIV_DIV_CLK_PERIC1_I2C, 0, 4),
+ DIV(CLK_DOUT_PERIC1_SPI_MS_CTRL, "dout_peric1_spi_ms_ctrl",
+ "mout_peric1_spi_ms_ctrl", CLK_CON_DIV_DIV_CLK_PERIC1_SPI_MS_CTRL,
+ 0, 7),
+ DIV(CLK_DOUT_PERIC1_UART_BT, "dout_peric1_uart_bt",
+ "mout_peric1_uart_bt", CLK_CON_DIV_DIV_CLK_PERIC1_UART_BT, 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI07, "dout_peric1_usi07", "mout_peric1_usi07",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI07, 0, 7),
+ DIV(CLK_DOUT_PERIC1_USI07_SPI_I2C, "dout_peric1_usi07_spi_i2c",
+ "mout_peric1_usi07_spi_i2c",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI07_SPI_I2C, 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI08, "dout_peric1_usi08", "mout_peric1_usi08",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI08, 0, 7),
+ DIV(CLK_DOUT_PERIC1_USI08_SPI_I2C, "dout_peric1_usi08_spi_i2c",
+ "mout_peric1_usi08_spi_i2c",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI08_SPI_I2C, 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI09, "dout_peric1_usi09", "mout_peric1_usi09",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI09, 0, 7),
+ DIV(CLK_DOUT_PERIC1_USI10, "dout_peric1_usi10", "mout_peric1_usi10",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI10, 0, 7),
+};
+
+static const struct samsung_cmu_info peric1_cmu_info __initconst = {
+ .mux_clks = peric1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric1_mux_clks),
+ .div_clks = peric1_div_clks,
+ .nr_div_clks = ARRAY_SIZE(peric1_div_clks),
+ .nr_clk_ids = CLKS_NR_PERIC1,
+ .clk_regs = peric1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric1_clk_regs),
+ .clk_name = "noc",
+};
+
+/* ---- CMU_PERIC2 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIC2 (0x11c00000) */
+#define PLL_CON0_MUX_CLKCMU_PERIC2_IP0_USER 0x600
+#define PLL_CON1_MUX_CLKCMU_PERIC2_IP0_USER 0x604
+#define PLL_CON0_MUX_CLKCMU_PERIC2_IP1_USER 0x610
+#define PLL_CON1_MUX_CLKCMU_PERIC2_IP1_USER 0x614
+#define PLL_CON0_MUX_CLKCMU_PERIC2_NOC_USER 0x620
+#define PLL_CON1_MUX_CLKCMU_PERIC2_NOC_USER 0x624
+#define CLK_CON_MUX_MUX_CLK_PERIC2_I2C 0x1000
+#define CLK_CON_MUX_MUX_CLK_PERIC2_SPI_MS_CTRL 0x1004
+#define CLK_CON_MUX_MUX_CLK_PERIC2_UART_DBG 0x1008
+#define CLK_CON_MUX_MUX_CLK_PERIC2_USI00 0x100c
+#define CLK_CON_MUX_MUX_CLK_PERIC2_USI00_SPI_I2C 0x1010
+#define CLK_CON_MUX_MUX_CLK_PERIC2_USI01 0x1014
+#define CLK_CON_MUX_MUX_CLK_PERIC2_USI01_SPI_I2C 0x1018
+#define CLK_CON_MUX_MUX_CLK_PERIC2_USI02 0x101c
+#define CLK_CON_MUX_MUX_CLK_PERIC2_USI03 0x1020
+#define CLK_CON_MUX_MUX_CLK_PERIC2_USI05 0x1024
+#define CLK_CON_MUX_MUX_CLK_PERIC2_USI06 0x1028
+#define CLK_CON_MUX_MUX_CLK_PERIC2_USI11 0x102c
+#define CLK_CON_DIV_DIV_CLK_PERIC2_I2C 0x1800
+#define CLK_CON_DIV_DIV_CLK_PERIC2_SPI_MS_CTRL 0x1804
+#define CLK_CON_DIV_DIV_CLK_PERIC2_UART_DBG 0x1808
+#define CLK_CON_DIV_DIV_CLK_PERIC2_USI00 0x180c
+#define CLK_CON_DIV_DIV_CLK_PERIC2_USI00_SPI_I2C 0x1810
+#define CLK_CON_DIV_DIV_CLK_PERIC2_USI01 0x1814
+#define CLK_CON_DIV_DIV_CLK_PERIC2_USI01_SPI_I2C 0x1818
+#define CLK_CON_DIV_DIV_CLK_PERIC2_USI02 0x181c
+#define CLK_CON_DIV_DIV_CLK_PERIC2_USI03 0x1820
+#define CLK_CON_DIV_DIV_CLK_PERIC2_USI05 0x1824
+#define CLK_CON_DIV_DIV_CLK_PERIC2_USI06 0x1828
+#define CLK_CON_DIV_DIV_CLK_PERIC2_USI11 0x182c
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_DBG_UART_IPCLKPORT_IPCLK 0x2000
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_DBG_UART_IPCLKPORT_PCLK 0x2004
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_D_TZPC_PERIC2_IPCLKPORT_PCLK 0x2008
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_GPIO_PERIC2_IPCLKPORT_PCLK 0x200c
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C03_OIS_IPCLKPORT_I_PCLK 0x2010
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C03_OIS_IPCLKPORT_I_SCLK 0x2014
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C04_IPCLKPORT_I_PCLK 0x2018
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C04_IPCLKPORT_I_SCLK 0x201c
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C05_IPCLKPORT_I_PCLK 0x2020
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C05_IPCLKPORT_I_SCLK 0x2024
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C06_IPCLKPORT_I_PCLK 0x2028
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C06_IPCLKPORT_I_SCLK 0x202c
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C07_IPCLKPORT_I_PCLK 0x2030
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C07_IPCLKPORT_I_SCLK 0x2034
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C08_IPCLKPORT_I_PCLK 0x2038
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C08_IPCLKPORT_I_SCLK 0x203c
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C09_IPCLKPORT_I_PCLK 0x2040
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C09_IPCLKPORT_I_SCLK 0x2044
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C10_IPCLKPORT_I_PCLK 0x2048
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C10_IPCLKPORT_I_SCLK 0x204c
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C11_IPCLKPORT_I_PCLK 0x2050
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C11_IPCLKPORT_I_SCLK 0x2054
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_PERIC2_CMU_PERIC2_IPCLKPORT_PCLK 0x2058
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_PWM_IPCLKPORT_I_PCLK_S0 0x205c
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_CLK_PERIC2_FREE_OSCCLK_IPCLKPORT_CLK 0x2060
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_CLK_PERIC2_I2C_IPCLKPORT_CLK 0x2064
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_CLK_PERIC2_NOCP_IPCLKPORT_CLK 0x2068
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_UART_DBG_IPCLKPORT_CLK 0x206c
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI00_IPCLKPORT_CLK 0x2070
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI00_SPI_I2C_IPCLKPORT_CLK 0x2074
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI01_IPCLKPORT_CLK 0x2078
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI01_SPI_I2C_IPCLKPORT_CLK 0x207c
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI02_IPCLKPORT_CLK 0x2080
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI03_IPCLKPORT_CLK 0x2084
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI05_IPCLKPORT_CLK 0x2088
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI06_IPCLKPORT_CLK 0x208c
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI11_IPCLKPORT_CLK 0x2090
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_SPI_MS_CTRL_IPCLKPORT_CLK 0x2094
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_SLH_AXI_MI_P_PERIC2_IPCLKPORT_I_CLK 0x2098
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_SPI_MULTI_SLV_Q_CTRL_PERIC2_IPCLKPORT_CLK 0x209c
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_SYSREG_PERIC2_IPCLKPORT_PCLK 0x20a0
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI00_SPI_I2C_IPCLKPORT_IPCLK 0x20a4
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI00_SPI_I2C_IPCLKPORT_PCLK 0x20a8
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI00_USI_IPCLKPORT_IPCLK 0x20ac
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI00_USI_IPCLKPORT_PCLK 0x20b0
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI01_SPI_I2C_IPCLKPORT_IPCLK 0x20b4
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI01_SPI_I2C_IPCLKPORT_PCLK 0x20b8
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI01_USI_IPCLKPORT_IPCLK 0x20bc
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI01_USI_IPCLKPORT_PCLK 0x20c0
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI02_I2C_IPCLKPORT_IPCLK 0x20c4
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI02_I2C_IPCLKPORT_PCLK 0x20c8
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI02_USI_IPCLKPORT_IPCLK 0x20cc
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI02_USI_IPCLKPORT_PCLK 0x20d0
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI03_I2C_IPCLKPORT_IPCLK 0x20d4
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI03_I2C_IPCLKPORT_PCLK 0x20d8
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI03_USI_IPCLKPORT_IPCLK 0x20dc
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI03_USI_IPCLKPORT_PCLK 0x20e0
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI05_I2C_IPCLKPORT_IPCLK 0x20e4
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI05_I2C_IPCLKPORT_PCLK 0x20e8
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI05_USI_OIS_IPCLKPORT_IPCLK 0x20ec
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI05_USI_OIS_IPCLKPORT_PCLK 0x20f0
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI06_I2C_IPCLKPORT_IPCLK 0x20f4
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI06_I2C_IPCLKPORT_PCLK 0x20f8
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI06_USI_OIS_IPCLKPORT_IPCLK 0x20fc
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI06_USI_OIS_IPCLKPORT_PCLK 0x2100
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI11_I2C_IPCLKPORT_IPCLK 0x2104
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI11_I2C_IPCLKPORT_PCLK 0x2108
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI11_USI_IPCLKPORT_IPCLK 0x210c
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI11_USI_IPCLKPORT_PCLK 0x2110
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_I2C_IPCLKPORT_CLK 0x2114
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_NOCP_IPCLKPORT_CLK 0x2118
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_SLH_AXI_MI_LP_CSISPERIC2_IPCLKPORT_I_CLK 0x211c
+#define CLK_CON_GAT_CLK_BLK_PERIC2_UID_XIU_P_PERIC2_IPCLKPORT_ACLK 0x2120
+
+static const unsigned long peric2_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIC2_IP0_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC2_IP0_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC2_IP1_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC2_IP1_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC2_NOC_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC2_NOC_USER,
+ CLK_CON_MUX_MUX_CLK_PERIC2_I2C,
+ CLK_CON_MUX_MUX_CLK_PERIC2_SPI_MS_CTRL,
+ CLK_CON_MUX_MUX_CLK_PERIC2_UART_DBG,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI00,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI00_SPI_I2C,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI01,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI01_SPI_I2C,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI02,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI03,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI05,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI06,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI11,
+ CLK_CON_DIV_DIV_CLK_PERIC2_I2C,
+ CLK_CON_DIV_DIV_CLK_PERIC2_SPI_MS_CTRL,
+ CLK_CON_DIV_DIV_CLK_PERIC2_UART_DBG,
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI00,
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI00_SPI_I2C,
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI01,
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI01_SPI_I2C,
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI02,
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI03,
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI05,
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI06,
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI11,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_DBG_UART_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_DBG_UART_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_D_TZPC_PERIC2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_GPIO_PERIC2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C03_OIS_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C03_OIS_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C04_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C04_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C05_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C05_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C06_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C06_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C07_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C07_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C08_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C08_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C09_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C09_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C10_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C10_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C11_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_I3C11_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_PERIC2_CMU_PERIC2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_PWM_IPCLKPORT_I_PCLK_S0,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_CLK_PERIC2_FREE_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_CLK_PERIC2_I2C_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_CLK_PERIC2_NOCP_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_UART_DBG_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI00_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI00_SPI_I2C_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI01_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI01_SPI_I2C_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI02_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI03_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI05_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI06_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_USI11_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_SPI_MS_CTRL_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_SLH_AXI_MI_P_PERIC2_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_SPI_MULTI_SLV_Q_CTRL_PERIC2_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_SYSREG_PERIC2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI00_SPI_I2C_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI00_SPI_I2C_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI00_USI_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI00_USI_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI01_SPI_I2C_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI01_SPI_I2C_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI01_USI_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI01_USI_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI02_I2C_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI02_I2C_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI02_USI_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI02_USI_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI03_I2C_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI03_I2C_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI03_USI_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI03_USI_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI05_I2C_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI05_I2C_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI05_USI_OIS_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI05_USI_OIS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI06_I2C_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI06_I2C_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI06_USI_OIS_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI06_USI_OIS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI11_I2C_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI11_I2C_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI11_USI_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_USI11_USI_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_I2C_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_RSTNSYNC_SR_CLK_PERIC2_NOCP_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_SLH_AXI_MI_LP_CSISPERIC2_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC2_UID_XIU_P_PERIC2_IPCLKPORT_ACLK,
+};
+
+PNAME(mout_peric2_ip0_user_p) = { "oscclk",
+ "dout_cmu_peric2_ip0" };
+PNAME(mout_peric2_ip1_user_p) = { "oscclk",
+ "dout_cmu_peric2_ip1" };
+PNAME(mout_peric2_noc_user_p) = { "oscclk",
+ "dout_cmu_peric2_noc" };
+PNAME(mout_peric2_i2c_p) = { "oscclk",
+ "mout_peric2_ip0_user",
+ "mout_peric2_ip1_user",
+ "oscclk" };
+PNAME(mout_peric2_spi_ms_ctrl_p) = { "oscclk",
+ "mout_peric2_ip0_user",
+ "mout_peric2_ip1_user",
+ "oscclk" };
+PNAME(mout_peric2_uart_dbg_p) = { "oscclk",
+ "mout_peric2_ip0_user",
+ "mout_peric2_ip1_user",
+ "oscclk" };
+PNAME(mout_peric2_usi00_p) = { "oscclk",
+ "mout_peric2_ip0_user",
+ "mout_peric2_ip1_user",
+ "oscclk" };
+PNAME(mout_peric2_usi00_spi_i2c_p) = { "oscclk",
+ "mout_peric2_ip0_user",
+ "mout_peric2_ip1_user",
+ "oscclk" };
+PNAME(mout_peric2_usi01_p) = { "oscclk",
+ "mout_peric2_ip0_user",
+ "mout_peric2_ip1_user",
+ "oscclk" };
+PNAME(mout_peric2_usi01_spi_i2c_p) = { "oscclk",
+ "mout_peric2_ip0_user",
+ "mout_peric2_ip1_user",
+ "oscclk" };
+PNAME(mout_peric2_usi02_p) = { "oscclk",
+ "mout_peric2_ip0_user",
+ "mout_peric2_ip1_user",
+ "oscclk" };
+PNAME(mout_peric2_usi03_p) = { "oscclk",
+ "mout_peric2_ip0_user",
+ "mout_peric2_ip1_user",
+ "oscclk" };
+PNAME(mout_peric2_usi05_p) = { "oscclk",
+ "mout_peric2_ip0_user",
+ "mout_peric2_ip1_user",
+ "oscclk" };
+PNAME(mout_peric2_usi06_p) = { "oscclk",
+ "mout_peric2_ip0_user",
+ "mout_peric2_ip1_user",
+ "oscclk" };
+PNAME(mout_peric2_usi11_p) = { "oscclk",
+ "mout_peric2_ip0_user",
+ "mout_peric2_ip1_user",
+ "oscclk" };
+
+static const struct samsung_mux_clock peric2_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIC2_IP0_USER, "mout_peric2_ip0_user",
+ mout_peric2_ip0_user_p, PLL_CON0_MUX_CLKCMU_PERIC2_IP0_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC2_IP1_USER, "mout_peric2_ip1_user",
+ mout_peric2_ip1_user_p, PLL_CON0_MUX_CLKCMU_PERIC2_IP1_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC2_NOC_USER, "mout_peric2_noc_user",
+ mout_peric2_noc_user_p, PLL_CON0_MUX_CLKCMU_PERIC2_NOC_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC2_I2C, "mout_peric2_i2c", mout_peric2_i2c_p,
+ CLK_CON_MUX_MUX_CLK_PERIC2_I2C, 0, 2),
+ MUX(CLK_MOUT_PERIC2_SPI_MS_CTRL, "mout_peric2_spi_ms_ctrl",
+ mout_peric2_spi_ms_ctrl_p,
+ CLK_CON_MUX_MUX_CLK_PERIC2_SPI_MS_CTRL, 0, 2),
+ MUX(CLK_MOUT_PERIC2_UART_DBG, "mout_peric2_uart_dbg",
+ mout_peric2_uart_dbg_p, CLK_CON_MUX_MUX_CLK_PERIC2_UART_DBG, 0, 2),
+ MUX(CLK_MOUT_PERIC2_USI00, "mout_peric2_usi00", mout_peric2_usi00_p,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI00, 0, 2),
+ MUX(CLK_MOUT_PERIC2_USI00_SPI_I2C, "mout_peric2_usi00_spi_i2c",
+ mout_peric2_usi00_spi_i2c_p, CLK_CON_MUX_MUX_CLK_PERIC2_USI00_SPI_I2C,
+ 0, 2),
+ MUX(CLK_MOUT_PERIC2_USI01, "mout_peric2_usi01", mout_peric2_usi01_p,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI01, 0, 2),
+ MUX(CLK_MOUT_PERIC2_USI01_SPI_I2C, "mout_peric2_usi01_spi_i2c",
+ mout_peric2_usi01_spi_i2c_p,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI01_SPI_I2C, 0, 2),
+ MUX(CLK_MOUT_PERIC2_USI02, "mout_peric2_usi02", mout_peric2_usi02_p,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI02, 0, 2),
+ MUX(CLK_MOUT_PERIC2_USI03, "mout_peric2_usi03", mout_peric2_usi03_p,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI03, 0, 2),
+ MUX(CLK_MOUT_PERIC2_USI05, "mout_peric2_usi05", mout_peric2_usi05_p,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI05, 0, 2),
+ MUX(CLK_MOUT_PERIC2_USI06, "mout_peric2_usi06", mout_peric2_usi06_p,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI06, 0, 2),
+ MUX(CLK_MOUT_PERIC2_USI11, "mout_peric2_usi11", mout_peric2_usi11_p,
+ CLK_CON_MUX_MUX_CLK_PERIC2_USI11, 0, 2),
+};
+
+static const struct samsung_div_clock peric2_div_clks[] __initconst = {
+ DIV(CLK_DOUT_PERIC2_I2C, "dout_peric2_i2c", "mout_peric2_i2c",
+ CLK_CON_DIV_DIV_CLK_PERIC2_I2C, 0, 4),
+ DIV(CLK_DOUT_PERIC2_SPI_MS_CTRL, "dout_peric2_spi_ms_ctrl",
+ "mout_peric2_spi_ms_ctrl", CLK_CON_DIV_DIV_CLK_PERIC2_SPI_MS_CTRL,
+ 0, 7),
+ DIV(CLK_DOUT_PERIC2_UART_DBG, "dout_peric2_uart_dbg",
+ "mout_peric2_uart_dbg", CLK_CON_DIV_DIV_CLK_PERIC2_UART_DBG, 0, 4),
+ DIV(CLK_DOUT_PERIC2_USI00, "dout_peric2_usi00", "mout_peric2_usi00",
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI00, 0, 7),
+ DIV(CLK_DOUT_PERIC2_USI00_SPI_I2C, "dout_peric2_usi00_spi_i2c",
+ "mout_peric2_usi00_spi_i2c",
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI00_SPI_I2C, 0, 4),
+ DIV(CLK_DOUT_PERIC2_USI01, "dout_peric2_usi01", "mout_peric2_usi01",
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI01, 0, 7),
+ DIV(CLK_DOUT_PERIC2_USI01_SPI_I2C, "dout_peric2_usi01_spi_i2c",
+ "mout_peric2_usi01_spi_i2c",
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI01_SPI_I2C, 0, 4),
+ DIV(CLK_DOUT_PERIC2_USI02, "dout_peric2_usi02", "mout_peric2_usi02",
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI02, 0, 7),
+ DIV(CLK_DOUT_PERIC2_USI03, "dout_peric2_usi03", "mout_peric2_usi03",
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI03, 0, 7),
+ DIV(CLK_DOUT_PERIC2_USI05, "dout_peric2_usi05", "mout_peric2_usi05",
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI05, 0, 7),
+ DIV(CLK_DOUT_PERIC2_USI06, "dout_peric2_usi06", "mout_peric2_usi06",
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI06, 0, 7),
+ DIV(CLK_DOUT_PERIC2_USI11, "dout_peric2_usi11", "mout_peric2_usi11",
+ CLK_CON_DIV_DIV_CLK_PERIC2_USI11, 0, 7),
+};
+
+static const struct samsung_cmu_info peric2_cmu_info __initconst = {
+ .mux_clks = peric2_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric2_mux_clks),
+ .div_clks = peric2_div_clks,
+ .nr_div_clks = ARRAY_SIZE(peric2_div_clks),
+ .nr_clk_ids = CLKS_NR_PERIC2,
+ .clk_regs = peric2_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric2_clk_regs),
+ .clk_name = "noc",
+};
+
+/* ---- CMU_UFS ------------------------------------------------------------ */
+
+/* Register Offset definitions for CMU_UFS (0x11000000) */
+#define PLL_CON0_MUX_CLKCMU_UFS_MMC_CARD_USER 0x600
+#define PLL_CON1_MUX_CLKCMU_UFS_MMC_CARD_USER 0x604
+#define PLL_CON0_MUX_CLKCMU_UFS_NOC_USER 0x610
+#define PLL_CON1_MUX_CLKCMU_UFS_NOC_USER 0x614
+#define PLL_CON0_MUX_CLKCMU_UFS_UFS_EMBD_USER 0x620
+#define PLL_CON1_MUX_CLKCMU_UFS_UFS_EMBD_USER 0x624
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_BLK_UFS_FRC_OTP_DESERIAL_IPCLKPORT_I_CLK 0x2000
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_D_TZPC_UFS_IPCLKPORT_PCLK 0x2004
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_GPIO_HSI1UFS_IPCLKPORT_PCLK 0x2008
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_GPIO_UFS_IPCLKPORT_PCLK 0x200c
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_LH_ACEL_SI_D_UFS_IPCLKPORT_I_CLK 0x2010
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_MMC_CARD_IPCLKPORT_I_ACLK 0x2014
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_MMC_CARD_IPCLKPORT_SDCLKIN 0x2018
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_OTP_DESERIAL_UFS_EMBD_FMP_IPCLKPORT_I_CLK 0x201c
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_PPMU_UFS_IPCLKPORT_ACLK 0x2024
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_PPMU_UFS_IPCLKPORT_PCLK 0x2028
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_RSTNSYNC_CLK_UFS_FREE_OSCCLK_IPCLKPORT_CLK 0x202c
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_RSTNSYNC_CLK_UFS_MMC_CARD_IPCLKPORT_CLK 0x2030
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_RSTNSYNC_CLK_UFS_NOC_IPCLKPORT_CLK 0x2034
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_RSTNSYNC_CLK_UFS_UFS_EMBD_IPCLKPORT_CLK 0x2038
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_RSTNSYNC_SR_CLK_UFS_FREE_OSCCLK_IPCLKPORT_CLK 0x203c
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_RSTNSYNC_SR_CLK_UFS_NOC_IPCLKPORT_CLK 0x2040
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_SLH_AST_SI_G_PPMU_UFS_IPCLKPORT_I_CLK 0x2044
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_SLH_AXI_MI_P_UFS_IPCLKPORT_I_CLK 0x2048
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_SPC_UFS_IPCLKPORT_PCLK 0x204c
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_SYSMMU_UFS_IPCLKPORT_CLK_S2 0x2050
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_SYSREG_UFS_IPCLKPORT_PCLK 0x2054
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_UFS_CMU_UFS_IPCLKPORT_PCLK 0x2058
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_UFS_EMBD_IPCLKPORT_I_ACLK 0x205c
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_UFS_EMBD_IPCLKPORT_I_CLK_UNIPRO 0x2060
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_UFS_EMBD_IPCLKPORT_I_FMP_CLK 0x2064
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_VGEN_LITE_UFS_IPCLKPORT_CLK 0x2068
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_XIU_D_UFS_IPCLKPORT_ACLK 0x206c
+#define CLK_CON_GAT_CLK_BLK_UFS_UID_XIU_P_UFS_IPCLKPORT_ACLK 0x2070
+
+static const unsigned long ufs_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_UFS_MMC_CARD_USER,
+ PLL_CON1_MUX_CLKCMU_UFS_MMC_CARD_USER,
+ PLL_CON0_MUX_CLKCMU_UFS_NOC_USER,
+ PLL_CON1_MUX_CLKCMU_UFS_NOC_USER,
+ PLL_CON0_MUX_CLKCMU_UFS_UFS_EMBD_USER,
+ PLL_CON1_MUX_CLKCMU_UFS_UFS_EMBD_USER,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_BLK_UFS_FRC_OTP_DESERIAL_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_D_TZPC_UFS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_GPIO_HSI1UFS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_GPIO_UFS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_LH_ACEL_SI_D_UFS_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_MMC_CARD_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_MMC_CARD_IPCLKPORT_SDCLKIN,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_OTP_DESERIAL_UFS_EMBD_FMP_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_PPMU_UFS_IPCLKPORT_ACLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_PPMU_UFS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_RSTNSYNC_CLK_UFS_FREE_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_RSTNSYNC_CLK_UFS_MMC_CARD_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_RSTNSYNC_CLK_UFS_NOC_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_RSTNSYNC_CLK_UFS_UFS_EMBD_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_RSTNSYNC_SR_CLK_UFS_FREE_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_RSTNSYNC_SR_CLK_UFS_NOC_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_SLH_AST_SI_G_PPMU_UFS_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_SLH_AXI_MI_P_UFS_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_SPC_UFS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_SYSMMU_UFS_IPCLKPORT_CLK_S2,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_SYSREG_UFS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_UFS_CMU_UFS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_UFS_EMBD_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_UFS_EMBD_IPCLKPORT_I_CLK_UNIPRO,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_UFS_EMBD_IPCLKPORT_I_FMP_CLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_VGEN_LITE_UFS_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_XIU_D_UFS_IPCLKPORT_ACLK,
+ CLK_CON_GAT_CLK_BLK_UFS_UID_XIU_P_UFS_IPCLKPORT_ACLK,
+};
+
+PNAME(mout_clkcmu_ufs_mmc_card_user_p) = { "oscclk",
+ "mout_cmu_ufs_mmc_card" };
+PNAME(mout_clkcmu_ufs_noc_user_p) = { "oscclk", "dout_cmu_ufs_noc" };
+PNAME(mout_clkcmu_ufs_ufs_embd_user_p) = { "oscclk",
+ "dout_cmu_ufs_ufs_embd" };
+
+static const struct samsung_mux_clock ufs_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_UFS_MMC_CARD_USER, "mout_ufs_mmc_card_user",
+ mout_clkcmu_ufs_mmc_card_user_p,
+ PLL_CON0_MUX_CLKCMU_UFS_MMC_CARD_USER, 4, 1),
+ MUX(CLK_MOUT_UFS_NOC_USER, "mout_ufs_noc_user",
+ mout_clkcmu_ufs_noc_user_p,
+ PLL_CON0_MUX_CLKCMU_UFS_NOC_USER, 4, 1),
+ MUX(CLK_MOUT_UFS_UFS_EMBD_USER, "mout_ufs_ufs_embd_user",
+ mout_clkcmu_ufs_ufs_embd_user_p,
+ PLL_CON0_MUX_CLKCMU_UFS_UFS_EMBD_USER, 4, 1),
+};
+
+static const struct samsung_cmu_info ufs_cmu_info __initconst = {
+ .mux_clks = ufs_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(ufs_mux_clks),
+ .nr_clk_ids = CLKS_NR_UFS,
+ .clk_regs = ufs_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(ufs_clk_regs),
+ .clk_name = "noc",
+};
+
+/* ---- CMU_VTS ------------------------------------------------------------ */
+
+/* Register Offset definitions for CMU_VTS (0x15300000) */
+#define PLL_CON0_MUX_CLKALIVE_VTS_NOC_USER 0x600
+#define PLL_CON1_MUX_CLKALIVE_VTS_NOC_USER 0x604
+#define PLL_CON0_MUX_CLKALIVE_VTS_RCO_USER 0x610
+#define PLL_CON1_MUX_CLKALIVE_VTS_RCO_USER 0x614
+#define PLL_CON0_MUX_CLKCMU_VTS_DMIC_USER 0x620
+#define PLL_CON1_MUX_CLKCMU_VTS_DMIC_USER 0x624
+#define CLK_CON_MUX_MUX_CLKVTS_AUD_DMIC1 0x1000
+#define CLK_CON_MUX_MUX_CLK_VTS_NOC 0x1004
+#define CLK_CON_MUX_MUX_CLK_VTS_DMIC_PAD 0x100c
+#define CLK_CON_DIV_DIV_CLKVTS_AUD_DMIC0 0x1800
+#define CLK_CON_DIV_DIV_CLKVTS_AUD_DMIC1 0x1804
+#define CLK_CON_DIV_DIV_CLK_VTS_CPU 0x1808
+#define CLK_CON_DIV_DIV_CLK_VTS_DMIC_IF 0x1814
+#define CLK_CON_DIV_DIV_CLK_VTS_DMIC_IF_DIV2 0x1818
+#define CLK_CON_DIV_DIV_CLK_VTS_NOC 0x181c
+#define CLK_CON_DIV_DIV_CLK_VTS_SERIAL_LIF 0x1820
+#define CLK_CON_DIV_DIV_CLK_VTS_SERIAL_LIF_CORE 0x1824
+#define CLK_CON_GAT_CLKVTS_AUD_DMIC0 0x2000
+#define CLK_CON_GAT_CLKVTS_AUD_DMIC1 0x2004
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_BAAW_VTS_IPCLKPORT_I_PCLK 0x2008
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_LH_AXI_MI_IP_VC2VTS_IPCLKPORT_I_CLK 0x200c
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_LH_AXI_SI_ID_VTS2VC_IPCLKPORT_I_CLK 0x2010
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_CLK_VTS_FREE_OSCCLK_IPCLKPORT_CLK 0x2014
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_SR_CLK_VTS_DMIC_IF_IPCLKPORT_CLK 0x2018
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_SR_CLK_VTS_SERIAL_LIF_CORE_IPCLKPORT_CLK 0x2020
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_SR_CLK_VTS_SERIAL_LIF_IPCLKPORT_CLK 0x2024
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_SR_CLK_VTS_YAMIN_IPCLKPORT_CLK 0x2028
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_SERIAL_LIF_VT_IPCLKPORT_ACLK 0x202c
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_SS_VTS_FRC_OTP_DESERIAL_IPCLKPORT_I_CLK 0x2030
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_ASYNCINTERRUPT_VTS_IPCLKPORT_CLK 0x2034
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF0_IPCLKPORT_DMIC_IF_CLK 0x2068
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF0_IPCLKPORT_DMIC_IF_DIV2_CLK 0x206c
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF0_IPCLKPORT_PCLK 0x2070
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF1_IPCLKPORT_DMIC_IF_CLK 0x2074
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF1_IPCLKPORT_DMIC_IF_DIV2_CLK 0x2078
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF1_IPCLKPORT_PCLK 0x207c
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF2_IPCLKPORT_DMIC_IF_CLK 0x2080
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF2_IPCLKPORT_DMIC_IF_DIV2_CLK 0x2084
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF2_IPCLKPORT_PCLK 0x2088
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_GPIO_VTS_IPCLKPORT_PCLK 0x2090
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_CODE_IPCLKPORT_I_ACLK 0x20ac
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_CODE_IPCLKPORT_I_PCLK 0x20b0
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_DATA0_IPCLKPORT_I_ACLK 0x20b4
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_DATA0_IPCLKPORT_I_PCLK 0x20b8
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_DATA1_IPCLKPORT_I_ACLK 0x20bc
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_DATA1_IPCLKPORT_I_PCLK 0x20c0
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_PCM_IPCLKPORT_I_ACLK 0x20c4
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_PCM_IPCLKPORT_I_PCLK 0x20c8
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_MAILBOX_ABOX_VTS_IPCLKPORT_PCLK 0x20cc
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_MAILBOX_AP_VTS_IPCLKPORT_PCLK 0x20d0
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_MAILBOX_DNC_VTS_IPCLKPORT_PCLK 0x20d4
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_CLK_VTS_NOC_IPCLKPORT_CLK 0x20ec
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_CLK_VTS_YAMIN_IPCLKPORT_CLK 0x20f8
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_SR_CLK_VTS_NOC_IPCLKPORT_CLK 0x20fc
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_SERIAL_LIF_VT_IPCLKPORT_BCLK 0x2104
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_SERIAL_LIF_VT_IPCLKPORT_CCLK 0x2108
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_SERIAL_LIF_VT_IPCLKPORT_PCLK 0x210c
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_SS_VTS_GLUE_IPCLKPORT_ACLK_CPU 0x2124
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_SS_VTS_GLUE_IPCLKPORT_DMIC_AUD_DIV2_CLK 0x2128
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_SS_VTS_GLUE_IPCLKPORT_DMIC_IF_PAD_CLK0 0x212c
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_SS_VTS_GLUE_IPCLKPORT_DMIC_IF_PAD_CLK1 0x2130
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_SS_VTS_GLUE_IPCLKPORT_DMIC_IF_PAD_CLK2 0x2134
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_SYSREG_VTS_IPCLKPORT_PCLK 0x213c
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_TIMER1_IPCLKPORT_PCLK 0x2140
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_TIMER2_IPCLKPORT_PCLK 0x2144
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_TIMER_IPCLKPORT_PCLK 0x2148
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_VTS_CMU_VTS_IPCLKPORT_PCLK 0x2150
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_WDT_VTS_IPCLKPORT_PCLK 0x2154
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_XIU_DP0_VTS_IPCLKPORT_ACLK 0x2158
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_YAMIN_MCU_VTS_IPCLKPORT_CLKIN 0x215c
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_YAMIN_MCU_VTS_IPCLKPORT_DBGCLK 0x2160
+#define CLK_CON_GAT_CLK_BLK_VTS_UID_YAMIN_MCU_VTS_IPCLKPORT_IWICCLK 0x2164
+
+static const unsigned long vts_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKALIVE_VTS_NOC_USER,
+ PLL_CON1_MUX_CLKALIVE_VTS_NOC_USER,
+ PLL_CON0_MUX_CLKALIVE_VTS_RCO_USER,
+ PLL_CON1_MUX_CLKALIVE_VTS_RCO_USER,
+ PLL_CON0_MUX_CLKCMU_VTS_DMIC_USER,
+ PLL_CON1_MUX_CLKCMU_VTS_DMIC_USER,
+ CLK_CON_MUX_MUX_CLKVTS_AUD_DMIC1,
+ CLK_CON_MUX_MUX_CLK_VTS_NOC,
+ CLK_CON_MUX_MUX_CLK_VTS_DMIC_PAD,
+ CLK_CON_DIV_DIV_CLKVTS_AUD_DMIC0,
+ CLK_CON_DIV_DIV_CLKVTS_AUD_DMIC1,
+ CLK_CON_DIV_DIV_CLK_VTS_CPU,
+ CLK_CON_DIV_DIV_CLK_VTS_DMIC_IF,
+ CLK_CON_DIV_DIV_CLK_VTS_DMIC_IF_DIV2,
+ CLK_CON_DIV_DIV_CLK_VTS_NOC,
+ CLK_CON_DIV_DIV_CLK_VTS_SERIAL_LIF,
+ CLK_CON_DIV_DIV_CLK_VTS_SERIAL_LIF_CORE,
+ CLK_CON_GAT_CLKVTS_AUD_DMIC0,
+ CLK_CON_GAT_CLKVTS_AUD_DMIC1,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_BAAW_VTS_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_LH_AXI_MI_IP_VC2VTS_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_LH_AXI_SI_ID_VTS2VC_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_CLK_VTS_FREE_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_SR_CLK_VTS_DMIC_IF_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_SR_CLK_VTS_SERIAL_LIF_CORE_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_SR_CLK_VTS_SERIAL_LIF_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_SR_CLK_VTS_YAMIN_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_SERIAL_LIF_VT_IPCLKPORT_ACLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_SS_VTS_FRC_OTP_DESERIAL_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_ASYNCINTERRUPT_VTS_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF0_IPCLKPORT_DMIC_IF_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF0_IPCLKPORT_DMIC_IF_DIV2_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF1_IPCLKPORT_DMIC_IF_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF1_IPCLKPORT_DMIC_IF_DIV2_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF2_IPCLKPORT_DMIC_IF_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF2_IPCLKPORT_DMIC_IF_DIV2_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_DMIC_IF2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_GPIO_VTS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_CODE_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_CODE_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_DATA0_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_DATA0_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_DATA1_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_DATA1_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_PCM_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_INTMEM_PCM_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_MAILBOX_ABOX_VTS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_MAILBOX_AP_VTS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_MAILBOX_DNC_VTS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_CLK_VTS_NOC_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_CLK_VTS_YAMIN_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_RSTNSYNC_SR_CLK_VTS_NOC_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_SERIAL_LIF_VT_IPCLKPORT_BCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_SERIAL_LIF_VT_IPCLKPORT_CCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_SERIAL_LIF_VT_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_SS_VTS_GLUE_IPCLKPORT_ACLK_CPU,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_SS_VTS_GLUE_IPCLKPORT_DMIC_AUD_DIV2_CLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_SS_VTS_GLUE_IPCLKPORT_DMIC_IF_PAD_CLK0,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_SS_VTS_GLUE_IPCLKPORT_DMIC_IF_PAD_CLK1,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_SS_VTS_GLUE_IPCLKPORT_DMIC_IF_PAD_CLK2,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_SYSREG_VTS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_TIMER1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_TIMER2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_TIMER_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_VTS_CMU_VTS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_WDT_VTS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_XIU_DP0_VTS_IPCLKPORT_ACLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_YAMIN_MCU_VTS_IPCLKPORT_CLKIN,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_YAMIN_MCU_VTS_IPCLKPORT_DBGCLK,
+ CLK_CON_GAT_CLK_BLK_VTS_UID_YAMIN_MCU_VTS_IPCLKPORT_IWICCLK,
+};
+
+PNAME(mout_clkalive_vts_noc_user_p) = { "oscclk" };
+PNAME(mout_clkalive_vts_rco_user_p) = { "oscclk" };
+PNAME(mout_clkcmu_vts_dmic_user_p) = { "oscclk", "dout_cmu_vts_dmic" };
+PNAME(mout_clkvts_aud_dmic1_p) = { "dout_clkvts_aud_dmic1",
+ "dout_clkvts_dmic_if_div2",
+ "dmic_clk0_in", "dmic_clk1_in",
+ "dmic_clk2_in", "oscclk", "oscclk",
+ "oscclk" };
+PNAME(mout_clkvts_noc_p) = { "mout_clkalive_vts_noc_user",
+ "mout_clkalive_vts_rco_user" };
+PNAME(mout_clkvts_dmic_pad_p) = { "mout_clkalive_vts_rco_user",
+ "mout_clkcmu_vts_dmic_user" };
+
+static const struct samsung_mux_clock vts_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_CLKALIVE_VTS_NOC_USER, "mout_clkalive_vts_noc_user",
+ mout_clkalive_vts_noc_user_p, PLL_CON0_MUX_CLKALIVE_VTS_NOC_USER,
+ 4, 1),
+ MUX(CLK_MOUT_CLKALIVE_VTS_RCO_USER, "mout_clkalive_vts_rco_user",
+ mout_clkalive_vts_rco_user_p, PLL_CON0_MUX_CLKALIVE_VTS_RCO_USER,
+ 4, 1),
+ MUX(CLK_MOUT_CLKCMU_VTS_DMIC_USER, "mout_clkcmu_vts_dmic_user",
+ mout_clkcmu_vts_dmic_user_p, PLL_CON0_MUX_CLKCMU_VTS_DMIC_USER,
+ 4, 1),
+ MUX(CLK_MOUT_CLKVTS_AUD_DMIC1, "mout_clkvts_aud_dmic1",
+ mout_clkvts_aud_dmic1_p, CLK_CON_MUX_MUX_CLKVTS_AUD_DMIC1, 0, 3),
+ MUX(CLK_MOUT_CLKVTS_NOC, "mout_clkvts_noc", mout_clkvts_noc_p,
+ CLK_CON_MUX_MUX_CLK_VTS_NOC, 0, 1),
+ MUX(CLK_MOUT_CLKVTS_DMIC_PAD, "mout_clkvts_dmic_pad",
+ mout_clkvts_dmic_pad_p, CLK_CON_MUX_MUX_CLK_VTS_DMIC_PAD, 0, 1),
+};
+
+static const struct samsung_div_clock vts_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CLKVTS_AUD_DMIC0, "dout_clkvts_aud_dmic0",
+ "mout_clkvts_dmic_pad", CLK_CON_DIV_DIV_CLKVTS_AUD_DMIC0, 0, 5),
+ DIV(CLK_DOUT_CLKVTS_AUD_DMIC1, "dout_clkvts_aud_dmic1",
+ "dout_clkvts_aud_dmic0", CLK_CON_DIV_DIV_CLKVTS_AUD_DMIC1, 0, 4),
+ DIV(CLK_DOUT_CLKVTS_CPU, "dout_clkvts_cpu", "mout_clkvts_noc",
+ CLK_CON_DIV_DIV_CLK_VTS_CPU, 0, 3),
+ DIV(CLK_DOUT_CLKVTS_DMIC_IF, "dout_clkvts_dmic_if",
+ "dout_clkvts_aud_dmic0", CLK_CON_DIV_DIV_CLK_VTS_DMIC_IF, 0, 7),
+ DIV(CLK_DOUT_CLKVTS_DMIC_IF_DIV2, "dout_clkvts_dmic_if_div2",
+ "dout_clkvts_dmic_if", CLK_CON_DIV_DIV_CLK_VTS_DMIC_IF_DIV2, 0, 4),
+ DIV(CLK_DOUT_CLKVTS_NOC, "dout_clkvts_noc", "dout_clkvts_cpu",
+ CLK_CON_DIV_DIV_CLK_VTS_NOC, 0, 3),
+ DIV(CLK_DOUT_CLKVTS_SERIAL_LIF, "dout_clkvts_serial_lif",
+ "mout_clkalive_vts_rco_user", CLK_CON_DIV_DIV_CLK_VTS_SERIAL_LIF,
+ 0, 7),
+ DIV(CLK_DOUT_CLKVTS_SERIAL_LIF_CORE, "dout_clkvts_serial_lif_core",
+ "mout_clkalive_vts_rco_user",
+ CLK_CON_DIV_DIV_CLK_VTS_SERIAL_LIF_CORE, 0, 7),
+};
+
+static const struct samsung_fixed_rate_clock vts_fixed_clks[] __initconst = {
+ FRATE(0, "dmic_clk0_in", NULL, 0, 100000000),
+ FRATE(0, "dmic_clk1_in", NULL, 0, 100000000),
+ FRATE(0, "dmic_clk2_in", NULL, 0, 100000000),
+};
+
+static const struct samsung_cmu_info vts_cmu_info __initconst = {
+ .mux_clks = vts_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(vts_mux_clks),
+ .div_clks = vts_div_clks,
+ .nr_div_clks = ARRAY_SIZE(vts_div_clks),
+ .fixed_clks = vts_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(vts_fixed_clks),
+ .nr_clk_ids = CLKS_NR_VTS,
+ .clk_regs = vts_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(vts_clk_regs),
+ .clk_name = "dmic",
+};
+
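+/*
+ * Probe only needs to look up the samsung_cmu_info description matched
+ * via the compatible string and hand it to the common arm64 CMU
+ * registration helper; all clock setup is driven by the tables above.
+ */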
+static int __init exynos2200_cmu_probe(struct platform_device *pdev)
+{
+ const struct samsung_cmu_info *info;
+ struct device *dev = &pdev->dev;
+
+ info = of_device_get_match_data(dev);
+ exynos_arm64_register_cmu(dev, dev->of_node, info);
+
+ return 0;
+}
+
+static const struct of_device_id exynos2200_cmu_of_match[] = {
+ {
+ .compatible = "samsung,exynos2200-cmu-cmgp",
+ .data = &cmgp_cmu_info,
+ }, {
+ .compatible = "samsung,exynos2200-cmu-hsi0",
+ .data = &hsi0_cmu_info,
+ }, {
+ .compatible = "samsung,exynos2200-cmu-peric0",
+ .data = &peric0_cmu_info,
+ }, {
+ .compatible = "samsung,exynos2200-cmu-peric1",
+ .data = &peric1_cmu_info,
+ }, {
+ .compatible = "samsung,exynos2200-cmu-peric2",
+ .data = &peric2_cmu_info,
+ }, {
+ .compatible = "samsung,exynos2200-cmu-ufs",
+ .data = &ufs_cmu_info,
+ }, {
+ .compatible = "samsung,exynos2200-cmu-vts",
+ .data = &vts_cmu_info,
+ }, { }
+};
+
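+/*
+ * The probe callback lives in __init memory; mark the driver data
+ * __refdata so modpost does not warn about the reference. Late binding
+ * is prevented via suppress_bind_attrs.
+ */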
+static struct platform_driver exynos2200_cmu_driver __refdata = {
+ .driver = {
+ .name = "exynos2200-cmu",
+ .of_match_table = exynos2200_cmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = exynos2200_cmu_probe,
+};
+
+static int __init exynos2200_cmu_init(void)
+{
+ return platform_driver_register(&exynos2200_cmu_driver);
+}
+core_initcall(exynos2200_cmu_init);
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
index aec4d18c1f9e..84564ec4c8ec 100644
--- a/drivers/clk/samsung/clk-exynos3250.c
+++ b/drivers/clk/samsung/clk-exynos3250.c
@@ -7,10 +7,8 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
-
#include <dt-bindings/clock/exynos3250.h>
#include "clk.h"
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 16be0c53903c..374c26e5d9fd 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -9,9 +9,9 @@
#include <dt-bindings/clock/exynos4.h>
#include <linux/slab.h>
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-exynos4412-isp.c b/drivers/clk/samsung/clk-exynos4412-isp.c
index 29c5644f0593..fa915057e109 100644
--- a/drivers/clk/samsung/clk-exynos4412-isp.c
+++ b/drivers/clk/samsung/clk-exynos4412-isp.c
@@ -8,8 +8,8 @@
#include <dt-bindings/clock/exynos4.h>
#include <linux/slab.h>
-#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c
index 373129847301..03bbde76e3ce 100644
--- a/drivers/clk/samsung/clk-exynos5-subcmu.c
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.c
@@ -5,6 +5,7 @@
// Common Clock Framework support for Exynos5 power-domain dependent clocks
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 47e9ac2275ee..e90d3a0848cb 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -10,6 +10,7 @@
#include <dt-bindings/clock/exynos5250.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c
index fd0520d204dc..0a5959823370 100644
--- a/drivers/clk/samsung/clk-exynos5260.c
+++ b/drivers/clk/samsung/clk-exynos5260.c
@@ -6,9 +6,6 @@
* Common Clock Framework support for Exynos5260 SoC.
*/
-#include <linux/of.h>
-#include <linux/of_address.h>
-
#include "clk-exynos5260.h"
#include "clk.h"
#include "clk-pll.h"
diff --git a/drivers/clk/samsung/clk-exynos5410.c b/drivers/clk/samsung/clk-exynos5410.c
index 99b1bb4539fd..baa9988c7bb7 100644
--- a/drivers/clk/samsung/clk-exynos5410.c
+++ b/drivers/clk/samsung/clk-exynos5410.c
@@ -9,8 +9,6 @@
#include <dt-bindings/clock/exynos5410.h>
#include <linux/clk-provider.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
#include <linux/clk.h>
#include "clk.h"
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 333c52fda17f..a9df4e6db82f 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -10,6 +10,7 @@
#include <dt-bindings/clock/exynos5420.h>
#include <linux/slab.h>
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk.h>
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index 609d31a7aa52..4b2a861e7d57 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -6,10 +6,8 @@
* Common Clock Framework support for Exynos5433 SoC.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
index e6c938effa29..fe0fa5bdbd4b 100644
--- a/drivers/clk/samsung/clk-exynos7.c
+++ b/drivers/clk/samsung/clk-exynos7.c
@@ -5,7 +5,6 @@
*/
#include <linux/clk-provider.h>
-#include <linux/of.h>
#include "clk.h"
#include <dt-bindings/clock/exynos7-clk.h>
diff --git a/drivers/clk/samsung/clk-exynos7870.c b/drivers/clk/samsung/clk-exynos7870.c
new file mode 100644
index 000000000000..b3bcf3a1d0b7
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos7870.c
@@ -0,0 +1,1829 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Samsung Electronics Co., Ltd.
+ * Author: Kaustabh Chakraborty <kauschluss@disroot.org>
+ *
+ * Common Clock Framework support for Exynos7870.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/samsung,exynos7870-cmu.h>
+
+#include "clk.h"
+#include "clk-exynos-arm64.h"
+
+/*
+ * Register offsets for CMU_MIF (0x10460000)
+ */
+#define PLL_LOCKTIME_MIF_MEM_PLL 0x0000
+#define PLL_LOCKTIME_MIF_MEDIA_PLL 0x0020
+#define PLL_LOCKTIME_MIF_BUS_PLL 0x0040
+#define PLL_CON0_MIF_MEM_PLL 0x0100
+#define PLL_CON0_MIF_MEDIA_PLL 0x0120
+#define PLL_CON0_MIF_BUS_PLL 0x0140
+#define CLK_CON_GAT_MIF_MUX_MEM_PLL 0x0200
+#define CLK_CON_GAT_MIF_MUX_MEM_PLL_CON 0x0200
+#define CLK_CON_GAT_MIF_MUX_MEDIA_PLL 0x0204
+#define CLK_CON_GAT_MIF_MUX_MEDIA_PLL_CON 0x0204
+#define CLK_CON_GAT_MIF_MUX_BUS_PLL 0x0208
+#define CLK_CON_GAT_MIF_MUX_BUS_PLL_CON 0x0208
+#define CLK_CON_GAT_MIF_MUX_BUSD 0x0220
+#define CLK_CON_MUX_MIF_BUSD 0x0220
+#define CLK_CON_GAT_MIF_MUX_CMU_ISP_VRA 0x0264
+#define CLK_CON_MUX_MIF_CMU_ISP_VRA 0x0264
+#define CLK_CON_GAT_MIF_MUX_CMU_ISP_CAM 0x0268
+#define CLK_CON_MUX_MIF_CMU_ISP_CAM 0x0268
+#define CLK_CON_GAT_MIF_MUX_CMU_ISP_ISP 0x026c
+#define CLK_CON_MUX_MIF_CMU_ISP_ISP 0x026c
+#define CLK_CON_GAT_MIF_MUX_CMU_DISPAUD_BUS 0x0270
+#define CLK_CON_MUX_MIF_CMU_DISPAUD_BUS 0x0270
+#define CLK_CON_GAT_MIF_MUX_CMU_DISPAUD_DECON_VCLK 0x0274
+#define CLK_CON_MUX_MIF_CMU_DISPAUD_DECON_VCLK 0x0274
+#define CLK_CON_GAT_MIF_MUX_CMU_DISPAUD_DECON_ECLK 0x0278
+#define CLK_CON_MUX_MIF_CMU_DISPAUD_DECON_ECLK 0x0278
+#define CLK_CON_GAT_MIF_MUX_CMU_MFCMSCL_MSCL 0x027c
+#define CLK_CON_MUX_MIF_CMU_MFCMSCL_MSCL 0x027c
+#define CLK_CON_GAT_MIF_MUX_CMU_MFCMSCL_MFC 0x0280
+#define CLK_CON_MUX_MIF_CMU_MFCMSCL_MFC 0x0280
+#define CLK_CON_GAT_MIF_MUX_CMU_FSYS_BUS 0x0284
+#define CLK_CON_MUX_MIF_CMU_FSYS_BUS 0x0284
+#define CLK_CON_GAT_MIF_MUX_CMU_FSYS_MMC0 0x0288
+#define CLK_CON_MUX_MIF_CMU_FSYS_MMC0 0x0288
+#define CLK_CON_GAT_MIF_MUX_CMU_FSYS_MMC1 0x028c
+#define CLK_CON_MUX_MIF_CMU_FSYS_MMC1 0x028c
+#define CLK_CON_GAT_MIF_MUX_CMU_FSYS_MMC2 0x0290
+#define CLK_CON_MUX_MIF_CMU_FSYS_MMC2 0x0290
+#define CLK_CON_GAT_MIF_MUX_CMU_FSYS_USB20DRD_REFCLK 0x029c
+#define CLK_CON_MUX_MIF_CMU_FSYS_USB20DRD_REFCLK 0x029c
+#define CLK_CON_GAT_MIF_MUX_CMU_PERI_BUS 0x02a0
+#define CLK_CON_MUX_MIF_CMU_PERI_BUS 0x02a0
+#define CLK_CON_GAT_MIF_MUX_CMU_PERI_UART1 0x02a4
+#define CLK_CON_MUX_MIF_CMU_PERI_UART1 0x02a4
+#define CLK_CON_GAT_MIF_MUX_CMU_PERI_UART2 0x02a8
+#define CLK_CON_MUX_MIF_CMU_PERI_UART2 0x02a8
+#define CLK_CON_GAT_MIF_MUX_CMU_PERI_UART0 0x02ac
+#define CLK_CON_MUX_MIF_CMU_PERI_UART0 0x02ac
+#define CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI2 0x02b0
+#define CLK_CON_MUX_MIF_CMU_PERI_SPI2 0x02b0
+#define CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI1 0x02b4
+#define CLK_CON_MUX_MIF_CMU_PERI_SPI1 0x02b4
+#define CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI0 0x02b8
+#define CLK_CON_MUX_MIF_CMU_PERI_SPI0 0x02b8
+#define CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI3 0x02bc
+#define CLK_CON_MUX_MIF_CMU_PERI_SPI3 0x02bc
+#define CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI4 0x02c0
+#define CLK_CON_MUX_MIF_CMU_PERI_SPI4 0x02c0
+#define CLK_CON_GAT_MIF_MUX_CMU_ISP_SENSOR0 0x02c4
+#define CLK_CON_MUX_MIF_CMU_ISP_SENSOR0 0x02c4
+#define CLK_CON_GAT_MIF_MUX_CMU_ISP_SENSOR1 0x02c8
+#define CLK_CON_MUX_MIF_CMU_ISP_SENSOR1 0x02c8
+#define CLK_CON_GAT_MIF_MUX_CMU_ISP_SENSOR2 0x02cc
+#define CLK_CON_MUX_MIF_CMU_ISP_SENSOR2 0x02cc
+#define CLK_CON_DIV_MIF_BUSD 0x0420
+#define CLK_CON_DIV_MIF_APB 0x0424
+#define CLK_CON_DIV_MIF_HSI2C 0x0430
+#define CLK_CON_DIV_MIF_CMU_G3D_SWITCH 0x0460
+#define CLK_CON_DIV_MIF_CMU_ISP_VRA 0x0464
+#define CLK_CON_DIV_MIF_CMU_ISP_CAM 0x0468
+#define CLK_CON_DIV_MIF_CMU_ISP_ISP 0x046c
+#define CLK_CON_DIV_MIF_CMU_DISPAUD_BUS 0x0470
+#define CLK_CON_DIV_MIF_CMU_DISPAUD_DECON_VCLK 0x0474
+#define CLK_CON_DIV_MIF_CMU_DISPAUD_DECON_ECLK 0x0478
+#define CLK_CON_DIV_MIF_CMU_MFCMSCL_MSCL 0x047c
+#define CLK_CON_DIV_MIF_CMU_MFCMSCL_MFC 0x0480
+#define CLK_CON_DIV_MIF_CMU_FSYS_BUS 0x0484
+#define CLK_CON_DIV_MIF_CMU_FSYS_MMC0 0x0488
+#define CLK_CON_DIV_MIF_CMU_FSYS_MMC1 0x048c
+#define CLK_CON_DIV_MIF_CMU_FSYS_MMC2 0x0490
+#define CLK_CON_DIV_MIF_CMU_FSYS_USB20DRD_REFCLK 0x049c
+#define CLK_CON_DIV_MIF_CMU_PERI_BUS 0x04a0
+#define CLK_CON_DIV_MIF_CMU_PERI_UART1 0x04a4
+#define CLK_CON_DIV_MIF_CMU_PERI_UART2 0x04a8
+#define CLK_CON_DIV_MIF_CMU_PERI_UART0 0x04ac
+#define CLK_CON_DIV_MIF_CMU_PERI_SPI2 0x04b0
+#define CLK_CON_DIV_MIF_CMU_PERI_SPI1 0x04b4
+#define CLK_CON_DIV_MIF_CMU_PERI_SPI0 0x04b8
+#define CLK_CON_DIV_MIF_CMU_PERI_SPI3 0x04bc
+#define CLK_CON_DIV_MIF_CMU_PERI_SPI4 0x04c0
+#define CLK_CON_DIV_MIF_CMU_ISP_SENSOR0 0x04c4
+#define CLK_CON_DIV_MIF_CMU_ISP_SENSOR1 0x04c8
+#define CLK_CON_DIV_MIF_CMU_ISP_SENSOR2 0x04cc
+#define CLK_CON_GAT_MIF_WRAP_ADC_IF_OSC_SYS 0x080c
+#define CLK_CON_GAT_MIF_HSI2C_AP_PCLKS 0x0828
+#define CLK_CON_GAT_MIF_HSI2C_CP_PCLKS 0x0828
+#define CLK_CON_GAT_MIF_WRAP_ADC_IF_PCLK_S0 0x0828
+#define CLK_CON_GAT_MIF_WRAP_ADC_IF_PCLK_S1 0x0828
+#define CLK_CON_GAT_MIF_HSI2C_AP_PCLKM 0x0840
+#define CLK_CON_GAT_MIF_HSI2C_CP_PCLKM 0x0840
+#define CLK_CON_GAT_MIF_HSI2C_IPCLK 0x0840
+#define CLK_CON_GAT_MIF_HSI2C_ITCLK 0x0840
+#define CLK_CON_GAT_MIF_CP_PCLK_HSI2C 0x0840
+#define CLK_CON_GAT_MIF_CP_PCLK_HSI2C_BAT_0 0x0840
+#define CLK_CON_GAT_MIF_CP_PCLK_HSI2C_BAT_1 0x0840
+#define CLK_CON_GAT_MIF_CMU_G3D_SWITCH 0x0860
+#define CLK_CON_GAT_MIF_CMU_ISP_VRA 0x0864
+#define CLK_CON_GAT_MIF_CMU_ISP_CAM 0x0868
+#define CLK_CON_GAT_MIF_CMU_ISP_ISP 0x086c
+#define CLK_CON_GAT_MIF_CMU_DISPAUD_BUS 0x0870
+#define CLK_CON_GAT_MIF_CMU_DISPAUD_DECON_VCLK 0x0874
+#define CLK_CON_GAT_MIF_CMU_DISPAUD_DECON_ECLK 0x0878
+#define CLK_CON_GAT_MIF_CMU_MFCMSCL_MSCL 0x087c
+#define CLK_CON_GAT_MIF_CMU_MFCMSCL_MFC 0x0880
+#define CLK_CON_GAT_MIF_CMU_FSYS_BUS 0x0884
+#define CLK_CON_GAT_MIF_CMU_FSYS_MMC0 0x0888
+#define CLK_CON_GAT_MIF_CMU_FSYS_MMC1 0x088c
+#define CLK_CON_GAT_MIF_CMU_FSYS_MMC2 0x0890
+#define CLK_CON_GAT_MIF_CMU_FSYS_USB20DRD_REFCLK 0x089c
+#define CLK_CON_GAT_MIF_CMU_PERI_BUS 0x08a0
+#define CLK_CON_GAT_MIF_CMU_PERI_UART1 0x08a4
+#define CLK_CON_GAT_MIF_CMU_PERI_UART2 0x08a8
+#define CLK_CON_GAT_MIF_CMU_PERI_UART0 0x08ac
+#define CLK_CON_GAT_MIF_CMU_PERI_SPI2 0x08b0
+#define CLK_CON_GAT_MIF_CMU_PERI_SPI1 0x08b4
+#define CLK_CON_GAT_MIF_CMU_PERI_SPI0 0x08b8
+#define CLK_CON_GAT_MIF_CMU_PERI_SPI3 0x08bc
+#define CLK_CON_GAT_MIF_CMU_PERI_SPI4 0x08c0
+#define CLK_CON_GAT_MIF_CMU_ISP_SENSOR0 0x08c4
+#define CLK_CON_GAT_MIF_CMU_ISP_SENSOR1 0x08c8
+#define CLK_CON_GAT_MIF_CMU_ISP_SENSOR2 0x08cc
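+/*
+ * Note: several of the CLK_CON_* macros above alias the same offset. Each
+ * register packs more than one control field (for example a mux select at
+ * bit 12 and its gate at bit 21, or several gate enable bits), and the
+ * aliases name the field that the MUX()/GATE() entries below operate on.
+ */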
+
+static const unsigned long mif_clk_regs[] __initconst = {
+ PLL_LOCKTIME_MIF_MEM_PLL,
+ PLL_LOCKTIME_MIF_MEDIA_PLL,
+ PLL_LOCKTIME_MIF_BUS_PLL,
+ PLL_CON0_MIF_MEM_PLL,
+ PLL_CON0_MIF_MEDIA_PLL,
+ PLL_CON0_MIF_BUS_PLL,
+ CLK_CON_GAT_MIF_MUX_MEM_PLL,
+ CLK_CON_GAT_MIF_MUX_MEM_PLL_CON,
+ CLK_CON_GAT_MIF_MUX_MEDIA_PLL,
+ CLK_CON_GAT_MIF_MUX_MEDIA_PLL_CON,
+ CLK_CON_GAT_MIF_MUX_BUS_PLL,
+ CLK_CON_GAT_MIF_MUX_BUS_PLL_CON,
+ CLK_CON_GAT_MIF_MUX_BUSD,
+ CLK_CON_MUX_MIF_BUSD,
+ CLK_CON_GAT_MIF_MUX_CMU_ISP_VRA,
+ CLK_CON_MUX_MIF_CMU_ISP_VRA,
+ CLK_CON_GAT_MIF_MUX_CMU_ISP_CAM,
+ CLK_CON_MUX_MIF_CMU_ISP_CAM,
+ CLK_CON_GAT_MIF_MUX_CMU_ISP_ISP,
+ CLK_CON_MUX_MIF_CMU_ISP_ISP,
+ CLK_CON_GAT_MIF_MUX_CMU_DISPAUD_BUS,
+ CLK_CON_MUX_MIF_CMU_DISPAUD_BUS,
+ CLK_CON_GAT_MIF_MUX_CMU_DISPAUD_DECON_VCLK,
+ CLK_CON_MUX_MIF_CMU_DISPAUD_DECON_VCLK,
+ CLK_CON_GAT_MIF_MUX_CMU_DISPAUD_DECON_ECLK,
+ CLK_CON_MUX_MIF_CMU_DISPAUD_DECON_ECLK,
+ CLK_CON_GAT_MIF_MUX_CMU_MFCMSCL_MSCL,
+ CLK_CON_MUX_MIF_CMU_MFCMSCL_MSCL,
+ CLK_CON_GAT_MIF_MUX_CMU_MFCMSCL_MFC,
+ CLK_CON_MUX_MIF_CMU_MFCMSCL_MFC,
+ CLK_CON_GAT_MIF_MUX_CMU_FSYS_BUS,
+ CLK_CON_MUX_MIF_CMU_FSYS_BUS,
+ CLK_CON_GAT_MIF_MUX_CMU_FSYS_MMC0,
+ CLK_CON_MUX_MIF_CMU_FSYS_MMC0,
+ CLK_CON_GAT_MIF_MUX_CMU_FSYS_MMC1,
+ CLK_CON_MUX_MIF_CMU_FSYS_MMC1,
+ CLK_CON_GAT_MIF_MUX_CMU_FSYS_MMC2,
+ CLK_CON_MUX_MIF_CMU_FSYS_MMC2,
+ CLK_CON_GAT_MIF_MUX_CMU_FSYS_USB20DRD_REFCLK,
+ CLK_CON_MUX_MIF_CMU_FSYS_USB20DRD_REFCLK,
+ CLK_CON_GAT_MIF_MUX_CMU_PERI_BUS,
+ CLK_CON_MUX_MIF_CMU_PERI_BUS,
+ CLK_CON_GAT_MIF_MUX_CMU_PERI_UART1,
+ CLK_CON_MUX_MIF_CMU_PERI_UART1,
+ CLK_CON_GAT_MIF_MUX_CMU_PERI_UART2,
+ CLK_CON_MUX_MIF_CMU_PERI_UART2,
+ CLK_CON_GAT_MIF_MUX_CMU_PERI_UART0,
+ CLK_CON_MUX_MIF_CMU_PERI_UART0,
+ CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI2,
+ CLK_CON_MUX_MIF_CMU_PERI_SPI2,
+ CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI1,
+ CLK_CON_MUX_MIF_CMU_PERI_SPI1,
+ CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI0,
+ CLK_CON_MUX_MIF_CMU_PERI_SPI0,
+ CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI3,
+ CLK_CON_MUX_MIF_CMU_PERI_SPI3,
+ CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI4,
+ CLK_CON_MUX_MIF_CMU_PERI_SPI4,
+ CLK_CON_GAT_MIF_MUX_CMU_ISP_SENSOR0,
+ CLK_CON_MUX_MIF_CMU_ISP_SENSOR0,
+ CLK_CON_GAT_MIF_MUX_CMU_ISP_SENSOR1,
+ CLK_CON_MUX_MIF_CMU_ISP_SENSOR1,
+ CLK_CON_GAT_MIF_MUX_CMU_ISP_SENSOR2,
+ CLK_CON_MUX_MIF_CMU_ISP_SENSOR2,
+ CLK_CON_DIV_MIF_BUSD,
+ CLK_CON_DIV_MIF_APB,
+ CLK_CON_DIV_MIF_HSI2C,
+ CLK_CON_DIV_MIF_CMU_G3D_SWITCH,
+ CLK_CON_DIV_MIF_CMU_ISP_VRA,
+ CLK_CON_DIV_MIF_CMU_ISP_CAM,
+ CLK_CON_DIV_MIF_CMU_ISP_ISP,
+ CLK_CON_DIV_MIF_CMU_DISPAUD_BUS,
+ CLK_CON_DIV_MIF_CMU_DISPAUD_DECON_VCLK,
+ CLK_CON_DIV_MIF_CMU_DISPAUD_DECON_ECLK,
+ CLK_CON_DIV_MIF_CMU_MFCMSCL_MSCL,
+ CLK_CON_DIV_MIF_CMU_MFCMSCL_MFC,
+ CLK_CON_DIV_MIF_CMU_FSYS_BUS,
+ CLK_CON_DIV_MIF_CMU_FSYS_MMC0,
+ CLK_CON_DIV_MIF_CMU_FSYS_MMC1,
+ CLK_CON_DIV_MIF_CMU_FSYS_MMC2,
+ CLK_CON_DIV_MIF_CMU_FSYS_USB20DRD_REFCLK,
+ CLK_CON_DIV_MIF_CMU_PERI_BUS,
+ CLK_CON_DIV_MIF_CMU_PERI_UART1,
+ CLK_CON_DIV_MIF_CMU_PERI_UART2,
+ CLK_CON_DIV_MIF_CMU_PERI_UART0,
+ CLK_CON_DIV_MIF_CMU_PERI_SPI2,
+ CLK_CON_DIV_MIF_CMU_PERI_SPI1,
+ CLK_CON_DIV_MIF_CMU_PERI_SPI0,
+ CLK_CON_DIV_MIF_CMU_PERI_SPI3,
+ CLK_CON_DIV_MIF_CMU_PERI_SPI4,
+ CLK_CON_DIV_MIF_CMU_ISP_SENSOR0,
+ CLK_CON_DIV_MIF_CMU_ISP_SENSOR1,
+ CLK_CON_DIV_MIF_CMU_ISP_SENSOR2,
+ CLK_CON_GAT_MIF_WRAP_ADC_IF_OSC_SYS,
+ CLK_CON_GAT_MIF_HSI2C_AP_PCLKS,
+ CLK_CON_GAT_MIF_HSI2C_CP_PCLKS,
+ CLK_CON_GAT_MIF_WRAP_ADC_IF_PCLK_S0,
+ CLK_CON_GAT_MIF_WRAP_ADC_IF_PCLK_S1,
+ CLK_CON_GAT_MIF_HSI2C_AP_PCLKM,
+ CLK_CON_GAT_MIF_HSI2C_CP_PCLKM,
+ CLK_CON_GAT_MIF_HSI2C_IPCLK,
+ CLK_CON_GAT_MIF_HSI2C_ITCLK,
+ CLK_CON_GAT_MIF_CP_PCLK_HSI2C,
+ CLK_CON_GAT_MIF_CP_PCLK_HSI2C_BAT_0,
+ CLK_CON_GAT_MIF_CP_PCLK_HSI2C_BAT_1,
+ CLK_CON_GAT_MIF_CMU_G3D_SWITCH,
+ CLK_CON_GAT_MIF_CMU_ISP_VRA,
+ CLK_CON_GAT_MIF_CMU_ISP_CAM,
+ CLK_CON_GAT_MIF_CMU_ISP_ISP,
+ CLK_CON_GAT_MIF_CMU_DISPAUD_BUS,
+ CLK_CON_GAT_MIF_CMU_DISPAUD_DECON_VCLK,
+ CLK_CON_GAT_MIF_CMU_DISPAUD_DECON_ECLK,
+ CLK_CON_GAT_MIF_CMU_MFCMSCL_MSCL,
+ CLK_CON_GAT_MIF_CMU_MFCMSCL_MFC,
+ CLK_CON_GAT_MIF_CMU_FSYS_BUS,
+ CLK_CON_GAT_MIF_CMU_FSYS_MMC0,
+ CLK_CON_GAT_MIF_CMU_FSYS_MMC1,
+ CLK_CON_GAT_MIF_CMU_FSYS_MMC2,
+ CLK_CON_GAT_MIF_CMU_FSYS_USB20DRD_REFCLK,
+ CLK_CON_GAT_MIF_CMU_PERI_BUS,
+ CLK_CON_GAT_MIF_CMU_PERI_UART1,
+ CLK_CON_GAT_MIF_CMU_PERI_UART2,
+ CLK_CON_GAT_MIF_CMU_PERI_UART0,
+ CLK_CON_GAT_MIF_CMU_PERI_SPI2,
+ CLK_CON_GAT_MIF_CMU_PERI_SPI1,
+ CLK_CON_GAT_MIF_CMU_PERI_SPI0,
+ CLK_CON_GAT_MIF_CMU_PERI_SPI3,
+ CLK_CON_GAT_MIF_CMU_PERI_SPI4,
+ CLK_CON_GAT_MIF_CMU_ISP_SENSOR0,
+ CLK_CON_GAT_MIF_CMU_ISP_SENSOR1,
+ CLK_CON_GAT_MIF_CMU_ISP_SENSOR2,
+};
+
+static const struct samsung_fixed_factor_clock mif_fixed_factor_clks[] __initconst = {
+ FFACTOR(0, "ffac_mif_mux_bus_pll_div2", "gout_mif_mux_bus_pll_con", 1, 2,
+ 0),
+ FFACTOR(0, "ffac_mif_mux_media_pll_div2", "gout_mif_mux_media_pll_con",
+ 1, 2, 0),
+ FFACTOR(0, "ffac_mif_mux_mem_pll_div2", "gout_mif_mux_mem_pll_con", 1, 2,
+ 0),
+};
+
+static const struct samsung_pll_clock mif_pll_clks[] __initconst = {
+ PLL(pll_1417x, CLK_FOUT_MIF_BUS_PLL, "fout_mif_bus_pll", "oscclk",
+ PLL_LOCKTIME_MIF_BUS_PLL, PLL_CON0_MIF_BUS_PLL, NULL),
+ PLL(pll_1417x, CLK_FOUT_MIF_MEDIA_PLL, "fout_mif_media_pll", "oscclk",
+ PLL_LOCKTIME_MIF_MEDIA_PLL, PLL_CON0_MIF_MEDIA_PLL, NULL),
+ PLL(pll_1417x, CLK_FOUT_MIF_MEM_PLL, "fout_mif_mem_pll", "oscclk",
+ PLL_LOCKTIME_MIF_MEM_PLL, PLL_CON0_MIF_MEM_PLL, NULL),
+};
+
+/* List of parent clocks for muxes in CMU_MIF */
+PNAME(mout_mif_cmu_dispaud_bus_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2" };
+PNAME(mout_mif_cmu_dispaud_decon_eclk_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2" };
+PNAME(mout_mif_cmu_dispaud_decon_vclk_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2" };
+PNAME(mout_mif_cmu_fsys_bus_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2" };
+PNAME(mout_mif_cmu_fsys_mmc0_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2" };
+PNAME(mout_mif_cmu_fsys_mmc1_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2" };
+PNAME(mout_mif_cmu_fsys_mmc2_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2" };
+PNAME(mout_mif_cmu_fsys_usb20drd_refclk_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2" };
+PNAME(mout_mif_cmu_isp_cam_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2" };
+PNAME(mout_mif_cmu_isp_isp_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2" };
+PNAME(mout_mif_cmu_isp_sensor0_p) = { "ffac_mif_mux_bus_pll_div2",
+ "oscclk" };
+PNAME(mout_mif_cmu_isp_sensor1_p) = { "ffac_mif_mux_bus_pll_div2",
+ "oscclk" };
+PNAME(mout_mif_cmu_isp_sensor2_p) = { "ffac_mif_mux_bus_pll_div2",
+ "oscclk" };
+PNAME(mout_mif_cmu_isp_vra_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2",
+ "gout_mif_mux_bus_pll_con" };
+PNAME(mout_mif_cmu_mfcmscl_mfc_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2",
+ "gout_mif_mux_bus_pll_con" };
+PNAME(mout_mif_cmu_mfcmscl_mscl_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2",
+ "gout_mif_mux_bus_pll_con" };
+PNAME(mout_mif_cmu_peri_bus_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2" };
+PNAME(mout_mif_cmu_peri_spi0_p) = { "ffac_mif_mux_bus_pll_div2",
+ "oscclk" };
+PNAME(mout_mif_cmu_peri_spi2_p) = { "ffac_mif_mux_bus_pll_div2",
+ "oscclk" };
+PNAME(mout_mif_cmu_peri_spi1_p) = { "ffac_mif_mux_bus_pll_div2",
+ "oscclk" };
+PNAME(mout_mif_cmu_peri_spi4_p) = { "ffac_mif_mux_bus_pll_div2",
+ "oscclk" };
+PNAME(mout_mif_cmu_peri_spi3_p) = { "ffac_mif_mux_bus_pll_div2",
+ "oscclk" };
+PNAME(mout_mif_cmu_peri_uart1_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2" };
+PNAME(mout_mif_cmu_peri_uart2_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2" };
+PNAME(mout_mif_cmu_peri_uart0_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2" };
+PNAME(mout_mif_busd_p) = { "ffac_mif_mux_bus_pll_div2",
+ "ffac_mif_mux_media_pll_div2",
+ "ffac_mif_mux_mem_pll_div2" };
+
+static const struct samsung_mux_clock mif_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_MIF_CMU_DISPAUD_BUS, "mout_mif_cmu_dispaud_bus",
+ mout_mif_cmu_dispaud_bus_p, CLK_CON_MUX_MIF_CMU_DISPAUD_BUS, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_DISPAUD_DECON_ECLK,
+ "mout_mif_cmu_dispaud_decon_eclk",
+ mout_mif_cmu_dispaud_decon_eclk_p,
+ CLK_CON_MUX_MIF_CMU_DISPAUD_DECON_ECLK, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_DISPAUD_DECON_VCLK,
+ "mout_mif_cmu_dispaud_decon_vclk",
+ mout_mif_cmu_dispaud_decon_vclk_p,
+ CLK_CON_MUX_MIF_CMU_DISPAUD_DECON_VCLK, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_FSYS_BUS, "mout_mif_cmu_fsys_bus",
+ mout_mif_cmu_fsys_bus_p, CLK_CON_MUX_MIF_CMU_FSYS_BUS, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_FSYS_MMC0, "mout_mif_cmu_fsys_mmc0",
+ mout_mif_cmu_fsys_mmc0_p, CLK_CON_MUX_MIF_CMU_FSYS_MMC0, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_FSYS_MMC1, "mout_mif_cmu_fsys_mmc1",
+ mout_mif_cmu_fsys_mmc1_p, CLK_CON_MUX_MIF_CMU_FSYS_MMC1, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_FSYS_MMC2, "mout_mif_cmu_fsys_mmc2",
+ mout_mif_cmu_fsys_mmc2_p, CLK_CON_MUX_MIF_CMU_FSYS_MMC2, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_FSYS_USB20DRD_REFCLK,
+ "mout_mif_cmu_fsys_usb20drd_refclk",
+ mout_mif_cmu_fsys_usb20drd_refclk_p,
+ CLK_CON_MUX_MIF_CMU_FSYS_USB20DRD_REFCLK, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_ISP_CAM, "mout_mif_cmu_isp_cam",
+ mout_mif_cmu_isp_cam_p, CLK_CON_MUX_MIF_CMU_ISP_CAM, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_ISP_ISP, "mout_mif_cmu_isp_isp",
+ mout_mif_cmu_isp_isp_p, CLK_CON_MUX_MIF_CMU_ISP_ISP, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_ISP_SENSOR0, "mout_mif_cmu_isp_sensor0",
+ mout_mif_cmu_isp_sensor0_p, CLK_CON_MUX_MIF_CMU_ISP_SENSOR0, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_ISP_SENSOR1, "mout_mif_cmu_isp_sensor1",
+ mout_mif_cmu_isp_sensor1_p, CLK_CON_MUX_MIF_CMU_ISP_SENSOR1, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_ISP_SENSOR2, "mout_mif_cmu_isp_sensor2",
+ mout_mif_cmu_isp_sensor2_p, CLK_CON_MUX_MIF_CMU_ISP_SENSOR2, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_ISP_VRA, "mout_mif_cmu_isp_vra",
+ mout_mif_cmu_isp_vra_p, CLK_CON_MUX_MIF_CMU_ISP_VRA, 12, 2),
+ MUX(CLK_MOUT_MIF_CMU_MFCMSCL_MFC, "mout_mif_cmu_mfcmscl_mfc",
+ mout_mif_cmu_mfcmscl_mfc_p, CLK_CON_MUX_MIF_CMU_MFCMSCL_MFC, 12, 2),
+ MUX(CLK_MOUT_MIF_CMU_MFCMSCL_MSCL, "mout_mif_cmu_mfcmscl_mscl",
+ mout_mif_cmu_mfcmscl_mscl_p, CLK_CON_MUX_MIF_CMU_MFCMSCL_MSCL, 12,
+ 2),
+ MUX(CLK_MOUT_MIF_CMU_PERI_BUS, "mout_mif_cmu_peri_bus",
+ mout_mif_cmu_peri_bus_p, CLK_CON_MUX_MIF_CMU_PERI_BUS, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_PERI_SPI0, "mout_mif_cmu_peri_spi0",
+ mout_mif_cmu_peri_spi0_p, CLK_CON_MUX_MIF_CMU_PERI_SPI0, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_PERI_SPI2, "mout_mif_cmu_peri_spi2",
+ mout_mif_cmu_peri_spi2_p, CLK_CON_MUX_MIF_CMU_PERI_SPI2, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_PERI_SPI1, "mout_mif_cmu_peri_spi1",
+ mout_mif_cmu_peri_spi1_p, CLK_CON_MUX_MIF_CMU_PERI_SPI1, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_PERI_SPI4, "mout_mif_cmu_peri_spi4",
+ mout_mif_cmu_peri_spi4_p, CLK_CON_MUX_MIF_CMU_PERI_SPI4, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_PERI_SPI3, "mout_mif_cmu_peri_spi3",
+ mout_mif_cmu_peri_spi3_p, CLK_CON_MUX_MIF_CMU_PERI_SPI3, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_PERI_UART1, "mout_mif_cmu_peri_uart1",
+ mout_mif_cmu_peri_uart1_p, CLK_CON_MUX_MIF_CMU_PERI_UART1, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_PERI_UART2, "mout_mif_cmu_peri_uart2",
+ mout_mif_cmu_peri_uart2_p, CLK_CON_MUX_MIF_CMU_PERI_UART2, 12, 1),
+ MUX(CLK_MOUT_MIF_CMU_PERI_UART0, "mout_mif_cmu_peri_uart0",
+ mout_mif_cmu_peri_uart0_p, CLK_CON_MUX_MIF_CMU_PERI_UART0, 12, 1),
+ MUX(CLK_MOUT_MIF_BUSD, "mout_mif_busd", mout_mif_busd_p,
+ CLK_CON_MUX_MIF_BUSD, 12, 2),
+};
+
+static const struct samsung_div_clock mif_div_clks[] __initconst = {
+ DIV(CLK_DOUT_MIF_CMU_DISPAUD_BUS, "dout_mif_cmu_dispaud_bus",
+ "gout_mif_mux_cmu_dispaud_bus", CLK_CON_DIV_MIF_CMU_DISPAUD_BUS, 0,
+ 4),
+ DIV(CLK_DOUT_MIF_CMU_DISPAUD_DECON_ECLK,
+ "dout_mif_cmu_dispaud_decon_eclk",
+ "gout_mif_mux_cmu_dispaud_decon_eclk",
+ CLK_CON_DIV_MIF_CMU_DISPAUD_DECON_ECLK, 0, 4),
+ DIV(CLK_DOUT_MIF_CMU_DISPAUD_DECON_VCLK,
+ "dout_mif_cmu_dispaud_decon_vclk",
+ "gout_mif_mux_cmu_dispaud_decon_vclk",
+ CLK_CON_DIV_MIF_CMU_DISPAUD_DECON_VCLK, 0, 4),
+ DIV(CLK_DOUT_MIF_CMU_FSYS_BUS, "dout_mif_cmu_fsys_bus",
+ "gout_mif_mux_cmu_fsys_bus", CLK_CON_DIV_MIF_CMU_FSYS_BUS, 0, 4),
+ DIV(CLK_DOUT_MIF_CMU_FSYS_MMC0, "dout_mif_cmu_fsys_mmc0",
+ "gout_mif_mux_cmu_fsys_mmc0", CLK_CON_DIV_MIF_CMU_FSYS_MMC0, 0, 10),
+ DIV(CLK_DOUT_MIF_CMU_FSYS_MMC1, "dout_mif_cmu_fsys_mmc1",
+ "gout_mif_mux_cmu_fsys_mmc1", CLK_CON_DIV_MIF_CMU_FSYS_MMC1, 0, 10),
+ DIV(CLK_DOUT_MIF_CMU_FSYS_MMC2, "dout_mif_cmu_fsys_mmc2",
+ "gout_mif_mux_cmu_fsys_mmc2", CLK_CON_DIV_MIF_CMU_FSYS_MMC2, 0, 10),
+ DIV(CLK_DOUT_MIF_CMU_FSYS_USB20DRD_REFCLK,
+ "dout_mif_cmu_fsys_usb20drd_refclk",
+ "gout_mif_mux_cmu_fsys_usb20drd_refclk",
+ CLK_CON_DIV_MIF_CMU_FSYS_USB20DRD_REFCLK, 0, 4),
+ DIV(CLK_DOUT_MIF_CMU_G3D_SWITCH, "dout_mif_cmu_g3d_switch",
+ "ffac_mif_mux_bus_pll_div2", CLK_CON_DIV_MIF_CMU_G3D_SWITCH, 0, 2),
+ DIV(CLK_DOUT_MIF_CMU_ISP_CAM, "dout_mif_cmu_isp_cam",
+ "gout_mif_mux_cmu_isp_cam", CLK_CON_DIV_MIF_CMU_ISP_CAM, 0, 4),
+ DIV(CLK_DOUT_MIF_CMU_ISP_ISP, "dout_mif_cmu_isp_isp",
+ "gout_mif_mux_cmu_isp_isp", CLK_CON_DIV_MIF_CMU_ISP_ISP, 0, 4),
+ DIV(CLK_DOUT_MIF_CMU_ISP_SENSOR0, "dout_mif_cmu_isp_sensor0",
+ "gout_mif_mux_cmu_isp_sensor0", CLK_CON_DIV_MIF_CMU_ISP_SENSOR0, 0,
+ 6),
+ DIV(CLK_DOUT_MIF_CMU_ISP_SENSOR1, "dout_mif_cmu_isp_sensor1",
+ "gout_mif_mux_cmu_isp_sensor1", CLK_CON_DIV_MIF_CMU_ISP_SENSOR1, 0,
+ 6),
+ DIV(CLK_DOUT_MIF_CMU_ISP_SENSOR2, "dout_mif_cmu_isp_sensor2",
+ "gout_mif_mux_cmu_isp_sensor2", CLK_CON_DIV_MIF_CMU_ISP_SENSOR2, 0,
+ 6),
+ DIV(CLK_DOUT_MIF_CMU_ISP_VRA, "dout_mif_cmu_isp_vra",
+ "gout_mif_mux_cmu_isp_vra", CLK_CON_DIV_MIF_CMU_ISP_VRA, 0, 4),
+ DIV(CLK_DOUT_MIF_CMU_MFCMSCL_MFC, "dout_mif_cmu_mfcmscl_mfc",
+ "gout_mif_mux_cmu_mfcmscl_mfc", CLK_CON_DIV_MIF_CMU_MFCMSCL_MFC, 0,
+ 4),
+ DIV(CLK_DOUT_MIF_CMU_MFCMSCL_MSCL, "dout_mif_cmu_mfcmscl_mscl",
+ "gout_mif_mux_cmu_mfcmscl_mscl", CLK_CON_DIV_MIF_CMU_MFCMSCL_MSCL,
+ 0, 4),
+ DIV(CLK_DOUT_MIF_CMU_PERI_BUS, "dout_mif_cmu_peri_bus",
+ "gout_mif_mux_cmu_peri_bus", CLK_CON_DIV_MIF_CMU_PERI_BUS, 0, 4),
+ DIV(CLK_DOUT_MIF_CMU_PERI_SPI0, "dout_mif_cmu_peri_spi0",
+ "gout_mif_mux_cmu_peri_spi0", CLK_CON_DIV_MIF_CMU_PERI_SPI0, 0, 6),
+ DIV(CLK_DOUT_MIF_CMU_PERI_SPI2, "dout_mif_cmu_peri_spi2",
+ "gout_mif_mux_cmu_peri_spi2", CLK_CON_DIV_MIF_CMU_PERI_SPI2, 0, 6),
+ DIV(CLK_DOUT_MIF_CMU_PERI_SPI1, "dout_mif_cmu_peri_spi1",
+ "gout_mif_mux_cmu_peri_spi1", CLK_CON_DIV_MIF_CMU_PERI_SPI1, 0, 6),
+ DIV(CLK_DOUT_MIF_CMU_PERI_SPI4, "dout_mif_cmu_peri_spi4",
+ "gout_mif_mux_cmu_peri_spi4", CLK_CON_DIV_MIF_CMU_PERI_SPI4, 0, 6),
+ DIV(CLK_DOUT_MIF_CMU_PERI_SPI3, "dout_mif_cmu_peri_spi3",
+ "gout_mif_mux_cmu_peri_spi3", CLK_CON_DIV_MIF_CMU_PERI_SPI3, 0, 6),
+ DIV(CLK_DOUT_MIF_CMU_PERI_UART1, "dout_mif_cmu_peri_uart1",
+ "gout_mif_mux_cmu_peri_uart1", CLK_CON_DIV_MIF_CMU_PERI_UART1, 0, 4),
+ DIV(CLK_DOUT_MIF_CMU_PERI_UART2, "dout_mif_cmu_peri_uart2",
+ "gout_mif_mux_cmu_peri_uart2", CLK_CON_DIV_MIF_CMU_PERI_UART2, 0, 4),
+ DIV(CLK_DOUT_MIF_CMU_PERI_UART0, "dout_mif_cmu_peri_uart0",
+ "gout_mif_mux_cmu_peri_uart0", CLK_CON_DIV_MIF_CMU_PERI_UART0, 0, 4),
+ DIV(CLK_DOUT_MIF_APB, "dout_mif_apb", "dout_mif_busd",
+ CLK_CON_DIV_MIF_APB, 0, 2),
+ DIV(CLK_DOUT_MIF_BUSD, "dout_mif_busd", "gout_mif_mux_busd",
+ CLK_CON_DIV_MIF_BUSD, 0, 4),
+ DIV(CLK_DOUT_MIF_HSI2C, "dout_mif_hsi2c", "ffac_mif_mux_media_pll_div2",
+ CLK_CON_DIV_MIF_HSI2C, 0, 4),
+};
+
+static const struct samsung_gate_clock mif_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_MIF_CMU_DISPAUD_BUS, "gout_mif_cmu_dispaud_bus",
+ "dout_mif_cmu_dispaud_bus", CLK_CON_GAT_MIF_CMU_DISPAUD_BUS, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_DISPAUD_DECON_ECLK,
+ "gout_mif_cmu_dispaud_decon_eclk",
+ "dout_mif_cmu_dispaud_decon_eclk",
+ CLK_CON_GAT_MIF_CMU_DISPAUD_DECON_ECLK, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_DISPAUD_DECON_VCLK,
+ "gout_mif_cmu_dispaud_decon_vclk",
+ "dout_mif_cmu_dispaud_decon_vclk",
+ CLK_CON_GAT_MIF_CMU_DISPAUD_DECON_VCLK, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_FSYS_BUS, "gout_mif_cmu_fsys_bus",
+ "dout_mif_cmu_fsys_bus", CLK_CON_GAT_MIF_CMU_FSYS_BUS, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_FSYS_MMC0, "gout_mif_cmu_fsys_mmc0",
+ "dout_mif_cmu_fsys_mmc0", CLK_CON_GAT_MIF_CMU_FSYS_MMC0, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_FSYS_MMC1, "gout_mif_cmu_fsys_mmc1",
+ "dout_mif_cmu_fsys_mmc1", CLK_CON_GAT_MIF_CMU_FSYS_MMC1, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_FSYS_MMC2, "gout_mif_cmu_fsys_mmc2",
+ "dout_mif_cmu_fsys_mmc2", CLK_CON_GAT_MIF_CMU_FSYS_MMC2, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_FSYS_USB20DRD_REFCLK,
+ "gout_mif_cmu_fsys_usb20drd_refclk",
+ "dout_mif_cmu_fsys_usb20drd_refclk",
+ CLK_CON_GAT_MIF_CMU_FSYS_USB20DRD_REFCLK, 0, CLK_SET_RATE_PARENT,
+ 0),
+ GATE(CLK_GOUT_MIF_CMU_G3D_SWITCH, "gout_mif_cmu_g3d_switch",
+ "dout_mif_cmu_g3d_switch", CLK_CON_GAT_MIF_CMU_G3D_SWITCH, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_ISP_CAM, "gout_mif_cmu_isp_cam",
+ "dout_mif_cmu_isp_cam", CLK_CON_GAT_MIF_CMU_ISP_CAM, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_ISP_ISP, "gout_mif_cmu_isp_isp",
+ "dout_mif_cmu_isp_isp", CLK_CON_GAT_MIF_CMU_ISP_ISP, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_ISP_SENSOR0, "gout_mif_cmu_isp_sensor0",
+ "dout_mif_cmu_isp_sensor0", CLK_CON_GAT_MIF_CMU_ISP_SENSOR0, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_ISP_SENSOR1, "gout_mif_cmu_isp_sensor1",
+ "dout_mif_cmu_isp_sensor1", CLK_CON_GAT_MIF_CMU_ISP_SENSOR1, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_ISP_SENSOR2, "gout_mif_cmu_isp_sensor2",
+ "dout_mif_cmu_isp_sensor2", CLK_CON_GAT_MIF_CMU_ISP_SENSOR2, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_ISP_VRA, "gout_mif_cmu_isp_vra",
+ "dout_mif_cmu_isp_vra", CLK_CON_GAT_MIF_CMU_ISP_VRA, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_MFCMSCL_MFC, "gout_mif_cmu_mfcmscl_mfc",
+ "dout_mif_cmu_mfcmscl_mfc", CLK_CON_GAT_MIF_CMU_MFCMSCL_MFC, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_MFCMSCL_MSCL, "gout_mif_cmu_mfcmscl_mscl",
+ "dout_mif_cmu_mfcmscl_mscl", CLK_CON_GAT_MIF_CMU_MFCMSCL_MSCL, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_PERI_BUS, "gout_mif_cmu_peri_bus",
+ "dout_mif_cmu_peri_bus", CLK_CON_GAT_MIF_CMU_PERI_BUS, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_PERI_SPI0, "gout_mif_cmu_peri_spi0",
+ "dout_mif_cmu_peri_spi0", CLK_CON_GAT_MIF_CMU_PERI_SPI0, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_PERI_SPI2, "gout_mif_cmu_peri_spi2",
+ "dout_mif_cmu_peri_spi2", CLK_CON_GAT_MIF_CMU_PERI_SPI2, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_PERI_SPI1, "gout_mif_cmu_peri_spi1",
+ "dout_mif_cmu_peri_spi1", CLK_CON_GAT_MIF_CMU_PERI_SPI1, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_PERI_SPI4, "gout_mif_cmu_peri_spi4",
+ "dout_mif_cmu_peri_spi4", CLK_CON_GAT_MIF_CMU_PERI_SPI4, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_PERI_SPI3, "gout_mif_cmu_peri_spi3",
+ "dout_mif_cmu_peri_spi3", CLK_CON_GAT_MIF_CMU_PERI_SPI3, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_PERI_UART1, "gout_mif_cmu_peri_uart1",
+ "dout_mif_cmu_peri_uart1", CLK_CON_GAT_MIF_CMU_PERI_UART1, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_PERI_UART2, "gout_mif_cmu_peri_uart2",
+ "dout_mif_cmu_peri_uart2", CLK_CON_GAT_MIF_CMU_PERI_UART2, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CMU_PERI_UART0, "gout_mif_cmu_peri_uart0",
+ "dout_mif_cmu_peri_uart0", CLK_CON_GAT_MIF_CMU_PERI_UART0, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_HSI2C_AP_PCLKM, "gout_mif_hsi2c_ap_pclkm",
+ "dout_mif_hsi2c", CLK_CON_GAT_MIF_HSI2C_AP_PCLKM, 0,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_HSI2C_AP_PCLKS, "gout_mif_hsi2c_ap_pclks",
+ "dout_mif_apb", CLK_CON_GAT_MIF_HSI2C_AP_PCLKS, 14, CLK_IS_CRITICAL
+ | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_HSI2C_CP_PCLKM, "gout_mif_hsi2c_cp_pclkm",
+ "dout_mif_hsi2c", CLK_CON_GAT_MIF_HSI2C_CP_PCLKM, 1,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_HSI2C_CP_PCLKS, "gout_mif_hsi2c_cp_pclks",
+ "dout_mif_apb", CLK_CON_GAT_MIF_HSI2C_CP_PCLKS, 15, CLK_IS_CRITICAL
+ | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_HSI2C_IPCLK, "gout_mif_hsi2c_ipclk", "dout_mif_hsi2c",
+ CLK_CON_GAT_MIF_HSI2C_IPCLK, 2, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_HSI2C_ITCLK, "gout_mif_hsi2c_itclk", "dout_mif_hsi2c",
+ CLK_CON_GAT_MIF_HSI2C_ITCLK, 3, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CP_PCLK_HSI2C, "gout_mif_cp_pclk_hsi2c",
+ "dout_mif_hsi2c", CLK_CON_GAT_MIF_CP_PCLK_HSI2C, 6, CLK_IS_CRITICAL
+ | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CP_PCLK_HSI2C_BAT_0, "gout_mif_cp_pclk_hsi2c_bat_0",
+ "dout_mif_hsi2c", CLK_CON_GAT_MIF_CP_PCLK_HSI2C_BAT_0, 4,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_CP_PCLK_HSI2C_BAT_1, "gout_mif_cp_pclk_hsi2c_bat_1",
+ "dout_mif_hsi2c", CLK_CON_GAT_MIF_CP_PCLK_HSI2C_BAT_1, 5,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_WRAP_ADC_IF_OSC_SYS, "gout_mif_wrap_adc_if_osc_sys",
+ "oscclk", CLK_CON_GAT_MIF_WRAP_ADC_IF_OSC_SYS, 3,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_WRAP_ADC_IF_PCLK_S0, "gout_mif_wrap_adc_if_pclk_s0",
+ "dout_mif_apb", CLK_CON_GAT_MIF_WRAP_ADC_IF_PCLK_S0, 20,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_WRAP_ADC_IF_PCLK_S1, "gout_mif_wrap_adc_if_pclk_s1",
+ "dout_mif_apb", CLK_CON_GAT_MIF_WRAP_ADC_IF_PCLK_S1, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_BUS_PLL, "gout_mif_mux_bus_pll",
+ "gout_mif_mux_bus_pll_con", CLK_CON_GAT_MIF_MUX_BUS_PLL, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_BUS_PLL_CON, "gout_mif_mux_bus_pll_con",
+ "fout_mif_bus_pll", CLK_CON_GAT_MIF_MUX_BUS_PLL_CON, 12,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_DISPAUD_BUS, "gout_mif_mux_cmu_dispaud_bus",
+ "mout_mif_cmu_dispaud_bus", CLK_CON_GAT_MIF_MUX_CMU_DISPAUD_BUS,
+ 21, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_DISPAUD_DECON_ECLK,
+ "gout_mif_mux_cmu_dispaud_decon_eclk",
+ "mout_mif_cmu_dispaud_decon_eclk",
+ CLK_CON_GAT_MIF_MUX_CMU_DISPAUD_DECON_ECLK, 21, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_DISPAUD_DECON_VCLK,
+ "gout_mif_mux_cmu_dispaud_decon_vclk",
+ "mout_mif_cmu_dispaud_decon_vclk",
+ CLK_CON_GAT_MIF_MUX_CMU_DISPAUD_DECON_VCLK, 21, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_FSYS_BUS, "gout_mif_mux_cmu_fsys_bus",
+ "mout_mif_cmu_fsys_bus", CLK_CON_GAT_MIF_MUX_CMU_FSYS_BUS, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_FSYS_MMC0, "gout_mif_mux_cmu_fsys_mmc0",
+ "mout_mif_cmu_fsys_mmc0", CLK_CON_GAT_MIF_MUX_CMU_FSYS_MMC0, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_FSYS_MMC1, "gout_mif_mux_cmu_fsys_mmc1",
+ "mout_mif_cmu_fsys_mmc1", CLK_CON_GAT_MIF_MUX_CMU_FSYS_MMC1, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_FSYS_MMC2, "gout_mif_mux_cmu_fsys_mmc2",
+ "mout_mif_cmu_fsys_mmc2", CLK_CON_GAT_MIF_MUX_CMU_FSYS_MMC2, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_FSYS_USB20DRD_REFCLK,
+ "gout_mif_mux_cmu_fsys_usb20drd_refclk",
+ "mout_mif_cmu_fsys_usb20drd_refclk",
+ CLK_CON_GAT_MIF_MUX_CMU_FSYS_USB20DRD_REFCLK, 21, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_ISP_CAM, "gout_mif_mux_cmu_isp_cam",
+ "mout_mif_cmu_isp_cam", CLK_CON_GAT_MIF_MUX_CMU_ISP_CAM, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_ISP_ISP, "gout_mif_mux_cmu_isp_isp",
+ "mout_mif_cmu_isp_isp", CLK_CON_GAT_MIF_MUX_CMU_ISP_ISP, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_ISP_SENSOR0, "gout_mif_mux_cmu_isp_sensor0",
+ "mout_mif_cmu_isp_sensor0", CLK_CON_GAT_MIF_MUX_CMU_ISP_SENSOR0,
+ 21, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_ISP_SENSOR1, "gout_mif_mux_cmu_isp_sensor1",
+ "mout_mif_cmu_isp_sensor1", CLK_CON_GAT_MIF_MUX_CMU_ISP_SENSOR1,
+ 21, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_ISP_SENSOR2, "gout_mif_mux_cmu_isp_sensor2",
+ "mout_mif_cmu_isp_sensor2", CLK_CON_GAT_MIF_MUX_CMU_ISP_SENSOR2,
+ 21, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_ISP_VRA, "gout_mif_mux_cmu_isp_vra",
+ "mout_mif_cmu_isp_vra", CLK_CON_GAT_MIF_MUX_CMU_ISP_VRA, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_MFCMSCL_MFC, "gout_mif_mux_cmu_mfcmscl_mfc",
+ "mout_mif_cmu_mfcmscl_mfc", CLK_CON_GAT_MIF_MUX_CMU_MFCMSCL_MFC,
+ 21, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_MFCMSCL_MSCL, "gout_mif_mux_cmu_mfcmscl_mscl",
+ "mout_mif_cmu_mfcmscl_mscl", CLK_CON_GAT_MIF_MUX_CMU_MFCMSCL_MSCL,
+ 21, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_PERI_BUS, "gout_mif_mux_cmu_peri_bus",
+ "mout_mif_cmu_peri_bus", CLK_CON_GAT_MIF_MUX_CMU_PERI_BUS, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_PERI_SPI0, "gout_mif_mux_cmu_peri_spi0",
+ "mout_mif_cmu_peri_spi0", CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI0, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_PERI_SPI2, "gout_mif_mux_cmu_peri_spi2",
+ "mout_mif_cmu_peri_spi2", CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI2, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_PERI_SPI1, "gout_mif_mux_cmu_peri_spi1",
+ "mout_mif_cmu_peri_spi1", CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI1, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_PERI_SPI4, "gout_mif_mux_cmu_peri_spi4",
+ "mout_mif_cmu_peri_spi4", CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI4, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_PERI_SPI3, "gout_mif_mux_cmu_peri_spi3",
+ "mout_mif_cmu_peri_spi3", CLK_CON_GAT_MIF_MUX_CMU_PERI_SPI3, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_PERI_UART1, "gout_mif_mux_cmu_peri_uart1",
+ "mout_mif_cmu_peri_uart1", CLK_CON_GAT_MIF_MUX_CMU_PERI_UART1, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_PERI_UART2, "gout_mif_mux_cmu_peri_uart2",
+ "mout_mif_cmu_peri_uart2", CLK_CON_GAT_MIF_MUX_CMU_PERI_UART2, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_CMU_PERI_UART0, "gout_mif_mux_cmu_peri_uart0",
+ "mout_mif_cmu_peri_uart0", CLK_CON_GAT_MIF_MUX_CMU_PERI_UART0, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_BUSD, "gout_mif_mux_busd", "mout_mif_busd",
+ CLK_CON_GAT_MIF_MUX_BUSD, 21, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_MEDIA_PLL, "gout_mif_mux_media_pll",
+ "gout_mif_mux_media_pll_con", CLK_CON_GAT_MIF_MUX_MEDIA_PLL, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_MEDIA_PLL_CON, "gout_mif_mux_media_pll_con",
+ "fout_mif_media_pll", CLK_CON_GAT_MIF_MUX_MEDIA_PLL_CON, 12,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_MEM_PLL, "gout_mif_mux_mem_pll",
+ "gout_mif_mux_mem_pll_con", CLK_CON_GAT_MIF_MUX_MEM_PLL, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MIF_MUX_MEM_PLL_CON, "gout_mif_mux_mem_pll_con",
+ "fout_mif_mem_pll", CLK_CON_GAT_MIF_MUX_MEM_PLL_CON, 12,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+};
+
+static const struct samsung_cmu_info mif_cmu_info __initconst = {
+ .fixed_factor_clks = mif_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(mif_fixed_factor_clks),
+ .pll_clks = mif_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(mif_pll_clks),
+ .mux_clks = mif_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(mif_mux_clks),
+ .div_clks = mif_div_clks,
+ .nr_div_clks = ARRAY_SIZE(mif_div_clks),
+ .gate_clks = mif_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(mif_gate_clks),
+ .clk_regs = mif_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(mif_clk_regs),
+ .nr_clk_ids = MIF_NR_CLK,
+};
+
+/*
+ * Register offsets for CMU_DISPAUD (0x148d0000)
+ */
+#define PLL_LOCKTIME_DISPAUD_PLL 0x0000
+#define PLL_LOCKTIME_DISPAUD_AUD_PLL 0x00c0
+#define PLL_CON0_DISPAUD_PLL 0x0100
+#define PLL_CON0_DISPAUD_AUD_PLL 0x01c0
+#define CLK_CON_GAT_DISPAUD_MUX_PLL 0x0200
+#define CLK_CON_GAT_DISPAUD_MUX_PLL_CON 0x0200
+#define CLK_CON_GAT_DISPAUD_MUX_AUD_PLL 0x0204
+#define CLK_CON_GAT_DISPAUD_MUX_AUD_PLL_CON 0x0204
+#define CLK_CON_GAT_DISPAUD_MUX_BUS_USER 0x0210
+#define CLK_CON_MUX_DISPAUD_BUS_USER 0x0210
+#define CLK_CON_GAT_DISPAUD_MUX_DECON_VCLK_USER 0x0214
+#define CLK_CON_MUX_DISPAUD_DECON_VCLK_USER 0x0214
+#define CLK_CON_GAT_DISPAUD_MUX_DECON_ECLK_USER 0x0218
+#define CLK_CON_MUX_DISPAUD_DECON_ECLK_USER 0x0218
+#define CLK_CON_GAT_DISPAUD_MUX_DECON_VCLK 0x021c
+#define CLK_CON_MUX_DISPAUD_DECON_VCLK 0x021c
+#define CLK_CON_GAT_DISPAUD_MUX_DECON_ECLK 0x0220
+#define CLK_CON_MUX_DISPAUD_DECON_ECLK 0x0220
+#define CLK_CON_GAT_DISPAUD_MUX_MIPIPHY_TXBYTECLKHS_USER 0x0224
+#define CLK_CON_GAT_DISPAUD_MUX_MIPIPHY_TXBYTECLKHS_USER_CON 0x0224
+#define CLK_CON_GAT_DISPAUD_MUX_MIPIPHY_RXCLKESC0_USER 0x0228
+#define CLK_CON_GAT_DISPAUD_MUX_MIPIPHY_RXCLKESC0_USER_CON 0x0228
+#define CLK_CON_GAT_DISPAUD_MUX_MI2S 0x022c
+#define CLK_CON_MUX_DISPAUD_MI2S 0x022c
+#define CLK_CON_DIV_DISPAUD_APB 0x0400
+#define CLK_CON_DIV_DISPAUD_DECON_VCLK 0x0404
+#define CLK_CON_DIV_DISPAUD_DECON_ECLK 0x0408
+#define CLK_CON_DIV_DISPAUD_MI2S 0x040c
+#define CLK_CON_DIV_DISPAUD_MIXER 0x0410
+#define CLK_CON_GAT_DISPAUD_BUS 0x0810
+#define CLK_CON_GAT_DISPAUD_BUS_DISP 0x0810
+#define CLK_CON_GAT_DISPAUD_BUS_PPMU 0x0810
+#define CLK_CON_GAT_DISPAUD_APB_AUD 0x0814
+#define CLK_CON_GAT_DISPAUD_APB_AUD_AMP 0x0814
+#define CLK_CON_GAT_DISPAUD_APB_DISP 0x0814
+#define CLK_CON_GAT_DISPAUD_DECON_VCLK 0x081c
+#define CLK_CON_GAT_DISPAUD_DECON_ECLK 0x0820
+#define CLK_CON_GAT_DISPAUD_MI2S_AMP_I2SCODCLKI 0x082c
+#define CLK_CON_GAT_DISPAUD_MI2S_AUD_I2SCODCLKI 0x082c
+#define CLK_CON_GAT_DISPAUD_MIXER_AUD_SYSCLK 0x0830
+#define CLK_CON_GAT_DISPAUD_CON_EXT2AUD_BCK_GPIO_I2S 0x0834
+#define CLK_CON_GAT_DISPAUD_CON_AUD_I2S_BCLK_BT_IN 0x0838
+#define CLK_CON_GAT_DISPAUD_CON_CP2AUD_BCK 0x083c
+#define CLK_CON_GAT_DISPAUD_CON_AUD_I2S_BCLK_FM_IN 0x0840
+
+static const unsigned long dispaud_clk_regs[] __initconst = {
+ PLL_LOCKTIME_DISPAUD_PLL,
+ PLL_LOCKTIME_DISPAUD_AUD_PLL,
+ PLL_CON0_DISPAUD_PLL,
+ PLL_CON0_DISPAUD_AUD_PLL,
+ CLK_CON_GAT_DISPAUD_MUX_PLL,
+ CLK_CON_GAT_DISPAUD_MUX_PLL_CON,
+ CLK_CON_GAT_DISPAUD_MUX_AUD_PLL,
+ CLK_CON_GAT_DISPAUD_MUX_AUD_PLL_CON,
+ CLK_CON_GAT_DISPAUD_MUX_BUS_USER,
+ CLK_CON_MUX_DISPAUD_BUS_USER,
+ CLK_CON_GAT_DISPAUD_MUX_DECON_VCLK_USER,
+ CLK_CON_MUX_DISPAUD_DECON_VCLK_USER,
+ CLK_CON_GAT_DISPAUD_MUX_DECON_ECLK_USER,
+ CLK_CON_MUX_DISPAUD_DECON_ECLK_USER,
+ CLK_CON_GAT_DISPAUD_MUX_DECON_VCLK,
+ CLK_CON_MUX_DISPAUD_DECON_VCLK,
+ CLK_CON_GAT_DISPAUD_MUX_DECON_ECLK,
+ CLK_CON_MUX_DISPAUD_DECON_ECLK,
+ CLK_CON_GAT_DISPAUD_MUX_MIPIPHY_TXBYTECLKHS_USER,
+ CLK_CON_GAT_DISPAUD_MUX_MIPIPHY_TXBYTECLKHS_USER_CON,
+ CLK_CON_GAT_DISPAUD_MUX_MIPIPHY_RXCLKESC0_USER,
+ CLK_CON_GAT_DISPAUD_MUX_MIPIPHY_RXCLKESC0_USER_CON,
+ CLK_CON_GAT_DISPAUD_MUX_MI2S,
+ CLK_CON_MUX_DISPAUD_MI2S,
+ CLK_CON_DIV_DISPAUD_APB,
+ CLK_CON_DIV_DISPAUD_DECON_VCLK,
+ CLK_CON_DIV_DISPAUD_DECON_ECLK,
+ CLK_CON_DIV_DISPAUD_MI2S,
+ CLK_CON_DIV_DISPAUD_MIXER,
+ CLK_CON_GAT_DISPAUD_BUS,
+ CLK_CON_GAT_DISPAUD_BUS_DISP,
+ CLK_CON_GAT_DISPAUD_BUS_PPMU,
+ CLK_CON_GAT_DISPAUD_APB_AUD,
+ CLK_CON_GAT_DISPAUD_APB_AUD_AMP,
+ CLK_CON_GAT_DISPAUD_APB_DISP,
+ CLK_CON_GAT_DISPAUD_DECON_VCLK,
+ CLK_CON_GAT_DISPAUD_DECON_ECLK,
+ CLK_CON_GAT_DISPAUD_MI2S_AMP_I2SCODCLKI,
+ CLK_CON_GAT_DISPAUD_MI2S_AUD_I2SCODCLKI,
+ CLK_CON_GAT_DISPAUD_MIXER_AUD_SYSCLK,
+ CLK_CON_GAT_DISPAUD_CON_EXT2AUD_BCK_GPIO_I2S,
+ CLK_CON_GAT_DISPAUD_CON_AUD_I2S_BCLK_BT_IN,
+ CLK_CON_GAT_DISPAUD_CON_CP2AUD_BCK,
+ CLK_CON_GAT_DISPAUD_CON_AUD_I2S_BCLK_FM_IN,
+};
+
+static const struct samsung_fixed_rate_clock dispaud_fixed_clks[] __initconst = {
+ FRATE(0, "frat_dispaud_audiocdclk0", NULL, 0, 100000000),
+ FRATE(0, "frat_dispaud_mixer_bclk_bt", NULL, 0, 12500000),
+ FRATE(0, "frat_dispaud_mixer_bclk_cp", NULL, 0, 12500000),
+ FRATE(0, "frat_dispaud_mixer_bclk_fm", NULL, 0, 12500000),
+ FRATE(0, "frat_dispaud_mixer_sclk_ap", NULL, 0, 12500000),
+ FRATE(0, "frat_dispaud_mipiphy_rxclkesc0", NULL, 0, 188000000),
+ FRATE(0, "frat_dispaud_mipiphy_txbyteclkhs", NULL, 0, 188000000),
+};
+
+static const struct samsung_pll_clock dispaud_pll_clks[] __initconst = {
+ PLL(pll_1417x, CLK_FOUT_DISPAUD_AUD_PLL, "fout_dispaud_aud_pll",
+ "oscclk", PLL_LOCKTIME_DISPAUD_AUD_PLL, PLL_CON0_DISPAUD_AUD_PLL,
+ NULL),
+ PLL(pll_1417x, CLK_FOUT_DISPAUD_PLL, "fout_dispaud_pll", "oscclk",
+ PLL_LOCKTIME_DISPAUD_PLL, PLL_CON0_DISPAUD_PLL, NULL),
+};
+
+/* List of parent clocks for muxes in CMU_DISPAUD */
+PNAME(mout_dispaud_bus_user_p) = { "oscclk", "gout_mif_cmu_dispaud_bus" };
+PNAME(mout_dispaud_decon_eclk_user_p) = { "oscclk",
+ "gout_mif_cmu_dispaud_decon_eclk" };
+PNAME(mout_dispaud_decon_vclk_user_p) = { "oscclk",
+ "gout_mif_cmu_dispaud_decon_vclk" };
+PNAME(mout_dispaud_decon_eclk_p) = { "gout_dispaud_mux_decon_eclk_user",
+ "gout_dispaud_mux_pll_con" };
+PNAME(mout_dispaud_decon_vclk_p) = { "gout_dispaud_mux_decon_vclk_user",
+ "gout_dispaud_mux_pll_con" };
+PNAME(mout_dispaud_mi2s_p) = { "gout_dispaud_mux_aud_pll_con",
+ "frat_dispaud_audiocdclk0" };
+
+static const struct samsung_mux_clock dispaud_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_DISPAUD_BUS_USER, "mout_dispaud_bus_user",
+ mout_dispaud_bus_user_p, CLK_CON_MUX_DISPAUD_BUS_USER, 12, 1),
+ MUX(CLK_MOUT_DISPAUD_DECON_ECLK_USER, "mout_dispaud_decon_eclk_user",
+ mout_dispaud_decon_eclk_user_p, CLK_CON_MUX_DISPAUD_DECON_ECLK_USER,
+ 12, 1),
+ MUX(CLK_MOUT_DISPAUD_DECON_VCLK_USER, "mout_dispaud_decon_vclk_user",
+ mout_dispaud_decon_vclk_user_p, CLK_CON_MUX_DISPAUD_DECON_VCLK_USER,
+ 12, 1),
+ MUX(CLK_MOUT_DISPAUD_DECON_ECLK, "mout_dispaud_decon_eclk",
+ mout_dispaud_decon_eclk_p, CLK_CON_MUX_DISPAUD_DECON_ECLK, 12, 1),
+ MUX(CLK_MOUT_DISPAUD_DECON_VCLK, "mout_dispaud_decon_vclk",
+ mout_dispaud_decon_vclk_p, CLK_CON_MUX_DISPAUD_DECON_VCLK, 12, 1),
+ MUX(CLK_MOUT_DISPAUD_MI2S, "mout_dispaud_mi2s", mout_dispaud_mi2s_p,
+ CLK_CON_MUX_DISPAUD_MI2S, 12, 1),
+};
+
+static const struct samsung_div_clock dispaud_div_clks[] __initconst = {
+ DIV(CLK_DOUT_DISPAUD_APB, "dout_dispaud_apb",
+ "gout_dispaud_mux_bus_user", CLK_CON_DIV_DISPAUD_APB, 0, 2),
+ DIV(CLK_DOUT_DISPAUD_DECON_ECLK, "dout_dispaud_decon_eclk",
+ "gout_dispaud_mux_decon_eclk", CLK_CON_DIV_DISPAUD_DECON_ECLK, 0, 3),
+ DIV(CLK_DOUT_DISPAUD_DECON_VCLK, "dout_dispaud_decon_vclk",
+ "gout_dispaud_mux_decon_vclk", CLK_CON_DIV_DISPAUD_DECON_VCLK, 0, 3),
+ DIV(CLK_DOUT_DISPAUD_MI2S, "dout_dispaud_mi2s", "gout_dispaud_mux_mi2s",
+ CLK_CON_DIV_DISPAUD_MI2S, 0, 4),
+ DIV(CLK_DOUT_DISPAUD_MIXER, "dout_dispaud_mixer",
+ "gout_dispaud_mux_aud_pll_con", CLK_CON_DIV_DISPAUD_MIXER, 0, 4),
+};
+
+static const struct samsung_gate_clock dispaud_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_DISPAUD_BUS, "gout_dispaud_bus",
+ "gout_dispaud_mux_bus_user", CLK_CON_GAT_DISPAUD_BUS, 0,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_BUS_DISP, "gout_dispaud_bus_disp",
+ "gout_dispaud_mux_bus_user", CLK_CON_GAT_DISPAUD_BUS_DISP, 2,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_BUS_PPMU, "gout_dispaud_bus_ppmu",
+ "gout_dispaud_mux_bus_user", CLK_CON_GAT_DISPAUD_BUS_PPMU, 3,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_APB_AUD, "gout_dispaud_apb_aud",
+ "dout_dispaud_apb", CLK_CON_GAT_DISPAUD_APB_AUD, 2,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_APB_AUD_AMP, "gout_dispaud_apb_aud_amp",
+ "dout_dispaud_apb", CLK_CON_GAT_DISPAUD_APB_AUD_AMP, 3,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_APB_DISP, "gout_dispaud_apb_disp",
+ "dout_dispaud_apb", CLK_CON_GAT_DISPAUD_APB_DISP, 1,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_CON_AUD_I2S_BCLK_BT_IN,
+ "gout_dispaud_con_aud_i2s_bclk_bt_in",
+ "frat_dispaud_mixer_bclk_bt",
+ CLK_CON_GAT_DISPAUD_CON_AUD_I2S_BCLK_BT_IN, 0, CLK_SET_RATE_PARENT,
+ 0),
+ GATE(CLK_GOUT_DISPAUD_CON_AUD_I2S_BCLK_FM_IN,
+ "gout_dispaud_con_aud_i2s_bclk_fm_in",
+ "frat_dispaud_mixer_bclk_fm",
+ CLK_CON_GAT_DISPAUD_CON_AUD_I2S_BCLK_FM_IN, 0, CLK_SET_RATE_PARENT,
+ 0),
+ GATE(CLK_GOUT_DISPAUD_CON_CP2AUD_BCK, "gout_dispaud_con_cp2aud_bck",
+ "frat_dispaud_mixer_bclk_cp", CLK_CON_GAT_DISPAUD_CON_CP2AUD_BCK,
+ 0, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_CON_EXT2AUD_BCK_GPIO_I2S,
+ "gout_dispaud_con_ext2aud_bck_gpio_i2s",
+ "frat_dispaud_mixer_sclk_ap",
+ CLK_CON_GAT_DISPAUD_CON_EXT2AUD_BCK_GPIO_I2S, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_DECON_ECLK, "gout_dispaud_decon_eclk",
+ "dout_dispaud_decon_eclk", CLK_CON_GAT_DISPAUD_DECON_ECLK, 0,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_DECON_VCLK, "gout_dispaud_decon_vclk",
+ "dout_dispaud_decon_vclk", CLK_CON_GAT_DISPAUD_DECON_VCLK, 0,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MI2S_AMP_I2SCODCLKI,
+ "gout_dispaud_mi2s_amp_i2scodclki", "dout_dispaud_mi2s",
+ CLK_CON_GAT_DISPAUD_MI2S_AMP_I2SCODCLKI, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MI2S_AUD_I2SCODCLKI,
+ "gout_dispaud_mi2s_aud_i2scodclki", "dout_dispaud_mi2s",
+ CLK_CON_GAT_DISPAUD_MI2S_AUD_I2SCODCLKI, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MIXER_AUD_SYSCLK, "gout_dispaud_mixer_aud_sysclk",
+ "dout_dispaud_mixer", CLK_CON_GAT_DISPAUD_MIXER_AUD_SYSCLK, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MUX_AUD_PLL, "gout_dispaud_mux_aud_pll",
+ "gout_dispaud_mux_aud_pll_con", CLK_CON_GAT_DISPAUD_MUX_AUD_PLL,
+ 21, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MUX_AUD_PLL_CON, "gout_dispaud_mux_aud_pll_con",
+ "fout_dispaud_aud_pll", CLK_CON_GAT_DISPAUD_MUX_AUD_PLL_CON, 12,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MUX_BUS_USER, "gout_dispaud_mux_bus_user",
+ "mout_dispaud_bus_user", CLK_CON_GAT_DISPAUD_MUX_BUS_USER, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MUX_DECON_ECLK_USER,
+ "gout_dispaud_mux_decon_eclk_user", "mout_dispaud_decon_eclk_user",
+ CLK_CON_GAT_DISPAUD_MUX_DECON_ECLK_USER, 21, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MUX_DECON_VCLK_USER,
+ "gout_dispaud_mux_decon_vclk_user", "mout_dispaud_decon_vclk_user",
+ CLK_CON_GAT_DISPAUD_MUX_DECON_VCLK_USER, 21, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MUX_MIPIPHY_RXCLKESC0_USER,
+ "gout_dispaud_mux_mipiphy_rxclkesc0_user",
+ "gout_dispaud_mux_mipiphy_rxclkesc0_user_con",
+ CLK_CON_GAT_DISPAUD_MUX_MIPIPHY_RXCLKESC0_USER, 21, CLK_IS_CRITICAL
+ | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MUX_MIPIPHY_RXCLKESC0_USER_CON,
+ "gout_dispaud_mux_mipiphy_rxclkesc0_user_con",
+ "frat_dispaud_mipiphy_rxclkesc0",
+ CLK_CON_GAT_DISPAUD_MUX_MIPIPHY_RXCLKESC0_USER_CON, 12,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MUX_MIPIPHY_TXBYTECLKHS_USER,
+ "gout_dispaud_mux_mipiphy_txbyteclkhs_user",
+ "gout_dispaud_mux_mipiphy_txbyteclkhs_user_con",
+ CLK_CON_GAT_DISPAUD_MUX_MIPIPHY_TXBYTECLKHS_USER, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MUX_MIPIPHY_TXBYTECLKHS_USER_CON,
+ "gout_dispaud_mux_mipiphy_txbyteclkhs_user_con",
+ "frat_dispaud_mipiphy_txbyteclkhs",
+ CLK_CON_GAT_DISPAUD_MUX_MIPIPHY_TXBYTECLKHS_USER_CON, 12,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MUX_DECON_ECLK, "gout_dispaud_mux_decon_eclk",
+ "mout_dispaud_decon_eclk", CLK_CON_GAT_DISPAUD_MUX_DECON_ECLK, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MUX_DECON_VCLK, "gout_dispaud_mux_decon_vclk",
+ "mout_dispaud_decon_vclk", CLK_CON_GAT_DISPAUD_MUX_DECON_VCLK, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MUX_MI2S, "gout_dispaud_mux_mi2s",
+ "mout_dispaud_mi2s", CLK_CON_GAT_DISPAUD_MUX_MI2S, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MUX_PLL, "gout_dispaud_mux_pll",
+ "gout_dispaud_mux_pll_con", CLK_CON_GAT_DISPAUD_MUX_PLL, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_DISPAUD_MUX_PLL_CON, "gout_dispaud_mux_pll_con",
+ "fout_dispaud_pll", CLK_CON_GAT_DISPAUD_MUX_PLL_CON, 12,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+};
+
+static const struct samsung_cmu_info dispaud_cmu_info __initconst = {
+ .fixed_clks = dispaud_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(dispaud_fixed_clks),
+ .pll_clks = dispaud_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(dispaud_pll_clks),
+ .mux_clks = dispaud_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(dispaud_mux_clks),
+ .div_clks = dispaud_div_clks,
+ .nr_div_clks = ARRAY_SIZE(dispaud_div_clks),
+ .gate_clks = dispaud_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(dispaud_gate_clks),
+ .clk_regs = dispaud_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(dispaud_clk_regs),
+ .nr_clk_ids = DISPAUD_NR_CLK,
+};
+
+/*
+ * Register offsets for CMU_FSYS (0x13730000)
+ */
+#define PLL_LOCKTIME_FSYS_USB_PLL 0x0000
+#define PLL_CON0_FSYS_USB_PLL 0x0100
+#define CLK_CON_GAT_FSYS_MUX_USB_PLL 0x0200
+#define CLK_CON_GAT_FSYS_MUX_USB_PLL_CON 0x0200
+#define CLK_CON_GAT_FSYS_MUX_USB20DRD_PHYCLOCK_USER 0x0230
+#define CLK_CON_GAT_FSYS_MUX_USB20DRD_PHYCLOCK_USER_CON 0x0230
+#define CLK_CON_GAT_FSYS_BUSP3_HCLK 0x0804
+#define CLK_CON_GAT_FSYS_MMC0_ACLK 0x0804
+#define CLK_CON_GAT_FSYS_MMC1_ACLK 0x0804
+#define CLK_CON_GAT_FSYS_MMC2_ACLK 0x0804
+#define CLK_CON_GAT_FSYS_PDMA0_ACLK_PDMA0 0x0804
+#define CLK_CON_GAT_FSYS_PPMU_ACLK 0x0804
+#define CLK_CON_GAT_FSYS_PPMU_PCLK 0x0804
+#define CLK_CON_GAT_FSYS_SROMC_HCLK 0x0804
+#define CLK_CON_GAT_FSYS_UPSIZER_BUS1_ACLK 0x0804
+#define CLK_CON_GAT_FSYS_USB20DRD_ACLK_HSDRD 0x0804
+#define CLK_CON_GAT_FSYS_USB20DRD_HCLK_USB20_CTRL 0x0804
+#define CLK_CON_GAT_FSYS_USB20DRD_HSDRD_REF_CLK 0x0828
+
+static const unsigned long fsys_clk_regs[] __initconst = {
+ PLL_LOCKTIME_FSYS_USB_PLL,
+ PLL_CON0_FSYS_USB_PLL,
+ CLK_CON_GAT_FSYS_MUX_USB_PLL,
+ CLK_CON_GAT_FSYS_MUX_USB_PLL_CON,
+ CLK_CON_GAT_FSYS_MUX_USB20DRD_PHYCLOCK_USER,
+ CLK_CON_GAT_FSYS_MUX_USB20DRD_PHYCLOCK_USER_CON,
+ CLK_CON_GAT_FSYS_BUSP3_HCLK,
+ CLK_CON_GAT_FSYS_MMC0_ACLK,
+ CLK_CON_GAT_FSYS_MMC1_ACLK,
+ CLK_CON_GAT_FSYS_MMC2_ACLK,
+ CLK_CON_GAT_FSYS_PDMA0_ACLK_PDMA0,
+ CLK_CON_GAT_FSYS_PPMU_ACLK,
+ CLK_CON_GAT_FSYS_PPMU_PCLK,
+ CLK_CON_GAT_FSYS_SROMC_HCLK,
+ CLK_CON_GAT_FSYS_UPSIZER_BUS1_ACLK,
+ CLK_CON_GAT_FSYS_USB20DRD_ACLK_HSDRD,
+ CLK_CON_GAT_FSYS_USB20DRD_HCLK_USB20_CTRL,
+ CLK_CON_GAT_FSYS_USB20DRD_HSDRD_REF_CLK,
+};
+
+static const struct samsung_fixed_rate_clock fsys_fixed_clks[] __initconst = {
+ FRATE(0, "frat_fsys_usb20drd_phyclock", NULL, 0, 60000000),
+};
+
+static const struct samsung_pll_clock fsys_pll_clks[] __initconst = {
+ PLL(pll_1417x, CLK_FOUT_FSYS_USB_PLL, "fout_fsys_usb_pll", "oscclk",
+ PLL_LOCKTIME_FSYS_USB_PLL, PLL_CON0_FSYS_USB_PLL, NULL),
+};
+
+static const struct samsung_gate_clock fsys_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_FSYS_BUSP3_HCLK, "gout_fsys_busp3_hclk",
+ "gout_mif_cmu_fsys_bus", CLK_CON_GAT_FSYS_BUSP3_HCLK, 2,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_MMC0_ACLK, "gout_fsys_mmc0_aclk",
+ "gout_fsys_busp3_hclk", CLK_CON_GAT_FSYS_MMC0_ACLK, 8,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_MMC1_ACLK, "gout_fsys_mmc1_aclk",
+ "gout_fsys_busp3_hclk", CLK_CON_GAT_FSYS_MMC1_ACLK, 9,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_MMC2_ACLK, "gout_fsys_mmc2_aclk",
+ "gout_fsys_busp3_hclk", CLK_CON_GAT_FSYS_MMC2_ACLK, 10,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_PDMA0_ACLK_PDMA0, "gout_fsys_pdma0_aclk_pdma0",
+ "gout_fsys_upsizer_bus1_aclk", CLK_CON_GAT_FSYS_PDMA0_ACLK_PDMA0,
+ 7, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_PPMU_ACLK, "gout_fsys_ppmu_aclk",
+ "gout_mif_cmu_fsys_bus", CLK_CON_GAT_FSYS_PPMU_ACLK, 17,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_PPMU_PCLK, "gout_fsys_ppmu_pclk",
+ "gout_mif_cmu_fsys_bus", CLK_CON_GAT_FSYS_PPMU_PCLK, 18,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_SROMC_HCLK, "gout_fsys_sromc_hclk",
+ "gout_fsys_busp3_hclk", CLK_CON_GAT_FSYS_SROMC_HCLK, 6,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_UPSIZER_BUS1_ACLK, "gout_fsys_upsizer_bus1_aclk",
+ "gout_mif_cmu_fsys_bus", CLK_CON_GAT_FSYS_UPSIZER_BUS1_ACLK, 12,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_USB20DRD_ACLK_HSDRD, "gout_fsys_usb20drd_aclk_hsdrd",
+ "gout_fsys_busp3_hclk", CLK_CON_GAT_FSYS_USB20DRD_ACLK_HSDRD, 20,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_USB20DRD_HCLK_USB20_CTRL,
+ "gout_fsys_usb20drd_hclk_usb20_ctrl", "gout_fsys_busp3_hclk",
+ CLK_CON_GAT_FSYS_USB20DRD_HCLK_USB20_CTRL, 21, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_USB20DRD_HSDRD_REF_CLK,
+ "gout_fsys_usb20drd_hsdrd_ref_clk",
+ "gout_mif_cmu_fsys_usb20drd_refclk",
+ CLK_CON_GAT_FSYS_USB20DRD_HSDRD_REF_CLK, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_MUX_USB20DRD_PHYCLOCK_USER,
+ "gout_fsys_mux_usb20drd_phyclock_user",
+ "gout_fsys_mux_usb20drd_phyclock_user_con",
+ CLK_CON_GAT_FSYS_MUX_USB20DRD_PHYCLOCK_USER, 21, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_MUX_USB20DRD_PHYCLOCK_USER_CON,
+ "gout_fsys_mux_usb20drd_phyclock_user_con",
+ "frat_fsys_usb20drd_phyclock",
+ CLK_CON_GAT_FSYS_MUX_USB20DRD_PHYCLOCK_USER_CON, 12,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_MUX_USB_PLL, "gout_fsys_mux_usb_pll",
+ "gout_fsys_mux_usb_pll_con", CLK_CON_GAT_FSYS_MUX_USB_PLL, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_MUX_USB_PLL_CON, "gout_fsys_mux_usb_pll_con",
+ "fout_fsys_usb_pll", CLK_CON_GAT_FSYS_MUX_USB_PLL_CON, 12,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+};
+
+static const struct samsung_cmu_info fsys_cmu_info __initconst = {
+ .fixed_clks = fsys_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(fsys_fixed_clks),
+ .pll_clks = fsys_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(fsys_pll_clks),
+ .gate_clks = fsys_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(fsys_gate_clks),
+ .clk_regs = fsys_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(fsys_clk_regs),
+ .nr_clk_ids = FSYS_NR_CLK,
+};
+
+/*
+ * Register offsets for CMU_G3D (0x11460000)
+ */
+#define PLL_LOCKTIME_G3D_PLL 0x0000
+#define PLL_CON0_G3D_PLL 0x0100
+#define CLK_CON_GAT_G3D_MUX_PLL 0x0200
+#define CLK_CON_GAT_G3D_MUX_PLL_CON 0x0200
+#define CLK_CON_GAT_G3D_MUX_SWITCH_USER 0x0204
+#define CLK_CON_MUX_G3D_SWITCH_USER 0x0204
+#define CLK_CON_GAT_G3D_MUX 0x0208
+#define CLK_CON_MUX_G3D 0x0208
+#define CLK_CON_DIV_G3D_BUS 0x0400
+#define CLK_CON_DIV_G3D_APB 0x0404
+#define CLK_CON_GAT_G3D_ASYNCS_D0_CLK 0x0804
+#define CLK_CON_GAT_G3D_ASYNC_PCLKM 0x0804
+#define CLK_CON_GAT_G3D_CLK 0x0804
+#define CLK_CON_GAT_G3D_PPMU_ACLK 0x0804
+#define CLK_CON_GAT_G3D_QE_ACLK 0x0804
+#define CLK_CON_GAT_G3D_PPMU_PCLK 0x0808
+#define CLK_CON_GAT_G3D_QE_PCLK 0x0808
+#define CLK_CON_GAT_G3D_SYSREG_PCLK 0x0808
+
+static const unsigned long g3d_clk_regs[] __initconst = {
+ PLL_LOCKTIME_G3D_PLL,
+ PLL_CON0_G3D_PLL,
+ CLK_CON_GAT_G3D_MUX_PLL,
+ CLK_CON_GAT_G3D_MUX_PLL_CON,
+ CLK_CON_GAT_G3D_MUX_SWITCH_USER,
+ CLK_CON_MUX_G3D_SWITCH_USER,
+ CLK_CON_GAT_G3D_MUX,
+ CLK_CON_MUX_G3D,
+ CLK_CON_DIV_G3D_BUS,
+ CLK_CON_DIV_G3D_APB,
+ CLK_CON_GAT_G3D_ASYNCS_D0_CLK,
+ CLK_CON_GAT_G3D_ASYNC_PCLKM,
+ CLK_CON_GAT_G3D_CLK,
+ CLK_CON_GAT_G3D_PPMU_ACLK,
+ CLK_CON_GAT_G3D_QE_ACLK,
+ CLK_CON_GAT_G3D_PPMU_PCLK,
+ CLK_CON_GAT_G3D_QE_PCLK,
+ CLK_CON_GAT_G3D_SYSREG_PCLK,
+};
+
+static const struct samsung_pll_clock g3d_pll_clks[] __initconst = {
+ PLL(pll_1417x, CLK_FOUT_G3D_PLL, "fout_g3d_pll", "oscclk",
+ PLL_LOCKTIME_G3D_PLL, PLL_CON0_G3D_PLL, NULL),
+};
+
+/* List of parent clocks for muxes in CMU_G3D */
+PNAME(mout_g3d_switch_user_p) = { "oscclk", "gout_mif_cmu_g3d_switch" };
+PNAME(mout_g3d_p) = { "gout_g3d_mux_pll_con",
+ "gout_g3d_mux_switch_user" };
+
+static const struct samsung_mux_clock g3d_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_G3D_SWITCH_USER, "mout_g3d_switch_user",
+ mout_g3d_switch_user_p, CLK_CON_MUX_G3D_SWITCH_USER, 12, 1),
+ MUX(CLK_MOUT_G3D, "mout_g3d", mout_g3d_p, CLK_CON_MUX_G3D, 12, 1),
+};
+
+static const struct samsung_div_clock g3d_div_clks[] __initconst = {
+ DIV(CLK_DOUT_G3D_APB, "dout_g3d_apb", "dout_g3d_bus",
+ CLK_CON_DIV_G3D_APB, 0, 3),
+ DIV(CLK_DOUT_G3D_BUS, "dout_g3d_bus", "gout_g3d_mux",
+ CLK_CON_DIV_G3D_BUS, 0, 3),
+};
+
+static const struct samsung_gate_clock g3d_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_G3D_ASYNCS_D0_CLK, "gout_g3d_asyncs_d0_clk",
+ "dout_g3d_bus", CLK_CON_GAT_G3D_ASYNCS_D0_CLK, 1, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_G3D_ASYNC_PCLKM, "gout_g3d_async_pclkm", "dout_g3d_bus",
+ CLK_CON_GAT_G3D_ASYNC_PCLKM, 0, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_G3D_CLK, "gout_g3d_clk", "dout_g3d_bus",
+ CLK_CON_GAT_G3D_CLK, 6, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_G3D_PPMU_ACLK, "gout_g3d_ppmu_aclk", "dout_g3d_bus",
+ CLK_CON_GAT_G3D_PPMU_ACLK, 7, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_G3D_PPMU_PCLK, "gout_g3d_ppmu_pclk", "dout_g3d_apb",
+ CLK_CON_GAT_G3D_PPMU_PCLK, 4, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_G3D_QE_ACLK, "gout_g3d_qe_aclk", "dout_g3d_bus",
+ CLK_CON_GAT_G3D_QE_ACLK, 8, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ 0),
+ GATE(CLK_GOUT_G3D_QE_PCLK, "gout_g3d_qe_pclk", "dout_g3d_apb",
+ CLK_CON_GAT_G3D_QE_PCLK, 5, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ 0),
+ GATE(CLK_GOUT_G3D_SYSREG_PCLK, "gout_g3d_sysreg_pclk", "dout_g3d_apb",
+ CLK_CON_GAT_G3D_SYSREG_PCLK, 6, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_G3D_MUX_SWITCH_USER, "gout_g3d_mux_switch_user",
+ "mout_g3d_switch_user", CLK_CON_GAT_G3D_MUX_SWITCH_USER, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_G3D_MUX, "gout_g3d_mux", "mout_g3d", CLK_CON_GAT_G3D_MUX,
+ 21, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_G3D_MUX_PLL, "gout_g3d_mux_pll", "gout_g3d_mux_pll_con",
+ CLK_CON_GAT_G3D_MUX_PLL, 21, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ 0),
+ GATE(CLK_GOUT_G3D_MUX_PLL_CON, "gout_g3d_mux_pll_con", "fout_g3d_pll",
+ CLK_CON_GAT_G3D_MUX_PLL_CON, 12, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+};
+
+static const struct samsung_cmu_info g3d_cmu_info __initconst = {
+ .pll_clks = g3d_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(g3d_pll_clks),
+ .mux_clks = g3d_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(g3d_mux_clks),
+ .div_clks = g3d_div_clks,
+ .nr_div_clks = ARRAY_SIZE(g3d_div_clks),
+ .gate_clks = g3d_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(g3d_gate_clks),
+ .clk_regs = g3d_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(g3d_clk_regs),
+ .nr_clk_ids = G3D_NR_CLK,
+};
+
+/*
+ * Register offsets for CMU_ISP (0x144d0000)
+ */
+#define PLL_LOCKTIME_ISP_PLL 0x0000
+#define PLL_CON0_ISP_PLL 0x0100
+#define CLK_CON_GAT_ISP_MUX_PLL 0x0200
+#define CLK_CON_GAT_ISP_MUX_PLL_CON 0x0200
+#define CLK_CON_GAT_ISP_MUX_VRA_USER 0x0210
+#define CLK_CON_MUX_ISP_VRA_USER 0x0210
+#define CLK_CON_GAT_ISP_MUX_CAM_USER 0x0214
+#define CLK_CON_MUX_ISP_CAM_USER 0x0214
+#define CLK_CON_GAT_ISP_MUX_USER 0x0218
+#define CLK_CON_MUX_ISP_USER 0x0218
+#define CLK_CON_GAT_ISP_MUX_VRA 0x0220
+#define CLK_CON_MUX_ISP_VRA 0x0220
+#define CLK_CON_GAT_ISP_MUX_CAM 0x0224
+#define CLK_CON_MUX_ISP_CAM 0x0224
+#define CLK_CON_GAT_ISP_MUX_ISP 0x0228
+#define CLK_CON_MUX_ISP_ISP 0x0228
+#define CLK_CON_GAT_ISP_MUX_ISPD 0x022c
+#define CLK_CON_MUX_ISP_ISPD 0x022c
+#define CLK_CON_GAT_ISP_MUX_RXBYTECLKHS0_SENSOR0_USER 0x0230
+#define CLK_CON_GAT_ISP_MUX_RXBYTECLKHS0_SENSOR0_USER_CON 0x0230
+#define CLK_CON_GAT_ISP_MUX_RXBYTECLKHS0_SENSOR1_USER 0x0234
+#define CLK_CON_GAT_ISP_MUX_RXBYTECLKHS0_SENSOR1_USER_CON 0x0234
+#define CLK_CON_DIV_ISP_APB 0x0400
+#define CLK_CON_DIV_ISP_CAM_HALF 0x0404
+#define CLK_CON_GAT_ISP_VRA 0x0810
+#define CLK_CON_GAT_ISP_ISPD 0x0818
+#define CLK_CON_GAT_ISP_ISPD_PPMU 0x0818
+#define CLK_CON_GAT_ISP_CAM 0x081c
+#define CLK_CON_GAT_ISP_CAM_HALF 0x0820
+
+static const unsigned long isp_clk_regs[] __initconst = {
+ PLL_LOCKTIME_ISP_PLL,
+ PLL_CON0_ISP_PLL,
+ CLK_CON_GAT_ISP_MUX_PLL,
+ CLK_CON_GAT_ISP_MUX_PLL_CON,
+ CLK_CON_GAT_ISP_MUX_VRA_USER,
+ CLK_CON_MUX_ISP_VRA_USER,
+ CLK_CON_GAT_ISP_MUX_CAM_USER,
+ CLK_CON_MUX_ISP_CAM_USER,
+ CLK_CON_GAT_ISP_MUX_USER,
+ CLK_CON_MUX_ISP_USER,
+ CLK_CON_GAT_ISP_MUX_VRA,
+ CLK_CON_MUX_ISP_VRA,
+ CLK_CON_GAT_ISP_MUX_CAM,
+ CLK_CON_MUX_ISP_CAM,
+ CLK_CON_GAT_ISP_MUX_ISP,
+ CLK_CON_MUX_ISP_ISP,
+ CLK_CON_GAT_ISP_MUX_ISPD,
+ CLK_CON_MUX_ISP_ISPD,
+ CLK_CON_GAT_ISP_MUX_RXBYTECLKHS0_SENSOR0_USER,
+ CLK_CON_GAT_ISP_MUX_RXBYTECLKHS0_SENSOR0_USER_CON,
+ CLK_CON_GAT_ISP_MUX_RXBYTECLKHS0_SENSOR1_USER,
+ CLK_CON_GAT_ISP_MUX_RXBYTECLKHS0_SENSOR1_USER_CON,
+ CLK_CON_DIV_ISP_APB,
+ CLK_CON_DIV_ISP_CAM_HALF,
+ CLK_CON_GAT_ISP_VRA,
+ CLK_CON_GAT_ISP_ISPD,
+ CLK_CON_GAT_ISP_ISPD_PPMU,
+ CLK_CON_GAT_ISP_CAM,
+ CLK_CON_GAT_ISP_CAM_HALF,
+};
+
+static const struct samsung_fixed_rate_clock isp_fixed_clks[] __initconst = {
+ FRATE(0, "frat_isp_rxbyteclkhs0_sensor0", NULL, 0, 188000000),
+ FRATE(0, "frat_isp_rxbyteclkhs0_sensor1", NULL, 0, 188000000),
+};
+
+static const struct samsung_pll_clock isp_pll_clks[] __initconst = {
+ PLL(pll_1417x, CLK_FOUT_ISP_PLL, "fout_isp_pll", "oscclk",
+ PLL_LOCKTIME_ISP_PLL, PLL_CON0_ISP_PLL, NULL),
+};
+
+/* List of parent clocks for muxes in CMU_ISP */
+PNAME(mout_isp_cam_user_p) = { "oscclk", "gout_mif_cmu_isp_cam" };
+PNAME(mout_isp_user_p) = { "oscclk", "gout_mif_cmu_isp_isp" };
+PNAME(mout_isp_vra_user_p) = { "oscclk", "gout_mif_cmu_isp_vra" };
+PNAME(mout_isp_cam_p) = { "gout_isp_mux_cam_user",
+ "gout_isp_mux_pll_con" };
+PNAME(mout_isp_isp_p) = { "gout_isp_mux_user", "gout_isp_mux_pll_con" };
+PNAME(mout_isp_ispd_p) = { "gout_isp_mux_vra", "gout_isp_mux_cam" };
+PNAME(mout_isp_vra_p) = { "gout_isp_mux_vra_user",
+ "gout_isp_mux_pll_con" };
+
+static const struct samsung_mux_clock isp_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_ISP_CAM_USER, "mout_isp_cam_user", mout_isp_cam_user_p,
+ CLK_CON_MUX_ISP_CAM_USER, 12, 1),
+ MUX(CLK_MOUT_ISP_USER, "mout_isp_user", mout_isp_user_p,
+ CLK_CON_MUX_ISP_USER, 12, 1),
+ MUX(CLK_MOUT_ISP_VRA_USER, "mout_isp_vra_user", mout_isp_vra_user_p,
+ CLK_CON_MUX_ISP_VRA_USER, 12, 1),
+ MUX(CLK_MOUT_ISP_CAM, "mout_isp_cam", mout_isp_cam_p,
+ CLK_CON_MUX_ISP_CAM, 12, 1),
+ MUX(CLK_MOUT_ISP_ISP, "mout_isp_isp", mout_isp_isp_p,
+ CLK_CON_MUX_ISP_ISP, 12, 1),
+ MUX(CLK_MOUT_ISP_ISPD, "mout_isp_ispd", mout_isp_ispd_p,
+ CLK_CON_MUX_ISP_ISPD, 12, 1),
+ MUX(CLK_MOUT_ISP_VRA, "mout_isp_vra", mout_isp_vra_p,
+ CLK_CON_MUX_ISP_VRA, 12, 1),
+};
+
+static const struct samsung_div_clock isp_div_clks[] __initconst = {
+ DIV(CLK_DOUT_ISP_APB, "dout_isp_apb", "gout_isp_mux_vra",
+ CLK_CON_DIV_ISP_APB, 0, 2),
+ DIV(CLK_DOUT_ISP_CAM_HALF, "dout_isp_cam_half", "gout_isp_mux_cam",
+ CLK_CON_DIV_ISP_CAM_HALF, 0, 2),
+};
+
+static const struct samsung_gate_clock isp_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_ISP_CAM, "gout_isp_cam", "gout_isp_mux_cam",
+ CLK_CON_GAT_ISP_CAM, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_ISP_CAM_HALF, "gout_isp_cam_half", "dout_isp_cam_half",
+ CLK_CON_GAT_ISP_CAM_HALF, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_ISP_ISPD, "gout_isp_ispd", "gout_isp_mux_ispd",
+ CLK_CON_GAT_ISP_ISPD, 0, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_ISP_ISPD_PPMU, "gout_isp_ispd_ppmu", "gout_isp_mux_ispd",
+ CLK_CON_GAT_ISP_ISPD_PPMU, 1, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_ISP_VRA, "gout_isp_vra", "gout_isp_mux_vra",
+ CLK_CON_GAT_ISP_VRA, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_ISP_MUX_CAM_USER, "gout_isp_mux_cam_user",
+ "mout_isp_cam_user", CLK_CON_GAT_ISP_MUX_CAM_USER, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_ISP_MUX_USER, "gout_isp_mux_user", "mout_isp_user",
+ CLK_CON_GAT_ISP_MUX_USER, 21, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_ISP_MUX_VRA_USER, "gout_isp_mux_vra_user",
+ "mout_isp_vra_user", CLK_CON_GAT_ISP_MUX_VRA_USER, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_ISP_MUX_RXBYTECLKHS0_SENSOR1_USER,
+ "gout_isp_mux_rxbyteclkhs0_sensor1_user",
+ "gout_isp_mux_rxbyteclkhs0_sensor1_user_con",
+ CLK_CON_GAT_ISP_MUX_RXBYTECLKHS0_SENSOR1_USER, 21, CLK_IS_CRITICAL
+ | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_ISP_MUX_RXBYTECLKHS0_SENSOR1_USER_CON,
+ "gout_isp_mux_rxbyteclkhs0_sensor1_user_con",
+ "frat_isp_rxbyteclkhs0_sensor1",
+ CLK_CON_GAT_ISP_MUX_RXBYTECLKHS0_SENSOR1_USER_CON, 12,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_ISP_MUX_RXBYTECLKHS0_SENSOR0_USER,
+ "gout_isp_mux_rxbyteclkhs0_sensor0_user",
+ "gout_isp_mux_rxbyteclkhs0_sensor0_user_con",
+ CLK_CON_GAT_ISP_MUX_RXBYTECLKHS0_SENSOR0_USER, 21, CLK_IS_CRITICAL
+ | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_ISP_MUX_RXBYTECLKHS0_SENSOR0_USER_CON,
+ "gout_isp_mux_rxbyteclkhs0_sensor0_user_con",
+ "frat_isp_rxbyteclkhs0_sensor0",
+ CLK_CON_GAT_ISP_MUX_RXBYTECLKHS0_SENSOR0_USER_CON, 12,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_ISP_MUX_CAM, "gout_isp_mux_cam", "mout_isp_cam",
+ CLK_CON_GAT_ISP_MUX_CAM, 21, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ 0),
+ GATE(CLK_GOUT_ISP_MUX_ISP, "gout_isp_mux_isp", "mout_isp_isp",
+ CLK_CON_GAT_ISP_MUX_ISP, 21, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ 0),
+ GATE(CLK_GOUT_ISP_MUX_ISPD, "gout_isp_mux_ispd", "mout_isp_ispd",
+ CLK_CON_GAT_ISP_MUX_ISPD, 21, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_ISP_MUX_VRA, "gout_isp_mux_vra", "mout_isp_vra",
+ CLK_CON_GAT_ISP_MUX_VRA, 21, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ 0),
+ GATE(CLK_GOUT_ISP_MUX_PLL, "gout_isp_mux_pll", "gout_isp_mux_pll_con",
+ CLK_CON_GAT_ISP_MUX_PLL, 21, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ 0),
+ GATE(CLK_GOUT_ISP_MUX_PLL_CON, "gout_isp_mux_pll_con", "fout_isp_pll",
+ CLK_CON_GAT_ISP_MUX_PLL_CON, 12, CLK_IS_CRITICAL |
+ CLK_SET_RATE_PARENT, 0),
+};
+
+static const struct samsung_cmu_info isp_cmu_info __initconst = {
+ .fixed_clks = isp_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(isp_fixed_clks),
+ .pll_clks = isp_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(isp_pll_clks),
+ .mux_clks = isp_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(isp_mux_clks),
+ .div_clks = isp_div_clks,
+ .nr_div_clks = ARRAY_SIZE(isp_div_clks),
+ .gate_clks = isp_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(isp_gate_clks),
+ .clk_regs = isp_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(isp_clk_regs),
+ .nr_clk_ids = ISP_NR_CLK,
+};
+
+/*
+ * Register offsets for CMU_MFCMSCL (0x12cb0000)
+ */
+#define CLK_CON_GAT_MFCMSCL_MUX_MSCL_USER 0x0200
+#define CLK_CON_MUX_MFCMSCL_MSCL_USER 0x0200
+#define CLK_CON_GAT_MFCMSCL_MUX_MFC_USER 0x0204
+#define CLK_CON_MUX_MFCMSCL_MFC_USER 0x0204
+#define CLK_CON_DIV_MFCMSCL_APB 0x0400
+#define CLK_CON_GAT_MFCMSCL_MSCL 0x0804
+#define CLK_CON_GAT_MFCMSCL_MSCL_BI 0x0804
+#define CLK_CON_GAT_MFCMSCL_MSCL_D 0x0804
+#define CLK_CON_GAT_MFCMSCL_MSCL_JPEG 0x0804
+#define CLK_CON_GAT_MFCMSCL_MSCL_POLY 0x0804
+#define CLK_CON_GAT_MFCMSCL_MSCL_PPMU 0x0804
+#define CLK_CON_GAT_MFCMSCL_MFC 0x0808
+
+static const unsigned long mfcmscl_clk_regs[] __initconst = {
+ CLK_CON_GAT_MFCMSCL_MUX_MSCL_USER,
+ CLK_CON_MUX_MFCMSCL_MSCL_USER,
+ CLK_CON_GAT_MFCMSCL_MUX_MFC_USER,
+ CLK_CON_MUX_MFCMSCL_MFC_USER,
+ CLK_CON_DIV_MFCMSCL_APB,
+ CLK_CON_GAT_MFCMSCL_MSCL,
+ CLK_CON_GAT_MFCMSCL_MSCL_BI,
+ CLK_CON_GAT_MFCMSCL_MSCL_D,
+ CLK_CON_GAT_MFCMSCL_MSCL_JPEG,
+ CLK_CON_GAT_MFCMSCL_MSCL_POLY,
+ CLK_CON_GAT_MFCMSCL_MSCL_PPMU,
+ CLK_CON_GAT_MFCMSCL_MFC,
+};
+
+/* List of parent clocks for muxes in CMU_MFCMSCL */
+PNAME(mout_mfcmscl_mfc_user_p) = { "oscclk", "gout_mif_cmu_mfcmscl_mfc" };
+PNAME(mout_mfcmscl_mscl_user_p) = { "oscclk", "gout_mif_cmu_mfcmscl_mscl" };
+
+static const struct samsung_mux_clock mfcmscl_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_MFCMSCL_MFC_USER, "mout_mfcmscl_mfc_user",
+ mout_mfcmscl_mfc_user_p, CLK_CON_MUX_MFCMSCL_MFC_USER, 12, 1),
+ MUX(CLK_MOUT_MFCMSCL_MSCL_USER, "mout_mfcmscl_mscl_user",
+ mout_mfcmscl_mscl_user_p, CLK_CON_MUX_MFCMSCL_MSCL_USER, 12, 1),
+};
+
+static const struct samsung_div_clock mfcmscl_div_clks[] __initconst = {
+ DIV(CLK_DOUT_MFCMSCL_APB, "dout_mfcmscl_apb",
+ "gout_mfcmscl_mux_mscl_user", CLK_CON_DIV_MFCMSCL_APB, 0, 2),
+};
+
+static const struct samsung_gate_clock mfcmscl_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_MFCMSCL_MFC, "gout_mfcmscl_mfc",
+ "gout_mfcmscl_mux_mfc_user", CLK_CON_GAT_MFCMSCL_MFC, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MFCMSCL_MSCL, "gout_mfcmscl_mscl",
+ "gout_mfcmscl_mux_mscl_user", CLK_CON_GAT_MFCMSCL_MSCL, 0,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MFCMSCL_MSCL_BI, "gout_mfcmscl_mscl_bi",
+ "gout_mfcmscl_mscl_d", CLK_CON_GAT_MFCMSCL_MSCL_BI, 4,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MFCMSCL_MSCL_D, "gout_mfcmscl_mscl_d",
+ "gout_mfcmscl_mux_mscl_user", CLK_CON_GAT_MFCMSCL_MSCL_D, 1,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MFCMSCL_MSCL_JPEG, "gout_mfcmscl_mscl_jpeg",
+ "gout_mfcmscl_mscl_d", CLK_CON_GAT_MFCMSCL_MSCL_JPEG, 2,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MFCMSCL_MSCL_POLY, "gout_mfcmscl_mscl_poly",
+ "gout_mfcmscl_mscl_d", CLK_CON_GAT_MFCMSCL_MSCL_POLY, 3,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MFCMSCL_MSCL_PPMU, "gout_mfcmscl_mscl_ppmu",
+ "gout_mfcmscl_mux_mscl_user", CLK_CON_GAT_MFCMSCL_MSCL_PPMU, 5,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MFCMSCL_MUX_MFC_USER, "gout_mfcmscl_mux_mfc_user",
+ "mout_mfcmscl_mfc_user", CLK_CON_GAT_MFCMSCL_MUX_MFC_USER, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MFCMSCL_MUX_MSCL_USER, "gout_mfcmscl_mux_mscl_user",
+ "mout_mfcmscl_mscl_user", CLK_CON_GAT_MFCMSCL_MUX_MSCL_USER, 21,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+};
+
+static const struct samsung_cmu_info mfcmscl_cmu_info __initconst = {
+ .mux_clks = mfcmscl_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(mfcmscl_mux_clks),
+ .div_clks = mfcmscl_div_clks,
+ .nr_div_clks = ARRAY_SIZE(mfcmscl_div_clks),
+ .gate_clks = mfcmscl_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(mfcmscl_gate_clks),
+ .clk_regs = mfcmscl_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(mfcmscl_clk_regs),
+ .nr_clk_ids = MFCMSCL_NR_CLK,
+};
+
+/*
+ * Register offsets for CMU_PERI (0x101f0000)
+ */
+#define CLK_CON_GAT_PERI_PWM_MOTOR_OSCCLK 0x0800
+#define CLK_CON_GAT_PERI_TMU_CPUCL0_CLK 0x0800
+#define CLK_CON_GAT_PERI_TMU_CPUCL1_CLK 0x0800
+#define CLK_CON_GAT_PERI_TMU_CLK 0x0800
+#define CLK_CON_GAT_PERI_BUSP1_PERIC0_HCLK 0x0810
+#define CLK_CON_GAT_PERI_GPIO2_PCLK 0x0810
+#define CLK_CON_GAT_PERI_GPIO5_PCLK 0x0810
+#define CLK_CON_GAT_PERI_GPIO6_PCLK 0x0810
+#define CLK_CON_GAT_PERI_GPIO7_PCLK 0x0810
+#define CLK_CON_GAT_PERI_HSI2C4_IPCLK 0x0810
+#define CLK_CON_GAT_PERI_HSI2C6_IPCLK 0x0810
+#define CLK_CON_GAT_PERI_HSI2C3_IPCLK 0x0810
+#define CLK_CON_GAT_PERI_HSI2C5_IPCLK 0x0810
+#define CLK_CON_GAT_PERI_HSI2C2_IPCLK 0x0810
+#define CLK_CON_GAT_PERI_HSI2C1_IPCLK 0x0810
+#define CLK_CON_GAT_PERI_I2C0_PCLK 0x0810
+#define CLK_CON_GAT_PERI_I2C4_PCLK 0x0810
+#define CLK_CON_GAT_PERI_I2C5_PCLK 0x0810
+#define CLK_CON_GAT_PERI_I2C6_PCLK 0x0810
+#define CLK_CON_GAT_PERI_I2C7_PCLK 0x0810
+#define CLK_CON_GAT_PERI_I2C8_PCLK 0x0810
+#define CLK_CON_GAT_PERI_I2C3_PCLK 0x0810
+#define CLK_CON_GAT_PERI_I2C2_PCLK 0x0810
+#define CLK_CON_GAT_PERI_I2C1_PCLK 0x0810
+#define CLK_CON_GAT_PERI_MCT_PCLK 0x0810
+#define CLK_CON_GAT_PERI_PWM_MOTOR_PCLK_S0 0x0810
+#define CLK_CON_GAT_PERI_SFRIF_TMU_CPUCL0_PCLK 0x0814
+#define CLK_CON_GAT_PERI_SFRIF_TMU_CPUCL1_PCLK 0x0814
+#define CLK_CON_GAT_PERI_SFRIF_TMU_PCLK 0x0814
+#define CLK_CON_GAT_PERI_SPI0_PCLK 0x0814
+#define CLK_CON_GAT_PERI_SPI2_PCLK 0x0814
+#define CLK_CON_GAT_PERI_SPI1_PCLK 0x0814
+#define CLK_CON_GAT_PERI_SPI4_PCLK 0x0814
+#define CLK_CON_GAT_PERI_SPI3_PCLK 0x0814
+#define CLK_CON_GAT_PERI_UART1_PCLK 0x0814
+#define CLK_CON_GAT_PERI_UART2_PCLK 0x0814
+#define CLK_CON_GAT_PERI_UART0_PCLK 0x0814
+#define CLK_CON_GAT_PERI_WDT_CPUCL0_PCLK 0x0814
+#define CLK_CON_GAT_PERI_WDT_CPUCL1_PCLK 0x0814
+#define CLK_CON_GAT_PERI_UART1_EXT_UCLK 0x0830
+#define CLK_CON_GAT_PERI_UART2_EXT_UCLK 0x0834
+#define CLK_CON_GAT_PERI_UART0_EXT_UCLK 0x0838
+#define CLK_CON_GAT_PERI_SPI2_SPI_EXT_CLK 0x083c
+#define CLK_CON_GAT_PERI_SPI1_SPI_EXT_CLK 0x0840
+#define CLK_CON_GAT_PERI_SPI0_SPI_EXT_CLK 0x0844
+#define CLK_CON_GAT_PERI_SPI3_SPI_EXT_CLK 0x0848
+#define CLK_CON_GAT_PERI_SPI4_SPI_EXT_CLK 0x084c
+
+static const unsigned long peri_clk_regs[] __initconst = {
+ CLK_CON_GAT_PERI_PWM_MOTOR_OSCCLK,
+ CLK_CON_GAT_PERI_TMU_CPUCL0_CLK,
+ CLK_CON_GAT_PERI_TMU_CPUCL1_CLK,
+ CLK_CON_GAT_PERI_TMU_CLK,
+ CLK_CON_GAT_PERI_BUSP1_PERIC0_HCLK,
+ CLK_CON_GAT_PERI_GPIO2_PCLK,
+ CLK_CON_GAT_PERI_GPIO5_PCLK,
+ CLK_CON_GAT_PERI_GPIO6_PCLK,
+ CLK_CON_GAT_PERI_GPIO7_PCLK,
+ CLK_CON_GAT_PERI_HSI2C4_IPCLK,
+ CLK_CON_GAT_PERI_HSI2C6_IPCLK,
+ CLK_CON_GAT_PERI_HSI2C3_IPCLK,
+ CLK_CON_GAT_PERI_HSI2C5_IPCLK,
+ CLK_CON_GAT_PERI_HSI2C2_IPCLK,
+ CLK_CON_GAT_PERI_HSI2C1_IPCLK,
+ CLK_CON_GAT_PERI_I2C0_PCLK,
+ CLK_CON_GAT_PERI_I2C4_PCLK,
+ CLK_CON_GAT_PERI_I2C5_PCLK,
+ CLK_CON_GAT_PERI_I2C6_PCLK,
+ CLK_CON_GAT_PERI_I2C7_PCLK,
+ CLK_CON_GAT_PERI_I2C8_PCLK,
+ CLK_CON_GAT_PERI_I2C3_PCLK,
+ CLK_CON_GAT_PERI_I2C2_PCLK,
+ CLK_CON_GAT_PERI_I2C1_PCLK,
+ CLK_CON_GAT_PERI_MCT_PCLK,
+ CLK_CON_GAT_PERI_PWM_MOTOR_PCLK_S0,
+ CLK_CON_GAT_PERI_SFRIF_TMU_CPUCL0_PCLK,
+ CLK_CON_GAT_PERI_SFRIF_TMU_CPUCL1_PCLK,
+ CLK_CON_GAT_PERI_SFRIF_TMU_PCLK,
+ CLK_CON_GAT_PERI_SPI0_PCLK,
+ CLK_CON_GAT_PERI_SPI2_PCLK,
+ CLK_CON_GAT_PERI_SPI1_PCLK,
+ CLK_CON_GAT_PERI_SPI4_PCLK,
+ CLK_CON_GAT_PERI_SPI3_PCLK,
+ CLK_CON_GAT_PERI_UART1_PCLK,
+ CLK_CON_GAT_PERI_UART2_PCLK,
+ CLK_CON_GAT_PERI_UART0_PCLK,
+ CLK_CON_GAT_PERI_WDT_CPUCL0_PCLK,
+ CLK_CON_GAT_PERI_WDT_CPUCL1_PCLK,
+ CLK_CON_GAT_PERI_UART1_EXT_UCLK,
+ CLK_CON_GAT_PERI_UART2_EXT_UCLK,
+ CLK_CON_GAT_PERI_UART0_EXT_UCLK,
+ CLK_CON_GAT_PERI_SPI2_SPI_EXT_CLK,
+ CLK_CON_GAT_PERI_SPI1_SPI_EXT_CLK,
+ CLK_CON_GAT_PERI_SPI0_SPI_EXT_CLK,
+ CLK_CON_GAT_PERI_SPI3_SPI_EXT_CLK,
+ CLK_CON_GAT_PERI_SPI4_SPI_EXT_CLK,
+};
+
+static const struct samsung_gate_clock peri_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_PERI_BUSP1_PERIC0_HCLK, "gout_peri_busp1_peric0_hclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_BUSP1_PERIC0_HCLK, 3,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_GPIO2_PCLK, "gout_peri_gpio2_pclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_GPIO2_PCLK, 7,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_GPIO5_PCLK, "gout_peri_gpio5_pclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_GPIO5_PCLK, 8,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_GPIO6_PCLK, "gout_peri_gpio6_pclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_GPIO6_PCLK, 9,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_GPIO7_PCLK, "gout_peri_gpio7_pclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_GPIO7_PCLK, 10,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_HSI2C4_IPCLK, "gout_peri_hsi2c4_ipclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_HSI2C4_IPCLK, 14,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_HSI2C6_IPCLK, "gout_peri_hsi2c6_ipclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_HSI2C6_IPCLK, 16,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_HSI2C3_IPCLK, "gout_peri_hsi2c3_ipclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_HSI2C3_IPCLK, 13,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_HSI2C5_IPCLK, "gout_peri_hsi2c5_ipclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_HSI2C5_IPCLK, 15,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_HSI2C2_IPCLK, "gout_peri_hsi2c2_ipclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_HSI2C2_IPCLK, 12,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_HSI2C1_IPCLK, "gout_peri_hsi2c1_ipclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_HSI2C1_IPCLK, 11,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_I2C0_PCLK, "gout_peri_i2c0_pclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_I2C0_PCLK, 21,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_I2C4_PCLK, "gout_peri_i2c4_pclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_I2C4_PCLK, 17,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_I2C5_PCLK, "gout_peri_i2c5_pclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_I2C5_PCLK, 18,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_I2C6_PCLK, "gout_peri_i2c6_pclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_I2C6_PCLK, 19,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_I2C7_PCLK, "gout_peri_i2c7_pclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_I2C7_PCLK, 24,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_I2C8_PCLK, "gout_peri_i2c8_pclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_I2C8_PCLK, 25,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_I2C3_PCLK, "gout_peri_i2c3_pclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_I2C3_PCLK, 20,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_I2C2_PCLK, "gout_peri_i2c2_pclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_I2C2_PCLK, 22,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_I2C1_PCLK, "gout_peri_i2c1_pclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_I2C1_PCLK, 23,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_MCT_PCLK, "gout_peri_mct_pclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_MCT_PCLK, 26,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_PWM_MOTOR_OSCCLK, "gout_peri_pwm_motor_oscclk",
+ "oscclk", CLK_CON_GAT_PERI_PWM_MOTOR_OSCCLK, 2,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_PWM_MOTOR_PCLK_S0, "gout_peri_pwm_motor_pclk_s0",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_PWM_MOTOR_PCLK_S0, 29,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SFRIF_TMU_CPUCL0_PCLK,
+ "gout_peri_sfrif_tmu_cpucl0_pclk", "gout_mif_cmu_peri_bus",
+ CLK_CON_GAT_PERI_SFRIF_TMU_CPUCL0_PCLK, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SFRIF_TMU_CPUCL1_PCLK,
+ "gout_peri_sfrif_tmu_cpucl1_pclk", "gout_mif_cmu_peri_bus",
+ CLK_CON_GAT_PERI_SFRIF_TMU_CPUCL1_PCLK, 2, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SFRIF_TMU_PCLK, "gout_peri_sfrif_tmu_pclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_SFRIF_TMU_PCLK, 3,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SPI0_PCLK, "gout_peri_spi0_pclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_SPI0_PCLK, 6,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SPI0_SPI_EXT_CLK, "gout_peri_spi0_spi_ext_clk",
+ "gout_mif_cmu_peri_spi0", CLK_CON_GAT_PERI_SPI0_SPI_EXT_CLK, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SPI2_PCLK, "gout_peri_spi2_pclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_SPI2_PCLK, 4,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SPI2_SPI_EXT_CLK, "gout_peri_spi2_spi_ext_clk",
+ "gout_mif_cmu_peri_spi2", CLK_CON_GAT_PERI_SPI2_SPI_EXT_CLK, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SPI1_PCLK, "gout_peri_spi1_pclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_SPI1_PCLK, 5,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SPI1_SPI_EXT_CLK, "gout_peri_spi1_spi_ext_clk",
+ "gout_mif_cmu_peri_spi1", CLK_CON_GAT_PERI_SPI1_SPI_EXT_CLK, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SPI4_PCLK, "gout_peri_spi4_pclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_SPI4_PCLK, 8,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SPI4_SPI_EXT_CLK, "gout_peri_spi4_spi_ext_clk",
+ "gout_mif_cmu_peri_spi4", CLK_CON_GAT_PERI_SPI4_SPI_EXT_CLK, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SPI3_PCLK, "gout_peri_spi3_pclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_SPI3_PCLK, 7,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SPI3_SPI_EXT_CLK, "gout_peri_spi3_spi_ext_clk",
+ "gout_mif_cmu_peri_spi3", CLK_CON_GAT_PERI_SPI3_SPI_EXT_CLK, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_TMU_CPUCL0_CLK, "gout_peri_tmu_cpucl0_clk", "oscclk",
+ CLK_CON_GAT_PERI_TMU_CPUCL0_CLK, 4, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_TMU_CPUCL1_CLK, "gout_peri_tmu_cpucl1_clk", "oscclk",
+ CLK_CON_GAT_PERI_TMU_CPUCL1_CLK, 5, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_TMU_CLK, "gout_peri_tmu_clk", "oscclk",
+ CLK_CON_GAT_PERI_TMU_CLK, 6, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_UART1_EXT_UCLK, "gout_peri_uart1_ext_uclk",
+ "gout_mif_cmu_peri_uart1", CLK_CON_GAT_PERI_UART1_EXT_UCLK, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_UART1_PCLK, "gout_peri_uart1_pclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_UART1_PCLK, 11,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_UART2_EXT_UCLK, "gout_peri_uart2_ext_uclk",
+ "gout_mif_cmu_peri_uart2", CLK_CON_GAT_PERI_UART2_EXT_UCLK, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_UART2_PCLK, "gout_peri_uart2_pclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_UART2_PCLK, 12,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_UART0_EXT_UCLK, "gout_peri_uart0_ext_uclk",
+ "gout_mif_cmu_peri_uart0", CLK_CON_GAT_PERI_UART0_EXT_UCLK, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_UART0_PCLK, "gout_peri_uart0_pclk",
+ "gout_peri_busp1_peric0_hclk", CLK_CON_GAT_PERI_UART0_PCLK, 10,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_WDT_CPUCL0_PCLK, "gout_peri_wdt_cpucl0_pclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_WDT_CPUCL0_PCLK, 13,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_WDT_CPUCL1_PCLK, "gout_peri_wdt_cpucl1_pclk",
+ "gout_mif_cmu_peri_bus", CLK_CON_GAT_PERI_WDT_CPUCL1_PCLK, 14,
+ CLK_SET_RATE_PARENT, 0),
+};
+
+static const struct samsung_cmu_info peri_cmu_info __initconst = {
+ .gate_clks = peri_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peri_gate_clks),
+ .clk_regs = peri_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peri_clk_regs),
+ .nr_clk_ids = PERI_NR_CLK,
+};
+
+static int __init exynos7870_cmu_probe(struct platform_device *pdev)
+{
+ const struct samsung_cmu_info *info;
+ struct device *dev = &pdev->dev;
+
+ info = of_device_get_match_data(dev);
+ exynos_arm64_register_cmu(dev, dev->of_node, info);
+
+ return 0;
+}
+
+static const struct of_device_id exynos7870_cmu_of_match[] = {
+ {
+ .compatible = "samsung,exynos7870-cmu-mif",
+ .data = &mif_cmu_info,
+ }, {
+ .compatible = "samsung,exynos7870-cmu-dispaud",
+ .data = &dispaud_cmu_info,
+ }, {
+ .compatible = "samsung,exynos7870-cmu-fsys",
+ .data = &fsys_cmu_info,
+ }, {
+ .compatible = "samsung,exynos7870-cmu-g3d",
+ .data = &g3d_cmu_info,
+ }, {
+ .compatible = "samsung,exynos7870-cmu-isp",
+ .data = &isp_cmu_info,
+ }, {
+ .compatible = "samsung,exynos7870-cmu-mfcmscl",
+ .data = &mfcmscl_cmu_info,
+ }, {
+ .compatible = "samsung,exynos7870-cmu-peri",
+ .data = &peri_cmu_info,
+ }, {
+ },
+};
+
+static struct platform_driver exynos7870_cmu_driver __refdata = {
+ .driver = {
+ .name = "exynos7870-cmu",
+ .of_match_table = exynos7870_cmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = exynos7870_cmu_probe,
+};
+
+static int __init exynos7870_cmu_init(void)
+{
+ return platform_driver_register(&exynos7870_cmu_driver);
+}
+core_initcall(exynos7870_cmu_init);
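As a hedged illustration of how the gate clocks registered above are consumed through the common clock framework (the driver shape and the "pclk" clock-names entry are hypothetical; devm_clk_get(), clk_prepare_enable() and clk_disable_unprepare() are the real kernel APIs involved):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_consumer_probe(struct platform_device *pdev)
{
	struct clk *pclk;
	int ret;

	/* "pclk" would match a "clock-names" entry in the consumer DT node */
	pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(pclk))
		return PTR_ERR(pclk);

	/* Ungates the clock, e.g. one of the CLK_GOUT_PERI_* gates above */
	ret = clk_prepare_enable(pclk);
	if (ret)
		return ret;

	/* ... device setup ... */

	clk_disable_unprepare(pclk);
	return 0;
}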
diff --git a/drivers/clk/samsung/clk-exynos7885.c b/drivers/clk/samsung/clk-exynos7885.c
index fc42251731ed..ba7cf79bc300 100644
--- a/drivers/clk/samsung/clk-exynos7885.c
+++ b/drivers/clk/samsung/clk-exynos7885.c
@@ -6,8 +6,8 @@
* Common Clock Framework support for Exynos7885 SoC.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/samsung/clk-exynos850.c b/drivers/clk/samsung/clk-exynos850.c
index e00e213b1201..cf7e08cca78e 100644
--- a/drivers/clk/samsung/clk-exynos850.c
+++ b/drivers/clk/samsung/clk-exynos850.c
@@ -6,8 +6,8 @@
* Common Clock Framework support for Exynos850 SoC.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/samsung/clk-exynos8895.c b/drivers/clk/samsung/clk-exynos8895.c
index 29ec0c4a8635..e6980a8f026f 100644
--- a/drivers/clk/samsung/clk-exynos8895.c
+++ b/drivers/clk/samsung/clk-exynos8895.c
@@ -6,8 +6,8 @@
* Common Clock Framework support for Exynos8895 SoC.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/samsung/clk-exynos990.c b/drivers/clk/samsung/clk-exynos990.c
index 8e2a2e8eccee..8d3f193d2b4d 100644
--- a/drivers/clk/samsung/clk-exynos990.c
+++ b/drivers/clk/samsung/clk-exynos990.c
@@ -5,8 +5,8 @@
* Common Clock Framework support for Exynos990.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -19,6 +19,7 @@
/* NOTE: Must be equal to the last clock ID increased by one */
#define CLKS_NR_TOP (CLK_GOUT_CMU_VRA_BUS + 1)
#define CLKS_NR_HSI0 (CLK_GOUT_HSI0_XIU_D_HSI0_ACLK + 1)
+#define CLKS_NR_PERIS (CLK_GOUT_PERIS_OTP_CON_TOP_OSCCLK + 1)
/* ---- CMU_TOP ------------------------------------------------------------- */
@@ -449,7 +450,7 @@ static const struct samsung_pll_clock top_pll_clks[] __initconst = {
PLL_LOCKTIME_PLL_G3D, PLL_CON3_PLL_G3D, NULL),
};
-/* Parent clock list for CMU_TOP muxes*/
+/* Parent clock list for CMU_TOP muxes */
PNAME(mout_pll_shared0_p) = { "oscclk", "fout_shared0_pll" };
PNAME(mout_pll_shared1_p) = { "oscclk", "fout_shared1_pll" };
PNAME(mout_pll_shared2_p) = { "oscclk", "fout_shared2_pll" };
@@ -1192,6 +1193,7 @@ static const unsigned long hsi0_clk_regs[] __initconst = {
CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_D_HSI0_IPCLKPORT_ACLK,
};
+/* Parent clock list for CMU_HSI0 muxes */
PNAME(mout_hsi0_bus_user_p) = { "oscclk", "dout_cmu_hsi0_bus" };
PNAME(mout_hsi0_usb31drd_user_p) = { "oscclk", "dout_cmu_hsi0_usb31drd" };
PNAME(mout_hsi0_usbdp_debug_user_p) = { "oscclk",
@@ -1305,6 +1307,182 @@ static const struct samsung_cmu_info hsi0_cmu_info __initconst = {
.clk_name = "bus",
};
+/* ---- CMU_PERIS ----------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIS (0x10020000) */
+#define PLL_CON0_MUX_CLKCMU_PERIS_BUS_USER 0x0600
+#define PLL_CON1_MUX_CLKCMU_PERIS_BUS_USER 0x0604
+#define CLK_CON_MUX_MUX_CLK_PERIS_GIC 0x1000
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_SYSREG_PERIS_IPCLKPORT_PCLK 0x203c
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER2_IPCLKPORT_PCLK 0x204c
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER0_IPCLKPORT_PCLK 0x2048
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_PERIS_CMU_PERIS_IPCLKPORT_PCLK 0x200c
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_BUSP_IPCLKPORT_CLK 0x2034
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_OSCCLK_IPCLKPORT_CLK 0x2010
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_GIC_IPCLKPORT_CLK 0x2038
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_AD_AXI_P_PERIS_IPCLKPORT_ACLKM 0x2014
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_PCLK 0x2028
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_GIC_IPCLKPORT_CLK 0x201c
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_LHM_AXI_P_PERIS_IPCLKPORT_I_CLK 0x2020
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_MCT_IPCLKPORT_PCLK 0x2024
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_PCLK 0x2030
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_D_TZPC_PERIS_IPCLKPORT_PCLK 0x2018
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TMU_SUB_IPCLKPORT_PCLK 0x2040
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TMU_TOP_IPCLKPORT_PCLK 0x2044
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_I_OSCCLK 0x2000
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_I_OSCCLK 0x2008
+#define QCH_CON_D_TZPC_PERIS_QCH 0x3004
+#define QCH_CON_GIC_QCH 0x3008
+#define QCH_CON_LHM_AXI_P_PERIS_QCH 0x300c
+#define QCH_CON_MCT_QCH 0x3010
+#define QCH_CON_OTP_CON_BIRA_QCH 0x3014
+#define QCH_CON_OTP_CON_TOP_QCH 0x301c
+#define QCH_CON_PERIS_CMU_PERIS_QCH 0x3020
+#define QCH_CON_SYSREG_PERIS_QCH 0x3024
+#define QCH_CON_TMU_SUB_QCH 0x3028
+#define QCH_CON_TMU_TOP_QCH 0x302c
+#define QCH_CON_WDT_CLUSTER0_QCH 0x3030
+#define QCH_CON_WDT_CLUSTER2_QCH 0x3034
+
+static const unsigned long peris_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIS_BUS_USER,
+ PLL_CON1_MUX_CLKCMU_PERIS_BUS_USER,
+ CLK_CON_MUX_MUX_CLK_PERIS_GIC,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_SYSREG_PERIS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER2_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_PERIS_CMU_PERIS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_BUSP_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_GIC_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_AD_AXI_P_PERIS_IPCLKPORT_ACLKM,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_GIC_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_LHM_AXI_P_PERIS_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_MCT_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_D_TZPC_PERIS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TMU_SUB_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TMU_TOP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_I_OSCCLK,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_I_OSCCLK,
+ QCH_CON_D_TZPC_PERIS_QCH,
+ QCH_CON_GIC_QCH,
+ QCH_CON_LHM_AXI_P_PERIS_QCH,
+ QCH_CON_MCT_QCH,
+ QCH_CON_OTP_CON_BIRA_QCH,
+ QCH_CON_OTP_CON_TOP_QCH,
+ QCH_CON_PERIS_CMU_PERIS_QCH,
+ QCH_CON_SYSREG_PERIS_QCH,
+ QCH_CON_TMU_SUB_QCH,
+ QCH_CON_TMU_TOP_QCH,
+ QCH_CON_WDT_CLUSTER0_QCH,
+ QCH_CON_WDT_CLUSTER2_QCH,
+};
+
+/* Parent clock list for CMU_PERIS muxes */
+PNAME(mout_peris_bus_user_p) = { "oscclk", "mout_cmu_peris_bus" };
+PNAME(mout_peris_clk_peris_gic_p) = { "oscclk", "mout_peris_bus_user" };
+
+static const struct samsung_mux_clock peris_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIS_BUS_USER, "mout_peris_bus_user",
+ mout_peris_bus_user_p, PLL_CON0_MUX_CLKCMU_PERIS_BUS_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIS_CLK_PERIS_GIC, "mout_peris_clk_peris_gic",
+ mout_peris_clk_peris_gic_p, CLK_CON_MUX_MUX_CLK_PERIS_GIC,
+ 4, 1),
+};
+
+static const struct samsung_gate_clock peris_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_PERIS_SYSREG_PERIS_PCLK,
+ "gout_peris_sysreg_peris_pclk", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_SYSREG_PERIS_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_WDT_CLUSTER2_PCLK,
+ "gout_peris_wdt_cluster2_pclk", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER2_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_WDT_CLUSTER0_PCLK,
+ "gout_peris_wdt_cluster0_pclk", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_CLK_PERIS_PERIS_CMU_PERIS_PCLK,
+ "clk_peris_peris_cmu_peris_pclk", "mout_peris_bus_user",
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_PERIS_CMU_PERIS_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERIS_CLK_PERIS_BUSP_CLK,
+ "gout_peris_clk_peris_busp_clk", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_BUSP_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_CLK_PERIS_OSCCLK_CLK,
+ "gout_peris_clk_peris_oscclk_clk", "mout_peris_bus_user",
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_OSCCLK_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_CLK_PERIS_GIC_CLK,
+ "gout_peris_clk_peris_gic_clk", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_GIC_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_AD_AXI_P_PERIS_ACLKM,
+ "gout_peris_ad_axi_p_peris_aclkm", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_AD_AXI_P_PERIS_IPCLKPORT_ACLKM,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERIS_OTP_CON_BIRA_PCLK,
+ "gout_peris_otp_con_bira_pclk", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_GIC_CLK,
+ "gout_peris_gic_clk", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_GIC_IPCLKPORT_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIS_LHM_AXI_P_PERIS_CLK,
+ "gout_peris_lhm_axi_p_peris_clk", "oscclk",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_LHM_AXI_P_PERIS_IPCLKPORT_I_CLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERIS_MCT_PCLK,
+ "gout_peris_mct_pclk", "mout_peris_clk_peris_gic",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_MCT_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_OTP_CON_TOP_PCLK,
+ "gout_peris_otp_con_top_pclk", "mout_peris_clk_peris_gic",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_D_TZPC_PERIS_PCLK,
+ "gout_peris_d_tzpc_peris_pclk", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_D_TZPC_PERIS_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TMU_TOP_PCLK,
+ "gout_peris_tmu_top_pclk", "mout_peris_clk_peris_gic",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TMU_TOP_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_OTP_CON_BIRA_OSCCLK,
+ "gout_peris_otp_con_bira_oscclk", "oscclk",
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_I_OSCCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_OTP_CON_TOP_OSCCLK,
+ "gout_peris_otp_con_top_oscclk", "oscclk",
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_I_OSCCLK,
+ 21, 0, 0),
+};
+
+static const struct samsung_cmu_info peris_cmu_info __initconst = {
+ .mux_clks = peris_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peris_mux_clks),
+ .gate_clks = peris_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peris_gate_clks),
+ .nr_clk_ids = CLKS_NR_PERIS,
+ .clk_regs = peris_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peris_clk_regs),
+};
+
+static void __init exynos990_cmu_peris_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &peris_cmu_info);
+}
+
+/* Register CMU_PERIS early, as it's a dependency for the MCT. */
+CLK_OF_DECLARE(exynos990_cmu_peris, "samsung,exynos990-cmu-peris",
+ exynos990_cmu_peris_init);
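CLK_OF_DECLARE() hooks exynos990_cmu_peris_init() into of_clk_init(), which on arm64 runs from time_init(), before any platform devices are probed; that is what lets the MCT timer, itself set up through an early timer OF declaration, already find its clocks, while the remaining CMUs stay on the regular platform_driver probe path below.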
+
/* ----- platform_driver ----- */
static int __init exynos990_cmu_probe(struct platform_device *pdev)
diff --git a/drivers/clk/samsung/clk-exynosautov9.c b/drivers/clk/samsung/clk-exynosautov9.c
index 5971e680e566..e4d7c7b96aa8 100644
--- a/drivers/clk/samsung/clk-exynosautov9.c
+++ b/drivers/clk/samsung/clk-exynosautov9.c
@@ -6,8 +6,8 @@
* Common Clock Framework support for ExynosAuto V9 SoC.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/samsung/clk-exynosautov920.c b/drivers/clk/samsung/clk-exynosautov920.c
index 2a8bfd5d9abc..dc8d4240f6de 100644
--- a/drivers/clk/samsung/clk-exynosautov920.c
+++ b/drivers/clk/samsung/clk-exynosautov920.c
@@ -6,8 +6,8 @@
* Common Clock Framework support for ExynosAuto v920 SoC.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/samsung/clk-fsd.c b/drivers/clk/samsung/clk-fsd.c
index 9a6006c298c2..594931334574 100644
--- a/drivers/clk/samsung/clk-fsd.c
+++ b/drivers/clk/samsung/clk-fsd.c
@@ -8,10 +8,10 @@
* Common Clock Framework support for FSD SoC.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/samsung/clk-gs101.c b/drivers/clk/samsung/clk-gs101.c
index 86b39edba122..f9c3d68d449c 100644
--- a/drivers/clk/samsung/clk-gs101.c
+++ b/drivers/clk/samsung/clk-gs101.c
@@ -6,8 +6,8 @@
* Common Clock Framework support for GS101.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -382,17 +382,9 @@ static const unsigned long cmu_top_clk_regs[] __initconst = {
EARLY_WAKEUP_DPU_DEST,
EARLY_WAKEUP_CSIS_DEST,
EARLY_WAKEUP_SW_TRIG_APM,
- EARLY_WAKEUP_SW_TRIG_APM_SET,
- EARLY_WAKEUP_SW_TRIG_APM_CLEAR,
EARLY_WAKEUP_SW_TRIG_CLUSTER0,
- EARLY_WAKEUP_SW_TRIG_CLUSTER0_SET,
- EARLY_WAKEUP_SW_TRIG_CLUSTER0_CLEAR,
EARLY_WAKEUP_SW_TRIG_DPU,
- EARLY_WAKEUP_SW_TRIG_DPU_SET,
- EARLY_WAKEUP_SW_TRIG_DPU_CLEAR,
EARLY_WAKEUP_SW_TRIG_CSIS,
- EARLY_WAKEUP_SW_TRIG_CSIS_SET,
- EARLY_WAKEUP_SW_TRIG_CSIS_CLEAR,
CLK_CON_MUX_MUX_CLKCMU_BO_BUS,
CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS,
CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS,
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 2e94bba6c396..fe8abe442c51 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -206,6 +206,7 @@ static const struct clk_ops samsung_pll3000_clk_ops = {
*/
/* Maximum lock time can be 270 * PDIV cycles */
#define PLL35XX_LOCK_FACTOR (270)
+#define PLL142XX_LOCK_FACTOR (150)
#define PLL35XX_MDIV_MASK (0x3FF)
#define PLL35XX_PDIV_MASK (0x3F)
@@ -272,7 +273,11 @@ static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate,
}
/* Set PLL lock time. */
- writel_relaxed(rate->pdiv * PLL35XX_LOCK_FACTOR,
+ if (pll->type == pll_142xx)
+ writel_relaxed(rate->pdiv * PLL142XX_LOCK_FACTOR,
+ pll->lock_reg);
+ else
+ writel_relaxed(rate->pdiv * PLL35XX_LOCK_FACTOR,
pll->lock_reg);
/* Change PLL PMS values */
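As a worked example of the lock-time programming above (the pdiv value is purely illustrative): a pll_142xx rate-table entry with pdiv = 3 now gets 3 * 150 = 450 reference cycles written to the lock register, where the generic PLL35xx factor would have written 3 * 270 = 810.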
@@ -1460,6 +1465,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
init.ops = &samsung_pll2650xx_clk_ops;
break;
case pll_531x:
+ case pll_4311:
init.ops = &samsung_pll531x_clk_ops;
break;
default:
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index 6ddc54d173a0..e9a5f8e0e0a3 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -48,6 +48,7 @@ enum samsung_pll_type {
pll_0717x,
pll_0718x,
pll_0732x,
+ pll_4311,
};
#define PLL_RATE(_fin, _m, _p, _s, _k, _ks) \
diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c
index e2ec8fe32e39..397a057af5d1 100644
--- a/drivers/clk/samsung/clk-s3c64xx.c
+++ b/drivers/clk/samsung/clk-s3c64xx.c
@@ -8,7 +8,6 @@
#include <linux/slab.h>
#include <linux/clk-provider.h>
#include <linux/clk/samsung.h>
-#include <linux/of.h>
#include <linux/of_address.h>
#include <dt-bindings/clock/samsung,s3c64xx-clock.h>
diff --git a/drivers/clk/samsung/clk-s5pv210-audss.c b/drivers/clk/samsung/clk-s5pv210-audss.c
index d19a3d9fd452..b1fd8fac3a4c 100644
--- a/drivers/clk/samsung/clk-s5pv210-audss.c
+++ b/drivers/clk/samsung/clk-s5pv210-audss.c
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
#include <linux/init.h>
diff --git a/drivers/clk/samsung/clk-s5pv210.c b/drivers/clk/samsung/clk-s5pv210.c
index cd85342e4ddb..9a4217cc1908 100644
--- a/drivers/clk/samsung/clk-s5pv210.c
+++ b/drivers/clk/samsung/clk-s5pv210.c
@@ -9,7 +9,6 @@
*/
#include <linux/clk-provider.h>
-#include <linux/of.h>
#include <linux/of_address.h>
#include "clk.h"
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index 283c523763e6..dbc9925ca8f4 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -10,9 +10,9 @@
#include <linux/slab.h>
#include <linux/clkdev.h>
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
@@ -74,12 +74,12 @@ struct samsung_clk_provider * __init samsung_clk_init(struct device *dev,
if (!ctx)
panic("could not allocate clock provider context.\n");
+ ctx->clk_data.num = nr_clks;
for (i = 0; i < nr_clks; ++i)
ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);
ctx->dev = dev;
ctx->reg_base = base;
- ctx->clk_data.num = nr_clks;
spin_lock_init(&ctx->lock);
return ctx;
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index fb06caa71f0a..18660c1ac6f0 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -11,6 +11,7 @@
#define __SAMSUNG_CLK_H
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include "clk-pll.h"
#include "clk-cpu.h"
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index b547198a2c65..5830a9d87bf2 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -52,6 +52,16 @@ config SUN50I_H6_R_CCU
default y
depends on ARM64 || COMPILE_TEST
+config SUN55I_A523_CCU
+ tristate "Support for the Allwinner A523/T527 CCU"
+ default y
+ depends on ARM64 || COMPILE_TEST
+
+config SUN55I_A523_R_CCU
+ tristate "Support for the Allwinner A523/T527 PRCM CCU"
+ default y
+ depends on ARM64 || COMPILE_TEST
+
config SUN4I_A10_CCU
tristate "Support for the Allwinner A10/A20 CCU"
default y
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
index 6b3ae2b620db..82e471036de6 100644
--- a/drivers/clk/sunxi-ng/Makefile
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -33,6 +33,8 @@ obj-$(CONFIG_SUN50I_A100_R_CCU) += sun50i-a100-r-ccu.o
obj-$(CONFIG_SUN50I_H6_CCU) += sun50i-h6-ccu.o
obj-$(CONFIG_SUN50I_H6_R_CCU) += sun50i-h6-r-ccu.o
obj-$(CONFIG_SUN50I_H616_CCU) += sun50i-h616-ccu.o
+obj-$(CONFIG_SUN55I_A523_CCU) += sun55i-a523-ccu.o
+obj-$(CONFIG_SUN55I_A523_R_CCU) += sun55i-a523-r-ccu.o
obj-$(CONFIG_SUN4I_A10_CCU) += sun4i-a10-ccu.o
obj-$(CONFIG_SUN5I_CCU) += sun5i-ccu.o
obj-$(CONFIG_SUN6I_A31_CCU) += sun6i-a31-ccu.o
@@ -58,6 +60,8 @@ sun50i-a100-r-ccu-y += ccu-sun50i-a100-r.o
sun50i-h6-ccu-y += ccu-sun50i-h6.o
sun50i-h6-r-ccu-y += ccu-sun50i-h6-r.o
sun50i-h616-ccu-y += ccu-sun50i-h616.o
+sun55i-a523-ccu-y += ccu-sun55i-a523.o
+sun55i-a523-r-ccu-y += ccu-sun55i-a523-r.o
sun4i-a10-ccu-y += ccu-sun4i-a10.o
sun5i-ccu-y += ccu-sun5i.o
sun6i-a31-ccu-y += ccu-sun6i-a31.o
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
index 190816c35da9..daa462c7d477 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
@@ -328,10 +328,16 @@ static SUNXI_CCU_M_WITH_MUX_GATE(gpu0_clk, "gpu0", gpu0_parents, 0x670,
24, 1, /* mux */
BIT(31), /* gate */
CLK_SET_RATE_PARENT);
+
+/*
+ * This clk is needed as a temporary fallback during GPU PLL frequency
+ * changes. Set the CLK_IS_CRITICAL flag to prevent it from being disabled.
+ */
+#define SUN50I_H616_GPU_CLK1_REG 0x674
static SUNXI_CCU_M_WITH_GATE(gpu1_clk, "gpu1", "pll-periph0-2x", 0x674,
0, 2, /* M */
BIT(31),/* gate */
- 0);
+ CLK_IS_CRITICAL);
static SUNXI_CCU_GATE(bus_gpu_clk, "bus-gpu", "psi-ahb1-ahb2",
0x67c, BIT(0), 0);
@@ -645,6 +651,20 @@ static const char * const tcon_tv_parents[] = { "pll-video0",
"pll-video0-4x",
"pll-video1",
"pll-video1-4x" };
+static SUNXI_CCU_MUX_WITH_GATE(tcon_lcd0_clk, "tcon-lcd0",
+ tcon_tv_parents, 0xb60,
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_MUX_WITH_GATE(tcon_lcd1_clk, "tcon-lcd1",
+ tcon_tv_parents, 0xb64,
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_GATE(bus_tcon_lcd0_clk, "bus-tcon-lcd0", "ahb3",
+ 0xb7c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_tcon_lcd1_clk, "bus-tcon-lcd1", "ahb3",
+ 0xb7c, BIT(1), 0);
static SUNXI_CCU_MP_WITH_MUX_GATE(tcon_tv0_clk, "tcon-tv0",
tcon_tv_parents, 0xb80,
0, 4, /* M */
@@ -855,8 +875,12 @@ static struct ccu_common *sun50i_h616_ccu_clks[] = {
&hdmi_cec_clk.common,
&bus_hdmi_clk.common,
&bus_tcon_top_clk.common,
+ &tcon_lcd0_clk.common,
+ &tcon_lcd1_clk.common,
&tcon_tv0_clk.common,
&tcon_tv1_clk.common,
+ &bus_tcon_lcd0_clk.common,
+ &bus_tcon_lcd1_clk.common,
&bus_tcon_tv0_clk.common,
&bus_tcon_tv1_clk.common,
&tve0_clk.common,
@@ -989,8 +1013,12 @@ static struct clk_hw_onecell_data sun50i_h616_hw_clks = {
[CLK_HDMI_CEC] = &hdmi_cec_clk.common.hw,
[CLK_BUS_HDMI] = &bus_hdmi_clk.common.hw,
[CLK_BUS_TCON_TOP] = &bus_tcon_top_clk.common.hw,
+ [CLK_TCON_LCD0] = &tcon_lcd0_clk.common.hw,
+ [CLK_TCON_LCD1] = &tcon_lcd1_clk.common.hw,
[CLK_TCON_TV0] = &tcon_tv0_clk.common.hw,
[CLK_TCON_TV1] = &tcon_tv1_clk.common.hw,
+ [CLK_BUS_TCON_LCD0] = &bus_tcon_lcd0_clk.common.hw,
+ [CLK_BUS_TCON_LCD1] = &bus_tcon_lcd1_clk.common.hw,
[CLK_BUS_TCON_TV0] = &bus_tcon_tv0_clk.common.hw,
[CLK_BUS_TCON_TV1] = &bus_tcon_tv1_clk.common.hw,
[CLK_TVE0] = &tve0_clk.common.hw,
@@ -1062,6 +1090,8 @@ static const struct ccu_reset_map sun50i_h616_ccu_resets[] = {
[RST_BUS_HDMI] = { 0xb1c, BIT(16) },
[RST_BUS_HDMI_SUB] = { 0xb1c, BIT(17) },
[RST_BUS_TCON_TOP] = { 0xb5c, BIT(16) },
+ [RST_BUS_TCON_LCD0] = { 0xb7c, BIT(16) },
+ [RST_BUS_TCON_LCD1] = { 0xb7c, BIT(17) },
[RST_BUS_TCON_TV0] = { 0xb9c, BIT(16) },
[RST_BUS_TCON_TV1] = { 0xb9c, BIT(17) },
[RST_BUS_TVE_TOP] = { 0xbbc, BIT(16) },
@@ -1120,6 +1150,19 @@ static struct ccu_pll_nb sun50i_h616_pll_cpu_nb = {
.lock = BIT(28),
};
+static struct ccu_mux_nb sun50i_h616_gpu_nb = {
+ .common = &gpu0_clk.common,
+ .cm = &gpu0_clk.mux,
+ .delay_us = 1, /* manual doesn't really say */
+ .bypass_index = 1, /* GPU_CLK1@400MHz */
+};
+
+static struct ccu_pll_nb sun50i_h616_pll_gpu_nb = {
+ .common = &pll_gpu_clk.common,
+ .enable = BIT(29), /* LOCK_ENABLE */
+ .lock = BIT(28),
+};
+
static int sun50i_h616_ccu_probe(struct platform_device *pdev)
{
void __iomem *reg;
@@ -1171,6 +1214,14 @@ static int sun50i_h616_ccu_probe(struct platform_device *pdev)
writel(val, reg + SUN50I_H616_PLL_AUDIO_REG);
/*
+ * Set the input-divider for the gpu1 clock to 3, to reach a safe 400 MHz.
+ */
+ val = readl(reg + SUN50I_H616_GPU_CLK1_REG);
+ val &= ~GENMASK(1, 0);
+ val |= 2;
+ writel(val, reg + SUN50I_H616_GPU_CLK1_REG);
+
+ /*
* First clock parent (osc32K) is unusable for CEC. But since there
* is no good way to force parent switch (both run with same frequency),
* just set second clock parent here.
@@ -1190,6 +1241,13 @@ static int sun50i_h616_ccu_probe(struct platform_device *pdev)
/* Re-lock the CPU PLL after any rate changes */
ccu_pll_notifier_register(&sun50i_h616_pll_cpu_nb);
+ /* Reparent GPU during GPU PLL rate changes */
+ ccu_mux_notifier_register(pll_gpu_clk.common.hw.clk,
+ &sun50i_h616_gpu_nb);
+
+ /* Re-lock the GPU PLL after any rate changes */
+ ccu_pll_notifier_register(&sun50i_h616_pll_gpu_nb);
+
return 0;
}
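A worked check of the GPU fallback arithmetic above, assuming the usual 600 MHz pll-periph0 (1.2 GHz pll-periph0-2x) configuration on the H616: the two-bit gpu1 divider encodes the divisor minus one, so the value 2 written in the probe selects divide-by-3 and 1200 MHz / 3 = 400 MHz, which is the rate the mux notifier's bypass comment ("GPU_CLK1@400MHz") refers to while pll-gpu is being re-locked.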
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.h b/drivers/clk/sunxi-ng/ccu-sun50i-h616.h
index a75803b49f6a..7056f293a8e0 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h616.h
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.h
@@ -51,6 +51,6 @@
#define CLK_BUS_DRAM 56
-#define CLK_NUMBER (CLK_BUS_GPADC + 1)
+#define CLK_NUMBER (CLK_BUS_TCON_LCD1 + 1)
#endif /* _CCU_SUN50I_H616_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c b/drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c
new file mode 100644
index 000000000000..b5464d8083c8
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Arm Ltd.
+ * Based on the D1 CCU driver:
+ * Copyright (c) 2020 huangzhenwei@allwinnertech.com
+ * Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+
+#include "ccu-sun55i-a523-r.h"
+
+static const struct clk_parent_data r_ahb_apb_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+ { .fw_name = "pll-periph" },
+ { .fw_name = "pll-audio" },
+};
+static SUNXI_CCU_M_DATA_WITH_MUX(r_ahb_clk, "r-ahb",
+ r_ahb_apb_parents, 0x000,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ 0);
+
+static SUNXI_CCU_M_DATA_WITH_MUX(r_apb0_clk, "r-apb0",
+ r_ahb_apb_parents, 0x00c,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ 0);
+
+static SUNXI_CCU_M_DATA_WITH_MUX(r_apb1_clk, "r-apb1",
+ r_ahb_apb_parents, 0x010,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ 0);
+
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(r_cpu_timer0, "r-timer0",
+ r_ahb_apb_parents, 0x100,
+ 0, 0, /* no M */
+ 1, 3, /* P */
+ 4, 3, /* mux */
+ BIT(0),
+ 0);
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(r_cpu_timer1, "r-timer1",
+ r_ahb_apb_parents, 0x104,
+ 0, 0, /* no M */
+ 1, 3, /* P */
+ 4, 3, /* mux */
+ BIT(0),
+ 0);
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(r_cpu_timer2, "r-timer2",
+ r_ahb_apb_parents, 0x108,
+ 0, 0, /* no M */
+ 1, 3, /* P */
+ 4, 3, /* mux */
+ BIT(0),
+ 0);
+
+static SUNXI_CCU_GATE_HW(bus_r_timer_clk, "bus-r-timer", &r_ahb_clk.common.hw,
+ 0x11c, BIT(0), 0);
+static SUNXI_CCU_GATE_HW(bus_r_twd_clk, "bus-r-twd", &r_apb0_clk.common.hw,
+ 0x12c, BIT(0), 0);
+
+static const struct clk_parent_data r_pwmctrl_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+};
+static SUNXI_CCU_MUX_DATA_WITH_GATE(r_pwmctrl_clk, "r-pwmctrl",
+ r_pwmctrl_parents, 0x130,
+ 24, 2, /* mux */
+ BIT(31),
+ 0);
+static SUNXI_CCU_GATE_HW(bus_r_pwmctrl_clk, "bus-r-pwmctrl",
+ &r_apb0_clk.common.hw, 0x13c, BIT(0), 0);
+
+/* SPI clock is /M/N (same as new MMC?) */
+static SUNXI_CCU_GATE_HW(bus_r_spi_clk, "bus-r-spi",
+ &r_ahb_clk.common.hw, 0x15c, BIT(0), 0);
+static SUNXI_CCU_GATE_HW(bus_r_spinlock_clk, "bus-r-spinlock",
+ &r_ahb_clk.common.hw, 0x16c, BIT(0), 0);
+static SUNXI_CCU_GATE_HW(bus_r_msgbox_clk, "bus-r-msgbox",
+ &r_ahb_clk.common.hw, 0x17c, BIT(0), 0);
+static SUNXI_CCU_GATE_HW(bus_r_uart0_clk, "bus-r-uart0",
+ &r_apb1_clk.common.hw, 0x18c, BIT(0), 0);
+static SUNXI_CCU_GATE_HW(bus_r_uart1_clk, "bus-r-uart1",
+ &r_apb1_clk.common.hw, 0x18c, BIT(1), 0);
+static SUNXI_CCU_GATE_HW(bus_r_i2c0_clk, "bus-r-i2c0",
+ &r_apb1_clk.common.hw, 0x19c, BIT(0), 0);
+static SUNXI_CCU_GATE_HW(bus_r_i2c1_clk, "bus-r-i2c1",
+ &r_apb1_clk.common.hw, 0x19c, BIT(1), 0);
+static SUNXI_CCU_GATE_HW(bus_r_i2c2_clk, "bus-r-i2c2",
+ &r_apb1_clk.common.hw, 0x19c, BIT(2), 0);
+static SUNXI_CCU_GATE_HW(bus_r_ppu0_clk, "bus-r-ppu0",
+ &r_apb0_clk.common.hw, 0x1ac, BIT(0), 0);
+static SUNXI_CCU_GATE_HW(bus_r_ppu1_clk, "bus-r-ppu1",
+ &r_apb0_clk.common.hw, 0x1ac, BIT(1), 0);
+static SUNXI_CCU_GATE_HW(bus_r_cpu_bist_clk, "bus-r-cpu-bist",
+ &r_apb0_clk.common.hw, 0x1bc, BIT(0), 0);
+
+static const struct clk_parent_data r_ir_rx_parents[] = {
+ { .fw_name = "losc" },
+ { .fw_name = "hosc" },
+};
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(r_ir_rx_clk, "r-ir-rx",
+ r_ir_rx_parents, 0x1c0,
+ 0, 5, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+static SUNXI_CCU_GATE_HW(bus_r_ir_rx_clk, "bus-r-ir-rx",
+ &r_apb0_clk.common.hw, 0x1cc, BIT(0), 0);
+
+static SUNXI_CCU_GATE_HW(bus_r_dma_clk, "bus-r-dma",
+ &r_apb0_clk.common.hw, 0x1dc, BIT(0), 0);
+static SUNXI_CCU_GATE_HW(bus_r_rtc_clk, "bus-r-rtc",
+ &r_apb0_clk.common.hw, 0x20c, BIT(0), 0);
+static SUNXI_CCU_GATE_HW(bus_r_cpucfg_clk, "bus-r-cpucfg",
+ &r_apb0_clk.common.hw, 0x22c, BIT(0), 0);
+
+static struct ccu_common *sun55i_a523_r_ccu_clks[] = {
+ &r_ahb_clk.common,
+ &r_apb0_clk.common,
+ &r_apb1_clk.common,
+ &r_cpu_timer0.common,
+ &r_cpu_timer1.common,
+ &r_cpu_timer2.common,
+ &bus_r_timer_clk.common,
+ &bus_r_twd_clk.common,
+ &r_pwmctrl_clk.common,
+ &bus_r_pwmctrl_clk.common,
+ &bus_r_spi_clk.common,
+ &bus_r_spinlock_clk.common,
+ &bus_r_msgbox_clk.common,
+ &bus_r_uart0_clk.common,
+ &bus_r_uart1_clk.common,
+ &bus_r_i2c0_clk.common,
+ &bus_r_i2c1_clk.common,
+ &bus_r_i2c2_clk.common,
+ &bus_r_ppu0_clk.common,
+ &bus_r_ppu1_clk.common,
+ &bus_r_cpu_bist_clk.common,
+ &r_ir_rx_clk.common,
+ &bus_r_ir_rx_clk.common,
+ &bus_r_dma_clk.common,
+ &bus_r_rtc_clk.common,
+ &bus_r_cpucfg_clk.common,
+};
+
+static struct clk_hw_onecell_data sun55i_a523_r_hw_clks = {
+ .num = CLK_NUMBER,
+ .hws = {
+ [CLK_R_AHB] = &r_ahb_clk.common.hw,
+ [CLK_R_APB0] = &r_apb0_clk.common.hw,
+ [CLK_R_APB1] = &r_apb1_clk.common.hw,
+ [CLK_R_TIMER0] = &r_cpu_timer0.common.hw,
+ [CLK_R_TIMER1] = &r_cpu_timer1.common.hw,
+ [CLK_R_TIMER2] = &r_cpu_timer2.common.hw,
+ [CLK_BUS_R_TIMER] = &bus_r_timer_clk.common.hw,
+ [CLK_BUS_R_TWD] = &bus_r_twd_clk.common.hw,
+ [CLK_R_PWMCTRL] = &r_pwmctrl_clk.common.hw,
+ [CLK_BUS_R_PWMCTRL] = &bus_r_pwmctrl_clk.common.hw,
+ [CLK_BUS_R_SPI] = &bus_r_spi_clk.common.hw,
+ [CLK_BUS_R_SPINLOCK] = &bus_r_spinlock_clk.common.hw,
+ [CLK_BUS_R_MSGBOX] = &bus_r_msgbox_clk.common.hw,
+ [CLK_BUS_R_UART0] = &bus_r_uart0_clk.common.hw,
+ [CLK_BUS_R_UART1] = &bus_r_uart1_clk.common.hw,
+ [CLK_BUS_R_I2C0] = &bus_r_i2c0_clk.common.hw,
+ [CLK_BUS_R_I2C1] = &bus_r_i2c1_clk.common.hw,
+ [CLK_BUS_R_I2C2] = &bus_r_i2c2_clk.common.hw,
+ [CLK_BUS_R_PPU0] = &bus_r_ppu0_clk.common.hw,
+ [CLK_BUS_R_PPU1] = &bus_r_ppu1_clk.common.hw,
+ [CLK_BUS_R_CPU_BIST] = &bus_r_cpu_bist_clk.common.hw,
+ [CLK_R_IR_RX] = &r_ir_rx_clk.common.hw,
+ [CLK_BUS_R_IR_RX] = &bus_r_ir_rx_clk.common.hw,
+ [CLK_BUS_R_DMA] = &bus_r_dma_clk.common.hw,
+ [CLK_BUS_R_RTC] = &bus_r_rtc_clk.common.hw,
+ [CLK_BUS_R_CPUCFG] = &bus_r_cpucfg_clk.common.hw,
+ },
+};
+
+static struct ccu_reset_map sun55i_a523_r_ccu_resets[] = {
+ [RST_BUS_R_TIMER] = { 0x11c, BIT(16) },
+ [RST_BUS_R_TWD] = { 0x12c, BIT(16) },
+ [RST_BUS_R_PWMCTRL] = { 0x13c, BIT(16) },
+ [RST_BUS_R_SPI] = { 0x15c, BIT(16) },
+ [RST_BUS_R_SPINLOCK] = { 0x16c, BIT(16) },
+ [RST_BUS_R_MSGBOX] = { 0x17c, BIT(16) },
+ [RST_BUS_R_UART0] = { 0x18c, BIT(16) },
+ [RST_BUS_R_UART1] = { 0x18c, BIT(17) },
+ [RST_BUS_R_I2C0] = { 0x19c, BIT(16) },
+ [RST_BUS_R_I2C1] = { 0x19c, BIT(17) },
+ [RST_BUS_R_I2C2] = { 0x19c, BIT(18) },
+ [RST_BUS_R_PPU1] = { 0x1ac, BIT(17) },
+ [RST_BUS_R_IR_RX] = { 0x1cc, BIT(16) },
+ [RST_BUS_R_RTC] = { 0x20c, BIT(16) },
+ [RST_BUS_R_CPUCFG] = { 0x22c, BIT(16) },
+};
+
+static const struct sunxi_ccu_desc sun55i_a523_r_ccu_desc = {
+ .ccu_clks = sun55i_a523_r_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun55i_a523_r_ccu_clks),
+
+ .hw_clks = &sun55i_a523_r_hw_clks,
+
+ .resets = sun55i_a523_r_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun55i_a523_r_ccu_resets),
+};
+
+static int sun55i_a523_r_ccu_probe(struct platform_device *pdev)
+{
+ void __iomem *reg;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun55i_a523_r_ccu_desc);
+}
+
+static const struct of_device_id sun55i_a523_r_ccu_ids[] = {
+ { .compatible = "allwinner,sun55i-a523-r-ccu" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sun55i_a523_r_ccu_ids);
+
+static struct platform_driver sun55i_a523_r_ccu_driver = {
+ .probe = sun55i_a523_r_ccu_probe,
+ .driver = {
+ .name = "sun55i-a523-r-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun55i_a523_r_ccu_ids,
+ },
+};
+module_platform_driver(sun55i_a523_r_ccu_driver);
+
+MODULE_IMPORT_NS("SUNXI_CCU");
+MODULE_DESCRIPTION("Support for the Allwinner A523 PRCM CCU");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun55i-a523-r.h b/drivers/clk/sunxi-ng/ccu-sun55i-a523-r.h
new file mode 100644
index 000000000000..d50f46ac4f3f
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun55i-a523-r.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2024 Arm Ltd.
+ */
+
+#ifndef _CCU_SUN55I_A523_R_H
+#define _CCU_SUN55I_A523_R_H
+
+#include <dt-bindings/clock/sun55i-a523-r-ccu.h>
+#include <dt-bindings/reset/sun55i-a523-r-ccu.h>
+
+#define CLK_NUMBER (CLK_BUS_R_CPUCFG + 1)
+
+#endif /* _CCU_SUN55I_A523_R_H */
diff --git a/drivers/clk/sunxi-ng/ccu-sun55i-a523.c b/drivers/clk/sunxi-ng/ccu-sun55i-a523.c
new file mode 100644
index 000000000000..9efb9fd24b42
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun55i-a523.c
@@ -0,0 +1,1685 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024 Arm Ltd.
+ * Based on the D1 CCU driver:
+ * Copyright (c) 2020 huangzhenwei@allwinnertech.com
+ * Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "../clk.h"
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+#include "ccu_mult.h"
+#include "ccu_nk.h"
+#include "ccu_nkm.h"
+#include "ccu_nkmp.h"
+#include "ccu_nm.h"
+
+#include "ccu-sun55i-a523.h"
+
+/*
+ * The 24 MHz oscillator, the root of most of the clock tree.
+ * .fw_name is the string used in the DT "clock-names" property, used to
+ * identify the corresponding clock in the "clocks" property.
+ */
+static const struct clk_parent_data osc24M[] = {
+ { .fw_name = "hosc" }
+};
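In device-tree terms this means the CCU node is expected to carry a clocks phandle whose clock-names entry is "hosc"; the common clock framework resolves a clk_parent_data entry by matching its .fw_name against the clock-names property of the clock provider's own node.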
+
+/**************************************************************************
+ * PLLs *
+ **************************************************************************/
+
+/* Some PLLs are input * N / div1 / P. Model them as NKMP with no K */
+#define SUN55I_A523_PLL_DDR0_REG 0x010
+static struct ccu_nkmp pll_ddr_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 11),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x010,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-ddr0", osc24M,
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_GATE |
+ CLK_IS_CRITICAL),
+ },
+};
+
+/*
+ * There is no actual clock output with that frequency (2.4 GHz); instead,
+ * the PLL has multiple outputs with adjustable dividers from that base
+ * frequency.
+ * Model them separately as divider clocks based on that parent here.
+ */
+#define SUN55I_A523_PLL_PERIPH0_REG 0x020
+static struct ccu_nm pll_periph0_4x_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 11),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .common = {
+ .reg = 0x020,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-periph0-4x",
+ osc24M, &ccu_nm_ops,
+ CLK_SET_RATE_GATE),
+ },
+};
+/*
+ * Most clock-defining macros expect an *array* of parent clocks, even if
+ * they do not contain a muxer to select between different parents.
+ * The macros ending in just _HW take a simple clock pointer, but then create
+ * a single-entry array out of that. The macros using _HWS take such an
+ * array (even when it is a single-entry one), which avoids having those
+ * helper arrays created inside *every* clock definition.
+ * This means that, for every clock referenced more than once, it is
+ * useful to create such a dummy array and use _HWS.
+ */
+static const struct clk_hw *pll_periph0_4x_hws[] = {
+ &pll_periph0_4x_clk.common.hw
+};
+
+static SUNXI_CCU_M_HWS(pll_periph0_2x_clk, "pll-periph0-2x",
+ pll_periph0_4x_hws, 0x020, 16, 3, 0);
+static const struct clk_hw *pll_periph0_2x_hws[] = {
+ &pll_periph0_2x_clk.common.hw
+};
+static SUNXI_CCU_M_HWS(pll_periph0_800M_clk, "pll-periph0-800M",
+ pll_periph0_4x_hws, 0x020, 20, 3, 0);
+static SUNXI_CCU_M_HWS(pll_periph0_480M_clk, "pll-periph0-480M",
+ pll_periph0_4x_hws, 0x020, 2, 3, 0);
+static const struct clk_hw *pll_periph0_480M_hws[] = {
+ &pll_periph0_480M_clk.common.hw
+};
+static CLK_FIXED_FACTOR_HWS(pll_periph0_600M_clk, "pll-periph0-600M",
+ pll_periph0_2x_hws, 2, 1, 0);
+static CLK_FIXED_FACTOR_HWS(pll_periph0_400M_clk, "pll-periph0-400M",
+ pll_periph0_2x_hws, 3, 1, 0);
+static CLK_FIXED_FACTOR_HWS(pll_periph0_300M_clk, "pll-periph0-300M",
+ pll_periph0_2x_hws, 4, 1, 0);
+static CLK_FIXED_FACTOR_HWS(pll_periph0_200M_clk, "pll-periph0-200M",
+ pll_periph0_2x_hws, 6, 1, 0);
+static CLK_FIXED_FACTOR_HWS(pll_periph0_150M_clk, "pll-periph0-150M",
+ pll_periph0_2x_hws, 8, 1, 0);
+static CLK_FIXED_FACTOR_HWS(pll_periph0_160M_clk, "pll-periph0-160M",
+ pll_periph0_480M_hws, 3, 1, 0);
+static const struct clk_hw *pll_periph0_150M_hws[] = {
+ &pll_periph0_150M_clk.hw
+};
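To make the derived rates concrete (assuming the nominal 2.4 GHz setting of pll-periph0-4x with its 2x divider programmed for /2): pll-periph0-2x runs at 1.2 GHz, so the fixed-factor children come out as 600 MHz (/2), 400 MHz (/3), 300 MHz (/4), 200 MHz (/6) and 150 MHz (/8), while the 160 MHz output is derived as 480 MHz / 3 from the separate 480M divider.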
+
+#define SUN55I_A523_PLL_PERIPH1_REG 0x028
+static struct ccu_nm pll_periph1_4x_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 11),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .common = {
+ .reg = 0x028,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-periph1-4x",
+ osc24M, &ccu_nm_ops,
+ CLK_SET_RATE_GATE),
+ },
+};
+
+static const struct clk_hw *pll_periph1_4x_hws[] = {
+ &pll_periph1_4x_clk.common.hw
+};
+static SUNXI_CCU_M_HWS(pll_periph1_2x_clk, "pll-periph1-2x",
+ pll_periph1_4x_hws, 0x028, 16, 3, 0);
+static SUNXI_CCU_M_HWS(pll_periph1_800M_clk, "pll-periph1-800M",
+ pll_periph1_4x_hws, 0x028, 20, 3, 0);
+static SUNXI_CCU_M_HWS(pll_periph1_480M_clk, "pll-periph1-480M",
+ pll_periph1_4x_hws, 0x028, 2, 3, 0);
+
+static const struct clk_hw *pll_periph1_2x_hws[] = {
+ &pll_periph1_2x_clk.common.hw
+};
+static CLK_FIXED_FACTOR_HWS(pll_periph1_600M_clk, "pll-periph1-600M",
+ pll_periph1_2x_hws, 2, 1, 0);
+static CLK_FIXED_FACTOR_HWS(pll_periph1_400M_clk, "pll-periph1-400M",
+ pll_periph1_2x_hws, 3, 1, 0);
+static CLK_FIXED_FACTOR_HWS(pll_periph1_300M_clk, "pll-periph1-300M",
+ pll_periph1_2x_hws, 4, 1, 0);
+static CLK_FIXED_FACTOR_HWS(pll_periph1_200M_clk, "pll-periph1-200M",
+ pll_periph1_2x_hws, 6, 1, 0);
+static CLK_FIXED_FACTOR_HWS(pll_periph1_150M_clk, "pll-periph1-150M",
+ pll_periph1_2x_hws, 8, 1, 0);
+static const struct clk_hw *pll_periph1_480M_hws[] = {
+ &pll_periph1_480M_clk.common.hw
+};
+static CLK_FIXED_FACTOR_HWS(pll_periph1_160M_clk, "pll-periph1-160M",
+ pll_periph1_480M_hws, 3, 1, 0);
+
+#define SUN55I_A523_PLL_GPU_REG 0x030
+static struct ccu_nkmp pll_gpu_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 11),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x030,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-gpu", osc24M,
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_GATE),
+ },
+};
+
+#define SUN55I_A523_PLL_VIDEO0_REG 0x040
+static struct ccu_nm pll_video0_8x_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 11),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .common = {
+ .reg = 0x040,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-video0-8x",
+ osc24M, &ccu_nm_ops,
+ CLK_SET_RATE_GATE),
+ },
+};
+
+static const struct clk_hw *pll_video0_8x_hws[] = {
+ &pll_video0_8x_clk.common.hw
+};
+static SUNXI_CCU_M_HWS(pll_video0_4x_clk, "pll-video0-4x",
+ pll_video0_8x_hws, 0x040, 0, 1, 0);
+static CLK_FIXED_FACTOR_HWS(pll_video0_3x_clk, "pll-video0-3x",
+ pll_video0_8x_hws, 3, 1, CLK_SET_RATE_PARENT);
+
+#define SUN55I_A523_PLL_VIDEO1_REG 0x048
+static struct ccu_nm pll_video1_8x_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 11),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .common = {
+ .reg = 0x048,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-video1-8x",
+ osc24M, &ccu_nm_ops,
+ CLK_SET_RATE_GATE),
+ },
+};
+
+static const struct clk_hw *pll_video1_8x_hws[] = {
+ &pll_video1_8x_clk.common.hw
+};
+static SUNXI_CCU_M_HWS(pll_video1_4x_clk, "pll-video1-4x",
+ pll_video1_8x_hws, 0x048, 0, 1, 0);
+static CLK_FIXED_FACTOR_HWS(pll_video1_3x_clk, "pll-video1-3x",
+ pll_video1_8x_hws, 3, 1, CLK_SET_RATE_PARENT);
+
+#define SUN55I_A523_PLL_VIDEO2_REG 0x050
+static struct ccu_nm pll_video2_8x_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 11),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .common = {
+ .reg = 0x050,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-video2-8x",
+ osc24M, &ccu_nm_ops,
+ CLK_SET_RATE_GATE),
+ },
+};
+
+static const struct clk_hw *pll_video2_8x_hws[] = {
+ &pll_video2_8x_clk.common.hw
+};
+static SUNXI_CCU_M_HWS(pll_video2_4x_clk, "pll-video2-4x",
+ pll_video2_8x_hws, 0x050, 0, 1, 0);
+static CLK_FIXED_FACTOR_HWS(pll_video2_3x_clk, "pll-video2-3x",
+ pll_video2_8x_hws, 3, 1, CLK_SET_RATE_PARENT);
+
+#define SUN55I_A523_PLL_VE_REG 0x058
+static struct ccu_nkmp pll_ve_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 11),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x058,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-ve", osc24M,
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_GATE),
+ },
+};
+
+#define SUN55I_A523_PLL_VIDEO3_REG 0x068
+static struct ccu_nm pll_video3_8x_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 11),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .common = {
+ .reg = 0x068,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-video3-8x",
+ osc24M, &ccu_nm_ops,
+ CLK_SET_RATE_GATE),
+ },
+};
+
+static const struct clk_hw *pll_video3_8x_hws[] = {
+ &pll_video3_8x_clk.common.hw
+};
+static SUNXI_CCU_M_HWS(pll_video3_4x_clk, "pll-video3-4x",
+ pll_video3_8x_hws, 0x068, 0, 1, 0);
+static CLK_FIXED_FACTOR_HWS(pll_video3_3x_clk, "pll-video3-3x",
+ pll_video3_8x_hws, 3, 1, CLK_SET_RATE_PARENT);
+
+/*
+ * PLL_AUDIO0 has m0, m1 dividers in addition to the usual N, M factors.
+ * Since we only need a few fixed frequencies from this PLL (22.5792 MHz x 4
+ * and 24.576 MHz x 4), ignore those dividers and force both of them to 1
+ * (encoded as 0) in the probe function below.
+ * The M factor must be an even number to produce a 50% duty cycle output.
+ */
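+/*
+ * For example: the integer factors in the table below alone give
+ * 24 MHz x 75 / 20 = 90 MHz and 24 MHz x 49 / 12 = 98 MHz; the sigma-delta
+ * patterns then supply the fractional remainder to reach the listed
+ * 90.3168 MHz (22.5792 MHz x 4) and 98.304 MHz (24.576 MHz x 4).
+ */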
+#define SUN55I_A523_PLL_AUDIO0_REG 0x078
+static struct ccu_sdm_setting pll_audio0_sdm_table[] = {
+ { .rate = 90316800, .pattern = 0xc000872b, .m = 20, .n = 75 },
+ { .rate = 98304000, .pattern = 0xc0004dd3, .m = 12, .n = 49 },
+};
+
+static struct ccu_nm pll_audio0_4x_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 11),
+ .m = _SUNXI_CCU_DIV(16, 6),
+ .sdm = _SUNXI_CCU_SDM(pll_audio0_sdm_table, BIT(24),
+ 0x178, BIT(31)),
+ .min_rate = 180000000U,
+ .max_rate = 3000000000U,
+ .common = {
+ .reg = 0x078,
+ .features = CCU_FEATURE_SIGMA_DELTA_MOD,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-audio0-4x",
+ osc24M, &ccu_nm_ops,
+ CLK_SET_RATE_GATE),
+ },
+};
+
+static CLK_FIXED_FACTOR_HW(pll_audio0_2x_clk, "pll-audio0-2x",
+ &pll_audio0_4x_clk.common.hw, 2, 1, 0);
+static CLK_FIXED_FACTOR_HW(pll_audio0_clk, "pll-audio0",
+ &pll_audio0_4x_clk.common.hw, 4, 1, 0);
+
+#define SUN55I_A523_PLL_NPU_REG 0x080
+static struct ccu_nm pll_npu_4x_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 11),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .common = {
+ .reg = 0x0080,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-npu-4x",
+ osc24M, &ccu_nm_ops,
+ CLK_SET_RATE_GATE),
+ },
+};
+static CLK_FIXED_FACTOR_HW(pll_npu_2x_clk, "pll-npu-2x",
+ &pll_npu_4x_clk.common.hw, 2, 1, CLK_SET_RATE_PARENT);
+
+static CLK_FIXED_FACTOR_HW(pll_npu_1x_clk, "pll-npu-1x",
+ &pll_npu_4x_clk.common.hw, 4, 1, 0);
+
+/**************************************************************************
+ * bus clocks *
+ **************************************************************************/
+
+static const struct clk_parent_data ahb_apb0_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+ { .hw = &pll_periph0_600M_clk.hw },
+};
+
+static SUNXI_CCU_M_DATA_WITH_MUX(ahb_clk, "ahb", ahb_apb0_parents, 0x510,
+ 0, 5, /* M */
+ 24, 2, /* mux */
+ 0);
+static const struct clk_hw *ahb_hws[] = { &ahb_clk.common.hw };
+
+static SUNXI_CCU_M_DATA_WITH_MUX(apb0_clk, "apb0", ahb_apb0_parents, 0x520,
+ 0, 5, /* M */
+ 24, 2, /* mux */
+ 0);
+static const struct clk_hw *apb0_hws[] = { &apb0_clk.common.hw };
+
+static const struct clk_parent_data apb1_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+ { .hw = &pll_periph0_600M_clk.hw },
+ { .hw = &pll_periph0_480M_clk.common.hw },
+};
+static SUNXI_CCU_M_DATA_WITH_MUX(apb1_clk, "apb1", apb1_parents, 0x524,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ 0);
+static const struct clk_hw *apb1_hws[] = { &apb1_clk.common.hw };
+
+static const struct clk_parent_data mbus_parents[] = {
+ { .hw = &pll_ddr_clk.common.hw },
+ { .hw = &pll_periph1_600M_clk.hw },
+ { .hw = &pll_periph1_480M_clk.common.hw },
+ { .hw = &pll_periph1_400M_clk.hw },
+ { .hw = &pll_periph1_150M_clk.hw },
+ { .fw_name = "hosc" },
+};
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_FEAT(mbus_clk, "mbus", mbus_parents,
+ 0x540,
+ 0, 5, /* M */
+ 0, 0, /* no P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0, CCU_FEATURE_UPDATE_BIT);
+
+static const struct clk_hw *mbus_hws[] = { &mbus_clk.common.hw };
+
+/**************************************************************************
+ * mod clocks with gates *
+ **************************************************************************/
+
+static const struct clk_hw *de_parents[] = {
+ &pll_periph0_300M_clk.hw,
+ &pll_periph0_400M_clk.hw,
+ &pll_video3_4x_clk.common.hw,
+ &pll_video3_3x_clk.hw,
+};
+
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(de_clk, "de", de_parents, 0x600,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_HWS(bus_de_clk, "bus-de", ahb_hws, 0x60c, BIT(0), 0);
+
+static const struct clk_hw *di_parents[] = {
+ &pll_periph0_300M_clk.hw,
+ &pll_periph0_400M_clk.hw,
+ &pll_video0_4x_clk.common.hw,
+ &pll_video1_4x_clk.common.hw,
+};
+
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(di_clk, "di", di_parents, 0x620,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_HWS(bus_di_clk, "bus-di", ahb_hws, 0x62c, BIT(0), 0);
+
+static const struct clk_hw *g2d_parents[] = {
+ &pll_periph0_400M_clk.hw,
+ &pll_periph0_300M_clk.hw,
+ &pll_video0_4x_clk.common.hw,
+ &pll_video1_4x_clk.common.hw,
+};
+
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(g2d_clk, "g2d", g2d_parents, 0x630,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_g2d_clk, "bus-g2d", ahb_hws, 0x63c, BIT(0), 0);
+
+static const struct clk_hw *gpu_parents[] = {
+ &pll_gpu_clk.common.hw,
+ &pll_periph0_800M_clk.common.hw,
+ &pll_periph0_600M_clk.hw,
+ &pll_periph0_400M_clk.hw,
+ &pll_periph0_300M_clk.hw,
+ &pll_periph0_200M_clk.hw,
+};
+
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(gpu_clk, "gpu", gpu_parents, 0x670,
+ 0, 4, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_HWS(bus_gpu_clk, "bus-gpu", ahb_hws, 0x67c, BIT(0), 0);
+
+static const struct clk_parent_data ce_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_periph0_480M_clk.common.hw },
+ { .hw = &pll_periph0_400M_clk.hw },
+ { .hw = &pll_periph0_300M_clk.hw },
+};
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(ce_clk, "ce", ce_parents, 0x680,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_ce_clk, "bus-ce", ahb_hws, 0x68c, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_ce_sys_clk, "bus-ce-sys", ahb_hws, 0x68c,
+ BIT(1), 0);
+
+static const struct clk_hw *ve_parents[] = {
+ &pll_ve_clk.common.hw,
+ &pll_periph0_480M_clk.common.hw,
+ &pll_periph0_400M_clk.hw,
+ &pll_periph0_300M_clk.hw,
+};
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(ve_clk, "ve", ve_parents, 0x690,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_HWS(bus_ve_clk, "bus-ve", ahb_hws, 0x69c, BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_dma_clk, "bus-dma", ahb_hws, 0x70c, BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_msgbox_clk, "bus-msgbox", ahb_hws, 0x71c,
+ BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_spinlock_clk, "bus-spinlock", ahb_hws, 0x72c,
+ BIT(0), 0);
+
+static const struct clk_parent_data hstimer_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "iosc" },
+ { .fw_name = "losc" },
+ { .hw = &pll_periph0_200M_clk.hw },
+};
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(hstimer0_clk, "hstimer0",
+ hstimer_parents, 0x730,
+ 0, 0, /* M */
+ 0, 3, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(hstimer1_clk, "hstimer1",
+ hstimer_parents,
+ 0x734,
+ 0, 0, /* M */
+ 0, 3, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(hstimer2_clk, "hstimer2",
+ hstimer_parents,
+ 0x738,
+ 0, 0, /* M */
+ 0, 3, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(hstimer3_clk, "hstimer3",
+ hstimer_parents,
+ 0x73c,
+ 0, 0, /* M */
+ 0, 3, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(hstimer4_clk, "hstimer4",
+ hstimer_parents,
+ 0x740,
+ 0, 0, /* M */
+ 0, 3, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(hstimer5_clk, "hstimer5",
+ hstimer_parents,
+ 0x744,
+ 0, 0, /* M */
+ 0, 3, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_hstimer_clk, "bus-hstimer", ahb_hws, 0x74c,
+ BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_dbg_clk, "bus-dbg", ahb_hws, 0x78c,
+ BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_pwm0_clk, "bus-pwm0", apb1_hws, 0x7ac, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_pwm1_clk, "bus-pwm1", apb1_hws, 0x7ac, BIT(1), 0);
+
+static const struct clk_parent_data iommu_parents[] = {
+ { .hw = &pll_periph0_600M_clk.hw },
+ { .hw = &pll_ddr_clk.common.hw },
+ { .hw = &pll_periph0_480M_clk.common.hw },
+ { .hw = &pll_periph0_400M_clk.hw },
+ { .hw = &pll_periph0_150M_clk.hw },
+ { .fw_name = "hosc" },
+};
+
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_FEAT(iommu_clk, "iommu", iommu_parents,
+ 0x7b0,
+ 0, 5, /* M */
+ 0, 0, /* no P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT,
+ CCU_FEATURE_UPDATE_BIT);
+
+static SUNXI_CCU_GATE_HWS(bus_iommu_clk, "bus-iommu", apb0_hws, 0x7bc,
+ BIT(0), 0);
+
+static const struct clk_parent_data dram_parents[] = {
+ { .hw = &pll_ddr_clk.common.hw },
+ { .hw = &pll_periph0_600M_clk.hw },
+ { .hw = &pll_periph0_480M_clk.common.hw },
+ { .hw = &pll_periph0_400M_clk.hw },
+ { .hw = &pll_periph0_150M_clk.hw },
+};
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_FEAT(dram_clk, "dram", dram_parents,
+ 0x800,
+ 0, 5, /* M */
+ 0, 0, /* no P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_IS_CRITICAL,
+ CCU_FEATURE_UPDATE_BIT);
+
+static SUNXI_CCU_GATE_HWS(mbus_dma_clk, "mbus-dma", mbus_hws,
+ 0x804, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(mbus_ve_clk, "mbus-ve", mbus_hws,
+ 0x804, BIT(1), 0);
+static SUNXI_CCU_GATE_HWS(mbus_ce_clk, "mbus-ce", mbus_hws,
+ 0x804, BIT(2), 0);
+static SUNXI_CCU_GATE_HWS(mbus_nand_clk, "mbus-nand", mbus_hws,
+ 0x804, BIT(5), 0);
+static SUNXI_CCU_GATE_HWS(mbus_usb3_clk, "mbus-usb3", mbus_hws,
+ 0x804, BIT(6), 0);
+static SUNXI_CCU_GATE_HWS(mbus_csi_clk, "mbus-csi", mbus_hws,
+ 0x804, BIT(8), 0);
+static SUNXI_CCU_GATE_HWS(mbus_isp_clk, "mbus-isp", mbus_hws,
+ 0x804, BIT(9), 0);
+static SUNXI_CCU_GATE_HWS(mbus_gmac1_clk, "mbus-gmac1", mbus_hws,
+ 0x804, BIT(12), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_dram_clk, "bus-dram", ahb_hws, 0x80c,
+ BIT(0), CLK_IS_CRITICAL);
+
+static const struct clk_parent_data nand_mmc_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_periph0_400M_clk.hw },
+ { .hw = &pll_periph0_300M_clk.hw },
+ { .hw = &pll_periph1_400M_clk.hw },
+ { .hw = &pll_periph1_300M_clk.hw },
+};
+
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(nand0_clk, "nand0", nand_mmc_parents,
+ 0x810,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(nand1_clk, "nand1", nand_mmc_parents,
+ 0x814,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_nand_clk, "bus-nand", ahb_hws, 0x82c,
+ BIT(0), 0);
+
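+/*
+ * The MMC mod clocks below combine two linear dividers (M and P) with a
+ * fixed post-divider of 2, so the rate reported to consumers is
+ * parent / M / P / 2.
+ */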
+static SUNXI_CCU_MP_MUX_GATE_POSTDIV_DUALDIV(mmc0_clk, "mmc0", nand_mmc_parents,
+ 0x830,
+ 0, 5, /* M */
+ 8, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 2, /* post div */
+ 0);
+
+static SUNXI_CCU_MP_MUX_GATE_POSTDIV_DUALDIV(mmc1_clk, "mmc1", nand_mmc_parents,
+ 0x834,
+ 0, 5, /* M */
+ 8, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 2, /* post div */
+ 0);
+
+static const struct clk_parent_data mmc2_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_periph0_800M_clk.common.hw },
+ { .hw = &pll_periph0_600M_clk.hw },
+ { .hw = &pll_periph1_800M_clk.common.hw },
+ { .hw = &pll_periph1_600M_clk.hw },
+};
+
+static SUNXI_CCU_MP_MUX_GATE_POSTDIV_DUALDIV(mmc2_clk, "mmc2", mmc2_parents,
+ 0x838,
+ 0, 5, /* M */
+ 8, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 2, /* post div */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_mmc0_clk, "bus-mmc0", ahb_hws, 0x84c, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_mmc1_clk, "bus-mmc1", ahb_hws, 0x84c, BIT(1), 0);
+static SUNXI_CCU_GATE_HWS(bus_mmc2_clk, "bus-mmc2", ahb_hws, 0x84c, BIT(2), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_sysdap_clk, "bus-sysdap", apb1_hws, 0x88c,
+ BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_uart0_clk, "bus-uart0", apb1_hws, 0x90c,
+ BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_uart1_clk, "bus-uart1", apb1_hws, 0x90c,
+ BIT(1), 0);
+static SUNXI_CCU_GATE_HWS(bus_uart2_clk, "bus-uart2", apb1_hws, 0x90c,
+ BIT(2), 0);
+static SUNXI_CCU_GATE_HWS(bus_uart3_clk, "bus-uart3", apb1_hws, 0x90c,
+ BIT(3), 0);
+static SUNXI_CCU_GATE_HWS(bus_uart4_clk, "bus-uart4", apb1_hws, 0x90c,
+ BIT(4), 0);
+static SUNXI_CCU_GATE_HWS(bus_uart5_clk, "bus-uart5", apb1_hws, 0x90c,
+ BIT(5), 0);
+static SUNXI_CCU_GATE_HWS(bus_uart6_clk, "bus-uart6", apb1_hws, 0x90c,
+ BIT(6), 0);
+static SUNXI_CCU_GATE_HWS(bus_uart7_clk, "bus-uart7", apb1_hws, 0x90c,
+ BIT(7), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_i2c0_clk, "bus-i2c0", apb1_hws, 0x91c, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_i2c1_clk, "bus-i2c1", apb1_hws, 0x91c, BIT(1), 0);
+static SUNXI_CCU_GATE_HWS(bus_i2c2_clk, "bus-i2c2", apb1_hws, 0x91c, BIT(2), 0);
+static SUNXI_CCU_GATE_HWS(bus_i2c3_clk, "bus-i2c3", apb1_hws, 0x91c, BIT(3), 0);
+static SUNXI_CCU_GATE_HWS(bus_i2c4_clk, "bus-i2c4", apb1_hws, 0x91c, BIT(4), 0);
+static SUNXI_CCU_GATE_HWS(bus_i2c5_clk, "bus-i2c5", apb1_hws, 0x91c, BIT(5), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_can_clk, "bus-can", apb1_hws, 0x92c, BIT(0), 0);
+
+static const struct clk_parent_data spi_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_periph0_300M_clk.hw },
+ { .hw = &pll_periph0_200M_clk.hw },
+ { .hw = &pll_periph1_300M_clk.hw },
+ { .hw = &pll_periph1_200M_clk.hw },
+};
+static SUNXI_CCU_DUALDIV_MUX_GATE(spi0_clk, "spi0", spi_parents, 0x940,
+ 0, 5, /* M */
+ 8, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+static SUNXI_CCU_DUALDIV_MUX_GATE(spi1_clk, "spi1", spi_parents, 0x944,
+ 0, 5, /* M */
+ 8, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+static SUNXI_CCU_DUALDIV_MUX_GATE(spi2_clk, "spi2", spi_parents, 0x948,
+ 0, 5, /* M */
+ 8, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+static SUNXI_CCU_DUALDIV_MUX_GATE(spifc_clk, "spifc", nand_mmc_parents, 0x950,
+ 0, 5, /* M */
+ 8, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+static SUNXI_CCU_GATE_HWS(bus_spi0_clk, "bus-spi0", ahb_hws, 0x96c, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_spi1_clk, "bus-spi1", ahb_hws, 0x96c, BIT(1), 0);
+static SUNXI_CCU_GATE_HWS(bus_spi2_clk, "bus-spi2", ahb_hws, 0x96c, BIT(2), 0);
+static SUNXI_CCU_GATE_HWS(bus_spifc_clk, "bus-spifc", ahb_hws, 0x96c,
+ BIT(3), 0);
+
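+/*
+ * The EMAC reference clocks below are gates with a fixed prediv of 6 from
+ * pll-periph0-150M, i.e. 150 MHz / 6 = 25 MHz at the nominal parent rate.
+ */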
+static SUNXI_CCU_GATE_HWS_WITH_PREDIV(emac0_25M_clk, "emac0-25M",
+ pll_periph0_150M_hws,
+ 0x970, BIT(31) | BIT(30), 6, 0);
+static SUNXI_CCU_GATE_HWS_WITH_PREDIV(emac1_25M_clk, "emac1-25M",
+ pll_periph0_150M_hws,
+ 0x974, BIT(31) | BIT(30), 6, 0);
+static SUNXI_CCU_GATE_HWS(bus_emac0_clk, "bus-emac0", ahb_hws, 0x97c,
+ BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_emac1_clk, "bus-emac1", ahb_hws, 0x98c,
+ BIT(0), 0);
+
+static const struct clk_parent_data ir_rx_parents[] = {
+ { .fw_name = "losc" },
+ { .fw_name = "hosc" },
+};
+
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(ir_rx_clk, "ir-rx", ir_rx_parents, 0x990,
+ 0, 5, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+static SUNXI_CCU_GATE_HWS(bus_ir_rx_clk, "bus-ir-rx", apb0_hws, 0x99c,
+ BIT(0), 0);
+
+static const struct clk_parent_data ir_tx_ledc_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_periph1_600M_clk.hw },
+};
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(ir_tx_clk, "ir-tx", ir_tx_ledc_parents,
+ 0x9c0,
+ 0, 5, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+static SUNXI_CCU_GATE_HWS(bus_ir_tx_clk, "bus-ir-tx", apb0_hws, 0x9cc,
+ BIT(0), 0);
+
+static SUNXI_CCU_M_WITH_GATE(gpadc0_clk, "gpadc0", "hosc", 0x9e0,
+ 0, 5, /* M */
+ BIT(31), /* gate */
+ 0);
+static SUNXI_CCU_M_WITH_GATE(gpadc1_clk, "gpadc1", "hosc", 0x9e4,
+ 0, 5, /* M */
+ BIT(31), /* gate */
+ 0);
+static SUNXI_CCU_GATE_HWS(bus_gpadc0_clk, "bus-gpadc0", ahb_hws, 0x9ec,
+ BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_gpadc1_clk, "bus-gpadc1", ahb_hws, 0x9ec,
+ BIT(1), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_ths_clk, "bus-ths", apb0_hws, 0x9fc, BIT(0), 0);
+
+/*
+ * The first parent is a 48 MHz input clock divided by 4. That 48 MHz clock
+ * is generated from osc24M by a 2x multiplier synchronized by pll-periph0,
+ * and is also used by the OHCI module.
+ */
+static const struct clk_parent_data usb_ohci_parents[] = {
+ { .hw = &pll_periph0_4x_clk.common.hw },
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+};
+static const struct ccu_mux_fixed_prediv usb_ohci_predivs[] = {
+ { .index = 0, .div = 50 },
+ { .index = 1, .div = 2 },
+};
+
+static struct ccu_mux usb_ohci0_clk = {
+ .enable = BIT(31),
+ .mux = {
+ .shift = 24,
+ .width = 2,
+ .fixed_predivs = usb_ohci_predivs,
+ .n_predivs = ARRAY_SIZE(usb_ohci_predivs),
+ },
+ .common = {
+ .reg = 0xa70,
+ .features = CCU_FEATURE_FIXED_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("usb-ohci0",
+ usb_ohci_parents,
+ &ccu_mux_ops,
+ 0),
+ },
+};
+
+static struct ccu_mux usb_ohci1_clk = {
+ .enable = BIT(31),
+ .mux = {
+ .shift = 24,
+ .width = 2,
+ .fixed_predivs = usb_ohci_predivs,
+ .n_predivs = ARRAY_SIZE(usb_ohci_predivs),
+ },
+ .common = {
+ .reg = 0xa74,
+ .features = CCU_FEATURE_FIXED_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("usb-ohci1",
+ usb_ohci_parents,
+ &ccu_mux_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_GATE_HWS(bus_ohci0_clk, "bus-ohci0", ahb_hws, 0xa8c,
+ BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_ohci1_clk, "bus-ohci1", ahb_hws, 0xa8c,
+ BIT(1), 0);
+static SUNXI_CCU_GATE_HWS(bus_ehci0_clk, "bus-ehci0", ahb_hws, 0xa8c,
+ BIT(4), 0);
+static SUNXI_CCU_GATE_HWS(bus_ehci1_clk, "bus-ehci1", ahb_hws, 0xa8c,
+ BIT(5), 0);
+static SUNXI_CCU_GATE_HWS(bus_otg_clk, "bus-otg", ahb_hws, 0xa8c, BIT(8), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_lradc_clk, "bus-lradc", apb0_hws, 0xa9c,
+ BIT(0), 0);
+
+static const struct clk_parent_data losc_hosc_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+};
+
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(pcie_aux_clk, "pcie-aux",
+ losc_hosc_parents, 0xaa0,
+ 0, 5, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_display0_top_clk, "bus-display0-top", ahb_hws,
+ 0xabc, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_display1_top_clk, "bus-display1-top", ahb_hws,
+ 0xacc, BIT(0), 0);
+
+static SUNXI_CCU_GATE_DATA(hdmi_24M_clk, "hdmi-24M", osc24M, 0xb04, BIT(31), 0);
+
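+/*
+ * The prediv of 36621 approximates a 32768 Hz CEC clock from the nominal
+ * 1.2 GHz pll-periph0-2x rate: 1200000000 Hz / 36621 is about 32768.1 Hz,
+ * i.e. within a few ppm of the target.
+ */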
+static SUNXI_CCU_GATE_HWS_WITH_PREDIV(hdmi_cec_32k_clk, "hdmi-cec-32k",
+ pll_periph0_2x_hws,
+ 0xb10, BIT(30), 36621, 0);
+
+static const struct clk_parent_data hdmi_cec_parents[] = {
+ { .fw_name = "losc" },
+ { .hw = &hdmi_cec_32k_clk.common.hw },
+};
+static SUNXI_CCU_MUX_DATA_WITH_GATE(hdmi_cec_clk, "hdmi-cec", hdmi_cec_parents,
+ 0xb10,
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_hdmi_clk, "bus-hdmi", ahb_hws, 0xb1c, BIT(0), 0);
+
+static const struct clk_parent_data mipi_dsi_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_periph0_200M_clk.hw },
+ { .hw = &pll_periph0_150M_clk.hw },
+};
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(mipi_dsi0_clk, "mipi-dsi0",
+ mipi_dsi_parents, 0xb24,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(mipi_dsi1_clk, "mipi-dsi1",
+ mipi_dsi_parents, 0xb28,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_mipi_dsi0_clk, "bus-mipi-dsi0", ahb_hws, 0xb4c,
+ BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_mipi_dsi1_clk, "bus-mipi-dsi1", ahb_hws, 0xb4c,
+ BIT(1), 0);
+
+static const struct clk_hw *tcon_parents[] = {
+ &pll_video0_4x_clk.common.hw,
+ &pll_video1_4x_clk.common.hw,
+ &pll_video2_4x_clk.common.hw,
+ &pll_video3_4x_clk.common.hw,
+ &pll_periph0_2x_clk.common.hw,
+ &pll_video0_3x_clk.hw,
+ &pll_video1_3x_clk.hw,
+};
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(tcon_lcd0_clk, "tcon-lcd0", tcon_parents,
+ 0xb60,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(tcon_lcd1_clk, "tcon-lcd1", tcon_parents,
+ 0xb64,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static const struct clk_hw *tcon_tv_parents[] = {
+ &pll_video0_4x_clk.common.hw,
+ &pll_video1_4x_clk.common.hw,
+ &pll_video2_4x_clk.common.hw,
+ &pll_video3_4x_clk.common.hw,
+ &pll_periph0_2x_clk.common.hw,
+};
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(tcon_lcd2_clk, "tcon-lcd2",
+ tcon_tv_parents, 0xb68,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(combophy_dsi0_clk, "combophy-dsi0",
+ tcon_parents, 0xb6c,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(combophy_dsi1_clk, "combophy-dsi1",
+ tcon_parents, 0xb70,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_HWS(bus_tcon_lcd0_clk, "bus-tcon-lcd0", ahb_hws, 0xb7c,
+ BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_tcon_lcd1_clk, "bus-tcon-lcd1", ahb_hws, 0xb7c,
+ BIT(1), 0);
+static SUNXI_CCU_GATE_HWS(bus_tcon_lcd2_clk, "bus-tcon-lcd2", ahb_hws, 0xb7c,
+ BIT(2), 0);
+
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(tcon_tv0_clk, "tcon-tv0", tcon_tv_parents,
+ 0xb80,
+ 0, 4, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(tcon_tv1_clk, "tcon-tv1", tcon_tv_parents,
+ 0xb84,
+ 0, 4, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_HWS(bus_tcon_tv0_clk, "bus-tcon-tv0", ahb_hws, 0xb9c,
+ BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_tcon_tv1_clk, "bus-tcon-tv1", ahb_hws, 0xb9c,
+ BIT(1), 0);
+
+static const struct clk_hw *edp_parents[] = {
+ &pll_video0_4x_clk.common.hw,
+ &pll_video1_4x_clk.common.hw,
+ &pll_video2_4x_clk.common.hw,
+ &pll_video3_4x_clk.common.hw,
+ &pll_periph0_2x_clk.common.hw,
+};
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(edp_clk, "edp", edp_parents, 0xbb0,
+ 0, 4, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_HWS(bus_edp_clk, "bus-edp", ahb_hws, 0xbbc, BIT(0), 0);
+
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(ledc_clk, "ledc", ir_tx_ledc_parents,
+ 0xbf0,
+ 0, 4, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_ledc_clk, "bus-ledc", apb0_hws, 0xbfc, BIT(0), 0);
+
+static const struct clk_hw *csi_top_parents[] = {
+ &pll_periph0_300M_clk.hw,
+ &pll_periph0_400M_clk.hw,
+ &pll_periph0_480M_clk.common.hw,
+ &pll_video3_4x_clk.common.hw,
+ &pll_video3_3x_clk.hw,
+};
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(csi_top_clk, "csi-top", csi_top_parents,
+ 0xc04,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const struct clk_parent_data csi_mclk_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_video3_4x_clk.common.hw },
+ { .hw = &pll_video0_4x_clk.common.hw },
+ { .hw = &pll_video1_4x_clk.common.hw },
+ { .hw = &pll_video2_4x_clk.common.hw },
+};
+static SUNXI_CCU_DUALDIV_MUX_GATE(csi_mclk0_clk, "csi-mclk0", csi_mclk_parents,
+ 0xc08,
+ 0, 5, /* M */
+ 8, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_DUALDIV_MUX_GATE(csi_mclk1_clk, "csi-mclk1", csi_mclk_parents,
+ 0xc0c,
+ 0, 5, /* M */
+ 8, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_DUALDIV_MUX_GATE(csi_mclk2_clk, "csi-mclk2", csi_mclk_parents,
+ 0xc10,
+ 0, 5, /* M */
+ 8, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_DUALDIV_MUX_GATE(csi_mclk3_clk, "csi-mclk3", csi_mclk_parents,
+ 0xc14,
+ 0, 5, /* M */
+ 8, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_csi_clk, "bus-csi", ahb_hws, 0xc1c, BIT(0), 0);
+
+static const struct clk_hw *isp_parents[] = {
+ &pll_periph0_300M_clk.hw,
+ &pll_periph0_400M_clk.hw,
+ &pll_video2_4x_clk.common.hw,
+ &pll_video3_4x_clk.common.hw,
+};
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(isp_clk, "isp", isp_parents, 0xc20,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const struct clk_parent_data dsp_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+ { .hw = &pll_periph0_2x_clk.common.hw },
+ { .hw = &pll_periph0_480M_clk.common.hw, },
+};
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(dsp_clk, "dsp", dsp_parents, 0xc70,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
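+/*
+ * The fanout gates below use fixed predivs; at the nominal parent rates
+ * they come out as 24 MHz / 2 = 12 MHz, 480 MHz / 30 = 16 MHz,
+ * 1200 MHz / 48 = 25 MHz and 1200 MHz / 24 = 50 MHz.
+ */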
+static SUNXI_CCU_GATE_DATA(fanout_24M_clk, "fanout-24M", osc24M,
+ 0xf30, BIT(0), 0);
+static SUNXI_CCU_GATE_DATA_WITH_PREDIV(fanout_12M_clk, "fanout-12M", osc24M,
+ 0xf30, BIT(1), 2, 0);
+static SUNXI_CCU_GATE_HWS_WITH_PREDIV(fanout_16M_clk, "fanout-16M",
+ pll_periph0_480M_hws,
+ 0xf30, BIT(2), 30, 0);
+static SUNXI_CCU_GATE_HWS_WITH_PREDIV(fanout_25M_clk, "fanout-25M",
+ pll_periph0_2x_hws,
+ 0xf30, BIT(3), 48, 0);
+static SUNXI_CCU_GATE_HWS_WITH_PREDIV(fanout_50M_clk, "fanout-50M",
+ pll_periph0_2x_hws,
+ 0xf30, BIT(4), 24, 0);
+
+static const struct clk_parent_data fanout_27M_parents[] = {
+ { .hw = &pll_video0_4x_clk.common.hw },
+ { .hw = &pll_video1_4x_clk.common.hw },
+ { .hw = &pll_video2_4x_clk.common.hw },
+ { .hw = &pll_video3_4x_clk.common.hw },
+};
+static SUNXI_CCU_DUALDIV_MUX_GATE(fanout_27M_clk, "fanout-27M",
+ fanout_27M_parents, 0xf34,
+ 0, 5, /* div0 */
+ 8, 5, /* div1 */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const struct clk_parent_data fanout_pclk_parents[] = {
+ { .hw = &apb0_clk.common.hw }
+};
+static SUNXI_CCU_DUALDIV_MUX_GATE(fanout_pclk_clk, "fanout-pclk",
+ fanout_pclk_parents,
+ 0xf38,
+ 0, 5, /* div0 */
+ 5, 5, /* div1 */
+ 0, 0, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const struct clk_parent_data fanout_parents[] = {
+ { .fw_name = "losc-fanout" },
+ { .hw = &fanout_12M_clk.common.hw, },
+ { .hw = &fanout_16M_clk.common.hw, },
+ { .hw = &fanout_24M_clk.common.hw, },
+ { .hw = &fanout_25M_clk.common.hw, },
+ { .hw = &fanout_27M_clk.common.hw, },
+ { .hw = &fanout_pclk_clk.common.hw, },
+ { .hw = &fanout_50M_clk.common.hw, },
+};
+static SUNXI_CCU_MUX_DATA_WITH_GATE(fanout0_clk, "fanout0", fanout_parents,
+ 0xf3c,
+ 0, 3, /* mux */
+ BIT(21), /* gate */
+ 0);
+static SUNXI_CCU_MUX_DATA_WITH_GATE(fanout1_clk, "fanout1", fanout_parents,
+ 0xf3c,
+ 3, 3, /* mux */
+ BIT(22), /* gate */
+ 0);
+static SUNXI_CCU_MUX_DATA_WITH_GATE(fanout2_clk, "fanout2", fanout_parents,
+ 0xf3c,
+ 6, 3, /* mux */
+ BIT(23), /* gate */
+ 0);
+
+/*
+ * Contains all clocks that are controlled by a hardware register. They
+ * have a (sunxi) .common member, which the common sunxi CCU code initialises
+ * with the MMIO base address and the shared lock.
+ */
+static struct ccu_common *sun55i_a523_ccu_clks[] = {
+ &pll_ddr_clk.common,
+ &pll_periph0_4x_clk.common,
+ &pll_periph0_2x_clk.common,
+ &pll_periph0_800M_clk.common,
+ &pll_periph0_480M_clk.common,
+ &pll_periph1_4x_clk.common,
+ &pll_periph1_2x_clk.common,
+ &pll_periph1_800M_clk.common,
+ &pll_periph1_480M_clk.common,
+ &pll_gpu_clk.common,
+ &pll_video0_8x_clk.common,
+ &pll_video0_4x_clk.common,
+ &pll_video1_8x_clk.common,
+ &pll_video1_4x_clk.common,
+ &pll_video2_8x_clk.common,
+ &pll_video2_4x_clk.common,
+ &pll_video3_8x_clk.common,
+ &pll_video3_4x_clk.common,
+ &pll_ve_clk.common,
+ &pll_audio0_4x_clk.common,
+ &pll_npu_4x_clk.common,
+ &ahb_clk.common,
+ &apb0_clk.common,
+ &apb1_clk.common,
+ &mbus_clk.common,
+ &de_clk.common,
+ &bus_de_clk.common,
+ &di_clk.common,
+ &bus_di_clk.common,
+ &g2d_clk.common,
+ &bus_g2d_clk.common,
+ &gpu_clk.common,
+ &bus_gpu_clk.common,
+ &ce_clk.common,
+ &bus_ce_clk.common,
+ &bus_ce_sys_clk.common,
+ &ve_clk.common,
+ &bus_ve_clk.common,
+ &bus_dma_clk.common,
+ &bus_msgbox_clk.common,
+ &bus_spinlock_clk.common,
+ &hstimer0_clk.common,
+ &hstimer1_clk.common,
+ &hstimer2_clk.common,
+ &hstimer3_clk.common,
+ &hstimer4_clk.common,
+ &hstimer5_clk.common,
+ &bus_hstimer_clk.common,
+ &bus_dbg_clk.common,
+ &bus_pwm0_clk.common,
+ &bus_pwm1_clk.common,
+ &iommu_clk.common,
+ &bus_iommu_clk.common,
+ &dram_clk.common,
+ &mbus_dma_clk.common,
+ &mbus_ve_clk.common,
+ &mbus_ce_clk.common,
+ &mbus_nand_clk.common,
+ &mbus_usb3_clk.common,
+ &mbus_csi_clk.common,
+ &mbus_isp_clk.common,
+ &mbus_gmac1_clk.common,
+ &bus_dram_clk.common,
+ &nand0_clk.common,
+ &nand1_clk.common,
+ &bus_nand_clk.common,
+ &mmc0_clk.common,
+ &mmc1_clk.common,
+ &mmc2_clk.common,
+ &bus_sysdap_clk.common,
+ &bus_mmc0_clk.common,
+ &bus_mmc1_clk.common,
+ &bus_mmc2_clk.common,
+ &bus_uart0_clk.common,
+ &bus_uart1_clk.common,
+ &bus_uart2_clk.common,
+ &bus_uart3_clk.common,
+ &bus_uart4_clk.common,
+ &bus_uart5_clk.common,
+ &bus_uart6_clk.common,
+ &bus_uart7_clk.common,
+ &bus_i2c0_clk.common,
+ &bus_i2c1_clk.common,
+ &bus_i2c2_clk.common,
+ &bus_i2c3_clk.common,
+ &bus_i2c4_clk.common,
+ &bus_i2c5_clk.common,
+ &bus_can_clk.common,
+ &spi0_clk.common,
+ &spi1_clk.common,
+ &spi2_clk.common,
+ &spifc_clk.common,
+ &bus_spi0_clk.common,
+ &bus_spi1_clk.common,
+ &bus_spi2_clk.common,
+ &bus_spifc_clk.common,
+ &emac0_25M_clk.common,
+ &emac1_25M_clk.common,
+ &bus_emac0_clk.common,
+ &bus_emac1_clk.common,
+ &ir_rx_clk.common,
+ &bus_ir_rx_clk.common,
+ &ir_tx_clk.common,
+ &bus_ir_tx_clk.common,
+ &gpadc0_clk.common,
+ &gpadc1_clk.common,
+ &bus_gpadc0_clk.common,
+ &bus_gpadc1_clk.common,
+ &bus_ths_clk.common,
+ &usb_ohci0_clk.common,
+ &usb_ohci1_clk.common,
+ &bus_ohci0_clk.common,
+ &bus_ohci1_clk.common,
+ &bus_ehci0_clk.common,
+ &bus_ehci1_clk.common,
+ &bus_otg_clk.common,
+ &bus_lradc_clk.common,
+ &pcie_aux_clk.common,
+ &bus_display0_top_clk.common,
+ &bus_display1_top_clk.common,
+ &hdmi_24M_clk.common,
+ &hdmi_cec_32k_clk.common,
+ &hdmi_cec_clk.common,
+ &bus_hdmi_clk.common,
+ &mipi_dsi0_clk.common,
+ &mipi_dsi1_clk.common,
+ &bus_mipi_dsi0_clk.common,
+ &bus_mipi_dsi1_clk.common,
+ &tcon_lcd0_clk.common,
+ &tcon_lcd1_clk.common,
+ &tcon_lcd2_clk.common,
+ &combophy_dsi0_clk.common,
+ &combophy_dsi1_clk.common,
+ &bus_tcon_lcd0_clk.common,
+ &bus_tcon_lcd1_clk.common,
+ &bus_tcon_lcd2_clk.common,
+ &tcon_tv0_clk.common,
+ &tcon_tv1_clk.common,
+ &bus_tcon_tv0_clk.common,
+ &bus_tcon_tv1_clk.common,
+ &edp_clk.common,
+ &bus_edp_clk.common,
+ &ledc_clk.common,
+ &bus_ledc_clk.common,
+ &csi_top_clk.common,
+ &csi_mclk0_clk.common,
+ &csi_mclk1_clk.common,
+ &csi_mclk2_clk.common,
+ &csi_mclk3_clk.common,
+ &bus_csi_clk.common,
+ &isp_clk.common,
+ &dsp_clk.common,
+ &fanout_24M_clk.common,
+ &fanout_12M_clk.common,
+ &fanout_16M_clk.common,
+ &fanout_25M_clk.common,
+ &fanout_27M_clk.common,
+ &fanout_pclk_clk.common,
+ &fanout0_clk.common,
+ &fanout1_clk.common,
+ &fanout2_clk.common,
+};
+
+static struct clk_hw_onecell_data sun55i_a523_hw_clks = {
+ .num = CLK_NUMBER,
+ .hws = {
+ [CLK_PLL_DDR0] = &pll_ddr_clk.common.hw,
+ [CLK_PLL_PERIPH0_4X] = &pll_periph0_4x_clk.common.hw,
+ [CLK_PLL_PERIPH0_2X] = &pll_periph0_2x_clk.common.hw,
+ [CLK_PLL_PERIPH0_800M] = &pll_periph0_800M_clk.common.hw,
+ [CLK_PLL_PERIPH0_480M] = &pll_periph0_480M_clk.common.hw,
+ [CLK_PLL_PERIPH0_600M] = &pll_periph0_600M_clk.hw,
+ [CLK_PLL_PERIPH0_400M] = &pll_periph0_400M_clk.hw,
+ [CLK_PLL_PERIPH0_300M] = &pll_periph0_300M_clk.hw,
+ [CLK_PLL_PERIPH0_200M] = &pll_periph0_200M_clk.hw,
+ [CLK_PLL_PERIPH0_160M] = &pll_periph0_160M_clk.hw,
+ [CLK_PLL_PERIPH0_150M] = &pll_periph0_150M_clk.hw,
+ [CLK_PLL_PERIPH1_4X] = &pll_periph1_4x_clk.common.hw,
+ [CLK_PLL_PERIPH1_2X] = &pll_periph1_2x_clk.common.hw,
+ [CLK_PLL_PERIPH1_800M] = &pll_periph1_800M_clk.common.hw,
+ [CLK_PLL_PERIPH1_480M] = &pll_periph1_480M_clk.common.hw,
+ [CLK_PLL_PERIPH1_600M] = &pll_periph1_600M_clk.hw,
+ [CLK_PLL_PERIPH1_400M] = &pll_periph1_400M_clk.hw,
+ [CLK_PLL_PERIPH1_300M] = &pll_periph1_300M_clk.hw,
+ [CLK_PLL_PERIPH1_200M] = &pll_periph1_200M_clk.hw,
+ [CLK_PLL_PERIPH1_160M] = &pll_periph1_160M_clk.hw,
+ [CLK_PLL_PERIPH1_150M] = &pll_periph1_150M_clk.hw,
+ [CLK_PLL_GPU] = &pll_gpu_clk.common.hw,
+ [CLK_PLL_VIDEO0_8X] = &pll_video0_8x_clk.common.hw,
+ [CLK_PLL_VIDEO0_4X] = &pll_video0_4x_clk.common.hw,
+ [CLK_PLL_VIDEO0_3X] = &pll_video0_3x_clk.hw,
+ [CLK_PLL_VIDEO1_8X] = &pll_video1_8x_clk.common.hw,
+ [CLK_PLL_VIDEO1_4X] = &pll_video1_4x_clk.common.hw,
+ [CLK_PLL_VIDEO1_3X] = &pll_video1_3x_clk.hw,
+ [CLK_PLL_VIDEO2_8X] = &pll_video2_8x_clk.common.hw,
+ [CLK_PLL_VIDEO2_4X] = &pll_video2_4x_clk.common.hw,
+ [CLK_PLL_VIDEO2_3X] = &pll_video2_3x_clk.hw,
+ [CLK_PLL_VIDEO3_8X] = &pll_video3_8x_clk.common.hw,
+ [CLK_PLL_VIDEO3_4X] = &pll_video3_4x_clk.common.hw,
+ [CLK_PLL_VIDEO3_3X] = &pll_video3_3x_clk.hw,
+ [CLK_PLL_VE] = &pll_ve_clk.common.hw,
+ [CLK_PLL_AUDIO0_4X] = &pll_audio0_4x_clk.common.hw,
+ [CLK_PLL_AUDIO0_2X] = &pll_audio0_2x_clk.hw,
+ [CLK_PLL_AUDIO0] = &pll_audio0_clk.hw,
+ [CLK_PLL_NPU_4X] = &pll_npu_4x_clk.common.hw,
+ [CLK_PLL_NPU_2X] = &pll_npu_2x_clk.hw,
+ [CLK_PLL_NPU] = &pll_npu_1x_clk.hw,
+ [CLK_AHB] = &ahb_clk.common.hw,
+ [CLK_APB0] = &apb0_clk.common.hw,
+ [CLK_APB1] = &apb1_clk.common.hw,
+ [CLK_MBUS] = &mbus_clk.common.hw,
+ [CLK_DE] = &de_clk.common.hw,
+ [CLK_BUS_DE] = &bus_de_clk.common.hw,
+ [CLK_DI] = &di_clk.common.hw,
+ [CLK_BUS_DI] = &bus_di_clk.common.hw,
+ [CLK_G2D] = &g2d_clk.common.hw,
+ [CLK_BUS_G2D] = &bus_g2d_clk.common.hw,
+ [CLK_GPU] = &gpu_clk.common.hw,
+ [CLK_BUS_GPU] = &bus_gpu_clk.common.hw,
+ [CLK_CE] = &ce_clk.common.hw,
+ [CLK_BUS_CE] = &bus_ce_clk.common.hw,
+ [CLK_BUS_CE_SYS] = &bus_ce_sys_clk.common.hw,
+ [CLK_VE] = &ve_clk.common.hw,
+ [CLK_BUS_VE] = &bus_ve_clk.common.hw,
+ [CLK_BUS_DMA] = &bus_dma_clk.common.hw,
+ [CLK_BUS_MSGBOX] = &bus_msgbox_clk.common.hw,
+ [CLK_BUS_SPINLOCK] = &bus_spinlock_clk.common.hw,
+ [CLK_HSTIMER0] = &hstimer0_clk.common.hw,
+ [CLK_HSTIMER1] = &hstimer1_clk.common.hw,
+ [CLK_HSTIMER2] = &hstimer2_clk.common.hw,
+ [CLK_HSTIMER3] = &hstimer3_clk.common.hw,
+ [CLK_HSTIMER4] = &hstimer4_clk.common.hw,
+ [CLK_HSTIMER5] = &hstimer5_clk.common.hw,
+ [CLK_BUS_HSTIMER] = &bus_hstimer_clk.common.hw,
+ [CLK_BUS_DBG] = &bus_dbg_clk.common.hw,
+ [CLK_BUS_PWM0] = &bus_pwm0_clk.common.hw,
+ [CLK_BUS_PWM1] = &bus_pwm1_clk.common.hw,
+ [CLK_IOMMU] = &iommu_clk.common.hw,
+ [CLK_BUS_IOMMU] = &bus_iommu_clk.common.hw,
+ [CLK_DRAM] = &dram_clk.common.hw,
+ [CLK_MBUS_DMA] = &mbus_dma_clk.common.hw,
+ [CLK_MBUS_VE] = &mbus_ve_clk.common.hw,
+ [CLK_MBUS_CE] = &mbus_ce_clk.common.hw,
+ [CLK_MBUS_CSI] = &mbus_csi_clk.common.hw,
+ [CLK_MBUS_ISP] = &mbus_isp_clk.common.hw,
+ [CLK_MBUS_EMAC1] = &mbus_gmac1_clk.common.hw,
+ [CLK_BUS_DRAM] = &bus_dram_clk.common.hw,
+ [CLK_NAND0] = &nand0_clk.common.hw,
+ [CLK_NAND1] = &nand1_clk.common.hw,
+ [CLK_BUS_NAND] = &bus_nand_clk.common.hw,
+ [CLK_MMC0] = &mmc0_clk.common.hw,
+ [CLK_MMC1] = &mmc1_clk.common.hw,
+ [CLK_MMC2] = &mmc2_clk.common.hw,
+ [CLK_BUS_SYSDAP] = &bus_sysdap_clk.common.hw,
+ [CLK_BUS_MMC0] = &bus_mmc0_clk.common.hw,
+ [CLK_BUS_MMC1] = &bus_mmc1_clk.common.hw,
+ [CLK_BUS_MMC2] = &bus_mmc2_clk.common.hw,
+ [CLK_BUS_UART0] = &bus_uart0_clk.common.hw,
+ [CLK_BUS_UART1] = &bus_uart1_clk.common.hw,
+ [CLK_BUS_UART2] = &bus_uart2_clk.common.hw,
+ [CLK_BUS_UART3] = &bus_uart3_clk.common.hw,
+ [CLK_BUS_UART4] = &bus_uart4_clk.common.hw,
+ [CLK_BUS_UART5] = &bus_uart5_clk.common.hw,
+ [CLK_BUS_UART6] = &bus_uart6_clk.common.hw,
+ [CLK_BUS_UART7] = &bus_uart7_clk.common.hw,
+ [CLK_BUS_I2C0] = &bus_i2c0_clk.common.hw,
+ [CLK_BUS_I2C1] = &bus_i2c1_clk.common.hw,
+ [CLK_BUS_I2C2] = &bus_i2c2_clk.common.hw,
+ [CLK_BUS_I2C3] = &bus_i2c3_clk.common.hw,
+ [CLK_BUS_I2C4] = &bus_i2c4_clk.common.hw,
+ [CLK_BUS_I2C5] = &bus_i2c5_clk.common.hw,
+ [CLK_BUS_CAN] = &bus_can_clk.common.hw,
+ [CLK_SPI0] = &spi0_clk.common.hw,
+ [CLK_SPI1] = &spi1_clk.common.hw,
+ [CLK_SPI2] = &spi2_clk.common.hw,
+ [CLK_SPIFC] = &spifc_clk.common.hw,
+ [CLK_BUS_SPI0] = &bus_spi0_clk.common.hw,
+ [CLK_BUS_SPI1] = &bus_spi1_clk.common.hw,
+ [CLK_BUS_SPI2] = &bus_spi2_clk.common.hw,
+ [CLK_BUS_SPIFC] = &bus_spifc_clk.common.hw,
+ [CLK_EMAC0_25M] = &emac0_25M_clk.common.hw,
+ [CLK_EMAC1_25M] = &emac1_25M_clk.common.hw,
+ [CLK_BUS_EMAC0] = &bus_emac0_clk.common.hw,
+ [CLK_BUS_EMAC1] = &bus_emac1_clk.common.hw,
+ [CLK_IR_RX] = &ir_rx_clk.common.hw,
+ [CLK_BUS_IR_RX] = &bus_ir_rx_clk.common.hw,
+ [CLK_IR_TX] = &ir_tx_clk.common.hw,
+ [CLK_BUS_IR_TX] = &bus_ir_tx_clk.common.hw,
+ [CLK_GPADC0] = &gpadc0_clk.common.hw,
+ [CLK_GPADC1] = &gpadc1_clk.common.hw,
+ [CLK_BUS_GPADC0] = &bus_gpadc0_clk.common.hw,
+ [CLK_BUS_GPADC1] = &bus_gpadc1_clk.common.hw,
+ [CLK_BUS_THS] = &bus_ths_clk.common.hw,
+ [CLK_USB_OHCI0] = &usb_ohci0_clk.common.hw,
+ [CLK_USB_OHCI1] = &usb_ohci1_clk.common.hw,
+ [CLK_BUS_OHCI0] = &bus_ohci0_clk.common.hw,
+ [CLK_BUS_OHCI1] = &bus_ohci1_clk.common.hw,
+ [CLK_BUS_EHCI0] = &bus_ehci0_clk.common.hw,
+ [CLK_BUS_EHCI1] = &bus_ehci1_clk.common.hw,
+ [CLK_BUS_OTG] = &bus_otg_clk.common.hw,
+ [CLK_BUS_LRADC] = &bus_lradc_clk.common.hw,
+ [CLK_PCIE_AUX] = &pcie_aux_clk.common.hw,
+ [CLK_BUS_DISPLAY0_TOP] = &bus_display0_top_clk.common.hw,
+ [CLK_BUS_DISPLAY1_TOP] = &bus_display1_top_clk.common.hw,
+ [CLK_HDMI_24M] = &hdmi_24M_clk.common.hw,
+ [CLK_HDMI_CEC_32K] = &hdmi_cec_32k_clk.common.hw,
+ [CLK_HDMI_CEC] = &hdmi_cec_clk.common.hw,
+ [CLK_BUS_HDMI] = &bus_hdmi_clk.common.hw,
+ [CLK_MIPI_DSI0] = &mipi_dsi0_clk.common.hw,
+ [CLK_MIPI_DSI1] = &mipi_dsi1_clk.common.hw,
+ [CLK_BUS_MIPI_DSI0] = &bus_mipi_dsi0_clk.common.hw,
+ [CLK_BUS_MIPI_DSI1] = &bus_mipi_dsi1_clk.common.hw,
+ [CLK_TCON_LCD0] = &tcon_lcd0_clk.common.hw,
+ [CLK_TCON_LCD1] = &tcon_lcd1_clk.common.hw,
+ [CLK_TCON_LCD2] = &tcon_lcd2_clk.common.hw,
+ [CLK_COMBOPHY_DSI0] = &combophy_dsi0_clk.common.hw,
+ [CLK_COMBOPHY_DSI1] = &combophy_dsi1_clk.common.hw,
+ [CLK_BUS_TCON_LCD0] = &bus_tcon_lcd0_clk.common.hw,
+ [CLK_BUS_TCON_LCD1] = &bus_tcon_lcd1_clk.common.hw,
+ [CLK_BUS_TCON_LCD2] = &bus_tcon_lcd2_clk.common.hw,
+ [CLK_TCON_TV0] = &tcon_tv0_clk.common.hw,
+ [CLK_TCON_TV1] = &tcon_tv1_clk.common.hw,
+ [CLK_BUS_TCON_TV0] = &bus_tcon_tv0_clk.common.hw,
+ [CLK_BUS_TCON_TV1] = &bus_tcon_tv1_clk.common.hw,
+ [CLK_EDP] = &edp_clk.common.hw,
+ [CLK_BUS_EDP] = &bus_edp_clk.common.hw,
+ [CLK_LEDC] = &ledc_clk.common.hw,
+ [CLK_BUS_LEDC] = &bus_ledc_clk.common.hw,
+ [CLK_CSI_TOP] = &csi_top_clk.common.hw,
+ [CLK_CSI_MCLK0] = &csi_mclk0_clk.common.hw,
+ [CLK_CSI_MCLK1] = &csi_mclk1_clk.common.hw,
+ [CLK_CSI_MCLK2] = &csi_mclk2_clk.common.hw,
+ [CLK_CSI_MCLK3] = &csi_mclk3_clk.common.hw,
+ [CLK_BUS_CSI] = &bus_csi_clk.common.hw,
+ [CLK_ISP] = &isp_clk.common.hw,
+ [CLK_DSP] = &dsp_clk.common.hw,
+ [CLK_FANOUT_24M] = &fanout_24M_clk.common.hw,
+ [CLK_FANOUT_12M] = &fanout_12M_clk.common.hw,
+ [CLK_FANOUT_16M] = &fanout_16M_clk.common.hw,
+ [CLK_FANOUT_25M] = &fanout_25M_clk.common.hw,
+ [CLK_FANOUT_27M] = &fanout_27M_clk.common.hw,
+ [CLK_FANOUT_PCLK] = &fanout_pclk_clk.common.hw,
+ [CLK_FANOUT0] = &fanout0_clk.common.hw,
+ [CLK_FANOUT1] = &fanout1_clk.common.hw,
+ [CLK_FANOUT2] = &fanout2_clk.common.hw,
+ },
+};
+
+static struct ccu_reset_map sun55i_a523_ccu_resets[] = {
+ [RST_MBUS] = { 0x540, BIT(30) },
+ [RST_BUS_NSI] = { 0x54c, BIT(16) },
+ [RST_BUS_DE] = { 0x60c, BIT(16) },
+ [RST_BUS_DI] = { 0x62c, BIT(16) },
+ [RST_BUS_G2D] = { 0x63c, BIT(16) },
+ [RST_BUS_SYS] = { 0x64c, BIT(16) },
+ [RST_BUS_GPU] = { 0x67c, BIT(16) },
+ [RST_BUS_CE] = { 0x68c, BIT(16) },
+ [RST_BUS_SYS_CE] = { 0x68c, BIT(17) },
+ [RST_BUS_VE] = { 0x69c, BIT(16) },
+ [RST_BUS_DMA] = { 0x70c, BIT(16) },
+ [RST_BUS_MSGBOX] = { 0x71c, BIT(16) },
+ [RST_BUS_SPINLOCK] = { 0x72c, BIT(16) },
+ [RST_BUS_CPUXTIMER] = { 0x74c, BIT(16) },
+ [RST_BUS_DBG] = { 0x78c, BIT(16) },
+ [RST_BUS_PWM0] = { 0x7ac, BIT(16) },
+ [RST_BUS_PWM1] = { 0x7ac, BIT(17) },
+ [RST_BUS_DRAM] = { 0x80c, BIT(16) },
+ [RST_BUS_NAND] = { 0x82c, BIT(16) },
+ [RST_BUS_MMC0] = { 0x84c, BIT(16) },
+ [RST_BUS_MMC1] = { 0x84c, BIT(17) },
+ [RST_BUS_MMC2] = { 0x84c, BIT(18) },
+ [RST_BUS_SYSDAP] = { 0x88c, BIT(16) },
+ [RST_BUS_UART0] = { 0x90c, BIT(16) },
+ [RST_BUS_UART1] = { 0x90c, BIT(17) },
+ [RST_BUS_UART2] = { 0x90c, BIT(18) },
+ [RST_BUS_UART3] = { 0x90c, BIT(19) },
+ [RST_BUS_UART4] = { 0x90c, BIT(20) },
+ [RST_BUS_UART5] = { 0x90c, BIT(21) },
+ [RST_BUS_UART6] = { 0x90c, BIT(22) },
+ [RST_BUS_UART7] = { 0x90c, BIT(23) },
+ [RST_BUS_I2C0] = { 0x91c, BIT(16) },
+ [RST_BUS_I2C1] = { 0x91c, BIT(17) },
+ [RST_BUS_I2C2] = { 0x91c, BIT(18) },
+ [RST_BUS_I2C3] = { 0x91c, BIT(19) },
+ [RST_BUS_I2C4] = { 0x91c, BIT(20) },
+ [RST_BUS_I2C5] = { 0x91c, BIT(21) },
+ [RST_BUS_CAN] = { 0x92c, BIT(16) },
+ [RST_BUS_SPI0] = { 0x96c, BIT(16) },
+ [RST_BUS_SPI1] = { 0x96c, BIT(17) },
+ [RST_BUS_SPI2] = { 0x96c, BIT(18) },
+ [RST_BUS_SPIFC] = { 0x96c, BIT(19) },
+ [RST_BUS_EMAC0] = { 0x97c, BIT(16) },
+ [RST_BUS_EMAC1] = { 0x98c, BIT(16) | BIT(17) }, /* GMAC1-AXI */
+ [RST_BUS_IR_RX] = { 0x99c, BIT(16) },
+ [RST_BUS_IR_TX] = { 0x9cc, BIT(16) },
+ [RST_BUS_GPADC0] = { 0x9ec, BIT(16) },
+ [RST_BUS_GPADC1] = { 0x9ec, BIT(17) },
+ [RST_BUS_THS] = { 0x9fc, BIT(16) },
+ [RST_USB_PHY0] = { 0xa70, BIT(30) },
+ [RST_USB_PHY1] = { 0xa74, BIT(30) },
+ [RST_BUS_OHCI0] = { 0xa8c, BIT(16) },
+ [RST_BUS_OHCI1] = { 0xa8c, BIT(17) },
+ [RST_BUS_EHCI0] = { 0xa8c, BIT(20) },
+ [RST_BUS_EHCI1] = { 0xa8c, BIT(21) },
+ [RST_BUS_OTG] = { 0xa8c, BIT(24) },
+ [RST_BUS_3] = { 0xa8c, BIT(25) }, /* BSP + register */
+ [RST_BUS_LRADC] = { 0xa9c, BIT(16) },
+ [RST_BUS_PCIE_USB3] = { 0xaac, BIT(16) },
+ [RST_BUS_DISPLAY0_TOP] = { 0xabc, BIT(16) },
+ [RST_BUS_DISPLAY1_TOP] = { 0xacc, BIT(16) },
+ [RST_BUS_HDMI_MAIN] = { 0xb1c, BIT(16) },
+ [RST_BUS_HDMI_SUB] = { 0xb1c, BIT(17) },
+ [RST_BUS_MIPI_DSI0] = { 0xb4c, BIT(16) },
+ [RST_BUS_MIPI_DSI1] = { 0xb4c, BIT(17) },
+ [RST_BUS_TCON_LCD0] = { 0xb7c, BIT(16) },
+ [RST_BUS_TCON_LCD1] = { 0xb7c, BIT(17) },
+ [RST_BUS_TCON_LCD2] = { 0xb7c, BIT(18) },
+ [RST_BUS_TCON_TV0] = { 0xb9c, BIT(16) },
+ [RST_BUS_TCON_TV1] = { 0xb9c, BIT(17) },
+ [RST_BUS_LVDS0] = { 0xbac, BIT(16) },
+ [RST_BUS_LVDS1] = { 0xbac, BIT(17) },
+ [RST_BUS_EDP] = { 0xbbc, BIT(16) },
+ [RST_BUS_VIDEO_OUT0] = { 0xbcc, BIT(16) },
+ [RST_BUS_VIDEO_OUT1] = { 0xbcc, BIT(17) },
+ [RST_BUS_LEDC] = { 0xbfc, BIT(16) },
+ [RST_BUS_CSI] = { 0xc1c, BIT(16) },
+ [RST_BUS_ISP] = { 0xc2c, BIT(16) }, /* BSP + register */
+};
+
+static const struct sunxi_ccu_desc sun55i_a523_ccu_desc = {
+ .ccu_clks = sun55i_a523_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun55i_a523_ccu_clks),
+
+ .hw_clks = &sun55i_a523_hw_clks,
+
+ .resets = sun55i_a523_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun55i_a523_ccu_resets),
+};
+
+static const u32 pll_regs[] = {
+ SUN55I_A523_PLL_DDR0_REG,
+ SUN55I_A523_PLL_PERIPH0_REG,
+ SUN55I_A523_PLL_PERIPH1_REG,
+ SUN55I_A523_PLL_GPU_REG,
+ SUN55I_A523_PLL_VIDEO0_REG,
+ SUN55I_A523_PLL_VIDEO1_REG,
+ SUN55I_A523_PLL_VIDEO2_REG,
+ SUN55I_A523_PLL_VE_REG,
+ SUN55I_A523_PLL_VIDEO3_REG,
+ SUN55I_A523_PLL_AUDIO0_REG,
+ SUN55I_A523_PLL_NPU_REG,
+};
+
+static int sun55i_a523_ccu_probe(struct platform_device *pdev)
+{
+ void __iomem *reg;
+ u32 val;
+ int i, ret;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ /*
+ * The PLL clock code does not model all bits; for instance, it does
+ * not support a separate enable and gate bit. We present the
+ * gate bit (bit 27) as the enable bit, but then have to set the
+ * PLL Enable, LDO Enable, and Lock Enable bits on all PLLs here.
+ */
+ for (i = 0; i < ARRAY_SIZE(pll_regs); i++) {
+ val = readl(reg + pll_regs[i]);
+ val |= BIT(31) | BIT(30) | BIT(29);
+ writel(val, reg + pll_regs[i]);
+ }
+
+ /* Enforce m1 = 0, m0 = 0 for PLL_AUDIO0 */
+ val = readl(reg + SUN55I_A523_PLL_AUDIO0_REG);
+ val &= ~(BIT(1) | BIT(0));
+ writel(val, reg + SUN55I_A523_PLL_AUDIO0_REG);
+
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun55i_a523_ccu_desc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id sun55i_a523_ccu_ids[] = {
+ { .compatible = "allwinner,sun55i-a523-ccu" },
+ { }
+};
+
+static struct platform_driver sun55i_a523_ccu_driver = {
+ .probe = sun55i_a523_ccu_probe,
+ .driver = {
+ .name = "sun55i-a523-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun55i_a523_ccu_ids,
+ },
+};
+module_platform_driver(sun55i_a523_ccu_driver);
+
+MODULE_IMPORT_NS("SUNXI_CCU");
+MODULE_DESCRIPTION("Support for the Allwinner A523 CCU");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun55i-a523.h b/drivers/clk/sunxi-ng/ccu-sun55i-a523.h
new file mode 100644
index 000000000000..fc8dd42f1b47
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun55i-a523.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2024 Arm Ltd.
+ */
+
+#ifndef _CCU_SUN55I_A523_H
+#define _CCU_SUN55I_A523_H
+
+#include <dt-bindings/clock/sun55i-a523-ccu.h>
+#include <dt-bindings/reset/sun55i-a523-ccu.h>
+
+#define CLK_NUMBER (CLK_FANOUT2 + 1)
+
+#endif /* _CCU_SUN55I_A523_H */
diff --git a/drivers/clk/sunxi-ng/ccu_common.h b/drivers/clk/sunxi-ng/ccu_common.h
index dd330426a6e5..bbec283b9d99 100644
--- a/drivers/clk/sunxi-ng/ccu_common.h
+++ b/drivers/clk/sunxi-ng/ccu_common.h
@@ -19,10 +19,15 @@
#define CCU_FEATURE_SIGMA_DELTA_MOD BIT(7)
#define CCU_FEATURE_KEY_FIELD BIT(8)
#define CCU_FEATURE_CLOSEST_RATE BIT(9)
+#define CCU_FEATURE_DUAL_DIV BIT(10)
+#define CCU_FEATURE_UPDATE_BIT BIT(11)
/* MMC timing mode switch bit */
#define CCU_MMC_NEW_TIMING_MODE BIT(30)
+/* Some clocks need this bit to actually apply register changes */
+#define CCU_SUNXI_UPDATE_BIT BIT(27)
+
struct device_node;
struct ccu_common {
diff --git a/drivers/clk/sunxi-ng/ccu_div.c b/drivers/clk/sunxi-ng/ccu_div.c
index 7f4691f09e01..916d6da6d8a3 100644
--- a/drivers/clk/sunxi-ng/ccu_div.c
+++ b/drivers/clk/sunxi-ng/ccu_div.c
@@ -106,6 +106,8 @@ static int ccu_div_set_rate(struct clk_hw *hw, unsigned long rate,
reg = readl(cd->common.base + cd->common.reg);
reg &= ~GENMASK(cd->div.width + cd->div.shift - 1, cd->div.shift);
+ if (cd->common.features & CCU_FEATURE_UPDATE_BIT)
+ reg |= CCU_SUNXI_UPDATE_BIT;
writel(reg | (val << cd->div.shift),
cd->common.base + cd->common.reg);
diff --git a/drivers/clk/sunxi-ng/ccu_gate.c b/drivers/clk/sunxi-ng/ccu_gate.c
index ac52fd6bff67..474a9e8831f8 100644
--- a/drivers/clk/sunxi-ng/ccu_gate.c
+++ b/drivers/clk/sunxi-ng/ccu_gate.c
@@ -20,6 +20,8 @@ void ccu_gate_helper_disable(struct ccu_common *common, u32 gate)
spin_lock_irqsave(common->lock, flags);
reg = readl(common->base + common->reg);
+ if (common->features & CCU_FEATURE_UPDATE_BIT)
+ reg |= CCU_SUNXI_UPDATE_BIT;
writel(reg & ~gate, common->base + common->reg);
spin_unlock_irqrestore(common->lock, flags);
@@ -44,6 +46,8 @@ int ccu_gate_helper_enable(struct ccu_common *common, u32 gate)
spin_lock_irqsave(common->lock, flags);
reg = readl(common->base + common->reg);
+ if (common->features & CCU_FEATURE_UPDATE_BIT)
+ reg |= CCU_SUNXI_UPDATE_BIT;
writel(reg | gate, common->base + common->reg);
spin_unlock_irqrestore(common->lock, flags);
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index 2bb8987ddcc2..354c981943b6 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -10,15 +10,23 @@
#include "ccu_gate.h"
#include "ccu_mp.h"
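+/*
+ * Step to the next candidate P divider: power-of-two P fields go
+ * 1, 2, 4, 8, ..., linear (dual-divider) P fields go 1, 2, 3, 4, ...
+ */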
+static unsigned int next_div(unsigned int div, bool shift)
+{
+ if (shift)
+ return div << 1;
+ return div + 1;
+}
+
static unsigned long ccu_mp_find_best(unsigned long parent, unsigned long rate,
unsigned int max_m, unsigned int max_p,
+ bool shift,
unsigned int *m, unsigned int *p)
{
unsigned long best_rate = 0;
unsigned int best_m = 0, best_p = 0;
unsigned int _m, _p;
- for (_p = 1; _p <= max_p; _p <<= 1) {
+ for (_p = 1; _p <= max_p; _p = next_div(_p, shift)) {
for (_m = 1; _m <= max_m; _m++) {
unsigned long tmp_rate = parent / _p / _m;
@@ -43,7 +51,8 @@ static unsigned long ccu_mp_find_best_with_parent_adj(struct clk_hw *hw,
unsigned long *parent,
unsigned long rate,
unsigned int max_m,
- unsigned int max_p)
+ unsigned int max_p,
+ bool shift)
{
unsigned long parent_rate_saved;
unsigned long parent_rate, now;
@@ -60,7 +69,7 @@ static unsigned long ccu_mp_find_best_with_parent_adj(struct clk_hw *hw,
maxdiv = max_m * max_p;
maxdiv = min(ULONG_MAX / rate, maxdiv);
- for (_p = 1; _p <= max_p; _p <<= 1) {
+ for (_p = 1; _p <= max_p; _p = next_div(_p, shift)) {
for (_m = 1; _m <= max_m; _m++) {
div = _m * _p;
@@ -103,18 +112,26 @@ static unsigned long ccu_mp_round_rate(struct ccu_mux_internal *mux,
struct ccu_mp *cmp = data;
unsigned int max_m, max_p;
unsigned int m, p;
+ bool shift = true;
if (cmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
rate *= cmp->fixed_post_div;
+ if (cmp->common.features & CCU_FEATURE_DUAL_DIV)
+ shift = false;
+
max_m = cmp->m.max ?: 1 << cmp->m.width;
- max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
+ if (shift)
+ max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
+ else
+ max_p = cmp->p.max ?: 1 << cmp->p.width;
if (!clk_hw_can_set_rate_parent(&cmp->common.hw)) {
- rate = ccu_mp_find_best(*parent_rate, rate, max_m, max_p, &m, &p);
+ rate = ccu_mp_find_best(*parent_rate, rate, max_m, max_p, shift,
+ &m, &p);
} else {
rate = ccu_mp_find_best_with_parent_adj(hw, parent_rate, rate,
- max_m, max_p);
+ max_m, max_p, shift);
}
if (cmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
@@ -167,7 +184,11 @@ static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw,
p = reg >> cmp->p.shift;
p &= (1 << cmp->p.width) - 1;
- rate = (parent_rate >> p) / m;
+ if (cmp->common.features & CCU_FEATURE_DUAL_DIV)
+ rate = (parent_rate / p) / m;
+ else
+ rate = (parent_rate >> p) / m;
+
if (cmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
rate /= cmp->fixed_post_div;
@@ -190,20 +211,27 @@ static int ccu_mp_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long flags;
unsigned int max_m, max_p;
unsigned int m, p;
+ bool shift = true;
u32 reg;
+ if (cmp->common.features & CCU_FEATURE_DUAL_DIV)
+ shift = false;
+
/* Adjust parent_rate according to pre-dividers */
parent_rate = ccu_mux_helper_apply_prediv(&cmp->common, &cmp->mux, -1,
parent_rate);
max_m = cmp->m.max ?: 1 << cmp->m.width;
- max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
+ if (shift)
+ max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
+ else
+ max_p = cmp->p.max ?: 1 << cmp->p.width;
/* Adjust target rate according to post-dividers */
if (cmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
rate = rate * cmp->fixed_post_div;
- ccu_mp_find_best(parent_rate, rate, max_m, max_p, &m, &p);
+ ccu_mp_find_best(parent_rate, rate, max_m, max_p, shift, &m, &p);
spin_lock_irqsave(cmp->common.lock, flags);
@@ -211,7 +239,10 @@ static int ccu_mp_set_rate(struct clk_hw *hw, unsigned long rate,
reg &= ~GENMASK(cmp->m.width + cmp->m.shift - 1, cmp->m.shift);
reg &= ~GENMASK(cmp->p.width + cmp->p.shift - 1, cmp->p.shift);
reg |= (m - cmp->m.offset) << cmp->m.shift;
- reg |= ilog2(p) << cmp->p.shift;
+ if (shift)
+ reg |= ilog2(p) << cmp->p.shift;
+ else
+ reg |= (p - cmp->p.offset) << cmp->p.shift;
writel(reg, cmp->common.base + cmp->common.reg);
diff --git a/drivers/clk/sunxi-ng/ccu_mp.h b/drivers/clk/sunxi-ng/ccu_mp.h
index 6e50f3728fb5..b35aeec70484 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.h
+++ b/drivers/clk/sunxi-ng/ccu_mp.h
@@ -82,11 +82,35 @@ struct ccu_mp {
_muxshift, _muxwidth, \
0, _flags)
-#define SUNXI_CCU_MP_DATA_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
- _mshift, _mwidth, \
- _pshift, _pwidth, \
- _muxshift, _muxwidth, \
- _gate, _flags) \
+#define SUNXI_CCU_MP_MUX_GATE_POSTDIV_DUALDIV(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _pshift, _pwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _postdiv, \
+ _flags) \
+ struct ccu_mp _struct = { \
+ .enable = _gate, \
+ .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .p = _SUNXI_CCU_DIV(_pshift, _pwidth), \
+ .mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \
+ .fixed_post_div = _postdiv, \
+ .common = { \
+ .reg = _reg, \
+ .features = CCU_FEATURE_FIXED_POSTDIV | \
+ CCU_FEATURE_DUAL_DIV, \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \
+ _parents, \
+ &ccu_mp_ops, \
+ _flags), \
+ } \
+ }
+
+#define SUNXI_CCU_MP_DATA_WITH_MUX_GATE_FEAT(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _pshift, _pwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _features, \
+ _flags) \
struct ccu_mp _struct = { \
.enable = _gate, \
.m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
@@ -94,6 +118,7 @@ struct ccu_mp {
.mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \
.common = { \
.reg = _reg, \
+ .features = _features, \
.hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \
_parents, \
&ccu_mp_ops, \
@@ -101,6 +126,29 @@ struct ccu_mp {
} \
}
+#define SUNXI_CCU_MP_DATA_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _pshift, _pwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _flags) \
+ SUNXI_CCU_MP_DATA_WITH_MUX_GATE_FEAT(_struct, _name, _parents, \
+ _reg, _mshift, _mwidth, \
+ _pshift, _pwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _flags, 0)
+
+#define SUNXI_CCU_DUALDIV_MUX_GATE(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _pshift, _pwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _flags) \
+ SUNXI_CCU_MP_DATA_WITH_MUX_GATE_FEAT(_struct, _name, _parents, \
+ _reg, _mshift, _mwidth, \
+ _pshift, _pwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _flags, \
+ CCU_FEATURE_DUAL_DIV)
+
#define SUNXI_CCU_MP_DATA_WITH_MUX(_struct, _name, _parents, _reg, \
_mshift, _mwidth, \
_pshift, _pwidth, \
diff --git a/drivers/clk/sunxi-ng/ccu_mux.c b/drivers/clk/sunxi-ng/ccu_mux.c
index d7ffbdeee9e0..74f9e98a5d35 100644
--- a/drivers/clk/sunxi-ng/ccu_mux.c
+++ b/drivers/clk/sunxi-ng/ccu_mux.c
@@ -197,6 +197,8 @@ int ccu_mux_helper_set_parent(struct ccu_common *common,
/* The key field always reads as zero. */
if (common->features & CCU_FEATURE_KEY_FIELD)
reg |= CCU_MUX_KEY_VALUE;
+ if (common->features & CCU_FEATURE_UPDATE_BIT)
+ reg |= CCU_SUNXI_UPDATE_BIT;
reg &= ~GENMASK(cm->width + cm->shift - 1, cm->shift);
writel(reg | (index << cm->shift), common->base + common->reg);
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 808f259781fd..981a578043a5 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -842,7 +842,7 @@ static u64 __arch_timer_check_delta(void)
{},
};
- if (is_midr_in_range_list(read_cpuid_id(), broken_cval_midrs)) {
+ if (is_midr_in_range_list(broken_cval_midrs)) {
pr_warn_once("Broken CNTx_CVAL_EL1, using 31 bit TVAL instead.\n");
return CLOCKSOURCE_MASK(31);
}
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index e6a02e351d77..da09f467a6bb 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -238,7 +238,7 @@ static cycles_t exynos4_read_current_timer(void)
static int __init exynos4_clocksource_init(bool frc_shared)
{
/*
- * When the frc is shared, the main processer should have already
+ * When the frc is shared, the main processor should have already
* turned it on and we shouldn't be writing to TCON.
*/
if (frc_shared)
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index f00019b078a7..09549451dd51 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -582,7 +582,7 @@ static void __init hv_init_tsc_clocksource(void)
* mapped.
*/
tsc_msr.as_uint64 = hv_get_msr(HV_MSR_REFERENCE_TSC);
- if (hv_root_partition)
+ if (hv_root_partition())
tsc_pfn = tsc_msr.pfn;
else
tsc_pfn = HVPFN_DOWN(virt_to_phys(tsc_page));
@@ -627,7 +627,7 @@ void __init hv_remap_tsc_clocksource(void)
if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
return;
- if (!hv_root_partition) {
+ if (!hv_root_partition()) {
WARN(1, "%s: attempt to remap TSC page in guest partition\n",
__func__);
return;
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index 7907b740497a..abb685a080a5 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -115,6 +115,9 @@ static void gic_update_frequency(void *data)
static int gic_starting_cpu(unsigned int cpu)
{
+ /* Ensure the GIC counter is running */
+ clear_gic_config(GIC_CONFIG_COUNTSTOP);
+
gic_clockevent_cpu_init(cpu, this_cpu_ptr(&gic_clockevent_device));
return 0;
}
@@ -288,9 +291,6 @@ static int __init gic_clocksource_of_init(struct device_node *node)
pr_warn("Unable to register clock notifier\n");
}
- /* And finally start the counter */
- clear_gic_config(GIC_CONFIG_COUNTSTOP);
-
/*
* It's safe to use the MIPS GIC timer as a sched clock source only if
* its ticks are stable, which is true on either the platforms with
diff --git a/drivers/clocksource/timer-stm32-lp.c b/drivers/clocksource/timer-stm32-lp.c
index a4c95161cb22..928da2f6de69 100644
--- a/drivers/clocksource/timer-stm32-lp.c
+++ b/drivers/clocksource/timer-stm32-lp.c
@@ -24,7 +24,9 @@ struct stm32_lp_private {
struct regmap *reg;
struct clock_event_device clkevt;
unsigned long period;
+ u32 psc;
struct device *dev;
+ struct clk *clk;
};
static struct stm32_lp_private*
@@ -120,6 +122,27 @@ static void stm32_clkevent_lp_set_prescaler(struct stm32_lp_private *priv,
/* Adjust rate and period given the prescaler value */
*rate = DIV_ROUND_CLOSEST(*rate, (1 << i));
priv->period = DIV_ROUND_UP(*rate, HZ);
+ priv->psc = i;
+}
+
+static void stm32_clkevent_lp_suspend(struct clock_event_device *clkevt)
+{
+ struct stm32_lp_private *priv = to_priv(clkevt);
+
+ stm32_clkevent_lp_shutdown(clkevt);
+
+ /* balance clk_prepare_enable() from the probe */
+ clk_disable_unprepare(priv->clk);
+}
+
+static void stm32_clkevent_lp_resume(struct clock_event_device *clkevt)
+{
+ struct stm32_lp_private *priv = to_priv(clkevt);
+
+ clk_prepare_enable(priv->clk);
+
+ /* restore prescaler */
+ regmap_write(priv->reg, STM32_LPTIM_CFGR, priv->psc << CFGR_PSC_OFFSET);
}
static void stm32_clkevent_lp_init(struct stm32_lp_private *priv,
@@ -134,6 +157,8 @@ static void stm32_clkevent_lp_init(struct stm32_lp_private *priv,
priv->clkevt.set_state_oneshot = stm32_clkevent_lp_set_oneshot;
priv->clkevt.set_next_event = stm32_clkevent_lp_set_next_event;
priv->clkevt.rating = STM32_LP_RATING;
+ priv->clkevt.suspend = stm32_clkevent_lp_suspend;
+ priv->clkevt.resume = stm32_clkevent_lp_resume;
clockevents_config_and_register(&priv->clkevt, rate, 0x1,
STM32_LPTIM_MAX_ARR);
@@ -151,11 +176,12 @@ static int stm32_clkevent_lp_probe(struct platform_device *pdev)
return -ENOMEM;
priv->reg = ddata->regmap;
- ret = clk_prepare_enable(ddata->clk);
+ priv->clk = ddata->clk;
+ ret = clk_prepare_enable(priv->clk);
if (ret)
return -EINVAL;
- rate = clk_get_rate(ddata->clk);
+ rate = clk_get_rate(priv->clk);
if (!rate) {
ret = -EINVAL;
goto out_clk_disable;
@@ -168,9 +194,7 @@ static int stm32_clkevent_lp_probe(struct platform_device *pdev)
}
if (of_property_read_bool(pdev->dev.parent->of_node, "wakeup-source")) {
- ret = device_init_wakeup(&pdev->dev, true);
- if (ret)
- goto out_clk_disable;
+ device_set_wakeup_capable(&pdev->dev, true);
ret = dev_pm_set_wake_irq(&pdev->dev, irq);
if (ret)
@@ -191,7 +215,7 @@ static int stm32_clkevent_lp_probe(struct platform_device *pdev)
return 0;
out_clk_disable:
- clk_disable_unprepare(ddata->clk);
+ clk_disable_unprepare(priv->clk);
return ret;
}
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 9e46960f6a86..4f9cb943d945 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -254,7 +254,7 @@ config ARM_TEGRA186_CPUFREQ
config ARM_TEGRA194_CPUFREQ
tristate "Tegra194 CPUFreq support"
- depends on ARCH_TEGRA_194_SOC || (64BIT && COMPILE_TEST)
+ depends on ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC || (64BIT && COMPILE_TEST)
depends on TEGRA_BPMP
default y
help
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
index eb678fa5260a..551e65d35a1d 100644
--- a/drivers/cpufreq/Kconfig.powerpc
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -1,22 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-config CPU_FREQ_CBE
- tristate "CBE frequency scaling"
- depends on CBE_RAS && PPC_CELL
- default m
- help
- This adds the cpufreq driver for Cell BE processors.
- For details, take a look at <file:Documentation/cpu-freq/>.
- If you don't have such processor, say N
-
-config CPU_FREQ_CBE_PMI
- bool "CBE frequency scaling using PMI interface"
- depends on CPU_FREQ_CBE
- default n
- help
- Select this, if you want to use the PMI interface to switch
- frequencies. Using PMI, the processor will not only be able to run at
- lower speed, but also at lower core voltage.
-
config CPU_FREQ_PMAC
bool "Support for Apple PowerBooks"
depends on ADB_PMU && PPC32
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 97c2d4f15d76..2c5c228408bf 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -340,3 +340,15 @@ config X86_SPEEDSTEP_RELAXED_CAP_CHECK
option lets the probing code bypass some of those checks if the
parameter "relaxed_check=1" is passed to the module.
+config CPUFREQ_ARCH_CUR_FREQ
+ default y
+ bool "Current frequency derived from HW provided feedback"
+ help
+ This determines whether the scaling_cur_freq sysfs attribute returns
+ the last requested frequency or a more precise value based on hardware
+ provided feedback (as architected counters).
+	  Given that a more precise frequency can now be provided via the
+	  cpuinfo_avg_freq attribute, enabling this option keeps
+	  scaling_cur_freq reporting a counter-based frequency as well,
+	  for compatibility reasons.
+
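The CPUFREQ_ARCH_CUR_FREQ help text above distinguishes the last requested frequency from a counter-derived value, and mentions the separate cpuinfo_avg_freq attribute. As a rough illustration of how these two sysfs attributes are consumed from userspace, the sketch below reads both files for CPU 0. It is not part of the patch; it assumes the standard cpufreq sysfs layout, and cpuinfo_avg_freq may be absent on kernels or hardware that do not provide it.

#include <stdio.h>

/* Minimal sketch: read one cpufreq sysfs attribute for cpu0 (assumed path layout). */
static long read_khz(const char *attr)
{
	char path[128];
	FILE *f;
	long khz = -1;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu0/cpufreq/%s", attr);
	f = fopen(path, "r");
	if (!f)
		return -1;	/* attribute may not exist on this system */
	if (fscanf(f, "%ld", &khz) != 1)
		khz = -1;
	fclose(f);
	return khz;
}

int main(void)
{
	/* Last requested vs. counter-based frequency, both reported in kHz. */
	printf("scaling_cur_freq: %ld kHz\n", read_khz("scaling_cur_freq"));
	printf("cpuinfo_avg_freq: %ld kHz\n", read_khz("cpuinfo_avg_freq"));
	return 0;
}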
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 890fff99f37d..22ab45209f9b 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -91,9 +91,6 @@ obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
##################################################################################
# PowerPC platform drivers
-obj-$(CONFIG_CPU_FREQ_CBE) += ppc-cbe-cpufreq.o
-ppc-cbe-cpufreq-y += ppc_cbe_cpufreq_pervasive.o ppc_cbe_cpufreq.o
-obj-$(CONFIG_CPU_FREQ_CBE_PMI) += ppc_cbe_cpufreq_pmi.o
obj-$(CONFIG_QORIQ_CPUFREQ) += qoriq-cpufreq.o
obj-$(CONFIG_CPU_FREQ_PMAC) += pmac32-cpufreq.o
obj-$(CONFIG_CPU_FREQ_PMAC64) += pmac64-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 463b69a2dff5..924314cdeebc 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -909,6 +909,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
pr_warn(FW_WARN "P-state 0 is not max freq\n");
+ if (acpi_cpufreq_driver.set_boost)
+ policy->boost_supported = true;
+
return result;
err_unreg:
@@ -949,7 +952,6 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
}
static struct freq_attr *acpi_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
&freqdomain_cpus,
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
&cpb,
diff --git a/drivers/cpufreq/amd-pstate-trace.h b/drivers/cpufreq/amd-pstate-trace.h
index 8d692415d905..32e1bdc588c5 100644
--- a/drivers/cpufreq/amd-pstate-trace.h
+++ b/drivers/cpufreq/amd-pstate-trace.h
@@ -24,9 +24,9 @@
TRACE_EVENT(amd_pstate_perf,
- TP_PROTO(unsigned long min_perf,
- unsigned long target_perf,
- unsigned long capacity,
+ TP_PROTO(u8 min_perf,
+ u8 target_perf,
+ u8 capacity,
u64 freq,
u64 mperf,
u64 aperf,
@@ -47,9 +47,9 @@ TRACE_EVENT(amd_pstate_perf,
),
TP_STRUCT__entry(
- __field(unsigned long, min_perf)
- __field(unsigned long, target_perf)
- __field(unsigned long, capacity)
+ __field(u8, min_perf)
+ __field(u8, target_perf)
+ __field(u8, capacity)
__field(unsigned long long, freq)
__field(unsigned long long, mperf)
__field(unsigned long long, aperf)
@@ -70,10 +70,10 @@ TRACE_EVENT(amd_pstate_perf,
__entry->fast_switch = fast_switch;
),
- TP_printk("amd_min_perf=%lu amd_des_perf=%lu amd_max_perf=%lu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u fast_switch=%s",
- (unsigned long)__entry->min_perf,
- (unsigned long)__entry->target_perf,
- (unsigned long)__entry->capacity,
+ TP_printk("amd_min_perf=%hhu amd_des_perf=%hhu amd_max_perf=%hhu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u fast_switch=%s",
+ (u8)__entry->min_perf,
+ (u8)__entry->target_perf,
+ (u8)__entry->capacity,
(unsigned long long)__entry->freq,
(unsigned long long)__entry->mperf,
(unsigned long long)__entry->aperf,
@@ -86,11 +86,12 @@ TRACE_EVENT(amd_pstate_perf,
TRACE_EVENT(amd_pstate_epp_perf,
TP_PROTO(unsigned int cpu_id,
- unsigned int highest_perf,
- unsigned int epp,
- unsigned int min_perf,
- unsigned int max_perf,
- bool boost
+ u8 highest_perf,
+ u8 epp,
+ u8 min_perf,
+ u8 max_perf,
+ bool boost,
+ bool changed
),
TP_ARGS(cpu_id,
@@ -98,15 +99,17 @@ TRACE_EVENT(amd_pstate_epp_perf,
epp,
min_perf,
max_perf,
- boost),
+ boost,
+ changed),
TP_STRUCT__entry(
__field(unsigned int, cpu_id)
- __field(unsigned int, highest_perf)
- __field(unsigned int, epp)
- __field(unsigned int, min_perf)
- __field(unsigned int, max_perf)
+ __field(u8, highest_perf)
+ __field(u8, epp)
+ __field(u8, min_perf)
+ __field(u8, max_perf)
__field(bool, boost)
+ __field(bool, changed)
),
TP_fast_assign(
@@ -116,15 +119,17 @@ TRACE_EVENT(amd_pstate_epp_perf,
__entry->min_perf = min_perf;
__entry->max_perf = max_perf;
__entry->boost = boost;
+ __entry->changed = changed;
),
- TP_printk("cpu%u: [%u<->%u]/%u, epp=%u, boost=%u",
+ TP_printk("cpu%u: [%hhu<->%hhu]/%hhu, epp=%hhu, boost=%u, changed=%u",
(unsigned int)__entry->cpu_id,
- (unsigned int)__entry->min_perf,
- (unsigned int)__entry->max_perf,
- (unsigned int)__entry->highest_perf,
- (unsigned int)__entry->epp,
- (bool)__entry->boost
+ (u8)__entry->min_perf,
+ (u8)__entry->max_perf,
+ (u8)__entry->highest_perf,
+ (u8)__entry->epp,
+ (bool)__entry->boost,
+ (bool)__entry->changed
)
);
diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
index 3a0a380c3590..e671bc7d1550 100644
--- a/drivers/cpufreq/amd-pstate-ut.c
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -22,39 +22,31 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/fs.h>
+#include <linux/cleanup.h>
#include <acpi/cppc_acpi.h>
#include "amd-pstate.h"
-/*
- * Abbreviations:
- * amd_pstate_ut: used as a shortform for AMD P-State unit test.
- * It helps to keep variable names smaller, simpler
- */
-enum amd_pstate_ut_result {
- AMD_PSTATE_UT_RESULT_PASS,
- AMD_PSTATE_UT_RESULT_FAIL,
-};
struct amd_pstate_ut_struct {
const char *name;
- void (*func)(u32 index);
- enum amd_pstate_ut_result result;
+ int (*func)(u32 index);
};
/*
* Kernel module for testing the AMD P-State unit test
*/
-static void amd_pstate_ut_acpi_cpc_valid(u32 index);
-static void amd_pstate_ut_check_enabled(u32 index);
-static void amd_pstate_ut_check_perf(u32 index);
-static void amd_pstate_ut_check_freq(u32 index);
-static void amd_pstate_ut_check_driver(u32 index);
+static int amd_pstate_ut_acpi_cpc_valid(u32 index);
+static int amd_pstate_ut_check_enabled(u32 index);
+static int amd_pstate_ut_check_perf(u32 index);
+static int amd_pstate_ut_check_freq(u32 index);
+static int amd_pstate_ut_check_driver(u32 index);
static struct amd_pstate_ut_struct amd_pstate_ut_cases[] = {
{"amd_pstate_ut_acpi_cpc_valid", amd_pstate_ut_acpi_cpc_valid },
@@ -77,71 +69,67 @@ static bool get_shared_mem(void)
/*
* check the _CPC object is present in SBIOS.
*/
-static void amd_pstate_ut_acpi_cpc_valid(u32 index)
+static int amd_pstate_ut_acpi_cpc_valid(u32 index)
{
- if (acpi_cpc_valid())
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
- else {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ if (!acpi_cpc_valid()) {
pr_err("%s the _CPC object is not present in SBIOS!\n", __func__);
+ return -EINVAL;
}
+
+ return 0;
}
-static void amd_pstate_ut_pstate_enable(u32 index)
+/*
+ * check if amd pstate is enabled
+ */
+static int amd_pstate_ut_check_enabled(u32 index)
{
- int ret = 0;
u64 cppc_enable = 0;
+ int ret;
+
+ if (get_shared_mem())
+ return 0;
ret = rdmsrl_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable);
if (ret) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s rdmsrl_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret);
- return;
+ return ret;
}
- if (cppc_enable)
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
- else {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+
+ if (!cppc_enable) {
pr_err("%s amd pstate must be enabled!\n", __func__);
+ return -EINVAL;
}
-}
-/*
- * check if amd pstate is enabled
- */
-static void amd_pstate_ut_check_enabled(u32 index)
-{
- if (get_shared_mem())
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
- else
- amd_pstate_ut_pstate_enable(index);
+ return 0;
}
/*
* check if performance values are reasonable.
* highest_perf >= nominal_perf > lowest_nonlinear_perf > lowest_perf > 0
*/
-static void amd_pstate_ut_check_perf(u32 index)
+static int amd_pstate_ut_check_perf(u32 index)
{
int cpu = 0, ret = 0;
u32 highest_perf = 0, nominal_perf = 0, lowest_nonlinear_perf = 0, lowest_perf = 0;
u64 cap1 = 0;
struct cppc_perf_caps cppc_perf;
- struct cpufreq_policy *policy = NULL;
- struct amd_cpudata *cpudata = NULL;
+ union perf_cached cur_perf;
+
+ for_each_online_cpu(cpu) {
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
+ struct amd_cpudata *cpudata;
- for_each_possible_cpu(cpu) {
policy = cpufreq_cpu_get(cpu);
if (!policy)
- break;
+ continue;
cpudata = policy->driver_data;
if (get_shared_mem()) {
ret = cppc_get_perf_caps(cpu, &cppc_perf);
if (ret) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cppc_get_perf_caps ret=%d error!\n", __func__, ret);
- goto skip_test;
+ return ret;
}
highest_perf = cppc_perf.highest_perf;
@@ -151,50 +139,44 @@ static void amd_pstate_ut_check_perf(u32 index)
} else {
ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
if (ret) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s read CPPC_CAP1 ret=%d error!\n", __func__, ret);
- goto skip_test;
+ return ret;
}
- highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
- nominal_perf = AMD_CPPC_NOMINAL_PERF(cap1);
- lowest_nonlinear_perf = AMD_CPPC_LOWNONLIN_PERF(cap1);
- lowest_perf = AMD_CPPC_LOWEST_PERF(cap1);
+ highest_perf = FIELD_GET(AMD_CPPC_HIGHEST_PERF_MASK, cap1);
+ nominal_perf = FIELD_GET(AMD_CPPC_NOMINAL_PERF_MASK, cap1);
+ lowest_nonlinear_perf = FIELD_GET(AMD_CPPC_LOWNONLIN_PERF_MASK, cap1);
+ lowest_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1);
}
- if (highest_perf != READ_ONCE(cpudata->highest_perf) && !cpudata->hw_prefcore) {
+ cur_perf = READ_ONCE(cpudata->perf);
+ if (highest_perf != cur_perf.highest_perf && !cpudata->hw_prefcore) {
pr_err("%s cpu%d highest=%d %d highest perf doesn't match\n",
- __func__, cpu, highest_perf, cpudata->highest_perf);
- goto skip_test;
+ __func__, cpu, highest_perf, cur_perf.highest_perf);
+ return -EINVAL;
}
- if ((nominal_perf != READ_ONCE(cpudata->nominal_perf)) ||
- (lowest_nonlinear_perf != READ_ONCE(cpudata->lowest_nonlinear_perf)) ||
- (lowest_perf != READ_ONCE(cpudata->lowest_perf))) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ if (nominal_perf != cur_perf.nominal_perf ||
+ (lowest_nonlinear_perf != cur_perf.lowest_nonlinear_perf) ||
+ (lowest_perf != cur_perf.lowest_perf)) {
pr_err("%s cpu%d nominal=%d %d lowest_nonlinear=%d %d lowest=%d %d, they should be equal!\n",
- __func__, cpu, nominal_perf, cpudata->nominal_perf,
- lowest_nonlinear_perf, cpudata->lowest_nonlinear_perf,
- lowest_perf, cpudata->lowest_perf);
- goto skip_test;
+ __func__, cpu, nominal_perf, cur_perf.nominal_perf,
+ lowest_nonlinear_perf, cur_perf.lowest_nonlinear_perf,
+ lowest_perf, cur_perf.lowest_perf);
+ return -EINVAL;
}
if (!((highest_perf >= nominal_perf) &&
(nominal_perf > lowest_nonlinear_perf) &&
- (lowest_nonlinear_perf > lowest_perf) &&
+ (lowest_nonlinear_perf >= lowest_perf) &&
(lowest_perf > 0))) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d highest=%d >= nominal=%d > lowest_nonlinear=%d > lowest=%d > 0, the formula is incorrect!\n",
__func__, cpu, highest_perf, nominal_perf,
lowest_nonlinear_perf, lowest_perf);
- goto skip_test;
+ return -EINVAL;
}
- cpufreq_cpu_put(policy);
}
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
- return;
-skip_test:
- cpufreq_cpu_put(policy);
+ return 0;
}
/*
@@ -202,59 +184,50 @@ skip_test:
* max_freq >= nominal_freq > lowest_nonlinear_freq > min_freq > 0
* check max freq when set support boost mode.
*/
-static void amd_pstate_ut_check_freq(u32 index)
+static int amd_pstate_ut_check_freq(u32 index)
{
int cpu = 0;
- struct cpufreq_policy *policy = NULL;
- struct amd_cpudata *cpudata = NULL;
- for_each_possible_cpu(cpu) {
+ for_each_online_cpu(cpu) {
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
+ struct amd_cpudata *cpudata;
+
policy = cpufreq_cpu_get(cpu);
if (!policy)
- break;
+ continue;
cpudata = policy->driver_data;
- if (!((cpudata->max_freq >= cpudata->nominal_freq) &&
+ if (!((policy->cpuinfo.max_freq >= cpudata->nominal_freq) &&
(cpudata->nominal_freq > cpudata->lowest_nonlinear_freq) &&
- (cpudata->lowest_nonlinear_freq > cpudata->min_freq) &&
- (cpudata->min_freq > 0))) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ (cpudata->lowest_nonlinear_freq >= policy->cpuinfo.min_freq) &&
+ (policy->cpuinfo.min_freq > 0))) {
pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d > min=%d > 0, the formula is incorrect!\n",
- __func__, cpu, cpudata->max_freq, cpudata->nominal_freq,
- cpudata->lowest_nonlinear_freq, cpudata->min_freq);
- goto skip_test;
+ __func__, cpu, policy->cpuinfo.max_freq, cpudata->nominal_freq,
+ cpudata->lowest_nonlinear_freq, policy->cpuinfo.min_freq);
+ return -EINVAL;
}
if (cpudata->lowest_nonlinear_freq != policy->min) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d cpudata_lowest_nonlinear_freq=%d policy_min=%d, they should be equal!\n",
__func__, cpu, cpudata->lowest_nonlinear_freq, policy->min);
- goto skip_test;
+ return -EINVAL;
}
if (cpudata->boost_supported) {
- if ((policy->max == cpudata->max_freq) ||
- (policy->max == cpudata->nominal_freq))
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
- else {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ if ((policy->max != policy->cpuinfo.max_freq) &&
+ (policy->max != cpudata->nominal_freq)) {
pr_err("%s cpu%d policy_max=%d should be equal cpu_max=%d or cpu_nominal=%d !\n",
- __func__, cpu, policy->max, cpudata->max_freq,
+ __func__, cpu, policy->max, policy->cpuinfo.max_freq,
cpudata->nominal_freq);
- goto skip_test;
+ return -EINVAL;
}
} else {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d must support boost!\n", __func__, cpu);
- goto skip_test;
+ return -EINVAL;
}
- cpufreq_cpu_put(policy);
}
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
- return;
-skip_test:
- cpufreq_cpu_put(policy);
+ return 0;
}
static int amd_pstate_set_mode(enum amd_pstate_mode mode)
@@ -266,32 +239,28 @@ static int amd_pstate_set_mode(enum amd_pstate_mode mode)
return amd_pstate_update_status(mode_str, strlen(mode_str));
}
-static void amd_pstate_ut_check_driver(u32 index)
+static int amd_pstate_ut_check_driver(u32 index)
{
enum amd_pstate_mode mode1, mode2 = AMD_PSTATE_DISABLE;
- int ret;
for (mode1 = AMD_PSTATE_DISABLE; mode1 < AMD_PSTATE_MAX; mode1++) {
- ret = amd_pstate_set_mode(mode1);
+ int ret = amd_pstate_set_mode(mode1);
if (ret)
- goto out;
+ return ret;
for (mode2 = AMD_PSTATE_DISABLE; mode2 < AMD_PSTATE_MAX; mode2++) {
if (mode1 == mode2)
continue;
ret = amd_pstate_set_mode(mode2);
- if (ret)
- goto out;
+ if (ret) {
+ pr_err("%s: failed to update status for %s->%s\n", __func__,
+ amd_pstate_get_mode_string(mode1),
+ amd_pstate_get_mode_string(mode2));
+ return ret;
+ }
}
}
-out:
- if (ret)
- pr_warn("%s: failed to update status for %s->%s: %d\n", __func__,
- amd_pstate_get_mode_string(mode1),
- amd_pstate_get_mode_string(mode2), ret);
-
- amd_pstate_ut_cases[index].result = ret ?
- AMD_PSTATE_UT_RESULT_FAIL :
- AMD_PSTATE_UT_RESULT_PASS;
+
+ return 0;
}
static int __init amd_pstate_ut_init(void)
@@ -299,16 +268,12 @@ static int __init amd_pstate_ut_init(void)
u32 i = 0, arr_size = ARRAY_SIZE(amd_pstate_ut_cases);
for (i = 0; i < arr_size; i++) {
- amd_pstate_ut_cases[i].func(i);
- switch (amd_pstate_ut_cases[i].result) {
- case AMD_PSTATE_UT_RESULT_PASS:
+ int ret = amd_pstate_ut_cases[i].func(i);
+
+ if (ret)
+ pr_err("%-4d %-20s\t fail: %d!\n", i+1, amd_pstate_ut_cases[i].name, ret);
+ else
pr_info("%-4d %-20s\t success!\n", i+1, amd_pstate_ut_cases[i].name);
- break;
- case AMD_PSTATE_UT_RESULT_FAIL:
- default:
- pr_info("%-4d %-20s\t fail!\n", i+1, amd_pstate_ut_cases[i].name);
- break;
- }
}
return 0;
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 313550fa62d4..6789eed1bb5b 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -85,15 +85,9 @@ static struct cpufreq_driver *current_pstate_driver;
static struct cpufreq_driver amd_pstate_driver;
static struct cpufreq_driver amd_pstate_epp_driver;
static int cppc_state = AMD_PSTATE_UNDEFINED;
-static bool cppc_enabled;
static bool amd_pstate_prefcore = true;
static struct quirk_entry *quirks;
-#define AMD_CPPC_MAX_PERF_MASK GENMASK(7, 0)
-#define AMD_CPPC_MIN_PERF_MASK GENMASK(15, 8)
-#define AMD_CPPC_DES_PERF_MASK GENMASK(23, 16)
-#define AMD_CPPC_EPP_PERF_MASK GENMASK(31, 24)
-
/*
* AMD Energy Preference Performance (EPP)
* The EPP is used in the CCLK DPM controller to drive
@@ -142,6 +136,19 @@ static struct quirk_entry quirk_amd_7k62 = {
.lowest_freq = 550,
};
+static inline u8 freq_to_perf(union perf_cached perf, u32 nominal_freq, unsigned int freq_val)
+{
+ u32 perf_val = DIV_ROUND_UP_ULL((u64)freq_val * perf.nominal_perf, nominal_freq);
+
+ return (u8)clamp(perf_val, perf.lowest_perf, perf.highest_perf);
+}
+
+static inline u32 perf_to_freq(union perf_cached perf, u32 nominal_freq, u8 perf_val)
+{
+ return DIV_ROUND_UP_ULL((u64)nominal_freq * perf_val,
+ perf.nominal_perf);
+}
+
static int __init dmi_matched_7k62_bios_bug(const struct dmi_system_id *dmi)
{
/**
@@ -183,10 +190,9 @@ static inline int get_mode_idx_from_str(const char *str, size_t size)
return -EINVAL;
}
-static DEFINE_MUTEX(amd_pstate_limits_lock);
static DEFINE_MUTEX(amd_pstate_driver_lock);
-static s16 msr_get_epp(struct amd_cpudata *cpudata)
+static u8 msr_get_epp(struct amd_cpudata *cpudata)
{
u64 value;
int ret;
@@ -207,7 +213,7 @@ static inline s16 amd_pstate_get_epp(struct amd_cpudata *cpudata)
return static_call(amd_pstate_get_epp)(cpudata);
}
-static s16 shmem_get_epp(struct amd_cpudata *cpudata)
+static u8 shmem_get_epp(struct amd_cpudata *cpudata)
{
u64 epp;
int ret;
@@ -218,12 +224,13 @@ static s16 shmem_get_epp(struct amd_cpudata *cpudata)
return ret;
}
- return (s16)(epp & 0xff);
+ return FIELD_GET(AMD_CPPC_EPP_PERF_MASK, epp);
}
-static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
- u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
+static int msr_update_perf(struct cpufreq_policy *policy, u8 min_perf,
+ u8 des_perf, u8 max_perf, u8 epp, bool fast_switch)
{
+ struct amd_cpudata *cpudata = policy->driver_data;
u64 value, prev;
value = prev = READ_ONCE(cpudata->cppc_req_cached);
@@ -235,6 +242,18 @@ static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ trace_amd_pstate_epp_perf(cpudata->cpu,
+ perf.highest_perf,
+ epp,
+ min_perf,
+ max_perf,
+ policy->boost_enabled,
+ value != prev);
+ }
+
if (value == prev)
return 0;
@@ -249,24 +268,24 @@ static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
}
WRITE_ONCE(cpudata->cppc_req_cached, value);
- WRITE_ONCE(cpudata->epp_cached, epp);
return 0;
}
DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);
-static inline int amd_pstate_update_perf(struct amd_cpudata *cpudata,
- u32 min_perf, u32 des_perf,
- u32 max_perf, u32 epp,
+static inline int amd_pstate_update_perf(struct cpufreq_policy *policy,
+ u8 min_perf, u8 des_perf,
+ u8 max_perf, u8 epp,
bool fast_switch)
{
- return static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
+ return static_call(amd_pstate_update_perf)(policy, min_perf, des_perf,
max_perf, epp, fast_switch);
}
-static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
+static int msr_set_epp(struct cpufreq_policy *policy, u8 epp)
{
+ struct amd_cpudata *cpudata = policy->driver_data;
u64 value, prev;
int ret;
@@ -274,6 +293,19 @@ static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
value &= ~AMD_CPPC_EPP_PERF_MASK;
value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ union perf_cached perf = cpudata->perf;
+
+ trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
+ epp,
+ FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
+ cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK,
+ cpudata->cppc_req_cached),
+ policy->boost_enabled,
+ value != prev);
+ }
+
if (value == prev)
return 0;
@@ -284,7 +316,6 @@ static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
}
/* update both so that msr_update_perf() can effectively check */
- WRITE_ONCE(cpudata->epp_cached, epp);
WRITE_ONCE(cpudata->cppc_req_cached, value);
return ret;
@@ -292,17 +323,35 @@ static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
DEFINE_STATIC_CALL(amd_pstate_set_epp, msr_set_epp);
-static inline int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
+static inline int amd_pstate_set_epp(struct cpufreq_policy *policy, u8 epp)
{
- return static_call(amd_pstate_set_epp)(cpudata, epp);
+ return static_call(amd_pstate_set_epp)(policy, epp);
}
-static int shmem_set_epp(struct amd_cpudata *cpudata, u32 epp)
+static int shmem_set_epp(struct cpufreq_policy *policy, u8 epp)
{
- int ret;
+ struct amd_cpudata *cpudata = policy->driver_data;
struct cppc_perf_ctrls perf_ctrls;
+ u8 epp_cached;
+ u64 value;
+ int ret;
+
+
+ epp_cached = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached);
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ union perf_cached perf = cpudata->perf;
+
+ trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
+ epp,
+ FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
+ cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK,
+ cpudata->cppc_req_cached),
+ policy->boost_enabled,
+ epp != epp_cached);
+ }
- if (epp == cpudata->epp_cached)
+ if (epp == epp_cached)
return 0;
perf_ctrls.energy_perf = epp;
@@ -311,106 +360,35 @@ static int shmem_set_epp(struct amd_cpudata *cpudata, u32 epp)
pr_debug("failed to set energy perf value (%d)\n", ret);
return ret;
}
- WRITE_ONCE(cpudata->epp_cached, epp);
- return ret;
-}
-
-static int amd_pstate_set_energy_pref_index(struct cpufreq_policy *policy,
- int pref_index)
-{
- struct amd_cpudata *cpudata = policy->driver_data;
- int epp;
-
- if (!pref_index)
- epp = cpudata->epp_default;
- else
- epp = epp_values[pref_index];
-
- if (epp > 0 && cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
- pr_debug("EPP cannot be set under performance policy\n");
- return -EBUSY;
- }
-
- if (trace_amd_pstate_epp_perf_enabled()) {
- trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
- epp,
- FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
- FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
- policy->boost_enabled);
- }
+ value = READ_ONCE(cpudata->cppc_req_cached);
+ value &= ~AMD_CPPC_EPP_PERF_MASK;
+ value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
- return amd_pstate_set_epp(cpudata, epp);
+ return ret;
}
-static inline int msr_cppc_enable(bool enable)
+static inline int msr_cppc_enable(struct cpufreq_policy *policy)
{
- int ret, cpu;
- unsigned long logical_proc_id_mask = 0;
-
- /*
- * MSR_AMD_CPPC_ENABLE is write-once, once set it cannot be cleared.
- */
- if (!enable)
- return 0;
-
- if (enable == cppc_enabled)
- return 0;
-
- for_each_present_cpu(cpu) {
- unsigned long logical_id = topology_logical_package_id(cpu);
-
- if (test_bit(logical_id, &logical_proc_id_mask))
- continue;
-
- set_bit(logical_id, &logical_proc_id_mask);
-
- ret = wrmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_ENABLE,
- enable);
- if (ret)
- return ret;
- }
-
- cppc_enabled = enable;
- return 0;
+ return wrmsrl_safe_on_cpu(policy->cpu, MSR_AMD_CPPC_ENABLE, 1);
}
-static int shmem_cppc_enable(bool enable)
+static int shmem_cppc_enable(struct cpufreq_policy *policy)
{
- int cpu, ret = 0;
- struct cppc_perf_ctrls perf_ctrls;
-
- if (enable == cppc_enabled)
- return 0;
-
- for_each_present_cpu(cpu) {
- ret = cppc_set_enable(cpu, enable);
- if (ret)
- return ret;
-
- /* Enable autonomous mode for EPP */
- if (cppc_state == AMD_PSTATE_ACTIVE) {
- /* Set desired perf as zero to allow EPP firmware control */
- perf_ctrls.desired_perf = 0;
- ret = cppc_set_perf(cpu, &perf_ctrls);
- if (ret)
- return ret;
- }
- }
-
- cppc_enabled = enable;
- return ret;
+ return cppc_set_enable(policy->cpu, 1);
}
DEFINE_STATIC_CALL(amd_pstate_cppc_enable, msr_cppc_enable);
-static inline int amd_pstate_cppc_enable(bool enable)
+static inline int amd_pstate_cppc_enable(struct cpufreq_policy *policy)
{
- return static_call(amd_pstate_cppc_enable)(enable);
+ return static_call(amd_pstate_cppc_enable)(policy);
}
static int msr_init_perf(struct amd_cpudata *cpudata)
{
+ union perf_cached perf = READ_ONCE(cpudata->perf);
u64 cap1, numerator;
int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
@@ -422,19 +400,22 @@ static int msr_init_perf(struct amd_cpudata *cpudata)
if (ret)
return ret;
- WRITE_ONCE(cpudata->highest_perf, numerator);
- WRITE_ONCE(cpudata->max_limit_perf, numerator);
- WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
- WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
- WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
- WRITE_ONCE(cpudata->prefcore_ranking, AMD_CPPC_HIGHEST_PERF(cap1));
- WRITE_ONCE(cpudata->min_limit_perf, AMD_CPPC_LOWEST_PERF(cap1));
+ perf.highest_perf = numerator;
+ perf.max_limit_perf = numerator;
+ perf.min_limit_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1);
+ perf.nominal_perf = FIELD_GET(AMD_CPPC_NOMINAL_PERF_MASK, cap1);
+ perf.lowest_nonlinear_perf = FIELD_GET(AMD_CPPC_LOWNONLIN_PERF_MASK, cap1);
+ perf.lowest_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1);
+ WRITE_ONCE(cpudata->perf, perf);
+ WRITE_ONCE(cpudata->prefcore_ranking, FIELD_GET(AMD_CPPC_HIGHEST_PERF_MASK, cap1));
+
return 0;
}
static int shmem_init_perf(struct amd_cpudata *cpudata)
{
struct cppc_perf_caps cppc_perf;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
u64 numerator;
int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
@@ -445,14 +426,14 @@ static int shmem_init_perf(struct amd_cpudata *cpudata)
if (ret)
return ret;
- WRITE_ONCE(cpudata->highest_perf, numerator);
- WRITE_ONCE(cpudata->max_limit_perf, numerator);
- WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
- WRITE_ONCE(cpudata->lowest_nonlinear_perf,
- cppc_perf.lowest_nonlinear_perf);
- WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
+ perf.highest_perf = numerator;
+ perf.max_limit_perf = numerator;
+ perf.min_limit_perf = cppc_perf.lowest_perf;
+ perf.nominal_perf = cppc_perf.nominal_perf;
+ perf.lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
+ perf.lowest_perf = cppc_perf.lowest_perf;
+ WRITE_ONCE(cpudata->perf, perf);
WRITE_ONCE(cpudata->prefcore_ranking, cppc_perf.highest_perf);
- WRITE_ONCE(cpudata->min_limit_perf, cppc_perf.lowest_perf);
if (cppc_state == AMD_PSTATE_ACTIVE)
return 0;
@@ -479,23 +460,56 @@ static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
return static_call(amd_pstate_init_perf)(cpudata);
}
-static int shmem_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
- u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
+static int shmem_update_perf(struct cpufreq_policy *policy, u8 min_perf,
+ u8 des_perf, u8 max_perf, u8 epp, bool fast_switch)
{
+ struct amd_cpudata *cpudata = policy->driver_data;
struct cppc_perf_ctrls perf_ctrls;
+ u64 value, prev;
+ int ret;
if (cppc_state == AMD_PSTATE_ACTIVE) {
- int ret = shmem_set_epp(cpudata, epp);
+ int ret = shmem_set_epp(policy, epp);
if (ret)
return ret;
}
+ value = prev = READ_ONCE(cpudata->cppc_req_cached);
+
+ value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
+ AMD_CPPC_DES_PERF_MASK | AMD_CPPC_EPP_PERF_MASK);
+ value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf);
+ value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, des_perf);
+ value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
+ value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ trace_amd_pstate_epp_perf(cpudata->cpu,
+ perf.highest_perf,
+ epp,
+ min_perf,
+ max_perf,
+ policy->boost_enabled,
+ value != prev);
+ }
+
+ if (value == prev)
+ return 0;
+
perf_ctrls.max_perf = max_perf;
perf_ctrls.min_perf = min_perf;
perf_ctrls.desired_perf = des_perf;
- return cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ ret = cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ if (ret)
+ return ret;
+
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+
+ return 0;
}
static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
@@ -531,17 +545,18 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
return true;
}
-static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
- u32 des_perf, u32 max_perf, bool fast_switch, int gov_flags)
+static void amd_pstate_update(struct amd_cpudata *cpudata, u8 min_perf,
+ u8 des_perf, u8 max_perf, bool fast_switch, int gov_flags)
{
- unsigned long max_freq;
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpudata->cpu);
- u32 nominal_perf = READ_ONCE(cpudata->nominal_perf);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpudata->cpu);
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ if (!policy)
+ return;
- des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
+ des_perf = clamp_t(u8, des_perf, min_perf, max_perf);
- max_freq = READ_ONCE(cpudata->max_limit_freq);
- policy->cur = div_u64(des_perf * max_freq, max_perf);
+ policy->cur = perf_to_freq(perf, cpudata->nominal_freq, des_perf);
if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
min_perf = des_perf;
@@ -550,7 +565,7 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
/* limit the max perf when core performance boost feature is disabled */
if (!cpudata->boost_supported)
- max_perf = min_t(unsigned long, nominal_perf, max_perf);
+ max_perf = min_t(u8, perf.nominal_perf, max_perf);
if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
@@ -558,9 +573,7 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
cpudata->cpu, fast_switch);
}
- amd_pstate_update_perf(cpudata, min_perf, des_perf, max_perf, 0, fast_switch);
-
- cpufreq_cpu_put(policy);
+ amd_pstate_update_perf(policy, min_perf, des_perf, max_perf, 0, fast_switch);
}
static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
@@ -572,7 +585,8 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
* amd-pstate qos_requests.
*/
if (policy_data->min == FREQ_QOS_MIN_DEFAULT_VALUE) {
- struct cpufreq_policy *policy = cpufreq_cpu_get(policy_data->cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) =
+ cpufreq_cpu_get(policy_data->cpu);
struct amd_cpudata *cpudata;
if (!policy)
@@ -580,58 +594,48 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
cpudata = policy->driver_data;
policy_data->min = cpudata->lowest_nonlinear_freq;
- cpufreq_cpu_put(policy);
}
cpufreq_verify_within_cpu_limits(policy_data);
- pr_debug("policy_max =%d, policy_min=%d\n", policy_data->max, policy_data->min);
return 0;
}
-static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
+static void amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
{
- u32 max_limit_perf, min_limit_perf, max_perf, max_freq;
struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
- max_perf = READ_ONCE(cpudata->highest_perf);
- max_freq = READ_ONCE(cpudata->max_freq);
- max_limit_perf = div_u64(policy->max * max_perf, max_freq);
- min_limit_perf = div_u64(policy->min * max_perf, max_freq);
+ perf.max_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->max);
+ perf.min_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->min);
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
- min_limit_perf = min(cpudata->nominal_perf, max_limit_perf);
+ perf.min_limit_perf = min(perf.nominal_perf, perf.max_limit_perf);
- WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
- WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
WRITE_ONCE(cpudata->max_limit_freq, policy->max);
WRITE_ONCE(cpudata->min_limit_freq, policy->min);
-
- return 0;
+ WRITE_ONCE(cpudata->perf, perf);
}
static int amd_pstate_update_freq(struct cpufreq_policy *policy,
unsigned int target_freq, bool fast_switch)
{
struct cpufreq_freqs freqs;
- struct amd_cpudata *cpudata = policy->driver_data;
- unsigned long max_perf, min_perf, des_perf, cap_perf;
+ struct amd_cpudata *cpudata;
+ union perf_cached perf;
+ u8 des_perf;
- if (!cpudata->max_freq)
- return -ENODEV;
+ cpudata = policy->driver_data;
if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
amd_pstate_update_min_max_limit(policy);
- cap_perf = READ_ONCE(cpudata->highest_perf);
- min_perf = READ_ONCE(cpudata->lowest_perf);
- max_perf = cap_perf;
+ perf = READ_ONCE(cpudata->perf);
freqs.old = policy->cur;
freqs.new = target_freq;
- des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf,
- cpudata->max_freq);
+ des_perf = freq_to_perf(perf, cpudata->nominal_freq, target_freq);
WARN_ON(fast_switch && !policy->fast_switch_enabled);
/*
@@ -642,8 +646,9 @@ static int amd_pstate_update_freq(struct cpufreq_policy *policy,
if (!fast_switch)
cpufreq_freq_transition_begin(policy, &freqs);
- amd_pstate_update(cpudata, min_perf, des_perf,
- max_perf, fast_switch, policy->governor->flags);
+ amd_pstate_update(cpudata, perf.min_limit_perf, des_perf,
+ perf.max_limit_perf, fast_switch,
+ policy->governor->flags);
if (!fast_switch)
cpufreq_freq_transition_end(policy, &freqs, false);
@@ -671,10 +676,10 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
unsigned long target_perf,
unsigned long capacity)
{
- unsigned long max_perf, min_perf, des_perf,
- cap_perf, lowest_nonlinear_perf;
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ u8 max_perf, min_perf, des_perf, cap_perf;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct amd_cpudata *cpudata;
+ union perf_cached perf;
if (!policy)
return;
@@ -684,40 +689,38 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
amd_pstate_update_min_max_limit(policy);
-
- cap_perf = READ_ONCE(cpudata->highest_perf);
- lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
+ perf = READ_ONCE(cpudata->perf);
+ cap_perf = perf.highest_perf;
des_perf = cap_perf;
if (target_perf < capacity)
des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);
- min_perf = READ_ONCE(cpudata->lowest_perf);
if (_min_perf < capacity)
min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);
+ else
+ min_perf = cap_perf;
- if (min_perf < lowest_nonlinear_perf)
- min_perf = lowest_nonlinear_perf;
+ if (min_perf < perf.min_limit_perf)
+ min_perf = perf.min_limit_perf;
- max_perf = cpudata->max_limit_perf;
+ max_perf = perf.max_limit_perf;
if (max_perf < min_perf)
max_perf = min_perf;
- des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
-
amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true,
policy->governor->flags);
- cpufreq_cpu_put(policy);
}
static int amd_pstate_cpu_boost_update(struct cpufreq_policy *policy, bool on)
{
struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
u32 nominal_freq, max_freq;
int ret = 0;
nominal_freq = READ_ONCE(cpudata->nominal_freq);
- max_freq = READ_ONCE(cpudata->max_freq);
+ max_freq = perf_to_freq(perf, cpudata->nominal_freq, perf.highest_perf);
if (on)
policy->cpuinfo.max_freq = max_freq;
@@ -744,7 +747,6 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
pr_err("Boost mode is not supported by this processor or SBIOS\n");
return -EOPNOTSUPP;
}
- guard(mutex)(&amd_pstate_driver_lock);
ret = amd_pstate_cpu_boost_update(policy, state);
refresh_frequency_limits(policy);
@@ -821,28 +823,21 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
static void amd_pstate_update_limits(unsigned int cpu)
{
- struct cpufreq_policy *policy = NULL;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct amd_cpudata *cpudata;
u32 prev_high = 0, cur_high = 0;
- int ret;
bool highest_perf_changed = false;
if (!amd_pstate_prefcore)
return;
- policy = cpufreq_cpu_get(cpu);
if (!policy)
return;
- cpudata = policy->driver_data;
-
- guard(mutex)(&amd_pstate_driver_lock);
-
- ret = amd_get_highest_perf(cpu, &cur_high);
- if (ret) {
- cpufreq_cpu_put(policy);
+ if (amd_get_highest_perf(cpu, &cur_high))
return;
- }
+
+ cpudata = policy->driver_data;
prev_high = READ_ONCE(cpudata->prefcore_ranking);
highest_perf_changed = (prev_high != cur_high);
@@ -852,11 +847,6 @@ static void amd_pstate_update_limits(unsigned int cpu)
if (cur_high < CPPC_MAX_PERF)
sched_set_itmt_core_prio((int)cur_high, cpu);
}
- cpufreq_cpu_put(policy);
-
- if (!highest_perf_changed)
- cpufreq_update_policy(cpu);
-
}
/*
@@ -894,48 +884,45 @@ static u32 amd_pstate_get_transition_latency(unsigned int cpu)
}
/*
- * amd_pstate_init_freq: Initialize the max_freq, min_freq,
- * nominal_freq and lowest_nonlinear_freq for
- * the @cpudata object.
+ * amd_pstate_init_freq: Initialize the nominal_freq and lowest_nonlinear_freq
+ * for the @cpudata object.
*
- * Requires: highest_perf, lowest_perf, nominal_perf and
- * lowest_nonlinear_perf members of @cpudata to be
- * initialized.
+ * Requires: all perf members of @cpudata to be initialized.
*
- * Returns 0 on success, non-zero value on failure.
+ * Returns 0 on success, non-zero value on failure.
*/
static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
{
- int ret;
- u32 min_freq, max_freq;
- u32 highest_perf, nominal_perf, nominal_freq;
- u32 lowest_nonlinear_perf, lowest_nonlinear_freq;
+ u32 min_freq, max_freq, nominal_freq, lowest_nonlinear_freq;
struct cppc_perf_caps cppc_perf;
+ union perf_cached perf;
+ int ret;
ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
if (ret)
return ret;
-
- if (quirks && quirks->lowest_freq)
- min_freq = quirks->lowest_freq;
- else
- min_freq = cppc_perf.lowest_freq;
+ perf = READ_ONCE(cpudata->perf);
if (quirks && quirks->nominal_freq)
nominal_freq = quirks->nominal_freq;
else
nominal_freq = cppc_perf.nominal_freq;
+ nominal_freq *= 1000;
+
+ if (quirks && quirks->lowest_freq) {
+ min_freq = quirks->lowest_freq;
+ perf.lowest_perf = freq_to_perf(perf, nominal_freq, min_freq);
+ WRITE_ONCE(cpudata->perf, perf);
+ } else
+ min_freq = cppc_perf.lowest_freq;
- highest_perf = READ_ONCE(cpudata->highest_perf);
- nominal_perf = READ_ONCE(cpudata->nominal_perf);
- max_freq = div_u64((u64)highest_perf * nominal_freq, nominal_perf);
+ min_freq *= 1000;
- lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
- lowest_nonlinear_freq = div_u64((u64)nominal_freq * lowest_nonlinear_perf, nominal_perf);
- WRITE_ONCE(cpudata->min_freq, min_freq * 1000);
- WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq * 1000);
- WRITE_ONCE(cpudata->nominal_freq, nominal_freq * 1000);
- WRITE_ONCE(cpudata->max_freq, max_freq * 1000);
+ WRITE_ONCE(cpudata->nominal_freq, nominal_freq);
+
+ max_freq = perf_to_freq(perf, nominal_freq, perf.highest_perf);
+ lowest_nonlinear_freq = perf_to_freq(perf, nominal_freq, perf.lowest_nonlinear_perf);
+ WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq);
/**
* Below values need to be initialized correctly, otherwise driver will fail to load
@@ -960,9 +947,10 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
- int min_freq, max_freq, ret;
- struct device *dev;
struct amd_cpudata *cpudata;
+ union perf_cached perf;
+ struct device *dev;
+ int ret;
/*
* Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
@@ -993,19 +981,23 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
if (ret)
goto free_cpudata1;
- min_freq = READ_ONCE(cpudata->min_freq);
- max_freq = READ_ONCE(cpudata->max_freq);
-
policy->cpuinfo.transition_latency = amd_pstate_get_transition_latency(policy->cpu);
policy->transition_delay_us = amd_pstate_get_transition_delay_us(policy->cpu);
- policy->min = min_freq;
- policy->max = max_freq;
+ perf = READ_ONCE(cpudata->perf);
- policy->cpuinfo.min_freq = min_freq;
- policy->cpuinfo.max_freq = max_freq;
+ policy->cpuinfo.min_freq = policy->min = perf_to_freq(perf,
+ cpudata->nominal_freq,
+ perf.lowest_perf);
+ policy->cpuinfo.max_freq = policy->max = perf_to_freq(perf,
+ cpudata->nominal_freq,
+ perf.highest_perf);
- policy->boost_enabled = READ_ONCE(cpudata->boost_supported);
+ ret = amd_pstate_cppc_enable(policy);
+ if (ret)
+ goto free_cpudata1;
+
+ policy->boost_supported = READ_ONCE(cpudata->boost_supported);
/* It will be updated by governor */
policy->cur = policy->cpuinfo.min_freq;
@@ -1027,9 +1019,6 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
goto free_cpudata2;
}
- cpudata->max_limit_freq = max_freq;
- cpudata->min_limit_freq = min_freq;
-
policy->driver_data = cpudata;
if (!current_pstate_driver->adjust_perf)
@@ -1040,6 +1029,7 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
free_cpudata2:
freq_qos_remove_request(&cpudata->req[0]);
free_cpudata1:
+ pr_warn("Failed to initialize CPU %d: %d\n", policy->cpu, ret);
kfree(cpudata);
return ret;
}
@@ -1054,28 +1044,6 @@ static void amd_pstate_cpu_exit(struct cpufreq_policy *policy)
kfree(cpudata);
}
-static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
-{
- int ret;
-
- ret = amd_pstate_cppc_enable(true);
- if (ret)
- pr_err("failed to enable amd-pstate during resume, return %d\n", ret);
-
- return ret;
-}
-
-static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
-{
- int ret;
-
- ret = amd_pstate_cppc_enable(false);
- if (ret)
- pr_err("failed to disable amd-pstate during suspend, return %d\n", ret);
-
- return ret;
-}
-
/* Sysfs attributes */
/*
@@ -1086,27 +1054,27 @@ static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
char *buf)
{
- int max_freq;
- struct amd_cpudata *cpudata = policy->driver_data;
+ struct amd_cpudata *cpudata;
+ union perf_cached perf;
- max_freq = READ_ONCE(cpudata->max_freq);
- if (max_freq < 0)
- return max_freq;
+ cpudata = policy->driver_data;
+ perf = READ_ONCE(cpudata->perf);
- return sysfs_emit(buf, "%u\n", max_freq);
+ return sysfs_emit(buf, "%u\n",
+ perf_to_freq(perf, cpudata->nominal_freq, perf.highest_perf));
}
static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *policy,
char *buf)
{
- int freq;
- struct amd_cpudata *cpudata = policy->driver_data;
+ struct amd_cpudata *cpudata;
+ union perf_cached perf;
- freq = READ_ONCE(cpudata->lowest_nonlinear_freq);
- if (freq < 0)
- return freq;
+ cpudata = policy->driver_data;
+ perf = READ_ONCE(cpudata->perf);
- return sysfs_emit(buf, "%u\n", freq);
+ return sysfs_emit(buf, "%u\n",
+ perf_to_freq(perf, cpudata->nominal_freq, perf.lowest_nonlinear_perf));
}
/*
@@ -1116,18 +1084,17 @@ static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *poli
static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
char *buf)
{
- u32 perf;
- struct amd_cpudata *cpudata = policy->driver_data;
+ struct amd_cpudata *cpudata;
- perf = READ_ONCE(cpudata->highest_perf);
+ cpudata = policy->driver_data;
- return sysfs_emit(buf, "%u\n", perf);
+ return sysfs_emit(buf, "%u\n", cpudata->perf.highest_perf);
}
static ssize_t show_amd_pstate_prefcore_ranking(struct cpufreq_policy *policy,
char *buf)
{
- u32 perf;
+ u8 perf;
struct amd_cpudata *cpudata = policy->driver_data;
perf = READ_ONCE(cpudata->prefcore_ranking);
@@ -1168,8 +1135,10 @@ static ssize_t show_energy_performance_available_preferences(
static ssize_t store_energy_performance_preference(
struct cpufreq_policy *policy, const char *buf, size_t count)
{
+ struct amd_cpudata *cpudata = policy->driver_data;
char str_preference[21];
ssize_t ret;
+ u8 epp;
ret = sscanf(buf, "%20s", str_preference);
if (ret != 1)
@@ -1179,9 +1148,17 @@ static ssize_t store_energy_performance_preference(
if (ret < 0)
return -EINVAL;
- guard(mutex)(&amd_pstate_limits_lock);
+ if (!ret)
+ epp = cpudata->epp_default;
+ else
+ epp = epp_values[ret];
- ret = amd_pstate_set_energy_pref_index(policy, ret);
+ if (epp > 0 && policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ pr_debug("EPP cannot be set under performance policy\n");
+ return -EBUSY;
+ }
+
+ ret = amd_pstate_set_epp(policy, epp);
return ret ? ret : count;
}
@@ -1190,9 +1167,11 @@ static ssize_t show_energy_performance_preference(
struct cpufreq_policy *policy, char *buf)
{
struct amd_cpudata *cpudata = policy->driver_data;
- int preference;
+ u8 preference, epp;
+
+ epp = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached);
- switch (cpudata->epp_cached) {
+ switch (epp) {
case AMD_CPPC_EPP_PERFORMANCE:
preference = EPP_INDEX_PERFORMANCE;
break;
@@ -1214,7 +1193,6 @@ static ssize_t show_energy_performance_preference(
static void amd_pstate_driver_cleanup(void)
{
- amd_pstate_cppc_enable(false);
cppc_state = AMD_PSTATE_DISABLE;
current_pstate_driver = NULL;
}
@@ -1248,14 +1226,6 @@ static int amd_pstate_register_driver(int mode)
cppc_state = mode;
- ret = amd_pstate_cppc_enable(true);
- if (ret) {
- pr_err("failed to enable cppc during amd-pstate driver registration, return %d\n",
- ret);
- amd_pstate_driver_cleanup();
- return ret;
- }
-
/* at least one CPU supports CPB */
current_pstate_driver->boost_enabled = cpu_feature_enabled(X86_FEATURE_CPB);
@@ -1353,8 +1323,10 @@ int amd_pstate_update_status(const char *buf, size_t size)
if (mode_idx < 0 || mode_idx >= AMD_PSTATE_MAX)
return -EINVAL;
- if (mode_state_machine[cppc_state][mode_idx])
+ if (mode_state_machine[cppc_state][mode_idx]) {
+ guard(mutex)(&amd_pstate_driver_lock);
return mode_state_machine[cppc_state][mode_idx](mode_idx);
+ }
return 0;
}
@@ -1375,7 +1347,6 @@ static ssize_t status_store(struct device *a, struct device_attribute *b,
char *p = memchr(buf, '\n', count);
int ret;
- guard(mutex)(&amd_pstate_driver_lock);
ret = amd_pstate_update_status(buf, p ? p - buf : count);
return ret < 0 ? ret : count;
@@ -1451,10 +1422,11 @@ static bool amd_pstate_acpi_pm_profile_undefined(void)
static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
{
- int min_freq, max_freq, ret;
struct amd_cpudata *cpudata;
+ union perf_cached perf;
struct device *dev;
u64 value;
+ int ret;
/*
* Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
@@ -1485,20 +1457,25 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
if (ret)
goto free_cpudata1;
- min_freq = READ_ONCE(cpudata->min_freq);
- max_freq = READ_ONCE(cpudata->max_freq);
+ perf = READ_ONCE(cpudata->perf);
+
+ policy->cpuinfo.min_freq = policy->min = perf_to_freq(perf,
+ cpudata->nominal_freq,
+ perf.lowest_perf);
+ policy->cpuinfo.max_freq = policy->max = perf_to_freq(perf,
+ cpudata->nominal_freq,
+ perf.highest_perf);
+ policy->driver_data = cpudata;
+
+ ret = amd_pstate_cppc_enable(policy);
+ if (ret)
+ goto free_cpudata1;
- policy->cpuinfo.min_freq = min_freq;
- policy->cpuinfo.max_freq = max_freq;
/* It will be updated by governor */
policy->cur = policy->cpuinfo.min_freq;
- policy->driver_data = cpudata;
-
- policy->min = policy->cpuinfo.min_freq;
- policy->max = policy->cpuinfo.max_freq;
- policy->boost_enabled = READ_ONCE(cpudata->boost_supported);
+ policy->boost_supported = READ_ONCE(cpudata->boost_supported);
/*
* Set the policy to provide a valid fallback value in case
@@ -1518,13 +1495,8 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
if (ret)
return ret;
WRITE_ONCE(cpudata->cppc_req_cached, value);
-
- ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, &value);
- if (ret)
- return ret;
- WRITE_ONCE(cpudata->cppc_cap1_cached, value);
}
- ret = amd_pstate_set_epp(cpudata, cpudata->epp_default);
+ ret = amd_pstate_set_epp(policy, cpudata->epp_default);
if (ret)
return ret;
@@ -1533,6 +1505,7 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
return 0;
free_cpudata1:
+ pr_warn("Failed to initialize CPU %d: %d\n", policy->cpu, ret);
kfree(cpudata);
return ret;
}
@@ -1552,24 +1525,21 @@ static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
- u32 epp;
+ union perf_cached perf;
+ u8 epp;
- amd_pstate_update_min_max_limit(policy);
+ if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
+ amd_pstate_update_min_max_limit(policy);
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
epp = 0;
else
- epp = READ_ONCE(cpudata->epp_cached);
+ epp = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached);
- if (trace_amd_pstate_epp_perf_enabled()) {
- trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf, epp,
- cpudata->min_limit_perf,
- cpudata->max_limit_perf,
- policy->boost_enabled);
- }
+ perf = READ_ONCE(cpudata->perf);
- return amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
- cpudata->max_limit_perf, epp, false);
+ return amd_pstate_update_perf(policy, perf.min_limit_perf, 0U,
+ perf.max_limit_perf, epp, false);
}
static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
@@ -1580,9 +1550,6 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
if (!policy->cpuinfo.max_freq)
return -ENODEV;
- pr_debug("set_policy: cpuinfo.max %u policy->max %u\n",
- policy->cpuinfo.max_freq, policy->max);
-
cpudata->policy = policy->policy;
ret = amd_pstate_epp_update_limit(policy);
@@ -1598,82 +1565,28 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
return 0;
}
-static int amd_pstate_epp_reenable(struct cpufreq_policy *policy)
-{
- struct amd_cpudata *cpudata = policy->driver_data;
- u64 max_perf;
- int ret;
-
- ret = amd_pstate_cppc_enable(true);
- if (ret)
- pr_err("failed to enable amd pstate during resume, return %d\n", ret);
-
- max_perf = READ_ONCE(cpudata->highest_perf);
-
- if (trace_amd_pstate_epp_perf_enabled()) {
- trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
- cpudata->epp_cached,
- FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
- max_perf, policy->boost_enabled);
- }
-
- return amd_pstate_update_perf(cpudata, 0, 0, max_perf, cpudata->epp_cached, false);
-}
-
static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
{
- struct amd_cpudata *cpudata = policy->driver_data;
- int ret;
+ pr_debug("AMD CPU Core %d going online\n", policy->cpu);
- pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
-
- ret = amd_pstate_epp_reenable(policy);
- if (ret)
- return ret;
- cpudata->suspended = false;
-
- return 0;
+ return amd_pstate_cppc_enable(policy);
}
static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
{
- struct amd_cpudata *cpudata = policy->driver_data;
- int min_perf;
-
- if (cpudata->suspended)
- return 0;
-
- min_perf = READ_ONCE(cpudata->lowest_perf);
-
- guard(mutex)(&amd_pstate_limits_lock);
-
- if (trace_amd_pstate_epp_perf_enabled()) {
- trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
- AMD_CPPC_EPP_BALANCE_POWERSAVE,
- min_perf, min_perf, policy->boost_enabled);
- }
-
- return amd_pstate_update_perf(cpudata, min_perf, 0, min_perf,
- AMD_CPPC_EPP_BALANCE_POWERSAVE, false);
+ return 0;
}
static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
- int ret;
- /* avoid suspending when EPP is not enabled */
- if (cppc_state != AMD_PSTATE_ACTIVE)
- return 0;
+ /* invalidate to ensure it's rewritten during resume */
+ cpudata->cppc_req_cached = 0;
	/* set this flag to avoid setting core offline */
cpudata->suspended = true;
- /* disable CPPC in lowlevel firmware */
- ret = amd_pstate_cppc_enable(false);
- if (ret)
- pr_err("failed to suspend, return %d\n", ret);
-
return 0;
}
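
Editor's note: clearing cppc_req_cached on suspend is what forces the resume path to rewrite the CPPC request — update helpers of this kind typically skip the hardware write when the newly built value equals the cached one. A small sketch of the invalidate-on-suspend pattern, using hypothetical names rather than the driver's:

/* hypothetical cache of the last value written to hardware */
static u64 cached_req;

static void demo_write_req(u64 new_req)
{
	if (new_req == cached_req)	/* fast path: hardware already matches */
		return;
	/* the real driver would write MSR_AMD_CPPC_REQ here */
	cached_req = new_req;
}

static void demo_suspend(void)
{
	/* invalidate so the first write after resume always reaches hardware */
	cached_req = 0;
}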
@@ -1682,10 +1595,12 @@ static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
struct amd_cpudata *cpudata = policy->driver_data;
if (cpudata->suspended) {
- guard(mutex)(&amd_pstate_limits_lock);
+ int ret;
		/* enable amd pstate from suspend state */
- amd_pstate_epp_reenable(policy);
+ ret = amd_pstate_epp_update_limit(policy);
+ if (ret)
+ return ret;
cpudata->suspended = false;
}
@@ -1700,8 +1615,6 @@ static struct cpufreq_driver amd_pstate_driver = {
.fast_switch = amd_pstate_fast_switch,
.init = amd_pstate_cpu_init,
.exit = amd_pstate_cpu_exit,
- .suspend = amd_pstate_cpu_suspend,
- .resume = amd_pstate_cpu_resume,
.set_boost = amd_pstate_set_boost,
.update_limits = amd_pstate_update_limits,
.name = "amd-pstate",
@@ -1868,7 +1781,6 @@ static int __init amd_pstate_init(void)
global_attr_free:
cpufreq_unregister_driver(current_pstate_driver);
- amd_pstate_cppc_enable(false);
return ret;
}
device_initcall(amd_pstate_init);
diff --git a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h
index 9747e3be6cee..fbe1c08d3f06 100644
--- a/drivers/cpufreq/amd-pstate.h
+++ b/drivers/cpufreq/amd-pstate.h
@@ -13,6 +13,36 @@
/*********************************************************************
* AMD P-state INTERFACE *
*********************************************************************/
+
+/**
+ * union perf_cached - A union to cache performance-related data.
+ * @highest_perf: the maximum performance an individual processor may reach,
+ * assuming ideal conditions
+ * For platforms that support the preferred core feature, the highest_perf value may be
+ * configured to any value in the range 166-255 by the firmware (because the preferred
+ * core ranking is encoded in the highest_perf value). To maintain consistency across
+ * all platforms, we split the highest_perf and preferred core ranking values into
+ * cpudata->perf.highest_perf and cpudata->prefcore_ranking.
+ * @nominal_perf: the maximum sustained performance level of the processor,
+ * assuming ideal operating conditions
+ * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
+ * savings are achieved
+ * @lowest_perf: the absolute lowest performance level of the processor
+ * @min_limit_perf: Cached value of the performance corresponding to policy->min
+ * @max_limit_perf: Cached value of the performance corresponding to policy->max
+ */
+union perf_cached {
+ struct {
+ u8 highest_perf;
+ u8 nominal_perf;
+ u8 lowest_nonlinear_perf;
+ u8 lowest_perf;
+ u8 min_limit_perf;
+ u8 max_limit_perf;
+ };
+ u64 val;
+};
+
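
Editor's note: packing the six u8 perf levels into one u64 lets the driver snapshot or publish all of them with a single READ_ONCE()/WRITE_ONCE() on the union, rather than reading several u32 fields that could be updated independently. A minimal sketch of that usage; the field names mirror the definition above, but the type and accessors are illustrative:

#include <linux/compiler.h>	/* READ_ONCE / WRITE_ONCE */
#include <linux/types.h>

union perf_cached_demo {	/* same layout idea as union perf_cached */
	struct {
		u8 highest_perf;
		u8 nominal_perf;
		u8 lowest_nonlinear_perf;
		u8 lowest_perf;
		u8 min_limit_perf;
		u8 max_limit_perf;
	};
	u64 val;
};

static union perf_cached_demo cached;

static union perf_cached_demo demo_snapshot(void)
{
	/* one 64-bit load gives a consistent view of all six levels */
	return READ_ONCE(cached);
}

static void demo_publish(union perf_cached_demo perf)
{
	/* one 64-bit store updates all levels together */
	WRITE_ONCE(cached, perf);
}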
/**
* struct amd_aperf_mperf
* @aperf: actual performance frequency clock count
@@ -30,24 +60,11 @@ struct amd_aperf_mperf {
* @cpu: CPU number
* @req: constraint request to apply
* @cppc_req_cached: cached performance request hints
- * @highest_perf: the maximum performance an individual processor may reach,
- * assuming ideal conditions
- * For platforms that do not support the preferred core feature, the
- * highest_pef may be configured with 166 or 255, to avoid max frequency
- * calculated wrongly. we take the fixed value as the highest_perf.
- * @nominal_perf: the maximum sustained performance level of the processor,
- * assuming ideal operating conditions
- * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
- * savings are achieved
- * @lowest_perf: the absolute lowest performance level of the processor
+ * @perf: cached performance-related data
* @prefcore_ranking: the preferred core ranking, the higher value indicates a higher
* priority.
- * @min_limit_perf: Cached value of the performance corresponding to policy->min
- * @max_limit_perf: Cached value of the performance corresponding to policy->max
* @min_limit_freq: Cached value of policy->min (in khz)
* @max_limit_freq: Cached value of policy->max (in khz)
- * @max_freq: the frequency (in khz) that mapped to highest_perf
- * @min_freq: the frequency (in khz) that mapped to lowest_perf
* @nominal_freq: the frequency (in khz) that mapped to nominal_perf
* @lowest_nonlinear_freq: the frequency (in khz) that mapped to lowest_nonlinear_perf
* @cur: Difference of Aperf/Mperf/tsc count between last and current sample
@@ -59,7 +76,6 @@ struct amd_aperf_mperf {
 * AMD P-State driver supports preferred core feature.
* @epp_cached: Cached CPPC energy-performance preference value
* @policy: Cpufreq policy value
- * @cppc_cap1_cached Cached MSR_AMD_CPPC_CAP1 register value
*
* The amd_cpudata is key private data for each CPU thread in AMD P-State, and
* represents all the attributes and goals that AMD P-State requests at runtime.
@@ -70,18 +86,11 @@ struct amd_cpudata {
struct freq_qos_request req[2];
u64 cppc_req_cached;
- u32 highest_perf;
- u32 nominal_perf;
- u32 lowest_nonlinear_perf;
- u32 lowest_perf;
- u32 prefcore_ranking;
- u32 min_limit_perf;
- u32 max_limit_perf;
- u32 min_limit_freq;
- u32 max_limit_freq;
+ union perf_cached perf;
- u32 max_freq;
- u32 min_freq;
+ u8 prefcore_ranking;
+ u32 min_limit_freq;
+ u32 max_limit_freq;
u32 nominal_freq;
u32 lowest_nonlinear_freq;
@@ -93,11 +102,9 @@ struct amd_cpudata {
bool hw_prefcore;
/* EPP feature related attributes*/
- s16 epp_cached;
u32 policy;
- u64 cppc_cap1_cached;
bool suspended;
- s16 epp_default;
+ u8 epp_default;
};
/*
diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c
index 269b18c62d04..4994c86feb57 100644
--- a/drivers/cpufreq/apple-soc-cpufreq.c
+++ b/drivers/cpufreq/apple-soc-cpufreq.c
@@ -229,12 +229,6 @@ static int apple_soc_cpufreq_find_cluster(struct cpufreq_policy *policy,
return 0;
}
-static struct freq_attr *apple_soc_cpufreq_hw_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL, /* Filled in below if boost is enabled */
- NULL,
-};
-
static int apple_soc_cpufreq_init(struct cpufreq_policy *policy)
{
int ret, i;
@@ -316,16 +310,6 @@ static int apple_soc_cpufreq_init(struct cpufreq_policy *policy)
policy->fast_switch_possible = true;
policy->suspend_freq = freq_table[0].frequency;
- if (policy_has_boost_freq(policy)) {
- ret = cpufreq_enable_boost_support();
- if (ret) {
- dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
- } else {
- apple_soc_cpufreq_hw_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
- apple_soc_cpufreq_driver.boost_enabled = true;
- }
- }
-
return 0;
out_free_cpufreq_table:
@@ -360,7 +344,7 @@ static struct cpufreq_driver apple_soc_cpufreq_driver = {
.target_index = apple_soc_cpufreq_set_target,
.fast_switch = apple_soc_cpufreq_fast_switch,
.register_em = cpufreq_register_em_with_opp,
- .attr = apple_soc_cpufreq_hw_attr,
+ .set_boost = cpufreq_boost_set_sw,
.suspend = cpufreq_generic_suspend,
};
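
Editor's note: this is the recurring conversion throughout the rest of the series — drivers drop their private freq_attr arrays and cpufreq_enable_boost_support() calls, because the core now creates scaling_available_freqs/scaling_boost_freqs from policy->freq_table and flips policy->boost_supported when the table carries boost entries; a driver only wires up .set_boost. A sketch of the resulting driver shape (names and frequencies are illustrative, not a real driver, and it assumes cpufreq_boost_set_sw is declared in <linux/cpufreq.h> after this series):

#include <linux/cpufreq.h>

static struct cpufreq_frequency_table demo_table[] = {
	{ .frequency = 1000000 },				/* 1.0 GHz */
	{ .frequency = 1400000, .flags = CPUFREQ_BOOST_FREQ },	/* boost */
	{ .frequency = CPUFREQ_TABLE_END },
};

static int demo_target_index(struct cpufreq_policy *policy, unsigned int index)
{
	return 0;	/* a real driver would program the clock here */
}

static int demo_init(struct cpufreq_policy *policy)
{
	/*
	 * Handing the core a table with a CPUFREQ_BOOST_FREQ entry is enough:
	 * cpufreq_table_validate_and_sort() sets policy->boost_supported and
	 * the core creates the available/boost frequency attributes.
	 */
	policy->freq_table = demo_table;
	return 0;
}

static struct cpufreq_driver demo_driver = {
	.name		= "demo-cpufreq",
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= demo_target_index,
	.init		= demo_init,
	/* no .attr array, no cpufreq_enable_boost_support() call */
	.set_boost	= cpufreq_boost_set_sw,
};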
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index bea41ccabf1f..f28a4435fba7 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -102,11 +102,7 @@ struct armada_37xx_dvfs {
};
static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
- /*
- * The cpufreq scaling for 1.2 GHz variant of the SOC is currently
- * unstable because we do not know how to configure it properly.
- */
- /* {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, */
+ {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} },
{.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} },
{.cpu_freq_max = 800*1000*1000, .divider = {1, 2, 3, 4} },
{.cpu_freq_max = 600*1000*1000, .divider = {2, 4, 5, 6} },
diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
index 7a979db81f09..5a3545bd0d8d 100644
--- a/drivers/cpufreq/armada-8k-cpufreq.c
+++ b/drivers/cpufreq/armada-8k-cpufreq.c
@@ -47,7 +47,7 @@ static void __init armada_8k_get_sharing_cpus(struct clk *cur_clk,
{
int cpu;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
struct device *cpu_dev;
struct clk *clk;
diff --git a/drivers/cpufreq/bmips-cpufreq.c b/drivers/cpufreq/bmips-cpufreq.c
index 17a4c174553d..36051880640b 100644
--- a/drivers/cpufreq/bmips-cpufreq.c
+++ b/drivers/cpufreq/bmips-cpufreq.c
@@ -150,7 +150,6 @@ static struct cpufreq_driver bmips_cpufreq_driver = {
.get = bmips_cpufreq_get,
.init = bmips_cpufreq_init,
.exit = bmips_cpufreq_exit,
- .attr = cpufreq_generic_attr,
.name = BMIPS_CPUFREQ_PREFIX,
};
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 2fd0f6be6fa3..7b841a086acc 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -720,7 +720,6 @@ cpufreq_freq_attr_ro(brcm_avs_voltage);
cpufreq_freq_attr_ro(brcm_avs_frequency);
static struct freq_attr *brcm_avs_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
&brcm_avs_pstate,
&brcm_avs_mode,
&brcm_avs_pmap,
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 8f512448382f..b3d74f9adcf0 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -34,8 +34,6 @@
*/
static LIST_HEAD(cpu_data_list);
-static bool boost_supported;
-
static struct cpufreq_driver cppc_cpufreq_driver;
#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
@@ -653,7 +651,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
* is supported.
*/
if (caps->highest_perf > caps->nominal_perf)
- boost_supported = true;
+ policy->boost_supported = true;
/* Set policy->cur to max now. The governors will adjust later. */
policy->cur = cppc_perf_to_khz(caps, caps->highest_perf);
@@ -791,11 +789,6 @@ static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
struct cppc_perf_caps *caps = &cpu_data->perf_caps;
int ret;
- if (!boost_supported) {
- pr_err("BOOST not supported by CPU or firmware\n");
- return -EINVAL;
- }
-
if (state)
policy->max = cppc_perf_to_khz(caps, caps->highest_perf);
else
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 3a7c3372bda7..e80dd982a3e2 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -36,12 +36,6 @@ struct private_data {
static LIST_HEAD(priv_list);
-static struct freq_attr *cpufreq_dt_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL, /* Extra space for boost-attr if required */
- NULL,
-};
-
static struct private_data *cpufreq_dt_find_data(int cpu)
{
struct private_data *priv;
@@ -120,21 +114,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = transition_latency;
policy->dvfs_possible_from_any_cpu = true;
- /* Support turbo/boost mode */
- if (policy_has_boost_freq(policy)) {
- /* This gets disabled by core on driver unregister */
- ret = cpufreq_enable_boost_support();
- if (ret)
- goto out_clk_put;
- cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
- }
-
return 0;
-
-out_clk_put:
- clk_put(cpu_clk);
-
- return ret;
}
static int cpufreq_online(struct cpufreq_policy *policy)
@@ -169,7 +149,7 @@ static struct cpufreq_driver dt_cpufreq_driver = {
.offline = cpufreq_offline,
.register_em = cpufreq_register_em_with_opp,
.name = "cpufreq-dt",
- .attr = cpufreq_dt_attr,
+ .set_boost = cpufreq_boost_set_sw,
.suspend = cpufreq_generic_suspend,
};
@@ -303,7 +283,7 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
int ret, cpu;
/* Request resources early so we can return in case of -EPROBE_DEFER */
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
ret = dt_cpufreq_early_init(&pdev->dev, cpu);
if (ret)
goto err;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 30ffbddc7ece..0cf5a320bb5e 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -88,6 +88,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_governor *new_gov,
unsigned int new_pol);
static bool cpufreq_boost_supported(void);
+static int cpufreq_boost_trigger_state(int state);
/*
* Two notifier lists: the "policy" list is involved in the
@@ -631,6 +632,9 @@ static ssize_t store_local_boost(struct cpufreq_policy *policy,
if (!cpufreq_driver->boost_enabled)
return -EINVAL;
+ if (!policy->boost_supported)
+ return -EINVAL;
+
if (policy->boost_enabled == enable)
return count;
@@ -729,18 +733,26 @@ show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
-__weak unsigned int arch_freq_get_on_cpu(int cpu)
+__weak int arch_freq_get_on_cpu(int cpu)
{
- return 0;
+ return -EOPNOTSUPP;
+}
+
+static inline bool cpufreq_avg_freq_supported(struct cpufreq_policy *policy)
+{
+ return arch_freq_get_on_cpu(policy->cpu) != -EOPNOTSUPP;
}
static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
ssize_t ret;
- unsigned int freq;
+ int freq;
- freq = arch_freq_get_on_cpu(policy->cpu);
- if (freq)
+ freq = IS_ENABLED(CONFIG_CPUFREQ_ARCH_CUR_FREQ)
+ ? arch_freq_get_on_cpu(policy->cpu)
+ : 0;
+
+ if (freq > 0)
ret = sysfs_emit(buf, "%u\n", freq);
else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
ret = sysfs_emit(buf, "%u\n", cpufreq_driver->get(policy->cpu));
@@ -785,6 +797,19 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
}
/*
+ * show_cpuinfo_avg_freq - average CPU frequency as detected by hardware
+ */
+static ssize_t show_cpuinfo_avg_freq(struct cpufreq_policy *policy,
+ char *buf)
+{
+ int avg_freq = arch_freq_get_on_cpu(policy->cpu);
+
+ if (avg_freq > 0)
+ return sysfs_emit(buf, "%u\n", avg_freq);
+ return avg_freq != 0 ? avg_freq : -EINVAL;
+}
+
+/*
* show_scaling_governor - show the current policy for the specified CPU
*/
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
@@ -946,6 +971,7 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
}
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
+cpufreq_freq_attr_ro(cpuinfo_avg_freq);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
@@ -1059,6 +1085,21 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
struct freq_attr **drv_attr;
int ret = 0;
+ /* Attributes that need freq_table */
+ if (policy->freq_table) {
+ ret = sysfs_create_file(&policy->kobj,
+ &cpufreq_freq_attr_scaling_available_freqs.attr);
+ if (ret)
+ return ret;
+
+ if (cpufreq_boost_supported()) {
+ ret = sysfs_create_file(&policy->kobj,
+ &cpufreq_freq_attr_scaling_boost_freqs.attr);
+ if (ret)
+ return ret;
+ }
+ }
+
/* set up files for this cpu device */
drv_attr = cpufreq_driver->attr;
while (drv_attr && *drv_attr) {
@@ -1073,6 +1114,12 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
return ret;
}
+ if (cpufreq_avg_freq_supported(policy)) {
+ ret = sysfs_create_file(&policy->kobj, &cpuinfo_avg_freq.attr);
+ if (ret)
+ return ret;
+ }
+
ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
if (ret)
return ret;
@@ -1571,14 +1618,14 @@ static int cpufreq_online(unsigned int cpu)
policy->cdev = of_cpufreq_cooling_register(policy);
/* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
- if (cpufreq_driver->set_boost &&
+ if (cpufreq_driver->set_boost && policy->boost_supported &&
policy->boost_enabled != cpufreq_boost_enabled()) {
policy->boost_enabled = cpufreq_boost_enabled();
ret = cpufreq_driver->set_boost(policy, policy->boost_enabled);
if (ret) {
/* If the set_boost fails, the online operation is not affected */
pr_info("%s: CPU%d: Cannot %s BOOST\n", __func__, policy->cpu,
- policy->boost_enabled ? "enable" : "disable");
+ str_enable_disable(policy->boost_enabled));
policy->boost_enabled = !policy->boost_enabled;
}
}
@@ -2772,7 +2819,7 @@ EXPORT_SYMBOL_GPL(cpufreq_update_limits);
/*********************************************************************
* BOOST *
*********************************************************************/
-static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
+int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
{
int ret;
@@ -2791,8 +2838,9 @@ static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
return 0;
}
+EXPORT_SYMBOL_GPL(cpufreq_boost_set_sw);
-int cpufreq_boost_trigger_state(int state)
+static int cpufreq_boost_trigger_state(int state)
{
struct cpufreq_policy *policy;
unsigned long flags;
@@ -2807,6 +2855,9 @@ int cpufreq_boost_trigger_state(int state)
cpus_read_lock();
for_each_active_policy(policy) {
+ if (!policy->boost_supported)
+ continue;
+
policy->boost_enabled = state;
ret = cpufreq_driver->set_boost(policy, state);
if (ret) {
@@ -2854,21 +2905,6 @@ static void remove_boost_sysfs_file(void)
sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}
-int cpufreq_enable_boost_support(void)
-{
- if (!cpufreq_driver)
- return -EINVAL;
-
- if (cpufreq_boost_supported())
- return 0;
-
- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
-
- /* This will get removed on driver unregister */
- return create_boost_sysfs_file();
-}
-EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
-
bool cpufreq_boost_enabled(void)
{
return cpufreq_driver->boost_enabled;
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index af44ee6a6430..1a7fcaf39cc9 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -145,7 +145,23 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
time_elapsed = update_time - j_cdbs->prev_update_time;
j_cdbs->prev_update_time = update_time;
- idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
+ /*
+ * cur_idle_time could be smaller than j_cdbs->prev_cpu_idle if
+ * it's obtained from get_cpu_idle_time_jiffy() when NOHZ is
+ * off, where idle_time is calculated by the difference between
+ * time elapsed in jiffies and "busy time" obtained from CPU
+ * statistics. If a CPU is 100% busy, the time elapsed and busy
+ * time should grow with the same amount in two consecutive
+ * samples, but in practice there could be a tiny difference,
+ * making the accumulated idle time decrease sometimes. Hence,
+ * in this case, idle_time should be regarded as 0 so that the
+ * subsequent load calculation stays correct.
+ */
+ if (cur_idle_time > j_cdbs->prev_cpu_idle)
+ idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
+ else
+ idle_time = 0;
+
j_cdbs->prev_cpu_idle = cur_idle_time;
if (ignore_nice) {
@@ -162,7 +178,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
* calls, so the previous load value can be used then.
*/
load = j_cdbs->prev_load;
- } else if (unlikely((int)idle_time > 2 * sampling_rate &&
+ } else if (unlikely(idle_time > 2 * sampling_rate &&
j_cdbs->prev_load)) {
/*
* If the CPU had gone completely idle and a task has
@@ -189,30 +205,15 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
load = j_cdbs->prev_load;
j_cdbs->prev_load = 0;
} else {
- if (time_elapsed >= idle_time) {
+ if (time_elapsed > idle_time)
load = 100 * (time_elapsed - idle_time) / time_elapsed;
- } else {
- /*
- * That can happen if idle_time is returned by
- * get_cpu_idle_time_jiffy(). In that case
- * idle_time is roughly equal to the difference
- * between time_elapsed and "busy time" obtained
- * from CPU statistics. Then, the "busy time"
- * can end up being greater than time_elapsed
- * (for example, if jiffies_64 and the CPU
- * statistics are updated by different CPUs),
- * so idle_time may in fact be negative. That
- * means, though, that the CPU was busy all
- * the time (on the rough average) during the
- * last sampling interval and 100 can be
- * returned as the load.
- */
- load = (int)idle_time < 0 ? 100 : 0;
- }
+ else
+ load = 0;
+
j_cdbs->prev_load = load;
}
- if (unlikely((int)idle_time > 2 * sampling_rate)) {
+ if (unlikely(idle_time > 2 * sampling_rate)) {
unsigned int periods = idle_time / sampling_rate;
if (periods < idle_periods)
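
Editor's note: with idle_time clamped to a non-negative value above, the load computation reduces to a simple saturating formula. A standalone worked sketch of the arithmetic (plain C, not governor code; the numbers are arbitrary):

#include <stdio.h>

/* load in percent, given idle_time already clamped to >= 0 */
static unsigned int calc_load(unsigned int time_elapsed, unsigned int idle_time)
{
	if (idle_time >= time_elapsed)	/* fully idle (or over-accounted) sample */
		return 0;
	return 100 * (time_elapsed - idle_time) / time_elapsed;
}

int main(void)
{
	/* 10000 time units elapsed, 2000 idle -> 80% load */
	printf("%u\n", calc_load(10000, 2000));
	/*
	 * Busy-time over-accounting used to make idle_time negative; after
	 * clamping it arrives here as 0 -> 100% load, matching the old
	 * "(int)idle_time < 0 ? 100 : 0" special case.
	 */
	printf("%u\n", calc_load(10000, 0));
	return 0;
}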
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index 8736be3a06ce..2c277eb3795a 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -101,7 +101,6 @@ static struct cpufreq_driver davinci_driver = {
.get = cpufreq_generic_get,
.init = davinci_cpu_init,
.name = "davinci",
- .attr = cpufreq_generic_attr,
};
static int __init davinci_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index 6e958b09e1b5..d23a97ba6478 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -376,7 +376,6 @@ static struct cpufreq_driver eps_driver = {
.exit = eps_cpu_exit,
.get = eps_get,
.name = "e_powersaver",
- .attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
index 4ce5eb35dc46..36494b855e41 100644
--- a/drivers/cpufreq/elanfreq.c
+++ b/drivers/cpufreq/elanfreq.c
@@ -194,7 +194,6 @@ static struct cpufreq_driver elanfreq_driver = {
.target_index = elanfreq_target,
.init = elanfreq_cpu_init,
.name = "elanfreq",
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id elan_id[] = {
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 10e80d912b8d..c03a91502f84 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -14,7 +14,7 @@
* FREQUENCY TABLE HELPERS *
*********************************************************************/
-bool policy_has_boost_freq(struct cpufreq_policy *policy)
+static bool policy_has_boost_freq(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *pos, *table = policy->freq_table;
@@ -27,7 +27,6 @@ bool policy_has_boost_freq(struct cpufreq_policy *policy)
return false;
}
-EXPORT_SYMBOL_GPL(policy_has_boost_freq);
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table)
@@ -276,7 +275,6 @@ static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy,
return show_available_freqs(policy, buf, false);
}
cpufreq_attr_available_freq(scaling_available);
-EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
/*
* scaling_boost_frequencies_show - show available boost frequencies for
@@ -288,13 +286,6 @@ static ssize_t scaling_boost_frequencies_show(struct cpufreq_policy *policy,
return show_available_freqs(policy, buf, true);
}
cpufreq_attr_available_freq(scaling_boost);
-EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_boost_freqs);
-
-struct freq_attr *cpufreq_generic_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
static int set_freq_table_sorted(struct cpufreq_policy *policy)
{
@@ -367,6 +358,10 @@ int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy)
if (ret)
return ret;
+	/* Drivers may have set this field already */
+ if (policy_has_boost_freq(policy))
+ policy->boost_supported = true;
+
return set_freq_table_sorted(policy);
}
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index f3c99f378ad6..db1c88e9d3f9 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -207,7 +207,6 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
.init = imx6q_cpufreq_init,
.register_em = cpufreq_register_em_with_opp,
.name = "imx6q-cpufreq",
- .attr = cpufreq_generic_attr,
.suspend = cpufreq_generic_suspend,
};
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 9c4cc01fd51a..4aad79d26c64 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -936,6 +936,8 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
NULL,
};
+static bool no_cas __ro_after_init;
+
static struct cpudata *hybrid_max_perf_cpu __read_mostly;
/*
* Protects hybrid_max_perf_cpu, the capacity_perf fields in struct cpudata,
@@ -1041,6 +1043,10 @@ static void hybrid_refresh_cpu_capacity_scaling(void)
static void hybrid_init_cpu_capacity_scaling(bool refresh)
{
+ /* Bail out if enabling capacity-aware scheduling is prohibited. */
+ if (no_cas)
+ return;
+
/*
* If hybrid_max_perf_cpu is set at this point, the hybrid CPU capacity
* scaling has been enabled already and the driver is just changing the
@@ -2200,28 +2206,20 @@ static int knl_get_turbo_pstate(int cpu)
return ret;
}
-static void hybrid_get_type(void *data)
-{
- u8 *cpu_type = data;
-
- *cpu_type = get_this_hybrid_cpu_type();
-}
-
static int hwp_get_cpu_scaling(int cpu)
{
if (hybrid_scaling_factor) {
- u8 cpu_type = 0;
-
- smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1);
+ struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+ u8 cpu_type = c->topo.intel_type;
/*
* Return the hybrid scaling factor for P-cores and use the
* default core scaling for E-cores.
*/
- if (cpu_type == 0x40)
+ if (cpu_type == INTEL_CPU_TYPE_CORE)
return hybrid_scaling_factor;
- if (cpu_type == 0x20)
+ if (cpu_type == INTEL_CPU_TYPE_ATOM)
return core_get_scaling();
}
@@ -3688,6 +3686,15 @@ static int __init intel_pstate_init(void)
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return -ENODEV;
+ /*
+ * The Intel pstate driver will be ignored if the platform
+ * firmware has its own power management modes.
+ */
+ if (intel_pstate_platform_pwr_mgmt_exists()) {
+ pr_info("P-states controlled by the platform\n");
+ return -ENODEV;
+ }
+
id = x86_match_cpu(hwp_support_ids);
if (id) {
hwp_forced = intel_pstate_hwp_is_enabled();
@@ -3743,15 +3750,6 @@ static int __init intel_pstate_init(void)
default_driver = &intel_cpufreq;
hwp_cpu_matched:
- /*
- * The Intel pstate driver will be ignored if the platform
- * firmware has its own power management modes.
- */
- if (intel_pstate_platform_pwr_mgmt_exists()) {
- pr_info("P-states controlled by the platform\n");
- return -ENODEV;
- }
-
if (!hwp_active && hwp_only)
return -ENOTSUPP;
@@ -3835,6 +3833,9 @@ static int __init intel_pstate_setup(char *str)
if (!strcmp(str, "no_hwp"))
no_hwp = 1;
+ if (!strcmp(str, "no_cas"))
+ no_cas = true;
+
if (!strcmp(str, "force"))
force_load = 1;
if (!strcmp(str, "hwp_only"))
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index 312f2654d1d5..24b285cbeb8d 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -96,7 +96,6 @@ static struct cpufreq_driver kirkwood_cpufreq_driver = {
.target_index = kirkwood_cpufreq_target,
.init = kirkwood_cpufreq_cpu_init,
.name = "kirkwood-cpufreq",
- .attr = cpufreq_generic_attr,
};
static int kirkwood_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index bd6fe8638d39..68ccd73c8129 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -906,7 +906,6 @@ static struct cpufreq_driver longhaul_driver = {
.get = longhaul_get,
.init = longhaul_cpu_init,
.name = "longhaul",
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id longhaul_id[] = {
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index ed1a6dbad638..39a6c4315a60 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -91,7 +91,6 @@ static struct cpufreq_driver loongson2_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = loongson2_cpufreq_target,
.get = cpufreq_generic_get,
- .attr = cpufreq_generic_attr,
};
static const struct platform_device_id platform_device_ids[] = {
diff --git a/drivers/cpufreq/loongson3_cpufreq.c b/drivers/cpufreq/loongson3_cpufreq.c
index bd34bf0fafa5..1e8715ea1b77 100644
--- a/drivers/cpufreq/loongson3_cpufreq.c
+++ b/drivers/cpufreq/loongson3_cpufreq.c
@@ -299,15 +299,6 @@ static int loongson3_cpufreq_cpu_init(struct cpufreq_policy *policy)
per_cpu(freq_data, i) = per_cpu(freq_data, cpu);
}
- if (policy_has_boost_freq(policy)) {
- ret = cpufreq_enable_boost_support();
- if (ret < 0) {
- pr_warn("cpufreq: Failed to enable boost: %d\n", ret);
- return ret;
- }
- loongson3_cpufreq_driver.boost_enabled = true;
- }
-
return 0;
}
@@ -337,8 +328,8 @@ static struct cpufreq_driver loongson3_cpufreq_driver = {
.offline = loongson3_cpufreq_cpu_offline,
.get = loongson3_cpufreq_get,
.target_index = loongson3_cpufreq_target,
- .attr = cpufreq_generic_attr,
.verify = cpufreq_generic_frequency_table_verify,
+ .set_boost = cpufreq_boost_set_sw,
.suspend = cpufreq_generic_suspend,
};
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index 9252ebd60373..74f1b4c796e4 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -293,7 +293,6 @@ static struct cpufreq_driver cpufreq_mtk_hw_driver = {
.register_em = mtk_cpufreq_register_em,
.fast_switch = mtk_cpufreq_hw_fast_switch,
.name = "mtk-cpufreq-hw",
- .attr = cpufreq_generic_attr,
};
static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
@@ -304,7 +303,7 @@ static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
struct regulator *cpu_reg;
/* Make sure that all CPU supplies are available before proceeding. */
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev)
return dev_err_probe(&pdev->dev, -EPROBE_DEFER,
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index 663f61565cf7..f3f02c4b6888 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -618,7 +618,6 @@ static struct cpufreq_driver mtk_cpufreq_driver = {
.exit = mtk_cpufreq_exit,
.register_em = cpufreq_register_em_with_opp,
.name = "mtk-cpufreq",
- .attr = cpufreq_generic_attr,
};
static int mtk_cpufreq_probe(struct platform_device *pdev)
@@ -632,7 +631,7 @@ static int mtk_cpufreq_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, -ENODEV,
"failed to get mtk cpufreq platform data\n");
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
info = mtk_cpu_dvfs_info_lookup(cpu);
if (info)
continue;
diff --git a/drivers/cpufreq/mvebu-cpufreq.c b/drivers/cpufreq/mvebu-cpufreq.c
index 7f3cfe668f30..2aad4c04673c 100644
--- a/drivers/cpufreq/mvebu-cpufreq.c
+++ b/drivers/cpufreq/mvebu-cpufreq.c
@@ -56,7 +56,7 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
* it), and registers the clock notifier that will take care
* of doing the PMSU part of a frequency transition.
*/
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
struct device *cpu_dev;
struct clk *clk;
int ret;
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 106220c0fd11..bbb01d93b54b 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -147,7 +147,6 @@ static struct cpufreq_driver omap_driver = {
.exit = omap_cpu_exit,
.register_em = cpufreq_register_em_with_opp,
.name = "omap",
- .attr = cpufreq_generic_attr,
};
static int omap_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index ef0a3216a386..69c19233fcd4 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -227,7 +227,6 @@ static struct cpufreq_driver p4clockmod_driver = {
.init = cpufreq_p4_cpu_init,
.get = cpufreq_p4_get,
.name = "p4-clockmod",
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id cpufreq_p4_id[] = {
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index 5fc9cb480516..a3931349360f 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -245,7 +245,6 @@ static struct cpufreq_driver pas_cpufreq_driver = {
.exit = pas_cpufreq_cpu_exit,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = pas_cpufreq_target,
- .attr = cpufreq_generic_attr,
};
/*
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index 6c9f0888a2a7..a22c22bd693a 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -439,7 +439,6 @@ static struct cpufreq_driver pmac_cpufreq_driver = {
.suspend = pmac_cpufreq_suspend,
.resume = pmac_cpufreq_resume,
.flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
- .attr = cpufreq_generic_attr,
.name = "powermac",
};
diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c
index 74ff6c47df29..80897ec8f00e 100644
--- a/drivers/cpufreq/pmac64-cpufreq.c
+++ b/drivers/cpufreq/pmac64-cpufreq.c
@@ -332,7 +332,6 @@ static struct cpufreq_driver g5_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = g5_cpufreq_target,
.get = g5_cpufreq_get_speed,
- .attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index f0a4a6c31204..99d2244e03b0 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -253,7 +253,6 @@ static struct cpufreq_driver powernow_k6_driver = {
.exit = powernow_k6_cpu_exit,
.get = powernow_k6_get,
.name = "powernow-k6",
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id powernow_k6_ids[] = {
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 4271446c8725..fb2197dc170f 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -667,7 +667,6 @@ static struct cpufreq_driver powernow_driver = {
.init = powernow_cpu_init,
.exit = powernow_cpu_exit,
.name = "powernow-k7",
- .attr = cpufreq_generic_attr,
};
static int __init powernow_init(void)
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index a01170f7d01c..4e3ba6e68c32 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1143,7 +1143,6 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
.exit = powernowk8_cpu_exit,
.get = powernowk8_get,
.name = "powernow-k8",
- .attr = cpufreq_generic_attr,
};
static void __request_acpi_cpufreq(void)
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index ae79d909943b..6094c530bf57 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -386,12 +386,8 @@ static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy,
static struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
__ATTR_RO(cpuinfo_nominal_freq);
-#define SCALING_BOOST_FREQS_ATTR_INDEX 2
-
static struct freq_attr *powernv_cpu_freq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
&cpufreq_freq_attr_cpuinfo_nominal_freq,
- &cpufreq_freq_attr_scaling_boost_freqs,
NULL,
};
@@ -1128,9 +1124,7 @@ static int __init powernv_cpufreq_init(void)
goto out;
if (powernv_pstate_info.wof_enabled)
- powernv_cpufreq_driver.boost_enabled = true;
- else
- powernv_cpu_freq_attr[SCALING_BOOST_FREQS_ATTR_INDEX] = NULL;
+ powernv_cpufreq_driver.set_boost = cpufreq_boost_set_sw;
rc = cpufreq_register_driver(&powernv_cpufreq_driver);
if (rc) {
@@ -1138,9 +1132,6 @@ static int __init powernv_cpufreq_init(void)
goto cleanup;
}
- if (powernv_pstate_info.wof_enabled)
- cpufreq_enable_boost_support();
-
register_reboot_notifier(&powernv_cpufreq_reboot_nb);
opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
deleted file mode 100644
index 98595b3ea13f..000000000000
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ /dev/null
@@ -1,173 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * cpufreq driver for the cell processor
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- */
-
-#include <linux/cpufreq.h>
-#include <linux/module.h>
-#include <linux/of.h>
-
-#include <asm/machdep.h>
-#include <asm/cell-regs.h>
-
-#include "ppc_cbe_cpufreq.h"
-
-/* the CBE supports an 8 step frequency scaling */
-static struct cpufreq_frequency_table cbe_freqs[] = {
- {0, 1, 0},
- {0, 2, 0},
- {0, 3, 0},
- {0, 4, 0},
- {0, 5, 0},
- {0, 6, 0},
- {0, 8, 0},
- {0, 10, 0},
- {0, 0, CPUFREQ_TABLE_END},
-};
-
-/*
- * hardware specific functions
- */
-
-static int set_pmode(unsigned int cpu, unsigned int slow_mode)
-{
- int rc;
-
- if (cbe_cpufreq_has_pmi)
- rc = cbe_cpufreq_set_pmode_pmi(cpu, slow_mode);
- else
- rc = cbe_cpufreq_set_pmode(cpu, slow_mode);
-
- pr_debug("register contains slow mode %d\n", cbe_cpufreq_get_pmode(cpu));
-
- return rc;
-}
-
-/*
- * cpufreq functions
- */
-
-static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
-{
- struct cpufreq_frequency_table *pos;
- const u32 *max_freqp;
- u32 max_freq;
- int cur_pmode;
- struct device_node *cpu;
-
- cpu = of_get_cpu_node(policy->cpu, NULL);
-
- if (!cpu)
- return -ENODEV;
-
- pr_debug("init cpufreq on CPU %d\n", policy->cpu);
-
- /*
- * Let's check we can actually get to the CELL regs
- */
- if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
- !cbe_get_cpu_mic_tm_regs(policy->cpu)) {
- pr_info("invalid CBE regs pointers for cpufreq\n");
- of_node_put(cpu);
- return -EINVAL;
- }
-
- max_freqp = of_get_property(cpu, "clock-frequency", NULL);
-
- of_node_put(cpu);
-
- if (!max_freqp)
- return -EINVAL;
-
- /* we need the freq in kHz */
- max_freq = *max_freqp / 1000;
-
- pr_debug("max clock-frequency is at %u kHz\n", max_freq);
- pr_debug("initializing frequency table\n");
-
- /* initialize frequency table */
- cpufreq_for_each_entry(pos, cbe_freqs) {
- pos->frequency = max_freq / pos->driver_data;
- pr_debug("%d: %d\n", (int)(pos - cbe_freqs), pos->frequency);
- }
-
- /* if DEBUG is enabled set_pmode() measures the latency
- * of a transition */
- policy->cpuinfo.transition_latency = 25000;
-
- cur_pmode = cbe_cpufreq_get_pmode(policy->cpu);
- pr_debug("current pmode is at %d\n",cur_pmode);
-
- policy->cur = cbe_freqs[cur_pmode].frequency;
-
-#ifdef CONFIG_SMP
- cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
-#endif
-
- policy->freq_table = cbe_freqs;
- cbe_cpufreq_pmi_policy_init(policy);
- return 0;
-}
-
-static void cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
- cbe_cpufreq_pmi_policy_exit(policy);
-}
-
-static int cbe_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int cbe_pmode_new)
-{
- pr_debug("setting frequency for cpu %d to %d kHz, " \
- "1/%d of max frequency\n",
- policy->cpu,
- cbe_freqs[cbe_pmode_new].frequency,
- cbe_freqs[cbe_pmode_new].driver_data);
-
- return set_pmode(policy->cpu, cbe_pmode_new);
-}
-
-static struct cpufreq_driver cbe_cpufreq_driver = {
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = cbe_cpufreq_target,
- .init = cbe_cpufreq_cpu_init,
- .exit = cbe_cpufreq_cpu_exit,
- .name = "cbe-cpufreq",
- .flags = CPUFREQ_CONST_LOOPS,
-};
-
-/*
- * module init and destoy
- */
-
-static int __init cbe_cpufreq_init(void)
-{
- int ret;
-
- if (!machine_is(cell))
- return -ENODEV;
-
- cbe_cpufreq_pmi_init();
-
- ret = cpufreq_register_driver(&cbe_cpufreq_driver);
- if (ret)
- cbe_cpufreq_pmi_exit();
-
- return ret;
-}
-
-static void __exit cbe_cpufreq_exit(void)
-{
- cpufreq_unregister_driver(&cbe_cpufreq_driver);
- cbe_cpufreq_pmi_exit();
-}
-
-module_init(cbe_cpufreq_init);
-module_exit(cbe_cpufreq_exit);
-
-MODULE_DESCRIPTION("cpufreq driver for Cell BE processors");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.h b/drivers/cpufreq/ppc_cbe_cpufreq.h
deleted file mode 100644
index 00cd8633b0d9..000000000000
--- a/drivers/cpufreq/ppc_cbe_cpufreq.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * ppc_cbe_cpufreq.h
- *
- * This file contains the definitions used by the cbe_cpufreq driver.
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- *
- */
-
-#include <linux/cpufreq.h>
-#include <linux/types.h>
-
-int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode);
-int cbe_cpufreq_get_pmode(int cpu);
-
-int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode);
-
-#if IS_ENABLED(CONFIG_CPU_FREQ_CBE_PMI)
-extern bool cbe_cpufreq_has_pmi;
-void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy);
-void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy);
-void cbe_cpufreq_pmi_init(void);
-void cbe_cpufreq_pmi_exit(void);
-#else
-#define cbe_cpufreq_has_pmi (0)
-static inline void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy) {}
-static inline void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy) {}
-static inline void cbe_cpufreq_pmi_init(void) {}
-static inline void cbe_cpufreq_pmi_exit(void) {}
-#endif
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c b/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c
deleted file mode 100644
index 04830cd95333..000000000000
--- a/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c
+++ /dev/null
@@ -1,102 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * pervasive backend for the cbe_cpufreq driver
- *
- * This driver makes use of the pervasive unit to
- * engage the desired frequency.
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- */
-
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/time.h>
-#include <asm/machdep.h>
-#include <asm/hw_irq.h>
-#include <asm/cell-regs.h>
-
-#include "ppc_cbe_cpufreq.h"
-
-/* to write to MIC register */
-static u64 MIC_Slow_Fast_Timer_table[] = {
- [0 ... 7] = 0x007fc00000000000ull,
-};
-
-/* more values for the MIC */
-static u64 MIC_Slow_Next_Timer_table[] = {
- 0x0000240000000000ull,
- 0x0000268000000000ull,
- 0x000029C000000000ull,
- 0x00002D0000000000ull,
- 0x0000300000000000ull,
- 0x0000334000000000ull,
- 0x000039C000000000ull,
- 0x00003FC000000000ull,
-};
-
-
-int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode)
-{
- struct cbe_pmd_regs __iomem *pmd_regs;
- struct cbe_mic_tm_regs __iomem *mic_tm_regs;
- unsigned long flags;
- u64 value;
-#ifdef DEBUG
- long time;
-#endif
-
- local_irq_save(flags);
-
- mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu);
- pmd_regs = cbe_get_cpu_pmd_regs(cpu);
-
-#ifdef DEBUG
- time = jiffies;
-#endif
-
- out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]);
- out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]);
-
- out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]);
- out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]);
-
- value = in_be64(&pmd_regs->pmcr);
- /* set bits to zero */
- value &= 0xFFFFFFFFFFFFFFF8ull;
- /* set bits to next pmode */
- value |= pmode;
-
- out_be64(&pmd_regs->pmcr, value);
-
-#ifdef DEBUG
- /* wait until new pmode appears in status register */
- value = in_be64(&pmd_regs->pmsr) & 0x07;
- while (value != pmode) {
- cpu_relax();
- value = in_be64(&pmd_regs->pmsr) & 0x07;
- }
-
- time = jiffies - time;
- time = jiffies_to_msecs(time);
- pr_debug("had to wait %lu ms for a transition using " \
- "pervasive unit\n", time);
-#endif
- local_irq_restore(flags);
-
- return 0;
-}
-
-
-int cbe_cpufreq_get_pmode(int cpu)
-{
- int ret;
- struct cbe_pmd_regs __iomem *pmd_regs;
-
- pmd_regs = cbe_get_cpu_pmd_regs(cpu);
- ret = in_be64(&pmd_regs->pmsr) & 0x07;
-
- return ret;
-}
-
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
deleted file mode 100644
index 6f0c32592416..000000000000
--- a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
+++ /dev/null
@@ -1,150 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * pmi backend for the cbe_cpufreq driver
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/timer.h>
-#include <linux/init.h>
-#include <linux/pm_qos.h>
-#include <linux/slab.h>
-
-#include <asm/processor.h>
-#include <asm/pmi.h>
-#include <asm/cell-regs.h>
-
-#ifdef DEBUG
-#include <asm/time.h>
-#endif
-
-#include "ppc_cbe_cpufreq.h"
-
-bool cbe_cpufreq_has_pmi = false;
-EXPORT_SYMBOL_GPL(cbe_cpufreq_has_pmi);
-
-/*
- * hardware specific functions
- */
-
-int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode)
-{
- int ret;
- pmi_message_t pmi_msg;
-#ifdef DEBUG
- long time;
-#endif
- pmi_msg.type = PMI_TYPE_FREQ_CHANGE;
- pmi_msg.data1 = cbe_cpu_to_node(cpu);
- pmi_msg.data2 = pmode;
-
-#ifdef DEBUG
- time = jiffies;
-#endif
- pmi_send_message(pmi_msg);
-
-#ifdef DEBUG
- time = jiffies - time;
- time = jiffies_to_msecs(time);
- pr_debug("had to wait %lu ms for a transition using " \
- "PMI\n", time);
-#endif
- ret = pmi_msg.data2;
- pr_debug("PMI returned slow mode %d\n", ret);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi);
-
-
-static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
-{
- struct cpufreq_policy *policy;
- struct freq_qos_request *req;
- u8 node, slow_mode;
- int cpu, ret;
-
- BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE);
-
- node = pmi_msg.data1;
- slow_mode = pmi_msg.data2;
-
- cpu = cbe_node_to_cpu(node);
-
- pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode);
-
- policy = cpufreq_cpu_get(cpu);
- if (!policy) {
- pr_warn("cpufreq policy not found cpu%d\n", cpu);
- return;
- }
-
- req = policy->driver_data;
-
- ret = freq_qos_update_request(req,
- policy->freq_table[slow_mode].frequency);
- if (ret < 0)
- pr_warn("Failed to update freq constraint: %d\n", ret);
- else
- pr_debug("limiting node %d to slow mode %d\n", node, slow_mode);
-
- cpufreq_cpu_put(policy);
-}
-
-static struct pmi_handler cbe_pmi_handler = {
- .type = PMI_TYPE_FREQ_CHANGE,
- .handle_pmi_message = cbe_cpufreq_handle_pmi,
-};
-
-void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy)
-{
- struct freq_qos_request *req;
- int ret;
-
- if (!cbe_cpufreq_has_pmi)
- return;
-
- req = kzalloc(sizeof(*req), GFP_KERNEL);
- if (!req)
- return;
-
- ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MAX,
- policy->freq_table[0].frequency);
- if (ret < 0) {
- pr_err("Failed to add freq constraint (%d)\n", ret);
- kfree(req);
- return;
- }
-
- policy->driver_data = req;
-}
-EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_init);
-
-void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy)
-{
- struct freq_qos_request *req = policy->driver_data;
-
- if (cbe_cpufreq_has_pmi) {
- freq_qos_remove_request(req);
- kfree(req);
- }
-}
-EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_exit);
-
-void cbe_cpufreq_pmi_init(void)
-{
- if (!pmi_register_handler(&cbe_pmi_handler))
- cbe_cpufreq_has_pmi = true;
-}
-EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_init);
-
-void cbe_cpufreq_pmi_exit(void)
-{
- pmi_unregister_handler(&cbe_pmi_handler);
- cbe_cpufreq_has_pmi = false;
-}
-EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_exit);
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index b2e7e89feaac..8422704a3b10 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -306,7 +306,7 @@ static void qcom_get_related_cpus(int index, struct cpumask *m)
struct of_phandle_args args;
int cpu, ret;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
cpu_np = of_cpu_device_node_get(cpu);
if (!cpu_np)
continue;
@@ -566,12 +566,6 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
return -ENODEV;
}
- if (policy_has_boost_freq(policy)) {
- ret = cpufreq_enable_boost_support();
- if (ret)
- dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
- }
-
return qcom_cpufreq_hw_lmh_init(policy, index);
}
@@ -595,12 +589,6 @@ static void qcom_cpufreq_ready(struct cpufreq_policy *policy)
enable_irq(data->throttle_irq);
}
-static struct freq_attr *qcom_cpufreq_hw_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- &cpufreq_freq_attr_scaling_boost_freqs,
- NULL
-};
-
static struct cpufreq_driver cpufreq_qcom_hw_driver = {
.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
@@ -615,8 +603,8 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = {
.register_em = cpufreq_register_em_with_opp,
.fast_switch = qcom_cpufreq_hw_fast_switch,
.name = "qcom-cpufreq-hw",
- .attr = qcom_cpufreq_hw_attr,
.ready = qcom_cpufreq_ready,
+ .set_boost = cpufreq_boost_set_sw,
};
static unsigned long qcom_cpufreq_hw_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index 3a8ed723a23e..54f8117103c8 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -489,7 +489,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
nvmem_cell_put(speedbin_nvmem);
}
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
struct dev_pm_opp_config config = {
.supported_hw = NULL,
};
@@ -543,7 +543,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
dev_err(cpu_dev, "Failed to register platform device\n");
free_opp:
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
dev_pm_domain_detach_list(drv->cpus[cpu].pd_list);
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
}
@@ -557,7 +557,7 @@ static void qcom_cpufreq_remove(struct platform_device *pdev)
platform_device_unregister(cpufreq_dt_pdev);
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
dev_pm_domain_detach_list(drv->cpus[cpu].pd_list);
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
}
@@ -568,7 +568,7 @@ static int qcom_cpufreq_suspend(struct device *dev)
struct qcom_cpufreq_drv *drv = dev_get_drvdata(dev);
unsigned int cpu;
- for_each_possible_cpu(cpu)
+ for_each_present_cpu(cpu)
qcom_cpufreq_suspend_pd_devs(drv, cpu);
return 0;
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index a37ce051236c..8d1f5ac59132 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -254,7 +254,6 @@ static struct cpufreq_driver qoriq_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = qoriq_cpufreq_target,
.get = cpufreq_generic_get,
- .attr = cpufreq_generic_attr,
};
static const struct of_device_id qoriq_cpufreq_blacklist[] = {
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index 330c8d6cf93c..103d2519dff7 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -92,7 +92,6 @@ static struct cpufreq_driver sc520_freq_driver = {
.target_index = sc520_freq_target,
.init = sc520_freq_cpu_init,
.name = "sc520_freq",
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id sc520_ids[] = {
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index b8fe758aeb01..c310aeebc8f3 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -104,7 +104,7 @@ scmi_get_sharing_cpus(struct device *cpu_dev, int domain,
int cpu, tdomain;
struct device *tcpu_dev;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
if (cpu == cpu_dev->id)
continue;
@@ -171,12 +171,6 @@ scmi_get_rate_limit(u32 domain, bool has_fast_switch)
return rate_limit;
}
-static struct freq_attr *scmi_cpufreq_hw_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
- NULL,
-};
-
static int scmi_limit_notify_cb(struct notifier_block *nb, unsigned long event, void *data)
{
struct scmi_data *priv = container_of(nb, struct scmi_data, limit_notify_nb);
@@ -303,17 +297,6 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
policy->transition_delay_us =
scmi_get_rate_limit(domain, policy->fast_switch_possible);
- if (policy_has_boost_freq(policy)) {
- ret = cpufreq_enable_boost_support();
- if (ret) {
- dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
- goto out_free_table;
- } else {
- scmi_cpufreq_hw_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
- scmi_cpufreq_driver.boost_enabled = true;
- }
- }
-
ret = freq_qos_add_request(&policy->constraints, &priv->limits_freq_req, FREQ_QOS_MAX,
FREQ_QOS_MAX_DEFAULT_VALUE);
if (ret < 0) {
@@ -395,13 +378,13 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
- .attr = scmi_cpufreq_hw_attr,
.target_index = scmi_cpufreq_set_target,
.fast_switch = scmi_cpufreq_fast_switch,
.get = scmi_cpufreq_get_rate,
.init = scmi_cpufreq_init,
.exit = scmi_cpufreq_exit,
.register_em = scmi_cpufreq_register_em,
+ .set_boost = cpufreq_boost_set_sw,
};
static int scmi_cpufreq_probe(struct scmi_device *sdev)
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index cd89c1b9832c..17cda84f00df 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -39,8 +39,9 @@ static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
static int
scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
{
- u64 rate = policy->freq_table[index].frequency * 1000;
+ unsigned long freq_khz = policy->freq_table[index].frequency;
struct scpi_data *priv = policy->driver_data;
+ unsigned long rate = freq_khz * 1000;
int ret;
ret = clk_set_rate(priv->clk, rate);
@@ -48,7 +49,7 @@ scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
if (ret)
return ret;
- if (clk_get_rate(priv->clk) != rate)
+ if (clk_get_rate(priv->clk) / 1000 != freq_khz)
return -EIO;
return 0;
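
Editor's note: the reworked check compares in kHz rather than Hz because the clock provider may round the requested rate; a result that differs from freq_khz * 1000 by a few Hz is still the same operating point and should not be reported as -EIO. A tiny sketch of the comparison, with made-up numbers:

#include <stdbool.h>

/* true if the achieved rate (Hz) lands in the requested kHz bucket */
static bool rate_matches(unsigned long achieved_hz, unsigned long freq_khz)
{
	return achieved_hz / 1000 == freq_khz;
}

/*
 * Example: the table asks for 1000000 kHz (1 GHz) and the clock code
 * settles on 1000000999 Hz.  An exact Hz comparison fails, but the kHz
 * comparison above succeeds, so no spurious -EIO is returned.
 */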
@@ -64,7 +65,7 @@ scpi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
if (domain < 0)
return domain;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
if (cpu == cpu_dev->id)
continue;
@@ -183,7 +184,6 @@ static struct cpufreq_driver scpi_cpufreq_driver = {
CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
- .attr = cpufreq_generic_attr,
.get = scpi_cpufreq_get_rate,
.init = scpi_cpufreq_init,
.exit = scpi_cpufreq_exit,
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index aa74036d0420..9c0b01e00508 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -151,7 +151,6 @@ static struct cpufreq_driver sh_cpufreq_driver = {
.verify = sh_cpufreq_verify,
.init = sh_cpufreq_cpu_init,
.exit = sh_cpufreq_cpu_exit,
- .attr = cpufreq_generic_attr,
};
static int __init sh_cpufreq_module_init(void)
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index d8ab5b01d46d..707c71090cc3 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -165,7 +165,6 @@ static struct cpufreq_driver spear_cpufreq_driver = {
.target_index = spear_cpufreq_target,
.get = cpufreq_generic_get,
.init = spear_cpufreq_init,
- .attr = cpufreq_generic_attr,
};
static int spear_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index 3fafedb983b5..3e6e85a92212 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -507,7 +507,6 @@ static struct cpufreq_driver centrino_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = centrino_target,
.get = get_cur_freq,
- .attr = cpufreq_generic_attr,
};
/*
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index f2076d72bf39..262cfbde9ca7 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -315,7 +315,6 @@ static struct cpufreq_driver speedstep_driver = {
.target_index = speedstep_target,
.init = speedstep_cpu_init,
.get = speedstep_get,
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id ss_smi_ids[] = {
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index 0ce9d4b6dfcc..39265884c3f1 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -295,7 +295,6 @@ static struct cpufreq_driver speedstep_driver = {
.init = speedstep_cpu_init,
.get = speedstep_get,
.resume = speedstep_resume,
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id ss_smi_ids[] = {
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 17d6a149f580..47d6840b3489 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -262,7 +262,7 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
snprintf(name, sizeof(name), "speed%d", speed);
config.prop_name = name;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
struct device *cpu_dev = get_cpu_device(cpu);
if (!cpu_dev) {
@@ -288,7 +288,7 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
pr_err("Failed to register platform device\n");
free_opp:
- for_each_possible_cpu(cpu)
+ for_each_present_cpu(cpu)
dev_pm_opp_clear_config(opp_tokens[cpu]);
kfree(opp_tokens);
@@ -302,7 +302,7 @@ static void sun50i_cpufreq_nvmem_remove(struct platform_device *pdev)
platform_device_unregister(cpufreq_dt_pdev);
- for_each_possible_cpu(cpu)
+ for_each_present_cpu(cpu)
dev_pm_opp_clear_config(opp_tokens[cpu]);
kfree(opp_tokens);
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index c7761eb99f3c..cbabb726c664 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -73,11 +73,18 @@ static int tegra186_cpufreq_init(struct cpufreq_policy *policy)
{
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
unsigned int cluster = data->cpus[policy->cpu].bpmp_cluster_id;
+ u32 cpu;
policy->freq_table = data->clusters[cluster].table;
policy->cpuinfo.transition_latency = 300 * 1000;
policy->driver_data = NULL;
+ /* set same policy for all cpus in a cluster */
+ for (cpu = 0; cpu < ARRAY_SIZE(tegra186_cpus); cpu++) {
+ if (data->cpus[cpu].bpmp_cluster_id == cluster)
+ cpumask_set_cpu(cpu, policy->cpus);
+ }
+
return 0;
}
@@ -123,7 +130,6 @@ static struct cpufreq_driver tegra186_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = tegra186_cpufreq_set_target,
.init = tegra186_cpufreq_init,
- .attr = cpufreq_generic_attr,
};
static struct cpufreq_frequency_table *init_vhint_table(
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 9055dd398e7f..9b4f516f313e 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -589,7 +589,6 @@ static struct cpufreq_driver tegra194_cpufreq_driver = {
.exit = tegra194_cpufreq_exit,
.online = tegra194_cpufreq_online,
.offline = tegra194_cpufreq_offline,
- .attr = cpufreq_generic_attr,
};
static struct tegra_cpufreq_ops tegra194_cpufreq_ops = {
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index 0f86cdb7ec8a..65fea47b82e6 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -471,7 +471,6 @@ static struct cpufreq_driver ve_spc_cpufreq_driver = {
.init = ve_spc_cpufreq_init,
.exit = ve_spc_cpufreq_exit,
.register_em = cpufreq_register_em_with_opp,
- .attr = cpufreq_generic_attr,
};
#ifdef CONFIG_BL_SWITCHER
diff --git a/drivers/cpufreq/virtual-cpufreq.c b/drivers/cpufreq/virtual-cpufreq.c
index a050b3a6737f..7dd1b0c263c7 100644
--- a/drivers/cpufreq/virtual-cpufreq.c
+++ b/drivers/cpufreq/virtual-cpufreq.c
@@ -138,7 +138,7 @@ static int virt_cpufreq_get_sharing_cpus(struct cpufreq_policy *policy)
cur_perf_domain = readl_relaxed(base + policy->cpu *
PER_CPU_OFFSET + REG_PERF_DOMAIN_OFFSET);
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev)
continue;
@@ -265,7 +265,6 @@ static struct cpufreq_driver cpufreq_virt_driver = {
.verify = virt_cpufreq_verify_policy,
.target = virt_cpufreq_target,
.fast_switch = virt_cpufreq_fast_switch,
- .attr = cpufreq_generic_attr,
};
static int virt_cpufreq_driver_probe(struct platform_device *pdev)
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index d103342b7cfc..1de9e92c5b0f 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -3,6 +3,9 @@
# Makefile for cpuidle.
#
+# Branch profiling isn't noinstr-safe
+ccflags-$(CONFIG_TRACE_BRANCH_PROFILING) += -DDISABLE_BRANCH_PROFILING
+
obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
obj-$(CONFIG_DT_IDLE_STATES) += dt_idle_states.o
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index caba6f4bb1b7..e044fefdb816 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -137,9 +137,9 @@ out_kfree_drv:
/*
* arm_idle_init - Initializes arm cpuidle driver
*
- * Initializes arm cpuidle driver for all CPUs, if any CPU fails
- * to register cpuidle driver then rollback to cancel all CPUs
- * registration.
+ * Initializes the arm cpuidle driver for all present CPUs; if any
+ * CPU fails to register the cpuidle driver, roll back and cancel
+ * the registration of all CPUs.
*/
static int __init arm_idle_init(void)
{
@@ -147,7 +147,7 @@ static int __init arm_idle_init(void)
struct cpuidle_driver *drv;
struct cpuidle_device *dev;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
ret = arm_idle_init_cpu(cpu);
if (ret)
goto out_fail;
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
index 74972deda0ea..4abba42fcc31 100644
--- a/drivers/cpuidle/cpuidle-big_little.c
+++ b/drivers/cpuidle/cpuidle-big_little.c
@@ -148,7 +148,7 @@ static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int part_id)
if (!cpumask)
return -ENOMEM;
- for_each_possible_cpu(cpu)
+ for_each_present_cpu(cpu)
if (smp_cpuid_part(cpu) == part_id)
cpumask_set_cpu(cpu, cpumask);
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index 2562dc001fc1..b46a83f5ffe4 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -25,6 +25,7 @@
#include <linux/syscore_ops.h>
#include <asm/cpuidle.h>
+#include <trace/events/power.h>
#include "cpuidle-psci.h"
#include "dt_idle_states.h"
@@ -74,7 +75,9 @@ static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
if (!state)
state = states[idx];
+ trace_psci_domain_idle_enter(dev->cpu, state, s2idle);
ret = psci_cpu_suspend_enter(state) ? -1 : idx;
+ trace_psci_domain_idle_exit(dev->cpu, state, s2idle);
if (s2idle)
dev_pm_genpd_resume(pd_dev);
@@ -400,7 +403,7 @@ deinit:
/*
* psci_idle_probe - Initializes PSCI cpuidle driver
*
- * Initializes PSCI cpuidle driver for all CPUs, if any CPU fails
+ * Initializes PSCI cpuidle driver for all present CPUs, if any CPU fails
* to register cpuidle driver then rollback to cancel all CPUs
* registration.
*/
@@ -410,7 +413,7 @@ static int psci_cpuidle_probe(struct platform_device *pdev)
struct cpuidle_driver *drv;
struct cpuidle_device *dev;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
ret = psci_idle_init_cpu(&pdev->dev, cpu);
if (ret)
goto out_fail;
diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c
index 3ab240e0e122..5f386761b156 100644
--- a/drivers/cpuidle/cpuidle-qcom-spm.c
+++ b/drivers/cpuidle/cpuidle-qcom-spm.c
@@ -135,7 +135,7 @@ static int spm_cpuidle_drv_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(&pdev->dev, ret, "set warm boot addr failed");
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
ret = spm_cpuidle_register(&pdev->dev, cpu);
if (ret && ret != -ENODEV) {
dev_err(&pdev->dev,
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
index 0c92a628bbd4..0fe1ece9fbdc 100644
--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
@@ -529,8 +529,8 @@ static int sbi_cpuidle_probe(struct platform_device *pdev)
return ret;
}
- /* Initialize CPU idle driver for each CPU */
- for_each_possible_cpu(cpu) {
+ /* Initialize CPU idle driver for each present CPU */
+ for_each_present_cpu(cpu) {
ret = sbi_cpuidle_init_cpu(&pdev->dev, cpu);
if (ret) {
pr_debug("HART%ld: idle driver init failed\n",
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 28363bfa3e4c..39aa0aea61c6 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -41,7 +41,7 @@
* the C state is required to actually break even on this cost. CPUIDLE
* provides us this duration in the "target_residency" field. So all that we
* need is a good prediction of how long we'll be idle. Like the traditional
- * menu governor, we start with the actual known "next timer event" time.
+ * menu governor, we take the actual known "next timer event" time.
*
* Since there are other source of wakeups (interrupts for example) than
* the next timer event, this estimation is rather optimistic. To get a
@@ -50,30 +50,21 @@
* duration always was 50% of the next timer tick, the correction factor will
* be 0.5.
*
- * menu uses a running average for this correction factor, however it uses a
- * set of factors, not just a single factor. This stems from the realization
- * that the ratio is dependent on the order of magnitude of the expected
- * duration; if we expect 500 milliseconds of idle time the likelihood of
- * getting an interrupt very early is much higher than if we expect 50 micro
- * seconds of idle time. A second independent factor that has big impact on
- * the actual factor is if there is (disk) IO outstanding or not.
- * (as a special twist, we consider every sleep longer than 50 milliseconds
- * as perfect; there are no power gains for sleeping longer than this)
- *
- * For these two reasons we keep an array of 12 independent factors, that gets
- * indexed based on the magnitude of the expected duration as well as the
- * "is IO outstanding" property.
+ * menu uses a running average for this correction factor, but it uses a set of
+ * factors, not just a single factor. This stems from the realization that the
+ * ratio is dependent on the order of magnitude of the expected duration; if we
+ * expect 500 milliseconds of idle time the likelihood of getting an interrupt
+ * very early is much higher than if we expect 50 microseconds of idle time.
+ * For this reason, menu keeps an array of 6 independent factors, that gets
+ * indexed based on the magnitude of the expected duration.
*
* Repeatable-interval-detector
* ----------------------------
* There are some cases where "next timer" is a completely unusable predictor:
* Those cases where the interval is fixed, for example due to hardware
- * interrupt mitigation, but also due to fixed transfer rate devices such as
- * mice.
+ * interrupt mitigation, but also due to fixed transfer rate devices like mice.
* For this, we use a different predictor: We track the duration of the last 8
- * intervals and if the stand deviation of these 8 intervals is below a
- * threshold value, we use the average of these intervals as prediction.
- *
+ * intervals and use them to estimate the duration of the next one.
*/
struct menu_device {
@@ -116,53 +107,52 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
*/
static unsigned int get_typical_interval(struct menu_device *data)
{
- int i, divisor;
- unsigned int min, max, thresh, avg;
- uint64_t sum, variance;
-
- thresh = INT_MAX; /* Discard outliers above this value */
+ s64 value, min_thresh = -1, max_thresh = UINT_MAX;
+ unsigned int max, min, divisor;
+ u64 avg, variance, avg_sq;
+ int i;
again:
-
- /* First calculate the average of past intervals */
- min = UINT_MAX;
+ /* Compute the average and variance of past intervals. */
max = 0;
- sum = 0;
+ min = UINT_MAX;
+ avg = 0;
+ variance = 0;
divisor = 0;
for (i = 0; i < INTERVALS; i++) {
- unsigned int value = data->intervals[i];
- if (value <= thresh) {
- sum += value;
- divisor++;
- if (value > max)
- max = value;
-
- if (value < min)
- min = value;
- }
+ value = data->intervals[i];
+ /*
+ * Discard the samples outside the interval between the min and
+ * max thresholds.
+ */
+ if (value <= min_thresh || value >= max_thresh)
+ continue;
+
+ divisor++;
+
+ avg += value;
+ variance += value * value;
+
+ if (value > max)
+ max = value;
+
+ if (value < min)
+ min = value;
}
if (!max)
return UINT_MAX;
- if (divisor == INTERVALS)
- avg = sum >> INTERVAL_SHIFT;
- else
- avg = div_u64(sum, divisor);
-
- /* Then try to determine variance */
- variance = 0;
- for (i = 0; i < INTERVALS; i++) {
- unsigned int value = data->intervals[i];
- if (value <= thresh) {
- int64_t diff = (int64_t)value - avg;
- variance += diff * diff;
- }
- }
- if (divisor == INTERVALS)
+ if (divisor == INTERVALS) {
+ avg >>= INTERVAL_SHIFT;
variance >>= INTERVAL_SHIFT;
- else
+ } else {
+ do_div(avg, divisor);
do_div(variance, divisor);
+ }
+
+ avg_sq = avg * avg;
+ variance -= avg_sq;
/*
* The typical interval is obtained when standard deviation is
@@ -177,25 +167,40 @@ again:
* Use this result only if there is no timer to wake us up sooner.
*/
if (likely(variance <= U64_MAX/36)) {
- if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3))
- || variance <= 400) {
+ if ((avg_sq > variance * 36 && divisor * 4 >= INTERVALS * 3) ||
+ variance <= 400)
return avg;
- }
}
/*
- * If we have outliers to the upside in our distribution, discard
- * those by setting the threshold to exclude these outliers, then
+ * If there are outliers, discard them by setting thresholds to exclude
+ * data points at a large enough distance from the average, then
* calculate the average and standard deviation again. Once we get
- * down to the bottom 3/4 of our samples, stop excluding samples.
+ * down to the last 3/4 of our samples, stop excluding samples.
*
* This can deal with workloads that have long pauses interspersed
* with sporadic activity with a bunch of short pauses.
*/
- if ((divisor * 4) <= INTERVALS * 3)
+ if (divisor * 4 <= INTERVALS * 3) {
+ /*
+ * If there are sufficiently many data points still under
+ * consideration after the outliers have been eliminated,
+ * returning without a prediction would be a mistake because it
+ * is likely that the next interval will not exceed the current
+ * maximum, so return the latter in that case.
+ */
+ if (divisor >= INTERVALS / 2)
+ return max;
+
return UINT_MAX;
+ }
+
+ /* Update the thresholds for the next round. */
+ if (avg - min > max - avg)
+ min_thresh = min;
+ else
+ max_thresh = max;
- thresh = max - 1;
goto again;
}
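The get_typical_interval() rewrite above computes the mean and variance in a single pass over the window using Var(x) = E[x^2] - E[x]^2, and then discards outliers on whichever side lies farther from the mean rather than only clipping large samples. A standalone sketch of the single-pass statistic, simplified to a fixed window with no outlier re-run and illustrative names only:

#include <stdint.h>

#define INTERVALS 8	/* sample window size, as in the governor */

/* Return the mean if the samples look "typical", UINT32_MAX otherwise. */
static uint32_t typical_interval(const uint32_t s[INTERVALS])
{
	uint64_t sum = 0, sum_sq = 0;
	uint64_t avg, var;
	int i;

	for (i = 0; i < INTERVALS; i++) {
		sum += s[i];
		sum_sq += (uint64_t)s[i] * s[i];
	}

	avg = sum / INTERVALS;
	var = sum_sq / INTERVALS - avg * avg;	/* E[x^2] - E[x]^2 */

	/* Accept when the standard deviation is below avg / 6 (or tiny). */
	if (var <= UINT64_MAX / 36 && (avg * avg > var * 36 || var <= 400))
		return (uint32_t)avg;

	return UINT32_MAX;	/* no reliable prediction */
}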
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 19ab145f912e..47082782008a 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -855,5 +855,6 @@ config CRYPTO_DEV_SA2UL
source "drivers/crypto/aspeed/Kconfig"
source "drivers/crypto/starfive/Kconfig"
+source "drivers/crypto/inside-secure/eip93/Kconfig"
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index fef18ffdb128..c97f0ebc55ec 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -43,7 +43,7 @@ obj-$(CONFIG_CRYPTO_DEV_TEGRA) += tegra/
obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
#obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
-obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
+obj-y += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
obj-y += xilinx/
obj-y += hisilicon/
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 1c1f57baef0e..500b08e42282 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -2897,13 +2897,13 @@ static int artpec6_crypto_probe(struct platform_device *pdev)
tasklet_init(&ac->task, artpec6_crypto_task,
(unsigned long)ac);
- ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
+ ac->pad_buffer = devm_kcalloc(&pdev->dev, 2, ARTPEC_CACHE_LINE_MAX,
GFP_KERNEL);
if (!ac->pad_buffer)
return -ENOMEM;
ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX);
- ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
+ ac->zero_buffer = devm_kcalloc(&pdev->dev, 2, ARTPEC_CACHE_LINE_MAX,
GFP_KERNEL);
if (!ac->zero_buffer)
return -ENOMEM;
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 9e6798efbfb7..6b80d033648e 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -15,6 +15,7 @@
#include <linux/kthread.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
+#include <linux/string_choices.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/bitops.h>
@@ -140,8 +141,8 @@ spu_skcipher_rx_sg_create(struct brcm_message *mssg,
struct iproc_ctx_s *ctx = rctx->ctx;
u32 datalen; /* Number of bytes of response data expected */
- mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
- rctx->gfp);
+ mssg->spu.dst = kmalloc_array(rx_frag_num, sizeof(struct scatterlist),
+ rctx->gfp);
if (!mssg->spu.dst)
return -ENOMEM;
@@ -204,8 +205,8 @@ spu_skcipher_tx_sg_create(struct brcm_message *mssg,
u32 datalen; /* Number of bytes of response data expected */
u32 stat_len;
- mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
- rctx->gfp);
+ mssg->spu.src = kmalloc_array(tx_frag_num, sizeof(struct scatterlist),
+ rctx->gfp);
if (unlikely(!mssg->spu.src))
return -ENOMEM;
@@ -531,8 +532,8 @@ spu_ahash_rx_sg_create(struct brcm_message *mssg,
struct scatterlist *sg; /* used to build sgs in mbox message */
struct iproc_ctx_s *ctx = rctx->ctx;
- mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
- rctx->gfp);
+ mssg->spu.dst = kmalloc_array(rx_frag_num, sizeof(struct scatterlist),
+ rctx->gfp);
if (!mssg->spu.dst)
return -ENOMEM;
@@ -586,8 +587,8 @@ spu_ahash_tx_sg_create(struct brcm_message *mssg,
u32 datalen; /* Number of bytes of response data expected */
u32 stat_len;
- mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
- rctx->gfp);
+ mssg->spu.src = kmalloc_array(tx_frag_num, sizeof(struct scatterlist),
+ rctx->gfp);
if (!mssg->spu.src)
return -ENOMEM;
@@ -1076,8 +1077,8 @@ static int spu_aead_rx_sg_create(struct brcm_message *mssg,
/* have to catch gcm pad in separate buffer */
rx_frag_num++;
- mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
- rctx->gfp);
+ mssg->spu.dst = kmalloc_array(rx_frag_num, sizeof(struct scatterlist),
+ rctx->gfp);
if (!mssg->spu.dst)
return -ENOMEM;
@@ -1178,8 +1179,8 @@ static int spu_aead_tx_sg_create(struct brcm_message *mssg,
u32 assoc_offset = 0;
u32 stat_len;
- mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
- rctx->gfp);
+ mssg->spu.src = kmalloc_array(tx_frag_num, sizeof(struct scatterlist),
+ rctx->gfp);
if (!mssg->spu.src)
return -ENOMEM;
@@ -2687,7 +2688,7 @@ static int aead_enqueue(struct aead_request *req, bool is_encrypt)
flow_log(" iv_ctr_len:%u\n", rctx->iv_ctr_len);
flow_dump(" iv: ", req->iv, rctx->iv_ctr_len);
flow_log(" authkeylen:%u\n", ctx->authkeylen);
- flow_log(" is_esp: %s\n", ctx->is_esp ? "yes" : "no");
+ flow_log(" is_esp: %s\n", str_yes_no(ctx->is_esp));
if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
flow_log(" max_payload infinite");
diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
index 3fdc64b5a65e..ce322cf1baa5 100644
--- a/drivers/crypto/bcm/spu2.c
+++ b/drivers/crypto/bcm/spu2.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include "util.h"
#include "spu.h"
@@ -999,7 +1000,7 @@ u32 spu2_create_request(u8 *spu_hdr,
req_opts->is_inbound, req_opts->auth_first);
flow_log(" cipher alg:%u mode:%u type %u\n", cipher_parms->alg,
cipher_parms->mode, cipher_parms->type);
- flow_log(" is_esp: %s\n", req_opts->is_esp ? "yes" : "no");
+ flow_log(" is_esp: %s\n", str_yes_no(req_opts->is_esp));
flow_log(" key: %d\n", cipher_parms->key_len);
flow_dump(" key: ", cipher_parms->key_buf, cipher_parms->key_len);
flow_log(" iv: %d\n", cipher_parms->iv_len);
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index e809d030ab11..107ccb2ade42 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -19,6 +19,7 @@
#include <linux/dma-mapping.h>
#include <linux/fsl/mc.h>
#include <linux/kernel.h>
+#include <linux/string_choices.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
#include <crypto/xts.h>
@@ -5175,7 +5176,7 @@ static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
return err;
}
- dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
+ dev_dbg(dev, "disable: %s\n", str_false_true(enabled));
for (i = 0; i < priv->num_pairs; i++) {
ppriv = per_cpu_ptr(priv->ppriv, i);
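The <linux/string_choices.h> conversions in the bcm and caam hunks above are purely cosmetic; each helper encodes its ternary in its name and returns the first word when the condition is true:

	/* Same output as the open-coded ternaries they replace: */
	flow_log(" is_esp: %s\n", str_yes_no(ctx->is_esp));       /* "yes" / "no"     */
	dev_dbg(dev, "disable: %s\n", str_false_true(enabled));   /* "false" / "true" */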
diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c
index 1046a746d36f..02e87f2d50db 100644
--- a/drivers/crypto/cavium/zip/zip_crypto.c
+++ b/drivers/crypto/cavium/zip/zip_crypto.c
@@ -195,48 +195,8 @@ static int zip_decompress(const u8 *src, unsigned int slen,
return ret;
}
-/* Legacy Compress framework start */
-int zip_alloc_comp_ctx_deflate(struct crypto_tfm *tfm)
-{
- struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
-
- return zip_ctx_init(zip_ctx, 0);
-}
-
-int zip_alloc_comp_ctx_lzs(struct crypto_tfm *tfm)
-{
- struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
-
- return zip_ctx_init(zip_ctx, 1);
-}
-
-void zip_free_comp_ctx(struct crypto_tfm *tfm)
-{
- struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
-
- zip_ctx_exit(zip_ctx);
-}
-
-int zip_comp_compress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
-
- return zip_compress(src, slen, dst, dlen, zip_ctx);
-}
-
-int zip_comp_decompress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
-
- return zip_decompress(src, slen, dst, dlen, zip_ctx);
-} /* Legacy compress framework end */
-
/* SCOMP framework start */
-void *zip_alloc_scomp_ctx_deflate(struct crypto_scomp *tfm)
+void *zip_alloc_scomp_ctx_deflate(void)
{
int ret;
struct zip_kernel_ctx *zip_ctx;
@@ -255,7 +215,7 @@ void *zip_alloc_scomp_ctx_deflate(struct crypto_scomp *tfm)
return zip_ctx;
}
-void *zip_alloc_scomp_ctx_lzs(struct crypto_scomp *tfm)
+void *zip_alloc_scomp_ctx_lzs(void)
{
int ret;
struct zip_kernel_ctx *zip_ctx;
@@ -274,7 +234,7 @@ void *zip_alloc_scomp_ctx_lzs(struct crypto_scomp *tfm)
return zip_ctx;
}
-void zip_free_scomp_ctx(struct crypto_scomp *tfm, void *ctx)
+void zip_free_scomp_ctx(void *ctx)
{
struct zip_kernel_ctx *zip_ctx = ctx;
diff --git a/drivers/crypto/cavium/zip/zip_crypto.h b/drivers/crypto/cavium/zip/zip_crypto.h
index b59ddfcacd34..10899ece2d1f 100644
--- a/drivers/crypto/cavium/zip/zip_crypto.h
+++ b/drivers/crypto/cavium/zip/zip_crypto.h
@@ -46,7 +46,6 @@
#ifndef __ZIP_CRYPTO_H__
#define __ZIP_CRYPTO_H__
-#include <linux/crypto.h>
#include <crypto/internal/scompress.h>
#include "common.h"
#include "zip_deflate.h"
@@ -57,19 +56,9 @@ struct zip_kernel_ctx {
struct zip_operation zip_decomp;
};
-int zip_alloc_comp_ctx_deflate(struct crypto_tfm *tfm);
-int zip_alloc_comp_ctx_lzs(struct crypto_tfm *tfm);
-void zip_free_comp_ctx(struct crypto_tfm *tfm);
-int zip_comp_compress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
-int zip_comp_decompress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
-
-void *zip_alloc_scomp_ctx_deflate(struct crypto_scomp *tfm);
-void *zip_alloc_scomp_ctx_lzs(struct crypto_scomp *tfm);
-void zip_free_scomp_ctx(struct crypto_scomp *tfm, void *zip_ctx);
+void *zip_alloc_scomp_ctx_deflate(void);
+void *zip_alloc_scomp_ctx_lzs(void);
+void zip_free_scomp_ctx(void *zip_ctx);
int zip_scomp_compress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx);
diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
index dc5b7bf7e1fd..abd58de4343d 100644
--- a/drivers/crypto/cavium/zip/zip_main.c
+++ b/drivers/crypto/cavium/zip/zip_main.c
@@ -371,36 +371,6 @@ static struct pci_driver zip_driver = {
/* Kernel Crypto Subsystem Interface */
-static struct crypto_alg zip_comp_deflate = {
- .cra_name = "deflate",
- .cra_driver_name = "deflate-cavium",
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct zip_kernel_ctx),
- .cra_priority = 300,
- .cra_module = THIS_MODULE,
- .cra_init = zip_alloc_comp_ctx_deflate,
- .cra_exit = zip_free_comp_ctx,
- .cra_u = { .compress = {
- .coa_compress = zip_comp_compress,
- .coa_decompress = zip_comp_decompress
- } }
-};
-
-static struct crypto_alg zip_comp_lzs = {
- .cra_name = "lzs",
- .cra_driver_name = "lzs-cavium",
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct zip_kernel_ctx),
- .cra_priority = 300,
- .cra_module = THIS_MODULE,
- .cra_init = zip_alloc_comp_ctx_lzs,
- .cra_exit = zip_free_comp_ctx,
- .cra_u = { .compress = {
- .coa_compress = zip_comp_compress,
- .coa_decompress = zip_comp_decompress
- } }
-};
-
static struct scomp_alg zip_scomp_deflate = {
.alloc_ctx = zip_alloc_scomp_ctx_deflate,
.free_ctx = zip_free_scomp_ctx,
@@ -431,22 +401,10 @@ static int zip_register_compression_device(void)
{
int ret;
- ret = crypto_register_alg(&zip_comp_deflate);
- if (ret < 0) {
- zip_err("Deflate algorithm registration failed\n");
- return ret;
- }
-
- ret = crypto_register_alg(&zip_comp_lzs);
- if (ret < 0) {
- zip_err("LZS algorithm registration failed\n");
- goto err_unregister_alg_deflate;
- }
-
ret = crypto_register_scomp(&zip_scomp_deflate);
if (ret < 0) {
zip_err("Deflate scomp algorithm registration failed\n");
- goto err_unregister_alg_lzs;
+ return ret;
}
ret = crypto_register_scomp(&zip_scomp_lzs);
@@ -459,18 +417,12 @@ static int zip_register_compression_device(void)
err_unregister_scomp_deflate:
crypto_unregister_scomp(&zip_scomp_deflate);
-err_unregister_alg_lzs:
- crypto_unregister_alg(&zip_comp_lzs);
-err_unregister_alg_deflate:
- crypto_unregister_alg(&zip_comp_deflate);
return ret;
}
static void zip_unregister_compression_device(void)
{
- crypto_unregister_alg(&zip_comp_deflate);
- crypto_unregister_alg(&zip_comp_lzs);
crypto_unregister_scomp(&zip_scomp_deflate);
crypto_unregister_scomp(&zip_scomp_lzs);
}
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index 248d98fd8c48..2ebc878da160 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -189,14 +189,17 @@ static bool sp_pci_is_master(struct sp_device *sp)
pdev_new = to_pci_dev(dev_new);
pdev_cur = to_pci_dev(dev_cur);
- if (pdev_new->bus->number < pdev_cur->bus->number)
- return true;
+ if (pci_domain_nr(pdev_new->bus) != pci_domain_nr(pdev_cur->bus))
+ return pci_domain_nr(pdev_new->bus) < pci_domain_nr(pdev_cur->bus);
- if (PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn))
- return true;
+ if (pdev_new->bus->number != pdev_cur->bus->number)
+ return pdev_new->bus->number < pdev_cur->bus->number;
- if (PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn))
- return true;
+ if (PCI_SLOT(pdev_new->devfn) != PCI_SLOT(pdev_cur->devfn))
+ return PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn);
+
+ if (PCI_FUNC(pdev_new->devfn) != PCI_FUNC(pdev_cur->devfn))
+ return PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn);
return false;
}
@@ -529,6 +532,7 @@ static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] },
{ PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&dev_vdata[6] },
{ PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] },
+ { PCI_VDEVICE(AMD, 0x1134), (kernel_ulong_t)&dev_vdata[7] },
{ PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] },
{ PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] },
/* Last entry must be zero */
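The sp_pci_is_master() rework above matters because the old code tested each address component for "less than" in isolation, so a lower slot number could outrank a higher bus number. A hypothetical pair of devices in the same domain shows the difference:

	/*
	 * dev_new = 01:00.0  (bus 1, slot 0,  func 0)
	 * dev_cur = 00:1f.0  (bus 0, slot 31, func 0)
	 *
	 * Old logic: bus 1 < bus 0? no; slot 0 < slot 31? yes -> dev_new is
	 * wrongly preferred even though it sits on a higher bus.
	 * New logic: the bus numbers differ, so the decision is made there
	 * and dev_cur (bus 0) correctly remains the master candidate.
	 */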
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 2a2910261210..61b5e1c5d019 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -39,6 +39,8 @@ struct hpre_ctx;
#define HPRE_DFX_SEC_TO_US 1000000
#define HPRE_DFX_US_TO_NS 1000
+#define HPRE_ENABLE_HPCORE_SHIFT 7
+
/* due to nist p521 */
#define HPRE_ECC_MAX_KSZ 66
@@ -131,6 +133,8 @@ struct hpre_ctx {
};
/* for ecc algorithms */
unsigned int curve_id;
+ /* for high performance core */
+ u8 enable_hpcore;
};
struct hpre_asym_request {
@@ -1619,6 +1623,8 @@ static int hpre_ecdh_compute_value(struct kpp_request *req)
}
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL);
+ msg->resv1 = ctx->enable_hpcore << HPRE_ENABLE_HPCORE_SHIFT;
+
ret = hpre_send(ctx, msg);
if (likely(!ret))
return -EINPROGRESS;
@@ -1653,6 +1659,7 @@ static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
ctx->curve_id = ECC_CURVE_NIST_P256;
+ ctx->enable_hpcore = 1;
kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 4b9970230822..703920b49c7c 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -37,7 +37,6 @@ struct sec_aead_req {
u8 *a_ivin;
dma_addr_t a_ivin_dma;
struct aead_request *aead_req;
- bool fallback;
};
/* SEC request of Crypto */
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 66bc07da9eb6..8ea5305bc320 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -57,7 +57,6 @@
#define SEC_TYPE_MASK 0x0F
#define SEC_DONE_MASK 0x0001
#define SEC_ICV_MASK 0x000E
-#define SEC_SQE_LEN_RATE_MASK 0x3
#define SEC_TOTAL_IV_SZ(depth) (SEC_IV_SIZE * (depth))
#define SEC_SGL_SGE_NR 128
@@ -80,16 +79,16 @@
#define SEC_TOTAL_PBUF_SZ(depth) (PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \
SEC_PBUF_LEFT_SZ(depth))
-#define SEC_SQE_LEN_RATE 4
#define SEC_SQE_CFLAG 2
#define SEC_SQE_AEAD_FLAG 3
#define SEC_SQE_DONE 0x1
#define SEC_ICV_ERR 0x2
-#define MIN_MAC_LEN 4
#define MAC_LEN_MASK 0x1U
#define MAX_INPUT_DATA_LEN 0xFFFE00
#define BITS_MASK 0xFF
+#define WORD_MASK 0x3
#define BYTE_BITS 0x8
+#define BYTES_TO_WORDS(bcount) ((bcount) >> 2)
#define SEC_XTS_NAME_SZ 0x3
#define IV_CM_CAL_NUM 2
#define IV_CL_MASK 0x7
@@ -691,14 +690,10 @@ static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
c_ctx->fallback = false;
- /* Currently, only XTS mode need fallback tfm when using 192bit key */
- if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ)))
- return 0;
-
c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(c_ctx->fbtfm)) {
- pr_err("failed to alloc xts mode fallback tfm!\n");
+ pr_err("failed to alloc fallback tfm for %s!\n", alg);
return PTR_ERR(c_ctx->fbtfm);
}
@@ -858,7 +853,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
}
memcpy(c_ctx->c_key, key, keylen);
- if (c_ctx->fallback && c_ctx->fbtfm) {
+ if (c_ctx->fbtfm) {
ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
if (ret) {
dev_err(dev, "failed to set fallback skcipher key!\n");
@@ -1090,11 +1085,6 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
struct crypto_shash *hash_tfm = ctx->hash_tfm;
int blocksize, digestsize, ret;
- if (!keys->authkeylen) {
- pr_err("hisi_sec2: aead auth key error!\n");
- return -EINVAL;
- }
-
blocksize = crypto_shash_blocksize(hash_tfm);
digestsize = crypto_shash_digestsize(hash_tfm);
if (keys->authkeylen > blocksize) {
@@ -1106,7 +1096,8 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
}
ctx->a_key_len = digestsize;
} else {
- memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
+ if (keys->authkeylen)
+ memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
ctx->a_key_len = keys->authkeylen;
}
@@ -1160,8 +1151,10 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
}
ret = crypto_authenc_extractkeys(&keys, key, keylen);
- if (ret)
+ if (ret) {
+ dev_err(dev, "sec extract aead keys err!\n");
goto bad_key;
+ }
ret = sec_aead_aes_set_key(c_ctx, &keys);
if (ret) {
@@ -1175,12 +1168,6 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
goto bad_key;
}
- if (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK) {
- ret = -EINVAL;
- dev_err(dev, "AUTH key length error!\n");
- goto bad_key;
- }
-
ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
if (ret) {
dev_err(dev, "set sec fallback key err!\n");
@@ -1583,11 +1570,10 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
- sec_sqe->type2.mac_key_alg = cpu_to_le32(authsize / SEC_SQE_LEN_RATE);
+ sec_sqe->type2.mac_key_alg = cpu_to_le32(BYTES_TO_WORDS(authsize));
sec_sqe->type2.mac_key_alg |=
- cpu_to_le32((u32)((ctx->a_key_len) /
- SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);
+ cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET);
sec_sqe->type2.mac_key_alg |=
cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);
@@ -1639,12 +1625,10 @@ static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);
sqe3->auth_mac_key |=
- cpu_to_le32((u32)(authsize /
- SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);
+ cpu_to_le32(BYTES_TO_WORDS(authsize) << SEC_MAC_OFFSET_V3);
sqe3->auth_mac_key |=
- cpu_to_le32((u32)(ctx->a_key_len /
- SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET_V3);
+ cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET_V3);
sqe3->auth_mac_key |=
cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3);
@@ -2003,8 +1987,7 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
return sec_aead_ctx_init(tfm, "sha512");
}
-static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
- struct sec_req *sreq)
+static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
u32 cryptlen = sreq->c_req.sk_req->cryptlen;
struct device *dev = ctx->dev;
@@ -2026,10 +2009,6 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
}
break;
case SEC_CMODE_CTR:
- if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) {
- dev_err(dev, "skcipher HW version error!\n");
- ret = -EINVAL;
- }
break;
default:
ret = -EINVAL;
@@ -2038,17 +2017,21 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
return ret;
}
-static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+static int sec_skcipher_param_check(struct sec_ctx *ctx,
+ struct sec_req *sreq, bool *need_fallback)
{
struct skcipher_request *sk_req = sreq->c_req.sk_req;
struct device *dev = ctx->dev;
u8 c_alg = ctx->c_ctx.c_alg;
- if (unlikely(!sk_req->src || !sk_req->dst ||
- sk_req->cryptlen > MAX_INPUT_DATA_LEN)) {
+ if (unlikely(!sk_req->src || !sk_req->dst)) {
dev_err(dev, "skcipher input param error!\n");
return -EINVAL;
}
+
+ if (sk_req->cryptlen > MAX_INPUT_DATA_LEN)
+ *need_fallback = true;
+
sreq->c_req.c_len = sk_req->cryptlen;
if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
@@ -2106,6 +2089,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
struct sec_req *req = skcipher_request_ctx(sk_req);
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ bool need_fallback = false;
int ret;
if (!sk_req->cryptlen) {
@@ -2119,11 +2103,11 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
req->c_req.encrypt = encrypt;
req->ctx = ctx;
- ret = sec_skcipher_param_check(ctx, req);
+ ret = sec_skcipher_param_check(ctx, req, &need_fallback);
if (unlikely(ret))
return -EINVAL;
- if (unlikely(ctx->c_ctx.fallback))
+ if (unlikely(ctx->c_ctx.fallback || need_fallback))
return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);
return ctx->req_op->process(ctx, req);
@@ -2231,52 +2215,35 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
size_t sz = crypto_aead_authsize(tfm);
u8 c_mode = ctx->c_ctx.c_mode;
- struct device *dev = ctx->dev;
int ret;
- /* Hardware does not handle cases where authsize is less than 4 bytes */
- if (unlikely(sz < MIN_MAC_LEN)) {
- sreq->aead_req.fallback = true;
+ if (unlikely(ctx->sec->qm.ver == QM_HW_V2 && !sreq->c_req.c_len))
return -EINVAL;
- }
if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
- req->assoclen > SEC_MAX_AAD_LEN)) {
- dev_err(dev, "aead input spec error!\n");
+ req->assoclen > SEC_MAX_AAD_LEN))
return -EINVAL;
- }
if (c_mode == SEC_CMODE_CCM) {
- if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) {
- dev_err_ratelimited(dev, "CCM input aad parameter is too long!\n");
+ if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN))
return -EINVAL;
- }
- ret = aead_iv_demension_check(req);
- if (ret) {
- dev_err(dev, "aead input iv param error!\n");
- return ret;
- }
- }
- if (sreq->c_req.encrypt)
- sreq->c_req.c_len = req->cryptlen;
- else
- sreq->c_req.c_len = req->cryptlen - sz;
- if (c_mode == SEC_CMODE_CBC) {
- if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
- dev_err(dev, "aead crypto length error!\n");
+ ret = aead_iv_demension_check(req);
+ if (unlikely(ret))
+ return -EINVAL;
+ } else if (c_mode == SEC_CMODE_CBC) {
+ if (unlikely(sz & WORD_MASK))
+ return -EINVAL;
+ if (unlikely(ctx->a_ctx.a_key_len & WORD_MASK))
return -EINVAL;
- }
}
return 0;
}
-static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq, bool *need_fallback)
{
struct aead_request *req = sreq->aead_req.aead_req;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- size_t authsize = crypto_aead_authsize(tfm);
struct device *dev = ctx->dev;
u8 c_alg = ctx->c_ctx.c_alg;
@@ -2285,12 +2252,10 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
return -EINVAL;
}
- if (ctx->sec->qm.ver == QM_HW_V2) {
- if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
- req->cryptlen <= authsize))) {
- sreq->aead_req.fallback = true;
- return -EINVAL;
- }
+ if (unlikely(ctx->c_ctx.c_mode == SEC_CMODE_CBC &&
+ sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
+ dev_err(dev, "aead cbc mode input data length error!\n");
+ return -EINVAL;
}
/* Support AES or SM4 */
@@ -2299,8 +2264,10 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
return -EINVAL;
}
- if (unlikely(sec_aead_spec_check(ctx, sreq)))
+ if (unlikely(sec_aead_spec_check(ctx, sreq))) {
+ *need_fallback = true;
return -EINVAL;
+ }
if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
SEC_PBUF_SZ)
@@ -2344,17 +2311,19 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
struct sec_req *req = aead_request_ctx(a_req);
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ size_t sz = crypto_aead_authsize(tfm);
+ bool need_fallback = false;
int ret;
req->flag = a_req->base.flags;
req->aead_req.aead_req = a_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;
- req->aead_req.fallback = false;
+ req->c_req.c_len = a_req->cryptlen - (req->c_req.encrypt ? 0 : sz);
- ret = sec_aead_param_check(ctx, req);
+ ret = sec_aead_param_check(ctx, req, &need_fallback);
if (unlikely(ret)) {
- if (req->aead_req.fallback)
+ if (need_fallback)
return sec_aead_soft_crypto(ctx, a_req, encrypt);
return -EINVAL;
}
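The BYTES_TO_WORDS() change in the hunks above only renames the unit; the SEC descriptor fields still take lengths in 32-bit words. A quick worked example with illustrative sizes:

	/*
	 * authsize  = 32 bytes (HMAC-SHA256 tag)        -> BYTES_TO_WORDS(32) = 8
	 * a_key_len = 64 bytes (example auth key size)  -> BYTES_TO_WORDS(64) = 16
	 *
	 * The old code produced the same values as authsize / SEC_SQE_LEN_RATE
	 * with SEC_SQE_LEN_RATE == 4; the macro simply spells out the
	 * byte-to-word conversion.
	 */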
diff --git a/drivers/crypto/inside-secure/Makefile b/drivers/crypto/inside-secure/Makefile
index 13f64f96c626..30d13fd5d58e 100644
--- a/drivers/crypto/inside-secure/Makefile
+++ b/drivers/crypto/inside-secure/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += crypto_safexcel.o
crypto_safexcel-objs := safexcel.o safexcel_ring.o safexcel_cipher.o safexcel_hash.o
+obj-y += eip93/
diff --git a/drivers/crypto/inside-secure/eip93/Kconfig b/drivers/crypto/inside-secure/eip93/Kconfig
new file mode 100644
index 000000000000..8353d3d7ec9b
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+config CRYPTO_DEV_EIP93
+ tristate "Support for EIP93 crypto HW accelerators"
+ depends on SOC_MT7621 || ARCH_AIROHA || COMPILE_TEST
+ select CRYPTO_LIB_AES
+ select CRYPTO_LIB_DES
+ select CRYPTO_SKCIPHER
+ select CRYPTO_AEAD
+ select CRYPTO_AUTHENC
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ help
+ EIP93 provides various crypto HW accelerators. Select this if
+ you want to use the EIP93 modules for any of the crypto algorithms.
+
+ If the IP supports it, this provides offload for AES in ECB, CBC
+ and CTR modes, as well as DES and 3DES in ECB and CBC modes.
+
+ AEAD authenc(hmac(x), cipher(y)) is also provided for the supported algorithms.
diff --git a/drivers/crypto/inside-secure/eip93/Makefile b/drivers/crypto/inside-secure/eip93/Makefile
new file mode 100644
index 000000000000..a3d3d3677cdc
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_CRYPTO_DEV_EIP93) += crypto-hw-eip93.o
+
+crypto-hw-eip93-y += eip93-main.o eip93-common.o
+crypto-hw-eip93-y += eip93-cipher.o eip93-aead.o
+crypto-hw-eip93-y += eip93-hash.o
diff --git a/drivers/crypto/inside-secure/eip93/eip93-aead.c b/drivers/crypto/inside-secure/eip93/eip93-aead.c
new file mode 100644
index 000000000000..18dd8a9a5165
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/eip93-aead.c
@@ -0,0 +1,711 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 - 2021
+ *
+ * Richard van Schagen <vschagen@icloud.com>
+ * Christian Marangi <ansuelsmth@gmail.com>
+ */
+
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/authenc.h>
+#include <crypto/ctr.h>
+#include <crypto/hmac.h>
+#include <crypto/internal/aead.h>
+#include <crypto/md5.h>
+#include <crypto/null.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+
+#include <crypto/internal/des.h>
+
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+
+#include "eip93-aead.h"
+#include "eip93-cipher.h"
+#include "eip93-common.h"
+#include "eip93-regs.h"
+
+void eip93_aead_handle_result(struct crypto_async_request *async, int err)
+{
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm);
+ struct eip93_device *eip93 = ctx->eip93;
+ struct aead_request *req = aead_request_cast(async);
+ struct eip93_cipher_reqctx *rctx = aead_request_ctx(req);
+
+ eip93_unmap_dma(eip93, rctx, req->src, req->dst);
+ eip93_handle_result(eip93, rctx, req->iv);
+
+ aead_request_complete(req, err);
+}
+
+static int eip93_aead_send_req(struct crypto_async_request *async)
+{
+ struct aead_request *req = aead_request_cast(async);
+ struct eip93_cipher_reqctx *rctx = aead_request_ctx(req);
+ int err;
+
+ err = check_valid_request(rctx);
+ if (err) {
+ aead_request_complete(req, err);
+ return err;
+ }
+
+ return eip93_send_req(async, req->iv, rctx);
+}
+
+/* Crypto aead API functions */
+static int eip93_aead_cra_init(struct crypto_tfm *tfm)
+{
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg,
+ struct eip93_alg_template, alg.aead.base);
+
+ crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
+ sizeof(struct eip93_cipher_reqctx));
+
+ ctx->eip93 = tmpl->eip93;
+ ctx->flags = tmpl->flags;
+ ctx->type = tmpl->type;
+ ctx->set_assoc = true;
+
+ ctx->sa_record = kzalloc(sizeof(*ctx->sa_record), GFP_KERNEL);
+ if (!ctx->sa_record)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void eip93_aead_cra_exit(struct crypto_tfm *tfm)
+{
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ dma_unmap_single(ctx->eip93->dev, ctx->sa_record_base,
+ sizeof(*ctx->sa_record), DMA_TO_DEVICE);
+ kfree(ctx->sa_record);
+}
+
+static int eip93_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_authenc_keys keys;
+ struct crypto_aes_ctx aes;
+ struct sa_record *sa_record = ctx->sa_record;
+ u32 nonce = 0;
+ int ret;
+
+ if (crypto_authenc_extractkeys(&keys, key, len))
+ return -EINVAL;
+
+ if (IS_RFC3686(ctx->flags)) {
+ if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
+ return -EINVAL;
+
+ keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
+ memcpy(&nonce, keys.enckey + keys.enckeylen,
+ CTR_RFC3686_NONCE_SIZE);
+ }
+
+ switch ((ctx->flags & EIP93_ALG_MASK)) {
+ case EIP93_ALG_DES:
+ ret = verify_aead_des_key(ctfm, keys.enckey, keys.enckeylen);
+ if (ret)
+ return ret;
+
+ break;
+ case EIP93_ALG_3DES:
+ if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+ return -EINVAL;
+
+ ret = verify_aead_des3_key(ctfm, keys.enckey, keys.enckeylen);
+ if (ret)
+ return ret;
+
+ break;
+ case EIP93_ALG_AES:
+ ret = aes_expandkey(&aes, keys.enckey, keys.enckeylen);
+ if (ret)
+ return ret;
+
+ break;
+ }
+
+ ctx->blksize = crypto_aead_blocksize(ctfm);
+ /* Encryption key */
+ eip93_set_sa_record(sa_record, keys.enckeylen, ctx->flags);
+ sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_OPCODE;
+ sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_OPCODE,
+ EIP93_SA_CMD_OPCODE_BASIC_OUT_ENC_HASH);
+ sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_DIGEST_LENGTH;
+ sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH,
+ ctx->authsize / sizeof(u32));
+
+ memcpy(sa_record->sa_key, keys.enckey, keys.enckeylen);
+ ctx->sa_nonce = nonce;
+ sa_record->sa_nonce = nonce;
+
+ /* authentication key */
+ ret = eip93_hmac_setkey(ctx->flags, keys.authkey, keys.authkeylen,
+ ctx->authsize, sa_record->sa_i_digest,
+ sa_record->sa_o_digest, false);
+
+ ctx->set_assoc = true;
+
+ return ret;
+}
+
+static int eip93_aead_setauthsize(struct crypto_aead *ctfm,
+ unsigned int authsize)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ ctx->authsize = authsize;
+ ctx->sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_DIGEST_LENGTH;
+ ctx->sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH,
+ ctx->authsize / sizeof(u32));
+
+ return 0;
+}
+
+static void eip93_aead_setassoc(struct eip93_crypto_ctx *ctx,
+ struct aead_request *req)
+{
+ struct sa_record *sa_record = ctx->sa_record;
+
+ sa_record->sa_cmd1_word &= ~EIP93_SA_CMD_HASH_CRYPT_OFFSET;
+ sa_record->sa_cmd1_word |= FIELD_PREP(EIP93_SA_CMD_HASH_CRYPT_OFFSET,
+ req->assoclen / sizeof(u32));
+
+ ctx->assoclen = req->assoclen;
+}
+
+static int eip93_aead_crypt(struct aead_request *req)
+{
+ struct eip93_cipher_reqctx *rctx = aead_request_ctx(req);
+ struct crypto_async_request *async = &req->base;
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ int ret;
+
+ ctx->sa_record_base = dma_map_single(ctx->eip93->dev, ctx->sa_record,
+ sizeof(*ctx->sa_record), DMA_TO_DEVICE);
+ ret = dma_mapping_error(ctx->eip93->dev, ctx->sa_record_base);
+ if (ret)
+ return ret;
+
+ rctx->textsize = req->cryptlen;
+ rctx->blksize = ctx->blksize;
+ rctx->assoclen = req->assoclen;
+ rctx->authsize = ctx->authsize;
+ rctx->sg_src = req->src;
+ rctx->sg_dst = req->dst;
+ rctx->ivsize = crypto_aead_ivsize(aead);
+ rctx->desc_flags = EIP93_DESC_AEAD;
+ rctx->sa_record_base = ctx->sa_record_base;
+
+ if (IS_DECRYPT(rctx->flags))
+ rctx->textsize -= rctx->authsize;
+
+ return eip93_aead_send_req(async);
+}
+
+static int eip93_aead_encrypt(struct aead_request *req)
+{
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct eip93_cipher_reqctx *rctx = aead_request_ctx(req);
+
+ rctx->flags = ctx->flags;
+ rctx->flags |= EIP93_ENCRYPT;
+ if (ctx->set_assoc) {
+ eip93_aead_setassoc(ctx, req);
+ ctx->set_assoc = false;
+ }
+
+ if (req->assoclen != ctx->assoclen) {
+ dev_err(ctx->eip93->dev, "Request AAD length error\n");
+ return -EINVAL;
+ }
+
+ return eip93_aead_crypt(req);
+}
+
+static int eip93_aead_decrypt(struct aead_request *req)
+{
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct eip93_cipher_reqctx *rctx = aead_request_ctx(req);
+
+ ctx->sa_record->sa_cmd0_word |= EIP93_SA_CMD_DIRECTION_IN;
+ ctx->sa_record->sa_cmd1_word &= ~(EIP93_SA_CMD_COPY_PAD |
+ EIP93_SA_CMD_COPY_DIGEST);
+
+ rctx->flags = ctx->flags;
+ rctx->flags |= EIP93_DECRYPT;
+ if (ctx->set_assoc) {
+ eip93_aead_setassoc(ctx, req);
+ ctx->set_assoc = false;
+ }
+
+ if (req->assoclen != ctx->assoclen) {
+ dev_err(ctx->eip93->dev, "Request AAD length error\n");
+ return -EINVAL;
+ }
+
+ return eip93_aead_crypt(req);
+}
+
+/* Available authenc algorithms in this module */
+struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_aes = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 | EIP93_MODE_CBC | EIP93_ALG_AES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_driver_name =
+ "authenc(hmac(md5-eip93), cbc(aes-eip93))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_aes = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 | EIP93_MODE_CBC | EIP93_ALG_AES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name =
+ "authenc(hmac(sha1-eip93),cbc(aes-eip93))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_aes = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 | EIP93_MODE_CBC | EIP93_ALG_AES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(aes))",
+ .cra_driver_name =
+ "authenc(hmac(sha224-eip93),cbc(aes-eip93))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_aes = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 | EIP93_MODE_CBC | EIP93_ALG_AES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name =
+ "authenc(hmac(sha256-eip93),cbc(aes-eip93))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_md5_rfc3686_aes = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 |
+ EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc(hmac(md5-eip93),rfc3686(ctr(aes-eip93)))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_sha1_rfc3686_aes = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 |
+ EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc(hmac(sha1-eip93),rfc3686(ctr(aes-eip93)))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_sha224_rfc3686_aes = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 |
+ EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc(hmac(sha224-eip93),rfc3686(ctr(aes-eip93)))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_sha256_rfc3686_aes = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 |
+ EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc(hmac(sha256-eip93),rfc3686(ctr(aes-eip93)))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 | EIP93_MODE_CBC | EIP93_ALG_DES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des))",
+ .cra_driver_name =
+ "authenc(hmac(md5-eip93),cbc(des-eip93))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 | EIP93_MODE_CBC | EIP93_ALG_DES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(des))",
+ .cra_driver_name =
+ "authenc(hmac(sha1-eip93),cbc(des-eip93))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 | EIP93_MODE_CBC | EIP93_ALG_DES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(des))",
+ .cra_driver_name =
+ "authenc(hmac(sha224-eip93),cbc(des-eip93))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 | EIP93_MODE_CBC | EIP93_ALG_DES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(des))",
+ .cra_driver_name =
+ "authenc(hmac(sha256-eip93),cbc(des-eip93))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des3_ede = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 | EIP93_MODE_CBC | EIP93_ALG_3DES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_driver_name =
+ "authenc(hmac(md5-eip93),cbc(des3_ede-eip93))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0x0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des3_ede = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 | EIP93_MODE_CBC | EIP93_ALG_3DES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
+ .cra_driver_name =
+ "authenc(hmac(sha1-eip93),cbc(des3_ede-eip93))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0x0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des3_ede = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 | EIP93_MODE_CBC | EIP93_ALG_3DES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
+ .cra_driver_name =
+ "authenc(hmac(sha224-eip93),cbc(des3_ede-eip93))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0x0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des3_ede = {
+ .type = EIP93_ALG_TYPE_AEAD,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 | EIP93_MODE_CBC | EIP93_ALG_3DES,
+ .alg.aead = {
+ .setkey = eip93_aead_setkey,
+ .encrypt = eip93_aead_encrypt,
+ .decrypt = eip93_aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setauthsize = eip93_aead_setauthsize,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
+ .cra_driver_name =
+ "authenc(hmac(sha256-eip93),cbc(des3_ede-eip93))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0x0,
+ .cra_init = eip93_aead_cra_init,
+ .cra_exit = eip93_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
diff --git a/drivers/crypto/inside-secure/eip93/eip93-aead.h b/drivers/crypto/inside-secure/eip93/eip93-aead.h
new file mode 100644
index 000000000000..e2fa8fd39c50
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/eip93-aead.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 - 2021
+ *
+ * Richard van Schagen <vschagen@icloud.com>
+ * Christian Marangi <ansuelsmth@gmail.com>
+ */
+#ifndef _EIP93_AEAD_H_
+#define _EIP93_AEAD_H_
+
+extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_aes;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_aes;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_aes;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_aes;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_ctr_aes;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_ctr_aes;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_ctr_aes;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_ctr_aes;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_rfc3686_aes;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_rfc3686_aes;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_rfc3686_aes;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_rfc3686_aes;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des3_ede;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des3_ede;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des3_ede;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des3_ede;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_ecb_null;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_ecb_null;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_ecb_null;
+extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_ecb_null;
+
+void eip93_aead_handle_result(struct crypto_async_request *async, int err);
+
+#endif /* _EIP93_AEAD_H_ */
diff --git a/drivers/crypto/inside-secure/eip93/eip93-aes.h b/drivers/crypto/inside-secure/eip93/eip93-aes.h
new file mode 100644
index 000000000000..1d83d39cab2a
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/eip93-aes.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 - 2021
+ *
+ * Richard van Schagen <vschagen@icloud.com>
+ * Christian Marangi <ansuelsmth@gmail.com>
+ */
+#ifndef _EIP93_AES_H_
+#define _EIP93_AES_H_
+
+extern struct eip93_alg_template eip93_alg_ecb_aes;
+extern struct eip93_alg_template eip93_alg_cbc_aes;
+extern struct eip93_alg_template eip93_alg_ctr_aes;
+extern struct eip93_alg_template eip93_alg_rfc3686_aes;
+
+#endif /* _EIP93_AES_H_ */
diff --git a/drivers/crypto/inside-secure/eip93/eip93-cipher.c b/drivers/crypto/inside-secure/eip93/eip93-cipher.c
new file mode 100644
index 000000000000..1f2d6846610f
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/eip93-cipher.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 - 2021
+ *
+ * Richard van Schagen <vschagen@icloud.com>
+ * Christian Marangi <ansuelsmth@gmail.com>
+ */
+
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/internal/des.h>
+#include <linux/dma-mapping.h>
+
+#include "eip93-aes.h"
+#include "eip93-cipher.h"
+#include "eip93-common.h"
+#include "eip93-des.h"
+#include "eip93-regs.h"
+
+void eip93_skcipher_handle_result(struct crypto_async_request *async, int err)
+{
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm);
+ struct eip93_device *eip93 = ctx->eip93;
+ struct skcipher_request *req = skcipher_request_cast(async);
+ struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req);
+
+ eip93_unmap_dma(eip93, rctx, req->src, req->dst);
+ eip93_handle_result(eip93, rctx, req->iv);
+
+ skcipher_request_complete(req, err);
+}
+
+static int eip93_skcipher_send_req(struct crypto_async_request *async)
+{
+ struct skcipher_request *req = skcipher_request_cast(async);
+ struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ int err;
+
+ err = check_valid_request(rctx);
+
+ if (err) {
+ skcipher_request_complete(req, err);
+ return err;
+ }
+
+ return eip93_send_req(async, req->iv, rctx);
+}
+
+/* Crypto skcipher API functions */
+static int eip93_skcipher_cra_init(struct crypto_tfm *tfm)
+{
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg,
+ struct eip93_alg_template, alg.skcipher.base);
+
+ crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+ sizeof(struct eip93_cipher_reqctx));
+
+ memset(ctx, 0, sizeof(*ctx));
+
+ ctx->eip93 = tmpl->eip93;
+ ctx->type = tmpl->type;
+
+ ctx->sa_record = kzalloc(sizeof(*ctx->sa_record), GFP_KERNEL);
+ if (!ctx->sa_record)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void eip93_skcipher_cra_exit(struct crypto_tfm *tfm)
+{
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ dma_unmap_single(ctx->eip93->dev, ctx->sa_record_base,
+ sizeof(*ctx->sa_record), DMA_TO_DEVICE);
+ kfree(ctx->sa_record);
+}
+
+static int eip93_skcipher_setkey(struct crypto_skcipher *ctfm, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg,
+ struct eip93_alg_template,
+ alg.skcipher.base);
+ struct sa_record *sa_record = ctx->sa_record;
+ unsigned int keylen = len;
+ u32 flags = tmpl->flags;
+ u32 nonce = 0;
+ int ret;
+
+ if (!key || !keylen)
+ return -EINVAL;
+
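+ /* For RFC3686 the last 4 bytes of the key carry the nonce, not key material */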
+ if (IS_RFC3686(flags)) {
+ if (len < CTR_RFC3686_NONCE_SIZE)
+ return -EINVAL;
+
+ keylen = len - CTR_RFC3686_NONCE_SIZE;
+ memcpy(&nonce, key + keylen, CTR_RFC3686_NONCE_SIZE);
+ }
+
+ if (flags & EIP93_ALG_DES) {
+ ctx->blksize = DES_BLOCK_SIZE;
+ ret = verify_skcipher_des_key(ctfm, key);
+ if (ret)
+ return ret;
+ }
+ if (flags & EIP93_ALG_3DES) {
+ ctx->blksize = DES3_EDE_BLOCK_SIZE;
+ ret = verify_skcipher_des3_key(ctfm, key);
+ if (ret)
+ return ret;
+ }
+
+ if (flags & EIP93_ALG_AES) {
+ struct crypto_aes_ctx aes;
+
+ ctx->blksize = AES_BLOCK_SIZE;
+ ret = aes_expandkey(&aes, key, keylen);
+ if (ret)
+ return ret;
+ }
+
+ eip93_set_sa_record(sa_record, keylen, flags);
+
+ memcpy(sa_record->sa_key, key, keylen);
+ ctx->sa_nonce = nonce;
+ sa_record->sa_nonce = nonce;
+
+ return 0;
+}
+
+static int eip93_skcipher_crypt(struct skcipher_request *req)
+{
+ struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_async_request *async = &req->base;
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ int ret;
+
+ if (!req->cryptlen)
+ return 0;
+
+ /*
+ * ECB and CBC algorithms require message lengths to be
+ * multiples of block size.
+ */
+ if (IS_ECB(rctx->flags) || IS_CBC(rctx->flags))
+ if (!IS_ALIGNED(req->cryptlen,
+ crypto_skcipher_blocksize(skcipher)))
+ return -EINVAL;
+
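+ /* Map the SA record so the engine can fetch the cipher parameters */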
+ ctx->sa_record_base = dma_map_single(ctx->eip93->dev, ctx->sa_record,
+ sizeof(*ctx->sa_record), DMA_TO_DEVICE);
+ ret = dma_mapping_error(ctx->eip93->dev, ctx->sa_record_base);
+ if (ret)
+ return ret;
+
+ rctx->assoclen = 0;
+ rctx->textsize = req->cryptlen;
+ rctx->authsize = 0;
+ rctx->sg_src = req->src;
+ rctx->sg_dst = req->dst;
+ rctx->ivsize = crypto_skcipher_ivsize(skcipher);
+ rctx->blksize = ctx->blksize;
+ rctx->desc_flags = EIP93_DESC_SKCIPHER;
+ rctx->sa_record_base = ctx->sa_record_base;
+
+ return eip93_skcipher_send_req(async);
+}
+
+static int eip93_skcipher_encrypt(struct skcipher_request *req)
+{
+ struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct eip93_alg_template *tmpl = container_of(req->base.tfm->__crt_alg,
+ struct eip93_alg_template, alg.skcipher.base);
+
+ rctx->flags = tmpl->flags;
+ rctx->flags |= EIP93_ENCRYPT;
+
+ return eip93_skcipher_crypt(req);
+}
+
+static int eip93_skcipher_decrypt(struct skcipher_request *req)
+{
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct eip93_alg_template *tmpl = container_of(req->base.tfm->__crt_alg,
+ struct eip93_alg_template, alg.skcipher.base);
+
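+ /* Mark the SA as inbound so the engine runs the cipher in decrypt direction */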
+ ctx->sa_record->sa_cmd0_word |= EIP93_SA_CMD_DIRECTION_IN;
+
+ rctx->flags = tmpl->flags;
+ rctx->flags |= EIP93_DECRYPT;
+
+ return eip93_skcipher_crypt(req);
+}
+
+/* Available algorithms in this module */
+struct eip93_alg_template eip93_alg_ecb_aes = {
+ .type = EIP93_ALG_TYPE_SKCIPHER,
+ .flags = EIP93_MODE_ECB | EIP93_ALG_AES,
+ .alg.skcipher = {
+ .setkey = eip93_skcipher_setkey,
+ .encrypt = eip93_skcipher_encrypt,
+ .decrypt = eip93_skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = 0,
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb(aes-eip93)",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0xf,
+ .cra_init = eip93_skcipher_cra_init,
+ .cra_exit = eip93_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_cbc_aes = {
+ .type = EIP93_ALG_TYPE_SKCIPHER,
+ .flags = EIP93_MODE_CBC | EIP93_ALG_AES,
+ .alg.skcipher = {
+ .setkey = eip93_skcipher_setkey,
+ .encrypt = eip93_skcipher_encrypt,
+ .decrypt = eip93_skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc(aes-eip93)",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0xf,
+ .cra_init = eip93_skcipher_cra_init,
+ .cra_exit = eip93_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_ctr_aes = {
+ .type = EIP93_ALG_TYPE_SKCIPHER,
+ .flags = EIP93_MODE_CTR | EIP93_ALG_AES,
+ .alg.skcipher = {
+ .setkey = eip93_skcipher_setkey,
+ .encrypt = eip93_skcipher_encrypt,
+ .decrypt = eip93_skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr(aes-eip93)",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0xf,
+ .cra_init = eip93_skcipher_cra_init,
+ .cra_exit = eip93_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_rfc3686_aes = {
+ .type = EIP93_ALG_TYPE_SKCIPHER,
+ .flags = EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES,
+ .alg.skcipher = {
+ .setkey = eip93_skcipher_setkey,
+ .encrypt = eip93_skcipher_encrypt,
+ .decrypt = eip93_skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .base = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "rfc3686(ctr(aes-eip93))",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0xf,
+ .cra_init = eip93_skcipher_cra_init,
+ .cra_exit = eip93_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_ecb_des = {
+ .type = EIP93_ALG_TYPE_SKCIPHER,
+ .flags = EIP93_MODE_ECB | EIP93_ALG_DES,
+ .alg.skcipher = {
+ .setkey = eip93_skcipher_setkey,
+ .encrypt = eip93_skcipher_encrypt,
+ .decrypt = eip93_skcipher_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = 0,
+ .base = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "ecb(des-eip93)",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_skcipher_cra_init,
+ .cra_exit = eip93_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_cbc_des = {
+ .type = EIP93_ALG_TYPE_SKCIPHER,
+ .flags = EIP93_MODE_CBC | EIP93_ALG_DES,
+ .alg.skcipher = {
+ .setkey = eip93_skcipher_setkey,
+ .encrypt = eip93_skcipher_encrypt,
+ .decrypt = eip93_skcipher_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc(des-eip93)",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_skcipher_cra_init,
+ .cra_exit = eip93_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_ecb_des3_ede = {
+ .type = EIP93_ALG_TYPE_SKCIPHER,
+ .flags = EIP93_MODE_ECB | EIP93_ALG_3DES,
+ .alg.skcipher = {
+ .setkey = eip93_skcipher_setkey,
+ .encrypt = eip93_skcipher_encrypt,
+ .decrypt = eip93_skcipher_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = 0,
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ecb(des3_ede-eip93)",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_skcipher_cra_init,
+ .cra_exit = eip93_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_cbc_des3_ede = {
+ .type = EIP93_ALG_TYPE_SKCIPHER,
+ .flags = EIP93_MODE_CBC | EIP93_ALG_3DES,
+ .alg.skcipher = {
+ .setkey = eip93_skcipher_setkey,
+ .encrypt = eip93_skcipher_encrypt,
+ .decrypt = eip93_skcipher_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc(des3_ede-eip93)",
+ .cra_priority = EIP93_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_init = eip93_skcipher_cra_init,
+ .cra_exit = eip93_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
diff --git a/drivers/crypto/inside-secure/eip93/eip93-cipher.h b/drivers/crypto/inside-secure/eip93/eip93-cipher.h
new file mode 100644
index 000000000000..6e2545ebd879
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/eip93-cipher.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 - 2021
+ *
+ * Richard van Schagen <vschagen@icloud.com>
+ * Christian Marangi <ansuelsmth@gmail.com>
+ */
+#ifndef _EIP93_CIPHER_H_
+#define _EIP93_CIPHER_H_
+
+#include "eip93-main.h"
+
+struct eip93_crypto_ctx {
+ struct eip93_device *eip93;
+ u32 flags;
+ struct sa_record *sa_record;
+ u32 sa_nonce;
+ int blksize;
+ dma_addr_t sa_record_base;
+ /* AEAD specific */
+ unsigned int authsize;
+ unsigned int assoclen;
+ bool set_assoc;
+ enum eip93_alg_type type;
+};
+
+struct eip93_cipher_reqctx {
+ u16 desc_flags;
+ u16 flags;
+ unsigned int blksize;
+ unsigned int ivsize;
+ unsigned int textsize;
+ unsigned int assoclen;
+ unsigned int authsize;
+ dma_addr_t sa_record_base;
+ struct sa_state *sa_state;
+ dma_addr_t sa_state_base;
+ struct eip93_descriptor *cdesc;
+ struct scatterlist *sg_src;
+ struct scatterlist *sg_dst;
+ int src_nents;
+ int dst_nents;
+ struct sa_state *sa_state_ctr;
+ dma_addr_t sa_state_ctr_base;
+};
+
+int check_valid_request(struct eip93_cipher_reqctx *rctx);
+
+void eip93_unmap_dma(struct eip93_device *eip93, struct eip93_cipher_reqctx *rctx,
+ struct scatterlist *reqsrc, struct scatterlist *reqdst);
+
+void eip93_skcipher_handle_result(struct crypto_async_request *async, int err);
+
+int eip93_send_req(struct crypto_async_request *async,
+ const u8 *reqiv, struct eip93_cipher_reqctx *rctx);
+
+void eip93_handle_result(struct eip93_device *eip93, struct eip93_cipher_reqctx *rctx,
+ u8 *reqiv);
+
+#endif /* _EIP93_CIPHER_H_ */
diff --git a/drivers/crypto/inside-secure/eip93/eip93-common.c b/drivers/crypto/inside-secure/eip93/eip93-common.c
new file mode 100644
index 000000000000..66153aa2493f
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/eip93-common.c
@@ -0,0 +1,822 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 - 2021
+ *
+ * Richard van Schagen <vschagen@icloud.com>
+ * Christian Marangi <ansuelsmth@gmail.com>
+ */
+
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/hmac.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+
+#include "eip93-cipher.h"
+#include "eip93-hash.h"
+#include "eip93-common.h"
+#include "eip93-main.h"
+#include "eip93-regs.h"
+
+int eip93_parse_ctrl_stat_err(struct eip93_device *eip93, int err)
+{
+ u32 ext_err;
+
+ if (!err)
+ return 0;
+
+ switch (err & ~EIP93_PE_CTRL_PE_EXT_ERR_CODE) {
+ case EIP93_PE_CTRL_PE_AUTH_ERR:
+ case EIP93_PE_CTRL_PE_PAD_ERR:
+ return -EBADMSG;
+ /* let software handle anti-replay errors */
+ case EIP93_PE_CTRL_PE_SEQNUM_ERR:
+ return 0;
+ case EIP93_PE_CTRL_PE_EXT_ERR:
+ break;
+ default:
+ dev_err(eip93->dev, "Unhandled error 0x%08x\n", err);
+ return -EINVAL;
+ }
+
+ /* Parse additional ext errors */
+ ext_err = FIELD_GET(EIP93_PE_CTRL_PE_EXT_ERR_CODE, err);
+ switch (ext_err) {
+ case EIP93_PE_CTRL_PE_EXT_ERR_BUS:
+ case EIP93_PE_CTRL_PE_EXT_ERR_PROCESSING:
+ return -EIO;
+ case EIP93_PE_CTRL_PE_EXT_ERR_DESC_OWNER:
+ return -EACCES;
+ case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_OP:
+ case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_ALGO:
+ case EIP93_PE_CTRL_PE_EXT_ERR_SPI:
+ return -EINVAL;
+ case EIP93_PE_CTRL_PE_EXT_ERR_ZERO_LENGTH:
+ case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_PK_LENGTH:
+ case EIP93_PE_CTRL_PE_EXT_ERR_BLOCK_SIZE_ERR:
+ return -EBADMSG;
+ default:
+ dev_err(eip93->dev, "Unhandled ext error 0x%08x\n", ext_err);
+ return -EINVAL;
+ }
+}
+
+static void *eip93_ring_next_wptr(struct eip93_device *eip93,
+ struct eip93_desc_ring *ring)
+{
+ void *ptr = ring->write;
+
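+ /* The ring is full once the write pointer would catch up with the read pointer */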
+ if ((ring->write == ring->read - ring->offset) ||
+ (ring->read == ring->base && ring->write == ring->base_end))
+ return ERR_PTR(-ENOMEM);
+
+ if (ring->write == ring->base_end)
+ ring->write = ring->base;
+ else
+ ring->write += ring->offset;
+
+ return ptr;
+}
+
+static void *eip93_ring_next_rptr(struct eip93_device *eip93,
+ struct eip93_desc_ring *ring)
+{
+ void *ptr = ring->read;
+
+ if (ring->write == ring->read)
+ return ERR_PTR(-ENOENT);
+
+ if (ring->read == ring->base_end)
+ ring->read = ring->base;
+ else
+ ring->read += ring->offset;
+
+ return ptr;
+}
+
+int eip93_put_descriptor(struct eip93_device *eip93,
+ struct eip93_descriptor *desc)
+{
+ struct eip93_descriptor *cdesc;
+ struct eip93_descriptor *rdesc;
+
+ rdesc = eip93_ring_next_wptr(eip93, &eip93->ring->rdr);
+ if (IS_ERR(rdesc))
+ return -ENOENT;
+
+ cdesc = eip93_ring_next_wptr(eip93, &eip93->ring->cdr);
+ if (IS_ERR(cdesc))
+ return -ENOENT;
+
+ memset(rdesc, 0, sizeof(struct eip93_descriptor));
+
+ memcpy(cdesc, desc, sizeof(struct eip93_descriptor));
+
+ return 0;
+}
+
+void *eip93_get_descriptor(struct eip93_device *eip93)
+{
+ struct eip93_descriptor *cdesc;
+ void *ptr;
+
+ cdesc = eip93_ring_next_rptr(eip93, &eip93->ring->cdr);
+ if (IS_ERR(cdesc))
+ return ERR_PTR(-ENOENT);
+
+ memset(cdesc, 0, sizeof(struct eip93_descriptor));
+
+ ptr = eip93_ring_next_rptr(eip93, &eip93->ring->rdr);
+ if (IS_ERR(ptr))
+ return ERR_PTR(-ENOENT);
+
+ return ptr;
+}
+
+static void eip93_free_sg_copy(const int len, struct scatterlist **sg)
+{
+ if (!*sg || !len)
+ return;
+
+ free_pages((unsigned long)sg_virt(*sg), get_order(len));
+ kfree(*sg);
+ *sg = NULL;
+}
+
+static int eip93_make_sg_copy(struct scatterlist *src, struct scatterlist **dst,
+ const u32 len, const bool copy)
+{
+ void *pages;
+
+ *dst = kmalloc(sizeof(**dst), GFP_KERNEL);
+ if (!*dst)
+ return -ENOMEM;
+
+ pages = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA,
+ get_order(len));
+ if (!pages) {
+ kfree(*dst);
+ *dst = NULL;
+ return -ENOMEM;
+ }
+
+ sg_init_table(*dst, 1);
+ sg_set_buf(*dst, pages, len);
+
+ /* copy only as requested */
+ if (copy)
+ sg_copy_to_buffer(src, sg_nents(src), pages, len);
+
+ return 0;
+}
+
+static bool eip93_is_sg_aligned(struct scatterlist *sg, u32 len,
+ const int blksize)
+{
+ int nents;
+
+ for (nents = 0; sg; sg = sg_next(sg), ++nents) {
+ if (!IS_ALIGNED(sg->offset, 4))
+ return false;
+
+ if (len <= sg->length) {
+ if (!IS_ALIGNED(len, blksize))
+ return false;
+
+ return true;
+ }
+
+ if (!IS_ALIGNED(sg->length, blksize))
+ return false;
+
+ len -= sg->length;
+ }
+ return false;
+}
+
+int check_valid_request(struct eip93_cipher_reqctx *rctx)
+{
+ struct scatterlist *src = rctx->sg_src;
+ struct scatterlist *dst = rctx->sg_dst;
+ u32 textsize = rctx->textsize;
+ u32 authsize = rctx->authsize;
+ u32 blksize = rctx->blksize;
+ u32 totlen_src = rctx->assoclen + rctx->textsize;
+ u32 totlen_dst = rctx->assoclen + rctx->textsize;
+ u32 copy_len;
+ bool src_align, dst_align;
+ int src_nents, dst_nents;
+ int err = -EINVAL;
+
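+ /* Only CTR mode can process a text size that is not block aligned */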
+ if (!IS_CTR(rctx->flags)) {
+ if (!IS_ALIGNED(textsize, blksize))
+ return err;
+ }
+
+ if (authsize) {
+ if (IS_ENCRYPT(rctx->flags))
+ totlen_dst += authsize;
+ else
+ totlen_src += authsize;
+ }
+
+ src_nents = sg_nents_for_len(src, totlen_src);
+ if (src_nents < 0)
+ return src_nents;
+
+ dst_nents = sg_nents_for_len(dst, totlen_dst);
+ if (dst_nents < 0)
+ return dst_nents;
+
+ if (src == dst) {
+ src_nents = max(src_nents, dst_nents);
+ dst_nents = src_nents;
+ if (unlikely((totlen_src || totlen_dst) && !src_nents))
+ return err;
+
+ } else {
+ if (unlikely(totlen_src && !src_nents))
+ return err;
+
+ if (unlikely(totlen_dst && !dst_nents))
+ return err;
+ }
+
+ if (authsize) {
+ if (dst_nents == 1 && src_nents == 1) {
+ src_align = eip93_is_sg_aligned(src, totlen_src, blksize);
+ if (src == dst)
+ dst_align = src_align;
+ else
+ dst_align = eip93_is_sg_aligned(dst, totlen_dst, blksize);
+ } else {
+ src_align = false;
+ dst_align = false;
+ }
+ } else {
+ src_align = eip93_is_sg_aligned(src, totlen_src, blksize);
+ if (src == dst)
+ dst_align = src_align;
+ else
+ dst_align = eip93_is_sg_aligned(dst, totlen_dst, blksize);
+ }
+
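+ /* Bounce misaligned scatterlists through a linear buffer the engine can handle */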
+ copy_len = max(totlen_src, totlen_dst);
+ if (!src_align) {
+ err = eip93_make_sg_copy(src, &rctx->sg_src, copy_len, true);
+ if (err)
+ return err;
+ }
+
+ if (!dst_align) {
+ err = eip93_make_sg_copy(dst, &rctx->sg_dst, copy_len, false);
+ if (err)
+ return err;
+ }
+
+ src_nents = sg_nents_for_len(rctx->sg_src, totlen_src);
+ if (src_nents < 0)
+ return src_nents;
+
+ dst_nents = sg_nents_for_len(rctx->sg_dst, totlen_dst);
+ if (dst_nents < 0)
+ return dst_nents;
+
+ rctx->src_nents = src_nents;
+ rctx->dst_nents = dst_nents;
+
+ return 0;
+}
+
+/*
+ * Set sa_record function:
+ * Even when a sa_record field is set to "0", keep the explicit " = 0" for readability.
+ */
+void eip93_set_sa_record(struct sa_record *sa_record, const unsigned int keylen,
+ const u32 flags)
+{
+ /* Reset cmd word */
+ sa_record->sa_cmd0_word = 0;
+ sa_record->sa_cmd1_word = 0;
+
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_IV_FROM_STATE;
+ if (!IS_ECB(flags))
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_SAVE_IV;
+
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_OP_BASIC;
+
+ switch ((flags & EIP93_ALG_MASK)) {
+ case EIP93_ALG_AES:
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_AES;
+ sa_record->sa_cmd1_word |= FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH,
+ keylen >> 3);
+ break;
+ case EIP93_ALG_3DES:
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_3DES;
+ break;
+ case EIP93_ALG_DES:
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_DES;
+ break;
+ default:
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_NULL;
+ }
+
+ switch ((flags & EIP93_HASH_MASK)) {
+ case EIP93_HASH_SHA256:
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA256;
+ break;
+ case EIP93_HASH_SHA224:
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA224;
+ break;
+ case EIP93_HASH_SHA1:
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA1;
+ break;
+ case EIP93_HASH_MD5:
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_MD5;
+ break;
+ default:
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_NULL;
+ }
+
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_PAD_ZERO;
+
+ switch ((flags & EIP93_MODE_MASK)) {
+ case EIP93_MODE_CBC:
+ sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_CBC;
+ break;
+ case EIP93_MODE_CTR:
+ sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_CTR;
+ break;
+ case EIP93_MODE_ECB:
+ sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_ECB;
+ break;
+ }
+
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_DIGEST_3WORD;
+ if (IS_HASH(flags)) {
+ sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_PAD;
+ sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_DIGEST;
+ }
+
+ if (IS_HMAC(flags)) {
+ sa_record->sa_cmd1_word |= EIP93_SA_CMD_HMAC;
+ sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_HEADER;
+ }
+
+ sa_record->sa_spi = 0x0;
+ sa_record->sa_seqmum_mask[0] = 0xFFFFFFFF;
+ sa_record->sa_seqmum_mask[1] = 0x0;
+}
+
+/*
+ * Poor man's scatter/gather function:
+ * Create a descriptor for every segment to avoid copying buffers.
+ * For performance it is better to let the hardware perform multiple
+ * DMA transfers than to copy the buffers in software.
+ */
+static int eip93_scatter_combine(struct eip93_device *eip93,
+ struct eip93_cipher_reqctx *rctx,
+ u32 datalen, u32 split, int offsetin)
+{
+ struct eip93_descriptor *cdesc = rctx->cdesc;
+ struct scatterlist *sgsrc = rctx->sg_src;
+ struct scatterlist *sgdst = rctx->sg_dst;
+ unsigned int remainin = sg_dma_len(sgsrc);
+ unsigned int remainout = sg_dma_len(sgdst);
+ dma_addr_t saddr = sg_dma_address(sgsrc);
+ dma_addr_t daddr = sg_dma_address(sgdst);
+ dma_addr_t state_addr;
+ u32 src_addr, dst_addr, len, n;
+ bool nextin = false;
+ bool nextout = false;
+ int offsetout = 0;
+ int err;
+
+ if (IS_ECB(rctx->flags))
+ rctx->sa_state_base = 0;
+
+ if (split < datalen) {
+ state_addr = rctx->sa_state_ctr_base;
+ n = split;
+ } else {
+ state_addr = rctx->sa_state_base;
+ n = datalen;
+ }
+
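+ /*
+ * The first 'split' bytes use the original counter in sa_state_ctr;
+ * the remainder switches to sa_state, which holds the counter value
+ * after the 32-bit wrap.
+ */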
+ do {
+ if (nextin) {
+ sgsrc = sg_next(sgsrc);
+ remainin = sg_dma_len(sgsrc);
+ if (remainin == 0)
+ continue;
+
+ saddr = sg_dma_address(sgsrc);
+ offsetin = 0;
+ nextin = false;
+ }
+
+ if (nextout) {
+ sgdst = sg_next(sgdst);
+ remainout = sg_dma_len(sgdst);
+ if (remainout == 0)
+ continue;
+
+ daddr = sg_dma_address(sgdst);
+ offsetout = 0;
+ nextout = false;
+ }
+ src_addr = saddr + offsetin;
+ dst_addr = daddr + offsetout;
+
+ if (remainin == remainout) {
+ len = remainin;
+ if (len > n) {
+ len = n;
+ remainin -= n;
+ remainout -= n;
+ offsetin += n;
+ offsetout += n;
+ } else {
+ nextin = true;
+ nextout = true;
+ }
+ } else if (remainin < remainout) {
+ len = remainin;
+ if (len > n) {
+ len = n;
+ remainin -= n;
+ remainout -= n;
+ offsetin += n;
+ offsetout += n;
+ } else {
+ offsetout += len;
+ remainout -= len;
+ nextin = true;
+ }
+ } else {
+ len = remainout;
+ if (len > n) {
+ len = n;
+ remainin -= n;
+ remainout -= n;
+ offsetin += n;
+ offsetout += n;
+ } else {
+ offsetin += len;
+ remainin -= len;
+ nextout = true;
+ }
+ }
+ n -= len;
+
+ cdesc->src_addr = src_addr;
+ cdesc->dst_addr = dst_addr;
+ cdesc->state_addr = state_addr;
+ cdesc->pe_length_word = FIELD_PREP(EIP93_PE_LENGTH_HOST_PE_READY,
+ EIP93_PE_LENGTH_HOST_READY);
+ cdesc->pe_length_word |= FIELD_PREP(EIP93_PE_LENGTH_LENGTH, len);
+
+ if (n == 0) {
+ n = datalen - split;
+ split = datalen;
+ state_addr = rctx->sa_state_base;
+ }
+
+ if (n == 0)
+ cdesc->user_id |= FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS,
+ EIP93_DESC_LAST);
+
+ /*
+ * Retry in a loop with a short delay; nothing needs to be rolled back.
+ * This could be refined by backing off while EIP93_RING_BUSY is reported.
+ */
+again:
+ scoped_guard(spinlock_irqsave, &eip93->ring->write_lock)
+ err = eip93_put_descriptor(eip93, cdesc);
+ if (err) {
+ usleep_range(EIP93_RING_BUSY_DELAY,
+ EIP93_RING_BUSY_DELAY * 2);
+ goto again;
+ }
+ /* Writing new descriptor count starts DMA action */
+ writel(1, eip93->base + EIP93_REG_PE_CD_COUNT);
+ } while (n);
+
+ return -EINPROGRESS;
+}
+
+int eip93_send_req(struct crypto_async_request *async,
+ const u8 *reqiv, struct eip93_cipher_reqctx *rctx)
+{
+ struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm);
+ struct eip93_device *eip93 = ctx->eip93;
+ struct scatterlist *src = rctx->sg_src;
+ struct scatterlist *dst = rctx->sg_dst;
+ struct sa_state *sa_state;
+ struct eip93_descriptor cdesc;
+ u32 flags = rctx->flags;
+ int offsetin = 0, err;
+ u32 datalen = rctx->assoclen + rctx->textsize;
+ u32 split = datalen;
+ u32 start, end, ctr, blocks;
+ u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
+ int crypto_async_idr;
+
+ rctx->sa_state_ctr = NULL;
+ rctx->sa_state = NULL;
+
+ if (IS_ECB(flags))
+ goto skip_iv;
+
+ memcpy(iv, reqiv, rctx->ivsize);
+
+ rctx->sa_state = kzalloc(sizeof(*rctx->sa_state), GFP_KERNEL);
+ if (!rctx->sa_state)
+ return -ENOMEM;
+
+ sa_state = rctx->sa_state;
+
+ memcpy(sa_state->state_iv, iv, rctx->ivsize);
+ if (IS_RFC3686(flags)) {
+ sa_state->state_iv[0] = ctx->sa_nonce;
+ sa_state->state_iv[1] = iv[0];
+ sa_state->state_iv[2] = iv[1];
+ sa_state->state_iv[3] = (u32 __force)cpu_to_be32(0x1);
+ } else if (!IS_HMAC(flags) && IS_CTR(flags)) {
+ /* Compute data length. */
+ blocks = DIV_ROUND_UP(rctx->textsize, AES_BLOCK_SIZE);
+ ctr = be32_to_cpu((__be32 __force)iv[3]);
+ /* Check 32bit counter overflow. */
+ start = ctr;
+ end = start + blocks - 1;
+ if (end < start) {
+ split = AES_BLOCK_SIZE * -start;
+ /*
+ * Increment the counter manually to cope with
+ * the hardware counter overflow.
+ */
+ iv[3] = 0xffffffff;
+ crypto_inc((u8 *)iv, AES_BLOCK_SIZE);
+
+ rctx->sa_state_ctr = kzalloc(sizeof(*rctx->sa_state_ctr),
+ GFP_KERNEL);
+ if (!rctx->sa_state_ctr) {
+ err = -ENOMEM;
+ goto free_sa_state;
+ }
+
+ memcpy(rctx->sa_state_ctr->state_iv, reqiv, rctx->ivsize);
+ memcpy(sa_state->state_iv, iv, rctx->ivsize);
+
+ rctx->sa_state_ctr_base = dma_map_single(eip93->dev, rctx->sa_state_ctr,
+ sizeof(*rctx->sa_state_ctr),
+ DMA_TO_DEVICE);
+ err = dma_mapping_error(eip93->dev, rctx->sa_state_ctr_base);
+ if (err)
+ goto free_sa_state_ctr;
+ }
+ }
+
+ rctx->sa_state_base = dma_map_single(eip93->dev, rctx->sa_state,
+ sizeof(*rctx->sa_state), DMA_TO_DEVICE);
+ err = dma_mapping_error(eip93->dev, rctx->sa_state_base);
+ if (err)
+ goto free_sa_state_ctr_dma;
+
+skip_iv:
+
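+ /* Build the template command descriptor shared by all segments of this request */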
+ cdesc.pe_ctrl_stat_word = FIELD_PREP(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN,
+ EIP93_PE_CTRL_HOST_READY);
+ cdesc.sa_addr = rctx->sa_record_base;
+ cdesc.arc4_addr = 0;
+
+ scoped_guard(spinlock_bh, &eip93->ring->idr_lock)
+ crypto_async_idr = idr_alloc(&eip93->ring->crypto_async_idr, async, 0,
+ EIP93_RING_NUM - 1, GFP_ATOMIC);
+
+ cdesc.user_id = FIELD_PREP(EIP93_PE_USER_ID_CRYPTO_IDR, (u16)crypto_async_idr) |
+ FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, rctx->desc_flags);
+
+ rctx->cdesc = &cdesc;
+
+ /*
+ * Map the destination DMA_BIDIRECTIONAL so its cache lines are
+ * invalidated as well (this implies __dma_cache_wback_inv).
+ */
+ if (!dma_map_sg(eip93->dev, dst, rctx->dst_nents, DMA_BIDIRECTIONAL)) {
+ err = -ENOMEM;
+ goto free_sa_state_ctr_dma;
+ }
+
+ if (src != dst &&
+ !dma_map_sg(eip93->dev, src, rctx->src_nents, DMA_TO_DEVICE)) {
+ err = -ENOMEM;
+ goto free_sg_dma;
+ }
+
+ return eip93_scatter_combine(eip93, rctx, datalen, split, offsetin);
+
+free_sg_dma:
+ dma_unmap_sg(eip93->dev, dst, rctx->dst_nents, DMA_BIDIRECTIONAL);
+free_sa_state_ctr_dma:
+ if (rctx->sa_state_ctr)
+ dma_unmap_single(eip93->dev, rctx->sa_state_ctr_base,
+ sizeof(*rctx->sa_state_ctr),
+ DMA_TO_DEVICE);
+free_sa_state_ctr:
+ kfree(rctx->sa_state_ctr);
+ if (rctx->sa_state)
+ dma_unmap_single(eip93->dev, rctx->sa_state_base,
+ sizeof(*rctx->sa_state),
+ DMA_TO_DEVICE);
+free_sa_state:
+ kfree(rctx->sa_state);
+
+ return err;
+}
+
+void eip93_unmap_dma(struct eip93_device *eip93, struct eip93_cipher_reqctx *rctx,
+ struct scatterlist *reqsrc, struct scatterlist *reqdst)
+{
+ u32 len = rctx->assoclen + rctx->textsize;
+ u32 authsize = rctx->authsize;
+ u32 flags = rctx->flags;
+ u32 *otag;
+ int i;
+
+ if (rctx->sg_src == rctx->sg_dst) {
+ dma_unmap_sg(eip93->dev, rctx->sg_dst, rctx->dst_nents,
+ DMA_BIDIRECTIONAL);
+ goto process_tag;
+ }
+
+ dma_unmap_sg(eip93->dev, rctx->sg_src, rctx->src_nents,
+ DMA_TO_DEVICE);
+
+ if (rctx->sg_src != reqsrc)
+ eip93_free_sg_copy(len + rctx->authsize, &rctx->sg_src);
+
+ dma_unmap_sg(eip93->dev, rctx->sg_dst, rctx->dst_nents,
+ DMA_BIDIRECTIONAL);
+
+ /* SHA tags need conversion from net-to-host */
+process_tag:
+ if (IS_DECRYPT(flags))
+ authsize = 0;
+
+ if (authsize) {
+ if (!IS_HASH_MD5(flags)) {
+ otag = sg_virt(rctx->sg_dst) + len;
+ for (i = 0; i < (authsize / 4); i++)
+ otag[i] = be32_to_cpu((__be32 __force)otag[i]);
+ }
+ }
+
+ if (rctx->sg_dst != reqdst) {
+ sg_copy_from_buffer(reqdst, sg_nents(reqdst),
+ sg_virt(rctx->sg_dst), len + authsize);
+ eip93_free_sg_copy(len + rctx->authsize, &rctx->sg_dst);
+ }
+}
+
+void eip93_handle_result(struct eip93_device *eip93, struct eip93_cipher_reqctx *rctx,
+ u8 *reqiv)
+{
+ if (rctx->sa_state_ctr)
+ dma_unmap_single(eip93->dev, rctx->sa_state_ctr_base,
+ sizeof(*rctx->sa_state_ctr),
+ DMA_FROM_DEVICE);
+
+ if (rctx->sa_state)
+ dma_unmap_single(eip93->dev, rctx->sa_state_base,
+ sizeof(*rctx->sa_state),
+ DMA_FROM_DEVICE);
+
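+ /* Return the updated IV to the caller so chained requests can continue */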
+ if (!IS_ECB(rctx->flags))
+ memcpy(reqiv, rctx->sa_state->state_iv, rctx->ivsize);
+
+ kfree(rctx->sa_state_ctr);
+ kfree(rctx->sa_state);
+}
+
+int eip93_hmac_setkey(u32 ctx_flags, const u8 *key, unsigned int keylen,
+ unsigned int hashlen, u8 *dest_ipad, u8 *dest_opad,
+ bool skip_ipad)
+{
+ u8 ipad[SHA256_BLOCK_SIZE], opad[SHA256_BLOCK_SIZE];
+ struct crypto_ahash *ahash_tfm;
+ struct eip93_hash_reqctx *rctx;
+ struct ahash_request *req;
+ DECLARE_CRYPTO_WAIT(wait);
+ struct scatterlist sg[1];
+ const char *alg_name;
+ int i, ret;
+
+ switch (ctx_flags & EIP93_HASH_MASK) {
+ case EIP93_HASH_SHA256:
+ alg_name = "sha256-eip93";
+ break;
+ case EIP93_HASH_SHA224:
+ alg_name = "sha224-eip93";
+ break;
+ case EIP93_HASH_SHA1:
+ alg_name = "sha1-eip93";
+ break;
+ case EIP93_HASH_MD5:
+ alg_name = "md5-eip93";
+ break;
+ default: /* Impossible */
+ return -EINVAL;
+ }
+
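+ /* Use the driver's own ahash implementations to precompute the HMAC ipad/opad digests */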
+ ahash_tfm = crypto_alloc_ahash(alg_name, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ahash_tfm))
+ return PTR_ERR(ahash_tfm);
+
+ req = ahash_request_alloc(ahash_tfm, GFP_ATOMIC);
+ if (!req) {
+ ret = -ENOMEM;
+ goto err_ahash;
+ }
+
+ rctx = ahash_request_ctx_dma(req);
+ crypto_init_wait(&wait);
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+
+ /* Hash the key if > SHA256_BLOCK_SIZE */
+ if (keylen > SHA256_BLOCK_SIZE) {
+ sg_init_one(&sg[0], key, keylen);
+
+ ahash_request_set_crypt(req, sg, ipad, keylen);
+ ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
+ if (ret)
+ goto err_req;
+
+ keylen = hashlen;
+ } else {
+ memcpy(ipad, key, keylen);
+ }
+
+ /* Copy to opad */
+ memset(ipad + keylen, 0, SHA256_BLOCK_SIZE - keylen);
+ memcpy(opad, ipad, SHA256_BLOCK_SIZE);
+
+ /* Pad with HMAC constants */
+ for (i = 0; i < SHA256_BLOCK_SIZE; i++) {
+ ipad[i] ^= HMAC_IPAD_VALUE;
+ opad[i] ^= HMAC_OPAD_VALUE;
+ }
+
+ if (skip_ipad) {
+ memcpy(dest_ipad, ipad, SHA256_BLOCK_SIZE);
+ } else {
+ /* Hash ipad */
+ sg_init_one(&sg[0], ipad, SHA256_BLOCK_SIZE);
+ ahash_request_set_crypt(req, sg, dest_ipad, SHA256_BLOCK_SIZE);
+ ret = crypto_ahash_init(req);
+ if (ret)
+ goto err_req;
+
+ /* Disable HASH_FINALIZE for ipad hash */
+ rctx->partial_hash = true;
+
+ ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
+ if (ret)
+ goto err_req;
+ }
+
+ /* Hash opad */
+ sg_init_one(&sg[0], opad, SHA256_BLOCK_SIZE);
+ ahash_request_set_crypt(req, sg, dest_opad, SHA256_BLOCK_SIZE);
+ ret = crypto_ahash_init(req);
+ if (ret)
+ goto err_req;
+
+ /* Disable HASH_FINALIZE for opad hash */
+ rctx->partial_hash = true;
+
+ ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
+ if (ret)
+ goto err_req;
+
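+ /* The engine expects the precomputed SHA digests in big-endian order; MD5 is kept as is */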
+ if (!IS_HASH_MD5(ctx_flags)) {
+ for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++) {
+ u32 *ipad_hash = (u32 *)dest_ipad;
+ u32 *opad_hash = (u32 *)dest_opad;
+
+ if (!skip_ipad)
+ ipad_hash[i] = (u32 __force)cpu_to_be32(ipad_hash[i]);
+ opad_hash[i] = (u32 __force)cpu_to_be32(opad_hash[i]);
+ }
+ }
+
+err_req:
+ ahash_request_free(req);
+err_ahash:
+ crypto_free_ahash(ahash_tfm);
+
+ return ret;
+}
diff --git a/drivers/crypto/inside-secure/eip93/eip93-common.h b/drivers/crypto/inside-secure/eip93/eip93-common.h
new file mode 100644
index 000000000000..80964cfa34df
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/eip93-common.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 - 2021
+ *
+ * Richard van Schagen <vschagen@icloud.com>
+ * Christian Marangi <ansuelsmth@gmail.com>
+ */
+
+#ifndef _EIP93_COMMON_H_
+#define _EIP93_COMMON_H_
+
+void *eip93_get_descriptor(struct eip93_device *eip93);
+int eip93_put_descriptor(struct eip93_device *eip93, struct eip93_descriptor *desc);
+
+void eip93_set_sa_record(struct sa_record *sa_record, const unsigned int keylen,
+ const u32 flags);
+
+int eip93_parse_ctrl_stat_err(struct eip93_device *eip93, int err);
+
+int eip93_hmac_setkey(u32 ctx_flags, const u8 *key, unsigned int keylen,
+ unsigned int hashlen, u8 *ipad, u8 *opad,
+ bool skip_ipad);
+
+#endif /* _EIP93_COMMON_H_ */
diff --git a/drivers/crypto/inside-secure/eip93/eip93-des.h b/drivers/crypto/inside-secure/eip93/eip93-des.h
new file mode 100644
index 000000000000..74748df04acf
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/eip93-des.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 - 2021
+ *
+ * Richard van Schagen <vschagen@icloud.com>
+ * Christian Marangi <ansuelsmth@gmail.com>
+ */
+#ifndef _EIP93_DES_H_
+#define _EIP93_DES_H_
+
+extern struct eip93_alg_template eip93_alg_ecb_des;
+extern struct eip93_alg_template eip93_alg_cbc_des;
+extern struct eip93_alg_template eip93_alg_ecb_des3_ede;
+extern struct eip93_alg_template eip93_alg_cbc_des3_ede;
+
+#endif /* _EIP93_DES_H_ */
diff --git a/drivers/crypto/inside-secure/eip93/eip93-hash.c b/drivers/crypto/inside-secure/eip93/eip93-hash.c
new file mode 100644
index 000000000000..5e9627467a42
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/eip93-hash.c
@@ -0,0 +1,866 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024
+ *
+ * Christian Marangi <ansuelsmth@gmail.com>
+ */
+
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <crypto/md5.h>
+#include <crypto/hmac.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+
+#include "eip93-cipher.h"
+#include "eip93-hash.h"
+#include "eip93-main.h"
+#include "eip93-common.h"
+#include "eip93-regs.h"
+
+static void eip93_hash_free_data_blocks(struct ahash_request *req)
+{
+ struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct eip93_device *eip93 = ctx->eip93;
+ struct mkt_hash_block *block, *tmp;
+
+ list_for_each_entry_safe(block, tmp, &rctx->blocks, list) {
+ dma_unmap_single(eip93->dev, block->data_dma,
+ SHA256_BLOCK_SIZE, DMA_TO_DEVICE);
+ kfree(block);
+ }
+ if (!list_empty(&rctx->blocks))
+ INIT_LIST_HEAD(&rctx->blocks);
+
+ if (rctx->finalize)
+ dma_unmap_single(eip93->dev, rctx->data_dma,
+ rctx->data_used,
+ DMA_TO_DEVICE);
+}
+
+static void eip93_hash_free_sa_record(struct ahash_request *req)
+{
+ struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct eip93_device *eip93 = ctx->eip93;
+
+ if (IS_HMAC(ctx->flags))
+ dma_unmap_single(eip93->dev, rctx->sa_record_hmac_base,
+ sizeof(rctx->sa_record_hmac), DMA_TO_DEVICE);
+
+ dma_unmap_single(eip93->dev, rctx->sa_record_base,
+ sizeof(rctx->sa_record), DMA_TO_DEVICE);
+}
+
+void eip93_hash_handle_result(struct crypto_async_request *async, int err)
+{
+ struct ahash_request *req = ahash_request_cast(async);
+ struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct sa_state *sa_state = &rctx->sa_state;
+ struct eip93_device *eip93 = ctx->eip93;
+ int i;
+
+ dma_unmap_single(eip93->dev, rctx->sa_state_base,
+ sizeof(*sa_state), DMA_FROM_DEVICE);
+
+ /*
+ * With partial_hash, assume a SHA256_DIGEST_SIZE buffer is passed.
+ * This handles SHA224, which has a 32 byte intermediate digest.
+ */
+ if (rctx->partial_hash)
+ digestsize = SHA256_DIGEST_SIZE;
+
+ if (rctx->finalize || rctx->partial_hash) {
+ /* bytes needs to be swapped for req->result */
+ if (!IS_HASH_MD5(ctx->flags)) {
+ for (i = 0; i < digestsize / sizeof(u32); i++) {
+ u32 *digest = (u32 *)sa_state->state_i_digest;
+
+ digest[i] = be32_to_cpu((__be32 __force)digest[i]);
+ }
+ }
+
+ memcpy(req->result, sa_state->state_i_digest, digestsize);
+ }
+
+ eip93_hash_free_sa_record(req);
+ eip93_hash_free_data_blocks(req);
+
+ ahash_request_complete(req, err);
+}
+
+static void eip93_hash_init_sa_state_digest(u32 hash, u8 *digest)
+{
+ u32 sha256_init[] = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+ SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 };
+ u32 sha224_init[] = { SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
+ SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7 };
+ u32 sha1_init[] = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 };
+ u32 md5_init[] = { MD5_H0, MD5_H1, MD5_H2, MD5_H3 };
+
+ /* Init HASH constant */
+ switch (hash) {
+ case EIP93_HASH_SHA256:
+ memcpy(digest, sha256_init, sizeof(sha256_init));
+ return;
+ case EIP93_HASH_SHA224:
+ memcpy(digest, sha224_init, sizeof(sha224_init));
+ return;
+ case EIP93_HASH_SHA1:
+ memcpy(digest, sha1_init, sizeof(sha1_init));
+ return;
+ case EIP93_HASH_MD5:
+ memcpy(digest, md5_init, sizeof(md5_init));
+ return;
+ default: /* Impossible */
+ return;
+ }
+}
+
+static void eip93_hash_export_sa_state(struct ahash_request *req,
+ struct eip93_hash_export_state *state)
+{
+ struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+ struct sa_state *sa_state = &rctx->sa_state;
+
+ /*
+ * The EIP93 has special handling for state_byte_cnt in sa_state.
+ * Even if a zero-length packet is passed (and a BADMSG is returned),
+ * state_byte_cnt is still incremented for the digest handled (by the
+ * hash primitive). This is problematic for export/import, as the
+ * EIP93 expects a zero state_byte_cnt for the very first iteration.
+ */
+ if (!rctx->len)
+ memset(state->state_len, 0, sizeof(u32) * 2);
+ else
+ memcpy(state->state_len, sa_state->state_byte_cnt,
+ sizeof(u32) * 2);
+ memcpy(state->state_hash, sa_state->state_i_digest,
+ SHA256_DIGEST_SIZE);
+ state->len = rctx->len;
+ state->data_used = rctx->data_used;
+}
+
+static void __eip93_hash_init(struct ahash_request *req)
+{
+ struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct sa_record *sa_record = &rctx->sa_record;
+ int digestsize;
+
+ digestsize = crypto_ahash_digestsize(ahash);
+
+ eip93_set_sa_record(sa_record, 0, ctx->flags);
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_FROM_STATE;
+ sa_record->sa_cmd0_word |= EIP93_SA_CMD_SAVE_HASH;
+ sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_OPCODE;
+ sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_OPCODE,
+ EIP93_SA_CMD_OPCODE_BASIC_OUT_HASH);
+ sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_DIGEST_LENGTH;
+ sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH,
+ digestsize / sizeof(u32));
+
+ /*
+ * HMAC special handling
+ * Enabling CMD_HMAC forces the inner hash to always be finalized.
+ * This causes problems when handling messages larger than 64 bytes,
+ * as we need to produce intermediate inner hashes while sending the
+ * intermediate 64 byte blocks.
+ *
+ * To handle this, enable CMD_HMAC only on the last block: keep a
+ * duplicate of the sa_record and, for the last descriptor, pass the
+ * dedicated copy with CMD_HMAC enabled so the EIP93 applies the
+ * outer hash.
+ */
+ if (IS_HMAC(ctx->flags)) {
+ struct sa_record *sa_record_hmac = &rctx->sa_record_hmac;
+
+ memcpy(sa_record_hmac, sa_record, sizeof(*sa_record));
+ /* Copy pre-hashed opad for HMAC */
+ memcpy(sa_record_hmac->sa_o_digest, ctx->opad, SHA256_DIGEST_SIZE);
+
+ /* Disable HMAC for hash normal sa_record */
+ sa_record->sa_cmd1_word &= ~EIP93_SA_CMD_HMAC;
+ }
+
+ rctx->len = 0;
+ rctx->data_used = 0;
+ rctx->partial_hash = false;
+ rctx->finalize = false;
+ INIT_LIST_HEAD(&rctx->blocks);
+}
+
+static int eip93_send_hash_req(struct crypto_async_request *async, u8 *data,
+ dma_addr_t *data_dma, u32 len, bool last)
+{
+ struct ahash_request *req = ahash_request_cast(async);
+ struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct eip93_device *eip93 = ctx->eip93;
+ struct eip93_descriptor cdesc = { };
+ dma_addr_t src_addr;
+ int ret;
+
+ /* Map block data to DMA */
+ src_addr = dma_map_single(eip93->dev, data, len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(eip93->dev, src_addr);
+ if (ret)
+ return ret;
+
+ cdesc.pe_ctrl_stat_word = FIELD_PREP(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN,
+ EIP93_PE_CTRL_HOST_READY);
+ cdesc.sa_addr = rctx->sa_record_base;
+ cdesc.arc4_addr = 0;
+
+ cdesc.state_addr = rctx->sa_state_base;
+ cdesc.src_addr = src_addr;
+ cdesc.pe_length_word = FIELD_PREP(EIP93_PE_LENGTH_HOST_PE_READY,
+ EIP93_PE_LENGTH_HOST_READY);
+ cdesc.pe_length_word |= FIELD_PREP(EIP93_PE_LENGTH_LENGTH,
+ len);
+
+ cdesc.user_id |= FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, EIP93_DESC_HASH);
+
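+ /* Only the last descriptor carries the async request IDR and, when finalizing, the PE_HASH_FINAL flag */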
+ if (last) {
+ int crypto_async_idr;
+
+ if (rctx->finalize && !rctx->partial_hash) {
+ /* For last block, pass sa_record with CMD_HMAC enabled */
+ if (IS_HMAC(ctx->flags)) {
+ struct sa_record *sa_record_hmac = &rctx->sa_record_hmac;
+
+ rctx->sa_record_hmac_base = dma_map_single(eip93->dev,
+ sa_record_hmac,
+ sizeof(*sa_record_hmac),
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(eip93->dev, rctx->sa_record_hmac_base);
+ if (ret)
+ return ret;
+
+ cdesc.sa_addr = rctx->sa_record_hmac_base;
+ }
+
+ cdesc.pe_ctrl_stat_word |= EIP93_PE_CTRL_PE_HASH_FINAL;
+ }
+
+ scoped_guard(spinlock_bh, &eip93->ring->idr_lock)
+ crypto_async_idr = idr_alloc(&eip93->ring->crypto_async_idr, async, 0,
+ EIP93_RING_NUM - 1, GFP_ATOMIC);
+
+ cdesc.user_id |= FIELD_PREP(EIP93_PE_USER_ID_CRYPTO_IDR, (u16)crypto_async_idr) |
+ FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, EIP93_DESC_LAST);
+ }
+
+again:
+ ret = eip93_put_descriptor(eip93, &cdesc);
+ if (ret) {
+ usleep_range(EIP93_RING_BUSY_DELAY,
+ EIP93_RING_BUSY_DELAY * 2);
+ goto again;
+ }
+
+ /* Writing new descriptor count starts DMA action */
+ writel(1, eip93->base + EIP93_REG_PE_CD_COUNT);
+
+ *data_dma = src_addr;
+ return 0;
+}
+
+static int eip93_hash_init(struct ahash_request *req)
+{
+ struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct sa_state *sa_state = &rctx->sa_state;
+
+ memset(sa_state->state_byte_cnt, 0, sizeof(u32) * 2);
+ eip93_hash_init_sa_state_digest(ctx->flags & EIP93_HASH_MASK,
+ sa_state->state_i_digest);
+
+ __eip93_hash_init(req);
+
+ /* For HMAC, set up the initial block with the ipad */
+ if (IS_HMAC(ctx->flags)) {
+ memcpy(rctx->data, ctx->ipad, SHA256_BLOCK_SIZE);
+
+ rctx->data_used = SHA256_BLOCK_SIZE;
+ rctx->len += SHA256_BLOCK_SIZE;
+ }
+
+ return 0;
+}
+
+/*
+ * With complete_req true, we wait for the engine to consume all the blocks in
+ * the list; otherwise we just queue the blocks to the engine, as final() will
+ * wait for them. This is useful for finup().
+ */
+static int __eip93_hash_update(struct ahash_request *req, bool complete_req)
+{
+ struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+ struct crypto_async_request *async = &req->base;
+ unsigned int read, to_consume = req->nbytes;
+ unsigned int max_read, consumed = 0;
+ struct mkt_hash_block *block;
+ bool wait_req = false;
+ int offset;
+ int ret;
+
+ /* Get the offset and available space to fill req data */
+ offset = rctx->data_used;
+ max_read = SHA256_BLOCK_SIZE - offset;
+
+ /*
+ * Consume req in blocks of SHA256_BLOCK_SIZE.
+ * max_read is initially set to the space available in the req data
+ * buffer and is reset to SHA256_BLOCK_SIZE for later iterations.
+ */
+ while (to_consume > max_read) {
+ block = kzalloc(sizeof(*block), GFP_ATOMIC);
+ if (!block) {
+ ret = -ENOMEM;
+ goto free_blocks;
+ }
+
+ read = sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+ block->data + offset,
+ max_read, consumed);
+
+ /*
+ * For first iteration only, copy req data to block
+ * and reset offset and max_read for next iteration.
+ */
+ if (offset > 0) {
+ memcpy(block->data, rctx->data, offset);
+ offset = 0;
+ max_read = SHA256_BLOCK_SIZE;
+ }
+
+ list_add(&block->list, &rctx->blocks);
+ to_consume -= read;
+ consumed += read;
+ }
+
+ /* Write the remaining data to req data */
+ read = sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+ rctx->data + offset, to_consume,
+ consumed);
+ rctx->data_used = offset + read;
+
+ /* Update counter with processed bytes */
+ rctx->len += read + consumed;
+
+ /* Consume all the blocks added to the list */
+ list_for_each_entry_reverse(block, &rctx->blocks, list) {
+ wait_req = complete_req &&
+ list_is_first(&block->list, &rctx->blocks);
+
+ ret = eip93_send_hash_req(async, block->data,
+ &block->data_dma,
+ SHA256_BLOCK_SIZE, wait_req);
+ if (ret)
+ goto free_blocks;
+ }
+
+ return wait_req ? -EINPROGRESS : 0;
+
+free_blocks:
+ eip93_hash_free_data_blocks(req);
+
+ return ret;
+}
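As a concrete trace of the loop above (values picked for illustration only): with rctx->data_used = 20 and req->nbytes = 100, offset starts at 20 and max_read at 44. The first pass copies 44 new bytes after the 20 already buffered, prepends the buffered bytes, and queues one full 64-byte block; offset and max_read are then reset to 0 and 64. The remaining 56 bytes are less than a block, so they are copied back into rctx->data with data_used = 56, and rctx->len grows by the full 100 bytes.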
+
+static int eip93_hash_update(struct ahash_request *req)
+{
+ struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct sa_record *sa_record = &rctx->sa_record;
+ struct sa_state *sa_state = &rctx->sa_state;
+ struct eip93_device *eip93 = ctx->eip93;
+ int ret;
+
+ if (!req->nbytes)
+ return 0;
+
+ rctx->sa_state_base = dma_map_single(eip93->dev, sa_state,
+ sizeof(*sa_state),
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(eip93->dev, rctx->sa_state_base);
+ if (ret)
+ return ret;
+
+ rctx->sa_record_base = dma_map_single(eip93->dev, sa_record,
+ sizeof(*sa_record),
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(eip93->dev, rctx->sa_record_base);
+ if (ret)
+ goto free_sa_state;
+
+ ret = __eip93_hash_update(req, true);
+ if (ret && ret != -EINPROGRESS)
+ goto free_sa_record;
+
+ return ret;
+
+free_sa_record:
+ dma_unmap_single(eip93->dev, rctx->sa_record_base,
+ sizeof(*sa_record), DMA_TO_DEVICE);
+
+free_sa_state:
+ dma_unmap_single(eip93->dev, rctx->sa_state_base,
+ sizeof(*sa_state), DMA_TO_DEVICE);
+
+ return ret;
+}
+
+/*
+ * With map_dma true, we map the sa_record and sa_state. This is skipped
+ * for finup() as they are already mapped before calling update().
+ */
+static int __eip93_hash_final(struct ahash_request *req, bool map_dma)
+{
+ struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct crypto_async_request *async = &req->base;
+ struct sa_record *sa_record = &rctx->sa_record;
+ struct sa_state *sa_state = &rctx->sa_state;
+ struct eip93_device *eip93 = ctx->eip93;
+ int ret;
+
+ /* EIP93 can't handle a zero-byte hash */
+ if (!rctx->len && !IS_HMAC(ctx->flags)) {
+ switch ((ctx->flags & EIP93_HASH_MASK)) {
+ case EIP93_HASH_SHA256:
+ memcpy(req->result, sha256_zero_message_hash,
+ SHA256_DIGEST_SIZE);
+ break;
+ case EIP93_HASH_SHA224:
+ memcpy(req->result, sha224_zero_message_hash,
+ SHA224_DIGEST_SIZE);
+ break;
+ case EIP93_HASH_SHA1:
+ memcpy(req->result, sha1_zero_message_hash,
+ SHA1_DIGEST_SIZE);
+ break;
+ case EIP93_HASH_MD5:
+ memcpy(req->result, md5_zero_message_hash,
+ MD5_DIGEST_SIZE);
+ break;
+ default: /* Impossible */
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ /* Signal that the interrupt from the engine is for the last block */
+ rctx->finalize = true;
+
+ if (map_dma) {
+ rctx->sa_state_base = dma_map_single(eip93->dev, sa_state,
+ sizeof(*sa_state),
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(eip93->dev, rctx->sa_state_base);
+ if (ret)
+ return ret;
+
+ rctx->sa_record_base = dma_map_single(eip93->dev, sa_record,
+ sizeof(*sa_record),
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(eip93->dev, rctx->sa_record_base);
+ if (ret)
+ goto free_sa_state;
+ }
+
+ /* Send last block */
+ ret = eip93_send_hash_req(async, rctx->data, &rctx->data_dma,
+ rctx->data_used, true);
+ if (ret)
+ goto free_blocks;
+
+ return -EINPROGRESS;
+
+free_blocks:
+ eip93_hash_free_data_blocks(req);
+
+ dma_unmap_single(eip93->dev, rctx->sa_record_base,
+ sizeof(*sa_record), DMA_TO_DEVICE);
+
+free_sa_state:
+ dma_unmap_single(eip93->dev, rctx->sa_state_base,
+ sizeof(*sa_state), DMA_TO_DEVICE);
+
+ return ret;
+}
+
+static int eip93_hash_final(struct ahash_request *req)
+{
+ return __eip93_hash_final(req, true);
+}
+
+static int eip93_hash_finup(struct ahash_request *req)
+{
+ struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct sa_record *sa_record = &rctx->sa_record;
+ struct sa_state *sa_state = &rctx->sa_state;
+ struct eip93_device *eip93 = ctx->eip93;
+ int ret;
+
+ if (rctx->len + req->nbytes || IS_HMAC(ctx->flags)) {
+ rctx->sa_state_base = dma_map_single(eip93->dev, sa_state,
+ sizeof(*sa_state),
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(eip93->dev, rctx->sa_state_base);
+ if (ret)
+ return ret;
+
+ rctx->sa_record_base = dma_map_single(eip93->dev, sa_record,
+ sizeof(*sa_record),
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(eip93->dev, rctx->sa_record_base);
+ if (ret)
+ goto free_sa_state;
+
+ ret = __eip93_hash_update(req, false);
+ if (ret)
+ goto free_sa_record;
+ }
+
+ return __eip93_hash_final(req, false);
+
+free_sa_record:
+ dma_unmap_single(eip93->dev, rctx->sa_record_base,
+ sizeof(*sa_record), DMA_TO_DEVICE);
+free_sa_state:
+ dma_unmap_single(eip93->dev, rctx->sa_state_base,
+ sizeof(*sa_state), DMA_TO_DEVICE);
+
+ return ret;
+}
+
+static int eip93_hash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
+ u32 keylen)
+{
+ unsigned int digestsize = crypto_ahash_digestsize(ahash);
+ struct crypto_tfm *tfm = crypto_ahash_tfm(ahash);
+ struct eip93_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ return eip93_hmac_setkey(ctx->flags, key, keylen, digestsize,
+ ctx->ipad, ctx->opad, true);
+}
+
+static int eip93_hash_cra_init(struct crypto_tfm *tfm)
+{
+ struct eip93_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg,
+ struct eip93_alg_template, alg.ahash.halg.base);
+
+ crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
+ sizeof(struct eip93_hash_reqctx));
+
+ ctx->eip93 = tmpl->eip93;
+ ctx->flags = tmpl->flags;
+
+ return 0;
+}
+
+static int eip93_hash_digest(struct ahash_request *req)
+{
+ int ret;
+
+ ret = eip93_hash_init(req);
+ if (ret)
+ return ret;
+
+ return eip93_hash_finup(req);
+}
+
+static int eip93_hash_import(struct ahash_request *req, const void *in)
+{
+ struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+ const struct eip93_hash_export_state *state = in;
+ struct sa_state *sa_state = &rctx->sa_state;
+
+ memcpy(sa_state->state_byte_cnt, state->state_len, sizeof(u32) * 2);
+ memcpy(sa_state->state_i_digest, state->state_hash, SHA256_DIGEST_SIZE);
+
+ __eip93_hash_init(req);
+
+ rctx->len = state->len;
+ rctx->data_used = state->data_used;
+
+ /* Skip copying data if we have nothing to copy */
+ if (rctx->len)
+ memcpy(rctx->data, state->data, rctx->data_used);
+
+ return 0;
+}
+
+static int eip93_hash_export(struct ahash_request *req, void *out)
+{
+ struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+ struct eip93_hash_export_state *state = out;
+
+ /* Save the first block in state data */
+ if (rctx->len)
+ memcpy(state->data, rctx->data, rctx->data_used);
+
+ eip93_hash_export_sa_state(req, state);
+
+ return 0;
+}
+
+struct eip93_alg_template eip93_alg_md5 = {
+ .type = EIP93_ALG_TYPE_HASH,
+ .flags = EIP93_HASH_MD5,
+ .alg.ahash = {
+ .init = eip93_hash_init,
+ .update = eip93_hash_update,
+ .final = eip93_hash_final,
+ .finup = eip93_hash_finup,
+ .digest = eip93_hash_digest,
+ .export = eip93_hash_export,
+ .import = eip93_hash_import,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct eip93_hash_export_state),
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "md5-eip93",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_hash_ctx),
+ .cra_init = eip93_hash_cra_init,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_sha1 = {
+ .type = EIP93_ALG_TYPE_HASH,
+ .flags = EIP93_HASH_SHA1,
+ .alg.ahash = {
+ .init = eip93_hash_init,
+ .update = eip93_hash_update,
+ .final = eip93_hash_final,
+ .finup = eip93_hash_finup,
+ .digest = eip93_hash_digest,
+ .export = eip93_hash_export,
+ .import = eip93_hash_import,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct eip93_hash_export_state),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-eip93",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_hash_ctx),
+ .cra_init = eip93_hash_cra_init,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_sha224 = {
+ .type = EIP93_ALG_TYPE_HASH,
+ .flags = EIP93_HASH_SHA224,
+ .alg.ahash = {
+ .init = eip93_hash_init,
+ .update = eip93_hash_update,
+ .final = eip93_hash_final,
+ .finup = eip93_hash_finup,
+ .digest = eip93_hash_digest,
+ .export = eip93_hash_export,
+ .import = eip93_hash_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct eip93_hash_export_state),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-eip93",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_hash_ctx),
+ .cra_init = eip93_hash_cra_init,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_sha256 = {
+ .type = EIP93_ALG_TYPE_HASH,
+ .flags = EIP93_HASH_SHA256,
+ .alg.ahash = {
+ .init = eip93_hash_init,
+ .update = eip93_hash_update,
+ .final = eip93_hash_final,
+ .finup = eip93_hash_finup,
+ .digest = eip93_hash_digest,
+ .export = eip93_hash_export,
+ .import = eip93_hash_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct eip93_hash_export_state),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-eip93",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_hash_ctx),
+ .cra_init = eip93_hash_cra_init,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_hmac_md5 = {
+ .type = EIP93_ALG_TYPE_HASH,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5,
+ .alg.ahash = {
+ .init = eip93_hash_init,
+ .update = eip93_hash_update,
+ .final = eip93_hash_final,
+ .finup = eip93_hash_finup,
+ .digest = eip93_hash_digest,
+ .setkey = eip93_hash_hmac_setkey,
+ .export = eip93_hash_export,
+ .import = eip93_hash_import,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct eip93_hash_export_state),
+ .base = {
+ .cra_name = "hmac(md5)",
+ .cra_driver_name = "hmac(md5-eip93)",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_hash_ctx),
+ .cra_init = eip93_hash_cra_init,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_hmac_sha1 = {
+ .type = EIP93_ALG_TYPE_HASH,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1,
+ .alg.ahash = {
+ .init = eip93_hash_init,
+ .update = eip93_hash_update,
+ .final = eip93_hash_final,
+ .finup = eip93_hash_finup,
+ .digest = eip93_hash_digest,
+ .setkey = eip93_hash_hmac_setkey,
+ .export = eip93_hash_export,
+ .import = eip93_hash_import,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct eip93_hash_export_state),
+ .base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "hmac(sha1-eip93)",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_hash_ctx),
+ .cra_init = eip93_hash_cra_init,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_hmac_sha224 = {
+ .type = EIP93_ALG_TYPE_HASH,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224,
+ .alg.ahash = {
+ .init = eip93_hash_init,
+ .update = eip93_hash_update,
+ .final = eip93_hash_final,
+ .finup = eip93_hash_finup,
+ .digest = eip93_hash_digest,
+ .setkey = eip93_hash_hmac_setkey,
+ .export = eip93_hash_export,
+ .import = eip93_hash_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct eip93_hash_export_state),
+ .base = {
+ .cra_name = "hmac(sha224)",
+ .cra_driver_name = "hmac(sha224-eip93)",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_hash_ctx),
+ .cra_init = eip93_hash_cra_init,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+struct eip93_alg_template eip93_alg_hmac_sha256 = {
+ .type = EIP93_ALG_TYPE_HASH,
+ .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256,
+ .alg.ahash = {
+ .init = eip93_hash_init,
+ .update = eip93_hash_update,
+ .final = eip93_hash_final,
+ .finup = eip93_hash_finup,
+ .digest = eip93_hash_digest,
+ .setkey = eip93_hash_hmac_setkey,
+ .export = eip93_hash_export,
+ .import = eip93_hash_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct eip93_hash_export_state),
+ .base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "hmac(sha256-eip93)",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct eip93_hash_ctx),
+ .cra_init = eip93_hash_cra_init,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
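As context for the ahash templates above, the following is a minimal sketch, not part of the patch, of how a kernel caller would exercise one of them through the generic crypto API once "sha256-eip93" is registered. The function name and the assumption that buf sits in linearly-mapped memory are illustrative only.

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static int example_sha256_digest(const u8 *buf, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	/* buf must not live on the stack or in vmalloc space for sg_init_one() */
	sg_init_one(&sg, buf, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	/* crypto_wait_req() turns the driver's -EINPROGRESS into a sync wait */
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}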
diff --git a/drivers/crypto/inside-secure/eip93/eip93-hash.h b/drivers/crypto/inside-secure/eip93/eip93-hash.h
new file mode 100644
index 000000000000..556f22fc1dd0
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/eip93-hash.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 - 2021
+ *
+ * Richard van Schagen <vschagen@icloud.com>
+ * Christian Marangi <ansuelsmth@gmail.com>
+ */
+#ifndef _EIP93_HASH_H_
+#define _EIP93_HASH_H_
+
+#include <crypto/sha2.h>
+
+#include "eip93-main.h"
+#include "eip93-regs.h"
+
+struct eip93_hash_ctx {
+ struct eip93_device *eip93;
+ u32 flags;
+
+ u8 ipad[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+ u8 opad[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
+};
+
+struct eip93_hash_reqctx {
+ /* Placement is important for DMA align */
+ struct {
+ struct sa_record sa_record;
+ struct sa_record sa_record_hmac;
+ struct sa_state sa_state;
+ } __aligned(CRYPTO_DMA_ALIGN);
+
+ dma_addr_t sa_record_base;
+ dma_addr_t sa_record_hmac_base;
+ dma_addr_t sa_state_base;
+
+ /* Don't enable HASH_FINALIZE when last block is sent */
+ bool partial_hash;
+
+ /* Set to signal interrupt is for final packet */
+ bool finalize;
+
+ /*
+ * EIP93 requires data to be accumulated in blocks of 64 bytes
+ * for intermediate hash calculation.
+ */
+ u64 len;
+ u32 data_used;
+
+ u8 data[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+ dma_addr_t data_dma;
+
+ struct list_head blocks;
+};
+
+struct mkt_hash_block {
+ struct list_head list;
+ u8 data[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+ dma_addr_t data_dma;
+};
+
+struct eip93_hash_export_state {
+ u64 len;
+ u32 data_used;
+
+ u32 state_len[2];
+ u8 state_hash[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
+
+ u8 data[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+};
+
+void eip93_hash_handle_result(struct crypto_async_request *async, int err);
+
+extern struct eip93_alg_template eip93_alg_md5;
+extern struct eip93_alg_template eip93_alg_sha1;
+extern struct eip93_alg_template eip93_alg_sha224;
+extern struct eip93_alg_template eip93_alg_sha256;
+extern struct eip93_alg_template eip93_alg_hmac_md5;
+extern struct eip93_alg_template eip93_alg_hmac_sha1;
+extern struct eip93_alg_template eip93_alg_hmac_sha224;
+extern struct eip93_alg_template eip93_alg_hmac_sha256;
+
+#endif /* _EIP93_HASH_H_ */
diff --git a/drivers/crypto/inside-secure/eip93/eip93-main.c b/drivers/crypto/inside-secure/eip93/eip93-main.c
new file mode 100644
index 000000000000..0b38a567da0e
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/eip93-main.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 - 2021
+ *
+ * Richard van Schagen <vschagen@icloud.com>
+ * Christian Marangi <ansuelsmth@gmail.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+
+#include "eip93-main.h"
+#include "eip93-regs.h"
+#include "eip93-common.h"
+#include "eip93-cipher.h"
+#include "eip93-aes.h"
+#include "eip93-des.h"
+#include "eip93-aead.h"
+#include "eip93-hash.h"
+
+static struct eip93_alg_template *eip93_algs[] = {
+ &eip93_alg_ecb_des,
+ &eip93_alg_cbc_des,
+ &eip93_alg_ecb_des3_ede,
+ &eip93_alg_cbc_des3_ede,
+ &eip93_alg_ecb_aes,
+ &eip93_alg_cbc_aes,
+ &eip93_alg_ctr_aes,
+ &eip93_alg_rfc3686_aes,
+ &eip93_alg_authenc_hmac_md5_cbc_des,
+ &eip93_alg_authenc_hmac_sha1_cbc_des,
+ &eip93_alg_authenc_hmac_sha224_cbc_des,
+ &eip93_alg_authenc_hmac_sha256_cbc_des,
+ &eip93_alg_authenc_hmac_md5_cbc_des3_ede,
+ &eip93_alg_authenc_hmac_sha1_cbc_des3_ede,
+ &eip93_alg_authenc_hmac_sha224_cbc_des3_ede,
+ &eip93_alg_authenc_hmac_sha256_cbc_des3_ede,
+ &eip93_alg_authenc_hmac_md5_cbc_aes,
+ &eip93_alg_authenc_hmac_sha1_cbc_aes,
+ &eip93_alg_authenc_hmac_sha224_cbc_aes,
+ &eip93_alg_authenc_hmac_sha256_cbc_aes,
+ &eip93_alg_authenc_hmac_md5_rfc3686_aes,
+ &eip93_alg_authenc_hmac_sha1_rfc3686_aes,
+ &eip93_alg_authenc_hmac_sha224_rfc3686_aes,
+ &eip93_alg_authenc_hmac_sha256_rfc3686_aes,
+ &eip93_alg_md5,
+ &eip93_alg_sha1,
+ &eip93_alg_sha224,
+ &eip93_alg_sha256,
+ &eip93_alg_hmac_md5,
+ &eip93_alg_hmac_sha1,
+ &eip93_alg_hmac_sha224,
+ &eip93_alg_hmac_sha256,
+};
+
+inline void eip93_irq_disable(struct eip93_device *eip93, u32 mask)
+{
+ __raw_writel(mask, eip93->base + EIP93_REG_MASK_DISABLE);
+}
+
+inline void eip93_irq_enable(struct eip93_device *eip93, u32 mask)
+{
+ __raw_writel(mask, eip93->base + EIP93_REG_MASK_ENABLE);
+}
+
+inline void eip93_irq_clear(struct eip93_device *eip93, u32 mask)
+{
+ __raw_writel(mask, eip93->base + EIP93_REG_INT_CLR);
+}
+
+static void eip93_unregister_algs(unsigned int i)
+{
+ unsigned int j;
+
+ for (j = 0; j < i; j++) {
+ switch (eip93_algs[j]->type) {
+ case EIP93_ALG_TYPE_SKCIPHER:
+ crypto_unregister_skcipher(&eip93_algs[j]->alg.skcipher);
+ break;
+ case EIP93_ALG_TYPE_AEAD:
+ crypto_unregister_aead(&eip93_algs[j]->alg.aead);
+ break;
+ case EIP93_ALG_TYPE_HASH:
+ crypto_unregister_ahash(&eip93_algs[j]->alg.ahash);
+ break;
+ }
+ }
+}
+
+static int eip93_register_algs(struct eip93_device *eip93, u32 supported_algo_flags)
+{
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(eip93_algs); i++) {
+ u32 alg_flags = eip93_algs[i]->flags;
+
+ eip93_algs[i]->eip93 = eip93;
+
+ if ((IS_DES(alg_flags) || IS_3DES(alg_flags)) &&
+ !(supported_algo_flags & EIP93_PE_OPTION_TDES))
+ continue;
+
+ if (IS_AES(alg_flags)) {
+ if (!(supported_algo_flags & EIP93_PE_OPTION_AES))
+ continue;
+
+ if (!IS_HMAC(alg_flags)) {
+ if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY128)
+ eip93_algs[i]->alg.skcipher.max_keysize =
+ AES_KEYSIZE_128;
+
+ if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY192)
+ eip93_algs[i]->alg.skcipher.max_keysize =
+ AES_KEYSIZE_192;
+
+ if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY256)
+ eip93_algs[i]->alg.skcipher.max_keysize =
+ AES_KEYSIZE_256;
+
+ if (IS_RFC3686(alg_flags))
+ eip93_algs[i]->alg.skcipher.max_keysize +=
+ CTR_RFC3686_NONCE_SIZE;
+ }
+ }
+
+ if (IS_HASH_MD5(alg_flags) &&
+ !(supported_algo_flags & EIP93_PE_OPTION_MD5))
+ continue;
+
+ if (IS_HASH_SHA1(alg_flags) &&
+ !(supported_algo_flags & EIP93_PE_OPTION_SHA_1))
+ continue;
+
+ if (IS_HASH_SHA224(alg_flags) &&
+ !(supported_algo_flags & EIP93_PE_OPTION_SHA_224))
+ continue;
+
+ if (IS_HASH_SHA256(alg_flags) &&
+ !(supported_algo_flags & EIP93_PE_OPTION_SHA_256))
+ continue;
+
+ switch (eip93_algs[i]->type) {
+ case EIP93_ALG_TYPE_SKCIPHER:
+ ret = crypto_register_skcipher(&eip93_algs[i]->alg.skcipher);
+ break;
+ case EIP93_ALG_TYPE_AEAD:
+ ret = crypto_register_aead(&eip93_algs[i]->alg.aead);
+ break;
+ case EIP93_ALG_TYPE_HASH:
+ ret = crypto_register_ahash(&eip93_algs[i]->alg.ahash);
+ break;
+ }
+ if (ret)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ eip93_unregister_algs(i);
+
+ return ret;
+}
+
+static void eip93_handle_result_descriptor(struct eip93_device *eip93)
+{
+ struct crypto_async_request *async;
+ struct eip93_descriptor *rdesc;
+ u16 desc_flags, crypto_idr;
+ bool last_entry;
+ int handled, left, err;
+ u32 pe_ctrl_stat;
+ u32 pe_length;
+
+get_more:
+ handled = 0;
+
+ left = readl(eip93->base + EIP93_REG_PE_RD_COUNT) & EIP93_PE_RD_COUNT;
+
+ if (!left) {
+ eip93_irq_clear(eip93, EIP93_INT_RDR_THRESH);
+ eip93_irq_enable(eip93, EIP93_INT_RDR_THRESH);
+ return;
+ }
+
+ last_entry = false;
+
+ while (left) {
+ scoped_guard(spinlock_irqsave, &eip93->ring->read_lock)
+ rdesc = eip93_get_descriptor(eip93);
+ if (IS_ERR(rdesc)) {
+ dev_err(eip93->dev, "Ndesc: %d nreq: %d\n",
+ handled, left);
+ err = -EIO;
+ break;
+ }
+ /* make sure DMA is finished writing */
+ do {
+ pe_ctrl_stat = READ_ONCE(rdesc->pe_ctrl_stat_word);
+ pe_length = READ_ONCE(rdesc->pe_length_word);
+ } while (FIELD_GET(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN, pe_ctrl_stat) !=
+ EIP93_PE_CTRL_PE_READY ||
+ FIELD_GET(EIP93_PE_LENGTH_HOST_PE_READY, pe_length) !=
+ EIP93_PE_LENGTH_PE_READY);
+
+ err = rdesc->pe_ctrl_stat_word & (EIP93_PE_CTRL_PE_EXT_ERR_CODE |
+ EIP93_PE_CTRL_PE_EXT_ERR |
+ EIP93_PE_CTRL_PE_SEQNUM_ERR |
+ EIP93_PE_CTRL_PE_PAD_ERR |
+ EIP93_PE_CTRL_PE_AUTH_ERR);
+
+ desc_flags = FIELD_GET(EIP93_PE_USER_ID_DESC_FLAGS, rdesc->user_id);
+ crypto_idr = FIELD_GET(EIP93_PE_USER_ID_CRYPTO_IDR, rdesc->user_id);
+
+ writel(1, eip93->base + EIP93_REG_PE_RD_COUNT);
+ eip93_irq_clear(eip93, EIP93_INT_RDR_THRESH);
+
+ handled++;
+ left--;
+
+ if (desc_flags & EIP93_DESC_LAST) {
+ last_entry = true;
+ break;
+ }
+ }
+
+ if (!last_entry)
+ goto get_more;
+
+ /* Get crypto async ref only for last descriptor */
+ scoped_guard(spinlock_bh, &eip93->ring->idr_lock) {
+ async = idr_find(&eip93->ring->crypto_async_idr, crypto_idr);
+ idr_remove(&eip93->ring->crypto_async_idr, crypto_idr);
+ }
+
+ /* Parse error in ctrl stat word */
+ err = eip93_parse_ctrl_stat_err(eip93, err);
+
+ if (desc_flags & EIP93_DESC_SKCIPHER)
+ eip93_skcipher_handle_result(async, err);
+
+ if (desc_flags & EIP93_DESC_AEAD)
+ eip93_aead_handle_result(async, err);
+
+ if (desc_flags & EIP93_DESC_HASH)
+ eip93_hash_handle_result(async, err);
+
+ goto get_more;
+}
+
+static void eip93_done_task(unsigned long data)
+{
+ struct eip93_device *eip93 = (struct eip93_device *)data;
+
+ eip93_handle_result_descriptor(eip93);
+}
+
+static irqreturn_t eip93_irq_handler(int irq, void *data)
+{
+ struct eip93_device *eip93 = data;
+ u32 irq_status;
+
+ irq_status = readl(eip93->base + EIP93_REG_INT_MASK_STAT);
+ if (FIELD_GET(EIP93_INT_RDR_THRESH, irq_status)) {
+ eip93_irq_disable(eip93, EIP93_INT_RDR_THRESH);
+ tasklet_schedule(&eip93->ring->done_task);
+ return IRQ_HANDLED;
+ }
+
+ /* Ignore errors in AUTO mode, handled by the RDR */
+ eip93_irq_clear(eip93, irq_status);
+ if (irq_status)
+ eip93_irq_disable(eip93, irq_status);
+
+ return IRQ_NONE;
+}
+
+static void eip93_initialize(struct eip93_device *eip93, u32 supported_algo_flags)
+{
+ u32 val;
+
+ /* Reset PE and rings */
+ val = EIP93_PE_CONFIG_RST_PE | EIP93_PE_CONFIG_RST_RING;
+ val |= EIP93_PE_TARGET_AUTO_RING_MODE;
+ /* For Auto mode, update the CDR ring owner after processing */
+ val |= EIP93_PE_CONFIG_EN_CDR_UPDATE;
+ writel(val, eip93->base + EIP93_REG_PE_CONFIG);
+
+ /* Wait for PE and ring to reset */
+ usleep_range(10, 20);
+
+ /* Release PE and ring reset */
+ val = readl(eip93->base + EIP93_REG_PE_CONFIG);
+ val &= ~(EIP93_PE_CONFIG_RST_PE | EIP93_PE_CONFIG_RST_RING);
+ writel(val, eip93->base + EIP93_REG_PE_CONFIG);
+
+ /* Config Clocks */
+ val = EIP93_PE_CLOCK_EN_PE_CLK;
+ if (supported_algo_flags & EIP93_PE_OPTION_TDES)
+ val |= EIP93_PE_CLOCK_EN_DES_CLK;
+ if (supported_algo_flags & EIP93_PE_OPTION_AES)
+ val |= EIP93_PE_CLOCK_EN_AES_CLK;
+ if (supported_algo_flags &
+ (EIP93_PE_OPTION_MD5 | EIP93_PE_OPTION_SHA_1 | EIP93_PE_OPTION_SHA_224 |
+ EIP93_PE_OPTION_SHA_256))
+ val |= EIP93_PE_CLOCK_EN_HASH_CLK;
+ writel(val, eip93->base + EIP93_REG_PE_CLOCK_CTRL);
+
+ /* Config DMA thresholds */
+ val = FIELD_PREP(EIP93_PE_OUTBUF_THRESH, 128) |
+ FIELD_PREP(EIP93_PE_INBUF_THRESH, 128);
+ writel(val, eip93->base + EIP93_REG_PE_BUF_THRESH);
+
+ /* Clear/ack all interrupts before disabling them all */
+ eip93_irq_clear(eip93, EIP93_INT_ALL);
+ eip93_irq_disable(eip93, EIP93_INT_ALL);
+
+ /* Set up the CDR threshold to trigger an interrupt */
+ val = FIELD_PREP(EIPR93_PE_CDR_THRESH, EIP93_RING_NUM - EIP93_RING_BUSY);
+ /*
+ * Configure RDR interrupt to be triggered if RD counter is not 0
+ * for more than 2^(N+10) system clocks.
+ */
+ val |= FIELD_PREP(EIPR93_PE_RD_TIMEOUT, 5) | EIPR93_PE_TIMEROUT_EN;
+ writel(val, eip93->base + EIP93_REG_PE_RING_THRESH);
+}
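To make the timeout comment above concrete: with the value 5 programmed into EIPR93_PE_RD_TIMEOUT, the RDR interrupt fires after 2^(5+10) = 32768 system clocks with a non-zero RD counter; at an assumed 250 MHz engine clock (the real rate is SoC dependent) that is roughly 131 us.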
+
+static void eip93_desc_free(struct eip93_device *eip93)
+{
+ writel(0, eip93->base + EIP93_REG_PE_RING_CONFIG);
+ writel(0, eip93->base + EIP93_REG_PE_CDR_BASE);
+ writel(0, eip93->base + EIP93_REG_PE_RDR_BASE);
+}
+
+static int eip93_set_ring(struct eip93_device *eip93, struct eip93_desc_ring *ring)
+{
+ ring->offset = sizeof(struct eip93_descriptor);
+ ring->base = dmam_alloc_coherent(eip93->dev,
+ sizeof(struct eip93_descriptor) * EIP93_RING_NUM,
+ &ring->base_dma, GFP_KERNEL);
+ if (!ring->base)
+ return -ENOMEM;
+
+ ring->write = ring->base;
+ ring->base_end = ring->base + sizeof(struct eip93_descriptor) * (EIP93_RING_NUM - 1);
+ ring->read = ring->base;
+
+ return 0;
+}
+
+static int eip93_desc_init(struct eip93_device *eip93)
+{
+ struct eip93_desc_ring *cdr = &eip93->ring->cdr;
+ struct eip93_desc_ring *rdr = &eip93->ring->rdr;
+ int ret;
+ u32 val;
+
+ ret = eip93_set_ring(eip93, cdr);
+ if (ret)
+ return ret;
+
+ ret = eip93_set_ring(eip93, rdr);
+ if (ret)
+ return ret;
+
+ writel((u32 __force)cdr->base_dma, eip93->base + EIP93_REG_PE_CDR_BASE);
+ writel((u32 __force)rdr->base_dma, eip93->base + EIP93_REG_PE_RDR_BASE);
+
+ val = FIELD_PREP(EIP93_PE_RING_SIZE, EIP93_RING_NUM - 1);
+ writel(val, eip93->base + EIP93_REG_PE_RING_CONFIG);
+
+ return 0;
+}
+
+static void eip93_cleanup(struct eip93_device *eip93)
+{
+ tasklet_kill(&eip93->ring->done_task);
+
+ /* Clear/ack all interrupts before disabling them all */
+ eip93_irq_clear(eip93, EIP93_INT_ALL);
+ eip93_irq_disable(eip93, EIP93_INT_ALL);
+
+ writel(0, eip93->base + EIP93_REG_PE_CLOCK_CTRL);
+
+ eip93_desc_free(eip93);
+
+ idr_destroy(&eip93->ring->crypto_async_idr);
+}
+
+static int eip93_crypto_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct eip93_device *eip93;
+ u32 ver, algo_flags;
+ int ret;
+
+ eip93 = devm_kzalloc(dev, sizeof(*eip93), GFP_KERNEL);
+ if (!eip93)
+ return -ENOMEM;
+
+ eip93->dev = dev;
+ platform_set_drvdata(pdev, eip93);
+
+ eip93->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(eip93->base))
+ return PTR_ERR(eip93->base);
+
+ eip93->irq = platform_get_irq(pdev, 0);
+ if (eip93->irq < 0)
+ return eip93->irq;
+
+ ret = devm_request_threaded_irq(eip93->dev, eip93->irq, eip93_irq_handler,
+ NULL, IRQF_ONESHOT,
+ dev_name(eip93->dev), eip93);
+ if (ret)
+ return ret;
+
+ eip93->ring = devm_kcalloc(eip93->dev, 1, sizeof(*eip93->ring), GFP_KERNEL);
+ if (!eip93->ring)
+ return -ENOMEM;
+
+ ret = eip93_desc_init(eip93);
+ if (ret)
+ return ret;
+
+ tasklet_init(&eip93->ring->done_task, eip93_done_task, (unsigned long)eip93);
+
+ spin_lock_init(&eip93->ring->read_lock);
+ spin_lock_init(&eip93->ring->write_lock);
+
+ spin_lock_init(&eip93->ring->idr_lock);
+ idr_init(&eip93->ring->crypto_async_idr);
+
+ algo_flags = readl(eip93->base + EIP93_REG_PE_OPTION_1);
+
+ eip93_initialize(eip93, algo_flags);
+
+ /* Init finished, enable RDR interrupt */
+ eip93_irq_enable(eip93, EIP93_INT_RDR_THRESH);
+
+ ret = eip93_register_algs(eip93, algo_flags);
+ if (ret) {
+ eip93_cleanup(eip93);
+ return ret;
+ }
+
+ ver = readl(eip93->base + EIP93_REG_PE_REVISION);
+ /* EIP_NO:MAJOR_HW_REV:MINOR_HW_REV:HW_PATCH,PE(OPTION_1:OPTION_0) */
+ dev_info(eip93->dev, "EIP%lu:%lx:%lx:%lx,PE(0x%x:0x%x)\n",
+ FIELD_GET(EIP93_PE_REVISION_EIP_NO, ver),
+ FIELD_GET(EIP93_PE_REVISION_MAJ_HW_REV, ver),
+ FIELD_GET(EIP93_PE_REVISION_MIN_HW_REV, ver),
+ FIELD_GET(EIP93_PE_REVISION_HW_PATCH, ver),
+ algo_flags,
+ readl(eip93->base + EIP93_REG_PE_OPTION_0));
+
+ return 0;
+}
+
+static void eip93_crypto_remove(struct platform_device *pdev)
+{
+ struct eip93_device *eip93 = platform_get_drvdata(pdev);
+
+ eip93_unregister_algs(ARRAY_SIZE(eip93_algs));
+ eip93_cleanup(eip93);
+}
+
+static const struct of_device_id eip93_crypto_of_match[] = {
+ { .compatible = "inside-secure,safexcel-eip93i", },
+ { .compatible = "inside-secure,safexcel-eip93ie", },
+ { .compatible = "inside-secure,safexcel-eip93is", },
+ { .compatible = "inside-secure,safexcel-eip93ies", },
+ /* IW not supported currently, missing AES-XCB-MAC/AES-CCM */
+ /* { .compatible = "inside-secure,safexcel-eip93iw", }, */
+ {}
+};
+MODULE_DEVICE_TABLE(of, eip93_crypto_of_match);
+
+static struct platform_driver eip93_crypto_driver = {
+ .probe = eip93_crypto_probe,
+ .remove = eip93_crypto_remove,
+ .driver = {
+ .name = "inside-secure-eip93",
+ .of_match_table = eip93_crypto_of_match,
+ },
+};
+module_platform_driver(eip93_crypto_driver);
+
+MODULE_AUTHOR("Richard van Schagen <vschagen@cs.com>");
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_DESCRIPTION("Mediatek EIP-93 crypto engine driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/inside-secure/eip93/eip93-main.h b/drivers/crypto/inside-secure/eip93/eip93-main.h
new file mode 100644
index 000000000000..79b078f0e5da
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/eip93-main.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 - 2021
+ *
+ * Richard van Schagen <vschagen@icloud.com>
+ * Christian Marangi <ansuelsmth@gmail.com>
+ */
+#ifndef _EIP93_MAIN_H_
+#define _EIP93_MAIN_H_
+
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+
+#define EIP93_RING_BUSY_DELAY 500
+
+#define EIP93_RING_NUM 512
+#define EIP93_RING_BUSY 32
+#define EIP93_CRA_PRIORITY 1500
+
+#define EIP93_RING_SA_STATE_ADDR(base, idx) ((base) + (idx))
+#define EIP93_RING_SA_STATE_DMA(dma_base, idx) ((u32 __force)(dma_base) + \
+ ((idx) * sizeof(struct sa_state)))
+
+/* cipher algorithms */
+#define EIP93_ALG_DES BIT(0)
+#define EIP93_ALG_3DES BIT(1)
+#define EIP93_ALG_AES BIT(2)
+#define EIP93_ALG_MASK GENMASK(2, 0)
+/* hash and hmac algorithms */
+#define EIP93_HASH_MD5 BIT(3)
+#define EIP93_HASH_SHA1 BIT(4)
+#define EIP93_HASH_SHA224 BIT(5)
+#define EIP93_HASH_SHA256 BIT(6)
+#define EIP93_HASH_HMAC BIT(7)
+#define EIP93_HASH_MASK GENMASK(6, 3)
+/* cipher modes */
+#define EIP93_MODE_CBC BIT(8)
+#define EIP93_MODE_ECB BIT(9)
+#define EIP93_MODE_CTR BIT(10)
+#define EIP93_MODE_RFC3686 BIT(11)
+#define EIP93_MODE_MASK GENMASK(10, 8)
+
+/* cipher encryption/decryption operations */
+#define EIP93_ENCRYPT BIT(12)
+#define EIP93_DECRYPT BIT(13)
+
+#define EIP93_BUSY BIT(14)
+
+/* descriptor flags */
+#define EIP93_DESC_DMA_IV BIT(0)
+#define EIP93_DESC_IPSEC BIT(1)
+#define EIP93_DESC_FINISH BIT(2)
+#define EIP93_DESC_LAST BIT(3)
+#define EIP93_DESC_FAKE_HMAC BIT(4)
+#define EIP93_DESC_PRNG BIT(5)
+#define EIP93_DESC_HASH BIT(6)
+#define EIP93_DESC_AEAD BIT(7)
+#define EIP93_DESC_SKCIPHER BIT(8)
+#define EIP93_DESC_ASYNC BIT(9)
+
+#define IS_DMA_IV(desc_flags) ((desc_flags) & EIP93_DESC_DMA_IV)
+
+#define IS_DES(flags) ((flags) & EIP93_ALG_DES)
+#define IS_3DES(flags) ((flags) & EIP93_ALG_3DES)
+#define IS_AES(flags) ((flags) & EIP93_ALG_AES)
+
+#define IS_HASH_MD5(flags) ((flags) & EIP93_HASH_MD5)
+#define IS_HASH_SHA1(flags) ((flags) & EIP93_HASH_SHA1)
+#define IS_HASH_SHA224(flags) ((flags) & EIP93_HASH_SHA224)
+#define IS_HASH_SHA256(flags) ((flags) & EIP93_HASH_SHA256)
+#define IS_HMAC(flags) ((flags) & EIP93_HASH_HMAC)
+
+#define IS_CBC(mode) ((mode) & EIP93_MODE_CBC)
+#define IS_ECB(mode) ((mode) & EIP93_MODE_ECB)
+#define IS_CTR(mode) ((mode) & EIP93_MODE_CTR)
+#define IS_RFC3686(mode) ((mode) & EIP93_MODE_RFC3686)
+
+#define IS_BUSY(flags) ((flags) & EIP93_BUSY)
+
+#define IS_ENCRYPT(dir) ((dir) & EIP93_ENCRYPT)
+#define IS_DECRYPT(dir) ((dir) & EIP93_DECRYPT)
+
+#define IS_CIPHER(flags) ((flags) & (EIP93_ALG_DES | \
+ EIP93_ALG_3DES | \
+ EIP93_ALG_AES))
+
+#define IS_HASH(flags) ((flags) & (EIP93_HASH_MD5 | \
+ EIP93_HASH_SHA1 | \
+ EIP93_HASH_SHA224 | \
+ EIP93_HASH_SHA256))
+
+/**
+ * struct eip93_device - crypto engine device structure
+ * @base: mapped register base of the engine
+ * @dev: backing platform device
+ * @clk: optional engine clock
+ * @irq: engine interrupt line
+ * @ring: descriptor ring state
+ */
+struct eip93_device {
+ void __iomem *base;
+ struct device *dev;
+ struct clk *clk;
+ int irq;
+ struct eip93_ring *ring;
+};
+
+struct eip93_desc_ring {
+ void *base;
+ void *base_end;
+ dma_addr_t base_dma;
+ /* write and read pointers */
+ void *read;
+ void *write;
+ /* descriptor element offset */
+ u32 offset;
+};
+
+struct eip93_state_pool {
+ void *base;
+ dma_addr_t base_dma;
+};
+
+struct eip93_ring {
+ struct tasklet_struct done_task;
+ /* command/result rings */
+ struct eip93_desc_ring cdr;
+ struct eip93_desc_ring rdr;
+ spinlock_t write_lock;
+ spinlock_t read_lock;
+ /* async idr */
+ spinlock_t idr_lock;
+ struct idr crypto_async_idr;
+};
+
+enum eip93_alg_type {
+ EIP93_ALG_TYPE_AEAD,
+ EIP93_ALG_TYPE_SKCIPHER,
+ EIP93_ALG_TYPE_HASH,
+};
+
+struct eip93_alg_template {
+ struct eip93_device *eip93;
+ enum eip93_alg_type type;
+ u32 flags;
+ union {
+ struct aead_alg aead;
+ struct skcipher_alg skcipher;
+ struct ahash_alg ahash;
+ } alg;
+};
+
+#endif /* _EIP93_MAIN_H_ */
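A small illustration, not part of the patch, of how the flag helpers above compose and decompose a template flag word; the helper name is made up, and the HMAC-SHA256 value mirrors the one used by eip93_alg_hmac_sha256 in eip93-hash.c.

static const char *eip93_flags_example(u32 flags)
{
	/* An HMAC template carries EIP93_HASH_HMAC on top of one base digest bit */
	if (IS_HMAC(flags) && (flags & EIP93_HASH_MASK) == EIP93_HASH_SHA256)
		return "hmac(sha256)";

	/* Plain digest templates set only the base bit inside EIP93_HASH_MASK */
	if (IS_HASH_SHA256(flags))
		return "sha256";

	return "other";
}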
diff --git a/drivers/crypto/inside-secure/eip93/eip93-regs.h b/drivers/crypto/inside-secure/eip93/eip93-regs.h
new file mode 100644
index 000000000000..0490b8d15131
--- /dev/null
+++ b/drivers/crypto/inside-secure/eip93/eip93-regs.h
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 - 2021
+ *
+ * Richard van Schagen <vschagen@icloud.com>
+ * Christian Marangi <ansuelsmth@gmail.com>
+ */
+#ifndef REG_EIP93_H
+#define REG_EIP93_H
+
+#define EIP93_REG_PE_CTRL_STAT 0x0
+#define EIP93_PE_CTRL_PE_PAD_CTRL_STAT GENMASK(31, 24)
+#define EIP93_PE_CTRL_PE_EXT_ERR_CODE GENMASK(23, 20)
+#define EIP93_PE_CTRL_PE_EXT_ERR_PROCESSING 0x8
+#define EIP93_PE_CTRL_PE_EXT_ERR_BLOCK_SIZE_ERR 0x7
+#define EIP93_PE_CTRL_PE_EXT_ERR_INVALID_PK_LENGTH 0x6
+#define EIP93_PE_CTRL_PE_EXT_ERR_ZERO_LENGTH 0x5
+#define EIP93_PE_CTRL_PE_EXT_ERR_SPI 0x4
+#define EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_ALGO 0x3
+#define EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_OP 0x2
+#define EIP93_PE_CTRL_PE_EXT_ERR_DESC_OWNER 0x1
+#define EIP93_PE_CTRL_PE_EXT_ERR_BUS 0x0
+#define EIP93_PE_CTRL_PE_EXT_ERR BIT(19)
+#define EIP93_PE_CTRL_PE_SEQNUM_ERR BIT(18)
+#define EIP93_PE_CTRL_PE_PAD_ERR BIT(17)
+#define EIP93_PE_CTRL_PE_AUTH_ERR BIT(16)
+#define EIP93_PE_CTRL_PE_PAD_VALUE GENMASK(15, 8)
+#define EIP93_PE_CTRL_PE_PRNG_MODE GENMASK(7, 6)
+#define EIP93_PE_CTRL_PE_HASH_FINAL BIT(4)
+#define EIP93_PE_CTRL_PE_INIT_ARC4 BIT(3)
+#define EIP93_PE_CTRL_PE_READY_DES_TRING_OWN GENMASK(1, 0)
+#define EIP93_PE_CTRL_PE_READY 0x2
+#define EIP93_PE_CTRL_HOST_READY 0x1
+#define EIP93_REG_PE_SOURCE_ADDR 0x4
+#define EIP93_REG_PE_DEST_ADDR 0x8
+#define EIP93_REG_PE_SA_ADDR 0xc
+#define EIP93_REG_PE_ADDR 0x10 /* STATE_ADDR */
+/*
+ * Special handling of the user ID
+ *
+ * user_id in eip93_descriptor identifies the descriptor; it is opaque to
+ * the hardware and can be used by the driver in any custom way.
+ *
+ * The obvious use would be to store the address of the kernel crypto
+ * request struct here, but a 32-bit word can't hold a pointer on 64-bit
+ * systems.
+ *
+ * Some flags are also required to identify the last descriptor.
+ *
+ * To handle this, the u32 is split in 2 parts:
+ * - 31:16 descriptor flags
+ * - 15:0 IDR index used to look up the crypto request
+ */
+#define EIP93_REG_PE_USER_ID 0x18
+#define EIP93_PE_USER_ID_DESC_FLAGS GENMASK(31, 16)
+#define EIP93_PE_USER_ID_CRYPTO_IDR GENMASK(15, 0)
+#define EIP93_REG_PE_LENGTH 0x1c
+#define EIP93_PE_LENGTH_BYPASS GENMASK(31, 24)
+#define EIP93_PE_LENGTH_HOST_PE_READY GENMASK(23, 22)
+#define EIP93_PE_LENGTH_PE_READY 0x2
+#define EIP93_PE_LENGTH_HOST_READY 0x1
+#define EIP93_PE_LENGTH_LENGTH GENMASK(19, 0)
+
+/* PACKET ENGINE RING configuration registers */
+#define EIP93_REG_PE_CDR_BASE 0x80
+#define EIP93_REG_PE_RDR_BASE 0x84
+#define EIP93_REG_PE_RING_CONFIG 0x88
+#define EIP93_PE_EN_EXT_TRIG BIT(31)
+/* Absent in later revision of eip93 */
+/* #define EIP93_PE_RING_OFFSET GENMASK(23, 15) */
+#define EIP93_PE_RING_SIZE GENMASK(9, 0)
+#define EIP93_REG_PE_RING_THRESH 0x8c
+#define EIPR93_PE_TIMEROUT_EN BIT(31)
+#define EIPR93_PE_RD_TIMEOUT GENMASK(29, 26)
+#define EIPR93_PE_RDR_THRESH GENMASK(25, 16)
+#define EIPR93_PE_CDR_THRESH GENMASK(9, 0)
+#define EIP93_REG_PE_CD_COUNT 0x90
+#define EIP93_PE_CD_COUNT GENMASK(10, 0)
+/*
+ * In the same register, writing a value in GENMASK(7, 0) will
+ * increment the descriptor count and start DMA action.
+ */
+#define EIP93_PE_CD_COUNT_INCR GENMASK(7, 0)
+#define EIP93_REG_PE_RD_COUNT 0x94
+#define EIP93_PE_RD_COUNT GENMASK(10, 0)
+/*
+ * In the same register, writing a value in GENMASK(7, 0) will
+ * increment the descriptor count and start DMA action.
+ */
+#define EIP93_PE_RD_COUNT_INCR GENMASK(7, 0)
+#define EIP93_REG_PE_RING_RW_PNTR 0x98 /* RING_PNTR */
+
+/* PACKET ENGINE configuration registers */
+#define EIP93_REG_PE_CONFIG 0x100
+#define EIP93_PE_CONFIG_SWAP_TARGET BIT(20)
+#define EIP93_PE_CONFIG_SWAP_DATA BIT(18)
+#define EIP93_PE_CONFIG_SWAP_SA BIT(17)
+#define EIP93_PE_CONFIG_SWAP_CDRD BIT(16)
+#define EIP93_PE_CONFIG_EN_CDR_UPDATE BIT(10)
+#define EIP93_PE_CONFIG_PE_MODE GENMASK(9, 8)
+#define EIP93_PE_TARGET_AUTO_RING_MODE FIELD_PREP(EIP93_PE_CONFIG_PE_MODE, 0x3)
+#define EIP93_PE_TARGET_COMMAND_NO_RDR_MODE FIELD_PREP(EIP93_PE_CONFIG_PE_MODE, 0x2)
+#define EIP93_PE_TARGET_COMMAND_WITH_RDR_MODE FIELD_PREP(EIP93_PE_CONFIG_PE_MODE, 0x1)
+#define EIP93_PE_DIRECT_HOST_MODE FIELD_PREP(EIP93_PE_CONFIG_PE_MODE, 0x0)
+#define EIP93_PE_CONFIG_RST_RING BIT(2)
+#define EIP93_PE_CONFIG_RST_PE BIT(0)
+#define EIP93_REG_PE_STATUS 0x104
+#define EIP93_REG_PE_BUF_THRESH 0x10c
+#define EIP93_PE_OUTBUF_THRESH GENMASK(23, 16)
+#define EIP93_PE_INBUF_THRESH GENMASK(7, 0)
+#define EIP93_REG_PE_INBUF_COUNT 0x100
+#define EIP93_REG_PE_OUTBUF_COUNT 0x114
+#define EIP93_REG_PE_BUF_RW_PNTR 0x118 /* BUF_PNTR */
+
+/* PACKET ENGINE endian config */
+#define EIP93_REG_PE_ENDIAN_CONFIG 0x1cc
+#define EIP93_AIROHA_REG_PE_ENDIAN_CONFIG 0x1d0
+#define EIP93_PE_ENDIAN_TARGET_BYTE_SWAP GENMASK(23, 16)
+#define EIP93_PE_ENDIAN_MASTER_BYTE_SWAP GENMASK(7, 0)
+/*
+ * Each byte is selected by a 2-bit ID.
+ * Split GENMASK(7, 0) in 4 parts, one for each byte.
+ * Example LITTLE ENDIAN: Example BIG ENDIAN
+ * GENMASK(7, 6) 0x3 GENMASK(7, 6) 0x0
+ * GENMASK(5, 4) 0x2 GENMASK(5, 4) 0x1
+ * GENMASK(3, 2) 0x1 GENMASK(3, 2) 0x2
+ * GENMASK(1, 0) 0x0 GENMASK(1, 0) 0x3
+ */
+#define EIP93_PE_ENDIAN_BYTE0 0x0
+#define EIP93_PE_ENDIAN_BYTE1 0x1
+#define EIP93_PE_ENDIAN_BYTE2 0x2
+#define EIP93_PE_ENDIAN_BYTE3 0x3
+
+/* EIP93 CLOCK control registers */
+#define EIP93_REG_PE_CLOCK_CTRL 0x1e8
+#define EIP93_PE_CLOCK_EN_HASH_CLK BIT(4)
+#define EIP93_PE_CLOCK_EN_ARC4_CLK BIT(3)
+#define EIP93_PE_CLOCK_EN_AES_CLK BIT(2)
+#define EIP93_PE_CLOCK_EN_DES_CLK BIT(1)
+#define EIP93_PE_CLOCK_EN_PE_CLK BIT(0)
+
+/* EIP93 Device Option and Revision Register */
+#define EIP93_REG_PE_OPTION_1 0x1f4
+#define EIP93_PE_OPTION_MAC_KEY256 BIT(31)
+#define EIP93_PE_OPTION_MAC_KEY192 BIT(30)
+#define EIP93_PE_OPTION_MAC_KEY128 BIT(29)
+#define EIP93_PE_OPTION_AES_CBC_MAC BIT(28)
+#define EIP93_PE_OPTION_AES_XCBX BIT(23)
+#define EIP93_PE_OPTION_SHA_256 BIT(19)
+#define EIP93_PE_OPTION_SHA_224 BIT(18)
+#define EIP93_PE_OPTION_SHA_1 BIT(17)
+#define EIP93_PE_OPTION_MD5 BIT(16)
+#define EIP93_PE_OPTION_AES_KEY256 BIT(15)
+#define EIP93_PE_OPTION_AES_KEY192 BIT(14)
+#define EIP93_PE_OPTION_AES_KEY128 BIT(13)
+#define EIP93_PE_OPTION_AES BIT(2)
+#define EIP93_PE_OPTION_ARC4 BIT(1)
+#define EIP93_PE_OPTION_TDES BIT(0) /* DES and TDES */
+#define EIP93_REG_PE_OPTION_0 0x1f8
+#define EIP93_REG_PE_REVISION 0x1fc
+#define EIP93_PE_REVISION_MAJ_HW_REV GENMASK(27, 24)
+#define EIP93_PE_REVISION_MIN_HW_REV GENMASK(23, 20)
+#define EIP93_PE_REVISION_HW_PATCH GENMASK(19, 16)
+#define EIP93_PE_REVISION_EIP_NO GENMASK(7, 0)
+
+/* EIP93 Interrupt Control Register */
+#define EIP93_REG_INT_UNMASK_STAT 0x200
+#define EIP93_REG_INT_MASK_STAT 0x204
+#define EIP93_REG_INT_CLR 0x204
+#define EIP93_REG_INT_MASK 0x208 /* INT_EN */
+/* Each int reg has the same bitmap */
+#define EIP93_INT_INTERFACE_ERR BIT(18)
+#define EIP93_INT_RPOC_ERR BIT(17)
+#define EIP93_INT_PE_RING_ERR BIT(16)
+#define EIP93_INT_HALT BIT(15)
+#define EIP93_INT_OUTBUF_THRESH BIT(11)
+#define EIP93_INT_INBUF_THRESH BIT(10)
+#define EIP93_INT_OPERATION_DONE BIT(9)
+#define EIP93_INT_RDR_THRESH BIT(1)
+#define EIP93_INT_CDR_THRESH BIT(0)
+#define EIP93_INT_ALL (EIP93_INT_INTERFACE_ERR | \
+ EIP93_INT_RPOC_ERR | \
+ EIP93_INT_PE_RING_ERR | \
+ EIP93_INT_HALT | \
+ EIP93_INT_OUTBUF_THRESH | \
+ EIP93_INT_INBUF_THRESH | \
+ EIP93_INT_OPERATION_DONE | \
+ EIP93_INT_RDR_THRESH | \
+ EIP93_INT_CDR_THRESH)
+
+#define EIP93_REG_INT_CFG 0x20c
+#define EIP93_INT_TYPE_PULSE BIT(0)
+#define EIP93_REG_MASK_ENABLE 0x210
+#define EIP93_REG_MASK_DISABLE 0x214
+
+/* EIP93 SA Record register */
+#define EIP93_REG_SA_CMD_0 0x400
+#define EIP93_SA_CMD_SAVE_HASH BIT(29)
+#define EIP93_SA_CMD_SAVE_IV BIT(28)
+#define EIP93_SA_CMD_HASH_SOURCE GENMASK(27, 26)
+#define EIP93_SA_CMD_HASH_NO_LOAD FIELD_PREP(EIP93_SA_CMD_HASH_SOURCE, 0x3)
+#define EIP93_SA_CMD_HASH_FROM_STATE FIELD_PREP(EIP93_SA_CMD_HASH_SOURCE, 0x2)
+#define EIP93_SA_CMD_HASH_FROM_SA FIELD_PREP(EIP93_SA_CMD_HASH_SOURCE, 0x0)
+#define EIP93_SA_CMD_IV_SOURCE GENMASK(25, 24)
+#define EIP93_SA_CMD_IV_FROM_PRNG FIELD_PREP(EIP93_SA_CMD_IV_SOURCE, 0x3)
+#define EIP93_SA_CMD_IV_FROM_STATE FIELD_PREP(EIP93_SA_CMD_IV_SOURCE, 0x2)
+#define EIP93_SA_CMD_IV_FROM_INPUT FIELD_PREP(EIP93_SA_CMD_IV_SOURCE, 0x1)
+#define EIP93_SA_CMD_IV_NO_LOAD FIELD_PREP(EIP93_SA_CMD_IV_SOURCE, 0x0)
+#define EIP93_SA_CMD_DIGEST_LENGTH GENMASK(23, 20)
+#define EIP93_SA_CMD_DIGEST_10WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0xa) /* SRTP and TLS */
+#define EIP93_SA_CMD_DIGEST_8WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x8) /* SHA-256 */
+#define EIP93_SA_CMD_DIGEST_7WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x7) /* SHA-224 */
+#define EIP93_SA_CMD_DIGEST_6WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x6)
+#define EIP93_SA_CMD_DIGEST_5WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x5) /* SHA1 */
+#define EIP93_SA_CMD_DIGEST_4WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x4) /* MD5 and AES-based */
+#define EIP93_SA_CMD_DIGEST_3WORD_IPSEC FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x3) /* IPSEC */
+#define EIP93_SA_CMD_DIGEST_2WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x2)
+#define EIP93_SA_CMD_DIGEST_1WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x1)
+#define EIP93_SA_CMD_DIGEST_3WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x0) /* 96bit output */
+#define EIP93_SA_CMD_HDR_PROC BIT(19)
+#define EIP93_SA_CMD_EXT_PAD BIT(18)
+#define EIP93_SA_CMD_SCPAD BIT(17)
+#define EIP93_SA_CMD_HASH GENMASK(15, 12)
+#define EIP93_SA_CMD_HASH_NULL FIELD_PREP(EIP93_SA_CMD_HASH, 0xf)
+#define EIP93_SA_CMD_HASH_SHA256 FIELD_PREP(EIP93_SA_CMD_HASH, 0x3)
+#define EIP93_SA_CMD_HASH_SHA224 FIELD_PREP(EIP93_SA_CMD_HASH, 0x2)
+#define EIP93_SA_CMD_HASH_SHA1 FIELD_PREP(EIP93_SA_CMD_HASH, 0x1)
+#define EIP93_SA_CMD_HASH_MD5 FIELD_PREP(EIP93_SA_CMD_HASH, 0x0)
+#define EIP93_SA_CMD_CIPHER GENMASK(11, 8)
+#define EIP93_SA_CMD_CIPHER_NULL FIELD_PREP(EIP93_SA_CMD_CIPHER, 0xf)
+#define EIP93_SA_CMD_CIPHER_AES FIELD_PREP(EIP93_SA_CMD_CIPHER, 0x3)
+#define EIP93_SA_CMD_CIPHER_ARC4 FIELD_PREP(EIP93_SA_CMD_CIPHER, 0x2)
+#define EIP93_SA_CMD_CIPHER_3DES FIELD_PREP(EIP93_SA_CMD_CIPHER, 0x1)
+#define EIP93_SA_CMD_CIPHER_DES FIELD_PREP(EIP93_SA_CMD_CIPHER, 0x0)
+#define EIP93_SA_CMD_PAD_TYPE GENMASK(7, 6)
+#define EIP93_SA_CMD_PAD_CONST_SSL FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x6)
+#define EIP93_SA_CMD_PAD_TLS_DTLS FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x5)
+#define EIP93_SA_CMD_PAD_ZERO FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x3)
+#define EIP93_SA_CMD_PAD_CONST FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x2)
+#define EIP93_SA_CMD_PAD_PKCS7 FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x1)
+#define EIP93_SA_CMD_PAD_IPSEC FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x0)
+#define EIP93_SA_CMD_OPGROUP GENMASK(5, 4)
+#define EIP93_SA_CMD_OP_EXT FIELD_PREP(EIP93_SA_CMD_OPGROUP, 0x2)
+#define EIP93_SA_CMD_OP_PROTOCOL FIELD_PREP(EIP93_SA_CMD_OPGROUP, 0x1)
+#define EIP93_SA_CMD_OP_BASIC FIELD_PREP(EIP93_SA_CMD_OPGROUP, 0x0)
+#define EIP93_SA_CMD_DIRECTION_IN BIT(3) /* 0: outbound 1: inbound */
+#define EIP93_SA_CMD_OPCODE GENMASK(2, 0)
+#define EIP93_SA_CMD_OPCODE_BASIC_OUT_PRNG 0x7
+#define EIP93_SA_CMD_OPCODE_BASIC_OUT_HASH 0x3
+#define EIP93_SA_CMD_OPCODE_BASIC_OUT_ENC_HASH 0x1
+#define EIP93_SA_CMD_OPCODE_BASIC_OUT_ENC 0x0
+#define EIP93_SA_CMD_OPCODE_BASIC_IN_HASH 0x3
+#define EIP93_SA_CMD_OPCODE_BASIC_IN_HASH_DEC 0x1
+#define EIP93_SA_CMD_OPCODE_BASIC_IN_DEC 0x0
+#define EIP93_SA_CMD_OPCODE_PROTOCOL_OUT_ESP 0x0
+#define EIP93_SA_CMD_OPCODE_PROTOCOL_OUT_SSL 0x4
+#define EIP93_SA_CMD_OPCODE_PROTOCOL_OUT_TLS 0x5
+#define EIP93_SA_CMD_OPCODE_PROTOCOL_OUT_SRTP 0x7
+#define EIP93_SA_CMD_OPCODE_PROTOCOL_IN_ESP 0x0
+#define EIP93_SA_CMD_OPCODE_PROTOCOL_IN_SSL 0x2
+#define EIP93_SA_CMD_OPCODE_PROTOCOL_IN_TLS 0x3
+#define EIP93_SA_CMD_OPCODE_PROTOCOL_IN_SRTP 0x7
+#define EIP93_SA_CMD_OPCODE_EXT_OUT_DTSL 0x1
+#define EIP93_SA_CMD_OPCODE_EXT_OUT_SSL 0x4
+#define EIP93_SA_CMD_OPCODE_EXT_OUT_TLSV10 0x5
+#define EIP93_SA_CMD_OPCODE_EXT_OUT_TLSV11 0x6
+#define EIP93_SA_CMD_OPCODE_EXT_IN_DTSL 0x1
+#define EIP93_SA_CMD_OPCODE_EXT_IN_SSL 0x4
+#define EIP93_SA_CMD_OPCODE_EXT_IN_TLSV10 0x5
+#define EIP93_SA_CMD_OPCODE_EXT_IN_TLSV11 0x6
+#define EIP93_REG_SA_CMD_1 0x404
+#define EIP93_SA_CMD_EN_SEQNUM_CHK BIT(29)
+/* This mask can be used for either ARC4 or AES */
+#define EIP93_SA_CMD_ARC4_KEY_LENGHT GENMASK(28, 24)
+#define EIP93_SA_CMD_AES_DEC_KEY BIT(28) /* 0: encrypt key 1: decrypt key */
+#define EIP93_SA_CMD_AES_KEY_LENGTH GENMASK(26, 24)
+#define EIP93_SA_CMD_AES_KEY_256BIT FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH, 0x4)
+#define EIP93_SA_CMD_AES_KEY_192BIT FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH, 0x3)
+#define EIP93_SA_CMD_AES_KEY_128BIT FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH, 0x2)
+#define EIP93_SA_CMD_HASH_CRYPT_OFFSET GENMASK(23, 16)
+#define EIP93_SA_CMD_BYTE_OFFSET BIT(13) /* 0: CRYPT_OFFSET in 32bit word 1: CRYPT_OFFSET in 8bit bytes */
+#define EIP93_SA_CMD_HMAC BIT(12)
+#define EIP93_SA_CMD_SSL_MAC BIT(12)
+/* This mask can be used for either ARC4 or AES */
+#define EIP93_SA_CMD_CHIPER_MODE GENMASK(9, 8)
+/* AES or DES operations */
+#define EIP93_SA_CMD_CHIPER_MODE_ICM FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x3)
+#define EIP93_SA_CMD_CHIPER_MODE_CTR FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x2)
+#define EIP93_SA_CMD_CHIPER_MODE_CBC FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x1)
+#define EIP93_SA_CMD_CHIPER_MODE_ECB FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x0)
+/* ARC4 operations */
+#define EIP93_SA_CMD_CHIPER_MODE_STATEFULL FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x1)
+#define EIP93_SA_CMD_CHIPER_MODE_STATELESS FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x0)
+#define EIP93_SA_CMD_COPY_PAD BIT(3)
+#define EIP93_SA_CMD_COPY_PAYLOAD BIT(2)
+#define EIP93_SA_CMD_COPY_HEADER BIT(1)
+#define EIP93_SA_CMD_COPY_DIGEST BIT(0) /* With this enabled, COPY_PAD is required */
+
+/* State save register */
+#define EIP93_REG_STATE_IV_0 0x500
+#define EIP93_REG_STATE_IV_1 0x504
+
+#define EIP93_REG_PE_ARC4STATE 0x700
+
+struct sa_record {
+ u32 sa_cmd0_word;
+ u32 sa_cmd1_word;
+ u32 sa_key[8];
+ u8 sa_i_digest[32];
+ u8 sa_o_digest[32];
+ u32 sa_spi;
+ u32 sa_seqnum[2];
+ u32 sa_seqmum_mask[2];
+ u32 sa_nonce;
+} __packed;
+
+struct sa_state {
+ u32 state_iv[4];
+ u32 state_byte_cnt[2];
+ u8 state_i_digest[32];
+} __packed;
+
+struct eip93_descriptor {
+ u32 pe_ctrl_stat_word;
+ u32 src_addr;
+ u32 dst_addr;
+ u32 sa_addr;
+ u32 state_addr;
+ u32 arc4_addr;
+ u32 user_id;
+ u32 pe_length_word;
+} __packed;
+
+#endif
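A minimal sketch, not part of the patch, of the user_id packing scheme documented above; the helper names are made up, but the FIELD_PREP()/FIELD_GET() usage mirrors what eip93-hash.c and eip93-main.c do when queueing and completing descriptors.

#include <linux/bitfield.h>

/* Pack descriptor flags (e.g. EIP93_DESC_HASH | EIP93_DESC_LAST) and the
 * IDR index of the crypto request into the 32-bit user_id word.
 */
static u32 eip93_pack_user_id(u16 desc_flags, u16 idr_index)
{
	return FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, desc_flags) |
	       FIELD_PREP(EIP93_PE_USER_ID_CRYPTO_IDR, idr_index);
}

/* On completion, recover both halves from the result descriptor. */
static void eip93_unpack_user_id(u32 user_id, u16 *desc_flags, u16 *idr_index)
{
	*desc_flags = FIELD_GET(EIP93_PE_USER_ID_DESC_FLAGS, user_id);
	*idr_index = FIELD_GET(EIP93_PE_USER_ID_CRYPTO_IDR, user_id);
}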
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index c3776b0de51d..09d9589f2d68 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -33,8 +33,6 @@ static unsigned int nr_cpus_per_node;
/* Number of physical cpus sharing each iaa instance */
static unsigned int cpus_per_iaa;
-static struct crypto_comp *deflate_generic_tfm;
-
/* Per-cpu lookup table for balanced wqs */
static struct wq_table_entry __percpu *wq_table;
@@ -1001,17 +999,14 @@ out:
static int deflate_generic_decompress(struct acomp_req *req)
{
- void *src, *dst;
+ ACOMP_REQUEST_ON_STACK(fbreq, crypto_acomp_reqtfm(req));
int ret;
- src = kmap_local_page(sg_page(req->src)) + req->src->offset;
- dst = kmap_local_page(sg_page(req->dst)) + req->dst->offset;
-
- ret = crypto_comp_decompress(deflate_generic_tfm,
- src, req->slen, dst, &req->dlen);
-
- kunmap_local(src);
- kunmap_local(dst);
+ acomp_request_set_callback(fbreq, 0, NULL, NULL);
+ acomp_request_set_params(fbreq, req->src, req->dst, req->slen,
+ req->dlen);
+ ret = crypto_acomp_decompress(fbreq);
+ req->dlen = fbreq->dlen;
update_total_sw_decomp_calls();
@@ -1136,8 +1131,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
struct idxd_wq *wq,
dma_addr_t src_addr, unsigned int slen,
dma_addr_t dst_addr, unsigned int *dlen,
- u32 *compression_crc,
- bool disable_async)
+ u32 *compression_crc)
{
struct iaa_device_compression_mode *active_compression_mode;
struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -1180,7 +1174,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
desc->src2_size = sizeof(struct aecs_comp_table_record);
desc->completion_addr = idxd_desc->compl_dma;
- if (ctx->use_irq && !disable_async) {
+ if (ctx->use_irq) {
desc->flags |= IDXD_OP_FLAG_RCI;
idxd_desc->crypto.req = req;
@@ -1193,7 +1187,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
" src_addr %llx, dst_addr %llx\n", __func__,
active_compression_mode->name,
src_addr, dst_addr);
- } else if (ctx->async_mode && !disable_async)
+ } else if (ctx->async_mode)
req->base.data = idxd_desc;
dev_dbg(dev, "%s: compression mode %s,"
@@ -1214,7 +1208,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
update_total_comp_calls();
update_wq_comp_calls(wq);
- if (ctx->async_mode && !disable_async) {
+ if (ctx->async_mode) {
ret = -EINPROGRESS;
dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__);
goto out;
@@ -1234,7 +1228,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
*compression_crc = idxd_desc->iax_completion->crc;
- if (!ctx->async_mode || disable_async)
+ if (!ctx->async_mode)
idxd_free_desc(wq, idxd_desc);
out:
return ret;
@@ -1500,13 +1494,11 @@ static int iaa_comp_acompress(struct acomp_req *req)
struct iaa_compression_ctx *compression_ctx;
struct crypto_tfm *tfm = req->base.tfm;
dma_addr_t src_addr, dst_addr;
- bool disable_async = false;
int nr_sgs, cpu, ret = 0;
struct iaa_wq *iaa_wq;
u32 compression_crc;
struct idxd_wq *wq;
struct device *dev;
- int order = -1;
compression_ctx = crypto_tfm_ctx(tfm);
@@ -1536,21 +1528,6 @@ static int iaa_comp_acompress(struct acomp_req *req)
iaa_wq = idxd_wq_get_private(wq);
- if (!req->dst) {
- gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
-
- /* incompressible data will always be < 2 * slen */
- req->dlen = 2 * req->slen;
- order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE);
- req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL);
- if (!req->dst) {
- ret = -ENOMEM;
- order = -1;
- goto out;
- }
- disable_async = true;
- }
-
dev = &wq->idxd->pdev->dev;
nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
@@ -1580,7 +1557,7 @@ static int iaa_comp_acompress(struct acomp_req *req)
req->dst, req->dlen, sg_dma_len(req->dst));
ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr,
- &req->dlen, &compression_crc, disable_async);
+ &req->dlen, &compression_crc);
if (ret == -EINPROGRESS)
return ret;
@@ -1611,100 +1588,6 @@ err_map_dst:
out:
iaa_wq_put(wq);
- if (order >= 0)
- sgl_free_order(req->dst, order);
-
- return ret;
-}
-
-static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req)
-{
- gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
- GFP_KERNEL : GFP_ATOMIC;
- struct crypto_tfm *tfm = req->base.tfm;
- dma_addr_t src_addr, dst_addr;
- int nr_sgs, cpu, ret = 0;
- struct iaa_wq *iaa_wq;
- struct device *dev;
- struct idxd_wq *wq;
- int order = -1;
-
- cpu = get_cpu();
- wq = wq_table_next_wq(cpu);
- put_cpu();
- if (!wq) {
- pr_debug("no wq configured for cpu=%d\n", cpu);
- return -ENODEV;
- }
-
- ret = iaa_wq_get(wq);
- if (ret) {
- pr_debug("no wq available for cpu=%d\n", cpu);
- return -ENODEV;
- }
-
- iaa_wq = idxd_wq_get_private(wq);
-
- dev = &wq->idxd->pdev->dev;
-
- nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
- if (nr_sgs <= 0 || nr_sgs > 1) {
- dev_dbg(dev, "couldn't map src sg for iaa device %d,"
- " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
- iaa_wq->wq->id, ret);
- ret = -EIO;
- goto out;
- }
- src_addr = sg_dma_address(req->src);
- dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p,"
- " req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs,
- req->src, req->slen, sg_dma_len(req->src));
-
- req->dlen = 4 * req->slen; /* start with ~avg comp rato */
-alloc_dest:
- order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE);
- req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL);
- if (!req->dst) {
- ret = -ENOMEM;
- order = -1;
- goto out;
- }
-
- nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
- if (nr_sgs <= 0 || nr_sgs > 1) {
- dev_dbg(dev, "couldn't map dst sg for iaa device %d,"
- " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
- iaa_wq->wq->id, ret);
- ret = -EIO;
- goto err_map_dst;
- }
-
- dst_addr = sg_dma_address(req->dst);
- dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
- " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
- req->dst, req->dlen, sg_dma_len(req->dst));
- ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
- dst_addr, &req->dlen, true);
- if (ret == -EOVERFLOW) {
- dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
- req->dlen *= 2;
- if (req->dlen > CRYPTO_ACOMP_DST_MAX)
- goto err_map_dst;
- goto alloc_dest;
- }
-
- if (ret != 0)
- dev_dbg(dev, "asynchronous decompress failed ret=%d\n", ret);
-
- dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
-err_map_dst:
- dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
-out:
- iaa_wq_put(wq);
-
- if (order >= 0)
- sgl_free_order(req->dst, order);
-
return ret;
}
@@ -1727,9 +1610,6 @@ static int iaa_comp_adecompress(struct acomp_req *req)
return -EINVAL;
}
- if (!req->dst)
- return iaa_comp_adecompress_alloc_dest(req);
-
cpu = get_cpu();
wq = wq_table_next_wq(cpu);
put_cpu();
@@ -1810,19 +1690,10 @@ static int iaa_comp_init_fixed(struct crypto_acomp *acomp_tfm)
return 0;
}
-static void dst_free(struct scatterlist *sgl)
-{
- /*
- * Called for req->dst = NULL cases but we free elsewhere
- * using sgl_free_order().
- */
-}
-
static struct acomp_alg iaa_acomp_fixed_deflate = {
.init = iaa_comp_init_fixed,
.compress = iaa_comp_acompress,
.decompress = iaa_comp_adecompress,
- .dst_free = dst_free,
.base = {
.cra_name = "deflate",
.cra_driver_name = "deflate-iaa",
@@ -2022,15 +1893,6 @@ static int __init iaa_crypto_init_module(void)
}
nr_cpus_per_node = nr_cpus / nr_nodes;
- if (crypto_has_comp("deflate-generic", 0, 0))
- deflate_generic_tfm = crypto_alloc_comp("deflate-generic", 0, 0);
-
- if (IS_ERR_OR_NULL(deflate_generic_tfm)) {
- pr_err("IAA could not alloc %s tfm: errcode = %ld\n",
- "deflate-generic", PTR_ERR(deflate_generic_tfm));
- return -ENOMEM;
- }
-
ret = iaa_aecs_init_fixed();
if (ret < 0) {
pr_debug("IAA fixed compression mode init failed\n");
@@ -2072,7 +1934,6 @@ err_verify_attr_create:
err_driver_reg:
iaa_aecs_cleanup_fixed();
err_aecs_init:
- crypto_free_comp(deflate_generic_tfm);
goto out;
}
@@ -2089,7 +1950,6 @@ static void __exit iaa_crypto_cleanup_module(void)
&driver_attr_verify_compress);
idxd_driver_unregister(&iaa_crypto_driver);
iaa_aecs_cleanup_fixed();
- crypto_free_comp(deflate_generic_tfm);
pr_debug("cleaned up\n");
}
diff --git a/drivers/crypto/intel/qat/qat_420xx/Makefile b/drivers/crypto/intel/qat/qat_420xx/Makefile
index 45728659fbc4..72b24b1804cf 100644
--- a/drivers/crypto/intel/qat/qat_420xx/Makefile
+++ b/drivers/crypto/intel/qat/qat_420xx/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx.o
-qat_420xx-objs := adf_drv.o adf_420xx_hw_data.o
+qat_420xx-y := adf_drv.o adf_420xx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
index 9faef33e54bd..4feeef83f7a3 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
@@ -98,7 +98,7 @@ static struct adf_hw_device_class adf_420xx_class = {
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
- u32 me_disable = self->fuses;
+ u32 me_disable = self->fuses[ADF_FUSECTL4];
return ~me_disable & ADF_420XX_ACCELENGINES_MASK;
}
@@ -106,8 +106,7 @@ static u32 get_ae_mask(struct adf_hw_device_data *self)
static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
{
switch (adf_get_service_enabled(accel_dev)) {
- case SVC_CY:
- case SVC_CY2:
+ case SVC_SYM_ASYM:
return ARRAY_SIZE(adf_fw_cy_config);
case SVC_DC:
return ARRAY_SIZE(adf_fw_dc_config);
@@ -118,10 +117,8 @@ static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
case SVC_ASYM:
return ARRAY_SIZE(adf_fw_asym_config);
case SVC_ASYM_DC:
- case SVC_DC_ASYM:
return ARRAY_SIZE(adf_fw_asym_dc_config);
case SVC_SYM_DC:
- case SVC_DC_SYM:
return ARRAY_SIZE(adf_fw_sym_dc_config);
default:
return 0;
@@ -131,8 +128,7 @@ static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev)
{
switch (adf_get_service_enabled(accel_dev)) {
- case SVC_CY:
- case SVC_CY2:
+ case SVC_SYM_ASYM:
return adf_fw_cy_config;
case SVC_DC:
return adf_fw_dc_config;
@@ -143,10 +139,8 @@ static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev
case SVC_ASYM:
return adf_fw_asym_config;
case SVC_ASYM_DC:
- case SVC_DC_ASYM:
return adf_fw_asym_dc_config;
case SVC_SYM_DC:
- case SVC_DC_SYM:
return adf_fw_sym_dc_config;
default:
return NULL;
@@ -266,8 +260,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
}
switch (adf_get_service_enabled(accel_dev)) {
- case SVC_CY:
- case SVC_CY2:
+ case SVC_SYM_ASYM:
return capabilities_sym | capabilities_asym;
case SVC_DC:
return capabilities_dc;
@@ -284,10 +277,8 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
case SVC_ASYM:
return capabilities_asym;
case SVC_ASYM_DC:
- case SVC_DC_ASYM:
return capabilities_asym | capabilities_dc;
case SVC_SYM_DC:
- case SVC_DC_SYM:
return capabilities_sym | capabilities_dc;
default:
return 0;
@@ -420,6 +411,7 @@ static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask)
dev_err_mask->parerr_cpr_xlt_mask = ADF_420XX_PARITYERRORMASK_CPR_XLT_MASK;
dev_err_mask->parerr_dcpr_ucs_mask = ADF_420XX_PARITYERRORMASK_DCPR_UCS_MASK;
dev_err_mask->parerr_pke_mask = ADF_420XX_PARITYERRORMASK_PKE_MASK;
+ dev_err_mask->parerr_wat_wcp_mask = ADF_420XX_PARITYERRORMASK_WAT_WCP_MASK;
dev_err_mask->ssmfeatren_mask = ADF_420XX_SSMFEATREN_MASK;
}
@@ -482,6 +474,7 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock;
hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
hw_data->clock_frequency = ADF_420XX_AE_FREQ;
+ hw_data->services_supported = adf_gen4_services_supported;
adf_gen4_set_err_mask(&hw_data->dev_err_mask);
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
index 9589d60fb281..8084aa0f7f41 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
@@ -79,7 +79,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adf_init_hw_data_420xx(accel_dev->hw_device, ent->device);
pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
- pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses);
+ pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses[ADF_FUSECTL4]);
/* Get Accelerators and Accelerators Engines masks */
hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
diff --git a/drivers/crypto/intel/qat/qat_4xxx/Makefile b/drivers/crypto/intel/qat/qat_4xxx/Makefile
index 9ba202079a22..e8480bb80dee 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/Makefile
+++ b/drivers/crypto/intel/qat/qat_4xxx/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx.o
-qat_4xxx-objs := adf_drv.o adf_4xxx_hw_data.o
+qat_4xxx-y := adf_drv.o adf_4xxx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index bbd92c017c28..4eb6ef99efdd 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -101,7 +101,7 @@ static struct adf_hw_device_class adf_4xxx_class = {
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
- u32 me_disable = self->fuses;
+ u32 me_disable = self->fuses[ADF_FUSECTL4];
return ~me_disable & ADF_4XXX_ACCELENGINES_MASK;
}
@@ -178,8 +178,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
}
switch (adf_get_service_enabled(accel_dev)) {
- case SVC_CY:
- case SVC_CY2:
+ case SVC_SYM_ASYM:
return capabilities_sym | capabilities_asym;
case SVC_DC:
return capabilities_dc;
@@ -196,10 +195,8 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
case SVC_ASYM:
return capabilities_asym;
case SVC_ASYM_DC:
- case SVC_DC_ASYM:
return capabilities_asym | capabilities_dc;
case SVC_SYM_DC:
- case SVC_DC_SYM:
return capabilities_sym | capabilities_dc;
default:
return 0;
@@ -241,8 +238,7 @@ static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev)
{
switch (adf_get_service_enabled(accel_dev)) {
- case SVC_CY:
- case SVC_CY2:
+ case SVC_SYM_ASYM:
return adf_fw_cy_config;
case SVC_DC:
return adf_fw_dc_config;
@@ -253,10 +249,8 @@ static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev
case SVC_ASYM:
return adf_fw_asym_config;
case SVC_ASYM_DC:
- case SVC_DC_ASYM:
return adf_fw_asym_dc_config;
case SVC_SYM_DC:
- case SVC_DC_SYM:
return adf_fw_sym_dc_config;
default:
return NULL;
@@ -466,6 +460,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock;
hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
hw_data->clock_frequency = ADF_4XXX_AE_FREQ;
+ hw_data->services_supported = adf_gen4_services_supported;
adf_gen4_set_err_mask(&hw_data->dev_err_mask);
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
index d7de1cad1335..5537a9991e4e 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
@@ -81,7 +81,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adf_init_hw_data_4xxx(accel_dev->hw_device, ent->device);
pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
- pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses);
+ pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses[ADF_FUSECTL4]);
/* Get Accelerators and Accelerators Engines masks */
hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/Makefile b/drivers/crypto/intel/qat/qat_c3xxx/Makefile
index 7a06ad519bfc..d9e568572da8 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/Makefile
+++ b/drivers/crypto/intel/qat/qat_c3xxx/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o
-qat_c3xxx-objs := adf_drv.o adf_c3xxx_hw_data.o
+qat_c3xxx-y := adf_drv.o adf_c3xxx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index 201f9412c582..e78f7bfd30b8 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -27,8 +27,8 @@ static struct adf_hw_device_class c3xxx_class = {
static u32 get_accel_mask(struct adf_hw_device_data *self)
{
+ u32 fuses = self->fuses[ADF_FUSECTL0];
u32 straps = self->straps;
- u32 fuses = self->fuses;
u32 accel;
accel = ~(fuses | straps) >> ADF_C3XXX_ACCELERATORS_REG_OFFSET;
@@ -39,8 +39,8 @@ static u32 get_accel_mask(struct adf_hw_device_data *self)
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
+ u32 fuses = self->fuses[ADF_FUSECTL0];
u32 straps = self->straps;
- u32 fuses = self->fuses;
unsigned long disabled;
u32 ae_disable;
int accel;
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
index caa53882fda6..b825b35ab4bf 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
@@ -126,7 +126,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adf_init_hw_data_c3xxx(accel_dev->hw_device);
pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
- &hw_data->fuses);
+ &hw_data->fuses[ADF_FUSECTL0]);
pci_read_config_dword(pdev, ADF_C3XXX_SOFTSTRAP_CSR_OFFSET,
&hw_data->straps);
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
index 7ef633058c4f..31a908a211ac 100644
--- a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o
-qat_c3xxxvf-objs := adf_drv.o adf_c3xxxvf_hw_data.o
+qat_c3xxxvf-y := adf_drv.o adf_c3xxxvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c62x/Makefile b/drivers/crypto/intel/qat/qat_c62x/Makefile
index cc9255b3b198..cbdaaa135e84 100644
--- a/drivers/crypto/intel/qat/qat_c62x/Makefile
+++ b/drivers/crypto/intel/qat/qat_c62x/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o
-qat_c62x-objs := adf_drv.o adf_c62x_hw_data.o
+qat_c62x-y := adf_drv.o adf_c62x_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
index 6b5b0cf9c7c7..32ebe09477a8 100644
--- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
@@ -27,8 +27,8 @@ static struct adf_hw_device_class c62x_class = {
static u32 get_accel_mask(struct adf_hw_device_data *self)
{
+ u32 fuses = self->fuses[ADF_FUSECTL0];
u32 straps = self->straps;
- u32 fuses = self->fuses;
u32 accel;
accel = ~(fuses | straps) >> ADF_C62X_ACCELERATORS_REG_OFFSET;
@@ -39,8 +39,8 @@ static u32 get_accel_mask(struct adf_hw_device_data *self)
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
+ u32 fuses = self->fuses[ADF_FUSECTL0];
u32 straps = self->straps;
- u32 fuses = self->fuses;
unsigned long disabled;
u32 ae_disable;
int accel;
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
index b7398fee19ed..8a7bdec358d6 100644
--- a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
@@ -126,7 +126,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adf_init_hw_data_c62x(accel_dev->hw_device);
pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
- &hw_data->fuses);
+ &hw_data->fuses[ADF_FUSECTL0]);
pci_read_config_dword(pdev, ADF_C62X_SOFTSTRAP_CSR_OFFSET,
&hw_data->straps);
@@ -169,7 +169,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
/* Find and map all the device's BARS */
- i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
+ i = (hw_data->fuses[ADF_FUSECTL0] & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/Makefile b/drivers/crypto/intel/qat/qat_c62xvf/Makefile
index 256786662d60..60e499b041ec 100644
--- a/drivers/crypto/intel/qat/qat_c62xvf/Makefile
+++ b/drivers/crypto/intel/qat/qat_c62xvf/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o
-qat_c62xvf-objs := adf_drv.o adf_c62xvf_hw_data.o
+qat_c62xvf-y := adf_drv.o adf_c62xvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index 7acf9c576149..af5df29fd2e3 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -1,62 +1,62 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"CRYPTO_QAT"'
-intel_qat-objs := adf_cfg.o \
- adf_isr.o \
- adf_ctl_drv.o \
+intel_qat-y := adf_accel_engine.o \
+ adf_admin.o \
+ adf_aer.o \
+ adf_cfg.o \
adf_cfg_services.o \
+ adf_clock.o \
+ adf_ctl_drv.o \
adf_dev_mgr.o \
- adf_init.o \
- adf_accel_engine.o \
- adf_aer.o \
- adf_transport.o \
- adf_admin.o \
- adf_hw_arbiter.o \
- adf_sysfs.o \
- adf_sysfs_ras_counters.o \
+ adf_gen2_config.o \
+ adf_gen2_dc.o \
adf_gen2_hw_csr_data.o \
adf_gen2_hw_data.o \
- adf_gen2_config.o \
adf_gen4_config.o \
+ adf_gen4_dc.o \
adf_gen4_hw_csr_data.o \
adf_gen4_hw_data.o \
- adf_gen4_vf_mig.o \
adf_gen4_pm.o \
- adf_gen2_dc.o \
- adf_gen4_dc.o \
adf_gen4_ras.o \
adf_gen4_timer.o \
- adf_clock.o \
+ adf_gen4_vf_mig.o \
+ adf_hw_arbiter.o \
+ adf_init.o \
+ adf_isr.o \
adf_mstate_mgr.o \
- qat_crypto.o \
- qat_compression.o \
- qat_comp_algs.o \
- qat_algs.o \
- qat_asym_algs.o \
- qat_algs_send.o \
- adf_rl.o \
adf_rl_admin.o \
+ adf_rl.o \
+ adf_sysfs.o \
+ adf_sysfs_ras_counters.o \
adf_sysfs_rl.o \
- qat_uclo.o \
- qat_hal.o \
+ adf_transport.o \
+ qat_algs.o \
+ qat_algs_send.o \
+ qat_asym_algs.o \
qat_bl.o \
- qat_mig_dev.o
+ qat_comp_algs.o \
+ qat_compression.o \
+ qat_crypto.o \
+ qat_hal.o \
+ qat_mig_dev.o \
+ qat_uclo.o
-intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \
+intel_qat-$(CONFIG_DEBUG_FS) += adf_cnv_dbgfs.o \
+ adf_dbgfs.o \
adf_fw_counters.o \
- adf_cnv_dbgfs.o \
adf_gen4_pm_debugfs.o \
adf_gen4_tl.o \
- adf_heartbeat.o \
adf_heartbeat_dbgfs.o \
+ adf_heartbeat.o \
adf_pm_dbgfs.o \
adf_telemetry.o \
adf_tl_debugfs.o \
- adf_dbgfs.o
+ adf_transport_debug.o
-intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
+intel_qat-$(CONFIG_PCI_IOV) += adf_gen2_pfvf.o adf_gen4_pfvf.o \
adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \
- adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \
- adf_gen2_pfvf.o adf_gen4_pfvf.o
+ adf_pfvf_utils.o adf_pfvf_vf_msg.o \
+ adf_pfvf_vf_proto.o adf_sriov.o adf_vf_isr.o
intel_qat-$(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION) += adf_heartbeat_inject.o
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index 7830ecb1a1f1..dc21551153cb 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -10,6 +10,7 @@
#include <linux/ratelimit.h>
#include <linux/types.h>
#include <linux/qat/qat_mig_dev.h>
+#include <linux/wordpart.h>
#include "adf_cfg_common.h"
#include "adf_rl.h"
#include "adf_telemetry.h"
@@ -52,6 +53,16 @@ enum adf_accel_capabilities {
ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
};
+enum adf_fuses {
+ ADF_FUSECTL0,
+ ADF_FUSECTL1,
+ ADF_FUSECTL2,
+ ADF_FUSECTL3,
+ ADF_FUSECTL4,
+ ADF_FUSECTL5,
+ ADF_MAX_FUSES
+};
+
struct adf_bar {
resource_size_t base_addr;
void __iomem *virt_addr;
@@ -333,6 +344,7 @@ struct adf_hw_device_data {
int (*get_rp_group)(struct adf_accel_dev *accel_dev, u32 ae_mask);
u32 (*get_ena_thd_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
int (*dev_config)(struct adf_accel_dev *accel_dev);
+ bool (*services_supported)(unsigned long mask);
struct adf_pfvf_ops pfvf_ops;
struct adf_hw_csr_ops csr_ops;
struct adf_dc_ops dc_ops;
@@ -343,7 +355,7 @@ struct adf_hw_device_data {
struct qat_migdev_ops vfmig_ops;
const char *fw_name;
const char *fw_mmp_name;
- u32 fuses;
+ u32 fuses[ADF_MAX_FUSES];
u32 straps;
u32 accel_capabilities_mask;
u32 extended_dc_capabilities;
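
With fuses now an array indexed by the new adf_fuses enum, each device generation caches only the fuse register it actually reads: FUSECTL0 on the older parts and FUSECTL4 on the 4xxx/420xx devices, as the hunks above and below show. A hedged sketch of the same enum-indexed pattern in plain C; the mask value and structure names are illustrative, not the driver's:

/*
 * Enum-indexed fuse cache, mirroring the fuses[ADF_MAX_FUSES] change.
 * Standalone sketch; the mask value and names are illustrative only.
 */
#include <stdio.h>

enum fuse_id { FUSECTL0, FUSECTL1, FUSECTL2, FUSECTL3, FUSECTL4, FUSECTL5, MAX_FUSES };

struct hw_data {
	unsigned int fuses[MAX_FUSES];
};

#define ACCELENGINES_MASK 0x1ffffu	/* placeholder; the real mask is per-device */

static unsigned int get_ae_mask(const struct hw_data *hw)
{
	unsigned int me_disable = hw->fuses[FUSECTL4];	/* disabled-engine fuse bits */

	return ~me_disable & ACCELENGINES_MASK;		/* invert to get the enabled set */
}

int main(void)
{
	struct hw_data hw = { .fuses[FUSECTL4] = 0x3 };	/* engines 0 and 1 fused off */

	printf("ae_mask = 0x%x\n", get_ae_mask(&hw));
	return 0;
}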
@@ -370,6 +382,15 @@ struct adf_hw_device_data {
/* CSR write macro */
#define ADF_CSR_WR(csr_base, csr_offset, val) \
__raw_writel(val, csr_base + csr_offset)
+/*
+ * CSR write macro to handle cases where the high and low
+ * offsets are sparsely located.
+ */
+#define ADF_CSR_WR64_LO_HI(csr_base, csr_low_offset, csr_high_offset, val) \
+do { \
+ ADF_CSR_WR(csr_base, csr_low_offset, lower_32_bits(val)); \
+ ADF_CSR_WR(csr_base, csr_high_offset, upper_32_bits(val)); \
+} while (0)
/* CSR read macro */
#define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset)
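
ADF_CSR_WR64_LO_HI splits a 64-bit value with lower_32_bits()/upper_32_bits() and issues two 32-bit CSR writes, which is what lets the SSM watchdog code later in this series drop its open-coded unpack helper. A userspace sketch of that split, with the CSR space reduced to a byte array and __raw_writel() replaced by memcpy():

/*
 * Sketch of the lo/hi split behind ADF_CSR_WR64_LO_HI. The CSR space is a
 * plain byte array here and __raw_writel() is replaced by memcpy().
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t csr_space[0x100];

static void csr_wr32(unsigned int offset, uint32_t val)
{
	memcpy(&csr_space[offset], &val, sizeof(val));
}

static void csr_wr64_lo_hi(unsigned int lo_off, unsigned int hi_off, uint64_t val)
{
	csr_wr32(lo_off, (uint32_t)val);		/* lower_32_bits() */
	csr_wr32(hi_off, (uint32_t)(val >> 32));	/* upper_32_bits() */
}

int main(void)
{
	uint32_t lo, hi;

	/* Offsets are made up; the point is that low and high need not be adjacent. */
	csr_wr64_lo_hi(0x10, 0x54, 0x123456789abcdef0ULL);

	memcpy(&lo, &csr_space[0x10], sizeof(lo));
	memcpy(&hi, &csr_space[0x54], sizeof(hi));
	printf("lo=0x%08x hi=0x%08x\n", (unsigned int)lo, (unsigned int)hi);
	return 0;
}

The low and high offsets are passed separately because, as the macro comment notes, the two halves of a register may be sparsely located.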
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
index 268052294468..30abcd9e1283 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation */
+#include <linux/array_size.h>
+#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/string.h>
@@ -8,40 +10,165 @@
#include "adf_cfg_services.h"
#include "adf_cfg_strings.h"
-const char *const adf_cfg_services[] = {
- [SVC_CY] = ADF_CFG_CY,
- [SVC_CY2] = ADF_CFG_ASYM_SYM,
+static const char *const adf_cfg_services[] = {
+ [SVC_ASYM] = ADF_CFG_ASYM,
+ [SVC_SYM] = ADF_CFG_SYM,
[SVC_DC] = ADF_CFG_DC,
[SVC_DCC] = ADF_CFG_DCC,
- [SVC_SYM] = ADF_CFG_SYM,
- [SVC_ASYM] = ADF_CFG_ASYM,
- [SVC_DC_ASYM] = ADF_CFG_DC_ASYM,
- [SVC_ASYM_DC] = ADF_CFG_ASYM_DC,
- [SVC_DC_SYM] = ADF_CFG_DC_SYM,
- [SVC_SYM_DC] = ADF_CFG_SYM_DC,
};
-EXPORT_SYMBOL_GPL(adf_cfg_services);
-int adf_get_service_enabled(struct adf_accel_dev *accel_dev)
+/*
+ * Ensure that the size of the array matches the number of services,
+ * SVC_BASE_COUNT, that is used to size the bitmap.
+ */
+static_assert(ARRAY_SIZE(adf_cfg_services) == SVC_BASE_COUNT);
+
+/*
+ * Ensure that the maximum number of concurrent services that can be
+ * enabled on a device is less than or equal to the number of total
+ * supported services.
+ */
+static_assert(ARRAY_SIZE(adf_cfg_services) >= MAX_NUM_CONCURR_SVC);
+
+/*
+ * Ensure that the number of services fit a single unsigned long, as each
+ * service is represented by a bit in the mask.
+ */
+static_assert(BITS_PER_LONG >= SVC_BASE_COUNT);
+
+/*
+ * Ensure that size of the concatenation of all service strings is smaller
+ * than the size of the buffer that will contain them.
+ */
+static_assert(sizeof(ADF_CFG_SYM ADF_SERVICES_DELIMITER
+ ADF_CFG_ASYM ADF_SERVICES_DELIMITER
+ ADF_CFG_DC ADF_SERVICES_DELIMITER
+ ADF_CFG_DCC) < ADF_CFG_MAX_VAL_LEN_IN_BYTES);
+
+static int adf_service_string_to_mask(struct adf_accel_dev *accel_dev, const char *buf,
+ size_t len, unsigned long *out_mask)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { };
+ unsigned long mask = 0;
+ char *substr, *token;
+ int id, num_svc = 0;
+
+ if (len > ADF_CFG_MAX_VAL_LEN_IN_BYTES - 1)
+ return -EINVAL;
+
+ strscpy(services, buf, ADF_CFG_MAX_VAL_LEN_IN_BYTES);
+ substr = services;
+
+ while ((token = strsep(&substr, ADF_SERVICES_DELIMITER))) {
+ id = sysfs_match_string(adf_cfg_services, token);
+ if (id < 0)
+ return id;
+
+ if (test_and_set_bit(id, &mask))
+ return -EINVAL;
+
+ if (num_svc++ == MAX_NUM_CONCURR_SVC)
+ return -EINVAL;
+ }
+
+ if (hw_data->services_supported && !hw_data->services_supported(mask))
+ return -EINVAL;
+
+ *out_mask = mask;
+
+ return 0;
+}
+
+static int adf_service_mask_to_string(unsigned long mask, char *buf, size_t len)
+{
+ int offset = 0;
+ int bit;
+
+ if (len < ADF_CFG_MAX_VAL_LEN_IN_BYTES)
+ return -ENOSPC;
+
+ for_each_set_bit(bit, &mask, SVC_BASE_COUNT) {
+ if (offset)
+ offset += scnprintf(buf + offset, len - offset,
+ ADF_SERVICES_DELIMITER);
+
+ offset += scnprintf(buf + offset, len - offset, "%s",
+ adf_cfg_services[bit]);
+ }
+
+ return 0;
+}
+
+int adf_parse_service_string(struct adf_accel_dev *accel_dev, const char *in,
+ size_t in_len, char *out, size_t out_len)
{
- char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+ unsigned long mask;
+ int ret;
+
+ ret = adf_service_string_to_mask(accel_dev, in, in_len, &mask);
+ if (ret)
+ return ret;
+
+ if (!mask)
+ return -EINVAL;
+
+ return adf_service_mask_to_string(mask, out, out_len);
+}
+
+static int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long *mask)
+{
+ char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { };
+ size_t len;
int ret;
ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
ADF_SERVICES_ENABLED, services);
if (ret) {
- dev_err(&GET_DEV(accel_dev),
- ADF_SERVICES_ENABLED " param not found\n");
+ dev_err(&GET_DEV(accel_dev), "%s param not found\n",
+ ADF_SERVICES_ENABLED);
return ret;
}
- ret = match_string(adf_cfg_services, ARRAY_SIZE(adf_cfg_services),
- services);
- if (ret < 0)
- dev_err(&GET_DEV(accel_dev),
- "Invalid value of " ADF_SERVICES_ENABLED " param: %s\n",
- services);
+ len = strnlen(services, ADF_CFG_MAX_VAL_LEN_IN_BYTES);
+ ret = adf_service_string_to_mask(accel_dev, services, len, mask);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev), "Invalid value of %s param: %s\n",
+ ADF_SERVICES_ENABLED, services);
return ret;
}
+
+int adf_get_service_enabled(struct adf_accel_dev *accel_dev)
+{
+ unsigned long mask;
+ int ret;
+
+ ret = adf_get_service_mask(accel_dev, &mask);
+ if (ret)
+ return ret;
+
+ if (test_bit(SVC_SYM, &mask) && test_bit(SVC_ASYM, &mask))
+ return SVC_SYM_ASYM;
+
+ if (test_bit(SVC_SYM, &mask) && test_bit(SVC_DC, &mask))
+ return SVC_SYM_DC;
+
+ if (test_bit(SVC_ASYM, &mask) && test_bit(SVC_DC, &mask))
+ return SVC_ASYM_DC;
+
+ if (test_bit(SVC_SYM, &mask))
+ return SVC_SYM;
+
+ if (test_bit(SVC_ASYM, &mask))
+ return SVC_ASYM;
+
+ if (test_bit(SVC_DC, &mask))
+ return SVC_DC;
+
+ if (test_bit(SVC_DCC, &mask))
+ return SVC_DCC;
+
+ return -EINVAL;
+}
EXPORT_SYMBOL_GPL(adf_get_service_enabled);
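
The rewritten parser tokenises the ServicesEnabled string on ';', maps each token to a base-service bit, rejects duplicates and more than MAX_NUM_CONCURR_SVC entries, and later folds the bitmask back into the composed SVC_SYM_ASYM/SVC_SYM_DC/SVC_ASYM_DC identifiers. A standalone sketch of that flow; the 64-byte buffer, snprintf() copy and open-coded loops stand in for strscpy(), sysfs_match_string() and the kernel bit helpers:

/*
 * Userspace sketch of the new ServicesEnabled parsing; error codes are
 * simplified to -1 and the buffer size is arbitrary.
 */
#define _DEFAULT_SOURCE		/* for strsep() */
#include <stdio.h>
#include <string.h>

enum { SVC_ASYM, SVC_SYM, SVC_DC, SVC_DCC, SVC_BASE_COUNT };
enum { SVC_SYM_ASYM = SVC_BASE_COUNT, SVC_SYM_DC, SVC_ASYM_DC };

static const char *const svc_names[SVC_BASE_COUNT] = { "asym", "sym", "dc", "dcc" };

static int string_to_mask(const char *buf, unsigned long *out)
{
	char services[64];
	unsigned long mask = 0;
	char *substr, *token;
	int num_svc = 0;

	snprintf(services, sizeof(services), "%s", buf);
	substr = services;

	while ((token = strsep(&substr, ";"))) {
		int id, found = -1;

		for (id = 0; id < SVC_BASE_COUNT; id++)
			if (!strcmp(token, svc_names[id]))
				found = id;
		if (found < 0)
			return -1;		/* unknown service name */
		if (mask & (1UL << found))
			return -1;		/* duplicate service */
		mask |= 1UL << found;
		if (++num_svc > 3)		/* MAX_NUM_CONCURR_SVC */
			return -1;
	}

	*out = mask;
	return 0;
}

static int service_enabled(unsigned long mask)	/* mirrors adf_get_service_enabled() */
{
	if ((mask & (1UL << SVC_SYM)) && (mask & (1UL << SVC_ASYM)))
		return SVC_SYM_ASYM;
	if ((mask & (1UL << SVC_SYM)) && (mask & (1UL << SVC_DC)))
		return SVC_SYM_DC;
	if ((mask & (1UL << SVC_ASYM)) && (mask & (1UL << SVC_DC)))
		return SVC_ASYM_DC;
	if (mask & (1UL << SVC_SYM))
		return SVC_SYM;
	if (mask & (1UL << SVC_ASYM))
		return SVC_ASYM;
	if (mask & (1UL << SVC_DC))
		return SVC_DC;
	if (mask & (1UL << SVC_DCC))
		return SVC_DCC;
	return -1;
}

int main(void)
{
	unsigned long mask;

	if (!string_to_mask("sym;asym", &mask))
		printf("mask=0x%lx -> id=%d (SVC_SYM_ASYM=%d)\n",
		       mask, service_enabled(mask), SVC_SYM_ASYM);
	return 0;
}

Writing "sym;asym" therefore lands on the same composed identifier the old "cy"/"asym;sym" strings used to select.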
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
index c6b0328b0f5b..f6bafc15cbc6 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
@@ -8,21 +8,29 @@
struct adf_accel_dev;
enum adf_services {
- SVC_CY = 0,
- SVC_CY2,
+ SVC_ASYM = 0,
+ SVC_SYM,
SVC_DC,
SVC_DCC,
- SVC_SYM,
- SVC_ASYM,
- SVC_DC_ASYM,
- SVC_ASYM_DC,
- SVC_DC_SYM,
+ SVC_BASE_COUNT
+};
+
+enum adf_composed_services {
+ SVC_SYM_ASYM = SVC_BASE_COUNT,
SVC_SYM_DC,
- SVC_COUNT
+ SVC_ASYM_DC,
+};
+
+enum {
+ ADF_ONE_SERVICE = 1,
+ ADF_TWO_SERVICES,
+ ADF_THREE_SERVICES,
};
-extern const char *const adf_cfg_services[SVC_COUNT];
+#define MAX_NUM_CONCURR_SVC ADF_THREE_SERVICES
+int adf_parse_service_string(struct adf_accel_dev *accel_dev, const char *in,
+ size_t in_len, char *out, size_t out_len);
int adf_get_service_enabled(struct adf_accel_dev *accel_dev);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
index e015ad6cace2..b79982c4a856 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
@@ -27,13 +27,9 @@
#define ADF_CFG_CY "sym;asym"
#define ADF_CFG_SYM "sym"
#define ADF_CFG_ASYM "asym"
-#define ADF_CFG_ASYM_SYM "asym;sym"
-#define ADF_CFG_ASYM_DC "asym;dc"
-#define ADF_CFG_DC_ASYM "dc;asym"
-#define ADF_CFG_SYM_DC "sym;dc"
-#define ADF_CFG_DC_SYM "dc;sym"
#define ADF_CFG_DCC "dcc"
#define ADF_SERVICES_ENABLED "ServicesEnabled"
+#define ADF_SERVICES_DELIMITER ";"
#define ADF_PM_IDLE_SUPPORT "PmIdleSupport"
#define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled"
#define ADF_ETRMGR_COALESCING_ENABLED_FORMAT \
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
index 1f64bf49b221..2b263442c856 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
@@ -115,8 +115,8 @@ u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
+ u32 fuses = hw_data->fuses[ADF_FUSECTL0];
u32 straps = hw_data->straps;
- u32 fuses = hw_data->fuses;
u32 legfuses;
u32 capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c
index fe1f3d727dc5..f97e7a880f3a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c
@@ -213,7 +213,6 @@ static int adf_no_dev_config(struct adf_accel_dev *accel_dev)
*/
int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
{
- char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
int ret;
ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
@@ -224,18 +223,8 @@ int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
if (ret)
goto err;
- ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
- ADF_SERVICES_ENABLED, services);
- if (ret)
- goto err;
-
- ret = sysfs_match_string(adf_cfg_services, services);
- if (ret < 0)
- goto err;
-
- switch (ret) {
- case SVC_CY:
- case SVC_CY2:
+ switch (adf_get_service_enabled(accel_dev)) {
+ case SVC_SYM_ASYM:
ret = adf_crypto_dev_config(accel_dev);
break;
case SVC_DC:
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
index 41a0979e68c1..099949a2421c 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
+#include <linux/bitops.h>
#include <linux/iopoll.h>
#include <asm/div64.h>
#include "adf_accel_devices.h"
@@ -134,36 +135,18 @@ int adf_gen4_init_device(struct adf_accel_dev *accel_dev)
}
EXPORT_SYMBOL_GPL(adf_gen4_init_device);
-static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper,
- u32 *lower)
-{
- *lower = lower_32_bits(value);
- *upper = upper_32_bits(value);
-}
-
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
{
void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
u64 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
u64 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
- u32 ssm_wdt_pke_high = 0;
- u32 ssm_wdt_pke_low = 0;
- u32 ssm_wdt_high = 0;
- u32 ssm_wdt_low = 0;
- /* Convert 64bit WDT timer value into 32bit values for
- * mmio write to 32bit CSRs.
- */
- adf_gen4_unpack_ssm_wdtimer(timer_val, &ssm_wdt_high, &ssm_wdt_low);
- adf_gen4_unpack_ssm_wdtimer(timer_val_pke, &ssm_wdt_pke_high,
- &ssm_wdt_pke_low);
-
- /* Enable WDT for sym and dc */
- ADF_CSR_WR(pmisc_addr, ADF_SSMWDTL_OFFSET, ssm_wdt_low);
- ADF_CSR_WR(pmisc_addr, ADF_SSMWDTH_OFFSET, ssm_wdt_high);
- /* Enable WDT for pke */
- ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ssm_wdt_pke_low);
- ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEH_OFFSET, ssm_wdt_pke_high);
+ /* Enable watchdog timer for sym and dc */
+ ADF_CSR_WR64_LO_HI(pmisc_addr, ADF_SSMWDTL_OFFSET, ADF_SSMWDTH_OFFSET, timer_val);
+
+ /* Enable watchdog timer for pke */
+ ADF_CSR_WR64_LO_HI(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ADF_SSMWDTPKEH_OFFSET,
+ timer_val_pke);
}
EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer);
@@ -265,18 +248,29 @@ static bool is_single_service(int service_id)
case SVC_SYM:
case SVC_ASYM:
return true;
- case SVC_CY:
- case SVC_CY2:
- case SVC_DCC:
- case SVC_ASYM_DC:
- case SVC_DC_ASYM:
- case SVC_SYM_DC:
- case SVC_DC_SYM:
default:
return false;
}
}
+bool adf_gen4_services_supported(unsigned long mask)
+{
+ unsigned long num_svc = hweight_long(mask);
+
+ if (mask >= BIT(SVC_BASE_COUNT))
+ return false;
+
+ switch (num_svc) {
+ case ADF_ONE_SERVICE:
+ return true;
+ case ADF_TWO_SERVICES:
+ return !test_bit(SVC_DCC, &mask);
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL_GPL(adf_gen4_services_supported);
+
int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
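
adf_gen4_services_supported() accepts any single base service, accepts two services only when neither of them is dcc, and rejects everything else, including masks with bits at or above SVC_BASE_COUNT. A small sketch of the same rule with a portable population count in place of hweight_long():

/* Sketch of the GEN4 service-mask validation rule added above. */
#include <stdbool.h>
#include <stdio.h>

enum { SVC_ASYM, SVC_SYM, SVC_DC, SVC_DCC, SVC_BASE_COUNT };

static unsigned int hweight(unsigned long mask)
{
	unsigned int n = 0;

	for (; mask; mask &= mask - 1)
		n++;
	return n;
}

static bool gen4_services_supported(unsigned long mask)
{
	if (mask >= (1UL << SVC_BASE_COUNT))	/* only base-service bits are valid */
		return false;

	switch (hweight(mask)) {
	case 1:					/* ADF_ONE_SERVICE */
		return true;
	case 2:					/* ADF_TWO_SERVICES: dcc cannot be combined */
		return !(mask & (1UL << SVC_DCC));
	default:
		return false;
	}
}

int main(void)
{
	printf("sym+asym -> %d\n", gen4_services_supported((1UL << SVC_SYM) | (1UL << SVC_ASYM)));
	printf("dcc+dc   -> %d\n", gen4_services_supported((1UL << SVC_DCC) | (1UL << SVC_DC)));
	return 0;
}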
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
index e8c53bd76f1b..51fc2eaa263e 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
@@ -179,5 +179,6 @@ int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
struct bank_state *state);
int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev,
u32 bank_number, struct bank_state *state);
+bool adf_gen4_services_supported(unsigned long service_mask);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c
index 2dd3772bf58a..0f7f00a19e7d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c
@@ -695,7 +695,7 @@ static bool adf_handle_slice_hang_error(struct adf_accel_dev *accel_dev,
if (err_mask->parerr_wat_wcp_mask)
adf_poll_slicehang_csr(accel_dev, csr,
ADF_GEN4_SLICEHANGSTATUS_WAT_WCP,
- "ath_cph");
+ "wat_wcp");
return false;
}
@@ -1043,63 +1043,16 @@ static bool adf_handle_ssmcpppar_err(struct adf_accel_dev *accel_dev,
return reset_required;
}
-static bool adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev,
+static void adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev,
void __iomem *csr, u32 iastatssm)
{
- struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);
- u32 reg;
-
if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SSMSOFTERRORPARITY_BIT))
- return false;
-
- reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC);
- reg &= ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT;
- if (reg) {
- ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
- ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC, reg);
- }
-
- reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH);
- reg &= err_mask->parerr_ath_cph_mask;
- if (reg) {
- ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
- ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH, reg);
- }
-
- reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT);
- reg &= err_mask->parerr_cpr_xlt_mask;
- if (reg) {
- ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
- ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT, reg);
- }
-
- reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS);
- reg &= err_mask->parerr_dcpr_ucs_mask;
- if (reg) {
- ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
- ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS, reg);
- }
-
- reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE);
- reg &= err_mask->parerr_pke_mask;
- if (reg) {
- ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
- ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE, reg);
- }
-
- if (err_mask->parerr_wat_wcp_mask) {
- reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP);
- reg &= err_mask->parerr_wat_wcp_mask;
- if (reg) {
- ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
- ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP,
- reg);
- }
- }
+ return;
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
dev_err(&GET_DEV(accel_dev), "Slice ssm soft parity error reported");
- return false;
+ return;
}
static bool adf_handle_ser_err_ssmsh(struct adf_accel_dev *accel_dev,
@@ -1171,8 +1124,8 @@ static bool adf_handle_iaintstatssm(struct adf_accel_dev *accel_dev,
reset_required |= adf_handle_slice_hang_error(accel_dev, csr, iastatssm);
reset_required |= adf_handle_spppar_err(accel_dev, csr, iastatssm);
reset_required |= adf_handle_ssmcpppar_err(accel_dev, csr, iastatssm);
- reset_required |= adf_handle_rf_parr_err(accel_dev, csr, iastatssm);
reset_required |= adf_handle_ser_err_ssmsh(accel_dev, csr, iastatssm);
+ adf_handle_rf_parr_err(accel_dev, csr, iastatssm);
ADF_CSR_WR(csr, ADF_GEN4_IAINTSTATSSM, iastatssm);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
index 4fcd61ff70d1..6c39194647f0 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
@@ -3,6 +3,7 @@
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pci.h>
+#include <linux/string_choices.h>
#include "adf_accel_devices.h"
#include "adf_cfg.h"
#include "adf_cfg_services.h"
@@ -19,14 +20,12 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct adf_accel_dev *accel_dev;
- char *state;
accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
if (!accel_dev)
return -EINVAL;
- state = adf_dev_started(accel_dev) ? "up" : "down";
- return sysfs_emit(buf, "%s\n", state);
+ return sysfs_emit(buf, "%s\n", str_up_down(adf_dev_started(accel_dev)));
}
static ssize_t state_store(struct device *dev, struct device_attribute *attr,
@@ -117,25 +116,27 @@ static int adf_sysfs_update_dev_config(struct adf_accel_dev *accel_dev,
static ssize_t cfg_services_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { };
struct adf_hw_device_data *hw_data;
struct adf_accel_dev *accel_dev;
int ret;
- ret = sysfs_match_string(adf_cfg_services, buf);
- if (ret < 0)
- return ret;
-
accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
if (!accel_dev)
return -EINVAL;
+ ret = adf_parse_service_string(accel_dev, buf, count, services,
+ ADF_CFG_MAX_VAL_LEN_IN_BYTES);
+ if (ret)
+ return ret;
+
if (adf_dev_started(accel_dev)) {
dev_info(dev, "Device qat_dev%d must be down to reconfigure the service.\n",
accel_dev->accel_id);
return -EINVAL;
}
- ret = adf_sysfs_update_dev_config(accel_dev, adf_cfg_services[ret]);
+ ret = adf_sysfs_update_dev_config(accel_dev, services);
if (ret < 0)
return ret;
@@ -207,16 +208,13 @@ static DEVICE_ATTR_RW(pm_idle_enabled);
static ssize_t auto_reset_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- char *auto_reset;
struct adf_accel_dev *accel_dev;
accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
if (!accel_dev)
return -EINVAL;
- auto_reset = accel_dev->autoreset_on_error ? "on" : "off";
-
- return sysfs_emit(buf, "%s\n", auto_reset);
+ return sysfs_emit(buf, "%s\n", str_on_off(accel_dev->autoreset_on_error));
}
static ssize_t auto_reset_store(struct device *dev, struct device_attribute *attr,
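
The three open-coded ternaries in this file are replaced with str_up_down(), str_on_off() and str_enabled_disabled() from <linux/string_choices.h>. Their behaviour is a one-line mapping from a boolean to a fixed string pair, sketched here as plain C helpers:

/* Plain-C equivalents of the string_choices.h helpers used above. */
#include <stdbool.h>
#include <stdio.h>

static const char *str_up_down(bool v)          { return v ? "up" : "down"; }
static const char *str_on_off(bool v)           { return v ? "on" : "off"; }
static const char *str_enabled_disabled(bool v) { return v ? "enabled" : "disabled"; }

int main(void)
{
	printf("state: %s, auto reset: %s, group: %s\n",
	       str_up_down(true), str_on_off(false), str_enabled_disabled(true));
	return 0;
}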
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
index a03d43fef2b3..04f645957e28 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
@@ -16,8 +16,8 @@ enum icp_qat_fw_comp_20_cmd_id {
ICP_QAT_FW_COMP_20_CMD_LZ4_DECOMPRESS = 4,
ICP_QAT_FW_COMP_20_CMD_LZ4S_COMPRESS = 5,
ICP_QAT_FW_COMP_20_CMD_LZ4S_DECOMPRESS = 6,
- ICP_QAT_FW_COMP_20_CMD_XP10_COMPRESS = 7,
- ICP_QAT_FW_COMP_20_CMD_XP10_DECOMPRESS = 8,
+ ICP_QAT_FW_COMP_20_CMD_RESERVED_7 = 7,
+ ICP_QAT_FW_COMP_20_CMD_RESERVED_8 = 8,
ICP_QAT_FW_COMP_20_CMD_RESERVED_9 = 9,
ICP_QAT_FW_COMP_23_CMD_ZSTD_COMPRESS = 10,
ICP_QAT_FW_COMP_23_CMD_ZSTD_DECOMPRESS = 11,
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
index e28241bdd0f4..1c7bcd8e4055 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
@@ -43,7 +43,6 @@
#define ICP_QAT_SUOF_OBJS "SUF_OBJS"
#define ICP_QAT_SUOF_IMAG "SUF_IMAG"
#define ICP_QAT_SIMG_AE_INIT_SEQ_LEN (50 * sizeof(unsigned long long))
-#define ICP_QAT_SIMG_AE_INSTS_LEN (0x4000 * sizeof(unsigned long long))
#define DSS_FWSK_MODULUS_LEN 384 /* RSA3K */
#define DSS_FWSK_EXPONENT_LEN 4
@@ -75,13 +74,6 @@
DSS_SIGNATURE_LEN : \
CSS_SIGNATURE_LEN)
-#define ICP_QAT_CSS_AE_IMG_LEN (sizeof(struct icp_qat_simg_ae_mode) + \
- ICP_QAT_SIMG_AE_INIT_SEQ_LEN + \
- ICP_QAT_SIMG_AE_INSTS_LEN)
-#define ICP_QAT_CSS_AE_SIMG_LEN(handle) (sizeof(struct icp_qat_css_hdr) + \
- ICP_QAT_CSS_FWSK_PUB_LEN(handle) + \
- ICP_QAT_CSS_SIGNATURE_LEN(handle) + \
- ICP_QAT_CSS_AE_IMG_LEN)
#define ICP_QAT_AE_IMG_OFFSET(handle) (sizeof(struct icp_qat_css_hdr) + \
ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + \
ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle) + \
@@ -404,8 +396,6 @@ struct icp_qat_suof_img_hdr {
char *simg_buf;
unsigned long simg_len;
char *css_header;
- char *css_key;
- char *css_signature;
char *css_simg;
unsigned long simg_size;
unsigned int ae_num;
diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.c b/drivers/crypto/intel/qat/qat_common/qat_bl.c
index 338acf29c487..5e4dad4693ca 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_bl.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_bl.c
@@ -251,162 +251,3 @@ int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
extra_dst_buff, sz_extra_dst_buff,
sskip, dskip, flags);
}
-
-static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev,
- struct qat_alg_buf_list *bl)
-{
- struct device *dev = &GET_DEV(accel_dev);
- int n = bl->num_bufs;
- int i;
-
- for (i = 0; i < n; i++)
- if (!dma_mapping_error(dev, bl->buffers[i].addr))
- dma_unmap_single(dev, bl->buffers[i].addr,
- bl->buffers[i].len, DMA_FROM_DEVICE);
-}
-
-static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
- struct scatterlist *sgl,
- struct qat_alg_buf_list **bl)
-{
- struct device *dev = &GET_DEV(accel_dev);
- struct qat_alg_buf_list *bufl;
- int node = dev_to_node(dev);
- struct scatterlist *sg;
- int n, i, sg_nctr;
- size_t sz;
-
- n = sg_nents(sgl);
- sz = struct_size(bufl, buffers, n);
- bufl = kzalloc_node(sz, GFP_KERNEL, node);
- if (unlikely(!bufl))
- return -ENOMEM;
-
- for (i = 0; i < n; i++)
- bufl->buffers[i].addr = DMA_MAPPING_ERROR;
-
- sg_nctr = 0;
- for_each_sg(sgl, sg, n, i) {
- int y = sg_nctr;
-
- if (!sg->length)
- continue;
-
- bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg),
- sg->length,
- DMA_FROM_DEVICE);
- bufl->buffers[y].len = sg->length;
- if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
- goto err_map;
- sg_nctr++;
- }
- bufl->num_bufs = sg_nctr;
- bufl->num_mapped_bufs = sg_nctr;
-
- *bl = bufl;
-
- return 0;
-
-err_map:
- for (i = 0; i < n; i++)
- if (!dma_mapping_error(dev, bufl->buffers[i].addr))
- dma_unmap_single(dev, bufl->buffers[i].addr,
- bufl->buffers[i].len,
- DMA_FROM_DEVICE);
- kfree(bufl);
- *bl = NULL;
-
- return -ENOMEM;
-}
-
-static void qat_bl_sgl_free_unmap(struct adf_accel_dev *accel_dev,
- struct scatterlist *sgl,
- struct qat_alg_buf_list *bl,
- bool free_bl)
-{
- if (bl) {
- qat_bl_sgl_unmap(accel_dev, bl);
-
- if (free_bl)
- kfree(bl);
- }
- if (sgl)
- sgl_free(sgl);
-}
-
-static int qat_bl_sgl_alloc_map(struct adf_accel_dev *accel_dev,
- struct scatterlist **sgl,
- struct qat_alg_buf_list **bl,
- unsigned int dlen,
- gfp_t gfp)
-{
- struct scatterlist *dst;
- int ret;
-
- dst = sgl_alloc(dlen, gfp, NULL);
- if (!dst) {
- dev_err(&GET_DEV(accel_dev), "sg_alloc failed\n");
- return -ENOMEM;
- }
-
- ret = qat_bl_sgl_map(accel_dev, dst, bl);
- if (ret)
- goto err;
-
- *sgl = dst;
-
- return 0;
-
-err:
- sgl_free(dst);
- *sgl = NULL;
- return ret;
-}
-
-int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
- struct scatterlist **sg,
- unsigned int dlen,
- struct qat_request_buffs *qat_bufs,
- gfp_t gfp)
-{
- struct device *dev = &GET_DEV(accel_dev);
- dma_addr_t new_blp = DMA_MAPPING_ERROR;
- struct qat_alg_buf_list *new_bl;
- struct scatterlist *new_sg;
- size_t new_bl_size;
- int ret;
-
- ret = qat_bl_sgl_alloc_map(accel_dev, &new_sg, &new_bl, dlen, gfp);
- if (ret)
- return ret;
-
- new_bl_size = struct_size(new_bl, buffers, new_bl->num_bufs);
-
- /* Map new firmware SGL descriptor */
- new_blp = dma_map_single(dev, new_bl, new_bl_size, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, new_blp)))
- goto err;
-
- /* Unmap old firmware SGL descriptor */
- dma_unmap_single(dev, qat_bufs->bloutp, qat_bufs->sz_out, DMA_TO_DEVICE);
-
- /* Free and unmap old scatterlist */
- qat_bl_sgl_free_unmap(accel_dev, *sg, qat_bufs->blout,
- !qat_bufs->sgl_dst_valid);
-
- qat_bufs->sgl_dst_valid = false;
- qat_bufs->blout = new_bl;
- qat_bufs->bloutp = new_blp;
- qat_bufs->sz_out = new_bl_size;
-
- *sg = new_sg;
-
- return 0;
-err:
- qat_bl_sgl_free_unmap(accel_dev, new_sg, new_bl, true);
-
- if (!dma_mapping_error(dev, new_blp))
- dma_unmap_single(dev, new_blp, new_bl_size, DMA_TO_DEVICE);
-
- return -ENOMEM;
-}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.h b/drivers/crypto/intel/qat/qat_common/qat_bl.h
index 3f5b79015400..2827d5055d3c 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_bl.h
+++ b/drivers/crypto/intel/qat/qat_common/qat_bl.h
@@ -65,10 +65,4 @@ static inline gfp_t qat_algs_alloc_flags(struct crypto_async_request *req)
return req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}
-int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
- struct scatterlist **newd,
- unsigned int dlen,
- struct qat_request_buffs *qat_bufs,
- gfp_t gfp);
-
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
index 2ba4aa22e092..a6e02405d402 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
@@ -29,11 +29,6 @@ struct qat_compression_ctx {
int (*qat_comp_callback)(struct qat_compression_req *qat_req, void *resp);
};
-struct qat_dst {
- bool is_null;
- int resubmitted;
-};
-
struct qat_compression_req {
u8 req[QAT_COMP_REQ_SIZE];
struct qat_compression_ctx *qat_compression_ctx;
@@ -42,8 +37,6 @@ struct qat_compression_req {
enum direction dir;
int actual_dlen;
struct qat_alg_req alg_req;
- struct work_struct resubmit;
- struct qat_dst dst;
};
static int qat_alg_send_dc_message(struct qat_compression_req *qat_req,
@@ -60,46 +53,6 @@ static int qat_alg_send_dc_message(struct qat_compression_req *qat_req,
return qat_alg_send_message(alg_req);
}
-static void qat_comp_resubmit(struct work_struct *work)
-{
- struct qat_compression_req *qat_req =
- container_of(work, struct qat_compression_req, resubmit);
- struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx;
- struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
- struct qat_request_buffs *qat_bufs = &qat_req->buf;
- struct qat_compression_instance *inst = ctx->inst;
- struct acomp_req *areq = qat_req->acompress_req;
- struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
- unsigned int dlen = CRYPTO_ACOMP_DST_MAX;
- u8 *req = qat_req->req;
- dma_addr_t dfbuf;
- int ret;
-
- areq->dlen = dlen;
-
- dev_dbg(&GET_DEV(accel_dev), "[%s][%s] retry NULL dst request - dlen = %d\n",
- crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
- qat_req->dir == COMPRESSION ? "comp" : "decomp", dlen);
-
- ret = qat_bl_realloc_map_new_dst(accel_dev, &areq->dst, dlen, qat_bufs,
- qat_algs_alloc_flags(&areq->base));
- if (ret)
- goto err;
-
- qat_req->dst.resubmitted = true;
-
- dfbuf = qat_req->buf.bloutp;
- qat_comp_override_dst(req, dfbuf, dlen);
-
- ret = qat_alg_send_dc_message(qat_req, inst, &areq->base);
- if (ret != -ENOSPC)
- return;
-
-err:
- qat_bl_free_bufl(accel_dev, qat_bufs);
- acomp_request_complete(areq, ret);
-}
-
static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
void *resp)
{
@@ -131,21 +84,6 @@ static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
areq->dlen = 0;
- if (qat_req->dir == DECOMPRESSION && qat_req->dst.is_null) {
- if (cmp_err == ERR_CODE_OVERFLOW_ERROR) {
- if (qat_req->dst.resubmitted) {
- dev_dbg(&GET_DEV(accel_dev),
- "Output does not fit destination buffer\n");
- res = -EOVERFLOW;
- goto end;
- }
-
- INIT_WORK(&qat_req->resubmit, qat_comp_resubmit);
- adf_misc_wq_queue_work(&qat_req->resubmit);
- return;
- }
- }
-
if (unlikely(status != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
goto end;
@@ -245,29 +183,9 @@ static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum directi
if (!areq->src || !slen)
return -EINVAL;
- if (areq->dst && !dlen)
+ if (!areq->dst || !dlen)
return -EINVAL;
- qat_req->dst.is_null = false;
-
- /* Handle acomp requests that require the allocation of a destination
- * buffer. The size of the destination buffer is double the source
- * buffer (rounded up to the size of a page) to fit the decompressed
- * output or an expansion on the data for compression.
- */
- if (!areq->dst) {
- qat_req->dst.is_null = true;
-
- dlen = round_up(2 * slen, PAGE_SIZE);
- areq->dst = sgl_alloc(dlen, f, NULL);
- if (!areq->dst)
- return -ENOMEM;
-
- dlen -= dhdr + dftr;
- areq->dlen = dlen;
- qat_req->dst.resubmitted = false;
- }
-
if (dir == COMPRESSION) {
params.extra_dst_buff = inst->dc_data->ovf_buff_p;
ovf_buff_sz = inst->dc_data->ovf_buff_sz;
@@ -329,7 +247,6 @@ static struct acomp_alg qat_acomp[] = { {
.exit = qat_comp_alg_exit_tfm,
.compress = qat_comp_alg_compress,
.decompress = qat_comp_alg_decompress,
- .dst_free = sgl_free,
.reqsize = sizeof(struct qat_compression_req),
}};
diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_req.h b/drivers/crypto/intel/qat/qat_common/qat_comp_req.h
index 404e32c5e778..18a1f33a6db9 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_comp_req.h
+++ b/drivers/crypto/intel/qat/qat_common/qat_comp_req.h
@@ -25,16 +25,6 @@ static inline void qat_comp_create_req(void *ctx, void *req, u64 src, u32 slen,
req_pars->out_buffer_sz = dlen;
}
-static inline void qat_comp_override_dst(void *req, u64 dst, u32 dlen)
-{
- struct icp_qat_fw_comp_req *fw_req = req;
- struct icp_qat_fw_comp_req_params *req_pars = &fw_req->comp_pars;
-
- fw_req->comn_mid.dest_data_addr = dst;
- fw_req->comn_mid.dst_length = dlen;
- req_pars->out_buffer_sz = dlen;
-}
-
static inline void qat_comp_create_compression_req(void *ctx, void *req,
u64 src, u32 slen,
u64 dst, u32 dlen,
diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
index 7ea40b4f6e5b..7678a93c6853 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/align.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
@@ -1064,6 +1065,7 @@ static void qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
{
struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
+ unsigned int offset = ICP_QAT_AE_IMG_OFFSET(handle);
struct icp_qat_simg_ae_mode *ae_mode;
struct icp_qat_suof_objhdr *suof_objhdr;
@@ -1075,13 +1077,7 @@ static void qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle,
suof_chunk_hdr->offset))->img_length;
suof_img_hdr->css_header = suof_img_hdr->simg_buf;
- suof_img_hdr->css_key = (suof_img_hdr->css_header +
- sizeof(struct icp_qat_css_hdr));
- suof_img_hdr->css_signature = suof_img_hdr->css_key +
- ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
- ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle);
- suof_img_hdr->css_simg = suof_img_hdr->css_signature +
- ICP_QAT_CSS_SIGNATURE_LEN(handle);
+ suof_img_hdr->css_simg = suof_img_hdr->css_header + offset;
ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg);
suof_img_hdr->ae_mask = ae_mode->ae_mask;
@@ -1419,20 +1415,21 @@ static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_fw_auth_desc *auth_desc;
struct icp_qat_auth_chunk *auth_chunk;
u64 virt_addr, bus_addr, virt_base;
- unsigned int length, simg_offset = sizeof(*auth_chunk);
+ unsigned int simg_offset = sizeof(*auth_chunk);
struct icp_qat_simg_ae_mode *simg_ae_mode;
struct icp_firml_dram_desc img_desc;
+ int ret;
- if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)) {
- pr_err("QAT: error, input image size overflow %d\n", size);
- return -EINVAL;
- }
- length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
- ICP_QAT_CSS_AE_SIMG_LEN(handle) + simg_offset :
- size + ICP_QAT_CSS_FWSK_PAD_LEN(handle) + simg_offset;
- if (qat_uclo_simg_alloc(handle, &img_desc, length)) {
+ ret = qat_uclo_simg_alloc(handle, &img_desc, ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN);
+ if (ret) {
pr_err("QAT: error, allocate continuous dram fail\n");
- return -ENOMEM;
+ return ret;
+ }
+
+ if (!IS_ALIGNED(img_desc.dram_size, 8) || !img_desc.dram_bus_addr) {
+ pr_debug("QAT: invalid address\n");
+ qat_uclo_simg_free(handle, &img_desc);
+ return -EINVAL;
}
auth_chunk = img_desc.dram_base_addr_v;
@@ -1490,6 +1487,13 @@ static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
auth_desc->img_low = (unsigned int)bus_addr;
auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET(handle);
+ if (bus_addr + auth_desc->img_len > img_desc.dram_bus_addr +
+ ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN) {
+ pr_err("QAT: insufficient memory size for authentication data\n");
+ qat_uclo_simg_free(handle, &img_desc);
+ return -ENOMEM;
+ }
+
memcpy((void *)(uintptr_t)virt_addr,
(void *)(image + ICP_QAT_AE_IMG_OFFSET(handle)),
auth_desc->img_len);
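
qat_uclo_map_auth_fw() now always allocates an ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN region and, before the memcpy above, verifies that the image payload ends inside it. A standalone sketch of that end-address bounds check; the region size, offsets and function name are illustrative only:

/*
 * Sketch of the bounds check added before the firmware-image memcpy.
 * The region size, offsets and function name are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_IMAGE_LEN	4096U	/* stands in for ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN */

static int copy_img(uint8_t *dram_base, uint64_t dram_bus_addr,
		    uint64_t img_bus_addr, const uint8_t *image, uint32_t img_len)
{
	/* Reject images whose payload would run past the fixed-size region. */
	if (img_bus_addr + img_len > dram_bus_addr + MAX_IMAGE_LEN) {
		fprintf(stderr, "insufficient memory size for authentication data\n");
		return -1;
	}

	memcpy(dram_base + (img_bus_addr - dram_bus_addr), image, img_len);
	return 0;
}

int main(void)
{
	static uint8_t region[MAX_IMAGE_LEN];
	static const uint8_t image[128] = { 0xab };
	uint64_t base = 0x1000;

	printf("in range: %d\n", copy_img(region, base, base + 256, image, sizeof(image)));
	printf("past end: %d\n", copy_img(region, base, base + MAX_IMAGE_LEN - 16, image, sizeof(image)));
	return 0;
}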
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
index cfd3bd757715..5bf5c890c362 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
-qat_dh895xcc-objs := adf_drv.o adf_dh895xcc_hw_data.o
+qat_dh895xcc-y := adf_drv.o adf_dh895xcc_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index c0661ff5e929..e48bcf1818cd 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -29,7 +29,7 @@ static struct adf_hw_device_class dh895xcc_class = {
static u32 get_accel_mask(struct adf_hw_device_data *self)
{
- u32 fuses = self->fuses;
+ u32 fuses = self->fuses[ADF_FUSECTL0];
return ~fuses >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET &
ADF_DH895XCC_ACCELERATORS_MASK;
@@ -37,7 +37,7 @@ static u32 get_accel_mask(struct adf_hw_device_data *self)
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
- u32 fuses = self->fuses;
+ u32 fuses = self->fuses[ADF_FUSECTL0];
return ~fuses & ADF_DH895XCC_ACCELENGINES_MASK;
}
@@ -99,7 +99,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
- int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK)
+ int sku = (self->fuses[ADF_FUSECTL0] & ADF_DH895XCC_FUSECTL_SKU_MASK)
>> ADF_DH895XCC_FUSECTL_SKU_SHIFT;
switch (sku) {
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
index 3137fc3b5cf6..07e9d7e52861 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
@@ -126,7 +126,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adf_init_hw_data_dh895xcc(accel_dev->hw_device);
pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
- &hw_data->fuses);
+ &hw_data->fuses[ADF_FUSECTL0]);
/* Get Accelerators and Accelerators Engines masks */
hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
index 64b54e92b2b4..93f9c81edf09 100644
--- a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o
-qat_dh895xccvf-objs := adf_drv.o adf_dh895xccvf_hw_data.o
+qat_dh895xccvf-y := adf_drv.o adf_dh895xccvf_hw_data.o
diff --git a/drivers/crypto/marvell/Kconfig b/drivers/crypto/marvell/Kconfig
index 4c25a78ab3ed..aa269abb0499 100644
--- a/drivers/crypto/marvell/Kconfig
+++ b/drivers/crypto/marvell/Kconfig
@@ -24,7 +24,7 @@ config CRYPTO_DEV_OCTEONTX_CPT
tristate "Support for Marvell OcteonTX CPT driver"
depends on ARCH_THUNDER || COMPILE_TEST
depends on PCI_MSI && 64BIT
- depends on CRYPTO_LIB_AES
+ select CRYPTO_LIB_AES
select CRYPTO_SKCIPHER
select CRYPTO_HASH
select CRYPTO_AEAD
@@ -41,10 +41,10 @@ config CRYPTO_DEV_OCTEONTX2_CPT
tristate "Marvell OcteonTX2 CPT driver"
depends on ARCH_THUNDER2 || COMPILE_TEST
depends on PCI_MSI && 64BIT
- depends on CRYPTO_LIB_AES
depends on NET_VENDOR_MARVELL
select OCTEONTX2_MBOX
select CRYPTO_DEV_MARVELL
+ select CRYPTO_LIB_AES
select CRYPTO_SKCIPHER
select CRYPTO_HASH
select CRYPTO_AEAD
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
index c4250e5fcf8f..9f5601c0280b 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
@@ -10,6 +10,7 @@
#include <linux/ctype.h>
#include <linux/firmware.h>
+#include <linux/string_choices.h>
#include "otx_cpt_common.h"
#include "otx_cptpf_ucode.h"
#include "otx_cptpf.h"
@@ -505,17 +506,6 @@ int otx_cpt_uc_supports_eng_type(struct otx_cpt_ucode *ucode, int eng_type)
}
EXPORT_SYMBOL_GPL(otx_cpt_uc_supports_eng_type);
-int otx_cpt_eng_grp_has_eng_type(struct otx_cpt_eng_grp_info *eng_grp,
- int eng_type)
-{
- struct otx_cpt_engs_rsvd *engs;
-
- engs = find_engines_by_type(eng_grp, eng_type);
-
- return (engs != NULL ? 1 : 0);
-}
-EXPORT_SYMBOL_GPL(otx_cpt_eng_grp_has_eng_type);
-
static void print_ucode_info(struct otx_cpt_eng_grp_info *eng_grp,
char *buf, int size)
{
@@ -614,8 +604,8 @@ static void print_dbg_info(struct device *dev,
for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
grp = &eng_grps->grp[i];
- pr_debug("engine_group%d, state %s\n", i, grp->is_enabled ?
- "enabled" : "disabled");
+ pr_debug("engine_group%d, state %s\n", i,
+ str_enabled_disabled(grp->is_enabled));
if (grp->is_enabled) {
mirrored_grp = &eng_grps->grp[grp->mirror.idx];
pr_debug("Ucode0 filename %s, version %s\n",
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h
index 8620ac87a447..df79ee416c0d 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h
@@ -174,7 +174,5 @@ int otx_cpt_try_create_default_eng_grps(struct pci_dev *pdev,
void otx_cpt_set_eng_grps_is_rdonly(struct otx_cpt_eng_grps *eng_grps,
bool is_rdonly);
int otx_cpt_uc_supports_eng_type(struct otx_cpt_ucode *ucode, int eng_type);
-int otx_cpt_eng_grp_has_eng_type(struct otx_cpt_eng_grp_info *eng_grp,
- int eng_type);
#endif /* __OTX_CPTPF_UCODE_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index 5c9484646172..42c5484ce66a 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -3,6 +3,7 @@
#include <linux/ctype.h>
#include <linux/firmware.h>
+#include <linux/string_choices.h>
#include "otx2_cptpf_ucode.h"
#include "otx2_cpt_common.h"
#include "otx2_cptpf.h"
@@ -1774,102 +1775,3 @@ err_print:
dev_err(dev, "%s\n", err_msg);
return -EINVAL;
}
-
-static void get_engs_info(struct otx2_cpt_eng_grp_info *eng_grp, char *buf,
- int size, int idx)
-{
- struct otx2_cpt_engs_rsvd *mirrored_engs = NULL;
- struct otx2_cpt_engs_rsvd *engs;
- int len, i;
-
- buf[0] = '\0';
- for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
- engs = &eng_grp->engs[i];
- if (!engs->type)
- continue;
- if (idx != -1 && idx != i)
- continue;
-
- if (eng_grp->mirror.is_ena)
- mirrored_engs = find_engines_by_type(
- &eng_grp->g->grp[eng_grp->mirror.idx],
- engs->type);
- if (i > 0 && idx == -1) {
- len = strlen(buf);
- scnprintf(buf + len, size - len, ", ");
- }
-
- len = strlen(buf);
- scnprintf(buf + len, size - len, "%d %s ",
- mirrored_engs ? engs->count + mirrored_engs->count :
- engs->count,
- get_eng_type_str(engs->type));
- if (mirrored_engs) {
- len = strlen(buf);
- scnprintf(buf + len, size - len,
- "(%d shared with engine_group%d) ",
- engs->count <= 0 ?
- engs->count + mirrored_engs->count :
- mirrored_engs->count,
- eng_grp->mirror.idx);
- }
- }
-}
-
-void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf)
-{
- struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
- struct otx2_cpt_eng_grp_info *mirrored_grp;
- char engs_info[2 * OTX2_CPT_NAME_LENGTH];
- struct otx2_cpt_eng_grp_info *grp;
- struct otx2_cpt_engs_rsvd *engs;
- int i, j;
-
- pr_debug("Engine groups global info");
- pr_debug("max SE %d, max IE %d, max AE %d", eng_grps->avail.max_se_cnt,
- eng_grps->avail.max_ie_cnt, eng_grps->avail.max_ae_cnt);
- pr_debug("free SE %d", eng_grps->avail.se_cnt);
- pr_debug("free IE %d", eng_grps->avail.ie_cnt);
- pr_debug("free AE %d", eng_grps->avail.ae_cnt);
-
- for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
- grp = &eng_grps->grp[i];
- pr_debug("engine_group%d, state %s", i,
- grp->is_enabled ? "enabled" : "disabled");
- if (grp->is_enabled) {
- mirrored_grp = &eng_grps->grp[grp->mirror.idx];
- pr_debug("Ucode0 filename %s, version %s",
- grp->mirror.is_ena ?
- mirrored_grp->ucode[0].filename :
- grp->ucode[0].filename,
- grp->mirror.is_ena ?
- mirrored_grp->ucode[0].ver_str :
- grp->ucode[0].ver_str);
- if (is_2nd_ucode_used(grp))
- pr_debug("Ucode1 filename %s, version %s",
- grp->ucode[1].filename,
- grp->ucode[1].ver_str);
- }
-
- for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
- engs = &grp->engs[j];
- if (engs->type) {
- u32 mask[5] = { };
-
- get_engs_info(grp, engs_info,
- 2 * OTX2_CPT_NAME_LENGTH, j);
- pr_debug("Slot%d: %s", j, engs_info);
- bitmap_to_arr32(mask, engs->bmap,
- eng_grps->engs_num);
- if (is_dev_otx2(cptpf->pdev))
- pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x",
- mask[3], mask[2], mask[1],
- mask[0]);
- else
- pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x %8.8x",
- mask[4], mask[3], mask[2], mask[1],
- mask[0]);
- }
- }
- }
-}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
index 365fe8943bd9..7e6a6a4ec37c 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
@@ -166,7 +166,6 @@ int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
struct devlink_param_gset_ctx *ctx);
int otx2_cpt_dl_custom_egrp_delete(struct otx2_cptpf_dev *cptpf,
struct devlink_param_gset_ctx *ctx);
-void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf);
struct otx2_cpt_engs_rsvd *find_engines_by_type(
struct otx2_cpt_eng_grp_info *eng_grp,
int eng_type);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
index 5387c68f3c9d..426244107037 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
@@ -264,9 +264,10 @@ static int cpt_process_ccode(struct otx2_cptlfs_info *lfs,
break;
}
- dev_err(&pdev->dev,
- "Request failed with software error code 0x%x\n",
- cpt_status->s.uc_compcode);
+ pr_debug("Request failed with software error code 0x%x: algo = %s driver = %s\n",
+ cpt_status->s.uc_compcode,
+ info->req->areq->tfm->__crt_alg->cra_name,
+ info->req->areq->tfm->__crt_alg->cra_driver_name);
otx2_cpt_dump_sg_list(pdev, info->req);
break;
}
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index d94a26c3541a..133ebc998236 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -265,12 +265,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
MXS_DCP_CONTROL0_INTERRUPT |
MXS_DCP_CONTROL0_ENABLE_CIPHER;
- if (key_referenced)
- /* Set OTP key bit to select the key via KEY_SELECT. */
- desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY;
- else
+ if (!key_referenced)
/* Payload contains the key. */
desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
+ else if (actx->key[0] == DCP_PAES_KEY_OTP)
+ /* Set OTP key bit to select the key via KEY_SELECT. */
+ desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY;
if (rctx->enc)
desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
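The mxs-dcp hunk above reorders the key selection so that a referenced key only sets the OTP bit when the reference actually names the OTP key; other referenced keys are still selected via KEY_SELECT without that bit, and a plain key keeps travelling in the payload. A minimal standalone sketch of that three-way decision, with illustrative constants standing in for the MXS_DCP_CONTROL0_* bits and DCP_PAES_KEY_OTP (not the driver's real values):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative values only; the real bits live in mxs-dcp.c. */
#define CTRL0_PAYLOAD_KEY	0x0800u
#define CTRL0_OTP_KEY		0x0400u
#define PAES_KEY_OTP		0x01

static uint32_t pick_key_bits(bool key_referenced, uint8_t key_ref)
{
	uint32_t control0 = 0;

	if (!key_referenced)
		control0 |= CTRL0_PAYLOAD_KEY;	/* key is carried in the payload */
	else if (key_ref == PAES_KEY_OTP)
		control0 |= CTRL0_OTP_KEY;	/* select the fused OTP key */
	/* other referenced keys are picked via KEY_SELECT, no extra bit */
	return control0;
}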
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 82214cde2bcd..b950fcce8a9b 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -101,9 +101,13 @@ static int update_param(struct nx842_crypto_param *p,
return 0;
}
-int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver)
+void *nx842_crypto_alloc_ctx(struct nx842_driver *driver)
{
- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct nx842_crypto_ctx *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
spin_lock_init(&ctx->lock);
ctx->driver = driver;
@@ -114,22 +118,23 @@ int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver)
kfree(ctx->wmem);
free_page((unsigned long)ctx->sbounce);
free_page((unsigned long)ctx->dbounce);
- return -ENOMEM;
+ kfree(ctx);
+ return ERR_PTR(-ENOMEM);
}
- return 0;
+ return ctx;
}
-EXPORT_SYMBOL_GPL(nx842_crypto_init);
+EXPORT_SYMBOL_GPL(nx842_crypto_alloc_ctx);
-void nx842_crypto_exit(struct crypto_tfm *tfm)
+void nx842_crypto_free_ctx(void *p)
{
- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct nx842_crypto_ctx *ctx = p;
kfree(ctx->wmem);
free_page((unsigned long)ctx->sbounce);
free_page((unsigned long)ctx->dbounce);
}
-EXPORT_SYMBOL_GPL(nx842_crypto_exit);
+EXPORT_SYMBOL_GPL(nx842_crypto_free_ctx);
static void check_constraints(struct nx842_constraints *c)
{
@@ -246,11 +251,11 @@ nospc:
return update_param(p, slen, dskip + dlen);
}
-int nx842_crypto_compress(struct crypto_tfm *tfm,
+int nx842_crypto_compress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
+ u8 *dst, unsigned int *dlen, void *pctx)
{
- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct nx842_crypto_ctx *ctx = pctx;
struct nx842_crypto_header *hdr =
container_of(&ctx->header,
struct nx842_crypto_header, hdr);
@@ -431,11 +436,11 @@ usesw:
return update_param(p, slen + padding, dlen);
}
-int nx842_crypto_decompress(struct crypto_tfm *tfm,
+int nx842_crypto_decompress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
+ u8 *dst, unsigned int *dlen, void *pctx)
{
- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct nx842_crypto_ctx *ctx = pctx;
struct nx842_crypto_header *hdr;
struct nx842_crypto_param p;
struct nx842_constraints c = *ctx->driver->constraints;
diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h
index 887d4ce3cb49..f5e2c82ba876 100644
--- a/drivers/crypto/nx/nx-842.h
+++ b/drivers/crypto/nx/nx-842.h
@@ -3,7 +3,6 @@
#ifndef __NX_842_H__
#define __NX_842_H__
-#include <crypto/algapi.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -101,6 +100,8 @@
#define LEN_ON_SIZE(pa, size) ((size) - ((pa) & ((size) - 1)))
#define LEN_ON_PAGE(pa) LEN_ON_SIZE(pa, PAGE_SIZE)
+struct crypto_scomp;
+
static inline unsigned long nx842_get_pa(void *addr)
{
if (!is_vmalloc_addr(addr))
@@ -182,13 +183,13 @@ struct nx842_crypto_ctx {
struct nx842_driver *driver;
};
-int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver);
-void nx842_crypto_exit(struct crypto_tfm *tfm);
-int nx842_crypto_compress(struct crypto_tfm *tfm,
+void *nx842_crypto_alloc_ctx(struct nx842_driver *driver);
+void nx842_crypto_free_ctx(void *ctx);
+int nx842_crypto_compress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
-int nx842_crypto_decompress(struct crypto_tfm *tfm,
+ u8 *dst, unsigned int *dlen, void *ctx);
+int nx842_crypto_decompress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
+ u8 *dst, unsigned int *dlen, void *ctx);
#endif /* __NX_842_H__ */
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index c843f4c6f684..56a0b3a67c33 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -217,13 +217,11 @@ static int generate_pat(u8 *iv,
memset(b1, 0, 16);
if (assoclen <= 65280) {
*(u16 *)b1 = assoclen;
- scatterwalk_map_and_copy(b1 + 2, req->src, 0,
- iauth_len, SCATTERWALK_FROM_SG);
+ memcpy_from_sglist(b1 + 2, req->src, 0, iauth_len);
} else {
*(u16 *)b1 = (u16)(0xfffe);
*(u32 *)&b1[2] = assoclen;
- scatterwalk_map_and_copy(b1 + 6, req->src, 0,
- iauth_len, SCATTERWALK_FROM_SG);
+ memcpy_from_sglist(b1 + 6, req->src, 0, iauth_len);
}
}
@@ -341,9 +339,8 @@ static int ccm_nx_decrypt(struct aead_request *req,
nbytes -= authsize;
/* copy out the auth tag to compare with later */
- scatterwalk_map_and_copy(priv->oauth_tag,
- req->src, nbytes + req->assoclen, authsize,
- SCATTERWALK_FROM_SG);
+ memcpy_from_sglist(priv->oauth_tag, req->src, nbytes + req->assoclen,
+ authsize);
rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
csbcpb->cpb.aes_ccm.in_pat_or_b0);
@@ -465,9 +462,8 @@ static int ccm_nx_encrypt(struct aead_request *req,
} while (processed < nbytes);
/* copy out the auth tag */
- scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
- req->dst, nbytes + req->assoclen, authsize,
- SCATTERWALK_TO_SG);
+ memcpy_to_sglist(req->dst, nbytes + req->assoclen,
+ csbcpb->cpb.aes_ccm.out_pat_or_mac, authsize);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
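The copy conversions in this hunk, and in the nx-aes-gcm, s5p-sss and related hunks below, all follow one fixed mapping. Stated once, using the signatures exactly as the rewritten calls above use them:

/*
 * Old flag-driven copy (direction passed as the last argument):
 *	scatterwalk_map_and_copy(buf, sg, start, nbytes, SCATTERWALK_FROM_SG);
 *	scatterwalk_map_and_copy(buf, sg, start, nbytes, SCATTERWALK_TO_SG);
 *
 * New direction-specific helpers (note that the destination scatterlist
 * comes first in memcpy_to_sglist(), mirroring memcpy()):
 *	memcpy_from_sglist(buf, sg, start, nbytes);
 *	memcpy_to_sglist(sg, start, buf, nbytes);
 */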
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 4a796318b430..b7fe2de96d96 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -103,16 +103,13 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
{
int rc;
struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
- struct scatter_walk walk;
struct nx_sg *nx_sg = nx_ctx->in_sg;
unsigned int nbytes = assoclen;
unsigned int processed = 0, to_process;
unsigned int max_sg_len;
if (nbytes <= AES_BLOCK_SIZE) {
- scatterwalk_start(&walk, req->src);
- scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
- scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
+ memcpy_from_sglist(out, req->src, 0, nbytes);
return 0;
}
@@ -391,19 +388,17 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
mac:
if (enc) {
/* copy out the auth tag */
- scatterwalk_map_and_copy(
- csbcpb->cpb.aes_gcm.out_pat_or_mac,
+ memcpy_to_sglist(
req->dst, req->assoclen + nbytes,
- crypto_aead_authsize(crypto_aead_reqtfm(req)),
- SCATTERWALK_TO_SG);
+ csbcpb->cpb.aes_gcm.out_pat_or_mac,
+ crypto_aead_authsize(crypto_aead_reqtfm(req)));
} else {
u8 *itag = nx_ctx->priv.gcm.iauth_tag;
u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;
- scatterwalk_map_and_copy(
+ memcpy_from_sglist(
itag, req->src, req->assoclen + nbytes,
- crypto_aead_authsize(crypto_aead_reqtfm(req)),
- SCATTERWALK_FROM_SG);
+ crypto_aead_authsize(crypto_aead_reqtfm(req)));
rc = crypto_memneq(itag, otag,
crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
-EBADMSG : 0;
diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c
index 8c859872c183..fd0a98b2fb1b 100644
--- a/drivers/crypto/nx/nx-common-powernv.c
+++ b/drivers/crypto/nx/nx-common-powernv.c
@@ -9,6 +9,7 @@
#include "nx-842.h"
+#include <crypto/internal/scompress.h>
#include <linux/timer.h>
#include <asm/prom.h>
@@ -1031,23 +1032,21 @@ static struct nx842_driver nx842_powernv_driver = {
.decompress = nx842_powernv_decompress,
};
-static int nx842_powernv_crypto_init(struct crypto_tfm *tfm)
+static void *nx842_powernv_crypto_alloc_ctx(void)
{
- return nx842_crypto_init(tfm, &nx842_powernv_driver);
+ return nx842_crypto_alloc_ctx(&nx842_powernv_driver);
}
-static struct crypto_alg nx842_powernv_alg = {
- .cra_name = "842",
- .cra_driver_name = "842-nx",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct nx842_crypto_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = nx842_powernv_crypto_init,
- .cra_exit = nx842_crypto_exit,
- .cra_u = { .compress = {
- .coa_compress = nx842_crypto_compress,
- .coa_decompress = nx842_crypto_decompress } }
+static struct scomp_alg nx842_powernv_alg = {
+ .base.cra_name = "842",
+ .base.cra_driver_name = "842-nx",
+ .base.cra_priority = 300,
+ .base.cra_module = THIS_MODULE,
+
+ .alloc_ctx = nx842_powernv_crypto_alloc_ctx,
+ .free_ctx = nx842_crypto_free_ctx,
+ .compress = nx842_crypto_compress,
+ .decompress = nx842_crypto_decompress,
};
static __init int nx_compress_powernv_init(void)
@@ -1107,7 +1106,7 @@ static __init int nx_compress_powernv_init(void)
nx842_powernv_exec = nx842_exec_vas;
}
- ret = crypto_register_alg(&nx842_powernv_alg);
+ ret = crypto_register_scomp(&nx842_powernv_alg);
if (ret) {
nx_delete_coprocs();
return ret;
@@ -1128,7 +1127,7 @@ static void __exit nx_compress_powernv_exit(void)
if (!nx842_ct)
vas_unregister_api_powernv();
- crypto_unregister_alg(&nx842_powernv_alg);
+ crypto_unregister_scomp(&nx842_powernv_alg);
nx_delete_coprocs();
}
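The registration change above (and the matching pseries change below) moves nx-842 from the legacy crypto_alg compression interface to scomp, where the driver allocates and returns its own context object instead of having one embedded in the tfm. The following is a compilable user-space sketch of that ownership model only, with calloc/free standing in for kzalloc/kfree and a single buffer standing in for the nx-842 work memory; names are made up for illustration:

#include <stdlib.h>
#include <errno.h>

struct sketch_ctx {
	void *wmem;		/* stands in for the nx-842 bounce/work buffers */
};

/* scomp-style hook: allocate and return a context, or fail. */
static void *sketch_alloc_ctx(void)
{
	struct sketch_ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx)
		return NULL;		/* the kernel code returns ERR_PTR(-ENOMEM) */
	ctx->wmem = malloc(4096);
	if (!ctx->wmem) {
		free(ctx);
		return NULL;
	}
	return ctx;
}

/* scomp-style hook: the same object comes back here for teardown. */
static void sketch_free_ctx(void *p)
{
	struct sketch_ctx *ctx = p;

	free(ctx->wmem);
	free(ctx);
}

int main(void)
{
	void *ctx = sketch_alloc_ctx();

	if (!ctx)
		return ENOMEM;
	/* compress()/decompress() would receive ctx as their last argument */
	sketch_free_ctx(ctx);
	return 0;
}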
diff --git a/drivers/crypto/nx/nx-common-pseries.c b/drivers/crypto/nx/nx-common-pseries.c
index 1660c5cf3641..f528e072494a 100644
--- a/drivers/crypto/nx/nx-common-pseries.c
+++ b/drivers/crypto/nx/nx-common-pseries.c
@@ -11,6 +11,7 @@
#include <asm/vio.h>
#include <asm/hvcall.h>
#include <asm/vas.h>
+#include <crypto/internal/scompress.h>
#include "nx-842.h"
#include "nx_csbcpb.h" /* struct nx_csbcpb */
@@ -1008,23 +1009,21 @@ static struct nx842_driver nx842_pseries_driver = {
.decompress = nx842_pseries_decompress,
};
-static int nx842_pseries_crypto_init(struct crypto_tfm *tfm)
+static void *nx842_pseries_crypto_alloc_ctx(void)
{
- return nx842_crypto_init(tfm, &nx842_pseries_driver);
+ return nx842_crypto_alloc_ctx(&nx842_pseries_driver);
}
-static struct crypto_alg nx842_pseries_alg = {
- .cra_name = "842",
- .cra_driver_name = "842-nx",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct nx842_crypto_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = nx842_pseries_crypto_init,
- .cra_exit = nx842_crypto_exit,
- .cra_u = { .compress = {
- .coa_compress = nx842_crypto_compress,
- .coa_decompress = nx842_crypto_decompress } }
+static struct scomp_alg nx842_pseries_alg = {
+ .base.cra_name = "842",
+ .base.cra_driver_name = "842-nx",
+ .base.cra_priority = 300,
+ .base.cra_module = THIS_MODULE,
+
+ .alloc_ctx = nx842_pseries_crypto_alloc_ctx,
+ .free_ctx = nx842_crypto_free_ctx,
+ .compress = nx842_crypto_compress,
+ .decompress = nx842_crypto_decompress,
};
static int nx842_probe(struct vio_dev *viodev,
@@ -1072,7 +1071,7 @@ static int nx842_probe(struct vio_dev *viodev,
if (ret)
goto error;
- ret = crypto_register_alg(&nx842_pseries_alg);
+ ret = crypto_register_scomp(&nx842_pseries_alg);
if (ret) {
dev_err(&viodev->dev, "could not register comp alg: %d\n", ret);
goto error;
@@ -1120,7 +1119,7 @@ static void nx842_remove(struct vio_dev *viodev)
if (caps_feat)
sysfs_remove_group(&viodev->dev.kobj, &nxcop_caps_attr_group);
- crypto_unregister_alg(&nx842_pseries_alg);
+ crypto_unregister_scomp(&nx842_pseries_alg);
of_reconfig_notifier_unregister(&nx842_of_nb);
@@ -1145,6 +1144,7 @@ static void __init nxcop_get_capabilities(void)
{
struct hv_vas_all_caps *hv_caps;
struct hv_nx_cop_caps *hv_nxc;
+ u64 feat;
int rc;
hv_caps = kmalloc(sizeof(*hv_caps), GFP_KERNEL);
@@ -1155,27 +1155,26 @@ static void __init nxcop_get_capabilities(void)
*/
rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, 0,
(u64)virt_to_phys(hv_caps));
+ if (!rc)
+ feat = be64_to_cpu(hv_caps->feat_type);
+ kfree(hv_caps);
if (rc)
- goto out;
+ return;
+ if (!(feat & VAS_NX_GZIP_FEAT_BIT))
+ return;
- caps_feat = be64_to_cpu(hv_caps->feat_type);
/*
* NX-GZIP feature available
*/
- if (caps_feat & VAS_NX_GZIP_FEAT_BIT) {
- hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL);
- if (!hv_nxc)
- goto out;
- /*
- * Get capabilities for NX-GZIP feature
- */
- rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES,
- VAS_NX_GZIP_FEAT,
- (u64)virt_to_phys(hv_nxc));
- } else {
- pr_err("NX-GZIP feature is not available\n");
- rc = -EINVAL;
- }
+ hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL);
+ if (!hv_nxc)
+ return;
+ /*
+ * Get capabilities for NX-GZIP feature
+ */
+ rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES,
+ VAS_NX_GZIP_FEAT,
+ (u64)virt_to_phys(hv_nxc));
if (!rc) {
nx_cop_caps.descriptor = be64_to_cpu(hv_nxc->descriptor);
@@ -1185,13 +1184,10 @@ static void __init nxcop_get_capabilities(void)
be64_to_cpu(hv_nxc->min_compress_len);
nx_cop_caps.min_decompress_len =
be64_to_cpu(hv_nxc->min_decompress_len);
- } else {
- caps_feat = 0;
+ caps_feat = feat;
}
kfree(hv_nxc);
-out:
- kfree(hv_caps);
}
static const struct vio_device_id nx842_vio_driver_ids[] = {
@@ -1256,7 +1252,7 @@ static void __exit nx842_pseries_exit(void)
vas_unregister_api_pseries();
- crypto_unregister_alg(&nx842_pseries_alg);
+ crypto_unregister_scomp(&nx842_pseries_alg);
spin_lock_irqsave(&devdata_spinlock, flags);
old_devdata = rcu_dereference_check(devdata,
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 010e87d9da36..a3b979193d9b 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -153,40 +153,18 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
{
struct scatter_walk walk;
struct nx_sg *nx_sg = nx_dst;
- unsigned int n, offset = 0, len = *src_len;
- char *dst;
+ unsigned int n, len = *src_len;
/* we need to fast forward through @start bytes first */
- for (;;) {
- scatterwalk_start(&walk, sg_src);
-
- if (start < offset + sg_src->length)
- break;
-
- offset += sg_src->length;
- sg_src = sg_next(sg_src);
- }
-
- /* start - offset is the number of bytes to advance in the scatterlist
- * element we're currently looking at */
- scatterwalk_advance(&walk, start - offset);
+ scatterwalk_start_at_pos(&walk, sg_src, start);
while (len && (nx_sg - nx_dst) < sglen) {
- n = scatterwalk_clamp(&walk, len);
- if (!n) {
- /* In cases where we have scatterlist chain sg_next
- * handles with it properly */
- scatterwalk_start(&walk, sg_next(walk.sg));
- n = scatterwalk_clamp(&walk, len);
- }
- dst = scatterwalk_map(&walk);
+ n = scatterwalk_next(&walk, len);
- nx_sg = nx_build_sg_list(nx_sg, dst, &n, sglen - (nx_sg - nx_dst));
- len -= n;
+ nx_sg = nx_build_sg_list(nx_sg, walk.addr, &n, sglen - (nx_sg - nx_dst));
- scatterwalk_unmap(dst);
- scatterwalk_advance(&walk, n);
- scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len);
+ scatterwalk_done_src(&walk, n);
+ len -= n;
}
/* update to_process */
*src_len -= len;
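Shape of the rewritten walk loop, under the same API assumptions the hunk itself makes (scatterwalk_next() maps the next contiguous run at walk.addr, scatterwalk_done_src() consumes it):

/*
 *	scatterwalk_start_at_pos(&walk, sg, start);
 *	while (len) {
 *		n = scatterwalk_next(&walk, len);
 *		... use walk.addr for n bytes ...
 *		scatterwalk_done_src(&walk, n);
 *		len -= n;
 *	}
 *
 * The old clamp/map/unmap/advance/done sequence collapses into the
 * next/done_src pair, and chained scatterlists no longer need the
 * manual sg_next() restart.
 */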
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index 2697baebb6a3..e1b4b6927bec 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -189,7 +189,4 @@ extern struct shash_alg nx_shash_sha256_alg;
extern struct nx_crypto_driver nx_driver;
-#define SCATTERWALK_TO_SG 1
-#define SCATTERWALK_FROM_SG 0
-
#endif
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 6865c7f1fc1a..db9e84c0c9fb 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -125,7 +125,7 @@ out:
static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
{
- u8 buf[4];
+ const u8 *buf = (void *)desc;
return padlock_sha1_finup(desc, buf, 0, out);
}
@@ -186,7 +186,7 @@ out:
static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
{
- u8 buf[4];
+ const u8 *buf = (void *)desc;
return padlock_sha256_finup(desc, buf, 0, out);
}
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 57ab237e899e..b4c3c14dafd5 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -458,19 +458,6 @@ static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
*sg = NULL;
}
-static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
- unsigned int nbytes, int out)
-{
- struct scatter_walk walk;
-
- if (!nbytes)
- return;
-
- scatterwalk_start(&walk, sg);
- scatterwalk_copychunks(buf, &walk, nbytes, out);
- scatterwalk_done(&walk, out, 0);
-}
-
static void s5p_sg_done(struct s5p_aes_dev *dev)
{
struct skcipher_request *req = dev->req;
@@ -480,8 +467,8 @@ static void s5p_sg_done(struct s5p_aes_dev *dev)
dev_dbg(dev->dev,
"Copying %d bytes of output data back to original place\n",
dev->req->cryptlen);
- s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
- dev->req->cryptlen, 1);
+ memcpy_to_sglist(dev->req->dst, 0, sg_virt(dev->sg_dst_cpy),
+ dev->req->cryptlen);
}
s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
@@ -526,7 +513,7 @@ static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
return -ENOMEM;
}
- s5p_sg_copy_buf(pages, src, dev->req->cryptlen, 0);
+ memcpy_from_sglist(pages, src, 0, dev->req->cryptlen);
sg_init_table(*dst, 1);
sg_set_buf(*dst, pages, len);
@@ -1035,8 +1022,7 @@ static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx,
if (ctx->bufcnt)
memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
- scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->skip,
- new_len, 0);
+ memcpy_from_sglist(buf + ctx->bufcnt, sg, ctx->skip, new_len);
sg_init_table(ctx->sgl, 1);
sg_set_buf(ctx->sgl, buf, len);
ctx->sg = ctx->sgl;
@@ -1229,8 +1215,7 @@ static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
if (len > nbytes)
len = nbytes;
- scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
- 0, len, 0);
+ memcpy_from_sglist(ctx->buffer + ctx->bufcnt, req->src, 0, len);
ctx->bufcnt += len;
nbytes -= len;
ctx->skip = len;
@@ -1253,9 +1238,8 @@ static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
hash_later = ctx->total - xmit_len;
/* copy hash_later bytes from end of req->src */
/* previous bytes are in xmit_buf, so no overwrite */
- scatterwalk_map_and_copy(ctx->buffer, req->src,
- req->nbytes - hash_later,
- hash_later, 0);
+ memcpy_from_sglist(ctx->buffer, req->src,
+ req->nbytes - hash_later, hash_later);
}
if (xmit_len > BUFLEN) {
@@ -1267,8 +1251,8 @@ static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
/* have buffered data only */
if (unlikely(!ctx->bufcnt)) {
/* first update didn't fill up buffer */
- scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src,
- 0, xmit_len, 0);
+ memcpy_from_sglist(ctx->dd->xmit_buf, req->src,
+ 0, xmit_len);
}
sg_init_table(ctx->sgl, 1);
@@ -1506,8 +1490,8 @@ static int s5p_hash_update(struct ahash_request *req)
return 0;
if (ctx->bufcnt + req->nbytes <= BUFLEN) {
- scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
- 0, req->nbytes, 0);
+ memcpy_from_sglist(ctx->buffer + ctx->bufcnt, req->src,
+ 0, req->nbytes);
ctx->bufcnt += req->nbytes;
return 0;
}
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
index de4d0402f133..fd29785a3ecf 100644
--- a/drivers/crypto/stm32/stm32-crc32.c
+++ b/drivers/crypto/stm32/stm32-crc32.c
@@ -162,7 +162,7 @@ static int burst_update(struct shash_desc *desc, const u8 *d8,
if (mctx->poly == CRC32_POLY_LE)
ctx->partial = crc32_le(ctx->partial, d8, length);
else
- ctx->partial = __crc32c_le(ctx->partial, d8, length);
+ ctx->partial = crc32c(ctx->partial, d8, length);
goto pm_out;
}
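The rename above is mechanical: crc32c() takes the running value as its first argument exactly as __crc32c_le() did, so ctx->partial keeps chaining across burst_update() calls. A small standalone reference showing that chained partial updates match a single pass (plain bitwise CRC-32C, Castagnoli polynomial; illustrative only, not the kernel implementation):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint32_t crc32c_sw(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	const unsigned char msg[] = "123456789";
	uint32_t whole = crc32c_sw(~0u, msg, 9) ^ ~0u;
	uint32_t part  = crc32c_sw(~0u, msg, 4);	/* first chunk */

	part = crc32c_sw(part, msg + 4, 5) ^ ~0u;	/* chained second chunk */
	printf("%08x %08x\n", whole, part);		/* both print e3069283 */
	return whole != part;
}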
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 14c6339c2e43..5ce88e7a8f65 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -666,7 +666,7 @@ static void stm32_cryp_write_ccm_first_header(struct stm32_cryp *cryp)
written = min_t(size_t, AES_BLOCK_SIZE - len, alen);
- scatterwalk_copychunks((char *)block + len, &cryp->in_walk, written, 0);
+ memcpy_from_scatterwalk((char *)block + len, &cryp->in_walk, written);
writesl(cryp->regs + cryp->caps->din, block, AES_BLOCK_32);
@@ -993,7 +993,7 @@ static int stm32_cryp_header_dma_start(struct stm32_cryp *cryp)
/* Advance scatterwalk to not DMA'ed data */
align_size = ALIGN_DOWN(cryp->header_in, cryp->hw_blocksize);
- scatterwalk_copychunks(NULL, &cryp->in_walk, align_size, 2);
+ scatterwalk_skip(&cryp->in_walk, align_size);
cryp->header_in -= align_size;
ret = dma_submit_error(dmaengine_submit(tx_in));
@@ -1056,7 +1056,7 @@ static int stm32_cryp_dma_start(struct stm32_cryp *cryp)
/* Advance scatterwalk to not DMA'ed data */
align_size = ALIGN_DOWN(cryp->payload_in, cryp->hw_blocksize);
- scatterwalk_copychunks(NULL, &cryp->in_walk, align_size, 2);
+ scatterwalk_skip(&cryp->in_walk, align_size);
cryp->payload_in -= align_size;
ret = dma_submit_error(dmaengine_submit(tx_in));
@@ -1067,7 +1067,7 @@ static int stm32_cryp_dma_start(struct stm32_cryp *cryp)
dma_async_issue_pending(cryp->dma_lch_in);
/* Advance scatterwalk to not DMA'ed data */
- scatterwalk_copychunks(NULL, &cryp->out_walk, align_size, 2);
+ scatterwalk_skip(&cryp->out_walk, align_size);
cryp->payload_out -= align_size;
ret = dma_submit_error(dmaengine_submit(tx_out));
if (ret < 0) {
@@ -1737,9 +1737,9 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req,
out_sg = areq->dst;
scatterwalk_start(&cryp->in_walk, in_sg);
- scatterwalk_start(&cryp->out_walk, out_sg);
/* In output, jump after assoc data */
- scatterwalk_copychunks(NULL, &cryp->out_walk, cryp->areq->assoclen, 2);
+ scatterwalk_start_at_pos(&cryp->out_walk, out_sg,
+ areq->assoclen);
ret = stm32_cryp_hw_init(cryp);
if (ret)
@@ -1873,12 +1873,12 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
/* Get and write tag */
readsl(cryp->regs + cryp->caps->dout, out_tag, AES_BLOCK_32);
- scatterwalk_copychunks(out_tag, &cryp->out_walk, cryp->authsize, 1);
+ memcpy_to_scatterwalk(&cryp->out_walk, out_tag, cryp->authsize);
} else {
/* Get and check tag */
u32 in_tag[AES_BLOCK_32], out_tag[AES_BLOCK_32];
- scatterwalk_copychunks(in_tag, &cryp->in_walk, cryp->authsize, 0);
+ memcpy_from_scatterwalk(in_tag, &cryp->in_walk, cryp->authsize);
readsl(cryp->regs + cryp->caps->dout, out_tag, AES_BLOCK_32);
if (crypto_memneq(in_tag, out_tag, cryp->authsize))
@@ -1923,8 +1923,8 @@ static void stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
u32 block[AES_BLOCK_32];
readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32));
- scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize,
- cryp->payload_out), 1);
+ memcpy_to_scatterwalk(&cryp->out_walk, block, min_t(size_t, cryp->hw_blocksize,
+ cryp->payload_out));
cryp->payload_out -= min_t(size_t, cryp->hw_blocksize,
cryp->payload_out);
}
@@ -1933,8 +1933,8 @@ static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp)
{
u32 block[AES_BLOCK_32] = {0};
- scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, cryp->hw_blocksize,
- cryp->payload_in), 0);
+ memcpy_from_scatterwalk(block, &cryp->in_walk, min_t(size_t, cryp->hw_blocksize,
+ cryp->payload_in));
writesl(cryp->regs + cryp->caps->din, block, cryp->hw_blocksize / sizeof(u32));
cryp->payload_in -= min_t(size_t, cryp->hw_blocksize, cryp->payload_in);
}
@@ -1981,8 +1981,8 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
*/
readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32));
- scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize,
- cryp->payload_out), 1);
+ memcpy_to_scatterwalk(&cryp->out_walk, block, min_t(size_t, cryp->hw_blocksize,
+ cryp->payload_out));
cryp->payload_out -= min_t(size_t, cryp->hw_blocksize,
cryp->payload_out);
@@ -2079,8 +2079,8 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
*/
readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32));
- scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize,
- cryp->payload_out), 1);
+ memcpy_to_scatterwalk(&cryp->out_walk, block, min_t(size_t, cryp->hw_blocksize,
+ cryp->payload_out));
cryp->payload_out -= min_t(size_t, cryp->hw_blocksize, cryp->payload_out);
/* d) Load again CRYP_CSGCMCCMxR */
@@ -2161,7 +2161,7 @@ static void stm32_cryp_irq_write_gcmccm_header(struct stm32_cryp *cryp)
written = min_t(size_t, AES_BLOCK_SIZE, cryp->header_in);
- scatterwalk_copychunks(block, &cryp->in_walk, written, 0);
+ memcpy_from_scatterwalk(block, &cryp->in_walk, written);
writesl(cryp->regs + cryp->caps->din, block, AES_BLOCK_32);
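The in/out walk changes in the stm32-cryp hunks above all follow one idiom; stated once, with the same assumptions the hunks themselves make:

/*
 * Skipping data used to be spelled as a copy to NULL:
 *	scatterwalk_start(&w, sg);
 *	scatterwalk_copychunks(NULL, &w, off, 2);
 * and becomes either a positioned start or an explicit skip:
 *	scatterwalk_start_at_pos(&w, sg, off);
 *	scatterwalk_skip(&w, n);
 * while the data copies become direction-specific:
 *	memcpy_from_scatterwalk(buf, &w, n);	// data fed to the device
 *	memcpy_to_scatterwalk(&w, buf, n);	// data read back from it
 */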
diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c
index d734c9a56786..ca9d0cca1f74 100644
--- a/drivers/crypto/tegra/tegra-se-aes.c
+++ b/drivers/crypto/tegra/tegra-se-aes.c
@@ -28,6 +28,9 @@ struct tegra_aes_ctx {
u32 ivsize;
u32 key1_id;
u32 key2_id;
+ u32 keylen;
+ u8 key1[AES_MAX_KEY_SIZE];
+ u8 key2[AES_MAX_KEY_SIZE];
};
struct tegra_aes_reqctx {
@@ -43,8 +46,9 @@ struct tegra_aead_ctx {
struct tegra_se *se;
unsigned int authsize;
u32 alg;
- u32 keylen;
u32 key_id;
+ u32 keylen;
+ u8 key[AES_MAX_KEY_SIZE];
};
struct tegra_aead_reqctx {
@@ -56,8 +60,8 @@ struct tegra_aead_reqctx {
unsigned int cryptlen;
unsigned int authsize;
bool encrypt;
- u32 config;
u32 crypto_config;
+ u32 config;
u32 key_id;
u32 iv[4];
u8 authdata[16];
@@ -67,6 +71,8 @@ struct tegra_cmac_ctx {
struct tegra_se *se;
unsigned int alg;
u32 key_id;
+ u32 keylen;
+ u8 key[AES_MAX_KEY_SIZE];
struct crypto_shash *fallback_tfm;
};
@@ -260,17 +266,13 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
struct tegra_se *se = ctx->se;
- unsigned int cmdlen;
+ unsigned int cmdlen, key1_id, key2_id;
int ret;
- rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_AES_BUFLEN,
- &rctx->datbuf.addr, GFP_KERNEL);
- if (!rctx->datbuf.buf)
- return -ENOMEM;
-
- rctx->datbuf.size = SE_AES_BUFLEN;
rctx->iv = (u32 *)req->iv;
rctx->len = req->cryptlen;
+ key1_id = ctx->key1_id;
+ key2_id = ctx->key2_id;
/* Pad input to AES Block size */
if (ctx->alg != SE_ALG_XTS) {
@@ -278,20 +280,59 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
rctx->len += AES_BLOCK_SIZE - (rctx->len % AES_BLOCK_SIZE);
}
+ rctx->datbuf.size = rctx->len;
+ rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
+ &rctx->datbuf.addr, GFP_KERNEL);
+ if (!rctx->datbuf.buf) {
+ ret = -ENOMEM;
+ goto out_finalize;
+ }
+
scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0);
+ rctx->config = tegra234_aes_cfg(ctx->alg, rctx->encrypt);
+ rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, rctx->encrypt);
+
+ if (!key1_id) {
+ ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key1,
+ ctx->keylen, ctx->alg, &key1_id);
+ if (ret)
+ goto out;
+ }
+
+ rctx->crypto_config |= SE_AES_KEY_INDEX(key1_id);
+
+ if (ctx->alg == SE_ALG_XTS) {
+ if (!key2_id) {
+ ret = tegra_key_submit_reserved_xts(ctx->se, ctx->key2,
+ ctx->keylen, ctx->alg, &key2_id);
+ if (ret)
+ goto out;
+ }
+
+ rctx->crypto_config |= SE_AES_KEY2_INDEX(key2_id);
+ }
+
/* Prepare the command and submit for execution */
cmdlen = tegra_aes_prep_cmd(ctx, rctx);
- ret = tegra_se_host1x_submit(se, cmdlen);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
/* Copy the result */
tegra_aes_update_iv(req, ctx);
scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1);
+out:
/* Free the buffer */
- dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
rctx->datbuf.buf, rctx->datbuf.addr);
+ if (tegra_key_is_reserved(key1_id))
+ tegra_key_invalidate_reserved(ctx->se, key1_id, ctx->alg);
+
+ if (tegra_key_is_reserved(key2_id))
+ tegra_key_invalidate_reserved(ctx->se, key2_id, ctx->alg);
+
+out_finalize:
crypto_finalize_skcipher_request(se->engine, req, ret);
return 0;
@@ -313,6 +354,7 @@ static int tegra_aes_cra_init(struct crypto_skcipher *tfm)
ctx->se = se_alg->se_dev;
ctx->key1_id = 0;
ctx->key2_id = 0;
+ ctx->keylen = 0;
algname = crypto_tfm_alg_name(&tfm->base);
ret = se_algname_to_algid(algname);
@@ -341,13 +383,20 @@ static int tegra_aes_setkey(struct crypto_skcipher *tfm,
const u8 *key, u32 keylen)
{
struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
if (aes_check_keylen(keylen)) {
dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
return -EINVAL;
}
- return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id);
+ ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id);
+ if (ret) {
+ ctx->keylen = keylen;
+ memcpy(ctx->key1, key, keylen);
+ }
+
+ return 0;
}
static int tegra_xts_setkey(struct crypto_skcipher *tfm,
@@ -365,11 +414,17 @@ static int tegra_xts_setkey(struct crypto_skcipher *tfm,
ret = tegra_key_submit(ctx->se, key, len,
ctx->alg, &ctx->key1_id);
- if (ret)
- return ret;
+ if (ret) {
+ ctx->keylen = len;
+ memcpy(ctx->key1, key, len);
+ }
- return tegra_key_submit(ctx->se, key + len, len,
+ ret = tegra_key_submit(ctx->se, key + len, len,
ctx->alg, &ctx->key2_id);
+ if (ret) {
+ ctx->keylen = len;
+ memcpy(ctx->key2, key + len, len);
+ }
return 0;
}
@@ -443,13 +498,10 @@ static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt)
if (!req->cryptlen)
return 0;
- rctx->encrypt = encrypt;
- rctx->config = tegra234_aes_cfg(ctx->alg, encrypt);
- rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, encrypt);
- rctx->crypto_config |= SE_AES_KEY_INDEX(ctx->key1_id);
+ if (ctx->alg == SE_ALG_ECB)
+ req->iv = NULL;
- if (ctx->key2_id)
- rctx->crypto_config |= SE_AES_KEY2_INDEX(ctx->key2_id);
+ rctx->encrypt = encrypt;
return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req);
}
@@ -715,11 +767,11 @@ static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqct
rctx->config = tegra234_aes_cfg(SE_ALG_GMAC, rctx->encrypt);
rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GMAC, rctx->encrypt) |
- SE_AES_KEY_INDEX(ctx->key_id);
+ SE_AES_KEY_INDEX(rctx->key_id);
cmdlen = tegra_gmac_prep_cmd(ctx, rctx);
- return tegra_se_host1x_submit(se, cmdlen);
+ return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
}
static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
@@ -732,11 +784,11 @@ static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc
rctx->config = tegra234_aes_cfg(SE_ALG_GCM, rctx->encrypt);
rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM, rctx->encrypt) |
- SE_AES_KEY_INDEX(ctx->key_id);
+ SE_AES_KEY_INDEX(rctx->key_id);
/* Prepare command and submit */
cmdlen = tegra_gcm_crypt_prep_cmd(ctx, rctx);
- ret = tegra_se_host1x_submit(se, cmdlen);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
return ret;
@@ -755,11 +807,11 @@ static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc
rctx->config = tegra234_aes_cfg(SE_ALG_GCM_FINAL, rctx->encrypt);
rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM_FINAL, rctx->encrypt) |
- SE_AES_KEY_INDEX(ctx->key_id);
+ SE_AES_KEY_INDEX(rctx->key_id);
/* Prepare command and submit */
cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
- ret = tegra_se_host1x_submit(se, cmdlen);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
return ret;
@@ -886,12 +938,12 @@ static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_req
rctx->config = tegra234_aes_cfg(SE_ALG_CBC_MAC, rctx->encrypt);
rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CBC_MAC,
rctx->encrypt) |
- SE_AES_KEY_INDEX(ctx->key_id);
+ SE_AES_KEY_INDEX(rctx->key_id);
/* Prepare command and submit */
cmdlen = tegra_cbcmac_prep_cmd(ctx, rctx);
- return tegra_se_host1x_submit(se, cmdlen);
+ return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
}
static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
@@ -1073,7 +1125,7 @@ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx
rctx->config = tegra234_aes_cfg(SE_ALG_CTR, rctx->encrypt);
rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CTR, rctx->encrypt) |
- SE_AES_KEY_INDEX(ctx->key_id);
+ SE_AES_KEY_INDEX(rctx->key_id);
/* Copy authdata in the top of buffer for encryption/decryption */
if (rctx->encrypt)
@@ -1098,7 +1150,7 @@ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx
/* Prepare command and submit */
cmdlen = tegra_ctr_prep_cmd(ctx, rctx);
- ret = tegra_se_host1x_submit(se, cmdlen);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
return ret;
@@ -1117,6 +1169,11 @@ static int tegra_ccm_crypt_init(struct aead_request *req, struct tegra_se *se,
rctx->assoclen = req->assoclen;
rctx->authsize = crypto_aead_authsize(tfm);
+ if (rctx->encrypt)
+ rctx->cryptlen = req->cryptlen;
+ else
+ rctx->cryptlen = req->cryptlen - rctx->authsize;
+
memcpy(iv, req->iv, 16);
ret = tegra_ccm_check_iv(iv);
@@ -1145,30 +1202,35 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
struct tegra_se *se = ctx->se;
int ret;
+ ret = tegra_ccm_crypt_init(req, se, rctx);
+ if (ret)
+ goto out_finalize;
+
+ rctx->key_id = ctx->key_id;
+
/* Allocate buffers required */
- rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100;
+ rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size,
&rctx->inbuf.addr, GFP_KERNEL);
if (!rctx->inbuf.buf)
- return -ENOMEM;
-
- rctx->inbuf.size = SE_AES_BUFLEN;
+ goto out_finalize;
- rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100;
+ rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size,
&rctx->outbuf.addr, GFP_KERNEL);
if (!rctx->outbuf.buf) {
ret = -ENOMEM;
- goto outbuf_err;
+ goto out_free_inbuf;
}
- rctx->outbuf.size = SE_AES_BUFLEN;
-
- ret = tegra_ccm_crypt_init(req, se, rctx);
- if (ret)
- goto out;
+ if (!ctx->key_id) {
+ ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
+ ctx->keylen, ctx->alg, &rctx->key_id);
+ if (ret)
+ goto out;
+ }
if (rctx->encrypt) {
- rctx->cryptlen = req->cryptlen;
-
/* CBC MAC Operation */
ret = tegra_ccm_compute_auth(ctx, rctx);
if (ret)
@@ -1179,8 +1241,6 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
if (ret)
goto out;
} else {
- rctx->cryptlen = req->cryptlen - ctx->authsize;
-
/* CTR operation */
ret = tegra_ccm_do_ctr(ctx, rctx);
if (ret)
@@ -1193,13 +1253,17 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
}
out:
- dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
rctx->outbuf.buf, rctx->outbuf.addr);
-outbuf_err:
- dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+out_free_inbuf:
+ dma_free_coherent(ctx->se->dev, rctx->outbuf.size,
rctx->inbuf.buf, rctx->inbuf.addr);
+ if (tegra_key_is_reserved(rctx->key_id))
+ tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
+
+out_finalize:
crypto_finalize_aead_request(ctx->se->engine, req, ret);
return 0;
@@ -1213,23 +1277,6 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
int ret;
- /* Allocate buffers required */
- rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
- &rctx->inbuf.addr, GFP_KERNEL);
- if (!rctx->inbuf.buf)
- return -ENOMEM;
-
- rctx->inbuf.size = SE_AES_BUFLEN;
-
- rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
- &rctx->outbuf.addr, GFP_KERNEL);
- if (!rctx->outbuf.buf) {
- ret = -ENOMEM;
- goto outbuf_err;
- }
-
- rctx->outbuf.size = SE_AES_BUFLEN;
-
rctx->src_sg = req->src;
rctx->dst_sg = req->dst;
rctx->assoclen = req->assoclen;
@@ -1243,6 +1290,32 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
rctx->iv[3] = (1 << 24);
+ rctx->key_id = ctx->key_id;
+
+ /* Allocate buffers required */
+ rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen;
+ rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size,
+ &rctx->inbuf.addr, GFP_KERNEL);
+ if (!rctx->inbuf.buf) {
+ ret = -ENOMEM;
+ goto out_finalize;
+ }
+
+ rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen;
+ rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size,
+ &rctx->outbuf.addr, GFP_KERNEL);
+ if (!rctx->outbuf.buf) {
+ ret = -ENOMEM;
+ goto out_free_inbuf;
+ }
+
+ if (!ctx->key_id) {
+ ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
+ ctx->keylen, ctx->alg, &rctx->key_id);
+ if (ret)
+ goto out;
+ }
+
/* If there is associated data perform GMAC operation */
if (rctx->assoclen) {
ret = tegra_gcm_do_gmac(ctx, rctx);
@@ -1266,14 +1339,17 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
ret = tegra_gcm_do_verify(ctx->se, rctx);
out:
- dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ dma_free_coherent(ctx->se->dev, rctx->outbuf.size,
rctx->outbuf.buf, rctx->outbuf.addr);
-outbuf_err:
- dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+out_free_inbuf:
+ dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
rctx->inbuf.buf, rctx->inbuf.addr);
- /* Finalize the request if there are no errors */
+ if (tegra_key_is_reserved(rctx->key_id))
+ tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
+
+out_finalize:
crypto_finalize_aead_request(ctx->se->engine, req, ret);
return 0;
@@ -1295,6 +1371,7 @@ static int tegra_aead_cra_init(struct crypto_aead *tfm)
ctx->se = se_alg->se_dev;
ctx->key_id = 0;
+ ctx->keylen = 0;
ret = se_algname_to_algid(algname);
if (ret < 0) {
@@ -1376,13 +1453,20 @@ static int tegra_aead_setkey(struct crypto_aead *tfm,
const u8 *key, u32 keylen)
{
struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
if (aes_check_keylen(keylen)) {
dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
return -EINVAL;
}
- return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+ ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+ if (ret) {
+ ctx->keylen = keylen;
+ memcpy(ctx->key, key, keylen);
+ }
+
+ return 0;
}
static unsigned int tegra_cmac_prep_cmd(struct tegra_cmac_ctx *ctx,
@@ -1456,6 +1540,35 @@ static void tegra_cmac_paste_result(struct tegra_se *se, struct tegra_cmac_reqct
se->base + se->hw->regs->result + (i * 4));
}
+static int tegra_cmac_do_init(struct ahash_request *req)
+{
+ struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_se *se = ctx->se;
+ int i;
+
+ rctx->total_len = 0;
+ rctx->datbuf.size = 0;
+ rctx->residue.size = 0;
+ rctx->key_id = ctx->key_id;
+ rctx->task |= SHA_FIRST;
+ rctx->blk_size = crypto_ahash_blocksize(tfm);
+
+ rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
+ &rctx->residue.addr, GFP_KERNEL);
+ if (!rctx->residue.buf)
+ return -ENOMEM;
+
+ rctx->residue.size = 0;
+
+ /* Clear any previous result */
+ for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
+ writel(0, se->base + se->hw->regs->result + (i * 4));
+
+ return 0;
+}
+
static int tegra_cmac_do_update(struct ahash_request *req)
{
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
@@ -1483,7 +1596,7 @@ static int tegra_cmac_do_update(struct ahash_request *req)
rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue;
rctx->total_len += rctx->datbuf.size;
rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
- rctx->crypto_config = SE_AES_KEY_INDEX(ctx->key_id);
+ rctx->crypto_config = SE_AES_KEY_INDEX(rctx->key_id);
/*
* Keep one block and residue bytes in residue and
@@ -1497,6 +1610,11 @@ static int tegra_cmac_do_update(struct ahash_request *req)
return 0;
}
+ rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
+ &rctx->datbuf.addr, GFP_KERNEL);
+ if (!rctx->datbuf.buf)
+ return -ENOMEM;
+
/* Copy the previous residue first */
if (rctx->residue.size)
memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
@@ -1511,23 +1629,19 @@ static int tegra_cmac_do_update(struct ahash_request *req)
rctx->residue.size = nresidue;
/*
- * If this is not the first 'update' call, paste the previous copied
+ * If this is not the first task, paste the previous copied
* intermediate results to the registers so that it gets picked up.
- * This is to support the import/export functionality.
*/
if (!(rctx->task & SHA_FIRST))
tegra_cmac_paste_result(ctx->se, rctx);
cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
- ret = tegra_se_host1x_submit(se, cmdlen);
- /*
- * If this is not the final update, copy the intermediate results
- * from the registers so that it can be used in the next 'update'
- * call. This is to support the import/export functionality.
- */
- if (!(rctx->task & SHA_FINAL))
- tegra_cmac_copy_result(ctx->se, rctx);
+ tegra_cmac_copy_result(ctx->se, rctx);
+
+ dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
+ rctx->datbuf.buf, rctx->datbuf.addr);
return ret;
}
@@ -1543,17 +1657,34 @@ static int tegra_cmac_do_final(struct ahash_request *req)
if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) {
return crypto_shash_tfm_digest(ctx->fallback_tfm,
- rctx->datbuf.buf, 0, req->result);
+ NULL, 0, req->result);
+ }
+
+ if (rctx->residue.size) {
+ rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size,
+ &rctx->datbuf.addr, GFP_KERNEL);
+ if (!rctx->datbuf.buf) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
}
- memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
rctx->datbuf.size = rctx->residue.size;
rctx->total_len += rctx->residue.size;
rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
+ /*
+ * If this is not the first task, paste the previous copied
+ * intermediate results to the registers so that it gets picked up.
+ */
+ if (!(rctx->task & SHA_FIRST))
+ tegra_cmac_paste_result(ctx->se, rctx);
+
/* Prepare command and submit */
cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
- ret = tegra_se_host1x_submit(se, cmdlen);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
goto out;
@@ -1565,8 +1696,10 @@ static int tegra_cmac_do_final(struct ahash_request *req)
writel(0, se->base + se->hw->regs->result + (i * 4));
out:
- dma_free_coherent(se->dev, SE_SHA_BUFLEN,
- rctx->datbuf.buf, rctx->datbuf.addr);
+ if (rctx->residue.size)
+ dma_free_coherent(se->dev, rctx->datbuf.size,
+ rctx->datbuf.buf, rctx->datbuf.addr);
+out_free:
dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2,
rctx->residue.buf, rctx->residue.addr);
return ret;
@@ -1579,17 +1712,41 @@ static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_se *se = ctx->se;
- int ret;
+ int ret = 0;
+
+ if (rctx->task & SHA_INIT) {
+ ret = tegra_cmac_do_init(req);
+ if (ret)
+ goto out;
+
+ rctx->task &= ~SHA_INIT;
+ }
+
+ if (!ctx->key_id) {
+ ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
+ ctx->keylen, ctx->alg, &rctx->key_id);
+ if (ret)
+ goto out;
+ }
if (rctx->task & SHA_UPDATE) {
ret = tegra_cmac_do_update(req);
+ if (ret)
+ goto out;
+
rctx->task &= ~SHA_UPDATE;
}
if (rctx->task & SHA_FINAL) {
ret = tegra_cmac_do_final(req);
+ if (ret)
+ goto out;
+
rctx->task &= ~SHA_FINAL;
}
+out:
+ if (tegra_key_is_reserved(rctx->key_id))
+ tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
crypto_finalize_hash_request(se->engine, req, ret);
@@ -1631,6 +1788,7 @@ static int tegra_cmac_cra_init(struct crypto_tfm *tfm)
ctx->se = se_alg->se_dev;
ctx->key_id = 0;
+ ctx->keylen = 0;
ret = se_algname_to_algid(algname);
if (ret < 0) {
@@ -1655,51 +1813,11 @@ static void tegra_cmac_cra_exit(struct crypto_tfm *tfm)
tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
}
-static int tegra_cmac_init(struct ahash_request *req)
-{
- struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
- struct tegra_se *se = ctx->se;
- int i;
-
- rctx->total_len = 0;
- rctx->datbuf.size = 0;
- rctx->residue.size = 0;
- rctx->task = SHA_FIRST;
- rctx->blk_size = crypto_ahash_blocksize(tfm);
-
- rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
- &rctx->residue.addr, GFP_KERNEL);
- if (!rctx->residue.buf)
- goto resbuf_fail;
-
- rctx->residue.size = 0;
-
- rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
- &rctx->datbuf.addr, GFP_KERNEL);
- if (!rctx->datbuf.buf)
- goto datbuf_fail;
-
- rctx->datbuf.size = 0;
-
- /* Clear any previous result */
- for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
- writel(0, se->base + se->hw->regs->result + (i * 4));
-
- return 0;
-
-datbuf_fail:
- dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf,
- rctx->residue.addr);
-resbuf_fail:
- return -ENOMEM;
-}
-
static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ int ret;
if (aes_check_keylen(keylen)) {
dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
@@ -1709,7 +1827,24 @@ static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
if (ctx->fallback_tfm)
crypto_shash_setkey(ctx->fallback_tfm, key, keylen);
- return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+ ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+ if (ret) {
+ ctx->keylen = keylen;
+ memcpy(ctx->key, key, keylen);
+ }
+
+ return 0;
+}
+
+static int tegra_cmac_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+
+ rctx->task = SHA_INIT;
+
+ return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
static int tegra_cmac_update(struct ahash_request *req)
@@ -1750,13 +1885,9 @@ static int tegra_cmac_digest(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
- int ret;
- ret = tegra_cmac_init(req);
- if (ret)
- return ret;
+ rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL;
- rctx->task |= SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
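A recurring pattern in the tegra-se-aes hunks above is the keyslot fallback: tegra_key_submit() may fail when no hardware keyslot is free, in which case setkey still succeeds, the raw key is cached in the tfm context, and each request programs a reserved slot on the fly and invalidates it afterwards. A compilable sketch of that flow only; hw_claim_slot(), hw_program_reserved() and hw_invalidate() are hypothetical stand-ins for the tegra_key_* helpers:

#include <stdbool.h>
#include <string.h>

struct sketch_tfm {
	unsigned int slot;		/* 0 = no permanent slot claimed */
	unsigned char key[64];
	unsigned int keylen;
};

/* Stubs so the sketch links; pretend every permanent slot is taken. */
static bool hw_claim_slot(const void *k, unsigned int l, unsigned int *s)
{ (void)k; (void)l; (void)s; return false; }
static unsigned int hw_program_reserved(const void *k, unsigned int l)
{ (void)k; (void)l; return 15; }	/* hypothetical reserved slot index */
static void hw_invalidate(unsigned int s) { (void)s; }

static int sketch_setkey(struct sketch_tfm *t, const void *key, unsigned int len)
{
	if (!hw_claim_slot(key, len, &t->slot)) {
		/* No free slot: remember the key and defer programming. */
		memcpy(t->key, key, len);
		t->keylen = len;
		t->slot = 0;
	}
	return 0;			/* setkey still reports success */
}

static void sketch_do_request(struct sketch_tfm *t)
{
	unsigned int slot = t->slot;

	if (!slot)
		slot = hw_program_reserved(t->key, t->keylen);

	/* ... run the AES operation against "slot" ... */

	if (!t->slot)
		hw_invalidate(slot);	/* release the reserved slot again */
}

int main(void)
{
	struct sketch_tfm t = { 0 };
	unsigned char key[16] = { 0 };

	sketch_setkey(&t, key, sizeof(key));
	sketch_do_request(&t);
	return 0;
}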
diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c
index 0b5cdd5676b1..42d007b7af45 100644
--- a/drivers/crypto/tegra/tegra-se-hash.c
+++ b/drivers/crypto/tegra/tegra-se-hash.c
@@ -34,6 +34,7 @@ struct tegra_sha_reqctx {
struct tegra_se_datbuf datbuf;
struct tegra_se_datbuf residue;
struct tegra_se_datbuf digest;
+ struct tegra_se_datbuf intr_res;
unsigned int alg;
unsigned int config;
unsigned int total_len;
@@ -211,9 +212,62 @@ static int tegra_sha_fallback_export(struct ahash_request *req, void *out)
return crypto_ahash_export(&rctx->fallback_req, out);
}
-static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr,
+static int tegra_se_insert_hash_result(struct tegra_sha_ctx *ctx, u32 *cpuvaddr,
+ struct tegra_sha_reqctx *rctx)
+{
+ __be32 *res_be = (__be32 *)rctx->intr_res.buf;
+ u32 *res = (u32 *)rctx->intr_res.buf;
+ int i = 0, j;
+
+ cpuvaddr[i++] = 0;
+ cpuvaddr[i++] = host1x_opcode_setpayload(HASH_RESULT_REG_COUNT);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_HASH_RESULT);
+
+ for (j = 0; j < HASH_RESULT_REG_COUNT; j++) {
+ int idx = j;
+
+ /*
+ * The initial, intermediate and final hash value of SHA-384, SHA-512
+ * in SHA_HASH_RESULT registers follow the below layout of bytes.
+ *
+ * +---------------+------------+
+ * | HASH_RESULT_0 | B4...B7 |
+ * +---------------+------------+
+ * | HASH_RESULT_1 | B0...B3 |
+ * +---------------+------------+
+ * | HASH_RESULT_2 | B12...B15 |
+ * +---------------+------------+
+ * | HASH_RESULT_3 | B8...B11 |
+ * +---------------+------------+
+ * | ...... |
+ * +---------------+------------+
+ * | HASH_RESULT_14| B60...B63 |
+ * +---------------+------------+
+ * | HASH_RESULT_15| B56...B59 |
+ * +---------------+------------+
+ *
+ */
+ if (ctx->alg == SE_ALG_SHA384 || ctx->alg == SE_ALG_SHA512)
+ idx = (j % 2) ? j - 1 : j + 1;
+
+ /* For SHA-1, SHA-224, SHA-256, SHA-384, SHA-512 the initial
+ * intermediate and final hash value when stored in
+ * SHA_HASH_RESULT registers, the byte order is NOT in
+ * little-endian.
+ */
+ if (ctx->alg <= SE_ALG_SHA512)
+ cpuvaddr[i++] = be32_to_cpu(res_be[idx]);
+ else
+ cpuvaddr[i++] = res[idx];
+ }
+
+ return i;
+}
+
+static int tegra_sha_prep_cmd(struct tegra_sha_ctx *ctx, u32 *cpuvaddr,
struct tegra_sha_reqctx *rctx)
{
+ struct tegra_se *se = ctx->se;
u64 msg_len, msg_left;
int i = 0;
@@ -241,7 +295,7 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr,
cpuvaddr[i++] = upper_32_bits(msg_left);
cpuvaddr[i++] = 0;
cpuvaddr[i++] = 0;
- cpuvaddr[i++] = host1x_opcode_setpayload(6);
+ cpuvaddr[i++] = host1x_opcode_setpayload(2);
cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_CFG);
cpuvaddr[i++] = rctx->config;
@@ -249,15 +303,29 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr,
cpuvaddr[i++] = SE_SHA_TASK_HASH_INIT;
rctx->task &= ~SHA_FIRST;
} else {
- cpuvaddr[i++] = 0;
+ /*
+ * If it isn't the first task, program the HASH_RESULT register
+ * with the intermediate result from the previous task
+ */
+ i += tegra_se_insert_hash_result(ctx, cpuvaddr + i, rctx);
}
+ cpuvaddr[i++] = host1x_opcode_setpayload(4);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_IN_ADDR);
cpuvaddr[i++] = rctx->datbuf.addr;
cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) |
SE_ADDR_HI_SZ(rctx->datbuf.size));
- cpuvaddr[i++] = rctx->digest.addr;
- cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->digest.addr)) |
- SE_ADDR_HI_SZ(rctx->digest.size));
+
+ if (rctx->task & SHA_UPDATE) {
+ cpuvaddr[i++] = rctx->intr_res.addr;
+ cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->intr_res.addr)) |
+ SE_ADDR_HI_SZ(rctx->intr_res.size));
+ } else {
+ cpuvaddr[i++] = rctx->digest.addr;
+ cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->digest.addr)) |
+ SE_ADDR_HI_SZ(rctx->digest.size));
+ }
+
if (rctx->key_id) {
cpuvaddr[i++] = host1x_opcode_setpayload(1);
cpuvaddr[i++] = se_host1x_opcode_nonincr_w(SE_SHA_CRYPTO_CFG);
@@ -266,42 +334,72 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr,
cpuvaddr[i++] = host1x_opcode_setpayload(1);
cpuvaddr[i++] = se_host1x_opcode_nonincr_w(SE_SHA_OPERATION);
- cpuvaddr[i++] = SE_SHA_OP_WRSTALL |
- SE_SHA_OP_START |
+ cpuvaddr[i++] = SE_SHA_OP_WRSTALL | SE_SHA_OP_START |
SE_SHA_OP_LASTBUF;
cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
- dev_dbg(se->dev, "msg len %llu msg left %llu cfg %#x",
- msg_len, msg_left, rctx->config);
+ dev_dbg(se->dev, "msg len %llu msg left %llu sz %zd cfg %#x",
+ msg_len, msg_left, rctx->datbuf.size, rctx->config);
return i;
}
-static void tegra_sha_copy_hash_result(struct tegra_se *se, struct tegra_sha_reqctx *rctx)
+static int tegra_sha_do_init(struct ahash_request *req)
{
- int i;
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_se *se = ctx->se;
- for (i = 0; i < HASH_RESULT_REG_COUNT; i++)
- rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4));
-}
+ if (ctx->fallback)
+ return tegra_sha_fallback_init(req);
-static void tegra_sha_paste_hash_result(struct tegra_se *se, struct tegra_sha_reqctx *rctx)
-{
- int i;
+ rctx->total_len = 0;
+ rctx->datbuf.size = 0;
+ rctx->residue.size = 0;
+ rctx->key_id = ctx->key_id;
+ rctx->task |= SHA_FIRST;
+ rctx->alg = ctx->alg;
+ rctx->blk_size = crypto_ahash_blocksize(tfm);
+ rctx->digest.size = crypto_ahash_digestsize(tfm);
+
+ rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size,
+ &rctx->digest.addr, GFP_KERNEL);
+ if (!rctx->digest.buf)
+ goto digbuf_fail;
+
+ rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size,
+ &rctx->residue.addr, GFP_KERNEL);
+ if (!rctx->residue.buf)
+ goto resbuf_fail;
+
+ rctx->intr_res.size = HASH_RESULT_REG_COUNT * 4;
+ rctx->intr_res.buf = dma_alloc_coherent(se->dev, rctx->intr_res.size,
+ &rctx->intr_res.addr, GFP_KERNEL);
+ if (!rctx->intr_res.buf)
+ goto intr_res_fail;
+
+ return 0;
- for (i = 0; i < HASH_RESULT_REG_COUNT; i++)
- writel(rctx->result[i],
- se->base + se->hw->regs->result + (i * 4));
+intr_res_fail:
+ dma_free_coherent(se->dev, rctx->residue.size, rctx->residue.buf,
+ rctx->residue.addr);
+resbuf_fail:
+ dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
+ rctx->digest.addr);
+digbuf_fail:
+ return -ENOMEM;
}
static int tegra_sha_do_update(struct ahash_request *req)
{
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct tegra_se *se = ctx->se;
unsigned int nblks, nresidue, size, ret;
- u32 *cpuvaddr = ctx->se->cmdbuf->addr;
+ u32 *cpuvaddr = se->cmdbuf->addr;
nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;
@@ -317,7 +415,6 @@ static int tegra_sha_do_update(struct ahash_request *req)
rctx->src_sg = req->src;
rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue;
- rctx->total_len += rctx->datbuf.size;
/*
* If nbytes are less than a block size, copy it residue and
@@ -326,11 +423,16 @@ static int tegra_sha_do_update(struct ahash_request *req)
if (nblks < 1) {
scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size,
rctx->src_sg, 0, req->nbytes, 0);
-
rctx->residue.size += req->nbytes;
+
return 0;
}
+ rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
+ &rctx->datbuf.addr, GFP_KERNEL);
+ if (!rctx->datbuf.buf)
+ return -ENOMEM;
+
/* Copy the previous residue first */
if (rctx->residue.size)
memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
@@ -343,29 +445,16 @@ static int tegra_sha_do_update(struct ahash_request *req)
/* Update residue value with the residue after current block */
rctx->residue.size = nresidue;
+ rctx->total_len += rctx->datbuf.size;
rctx->config = tegra_sha_get_config(rctx->alg) |
- SE_SHA_DST_HASH_REG;
-
- /*
- * If this is not the first 'update' call, paste the previous copied
- * intermediate results to the registers so that it gets picked up.
- * This is to support the import/export functionality.
- */
- if (!(rctx->task & SHA_FIRST))
- tegra_sha_paste_hash_result(ctx->se, rctx);
+ SE_SHA_DST_MEMORY;
- size = tegra_sha_prep_cmd(ctx->se, cpuvaddr, rctx);
+ size = tegra_sha_prep_cmd(ctx, cpuvaddr, rctx);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, size);
- ret = tegra_se_host1x_submit(ctx->se, size);
-
- /*
- * If this is not the final update, copy the intermediate results
- * from the registers so that it can be used in the next 'update'
- * call. This is to support the import/export functionality.
- */
- if (!(rctx->task & SHA_FINAL))
- tegra_sha_copy_hash_result(ctx->se, rctx);
+ dma_free_coherent(se->dev, rctx->datbuf.size,
+ rctx->datbuf.buf, rctx->datbuf.addr);
return ret;
}
@@ -379,16 +468,25 @@ static int tegra_sha_do_final(struct ahash_request *req)
u32 *cpuvaddr = se->cmdbuf->addr;
int size, ret = 0;
- memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
+ if (rctx->residue.size) {
+ rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size,
+ &rctx->datbuf.addr, GFP_KERNEL);
+ if (!rctx->datbuf.buf) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
+ }
+
rctx->datbuf.size = rctx->residue.size;
rctx->total_len += rctx->residue.size;
rctx->config = tegra_sha_get_config(rctx->alg) |
SE_SHA_DST_MEMORY;
- size = tegra_sha_prep_cmd(se, cpuvaddr, rctx);
-
- ret = tegra_se_host1x_submit(se, size);
+ size = tegra_sha_prep_cmd(ctx, cpuvaddr, rctx);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, size);
if (ret)
goto out;
@@ -396,12 +494,18 @@ static int tegra_sha_do_final(struct ahash_request *req)
memcpy(req->result, rctx->digest.buf, rctx->digest.size);
out:
- dma_free_coherent(se->dev, SE_SHA_BUFLEN,
- rctx->datbuf.buf, rctx->datbuf.addr);
+ if (rctx->residue.size)
+ dma_free_coherent(se->dev, rctx->datbuf.size,
+ rctx->datbuf.buf, rctx->datbuf.addr);
+out_free:
dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm),
rctx->residue.buf, rctx->residue.addr);
dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
rctx->digest.addr);
+
+ dma_free_coherent(se->dev, rctx->intr_res.size, rctx->intr_res.buf,
+ rctx->intr_res.addr);
+
return ret;
}
@@ -414,16 +518,31 @@ static int tegra_sha_do_one_req(struct crypto_engine *engine, void *areq)
struct tegra_se *se = ctx->se;
int ret = 0;
+ if (rctx->task & SHA_INIT) {
+ ret = tegra_sha_do_init(req);
+ if (ret)
+ goto out;
+
+ rctx->task &= ~SHA_INIT;
+ }
+
if (rctx->task & SHA_UPDATE) {
ret = tegra_sha_do_update(req);
+ if (ret)
+ goto out;
+
rctx->task &= ~SHA_UPDATE;
}
if (rctx->task & SHA_FINAL) {
ret = tegra_sha_do_final(req);
+ if (ret)
+ goto out;
+
rctx->task &= ~SHA_FINAL;
}
+out:
crypto_finalize_hash_request(se->engine, req, ret);
return 0;
@@ -497,52 +616,6 @@ static void tegra_sha_cra_exit(struct crypto_tfm *tfm)
tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
}
-static int tegra_sha_init(struct ahash_request *req)
-{
- struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
- struct tegra_se *se = ctx->se;
-
- if (ctx->fallback)
- return tegra_sha_fallback_init(req);
-
- rctx->total_len = 0;
- rctx->datbuf.size = 0;
- rctx->residue.size = 0;
- rctx->key_id = ctx->key_id;
- rctx->task = SHA_FIRST;
- rctx->alg = ctx->alg;
- rctx->blk_size = crypto_ahash_blocksize(tfm);
- rctx->digest.size = crypto_ahash_digestsize(tfm);
-
- rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size,
- &rctx->digest.addr, GFP_KERNEL);
- if (!rctx->digest.buf)
- goto digbuf_fail;
-
- rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size,
- &rctx->residue.addr, GFP_KERNEL);
- if (!rctx->residue.buf)
- goto resbuf_fail;
-
- rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
- &rctx->datbuf.addr, GFP_KERNEL);
- if (!rctx->datbuf.buf)
- goto datbuf_fail;
-
- return 0;
-
-datbuf_fail:
- dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf,
- rctx->residue.addr);
-resbuf_fail:
- dma_free_coherent(se->dev, SE_SHA_BUFLEN, rctx->datbuf.buf,
- rctx->datbuf.addr);
-digbuf_fail:
- return -ENOMEM;
-}
-
static int tegra_hmac_fallback_setkey(struct tegra_sha_ctx *ctx, const u8 *key,
unsigned int keylen)
{
@@ -559,13 +632,29 @@ static int tegra_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+ int ret;
if (aes_check_keylen(keylen))
return tegra_hmac_fallback_setkey(ctx, key, keylen);
+ ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+ if (ret)
+ return tegra_hmac_fallback_setkey(ctx, key, keylen);
+
ctx->fallback = false;
- return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+ return 0;
+}
+
+static int tegra_sha_init(struct ahash_request *req)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ rctx->task = SHA_INIT;
+
+ return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
static int tegra_sha_update(struct ahash_request *req)
@@ -615,16 +704,12 @@ static int tegra_sha_digest(struct ahash_request *req)
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
- int ret;
if (ctx->fallback)
return tegra_sha_fallback_digest(req);
- ret = tegra_sha_init(req);
- if (ret)
- return ret;
+ rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL;
- rctx->task |= SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
diff --git a/drivers/crypto/tegra/tegra-se-key.c b/drivers/crypto/tegra/tegra-se-key.c
index ac14678dbd30..956fa9b4e9b1 100644
--- a/drivers/crypto/tegra/tegra-se-key.c
+++ b/drivers/crypto/tegra/tegra-se-key.c
@@ -115,11 +115,17 @@ static int tegra_key_insert(struct tegra_se *se, const u8 *key,
u32 keylen, u16 slot, u32 alg)
{
const u32 *keyval = (u32 *)key;
- u32 *addr = se->cmdbuf->addr, size;
+ u32 *addr = se->keybuf->addr, size;
+ int ret;
+
+ mutex_lock(&kslt_lock);
size = tegra_key_prep_ins_cmd(se, addr, keyval, keylen, slot, alg);
+ ret = tegra_se_host1x_submit(se, se->keybuf, size);
+
+ mutex_unlock(&kslt_lock);
- return tegra_se_host1x_submit(se, size);
+ return ret;
}
void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg)
@@ -135,6 +141,23 @@ void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg)
tegra_keyslot_free(keyid);
}
+void tegra_key_invalidate_reserved(struct tegra_se *se, u32 keyid, u32 alg)
+{
+ u8 zkey[AES_MAX_KEY_SIZE] = {0};
+
+ if (!keyid)
+ return;
+
+ /* Overwrite the key with 0s */
+ tegra_key_insert(se, zkey, AES_MAX_KEY_SIZE, keyid, alg);
+}
+
+inline int tegra_key_submit_reserved(struct tegra_se *se, const u8 *key,
+ u32 keylen, u32 alg, u32 *keyid)
+{
+ return tegra_key_insert(se, key, keylen, *keyid, alg);
+}
+
int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u32 *keyid)
{
int ret;
@@ -143,7 +166,7 @@ int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u3
if (!tegra_key_in_kslt(*keyid)) {
*keyid = tegra_keyslot_alloc();
if (!(*keyid)) {
- dev_err(se->dev, "failed to allocate key slot\n");
+ dev_dbg(se->dev, "failed to allocate key slot\n");
return -ENOMEM;
}
}
diff --git a/drivers/crypto/tegra/tegra-se-main.c b/drivers/crypto/tegra/tegra-se-main.c
index 918c0b10614d..1c94f1de0546 100644
--- a/drivers/crypto/tegra/tegra-se-main.c
+++ b/drivers/crypto/tegra/tegra-se-main.c
@@ -141,7 +141,7 @@ static struct tegra_se_cmdbuf *tegra_se_host1x_bo_alloc(struct tegra_se *se, ssi
return cmdbuf;
}
-int tegra_se_host1x_submit(struct tegra_se *se, u32 size)
+int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size)
{
struct host1x_job *job;
int ret;
@@ -160,9 +160,9 @@ int tegra_se_host1x_submit(struct tegra_se *se, u32 size)
job->engine_fallback_streamid = se->stream_id;
job->engine_streamid_offset = SE_STREAM_ID;
- se->cmdbuf->words = size;
+ cmdbuf->words = size;
- host1x_job_add_gather(job, &se->cmdbuf->bo, size, 0);
+ host1x_job_add_gather(job, &cmdbuf->bo, size, 0);
ret = host1x_job_pin(job, se->dev);
if (ret) {
@@ -220,14 +220,22 @@ static int tegra_se_client_init(struct host1x_client *client)
goto syncpt_put;
}
+ se->keybuf = tegra_se_host1x_bo_alloc(se, SZ_4K);
+ if (!se->keybuf) {
+ ret = -ENOMEM;
+ goto cmdbuf_put;
+ }
+
ret = se->hw->init_alg(se);
if (ret) {
dev_err(se->dev, "failed to register algorithms\n");
- goto cmdbuf_put;
+ goto keybuf_put;
}
return 0;
+keybuf_put:
+ tegra_se_cmdbuf_put(&se->keybuf->bo);
cmdbuf_put:
tegra_se_cmdbuf_put(&se->cmdbuf->bo);
syncpt_put:
diff --git a/drivers/crypto/tegra/tegra-se.h b/drivers/crypto/tegra/tegra-se.h
index b9dd7ceb8783..b6cac9384f66 100644
--- a/drivers/crypto/tegra/tegra-se.h
+++ b/drivers/crypto/tegra/tegra-se.h
@@ -24,6 +24,7 @@
#define SE_STREAM_ID 0x90
#define SE_SHA_CFG 0x4004
+#define SE_SHA_IN_ADDR 0x400c
#define SE_SHA_KEY_ADDR 0x4094
#define SE_SHA_KEY_DATA 0x4098
#define SE_SHA_KEYMANIFEST 0x409c
@@ -340,12 +341,14 @@
#define SE_CRYPTO_CTR_REG_COUNT 4
#define SE_MAX_KEYSLOT 15
#define SE_MAX_MEM_ALLOC SZ_4M
-#define SE_AES_BUFLEN 0x8000
-#define SE_SHA_BUFLEN 0x2000
+
+#define TEGRA_AES_RESERVED_KSLT 14
+#define TEGRA_XTS_RESERVED_KSLT 15
#define SHA_FIRST BIT(0)
-#define SHA_UPDATE BIT(1)
-#define SHA_FINAL BIT(2)
+#define SHA_INIT BIT(1)
+#define SHA_UPDATE BIT(2)
+#define SHA_FINAL BIT(3)
/* Security Engine operation modes */
enum se_aes_alg {
@@ -420,6 +423,7 @@ struct tegra_se {
struct host1x_client client;
struct host1x_channel *channel;
struct tegra_se_cmdbuf *cmdbuf;
+ struct tegra_se_cmdbuf *keybuf;
struct crypto_engine *engine;
struct host1x_syncpt *syncpt;
struct device *dev;
@@ -501,8 +505,33 @@ void tegra_deinit_aes(struct tegra_se *se);
void tegra_deinit_hash(struct tegra_se *se);
int tegra_key_submit(struct tegra_se *se, const u8 *key,
u32 keylen, u32 alg, u32 *keyid);
+
+int tegra_key_submit_reserved(struct tegra_se *se, const u8 *key,
+ u32 keylen, u32 alg, u32 *keyid);
+
void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg);
-int tegra_se_host1x_submit(struct tegra_se *se, u32 size);
+void tegra_key_invalidate_reserved(struct tegra_se *se, u32 keyid, u32 alg);
+int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size);
+
+static inline int tegra_key_submit_reserved_aes(struct tegra_se *se, const u8 *key,
+ u32 keylen, u32 alg, u32 *keyid)
+{
+ *keyid = TEGRA_AES_RESERVED_KSLT;
+ return tegra_key_submit_reserved(se, key, keylen, alg, keyid);
+}
+
+static inline int tegra_key_submit_reserved_xts(struct tegra_se *se, const u8 *key,
+ u32 keylen, u32 alg, u32 *keyid)
+{
+ *keyid = TEGRA_XTS_RESERVED_KSLT;
+ return tegra_key_submit_reserved(se, key, keylen, alg, keyid);
+}
+
+static inline bool tegra_key_is_reserved(u32 keyid)
+{
+ return ((keyid == TEGRA_AES_RESERVED_KSLT) ||
+ (keyid == TEGRA_XTS_RESERVED_KSLT));
+}
/* HOST1x OPCODES */
static inline u32 host1x_opcode_setpayload(unsigned int payload)
diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
index 48fee07b7e51..2e44915c9f23 100644
--- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
@@ -21,12 +21,11 @@
#include "virtio_crypto_common.h"
struct virtio_crypto_rsa_ctx {
- MPI n;
+ unsigned int key_size;
};
struct virtio_crypto_akcipher_ctx {
struct virtio_crypto *vcrypto;
- struct crypto_akcipher *tfm;
bool session_valid;
__u64 session_id;
union {
@@ -36,8 +35,6 @@ struct virtio_crypto_akcipher_ctx {
struct virtio_crypto_akcipher_request {
struct virtio_crypto_request base;
- struct virtio_crypto_akcipher_ctx *akcipher_ctx;
- struct akcipher_request *akcipher_req;
void *src_buf;
void *dst_buf;
uint32_t opcode;
@@ -69,7 +66,9 @@ static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *
{
struct virtio_crypto_akcipher_request *vc_akcipher_req =
container_of(vc_req, struct virtio_crypto_akcipher_request, base);
- struct akcipher_request *akcipher_req;
+ struct akcipher_request *akcipher_req =
+ container_of((void *)vc_akcipher_req, struct akcipher_request,
+ __ctx);
int error;
switch (vc_req->status) {
@@ -88,8 +87,7 @@ static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *
break;
}
- akcipher_req = vc_akcipher_req->akcipher_req;
- /* actual length maybe less than dst buffer */
+ /* actual length may be less than dst buffer */
akcipher_req->dst_len = len - sizeof(vc_req->status);
sg_copy_from_buffer(akcipher_req->dst, sg_nents(akcipher_req->dst),
vc_akcipher_req->dst_buf, akcipher_req->dst_len);
@@ -213,7 +211,8 @@ out:
static int __virtio_crypto_akcipher_do_req(struct virtio_crypto_akcipher_request *vc_akcipher_req,
struct akcipher_request *req, struct data_queue *data_vq)
{
- struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx;
+ struct crypto_akcipher *atfm = crypto_akcipher_reqtfm(req);
+ struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(atfm);
struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
struct virtio_crypto *vcrypto = ctx->vcrypto;
struct virtio_crypto_op_data_req *req_data = vc_req->req_data;
@@ -273,7 +272,8 @@ static int virtio_crypto_rsa_do_req(struct crypto_engine *engine, void *vreq)
struct akcipher_request *req = container_of(vreq, struct akcipher_request, base);
struct virtio_crypto_akcipher_request *vc_akcipher_req = akcipher_request_ctx(req);
struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
- struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx;
+ struct crypto_akcipher *atfm = crypto_akcipher_reqtfm(req);
+ struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(atfm);
struct virtio_crypto *vcrypto = ctx->vcrypto;
struct data_queue *data_vq = vc_req->dataq;
struct virtio_crypto_op_header *header;
@@ -319,8 +319,6 @@ static int virtio_crypto_rsa_req(struct akcipher_request *req, uint32_t opcode)
vc_req->dataq = data_vq;
vc_req->alg_cb = virtio_crypto_dataq_akcipher_callback;
- vc_akcipher_req->akcipher_ctx = ctx;
- vc_akcipher_req->akcipher_req = req;
vc_akcipher_req->opcode = opcode;
return crypto_transfer_akcipher_request_to_engine(data_vq->engine, req);
@@ -352,10 +350,7 @@ static int virtio_crypto_rsa_set_key(struct crypto_akcipher *tfm,
int node = virtio_crypto_get_current_node();
uint32_t keytype;
int ret;
-
- /* mpi_free will test n, just free it. */
- mpi_free(rsa_ctx->n);
- rsa_ctx->n = NULL;
+ MPI n;
if (private) {
keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE;
@@ -368,10 +363,13 @@ static int virtio_crypto_rsa_set_key(struct crypto_akcipher *tfm,
if (ret)
return ret;
- rsa_ctx->n = mpi_read_raw_data(rsa_key.n, rsa_key.n_sz);
- if (!rsa_ctx->n)
+ n = mpi_read_raw_data(rsa_key.n, rsa_key.n_sz);
+ if (!n)
return -ENOMEM;
+ rsa_ctx->key_size = mpi_get_size(n);
+ mpi_free(n);
+
if (!ctx->vcrypto) {
vcrypto = virtcrypto_get_dev_node(node, VIRTIO_CRYPTO_SERVICE_AKCIPHER,
VIRTIO_CRYPTO_AKCIPHER_RSA);
@@ -442,15 +440,11 @@ static unsigned int virtio_crypto_rsa_max_size(struct crypto_akcipher *tfm)
struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;
- return mpi_get_size(rsa_ctx->n);
+ return rsa_ctx->key_size;
}
static int virtio_crypto_rsa_init_tfm(struct crypto_akcipher *tfm)
{
- struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
-
- ctx->tfm = tfm;
-
akcipher_set_reqsize(tfm,
sizeof(struct virtio_crypto_akcipher_request));
@@ -460,12 +454,9 @@ static int virtio_crypto_rsa_init_tfm(struct crypto_akcipher *tfm)
static void virtio_crypto_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
- struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;
virtio_crypto_alg_akcipher_close_session(ctx);
virtcrypto_dev_put(ctx->vcrypto);
- mpi_free(rsa_ctx->n);
- rsa_ctx->n = NULL;
}
static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = {
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index d0278eb568b9..0d522049f595 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -480,10 +480,8 @@ static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
for (i = 0; i < vcrypto->max_data_queues; i++) {
vq = vcrypto->data_vq[i].vq;
- while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) {
- kfree(vc_req->req_data);
- kfree(vc_req->sgs);
- }
+ while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL)
+ virtcrypto_clear_request(vc_req);
cond_resched();
}
}
diff --git a/drivers/crypto/virtio/virtio_crypto_mgr.c b/drivers/crypto/virtio/virtio_crypto_mgr.c
index 70e778aac0f2..bddbd8ebfebe 100644
--- a/drivers/crypto/virtio/virtio_crypto_mgr.c
+++ b/drivers/crypto/virtio/virtio_crypto_mgr.c
@@ -256,7 +256,7 @@ int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
* @vcrypto: Pointer to virtio crypto device.
*
* Function notifies all the registered services that the virtio crypto device
- * is ready to be used.
+ * shall no longer be used.
* To be used by virtio crypto device specific drivers.
*
* Return: void
diff --git a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
index 23c41d87d835..1b3fb21a2a7d 100644
--- a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
@@ -17,7 +17,6 @@
struct virtio_crypto_skcipher_ctx {
struct virtio_crypto *vcrypto;
- struct crypto_skcipher *tfm;
struct virtio_crypto_sym_session_info enc_sess_info;
struct virtio_crypto_sym_session_info dec_sess_info;
@@ -28,8 +27,6 @@ struct virtio_crypto_sym_request {
/* Cipher or aead */
uint32_t type;
- struct virtio_crypto_skcipher_ctx *skcipher_ctx;
- struct skcipher_request *skcipher_req;
uint8_t *iv;
/* Encryption? */
bool encrypt;
@@ -57,7 +54,9 @@ static void virtio_crypto_dataq_sym_callback
{
struct virtio_crypto_sym_request *vc_sym_req =
container_of(vc_req, struct virtio_crypto_sym_request, base);
- struct skcipher_request *ablk_req;
+ struct skcipher_request *ablk_req =
+ container_of((void *)vc_sym_req, struct skcipher_request,
+ __ctx);
int error;
/* Finish the encrypt or decrypt process */
@@ -77,7 +76,6 @@ static void virtio_crypto_dataq_sym_callback
error = -EIO;
break;
}
- ablk_req = vc_sym_req->skcipher_req;
virtio_crypto_skcipher_finalize_req(vc_sym_req,
ablk_req, error);
}
@@ -325,7 +323,7 @@ __virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
struct data_queue *data_vq)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct virtio_crypto_skcipher_ctx *ctx = vc_sym_req->skcipher_ctx;
+ struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct virtio_crypto_request *vc_req = &vc_sym_req->base;
unsigned int ivsize = crypto_skcipher_ivsize(tfm);
struct virtio_crypto *vcrypto = ctx->vcrypto;
@@ -481,8 +479,6 @@ static int virtio_crypto_skcipher_encrypt(struct skcipher_request *req)
vc_req->dataq = data_vq;
vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
- vc_sym_req->skcipher_ctx = ctx;
- vc_sym_req->skcipher_req = req;
vc_sym_req->encrypt = true;
return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
@@ -506,8 +502,6 @@ static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req)
vc_req->dataq = data_vq;
vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
- vc_sym_req->skcipher_ctx = ctx;
- vc_sym_req->skcipher_req = req;
vc_sym_req->encrypt = false;
return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
@@ -515,10 +509,7 @@ static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req)
static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm)
{
- struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-
crypto_skcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_sym_request));
- ctx->tfm = tfm;
return 0;
}
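
For reference, a minimal sketch of the container_of() pattern the virtio-crypto callbacks now rely on after dropping the stored back-pointers: the per-request driver context lives inline in the request's __ctx[] storage, so the outer request can be recovered from the context pointer. The demo structures and the local container_of() definition below are hypothetical stand-ins for the kernel versions.

/* Illustrative sketch only; not part of the patch. */
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_request {
	int id;
	void *__ctx[];			/* driver context is stored here */
};

struct demo_driver_ctx {
	int opcode;
};

static struct demo_request *ctx_to_request(struct demo_driver_ctx *ctx)
{
	/*
	 * The context pointer is the address of __ctx[], so walking back
	 * by offsetof(struct demo_request, __ctx) yields the request.
	 */
	return container_of((void *)ctx, struct demo_request, __ctx);
}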
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index 876469e23f7a..8ac1e9d70eeb 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -7,6 +7,7 @@ menuconfig CXL_BUS
select PCI_DOE
select FIRMWARE_TABLE
select NUMA_KEEP_MEMINFO if NUMA_MEMBLKS
+ select FWCTL if CXL_FEATURES
help
CXL is a bus that is electrically compatible with PCI Express, but
layers three protocols on that signalling (CXL.io, CXL.cache, and
@@ -102,6 +103,17 @@ config CXL_MEM
If unsure say 'm'.
+config CXL_FEATURES
+ bool "CXL: Features"
+ depends on CXL_PCI
+ help
+ Enable support for CXL Features. A CXL device that includes a mailbox
+ supports commands that allow listing, getting, and setting of
+ optionally defined features such as memory sparing or post package
+ sparing. Vendors may define custom features for the device.
+
+ If unsure say 'n'.
+
config CXL_PORT
default CXL_BUS
tristate
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
index 9259bcc6773c..b0bfbd9eac9b 100644
--- a/drivers/cxl/core/Makefile
+++ b/drivers/cxl/core/Makefile
@@ -16,3 +16,4 @@ cxl_core-y += pmu.o
cxl_core-y += cdat.o
cxl_core-$(CONFIG_TRACING) += trace.o
cxl_core-$(CONFIG_CXL_REGION) += region.o
+cxl_core-$(CONFIG_CXL_FEATURES) += features.o
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 800466f96a68..17e99a25c29a 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -4,6 +4,8 @@
#ifndef __CXL_CORE_H__
#define __CXL_CORE_H__
+#include <cxl/mailbox.h>
+
extern const struct device_type cxl_nvdimm_bridge_type;
extern const struct device_type cxl_nvdimm_type;
extern const struct device_type cxl_pmu_type;
@@ -65,9 +67,9 @@ static inline void cxl_region_exit(void)
struct cxl_send_command;
struct cxl_mem_query_commands;
-int cxl_query_cmd(struct cxl_memdev *cxlmd,
+int cxl_query_cmd(struct cxl_mailbox *cxl_mbox,
struct cxl_mem_query_commands __user *q);
-int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s);
+int cxl_send_cmd(struct cxl_mailbox *cxl_mbox, struct cxl_send_command __user *s);
void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr,
resource_size_t length);
@@ -115,4 +117,15 @@ bool cxl_need_node_perf_attrs_update(int nid);
int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,
struct access_coordinate *c);
+#ifdef CONFIG_CXL_FEATURES
+size_t cxl_get_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid,
+ enum cxl_get_feat_selection selection,
+ void *feat_out, size_t feat_out_size, u16 offset,
+ u16 *return_code);
+int cxl_set_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid,
+ u8 feat_version, const void *feat_data,
+ size_t feat_data_size, u32 feat_flag, u16 offset,
+ u16 *return_code);
+#endif
+
#endif /* __CXL_CORE_H__ */
diff --git a/drivers/cxl/core/features.c b/drivers/cxl/core/features.c
new file mode 100644
index 000000000000..f4daefe3180e
--- /dev/null
+++ b/drivers/cxl/core/features.c
@@ -0,0 +1,708 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024-2025 Intel Corporation. All rights reserved. */
+#include <linux/fwctl.h>
+#include <linux/device.h>
+#include <cxl/mailbox.h>
+#include <cxl/features.h>
+#include <uapi/fwctl/cxl.h>
+#include "cxl.h"
+#include "core.h"
+#include "cxlmem.h"
+
+/* All the features below are exclusive to the kernel */
+static const uuid_t cxl_exclusive_feats[] = {
+ CXL_FEAT_PATROL_SCRUB_UUID,
+ CXL_FEAT_ECS_UUID,
+ CXL_FEAT_SPPR_UUID,
+ CXL_FEAT_HPPR_UUID,
+ CXL_FEAT_CACHELINE_SPARING_UUID,
+ CXL_FEAT_ROW_SPARING_UUID,
+ CXL_FEAT_BANK_SPARING_UUID,
+ CXL_FEAT_RANK_SPARING_UUID,
+};
+
+static bool is_cxl_feature_exclusive_by_uuid(const uuid_t *uuid)
+{
+ for (int i = 0; i < ARRAY_SIZE(cxl_exclusive_feats); i++) {
+ if (uuid_equal(uuid, &cxl_exclusive_feats[i]))
+ return true;
+ }
+
+ return false;
+}
+
+static bool is_cxl_feature_exclusive(struct cxl_feat_entry *entry)
+{
+ return is_cxl_feature_exclusive_by_uuid(&entry->uuid);
+}
+
+inline struct cxl_features_state *to_cxlfs(struct cxl_dev_state *cxlds)
+{
+ return cxlds->cxlfs;
+}
+EXPORT_SYMBOL_NS_GPL(to_cxlfs, "CXL");
+
+static int cxl_get_supported_features_count(struct cxl_mailbox *cxl_mbox)
+{
+ struct cxl_mbox_get_sup_feats_out mbox_out;
+ struct cxl_mbox_get_sup_feats_in mbox_in;
+ struct cxl_mbox_cmd mbox_cmd;
+ int rc;
+
+ memset(&mbox_in, 0, sizeof(mbox_in));
+ mbox_in.count = cpu_to_le32(sizeof(mbox_out));
+ memset(&mbox_out, 0, sizeof(mbox_out));
+ mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_GET_SUPPORTED_FEATURES,
+ .size_in = sizeof(mbox_in),
+ .payload_in = &mbox_in,
+ .size_out = sizeof(mbox_out),
+ .payload_out = &mbox_out,
+ .min_out = sizeof(mbox_out),
+ };
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+ if (rc < 0)
+ return rc;
+
+ return le16_to_cpu(mbox_out.supported_feats);
+}
+
+static struct cxl_feat_entries *
+get_supported_features(struct cxl_features_state *cxlfs)
+{
+ int remain_feats, max_size, max_feats, start, rc, hdr_size;
+ struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
+ int feat_size = sizeof(struct cxl_feat_entry);
+ struct cxl_mbox_get_sup_feats_in mbox_in;
+ struct cxl_feat_entry *entry;
+ struct cxl_mbox_cmd mbox_cmd;
+ int user_feats = 0;
+ int count;
+
+ count = cxl_get_supported_features_count(cxl_mbox);
+ if (count <= 0)
+ return NULL;
+
+ struct cxl_feat_entries *entries __free(kvfree) =
+ kvmalloc(struct_size(entries, ent, count), GFP_KERNEL);
+ if (!entries)
+ return NULL;
+
+ struct cxl_mbox_get_sup_feats_out *mbox_out __free(kvfree) =
+ kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
+ if (!mbox_out)
+ return NULL;
+
+ hdr_size = struct_size(mbox_out, ents, 0);
+ max_size = cxl_mbox->payload_size - hdr_size;
+ /* max feat entries that can fit in mailbox max payload size */
+ max_feats = max_size / feat_size;
+ entry = entries->ent;
+
+ start = 0;
+ remain_feats = count;
+ do {
+ int retrieved, alloc_size, copy_feats;
+ int num_entries;
+
+ if (remain_feats > max_feats) {
+ alloc_size = struct_size(mbox_out, ents, max_feats);
+ remain_feats = remain_feats - max_feats;
+ copy_feats = max_feats;
+ } else {
+ alloc_size = struct_size(mbox_out, ents, remain_feats);
+ copy_feats = remain_feats;
+ remain_feats = 0;
+ }
+
+ memset(&mbox_in, 0, sizeof(mbox_in));
+ mbox_in.count = cpu_to_le32(alloc_size);
+ mbox_in.start_idx = cpu_to_le16(start);
+ memset(mbox_out, 0, alloc_size);
+ mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_GET_SUPPORTED_FEATURES,
+ .size_in = sizeof(mbox_in),
+ .payload_in = &mbox_in,
+ .size_out = alloc_size,
+ .payload_out = mbox_out,
+ .min_out = hdr_size,
+ };
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+ if (rc < 0)
+ return NULL;
+
+ if (mbox_cmd.size_out <= hdr_size)
+ return NULL;
+
+ /*
+ * Make sure the retrieved output buffer size is a multiple of
+ * the feature entry size.
+ */
+ retrieved = mbox_cmd.size_out - hdr_size;
+ if (retrieved % feat_size)
+ return NULL;
+
+ num_entries = le16_to_cpu(mbox_out->num_entries);
+ /*
+ * If the reported number of entries * the defined entry size does
+ * not match the retrieved output bytes, the output payload is
+ * malformed.
+ */
+ if (num_entries * feat_size != retrieved)
+ return NULL;
+
+ memcpy(entry, mbox_out->ents, retrieved);
+ for (int i = 0; i < num_entries; i++) {
+ if (!is_cxl_feature_exclusive(entry + i))
+ user_feats++;
+ }
+ entry += num_entries;
+ /*
+ * If the number of output entries is less than expected, add the
+ * remaining entries to the next batch.
+ */
+ remain_feats += copy_feats - num_entries;
+ start += num_entries;
+ } while (remain_feats);
+
+ entries->num_features = count;
+ entries->num_user_features = user_feats;
+
+ return no_free_ptr(entries);
+}
+
+static void free_cxlfs(void *_cxlfs)
+{
+ struct cxl_features_state *cxlfs = _cxlfs;
+ struct cxl_dev_state *cxlds = cxlfs->cxlds;
+
+ cxlds->cxlfs = NULL;
+ kvfree(cxlfs->entries);
+ kfree(cxlfs);
+}
+
+/**
+ * devm_cxl_setup_features() - Allocate and initialize features context
+ * @cxlds: CXL device context
+ *
+ * Return: 0 on success or -errno on failure.
+ */
+int devm_cxl_setup_features(struct cxl_dev_state *cxlds)
+{
+ struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
+
+ if (cxl_mbox->feat_cap < CXL_FEATURES_RO)
+ return -ENODEV;
+
+ struct cxl_features_state *cxlfs __free(kfree) =
+ kzalloc(sizeof(*cxlfs), GFP_KERNEL);
+ if (!cxlfs)
+ return -ENOMEM;
+
+ cxlfs->cxlds = cxlds;
+
+ cxlfs->entries = get_supported_features(cxlfs);
+ if (!cxlfs->entries)
+ return -ENOMEM;
+
+ cxlds->cxlfs = cxlfs;
+
+ return devm_add_action_or_reset(cxlds->dev, free_cxlfs, no_free_ptr(cxlfs));
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_features, "CXL");
+
+size_t cxl_get_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid,
+ enum cxl_get_feat_selection selection,
+ void *feat_out, size_t feat_out_size, u16 offset,
+ u16 *return_code)
+{
+ size_t data_to_rd_size, size_out;
+ struct cxl_mbox_get_feat_in pi;
+ struct cxl_mbox_cmd mbox_cmd;
+ size_t data_rcvd_size = 0;
+ int rc;
+
+ if (return_code)
+ *return_code = CXL_MBOX_CMD_RC_INPUT;
+
+ if (!feat_out || !feat_out_size)
+ return 0;
+
+ size_out = min(feat_out_size, cxl_mbox->payload_size);
+ uuid_copy(&pi.uuid, feat_uuid);
+ pi.selection = selection;
+ do {
+ data_to_rd_size = min(feat_out_size - data_rcvd_size,
+ cxl_mbox->payload_size);
+ pi.offset = cpu_to_le16(offset + data_rcvd_size);
+ pi.count = cpu_to_le16(data_to_rd_size);
+
+ mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_GET_FEATURE,
+ .size_in = sizeof(pi),
+ .payload_in = &pi,
+ .size_out = size_out,
+ .payload_out = feat_out + data_rcvd_size,
+ .min_out = data_to_rd_size,
+ };
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+ if (rc < 0 || !mbox_cmd.size_out) {
+ if (return_code)
+ *return_code = mbox_cmd.return_code;
+ return 0;
+ }
+ data_rcvd_size += mbox_cmd.size_out;
+ } while (data_rcvd_size < feat_out_size);
+
+ if (return_code)
+ *return_code = CXL_MBOX_CMD_RC_SUCCESS;
+
+ return data_rcvd_size;
+}
+
+/*
+ * FEAT_DATA_MIN_PAYLOAD_SIZE - minimum number of extra bytes that must be
+ * available in the mailbox payload for storing the actual feature data so
+ * that the feature data transfer works as expected.
+ */
+#define FEAT_DATA_MIN_PAYLOAD_SIZE 10
+int cxl_set_feature(struct cxl_mailbox *cxl_mbox,
+ const uuid_t *feat_uuid, u8 feat_version,
+ const void *feat_data, size_t feat_data_size,
+ u32 feat_flag, u16 offset, u16 *return_code)
+{
+ size_t data_in_size, data_sent_size = 0;
+ struct cxl_mbox_cmd mbox_cmd;
+ size_t hdr_size;
+
+ if (return_code)
+ *return_code = CXL_MBOX_CMD_RC_INPUT;
+
+ struct cxl_mbox_set_feat_in *pi __free(kfree) =
+ kzalloc(cxl_mbox->payload_size, GFP_KERNEL);
+ if (!pi)
+ return -ENOMEM;
+
+ uuid_copy(&pi->uuid, feat_uuid);
+ pi->version = feat_version;
+ feat_flag &= ~CXL_SET_FEAT_FLAG_DATA_TRANSFER_MASK;
+ feat_flag |= CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET;
+ hdr_size = sizeof(pi->hdr);
+ /*
+ * Check minimum mbox payload size is available for
+ * the feature data transfer.
+ */
+ if (hdr_size + FEAT_DATA_MIN_PAYLOAD_SIZE > cxl_mbox->payload_size)
+ return -ENOMEM;
+
+ if (hdr_size + feat_data_size <= cxl_mbox->payload_size) {
+ pi->flags = cpu_to_le32(feat_flag |
+ CXL_SET_FEAT_FLAG_FULL_DATA_TRANSFER);
+ data_in_size = feat_data_size;
+ } else {
+ pi->flags = cpu_to_le32(feat_flag |
+ CXL_SET_FEAT_FLAG_INITIATE_DATA_TRANSFER);
+ data_in_size = cxl_mbox->payload_size - hdr_size;
+ }
+
+ do {
+ int rc;
+
+ pi->offset = cpu_to_le16(offset + data_sent_size);
+ memcpy(pi->feat_data, feat_data + data_sent_size, data_in_size);
+ mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_SET_FEATURE,
+ .size_in = hdr_size + data_in_size,
+ .payload_in = pi,
+ };
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+ if (rc < 0) {
+ if (return_code)
+ *return_code = mbox_cmd.return_code;
+ return rc;
+ }
+
+ data_sent_size += data_in_size;
+ if (data_sent_size >= feat_data_size) {
+ if (return_code)
+ *return_code = CXL_MBOX_CMD_RC_SUCCESS;
+ return 0;
+ }
+
+ if ((feat_data_size - data_sent_size) <= (cxl_mbox->payload_size - hdr_size)) {
+ data_in_size = feat_data_size - data_sent_size;
+ pi->flags = cpu_to_le32(feat_flag |
+ CXL_SET_FEAT_FLAG_FINISH_DATA_TRANSFER);
+ } else {
+ pi->flags = cpu_to_le32(feat_flag |
+ CXL_SET_FEAT_FLAG_CONTINUE_DATA_TRANSFER);
+ }
+ } while (true);
+}
+
+/* FWCTL support */
+
+static inline struct cxl_memdev *fwctl_to_memdev(struct fwctl_device *fwctl_dev)
+{
+ return to_cxl_memdev(fwctl_dev->dev.parent);
+}
+
+static int cxlctl_open_uctx(struct fwctl_uctx *uctx)
+{
+ return 0;
+}
+
+static void cxlctl_close_uctx(struct fwctl_uctx *uctx)
+{
+}
+
+static struct cxl_feat_entry *
+get_support_feature_info(struct cxl_features_state *cxlfs,
+ const struct fwctl_rpc_cxl *rpc_in)
+{
+ struct cxl_feat_entry *feat;
+ const uuid_t *uuid;
+
+ if (rpc_in->op_size < sizeof(uuid_t))
+ return ERR_PTR(-EINVAL);
+
+ uuid = &rpc_in->set_feat_in.uuid;
+
+ for (int i = 0; i < cxlfs->entries->num_features; i++) {
+ feat = &cxlfs->entries->ent[i];
+ if (uuid_equal(uuid, &feat->uuid))
+ return feat;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static void *cxlctl_get_supported_features(struct cxl_features_state *cxlfs,
+ const struct fwctl_rpc_cxl *rpc_in,
+ size_t *out_len)
+{
+ const struct cxl_mbox_get_sup_feats_in *feat_in;
+ struct cxl_mbox_get_sup_feats_out *feat_out;
+ struct cxl_feat_entry *pos;
+ size_t out_size;
+ int requested;
+ u32 count;
+ u16 start;
+ int i;
+
+ if (rpc_in->op_size != sizeof(*feat_in))
+ return ERR_PTR(-EINVAL);
+
+ feat_in = &rpc_in->get_sup_feats_in;
+ count = le32_to_cpu(feat_in->count);
+ start = le16_to_cpu(feat_in->start_idx);
+ requested = count / sizeof(*pos);
+
+ /*
+ * Make sure that the total requested number of entries is not greater
+ * than the total number of supported features allowed for userspace.
+ */
+ if (start >= cxlfs->entries->num_features)
+ return ERR_PTR(-EINVAL);
+
+ requested = min_t(int, requested, cxlfs->entries->num_features - start);
+
+ out_size = sizeof(struct fwctl_rpc_cxl_out) +
+ struct_size(feat_out, ents, requested);
+
+ struct fwctl_rpc_cxl_out *rpc_out __free(kvfree) =
+ kvzalloc(out_size, GFP_KERNEL);
+ if (!rpc_out)
+ return ERR_PTR(-ENOMEM);
+
+ rpc_out->size = struct_size(feat_out, ents, requested);
+ feat_out = &rpc_out->get_sup_feats_out;
+ if (requested == 0) {
+ feat_out->num_entries = cpu_to_le16(requested);
+ feat_out->supported_feats =
+ cpu_to_le16(cxlfs->entries->num_features);
+ rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
+ *out_len = out_size;
+ return no_free_ptr(rpc_out);
+ }
+
+ for (i = start, pos = &feat_out->ents[0];
+ i < cxlfs->entries->num_features; i++, pos++) {
+ if (i - start == requested)
+ break;
+
+ memcpy(pos, &cxlfs->entries->ent[i], sizeof(*pos));
+ /*
+ * If the feature is exclusive, set the set_feat_size to 0 to
+ * indicate that the feature is not changeable.
+ */
+ if (is_cxl_feature_exclusive(pos)) {
+ u32 flags;
+
+ pos->set_feat_size = 0;
+ flags = le32_to_cpu(pos->flags);
+ flags &= ~CXL_FEATURE_F_CHANGEABLE;
+ pos->flags = cpu_to_le32(flags);
+ }
+ }
+
+ feat_out->num_entries = cpu_to_le16(requested);
+ feat_out->supported_feats = cpu_to_le16(cxlfs->entries->num_features);
+ rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
+ *out_len = out_size;
+
+ return no_free_ptr(rpc_out);
+}
+
+static void *cxlctl_get_feature(struct cxl_features_state *cxlfs,
+ const struct fwctl_rpc_cxl *rpc_in,
+ size_t *out_len)
+{
+ struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
+ const struct cxl_mbox_get_feat_in *feat_in;
+ u16 offset, count, return_code;
+ size_t out_size = *out_len;
+
+ if (rpc_in->op_size != sizeof(*feat_in))
+ return ERR_PTR(-EINVAL);
+
+ feat_in = &rpc_in->get_feat_in;
+ offset = le16_to_cpu(feat_in->offset);
+ count = le16_to_cpu(feat_in->count);
+
+ if (!count)
+ return ERR_PTR(-EINVAL);
+
+ struct fwctl_rpc_cxl_out *rpc_out __free(kvfree) =
+ kvzalloc(out_size, GFP_KERNEL);
+ if (!rpc_out)
+ return ERR_PTR(-ENOMEM);
+
+ out_size = cxl_get_feature(cxl_mbox, &feat_in->uuid,
+ feat_in->selection, rpc_out->payload,
+ count, offset, &return_code);
+ *out_len = sizeof(struct fwctl_rpc_cxl_out);
+ if (!out_size) {
+ rpc_out->size = 0;
+ rpc_out->retval = return_code;
+ return no_free_ptr(rpc_out);
+ }
+
+ rpc_out->size = out_size;
+ rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
+ *out_len += out_size;
+
+ return no_free_ptr(rpc_out);
+}
+
+static void *cxlctl_set_feature(struct cxl_features_state *cxlfs,
+ const struct fwctl_rpc_cxl *rpc_in,
+ size_t *out_len)
+{
+ struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
+ const struct cxl_mbox_set_feat_in *feat_in;
+ size_t out_size, data_size;
+ u16 offset, return_code;
+ u32 flags;
+ int rc;
+
+ if (rpc_in->op_size <= sizeof(feat_in->hdr))
+ return ERR_PTR(-EINVAL);
+
+ feat_in = &rpc_in->set_feat_in;
+
+ if (is_cxl_feature_exclusive_by_uuid(&feat_in->uuid))
+ return ERR_PTR(-EPERM);
+
+ offset = le16_to_cpu(feat_in->offset);
+ flags = le32_to_cpu(feat_in->flags);
+ out_size = *out_len;
+
+ struct fwctl_rpc_cxl_out *rpc_out __free(kvfree) =
+ kvzalloc(out_size, GFP_KERNEL);
+ if (!rpc_out)
+ return ERR_PTR(-ENOMEM);
+
+ rpc_out->size = 0;
+
+ data_size = rpc_in->op_size - sizeof(feat_in->hdr);
+ rc = cxl_set_feature(cxl_mbox, &feat_in->uuid,
+ feat_in->version, feat_in->feat_data,
+ data_size, flags, offset, &return_code);
+ if (rc) {
+ rpc_out->retval = return_code;
+ return no_free_ptr(rpc_out);
+ }
+
+ rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
+ *out_len = sizeof(*rpc_out);
+
+ return no_free_ptr(rpc_out);
+}
+
+static bool cxlctl_validate_set_features(struct cxl_features_state *cxlfs,
+ const struct fwctl_rpc_cxl *rpc_in,
+ enum fwctl_rpc_scope scope)
+{
+ u16 effects, imm_mask, reset_mask;
+ struct cxl_feat_entry *feat;
+ u32 flags;
+
+ feat = get_support_feature_info(cxlfs, rpc_in);
+ if (IS_ERR(feat))
+ return false;
+
+ /* Ensure that the attribute is changeable */
+ flags = le32_to_cpu(feat->flags);
+ if (!(flags & CXL_FEATURE_F_CHANGEABLE))
+ return false;
+
+ effects = le16_to_cpu(feat->effects);
+
+ /*
+ * Reject when reserved bits are set, since the effects are not
+ * comprehended by the driver.
+ */
+ if (effects & CXL_CMD_EFFECTS_RESERVED) {
+ dev_warn_once(cxlfs->cxlds->dev,
+ "Reserved bits set in the Feature effects field!\n");
+ return false;
+ }
+
+ /* Currently no user background command support */
+ if (effects & CXL_CMD_BACKGROUND)
+ return false;
+
+ /* Effects cause immediate change, highest security scope is needed */
+ imm_mask = CXL_CMD_CONFIG_CHANGE_IMMEDIATE |
+ CXL_CMD_DATA_CHANGE_IMMEDIATE |
+ CXL_CMD_POLICY_CHANGE_IMMEDIATE |
+ CXL_CMD_LOG_CHANGE_IMMEDIATE;
+
+ reset_mask = CXL_CMD_CONFIG_CHANGE_COLD_RESET |
+ CXL_CMD_CONFIG_CHANGE_CONV_RESET |
+ CXL_CMD_CONFIG_CHANGE_CXL_RESET;
+
+ /* If neither an immediate nor a reset effect is set, the hardware has a bug */
+ if (!(effects & imm_mask) && !(effects & reset_mask))
+ return false;
+
+ /*
+ * If the Feature setting causes an immediate configuration change,
+ * then we need the full write permission policy.
+ */
+ if (effects & imm_mask && scope >= FWCTL_RPC_DEBUG_WRITE_FULL)
+ return true;
+
+ /*
+ * If the Feature setting only causes configuration change
+ * after a reset, then the lesser level of write permission
+ * policy is ok.
+ */
+ if (!(effects & imm_mask) && scope >= FWCTL_RPC_DEBUG_WRITE)
+ return true;
+
+ return false;
+}
+
+static bool cxlctl_validate_hw_command(struct cxl_features_state *cxlfs,
+ const struct fwctl_rpc_cxl *rpc_in,
+ enum fwctl_rpc_scope scope,
+ u16 opcode)
+{
+ struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
+
+ switch (opcode) {
+ case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
+ case CXL_MBOX_OP_GET_FEATURE:
+ if (cxl_mbox->feat_cap < CXL_FEATURES_RO)
+ return false;
+ if (scope >= FWCTL_RPC_CONFIGURATION)
+ return true;
+ return false;
+ case CXL_MBOX_OP_SET_FEATURE:
+ if (cxl_mbox->feat_cap < CXL_FEATURES_RW)
+ return false;
+ return cxlctl_validate_set_features(cxlfs, rpc_in, scope);
+ default:
+ return false;
+ }
+}
+
+static void *cxlctl_handle_commands(struct cxl_features_state *cxlfs,
+ const struct fwctl_rpc_cxl *rpc_in,
+ size_t *out_len, u16 opcode)
+{
+ switch (opcode) {
+ case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
+ return cxlctl_get_supported_features(cxlfs, rpc_in, out_len);
+ case CXL_MBOX_OP_GET_FEATURE:
+ return cxlctl_get_feature(cxlfs, rpc_in, out_len);
+ case CXL_MBOX_OP_SET_FEATURE:
+ return cxlctl_set_feature(cxlfs, rpc_in, out_len);
+ default:
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+}
+
+static void *cxlctl_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
+ void *in, size_t in_len, size_t *out_len)
+{
+ struct fwctl_device *fwctl_dev = uctx->fwctl;
+ struct cxl_memdev *cxlmd = fwctl_to_memdev(fwctl_dev);
+ struct cxl_features_state *cxlfs = to_cxlfs(cxlmd->cxlds);
+ const struct fwctl_rpc_cxl *rpc_in = in;
+ u16 opcode = rpc_in->opcode;
+
+ if (!cxlctl_validate_hw_command(cxlfs, rpc_in, scope, opcode))
+ return ERR_PTR(-EINVAL);
+
+ return cxlctl_handle_commands(cxlfs, rpc_in, out_len, opcode);
+}
+
+static const struct fwctl_ops cxlctl_ops = {
+ .device_type = FWCTL_DEVICE_TYPE_CXL,
+ .uctx_size = sizeof(struct fwctl_uctx),
+ .open_uctx = cxlctl_open_uctx,
+ .close_uctx = cxlctl_close_uctx,
+ .fw_rpc = cxlctl_fw_rpc,
+};
+
+DEFINE_FREE(free_fwctl_dev, struct fwctl_device *, if (_T) fwctl_put(_T))
+
+static void free_memdev_fwctl(void *_fwctl_dev)
+{
+ struct fwctl_device *fwctl_dev = _fwctl_dev;
+
+ fwctl_unregister(fwctl_dev);
+ fwctl_put(fwctl_dev);
+}
+
+int devm_cxl_setup_fwctl(struct cxl_memdev *cxlmd)
+{
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_features_state *cxlfs;
+ int rc;
+
+ cxlfs = to_cxlfs(cxlds);
+ if (!cxlfs)
+ return -ENODEV;
+
+ /* No need to set up FWCTL if no user-allowed features are found */
+ if (!cxlfs->entries->num_user_features)
+ return -ENODEV;
+
+ struct fwctl_device *fwctl_dev __free(free_fwctl_dev) =
+ _fwctl_alloc_device(&cxlmd->dev, &cxlctl_ops, sizeof(*fwctl_dev));
+ if (!fwctl_dev)
+ return -ENOMEM;
+
+ rc = fwctl_register(fwctl_dev);
+ if (rc)
+ return rc;
+
+ return devm_add_action_or_reset(&cxlmd->dev, free_memdev_fwctl,
+ no_free_ptr(fwctl_dev));
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fwctl, "CXL");
+
+MODULE_IMPORT_NS("FWCTL");
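
For reference, a minimal sketch of the chunked Set Feature transfer that cxl_set_feature() performs when the feature data does not fit in a single mailbox payload, reduced to the offset and data-transfer-flag bookkeeping. send_chunk(), the flag names and the stub transport are hypothetical stand-ins, not driver or spec symbols.

/* Illustrative sketch only; not part of the patch. */
#include <stddef.h>
#include <stdio.h>

enum xfer_flag { XFER_FULL, XFER_INITIATE, XFER_CONTINUE, XFER_FINISH };

/* Hypothetical transport hook: one mailbox command's worth of data. */
static int send_chunk(enum xfer_flag flag, size_t offset,
		      const void *data, size_t len)
{
	printf("flag=%d offset=%zu len=%zu\n", flag, offset, len);
	return 0;
}

static int set_feature_chunked(const void *data, size_t total,
			       size_t max_payload)
{
	size_t sent = 0;
	size_t len = (total <= max_payload) ? total : max_payload;
	enum xfer_flag flag = (total <= max_payload) ? XFER_FULL : XFER_INITIATE;

	do {
		int rc = send_chunk(flag, sent, (const char *)data + sent, len);

		if (rc)
			return rc;
		sent += len;
		if (sent >= total)
			return 0;

		/* Remaining data fits in one payload: mark it as the last chunk. */
		if (total - sent <= max_payload) {
			len = total - sent;
			flag = XFER_FINISH;
		} else {
			len = max_payload;
			flag = XFER_CONTINUE;
		}
	} while (1);
}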
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 548564c770c0..78c5346e3e89 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -349,40 +349,39 @@ static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
return true;
}
-static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox,
- struct cxl_memdev_state *mds, u16 opcode,
+static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox_cmd,
+ struct cxl_mailbox *cxl_mbox, u16 opcode,
size_t in_size, size_t out_size, u64 in_payload)
{
- struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
- *mbox = (struct cxl_mbox_cmd) {
+ *mbox_cmd = (struct cxl_mbox_cmd) {
.opcode = opcode,
.size_in = in_size,
};
if (in_size) {
- mbox->payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
- in_size);
- if (IS_ERR(mbox->payload_in))
- return PTR_ERR(mbox->payload_in);
+ mbox_cmd->payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
+ in_size);
+ if (IS_ERR(mbox_cmd->payload_in))
+ return PTR_ERR(mbox_cmd->payload_in);
- if (!cxl_payload_from_user_allowed(opcode, mbox->payload_in)) {
- dev_dbg(mds->cxlds.dev, "%s: input payload not allowed\n",
+ if (!cxl_payload_from_user_allowed(opcode, mbox_cmd->payload_in)) {
+ dev_dbg(cxl_mbox->host, "%s: input payload not allowed\n",
cxl_mem_opcode_to_name(opcode));
- kvfree(mbox->payload_in);
+ kvfree(mbox_cmd->payload_in);
return -EBUSY;
}
}
/* Prepare to handle a full payload for variable sized output */
if (out_size == CXL_VARIABLE_PAYLOAD)
- mbox->size_out = cxl_mbox->payload_size;
+ mbox_cmd->size_out = cxl_mbox->payload_size;
else
- mbox->size_out = out_size;
+ mbox_cmd->size_out = out_size;
- if (mbox->size_out) {
- mbox->payload_out = kvzalloc(mbox->size_out, GFP_KERNEL);
- if (!mbox->payload_out) {
- kvfree(mbox->payload_in);
+ if (mbox_cmd->size_out) {
+ mbox_cmd->payload_out = kvzalloc(mbox_cmd->size_out, GFP_KERNEL);
+ if (!mbox_cmd->payload_out) {
+ kvfree(mbox_cmd->payload_in);
return -ENOMEM;
}
}
@@ -397,10 +396,8 @@ static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox)
static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
const struct cxl_send_command *send_cmd,
- struct cxl_memdev_state *mds)
+ struct cxl_mailbox *cxl_mbox)
{
- struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
-
if (send_cmd->raw.rsvd)
return -EINVAL;
@@ -415,7 +412,7 @@ static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
return -EPERM;
- dev_WARN_ONCE(mds->cxlds.dev, true, "raw command path used\n");
+ dev_WARN_ONCE(cxl_mbox->host, true, "raw command path used\n");
*mem_cmd = (struct cxl_mem_command) {
.info = {
@@ -431,7 +428,7 @@ static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
const struct cxl_send_command *send_cmd,
- struct cxl_memdev_state *mds)
+ struct cxl_mailbox *cxl_mbox)
{
struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id];
const struct cxl_command_info *info = &c->info;
@@ -446,11 +443,11 @@ static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
return -EINVAL;
/* Check that the command is enabled for hardware */
- if (!test_bit(info->id, mds->enabled_cmds))
+ if (!test_bit(info->id, cxl_mbox->enabled_cmds))
return -ENOTTY;
/* Check that the command is not claimed for exclusive kernel use */
- if (test_bit(info->id, mds->exclusive_cmds))
+ if (test_bit(info->id, cxl_mbox->exclusive_cmds))
return -EBUSY;
/* Check the input buffer is the expected size */
@@ -479,7 +476,7 @@ static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
/**
* cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
* @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd.
- * @mds: The driver data for the operation
+ * @cxl_mbox: CXL mailbox context
* @send_cmd: &struct cxl_send_command copied in from userspace.
*
* Return:
@@ -494,10 +491,9 @@ static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
* safe to send to the hardware.
*/
static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
- struct cxl_memdev_state *mds,
+ struct cxl_mailbox *cxl_mbox,
const struct cxl_send_command *send_cmd)
{
- struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mem_command mem_cmd;
int rc;
@@ -514,24 +510,23 @@ static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
/* Sanitize and construct a cxl_mem_command */
if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW)
- rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, mds);
+ rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, cxl_mbox);
else
- rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, mds);
+ rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, cxl_mbox);
if (rc)
return rc;
/* Sanitize and construct a cxl_mbox_cmd */
- return cxl_mbox_cmd_ctor(mbox_cmd, mds, mem_cmd.opcode,
+ return cxl_mbox_cmd_ctor(mbox_cmd, cxl_mbox, mem_cmd.opcode,
mem_cmd.info.size_in, mem_cmd.info.size_out,
send_cmd->in.payload);
}
-int cxl_query_cmd(struct cxl_memdev *cxlmd,
+int cxl_query_cmd(struct cxl_mailbox *cxl_mbox,
struct cxl_mem_query_commands __user *q)
{
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
- struct device *dev = &cxlmd->dev;
+ struct device *dev = cxl_mbox->host;
struct cxl_mem_command *cmd;
u32 n_commands;
int j = 0;
@@ -552,9 +547,9 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd,
cxl_for_each_cmd(cmd) {
struct cxl_command_info info = cmd->info;
- if (test_bit(info.id, mds->enabled_cmds))
+ if (test_bit(info.id, cxl_mbox->enabled_cmds))
info.flags |= CXL_MEM_COMMAND_FLAG_ENABLED;
- if (test_bit(info.id, mds->exclusive_cmds))
+ if (test_bit(info.id, cxl_mbox->exclusive_cmds))
info.flags |= CXL_MEM_COMMAND_FLAG_EXCLUSIVE;
if (copy_to_user(&q->commands[j++], &info, sizeof(info)))
@@ -569,7 +564,7 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd,
/**
* handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
- * @mds: The driver data for the operation
+ * @cxl_mbox: The mailbox context for the operation.
* @mbox_cmd: The validated mailbox command.
* @out_payload: Pointer to userspace's output payload.
* @size_out: (Input) Max payload size to copy out.
@@ -590,13 +585,12 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd,
*
* See cxl_send_cmd().
*/
-static int handle_mailbox_cmd_from_user(struct cxl_memdev_state *mds,
+static int handle_mailbox_cmd_from_user(struct cxl_mailbox *cxl_mbox,
struct cxl_mbox_cmd *mbox_cmd,
u64 out_payload, s32 *size_out,
u32 *retval)
{
- struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
- struct device *dev = mds->cxlds.dev;
+ struct device *dev = cxl_mbox->host;
int rc;
dev_dbg(dev,
@@ -633,10 +627,9 @@ out:
return rc;
}
-int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
+int cxl_send_cmd(struct cxl_mailbox *cxl_mbox, struct cxl_send_command __user *s)
{
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
- struct device *dev = &cxlmd->dev;
+ struct device *dev = cxl_mbox->host;
struct cxl_send_command send;
struct cxl_mbox_cmd mbox_cmd;
int rc;
@@ -646,11 +639,11 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
if (copy_from_user(&send, s, sizeof(send)))
return -EFAULT;
- rc = cxl_validate_cmd_from_user(&mbox_cmd, mds, &send);
+ rc = cxl_validate_cmd_from_user(&mbox_cmd, cxl_mbox, &send);
if (rc)
return rc;
- rc = handle_mailbox_cmd_from_user(mds, &mbox_cmd, send.out.payload,
+ rc = handle_mailbox_cmd_from_user(cxl_mbox, &mbox_cmd, send.out.payload,
&send.out.size, &send.retval);
if (rc)
return rc;
@@ -713,6 +706,35 @@ static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid,
return 0;
}
+static int check_features_opcodes(u16 opcode, int *ro_cmds, int *wr_cmds)
+{
+ switch (opcode) {
+ case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
+ case CXL_MBOX_OP_GET_FEATURE:
+ (*ro_cmds)++;
+ return 1;
+ case CXL_MBOX_OP_SET_FEATURE:
+ (*wr_cmds)++;
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/* 'Get Supported Features' and 'Get Feature' */
+#define MAX_FEATURES_READ_CMDS 2
+static void set_features_cap(struct cxl_mailbox *cxl_mbox,
+ int ro_cmds, int wr_cmds)
+{
+ /* Setting up Features capability while walking the CEL */
+ if (ro_cmds == MAX_FEATURES_READ_CMDS) {
+ if (wr_cmds)
+ cxl_mbox->feat_cap = CXL_FEATURES_RW;
+ else
+ cxl_mbox->feat_cap = CXL_FEATURES_RO;
+ }
+}
+
/**
* cxl_walk_cel() - Walk through the Command Effects Log.
* @mds: The driver data for the operation
@@ -724,10 +746,11 @@ static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid,
*/
static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_cel_entry *cel_entry;
const int cel_entries = size / sizeof(*cel_entry);
struct device *dev = mds->cxlds.dev;
- int i;
+ int i, ro_cmds = 0, wr_cmds = 0;
cel_entry = (struct cxl_cel_entry *) cel;
@@ -737,10 +760,13 @@ static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
int enabled = 0;
if (cmd) {
- set_bit(cmd->info.id, mds->enabled_cmds);
+ set_bit(cmd->info.id, cxl_mbox->enabled_cmds);
enabled++;
}
+ enabled += check_features_opcodes(opcode, &ro_cmds,
+ &wr_cmds);
+
if (cxl_is_poison_command(opcode)) {
cxl_set_poison_cmd_enabled(&mds->poison, opcode);
enabled++;
@@ -754,6 +780,8 @@ static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
dev_dbg(dev, "Opcode 0x%04x %s\n", opcode,
enabled ? "enabled" : "unsupported by driver");
}
+
+ set_features_cap(cxl_mbox, ro_cmds, wr_cmds);
}
static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_memdev_state *mds)
@@ -807,6 +835,7 @@ static const uuid_t log_uuid[] = {
*/
int cxl_enumerate_cmds(struct cxl_memdev_state *mds)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mbox_get_supported_logs *gsl;
struct device *dev = mds->cxlds.dev;
struct cxl_mem_command *cmd;
@@ -845,7 +874,7 @@ int cxl_enumerate_cmds(struct cxl_memdev_state *mds)
/* In case CEL was bogus, enable some default commands. */
cxl_for_each_cmd(cmd)
if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
- set_bit(cmd->info.id, mds->enabled_cmds);
+ set_bit(cmd->info.id, cxl_mbox->enabled_cmds);
/* Found the required CEL */
rc = 0;
@@ -1448,6 +1477,7 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
mutex_init(&mds->event.log_lock);
mds->cxlds.dev = dev;
mds->cxlds.reg_map.host = dev;
+ mds->cxlds.cxl_mbox.host = dev;
mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
mds->ram_perf.qos_class = CXL_QOS_CLASS_INVALID;
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index ae3dfcbe8938..2e2e035abdaa 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -564,9 +564,11 @@ EXPORT_SYMBOL_NS_GPL(is_cxl_memdev, "CXL");
void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
unsigned long *cmds)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+
down_write(&cxl_memdev_rwsem);
- bitmap_or(mds->exclusive_cmds, mds->exclusive_cmds, cmds,
- CXL_MEM_COMMAND_ID_MAX);
+ bitmap_or(cxl_mbox->exclusive_cmds, cxl_mbox->exclusive_cmds,
+ cmds, CXL_MEM_COMMAND_ID_MAX);
up_write(&cxl_memdev_rwsem);
}
EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, "CXL");
@@ -579,9 +581,11 @@ EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, "CXL");
void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
unsigned long *cmds)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+
down_write(&cxl_memdev_rwsem);
- bitmap_andnot(mds->exclusive_cmds, mds->exclusive_cmds, cmds,
- CXL_MEM_COMMAND_ID_MAX);
+ bitmap_andnot(cxl_mbox->exclusive_cmds, cxl_mbox->exclusive_cmds,
+ cmds, CXL_MEM_COMMAND_ID_MAX);
up_write(&cxl_memdev_rwsem);
}
EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, "CXL");
@@ -656,11 +660,14 @@ err:
static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
unsigned long arg)
{
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+
switch (cmd) {
case CXL_MEM_QUERY_COMMANDS:
- return cxl_query_cmd(cxlmd, (void __user *)arg);
+ return cxl_query_cmd(cxl_mbox, (void __user *)arg);
case CXL_MEM_SEND_COMMAND:
- return cxl_send_cmd(cxlmd, (void __user *)arg);
+ return cxl_send_cmd(cxl_mbox, (void __user *)arg);
default:
return -ENOTTY;
}
@@ -994,10 +1001,11 @@ static void cxl_remove_fw_upload(void *fwl)
int devm_cxl_setup_fw_upload(struct device *host, struct cxl_memdev_state *mds)
{
struct cxl_dev_state *cxlds = &mds->cxlds;
+ struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
struct device *dev = &cxlds->cxlmd->dev;
struct fw_upload *fwl;
- if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, mds->enabled_cmds))
+ if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, cxl_mbox->enabled_cmds))
return 0;
fwl = firmware_upload_register(THIS_MODULE, dev, dev_name(dev),
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 2a25d1957ddb..dd2b7060d501 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -106,42 +106,6 @@ static inline struct cxl_ep *cxl_ep_load(struct cxl_port *port,
return xa_load(&port->endpoints, (unsigned long)&cxlmd->dev);
}
-/**
- * struct cxl_mbox_cmd - A command to be submitted to hardware.
- * @opcode: (input) The command set and command submitted to hardware.
- * @payload_in: (input) Pointer to the input payload.
- * @payload_out: (output) Pointer to the output payload. Must be allocated by
- * the caller.
- * @size_in: (input) Number of bytes to load from @payload_in.
- * @size_out: (input) Max number of bytes loaded into @payload_out.
- * (output) Number of bytes generated by the device. For fixed size
- * outputs commands this is always expected to be deterministic. For
- * variable sized output commands, it tells the exact number of bytes
- * written.
- * @min_out: (input) internal command output payload size validation
- * @poll_count: (input) Number of timeouts to attempt.
- * @poll_interval_ms: (input) Time between mailbox background command polling
- * interval timeouts.
- * @return_code: (output) Error code returned from hardware.
- *
- * This is the primary mechanism used to send commands to the hardware.
- * All the fields except @payload_* correspond exactly to the fields described in
- * Command Register section of the CXL 2.0 8.2.8.4.5. @payload_in and
- * @payload_out are written to, and read from the Command Payload Registers
- * defined in CXL 2.0 8.2.8.4.8.
- */
-struct cxl_mbox_cmd {
- u16 opcode;
- void *payload_in;
- void *payload_out;
- size_t size_in;
- size_t size_out;
- size_t min_out;
- int poll_count;
- int poll_interval_ms;
- u16 return_code;
-};
-
/*
* Per CXL 3.0 Section 8.2.8.4.5.1
*/
@@ -428,6 +392,7 @@ struct cxl_dpa_perf {
* @serial: PCIe Device Serial Number
* @type: Generic Memory Class device or Vendor Specific Memory device
* @cxl_mbox: CXL mailbox context
+ * @cxlfs: CXL features context
*/
struct cxl_dev_state {
struct device *dev;
@@ -443,6 +408,9 @@ struct cxl_dev_state {
u64 serial;
enum cxl_devtype type;
struct cxl_mailbox cxl_mbox;
+#ifdef CONFIG_CXL_FEATURES
+ struct cxl_features_state *cxlfs;
+#endif
};
static inline struct cxl_dev_state *mbox_to_cxlds(struct cxl_mailbox *cxl_mbox)
@@ -461,8 +429,6 @@ static inline struct cxl_dev_state *mbox_to_cxlds(struct cxl_mailbox *cxl_mbox)
* @lsa_size: Size of Label Storage Area
* (CXL 2.0 8.2.9.5.1.1 Identify Memory Device)
* @firmware_version: Firmware version for the memory device.
- * @enabled_cmds: Hardware commands found enabled in CEL.
- * @exclusive_cmds: Commands that are kernel-internal only
* @total_bytes: sum of all possible capacities
* @volatile_only_bytes: hard volatile capacity
* @persistent_only_bytes: hard persistent capacity
@@ -485,8 +451,6 @@ struct cxl_memdev_state {
struct cxl_dev_state cxlds;
size_t lsa_size;
char firmware_version[0x10];
- DECLARE_BITMAP(enabled_cmds, CXL_MEM_COMMAND_ID_MAX);
- DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
u64 total_bytes;
u64 volatile_only_bytes;
u64 persistent_only_bytes;
@@ -530,6 +494,9 @@ enum cxl_opcode {
CXL_MBOX_OP_GET_LOG_CAPS = 0x0402,
CXL_MBOX_OP_CLEAR_LOG = 0x0403,
CXL_MBOX_OP_GET_SUP_LOG_SUBLIST = 0x0405,
+ CXL_MBOX_OP_GET_SUPPORTED_FEATURES = 0x0500,
+ CXL_MBOX_OP_GET_FEATURE = 0x0501,
+ CXL_MBOX_OP_SET_FEATURE = 0x0502,
CXL_MBOX_OP_IDENTIFY = 0x4000,
CXL_MBOX_OP_GET_PARTITION_INFO = 0x4100,
CXL_MBOX_OP_SET_PARTITION_INFO = 0x4101,
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index a96e54c6259e..993fa60fe453 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -997,6 +997,10 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
+ rc = devm_cxl_setup_features(cxlds);
+ if (rc)
+ dev_dbg(&pdev->dev, "No CXL Features discovered\n");
+
cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);
@@ -1009,6 +1013,10 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
+ rc = devm_cxl_setup_fwctl(cxlmd);
+ if (rc)
+ dev_dbg(&pdev->dev, "No CXL FWCTL setup\n");
+
pmu_count = cxl_count_regblock(pdev, CXL_REGLOC_RBI_PMU);
if (pmu_count < 0)
return pmu_count;
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
index e2a1e4463b6f..0470d7c175f4 100644
--- a/drivers/devfreq/event/rockchip-dfi.c
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -642,8 +642,7 @@ static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi)
if (ret)
return ret;
- hrtimer_init(&dfi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- dfi->timer.function = rockchip_dfi_timer;
+ hrtimer_setup(&dfi->timer, rockchip_dfi_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
switch (dfi->ddr_type) {
case ROCKCHIP_DDRTYPE_LPDDR2:
diff --git a/drivers/dma-buf/dma-fence-unwrap.c b/drivers/dma-buf/dma-fence-unwrap.c
index 6345062731f1..2a059ac0ed27 100644
--- a/drivers/dma-buf/dma-fence-unwrap.c
+++ b/drivers/dma-buf/dma-fence-unwrap.c
@@ -84,8 +84,8 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
struct dma_fence **fences,
struct dma_fence_unwrap *iter)
{
+ struct dma_fence *tmp, *unsignaled = NULL, **array;
struct dma_fence_array *result;
- struct dma_fence *tmp, **array;
ktime_t timestamp;
int i, j, count;
@@ -94,6 +94,8 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
for (i = 0; i < num_fences; ++i) {
dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
if (!dma_fence_is_signaled(tmp)) {
+ dma_fence_put(unsignaled);
+ unsignaled = dma_fence_get(tmp);
++count;
} else {
ktime_t t = dma_fence_timestamp(tmp);
@@ -107,9 +109,16 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
/*
* If we couldn't find a pending fence just return a private signaled
* fence with the timestamp of the last signaled one.
+ *
+ * Or, if only a single unsignaled fence is left, return it directly
+ * and early, since that is a major path in many workloads.
*/
if (count == 0)
return dma_fence_allocate_private_stub(timestamp);
+ else if (count == 1)
+ return unsignaled;
+
+ dma_fence_put(unsignaled);
array = kmalloc_array(count, sizeof(*array), GFP_KERNEL);
if (!array)
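The effect of the new early return: when exactly one unsignaled fence remains after unwrapping, __dma_fence_unwrap_merge() hands back that fence (with the reference it took) instead of building a one-entry dma_fence_array around it. A hedged usage sketch with two hypothetical fences, assuming f_pending is a plain fence rather than a container:

	/* f_done is already signaled, f_pending is not. */
	struct dma_fence *out = dma_fence_unwrap_merge(f_done, f_pending);

	/*
	 * With the count == 1 fast path, out is f_pending itself rather
	 * than an array wrapping it; either way the caller owns the
	 * returned reference.
	 */
	dma_fence_put(out);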
diff --git a/drivers/dma-buf/st-dma-fence-unwrap.c b/drivers/dma-buf/st-dma-fence-unwrap.c
index f0cee984b6c7..a3be888ae2e8 100644
--- a/drivers/dma-buf/st-dma-fence-unwrap.c
+++ b/drivers/dma-buf/st-dma-fence-unwrap.c
@@ -28,7 +28,7 @@ static const struct dma_fence_ops mock_ops = {
.get_timeline_name = mock_name,
};
-static struct dma_fence *mock_fence(void)
+static struct dma_fence *__mock_fence(u64 context, u64 seqno)
{
struct mock_fence *f;
@@ -37,12 +37,16 @@ static struct dma_fence *mock_fence(void)
return NULL;
spin_lock_init(&f->lock);
- dma_fence_init(&f->base, &mock_ops, &f->lock,
- dma_fence_context_alloc(1), 1);
+ dma_fence_init(&f->base, &mock_ops, &f->lock, context, seqno);
return &f->base;
}
+static struct dma_fence *mock_fence(void)
+{
+ return __mock_fence(dma_fence_context_alloc(1), 1);
+}
+
static struct dma_fence *mock_array(unsigned int num_fences, ...)
{
struct dma_fence_array *array;
@@ -304,6 +308,177 @@ error_put_f1:
return err;
}
+static int unwrap_merge_duplicate(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2;
+ struct dma_fence_unwrap iter;
+ int err = 0;
+
+ f1 = mock_fence();
+ if (!f1)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f1);
+
+ f2 = dma_fence_unwrap_merge(f1, f1);
+ if (!f2) {
+ err = -ENOMEM;
+ goto error_put_f1;
+ }
+
+ dma_fence_unwrap_for_each(fence, &iter, f2) {
+ if (fence == f1) {
+ dma_fence_put(f1);
+ f1 = NULL;
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f1) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_put(f2);
+error_put_f1:
+ dma_fence_put(f1);
+ return err;
+}
+
+static int unwrap_merge_seqno(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2, *f3, *f4;
+ struct dma_fence_unwrap iter;
+ int err = 0;
+ u64 ctx[2];
+
+ ctx[0] = dma_fence_context_alloc(1);
+ ctx[1] = dma_fence_context_alloc(1);
+
+ f1 = __mock_fence(ctx[1], 1);
+ if (!f1)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f1);
+
+ f2 = __mock_fence(ctx[1], 2);
+ if (!f2) {
+ err = -ENOMEM;
+ goto error_put_f1;
+ }
+
+ dma_fence_enable_sw_signaling(f2);
+
+ f3 = __mock_fence(ctx[0], 1);
+ if (!f3) {
+ err = -ENOMEM;
+ goto error_put_f2;
+ }
+
+ dma_fence_enable_sw_signaling(f3);
+
+ f4 = dma_fence_unwrap_merge(f1, f2, f3);
+ if (!f4) {
+ err = -ENOMEM;
+ goto error_put_f3;
+ }
+
+ dma_fence_unwrap_for_each(fence, &iter, f4) {
+ if (fence == f3 && f2) {
+ dma_fence_put(f3);
+ f3 = NULL;
+ } else if (fence == f2 && !f3) {
+ dma_fence_put(f2);
+ f2 = NULL;
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f2 || f3) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_put(f4);
+error_put_f3:
+ dma_fence_put(f3);
+error_put_f2:
+ dma_fence_put(f2);
+error_put_f1:
+ dma_fence_put(f1);
+ return err;
+}
+
+static int unwrap_merge_order(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2, *a1, *a2, *c1, *c2;
+ struct dma_fence_unwrap iter;
+ int err = 0;
+
+ f1 = mock_fence();
+ if (!f1)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f1);
+
+ f2 = mock_fence();
+ if (!f2) {
+ dma_fence_put(f1);
+ return -ENOMEM;
+ }
+
+ dma_fence_enable_sw_signaling(f2);
+
+ a1 = mock_array(2, f1, f2);
+ if (!a1)
+ return -ENOMEM;
+
+ c1 = mock_chain(NULL, dma_fence_get(f1));
+ if (!c1)
+ goto error_put_a1;
+
+ c2 = mock_chain(c1, dma_fence_get(f2));
+ if (!c2)
+ goto error_put_a1;
+
+ /*
+ * The fences in the chain are the same as in a1 but in opposite order;
+ * the dma_fence_unwrap_merge() function should be able to handle that.
+ */
+ a2 = dma_fence_unwrap_merge(a1, c2);
+
+ dma_fence_unwrap_for_each(fence, &iter, a2) {
+ if (fence == f1) {
+ f1 = NULL;
+ if (!f2)
+ pr_err("Unexpected order!\n");
+ } else if (fence == f2) {
+ f2 = NULL;
+ if (f1)
+ pr_err("Unexpected order!\n");
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f1 || f2) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_put(a2);
+ return err;
+
+error_put_a1:
+ dma_fence_put(a1);
+ return -ENOMEM;
+}
+
static int unwrap_merge_complex(void *arg)
{
struct dma_fence *fence, *f1, *f2, *f3, *f4, *f5;
@@ -327,7 +502,7 @@ static int unwrap_merge_complex(void *arg)
goto error_put_f2;
/* The resulting array has the fences in reverse */
- f4 = dma_fence_unwrap_merge(f2, f1);
+ f4 = mock_array(2, dma_fence_get(f2), dma_fence_get(f1));
if (!f4)
goto error_put_f3;
@@ -367,6 +542,87 @@ error_put_f1:
return err;
}
+static int unwrap_merge_complex_seqno(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2, *f3, *f4, *f5, *f6, *f7;
+ struct dma_fence_unwrap iter;
+ int err = -ENOMEM;
+ u64 ctx[2];
+
+ ctx[0] = dma_fence_context_alloc(1);
+ ctx[1] = dma_fence_context_alloc(1);
+
+ f1 = __mock_fence(ctx[0], 2);
+ if (!f1)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f1);
+
+ f2 = __mock_fence(ctx[1], 1);
+ if (!f2)
+ goto error_put_f1;
+
+ dma_fence_enable_sw_signaling(f2);
+
+ f3 = __mock_fence(ctx[0], 1);
+ if (!f3)
+ goto error_put_f2;
+
+ dma_fence_enable_sw_signaling(f3);
+
+ f4 = __mock_fence(ctx[1], 2);
+ if (!f4)
+ goto error_put_f3;
+
+ dma_fence_enable_sw_signaling(f4);
+
+ f5 = mock_array(2, dma_fence_get(f1), dma_fence_get(f2));
+ if (!f5)
+ goto error_put_f4;
+
+ f6 = mock_array(2, dma_fence_get(f3), dma_fence_get(f4));
+ if (!f6)
+ goto error_put_f5;
+
+ f7 = dma_fence_unwrap_merge(f5, f6);
+ if (!f7)
+ goto error_put_f6;
+
+ err = 0;
+ dma_fence_unwrap_for_each(fence, &iter, f7) {
+ if (fence == f1 && f4) {
+ dma_fence_put(f1);
+ f1 = NULL;
+ } else if (fence == f4 && !f1) {
+ dma_fence_put(f4);
+ f4 = NULL;
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f1 || f4) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_put(f7);
+error_put_f6:
+ dma_fence_put(f6);
+error_put_f5:
+ dma_fence_put(f5);
+error_put_f4:
+ dma_fence_put(f4);
+error_put_f3:
+ dma_fence_put(f3);
+error_put_f2:
+ dma_fence_put(f2);
+error_put_f1:
+ dma_fence_put(f1);
+ return err;
+}
+
int dma_fence_unwrap(void)
{
static const struct subtest tests[] = {
@@ -375,7 +631,11 @@ int dma_fence_unwrap(void)
SUBTEST(unwrap_chain),
SUBTEST(unwrap_chain_array),
SUBTEST(unwrap_merge),
+ SUBTEST(unwrap_merge_duplicate),
+ SUBTEST(unwrap_merge_seqno),
+ SUBTEST(unwrap_merge_order),
SUBTEST(unwrap_merge_complex),
+ SUBTEST(unwrap_merge_complex_seqno),
};
return subtests(tests, NULL);
diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c
index 32019dc33cca..20bdc52f63a5 100644
--- a/drivers/dpll/dpll_core.c
+++ b/drivers/dpll/dpll_core.c
@@ -443,8 +443,11 @@ static void dpll_pin_prop_free(struct dpll_pin_properties *prop)
static int dpll_pin_prop_dup(const struct dpll_pin_properties *src,
struct dpll_pin_properties *dst)
{
+ if (WARN_ON(src->freq_supported && !src->freq_supported_num))
+ return -EINVAL;
+
memcpy(dst, src, sizeof(*dst));
- if (src->freq_supported && src->freq_supported_num) {
+ if (src->freq_supported) {
size_t freq_size = src->freq_supported_num *
sizeof(*src->freq_supported);
dst->freq_supported = kmemdup(src->freq_supported,
@@ -505,7 +508,7 @@ dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
xa_init_flags(&pin->parent_refs, XA_FLAGS_ALLOC);
ret = xa_alloc_cyclic(&dpll_pin_xa, &pin->id, pin, xa_limit_32b,
&dpll_pin_xa_id, GFP_KERNEL);
- if (ret)
+ if (ret < 0)
goto err_xa_alloc;
return pin;
err_xa_alloc:
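The `ret < 0` check matters because xa_alloc_cyclic() returns 1, not 0, when the allocation succeeds but the cyclic counter wraps back to its minimum; only negative values are errors. Treating any non-zero return as failure therefore made dpll_pin_alloc() fail spuriously once the ID counter wrapped. The intended pattern, as now used above:

	ret = xa_alloc_cyclic(&dpll_pin_xa, &pin->id, pin, xa_limit_32b,
			      &dpll_pin_xa_id, GFP_KERNEL);
	if (ret < 0)	/* 0 and 1 are both success; 1 just means "wrapped" */
		goto err_xa_alloc;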
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 2051a7c944a5..19ad3c3b675d 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -75,6 +75,34 @@ config EDAC_GHES
In doubt, say 'Y'.
+config EDAC_SCRUB
+ bool "EDAC scrub feature"
+ help
+ The EDAC scrub feature is optional and is designed to control the
+ memory scrubbers in the system. The common sysfs scrub interface
+ abstracts the control of arbitrary scrubbing functionalities
+ into a unified set of functions.
+ Say 'y/n' to enable/disable the EDAC scrub feature.
+
+config EDAC_ECS
+ bool "EDAC ECS (Error Check Scrub) feature"
+ help
+ The EDAC ECS feature is optional and is designed to control on-die
+ error check scrub (e.g., DDR5 ECS) in the system. The common sysfs
+ ECS interface abstracts the control of various ECS functionalities
+ into a unified set of functions.
+ Say 'y/n' to enable/disable the EDAC ECS feature.
+
+config EDAC_MEM_REPAIR
+ bool "EDAC memory repair feature"
+ help
+ The EDAC memory repair feature is optional and is designed to control
+ memory devices that provide repair features, such as Post Package
+ Repair (PPR) and memory sparing. The common sysfs memory repair
+ interface abstracts the control of various memory repair
+ functionalities into a unified set of functions.
+ Say 'y/n' to enable/disable the EDAC memory repair feature.
+
config EDAC_AMD64
tristate "AMD64 (Opteron, Athlon64)"
depends on AMD_NB && EDAC_DECODE_MCE
@@ -168,7 +196,7 @@ config EDAC_I3200
config EDAC_IE31200
tristate "Intel e312xx"
- depends on PCI && X86
+ depends on PCI && X86 && X86_MCE_INTEL
help
Support for error detection and correction on the Intel
E3-1200 based DRAM controllers.
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 89789ba8275f..a8f2d8f6c894 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -12,6 +12,9 @@ edac_core-y := edac_mc.o edac_device.o edac_mc_sysfs.o
edac_core-y += edac_module.o edac_device_sysfs.o wq.o
edac_core-$(CONFIG_EDAC_DEBUG) += debugfs.o
+edac_core-$(CONFIG_EDAC_SCRUB) += scrub.o
+edac_core-$(CONFIG_EDAC_ECS) += ecs.o
+edac_core-$(CONFIG_EDAC_MEM_REPAIR) += mem_repair.o
ifdef CONFIG_PCI
edac_core-y += edac_pci.o edac_pci_sysfs.o
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 8414ceb43e4a..90f0eb7cc5b9 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/ras.h>
+#include <linux/string_choices.h>
#include "amd64_edac.h"
#include <asm/amd_nb.h>
#include <asm/amd_node.h>
@@ -1171,22 +1172,21 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
}
- edac_dbg(1, "All DIMMs support ECC:%s\n",
- (dclr & BIT(19)) ? "yes" : "no");
+ edac_dbg(1, "All DIMMs support ECC: %s\n", str_yes_no(dclr & BIT(19)));
edac_dbg(1, " PAR/ERR parity: %s\n",
- (dclr & BIT(8)) ? "enabled" : "disabled");
+ str_enabled_disabled(dclr & BIT(8)));
if (pvt->fam == 0x10)
edac_dbg(1, " DCT 128bit mode width: %s\n",
(dclr & BIT(11)) ? "128b" : "64b");
edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
- (dclr & BIT(12)) ? "yes" : "no",
- (dclr & BIT(13)) ? "yes" : "no",
- (dclr & BIT(14)) ? "yes" : "no",
- (dclr & BIT(15)) ? "yes" : "no");
+ str_yes_no(dclr & BIT(12)),
+ str_yes_no(dclr & BIT(13)),
+ str_yes_no(dclr & BIT(14)),
+ str_yes_no(dclr & BIT(15)));
}
#define CS_EVEN_PRIMARY BIT(0)
@@ -1353,14 +1353,14 @@ static void umc_dump_misc_regs(struct amd64_pvt *pvt)
edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
- i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
- (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
+ i, str_yes_no(umc->umc_cap_hi & BIT(30)),
+ str_yes_no(umc->umc_cap_hi & BIT(31)));
edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
- i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
+ i, str_yes_no(umc->umc_cfg & BIT(12)));
edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
- i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
+ i, str_yes_no(umc->dimm_cfg & BIT(6)));
edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
- i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
+ i, str_yes_no(umc->dimm_cfg & BIT(7)));
umc_debug_display_dimm_sizes(pvt, i);
}
@@ -1371,11 +1371,11 @@ static void dct_dump_misc_regs(struct amd64_pvt *pvt)
edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
edac_dbg(1, " NB two channel DRAM capable: %s\n",
- (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
+ str_yes_no(pvt->nbcap & NBCAP_DCT_DUAL));
edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n",
- (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
- (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
+ str_yes_no(pvt->nbcap & NBCAP_SECDED),
+ str_yes_no(pvt->nbcap & NBCAP_CHIPKILL));
debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
@@ -1398,7 +1398,7 @@ static void dct_dump_misc_regs(struct amd64_pvt *pvt)
if (!dct_ganging_enabled(pvt))
debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
- edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
+ edac_dbg(1, " DramHoleValid: %s\n", str_yes_no(dhar_valid(pvt)));
amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
}
@@ -2027,15 +2027,15 @@ static void read_dram_ctl_register(struct amd64_pvt *pvt)
if (!dct_ganging_enabled(pvt))
edac_dbg(0, " Address range split per DCT: %s\n",
- (dct_high_range_enabled(pvt) ? "yes" : "no"));
+ str_yes_no(dct_high_range_enabled(pvt)));
edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
- (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
- (dct_memory_cleared(pvt) ? "yes" : "no"));
+ str_enabled_disabled(dct_data_intlv_enabled(pvt)),
+ str_yes_no(dct_memory_cleared(pvt)));
edac_dbg(0, " channel interleave: %s, "
"interleave bits selector: 0x%x\n",
- (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
+ str_enabled_disabled(dct_interleave_enabled(pvt)),
dct_sel_interleave_addr(pvt));
}
@@ -3208,8 +3208,7 @@ static bool nb_mce_bank_enabled_on_node(u16 nid)
nbe = reg->l & MSR_MCGCTL_NBE;
edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
- cpu, reg->q,
- (nbe ? "enabled" : "disabled"));
+ cpu, reg->q, str_enabled_disabled(nbe));
if (!nbe)
goto out;
@@ -3353,12 +3352,9 @@ static bool dct_ecc_enabled(struct amd64_pvt *pvt)
edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
MSR_IA32_MCG_CTL, nid);
- edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));
+ edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, str_enabled_disabled(ecc_en));
- if (!ecc_en || !nb_mce_en)
- return false;
- else
- return true;
+ return ecc_en && nb_mce_en;
}
static bool umc_ecc_enabled(struct amd64_pvt *pvt)
@@ -3378,7 +3374,7 @@ static bool umc_ecc_enabled(struct amd64_pvt *pvt)
}
}
- edac_dbg(3, "Node %d: DRAM ECC %s.\n", pvt->mc_node_id, (ecc_en ? "enabled" : "disabled"));
+ edac_dbg(3, "Node %d: DRAM ECC %s.\n", pvt->mc_node_id, str_enabled_disabled(ecc_en));
return ecc_en;
}
diff --git a/drivers/edac/debugfs.c b/drivers/edac/debugfs.c
index 4804332d9946..8195fc9c9354 100644
--- a/drivers/edac/debugfs.c
+++ b/drivers/edac/debugfs.c
@@ -1,4 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/string_choices.h>
+
#include "edac_module.h"
static struct dentry *edac_debugfs;
@@ -22,7 +25,7 @@ static ssize_t edac_fake_inject_write(struct file *file,
"Generating %d %s fake error%s to %d.%d.%d to test core handling. NOTE: this won't test the driver-specific decoding logic.\n",
errcount,
(type == HW_EVENT_ERR_UNCORRECTED) ? "UE" : "CE",
- errcount > 1 ? "s" : "",
+ str_plural(errcount),
mci->fake_inject_layer[0],
mci->fake_inject_layer[1],
mci->fake_inject_layer[2]
diff --git a/drivers/edac/ecs.c b/drivers/edac/ecs.c
new file mode 100755
index 000000000000..1d51838a60c1
--- /dev/null
+++ b/drivers/edac/ecs.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The generic ECS driver is designed to support control of on-die error
+ * check scrub (e.g., DDR5 ECS). The common sysfs ECS interface abstracts
+ * the control of various ECS functionalities into a unified set of functions.
+ *
+ * Copyright (c) 2024-2025 HiSilicon Limited.
+ */
+
+#include <linux/edac.h>
+
+#define EDAC_ECS_FRU_NAME "ecs_fru"
+
+enum edac_ecs_attributes {
+ ECS_LOG_ENTRY_TYPE,
+ ECS_MODE,
+ ECS_RESET,
+ ECS_THRESHOLD,
+ ECS_MAX_ATTRS
+};
+
+struct edac_ecs_dev_attr {
+ struct device_attribute dev_attr;
+ int fru_id;
+};
+
+struct edac_ecs_fru_context {
+ char name[EDAC_FEAT_NAME_LEN];
+ struct edac_ecs_dev_attr dev_attr[ECS_MAX_ATTRS];
+ struct attribute *ecs_attrs[ECS_MAX_ATTRS + 1];
+ struct attribute_group group;
+};
+
+struct edac_ecs_context {
+ u16 num_media_frus;
+ struct edac_ecs_fru_context *fru_ctxs;
+};
+
+#define TO_ECS_DEV_ATTR(_dev_attr) \
+ container_of(_dev_attr, struct edac_ecs_dev_attr, dev_attr)
+
+#define EDAC_ECS_ATTR_SHOW(attrib, cb, type, format) \
+static ssize_t attrib##_show(struct device *ras_feat_dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct edac_ecs_dev_attr *dev_attr = TO_ECS_DEV_ATTR(attr); \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_ecs_ops *ops = ctx->ecs.ecs_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->ecs.private, \
+ dev_attr->fru_id, &data); \
+ if (ret) \
+ return ret; \
+ \
+ return sysfs_emit(buf, format, data); \
+}
+
+EDAC_ECS_ATTR_SHOW(log_entry_type, get_log_entry_type, u32, "%u\n")
+EDAC_ECS_ATTR_SHOW(mode, get_mode, u32, "%u\n")
+EDAC_ECS_ATTR_SHOW(threshold, get_threshold, u32, "%u\n")
+
+#define EDAC_ECS_ATTR_STORE(attrib, cb, type, conv_func) \
+static ssize_t attrib##_store(struct device *ras_feat_dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ struct edac_ecs_dev_attr *dev_attr = TO_ECS_DEV_ATTR(attr); \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_ecs_ops *ops = ctx->ecs.ecs_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = conv_func(buf, 0, &data); \
+ if (ret < 0) \
+ return ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->ecs.private, \
+ dev_attr->fru_id, data); \
+ if (ret) \
+ return ret; \
+ \
+ return len; \
+}
+
+EDAC_ECS_ATTR_STORE(log_entry_type, set_log_entry_type, unsigned long, kstrtoul)
+EDAC_ECS_ATTR_STORE(mode, set_mode, unsigned long, kstrtoul)
+EDAC_ECS_ATTR_STORE(reset, reset, unsigned long, kstrtoul)
+EDAC_ECS_ATTR_STORE(threshold, set_threshold, unsigned long, kstrtoul)
+
+static umode_t ecs_attr_visible(struct kobject *kobj, struct attribute *a, int attr_id)
+{
+ struct device *ras_feat_dev = kobj_to_dev(kobj);
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
+ const struct edac_ecs_ops *ops = ctx->ecs.ecs_ops;
+
+ switch (attr_id) {
+ case ECS_LOG_ENTRY_TYPE:
+ if (ops->get_log_entry_type) {
+ if (ops->set_log_entry_type)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case ECS_MODE:
+ if (ops->get_mode) {
+ if (ops->set_mode)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case ECS_RESET:
+ if (ops->reset)
+ return a->mode;
+ break;
+ case ECS_THRESHOLD:
+ if (ops->get_threshold) {
+ if (ops->set_threshold)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define EDAC_ECS_ATTR_RO(_name, _fru_id) \
+ ((struct edac_ecs_dev_attr) { .dev_attr = __ATTR_RO(_name), \
+ .fru_id = _fru_id })
+
+#define EDAC_ECS_ATTR_WO(_name, _fru_id) \
+ ((struct edac_ecs_dev_attr) { .dev_attr = __ATTR_WO(_name), \
+ .fru_id = _fru_id })
+
+#define EDAC_ECS_ATTR_RW(_name, _fru_id) \
+ ((struct edac_ecs_dev_attr) { .dev_attr = __ATTR_RW(_name), \
+ .fru_id = _fru_id })
+
+static int ecs_create_desc(struct device *ecs_dev, const struct attribute_group **attr_groups,
+ u16 num_media_frus)
+{
+ struct edac_ecs_context *ecs_ctx;
+ u32 fru;
+
+ ecs_ctx = devm_kzalloc(ecs_dev, sizeof(*ecs_ctx), GFP_KERNEL);
+ if (!ecs_ctx)
+ return -ENOMEM;
+
+ ecs_ctx->num_media_frus = num_media_frus;
+ ecs_ctx->fru_ctxs = devm_kcalloc(ecs_dev, num_media_frus,
+ sizeof(*ecs_ctx->fru_ctxs),
+ GFP_KERNEL);
+ if (!ecs_ctx->fru_ctxs)
+ return -ENOMEM;
+
+ for (fru = 0; fru < num_media_frus; fru++) {
+ struct edac_ecs_fru_context *fru_ctx = &ecs_ctx->fru_ctxs[fru];
+ struct attribute_group *group = &fru_ctx->group;
+ int i;
+
+ fru_ctx->dev_attr[ECS_LOG_ENTRY_TYPE] = EDAC_ECS_ATTR_RW(log_entry_type, fru);
+ fru_ctx->dev_attr[ECS_MODE] = EDAC_ECS_ATTR_RW(mode, fru);
+ fru_ctx->dev_attr[ECS_RESET] = EDAC_ECS_ATTR_WO(reset, fru);
+ fru_ctx->dev_attr[ECS_THRESHOLD] = EDAC_ECS_ATTR_RW(threshold, fru);
+
+ for (i = 0; i < ECS_MAX_ATTRS; i++)
+ fru_ctx->ecs_attrs[i] = &fru_ctx->dev_attr[i].dev_attr.attr;
+
+ sprintf(fru_ctx->name, "%s%d", EDAC_ECS_FRU_NAME, fru);
+ group->name = fru_ctx->name;
+ group->attrs = fru_ctx->ecs_attrs;
+ group->is_visible = ecs_attr_visible;
+
+ attr_groups[fru] = group;
+ }
+
+ return 0;
+}
+
+/**
+ * edac_ecs_get_desc - get EDAC ECS descriptors
+ * @ecs_dev: client device, supports ECS feature
+ * @attr_groups: pointer to attribute group container
+ * @num_media_frus: number of media FRUs in the device
+ *
+ * Return:
+ * * %0 - Success.
+ * * %-EINVAL - Invalid parameters passed.
+ * * %-ENOMEM - Dynamic memory allocation failed.
+ */
+int edac_ecs_get_desc(struct device *ecs_dev,
+ const struct attribute_group **attr_groups, u16 num_media_frus)
+{
+ if (!ecs_dev || !attr_groups || !num_media_frus)
+ return -EINVAL;
+
+ return ecs_create_desc(ecs_dev, attr_groups, num_media_frus);
+}
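For each media FRU, the descriptor code above creates a sysfs group named ecs_fruN containing log_entry_type, mode and threshold (read-write or read-only depending on which callbacks exist) plus a write-only reset; attributes whose callbacks are NULL are hidden by ecs_attr_visible(). A hedged sketch of the client side, with callback prototypes inferred from how the show/store macros invoke them (the authoritative prototypes are in <linux/edac.h>) and the my_* names being hypothetical:

	static int my_ecs_get_mode(struct device *dev, void *private,
				   int fru_id, u32 *mode)
	{
		*mode = 0;		/* read back from the device */
		return 0;
	}

	static int my_ecs_set_mode(struct device *dev, void *private,
				   int fru_id, u32 mode)
	{
		return 0;		/* program the on-die ECS engine */
	}

	static const struct edac_ecs_ops my_ecs_ops = {
		.get_mode = my_ecs_get_mode,
		.set_mode = my_ecs_set_mode,
		/* reset/threshold/log_entry_type left NULL: files stay hidden */
	};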
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 621dc2a5d034..0734909b08a4 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -570,3 +570,188 @@ void edac_device_handle_ue_count(struct edac_device_ctl_info *edac_dev,
block ? block->name : "N/A", count, msg);
}
EXPORT_SYMBOL_GPL(edac_device_handle_ue_count);
+
+static void edac_dev_release(struct device *dev)
+{
+ struct edac_dev_feat_ctx *ctx = container_of(dev, struct edac_dev_feat_ctx, dev);
+
+ kfree(ctx->mem_repair);
+ kfree(ctx->scrub);
+ kfree(ctx->dev.groups);
+ kfree(ctx);
+}
+
+static const struct device_type edac_dev_type = {
+ .name = "edac_dev",
+ .release = edac_dev_release,
+};
+
+static void edac_dev_unreg(void *data)
+{
+ device_unregister(data);
+}
+
+/**
+ * edac_dev_register - register device for RAS features with EDAC
+ * @parent: parent device.
+ * @name: name of the folder under /sys/bus/edac/devices/,
+ * derived from the parent device,
+ * e.g. /sys/bus/edac/devices/cxl_mem0/
+ * @private: parent driver's data to store in the context if any.
+ * @num_features: number of RAS features to register.
+ * @ras_features: list of RAS features to register.
+ *
+ * Return:
+ * * %0 - Success.
+ * * %-EINVAL - Invalid parameters passed.
+ * * %-ENOMEM - Dynamic memory allocation failed.
+ *
+ */
+int edac_dev_register(struct device *parent, char *name,
+ void *private, int num_features,
+ const struct edac_dev_feature *ras_features)
+{
+ const struct attribute_group **ras_attr_groups;
+ struct edac_dev_data *dev_data;
+ struct edac_dev_feat_ctx *ctx;
+ int mem_repair_cnt = 0;
+ int attr_gcnt = 0;
+ int ret = -ENOMEM;
+ int scrub_cnt = 0;
+ int feat;
+
+ if (!parent || !name || !num_features || !ras_features)
+ return -EINVAL;
+
+ /* Double parse to make space for attributes */
+ for (feat = 0; feat < num_features; feat++) {
+ switch (ras_features[feat].ft_type) {
+ case RAS_FEAT_SCRUB:
+ attr_gcnt++;
+ scrub_cnt++;
+ break;
+ case RAS_FEAT_ECS:
+ attr_gcnt += ras_features[feat].ecs_info.num_media_frus;
+ break;
+ case RAS_FEAT_MEM_REPAIR:
+ attr_gcnt++;
+ mem_repair_cnt++;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ras_attr_groups = kcalloc(attr_gcnt + 1, sizeof(*ras_attr_groups), GFP_KERNEL);
+ if (!ras_attr_groups)
+ goto ctx_free;
+
+ if (scrub_cnt) {
+ ctx->scrub = kcalloc(scrub_cnt, sizeof(*ctx->scrub), GFP_KERNEL);
+ if (!ctx->scrub)
+ goto groups_free;
+ }
+
+ if (mem_repair_cnt) {
+ ctx->mem_repair = kcalloc(mem_repair_cnt, sizeof(*ctx->mem_repair), GFP_KERNEL);
+ if (!ctx->mem_repair)
+ goto data_mem_free;
+ }
+
+ attr_gcnt = 0;
+ scrub_cnt = 0;
+ mem_repair_cnt = 0;
+ for (feat = 0; feat < num_features; feat++, ras_features++) {
+ switch (ras_features->ft_type) {
+ case RAS_FEAT_SCRUB:
+ if (!ras_features->scrub_ops || scrub_cnt != ras_features->instance) {
+ ret = -EINVAL;
+ goto data_mem_free;
+ }
+
+ dev_data = &ctx->scrub[scrub_cnt];
+ dev_data->instance = scrub_cnt;
+ dev_data->scrub_ops = ras_features->scrub_ops;
+ dev_data->private = ras_features->ctx;
+ ret = edac_scrub_get_desc(parent, &ras_attr_groups[attr_gcnt],
+ ras_features->instance);
+ if (ret)
+ goto data_mem_free;
+
+ scrub_cnt++;
+ attr_gcnt++;
+ break;
+ case RAS_FEAT_ECS:
+ if (!ras_features->ecs_ops) {
+ ret = -EINVAL;
+ goto data_mem_free;
+ }
+
+ dev_data = &ctx->ecs;
+ dev_data->ecs_ops = ras_features->ecs_ops;
+ dev_data->private = ras_features->ctx;
+ ret = edac_ecs_get_desc(parent, &ras_attr_groups[attr_gcnt],
+ ras_features->ecs_info.num_media_frus);
+ if (ret)
+ goto data_mem_free;
+
+ attr_gcnt += ras_features->ecs_info.num_media_frus;
+ break;
+ case RAS_FEAT_MEM_REPAIR:
+ if (!ras_features->mem_repair_ops ||
+ mem_repair_cnt != ras_features->instance) {
+ ret = -EINVAL;
+ goto data_mem_free;
+ }
+
+ dev_data = &ctx->mem_repair[mem_repair_cnt];
+ dev_data->instance = mem_repair_cnt;
+ dev_data->mem_repair_ops = ras_features->mem_repair_ops;
+ dev_data->private = ras_features->ctx;
+ ret = edac_mem_repair_get_desc(parent, &ras_attr_groups[attr_gcnt],
+ ras_features->instance);
+ if (ret)
+ goto data_mem_free;
+
+ mem_repair_cnt++;
+ attr_gcnt++;
+ break;
+ default:
+ ret = -EINVAL;
+ goto data_mem_free;
+ }
+ }
+
+ ctx->dev.parent = parent;
+ ctx->dev.bus = edac_get_sysfs_subsys();
+ ctx->dev.type = &edac_dev_type;
+ ctx->dev.groups = ras_attr_groups;
+ ctx->private = private;
+ dev_set_drvdata(&ctx->dev, ctx);
+
+ ret = dev_set_name(&ctx->dev, "%s", name);
+ if (ret)
+ goto data_mem_free;
+
+ ret = device_register(&ctx->dev);
+ if (ret) {
+ put_device(&ctx->dev);
+ return ret;
+ }
+
+ return devm_add_action_or_reset(parent, edac_dev_unreg, &ctx->dev);
+
+data_mem_free:
+ kfree(ctx->mem_repair);
+ kfree(ctx->scrub);
+groups_free:
+ kfree(ras_attr_groups);
+ctx_free:
+ kfree(ctx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(edac_dev_register);
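A hedged sketch of how a parent driver hands its RAS features to edac_dev_register(). The edac_dev_feature fields used here (ft_type, instance, scrub_ops, ecs_ops, ctx, ecs_info.num_media_frus) are exactly the ones this function dereferences; the my_* ops tables and the device name are hypothetical:

	static int my_ras_register(struct device *parent, void *drv_ctx)
	{
		struct edac_dev_feature features[] = {
			{
				.ft_type   = RAS_FEAT_SCRUB,
				.instance  = 0,
				.scrub_ops = &my_scrub_ops,
				.ctx       = drv_ctx,
			},
			{
				.ft_type  = RAS_FEAT_ECS,
				.ecs_ops  = &my_ecs_ops,
				.ctx      = drv_ctx,
				.ecs_info = { .num_media_frus = 2 },
			},
		};

		/*
		 * Creates /sys/bus/edac/devices/my_ras0/ with one attribute
		 * group per scrub instance and one per ECS FRU; teardown is
		 * tied to @parent through devm_add_action_or_reset().
		 */
		return edac_dev_register(parent, "my_ras0", drv_ctx,
					 ARRAY_SIZE(features), features);
	}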
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index f45d849d3f15..355a977019e9 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -751,6 +751,8 @@ static int i10nm_get_ddr_munits(void)
continue;
} else {
d->imc[lmc].mdev = mdev;
+ if (res_cfg->type == SPR)
+ skx_set_mc_mapping(d, i, lmc);
lmc++;
}
}
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 49b4499269fb..b5cf25905b05 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -31,6 +31,7 @@
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
+#include <linux/string_choices.h>
#include "edac_module.h"
@@ -899,7 +900,7 @@ static void decode_mtr(int slot_row, u16 mtr)
edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
- MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
+ str_enabled_disabled(MTR_DIMMS_ETHROTTLE(mtr)));
edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
edac_dbg(2, "\t\tNUMRANK: %s\n",
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 61adaa872ba7..69068f8d0cad 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
+#include <linux/string_choices.h>
#include "edac_module.h"
@@ -620,7 +621,7 @@ static int decode_mtr(struct i7300_pvt *pvt,
edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
- MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
+ str_enabled_disabled(MTR_DIMMS_ETHROTTLE(mtr)));
edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
edac_dbg(2, "\t\tNUMRANK: %s\n",
@@ -871,9 +872,9 @@ static int i7300_get_mc_regs(struct mem_ctl_info *mci)
IS_MIRRORED(pvt->mc_settings) ? "" : "non-");
edac_dbg(0, "Error detection is %s\n",
- IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
+ str_enabled_disabled(IS_ECC_ENABLED(pvt->mc_settings)));
edac_dbg(0, "Retry is %s\n",
- IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
+ str_enabled_disabled(IS_RETRY_ENABLED(pvt->mc_settings)));
/* Get Memory Interleave Range registers */
pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0,
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 4fc16922dc1a..204834149579 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -51,6 +51,7 @@
#include <linux/edac.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <asm/mce.h>
#include "edac_module.h"
#define EDAC_MOD_STR "ie31200_edac"
@@ -84,44 +85,23 @@
#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_9 0x3ec6
#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_10 0x3eca
-/* Test if HB is for Skylake or later. */
-#define DEVICE_ID_SKYLAKE_OR_LATER(did) \
- (((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_8) || \
- ((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_9) || \
- ((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_10) || \
- ((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_11) || \
- ((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_12) || \
- (((did) & PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK) == \
- PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK))
-
-#define IE31200_DIMMS 4
-#define IE31200_RANKS 8
-#define IE31200_RANKS_PER_CHANNEL 4
+/* Raptor Lake-S */
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_1 0xa703
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_2 0x4640
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_3 0x4630
+
+#define IE31200_RANKS_PER_CHANNEL 8
#define IE31200_DIMMS_PER_CHANNEL 2
#define IE31200_CHANNELS 2
+#define IE31200_IMC_NUM 2
/* Intel IE31200 register addresses - device 0 function 0 - DRAM Controller */
#define IE31200_MCHBAR_LOW 0x48
#define IE31200_MCHBAR_HIGH 0x4c
-#define IE31200_MCHBAR_MASK GENMASK_ULL(38, 15)
-#define IE31200_MMR_WINDOW_SIZE BIT(15)
/*
* Error Status Register (16b)
*
- * 15 reserved
- * 14 Isochronous TBWRR Run Behind FIFO Full
- * (ITCV)
- * 13 Isochronous TBWRR Run Behind FIFO Put
- * (ITSTV)
- * 12 reserved
- * 11 MCH Thermal Sensor Event
- * for SMI/SCI/SERR (GTSE)
- * 10 reserved
- * 9 LOCK to non-DRAM Memory Flag (LCKF)
- * 8 reserved
- * 7 DRAM Throttle Flag (DTF)
- * 6:2 reserved
* 1 Multi-bit DRAM ECC Error Flag (DMERR)
* 0 Single-bit DRAM ECC Error Flag (DSERR)
*/
@@ -130,68 +110,60 @@
#define IE31200_ERRSTS_CE BIT(0)
#define IE31200_ERRSTS_BITS (IE31200_ERRSTS_UE | IE31200_ERRSTS_CE)
-/*
- * Channel 0 ECC Error Log (64b)
- *
- * 63:48 Error Column Address (ERRCOL)
- * 47:32 Error Row Address (ERRROW)
- * 31:29 Error Bank Address (ERRBANK)
- * 28:27 Error Rank Address (ERRRANK)
- * 26:24 reserved
- * 23:16 Error Syndrome (ERRSYND)
- * 15: 2 reserved
- * 1 Multiple Bit Error Status (MERRSTS)
- * 0 Correctable Error Status (CERRSTS)
- */
-
-#define IE31200_C0ECCERRLOG 0x40c8
-#define IE31200_C1ECCERRLOG 0x44c8
-#define IE31200_C0ECCERRLOG_SKL 0x4048
-#define IE31200_C1ECCERRLOG_SKL 0x4448
-#define IE31200_ECCERRLOG_CE BIT(0)
-#define IE31200_ECCERRLOG_UE BIT(1)
-#define IE31200_ECCERRLOG_RANK_BITS GENMASK_ULL(28, 27)
-#define IE31200_ECCERRLOG_RANK_SHIFT 27
-#define IE31200_ECCERRLOG_SYNDROME_BITS GENMASK_ULL(23, 16)
-#define IE31200_ECCERRLOG_SYNDROME_SHIFT 16
-
-#define IE31200_ECCERRLOG_SYNDROME(log) \
- ((log & IE31200_ECCERRLOG_SYNDROME_BITS) >> \
- IE31200_ECCERRLOG_SYNDROME_SHIFT)
-
#define IE31200_CAPID0 0xe4
#define IE31200_CAPID0_PDCD BIT(4)
#define IE31200_CAPID0_DDPCD BIT(6)
#define IE31200_CAPID0_ECC BIT(1)
-#define IE31200_MAD_DIMM_0_OFFSET 0x5004
-#define IE31200_MAD_DIMM_0_OFFSET_SKL 0x500C
-#define IE31200_MAD_DIMM_SIZE GENMASK_ULL(7, 0)
-#define IE31200_MAD_DIMM_A_RANK BIT(17)
-#define IE31200_MAD_DIMM_A_RANK_SHIFT 17
-#define IE31200_MAD_DIMM_A_RANK_SKL BIT(10)
-#define IE31200_MAD_DIMM_A_RANK_SKL_SHIFT 10
-#define IE31200_MAD_DIMM_A_WIDTH BIT(19)
-#define IE31200_MAD_DIMM_A_WIDTH_SHIFT 19
-#define IE31200_MAD_DIMM_A_WIDTH_SKL GENMASK_ULL(9, 8)
-#define IE31200_MAD_DIMM_A_WIDTH_SKL_SHIFT 8
-
-/* Skylake reports 1GB increments, everything else is 256MB */
-#define IE31200_PAGES(n, skl) \
- (n << (28 + (2 * skl) - PAGE_SHIFT))
+/* Non-constant mask variant of FIELD_GET() */
+#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
static int nr_channels;
static struct pci_dev *mci_pdev;
static int ie31200_registered = 1;
+struct res_config {
+ enum mem_type mtype;
+ bool cmci;
+ int imc_num;
+ /* Host MMIO configuration register */
+ u64 reg_mchbar_mask;
+ u64 reg_mchbar_window_size;
+ /* ECC error log register */
+ u64 reg_eccerrlog_offset[IE31200_CHANNELS];
+ u64 reg_eccerrlog_ce_mask;
+ u64 reg_eccerrlog_ce_ovfl_mask;
+ u64 reg_eccerrlog_ue_mask;
+ u64 reg_eccerrlog_ue_ovfl_mask;
+ u64 reg_eccerrlog_rank_mask;
+ u64 reg_eccerrlog_syndrome_mask;
+ /* MSR to clear ECC error log register */
+ u32 msr_clear_eccerrlog_offset;
+ /* DIMM characteristics register */
+ u64 reg_mad_dimm_size_granularity;
+ u64 reg_mad_dimm_offset[IE31200_CHANNELS];
+ u32 reg_mad_dimm_size_mask[IE31200_DIMMS_PER_CHANNEL];
+ u32 reg_mad_dimm_rank_mask[IE31200_DIMMS_PER_CHANNEL];
+ u32 reg_mad_dimm_width_mask[IE31200_DIMMS_PER_CHANNEL];
+};
+
struct ie31200_priv {
void __iomem *window;
void __iomem *c0errlog;
void __iomem *c1errlog;
+ struct res_config *cfg;
+ struct mem_ctl_info *mci;
+ struct pci_dev *pdev;
+ struct device dev;
};
+static struct ie31200_pvt {
+ struct ie31200_priv *priv[IE31200_IMC_NUM];
+} ie31200_pvt;
+
enum ie31200_chips {
IE31200 = 0,
+ IE31200_1 = 1,
};
struct ie31200_dev_info {
@@ -202,18 +174,22 @@ struct ie31200_error_info {
u16 errsts;
u16 errsts2;
u64 eccerrlog[IE31200_CHANNELS];
+ u64 erraddr;
};
static const struct ie31200_dev_info ie31200_devs[] = {
[IE31200] = {
.ctl_name = "IE31200"
},
+ [IE31200_1] = {
+ .ctl_name = "IE31200_1"
+ },
};
struct dimm_data {
- u8 size; /* in multiples of 256MB, except Skylake is 1GB */
- u8 dual_rank : 1,
- x16_width : 2; /* 0 means x8 width */
+ u64 size; /* in bytes */
+ u8 ranks;
+ enum dev_type dtype;
};
static int how_many_channels(struct pci_dev *pdev)
@@ -251,29 +227,54 @@ static bool ecc_capable(struct pci_dev *pdev)
return true;
}
-static int eccerrlog_row(u64 log)
-{
- return ((log & IE31200_ECCERRLOG_RANK_BITS) >>
- IE31200_ECCERRLOG_RANK_SHIFT);
-}
+#define mci_to_pci_dev(mci) (((struct ie31200_priv *)(mci)->pvt_info)->pdev)
static void ie31200_clear_error_info(struct mem_ctl_info *mci)
{
+ struct ie31200_priv *priv = mci->pvt_info;
+ struct res_config *cfg = priv->cfg;
+
+ /*
+ * The PCI ERRSTS register is deprecated. Write the MSR to clear
+ * the ECC error log registers in all memory controllers.
+ */
+ if (cfg->msr_clear_eccerrlog_offset) {
+ if (wrmsr_safe(cfg->msr_clear_eccerrlog_offset,
+ cfg->reg_eccerrlog_ce_mask |
+ cfg->reg_eccerrlog_ce_ovfl_mask |
+ cfg->reg_eccerrlog_ue_mask |
+ cfg->reg_eccerrlog_ue_ovfl_mask, 0) < 0)
+ ie31200_printk(KERN_ERR, "Failed to wrmsr.\n");
+
+ return;
+ }
+
/*
* Clear any error bits.
* (Yes, we really clear bits by writing 1 to them.)
*/
- pci_write_bits16(to_pci_dev(mci->pdev), IE31200_ERRSTS,
+ pci_write_bits16(mci_to_pci_dev(mci), IE31200_ERRSTS,
IE31200_ERRSTS_BITS, IE31200_ERRSTS_BITS);
}
static void ie31200_get_and_clear_error_info(struct mem_ctl_info *mci,
struct ie31200_error_info *info)
{
- struct pci_dev *pdev;
+ struct pci_dev *pdev = mci_to_pci_dev(mci);
struct ie31200_priv *priv = mci->pvt_info;
- pdev = to_pci_dev(mci->pdev);
+ /*
+ * The PCI ERRSTS register is deprecated; read the MMIO-mapped
+ * ECC error log registers directly.
+ */
+ if (priv->cfg->msr_clear_eccerrlog_offset) {
+ info->eccerrlog[0] = lo_hi_readq(priv->c0errlog);
+ if (nr_channels == 2)
+ info->eccerrlog[1] = lo_hi_readq(priv->c1errlog);
+
+ ie31200_clear_error_info(mci);
+ return;
+ }
/*
* This is a mess because there is no atomic way to read all the
@@ -309,46 +310,56 @@ static void ie31200_get_and_clear_error_info(struct mem_ctl_info *mci,
static void ie31200_process_error_info(struct mem_ctl_info *mci,
struct ie31200_error_info *info)
{
+ struct ie31200_priv *priv = mci->pvt_info;
+ struct res_config *cfg = priv->cfg;
int channel;
u64 log;
- if (!(info->errsts & IE31200_ERRSTS_BITS))
- return;
+ if (!cfg->msr_clear_eccerrlog_offset) {
+ if (!(info->errsts & IE31200_ERRSTS_BITS))
+ return;
- if ((info->errsts ^ info->errsts2) & IE31200_ERRSTS_BITS) {
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
- -1, -1, -1, "UE overwrote CE", "");
- info->errsts = info->errsts2;
+ if ((info->errsts ^ info->errsts2) & IE31200_ERRSTS_BITS) {
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
+ -1, -1, -1, "UE overwrote CE", "");
+ info->errsts = info->errsts2;
+ }
}
for (channel = 0; channel < nr_channels; channel++) {
log = info->eccerrlog[channel];
- if (log & IE31200_ECCERRLOG_UE) {
+ if (log & cfg->reg_eccerrlog_ue_mask) {
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
- 0, 0, 0,
- eccerrlog_row(log),
+ info->erraddr >> PAGE_SHIFT, 0, 0,
+ field_get(cfg->reg_eccerrlog_rank_mask, log),
channel, -1,
"ie31200 UE", "");
- } else if (log & IE31200_ECCERRLOG_CE) {
+ } else if (log & cfg->reg_eccerrlog_ce_mask) {
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
- 0, 0,
- IE31200_ECCERRLOG_SYNDROME(log),
- eccerrlog_row(log),
+ info->erraddr >> PAGE_SHIFT, 0,
+ field_get(cfg->reg_eccerrlog_syndrome_mask, log),
+ field_get(cfg->reg_eccerrlog_rank_mask, log),
channel, -1,
"ie31200 CE", "");
}
}
}
-static void ie31200_check(struct mem_ctl_info *mci)
+static void __ie31200_check(struct mem_ctl_info *mci, struct mce *mce)
{
struct ie31200_error_info info;
+ info.erraddr = mce ? mce->addr : 0;
ie31200_get_and_clear_error_info(mci, &info);
ie31200_process_error_info(mci, &info);
}
-static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
+static void ie31200_check(struct mem_ctl_info *mci)
+{
+ __ie31200_check(mci, NULL);
+}
+
+static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev, struct res_config *cfg, int mc)
{
union {
u64 mchbar;
@@ -361,7 +372,8 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
pci_read_config_dword(pdev, IE31200_MCHBAR_LOW, &u.mchbar_low);
pci_read_config_dword(pdev, IE31200_MCHBAR_HIGH, &u.mchbar_high);
- u.mchbar &= IE31200_MCHBAR_MASK;
+ u.mchbar &= cfg->reg_mchbar_mask;
+ u.mchbar += cfg->reg_mchbar_window_size * mc;
if (u.mchbar != (resource_size_t)u.mchbar) {
ie31200_printk(KERN_ERR, "mmio space beyond accessible range (0x%llx)\n",
@@ -369,7 +381,7 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
return NULL;
}
- window = ioremap(u.mchbar, IE31200_MMR_WINDOW_SIZE);
+ window = ioremap(u.mchbar, cfg->reg_mchbar_window_size);
if (!window)
ie31200_printk(KERN_ERR, "Cannot map mmio space at 0x%llx\n",
(unsigned long long)u.mchbar);
@@ -377,155 +389,108 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
return window;
}
-static void __skl_populate_dimm_info(struct dimm_data *dd, u32 addr_decode,
- int chan)
+static void populate_dimm_info(struct dimm_data *dd, u32 addr_decode, int dimm,
+ struct res_config *cfg)
{
- dd->size = (addr_decode >> (chan << 4)) & IE31200_MAD_DIMM_SIZE;
- dd->dual_rank = (addr_decode & (IE31200_MAD_DIMM_A_RANK_SKL << (chan << 4))) ? 1 : 0;
- dd->x16_width = ((addr_decode & (IE31200_MAD_DIMM_A_WIDTH_SKL << (chan << 4))) >>
- (IE31200_MAD_DIMM_A_WIDTH_SKL_SHIFT + (chan << 4)));
+ dd->size = field_get(cfg->reg_mad_dimm_size_mask[dimm], addr_decode) * cfg->reg_mad_dimm_size_granularity;
+ dd->ranks = field_get(cfg->reg_mad_dimm_rank_mask[dimm], addr_decode) + 1;
+ dd->dtype = field_get(cfg->reg_mad_dimm_width_mask[dimm], addr_decode) + DEV_X8;
}
-static void __populate_dimm_info(struct dimm_data *dd, u32 addr_decode,
- int chan)
+static void ie31200_get_dimm_config(struct mem_ctl_info *mci, void __iomem *window,
+ struct res_config *cfg, int mc)
{
- dd->size = (addr_decode >> (chan << 3)) & IE31200_MAD_DIMM_SIZE;
- dd->dual_rank = (addr_decode & (IE31200_MAD_DIMM_A_RANK << chan)) ? 1 : 0;
- dd->x16_width = (addr_decode & (IE31200_MAD_DIMM_A_WIDTH << chan)) ? 1 : 0;
-}
+ struct dimm_data dimm_info;
+ struct dimm_info *dimm;
+ unsigned long nr_pages;
+ u32 addr_decode;
+ int i, j, k;
-static void populate_dimm_info(struct dimm_data *dd, u32 addr_decode, int chan,
- bool skl)
-{
- if (skl)
- __skl_populate_dimm_info(dd, addr_decode, chan);
- else
- __populate_dimm_info(dd, addr_decode, chan);
-}
+ for (i = 0; i < IE31200_CHANNELS; i++) {
+ addr_decode = readl(window + cfg->reg_mad_dimm_offset[i]);
+ edac_dbg(0, "addr_decode: 0x%x\n", addr_decode);
+ for (j = 0; j < IE31200_DIMMS_PER_CHANNEL; j++) {
+ populate_dimm_info(&dimm_info, addr_decode, j, cfg);
+ edac_dbg(0, "mc: %d, channel: %d, dimm: %d, size: %lld MiB, ranks: %d, DRAM chip type: %d\n",
+ mc, i, j, dimm_info.size >> 20,
+ dimm_info.ranks,
+ dimm_info.dtype);
-static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
+ nr_pages = MiB_TO_PAGES(dimm_info.size >> 20);
+ if (nr_pages == 0)
+ continue;
+
+ nr_pages = nr_pages / dimm_info.ranks;
+ for (k = 0; k < dimm_info.ranks; k++) {
+ dimm = edac_get_dimm(mci, (j * dimm_info.ranks) + k, i, 0);
+ dimm->nr_pages = nr_pages;
+ edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
+ dimm->grain = 8; /* just a guess */
+ dimm->mtype = cfg->mtype;
+ dimm->dtype = dimm_info.dtype;
+ dimm->edac_mode = EDAC_UNKNOWN;
+ }
+ }
+ }
+}
+
+static int ie31200_register_mci(struct pci_dev *pdev, struct res_config *cfg, int mc)
{
- int i, j, ret;
- struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
- struct dimm_data dimm_info[IE31200_CHANNELS][IE31200_DIMMS_PER_CHANNEL];
- void __iomem *window;
struct ie31200_priv *priv;
- u32 addr_decode, mad_offset;
-
- /*
- * Kaby Lake, Coffee Lake seem to work like Skylake. Please re-visit
- * this logic when adding new CPU support.
- */
- bool skl = DEVICE_ID_SKYLAKE_OR_LATER(pdev->device);
-
- edac_dbg(0, "MC:\n");
-
- if (!ecc_capable(pdev)) {
- ie31200_printk(KERN_INFO, "No ECC support\n");
- return -ENODEV;
- }
+ struct mem_ctl_info *mci;
+ void __iomem *window;
+ int ret;
nr_channels = how_many_channels(pdev);
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
- layers[0].size = IE31200_DIMMS;
+ layers[0].size = IE31200_RANKS_PER_CHANNEL;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = nr_channels;
layers[1].is_virt_csrow = false;
- mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+ mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers,
sizeof(struct ie31200_priv));
if (!mci)
return -ENOMEM;
- window = ie31200_map_mchbar(pdev);
+ window = ie31200_map_mchbar(pdev, cfg, mc);
if (!window) {
ret = -ENODEV;
goto fail_free;
}
edac_dbg(3, "MC: init mci\n");
- mci->pdev = &pdev->dev;
- if (skl)
- mci->mtype_cap = MEM_FLAG_DDR4;
- else
- mci->mtype_cap = MEM_FLAG_DDR3;
+ mci->mtype_cap = BIT(cfg->mtype);
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
- mci->ctl_name = ie31200_devs[dev_idx].ctl_name;
+ mci->ctl_name = ie31200_devs[mc].ctl_name;
mci->dev_name = pci_name(pdev);
- mci->edac_check = ie31200_check;
+ mci->edac_check = cfg->cmci ? NULL : ie31200_check;
mci->ctl_page_to_phys = NULL;
priv = mci->pvt_info;
priv->window = window;
- if (skl) {
- priv->c0errlog = window + IE31200_C0ECCERRLOG_SKL;
- priv->c1errlog = window + IE31200_C1ECCERRLOG_SKL;
- mad_offset = IE31200_MAD_DIMM_0_OFFSET_SKL;
- } else {
- priv->c0errlog = window + IE31200_C0ECCERRLOG;
- priv->c1errlog = window + IE31200_C1ECCERRLOG;
- mad_offset = IE31200_MAD_DIMM_0_OFFSET;
- }
-
- /* populate DIMM info */
- for (i = 0; i < IE31200_CHANNELS; i++) {
- addr_decode = readl(window + mad_offset +
- (i * 4));
- edac_dbg(0, "addr_decode: 0x%x\n", addr_decode);
- for (j = 0; j < IE31200_DIMMS_PER_CHANNEL; j++) {
- populate_dimm_info(&dimm_info[i][j], addr_decode, j,
- skl);
- edac_dbg(0, "size: 0x%x, rank: %d, width: %d\n",
- dimm_info[i][j].size,
- dimm_info[i][j].dual_rank,
- dimm_info[i][j].x16_width);
- }
- }
-
+ priv->c0errlog = window + cfg->reg_eccerrlog_offset[0];
+ priv->c1errlog = window + cfg->reg_eccerrlog_offset[1];
+ priv->cfg = cfg;
+ priv->mci = mci;
+ priv->pdev = pdev;
+ device_initialize(&priv->dev);
/*
- * The dram rank boundary (DRB) reg values are boundary addresses
- * for each DRAM rank with a granularity of 64MB. DRB regs are
- * cumulative; the last one will contain the total memory
- * contained in all ranks.
+ * The EDAC core uses mci->pdev (a pointer to struct device) as
+ * the memory controller ID. These SoCs attach one or more memory
+ * controllers to a single pci_dev (a single pci_dev->dev can
+ * correspond to multiple memory controllers).
+ *
+ * To make mci->pdev unique, assign pci_dev->dev to mci->pdev
+ * for the first memory controller and assign a unique priv->dev
+ * to mci->pdev for each additional memory controller.
*/
- for (i = 0; i < IE31200_DIMMS_PER_CHANNEL; i++) {
- for (j = 0; j < IE31200_CHANNELS; j++) {
- struct dimm_info *dimm;
- unsigned long nr_pages;
-
- nr_pages = IE31200_PAGES(dimm_info[j][i].size, skl);
- if (nr_pages == 0)
- continue;
-
- if (dimm_info[j][i].dual_rank) {
- nr_pages = nr_pages / 2;
- dimm = edac_get_dimm(mci, (i * 2) + 1, j, 0);
- dimm->nr_pages = nr_pages;
- edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
- dimm->grain = 8; /* just a guess */
- if (skl)
- dimm->mtype = MEM_DDR4;
- else
- dimm->mtype = MEM_DDR3;
- dimm->dtype = DEV_UNKNOWN;
- dimm->edac_mode = EDAC_UNKNOWN;
- }
- dimm = edac_get_dimm(mci, i * 2, j, 0);
- dimm->nr_pages = nr_pages;
- edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
- dimm->grain = 8; /* same guess */
- if (skl)
- dimm->mtype = MEM_DDR4;
- else
- dimm->mtype = MEM_DDR3;
- dimm->dtype = DEV_UNKNOWN;
- dimm->edac_mode = EDAC_UNKNOWN;
- }
- }
+ mci->pdev = mc ? &priv->dev : &pdev->dev;
+ ie31200_get_dimm_config(mci, window, cfg, mc);
ie31200_clear_error_info(mci);
if (edac_mc_add_mc(mci)) {
@@ -534,16 +499,115 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
goto fail_unmap;
}
- /* get this far and it's successful */
- edac_dbg(3, "MC: success\n");
+ ie31200_pvt.priv[mc] = priv;
return 0;
-
fail_unmap:
iounmap(window);
-
fail_free:
edac_mc_free(mci);
+ return ret;
+}
+
+static void mce_check(struct mce *mce)
+{
+ struct ie31200_priv *priv;
+ int i;
+
+ for (i = 0; i < IE31200_IMC_NUM; i++) {
+ priv = ie31200_pvt.priv[i];
+ if (!priv)
+ continue;
+
+ __ie31200_check(priv->mci, mce);
+ }
+}
+
+static int mce_handler(struct notifier_block *nb, unsigned long val, void *data)
+{
+ struct mce *mce = (struct mce *)data;
+ char *type;
+
+ if (mce->kflags & MCE_HANDLED_CEC)
+ return NOTIFY_DONE;
+
+ /*
+ * Ignore unless this is a memory related error.
+ * Don't check MCI_STATUS_ADDRV since it's not set on some CPUs.
+ */
+ if ((mce->status & 0xefff) >> 7 != 1)
+ return NOTIFY_DONE;
+
+ type = mce->mcgstatus & MCG_STATUS_MCIP ? "Exception" : "Event";
+
+ edac_dbg(0, "CPU %d: Machine Check %s: 0x%llx Bank %d: 0x%llx\n",
+ mce->extcpu, type, mce->mcgstatus,
+ mce->bank, mce->status);
+ edac_dbg(0, "TSC 0x%llx\n", mce->tsc);
+ edac_dbg(0, "ADDR 0x%llx\n", mce->addr);
+ edac_dbg(0, "MISC 0x%llx\n", mce->misc);
+ edac_dbg(0, "PROCESSOR %u:0x%x TIME %llu SOCKET %u APIC 0x%x\n",
+ mce->cpuvendor, mce->cpuid, mce->time,
+ mce->socketid, mce->apicid);
+
+ mce_check(mce);
+ mce->kflags |= MCE_HANDLED_EDAC;
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ie31200_mce_dec = {
+ .notifier_call = mce_handler,
+ .priority = MCE_PRIO_EDAC,
+};
+
+static void ie31200_unregister_mcis(void)
+{
+ struct ie31200_priv *priv;
+ struct mem_ctl_info *mci;
+ int i;
+
+ for (i = 0; i < IE31200_IMC_NUM; i++) {
+ priv = ie31200_pvt.priv[i];
+ if (!priv)
+ continue;
+ mci = priv->mci;
+ edac_mc_del_mc(mci->pdev);
+ iounmap(priv->window);
+ edac_mc_free(mci);
+ }
+}
+
+static int ie31200_probe1(struct pci_dev *pdev, struct res_config *cfg)
+{
+ int i, ret;
+
+ edac_dbg(0, "MC:\n");
+
+ if (!ecc_capable(pdev)) {
+ ie31200_printk(KERN_INFO, "No ECC support\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < cfg->imc_num; i++) {
+ ret = ie31200_register_mci(pdev, cfg, i);
+ if (ret)
+ goto fail_register;
+ }
+
+ if (cfg->cmci) {
+ mce_register_decode_chain(&ie31200_mce_dec);
+ edac_op_state = EDAC_OPSTATE_INT;
+ } else {
+ edac_op_state = EDAC_OPSTATE_POLL;
+ }
+
+ /* get this far and it's successful. */
+ edac_dbg(3, "MC: success\n");
+ return 0;
+
+fail_register:
+ ie31200_unregister_mcis();
return ret;
}
@@ -555,7 +619,7 @@ static int ie31200_init_one(struct pci_dev *pdev,
edac_dbg(0, "MC:\n");
if (pci_enable_device(pdev) < 0)
return -EIO;
- rc = ie31200_probe1(pdev, ent->driver_data);
+ rc = ie31200_probe1(pdev, (struct res_config *)ent->driver_data);
if (rc == 0 && !mci_pdev)
mci_pdev = pci_dev_get(pdev);
@@ -564,43 +628,112 @@ static int ie31200_init_one(struct pci_dev *pdev,
static void ie31200_remove_one(struct pci_dev *pdev)
{
- struct mem_ctl_info *mci;
- struct ie31200_priv *priv;
+ struct ie31200_priv *priv = ie31200_pvt.priv[0];
edac_dbg(0, "\n");
pci_dev_put(mci_pdev);
mci_pdev = NULL;
- mci = edac_mc_del_mc(&pdev->dev);
- if (!mci)
- return;
- priv = mci->pvt_info;
- iounmap(priv->window);
- edac_mc_free(mci);
+ if (priv->cfg->cmci)
+ mce_unregister_decode_chain(&ie31200_mce_dec);
+ ie31200_unregister_mcis();
}
+static struct res_config snb_cfg = {
+ .mtype = MEM_DDR3,
+ .imc_num = 1,
+ .reg_mchbar_mask = GENMASK_ULL(38, 15),
+ .reg_mchbar_window_size = BIT_ULL(15),
+ .reg_eccerrlog_offset[0] = 0x40c8,
+ .reg_eccerrlog_offset[1] = 0x44c8,
+ .reg_eccerrlog_ce_mask = BIT_ULL(0),
+ .reg_eccerrlog_ue_mask = BIT_ULL(1),
+ .reg_eccerrlog_rank_mask = GENMASK_ULL(28, 27),
+ .reg_eccerrlog_syndrome_mask = GENMASK_ULL(23, 16),
+ .reg_mad_dimm_size_granularity = BIT_ULL(28),
+ .reg_mad_dimm_offset[0] = 0x5004,
+ .reg_mad_dimm_offset[1] = 0x5008,
+ .reg_mad_dimm_size_mask[0] = GENMASK(7, 0),
+ .reg_mad_dimm_size_mask[1] = GENMASK(15, 8),
+ .reg_mad_dimm_rank_mask[0] = BIT(17),
+ .reg_mad_dimm_rank_mask[1] = BIT(18),
+ .reg_mad_dimm_width_mask[0] = BIT(19),
+ .reg_mad_dimm_width_mask[1] = BIT(20),
+};
+
+static struct res_config skl_cfg = {
+ .mtype = MEM_DDR4,
+ .imc_num = 1,
+ .reg_mchbar_mask = GENMASK_ULL(38, 15),
+ .reg_mchbar_window_size = BIT_ULL(15),
+ .reg_eccerrlog_offset[0] = 0x4048,
+ .reg_eccerrlog_offset[1] = 0x4448,
+ .reg_eccerrlog_ce_mask = BIT_ULL(0),
+ .reg_eccerrlog_ue_mask = BIT_ULL(1),
+ .reg_eccerrlog_rank_mask = GENMASK_ULL(28, 27),
+ .reg_eccerrlog_syndrome_mask = GENMASK_ULL(23, 16),
+ .reg_mad_dimm_size_granularity = BIT_ULL(30),
+ .reg_mad_dimm_offset[0] = 0x500c,
+ .reg_mad_dimm_offset[1] = 0x5010,
+ .reg_mad_dimm_size_mask[0] = GENMASK(5, 0),
+ .reg_mad_dimm_size_mask[1] = GENMASK(21, 16),
+ .reg_mad_dimm_rank_mask[0] = BIT(10),
+ .reg_mad_dimm_rank_mask[1] = BIT(26),
+ .reg_mad_dimm_width_mask[0] = GENMASK(9, 8),
+ .reg_mad_dimm_width_mask[1] = GENMASK(25, 24),
+};
+
+struct res_config rpl_s_cfg = {
+ .mtype = MEM_DDR5,
+ .cmci = true,
+ .imc_num = 2,
+ .reg_mchbar_mask = GENMASK_ULL(41, 17),
+ .reg_mchbar_window_size = BIT_ULL(16),
+ .reg_eccerrlog_offset[0] = 0xe048,
+ .reg_eccerrlog_offset[1] = 0xe848,
+ .reg_eccerrlog_ce_mask = BIT_ULL(0),
+ .reg_eccerrlog_ce_ovfl_mask = BIT_ULL(1),
+ .reg_eccerrlog_ue_mask = BIT_ULL(2),
+ .reg_eccerrlog_ue_ovfl_mask = BIT_ULL(3),
+ .reg_eccerrlog_rank_mask = GENMASK_ULL(28, 27),
+ .reg_eccerrlog_syndrome_mask = GENMASK_ULL(23, 16),
+ .msr_clear_eccerrlog_offset = 0x791,
+ .reg_mad_dimm_offset[0] = 0xd80c,
+ .reg_mad_dimm_offset[1] = 0xd810,
+ .reg_mad_dimm_size_granularity = BIT_ULL(29),
+ .reg_mad_dimm_size_mask[0] = GENMASK(6, 0),
+ .reg_mad_dimm_size_mask[1] = GENMASK(22, 16),
+ .reg_mad_dimm_rank_mask[0] = GENMASK(10, 9),
+ .reg_mad_dimm_rank_mask[1] = GENMASK(27, 26),
+ .reg_mad_dimm_width_mask[0] = GENMASK(8, 7),
+ .reg_mad_dimm_width_mask[1] = GENMASK(25, 24),
+};
+
static const struct pci_device_id ie31200_pci_tbl[] = {
- { PCI_VEND_DEV(INTEL, IE31200_HB_1), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_2), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_3), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_4), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_5), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_6), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_7), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_8), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_9), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_10), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_11), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_12), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_1), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_2), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_3), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_4), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_5), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_6), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_7), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_8), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_9), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_10), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_1), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_2), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_3), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_4), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_5), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_6), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_7), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_8), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_9), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_10), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_11), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_12), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_1), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_2), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_3), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_4), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_5), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_6), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_7), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_8), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_9), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_10), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_1), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_2), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_3), (kernel_ulong_t)&rpl_s_cfg},
{ 0, } /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, ie31200_pci_tbl);
@@ -617,12 +750,10 @@ static int __init ie31200_init(void)
int pci_rc, i;
edac_dbg(3, "MC:\n");
- /* Ensure that the OPSTATE is set correctly for POLL or NMI */
- opstate_init();
pci_rc = pci_register_driver(&ie31200_driver);
if (pci_rc < 0)
- goto fail0;
+ return pci_rc;
if (!mci_pdev) {
ie31200_registered = 0;
@@ -633,11 +764,13 @@ static int __init ie31200_init(void)
if (mci_pdev)
break;
}
+
if (!mci_pdev) {
edac_dbg(0, "ie31200 pci_get_device fail\n");
pci_rc = -ENODEV;
- goto fail1;
+ goto fail0;
}
+
pci_rc = ie31200_init_one(mci_pdev, &ie31200_pci_tbl[i]);
if (pci_rc < 0) {
edac_dbg(0, "ie31200 init fail\n");
@@ -645,12 +778,12 @@ static int __init ie31200_init(void)
goto fail1;
}
}
- return 0;
+ return 0;
fail1:
- pci_unregister_driver(&ie31200_driver);
-fail0:
pci_dev_put(mci_pdev);
+fail0:
+ pci_unregister_driver(&ie31200_driver);
return pci_rc;
}
diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c
index fdf3a84fe698..5807517ee32d 100644
--- a/drivers/edac/igen6_edac.c
+++ b/drivers/edac/igen6_edac.c
@@ -125,7 +125,7 @@
#define MEM_SLICE_HASH_MASK(v) (GET_BITFIELD(v, 6, 19) << 6)
#define MEM_SLICE_HASH_LSB_MASK_BIT(v) GET_BITFIELD(v, 24, 26)
-static struct res_config {
+static const struct res_config {
bool machine_check;
int num_imc;
u32 imc_base;
@@ -472,7 +472,7 @@ static u64 rpl_p_err_addr(u64 ecclog)
return ECC_ERROR_LOG_ADDR45(ecclog);
}
-static struct res_config ehl_cfg = {
+static const struct res_config ehl_cfg = {
.num_imc = 1,
.imc_base = 0x5000,
.ibecc_base = 0xdc00,
@@ -482,7 +482,7 @@ static struct res_config ehl_cfg = {
.err_addr_to_imc_addr = ehl_err_addr_to_imc_addr,
};
-static struct res_config icl_cfg = {
+static const struct res_config icl_cfg = {
.num_imc = 1,
.imc_base = 0x5000,
.ibecc_base = 0xd800,
@@ -492,7 +492,7 @@ static struct res_config icl_cfg = {
.err_addr_to_imc_addr = ehl_err_addr_to_imc_addr,
};
-static struct res_config tgl_cfg = {
+static const struct res_config tgl_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0x5000,
@@ -506,7 +506,7 @@ static struct res_config tgl_cfg = {
.err_addr_to_imc_addr = tgl_err_addr_to_imc_addr,
};
-static struct res_config adl_cfg = {
+static const struct res_config adl_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0xd800,
@@ -517,7 +517,7 @@ static struct res_config adl_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
-static struct res_config adl_n_cfg = {
+static const struct res_config adl_n_cfg = {
.machine_check = true,
.num_imc = 1,
.imc_base = 0xd800,
@@ -528,7 +528,7 @@ static struct res_config adl_n_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
-static struct res_config rpl_p_cfg = {
+static const struct res_config rpl_p_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0xd800,
@@ -540,7 +540,7 @@ static struct res_config rpl_p_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
-static struct res_config mtl_ps_cfg = {
+static const struct res_config mtl_ps_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0xd800,
@@ -551,7 +551,7 @@ static struct res_config mtl_ps_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
-static struct res_config mtl_p_cfg = {
+static const struct res_config mtl_p_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0xd800,
@@ -785,13 +785,22 @@ static u64 ecclog_read_and_clear(struct igen6_imc *imc)
{
u64 ecclog = readq(imc->window + ECC_ERROR_LOG_OFFSET);
- if (ecclog & (ECC_ERROR_LOG_CE | ECC_ERROR_LOG_UE)) {
- /* Clear CE/UE bits by writing 1s */
- writeq(ecclog, imc->window + ECC_ERROR_LOG_OFFSET);
- return ecclog;
- }
+ /*
+ * Quirk: The ECC_ERROR_LOG register of certain SoCs may contain
+ * the invalid value ~0. This will result in a flood of invalid
+ * error reports in polling mode. Skip it.
+ */
+ if (ecclog == ~0)
+ return 0;
- return 0;
+ /* Neither a CE nor a UE. Skip it. */
+ if (!(ecclog & (ECC_ERROR_LOG_CE | ECC_ERROR_LOG_UE)))
+ return 0;
+
+ /* Clear CE/UE bits by writing 1s */
+ writeq(ecclog, imc->window + ECC_ERROR_LOG_OFFSET);
+
+ return ecclog;
}
static void errsts_clear(struct igen6_imc *imc)
@@ -1374,7 +1383,7 @@ static void unregister_err_handler(void)
unregister_nmi_handler(NMI_SERR, IGEN6_NMI_NAME);
}
-static void opstate_set(struct res_config *cfg, const struct pci_device_id *ent)
+static void opstate_set(const struct res_config *cfg, const struct pci_device_id *ent)
{
/*
* Quirk: Certain SoCs' error reporting interrupts don't work.
diff --git a/drivers/edac/mem_repair.c b/drivers/edac/mem_repair.c
new file mode 100755
index 000000000000..3b1a845457b0
--- /dev/null
+++ b/drivers/edac/mem_repair.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The generic EDAC memory repair driver is designed to control the memory
+ * devices with memory repair features, such as Post Package Repair (PPR),
+ * memory sparing etc. The common sysfs memory repair interface abstracts
+ * the control of various arbitrary memory repair functionalities into a
+ * unified set of functions.
+ *
+ * Copyright (c) 2024-2025 HiSilicon Limited.
+ */
+
+#include <linux/edac.h>
+
+enum edac_mem_repair_attributes {
+ MR_TYPE,
+ MR_PERSIST_MODE,
+ MR_SAFE_IN_USE,
+ MR_HPA,
+ MR_MIN_HPA,
+ MR_MAX_HPA,
+ MR_DPA,
+ MR_MIN_DPA,
+ MR_MAX_DPA,
+ MR_NIBBLE_MASK,
+ MR_BANK_GROUP,
+ MR_BANK,
+ MR_RANK,
+ MR_ROW,
+ MR_COLUMN,
+ MR_CHANNEL,
+ MR_SUB_CHANNEL,
+ MEM_DO_REPAIR,
+ MR_MAX_ATTRS
+};
+
+struct edac_mem_repair_dev_attr {
+ struct device_attribute dev_attr;
+ u8 instance;
+};
+
+struct edac_mem_repair_context {
+ char name[EDAC_FEAT_NAME_LEN];
+ struct edac_mem_repair_dev_attr mem_repair_dev_attr[MR_MAX_ATTRS];
+ struct attribute *mem_repair_attrs[MR_MAX_ATTRS + 1];
+ struct attribute_group group;
+};
+
+#define TO_MR_DEV_ATTR(_dev_attr) \
+ container_of(_dev_attr, struct edac_mem_repair_dev_attr, dev_attr)
+
+#define MR_ATTR_SHOW(attrib, cb, type, format) \
+static ssize_t attrib##_show(struct device *ras_feat_dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ u8 inst = TO_MR_DEV_ATTR(attr)->instance; \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_mem_repair_ops *ops = \
+ ctx->mem_repair[inst].mem_repair_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->mem_repair[inst].private, \
+ &data); \
+ if (ret) \
+ return ret; \
+ \
+ return sysfs_emit(buf, format, data); \
+}
+
+MR_ATTR_SHOW(repair_type, get_repair_type, const char *, "%s\n")
+MR_ATTR_SHOW(persist_mode, get_persist_mode, bool, "%u\n")
+MR_ATTR_SHOW(repair_safe_when_in_use, get_repair_safe_when_in_use, bool, "%u\n")
+MR_ATTR_SHOW(hpa, get_hpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(min_hpa, get_min_hpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(max_hpa, get_max_hpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(dpa, get_dpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(min_dpa, get_min_dpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(max_dpa, get_max_dpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(nibble_mask, get_nibble_mask, u32, "0x%x\n")
+MR_ATTR_SHOW(bank_group, get_bank_group, u32, "%u\n")
+MR_ATTR_SHOW(bank, get_bank, u32, "%u\n")
+MR_ATTR_SHOW(rank, get_rank, u32, "%u\n")
+MR_ATTR_SHOW(row, get_row, u32, "0x%x\n")
+MR_ATTR_SHOW(column, get_column, u32, "%u\n")
+MR_ATTR_SHOW(channel, get_channel, u32, "%u\n")
+MR_ATTR_SHOW(sub_channel, get_sub_channel, u32, "%u\n")
+
+#define MR_ATTR_STORE(attrib, cb, type, conv_func) \
+static ssize_t attrib##_store(struct device *ras_feat_dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ u8 inst = TO_MR_DEV_ATTR(attr)->instance; \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_mem_repair_ops *ops = \
+ ctx->mem_repair[inst].mem_repair_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = conv_func(buf, 0, &data); \
+ if (ret < 0) \
+ return ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->mem_repair[inst].private, \
+ data); \
+ if (ret) \
+ return ret; \
+ \
+ return len; \
+}
+
+MR_ATTR_STORE(persist_mode, set_persist_mode, unsigned long, kstrtoul)
+MR_ATTR_STORE(hpa, set_hpa, u64, kstrtou64)
+MR_ATTR_STORE(dpa, set_dpa, u64, kstrtou64)
+MR_ATTR_STORE(nibble_mask, set_nibble_mask, unsigned long, kstrtoul)
+MR_ATTR_STORE(bank_group, set_bank_group, unsigned long, kstrtoul)
+MR_ATTR_STORE(bank, set_bank, unsigned long, kstrtoul)
+MR_ATTR_STORE(rank, set_rank, unsigned long, kstrtoul)
+MR_ATTR_STORE(row, set_row, unsigned long, kstrtoul)
+MR_ATTR_STORE(column, set_column, unsigned long, kstrtoul)
+MR_ATTR_STORE(channel, set_channel, unsigned long, kstrtoul)
+MR_ATTR_STORE(sub_channel, set_sub_channel, unsigned long, kstrtoul)
+
+#define MR_DO_OP(attrib, cb) \
+static ssize_t attrib##_store(struct device *ras_feat_dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ u8 inst = TO_MR_DEV_ATTR(attr)->instance; \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_mem_repair_ops *ops = ctx->mem_repair[inst].mem_repair_ops; \
+ unsigned long data; \
+ int ret; \
+ \
+ ret = kstrtoul(buf, 0, &data); \
+ if (ret < 0) \
+ return ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->mem_repair[inst].private, data); \
+ if (ret) \
+ return ret; \
+ \
+ return len; \
+}
+
+MR_DO_OP(repair, do_repair)
+
+static umode_t mem_repair_attr_visible(struct kobject *kobj, struct attribute *a, int attr_id)
+{
+ struct device *ras_feat_dev = kobj_to_dev(kobj);
+ struct device_attribute *dev_attr = container_of(a, struct device_attribute, attr);
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
+ u8 inst = TO_MR_DEV_ATTR(dev_attr)->instance;
+ const struct edac_mem_repair_ops *ops = ctx->mem_repair[inst].mem_repair_ops;
+
+ switch (attr_id) {
+ case MR_TYPE:
+ if (ops->get_repair_type)
+ return a->mode;
+ break;
+ case MR_PERSIST_MODE:
+ if (ops->get_persist_mode) {
+ if (ops->set_persist_mode)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_SAFE_IN_USE:
+ if (ops->get_repair_safe_when_in_use)
+ return a->mode;
+ break;
+ case MR_HPA:
+ if (ops->get_hpa) {
+ if (ops->set_hpa)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_MIN_HPA:
+ if (ops->get_min_hpa)
+ return a->mode;
+ break;
+ case MR_MAX_HPA:
+ if (ops->get_max_hpa)
+ return a->mode;
+ break;
+ case MR_DPA:
+ if (ops->get_dpa) {
+ if (ops->set_dpa)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_MIN_DPA:
+ if (ops->get_min_dpa)
+ return a->mode;
+ break;
+ case MR_MAX_DPA:
+ if (ops->get_max_dpa)
+ return a->mode;
+ break;
+ case MR_NIBBLE_MASK:
+ if (ops->get_nibble_mask) {
+ if (ops->set_nibble_mask)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_BANK_GROUP:
+ if (ops->get_bank_group) {
+ if (ops->set_bank_group)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_BANK:
+ if (ops->get_bank) {
+ if (ops->set_bank)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_RANK:
+ if (ops->get_rank) {
+ if (ops->set_rank)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_ROW:
+ if (ops->get_row) {
+ if (ops->set_row)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_COLUMN:
+ if (ops->get_column) {
+ if (ops->set_column)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_CHANNEL:
+ if (ops->get_channel) {
+ if (ops->set_channel)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_SUB_CHANNEL:
+ if (ops->get_sub_channel) {
+ if (ops->set_sub_channel)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MEM_DO_REPAIR:
+ if (ops->do_repair)
+ return a->mode;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define MR_ATTR_RO(_name, _instance) \
+ ((struct edac_mem_repair_dev_attr) { .dev_attr = __ATTR_RO(_name), \
+ .instance = _instance })
+
+#define MR_ATTR_WO(_name, _instance) \
+ ((struct edac_mem_repair_dev_attr) { .dev_attr = __ATTR_WO(_name), \
+ .instance = _instance })
+
+#define MR_ATTR_RW(_name, _instance) \
+ ((struct edac_mem_repair_dev_attr) { .dev_attr = __ATTR_RW(_name), \
+ .instance = _instance })
+
+static int mem_repair_create_desc(struct device *dev,
+ const struct attribute_group **attr_groups,
+ u8 instance)
+{
+ struct edac_mem_repair_context *ctx;
+ struct attribute_group *group;
+ int i;
+ struct edac_mem_repair_dev_attr dev_attr[] = {
+ [MR_TYPE] = MR_ATTR_RO(repair_type, instance),
+ [MR_PERSIST_MODE] = MR_ATTR_RW(persist_mode, instance),
+ [MR_SAFE_IN_USE] = MR_ATTR_RO(repair_safe_when_in_use, instance),
+ [MR_HPA] = MR_ATTR_RW(hpa, instance),
+ [MR_MIN_HPA] = MR_ATTR_RO(min_hpa, instance),
+ [MR_MAX_HPA] = MR_ATTR_RO(max_hpa, instance),
+ [MR_DPA] = MR_ATTR_RW(dpa, instance),
+ [MR_MIN_DPA] = MR_ATTR_RO(min_dpa, instance),
+ [MR_MAX_DPA] = MR_ATTR_RO(max_dpa, instance),
+ [MR_NIBBLE_MASK] = MR_ATTR_RW(nibble_mask, instance),
+ [MR_BANK_GROUP] = MR_ATTR_RW(bank_group, instance),
+ [MR_BANK] = MR_ATTR_RW(bank, instance),
+ [MR_RANK] = MR_ATTR_RW(rank, instance),
+ [MR_ROW] = MR_ATTR_RW(row, instance),
+ [MR_COLUMN] = MR_ATTR_RW(column, instance),
+ [MR_CHANNEL] = MR_ATTR_RW(channel, instance),
+ [MR_SUB_CHANNEL] = MR_ATTR_RW(sub_channel, instance),
+ [MEM_DO_REPAIR] = MR_ATTR_WO(repair, instance)
+ };
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ for (i = 0; i < MR_MAX_ATTRS; i++) {
+ memcpy(&ctx->mem_repair_dev_attr[i],
+ &dev_attr[i], sizeof(dev_attr[i]));
+ ctx->mem_repair_attrs[i] =
+ &ctx->mem_repair_dev_attr[i].dev_attr.attr;
+ }
+
+ sprintf(ctx->name, "%s%d", "mem_repair", instance);
+ group = &ctx->group;
+ group->name = ctx->name;
+ group->attrs = ctx->mem_repair_attrs;
+ group->is_visible = mem_repair_attr_visible;
+ attr_groups[0] = group;
+
+ return 0;
+}
+
+/**
+ * edac_mem_repair_get_desc - get EDAC memory repair descriptors
+ * @dev: client device with memory repair feature
+ * @attr_groups: pointer to attribute group container
+ * @instance: device's memory repair instance number.
+ *
+ * Return:
+ * * %0 - Success.
+ * * %-EINVAL - Invalid parameters passed.
+ * * %-ENOMEM - Dynamic memory allocation failed.
+ */
+int edac_mem_repair_get_desc(struct device *dev,
+ const struct attribute_group **attr_groups, u8 instance)
+{
+ if (!dev || !attr_groups)
+ return -EINVAL;
+
+ return mem_repair_create_desc(dev, attr_groups, instance);
+}
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index f93f2f2b1cf2..af14c8a3279f 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -372,7 +372,7 @@ static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
struct b_cr_asym_mem_region1_mchbar *as1,
struct b_cr_asym_2way_mem_region_mchbar *as2way)
{
- const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
+ static const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
int mask = 0;
if (as2way->asym_2way_interleave_enable)
@@ -489,7 +489,7 @@ static int dnv_get_registers(void)
*/
static int get_registers(void)
{
- const int intlv[] = { 10, 11, 12, 12 };
+ static const int intlv[] = { 10, 11, 12, 12 };
if (RD_REG(&tolud, b_cr_tolud_pci) ||
RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
diff --git a/drivers/edac/scrub.c b/drivers/edac/scrub.c
new file mode 100755
index 000000000000..e421d3ebd959
--- /dev/null
+++ b/drivers/edac/scrub.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The generic EDAC scrub driver controls the memory scrubbers in the
+ * system. The common sysfs scrub interface abstracts the control of
+ * various arbitrary scrubbing functionalities into a unified set of
+ * functions.
+ *
+ * Copyright (c) 2024-2025 HiSilicon Limited.
+ */
+
+#include <linux/edac.h>
+
+enum edac_scrub_attributes {
+ SCRUB_ADDRESS,
+ SCRUB_SIZE,
+ SCRUB_ENABLE_BACKGROUND,
+ SCRUB_MIN_CYCLE_DURATION,
+ SCRUB_MAX_CYCLE_DURATION,
+ SCRUB_CUR_CYCLE_DURATION,
+ SCRUB_MAX_ATTRS
+};
+
+struct edac_scrub_dev_attr {
+ struct device_attribute dev_attr;
+ u8 instance;
+};
+
+struct edac_scrub_context {
+ char name[EDAC_FEAT_NAME_LEN];
+ struct edac_scrub_dev_attr scrub_dev_attr[SCRUB_MAX_ATTRS];
+ struct attribute *scrub_attrs[SCRUB_MAX_ATTRS + 1];
+ struct attribute_group group;
+};
+
+#define TO_SCRUB_DEV_ATTR(_dev_attr) \
+ container_of(_dev_attr, struct edac_scrub_dev_attr, dev_attr)
+
+#define EDAC_SCRUB_ATTR_SHOW(attrib, cb, type, format) \
+static ssize_t attrib##_show(struct device *ras_feat_dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ u8 inst = TO_SCRUB_DEV_ATTR(attr)->instance; \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_scrub_ops *ops = ctx->scrub[inst].scrub_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->scrub[inst].private, &data); \
+ if (ret) \
+ return ret; \
+ \
+ return sysfs_emit(buf, format, data); \
+}
+
+EDAC_SCRUB_ATTR_SHOW(addr, read_addr, u64, "0x%llx\n")
+EDAC_SCRUB_ATTR_SHOW(size, read_size, u64, "0x%llx\n")
+EDAC_SCRUB_ATTR_SHOW(enable_background, get_enabled_bg, bool, "%u\n")
+EDAC_SCRUB_ATTR_SHOW(min_cycle_duration, get_min_cycle, u32, "%u\n")
+EDAC_SCRUB_ATTR_SHOW(max_cycle_duration, get_max_cycle, u32, "%u\n")
+EDAC_SCRUB_ATTR_SHOW(current_cycle_duration, get_cycle_duration, u32, "%u\n")
+
+#define EDAC_SCRUB_ATTR_STORE(attrib, cb, type, conv_func) \
+static ssize_t attrib##_store(struct device *ras_feat_dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ u8 inst = TO_SCRUB_DEV_ATTR(attr)->instance; \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_scrub_ops *ops = ctx->scrub[inst].scrub_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = conv_func(buf, 0, &data); \
+ if (ret < 0) \
+ return ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->scrub[inst].private, data); \
+ if (ret) \
+ return ret; \
+ \
+ return len; \
+}
+
+EDAC_SCRUB_ATTR_STORE(addr, write_addr, u64, kstrtou64)
+EDAC_SCRUB_ATTR_STORE(size, write_size, u64, kstrtou64)
+EDAC_SCRUB_ATTR_STORE(enable_background, set_enabled_bg, unsigned long, kstrtoul)
+EDAC_SCRUB_ATTR_STORE(current_cycle_duration, set_cycle_duration, unsigned long, kstrtoul)
+
+static umode_t scrub_attr_visible(struct kobject *kobj, struct attribute *a, int attr_id)
+{
+ struct device *ras_feat_dev = kobj_to_dev(kobj);
+ struct device_attribute *dev_attr = container_of(a, struct device_attribute, attr);
+ u8 inst = TO_SCRUB_DEV_ATTR(dev_attr)->instance;
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
+ const struct edac_scrub_ops *ops = ctx->scrub[inst].scrub_ops;
+
+ switch (attr_id) {
+ case SCRUB_ADDRESS:
+ if (ops->read_addr) {
+ if (ops->write_addr)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case SCRUB_SIZE:
+ if (ops->read_size) {
+ if (ops->write_size)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case SCRUB_ENABLE_BACKGROUND:
+ if (ops->get_enabled_bg) {
+ if (ops->set_enabled_bg)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case SCRUB_MIN_CYCLE_DURATION:
+ if (ops->get_min_cycle)
+ return a->mode;
+ break;
+ case SCRUB_MAX_CYCLE_DURATION:
+ if (ops->get_max_cycle)
+ return a->mode;
+ break;
+ case SCRUB_CUR_CYCLE_DURATION:
+ if (ops->get_cycle_duration) {
+ if (ops->set_cycle_duration)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define EDAC_SCRUB_ATTR_RO(_name, _instance) \
+ ((struct edac_scrub_dev_attr) { .dev_attr = __ATTR_RO(_name), \
+ .instance = _instance })
+
+#define EDAC_SCRUB_ATTR_WO(_name, _instance) \
+ ((struct edac_scrub_dev_attr) { .dev_attr = __ATTR_WO(_name), \
+ .instance = _instance })
+
+#define EDAC_SCRUB_ATTR_RW(_name, _instance) \
+ ((struct edac_scrub_dev_attr) { .dev_attr = __ATTR_RW(_name), \
+ .instance = _instance })
+
+static int scrub_create_desc(struct device *scrub_dev,
+ const struct attribute_group **attr_groups, u8 instance)
+{
+ struct edac_scrub_context *scrub_ctx;
+ struct attribute_group *group;
+ int i;
+ struct edac_scrub_dev_attr dev_attr[] = {
+ [SCRUB_ADDRESS] = EDAC_SCRUB_ATTR_RW(addr, instance),
+ [SCRUB_SIZE] = EDAC_SCRUB_ATTR_RW(size, instance),
+ [SCRUB_ENABLE_BACKGROUND] = EDAC_SCRUB_ATTR_RW(enable_background, instance),
+ [SCRUB_MIN_CYCLE_DURATION] = EDAC_SCRUB_ATTR_RO(min_cycle_duration, instance),
+ [SCRUB_MAX_CYCLE_DURATION] = EDAC_SCRUB_ATTR_RO(max_cycle_duration, instance),
+ [SCRUB_CUR_CYCLE_DURATION] = EDAC_SCRUB_ATTR_RW(current_cycle_duration, instance)
+ };
+
+ scrub_ctx = devm_kzalloc(scrub_dev, sizeof(*scrub_ctx), GFP_KERNEL);
+ if (!scrub_ctx)
+ return -ENOMEM;
+
+ group = &scrub_ctx->group;
+ for (i = 0; i < SCRUB_MAX_ATTRS; i++) {
+ memcpy(&scrub_ctx->scrub_dev_attr[i], &dev_attr[i], sizeof(dev_attr[i]));
+ scrub_ctx->scrub_attrs[i] = &scrub_ctx->scrub_dev_attr[i].dev_attr.attr;
+ }
+ sprintf(scrub_ctx->name, "%s%d", "scrub", instance);
+ group->name = scrub_ctx->name;
+ group->attrs = scrub_ctx->scrub_attrs;
+ group->is_visible = scrub_attr_visible;
+
+ attr_groups[0] = group;
+
+ return 0;
+}
+
+/**
+ * edac_scrub_get_desc - get EDAC scrub descriptors
+ * @scrub_dev: client device, with scrub support
+ * @attr_groups: pointer to attribute group container
+ * @instance: device's scrub instance number.
+ *
+ * Return:
+ * * %0 - Success.
+ * * %-EINVAL - Invalid parameters passed.
+ * * %-ENOMEM - Dynamic memory allocation failed.
+ */
+int edac_scrub_get_desc(struct device *scrub_dev,
+ const struct attribute_group **attr_groups, u8 instance)
+{
+ if (!scrub_dev || !attr_groups)
+ return -EINVAL;
+
+ return scrub_create_desc(scrub_dev, attr_groups, instance);
+}
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index f7bd930e058f..fa5b442b1844 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -121,6 +121,35 @@ void skx_adxl_put(void)
}
EXPORT_SYMBOL_GPL(skx_adxl_put);
+static void skx_init_mc_mapping(struct skx_dev *d)
+{
+ /*
+ * By default, the BIOS presents all memory controllers within each
+ * socket to the EDAC driver. The physical indices are the same as
+ * the logical indices of the memory controllers enumerated by the
+ * EDAC driver.
+ */
+ for (int i = 0; i < NUM_IMC; i++)
+ d->mc_mapping[i] = i;
+}
+
+void skx_set_mc_mapping(struct skx_dev *d, u8 pmc, u8 lmc)
+{
+ edac_dbg(0, "Set the mapping of mc phy idx to logical idx: %02d -> %02d\n",
+ pmc, lmc);
+
+ d->mc_mapping[pmc] = lmc;
+}
+EXPORT_SYMBOL_GPL(skx_set_mc_mapping);
+
+static u8 skx_get_mc_mapping(struct skx_dev *d, u8 pmc)
+{
+ edac_dbg(0, "Get the mapping of mc phy idx to logical idx: %02d -> %02d\n",
+ pmc, d->mc_mapping[pmc]);
+
+ return d->mc_mapping[pmc];
+}
+
static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src)
{
struct skx_dev *d;
@@ -188,6 +217,8 @@ static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src)
return false;
}
+ res->imc = skx_get_mc_mapping(d, res->imc);
+
for (i = 0; i < adxl_component_count; i++) {
if (adxl_values[i] == ~0x0ull)
continue;
@@ -326,6 +357,8 @@ int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
d->bus[0], d->bus[1], d->bus[2], d->bus[3]);
list_add_tail(&d->list, &dev_edac_list);
prev = pdev;
+
+ skx_init_mc_mapping(d);
}
if (list)
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index b0845bdd4516..ca5408803f87 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -93,6 +93,16 @@ struct skx_dev {
struct pci_dev *uracu; /* for i10nm CPU */
struct pci_dev *pcu_cr3; /* for HBM memory detection */
u32 mcroute;
+ /*
+ * Some server BIOS may hide certain memory controllers, and the
+ * EDAC driver skips those hidden memory controllers. However, the
+ * ADXL still decodes memory error addresses using physical memory
+ * controller indices. The mapping table is used to convert the
+ * physical indices (reported by ADXL) to the logical indices
+ * (used by the EDAC driver) of present memory controllers during the
+ * error handling process.
+ */
+ u8 mc_mapping[NUM_IMC];
struct skx_imc {
struct mem_ctl_info *mci;
struct pci_dev *mdev; /* for i10nm CPU */
@@ -242,6 +252,7 @@ void skx_adxl_put(void);
void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log);
void skx_set_mem_cfg(bool mem_cfg_2lm);
void skx_set_res_cfg(struct res_config *cfg);
+void skx_set_mc_mapping(struct skx_dev *d, u8 pmc, u8 lmc);
int skx_get_src_id(struct skx_dev *d, int off, u8 *id);
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index 699c7d29d80c..9955396c9a52 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -15,6 +15,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/regmap.h>
+#include <linux/string_choices.h>
#include "edac_module.h"
@@ -1407,7 +1408,7 @@ static void xgene_edac_iob_gic_report(struct edac_device_ctl_info *edac_dev)
dev_err(edac_dev->dev, "Multiple XGIC write size error\n");
info = readl(ctx->dev_csr + XGICTRANSERRREQINFO);
dev_err(edac_dev->dev, "XGIC %s access @ 0x%08X (0x%08X)\n",
- info & REQTYPE_MASK ? "read" : "write", ERRADDR_RD(info),
+ str_read_write(info & REQTYPE_MASK), ERRADDR_RD(info),
info);
writel(reg, ctx->dev_csr + XGICTRANSERRINTSTS);
@@ -1489,19 +1490,19 @@ static void xgene_edac_rb_report(struct edac_device_ctl_info *edac_dev)
if (reg & AGENT_OFFLINE_ERR_MASK)
dev_err(edac_dev->dev,
"IOB bus %s access to offline agent error\n",
- write ? "write" : "read");
+ str_write_read(write));
if (reg & UNIMPL_RBPAGE_ERR_MASK)
dev_err(edac_dev->dev,
"IOB bus %s access to unimplemented page error\n",
- write ? "write" : "read");
+ str_write_read(write));
if (reg & WORD_ALIGNED_ERR_MASK)
dev_err(edac_dev->dev,
"IOB bus %s word aligned access error\n",
- write ? "write" : "read");
+ str_write_read(write));
if (reg & PAGE_ACCESS_ERR_MASK)
dev_err(edac_dev->dev,
"IOB bus %s to page out of range access error\n",
- write ? "write" : "read");
+ str_write_read(write));
if (regmap_write(ctx->edac->rb_map, RBEIR, 0))
return;
if (regmap_write(ctx->edac->rb_map, RBCSR, 0))
@@ -1560,7 +1561,7 @@ rb_skip:
err_addr_lo = readl(ctx->dev_csr + IOBBATRANSERRREQINFOL);
err_addr_hi = readl(ctx->dev_csr + IOBBATRANSERRREQINFOH);
dev_err(edac_dev->dev, "IOB BA %s access at 0x%02X.%08X (0x%08X)\n",
- REQTYPE_F2_RD(err_addr_hi) ? "read" : "write",
+ str_read_write(REQTYPE_F2_RD(err_addr_hi)),
ERRADDRH_F2_RD(err_addr_hi), err_addr_lo, err_addr_hi);
if (reg & WRERR_RESP_MASK)
dev_err(edac_dev->dev, "IOB BA requestor ID 0x%08X\n",
@@ -1611,7 +1612,7 @@ chk_iob_axi0:
dev_err(edac_dev->dev,
"%sAXI slave 0 illegal %s access @ 0x%02X.%08X (0x%08X)\n",
reg & IOBAXIS0_M_ILLEGAL_ACCESS_MASK ? "Multiple " : "",
- REQTYPE_RD(err_addr_hi) ? "read" : "write",
+ str_read_write(REQTYPE_RD(err_addr_hi)),
ERRADDRH_RD(err_addr_hi), err_addr_lo, err_addr_hi);
writel(reg, ctx->dev_csr + IOBAXIS0TRANSERRINTSTS);
@@ -1625,7 +1626,7 @@ chk_iob_axi1:
dev_err(edac_dev->dev,
"%sAXI slave 1 illegal %s access @ 0x%02X.%08X (0x%08X)\n",
reg & IOBAXIS0_M_ILLEGAL_ACCESS_MASK ? "Multiple " : "",
- REQTYPE_RD(err_addr_hi) ? "read" : "write",
+ str_read_write(REQTYPE_RD(err_addr_hi)),
ERRADDRH_RD(err_addr_hi), err_addr_lo, err_addr_hi);
writel(reg, ctx->dev_csr + IOBAXIS1TRANSERRINTSTS);
}
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 9f35f69e0f9e..aadc395ee168 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -212,9 +212,20 @@ config SYSFB_SIMPLEFB
If unsure, say Y.
+config TH1520_AON_PROTOCOL
+ tristate "Always-On firmware protocol"
+ depends on ARCH_THEAD || COMPILE_TEST
+ depends on MAILBOX
+ help
+ Power, clock, and resource management capabilities on the TH1520 SoC are
+ managed by the E902 core. Firmware running on this core communicates with
+ the kernel through the Always-On protocol, using hardware mailbox as a medium.
+ Say yes if you need such capabilities.
+
config TI_SCI_PROTOCOL
tristate "TI System Control Interface (TISCI) Message Protocol"
depends on TI_MESSAGE_MANAGER
+ default ARCH_K3
help
TI System Control Interface (TISCI) Message Protocol is used to manage
compute systems such as ARM, DSP etc with the system controller in
@@ -267,6 +278,7 @@ source "drivers/firmware/meson/Kconfig"
source "drivers/firmware/microchip/Kconfig"
source "drivers/firmware/psci/Kconfig"
source "drivers/firmware/qcom/Kconfig"
+source "drivers/firmware/samsung/Kconfig"
source "drivers/firmware/smccc/Kconfig"
source "drivers/firmware/tegra/Kconfig"
source "drivers/firmware/xilinx/Kconfig"
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 7a8d486e718f..4ddec2820c96 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_RASPBERRYPI_FIRMWARE) += raspberrypi.o
obj-$(CONFIG_FW_CFG_SYSFS) += qemu_fw_cfg.o
obj-$(CONFIG_SYSFB) += sysfb.o
obj-$(CONFIG_SYSFB_SIMPLEFB) += sysfb_simplefb.o
+obj-$(CONFIG_TH1520_AON_PROTOCOL) += thead,th1520-aon.o
obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o
obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o
obj-$(CONFIG_TURRIS_MOX_RWTM) += turris-mox-rwtm.o
@@ -33,6 +34,7 @@ obj-y += efi/
obj-y += imx/
obj-y += psci/
obj-y += qcom/
+obj-y += samsung/
obj-y += smccc/
obj-y += tegra/
obj-y += xilinx/
diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
index dfda5ffc14db..50bfe56c755e 100644
--- a/drivers/firmware/arm_ffa/bus.c
+++ b/drivers/firmware/arm_ffa/bus.c
@@ -15,7 +15,7 @@
#include "common.h"
-#define SCMI_UEVENT_MODALIAS_FMT "arm_ffa:%04x:%pUb"
+#define FFA_UEVENT_MODALIAS_FMT "arm_ffa:%04x:%pUb"
static DEFINE_IDA(ffa_bus_id);
@@ -68,7 +68,7 @@ static int ffa_device_uevent(const struct device *dev, struct kobj_uevent_env *e
{
const struct ffa_device *ffa_dev = to_ffa_dev(dev);
- return add_uevent_var(env, "MODALIAS=" SCMI_UEVENT_MODALIAS_FMT,
+ return add_uevent_var(env, "MODALIAS=" FFA_UEVENT_MODALIAS_FMT,
ffa_dev->vm_id, &ffa_dev->uuid);
}
@@ -77,7 +77,7 @@ static ssize_t modalias_show(struct device *dev,
{
struct ffa_device *ffa_dev = to_ffa_dev(dev);
- return sysfs_emit(buf, SCMI_UEVENT_MODALIAS_FMT, ffa_dev->vm_id,
+ return sysfs_emit(buf, FFA_UEVENT_MODALIAS_FMT, ffa_dev->vm_id,
&ffa_dev->uuid);
}
static DEVICE_ATTR_RO(modalias);
@@ -160,11 +160,12 @@ static int __ffa_devices_unregister(struct device *dev, void *data)
return 0;
}
-static void ffa_devices_unregister(void)
+void ffa_devices_unregister(void)
{
bus_for_each_dev(&ffa_bus_type, NULL, NULL,
__ffa_devices_unregister);
}
+EXPORT_SYMBOL_GPL(ffa_devices_unregister);
bool ffa_device_is_valid(struct ffa_device *ffa_dev)
{
@@ -192,7 +193,6 @@ ffa_device_register(const struct ffa_partition_info *part_info,
const struct ffa_ops *ops)
{
int id, ret;
- uuid_t uuid;
struct device *dev;
struct ffa_device *ffa_dev;
@@ -212,14 +212,14 @@ ffa_device_register(const struct ffa_partition_info *part_info,
dev = &ffa_dev->dev;
dev->bus = &ffa_bus_type;
dev->release = ffa_release_device;
+ dev->dma_mask = &dev->coherent_dma_mask;
dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);
ffa_dev->id = id;
ffa_dev->vm_id = part_info->id;
ffa_dev->properties = part_info->properties;
ffa_dev->ops = ops;
- import_uuid(&uuid, (u8 *)part_info->uuid);
- uuid_copy(&ffa_dev->uuid, &uuid);
+ uuid_copy(&ffa_dev->uuid, &part_info->uuid);
ret = device_register(&ffa_dev->dev);
if (ret) {
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index 2c2ec3c35f15..19295282de24 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -44,7 +44,7 @@
#include "common.h"
-#define FFA_DRIVER_VERSION FFA_VERSION_1_1
+#define FFA_DRIVER_VERSION FFA_VERSION_1_2
#define FFA_MIN_VERSION FFA_VERSION_1_0
#define SENDER_ID_MASK GENMASK(31, 16)
@@ -114,7 +114,6 @@ struct ffa_drv_info {
};
static struct ffa_drv_info *drv_info;
-static void ffa_partitions_cleanup(void);
/*
* The driver must be able to support all the versions from the earliest
@@ -145,11 +144,19 @@ static int ffa_version_check(u32 *version)
.a0 = FFA_VERSION, .a1 = FFA_DRIVER_VERSION,
}, &ver);
- if (ver.a0 == FFA_RET_NOT_SUPPORTED) {
+ if ((s32)ver.a0 == FFA_RET_NOT_SUPPORTED) {
pr_info("FFA_VERSION returned not supported\n");
return -EOPNOTSUPP;
}
+ if (FFA_MAJOR_VERSION(ver.a0) > FFA_MAJOR_VERSION(FFA_DRIVER_VERSION)) {
+ pr_err("Incompatible v%d.%d! Latest supported v%d.%d\n",
+ FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
+ FFA_MAJOR_VERSION(FFA_DRIVER_VERSION),
+ FFA_MINOR_VERSION(FFA_DRIVER_VERSION));
+ return -EINVAL;
+ }
+
if (ver.a0 < FFA_MIN_VERSION) {
pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n",
FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
@@ -276,9 +283,21 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
}
if (buffer && count <= num_partitions)
- for (idx = 0; idx < count; idx++)
- memcpy(buffer + idx, drv_info->rx_buffer + idx * sz,
- buf_sz);
+ for (idx = 0; idx < count; idx++) {
+ struct ffa_partition_info_le {
+ __le16 id;
+ __le16 exec_ctxt;
+ __le32 properties;
+ uuid_t uuid;
+ } *rx_buf = drv_info->rx_buffer + idx * sz;
+ struct ffa_partition_info *buf = buffer + idx;
+
+ buf->id = le16_to_cpu(rx_buf->id);
+ buf->exec_ctxt = le16_to_cpu(rx_buf->exec_ctxt);
+ buf->properties = le32_to_cpu(rx_buf->properties);
+ if (buf_sz > 8)
+ import_uuid(&buf->uuid, (u8 *)&rx_buf->uuid);
+ }
ffa_rx_release();
@@ -295,14 +314,24 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
#define CURRENT_INDEX(x) ((u16)(FIELD_GET(CURRENT_INDEX_MASK, (x))))
#define UUID_INFO_TAG(x) ((u16)(FIELD_GET(UUID_INFO_TAG_MASK, (x))))
#define PARTITION_INFO_SZ(x) ((u16)(FIELD_GET(PARTITION_INFO_SZ_MASK, (x))))
+#define PART_INFO_ID_MASK GENMASK(15, 0)
+#define PART_INFO_EXEC_CXT_MASK GENMASK(31, 16)
+#define PART_INFO_PROPS_MASK GENMASK(63, 32)
+#define PART_INFO_ID(x) ((u16)(FIELD_GET(PART_INFO_ID_MASK, (x))))
+#define PART_INFO_EXEC_CXT(x) ((u16)(FIELD_GET(PART_INFO_EXEC_CXT_MASK, (x))))
+#define PART_INFO_PROPERTIES(x) ((u32)(FIELD_GET(PART_INFO_PROPS_MASK, (x))))
static int
__ffa_partition_info_get_regs(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
struct ffa_partition_info *buffer, int num_parts)
{
u16 buf_sz, start_idx, cur_idx, count = 0, prev_idx = 0, tag = 0;
+ struct ffa_partition_info *buf = buffer;
ffa_value_t partition_info;
do {
+ __le64 *regs;
+ int idx;
+
start_idx = prev_idx ? prev_idx + 1 : 0;
invoke_ffa_fn((ffa_value_t){
@@ -326,8 +355,25 @@ __ffa_partition_info_get_regs(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
if (buf_sz > sizeof(*buffer))
buf_sz = sizeof(*buffer);
- memcpy(buffer + prev_idx * buf_sz, &partition_info.a3,
- (cur_idx - start_idx + 1) * buf_sz);
+ regs = (void *)&partition_info.a3;
+ for (idx = 0; idx < cur_idx - start_idx + 1; idx++, buf++) {
+ union {
+ uuid_t uuid;
+ u64 regs[2];
+ } uuid_regs = {
+ .regs = {
+ le64_to_cpu(*(regs + 1)),
+ le64_to_cpu(*(regs + 2)),
+ }
+ };
+ u64 val = *(u64 *)regs;
+
+ buf->id = PART_INFO_ID(val);
+ buf->exec_ctxt = PART_INFO_EXEC_CXT(val);
+ buf->properties = PART_INFO_PROPERTIES(val);
+ uuid_copy(&buf->uuid, &uuid_regs.uuid);
+ regs += 3;
+ }
prev_idx = cur_idx;
} while (cur_idx < (count - 1));
@@ -445,9 +491,9 @@ static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit,
return -EINVAL;
}
-static int ffa_msg_send2(u16 src_id, u16 dst_id, void *buf, size_t sz)
+static int ffa_msg_send2(struct ffa_device *dev, u16 src_id, void *buf, size_t sz)
{
- u32 src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
+ u32 src_dst_ids = PACK_TARGET_INFO(src_id, dev->vm_id);
struct ffa_indirect_msg_hdr *msg;
ffa_value_t ret;
int retval = 0;
@@ -463,6 +509,7 @@ static int ffa_msg_send2(u16 src_id, u16 dst_id, void *buf, size_t sz)
msg->offset = sizeof(*msg);
msg->send_recv_id = src_dst_ids;
msg->size = sz;
+ uuid_copy(&msg->uuid, &dev->uuid);
memcpy((u8 *)msg + msg->offset, buf, sz);
/* flags = 0, sender VMID = 0 works for both physical/virtual NS */
@@ -760,6 +807,13 @@ static int ffa_notification_bitmap_destroy(void)
return 0;
}
+enum notify_type {
+ SECURE_PARTITION,
+ NON_SECURE_VM,
+ SPM_FRAMEWORK,
+ NS_HYP_FRAMEWORK,
+};
+
#define NOTIFICATION_LOW_MASK GENMASK(31, 0)
#define NOTIFICATION_HIGH_MASK GENMASK(63, 32)
#define NOTIFICATION_BITMAP_HIGH(x) \
@@ -783,10 +837,22 @@ static int ffa_notification_bitmap_destroy(void)
#define MAX_IDS_32 10
#define PER_VCPU_NOTIFICATION_FLAG BIT(0)
-#define SECURE_PARTITION_BITMAP BIT(0)
-#define NON_SECURE_VM_BITMAP BIT(1)
-#define SPM_FRAMEWORK_BITMAP BIT(2)
-#define NS_HYP_FRAMEWORK_BITMAP BIT(3)
+#define SECURE_PARTITION_BITMAP_ENABLE BIT(SECURE_PARTITION)
+#define NON_SECURE_VM_BITMAP_ENABLE BIT(NON_SECURE_VM)
+#define SPM_FRAMEWORK_BITMAP_ENABLE BIT(SPM_FRAMEWORK)
+#define NS_HYP_FRAMEWORK_BITMAP_ENABLE BIT(NS_HYP_FRAMEWORK)
+#define FFA_BITMAP_SECURE_ENABLE_MASK \
+ (SECURE_PARTITION_BITMAP_ENABLE | SPM_FRAMEWORK_BITMAP_ENABLE)
+#define FFA_BITMAP_NS_ENABLE_MASK \
+ (NON_SECURE_VM_BITMAP_ENABLE | NS_HYP_FRAMEWORK_BITMAP_ENABLE)
+#define FFA_BITMAP_ALL_ENABLE_MASK \
+ (FFA_BITMAP_SECURE_ENABLE_MASK | FFA_BITMAP_NS_ENABLE_MASK)
+
+#define FFA_SECURE_PARTITION_ID_FLAG BIT(15)
+
+#define SPM_FRAMEWORK_BITMAP(x) NOTIFICATION_BITMAP_LOW(x)
+#define NS_HYP_FRAMEWORK_BITMAP(x) NOTIFICATION_BITMAP_HIGH(x)
+#define FRAMEWORK_NOTIFY_RX_BUFFER_FULL BIT(0)
static int ffa_notification_bind_common(u16 dst_id, u64 bitmap,
u32 flags, bool is_bind)
@@ -852,9 +918,15 @@ static int ffa_notification_get(u32 flags, struct ffa_notify_bitmaps *notify)
else if (ret.a0 != FFA_SUCCESS)
return -EINVAL; /* Something else went wrong. */
- notify->sp_map = PACK_NOTIFICATION_BITMAP(ret.a2, ret.a3);
- notify->vm_map = PACK_NOTIFICATION_BITMAP(ret.a4, ret.a5);
- notify->arch_map = PACK_NOTIFICATION_BITMAP(ret.a6, ret.a7);
+ if (flags & SECURE_PARTITION_BITMAP_ENABLE)
+ notify->sp_map = PACK_NOTIFICATION_BITMAP(ret.a2, ret.a3);
+ if (flags & NON_SECURE_VM_BITMAP_ENABLE)
+ notify->vm_map = PACK_NOTIFICATION_BITMAP(ret.a4, ret.a5);
+ if (flags & SPM_FRAMEWORK_BITMAP_ENABLE)
+ notify->arch_map = SPM_FRAMEWORK_BITMAP(ret.a6);
+ if (flags & NS_HYP_FRAMEWORK_BITMAP_ENABLE)
+ notify->arch_map = PACK_NOTIFICATION_BITMAP(notify->arch_map,
+ ret.a7);
return 0;
}
@@ -863,27 +935,32 @@ struct ffa_dev_part_info {
ffa_sched_recv_cb callback;
void *cb_data;
rwlock_t rw_lock;
+ struct ffa_device *dev;
+ struct list_head node;
};
static void __do_sched_recv_cb(u16 part_id, u16 vcpu, bool is_per_vcpu)
{
- struct ffa_dev_part_info *partition;
+ struct ffa_dev_part_info *partition = NULL, *tmp;
ffa_sched_recv_cb callback;
+ struct list_head *phead;
void *cb_data;
- partition = xa_load(&drv_info->partition_info, part_id);
- if (!partition) {
+ phead = xa_load(&drv_info->partition_info, part_id);
+ if (!phead) {
pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
return;
}
- read_lock(&partition->rw_lock);
- callback = partition->callback;
- cb_data = partition->cb_data;
- read_unlock(&partition->rw_lock);
+ list_for_each_entry_safe(partition, tmp, phead, node) {
+ read_lock(&partition->rw_lock);
+ callback = partition->callback;
+ cb_data = partition->cb_data;
+ read_unlock(&partition->rw_lock);
- if (callback)
- callback(vcpu, is_per_vcpu, cb_data);
+ if (callback)
+ callback(vcpu, is_per_vcpu, cb_data);
+ }
}
static void ffa_notification_info_get(void)
@@ -899,7 +976,7 @@ static void ffa_notification_info_get(void)
}, &ret);
if (ret.a0 != FFA_FN_NATIVE(SUCCESS) && ret.a0 != FFA_SUCCESS) {
- if (ret.a2 != FFA_RET_NO_DATA)
+ if ((s32)ret.a2 != FFA_RET_NO_DATA)
pr_err("Notification Info fetch failed: 0x%lx (0x%lx)",
ret.a0, ret.a2);
return;
@@ -935,7 +1012,7 @@ static void ffa_notification_info_get(void)
}
/* Per vCPU Notification */
- for (idx = 0; idx < ids_count[list]; idx++) {
+ for (idx = 1; idx < ids_count[list]; idx++) {
if (ids_processed >= max_ids - 1)
break;
@@ -1015,17 +1092,17 @@ static int ffa_sync_send_receive(struct ffa_device *dev,
static int ffa_indirect_msg_send(struct ffa_device *dev, void *buf, size_t sz)
{
- return ffa_msg_send2(drv_info->vm_id, dev->vm_id, buf, sz);
+ return ffa_msg_send2(dev, drv_info->vm_id, buf, sz);
}
-static int ffa_sync_send_receive2(struct ffa_device *dev, const uuid_t *uuid,
+static int ffa_sync_send_receive2(struct ffa_device *dev,
struct ffa_send_direct_data2 *data)
{
if (!drv_info->msg_direct_req2_supp)
return -EOPNOTSUPP;
return ffa_msg_send_direct_req2(drv_info->vm_id, dev->vm_id,
- uuid, data);
+ &dev->uuid, data);
}
static int ffa_memory_share(struct ffa_mem_ops_args *args)
@@ -1051,35 +1128,39 @@ static int ffa_memory_lend(struct ffa_mem_ops_args *args)
return ffa_memory_ops(FFA_MEM_LEND, args);
}
-#define FFA_SECURE_PARTITION_ID_FLAG BIT(15)
-
#define ffa_notifications_disabled() (!drv_info->notif_enabled)
-enum notify_type {
- NON_SECURE_VM,
- SECURE_PARTITION,
- FRAMEWORK,
-};
-
struct notifier_cb_info {
struct hlist_node hnode;
+ struct ffa_device *dev;
+ ffa_fwk_notifier_cb fwk_cb;
ffa_notifier_cb cb;
void *cb_data;
- enum notify_type type;
};
-static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback,
- void *cb_data, bool is_registration)
+static int
+ffa_sched_recv_cb_update(struct ffa_device *dev, ffa_sched_recv_cb callback,
+ void *cb_data, bool is_registration)
{
- struct ffa_dev_part_info *partition;
+ struct ffa_dev_part_info *partition = NULL, *tmp;
+ struct list_head *phead;
bool cb_valid;
if (ffa_notifications_disabled())
return -EOPNOTSUPP;
- partition = xa_load(&drv_info->partition_info, part_id);
+ phead = xa_load(&drv_info->partition_info, dev->vm_id);
+ if (!phead) {
+ pr_err("%s: Invalid partition ID 0x%x\n", __func__, dev->vm_id);
+ return -EINVAL;
+ }
+
+ list_for_each_entry_safe(partition, tmp, phead, node)
+ if (partition->dev == dev)
+ break;
+
if (!partition) {
- pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
+ pr_err("%s: No such partition ID 0x%x\n", __func__, dev->vm_id);
return -EINVAL;
}
@@ -1101,12 +1182,12 @@ static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback,
static int ffa_sched_recv_cb_register(struct ffa_device *dev,
ffa_sched_recv_cb cb, void *cb_data)
{
- return ffa_sched_recv_cb_update(dev->vm_id, cb, cb_data, true);
+ return ffa_sched_recv_cb_update(dev, cb, cb_data, true);
}
static int ffa_sched_recv_cb_unregister(struct ffa_device *dev)
{
- return ffa_sched_recv_cb_update(dev->vm_id, NULL, NULL, false);
+ return ffa_sched_recv_cb_update(dev, NULL, NULL, false);
}
static int ffa_notification_bind(u16 dst_id, u64 bitmap, u32 flags)
@@ -1119,27 +1200,69 @@ static int ffa_notification_unbind(u16 dst_id, u64 bitmap)
return ffa_notification_bind_common(dst_id, bitmap, 0, false);
}
-/* Should be called while the notify_lock is taken */
+static enum notify_type ffa_notify_type_get(u16 vm_id)
+{
+ if (vm_id & FFA_SECURE_PARTITION_ID_FLAG)
+ return SECURE_PARTITION;
+ else
+ return NON_SECURE_VM;
+}
+
+/* notifier_hnode_get* should be called with notify_lock held */
static struct notifier_cb_info *
-notifier_hash_node_get(u16 notify_id, enum notify_type type)
+notifier_hnode_get_by_vmid(u16 notify_id, int vmid)
{
struct notifier_cb_info *node;
hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
- if (type == node->type)
+ if (node->fwk_cb && vmid == node->dev->vm_id)
+ return node;
+
+ return NULL;
+}
+
+static struct notifier_cb_info *
+notifier_hnode_get_by_vmid_uuid(u16 notify_id, int vmid, const uuid_t *uuid)
+{
+ struct notifier_cb_info *node;
+
+ if (uuid_is_null(uuid))
+ return notifier_hnode_get_by_vmid(notify_id, vmid);
+
+ hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
+ if (node->fwk_cb && vmid == node->dev->vm_id &&
+ uuid_equal(&node->dev->uuid, uuid))
+ return node;
+
+ return NULL;
+}
+
+static struct notifier_cb_info *
+notifier_hnode_get_by_type(u16 notify_id, enum notify_type type)
+{
+ struct notifier_cb_info *node;
+
+ hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
+ if (node->cb && type == ffa_notify_type_get(node->dev->vm_id))
return node;
return NULL;
}
static int
-update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb,
- void *cb_data, bool is_registration)
+update_notifier_cb(struct ffa_device *dev, int notify_id, void *cb,
+ void *cb_data, bool is_registration, bool is_framework)
{
struct notifier_cb_info *cb_info = NULL;
+ enum notify_type type = ffa_notify_type_get(dev->vm_id);
bool cb_found;
- cb_info = notifier_hash_node_get(notify_id, type);
+ if (is_framework)
+ cb_info = notifier_hnode_get_by_vmid_uuid(notify_id, dev->vm_id,
+ &dev->uuid);
+ else
+ cb_info = notifier_hnode_get_by_type(notify_id, type);
+
cb_found = !!cb_info;
if (!(is_registration ^ cb_found))
@@ -1150,9 +1273,12 @@ update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb,
if (!cb_info)
return -ENOMEM;
- cb_info->type = type;
- cb_info->cb = cb;
+ cb_info->dev = dev;
cb_info->cb_data = cb_data;
+ if (is_framework)
+ cb_info->fwk_cb = cb;
+ else
+ cb_info->cb = cb;
hash_add(drv_info->notifier_hash, &cb_info->hnode, notify_id);
} else {
@@ -1162,18 +1288,10 @@ update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb,
return 0;
}
-static enum notify_type ffa_notify_type_get(u16 vm_id)
-{
- if (vm_id & FFA_SECURE_PARTITION_ID_FLAG)
- return SECURE_PARTITION;
- else
- return NON_SECURE_VM;
-}
-
-static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
+static int __ffa_notify_relinquish(struct ffa_device *dev, int notify_id,
+ bool is_framework)
{
int rc;
- enum notify_type type = ffa_notify_type_get(dev->vm_id);
if (ffa_notifications_disabled())
return -EOPNOTSUPP;
@@ -1183,26 +1301,38 @@ static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
mutex_lock(&drv_info->notify_lock);
- rc = update_notifier_cb(notify_id, type, NULL, NULL, false);
+ rc = update_notifier_cb(dev, notify_id, NULL, NULL, false,
+ is_framework);
if (rc) {
pr_err("Could not unregister notification callback\n");
mutex_unlock(&drv_info->notify_lock);
return rc;
}
- rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id));
+ if (!is_framework)
+ rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id));
mutex_unlock(&drv_info->notify_lock);
return rc;
}
-static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
- ffa_notifier_cb cb, void *cb_data, int notify_id)
+static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
+{
+ return __ffa_notify_relinquish(dev, notify_id, false);
+}
+
+static int ffa_fwk_notify_relinquish(struct ffa_device *dev, int notify_id)
+{
+ return __ffa_notify_relinquish(dev, notify_id, true);
+}
+
+static int __ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
+ void *cb, void *cb_data,
+ int notify_id, bool is_framework)
{
int rc;
u32 flags = 0;
- enum notify_type type = ffa_notify_type_get(dev->vm_id);
if (ffa_notifications_disabled())
return -EOPNOTSUPP;
@@ -1212,26 +1342,44 @@ static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
mutex_lock(&drv_info->notify_lock);
- if (is_per_vcpu)
- flags = PER_VCPU_NOTIFICATION_FLAG;
+ if (!is_framework) {
+ if (is_per_vcpu)
+ flags = PER_VCPU_NOTIFICATION_FLAG;
- rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags);
- if (rc) {
- mutex_unlock(&drv_info->notify_lock);
- return rc;
+ rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags);
+ if (rc) {
+ mutex_unlock(&drv_info->notify_lock);
+ return rc;
+ }
}
- rc = update_notifier_cb(notify_id, type, cb, cb_data, true);
+ rc = update_notifier_cb(dev, notify_id, cb, cb_data, true,
+ is_framework);
if (rc) {
pr_err("Failed to register callback for %d - %d\n",
notify_id, rc);
- ffa_notification_unbind(dev->vm_id, BIT(notify_id));
+ if (!is_framework)
+ ffa_notification_unbind(dev->vm_id, BIT(notify_id));
}
mutex_unlock(&drv_info->notify_lock);
return rc;
}
+static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
+ ffa_notifier_cb cb, void *cb_data, int notify_id)
+{
+ return __ffa_notify_request(dev, is_per_vcpu, cb, cb_data, notify_id,
+ false);
+}
+
+static int
+ffa_fwk_notify_request(struct ffa_device *dev, ffa_fwk_notifier_cb cb,
+ void *cb_data, int notify_id)
+{
+ return __ffa_notify_request(dev, false, cb, cb_data, notify_id, true);
+}
+
static int ffa_notify_send(struct ffa_device *dev, int notify_id,
bool is_per_vcpu, u16 vcpu)
{
@@ -1258,7 +1406,7 @@ static void handle_notif_callbacks(u64 bitmap, enum notify_type type)
continue;
mutex_lock(&drv_info->notify_lock);
- cb_info = notifier_hash_node_get(notify_id, type);
+ cb_info = notifier_hnode_get_by_type(notify_id, type);
mutex_unlock(&drv_info->notify_lock);
if (cb_info && cb_info->cb)
@@ -1266,21 +1414,68 @@ static void handle_notif_callbacks(u64 bitmap, enum notify_type type)
}
}
-static void notif_get_and_handle(void *unused)
+static void handle_fwk_notif_callbacks(u32 bitmap)
+{
+ void *buf;
+ uuid_t uuid;
+ int notify_id = 0, target;
+ struct ffa_indirect_msg_hdr *msg;
+ struct notifier_cb_info *cb_info = NULL;
+
+ /* Only one framework notification defined and supported for now */
+ if (!(bitmap & FRAMEWORK_NOTIFY_RX_BUFFER_FULL))
+ return;
+
+ mutex_lock(&drv_info->rx_lock);
+
+ msg = drv_info->rx_buffer;
+ buf = kmemdup((void *)msg + msg->offset, msg->size, GFP_KERNEL);
+ if (!buf) {
+ mutex_unlock(&drv_info->rx_lock);
+ return;
+ }
+
+ target = SENDER_ID(msg->send_recv_id);
+ if (msg->offset >= sizeof(*msg))
+ uuid_copy(&uuid, &msg->uuid);
+ else
+ uuid_copy(&uuid, &uuid_null);
+
+ mutex_unlock(&drv_info->rx_lock);
+
+ ffa_rx_release();
+
+ mutex_lock(&drv_info->notify_lock);
+ cb_info = notifier_hnode_get_by_vmid_uuid(notify_id, target, &uuid);
+ mutex_unlock(&drv_info->notify_lock);
+
+ if (cb_info && cb_info->fwk_cb)
+ cb_info->fwk_cb(notify_id, cb_info->cb_data, buf);
+ kfree(buf);
+}
+
+static void notif_get_and_handle(void *cb_data)
{
int rc;
- struct ffa_notify_bitmaps bitmaps;
+ u32 flags;
+ struct ffa_drv_info *info = cb_data;
+ struct ffa_notify_bitmaps bitmaps = { 0 };
+
+ if (info->vm_id == 0) /* Non secure physical instance */
+ flags = FFA_BITMAP_SECURE_ENABLE_MASK;
+ else
+ flags = FFA_BITMAP_ALL_ENABLE_MASK;
- rc = ffa_notification_get(SECURE_PARTITION_BITMAP |
- SPM_FRAMEWORK_BITMAP, &bitmaps);
+ rc = ffa_notification_get(flags, &bitmaps);
if (rc) {
pr_err("Failed to retrieve notifications with %d!\n", rc);
return;
}
+ handle_fwk_notif_callbacks(SPM_FRAMEWORK_BITMAP(bitmaps.arch_map));
+ handle_fwk_notif_callbacks(NS_HYP_FRAMEWORK_BITMAP(bitmaps.arch_map));
handle_notif_callbacks(bitmaps.vm_map, NON_SECURE_VM);
handle_notif_callbacks(bitmaps.sp_map, SECURE_PARTITION);
- handle_notif_callbacks(bitmaps.arch_map, FRAMEWORK);
}
static void
@@ -1329,6 +1524,8 @@ static const struct ffa_notifier_ops ffa_drv_notifier_ops = {
.sched_recv_cb_unregister = ffa_sched_recv_cb_unregister,
.notify_request = ffa_notify_request,
.notify_relinquish = ffa_notify_relinquish,
+ .fwk_notify_request = ffa_fwk_notify_request,
+ .fwk_notify_relinquish = ffa_fwk_notify_relinquish,
.notify_send = ffa_notify_send,
};
@@ -1384,11 +1581,110 @@ static struct notifier_block ffa_bus_nb = {
.notifier_call = ffa_bus_notifier,
};
+static int ffa_xa_add_partition_info(struct ffa_device *dev)
+{
+ struct ffa_dev_part_info *info;
+ struct list_head *head, *phead;
+ int ret = -ENOMEM;
+
+ phead = xa_load(&drv_info->partition_info, dev->vm_id);
+ if (phead) {
+ head = phead;
+ list_for_each_entry(info, head, node) {
+ if (info->dev == dev) {
+ pr_err("%s: duplicate dev %p part ID 0x%x\n",
+ __func__, dev, dev->vm_id);
+ return -EEXIST;
+ }
+ }
+ }
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return ret;
+
+ rwlock_init(&info->rw_lock);
+ info->dev = dev;
+
+ if (!phead) {
+ phead = kzalloc(sizeof(*phead), GFP_KERNEL);
+ if (!phead)
+ goto free_out;
+
+ INIT_LIST_HEAD(phead);
+
+ ret = xa_insert(&drv_info->partition_info, dev->vm_id, phead,
+ GFP_KERNEL);
+ if (ret) {
+ pr_err("%s: failed to save part ID 0x%x Ret:%d\n",
+ __func__, dev->vm_id, ret);
+ goto free_out;
+ }
+ }
+ list_add(&info->node, phead);
+ return 0;
+
+free_out:
+ kfree(phead);
+ kfree(info);
+ return ret;
+}
+
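
The xarray now stores a list head per partition ID rather than a single ffa_dev_part_info, since one partition can be registered once per UUID. A minimal sketch of the resulting lookup pattern (mirroring the cleanup loop used later in this patch); ffa_dump_partition is a hypothetical helper name.

  /* Illustrative only: walk every device registered for one partition ID. */
  static void ffa_dump_partition(u16 vm_id)
  {
  	struct ffa_dev_part_info *info;
  	struct list_head *phead;

  	phead = xa_load(&drv_info->partition_info, vm_id);
  	if (!phead)
  		return;

  	list_for_each_entry(info, phead, node)
  		pr_debug("part 0x%x -> dev %p\n", vm_id, info->dev);
  }
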
+static int ffa_setup_host_partition(int vm_id)
+{
+ struct ffa_partition_info buf = { 0 };
+ struct ffa_device *ffa_dev;
+ int ret;
+
+ buf.id = vm_id;
+ ffa_dev = ffa_device_register(&buf, &ffa_drv_ops);
+ if (!ffa_dev) {
+ pr_err("%s: failed to register host partition ID 0x%x\n",
+ __func__, vm_id);
+ return -EINVAL;
+ }
+
+ ret = ffa_xa_add_partition_info(ffa_dev);
+ if (ret)
+ return ret;
+
+ if (ffa_notifications_disabled())
+ return 0;
+
+ ret = ffa_sched_recv_cb_update(ffa_dev, ffa_self_notif_handle,
+ drv_info, true);
+ if (ret)
+ pr_info("Failed to register driver sched callback %d\n", ret);
+
+ return ret;
+}
+
+static void ffa_partitions_cleanup(void)
+{
+ struct list_head *phead;
+ unsigned long idx;
+
+ /* Clean up/free all registered devices */
+ ffa_devices_unregister();
+
+ xa_for_each(&drv_info->partition_info, idx, phead) {
+ struct ffa_dev_part_info *info, *tmp;
+
+ xa_erase(&drv_info->partition_info, idx);
+ list_for_each_entry_safe(info, tmp, phead, node) {
+ list_del(&info->node);
+ kfree(info);
+ }
+ kfree(phead);
+ }
+
+ xa_destroy(&drv_info->partition_info);
+}
+
static int ffa_setup_partitions(void)
{
int count, idx, ret;
struct ffa_device *ffa_dev;
- struct ffa_dev_part_info *info;
struct ffa_partition_info *pbuf, *tpbuf;
if (drv_info->version == FFA_VERSION_1_0) {
@@ -1422,59 +1718,30 @@ static int ffa_setup_partitions(void)
!(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
ffa_mode_32bit_set(ffa_dev);
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
+ if (ffa_xa_add_partition_info(ffa_dev)) {
ffa_device_unregister(ffa_dev);
continue;
}
- rwlock_init(&info->rw_lock);
- ret = xa_insert(&drv_info->partition_info, tpbuf->id,
- info, GFP_KERNEL);
- if (ret) {
- pr_err("%s: failed to save partition ID 0x%x - ret:%d\n",
- __func__, tpbuf->id, ret);
- ffa_device_unregister(ffa_dev);
- kfree(info);
- }
}
kfree(pbuf);
- /* Allocate for the host */
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- /* Already registered devices are freed on bus_exit */
- ffa_partitions_cleanup();
- return -ENOMEM;
- }
+ /*
+ * Check if the host has already been added to the partition info.
+ * The host cannot have multiple UUIDs, so checking for an existing
+ * entry is sufficient.
+ */
+ if (xa_load(&drv_info->partition_info, drv_info->vm_id))
+ return 0;
- rwlock_init(&info->rw_lock);
- ret = xa_insert(&drv_info->partition_info, drv_info->vm_id,
- info, GFP_KERNEL);
- if (ret) {
- pr_err("%s: failed to save Host partition ID 0x%x - ret:%d. Abort.\n",
- __func__, drv_info->vm_id, ret);
- kfree(info);
- /* Already registered devices are freed on bus_exit */
+ /* Allocate for the host */
+ ret = ffa_setup_host_partition(drv_info->vm_id);
+ if (ret)
ffa_partitions_cleanup();
- }
return ret;
}
-static void ffa_partitions_cleanup(void)
-{
- struct ffa_dev_part_info *info;
- unsigned long idx;
-
- xa_for_each(&drv_info->partition_info, idx, info) {
- xa_erase(&drv_info->partition_info, idx);
- kfree(info);
- }
-
- xa_destroy(&drv_info->partition_info);
-}
-
/* FFA FEATURE IDs */
#define FFA_FEAT_NOTIFICATION_PENDING_INT (1)
#define FFA_FEAT_SCHEDULE_RECEIVER_INT (2)
@@ -1777,19 +2044,10 @@ static int __init ffa_init(void)
ffa_notifications_setup();
ret = ffa_setup_partitions();
- if (ret) {
- pr_err("failed to setup partitions\n");
- goto cleanup_notifs;
- }
-
- ret = ffa_sched_recv_cb_update(drv_info->vm_id, ffa_self_notif_handle,
- drv_info, true);
- if (ret)
- pr_info("Failed to register driver sched callback %d\n", ret);
-
- return 0;
+ if (!ret)
+ return ret;
-cleanup_notifs:
+ pr_err("failed to setup partitions\n");
ffa_notifications_cleanup();
free_pages:
if (drv_info->tx_buffer)
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index a3386bf36de5..7af01664ce7e 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -17,6 +17,8 @@
#include "common.h"
+#define SCMI_UEVENT_MODALIAS_FMT "%s:%02x:%s"
+
BLOCKING_NOTIFIER_HEAD(scmi_requested_devices_nh);
EXPORT_SYMBOL_GPL(scmi_requested_devices_nh);
@@ -42,7 +44,7 @@ static atomic_t scmi_syspower_registered = ATOMIC_INIT(0);
* This helper let an SCMI driver request specific devices identified by the
* @id_table to be created for each active SCMI instance.
*
- * The requested device name MUST NOT be already existent for any protocol;
+ * The requested device name MUST NOT already exist for this protocol;
* at first the freshly requested @id_table is annotated in the IDR table
* @scmi_requested_devices and then the requested device is advertised to any
* registered party via the @scmi_requested_devices_nh notification chain.
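
As a reminder of how this path is normally reached, below is a minimal sketch of an SCMI client driver declaring an id_table; the protocol choice, the "my-perf-client" name and the empty probe are purely illustrative. If the name were already requested for the same protocol, the duplicate check in this function would reject it.

  #include <linux/module.h>
  #include <linux/scmi_protocol.h>

  static int my_scmi_probe(struct scmi_device *sdev)
  {
  	/* A device was created for this (protocol_id, name) pair. */
  	return 0;
  }

  static const struct scmi_device_id my_scmi_id_table[] = {
  	{ SCMI_PROTOCOL_PERF, "my-perf-client" },
  	{ },
  };
  MODULE_DEVICE_TABLE(scmi, my_scmi_id_table);

  static struct scmi_driver my_scmi_driver = {
  	.name = "my-scmi-client",
  	.probe = my_scmi_probe,
  	.id_table = my_scmi_id_table,
  };
  module_scmi_driver(my_scmi_driver);
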
@@ -52,7 +54,6 @@ static atomic_t scmi_syspower_registered = ATOMIC_INIT(0);
static int scmi_protocol_device_request(const struct scmi_device_id *id_table)
{
int ret = 0;
- unsigned int id = 0;
struct list_head *head, *phead = NULL;
struct scmi_requested_dev *rdev;
@@ -67,19 +68,13 @@ static int scmi_protocol_device_request(const struct scmi_device_id *id_table)
}
/*
- * Search for the matching protocol rdev list and then search
- * of any existent equally named device...fails if any duplicate found.
+ * Find the matching protocol rdev list and then search it for any
+ * existing device with the same name; the request fails on a duplicate.
*/
mutex_lock(&scmi_requested_devices_mtx);
- idr_for_each_entry(&scmi_requested_devices, head, id) {
- if (!phead) {
- /* A list found registered in the IDR is never empty */
- rdev = list_first_entry(head, struct scmi_requested_dev,
- node);
- if (rdev->id_table->protocol_id ==
- id_table->protocol_id)
- phead = head;
- }
+ phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
+ if (phead) {
+ head = phead;
list_for_each_entry(rdev, head, node) {
if (!strcmp(rdev->id_table->name, id_table->name)) {
pr_err("Ignoring duplicate request [%d] %s\n",
@@ -283,11 +278,59 @@ static void scmi_dev_remove(struct device *dev)
scmi_drv->remove(scmi_dev);
}
+static int scmi_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+ const struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ return add_uevent_var(env, "MODALIAS=" SCMI_UEVENT_MODALIAS_FMT,
+ dev_name(&scmi_dev->dev), scmi_dev->protocol_id,
+ scmi_dev->name);
+}
+
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ return sysfs_emit(buf, SCMI_UEVENT_MODALIAS_FMT,
+ dev_name(&scmi_dev->dev), scmi_dev->protocol_id,
+ scmi_dev->name);
+}
+static DEVICE_ATTR_RO(modalias);
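
To make the new SCMI_UEVENT_MODALIAS_FMT concrete, the standalone snippet below prints the string that both the uevent and the modalias attribute would emit; the device name, protocol ID and protocol name are made-up values. This is the string udev consumes for module autoloading.

  #include <stdio.h>

  #define SCMI_UEVENT_MODALIAS_FMT "%s:%02x:%s"

  int main(void)
  {
  	/* Prints: MODALIAS=scmi_dev.2:15:temp-sensor */
  	printf("MODALIAS=" SCMI_UEVENT_MODALIAS_FMT "\n",
  	       "scmi_dev.2", 0x15, "temp-sensor");
  	return 0;
  }
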
+
+static ssize_t protocol_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ return sprintf(buf, "0x%02x\n", scmi_dev->protocol_id);
+}
+static DEVICE_ATTR_RO(protocol_id);
+
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ return sprintf(buf, "%s\n", scmi_dev->name);
+}
+static DEVICE_ATTR_RO(name);
+
+static struct attribute *scmi_device_attributes_attrs[] = {
+ &dev_attr_protocol_id.attr,
+ &dev_attr_name.attr,
+ &dev_attr_modalias.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(scmi_device_attributes);
+
const struct bus_type scmi_bus_type = {
.name = "scmi_protocol",
.match = scmi_dev_match,
.probe = scmi_dev_probe,
.remove = scmi_dev_remove,
+ .uevent = scmi_device_uevent,
+ .dev_groups = scmi_device_attributes_groups,
};
EXPORT_SYMBOL_GPL(scmi_bus_type);
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 60050da54bf2..1c75a4c9c371 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -1997,17 +1997,7 @@ static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
else if (db->width == 4)
SCMI_PROTO_FC_RING_DB(32);
else /* db->width == 8 */
-#ifdef CONFIG_64BIT
SCMI_PROTO_FC_RING_DB(64);
-#else
- {
- u64 val = 0;
-
- if (db->mask)
- val = ioread64_hi_lo(db->addr) & db->mask;
- iowrite64_hi_lo(db->set | val, db->addr);
- }
-#endif
}
/**
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c b/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c
index 5dcf62f19faf..8748874f0552 100644
--- a/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c
@@ -534,11 +534,6 @@ static int cs_dsp_bin_err_test_adsp2_16bit_init(struct kunit *test)
return cs_dsp_bin_err_test_common_init(test, dsp, 1);
}
-static struct kunit_case cs_dsp_bin_err_test_cases_halo[] = {
-
- { } /* terminator */
-};
-
static void cs_dsp_bin_err_block_types_desc(const struct cs_dsp_bin_test_param *param,
char *desc)
{
@@ -560,7 +555,7 @@ KUNIT_ARRAY_PARAM(bin_test_block_types,
bin_test_block_types_cases,
cs_dsp_bin_err_block_types_desc);
-static struct kunit_case cs_dsp_bin_err_test_cases_adsp2[] = {
+static struct kunit_case cs_dsp_bin_err_test_cases[] = {
KUNIT_CASE(bin_load_with_unknown_blocks),
KUNIT_CASE(bin_err_wrong_magic),
KUNIT_CASE(bin_err_too_short_for_header),
@@ -578,21 +573,21 @@ static struct kunit_suite cs_dsp_bin_err_test_halo = {
.name = "cs_dsp_bin_err_halo",
.init = cs_dsp_bin_err_test_halo_init,
.exit = cs_dsp_bin_err_test_exit,
- .test_cases = cs_dsp_bin_err_test_cases_halo,
+ .test_cases = cs_dsp_bin_err_test_cases,
};
static struct kunit_suite cs_dsp_bin_err_test_adsp2_32bit = {
.name = "cs_dsp_bin_err_adsp2_32bit",
.init = cs_dsp_bin_err_test_adsp2_32bit_init,
.exit = cs_dsp_bin_err_test_exit,
- .test_cases = cs_dsp_bin_err_test_cases_adsp2,
+ .test_cases = cs_dsp_bin_err_test_cases,
};
static struct kunit_suite cs_dsp_bin_err_test_adsp2_16bit = {
.name = "cs_dsp_bin_err_adsp2_16bit",
.init = cs_dsp_bin_err_test_adsp2_16bit_init,
.exit = cs_dsp_bin_err_test_exit,
- .test_cases = cs_dsp_bin_err_test_cases_adsp2,
+ .test_cases = cs_dsp_bin_err_test_cases,
};
kunit_test_suites(&cs_dsp_bin_err_test_halo,
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c b/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c
index cb90964740ea..942ba1af5e7c 100644
--- a/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c
@@ -73,6 +73,18 @@ static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
.length_bytes = 4,
};
+static char *cs_dsp_ctl_alloc_test_string(struct kunit *test, char c, size_t len)
+{
+ char *str;
+
+ str = kunit_kmalloc(test, len + 1, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, str);
+ memset(str, c, len);
+ str[len] = '\0';
+
+ return str;
+}
+
/* Algorithm info block without controls should load */
static void cs_dsp_ctl_parse_no_coeffs(struct kunit *test)
{
@@ -160,12 +172,8 @@ static void cs_dsp_ctl_parse_max_v1_name(struct kunit *test)
struct cs_dsp_mock_coeff_def def = mock_coeff_template;
struct cs_dsp_coeff_ctl *ctl;
struct firmware *wmfw;
- char *name;
- name = kunit_kzalloc(test, 256, GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, name);
- memset(name, 'A', 255);
- def.fullname = name;
+ def.fullname = cs_dsp_ctl_alloc_test_string(test, 'A', 255);
cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
cs_dsp_ctl_parse_test_algs[0].id,
@@ -252,14 +260,9 @@ static void cs_dsp_ctl_parse_max_short_name(struct kunit *test)
struct cs_dsp_test_local *local = priv->local;
struct cs_dsp_mock_coeff_def def = mock_coeff_template;
struct cs_dsp_coeff_ctl *ctl;
- char *name;
struct firmware *wmfw;
- name = kunit_kmalloc(test, 255, GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, name);
- memset(name, 'A', 255);
-
- def.shortname = name;
+ def.shortname = cs_dsp_ctl_alloc_test_string(test, 'A', 255);
cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
cs_dsp_ctl_parse_test_algs[0].id,
@@ -273,7 +276,7 @@ static void cs_dsp_ctl_parse_max_short_name(struct kunit *test)
ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
KUNIT_ASSERT_NOT_NULL(test, ctl);
KUNIT_EXPECT_EQ(test, ctl->subname_len, 255);
- KUNIT_EXPECT_MEMEQ(test, ctl->subname, name, ctl->subname_len);
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
KUNIT_EXPECT_EQ(test, ctl->type, def.type);
KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
@@ -323,12 +326,8 @@ static void cs_dsp_ctl_parse_with_max_fullname(struct kunit *test)
struct cs_dsp_mock_coeff_def def = mock_coeff_template;
struct cs_dsp_coeff_ctl *ctl;
struct firmware *wmfw;
- char *fullname;
- fullname = kunit_kmalloc(test, 255, GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, fullname);
- memset(fullname, 'A', 255);
- def.fullname = fullname;
+ def.fullname = cs_dsp_ctl_alloc_test_string(test, 'A', 255);
cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
cs_dsp_ctl_parse_test_algs[0].id,
@@ -392,12 +391,8 @@ static void cs_dsp_ctl_parse_with_max_description(struct kunit *test)
struct cs_dsp_mock_coeff_def def = mock_coeff_template;
struct cs_dsp_coeff_ctl *ctl;
struct firmware *wmfw;
- char *description;
- description = kunit_kmalloc(test, 65535, GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, description);
- memset(description, 'A', 65535);
- def.description = description;
+ def.description = cs_dsp_ctl_alloc_test_string(test, 'A', 65535);
cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
cs_dsp_ctl_parse_test_algs[0].id,
@@ -429,17 +424,9 @@ static void cs_dsp_ctl_parse_with_max_fullname_and_description(struct kunit *tes
struct cs_dsp_mock_coeff_def def = mock_coeff_template;
struct cs_dsp_coeff_ctl *ctl;
struct firmware *wmfw;
- char *fullname, *description;
-
- fullname = kunit_kmalloc(test, 255, GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, fullname);
- memset(fullname, 'A', 255);
- def.fullname = fullname;
- description = kunit_kmalloc(test, 65535, GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, description);
- memset(description, 'A', 65535);
- def.description = description;
+ def.fullname = cs_dsp_ctl_alloc_test_string(test, 'A', 255);
+ def.description = cs_dsp_ctl_alloc_test_string(test, 'A', 65535);
cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
cs_dsp_ctl_parse_test_algs[0].id,
diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c
index 4f9fb086eab7..0a7c764dcc61 100644
--- a/drivers/firmware/efi/efibc.c
+++ b/drivers/firmware/efi/efibc.c
@@ -47,7 +47,7 @@ static int efibc_reboot_notifier_call(struct notifier_block *notifier,
if (ret || !data)
return NOTIFY_DONE;
- wdata = kmalloc(MAX_DATA_LEN * sizeof(efi_char16_t), GFP_KERNEL);
+ wdata = kmalloc_array(MAX_DATA_LEN, sizeof(efi_char16_t), GFP_KERNEL);
if (!wdata)
return NOTIFY_DONE;
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 1141cd06011f..d23a1b9fed75 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -62,6 +62,8 @@ KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO), $(KBUILD_CFLAGS))
# `-fdata-sections` flag from KBUILD_CFLAGS_KERNEL
KBUILD_CFLAGS_KERNEL := $(filter-out -fdata-sections, $(KBUILD_CFLAGS_KERNEL))
+KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+
lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \
file.o mem.o random.o randomalloc.o pci.o \
skip_spaces.o lib-cmdline.o lib-ctype.o \
@@ -83,13 +85,19 @@ lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o \
lib-$(CONFIG_ARM) += arm32-stub.o
lib-$(CONFIG_ARM64) += kaslr.o arm64.o arm64-stub.o smbios.o
lib-$(CONFIG_X86) += x86-stub.o smbios.o
+lib-$(CONFIG_EFI_MIXED) += x86-mixed.o
lib-$(CONFIG_X86_64) += x86-5lvl.o
lib-$(CONFIG_RISCV) += kaslr.o riscv.o riscv-stub.o
lib-$(CONFIG_LOONGARCH) += loongarch.o loongarch-stub.o
CFLAGS_arm32-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
-zboot-obj-$(CONFIG_RISCV) := lib-clz_ctz.o lib-ashldi3.o
+zboot-obj-y := zboot-decompress-gzip.o
+CFLAGS_zboot-decompress-gzip.o += -I$(srctree)/lib/zlib_inflate
+zboot-obj-$(CONFIG_KERNEL_ZSTD) := zboot-decompress-zstd.o lib-xxhash.o
+CFLAGS_zboot-decompress-zstd.o += -I$(srctree)/lib/zstd
+
+zboot-obj-$(CONFIG_RISCV) += lib-clz_ctz.o lib-ashldi3.o
lib-$(CONFIG_EFI_ZBOOT) += zboot.o $(zboot-obj-y)
lib-$(CONFIG_UNACCEPTED_MEMORY) += unaccepted_memory.o bitmap.o find.o
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index d96d4494070d..f5ba032863a9 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -1234,4 +1234,7 @@ void process_unaccepted_memory(u64 start, u64 end);
void accept_memory(phys_addr_t start, unsigned long size);
void arch_accept_memory(phys_addr_t start, phys_addr_t end);
+efi_status_t efi_zboot_decompress_init(unsigned long *alloc_size);
+efi_status_t efi_zboot_decompress(u8 *out, unsigned long outlen);
+
#endif
diff --git a/drivers/firmware/efi/libstub/intrinsics.c b/drivers/firmware/efi/libstub/intrinsics.c
index 965e734f6f98..418cd2e6dccc 100644
--- a/drivers/firmware/efi/libstub/intrinsics.c
+++ b/drivers/firmware/efi/libstub/intrinsics.c
@@ -15,8 +15,31 @@ void *__memmove(void *__dest, const void *__src, size_t count) __alias(memmove);
void *__memset(void *s, int c, size_t count) __alias(memset);
#endif
+static void *efistub_memmove(u8 *dst, const u8 *src, size_t len)
+{
+ if (src > dst || dst >= (src + len))
+ for (size_t i = 0; i < len; i++)
+ dst[i] = src[i];
+ else
+ for (ssize_t i = len - 1; i >= 0; i--)
+ dst[i] = src[i];
+
+ return dst;
+}
+
+static void *efistub_memset(void *dst, int c, size_t len)
+{
+ for (u8 *d = dst; len--; d++)
+ *d = c;
+
+ return dst;
+}
+
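
These local loops take over once the boot-services pointer is gone (after ExitBootServices()), when copy_mem/set_mem can no longer be called. The direction test picks a forward copy when the destination does not overlap the tail of the source and a backward copy otherwise, which is what gives memmove() semantics. A standalone illustration with made-up buffers:

  #include <stdio.h>

  int main(void)
  {
  	char buf[8] = "abcdef";
  	char *src = buf, *dst = buf + 2;	/* dst overlaps the source tail */
  	size_t len = 4;

  	if (src > dst || dst >= src + len)
  		for (size_t i = 0; i < len; i++)
  			dst[i] = src[i];
  	else
  		for (size_t i = len; i-- > 0; )
  			dst[i] = src[i];

  	printf("%s\n", buf);	/* "ababcd", as memmove() would produce */
  	return 0;
  }
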
void *memcpy(void *dst, const void *src, size_t len)
{
+ if (efi_table_attr(efi_system_table, boottime) == NULL)
+ return efistub_memmove(dst, src, len);
+
efi_bs_call(copy_mem, dst, src, len);
return dst;
}
@@ -25,6 +48,9 @@ extern void *memmove(void *dst, const void *src, size_t len) __alias(memcpy);
void *memset(void *dst, int c, size_t len)
{
+ if (efi_table_attr(efi_system_table, boottime) == NULL)
+ return efistub_memset(dst, c, len);
+
efi_bs_call(set_mem, dst, len, c & U8_MAX);
return dst;
}
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
index 5a732018be36..fd80b2f3233a 100644
--- a/drivers/firmware/efi/libstub/randomalloc.c
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -75,6 +75,10 @@ efi_status_t efi_random_alloc(unsigned long size,
if (align < EFI_ALLOC_ALIGN)
align = EFI_ALLOC_ALIGN;
+ /* Avoid address 0x0, as it can be mistaken for NULL */
+ if (alloc_min == 0)
+ alloc_min = align;
+
size = round_up(size, EFI_ALLOC_ALIGN);
/* count the suitable slots in each memory map entry */
diff --git a/drivers/firmware/efi/libstub/x86-mixed.S b/drivers/firmware/efi/libstub/x86-mixed.S
new file mode 100644
index 000000000000..e04ed99bc449
--- /dev/null
+++ b/drivers/firmware/efi/libstub/x86-mixed.S
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2014, 2015 Intel Corporation; author Matt Fleming
+ *
+ * Early support for invoking 32-bit EFI services from a 64-bit kernel.
+ *
+ * Because this thunking occurs before ExitBootServices() we have to
+ * restore the firmware's 32-bit GDT and IDT before we make EFI service
+ * calls.
+ *
+ * On the plus side, we don't have to worry about mangling 64-bit
+ * addresses into 32-bits because we're executing with an identity
+ * mapped pagetable and haven't transitioned to 64-bit virtual addresses
+ * yet.
+ */
+
+#include <linux/linkage.h>
+#include <asm/desc_defs.h>
+#include <asm/msr.h>
+#include <asm/page_types.h>
+#include <asm/pgtable_types.h>
+#include <asm/processor-flags.h>
+#include <asm/segment.h>
+
+ .text
+ .code32
+#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
+SYM_FUNC_START(efi32_stub_entry)
+ call 1f
+1: popl %ecx
+
+ /* Clear BSS */
+ xorl %eax, %eax
+ leal (_bss - 1b)(%ecx), %edi
+ leal (_ebss - 1b)(%ecx), %ecx
+ subl %edi, %ecx
+ shrl $2, %ecx
+ cld
+ rep stosl
+
+ add $0x4, %esp /* Discard return address */
+ movl 8(%esp), %ebx /* struct boot_params pointer */
+ jmp efi32_startup
+SYM_FUNC_END(efi32_stub_entry)
+#endif
+
+/*
+ * Called using a far call from __efi64_thunk() below, using the x86_64 SysV
+ * ABI (except for R8/R9 which are inaccessible to 32-bit code - EAX/EBX are
+ * used instead). EBP+16 points to the arguments passed via the stack.
+ *
+ * The first argument (EDI) is a pointer to the boot service or protocol, to
+ * which the remaining arguments are passed, each truncated to 32 bits.
+ */
+SYM_FUNC_START_LOCAL(efi_enter32)
+ /*
+ * Convert x86-64 SysV ABI params to i386 ABI
+ */
+ pushl 32(%ebp) /* Up to 3 args passed via the stack */
+ pushl 24(%ebp)
+ pushl 16(%ebp)
+ pushl %ebx /* R9 */
+ pushl %eax /* R8 */
+ pushl %ecx
+ pushl %edx
+ pushl %esi
+
+ /* Disable paging */
+ movl %cr0, %eax
+ btrl $X86_CR0_PG_BIT, %eax
+ movl %eax, %cr0
+
+ /* Disable long mode via EFER */
+ movl $MSR_EFER, %ecx
+ rdmsr
+ btrl $_EFER_LME, %eax
+ wrmsr
+
+ call *%edi
+
+ /* We must preserve return value */
+ movl %eax, %edi
+
+ call efi32_enable_long_mode
+
+ addl $32, %esp
+ movl %edi, %eax
+ lret
+SYM_FUNC_END(efi_enter32)
+
+ .code64
+SYM_FUNC_START(__efi64_thunk)
+ push %rbp
+ movl %esp, %ebp
+ push %rbx
+
+ /* Move args #5 and #6 into 32-bit accessible registers */
+ movl %r8d, %eax
+ movl %r9d, %ebx
+
+ lcalll *efi32_call(%rip)
+
+ pop %rbx
+ pop %rbp
+ RET
+SYM_FUNC_END(__efi64_thunk)
+
+ .code32
+SYM_FUNC_START_LOCAL(efi32_enable_long_mode)
+ movl %cr4, %eax
+ btsl $(X86_CR4_PAE_BIT), %eax
+ movl %eax, %cr4
+
+ movl $MSR_EFER, %ecx
+ rdmsr
+ btsl $_EFER_LME, %eax
+ wrmsr
+
+ /* Disable interrupts - the firmware's IDT does not work in long mode */
+ cli
+
+ /* Enable paging */
+ movl %cr0, %eax
+ btsl $X86_CR0_PG_BIT, %eax
+ movl %eax, %cr0
+ ret
+SYM_FUNC_END(efi32_enable_long_mode)
+
+/*
+ * This is the common EFI stub entry point for mixed mode. It sets up the GDT
+ * and page tables needed for 64-bit execution, after which it calls the
+ * common 64-bit EFI entrypoint efi_stub_entry().
+ *
+ * Arguments: 0(%esp) image handle
+ * 4(%esp) EFI system table pointer
+ * %ebx struct boot_params pointer (or NULL)
+ *
+ * Since this is the point of no return for ordinary execution, no registers
+ * are considered live except for the function parameters. [Note that the EFI
+ * stub may still exit and return to the firmware using the Exit() EFI boot
+ * service.]
+ */
+SYM_FUNC_START_LOCAL(efi32_startup)
+ movl %esp, %ebp
+
+ subl $8, %esp
+ sgdtl (%esp) /* Save GDT descriptor to the stack */
+ movl 2(%esp), %esi /* Existing GDT pointer */
+ movzwl (%esp), %ecx /* Existing GDT limit */
+ inc %ecx /* Existing GDT size */
+ andl $~7, %ecx /* Ensure size is multiple of 8 */
+
+ subl %ecx, %esp /* Allocate new GDT */
+ andl $~15, %esp /* Realign the stack */
+ movl %esp, %edi /* New GDT address */
+ leal 7(%ecx), %eax /* New GDT limit */
+ pushw %cx /* Push 64-bit CS (for LJMP below) */
+ pushl %edi /* Push new GDT address */
+ pushw %ax /* Push new GDT limit */
+
+ /* Copy GDT to the stack and add a 64-bit code segment at the end */
+ movl $GDT_ENTRY(DESC_CODE64, 0, 0xfffff) & 0xffffffff, (%edi,%ecx)
+ movl $GDT_ENTRY(DESC_CODE64, 0, 0xfffff) >> 32, 4(%edi,%ecx)
+ shrl $2, %ecx
+ cld
+ rep movsl /* Copy the firmware GDT */
+ lgdtl (%esp) /* Switch to the new GDT */
+
+ call 1f
+1: pop %edi
+
+ /* Record mixed mode entry */
+ movb $0x0, (efi_is64 - 1b)(%edi)
+
+ /* Set up indirect far call to re-enter 32-bit mode */
+ leal (efi32_call - 1b)(%edi), %eax
+ addl %eax, (%eax)
+ movw %cs, 4(%eax)
+
+ /* Disable paging */
+ movl %cr0, %eax
+ btrl $X86_CR0_PG_BIT, %eax
+ movl %eax, %cr0
+
+ /* Set up 1:1 mapping */
+ leal (pte - 1b)(%edi), %eax
+ movl $_PAGE_PRESENT | _PAGE_RW | _PAGE_PSE, %ecx
+ leal (_PAGE_PRESENT | _PAGE_RW)(%eax), %edx
+2: movl %ecx, (%eax)
+ addl $8, %eax
+ addl $PMD_SIZE, %ecx
+ jnc 2b
+
+ movl $PAGE_SIZE, %ecx
+ .irpc l, 0123
+ movl %edx, \l * 8(%eax)
+ addl %ecx, %edx
+ .endr
+ addl %ecx, %eax
+ movl %edx, (%eax)
+ movl %eax, %cr3
+
+ call efi32_enable_long_mode
+
+ /* Set up far jump to 64-bit mode (CS is already on the stack) */
+ leal (efi_stub_entry - 1b)(%edi), %eax
+ movl %eax, 2(%esp)
+
+ movl 0(%ebp), %edi
+ movl 4(%ebp), %esi
+ movl %ebx, %edx
+ ljmpl *2(%esp)
+SYM_FUNC_END(efi32_startup)
+
+/*
+ * efi_status_t efi32_pe_entry(efi_handle_t image_handle,
+ * efi_system_table_32_t *sys_table)
+ */
+SYM_FUNC_START(efi32_pe_entry)
+ pushl %ebx // save callee-save registers
+
+ /* Check whether the CPU supports long mode */
+ movl $0x80000001, %eax // assume extended info support
+ cpuid
+ btl $29, %edx // check long mode bit
+ jnc 1f
+ leal 8(%esp), %esp // preserve stack alignment
+ xor %ebx, %ebx // no struct boot_params pointer
+ jmp efi32_startup // only ESP and EBX remain live
+1: movl $0x80000003, %eax // EFI_UNSUPPORTED
+ popl %ebx
+ RET
+SYM_FUNC_END(efi32_pe_entry)
+
+#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
+ .org efi32_stub_entry + 0x200
+ .code64
+SYM_FUNC_START_NOALIGN(efi64_stub_entry)
+ jmp efi_handover_entry
+SYM_FUNC_END(efi64_stub_entry)
+#endif
+
+ .data
+ .balign 8
+SYM_DATA_START_LOCAL(efi32_call)
+ .long efi_enter32 - .
+ .word 0x0
+SYM_DATA_END(efi32_call)
+SYM_DATA(efi_is64, .byte 1)
+
+ .bss
+ .balign PAGE_SIZE
+SYM_DATA_LOCAL(pte, .fill 6 * PAGE_SIZE, 1, 0)
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 863910e9eefc..cafc90d4caaf 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -397,17 +397,13 @@ static void __noreturn efi_exit(efi_handle_t handle, efi_status_t status)
asm("hlt");
}
-void __noreturn efi_stub_entry(efi_handle_t handle,
- efi_system_table_t *sys_table_arg,
- struct boot_params *boot_params);
-
/*
* Because the x86 boot code expects to be passed a boot_params we
* need to create one ourselves (usually the bootloader would create
* one for us).
*/
-efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
- efi_system_table_t *sys_table_arg)
+static efi_status_t efi_allocate_bootparams(efi_handle_t handle,
+ struct boot_params **bp)
{
efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID;
struct boot_params *boot_params;
@@ -416,21 +412,15 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
unsigned long alloc;
char *cmdline_ptr;
- efi_system_table = sys_table_arg;
-
- /* Check if we were booted by the EFI firmware */
- if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
- efi_exit(handle, EFI_INVALID_PARAMETER);
-
status = efi_bs_call(handle_protocol, handle, &proto, (void **)&image);
if (status != EFI_SUCCESS) {
efi_err("Failed to get handle for LOADED_IMAGE_PROTOCOL\n");
- efi_exit(handle, status);
+ return status;
}
status = efi_allocate_pages(PARAM_SIZE, &alloc, ULONG_MAX);
if (status != EFI_SUCCESS)
- efi_exit(handle, status);
+ return status;
boot_params = memset((void *)alloc, 0x0, PARAM_SIZE);
hdr = &boot_params->hdr;
@@ -446,14 +436,14 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
cmdline_ptr = efi_convert_cmdline(image);
if (!cmdline_ptr) {
efi_free(PARAM_SIZE, alloc);
- efi_exit(handle, EFI_OUT_OF_RESOURCES);
+ return EFI_OUT_OF_RESOURCES;
}
efi_set_u64_split((unsigned long)cmdline_ptr, &hdr->cmd_line_ptr,
&boot_params->ext_cmd_line_ptr);
- efi_stub_entry(handle, sys_table_arg, boot_params);
- /* not reached */
+ *bp = boot_params;
+ return EFI_SUCCESS;
}
static void add_e820ext(struct boot_params *params,
@@ -740,13 +730,16 @@ static efi_status_t parse_options(const char *cmdline)
return efi_parse_options(cmdline);
}
-static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
+static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry,
+ struct boot_params *boot_params)
{
unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
unsigned long addr, alloc_size, entry;
efi_status_t status;
u32 seed[2] = {};
+ boot_params_ptr = boot_params;
+
/* determine the required size of the allocation */
alloc_size = ALIGN(max_t(unsigned long, output_len, kernel_total_size),
MIN_KERNEL_ALIGN);
@@ -777,7 +770,7 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
seed[0] = 0;
}
- boot_params_ptr->hdr.loadflags |= KASLR_FLAG;
+ boot_params->hdr.loadflags |= KASLR_FLAG;
}
status = efi_random_alloc(alloc_size, CONFIG_PHYSICAL_ALIGN, &addr,
@@ -815,20 +808,27 @@ static void __noreturn enter_kernel(unsigned long kernel_addr,
void __noreturn efi_stub_entry(efi_handle_t handle,
efi_system_table_t *sys_table_arg,
struct boot_params *boot_params)
{
efi_guid_t guid = EFI_MEMORY_ATTRIBUTE_PROTOCOL_GUID;
- struct setup_header *hdr = &boot_params->hdr;
const struct linux_efi_initrd *initrd = NULL;
unsigned long kernel_entry;
+ struct setup_header *hdr;
efi_status_t status;
- boot_params_ptr = boot_params;
-
efi_system_table = sys_table_arg;
/* Check if we were booted by the EFI firmware */
if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
efi_exit(handle, EFI_INVALID_PARAMETER);
+ if (!IS_ENABLED(CONFIG_EFI_HANDOVER_PROTOCOL) || !boot_params) {
+ status = efi_allocate_bootparams(handle, &boot_params);
+ if (status != EFI_SUCCESS)
+ efi_exit(handle, status);
+ }
+
+ hdr = &boot_params->hdr;
+
if (have_unsupported_snp_features())
efi_exit(handle, EFI_UNSUPPORTED);
@@ -870,7 +870,7 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
if (efi_mem_encrypt > 0)
hdr->xloadflags |= XLF_MEM_ENCRYPTION;
- status = efi_decompress_kernel(&kernel_entry);
+ status = efi_decompress_kernel(&kernel_entry, boot_params);
if (status != EFI_SUCCESS) {
efi_err("Failed to decompress kernel\n");
goto fail;
@@ -940,6 +940,12 @@ fail:
efi_exit(handle, status);
}
+efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
+ efi_system_table_t *sys_table_arg)
+{
+ efi_stub_entry(handle, sys_table_arg, NULL);
+}
+
#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
void efi_handover_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg,
struct boot_params *boot_params)
diff --git a/drivers/firmware/efi/libstub/zboot-decompress-gzip.c b/drivers/firmware/efi/libstub/zboot-decompress-gzip.c
new file mode 100644
index 000000000000..e97a7e9d3c98
--- /dev/null
+++ b/drivers/firmware/efi/libstub/zboot-decompress-gzip.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <linux/zlib.h>
+
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+#include "inftrees.c"
+#include "inffast.c"
+#include "inflate.c"
+
+extern unsigned char _gzdata_start[], _gzdata_end[];
+extern u32 __aligned(1) payload_size;
+
+static struct z_stream_s stream;
+
+efi_status_t efi_zboot_decompress_init(unsigned long *alloc_size)
+{
+ efi_status_t status;
+ int rc;
+
+ /* skip the 10 byte header, assume no recorded filename */
+ stream.next_in = _gzdata_start + 10;
+ stream.avail_in = _gzdata_end - stream.next_in;
+
+ status = efi_allocate_pages(zlib_inflate_workspacesize(),
+ (unsigned long *)&stream.workspace,
+ ULONG_MAX);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ rc = zlib_inflateInit2(&stream, -MAX_WBITS);
+ if (rc != Z_OK) {
+ efi_err("failed to initialize GZIP decompressor: %d\n", rc);
+ status = EFI_LOAD_ERROR;
+ goto out;
+ }
+
+ *alloc_size = payload_size;
+ return EFI_SUCCESS;
+out:
+ efi_free(zlib_inflate_workspacesize(), (unsigned long)stream.workspace);
+ return status;
+}
+
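
The 10-byte offset skipped in the init routine above is the fixed gzip member header from RFC 1952, and the -MAX_WBITS window parameter makes zlib treat the remainder as a raw deflate stream. A sketch of that fixed header, assuming (as the comment states) that no optional FNAME/FEXTRA/FCOMMENT fields were recorded:

  /* Fixed gzip member header, per RFC 1952 (10 bytes). */
  struct gzip_fixed_hdr {
  	unsigned char id1;		/* 0x1f */
  	unsigned char id2;		/* 0x8b */
  	unsigned char cm;		/* 8 = deflate */
  	unsigned char flg;		/* 0 here: no optional fields */
  	unsigned char mtime[4];
  	unsigned char xfl;
  	unsigned char os;
  };					/* raw deflate data follows */
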
+efi_status_t efi_zboot_decompress(u8 *out, unsigned long outlen)
+{
+ int rc;
+
+ stream.next_out = out;
+ stream.avail_out = outlen;
+
+ rc = zlib_inflate(&stream, 0);
+ zlib_inflateEnd(&stream);
+
+ efi_free(zlib_inflate_workspacesize(), (unsigned long)stream.workspace);
+
+ if (rc != Z_STREAM_END) {
+ efi_err("GZIP decompression failed with status %d\n", rc);
+ return EFI_LOAD_ERROR;
+ }
+
+ efi_cache_sync_image((unsigned long)out, outlen);
+
+ return EFI_SUCCESS;
+}
diff --git a/drivers/firmware/efi/libstub/zboot-decompress-zstd.c b/drivers/firmware/efi/libstub/zboot-decompress-zstd.c
new file mode 100644
index 000000000000..bde9d94dd2e3
--- /dev/null
+++ b/drivers/firmware/efi/libstub/zboot-decompress-zstd.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <linux/zstd.h>
+
+#include <asm/efi.h>
+
+#include "decompress_sources.h"
+#include "efistub.h"
+
+extern unsigned char _gzdata_start[], _gzdata_end[];
+extern u32 __aligned(1) payload_size;
+
+static size_t wksp_size;
+static void *wksp;
+
+efi_status_t efi_zboot_decompress_init(unsigned long *alloc_size)
+{
+ efi_status_t status;
+
+ wksp_size = zstd_dctx_workspace_bound();
+ status = efi_allocate_pages(wksp_size, (unsigned long *)&wksp, ULONG_MAX);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ *alloc_size = payload_size;
+ return EFI_SUCCESS;
+}
+
+efi_status_t efi_zboot_decompress(u8 *out, unsigned long outlen)
+{
+ zstd_dctx *dctx = zstd_init_dctx(wksp, wksp_size);
+ size_t ret;
+ int retval;
+
+ ret = zstd_decompress_dctx(dctx, out, outlen, _gzdata_start,
+ _gzdata_end - _gzdata_start - 4);
+ efi_free(wksp_size, (unsigned long)wksp);
+
+ retval = zstd_get_error_code(ret);
+ if (retval) {
+ efi_err("ZSTD-decompression failed with status %d\n", retval);
+ return EFI_LOAD_ERROR;
+ }
+
+ efi_cache_sync_image((unsigned long)out, outlen);
+
+ return EFI_SUCCESS;
+}
diff --git a/drivers/firmware/efi/libstub/zboot.c b/drivers/firmware/efi/libstub/zboot.c
index af23b3c50228..c47ace06f010 100644
--- a/drivers/firmware/efi/libstub/zboot.c
+++ b/drivers/firmware/efi/libstub/zboot.c
@@ -7,36 +7,6 @@
#include "efistub.h"
-static unsigned char zboot_heap[SZ_256K] __aligned(64);
-static unsigned long free_mem_ptr, free_mem_end_ptr;
-
-#define STATIC static
-#if defined(CONFIG_KERNEL_GZIP)
-#include "../../../../lib/decompress_inflate.c"
-#elif defined(CONFIG_KERNEL_LZ4)
-#include "../../../../lib/decompress_unlz4.c"
-#elif defined(CONFIG_KERNEL_LZMA)
-#include "../../../../lib/decompress_unlzma.c"
-#elif defined(CONFIG_KERNEL_LZO)
-#include "../../../../lib/decompress_unlzo.c"
-#elif defined(CONFIG_KERNEL_XZ)
-#undef memcpy
-#define memcpy memcpy
-#undef memmove
-#define memmove memmove
-#include "../../../../lib/decompress_unxz.c"
-#elif defined(CONFIG_KERNEL_ZSTD)
-#include "../../../../lib/decompress_unzstd.c"
-#endif
-
-extern char efi_zboot_header[];
-extern char _gzdata_start[], _gzdata_end[];
-
-static void error(char *x)
-{
- efi_err("EFI decompressor: %s\n", x);
-}
-
static unsigned long alloc_preferred_address(unsigned long alloc_size)
{
#ifdef EFI_KIMG_PREFERRED_ADDRESS
@@ -64,22 +34,17 @@ struct screen_info *alloc_screen_info(void)
asmlinkage efi_status_t __efiapi
efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab)
{
- unsigned long compressed_size = _gzdata_end - _gzdata_start;
+ char *cmdline_ptr __free(efi_pool) = NULL;
unsigned long image_base, alloc_size;
efi_loaded_image_t *image;
efi_status_t status;
- char *cmdline_ptr;
- int ret;
WRITE_ONCE(efi_system_table, systab);
- free_mem_ptr = (unsigned long)&zboot_heap;
- free_mem_end_ptr = free_mem_ptr + sizeof(zboot_heap);
-
status = efi_bs_call(handle_protocol, handle,
&LOADED_IMAGE_PROTOCOL_GUID, (void **)&image);
if (status != EFI_SUCCESS) {
- error("Failed to locate parent's loaded image protocol");
+ efi_err("Failed to locate parent's loaded image protocol\n");
return status;
}
@@ -89,9 +54,9 @@ efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab)
efi_info("Decompressing Linux Kernel...\n");
- // SizeOfImage from the compressee's PE/COFF header
- alloc_size = round_up(get_unaligned_le32(_gzdata_end - 4),
- EFI_ALLOC_ALIGN);
+ status = efi_zboot_decompress_init(&alloc_size);
+ if (status != EFI_SUCCESS)
+ return status;
// If the architecture has a preferred address for the image,
// try that first.
@@ -122,26 +87,14 @@ efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab)
seed, EFI_LOADER_CODE, 0, EFI_ALLOC_LIMIT);
if (status != EFI_SUCCESS) {
efi_err("Failed to allocate memory\n");
- goto free_cmdline;
+ return status;
}
}
- // Decompress the payload into the newly allocated buffer.
- ret = __decompress(_gzdata_start, compressed_size, NULL, NULL,
- (void *)image_base, alloc_size, NULL, error);
- if (ret < 0) {
- error("Decompression failed");
- status = EFI_DEVICE_ERROR;
- goto free_image;
- }
-
- efi_cache_sync_image(image_base, alloc_size);
-
- status = efi_stub_common(handle, image, image_base, cmdline_ptr);
+ // Decompress the payload into the newly allocated buffer
+ status = efi_zboot_decompress((void *)image_base, alloc_size) ?:
+ efi_stub_common(handle, image, image_base, cmdline_ptr);
-free_image:
efi_free(alloc_size, image_base);
-free_cmdline:
- efi_bs_call(free_pool, cmdline_ptr);
return status;
}
diff --git a/drivers/firmware/efi/libstub/zboot.lds b/drivers/firmware/efi/libstub/zboot.lds
index af2c82f7bd90..9ecc57ff5b45 100644
--- a/drivers/firmware/efi/libstub/zboot.lds
+++ b/drivers/firmware/efi/libstub/zboot.lds
@@ -17,6 +17,7 @@ SECTIONS
.rodata : ALIGN(8) {
__efistub__gzdata_start = .;
*(.gzdata)
+ __efistub_payload_size = . - 4;
__efistub__gzdata_end = .;
*(.rodata* .init.rodata* .srodata*)
diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
index 1dd4362ef9a3..8c28e25ddc8a 100644
--- a/drivers/firmware/imx/imx-scu.c
+++ b/drivers/firmware/imx/imx-scu.c
@@ -280,6 +280,7 @@ static int imx_scu_probe(struct platform_device *pdev)
return ret;
sc_ipc->fast_ipc = of_device_is_compatible(args.np, "fsl,imx8-mu-scu");
+ of_node_put(args.np);
num_channel = sc_ipc->fast_ipc ? 2 : SCU_MU_CHAN_NUM;
for (i = 0; i < num_channel; i++) {
diff --git a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
index 447246bd04be..98a463e9774b 100644
--- a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
+++ b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
@@ -814,15 +814,6 @@ static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev,
qcuefi->client = container_of(aux_dev, struct qseecom_client, aux_dev);
- auxiliary_set_drvdata(aux_dev, qcuefi);
- status = qcuefi_set_reference(qcuefi);
- if (status)
- return status;
-
- status = efivars_register(&qcuefi->efivars, &qcom_efivar_ops);
- if (status)
- qcuefi_set_reference(NULL);
-
memset(&pool_config, 0, sizeof(pool_config));
pool_config.initial_size = SZ_4K;
pool_config.policy = QCOM_TZMEM_POLICY_MULTIPLIER;
@@ -833,6 +824,15 @@ static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev,
if (IS_ERR(qcuefi->mempool))
return PTR_ERR(qcuefi->mempool);
+ auxiliary_set_drvdata(aux_dev, qcuefi);
+ status = qcuefi_set_reference(qcuefi);
+ if (status)
+ return status;
+
+ status = efivars_register(&qcuefi->efivars, &qcom_efivar_ops);
+ if (status)
+ qcuefi_set_reference(NULL);
+
return status;
}
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index f0569bb9411f..fc4d67e4c4a6 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -2301,8 +2301,8 @@ static int qcom_scm_probe(struct platform_device *pdev)
__scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config);
if (IS_ERR(__scm->mempool)) {
- dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
- "Failed to create the SCM memory pool\n");
+ ret = dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
+ "Failed to create the SCM memory pool\n");
goto err;
}
diff --git a/drivers/firmware/samsung/Kconfig b/drivers/firmware/samsung/Kconfig
new file mode 100644
index 000000000000..16d81aeb1d41
--- /dev/null
+++ b/drivers/firmware/samsung/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config EXYNOS_ACPM_PROTOCOL
+ tristate "Exynos Alive Clock and Power Manager (ACPM) Message Protocol"
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ depends on MAILBOX
+ help
+ Alive Clock and Power Manager (ACPM) Message Protocol is defined for
+ the purpose of communication between the ACPM firmware and masters
+ (AP, AOC, ...). ACPM firmware operates on the Active Power Management
+ (APM) module that handles overall power activities.
+
+ This protocol driver provides an interface for all the client
+ drivers that make use of the features offered by the APM.
diff --git a/drivers/firmware/samsung/Makefile b/drivers/firmware/samsung/Makefile
new file mode 100644
index 000000000000..7b4c9f6f34f5
--- /dev/null
+++ b/drivers/firmware/samsung/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+acpm-protocol-objs := exynos-acpm.o exynos-acpm-pmic.o
+obj-$(CONFIG_EXYNOS_ACPM_PROTOCOL) += acpm-protocol.o
diff --git a/drivers/firmware/samsung/exynos-acpm-pmic.c b/drivers/firmware/samsung/exynos-acpm-pmic.c
new file mode 100644
index 000000000000..85e90d236da2
--- /dev/null
+++ b/drivers/firmware/samsung/exynos-acpm-pmic.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2024 Linaro Ltd.
+ */
+#include <linux/bitfield.h>
+#include <linux/firmware/samsung/exynos-acpm-protocol.h>
+#include <linux/ktime.h>
+#include <linux/types.h>
+
+#include "exynos-acpm.h"
+#include "exynos-acpm-pmic.h"
+
+#define ACPM_PMIC_CHANNEL GENMASK(15, 12)
+#define ACPM_PMIC_TYPE GENMASK(11, 8)
+#define ACPM_PMIC_REG GENMASK(7, 0)
+
+#define ACPM_PMIC_RETURN GENMASK(31, 24)
+#define ACPM_PMIC_MASK GENMASK(23, 16)
+#define ACPM_PMIC_VALUE GENMASK(15, 8)
+#define ACPM_PMIC_FUNC GENMASK(7, 0)
+
+#define ACPM_PMIC_BULK_SHIFT 8
+#define ACPM_PMIC_BULK_MASK GENMASK(7, 0)
+#define ACPM_PMIC_BULK_MAX_COUNT 8
+
+enum exynos_acpm_pmic_func {
+ ACPM_PMIC_READ,
+ ACPM_PMIC_WRITE,
+ ACPM_PMIC_UPDATE,
+ ACPM_PMIC_BULK_READ,
+ ACPM_PMIC_BULK_WRITE,
+};
+
+static inline u32 acpm_pmic_set_bulk(u32 data, unsigned int i)
+{
+ return (data & ACPM_PMIC_BULK_MASK) << (ACPM_PMIC_BULK_SHIFT * i);
+}
+
+static inline u32 acpm_pmic_get_bulk(u32 data, unsigned int i)
+{
+ return (data >> (ACPM_PMIC_BULK_SHIFT * i)) & ACPM_PMIC_BULK_MASK;
+}
+
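
These two helpers pack and unpack up to eight 8-bit register values into the two 32-bit command words cmd[2] and cmd[3] (four values per word), which is how the bulk read/write paths below carry their payload. A standalone sketch of the packing, with made-up register values:

  #include <stdint.h>
  #include <stdio.h>

  #define ACPM_PMIC_BULK_SHIFT	8
  #define ACPM_PMIC_BULK_MASK	0xffu

  int main(void)
  {
  	const uint8_t buf[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
  	uint32_t cmd2 = 0, cmd3 = 0;

  	for (unsigned int i = 0; i < 8; i++) {
  		uint32_t v = (buf[i] & ACPM_PMIC_BULK_MASK) <<
  			     (ACPM_PMIC_BULK_SHIFT * (i % 4));

  		if (i < 4)
  			cmd2 |= v;
  		else
  			cmd3 |= v;
  	}

  	/* Prints: cmd[2]=0x44332211 cmd[3]=0x88776655 */
  	printf("cmd[2]=0x%08x cmd[3]=0x%08x\n", cmd2, cmd3);
  	return 0;
  }
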
+static void acpm_pmic_set_xfer(struct acpm_xfer *xfer, u32 *cmd,
+ unsigned int acpm_chan_id)
+{
+ xfer->txd = cmd;
+ xfer->rxd = cmd;
+ xfer->txlen = sizeof(cmd);
+ xfer->rxlen = sizeof(cmd);
+ xfer->acpm_chan_id = acpm_chan_id;
+}
+
+static void acpm_pmic_init_read_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan)
+{
+ cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
+ FIELD_PREP(ACPM_PMIC_REG, reg) |
+ FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
+ cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_READ);
+ cmd[3] = ktime_to_ms(ktime_get());
+}
+
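
The GENMASK layout above fully determines the command words. As a worked example with made-up parameters, reading register 0x23 of PMIC type 0x1 on PMIC channel 0x2 produces cmd[0] = 0x2123 and cmd[1] = 0 (ACPM_PMIC_READ); the read path below then extracts the result from ACPM_PMIC_VALUE (bits 15:8) and the firmware status from ACPM_PMIC_RETURN (bits 31:24) of rxd[1].

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
  	uint8_t type = 0x1, reg = 0x23, chan = 0x2;	/* made-up values */
  	uint32_t cmd0, cmd1;

  	cmd0 = ((uint32_t)chan << 12) |	/* ACPM_PMIC_CHANNEL, bits 15:12 */
  	       ((uint32_t)type << 8)  |	/* ACPM_PMIC_TYPE,    bits 11:8  */
  	       reg;			/* ACPM_PMIC_REG,     bits 7:0   */
  	cmd1 = 0;			/* ACPM_PMIC_FUNC = ACPM_PMIC_READ */

  	/* Prints: cmd[0]=0x00002123 cmd[1]=0x00000000 */
  	printf("cmd[0]=0x%08x cmd[1]=0x%08x\n", cmd0, cmd1);
  	return 0;
  }
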
+int acpm_pmic_read_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 *buf)
+{
+ struct acpm_xfer xfer;
+ u32 cmd[4] = {0};
+ int ret;
+
+ acpm_pmic_init_read_cmd(cmd, type, reg, chan);
+ acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+
+ ret = acpm_do_xfer(handle, &xfer);
+ if (ret)
+ return ret;
+
+ *buf = FIELD_GET(ACPM_PMIC_VALUE, xfer.rxd[1]);
+
+ return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+}
+
+static void acpm_pmic_init_bulk_read_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
+ u8 count)
+{
+ cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
+ FIELD_PREP(ACPM_PMIC_REG, reg) |
+ FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
+ cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_BULK_READ) |
+ FIELD_PREP(ACPM_PMIC_VALUE, count);
+}
+
+int acpm_pmic_bulk_read(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 count, u8 *buf)
+{
+ struct acpm_xfer xfer;
+ u32 cmd[4] = {0};
+ int i, ret;
+
+ if (count > ACPM_PMIC_BULK_MAX_COUNT)
+ return -EINVAL;
+
+ acpm_pmic_init_bulk_read_cmd(cmd, type, reg, chan, count);
+ acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+
+ ret = acpm_do_xfer(handle, &xfer);
+ if (ret)
+ return ret;
+
+ ret = FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ if (i < 4)
+ buf[i] = acpm_pmic_get_bulk(xfer.rxd[2], i);
+ else
+ buf[i] = acpm_pmic_get_bulk(xfer.rxd[3], i - 4);
+ }
+
+ return 0;
+}
+
+static void acpm_pmic_init_write_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
+ u8 value)
+{
+ cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
+ FIELD_PREP(ACPM_PMIC_REG, reg) |
+ FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
+ cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_WRITE) |
+ FIELD_PREP(ACPM_PMIC_VALUE, value);
+ cmd[3] = ktime_to_ms(ktime_get());
+}
+
+int acpm_pmic_write_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 value)
+{
+ struct acpm_xfer xfer;
+ u32 cmd[4] = {0};
+ int ret;
+
+ acpm_pmic_init_write_cmd(cmd, type, reg, chan, value);
+ acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+
+ ret = acpm_do_xfer(handle, &xfer);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+}
+
+static void acpm_pmic_init_bulk_write_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
+ u8 count, const u8 *buf)
+{
+ int i;
+
+ cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
+ FIELD_PREP(ACPM_PMIC_REG, reg) |
+ FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
+ cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_BULK_WRITE) |
+ FIELD_PREP(ACPM_PMIC_VALUE, count);
+
+ for (i = 0; i < count; i++) {
+ if (i < 4)
+ cmd[2] |= acpm_pmic_set_bulk(buf[i], i);
+ else
+ cmd[3] |= acpm_pmic_set_bulk(buf[i], i - 4);
+ }
+}
+
+int acpm_pmic_bulk_write(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 count, const u8 *buf)
+{
+ struct acpm_xfer xfer;
+ u32 cmd[4] = {0};
+ int ret;
+
+ if (count > ACPM_PMIC_BULK_MAX_COUNT)
+ return -EINVAL;
+
+ acpm_pmic_init_bulk_write_cmd(cmd, type, reg, chan, count, buf);
+ acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+
+ ret = acpm_do_xfer(handle, &xfer);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+}
+
+static void acpm_pmic_init_update_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
+ u8 value, u8 mask)
+{
+ cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
+ FIELD_PREP(ACPM_PMIC_REG, reg) |
+ FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
+ cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_UPDATE) |
+ FIELD_PREP(ACPM_PMIC_VALUE, value) |
+ FIELD_PREP(ACPM_PMIC_MASK, mask);
+ cmd[3] = ktime_to_ms(ktime_get());
+}
+
+int acpm_pmic_update_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 value, u8 mask)
+{
+ struct acpm_xfer xfer;
+ u32 cmd[4] = {0};
+ int ret;
+
+ acpm_pmic_init_update_cmd(cmd, type, reg, chan, value, mask);
+ acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+
+ ret = acpm_do_xfer(handle, &xfer);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+}
diff --git a/drivers/firmware/samsung/exynos-acpm-pmic.h b/drivers/firmware/samsung/exynos-acpm-pmic.h
new file mode 100644
index 000000000000..078421888a14
--- /dev/null
+++ b/drivers/firmware/samsung/exynos-acpm-pmic.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2024 Linaro Ltd.
+ */
+#ifndef __EXYNOS_ACPM_PMIC_H__
+#define __EXYNOS_ACPM_PMIC_H__
+
+#include <linux/types.h>
+
+struct acpm_handle;
+
+int acpm_pmic_read_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 *buf);
+int acpm_pmic_bulk_read(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 count, u8 *buf);
+int acpm_pmic_write_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 value);
+int acpm_pmic_bulk_write(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 count, const u8 *buf);
+int acpm_pmic_update_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 value, u8 mask);
+#endif /* __EXYNOS_ACPM_PMIC_H__ */
diff --git a/drivers/firmware/samsung/exynos-acpm.c b/drivers/firmware/samsung/exynos-acpm.c
new file mode 100644
index 000000000000..a85b2dbdd9f0
--- /dev/null
+++ b/drivers/firmware/samsung/exynos-acpm.c
@@ -0,0 +1,769 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2024 Linaro Ltd.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/container_of.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/firmware/samsung/exynos-acpm-protocol.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mailbox/exynos-message.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/math.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "exynos-acpm.h"
+#include "exynos-acpm-pmic.h"
+
+#define ACPM_PROTOCOL_SEQNUM GENMASK(21, 16)
+
+/* The unit of counter is 20 us. 5000 * 20 = 100 ms */
+#define ACPM_POLL_TIMEOUT 5000
+#define ACPM_TX_TIMEOUT_US 500000
+
+#define ACPM_GS101_INITDATA_BASE 0xa000
+
+/**
+ * struct acpm_shmem - shared memory configuration information.
+ * @reserved: unused fields.
+ * @chans: offset to array of struct acpm_chan_shmem.
+ * @reserved1: unused fields.
+ * @num_chans: number of channels.
+ */
+struct acpm_shmem {
+ u32 reserved[2];
+ u32 chans;
+ u32 reserved1[3];
+ u32 num_chans;
+};
+
+/**
+ * struct acpm_chan_shmem - descriptor of a shared memory channel.
+ *
+ * @id: channel ID.
+ * @reserved: unused fields.
+ * @rx_rear: rear pointer of APM RX queue (TX for AP).
+ * @rx_front: front pointer of APM RX queue (TX for AP).
+ * @rx_base: base address of APM RX queue (TX for AP).
+ * @reserved1: unused fields.
+ * @tx_rear: rear pointer of APM TX queue (RX for AP).
+ * @tx_front: front pointer of APM TX queue (RX for AP).
+ * @tx_base: base address of APM TX queue (RX for AP).
+ * @qlen: queue length. Applies to both TX/RX queues.
+ * @mlen: message length. Applies to both TX/RX queues.
+ * @reserved2: unused fields.
+ * @poll_completion: true when the channel operates in polling mode.
+ */
+struct acpm_chan_shmem {
+ u32 id;
+ u32 reserved[3];
+ u32 rx_rear;
+ u32 rx_front;
+ u32 rx_base;
+ u32 reserved1[3];
+ u32 tx_rear;
+ u32 tx_front;
+ u32 tx_base;
+ u32 qlen;
+ u32 mlen;
+ u32 reserved2[2];
+ u32 poll_completion;
+};
+
+/**
+ * struct acpm_queue - exynos acpm queue.
+ *
+ * @rear: rear address of the queue.
+ * @front: front address of the queue.
+ * @base: base address of the queue.
+ */
+struct acpm_queue {
+ void __iomem *rear;
+ void __iomem *front;
+ void __iomem *base;
+};
+
+/**
+ * struct acpm_rx_data - RX queue data.
+ *
+ * @cmd: pointer to where the data shall be saved.
+ * @n_cmd: number of 32-bit commands.
+ * @response: true if the client expects the RX data.
+ */
+struct acpm_rx_data {
+ u32 *cmd;
+ size_t n_cmd;
+ bool response;
+};
+
+#define ACPM_SEQNUM_MAX 64
+
+/**
+ * struct acpm_chan - driver internal representation of a channel.
+ * @cl: mailbox client.
+ * @chan: mailbox channel.
+ * @acpm: pointer to driver private data.
+ * @tx: TX queue. The enqueue is done by the host.
+ * - front index is written by the host.
+ * - rear index is written by the firmware.
+ *
+ * @rx: RX queue. The enqueue is done by the firmware.
+ * - front index is written by the firmware.
+ * - rear index is written by the host.
+ * @tx_lock: protects TX queue.
+ * @rx_lock: protects RX queue.
+ * @qlen: queue length. Applies to both TX/RX queues.
+ * @mlen: message length. Applies to both TX/RX queues.
+ * @seqnum: sequence number of the last message enqueued on TX queue.
+ * @id: channel ID.
+ * @poll_completion: indicates if the transfer needs to be polled for
+ * completion or interrupt mode is used.
+ * @bitmap_seqnum: bitmap that tracks the messages on the TX/RX queues.
+ * @rx_data: internal buffer used to drain the RX queue.
+ */
+struct acpm_chan {
+ struct mbox_client cl;
+ struct mbox_chan *chan;
+ struct acpm_info *acpm;
+ struct acpm_queue tx;
+ struct acpm_queue rx;
+ struct mutex tx_lock;
+ struct mutex rx_lock;
+
+ unsigned int qlen;
+ unsigned int mlen;
+ u8 seqnum;
+ u8 id;
+ bool poll_completion;
+
+ DECLARE_BITMAP(bitmap_seqnum, ACPM_SEQNUM_MAX - 1);
+ struct acpm_rx_data rx_data[ACPM_SEQNUM_MAX];
+};
+
+/**
+ * struct acpm_info - driver's private data.
+ * @shmem: pointer to the SRAM configuration data.
+ * @sram_base: base address of SRAM.
+ * @chans: pointer to the ACPM channel parameters retrieved from SRAM.
+ * @dev: pointer to the exynos-acpm device.
+ * @handle: instance of acpm_handle to send to clients.
+ * @num_chans: number of channels available for this controller.
+ */
+struct acpm_info {
+ struct acpm_shmem __iomem *shmem;
+ void __iomem *sram_base;
+ struct acpm_chan *chans;
+ struct device *dev;
+ struct acpm_handle handle;
+ u32 num_chans;
+};
+
+/**
+ * struct acpm_match_data - of_device_id data.
+ * @initdata_base: offset in SRAM where the channels configuration resides.
+ */
+struct acpm_match_data {
+ loff_t initdata_base;
+};
+
+#define client_to_acpm_chan(c) container_of(c, struct acpm_chan, cl)
+#define handle_to_acpm_info(h) container_of(h, struct acpm_info, handle)
+
+/**
+ * acpm_get_rx() - get response from RX queue.
+ * @achan: ACPM channel info.
+ * @xfer: reference to the transfer to get response for.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_get_rx(struct acpm_chan *achan, const struct acpm_xfer *xfer)
+{
+ u32 rx_front, rx_seqnum, tx_seqnum, seqnum;
+ const void __iomem *base, *addr;
+ struct acpm_rx_data *rx_data;
+ u32 i, val, mlen;
+ bool rx_set = false;
+
+ guard(mutex)(&achan->rx_lock);
+
+ rx_front = readl(achan->rx.front);
+ i = readl(achan->rx.rear);
+
+ /* Bail out if RX is empty. */
+ if (i == rx_front)
+ return 0;
+
+ base = achan->rx.base;
+ mlen = achan->mlen;
+
+ tx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]);
+
+ /* Drain RX queue. */
+ do {
+ /* Read RX seqnum. */
+ addr = base + mlen * i;
+ val = readl(addr);
+
+ rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, val);
+ if (!rx_seqnum)
+ return -EIO;
+ /*
+ * The message seqnum starts at 1, whereas the driver indexes the
+ * first message at 0.
+ */
+ seqnum = rx_seqnum - 1;
+ rx_data = &achan->rx_data[seqnum];
+
+ if (rx_data->response) {
+ if (rx_seqnum == tx_seqnum) {
+ __ioread32_copy(xfer->rxd, addr,
+ xfer->rxlen / 4);
+ rx_set = true;
+ clear_bit(seqnum, achan->bitmap_seqnum);
+ } else {
+ /*
+ * The RX data corresponds to another request.
+ * Save the data to drain the queue, but don't
+ * clear yet the bitmap. It will be cleared
+ * after the response is copied to the request.
+ */
+ __ioread32_copy(rx_data->cmd, addr,
+ xfer->rxlen / 4);
+ }
+ } else {
+ clear_bit(seqnum, achan->bitmap_seqnum);
+ }
+
+ i = (i + 1) % achan->qlen;
+ } while (i != rx_front);
+
+ /* We saved all responses, mark RX empty. */
+ writel(rx_front, achan->rx.rear);
+
+ /*
+ * If the response was not in this iteration of the queue, check if the
+ * RX data was previously saved.
+ */
+ rx_data = &achan->rx_data[tx_seqnum - 1];
+ if (!rx_set && rx_data->response) {
+ rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM,
+ rx_data->cmd[0]);
+
+ if (rx_seqnum == tx_seqnum) {
+ memcpy(xfer->rxd, rx_data->cmd, xfer->rxlen);
+ clear_bit(rx_seqnum - 1, achan->bitmap_seqnum);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * acpm_dequeue_by_polling() - RX dequeue by polling.
+ * @achan: ACPM channel info.
+ * @xfer: reference to the transfer being waited for.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_dequeue_by_polling(struct acpm_chan *achan,
+ const struct acpm_xfer *xfer)
+{
+ struct device *dev = achan->acpm->dev;
+ unsigned int cnt_20us = 0;
+ u32 seqnum;
+ int ret;
+
+ seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]);
+
+ do {
+ ret = acpm_get_rx(achan, xfer);
+ if (ret)
+ return ret;
+
+ if (!test_bit(seqnum - 1, achan->bitmap_seqnum))
+ return 0;
+
+ /* Determined experimentally. */
+ usleep_range(20, 30);
+ cnt_20us++;
+ } while (cnt_20us < ACPM_POLL_TIMEOUT);
+
+ dev_err(dev, "Timeout! ch:%u s:%u bitmap:%lx, cnt_20us = %d.\n",
+ achan->id, seqnum, achan->bitmap_seqnum[0], cnt_20us);
+
+ return -ETIME;
+}
+
+/**
+ * acpm_wait_for_queue_slots() - wait for queue slots.
+ *
+ * @achan: ACPM channel info.
+ * @next_tx_front: next front index of the TX queue.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_wait_for_queue_slots(struct acpm_chan *achan, u32 next_tx_front)
+{
+ u32 val;
+ int ret;
+
+ /*
+ * Wait for RX front to keep up with TX front. Make sure there's at
+ * least one element between them.
+ */
+ ret = readl_poll_timeout(achan->rx.front, val, next_tx_front != val, 0,
+ ACPM_TX_TIMEOUT_US);
+ if (ret) {
+ dev_err(achan->acpm->dev, "RX front can not keep up with TX front.\n");
+ return ret;
+ }
+
+ ret = readl_poll_timeout(achan->tx.rear, val, next_tx_front != val, 0,
+ ACPM_TX_TIMEOUT_US);
+ if (ret)
+ dev_err(achan->acpm->dev, "TX queue is full.\n");
+
+ return ret;
+}
+
+/**
+ * acpm_prepare_xfer() - prepare a transfer before writing the message to the
+ * TX queue.
+ * @achan: ACPM channel info.
+ * @xfer: reference to the transfer being prepared.
+ */
+static void acpm_prepare_xfer(struct acpm_chan *achan,
+ const struct acpm_xfer *xfer)
+{
+ struct acpm_rx_data *rx_data;
+ u32 *txd = (u32 *)xfer->txd;
+
+ /* Prevent a seqnum that is still in flight from being reused. */
+ do {
+ if (++achan->seqnum == ACPM_SEQNUM_MAX)
+ achan->seqnum = 1;
+ } while (test_bit(achan->seqnum - 1, achan->bitmap_seqnum));
+
+ txd[0] |= FIELD_PREP(ACPM_PROTOCOL_SEQNUM, achan->seqnum);
+
+ /* Clear data for upcoming responses */
+ rx_data = &achan->rx_data[achan->seqnum - 1];
+ memset(rx_data->cmd, 0, sizeof(*rx_data->cmd) * rx_data->n_cmd);
+ if (xfer->rxd)
+ rx_data->response = true;
+
+ /* Flag the index based on seqnum. (seqnum: 1~63, bitmap: 0~62) */
+ set_bit(achan->seqnum - 1, achan->bitmap_seqnum);
+}
+
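The sequence-number handling above behaves like a small wrap-around allocator over ACPM_SEQNUM_MAX - 1 slots: seqnum 0 is never used on the wire, and a value is skipped while its bit is still set, i.e. while its request is still in flight. A minimal sketch of that allocator (hypothetical helper, kernel bitmap API assumed):

#include <linux/bitops.h>

#define SEQNUM_MAX 64	/* mirrors ACPM_SEQNUM_MAX */

/* Return the next free seqnum in 1..SEQNUM_MAX-1 after @cur. */
static unsigned int next_seqnum(unsigned int cur, const unsigned long *bitmap)
{
	do {
		if (++cur == SEQNUM_MAX)
			cur = 1;			/* 0 is reserved */
	} while (test_bit(cur - 1, bitmap));		/* skip in-flight requests */

	return cur;
}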
+/**
+ * acpm_wait_for_message_response() - a helper grouping all possible ways of
+ * waiting for a synchronous message response.
+ *
+ * @achan: ACPM channel info.
+ * @xfer: reference to the transfer being waited for.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_wait_for_message_response(struct acpm_chan *achan,
+ const struct acpm_xfer *xfer)
+{
+ /* Just polling mode supported for now. */
+ return acpm_dequeue_by_polling(achan, xfer);
+}
+
+/**
+ * acpm_do_xfer() - do one transfer.
+ * @handle: pointer to the acpm handle.
+ * @xfer: transfer to initiate and wait for response.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int acpm_do_xfer(const struct acpm_handle *handle, const struct acpm_xfer *xfer)
+{
+ struct acpm_info *acpm = handle_to_acpm_info(handle);
+ struct exynos_mbox_msg msg;
+ struct acpm_chan *achan;
+ u32 idx, tx_front;
+ int ret;
+
+ if (xfer->acpm_chan_id >= acpm->num_chans)
+ return -EINVAL;
+
+ achan = &acpm->chans[xfer->acpm_chan_id];
+
+ if (!xfer->txd || xfer->txlen > achan->mlen || xfer->rxlen > achan->mlen)
+ return -EINVAL;
+
+ if (!achan->poll_completion) {
+ dev_err(achan->acpm->dev, "Interrupt mode not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ scoped_guard(mutex, &achan->tx_lock) {
+ tx_front = readl(achan->tx.front);
+ idx = (tx_front + 1) % achan->qlen;
+
+ ret = acpm_wait_for_queue_slots(achan, idx);
+ if (ret)
+ return ret;
+
+ acpm_prepare_xfer(achan, xfer);
+
+ /* Write TX command. */
+ __iowrite32_copy(achan->tx.base + achan->mlen * tx_front,
+ xfer->txd, xfer->txlen / 4);
+
+ /* Advance TX front. */
+ writel(idx, achan->tx.front);
+ }
+
+ msg.chan_id = xfer->acpm_chan_id;
+ msg.chan_type = EXYNOS_MBOX_CHAN_TYPE_DOORBELL;
+ ret = mbox_send_message(achan->chan, (void *)&msg);
+ if (ret < 0)
+ return ret;
+
+ ret = acpm_wait_for_message_response(achan, xfer);
+
+ /*
+ * NOTE: we might prefer not to need the mailbox ticker to manage the
+ * transfer queueing since the protocol layer queues things by itself.
+ * Unfortunately, we have to kick the mailbox framework after we have
+ * received our message.
+ */
+ mbox_client_txdone(achan->chan, ret);
+
+ return ret;
+}
+
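A client sitting on top of this transport (for example the PMIC ops wired up in acpm_setup_ops() below) fills a struct acpm_xfer with raw command words and hands it to acpm_do_xfer(). A rough sketch, assuming a hypothetical four-word command on channel 0 and that the channel's message length allows it; the actual command encodings are firmware specific:

/* Hypothetical caller of acpm_do_xfer(); the command encoding is made up. */
static int example_read(const struct acpm_handle *handle)
{
	u32 cmd[4] = { 0 };	/* request words; the seqnum is stamped by the core */
	u32 rsp[4] = { 0 };	/* response words */
	struct acpm_xfer xfer = {
		.txd = cmd,
		.rxd = rsp,
		.txlen = sizeof(cmd),
		.rxlen = sizeof(rsp),
		.acpm_chan_id = 0,	/* assumed channel index */
	};
	int ret;

	cmd[1] = 0x1234;	/* hypothetical payload */

	ret = acpm_do_xfer(handle, &xfer);
	if (ret)
		return ret;

	/* rsp[] now holds the firmware's reply for this request. */
	return 0;
}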
+/**
+ * acpm_chan_shmem_get_params() - get channel parameters and addresses of the
+ * TX/RX queues.
+ * @achan: ACPM channel info.
+ * @chan_shmem: __iomem pointer to a channel described in shared memory.
+ */
+static void acpm_chan_shmem_get_params(struct acpm_chan *achan,
+ struct acpm_chan_shmem __iomem *chan_shmem)
+{
+ void __iomem *base = achan->acpm->sram_base;
+ struct acpm_queue *rx = &achan->rx;
+ struct acpm_queue *tx = &achan->tx;
+
+ achan->mlen = readl(&chan_shmem->mlen);
+ achan->poll_completion = readl(&chan_shmem->poll_completion);
+ achan->id = readl(&chan_shmem->id);
+ achan->qlen = readl(&chan_shmem->qlen);
+
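+ /* The driver's TX queue maps to the shmem rx_* fields and vice versa. */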
+ tx->base = base + readl(&chan_shmem->rx_base);
+ tx->rear = base + readl(&chan_shmem->rx_rear);
+ tx->front = base + readl(&chan_shmem->rx_front);
+
+ rx->base = base + readl(&chan_shmem->tx_base);
+ rx->rear = base + readl(&chan_shmem->tx_rear);
+ rx->front = base + readl(&chan_shmem->tx_front);
+
+ dev_vdbg(achan->acpm->dev, "ID = %d poll = %d, mlen = %d, qlen = %d\n",
+ achan->id, achan->poll_completion, achan->mlen, achan->qlen);
+}
+
+/**
+ * acpm_achan_alloc_cmds() - allocate buffers for retrieving data from the ACPM
+ * firmware.
+ * @achan: ACPM channel info.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_achan_alloc_cmds(struct acpm_chan *achan)
+{
+ struct device *dev = achan->acpm->dev;
+ struct acpm_rx_data *rx_data;
+ size_t cmd_size, n_cmd;
+ int i;
+
+ if (achan->mlen == 0)
+ return 0;
+
+ cmd_size = sizeof(*(achan->rx_data[0].cmd));
+ n_cmd = DIV_ROUND_UP_ULL(achan->mlen, cmd_size);
+
+ for (i = 0; i < ACPM_SEQNUM_MAX; i++) {
+ rx_data = &achan->rx_data[i];
+ rx_data->n_cmd = n_cmd;
+ rx_data->cmd = devm_kcalloc(dev, n_cmd, cmd_size, GFP_KERNEL);
+ if (!rx_data->cmd)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * acpm_free_mbox_chans() - free mailbox channels.
+ * @acpm: pointer to driver data.
+ */
+static void acpm_free_mbox_chans(struct acpm_info *acpm)
+{
+ int i;
+
+ for (i = 0; i < acpm->num_chans; i++)
+ if (!IS_ERR_OR_NULL(acpm->chans[i].chan))
+ mbox_free_channel(acpm->chans[i].chan);
+}
+
+/**
+ * acpm_channels_init() - initialize channels based on the configuration data in
+ * the shared memory.
+ * @acpm: pointer to driver data.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_channels_init(struct acpm_info *acpm)
+{
+ struct acpm_shmem __iomem *shmem = acpm->shmem;
+ struct acpm_chan_shmem __iomem *chans_shmem;
+ struct device *dev = acpm->dev;
+ int i, ret;
+
+ acpm->num_chans = readl(&shmem->num_chans);
+ acpm->chans = devm_kcalloc(dev, acpm->num_chans, sizeof(*acpm->chans),
+ GFP_KERNEL);
+ if (!acpm->chans)
+ return -ENOMEM;
+
+ chans_shmem = acpm->sram_base + readl(&shmem->chans);
+
+ for (i = 0; i < acpm->num_chans; i++) {
+ struct acpm_chan_shmem __iomem *chan_shmem = &chans_shmem[i];
+ struct acpm_chan *achan = &acpm->chans[i];
+ struct mbox_client *cl = &achan->cl;
+
+ achan->acpm = acpm;
+
+ acpm_chan_shmem_get_params(achan, chan_shmem);
+
+ ret = acpm_achan_alloc_cmds(achan);
+ if (ret)
+ return ret;
+
+ mutex_init(&achan->rx_lock);
+ mutex_init(&achan->tx_lock);
+
+ cl->dev = dev;
+
+ achan->chan = mbox_request_channel(cl, 0);
+ if (IS_ERR(achan->chan)) {
+ acpm_free_mbox_chans(acpm);
+ return PTR_ERR(achan->chan);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * acpm_setup_ops() - setup the operations structures.
+ * @acpm: pointer to the driver data.
+ */
+static void acpm_setup_ops(struct acpm_info *acpm)
+{
+ struct acpm_pmic_ops *pmic_ops = &acpm->handle.ops.pmic_ops;
+
+ pmic_ops->read_reg = acpm_pmic_read_reg;
+ pmic_ops->bulk_read = acpm_pmic_bulk_read;
+ pmic_ops->write_reg = acpm_pmic_write_reg;
+ pmic_ops->bulk_write = acpm_pmic_bulk_write;
+ pmic_ops->update_reg = acpm_pmic_update_reg;
+}
+
+static int acpm_probe(struct platform_device *pdev)
+{
+ const struct acpm_match_data *match_data;
+ struct device *dev = &pdev->dev;
+ struct device_node *shmem;
+ struct acpm_info *acpm;
+ resource_size_t size;
+ struct resource res;
+ int ret;
+
+ acpm = devm_kzalloc(dev, sizeof(*acpm), GFP_KERNEL);
+ if (!acpm)
+ return -ENOMEM;
+
+ shmem = of_parse_phandle(dev->of_node, "shmem", 0);
+ ret = of_address_to_resource(shmem, 0, &res);
+ of_node_put(shmem);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to get shared memory.\n");
+
+ size = resource_size(&res);
+ acpm->sram_base = devm_ioremap(dev, res.start, size);
+ if (!acpm->sram_base)
+ return dev_err_probe(dev, -ENOMEM,
+ "Failed to ioremap shared memory.\n");
+
+ match_data = of_device_get_match_data(dev);
+ if (!match_data)
+ return dev_err_probe(dev, -EINVAL,
+ "Failed to get match data.\n");
+
+ acpm->shmem = acpm->sram_base + match_data->initdata_base;
+ acpm->dev = dev;
+
+ ret = acpm_channels_init(acpm);
+ if (ret)
+ return ret;
+
+ acpm_setup_ops(acpm);
+
+ platform_set_drvdata(pdev, acpm);
+
+ return 0;
+}
+
+/**
+ * acpm_handle_put() - release the handle acquired by acpm_get_by_phandle.
+ * @handle: Handle acquired by acpm_get_by_phandle.
+ */
+static void acpm_handle_put(const struct acpm_handle *handle)
+{
+ struct acpm_info *acpm = handle_to_acpm_info(handle);
+ struct device *dev = acpm->dev;
+
+ module_put(dev->driver->owner);
+ /* Drop reference taken with of_find_device_by_node(). */
+ put_device(dev);
+}
+
+/**
+ * devm_acpm_release() - devres release method.
+ * @dev: pointer to device.
+ * @res: pointer to resource.
+ */
+static void devm_acpm_release(struct device *dev, void *res)
+{
+ acpm_handle_put(*(struct acpm_handle **)res);
+}
+
+/**
+ * acpm_get_by_phandle() - get the ACPM handle using DT phandle.
+ * @dev: device pointer requesting ACPM handle.
+ * @property: property name containing phandle on ACPM node.
+ *
+ * Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
+ */
+static const struct acpm_handle *acpm_get_by_phandle(struct device *dev,
+ const char *property)
+{
+ struct platform_device *pdev;
+ struct device_node *acpm_np;
+ struct device_link *link;
+ struct acpm_info *acpm;
+
+ acpm_np = of_parse_phandle(dev->of_node, property, 0);
+ if (!acpm_np)
+ return ERR_PTR(-ENODEV);
+
+ pdev = of_find_device_by_node(acpm_np);
+ if (!pdev) {
+ dev_err(dev, "Cannot find device node %s\n", acpm_np->name);
+ of_node_put(acpm_np);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ of_node_put(acpm_np);
+
+ acpm = platform_get_drvdata(pdev);
+ if (!acpm) {
+ dev_err(dev, "Cannot get drvdata from %s\n",
+ dev_name(&pdev->dev));
+ platform_device_put(pdev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ if (!try_module_get(pdev->dev.driver->owner)) {
+ dev_err(dev, "Cannot get module reference.\n");
+ platform_device_put(pdev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER);
+ if (!link) {
+ dev_err(&pdev->dev,
+ "Failed to create device link to consumer %s.\n",
+ dev_name(dev));
+ platform_device_put(pdev);
+ module_put(pdev->dev.driver->owner);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &acpm->handle;
+}
+
+/**
+ * devm_acpm_get_by_phandle() - managed get handle using phandle.
+ * @dev: device pointer requesting ACPM handle.
+ * @property: property name containing phandle on ACPM node.
+ *
+ * Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
+ */
+const struct acpm_handle *devm_acpm_get_by_phandle(struct device *dev,
+ const char *property)
+{
+ const struct acpm_handle **ptr, *handle;
+
+ ptr = devres_alloc(devm_acpm_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ handle = acpm_get_by_phandle(dev, property);
+ if (!IS_ERR(handle)) {
+ *ptr = handle;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return handle;
+}
+
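Consumer drivers obtain the protocol handle through a phandle in their own DT node. A minimal sketch of a client probe; the "exynos,acpm-ipc" property name is an assumption here and may differ from the actual binding:

/* Hypothetical client probe obtaining the ACPM handle. */
static int example_client_probe(struct platform_device *pdev)
{
	const struct acpm_handle *handle;

	handle = devm_acpm_get_by_phandle(&pdev->dev, "exynos,acpm-ipc");
	if (IS_ERR(handle))
		return dev_err_probe(&pdev->dev, PTR_ERR(handle),
				     "Failed to get ACPM handle\n");

	/* handle->ops now exposes the PMIC accessors set in acpm_setup_ops(). */
	return 0;
}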
+static const struct acpm_match_data acpm_gs101 = {
+ .initdata_base = ACPM_GS101_INITDATA_BASE,
+};
+
+static const struct of_device_id acpm_match[] = {
+ {
+ .compatible = "google,gs101-acpm-ipc",
+ .data = &acpm_gs101,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, acpm_match);
+
+static struct platform_driver acpm_driver = {
+ .probe = acpm_probe,
+ .driver = {
+ .name = "exynos-acpm-protocol",
+ .of_match_table = acpm_match,
+ },
+};
+module_platform_driver(acpm_driver);
+
+MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@linaro.org>");
+MODULE_DESCRIPTION("Samsung Exynos ACPM mailbox protocol driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/samsung/exynos-acpm.h b/drivers/firmware/samsung/exynos-acpm.h
new file mode 100644
index 000000000000..2d14cb58f98c
--- /dev/null
+++ b/drivers/firmware/samsung/exynos-acpm.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2024 Linaro Ltd.
+ */
+#ifndef __EXYNOS_ACPM_H__
+#define __EXYNOS_ACPM_H__
+
+struct acpm_xfer {
+ const u32 *txd;
+ u32 *rxd;
+ size_t txlen;
+ size_t rxlen;
+ unsigned int acpm_chan_id;
+};
+
+struct acpm_handle;
+
+int acpm_do_xfer(const struct acpm_handle *handle,
+ const struct acpm_xfer *xfer);
+
+#endif /* __EXYNOS_ACPM_H__ */
diff --git a/drivers/firmware/smccc/kvm_guest.c b/drivers/firmware/smccc/kvm_guest.c
index f3319be20b36..5767aed25cdc 100644
--- a/drivers/firmware/smccc/kvm_guest.c
+++ b/drivers/firmware/smccc/kvm_guest.c
@@ -6,8 +6,11 @@
#include <linux/bitmap.h>
#include <linux/cache.h>
#include <linux/kernel.h>
+#include <linux/memblock.h>
#include <linux/string.h>
+#include <uapi/linux/psci.h>
+
#include <asm/hypervisor.h>
static DECLARE_BITMAP(__kvm_arm_hyp_services, ARM_SMCCC_KVM_NUM_FUNCS) __ro_after_init = { };
@@ -51,3 +54,66 @@ bool kvm_arm_hyp_service_available(u32 func_id)
return test_bit(func_id, __kvm_arm_hyp_services);
}
EXPORT_SYMBOL_GPL(kvm_arm_hyp_service_available);
+
+#ifdef CONFIG_ARM64
+void __init kvm_arm_target_impl_cpu_init(void)
+{
+ int i;
+ u32 ver;
+ u64 max_cpus;
+ struct arm_smccc_res res;
+ struct target_impl_cpu *target;
+
+ if (!kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER) ||
+ !kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS))
+ return;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID,
+ 0, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS)
+ return;
+
+ /* Version info is in lower 32 bits and is in SMCCC_VERSION format */
+ ver = lower_32_bits(res.a1);
+ if (PSCI_VERSION_MAJOR(ver) != 1) {
+ pr_warn("Unsupported target CPU implementation version v%d.%d\n",
+ PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));
+ return;
+ }
+
+ if (!res.a2) {
+ pr_warn("No target implementation CPUs specified\n");
+ return;
+ }
+
+ max_cpus = res.a2;
+ target = memblock_alloc(sizeof(*target) * max_cpus, __alignof__(*target));
+ if (!target) {
+ pr_warn("Not enough memory for struct target_impl_cpu\n");
+ return;
+ }
+
+ for (i = 0; i < max_cpus; i++) {
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID,
+ i, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS) {
+ pr_warn("Discovering target implementation CPUs failed\n");
+ goto mem_free;
+ }
+ target[i].midr = res.a1;
+ target[i].revidr = res.a2;
+ target[i].aidr = res.a3;
+ }
+
+ if (!cpu_errata_set_target_impl(max_cpus, target)) {
+ pr_warn("Failed to set target implementation CPUs\n");
+ goto mem_free;
+ }
+
+ pr_info("Number of target implementation CPUs is %lld\n", max_cpus);
+ return;
+
+mem_free:
+ memblock_free(target, sizeof(*target) * max_cpus);
+}
+#endif
diff --git a/drivers/firmware/smccc/soc_id.c b/drivers/firmware/smccc/soc_id.c
index 1990263fbba0..c24b3fca1cfe 100644
--- a/drivers/firmware/smccc/soc_id.c
+++ b/drivers/firmware/smccc/soc_id.c
@@ -32,6 +32,85 @@
static struct soc_device *soc_dev;
static struct soc_device_attribute *soc_dev_attr;
+#ifdef CONFIG_ARM64
+
+static char __ro_after_init smccc_soc_id_name[136] = "";
+
+static inline void str_fragment_from_reg(char *dst, unsigned long reg)
+{
+ dst[0] = (reg >> 0) & 0xff;
+ dst[1] = (reg >> 8) & 0xff;
+ dst[2] = (reg >> 16) & 0xff;
+ dst[3] = (reg >> 24) & 0xff;
+ dst[4] = (reg >> 32) & 0xff;
+ dst[5] = (reg >> 40) & 0xff;
+ dst[6] = (reg >> 48) & 0xff;
+ dst[7] = (reg >> 56) & 0xff;
+}
+
+static char __init *smccc_soc_name_init(void)
+{
+ struct arm_smccc_1_2_regs args;
+ struct arm_smccc_1_2_regs res;
+ size_t len;
+
+ /*
+ * Issue Number 1.6 of the Arm SMC Calling Convention
+ * specification introduces an optional "name" string
+ * to the ARM_SMCCC_ARCH_SOC_ID function. Fetch it if
+ * available.
+ */
+ args.a0 = ARM_SMCCC_ARCH_SOC_ID;
+ args.a1 = 2; /* SOC_ID name */
+ arm_smccc_1_2_invoke(&args, &res);
+
+ if ((u32)res.a0 == 0) {
+ /*
+ * Copy res.a1..res.a17 to the smccc_soc_id_name string
+ * 8 bytes at a time. As per Issue 1.6 of the Arm SMC
+ * Calling Convention, the string will be NUL terminated
+ * and padded, from the end of the string to the end of the
+ * 136 byte buffer, with NULs.
+ */
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 0, res.a1);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 1, res.a2);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 2, res.a3);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 3, res.a4);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 4, res.a5);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 5, res.a6);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 6, res.a7);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 7, res.a8);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 8, res.a9);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 9, res.a10);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 10, res.a11);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 11, res.a12);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 12, res.a13);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 13, res.a14);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 14, res.a15);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 15, res.a16);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 16, res.a17);
+
+ len = strnlen(smccc_soc_id_name, sizeof(smccc_soc_id_name));
+ if (len) {
+ if (len == sizeof(smccc_soc_id_name))
+ pr_warn(FW_BUG "Ignoring improperly formatted name\n");
+ else
+ return smccc_soc_id_name;
+ }
+ }
+
+ return NULL;
+}
+
+#else
+
+static char __init *smccc_soc_name_init(void)
+{
+ return NULL;
+}
+
+#endif
+
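Each result register carries eight bytes of the name in little-endian order, which is all str_fragment_from_reg() unpacks. As a worked example (value invented for illustration): if res.a1 held 0x0000006f6c6c6548, the byte shifts above yield 0x48 'H', 0x65 'e', 0x6c 'l', 0x6c 'l', 0x6f 'o' followed by NULs, so the first fragment of smccc_soc_id_name becomes "Hello".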
static int __init smccc_soc_init(void)
{
int soc_id_rev, soc_id_version;
@@ -72,6 +151,7 @@ static int __init smccc_soc_init(void)
soc_dev_attr->soc_id = soc_id_str;
soc_dev_attr->revision = soc_id_rev_str;
soc_dev_attr->family = soc_id_jep106_id_str;
+ soc_dev_attr->machine = smccc_soc_name_init();
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
diff --git a/drivers/firmware/thead,th1520-aon.c b/drivers/firmware/thead,th1520-aon.c
new file mode 100644
index 000000000000..38f812ac9920
--- /dev/null
+++ b/drivers/firmware/thead,th1520-aon.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Alibaba Group Holding Limited.
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd.
+ * Author: Michal Wilczynski <m.wilczynski@samsung.com>
+ */
+
+#include <linux/device.h>
+#include <linux/firmware/thead/thead,th1520-aon.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox_controller.h>
+#include <linux/slab.h>
+
+#define MAX_RX_TIMEOUT (msecs_to_jiffies(3000))
+#define MAX_TX_TIMEOUT 500
+
+struct th1520_aon_chan {
+ struct mbox_chan *ch;
+ struct th1520_aon_rpc_ack_common ack_msg;
+ struct mbox_client cl;
+ struct completion done;
+
+ /* make sure only one RPC is performed at a time */
+ struct mutex transaction_lock;
+};
+
+struct th1520_aon_msg_req_set_resource_power_mode {
+ struct th1520_aon_rpc_msg_hdr hdr;
+ u16 resource;
+ u16 mode;
+ u16 reserved[10];
+} __packed __aligned(1);
+
+/*
+ * This type is used to indicate the error response for most functions.
+ */
+enum th1520_aon_error_codes {
+ LIGHT_AON_ERR_NONE = 0, /* Success */
+ LIGHT_AON_ERR_VERSION = 1, /* Incompatible API version */
+ LIGHT_AON_ERR_CONFIG = 2, /* Configuration error */
+ LIGHT_AON_ERR_PARM = 3, /* Bad parameter */
+ LIGHT_AON_ERR_NOACCESS = 4, /* Permission error (no access) */
+ LIGHT_AON_ERR_LOCKED = 5, /* Permission error (locked) */
+ LIGHT_AON_ERR_UNAVAILABLE = 6, /* Unavailable (out of resources) */
+ LIGHT_AON_ERR_NOTFOUND = 7, /* Not found */
+ LIGHT_AON_ERR_NOPOWER = 8, /* No power */
+ LIGHT_AON_ERR_IPC = 9, /* Generic IPC error */
+ LIGHT_AON_ERR_BUSY = 10, /* Resource is currently busy/active */
+ LIGHT_AON_ERR_FAIL = 11, /* General I/O failure */
+ LIGHT_AON_ERR_LAST
+};
+
+static int th1520_aon_linux_errmap[LIGHT_AON_ERR_LAST] = {
+ 0, /* LIGHT_AON_ERR_NONE */
+ -EINVAL, /* LIGHT_AON_ERR_VERSION */
+ -EINVAL, /* LIGHT_AON_ERR_CONFIG */
+ -EINVAL, /* LIGHT_AON_ERR_PARM */
+ -EACCES, /* LIGHT_AON_ERR_NOACCESS */
+ -EACCES, /* LIGHT_AON_ERR_LOCKED */
+ -ERANGE, /* LIGHT_AON_ERR_UNAVAILABLE */
+ -EEXIST, /* LIGHT_AON_ERR_NOTFOUND */
+ -EPERM, /* LIGHT_AON_ERR_NOPOWER */
+ -EPIPE, /* LIGHT_AON_ERR_IPC */
+ -EBUSY, /* LIGHT_AON_ERR_BUSY */
+ -EIO, /* LIGHT_AON_ERR_FAIL */
+};
+
+static inline int th1520_aon_to_linux_errno(int errno)
+{
+ if (errno >= LIGHT_AON_ERR_NONE && errno < LIGHT_AON_ERR_LAST)
+ return th1520_aon_linux_errmap[errno];
+
+ return -EIO;
+}
+
+static void th1520_aon_rx_callback(struct mbox_client *c, void *rx_msg)
+{
+ struct th1520_aon_chan *aon_chan =
+ container_of(c, struct th1520_aon_chan, cl);
+ struct th1520_aon_rpc_msg_hdr *hdr =
+ (struct th1520_aon_rpc_msg_hdr *)rx_msg;
+ u8 recv_size = sizeof(struct th1520_aon_rpc_msg_hdr) + hdr->size;
+
+ if (recv_size != sizeof(struct th1520_aon_rpc_ack_common)) {
+ dev_err(c->dev, "Invalid ack size, not completing\n");
+ return;
+ }
+
+ memcpy(&aon_chan->ack_msg, rx_msg, recv_size);
+ complete(&aon_chan->done);
+}
+
+/**
+ * th1520_aon_call_rpc() - Send an RPC request to the TH1520 AON subsystem
+ * @aon_chan: Pointer to the AON channel structure
+ * @msg: Pointer to the message (RPC payload) that will be sent
+ *
+ * This function sends an RPC message to the TH1520 AON subsystem via mailbox.
+ * It takes the provided @msg buffer, formats it with version and service flags,
+ * then blocks until the RPC completes or times out. The completion is signaled
+ * by the `aon_chan->done` completion, which is waited upon for a duration
+ * defined by `MAX_RX_TIMEOUT`.
+ *
+ * Return:
+ * * 0 on success
+ * * -ETIMEDOUT if the RPC call times out
+ * * A negative error code if the mailbox send fails or if AON responds with
+ * a non-zero error code (converted via th1520_aon_to_linux_errno()).
+ */
+int th1520_aon_call_rpc(struct th1520_aon_chan *aon_chan, void *msg)
+{
+ struct th1520_aon_rpc_msg_hdr *hdr = msg;
+ int ret;
+
+ mutex_lock(&aon_chan->transaction_lock);
+ reinit_completion(&aon_chan->done);
+
+ RPC_SET_VER(hdr, TH1520_AON_RPC_VERSION);
+ RPC_SET_SVC_ID(hdr, hdr->svc);
+ RPC_SET_SVC_FLAG_MSG_TYPE(hdr, RPC_SVC_MSG_TYPE_DATA);
+ RPC_SET_SVC_FLAG_ACK_TYPE(hdr, RPC_SVC_MSG_NEED_ACK);
+
+ ret = mbox_send_message(aon_chan->ch, msg);
+ if (ret < 0) {
+ dev_err(aon_chan->cl.dev, "RPC send msg failed: %d\n", ret);
+ goto out;
+ }
+
+ if (!wait_for_completion_timeout(&aon_chan->done, MAX_RX_TIMEOUT)) {
+ dev_err(aon_chan->cl.dev, "RPC send msg timeout\n");
+ mutex_unlock(&aon_chan->transaction_lock);
+ return -ETIMEDOUT;
+ }
+
+ ret = aon_chan->ack_msg.err_code;
+
+out:
+ mutex_unlock(&aon_chan->transaction_lock);
+
+ return th1520_aon_to_linux_errno(ret);
+}
+EXPORT_SYMBOL_GPL(th1520_aon_call_rpc);
+
+/**
+ * th1520_aon_power_update() - Change power state of a resource via TH1520 AON
+ * @aon_chan: Pointer to the AON channel structure
+ * @rsrc: Resource ID whose power state needs to be updated
+ * @power_on: Boolean indicating whether the resource should be powered on (true)
+ * or powered off (false)
+ *
+ * This function requests the TH1520 AON subsystem to set the power mode of the
+ * given resource (@rsrc) to either on or off. It constructs the message in
+ * `struct th1520_aon_msg_req_set_resource_power_mode` and then invokes
+ * th1520_aon_call_rpc() to make the request. If the AON call fails, an error
+ * message is logged along with the specific return code.
+ *
+ * Return:
+ * * 0 on success
+ * * A negative error code in case of failures (propagated from
+ * th1520_aon_call_rpc()).
+ */
+int th1520_aon_power_update(struct th1520_aon_chan *aon_chan, u16 rsrc,
+ bool power_on)
+{
+ struct th1520_aon_msg_req_set_resource_power_mode msg = {};
+ struct th1520_aon_rpc_msg_hdr *hdr = &msg.hdr;
+ int ret;
+
+ hdr->svc = TH1520_AON_RPC_SVC_PM;
+ hdr->func = TH1520_AON_PM_FUNC_SET_RESOURCE_POWER_MODE;
+ hdr->size = TH1520_AON_RPC_MSG_NUM;
+
+ RPC_SET_BE16(&msg.resource, 0, rsrc);
+ RPC_SET_BE16(&msg.resource, 2,
+ (power_on ? TH1520_AON_PM_PW_MODE_ON :
+ TH1520_AON_PM_PW_MODE_OFF));
+
+ ret = th1520_aon_call_rpc(aon_chan, &msg);
+ if (ret)
+ dev_err(aon_chan->cl.dev, "failed to power %s resource %d ret %d\n",
+ power_on ? "up" : "off", rsrc, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(th1520_aon_power_update);
+
+/**
+ * th1520_aon_init() - Initialize TH1520 AON firmware protocol interface
+ * @dev: Device pointer for the AON subsystem
+ *
+ * This function initializes the TH1520 AON firmware protocol interface by:
+ * - Allocating and initializing the AON channel structure
+ * - Setting up the mailbox client
+ * - Requesting the AON mailbox channel
+ * - Initializing synchronization primitives
+ *
+ * Return:
+ * * Valid pointer to th1520_aon_chan structure on success
+ * * ERR_PTR(-ENOMEM) if memory allocation fails
+ * * ERR_PTR() with other negative error codes from mailbox operations
+ */
+struct th1520_aon_chan *th1520_aon_init(struct device *dev)
+{
+ struct th1520_aon_chan *aon_chan;
+ struct mbox_client *cl;
+ int ret;
+
+ aon_chan = kzalloc(sizeof(*aon_chan), GFP_KERNEL);
+ if (!aon_chan)
+ return ERR_PTR(-ENOMEM);
+
+ cl = &aon_chan->cl;
+ cl->dev = dev;
+ cl->tx_block = true;
+ cl->tx_tout = MAX_TX_TIMEOUT;
+ cl->rx_callback = th1520_aon_rx_callback;
+
+ aon_chan->ch = mbox_request_channel_byname(cl, "aon");
+ if (IS_ERR(aon_chan->ch)) {
+ dev_err(dev, "Failed to request aon mbox chan\n");
+ ret = PTR_ERR(aon_chan->ch);
+ kfree(aon_chan);
+ return ERR_PTR(ret);
+ }
+
+ mutex_init(&aon_chan->transaction_lock);
+ init_completion(&aon_chan->done);
+
+ return aon_chan;
+}
+EXPORT_SYMBOL_GPL(th1520_aon_init);
+
+/**
+ * th1520_aon_deinit() - Clean up TH1520 AON firmware protocol interface
+ * @aon_chan: Pointer to the AON channel structure to clean up
+ *
+ * This function cleans up resources allocated by th1520_aon_init():
+ * - Frees the mailbox channel
+ * - Frees the AON channel
+ */
+void th1520_aon_deinit(struct th1520_aon_chan *aon_chan)
+{
+ mbox_free_channel(aon_chan->ch);
+ kfree(aon_chan);
+}
+EXPORT_SYMBOL_GPL(th1520_aon_deinit);
+
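Taken together, a consumer such as a power-domain driver would typically bring the interface up in probe, toggle resources as needed, and tear it down on remove. A rough sketch under those assumptions; the resource ID and function names are illustrative only:

/* Hypothetical consumer of the TH1520 AON protocol library. */
static int example_pd_probe(struct platform_device *pdev)
{
	struct th1520_aon_chan *aon_chan;
	int ret;

	aon_chan = th1520_aon_init(&pdev->dev);
	if (IS_ERR(aon_chan))
		return PTR_ERR(aon_chan);

	/* Power on a hypothetical resource ID 3. */
	ret = th1520_aon_power_update(aon_chan, 3, true);
	if (ret) {
		th1520_aon_deinit(aon_chan);
		return ret;
	}

	platform_set_drvdata(pdev, aon_chan);
	return 0;
}

static void example_pd_remove(struct platform_device *pdev)
{
	struct th1520_aon_chan *aon_chan = platform_get_drvdata(pdev);

	th1520_aon_power_update(aon_chan, 3, false);
	th1520_aon_deinit(aon_chan);
}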
+MODULE_AUTHOR("Michal Wilczynski <m.wilczynski@samsung.com>");
+MODULE_DESCRIPTION("T-HEAD TH1520 Always-On firmware protocol library");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 720fa8b5d8e9..7356e860e65c 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -1139,17 +1139,13 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_status);
int zynqmp_pm_fpga_get_config_status(u32 *value)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
- u32 buf, lower_addr, upper_addr;
int ret;
if (!value)
return -EINVAL;
- lower_addr = lower_32_bits((u64)&buf);
- upper_addr = upper_32_bits((u64)&buf);
-
ret = zynqmp_pm_invoke_fn(PM_FPGA_READ, ret_payload, 4,
- XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET, lower_addr, upper_addr,
+ XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET, 0, 0,
XILINX_ZYNQMP_PM_FPGA_READ_CONFIG_REG);
*value = ret_payload[1];
diff --git a/drivers/fwctl/Kconfig b/drivers/fwctl/Kconfig
new file mode 100644
index 000000000000..b5583b12a011
--- /dev/null
+++ b/drivers/fwctl/Kconfig
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig FWCTL
+ tristate "fwctl device firmware access framework"
+ help
+ fwctl provides a userspace API for restricted access to communicate
+ with on-device firmware. The communication channel is intended to
+ support a wide range of lockdown compatible device behaviors including
+ manipulating device FLASH, debugging, and other activities that don't
+ fit neatly into an existing subsystem.
+
+if FWCTL
+config FWCTL_MLX5
+ tristate "mlx5 ConnectX control fwctl driver"
+ depends on MLX5_CORE
+ help
+ MLX5 provides an interface for user processes to access the debug and
+ configuration registers of the ConnectX hardware family
+ (NICs, PCI switches and SmartNIC SoCs).
+ This allows configuration and debug tools to work out of the box on
+ a mainstream kernel.
+
+ If you don't know what to do here, say N.
+
+config FWCTL_PDS
+ tristate "AMD/Pensando pds fwctl driver"
+ depends on PDS_CORE
+ help
+ The pds_fwctl driver provides an fwctl interface for a user process
+ to access the debug and configuration information of the AMD/Pensando
+ DSC hardware family.
+
+ If you don't know what to do here, say N.
+endif
diff --git a/drivers/fwctl/Makefile b/drivers/fwctl/Makefile
new file mode 100644
index 000000000000..c093b5f661d6
--- /dev/null
+++ b/drivers/fwctl/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_FWCTL) += fwctl.o
+obj-$(CONFIG_FWCTL_MLX5) += mlx5/
+obj-$(CONFIG_FWCTL_PDS) += pds/
+
+fwctl-y += main.o
diff --git a/drivers/fwctl/main.c b/drivers/fwctl/main.c
new file mode 100644
index 000000000000..cb1ac9c40239
--- /dev/null
+++ b/drivers/fwctl/main.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ */
+#define pr_fmt(fmt) "fwctl: " fmt
+#include <linux/fwctl.h>
+
+#include <linux/container_of.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+#include <uapi/fwctl/fwctl.h>
+
+enum {
+ FWCTL_MAX_DEVICES = 4096,
+ MAX_RPC_LEN = SZ_2M,
+};
+static_assert(FWCTL_MAX_DEVICES < (1U << MINORBITS));
+
+static dev_t fwctl_dev;
+static DEFINE_IDA(fwctl_ida);
+static unsigned long fwctl_tainted;
+
+struct fwctl_ucmd {
+ struct fwctl_uctx *uctx;
+ void __user *ubuffer;
+ void *cmd;
+ u32 user_size;
+};
+
+static int ucmd_respond(struct fwctl_ucmd *ucmd, size_t cmd_len)
+{
+ if (copy_to_user(ucmd->ubuffer, ucmd->cmd,
+ min_t(size_t, ucmd->user_size, cmd_len)))
+ return -EFAULT;
+ return 0;
+}
+
+static int copy_to_user_zero_pad(void __user *to, const void *from,
+ size_t from_len, size_t user_len)
+{
+ size_t copy_len;
+
+ copy_len = min(from_len, user_len);
+ if (copy_to_user(to, from, copy_len))
+ return -EFAULT;
+ if (copy_len < user_len) {
+ if (clear_user(to + copy_len, user_len - copy_len))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int fwctl_cmd_info(struct fwctl_ucmd *ucmd)
+{
+ struct fwctl_device *fwctl = ucmd->uctx->fwctl;
+ struct fwctl_info *cmd = ucmd->cmd;
+ size_t driver_info_len = 0;
+
+ if (cmd->flags)
+ return -EOPNOTSUPP;
+
+ if (!fwctl->ops->info && cmd->device_data_len) {
+ if (clear_user(u64_to_user_ptr(cmd->out_device_data),
+ cmd->device_data_len))
+ return -EFAULT;
+ } else if (cmd->device_data_len) {
+ void *driver_info __free(kfree) =
+ fwctl->ops->info(ucmd->uctx, &driver_info_len);
+ if (IS_ERR(driver_info))
+ return PTR_ERR(driver_info);
+
+ if (copy_to_user_zero_pad(u64_to_user_ptr(cmd->out_device_data),
+ driver_info, driver_info_len,
+ cmd->device_data_len))
+ return -EFAULT;
+ }
+
+ cmd->out_device_type = fwctl->ops->device_type;
+ cmd->device_data_len = driver_info_len;
+ return ucmd_respond(ucmd, sizeof(*cmd));
+}
+
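Seen from userspace, FWCTL_INFO follows the extensible-struct convention enforced in fwctl_fops_ioctl() below: the first __u32 of the struct carries the caller's struct size, and any trailing space the kernel does not fill is zeroed. A hedged sketch, assuming the uapi header installs as <fwctl/fwctl.h> and that the leading length member is named size:

/* Hypothetical userspace query of a fwctl device's type. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <fwctl/fwctl.h>	/* assumed install path of uapi/fwctl/fwctl.h */

int main(void)
{
	struct fwctl_info info;
	int fd;

	fd = open("/dev/fwctl/fwctl0", O_RDWR);
	if (fd < 0)
		return 1;

	memset(&info, 0, sizeof(info));
	info.size = sizeof(info);	/* assumed name of the leading size field */

	if (ioctl(fd, FWCTL_INFO, &info)) {
		close(fd);
		return 1;
	}

	printf("device type %u\n", info.out_device_type);
	close(fd);
	return 0;
}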
+static int fwctl_cmd_rpc(struct fwctl_ucmd *ucmd)
+{
+ struct fwctl_device *fwctl = ucmd->uctx->fwctl;
+ struct fwctl_rpc *cmd = ucmd->cmd;
+ size_t out_len;
+
+ if (cmd->in_len > MAX_RPC_LEN || cmd->out_len > MAX_RPC_LEN)
+ return -EMSGSIZE;
+
+ switch (cmd->scope) {
+ case FWCTL_RPC_CONFIGURATION:
+ case FWCTL_RPC_DEBUG_READ_ONLY:
+ break;
+
+ case FWCTL_RPC_DEBUG_WRITE_FULL:
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ fallthrough;
+ case FWCTL_RPC_DEBUG_WRITE:
+ if (!test_and_set_bit(0, &fwctl_tainted)) {
+ dev_warn(
+ &fwctl->dev,
+ "%s(%d): has requested full access to the physical device device",
+ current->comm, task_pid_nr(current));
+ add_taint(TAINT_FWCTL, LOCKDEP_STILL_OK);
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ void *inbuf __free(kvfree) = kvzalloc(cmd->in_len, GFP_KERNEL_ACCOUNT);
+ if (!inbuf)
+ return -ENOMEM;
+ if (copy_from_user(inbuf, u64_to_user_ptr(cmd->in), cmd->in_len))
+ return -EFAULT;
+
+ out_len = cmd->out_len;
+ void *outbuf __free(kvfree) = fwctl->ops->fw_rpc(
+ ucmd->uctx, cmd->scope, inbuf, cmd->in_len, &out_len);
+ if (IS_ERR(outbuf))
+ return PTR_ERR(outbuf);
+ if (outbuf == inbuf) {
+ /* The driver can re-use inbuf as outbuf */
+ inbuf = NULL;
+ }
+
+ if (copy_to_user(u64_to_user_ptr(cmd->out), outbuf,
+ min(cmd->out_len, out_len)))
+ return -EFAULT;
+
+ cmd->out_len = out_len;
+ return ucmd_respond(ucmd, sizeof(*cmd));
+}
+
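The RPC path is driven the same way from userspace: the caller picks a scope, points in/out at device-specific command buffers (at most 2 MiB each), and reads back the possibly shorter out_len. A hedged sketch; the buffers here are placeholders for a device-specific command such as an mlx5 mailbox message, and the size member name is assumed as in the previous example:

/* Hypothetical userspace RPC helper; the payload is device specific. */
#include <string.h>
#include <sys/ioctl.h>

#include <linux/types.h>
#include <fwctl/fwctl.h>	/* assumed install path of uapi/fwctl/fwctl.h */

static int do_rpc(int fd, void *in, __u32 in_len, void *out, __u32 out_len)
{
	struct fwctl_rpc rpc;

	memset(&rpc, 0, sizeof(rpc));
	rpc.size = sizeof(rpc);			/* assumed field name */
	rpc.scope = FWCTL_RPC_DEBUG_READ_ONLY;	/* read-only debug queries */
	rpc.in = (__u64)(unsigned long)in;
	rpc.in_len = in_len;
	rpc.out = (__u64)(unsigned long)out;
	rpc.out_len = out_len;

	if (ioctl(fd, FWCTL_RPC, &rpc))
		return -1;

	/* rpc.out_len now holds the length the device actually produced. */
	return 0;
}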
+/* On stack memory for the ioctl structs */
+union fwctl_ucmd_buffer {
+ struct fwctl_info info;
+ struct fwctl_rpc rpc;
+};
+
+struct fwctl_ioctl_op {
+ unsigned int size;
+ unsigned int min_size;
+ unsigned int ioctl_num;
+ int (*execute)(struct fwctl_ucmd *ucmd);
+};
+
+#define IOCTL_OP(_ioctl, _fn, _struct, _last) \
+ [_IOC_NR(_ioctl) - FWCTL_CMD_BASE] = { \
+ .size = sizeof(_struct) + \
+ BUILD_BUG_ON_ZERO(sizeof(union fwctl_ucmd_buffer) < \
+ sizeof(_struct)), \
+ .min_size = offsetofend(_struct, _last), \
+ .ioctl_num = _ioctl, \
+ .execute = _fn, \
+ }
+static const struct fwctl_ioctl_op fwctl_ioctl_ops[] = {
+ IOCTL_OP(FWCTL_INFO, fwctl_cmd_info, struct fwctl_info, out_device_data),
+ IOCTL_OP(FWCTL_RPC, fwctl_cmd_rpc, struct fwctl_rpc, out),
+};
+
+static long fwctl_fops_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct fwctl_uctx *uctx = filp->private_data;
+ const struct fwctl_ioctl_op *op;
+ struct fwctl_ucmd ucmd = {};
+ union fwctl_ucmd_buffer buf;
+ unsigned int nr;
+ int ret;
+
+ nr = _IOC_NR(cmd);
+ if ((nr - FWCTL_CMD_BASE) >= ARRAY_SIZE(fwctl_ioctl_ops))
+ return -ENOIOCTLCMD;
+
+ op = &fwctl_ioctl_ops[nr - FWCTL_CMD_BASE];
+ if (op->ioctl_num != cmd)
+ return -ENOIOCTLCMD;
+
+ ucmd.uctx = uctx;
+ ucmd.cmd = &buf;
+ ucmd.ubuffer = (void __user *)arg;
+ ret = get_user(ucmd.user_size, (u32 __user *)ucmd.ubuffer);
+ if (ret)
+ return ret;
+
+ if (ucmd.user_size < op->min_size)
+ return -EINVAL;
+
+ ret = copy_struct_from_user(ucmd.cmd, op->size, ucmd.ubuffer,
+ ucmd.user_size);
+ if (ret)
+ return ret;
+
+ guard(rwsem_read)(&uctx->fwctl->registration_lock);
+ if (!uctx->fwctl->ops)
+ return -ENODEV;
+ return op->execute(&ucmd);
+}
+
+static int fwctl_fops_open(struct inode *inode, struct file *filp)
+{
+ struct fwctl_device *fwctl =
+ container_of(inode->i_cdev, struct fwctl_device, cdev);
+ int ret;
+
+ guard(rwsem_read)(&fwctl->registration_lock);
+ if (!fwctl->ops)
+ return -ENODEV;
+
+ struct fwctl_uctx *uctx __free(kfree) =
+ kzalloc(fwctl->ops->uctx_size, GFP_KERNEL_ACCOUNT);
+ if (!uctx)
+ return -ENOMEM;
+
+ uctx->fwctl = fwctl;
+ ret = fwctl->ops->open_uctx(uctx);
+ if (ret)
+ return ret;
+
+ scoped_guard(mutex, &fwctl->uctx_list_lock) {
+ list_add_tail(&uctx->uctx_list_entry, &fwctl->uctx_list);
+ }
+
+ get_device(&fwctl->dev);
+ filp->private_data = no_free_ptr(uctx);
+ return 0;
+}
+
+static void fwctl_destroy_uctx(struct fwctl_uctx *uctx)
+{
+ lockdep_assert_held(&uctx->fwctl->uctx_list_lock);
+ list_del(&uctx->uctx_list_entry);
+ uctx->fwctl->ops->close_uctx(uctx);
+}
+
+static int fwctl_fops_release(struct inode *inode, struct file *filp)
+{
+ struct fwctl_uctx *uctx = filp->private_data;
+ struct fwctl_device *fwctl = uctx->fwctl;
+
+ scoped_guard(rwsem_read, &fwctl->registration_lock) {
+ /*
+ * NULL ops means fwctl_unregister() has already removed the
+ * driver and destroyed the uctx.
+ */
+ if (fwctl->ops) {
+ guard(mutex)(&fwctl->uctx_list_lock);
+ fwctl_destroy_uctx(uctx);
+ }
+ }
+
+ kfree(uctx);
+ fwctl_put(fwctl);
+ return 0;
+}
+
+static const struct file_operations fwctl_fops = {
+ .owner = THIS_MODULE,
+ .open = fwctl_fops_open,
+ .release = fwctl_fops_release,
+ .unlocked_ioctl = fwctl_fops_ioctl,
+};
+
+static void fwctl_device_release(struct device *device)
+{
+ struct fwctl_device *fwctl =
+ container_of(device, struct fwctl_device, dev);
+
+ ida_free(&fwctl_ida, fwctl->dev.devt - fwctl_dev);
+ mutex_destroy(&fwctl->uctx_list_lock);
+ kfree(fwctl);
+}
+
+static char *fwctl_devnode(const struct device *dev, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "fwctl/%s", dev_name(dev));
+}
+
+static struct class fwctl_class = {
+ .name = "fwctl",
+ .dev_release = fwctl_device_release,
+ .devnode = fwctl_devnode,
+};
+
+static struct fwctl_device *
+_alloc_device(struct device *parent, const struct fwctl_ops *ops, size_t size)
+{
+ struct fwctl_device *fwctl __free(kfree) = kzalloc(size, GFP_KERNEL);
+ int devnum;
+
+ if (!fwctl)
+ return NULL;
+
+ devnum = ida_alloc_max(&fwctl_ida, FWCTL_MAX_DEVICES - 1, GFP_KERNEL);
+ if (devnum < 0)
+ return NULL;
+
+ fwctl->dev.devt = fwctl_dev + devnum;
+ fwctl->dev.class = &fwctl_class;
+ fwctl->dev.parent = parent;
+
+ init_rwsem(&fwctl->registration_lock);
+ mutex_init(&fwctl->uctx_list_lock);
+ INIT_LIST_HEAD(&fwctl->uctx_list);
+
+ device_initialize(&fwctl->dev);
+ return_ptr(fwctl);
+}
+
+/* Drivers use the fwctl_alloc_device() wrapper */
+struct fwctl_device *_fwctl_alloc_device(struct device *parent,
+ const struct fwctl_ops *ops,
+ size_t size)
+{
+ struct fwctl_device *fwctl __free(fwctl) =
+ _alloc_device(parent, ops, size);
+
+ if (!fwctl)
+ return NULL;
+
+ cdev_init(&fwctl->cdev, &fwctl_fops);
+ /*
+ * The driver module is protected by fwctl_register/unregister(),
+ * unregister won't complete until we are done with the driver's module.
+ */
+ fwctl->cdev.owner = THIS_MODULE;
+
+ if (dev_set_name(&fwctl->dev, "fwctl%d", fwctl->dev.devt - fwctl_dev))
+ return NULL;
+
+ fwctl->ops = ops;
+ return_ptr(fwctl);
+}
+EXPORT_SYMBOL_NS_GPL(_fwctl_alloc_device, "FWCTL");
+
+/**
+ * fwctl_register - Register a new device to the subsystem
+ * @fwctl: Previously allocated fwctl_device
+ *
+ * On return the device is visible through sysfs and /dev, driver ops may be
+ * called.
+ */
+int fwctl_register(struct fwctl_device *fwctl)
+{
+ return cdev_device_add(&fwctl->cdev, &fwctl->dev);
+}
+EXPORT_SYMBOL_NS_GPL(fwctl_register, "FWCTL");
+
+/**
+ * fwctl_unregister - Unregister a device from the subsystem
+ * @fwctl: Previously allocated and registered fwctl_device
+ *
+ * Undoes fwctl_register(). On return no driver ops will be called. The
+ * caller must still call fwctl_put() to free the fwctl.
+ *
+ * Unregister will return even if userspace still has file descriptors open.
+ * This will call ops->close_uctx() on any open FDs and after return no driver
+ * op will be called. The FDs remain open but all fops will return -ENODEV.
+ *
+ * The design of fwctl allows this sort of disassociation of the driver from the
+ * subsystem primarily by keeping memory allocations owned by the core subsystem.
+ * The fwctl_device and fwctl_uctx can both be freed without requiring a driver
+ * callback. This allows the module to remain unlocked while FDs are open.
+ */
+void fwctl_unregister(struct fwctl_device *fwctl)
+{
+ struct fwctl_uctx *uctx;
+
+ cdev_device_del(&fwctl->cdev, &fwctl->dev);
+
+ /* Disable and free the driver's resources for any still open FDs. */
+ guard(rwsem_write)(&fwctl->registration_lock);
+ guard(mutex)(&fwctl->uctx_list_lock);
+ while ((uctx = list_first_entry_or_null(&fwctl->uctx_list,
+ struct fwctl_uctx,
+ uctx_list_entry)))
+ fwctl_destroy_uctx(uctx);
+
+ /*
+ * The driver module may unload after this returns, the op pointer will
+ * not be valid.
+ */
+ fwctl->ops = NULL;
+}
+EXPORT_SYMBOL_NS_GPL(fwctl_unregister, "FWCTL");
+
+static int __init fwctl_init(void)
+{
+ int ret;
+
+ ret = alloc_chrdev_region(&fwctl_dev, 0, FWCTL_MAX_DEVICES, "fwctl");
+ if (ret)
+ return ret;
+
+ ret = class_register(&fwctl_class);
+ if (ret)
+ goto err_chrdev;
+ return 0;
+
+err_chrdev:
+ unregister_chrdev_region(fwctl_dev, FWCTL_MAX_DEVICES);
+ return ret;
+}
+
+static void __exit fwctl_exit(void)
+{
+ class_unregister(&fwctl_class);
+ unregister_chrdev_region(fwctl_dev, FWCTL_MAX_DEVICES);
+}
+
+module_init(fwctl_init);
+module_exit(fwctl_exit);
+MODULE_DESCRIPTION("fwctl device firmware access framework");
+MODULE_LICENSE("GPL");
diff --git a/drivers/fwctl/mlx5/Makefile b/drivers/fwctl/mlx5/Makefile
new file mode 100644
index 000000000000..139a23e3c7c5
--- /dev/null
+++ b/drivers/fwctl/mlx5/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_FWCTL_MLX5) += mlx5_fwctl.o
+
+mlx5_fwctl-y += main.o
diff --git a/drivers/fwctl/mlx5/main.c b/drivers/fwctl/mlx5/main.c
new file mode 100644
index 000000000000..f93aa0cecdb9
--- /dev/null
+++ b/drivers/fwctl/mlx5/main.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ */
+#include <linux/fwctl.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/mlx5/device.h>
+#include <linux/mlx5/driver.h>
+#include <uapi/fwctl/mlx5.h>
+
+#define mlx5ctl_err(mcdev, format, ...) \
+ dev_err(&mcdev->fwctl.dev, format, ##__VA_ARGS__)
+
+#define mlx5ctl_dbg(mcdev, format, ...) \
+ dev_dbg(&mcdev->fwctl.dev, "PID %u: " format, current->pid, \
+ ##__VA_ARGS__)
+
+struct mlx5ctl_uctx {
+ struct fwctl_uctx uctx;
+ u32 uctx_caps;
+ u32 uctx_uid;
+};
+
+struct mlx5ctl_dev {
+ struct fwctl_device fwctl;
+ struct mlx5_core_dev *mdev;
+};
+DEFINE_FREE(mlx5ctl, struct mlx5ctl_dev *, if (_T) fwctl_put(&_T->fwctl));
+
+struct mlx5_ifc_mbox_in_hdr_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_mbox_out_hdr_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
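These locally defined *_bits layouts feed the standard mlx5 accessor macros used later in this file: MLX5_GET()/MLX5_SET() derive bit offsets from the field widths declared above. A short sketch of that usage (helper names invented for illustration):

/* Sketch: inspecting and stamping the mailbox header of an RPC buffer. */
static u16 example_rpc_opcode(void *rpc_in)
{
	/* Offsets come from struct mlx5_ifc_mbox_in_hdr_bits above. */
	return MLX5_GET(mbox_in_hdr, rpc_in, opcode);
}

static void example_stamp_uid(void *rpc_in, u16 uid)
{
	MLX5_SET(mbox_in_hdr, rpc_in, uid, uid);
}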
+enum {
+ MLX5_UCTX_OBJECT_CAP_TOOLS_RESOURCES = 0x4,
+};
+
+enum {
+ MLX5_CMD_OP_QUERY_DRIVER_VERSION = 0x10c,
+ MLX5_CMD_OP_QUERY_OTHER_HCA_CAP = 0x10e,
+ MLX5_CMD_OP_QUERY_RDB = 0x512,
+ MLX5_CMD_OP_QUERY_PSV = 0x602,
+ MLX5_CMD_OP_QUERY_DC_CNAK_TRACE = 0x716,
+ MLX5_CMD_OP_QUERY_NVMF_BACKEND_CONTROLLER = 0x722,
+ MLX5_CMD_OP_QUERY_NVMF_NAMESPACE_CONTEXT = 0x728,
+ MLX5_CMD_OP_QUERY_BURST_SIZE = 0x813,
+ MLX5_CMD_OP_QUERY_DIAGNOSTIC_PARAMS = 0x819,
+ MLX5_CMD_OP_SET_DIAGNOSTIC_PARAMS = 0x820,
+ MLX5_CMD_OP_QUERY_DIAGNOSTIC_COUNTERS = 0x821,
+ MLX5_CMD_OP_QUERY_DELAY_DROP_PARAMS = 0x911,
+ MLX5_CMD_OP_QUERY_AFU = 0x971,
+ MLX5_CMD_OP_QUERY_CAPI_PEC = 0x981,
+ MLX5_CMD_OP_QUERY_UCTX = 0xa05,
+ MLX5_CMD_OP_QUERY_UMEM = 0xa09,
+ MLX5_CMD_OP_QUERY_NVMF_CC_RESPONSE = 0xb02,
+ MLX5_CMD_OP_QUERY_EMULATED_FUNCTIONS_INFO = 0xb03,
+ MLX5_CMD_OP_QUERY_REGEXP_PARAMS = 0xb05,
+ MLX5_CMD_OP_QUERY_REGEXP_REGISTER = 0xb07,
+ MLX5_CMD_OP_USER_QUERY_XRQ_DC_PARAMS_ENTRY = 0xb08,
+ MLX5_CMD_OP_USER_QUERY_XRQ_ERROR_PARAMS = 0xb0a,
+ MLX5_CMD_OP_ACCESS_REGISTER_USER = 0xb0c,
+ MLX5_CMD_OP_QUERY_EMULATION_DEVICE_EQ_MSIX_MAPPING = 0xb0f,
+ MLX5_CMD_OP_QUERY_MATCH_SAMPLE_INFO = 0xb13,
+ MLX5_CMD_OP_QUERY_CRYPTO_STATE = 0xb14,
+ MLX5_CMD_OP_QUERY_VUID = 0xb22,
+ MLX5_CMD_OP_QUERY_DPA_PARTITION = 0xb28,
+ MLX5_CMD_OP_QUERY_DPA_PARTITIONS = 0xb2a,
+ MLX5_CMD_OP_POSTPONE_CONNECTED_QP_TIMEOUT = 0xb2e,
+ MLX5_CMD_OP_QUERY_EMULATED_RESOURCES_INFO = 0xb2f,
+ MLX5_CMD_OP_QUERY_RSV_RESOURCES = 0x8000,
+ MLX5_CMD_OP_QUERY_MTT = 0x8001,
+ MLX5_CMD_OP_QUERY_SCHED_QUEUE = 0x8006,
+};
+
+static int mlx5ctl_alloc_uid(struct mlx5ctl_dev *mcdev, u32 cap)
+{
+ u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {};
+ void *uctx;
+ int ret;
+ u16 uid;
+
+ uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
+
+ mlx5ctl_dbg(mcdev, "%s: caps 0x%x\n", __func__, cap);
+ MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
+ MLX5_SET(uctx, uctx, cap, cap);
+
+ ret = mlx5_cmd_exec(mcdev->mdev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ return ret;
+
+ uid = MLX5_GET(create_uctx_out, out, uid);
+ mlx5ctl_dbg(mcdev, "allocated uid %u with caps 0x%x\n", uid, cap);
+ return uid;
+}
+
+static void mlx5ctl_release_uid(struct mlx5ctl_dev *mcdev, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};
+ struct mlx5_core_dev *mdev = mcdev->mdev;
+ int ret;
+
+ MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
+ MLX5_SET(destroy_uctx_in, in, uid, uid);
+
+ ret = mlx5_cmd_exec_in(mdev, destroy_uctx, in);
+ mlx5ctl_dbg(mcdev, "released uid %u %pe\n", uid, ERR_PTR(ret));
+}
+
+static int mlx5ctl_open_uctx(struct fwctl_uctx *uctx)
+{
+ struct mlx5ctl_uctx *mfd =
+ container_of(uctx, struct mlx5ctl_uctx, uctx);
+ struct mlx5ctl_dev *mcdev =
+ container_of(uctx->fwctl, struct mlx5ctl_dev, fwctl);
+ int uid;
+
+ /*
+ * New FW supports the TOOLS_RESOURCES uid security label
+ * which allows commands to manipulate the global device state.
+ * Otherwise only basic existing RDMA devx privilege are allowed.
+ */
+ if (MLX5_CAP_GEN(mcdev->mdev, uctx_cap) &
+ MLX5_UCTX_OBJECT_CAP_TOOLS_RESOURCES)
+ mfd->uctx_caps |= MLX5_UCTX_OBJECT_CAP_TOOLS_RESOURCES;
+
+ uid = mlx5ctl_alloc_uid(mcdev, mfd->uctx_caps);
+ if (uid < 0)
+ return uid;
+
+ mfd->uctx_uid = uid;
+ return 0;
+}
+
+static void mlx5ctl_close_uctx(struct fwctl_uctx *uctx)
+{
+ struct mlx5ctl_dev *mcdev =
+ container_of(uctx->fwctl, struct mlx5ctl_dev, fwctl);
+ struct mlx5ctl_uctx *mfd =
+ container_of(uctx, struct mlx5ctl_uctx, uctx);
+
+ mlx5ctl_release_uid(mcdev, mfd->uctx_uid);
+}
+
+static void *mlx5ctl_info(struct fwctl_uctx *uctx, size_t *length)
+{
+ struct mlx5ctl_uctx *mfd =
+ container_of(uctx, struct mlx5ctl_uctx, uctx);
+ struct fwctl_info_mlx5 *info;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return ERR_PTR(-ENOMEM);
+
+ info->uid = mfd->uctx_uid;
+ info->uctx_caps = mfd->uctx_caps;
+ *length = sizeof(*info);
+ return info;
+}
+
+static bool mlx5ctl_validate_rpc(const void *in, enum fwctl_rpc_scope scope)
+{
+ u16 opcode = MLX5_GET(mbox_in_hdr, in, opcode);
+ u16 op_mod = MLX5_GET(mbox_in_hdr, in, op_mod);
+
+ /*
+ * Currently the driver can't keep track of commands that allocate
+ * objects in the FW, these commands are safe from a security
+ * perspective but nothing will free the memory when the FD is closed.
+ * For now permit only query commands and set commands that don't alter
+ * objects. Also the caps for the scope have not been defined yet,
+ * filter commands manually for now.
+ */
+ switch (opcode) {
+ case MLX5_CMD_OP_POSTPONE_CONNECTED_QP_TIMEOUT:
+ case MLX5_CMD_OP_QUERY_ADAPTER:
+ case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
+ case MLX5_CMD_OP_QUERY_HCA_CAP:
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_OTHER_HCA_CAP:
+ case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
+ case MLX5_CMD_OPCODE_QUERY_VUID:
+ /*
+ * FW limits SET_HCA_CAP on the tools UID to only the other function
+ * mode which is used for function pre-configuration
+ */
+ case MLX5_CMD_OP_SET_HCA_CAP:
+ return true; /* scope >= FWCTL_RPC_CONFIGURATION; */
+
+ case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS:
+ case MLX5_CMD_OP_FPGA_QUERY_QP:
+ case MLX5_CMD_OP_NOP:
+ case MLX5_CMD_OP_QUERY_AFU:
+ case MLX5_CMD_OP_QUERY_BURST_SIZE:
+ case MLX5_CMD_OP_QUERY_CAPI_PEC:
+ case MLX5_CMD_OP_QUERY_CONG_PARAMS:
+ case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
+ case MLX5_CMD_OP_QUERY_CONG_STATUS:
+ case MLX5_CMD_OP_QUERY_CQ:
+ case MLX5_CMD_OP_QUERY_CRYPTO_STATE:
+ case MLX5_CMD_OP_QUERY_DC_CNAK_TRACE:
+ case MLX5_CMD_OP_QUERY_DCT:
+ case MLX5_CMD_OP_QUERY_DELAY_DROP_PARAMS:
+ case MLX5_CMD_OP_QUERY_DIAGNOSTIC_COUNTERS:
+ case MLX5_CMD_OP_QUERY_DIAGNOSTIC_PARAMS:
+ case MLX5_CMD_OP_QUERY_DPA_PARTITION:
+ case MLX5_CMD_OP_QUERY_DPA_PARTITIONS:
+ case MLX5_CMD_OP_QUERY_DRIVER_VERSION:
+ case MLX5_CMD_OP_QUERY_EMULATED_FUNCTIONS_INFO:
+ case MLX5_CMD_OP_QUERY_EMULATED_RESOURCES_INFO:
+ case MLX5_CMD_OP_QUERY_EMULATION_DEVICE_EQ_MSIX_MAPPING:
+ case MLX5_CMD_OP_QUERY_EQ:
+ case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
+ case MLX5_CMD_OP_QUERY_FLOW_GROUP:
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE:
+ case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
+ case MLX5_CMD_OP_QUERY_ISSI:
+ case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_QUERY_LAG:
+ case MLX5_CMD_OP_QUERY_MAD_DEMUX:
+ case MLX5_CMD_OP_QUERY_MATCH_SAMPLE_INFO:
+ case MLX5_CMD_OP_QUERY_MKEY:
+ case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
+ case MLX5_CMD_OP_QUERY_MTT:
+ case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_NVMF_BACKEND_CONTROLLER:
+ case MLX5_CMD_OP_QUERY_NVMF_CC_RESPONSE:
+ case MLX5_CMD_OP_QUERY_NVMF_NAMESPACE_CONTEXT:
+ case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_PAGES:
+ case MLX5_CMD_OP_QUERY_PSV:
+ case MLX5_CMD_OP_QUERY_Q_COUNTER:
+ case MLX5_CMD_OP_QUERY_QP:
+ case MLX5_CMD_OP_QUERY_RATE_LIMIT:
+ case MLX5_CMD_OP_QUERY_RDB:
+ case MLX5_CMD_OP_QUERY_REGEXP_PARAMS:
+ case MLX5_CMD_OP_QUERY_REGEXP_REGISTER:
+ case MLX5_CMD_OP_QUERY_RMP:
+ case MLX5_CMD_OP_QUERY_RQ:
+ case MLX5_CMD_OP_QUERY_RQT:
+ case MLX5_CMD_OP_QUERY_RSV_RESOURCES:
+ case MLX5_CMD_OP_QUERY_SCHED_QUEUE:
+ case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_QUERY_SF_PARTITION:
+ case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
+ case MLX5_CMD_OP_QUERY_SQ:
+ case MLX5_CMD_OP_QUERY_SRQ:
+ case MLX5_CMD_OP_QUERY_TIR:
+ case MLX5_CMD_OP_QUERY_TIS:
+ case MLX5_CMD_OP_QUERY_UCTX:
+ case MLX5_CMD_OP_QUERY_UMEM:
+ case MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE:
+ case MLX5_CMD_OP_QUERY_VHCA_STATE:
+ case MLX5_CMD_OP_QUERY_VNIC_ENV:
+ case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
+ case MLX5_CMD_OP_QUERY_VPORT_STATE:
+ case MLX5_CMD_OP_QUERY_WOL_ROL:
+ case MLX5_CMD_OP_QUERY_XRC_SRQ:
+ case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
+ case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
+ case MLX5_CMD_OP_QUERY_XRQ:
+ case MLX5_CMD_OP_USER_QUERY_XRQ_DC_PARAMS_ENTRY:
+ case MLX5_CMD_OP_USER_QUERY_XRQ_ERROR_PARAMS:
+ return scope >= FWCTL_RPC_DEBUG_READ_ONLY;
+
+ case MLX5_CMD_OP_SET_DIAGNOSTIC_PARAMS:
+ return scope >= FWCTL_RPC_DEBUG_WRITE;
+
+ case MLX5_CMD_OP_ACCESS_REG:
+ case MLX5_CMD_OP_ACCESS_REGISTER_USER:
+ if (op_mod == 0) /* write */
+ return true; /* scope >= FWCTL_RPC_CONFIGURATION; */
+ return scope >= FWCTL_RPC_DEBUG_READ_ONLY;
+ default:
+ return false;
+ }
+}
+
+static void *mlx5ctl_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
+ void *rpc_in, size_t in_len, size_t *out_len)
+{
+ struct mlx5ctl_dev *mcdev =
+ container_of(uctx->fwctl, struct mlx5ctl_dev, fwctl);
+ struct mlx5ctl_uctx *mfd =
+ container_of(uctx, struct mlx5ctl_uctx, uctx);
+ void *rpc_out;
+ int ret;
+
+ if (in_len < MLX5_ST_SZ_BYTES(mbox_in_hdr) ||
+ *out_len < MLX5_ST_SZ_BYTES(mbox_out_hdr))
+ return ERR_PTR(-EMSGSIZE);
+
+ mlx5ctl_dbg(mcdev, "[UID %d] cmdif: opcode 0x%x inlen %zu outlen %zu\n",
+ mfd->uctx_uid, MLX5_GET(mbox_in_hdr, rpc_in, opcode),
+ in_len, *out_len);
+
+ if (!mlx5ctl_validate_rpc(rpc_in, scope))
+ return ERR_PTR(-EBADMSG);
+
+ /*
+ * mlx5_cmd_do() copies the input message to its own buffer before
+ * executing it, so we can reuse the allocation for the output.
+ */
+ if (*out_len <= in_len) {
+ rpc_out = rpc_in;
+ } else {
+ rpc_out = kvzalloc(*out_len, GFP_KERNEL);
+ if (!rpc_out)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Enforce the user context for the command */
+ MLX5_SET(mbox_in_hdr, rpc_in, uid, mfd->uctx_uid);
+ ret = mlx5_cmd_do(mcdev->mdev, rpc_in, in_len, rpc_out, *out_len);
+
+ mlx5ctl_dbg(mcdev,
+ "[UID %d] cmdif: opcode 0x%x status 0x%x retval %pe\n",
+ mfd->uctx_uid, MLX5_GET(mbox_in_hdr, rpc_in, opcode),
+ MLX5_GET(mbox_out_hdr, rpc_out, status), ERR_PTR(ret));
+
+ /*
+ * -EREMOTEIO means execution succeeded and the out is valid,
+ * but an error code was returned inside out. Everything else
+ * means the RPC did not make it to the device.
+ */
+ if (ret && ret != -EREMOTEIO) {
+ if (rpc_out != rpc_in)
+ kfree(rpc_out);
+ return ERR_PTR(ret);
+ }
+ return rpc_out;
+}
+
+static const struct fwctl_ops mlx5ctl_ops = {
+ .device_type = FWCTL_DEVICE_TYPE_MLX5,
+ .uctx_size = sizeof(struct mlx5ctl_uctx),
+ .open_uctx = mlx5ctl_open_uctx,
+ .close_uctx = mlx5ctl_close_uctx,
+ .info = mlx5ctl_info,
+ .fw_rpc = mlx5ctl_fw_rpc,
+};
+
+static int mlx5ctl_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct mlx5_adev *madev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = madev->mdev;
+ struct mlx5ctl_dev *mcdev __free(mlx5ctl) = fwctl_alloc_device(
+ &mdev->pdev->dev, &mlx5ctl_ops, struct mlx5ctl_dev, fwctl);
+ int ret;
+
+ if (!mcdev)
+ return -ENOMEM;
+
+ mcdev->mdev = mdev;
+
+ ret = fwctl_register(&mcdev->fwctl);
+ if (ret)
+ return ret;
+ auxiliary_set_drvdata(adev, no_free_ptr(mcdev));
+ return 0;
+}
+
+static void mlx5ctl_remove(struct auxiliary_device *adev)
+{
+ struct mlx5ctl_dev *mcdev = auxiliary_get_drvdata(adev);
+
+ fwctl_unregister(&mcdev->fwctl);
+ fwctl_put(&mcdev->fwctl);
+}
+
+static const struct auxiliary_device_id mlx5ctl_id_table[] = {
+ {.name = MLX5_ADEV_NAME ".fwctl",},
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, mlx5ctl_id_table);
+
+static struct auxiliary_driver mlx5ctl_driver = {
+ .name = "mlx5_fwctl",
+ .probe = mlx5ctl_probe,
+ .remove = mlx5ctl_remove,
+ .id_table = mlx5ctl_id_table,
+};
+
+module_auxiliary_driver(mlx5ctl_driver);
+
+MODULE_IMPORT_NS("FWCTL");
+MODULE_DESCRIPTION("mlx5 ConnectX fwctl driver");
+MODULE_AUTHOR("Saeed Mahameed <saeedm@nvidia.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/fwctl/pds/Makefile b/drivers/fwctl/pds/Makefile
new file mode 100644
index 000000000000..cc2317c07be1
--- /dev/null
+++ b/drivers/fwctl/pds/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_FWCTL_PDS) += pds_fwctl.o
+
+pds_fwctl-y += main.o
diff --git a/drivers/fwctl/pds/main.c b/drivers/fwctl/pds/main.c
new file mode 100644
index 000000000000..284c4165fdd4
--- /dev/null
+++ b/drivers/fwctl/pds/main.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) Advanced Micro Devices, Inc */
+
+#include <linux/module.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <linux/bitfield.h>
+
+#include <uapi/fwctl/fwctl.h>
+#include <uapi/fwctl/pds.h>
+#include <linux/fwctl.h>
+
+#include <linux/pds/pds_common.h>
+#include <linux/pds/pds_core_if.h>
+#include <linux/pds/pds_adminq.h>
+#include <linux/pds/pds_auxbus.h>
+
+struct pdsfc_uctx {
+ struct fwctl_uctx uctx;
+ u32 uctx_caps;
+};
+
+struct pdsfc_rpc_endpoint_info {
+ u32 endpoint;
+ dma_addr_t operations_pa;
+ struct pds_fwctl_query_data *operations;
+ struct mutex lock; /* lock for endpoint info management */
+};
+
+struct pdsfc_dev {
+ struct fwctl_device fwctl;
+ struct pds_auxiliary_dev *padev;
+ u32 caps;
+ struct pds_fwctl_ident ident;
+ dma_addr_t endpoints_pa;
+ struct pds_fwctl_query_data *endpoints;
+ struct pdsfc_rpc_endpoint_info *endpoint_info;
+};
+
+static int pdsfc_open_uctx(struct fwctl_uctx *uctx)
+{
+ struct pdsfc_dev *pdsfc = container_of(uctx->fwctl, struct pdsfc_dev, fwctl);
+ struct pdsfc_uctx *pdsfc_uctx = container_of(uctx, struct pdsfc_uctx, uctx);
+
+ pdsfc_uctx->uctx_caps = pdsfc->caps;
+
+ return 0;
+}
+
+static void pdsfc_close_uctx(struct fwctl_uctx *uctx)
+{
+}
+
+static void *pdsfc_info(struct fwctl_uctx *uctx, size_t *length)
+{
+ struct pdsfc_uctx *pdsfc_uctx = container_of(uctx, struct pdsfc_uctx, uctx);
+ struct fwctl_info_pds *info;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return ERR_PTR(-ENOMEM);
+
+ info->uctx_caps = pdsfc_uctx->uctx_caps;
+
+ return info;
+}
+
+static int pdsfc_identify(struct pdsfc_dev *pdsfc)
+{
+ struct device *dev = &pdsfc->fwctl.dev;
+ union pds_core_adminq_comp comp = {0};
+ union pds_core_adminq_cmd cmd;
+ struct pds_fwctl_ident *ident;
+ dma_addr_t ident_pa;
+ int err;
+
+ ident = dma_alloc_coherent(dev->parent, sizeof(*ident), &ident_pa, GFP_KERNEL);
+ if (!ident) {
+ dev_err(dev, "Failed to map ident buffer\n");
+ return -ENOMEM;
+ }
+
+ cmd = (union pds_core_adminq_cmd) {
+ .fwctl_ident = {
+ .opcode = PDS_FWCTL_CMD_IDENT,
+ .version = 0,
+ .len = cpu_to_le32(sizeof(*ident)),
+ .ident_pa = cpu_to_le64(ident_pa),
+ }
+ };
+
+ err = pds_client_adminq_cmd(pdsfc->padev, &cmd, sizeof(cmd), &comp, 0);
+ if (err)
+ dev_err(dev, "Failed to send adminq cmd opcode: %u err: %d\n",
+ cmd.fwctl_ident.opcode, err);
+ else
+ pdsfc->ident = *ident;
+
+ dma_free_coherent(dev->parent, sizeof(*ident), ident, ident_pa);
+
+ return err;
+}
+
+static void pdsfc_free_endpoints(struct pdsfc_dev *pdsfc)
+{
+ struct device *dev = &pdsfc->fwctl.dev;
+ int i;
+
+ if (!pdsfc->endpoints)
+ return;
+
+ for (i = 0; pdsfc->endpoint_info && i < pdsfc->endpoints->num_entries; i++)
+ mutex_destroy(&pdsfc->endpoint_info[i].lock);
+ vfree(pdsfc->endpoint_info);
+ pdsfc->endpoint_info = NULL;
+ dma_free_coherent(dev->parent, PAGE_SIZE,
+ pdsfc->endpoints, pdsfc->endpoints_pa);
+ pdsfc->endpoints = NULL;
+ pdsfc->endpoints_pa = DMA_MAPPING_ERROR;
+}
+
+static void pdsfc_free_operations(struct pdsfc_dev *pdsfc)
+{
+ struct device *dev = &pdsfc->fwctl.dev;
+ u32 num_endpoints;
+ int i;
+
+ num_endpoints = le32_to_cpu(pdsfc->endpoints->num_entries);
+ for (i = 0; i < num_endpoints; i++) {
+ struct pdsfc_rpc_endpoint_info *ei = &pdsfc->endpoint_info[i];
+
+ if (ei->operations) {
+ dma_free_coherent(dev->parent, PAGE_SIZE,
+ ei->operations, ei->operations_pa);
+ ei->operations = NULL;
+ ei->operations_pa = DMA_MAPPING_ERROR;
+ }
+ }
+}
+
+static struct pds_fwctl_query_data *pdsfc_get_endpoints(struct pdsfc_dev *pdsfc,
+ dma_addr_t *pa)
+{
+ struct device *dev = &pdsfc->fwctl.dev;
+ union pds_core_adminq_comp comp = {0};
+ struct pds_fwctl_query_data *data;
+ union pds_core_adminq_cmd cmd;
+ dma_addr_t data_pa;
+ int err;
+
+ data = dma_alloc_coherent(dev->parent, PAGE_SIZE, &data_pa, GFP_KERNEL);
+ if (!data) {
+ dev_err(dev, "Failed to map endpoint list\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ cmd = (union pds_core_adminq_cmd) {
+ .fwctl_query = {
+ .opcode = PDS_FWCTL_CMD_QUERY,
+ .entity = PDS_FWCTL_RPC_ROOT,
+ .version = 0,
+ .query_data_buf_len = cpu_to_le32(PAGE_SIZE),
+ .query_data_buf_pa = cpu_to_le64(data_pa),
+ }
+ };
+
+ err = pds_client_adminq_cmd(pdsfc->padev, &cmd, sizeof(cmd), &comp, 0);
+ if (err) {
+ dev_err(dev, "Failed to send adminq cmd opcode: %u entity: %u err: %d\n",
+ cmd.fwctl_query.opcode, cmd.fwctl_query.entity, err);
+ dma_free_coherent(dev->parent, PAGE_SIZE, data, data_pa);
+ return ERR_PTR(err);
+ }
+
+ *pa = data_pa;
+
+ return data;
+}
+
+static int pdsfc_init_endpoints(struct pdsfc_dev *pdsfc)
+{
+ struct pds_fwctl_query_data_endpoint *ep_entry;
+ u32 num_endpoints;
+ int i;
+
+ pdsfc->endpoints = pdsfc_get_endpoints(pdsfc, &pdsfc->endpoints_pa);
+ if (IS_ERR(pdsfc->endpoints))
+ return PTR_ERR(pdsfc->endpoints);
+
+ num_endpoints = le32_to_cpu(pdsfc->endpoints->num_entries);
+ pdsfc->endpoint_info = vcalloc(num_endpoints,
+ sizeof(*pdsfc->endpoint_info));
+ if (!pdsfc->endpoint_info) {
+ pdsfc_free_endpoints(pdsfc);
+ return -ENOMEM;
+ }
+
+ ep_entry = (struct pds_fwctl_query_data_endpoint *)pdsfc->endpoints->entries;
+ for (i = 0; i < num_endpoints; i++) {
+ mutex_init(&pdsfc->endpoint_info[i].lock);
+ pdsfc->endpoint_info[i].endpoint = ep_entry[i].id;
+ }
+
+ return 0;
+}
+
+static struct pds_fwctl_query_data *pdsfc_get_operations(struct pdsfc_dev *pdsfc,
+ dma_addr_t *pa, u32 ep)
+{
+ struct pds_fwctl_query_data_operation *entries;
+ struct device *dev = &pdsfc->fwctl.dev;
+ union pds_core_adminq_comp comp = {0};
+ struct pds_fwctl_query_data *data;
+ union pds_core_adminq_cmd cmd;
+ dma_addr_t data_pa;
+ int err;
+ int i;
+
+ /* Query the operations list for the given endpoint */
+ data = dma_alloc_coherent(dev->parent, PAGE_SIZE, &data_pa, GFP_KERNEL);
+ if (!data) {
+ dev_err(dev, "Failed to map operations list\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ cmd = (union pds_core_adminq_cmd) {
+ .fwctl_query = {
+ .opcode = PDS_FWCTL_CMD_QUERY,
+ .entity = PDS_FWCTL_RPC_ENDPOINT,
+ .version = 0,
+ .query_data_buf_len = cpu_to_le32(PAGE_SIZE),
+ .query_data_buf_pa = cpu_to_le64(data_pa),
+ .ep = cpu_to_le32(ep),
+ }
+ };
+
+ err = pds_client_adminq_cmd(pdsfc->padev, &cmd, sizeof(cmd), &comp, 0);
+ if (err) {
+ dev_err(dev, "Failed to send adminq cmd opcode: %u entity: %u err: %d\n",
+ cmd.fwctl_query.opcode, cmd.fwctl_query.entity, err);
+ dma_free_coherent(dev->parent, PAGE_SIZE, data, data_pa);
+ return ERR_PTR(err);
+ }
+
+ *pa = data_pa;
+
+ entries = (struct pds_fwctl_query_data_operation *)data->entries;
+ dev_dbg(dev, "num_entries %d\n", data->num_entries);
+ for (i = 0; i < data->num_entries; i++) {
+
+ /* Translate FW command attribute to fwctl scope */
+ switch (entries[i].scope) {
+ case PDSFC_FW_CMD_ATTR_READ:
+ case PDSFC_FW_CMD_ATTR_WRITE:
+ case PDSFC_FW_CMD_ATTR_SYNC:
+ entries[i].scope = FWCTL_RPC_CONFIGURATION;
+ break;
+ case PDSFC_FW_CMD_ATTR_DEBUG_READ:
+ entries[i].scope = FWCTL_RPC_DEBUG_READ_ONLY;
+ break;
+ case PDSFC_FW_CMD_ATTR_DEBUG_WRITE:
+ entries[i].scope = FWCTL_RPC_DEBUG_WRITE;
+ break;
+ default:
+ entries[i].scope = FWCTL_RPC_DEBUG_WRITE_FULL;
+ break;
+ }
+ dev_dbg(dev, "endpoint %d operation: id %x scope %d\n",
+ ep, entries[i].id, entries[i].scope);
+ }
+
+ return data;
+}
+
+static int pdsfc_validate_rpc(struct pdsfc_dev *pdsfc,
+ struct fwctl_rpc_pds *rpc,
+ enum fwctl_rpc_scope scope)
+{
+ struct pds_fwctl_query_data_operation *op_entry;
+ struct pdsfc_rpc_endpoint_info *ep_info = NULL;
+ struct device *dev = &pdsfc->fwctl.dev;
+ int i;
+
+ /* validate rpc in_len & out_len based
+ * on ident.max_req_sz & max_resp_sz
+ */
+ if (rpc->in.len > pdsfc->ident.max_req_sz) {
+ dev_dbg(dev, "Invalid request size %u, max %u\n",
+ rpc->in.len, pdsfc->ident.max_req_sz);
+ return -EINVAL;
+ }
+
+ if (rpc->out.len > pdsfc->ident.max_resp_sz) {
+ dev_dbg(dev, "Invalid response size %u, max %u\n",
+ rpc->out.len, pdsfc->ident.max_resp_sz);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < pdsfc->endpoints->num_entries; i++) {
+ if (pdsfc->endpoint_info[i].endpoint == rpc->in.ep) {
+ ep_info = &pdsfc->endpoint_info[i];
+ break;
+ }
+ }
+ if (!ep_info) {
+ dev_dbg(dev, "Invalid endpoint %d\n", rpc->in.ep);
+ return -EINVAL;
+ }
+
+ /* query and cache this endpoint's operations */
+ mutex_lock(&ep_info->lock);
+ if (!ep_info->operations) {
+ struct pds_fwctl_query_data *operations;
+
+ operations = pdsfc_get_operations(pdsfc,
+ &ep_info->operations_pa,
+ rpc->in.ep);
+ if (IS_ERR(operations)) {
+ mutex_unlock(&ep_info->lock);
+ return -ENOMEM;
+ }
+ ep_info->operations = operations;
+ }
+ mutex_unlock(&ep_info->lock);
+
+ /* reject unsupported and/or out of scope commands */
+ op_entry = (struct pds_fwctl_query_data_operation *)ep_info->operations->entries;
+ for (i = 0; i < ep_info->operations->num_entries; i++) {
+ if (PDS_FWCTL_RPC_OPCODE_CMP(rpc->in.op, op_entry[i].id)) {
+ if (scope < op_entry[i].scope)
+ return -EPERM;
+ return 0;
+ }
+ }
+
+ dev_dbg(dev, "Invalid operation %d for endpoint %d\n", rpc->in.op, rpc->in.ep);
+
+ return -EINVAL;
+}
+
+static void *pdsfc_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
+ void *in, size_t in_len, size_t *out_len)
+{
+ struct pdsfc_dev *pdsfc = container_of(uctx->fwctl, struct pdsfc_dev, fwctl);
+ struct device *dev = &uctx->fwctl->dev;
+ union pds_core_adminq_comp comp = {0};
+ dma_addr_t out_payload_dma_addr = 0;
+ dma_addr_t in_payload_dma_addr = 0;
+ struct fwctl_rpc_pds *rpc = in;
+ union pds_core_adminq_cmd cmd;
+ void *out_payload = NULL;
+ void *in_payload = NULL;
+ void *out = NULL;
+ int err;
+
+ err = pdsfc_validate_rpc(pdsfc, rpc, scope);
+ if (err)
+ return ERR_PTR(err);
+
+ if (rpc->in.len > 0) {
+ in_payload = kzalloc(rpc->in.len, GFP_KERNEL);
+ if (!in_payload) {
+ dev_err(dev, "Failed to allocate in_payload\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ if (copy_from_user(in_payload, u64_to_user_ptr(rpc->in.payload),
+ rpc->in.len)) {
+ dev_dbg(dev, "Failed to copy in_payload from user\n");
+ err = -EFAULT;
+ goto err_in_payload;
+ }
+
+ in_payload_dma_addr = dma_map_single(dev->parent, in_payload,
+ rpc->in.len, DMA_TO_DEVICE);
+ err = dma_mapping_error(dev->parent, in_payload_dma_addr);
+ if (err) {
+ dev_dbg(dev, "Failed to map in_payload\n");
+ goto err_in_payload;
+ }
+ }
+
+ if (rpc->out.len > 0) {
+ out_payload = kzalloc(rpc->out.len, GFP_KERNEL);
+ if (!out_payload) {
+ dev_dbg(dev, "Failed to allocate out_payload\n");
+ err = -ENOMEM;
+ goto err_out_payload;
+ }
+
+ out_payload_dma_addr = dma_map_single(dev->parent, out_payload,
+ rpc->out.len, DMA_FROM_DEVICE);
+ err = dma_mapping_error(dev->parent, out_payload_dma_addr);
+ if (err) {
+ dev_dbg(dev, "Failed to map out_payload\n");
+ goto err_out_payload;
+ }
+ }
+
+ cmd = (union pds_core_adminq_cmd) {
+ .fwctl_rpc = {
+ .opcode = PDS_FWCTL_CMD_RPC,
+ .flags = PDS_FWCTL_RPC_IND_REQ | PDS_FWCTL_RPC_IND_RESP,
+ .ep = cpu_to_le32(rpc->in.ep),
+ .op = cpu_to_le32(rpc->in.op),
+ .req_pa = cpu_to_le64(in_payload_dma_addr),
+ .req_sz = cpu_to_le32(rpc->in.len),
+ .resp_pa = cpu_to_le64(out_payload_dma_addr),
+ .resp_sz = cpu_to_le32(rpc->out.len),
+ }
+ };
+
+ err = pds_client_adminq_cmd(pdsfc->padev, &cmd, sizeof(cmd), &comp, 0);
+ if (err) {
+ dev_dbg(dev, "%s: ep %d op %x req_pa %llx req_sz %d req_sg %d resp_pa %llx resp_sz %d resp_sg %d err %d\n",
+ __func__, rpc->in.ep, rpc->in.op,
+ cmd.fwctl_rpc.req_pa, cmd.fwctl_rpc.req_sz, cmd.fwctl_rpc.req_sg_elems,
+ cmd.fwctl_rpc.resp_pa, cmd.fwctl_rpc.resp_sz, cmd.fwctl_rpc.resp_sg_elems,
+ err);
+ goto done;
+ }
+
+ dynamic_hex_dump("out ", DUMP_PREFIX_OFFSET, 16, 1, out_payload, rpc->out.len, true);
+
+ if (copy_to_user(u64_to_user_ptr(rpc->out.payload), out_payload, rpc->out.len)) {
+ dev_dbg(dev, "Failed to copy out_payload to user\n");
+ out = ERR_PTR(-EFAULT);
+ goto done;
+ }
+
+ rpc->out.retval = le32_to_cpu(comp.fwctl_rpc.err);
+ *out_len = in_len;
+ out = in;
+
+done:
+ if (out_payload_dma_addr)
+ dma_unmap_single(dev->parent, out_payload_dma_addr,
+ rpc->out.len, DMA_FROM_DEVICE);
+err_out_payload:
+ kfree(out_payload);
+
+ if (in_payload_dma_addr)
+ dma_unmap_single(dev->parent, in_payload_dma_addr,
+ rpc->in.len, DMA_TO_DEVICE);
+err_in_payload:
+ kfree(in_payload);
+err_out:
+ if (err)
+ return ERR_PTR(err);
+
+ return out;
+}
+
+static const struct fwctl_ops pdsfc_ops = {
+ .device_type = FWCTL_DEVICE_TYPE_PDS,
+ .uctx_size = sizeof(struct pdsfc_uctx),
+ .open_uctx = pdsfc_open_uctx,
+ .close_uctx = pdsfc_close_uctx,
+ .info = pdsfc_info,
+ .fw_rpc = pdsfc_fw_rpc,
+};
+
+static int pdsfc_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct pds_auxiliary_dev *padev =
+ container_of(adev, struct pds_auxiliary_dev, aux_dev);
+ struct device *dev = &adev->dev;
+ struct pdsfc_dev *pdsfc;
+ int err;
+
+ pdsfc = fwctl_alloc_device(&padev->vf_pdev->dev, &pdsfc_ops,
+ struct pdsfc_dev, fwctl);
+ if (!pdsfc)
+ return dev_err_probe(dev, -ENOMEM, "Failed to allocate fwctl device struct\n");
+ pdsfc->padev = padev;
+
+ err = pdsfc_identify(pdsfc);
+ if (err) {
+ fwctl_put(&pdsfc->fwctl);
+ return dev_err_probe(dev, err, "Failed to identify device\n");
+ }
+
+ err = pdsfc_init_endpoints(pdsfc);
+ if (err) {
+ fwctl_put(&pdsfc->fwctl);
+ return dev_err_probe(dev, err, "Failed to init endpoints\n");
+ }
+
+ pdsfc->caps = PDS_FWCTL_QUERY_CAP | PDS_FWCTL_SEND_CAP;
+
+ err = fwctl_register(&pdsfc->fwctl);
+ if (err) {
+ pdsfc_free_endpoints(pdsfc);
+ fwctl_put(&pdsfc->fwctl);
+ return dev_err_probe(dev, err, "Failed to register device\n");
+ }
+
+ auxiliary_set_drvdata(adev, pdsfc);
+
+ return 0;
+}
+
+static void pdsfc_remove(struct auxiliary_device *adev)
+{
+ struct pdsfc_dev *pdsfc = auxiliary_get_drvdata(adev);
+
+ fwctl_unregister(&pdsfc->fwctl);
+ pdsfc_free_operations(pdsfc);
+ pdsfc_free_endpoints(pdsfc);
+
+ fwctl_put(&pdsfc->fwctl);
+}
+
+static const struct auxiliary_device_id pdsfc_id_table[] = {
+ {.name = PDS_CORE_DRV_NAME "." PDS_DEV_TYPE_FWCTL_STR },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, pdsfc_id_table);
+
+static struct auxiliary_driver pdsfc_driver = {
+ .name = "pds_fwctl",
+ .probe = pdsfc_probe,
+ .remove = pdsfc_remove,
+ .id_table = pdsfc_id_table,
+};
+
+module_auxiliary_driver(pdsfc_driver);
+
+MODULE_IMPORT_NS("FWCTL");
+MODULE_DESCRIPTION("pds fwctl driver");
+MODULE_AUTHOR("Shannon Nelson <shannon.nelson@amd.com>");
+MODULE_AUTHOR("Brett Creeley <brett.creeley@amd.com>");
+MODULE_LICENSE("GPL");
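For context, a rough user-space sketch of exercising this driver through the generic fwctl RPC ioctl follows. The struct fwctl_rpc_pds members mirror exactly what pdsfc_fw_rpc() dereferences above (in.ep/in.op/in.len/in.payload, out.len/out.payload/out.retval); the outer struct fwctl_rpc layout, the FWCTL_RPC ioctl name, the enum fwctl_rpc_scope values, and the header install paths are assumptions about the generic fwctl uapi rather than something this patch defines, so treat the sketch as illustrative only.

/*
 * Illustrative only: the struct fwctl_rpc field names and include paths
 * below are assumptions about the fwctl uapi; struct fwctl_rpc_pds matches
 * what the driver code above dereferences.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <fwctl/fwctl.h>	/* assumed install path of uapi/fwctl/fwctl.h */
#include <fwctl/pds.h>		/* assumed install path of uapi/fwctl/pds.h */

static int pds_fw_rpc(int fd, uint32_t ep, uint32_t op,
		      void *req, uint32_t req_len,
		      void *resp, uint32_t resp_len)
{
	struct fwctl_rpc_pds pds = {
		.in  = { .ep = ep, .op = op, .len = req_len,
			 .payload = (uintptr_t)req },
		.out = { .len = resp_len, .payload = (uintptr_t)resp },
	};
	struct fwctl_rpc rpc = {
		.size = sizeof(rpc),			/* assumed field */
		.scope = FWCTL_RPC_CONFIGURATION,	/* gated against the op's scope */
		.in_len = sizeof(pds),
		.out_len = sizeof(pds),
		.in = (uintptr_t)&pds,
		.out = (uintptr_t)&pds,
	};

	/* pdsfc_validate_rpc() rejects out-of-scope or unknown ops */
	if (ioctl(fd, FWCTL_RPC, &rpc))
		return -1;

	/* firmware status filled in from comp.fwctl_rpc.err by the driver */
	return (int)pds.out.retval;
}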
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 98b4d1633b25..f2c39bbff83a 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -757,6 +757,7 @@ config GPIO_VF610
default y if SOC_VF610
depends on ARCH_MXC || COMPILE_TEST
select GPIOLIB_IRQCHIP
+ select GPIO_GENERIC
help
Say yes here to support i.MX or Vybrid vf610 GPIOs.
@@ -1670,7 +1671,7 @@ config GPIO_AMD8111
config GPIO_BT8XX
tristate "BT8XX GPIO abuser"
- depends on VIDEO_BT848=n
+ depends on VIDEO_BT848=n || COMPILE_TEST
help
The BT8xx frame grabber chip has 24 GPIO pins that can be abused
as a cheap PCI GPIO card.
@@ -1791,7 +1792,6 @@ menu "SPI GPIO expanders"
config GPIO_74X164
tristate "74x164 serial-in/parallel-out 8-bits shift register"
- depends on OF_GPIO
help
Driver for 74x164 compatible serial-in/parallel-out 8-outputs
shift registers. This driver can be used to provide access
@@ -1911,6 +1911,7 @@ config GPIO_SIM
tristate "GPIO Simulator Module"
select IRQ_SIM
select CONFIGFS_FS
+ select DEV_SYNC_PROBE
help
This enables the GPIO simulator - a configfs-based GPIO testing
driver.
@@ -1939,6 +1940,7 @@ config GPIO_VIRTUSER
select DEBUG_FS
select CONFIGFS_FS
select IRQ_WORK
+ select DEV_SYNC_PROBE
help
Say Y here to enable the configurable, configfs-based virtual GPIO
consumer testing driver.
@@ -1949,3 +1951,6 @@ config GPIO_VIRTUSER
endmenu
endif
+
+config DEV_SYNC_PROBE
+ tristate
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index af3ba4d81b58..af130882ffee 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -19,6 +19,9 @@ obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o
# directly supported by gpio-generic
gpio-generic-$(CONFIG_GPIO_GENERIC) += gpio-mmio.o
+# Utilities for drivers that need synchronous fake device creation
+obj-$(CONFIG_DEV_SYNC_PROBE) += dev-sync-probe.o
+
obj-$(CONFIG_GPIO_104_DIO_48E) += gpio-104-dio-48e.o
obj-$(CONFIG_GPIO_104_IDI_48) += gpio-104-idi-48.o
obj-$(CONFIG_GPIO_104_IDIO_16) += gpio-104-idio-16.o
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index 942d1cd2bd3c..b5f0a7a2e1bf 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -1,6 +1,7 @@
This is a place for planning the ongoing long-term work in the GPIO
subsystem.
+===============================================================================
GPIO descriptors
@@ -48,6 +49,7 @@ Work items:
numberspace accessors from <linux/gpio.h> and eventually delete
<linux/gpio.h> altogether.
+-------------------------------------------------------------------------------
Get rid of <linux/of_gpio.h>
@@ -75,6 +77,7 @@ Work items:
- Delete <linux/of_gpio.h> when all the above is complete and everything
uses <linux/gpio/consumer.h> or <linux/gpio/driver.h> instead.
+-------------------------------------------------------------------------------
Get rid of <linux/gpio/legacy-of-mm-gpiochip.h>
@@ -85,15 +88,7 @@ Work items:
to_of_mm_gpio_chip(), of_mm_gpiochip_add_data(), of_mm_gpiochip_remove(),
CONFIG_OF_GPIO_MM_GPIOCHIP from the kernel.
-
-Get rid of <linux/gpio.h>
-
-This legacy header is a one stop shop for anything GPIO is closely tied
-to the global GPIO numberspace. The endgame of the above refactorings will
-be the removal of <linux/gpio.h> and from that point only the specialized
-headers under <linux/gpio/*.h> will be used. This requires all the above to
-be completed and is expected to take a long time.
-
+-------------------------------------------------------------------------------
Collect drivers
@@ -108,6 +103,7 @@ At the same time it makes sense to get rid of code duplication in existing or
new coming drivers. For example, gpio-ml-ioh should be incorporated into
gpio-pch.
+-------------------------------------------------------------------------------
Generic MMIO GPIO
@@ -128,6 +124,7 @@ Work items:
helpers (x86 inb()/outb()) and convert port-mapped I/O drivers to use
this with dry-coding and sending to maintainers to test
+-------------------------------------------------------------------------------
Generic regmap GPIO
@@ -135,6 +132,7 @@ In the very similar way to Generic MMIO GPIO convert the users which can
take advantage of using regmap over direct IO accessors. Note, even in
MMIO case the regmap MMIO with gpio-regmap.c is preferable over gpio-mmio.c.
+-------------------------------------------------------------------------------
GPIOLIB irqchip
@@ -144,53 +142,7 @@ try to cover any generic kind of irqchip cascaded from a GPIO.
- Look over and identify any remaining easily converted drivers and
dry-code conversions to gpiolib irqchip for maintainers to test
-
-Increase integration with pin control
-
-There are already ways to use pin control as back-end for GPIO and
-it may make sense to bring these subsystems closer. One reason for
-creating pin control as its own subsystem was that we could avoid any
-use of the global GPIO numbers. Once the above is complete, it may
-make sense to simply join the subsystems into one and make pin
-multiplexing, pin configuration, GPIO, etc selectable options in one
-and the same pin control and GPIO subsystem.
-
-
-Debugfs in place of sysfs
-
-The old sysfs code that enables simple uses of GPIOs from the
-command line is still popular despite the existance of the proper
-character device. The reason is that it is simple to use on
-root filesystems where you only have a minimal set of tools such
-as "cat", "echo" etc.
-
-The old sysfs still need to be strongly deprecated and removed
-as it relies on the global GPIO numberspace that assume a strict
-order of global GPIO numbers that do not change between boots
-and is independent of probe order.
-
-To solve this and provide an ABI that people can use for hacks
-and development, implement a debugfs interface to manipulate
-GPIO lines that can do everything that sysfs can do today: one
-directory per gpiochip and one file entry per line:
-
-/sys/kernel/debug/gpiochip/gpiochip0
-/sys/kernel/debug/gpiochip/gpiochip0/gpio0
-/sys/kernel/debug/gpiochip/gpiochip0/gpio1
-/sys/kernel/debug/gpiochip/gpiochip0/gpio2
-/sys/kernel/debug/gpiochip/gpiochip0/gpio3
-...
-/sys/kernel/debug/gpiochip/gpiochip1
-/sys/kernel/debug/gpiochip/gpiochip1/gpio0
-/sys/kernel/debug/gpiochip/gpiochip1/gpio1
-...
-
-The exact files and design of the debugfs interface can be
-discussed but the idea is to provide a low-level access point
-for debugging and hacking and to expose all lines without the
-need of any exporting. Also provide ample ammunition to shoot
-oneself in the foot, because this is debugfs after all.
-
+-------------------------------------------------------------------------------
Moving over to immutable irq_chip structures
@@ -209,3 +161,28 @@ A small number of drivers have been converted (pl061, tegra186, msm,
amd, apple), and can be used as examples of how to proceed with this
conversion. Note that drivers using the generic irqchip framework
cannot be converted yet, but watch this space!
+
+-------------------------------------------------------------------------------
+
+Convert all GPIO chips to using the new, value returning line setters
+
+struct gpio_chip's set() and set_multiple() callbacks are now deprecated. They
+return void and thus do not allow drivers to indicate failure to set the line
+value back to the caller.
+
+We've now added new variants - set_rv() and set_multiple_rv() that return an
+integer. Let's convert all GPIO drivers treewide to use the new callbacks,
+remove the old ones and finally rename the new ones back to the old names.
+
+-------------------------------------------------------------------------------
+
+Extend the sysfs ABI to allow exporting lines by their HW offsets
+
+The need to support the sysfs GPIO class is one of the main obstacles to
+removing the global GPIO numberspace from the kernel. In order to wean users
+off using global numbers from user-space, extend the existing interface with
+new per-gpiochip export/unexport attributes that allow to refer to GPIOs using
+their hardware offsets within the chip.
+
+Encourage users to switch to using them and eventually remove the existing
+global export/unexport attributes.
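The first new TODO entry above describes the set()/set_multiple() to set_rv()/set_multiple_rv() migration that the per-driver patches further down carry out. A minimal sketch of the shape of such a conversion, for a made-up regmap-backed driver (foo_gpio, FOO_OUT_REG and the regmap field are placeholders, not from any patch here):

#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/regmap.h>

#define FOO_OUT_REG	0x04	/* placeholder output data register */

struct foo_gpio {
	struct regmap *regmap;
	struct gpio_chip gc;
};

/* Before: the legacy callback returns void and swallows I/O failures. */
static void foo_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct foo_gpio *foo = gpiochip_get_data(gc);

	regmap_update_bits(foo->regmap, FOO_OUT_REG, BIT(offset),
			   value ? BIT(offset) : 0);
}

/* After: the value-returning callback propagates the error to the core. */
static int foo_gpio_set_rv(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct foo_gpio *foo = gpiochip_get_data(gc);

	return regmap_update_bits(foo->regmap, FOO_OUT_REG, BIT(offset),
				  value ? BIT(offset) : 0);
}

/*
 * ...and in probe():
 *	gc->set    = foo_gpio_set;	becomes
 *	gc->set_rv = foo_gpio_set_rv;
 */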
diff --git a/drivers/gpio/dev-sync-probe.c b/drivers/gpio/dev-sync-probe.c
new file mode 100644
index 000000000000..9ea733b863b2
--- /dev/null
+++ b/drivers/gpio/dev-sync-probe.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Common code for drivers creating fake platform devices.
+ *
+ * Provides synchronous device creation: waits for probe completion and
+ * returns the probe success or error status to the device creator.
+ *
+ * Copyright (C) 2021 Bartosz Golaszewski <brgl@bgdev.pl>
+ * Copyright (C) 2025 Koichiro Den <koichiro.den@canonical.com>
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include "dev-sync-probe.h"
+
+static int dev_sync_probe_notifier_call(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct dev_sync_probe_data *pdata;
+ struct device *dev = data;
+
+ pdata = container_of(nb, struct dev_sync_probe_data, bus_notifier);
+ if (!device_match_name(dev, pdata->name))
+ return NOTIFY_DONE;
+
+ switch (action) {
+ case BUS_NOTIFY_BOUND_DRIVER:
+ pdata->driver_bound = true;
+ break;
+ case BUS_NOTIFY_DRIVER_NOT_BOUND:
+ pdata->driver_bound = false;
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ complete(&pdata->probe_completion);
+ return NOTIFY_OK;
+}
+
+void dev_sync_probe_init(struct dev_sync_probe_data *data)
+{
+ memset(data, 0, sizeof(*data));
+ init_completion(&data->probe_completion);
+ data->bus_notifier.notifier_call = dev_sync_probe_notifier_call;
+}
+EXPORT_SYMBOL_GPL(dev_sync_probe_init);
+
+int dev_sync_probe_register(struct dev_sync_probe_data *data,
+ struct platform_device_info *pdevinfo)
+{
+ struct platform_device *pdev;
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "%s.%d", pdevinfo->name, pdevinfo->id);
+ if (!name)
+ return -ENOMEM;
+
+ data->driver_bound = false;
+ data->name = name;
+ reinit_completion(&data->probe_completion);
+ bus_register_notifier(&platform_bus_type, &data->bus_notifier);
+
+ pdev = platform_device_register_full(pdevinfo);
+ if (IS_ERR(pdev)) {
+ bus_unregister_notifier(&platform_bus_type, &data->bus_notifier);
+ kfree(data->name);
+ return PTR_ERR(pdev);
+ }
+
+ wait_for_completion(&data->probe_completion);
+ bus_unregister_notifier(&platform_bus_type, &data->bus_notifier);
+
+ if (!data->driver_bound) {
+ platform_device_unregister(pdev);
+ kfree(data->name);
+ return -ENXIO;
+ }
+
+ data->pdev = pdev;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_sync_probe_register);
+
+void dev_sync_probe_unregister(struct dev_sync_probe_data *data)
+{
+ platform_device_unregister(data->pdev);
+ kfree(data->name);
+ data->pdev = NULL;
+}
+EXPORT_SYMBOL_GPL(dev_sync_probe_unregister);
+
+MODULE_AUTHOR("Bartosz Golaszewski <brgl@bgdev.pl>");
+MODULE_AUTHOR("Koichiro Den <koichiro.den@canonical.com>");
+MODULE_DESCRIPTION("Utilities for synchronous fake device creation");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/dev-sync-probe.h b/drivers/gpio/dev-sync-probe.h
new file mode 100644
index 000000000000..4b3d52b70519
--- /dev/null
+++ b/drivers/gpio/dev-sync-probe.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef DEV_SYNC_PROBE_H
+#define DEV_SYNC_PROBE_H
+
+#include <linux/completion.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+
+struct dev_sync_probe_data {
+ struct platform_device *pdev;
+ const char *name;
+
+ /* Synchronize with probe */
+ struct notifier_block bus_notifier;
+ struct completion probe_completion;
+ bool driver_bound;
+};
+
+void dev_sync_probe_init(struct dev_sync_probe_data *data);
+int dev_sync_probe_register(struct dev_sync_probe_data *data,
+ struct platform_device_info *pdevinfo);
+void dev_sync_probe_unregister(struct dev_sync_probe_data *data);
+
+#endif /* DEV_SYNC_PROBE_H */
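The helper pair above is aimed at configfs-driven test drivers (gpio-sim and gpio-virtuser per the Kconfig changes) that create platform devices on demand and need the probe result synchronously. A rough usage sketch follows; the "gpio-fake" device name, the fake_chip structure and the surrounding functions are hypothetical:

#include <linux/platform_device.h>

#include "dev-sync-probe.h"

struct fake_chip {
	struct dev_sync_probe_data probe_data;
	/* ... configfs state, attributes, etc. ... */
};

static int fake_chip_activate(struct fake_chip *chip, int id)
{
	struct platform_device_info pdevinfo = {
		.name = "gpio-fake",	/* must match the platform_driver name */
		.id = id,
	};

	dev_sync_probe_init(&chip->probe_data);

	/*
	 * Returns only after the bus notifier has seen BOUND_DRIVER or
	 * DRIVER_NOT_BOUND for "gpio-fake.<id>", so probe errors surface
	 * here instead of being lost to asynchronous probing.
	 */
	return dev_sync_probe_register(&chip->probe_data, &pdevinfo);
}

static void fake_chip_deactivate(struct fake_chip *chip)
{
	dev_sync_probe_unregister(&chip->probe_data);
}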
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index fca6cd2eb1dd..4dd5c2c330bb 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -7,6 +7,7 @@
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/module.h>
@@ -29,7 +30,7 @@ struct gen_74x164_chip {
* register at the end of the transfer. So, to have a logical
* numbering, store the bytes in reverse order.
*/
- u8 buffer[];
+ u8 buffer[] __counted_by(registers);
};
static int __gen_74x164_write_config(struct gen_74x164_chip *chip)
@@ -43,34 +44,31 @@ static int gen_74x164_get_value(struct gpio_chip *gc, unsigned offset)
struct gen_74x164_chip *chip = gpiochip_get_data(gc);
u8 bank = chip->registers - 1 - offset / 8;
u8 pin = offset % 8;
- int ret;
- mutex_lock(&chip->lock);
- ret = (chip->buffer[bank] >> pin) & 0x1;
- mutex_unlock(&chip->lock);
+ guard(mutex)(&chip->lock);
- return ret;
+ return !!(chip->buffer[bank] & BIT(pin));
}
-static void gen_74x164_set_value(struct gpio_chip *gc,
- unsigned offset, int val)
+static int gen_74x164_set_value(struct gpio_chip *gc,
+ unsigned int offset, int val)
{
struct gen_74x164_chip *chip = gpiochip_get_data(gc);
u8 bank = chip->registers - 1 - offset / 8;
u8 pin = offset % 8;
- mutex_lock(&chip->lock);
+ guard(mutex)(&chip->lock);
+
if (val)
- chip->buffer[bank] |= (1 << pin);
+ chip->buffer[bank] |= BIT(pin);
else
- chip->buffer[bank] &= ~(1 << pin);
+ chip->buffer[bank] &= ~BIT(pin);
- __gen_74x164_write_config(chip);
- mutex_unlock(&chip->lock);
+ return __gen_74x164_write_config(chip);
}
-static void gen_74x164_set_multiple(struct gpio_chip *gc, unsigned long *mask,
- unsigned long *bits)
+static int gen_74x164_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
{
struct gen_74x164_chip *chip = gpiochip_get_data(gc);
unsigned long offset;
@@ -78,7 +76,8 @@ static void gen_74x164_set_multiple(struct gpio_chip *gc, unsigned long *mask,
size_t bank;
unsigned long bitmask;
- mutex_lock(&chip->lock);
+ guard(mutex)(&chip->lock);
+
for_each_set_clump8(offset, bankmask, mask, chip->registers * 8) {
bank = chip->registers - 1 - offset / 8;
bitmask = bitmap_get_value8(bits, offset) & bankmask;
@@ -86,8 +85,7 @@ static void gen_74x164_set_multiple(struct gpio_chip *gc, unsigned long *mask,
chip->buffer[bank] &= ~bankmask;
chip->buffer[bank] |= bitmask;
}
- __gen_74x164_write_config(chip);
- mutex_unlock(&chip->lock);
+ return __gen_74x164_write_config(chip);
}
static int gen_74x164_direction_output(struct gpio_chip *gc,
@@ -97,8 +95,22 @@ static int gen_74x164_direction_output(struct gpio_chip *gc,
return 0;
}
+static void gen_74x164_deactivate(void *data)
+{
+ struct gen_74x164_chip *chip = data;
+
+ gpiod_set_value_cansleep(chip->gpiod_oe, 0);
+}
+
+static int gen_74x164_activate(struct device *dev, struct gen_74x164_chip *chip)
+{
+ gpiod_set_value_cansleep(chip->gpiod_oe, 1);
+ return devm_add_action_or_reset(dev, gen_74x164_deactivate, chip);
+}
+
static int gen_74x164_probe(struct spi_device *spi)
{
+ struct device *dev = &spi->dev;
struct gen_74x164_chip *chip;
u32 nregs;
int ret;
@@ -112,55 +124,44 @@ static int gen_74x164_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- ret = device_property_read_u32(&spi->dev, "registers-number", &nregs);
- if (ret) {
- dev_err(&spi->dev, "Missing 'registers-number' property.\n");
- return -EINVAL;
- }
+ ret = device_property_read_u32(dev, "registers-number", &nregs);
+ if (ret)
+ return dev_err_probe(dev, ret, "Missing 'registers-number' property.\n");
- chip = devm_kzalloc(&spi->dev, sizeof(*chip) + nregs, GFP_KERNEL);
+ chip = devm_kzalloc(dev, struct_size(chip, buffer, nregs), GFP_KERNEL);
if (!chip)
return -ENOMEM;
- chip->gpiod_oe = devm_gpiod_get_optional(&spi->dev, "enable",
- GPIOD_OUT_LOW);
+ chip->registers = nregs;
+
+ chip->gpiod_oe = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(chip->gpiod_oe))
return PTR_ERR(chip->gpiod_oe);
- spi_set_drvdata(spi, chip);
-
chip->gpio_chip.label = spi->modalias;
chip->gpio_chip.direction_output = gen_74x164_direction_output;
chip->gpio_chip.get = gen_74x164_get_value;
- chip->gpio_chip.set = gen_74x164_set_value;
- chip->gpio_chip.set_multiple = gen_74x164_set_multiple;
+ chip->gpio_chip.set_rv = gen_74x164_set_value;
+ chip->gpio_chip.set_multiple_rv = gen_74x164_set_multiple;
chip->gpio_chip.base = -1;
-
- chip->registers = nregs;
chip->gpio_chip.ngpio = GEN_74X164_NUMBER_GPIOS * chip->registers;
-
chip->gpio_chip.can_sleep = true;
- chip->gpio_chip.parent = &spi->dev;
+ chip->gpio_chip.parent = dev;
chip->gpio_chip.owner = THIS_MODULE;
- ret = devm_mutex_init(&spi->dev, &chip->lock);
+ ret = devm_mutex_init(dev, &chip->lock);
if (ret)
return ret;
ret = __gen_74x164_write_config(chip);
if (ret)
- return dev_err_probe(&spi->dev, ret, "Config write failed\n");
-
- gpiod_set_value_cansleep(chip->gpiod_oe, 1);
-
- return devm_gpiochip_add_data(&spi->dev, &chip->gpio_chip, chip);
-}
+ return dev_err_probe(dev, ret, "Config write failed\n");
-static void gen_74x164_remove(struct spi_device *spi)
-{
- struct gen_74x164_chip *chip = spi_get_drvdata(spi);
+ ret = gen_74x164_activate(dev, chip);
+ if (ret)
+ return ret;
- gpiod_set_value_cansleep(chip->gpiod_oe, 0);
+ return devm_gpiochip_add_data(dev, &chip->gpio_chip, chip);
}
static const struct spi_device_id gen_74x164_spi_ids[] = {
@@ -183,7 +184,6 @@ static struct spi_driver gen_74x164_driver = {
.of_match_table = gen_74x164_dt_ids,
},
.probe = gen_74x164_probe,
- .remove = gen_74x164_remove,
.id_table = gen_74x164_spi_ids,
};
module_spi_driver(gen_74x164_driver);
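This and several of the following GPIO driver patches also replace open-coded lock/unlock pairs with the scope-based guards from <linux/cleanup.h>. A minimal sketch of the two forms used here, with made-up function and lock names; guard() holds the lock until the enclosing scope ends, while scoped_guard() limits it to an explicit block:

#include <linux/cleanup.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

static int example_whole_function_scope(struct mutex *lock, int *val)
{
	guard(mutex)(lock);	/* unlocked automatically on every return path */

	if (*val < 0)
		return -EINVAL;	/* no explicit unlock needed before early returns */

	*val += 1;
	return 0;
}

static void example_block_scope(raw_spinlock_t *lock, int *val)
{
	scoped_guard(raw_spinlock_irqsave, lock) {
		/* lock held (IRQs saved/disabled) only inside this block */
		*val += 1;
	}
	/* lock released here; work that must run unlocked goes below */
}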
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index 6dafab0cf964..dc2b941c3726 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -3,11 +3,13 @@
* Copyright (C) 2011-2012 Avionic Design GmbH
*/
+#include <linux/cleanup.h>
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -78,7 +80,7 @@ static int adnp_gpio_get(struct gpio_chip *chip, unsigned offset)
return (value & BIT(pos)) ? 1 : 0;
}
-static void __adnp_gpio_set(struct adnp *adnp, unsigned offset, int value)
+static int __adnp_gpio_set(struct adnp *adnp, unsigned int offset, int value)
{
unsigned int reg = offset >> adnp->reg_shift;
unsigned int pos = offset & 7;
@@ -87,23 +89,23 @@ static void __adnp_gpio_set(struct adnp *adnp, unsigned offset, int value)
err = adnp_read(adnp, GPIO_PLR(adnp) + reg, &val);
if (err < 0)
- return;
+ return err;
if (value)
val |= BIT(pos);
else
val &= ~BIT(pos);
- adnp_write(adnp, GPIO_PLR(adnp) + reg, val);
+ return adnp_write(adnp, GPIO_PLR(adnp) + reg, val);
}
-static void adnp_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int adnp_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct adnp *adnp = gpiochip_get_data(chip);
- mutex_lock(&adnp->i2c_lock);
- __adnp_gpio_set(adnp, offset, value);
- mutex_unlock(&adnp->i2c_lock);
+ guard(mutex)(&adnp->i2c_lock);
+
+ return __adnp_gpio_set(adnp, offset, value);
}
static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -114,32 +116,26 @@ static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
u8 value;
int err;
- mutex_lock(&adnp->i2c_lock);
+ guard(mutex)(&adnp->i2c_lock);
err = adnp_read(adnp, GPIO_DDR(adnp) + reg, &value);
if (err < 0)
- goto out;
+ return err;
value &= ~BIT(pos);
err = adnp_write(adnp, GPIO_DDR(adnp) + reg, value);
if (err < 0)
- goto out;
+ return err;
err = adnp_read(adnp, GPIO_DDR(adnp) + reg, &value);
if (err < 0)
- goto out;
-
- if (value & BIT(pos)) {
- err = -EPERM;
- goto out;
- }
+ return err;
- err = 0;
+ if (value & BIT(pos))
+ return -EPERM;
-out:
- mutex_unlock(&adnp->i2c_lock);
- return err;
+ return 0;
}
static int adnp_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
@@ -151,33 +147,28 @@ static int adnp_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
int err;
u8 val;
- mutex_lock(&adnp->i2c_lock);
+ guard(mutex)(&adnp->i2c_lock);
err = adnp_read(adnp, GPIO_DDR(adnp) + reg, &val);
if (err < 0)
- goto out;
+ return err;
val |= BIT(pos);
err = adnp_write(adnp, GPIO_DDR(adnp) + reg, val);
if (err < 0)
- goto out;
+ return err;
err = adnp_read(adnp, GPIO_DDR(adnp) + reg, &val);
if (err < 0)
- goto out;
+ return err;
- if (!(val & BIT(pos))) {
- err = -EPERM;
- goto out;
- }
+ if (!(val & BIT(pos)))
+ return -EPERM;
__adnp_gpio_set(adnp, offset, value);
- err = 0;
-out:
- mutex_unlock(&adnp->i2c_lock);
- return err;
+ return 0;
}
static void adnp_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
@@ -187,27 +178,26 @@ static void adnp_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
int err;
for (i = 0; i < num_regs; i++) {
- u8 ddr, plr, ier, isr;
+ u8 ddr = 0, plr = 0, ier = 0, isr = 0;
- mutex_lock(&adnp->i2c_lock);
+ scoped_guard(mutex, &adnp->i2c_lock) {
+ err = adnp_read(adnp, GPIO_DDR(adnp) + i, &ddr);
+ if (err < 0)
+ return;
- err = adnp_read(adnp, GPIO_DDR(adnp) + i, &ddr);
- if (err < 0)
- goto unlock;
+ err = adnp_read(adnp, GPIO_PLR(adnp) + i, &plr);
+ if (err < 0)
+ return;
- err = adnp_read(adnp, GPIO_PLR(adnp) + i, &plr);
- if (err < 0)
- goto unlock;
+ err = adnp_read(adnp, GPIO_IER(adnp) + i, &ier);
+ if (err < 0)
+ return;
- err = adnp_read(adnp, GPIO_IER(adnp) + i, &ier);
- if (err < 0)
- goto unlock;
-
- err = adnp_read(adnp, GPIO_ISR(adnp) + i, &isr);
- if (err < 0)
- goto unlock;
+ err = adnp_read(adnp, GPIO_ISR(adnp) + i, &isr);
+ if (err < 0)
+ return;
- mutex_unlock(&adnp->i2c_lock);
+ }
for (j = 0; j < 8; j++) {
unsigned int bit = (i << adnp->reg_shift) + j;
@@ -232,11 +222,6 @@ static void adnp_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
direction, level, interrupt, pending);
}
}
-
- return;
-
-unlock:
- mutex_unlock(&adnp->i2c_lock);
}
static irqreturn_t adnp_irq(int irq, void *data)
@@ -248,32 +233,24 @@ static irqreturn_t adnp_irq(int irq, void *data)
for (i = 0; i < num_regs; i++) {
unsigned int base = i << adnp->reg_shift, bit;
- u8 changed, level, isr, ier;
+ u8 changed, level = 0, isr = 0, ier = 0;
unsigned long pending;
int err;
- mutex_lock(&adnp->i2c_lock);
+ scoped_guard(mutex, &adnp->i2c_lock) {
+ err = adnp_read(adnp, GPIO_PLR(adnp) + i, &level);
+ if (err < 0)
+ continue;
- err = adnp_read(adnp, GPIO_PLR(adnp) + i, &level);
- if (err < 0) {
- mutex_unlock(&adnp->i2c_lock);
- continue;
- }
+ err = adnp_read(adnp, GPIO_ISR(adnp) + i, &isr);
+ if (err < 0)
+ continue;
- err = adnp_read(adnp, GPIO_ISR(adnp) + i, &isr);
- if (err < 0) {
- mutex_unlock(&adnp->i2c_lock);
- continue;
+ err = adnp_read(adnp, GPIO_IER(adnp) + i, &ier);
+ if (err < 0)
+ continue;
}
- err = adnp_read(adnp, GPIO_IER(adnp) + i, &ier);
- if (err < 0) {
- mutex_unlock(&adnp->i2c_lock);
- continue;
- }
-
- mutex_unlock(&adnp->i2c_lock);
-
/* determine pins that changed levels */
changed = level ^ adnp->irq_level[i];
@@ -365,12 +342,12 @@ static void adnp_irq_bus_unlock(struct irq_data *d)
struct adnp *adnp = gpiochip_get_data(gc);
unsigned int num_regs = 1 << adnp->reg_shift, i;
- mutex_lock(&adnp->i2c_lock);
-
- for (i = 0; i < num_regs; i++)
- adnp_write(adnp, GPIO_IER(adnp) + i, adnp->irq_enable[i]);
+ scoped_guard(mutex, &adnp->i2c_lock) {
+ for (i = 0; i < num_regs; i++)
+ adnp_write(adnp, GPIO_IER(adnp) + i,
+ adnp->irq_enable[i]);
+ }
- mutex_unlock(&adnp->i2c_lock);
mutex_unlock(&adnp->irq_lock);
}
@@ -453,7 +430,7 @@ static int adnp_gpio_setup(struct adnp *adnp, unsigned int num_gpios,
chip->direction_input = adnp_gpio_direction_input;
chip->direction_output = adnp_gpio_direction_output;
chip->get = adnp_gpio_get;
- chip->set = adnp_gpio_set;
+ chip->set_rv = adnp_gpio_set;
chip->can_sleep = true;
if (IS_ENABLED(CONFIG_DEBUG_FS))
@@ -506,7 +483,10 @@ static int adnp_i2c_probe(struct i2c_client *client)
if (!adnp)
return -ENOMEM;
- mutex_init(&adnp->i2c_lock);
+ err = devm_mutex_init(&client->dev, &adnp->i2c_lock);
+ if (err)
+ return err;
+
adnp->client = client;
err = adnp_gpio_setup(adnp, num_gpios, device_property_read_bool(dev, "interrupt-controller"));
diff --git a/drivers/gpio/gpio-adp5520.c b/drivers/gpio/gpio-adp5520.c
index c55e821c63b6..57d12c10cbda 100644
--- a/drivers/gpio/gpio-adp5520.c
+++ b/drivers/gpio/gpio-adp5520.c
@@ -40,16 +40,18 @@ static int adp5520_gpio_get_value(struct gpio_chip *chip, unsigned off)
return !!(reg_val & dev->lut[off]);
}
-static void adp5520_gpio_set_value(struct gpio_chip *chip,
- unsigned off, int val)
+static int adp5520_gpio_set_value(struct gpio_chip *chip,
+ unsigned int off, int val)
{
struct adp5520_gpio *dev;
dev = gpiochip_get_data(chip);
if (val)
- adp5520_set_bits(dev->master, ADP5520_GPIO_OUT, dev->lut[off]);
+ return adp5520_set_bits(dev->master, ADP5520_GPIO_OUT,
+ dev->lut[off]);
else
- adp5520_clr_bits(dev->master, ADP5520_GPIO_OUT, dev->lut[off]);
+ return adp5520_clr_bits(dev->master, ADP5520_GPIO_OUT,
+ dev->lut[off]);
}
static int adp5520_gpio_direction_input(struct gpio_chip *chip, unsigned off)
@@ -120,7 +122,7 @@ static int adp5520_gpio_probe(struct platform_device *pdev)
gc->direction_input = adp5520_gpio_direction_input;
gc->direction_output = adp5520_gpio_direction_output;
gc->get = adp5520_gpio_get_value;
- gc->set = adp5520_gpio_set_value;
+ gc->set_rv = adp5520_gpio_set_value;
gc->can_sleep = true;
gc->base = pdata->gpio_start;
diff --git a/drivers/gpio/gpio-adp5585.c b/drivers/gpio/gpio-adp5585.c
index 000d31f09671..d5c0f1b267c8 100644
--- a/drivers/gpio/gpio-adp5585.c
+++ b/drivers/gpio/gpio-adp5585.c
@@ -86,14 +86,16 @@ static int adp5585_gpio_get_value(struct gpio_chip *chip, unsigned int off)
return !!(val & bit);
}
-static void adp5585_gpio_set_value(struct gpio_chip *chip, unsigned int off, int val)
+static int adp5585_gpio_set_value(struct gpio_chip *chip, unsigned int off,
+ int val)
{
struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
unsigned int bank = ADP5585_BANK(off);
unsigned int bit = ADP5585_BIT(off);
- regmap_update_bits(adp5585_gpio->regmap, ADP5585_GPO_DATA_OUT_A + bank,
- bit, val ? bit : 0);
+ return regmap_update_bits(adp5585_gpio->regmap,
+ ADP5585_GPO_DATA_OUT_A + bank,
+ bit, val ? bit : 0);
}
static int adp5585_gpio_set_bias(struct adp5585_gpio_dev *adp5585_gpio,
@@ -192,7 +194,7 @@ static int adp5585_gpio_probe(struct platform_device *pdev)
gc->direction_input = adp5585_gpio_direction_input;
gc->direction_output = adp5585_gpio_direction_output;
gc->get = adp5585_gpio_get_value;
- gc->set = adp5585_gpio_set_value;
+ gc->set_rv = adp5585_gpio_set_value;
gc->set_config = adp5585_gpio_set_config;
gc->can_sleep = true;
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
index d668ddb2e81d..d232ea865356 100644
--- a/drivers/gpio/gpio-aggregator.c
+++ b/drivers/gpio/gpio-aggregator.c
@@ -372,25 +372,30 @@ static void gpio_fwd_delay(struct gpio_chip *chip, unsigned int offset, int valu
udelay(delay_us);
}
-static void gpio_fwd_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int gpio_fwd_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+ int ret;
if (chip->can_sleep)
- gpiod_set_value_cansleep(fwd->descs[offset], value);
+ ret = gpiod_set_value_cansleep(fwd->descs[offset], value);
else
- gpiod_set_value(fwd->descs[offset], value);
+ ret = gpiod_set_value(fwd->descs[offset], value);
+ if (ret)
+ return ret;
if (fwd->delay_timings)
gpio_fwd_delay(chip, offset, value);
+
+ return ret;
}
-static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
- unsigned long *bits)
+static int gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
+ unsigned long *bits)
{
struct gpio_desc **descs = fwd_tmp_descs(fwd);
unsigned long *values = fwd_tmp_values(fwd);
- unsigned int i, j = 0;
+ unsigned int i, j = 0, ret;
for_each_set_bit(i, mask, fwd->chip.ngpio) {
__assign_bit(j, values, test_bit(i, bits));
@@ -398,26 +403,31 @@ static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
}
if (fwd->chip.can_sleep)
- gpiod_set_array_value_cansleep(j, descs, NULL, values);
+ ret = gpiod_set_array_value_cansleep(j, descs, NULL, values);
else
- gpiod_set_array_value(j, descs, NULL, values);
+ ret = gpiod_set_array_value(j, descs, NULL, values);
+
+ return ret;
}
-static void gpio_fwd_set_multiple_locked(struct gpio_chip *chip,
- unsigned long *mask, unsigned long *bits)
+static int gpio_fwd_set_multiple_locked(struct gpio_chip *chip,
+ unsigned long *mask, unsigned long *bits)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
unsigned long flags;
+ int ret;
if (chip->can_sleep) {
mutex_lock(&fwd->mlock);
- gpio_fwd_set_multiple(fwd, mask, bits);
+ ret = gpio_fwd_set_multiple(fwd, mask, bits);
mutex_unlock(&fwd->mlock);
} else {
spin_lock_irqsave(&fwd->slock, flags);
- gpio_fwd_set_multiple(fwd, mask, bits);
+ ret = gpio_fwd_set_multiple(fwd, mask, bits);
spin_unlock_irqrestore(&fwd->slock, flags);
}
+
+ return ret;
}
static int gpio_fwd_set_config(struct gpio_chip *chip, unsigned int offset,
@@ -547,8 +557,8 @@ static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
chip->direction_output = gpio_fwd_direction_output;
chip->get = gpio_fwd_get;
chip->get_multiple = gpio_fwd_get_multiple_locked;
- chip->set = gpio_fwd_set;
- chip->set_multiple = gpio_fwd_set_multiple_locked;
+ chip->set_rv = gpio_fwd_set;
+ chip->set_multiple_rv = gpio_fwd_set_multiple_locked;
chip->to_irq = gpio_fwd_to_irq;
chip->base = -1;
chip->ngpio = ngpios;
diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c
index 11edf1fe6c90..77a674cf99e4 100644
--- a/drivers/gpio/gpio-altera-a10sr.c
+++ b/drivers/gpio/gpio-altera-a10sr.c
@@ -35,15 +35,15 @@ static int altr_a10sr_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BIT(offset - ALTR_A10SR_LED_VALID_SHIFT));
}
-static void altr_a10sr_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int altr_a10sr_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct altr_a10sr_gpio *gpio = gpiochip_get_data(chip);
- regmap_update_bits(gpio->regmap, ALTR_A10SR_LED_REG,
- BIT(ALTR_A10SR_LED_VALID_SHIFT + offset),
- value ? BIT(ALTR_A10SR_LED_VALID_SHIFT + offset)
- : 0);
+ return regmap_update_bits(gpio->regmap, ALTR_A10SR_LED_REG,
+ BIT(ALTR_A10SR_LED_VALID_SHIFT + offset),
+ value ?
+ BIT(ALTR_A10SR_LED_VALID_SHIFT + offset) : 0);
}
static int altr_a10sr_gpio_direction_input(struct gpio_chip *gc,
@@ -69,7 +69,7 @@ static const struct gpio_chip altr_a10sr_gc = {
.label = "altr_a10sr_gpio",
.owner = THIS_MODULE,
.get = altr_a10sr_gpio_get,
- .set = altr_a10sr_gpio_set,
+ .set_rv = altr_a10sr_gpio_set,
.direction_input = altr_a10sr_gpio_direction_input,
.direction_output = altr_a10sr_gpio_direction_output,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 17ab039c7413..1b28525726d7 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -113,7 +113,7 @@ static int altera_gpio_get(struct gpio_chip *gc, unsigned offset)
return !!(readl(altera_gc->regs + ALTERA_GPIO_DATA) & BIT(offset));
}
-static void altera_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
+static int altera_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct altera_gpio_chip *altera_gc = gpiochip_get_data(gc);
unsigned long flags;
@@ -127,6 +127,8 @@ static void altera_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
data_reg &= ~BIT(offset);
writel(data_reg, altera_gc->regs + ALTERA_GPIO_DATA);
raw_spin_unlock_irqrestore(&altera_gc->gpio_lock, flags);
+
+ return 0;
}
static int altera_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
@@ -257,7 +259,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
altera_gc->gc.direction_input = altera_gpio_direction_input;
altera_gc->gc.direction_output = altera_gpio_direction_output;
altera_gc->gc.get = altera_gpio_get;
- altera_gc->gc.set = altera_gpio_set;
+ altera_gc->gc.set_rv = altera_gpio_set;
altera_gc->gc.owner = THIS_MODULE;
altera_gc->gc.parent = &pdev->dev;
altera_gc->gc.base = -1;
diff --git a/drivers/gpio/gpio-amd-fch.c b/drivers/gpio/gpio-amd-fch.c
index 2a21354ed6a0..f8d0cea46049 100644
--- a/drivers/gpio/gpio-amd-fch.c
+++ b/drivers/gpio/gpio-amd-fch.c
@@ -95,8 +95,7 @@ static int amd_fch_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
return ret ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
}
-static void amd_fch_gpio_set(struct gpio_chip *gc,
- unsigned int gpio, int value)
+static int amd_fch_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
unsigned long flags;
struct amd_fch_gpio_priv *priv = gpiochip_get_data(gc);
@@ -113,6 +112,8 @@ static void amd_fch_gpio_set(struct gpio_chip *gc,
writel_relaxed(mask, ptr);
spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
}
static int amd_fch_gpio_get(struct gpio_chip *gc,
@@ -164,7 +165,7 @@ static int amd_fch_gpio_probe(struct platform_device *pdev)
priv->gc.direction_output = amd_fch_gpio_direction_output;
priv->gc.get_direction = amd_fch_gpio_get_direction;
priv->gc.get = amd_fch_gpio_get;
- priv->gc.set = amd_fch_gpio_set;
+ priv->gc.set_rv = amd_fch_gpio_set;
spin_lock_init(&priv->lock);
diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c
index 3377667a28de..425d8472f744 100644
--- a/drivers/gpio/gpio-amd8111.c
+++ b/drivers/gpio/gpio-amd8111.c
@@ -94,7 +94,7 @@ static void amd_gpio_free(struct gpio_chip *chip, unsigned offset)
iowrite8(agp->orig[offset], agp->pm + AMD_REG_GPIO(offset));
}
-static void amd_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int amd_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct amd_gpio *agp = gpiochip_get_data(chip);
u8 temp;
@@ -107,6 +107,8 @@ static void amd_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
spin_unlock_irqrestore(&agp->lock, flags);
dev_dbg(&agp->pdev->dev, "Setting gpio %d, value %d, reg=%02x\n", offset, !!value, temp);
+
+ return 0;
}
static int amd_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -163,7 +165,7 @@ static struct amd_gpio gp = {
.ngpio = 32,
.request = amd_gpio_request,
.free = amd_gpio_free,
- .set = amd_gpio_set,
+ .set_rv = amd_gpio_set,
.get = amd_gpio_get,
.direction_output = amd_gpio_dirout,
.direction_input = amd_gpio_dirin,
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index c15fda99120a..e530c94dcce8 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -121,7 +121,8 @@ static int arizona_gpio_direction_out(struct gpio_chip *chip,
ARIZONA_GPN_DIR | ARIZONA_GPN_LVL, value);
}
-static void arizona_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int arizona_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct arizona_gpio *arizona_gpio = gpiochip_get_data(chip);
struct arizona *arizona = arizona_gpio->arizona;
@@ -129,8 +130,8 @@ static void arizona_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
if (value)
value = ARIZONA_GPN_LVL;
- regmap_update_bits(arizona->regmap, ARIZONA_GPIO1_CTRL + offset,
- ARIZONA_GPN_LVL, value);
+ return regmap_update_bits(arizona->regmap, ARIZONA_GPIO1_CTRL + offset,
+ ARIZONA_GPN_LVL, value);
}
static const struct gpio_chip template_chip = {
@@ -139,7 +140,7 @@ static const struct gpio_chip template_chip = {
.direction_input = arizona_gpio_direction_in,
.get = arizona_gpio_get,
.direction_output = arizona_gpio_direction_out,
- .set = arizona_gpio_set,
+ .set_rv = arizona_gpio_set,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c
index 34eb26298e32..00b31497ecff 100644
--- a/drivers/gpio/gpio-aspeed-sgpio.c
+++ b/drivers/gpio/gpio-aspeed-sgpio.c
@@ -6,6 +6,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/gpio/driver.h>
#include <linux/hashtable.h>
@@ -170,17 +171,14 @@ static int aspeed_sgpio_get(struct gpio_chip *gc, unsigned int offset)
{
struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
const struct aspeed_sgpio_bank *bank = to_bank(offset);
- unsigned long flags;
enum aspeed_sgpio_reg reg;
int rc = 0;
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
reg = aspeed_sgpio_is_input(offset) ? reg_val : reg_rdata;
rc = !!(ioread32(bank_reg(gpio, bank, reg)) & GPIO_BIT(offset));
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
-
return rc;
}
@@ -211,16 +209,13 @@ static int sgpio_set_value(struct gpio_chip *gc, unsigned int offset, int val)
return 0;
}
-static void aspeed_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
+static int aspeed_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
{
struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&gpio->lock, flags);
- sgpio_set_value(gc, offset, val);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
+ return sgpio_set_value(gc, offset, val);
}
static int aspeed_sgpio_dir_in(struct gpio_chip *gc, unsigned int offset)
@@ -231,15 +226,14 @@ static int aspeed_sgpio_dir_in(struct gpio_chip *gc, unsigned int offset)
static int aspeed_sgpio_dir_out(struct gpio_chip *gc, unsigned int offset, int val)
{
struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
- unsigned long flags;
int rc;
/* No special action is required for setting the direction; we'll
* error-out in sgpio_set_value if this isn't an output GPIO */
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
+
rc = sgpio_set_value(gc, offset, val);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
return rc;
}
@@ -269,7 +263,6 @@ static void aspeed_sgpio_irq_ack(struct irq_data *d)
{
const struct aspeed_sgpio_bank *bank;
struct aspeed_sgpio *gpio;
- unsigned long flags;
void __iomem *status_addr;
int offset;
u32 bit;
@@ -278,18 +271,15 @@ static void aspeed_sgpio_irq_ack(struct irq_data *d)
status_addr = bank_reg(gpio, bank, reg_irq_status);
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
iowrite32(bit, status_addr);
-
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
}
static void aspeed_sgpio_irq_set_mask(struct irq_data *d, bool set)
{
const struct aspeed_sgpio_bank *bank;
struct aspeed_sgpio *gpio;
- unsigned long flags;
u32 reg, bit;
void __iomem *addr;
int offset;
@@ -301,17 +291,15 @@ static void aspeed_sgpio_irq_set_mask(struct irq_data *d, bool set)
if (set)
gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(d));
- raw_spin_lock_irqsave(&gpio->lock, flags);
-
- reg = ioread32(addr);
- if (set)
- reg |= bit;
- else
- reg &= ~bit;
-
- iowrite32(reg, addr);
+ scoped_guard(raw_spinlock_irqsave, &gpio->lock) {
+ reg = ioread32(addr);
+ if (set)
+ reg |= bit;
+ else
+ reg &= ~bit;
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
+ iowrite32(reg, addr);
+ }
/* Masking the IRQ */
if (!set)
@@ -339,7 +327,6 @@ static int aspeed_sgpio_set_type(struct irq_data *d, unsigned int type)
const struct aspeed_sgpio_bank *bank;
irq_flow_handler_t handler;
struct aspeed_sgpio *gpio;
- unsigned long flags;
void __iomem *addr;
int offset;
@@ -366,24 +353,22 @@ static int aspeed_sgpio_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- raw_spin_lock_irqsave(&gpio->lock, flags);
-
- addr = bank_reg(gpio, bank, reg_irq_type0);
- reg = ioread32(addr);
- reg = (reg & ~bit) | type0;
- iowrite32(reg, addr);
-
- addr = bank_reg(gpio, bank, reg_irq_type1);
- reg = ioread32(addr);
- reg = (reg & ~bit) | type1;
- iowrite32(reg, addr);
-
- addr = bank_reg(gpio, bank, reg_irq_type2);
- reg = ioread32(addr);
- reg = (reg & ~bit) | type2;
- iowrite32(reg, addr);
-
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &gpio->lock) {
+ addr = bank_reg(gpio, bank, reg_irq_type0);
+ reg = ioread32(addr);
+ reg = (reg & ~bit) | type0;
+ iowrite32(reg, addr);
+
+ addr = bank_reg(gpio, bank, reg_irq_type1);
+ reg = ioread32(addr);
+ reg = (reg & ~bit) | type1;
+ iowrite32(reg, addr);
+
+ addr = bank_reg(gpio, bank, reg_irq_type2);
+ reg = ioread32(addr);
+ reg = (reg & ~bit) | type2;
+ iowrite32(reg, addr);
+ }
irq_set_handler_locked(d, handler);
@@ -487,13 +472,12 @@ static int aspeed_sgpio_reset_tolerance(struct gpio_chip *chip,
unsigned int offset, bool enable)
{
struct aspeed_sgpio *gpio = gpiochip_get_data(chip);
- unsigned long flags;
void __iomem *reg;
u32 val;
reg = bank_reg(gpio, to_bank(offset), reg_tolerance);
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
val = readl(reg);
@@ -504,8 +488,6 @@ static int aspeed_sgpio_reset_tolerance(struct gpio_chip *chip,
writel(val, reg);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
-
return 0;
}
@@ -614,7 +596,7 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev)
gpio->chip.request = NULL;
gpio->chip.free = NULL;
gpio->chip.get = aspeed_sgpio_get;
- gpio->chip.set = aspeed_sgpio_set;
+ gpio->chip.set_rv = aspeed_sgpio_set;
gpio->chip.set_config = aspeed_sgpio_set_config;
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 40c1bd80f8b0..2d340a343a17 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -5,6 +5,7 @@
* Joel Stanley <joel@jms.id.au>
*/
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/gpio/aspeed.h>
#include <linux/gpio/driver.h>
@@ -423,41 +424,38 @@ static void __aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
gpio->config->llops->reg_bit_get(gpio, offset, reg_val);
}
-static void aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
- int val)
+static int aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
- unsigned long flags;
bool copro = false;
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
+
copro = aspeed_gpio_copro_request(gpio, offset);
__aspeed_gpio_set(gc, offset, val);
if (copro)
aspeed_gpio_copro_release(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
+
+ return 0;
}
static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset)
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
- unsigned long flags;
bool copro = false;
if (!have_input(gpio, offset))
return -ENOTSUPP;
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
copro = aspeed_gpio_copro_request(gpio, offset);
gpio->config->llops->reg_bit_set(gpio, offset, reg_dir, 0);
if (copro)
aspeed_gpio_copro_release(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
-
return 0;
}
@@ -465,13 +463,12 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc,
unsigned int offset, int val)
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
- unsigned long flags;
bool copro = false;
if (!have_output(gpio, offset))
return -ENOTSUPP;
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
copro = aspeed_gpio_copro_request(gpio, offset);
__aspeed_gpio_set(gc, offset, val);
@@ -479,7 +476,6 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc,
if (copro)
aspeed_gpio_copro_release(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
return 0;
}
@@ -487,7 +483,6 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc,
static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
- unsigned long flags;
u32 val;
if (!have_input(gpio, offset))
@@ -496,12 +491,10 @@ static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
if (!have_output(gpio, offset))
return GPIO_LINE_DIRECTION_IN;
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
val = gpio->config->llops->reg_bit_get(gpio, offset, reg_dir);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
-
return val ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
}
@@ -527,7 +520,6 @@ static inline int irqd_to_aspeed_gpio_data(struct irq_data *d,
static void aspeed_gpio_irq_ack(struct irq_data *d)
{
struct aspeed_gpio *gpio;
- unsigned long flags;
int rc, offset;
bool copro = false;
@@ -535,20 +527,19 @@ static void aspeed_gpio_irq_ack(struct irq_data *d)
if (rc)
return;
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
+
copro = aspeed_gpio_copro_request(gpio, offset);
gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_status, 1);
if (copro)
aspeed_gpio_copro_release(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
}
static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set)
{
struct aspeed_gpio *gpio;
- unsigned long flags;
int rc, offset;
bool copro = false;
@@ -560,14 +551,14 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set)
if (set)
gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(d));
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
+
copro = aspeed_gpio_copro_request(gpio, offset);
gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_enable, set);
if (copro)
aspeed_gpio_copro_release(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
/* Masking the IRQ */
if (!set)
@@ -591,7 +582,6 @@ static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type)
u32 type2 = 0;
irq_flow_handler_t handler;
struct aspeed_gpio *gpio;
- unsigned long flags;
int rc, offset;
bool copro = false;
@@ -620,16 +610,19 @@ static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- raw_spin_lock_irqsave(&gpio->lock, flags);
- copro = aspeed_gpio_copro_request(gpio, offset);
+ scoped_guard(raw_spinlock_irqsave, &gpio->lock) {
+ copro = aspeed_gpio_copro_request(gpio, offset);
- gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_type0, type0);
- gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_type1, type1);
- gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_type2, type2);
+ gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_type0,
+ type0);
+ gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_type1,
+ type1);
+ gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_type2,
+ type2);
- if (copro)
- aspeed_gpio_copro_release(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
+ if (copro)
+ aspeed_gpio_copro_release(gpio, offset);
+ }
irq_set_handler_locked(d, handler);
@@ -686,17 +679,16 @@ static int aspeed_gpio_reset_tolerance(struct gpio_chip *chip,
unsigned int offset, bool enable)
{
struct aspeed_gpio *gpio = gpiochip_get_data(chip);
- unsigned long flags;
bool copro = false;
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
+
copro = aspeed_gpio_copro_request(gpio, offset);
gpio->config->llops->reg_bit_set(gpio, offset, reg_tolerance, enable);
if (copro)
aspeed_gpio_copro_release(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
return 0;
}
@@ -798,7 +790,6 @@ static int enable_debounce(struct gpio_chip *chip, unsigned int offset,
{
struct aspeed_gpio *gpio = gpiochip_get_data(chip);
u32 requested_cycles;
- unsigned long flags;
int rc;
int i;
@@ -812,12 +803,12 @@ static int enable_debounce(struct gpio_chip *chip, unsigned int offset,
return rc;
}
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
if (timer_allocation_registered(gpio, offset)) {
rc = unregister_allocated_timer(gpio, offset);
if (rc < 0)
- goto out;
+ return rc;
}
/* Try to find a timer already configured for the debounce period */
@@ -855,7 +846,7 @@ static int enable_debounce(struct gpio_chip *chip, unsigned int offset,
* consistency.
*/
configure_timer(gpio, offset, 0);
- goto out;
+ return rc;
}
i = j;
@@ -863,34 +854,26 @@ static int enable_debounce(struct gpio_chip *chip, unsigned int offset,
iowrite32(requested_cycles, gpio->base + gpio->config->debounce_timers_array[i]);
}
- if (WARN(i == 0, "Cannot register index of disabled timer\n")) {
- rc = -EINVAL;
- goto out;
- }
+ if (WARN(i == 0, "Cannot register index of disabled timer\n"))
+ return -EINVAL;
register_allocated_timer(gpio, offset, i);
configure_timer(gpio, offset, i);
-out:
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
-
return rc;
}
static int disable_debounce(struct gpio_chip *chip, unsigned int offset)
{
struct aspeed_gpio *gpio = gpiochip_get_data(chip);
- unsigned long flags;
int rc;
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
rc = unregister_allocated_timer(gpio, offset);
if (!rc)
configure_timer(gpio, offset, 0);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
-
return rc;
}
@@ -961,7 +944,6 @@ int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc,
struct aspeed_gpio *gpio = gpiochip_get_data(chip);
int rc = 0, bindex, offset = gpio_chip_hwgpio(desc);
const struct aspeed_gpio_bank *bank = to_bank(offset);
- unsigned long flags;
if (!aspeed_gpio_support_copro(gpio))
return -EOPNOTSUPP;
@@ -974,13 +956,12 @@ int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc,
return -EINVAL;
bindex = offset >> 3;
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
/* Sanity check, this shouldn't happen */
- if (gpio->cf_copro_bankmap[bindex] == 0xff) {
- rc = -EIO;
- goto bail;
- }
+ if (gpio->cf_copro_bankmap[bindex] == 0xff)
+ return -EIO;
+
gpio->cf_copro_bankmap[bindex]++;
/* Switch command source */
@@ -994,8 +975,6 @@ int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc,
*dreg_offset = bank->rdata_reg;
if (bit)
*bit = GPIO_OFFSET(offset);
- bail:
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
return rc;
}
EXPORT_SYMBOL_GPL(aspeed_gpio_copro_grab_gpio);
@@ -1009,7 +988,6 @@ int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc)
struct gpio_chip *chip = gpiod_to_chip(desc);
struct aspeed_gpio *gpio = gpiochip_get_data(chip);
int rc = 0, bindex, offset = gpio_chip_hwgpio(desc);
- unsigned long flags;
if (!aspeed_gpio_support_copro(gpio))
return -EOPNOTSUPP;
@@ -1021,21 +999,19 @@ int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc)
return -EINVAL;
bindex = offset >> 3;
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
/* Sanity check, this shouldn't happen */
- if (gpio->cf_copro_bankmap[bindex] == 0) {
- rc = -EIO;
- goto bail;
- }
+ if (gpio->cf_copro_bankmap[bindex] == 0)
+ return -EIO;
+
gpio->cf_copro_bankmap[bindex]--;
/* Switch command source */
if (gpio->cf_copro_bankmap[bindex] == 0)
aspeed_gpio_change_cmd_source(gpio, offset,
GPIO_CMDSRC_ARM);
- bail:
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
+
return rc;
}
EXPORT_SYMBOL_GPL(aspeed_gpio_copro_release_gpio);
@@ -1376,7 +1352,7 @@ static int aspeed_gpio_probe(struct platform_device *pdev)
gpio->chip.request = aspeed_gpio_request;
gpio->chip.free = aspeed_gpio_free;
gpio->chip.get = aspeed_gpio_get;
- gpio->chip.set = aspeed_gpio_set;
+ gpio->chip.set_rv = aspeed_gpio_set;
gpio->chip.set_config = aspeed_gpio_set_config;
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
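
The hunks above show the conversion pattern this series applies to every driver below: the void gpio_chip .set() callback is replaced by the int-returning .set_rv(), so register-access errors are propagated to gpiolib instead of being logged and dropped, and open-coded raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pairs become scope-based guard() sections. A minimal sketch of the setter half of that pattern; the "foo" names, FOO_OUT_REG and the init helper are hypothetical and not part of this series:

/*
 * Illustrative sketch only: "foo" identifiers are hypothetical; the
 * shape mirrors the aspeed/bcm-kona/bd71815 hunks in this series.
 */
#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/regmap.h>

#define FOO_OUT_REG	0x10	/* hypothetical output-value register */

struct foo_gpio {
	struct regmap *regmap;
	struct gpio_chip chip;
};

/* New style: return the result of the register write instead of void. */
static int foo_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct foo_gpio *foo = gpiochip_get_data(gc);

	return regmap_update_bits(foo->regmap, FOO_OUT_REG,
				  BIT(offset), value ? BIT(offset) : 0);
}

static void foo_gpio_chip_init(struct foo_gpio *foo)
{
	foo->chip.label = "foo-gpio";
	foo->chip.set_rv = foo_gpio_set;	/* was foo->chip.set */
}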
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 64908f1a5e7f..17c287dc7471 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -7,6 +7,7 @@
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/init.h>
@@ -100,7 +101,6 @@ static void bcm_kona_gpio_lock_gpio(struct bcm_kona_gpio *kona_gpio,
unsigned gpio)
{
u32 val;
- unsigned long flags;
int bank_id = GPIO_BANK(gpio);
int bit = GPIO_BIT(gpio);
struct bcm_kona_gpio_bank *bank = &kona_gpio->banks[bank_id];
@@ -112,13 +112,11 @@ static void bcm_kona_gpio_lock_gpio(struct bcm_kona_gpio *kona_gpio,
}
if (--bank->gpio_unlock_count[bit] == 0) {
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
val |= BIT(bit);
bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
-
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
}
@@ -126,19 +124,16 @@ static void bcm_kona_gpio_unlock_gpio(struct bcm_kona_gpio *kona_gpio,
unsigned gpio)
{
u32 val;
- unsigned long flags;
int bank_id = GPIO_BANK(gpio);
int bit = GPIO_BIT(gpio);
struct bcm_kona_gpio_bank *bank = &kona_gpio->banks[bank_id];
if (bank->gpio_unlock_count[bit] == 0) {
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
val &= ~BIT(bit);
bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
-
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
++bank->gpio_unlock_count[bit];
@@ -154,22 +149,23 @@ static int bcm_kona_gpio_get_dir(struct gpio_chip *chip, unsigned gpio)
return val ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT;
}
-static void bcm_kona_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
+static int bcm_kona_gpio_set(struct gpio_chip *chip, unsigned int gpio,
+ int value)
{
struct bcm_kona_gpio *kona_gpio;
void __iomem *reg_base;
int bank_id = GPIO_BANK(gpio);
int bit = GPIO_BIT(gpio);
u32 val, reg_offset;
- unsigned long flags;
kona_gpio = gpiochip_get_data(chip);
reg_base = kona_gpio->reg_base;
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
/* this function only applies to output pin */
if (bcm_kona_gpio_get_dir(chip, gpio) == GPIO_LINE_DIRECTION_IN)
- goto out;
+ return 0;
reg_offset = value ? GPIO_OUT_SET(bank_id) : GPIO_OUT_CLEAR(bank_id);
@@ -177,8 +173,7 @@ static void bcm_kona_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
val |= BIT(bit);
writel(val, reg_base + reg_offset);
-out:
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
+ return 0;
}
static int bcm_kona_gpio_get(struct gpio_chip *chip, unsigned gpio)
@@ -188,11 +183,11 @@ static int bcm_kona_gpio_get(struct gpio_chip *chip, unsigned gpio)
int bank_id = GPIO_BANK(gpio);
int bit = GPIO_BIT(gpio);
u32 val, reg_offset;
- unsigned long flags;
kona_gpio = gpiochip_get_data(chip);
reg_base = kona_gpio->reg_base;
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
if (bcm_kona_gpio_get_dir(chip, gpio) == GPIO_LINE_DIRECTION_IN)
reg_offset = GPIO_IN_STATUS(bank_id);
@@ -202,8 +197,6 @@ static int bcm_kona_gpio_get(struct gpio_chip *chip, unsigned gpio)
/* read the GPIO bank status */
val = readl(reg_base + reg_offset);
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
-
/* return the specified bit status */
return !!(val & BIT(bit));
}
@@ -228,19 +221,17 @@ static int bcm_kona_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
struct bcm_kona_gpio *kona_gpio;
void __iomem *reg_base;
u32 val;
- unsigned long flags;
kona_gpio = gpiochip_get_data(chip);
reg_base = kona_gpio->reg_base;
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
val = readl(reg_base + GPIO_CONTROL(gpio));
val &= ~GPIO_GPCTR0_IOTR_MASK;
val |= GPIO_GPCTR0_IOTR_CMD_INPUT;
writel(val, reg_base + GPIO_CONTROL(gpio));
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
-
return 0;
}
@@ -252,11 +243,11 @@ static int bcm_kona_gpio_direction_output(struct gpio_chip *chip,
int bank_id = GPIO_BANK(gpio);
int bit = GPIO_BIT(gpio);
u32 val, reg_offset;
- unsigned long flags;
kona_gpio = gpiochip_get_data(chip);
reg_base = kona_gpio->reg_base;
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
val = readl(reg_base + GPIO_CONTROL(gpio));
val &= ~GPIO_GPCTR0_IOTR_MASK;
@@ -268,8 +259,6 @@ static int bcm_kona_gpio_direction_output(struct gpio_chip *chip,
val |= BIT(bit);
writel(val, reg_base + reg_offset);
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
-
return 0;
}
@@ -289,7 +278,6 @@ static int bcm_kona_gpio_set_debounce(struct gpio_chip *chip, unsigned gpio,
struct bcm_kona_gpio *kona_gpio;
void __iomem *reg_base;
u32 val, res;
- unsigned long flags;
kona_gpio = gpiochip_get_data(chip);
reg_base = kona_gpio->reg_base;
@@ -312,7 +300,7 @@ static int bcm_kona_gpio_set_debounce(struct gpio_chip *chip, unsigned gpio,
}
/* spin lock for read-modify-write of the GPIO register */
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
val = readl(reg_base + GPIO_CONTROL(gpio));
val &= ~GPIO_GPCTR0_DBR_MASK;
@@ -327,8 +315,6 @@ static int bcm_kona_gpio_set_debounce(struct gpio_chip *chip, unsigned gpio,
writel(val, reg_base + GPIO_CONTROL(gpio));
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
-
return 0;
}
@@ -353,7 +339,7 @@ static const struct gpio_chip template_chip = {
.direction_input = bcm_kona_gpio_direction_input,
.get = bcm_kona_gpio_get,
.direction_output = bcm_kona_gpio_direction_output,
- .set = bcm_kona_gpio_set,
+ .set_rv = bcm_kona_gpio_set,
.set_config = bcm_kona_gpio_set_config,
.to_irq = bcm_kona_gpio_to_irq,
.base = 0,
@@ -367,17 +353,15 @@ static void bcm_kona_gpio_irq_ack(struct irq_data *d)
int bank_id = GPIO_BANK(gpio);
int bit = GPIO_BIT(gpio);
u32 val;
- unsigned long flags;
kona_gpio = irq_data_get_irq_chip_data(d);
reg_base = kona_gpio->reg_base;
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
val = readl(reg_base + GPIO_INT_STATUS(bank_id));
val |= BIT(bit);
writel(val, reg_base + GPIO_INT_STATUS(bank_id));
-
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
static void bcm_kona_gpio_irq_mask(struct irq_data *d)
@@ -388,19 +372,16 @@ static void bcm_kona_gpio_irq_mask(struct irq_data *d)
int bank_id = GPIO_BANK(gpio);
int bit = GPIO_BIT(gpio);
u32 val;
- unsigned long flags;
kona_gpio = irq_data_get_irq_chip_data(d);
reg_base = kona_gpio->reg_base;
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
val = readl(reg_base + GPIO_INT_MASK(bank_id));
val |= BIT(bit);
writel(val, reg_base + GPIO_INT_MASK(bank_id));
gpiochip_disable_irq(&kona_gpio->gpio_chip, gpio);
-
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
static void bcm_kona_gpio_irq_unmask(struct irq_data *d)
@@ -411,19 +392,16 @@ static void bcm_kona_gpio_irq_unmask(struct irq_data *d)
int bank_id = GPIO_BANK(gpio);
int bit = GPIO_BIT(gpio);
u32 val;
- unsigned long flags;
kona_gpio = irq_data_get_irq_chip_data(d);
reg_base = kona_gpio->reg_base;
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
val = readl(reg_base + GPIO_INT_MSKCLR(bank_id));
val |= BIT(bit);
writel(val, reg_base + GPIO_INT_MSKCLR(bank_id));
gpiochip_enable_irq(&kona_gpio->gpio_chip, gpio);
-
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
static int bcm_kona_gpio_irq_set_type(struct irq_data *d, unsigned int type)
@@ -433,7 +411,6 @@ static int bcm_kona_gpio_irq_set_type(struct irq_data *d, unsigned int type)
unsigned gpio = d->hwirq;
u32 lvl_type;
u32 val;
- unsigned long flags;
kona_gpio = irq_data_get_irq_chip_data(d);
reg_base = kona_gpio->reg_base;
@@ -459,15 +436,13 @@ static int bcm_kona_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
val = readl(reg_base + GPIO_CONTROL(gpio));
val &= ~GPIO_GPCTR0_ITR_MASK;
val |= lvl_type << GPIO_GPCTR0_ITR_SHIFT;
writel(val, reg_base + GPIO_CONTROL(gpio));
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
-
return 0;
}
diff --git a/drivers/gpio/gpio-bd71815.c b/drivers/gpio/gpio-bd71815.c
index 08ff2857256f..36701500925e 100644
--- a/drivers/gpio/gpio-bd71815.c
+++ b/drivers/gpio/gpio-bd71815.c
@@ -37,21 +37,18 @@ static int bd71815gpo_get(struct gpio_chip *chip, unsigned int offset)
return (val >> offset) & 1;
}
-static void bd71815gpo_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int bd71815gpo_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct bd71815_gpio *bd71815 = gpiochip_get_data(chip);
- int ret, bit;
+ int bit;
bit = BIT(offset);
if (value)
- ret = regmap_set_bits(bd71815->regmap, BD71815_REG_GPO, bit);
- else
- ret = regmap_clear_bits(bd71815->regmap, BD71815_REG_GPO, bit);
+ return regmap_set_bits(bd71815->regmap, BD71815_REG_GPO, bit);
- if (ret)
- dev_warn(bd71815->dev, "failed to toggle GPO\n");
+ return regmap_clear_bits(bd71815->regmap, BD71815_REG_GPO, bit);
}
static int bd71815_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
@@ -88,7 +85,7 @@ static const struct gpio_chip bd71815gpo_chip = {
.owner = THIS_MODULE,
.get = bd71815gpo_get,
.get_direction = bd71815gpo_direction_get,
- .set = bd71815gpo_set,
+ .set_rv = bd71815gpo_set,
.set_config = bd71815_gpio_set_config,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-bd71828.c b/drivers/gpio/gpio-bd71828.c
index b2ccc320c7b5..4ba151e5cf25 100644
--- a/drivers/gpio/gpio-bd71828.c
+++ b/drivers/gpio/gpio-bd71828.c
@@ -16,10 +16,9 @@ struct bd71828_gpio {
struct gpio_chip gpio;
};
-static void bd71828_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int bd71828_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
- int ret;
struct bd71828_gpio *bdgpio = gpiochip_get_data(chip);
u8 val = (value) ? BD71828_GPIO_OUT_HI : BD71828_GPIO_OUT_LO;
@@ -28,12 +27,10 @@ static void bd71828_gpio_set(struct gpio_chip *chip, unsigned int offset,
* we are dealing with - then we are done
*/
if (offset == HALL_GPIO_OFFSET)
- return;
+ return 0;
- ret = regmap_update_bits(bdgpio->regmap, GPIO_OUT_REG(offset),
- BD71828_GPIO_OUT_MASK, val);
- if (ret)
- dev_err(bdgpio->dev, "Could not set gpio to %d\n", value);
+ return regmap_update_bits(bdgpio->regmap, GPIO_OUT_REG(offset),
+ BD71828_GPIO_OUT_MASK, val);
}
static int bd71828_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -112,7 +109,7 @@ static int bd71828_probe(struct platform_device *pdev)
bdgpio->gpio.set_config = bd71828_gpio_set_config;
bdgpio->gpio.can_sleep = true;
bdgpio->gpio.get = bd71828_gpio_get;
- bdgpio->gpio.set = bd71828_gpio_set;
+ bdgpio->gpio.set_rv = bd71828_gpio_set;
bdgpio->gpio.base = -1;
/*
diff --git a/drivers/gpio/gpio-bd9571mwv.c b/drivers/gpio/gpio-bd9571mwv.c
index 9a4d55f703bb..8df1361e3e84 100644
--- a/drivers/gpio/gpio-bd9571mwv.c
+++ b/drivers/gpio/gpio-bd9571mwv.c
@@ -72,13 +72,13 @@ static int bd9571mwv_gpio_get(struct gpio_chip *chip, unsigned int offset)
return val & BIT(offset);
}
-static void bd9571mwv_gpio_set(struct gpio_chip *chip, unsigned int offset,
+static int bd9571mwv_gpio_set(struct gpio_chip *chip, unsigned int offset,
int value)
{
struct bd9571mwv_gpio *gpio = gpiochip_get_data(chip);
- regmap_update_bits(gpio->regmap, BD9571MWV_GPIO_OUT,
- BIT(offset), value ? BIT(offset) : 0);
+ return regmap_update_bits(gpio->regmap, BD9571MWV_GPIO_OUT,
+ BIT(offset), value ? BIT(offset) : 0);
}
static const struct gpio_chip template_chip = {
@@ -88,7 +88,7 @@ static const struct gpio_chip template_chip = {
.direction_input = bd9571mwv_gpio_direction_input,
.direction_output = bd9571mwv_gpio_direction_output,
.get = bd9571mwv_gpio_get,
- .set = bd9571mwv_gpio_set,
+ .set_rv = bd9571mwv_gpio_set,
.base = -1,
.ngpio = 2,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 491b529d25f8..ca3472977431 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -9,6 +9,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/string_choices.h>
enum gio_reg_index {
GIO_REG_ODEN = 0,
@@ -224,7 +225,7 @@ static int brcmstb_gpio_priv_set_wake(struct brcmstb_gpio_priv *priv,
ret = disable_irq_wake(priv->parent_wake_irq);
if (ret)
dev_err(&priv->pdev->dev, "failed to %s wake-up interrupt\n",
- enable ? "enable" : "disable");
+ str_enable_disable(enable));
return ret;
}
diff --git a/drivers/gpio/gpio-bt8xx.c b/drivers/gpio/gpio-bt8xx.c
index 7920cf256798..7c9e81fea37a 100644
--- a/drivers/gpio/gpio-bt8xx.c
+++ b/drivers/gpio/gpio-bt8xx.c
@@ -31,6 +31,7 @@
*/
+#include <linux/cleanup.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
@@ -69,10 +70,9 @@ MODULE_PARM_DESC(gpiobase, "The GPIO number base. -1 means dynamic, which is the
static int bt8xxgpio_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
{
struct bt8xxgpio *bg = gpiochip_get_data(gpio);
- unsigned long flags;
u32 outen, data;
- spin_lock_irqsave(&bg->lock, flags);
+ guard(spinlock_irqsave)(&bg->lock);
data = bgread(BT848_GPIO_DATA);
data &= ~(1 << nr);
@@ -82,20 +82,17 @@ static int bt8xxgpio_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
outen &= ~(1 << nr);
bgwrite(outen, BT848_GPIO_OUT_EN);
- spin_unlock_irqrestore(&bg->lock, flags);
-
return 0;
}
static int bt8xxgpio_gpio_get(struct gpio_chip *gpio, unsigned nr)
{
struct bt8xxgpio *bg = gpiochip_get_data(gpio);
- unsigned long flags;
u32 val;
- spin_lock_irqsave(&bg->lock, flags);
+ guard(spinlock_irqsave)(&bg->lock);
+
val = bgread(BT848_GPIO_DATA);
- spin_unlock_irqrestore(&bg->lock, flags);
return !!(val & (1 << nr));
}
@@ -104,10 +101,9 @@ static int bt8xxgpio_gpio_direction_output(struct gpio_chip *gpio,
unsigned nr, int val)
{
struct bt8xxgpio *bg = gpiochip_get_data(gpio);
- unsigned long flags;
u32 outen, data;
- spin_lock_irqsave(&bg->lock, flags);
+ guard(spinlock_irqsave)(&bg->lock);
outen = bgread(BT848_GPIO_OUT_EN);
outen |= (1 << nr);
@@ -120,19 +116,15 @@ static int bt8xxgpio_gpio_direction_output(struct gpio_chip *gpio,
data &= ~(1 << nr);
bgwrite(data, BT848_GPIO_DATA);
- spin_unlock_irqrestore(&bg->lock, flags);
-
return 0;
}
-static void bt8xxgpio_gpio_set(struct gpio_chip *gpio,
- unsigned nr, int val)
+static int bt8xxgpio_gpio_set(struct gpio_chip *gpio, unsigned int nr, int val)
{
struct bt8xxgpio *bg = gpiochip_get_data(gpio);
- unsigned long flags;
u32 data;
- spin_lock_irqsave(&bg->lock, flags);
+ guard(spinlock_irqsave)(&bg->lock);
data = bgread(BT848_GPIO_DATA);
if (val)
@@ -141,7 +133,7 @@ static void bt8xxgpio_gpio_set(struct gpio_chip *gpio,
data &= ~(1 << nr);
bgwrite(data, BT848_GPIO_DATA);
- spin_unlock_irqrestore(&bg->lock, flags);
+ return 0;
}
static void bt8xxgpio_gpio_setup(struct bt8xxgpio *bg)
@@ -153,7 +145,7 @@ static void bt8xxgpio_gpio_setup(struct bt8xxgpio *bg)
c->direction_input = bt8xxgpio_gpio_direction_input;
c->get = bt8xxgpio_gpio_get;
c->direction_output = bt8xxgpio_gpio_direction_output;
- c->set = bt8xxgpio_gpio_set;
+ c->set_rv = bt8xxgpio_gpio_set;
c->dbg_show = NULL;
c->base = modparam_gpiobase;
c->ngpio = BT8XXGPIO_NR_GPIOS;
@@ -236,18 +228,15 @@ static void bt8xxgpio_remove(struct pci_dev *pdev)
static int bt8xxgpio_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct bt8xxgpio *bg = pci_get_drvdata(pdev);
- unsigned long flags;
-
- spin_lock_irqsave(&bg->lock, flags);
- bg->saved_outen = bgread(BT848_GPIO_OUT_EN);
- bg->saved_data = bgread(BT848_GPIO_DATA);
+ scoped_guard(spinlock_irqsave, &bg->lock) {
+ bg->saved_outen = bgread(BT848_GPIO_OUT_EN);
+ bg->saved_data = bgread(BT848_GPIO_DATA);
- bgwrite(0, BT848_INT_MASK);
- bgwrite(~0x0, BT848_INT_STAT);
- bgwrite(0x0, BT848_GPIO_OUT_EN);
-
- spin_unlock_irqrestore(&bg->lock, flags);
+ bgwrite(0, BT848_INT_MASK);
+ bgwrite(~0x0, BT848_INT_STAT);
+ bgwrite(0x0, BT848_GPIO_OUT_EN);
+ }
pci_save_state(pdev);
pci_disable_device(pdev);
@@ -259,7 +248,6 @@ static int bt8xxgpio_suspend(struct pci_dev *pdev, pm_message_t state)
static int bt8xxgpio_resume(struct pci_dev *pdev)
{
struct bt8xxgpio *bg = pci_get_drvdata(pdev);
- unsigned long flags;
int err;
pci_set_power_state(pdev, PCI_D0);
@@ -268,7 +256,7 @@ static int bt8xxgpio_resume(struct pci_dev *pdev)
return err;
pci_restore_state(pdev);
- spin_lock_irqsave(&bg->lock, flags);
+ guard(spinlock_irqsave)(&bg->lock);
bgwrite(0, BT848_INT_MASK);
bgwrite(0, BT848_GPIO_DMA_CTL);
@@ -277,8 +265,6 @@ static int bt8xxgpio_resume(struct pci_dev *pdev)
bgwrite(bg->saved_data & bg->saved_outen,
BT848_GPIO_DATA);
- spin_unlock_irqrestore(&bg->lock, flags);
-
return 0;
}
#else
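
The locking half of the conversion relies on the scope-based helpers from <linux/cleanup.h>, which the touched files now include: guard() holds the lock until the enclosing scope ends, while scoped_guard() confines the critical section to an explicit block, which is what makes the early returns in the hunks above safe. A minimal sketch with hypothetical "foo" identifiers:

/*
 * Illustrative sketch only; "foo" identifiers are hypothetical.
 * guard()/scoped_guard() come from <linux/cleanup.h> and replace the
 * manual raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pairs,
 * so error paths can simply return.
 */
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct foo {
	raw_spinlock_t lock;
	u32 shadow;
};

static int foo_update(struct foo *f, unsigned int bit, bool set)
{
	/* Lock is dropped automatically on every return path below. */
	guard(raw_spinlock_irqsave)(&f->lock);

	if (bit >= 32)
		return -EINVAL;		/* early return, no unlock needed */

	if (set)
		f->shadow |= BIT(bit);
	else
		f->shadow &= ~BIT(bit);

	return 0;
}

static void foo_snapshot(struct foo *f, u32 *out)
{
	/* Critical section limited to the braced block. */
	scoped_guard(raw_spinlock_irqsave, &f->lock)
		*out = f->shadow;
}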
diff --git a/drivers/gpio/gpio-cgbc.c b/drivers/gpio/gpio-cgbc.c
index 9213faa11522..1495bec62456 100644
--- a/drivers/gpio/gpio-cgbc.c
+++ b/drivers/gpio/gpio-cgbc.c
@@ -51,8 +51,8 @@ static int cgbc_gpio_get(struct gpio_chip *chip, unsigned int offset)
return (int)(val & (u8)BIT(offset));
}
-static void __cgbc_gpio_set(struct gpio_chip *chip,
- unsigned int offset, int value)
+static int __cgbc_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct cgbc_gpio_data *gpio = gpiochip_get_data(chip);
struct cgbc_device_data *cgbc = gpio->cgbc;
@@ -61,23 +61,23 @@ static void __cgbc_gpio_set(struct gpio_chip *chip,
ret = cgbc_gpio_cmd(cgbc, CGBC_GPIO_CMD_GET, (offset > 7) ? 1 : 0, 0, &val);
if (ret)
- return;
+ return ret;
if (value)
val |= BIT(offset % 8);
else
val &= ~(BIT(offset % 8));
- cgbc_gpio_cmd(cgbc, CGBC_GPIO_CMD_SET, (offset > 7) ? 1 : 0, val, &val);
+ return cgbc_gpio_cmd(cgbc, CGBC_GPIO_CMD_SET, (offset > 7) ? 1 : 0, val, &val);
}
-static void cgbc_gpio_set(struct gpio_chip *chip,
- unsigned int offset, int value)
+static int cgbc_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct cgbc_gpio_data *gpio = gpiochip_get_data(chip);
- scoped_guard(mutex, &gpio->lock)
- __cgbc_gpio_set(chip, offset, value);
+ guard(mutex)(&gpio->lock);
+
+ return __cgbc_gpio_set(chip, offset, value);
}
static int cgbc_gpio_direction_set(struct gpio_chip *chip,
@@ -116,10 +116,14 @@ static int cgbc_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset, int value)
{
struct cgbc_gpio_data *gpio = gpiochip_get_data(chip);
+ int ret;
guard(mutex)(&gpio->lock);
- __cgbc_gpio_set(chip, offset, value);
+ ret = __cgbc_gpio_set(chip, offset, value);
+ if (ret)
+ return ret;
+
return cgbc_gpio_direction_set(chip, offset, GPIO_LINE_DIRECTION_OUT);
}
@@ -167,7 +171,7 @@ static int cgbc_gpio_probe(struct platform_device *pdev)
chip->direction_output = cgbc_gpio_direction_output;
chip->get_direction = cgbc_gpio_get_direction;
chip->get = cgbc_gpio_get;
- chip->set = cgbc_gpio_set;
+ chip->set_rv = cgbc_gpio_set;
chip->ngpio = CGBC_GPIO_NGPIO;
ret = devm_mutex_init(dev, &gpio->lock);
diff --git a/drivers/gpio/gpio-creg-snps.c b/drivers/gpio/gpio-creg-snps.c
index 4968232f70f2..8b49f02c7896 100644
--- a/drivers/gpio/gpio-creg-snps.c
+++ b/drivers/gpio/gpio-creg-snps.c
@@ -27,7 +27,7 @@ struct creg_gpio {
const struct creg_layout *layout;
};
-static void creg_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
+static int creg_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
{
struct creg_gpio *hcg = gpiochip_get_data(gc);
const struct creg_layout *layout = hcg->layout;
@@ -47,13 +47,13 @@ static void creg_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
reg |= (value << reg_shift);
writel(reg, hcg->regs);
spin_unlock_irqrestore(&hcg->lock, flags);
+
+ return 0;
}
static int creg_gpio_dir_out(struct gpio_chip *gc, unsigned int offset, int val)
{
- creg_gpio_set(gc, offset, val);
-
- return 0;
+ return creg_gpio_set(gc, offset, val);
}
static int creg_gpio_validate_pg(struct device *dev, struct creg_gpio *hcg,
@@ -167,7 +167,7 @@ static int creg_gpio_probe(struct platform_device *pdev)
hcg->gc.label = dev_name(dev);
hcg->gc.base = -1;
hcg->gc.ngpio = ngpios;
- hcg->gc.set = creg_gpio_set;
+ hcg->gc.set_rv = creg_gpio_set;
hcg->gc.direction_output = creg_gpio_dir_out;
ret = devm_gpiochip_add_data(dev, &hcg->gc, hcg);
diff --git a/drivers/gpio/gpio-cros-ec.c b/drivers/gpio/gpio-cros-ec.c
index 0c09bb54dc0c..53cd5ff6247b 100644
--- a/drivers/gpio/gpio-cros-ec.c
+++ b/drivers/gpio/gpio-cros-ec.c
@@ -24,24 +24,21 @@
static const char cros_ec_gpio_prefix[] = "EC:";
/* Setting gpios is only supported when the system is unlocked */
-static void cros_ec_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+static int cros_ec_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
const char *name = gc->names[gpio] + strlen(cros_ec_gpio_prefix);
struct cros_ec_device *cros_ec = gpiochip_get_data(gc);
struct ec_params_gpio_set params = {
.val = val,
};
- int ret;
ssize_t copied;
copied = strscpy(params.name, name, sizeof(params.name));
if (copied < 0)
- return;
+ return copied;
- ret = cros_ec_cmd(cros_ec, 0, EC_CMD_GPIO_SET, &params,
- sizeof(params), NULL, 0);
- if (ret < 0)
- dev_err(gc->parent, "error setting gpio%d (%s) on EC: %d\n", gpio, name, ret);
+ return cros_ec_cmd(cros_ec, 0, EC_CMD_GPIO_SET, &params,
+ sizeof(params), NULL, 0);
}
static int cros_ec_gpio_get(struct gpio_chip *gc, unsigned int gpio)
@@ -191,7 +188,7 @@ static int cros_ec_gpio_probe(struct platform_device *pdev)
gc->can_sleep = true;
gc->label = dev_name(dev);
gc->base = -1;
- gc->set = cros_ec_gpio_set;
+ gc->set_rv = cros_ec_gpio_set;
gc->get = cros_ec_gpio_get;
gc->get_direction = cros_ec_gpio_get_direction;
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 25db014494a4..8db7cca3a060 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/seq_file.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#define CRYSTALCOVE_GPIO_NUM 16
@@ -167,18 +168,18 @@ static int crystalcove_gpio_get(struct gpio_chip *chip, unsigned int gpio)
return val & 0x1;
}
-static void crystalcove_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
+static int crystalcove_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
{
struct crystalcove_gpio *cg = gpiochip_get_data(chip);
int reg = to_reg(gpio, CTRL_OUT);
if (reg < 0)
- return;
+ return 0;
if (value)
- regmap_update_bits(cg->regmap, reg, 1, 1);
- else
- regmap_update_bits(cg->regmap, reg, 1, 0);
+ return regmap_update_bits(cg->regmap, reg, 1, 1);
+
+ return regmap_update_bits(cg->regmap, reg, 1, 0);
}
static int crystalcove_irq_type(struct irq_data *data, unsigned int type)
@@ -317,7 +318,7 @@ static void crystalcove_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip
offset = gpio % 8;
seq_printf(s, " gpio-%-2d %s %s %s %s ctlo=%2x,%s %s %s\n",
gpio, ctlo & CTLO_DIR_OUT ? "out" : "in ",
- ctli & 0x1 ? "hi" : "lo",
+ str_hi_lo(ctli & 0x1),
ctli & CTLI_INTCNT_NE ? "fall" : " ",
ctli & CTLI_INTCNT_PE ? "rise" : " ",
ctlo,
@@ -348,7 +349,7 @@ static int crystalcove_gpio_probe(struct platform_device *pdev)
cg->chip.direction_input = crystalcove_gpio_dir_in;
cg->chip.direction_output = crystalcove_gpio_dir_out;
cg->chip.get = crystalcove_gpio_get;
- cg->chip.set = crystalcove_gpio_set;
+ cg->chip.set_rv = crystalcove_gpio_set;
cg->chip.base = -1;
cg->chip.ngpio = CRYSTALCOVE_VGPIO_NUM;
cg->chip.can_sleep = true;
diff --git a/drivers/gpio/gpio-cs5535.c b/drivers/gpio/gpio-cs5535.c
index 6da3a247614a..143d1f4173a6 100644
--- a/drivers/gpio/gpio-cs5535.c
+++ b/drivers/gpio/gpio-cs5535.c
@@ -232,12 +232,14 @@ static int chip_gpio_get(struct gpio_chip *chip, unsigned offset)
return cs5535_gpio_isset(offset, GPIO_READ_BACK);
}
-static void chip_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+static int chip_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
{
if (val)
cs5535_gpio_set(offset, GPIO_OUTPUT_VAL);
else
cs5535_gpio_clear(offset, GPIO_OUTPUT_VAL);
+
+ return 0;
}
static int chip_direction_input(struct gpio_chip *c, unsigned offset)
@@ -294,7 +296,7 @@ static struct cs5535_gpio_chip cs5535_gpio_chip = {
.request = chip_gpio_request,
.get = chip_gpio_get,
- .set = chip_gpio_set,
+ .set_rv = chip_gpio_set,
.direction_input = chip_direction_input,
.direction_output = chip_direction_output,
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index 6f3905f1b8f5..6482c5b267db 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -89,30 +89,20 @@ static int da9052_gpio_get(struct gpio_chip *gc, unsigned offset)
}
}
-static void da9052_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
+static int da9052_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct da9052_gpio *gpio = gpiochip_get_data(gc);
- int ret;
- if (da9052_gpio_port_odd(offset)) {
- ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
- DA9052_GPIO_0_1_REG,
- DA9052_GPIO_ODD_PORT_MODE,
- value << DA9052_GPIO_ODD_SHIFT);
- if (ret != 0)
- dev_err(gpio->da9052->dev,
- "Failed to updated gpio odd reg,%d",
- ret);
- } else {
- ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
- DA9052_GPIO_0_1_REG,
- DA9052_GPIO_EVEN_PORT_MODE,
- value << DA9052_GPIO_EVEN_SHIFT);
- if (ret != 0)
- dev_err(gpio->da9052->dev,
- "Failed to updated gpio even reg,%d",
- ret);
- }
+ if (da9052_gpio_port_odd(offset))
+ return da9052_reg_update(gpio->da9052, (offset >> 1) +
+ DA9052_GPIO_0_1_REG,
+ DA9052_GPIO_ODD_PORT_MODE,
+ value << DA9052_GPIO_ODD_SHIFT);
+
+ return da9052_reg_update(gpio->da9052,
+ (offset >> 1) + DA9052_GPIO_0_1_REG,
+ DA9052_GPIO_EVEN_PORT_MODE,
+ value << DA9052_GPIO_EVEN_SHIFT);
}
static int da9052_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
@@ -182,7 +172,7 @@ static const struct gpio_chip reference_gp = {
.label = "da9052-gpio",
.owner = THIS_MODULE,
.get = da9052_gpio_get,
- .set = da9052_gpio_set,
+ .set_rv = da9052_gpio_set,
.direction_input = da9052_gpio_direction_input,
.direction_output = da9052_gpio_direction_output,
.to_irq = da9052_gpio_to_irq,
diff --git a/drivers/gpio/gpio-da9055.c b/drivers/gpio/gpio-da9055.c
index 49446a030f10..3d9d0c700100 100644
--- a/drivers/gpio/gpio-da9055.c
+++ b/drivers/gpio/gpio-da9055.c
@@ -59,14 +59,12 @@ static int da9055_gpio_get(struct gpio_chip *gc, unsigned offset)
}
-static void da9055_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
+static int da9055_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct da9055_gpio *gpio = gpiochip_get_data(gc);
- da9055_reg_update(gpio->da9055,
- DA9055_REG_GPIO_MODE0_2,
- 1 << offset,
- value << offset);
+ return da9055_reg_update(gpio->da9055, DA9055_REG_GPIO_MODE0_2,
+ 1 << offset, value << offset);
}
static int da9055_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
@@ -102,9 +100,7 @@ static int da9055_gpio_direction_output(struct gpio_chip *gc,
if (ret < 0)
return ret;
- da9055_gpio_set(gc, offset, value);
-
- return 0;
+ return da9055_gpio_set(gc, offset, value);
}
static int da9055_gpio_to_irq(struct gpio_chip *gc, u32 offset)
@@ -120,7 +116,7 @@ static const struct gpio_chip reference_gp = {
.label = "da9055-gpio",
.owner = THIS_MODULE,
.get = da9055_gpio_get,
- .set = da9055_gpio_set,
+ .set_rv = da9055_gpio_set,
.direction_input = da9055_gpio_direction_input,
.direction_output = da9055_gpio_direction_output,
.to_irq = da9055_gpio_to_irq,
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 8c033e8cf3c9..63fc7888c1d4 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -139,7 +139,7 @@ static int davinci_gpio_get(struct gpio_chip *chip, unsigned offset)
/*
* Assuming the pin is muxed as a gpio output, set its output value.
*/
-static void
+static int
davinci_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct davinci_gpio_controller *d = gpiochip_get_data(chip);
@@ -150,6 +150,8 @@ davinci_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
writel_relaxed(__gpio_mask(offset),
value ? &g->set_data : &g->clr_data);
+
+ return 0;
}
static int davinci_gpio_probe(struct platform_device *pdev)
@@ -209,7 +211,7 @@ static int davinci_gpio_probe(struct platform_device *pdev)
chips->chip.direction_input = davinci_direction_in;
chips->chip.get = davinci_gpio_get;
chips->chip.direction_output = davinci_direction_out;
- chips->chip.set = davinci_gpio_set;
+ chips->chip.set_rv = davinci_gpio_set;
chips->chip.ngpio = ngpio;
chips->chip.base = -1;
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 169f33c41c59..30a0522ae735 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -30,6 +30,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#define GRGPIO_MAX_NGPIO 32
@@ -438,7 +439,7 @@ static int grgpio_probe(struct platform_device *ofdev)
}
dev_info(dev, "regs=0x%p, base=%d, ngpio=%d, irqs=%s\n",
- priv->regs, gc->base, gc->ngpio, priv->domain ? "on" : "off");
+ priv->regs, gc->base, gc->ngpio, str_on_off(priv->domain));
return 0;
}
diff --git a/drivers/gpio/gpio-latch.c b/drivers/gpio/gpio-latch.c
index d7c3b20c8482..3d0ff09284fb 100644
--- a/drivers/gpio/gpio-latch.c
+++ b/drivers/gpio/gpio-latch.c
@@ -38,12 +38,14 @@
* in the corresponding device tree properties.
*/
+#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/delay.h>
#include "gpiolib.h"
@@ -71,46 +73,46 @@ static int gpio_latch_get_direction(struct gpio_chip *gc, unsigned int offset)
return GPIO_LINE_DIRECTION_OUT;
}
-static void gpio_latch_set_unlocked(struct gpio_latch_priv *priv,
- void (*set)(struct gpio_desc *desc, int value),
- unsigned int offset, bool val)
+static int gpio_latch_set_unlocked(struct gpio_latch_priv *priv,
+ int (*set)(struct gpio_desc *desc, int value),
+ unsigned int offset, bool val)
{
- int latch = offset / priv->n_latched_gpios;
- int i;
+ int latch = offset / priv->n_latched_gpios, i, ret;
assign_bit(offset, priv->shadow, val);
- for (i = 0; i < priv->n_latched_gpios; i++)
- set(priv->latched_gpios->desc[i],
- test_bit(latch * priv->n_latched_gpios + i, priv->shadow));
+ for (i = 0; i < priv->n_latched_gpios; i++) {
+ ret = set(priv->latched_gpios->desc[i],
+ test_bit(latch * priv->n_latched_gpios + i,
+ priv->shadow));
+ if (ret)
+ return ret;
+ }
ndelay(priv->setup_duration_ns);
set(priv->clk_gpios->desc[latch], 1);
ndelay(priv->clock_duration_ns);
set(priv->clk_gpios->desc[latch], 0);
+
+ return 0;
}
-static void gpio_latch_set(struct gpio_chip *gc, unsigned int offset, int val)
+static int gpio_latch_set(struct gpio_chip *gc, unsigned int offset, int val)
{
struct gpio_latch_priv *priv = gpiochip_get_data(gc);
- unsigned long flags;
-
- spin_lock_irqsave(&priv->spinlock, flags);
- gpio_latch_set_unlocked(priv, gpiod_set_value, offset, val);
+ guard(spinlock_irqsave)(&priv->spinlock);
- spin_unlock_irqrestore(&priv->spinlock, flags);
+ return gpio_latch_set_unlocked(priv, gpiod_set_value, offset, val);
}
-static void gpio_latch_set_can_sleep(struct gpio_chip *gc, unsigned int offset, int val)
+static int gpio_latch_set_can_sleep(struct gpio_chip *gc, unsigned int offset, int val)
{
struct gpio_latch_priv *priv = gpiochip_get_data(gc);
- mutex_lock(&priv->mutex);
-
- gpio_latch_set_unlocked(priv, gpiod_set_value_cansleep, offset, val);
+ guard(mutex)(&priv->mutex);
- mutex_unlock(&priv->mutex);
+ return gpio_latch_set_unlocked(priv, gpiod_set_value_cansleep, offset, val);
}
static bool gpio_latch_can_sleep(struct gpio_latch_priv *priv, unsigned int n_latches)
@@ -138,50 +140,52 @@ static bool gpio_latch_can_sleep(struct gpio_latch_priv *priv, unsigned int n_la
static int gpio_latch_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct gpio_latch_priv *priv;
unsigned int n_latches;
- struct device_node *np = pdev->dev.of_node;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- priv->clk_gpios = devm_gpiod_get_array(&pdev->dev, "clk", GPIOD_OUT_LOW);
+ priv->clk_gpios = devm_gpiod_get_array(dev, "clk", GPIOD_OUT_LOW);
if (IS_ERR(priv->clk_gpios))
return PTR_ERR(priv->clk_gpios);
- priv->latched_gpios = devm_gpiod_get_array(&pdev->dev, "latched", GPIOD_OUT_LOW);
+ priv->latched_gpios = devm_gpiod_get_array(dev, "latched", GPIOD_OUT_LOW);
if (IS_ERR(priv->latched_gpios))
return PTR_ERR(priv->latched_gpios);
n_latches = priv->clk_gpios->ndescs;
priv->n_latched_gpios = priv->latched_gpios->ndescs;
- priv->shadow = devm_bitmap_zalloc(&pdev->dev, n_latches * priv->n_latched_gpios,
+ priv->shadow = devm_bitmap_zalloc(dev, n_latches * priv->n_latched_gpios,
GFP_KERNEL);
if (!priv->shadow)
return -ENOMEM;
if (gpio_latch_can_sleep(priv, n_latches)) {
priv->gc.can_sleep = true;
- priv->gc.set = gpio_latch_set_can_sleep;
+ priv->gc.set_rv = gpio_latch_set_can_sleep;
mutex_init(&priv->mutex);
} else {
priv->gc.can_sleep = false;
- priv->gc.set = gpio_latch_set;
+ priv->gc.set_rv = gpio_latch_set;
spin_lock_init(&priv->spinlock);
}
- of_property_read_u32(np, "setup-duration-ns", &priv->setup_duration_ns);
+ device_property_read_u32(dev, "setup-duration-ns",
+ &priv->setup_duration_ns);
if (priv->setup_duration_ns > DURATION_NS_MAX) {
- dev_warn(&pdev->dev, "setup-duration-ns too high, limit to %d\n",
+ dev_warn(dev, "setup-duration-ns too high, limit to %d\n",
DURATION_NS_MAX);
priv->setup_duration_ns = DURATION_NS_MAX;
}
- of_property_read_u32(np, "clock-duration-ns", &priv->clock_duration_ns);
+ device_property_read_u32(dev, "clock-duration-ns",
+ &priv->clock_duration_ns);
if (priv->clock_duration_ns > DURATION_NS_MAX) {
- dev_warn(&pdev->dev, "clock-duration-ns too high, limit to %d\n",
+ dev_warn(dev, "clock-duration-ns too high, limit to %d\n",
DURATION_NS_MAX);
priv->clock_duration_ns = DURATION_NS_MAX;
}
@@ -190,11 +194,11 @@ static int gpio_latch_probe(struct platform_device *pdev)
priv->gc.ngpio = n_latches * priv->n_latched_gpios;
priv->gc.owner = THIS_MODULE;
priv->gc.base = -1;
- priv->gc.parent = &pdev->dev;
+ priv->gc.parent = dev;
platform_set_drvdata(pdev, priv);
- return devm_gpiochip_add_data(&pdev->dev, &priv->gc, priv);
+ return devm_gpiochip_add_data(dev, &priv->gc, priv);
}
static const struct of_device_id gpio_latch_ids[] = {
diff --git a/drivers/gpio/gpio-loongson-64bit.c b/drivers/gpio/gpio-loongson-64bit.c
index 7f4d78fd800e..a9a93036f08f 100644
--- a/drivers/gpio/gpio-loongson-64bit.c
+++ b/drivers/gpio/gpio-loongson-64bit.c
@@ -31,7 +31,6 @@ struct loongson_gpio_chip_data {
struct loongson_gpio_chip {
struct gpio_chip chip;
- struct fwnode_handle *fwnode;
spinlock_t lock;
void __iomem *reg_base;
const struct loongson_gpio_chip_data *chip_data;
@@ -138,7 +137,6 @@ static int loongson_gpio_init(struct device *dev, struct loongson_gpio_chip *lgp
void __iomem *reg_base)
{
int ret;
- u32 ngpios;
lgpio->reg_base = reg_base;
if (lgpio->chip_data->mode == BIT_CTRL_MODE) {
@@ -159,8 +157,6 @@ static int loongson_gpio_init(struct device *dev, struct loongson_gpio_chip *lgp
lgpio->chip.direction_output = loongson_gpio_direction_output;
lgpio->chip.set = loongson_gpio_set;
lgpio->chip.parent = dev;
- device_property_read_u32(dev, "ngpios", &ngpios);
- lgpio->chip.ngpio = ngpios;
spin_lock_init(&lgpio->lock);
}
@@ -258,6 +254,33 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls7a_data = {
.out_offset = 0x900,
};
+/* LS7A2000 chipset GPIO */
+static const struct loongson_gpio_chip_data loongson_gpio_ls7a2000_data0 = {
+ .label = "ls7a2000_gpio",
+ .mode = BYTE_CTRL_MODE,
+ .conf_offset = 0x800,
+ .in_offset = 0xa00,
+ .out_offset = 0x900,
+};
+
+/* LS7A2000 ACPI GPIO */
+static const struct loongson_gpio_chip_data loongson_gpio_ls7a2000_data1 = {
+ .label = "ls7a2000_gpio",
+ .mode = BYTE_CTRL_MODE,
+ .conf_offset = 0x4,
+ .in_offset = 0x8,
+ .out_offset = 0x0,
+};
+
+/* Loongson-3A6000 node GPIO */
+static const struct loongson_gpio_chip_data loongson_gpio_ls3a6000_data = {
+ .label = "ls3a6000_gpio",
+ .mode = BIT_CTRL_MODE,
+ .conf_offset = 0x0,
+ .in_offset = 0xc,
+ .out_offset = 0x8,
+};
+
static const struct of_device_id loongson_gpio_of_match[] = {
{
.compatible = "loongson,ls2k-gpio",
@@ -291,6 +314,18 @@ static const struct of_device_id loongson_gpio_of_match[] = {
.compatible = "loongson,ls7a-gpio",
.data = &loongson_gpio_ls7a_data,
},
+ {
+ .compatible = "loongson,ls7a2000-gpio1",
+ .data = &loongson_gpio_ls7a2000_data0,
+ },
+ {
+ .compatible = "loongson,ls7a2000-gpio2",
+ .data = &loongson_gpio_ls7a2000_data1,
+ },
+ {
+ .compatible = "loongson,ls3a6000-gpio",
+ .data = &loongson_gpio_ls3a6000_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, loongson_gpio_of_match);
@@ -316,6 +351,18 @@ static const struct acpi_device_id loongson_gpio_acpi_match[] = {
.id = "LOON000C",
.driver_data = (kernel_ulong_t)&loongson_gpio_ls2k2000_data2,
},
+ {
+ .id = "LOON000D",
+ .driver_data = (kernel_ulong_t)&loongson_gpio_ls7a2000_data0,
+ },
+ {
+ .id = "LOON000E",
+ .driver_data = (kernel_ulong_t)&loongson_gpio_ls7a2000_data1,
+ },
+ {
+ .id = "LOON000F",
+ .driver_data = (kernel_ulong_t)&loongson_gpio_ls3a6000_data,
+ },
{}
};
MODULE_DEVICE_TABLE(acpi, loongson_gpio_acpi_match);
diff --git a/drivers/gpio/gpio-max3191x.c b/drivers/gpio/gpio-max3191x.c
index bbacc714632b..fc0708ab5192 100644
--- a/drivers/gpio/gpio-max3191x.c
+++ b/drivers/gpio/gpio-max3191x.c
@@ -309,23 +309,21 @@ static int max3191x_set_config(struct gpio_chip *gpio, unsigned int offset,
return 0;
}
-static void gpiod_set_array_single_value_cansleep(unsigned int ndescs,
- struct gpio_desc **desc,
- struct gpio_array *info,
+static void max3191x_gpiod_multi_set_single_value(struct gpio_descs *descs,
int value)
{
unsigned long *values;
- values = bitmap_alloc(ndescs, GFP_KERNEL);
+ values = bitmap_alloc(descs->ndescs, GFP_KERNEL);
if (!values)
return;
if (value)
- bitmap_fill(values, ndescs);
+ bitmap_fill(values, descs->ndescs);
else
- bitmap_zero(values, ndescs);
+ bitmap_zero(values, descs->ndescs);
- gpiod_set_array_value_cansleep(ndescs, desc, info, values);
+ gpiod_multi_set_value_cansleep(descs, values);
bitmap_free(values);
}
@@ -396,10 +394,8 @@ static int max3191x_probe(struct spi_device *spi)
max3191x->mode = device_property_read_bool(dev, "maxim,modesel-8bit")
? STATUS_BYTE_DISABLED : STATUS_BYTE_ENABLED;
if (max3191x->modesel_pins)
- gpiod_set_array_single_value_cansleep(
- max3191x->modesel_pins->ndescs,
- max3191x->modesel_pins->desc,
- max3191x->modesel_pins->info, max3191x->mode);
+ max3191x_gpiod_multi_set_single_value(max3191x->modesel_pins,
+ max3191x->mode);
max3191x->ignore_uv = device_property_read_bool(dev,
"maxim,ignore-undervoltage");
diff --git a/drivers/gpio/gpio-max77650.c b/drivers/gpio/gpio-max77650.c
index 3075f2513c6f..a553e141059f 100644
--- a/drivers/gpio/gpio-max77650.c
+++ b/drivers/gpio/gpio-max77650.c
@@ -62,18 +62,16 @@ static int max77650_gpio_direction_output(struct gpio_chip *gc,
MAX77650_REG_CNFG_GPIO, mask, regval);
}
-static void max77650_gpio_set_value(struct gpio_chip *gc,
- unsigned int offset, int value)
+static int max77650_gpio_set_value(struct gpio_chip *gc,
+ unsigned int offset, int value)
{
struct max77650_gpio_chip *chip = gpiochip_get_data(gc);
- int rv, regval;
+ int regval;
regval = value ? MAX77650_GPIO_OUT_HIGH : MAX77650_GPIO_OUT_LOW;
- rv = regmap_update_bits(chip->map, MAX77650_REG_CNFG_GPIO,
- MAX77650_GPIO_OUTVAL_MASK, regval);
- if (rv)
- dev_err(gc->parent, "cannot set GPIO value: %d\n", rv);
+ return regmap_update_bits(chip->map, MAX77650_REG_CNFG_GPIO,
+ MAX77650_GPIO_OUTVAL_MASK, regval);
}
static int max77650_gpio_get_value(struct gpio_chip *gc,
@@ -168,7 +166,7 @@ static int max77650_gpio_probe(struct platform_device *pdev)
chip->gc.direction_input = max77650_gpio_direction_input;
chip->gc.direction_output = max77650_gpio_direction_output;
- chip->gc.set = max77650_gpio_set_value;
+ chip->gc.set_rv = max77650_gpio_set_value;
chip->gc.get = max77650_gpio_get_value;
chip->gc.get_direction = max77650_gpio_get_direction;
chip->gc.set_config = max77650_gpio_set_config;
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index d89e78f0ead3..4841e4ebe7a6 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -49,6 +49,7 @@ o ` ~~~~\___/~~~~ ` controller in FPGA is ,.`
#include <linux/log2.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/slab.h>
@@ -323,9 +324,20 @@ static void bgpio_set_multiple_with_clear(struct gpio_chip *gc,
gc->write_reg(gc->reg_clr, clear_mask);
}
+static int bgpio_dir_return(struct gpio_chip *gc, unsigned int gpio, bool dir_out)
+{
+ if (!gc->bgpio_pinctrl)
+ return 0;
+
+ if (dir_out)
+ return pinctrl_gpio_direction_output(gc, gpio);
+ else
+ return pinctrl_gpio_direction_input(gc, gpio);
+}
+
static int bgpio_simple_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
- return 0;
+ return bgpio_dir_return(gc, gpio, false);
}
static int bgpio_dir_out_err(struct gpio_chip *gc, unsigned int gpio,
@@ -339,7 +351,7 @@ static int bgpio_simple_dir_out(struct gpio_chip *gc, unsigned int gpio,
{
gc->set(gc, gpio, val);
- return 0;
+ return bgpio_dir_return(gc, gpio, true);
}
static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
@@ -357,7 +369,7 @@ static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
- return 0;
+ return bgpio_dir_return(gc, gpio, false);
}
static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio)
@@ -403,7 +415,7 @@ static int bgpio_dir_out_dir_first(struct gpio_chip *gc, unsigned int gpio,
{
bgpio_dir_out(gc, gpio, val);
gc->set(gc, gpio, val);
- return 0;
+ return bgpio_dir_return(gc, gpio, true);
}
static int bgpio_dir_out_val_first(struct gpio_chip *gc, unsigned int gpio,
@@ -411,7 +423,7 @@ static int bgpio_dir_out_val_first(struct gpio_chip *gc, unsigned int gpio,
{
gc->set(gc, gpio, val);
bgpio_dir_out(gc, gpio, val);
- return 0;
+ return bgpio_dir_return(gc, gpio, true);
}
static int bgpio_setup_accessors(struct device *dev,
@@ -562,10 +574,13 @@ static int bgpio_setup_direction(struct gpio_chip *gc,
static int bgpio_request(struct gpio_chip *chip, unsigned gpio_pin)
{
- if (gpio_pin < chip->ngpio)
- return 0;
+ if (gpio_pin >= chip->ngpio)
+ return -EINVAL;
- return -EINVAL;
+ if (chip->bgpio_pinctrl)
+ return gpiochip_generic_request(chip, gpio_pin);
+
+ return 0;
}
/**
@@ -632,6 +647,12 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev,
if (ret)
return ret;
+ if (flags & BGPIOF_PINCTRL_BACKEND) {
+ gc->bgpio_pinctrl = true;
+ /* Currently this callback is only used for pinctrl */
+ gc->free = gpiochip_generic_free;
+ }
+
gc->bgpio_data = gc->read_reg(gc->reg_dat);
if (gc->set == bgpio_set_set &&
!(flags & BGPIOF_UNREADABLE_REG_SET))
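
The gpio-mmio hunks above add a BGPIOF_PINCTRL_BACKEND flag so that generic MMIO GPIO chips can route request/free and direction changes through an associated pinctrl backend. A hedged usage sketch follows; the "foo" names and register offsets are hypothetical, and only the flag itself and the existing bgpio_init() helper are taken from the code above:

/*
 * Illustrative sketch only; register offsets are hypothetical.
 */
#include <linux/device.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>

static int foo_mmio_gpio_setup(struct device *dev, struct gpio_chip *gc,
			       void __iomem *base)
{
	/*
	 * 4-byte-wide registers; with BGPIOF_PINCTRL_BACKEND the generic
	 * code defers request/free and direction changes to pinctrl.
	 */
	return bgpio_init(gc, dev, 4,
			  base + 0x00,	/* data */
			  base + 0x04,	/* set */
			  base + 0x08,	/* clear */
			  base + 0x0c,	/* direction (out) */
			  NULL,		/* no separate direction-in register */
			  BGPIOF_PINCTRL_BACKEND);
}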
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index d39c6618bade..266c0953d914 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -122,7 +122,7 @@ static void __gpio_mockup_set(struct gpio_mockup_chip *chip,
chip->lines[offset].value = !!value;
}
-static void gpio_mockup_set(struct gpio_chip *gc,
+static int gpio_mockup_set(struct gpio_chip *gc,
unsigned int offset, int value)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
@@ -130,10 +130,12 @@ static void gpio_mockup_set(struct gpio_chip *gc,
guard(mutex)(&chip->lock);
__gpio_mockup_set(chip, offset, value);
+
+ return 0;
}
-static void gpio_mockup_set_multiple(struct gpio_chip *gc,
- unsigned long *mask, unsigned long *bits)
+static int gpio_mockup_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
unsigned int bit;
@@ -142,6 +144,8 @@ static void gpio_mockup_set_multiple(struct gpio_chip *gc,
for_each_set_bit(bit, mask, gc->ngpio)
__gpio_mockup_set(chip, bit, test_bit(bit, bits));
+
+ return 0;
}
static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip,
@@ -445,9 +449,9 @@ static int gpio_mockup_probe(struct platform_device *pdev)
gc->owner = THIS_MODULE;
gc->parent = dev;
gc->get = gpio_mockup_get;
- gc->set = gpio_mockup_set;
+ gc->set_rv = gpio_mockup_set;
gc->get_multiple = gpio_mockup_get_multiple;
- gc->set_multiple = gpio_mockup_set_multiple;
+ gc->set_multiple_rv = gpio_mockup_set_multiple;
gc->direction_output = gpio_mockup_dirout;
gc->direction_input = gpio_mockup_dirin;
gc->get_direction = gpio_mockup_get_direction;
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 5ffb332e9849..3604abcb6fec 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -49,6 +49,7 @@
#include <linux/pwm.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
/*
* GPIO unit register offsets.
@@ -297,12 +298,12 @@ static unsigned int mvebu_pwmreg_blink_off_duration(struct mvebu_pwm *mvpwm)
/*
* Functions implementing the gpio_chip methods
*/
-static void mvebu_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
+static int mvebu_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
{
struct mvebu_gpio_chip *mvchip = gpiochip_get_data(chip);
- regmap_update_bits(mvchip->regs, GPIO_OUT_OFF + mvchip->offset,
- BIT(pin), value ? BIT(pin) : 0);
+ return regmap_update_bits(mvchip->regs, GPIO_OUT_OFF + mvchip->offset,
+ BIT(pin), value ? BIT(pin) : 0);
}
static int mvebu_gpio_get(struct gpio_chip *chip, unsigned int pin)
@@ -907,14 +908,14 @@ static void mvebu_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
if (is_out) {
seq_printf(s, " out %s %s\n",
- out & msk ? "hi" : "lo",
+ str_hi_lo(out & msk),
blink & msk ? "(blink )" : "");
continue;
}
seq_printf(s, " in %s (act %s) - IRQ",
- (data_in ^ in_pol) & msk ? "hi" : "lo",
- in_pol & msk ? "lo" : "hi");
+ str_hi_lo((data_in ^ in_pol) & msk),
+ str_lo_hi(in_pol & msk));
if (!((edg_msk | lvl_msk) & msk)) {
seq_puts(s, " disabled\n");
continue;
@@ -1172,7 +1173,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip->chip.direction_input = mvebu_gpio_direction_input;
mvchip->chip.get = mvebu_gpio_get;
mvchip->chip.direction_output = mvebu_gpio_direction_output;
- mvchip->chip.set = mvebu_gpio_set;
+ mvchip->chip.set_rv = mvebu_gpio_set;
if (have_irqs)
mvchip->chip.to_irq = mvebu_gpio_to_irq;
mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK;
diff --git a/drivers/gpio/gpio-nomadik.c b/drivers/gpio/gpio-nomadik.c
index 836f1cc760c2..fa19a44943fd 100644
--- a/drivers/gpio/gpio-nomadik.c
+++ b/drivers/gpio/gpio-nomadik.c
@@ -30,6 +30,7 @@
#include <linux/reset.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include <linux/gpio/gpio-nomadik.h>
@@ -430,7 +431,7 @@ void nmk_gpio_dbg_show_one(struct seq_file *s, struct pinctrl_dev *pctldev,
seq_printf(s, " gpio-%-3d (%-20.20s) out %s %s",
gpio,
label ?: "(none)",
- data_out ? "hi" : "lo",
+ str_hi_lo(data_out),
(mode < 0) ? "unknown" : modes[mode]);
} else {
int irq = chip->to_irq(chip, offset);
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index d63c1030e6ac..442435ded020 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -570,7 +570,8 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
return !!(reg_val & bit);
}
-static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
+static int pca953x_gpio_set_value(struct gpio_chip *gc, unsigned int off,
+ int val)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
u8 outreg = chip->recalc_addr(chip, chip->regs->output, off);
@@ -578,7 +579,7 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
guard(mutex)(&chip->i2c_lock);
- regmap_write_bits(chip->regmap, outreg, bit, val ? bit : 0);
+ return regmap_write_bits(chip->regmap, outreg, bit, val ? bit : 0);
}
static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off)
@@ -616,8 +617,8 @@ static int pca953x_gpio_get_multiple(struct gpio_chip *gc,
return 0;
}
-static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
- unsigned long *mask, unsigned long *bits)
+static int pca953x_gpio_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
DECLARE_BITMAP(reg_val, MAX_LINE);
@@ -627,11 +628,11 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
ret = pca953x_read_regs(chip, chip->regs->output, reg_val);
if (ret)
- return;
+ return ret;
bitmap_replace(reg_val, reg_val, bits, mask, gc->ngpio);
- pca953x_write_regs(chip, chip->regs->output, reg_val);
+ return pca953x_write_regs(chip, chip->regs->output, reg_val);
}
static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
@@ -693,10 +694,10 @@ static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
gc->direction_input = pca953x_gpio_direction_input;
gc->direction_output = pca953x_gpio_direction_output;
gc->get = pca953x_gpio_get_value;
- gc->set = pca953x_gpio_set_value;
+ gc->set_rv = pca953x_gpio_set_value;
gc->get_direction = pca953x_gpio_get_direction;
gc->get_multiple = pca953x_gpio_get_multiple;
- gc->set_multiple = pca953x_gpio_set_multiple;
+ gc->set_multiple_rv = pca953x_gpio_set_multiple;
gc->set_config = pca953x_gpio_set_config;
gc->can_sleep = true;
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 7c57eaeb0afe..2e5f5d7f8865 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -5,6 +5,8 @@
* Copyright (C) 2007 David Brownell
*/
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
@@ -272,12 +274,11 @@ static const struct irq_chip pcf857x_irq_chip = {
static int pcf857x_probe(struct i2c_client *client)
{
+ struct gpio_desc *reset_gpio;
struct pcf857x *gpio;
unsigned int n_latch = 0;
int status;
- device_property_read_u32(&client->dev, "lines-initial-states", &n_latch);
-
/* Allocate, initialize, and register this gpio_chip. */
gpio = devm_kzalloc(&client->dev, sizeof(*gpio), GFP_KERNEL);
if (!gpio)
@@ -297,6 +298,30 @@ static int pcf857x_probe(struct i2c_client *client)
gpio->chip.direction_output = pcf857x_output;
gpio->chip.ngpio = (uintptr_t)i2c_get_match_data(client);
+ reset_gpio = devm_gpiod_get_optional(&client->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(reset_gpio))
+ return dev_err_probe(&client->dev, PTR_ERR(reset_gpio),
+ "failed to get reset GPIO\n");
+
+ if (reset_gpio) {
+ /* Reset is already asserted: devm_gpiod_get_optional() requested it with GPIOD_OUT_HIGH */
+ fsleep(4); /* tw(rst) > 4us */
+ gpiod_set_value_cansleep(reset_gpio, 0);
+ fsleep(100); /* trst > 100us */
+
+ /*
+ * Performing a reset means "The PCA9670 registers and I2C-bus
+ * state machine will be held in their default state until the
+ * RESET input is once again HIGH".
+ *
+ * This is the same as writing 1 for all pins, which is the same
+ * as n_latch=0, the default value of the variable.
+ */
+ } else {
+ device_property_read_u32(&client->dev, "lines-initial-states",
+ &n_latch);
+ }
+
/* NOTE: the OnSemi jlc1562b is also largely compatible with
* these parts, notably for output. It has a low-resolution
* DAC instead of pin change IRQs; and its inputs can be the
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index a7a1cdf7ac66..18c965ee02c8 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -336,9 +336,6 @@ static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask,
unsigned long flags;
bankmask = mask[0] & GENMASK(chip->ngpio - 1, 0);
- if (chip->valid_mask)
- bankmask &= chip->valid_mask[0];
-
if (!bankmask)
return 0;
@@ -380,9 +377,6 @@ static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask,
u32 val, bankmask;
bankmask = mask[0] & GENMASK(chip->ngpio - 1, 0);
- if (chip->valid_mask)
- bankmask &= chip->valid_mask[0];
-
if (!bankmask)
return;
@@ -487,10 +481,13 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins)
static void gpio_rcar_enable_inputs(struct gpio_rcar_priv *p)
{
u32 mask = GENMASK(p->gpio_chip.ngpio - 1, 0);
+ const unsigned long *valid_mask;
+
+ valid_mask = gpiochip_query_valid_mask(&p->gpio_chip);
/* Select "Input Enable" in INEN */
- if (p->gpio_chip.valid_mask)
- mask &= p->gpio_chip.valid_mask[0];
+ if (valid_mask)
+ mask &= valid_mask[0];
if (mask)
gpio_rcar_write(p, INEN, gpio_rcar_read(p, INEN) | mask);
}
diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c
index 05f8781b5204..87c4225784cf 100644
--- a/drivers/gpio/gpio-regmap.c
+++ b/drivers/gpio/gpio-regmap.c
@@ -17,6 +17,8 @@
#include <linux/gpio/driver.h>
#include <linux/gpio/regmap.h>
+#include "gpiolib.h"
+
struct gpio_regmap {
struct device *parent;
struct regmap *regmap;
@@ -81,33 +83,43 @@ static int gpio_regmap_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & mask);
}
-static void gpio_regmap_set(struct gpio_chip *chip, unsigned int offset,
- int val)
+static int gpio_regmap_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct gpio_regmap *gpio = gpiochip_get_data(chip);
unsigned int base = gpio_regmap_addr(gpio->reg_set_base);
unsigned int reg, mask;
+ int ret;
+
+ ret = gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
+ if (ret)
+ return ret;
- gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
if (val)
- regmap_update_bits(gpio->regmap, reg, mask, mask);
+ ret = regmap_update_bits(gpio->regmap, reg, mask, mask);
else
- regmap_update_bits(gpio->regmap, reg, mask, 0);
+ ret = regmap_update_bits(gpio->regmap, reg, mask, 0);
+
+ return ret;
}
-static void gpio_regmap_set_with_clear(struct gpio_chip *chip,
- unsigned int offset, int val)
+static int gpio_regmap_set_with_clear(struct gpio_chip *chip,
+ unsigned int offset, int val)
{
struct gpio_regmap *gpio = gpiochip_get_data(chip);
unsigned int base, reg, mask;
+ int ret;
if (val)
base = gpio_regmap_addr(gpio->reg_set_base);
else
base = gpio_regmap_addr(gpio->reg_clr_base);
- gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
- regmap_write(gpio->regmap, reg, mask);
+ ret = gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
+ if (ret)
+ return ret;
+
+ return regmap_write(gpio->regmap, reg, mask);
}
static int gpio_regmap_get_direction(struct gpio_chip *chip,
@@ -210,9 +222,6 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
if (!config->parent)
return ERR_PTR(-EINVAL);
- if (!config->ngpio)
- return ERR_PTR(-EINVAL);
-
/* we need at least one */
if (!config->reg_dat_base && !config->reg_set_base)
return ERR_PTR(-EINVAL);
@@ -233,31 +242,16 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
gpio->parent = config->parent;
gpio->driver_data = config->drvdata;
gpio->regmap = config->regmap;
- gpio->ngpio_per_reg = config->ngpio_per_reg;
- gpio->reg_stride = config->reg_stride;
- gpio->reg_mask_xlate = config->reg_mask_xlate;
gpio->reg_dat_base = config->reg_dat_base;
gpio->reg_set_base = config->reg_set_base;
gpio->reg_clr_base = config->reg_clr_base;
gpio->reg_dir_in_base = config->reg_dir_in_base;
gpio->reg_dir_out_base = config->reg_dir_out_base;
- /* if not set, assume there is only one register */
- if (!gpio->ngpio_per_reg)
- gpio->ngpio_per_reg = config->ngpio;
-
- /* if not set, assume they are consecutive */
- if (!gpio->reg_stride)
- gpio->reg_stride = 1;
-
- if (!gpio->reg_mask_xlate)
- gpio->reg_mask_xlate = gpio_regmap_simple_xlate;
-
chip = &gpio->gpio_chip;
chip->parent = config->parent;
chip->fwnode = config->fwnode;
chip->base = -1;
- chip->ngpio = config->ngpio;
chip->names = config->names;
chip->label = config->label ?: dev_name(config->parent);
chip->can_sleep = regmap_might_sleep(config->regmap);
@@ -266,9 +260,9 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
chip->free = gpiochip_generic_free;
chip->get = gpio_regmap_get;
if (gpio->reg_set_base && gpio->reg_clr_base)
- chip->set = gpio_regmap_set_with_clear;
+ chip->set_rv = gpio_regmap_set_with_clear;
else if (gpio->reg_set_base)
- chip->set = gpio_regmap_set;
+ chip->set_rv = gpio_regmap_set;
chip->get_direction = gpio_regmap_get_direction;
if (gpio->reg_dir_in_base || gpio->reg_dir_out_base) {
@@ -276,6 +270,27 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
chip->direction_output = gpio_regmap_direction_output;
}
+ chip->ngpio = config->ngpio;
+ if (!chip->ngpio) {
+ ret = gpiochip_get_ngpios(chip, chip->parent);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ /* if not set, assume there is only one register */
+ gpio->ngpio_per_reg = config->ngpio_per_reg;
+ if (!gpio->ngpio_per_reg)
+ gpio->ngpio_per_reg = config->ngpio;
+
+ /* if not set, assume they are consecutive */
+ gpio->reg_stride = config->reg_stride;
+ if (!gpio->reg_stride)
+ gpio->reg_stride = 1;
+
+ gpio->reg_mask_xlate = config->reg_mask_xlate;
+ if (!gpio->reg_mask_xlate)
+ gpio->reg_mask_xlate = gpio_regmap_simple_xlate;
+
ret = gpiochip_add_data(chip, gpio);
if (ret < 0)
goto err_free_gpio;
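
With the gpio-regmap changes above, a zero config->ngpio is no longer rejected: gpio_regmap_register() now falls back to gpiochip_get_ngpios(), which reads the "ngpios" property from the chip's firmware node. A hedged sketch of a caller relying on that fallback (register offsets and names are made up):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/regmap.h>
#include <linux/regmap.h>

/* Sketch: the line count comes from an "ngpios" firmware property. */
static int foo_gpio_probe(struct device *dev, struct regmap *regmap)
{
	struct gpio_regmap_config config = {
		.parent = dev,
		.regmap = regmap,
		.reg_dat_base = 0x10,	/* assumed data register */
		.reg_set_base = 0x14,	/* assumed set register */
		/* .ngpio left at 0: resolved via gpiochip_get_ngpios() */
	};

	return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &config));
}
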
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index b6c230fab840..f638219a7c4f 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -10,7 +10,6 @@
#include <linux/array_size.h>
#include <linux/bitmap.h>
#include <linux/cleanup.h>
-#include <linux/completion.h>
#include <linux/configfs.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -37,6 +36,8 @@
#include <linux/sysfs.h>
#include <linux/types.h>
+#include "dev-sync-probe.h"
+
#define GPIO_SIM_NGPIO_MAX 1024
#define GPIO_SIM_PROP_MAX 4 /* Max 3 properties + sentinel. */
#define GPIO_SIM_NUM_ATTRS 3 /* value, pull and sentinel */
@@ -119,12 +120,14 @@ static int gpio_sim_get(struct gpio_chip *gc, unsigned int offset)
return !!test_bit(offset, chip->value_map);
}
-static void gpio_sim_set(struct gpio_chip *gc, unsigned int offset, int value)
+static int gpio_sim_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct gpio_sim_chip *chip = gpiochip_get_data(gc);
scoped_guard(mutex, &chip->lock)
__assign_bit(offset, chip->value_map, value);
+
+ return 0;
}
static int gpio_sim_get_multiple(struct gpio_chip *gc,
@@ -138,14 +141,16 @@ static int gpio_sim_get_multiple(struct gpio_chip *gc,
return 0;
}
-static void gpio_sim_set_multiple(struct gpio_chip *gc,
- unsigned long *mask, unsigned long *bits)
+static int gpio_sim_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
{
struct gpio_sim_chip *chip = gpiochip_get_data(gc);
scoped_guard(mutex, &chip->lock)
bitmap_replace(chip->value_map, chip->value_map, bits, mask,
gc->ngpio);
+
+ return 0;
}
static int gpio_sim_direction_output(struct gpio_chip *gc,
@@ -481,9 +486,9 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
gc->parent = dev;
gc->fwnode = swnode;
gc->get = gpio_sim_get;
- gc->set = gpio_sim_set;
+ gc->set_rv = gpio_sim_set;
gc->get_multiple = gpio_sim_get_multiple;
- gc->set_multiple = gpio_sim_set_multiple;
+ gc->set_multiple_rv = gpio_sim_set_multiple;
gc->direction_output = gpio_sim_direction_output;
gc->direction_input = gpio_sim_direction_input;
gc->get_direction = gpio_sim_get_direction;
@@ -541,14 +546,9 @@ static struct platform_driver gpio_sim_driver = {
};
struct gpio_sim_device {
+ struct dev_sync_probe_data probe_data;
struct config_group group;
- /*
- * If pdev is NULL, the device is 'pending' (waiting for configuration).
- * Once the pointer is assigned, the device has been created and the
- * item is 'live'.
- */
- struct platform_device *pdev;
int id;
/*
@@ -562,46 +562,11 @@ struct gpio_sim_device {
*/
struct mutex lock;
- /*
- * This is used to synchronously wait for the driver's probe to complete
- * and notify the user-space about any errors.
- */
- struct notifier_block bus_notifier;
- struct completion probe_completion;
- bool driver_bound;
-
struct gpiod_hog *hogs;
struct list_head bank_list;
};
-/* This is called with dev->lock already taken. */
-static int gpio_sim_bus_notifier_call(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct gpio_sim_device *simdev = container_of(nb,
- struct gpio_sim_device,
- bus_notifier);
- struct device *dev = data;
- char devname[32];
-
- snprintf(devname, sizeof(devname), "gpio-sim.%u", simdev->id);
-
- if (!device_match_name(dev, devname))
- return NOTIFY_DONE;
-
- if (action == BUS_NOTIFY_BOUND_DRIVER)
- simdev->driver_bound = true;
- else if (action == BUS_NOTIFY_DRIVER_NOT_BOUND)
- simdev->driver_bound = false;
- else
- return NOTIFY_DONE;
-
- complete(&simdev->probe_completion);
-
- return NOTIFY_OK;
-}
-
static struct gpio_sim_device *to_gpio_sim_device(struct config_item *item)
{
struct config_group *group = to_config_group(item);
@@ -708,7 +673,7 @@ static bool gpio_sim_device_is_live(struct gpio_sim_device *dev)
{
lockdep_assert_held(&dev->lock);
- return !!dev->pdev;
+ return !!dev->probe_data.pdev;
}
static char *gpio_sim_strdup_trimmed(const char *str, size_t count)
@@ -730,7 +695,7 @@ static ssize_t gpio_sim_device_config_dev_name_show(struct config_item *item,
guard(mutex)(&dev->lock);
- pdev = dev->pdev;
+ pdev = dev->probe_data.pdev;
if (pdev)
return sprintf(page, "%s\n", dev_name(&pdev->dev));
@@ -939,7 +904,6 @@ static int gpio_sim_device_activate(struct gpio_sim_device *dev)
{
struct platform_device_info pdevinfo;
struct fwnode_handle *swnode;
- struct platform_device *pdev;
struct gpio_sim_bank *bank;
int ret;
@@ -981,31 +945,13 @@ static int gpio_sim_device_activate(struct gpio_sim_device *dev)
pdevinfo.fwnode = swnode;
pdevinfo.id = dev->id;
- reinit_completion(&dev->probe_completion);
- dev->driver_bound = false;
- bus_register_notifier(&platform_bus_type, &dev->bus_notifier);
-
- pdev = platform_device_register_full(&pdevinfo);
- if (IS_ERR(pdev)) {
- bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier);
- gpio_sim_remove_hogs(dev);
- gpio_sim_remove_swnode_recursive(swnode);
- return PTR_ERR(pdev);
- }
-
- wait_for_completion(&dev->probe_completion);
- bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier);
-
- if (!dev->driver_bound) {
- /* Probe failed, check kernel log. */
- platform_device_unregister(pdev);
+ ret = dev_sync_probe_register(&dev->probe_data, &pdevinfo);
+ if (ret) {
gpio_sim_remove_hogs(dev);
gpio_sim_remove_swnode_recursive(swnode);
- return -ENXIO;
+ return ret;
}
- dev->pdev = pdev;
-
return 0;
}
@@ -1015,11 +961,10 @@ static void gpio_sim_device_deactivate(struct gpio_sim_device *dev)
lockdep_assert_held(&dev->lock);
- swnode = dev_fwnode(&dev->pdev->dev);
- platform_device_unregister(dev->pdev);
+ swnode = dev_fwnode(&dev->probe_data.pdev->dev);
+ dev_sync_probe_unregister(&dev->probe_data);
gpio_sim_remove_hogs(dev);
gpio_sim_remove_swnode_recursive(swnode);
- dev->pdev = NULL;
}
static void
@@ -1120,7 +1065,7 @@ static ssize_t gpio_sim_bank_config_chip_name_show(struct config_item *item,
guard(mutex)(&dev->lock);
if (gpio_sim_device_is_live(dev))
- return device_for_each_child(&dev->pdev->dev, &ctx,
+ return device_for_each_child(&dev->probe_data.pdev->dev, &ctx,
gpio_sim_emit_chip_name);
return sprintf(page, "none\n");
@@ -1561,8 +1506,7 @@ gpio_sim_config_make_device_group(struct config_group *group, const char *name)
mutex_init(&dev->lock);
INIT_LIST_HEAD(&dev->bank_list);
- dev->bus_notifier.notifier_call = gpio_sim_bus_notifier_call;
- init_completion(&dev->probe_completion);
+ dev_sync_probe_init(&dev->probe_data);
return &no_free_ptr(dev)->group;
}
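
The gpio-sim conversion above (and the matching gpio-virtuser one below) replaces the open-coded bus-notifier-plus-completion logic with the shared dev-sync-probe helper introduced elsewhere in this series. Judging only from its use in these hunks, the helper is driven roughly as follows (a sketch of the usage pattern, not of the helper's implementation; the "foo" names are hypothetical):

#include <linux/fwnode.h>
#include <linux/platform_device.h>

#include "dev-sync-probe.h"

struct foo_configfs_device {
	struct dev_sync_probe_data probe_data;
	/* ... configfs bookkeeping ... */
};

static int foo_activate(struct foo_configfs_device *dev,
			struct fwnode_handle *swnode, int id)
{
	struct platform_device_info pdevinfo = {
		.name = "gpio-foo",	/* hypothetical platform driver name */
		.fwnode = swnode,
		.id = id,
	};

	/* Registers the device and waits until its driver binds (or fails). */
	return dev_sync_probe_register(&dev->probe_data, &pdevinfo);
}

static void foo_deactivate(struct foo_configfs_device *dev)
{
	dev_sync_probe_unregister(&dev->probe_data);
}

As in the last hunk above, dev_sync_probe_init(&dev->probe_data) is expected to be called once when the configfs item is allocated.
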
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 222279a9d82b..dce8ff322e47 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
/*
* These registers are modified under the irq bus lock and cached to avoid
@@ -282,8 +283,7 @@ static void stmpe_dbg_show_one(struct seq_file *s,
if (dir) {
seq_printf(s, " gpio-%-3d (%-20.20s) out %s",
- gpio, label ?: "(none)",
- val ? "hi" : "lo");
+ gpio, label ?: "(none)", str_hi_lo(val));
} else {
u8 edge_det_reg;
u8 rise_reg;
@@ -352,7 +352,7 @@ static void stmpe_dbg_show_one(struct seq_file *s,
seq_printf(s, " gpio-%-3d (%-20.20s) in %s %13s %13s %25s %25s",
gpio, label ?: "(none)",
- val ? "hi" : "lo",
+ str_hi_lo(val),
edge_det_values[edge_det],
irqen ? "IRQ-enabled" : "IRQ-disabled",
rise_values[rise],
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index c36a9dbccd4d..4dad7ce0c4dc 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -36,7 +36,6 @@ struct vf610_gpio_port {
struct clk *clk_port;
struct clk *clk_gpio;
int irq;
- spinlock_t lock; /* protect gpio direction registers */
};
#define GPIO_PDOR 0x00
@@ -94,78 +93,6 @@ static inline u32 vf610_gpio_readl(void __iomem *reg)
return readl_relaxed(reg);
}
-static int vf610_gpio_get(struct gpio_chip *gc, unsigned int gpio)
-{
- struct vf610_gpio_port *port = gpiochip_get_data(gc);
- u32 mask = BIT(gpio);
- unsigned long offset = GPIO_PDIR;
-
- if (port->sdata->have_paddr) {
- mask &= vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
- if (mask)
- offset = GPIO_PDOR;
- }
-
- return !!(vf610_gpio_readl(port->gpio_base + offset) & BIT(gpio));
-}
-
-static void vf610_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
-{
- struct vf610_gpio_port *port = gpiochip_get_data(gc);
- u32 mask = BIT(gpio);
- unsigned long offset = val ? GPIO_PSOR : GPIO_PCOR;
-
- vf610_gpio_writel(mask, port->gpio_base + offset);
-}
-
-static int vf610_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio)
-{
- struct vf610_gpio_port *port = gpiochip_get_data(chip);
- u32 mask = BIT(gpio);
- u32 val;
-
- if (port->sdata->have_paddr) {
- guard(spinlock_irqsave)(&port->lock);
- val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
- val &= ~mask;
- vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
- }
-
- return pinctrl_gpio_direction_input(chip, gpio);
-}
-
-static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio,
- int value)
-{
- struct vf610_gpio_port *port = gpiochip_get_data(chip);
- u32 mask = BIT(gpio);
- u32 val;
-
- vf610_gpio_set(chip, gpio, value);
-
- if (port->sdata->have_paddr) {
- guard(spinlock_irqsave)(&port->lock);
- val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
- val |= mask;
- vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
- }
-
- return pinctrl_gpio_direction_output(chip, gpio);
-}
-
-static int vf610_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
-{
- struct vf610_gpio_port *port = gpiochip_get_data(gc);
- u32 mask = BIT(gpio);
-
- mask &= vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
-
- if (mask)
- return GPIO_LINE_DIRECTION_OUT;
-
- return GPIO_LINE_DIRECTION_IN;
-}
-
static void vf610_gpio_irq_handler(struct irq_desc *desc)
{
struct vf610_gpio_port *port =
@@ -291,6 +218,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
struct vf610_gpio_port *port;
struct gpio_chip *gc;
struct gpio_irq_chip *girq;
+ unsigned long flags;
int i;
int ret;
bool dual_base;
@@ -300,7 +228,6 @@ static int vf610_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
port->sdata = device_get_match_data(dev);
- spin_lock_init(&port->lock);
dual_base = port->sdata->have_dual_base;
@@ -367,23 +294,25 @@ static int vf610_gpio_probe(struct platform_device *pdev)
}
gc = &port->gc;
- gc->parent = dev;
- gc->label = dev_name(dev);
- gc->ngpio = VF610_GPIO_PER_PORT;
- gc->base = -1;
-
- gc->request = gpiochip_generic_request;
- gc->free = gpiochip_generic_free;
- gc->direction_input = vf610_gpio_direction_input;
- gc->get = vf610_gpio_get;
- gc->direction_output = vf610_gpio_direction_output;
- gc->set = vf610_gpio_set;
+ flags = BGPIOF_PINCTRL_BACKEND;
/*
- * only IP has Port Data Direction Register(PDDR) can
- * support get direction
+ * Only read the current value of output lines from the output
+ * register when the direction register is available, i.e. when
+ * the direction can actually be switched.
*/
if (port->sdata->have_paddr)
- gc->get_direction = vf610_gpio_get_direction;
+ flags |= BGPIOF_READ_OUTPUT_REG_SET;
+ ret = bgpio_init(gc, dev, 4,
+ port->gpio_base + GPIO_PDIR,
+ port->gpio_base + GPIO_PDOR,
+ NULL,
+ port->sdata->have_paddr ? port->gpio_base + GPIO_PDDR : NULL,
+ NULL,
+ flags);
+ if (ret)
+ return dev_err_probe(dev, ret, "unable to init generic GPIO\n");
+ gc->label = dev_name(dev);
+ gc->base = -1;
/* Mask all GPIO interrupts */
for (i = 0; i < gc->ngpio; i++)
diff --git a/drivers/gpio/gpio-virtio.c b/drivers/gpio/gpio-virtio.c
index 93544ff62513..ac39da17a29b 100644
--- a/drivers/gpio/gpio-virtio.c
+++ b/drivers/gpio/gpio-virtio.c
@@ -350,19 +350,6 @@ static void virtio_gpio_irq_bus_sync_unlock(struct irq_data *d)
mutex_unlock(&vgpio->irq_lock);
}
-static struct irq_chip vgpio_irq_chip = {
- .name = "virtio-gpio",
- .irq_enable = virtio_gpio_irq_enable,
- .irq_disable = virtio_gpio_irq_disable,
- .irq_mask = virtio_gpio_irq_mask,
- .irq_unmask = virtio_gpio_irq_unmask,
- .irq_set_type = virtio_gpio_irq_set_type,
-
- /* These are required to implement irqchip for slow busses */
- .irq_bus_lock = virtio_gpio_irq_bus_lock,
- .irq_bus_sync_unlock = virtio_gpio_irq_bus_sync_unlock,
-};
-
static bool ignore_irq(struct virtio_gpio *vgpio, int gpio,
struct vgpio_irq_line *irq_line)
{
@@ -542,6 +529,7 @@ static int virtio_gpio_probe(struct virtio_device *vdev)
struct virtio_gpio_config config;
struct device *dev = &vdev->dev;
struct virtio_gpio *vgpio;
+ struct irq_chip *gpio_irq_chip;
u32 gpio_names_size;
u16 ngpio;
int ret, i;
@@ -591,13 +579,26 @@ static int virtio_gpio_probe(struct virtio_device *vdev)
if (!vgpio->irq_lines)
return -ENOMEM;
+ gpio_irq_chip = devm_kzalloc(dev, sizeof(*gpio_irq_chip), GFP_KERNEL);
+ if (!gpio_irq_chip)
+ return -ENOMEM;
+
+ gpio_irq_chip->name = dev_name(dev);
+ gpio_irq_chip->irq_enable = virtio_gpio_irq_enable;
+ gpio_irq_chip->irq_disable = virtio_gpio_irq_disable;
+ gpio_irq_chip->irq_mask = virtio_gpio_irq_mask;
+ gpio_irq_chip->irq_unmask = virtio_gpio_irq_unmask;
+ gpio_irq_chip->irq_set_type = virtio_gpio_irq_set_type;
+ gpio_irq_chip->irq_bus_lock = virtio_gpio_irq_bus_lock;
+ gpio_irq_chip->irq_bus_sync_unlock = virtio_gpio_irq_bus_sync_unlock;
+
/* The event comes from the outside so no parent handler */
vgpio->gc.irq.parent_handler = NULL;
vgpio->gc.irq.num_parents = 0;
vgpio->gc.irq.parents = NULL;
vgpio->gc.irq.default_type = IRQ_TYPE_NONE;
vgpio->gc.irq.handler = handle_level_irq;
- vgpio->gc.irq.chip = &vgpio_irq_chip;
+ vgpio->gc.irq.chip = gpio_irq_chip;
for (i = 0; i < ngpio; i++) {
vgpio->irq_lines[i].type = VIRTIO_GPIO_IRQ_TYPE_NONE;
diff --git a/drivers/gpio/gpio-virtuser.c b/drivers/gpio/gpio-virtuser.c
index e89f299f2140..13407fd4f0eb 100644
--- a/drivers/gpio/gpio-virtuser.c
+++ b/drivers/gpio/gpio-virtuser.c
@@ -11,7 +11,6 @@
#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/cleanup.h>
-#include <linux/completion.h>
#include <linux/configfs.h>
#include <linux/debugfs.h>
#include <linux/device.h>
@@ -37,6 +36,8 @@
#include <linux/string_helpers.h>
#include <linux/types.h>
+#include "dev-sync-probe.h"
+
#define GPIO_VIRTUSER_NAME_BUF_LEN 32
static DEFINE_IDA(gpio_virtuser_ida);
@@ -973,49 +974,17 @@ static struct platform_driver gpio_virtuser_driver = {
};
struct gpio_virtuser_device {
+ struct dev_sync_probe_data probe_data;
struct config_group group;
- struct platform_device *pdev;
int id;
struct mutex lock;
- struct notifier_block bus_notifier;
- struct completion probe_completion;
- bool driver_bound;
-
struct gpiod_lookup_table *lookup_table;
struct list_head lookup_list;
};
-static int gpio_virtuser_bus_notifier_call(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct gpio_virtuser_device *vdev;
- struct device *dev = data;
- char devname[32];
-
- vdev = container_of(nb, struct gpio_virtuser_device, bus_notifier);
- snprintf(devname, sizeof(devname), "gpio-virtuser.%d", vdev->id);
-
- if (!device_match_name(dev, devname))
- return NOTIFY_DONE;
-
- switch (action) {
- case BUS_NOTIFY_BOUND_DRIVER:
- vdev->driver_bound = true;
- break;
- case BUS_NOTIFY_DRIVER_NOT_BOUND:
- vdev->driver_bound = false;
- break;
- default:
- return NOTIFY_DONE;
- }
-
- complete(&vdev->probe_completion);
- return NOTIFY_OK;
-}
-
static struct gpio_virtuser_device *
to_gpio_virtuser_device(struct config_item *item)
{
@@ -1029,7 +998,7 @@ gpio_virtuser_device_is_live(struct gpio_virtuser_device *dev)
{
lockdep_assert_held(&dev->lock);
- return !!dev->pdev;
+ return !!dev->probe_data.pdev;
}
struct gpio_virtuser_lookup {
@@ -1369,7 +1338,7 @@ gpio_virtuser_device_config_dev_name_show(struct config_item *item,
guard(mutex)(&dev->lock);
- pdev = dev->pdev;
+ pdev = dev->probe_data.pdev;
if (pdev)
return sprintf(page, "%s\n", dev_name(&pdev->dev));
@@ -1478,7 +1447,6 @@ gpio_virtuser_device_activate(struct gpio_virtuser_device *dev)
{
struct platform_device_info pdevinfo;
struct fwnode_handle *swnode;
- struct platform_device *pdev;
int ret;
lockdep_assert_held(&dev->lock);
@@ -1499,31 +1467,12 @@ gpio_virtuser_device_activate(struct gpio_virtuser_device *dev)
if (ret)
goto err_remove_swnode;
- reinit_completion(&dev->probe_completion);
- dev->driver_bound = false;
- bus_register_notifier(&platform_bus_type, &dev->bus_notifier);
-
- pdev = platform_device_register_full(&pdevinfo);
- if (IS_ERR(pdev)) {
- ret = PTR_ERR(pdev);
- bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier);
+ ret = dev_sync_probe_register(&dev->probe_data, &pdevinfo);
+ if (ret)
goto err_remove_lookup_table;
- }
-
- wait_for_completion(&dev->probe_completion);
- bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier);
-
- if (!dev->driver_bound) {
- ret = -ENXIO;
- goto err_unregister_pdev;
- }
-
- dev->pdev = pdev;
return 0;
-err_unregister_pdev:
- platform_device_unregister(pdev);
err_remove_lookup_table:
gpio_virtuser_remove_lookup_table(dev);
err_remove_swnode:
@@ -1539,11 +1488,10 @@ gpio_virtuser_device_deactivate(struct gpio_virtuser_device *dev)
lockdep_assert_held(&dev->lock);
- swnode = dev_fwnode(&dev->pdev->dev);
- platform_device_unregister(dev->pdev);
+ swnode = dev_fwnode(&dev->probe_data.pdev->dev);
+ dev_sync_probe_unregister(&dev->probe_data);
gpio_virtuser_remove_lookup_table(dev);
fwnode_remove_software_node(swnode);
- dev->pdev = NULL;
}
static void
@@ -1772,8 +1720,7 @@ gpio_virtuser_config_make_device_group(struct config_group *group,
&gpio_virtuser_device_config_group_type);
mutex_init(&dev->lock);
INIT_LIST_HEAD(&dev->lookup_list);
- dev->bus_notifier.notifier_call = gpio_virtuser_bus_notifier_call;
- init_completion(&dev->probe_completion);
+ dev_sync_probe_init(&dev->probe_data);
return &no_free_ptr(dev)->group;
}
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
index 94ca9d03c094..1ec24f6f9300 100644
--- a/drivers/gpio/gpio-wcove.c
+++ b/drivers/gpio/gpio-wcove.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/seq_file.h>
+#include <linux/string_choices.h>
/*
* Whiskey Cove PMIC has 13 physical GPIO pins divided into 3 banks:
@@ -393,7 +394,7 @@ static void wcove_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
seq_printf(s, " gpio-%-2d %s %s %s %s ctlo=%2x,%s %s\n",
gpio, ctlo & CTLO_DIR_OUT ? "out" : "in ",
- ctli & 0x1 ? "hi" : "lo",
+ str_hi_lo(ctli & 0x1),
ctli & CTLI_INTCNT_NE ? "fall" : " ",
ctli & CTLI_INTCNT_PE ? "rise" : " ",
ctlo,
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index f7d5120ff8f1..61bb83a1e8ae 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -16,6 +16,7 @@
#include <linux/mfd/core.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
+#include <linux/string_choices.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/pdata.h>
@@ -234,7 +235,7 @@ static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
seq_printf(s, " %s %s %s %s%s\n"
" %s%s (0x%4x)\n",
reg & WM831X_GPN_DIR ? "in" : "out",
- wm831x_gpio_get(chip, i) ? "high" : "low",
+ str_high_low(wm831x_gpio_get(chip, i)),
pull,
powerdomain,
reg & WM831X_GPN_POL ? "" : " inverted",
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 792d94c49077..c58a7e1349b4 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -45,8 +45,7 @@
* struct xgpio_instance - Stores information about GPIO device
* @gc: GPIO chip
* @regs: register block
- * @hw_map: GPIO pin mapping on hardware side
- * @sw_map: GPIO pin mapping on software side
+ * @map: GPIO pin mapping on hardware side
* @state: GPIO write state shadow register
* @last_irq_read: GPIO read state register from last interrupt
* @dir: GPIO direction shadow register
@@ -60,8 +59,7 @@
struct xgpio_instance {
struct gpio_chip gc;
void __iomem *regs;
- DECLARE_BITMAP(hw_map, 64);
- DECLARE_BITMAP(sw_map, 64);
+ DECLARE_BITMAP(map, 64);
DECLARE_BITMAP(state, 64);
DECLARE_BITMAP(last_irq_read, 64);
DECLARE_BITMAP(dir, 64);
@@ -73,33 +71,6 @@ struct xgpio_instance {
struct clk *clk;
};
-static inline int xgpio_from_bit(struct xgpio_instance *chip, int bit)
-{
- return bitmap_bitremap(bit, chip->hw_map, chip->sw_map, 64);
-}
-
-static inline int xgpio_to_bit(struct xgpio_instance *chip, int gpio)
-{
- return bitmap_bitremap(gpio, chip->sw_map, chip->hw_map, 64);
-}
-
-static inline u32 xgpio_get_value32(const unsigned long *map, int bit)
-{
- const size_t index = BIT_WORD(bit);
- const unsigned long offset = (bit % BITS_PER_LONG) & BIT(5);
-
- return (map[index] >> offset) & 0xFFFFFFFFul;
-}
-
-static inline void xgpio_set_value32(unsigned long *map, int bit, u32 v)
-{
- const size_t index = BIT_WORD(bit);
- const unsigned long offset = (bit % BITS_PER_LONG) & BIT(5);
-
- map[index] &= ~(0xFFFFFFFFul << offset);
- map[index] |= (unsigned long)v << offset;
-}
-
static inline int xgpio_regoffset(struct xgpio_instance *chip, int ch)
{
switch (ch) {
@@ -115,20 +86,23 @@ static inline int xgpio_regoffset(struct xgpio_instance *chip, int ch)
static void xgpio_read_ch(struct xgpio_instance *chip, int reg, int bit, unsigned long *a)
{
void __iomem *addr = chip->regs + reg + xgpio_regoffset(chip, bit / 32);
+ unsigned long value = xgpio_readreg(addr);
- xgpio_set_value32(a, bit, xgpio_readreg(addr));
+ bitmap_write(a, value, round_down(bit, 32), 32);
}
static void xgpio_write_ch(struct xgpio_instance *chip, int reg, int bit, unsigned long *a)
{
void __iomem *addr = chip->regs + reg + xgpio_regoffset(chip, bit / 32);
+ unsigned long value = bitmap_read(a, round_down(bit, 32), 32);
- xgpio_writereg(addr, xgpio_get_value32(a, bit));
+ xgpio_writereg(addr, value);
}
static void xgpio_read_ch_all(struct xgpio_instance *chip, int reg, unsigned long *a)
{
- int bit, lastbit = xgpio_to_bit(chip, chip->gc.ngpio - 1);
+ unsigned long lastbit = find_nth_bit(chip->map, 64, chip->gc.ngpio - 1);
+ int bit;
for (bit = 0; bit <= lastbit ; bit += 32)
xgpio_read_ch(chip, reg, bit, a);
@@ -136,7 +110,8 @@ static void xgpio_read_ch_all(struct xgpio_instance *chip, int reg, unsigned lon
static void xgpio_write_ch_all(struct xgpio_instance *chip, int reg, unsigned long *a)
{
- int bit, lastbit = xgpio_to_bit(chip, chip->gc.ngpio - 1);
+ unsigned long lastbit = find_nth_bit(chip->map, 64, chip->gc.ngpio - 1);
+ int bit;
for (bit = 0; bit <= lastbit ; bit += 32)
xgpio_write_ch(chip, reg, bit, a);
@@ -156,7 +131,7 @@ static void xgpio_write_ch_all(struct xgpio_instance *chip, int reg, unsigned lo
static int xgpio_get(struct gpio_chip *gc, unsigned int gpio)
{
struct xgpio_instance *chip = gpiochip_get_data(gc);
- int bit = xgpio_to_bit(chip, gpio);
+ unsigned long bit = find_nth_bit(chip->map, 64, gpio);
DECLARE_BITMAP(state, 64);
xgpio_read_ch(chip, XGPIO_DATA_OFFSET, bit, state);
@@ -177,7 +152,7 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
unsigned long flags;
struct xgpio_instance *chip = gpiochip_get_data(gc);
- int bit = xgpio_to_bit(chip, gpio);
+ unsigned long bit = find_nth_bit(chip->map, 64, gpio);
raw_spin_lock_irqsave(&chip->gpio_lock, flags);
@@ -207,8 +182,8 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
unsigned long flags;
struct xgpio_instance *chip = gpiochip_get_data(gc);
- bitmap_remap(hw_mask, mask, chip->sw_map, chip->hw_map, 64);
- bitmap_remap(hw_bits, bits, chip->sw_map, chip->hw_map, 64);
+ bitmap_scatter(hw_mask, mask, chip->map, 64);
+ bitmap_scatter(hw_bits, bits, chip->map, 64);
raw_spin_lock_irqsave(&chip->gpio_lock, flags);
@@ -234,7 +209,7 @@ static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
unsigned long flags;
struct xgpio_instance *chip = gpiochip_get_data(gc);
- int bit = xgpio_to_bit(chip, gpio);
+ unsigned long bit = find_nth_bit(chip->map, 64, gpio);
raw_spin_lock_irqsave(&chip->gpio_lock, flags);
@@ -263,7 +238,7 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
unsigned long flags;
struct xgpio_instance *chip = gpiochip_get_data(gc);
- int bit = xgpio_to_bit(chip, gpio);
+ unsigned long bit = find_nth_bit(chip->map, 64, gpio);
raw_spin_lock_irqsave(&chip->gpio_lock, flags);
@@ -395,14 +370,15 @@ static void xgpio_irq_mask(struct irq_data *irq_data)
unsigned long flags;
struct xgpio_instance *chip = irq_data_get_irq_chip_data(irq_data);
int irq_offset = irqd_to_hwirq(irq_data);
- int bit = xgpio_to_bit(chip, irq_offset);
+ unsigned long bit = find_nth_bit(chip->map, 64, irq_offset), enable;
u32 mask = BIT(bit / 32), temp;
raw_spin_lock_irqsave(&chip->gpio_lock, flags);
__clear_bit(bit, chip->enable);
- if (xgpio_get_value32(chip->enable, bit) == 0) {
+ enable = bitmap_read(chip->enable, round_down(bit, 32), 32);
+ if (enable == 0) {
/* Disable per channel interrupt */
temp = xgpio_readreg(chip->regs + XGPIO_IPIER_OFFSET);
temp &= ~mask;
@@ -422,17 +398,15 @@ static void xgpio_irq_unmask(struct irq_data *irq_data)
unsigned long flags;
struct xgpio_instance *chip = irq_data_get_irq_chip_data(irq_data);
int irq_offset = irqd_to_hwirq(irq_data);
- int bit = xgpio_to_bit(chip, irq_offset);
- u32 old_enable = xgpio_get_value32(chip->enable, bit);
+ unsigned long bit = find_nth_bit(chip->map, 64, irq_offset), enable;
u32 mask = BIT(bit / 32), val;
gpiochip_enable_irq(&chip->gc, irq_offset);
raw_spin_lock_irqsave(&chip->gpio_lock, flags);
- __set_bit(bit, chip->enable);
-
- if (old_enable == 0) {
+ enable = bitmap_read(chip->enable, round_down(bit, 32), 32);
+ if (enable == 0) {
/* Clear any existing per-channel interrupts */
val = xgpio_readreg(chip->regs + XGPIO_IPISR_OFFSET);
val &= mask;
@@ -447,6 +421,8 @@ static void xgpio_irq_unmask(struct irq_data *irq_data)
xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, val);
}
+ __set_bit(bit, chip->enable);
+
raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
@@ -462,7 +438,7 @@ static int xgpio_set_irq_type(struct irq_data *irq_data, unsigned int type)
{
struct xgpio_instance *chip = irq_data_get_irq_chip_data(irq_data);
int irq_offset = irqd_to_hwirq(irq_data);
- int bit = xgpio_to_bit(chip, irq_offset);
+ unsigned long bit = find_nth_bit(chip->map, 64, irq_offset);
/*
* The Xilinx GPIO hardware provides a single interrupt status
@@ -502,10 +478,10 @@ static void xgpio_irqhandler(struct irq_desc *desc)
struct irq_chip *irqchip = irq_desc_get_chip(desc);
DECLARE_BITMAP(rising, 64);
DECLARE_BITMAP(falling, 64);
- DECLARE_BITMAP(all, 64);
+ DECLARE_BITMAP(hw, 64);
+ DECLARE_BITMAP(sw, 64);
int irq_offset;
u32 status;
- u32 bit;
status = xgpio_readreg(chip->regs + XGPIO_IPISR_OFFSET);
xgpio_writereg(chip->regs + XGPIO_IPISR_OFFSET, status);
@@ -514,29 +490,28 @@ static void xgpio_irqhandler(struct irq_desc *desc)
raw_spin_lock(&chip->gpio_lock);
- xgpio_read_ch_all(chip, XGPIO_DATA_OFFSET, all);
+ xgpio_read_ch_all(chip, XGPIO_DATA_OFFSET, hw);
bitmap_complement(rising, chip->last_irq_read, 64);
- bitmap_and(rising, rising, all, 64);
+ bitmap_and(rising, rising, hw, 64);
bitmap_and(rising, rising, chip->enable, 64);
bitmap_and(rising, rising, chip->rising_edge, 64);
- bitmap_complement(falling, all, 64);
+ bitmap_complement(falling, hw, 64);
bitmap_and(falling, falling, chip->last_irq_read, 64);
bitmap_and(falling, falling, chip->enable, 64);
bitmap_and(falling, falling, chip->falling_edge, 64);
- bitmap_copy(chip->last_irq_read, all, 64);
- bitmap_or(all, rising, falling, 64);
+ bitmap_copy(chip->last_irq_read, hw, 64);
+ bitmap_or(hw, rising, falling, 64);
raw_spin_unlock(&chip->gpio_lock);
dev_dbg(gc->parent, "IRQ rising %*pb falling %*pb\n", 64, rising, 64, falling);
- for_each_set_bit(bit, all, 64) {
- irq_offset = xgpio_from_bit(chip, bit);
+ bitmap_gather(sw, hw, chip->map, 64);
+ for_each_set_bit(irq_offset, sw, 64)
generic_handle_domain_irq(gc->irq.domain, irq_offset);
- }
chained_irq_exit(irqchip, desc);
}
@@ -613,17 +588,14 @@ static int xgpio_probe(struct platform_device *pdev)
if (width[1] > 32)
return -EINVAL;
- /* Setup software pin mapping */
- bitmap_set(chip->sw_map, 0, width[0] + width[1]);
-
/* Setup hardware pin mapping */
- bitmap_set(chip->hw_map, 0, width[0]);
- bitmap_set(chip->hw_map, 32, width[1]);
+ bitmap_set(chip->map, 0, width[0]);
+ bitmap_set(chip->map, 32, width[1]);
raw_spin_lock_init(&chip->gpio_lock);
chip->gc.base = -1;
- chip->gc.ngpio = bitmap_weight(chip->hw_map, 64);
+ chip->gc.ngpio = bitmap_weight(chip->map, 64);
chip->gc.parent = dev;
chip->gc.direction_input = xgpio_dir_in;
chip->gc.direction_output = xgpio_dir_out;
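
The xgpio rework above drops the dual sw_map/hw_map remapping in favour of a single map plus the generic bitmap helpers: find_nth_bit() turns a software line number into its sparse hardware bit, bitmap_read()/bitmap_write() move 32-bit register values in and out of a bitmap, and bitmap_scatter()/bitmap_gather() convert whole bitmaps between the dense software view and the sparse hardware view. A self-contained illustration of those semantics for a chip with widths of 4 and 2 (values picked purely for the example):

#include <linux/bitmap.h>
#include <linux/bug.h>

static void xgpio_map_example(void)
{
	DECLARE_BITMAP(map, 64);	/* hardware layout: bits 0..3 and 32..33 */
	DECLARE_BITMAP(sw, 64);		/* dense software view: lines 0..5 */
	DECLARE_BITMAP(hw, 64);		/* sparse hardware view */

	bitmap_zero(map, 64);
	bitmap_set(map, 0, 4);		/* channel 1 */
	bitmap_set(map, 32, 2);		/* channel 2 */

	/* Software line 5 (the last one) sits at hardware bit 33. */
	WARN_ON(find_nth_bit(map, 64, 5) != 33);

	/* Dense lines 0, 4 and 5 scatter to hardware bits 0, 32 and 33. */
	bitmap_zero(sw, 64);
	__set_bit(0, sw);
	__set_bit(4, sw);
	__set_bit(5, sw);
	bitmap_scatter(hw, sw, map, 64);

	/* bitmap_gather() is the inverse mapping back to the dense view. */
	bitmap_gather(sw, hw, map, 64);
}
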
diff --git a/drivers/gpio/gpio-xra1403.c b/drivers/gpio/gpio-xra1403.c
index dc2710c21c50..842cf875bb92 100644
--- a/drivers/gpio/gpio-xra1403.c
+++ b/drivers/gpio/gpio-xra1403.c
@@ -13,6 +13,7 @@
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/spi/spi.h>
+#include <linux/string_choices.h>
#include <linux/regmap.h>
/* XRA1403 registers */
@@ -140,7 +141,7 @@ static void xra1403_dbg_show(struct seq_file *s, struct gpio_chip *chip)
seq_printf(s, " gpio-%-3d (%-12s) %s %s\n",
chip->base + i, label,
(gcr & BIT(i)) ? "in" : "out",
- (gsr & BIT(i)) ? "hi" : "lo");
+ str_hi_lo(gsr & BIT(i)));
}
}
#else
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index f7746c57ba76..69caa35c58df 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -994,7 +994,7 @@ __acpi_find_gpio(struct fwnode_handle *fwnode, const char *con_id, unsigned int
desc = acpi_get_gpiod_from_data(fwnode,
propname, idx, info);
if (PTR_ERR(desc) == -EPROBE_DEFER)
- return ERR_CAST(desc);
+ return desc;
if (!IS_ERR(desc))
return desc;
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index 40f76a90fd7d..107d75558b5a 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -2729,8 +2729,9 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file)
cdev->gdev = gpio_device_get(gdev);
cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
- ret = atomic_notifier_chain_register(&gdev->line_state_notifier,
- &cdev->lineinfo_changed_nb);
+ scoped_guard(write_lock_irqsave, &gdev->line_state_lock)
+ ret = raw_notifier_chain_register(&gdev->line_state_notifier,
+ &cdev->lineinfo_changed_nb);
if (ret)
goto out_free_bitmap;
@@ -2754,8 +2755,9 @@ out_unregister_device_notifier:
blocking_notifier_chain_unregister(&gdev->device_notifier,
&cdev->device_unregistered_nb);
out_unregister_line_notifier:
- atomic_notifier_chain_unregister(&gdev->line_state_notifier,
- &cdev->lineinfo_changed_nb);
+ scoped_guard(write_lock_irqsave, &gdev->line_state_lock)
+ raw_notifier_chain_unregister(&gdev->line_state_notifier,
+ &cdev->lineinfo_changed_nb);
out_free_bitmap:
gpio_device_put(gdev);
bitmap_free(cdev->watched_lines);
@@ -2779,8 +2781,9 @@ static int gpio_chrdev_release(struct inode *inode, struct file *file)
blocking_notifier_chain_unregister(&gdev->device_notifier,
&cdev->device_unregistered_nb);
- atomic_notifier_chain_unregister(&gdev->line_state_notifier,
- &cdev->lineinfo_changed_nb);
+ scoped_guard(write_lock_irqsave, &gdev->line_state_lock)
+ raw_notifier_chain_unregister(&gdev->line_state_notifier,
+ &cdev->lineinfo_changed_nb);
bitmap_free(cdev->watched_lines);
gpio_device_put(gdev);
kfree(cdev);
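
The cdev hunks above switch line-state notifications from an atomic notifier chain to a raw one whose (un)registration is serialized by the new gdev->line_state_lock rwlock, taken through the scoped_guard() cleanup helper. The notify side in gpiolib presumably takes the read lock before calling the chain; the following is a sketch of that locking pattern under the stated assumption, not the exact gpiolib code:

#include <linux/cleanup.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>

/* Assumed read-side counterpart to the write-locked (un)registration above. */
static void foo_line_state_notify(rwlock_t *lock,
				  struct raw_notifier_head *chain,
				  unsigned long action, void *data)
{
	/* Raw notifier chains provide no locking of their own. */
	guard(read_lock_irqsave)(lock);

	raw_notifier_call_chain(chain, action, data);
}
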
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 2e537ee979f3..eb667f8f1ead 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -203,6 +203,15 @@ static void of_gpio_try_fixup_polarity(const struct device_node *np,
*/
{ "qi,lb60", "rb-gpios", true },
#endif
+#if IS_ENABLED(CONFIG_IEEE802154_CA8210)
+ /*
+ * According to the datasheet, the NRST pin 27 is an active-low
+ * signal. However, the device tree schema and, admittedly, the
+ * out-of-tree implementations have long been describing the reset
+ * GPIO incorrectly as active-high.
+ */
+ { "cascoda,ca8210", "reset-gpio", false },
+#endif
#if IS_ENABLED(CONFIG_PCI_LANTIQ)
/*
* According to the PCI specification, the RST# pin is an
@@ -929,7 +938,7 @@ struct notifier_block gpio_of_notifier = {
#endif /* CONFIG_OF_DYNAMIC */
/**
- * of_gpio_simple_xlate - translate gpiospec to the GPIO number and flags
+ * of_gpio_twocell_xlate - translate twocell gpiospec to the GPIO number and flags
* @gc: pointer to the gpio_chip structure
* @gpiospec: GPIO specifier as found in the device tree
* @flags: a flags pointer to fill in
@@ -941,9 +950,9 @@ struct notifier_block gpio_of_notifier = {
* Returns:
* GPIO number (>= 0) on success, negative errno on failure.
*/
-static int of_gpio_simple_xlate(struct gpio_chip *gc,
- const struct of_phandle_args *gpiospec,
- u32 *flags)
+static int of_gpio_twocell_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec,
+ u32 *flags)
{
/*
* We're discouraging gpio_cells < 2, since that way you'll have to
@@ -951,7 +960,7 @@ static int of_gpio_simple_xlate(struct gpio_chip *gc,
* number and the flags from a single gpio cell -- this is possible,
* but not recommended).
*/
- if (gc->of_gpio_n_cells < 2) {
+ if (gc->of_gpio_n_cells != 2) {
WARN_ON(1);
return -EINVAL;
}
@@ -968,6 +977,49 @@ static int of_gpio_simple_xlate(struct gpio_chip *gc,
return gpiospec->args[0];
}
+/**
+ * of_gpio_threecell_xlate - translate threecell gpiospec to the GPIO number and flags
+ * @gc: pointer to the gpio_chip structure
+ * @gpiospec: GPIO specifier as found in the device tree
+ * @flags: a flags pointer to fill in
+ *
+ * This is a simple translation function, suitable for most 1:n mapped
+ * GPIO chips, i.e. several GPIO chip instances from one device tree node.
+ * In this case the following binding is implied:
+ *
+ * foo-gpios = <&gpio instance offset flags>;
+ *
+ * Returns:
+ * GPIO number (>= 0) on success, negative errno on failure.
+ */
+static int of_gpio_threecell_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec,
+ u32 *flags)
+{
+ if (gc->of_gpio_n_cells != 3) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (WARN_ON(gpiospec->args_count != 3))
+ return -EINVAL;
+
+ /*
+ * Check the chip instance number; the driver responds with true
+ * if this is the chip we are looking for.
+ */
+ if (!gc->of_node_instance_match(gc, gpiospec->args[0]))
+ return -EINVAL;
+
+ if (gpiospec->args[1] >= gc->ngpio)
+ return -EINVAL;
+
+ if (flags)
+ *flags = gpiospec->args[2];
+
+ return gpiospec->args[1];
+}
+
#if IS_ENABLED(CONFIG_OF_GPIO_MM_GPIOCHIP)
#include <linux/gpio/legacy-of-mm-gpiochip.h>
/**
@@ -1057,6 +1109,9 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
const char *name;
static const char group_names_propname[] = "gpio-ranges-group-names";
bool has_group_names;
+ int offset; /* Offset of the first GPIO line on the chip */
+ int pin; /* Pin base number in the range */
+ int count; /* Number of pins/GPIO lines to map */
np = dev_of_node(&chip->gpiodev->dev);
if (!np)
@@ -1065,7 +1120,15 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
has_group_names = of_property_present(np, group_names_propname);
for (;; index++) {
- ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3,
+ /*
+ * Ordinary phandles contain 2-3 cells:
+ * gpios = <&gpio [instance] offset flags>;
+ * Ranges always contain one more cell:
+ * gpio-ranges = <&pinctrl [gpio_instance] gpio_offset pin_offset count>;
+ * This is why we parse chip->of_gpio_n_cells + 1 cells.
+ */
+ ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges",
+ chip->of_gpio_n_cells + 1,
index, &pinspec);
if (ret)
break;
@@ -1075,13 +1138,33 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
if (!pctldev)
return -EPROBE_DEFER;
+ if (chip->of_gpio_n_cells == 3) {
+ /* First cell is the gpiochip instance number */
+ offset = pinspec.args[1];
+ pin = pinspec.args[2];
+ count = pinspec.args[3];
+ } else {
+ offset = pinspec.args[0];
+ pin = pinspec.args[1];
+ count = pinspec.args[2];
+ }
+
+ /*
+ * With multiple GPIO chips per node, check that this chip is the
+ * right instance.
+ */
+ if (chip->of_node_instance_match &&
+ (chip->of_gpio_n_cells == 3) &&
+ !chip->of_node_instance_match(chip, pinspec.args[0]))
+ continue;
+
/* Ignore ranges outside of this GPIO chip */
- if (pinspec.args[0] >= (chip->offset + chip->ngpio))
+ if (offset >= (chip->offset + chip->ngpio))
continue;
- if (pinspec.args[0] + pinspec.args[2] <= chip->offset)
+ if (offset + count <= chip->offset)
continue;
- if (pinspec.args[2]) {
+ if (count) {
/* npins != 0: linear range */
if (has_group_names) {
of_property_read_string_index(np,
@@ -1095,27 +1178,27 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
}
/* Trim the range to fit this GPIO chip */
- if (chip->offset > pinspec.args[0]) {
- trim = chip->offset - pinspec.args[0];
- pinspec.args[2] -= trim;
- pinspec.args[1] += trim;
- pinspec.args[0] = 0;
+ if (chip->offset > offset) {
+ trim = chip->offset - offset;
+ count -= trim;
+ pin += trim;
+ offset = 0;
} else {
- pinspec.args[0] -= chip->offset;
+ offset -= chip->offset;
}
- if ((pinspec.args[0] + pinspec.args[2]) > chip->ngpio)
- pinspec.args[2] = chip->ngpio - pinspec.args[0];
+ if ((offset + count) > chip->ngpio)
+ count = chip->ngpio - offset;
ret = gpiochip_add_pin_range(chip,
pinctrl_dev_get_devname(pctldev),
- pinspec.args[0],
- pinspec.args[1],
- pinspec.args[2]);
+ offset,
+ pin,
+ count);
if (ret)
return ret;
} else {
/* npins == 0: special range */
- if (pinspec.args[1]) {
+ if (pin) {
pr_err("%pOF: Illegal gpio-range format.\n",
np);
break;
@@ -1140,7 +1223,7 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
}
ret = gpiochip_add_pingroup_range(chip, pctldev,
- pinspec.args[0], name);
+ offset, name);
if (ret)
return ret;
}
@@ -1163,8 +1246,14 @@ int of_gpiochip_add(struct gpio_chip *chip)
return 0;
if (!chip->of_xlate) {
- chip->of_gpio_n_cells = 2;
- chip->of_xlate = of_gpio_simple_xlate;
+ if (chip->of_gpio_n_cells == 3) {
+ if (!chip->of_node_instance_match)
+ return -EINVAL;
+ chip->of_xlate = of_gpio_threecell_xlate;
+ } else {
+ chip->of_gpio_n_cells = 2;
+ chip->of_xlate = of_gpio_twocell_xlate;
+ }
}
if (chip->of_gpio_n_cells > MAX_PHANDLE_ARGS)
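
For the new three-cell translation to kick in, a driver that registers several chips from a single DT node must set of_gpio_n_cells to 3 and provide of_node_instance_match(); of_gpiochip_add() now refuses three cells without it. A hedged sketch of the driver side (the callback signature is inferred from its use in this hunk, and the instance-numbering scheme is made up):

#include <linux/container_of.h>
#include <linux/gpio/driver.h>

struct foo_bank {
	struct gpio_chip gc;
	unsigned int instance;	/* first cell of <&gpio instance offset flags> */
};

static bool foo_of_node_instance_match(struct gpio_chip *gc,
				       unsigned int instance)
{
	struct foo_bank *bank = container_of(gc, struct foo_bank, gc);

	return bank->instance == instance;
}

static void foo_init_chip(struct foo_bank *bank)
{
	bank->gc.of_gpio_n_cells = 3;
	bank->gc.of_node_instance_match = foo_of_node_instance_match;
}
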
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 8741600af7ef..b8197502a5ac 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/gpio.h>
#include <linux/gpio/driver.h>
@@ -341,6 +342,25 @@ static int gpiochip_find_base_unlocked(u16 ngpio)
}
}
+static int gpiochip_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ int ret;
+
+ lockdep_assert_held(&gc->gpiodev->srcu);
+
+ if (WARN_ON(!gc->get_direction))
+ return -EOPNOTSUPP;
+
+ ret = gc->get_direction(gc, offset);
+ if (ret < 0)
+ return ret;
+
+ if (ret != GPIO_LINE_DIRECTION_OUT && ret != GPIO_LINE_DIRECTION_IN)
+ ret = -EBADE;
+
+ return ret;
+}
+
/**
* gpiod_get_direction - return the current direction of a GPIO
* @desc: GPIO to get the direction of
@@ -381,7 +401,7 @@ int gpiod_get_direction(struct gpio_desc *desc)
if (!guard.gc->get_direction)
return -ENOTSUPP;
- ret = guard.gc->get_direction(guard.gc, offset);
+ ret = gpiochip_get_direction(guard.gc, offset);
if (ret < 0)
return ret;
@@ -652,7 +672,7 @@ static int gpiochip_apply_reserved_ranges(struct gpio_chip *gc)
if (start >= gc->ngpio || start + count > gc->ngpio)
continue;
- bitmap_clear(gc->valid_mask, start, count);
+ bitmap_clear(gc->gpiodev->valid_mask, start, count);
}
kfree(ranges);
@@ -666,8 +686,8 @@ static int gpiochip_init_valid_mask(struct gpio_chip *gc)
if (!(gpiochip_count_reserved_ranges(gc) || gc->init_valid_mask))
return 0;
- gc->valid_mask = gpiochip_allocate_mask(gc);
- if (!gc->valid_mask)
+ gc->gpiodev->valid_mask = gpiochip_allocate_mask(gc);
+ if (!gc->gpiodev->valid_mask)
return -ENOMEM;
ret = gpiochip_apply_reserved_ranges(gc);
@@ -676,7 +696,7 @@ static int gpiochip_init_valid_mask(struct gpio_chip *gc)
if (gc->init_valid_mask)
return gc->init_valid_mask(gc,
- gc->valid_mask,
+ gc->gpiodev->valid_mask,
gc->ngpio);
return 0;
@@ -684,7 +704,7 @@ static int gpiochip_init_valid_mask(struct gpio_chip *gc)
static void gpiochip_free_valid_mask(struct gpio_chip *gc)
{
- gpiochip_free_mask(&gc->valid_mask);
+ gpiochip_free_mask(&gc->gpiodev->valid_mask);
}
static int gpiochip_add_pin_ranges(struct gpio_chip *gc)
@@ -703,13 +723,29 @@ static int gpiochip_add_pin_ranges(struct gpio_chip *gc)
return 0;
}
+/**
+ * gpiochip_query_valid_mask - return the GPIO validity information
+ * @gc: gpio chip whose validity information is queried
+ *
+ * Returns: bitmap representing valid GPIOs or NULL if all GPIOs are valid
+ *
+ * Some GPIO chips may support configurations where some of the pins aren't
+ * available. These chips can have valid_mask set to represent the valid
+ * GPIOs. This function can be used to retrieve this information.
+ */
+const unsigned long *gpiochip_query_valid_mask(const struct gpio_chip *gc)
+{
+ return gc->gpiodev->valid_mask;
+}
+EXPORT_SYMBOL_GPL(gpiochip_query_valid_mask);
+
bool gpiochip_line_is_valid(const struct gpio_chip *gc,
unsigned int offset)
{
/* No mask means all valid */
- if (likely(!gc->valid_mask))
+ if (likely(!gc->gpiodev->valid_mask))
return true;
- return test_bit(offset, gc->valid_mask);
+ return test_bit(offset, gc->gpiodev->valid_mask);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_valid);
@@ -882,13 +918,29 @@ void *gpiochip_get_data(struct gpio_chip *gc)
}
EXPORT_SYMBOL_GPL(gpiochip_get_data);
+/*
+ * If the calling driver provides the specific firmware node,
+ * use it. Otherwise use the one from the parent device, if any.
+ */
+static struct fwnode_handle *gpiochip_choose_fwnode(struct gpio_chip *gc)
+{
+ if (gc->fwnode)
+ return gc->fwnode;
+
+ if (gc->parent)
+ return dev_fwnode(gc->parent);
+
+ return NULL;
+}
+
int gpiochip_get_ngpios(struct gpio_chip *gc, struct device *dev)
{
+ struct fwnode_handle *fwnode = gpiochip_choose_fwnode(gc);
u32 ngpios = gc->ngpio;
int ret;
if (ngpios == 0) {
- ret = device_property_read_u32(dev, "ngpios", &ngpios);
+ ret = fwnode_property_read_u32(fwnode, "ngpios", &ngpios);
if (ret == -ENODATA)
/*
* -ENODATA means that there is no property found and
@@ -925,6 +977,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
int base = 0;
int ret = 0;
+ /* Only allow one set() and one set_multiple(). */
+ if ((gc->set && gc->set_rv) ||
+ (gc->set_multiple && gc->set_multiple_rv))
+ return -EINVAL;
+
/*
* First: allocate and populate the internal stat container, and
* set up the struct device.
@@ -941,14 +998,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
gc->gpiodev = gdev;
gpiochip_set_data(gc, data);
- /*
- * If the calling driver did not initialize firmware node,
- * do it here using the parent device, if any.
- */
- if (gc->fwnode)
- device_set_node(&gdev->dev, gc->fwnode);
- else if (gc->parent)
- device_set_node(&gdev->dev, dev_fwnode(gc->parent));
+ device_set_node(&gdev->dev, gpiochip_choose_fwnode(gc));
gdev->id = ida_alloc(&gpio_ida, GFP_KERNEL);
if (gdev->id < 0) {
@@ -1025,7 +1075,8 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
}
}
- ATOMIC_INIT_NOTIFIER_HEAD(&gdev->line_state_notifier);
+ rwlock_init(&gdev->line_state_lock);
+ RAW_INIT_NOTIFIER_HEAD(&gdev->line_state_notifier);
BLOCKING_INIT_NOTIFIER_HEAD(&gdev->device_notifier);
ret = init_srcu_struct(&gdev->srcu);
@@ -1056,24 +1107,19 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
desc->gdev = gdev;
- if (gc->get_direction && gpiochip_line_is_valid(gc, desc_index)) {
- ret = gc->get_direction(gc, desc_index);
- if (ret < 0)
- /*
- * FIXME: Bail-out here once all GPIO drivers
- * are updated to not return errors in
- * situations that can be considered normal
- * operation.
- */
- dev_warn(&gdev->dev,
- "%s: get_direction failed: %d\n",
- __func__, ret);
-
- assign_bit(FLAG_IS_OUT, &desc->flags, !ret);
- } else {
+ /*
+ * We would typically want to use gpiochip_get_direction() here
+ * but we must not check the return value and bail out, as pin
+ * controllers can have pins configured to alternate functions
+ * and return -EINVAL. Also: there's no need to take the SRCU
+ * lock here.
+ */
+ if (gc->get_direction && gpiochip_line_is_valid(gc, desc_index))
+ assign_bit(FLAG_IS_OUT, &desc->flags,
+ !gc->get_direction(gc, desc_index));
+ else
assign_bit(FLAG_IS_OUT,
&desc->flags, !gc->direction_input);
- }
}
ret = of_gpiochip_add(gc);
@@ -2329,16 +2375,18 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
if (test_and_set_bit(FLAG_REQUESTED, &desc->flags))
return -EBUSY;
+ offset = gpio_chip_hwgpio(desc);
+ if (!gpiochip_line_is_valid(guard.gc, offset))
+ return -EINVAL;
+
/* NOTE: gpio_request() can be called in early boot,
* before IRQs are enabled, for non-sleeping (SOC) GPIOs.
*/
if (guard.gc->request) {
- offset = gpio_chip_hwgpio(desc);
- if (gpiochip_line_is_valid(guard.gc, offset))
- ret = guard.gc->request(guard.gc, offset);
- else
- ret = -EINVAL;
+ ret = guard.gc->request(guard.gc, offset);
+ if (ret > 0)
+ ret = -EBADE;
if (ret)
goto out_clear_bit;
}
@@ -2581,6 +2629,9 @@ int gpio_do_set_config(struct gpio_desc *desc, unsigned long config)
return -ENOTSUPP;
ret = guard.gc->set_config(guard.gc, gpio_chip_hwgpio(desc), config);
+ if (ret > 0)
+ ret = -EBADE;
+
#ifdef CONFIG_GPIO_CDEV
/*
* Special case - if we're setting debounce period, we need to store
@@ -2686,6 +2737,39 @@ int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce)
return ret;
}
+static int gpiochip_direction_input(struct gpio_chip *gc, unsigned int offset)
+{
+ int ret;
+
+ lockdep_assert_held(&gc->gpiodev->srcu);
+
+ if (WARN_ON(!gc->direction_input))
+ return -EOPNOTSUPP;
+
+ ret = gc->direction_input(gc, offset);
+ if (ret > 0)
+ ret = -EBADE;
+
+ return ret;
+}
+
+static int gpiochip_direction_output(struct gpio_chip *gc, unsigned int offset,
+ int value)
+{
+ int ret;
+
+ lockdep_assert_held(&gc->gpiodev->srcu);
+
+ if (WARN_ON(!gc->direction_output))
+ return -EOPNOTSUPP;
+
+ ret = gc->direction_output(gc, offset, value);
+ if (ret > 0)
+ ret = -EBADE;
+
+ return ret;
+}
+
/**
* gpiod_direction_input - set the GPIO direction to input
* @desc: GPIO to set to input
@@ -2737,11 +2821,10 @@ int gpiod_direction_input_nonotify(struct gpio_desc *desc)
* assume we are in input mode after this.
*/
if (guard.gc->direction_input) {
- ret = guard.gc->direction_input(guard.gc,
- gpio_chip_hwgpio(desc));
+ ret = gpiochip_direction_input(guard.gc,
+ gpio_chip_hwgpio(desc));
} else if (guard.gc->get_direction) {
- dir = guard.gc->get_direction(guard.gc,
- gpio_chip_hwgpio(desc));
+ dir = gpiochip_get_direction(guard.gc, gpio_chip_hwgpio(desc));
if (dir < 0)
return dir;
@@ -2762,6 +2845,27 @@ int gpiod_direction_input_nonotify(struct gpio_desc *desc)
return ret;
}
+static int gpiochip_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ int ret;
+
+ lockdep_assert_held(&gc->gpiodev->srcu);
+
+ if (WARN_ON(unlikely(!gc->set && !gc->set_rv)))
+ return -EOPNOTSUPP;
+
+ if (gc->set_rv) {
+ ret = gc->set_rv(gc, offset, value);
+ if (ret > 0)
+ ret = -EBADE;
+
+ return ret;
+ }
+
+ gc->set(gc, offset, value);
+ return 0;
+}
+
static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
{
int val = !!value, ret = 0, dir;
@@ -2783,13 +2887,13 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
}
if (guard.gc->direction_output) {
- ret = guard.gc->direction_output(guard.gc,
- gpio_chip_hwgpio(desc), val);
+ ret = gpiochip_direction_output(guard.gc,
+ gpio_chip_hwgpio(desc), val);
} else {
/* Check that we are in output mode if we can */
if (guard.gc->get_direction) {
- dir = guard.gc->get_direction(guard.gc,
- gpio_chip_hwgpio(desc));
+ dir = gpiochip_get_direction(guard.gc,
+ gpio_chip_hwgpio(desc));
if (dir < 0)
return dir;
@@ -2804,7 +2908,9 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
* If we can't actively set the direction, we are some
* output-only chip, so just drive the output as desired.
*/
- guard.gc->set(guard.gc, gpio_chip_hwgpio(desc), val);
+ ret = gpiochip_set(guard.gc, gpio_chip_hwgpio(desc), val);
+ if (ret)
+ return ret;
}
if (!ret)
@@ -2894,19 +3000,15 @@ int gpiod_direction_output_nonotify(struct gpio_desc *desc, int value)
if (!ret)
goto set_output_value;
/* Emulate open drain by not actively driving the line high */
- if (value) {
- ret = gpiod_direction_input_nonotify(desc);
+ if (value)
goto set_output_flag;
- }
} else if (test_bit(FLAG_OPEN_SOURCE, &flags)) {
ret = gpio_set_config(desc, PIN_CONFIG_DRIVE_OPEN_SOURCE);
if (!ret)
goto set_output_value;
/* Emulate open source by not actively driving the line low */
- if (!value) {
- ret = gpiod_direction_input_nonotify(desc);
+ if (!value)
goto set_output_flag;
- }
} else {
gpio_set_config(desc, PIN_CONFIG_DRIVE_PUSH_PULL);
}
@@ -2918,17 +3020,20 @@ set_output_value:
return gpiod_direction_output_raw_commit(desc, value);
set_output_flag:
+ ret = gpiod_direction_input_nonotify(desc);
+ if (ret)
+ return ret;
/*
* When emulating open-source or open-drain functionalities by not
* actively driving the line (setting mode to input) we still need to
* set the IS_OUT flag or otherwise we won't be able to set the line
* value anymore.
*/
- if (ret == 0)
- set_bit(FLAG_IS_OUT, &desc->flags);
- return ret;
+ set_bit(FLAG_IS_OUT, &desc->flags);
+ return 0;
}
+#if IS_ENABLED(CONFIG_HTE)
/**
* gpiod_enable_hw_timestamp_ns - Enable hardware timestamp in nanoseconds.
*
@@ -2994,6 +3099,7 @@ int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags)
return ret;
}
EXPORT_SYMBOL_GPL(gpiod_disable_hw_timestamp_ns);
+#endif /* CONFIG_HTE */
/**
* gpiod_set_config - sets @config for a GPIO
@@ -3100,9 +3206,23 @@ void gpiod_toggle_active_low(struct gpio_desc *desc)
}
EXPORT_SYMBOL_GPL(gpiod_toggle_active_low);
+static int gpiochip_get(struct gpio_chip *gc, unsigned int offset)
+{
+ int ret;
+
+ lockdep_assert_held(&gc->gpiodev->srcu);
+
+ /* Make sure this is called after checking for gc->get(). */
+ ret = gc->get(gc, offset);
+ if (ret > 1)
+ ret = -EBADE;
+
+ return ret;
+}
+
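In the same spirit, a hedged sketch of a get() callback that honours the new sanitization rule, again with hypothetical foo_* names; the core now turns any return value greater than 1 into -EBADE:

static int foo_gpio_get(struct gpio_chip *gc, unsigned int offset)
{
	struct foo_gpio *priv = gpiochip_get_data(gc);

	/* Clamp the register bit to a strict 0 or 1 before returning it. */
	return !!(readl(priv->base + FOO_IN_REG) & BIT(offset));
}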
static int gpio_chip_get_value(struct gpio_chip *gc, const struct gpio_desc *desc)
{
- return gc->get ? gc->get(gc, gpio_chip_hwgpio(desc)) : -EIO;
+ return gc->get ? gpiochip_get(gc, gpio_chip_hwgpio(desc)) : -EIO;
}
/* I/O calls are only valid after configuration completed; the relevant
@@ -3151,15 +3271,21 @@ static int gpiod_get_raw_value_commit(const struct gpio_desc *desc)
static int gpio_chip_get_multiple(struct gpio_chip *gc,
unsigned long *mask, unsigned long *bits)
{
+ int ret;
+
lockdep_assert_held(&gc->gpiodev->srcu);
- if (gc->get_multiple)
- return gc->get_multiple(gc, mask, bits);
+ if (gc->get_multiple) {
+ ret = gc->get_multiple(gc, mask, bits);
+ if (ret > 0)
+			return -EBADE;
+
+		return ret;
+	}
+
if (gc->get) {
int i, value;
for_each_set_bit(i, mask, gc->ngpio) {
- value = gc->get(gc, i);
+ value = gpiochip_get(gc, i);
if (value < 0)
return value;
__assign_bit(i, bits, value);
@@ -3412,18 +3538,18 @@ EXPORT_SYMBOL_GPL(gpiod_get_array_value);
 * @desc: gpio descriptor whose state needs to be set.
 * @value: Non-zero for setting it HIGH, otherwise it will be set to LOW.
*/
-static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
+static int gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
{
int ret = 0, offset = gpio_chip_hwgpio(desc);
CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
- return;
+ return -ENODEV;
if (value) {
- ret = guard.gc->direction_input(guard.gc, offset);
+ ret = gpiochip_direction_input(guard.gc, offset);
} else {
- ret = guard.gc->direction_output(guard.gc, offset, 0);
+ ret = gpiochip_direction_output(guard.gc, offset, 0);
if (!ret)
set_bit(FLAG_IS_OUT, &desc->flags);
}
@@ -3432,6 +3558,8 @@ static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
gpiod_err(desc,
"%s: Error in set_value for open drain err %d\n",
__func__, ret);
+
+ return ret;
}
/*
@@ -3439,36 +3567,38 @@ static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
 * @desc: gpio descriptor whose state needs to be set.
 * @value: Non-zero for setting it HIGH, otherwise it will be set to LOW.
*/
-static void gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value)
+static int gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value)
{
int ret = 0, offset = gpio_chip_hwgpio(desc);
CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
- return;
+ return -ENODEV;
if (value) {
- ret = guard.gc->direction_output(guard.gc, offset, 1);
+ ret = gpiochip_direction_output(guard.gc, offset, 1);
if (!ret)
set_bit(FLAG_IS_OUT, &desc->flags);
} else {
- ret = guard.gc->direction_input(guard.gc, offset);
+ ret = gpiochip_direction_input(guard.gc, offset);
}
trace_gpio_direction(desc_to_gpio(desc), !value, ret);
if (ret < 0)
gpiod_err(desc,
"%s: Error in set_value for open source err %d\n",
__func__, ret);
+
+ return ret;
}
-static void gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
+static int gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
{
CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
- return;
+ return -ENODEV;
trace_gpio_value(desc_to_gpio(desc), 0, value);
- guard.gc->set(guard.gc, gpio_chip_hwgpio(desc), value);
+ return gpiochip_set(guard.gc, gpio_chip_hwgpio(desc), value);
}
/*
@@ -3480,21 +3610,38 @@ static void gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
* defines which outputs are to be changed
* @bits: bit value array; one bit per output; BITS_PER_LONG bits per word
* defines the values the outputs specified by mask are to be set to
+ *
+ * Returns: 0 on success, negative error number on failure.
*/
-static void gpio_chip_set_multiple(struct gpio_chip *gc,
- unsigned long *mask, unsigned long *bits)
+static int gpiochip_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
{
+ unsigned int i;
+	int ret = 0;
+
lockdep_assert_held(&gc->gpiodev->srcu);
+ if (gc->set_multiple_rv) {
+ ret = gc->set_multiple_rv(gc, mask, bits);
+ if (ret > 0)
+ ret = -EBADE;
+
+ return ret;
+ }
+
if (gc->set_multiple) {
gc->set_multiple(gc, mask, bits);
- } else {
- unsigned int i;
+ return 0;
+ }
- /* set outputs if the corresponding mask bit is set */
- for_each_set_bit(i, mask, gc->ngpio)
- gc->set(gc, i, test_bit(i, bits));
+ /* set outputs if the corresponding mask bit is set */
+ for_each_set_bit(i, mask, gc->ngpio) {
+ ret = gpiochip_set(gc, i, test_bit(i, bits));
+ if (ret)
+ break;
}
+
+ return ret;
}
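And a hedged sketch of the new bank-wide callback preferred by gpiochip_set_multiple(), assuming a hypothetical controller with at most 32 lines behind a single regmap register:

static int foo_gpio_set_multiple_rv(struct gpio_chip *gc,
				    unsigned long *mask, unsigned long *bits)
{
	struct foo_gpio *priv = gpiochip_get_data(gc);

	/* Single bus write for the whole bank; errors now reach the caller. */
	return regmap_update_bits(priv->map, FOO_OUT_REG, *mask, *bits);
}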
int gpiod_set_array_value_complex(bool raw, bool can_sleep,
@@ -3504,7 +3651,7 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
unsigned long *value_bitmap)
{
struct gpio_chip *gc;
- int i = 0;
+ int i = 0, ret;
/*
* Validate array_info against desc_array and its size.
@@ -3527,7 +3674,10 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
bitmap_xor(value_bitmap, value_bitmap,
array_info->invert_mask, array_size);
- gpio_chip_set_multiple(gc, array_info->set_mask, value_bitmap);
+ ret = gpiochip_set_multiple(gc, array_info->set_mask,
+ value_bitmap);
+ if (ret)
+ return ret;
i = find_first_zero_bit(array_info->set_mask, array_size);
if (i == array_size)
@@ -3604,8 +3754,11 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
} while ((i < array_size) &&
gpio_device_chip_cmp(desc_array[i]->gdev, guard.gc));
/* push collected bits to outputs */
- if (count != 0)
- gpio_chip_set_multiple(guard.gc, mask, bits);
+ if (count != 0) {
+ ret = gpiochip_set_multiple(guard.gc, mask, bits);
+ if (ret)
+ return ret;
+ }
if (mask != fastpath_mask)
bitmap_free(mask);
@@ -3625,13 +3778,16 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
*
* This function can be called from contexts where we cannot sleep, and will
* complain if the GPIO chip functions potentially sleep.
+ *
+ * Returns:
+ * 0 on success, negative error number on failure.
*/
-void gpiod_set_raw_value(struct gpio_desc *desc, int value)
+int gpiod_set_raw_value(struct gpio_desc *desc, int value)
{
- VALIDATE_DESC_VOID(desc);
+ VALIDATE_DESC(desc);
/* Should be using gpiod_set_raw_value_cansleep() */
WARN_ON(desc->gdev->can_sleep);
- gpiod_set_raw_value_commit(desc, value);
+ return gpiod_set_raw_value_commit(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
@@ -3643,17 +3799,21 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
* This sets the value of a GPIO line backing a descriptor, applying
* different semantic quirks like active low and open drain/source
* handling.
+ *
+ * Returns:
+ * 0 on success, negative error number on failure.
*/
-static void gpiod_set_value_nocheck(struct gpio_desc *desc, int value)
+static int gpiod_set_value_nocheck(struct gpio_desc *desc, int value)
{
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
+
if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
- gpio_set_open_drain_value_commit(desc, value);
+ return gpio_set_open_drain_value_commit(desc, value);
else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
- gpio_set_open_source_value_commit(desc, value);
- else
- gpiod_set_raw_value_commit(desc, value);
+ return gpio_set_open_source_value_commit(desc, value);
+
+ return gpiod_set_raw_value_commit(desc, value);
}
/**
@@ -3666,13 +3826,16 @@ static void gpiod_set_value_nocheck(struct gpio_desc *desc, int value)
*
* This function can be called from contexts where we cannot sleep, and will
* complain if the GPIO chip functions potentially sleep.
+ *
+ * Returns:
+ * 0 on success, negative error number on failure.
*/
-void gpiod_set_value(struct gpio_desc *desc, int value)
+int gpiod_set_value(struct gpio_desc *desc, int value)
{
- VALIDATE_DESC_VOID(desc);
+ VALIDATE_DESC(desc);
/* Should be using gpiod_set_value_cansleep() */
WARN_ON(desc->gdev->can_sleep);
- gpiod_set_value_nocheck(desc, value);
+ return gpiod_set_value_nocheck(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_value);
@@ -4090,12 +4253,15 @@ EXPORT_SYMBOL_GPL(gpiod_get_array_value_cansleep);
* regard for its ACTIVE_LOW status.
*
* This function is to be called from contexts that can sleep.
+ *
+ * Returns:
+ * 0 on success, negative error number on failure.
*/
-void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value)
+int gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value)
{
might_sleep();
- VALIDATE_DESC_VOID(desc);
- gpiod_set_raw_value_commit(desc, value);
+ VALIDATE_DESC(desc);
+ return gpiod_set_raw_value_commit(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_raw_value_cansleep);
@@ -4108,12 +4274,15 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_value_cansleep);
* account
*
* This function is to be called from contexts that can sleep.
+ *
+ * Returns:
+ * 0 on success, negative error number on failure.
*/
-void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
+int gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
{
might_sleep();
- VALIDATE_DESC_VOID(desc);
- gpiod_set_value_nocheck(desc, value);
+ VALIDATE_DESC(desc);
+ return gpiod_set_value_nocheck(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep);
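On the consumer side, the setters returning int mean callers can finally act on failures from controllers that can genuinely fail. A hedged sketch with hypothetical names, using only existing consumer APIs:

static int demo_assert_reset(struct device *dev)
{
	struct gpio_desc *reset;
	int ret;

	reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	/* gpiod_set_value_cansleep() now reports e.g. I2C expander errors. */
	ret = gpiod_set_value_cansleep(reset, 1);
	if (ret)
		dev_err(dev, "failed to assert reset: %d\n", ret);

	return ret;
}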
@@ -4193,8 +4362,9 @@ EXPORT_SYMBOL_GPL(gpiod_set_array_value_cansleep);
void gpiod_line_state_notify(struct gpio_desc *desc, unsigned long action)
{
- atomic_notifier_call_chain(&desc->gdev->line_state_notifier,
- action, desc);
+ guard(read_lock_irqsave)(&desc->gdev->line_state_lock);
+
+ raw_notifier_call_chain(&desc->gdev->line_state_notifier, action, desc);
}
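For context, the locking split behind the conversion above: a raw notifier chain carries no locking of its own, so registration takes the writer side of the rwlock while notification runs under the reader side. A hedged, generic sketch with illustrative demo_* names, not the gpiolib code itself:

struct demo_dev {
	struct raw_notifier_head chain;
	rwlock_t lock;
};

static void demo_init(struct demo_dev *d)
{
	RAW_INIT_NOTIFIER_HEAD(&d->chain);
	rwlock_init(&d->lock);
}

static int demo_register(struct demo_dev *d, struct notifier_block *nb)
{
	int ret;

	write_lock_irq(&d->lock);
	ret = raw_notifier_chain_register(&d->chain, nb);
	write_unlock_irq(&d->lock);

	return ret;
}

static void demo_notify(struct demo_dev *d, unsigned long action, void *data)
{
	unsigned long flags;

	read_lock_irqsave(&d->lock, flags);
	raw_notifier_call_chain(&d->chain, action, data);
	read_unlock_irqrestore(&d->lock, flags);
}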
/**
@@ -4749,10 +4919,10 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
return ret;
}
- gpiod_dbg(desc, "hogged as %s%s\n",
+ gpiod_dbg(desc, "hogged as %s/%s\n",
(dflags & GPIOD_FLAGS_BIT_DIR_OUT) ? "output" : "input",
(dflags & GPIOD_FLAGS_BIT_DIR_OUT) ?
- (dflags & GPIOD_FLAGS_BIT_DIR_VAL) ? "/high" : "/low" : "");
+ str_high_low(dflags & GPIOD_FLAGS_BIT_DIR_VAL) : "?");
return 0;
}
@@ -5026,6 +5196,7 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
unsigned int gpio = gdev->base;
struct gpio_desc *desc;
struct gpio_chip *gc;
+ unsigned long flags;
int value;
guard(srcu)(&gdev->srcu);
@@ -5038,16 +5209,17 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
for_each_gpio_desc(gc, desc) {
guard(srcu)(&desc->gdev->desc_srcu);
- is_irq = test_bit(FLAG_USED_AS_IRQ, &desc->flags);
- if (is_irq || test_bit(FLAG_REQUESTED, &desc->flags)) {
+ flags = READ_ONCE(desc->flags);
+ is_irq = test_bit(FLAG_USED_AS_IRQ, &flags);
+ if (is_irq || test_bit(FLAG_REQUESTED, &flags)) {
gpiod_get_direction(desc);
- is_out = test_bit(FLAG_IS_OUT, &desc->flags);
+ is_out = test_bit(FLAG_IS_OUT, &flags);
value = gpio_chip_get_value(gc, desc);
- active_low = test_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ active_low = test_bit(FLAG_ACTIVE_LOW, &flags);
seq_printf(s, " gpio-%-3u (%-20.20s|%-20.20s) %s %s %s%s\n",
gpio, desc->name ?: "", gpiod_get_label(desc),
is_out ? "out" : "in ",
- value >= 0 ? (value ? "hi" : "lo") : "? ",
+ value >= 0 ? str_hi_lo(value) : "? ",
is_irq ? "IRQ " : "",
active_low ? "ACTIVE LOW" : "");
} else if (desc->name) {
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 147156ec502b..58f64056de77 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -16,6 +16,7 @@
#include <linux/gpio/driver.h>
#include <linux/module.h>
#include <linux/notifier.h>
+#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/workqueue.h>
@@ -32,6 +33,8 @@
* @chip: pointer to the corresponding gpiochip, holding static
* data for this device
* @descs: array of ngpio descriptors.
+ * @valid_mask: If not %NULL, holds bitmask of GPIOs which are valid to be
+ * used from the chip.
* @desc_srcu: ensures consistent state of GPIO descriptors exposed to users
* @ngpio: the number of GPIO lines on this GPIO device, equal to the size
* of the @descs array.
@@ -45,6 +48,7 @@
* @list: links gpio_device:s together for traversal
* @line_state_notifier: used to notify subscribers about lines being
* requested, released or reconfigured
+ * @line_state_lock: RW-spinlock protecting the line state notifier
* @line_state_wq: used to emit line state events from a separate thread in
* process context
* @device_notifier: used to notify character device wait queues about the GPIO
@@ -65,6 +69,7 @@ struct gpio_device {
struct module *owner;
struct gpio_chip __rcu *chip;
struct gpio_desc *descs;
+ unsigned long *valid_mask;
struct srcu_struct desc_srcu;
unsigned int base;
u16 ngpio;
@@ -72,7 +77,8 @@ struct gpio_device {
const char *label;
void *data;
struct list_head list;
- struct atomic_notifier_head line_state_notifier;
+ struct raw_notifier_head line_state_notifier;
+ rwlock_t line_state_lock;
struct workqueue_struct *line_state_wq;
struct blocking_notifier_head device_notifier;
struct srcu_struct srcu;
@@ -183,24 +189,24 @@ struct gpio_desc {
struct gpio_device *gdev;
unsigned long flags;
/* flag symbols are bit numbers */
-#define FLAG_REQUESTED 0
-#define FLAG_IS_OUT 1
-#define FLAG_EXPORT 2 /* protected by sysfs_lock */
-#define FLAG_SYSFS 3 /* exported via /sys/class/gpio/control */
-#define FLAG_ACTIVE_LOW 6 /* value has active low */
-#define FLAG_OPEN_DRAIN 7 /* Gpio is open drain type */
-#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */
-#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
-#define FLAG_IRQ_IS_ENABLED 10 /* GPIO is connected to an enabled IRQ */
-#define FLAG_IS_HOGGED 11 /* GPIO is hogged */
-#define FLAG_TRANSITORY 12 /* GPIO may lose value in sleep or reset */
-#define FLAG_PULL_UP 13 /* GPIO has pull up enabled */
-#define FLAG_PULL_DOWN 14 /* GPIO has pull down enabled */
-#define FLAG_BIAS_DISABLE 15 /* GPIO has pull disabled */
-#define FLAG_EDGE_RISING 16 /* GPIO CDEV detects rising edge events */
-#define FLAG_EDGE_FALLING 17 /* GPIO CDEV detects falling edge events */
-#define FLAG_EVENT_CLOCK_REALTIME 18 /* GPIO CDEV reports REALTIME timestamps in events */
-#define FLAG_EVENT_CLOCK_HTE 19 /* GPIO CDEV reports hardware timestamps in events */
+#define FLAG_REQUESTED 0
+#define FLAG_IS_OUT 1
+#define FLAG_EXPORT 2 /* protected by sysfs_lock */
+#define FLAG_SYSFS 3 /* exported via /sys/class/gpio/control */
+#define FLAG_ACTIVE_LOW 6 /* value has active low */
+#define FLAG_OPEN_DRAIN 7 /* Gpio is open drain type */
+#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */
+#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
+#define FLAG_IRQ_IS_ENABLED 10 /* GPIO is connected to an enabled IRQ */
+#define FLAG_IS_HOGGED 11 /* GPIO is hogged */
+#define FLAG_TRANSITORY 12 /* GPIO may lose value in sleep or reset */
+#define FLAG_PULL_UP 13 /* GPIO has pull up enabled */
+#define FLAG_PULL_DOWN 14 /* GPIO has pull down enabled */
+#define FLAG_BIAS_DISABLE 15 /* GPIO has pull disabled */
+#define FLAG_EDGE_RISING 16 /* GPIO CDEV detects rising edge events */
+#define FLAG_EDGE_FALLING 17 /* GPIO CDEV detects falling edge events */
+#define FLAG_EVENT_CLOCK_REALTIME 18 /* GPIO CDEV reports REALTIME timestamps in events */
+#define FLAG_EVENT_CLOCK_HTE 19 /* GPIO CDEV reports hardware timestamps in events */
/* Connection label */
struct gpio_desc_label __rcu *label;
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index 8997f0096545..36a54d456630 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -5,3 +5,4 @@
obj-y += host1x/ drm/ vga/
obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/
obj-$(CONFIG_TRACE_GPU_MEM) += trace/
+obj-$(CONFIG_NOVA_CORE) += nova-core/
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index fbef3f471bd0..2cba2b6ebe1c 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -278,6 +278,15 @@ config DRM_GPUVM
GPU-VM representation providing helpers to manage a GPUs virtual
address space
+config DRM_GPUSVM
+ tristate
+ depends on DRM && DEVICE_PRIVATE
+ select HMM_MIRROR
+ select MMU_NOTIFIER
+ help
+	  GPU-SVM representation providing helpers to manage a GPU's shared
+	  virtual memory
+
config DRM_BUDDY
tristate
depends on DRM
@@ -326,8 +335,6 @@ config DRM_SCHED
tristate
depends on DRM
-source "drivers/gpu/drm/i2c/Kconfig"
-
source "drivers/gpu/drm/arm/Kconfig"
source "drivers/gpu/drm/radeon/Kconfig"
@@ -441,6 +448,8 @@ source "drivers/gpu/drm/mcde/Kconfig"
source "drivers/gpu/drm/tidss/Kconfig"
+source "drivers/gpu/drm/adp/Kconfig"
+
source "drivers/gpu/drm/xlnx/Kconfig"
source "drivers/gpu/drm/gud/Kconfig"
@@ -494,6 +503,17 @@ config DRM_WERROR
If in doubt, say N.
+config DRM_HEADER_TEST
+ bool "Ensure DRM headers are self-contained and pass kernel-doc"
+ depends on DRM && EXPERT && BROKEN
+ default n
+ help
+	  Ensure that the DRM subsystem headers, both under drivers/gpu/drm and
+	  include/drm, compile, are self-contained, have header guards, and
+	  produce no kernel-doc warnings.
+
+ If in doubt, say N.
+
endif
# Separate option because drm_panel_orientation_quirks.c is shared with fbdev
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 19fb370fbc56..ed54a546bbe2 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -104,6 +104,7 @@ obj-$(CONFIG_DRM_PANEL_BACKLIGHT_QUIRKS) += drm_panel_backlight_quirks.o
#
obj-$(CONFIG_DRM_EXEC) += drm_exec.o
obj-$(CONFIG_DRM_GPUVM) += drm_gpuvm.o
+obj-$(CONFIG_DRM_GPUSVM) += drm_gpusvm.o
obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o
@@ -135,7 +136,6 @@ drm_kms_helper-y := \
drm_atomic_state_helper.o \
drm_crtc_helper.o \
drm_damage_helper.o \
- drm_encoder_slave.o \
drm_flip_work.o \
drm_format_helper.o \
drm_gem_atomic_helper.o \
@@ -198,7 +198,6 @@ obj-$(CONFIG_DRM_INGENIC) += ingenic/
obj-$(CONFIG_DRM_LOGICVC) += logicvc/
obj-$(CONFIG_DRM_MEDIATEK) += mediatek/
obj-$(CONFIG_DRM_MESON) += meson/
-obj-y += i2c/
obj-y += panel/
obj-y += bridge/
obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
@@ -208,6 +207,7 @@ obj-y += mxsfb/
obj-y += tiny/
obj-$(CONFIG_DRM_PL111) += pl111/
obj-$(CONFIG_DRM_TVE200) += tve200/
+obj-$(CONFIG_DRM_ADP) += adp/
obj-$(CONFIG_DRM_XEN) += xen/
obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
obj-$(CONFIG_DRM_LIMA) += lima/
@@ -223,3 +223,21 @@ obj-y += solomon/
obj-$(CONFIG_DRM_SPRD) += sprd/
obj-$(CONFIG_DRM_LOONGSON) += loongson/
obj-$(CONFIG_DRM_POWERVR) += imagination/
+
+# Ensure drm headers are self-contained and pass kernel-doc
+hdrtest-files := \
+ $(shell cd $(src) && find . -maxdepth 1 -name 'drm_*.h') \
+ $(shell cd $(src) && find display lib -name '*.h')
+
+always-$(CONFIG_DRM_HEADER_TEST) += \
+ $(patsubst %.h,%.hdrtest, $(hdrtest-files))
+
+# Include the header twice to detect missing include guard.
+quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
+ cmd_hdrtest = \
+ $(CC) $(c_flags) -fsyntax-only -x c /dev/null -include $< -include $<; \
+ $(srctree)/scripts/kernel-doc -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \
+ touch $@
+
+$(obj)/%.hdrtest: $(src)/%.h FORCE
+ $(call if_changed_dep,hdrtest)
diff --git a/drivers/gpu/drm/adp/Kconfig b/drivers/gpu/drm/adp/Kconfig
new file mode 100644
index 000000000000..9fcc27eb200d
--- /dev/null
+++ b/drivers/gpu/drm/adp/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only OR MIT
+config DRM_ADP
+ tristate "DRM Support for pre-DCP Apple display controllers"
+ depends on DRM && OF && ARM64
+ depends on ARCH_APPLE || COMPILE_TEST
+ select DRM_KMS_HELPER
+ select DRM_BRIDGE_CONNECTOR
+ select DRM_DISPLAY_HELPER
+ select DRM_KMS_DMA_HELPER
+ select DRM_GEM_DMA_HELPER
+ select DRM_PANEL_BRIDGE
+ select VIDEOMODE_HELPERS
+ select DRM_MIPI_DSI
+ help
+	  Choose this option if you have an Apple Arm laptop with a touchbar.
+
+ If M is selected, this module will be called adpdrm.
diff --git a/drivers/gpu/drm/adp/Makefile b/drivers/gpu/drm/adp/Makefile
new file mode 100644
index 000000000000..8e7b618edd35
--- /dev/null
+++ b/drivers/gpu/drm/adp/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+adpdrm-y := adp_drv.o
+adpdrm-mipi-y := adp-mipi.o
+obj-$(CONFIG_DRM_ADP) += adpdrm.o adpdrm-mipi.o
diff --git a/drivers/gpu/drm/adp/adp-mipi.c b/drivers/gpu/drm/adp/adp-mipi.c
new file mode 100644
index 000000000000..ad80542b60ed
--- /dev/null
+++ b/drivers/gpu/drm/adp/adp-mipi.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/component.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+
+#define DSI_GEN_HDR 0x6c
+#define DSI_GEN_PLD_DATA 0x70
+
+#define DSI_CMD_PKT_STATUS 0x74
+
+#define GEN_PLD_R_EMPTY BIT(4)
+#define GEN_PLD_W_FULL BIT(3)
+#define GEN_PLD_W_EMPTY BIT(2)
+#define GEN_CMD_FULL BIT(1)
+#define GEN_CMD_EMPTY BIT(0)
+#define GEN_RD_CMD_BUSY BIT(6)
+#define CMD_PKT_STATUS_TIMEOUT_US 20000
+
+struct adp_mipi_drv_private {
+ struct mipi_dsi_host dsi;
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+ void __iomem *mipi;
+};
+
+#define mipi_to_adp(x) container_of(x, struct adp_mipi_drv_private, dsi)
+
+static int adp_dsi_gen_pkt_hdr_write(struct adp_mipi_drv_private *adp, u32 hdr_val)
+{
+ int ret;
+ u32 val, mask;
+
+ ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS,
+ val, !(val & GEN_CMD_FULL), 1000,
+ CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret) {
+ dev_err(adp->dsi.dev, "failed to get available command FIFO\n");
+ return ret;
+ }
+
+ writel(hdr_val, adp->mipi + DSI_GEN_HDR);
+
+ mask = GEN_CMD_EMPTY | GEN_PLD_W_EMPTY;
+ ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS,
+ val, (val & mask) == mask,
+ 1000, CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret) {
+ dev_err(adp->dsi.dev, "failed to write command FIFO\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adp_dsi_write(struct adp_mipi_drv_private *adp,
+ const struct mipi_dsi_packet *packet)
+{
+ const u8 *tx_buf = packet->payload;
+ int len = packet->payload_length, pld_data_bytes = sizeof(u32), ret;
+ __le32 word;
+ u32 val;
+
+ while (len) {
+ if (len < pld_data_bytes) {
+ word = 0;
+ memcpy(&word, tx_buf, len);
+ writel(le32_to_cpu(word), adp->mipi + DSI_GEN_PLD_DATA);
+ len = 0;
+ } else {
+ memcpy(&word, tx_buf, pld_data_bytes);
+ writel(le32_to_cpu(word), adp->mipi + DSI_GEN_PLD_DATA);
+ tx_buf += pld_data_bytes;
+ len -= pld_data_bytes;
+ }
+
+ ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS,
+ val, !(val & GEN_PLD_W_FULL), 1000,
+ CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret) {
+ dev_err(adp->dsi.dev,
+ "failed to get available write payload FIFO\n");
+ return ret;
+ }
+ }
+
+ word = 0;
+ memcpy(&word, packet->header, sizeof(packet->header));
+ return adp_dsi_gen_pkt_hdr_write(adp, le32_to_cpu(word));
+}
+
+static int adp_dsi_read(struct adp_mipi_drv_private *adp,
+ const struct mipi_dsi_msg *msg)
+{
+ int i, j, ret, len = msg->rx_len;
+ u8 *buf = msg->rx_buf;
+ u32 val;
+
+	/* Wait for the end of the read operation */
+ ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS,
+ val, !(val & GEN_RD_CMD_BUSY),
+ 1000, CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret) {
+ dev_err(adp->dsi.dev, "Timeout during read operation\n");
+ return ret;
+ }
+
+ for (i = 0; i < len; i += 4) {
+ /* Read fifo must not be empty before all bytes are read */
+ ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS,
+ val, !(val & GEN_PLD_R_EMPTY),
+ 1000, CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret) {
+ dev_err(adp->dsi.dev, "Read payload FIFO is empty\n");
+ return ret;
+ }
+
+ val = readl(adp->mipi + DSI_GEN_PLD_DATA);
+ for (j = 0; j < 4 && j + i < len; j++)
+ buf[i + j] = val >> (8 * j);
+ }
+
+ return ret;
+}
+
+static ssize_t adp_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct adp_mipi_drv_private *adp = mipi_to_adp(host);
+ struct mipi_dsi_packet packet;
+ int ret, nb_bytes;
+
+ ret = mipi_dsi_create_packet(&packet, msg);
+ if (ret) {
+ dev_err(adp->dsi.dev, "failed to create packet: %d\n", ret);
+ return ret;
+ }
+
+ ret = adp_dsi_write(adp, &packet);
+ if (ret)
+ return ret;
+
+ if (msg->rx_buf && msg->rx_len) {
+ ret = adp_dsi_read(adp, msg);
+ if (ret)
+ return ret;
+ nb_bytes = msg->rx_len;
+ } else {
+ nb_bytes = packet.size;
+ }
+
+ return nb_bytes;
+}
+
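A hedged usage sketch for completeness: a panel driver sitting behind this host exercises ->transfer() through the standard DSI helpers rather than calling it directly, for example:

static int demo_panel_wake(struct mipi_dsi_device *dsi)
{
	/* Ends up as a DCS short write going through adp_dsi_host_transfer(). */
	return mipi_dsi_dcs_exit_sleep_mode(dsi);
}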
+static int adp_dsi_bind(struct device *dev, struct device *master, void *data)
+{
+ return 0;
+}
+
+static void adp_dsi_unbind(struct device *dev, struct device *master, void *data)
+{
+}
+
+static const struct component_ops adp_dsi_component_ops = {
+ .bind = adp_dsi_bind,
+ .unbind = adp_dsi_unbind,
+};
+
+static int adp_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dev)
+{
+ struct adp_mipi_drv_private *adp = mipi_to_adp(host);
+ struct drm_bridge *next;
+ int ret;
+
+ next = devm_drm_of_get_bridge(adp->dsi.dev, adp->dsi.dev->of_node, 1, 0);
+ if (IS_ERR(next))
+ return PTR_ERR(next);
+
+ adp->next_bridge = next;
+
+ drm_bridge_add(&adp->bridge);
+
+ ret = component_add(host->dev, &adp_dsi_component_ops);
+ if (ret) {
+ pr_err("failed to add dsi_host component: %d\n", ret);
+ drm_bridge_remove(&adp->bridge);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adp_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dev)
+{
+ struct adp_mipi_drv_private *adp = mipi_to_adp(host);
+
+ component_del(host->dev, &adp_dsi_component_ops);
+ drm_bridge_remove(&adp->bridge);
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops adp_dsi_host_ops = {
+ .transfer = adp_dsi_host_transfer,
+ .attach = adp_dsi_host_attach,
+ .detach = adp_dsi_host_detach,
+};
+
+static int adp_dsi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct adp_mipi_drv_private *adp =
+ container_of(bridge, struct adp_mipi_drv_private, bridge);
+
+ return drm_bridge_attach(bridge->encoder, adp->next_bridge, bridge, flags);
+}
+
+static const struct drm_bridge_funcs adp_dsi_bridge_funcs = {
+ .attach = adp_dsi_bridge_attach,
+};
+
+static int adp_mipi_probe(struct platform_device *pdev)
+{
+ struct adp_mipi_drv_private *adp;
+
+ adp = devm_kzalloc(&pdev->dev, sizeof(*adp), GFP_KERNEL);
+ if (!adp)
+ return -ENOMEM;
+
+ adp->mipi = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(adp->mipi)) {
+ dev_err(&pdev->dev, "failed to map mipi mmio");
+ return PTR_ERR(adp->mipi);
+ }
+
+ adp->dsi.dev = &pdev->dev;
+ adp->dsi.ops = &adp_dsi_host_ops;
+ adp->bridge.funcs = &adp_dsi_bridge_funcs;
+ adp->bridge.of_node = pdev->dev.of_node;
+ adp->bridge.type = DRM_MODE_CONNECTOR_DSI;
+ dev_set_drvdata(&pdev->dev, adp);
+ return mipi_dsi_host_register(&adp->dsi);
+}
+
+static void adp_mipi_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct adp_mipi_drv_private *adp = dev_get_drvdata(dev);
+
+ mipi_dsi_host_unregister(&adp->dsi);
+}
+
+static const struct of_device_id adp_mipi_of_match[] = {
+ { .compatible = "apple,h7-display-pipe-mipi", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, adp_mipi_of_match);
+
+static struct platform_driver adp_mipi_platform_driver = {
+ .driver = {
+ .name = "adp-mipi",
+ .of_match_table = adp_mipi_of_match,
+ },
+ .probe = adp_mipi_probe,
+ .remove = adp_mipi_remove,
+};
+
+module_platform_driver(adp_mipi_platform_driver);
+
+MODULE_DESCRIPTION("Apple Display Pipe MIPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/adp/adp_drv.c b/drivers/gpu/drm/adp/adp_drv.c
new file mode 100644
index 000000000000..0eeb9e5fab26
--- /dev/null
+++ b/drivers/gpu/drm/adp/adp_drv.c
@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/component.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#define ADP_INT_STATUS 0x34
+#define ADP_INT_STATUS_INT_MASK 0x7
+#define ADP_INT_STATUS_VBLANK 0x1
+#define ADP_CTRL 0x100
+#define ADP_CTRL_VBLANK_ON 0x12
+#define ADP_CTRL_FIFO_ON 0x601
+#define ADP_SCREEN_SIZE 0x0c
+#define ADP_SCREEN_HSIZE GENMASK(15, 0)
+#define ADP_SCREEN_VSIZE GENMASK(31, 16)
+
+#define ADBE_FIFO 0x10c0
+#define ADBE_FIFO_SYNC 0xc0000000
+
+#define ADBE_BLEND_BYPASS 0x2020
+#define ADBE_BLEND_EN1 0x2028
+#define ADBE_BLEND_EN2 0x2074
+#define ADBE_BLEND_EN3 0x202c
+#define ADBE_BLEND_EN4 0x2034
+#define ADBE_MASK_BUF 0x2200
+
+#define ADBE_SRC_START 0x4040
+#define ADBE_SRC_SIZE 0x4048
+#define ADBE_DST_START 0x4050
+#define ADBE_DST_SIZE 0x4054
+#define ADBE_STRIDE 0x4038
+#define ADBE_FB_BASE 0x4030
+
+#define ADBE_LAYER_EN1 0x4020
+#define ADBE_LAYER_EN2 0x4068
+#define ADBE_LAYER_EN3 0x40b4
+#define ADBE_LAYER_EN4 0x40f4
+#define ADBE_SCALE_CTL 0x40ac
+#define ADBE_SCALE_CTL_BYPASS 0x100000
+
+#define ADBE_LAYER_CTL 0x1038
+#define ADBE_LAYER_CTL_ENABLE 0x10000
+
+#define ADBE_PIX_FMT 0x402c
+#define ADBE_PIX_FMT_XRGB32 0x53e4001
+
+static int adp_open(struct inode *inode, struct file *filp)
+{
+ /*
+ * The modesetting driver does not check the non-desktop connector
+ * property and keeps the device open and locked. If the touchbar daemon
+ * opens the device first, modesetting breaks the whole X session.
+ * Simply refuse to open the device for X11 server processes as
+	 * Simply refuse to open the device for X11 server processes as a
+	 * workaround.
+ if (current->comm[0] == 'X')
+ return -EBUSY;
+
+ return drm_open(inode, filp);
+}
+
+static const struct file_operations adp_fops = {
+ .owner = THIS_MODULE,
+ .open = adp_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .compat_ioctl = drm_compat_ioctl,
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = noop_llseek,
+ .mmap = drm_gem_mmap,
+ .fop_flags = FOP_UNSIGNED_OFFSET,
+ DRM_GEM_DMA_UNMAPPED_AREA_FOPS
+};
+
+static int adp_drm_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args)
+{
+ args->height = ALIGN(args->height, 64);
+ args->size = args->pitch * args->height;
+
+ return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
+}
+
+static const struct drm_driver adp_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .fops = &adp_fops,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(adp_drm_gem_dumb_create),
+ .name = "adp",
+ .desc = "Apple Display Pipe DRM Driver",
+ .major = 0,
+ .minor = 1,
+};
+
+struct adp_drv_private {
+ struct drm_device drm;
+ struct drm_crtc crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ struct drm_bridge *next_bridge;
+ void __iomem *be;
+ void __iomem *fe;
+ u32 *mask_buf;
+ u64 mask_buf_size;
+ dma_addr_t mask_iova;
+ int be_irq;
+ int fe_irq;
+ spinlock_t irq_lock;
+ struct drm_pending_vblank_event *event;
+};
+
+#define to_adp(x) container_of(x, struct adp_drv_private, drm)
+#define crtc_to_adp(x) container_of(x, struct adp_drv_private, crtc)
+
+static int adp_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state;
+ struct drm_crtc_state *crtc_state;
+
+ new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+
+ if (!new_plane_state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state, new_plane_state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ return drm_atomic_helper_check_plane_state(new_plane_state,
+ crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true, true);
+}
+
+static void adp_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct adp_drv_private *adp;
+ struct drm_rect src_rect;
+ struct drm_gem_dma_object *obj;
+ struct drm_framebuffer *fb;
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
+ u32 src_pos, src_size, dst_pos, dst_size;
+
+ if (!plane || !new_state)
+ return;
+
+ fb = new_state->fb;
+ if (!fb)
+ return;
+ adp = to_adp(plane->dev);
+
+ drm_rect_fp_to_int(&src_rect, &new_state->src);
+ src_pos = src_rect.x1 << 16 | src_rect.y1;
+ dst_pos = new_state->dst.x1 << 16 | new_state->dst.y1;
+ src_size = drm_rect_width(&src_rect) << 16 | drm_rect_height(&src_rect);
+ dst_size = drm_rect_width(&new_state->dst) << 16 |
+ drm_rect_height(&new_state->dst);
+ writel(src_pos, adp->be + ADBE_SRC_START);
+ writel(src_size, adp->be + ADBE_SRC_SIZE);
+ writel(dst_pos, adp->be + ADBE_DST_START);
+ writel(dst_size, adp->be + ADBE_DST_SIZE);
+ writel(fb->pitches[0], adp->be + ADBE_STRIDE);
+ obj = drm_fb_dma_get_gem_obj(fb, 0);
+ if (obj)
+ writel(obj->dma_addr + fb->offsets[0], adp->be + ADBE_FB_BASE);
+
+ writel(BIT(0), adp->be + ADBE_LAYER_EN1);
+ writel(BIT(0), adp->be + ADBE_LAYER_EN2);
+ writel(BIT(0), adp->be + ADBE_LAYER_EN3);
+ writel(BIT(0), adp->be + ADBE_LAYER_EN4);
+ writel(ADBE_SCALE_CTL_BYPASS, adp->be + ADBE_SCALE_CTL);
+ writel(ADBE_LAYER_CTL_ENABLE | BIT(0), adp->be + ADBE_LAYER_CTL);
+ writel(ADBE_PIX_FMT_XRGB32, adp->be + ADBE_PIX_FMT);
+}
+
+static void adp_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct adp_drv_private *adp = to_adp(plane->dev);
+
+ writel(0x0, adp->be + ADBE_LAYER_EN1);
+ writel(0x0, adp->be + ADBE_LAYER_EN2);
+ writel(0x0, adp->be + ADBE_LAYER_EN3);
+ writel(0x0, adp->be + ADBE_LAYER_EN4);
+ writel(ADBE_LAYER_CTL_ENABLE, adp->be + ADBE_LAYER_CTL);
+}
+
+static const struct drm_plane_helper_funcs adp_plane_helper_funcs = {
+ .atomic_check = adp_plane_atomic_check,
+ .atomic_update = adp_plane_atomic_update,
+ .atomic_disable = adp_plane_atomic_disable,
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS
+};
+
+static const struct drm_plane_funcs adp_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ DRM_GEM_SHADOW_PLANE_FUNCS
+};
+
+static const u32 plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+#define ALL_CRTCS 1
+
+static struct drm_plane *adp_plane_new(struct adp_drv_private *adp)
+{
+ struct drm_device *drm = &adp->drm;
+ struct drm_plane *plane;
+
+ plane = __drmm_universal_plane_alloc(drm, sizeof(struct drm_plane), 0,
+ ALL_CRTCS, &adp_plane_funcs,
+ plane_formats, ARRAY_SIZE(plane_formats),
+ NULL, DRM_PLANE_TYPE_PRIMARY, "plane");
+	if (IS_ERR(plane)) {
+		drm_err(drm, "failed to allocate plane");
+		return plane;
+	}
+
+ drm_plane_helper_add(plane, &adp_plane_helper_funcs);
+ return plane;
+}
+
+static void adp_enable_vblank(struct adp_drv_private *adp)
+{
+ u32 cur_ctrl;
+
+ writel(ADP_INT_STATUS_INT_MASK, adp->fe + ADP_INT_STATUS);
+
+ cur_ctrl = readl(adp->fe + ADP_CTRL);
+ writel(cur_ctrl | ADP_CTRL_VBLANK_ON, adp->fe + ADP_CTRL);
+}
+
+static int adp_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct adp_drv_private *adp = to_adp(dev);
+
+ adp_enable_vblank(adp);
+
+ return 0;
+}
+
+static void adp_disable_vblank(struct adp_drv_private *adp)
+{
+ u32 cur_ctrl;
+
+ cur_ctrl = readl(adp->fe + ADP_CTRL);
+ writel(cur_ctrl & ~ADP_CTRL_VBLANK_ON, adp->fe + ADP_CTRL);
+ writel(ADP_INT_STATUS_INT_MASK, adp->fe + ADP_INT_STATUS);
+}
+
+static void adp_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct adp_drv_private *adp = to_adp(dev);
+
+ adp_disable_vblank(adp);
+}
+
+static void adp_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct adp_drv_private *adp = crtc_to_adp(crtc);
+
+ writel(BIT(0), adp->be + ADBE_BLEND_EN2);
+ writel(BIT(4), adp->be + ADBE_BLEND_EN1);
+ writel(BIT(0), adp->be + ADBE_BLEND_EN3);
+ writel(BIT(0), adp->be + ADBE_BLEND_BYPASS);
+ writel(BIT(0), adp->be + ADBE_BLEND_EN4);
+}
+
+static void adp_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct adp_drv_private *adp = crtc_to_adp(crtc);
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc);
+
+ drm_atomic_helper_disable_planes_on_crtc(old_state, false);
+
+ writel(0x0, adp->be + ADBE_BLEND_EN2);
+ writel(0x0, adp->be + ADBE_BLEND_EN1);
+ writel(0x0, adp->be + ADBE_BLEND_EN3);
+ writel(0x0, adp->be + ADBE_BLEND_BYPASS);
+ writel(0x0, adp->be + ADBE_BLEND_EN4);
+ drm_crtc_vblank_off(crtc);
+}
+
+static void adp_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ u32 frame_num = 1;
+ struct adp_drv_private *adp = crtc_to_adp(crtc);
+ struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state, crtc);
+ u64 new_size = ALIGN(new_state->mode.hdisplay *
+ new_state->mode.vdisplay * 4, PAGE_SIZE);
+
+ if (new_size != adp->mask_buf_size) {
+ if (adp->mask_buf)
+ dma_free_coherent(crtc->dev->dev, adp->mask_buf_size,
+ adp->mask_buf, adp->mask_iova);
+ adp->mask_buf = NULL;
+ if (new_size != 0) {
+ adp->mask_buf = dma_alloc_coherent(crtc->dev->dev, new_size,
+ &adp->mask_iova, GFP_KERNEL);
+			if (adp->mask_buf) {
+				memset(adp->mask_buf, 0xFF, new_size);
+				writel(adp->mask_iova, adp->be + ADBE_MASK_BUF);
+			}
+ }
+ adp->mask_buf_size = new_size;
+ }
+ writel(ADBE_FIFO_SYNC | frame_num, adp->be + ADBE_FIFO);
+ //FIXME: use adbe flush interrupt
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ drm_crtc_vblank_get(crtc);
+ adp->event = crtc->state->event;
+ }
+ crtc->state->event = NULL;
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static const struct drm_crtc_funcs adp_crtc_funcs = {
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = adp_crtc_enable_vblank,
+ .disable_vblank = adp_crtc_disable_vblank,
+};
+
+
+static const struct drm_crtc_helper_funcs adp_crtc_helper_funcs = {
+ .atomic_enable = adp_crtc_atomic_enable,
+ .atomic_disable = adp_crtc_atomic_disable,
+ .atomic_flush = adp_crtc_atomic_flush,
+};
+
+static int adp_setup_crtc(struct adp_drv_private *adp)
+{
+ struct drm_device *drm = &adp->drm;
+ struct drm_plane *primary;
+ int ret;
+
+ primary = adp_plane_new(adp);
+ if (IS_ERR(primary))
+ return PTR_ERR(primary);
+
+ ret = drm_crtc_init_with_planes(drm, &adp->crtc, primary,
+ NULL, &adp_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+
+ drm_crtc_helper_add(&adp->crtc, &adp_crtc_helper_funcs);
+ return 0;
+}
+
+static const struct drm_mode_config_funcs adp_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int adp_setup_mode_config(struct adp_drv_private *adp)
+{
+ struct drm_device *drm = &adp->drm;
+ int ret;
+ u32 size;
+
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
+
+ /*
+	 * Query the screen size and restrict the frame buffer size to the
+	 * screen size aligned to the next multiple of 64. This is not strictly
+	 * necessary but can be used as a simple check for non-desktop devices:
+	 * Xorg's modesetting driver does not care about the connector
+	 * "non-desktop" property, but the max frame buffer width or height can
+	 * easily be checked and a device can be rejected if, for example, the
+	 * max width/height is smaller than 120.
+	 * Any touchbar daemon is not limited by this small framebuffer size.
+ */
+ size = readl(adp->fe + ADP_SCREEN_SIZE);
+
+ drm->mode_config.min_width = 32;
+ drm->mode_config.min_height = 32;
+ drm->mode_config.max_width = ALIGN(FIELD_GET(ADP_SCREEN_HSIZE, size), 64);
+ drm->mode_config.max_height = ALIGN(FIELD_GET(ADP_SCREEN_VSIZE, size), 64);
+ drm->mode_config.preferred_depth = 24;
+ drm->mode_config.prefer_shadow = 0;
+ drm->mode_config.funcs = &adp_mode_config_funcs;
+
+ ret = adp_setup_crtc(adp);
+ if (ret) {
+ drm_err(drm, "failed to create crtc");
+ return ret;
+ }
+
+ adp->encoder = drmm_plain_encoder_alloc(drm, NULL, DRM_MODE_ENCODER_DSI, NULL);
+ if (IS_ERR(adp->encoder)) {
+ drm_err(drm, "failed to init encoder");
+ return PTR_ERR(adp->encoder);
+ }
+ adp->encoder->possible_crtcs = ALL_CRTCS;
+
+ ret = drm_bridge_attach(adp->encoder, adp->next_bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret) {
+ drm_err(drm, "failed to init bridge chain");
+ return ret;
+ }
+
+ adp->connector = drm_bridge_connector_init(drm, adp->encoder);
+ if (IS_ERR(adp->connector))
+ return PTR_ERR(adp->connector);
+
+ drm_connector_attach_encoder(adp->connector, adp->encoder);
+
+ ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (ret < 0) {
+ drm_err(drm, "failed to initialize vblank");
+ return ret;
+ }
+
+ drm_mode_config_reset(drm);
+
+ return 0;
+}
+
+static int adp_parse_of(struct platform_device *pdev, struct adp_drv_private *adp)
+{
+ struct device *dev = &pdev->dev;
+
+ adp->be = devm_platform_ioremap_resource_byname(pdev, "be");
+ if (IS_ERR(adp->be)) {
+ dev_err(dev, "failed to map display backend mmio");
+ return PTR_ERR(adp->be);
+ }
+
+ adp->fe = devm_platform_ioremap_resource_byname(pdev, "fe");
+ if (IS_ERR(adp->fe)) {
+ dev_err(dev, "failed to map display pipe mmio");
+ return PTR_ERR(adp->fe);
+ }
+
+ adp->be_irq = platform_get_irq_byname(pdev, "be");
+ if (adp->be_irq < 0)
+ return adp->be_irq;
+
+ adp->fe_irq = platform_get_irq_byname(pdev, "fe");
+ if (adp->fe_irq < 0)
+ return adp->fe_irq;
+
+ return 0;
+}
+
+static irqreturn_t adp_fe_irq(int irq, void *arg)
+{
+ struct adp_drv_private *adp = (struct adp_drv_private *)arg;
+ u32 int_status;
+ u32 int_ctl;
+
+ spin_lock(&adp->irq_lock);
+
+ int_status = readl(adp->fe + ADP_INT_STATUS);
+ if (int_status & ADP_INT_STATUS_VBLANK) {
+ drm_crtc_handle_vblank(&adp->crtc);
+ spin_lock(&adp->crtc.dev->event_lock);
+ if (adp->event) {
+ int_ctl = readl(adp->fe + ADP_CTRL);
+ if ((int_ctl & 0xF00) == 0x600) {
+ drm_crtc_send_vblank_event(&adp->crtc, adp->event);
+ adp->event = NULL;
+ drm_crtc_vblank_put(&adp->crtc);
+ }
+ }
+ spin_unlock(&adp->crtc.dev->event_lock);
+ }
+
+ writel(int_status, adp->fe + ADP_INT_STATUS);
+
+ spin_unlock(&adp->irq_lock);
+
+ return IRQ_HANDLED;
+}
+
+static int adp_drm_bind(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct adp_drv_private *adp = to_adp(drm);
+ int err;
+
+ adp_disable_vblank(adp);
+ writel(ADP_CTRL_FIFO_ON | ADP_CTRL_VBLANK_ON, adp->fe + ADP_CTRL);
+
+ adp->next_bridge = drmm_of_get_bridge(&adp->drm, dev->of_node, 0, 0);
+ if (IS_ERR(adp->next_bridge)) {
+ dev_err(dev, "failed to find next bridge");
+ return PTR_ERR(adp->next_bridge);
+ }
+
+ err = adp_setup_mode_config(adp);
+ if (err < 0)
+ return err;
+
+ err = request_irq(adp->fe_irq, adp_fe_irq, 0, "adp-fe", adp);
+ if (err)
+ return err;
+
+ err = drm_dev_register(&adp->drm, 0);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void adp_drm_unbind(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct adp_drv_private *adp = to_adp(drm);
+
+ drm_dev_unregister(drm);
+ drm_atomic_helper_shutdown(drm);
+ free_irq(adp->fe_irq, adp);
+}
+
+static const struct component_master_ops adp_master_ops = {
+ .bind = adp_drm_bind,
+ .unbind = adp_drm_unbind,
+};
+
+static int compare_dev(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int adp_probe(struct platform_device *pdev)
+{
+ struct device_node *port;
+ struct component_match *match = NULL;
+ struct adp_drv_private *adp;
+ int err;
+
+ adp = devm_drm_dev_alloc(&pdev->dev, &adp_driver, struct adp_drv_private, drm);
+ if (IS_ERR(adp))
+ return PTR_ERR(adp);
+
+ spin_lock_init(&adp->irq_lock);
+
+ dev_set_drvdata(&pdev->dev, &adp->drm);
+
+ err = adp_parse_of(pdev, adp);
+ if (err < 0)
+ return err;
+
+ port = of_graph_get_remote_node(pdev->dev.of_node, 0, 0);
+ if (!port)
+ return -ENODEV;
+
+ drm_of_component_match_add(&pdev->dev, &match, compare_dev, port);
+ of_node_put(port);
+
+ return component_master_add_with_match(&pdev->dev, &adp_master_ops, match);
+}
+
+static void adp_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &adp_master_ops);
+ dev_set_drvdata(&pdev->dev, NULL);
+}
+
+static const struct of_device_id adp_of_match[] = {
+ { .compatible = "apple,h7-display-pipe", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, adp_of_match);
+
+static struct platform_driver adp_platform_driver = {
+ .driver = {
+ .name = "adp",
+ .of_match_table = adp_of_match,
+ },
+ .probe = adp_probe,
+ .remove = adp_remove,
+};
+
+module_platform_driver(adp_platform_driver);
+
+MODULE_DESCRIPTION("Apple Display Pipe DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 5b21674b07fb..aacc810cabb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -65,7 +65,8 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
amdgpu_fw_attestation.o amdgpu_securedisplay.o \
amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \
- amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o amdgpu_dev_coredump.o
+ amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o amdgpu_dev_coredump.o \
+ amdgpu_cper.o
amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 69895fccb474..6d83ccfa42ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -109,6 +109,7 @@
#include "amdgpu_mca.h"
#include "amdgpu_aca.h"
#include "amdgpu_ras.h"
+#include "amdgpu_cper.h"
#include "amdgpu_xcp.h"
#include "amdgpu_seq64.h"
#include "amdgpu_reg_state.h"
@@ -415,6 +416,7 @@ bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);
bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev,
u8 *bios, u32 length_bytes);
+void amdgpu_bios_release(struct amdgpu_device *adev);
/*
* Clocks
*/
@@ -1090,6 +1092,9 @@ struct amdgpu_device {
/* ACA */
struct amdgpu_aca aca;
+ /* CPER */
+ struct amdgpu_cper cper;
+
struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
uint32_t harvest_ip_mask;
int num_ip_blocks;
@@ -1149,6 +1154,7 @@ struct amdgpu_device {
struct ratelimit_state throttling_logging_rs;
uint32_t ras_hw_enabled;
uint32_t ras_enabled;
+ bool ras_default_ecc_enabled;
bool no_hw_access;
struct pci_saved_state *pci_state;
@@ -1186,12 +1192,24 @@ struct amdgpu_device {
bool debug_use_vram_fw_buf;
bool debug_enable_ras_aca;
bool debug_exp_resets;
+ bool debug_disable_gpu_ring_reset;
- bool enforce_isolation[MAX_XCP];
- /* Added this mutex for cleaner shader isolation between GFX and compute processes */
+ /* Protection for the following isolation structure */
struct mutex enforce_isolation_mutex;
+ bool enforce_isolation[MAX_XCP];
+ struct amdgpu_isolation {
+ void *owner;
+ struct dma_fence *spearhead;
+ struct amdgpu_sync active;
+ struct amdgpu_sync prev;
+ } isolation[MAX_XCP];
struct amdgpu_init_level *init_lvl;
+
+ /* This flag is used to determine how VRAM allocations are handled for APUs
+ * in KFD: VRAM or GTT.
+ */
+ bool apu_prefer_gtt;
};
static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
@@ -1470,6 +1488,9 @@ void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev);
struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
struct dma_fence *gang);
+struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_job *job);
bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);
ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring);
ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
index 9d6345146495..dc47f5fd4ea1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -30,16 +30,6 @@
typedef int bank_handler_t(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type, void *data);
-struct aca_banks {
- int nr_banks;
- struct list_head list;
-};
-
-struct aca_hwip {
- int hwid;
- int mcatype;
-};
-
static struct aca_hwip aca_hwid_mcatypes[ACA_HWIP_TYPE_COUNT] = {
ACA_BANK_HWID(SMU, 0x01, 0x01),
ACA_BANK_HWID(PCS_XGMI, 0x50, 0x00),
@@ -111,7 +101,7 @@ static struct aca_regs_dump {
{"STATUS", ACA_REG_IDX_STATUS},
{"ADDR", ACA_REG_IDX_ADDR},
{"MISC", ACA_REG_IDX_MISC0},
- {"CONFIG", ACA_REG_IDX_CONFG},
+ {"CONFIG", ACA_REG_IDX_CONFIG},
{"IPID", ACA_REG_IDX_IPID},
{"SYND", ACA_REG_IDX_SYND},
{"DESTAT", ACA_REG_IDX_DESTAT},
@@ -168,7 +158,7 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_
if (ret)
return ret;
- bank.type = type;
+ bank.smu_err_type = type;
aca_smu_bank_dump(adev, i, count, &bank, qctx);
@@ -394,6 +384,56 @@ static bool aca_bank_should_update(struct amdgpu_device *adev, enum aca_smu_type
return ret;
}
+static void aca_banks_generate_cper(struct amdgpu_device *adev,
+ enum aca_smu_type type,
+ struct aca_banks *banks,
+ int count)
+{
+ struct aca_bank_node *node;
+ struct aca_bank *bank;
+ int r;
+
+ if (!adev->cper.enabled)
+ return;
+
+ if (!banks || !count) {
+ dev_warn(adev->dev, "fail to generate cper records\n");
+ return;
+ }
+
+ /* UEs must be encoded into separate CPER entries */
+ if (type == ACA_SMU_TYPE_UE) {
+ struct aca_banks de_banks;
+
+ aca_banks_init(&de_banks);
+ list_for_each_entry(node, &banks->list, node) {
+ bank = &node->bank;
+ if (bank->aca_err_type == ACA_ERROR_TYPE_DEFERRED) {
+ r = aca_banks_add_bank(&de_banks, bank);
+ if (r)
+ dev_warn(adev->dev, "fail to add de banks, ret = %d\n", r);
+ } else {
+ if (amdgpu_cper_generate_ue_record(adev, bank))
+ dev_warn(adev->dev, "fail to generate ue cper records\n");
+ }
+ }
+
+ if (!list_empty(&de_banks.list)) {
+ if (amdgpu_cper_generate_ce_records(adev, &de_banks, de_banks.nr_banks))
+ dev_warn(adev->dev, "fail to generate de cper records\n");
+ }
+
+ aca_banks_release(&de_banks);
+ } else {
+ /*
+		 * SMU_TYPE_CE banks are combined into one CPER entry;
+		 * they could be CEs, DEs, or both.
+ */
+ if (amdgpu_cper_generate_ce_records(adev, banks, count))
+ dev_warn(adev->dev, "fail to generate ce cper records\n");
+ }
+}
+
static int aca_banks_update(struct amdgpu_device *adev, enum aca_smu_type type,
bank_handler_t handler, struct ras_query_context *qctx, void *data)
{
@@ -431,6 +471,8 @@ static int aca_banks_update(struct amdgpu_device *adev, enum aca_smu_type type,
if (ret)
goto err_release_banks;
+ aca_banks_generate_cper(adev, type, &banks, count);
+
err_release_banks:
aca_banks_release(&banks);
@@ -516,6 +558,10 @@ static int __aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *h
if (ret)
return ret;
+	/* DEs may be contained in CEs or UEs */
+ if (type != ACA_ERROR_TYPE_DEFERRED)
+ aca_log_aca_error(handle, ACA_ERROR_TYPE_DEFERRED, err_data);
+
return aca_log_aca_error(handle, type, err_data);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
index f3289d289913..6b180f1b33fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
@@ -76,12 +76,24 @@ struct ras_query_context;
#define mmSMNAID_XCD1_MCA_SMU 0x38430400 /* SMN AID XCD1 */
#define mmSMNXCD_XCD0_MCA_SMU 0x40430400 /* SMN XCD XCD0 */
+#define ACA_BANK_ERR_IS_DEFFERED(bank) \
+ (ACA_REG__STATUS__POISON((bank)->regs[ACA_REG_IDX_STATUS]) || \
+ ACA_REG__STATUS__DEFERRED((bank)->regs[ACA_REG_IDX_STATUS]))
+
+#define ACA_BANK_ERR_CE_DE_DECODE(bank) \
+ (ACA_BANK_ERR_IS_DEFFERED(bank) ? ACA_ERROR_TYPE_DEFERRED : \
+ ACA_ERROR_TYPE_CE)
+
+#define ACA_BANK_ERR_UE_DE_DECODE(bank) \
+ (ACA_BANK_ERR_IS_DEFFERED(bank) ? ACA_ERROR_TYPE_DEFERRED : \
+ ACA_ERROR_TYPE_UE)
+
enum aca_reg_idx {
ACA_REG_IDX_CTL = 0,
ACA_REG_IDX_STATUS = 1,
ACA_REG_IDX_ADDR = 2,
ACA_REG_IDX_MISC0 = 3,
- ACA_REG_IDX_CONFG = 4,
+ ACA_REG_IDX_CONFIG = 4,
ACA_REG_IDX_IPID = 5,
ACA_REG_IDX_SYND = 6,
ACA_REG_IDX_DESTAT = 8,
@@ -108,13 +120,20 @@ enum aca_error_type {
};
enum aca_smu_type {
+ ACA_SMU_TYPE_INVALID = -1,
ACA_SMU_TYPE_UE = 0,
ACA_SMU_TYPE_CE,
ACA_SMU_TYPE_COUNT,
};
+struct aca_hwip {
+ int hwid;
+ int mcatype;
+};
+
struct aca_bank {
- enum aca_smu_type type;
+ enum aca_error_type aca_err_type;
+ enum aca_smu_type smu_err_type;
u64 regs[ACA_MAX_REGS_COUNT];
};
@@ -123,6 +142,11 @@ struct aca_bank_node {
struct list_head node;
};
+struct aca_banks {
+ int nr_banks;
+ struct list_head list;
+};
+
struct aca_bank_info {
int die_id;
int socket_id;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index deb0785350e8..4926996f94da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -579,7 +579,7 @@ static int acp_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static bool acp_is_idle(void *handle)
+static bool acp_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index b8d4e07d2043..b7f8f2ff143d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -394,6 +394,10 @@ static int amdgpu_atif_query_backlight_caps(struct amdgpu_atif *atif)
characteristics.max_input_signal;
atif->backlight_caps.ac_level = characteristics.ac_level;
atif->backlight_caps.dc_level = characteristics.dc_level;
+ atif->backlight_caps.data_points = characteristics.number_of_points;
+ memcpy(atif->backlight_caps.luminance_data,
+ characteristics.data_points,
+ sizeof(atif->backlight_caps.luminance_data));
out:
kfree(info);
return err;
@@ -1277,11 +1281,7 @@ void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps)
{
struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
- caps->caps_valid = atif->backlight_caps.caps_valid;
- caps->min_input_signal = atif->backlight_caps.min_input_signal;
- caps->max_input_signal = atif->backlight_caps.max_input_signal;
- caps->ac_level = atif->backlight_caps.ac_level;
- caps->dc_level = atif->backlight_caps.dc_level;
+ memcpy(caps, &atif->backlight_caps, sizeof(*caps));
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 2c1b38c5cfc6..4cec3a873995 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -459,7 +459,7 @@ void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
else
mem_info->local_mem_size_private =
KFD_XCP_MEMORY_SIZE(adev, xcp->id);
- } else if (adev->flags & AMD_IS_APU) {
+ } else if (adev->apu_prefer_gtt) {
mem_info->local_mem_size_public = (ttm_tt_pages_limit() << PAGE_SHIFT);
mem_info->local_mem_size_private = 0;
} else {
@@ -555,48 +555,6 @@ out_put:
return r;
}
-uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
- struct amdgpu_device *src)
-{
- struct amdgpu_device *peer_adev = src;
- struct amdgpu_device *adev = dst;
- int ret = amdgpu_xgmi_get_hops_count(adev, peer_adev);
-
- if (ret < 0) {
- DRM_ERROR("amdgpu: failed to get xgmi hops count between node %d and %d. ret = %d\n",
- adev->gmc.xgmi.physical_node_id,
- peer_adev->gmc.xgmi.physical_node_id, ret);
- ret = 0;
- }
- return (uint8_t)ret;
-}
-
-int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
- struct amdgpu_device *src,
- bool is_min)
-{
- struct amdgpu_device *adev = dst, *peer_adev;
- int num_links;
-
- if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2))
- return 0;
-
- if (src)
- peer_adev = src;
-
- /* num links returns 0 for indirect peers since indirect route is unknown. */
- num_links = is_min ? 1 : amdgpu_xgmi_get_num_links(adev, peer_adev);
- if (num_links < 0) {
- DRM_ERROR("amdgpu: failed to get xgmi num links between node %d and %d. ret = %d\n",
- adev->gmc.xgmi.physical_node_id,
- peer_adev->gmc.xgmi.physical_node_id, num_links);
- num_links = 0;
- }
-
- /* Aldebaran xGMI DPM is defeatured so assume x16 x 25Gbps for bandwidth. */
- return (num_links * 16 * 25000)/BITS_PER_BYTE;
-}
-
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min)
{
int num_lanes_shift = (is_min ? ffs(adev->pm.pcie_mlw_mask) :
@@ -818,7 +776,7 @@ u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id)
}
do_div(tmp, adev->xcp_mgr->num_xcp_per_mem_partition);
return ALIGN_DOWN(tmp, PAGE_SIZE);
- } else if (adev->flags & AMD_IS_APU) {
+ } else if (adev->apu_prefer_gtt) {
return (ttm_tt_pages_limit() << PAGE_SHIFT);
} else {
return adev->gmc.real_vram_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 8af67f18500a..b6ca41859b53 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -47,6 +47,7 @@ enum TLB_FLUSH_TYPE {
};
struct amdgpu_device;
+struct kfd_process_device;
struct amdgpu_reset_context;
enum kfd_mem_attachment_type {
@@ -192,7 +193,7 @@ int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data);
#if IS_ENABLED(CONFIG_HSA_AMD)
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
+void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo);
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
unsigned long cur_seq, struct kgd_mem *mem);
int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
@@ -212,9 +213,8 @@ struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
}
static inline
-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo)
{
- return 0;
}
static inline
@@ -254,11 +254,6 @@ int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
uint64_t *bo_size, void *metadata_buffer,
size_t buffer_size, uint32_t *metadata_size,
uint32_t *flags, int8_t *xcp_id);
-uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
- struct amdgpu_device *src);
-int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
- struct amdgpu_device *src,
- bool is_min);
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min);
int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
uint32_t *payload);
@@ -299,14 +294,10 @@ bool amdgpu_amdkfd_compute_active(struct amdgpu_device *adev, uint32_t node_id);
(&((struct amdgpu_fpriv *) \
((struct drm_file *)(drm_priv))->driver_priv)->vm)
-int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
- struct amdgpu_vm *avm, u32 pasid);
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
struct amdgpu_vm *avm,
void **process_info,
struct dma_fence **ef);
-void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
- void *drm_priv);
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv);
size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
uint8_t xcp_id);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
index 8dfdb18197c4..7e9f7a280c1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
@@ -189,8 +189,9 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = {
.set_address_watch = kgd_gfx_aldebaran_set_address_watch,
.clear_address_watch = kgd_gfx_v9_clear_address_watch,
.get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
- .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
+ .build_dequeue_wait_counts_packet_info = kgd_gfx_v9_build_dequeue_wait_counts_packet_info,
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
.hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
.hqd_reset = kgd_gfx_v9_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v9_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 9abf29b58ac7..ffbaa8bc5eea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -415,9 +415,10 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
.set_address_watch = kgd_gfx_v9_set_address_watch,
.clear_address_watch = kgd_gfx_v9_clear_address_watch,
.get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
- .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
+ .build_dequeue_wait_counts_packet_info = kgd_gfx_v9_build_dequeue_wait_counts_packet_info,
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
.hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
- .hqd_reset = kgd_gfx_v9_hqd_reset
+ .hqd_reset = kgd_gfx_v9_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v9_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
index e2ae714a700f..89a45a9218f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
@@ -509,6 +509,17 @@ static uint32_t kgd_gfx_v9_4_3_clear_address_watch(struct amdgpu_device *adev,
return 0;
}
+static uint32_t kgd_gfx_v9_4_3_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ uint32_t reg_offset = get_sdma_rlc_reg_offset(adev, engine, queue);
+ uint32_t status = RREG32(regSDMA_RLC0_CONTEXT_STATUS + reg_offset);
+ uint32_t doorbell_off = RREG32(regSDMA_RLC0_DOORBELL_OFFSET + reg_offset);
+ bool is_active = !!REG_GET_FIELD(status, SDMA_RLC0_CONTEXT_STATUS, SELECTED);
+
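+ /* Only report a doorbell while this RLC queue is selected; idle queues return 0. */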
+ return is_active ? doorbell_off >> 2 : 0;
+}
+
const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_4_3_set_pasid_vmid_mapping,
@@ -530,8 +541,8 @@ const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = {
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
.program_trap_handler_settings =
kgd_gfx_v9_program_trap_handler_settings,
- .build_grace_period_packet_info =
- kgd_gfx_v9_build_grace_period_packet_info,
+ .build_dequeue_wait_counts_packet_info =
+ kgd_gfx_v9_build_dequeue_wait_counts_packet_info,
.get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
.enable_debug_trap = kgd_aldebaran_enable_debug_trap,
.disable_debug_trap = kgd_gfx_v9_4_3_disable_debug_trap,
@@ -543,5 +554,6 @@ const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = {
.set_address_watch = kgd_gfx_v9_4_3_set_address_watch,
.clear_address_watch = kgd_gfx_v9_4_3_clear_address_watch,
.hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
- .hqd_reset = kgd_gfx_v9_hqd_reset
+ .hqd_reset = kgd_gfx_v9_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v9_4_3_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 62176d607bef..04ef0ca10541 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -1021,25 +1021,25 @@ void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev,
*wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2));
}
-void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev,
+void kgd_gfx_v10_build_dequeue_wait_counts_packet_info(struct amdgpu_device *adev,
uint32_t wait_times,
- uint32_t grace_period,
+ uint32_t sch_wave,
+ uint32_t que_sleep,
uint32_t *reg_offset,
uint32_t *reg_data)
{
*reg_data = wait_times;
- /*
- * The CP cannont handle a 0 grace period input and will result in
- * an infinite grace period being set so set to 1 to prevent this.
- */
- if (grace_period == 0)
- grace_period = 1;
-
- *reg_data = REG_SET_FIELD(*reg_data,
- CP_IQ_WAIT_TIME2,
- SCH_WAVE,
- grace_period);
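+ /* Only program the fields that are requested; a zero value keeps what is already in wait_times. */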
+ if (sch_wave)
+ *reg_data = REG_SET_FIELD(*reg_data,
+ CP_IQ_WAIT_TIME2,
+ SCH_WAVE,
+ sch_wave);
+ if (que_sleep)
+ *reg_data = REG_SET_FIELD(*reg_data,
+ CP_IQ_WAIT_TIME2,
+ QUE_SLEEP,
+ que_sleep);
*reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2);
}
@@ -1084,6 +1084,12 @@ uint64_t kgd_gfx_v10_hqd_reset(struct amdgpu_device *adev,
return 0;
}
+uint32_t kgd_gfx_v10_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@@ -1109,8 +1115,9 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.set_address_watch = kgd_gfx_v10_set_address_watch,
.clear_address_watch = kgd_gfx_v10_clear_address_watch,
.get_iq_wait_times = kgd_gfx_v10_get_iq_wait_times,
- .build_grace_period_packet_info = kgd_gfx_v10_build_grace_period_packet_info,
+ .build_dequeue_wait_counts_packet_info = kgd_gfx_v10_build_dequeue_wait_counts_packet_info,
.program_trap_handler_settings = program_trap_handler_settings,
.hqd_get_pq_addr = kgd_gfx_v10_hqd_get_pq_addr,
- .hqd_reset = kgd_gfx_v10_hqd_reset
+ .hqd_reset = kgd_gfx_v10_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v10_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
index 9efd2dd4fdd7..a4c607c88178 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
@@ -51,9 +51,10 @@ uint32_t kgd_gfx_v10_clear_address_watch(struct amdgpu_device *adev,
void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev,
uint32_t *wait_times,
uint32_t inst);
-void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev,
+void kgd_gfx_v10_build_dequeue_wait_counts_packet_info(struct amdgpu_device *adev,
uint32_t wait_times,
- uint32_t grace_period,
+ uint32_t sch_wave,
+ uint32_t que_sleep,
uint32_t *reg_offset,
uint32_t *reg_data);
uint64_t kgd_gfx_v10_hqd_get_pq_addr(struct amdgpu_device *adev,
@@ -65,3 +66,5 @@ uint64_t kgd_gfx_v10_hqd_reset(struct amdgpu_device *adev,
uint32_t queue_id,
uint32_t inst,
unsigned int utimeout);
+uint32_t kgd_gfx_v10_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index c718bedda0ca..6d08bc2781a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -673,7 +673,7 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
.set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3,
.program_trap_handler_settings = program_trap_handler_settings_v10_3,
.get_iq_wait_times = kgd_gfx_v10_get_iq_wait_times,
- .build_grace_period_packet_info = kgd_gfx_v10_build_grace_period_packet_info,
+ .build_dequeue_wait_counts_packet_info = kgd_gfx_v10_build_dequeue_wait_counts_packet_info,
.enable_debug_trap = kgd_gfx_v10_enable_debug_trap,
.disable_debug_trap = kgd_gfx_v10_disable_debug_trap,
.validate_trap_override_request = kgd_gfx_v10_validate_trap_override_request,
@@ -682,5 +682,6 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
.set_address_watch = kgd_gfx_v10_set_address_watch,
.clear_address_watch = kgd_gfx_v10_clear_address_watch,
.hqd_get_pq_addr = kgd_gfx_v10_hqd_get_pq_addr,
- .hqd_reset = kgd_gfx_v10_hqd_reset
+ .hqd_reset = kgd_gfx_v10_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v10_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
index a4ba49cb22db..e0e6a6a49d90 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
@@ -800,6 +800,12 @@ static uint64_t kgd_gfx_v11_hqd_reset(struct amdgpu_device *adev,
return 0;
}
+static uint32_t kgd_gfx_v11_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v11_kfd2kgd = {
.program_sh_mem_settings = program_sh_mem_settings_v11,
.set_pasid_vmid_mapping = set_pasid_vmid_mapping_v11,
@@ -824,5 +830,6 @@ const struct kfd2kgd_calls gfx_v11_kfd2kgd = {
.set_address_watch = kgd_gfx_v11_set_address_watch,
.clear_address_watch = kgd_gfx_v11_clear_address_watch,
.hqd_get_pq_addr = kgd_gfx_v11_hqd_get_pq_addr,
- .hqd_reset = kgd_gfx_v11_hqd_reset
+ .hqd_reset = kgd_gfx_v11_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v11_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c
index 0dfe7093bd8a..6f0dc23c901b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c
@@ -361,6 +361,12 @@ static uint32_t kgd_gfx_v12_clear_address_watch(struct amdgpu_device *adev,
return 0;
}
+static uint32_t kgd_gfx_v12_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v12_kfd2kgd = {
.init_interrupts = init_interrupts_v12,
.hqd_dump = hqd_dump_v12,
@@ -374,4 +380,5 @@ const struct kfd2kgd_calls gfx_v12_kfd2kgd = {
.set_wave_launch_mode = kgd_gfx_v12_set_wave_launch_mode,
.set_address_watch = kgd_gfx_v12_set_address_watch,
.clear_address_watch = kgd_gfx_v12_clear_address_watch,
+ .hqd_sdma_get_doorbell = kgd_gfx_v12_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 441568163e20..088d09cc7a72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -1077,25 +1077,25 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev,
adev->gfx.cu_info.max_waves_per_simd;
}
-void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
+void kgd_gfx_v9_build_dequeue_wait_counts_packet_info(struct amdgpu_device *adev,
uint32_t wait_times,
- uint32_t grace_period,
+ uint32_t sch_wave,
+ uint32_t que_sleep,
uint32_t *reg_offset,
uint32_t *reg_data)
{
*reg_data = wait_times;
- /*
- * The CP cannot handle a 0 grace period input and will result in
- * an infinite grace period being set so set to 1 to prevent this.
- */
- if (grace_period == 0)
- grace_period = 1;
-
- *reg_data = REG_SET_FIELD(*reg_data,
- CP_IQ_WAIT_TIME2,
- SCH_WAVE,
- grace_period);
+ if (sch_wave)
+ *reg_data = REG_SET_FIELD(*reg_data,
+ CP_IQ_WAIT_TIME2,
+ SCH_WAVE,
+ sch_wave);
+ if (que_sleep)
+ *reg_data = REG_SET_FIELD(*reg_data,
+ CP_IQ_WAIT_TIME2,
+ QUE_SLEEP,
+ que_sleep);
*reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2);
}
@@ -1131,9 +1131,6 @@ uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev,
uint32_t low, high;
uint64_t queue_addr = 0;
- if (!amdgpu_gpu_recovery)
- return 0;
-
kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
amdgpu_gfx_rlc_enter_safe_mode(adev, inst);
@@ -1182,9 +1179,6 @@ uint64_t kgd_gfx_v9_hqd_reset(struct amdgpu_device *adev,
uint32_t low, high, pipe_reset_data = 0;
uint64_t queue_addr = 0;
- if (!amdgpu_gpu_recovery)
- return 0;
-
kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
amdgpu_gfx_rlc_enter_safe_mode(adev, inst);
@@ -1229,6 +1223,13 @@ unlock_out:
return queue_addr;
}
+uint32_t kgd_gfx_v9_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
@@ -1254,9 +1255,10 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.set_address_watch = kgd_gfx_v9_set_address_watch,
.clear_address_watch = kgd_gfx_v9_clear_address_watch,
.get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
- .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
+ .build_dequeue_wait_counts_packet_info = kgd_gfx_v9_build_dequeue_wait_counts_packet_info,
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
.hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
- .hqd_reset = kgd_gfx_v9_hqd_reset
+ .hqd_reset = kgd_gfx_v9_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v9_hqd_sdma_get_doorbell
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index b6a91a552aa4..704452ca62f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -97,9 +97,10 @@ uint32_t kgd_gfx_v9_clear_address_watch(struct amdgpu_device *adev,
void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev,
uint32_t *wait_times,
uint32_t inst);
-void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
+void kgd_gfx_v9_build_dequeue_wait_counts_packet_info(struct amdgpu_device *adev,
uint32_t wait_times,
- uint32_t grace_period,
+ uint32_t sch_wave,
+ uint32_t que_sleep,
uint32_t *reg_offset,
uint32_t *reg_data);
uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev,
@@ -111,3 +112,5 @@ uint64_t kgd_gfx_v9_hqd_reset(struct amdgpu_device *adev,
uint32_t queue_id,
uint32_t inst,
unsigned int utimeout);
+uint32_t kgd_gfx_v9_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 1e998f972c30..d2ec4130a316 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -197,7 +197,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
return -EINVAL;
vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id);
- if (adev->flags & AMD_IS_APU) {
+ if (adev->apu_prefer_gtt) {
system_mem_needed = size;
ttm_mem_needed = size;
}
@@ -234,7 +234,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
if (adev && xcp_id >= 0) {
adev->kfd.vram_used[xcp_id] += vram_needed;
adev->kfd.vram_used_aligned[xcp_id] +=
- (adev->flags & AMD_IS_APU) ?
+ adev->apu_prefer_gtt ?
vram_needed :
ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
}
@@ -262,7 +262,7 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
if (adev) {
adev->kfd.vram_used[xcp_id] -= size;
- if (adev->flags & AMD_IS_APU) {
+ if (adev->apu_prefer_gtt) {
adev->kfd.vram_used_aligned[xcp_id] -= size;
kfd_mem_limit.system_mem_used -= size;
kfd_mem_limit.ttm_mem_used -= size;
@@ -370,40 +370,32 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
return 0;
}
-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
+/**
+ * amdgpu_amdkfd_remove_all_eviction_fences - Remove all eviction fences
+ * @bo: the BO to remove the eviction fences from.
+ *
+ * This function should only be used on release, when all references to the BO
+ * are already dropped. We remove the eviction fences from the private copy of
+ * the dma_resv object here since that is what is used during release to
+ * determine whether the BO is idle.
+ */
+void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo)
{
- struct amdgpu_bo *root = bo;
- struct amdgpu_vm_bo_base *vm_bo;
- struct amdgpu_vm *vm;
- struct amdkfd_process_info *info;
- struct amdgpu_amdkfd_fence *ef;
- int ret;
-
- /* we can always get vm_bo from root PD bo.*/
- while (root->parent)
- root = root->parent;
-
- vm_bo = root->vm_bo;
- if (!vm_bo)
- return 0;
-
- vm = vm_bo->vm;
- if (!vm)
- return 0;
-
- info = vm->process_info;
- if (!info || !info->eviction_fence)
- return 0;
+ struct dma_resv *resv = &bo->tbo.base._resv;
+ struct dma_fence *fence, *stub;
+ struct dma_resv_iter cursor;
- ef = container_of(dma_fence_get(&info->eviction_fence->base),
- struct amdgpu_amdkfd_fence, base);
+ dma_resv_assert_held(resv);
- BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
- ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
- dma_resv_unlock(bo->tbo.base.resv);
+ stub = dma_fence_get_stub();
+ dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
+ if (!to_amdgpu_amdkfd_fence(fence))
+ continue;
- dma_fence_put(&ef->base);
- return ret;
+ dma_resv_replace_fences(resv, fence->context, stub,
+ DMA_RESV_USAGE_BOOKKEEP);
+ }
+ dma_fence_put(stub);
}
static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
@@ -499,7 +491,7 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
if (ret)
return ret;
- return amdgpu_sync_fence(sync, vm->last_update);
+ return amdgpu_sync_fence(sync, vm->last_update, GFP_KERNEL);
}
static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
@@ -603,12 +595,6 @@ kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
{
struct ttm_operation_ctx ctx = {.interruptible = true};
struct amdgpu_bo *bo = attachment->bo_va->base.bo;
- int ret;
-
- amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
- ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (ret)
- return ret;
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
@@ -890,7 +876,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
* if peer device has large BAR. In contrast, access over xGMI is
* allowed for both small and large BAR configurations of peer device
*/
- if ((adev != bo_adev && !(adev->flags & AMD_IS_APU)) &&
+ if ((adev != bo_adev && !adev->apu_prefer_gtt) &&
((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) ||
(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) ||
(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
@@ -1263,7 +1249,7 @@ static int unmap_bo_from_gpuvm(struct kgd_mem *mem,
(void)amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
- (void)amdgpu_sync_fence(sync, bo_va->last_pt_update);
+ (void)amdgpu_sync_fence(sync, bo_va->last_pt_update, GFP_KERNEL);
return 0;
}
@@ -1287,7 +1273,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
return ret;
}
- return amdgpu_sync_fence(sync, bo_va->last_pt_update);
+ return amdgpu_sync_fence(sync, bo_va->last_pt_update, GFP_KERNEL);
}
static int map_bo_to_gpuvm(struct kgd_mem *mem,
@@ -1529,27 +1515,6 @@ static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo)
amdgpu_bo_unreserve(bo);
}
-int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
- struct amdgpu_vm *avm, u32 pasid)
-
-{
- int ret;
-
- /* Free the original amdgpu allocated pasid,
- * will be replaced with kfd allocated pasid.
- */
- if (avm->pasid) {
- amdgpu_pasid_free(avm->pasid);
- amdgpu_vm_set_pasid(adev, avm, 0);
- }
-
- ret = amdgpu_vm_set_pasid(adev, avm, pasid);
- if (ret)
- return ret;
-
- return 0;
-}
-
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
struct amdgpu_vm *avm,
void **process_info,
@@ -1607,27 +1572,6 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
}
}
-void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
- void *drm_priv)
-{
- struct amdgpu_vm *avm;
-
- if (WARN_ON(!adev || !drm_priv))
- return;
-
- avm = drm_priv_to_vm(drm_priv);
-
- pr_debug("Releasing process vm %p\n", avm);
-
- /* The original pasid of amdgpu vm has already been
- * released during making a amdgpu vm to a compute vm
- * The current pasid is managed by kfd and will be
- * released on kfd process destroy. Set amdgpu pasid
- * to 0 to avoid duplicate release.
- */
- amdgpu_vm_release_compute(adev, avm);
-}
-
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
{
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
@@ -1688,7 +1632,7 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
- reserved_for_pt
- reserved_for_ras;
- if (adev->flags & AMD_IS_APU) {
+ if (adev->apu_prefer_gtt) {
system_mem_available = no_system_mem_limit ?
kfd_mem_limit.max_system_mem_limit :
kfd_mem_limit.max_system_mem_limit -
@@ -1736,7 +1680,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
- if (adev->flags & AMD_IS_APU) {
+ if (adev->apu_prefer_gtt) {
domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_flags = 0;
@@ -1987,7 +1931,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
if (size) {
if (!is_imported &&
(mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM ||
- ((adev->flags & AMD_IS_APU) &&
+ (adev->apu_prefer_gtt &&
mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT)))
*size = bo_size;
else
@@ -2414,7 +2358,7 @@ static int import_obj_create(struct amdgpu_device *adev,
(*mem)->bo = bo;
(*mem)->va = va;
(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) &&
- !(adev->flags & AMD_IS_APU) ?
+ !adev->apu_prefer_gtt ?
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
(*mem)->mapped_to_gpu_memory = 0;
@@ -2969,7 +2913,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
}
dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
DMA_RESV_USAGE_KERNEL, fence) {
- ret = amdgpu_sync_fence(&sync_obj, fence);
+ ret = amdgpu_sync_fence(&sync_obj, fence, GFP_KERNEL);
if (ret) {
pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
goto validate_map_fail;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 093141ad6ed0..e476e45b996a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -36,13 +36,6 @@
#include "atombios_encoders.h"
#include "bif/bif_4_1_d.h"
-static void amdgpu_atombios_lookup_i2c_gpio_quirks(struct amdgpu_device *adev,
- ATOM_GPIO_I2C_ASSIGMENT *gpio,
- u8 index)
-{
-
-}
-
static struct amdgpu_i2c_bus_rec amdgpu_atombios_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio)
{
struct amdgpu_i2c_bus_rec i2c;
@@ -108,9 +101,6 @@ struct amdgpu_i2c_bus_rec amdgpu_atombios_lookup_i2c_gpio(struct amdgpu_device *
gpio = &i2c_info->asGPIO_Info[0];
for (i = 0; i < num_indices; i++) {
-
- amdgpu_atombios_lookup_i2c_gpio_quirks(adev, gpio, i);
-
if (gpio->sucI2cId.ucAccess == id) {
i2c = amdgpu_atombios_get_bus_rec_for_i2c_gpio(gpio);
break;
@@ -142,8 +132,6 @@ void amdgpu_atombios_i2c_init(struct amdgpu_device *adev)
gpio = &i2c_info->asGPIO_Info[0];
for (i = 0; i < num_indices; i++) {
- amdgpu_atombios_lookup_i2c_gpio_quirks(adev, gpio, i);
-
i2c = amdgpu_atombios_get_bus_rec_for_i2c_gpio(gpio);
if (i2c.valid) {
@@ -156,6 +144,38 @@ void amdgpu_atombios_i2c_init(struct amdgpu_device *adev)
}
}
+void amdgpu_atombios_oem_i2c_init(struct amdgpu_device *adev, u8 i2c_id)
+{
+ struct atom_context *ctx = adev->mode_info.atom_context;
+ ATOM_GPIO_I2C_ASSIGMENT *gpio;
+ struct amdgpu_i2c_bus_rec i2c;
+ int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
+ struct _ATOM_GPIO_I2C_INFO *i2c_info;
+ uint16_t data_offset, size;
+ int i, num_indices;
+ char stmp[32];
+
+ if (amdgpu_atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
+ i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
+
+ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_GPIO_I2C_ASSIGMENT);
+
+ gpio = &i2c_info->asGPIO_Info[0];
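+ /* Walk the GPIO I2C table until an entry with the requested i2c_id is found. */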
+ for (i = 0; i < num_indices; i++) {
+ i2c = amdgpu_atombios_get_bus_rec_for_i2c_gpio(gpio);
+
+ if (i2c.valid && i2c.i2c_id == i2c_id) {
+ sprintf(stmp, "OEM 0x%x", i2c.i2c_id);
+ adev->i2c_bus[i] = amdgpu_i2c_create(adev_to_drm(adev), &i2c, stmp);
+ break;
+ }
+ gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
+ ((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
+ }
+ }
+}
+
struct amdgpu_gpio_rec
amdgpu_atombios_lookup_gpio(struct amdgpu_device *adev,
u8 id)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 0e16432d9a72..867bc5c5ce67 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -136,6 +136,7 @@ amdgpu_atombios_lookup_gpio(struct amdgpu_device *adev,
struct amdgpu_i2c_bus_rec amdgpu_atombios_lookup_i2c_gpio(struct amdgpu_device *adev,
uint8_t id);
void amdgpu_atombios_i2c_init(struct amdgpu_device *adev);
+void amdgpu_atombios_oem_i2c_init(struct amdgpu_device *adev, u8 i2c_id);
bool amdgpu_atombios_has_dce_engine_info(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index f873dd3cae16..eb015bdda8a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -549,9 +549,10 @@ bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
u16 data_offset, size;
union umc_info *umc_info;
u8 frev, crev;
- bool ecc_default_enabled = false;
+ bool mem_ecc_enabled = false;
u8 umc_config;
u32 umc_config1;
+ adev->ras_default_ecc_enabled = false;
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
umc_info);
@@ -563,20 +564,22 @@ bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
switch (crev) {
case 1:
umc_config = le32_to_cpu(umc_info->v31.umc_config);
- ecc_default_enabled =
+ mem_ecc_enabled =
(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
break;
case 2:
umc_config = le32_to_cpu(umc_info->v32.umc_config);
- ecc_default_enabled =
+ mem_ecc_enabled =
(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
break;
case 3:
umc_config = le32_to_cpu(umc_info->v33.umc_config);
umc_config1 = le32_to_cpu(umc_info->v33.umc_config1);
- ecc_default_enabled =
+ mem_ecc_enabled =
((umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ||
(umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE)) ? true : false;
+ adev->ras_default_ecc_enabled =
+ (umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
break;
default:
/* unsupported crev */
@@ -585,9 +588,12 @@ bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
} else if (frev == 4) {
switch (crev) {
case 0:
+ umc_config = le32_to_cpu(umc_info->v40.umc_config);
umc_config1 = le32_to_cpu(umc_info->v40.umc_config1);
- ecc_default_enabled =
+ mem_ecc_enabled =
(umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE) ? true : false;
+ adev->ras_default_ecc_enabled =
+ (umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
break;
default:
/* unsupported crev */
@@ -599,7 +605,7 @@ bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
}
}
- return ecc_default_enabled;
+ return mem_ecc_enabled;
}
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 423fd2eebe1e..75fcc521c171 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -84,6 +84,13 @@ static bool check_atom_bios(struct amdgpu_device *adev, size_t size)
return false;
}
+void amdgpu_bios_release(struct amdgpu_device *adev)
+{
+ kfree(adev->bios);
+ adev->bios = NULL;
+ adev->bios_size = 0;
+}
+
/* If you boot an IGP board with a discrete card as the primary,
* the IGP rom is not accessible via the rom bar as the IGP rom is
* part of the system bios. On boot, the system bios puts a
@@ -121,7 +128,7 @@ static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev)
iounmap(bios);
if (!check_atom_bios(adev, size)) {
- kfree(adev->bios);
+ amdgpu_bios_release(adev);
return false;
}
@@ -149,7 +156,7 @@ bool amdgpu_read_bios(struct amdgpu_device *adev)
pci_unmap_rom(adev->pdev, bios);
if (!check_atom_bios(adev, size)) {
- kfree(adev->bios);
+ amdgpu_bios_release(adev);
return false;
}
@@ -189,7 +196,7 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
amdgpu_asic_read_bios_from_rom(adev, adev->bios, len);
if (!check_atom_bios(adev, len)) {
- kfree(adev->bios);
+ amdgpu_bios_release(adev);
return false;
}
@@ -225,7 +232,8 @@ static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
return true;
free_bios:
- kfree(adev->bios);
+ amdgpu_bios_release(adev);
+
return false;
}
@@ -327,7 +335,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
}
if (!check_atom_bios(adev, size)) {
- kfree(adev->bios);
+ amdgpu_bios_release(adev);
return false;
}
adev->bios_size = size;
@@ -392,7 +400,7 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
GFP_KERNEL);
if (!check_atom_bios(adev, vhdr->ImageLength)) {
- kfree(adev->bios);
+ amdgpu_bios_release(adev);
return false;
}
adev->bios_size = vhdr->ImageLength;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 344e0a9ee08a..5e375e9c4f5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -674,7 +674,7 @@ static int amdgpu_connector_lvds_get_modes(struct drm_connector *connector)
}
static enum drm_mode_status amdgpu_connector_lvds_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
@@ -839,7 +839,7 @@ static int amdgpu_connector_vga_get_modes(struct drm_connector *connector)
}
static enum drm_mode_status amdgpu_connector_vga_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
@@ -1196,7 +1196,7 @@ static void amdgpu_connector_dvi_force(struct drm_connector *connector)
}
static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
@@ -1464,7 +1464,7 @@ out:
}
static enum drm_mode_status amdgpu_connector_dp_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *amdgpu_dig_connector = amdgpu_connector->con_priv;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
new file mode 100644
index 000000000000..360e07a5c7c1
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
@@ -0,0 +1,581 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/list.h>
+#include "amdgpu.h"
+
+static const guid_t MCE = CPER_NOTIFY_MCE;
+static const guid_t CMC = CPER_NOTIFY_CMC;
+static const guid_t BOOT = BOOT_TYPE;
+
+static const guid_t CRASHDUMP = AMD_CRASHDUMP;
+static const guid_t RUNTIME = AMD_GPU_NONSTANDARD_ERROR;
+
+static void __inc_entry_length(struct cper_hdr *hdr, uint32_t size)
+{
+ hdr->record_length += size;
+}
+
+static void amdgpu_cper_get_timestamp(struct cper_timestamp *timestamp)
+{
+ struct tm tm;
+ time64_t now = ktime_get_real_seconds();
+
+ time64_to_tm(now, 0, &tm);
+ timestamp->seconds = tm.tm_sec;
+ timestamp->minutes = tm.tm_min;
+ timestamp->hours = tm.tm_hour;
+ timestamp->flag = 0;
+ timestamp->day = tm.tm_mday;
+ timestamp->month = 1 + tm.tm_mon;
+ timestamp->year = (1900 + tm.tm_year) % 100;
+ timestamp->century = (1900 + tm.tm_year) / 100;
+}
+
+void amdgpu_cper_entry_fill_hdr(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ enum amdgpu_cper_type type,
+ enum cper_error_severity sev)
+{
+ char record_id[16];
+
+ hdr->signature[0] = 'C';
+ hdr->signature[1] = 'P';
+ hdr->signature[2] = 'E';
+ hdr->signature[3] = 'R';
+ hdr->revision = CPER_HDR_REV_1;
+ hdr->signature_end = 0xFFFFFFFF;
+ hdr->error_severity = sev;
+
+ hdr->valid_bits.platform_id = 1;
+ hdr->valid_bits.partition_id = 1;
+ hdr->valid_bits.timestamp = 1;
+
+ amdgpu_cper_get_timestamp(&hdr->timestamp);
+
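+ /* record_id is a fixed 8-byte "socket:unique_id" tag; snprintf limits it to 8 chars plus NUL. */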
+ snprintf(record_id, 9, "%d:%X",
+ (adev->smuio.funcs && adev->smuio.funcs->get_socket_id) ?
+ adev->smuio.funcs->get_socket_id(adev) :
+ 0,
+ atomic_inc_return(&adev->cper.unique_id));
+ memcpy(hdr->record_id, record_id, 8);
+
+ snprintf(hdr->platform_id, 16, "0x%04X:0x%04X",
+ adev->pdev->vendor, adev->pdev->device);
+ /* pmfw version should be part of creator_id according to CPER spec */
+ snprintf(hdr->creator_id, 16, "%s", CPER_CREATOR_ID_AMDGPU);
+
+ switch (type) {
+ case AMDGPU_CPER_TYPE_BOOT:
+ hdr->notify_type = BOOT;
+ break;
+ case AMDGPU_CPER_TYPE_FATAL:
+ case AMDGPU_CPER_TYPE_BP_THRESHOLD:
+ hdr->notify_type = MCE;
+ break;
+ case AMDGPU_CPER_TYPE_RUNTIME:
+ if (sev == CPER_SEV_NON_FATAL_CORRECTED)
+ hdr->notify_type = CMC;
+ else
+ hdr->notify_type = MCE;
+ break;
+ default:
+ dev_err(adev->dev, "Unknown CPER Type\n");
+ break;
+ }
+
+ __inc_entry_length(hdr, HDR_LEN);
+}
+
+static int amdgpu_cper_entry_fill_section_desc(struct amdgpu_device *adev,
+ struct cper_sec_desc *section_desc,
+ bool bp_threshold,
+ bool poison,
+ enum cper_error_severity sev,
+ guid_t sec_type,
+ uint32_t section_length,
+ uint32_t section_offset)
+{
+ section_desc->revision_minor = CPER_SEC_MINOR_REV_1;
+ section_desc->revision_major = CPER_SEC_MAJOR_REV_22;
+ section_desc->sec_offset = section_offset;
+ section_desc->sec_length = section_length;
+ section_desc->valid_bits.fru_text = 1;
+ section_desc->flag_bits.primary = 1;
+ section_desc->severity = sev;
+ section_desc->sec_type = sec_type;
+
+ snprintf(section_desc->fru_text, 20, "OAM%d",
+ (adev->smuio.funcs && adev->smuio.funcs->get_socket_id) ?
+ adev->smuio.funcs->get_socket_id(adev) :
+ 0);
+
+ if (bp_threshold)
+ section_desc->flag_bits.exceed_err_threshold = 1;
+ if (poison)
+ section_desc->flag_bits.latent_err = 1;
+
+ return 0;
+}
+
+int amdgpu_cper_entry_fill_fatal_section(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ uint32_t idx,
+ struct cper_sec_crashdump_reg_data reg_data)
+{
+ struct cper_sec_desc *section_desc;
+ struct cper_sec_crashdump_fatal *section;
+
+ section_desc = (struct cper_sec_desc *)((uint8_t *)hdr + SEC_DESC_OFFSET(idx));
+ section = (struct cper_sec_crashdump_fatal *)((uint8_t *)hdr +
+ FATAL_SEC_OFFSET(hdr->sec_cnt, idx));
+
+ amdgpu_cper_entry_fill_section_desc(adev, section_desc, false, false,
+ CPER_SEV_FATAL, CRASHDUMP, FATAL_SEC_LEN,
+ FATAL_SEC_OFFSET(hdr->sec_cnt, idx));
+
+ section->body.reg_ctx_type = CPER_CTX_TYPE_CRASH;
+ section->body.reg_arr_size = sizeof(reg_data);
+ section->body.data = reg_data;
+
+ __inc_entry_length(hdr, SEC_DESC_LEN + FATAL_SEC_LEN);
+
+ return 0;
+}
+
+int amdgpu_cper_entry_fill_runtime_section(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ uint32_t idx,
+ enum cper_error_severity sev,
+ uint32_t *reg_dump,
+ uint32_t reg_count)
+{
+ struct cper_sec_desc *section_desc;
+ struct cper_sec_nonstd_err *section;
+ bool poison;
+
+ poison = (sev == CPER_SEV_NON_FATAL_CORRECTED) ? false : true;
+ section_desc = (struct cper_sec_desc *)((uint8_t *)hdr + SEC_DESC_OFFSET(idx));
+ section = (struct cper_sec_nonstd_err *)((uint8_t *)hdr +
+ NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
+
+ amdgpu_cper_entry_fill_section_desc(adev, section_desc, false, poison,
+ sev, RUNTIME, NONSTD_SEC_LEN,
+ NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
+
+ reg_count = umin(reg_count, CPER_ACA_REG_COUNT);
+
+ section->hdr.valid_bits.err_info_cnt = 1;
+ section->hdr.valid_bits.err_context_cnt = 1;
+
+ section->info.error_type = RUNTIME;
+ section->info.ms_chk_bits.err_type_valid = 1;
+ section->ctx.reg_ctx_type = CPER_CTX_TYPE_CRASH;
+ section->ctx.reg_arr_size = sizeof(section->ctx.reg_dump);
+
+ memcpy(section->ctx.reg_dump, reg_dump, reg_count * sizeof(uint32_t));
+
+ __inc_entry_length(hdr, SEC_DESC_LEN + NONSTD_SEC_LEN);
+
+ return 0;
+}
+
+int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ uint32_t idx)
+{
+ struct cper_sec_desc *section_desc;
+ struct cper_sec_nonstd_err *section;
+
+ section_desc = (struct cper_sec_desc *)((uint8_t *)hdr + SEC_DESC_OFFSET(idx));
+ section = (struct cper_sec_nonstd_err *)((uint8_t *)hdr +
+ NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
+
+ amdgpu_cper_entry_fill_section_desc(adev, section_desc, true, false,
+ CPER_SEV_NUM, RUNTIME, NONSTD_SEC_LEN,
+ NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
+
+ section->hdr.valid_bits.err_info_cnt = 1;
+ section->hdr.valid_bits.err_context_cnt = 1;
+
+ section->info.error_type = RUNTIME;
+ section->info.ms_chk_bits.err_type_valid = 1;
+ section->ctx.reg_ctx_type = CPER_CTX_TYPE_CRASH;
+ section->ctx.reg_arr_size = sizeof(section->ctx.reg_dump);
+
+ /* Hardcoded Reg dump for bad page threshold CPER */
+ section->ctx.reg_dump[CPER_ACA_REG_CTL_LO] = 0x1;
+ section->ctx.reg_dump[CPER_ACA_REG_CTL_HI] = 0x0;
+ section->ctx.reg_dump[CPER_ACA_REG_STATUS_LO] = 0x137;
+ section->ctx.reg_dump[CPER_ACA_REG_STATUS_HI] = 0xB0000000;
+ section->ctx.reg_dump[CPER_ACA_REG_ADDR_LO] = 0x0;
+ section->ctx.reg_dump[CPER_ACA_REG_ADDR_HI] = 0x0;
+ section->ctx.reg_dump[CPER_ACA_REG_MISC0_LO] = 0x0;
+ section->ctx.reg_dump[CPER_ACA_REG_MISC0_HI] = 0x0;
+ section->ctx.reg_dump[CPER_ACA_REG_CONFIG_LO] = 0x2;
+ section->ctx.reg_dump[CPER_ACA_REG_CONFIG_HI] = 0x1ff;
+ section->ctx.reg_dump[CPER_ACA_REG_IPID_LO] = 0x0;
+ section->ctx.reg_dump[CPER_ACA_REG_IPID_HI] = 0x96;
+ section->ctx.reg_dump[CPER_ACA_REG_SYND_LO] = 0x0;
+ section->ctx.reg_dump[CPER_ACA_REG_SYND_HI] = 0x0;
+
+ __inc_entry_length(hdr, SEC_DESC_LEN + NONSTD_SEC_LEN);
+
+ return 0;
+}
+
+struct cper_hdr *amdgpu_cper_alloc_entry(struct amdgpu_device *adev,
+ enum amdgpu_cper_type type,
+ uint16_t section_count)
+{
+ struct cper_hdr *hdr;
+ uint32_t size = 0;
+
+ size += HDR_LEN;
+ size += (SEC_DESC_LEN * section_count);
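+ /* One section descriptor per section; the per-type section bodies are added below. */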
+
+ switch (type) {
+ case AMDGPU_CPER_TYPE_RUNTIME:
+ case AMDGPU_CPER_TYPE_BP_THRESHOLD:
+ size += (NONSTD_SEC_LEN * section_count);
+ break;
+ case AMDGPU_CPER_TYPE_FATAL:
+ size += (FATAL_SEC_LEN * section_count);
+ break;
+ case AMDGPU_CPER_TYPE_BOOT:
+ size += (BOOT_SEC_LEN * section_count);
+ break;
+ default:
+ dev_err(adev->dev, "Unknown CPER Type!\n");
+ return NULL;
+ }
+
+ hdr = kzalloc(size, GFP_KERNEL);
+ if (!hdr)
+ return NULL;
+
+ /* Save this early */
+ hdr->sec_cnt = section_count;
+
+ return hdr;
+}
+
+int amdgpu_cper_generate_ue_record(struct amdgpu_device *adev,
+ struct aca_bank *bank)
+{
+ struct cper_hdr *fatal = NULL;
+ struct cper_sec_crashdump_reg_data reg_data = { 0 };
+ struct amdgpu_ring *ring = &adev->cper.ring_buf;
+ int ret;
+
+ fatal = amdgpu_cper_alloc_entry(adev, AMDGPU_CPER_TYPE_FATAL, 1);
+ if (!fatal) {
+ dev_err(adev->dev, "fail to alloc cper entry for ue record\n");
+ return -ENOMEM;
+ }
+
+ reg_data.status_lo = lower_32_bits(bank->regs[ACA_REG_IDX_STATUS]);
+ reg_data.status_hi = upper_32_bits(bank->regs[ACA_REG_IDX_STATUS]);
+ reg_data.addr_lo = lower_32_bits(bank->regs[ACA_REG_IDX_ADDR]);
+ reg_data.addr_hi = upper_32_bits(bank->regs[ACA_REG_IDX_ADDR]);
+ reg_data.ipid_lo = lower_32_bits(bank->regs[ACA_REG_IDX_IPID]);
+ reg_data.ipid_hi = upper_32_bits(bank->regs[ACA_REG_IDX_IPID]);
+ reg_data.synd_lo = lower_32_bits(bank->regs[ACA_REG_IDX_SYND]);
+ reg_data.synd_hi = upper_32_bits(bank->regs[ACA_REG_IDX_SYND]);
+
+ amdgpu_cper_entry_fill_hdr(adev, fatal, AMDGPU_CPER_TYPE_FATAL, CPER_SEV_FATAL);
+ ret = amdgpu_cper_entry_fill_fatal_section(adev, fatal, 0, reg_data);
+ if (ret)
+ return ret;
+
+ amdgpu_cper_ring_write(ring, fatal, fatal->record_length);
+ kfree(fatal);
+
+ return 0;
+}
+
+int amdgpu_cper_generate_bp_threshold_record(struct amdgpu_device *adev)
+{
+ struct cper_hdr *bp_threshold = NULL;
+ struct amdgpu_ring *ring = &adev->cper.ring_buf;
+ int ret;
+
+ bp_threshold = amdgpu_cper_alloc_entry(adev, AMDGPU_CPER_TYPE_BP_THRESHOLD, 1);
+ if (!bp_threshold) {
+ dev_err(adev->dev, "fail to alloc cper entry for bad page threshold record\n");
+ return -ENOMEM;
+ }
+
+ amdgpu_cper_entry_fill_hdr(adev, bp_threshold, AMDGPU_CPER_TYPE_BP_THRESHOLD, CPER_SEV_NUM);
+ ret = amdgpu_cper_entry_fill_bad_page_threshold_section(adev, bp_threshold, 0);
+ if (ret)
+ return ret;
+
+ amdgpu_cper_ring_write(ring, bp_threshold, bp_threshold->record_length);
+ kfree(bp_threshold);
+
+ return 0;
+}
+
+static enum cper_error_severity amdgpu_aca_err_type_to_cper_sev(struct amdgpu_device *adev,
+ enum aca_error_type aca_err_type)
+{
+ switch (aca_err_type) {
+ case ACA_ERROR_TYPE_UE:
+ return CPER_SEV_FATAL;
+ case ACA_ERROR_TYPE_CE:
+ return CPER_SEV_NON_FATAL_CORRECTED;
+ case ACA_ERROR_TYPE_DEFERRED:
+ return CPER_SEV_NON_FATAL_UNCORRECTED;
+ default:
+ dev_err(adev->dev, "Unknown ACA error type!\n");
+ return CPER_SEV_FATAL;
+ }
+}
+
+int amdgpu_cper_generate_ce_records(struct amdgpu_device *adev,
+ struct aca_banks *banks,
+ uint16_t bank_count)
+{
+ struct cper_hdr *corrected = NULL;
+ enum cper_error_severity sev = CPER_SEV_NON_FATAL_CORRECTED;
+ struct amdgpu_ring *ring = &adev->cper.ring_buf;
+ uint32_t reg_data[CPER_ACA_REG_COUNT] = { 0 };
+ struct aca_bank_node *node;
+ struct aca_bank *bank;
+ uint32_t i = 0;
+ int ret;
+
+ corrected = amdgpu_cper_alloc_entry(adev, AMDGPU_CPER_TYPE_RUNTIME, bank_count);
+ if (!corrected) {
+ dev_err(adev->dev, "fail to allocate cper entry for ce records\n");
+ return -ENOMEM;
+ }
+
+ /* Raise severity if any DE is detected in the ACA bank list */
+ list_for_each_entry(node, &banks->list, node) {
+ bank = &node->bank;
+ if (bank->aca_err_type == ACA_ERROR_TYPE_DEFERRED) {
+ sev = CPER_SEV_NON_FATAL_UNCORRECTED;
+ break;
+ }
+ }
+
+ amdgpu_cper_entry_fill_hdr(adev, corrected, AMDGPU_CPER_TYPE_RUNTIME, sev);
+
+ /* Combine CE and DE in cper record */
+ list_for_each_entry(node, &banks->list, node) {
+ bank = &node->bank;
+ reg_data[CPER_ACA_REG_CTL_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_CTL]);
+ reg_data[CPER_ACA_REG_CTL_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_CTL]);
+ reg_data[CPER_ACA_REG_STATUS_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_STATUS]);
+ reg_data[CPER_ACA_REG_STATUS_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_STATUS]);
+ reg_data[CPER_ACA_REG_ADDR_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_ADDR]);
+ reg_data[CPER_ACA_REG_ADDR_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_ADDR]);
+ reg_data[CPER_ACA_REG_MISC0_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_MISC0]);
+ reg_data[CPER_ACA_REG_MISC0_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_MISC0]);
+ reg_data[CPER_ACA_REG_CONFIG_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_CONFIG]);
+ reg_data[CPER_ACA_REG_CONFIG_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_CONFIG]);
+ reg_data[CPER_ACA_REG_IPID_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_IPID]);
+ reg_data[CPER_ACA_REG_IPID_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_IPID]);
+ reg_data[CPER_ACA_REG_SYND_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_SYND]);
+ reg_data[CPER_ACA_REG_SYND_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_SYND]);
+
+ ret = amdgpu_cper_entry_fill_runtime_section(adev, corrected, i++,
+ amdgpu_aca_err_type_to_cper_sev(adev, bank->aca_err_type),
+ reg_data, CPER_ACA_REG_COUNT);
+ if (ret)
+ return ret;
+ }
+
+ amdgpu_cper_ring_write(ring, corrected, corrected->record_length);
+ kfree(corrected);
+
+ return 0;
+}
+
+static bool amdgpu_cper_is_hdr(struct amdgpu_ring *ring, u64 pos)
+{
+ struct cper_hdr *chdr;
+
+ chdr = (struct cper_hdr *)&(ring->ring[pos]);
+ return strcmp(chdr->signature, "CPER") ? false : true;
+}
+
+static u32 amdgpu_cper_ring_get_ent_sz(struct amdgpu_ring *ring, u64 pos)
+{
+ struct cper_hdr *chdr;
+ u64 p;
+ u32 chunk, rec_len = 0;
+
+ chdr = (struct cper_hdr *)&(ring->ring[pos]);
+ chunk = ring->ring_size - (pos << 2);
+
+ if (!strcmp(chdr->signature, "CPER")) {
+ rec_len = chdr->record_length;
+ goto calc;
+ }
+
+ /* ring buffer is not full, no cper data after ring->wptr */
+ if (ring->count_dw)
+ goto calc;
+
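+ /* Otherwise scan forward for the next "CPER" signature to find where this entry ends. */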
+ for (p = pos + 1; p <= ring->buf_mask; p++) {
+ chdr = (struct cper_hdr *)&(ring->ring[p]);
+ if (!strcmp(chdr->signature, "CPER")) {
+ rec_len = (p - pos) << 2;
+ goto calc;
+ }
+ }
+
+calc:
+ if (!rec_len)
+ return chunk;
+ else
+ return umin(rec_len, chunk);
+}
+
+void amdgpu_cper_ring_write(struct amdgpu_ring *ring, void *src, int count)
+{
+ u64 pos, wptr_old, rptr = *ring->rptr_cpu_addr & ring->ptr_mask;
+ int rec_cnt_dw = count >> 2;
+ u32 chunk, ent_sz;
+ u8 *s = (u8 *)src;
+
+ if (count >= ring->ring_size - 4) {
+ dev_err(ring->adev->dev,
+ "CPER data size(%d) is larger than ring size(%d)\n",
+ count, ring->ring_size - 4);
+
+ return;
+ }
+
+ wptr_old = ring->wptr;
+
+ mutex_lock(&ring->adev->cper.ring_lock);
+ while (count) {
+ ent_sz = amdgpu_cper_ring_get_ent_sz(ring, ring->wptr);
+ chunk = umin(ent_sz, count);
+
+ memcpy(&ring->ring[ring->wptr], s, chunk);
+
+ ring->wptr += (chunk >> 2);
+ ring->wptr &= ring->ptr_mask;
+ count -= chunk;
+ s += chunk;
+ }
+
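+ /* count_dw tracks remaining free space in dwords; clamp it at zero rather than letting it underflow. */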
+ if (ring->count_dw < rec_cnt_dw)
+ ring->count_dw = 0;
+
+ /* the buffer has overflowed, adjust rptr */
+ if (((wptr_old < rptr) && (rptr <= ring->wptr)) ||
+ ((ring->wptr < wptr_old) && (wptr_old < rptr)) ||
+ ((rptr <= ring->wptr) && (ring->wptr < wptr_old))) {
+ pos = (ring->wptr + 1) & ring->ptr_mask;
+
+ do {
+ ent_sz = amdgpu_cper_ring_get_ent_sz(ring, pos);
+
+ rptr += (ent_sz >> 2);
+ rptr &= ring->ptr_mask;
+ *ring->rptr_cpu_addr = rptr;
+
+ pos = rptr;
+ } while (!amdgpu_cper_is_hdr(ring, rptr));
+ }
+
+ if (ring->count_dw >= rec_cnt_dw)
+ ring->count_dw -= rec_cnt_dw;
+ mutex_unlock(&ring->adev->cper.ring_lock);
+}
+
+static u64 amdgpu_cper_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ return *(ring->rptr_cpu_addr);
+}
+
+static u64 amdgpu_cper_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ return ring->wptr;
+}
+
+static const struct amdgpu_ring_funcs cper_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_CPER,
+ .align_mask = 0xff,
+ .support_64bit_ptrs = false,
+ .get_rptr = amdgpu_cper_ring_get_rptr,
+ .get_wptr = amdgpu_cper_ring_get_wptr,
+};
+
+static int amdgpu_cper_ring_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring = &(adev->cper.ring_buf);
+
+ mutex_init(&adev->cper.ring_lock);
+
+ ring->adev = NULL;
+ ring->ring_obj = NULL;
+ ring->use_doorbell = false;
+ ring->no_scheduler = true;
+ ring->funcs = &cper_ring_funcs;
+
+ sprintf(ring->name, "cper");
+ return amdgpu_ring_init(adev, ring, CPER_MAX_RING_SIZE, NULL, 0,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
+}
+
+int amdgpu_cper_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (!amdgpu_aca_is_enabled(adev))
+ return 0;
+
+ r = amdgpu_cper_ring_init(adev);
+ if (r) {
+ dev_err(adev->dev, "failed to initialize cper ring, r = %d\n", r);
+ return r;
+ }
+
+ mutex_init(&adev->cper.cper_lock);
+
+ adev->cper.enabled = true;
+ adev->cper.max_count = CPER_MAX_ALLOWED_COUNT;
+
+ return 0;
+}
+
+int amdgpu_cper_fini(struct amdgpu_device *adev)
+{
+ if (!amdgpu_aca_is_enabled(adev))
+ return 0;
+
+ adev->cper.enabled = false;
+
+ amdgpu_ring_fini(&(adev->cper.ring_buf));
+ adev->cper.count = 0;
+ adev->cper.wptr = 0;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h
new file mode 100644
index 000000000000..bcb97d245673
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_CPER_H__
+#define __AMDGPU_CPER_H__
+
+#include "amd_cper.h"
+#include "amdgpu_aca.h"
+
+#define CPER_MAX_ALLOWED_COUNT 0x1000
+#define CPER_MAX_RING_SIZE 0x100000
+#define HDR_LEN (sizeof(struct cper_hdr))
+#define SEC_DESC_LEN (sizeof(struct cper_sec_desc))
+
+#define BOOT_SEC_LEN (sizeof(struct cper_sec_crashdump_boot))
+#define FATAL_SEC_LEN (sizeof(struct cper_sec_crashdump_fatal))
+#define NONSTD_SEC_LEN (sizeof(struct cper_sec_nonstd_err))
+
+#define SEC_DESC_OFFSET(idx) (HDR_LEN + (SEC_DESC_LEN * idx))
+
+#define BOOT_SEC_OFFSET(count, idx) (HDR_LEN + (SEC_DESC_LEN * count) + (BOOT_SEC_LEN * idx))
+#define FATAL_SEC_OFFSET(count, idx) (HDR_LEN + (SEC_DESC_LEN * count) + (FATAL_SEC_LEN * idx))
+#define NONSTD_SEC_OFFSET(count, idx) (HDR_LEN + (SEC_DESC_LEN * count) + (NONSTD_SEC_LEN * idx))
+
+enum amdgpu_cper_type {
+ AMDGPU_CPER_TYPE_RUNTIME,
+ AMDGPU_CPER_TYPE_FATAL,
+ AMDGPU_CPER_TYPE_BOOT,
+ AMDGPU_CPER_TYPE_BP_THRESHOLD,
+};
+
+struct amdgpu_cper {
+ bool enabled;
+
+ atomic_t unique_id;
+ struct mutex cper_lock;
+
+ /* Lifetime CPERs generated */
+ uint32_t count;
+ uint32_t max_count;
+
+ uint32_t wptr;
+
+ void *ring[CPER_MAX_ALLOWED_COUNT];
+ struct amdgpu_ring ring_buf;
+ struct mutex ring_lock;
+};
+
+void amdgpu_cper_entry_fill_hdr(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ enum amdgpu_cper_type type,
+ enum cper_error_severity sev);
+int amdgpu_cper_entry_fill_fatal_section(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ uint32_t idx,
+ struct cper_sec_crashdump_reg_data reg_data);
+int amdgpu_cper_entry_fill_runtime_section(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ uint32_t idx,
+ enum cper_error_severity sev,
+ uint32_t *reg_dump,
+ uint32_t reg_count);
+int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ uint32_t section_idx);
+
+struct cper_hdr *amdgpu_cper_alloc_entry(struct amdgpu_device *adev,
+ enum amdgpu_cper_type type,
+ uint16_t section_count);
+/* UEs must be encoded into separate CPER entries, one UE per entry */
+int amdgpu_cper_generate_ue_record(struct amdgpu_device *adev,
+ struct aca_bank *bank);
+/* CEs and DEs are combined into one CPER entry */
+int amdgpu_cper_generate_ce_records(struct amdgpu_device *adev,
+ struct aca_banks *banks,
+ uint16_t bank_count);
+/* The bad page threshold is encoded into a separate CPER entry */
+int amdgpu_cper_generate_bp_threshold_record(struct amdgpu_device *adev);
+void amdgpu_cper_ring_write(struct amdgpu_ring *ring,
+ void *src, int count);
+int amdgpu_cper_init(struct amdgpu_device *adev);
+int amdgpu_cper_fini(struct amdgpu_device *adev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 5cc5f59e3018..82df06a72ee0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -428,7 +428,7 @@ static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
dma_fence_put(old);
}
- r = amdgpu_sync_fence(&p->sync, fence);
+ r = amdgpu_sync_fence(&p->sync, fence, GFP_KERNEL);
dma_fence_put(fence);
if (r)
return r;
@@ -450,7 +450,7 @@ static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
return r;
}
- r = amdgpu_sync_fence(&p->sync, fence);
+ r = amdgpu_sync_fence(&p->sync, fence, GFP_KERNEL);
dma_fence_put(fence);
return r;
}
@@ -1111,7 +1111,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
struct drm_gpu_scheduler *sched = entity->rq->sched;
struct amdgpu_ring *ring = to_amdgpu_ring(sched);
- if (amdgpu_vmid_uses_reserved(adev, vm, ring->vm_hub))
+ if (amdgpu_vmid_uses_reserved(vm, ring->vm_hub))
return -EINVAL;
}
}
@@ -1124,7 +1124,8 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update);
+ r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update,
+ GFP_KERNEL);
if (r)
return r;
@@ -1135,7 +1136,8 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
+ r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update,
+ GFP_KERNEL);
if (r)
return r;
}
@@ -1154,7 +1156,8 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
+ r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update,
+ GFP_KERNEL);
if (r)
return r;
}
@@ -1167,7 +1170,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(&p->sync, vm->last_update);
+ r = amdgpu_sync_fence(&p->sync, vm->last_update, GFP_KERNEL);
if (r)
return r;
@@ -1248,7 +1251,8 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
continue;
}
- r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
+ r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence,
+ GFP_KERNEL);
dma_fence_put(fence);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 49ca8c814455..a1450f13d963 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1990,7 +1990,7 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val)
uint32_t max_freq, min_freq;
struct amdgpu_device *adev = (struct amdgpu_device *)data;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ if (amdgpu_sriov_multi_vf_mode(adev))
return -EINVAL;
ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
index 824f9da5b6ce..7b50741dc097 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
@@ -364,5 +364,9 @@ void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
dev_coredumpm(dev->dev, THIS_MODULE, coredump, 0, GFP_NOWAIT,
amdgpu_devcoredump_read, amdgpu_devcoredump_free);
+
+ drm_info(dev, "AMDGPU device coredump file has been created\n");
+ drm_info(dev, "Check your /sys/class/drm/card%d/device/devcoredump/data\n",
+ dev->primary->index);
}
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 018dfccd771b..6ebf6179064b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -102,6 +102,9 @@ MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
#define AMDGPU_PCIE_INDEX_HI_FALLBACK (0x44 >> 2)
#define AMDGPU_PCIE_DATA_FALLBACK (0x3C >> 2)
+#define AMDGPU_VBIOS_SKIP (1U << 0)
+#define AMDGPU_VBIOS_OPTIONAL (1U << 1)
+
static const struct drm_driver amdgpu_kms_driver;
const char *amdgpu_asic_name[] = {
@@ -224,6 +227,24 @@ static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
static DEVICE_ATTR(pcie_replay_count, 0444,
amdgpu_device_get_pcie_replay_count, NULL);
+static int amdgpu_device_attr_sysfs_init(struct amdgpu_device *adev)
+{
+ int ret = 0;
+
+ if (!amdgpu_sriov_vf(adev))
+ ret = sysfs_create_file(&adev->dev->kobj,
+ &dev_attr_pcie_replay_count.attr);
+
+ return ret;
+}
+
+static void amdgpu_device_attr_sysfs_fini(struct amdgpu_device *adev)
+{
+ if (!amdgpu_sriov_vf(adev))
+ sysfs_remove_file(&adev->dev->kobj,
+ &dev_attr_pcie_replay_count.attr);
+}
+
static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t ppos, size_t count)
@@ -1389,6 +1410,17 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
BUG();
}
+static uint32_t amdgpu_device_get_vbios_flags(struct amdgpu_device *adev)
+{
+ if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
+ return AMDGPU_VBIOS_SKIP;
+
+ if (hweight32(adev->aid_mask) && amdgpu_passthrough(adev))
+ return AMDGPU_VBIOS_OPTIONAL;
+
+ return 0;
+}
+
/**
* amdgpu_device_asic_init - Wrapper for atom asic_init
*
@@ -1398,18 +1430,28 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
*/
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
+ uint32_t flags;
+ bool optional;
int ret;
amdgpu_asic_pre_asic_init(adev);
+ flags = amdgpu_device_get_vbios_flags(adev);
+ optional = !!(flags & (AMDGPU_VBIOS_OPTIONAL | AMDGPU_VBIOS_SKIP));
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0) ||
amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
amdgpu_psp_wait_for_bootloader(adev);
+ if (optional && !adev->bios)
+ return 0;
+
ret = amdgpu_atomfirmware_asic_init(adev, true);
return ret;
} else {
+ if (optional && !adev->bios)
+ return 0;
+
return amdgpu_atom_asic_init(adev->mode_info.atom_context);
}
@@ -1705,14 +1747,6 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
return 0;
}
-static bool amdgpu_device_read_bios(struct amdgpu_device *adev)
-{
- if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
- return false;
-
- return true;
-}
-
/*
* GPU helpers function.
*/
@@ -1727,12 +1761,15 @@ static bool amdgpu_device_read_bios(struct amdgpu_device *adev)
*/
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
- uint32_t reg;
+ uint32_t reg, flags;
if (amdgpu_sriov_vf(adev))
return false;
- if (!amdgpu_device_read_bios(adev))
+ flags = amdgpu_device_get_vbios_flags(adev);
+ if (flags & AMDGPU_VBIOS_SKIP)
+ return false;
+ if ((flags & AMDGPU_VBIOS_OPTIONAL) && !adev->bios)
return false;
if (amdgpu_passthrough(adev)) {
@@ -2240,7 +2277,8 @@ void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
- adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
+ adev->ip_blocks[i].version->funcs->get_clockgating_state(
+ &adev->ip_blocks[i], flags);
}
}
@@ -2585,8 +2623,9 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
struct amdgpu_ip_block *ip_block;
struct pci_dev *parent;
+ bool total, skip_bios;
+ uint32_t bios_flags;
int i, r;
- bool total;
amdgpu_device_enable_virtual_display(adev);
@@ -2699,16 +2738,31 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (r)
return r;
+ bios_flags = amdgpu_device_get_vbios_flags(adev);
+ skip_bios = !!(bios_flags & AMDGPU_VBIOS_SKIP);
/* Read BIOS */
- if (amdgpu_device_read_bios(adev)) {
- if (!amdgpu_get_bios(adev))
+ if (!skip_bios) {
+ bool optional =
+ !!(bios_flags & AMDGPU_VBIOS_OPTIONAL);
+ if (!amdgpu_get_bios(adev) && !optional)
return -EINVAL;
- r = amdgpu_atombios_init(adev);
- if (r) {
- dev_err(adev->dev, "amdgpu_atombios_init failed\n");
- amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
- return r;
+ if (optional && !adev->bios)
+ dev_info(
+ adev->dev,
+ "VBIOS image optional, proceeding without VBIOS image");
+
+ if (adev->bios) {
+ r = amdgpu_atombios_init(adev);
+ if (r) {
+ dev_err(adev->dev,
+ "amdgpu_atombios_init failed\n");
+ amdgpu_vf_error_put(
+ adev,
+ AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL,
+ 0, 0);
+ return r;
+ }
}
}
@@ -2721,6 +2775,9 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (!total)
return -ENODEV;
+ if (adev->gmc.xgmi.supported)
+ amdgpu_xgmi_early_init(adev);
+
ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
if (ip_block->status.valid != false)
amdgpu_amdkfd_device_probe(adev);
@@ -2830,6 +2887,12 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
{
+ struct drm_sched_init_args args = {
+ .ops = &amdgpu_sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .timeout_wq = adev->reset_domain->wq,
+ .dev = adev->dev,
+ };
long timeout;
int r, i;
@@ -2855,12 +2918,12 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
break;
}
- r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, NULL,
- DRM_SCHED_PRIORITY_COUNT,
- ring->num_hw_submission, 0,
- timeout, adev->reset_domain->wq,
- ring->sched_score, ring->name,
- adev->dev);
+ args.timeout = timeout;
+ args.credit_limit = ring->num_hw_submission;
+ args.score = ring->sched_score;
+ args.name = ring->name;
+
+ r = drm_sched_init(&ring->sched, &args);
if (r) {
DRM_ERROR("Failed to create scheduler on ring %s.\n",
ring->name);
@@ -3063,6 +3126,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
amdgpu_fru_get_product_info(adev);
+ if (!amdgpu_sriov_vf(adev) || amdgpu_sriov_ras_cper_en(adev))
+ r = amdgpu_cper_init(adev);
+
init_failed:
return r;
@@ -3423,6 +3489,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
int i, r;
+ amdgpu_cper_fini(adev);
+
if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
amdgpu_virt_release_ras_err_handler_data(adev);
@@ -4122,11 +4190,6 @@ static bool amdgpu_device_check_iommu_remap(struct amdgpu_device *adev)
}
#endif
-static const struct attribute *amdgpu_dev_attributes[] = {
- &dev_attr_pcie_replay_count.attr,
- NULL
-};
-
static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
{
if (amdgpu_mcbp == 1)
@@ -4223,7 +4286,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->grbm_idx_mutex);
mutex_init(&adev->mn_lock);
mutex_init(&adev->virt.vf_errors.lock);
- mutex_init(&adev->virt.rlcg_reg_lock);
hash_init(adev->mn_hash);
mutex_init(&adev->psp.mutex);
mutex_init(&adev->notifier_lock);
@@ -4232,7 +4294,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->gfx.reset_sem_mutex);
/* Initialize the mutex for cleaner shader isolation between GFX and compute processes */
mutex_init(&adev->enforce_isolation_mutex);
+ for (i = 0; i < MAX_XCP; ++i) {
+ adev->isolation[i].spearhead = dma_fence_get_stub();
+ amdgpu_sync_create(&adev->isolation[i].active);
+ amdgpu_sync_create(&adev->isolation[i].prev);
+ }
mutex_init(&adev->gfx.kfd_sch_mutex);
+ mutex_init(&adev->gfx.workload_profile_mutex);
+ mutex_init(&adev->vcn.workload_profile_mutex);
amdgpu_device_init_apu_flags(adev);
@@ -4249,6 +4318,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
spin_lock_init(&adev->se_cac_idx_lock);
spin_lock_init(&adev->audio_endpt_idx_lock);
spin_lock_init(&adev->mm_stats.lock);
+ spin_lock_init(&adev->virt.rlcg_reg_lock);
spin_lock_init(&adev->wb.lock);
INIT_LIST_HEAD(&adev->reset_list);
@@ -4293,10 +4363,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
* for throttling interrupt) = 60 seconds.
*/
ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
- ratelimit_state_init(&adev->virt.ras_telemetry_rs, 5 * HZ, 1);
ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
- ratelimit_set_flags(&adev->virt.ras_telemetry_rs, RATELIMIT_MSG_ON_RELEASE);
/* Registers mapping */
/* TODO: block userspace mapping of io register */
@@ -4328,7 +4396,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
return -ENOMEM;
/* detect hw virtualization here */
- amdgpu_detect_virtualization(adev);
+ amdgpu_virt_init(adev);
amdgpu_device_get_pcie_info(adev);
@@ -4351,10 +4419,17 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
return r;
- /* Get rid of things like offb */
- r = aperture_remove_conflicting_pci_devices(adev->pdev, amdgpu_kms_driver.name);
- if (r)
- return r;
+ /*
+ * No need to remove conflicting FBs for non-display class devices.
+ * This prevents the sysfb from being freed accidentally.
+ */
+ if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA ||
+ (pdev->class >> 8) == PCI_CLASS_DISPLAY_OTHER) {
+ /* Get rid of things like offb */
+ r = aperture_remove_conflicting_pci_devices(adev->pdev, amdgpu_kms_driver.name);
+ if (r)
+ return r;
+ }
/* Enable TMZ based on IP_VERSION */
amdgpu_gmc_tmz_set(adev);
@@ -4472,8 +4547,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
goto failed;
}
/* init i2c buses */
- if (!amdgpu_device_has_dc_support(adev))
- amdgpu_atombios_i2c_init(adev);
+ amdgpu_i2c_init(adev);
}
}
@@ -4566,7 +4640,7 @@ fence_driver_init:
} else
adev->ucode_sysfs_en = true;
- r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
+ r = amdgpu_device_attr_sysfs_init(adev);
if (r)
dev_err(adev->dev, "Could not create amdgpu device attr\n");
@@ -4703,7 +4777,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
amdgpu_pm_sysfs_fini(adev);
if (adev->ucode_sysfs_en)
amdgpu_ucode_sysfs_fini(adev);
- sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
+ amdgpu_device_attr_sysfs_fini(adev);
amdgpu_fru_sysfs_fini(adev);
amdgpu_reg_state_sysfs_fini(adev);
@@ -4730,7 +4804,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
void amdgpu_device_fini_sw(struct amdgpu_device *adev)
{
- int idx;
+ int i, idx;
bool px;
amdgpu_device_ip_fini(adev);
@@ -4738,22 +4812,29 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
adev->accel_working = false;
dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
+ for (i = 0; i < MAX_XCP; ++i) {
+ dma_fence_put(adev->isolation[i].spearhead);
+ amdgpu_sync_free(&adev->isolation[i].active);
+ amdgpu_sync_free(&adev->isolation[i].prev);
+ }
amdgpu_reset_fini(adev);
/* free i2c buses */
- if (!amdgpu_device_has_dc_support(adev))
- amdgpu_i2c_fini(adev);
-
- if (amdgpu_emu_mode != 1)
- amdgpu_atombios_fini(adev);
+ amdgpu_i2c_fini(adev);
- kfree(adev->bios);
- adev->bios = NULL;
+ if (adev->bios) {
+ if (amdgpu_emu_mode != 1)
+ amdgpu_atombios_fini(adev);
+ amdgpu_bios_release(adev);
+ }
kfree(adev->fru_info);
adev->fru_info = NULL;
+ kfree(adev->xcp_mgr);
+ adev->xcp_mgr = NULL;
+
px = amdgpu_device_supports_px(adev_to_drm(adev));
if (px || (!dev_is_removable(&adev->pdev->dev) &&
@@ -5285,6 +5366,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
amdgpu_ras_resume(adev);
@@ -5375,7 +5457,8 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
u32 i;
int ret = 0;
- amdgpu_atombios_scratch_regs_engine_hung(adev, true);
+ if (adev->bios)
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
dev_info(adev->dev, "GPU mode1 reset\n");
@@ -5417,7 +5500,8 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
goto mode1_reset_failed;
}
- amdgpu_atombios_scratch_regs_engine_hung(adev, false);
+ if (adev->bios)
+ amdgpu_atombios_scratch_regs_engine_hung(adev, false);
return 0;
@@ -6123,6 +6207,10 @@ end_reset:
dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
atomic_set(&adev->reset_domain->reset_res, r);
+
+ if (!r)
+ drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE);
+
return r;
}
@@ -6851,22 +6939,117 @@ struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
{
struct dma_fence *old = NULL;
+ dma_fence_get(gang);
do {
dma_fence_put(old);
old = amdgpu_device_get_gang(adev);
if (old == gang)
break;
- if (!dma_fence_is_signaled(old))
+ if (!dma_fence_is_signaled(old)) {
+ dma_fence_put(gang);
return old;
+ }
} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
old, gang) != old);
+ /*
+ * Drop it once for the exchanged reference in adev and once for the
+ * thread local reference acquired in amdgpu_device_get_gang().
+ */
+ dma_fence_put(old);
dma_fence_put(old);
return NULL;
}
+/**
+ * amdgpu_device_enforce_isolation - enforce HW isolation
+ * @adev: the amdgpu device pointer
+ * @ring: the HW ring the job is supposed to run on
+ * @job: the job which is about to be pushed to the HW ring
+ *
+ * Makes sure that only one client at a time can use the GFX block.
+ * Returns: The dependency to wait on before the job can be pushed to the HW.
+ * The function is called multiple times until NULL is returned.
+ */
+struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_job *job)
+{
+ struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id];
+ struct drm_sched_fence *f = job->base.s_fence;
+ struct dma_fence *dep;
+ void *owner;
+ int r;
+
+ /*
+ * For now enforce isolation only for the GFX block since we only need
+ * the cleaner shader on those rings.
+ */
+ if (ring->funcs->type != AMDGPU_RING_TYPE_GFX &&
+ ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
+ return NULL;
+
+ /*
+ * All submissions where enforce isolation is false are handled as if
+ * they come from a single client. Use ~0l as the owner to distinguish it
+ * from kernel submissions where the owner is NULL.
+ */
+ owner = job->enforce_isolation ? f->owner : (void *)~0l;
+
+ mutex_lock(&adev->enforce_isolation_mutex);
+
+ /*
+ * The "spearhead" submission is the first one which changes the
+ * ownership to its client. We always need to wait for it to be
+ * pushed to the HW before proceeding with anything.
+ */
+ if (&f->scheduled != isolation->spearhead &&
+ !dma_fence_is_signaled(isolation->spearhead)) {
+ dep = isolation->spearhead;
+ goto out_grab_ref;
+ }
+
+ if (isolation->owner != owner) {
+
+ /*
+ * Wait for any gang to be assembled before switching to a
+ * different owner, otherwise we could deadlock the
+ * submissions.
+ */
+ if (!job->gang_submit) {
+ dep = amdgpu_device_get_gang(adev);
+ if (!dma_fence_is_signaled(dep))
+ goto out_return_dep;
+ dma_fence_put(dep);
+ }
+
+ dma_fence_put(isolation->spearhead);
+ isolation->spearhead = dma_fence_get(&f->scheduled);
+ amdgpu_sync_move(&isolation->active, &isolation->prev);
+ trace_amdgpu_isolation(isolation->owner, owner);
+ isolation->owner = owner;
+ }
+
+ /*
+ * Specifying the ring here helps to pipeline submissions even when
+ * isolation is enabled. If that is not desired for testing, NULL can be
+ * used instead of the ring to enforce a CPU round trip while switching
+ * between clients.
+ */
+ dep = amdgpu_sync_peek_fence(&isolation->prev, ring);
+ r = amdgpu_sync_fence(&isolation->active, &f->finished, GFP_NOWAIT);
+ if (r)
+ DRM_WARN("OOM tracking isolation\n");
+
+out_grab_ref:
+ dma_fence_get(dep);
+out_return_dep:
+ mutex_unlock(&adev->enforce_isolation_mutex);
+ return dep;
+}
+
bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
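The new amdgpu_device_enforce_isolation() above documents that it returns a dependency to wait on and is called repeatedly until it returns NULL. A minimal sketch of such a loop — example_wait_isolation() and the direct dma_fence_wait() consumer are placeholders, as the real driver hands the dependency back through the DRM scheduler callbacks:

/* Hedged sketch: drain isolation dependencies before a job is pushed to the HW. */
static void example_wait_isolation(struct amdgpu_device *adev,
				   struct amdgpu_ring *ring,
				   struct amdgpu_job *job)
{
	struct dma_fence *dep;

	while ((dep = amdgpu_device_enforce_isolation(adev, ring, job))) {
		dma_fence_wait(dep, false);	/* placeholder consumer */
		dma_fence_put(dep);
	}
}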
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 949d74eff294..dc2713ec95a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -113,8 +113,13 @@
#include "amdgpu_isp.h"
#endif
-#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
-MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
+MODULE_FIRMWARE("amdgpu/ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/vega10_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/vega12_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/vega20_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/raven_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/raven2_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/picasso_ip_discovery.bin");
#define mmIP_DISCOVERY_VERSION 0x16A00
#define mmRCC_CONFIG_MEMSIZE 0xde3
@@ -297,21 +302,13 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
return ret;
}
-static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
+static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev,
+ uint8_t *binary,
+ const char *fw_name)
{
const struct firmware *fw;
- const char *fw_name;
int r;
- switch (amdgpu_discovery) {
- case 2:
- fw_name = FIRMWARE_IP_DISCOVERY;
- break;
- default:
- dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
- return -EINVAL;
- }
-
r = request_firmware(&fw, fw_name, adev->dev);
if (r) {
dev_err(adev->dev, "can't load firmware \"%s\"\n",
@@ -404,10 +401,39 @@ static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
return 0;
}
+static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev)
+{
+ if (amdgpu_discovery == 2)
+ return "amdgpu/ip_discovery.bin";
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ return "amdgpu/vega10_ip_discovery.bin";
+ case CHIP_VEGA12:
+ return "amdgpu/vega12_ip_discovery.bin";
+ case CHIP_RAVEN:
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ return "amdgpu/raven2_ip_discovery.bin";
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
+ return "amdgpu/picasso_ip_discovery.bin";
+ else
+ return "amdgpu/raven_ip_discovery.bin";
+ case CHIP_VEGA20:
+ return "amdgpu/vega20_ip_discovery.bin";
+ case CHIP_ARCTURUS:
+ return "amdgpu/arcturus_ip_discovery.bin";
+ case CHIP_ALDEBARAN:
+ return "amdgpu/aldebaran_ip_discovery.bin";
+ default:
+ return NULL;
+ }
+}
+
static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
struct table_info *info;
struct binary_header *bhdr;
+ const char *fw_name;
uint16_t offset;
uint16_t size;
uint16_t checksum;
@@ -419,9 +445,10 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
return -ENOMEM;
/* Read from file if it is the preferred option */
- if (amdgpu_discovery == 2) {
+ fw_name = amdgpu_discovery_get_fw_name(adev);
+ if (fw_name != NULL) {
dev_info(adev->dev, "use ip discovery information from file");
- r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
+ r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin, fw_name);
if (r) {
dev_err(adev->dev, "failed to read ip discovery binary from file\n");
@@ -587,16 +614,19 @@ void amdgpu_discovery_fini(struct amdgpu_device *adev)
adev->mman.discovery_bin = NULL;
}
-static int amdgpu_discovery_validate_ip(const struct ip_v4 *ip)
+static int amdgpu_discovery_validate_ip(struct amdgpu_device *adev,
+ uint8_t instance, uint16_t hw_id)
{
- if (ip->instance_number >= HWIP_MAX_INSTANCE) {
- DRM_ERROR("Unexpected instance_number (%d) from ip discovery blob\n",
- ip->instance_number);
+ if (instance >= HWIP_MAX_INSTANCE) {
+ dev_err(adev->dev,
+ "Unexpected instance_number (%d) from ip discovery blob\n",
+ instance);
return -EINVAL;
}
- if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
- DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
- le16_to_cpu(ip->hw_id));
+ if (hw_id >= HW_ID_MAX) {
+ dev_err(adev->dev,
+ "Unexpected hw_id (%d) from ip discovery blob\n",
+ hw_id);
return -EINVAL;
}
@@ -609,8 +639,10 @@ static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
struct die_header *dhdr;
- struct ip_v4 *ip;
+ struct ip *ip;
uint16_t die_offset, ip_offset, num_dies, num_ips;
+ uint16_t hw_id;
+ uint8_t inst;
int i, j;
bhdr = (struct binary_header *)adev->mman.discovery_bin;
@@ -626,16 +658,18 @@ static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
ip_offset = die_offset + sizeof(*dhdr);
for (j = 0; j < num_ips; j++) {
- ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
-
- if (amdgpu_discovery_validate_ip(ip))
+ ip = (struct ip *)(adev->mman.discovery_bin +
+ ip_offset);
+ inst = ip->number_instance;
+ hw_id = le16_to_cpu(ip->hw_id);
+ if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
goto next_ip;
- if (le16_to_cpu(ip->variant) == 1) {
- switch (le16_to_cpu(ip->hw_id)) {
+ if (ip->harvest == 1) {
+ switch (hw_id) {
case VCN_HWID:
(*vcn_harvest_count)++;
- if (ip->instance_number == 0) {
+ if (inst == 0) {
adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
adev->vcn.inst_mask &=
~AMDGPU_VCN_HARVEST_VCN0;
@@ -657,10 +691,8 @@ static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
}
}
next_ip:
- if (ihdr->base_addr_64_bit)
- ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
- else
- ip_offset += struct_size(ip, base_address, ip->num_base_address);
+ ip_offset += struct_size(ip, base_address,
+ ip->num_base_address);
}
}
}
@@ -1019,6 +1051,8 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
bool reg_base_64)
{
int ii, jj, kk, res;
+ uint16_t hw_id;
+ uint8_t inst;
DRM_DEBUG("num_ips:%d", num_ips);
@@ -1034,8 +1068,10 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
struct ip_hw_instance *ip_hw_instance;
ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
- if (amdgpu_discovery_validate_ip(ip) ||
- le16_to_cpu(ip->hw_id) != ii)
+ inst = ip->instance_number;
+ hw_id = le16_to_cpu(ip->hw_id);
+ if (amdgpu_discovery_validate_ip(adev, inst, hw_id) ||
+ hw_id != ii)
goto next_ip;
DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);
@@ -1281,7 +1317,10 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
uint16_t die_offset;
uint16_t ip_offset;
uint16_t num_dies;
+ uint32_t wafl_ver;
uint16_t num_ips;
+ uint16_t hw_id;
+ uint8_t inst;
int hw_ip;
int i, j, k;
int r;
@@ -1292,6 +1331,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
return r;
}
+ wafl_ver = 0;
adev->gfx.xcc_mask = 0;
adev->sdma.sdma_mask = 0;
adev->vcn.inst_mask = 0;
@@ -1321,7 +1361,9 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
for (j = 0; j < num_ips; j++) {
ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
- if (amdgpu_discovery_validate_ip(ip))
+ inst = ip->instance_number;
+ hw_id = le16_to_cpu(ip->hw_id);
+ if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
goto next_ip;
num_base_address = ip->num_base_address;
@@ -1390,6 +1432,10 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
adev->gfx.xcc_mask |=
(1U << ip->instance_number);
+ if (!wafl_ver && le16_to_cpu(ip->hw_id) == WAFLC_HWID)
+ wafl_ver = IP_VERSION_FULL(ip->major, ip->minor,
+ ip->revision, 0, 0);
+
for (k = 0; k < num_base_address; k++) {
/*
* convert the endianness of base addresses in place,
@@ -1455,22 +1501,32 @@ next_ip:
}
}
+ if (wafl_ver && !adev->ip_versions[XGMI_HWIP][0])
+ adev->ip_versions[XGMI_HWIP][0] = wafl_ver;
+
return 0;
}
static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
+ struct ip_discovery_header *ihdr;
+ struct binary_header *bhdr;
int vcn_harvest_count = 0;
int umc_harvest_count = 0;
+ uint16_t offset, ihdr_ver;
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ offset = le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset);
+ ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
+ offset);
+ ihdr_ver = le16_to_cpu(ihdr->version);
/*
* Harvest table does not fit Navi1x and legacy GPUs,
* so read harvest bit per IP data structure to set
* harvest configuration.
*/
if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
- amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3) &&
- amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 4)) {
+ ihdr_ver <= 2) {
if ((adev->pdev->device == 0x731E &&
(adev->pdev->revision == 0xC6 ||
adev->pdev->revision == 0xC7)) ||
@@ -1864,6 +1920,7 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
break;
case IP_VERSION(12, 0, 0):
@@ -1919,6 +1976,7 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
break;
case IP_VERSION(12, 0, 0):
@@ -1998,6 +2056,7 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
break;
case IP_VERSION(11, 0, 8):
@@ -2029,6 +2088,7 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
break;
case IP_VERSION(14, 0, 2):
case IP_VERSION(14, 0, 3):
+ case IP_VERSION(14, 0, 5):
amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block);
break;
default:
@@ -2061,6 +2121,7 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
break;
case IP_VERSION(12, 0, 0):
@@ -2079,6 +2140,7 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 10):
case IP_VERSION(13, 0, 11):
case IP_VERSION(13, 0, 14):
+ case IP_VERSION(13, 0, 12):
amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
break;
case IP_VERSION(14, 0, 0):
@@ -2086,6 +2148,7 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(14, 0, 2):
case IP_VERSION(14, 0, 3):
case IP_VERSION(14, 0, 4):
+ case IP_VERSION(14, 0, 5):
amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
break;
default:
@@ -2137,6 +2200,7 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 1, 0):
/* TODO: Fix IP version. DC code expects version 4.0.1 */
if (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(4, 1, 0))
@@ -2215,6 +2279,7 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
break;
case IP_VERSION(12, 0, 0):
@@ -2270,6 +2335,7 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(6, 1, 0):
case IP_VERSION(6, 1, 1):
case IP_VERSION(6, 1, 2):
+ case IP_VERSION(6, 1, 3):
amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
break;
case IP_VERSION(7, 0, 0):
@@ -2393,6 +2459,7 @@ static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
adev->enable_mes = true;
adev->enable_mes_kiq = true;
@@ -2480,6 +2547,38 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_RAVEN:
+ case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
+ case CHIP_ALDEBARAN:
+ /* This is not fatal. We have a fallback below
+ * if the new firmware files are not present. Some of
+ * this will be overridden below to keep things
+ * consistent with the current behavior.
+ */
+ r = amdgpu_discovery_reg_base_init(adev);
+ if (!r) {
+ amdgpu_discovery_harvest_ip(adev);
+ amdgpu_discovery_get_gfx_info(adev);
+ amdgpu_discovery_get_mall_info(adev);
+ amdgpu_discovery_get_vcn_info(adev);
+ }
+ break;
+ default:
+ r = amdgpu_discovery_reg_base_init(adev);
+ if (r)
+ return -EINVAL;
+
+ amdgpu_discovery_harvest_ip(adev);
+ amdgpu_discovery_get_gfx_info(adev);
+ amdgpu_discovery_get_mall_info(adev);
+ amdgpu_discovery_get_vcn_info(adev);
+ break;
+ }
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
adev->gmc.num_umc = 4;
@@ -2642,14 +2741,6 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
break;
default:
- r = amdgpu_discovery_reg_base_init(adev);
- if (r)
- return -EINVAL;
-
- amdgpu_discovery_harvest_ip(adev);
- amdgpu_discovery_get_gfx_info(adev);
- amdgpu_discovery_get_mall_info(adev);
- amdgpu_discovery_get_vcn_info(adev);
break;
}
@@ -2708,6 +2799,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
adev->family = AMDGPU_FAMILY_GC_11_5_0;
break;
case IP_VERSION(12, 0, 0):
@@ -2733,19 +2825,13 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
adev->flags |= AMD_IS_APU;
break;
default:
break;
}
- if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0))
- adev->gmc.xgmi.supported = true;
-
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
- adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 4, 0);
-
/* set NBIO version */
switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
case IP_VERSION(6, 1, 0):
@@ -2766,11 +2852,13 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
break;
case IP_VERSION(7, 9, 0):
+ case IP_VERSION(7, 9, 1):
adev->nbio.funcs = &nbio_v7_9_funcs;
adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
break;
case IP_VERSION(7, 11, 0):
case IP_VERSION(7, 11, 1):
+ case IP_VERSION(7, 11, 2):
case IP_VERSION(7, 11, 3):
adev->nbio.funcs = &nbio_v7_11_funcs;
adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
@@ -2898,6 +2986,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 10):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 9):
case IP_VERSION(13, 0, 10):
@@ -2907,6 +2996,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->smuio.funcs = &smuio_v13_0_funcs;
break;
case IP_VERSION(13, 0, 3):
+ case IP_VERSION(13, 0, 11):
adev->smuio.funcs = &smuio_v13_0_3_funcs;
if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU) {
adev->flags |= AMD_IS_APU;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 95a05b03f799..23cfce5aa1fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -121,9 +121,11 @@
* - 3.59.0 - Cleared VRAM
* - 3.60.0 - Add AMDGPU_TILING_GFX12_DCC_WRITE_COMPRESS_DISABLE (Vulkan requirement)
* - 3.61.0 - Contains fix for RV/PCO compute queues
+ * - 3.62.0 - Add AMDGPU_IDS_FLAGS_MODE_PF, AMDGPU_IDS_FLAGS_MODE_VF & AMDGPU_IDS_FLAGS_MODE_PT
+ * - 3.63.0 - GFX12 display DCC supports 256B max compressed block size
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 61
+#define KMS_DRIVER_MINOR 63
#define KMS_DRIVER_PATCHLEVEL 0
/*
@@ -136,6 +138,8 @@ enum AMDGPU_DEBUG_MASK {
AMDGPU_DEBUG_USE_VRAM_FW_BUF = BIT(3),
AMDGPU_DEBUG_ENABLE_RAS_ACA = BIT(4),
AMDGPU_DEBUG_ENABLE_EXP_RESETS = BIT(5),
+ AMDGPU_DEBUG_DISABLE_GPU_RING_RESET = BIT(6),
+ AMDGPU_DEBUG_SMU_POOL = BIT(7),
};
unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -173,6 +177,7 @@ uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu;
char *amdgpu_virtual_display;
bool enforce_isolation;
+int amdgpu_modeset = -1;
/* Specifies the default granularity for SVM, used in buffer
* migration and restoration of backing memory when handling
@@ -283,6 +288,7 @@ module_param_named(gartsize, amdgpu_gart_size, uint, 0600);
* DOC: gttsize (int)
* Restrict the size of GTT domain (for userspace use) in MiB for testing.
* The default is -1 (Use value specified by TTM).
+ * This parameter is deprecated and will be removed in the future.
*/
MODULE_PARM_DESC(gttsize, "Size of the GTT userspace domain in megabytes (-1 = auto)");
module_param_named(gttsize, amdgpu_gtt_size, int, 0600);
@@ -963,7 +969,7 @@ module_param_named_unsafe(reset_method, amdgpu_reset_method, int, 0644);
* result in the GPU entering bad status when the number of total
* faulty pages by ECC exceeds the threshold value.
*/
-MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold(-1 = ignore threshold (default value), 0 = disable bad page retirement, -2 = driver sets threshold)");
+MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold(-1 = ignore threshold (default value), 0 = disable bad page retirement, -2 = threshold determined by a formula, 0 < threshold < max records, user-defined threshold)");
module_param_named(bad_page_threshold, amdgpu_bad_page_threshold, int, 0444);
MODULE_PARM_DESC(num_kcq, "number of kernel compute queue user want to setup (8 if set to greater than 8 or less than 0, only affect gfx 8+)");
@@ -1034,6 +1040,13 @@ module_param(enforce_isolation, bool, 0444);
MODULE_PARM_DESC(enforce_isolation, "enforce process isolation between graphics and compute . enforce_isolation = on");
/**
+ * DOC: modeset (int)
+ * Override nomodeset (1 = override, -1 = auto). The default is -1 (auto).
+ */
+MODULE_PARM_DESC(modeset, "Override nomodeset (1 = enable, -1 = auto)");
+module_param_named(modeset, amdgpu_modeset, int, 0444);
+
+/**
* DOC: seamless (int)
* Seamless boot will keep the image on the screen during the boot process.
*/
@@ -1049,6 +1062,11 @@ module_param_named(seamless, amdgpu_seamless, int, 0444);
* limits the VRAM size reported to ROCm applications to the visible
* size, usually 256MB.
* - 0x4: Disable GPU soft recovery, always do a full reset
+ * - 0x8: Use VRAM for firmware loading
+ * - 0x10: Enable ACA based RAS logging
+ * - 0x20: Enable experimental resets
+ * - 0x40: Disable ring resets
+ * - 0x80: Use VRAM for SMU pool
*/
MODULE_PARM_DESC(debug_mask, "debug options for amdgpu, disabled by default");
module_param_named_unsafe(debug_mask, amdgpu_debug_mask, uint, 0444);
@@ -2221,6 +2239,15 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
pr_info("debug: enable experimental reset features\n");
adev->debug_exp_resets = true;
}
+
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_DISABLE_GPU_RING_RESET) {
+ pr_info("debug: ring reset disabled\n");
+ adev->debug_disable_gpu_ring_reset = true;
+ }
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_SMU_POOL) {
+ pr_info("debug: use vram for smu pool\n");
+ adev->pm.smu_debug_mask |= SMU_DEBUG_POOL_USE_VRAM;
+ }
}
static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
@@ -2248,6 +2275,12 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
int ret, retry = 0, i;
bool supports_atomic = false;
+ if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA ||
+ (pdev->class >> 8) == PCI_CLASS_DISPLAY_OTHER) {
+ if (drm_firmware_drivers_only() && amdgpu_modeset == -1)
+ return -EINVAL;
+ }
+
/* skip devices which are owned by radeon */
for (i = 0; i < ARRAY_SIZE(amdgpu_unsupported_pciidlist); i++) {
if (amdgpu_unsupported_pciidlist[i] == pdev->device)
@@ -2555,7 +2588,6 @@ static int amdgpu_pmops_freeze(struct device *dev)
int r;
r = amdgpu_device_suspend(drm_dev, true);
- adev->in_s4 = false;
if (r)
return r;
@@ -2567,8 +2599,13 @@ static int amdgpu_pmops_freeze(struct device *dev)
static int amdgpu_pmops_thaw(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ int r;
- return amdgpu_device_resume(drm_dev, true);
+ r = amdgpu_device_resume(drm_dev, true);
+ adev->in_s4 = false;
+
+ return r;
}
static int amdgpu_pmops_poweroff(struct device *dev)
@@ -2581,6 +2618,9 @@ static int amdgpu_pmops_poweroff(struct device *dev)
static int amdgpu_pmops_restore(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+ adev->in_s4 = false;
return amdgpu_device_resume(drm_dev, true);
}
@@ -2974,9 +3014,6 @@ static int __init amdgpu_init(void)
{
int r;
- if (drm_firmware_drivers_only())
- return -EINVAL;
-
r = amdgpu_sync_init();
if (r)
goto error_sync;
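The DOC block above maps the new debug bits to 0x40 (disable ring resets) and 0x80 (SMU pool in VRAM), matching the enum values added earlier in this patch. A rough sketch of how such a user-supplied mask (e.g. amdgpu.debug_mask=0xc0) decodes — example_decode_debug_mask() and its messages are illustrative, not the driver's exact code:

/* Hedged sketch: decoding the two debug bits introduced by this patch. */
static void example_decode_debug_mask(unsigned int mask)
{
	if (mask & AMDGPU_DEBUG_DISABLE_GPU_RING_RESET)	/* BIT(6) = 0x40 */
		pr_info("example: GPU ring resets disabled\n");
	if (mask & AMDGPU_DEBUG_SMU_POOL)		/* BIT(7) = 0x80 */
		pr_info("example: SMU pool placed in VRAM\n");
}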
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index 09c9194d5bd5..1ae88c459da5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -31,6 +31,7 @@
#define FRU_EEPROM_MADDR_6 0x60000
#define FRU_EEPROM_MADDR_8 0x80000
+#define FRU_EEPROM_MADDR_INV 0xFFFFF
static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
{
@@ -63,10 +64,10 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
switch (adev->asic_type) {
case CHIP_VEGA20:
/* D161 and D163 are the VG20 server SKUs */
- if (strnstr(atom_ctx->vbios_pn, "D161",
- sizeof(atom_ctx->vbios_pn)) ||
- strnstr(atom_ctx->vbios_pn, "D163",
- sizeof(atom_ctx->vbios_pn))) {
+ if (atom_ctx && (strnstr(atom_ctx->vbios_pn, "D161",
+ sizeof(atom_ctx->vbios_pn)) ||
+ strnstr(atom_ctx->vbios_pn, "D163",
+ sizeof(atom_ctx->vbios_pn)))) {
if (fru_addr)
*fru_addr = FRU_EEPROM_MADDR_6;
return true;
@@ -78,8 +79,8 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
return false;
}
case IP_VERSION(11, 0, 7):
- if (strnstr(atom_ctx->vbios_pn, "D603",
- sizeof(atom_ctx->vbios_pn))) {
+ if (atom_ctx && strnstr(atom_ctx->vbios_pn, "D603",
+ sizeof(atom_ctx->vbios_pn))) {
if (strnstr(atom_ctx->vbios_pn, "D603GLXE",
sizeof(atom_ctx->vbios_pn))) {
return false;
@@ -94,8 +95,8 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
}
case IP_VERSION(13, 0, 2):
/* All Aldebaran SKUs have an FRU */
- if (!strnstr(atom_ctx->vbios_pn, "D673",
- sizeof(atom_ctx->vbios_pn)))
+ if (atom_ctx && !strnstr(atom_ctx->vbios_pn, "D673",
+ sizeof(atom_ctx->vbios_pn)))
if (fru_addr)
*fru_addr = FRU_EEPROM_MADDR_6;
return true;
@@ -104,6 +105,10 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
if (fru_addr)
*fru_addr = FRU_EEPROM_MADDR_8;
return true;
+ case IP_VERSION(13, 0, 12):
+ if (fru_addr)
+ *fru_addr = FRU_EEPROM_MADDR_INV;
+ return true;
default:
return false;
}
@@ -120,6 +125,10 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
if (!is_fru_eeprom_supported(adev, &fru_addr))
return 0;
+ /* FRU data available, but no direct EEPROM access */
+ if (fru_addr == FRU_EEPROM_MADDR_INV)
+ return 0;
+
if (!adev->fru_info) {
adev->fru_info = kzalloc(sizeof(*adev->fru_info), GFP_KERNEL);
if (!adev->fru_info)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index c1f35ded684e..72af5e5a894a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -771,18 +771,8 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
return r;
}
-/* amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
- *
- * @adev: amdgpu_device pointer
- * @bool enable true: enable gfx off feature, false: disable gfx off feature
- *
- * 1. gfx off feature will be enabled by gfx ip after gfx cg gp enabled.
- * 2. other client can send request to disable gfx off feature, the request should be honored.
- * 3. other client can cancel their request of disable gfx off feature
- * 4. other client should not send request to enable gfx off feature before disable gfx off feature.
- */
-
-void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
+static void amdgpu_gfx_do_off_ctrl(struct amdgpu_device *adev, bool enable,
+ bool no_delay)
{
unsigned long delay = GFX_OFF_DELAY_ENABLE;
@@ -804,7 +794,7 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
if (adev->gfx.gfx_off_req_count == 0 &&
!adev->gfx.gfx_off_state) {
/* If going to s2idle, no need to wait */
- if (adev->in_s0ix) {
+ if (no_delay) {
if (!amdgpu_dpm_set_powergating_by_smu(adev,
AMD_IP_BLOCK_TYPE_GFX, true, 0))
adev->gfx.gfx_off_state = true;
@@ -836,6 +826,43 @@ unlock:
mutex_unlock(&adev->gfx.gfx_off_mutex);
}
+/* amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
+ *
+ * @adev: amdgpu_device pointer
+ * @bool enable true: enable gfx off feature, false: disable gfx off feature
+ *
+ * 1. gfx off feature will be enabled by gfx ip after gfx cg pg enabled.
+ * 2. other clients can send requests to disable the gfx off feature; such requests should be honored.
+ * 3. other clients can cancel their requests to disable the gfx off feature.
+ * 4. other clients should not request enabling the gfx off feature before first requesting to disable it.
+ *
+ * gfx off allow will be delayed by GFX_OFF_DELAY_ENABLE ms.
+ */
+void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
+{
+ /* If going to s2idle, no need to wait */
+ bool no_delay = adev->in_s0ix ? true : false;
+
+ amdgpu_gfx_do_off_ctrl(adev, enable, no_delay);
+}
+
+/* amdgpu_gfx_off_ctrl_immediate - Handle gfx off feature enable/disable
+ *
+ * @adev: amdgpu_device pointer
+ * @bool enable true: enable gfx off feature, false: disable gfx off feature
+ *
+ * 1. gfx off feature will be enabled by gfx ip after gfx cg pg enabled.
+ * 2. other clients can send requests to disable the gfx off feature; such requests should be honored.
+ * 3. other clients can cancel their requests to disable the gfx off feature.
+ * 4. other clients should not request enabling the gfx off feature before first requesting to disable it.
+ *
+ * gfx off allow will be issued immediately.
+ */
+void amdgpu_gfx_off_ctrl_immediate(struct amdgpu_device *adev, bool enable)
+{
+ amdgpu_gfx_do_off_ctrl(adev, enable, true);
+}
+
int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value)
{
int r = 0;
@@ -1638,15 +1665,8 @@ static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
}
mutex_lock(&adev->enforce_isolation_mutex);
- for (i = 0; i < num_partitions; i++) {
- if (adev->enforce_isolation[i] && !partition_values[i])
- /* Going from enabled to disabled */
- amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(i));
- else if (!adev->enforce_isolation[i] && partition_values[i])
- /* Going from disabled to enabled */
- amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
+ for (i = 0; i < num_partitions; i++)
adev->enforce_isolation[i] = partition_values[i];
- }
mutex_unlock(&adev->enforce_isolation_mutex);
amdgpu_mes_update_enforce_isolation(adev);
@@ -1975,8 +1995,8 @@ void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work)
if (adev->kfd.init_complete) {
WARN_ON_ONCE(!adev->gfx.kfd_sch_inactive[idx]);
WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx]);
- amdgpu_amdkfd_start_sched(adev, idx);
- adev->gfx.kfd_sch_inactive[idx] = false;
+ amdgpu_amdkfd_start_sched(adev, idx);
+ adev->gfx.kfd_sch_inactive[idx] = false;
}
}
mutex_unlock(&adev->enforce_isolation_mutex);
@@ -2115,6 +2135,80 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
}
+void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work)
+{
+ struct amdgpu_device *adev =
+ container_of(work, struct amdgpu_device, gfx.idle_work.work);
+ enum PP_SMC_POWER_PROFILE profile;
+ u32 i, fences = 0;
+ int r;
+
+ if (adev->gfx.num_gfx_rings)
+ profile = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
+ else
+ profile = PP_SMC_POWER_PROFILE_COMPUTE;
+
+ for (i = 0; i < AMDGPU_MAX_GFX_RINGS; ++i)
+ fences += amdgpu_fence_count_emitted(&adev->gfx.gfx_ring[i]);
+ for (i = 0; i < (AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES); ++i)
+ fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]);
+ if (!fences && !atomic_read(&adev->gfx.total_submission_cnt)) {
+ mutex_lock(&adev->gfx.workload_profile_mutex);
+ if (adev->gfx.workload_profile_active) {
+ r = amdgpu_dpm_switch_power_profile(adev, profile, false);
+ if (r)
+ dev_warn(adev->dev, "(%d) failed to disable %s power profile mode\n", r,
+ profile == PP_SMC_POWER_PROFILE_FULLSCREEN3D ?
+ "fullscreen 3D" : "compute");
+ adev->gfx.workload_profile_active = false;
+ }
+ mutex_unlock(&adev->gfx.workload_profile_mutex);
+ } else {
+ schedule_delayed_work(&adev->gfx.idle_work, GFX_PROFILE_IDLE_TIMEOUT);
+ }
+}
+
+void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ enum PP_SMC_POWER_PROFILE profile;
+ int r;
+
+ if (adev->gfx.num_gfx_rings)
+ profile = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
+ else
+ profile = PP_SMC_POWER_PROFILE_COMPUTE;
+
+ atomic_inc(&adev->gfx.total_submission_cnt);
+
+ cancel_delayed_work_sync(&adev->gfx.idle_work);
+
+ /* We can safely return early here because we've cancelled the
+ * delayed work, so there is no one else to set it to false
+ * and we don't care if someone else sets it to true.
+ */
+ if (adev->gfx.workload_profile_active)
+ return;
+
+ mutex_lock(&adev->gfx.workload_profile_mutex);
+ if (!adev->gfx.workload_profile_active) {
+ r = amdgpu_dpm_switch_power_profile(adev, profile, true);
+ if (r)
+ dev_warn(adev->dev, "(%d) failed to enable %s power profile mode\n", r,
+ profile == PP_SMC_POWER_PROFILE_FULLSCREEN3D ?
+ "fullscreen 3D" : "compute");
+ adev->gfx.workload_profile_active = true;
+ }
+ mutex_unlock(&adev->gfx.workload_profile_mutex);
+}
+
+void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring)
+{
+ atomic_dec(&ring->adev->gfx.total_submission_cnt);
+
+ schedule_delayed_work(&ring->adev->gfx.idle_work, GFX_PROFILE_IDLE_TIMEOUT);
+}
+
/*
 * debugfs to enable/disable gfx job submission to a specific core.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 8b5bd63b5773..87e862188766 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -57,6 +57,9 @@ enum amdgpu_gfx_pipe_priority {
#define AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM 0
#define AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM 15
+/* 1 second timeout */
+#define GFX_PROFILE_IDLE_TIMEOUT msecs_to_jiffies(1000)
+
enum amdgpu_gfx_partition {
AMDGPU_SPX_PARTITION_MODE = 0,
AMDGPU_DPX_PARTITION_MODE = 1,
@@ -476,6 +479,11 @@ struct amdgpu_gfx {
bool kfd_sch_inactive[MAX_XCP];
unsigned long enforce_isolation_jiffies[MAX_XCP];
unsigned long enforce_isolation_time[MAX_XCP];
+
+ atomic_t total_submission_cnt;
+ struct delayed_work idle_work;
+ bool workload_profile_active;
+ struct mutex workload_profile_mutex;
};
struct amdgpu_gfx_ras_reg_entry {
@@ -547,6 +555,7 @@ int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me,
bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me,
int pipe, int queue);
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
+void amdgpu_gfx_off_ctrl_immediate(struct amdgpu_device *adev, bool enable);
int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value);
int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
void amdgpu_gfx_ras_fini(struct amdgpu_device *adev);
@@ -584,6 +593,11 @@ void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work);
void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring);
+
+void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work);
+void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring);
+void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring);
+
void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev);
void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev);
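The profile helpers declared above are meant to bracket ring activity: begin_use bumps the submission count and switches to the 3D or compute profile, end_use re-arms the GFX_PROFILE_IDLE_TIMEOUT idle work. A minimal pairing sketch — submit_to_ring() is a placeholder, and in the real driver these calls sit in the ring begin_use/end_use hooks rather than an open-coded wrapper:

/* Hedged sketch: bracketing a submission with the workload-profile helpers. */
static void example_submit(struct amdgpu_ring *ring)
{
	amdgpu_gfx_profile_ring_begin_use(ring);	/* switch to 3D/compute profile */
	submit_to_ring(ring);				/* placeholder for the real emission path */
	amdgpu_gfx_profile_ring_end_use(ring);		/* re-arm the idle timeout */
}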
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 1c19a65e6553..464625282872 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -269,7 +269,7 @@ void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc
* @mc: memory controller structure holding memory information
* @gart_placement: GART placement policy with respect to VRAM
*
- * Function will place try to place GART before or after VRAM.
+ * Function will try to place GART before or after VRAM.
 * If the GART size is bigger than the space left, then we adjust the GART size.
 * Thus this function never fails.
*/
@@ -573,6 +573,7 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] = {0};
unsigned i;
unsigned vmhub, inv_eng;
+ struct amdgpu_ring *shared_ring;
/* init the vm inv eng for all vmhubs */
for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
@@ -591,7 +592,12 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
if (ring == &adev->mes.ring[0] ||
ring == &adev->mes.ring[1] ||
- ring == &adev->umsch_mm.ring)
+ ring == &adev->umsch_mm.ring ||
+ ring == &adev->cper.ring_buf)
+ continue;
+
+ /* Skip if the ring is a shared ring */
+ if (amdgpu_sdma_is_shared_inv_eng(adev, ring))
continue;
inv_eng = ffs(vm_inv_engs[vmhub]);
@@ -606,6 +612,21 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
ring->name, ring->vm_inv_eng, ring->vm_hub);
+ /* SDMA has a special packet which allows it to use the same
+ * invalidation engine for all the rings in one instance.
+ * Therefore, we do not allocate a separate VM invalidation engine
+ * for SDMA page rings. Instead, they share the VM invalidation
+ * engine with the SDMA gfx ring. This change ensures efficient
+ * resource management and avoids the issue of insufficient VM
+ * invalidation engines.
+ */
+ shared_ring = amdgpu_sdma_get_shared_ring(adev, ring);
+ if (shared_ring) {
+ shared_ring->vm_inv_eng = ring->vm_inv_eng;
+ dev_info(adev->dev, "ring %s shares VM invalidation engine %u with ring %s on hub %u\n",
+ ring->name, ring->vm_inv_eng, shared_ring->name, ring->vm_hub);
+ continue;
+ }
}
return 0;
@@ -851,6 +872,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
/* Don't enable it by default yet.
*/
if (amdgpu_tmz < 1) {
@@ -888,6 +910,7 @@ void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
gc_ver == IP_VERSION(9, 4, 2) ||
gc_ver == IP_VERSION(9, 4, 3) ||
gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0) ||
gc_ver >= IP_VERSION(10, 3, 0));
if (!amdgpu_sriov_xnack_support(adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 459a30fe239f..bd7fc123b8f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -29,6 +29,7 @@
#include <linux/types.h>
#include "amdgpu_irq.h"
+#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
/* VA hole for 48bit addresses on Vega10 */
@@ -174,28 +175,6 @@ struct amdgpu_gmc_funcs {
bool (*need_reset_on_init)(struct amdgpu_device *adev);
};
-struct amdgpu_xgmi_ras {
- struct amdgpu_ras_block_object ras_block;
-};
-
-struct amdgpu_xgmi {
- /* from psp */
- u64 node_id;
- u64 hive_id;
- /* fixed per family */
- u64 node_segment_size;
- /* physical node (0-3) */
- unsigned physical_node_id;
- /* number of nodes (0-4) */
- unsigned num_physical_nodes;
- /* gpu list in the same hive */
- struct list_head head;
- bool supported;
- struct ras_common_if *ras_if;
- bool connected_to_cpu;
- struct amdgpu_xgmi_ras *ras;
-};
-
struct amdgpu_mem_partition_info {
union {
struct {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index f0765ccde668..8179d0814db9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -225,6 +225,25 @@ void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c)
kfree(i2c);
}
+void amdgpu_i2c_init(struct amdgpu_device *adev)
+{
+ if (!adev->is_atom_fw) {
+ if (!amdgpu_device_has_dc_support(adev)) {
+ amdgpu_atombios_i2c_init(adev);
+ } else {
+ switch (adev->asic_type) {
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ amdgpu_atombios_oem_i2c_init(adev, 0x97);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+}
+
/* remove all the buses */
void amdgpu_i2c_fini(struct amdgpu_device *adev)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
index 21e3d1dad0a1..1d3d3806e0dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
@@ -28,6 +28,7 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
const struct amdgpu_i2c_bus_rec *rec,
const char *name);
void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c);
+void amdgpu_i2c_init(struct amdgpu_device *adev);
void amdgpu_i2c_fini(struct amdgpu_device *adev);
struct amdgpu_i2c_chan *
amdgpu_i2c_lookup(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 8e712a11aba5..4c4e087230ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -209,7 +209,7 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_ring *ring,
return 0;
}
- fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
+ fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_NOWAIT);
if (!fences)
return -ENOMEM;
@@ -287,46 +287,34 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
(*id)->flushed_updates < updates ||
!(*id)->last_flush ||
((*id)->last_flush->context != fence_context &&
- !dma_fence_is_signaled((*id)->last_flush))) {
+ !dma_fence_is_signaled((*id)->last_flush)))
+ needs_flush = true;
+
+ if ((*id)->owner != vm->immediate.fence_context ||
+ (!adev->vm_manager.concurrent_flush && needs_flush)) {
struct dma_fence *tmp;
- /* Wait for the gang to be assembled before using a
- * reserved VMID or otherwise the gang could deadlock.
+ /* Don't use per engine and per process VMID at the
+ * same time
*/
- tmp = amdgpu_device_get_gang(adev);
- if (!dma_fence_is_signaled(tmp) && tmp != job->gang_submit) {
+ if (adev->vm_manager.concurrent_flush)
+ ring = NULL;
+
+ /* to prevent one context starved by another context */
+ (*id)->pd_gpu_addr = 0;
+ tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
+ if (tmp) {
*id = NULL;
- *fence = tmp;
+ *fence = dma_fence_get(tmp);
return 0;
}
- dma_fence_put(tmp);
-
- /* Make sure the id is owned by the gang before proceeding */
- if (!job->gang_submit ||
- (*id)->owner != vm->immediate.fence_context) {
-
- /* Don't use per engine and per process VMID at the
- * same time
- */
- if (adev->vm_manager.concurrent_flush)
- ring = NULL;
-
- /* to prevent one context starved by another context */
- (*id)->pd_gpu_addr = 0;
- tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
- if (tmp) {
- *id = NULL;
- *fence = dma_fence_get(tmp);
- return 0;
- }
- }
- needs_flush = true;
}
/* Good we can use this VMID. Remember this submission as
* user of the VMID.
*/
- r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished);
+ r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished,
+ GFP_NOWAIT);
if (r)
return r;
@@ -385,7 +373,8 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
* user of the VMID.
*/
r = amdgpu_sync_fence(&(*id)->active,
- &job->base.s_fence->finished);
+ &job->base.s_fence->finished,
+ GFP_NOWAIT);
if (r)
return r;
@@ -422,7 +411,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r || !idle)
goto error;
- if (amdgpu_vmid_uses_reserved(adev, vm, vmhub)) {
+ if (amdgpu_vmid_uses_reserved(vm, vmhub)) {
r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
if (r || !id)
goto error;
@@ -437,7 +426,8 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
/* Remember this submission as user of the VMID */
r = amdgpu_sync_fence(&id->active,
- &job->base.s_fence->finished);
+ &job->base.s_fence->finished,
+ GFP_NOWAIT);
if (r)
goto error;
@@ -474,19 +464,14 @@ error:
/*
* amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID
- * @adev: amdgpu_device pointer
* @vm: the VM to check
* @vmhub: the VMHUB which will be used
*
* Returns: True if the VM will use a reserved VMID.
*/
-bool amdgpu_vmid_uses_reserved(struct amdgpu_device *adev,
- struct amdgpu_vm *vm, unsigned int vmhub)
+bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub)
{
- return vm->reserved_vmid[vmhub] ||
- (adev->enforce_isolation[(vm->root.bo->xcp_id != AMDGPU_XCP_NO_PARTITION) ?
- vm->root.bo->xcp_id : 0] &&
- AMDGPU_IS_GFXHUB(vmhub));
+ return vm->reserved_vmid[vmhub];
}
int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index 4012fb2dd08a..240fa6751260 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -78,8 +78,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
struct amdgpu_vmid *id);
-bool amdgpu_vmid_uses_reserved(struct amdgpu_device *adev,
- struct amdgpu_vm *vm, unsigned int vmhub);
+bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub);
int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
unsigned vmhub);
void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 7d4395a5d8ac..b0a88f92cd82 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -78,6 +78,9 @@ struct amdgpu_ih_ring {
#define amdgpu_ih_ts_after(t1, t2) \
(((int64_t)((t2) << 16) - (int64_t)((t1) << 16)) > 0LL)
+#define amdgpu_ih_ts_after_or_equal(t1, t2) \
+ (((int64_t)((t2) << 16) - (int64_t)((t1) << 16)) >= 0LL)
+
/* provided by the ih block */
struct amdgpu_ih_funcs {
/* ring read/write ptr handling, called from interrupt context */
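Note on the new amdgpu_ih_ts_after_or_equal() macro above: IH timestamps are 48 bits wide, so both values are shifted into the top of a signed 64-bit integer before subtracting, which makes the comparison correct even across a 48-bit wrap. A minimal, self-contained sketch of that arithmetic (the helper and values below are illustrative, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Same wrap-safe arithmetic as the amdgpu_ih_ts_after*() macros. */
    static int ts_after_or_equal(uint64_t t1, uint64_t t2)
    {
            return ((int64_t)(t2 << 16) - (int64_t)(t1 << 16)) >= 0LL;
    }

    int main(void)
    {
            uint64_t near_wrap = 0xffffffffffffULL; /* largest 48-bit timestamp */
            uint64_t wrapped   = 0x5ULL;            /* shortly after wrap-around */

            /* 'wrapped' is logically later even though it is numerically smaller */
            printf("%d\n", ts_after_or_equal(near_wrap, wrapped)); /* prints 1 */
            printf("%d\n", ts_after_or_equal(wrapped, near_wrap)); /* prints 0 */
            return 0;
    }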
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
index 732744488b03..43fc941dfa57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
@@ -124,7 +124,7 @@ static int isp_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
-static bool isp_is_idle(void *handle)
+static bool isp_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
index b03664c66dd6..4f3b7b5d9c1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
@@ -50,6 +50,7 @@ struct amdgpu_isp {
struct mfd_cell *isp_cell;
struct resource *isp_res;
struct resource *isp_i2c_res;
+ struct resource *isp_gpio_res;
struct isp_platform_data *isp_pdata;
unsigned int harvest_config;
const struct firmware *fw;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 100f04475943..acb21fc8b3ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -130,29 +130,48 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
amdgpu_vm_put_task_info(ti);
}
- dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
-
/* attempt a per ring reset */
- if (amdgpu_gpu_recovery &&
- ring->funcs->reset) {
+ if (unlikely(adev->debug_disable_gpu_ring_reset)) {
+ dev_err(adev->dev, "Ring reset disabled by debug mask\n");
+ } else if (amdgpu_gpu_recovery && ring->funcs->reset) {
+ bool is_guilty;
+
dev_err(adev->dev, "Starting %s ring reset\n", s_job->sched->name);
/* stop the scheduler, but don't mess with the
* bad job yet because if ring reset fails
* we'll fall back to full GPU reset.
*/
drm_sched_wqueue_stop(&ring->sched);
+
+ /* For engine resets, we need to reset the engine,
+ * but individual queues may be unaffected.
+ * Check here to make sure the accounting is correct.
+ */
+ if (ring->funcs->is_guilty)
+ is_guilty = ring->funcs->is_guilty(ring);
+ else
+ is_guilty = true;
+
+ if (is_guilty)
+ dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
+
r = amdgpu_ring_reset(ring, job->vmid);
if (!r) {
if (amdgpu_ring_sched_ready(ring))
drm_sched_stop(&ring->sched, s_job);
- atomic_inc(&ring->adev->gpu_reset_counter);
- amdgpu_fence_driver_force_completion(ring);
+ if (is_guilty) {
+ atomic_inc(&ring->adev->gpu_reset_counter);
+ amdgpu_fence_driver_force_completion(ring);
+ }
if (amdgpu_ring_sched_ready(ring))
drm_sched_start(&ring->sched, 0);
+ dev_err(adev->dev, "Ring %s reset succeeded\n", ring->sched.name);
+ drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE);
goto exit;
}
dev_err(adev->dev, "Ring %s reset failure\n", ring->sched.name);
}
+ dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
if (amdgpu_device_should_recover_gpu(ring->adev)) {
struct amdgpu_reset_context reset_context;
@@ -342,17 +361,24 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
{
struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
struct amdgpu_job *job = to_amdgpu_job(sched_job);
- struct dma_fence *fence = NULL;
+ struct dma_fence *fence;
int r;
r = drm_sched_entity_error(s_entity);
if (r)
goto error;
- if (job->gang_submit)
+ if (job->gang_submit) {
fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);
+ if (fence)
+ return fence;
+ }
- if (!fence && job->vm && !job->vmid) {
+ fence = amdgpu_device_enforce_isolation(ring->adev, ring, job);
+ if (fence)
+ return fence;
+
+ if (job->vm && !job->vmid) {
r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
if (r) {
dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r);
@@ -365,9 +391,10 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
*/
if (!fence)
job->vm = NULL;
+ return fence;
}
- return fence;
+ return NULL;
error:
dma_fence_set_error(&job->base.s_fence->finished, r);
@@ -411,8 +438,24 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
return fence;
}
-#define to_drm_sched_job(sched_job) \
- container_of((sched_job), struct drm_sched_job, queue_node)
+/*
+ * This is a duplicate function from DRM scheduler sched_internal.h.
+ * The plan is to remove it once amdgpu_job_stop_all_jobs_on_sched is removed,
+ * as the latter is incorrect and racy.
+ *
+ * See https://lore.kernel.org/amd-gfx/44edde63-7181-44fb-a4f7-94e50514f539@amd.com/
+ */
+static struct drm_sched_job *
+drm_sched_entity_queue_pop(struct drm_sched_entity *entity)
+{
+ struct spsc_node *node;
+
+ node = spsc_queue_pop(&entity->job_queue);
+ if (!node)
+ return NULL;
+
+ return container_of(node, struct drm_sched_job, queue_node);
+}
void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
{
@@ -425,7 +468,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
struct drm_sched_rq *rq = sched->sched_rq[i];
spin_lock(&rq->lock);
list_for_each_entry(s_entity, &rq->entities, list) {
- while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
+ while ((s_job = drm_sched_entity_queue_pop(s_entity))) {
struct drm_sched_fence *s_fence = s_job->s_fence;
dma_fence_signal(&s_fence->scheduled);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index b6d2eb049f54..dda29132dfb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -33,6 +33,7 @@
#define JPEG_IDLE_TIMEOUT msecs_to_jiffies(1000)
static void amdgpu_jpeg_idle_work_handler(struct work_struct *work);
+static void amdgpu_jpeg_reg_dump_fini(struct amdgpu_device *adev);
int amdgpu_jpeg_sw_init(struct amdgpu_device *adev)
{
@@ -85,6 +86,9 @@ int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev)
amdgpu_ring_fini(&adev->jpeg.inst[i].ring_dec[j]);
}
+ if (adev->jpeg.reg_list)
+ amdgpu_jpeg_reg_dump_fini(adev);
+
mutex_destroy(&adev->jpeg.jpeg_pg_lock);
return 0;
@@ -452,3 +456,83 @@ void amdgpu_jpeg_sysfs_reset_mask_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev, &dev_attr_jpeg_reset_mask);
}
}
+
+int amdgpu_jpeg_reg_dump_init(struct amdgpu_device *adev,
+ const struct amdgpu_hwip_reg_entry *reg, u32 count)
+{
+ adev->jpeg.ip_dump = kcalloc(adev->jpeg.num_jpeg_inst * count,
+ sizeof(uint32_t), GFP_KERNEL);
+ if (!adev->jpeg.ip_dump) {
+ DRM_ERROR("Failed to allocate memory for JPEG IP Dump\n");
+ return -ENOMEM;
+ }
+ adev->jpeg.reg_list = reg;
+ adev->jpeg.reg_count = count;
+
+ return 0;
+}
+
+static void amdgpu_jpeg_reg_dump_fini(struct amdgpu_device *adev)
+{
+ kfree(adev->jpeg.ip_dump);
+ adev->jpeg.reg_list = NULL;
+ adev->jpeg.reg_count = 0;
+}
+
+void amdgpu_jpeg_dump_ip_state(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ u32 inst_off, inst_id, is_powered;
+ int i, j;
+
+ if (!adev->jpeg.ip_dump)
+ return;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ inst_id = GET_INST(JPEG, i);
+ inst_off = i * adev->jpeg.reg_count;
+ /* check power status from UVD_JPEG_POWER_STATUS */
+ adev->jpeg.ip_dump[inst_off] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->jpeg.reg_list[0],
+ inst_id));
+ is_powered = ((adev->jpeg.ip_dump[inst_off] & 0x1) != 1);
+
+ if (is_powered)
+ for (j = 1; j < adev->jpeg.reg_count; j++)
+ adev->jpeg.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->jpeg.reg_list[j],
+ inst_id));
+ }
+}
+
+void amdgpu_jpeg_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ u32 inst_off, is_powered;
+ int i, j;
+
+ if (!adev->jpeg.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->jpeg.num_jpeg_inst);
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ if (adev->jpeg.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:JPEG%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * adev->jpeg.reg_count;
+ is_powered = ((adev->jpeg.ip_dump[inst_off] & 0x1) != 1);
+
+ if (is_powered) {
+ drm_printf(p, "Active Instance:JPEG%d\n", i);
+ for (j = 0; j < adev->jpeg.reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", adev->jpeg.reg_list[j].reg_name,
+ adev->jpeg.ip_dump[inst_off + j]);
+ } else
+ drm_printf(p, "\nInactive Instance:JPEG%d\n", i);
+ }
+}
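For reference, amdgpu_jpeg_reg_dump_init() and amdgpu_jpeg_dump_ip_state() above treat adev->jpeg.ip_dump as one flat per-instance array. A rough sketch of that layout (the instance and register counts are made up purely for illustration):

    /* ip_dump is kcalloc'd as num_jpeg_inst * reg_count u32 values:
     *
     *   ip_dump[i * reg_count + 0] -> reg_list[0], the power-status register
     *                                 (bit 0 clear means the instance is powered)
     *   ip_dump[i * reg_count + j] -> reg_list[j], read only while powered
     *
     * e.g. num_jpeg_inst = 2, reg_count = 8
     *      -> kcalloc(2 * 8, sizeof(uint32_t), GFP_KERNEL), 64 bytes total
     */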
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
index d9cb343a8708..4f0775e39b54 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -92,6 +92,14 @@
*adev->jpeg.inst[inst_idx].dpg_sram_curr_addr++ = value; \
} while (0)
+struct amdgpu_hwip_reg_entry;
+
+enum amdgpu_jpeg_caps {
+ AMDGPU_JPEG_RRMT_ENABLED,
+};
+
+#define AMDGPU_JPEG_CAPS(caps) BIT(AMDGPU_JPEG_##caps)
+
struct amdgpu_jpeg_reg{
unsigned jpeg_pitch[AMDGPU_MAX_JPEG_RINGS];
};
@@ -130,6 +138,10 @@ struct amdgpu_jpeg {
uint8_t num_inst_per_aid;
bool indirect_sram;
uint32_t supported_reset;
+ uint32_t caps;
+ u32 *ip_dump;
+ u32 reg_count;
+ const struct amdgpu_hwip_reg_entry *reg_list;
};
int amdgpu_jpeg_sw_init(struct amdgpu_device *adev);
@@ -154,5 +166,9 @@ int amdgpu_jpeg_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
void amdgpu_debugfs_jpeg_sched_mask_init(struct amdgpu_device *adev);
int amdgpu_jpeg_sysfs_reset_mask_init(struct amdgpu_device *adev);
void amdgpu_jpeg_sysfs_reset_mask_fini(struct amdgpu_device *adev);
+int amdgpu_jpeg_reg_dump_init(struct amdgpu_device *adev,
+ const struct amdgpu_hwip_reg_entry *reg, u32 count);
+void amdgpu_jpeg_dump_ip_state(struct amdgpu_ip_block *ip_block);
+void amdgpu_jpeg_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p);
#endif /*__AMDGPU_JPEG_H__*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 98528ee94c15..27bfe9c8af06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -459,7 +459,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->vcn.harvest_config & (1 << i))
continue;
- for (j = 0; j < adev->vcn.num_enc_rings; j++)
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; j++)
if (adev->vcn.inst[i].ring_enc[j].sched.ready)
++num_rings;
}
@@ -888,6 +888,15 @@ out:
if (adev->gfx.config.ta_cntl2_truncate_coord_mode)
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD;
+ if (amdgpu_passthrough(adev))
+ dev_info->ids_flags |= (AMDGPU_IDS_FLAGS_MODE_PT <<
+ AMDGPU_IDS_FLAGS_MODE_SHIFT) &
+ AMDGPU_IDS_FLAGS_MODE_MASK;
+ else if (amdgpu_sriov_vf(adev))
+ dev_info->ids_flags |= (AMDGPU_IDS_FLAGS_MODE_VF <<
+ AMDGPU_IDS_FLAGS_MODE_SHIFT) &
+ AMDGPU_IDS_FLAGS_MODE_MASK;
+
vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
vm_size -= AMDGPU_VA_RESERVED_TOP;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 709c11cbeabd..85f774063f9b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -145,9 +145,8 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
adev->mes.vmid_mask_gfxhub = 0xffffff00;
for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
- /* use only 1st MEC pipes */
- if (i >= adev->gfx.mec.num_pipe_per_mec)
- continue;
+ if (i >= (adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_mec))
+ break;
adev->mes.compute_hqd_mask[i] = 0xc;
}
@@ -155,14 +154,9 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;
for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
- if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) <
- IP_VERSION(6, 0, 0))
- adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
- /* zero sdma_hqd_mask for non-existent engine */
- else if (adev->sdma.num_instances == 1)
- adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
- else
- adev->mes.sdma_hqd_mask[i] = 0xfc;
+ if (i >= adev->sdma.num_instances)
+ break;
+ adev->mes.sdma_hqd_mask[i] = 0xfc;
}
for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
@@ -1336,14 +1330,14 @@ int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
DRM_ERROR("failed to do vm_bo_update on meta data\n");
goto error_del_bo_va;
}
- amdgpu_sync_fence(&sync, bo_va->last_pt_update);
+ amdgpu_sync_fence(&sync, bo_va->last_pt_update, GFP_KERNEL);
r = amdgpu_vm_update_pdes(adev, vm, false);
if (r) {
DRM_ERROR("failed to update pdes on meta data\n");
goto error_del_bo_va;
}
- amdgpu_sync_fence(&sync, vm->last_update);
+ amdgpu_sync_fence(&sync, vm->last_update, GFP_KERNEL);
amdgpu_sync_wait(&sync, false);
drm_exec_fini(&exec);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index e98ea7ede1ba..da2c9a8cb3e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -56,7 +56,7 @@ enum amdgpu_mes_priority_level {
struct amdgpu_mes_funcs;
-enum admgpu_mes_pipe {
+enum amdgpu_mes_pipe {
AMDGPU_MES_SCHED_PIPE = 0,
AMDGPU_MES_KIQ_PIPE,
AMDGPU_MAX_MES_PIPES = 2,
@@ -143,9 +143,9 @@ struct amdgpu_mes {
const struct amdgpu_mes_funcs *funcs;
/* mes resource_1 bo*/
- struct amdgpu_bo *resource_1;
- uint64_t resource_1_gpu_addr;
- void *resource_1_addr;
+ struct amdgpu_bo *resource_1[AMDGPU_MAX_MES_PIPES];
+ uint64_t resource_1_gpu_addr[AMDGPU_MAX_MES_PIPES];
+ void *resource_1_addr[AMDGPU_MAX_MES_PIPES];
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 5e3faefc5510..6da4f946cac0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -609,6 +609,7 @@ struct amdgpu_i2c_adapter {
struct i2c_adapter base;
struct ddc_service *ddc_service;
+ bool oem;
};
#define TO_DM_AUX(x) container_of((x), struct amdgpu_dm_dp_aux, aux)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 96f4b8904e9a..80cd6f5273db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1295,28 +1295,36 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
if (abo->kfd_bo)
amdgpu_amdkfd_release_notify(abo);
- /* We only remove the fence if the resv has individualized. */
- WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
- && bo->base.resv != &bo->base._resv);
- if (bo->base.resv == &bo->base._resv)
- amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
+ /*
+ * We lock the private dma_resv object here; since the BO is about to
+ * be released, nobody else should still hold a pointer to it.
+ * So if this trylock fails, something is wrong with the reference
+ * counting.
+ */
+ if (WARN_ON_ONCE(!dma_resv_trylock(&bo->base._resv)))
+ return;
+
+ amdgpu_amdkfd_remove_all_eviction_fences(abo);
if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
!(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev)))
- return;
+ goto out;
- if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
- return;
+ r = dma_resv_reserve_fences(&bo->base._resv, 1);
+ if (r)
+ goto out;
- r = amdgpu_fill_buffer(abo, 0, bo->base.resv, &fence, true);
- if (!WARN_ON(r)) {
- amdgpu_vram_mgr_set_cleared(bo->resource);
- amdgpu_bo_fence(abo, fence, false);
- dma_fence_put(fence);
- }
+ r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true);
+ if (WARN_ON(r))
+ goto out;
+
+ amdgpu_vram_mgr_set_cleared(bo->resource);
+ dma_resv_add_fence(&bo->base._resv, fence, DMA_RESV_USAGE_KERNEL);
+ dma_fence_put(fence);
- dma_resv_unlock(bo->base.resv);
+out:
+ dma_resv_unlock(&bo->base._resv);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index e5fc80ed06ea..d54bb1377262 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -44,7 +44,7 @@
#include "amdgpu_securedisplay.h"
#include "amdgpu_atomfirmware.h"
-#define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*3)
+#define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*16)
static int psp_load_smu_fw(struct psp_context *psp);
static int psp_rap_terminate(struct psp_context *psp);
@@ -153,6 +153,9 @@ static int psp_init_sriov_microcode(struct psp_context *psp)
adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
ret = psp_init_cap_microcode(psp, ucode_prefix);
break;
+ case IP_VERSION(13, 0, 12):
+ ret = psp_init_ta_microcode(psp, ucode_prefix);
+ break;
default:
return -EINVAL;
}
@@ -193,6 +196,7 @@ static int psp_early_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(11, 0, 9):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
psp_v11_0_set_psp_funcs(psp);
@@ -208,11 +212,15 @@ static int psp_early_init(struct amdgpu_ip_block *ip_block)
psp->boot_time_tmr = false;
fallthrough;
case IP_VERSION(13, 0, 6):
- case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
psp_v13_0_set_psp_funcs(psp);
psp->autoload_supported = false;
break;
+ case IP_VERSION(13, 0, 12):
+ psp_v13_0_set_psp_funcs(psp);
+ psp->autoload_supported = false;
+ adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
+ break;
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 3):
case IP_VERSION(13, 0, 5):
@@ -246,6 +254,10 @@ static int psp_early_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(14, 0, 3):
psp_v14_0_set_psp_funcs(psp);
break;
+ case IP_VERSION(14, 0, 5):
+ psp_v14_0_set_psp_funcs(psp);
+ psp->boot_time_tmr = false;
+ break;
default:
return -EINVAL;
}
@@ -533,7 +545,6 @@ static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct psp_context *psp = &adev->psp;
- struct psp_gfx_cmd_resp *cmd = psp->cmd;
psp_memory_training_fini(psp);
@@ -543,8 +554,8 @@ static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_ucode_release(&psp->cap_fw);
amdgpu_ucode_release(&psp->toc_fw);
- kfree(cmd);
- cmd = NULL;
+ kfree(psp->cmd);
+ psp->cmd = NULL;
psp_free_shared_bufs(psp);
@@ -1786,34 +1797,47 @@ int psp_ras_initialize(struct psp_context *psp)
if (ret)
dev_warn(adev->dev, "PSP get boot config failed\n");
- if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) {
- if (!boot_cfg) {
- dev_info(adev->dev, "GECC is disabled\n");
- } else {
- /* disable GECC in next boot cycle if ras is
- * disabled by module parameter amdgpu_ras_enable
- * and/or amdgpu_ras_mask, or boot_config_get call
- * is failed
- */
- ret = psp_boot_config_set(adev, 0);
- if (ret)
- dev_warn(adev->dev, "PSP set boot config failed\n");
- else
- dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
- }
+ if (boot_cfg == 1 && !adev->ras_default_ecc_enabled &&
+ amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
+ dev_warn(adev->dev, "GECC is currently enabled, which may affect performance\n");
+ dev_warn(adev->dev,
+ "To disable GECC, please reboot the system and load the amdgpu driver with the parameter amdgpu_ras_enable=0\n");
} else {
- if (boot_cfg == 1) {
- dev_info(adev->dev, "GECC is enabled\n");
+ if ((adev->ras_default_ecc_enabled || amdgpu_ras_enable == 1) &&
+ amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
+ if (boot_cfg == 1) {
+ dev_info(adev->dev, "GECC is enabled\n");
+ } else {
+ /* enable GECC in next boot cycle if it is disabled
+ * in boot config, or force enable GECC if failed to
+ * get boot configuration
+ */
+ ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
+ if (ret)
+ dev_warn(adev->dev, "PSP set boot config failed\n");
+ else
+ dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
+ }
} else {
- /* enable GECC in next boot cycle if it is disabled
- * in boot config, or force enable GECC if failed to
- * get boot configuration
- */
- ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
- if (ret)
- dev_warn(adev->dev, "PSP set boot config failed\n");
- else
- dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
+ if (!boot_cfg) {
+ if (!adev->ras_default_ecc_enabled &&
+ amdgpu_ras_enable != 1 &&
+ amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
+ dev_warn(adev->dev, "GECC is disabled, set amdgpu_ras_enable=1 to enable GECC in next boot cycle if needed\n");
+ else
+ dev_info(adev->dev, "GECC is disabled\n");
+ } else {
+ /* disable GECC in next boot cycle if ras is
+ * disabled by module parameter amdgpu_ras_enable
+ * and/or amdgpu_ras_mask, or boot_config_get call
+ * is failed
+ */
+ ret = psp_boot_config_set(adev, 0);
+ if (ret)
+ dev_warn(adev->dev, "PSP set boot config failed\n");
+ else
+ dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
+ }
}
}
}
@@ -1840,6 +1864,7 @@ int psp_ras_initialize(struct psp_context *psp)
if (adev->gmc.gmc_funcs->query_mem_partition_mode)
ras_cmd->ras_in_message.init_flags.nps_mode =
adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ ras_cmd->ras_in_message.init_flags.active_umc_mask = adev->umc.active_mask;
ret = psp_ta_load(psp, &psp->ras_context.context);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index f0924aa3f4e4..bed2603ae4c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1864,6 +1864,9 @@ int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
if (!obj || obj->attr_inuse)
return -EINVAL;
+ if (amdgpu_sriov_vf(adev) && !amdgpu_virt_ras_telemetry_block_en(adev, head->block))
+ return 0;
+
get_obj(obj);
snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
@@ -2796,20 +2799,108 @@ static int amdgpu_ras_mca2pa(struct amdgpu_device *adev,
return -EINVAL;
}
+static int __amdgpu_ras_restore_bad_pages(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps, int count)
+{
+ int j;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data = con->eh_data;
+
+ for (j = 0; j < count; j++) {
+ if (amdgpu_ras_check_bad_page_unlock(con,
+ bps[j].retired_page << AMDGPU_GPU_PAGE_SHIFT))
+ continue;
+
+ if (!data->space_left &&
+ amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
+ return -ENOMEM;
+ }
+
+ amdgpu_ras_reserve_page(adev, bps[j].retired_page);
+
+ memcpy(&data->bps[data->count], &(bps[j]),
+ sizeof(struct eeprom_table_record));
+ data->count++;
+ data->space_left--;
+ }
+
+ return 0;
+}
+
+static int __amdgpu_ras_convert_rec_array_from_rom(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps, struct ras_err_data *err_data,
+ enum amdgpu_memory_partition nps)
+{
+ int i = 0;
+ enum amdgpu_memory_partition save_nps;
+
+ save_nps = (bps[0].retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
+
+ /* old asics just have pa in eeprom */
+ if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
+ memcpy(err_data->err_addr, bps,
+ sizeof(struct eeprom_table_record) * adev->umc.retire_unit);
+ goto out;
+ }
+
+ for (i = 0; i < adev->umc.retire_unit; i++)
+ bps[i].retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
+
+ if (save_nps) {
+ if (save_nps == nps) {
+ if (amdgpu_umc_pages_in_a_row(adev, err_data,
+ bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT))
+ return -EINVAL;
+ } else {
+ if (amdgpu_ras_mca2pa_by_idx(adev, &bps[0], err_data))
+ return -EINVAL;
+ }
+ } else {
+ if (amdgpu_ras_mca2pa(adev, &bps[0], err_data)) {
+ if (nps == AMDGPU_NPS1_PARTITION_MODE)
+ memcpy(err_data->err_addr, bps,
+ sizeof(struct eeprom_table_record) * adev->umc.retire_unit);
+ else
+ return -EOPNOTSUPP;
+ }
+ }
+
+out:
+ return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr, adev->umc.retire_unit);
+}
+
+static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps, struct ras_err_data *err_data,
+ enum amdgpu_memory_partition nps)
+{
+ enum amdgpu_memory_partition save_nps;
+
+ save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
+ bps->retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
+
+ if (save_nps == nps) {
+ if (amdgpu_umc_pages_in_a_row(adev, err_data,
+ bps->retired_page << AMDGPU_GPU_PAGE_SHIFT))
+ return -EINVAL;
+ } else {
+ if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data))
+ return -EINVAL;
+ }
+ return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr,
+ adev->umc.retire_unit);
+}
+
/* it deal with vram only. */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
struct eeprom_table_record *bps, int pages, bool from_rom)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- struct ras_err_handler_data *data;
struct ras_err_data err_data;
- struct eeprom_table_record *err_rec;
struct amdgpu_ras_eeprom_control *control =
&adev->psp.ras_context.ras->eeprom_control;
enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
int ret = 0;
- uint32_t i, j, loop_cnt = 1;
- bool find_pages_per_pa = false;
+ uint32_t i;
if (!con || !con->eh_data || !bps || pages <= 0)
return 0;
@@ -2820,114 +2911,46 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
sizeof(struct eeprom_table_record), GFP_KERNEL);
if (!err_data.err_addr) {
dev_warn(adev->dev, "Failed to alloc UMC error address record in mca2pa conversion!\n");
- ret = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
- err_rec = err_data.err_addr;
- loop_cnt = adev->umc.retire_unit;
if (adev->gmc.gmc_funcs->query_mem_partition_mode)
nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
}
mutex_lock(&con->recovery_lock);
- data = con->eh_data;
- if (!data) {
- /* Returning 0 as the absence of eh_data is acceptable */
- goto free;
- }
-
- for (i = 0; i < pages; i++) {
- if (from_rom &&
- control->rec_type == AMDGPU_RAS_EEPROM_REC_MCA) {
- if (!find_pages_per_pa) {
- if (amdgpu_ras_mca2pa_by_idx(adev, &bps[i], &err_data)) {
- if (!i && nps == AMDGPU_NPS1_PARTITION_MODE) {
- /* may use old RAS TA, use PA to find pages in
- * one row
- */
- if (amdgpu_umc_pages_in_a_row(adev, &err_data,
- bps[i].retired_page <<
- AMDGPU_GPU_PAGE_SHIFT)) {
- ret = -EINVAL;
- goto free;
- } else {
- find_pages_per_pa = true;
- }
- } else {
- /* unsupported cases */
- ret = -EOPNOTSUPP;
- goto free;
- }
- }
- } else {
- if (amdgpu_umc_pages_in_a_row(adev, &err_data,
- bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT)) {
- ret = -EINVAL;
- goto free;
- }
- }
- } else {
- if (from_rom && !find_pages_per_pa) {
- if (bps[i].retired_page & UMC_CHANNEL_IDX_V2) {
- /* bad page in any NPS mode in eeprom */
- if (amdgpu_ras_mca2pa_by_idx(adev, &bps[i], &err_data)) {
- ret = -EINVAL;
+
+ if (from_rom) {
+ for (i = 0; i < pages; i++) {
+ if (control->ras_num_recs - i >= adev->umc.retire_unit) {
+ if ((bps[i].address == bps[i + 1].address) &&
+ (bps[i].mem_channel == bps[i + 1].mem_channel)) {
+ /* deal with retire_unit records at a time */
+ ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
+ &bps[i], &err_data, nps);
+ if (ret)
goto free;
- }
+ i += (adev->umc.retire_unit - 1);
} else {
- /* legacy bad page in eeprom, generated only in
- * NPS1 mode
- */
- if (amdgpu_ras_mca2pa(adev, &bps[i], &err_data)) {
- /* old RAS TA or ASICs which don't support to
- * convert addrss via mca address
- */
- if (!i && nps == AMDGPU_NPS1_PARTITION_MODE) {
- find_pages_per_pa = true;
- err_rec = &bps[i];
- loop_cnt = 1;
- } else {
- /* non-nps1 mode, old RAS TA
- * can't support it
- */
- ret = -EOPNOTSUPP;
- goto free;
- }
- }
+ break;
}
-
- if (!find_pages_per_pa)
- i += (adev->umc.retire_unit - 1);
} else {
- err_rec = &bps[i];
+ break;
}
}
-
- for (j = 0; j < loop_cnt; j++) {
- if (amdgpu_ras_check_bad_page_unlock(con,
- err_rec[j].retired_page << AMDGPU_GPU_PAGE_SHIFT))
- continue;
-
- if (!data->space_left &&
- amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
- ret = -ENOMEM;
+ for (; i < pages; i++) {
+ ret = __amdgpu_ras_convert_rec_from_rom(adev,
+ &bps[i], &err_data, nps);
+ if (ret)
goto free;
- }
-
- amdgpu_ras_reserve_page(adev, err_rec[j].retired_page);
-
- memcpy(&data->bps[data->count], &(err_rec[j]),
- sizeof(struct eeprom_table_record));
- data->count++;
- data->space_left--;
}
+ } else {
+ ret = __amdgpu_ras_restore_bad_pages(adev, bps, pages);
}
free:
if (from_rom)
kfree(err_data.err_addr);
-out:
mutex_unlock(&con->recovery_lock);
return ret;
@@ -2966,18 +2989,18 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
/* only new entries are saved */
if (save_count > 0) {
- if (control->rec_type == AMDGPU_RAS_EEPROM_REC_PA) {
+ /* old asics only save pa to eeprom like before */
+ if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
if (amdgpu_ras_eeprom_append(control,
- &data->bps[control->ras_num_recs],
- save_count)) {
+ &data->bps[bad_page_num], save_count)) {
dev_err(adev->dev, "Failed to save EEPROM table data!");
return -EIO;
}
} else {
for (i = 0; i < unit_num; i++) {
if (amdgpu_ras_eeprom_append(control,
- &data->bps[bad_page_num + i * adev->umc.retire_unit],
- 1)) {
+ &data->bps[bad_page_num +
+ i * adev->umc.retire_unit], 1)) {
dev_err(adev->dev, "Failed to save EEPROM table data!");
return -EIO;
}
@@ -2999,7 +3022,7 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
struct amdgpu_ras_eeprom_control *control =
&adev->psp.ras_context.ras->eeprom_control;
struct eeprom_table_record *bps;
- int ret;
+ int ret, i = 0;
/* no bad page record, skip eeprom access */
if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
@@ -3013,13 +3036,23 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
if (ret) {
dev_err(adev->dev, "Failed to load EEPROM table records!");
} else {
- if (control->ras_num_recs > 1 &&
- adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
- if ((bps[0].address == bps[1].address) &&
- (bps[0].mem_channel == bps[1].mem_channel))
- control->rec_type = AMDGPU_RAS_EEPROM_REC_PA;
- else
- control->rec_type = AMDGPU_RAS_EEPROM_REC_MCA;
+ if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
+ for (i = 0; i < control->ras_num_recs; i++) {
+ if ((control->ras_num_recs - i) >= adev->umc.retire_unit) {
+ if ((bps[i].address == bps[i + 1].address) &&
+ (bps[i].mem_channel == bps[i + 1].mem_channel)) {
+ control->ras_num_pa_recs += adev->umc.retire_unit;
+ i += (adev->umc.retire_unit - 1);
+ } else {
+ control->ras_num_mca_recs +=
+ (control->ras_num_recs - i);
+ break;
+ }
+ } else {
+ control->ras_num_mca_recs += (control->ras_num_recs - i);
+ break;
+ }
+ }
}
ret = amdgpu_ras_eeprom_check(control);
@@ -3080,31 +3113,29 @@ static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
/*
- * Justification of value bad_page_cnt_threshold in ras structure
- *
- * Generally, 0 <= amdgpu_bad_page_threshold <= max record length
- * in eeprom or amdgpu_bad_page_threshold == -2, introduce two
- * scenarios accordingly.
- *
- * Bad page retirement enablement:
- * - If amdgpu_bad_page_threshold = -2,
- * bad_page_cnt_threshold = typical value by formula.
- *
- * - When the value from user is 0 < amdgpu_bad_page_threshold <
- * max record length in eeprom, use it directly.
- *
- * Bad page retirement disablement:
- * - If amdgpu_bad_page_threshold = 0, bad page retirement
- * functionality is disabled, and bad_page_cnt_threshold will
- * take no effect.
+ * amdgpu_bad_page_threshold is used to configure
+ * the threshold for the number of bad pages.
+ * -1: Threshold is set to the default value.
+ * Driver will issue a warning message when the threshold is reached
+ * and continue runtime services.
+ * 0: Disable bad page retirement.
+ * Driver will not retire bad pages,
+ * which is intended for debugging purposes.
+ * -2: Threshold is determined by a formula
+ * that assumes 1 bad page per 100M of local memory.
+ * Driver will continue runtime services when the threshold is reached.
+ * 0 < threshold < max number of bad page records in EEPROM:
+ * A user-defined threshold is set.
+ * Driver will halt runtime services when this custom threshold is reached.
*/
-
- if (amdgpu_bad_page_threshold < 0) {
+ if (amdgpu_bad_page_threshold == -2) {
u64 val = adev->gmc.mc_vram_size;
do_div(val, RAS_BAD_PAGE_COVER);
con->bad_page_cnt_threshold = min(lower_32_bits(val),
max_count);
+ } else if (amdgpu_bad_page_threshold == -1) {
+ con->bad_page_cnt_threshold = ((con->reserved_pages_in_bytes) >> 21) << 4;
} else {
con->bad_page_cnt_threshold = min_t(int, max_count,
amdgpu_bad_page_threshold);
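A short worked example of the two automatic threshold modes above (figures are illustrative; RAS_BAD_PAGE_COVER is assumed to correspond to the "1 bad page per 100M" comment):

    /* amdgpu_bad_page_threshold == -1:
     *   threshold = (reserved_pages_in_bytes >> 21) << 4
     *             = (reserved bytes / 2 MiB) * 16
     *   e.g. 16 MiB reserved -> 8  * 16 = 128 bad pages
     *        32 MiB reserved -> 16 * 16 = 256 bad pages
     *
     * amdgpu_bad_page_threshold == -2:
     *   threshold = min(mc_vram_size / RAS_BAD_PAGE_COVER, max_count)
     *   e.g. 16 GiB VRAM at ~100 MiB per bad page -> roughly 163 pages
     */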
@@ -3427,12 +3458,7 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
return ret;
if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr)
- control->rec_type = AMDGPU_RAS_EEPROM_REC_PA;
-
- /* default status is MCA storage */
- if (control->ras_num_recs <= 1 &&
- adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
- control->rec_type = AMDGPU_RAS_EEPROM_REC_MCA;
+ control->ras_num_pa_recs = control->ras_num_recs;
if (control->ras_num_recs) {
ret = amdgpu_ras_load_bad_pages(adev);
@@ -3447,6 +3473,13 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
adev, control->bad_channel_bitmap);
con->update_channel_flag = false;
}
+
+ /* The format action is only applied to new ASICs */
+ if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) >= 12 &&
+ control->tbl_hdr.version < RAS_TABLE_VER_V3)
+ if (!amdgpu_ras_eeprom_reset_table(control))
+ if (amdgpu_ras_save_bad_pages(adev, NULL))
+ dev_warn(adev->dev, "Failed to format RAS EEPROM data in V3 version!\n");
}
return ret;
@@ -3759,8 +3792,11 @@ init_ras_enabled_flag:
adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
adev->ras_hw_enabled & amdgpu_ras_mask;
- /* aca is disabled by default */
- adev->aca.is_enabled = false;
+ /* aca is disabled by default except for psp v13_0_6/v13_0_12/v13_0_14 */
+ adev->aca.is_enabled =
+ (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14));
/* bad page feature is not applicable to specific app platform */
if (adev->gmc.is_app_apu &&
@@ -3848,8 +3884,10 @@ static void amdgpu_ras_init_reserved_vram_size(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 12):
+ con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT;
+ break;
case IP_VERSION(13, 0, 14):
- con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE;
+ con->reserved_pages_in_bytes = (AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT << 1);
break;
default:
break;
@@ -5127,9 +5165,9 @@ static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
"socket: %d, aid: %d, fw_status: 0x%x, data abort exception\n",
socket_id, aid_id, fw_status);
- if (AMDGPU_RAS_GPU_ERR_UNKNOWN(boot_error))
+ if (AMDGPU_RAS_GPU_ERR_GENERIC(boot_error))
dev_info(adev->dev,
- "socket: %d, aid: %d, fw_status: 0x%x, unknown boot time errors\n",
+ "socket: %d, aid: %d, fw_status: 0x%x, Boot Controller Generic Error\n",
socket_id, aid_id, fw_status);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 82db986c36a0..764e9fa0a914 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -47,7 +47,7 @@ struct amdgpu_iv_entry;
#define AMDGPU_RAS_GPU_ERR_AID_ID(x) AMDGPU_GET_REG_FIELD(x, 12, 11)
#define AMDGPU_RAS_GPU_ERR_HBM_ID(x) AMDGPU_GET_REG_FIELD(x, 14, 13)
#define AMDGPU_RAS_GPU_ERR_DATA_ABORT(x) AMDGPU_GET_REG_FIELD(x, 29, 29)
-#define AMDGPU_RAS_GPU_ERR_UNKNOWN(x) AMDGPU_GET_REG_FIELD(x, 30, 30)
+#define AMDGPU_RAS_GPU_ERR_GENERIC(x) AMDGPU_GET_REG_FIELD(x, 30, 30)
#define AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT 100
#define AMDGPU_RAS_BOOT_STEADY_STATUS 0xBA
@@ -65,7 +65,7 @@ struct amdgpu_iv_entry;
/* Reserve 8 physical dram row for possible retirement.
* In worst cases, it will lose 8 * 2MB memory in vram domain */
-#define AMDGPU_RAS_RESERVED_VRAM_SIZE (16ULL << 20)
+#define AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT (16ULL << 20)
/* The high three bits indicates socketid */
#define AMDGPU_RAS_GET_FEATURES(val) ((val) & ~AMDGPU_RAS_FEATURES_SOCKETID_MASK)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 52c16bfeccaa..0ea7cfaf3587 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -161,6 +161,7 @@ static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 10):
return true;
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
return (adev->gmc.is_app_apu) ? false : true;
default:
@@ -177,7 +178,7 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
if (!control)
return false;
- if (amdgpu_atomfirmware_ras_rom_addr(adev, &i2c_addr)) {
+ if (adev->bios && amdgpu_atomfirmware_ras_rom_addr(adev, &i2c_addr)) {
/* The address given by VBIOS is an 8-bit, wire-format
* address, i.e. the most significant byte.
*
@@ -223,6 +224,7 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
return true;
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
control->i2c_address = EEPROM_I2C_MADDR_4;
return true;
@@ -413,9 +415,11 @@ static void amdgpu_ras_set_eeprom_table_version(struct amdgpu_ras_eeprom_control
switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
case IP_VERSION(8, 10, 0):
- case IP_VERSION(12, 0, 0):
hdr->version = RAS_TABLE_VER_V2_1;
return;
+ case IP_VERSION(12, 0, 0):
+ hdr->version = RAS_TABLE_VER_V3;
+ return;
default:
hdr->version = RAS_TABLE_VER_V1;
return;
@@ -443,7 +447,7 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
hdr->header = RAS_TABLE_HDR_VAL;
amdgpu_ras_set_eeprom_table_version(control);
- if (hdr->version == RAS_TABLE_VER_V2_1) {
+ if (hdr->version >= RAS_TABLE_VER_V2_1) {
hdr->first_rec_offset = RAS_RECORD_START_V2_1;
hdr->tbl_size = RAS_TABLE_HEADER_SIZE +
RAS_TABLE_V2_1_INFO_SIZE;
@@ -461,7 +465,7 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
}
csum = __calc_hdr_byte_sum(control);
- if (hdr->version == RAS_TABLE_VER_V2_1)
+ if (hdr->version >= RAS_TABLE_VER_V2_1)
csum += __calc_ras_info_byte_sum(control);
csum = -csum;
hdr->checksum = csum;
@@ -558,16 +562,17 @@ bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev)
return false;
if (con->eeprom_control.tbl_hdr.header == RAS_TABLE_HDR_BAD) {
- if (amdgpu_bad_page_threshold == -1) {
+ if (con->eeprom_control.ras_num_bad_pages > con->bad_page_cnt_threshold)
dev_warn(adev->dev, "RAS records:%d exceed threshold:%d",
- con->eeprom_control.ras_num_bad_pages, con->bad_page_cnt_threshold);
+ con->eeprom_control.ras_num_bad_pages, con->bad_page_cnt_threshold);
+ if ((amdgpu_bad_page_threshold == -1) ||
+ (amdgpu_bad_page_threshold == -2)) {
dev_warn(adev->dev,
- "But GPU can be operated due to bad_page_threshold = -1.\n");
+ "Please consult AMD Service Action Guide (SAG) for appropriate service procedures.\n");
return false;
} else {
- dev_warn(adev->dev, "This GPU is in BAD status.");
- dev_warn(adev->dev, "Please retire it or set a larger "
- "threshold value when reloading driver.\n");
+ dev_warn(adev->dev,
+ "Please consider adjusting the customized threshold.\n");
return true;
}
}
@@ -726,11 +731,14 @@ amdgpu_ras_eeprom_append_table(struct amdgpu_ras_eeprom_control *control,
- control->ras_fri)
% control->ras_max_record_count;
- if (control->rec_type == AMDGPU_RAS_EEPROM_REC_PA)
- control->ras_num_bad_pages = control->ras_num_recs;
+ /* old asics only save pa to eeprom like before */
+ if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12)
+ control->ras_num_pa_recs += num;
else
- control->ras_num_bad_pages =
- control->ras_num_recs * adev->umc.retire_unit;
+ control->ras_num_mca_recs += num;
+
+ control->ras_num_bad_pages = control->ras_num_pa_recs +
+ control->ras_num_mca_recs * adev->umc.retire_unit;
Out:
kfree(buf);
return res;
@@ -748,24 +756,25 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
/* Modify the header if it exceeds.
*/
if (amdgpu_bad_page_threshold != 0 &&
- control->ras_num_bad_pages >= ras->bad_page_cnt_threshold) {
+ control->ras_num_bad_pages > ras->bad_page_cnt_threshold) {
dev_warn(adev->dev,
"Saved bad pages %d reaches threshold value %d\n",
control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
- if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1) {
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) {
control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD;
control->tbl_rai.health_percent = 0;
}
- if (amdgpu_bad_page_threshold != -1)
+ if ((amdgpu_bad_page_threshold != -1) &&
+ (amdgpu_bad_page_threshold != -2))
ras->is_rma = true;
/* ignore the -ENOTSUPP return value */
amdgpu_dpm_send_rma_reason(adev);
}
- if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1)
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE +
RAS_TABLE_V2_1_INFO_SIZE +
control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
@@ -805,8 +814,8 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
* now calculate gpu health percent
*/
if (amdgpu_bad_page_threshold != 0 &&
- control->tbl_hdr.version == RAS_TABLE_VER_V2_1 &&
- control->ras_num_bad_pages < ras->bad_page_cnt_threshold)
+ control->tbl_hdr.version >= RAS_TABLE_VER_V2_1 &&
+ control->ras_num_bad_pages <= ras->bad_page_cnt_threshold)
control->tbl_rai.health_percent = ((ras->bad_page_cnt_threshold -
control->ras_num_bad_pages) * 100) /
ras->bad_page_cnt_threshold;
@@ -818,7 +827,7 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
csum += *pp;
csum += __calc_hdr_byte_sum(control);
- if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1)
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
csum += __calc_ras_info_byte_sum(control);
/* avoid sign extension when assigning to "checksum" */
csum = -csum;
@@ -850,6 +859,7 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
{
struct amdgpu_device *adev = to_amdgpu_device(control);
int res, i;
+ uint64_t nps = AMDGPU_NPS1_PARTITION_MODE;
if (!__is_ras_eeprom_supported(adev))
return 0;
@@ -863,9 +873,12 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
return -EINVAL;
}
+ if (adev->gmc.gmc_funcs->query_mem_partition_mode)
+ nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+
/* set the new channel index flag */
for (i = 0; i < num; i++)
- record[i].retired_page |= UMC_CHANNEL_IDX_V2;
+ record[i].retired_page |= (nps << UMC_NPS_SHIFT);
mutex_lock(&control->ras_tbl_mutex);
@@ -879,7 +892,7 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
/* clear channel index flag, the flag is only saved on eeprom */
for (i = 0; i < num; i++)
- record[i].retired_page &= ~UMC_CHANNEL_IDX_V2;
+ record[i].retired_page &= ~(nps << UMC_NPS_SHIFT);
return res;
}
@@ -1031,7 +1044,7 @@ uint32_t amdgpu_ras_eeprom_max_record_count(struct amdgpu_ras_eeprom_control *co
/* get available eeprom table version first before eeprom table init */
amdgpu_ras_set_eeprom_table_version(control);
- if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1)
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
return RAS_MAX_RECORD_COUNT_V2_1;
else
return RAS_MAX_RECORD_COUNT;
@@ -1276,7 +1289,7 @@ static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control
int buf_size, res;
u8 csum, *buf, *pp;
- if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1)
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
buf_size = RAS_TABLE_HEADER_SIZE +
RAS_TABLE_V2_1_INFO_SIZE +
control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
@@ -1379,7 +1392,7 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
__decode_table_header_from_buf(hdr, buf);
- if (hdr->version == RAS_TABLE_VER_V2_1) {
+ if (hdr->version >= RAS_TABLE_VER_V2_1) {
control->ras_num_recs = RAS_NUM_RECS_V2_1(hdr);
control->ras_record_offset = RAS_RECORD_START_V2_1;
control->ras_max_record_count = RAS_MAX_RECORD_COUNT_V2_1;
@@ -1390,6 +1403,8 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
}
control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset);
+ control->ras_num_mca_recs = 0;
+ control->ras_num_pa_recs = 0;
return 0;
}
@@ -1410,17 +1425,14 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
if (!__get_eeprom_i2c_addr(adev, control))
return -EINVAL;
- if (control->rec_type == AMDGPU_RAS_EEPROM_REC_PA)
- control->ras_num_bad_pages = control->ras_num_recs;
- else
- control->ras_num_bad_pages =
- control->ras_num_recs * adev->umc.retire_unit;
+ control->ras_num_bad_pages = control->ras_num_pa_recs +
+ control->ras_num_mca_recs * adev->umc.retire_unit;
if (hdr->header == RAS_TABLE_HDR_VAL) {
DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records",
control->ras_num_bad_pages);
- if (hdr->version == RAS_TABLE_VER_V2_1) {
+ if (hdr->version >= RAS_TABLE_VER_V2_1) {
res = __read_table_ras_info(control);
if (res)
return res;
@@ -1428,8 +1440,9 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
res = __verify_ras_table_checksum(control);
if (res)
- DRM_ERROR("RAS table incorrect checksum or error:%d\n",
- res);
+ dev_err(adev->dev,
+ "RAS table incorrect checksum or error:%d\n",
+ res);
/* Warn if we are at 90% of the threshold or above
*/
@@ -1439,7 +1452,7 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
ras->bad_page_cnt_threshold);
} else if (hdr->header == RAS_TABLE_HDR_BAD &&
amdgpu_bad_page_threshold != 0) {
- if (hdr->version == RAS_TABLE_VER_V2_1) {
+ if (hdr->version >= RAS_TABLE_VER_V2_1) {
res = __read_table_ras_info(control);
if (res)
return res;
@@ -1447,11 +1460,12 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
res = __verify_ras_table_checksum(control);
if (res) {
- dev_err(adev->dev, "RAS Table incorrect checksum or error:%d\n",
- res);
+ dev_err(adev->dev,
+ "RAS Table incorrect checksum or error:%d\n",
+ res);
return -EINVAL;
}
- if (ras->bad_page_cnt_threshold > control->ras_num_bad_pages) {
+ if (ras->bad_page_cnt_threshold >= control->ras_num_bad_pages) {
/* This means that, the threshold was increased since
* the last time the system was booted, and now,
* ras->bad_page_cnt_threshold - control->num_recs > 0,
@@ -1466,17 +1480,18 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
res = amdgpu_ras_eeprom_correct_header_tag(control,
RAS_TABLE_HDR_VAL);
} else {
- dev_err(adev->dev, "RAS records:%d exceed threshold:%d",
+ dev_warn(adev->dev,
+ "RAS records:%d exceed threshold:%d\n",
control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
- if (amdgpu_bad_page_threshold == -1) {
- dev_warn(adev->dev, "GPU will be initialized due to bad_page_threshold = -1.");
+ if ((amdgpu_bad_page_threshold == -1) ||
+ (amdgpu_bad_page_threshold == -2)) {
res = 0;
+ dev_warn(adev->dev,
+ "Please consult AMD Service Action Guide (SAG) for appropriate service procedures\n");
} else {
ras->is_rma = true;
- dev_err(adev->dev,
- "RAS records:%d exceed threshold:%d, "
- "GPU will not be initialized. Replace this GPU or increase the threshold",
- control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
+ dev_warn(adev->dev,
+ "User defined threshold is set, runtime service will be halt when threshold is reached\n");
}
}
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index 81d55cb7b397..ec6d7ea37ad0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -28,6 +28,7 @@
#define RAS_TABLE_VER_V1 0x00010000
#define RAS_TABLE_VER_V2_1 0x00021000
+#define RAS_TABLE_VER_V3 0x00030000
struct amdgpu_device;
@@ -43,19 +44,6 @@ enum amdgpu_ras_eeprom_err_type {
AMDGPU_RAS_EEPROM_ERR_COUNT,
};
-/*
- * one UMC MCA address could map to multiply physical address (PA),
- * such as 1:16, we use eeprom_table_record.address to store MCA
- * address and use eeprom_table_record.retired_page to save PA.
- *
- * AMDGPU_RAS_EEPROM_REC_PA: one record store one PA
- * AMDGPU_RAS_EEPROM_REC_MCA: one record store one MCA address
- */
-enum amdgpu_ras_eeprom_rec_type {
- AMDGPU_RAS_EEPROM_REC_PA,
- AMDGPU_RAS_EEPROM_REC_MCA,
-};
-
struct amdgpu_ras_eeprom_table_header {
uint32_t header;
uint32_t version;
@@ -100,6 +88,12 @@ struct amdgpu_ras_eeprom_control {
*/
u32 ras_num_bad_pages;
+ /* Number of records that store an MCA address */
+ u32 ras_num_mca_recs;
+
+ /* Number of records that store a physical address */
+ u32 ras_num_pa_recs;
+
/* First record index to read, 0-based.
* Range is [0, num_recs-1]. This is
* an absolute index, starting right after
@@ -120,7 +114,6 @@ struct amdgpu_ras_eeprom_control {
/* Record channel info which occurred bad pages
*/
u32 bad_channel_bitmap;
- enum amdgpu_ras_eeprom_rec_type rec_type;
};
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index a6e28fe3f8d6..d55c8b7fdb59 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -324,24 +324,33 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
/* always set cond_exec_polling to CONTINUE */
*ring->cond_exe_cpu_addr = 1;
- r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
- if (r) {
- dev_err(adev->dev, "failed initializing fences (%d).\n", r);
- return r;
- }
+ if (ring->funcs->type != AMDGPU_RING_TYPE_CPER) {
+ r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
+ if (r) {
+ dev_err(adev->dev, "failed initializing fences (%d).\n", r);
+ return r;
+ }
- max_ibs_dw = ring->funcs->emit_frame_size +
- amdgpu_ring_max_ibs(ring->funcs->type) * ring->funcs->emit_ib_size;
- max_ibs_dw = (max_ibs_dw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
+ max_ibs_dw = ring->funcs->emit_frame_size +
+ amdgpu_ring_max_ibs(ring->funcs->type) * ring->funcs->emit_ib_size;
+ max_ibs_dw = (max_ibs_dw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
- if (WARN_ON(max_ibs_dw > max_dw))
- max_dw = max_ibs_dw;
+ if (WARN_ON(max_ibs_dw > max_dw))
+ max_dw = max_ibs_dw;
- ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);
+ ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);
+ } else {
+ ring->ring_size = roundup_pow_of_two(max_dw * 4);
+ ring->count_dw = (ring->ring_size - 4) >> 2;
+ /* ring buffer is empty now */
+ ring->wptr = *ring->rptr_cpu_addr = 0;
+ }
ring->buf_mask = (ring->ring_size / 4) - 1;
ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
0xffffffffffffffff : ring->buf_mask;
+ /* Initialize cached_rptr to 0 */
+ ring->cached_rptr = 0;
/* Allocate ring buffer */
if (ring->is_mes_queue) {
@@ -493,6 +502,7 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
{
struct amdgpu_ring *ring = file_inode(f)->i_private;
uint32_t value, result, early[3];
+ uint64_t p;
loff_t i;
int r;
@@ -502,13 +512,18 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
result = 0;
if (*pos < 12) {
+ if (ring->funcs->type == AMDGPU_RING_TYPE_CPER)
+ mutex_lock(&ring->adev->cper.ring_lock);
+
early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
early[2] = ring->wptr & ring->buf_mask;
for (i = *pos / 4; i < 3 && size; i++) {
r = put_user(early[i], (uint32_t *)buf);
- if (r)
- return r;
+ if (r) {
+ result = r;
+ goto out;
+ }
buf += 4;
result += 4;
size -= 4;
@@ -516,29 +531,79 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
}
}
- while (size) {
- if (*pos >= (ring->ring_size + 12))
- return result;
+ if (ring->funcs->type != AMDGPU_RING_TYPE_CPER) {
+ while (size) {
+ if (*pos >= (ring->ring_size + 12))
+ return result;
- value = ring->ring[(*pos - 12)/4];
- r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
- buf += 4;
- result += 4;
- size -= 4;
- *pos += 4;
+ value = ring->ring[(*pos - 12)/4];
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+ buf += 4;
+ result += 4;
+ size -= 4;
+ *pos += 4;
+ }
+ } else {
+ p = early[0];
+ if (early[0] <= early[1])
+ size = (early[1] - early[0]);
+ else
+ size = ring->ring_size - (early[0] - early[1]);
+
+ while (size) {
+ if (p == early[1])
+ goto out;
+
+ value = ring->ring[p];
+ r = put_user(value, (uint32_t *)buf);
+ if (r) {
+ result = r;
+ goto out;
+ }
+
+ buf += 4;
+ result += 4;
+ size--;
+ p++;
+ p &= ring->ptr_mask;
+ }
}
+out:
+ if (ring->funcs->type == AMDGPU_RING_TYPE_CPER)
+ mutex_unlock(&ring->adev->cper.ring_lock);
+
return result;
}
+static ssize_t amdgpu_debugfs_virt_ring_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_ring *ring = file_inode(f)->i_private;
+
+ if (*pos & 3 || size & 3)
+ return -EINVAL;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_CPER)
+ amdgpu_virt_req_ras_cper_dump(ring->adev, false);
+
+ return amdgpu_debugfs_ring_read(f, buf, size, pos);
+}
+
static const struct file_operations amdgpu_debugfs_ring_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_ring_read,
.llseek = default_llseek
};
+static const struct file_operations amdgpu_debugfs_virt_ring_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_virt_ring_read,
+ .llseek = default_llseek
+};
+
static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -626,9 +691,14 @@ void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
char name[32];
sprintf(name, "amdgpu_ring_%s", ring->name);
- debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
- &amdgpu_debugfs_ring_fops,
- ring->ring_size + 12);
+ if (amdgpu_sriov_vf(adev))
+ debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
+ &amdgpu_debugfs_virt_ring_fops,
+ ring->ring_size + 12);
+ else
+ debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
+ &amdgpu_debugfs_ring_fops,
+ ring->ring_size + 12);
if (ring->mqd_obj) {
sprintf(name, "amdgpu_mqd_%s", ring->name);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index dee5a1b4e572..bb2b66385223 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -37,7 +37,7 @@ struct amdgpu_job;
struct amdgpu_vm;
/* max number of rings */
-#define AMDGPU_MAX_RINGS 124
+#define AMDGPU_MAX_RINGS 149
#define AMDGPU_MAX_HWIP_RINGS 64
#define AMDGPU_MAX_GFX_RINGS 2
#define AMDGPU_MAX_SW_GFX_RINGS 2
@@ -82,6 +82,7 @@ enum amdgpu_ring_type {
AMDGPU_RING_TYPE_KIQ,
AMDGPU_RING_TYPE_MES,
AMDGPU_RING_TYPE_UMSCH_MM,
+ AMDGPU_RING_TYPE_CPER,
};
enum amdgpu_ib_pool_type {
@@ -237,6 +238,7 @@ struct amdgpu_ring_funcs {
void (*patch_de)(struct amdgpu_ring *ring, unsigned offset);
int (*reset)(struct amdgpu_ring *ring, unsigned int vmid);
void (*emit_cleaner_shader)(struct amdgpu_ring *ring);
+ bool (*is_guilty)(struct amdgpu_ring *ring);
};
struct amdgpu_ring {
@@ -306,6 +308,8 @@ struct amdgpu_ring {
bool is_sw_ring;
unsigned int entry_index;
+ /* store the cached rptr to restore after reset */
+ uint64_t cached_rptr;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 174badca27e7..529c9696c2f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -25,6 +25,7 @@
#include "amdgpu.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
+#include "amdgpu_reset.h"
#define AMDGPU_CSA_SDMA_SIZE 64
/* SDMA CSA reside in the 3rd page of CSA */
@@ -355,23 +356,44 @@ int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev)
static int amdgpu_debugfs_sdma_sched_mask_set(void *data, u64 val)
{
struct amdgpu_device *adev = (struct amdgpu_device *)data;
- u32 i;
+ u64 i, num_ring;
u64 mask = 0;
- struct amdgpu_ring *ring;
+ struct amdgpu_ring *ring, *page = NULL;
if (!adev)
return -ENODEV;
- mask = BIT_ULL(adev->sdma.num_instances) - 1;
+ /* Determine the number of rings per SDMA instance
+ * (1 for sdma gfx ring, 2 if page queue exists)
+ */
+ if (adev->sdma.has_page_queue)
+ num_ring = 2;
+ else
+ num_ring = 1;
+
+ /* Calculate the maximum possible mask value
+ * based on the number of SDMA instances and rings
+ */
+ mask = BIT_ULL(adev->sdma.num_instances * num_ring) - 1;
+
if ((val & mask) == 0)
return -EINVAL;
for (i = 0; i < adev->sdma.num_instances; ++i) {
ring = &adev->sdma.instance[i].ring;
- if (val & BIT_ULL(i))
+ if (adev->sdma.has_page_queue)
+ page = &adev->sdma.instance[i].page;
+ if (val & BIT_ULL(i * num_ring))
ring->sched.ready = true;
else
ring->sched.ready = false;
+
+ if (page) {
+ if (val & BIT_ULL(i * num_ring + 1))
+ page->sched.ready = true;
+ else
+ page->sched.ready = false;
+ }
}
/* publish sched.ready flag update effective immediately across smp */
smp_rmb();
@@ -381,16 +403,37 @@ static int amdgpu_debugfs_sdma_sched_mask_set(void *data, u64 val)
static int amdgpu_debugfs_sdma_sched_mask_get(void *data, u64 *val)
{
struct amdgpu_device *adev = (struct amdgpu_device *)data;
- u32 i;
+ u64 i, num_ring;
u64 mask = 0;
- struct amdgpu_ring *ring;
+ struct amdgpu_ring *ring, *page = NULL;
if (!adev)
return -ENODEV;
+
+ /* Determine the number of rings per SDMA instance
+ * (1 for sdma gfx ring, 2 if page queue exists)
+ */
+ if (adev->sdma.has_page_queue)
+ num_ring = 2;
+ else
+ num_ring = 1;
+
for (i = 0; i < adev->sdma.num_instances; ++i) {
ring = &adev->sdma.instance[i].ring;
+ if (adev->sdma.has_page_queue)
+ page = &adev->sdma.instance[i].page;
+
if (ring->sched.ready)
- mask |= 1 << i;
+ mask |= BIT_ULL(i * num_ring);
+ else
+ mask &= ~BIT_ULL(i * num_ring);
+
+ if (page) {
+ if (page->sched.ready)
+ mask |= BIT_ULL(i * num_ring + 1);
+ else
+ mask &= ~BIT_ULL(i * num_ring + 1);
+ }
}
*val = mask;
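
Illustrative aside: with a page queue present, each SDMA instance now occupies two bits in the debugfs scheduler mask — bit (i * 2) for the sdma gfx ring and bit (i * 2 + 1) for the page ring. A standalone sketch of packing that layout, assuming two rings per instance as in the has_page_queue case above (the ready flags are made up):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RINGS_PER_INST 2 /* sdma gfx ring + page ring */

/* Pack per-instance ready flags into the mask layout used above. */
static uint64_t sdma_pack_sched_mask(const bool *gfx_ready,
                                     const bool *page_ready,
                                     unsigned int num_inst)
{
        uint64_t mask = 0;
        unsigned int i;

        for (i = 0; i < num_inst; i++) {
                if (gfx_ready[i])
                        mask |= 1ULL << (i * RINGS_PER_INST);
                if (page_ready[i])
                        mask |= 1ULL << (i * RINGS_PER_INST + 1);
        }
        return mask;
}

int main(void)
{
        bool gfx[2] = { true, false };
        bool page[2] = { true, true };

        /* instance 0: gfx + page ready, instance 1: page only -> 0xb */
        printf("0x%llx\n", (unsigned long long)sdma_pack_sched_mask(gfx, page, 2));
        return 0;
}
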
@@ -460,3 +503,147 @@ void amdgpu_sdma_sysfs_reset_mask_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev, &dev_attr_sdma_reset_mask);
}
}
+
+struct amdgpu_ring *amdgpu_sdma_get_shared_ring(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+ if (adev->sdma.has_page_queue &&
+ (ring->me < adev->sdma.num_instances) &&
+ (ring == &adev->sdma.instance[ring->me].ring))
+ return &adev->sdma.instance[ring->me].page;
+ else
+ return NULL;
+}
+
+/**
+ * amdgpu_sdma_is_shared_inv_eng - Check if a ring is an SDMA ring that shares a VM invalidation engine
+ * @adev: Pointer to the AMDGPU device structure
+ * @ring: Pointer to the ring structure to check
+ *
+ * This function checks if the given ring is an SDMA ring that shares a VM invalidation engine.
+ * It returns true if the ring is such an SDMA ring, false otherwise.
+ */
+bool amdgpu_sdma_is_shared_inv_eng(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+ int i = ring->me;
+
+ if (!adev->sdma.has_page_queue || i >= adev->sdma.num_instances)
+ return false;
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ return (ring == &adev->sdma.instance[i].page);
+ else
+ return false;
+}
+
+/**
+ * amdgpu_sdma_register_on_reset_callbacks - Register SDMA reset callbacks
+ * @adev: Pointer to the AMDGPU device
+ * @funcs: Pointer to the callback structure containing pre_reset and post_reset functions
+ *
+ * This function allows KFD and AMDGPU to register their own callbacks for handling
+ * pre-reset and post-reset operations for engine reset. These are needed because engine
+ * reset will stop all queues on that engine.
+ */
+void amdgpu_sdma_register_on_reset_callbacks(struct amdgpu_device *adev, struct sdma_on_reset_funcs *funcs)
+{
+ if (!funcs)
+ return;
+
+ /* Ensure the reset_callback_list is initialized */
+ if (!adev->sdma.reset_callback_list.next) {
+ INIT_LIST_HEAD(&adev->sdma.reset_callback_list);
+ }
+ /* Initialize the list node in the callback structure */
+ INIT_LIST_HEAD(&funcs->list);
+
+ /* Add the callback structure to the global list */
+ list_add_tail(&funcs->list, &adev->sdma.reset_callback_list);
+}
+
+/**
+ * amdgpu_sdma_reset_engine - Reset a specific SDMA engine
+ * @adev: Pointer to the AMDGPU device
+ * @instance_id: ID of the SDMA engine instance to reset
+ *
+ * This function performs the following steps:
+ * 1. Calls all registered pre_reset callbacks to allow KFD and AMDGPU to save their state.
+ * 2. Resets the specified SDMA engine instance.
+ * 3. Calls all registered post_reset callbacks to allow KFD and AMDGPU to restore their state.
+ *
+ * Returns: 0 on success, or a negative error code on failure.
+ */
+int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id)
+{
+ struct sdma_on_reset_funcs *funcs;
+ int ret = 0;
+ struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id];
+ struct amdgpu_ring *gfx_ring = &sdma_instance->ring;
+ struct amdgpu_ring *page_ring = &sdma_instance->page;
+ bool gfx_sched_stopped = false, page_sched_stopped = false;
+
+ mutex_lock(&sdma_instance->engine_reset_mutex);
+ /* Stop the scheduler's work queue for the GFX and page rings if they are running.
+ * This ensures that no new tasks are submitted to the queues while
+ * the reset is in progress.
+ */
+ if (!amdgpu_ring_sched_ready(gfx_ring)) {
+ drm_sched_wqueue_stop(&gfx_ring->sched);
+ gfx_sched_stopped = true;
+ }
+
+ if (adev->sdma.has_page_queue && !amdgpu_ring_sched_ready(page_ring)) {
+ drm_sched_wqueue_stop(&page_ring->sched);
+ page_sched_stopped = true;
+ }
+
+ /* Invoke all registered pre_reset callbacks */
+ list_for_each_entry(funcs, &adev->sdma.reset_callback_list, list) {
+ if (funcs->pre_reset) {
+ ret = funcs->pre_reset(adev, instance_id);
+ if (ret) {
+ dev_err(adev->dev,
+ "beforeReset callback failed for instance %u: %d\n",
+ instance_id, ret);
+ goto exit;
+ }
+ }
+ }
+
+ /* Perform the SDMA reset for the specified instance */
+ ret = amdgpu_dpm_reset_sdma(adev, 1 << instance_id);
+ if (ret) {
+ dev_err(adev->dev, "Failed to reset SDMA instance %u\n", instance_id);
+ goto exit;
+ }
+
+ /* Invoke all registered post_reset callbacks */
+ list_for_each_entry(funcs, &adev->sdma.reset_callback_list, list) {
+ if (funcs->post_reset) {
+ ret = funcs->post_reset(adev, instance_id);
+ if (ret) {
+ dev_err(adev->dev,
+ "afterReset callback failed for instance %u: %d\n",
+ instance_id, ret);
+ goto exit;
+ }
+ }
+ }
+
+exit:
+ /* Restart the scheduler's work queue for the GFX and page rings
+ * if they were stopped by this function. This allows new tasks
+ * to be submitted to the queues after the reset is complete.
+ */
+ if (!ret) {
+ if (gfx_sched_stopped && amdgpu_ring_sched_ready(gfx_ring)) {
+ drm_sched_wqueue_start(&gfx_ring->sched);
+ }
+ if (page_sched_stopped && amdgpu_ring_sched_ready(page_ring)) {
+ drm_sched_wqueue_start(&page_ring->sched);
+ }
+ }
+ mutex_unlock(&sdma_instance->engine_reset_mutex);
+
+ return ret;
+}
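
Illustrative aside: a hedged sketch of how a client such as KFD might hook into this reset path using the struct sdma_on_reset_funcs and registration helper introduced above. The kfd_sdma_* names and the empty callback bodies are placeholders, not the actual KFD implementation:

/* Placeholder callbacks; only the signatures come from the patch above. */
static int kfd_sdma_pre_reset(struct amdgpu_device *adev, uint32_t instance_id)
{
        /* save queue state for this SDMA instance before the engine reset */
        return 0;
}

static int kfd_sdma_post_reset(struct amdgpu_device *adev, uint32_t instance_id)
{
        /* restore queue state once the engine has been reset */
        return 0;
}

static struct sdma_on_reset_funcs kfd_sdma_reset_funcs = {
        .pre_reset  = kfd_sdma_pre_reset,
        .post_reset = kfd_sdma_post_reset,
};

static void kfd_sdma_hook_engine_reset(struct amdgpu_device *adev)
{
        amdgpu_sdma_register_on_reset_callbacks(adev, &kfd_sdma_reset_funcs);
}
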
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 5f60736051d1..47d56fd0589f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -64,6 +64,11 @@ struct amdgpu_sdma_instance {
struct amdgpu_bo *sdma_fw_obj;
uint64_t sdma_fw_gpu_addr;
uint32_t *sdma_fw_ptr;
+ struct mutex engine_reset_mutex;
+ /* track guilty state of GFX and PAGE queues */
+ bool gfx_guilty;
+ bool page_guilty;
+
};
enum amdgpu_sdma_ras_memory_id {
@@ -98,6 +103,13 @@ struct amdgpu_sdma_ras {
struct amdgpu_ras_block_object ras_block;
};
+struct sdma_on_reset_funcs {
+ int (*pre_reset)(struct amdgpu_device *adev, uint32_t instance_id);
+ int (*post_reset)(struct amdgpu_device *adev, uint32_t instance_id);
+ /* Linked list node to store this structure in a list */
+ struct list_head list;
+};
+
struct amdgpu_sdma {
struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
struct amdgpu_irq_src trap_irq;
@@ -118,6 +130,7 @@ struct amdgpu_sdma {
struct amdgpu_sdma_ras *ras;
uint32_t *ip_dump;
uint32_t supported_reset;
+ struct list_head reset_callback_list;
};
/*
@@ -157,6 +170,9 @@ struct amdgpu_buffer_funcs {
uint32_t byte_count);
};
+void amdgpu_sdma_register_on_reset_callbacks(struct amdgpu_device *adev, struct sdma_on_reset_funcs *funcs);
+int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id);
+
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b, t) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b), (t))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
@@ -180,4 +196,7 @@ int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev);
void amdgpu_debugfs_sdma_sched_mask_init(struct amdgpu_device *adev);
int amdgpu_sdma_sysfs_reset_mask_init(struct amdgpu_device *adev);
void amdgpu_sdma_sysfs_reset_mask_fini(struct amdgpu_device *adev);
+bool amdgpu_sdma_is_shared_inv_eng(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+struct amdgpu_ring *amdgpu_sdma_get_shared_ring(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index c586ab4c911b..5576ed0b508f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -135,11 +135,16 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
struct amdgpu_sync_entry *e;
hash_for_each_possible(sync->fences, e, node, f->context) {
- if (unlikely(e->fence->context != f->context))
- continue;
+ if (dma_fence_is_signaled(e->fence)) {
+ dma_fence_put(e->fence);
+ e->fence = dma_fence_get(f);
+ return true;
+ }
- amdgpu_sync_keep_later(&e->fence, f);
- return true;
+ if (likely(e->fence->context == f->context)) {
+ amdgpu_sync_keep_later(&e->fence, f);
+ return true;
+ }
}
return false;
}
@@ -149,10 +154,12 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
*
* @sync: sync object to add fence to
* @f: fence to sync to
+ * @flags: memory allocation flags to use when allocating sync entry
*
* Add the fence to the sync object.
*/
-int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f)
+int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
+ gfp_t flags)
{
struct amdgpu_sync_entry *e;
@@ -162,7 +169,7 @@ int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f)
if (amdgpu_sync_add_later(sync, f))
return 0;
- e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
+ e = kmem_cache_alloc(amdgpu_sync_slab, flags);
if (!e)
return -ENOMEM;
@@ -249,7 +256,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
struct dma_fence *tmp = dma_fence_chain_contained(f);
if (amdgpu_sync_test_fence(adev, mode, owner, tmp)) {
- r = amdgpu_sync_fence(sync, f);
+ r = amdgpu_sync_fence(sync, f, GFP_KERNEL);
dma_fence_put(f);
if (r)
return r;
@@ -281,7 +288,7 @@ int amdgpu_sync_kfd(struct amdgpu_sync *sync, struct dma_resv *resv)
if (fence_owner != AMDGPU_FENCE_OWNER_KFD)
continue;
- r = amdgpu_sync_fence(sync, f);
+ r = amdgpu_sync_fence(sync, f, GFP_KERNEL);
if (r)
break;
}
@@ -388,7 +395,7 @@ int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
hash_for_each_safe(source->fences, i, tmp, e, node) {
f = e->fence;
if (!dma_fence_is_signaled(f)) {
- r = amdgpu_sync_fence(clone, f);
+ r = amdgpu_sync_fence(clone, f, GFP_KERNEL);
if (r)
return r;
} else {
@@ -400,6 +407,25 @@ int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
}
/**
+ * amdgpu_sync_move - move all fences from src to dst
+ *
+ * @src: source of the fences, empty after function
+ * @dst: destination for the fences
+ *
+ * Moves all fences from source to destination. All fences in destination are
+ * freed and source is empty after the function call.
+ */
+void amdgpu_sync_move(struct amdgpu_sync *src, struct amdgpu_sync *dst)
+{
+ unsigned int i;
+
+ amdgpu_sync_free(dst);
+
+ for (i = 0; i < HASH_SIZE(src->fences); ++i)
+ hlist_move_list(&src->fences[i], &dst->fences[i]);
+}
+
+/**
* amdgpu_sync_push_to_job - push fences into job
* @sync: sync object to get the fences from
* @job: job to push the fences into
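
Illustrative aside: callers of amdgpu_sync_fence() now pass the allocation context explicitly, and amdgpu_sync_move() hands a collected fence set over wholesale. A hedged fragment showing the intended usage; the surrounding helper and its error handling are assumptions, only the amdgpu_sync_* signatures above are real:

/* Hypothetical helper: collect a fence and hand the whole set over. */
static int example_collect_and_move(struct amdgpu_sync *tmp,
                                    struct amdgpu_sync *out,
                                    struct dma_fence *fence)
{
        int r;

        /* process context, so a sleeping allocation is acceptable */
        r = amdgpu_sync_fence(tmp, fence, GFP_KERNEL);
        if (r)
                return r;

        /* 'out' is freed first, then takes all fences; 'tmp' ends up empty */
        amdgpu_sync_move(tmp, out);
        return 0;
}
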
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index e3272dce798d..51eb4382c91e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -47,7 +47,8 @@ struct amdgpu_sync {
};
void amdgpu_sync_create(struct amdgpu_sync *sync);
-int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f);
+int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
+ gfp_t flags);
int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
struct dma_resv *resv, enum amdgpu_sync_mode mode,
void *owner);
@@ -56,6 +57,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring);
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone);
+void amdgpu_sync_move(struct amdgpu_sync *src, struct amdgpu_sync *dst);
int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job);
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr);
void amdgpu_sync_free(struct amdgpu_sync *sync);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 383fce40d4dd..11dd2e0f7979 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -457,6 +457,38 @@ DEFINE_EVENT(amdgpu_pasid, amdgpu_pasid_freed,
TP_ARGS(pasid)
);
+TRACE_EVENT(amdgpu_isolation,
+ TP_PROTO(void *prev, void *next),
+ TP_ARGS(prev, next),
+ TP_STRUCT__entry(
+ __field(void *, prev)
+ __field(void *, next)
+ ),
+
+ TP_fast_assign(
+ __entry->prev = prev;
+ __entry->next = next;
+ ),
+ TP_printk("prev=%p, next=%p",
+ __entry->prev,
+ __entry->next)
+);
+
+TRACE_EVENT(amdgpu_cleaner_shader,
+ TP_PROTO(struct amdgpu_ring *ring, struct dma_fence *fence),
+ TP_ARGS(ring, fence),
+ TP_STRUCT__entry(
+ __string(ring, ring->name)
+ __field(u64, seqno)
+ ),
+
+ TP_fast_assign(
+ __assign_str(ring);
+ __entry->seqno = fence->seqno;
+ ),
+ TP_printk("ring=%s, seqno=%Lu", __get_str(ring), __entry->seqno)
+);
+
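
Illustrative aside: TRACE_EVENT() generates trace_amdgpu_isolation() and trace_amdgpu_cleaner_shader() helpers for the two events defined above. A hedged sketch of a call site; the surrounding function and its arguments are hypothetical:

/* Hypothetical call site; only the tracepoint names follow from the
 * TRACE_EVENT() definitions above. */
static void example_trace_isolation_switch(void *prev_owner, void *next_owner,
                                           struct amdgpu_ring *ring,
                                           struct dma_fence *cleaner_fence)
{
        trace_amdgpu_isolation(prev_owner, next_owner);
        trace_amdgpu_cleaner_shader(ring, cleaner_fence);
}
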
TRACE_EVENT(amdgpu_bo_list_set,
TP_PROTO(struct amdgpu_bo_list *list, struct amdgpu_bo *bo),
TP_ARGS(list, bo),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 262bd010a283..53b71e9d8076 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1964,10 +1964,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
/* Compute GTT size, either based on TTM limit
* or whatever the user passed on module init.
*/
- if (amdgpu_gtt_size == -1)
- gtt_size = ttm_tt_pages_limit() << PAGE_SHIFT;
- else
- gtt_size = (uint64_t)amdgpu_gtt_size << 20;
+ gtt_size = ttm_tt_pages_limit() << PAGE_SHIFT;
+ if (amdgpu_gtt_size != -1) {
+ uint64_t configured_size = (uint64_t)amdgpu_gtt_size << 20;
+
+ drm_warn(&adev->ddev,
+ "Configuring gttsize via module parameter is deprecated, please use ttm.pages_limit\n");
+ if (gtt_size != configured_size)
+ drm_warn(&adev->ddev,
+ "GTT size has been set as %llu but TTM size has been set as %llu, this is unusual\n",
+ configured_size, gtt_size);
+
+ gtt_size = configured_size;
+ }
/* Initialize GTT memory pool */
r = amdgpu_gtt_mgr_init(adev, gtt_size);
@@ -1978,6 +1987,11 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
(unsigned int)(gtt_size / (1024 * 1024)));
+ if (adev->flags & AMD_IS_APU) {
+ if (adev->gmc.real_vram_size < gtt_size)
+ adev->apu_prefer_gtt = true;
+ }
+
/* Initialize doorbell pool on PCI BAR */
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE);
if (r) {
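
Illustrative aside: the GTT pool size now defaults to the TTM pages limit, with the deprecated amdgpu.gttsize module parameter still overriding it (and warning) when set. A standalone sketch of that precedence, with made-up sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t ttm_limit = 8ULL << 30;  /* hypothetical ttm.pages_limit size, bytes */
        int64_t gttsize_mib = 4096;       /* hypothetical module parameter, MiB; -1 = unset */
        uint64_t gtt_size = ttm_limit;    /* default: follow TTM */

        if (gttsize_mib != -1) {
                uint64_t configured = (uint64_t)gttsize_mib << 20;

                if (configured != gtt_size)
                        printf("warning: gttsize %llu differs from TTM limit %llu\n",
                               (unsigned long long)configured,
                               (unsigned long long)gtt_size);
                gtt_size = configured; /* deprecated parameter still wins */
        }

        printf("GTT pool: %llu MiB\n", (unsigned long long)(gtt_size >> 20));
        return 0;
}
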
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index cf700824b960..3d9e9fdc10b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -1216,6 +1216,7 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
case IP_VERSION(11, 0, 13):
return "beige_goby";
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
return "vangogh";
case IP_VERSION(12, 0, 1):
return "green_sardine";
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index eafe20d8fe0b..0a1ef95b2866 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -387,6 +387,45 @@ int amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
return 0;
}
+static int amdgpu_umc_loop_all_aid(struct amdgpu_device *adev, umc_func func,
+ void *data)
+{
+ uint32_t umc_node_inst;
+ uint32_t node_inst;
+ uint32_t umc_inst;
+ uint32_t ch_inst;
+ int ret;
+
+ /*
+ * This loop is done based on the following -
+ * umc.active mask = mask of active umc instances across all nodes
+ * umc.umc_inst_num = maximum number of umc instances per node
+ * umc.node_inst_num = maximum number of node instances
+ * Channel instances are not assumed to be harvested.
+ */
+ dev_dbg(adev->dev, "active umcs :%lx umc_inst per node: %d",
+ adev->umc.active_mask, adev->umc.umc_inst_num);
+ for_each_set_bit(umc_node_inst, &(adev->umc.active_mask),
+ adev->umc.node_inst_num * adev->umc.umc_inst_num) {
+ node_inst = umc_node_inst / adev->umc.umc_inst_num;
+ umc_inst = umc_node_inst % adev->umc.umc_inst_num;
+ LOOP_UMC_CH_INST(ch_inst) {
+ dev_dbg(adev->dev,
+ "node_inst :%d umc_inst: %d ch_inst: %d",
+ node_inst, umc_inst, ch_inst);
+ ret = func(adev, node_inst, umc_inst, ch_inst, data);
+ if (ret) {
+ dev_err(adev->dev,
+ "Node %d umc %d ch %d func returns %d\n",
+ node_inst, umc_inst, ch_inst, ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
umc_func func, void *data)
{
@@ -395,6 +434,9 @@ int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
uint32_t ch_inst = 0;
int ret = 0;
+ if (adev->aid_mask)
+ return amdgpu_umc_loop_all_aid(adev, func, data);
+
if (adev->umc.node_inst_num) {
LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) {
ret = func(adev, node_inst, umc_inst, ch_inst, data);
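
Illustrative aside: amdgpu_umc_loop_all_aid() walks a flat active mask and derives the node/UMC pair from each set bit. A standalone sketch of that index decomposition with a hypothetical topology and harvest mask:

#include <stdio.h>

int main(void)
{
        const unsigned int umc_inst_num = 2;  /* hypothetical UMCs per node */
        const unsigned int node_inst_num = 4; /* hypothetical node count */
        unsigned long active_mask = 0x5a;     /* hypothetical harvest pattern */
        unsigned int bit;

        for (bit = 0; bit < node_inst_num * umc_inst_num; bit++) {
                if (!(active_mask & (1UL << bit)))
                        continue;
                /* same decomposition as the loop above */
                printf("bit %u -> node %u, umc %u\n",
                       bit, bit / umc_inst_num, bit % umc_inst_num);
        }
        return 0;
}
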
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index a4a7e61817aa..857693bcd8d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -71,6 +71,13 @@
*/
#define UMC_CHANNEL_IDX_V2 BIT_ULL(47)
+/*
+ * save nps value to eeprom_table_record.retired_page[47:40],
+ * the channel index flag above will be retired.
+ */
+#define UMC_NPS_SHIFT 40
+#define UMC_NPS_MASK 0xffULL
+
typedef int (*umc_func)(struct amdgpu_device *adev, uint32_t node_inst,
uint32_t umc_inst, uint32_t ch_inst, void *data);
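
Illustrative aside: the new macros reserve bits [47:40] of retired_page for the NPS value. A standalone sketch of encoding and decoding with those definitions; the page address and NPS mode below are made up:

#include <stdint.h>
#include <stdio.h>

#define UMC_NPS_SHIFT 40
#define UMC_NPS_MASK  0xffULL

int main(void)
{
        uint64_t retired_page = 0x12345000ULL; /* hypothetical page address */
        uint64_t nps = 4;                      /* hypothetical NPS mode */

        /* encode NPS into bits [47:40] as described above */
        retired_page |= (nps & UMC_NPS_MASK) << UMC_NPS_SHIFT;

        /* decode it back */
        printf("nps = %llu\n",
               (unsigned long long)((retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK));
        return 0;
}
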
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
index dde15c6a96e1..cd707d70a0bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
@@ -32,462 +32,7 @@
#include "amdgpu_umsch_mm.h"
#include "umsch_mm_v4_0.h"
-struct umsch_mm_test_ctx_data {
- uint8_t process_csa[PAGE_SIZE];
- uint8_t vpe_ctx_csa[PAGE_SIZE];
- uint8_t vcn_ctx_csa[PAGE_SIZE];
-};
-
-struct umsch_mm_test_mqd_data {
- uint8_t vpe_mqd[PAGE_SIZE];
- uint8_t vcn_mqd[PAGE_SIZE];
-};
-
-struct umsch_mm_test_ring_data {
- uint8_t vpe_ring[PAGE_SIZE];
- uint8_t vpe_ib[PAGE_SIZE];
- uint8_t vcn_ring[PAGE_SIZE];
- uint8_t vcn_ib[PAGE_SIZE];
-};
-
-struct umsch_mm_test_queue_info {
- uint64_t mqd_addr;
- uint64_t csa_addr;
- uint32_t doorbell_offset_0;
- uint32_t doorbell_offset_1;
- enum UMSCH_SWIP_ENGINE_TYPE engine;
-};
-
-struct umsch_mm_test {
- struct amdgpu_bo *ctx_data_obj;
- uint64_t ctx_data_gpu_addr;
- uint32_t *ctx_data_cpu_addr;
-
- struct amdgpu_bo *mqd_data_obj;
- uint64_t mqd_data_gpu_addr;
- uint32_t *mqd_data_cpu_addr;
-
- struct amdgpu_bo *ring_data_obj;
- uint64_t ring_data_gpu_addr;
- uint32_t *ring_data_cpu_addr;
-
-
- struct amdgpu_vm *vm;
- struct amdgpu_bo_va *bo_va;
- uint32_t pasid;
- uint32_t vm_cntx_cntl;
- uint32_t num_queues;
-};
-
-static int map_ring_data(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
- uint64_t addr, uint32_t size)
-{
- struct amdgpu_sync sync;
- struct drm_exec exec;
- int r;
-
- amdgpu_sync_create(&sync);
-
- drm_exec_init(&exec, 0, 0);
- drm_exec_until_all_locked(&exec) {
- r = drm_exec_lock_obj(&exec, &bo->tbo.base);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto error_fini_exec;
-
- r = amdgpu_vm_lock_pd(vm, &exec, 0);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto error_fini_exec;
- }
-
- *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
- if (!*bo_va) {
- r = -ENOMEM;
- goto error_fini_exec;
- }
-
- r = amdgpu_vm_bo_map(adev, *bo_va, addr, 0, size,
- AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
- AMDGPU_PTE_EXECUTABLE);
-
- if (r)
- goto error_del_bo_va;
-
-
- r = amdgpu_vm_bo_update(adev, *bo_va, false);
- if (r)
- goto error_del_bo_va;
-
- amdgpu_sync_fence(&sync, (*bo_va)->last_pt_update);
-
- r = amdgpu_vm_update_pdes(adev, vm, false);
- if (r)
- goto error_del_bo_va;
-
- amdgpu_sync_fence(&sync, vm->last_update);
-
- amdgpu_sync_wait(&sync, false);
- drm_exec_fini(&exec);
-
- amdgpu_sync_free(&sync);
-
- return 0;
-
-error_del_bo_va:
- amdgpu_vm_bo_del(adev, *bo_va);
- amdgpu_sync_free(&sync);
-
-error_fini_exec:
- drm_exec_fini(&exec);
- amdgpu_sync_free(&sync);
- return r;
-}
-
-static int unmap_ring_data(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_bo *bo, struct amdgpu_bo_va *bo_va,
- uint64_t addr)
-{
- struct drm_exec exec;
- long r;
-
- drm_exec_init(&exec, 0, 0);
- drm_exec_until_all_locked(&exec) {
- r = drm_exec_lock_obj(&exec, &bo->tbo.base);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto out_unlock;
-
- r = amdgpu_vm_lock_pd(vm, &exec, 0);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto out_unlock;
- }
-
-
- r = amdgpu_vm_bo_unmap(adev, bo_va, addr);
- if (r)
- goto out_unlock;
-
- amdgpu_vm_bo_del(adev, bo_va);
-
-out_unlock:
- drm_exec_fini(&exec);
-
- return r;
-}
-
-static void setup_vpe_queue(struct amdgpu_device *adev,
- struct umsch_mm_test *test,
- struct umsch_mm_test_queue_info *qinfo)
-{
- struct MQD_INFO *mqd = (struct MQD_INFO *)test->mqd_data_cpu_addr;
- uint64_t ring_gpu_addr = test->ring_data_gpu_addr;
-
- mqd->rb_base_lo = (ring_gpu_addr >> 8);
- mqd->rb_base_hi = (ring_gpu_addr >> 40);
- mqd->rb_size = PAGE_SIZE / 4;
- mqd->wptr_val = 0;
- mqd->rptr_val = 0;
- mqd->unmapped = 1;
-
- if (adev->vpe.collaborate_mode)
- memcpy(++mqd, test->mqd_data_cpu_addr, sizeof(struct MQD_INFO));
-
- qinfo->mqd_addr = test->mqd_data_gpu_addr;
- qinfo->csa_addr = test->ctx_data_gpu_addr +
- offsetof(struct umsch_mm_test_ctx_data, vpe_ctx_csa);
- qinfo->doorbell_offset_0 = 0;
- qinfo->doorbell_offset_1 = 0;
-}
-
-static void setup_vcn_queue(struct amdgpu_device *adev,
- struct umsch_mm_test *test,
- struct umsch_mm_test_queue_info *qinfo)
-{
-}
-
-static int add_test_queue(struct amdgpu_device *adev,
- struct umsch_mm_test *test,
- struct umsch_mm_test_queue_info *qinfo)
-{
- struct umsch_mm_add_queue_input queue_input = {};
- int r;
-
- queue_input.process_id = test->pasid;
- queue_input.page_table_base_addr = amdgpu_gmc_pd_addr(test->vm->root.bo);
-
- queue_input.process_va_start = 0;
- queue_input.process_va_end = (adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
-
- queue_input.process_quantum = 100000; /* 10ms */
- queue_input.process_csa_addr = test->ctx_data_gpu_addr +
- offsetof(struct umsch_mm_test_ctx_data, process_csa);
-
- queue_input.context_quantum = 10000; /* 1ms */
- queue_input.context_csa_addr = qinfo->csa_addr;
-
- queue_input.inprocess_context_priority = CONTEXT_PRIORITY_LEVEL_NORMAL;
- queue_input.context_global_priority_level = CONTEXT_PRIORITY_LEVEL_NORMAL;
- queue_input.doorbell_offset_0 = qinfo->doorbell_offset_0;
- queue_input.doorbell_offset_1 = qinfo->doorbell_offset_1;
-
- queue_input.engine_type = qinfo->engine;
- queue_input.mqd_addr = qinfo->mqd_addr;
- queue_input.vm_context_cntl = test->vm_cntx_cntl;
-
- amdgpu_umsch_mm_lock(&adev->umsch_mm);
- r = adev->umsch_mm.funcs->add_queue(&adev->umsch_mm, &queue_input);
- amdgpu_umsch_mm_unlock(&adev->umsch_mm);
- if (r)
- return r;
-
- return 0;
-}
-
-static int remove_test_queue(struct amdgpu_device *adev,
- struct umsch_mm_test *test,
- struct umsch_mm_test_queue_info *qinfo)
-{
- struct umsch_mm_remove_queue_input queue_input = {};
- int r;
-
- queue_input.doorbell_offset_0 = qinfo->doorbell_offset_0;
- queue_input.doorbell_offset_1 = qinfo->doorbell_offset_1;
- queue_input.context_csa_addr = qinfo->csa_addr;
-
- amdgpu_umsch_mm_lock(&adev->umsch_mm);
- r = adev->umsch_mm.funcs->remove_queue(&adev->umsch_mm, &queue_input);
- amdgpu_umsch_mm_unlock(&adev->umsch_mm);
- if (r)
- return r;
-
- return 0;
-}
-
-static int submit_vpe_queue(struct amdgpu_device *adev, struct umsch_mm_test *test)
-{
- struct MQD_INFO *mqd = (struct MQD_INFO *)test->mqd_data_cpu_addr;
- uint32_t *ring = test->ring_data_cpu_addr +
- offsetof(struct umsch_mm_test_ring_data, vpe_ring) / 4;
- uint32_t *ib = test->ring_data_cpu_addr +
- offsetof(struct umsch_mm_test_ring_data, vpe_ib) / 4;
- uint64_t ib_gpu_addr = test->ring_data_gpu_addr +
- offsetof(struct umsch_mm_test_ring_data, vpe_ib);
- uint32_t *fence = ib + 2048 / 4;
- uint64_t fence_gpu_addr = ib_gpu_addr + 2048;
- const uint32_t test_pattern = 0xdeadbeef;
- int i;
-
- ib[0] = VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0);
- ib[1] = lower_32_bits(fence_gpu_addr);
- ib[2] = upper_32_bits(fence_gpu_addr);
- ib[3] = test_pattern;
-
- ring[0] = VPE_CMD_HEADER(VPE_CMD_OPCODE_INDIRECT, 0);
- ring[1] = (ib_gpu_addr & 0xffffffe0);
- ring[2] = upper_32_bits(ib_gpu_addr);
- ring[3] = 4;
- ring[4] = 0;
- ring[5] = 0;
-
- mqd->wptr_val = (6 << 2);
- if (adev->vpe.collaborate_mode)
- (++mqd)->wptr_val = (6 << 2);
-
- WDOORBELL32(adev->umsch_mm.agdb_index[CONTEXT_PRIORITY_LEVEL_NORMAL], mqd->wptr_val);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if (*fence == test_pattern)
- return 0;
- udelay(1);
- }
-
- dev_err(adev->dev, "vpe queue submission timeout\n");
-
- return -ETIMEDOUT;
-}
-
-static int submit_vcn_queue(struct amdgpu_device *adev, struct umsch_mm_test *test)
-{
- return 0;
-}
-
-static int setup_umsch_mm_test(struct amdgpu_device *adev,
- struct umsch_mm_test *test)
-{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
- int r;
-
- test->vm_cntx_cntl = hub->vm_cntx_cntl;
-
- test->vm = kzalloc(sizeof(*test->vm), GFP_KERNEL);
- if (!test->vm) {
- r = -ENOMEM;
- return r;
- }
-
- r = amdgpu_vm_init(adev, test->vm, -1);
- if (r)
- goto error_free_vm;
-
- r = amdgpu_pasid_alloc(16);
- if (r < 0)
- goto error_fini_vm;
- test->pasid = r;
-
- r = amdgpu_bo_create_kernel(adev, sizeof(struct umsch_mm_test_ctx_data),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
- &test->ctx_data_obj,
- &test->ctx_data_gpu_addr,
- (void **)&test->ctx_data_cpu_addr);
- if (r)
- goto error_free_pasid;
-
- memset(test->ctx_data_cpu_addr, 0, sizeof(struct umsch_mm_test_ctx_data));
-
- r = amdgpu_bo_create_kernel(adev, PAGE_SIZE,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
- &test->mqd_data_obj,
- &test->mqd_data_gpu_addr,
- (void **)&test->mqd_data_cpu_addr);
- if (r)
- goto error_free_ctx_data_obj;
-
- memset(test->mqd_data_cpu_addr, 0, PAGE_SIZE);
-
- r = amdgpu_bo_create_kernel(adev, sizeof(struct umsch_mm_test_ring_data),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
- &test->ring_data_obj,
- NULL,
- (void **)&test->ring_data_cpu_addr);
- if (r)
- goto error_free_mqd_data_obj;
-
- memset(test->ring_data_cpu_addr, 0, sizeof(struct umsch_mm_test_ring_data));
-
- test->ring_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
- r = map_ring_data(adev, test->vm, test->ring_data_obj, &test->bo_va,
- test->ring_data_gpu_addr, sizeof(struct umsch_mm_test_ring_data));
- if (r)
- goto error_free_ring_data_obj;
-
- return 0;
-
-error_free_ring_data_obj:
- amdgpu_bo_free_kernel(&test->ring_data_obj, NULL,
- (void **)&test->ring_data_cpu_addr);
-error_free_mqd_data_obj:
- amdgpu_bo_free_kernel(&test->mqd_data_obj, &test->mqd_data_gpu_addr,
- (void **)&test->mqd_data_cpu_addr);
-error_free_ctx_data_obj:
- amdgpu_bo_free_kernel(&test->ctx_data_obj, &test->ctx_data_gpu_addr,
- (void **)&test->ctx_data_cpu_addr);
-error_free_pasid:
- amdgpu_pasid_free(test->pasid);
-error_fini_vm:
- amdgpu_vm_fini(adev, test->vm);
-error_free_vm:
- kfree(test->vm);
-
- return r;
-}
-
-static void cleanup_umsch_mm_test(struct amdgpu_device *adev,
- struct umsch_mm_test *test)
-{
- unmap_ring_data(adev, test->vm, test->ring_data_obj,
- test->bo_va, test->ring_data_gpu_addr);
- amdgpu_bo_free_kernel(&test->mqd_data_obj, &test->mqd_data_gpu_addr,
- (void **)&test->mqd_data_cpu_addr);
- amdgpu_bo_free_kernel(&test->ring_data_obj, NULL,
- (void **)&test->ring_data_cpu_addr);
- amdgpu_bo_free_kernel(&test->ctx_data_obj, &test->ctx_data_gpu_addr,
- (void **)&test->ctx_data_cpu_addr);
- amdgpu_pasid_free(test->pasid);
- amdgpu_vm_fini(adev, test->vm);
- kfree(test->vm);
-}
-
-static int setup_test_queues(struct amdgpu_device *adev,
- struct umsch_mm_test *test,
- struct umsch_mm_test_queue_info *qinfo)
-{
- int i, r;
-
- for (i = 0; i < test->num_queues; i++) {
- if (qinfo[i].engine == UMSCH_SWIP_ENGINE_TYPE_VPE)
- setup_vpe_queue(adev, test, &qinfo[i]);
- else
- setup_vcn_queue(adev, test, &qinfo[i]);
-
- r = add_test_queue(adev, test, &qinfo[i]);
- if (r)
- return r;
- }
-
- return 0;
-}
-
-static int submit_test_queues(struct amdgpu_device *adev,
- struct umsch_mm_test *test,
- struct umsch_mm_test_queue_info *qinfo)
-{
- int i, r;
-
- for (i = 0; i < test->num_queues; i++) {
- if (qinfo[i].engine == UMSCH_SWIP_ENGINE_TYPE_VPE)
- r = submit_vpe_queue(adev, test);
- else
- r = submit_vcn_queue(adev, test);
- if (r)
- return r;
- }
-
- return 0;
-}
-
-static void cleanup_test_queues(struct amdgpu_device *adev,
- struct umsch_mm_test *test,
- struct umsch_mm_test_queue_info *qinfo)
-{
- int i;
-
- for (i = 0; i < test->num_queues; i++)
- remove_test_queue(adev, test, &qinfo[i]);
-}
-
-static int umsch_mm_test(struct amdgpu_device *adev)
-{
- struct umsch_mm_test_queue_info qinfo[] = {
- { .engine = UMSCH_SWIP_ENGINE_TYPE_VPE },
- };
- struct umsch_mm_test test = { .num_queues = ARRAY_SIZE(qinfo) };
- int r;
-
- r = setup_umsch_mm_test(adev, &test);
- if (r)
- return r;
-
- r = setup_test_queues(adev, &test, qinfo);
- if (r)
- goto cleanup;
-
- r = submit_test_queues(adev, &test, qinfo);
- if (r)
- goto cleanup;
-
- cleanup_test_queues(adev, &test, qinfo);
- cleanup_umsch_mm_test(adev, &test);
-
- return 0;
-
-cleanup:
- cleanup_test_queues(adev, &test, qinfo);
- cleanup_umsch_mm_test(adev, &test);
- return r;
-}
+MODULE_FIRMWARE("amdgpu/umsch_mm_4_0_0.bin");
int amdgpu_umsch_mm_submit_pkt(struct amdgpu_umsch_mm *umsch, void *pkt, int ndws)
{
@@ -581,14 +126,14 @@ int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch)
switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
case IP_VERSION(4, 0, 5):
case IP_VERSION(4, 0, 6):
- fw_name = "amdgpu/umsch_mm_4_0_0.bin";
+ fw_name = "4_0_0";
break;
default:
- break;
+ return -EINVAL;
}
r = amdgpu_ucode_request(adev, &adev->umsch_mm.fw, AMDGPU_UCODE_REQUIRED,
- "%s", fw_name);
+ "amdgpu/umsch_mm_%s.bin", fw_name);
if (r) {
release_firmware(adev->umsch_mm.fw);
adev->umsch_mm.fw = NULL;
@@ -792,7 +337,7 @@ static int umsch_mm_late_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_in_reset(adev) || adev->in_s0ix || adev->in_suspend)
return 0;
- return umsch_mm_test(adev);
+ return 0;
}
static int umsch_mm_sw_init(struct amdgpu_ip_block *ip_block)
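
Illustrative aside: the firmware request above now builds the file name from a version suffix, so the single MODULE_FIRMWARE() declaration and the request stay in sync. A standalone sketch of the same formatting:

#include <stdio.h>

int main(void)
{
        char fw_path[64];
        const char *fw_name = "4_0_0"; /* as selected for VCN IP 4.0.5/4.0.6 above */

        /* same format string as the amdgpu_ucode_request() call above */
        snprintf(fw_path, sizeof(fw_path), "amdgpu/umsch_mm_%s.bin", fw_name);
        printf("%s\n", fw_path); /* prints amdgpu/umsch_mm_4_0_0.bin */
        return 0;
}
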
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 83faf6e6788a..1991dd3d1056 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -93,47 +93,53 @@ MODULE_FIRMWARE(FIRMWARE_VCN5_0_1);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
-int amdgpu_vcn_early_init(struct amdgpu_device *adev)
+int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i)
{
char ucode_prefix[25];
- int r, i;
+ int r;
+ adev->vcn.inst[i].adev = adev;
+ adev->vcn.inst[i].inst = i;
amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (i == 1 && amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 6))
- r = amdgpu_ucode_request(adev, &adev->vcn.inst[i].fw,
- AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s_%d.bin", ucode_prefix, i);
- else
- r = amdgpu_ucode_request(adev, &adev->vcn.inst[i].fw,
+
+ if (i != 0 && adev->vcn.per_inst_fw) {
+ r = amdgpu_ucode_request(adev, &adev->vcn.inst[i].fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_%d.bin", ucode_prefix, i);
+ if (r)
+ amdgpu_ucode_release(&adev->vcn.inst[i].fw);
+ } else {
+ if (!adev->vcn.inst[0].fw) {
+ r = amdgpu_ucode_request(adev, &adev->vcn.inst[0].fw,
AMDGPU_UCODE_REQUIRED,
"amdgpu/%s.bin", ucode_prefix);
- if (r) {
- amdgpu_ucode_release(&adev->vcn.inst[i].fw);
- return r;
+ if (r)
+ amdgpu_ucode_release(&adev->vcn.inst[0].fw);
+ } else {
+ r = 0;
}
+ adev->vcn.inst[i].fw = adev->vcn.inst[0].fw;
}
+
return r;
}
-int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i)
{
unsigned long bo_size;
const struct common_firmware_header *hdr;
unsigned char fw_check;
unsigned int fw_shared_size, log_offset;
- int i, r;
-
- INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
- mutex_init(&adev->vcn.vcn_pg_lock);
- mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
- atomic_set(&adev->vcn.total_submission_cnt, 0);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++)
- atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
+ int r;
+ mutex_init(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
+ mutex_init(&adev->vcn.inst[i].vcn_pg_lock);
+ atomic_set(&adev->vcn.inst[i].total_submission_cnt, 0);
+ INIT_DELAYED_WORK(&adev->vcn.inst[i].idle_work, amdgpu_vcn_idle_work_handler);
+ atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
+ adev->vcn.inst[i].indirect_sram = true;
/*
* Some Steam Deck's BIOS versions are incompatible with the
@@ -146,18 +152,19 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
- !strncmp("F7A0114", bios_ver, 7))) {
- adev->vcn.indirect_sram = false;
+ !strncmp("F7A0114", bios_ver, 7))) {
+ adev->vcn.inst[i].indirect_sram = false;
dev_info(adev->dev,
- "Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
+ "Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
}
}
/* from vcn4 and above, only unified queue is used */
- adev->vcn.using_unified_queue =
+ adev->vcn.inst[i].using_unified_queue =
amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0);
- hdr = (const struct common_firmware_header *)adev->vcn.inst[0].fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
+ adev->vcn.inst[i].fw_version = le32_to_cpu(hdr->ucode_version);
adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
/* Bit 20-23, it is encode major and non-zero for new naming convention.
@@ -175,16 +182,17 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
enc_major = fw_check;
dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
- DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
- enc_major, enc_minor, dec_ver, vep, fw_rev);
+ dev_info(adev->dev,
+ "Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
+ enc_major, enc_minor, dec_ver, vep, fw_rev);
} else {
unsigned int version_major, version_minor, family_id;
family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
- DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
- version_major, version_minor, family_id);
+ dev_info(adev->dev, "Found VCN firmware Version: %u.%u Family ID: %u\n",
+ version_major, version_minor, family_id);
}
bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
@@ -207,80 +215,77 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
if (amdgpu_vcnfw_log)
bo_size += AMDGPU_VCNFW_LOG_SIZE;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->vcn.inst[i].vcpu_bo,
+ &adev->vcn.inst[i].gpu_addr,
+ &adev->vcn.inst[i].cpu_addr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
+ return r;
+ }
- r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+ adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
+ bo_size - fw_shared_size;
+ adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
+ bo_size - fw_shared_size;
+
+ adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;
+
+ if (amdgpu_vcnfw_log) {
+ adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
+ adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
+ adev->vcn.inst[i].fw_shared.log_offset = log_offset;
+ }
+
+ if (adev->vcn.inst[i].indirect_sram) {
+ r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
- &adev->vcn.inst[i].vcpu_bo,
- &adev->vcn.inst[i].gpu_addr,
- &adev->vcn.inst[i].cpu_addr);
+ &adev->vcn.inst[i].dpg_sram_bo,
+ &adev->vcn.inst[i].dpg_sram_gpu_addr,
+ &adev->vcn.inst[i].dpg_sram_cpu_addr);
if (r) {
- dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
+ dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
return r;
}
-
- adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
- bo_size - fw_shared_size;
- adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
- bo_size - fw_shared_size;
-
- adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;
-
- if (amdgpu_vcnfw_log) {
- adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
- adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
- adev->vcn.inst[i].fw_shared.log_offset = log_offset;
- }
-
- if (adev->vcn.indirect_sram) {
- r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM |
- AMDGPU_GEM_DOMAIN_GTT,
- &adev->vcn.inst[i].dpg_sram_bo,
- &adev->vcn.inst[i].dpg_sram_gpu_addr,
- &adev->vcn.inst[i].dpg_sram_cpu_addr);
- if (r) {
- dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
- return r;
- }
- }
}
return 0;
}
-int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
+int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i)
{
- int i, j;
+ int j;
- for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
- if (adev->vcn.harvest_config & (1 << j))
- continue;
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- amdgpu_bo_free_kernel(
- &adev->vcn.inst[j].dpg_sram_bo,
- &adev->vcn.inst[j].dpg_sram_gpu_addr,
- (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
+ amdgpu_bo_free_kernel(
+ &adev->vcn.inst[i].dpg_sram_bo,
+ &adev->vcn.inst[i].dpg_sram_gpu_addr,
+ (void **)&adev->vcn.inst[i].dpg_sram_cpu_addr);
- kvfree(adev->vcn.inst[j].saved_bo);
+ kvfree(adev->vcn.inst[i].saved_bo);
- amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
- &adev->vcn.inst[j].gpu_addr,
- (void **)&adev->vcn.inst[j].cpu_addr);
+ amdgpu_bo_free_kernel(&adev->vcn.inst[i].vcpu_bo,
+ &adev->vcn.inst[i].gpu_addr,
+ (void **)&adev->vcn.inst[i].cpu_addr);
- amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);
+ amdgpu_ring_fini(&adev->vcn.inst[i].ring_dec);
- for (i = 0; i < adev->vcn.num_enc_rings; ++i)
- amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j)
+ amdgpu_ring_fini(&adev->vcn.inst[i].ring_enc[j]);
- amdgpu_ucode_release(&adev->vcn.inst[j].fw);
+ if (adev->vcn.per_inst_fw) {
+ amdgpu_ucode_release(&adev->vcn.inst[i].fw);
+ } else {
+ amdgpu_ucode_release(&adev->vcn.inst[0].fw);
+ adev->vcn.inst[i].fw = NULL;
}
-
- mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
- mutex_destroy(&adev->vcn.vcn_pg_lock);
+ mutex_destroy(&adev->vcn.inst[i].vcn_pg_lock);
+ mutex_destroy(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
return 0;
}
@@ -300,179 +305,208 @@ bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type t
return ret;
}
-int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev)
+static int amdgpu_vcn_save_vcpu_bo_inst(struct amdgpu_device *adev, int i)
{
unsigned int size;
void *ptr;
- int i, idx;
+ int idx;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- if (adev->vcn.inst[i].vcpu_bo == NULL)
- return 0;
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
+ if (adev->vcn.inst[i].vcpu_bo == NULL)
+ return 0;
- size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
- ptr = adev->vcn.inst[i].cpu_addr;
+ size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
+ ptr = adev->vcn.inst[i].cpu_addr;
- adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
- if (!adev->vcn.inst[i].saved_bo)
- return -ENOMEM;
+ adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
+ if (!adev->vcn.inst[i].saved_bo)
+ return -ENOMEM;
- if (drm_dev_enter(adev_to_drm(adev), &idx)) {
- memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
- drm_dev_exit(idx);
- }
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
+ drm_dev_exit(idx);
+ }
+
+ return 0;
+}
+
+int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev)
+{
+ int ret, i;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ ret = amdgpu_vcn_save_vcpu_bo_inst(adev, i);
+ if (ret)
+ return ret;
}
return 0;
}
-int amdgpu_vcn_suspend(struct amdgpu_device *adev)
+int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i)
{
bool in_ras_intr = amdgpu_ras_intr_triggered();
- cancel_delayed_work_sync(&adev->vcn.idle_work);
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
+
+ cancel_delayed_work_sync(&adev->vcn.inst[i].idle_work);
/* err_event_athub will corrupt VCPU buffer, so we need to
* restore fw data and clear buffer in amdgpu_vcn_resume() */
if (in_ras_intr)
return 0;
- return amdgpu_vcn_save_vcpu_bo(adev);
+ return amdgpu_vcn_save_vcpu_bo_inst(adev, i);
}
-int amdgpu_vcn_resume(struct amdgpu_device *adev)
+int amdgpu_vcn_resume(struct amdgpu_device *adev, int i)
{
unsigned int size;
void *ptr;
- int i, idx;
+ int idx;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- if (adev->vcn.inst[i].vcpu_bo == NULL)
- return -EINVAL;
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
+ if (adev->vcn.inst[i].vcpu_bo == NULL)
+ return -EINVAL;
- size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
- ptr = adev->vcn.inst[i].cpu_addr;
+ size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
+ ptr = adev->vcn.inst[i].cpu_addr;
- if (adev->vcn.inst[i].saved_bo != NULL) {
+ if (adev->vcn.inst[i].saved_bo != NULL) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
+ drm_dev_exit(idx);
+ }
+ kvfree(adev->vcn.inst[i].saved_bo);
+ adev->vcn.inst[i].saved_bo = NULL;
+ } else {
+ const struct common_firmware_header *hdr;
+ unsigned int offset;
+
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
- memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
+ memcpy_toio(adev->vcn.inst[i].cpu_addr,
+ adev->vcn.inst[i].fw->data + offset,
+ le32_to_cpu(hdr->ucode_size_bytes));
drm_dev_exit(idx);
}
- kvfree(adev->vcn.inst[i].saved_bo);
- adev->vcn.inst[i].saved_bo = NULL;
- } else {
- const struct common_firmware_header *hdr;
- unsigned int offset;
-
- hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
- offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- if (drm_dev_enter(adev_to_drm(adev), &idx)) {
- memcpy_toio(adev->vcn.inst[i].cpu_addr,
- adev->vcn.inst[i].fw->data + offset,
- le32_to_cpu(hdr->ucode_size_bytes));
- drm_dev_exit(idx);
- }
- size -= le32_to_cpu(hdr->ucode_size_bytes);
- ptr += le32_to_cpu(hdr->ucode_size_bytes);
- }
- memset_io(ptr, 0, size);
+ size -= le32_to_cpu(hdr->ucode_size_bytes);
+ ptr += le32_to_cpu(hdr->ucode_size_bytes);
}
+ memset_io(ptr, 0, size);
}
+
return 0;
}
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
- struct amdgpu_device *adev =
- container_of(work, struct amdgpu_device, vcn.idle_work.work);
+ struct amdgpu_vcn_inst *vcn_inst =
+ container_of(work, struct amdgpu_vcn_inst, idle_work.work);
+ struct amdgpu_device *adev = vcn_inst->adev;
unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
- unsigned int i, j;
+ unsigned int i = vcn_inst->inst, j;
int r = 0;
- for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
- if (adev->vcn.harvest_config & (1 << j))
- continue;
-
- for (i = 0; i < adev->vcn.num_enc_rings; ++i)
- fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
+ if (adev->vcn.harvest_config & (1 << i))
+ return;
- /* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
- !adev->vcn.using_unified_queue) {
- struct dpg_pause_state new_state;
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j)
+ fence[i] += amdgpu_fence_count_emitted(&vcn_inst->ring_enc[j]);
- if (fence[j] ||
- unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
- new_state.fw_based = VCN_DPG_STATE__PAUSE;
- else
- new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+ /* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+ !adev->vcn.inst[i].using_unified_queue) {
+ struct dpg_pause_state new_state;
- adev->vcn.pause_dpg_mode(adev, j, &new_state);
- }
+ if (fence[i] ||
+ unlikely(atomic_read(&vcn_inst->dpg_enc_submission_cnt)))
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
- fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
- fences += fence[j];
+ adev->vcn.inst[i].pause_dpg_mode(vcn_inst, &new_state);
}
- if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
- AMD_PG_STATE_GATE);
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- false);
- if (r)
- dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
+ fence[i] += amdgpu_fence_count_emitted(&vcn_inst->ring_dec);
+ fences += fence[i];
+
+ if (!fences && !atomic_read(&vcn_inst->total_submission_cnt)) {
+ vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_GATE);
+ mutex_lock(&adev->vcn.workload_profile_mutex);
+ if (adev->vcn.workload_profile_active) {
+ r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
+ false);
+ if (r)
+ dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
+ adev->vcn.workload_profile_active = false;
+ }
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
} else {
- schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
+ schedule_delayed_work(&vcn_inst->idle_work, VCN_IDLE_TIMEOUT);
}
}
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *vcn_inst = &adev->vcn.inst[ring->me];
int r = 0;
- atomic_inc(&adev->vcn.total_submission_cnt);
+ atomic_inc(&vcn_inst->total_submission_cnt);
+
+ cancel_delayed_work_sync(&vcn_inst->idle_work);
- if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) {
+ /* We can safely return early here because we've cancelled the
+ * delayed work, so there is no one else to set it to false
+ * and we don't care if someone else sets it to true.
+ */
+ if (adev->vcn.workload_profile_active)
+ goto pg_lock;
+
+ mutex_lock(&adev->vcn.workload_profile_mutex);
+ if (!adev->vcn.workload_profile_active) {
r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- true);
+ true);
if (r)
dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
+ adev->vcn.workload_profile_active = true;
}
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
- mutex_lock(&adev->vcn.vcn_pg_lock);
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
- AMD_PG_STATE_UNGATE);
+pg_lock:
+ mutex_lock(&vcn_inst->vcn_pg_lock);
+ vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_UNGATE);
/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
- !adev->vcn.using_unified_queue) {
+ !vcn_inst->using_unified_queue) {
struct dpg_pause_state new_state;
if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
- atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
+ atomic_inc(&vcn_inst->dpg_enc_submission_cnt);
new_state.fw_based = VCN_DPG_STATE__PAUSE;
} else {
unsigned int fences = 0;
unsigned int i;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i)
- fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
+ for (i = 0; i < vcn_inst->num_enc_rings; ++i)
+ fences += amdgpu_fence_count_emitted(&vcn_inst->ring_enc[i]);
- if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
+ if (fences || atomic_read(&vcn_inst->dpg_enc_submission_cnt))
new_state.fw_based = VCN_DPG_STATE__PAUSE;
else
new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
}
- adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
+ vcn_inst->pause_dpg_mode(vcn_inst, &new_state);
}
- mutex_unlock(&adev->vcn.vcn_pg_lock);
+ mutex_unlock(&vcn_inst->vcn_pg_lock);
}
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
@@ -482,12 +516,13 @@ void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC &&
- !adev->vcn.using_unified_queue)
+ !adev->vcn.inst[ring->me].using_unified_queue)
atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
- atomic_dec(&ring->adev->vcn.total_submission_cnt);
+ atomic_dec(&ring->adev->vcn.inst[ring->me].total_submission_cnt);
- schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
+ schedule_delayed_work(&ring->adev->vcn.inst[ring->me].idle_work,
+ VCN_IDLE_TIMEOUT);
}
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
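
Illustrative aside: amdgpu_vcn_ring_begin_use() above now uses a check, lock, re-check pattern around workload_profile_active so the common case skips the mutex entirely. A standalone userspace sketch of that pattern with pthreads; enable_video_profile() stands in for the dpm power-profile switch:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t profile_lock = PTHREAD_MUTEX_INITIALIZER;
static bool profile_active;

static void enable_video_profile(void)
{
        printf("switching to video power profile\n");
}

static void begin_use(void)
{
        /* fast path: profile already active, no lock needed */
        if (profile_active)
                return;

        pthread_mutex_lock(&profile_lock);
        if (!profile_active) {          /* re-check under the lock */
                enable_video_profile();
                profile_active = true;
        }
        pthread_mutex_unlock(&profile_lock);
}

int main(void)
{
        begin_use();    /* switches the profile */
        begin_use();    /* fast path, nothing to do */
        return 0;
}
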
@@ -505,7 +540,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 3);
if (r)
return r;
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.scratch9, 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
@@ -570,14 +605,14 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
goto err;
ib = &job->ibs[0];
- ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
+ ib->ptr[0] = PACKET0(adev->vcn.inst[ring->me].internal.data0, 0);
ib->ptr[1] = addr;
- ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
+ ib->ptr[2] = PACKET0(adev->vcn.inst[ring->me].internal.data1, 0);
ib->ptr[3] = addr >> 32;
- ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
+ ib->ptr[4] = PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0);
ib->ptr[5] = 0;
for (i = 6; i < 16; i += 2) {
- ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
+ ib->ptr[i] = PACKET0(adev->vcn.inst[ring->me].internal.nop, 0);
ib->ptr[i+1] = 0;
}
ib->length_dw = 16;
@@ -740,7 +775,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
uint32_t ib_pack_in_dw;
int i, r;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
ib_size_dw += 8;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
@@ -753,7 +788,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
ib->length_dw = 0;
/* single queue headers */
- if (adev->vcn.using_unified_queue) {
+ if (adev->vcn.inst[ring->me].using_unified_queue) {
ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
+ 4 + 2; /* engine info + decoding ib in dw */
ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
@@ -772,7 +807,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
r = amdgpu_job_submit_direct(job, ring, &f);
@@ -870,7 +905,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
uint64_t addr;
int i, r;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
ib_size_dw += 8;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
@@ -884,7 +919,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
ib->length_dw = 0;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
ib->ptr[ib->length_dw++] = 0x00000018;
@@ -906,7 +941,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
r = amdgpu_job_submit_direct(job, ring, &f);
@@ -937,7 +972,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
uint64_t addr;
int i, r;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
ib_size_dw += 8;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
@@ -951,7 +986,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
ib->length_dw = 0;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
ib->ptr[ib->length_dw++] = 0x00000018;
@@ -973,7 +1008,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- if (adev->vcn.using_unified_queue)
+ if (adev->vcn.inst[ring->me].using_unified_queue)
amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
r = amdgpu_job_submit_direct(job, ring, &f);
@@ -1058,36 +1093,32 @@ enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
}
}
-void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
+void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev, int i)
{
- int i;
unsigned int idx;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
const struct common_firmware_header *hdr;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
- /* currently only support 2 FW instances */
- if (i >= 2) {
- dev_info(adev->dev, "More then 2 VCN FW instances!\n");
- break;
- }
- idx = AMDGPU_UCODE_ID_VCN + i;
- adev->firmware.ucode[idx].ucode_id = idx;
- adev->firmware.ucode[idx].fw = adev->vcn.inst[i].fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
-
- if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
- IP_VERSION(4, 0, 3) ||
- amdgpu_ip_version(adev, UVD_HWIP, 0) ==
- IP_VERSION(5, 0, 1))
- break;
+ if (adev->vcn.harvest_config & (1 << i))
+ return;
+
+ if ((amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 3) ||
+ amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(5, 0, 1))
+ && (i > 0))
+ return;
+
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
+ /* currently only support 2 FW instances */
+ if (i >= 2) {
+ dev_info(adev->dev, "More then 2 VCN FW instances!\n");
+ return;
}
+ idx = AMDGPU_UCODE_ID_VCN + i;
+ adev->firmware.ucode[idx].ucode_id = idx;
+ adev->firmware.ucode[idx].fw = adev->vcn.inst[i].fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
}
}
@@ -1390,10 +1421,33 @@ void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev)
struct dentry *root = minor->debugfs_root;
char name[32];
- if (adev->vcn.num_vcn_inst <= 1 || !adev->vcn.using_unified_queue)
+ if (adev->vcn.num_vcn_inst <= 1 || !adev->vcn.inst[0].using_unified_queue)
return;
sprintf(name, "amdgpu_vcn_sched_mask");
debugfs_create_file(name, 0600, root, adev,
&amdgpu_debugfs_vcn_sched_mask_fops);
#endif
}
+
+/**
+ * vcn_set_powergating_state - set VCN block powergating state
+ *
+ * @ip_block: amdgpu_ip_block pointer
+ * @state: power gating state
+ *
+ * Set VCN block powergating state
+ */
+int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int ret = 0, i;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
+ ret |= vinst->set_pg_state(vinst, state);
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index adaf4388ad28..cdcdae7f71ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -238,6 +238,12 @@
#define AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING 2
+enum amdgpu_vcn_caps {
+ AMDGPU_VCN_RRMT_ENABLED,
+};
+
+#define AMDGPU_VCN_CAPS(caps) BIT(AMDGPU_VCN_##caps)
+
enum fw_queue_mode {
FW_QUEUE_RING_RESET = 1,
FW_QUEUE_DPG_HOLD_OFF = 2,
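For reference, a minimal sketch of how the new caps bitmask can be consulted, assuming the caps word added to struct amdgpu_vcn later in this header; vcn_caps_example() is a hypothetical helper, not part of the patch.

#include <linux/bits.h>
#include <linux/types.h>

/* AMDGPU_VCN_CAPS(RRMT_ENABLED) expands to BIT(AMDGPU_VCN_RRMT_ENABLED),
 * i.e. 0x1 for the first enum entry above.
 */
static inline bool vcn_caps_example(u32 caps)
{
	/* test whether the RRMT capability bit is set in the caps word */
	return caps & AMDGPU_VCN_CAPS(RRMT_ENABLED);
}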
@@ -289,6 +295,8 @@ struct amdgpu_vcn_fw_shared {
};
struct amdgpu_vcn_inst {
+ struct amdgpu_device *adev;
+ int inst;
struct amdgpu_bo *vcpu_bo;
void *cpu_addr;
uint64_t gpu_addr;
@@ -310,6 +318,20 @@ struct amdgpu_vcn_inst {
const struct firmware *fw; /* VCN firmware */
uint8_t vcn_config;
uint32_t vcn_codec_disable_mask;
+ atomic_t total_submission_cnt;
+ struct mutex vcn_pg_lock;
+ enum amd_powergating_state cur_state;
+ struct delayed_work idle_work;
+ unsigned fw_version;
+ unsigned num_enc_rings;
+ bool indirect_sram;
+ struct amdgpu_vcn_reg internal;
+ struct mutex vcn1_jpeg1_workaround;
+ int (*pause_dpg_mode)(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
+ int (*set_pg_state)(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+ bool using_unified_queue;
};
struct amdgpu_vcn_ras {
@@ -317,34 +339,28 @@ struct amdgpu_vcn_ras {
};
struct amdgpu_vcn {
- unsigned fw_version;
- struct delayed_work idle_work;
- unsigned num_enc_rings;
- enum amd_powergating_state cur_state;
- bool indirect_sram;
-
uint8_t num_vcn_inst;
struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES];
- struct amdgpu_vcn_reg internal;
- struct mutex vcn_pg_lock;
- struct mutex vcn1_jpeg1_workaround;
- atomic_t total_submission_cnt;
unsigned harvest_config;
- int (*pause_dpg_mode)(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state);
struct ras_common_if *ras_if;
struct amdgpu_vcn_ras *ras;
uint16_t inst_mask;
uint8_t num_inst_per_aid;
- bool using_unified_queue;
/* IP reg dump */
uint32_t *ip_dump;
uint32_t supported_reset;
+ uint32_t caps;
+
+ bool per_inst_fw;
+ unsigned fw_version;
+
+ bool workload_profile_active;
+ struct mutex workload_profile_mutex;
};
struct amdgpu_fw_shared_rb_ptrs_struct {
@@ -493,11 +509,11 @@ enum vcn_ring_type {
VCN_UNIFIED_RING,
};
-int amdgpu_vcn_early_init(struct amdgpu_device *adev);
-int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
-int amdgpu_vcn_sw_fini(struct amdgpu_device *adev);
-int amdgpu_vcn_suspend(struct amdgpu_device *adev);
-int amdgpu_vcn_resume(struct amdgpu_device *adev);
+int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i);
+int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i);
+int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i);
+int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i);
+int amdgpu_vcn_resume(struct amdgpu_device *adev, int i);
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring);
@@ -515,7 +531,7 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout);
enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring);
-void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev);
+void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev, int i);
void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn);
void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev,
@@ -535,4 +551,7 @@ int amdgpu_vcn_sysfs_reset_mask_init(struct amdgpu_device *adev);
void amdgpu_vcn_sysfs_reset_mask_fini(struct amdgpu_device *adev);
void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev);
+int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 0af469ec6fcc..0bb8cbe0dcc0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -614,10 +614,11 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
vf2pf_info->decode_usage = 0;
vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
- vf2pf_info->mes_info_addr = (uint64_t)adev->mes.resource_1_gpu_addr;
-
- if (adev->mes.resource_1) {
- vf2pf_info->mes_info_size = adev->mes.resource_1->tbo.base.size;
+ if (amdgpu_sriov_is_mes_info_enable(adev)) {
+ vf2pf_info->mes_info_addr =
+ (uint64_t)(adev->mes.resource_1_gpu_addr[0] + AMDGPU_GPU_PAGE_SIZE);
+ vf2pf_info->mes_info_size =
+ adev->mes.resource_1[0]->tbo.base.size - AMDGPU_GPU_PAGE_SIZE;
}
vf2pf_info->checksum =
amd_sriov_msg_checksum(
@@ -739,7 +740,7 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
}
}
-void amdgpu_detect_virtualization(struct amdgpu_device *adev)
+static u32 amdgpu_virt_init_detect_asic(struct amdgpu_device *adev)
{
uint32_t reg;
@@ -775,8 +776,17 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}
+ return reg;
+}
+
+static bool amdgpu_virt_init_req_data(struct amdgpu_device *adev, u32 reg)
+{
+ bool is_sriov = false;
+
/* we have the ability to check now */
if (amdgpu_sriov_vf(adev)) {
+ is_sriov = true;
+
switch (adev->asic_type) {
case CHIP_TONGA:
case CHIP_FIJI:
@@ -805,10 +815,39 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
amdgpu_virt_request_init_data(adev);
break;
default: /* other chip doesn't support SRIOV */
+ is_sriov = false;
DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type);
break;
}
}
+
+ return is_sriov;
+}
+
+static void amdgpu_virt_init_ras(struct amdgpu_device *adev)
+{
+ ratelimit_state_init(&adev->virt.ras.ras_error_cnt_rs, 5 * HZ, 1);
+ ratelimit_state_init(&adev->virt.ras.ras_cper_dump_rs, 5 * HZ, 1);
+
+ ratelimit_set_flags(&adev->virt.ras.ras_error_cnt_rs,
+ RATELIMIT_MSG_ON_RELEASE);
+ ratelimit_set_flags(&adev->virt.ras.ras_cper_dump_rs,
+ RATELIMIT_MSG_ON_RELEASE);
+
+ mutex_init(&adev->virt.ras.ras_telemetry_mutex);
+
+ adev->virt.ras.cper_rptr = 0;
+}
+
+void amdgpu_virt_init(struct amdgpu_device *adev)
+{
+ bool is_sriov = false;
+ uint32_t reg = amdgpu_virt_init_detect_asic(adev);
+
+ is_sriov = amdgpu_virt_init_req_data(adev, reg);
+
+ if (is_sriov)
+ amdgpu_virt_init_ras(adev);
}
static bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
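As a side note, the ratelimit initialization above follows the standard <linux/ratelimit.h> pattern: one call allowed per 5-second window, with a message emitted on release if calls were suppressed. A minimal sketch, with guest_request() as a hypothetical stand-in for the expensive host round trip:

#include <linux/ratelimit.h>
#include <linux/jiffies.h>

static struct ratelimit_state example_rs;

static void guest_request(void)
{
	/* hypothetical placeholder for the actual host message */
}

static void example_init(void)
{
	/* at most one call per 5 seconds (burst of 1) */
	ratelimit_state_init(&example_rs, 5 * HZ, 1);
	ratelimit_set_flags(&example_rs, RATELIMIT_MSG_ON_RELEASE);
}

static void example_poll(bool force)
{
	/* __ratelimit() returns nonzero while the budget is not exhausted */
	if (__ratelimit(&example_rs) || force)
		guest_request();
}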
@@ -1017,6 +1056,7 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f
void *scratch_reg2;
void *scratch_reg3;
void *spare_int;
+ unsigned long flags;
if (!adev->gfx.rlc.rlcg_reg_access_supported) {
dev_err(adev->dev,
@@ -1038,7 +1078,7 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f
scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
- mutex_lock(&adev->virt.rlcg_reg_lock);
+ spin_lock_irqsave(&adev->virt.rlcg_reg_lock, flags);
if (reg_access_ctrl->spare_int)
spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;
@@ -1097,7 +1137,7 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f
ret = readl(scratch_reg0);
- mutex_unlock(&adev->virt.rlcg_reg_lock);
+ spin_unlock_irqrestore(&adev->virt.rlcg_reg_lock, flags);
return ret;
}
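The mutex-to-spinlock conversion above is the usual pattern when a lock may be taken from a context that cannot sleep, which is presumably why the RLCG path switches to the irqsave variant here. A minimal sketch, with do_reg_access() as a hypothetical placeholder for the scratch-register handshake:

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(example_lock);

static u32 do_reg_access(void)
{
	return 0;	/* hypothetical placeholder */
}

static u32 example_reg_rw(void)
{
	unsigned long flags;
	u32 ret;

	/* disable local interrupts and take the lock; both are restored on unlock */
	spin_lock_irqsave(&example_lock, flags);
	ret = do_reg_access();
	spin_unlock_irqrestore(&example_lock, flags);

	return ret;
}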
@@ -1246,7 +1286,8 @@ amdgpu_ras_block_to_sriov(struct amdgpu_device *adev, enum amdgpu_ras_block bloc
case AMDGPU_RAS_BLOCK__MPIO:
return RAS_TELEMETRY_GPU_BLOCK_MPIO;
default:
- dev_err(adev->dev, "Unsupported SRIOV RAS telemetry block 0x%x\n", block);
+ DRM_WARN_ONCE("Unsupported SRIOV RAS telemetry block 0x%x\n",
+ block);
return RAS_TELEMETRY_GPU_BLOCK_COUNT;
}
}
@@ -1286,10 +1327,12 @@ static int amdgpu_virt_req_ras_err_count_internal(struct amdgpu_device *adev, bo
* will ignore incoming guest messages. Ratelimit the guest messages to
* prevent guest self DOS.
*/
- if (__ratelimit(&adev->virt.ras_telemetry_rs) || force_update) {
+ if (__ratelimit(&virt->ras.ras_error_cnt_rs) || force_update) {
+ mutex_lock(&virt->ras.ras_telemetry_mutex);
if (!virt->ops->req_ras_err_count(adev))
amdgpu_virt_cache_host_error_counts(adev,
- adev->virt.fw_reserve.ras_telemetry);
+ virt->fw_reserve.ras_telemetry);
+ mutex_unlock(&virt->ras.ras_telemetry_mutex);
}
return 0;
@@ -1320,6 +1363,98 @@ int amdgpu_virt_req_ras_err_count(struct amdgpu_device *adev, enum amdgpu_ras_bl
return 0;
}
+static int
+amdgpu_virt_write_cpers_to_ring(struct amdgpu_device *adev,
+ struct amdsriov_ras_telemetry *host_telemetry,
+ u32 *more)
+{
+ struct amd_sriov_ras_cper_dump *cper_dump = NULL;
+ struct cper_hdr *entry = NULL;
+ struct amdgpu_ring *ring = &adev->cper.ring_buf;
+ uint32_t checksum, used_size, i;
+ int ret = 0;
+
+ checksum = host_telemetry->header.checksum;
+ used_size = host_telemetry->header.used_size;
+
+ if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
+ return 0;
+
+ cper_dump = kmemdup(&host_telemetry->body.cper_dump, used_size, GFP_KERNEL);
+ if (!cper_dump)
+ return -ENOMEM;
+
+ if (checksum != amd_sriov_msg_checksum(cper_dump, used_size, 0, 0))
+ goto out;
+
+ *more = cper_dump->more;
+
+ if (cper_dump->wptr < adev->virt.ras.cper_rptr) {
+ dev_warn(
+ adev->dev,
+ "guest specified rptr that was too high! guest rptr: 0x%llx, host rptr: 0x%llx\n",
+ adev->virt.ras.cper_rptr, cper_dump->wptr);
+
+ adev->virt.ras.cper_rptr = cper_dump->wptr;
+ goto out;
+ }
+
+ entry = (struct cper_hdr *)&cper_dump->buf[0];
+
+ for (i = 0; i < cper_dump->count; i++) {
+ amdgpu_cper_ring_write(ring, entry, entry->record_length);
+ entry = (struct cper_hdr *)((char *)entry +
+ entry->record_length);
+ }
+
+ if (cper_dump->overflow_count)
+ dev_warn(adev->dev,
+ "host reported CPER overflow of 0x%llx entries!\n",
+ cper_dump->overflow_count);
+
+ adev->virt.ras.cper_rptr = cper_dump->wptr;
+out:
+ kfree(cper_dump);
+
+ return ret;
+}
+
+static int amdgpu_virt_req_ras_cper_dump_internal(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+ int ret = 0;
+ uint32_t more = 0;
+
+ if (!amdgpu_sriov_ras_cper_en(adev))
+ return -EOPNOTSUPP;
+
+ do {
+ if (!virt->ops->req_ras_cper_dump(adev, virt->ras.cper_rptr))
+ ret = amdgpu_virt_write_cpers_to_ring(
+ adev, virt->fw_reserve.ras_telemetry, &more);
+ else
+ ret = 0;
+ } while (more);
+
+ return ret;
+}
+
+int amdgpu_virt_req_ras_cper_dump(struct amdgpu_device *adev, bool force_update)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+ int ret = 0;
+
+ if ((__ratelimit(&virt->ras.ras_cper_dump_rs) || force_update) &&
+ down_read_trylock(&adev->reset_domain->sem)) {
+ mutex_lock(&virt->ras.ras_telemetry_mutex);
+ ret = amdgpu_virt_req_ras_cper_dump_internal(adev);
+ mutex_unlock(&virt->ras.ras_telemetry_mutex);
+ up_read(&adev->reset_domain->sem);
+ }
+
+ return ret;
+}
+
int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev)
{
unsigned long ue_count, ce_count;
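The CPER loop above advances through the telemetry buffer using each record's self-described length. A minimal sketch of that walk, with illustrative names (example_rec_hdr and consume_record() are not part of the patch):

#include <linux/types.h>

struct example_rec_hdr {
	u32 record_length;	/* total size of this record, header included */
	/* variable-length payload follows */
};

static void consume_record(struct example_rec_hdr *rec)
{
	/* hypothetical per-record handler, e.g. copy into a ring buffer */
}

static void example_walk(void *buf, u32 count)
{
	struct example_rec_hdr *rec = buf;
	u32 i;

	for (i = 0; i < count; i++) {
		consume_record(rec);
		/* step over the current record to reach the next one */
		rec = (struct example_rec_hdr *)((char *)rec + rec->record_length);
	}
}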
@@ -1331,3 +1466,17 @@ int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev)
return 0;
}
+
+bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block)
+{
+ enum amd_sriov_ras_telemetry_gpu_block sriov_block;
+
+ sriov_block = amdgpu_ras_block_to_sriov(adev, block);
+
+ if (sriov_block >= RAS_TELEMETRY_GPU_BLOCK_COUNT ||
+ !amdgpu_sriov_ras_telemetry_block_en(adev, sriov_block))
+ return false;
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 5381b8d596e6..df03dba67ab8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -96,6 +96,7 @@ struct amdgpu_virt_ops {
enum amdgpu_ras_block block);
bool (*rcvd_ras_intr)(struct amdgpu_device *adev);
int (*req_ras_err_count)(struct amdgpu_device *adev);
+ int (*req_ras_cper_dump)(struct amdgpu_device *adev, u64 vf_rptr);
};
/*
@@ -140,6 +141,7 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_MES_INFO_ENABLE = (1 << 8),
AMDGIM_FEATURE_RAS_CAPS = (1 << 9),
AMDGIM_FEATURE_RAS_TELEMETRY = (1 << 10),
+ AMDGIM_FEATURE_RAS_CPER = (1 << 11),
};
enum AMDGIM_REG_ACCESS_FLAG {
@@ -242,6 +244,13 @@ struct amdgpu_virt_ras_err_handler_data {
int last_reserved;
};
+struct amdgpu_virt_ras {
+ struct ratelimit_state ras_error_cnt_rs;
+ struct ratelimit_state ras_cper_dump_rs;
+ struct mutex ras_telemetry_mutex;
+ uint64_t cper_rptr;
+};
+
/* GPU virtualization */
struct amdgpu_virt {
uint32_t caps;
@@ -279,12 +288,12 @@ struct amdgpu_virt {
/* the ucode id to signal the autoload */
uint32_t autoload_ucode_id;
- struct mutex rlcg_reg_lock;
+ /* Spinlock to protect access to the RLCG register interface */
+ spinlock_t rlcg_reg_lock;
union amd_sriov_ras_caps ras_en_caps;
union amd_sriov_ras_caps ras_telemetry_en_caps;
-
- struct ratelimit_state ras_telemetry_rs;
+ struct amdgpu_virt_ras ras;
struct amd_sriov_ras_telemetry_error_count count_cache;
};
@@ -339,6 +348,9 @@ struct amdgpu_video_codec_info;
#define amdgpu_sriov_ras_telemetry_block_en(adev, sriov_blk) \
(amdgpu_sriov_ras_telemetry_en((adev)) && (adev)->virt.ras_telemetry_en_caps.all & BIT(sriov_blk))
+#define amdgpu_sriov_ras_cper_en(adev) \
+((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_CPER)
+
static inline bool is_virtual_machine(void)
{
#if defined(CONFIG_X86)
@@ -352,6 +364,8 @@ static inline bool is_virtual_machine(void)
#define amdgpu_sriov_is_pp_one_vf(adev) \
((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF)
+#define amdgpu_sriov_multi_vf_mode(adev) \
+ (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
#define amdgpu_sriov_is_debug(adev) \
((!amdgpu_in_reset(adev)) && adev->virt.tdr_debug)
#define amdgpu_sriov_is_normal(adev) \
@@ -377,7 +391,7 @@ void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
void amdgpu_virt_exchange_data(struct amdgpu_device *adev);
void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev);
-void amdgpu_detect_virtualization(struct amdgpu_device *adev);
+void amdgpu_virt_init(struct amdgpu_device *adev);
bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev);
int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev);
@@ -405,5 +419,8 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f
bool amdgpu_virt_get_ras_capability(struct amdgpu_device *adev);
int amdgpu_virt_req_ras_err_count(struct amdgpu_device *adev, enum amdgpu_ras_block block,
struct ras_err_data *err_data);
+int amdgpu_virt_req_ras_cper_dump(struct amdgpu_device *adev, bool force_update);
int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev);
+bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 03308261f894..155bb9891a17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -188,8 +188,8 @@ static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
amdgpu_crtc->connector = NULL;
amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
- hrtimer_init(&amdgpu_crtc->vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- amdgpu_crtc->vblank_timer.function = &amdgpu_vkms_vblank_simulate;
+ hrtimer_setup(&amdgpu_crtc->vblank_timer, &amdgpu_vkms_vblank_simulate, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
return ret;
}
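For context, hrtimer_setup() folds the old two-step initialization (hrtimer_init() followed by assigning ->function) into a single call, as the hunk above shows. A minimal sketch with a trivial one-shot callback (example names only):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer example_timer;

static enum hrtimer_restart example_cb(struct hrtimer *t)
{
	/* do the timer work here; return NORESTART for a one-shot timer */
	return HRTIMER_NORESTART;
}

static void example_start(void)
{
	hrtimer_setup(&example_timer, example_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_start(&example_timer, ms_to_ktime(16), HRTIMER_MODE_REL);
}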
@@ -627,7 +627,7 @@ static int amdgpu_vkms_resume(struct amdgpu_ip_block *ip_block)
return drm_mode_config_helper_resume(adev_to_drm(ip_block->adev));
}
-static bool amdgpu_vkms_is_idle(void *handle)
+static bool amdgpu_vkms_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 5c07777d3239..ce52b4d75e94 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -754,6 +754,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
bool need_pipe_sync)
{
struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id];
unsigned vmhub = ring->vm_hub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
@@ -761,8 +762,9 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
bool gds_switch_needed = ring->funcs->emit_gds_switch &&
job->gds_switch_needed;
bool vm_flush_needed = job->vm_needs_flush;
- struct dma_fence *fence = NULL;
+ bool cleaner_shader_needed = false;
bool pasid_mapping_needed = false;
+ struct dma_fence *fence = NULL;
unsigned int patch;
int r;
@@ -785,8 +787,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
ring->funcs->emit_wreg;
+ cleaner_shader_needed = adev->gfx.enable_cleaner_shader &&
+ ring->funcs->emit_cleaner_shader && job->base.s_fence &&
+ &job->base.s_fence->scheduled == isolation->spearhead;
+
if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync &&
- !(job->enforce_isolation && !job->vmid))
+ !cleaner_shader_needed)
return 0;
amdgpu_ring_ib_begin(ring);
@@ -797,9 +803,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
if (need_pipe_sync)
amdgpu_ring_emit_pipeline_sync(ring);
- if (adev->gfx.enable_cleaner_shader &&
- ring->funcs->emit_cleaner_shader &&
- job->enforce_isolation)
+ if (cleaner_shader_needed)
ring->funcs->emit_cleaner_shader(ring);
if (vm_flush_needed) {
@@ -821,7 +825,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
job->oa_size);
}
- if (vm_flush_needed || pasid_mapping_needed) {
+ if (vm_flush_needed || pasid_mapping_needed || cleaner_shader_needed) {
r = amdgpu_fence_emit(ring, &fence, NULL, 0);
if (r)
return r;
@@ -843,6 +847,18 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
id->pasid_mapping = dma_fence_get(fence);
mutex_unlock(&id_mgr->lock);
}
+
+ /*
+ * Make sure that all other submissions wait for the cleaner shader to
+ * finish before we push them to the HW.
+ */
+ if (cleaner_shader_needed) {
+ trace_amdgpu_cleaner_shader(ring, fence);
+ mutex_lock(&adev->enforce_isolation_mutex);
+ dma_fence_put(isolation->spearhead);
+ isolation->spearhead = dma_fence_get(fence);
+ mutex_unlock(&adev->enforce_isolation_mutex);
+ }
dma_fence_put(fence);
amdgpu_ring_patch_cond_exec(ring, patch);
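The spearhead bookkeeping above is the standard replace-the-tracked-fence idiom: take a reference on the new fence and drop the reference on the one it replaces, both under the same lock. A minimal sketch with an illustrative tracker struct (not the driver's actual type):

#include <linux/dma-fence.h>
#include <linux/mutex.h>

struct example_tracker {
	struct mutex lock;
	struct dma_fence *last;	/* most recent fence; holds one reference */
};

static void example_track(struct example_tracker *t, struct dma_fence *f)
{
	mutex_lock(&t->lock);
	dma_fence_put(t->last);		/* drop the old reference (NULL-safe) */
	t->last = dma_fence_get(f);	/* take a reference on the new fence */
	mutex_unlock(&t->lock);
}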
@@ -2534,8 +2550,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->freed);
INIT_LIST_HEAD(&vm->done);
- INIT_LIST_HEAD(&vm->pt_freed);
- INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
INIT_KFIFO(vm->faults);
r = amdgpu_vm_init_entities(adev, vm);
@@ -2674,20 +2688,6 @@ unreserve_bo:
return r;
}
-/**
- * amdgpu_vm_release_compute - release a compute vm
- * @adev: amdgpu_device pointer
- * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
- *
- * This is a correspondant of amdgpu_vm_make_compute. It decouples compute
- * pasid from vm. Compute should stop use of vm after this call.
- */
-void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
-{
- amdgpu_vm_set_pasid(adev, vm, 0);
- vm->is_compute_context = false;
-}
-
static int amdgpu_vm_stats_is_zero(struct amdgpu_vm *vm)
{
for (int i = 0; i < __AMDGPU_PL_NUM; ++i) {
@@ -2717,8 +2717,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
- flush_work(&vm->pt_free_work);
-
root = amdgpu_bo_ref(vm->root.bo);
amdgpu_bo_reserve(root, true);
amdgpu_vm_set_pasid(adev, vm, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index a3e128e373bc..f3ad687125ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -374,10 +374,6 @@ struct amdgpu_vm {
/* BOs which are invalidated, has been updated in the PTs */
struct list_head done;
- /* PT BOs scheduled to free and fill with zero if vm_resv is not hold */
- struct list_head pt_freed;
- struct work_struct pt_free_work;
-
/* contains the page directory */
struct amdgpu_vm_bo_base root;
struct dma_fence *last_update;
@@ -493,7 +489,6 @@ int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
unsigned int num_fences);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index b0bf21682115..30022123b0bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -547,27 +547,6 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
amdgpu_bo_unref(&entry->bo);
}
-void amdgpu_vm_pt_free_work(struct work_struct *work)
-{
- struct amdgpu_vm_bo_base *entry, *next;
- struct amdgpu_vm *vm;
- LIST_HEAD(pt_freed);
-
- vm = container_of(work, struct amdgpu_vm, pt_free_work);
-
- spin_lock(&vm->status_lock);
- list_splice_init(&vm->pt_freed, &pt_freed);
- spin_unlock(&vm->status_lock);
-
- /* flush_work in amdgpu_vm_fini ensure vm->root.bo is valid. */
- amdgpu_bo_reserve(vm->root.bo, true);
-
- list_for_each_entry_safe(entry, next, &pt_freed, vm_status)
- amdgpu_vm_pt_free(entry);
-
- amdgpu_bo_unreserve(vm->root.bo);
-}
-
/**
* amdgpu_vm_pt_free_list - free PD/PT levels
*
@@ -580,19 +559,15 @@ void amdgpu_vm_pt_free_list(struct amdgpu_device *adev,
struct amdgpu_vm_update_params *params)
{
struct amdgpu_vm_bo_base *entry, *next;
- struct amdgpu_vm *vm = params->vm;
bool unlocked = params->unlocked;
if (list_empty(&params->tlb_flush_waitlist))
return;
- if (unlocked) {
- spin_lock(&vm->status_lock);
- list_splice_init(&params->tlb_flush_waitlist, &vm->pt_freed);
- spin_unlock(&vm->status_lock);
- schedule_work(&vm->pt_free_work);
- return;
- }
+ /*
+ * An unlocked unmap only clears page table leaves; warn if we are
+ * asked to free a page table entry here.
+ */
+ WARN_ON(unlocked);
list_for_each_entry_safe(entry, next, &params->tlb_flush_waitlist, vm_status)
amdgpu_vm_pt_free(entry);
@@ -900,7 +875,15 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
mask = amdgpu_vm_pt_entries_mask(adev, cursor.level);
pe_start = ((cursor.pfn >> shift) & mask) * 8;
- entry_end = ((uint64_t)mask + 1) << shift;
+
+ if (cursor.level < AMDGPU_VM_PTB && params->unlocked)
+ /*
+ * An unlocked unmap from the MMU notifier callback on a huge page hits
+ * a PDE leaf; clear only that one entry, then search again for the
+ * next PDE or PTE leaf.
+ */
+ entry_end = 1ULL << shift;
+ else
+ entry_end = ((uint64_t)mask + 1) << shift;
entry_end += cursor.pfn & ~(entry_end - 1);
entry_end = min(entry_end, end);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index ff5e52025266..6da8994e0469 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -28,7 +28,6 @@
#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "amdgpu_res_cursor.h"
-#include "amdgpu_atomfirmware.h"
#include "atom.h"
#define AMDGPU_MAX_SG_SEGMENT_SIZE (2UL << 30)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 74b4349e345a..6029a799074d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -315,6 +315,7 @@ int amdgpu_get_xgmi_link_status(struct amdgpu_device *adev, int global_link_num)
switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
case IP_VERSION(6, 4, 0):
+ case IP_VERSION(6, 4, 1):
xgmi_state_reg_val = xgmi_v6_4_get_link_status(adev, global_link_num);
break;
default:
@@ -818,28 +819,71 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev
* num_hops[2:0] = number of hops
*/
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
- struct amdgpu_device *peer_adev)
+ struct amdgpu_device *peer_adev)
{
struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
uint8_t num_hops_mask = 0x7;
int i;
+ if (!adev->gmc.xgmi.supported)
+ return 0;
+
for (i = 0 ; i < top->num_nodes; ++i)
if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
return top->nodes[i].num_hops & num_hops_mask;
- return -EINVAL;
+
+ dev_err(adev->dev, "Failed to get xgmi hops count for peer %d.\n",
+ peer_adev->gmc.xgmi.physical_node_id);
+
+ return 0;
}
-int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev,
- struct amdgpu_device *peer_adev)
+int amdgpu_xgmi_get_bandwidth(struct amdgpu_device *adev, struct amdgpu_device *peer_adev,
+ enum amdgpu_xgmi_bw_mode bw_mode, enum amdgpu_xgmi_bw_unit bw_unit,
+ uint32_t *min_bw, uint32_t *max_bw)
{
- struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
- int i;
+ bool peer_mode = bw_mode == AMDGPU_XGMI_BW_MODE_PER_PEER;
+ int unit_scale = bw_unit == AMDGPU_XGMI_BW_UNIT_MBYTES ? 1000 : 1;
+ int num_lanes = adev->gmc.xgmi.max_width;
+ int speed = adev->gmc.xgmi.max_speed;
+ int num_links = !peer_mode ? 1 : -1;
- for (i = 0 ; i < top->num_nodes; ++i)
- if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
- return top->nodes[i].num_links;
- return -EINVAL;
+ if (!(min_bw && max_bw))
+ return -EINVAL;
+
+ *min_bw = 0;
+ *max_bw = 0;
+
+ if (!adev->gmc.xgmi.supported)
+ return -ENODATA;
+
+ if (peer_mode && !peer_adev)
+ return -EINVAL;
+
+ if (peer_mode) {
+ struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
+ int i;
+
+ for (i = 0 ; i < top->num_nodes; ++i) {
+ if (top->nodes[i].node_id != peer_adev->gmc.xgmi.node_id)
+ continue;
+
+ num_links = top->nodes[i].num_links;
+ break;
+ }
+ }
+
+ if (num_links == -1) {
+ dev_err(adev->dev, "Failed to get number of xgmi links for peer %d.\n",
+ peer_adev->gmc.xgmi.physical_node_id);
+ } else if (num_links) {
+ int per_link_bw = (speed * num_lanes * unit_scale)/BITS_PER_BYTE;
+
+ *min_bw = per_link_bw;
+ *max_bw = num_links * per_link_bw;
+ }
+
+ return 0;
}
bool amdgpu_xgmi_get_is_sharing_enabled(struct amdgpu_device *adev,
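As a worked example of the bandwidth math above, assuming max_speed = 32 GT/s and max_width = 16 lanes (the values amdgpu_xgmi_early_init() sets for the newer GC 9.4.3-class parts later in this patch), with 4 links to the peer:

#include <linux/types.h>

static void example_bw(u32 *min_bw, u32 *max_bw)
{
	const int speed = 32, lanes = 16, num_links = 4;
	/* (speed * lanes * unit_scale) / BITS_PER_BYTE; unit_scale = 1 for GB/s,
	 * 1000 for MB/s
	 */
	u32 per_link = (speed * lanes * 1) / 8;	/* 64 GB/s per link */

	*min_bw = per_link;			/* 64 GB/s  */
	*max_bw = num_links * per_link;		/* 256 GB/s */
}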
@@ -1123,11 +1167,13 @@ static int xgmi_v6_4_0_aca_bank_parser(struct aca_handle *handle, struct aca_ban
if (ext_error_code != 0 && ext_error_code != 9)
count = 0ULL;
+ bank->aca_err_type = ACA_ERROR_TYPE_UE;
ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE, count);
break;
case ACA_SMU_TYPE_CE:
count = ext_error_code == 6 ? count : 0ULL;
- ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE, count);
+ bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank);
+ ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, count);
break;
default:
return -EINVAL;
@@ -1162,6 +1208,7 @@ static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_comm
switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
case IP_VERSION(6, 4, 0):
+ case IP_VERSION(6, 4, 1):
r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL,
&xgmi_v6_4_0_aca_info, NULL);
if (r)
@@ -1221,6 +1268,7 @@ static void amdgpu_xgmi_legacy_reset_ras_error_count(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
case IP_VERSION(6, 4, 0):
+ case IP_VERSION(6, 4, 1):
for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_v6_4); i++)
pcs_clear_status(adev,
xgmi3x16_pcs_err_status_reg_v6_4[i]);
@@ -1255,6 +1303,7 @@ static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
{
switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
case IP_VERSION(6, 4, 0):
+ case IP_VERSION(6, 4, 1):
xgmi_v6_4_0_reset_ras_error_count(adev);
break;
default:
@@ -1280,7 +1329,9 @@ static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
if (amdgpu_ip_version(adev, XGMI_HWIP, 0) ==
IP_VERSION(6, 1, 0) ||
amdgpu_ip_version(adev, XGMI_HWIP, 0) ==
- IP_VERSION(6, 4, 0)) {
+ IP_VERSION(6, 4, 0) ||
+ amdgpu_ip_version(adev, XGMI_HWIP, 0) ==
+ IP_VERSION(6, 4, 1)) {
pcs_ras_fields = &xgmi3x16_pcs_ras_fields[0];
field_array_size = ARRAY_SIZE(xgmi3x16_pcs_ras_fields);
} else {
@@ -1388,6 +1439,7 @@ static void amdgpu_xgmi_legacy_query_ras_error_count(struct amdgpu_device *adev,
switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
case IP_VERSION(6, 4, 0):
+ case IP_VERSION(6, 4, 1):
/* check xgmi3x16 pcs error */
for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_v6_4); i++) {
data = RREG32_PCIE(xgmi3x16_pcs_err_status_reg_v6_4[i]);
@@ -1484,6 +1536,7 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
{
switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
case IP_VERSION(6, 4, 0):
+ case IP_VERSION(6, 4, 1):
xgmi_v6_4_0_query_ras_error_count(adev, ras_error_status);
break;
default:
@@ -1671,3 +1724,34 @@ int amdgpu_xgmi_request_nps_change(struct amdgpu_device *adev,
return r;
}
+
+bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
+ struct amdgpu_device *bo_adev)
+{
+ return (amdgpu_use_xgmi_p2p && adev != bo_adev &&
+ adev->gmc.xgmi.hive_id &&
+ adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id);
+}
+
+void amdgpu_xgmi_early_init(struct amdgpu_device *adev)
+{
+ if (!adev->gmc.xgmi.supported)
+ return;
+
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ adev->gmc.xgmi.max_speed = XGMI_SPEED_25GT;
+ adev->gmc.xgmi.max_width = 16;
+ break;
+ case IP_VERSION(9, 4, 3):
+ case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
+ adev->gmc.xgmi.max_speed = XGMI_SPEED_32GT;
+ adev->gmc.xgmi.max_width = 16;
+ break;
+ default:
+ break;
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index d1282b4c6348..32dabba4062f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -23,9 +23,14 @@
#define __AMDGPU_XGMI_H__
#include <drm/task_barrier.h>
-#include "amdgpu_psp.h"
#include "amdgpu_ras.h"
+enum amdgpu_xgmi_link_speed {
+ XGMI_SPEED_16GT = 16,
+ XGMI_SPEED_25GT = 25,
+ XGMI_SPEED_32GT = 32
+};
+
struct amdgpu_hive_info {
struct kobject kobj;
uint64_t hive_id;
@@ -55,29 +60,63 @@ struct amdgpu_pcs_ras_field {
uint32_t pcs_err_shift;
};
-extern struct amdgpu_xgmi_ras xgmi_ras;
+/**
+ * Bandwidth range reporting comes in two modes.
+ *
+ * PER_LINK - range for a single xgmi link
+ * PER_PEER - range from the bandwidth of a single xgmi link up to the
+ * aggregate bandwidth of all links to the source peer
+ */
+enum amdgpu_xgmi_bw_mode {
+ AMDGPU_XGMI_BW_MODE_PER_LINK = 0,
+ AMDGPU_XGMI_BW_MODE_PER_PEER
+};
+
+enum amdgpu_xgmi_bw_unit {
+ AMDGPU_XGMI_BW_UNIT_GBYTES = 0,
+ AMDGPU_XGMI_BW_UNIT_MBYTES
+};
+
+struct amdgpu_xgmi_ras {
+ struct amdgpu_ras_block_object ras_block;
+};
+extern struct amdgpu_xgmi_ras xgmi_ras;
+
+struct amdgpu_xgmi {
+ /* from psp */
+ u64 node_id;
+ u64 hive_id;
+ /* fixed per family */
+ u64 node_segment_size;
+ /* physical node (0-3) */
+ unsigned physical_node_id;
+ /* number of nodes (0-4) */
+ unsigned num_physical_nodes;
+ /* gpu list in the same hive */
+ struct list_head head;
+ bool supported;
+ struct ras_common_if *ras_if;
+ bool connected_to_cpu;
+ struct amdgpu_xgmi_ras *ras;
+ enum amdgpu_xgmi_link_speed max_speed;
+ uint8_t max_width;
+};
+
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev);
void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive);
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
int amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
-int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
- struct amdgpu_device *peer_adev);
-int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev,
- struct amdgpu_device *peer_adev);
+int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev, struct amdgpu_device *peer_adev);
+int amdgpu_xgmi_get_bandwidth(struct amdgpu_device *adev, struct amdgpu_device *peer_adev,
+ enum amdgpu_xgmi_bw_mode bw_mode, enum amdgpu_xgmi_bw_unit bw_unit,
+ uint32_t *min_bw, uint32_t *max_bw);
bool amdgpu_xgmi_get_is_sharing_enabled(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
uint64_t addr);
-static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
- struct amdgpu_device *bo_adev)
-{
- return (amdgpu_use_xgmi_p2p &&
- adev != bo_adev &&
- adev->gmc.xgmi.hive_id &&
- adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id);
-}
+bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
+ struct amdgpu_device *bo_adev);
int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_xgmi_reset_on_init(struct amdgpu_device *adev);
@@ -87,4 +126,7 @@ int amdgpu_xgmi_request_nps_change(struct amdgpu_device *adev,
int amdgpu_get_xgmi_link_status(struct amdgpu_device *adev,
int global_link_num);
+void amdgpu_xgmi_early_init(struct amdgpu_device *adev);
+uint32_t amdgpu_xgmi_get_max_bandwidth(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index b4f9c2f4e92c..d6ac2652f0ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -97,11 +97,12 @@ union amd_sriov_msg_feature_flags {
uint32_t pp_one_vf_mode : 1;
uint32_t reg_indirect_acc : 1;
uint32_t av1_support : 1;
- uint32_t vcn_rb_decouple : 1;
+ uint32_t vcn_rb_decouple : 1;
uint32_t mes_info_dump_enable : 1;
uint32_t ras_caps : 1;
uint32_t ras_telemetry : 1;
- uint32_t reserved : 21;
+ uint32_t ras_cper : 1;
+ uint32_t reserved : 20;
} flags;
uint32_t all;
};
@@ -328,21 +329,25 @@ enum amd_sriov_mailbox_request_message {
MB_REQ_MSG_READY_TO_RESET = 201,
MB_REQ_MSG_RAS_POISON = 202,
MB_REQ_RAS_ERROR_COUNT = 203,
+ MB_REQ_RAS_CPER_DUMP = 204,
};
/* mailbox message send from host to guest */
enum amd_sriov_mailbox_response_message {
- MB_RES_MSG_CLR_MSG_BUF = 0,
- MB_RES_MSG_READY_TO_ACCESS_GPU = 1,
- MB_RES_MSG_FLR_NOTIFICATION,
- MB_RES_MSG_FLR_NOTIFICATION_COMPLETION,
- MB_RES_MSG_SUCCESS,
- MB_RES_MSG_FAIL,
- MB_RES_MSG_QUERY_ALIVE,
- MB_RES_MSG_GPU_INIT_DATA_READY,
- MB_RES_MSG_RAS_ERROR_COUNT_READY = 11,
-
- MB_RES_MSG_TEXT_MESSAGE = 255
+ MB_RES_MSG_CLR_MSG_BUF = 0,
+ MB_RES_MSG_READY_TO_ACCESS_GPU = 1,
+ MB_RES_MSG_FLR_NOTIFICATION = 2,
+ MB_RES_MSG_FLR_NOTIFICATION_COMPLETION = 3,
+ MB_RES_MSG_SUCCESS = 4,
+ MB_RES_MSG_FAIL = 5,
+ MB_RES_MSG_QUERY_ALIVE = 6,
+ MB_RES_MSG_GPU_INIT_DATA_READY = 7,
+ MB_RES_MSG_RAS_POISON_READY = 8,
+ MB_RES_MSG_PF_SOFT_FLR_NOTIFICATION = 9,
+ MB_RES_MSG_GPU_RMA = 10,
+ MB_RES_MSG_RAS_ERROR_COUNT_READY = 11,
+ MB_REQ_RAS_CPER_DUMP_READY = 14,
+ MB_RES_MSG_TEXT_MESSAGE = 255
};
enum amd_sriov_ras_telemetry_gpu_block {
@@ -386,11 +391,20 @@ struct amd_sriov_ras_telemetry_error_count {
} block[RAS_TELEMETRY_GPU_BLOCK_COUNT];
};
+struct amd_sriov_ras_cper_dump {
+ uint32_t more;
+ uint64_t overflow_count;
+ uint64_t count;
+ uint64_t wptr;
+ uint32_t buf[];
+};
+
struct amdsriov_ras_telemetry {
struct amd_sriov_ras_telemetry_header header;
union {
struct amd_sriov_ras_telemetry_error_count error_count;
+ struct amd_sriov_ras_cper_dump cper_dump;
} body;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index e157d6d857b6..3c07517be09a 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -77,7 +77,8 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
- if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
+ if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) ||
+ (ring->funcs->type == AMDGPU_RING_TYPE_CPER))
return;
inst_mask = 1 << inst_idx;
@@ -559,8 +560,11 @@ static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
adev->gmc.num_mem_partitions == 4) &&
(num_xccs_per_xcp >= 2);
case AMDGPU_CPX_PARTITION_MODE:
+ /* (num_xcc > 1) because 1 XCC is considered SPX, not CPX.
+ * (num_xcc % adev->gmc.num_mem_partitions) == 0 because
+ * num_compute_partitions can't be less than num_mem_partitions
+ */
return ((num_xcc > 1) &&
- (adev->gmc.num_mem_partitions == 1 || adev->gmc.num_mem_partitions == 4) &&
(num_xcc % adev->gmc.num_mem_partitions) == 0);
default:
return false;
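A quick illustration of the CPX validity rule described in the comment above (illustrative helper, not part of the patch):

#include <linux/types.h>

/* mirrors the CPX case: more than one XCC, evenly divisible across
 * the memory partitions
 */
static bool example_cpx_valid(int num_xcc, int num_mem_partitions)
{
	return (num_xcc > 1) && (num_xcc % num_mem_partitions) == 0;
}

/* example_cpx_valid(8, 4) -> true; example_cpx_valid(6, 4) -> false;
 * example_cpx_valid(1, 1) -> false (a single XCC is SPX, not CPX)
 */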
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 622634c08c7b..521b9faab180 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -430,7 +430,7 @@ void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
}
int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *dig_connector;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.h b/drivers/gpu/drm/amd/amdgpu/atombios_dp.h
index f59d85eaddf0..3e24acf8133f 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.h
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.h
@@ -32,7 +32,7 @@ int amdgpu_atombios_dp_get_panel_mode(struct drm_encoder *encoder,
void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
const struct drm_display_mode *mode);
int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
bool amdgpu_atombios_dp_needs_link_train(struct amdgpu_connector *amdgpu_connector);
void amdgpu_atombios_dp_set_rx_power_state(struct drm_connector *connector,
u8 power_state);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 08d6787893b3..9cd63b4177bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -2148,7 +2148,7 @@ static int cik_common_resume(struct amdgpu_ip_block *ip_block)
return cik_common_hw_init(ip_block);
}
-static bool cik_common_is_idle(void *handle)
+static bool cik_common_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index 444563486769..41f4705bdbbd 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -345,9 +345,9 @@ static int cik_ih_resume(struct amdgpu_ip_block *ip_block)
return cik_ih_hw_init(ip_block);
}
-static bool cik_ih_is_idle(void *handle)
+static bool cik_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index d9bd8f3f17e2..508cea965983 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -1025,9 +1025,9 @@ static int cik_sdma_resume(struct amdgpu_ip_block *ip_block)
return cik_sdma_hw_init(ip_block);
}
-static bool cik_sdma_is_idle(void *handle)
+static bool cik_sdma_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS2);
if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index 06088d52d81c..279288365940 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -51,6 +51,15 @@
#define HPD4_REGISTER_OFFSET (0x1813 - 0x1807)
#define HPD5_REGISTER_OFFSET (0x1816 - 0x1807)
+/* audio endpt instance offsets */
+#define AUD0_REGISTER_OFFSET (0x1780 - 0x1780)
+#define AUD1_REGISTER_OFFSET (0x1786 - 0x1780)
+#define AUD2_REGISTER_OFFSET (0x178c - 0x1780)
+#define AUD3_REGISTER_OFFSET (0x1792 - 0x1780)
+#define AUD4_REGISTER_OFFSET (0x1798 - 0x1780)
+#define AUD5_REGISTER_OFFSET (0x179d - 0x1780)
+#define AUD6_REGISTER_OFFSET (0x17a4 - 0x1780)
+
#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 82586b76aeda..2f891fb846d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -341,9 +341,9 @@ static int cz_ih_resume(struct amdgpu_ip_block *ip_block)
return cz_ih_hw_init(ip_block);
}
-static bool cz_ih_is_idle(void *handle)
+static bool cz_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index c5e3d2251b18..df401aded662 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2970,7 +2970,7 @@ static int dce_v10_0_resume(struct amdgpu_ip_block *ip_block)
return amdgpu_display_resume_helper(adev);
}
-static bool dce_v10_0_is_idle(void *handle)
+static bool dce_v10_0_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index ea42a4472bf6..80f01c3989cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -3108,7 +3108,7 @@ static int dce_v11_0_resume(struct amdgpu_ip_block *ip_block)
return amdgpu_display_resume_helper(adev);
}
-static bool dce_v11_0_is_idle(void *handle)
+static bool dce_v11_0_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 915804a6a1d7..255c70959343 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -40,18 +40,25 @@
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"
+#include "dce_v6_0.h"
+#include "sid.h"
+
#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
+
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
+
#include "gca/gfx_6_0_d.h"
#include "gca/gfx_6_0_sh_mask.h"
+#include "gca/gfx_7_2_enum.h"
+
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
+
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
-#include "gca/gfx_7_2_enum.h"
-#include "dce_v6_0.h"
+
#include "si_enums.h"
static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
@@ -59,31 +66,31 @@ static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static const u32 crtc_offsets[6] =
{
- SI_CRTC0_REGISTER_OFFSET,
- SI_CRTC1_REGISTER_OFFSET,
- SI_CRTC2_REGISTER_OFFSET,
- SI_CRTC3_REGISTER_OFFSET,
- SI_CRTC4_REGISTER_OFFSET,
- SI_CRTC5_REGISTER_OFFSET
+ CRTC0_REGISTER_OFFSET,
+ CRTC1_REGISTER_OFFSET,
+ CRTC2_REGISTER_OFFSET,
+ CRTC3_REGISTER_OFFSET,
+ CRTC4_REGISTER_OFFSET,
+ CRTC5_REGISTER_OFFSET
};
static const u32 hpd_offsets[] =
{
- mmDC_HPD1_INT_STATUS - mmDC_HPD1_INT_STATUS,
- mmDC_HPD2_INT_STATUS - mmDC_HPD1_INT_STATUS,
- mmDC_HPD3_INT_STATUS - mmDC_HPD1_INT_STATUS,
- mmDC_HPD4_INT_STATUS - mmDC_HPD1_INT_STATUS,
- mmDC_HPD5_INT_STATUS - mmDC_HPD1_INT_STATUS,
- mmDC_HPD6_INT_STATUS - mmDC_HPD1_INT_STATUS,
+ HPD0_REGISTER_OFFSET,
+ HPD1_REGISTER_OFFSET,
+ HPD2_REGISTER_OFFSET,
+ HPD3_REGISTER_OFFSET,
+ HPD4_REGISTER_OFFSET,
+ HPD5_REGISTER_OFFSET
};
static const uint32_t dig_offsets[] = {
- SI_CRTC0_REGISTER_OFFSET,
- SI_CRTC1_REGISTER_OFFSET,
- SI_CRTC2_REGISTER_OFFSET,
- SI_CRTC3_REGISTER_OFFSET,
- SI_CRTC4_REGISTER_OFFSET,
- SI_CRTC5_REGISTER_OFFSET,
+ CRTC0_REGISTER_OFFSET,
+ CRTC1_REGISTER_OFFSET,
+ CRTC2_REGISTER_OFFSET,
+ CRTC3_REGISTER_OFFSET,
+ CRTC4_REGISTER_OFFSET,
+ CRTC5_REGISTER_OFFSET,
(0x13830 - 0x7030) >> 2,
};
@@ -206,9 +213,9 @@ static void dce_v6_0_page_flip(struct amdgpu_device *adev,
/* update the scanout addresses */
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(crtc_base));
+ /* writing to the low address triggers the update */
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
(u32)crtc_base);
-
/* post the write */
RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}
@@ -218,11 +225,11 @@ static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
{
if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
return -EINVAL;
+
*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
return 0;
-
}
/**
@@ -242,7 +249,8 @@ static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
if (hpd >= adev->mode_info.num_hpd)
return connected;
- if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
+ if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
+ DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
connected = true;
return connected;
@@ -370,13 +378,41 @@ static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
return mmDC_GPIO_HPD_A;
}
+static bool dce_v6_0_is_display_hung(struct amdgpu_device *adev)
+{
+ u32 crtc_hung = 0;
+ u32 crtc_status[6];
+ u32 i, j, tmp;
+
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
+ crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ crtc_hung |= (1 << i);
+ }
+ }
+
+ for (j = 0; j < 10; j++) {
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (crtc_hung & (1 << i)) {
+ tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ if (tmp != crtc_status[i])
+ crtc_hung &= ~(1 << i);
+ }
+ }
+ if (crtc_hung == 0)
+ return false;
+ udelay(100);
+ }
+
+ return true;
+}
+
static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
bool render)
{
if (!render)
WREG32(mmVGA_RENDER_CONTROL,
RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
-
}
static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
@@ -419,7 +455,6 @@ void dce_v6_0_disable_dce(struct amdgpu_device *adev)
static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
{
-
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
@@ -895,8 +930,8 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
wm_high.dram_channels = dram_channels;
wm_high.num_heads = num_heads;
- if (adev->pm.dpm_enabled) {
/* watermark for low clocks */
+ if (adev->pm.dpm_enabled) {
wm_low.yclk =
amdgpu_dpm_get_mclk(adev, true) * 10;
wm_low.sclk =
@@ -1006,6 +1041,20 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
}
/* watermark setup */
+/**
+ * dce_v6_0_line_buffer_adjust - Set up the line buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @amdgpu_crtc: the selected display controller
+ * @mode: the current display mode on the selected display
+ * controller
+ * @other_mode: the display mode of another display controller
+ * that may be sharing the line buffer
+ *
+ * Set up the line buffer allocation for
+ * the selected display controller (SI).
+ * Returns the line buffer size in pixels.
+ */
static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
struct amdgpu_crtc *amdgpu_crtc,
struct drm_display_mode *mode,
@@ -1347,13 +1396,13 @@ static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
static const u32 pin_offsets[7] =
{
- (0x1780 - 0x1780),
- (0x1786 - 0x1780),
- (0x178c - 0x1780),
- (0x1792 - 0x1780),
- (0x1798 - 0x1780),
- (0x179d - 0x1780),
- (0x17a4 - 0x1780),
+ AUD0_REGISTER_OFFSET,
+ AUD1_REGISTER_OFFSET,
+ AUD2_REGISTER_OFFSET,
+ AUD3_REGISTER_OFFSET,
+ AUD4_REGISTER_OFFSET,
+ AUD5_REGISTER_OFFSET,
+ AUD6_REGISTER_OFFSET,
};
static int dce_v6_0_audio_init(struct amdgpu_device *adev)
@@ -1386,6 +1435,8 @@ static int dce_v6_0_audio_init(struct amdgpu_device *adev)
adev->mode_info.audio.pin[i].connected = false;
adev->mode_info.audio.pin[i].offset = pin_offsets[i];
adev->mode_info.audio.pin[i].id = i;
+ /* disable audio. it will be set up later */
+ /* XXX remove once we switch to ip funcs */
dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
}
@@ -2865,14 +2916,35 @@ static int dce_v6_0_resume(struct amdgpu_ip_block *ip_block)
return amdgpu_display_resume_helper(adev);
}
-static bool dce_v6_0_is_idle(void *handle)
+static bool dce_v6_0_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
static int dce_v6_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
- DRM_INFO("xxxx: dce_v6_0_soft_reset --- no impl!!\n");
+ u32 srbm_soft_reset = 0, tmp;
+ struct amdgpu_device *adev = ip_block->adev;
+
+ if (dce_v6_0_is_display_hung(adev))
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
+
+ if (srbm_soft_reset) {
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+ }
return 0;
}
@@ -2889,22 +2961,22 @@ static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
switch (crtc) {
case 0:
- reg_block = SI_CRTC0_REGISTER_OFFSET;
+ reg_block = CRTC0_REGISTER_OFFSET;
break;
case 1:
- reg_block = SI_CRTC1_REGISTER_OFFSET;
+ reg_block = CRTC1_REGISTER_OFFSET;
break;
case 2:
- reg_block = SI_CRTC2_REGISTER_OFFSET;
+ reg_block = CRTC2_REGISTER_OFFSET;
break;
case 3:
- reg_block = SI_CRTC3_REGISTER_OFFSET;
+ reg_block = CRTC3_REGISTER_OFFSET;
break;
case 4:
- reg_block = SI_CRTC4_REGISTER_OFFSET;
+ reg_block = CRTC4_REGISTER_OFFSET;
break;
case 5:
- reg_block = SI_CRTC5_REGISTER_OFFSET;
+ reg_block = CRTC5_REGISTER_OFFSET;
break;
default:
DRM_DEBUG("invalid crtc %d\n", crtc);
@@ -3148,7 +3220,6 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
}
return 0;
-
}
static int dce_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
@@ -3281,8 +3352,7 @@ static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
}
-static void
-dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
+static void dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
@@ -3294,8 +3364,7 @@ static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
}
-static void
-dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
+static void dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
{
}
@@ -3366,7 +3435,6 @@ static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
amdgpu_encoder->devices |= supported_device;
return;
}
-
}
/* add a new one */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index f2edc0fece5b..07358546581f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1395,13 +1395,13 @@ static void dce_v8_0_audio_enable(struct amdgpu_device *adev,
}
static const u32 pin_offsets[7] = {
- (0x1780 - 0x1780),
- (0x1786 - 0x1780),
- (0x178c - 0x1780),
- (0x1792 - 0x1780),
- (0x1798 - 0x1780),
- (0x179d - 0x1780),
- (0x17a4 - 0x1780),
+ AUD0_REGISTER_OFFSET,
+ AUD1_REGISTER_OFFSET,
+ AUD2_REGISTER_OFFSET,
+ AUD3_REGISTER_OFFSET,
+ AUD4_REGISTER_OFFSET,
+ AUD5_REGISTER_OFFSET,
+ AUD6_REGISTER_OFFSET,
};
static int dce_v8_0_audio_init(struct amdgpu_device *adev)
@@ -2887,7 +2887,7 @@ static int dce_v8_0_resume(struct amdgpu_ip_block *ip_block)
return amdgpu_display_resume_helper(adev);
}
-static bool dce_v8_0_is_idle(void *handle)
+static bool dce_v8_0_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 5ba263fe5512..6d514efb0a6d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -40,7 +40,6 @@
#include "ivsrcid/gfx/irqsrcs_gfx_10_1.h"
#include "soc15.h"
-#include "soc15d.h"
#include "soc15_common.h"
#include "clearstate_gfx10.h"
#include "v10_structs.h"
@@ -3790,12 +3789,65 @@ static void gfx10_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
gfx_v10_0_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1);
}
+static void gfx_v10_0_kiq_reset_hw_queue(struct amdgpu_ring *kiq_ring, uint32_t queue_type,
+ uint32_t me_id, uint32_t pipe_id, uint32_t queue_id,
+ uint32_t xcc_id, uint32_t vmid)
+{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ unsigned i;
+ uint32_t tmp;
+
+ /* enter save mode */
+ amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
+ mutex_lock(&adev->srbm_mutex);
+ nv_grbm_select(adev, me_id, pipe_id, queue_id, 0);
+
+ if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
+ WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 0x2);
+ WREG32_SOC15(GC, 0, mmSPI_COMPUTE_QUEUE_RESET, 0x1);
+ /* wait till dequeue takes effect */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout)
+ dev_err(adev->dev, "fail to wait on hqd deactive\n");
+ } else if (queue_type == AMDGPU_RING_TYPE_GFX) {
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX,
+ (uint32_t)(0x1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT));
+ tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
+ if (pipe_id == 0)
+ tmp = REG_SET_FIELD(tmp, CP_VMID_RESET, PIPE0_QUEUES, 1 << queue_id);
+ else
+ tmp = REG_SET_FIELD(tmp, CP_VMID_RESET, PIPE1_QUEUES, 1 << queue_id);
+ WREG32_SOC15(GC, 0, mmCP_VMID_RESET, tmp);
+
+ /* wait till dequeue takes effect */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, mmCP_GFX_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout)
+ dev_err(adev->dev, "failed to wait on gfx hqd deactivate\n");
+ } else {
+ dev_err(adev->dev, "reset queue_type(%d) not supported\n", queue_type);
+ }
+
+ nv_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ /* exit safe mode */
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+}
+
static const struct kiq_pm4_funcs gfx_v10_0_kiq_pm4_funcs = {
.kiq_set_resources = gfx10_kiq_set_resources,
.kiq_map_queues = gfx10_kiq_map_queues,
.kiq_unmap_queues = gfx10_kiq_unmap_queues,
.kiq_query_status = gfx10_kiq_query_status,
.kiq_invalidate_tlbs = gfx10_kiq_invalidate_tlbs,
+ .kiq_reset_hw_queue = gfx_v10_0_kiq_reset_hw_queue,
.set_resources_size = 8,
.map_queues_size = 7,
.unmap_queues_size = 6,
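The new gfx_v10_0_kiq_reset_hw_queue() wired up above requests a dequeue/reset and then polls an ACTIVE bit for up to adev->usec_timeout microseconds before giving up. The standalone sketch below models only that poll-with-timeout idiom; read_active_bit() and the constants are invented stand-ins for the RREG32_SOC15() reads, not driver API.

/*
 * Sketch of the poll-until-inactive-with-timeout idiom used in
 * gfx_v10_0_kiq_reset_hw_queue().  read_active_bit() is a fake stand-in
 * for reading the CP_HQD_ACTIVE bit.
 */
#include <stdbool.h>
#include <stdio.h>

#define USEC_TIMEOUT 100000

static unsigned int fake_active = 5;	/* pretend the queue drains after 5 polls */

static bool read_active_bit(void)
{
	if (fake_active)
		fake_active--;
	return fake_active != 0;
}

static void udelay(unsigned int usecs)
{
	(void)usecs;			/* no-op outside the kernel */
}

static int wait_for_queue_inactive(void)
{
	unsigned int i;

	for (i = 0; i < USEC_TIMEOUT; i++) {
		if (!read_active_bit())
			return 0;	/* HQD deactivated */
		udelay(1);
	}
	return -1;			/* caller logs an error, as the driver does */
}

int main(void)
{
	printf("wait result: %d\n", wait_for_queue_inactive());
	return 0;
}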
@@ -4701,6 +4753,8 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
int xcc_id = 0;
struct amdgpu_device *adev = ip_block->adev;
+ INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler);
+
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(10, 1, 10):
case IP_VERSION(10, 1, 1):
@@ -4739,6 +4793,22 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
break;
}
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ adev->gfx.cleaner_shader_ptr = gfx_10_1_10_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_10_1_10_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 101 &&
+ adev->gfx.pfp_fw_version >= 158 &&
+ adev->gfx.mec_fw_version >= 152) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
case IP_VERSION(10, 3, 0):
case IP_VERSION(10, 3, 2):
case IP_VERSION(10, 3, 4):
@@ -7467,6 +7537,8 @@ static int gfx_v10_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ cancel_delayed_work_sync(&adev->gfx.idle_work);
+
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
@@ -7511,9 +7583,9 @@ static int gfx_v10_0_resume(struct amdgpu_ip_block *ip_block)
return gfx_v10_0_hw_init(ip_block);
}
-static bool gfx_v10_0_is_idle(void *handle)
+static bool gfx_v10_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
GRBM_STATUS, GUI_ACTIVE))
@@ -8431,9 +8503,9 @@ static int gfx_v10_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void gfx_v10_0_get_clockgating_state(void *handle, u64 *flags)
+static void gfx_v10_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
/* AMD_CG_SUPPORT_GFX_FGCG */
@@ -9748,6 +9820,20 @@ static void gfx_v10_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */
}
+static void gfx_v10_0_ring_begin_use(struct amdgpu_ring *ring)
+{
+ amdgpu_gfx_profile_ring_begin_use(ring);
+
+ amdgpu_gfx_enforce_isolation_ring_begin_use(ring);
+}
+
+static void gfx_v10_0_ring_end_use(struct amdgpu_ring *ring)
+{
+ amdgpu_gfx_profile_ring_end_use(ring);
+
+ amdgpu_gfx_enforce_isolation_ring_end_use(ring);
+}
+
static const struct amd_ip_funcs gfx_v10_0_ip_funcs = {
.name = "gfx_v10_0",
.early_init = gfx_v10_0_early_init,
@@ -9823,8 +9909,8 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
.emit_mem_sync = gfx_v10_0_emit_mem_sync,
.reset = gfx_v10_0_reset_kgq,
.emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader,
- .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
- .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
+ .begin_use = gfx_v10_0_ring_begin_use,
+ .end_use = gfx_v10_0_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
@@ -9864,8 +9950,8 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
.emit_mem_sync = gfx_v10_0_emit_mem_sync,
.reset = gfx_v10_0_reset_kcq,
.emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader,
- .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
- .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
+ .begin_use = gfx_v10_0_ring_begin_use,
+ .end_use = gfx_v10_0_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
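The .begin_use/.end_use rewiring above keeps each ring_funcs table pointing at a single callback per direction while chaining two concerns: the workload-profile hook and the enforce-isolation hook. A generic sketch of that composition pattern, using plain structs and printf rather than the amdgpu helpers, is shown below.

/*
 * Generic sketch of the begin_use/end_use composition introduced above:
 * one wrapper per direction chains the profile hook and the isolation
 * hook.  Plain structs stand in for the amdgpu ring and helpers.
 */
#include <stdio.h>

struct ring { const char *name; };

static void profile_begin(struct ring *r)   { printf("%s: enter profile\n", r->name); }
static void isolation_begin(struct ring *r) { printf("%s: enter isolation\n", r->name); }
static void profile_end(struct ring *r)     { printf("%s: leave profile\n", r->name); }
static void isolation_end(struct ring *r)   { printf("%s: leave isolation\n", r->name); }

/* wrappers mirroring gfx_vNN_0_ring_begin_use()/..._end_use() */
static void ring_begin_use(struct ring *r)
{
	profile_begin(r);
	isolation_begin(r);
}

static void ring_end_use(struct ring *r)
{
	profile_end(r);
	isolation_end(r);
}

struct ring_funcs {
	void (*begin_use)(struct ring *r);
	void (*end_use)(struct ring *r);
};

static const struct ring_funcs funcs = {
	.begin_use = ring_begin_use,
	.end_use   = ring_end_use,
};

int main(void)
{
	struct ring gfx = { .name = "gfx" };

	funcs.begin_use(&gfx);
	/* ... submit work ... */
	funcs.end_use(&gfx);
	return 0;
}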
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h
index 663c2572d440..5255378af53c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h
@@ -21,6 +21,41 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+/* Define the cleaner shader gfx_10_1_10 */
+static const u32 gfx_10_1_10_cleaner_shader_hex[] = {
+ 0xb0804004, 0xbf8a0000,
+ 0xbf068100, 0xbf840023,
+ 0xbe8203b8, 0xbefc0380,
+ 0x7e008480, 0x7e028480,
+ 0x7e048480, 0x7e068480,
+ 0x7e088480, 0x7e0a8480,
+ 0x7e0c8480, 0x7e0e8480,
+ 0xbefc0302, 0x80828802,
+ 0xbf84fff5, 0xbe8203ff,
+ 0x80000000, 0x87020102,
+ 0xbf840012, 0xbefe03c1,
+ 0xbeff03c1, 0xd7650001,
+ 0x0001007f, 0xd7660001,
+ 0x0002027e, 0x16020288,
+ 0xbe8203bf, 0xbefc03c1,
+ 0xd9382000, 0x00020201,
+ 0xd9386040, 0x00040401,
+ 0xd70f6a01, 0x000202ff,
+ 0x00000400, 0x80828102,
+ 0xbf84fff7, 0xbefc03ff,
+ 0x00000068, 0xbe803080,
+ 0xbe813080, 0xbe823080,
+ 0xbe833080, 0x80fc847c,
+ 0xbf84fffa, 0xbeea0480,
+ 0xbeec0480, 0xbeee0480,
+ 0xbef00480, 0xbef20480,
+ 0xbef40480, 0xbef60480,
+ 0xbef80480, 0xbefa0480,
+ 0xbf810000, 0xbf9f0000,
+ 0xbf9f0000, 0xbf9f0000,
+ 0xbf9f0000, 0xbf9f0000,
+};
+
/* Define the cleaner shader gfx_10_3_0 */
static const u32 gfx_10_3_0_cleaner_shader_hex[] = {
0xb0804004, 0xbf8a0000,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm b/drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm
new file mode 100644
index 000000000000..9ba3359253c9
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// This shader cleans LDS, SGPRs and VGPRs. It is the first 64 dwords (256 bytes) of the 256-dword cleaner shader.
+
+// GFX10.1 : Clear SGPRs, VGPRs and LDS
+// Launch 32 waves per CU (16 per SIMD) as a workgroup (threadgroup) to fill every wave slot
+// Waves are "wave32" and have 64 VGPRs each, which uses all 1024 VGPRs per SIMD
+// Waves are launched in "CU" mode, and the workgroup shares 64KB of LDS (half of the WGP's LDS)
+// It takes 2 workgroups to use all of LDS: one on each CU of the WGP
+// Each wave clears SGPRs 0 - 107
+// Each wave clears VGPRs 0 - 63
+// The first wave of the workgroup clears its 64KB of LDS
+// The shader starts with "S_BARRIER" to ensure SPI has launched all waves of the workgroup
+// before any wave in the workgroup could end. Without this, it is possible not all SGPRs get cleared.
+
+
+shader main
+ asic(GFX10.1)
+ type(CS)
+ wave_size(32)
+// Note: original source code from SQ team
+
+//
+// Create 32 waves in a threadgroup (CS waves)
+// Each allocates 64 VGPRs
+// The workgroup allocates all of LDS (64kbytes)
+//
+// Takes about 2500 clocks to run.
+// (theoretical fastest = 1024 clks vgpr + 640 clks lds = 1664 clks)
+//
+ S_BARRIER
+ s_cmp_eq_u32 s0, 1 // If bit0 of sgpr0 is set (FW sets it via COMPUTE_USER_DATA_0), clear VGPRs and LDS
+ s_cbranch_scc0 label_0023 // Skip VGPR/LDS clearing unless sgpr0 of the wave is set, scc = (s0 == 1)
+
+ s_mov_b32 s2, 0x00000038 // Loop 64/8=8 times (loop unrolled for performance)
+ s_mov_b32 m0, 0
+ //
+ // CLEAR VGPRs
+ //
+label_0005:
+ v_movreld_b32 v0, 0
+ v_movreld_b32 v1, 0
+ v_movreld_b32 v2, 0
+ v_movreld_b32 v3, 0
+ v_movreld_b32 v4, 0
+ v_movreld_b32 v5, 0
+ v_movreld_b32 v6, 0
+ v_movreld_b32 v7, 0
+ s_mov_b32 m0, s2
+ s_sub_u32 s2, s2, 8
+ s_cbranch_scc0 label_0005
+ //
+ s_mov_b32 s2, 0x80000000 // Bit31 is first_wave
+ s_and_b32 s2, s2, s0 // sgpr0 carries the tg_size (first_wave) term, since ucode only sets COMPUTE_PGM_RSRC2.tg_size_en
+ s_cbranch_scc0 label_0023 // Clean LDS only if this is the first wave of the ThreadGroup/WorkGroup
+ // CLEAR LDS
+ //
+ s_mov_b32 exec_lo, 0xffffffff
+ s_mov_b32 exec_hi, 0xffffffff
+ v_mbcnt_lo_u32_b32 v1, exec_hi, 0 // Set V1 to thread-ID (0..63)
+ v_mbcnt_hi_u32_b32 v1, exec_lo, v1 // Set V1 to thread-ID (0..63)
+ v_mul_u32_u24 v1, 0x00000008, v1 // * 8, so each thread gets a double-dword (8-byte) address
+ s_mov_b32 s2, 0x0000003f // 64 loop iterations
+ s_mov_b32 m0, 0xffffffff
+ // Clear all of LDS space
+ // Each FirstWave of WorkGroup clears 64kbyte block
+
+label_001F:
+ ds_write2_b64 v1, v[2:3], v[2:3] offset1:32
+ ds_write2_b64 v1, v[4:5], v[4:5] offset0:64 offset1:96
+ v_add_co_u32 v1, vcc, 0x00000400, v1
+ s_sub_u32 s2, s2, 1
+ s_cbranch_scc0 label_001F
+
+ //
+ // CLEAR SGPRs
+ //
+label_0023:
+ s_mov_b32 m0, 0x00000068 // Loop 108/4=27 times (loop unrolled for performance)
+label_sgpr_loop:
+ s_movreld_b32 s0, 0
+ s_movreld_b32 s1, 0
+ s_movreld_b32 s2, 0
+ s_movreld_b32 s3, 0
+ s_sub_u32 m0, m0, 4
+ s_cbranch_scc0 label_sgpr_loop
+
+ //clear vcc
+ s_mov_b64 vcc, 0 //clear vcc
+ //s_setreg_imm32_b32 hw_reg_shader_flat_scratch_lo, 0 //clear flat scratch lo SGPR
+ //s_setreg_imm32_b32 hw_reg_shader_flat_scratch_hi, 0 //clear flat scratch hi SGPR
+ s_mov_b64 ttmp0, 0 //Clear ttmp0 and ttmp1
+ s_mov_b64 ttmp2, 0 //Clear ttmp2 and ttmp3
+ s_mov_b64 ttmp4, 0 //Clear ttmp4 and ttmp5
+ s_mov_b64 ttmp6, 0 //Clear ttmp6 and ttmp7
+ s_mov_b64 ttmp8, 0 //Clear ttmp8 and ttmp9
+ s_mov_b64 ttmp10, 0 //Clear ttmp10 and ttmp11
+ s_mov_b64 ttmp12, 0 //Clear ttmp12 and ttmp13
+ s_mov_b64 ttmp14, 0 //Clear ttmp14 and ttmp15
+
+ s_endpgm
+
+end
+
+
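A rough C model of the VGPR-clear loop (label_0005) above may help follow the v_movreld_b32 indexing: m0 selects the base register, each pass zeroes eight registers, and the loop exits when the s_sub_u32 underflows. This assumes s_sub_u32 sets SCC on unsigned borrow and is an illustration of the control flow only, not an ISA-accurate simulation.

/*
 * Rough C model of the VGPR-clear loop (label_0005) in the shader above.
 * m0 selects the base register for v_movreld_b32; the loop exits when
 * s_sub_u32 underflows.  Illustration only -- not an ISA simulation.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_VGPRS 64

int main(void)
{
	uint32_t vgpr[NUM_VGPRS];
	uint32_t s2 = 0x38;	/* 56: 64/8 = 8 passes, loop unrolled by 8 */
	uint32_t m0 = 0;
	unsigned int dirty = 0;
	int borrow = 0;

	for (unsigned int i = 0; i < NUM_VGPRS; i++)
		vgpr[i] = 0xdeadbeef;		/* pretend the VGPRs hold stale data */

	do {
		for (unsigned int k = 0; k < 8; k++)	/* v_movreld_b32 v0..v7 */
			vgpr[m0 + k] = 0;
		m0 = s2;			/* s_mov_b32 m0, s2 */
		borrow = (s2 < 8);		/* s_sub_u32 sets SCC on borrow */
		s2 -= 8;
	} while (!borrow);			/* s_cbranch_scc0 label_0005 */

	for (unsigned int i = 0; i < NUM_VGPRS; i++)
		if (vgpr[i] != 0)
			dirty++;
	printf("%u of %d VGPRs left uncleared\n", dirty, NUM_VGPRS);
	return 0;
}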
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 56c06b72a70a..d8772cd6db63 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -29,7 +29,6 @@
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
-#include "amdgpu_atomfirmware.h"
#include "imu_v11_0.h"
#include "soc21.h"
#include "nvd.h"
@@ -42,7 +41,6 @@
#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"
#include "soc15.h"
-#include "soc15d.h"
#include "clearstate_gfx11.h"
#include "v11_structs.h"
#include "gfx_v11_0.h"
@@ -64,6 +62,23 @@
#define regPC_CONFIG_CNTL_1 0x194d
#define regPC_CONFIG_CNTL_1_BASE_IDX 1
+#define regCP_GFX_MQD_CONTROL_DEFAULT 0x00000100
+#define regCP_GFX_HQD_VMID_DEFAULT 0x00000000
+#define regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT 0x00000000
+#define regCP_GFX_HQD_QUANTUM_DEFAULT 0x00000a01
+#define regCP_GFX_HQD_CNTL_DEFAULT 0x00a00000
+#define regCP_RB_DOORBELL_CONTROL_DEFAULT 0x00000000
+#define regCP_GFX_HQD_RPTR_DEFAULT 0x00000000
+
+#define regCP_HQD_EOP_CONTROL_DEFAULT 0x00000006
+#define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT 0x00000000
+#define regCP_MQD_CONTROL_DEFAULT 0x00000100
+#define regCP_HQD_PQ_CONTROL_DEFAULT 0x00308509
+#define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT 0x00000000
+#define regCP_HQD_PQ_RPTR_DEFAULT 0x00000000
+#define regCP_HQD_PERSISTENT_STATE_DEFAULT 0x0be05501
+#define regCP_HQD_IB_CONTROL_DEFAULT 0x00300000
+
MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin");
@@ -98,6 +113,10 @@ MODULE_FIRMWARE("amdgpu/gc_11_5_2_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_rlc.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_5_3_pfp.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_5_3_me.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_5_3_mec.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_5_3_rlc.bin");
static const struct amdgpu_hwip_reg_entry gc_reg_list_11_0[] = {
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS),
@@ -1087,6 +1106,7 @@ static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -1552,6 +1572,8 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
int xcc_id = 0;
struct amdgpu_device *adev = ip_block->adev;
+ INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler);
+
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 2):
@@ -1568,6 +1590,7 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
@@ -1603,6 +1626,20 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}
break;
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
+ if (adev->gfx.mec_fw_version >= 26 &&
+ adev->mes.fw_version[0] >= 114) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
default:
adev->gfx.enable_cleaner_shader = false;
break;
@@ -2926,7 +2963,8 @@ static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
IP_VERSION(11, 0, 4) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 0) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 1) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 2))
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 2) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 3))
bootload_status = RREG32_SOC15(GC, 0,
regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1);
else
@@ -3958,7 +3996,7 @@ static void gfx_v11_0_gfx_mqd_set_priority(struct amdgpu_device *adev,
if (prop->hqd_pipe_priority == AMDGPU_GFX_PIPE_PRIO_HIGH)
priority = 1;
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY);
+ tmp = regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, priority);
mqd->cp_gfx_hqd_queue_priority = tmp;
}
@@ -3980,14 +4018,14 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
/* set up mqd control */
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL);
+ tmp = regCP_GFX_MQD_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
mqd->cp_gfx_mqd_control = tmp;
/* set up gfx_hqd_vmid with 0x0 to indicate the ring buffer's vmid */
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID);
+ tmp = regCP_GFX_HQD_VMID_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
mqd->cp_gfx_hqd_vmid = 0;
@@ -3995,7 +4033,7 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
gfx_v11_0_gfx_mqd_set_priority(adev, mqd, prop);
/* set up time quantum */
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM);
+ tmp = regCP_GFX_HQD_QUANTUM_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
mqd->cp_gfx_hqd_quantum = tmp;
@@ -4017,7 +4055,7 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
/* set up the gfx_hqd_control, similar as CP_RB0_CNTL */
rb_bufsz = order_base_2(prop->queue_size / 4) - 1;
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL);
+ tmp = regCP_GFX_HQD_CNTL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
#ifdef __BIG_ENDIAN
@@ -4026,7 +4064,7 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_gfx_hqd_cntl = tmp;
/* set up cp_doorbell_control */
- tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
+ tmp = regCP_RB_DOORBELL_CONTROL_DEFAULT;
if (prop->use_doorbell) {
tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
DOORBELL_OFFSET, prop->doorbell_index);
@@ -4038,7 +4076,7 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_rb_doorbell_control = tmp;
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
- mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR);
+ mqd->cp_gfx_hqd_rptr = regCP_GFX_HQD_RPTR_DEFAULT;
/* active the queue */
mqd->cp_gfx_hqd_active = 1;
@@ -4124,14 +4162,14 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL);
+ tmp = regCP_HQD_EOP_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
(order_base_2(GFX11_MEC_HPD_SIZE / 4) - 1));
mqd->cp_hqd_eop_control = tmp;
/* enable doorbell? */
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
+ tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
if (prop->use_doorbell) {
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
@@ -4160,7 +4198,7 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
/* set MQD vmid to 0 */
- tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL);
+ tmp = regCP_MQD_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
mqd->cp_mqd_control = tmp;
@@ -4170,7 +4208,7 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
/* set up the HQD, this is similar to CP_RB0_CNTL */
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL);
+ tmp = regCP_HQD_PQ_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
(order_base_2(prop->queue_size / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
@@ -4196,7 +4234,7 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
tmp = 0;
/* enable the doorbell if requested */
if (prop->use_doorbell) {
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
+ tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_OFFSET, prop->doorbell_index);
@@ -4211,17 +4249,17 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_pq_doorbell_control = tmp;
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
- mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR);
+ mqd->cp_hqd_pq_rptr = regCP_HQD_PQ_RPTR_DEFAULT;
/* set the vmid for the queue */
mqd->cp_hqd_vmid = 0;
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE);
+ tmp = regCP_HQD_PERSISTENT_STATE_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55);
mqd->cp_hqd_persistent_state = tmp;
/* set MIN_IB_AVAIL_SIZE */
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL);
+ tmp = regCP_HQD_IB_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
mqd->cp_hqd_ib_control = tmp;
@@ -4734,6 +4772,8 @@ static int gfx_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ cancel_delayed_work_sync(&adev->gfx.idle_work);
+
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
@@ -4778,9 +4818,9 @@ static int gfx_v11_0_resume(struct amdgpu_ip_block *ip_block)
return gfx_v11_0_hw_init(ip_block);
}
-static bool gfx_v11_0_is_idle(void *handle)
+static bool gfx_v11_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS),
GRBM_STATUS, GUI_ACTIVE))
@@ -5448,6 +5488,7 @@ static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1);
break;
default:
@@ -5485,6 +5526,7 @@ static int gfx_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
if (!enable)
amdgpu_gfx_off_ctrl(adev, false);
@@ -5518,6 +5560,7 @@ static int gfx_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
gfx_v11_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
@@ -5528,9 +5571,9 @@ static int gfx_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void gfx_v11_0_get_clockgating_state(void *handle, u64 *flags)
+static void gfx_v11_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
/* AMD_CG_SUPPORT_GFX_MGCG */
@@ -6826,6 +6869,20 @@ static void gfx_v11_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */
}
+static void gfx_v11_0_ring_begin_use(struct amdgpu_ring *ring)
+{
+ amdgpu_gfx_profile_ring_begin_use(ring);
+
+ amdgpu_gfx_enforce_isolation_ring_begin_use(ring);
+}
+
+static void gfx_v11_0_ring_end_use(struct amdgpu_ring *ring)
+{
+ amdgpu_gfx_profile_ring_end_use(ring);
+
+ amdgpu_gfx_enforce_isolation_ring_end_use(ring);
+}
+
static const struct amd_ip_funcs gfx_v11_0_ip_funcs = {
.name = "gfx_v11_0",
.early_init = gfx_v11_0_early_init,
@@ -6900,8 +6957,8 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
.emit_mem_sync = gfx_v11_0_emit_mem_sync,
.reset = gfx_v11_0_reset_kgq,
.emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
- .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
- .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
+ .begin_use = gfx_v11_0_ring_begin_use,
+ .end_use = gfx_v11_0_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
@@ -6942,8 +6999,8 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
.emit_mem_sync = gfx_v11_0_emit_mem_sync,
.reset = gfx_v11_0_reset_kcq,
.emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
- .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
- .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
+ .begin_use = gfx_v11_0_ring_begin_use,
+ .end_use = gfx_v11_0_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
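The MQD-init hunks above replace RREG32_SOC15() reads with compile-time *_DEFAULT values and then patch individual fields before the MQD is handed to the CP. The sketch below shows the shape of that pattern with hand-rolled mask/shift helpers; the register name and field layout are made up for illustration, while the driver itself uses REG_SET_FIELD() with the generated register headers.

/*
 * Sketch of the "start from a default value, then patch fields" pattern
 * used in the MQD init hunks above.  The mask/shift helpers stand in for
 * REG_SET_FIELD(); the field layout below is invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

/* made-up field layout of a doorbell-control style register */
#define DOORBELL_OFFSET_SHIFT	2
#define DOORBELL_OFFSET_MASK	(0x3ffffff << DOORBELL_OFFSET_SHIFT)
#define DOORBELL_EN_SHIFT	30
#define DOORBELL_EN_MASK	(0x1 << DOORBELL_EN_SHIFT)

#define REG_DOORBELL_CONTROL_DEFAULT	0x00000000

static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	/* start from the hardware default instead of reading the register */
	uint32_t tmp = REG_DOORBELL_CONTROL_DEFAULT;

	tmp = set_field(tmp, DOORBELL_OFFSET_MASK, DOORBELL_OFFSET_SHIFT, 0x1234);
	tmp = set_field(tmp, DOORBELL_EN_MASK, DOORBELL_EN_SHIFT, 1);

	printf("doorbell control = 0x%08x\n", tmp);
	return 0;
}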
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 2523221a2519..dceb5ad38862 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -29,7 +29,6 @@
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
-#include "amdgpu_atomfirmware.h"
#include "imu_v12_0.h"
#include "soc24.h"
#include "nvd.h"
@@ -40,7 +39,6 @@
#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"
#include "soc15.h"
-#include "soc15d.h"
#include "clearstate_gfx12.h"
#include "v12_structs.h"
#include "gfx_v12_0.h"
@@ -52,6 +50,24 @@
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
+#define regCP_GFX_MQD_CONTROL_DEFAULT 0x00000100
+#define regCP_GFX_HQD_VMID_DEFAULT 0x00000000
+#define regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT 0x00000000
+#define regCP_GFX_HQD_QUANTUM_DEFAULT 0x00000a01
+#define regCP_GFX_HQD_CNTL_DEFAULT 0x00f00000
+#define regCP_RB_DOORBELL_CONTROL_DEFAULT 0x00000000
+#define regCP_GFX_HQD_RPTR_DEFAULT 0x00000000
+
+#define regCP_HQD_EOP_CONTROL_DEFAULT 0x00000006
+#define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT 0x00000000
+#define regCP_MQD_CONTROL_DEFAULT 0x00000100
+#define regCP_HQD_PQ_CONTROL_DEFAULT 0x00308509
+#define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT 0x00000000
+#define regCP_HQD_PQ_RPTR_DEFAULT 0x00000000
+#define regCP_HQD_PERSISTENT_STATE_DEFAULT 0x0be05501
+#define regCP_HQD_IB_CONTROL_DEFAULT 0x00300000
+
+
MODULE_FIRMWARE("amdgpu/gc_12_0_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_0_mec.bin");
@@ -1331,6 +1347,8 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
int xcc_id = 0;
struct amdgpu_device *adev = ip_block->adev;
+ INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler);
+
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
@@ -2437,7 +2455,7 @@ static int gfx_v12_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
(void **)&adev->gfx.me.me_fw_data_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to create me data bo\n", r);
- gfx_v12_0_pfp_fini(adev);
+ gfx_v12_0_me_fini(adev);
return r;
}
@@ -2619,7 +2637,6 @@ static int gfx_v12_0_cp_gfx_resume(struct amdgpu_device *adev)
u32 tmp;
u32 rb_bufsz;
u64 rb_addr, rptr_addr, wptr_gpu_addr;
- u32 i;
/* Set the write pointer delay */
WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0);
@@ -2674,12 +2691,6 @@ static int gfx_v12_0_cp_gfx_resume(struct amdgpu_device *adev)
/* start the ring */
gfx_v12_0_cp_gfx_start(adev);
-
- for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
- ring->sched.ready = true;
- }
-
return 0;
}
@@ -2891,25 +2902,25 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
/* set up mqd control */
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL);
+ tmp = regCP_GFX_MQD_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
mqd->cp_gfx_mqd_control = tmp;
/* set up gfx_hqd_vmid with 0x0 to indicate the ring buffer's vmid */
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID);
+ tmp = regCP_GFX_HQD_VMID_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
mqd->cp_gfx_hqd_vmid = 0;
/* set up default queue priority level
* 0x0 = low priority, 0x1 = high priority */
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY);
+ tmp = regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0);
mqd->cp_gfx_hqd_queue_priority = tmp;
/* set up time quantum */
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM);
+ tmp = regCP_GFX_HQD_QUANTUM_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
mqd->cp_gfx_hqd_quantum = tmp;
@@ -2931,7 +2942,7 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
/* set up the gfx_hqd_control, similar as CP_RB0_CNTL */
rb_bufsz = order_base_2(prop->queue_size / 4) - 1;
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL);
+ tmp = regCP_GFX_HQD_CNTL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
#ifdef __BIG_ENDIAN
@@ -2940,7 +2951,7 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_gfx_hqd_cntl = tmp;
/* set up cp_doorbell_control */
- tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
+ tmp = regCP_RB_DOORBELL_CONTROL_DEFAULT;
if (prop->use_doorbell) {
tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
DOORBELL_OFFSET, prop->doorbell_index);
@@ -2952,7 +2963,7 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_rb_doorbell_control = tmp;
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
- mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR);
+ mqd->cp_gfx_hqd_rptr = regCP_GFX_HQD_RPTR_DEFAULT;
/* active the queue */
mqd->cp_gfx_hqd_active = 1;
@@ -3019,10 +3030,6 @@ static int gfx_v12_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
if (r)
goto done;
- for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
- ring->sched.ready = true;
- }
done:
return r;
}
@@ -3047,14 +3054,14 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL);
+ tmp = regCP_HQD_EOP_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
(order_base_2(GFX12_MEC_HPD_SIZE / 4) - 1));
mqd->cp_hqd_eop_control = tmp;
/* enable doorbell? */
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
+ tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
if (prop->use_doorbell) {
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
@@ -3083,7 +3090,7 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
/* set MQD vmid to 0 */
- tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL);
+ tmp = regCP_MQD_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
mqd->cp_mqd_control = tmp;
@@ -3093,7 +3100,7 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
/* set up the HQD, this is similar to CP_RB0_CNTL */
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL);
+ tmp = regCP_HQD_PQ_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
(order_base_2(prop->queue_size / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
@@ -3118,7 +3125,7 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
tmp = 0;
/* enable the doorbell if requested */
if (prop->use_doorbell) {
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
+ tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_OFFSET, prop->doorbell_index);
@@ -3133,17 +3140,17 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_pq_doorbell_control = tmp;
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
- mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR);
+ mqd->cp_hqd_pq_rptr = regCP_HQD_PQ_RPTR_DEFAULT;
/* set the vmid for the queue */
mqd->cp_hqd_vmid = 0;
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE);
+ tmp = regCP_HQD_PERSISTENT_STATE_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55);
mqd->cp_hqd_persistent_state = tmp;
/* set MIN_IB_AVAIL_SIZE */
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL);
+ tmp = regCP_HQD_IB_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
mqd->cp_hqd_ib_control = tmp;
@@ -3648,6 +3655,8 @@ static int gfx_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
uint32_t tmp;
+ cancel_delayed_work_sync(&adev->gfx.idle_work);
+
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
@@ -3693,9 +3702,9 @@ static int gfx_v12_0_resume(struct amdgpu_ip_block *ip_block)
return gfx_v12_0_hw_init(ip_block);
}
-static bool gfx_v12_0_is_idle(void *handle)
+static bool gfx_v12_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS),
GRBM_STATUS, GUI_ACTIVE))
@@ -4147,9 +4156,9 @@ static int gfx_v12_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void gfx_v12_0_get_clockgating_state(void *handle, u64 *flags)
+static void gfx_v12_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
/* AMD_CG_SUPPORT_GFX_MGCG */
@@ -5280,6 +5289,20 @@ static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
return amdgpu_ring_test_ring(ring);
}
+static void gfx_v12_0_ring_begin_use(struct amdgpu_ring *ring)
+{
+ amdgpu_gfx_profile_ring_begin_use(ring);
+
+ amdgpu_gfx_enforce_isolation_ring_begin_use(ring);
+}
+
+static void gfx_v12_0_ring_end_use(struct amdgpu_ring *ring)
+{
+ amdgpu_gfx_profile_ring_end_use(ring);
+
+ amdgpu_gfx_enforce_isolation_ring_end_use(ring);
+}
+
static const struct amd_ip_funcs gfx_v12_0_ip_funcs = {
.name = "gfx_v12_0",
.early_init = gfx_v12_0_early_init,
@@ -5345,8 +5368,8 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = {
.emit_mem_sync = gfx_v12_0_emit_mem_sync,
.reset = gfx_v12_0_reset_kgq,
.emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader,
- .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
- .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
+ .begin_use = gfx_v12_0_ring_begin_use,
+ .end_use = gfx_v12_0_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = {
@@ -5384,8 +5407,8 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = {
.emit_mem_sync = gfx_v12_0_emit_mem_sync,
.reset = gfx_v12_0_reset_kcq,
.emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader,
- .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
- .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
+ .begin_use = gfx_v12_0_ring_begin_use,
+ .end_use = gfx_v12_0_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_kiq = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index f26e2cdec07a..13fbee46417a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -28,19 +28,30 @@
#include "amdgpu_gfx.h"
#include "amdgpu_ucode.h"
#include "clearstate_si.h"
+#include "si.h"
+#include "sid.h"
+
#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
+
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
+
#include "gca/gfx_6_0_d.h"
#include "gca/gfx_6_0_sh_mask.h"
+#include "gca/gfx_7_2_enum.h"
+
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
+
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
-#include "gca/gfx_7_2_enum.h"
+
#include "si_enums.h"
-#include "si.h"
+
+#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
+#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
+#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001
static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -3167,9 +3178,9 @@ static int gfx_v6_0_resume(struct amdgpu_ip_block *ip_block)
return gfx_v6_0_hw_init(ip_block);
}
-static bool gfx_v6_0_is_idle(void *handle)
+static bool gfx_v6_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
return false;
@@ -3183,7 +3194,7 @@ static int gfx_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (gfx_v6_0_is_idle(adev))
+ if (gfx_v6_0_is_idle(ip_block))
return 0;
udelay(1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 84745b2453ab..8181bd0e4f18 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4515,9 +4515,9 @@ static int gfx_v7_0_resume(struct amdgpu_ip_block *ip_block)
return gfx_v7_0_hw_init(ip_block);
}
-static bool gfx_v7_0_is_idle(void *handle)
+static bool gfx_v7_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 6a025438f9d0..d116a2e2f469 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4851,9 +4851,9 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
return r;
}
-static bool gfx_v8_0_is_idle(void *handle)
+static bool gfx_v8_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE)
|| RREG32(mmGRBM_STATUS2) != 0x8)
@@ -4892,7 +4892,7 @@ static int gfx_v8_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (gfx_v8_0_is_idle(adev))
+ if (gfx_v8_0_is_idle(ip_block))
return 0;
udelay(1);
@@ -5452,9 +5452,9 @@ static int gfx_v8_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void gfx_v8_0_get_clockgating_state(void *handle, u64 *flags)
+static void gfx_v8_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
if (amdgpu_sriov_vf(adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 0dce4421418c..d345285ea885 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2637,7 +2637,10 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
u32 tmp;
int i;
- WREG32_FIELD15_RLC(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
+ if (!amdgpu_sriov_vf(adev) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)) {
+ WREG32_FIELD15_RLC(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
+ }
gfx_v9_0_tiling_mode_table_init(adev);
@@ -4042,7 +4045,8 @@ static int gfx_v9_0_hw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) &&
+ !amdgpu_sriov_vf(adev))
gfx_v9_4_2_set_power_brake_sequence(adev);
return r;
@@ -4110,9 +4114,9 @@ static int gfx_v9_0_resume(struct amdgpu_ip_block *ip_block)
return gfx_v9_0_hw_init(ip_block);
}
-static bool gfx_v9_0_is_idle(void *handle)
+static bool gfx_v9_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
GRBM_STATUS, GUI_ACTIVE))
@@ -4127,7 +4131,7 @@ static int gfx_v9_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (gfx_v9_0_is_idle(adev))
+ if (gfx_v9_0_is_idle(ip_block))
return 0;
udelay(1);
}
@@ -5241,7 +5245,7 @@ static int gfx_v9_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 3, 0):
if (!enable)
- amdgpu_gfx_off_ctrl(adev, false);
+ amdgpu_gfx_off_ctrl_immediate(adev, false);
if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
@@ -5263,10 +5267,10 @@ static int gfx_v9_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
if (enable)
- amdgpu_gfx_off_ctrl(adev, true);
+ amdgpu_gfx_off_ctrl_immediate(adev, true);
break;
case IP_VERSION(9, 2, 1):
- amdgpu_gfx_off_ctrl(adev, enable);
+ amdgpu_gfx_off_ctrl_immediate(adev, enable);
break;
default:
break;
@@ -5301,9 +5305,9 @@ static int gfx_v9_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void gfx_v9_0_get_clockgating_state(void *handle, u64 *flags)
+static void gfx_v9_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
if (amdgpu_sriov_vf(adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
index f53b379d8971..6028afd81690 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -27,7 +27,6 @@
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
-#include "amdgpu_atomfirmware.h"
#include "amdgpu_pm.h"
#include "gc/gc_9_4_1_offset.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 2ba185875baa..736398b0d16d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -349,18 +349,7 @@ static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG,
GOLDEN_GB_ADDR_CONFIG);
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) {
- WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2, SPARE, 0x1);
- } else {
- /* Golden settings applied by driver for ASIC with rev_id 0 */
- if (adev->rev_id == 0) {
- WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1,
- REDUCE_FIFO_DEPTH_BY_2, 2);
- } else {
- WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2,
- SPARE, 0x1);
- }
- }
+ WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2, SPARE, 0x1);
}
}
@@ -563,17 +552,6 @@ out:
return err;
}
-static bool gfx_v9_4_3_should_disable_gfxoff(struct pci_dev *pdev)
-{
- return true;
-}
-
-static void gfx_v9_4_3_check_if_need_gfxoff(struct amdgpu_device *adev)
-{
- if (gfx_v9_4_3_should_disable_gfxoff(adev->pdev))
- adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
-}
-
static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
const char *chip_name)
{
@@ -600,8 +578,6 @@ static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
- gfx_v9_4_3_check_if_need_gfxoff(adev);
-
out:
if (err)
amdgpu_ucode_release(&adev->gfx.mec_fw);
@@ -891,12 +867,14 @@ static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle,
switch (type) {
case ACA_SMU_TYPE_UE:
- ret = aca_error_cache_log_bank_error(handle, &info,
- ACA_ERROR_TYPE_UE, 1ULL);
+ bank->aca_err_type = ACA_BANK_ERR_UE_DE_DECODE(bank);
+ ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 1ULL);
break;
case ACA_SMU_TYPE_CE:
+ bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank);
ret = aca_error_cache_log_bank_error(handle, &info,
- ACA_ERROR_TYPE_CE, ACA_REG__MISC0__ERRCNT(misc0));
+ bank->aca_err_type,
+ ACA_REG__MISC0__ERRCNT(misc0));
break;
default:
return -EINVAL;
@@ -937,28 +915,15 @@ static const struct aca_info gfx_v9_4_3_aca_info = {
static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
{
- u32 gb_addr_config;
-
adev->gfx.funcs = &gfx_v9_4_3_gfx_funcs;
adev->gfx.ras = &gfx_v9_4_3_ras;
- switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
- case IP_VERSION(9, 4, 3):
- case IP_VERSION(9, 4, 4):
- case IP_VERSION(9, 5, 0):
- adev->gfx.config.max_hw_contexts = 8;
- adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
- adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
- adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
- adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
- gb_addr_config = RREG32_SOC15(GC, GET_INST(GC, 0), regGB_ADDR_CONFIG);
- break;
- default:
- BUG();
- break;
- }
-
- adev->gfx.config.gb_addr_config = gb_addr_config;
+ adev->gfx.config.max_hw_contexts = 8;
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
+ adev->gfx.config.gb_addr_config = GOLDEN_GB_ADDR_CONFIG;
adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
REG_GET_FIELD(
@@ -1369,10 +1334,8 @@ static void gfx_v9_4_3_xcc_init_pg(struct amdgpu_device *adev, int xcc_id)
{
/*
* Rlc save restore list is workable since v2_1.
- * And it's needed by gfxoff feature.
*/
- if (adev->gfx.rlc.is_rlc_v2_1)
- gfx_v9_4_3_xcc_enable_save_restore_machine(adev, xcc_id);
+ gfx_v9_4_3_xcc_enable_save_restore_machine(adev, xcc_id);
}
static void gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id)
@@ -1857,7 +1820,7 @@ static int gfx_v9_4_3_xcc_mqd_init(struct amdgpu_ring *ring, int xcc_id)
DOORBELL_SOURCE, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_HIT, 0);
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_multi_vf_mode(adev))
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_MODE, 1);
} else {
@@ -2415,9 +2378,9 @@ static int gfx_v9_4_3_resume(struct amdgpu_ip_block *ip_block)
return gfx_v9_4_3_hw_init(ip_block);
}
-static bool gfx_v9_4_3_is_idle(void *handle)
+static bool gfx_v9_4_3_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, num_xcc;
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
@@ -2435,7 +2398,7 @@ static int gfx_v9_4_3_wait_for_idle(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (gfx_v9_4_3_is_idle(adev))
+ if (gfx_v9_4_3_is_idle(ip_block))
return 0;
udelay(1);
}
@@ -2795,22 +2758,16 @@ static int gfx_v9_4_3_set_clockgating_state(struct amdgpu_ip_block *ip_block,
return 0;
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
- switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
- case IP_VERSION(9, 4, 3):
- case IP_VERSION(9, 4, 4):
- for (i = 0; i < num_xcc; i++)
- gfx_v9_4_3_xcc_update_gfx_clock_gating(
- adev, state == AMD_CG_STATE_GATE, i);
- break;
- default:
- break;
- }
+ for (i = 0; i < num_xcc; i++)
+ gfx_v9_4_3_xcc_update_gfx_clock_gating(
+ adev, state == AMD_CG_STATE_GATE, i);
+
return 0;
}
-static void gfx_v9_4_3_get_clockgating_state(void *handle, u64 *flags)
+static void gfx_v9_4_3_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
if (amdgpu_sriov_vf(adev))
@@ -4867,34 +4824,13 @@ static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev)
static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)
{
- /* init asci gds info */
- switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
- case IP_VERSION(9, 4, 3):
- case IP_VERSION(9, 4, 4):
- case IP_VERSION(9, 5, 0):
- /* 9.4.3 removed all the GDS internal memory,
- * only support GWS opcode in kernel, like barrier
- * semaphore.etc */
- adev->gds.gds_size = 0;
- break;
- default:
- adev->gds.gds_size = 0x10000;
- break;
- }
-
- switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
- case IP_VERSION(9, 4, 3):
- case IP_VERSION(9, 4, 4):
- case IP_VERSION(9, 5, 0):
- /* deprecated for 9.4.3, no usage at all */
- adev->gds.gds_compute_max_wave_id = 0;
- break;
- default:
- /* this really depends on the chip */
- adev->gds.gds_compute_max_wave_id = 0x7ff;
- break;
- }
+ /* 9.4.3 variants removed all the GDS internal memory;
+ * only GWS opcodes are supported in the kernel, e.g. barrier
+ * and semaphore */
+ /* init asic gds info */
+ adev->gds.gds_size = 0;
+ adev->gds.gds_compute_max_wave_id = 0;
adev->gds.gws_size = 64;
adev->gds.oa_size = 16;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 0e3ddea7b8e0..a7bfc9f41d0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -92,12 +92,12 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
uint64_t value;
- /* Program the AGP BAR */
- WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BASE, 0);
- WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
- WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
-
if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) {
+ /* Program the AGP BAR */
+ WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BASE, 0);
+ WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
+ WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
+
/* Program the system aperture low logical page number. */
WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
index 5470cef7e9bd..cb25f7f0dfc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
@@ -313,6 +313,16 @@ gfxhub_v1_2_xcc_disable_identity_aperture(struct amdgpu_device *adev,
}
}
+static inline bool
+gfxhub_v1_2_per_process_xnack_support(struct amdgpu_device *adev)
+{
+ /*
+ * TODO: Check if this function is really needed, so far only 9.4.3
+ * variants use GFXHUB 1.2
+ */
+ return !!adev->aid_mask;
+}
+
static void gfxhub_v1_2_xcc_setup_vmid_config(struct amdgpu_device *adev,
uint32_t xcc_mask)
{
@@ -355,7 +365,7 @@ static void gfxhub_v1_2_xcc_setup_vmid_config(struct amdgpu_device *adev,
PAGE_TABLE_BLOCK_SIZE,
block_size);
/* Send no-retry XNACK on fault to suppress VM fault storm.
- * On 9.4.2 and 9.4.3, XNACK can be enabled in
+ * On 9.4.3 variants, XNACK can be enabled in
* the SQ per-process.
* Retry faults need to be enabled for that to work.
*/
@@ -363,14 +373,8 @@ static void gfxhub_v1_2_xcc_setup_vmid_config(struct amdgpu_device *adev,
tmp, VM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
!adev->gmc.noretry ||
- amdgpu_ip_version(adev, GC_HWIP, 0) ==
- IP_VERSION(9, 4, 2) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) ==
- IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) ==
- IP_VERSION(9, 4, 4) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) ==
- IP_VERSION(9, 5, 0));
+ gfxhub_v1_2_per_process_xnack_support(
+ adev));
WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_CNTL,
i * hub->ctx_distance, tmp);
WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
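The gfxhub_v1_2 hunk above folds a chain of GC IP-version comparisons into one predicate helper keyed off aid_mask. The sketch below is a generic illustration of that refactor; the packed version encoding and the aid_mask stand-in are invented, not the real amdgpu layout.

/*
 * Generic sketch of folding a chain of version comparisons into one
 * predicate helper, as the gfxhub_v1_2 hunk above does.  Version values
 * and the aid_mask field are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

struct device_info {
	unsigned int gc_version;	/* packed IP version, e.g. 0x090403 */
	unsigned int aid_mask;		/* non-zero only on the multi-AID parts */
};

/* before: every caller repeats the version list */
static bool xnack_supported_old(const struct device_info *d)
{
	return d->gc_version == 0x090402 ||
	       d->gc_version == 0x090403 ||
	       d->gc_version == 0x090404 ||
	       d->gc_version == 0x090500;
}

/* after: a single helper keyed off a property those parts share */
static bool xnack_supported_new(const struct device_info *d)
{
	return !!d->aid_mask;
}

int main(void)
{
	struct device_info d = { .gc_version = 0x090403, .aid_mask = 0xf };

	printf("old: %d, new: %d\n",
	       xnack_supported_old(&d), xnack_supported_new(&d));
	return 0;
}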
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index 17509f32f61a..deb95fab02df 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
@@ -505,42 +505,6 @@ static void gfxhub_v2_1_init(struct amdgpu_device *adev)
hub->vmhub_funcs = &gfxhub_v2_1_vmhub_funcs;
}
-static int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev)
-{
- u32 xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmGCMC_VM_XGMI_LFB_CNTL);
- u32 max_region =
- REG_GET_FIELD(xgmi_lfb_cntl, GCMC_VM_XGMI_LFB_CNTL, PF_MAX_REGION);
- u32 max_num_physical_nodes = 0;
- u32 max_physical_node_id = 0;
-
- switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
- case IP_VERSION(4, 8, 0):
- max_num_physical_nodes = 4;
- max_physical_node_id = 3;
- break;
- default:
- return -EINVAL;
- }
-
- /* PF_MAX_REGION=0 means xgmi is disabled */
- if (max_region) {
- adev->gmc.xgmi.num_physical_nodes = max_region + 1;
- if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes)
- return -EINVAL;
-
- adev->gmc.xgmi.physical_node_id =
- REG_GET_FIELD(xgmi_lfb_cntl, GCMC_VM_XGMI_LFB_CNTL, PF_LFB_REGION);
- if (adev->gmc.xgmi.physical_node_id > max_physical_node_id)
- return -EINVAL;
-
- adev->gmc.xgmi.node_segment_size = REG_GET_FIELD(
- RREG32_SOC15(GC, 0, mmGCMC_VM_XGMI_LFB_SIZE),
- GCMC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24;
- }
-
- return 0;
-}
-
static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
{
int i;
@@ -696,7 +660,6 @@ const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs = {
.gart_disable = gfxhub_v2_1_gart_disable,
.set_fault_enable_default = gfxhub_v2_1_set_fault_enable_default,
.init = gfxhub_v2_1_init,
- .get_xgmi_info = gfxhub_v2_1_get_xgmi_info,
.utcl2_harvest = gfxhub_v2_1_utcl2_harvest,
.mode2_save_regs = gfxhub_v2_1_save_regs,
.mode2_restore_regs = gfxhub_v2_1_restore_regs,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 9bedca9a79c6..95d894a231fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -1076,7 +1076,7 @@ static int gmc_v10_0_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static bool gmc_v10_0_is_idle(void *handle)
+static bool gmc_v10_0_is_idle(struct amdgpu_ip_block *ip_block)
{
/* MC is always ready in GMC v10.*/
return true;
@@ -1115,9 +1115,9 @@ static int gmc_v10_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
return athub_v2_0_set_clockgating(adev, state);
}
-static void gmc_v10_0_get_clockgating_state(void *handle, u64 *flags)
+static void gmc_v10_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 3) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 4))
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 72751ab4c766..ad099f136f84 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -579,6 +579,7 @@ static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev)
break;
case IP_VERSION(3, 3, 0):
case IP_VERSION(3, 3, 1):
+ case IP_VERSION(3, 3, 2):
adev->mmhub.funcs = &mmhub_v3_3_funcs;
break;
default:
@@ -596,6 +597,7 @@ static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
adev->gfxhub.funcs = &gfxhub_v11_5_0_funcs;
break;
default:
@@ -759,6 +761,7 @@ static int gmc_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
/*
@@ -984,7 +987,7 @@ static int gmc_v11_0_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static bool gmc_v11_0_is_idle(void *handle)
+static bool gmc_v11_0_is_idle(struct amdgpu_ip_block *ip_block)
{
/* MC is always ready in GMC v11.*/
return true;
@@ -1009,9 +1012,9 @@ static int gmc_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
return athub_v3_0_set_clockgating(adev, state);
}
-static void gmc_v11_0_get_clockgating_state(void *handle, u64 *flags)
+static void gmc_v11_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->mmhub.funcs->get_clockgating(adev, flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index b749f1c3f6a9..05c026d0b0d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -501,9 +501,6 @@ static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
uint64_t *flags)
{
struct amdgpu_bo *bo = mapping->bo_va->base.bo;
- struct amdgpu_device *bo_adev;
- bool coherent, is_system;
-
*flags &= ~AMDGPU_PTE_EXECUTABLE;
*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
@@ -519,25 +516,11 @@ static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
*flags &= ~AMDGPU_PTE_VALID;
}
- if (!bo)
- return;
-
- if (bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
- AMDGPU_GEM_CREATE_UNCACHED))
- *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
-
- bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
- coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT;
- is_system = (bo->tbo.resource->mem_type == TTM_PL_TT) ||
- (bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT);
-
if (bo && bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
*flags |= AMDGPU_PTE_DCC;
- /* WA for HW bug */
- if (is_system || ((bo_adev != adev) && coherent))
- *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
-
+ if (bo && bo->flags & AMDGPU_GEM_CREATE_UNCACHED)
+ *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
}
static unsigned gmc_v12_0_get_vbios_fb_size(struct amdgpu_device *adev)
@@ -984,7 +967,7 @@ static int gmc_v12_0_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static bool gmc_v12_0_is_idle(void *handle)
+static bool gmc_v12_0_is_idle(struct amdgpu_ip_block *ip_block)
{
/* MC is always ready in GMC v11.*/
return true;
@@ -1009,9 +992,9 @@ static int gmc_v12_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
return athub_v4_1_0_set_clockgating(adev, state);
}
-static void gmc_v12_0_get_clockgating_state(void *handle, u64 *flags)
+static void gmc_v12_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->mmhub.funcs->get_clockgating(adev, flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 2245dda92021..a992e79d9581 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -957,9 +957,9 @@ static int gmc_v6_0_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static bool gmc_v6_0_is_idle(void *handle)
+static bool gmc_v6_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
@@ -976,7 +976,7 @@ static int gmc_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (gmc_v6_0_is_idle(adev))
+ if (gmc_v6_0_is_idle(ip_block))
return 0;
udelay(1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 9aac4b1101e3..83e39f16044a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1142,9 +1142,9 @@ static int gmc_v7_0_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static bool gmc_v7_0_is_idle(void *handle)
+static bool gmc_v7_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index d06585207c33..99ca08e9bdb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1263,9 +1263,9 @@ static int gmc_v8_0_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static bool gmc_v8_0_is_idle(void *handle)
+static bool gmc_v8_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
@@ -1686,9 +1686,9 @@ static int gmc_v8_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void gmc_v8_0_get_clockgating_state(void *handle, u64 *flags)
+static void gmc_v8_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
if (amdgpu_sriov_vf(adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 291549765c38..783e0c3b86b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -411,6 +411,11 @@ static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
(0x001d43e0 + 0x00001800),
};
+static inline bool gmc_v9_0_is_multi_chiplet(struct amdgpu_device *adev)
+{
+ return !!adev->aid_mask;
+}
+
static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned int type,
@@ -647,9 +652,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
addr, entry->client_id,
soc15_ih_clientid_name[entry->client_id]);
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ if (gmc_v9_0_is_multi_chiplet(adev))
dev_err(adev->dev, " cookie node_id %d fault from die %s%d%s\n",
node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4,
node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? ".XCD1" : "");
@@ -798,9 +801,7 @@ static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
uint32_t vmhub)
{
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ gmc_v9_0_is_multi_chiplet(adev))
return false;
return ((vmhub == AMDGPU_MMHUB0(0) ||
@@ -1278,9 +1279,8 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
/* Only GFX 9.4.3 APUs associate GPUs with NUMA nodes. Local system
* memory can use more efficient MTYPEs.
*/
- if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3) &&
- amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 4) &&
- amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 5, 0))
+ if (!(adev->flags & AMD_IS_APU) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3))
return;
/* Only direct-mapped memory allows us to determine the NUMA node from
@@ -1498,13 +1498,13 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_second[0][0];
break;
case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 5, 0):
adev->umc.max_ras_err_cnt_per_query =
UMC_V12_0_TOTAL_CHANNEL_NUM(adev) * UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
adev->umc.channel_inst_num = UMC_V12_0_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V12_0_UMC_INSTANCE_NUM;
adev->umc.node_inst_num /= UMC_V12_0_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V12_0_PER_CHANNEL_OFFSET;
- adev->umc.active_mask = adev->aid_mask;
adev->umc.retire_unit = UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
adev->umc.ras = &umc_v12_0_ras;
@@ -1524,6 +1524,7 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
adev->mmhub.funcs = &mmhub_v1_7_funcs;
break;
case IP_VERSION(1, 8, 0):
+ case IP_VERSION(1, 8, 1):
adev->mmhub.funcs = &mmhub_v1_8_funcs;
break;
default:
@@ -1556,9 +1557,7 @@ static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ if (gmc_v9_0_is_multi_chiplet(adev))
adev->gfxhub.funcs = &gfxhub_v1_2_funcs;
else
adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
@@ -1595,23 +1594,38 @@ static void gmc_v9_0_set_xgmi_ras_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_init_nps_details(struct amdgpu_device *adev)
{
+ enum amdgpu_memory_partition mode;
+ uint32_t supp_modes;
+ int i;
+
adev->gmc.supported_nps_modes = 0;
if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
return;
- /*TODO: Check PSP version also which supports NPS switch. Otherwise keep
+ mode = gmc_v9_0_get_memory_partition(adev, &supp_modes);
+
+ /* Mode detected by hardware and supported modes available */
+ if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) && supp_modes) {
+ while ((i = ffs(supp_modes))) {
+ if (AMDGPU_ALL_NPS_MASK & BIT(i))
+ adev->gmc.supported_nps_modes |= BIT(i);
+ supp_modes &= supp_modes - 1;
+ }
+ } else {
+ /*TODO: Check PSP version also which supports NPS switch. Otherwise keep
* supported modes as 0.
*/
- switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
- case IP_VERSION(9, 4, 3):
- case IP_VERSION(9, 4, 4):
- adev->gmc.supported_nps_modes =
- BIT(AMDGPU_NPS1_PARTITION_MODE) |
- BIT(AMDGPU_NPS4_PARTITION_MODE);
- break;
- default:
- break;
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 4, 3):
+ case IP_VERSION(9, 4, 4):
+ adev->gmc.supported_nps_modes =
+ BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS4_PARTITION_MODE);
+ break;
+ default:
+ break;
+ }
}
}
@@ -1625,9 +1639,7 @@ static int gmc_v9_0_early_init(struct amdgpu_ip_block *ip_block)
*/
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ gmc_v9_0_is_multi_chiplet(adev))
adev->gmc.xgmi.supported = true;
if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(6, 1, 0)) {
@@ -1636,8 +1648,7 @@ static int gmc_v9_0_early_init(struct amdgpu_ip_block *ip_block)
adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
}
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) {
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) {
enum amdgpu_pkg_type pkg_type =
adev->smuio.funcs->get_pkg_type(adev);
/* On GFXIP 9.4.3. APU, there is no physical VRAM domain present
@@ -2078,9 +2089,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
spin_lock_init(&adev->gmc.invalidate_lock);
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) {
+ if (gmc_v9_0_is_multi_chiplet(adev)) {
gmc_v9_4_3_init_vram_info(adev);
} else if (!adev->bios) {
if (adev->flags & AMD_IS_APU) {
@@ -2230,9 +2239,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
amdgpu_gmc_get_vbios_allocations(adev);
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) {
+ if (gmc_v9_0_is_multi_chiplet(adev)) {
r = gmc_v9_0_init_mem_ranges(adev);
if (r)
return r;
@@ -2261,9 +2268,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vm_manager.first_kfd_vmid =
(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) ?
+ gmc_v9_0_is_multi_chiplet(adev)) ?
3 :
8;
@@ -2275,9 +2280,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ if (gmc_v9_0_is_multi_chiplet(adev))
amdgpu_gmc_sysfs_init(adev);
return 0;
@@ -2287,9 +2290,7 @@ static int gmc_v9_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ if (gmc_v9_0_is_multi_chiplet(adev))
amdgpu_gmc_sysfs_fini(adev);
amdgpu_gmc_ras_fini(adev);
@@ -2541,7 +2542,7 @@ static int gmc_v9_0_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-static bool gmc_v9_0_is_idle(void *handle)
+static bool gmc_v9_0_is_idle(struct amdgpu_ip_block *ip_block)
{
/* MC is always ready in GMC v9.*/
return true;
@@ -2571,9 +2572,9 @@ static int gmc_v9_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void gmc_v9_0_get_clockgating_state(void *handle, u64 *flags)
+static void gmc_v9_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->mmhub.funcs->get_clockgating(adev, flags);
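The GMC v9 hunks above fold the repeated GC 9.4.3 / 9.4.4 / 9.5.0 IP-version checks into gmc_v9_0_is_multi_chiplet(), which simply tests adev->aid_mask, and derive the supported NPS modes from the hardware-reported mask before falling back to the per-IP defaults. A minimal sketch of the ffs() walk used in gmc_v9_0_init_nps_details() (the wrapper name nps_mask_from_hw is hypothetical):

    static uint32_t nps_mask_from_hw(uint32_t supp_modes)
    {
            uint32_t out = 0;
            int i;

            /* ffs() yields the 1-based index of the lowest set bit; that index
             * selects the NPS mode bit to record, and x &= x - 1 then clears
             * the lowest set bit so the loop terminates.
             */
            while ((i = ffs(supp_modes))) {
                    if (AMDGPU_ALL_NPS_MASK & BIT(i))
                            out |= BIT(i);
                    supp_modes &= supp_modes - 1;
            }
            return out;
    }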
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
index 194026e9be33..f1dc13b3ab38 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
@@ -21,7 +21,6 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "hdp_v4_0.h"
#include "amdgpu_ras.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
index d3962d469088..43195c079748 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
@@ -21,7 +21,6 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "hdp_v5_0.h"
#include "hdp/hdp_5_0_0_offset.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
index f52552c5fa27..fcb8dd2876bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
@@ -21,7 +21,6 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "hdp_v5_2.h"
#include "hdp/hdp_5_2_1_offset.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
index 6948fe9956ce..a88d25a06c29 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
@@ -21,7 +21,6 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "hdp_v6_0.h"
#include "hdp/hdp_6_0_0_offset.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
index 63820329f67e..49f7eb4fbd11 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
@@ -21,7 +21,6 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "hdp_v7_0.h"
#include "hdp/hdp_7_0_0_offset.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 8ac3d3282268..1317ede131b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -335,9 +335,9 @@ static int iceland_ih_resume(struct amdgpu_ip_block *ip_block)
return iceland_ih_hw_init(ip_block);
}
-static bool iceland_ih_is_idle(void *handle)
+static bool iceland_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index f8a485164437..eb4185dcbd1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -652,7 +652,7 @@ static int ih_v6_0_resume(struct amdgpu_ip_block *ip_block)
return ih_v6_0_hw_init(ip_block);
}
-static bool ih_v6_0_is_idle(void *handle)
+static bool ih_v6_0_is_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return true;
@@ -768,9 +768,9 @@ static int ih_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void ih_v6_0_get_clockgating_state(void *handle, u64 *flags)
+static void ih_v6_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL))
*flags |= AMD_CG_SUPPORT_IH_CG;
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
index dd0042efceec..068ed849dbad 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
@@ -631,7 +631,7 @@ static int ih_v6_1_resume(struct amdgpu_ip_block *ip_block)
return ih_v6_1_hw_init(ip_block);
}
-static bool ih_v6_1_is_idle(void *handle)
+static bool ih_v6_1_is_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return true;
@@ -749,9 +749,9 @@ static int ih_v6_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void ih_v6_1_get_clockgating_state(void *handle, u64 *flags)
+static void ih_v6_1_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL))
*flags |= AMD_CG_SUPPORT_IH_CG;
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
index 8f9b15c171f3..40a3530e0453 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
@@ -621,7 +621,7 @@ static int ih_v7_0_resume(struct amdgpu_ip_block *ip_block)
return ih_v7_0_hw_init(ip_block);
}
-static bool ih_v7_0_is_idle(void *handle)
+static bool ih_v7_0_is_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return true;
@@ -739,9 +739,9 @@ static int ih_v7_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void ih_v7_0_get_clockgating_state(void *handle, u64 *flags)
+static void ih_v7_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL))
*flags |= AMD_CG_SUPPORT_IH_CG;
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
index aeca5c08ea2f..cfa91d709d49 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
@@ -39,6 +39,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_4_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_1_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_imu.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_5_3_imu.bin");
static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c
index 964c29ef25dc..0027a639c7e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c
@@ -50,26 +50,29 @@ static int isp_v4_1_0_hw_init(struct amdgpu_isp *isp)
isp_base = adev->rmmio_base;
- isp->isp_cell = kcalloc(2, sizeof(struct mfd_cell), GFP_KERNEL);
+ isp->isp_cell = kcalloc(3, sizeof(struct mfd_cell), GFP_KERNEL);
if (!isp->isp_cell) {
r = -ENOMEM;
- DRM_ERROR("%s: isp mfd cell alloc failed\n", __func__);
+ drm_err(&adev->ddev,
+ "%s: isp mfd cell alloc failed\n", __func__);
goto failure;
}
- num_res = MAX_ISP410_MEM_RES + MAX_ISP410_SENSOR_RES + MAX_ISP410_INT_SRC;
+ num_res = MAX_ISP410_MEM_RES + MAX_ISP410_INT_SRC;
isp->isp_res = kcalloc(num_res, sizeof(struct resource),
GFP_KERNEL);
if (!isp->isp_res) {
r = -ENOMEM;
- DRM_ERROR("%s: isp mfd res alloc failed\n", __func__);
+ drm_err(&adev->ddev,
+ "%s: isp mfd res alloc failed\n", __func__);
goto failure;
}
isp->isp_pdata = kzalloc(sizeof(*isp->isp_pdata), GFP_KERNEL);
if (!isp->isp_pdata) {
r = -ENOMEM;
- DRM_ERROR("%s: isp platform data alloc failed\n", __func__);
+ drm_err(&adev->ddev,
+ "%s: isp platform data alloc failed\n", __func__);
goto failure;
}
@@ -88,14 +91,7 @@ static int isp_v4_1_0_hw_init(struct amdgpu_isp *isp)
isp->isp_res[1].start = isp_base + ISP410_PHY0_OFFSET;
isp->isp_res[1].end = isp_base + ISP410_PHY0_OFFSET + ISP410_PHY0_SIZE;
- isp->isp_res[2].name = "isp_gpio_sensor0_reg";
- isp->isp_res[2].flags = IORESOURCE_MEM;
- isp->isp_res[2].start = isp_base + ISP410_GPIO_SENSOR0_OFFSET;
- isp->isp_res[2].end = isp_base + ISP410_GPIO_SENSOR0_OFFSET +
- ISP410_GPIO_SENSOR0_SIZE;
-
- for (idx = MAX_ISP410_MEM_RES + MAX_ISP410_SENSOR_RES, int_idx = 0;
- idx < num_res; idx++, int_idx++) {
+ for (idx = MAX_ISP410_MEM_RES, int_idx = 0; idx < num_res; idx++, int_idx++) {
isp->isp_res[idx].name = "isp_4_1_0_irq";
isp->isp_res[idx].flags = IORESOURCE_IRQ;
isp->isp_res[idx].start =
@@ -110,11 +106,12 @@ static int isp_v4_1_0_hw_init(struct amdgpu_isp *isp)
isp->isp_cell[0].platform_data = isp->isp_pdata;
isp->isp_cell[0].pdata_size = sizeof(struct isp_platform_data);
- isp->isp_i2c_res = kcalloc(1, sizeof(struct resource),
- GFP_KERNEL);
+ /* initialize isp i2c platform data */
+ isp->isp_i2c_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL);
if (!isp->isp_i2c_res) {
r = -ENOMEM;
- DRM_ERROR("%s: isp mfd res alloc failed\n", __func__);
+ drm_err(&adev->ddev,
+ "%s: isp mfd res alloc failed\n", __func__);
goto failure;
}
@@ -129,9 +126,31 @@ static int isp_v4_1_0_hw_init(struct amdgpu_isp *isp)
isp->isp_cell[1].platform_data = isp->isp_pdata;
isp->isp_cell[1].pdata_size = sizeof(struct isp_platform_data);
- r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 2);
+ /* initialize isp gpiochip platform data */
+ isp->isp_gpio_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL);
+ if (!isp->isp_gpio_res) {
+ r = -ENOMEM;
+ drm_err(&adev->ddev,
+ "%s: isp gpio res alloc failed\n", __func__);
+ goto failure;
+ }
+
+ isp->isp_gpio_res[0].name = "isp_gpio_reg";
+ isp->isp_gpio_res[0].flags = IORESOURCE_MEM;
+ isp->isp_gpio_res[0].start = isp_base + ISP410_GPIO_SENSOR_OFFSET;
+ isp->isp_gpio_res[0].end = isp_base + ISP410_GPIO_SENSOR_OFFSET +
+ ISP410_GPIO_SENSOR_SIZE;
+
+ isp->isp_cell[2].name = "amdisp-pinctrl";
+ isp->isp_cell[2].num_resources = 1;
+ isp->isp_cell[2].resources = &isp->isp_gpio_res[0];
+ isp->isp_cell[2].platform_data = isp->isp_pdata;
+ isp->isp_cell[2].pdata_size = sizeof(struct isp_platform_data);
+
+ r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 3);
if (r) {
- DRM_ERROR("%s: add mfd hotplug device failed\n", __func__);
+ drm_err(&adev->ddev,
+ "%s: add mfd hotplug device failed\n", __func__);
goto failure;
}
@@ -143,6 +162,7 @@ failure:
kfree(isp->isp_res);
kfree(isp->isp_cell);
kfree(isp->isp_i2c_res);
+ kfree(isp->isp_gpio_res);
return r;
}
@@ -155,6 +175,7 @@ static int isp_v4_1_0_hw_fini(struct amdgpu_isp *isp)
kfree(isp->isp_cell);
kfree(isp->isp_pdata);
kfree(isp->isp_i2c_res);
+ kfree(isp->isp_gpio_res);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h
index 7db24c0f1080..4d239198edd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h
@@ -42,8 +42,8 @@
#define ISP410_I2C0_OFFSET 0x66400
#define ISP410_I2C0_SIZE 0x100
-#define ISP410_GPIO_SENSOR0_OFFSET 0x6613C
-#define ISP410_GPIO_SENSOR0_SIZE 0x4
+#define ISP410_GPIO_SENSOR_OFFSET 0x6613C
+#define ISP410_GPIO_SENSOR_SIZE 0x54
void isp_v4_1_0_set_isp_funcs(struct amdgpu_isp *isp);
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
index b56f27295468..69dd92f6e86d 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
@@ -50,27 +50,30 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
isp_base = adev->rmmio_base;
- isp->isp_cell = kcalloc(2, sizeof(struct mfd_cell), GFP_KERNEL);
+ isp->isp_cell = kcalloc(3, sizeof(struct mfd_cell), GFP_KERNEL);
if (!isp->isp_cell) {
r = -ENOMEM;
- DRM_ERROR("%s: isp mfd cell alloc failed\n", __func__);
+ drm_err(&adev->ddev,
+ "%s: isp mfd cell alloc failed\n", __func__);
goto failure;
}
- num_res = MAX_ISP411_MEM_RES + MAX_ISP411_SENSOR_RES + MAX_ISP411_INT_SRC;
+ num_res = MAX_ISP411_MEM_RES + MAX_ISP411_INT_SRC;
isp->isp_res = kcalloc(num_res, sizeof(struct resource),
GFP_KERNEL);
if (!isp->isp_res) {
r = -ENOMEM;
- DRM_ERROR("%s: isp mfd res alloc failed\n", __func__);
+ drm_err(&adev->ddev,
+ "%s: isp mfd res alloc failed\n", __func__);
goto failure;
}
isp->isp_pdata = kzalloc(sizeof(*isp->isp_pdata), GFP_KERNEL);
if (!isp->isp_pdata) {
r = -ENOMEM;
- DRM_ERROR("%s: isp platform data alloc failed\n", __func__);
+ drm_err(&adev->ddev,
+ "%s: isp platform data alloc failed\n", __func__);
goto failure;
}
@@ -89,14 +92,7 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
isp->isp_res[1].start = isp_base + ISP411_PHY0_OFFSET;
isp->isp_res[1].end = isp_base + ISP411_PHY0_OFFSET + ISP411_PHY0_SIZE;
- isp->isp_res[2].name = "isp_4_1_1_sensor0_reg";
- isp->isp_res[2].flags = IORESOURCE_MEM;
- isp->isp_res[2].start = isp_base + ISP411_GPIO_SENSOR0_OFFSET;
- isp->isp_res[2].end = isp_base + ISP411_GPIO_SENSOR0_OFFSET +
- ISP411_GPIO_SENSOR0_SIZE;
-
- for (idx = MAX_ISP411_MEM_RES + MAX_ISP411_SENSOR_RES, int_idx = 0;
- idx < num_res; idx++, int_idx++) {
+ for (idx = MAX_ISP411_MEM_RES, int_idx = 0; idx < num_res; idx++, int_idx++) {
isp->isp_res[idx].name = "isp_4_1_1_irq";
isp->isp_res[idx].flags = IORESOURCE_IRQ;
isp->isp_res[idx].start =
@@ -111,10 +107,12 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
isp->isp_cell[0].platform_data = isp->isp_pdata;
isp->isp_cell[0].pdata_size = sizeof(struct isp_platform_data);
+ /* initialize isp i2c platform data */
isp->isp_i2c_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL);
if (!isp->isp_i2c_res) {
r = -ENOMEM;
- DRM_ERROR("%s: isp mfd res alloc failed\n", __func__);
+ drm_err(&adev->ddev,
+ "%s: isp mfd res alloc failed\n", __func__);
goto failure;
}
@@ -129,9 +127,31 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
isp->isp_cell[1].platform_data = isp->isp_pdata;
isp->isp_cell[1].pdata_size = sizeof(struct isp_platform_data);
- r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 2);
+ /* initialize isp gpiochip platform data */
+ isp->isp_gpio_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL);
+ if (!isp->isp_gpio_res) {
+ r = -ENOMEM;
+ drm_err(&adev->ddev,
+ "%s: isp gpio res alloc failed\n", __func__);
+ goto failure;
+ }
+
+ isp->isp_gpio_res[0].name = "isp_gpio_reg";
+ isp->isp_gpio_res[0].flags = IORESOURCE_MEM;
+ isp->isp_gpio_res[0].start = isp_base + ISP411_GPIO_SENSOR_OFFSET;
+ isp->isp_gpio_res[0].end = isp_base + ISP411_GPIO_SENSOR_OFFSET +
+ ISP411_GPIO_SENSOR_SIZE;
+
+ isp->isp_cell[2].name = "amdisp-pinctrl";
+ isp->isp_cell[2].num_resources = 1;
+ isp->isp_cell[2].resources = &isp->isp_gpio_res[0];
+ isp->isp_cell[2].platform_data = isp->isp_pdata;
+ isp->isp_cell[2].pdata_size = sizeof(struct isp_platform_data);
+
+ r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 3);
if (r) {
- DRM_ERROR("%s: add mfd hotplug device failed\n", __func__);
+ drm_err(&adev->ddev,
+ "%s: add mfd hotplug device failed\n", __func__);
goto failure;
}
@@ -143,6 +163,7 @@ failure:
kfree(isp->isp_res);
kfree(isp->isp_cell);
kfree(isp->isp_i2c_res);
+ kfree(isp->isp_gpio_res);
return r;
}
@@ -155,6 +176,7 @@ static int isp_v4_1_1_hw_fini(struct amdgpu_isp *isp)
kfree(isp->isp_cell);
kfree(isp->isp_pdata);
kfree(isp->isp_i2c_res);
+ kfree(isp->isp_gpio_res);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h
index 40887ddeb08c..fe45d70d87f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h
@@ -33,7 +33,6 @@
#include "ivsrcid/isp/irqsrcs_isp_4_1.h"
#define MAX_ISP411_MEM_RES 2
-#define MAX_ISP411_SENSOR_RES 1
#define MAX_ISP411_INT_SRC 8
#define ISP411_PHY0_OFFSET 0x66700
@@ -42,8 +41,8 @@
#define ISP411_I2C0_OFFSET 0x66400
#define ISP411_I2C0_SIZE 0x100
-#define ISP411_GPIO_SENSOR0_OFFSET 0x6613C
-#define ISP411_GPIO_SENSOR0_SIZE 0x4
+#define ISP411_GPIO_SENSOR_OFFSET 0x6613C
+#define ISP411_GPIO_SENSOR_SIZE 0x54
void isp_v4_1_1_set_isp_funcs(struct amdgpu_isp *isp);
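Both ISP 4.1.x variants now register a third MFD cell, "amdisp-pinctrl", backed by a single MEM resource covering the whole sensor GPIO register window (0x54 bytes at offset 0x6613C) rather than the former 4-byte sensor0 register. A sketch using the 4.1.0 constants (the 4.1.1 path is identical with the ISP411_* names); the standalone initializers are illustration only, the driver fills isp->isp_gpio_res and isp->isp_cell[2] at runtime:

    struct resource gpio_res = {
            .name  = "isp_gpio_reg",
            .flags = IORESOURCE_MEM,
            .start = isp_base + ISP410_GPIO_SENSOR_OFFSET,
            .end   = isp_base + ISP410_GPIO_SENSOR_OFFSET + ISP410_GPIO_SENSOR_SIZE,
    };

    struct mfd_cell pinctrl_cell = {
            .name          = "amdisp-pinctrl",
            .num_resources = 1,
            .resources     = &gpio_res,
            .platform_data = isp->isp_pdata,
            .pdata_size    = sizeof(struct isp_platform_data),
    };

    /* registered together with the capture and i2c cells */
    r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 3);

The error and teardown paths free isp->isp_gpio_res alongside the existing allocations.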
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
index 03b8b7cd5229..9e428e669ada 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -604,15 +604,15 @@ static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev)
static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
+ bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work);
int cnt = 0;
- mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);
+ mutex_lock(&adev->vcn.inst[0].vcn1_jpeg1_workaround);
if (amdgpu_fence_wait_empty(&adev->vcn.inst->ring_dec))
DRM_ERROR("JPEG dec: vcn dec ring may not be empty\n");
- for (cnt = 0; cnt < adev->vcn.num_enc_rings; cnt++) {
+ for (cnt = 0; cnt < adev->vcn.inst[0].num_enc_rings; cnt++) {
if (amdgpu_fence_wait_empty(&adev->vcn.inst->ring_enc[cnt]))
DRM_ERROR("JPEG dec: vcn enc ring[%d] may not be empty\n", cnt);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 7c9251c03815..4cde8a8bcc83 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -33,6 +33,22 @@
#include "vcn/vcn_2_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
+static const struct amdgpu_hwip_reg_entry jpeg_reg_list_2_0[] = {
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_INT_STAT),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_CNTL),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_SIZE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_ADDR_MODE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_Y_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_UV_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_PITCH),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_UV_PITCH),
+};
+
static void jpeg_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v2_0_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v2_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
@@ -98,7 +114,14 @@ static int jpeg_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->jpeg.internal.jpeg_pitch[0] = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH);
- return 0;
+ r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_2_0, ARRAY_SIZE(jpeg_reg_list_2_0));
+ if (r)
+ return r;
+
+ adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
+ r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
+
+ return r;
}
/**
@@ -117,6 +140,8 @@ static int jpeg_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ amdgpu_jpeg_sysfs_reset_mask_fini(adev);
+
r = amdgpu_jpeg_sw_fini(adev);
return r;
@@ -655,9 +680,9 @@ void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count)
}
}
-static bool jpeg_v2_0_is_idle(void *handle)
+static bool jpeg_v2_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return ((RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS) &
UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
@@ -682,7 +707,7 @@ static int jpeg_v2_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
- if (!jpeg_v2_0_is_idle(adev))
+ if (!jpeg_v2_0_is_idle(ip_block))
return -EBUSY;
jpeg_v2_0_enable_clock_gating(adev);
} else {
@@ -739,6 +764,13 @@ static int jpeg_v2_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static int jpeg_v2_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ jpeg_v2_0_stop(ring->adev);
+ jpeg_v2_0_start(ring->adev);
+ return amdgpu_ring_test_helper(ring);
+}
+
static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = {
.name = "jpeg_v2_0",
.early_init = jpeg_v2_0_early_init,
@@ -752,6 +784,8 @@ static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = {
.wait_for_idle = jpeg_v2_0_wait_for_idle,
.set_clockgating_state = jpeg_v2_0_set_clockgating_state,
.set_powergating_state = jpeg_v2_0_set_powergating_state,
+ .dump_ip_state = amdgpu_jpeg_dump_ip_state,
+ .print_ip_state = amdgpu_jpeg_print_ip_state,
};
static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
@@ -782,6 +816,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = jpeg_v2_0_ring_reset,
};
static void jpeg_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
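Each JPEG generation gains a per-queue reset hook wired through the ring .reset callback: quiesce the block, reprogram it, then run the ring test helper. The jpeg_v2_0 variant above restarts the whole block; later generations restart only the affected instance. A minimal sketch of the shared shape (jpeg_vX_Y_* stands in for the generation-specific stop/start helpers):

    static int jpeg_vX_Y_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
    {
            jpeg_vX_Y_stop(ring->adev);             /* quiesce the JPEG block */
            jpeg_vX_Y_start(ring->adev);            /* reprogram the ring buffer registers */
            return amdgpu_ring_test_helper(ring);   /* confirm the ring executes packets again */
    }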
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index 11f6af2646e7..8b39e114f3be 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -36,6 +36,22 @@
#define JPEG25_MAX_HW_INSTANCES_ARCTURUS 2
+static const struct amdgpu_hwip_reg_entry jpeg_reg_list_2_5[] = {
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_INT_STAT),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_CNTL),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_SIZE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_ADDR_MODE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_Y_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_UV_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_PITCH),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_UV_PITCH),
+};
+
static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v2_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
@@ -147,7 +163,14 @@ static int jpeg_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- return 0;
+ r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_2_5, ARRAY_SIZE(jpeg_reg_list_2_5));
+ if (r)
+ return r;
+
+ adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
+ r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
+
+ return r;
}
/**
@@ -166,6 +189,8 @@ static int jpeg_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ amdgpu_jpeg_sysfs_reset_mask_fini(adev);
+
r = amdgpu_jpeg_sw_fini(adev);
return r;
@@ -310,6 +335,44 @@ static void jpeg_v2_5_enable_clock_gating(struct amdgpu_device *adev, int inst)
WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data);
}
+static void jpeg_v2_5_start_inst(struct amdgpu_device *adev, int i)
+{
+ struct amdgpu_ring *ring = adev->jpeg.inst[i].ring_dec;
+ /* disable anti hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS), 0,
+ ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+
+ /* JPEG disable CGC */
+ jpeg_v2_5_disable_clock_gating(adev, i);
+
+ /* MJPEG global tiling registers */
+ WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX8_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+ WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* enable JMI channel */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL), 0,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+ /* enable System Interrupt for JRBC */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmJPEG_SYS_INT_EN),
+ JPEG_SYS_INT_EN__DJRBC_MASK,
+ ~JPEG_SYS_INT_EN__DJRBC_MASK);
+
+ WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_VMID, 0);
+ WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
+ WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+ upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_RPTR, 0);
+ WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR, 0);
+ WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, 0x00000002L);
+ WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
+ ring->wptr = RREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR);
+}
+
/**
* jpeg_v2_5_start - start JPEG block
*
@@ -319,52 +382,33 @@ static void jpeg_v2_5_enable_clock_gating(struct amdgpu_device *adev, int inst)
*/
static int jpeg_v2_5_start(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring;
int i;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (adev->jpeg.harvest_config & (1 << i))
continue;
+ jpeg_v2_5_start_inst(adev, i);
- ring = adev->jpeg.inst[i].ring_dec;
- /* disable anti hang mechanism */
- WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS), 0,
- ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
-
- /* JPEG disable CGC */
- jpeg_v2_5_disable_clock_gating(adev, i);
-
- /* MJPEG global tiling registers */
- WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX8_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
- WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
-
- /* enable JMI channel */
- WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL), 0,
- ~UVD_JMI_CNTL__SOFT_RESET_MASK);
-
- /* enable System Interrupt for JRBC */
- WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmJPEG_SYS_INT_EN),
- JPEG_SYS_INT_EN__DJRBC_MASK,
- ~JPEG_SYS_INT_EN__DJRBC_MASK);
-
- WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_VMID, 0);
- WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
- WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
- lower_32_bits(ring->gpu_addr));
- WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
- upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_RPTR, 0);
- WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR, 0);
- WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, 0x00000002L);
- WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
- ring->wptr = RREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR);
}
return 0;
}
+static void jpeg_v2_5_stop_inst(struct amdgpu_device *adev, int i)
+{
+ /* reset JMI */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL),
+ UVD_JMI_CNTL__SOFT_RESET_MASK,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+ jpeg_v2_5_enable_clock_gating(adev, i);
+
+ /* enable anti hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS),
+ UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
+ ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+}
+
/**
* jpeg_v2_5_stop - stop JPEG block
*
@@ -379,18 +423,7 @@ static int jpeg_v2_5_stop(struct amdgpu_device *adev)
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (adev->jpeg.harvest_config & (1 << i))
continue;
-
- /* reset JMI */
- WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL),
- UVD_JMI_CNTL__SOFT_RESET_MASK,
- ~UVD_JMI_CNTL__SOFT_RESET_MASK);
-
- jpeg_v2_5_enable_clock_gating(adev, i);
-
- /* enable anti hang mechanism */
- WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS),
- UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
- ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+ jpeg_v2_5_stop_inst(adev, i);
}
return 0;
@@ -482,9 +515,9 @@ static void jpeg_v2_6_dec_ring_insert_end(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, (1 << (ring->me * 2 + 14)));
}
-static bool jpeg_v2_5_is_idle(void *handle)
+static bool jpeg_v2_5_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 1;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
@@ -530,7 +563,7 @@ static int jpeg_v2_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
continue;
if (enable) {
- if (!jpeg_v2_5_is_idle(adev))
+ if (!jpeg_v2_5_is_idle(ip_block))
return -EBUSY;
jpeg_v2_5_enable_clock_gating(adev, i);
} else {
@@ -610,6 +643,13 @@ static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static int jpeg_v2_5_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ jpeg_v2_5_stop_inst(ring->adev, ring->me);
+ jpeg_v2_5_start_inst(ring->adev, ring->me);
+ return amdgpu_ring_test_helper(ring);
+}
+
static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = {
.name = "jpeg_v2_5",
.early_init = jpeg_v2_5_early_init,
@@ -623,6 +663,8 @@ static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = {
.wait_for_idle = jpeg_v2_5_wait_for_idle,
.set_clockgating_state = jpeg_v2_5_set_clockgating_state,
.set_powergating_state = jpeg_v2_5_set_powergating_state,
+ .dump_ip_state = amdgpu_jpeg_dump_ip_state,
+ .print_ip_state = amdgpu_jpeg_print_ip_state,
};
static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = {
@@ -638,6 +680,8 @@ static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = {
.wait_for_idle = jpeg_v2_5_wait_for_idle,
.set_clockgating_state = jpeg_v2_5_set_clockgating_state,
.set_powergating_state = jpeg_v2_5_set_powergating_state,
+ .dump_ip_state = amdgpu_jpeg_dump_ip_state,
+ .print_ip_state = amdgpu_jpeg_print_ip_state,
};
static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
@@ -668,6 +712,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = jpeg_v2_5_ring_reset,
};
static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = {
@@ -698,6 +743,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = {
.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = jpeg_v2_5_ring_reset,
};
static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index 4eca65ea9053..2f8510c2986b 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -34,6 +34,22 @@
#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f
+static const struct amdgpu_hwip_reg_entry jpeg_reg_list_3_0[] = {
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_INT_STAT),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_CNTL),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_SIZE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_ADDR_MODE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_Y_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_UV_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_PITCH),
+ SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_UV_PITCH),
+};
+
static void jpeg_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
@@ -112,7 +128,14 @@ static int jpeg_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->jpeg.internal.jpeg_pitch[0] = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH);
- return 0;
+ r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_3_0, ARRAY_SIZE(jpeg_reg_list_3_0));
+ if (r)
+ return r;
+
+ adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
+ r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
+
+ return r;
}
/**
@@ -131,6 +154,8 @@ static int jpeg_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ amdgpu_jpeg_sysfs_reset_mask_fini(adev);
+
r = amdgpu_jpeg_sw_fini(adev);
return r;
@@ -445,9 +470,9 @@ static void jpeg_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static bool jpeg_v3_0_is_idle(void *handle)
+static bool jpeg_v3_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret = 1;
ret &= (((RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS) &
@@ -473,7 +498,7 @@ static int jpeg_v3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
bool enable = state == AMD_CG_STATE_GATE;
if (enable) {
- if (!jpeg_v3_0_is_idle(adev))
+ if (!jpeg_v3_0_is_idle(ip_block))
return -EBUSY;
jpeg_v3_0_enable_clock_gating(adev);
} else {
@@ -530,6 +555,13 @@ static int jpeg_v3_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static int jpeg_v3_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ jpeg_v3_0_stop(ring->adev);
+ jpeg_v3_0_start(ring->adev);
+ return amdgpu_ring_test_helper(ring);
+}
+
static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = {
.name = "jpeg_v3_0",
.early_init = jpeg_v3_0_early_init,
@@ -543,6 +575,8 @@ static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = {
.wait_for_idle = jpeg_v3_0_wait_for_idle,
.set_clockgating_state = jpeg_v3_0_set_clockgating_state,
.set_powergating_state = jpeg_v3_0_set_powergating_state,
+ .dump_ip_state = amdgpu_jpeg_dump_ip_state,
+ .print_ip_state = amdgpu_jpeg_print_ip_state,
};
static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = {
@@ -573,6 +607,7 @@ static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = {
.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = jpeg_v3_0_ring_reset,
};
static void jpeg_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index 0aef1f64afd0..f17ec5414fd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -36,13 +36,28 @@
#define regUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f
+static const struct amdgpu_hwip_reg_entry jpeg_reg_list_4_0[] = {
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_INT_STAT),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_RB_CNTL),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_RB_SIZE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_ADDR_MODE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_Y_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_UV_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_PITCH),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_UV_PITCH),
+};
+
static int jpeg_v4_0_start_sriov(struct amdgpu_device *adev);
static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static void jpeg_v4_0_set_ras_funcs(struct amdgpu_device *adev);
-
static void jpeg_v4_0_dec_ring_set_wptr(struct amdgpu_ring *ring);
/**
@@ -123,14 +138,15 @@ static int jpeg_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
r = amdgpu_jpeg_ras_sw_init(adev);
if (r)
return r;
- /* TODO: Add queue reset mask when FW fully supports it */
- adev->jpeg.supported_reset =
- amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
- r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
+
+ r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_4_0, ARRAY_SIZE(jpeg_reg_list_4_0));
if (r)
return r;
- return 0;
+ adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
+ r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
+
+ return r;
}
/**
@@ -614,9 +630,9 @@ static void jpeg_v4_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static bool jpeg_v4_0_is_idle(void *handle)
+static bool jpeg_v4_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret = 1;
ret &= (((RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS) &
@@ -642,7 +658,7 @@ static int jpeg_v4_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
bool enable = state == AMD_CG_STATE_GATE;
if (enable) {
- if (!jpeg_v4_0_is_idle(adev))
+ if (!jpeg_v4_0_is_idle(ip_block))
return -EBUSY;
jpeg_v4_0_enable_clock_gating(adev);
} else {
@@ -704,6 +720,16 @@ static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static int jpeg_v4_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ if (amdgpu_sriov_vf(ring->adev))
+ return -EINVAL;
+
+ jpeg_v4_0_stop(ring->adev);
+ jpeg_v4_0_start(ring->adev);
+ return amdgpu_ring_test_helper(ring);
+}
+
static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = {
.name = "jpeg_v4_0",
.early_init = jpeg_v4_0_early_init,
@@ -717,6 +743,8 @@ static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = {
.wait_for_idle = jpeg_v4_0_wait_for_idle,
.set_clockgating_state = jpeg_v4_0_set_clockgating_state,
.set_powergating_state = jpeg_v4_0_set_powergating_state,
+ .dump_ip_state = amdgpu_jpeg_dump_ip_state,
+ .print_ip_state = amdgpu_jpeg_print_ip_state,
};
static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = {
@@ -747,6 +775,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = {
.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = jpeg_v4_0_ring_reset,
};
static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev)
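The sw_init paths of JPEG 2.0 through 4.0 share the same new tail: register a static register list for the dump_ip_state/print_ip_state hooks, then advertise per-queue reset through the sysfs reset mask (4.0.3 additionally skips the mask under SR-IOV, and the 4.0 reset callback rejects VFs with -EINVAL). A sketch of that tail, using the 4.0 register list as the example:

    r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_4_0,
                                  ARRAY_SIZE(jpeg_reg_list_4_0));
    if (r)
            return r;

    adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
    return amdgpu_jpeg_sysfs_reset_mask_init(adev);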
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index 88f9771c1686..5598a35f72af 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -59,10 +59,53 @@ static int amdgpu_ih_srcid_jpeg[] = {
VCN_4_0__SRCID__JPEG7_DECODE
};
+static const struct amdgpu_hwip_reg_entry jpeg_reg_list_4_0_3[] = {
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_INT_STAT),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_SYS_INT_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC0_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_ADDR_MODE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_Y_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_UV_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_PITCH),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_UV_PITCH),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC1_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC1_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC1_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC2_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC2_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC2_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC3_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC3_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC3_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC4_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC4_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC4_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC5_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC5_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC5_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC6_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC6_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC6_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC7_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC7_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC7_UVD_JRBC_STATUS),
+};
+
static inline bool jpeg_v4_0_3_normalizn_reqd(struct amdgpu_device *adev)
{
- return amdgpu_sriov_vf(adev) ||
- (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4));
+ return (adev->jpeg.caps & AMDGPU_JPEG_CAPS(RRMT_ENABLED)) == 0;
+}
+
+static inline int jpeg_v4_0_3_core_reg_offset(u32 pipe)
+{
+ if (pipe)
+ return ((0x40 * pipe) - 0xc80);
+ else
+ return 0;
}
/**
@@ -144,10 +187,8 @@ static int jpeg_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
adev->jpeg.internal.jpeg_pitch[j] =
regUVD_JRBC0_UVD_JRBC_SCRATCH0_INTERNAL_OFFSET;
adev->jpeg.inst[i].external.jpeg_pitch[j] =
- SOC15_REG_OFFSET1(
- JPEG, jpeg_inst,
- regUVD_JRBC0_UVD_JRBC_SCRATCH0,
- (j ? (0x40 * j - 0xc80) : 0));
+ SOC15_REG_OFFSET1(JPEG, jpeg_inst, regUVD_JRBC0_UVD_JRBC_SCRATCH0,
+ jpeg_v4_0_3_core_reg_offset(j));
}
}
@@ -159,13 +200,17 @@ static int jpeg_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
}
}
- /* TODO: Add queue reset mask when FW fully supports it */
- adev->jpeg.supported_reset =
- amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
- r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
+ r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_4_0_3, ARRAY_SIZE(jpeg_reg_list_4_0_3));
if (r)
return r;
+ if (!amdgpu_sriov_vf(adev)) {
+ adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
+ r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+ }
+
return 0;
}
@@ -185,7 +230,9 @@ static int jpeg_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- amdgpu_jpeg_sysfs_reset_mask_fini(adev);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_jpeg_sysfs_reset_mask_fini(adev);
+
r = amdgpu_jpeg_sw_fini(adev);
return r;
@@ -331,6 +378,11 @@ static int jpeg_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
}
}
} else {
+ /* This flag is not set for VF, assumed to be disabled always */
+ if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) &
+ 0x100)
+ adev->jpeg.caps |= AMDGPU_JPEG_CAPS(RRMT_ENABLED);
+
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
jpeg_inst = GET_INST(JPEG, i);
@@ -475,6 +527,75 @@ static void jpeg_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst
WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_GATE, data);
}
+static void jpeg_v4_0_3_start_inst(struct amdgpu_device *adev, int inst)
+{
+ int jpeg_inst = GET_INST(JPEG, inst);
+
+ WREG32_SOC15(JPEG, jpeg_inst, regUVD_PGFSM_CONFIG,
+ 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT);
+ SOC15_WAIT_ON_RREG(JPEG, jpeg_inst, regUVD_PGFSM_STATUS,
+ UVD_PGFSM_STATUS__UVDJ_PWR_ON <<
+ UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT,
+ UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
+
+ /* disable anti hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS),
+ 0, ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+
+ /* JPEG disable CGC */
+ jpeg_v4_0_3_disable_clock_gating(adev, inst);
+
+ /* MJPEG global tiling registers */
+ WREG32_SOC15(JPEG, jpeg_inst, regJPEG_DEC_GFX8_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+ WREG32_SOC15(JPEG, jpeg_inst, regJPEG_DEC_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* enable JMI channel */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL), 0,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+}
+
+static void jpeg_v4_0_3_start_jrbc(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int jpeg_inst = GET_INST(JPEG, ring->me);
+ int reg_offset = jpeg_v4_0_3_core_reg_offset(ring->pipe);
+
+ /* enable System Interrupt for JRBC */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regJPEG_SYS_INT_EN),
+ JPEG_SYS_INT_EN__DJRBC0_MASK << ring->pipe,
+ ~(JPEG_SYS_INT_EN__DJRBC0_MASK << ring->pipe));
+
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_UVD_LMI_JRBC_RB_VMID,
+ reg_offset, 0);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC0_UVD_JRBC_RB_CNTL,
+ reg_offset,
+ (0x00000001L | 0x00000002L));
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+ reg_offset, lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+ reg_offset, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC0_UVD_JRBC_RB_RPTR,
+ reg_offset, 0);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC0_UVD_JRBC_RB_WPTR,
+ reg_offset, 0);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC0_UVD_JRBC_RB_CNTL,
+ reg_offset, 0x00000002L);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC0_UVD_JRBC_RB_SIZE,
+ reg_offset, ring->ring_size / 4);
+ ring->wptr = RREG32_SOC15_OFFSET(JPEG, jpeg_inst, regUVD_JRBC0_UVD_JRBC_RB_WPTR,
+ reg_offset);
+}
+
/**
* jpeg_v4_0_3_start - start JPEG block
*
@@ -485,84 +606,36 @@ static void jpeg_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst
static int jpeg_v4_0_3_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- int i, j, jpeg_inst;
+ int i, j;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
- jpeg_inst = GET_INST(JPEG, i);
-
- WREG32_SOC15(JPEG, jpeg_inst, regUVD_PGFSM_CONFIG,
- 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT);
- SOC15_WAIT_ON_RREG(
- JPEG, jpeg_inst, regUVD_PGFSM_STATUS,
- UVD_PGFSM_STATUS__UVDJ_PWR_ON
- << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT,
- UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
-
- /* disable anti hang mechanism */
- WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst,
- regUVD_JPEG_POWER_STATUS),
- 0, ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
-
- /* JPEG disable CGC */
- jpeg_v4_0_3_disable_clock_gating(adev, i);
-
- /* MJPEG global tiling registers */
- WREG32_SOC15(JPEG, jpeg_inst, regJPEG_DEC_GFX8_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
- WREG32_SOC15(JPEG, jpeg_inst, regJPEG_DEC_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
-
- /* enable JMI channel */
- WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL), 0,
- ~UVD_JMI_CNTL__SOFT_RESET_MASK);
-
+ jpeg_v4_0_3_start_inst(adev, i);
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
- unsigned int reg_offset = (j?(0x40 * j - 0xc80):0);
-
ring = &adev->jpeg.inst[i].ring_dec[j];
-
- /* enable System Interrupt for JRBC */
- WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst,
- regJPEG_SYS_INT_EN),
- JPEG_SYS_INT_EN__DJRBC0_MASK << j,
- ~(JPEG_SYS_INT_EN__DJRBC0_MASK << j));
-
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_JMI0_UVD_LMI_JRBC_RB_VMID,
- reg_offset, 0);
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_JRBC0_UVD_JRBC_RB_CNTL,
- reg_offset,
- (0x00000001L | 0x00000002L));
- WREG32_SOC15_OFFSET(
- JPEG, jpeg_inst,
- regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW,
- reg_offset, lower_32_bits(ring->gpu_addr));
- WREG32_SOC15_OFFSET(
- JPEG, jpeg_inst,
- regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
- reg_offset, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_JRBC0_UVD_JRBC_RB_RPTR,
- reg_offset, 0);
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_JRBC0_UVD_JRBC_RB_WPTR,
- reg_offset, 0);
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_JRBC0_UVD_JRBC_RB_CNTL,
- reg_offset, 0x00000002L);
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_JRBC0_UVD_JRBC_RB_SIZE,
- reg_offset, ring->ring_size / 4);
- ring->wptr = RREG32_SOC15_OFFSET(
- JPEG, jpeg_inst, regUVD_JRBC0_UVD_JRBC_RB_WPTR,
- reg_offset);
+ jpeg_v4_0_3_start_jrbc(ring);
}
}
return 0;
}
+static void jpeg_v4_0_3_stop_inst(struct amdgpu_device *adev, int inst)
+{
+ int jpeg_inst = GET_INST(JPEG, inst);
+ /* reset JMI */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL),
+ UVD_JMI_CNTL__SOFT_RESET_MASK,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+ jpeg_v4_0_3_enable_clock_gating(adev, inst);
+
+ /* enable anti hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS),
+ UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
+ ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+}
+
/**
* jpeg_v4_0_3_stop - stop JPEG block
*
@@ -572,31 +645,10 @@ static int jpeg_v4_0_3_start(struct amdgpu_device *adev)
*/
static int jpeg_v4_0_3_stop(struct amdgpu_device *adev)
{
- int i, jpeg_inst;
+ int i;
- for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
- jpeg_inst = GET_INST(JPEG, i);
- /* reset JMI */
- WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL),
- UVD_JMI_CNTL__SOFT_RESET_MASK,
- ~UVD_JMI_CNTL__SOFT_RESET_MASK);
-
- jpeg_v4_0_3_enable_clock_gating(adev, i);
-
- /* enable anti hang mechanism */
- WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst,
- regUVD_JPEG_POWER_STATUS),
- UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
- ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
-
- WREG32_SOC15(JPEG, jpeg_inst, regUVD_PGFSM_CONFIG,
- 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT);
- SOC15_WAIT_ON_RREG(
- JPEG, jpeg_inst, regUVD_PGFSM_STATUS,
- UVD_PGFSM_STATUS__UVDJ_PWR_OFF
- << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT,
- UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
- }
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i)
+ jpeg_v4_0_3_stop_inst(adev, i);
return 0;
}
@@ -612,9 +664,8 @@ static uint64_t jpeg_v4_0_3_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- return RREG32_SOC15_OFFSET(
- JPEG, GET_INST(JPEG, ring->me), regUVD_JRBC0_UVD_JRBC_RB_RPTR,
- ring->pipe ? (0x40 * ring->pipe - 0xc80) : 0);
+ return RREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me), regUVD_JRBC0_UVD_JRBC_RB_RPTR,
+ jpeg_v4_0_3_core_reg_offset(ring->pipe));
}
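/*
 * The inline "ring->pipe ? (0x40 * ring->pipe - 0xc80) : 0" expression that
 * this patch replaces with jpeg_v4_0_3_core_reg_offset() presumably computes
 * the same per-core register offset: core 0 uses the JRBC0 block directly and
 * each further core adds 0x40 on top of a -0xc80 bias, so the values for
 * pipe >= 1 come out negative (those JRBC blocks sit below JRBC0 in the
 * register map).  The check below just evaluates that formula.
 */
#include <stdio.h>

static int core_reg_offset(unsigned int pipe)
{
	return pipe ? (0x40 * (int)pipe - 0xc80) : 0;
}

int main(void)
{
	unsigned int pipe;

	for (pipe = 0; pipe < 4; pipe++)
		printf("pipe %u -> reg_offset %d\n", pipe, core_reg_offset(pipe));
	return 0;
}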
/**
@@ -630,14 +681,12 @@ static uint64_t jpeg_v4_0_3_dec_ring_get_wptr(struct amdgpu_ring *ring)
if (ring->use_doorbell)
return adev->wb.wb[ring->wptr_offs];
- else
- return RREG32_SOC15_OFFSET(
- JPEG, GET_INST(JPEG, ring->me),
- regUVD_JRBC0_UVD_JRBC_RB_WPTR,
- ring->pipe ? (0x40 * ring->pipe - 0xc80) : 0);
+
+ return RREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me), regUVD_JRBC0_UVD_JRBC_RB_WPTR,
+ jpeg_v4_0_3_core_reg_offset(ring->pipe));
}
-static void jpeg_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+void jpeg_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
/* JPEG engine access for HDP flush doesn't work when RRMT is enabled.
* This is a workaround to avoid any HDP flush through JPEG ring.
@@ -659,10 +708,8 @@ static void jpeg_v4_0_3_dec_ring_set_wptr(struct amdgpu_ring *ring)
adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
} else {
- WREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me),
- regUVD_JRBC0_UVD_JRBC_RB_WPTR,
- (ring->pipe ? (0x40 * ring->pipe - 0xc80) :
- 0),
+ WREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me), regUVD_JRBC0_UVD_JRBC_RB_WPTR,
+ jpeg_v4_0_3_core_reg_offset(ring->pipe),
lower_32_bits(ring->wptr));
}
}
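/*
 * jpeg_v4_0_3_dec_ring_set_wptr() publishes the new write pointer one of two
 * ways: doorbell-capable rings mirror it into the writeback slot and ring the
 * doorbell, otherwise it is written to the JRBC WPTR register through MMIO at
 * the per-core offset.  A stand-in sketch of that branch; the three variables
 * below are hypothetical placeholders for the writeback slot, the doorbell
 * and the MMIO register.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t writeback_slot;		/* adev->wb.wb[ring->wptr_offs] stand-in */
static uint32_t doorbell;		/* WDOORBELL32() target stand-in */
static uint32_t wptr_reg;		/* JRBC RB_WPTR register stand-in */

static void set_wptr(uint64_t wptr, bool use_doorbell)
{
	if (use_doorbell) {
		writeback_slot = (uint32_t)wptr;	/* publish for readers */
		doorbell = (uint32_t)wptr;		/* kick the engine */
	} else {
		wptr_reg = (uint32_t)wptr;		/* direct MMIO fallback */
	}
}

int main(void)
{
	set_wptr(0x40, true);
	set_wptr(0x80, false);
	printf("wb=%u db=%u mmio=%u\n", (unsigned int)writeback_slot,
	       (unsigned int)doorbell, (unsigned int)wptr_reg);
	return 0;
}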
@@ -907,21 +954,17 @@ void jpeg_v4_0_3_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count)
}
}
-static bool jpeg_v4_0_3_is_idle(void *handle)
+static bool jpeg_v4_0_3_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool ret = false;
int i, j;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
- unsigned int reg_offset = (j?(0x40 * j - 0xc80):0);
-
- ret &= ((RREG32_SOC15_OFFSET(
- JPEG, GET_INST(JPEG, i),
- regUVD_JRBC0_UVD_JRBC_STATUS,
- reg_offset) &
- UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
+ ret &= ((RREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, i),
+ regUVD_JRBC0_UVD_JRBC_STATUS, jpeg_v4_0_3_core_reg_offset(j)) &
+ UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}
}
@@ -937,13 +980,10 @@ static int jpeg_v4_0_3_wait_for_idle(struct amdgpu_ip_block *ip_block)
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
- unsigned int reg_offset = (j?(0x40 * j - 0xc80):0);
-
- ret &= SOC15_WAIT_ON_RREG_OFFSET(
- JPEG, GET_INST(JPEG, i),
- regUVD_JRBC0_UVD_JRBC_STATUS, reg_offset,
+ ret &= (SOC15_WAIT_ON_RREG_OFFSET(JPEG, GET_INST(JPEG, i),
+ regUVD_JRBC0_UVD_JRBC_STATUS, jpeg_v4_0_3_core_reg_offset(j),
UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
- UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
+ UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK));
}
}
return ret;
@@ -958,7 +998,7 @@ static int jpeg_v4_0_3_set_clockgating_state(struct amdgpu_ip_block *ip_block,
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (enable) {
- if (!jpeg_v4_0_3_is_idle(adev))
+ if (!jpeg_v4_0_3_is_idle(ip_block))
return -EBUSY;
jpeg_v4_0_3_enable_clock_gating(adev, i);
} else {
@@ -1055,6 +1095,41 @@ static int jpeg_v4_0_3_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static void jpeg_v4_0_3_core_stall_reset(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int jpeg_inst = GET_INST(JPEG, ring->me);
+ int reg_offset = jpeg_v4_0_3_core_reg_offset(ring->pipe);
+
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_UVD_JMI_CLIENT_STALL,
+ reg_offset, 0x1F);
+ SOC15_WAIT_ON_RREG_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_UVD_JMI_CLIENT_CLEAN_STATUS,
+ reg_offset, 0x1F, 0x1F);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_JPEG_LMI_DROP,
+ reg_offset, 0x1F);
+ WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 1 << ring->pipe);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_UVD_JMI_CLIENT_STALL,
+ reg_offset, 0x00);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_JPEG_LMI_DROP,
+ reg_offset, 0x00);
+ WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 0x00);
+}
+
+static int jpeg_v4_0_3_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ if (amdgpu_sriov_vf(ring->adev))
+ return -EOPNOTSUPP;
+
+ jpeg_v4_0_3_core_stall_reset(ring);
+ jpeg_v4_0_3_start_jrbc(ring);
+ return amdgpu_ring_test_helper(ring);
+}
+
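/*
 * The per-queue reset wired up here always runs in the same order: bail out
 * under SR-IOV, stall and drain the JMI client, pulse the per-core reset and
 * release the stall/drop, reprogram the ring with the same helper used at
 * start time, then run a ring test to confirm the queue came back.  The
 * sketch below only models that ordering; the helper names are stand-ins,
 * not amdgpu functions.
 */
#include <stdio.h>

#define EOPNOTSUPP 95

static int  is_sriov_vf(void)          { return 0; }
static void core_stall_and_drain(void) { puts("stall JMI client, wait clean, drop LMI"); }
static void core_reset_pulse(void)     { puts("assert core reset, unstall, clear reset"); }
static void reinit_ring(void)          { puts("reprogram JRBC ring buffer"); }
static int  ring_test(void)            { puts("submit test packet"); return 0; }

static int ring_reset(void)
{
	if (is_sriov_vf())
		return -EOPNOTSUPP;	/* not supported from a VF */

	core_stall_and_drain();
	core_reset_pulse();
	reinit_ring();
	return ring_test();
}

int main(void)
{
	return ring_reset();
}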
static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = {
.name = "jpeg_v4_0_3",
.early_init = jpeg_v4_0_3_early_init,
@@ -1068,6 +1143,8 @@ static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = {
.wait_for_idle = jpeg_v4_0_3_wait_for_idle,
.set_clockgating_state = jpeg_v4_0_3_set_clockgating_state,
.set_powergating_state = jpeg_v4_0_3_set_powergating_state,
+ .dump_ip_state = amdgpu_jpeg_dump_ip_state,
+ .print_ip_state = amdgpu_jpeg_print_ip_state,
};
static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
@@ -1099,6 +1176,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
.emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = jpeg_v4_0_3_ring_reset,
};
static void jpeg_v4_0_3_set_dec_ring_funcs(struct amdgpu_device *adev)
@@ -1245,11 +1323,13 @@ static int jpeg_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_ban
misc0 = bank->regs[ACA_REG_IDX_MISC0];
switch (type) {
case ACA_SMU_TYPE_UE:
+ bank->aca_err_type = ACA_ERROR_TYPE_UE;
ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
1ULL);
break;
case ACA_SMU_TYPE_CE:
- ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE,
+ bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank);
+ ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
ACA_REG__MISC0__ERRCNT(misc0));
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
index 747a3e5f6856..a90bf370a002 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
@@ -56,6 +56,7 @@ void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
unsigned int flags);
void jpeg_v4_0_3_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vmid, uint64_t pd_addr);
+void jpeg_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring);
void jpeg_v4_0_3_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count);
void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring);
void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
index 6b3656984957..974030a5c03c 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -46,11 +46,26 @@
#define regJPEG_CGC_GATE_INTERNAL_OFFSET 0x4160
#define regUVD_NO_OP_INTERNAL_OFFSET 0x0029
+static const struct amdgpu_hwip_reg_entry jpeg_reg_list_4_0_5[] = {
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_INT_STAT),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_RB_CNTL),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_RB_SIZE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_ADDR_MODE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_Y_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_UV_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_PITCH),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_UV_PITCH),
+};
+
static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_5_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v4_0_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
-
static void jpeg_v4_0_5_dec_ring_set_wptr(struct amdgpu_ring *ring);
static int amdgpu_ih_clientid_jpeg[] = {
@@ -58,6 +73,8 @@ static int amdgpu_ih_clientid_jpeg[] = {
SOC15_IH_CLIENTID_VCN1
};
+
+
/**
* jpeg_v4_0_5_early_init - set function pointers
*
@@ -153,6 +170,10 @@ static int jpeg_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
adev->jpeg.inst[i].external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, i, regUVD_JPEG_PITCH);
}
+ r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_4_0_5, ARRAY_SIZE(jpeg_reg_list_4_0_5));
+ if (r)
+ return r;
+
/* TODO: Add queue reset mask when FW fully supports it */
adev->jpeg.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
@@ -627,9 +648,9 @@ static void jpeg_v4_0_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static bool jpeg_v4_0_5_is_idle(void *handle)
+static bool jpeg_v4_0_5_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 1;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
@@ -672,7 +693,7 @@ static int jpeg_v4_0_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
continue;
if (enable) {
- if (!jpeg_v4_0_5_is_idle(adev))
+ if (!jpeg_v4_0_5_is_idle(ip_block))
return -EBUSY;
jpeg_v4_0_5_enable_clock_gating(adev, i);
@@ -759,6 +780,8 @@ static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = {
.wait_for_idle = jpeg_v4_0_5_wait_for_idle,
.set_clockgating_state = jpeg_v4_0_5_set_clockgating_state,
.set_powergating_state = jpeg_v4_0_5_set_powergating_state,
+ .dump_ip_state = amdgpu_jpeg_dump_ip_state,
+ .print_ip_state = amdgpu_jpeg_print_ip_state,
};
static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
index d5cf0f2799d4..31d213ccbe0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
@@ -34,6 +34,22 @@
#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
#include "jpeg_v5_0_0.h"
+static const struct amdgpu_hwip_reg_entry jpeg_reg_list_5_0[] = {
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_INT_STAT),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_RB_CNTL),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_RB_SIZE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_ADDR_MODE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_Y_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_UV_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_PITCH),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_UV_PITCH),
+};
+
static void jpeg_v5_0_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v5_0_0_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v5_0_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
@@ -100,6 +116,10 @@ static int jpeg_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET;
adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH);
+ r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_5_0, ARRAY_SIZE(jpeg_reg_list_5_0));
+ if (r)
+ return r;
+
/* TODO: Add queue reset mask when FW fully supports it */
adev->jpeg.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
@@ -539,9 +559,9 @@ static void jpeg_v5_0_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static bool jpeg_v5_0_0_is_idle(void *handle)
+static bool jpeg_v5_0_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret = 1;
ret &= (((RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS) &
@@ -567,7 +587,7 @@ static int jpeg_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
if (enable) {
- if (!jpeg_v5_0_0_is_idle(adev))
+ if (!jpeg_v5_0_0_is_idle(ip_block))
return -EBUSY;
jpeg_v5_0_0_enable_clock_gating(adev);
} else {
@@ -637,6 +657,8 @@ static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = {
.wait_for_idle = jpeg_v5_0_0_wait_for_idle,
.set_clockgating_state = jpeg_v5_0_0_set_clockgating_state,
.set_powergating_state = jpeg_v5_0_0_set_powergating_state,
+ .dump_ip_state = amdgpu_jpeg_dump_ip_state,
+ .print_ip_state = amdgpu_jpeg_print_ip_state,
};
static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
index 40d4c32a8c2a..218e16b68f1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
@@ -52,6 +52,47 @@ static int amdgpu_ih_srcid_jpeg[] = {
VCN_5_0__SRCID__JPEG9_DECODE,
};
+static const struct amdgpu_hwip_reg_entry jpeg_reg_list_5_0_1[] = {
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_INT_STAT),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC0_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_ADDR_MODE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_Y_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_UV_GFX10_TILING_SURFACE),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_PITCH),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_UV_PITCH),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC1_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC1_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC1_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC2_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC2_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC2_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC3_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC3_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC3_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC4_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC4_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC4_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC5_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC5_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC5_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC6_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC6_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC6_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC7_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC7_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC7_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC8_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC8_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC8_UVD_JRBC_STATUS),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC9_UVD_JRBC_RB_RPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC9_UVD_JRBC_RB_WPTR),
+ SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC9_UVD_JRBC_STATUS),
+};
+
static int jpeg_v5_0_1_core_reg_offset(u32 pipe)
{
if (pipe <= AMDGPU_MAX_JPEG_RINGS_4_0_3)
@@ -145,6 +186,17 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
}
}
+ r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_5_0_1, ARRAY_SIZE(jpeg_reg_list_5_0_1));
+ if (r)
+ return r;
+
+ if (!amdgpu_sriov_vf(adev)) {
+ adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
+ r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+ }
+
return 0;
}
@@ -164,6 +216,9 @@ static int jpeg_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_jpeg_sysfs_reset_mask_fini(adev);
+
r = amdgpu_jpeg_sw_fini(adev);
return r;
@@ -194,6 +249,9 @@ static int jpeg_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
}
return 0;
}
+ if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
+ adev->jpeg.caps |= AMDGPU_JPEG_CAPS(RRMT_ENABLED);
+
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
jpeg_inst = GET_INST(JPEG, i);
ring = adev->jpeg.inst[i].ring_dec;
@@ -281,11 +339,10 @@ static int jpeg_v5_0_1_resume(struct amdgpu_ip_block *ip_block)
return r;
}
-static int jpeg_v5_0_1_disable_antihang(struct amdgpu_device *adev, int inst_idx)
+static void jpeg_v5_0_1_init_inst(struct amdgpu_device *adev, int i)
{
- int jpeg_inst;
+ int jpeg_inst = GET_INST(JPEG, i);
- jpeg_inst = GET_INST(JPEG, inst_idx);
/* disable anti hang mechanism */
WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS), 0,
~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
@@ -294,20 +351,75 @@ static int jpeg_v5_0_1_disable_antihang(struct amdgpu_device *adev, int inst_idx
WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS), 0,
~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);
- return 0;
+ /* MJPEG global tiling registers */
+ WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* enable JMI channel */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL), 0,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
}
-static int jpeg_v5_0_1_enable_antihang(struct amdgpu_device *adev, int inst_idx)
+static void jpeg_v5_0_1_deinit_inst(struct amdgpu_device *adev, int i)
{
- int jpeg_inst;
+ int jpeg_inst = GET_INST(JPEG, i);
+ /* reset JMI */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL),
+ UVD_JMI_CNTL__SOFT_RESET_MASK,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
- jpeg_inst = GET_INST(JPEG, inst_idx);
/* enable anti hang mechanism */
WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS),
UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
- ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+ ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+}
- return 0;
+static void jpeg_v5_0_1_init_jrbc(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 reg, data, mask;
+ int jpeg_inst = GET_INST(JPEG, ring->me);
+ int reg_offset = ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0;
+
+ /* enable System Interrupt for JRBC */
+ reg = SOC15_REG_OFFSET(JPEG, jpeg_inst, regJPEG_SYS_INT_EN);
+ if (ring->pipe < AMDGPU_MAX_JPEG_RINGS_4_0_3) {
+ data = JPEG_SYS_INT_EN__DJRBC0_MASK << ring->pipe;
+ mask = ~(JPEG_SYS_INT_EN__DJRBC0_MASK << ring->pipe);
+ WREG32_P(reg, data, mask);
+ } else {
+ data = JPEG_SYS_INT_EN__DJRBC0_MASK << (ring->pipe+12);
+ mask = ~(JPEG_SYS_INT_EN__DJRBC0_MASK << (ring->pipe+12));
+ WREG32_P(reg, data, mask);
+ }
+
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_LMI_JRBC_RB_VMID,
+ reg_offset, 0);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_CNTL,
+ reg_offset,
+ (0x00000001L | 0x00000002L));
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+ reg_offset, lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+ reg_offset, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_RPTR,
+ reg_offset, 0);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_WPTR,
+ reg_offset, 0);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_CNTL,
+ reg_offset, 0x00000002L);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_SIZE,
+ reg_offset, ring->ring_size / 4);
+ ring->wptr = RREG32_SOC15_OFFSET(JPEG, jpeg_inst, regUVD_JRBC_RB_WPTR,
+ reg_offset);
}
/**
@@ -320,69 +432,13 @@ static int jpeg_v5_0_1_enable_antihang(struct amdgpu_device *adev, int inst_idx)
static int jpeg_v5_0_1_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- int i, j, jpeg_inst, r;
+ int i, j;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
- jpeg_inst = GET_INST(JPEG, i);
-
- /* disable antihang */
- r = jpeg_v5_0_1_disable_antihang(adev, i);
- if (r)
- return r;
-
- /* MJPEG global tiling registers */
- WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
-
- /* enable JMI channel */
- WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL), 0,
- ~UVD_JMI_CNTL__SOFT_RESET_MASK);
-
+ jpeg_v5_0_1_init_inst(adev, i);
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
- int reg_offset = (j ? jpeg_v5_0_1_core_reg_offset(j) : 0);
- u32 reg, data, mask;
-
ring = &adev->jpeg.inst[i].ring_dec[j];
-
- /* enable System Interrupt for JRBC */
- reg = SOC15_REG_OFFSET(JPEG, jpeg_inst, regJPEG_SYS_INT_EN);
- if (j < AMDGPU_MAX_JPEG_RINGS_4_0_3) {
- data = JPEG_SYS_INT_EN__DJRBC0_MASK << j;
- mask = ~(JPEG_SYS_INT_EN__DJRBC0_MASK << j);
- WREG32_P(reg, data, mask);
- } else {
- data = JPEG_SYS_INT_EN__DJRBC0_MASK << (j+12);
- mask = ~(JPEG_SYS_INT_EN__DJRBC0_MASK << (j+12));
- WREG32_P(reg, data, mask);
- }
-
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_LMI_JRBC_RB_VMID,
- reg_offset, 0);
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_JRBC_RB_CNTL,
- reg_offset,
- (0x00000001L | 0x00000002L));
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
- reg_offset, lower_32_bits(ring->gpu_addr));
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
- reg_offset, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_JRBC_RB_RPTR,
- reg_offset, 0);
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_JRBC_RB_WPTR,
- reg_offset, 0);
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_JRBC_RB_CNTL,
- reg_offset, 0x00000002L);
- WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
- regUVD_JRBC_RB_SIZE,
- reg_offset, ring->ring_size / 4);
- ring->wptr = RREG32_SOC15_OFFSET(JPEG, jpeg_inst, regUVD_JRBC_RB_WPTR,
- reg_offset);
+ jpeg_v5_0_1_init_jrbc(ring);
}
}
@@ -398,20 +454,10 @@ static int jpeg_v5_0_1_start(struct amdgpu_device *adev)
*/
static int jpeg_v5_0_1_stop(struct amdgpu_device *adev)
{
- int i, jpeg_inst, r;
-
- for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
- jpeg_inst = GET_INST(JPEG, i);
- /* reset JMI */
- WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL),
- UVD_JMI_CNTL__SOFT_RESET_MASK,
- ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+ int i;
- /* enable antihang */
- r = jpeg_v5_0_1_enable_antihang(adev, i);
- if (r)
- return r;
- }
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i)
+ jpeg_v5_0_1_deinit_inst(adev, i);
return 0;
}
@@ -471,9 +517,9 @@ static void jpeg_v5_0_1_dec_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static bool jpeg_v5_0_1_is_idle(void *handle)
+static bool jpeg_v5_0_1_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool ret = false;
int i, j;
@@ -522,7 +568,7 @@ static int jpeg_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
return 0;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
- if (!jpeg_v5_0_1_is_idle(adev))
+ if (!jpeg_v5_0_1_is_idle(ip_block))
return -EBUSY;
}
@@ -617,6 +663,41 @@ static int jpeg_v5_0_1_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static void jpeg_v5_0_1_core_stall_reset(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int jpeg_inst = GET_INST(JPEG, ring->me);
+ int reg_offset = ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0;
+
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_UVD_JMI_CLIENT_STALL,
+ reg_offset, 0x1F);
+ SOC15_WAIT_ON_RREG_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_UVD_JMI_CLIENT_CLEAN_STATUS,
+ reg_offset, 0x1F, 0x1F);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_JPEG_LMI_DROP,
+ reg_offset, 0x1F);
+ WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 1 << ring->pipe);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_UVD_JMI_CLIENT_STALL,
+ reg_offset, 0x00);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_JPEG_LMI_DROP,
+ reg_offset, 0x00);
+ WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 0x00);
+}
+
+static int jpeg_v5_0_1_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ if (amdgpu_sriov_vf(ring->adev))
+ return -EOPNOTSUPP;
+
+ jpeg_v5_0_1_core_stall_reset(ring);
+ jpeg_v5_0_1_init_jrbc(ring);
+ return amdgpu_ring_test_helper(ring);
+}
+
static const struct amd_ip_funcs jpeg_v5_0_1_ip_funcs = {
.name = "jpeg_v5_0_1",
.early_init = jpeg_v5_0_1_early_init,
@@ -635,8 +716,8 @@ static const struct amd_ip_funcs jpeg_v5_0_1_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = jpeg_v5_0_1_set_clockgating_state,
.set_powergating_state = jpeg_v5_0_1_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = amdgpu_jpeg_dump_ip_state,
+ .print_ip_state = amdgpu_jpeg_print_ip_state,
};
static const struct amdgpu_ring_funcs jpeg_v5_0_1_dec_ring_vm_funcs = {
@@ -655,6 +736,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_1_dec_ring_vm_funcs = {
.emit_ib = jpeg_v4_0_3_dec_ring_emit_ib,
.emit_fence = jpeg_v4_0_3_dec_ring_emit_fence,
.emit_vm_flush = jpeg_v4_0_3_dec_ring_emit_vm_flush,
+ .emit_hdp_flush = jpeg_v4_0_3_ring_emit_hdp_flush,
.test_ring = amdgpu_jpeg_dec_ring_test_ring,
.test_ib = amdgpu_jpeg_dec_ring_test_ib,
.insert_nop = jpeg_v4_0_3_dec_ring_nop,
@@ -666,6 +748,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_1_dec_ring_vm_funcs = {
.emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = jpeg_v5_0_1_ring_reset,
};
static void jpeg_v5_0_1_set_dec_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h
index 8ce146c00bb6..efdab57324e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h
@@ -26,4 +26,76 @@
extern const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block;
-#endif /* __JPEG_V5_0_0_H__ */
+#define regUVD_JRBC0_UVD_JRBC_RB_WPTR 0x0640
+#define regUVD_JRBC0_UVD_JRBC_RB_WPTR_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_STATUS 0x0649
+#define regUVD_JRBC0_UVD_JRBC_STATUS_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_RB_RPTR 0x064a
+#define regUVD_JRBC0_UVD_JRBC_RB_RPTR_BASE_IDX 1
+#define regUVD_JRBC1_UVD_JRBC_RB_WPTR 0x0000
+#define regUVD_JRBC1_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_STATUS 0x0009
+#define regUVD_JRBC1_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_RB_RPTR 0x000a
+#define regUVD_JRBC1_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_RB_WPTR 0x0040
+#define regUVD_JRBC2_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_STATUS 0x0049
+#define regUVD_JRBC2_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_RB_RPTR 0x004a
+#define regUVD_JRBC2_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_RB_WPTR 0x0080
+#define regUVD_JRBC3_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_STATUS 0x0089
+#define regUVD_JRBC3_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_RB_RPTR 0x008a
+#define regUVD_JRBC3_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_RB_WPTR 0x00c0
+#define regUVD_JRBC4_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_STATUS 0x00c9
+#define regUVD_JRBC4_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_RB_RPTR 0x00ca
+#define regUVD_JRBC4_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC5_UVD_JRBC_RB_WPTR 0x0100
+#define regUVD_JRBC5_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC5_UVD_JRBC_STATUS 0x0109
+#define regUVD_JRBC5_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC5_UVD_JRBC_RB_RPTR 0x010a
+#define regUVD_JRBC5_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC6_UVD_JRBC_RB_WPTR 0x0140
+#define regUVD_JRBC6_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC6_UVD_JRBC_STATUS 0x0149
+#define regUVD_JRBC6_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC6_UVD_JRBC_RB_RPTR 0x014a
+#define regUVD_JRBC6_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC7_UVD_JRBC_RB_WPTR 0x0180
+#define regUVD_JRBC7_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC7_UVD_JRBC_STATUS 0x0189
+#define regUVD_JRBC7_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC7_UVD_JRBC_RB_RPTR 0x018a
+#define regUVD_JRBC7_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC8_UVD_JRBC_RB_WPTR 0x01c0
+#define regUVD_JRBC8_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC8_UVD_JRBC_STATUS 0x01c9
+#define regUVD_JRBC8_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC8_UVD_JRBC_RB_RPTR 0x01ca
+#define regUVD_JRBC8_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC9_UVD_JRBC_RB_WPTR 0x0440
+#define regUVD_JRBC9_UVD_JRBC_RB_WPTR_BASE_IDX 1
+#define regUVD_JRBC9_UVD_JRBC_STATUS 0x0449
+#define regUVD_JRBC9_UVD_JRBC_STATUS_BASE_IDX 1
+#define regUVD_JRBC9_UVD_JRBC_RB_RPTR 0x044a
+#define regUVD_JRBC9_UVD_JRBC_RB_RPTR_BASE_IDX 1
+#define regUVD_JMI0_JPEG_LMI_DROP 0x0663
+#define regUVD_JMI0_JPEG_LMI_DROP_BASE_IDX 1
+#define regUVD_JMI0_UVD_JMI_CLIENT_STALL 0x067a
+#define regUVD_JMI0_UVD_JMI_CLIENT_STALL_BASE_IDX 1
+#define regUVD_JMI0_UVD_JMI_CLIENT_CLEAN_STATUS 0x067b
+#define regUVD_JMI0_UVD_JMI_CLIENT_CLEAN_STATUS_BASE_IDX 1
+#define regJPEG_CORE_RST_CTRL 0x072e
+#define regJPEG_CORE_RST_CTRL_BASE_IDX 1
+
+#define regVCN_RRMT_CNTL 0x0940
+#define regVCN_RRMT_CNTL_BASE_IDX 1
+
+#endif /* __JPEG_V5_0_1_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index f9a4d08eef92..e65916ada23b 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -54,6 +54,8 @@ MODULE_FIRMWARE("amdgpu/gc_11_5_1_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_1_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_mes1.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_5_3_mes_2.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_5_3_mes1.bin");
static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block);
static int mes_v11_0_hw_fini(struct amdgpu_ip_block *ip_block);
@@ -62,6 +64,7 @@ static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev);
#define MES_EOP_SIZE 2048
#define GFX_MES_DRAM_SIZE 0x80000
+#define MES11_HW_RESOURCE_1_SIZE (128 * AMDGPU_GPU_PAGE_SIZE)
static void mes_v11_0_ring_set_wptr(struct amdgpu_ring *ring)
{
@@ -730,9 +733,6 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
static int mes_v11_0_set_hw_resources_1(struct amdgpu_mes *mes)
{
- int size = 128 * PAGE_SIZE;
- int ret = 0;
- struct amdgpu_device *adev = mes->adev;
union MESAPI_SET_HW_RESOURCES_1 mes_set_hw_res_pkt;
memset(&mes_set_hw_res_pkt, 0, sizeof(mes_set_hw_res_pkt));
@@ -741,18 +741,13 @@ static int mes_v11_0_set_hw_resources_1(struct amdgpu_mes *mes)
mes_set_hw_res_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
mes_set_hw_res_pkt.enable_mes_info_ctx = 1;
- ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &mes->resource_1,
- &mes->resource_1_gpu_addr,
- &mes->resource_1_addr);
- if (ret) {
- dev_err(adev->dev, "(%d) failed to create mes resource_1 bo\n", ret);
- return ret;
+ mes_set_hw_res_pkt.cleaner_shader_fence_mc_addr = mes->resource_1_gpu_addr[0];
+ if (amdgpu_sriov_is_mes_info_enable(mes->adev)) {
+ mes_set_hw_res_pkt.mes_info_ctx_mc_addr =
+ mes->resource_1_gpu_addr[0] + AMDGPU_GPU_PAGE_SIZE;
+ mes_set_hw_res_pkt.mes_info_ctx_size = MES11_HW_RESOURCE_1_SIZE;
}
- mes_set_hw_res_pkt.mes_info_ctx_mc_addr = mes->resource_1_gpu_addr;
- mes_set_hw_res_pkt.mes_info_ctx_size = mes->resource_1->tbo.base.size;
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
offsetof(union MESAPI_SET_HW_RESOURCES_1, api_status));
@@ -808,7 +803,7 @@ static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
};
static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
int r;
const struct mes_firmware_header_v1_0 *mes_hdr;
@@ -843,7 +838,7 @@ static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev,
}
static int mes_v11_0_allocate_ucode_data_buffer(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
int r;
const struct mes_firmware_header_v1_0 *mes_hdr;
@@ -884,7 +879,7 @@ static int mes_v11_0_allocate_ucode_data_buffer(struct amdgpu_device *adev,
}
static void mes_v11_0_free_ucode_buffers(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
amdgpu_bo_free_kernel(&adev->mes.data_fw_obj[pipe],
&adev->mes.data_fw_gpu_addr[pipe],
@@ -982,7 +977,7 @@ static void mes_v11_0_enable(struct amdgpu_device *adev, bool enable)
/* This function is for backdoor MES firmware */
static int mes_v11_0_load_microcode(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe, bool prime_icache)
+ enum amdgpu_mes_pipe pipe, bool prime_icache)
{
int r;
uint32_t data;
@@ -1054,7 +1049,7 @@ static int mes_v11_0_load_microcode(struct amdgpu_device *adev,
}
static int mes_v11_0_allocate_eop_buf(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
int r;
u32 *eop;
@@ -1265,7 +1260,7 @@ static int mes_v11_0_kiq_enable_queue(struct amdgpu_device *adev)
}
static int mes_v11_0_queue_init(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
struct amdgpu_ring *ring;
int r;
@@ -1348,7 +1343,7 @@ static int mes_v11_0_kiq_ring_init(struct amdgpu_device *adev)
}
static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
int r, mqd_size = sizeof(struct v11_compute_mqd);
struct amdgpu_ring *ring;
@@ -1389,7 +1384,7 @@ static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev,
static int mes_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- int pipe, r;
+ int pipe, r, bo_size;
adev->mes.funcs = &mes_v11_0_funcs;
adev->mes.kiq_hw_init = &mes_v11_0_kiq_hw_init;
@@ -1424,6 +1419,23 @@ static int mes_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ bo_size = AMDGPU_GPU_PAGE_SIZE;
+ if (amdgpu_sriov_is_mes_info_enable(adev))
+ bo_size += MES11_HW_RESOURCE_1_SIZE;
+
+ /* Only needed for AMDGPU_MES_SCHED_PIPE on MES 11 */
+ r = amdgpu_bo_create_kernel(adev,
+ bo_size,
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->mes.resource_1[0],
+ &adev->mes.resource_1_gpu_addr[0],
+ &adev->mes.resource_1_addr[0]);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to create mes resource_1 bo\n", r);
+ return r;
+ }
+
return 0;
}
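/*
 * Layout of the single resource_1 BO that mes_v11_0_sw_init() now allocates:
 * page 0 holds the cleaner-shader fence, and when SR-IOV MES info is enabled
 * the following MES11_HW_RESOURCE_1_SIZE bytes (128 GPU pages) hold the MES
 * info context, which is why set_hw_resources_1 points mes_info_ctx_mc_addr
 * one page past the BO base.  A stand-alone sketch of that arithmetic,
 * assuming the usual 4 KiB GPU page.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SIZE		4096u
#define HW_RESOURCE_1_SIZE	(128u * GPU_PAGE_SIZE)

int main(void)
{
	bool sriov_mes_info = true;
	uint64_t bo_base = 0x800000000ULL;	/* example VRAM address */
	uint32_t bo_size = GPU_PAGE_SIZE +
			   (sriov_mes_info ? HW_RESOURCE_1_SIZE : 0);

	uint64_t cleaner_fence_addr = bo_base;			/* page 0 */
	uint64_t mes_info_ctx_addr  = bo_base + GPU_PAGE_SIZE;	/* pages 1..128 */

	printf("bo_size=%u fence=0x%llx info_ctx=0x%llx\n",
	       (unsigned int)bo_size,
	       (unsigned long long)cleaner_fence_addr,
	       (unsigned long long)mes_info_ctx_addr);
	return 0;
}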
@@ -1432,6 +1444,9 @@ static int mes_v11_0_sw_fini(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int pipe;
+ amdgpu_bo_free_kernel(&adev->mes.resource_1[0], &adev->mes.resource_1_gpu_addr[0],
+ &adev->mes.resource_1_addr[0]);
+
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
kfree(adev->mes.mqd_backup[pipe]);
@@ -1619,12 +1634,10 @@ static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
if (r)
goto failure;
- if (amdgpu_sriov_is_mes_info_enable(adev)) {
- r = mes_v11_0_set_hw_resources_1(&adev->mes);
- if (r) {
- DRM_ERROR("failed mes_v11_0_set_hw_resources_1, r=%d\n", r);
- goto failure;
- }
+ r = mes_v11_0_set_hw_resources_1(&adev->mes);
+ if (r) {
+ DRM_ERROR("failed mes_v11_0_set_hw_resources_1, r=%d\n", r);
+ goto failure;
}
r = mes_v11_0_query_sched_status(&adev->mes);
@@ -1655,34 +1668,17 @@ failure:
static int mes_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
- if (amdgpu_sriov_is_mes_info_enable(adev)) {
- amdgpu_bo_free_kernel(&adev->mes.resource_1, &adev->mes.resource_1_gpu_addr,
- &adev->mes.resource_1_addr);
- }
return 0;
}
static int mes_v11_0_suspend(struct amdgpu_ip_block *ip_block)
{
- int r;
-
- r = amdgpu_mes_suspend(ip_block->adev);
- if (r)
- return r;
-
return mes_v11_0_hw_fini(ip_block);
}
static int mes_v11_0_resume(struct amdgpu_ip_block *ip_block)
{
- int r;
-
- r = mes_v11_0_hw_init(ip_block);
- if (r)
- return r;
-
- return amdgpu_mes_resume(ip_block->adev);
+ return mes_v11_0_hw_init(ip_block);
}
static int mes_v11_0_early_init(struct amdgpu_ip_block *ip_block)
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 0fd0fa6ed518..183dd3346da5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -686,6 +686,8 @@ static int mes_v12_0_set_hw_resources_1(struct amdgpu_mes *mes, int pipe)
mes_set_hw_res_1_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC_1;
mes_set_hw_res_1_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
mes_set_hw_res_1_pkt.mes_kiq_unmap_timeout = 0xa;
+ mes_set_hw_res_1_pkt.cleaner_shader_fence_mc_addr =
+ mes->resource_1_gpu_addr[pipe];
return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
&mes_set_hw_res_1_pkt, sizeof(mes_set_hw_res_1_pkt),
@@ -899,7 +901,7 @@ static const struct amdgpu_mes_funcs mes_v12_0_funcs = {
};
static int mes_v12_0_allocate_ucode_buffer(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
int r;
const struct mes_firmware_header_v1_0 *mes_hdr;
@@ -933,7 +935,7 @@ static int mes_v12_0_allocate_ucode_buffer(struct amdgpu_device *adev,
}
static int mes_v12_0_allocate_ucode_data_buffer(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
int r;
const struct mes_firmware_header_v1_0 *mes_hdr;
@@ -967,7 +969,7 @@ static int mes_v12_0_allocate_ucode_data_buffer(struct amdgpu_device *adev,
}
static void mes_v12_0_free_ucode_buffers(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
amdgpu_bo_free_kernel(&adev->mes.data_fw_obj[pipe],
&adev->mes.data_fw_gpu_addr[pipe],
@@ -1073,7 +1075,7 @@ static void mes_v12_0_set_ucode_start_addr(struct amdgpu_device *adev)
/* This function is for backdoor MES firmware */
static int mes_v12_0_load_microcode(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe, bool prime_icache)
+ enum amdgpu_mes_pipe pipe, bool prime_icache)
{
int r;
uint32_t data;
@@ -1137,7 +1139,7 @@ static int mes_v12_0_load_microcode(struct amdgpu_device *adev,
}
static int mes_v12_0_allocate_eop_buf(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
int r;
u32 *eop;
@@ -1358,7 +1360,7 @@ static int mes_v12_0_kiq_enable_queue(struct amdgpu_device *adev)
}
static int mes_v12_0_queue_init(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
struct amdgpu_ring *ring;
int r;
@@ -1458,7 +1460,7 @@ static int mes_v12_0_kiq_ring_init(struct amdgpu_device *adev)
}
static int mes_v12_0_mqd_sw_init(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum amdgpu_mes_pipe pipe)
{
int r, mqd_size = sizeof(struct v12_compute_mqd);
struct amdgpu_ring *ring;
@@ -1517,12 +1519,23 @@ static int mes_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- if (!adev->enable_uni_mes && pipe == AMDGPU_MES_KIQ_PIPE)
+ if (!adev->enable_uni_mes && pipe == AMDGPU_MES_KIQ_PIPE) {
r = mes_v12_0_kiq_ring_init(adev);
- else
+ } else {
r = mes_v12_0_ring_init(adev, pipe);
- if (r)
- return r;
+ if (r)
+ return r;
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->mes.resource_1[pipe],
+ &adev->mes.resource_1_gpu_addr[pipe],
+ &adev->mes.resource_1_addr[pipe]);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to create mes resource_1 bo pipe[%d]\n", r, pipe);
+ return r;
+ }
+ }
}
return 0;
@@ -1534,6 +1547,10 @@ static int mes_v12_0_sw_fini(struct amdgpu_ip_block *ip_block)
int pipe;
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
+ amdgpu_bo_free_kernel(&adev->mes.resource_1[pipe],
+ &adev->mes.resource_1_gpu_addr[pipe],
+ &adev->mes.resource_1_addr[pipe]);
+
kfree(adev->mes.mqd_backup[pipe]);
amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj[pipe],
@@ -1732,8 +1749,7 @@ static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
if (r)
goto failure;
- if (adev->enable_uni_mes)
- mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_SCHED_PIPE);
+ mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_SCHED_PIPE);
mes_v12_0_init_aggregated_doorbell(&adev->mes);
@@ -1770,24 +1786,12 @@ static int mes_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
static int mes_v12_0_suspend(struct amdgpu_ip_block *ip_block)
{
- int r;
-
- r = amdgpu_mes_suspend(ip_block->adev);
- if (r)
- return r;
-
return mes_v12_0_hw_fini(ip_block);
}
static int mes_v12_0_resume(struct amdgpu_ip_block *ip_block)
{
- int r;
-
- r = mes_v12_0_hw_init(ip_block);
- if (r)
- return r;
-
- return amdgpu_mes_resume(ip_block->adev);
+ return mes_v12_0_hw_init(ip_block);
}
static int mes_v12_0_early_init(struct amdgpu_ip_block *ip_block)
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
index 9689e2b5d4e5..2adee2b94c37 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
@@ -172,6 +172,30 @@ static void mmhub_v1_7_init_tlb_regs(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL, tmp);
}
+/* Set snoop bit for SDMA so that SDMA writes probe-invalidate RW lines */
+static void mmhub_v1_7_init_snoop_override_regs(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+ int i;
+ uint32_t distance = regDAGB1_WRCLI_GPU_SNOOP_OVERRIDE -
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE;
+
+ for (i = 0; i < 5; i++) { /* DAGB instances */
+ tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, i * distance);
+ tmp |= (1 << 15); /* SDMA client is BIT15 */
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, i * distance, tmp);
+
+ tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, i * distance);
+ tmp |= (1 << 15);
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, i * distance, tmp);
+ }
+}
+
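/*
 * The snoop-override programming above is a plain read-modify-write of bit 15
 * (the SDMA client) across equally spaced DAGB register instances.  A minimal
 * sketch of that pattern over a fake register file; the stride value and the
 * reg_* helpers are illustrative stand-ins, only the instance count and the
 * bit position come from the code above.
 */
#include <stdint.h>
#include <stdio.h>

#define DAGB_INSTANCES	5
#define DAGB_STRIDE	0x100		/* illustrative distance between DAGBs */
#define SDMA_CLIENT_BIT	(1u << 15)

static uint32_t regs[DAGB_INSTANCES * DAGB_STRIDE];

static uint32_t reg_read(uint32_t off)			{ return regs[off]; }
static void     reg_write(uint32_t off, uint32_t v)	{ regs[off] = v; }

int main(void)
{
	uint32_t i;

	for (i = 0; i < DAGB_INSTANCES; i++) {
		uint32_t off = i * DAGB_STRIDE;

		reg_write(off, reg_read(off) | SDMA_CLIENT_BIT);
	}
	printf("DAGB0 override = 0x%08x\n", (unsigned int)reg_read(0));
	return 0;
}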
static void mmhub_v1_7_init_cache_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
@@ -337,6 +361,7 @@ static int mmhub_v1_7_gart_enable(struct amdgpu_device *adev)
mmhub_v1_7_init_system_aperture_regs(adev);
mmhub_v1_7_init_tlb_regs(adev);
mmhub_v1_7_init_cache_regs(adev);
+ mmhub_v1_7_init_snoop_override_regs(adev);
mmhub_v1_7_enable_system_domain(adev);
mmhub_v1_7_disable_identity_aperture(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
index e646e5cef0a2..a54e7b929295 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -213,6 +213,32 @@ static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev)
}
}
+/* Set snoop bit for SDMA so that SDMA writes probe-invalidate RW lines */
+static void mmhub_v1_8_init_snoop_override_regs(struct amdgpu_device *adev)
+{
+ uint32_t tmp, inst_mask;
+ int i, j;
+ uint32_t distance = regDAGB1_WRCLI_GPU_SNOOP_OVERRIDE -
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE;
+
+ inst_mask = adev->aid_mask;
+ for_each_inst(i, inst_mask) {
+ for (j = 0; j < 5; j++) { /* DAGB instances */
+ tmp = RREG32_SOC15_OFFSET(MMHUB, i,
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, j * distance);
+ tmp |= (1 << 15); /* SDMA client is BIT15 */
+ WREG32_SOC15_OFFSET(MMHUB, i,
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, j * distance, tmp);
+
+ tmp = RREG32_SOC15_OFFSET(MMHUB, i,
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, j * distance);
+ tmp |= (1 << 15);
+ WREG32_SOC15_OFFSET(MMHUB, i,
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, j * distance, tmp);
+ }
+ }
+}
+
static void mmhub_v1_8_init_cache_regs(struct amdgpu_device *adev)
{
uint32_t tmp, inst_mask;
@@ -418,6 +444,7 @@ static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev)
mmhub_v1_8_init_system_aperture_regs(adev);
mmhub_v1_8_init_tlb_regs(adev);
mmhub_v1_8_init_cache_regs(adev);
+ mmhub_v1_8_init_snoop_override_regs(adev);
mmhub_v1_8_enable_system_domain(adev);
mmhub_v1_8_disable_identity_aperture(adev);
@@ -719,11 +746,13 @@ static int mmhub_v1_8_aca_bank_parser(struct aca_handle *handle, struct aca_bank
misc0 = bank->regs[ACA_REG_IDX_MISC0];
switch (type) {
case ACA_SMU_TYPE_UE:
+ bank->aca_err_type = ACA_ERROR_TYPE_UE;
ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
1ULL);
break;
case ACA_SMU_TYPE_CE:
- ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE,
+ bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank);
+ ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
ACA_REG__MISC0__ERRCNT(misc0));
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
index b4ce3375d3fd..bc3d6c2fc87a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
@@ -103,6 +103,7 @@ mmhub_v3_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(3, 3, 0):
case IP_VERSION(3, 3, 1):
+ case IP_VERSION(3, 3, 2):
mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3) ?
mmhub_client_ids_v3_3[cid][rw] :
cid == 0x140 ? "UMSCH" : NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index ff1b58e44689..fe0710b55c3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -198,6 +198,36 @@ static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid)
hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
}
+/* Set snoop bit for SDMA so that SDMA writes probe-invalidate RW lines */
+static void mmhub_v9_4_init_snoop_override_regs(struct amdgpu_device *adev, int hubid)
+{
+ uint32_t tmp;
+ int i;
+ uint32_t distance = mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE -
+ mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE;
+ uint32_t huboffset = hubid * MMHUB_INSTANCE_REGISTER_OFFSET;
+
+ for (i = 0; i < 5 - (2 * hubid); i++) {
+ /* DAGB instances 0 to 4 are in hub0 and 5 to 7 are in hub1 */
+ tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
+ mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE,
+ huboffset + i * distance);
+ tmp |= (1 << 15); /* SDMA client is BIT15 */
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE,
+ huboffset + i * distance, tmp);
+
+ tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
+ mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE,
+ huboffset + i * distance);
+ tmp |= (1 << 15);
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE,
+ huboffset + i * distance, tmp);
+ }
+}
+
static void mmhub_v9_4_init_cache_regs(struct amdgpu_device *adev, int hubid)
{
uint32_t tmp;
@@ -392,6 +422,7 @@ static int mmhub_v9_4_gart_enable(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev))
mmhub_v9_4_init_cache_regs(adev, i);
+ mmhub_v9_4_init_snoop_override_regs(adev, i);
mmhub_v9_4_enable_system_domain(adev, i);
if (!amdgpu_sriov_vf(adev))
mmhub_v9_4_disable_identity_aperture(adev, i);
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 4dcb72d1bdda..5aadf24cb202 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -184,6 +184,9 @@ send_request:
case IDH_REQ_RAS_ERROR_COUNT:
event = IDH_RAS_ERROR_COUNT_READY;
break;
+ case IDH_REQ_RAS_CPER_DUMP:
+ event = IDH_RAS_CPER_DUMP_READY;
+ break;
default:
break;
}
@@ -467,6 +470,16 @@ static int xgpu_nv_req_ras_err_count(struct amdgpu_device *adev)
return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_ERROR_COUNT);
}
+static int xgpu_nv_req_ras_cper_dump(struct amdgpu_device *adev, u64 vf_rptr)
+{
+ uint32_t vf_rptr_hi, vf_rptr_lo;
+
+ vf_rptr_hi = (uint32_t)(vf_rptr >> 32);
+ vf_rptr_lo = (uint32_t)(vf_rptr & 0xFFFFFFFF);
+ return xgpu_nv_send_access_requests_with_param(
+ adev, IDH_REQ_RAS_CPER_DUMP, vf_rptr_hi, vf_rptr_lo, 0);
+}
+
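/*
 * xgpu_nv_req_ras_cper_dump() has to squeeze the 64-bit CPER read pointer
 * into two 32-bit mailbox parameters, so it sends the high and low halves
 * separately.  The round trip is just a shift/mask split and the inverse
 * recombination, shown below as a self-contained check.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t vf_rptr = 0x123456789abcdef0ULL;

	uint32_t hi = (uint32_t)(vf_rptr >> 32);
	uint32_t lo = (uint32_t)(vf_rptr & 0xFFFFFFFF);

	/* host side: rebuild the original pointer from the two words */
	uint64_t rebuilt = ((uint64_t)hi << 32) | lo;

	assert(rebuilt == vf_rptr);
	return 0;
}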
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.req_full_gpu = xgpu_nv_request_full_gpu_access,
.rel_full_gpu = xgpu_nv_release_full_gpu_access,
@@ -478,4 +491,5 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.ras_poison_handler = xgpu_nv_ras_poison_handler,
.rcvd_ras_intr = xgpu_nv_rcvd_ras_intr,
.req_ras_err_count = xgpu_nv_req_ras_err_count,
+ .req_ras_cper_dump = xgpu_nv_req_ras_cper_dump,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index 9d61d76e1bf9..72c9fceb9d79 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -41,6 +41,7 @@ enum idh_request {
IDH_READY_TO_RESET = 201,
IDH_RAS_POISON = 202,
IDH_REQ_RAS_ERROR_COUNT = 203,
+ IDH_REQ_RAS_CPER_DUMP = 204,
};
enum idh_event {
@@ -56,6 +57,7 @@ enum idh_event {
IDH_PF_SOFT_FLR_NOTIFICATION,
IDH_RAS_ERROR_DETECTED,
IDH_RAS_ERROR_COUNT_READY = 11,
+ IDH_RAS_CPER_DUMP_READY = 14,
IDH_TEXT_MESSAGE = 255,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 62cdfe10e6f4..4cd325149b63 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -625,7 +625,7 @@ static int navi10_ih_resume(struct amdgpu_ip_block *ip_block)
return navi10_ih_hw_init(ip_block);
}
-static bool navi10_ih_is_idle(void *handle)
+static bool navi10_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return true;
@@ -682,9 +682,9 @@ static int navi10_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void navi10_ih_get_clockgating_state(void *handle, u64 *flags)
+static void navi10_ih_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL))
*flags |= AMD_CG_SUPPORT_IH_CG;
diff --git a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c
index c92875ceb31f..9b4025c39e44 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c
@@ -21,7 +21,6 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "nbif_v6_3_1.h"
#include "nbif/nbif_6_3_1_offset.h"
@@ -474,52 +473,6 @@ const struct amdgpu_nbio_funcs nbif_v6_3_1_funcs = {
};
-static void nbif_v6_3_1_sriov_ih_doorbell_range(struct amdgpu_device *adev,
- bool use_doorbell, int doorbell_index)
-{
-}
-
-static void nbif_v6_3_1_sriov_sdma_doorbell_range(struct amdgpu_device *adev,
- int instance, bool use_doorbell,
- int doorbell_index,
- int doorbell_size)
-{
-}
-
-static void nbif_v6_3_1_sriov_vcn_doorbell_range(struct amdgpu_device *adev,
- bool use_doorbell,
- int doorbell_index, int instance)
-{
-}
-
-static void nbif_v6_3_1_sriov_gc_doorbell_init(struct amdgpu_device *adev)
-{
-}
-
-const struct amdgpu_nbio_funcs nbif_v6_3_1_sriov_funcs = {
- .get_hdp_flush_req_offset = nbif_v6_3_1_get_hdp_flush_req_offset,
- .get_hdp_flush_done_offset = nbif_v6_3_1_get_hdp_flush_done_offset,
- .get_pcie_index_offset = nbif_v6_3_1_get_pcie_index_offset,
- .get_pcie_data_offset = nbif_v6_3_1_get_pcie_data_offset,
- .get_rev_id = nbif_v6_3_1_get_rev_id,
- .mc_access_enable = nbif_v6_3_1_mc_access_enable,
- .get_memsize = nbif_v6_3_1_get_memsize,
- .sdma_doorbell_range = nbif_v6_3_1_sriov_sdma_doorbell_range,
- .vcn_doorbell_range = nbif_v6_3_1_sriov_vcn_doorbell_range,
- .gc_doorbell_init = nbif_v6_3_1_sriov_gc_doorbell_init,
- .enable_doorbell_aperture = nbif_v6_3_1_enable_doorbell_aperture,
- .enable_doorbell_selfring_aperture = nbif_v6_3_1_enable_doorbell_selfring_aperture,
- .ih_doorbell_range = nbif_v6_3_1_sriov_ih_doorbell_range,
- .update_medium_grain_clock_gating = nbif_v6_3_1_update_medium_grain_clock_gating,
- .update_medium_grain_light_sleep = nbif_v6_3_1_update_medium_grain_light_sleep,
- .get_clockgating_state = nbif_v6_3_1_get_clockgating_state,
- .ih_control = nbif_v6_3_1_ih_control,
- .init_registers = nbif_v6_3_1_init_registers,
- .remap_hdp_registers = nbif_v6_3_1_remap_hdp_registers,
- .get_rom_offset = nbif_v6_3_1_get_rom_offset,
- .set_reg_remap = nbif_v6_3_1_set_reg_remap,
-};
-
static int nbif_v6_3_1_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h
index 9ac4831d39e1..3afec715a9fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h
@@ -28,7 +28,6 @@
extern const struct nbio_hdp_flush_reg nbif_v6_3_1_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbif_v6_3_1_funcs;
-extern const struct amdgpu_nbio_funcs nbif_v6_3_1_sriov_funcs;
extern struct amdgpu_nbio_ras nbif_v6_3_1_ras;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index 739fce4fa8fd..04041b398781 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -21,7 +21,6 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "nbio_v2_3.h"
#include "nbio/nbio_2_3_default.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
index a54052dea8bf..f89e5f40e1a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
@@ -21,7 +21,6 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "nbio_v4_3.h"
#include "nbio/nbio_4_3_0_offset.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 34180c6070dd..e911368c1aeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -21,7 +21,6 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "nbio_v6_1.h"
#include "nbio/nbio_6_1_default.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index d1032e9992b4..1569a1e934ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -21,7 +21,6 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "nbio_v7_0.h"
#include "nbio/nbio_7_0_default.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
index 41421da63a08..2ece3ae75ec1 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
@@ -21,7 +21,6 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "nbio_v7_11.h"
#include "nbio/nbio_7_11_0_offset.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
index a766e2d90cd0..acc5f363684a 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
@@ -21,7 +21,6 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "nbio_v7_2.h"
#include "nbio/nbio_7_2_0_offset.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index a26a9be58eac..d5002ff931d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -21,7 +21,6 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "nbio_v7_4.h"
#include "amdgpu_ras.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
index 3fb6d2aa7e3b..2ee60b8746a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
@@ -21,7 +21,6 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "nbio_v7_7.h"
#include "nbio/nbio_7_7_0_offset.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index 8a0a63ac88d2..f23cb79110d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -21,7 +21,6 @@
*
*/
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "nbio_v7_9.h"
#include "amdgpu_ras.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 47db483c3516..50e77d9b30af 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -78,12 +78,12 @@ static const struct amdgpu_video_codecs nv_video_codecs_encode = {
/* Navi1x */
static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 8192, 8192, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
@@ -104,10 +104,10 @@ static const struct amdgpu_video_codecs sc_video_codecs_encode = {
};
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
@@ -115,10 +115,10 @@ static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[]
};
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn1[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
@@ -141,23 +141,23 @@ static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] = {
};
static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn1[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
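The tables above lower the advertised MPEG2, MPEG4 and VC1 decode limits to 1920x1088 and raise the JPEG limit; from its call sites, codec_info_build() appears to take the codec index, maximum width, maximum height and maximum level, in that order. A hedged sketch of what one entry conveys (field names are ours, not the driver's struct):

#include <stdint.h>

/* Illustrative shape of a single decode-capability entry. */
struct codec_cap {
	uint32_t codec_index;	/* e.g. the MPEG2 capability index              */
	uint32_t max_width;	/* pixels                                       */
	uint32_t max_height;	/* pixels                                       */
	uint32_t max_level;	/* codec-specific level, e.g. 52 for H.264 5.2  */
};

static const struct codec_cap mpeg2_decode_cap = {
	.codec_index = 0,	/* placeholder value */
	.max_width   = 1920,
	.max_height  = 1088,
	.max_level   = 3,
};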
@@ -454,6 +454,7 @@ nv_asic_reset_method(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 3):
case IP_VERSION(13, 0, 5):
@@ -1034,7 +1035,7 @@ static int nv_common_resume(struct amdgpu_ip_block *ip_block)
return nv_common_hw_init(ip_block);
}
-static bool nv_common_is_idle(void *handle)
+static bool nv_common_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
@@ -1077,9 +1078,9 @@ static int nv_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void nv_common_get_clockgating_state(void *handle, u64 *flags)
+static void nv_common_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
*flags = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/nvd.h b/drivers/gpu/drm/amd/amdgpu/nvd.h
index 631dafb92299..56f1bfac0b20 100644
--- a/drivers/gpu/drm/amd/amdgpu/nvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/nvd.h
@@ -64,6 +64,24 @@
#define PACKET3_INDIRECT_BUFFER_CNST_END 0x19
#define PACKET3_ATOMIC_GDS 0x1D
#define PACKET3_ATOMIC_MEM 0x1E
+#define PACKET3_ATOMIC_MEM__ATOMIC(x) ((((unsigned)(x)) & 0x7F) << 0)
+#define PACKET3_ATOMIC_MEM__COMMAND(x) ((((unsigned)(x)) & 0xF) << 8)
+#define PACKET3_ATOMIC_MEM__CACHE_POLICY(x) ((((unsigned)(x)) & 0x3) << 25)
+#define PACKET3_ATOMIC_MEM__ADDR_LO(x) (((unsigned)(x)))
+#define PACKET3_ATOMIC_MEM__ADDR_HI(x) (((unsigned)(x)))
+#define PACKET3_ATOMIC_MEM__SRC_DATA_LO(x) (((unsigned)(x)))
+#define PACKET3_ATOMIC_MEM__SRC_DATA_HI(x) (((unsigned)(x)))
+#define PACKET3_ATOMIC_MEM__CMP_DATA_LO(x) (((unsigned)(x)))
+#define PACKET3_ATOMIC_MEM__CMP_DATA_HI(x) (((unsigned)(x)))
+#define PACKET3_ATOMIC_MEM__LOOP_INTERVAL(x) ((((unsigned)(x)) & 0x1FFF) << 0)
+#define PACKET3_ATOMIC_MEM__COMMAND__SINGLE_PASS_ATOMIC 0
+#define PACKET3_ATOMIC_MEM__COMMAND__LOOP_UNTIL_COMPARE_SATISFIED 1
+#define PACKET3_ATOMIC_MEM__COMMAND__WAIT_FOR_WRITE_CONFIRMATION 2
+#define PACKET3_ATOMIC_MEM__COMMAND__SEND_AND_CONTINUE 3
+#define PACKET3_ATOMIC_MEM__CACHE_POLICY__LRU 0
+#define PACKET3_ATOMIC_MEM__CACHE_POLICY__STREAM 1
+#define PACKET3_ATOMIC_MEM__CACHE_POLICY__NOA 2
+#define PACKET3_ATOMIC_MEM__CACHE_POLICY__BYPASS 3
#define PACKET3_OCCLUSION_QUERY 0x1F
#define PACKET3_SET_PREDICATION 0x20
#define PACKET3_REG_RMW 0x21
@@ -105,6 +123,38 @@
* 1 - pfp
* 2 - ce
*/
+#define PACKET3_WRITE_DATA__DST_SEL(x) ((((unsigned)(x)) & 0xF) << 8)
+#define PACKET3_WRITE_DATA__ADDR_INCR(x) ((((unsigned)(x)) & 0x1) << 16)
+#define PACKET3_WRITE_DATA__WR_CONFIRM(x) ((((unsigned)(x)) & 0x1) << 20)
+#define PACKET3_WRITE_DATA__CACHE_POLICY(x) ((((unsigned)(x)) & 0x3) << 25)
+#define PACKET3_WRITE_DATA__DST_MMREG_ADDR(x) ((((unsigned)(x)) & 0x3FFFF) << 0)
+#define PACKET3_WRITE_DATA__DST_GDS_ADDR(x) ((((unsigned)(x)) & 0xFFFF) << 0)
+#define PACKET3_WRITE_DATA__DST_MEM_ADDR_LO(x) ((((unsigned)(x)) & 0x3FFFFFFF) << 2)
+#define PACKET3_WRITE_DATA__DST_MEM_ADDR_HI(x) ((unsigned)(x))
+#define PACKET3_WRITE_DATA__MODE(x) ((((unsigned)(x)) & 0x1) << 21)
+#define PACKET3_WRITE_DATA__AID_ID(x) ((((unsigned)(x)) & 0x3) << 22)
+#define PACKET3_WRITE_DATA__TEMPORAL(x) ((((unsigned)(x)) & 0x3) << 24)
+#define PACKET3_WRITE_DATA__DST_MMREG_ADDR_LO(x) ((unsigned)(x))
+#define PACKET3_WRITE_DATA__DST_MMREG_ADDR_HI(x) ((((unsigned)(x)) & 0xFF) << 0)
+#define PACKET3_WRITE_DATA__DST_SEL__MEM_MAPPED_REGISTER 0
+#define PACKET3_WRITE_DATA__DST_SEL__TC_L2 2
+#define PACKET3_WRITE_DATA__DST_SEL__GDS 3
+#define PACKET3_WRITE_DATA__DST_SEL__MEMORY 5
+#define PACKET3_WRITE_DATA__DST_SEL__MEMORY_MAPPED_ADC_PERSISTENT_STATE 6
+#define PACKET3_WRITE_DATA__ADDR_INCR__INCREMENT_ADDRESS 0
+#define PACKET3_WRITE_DATA__ADDR_INCR__DO_NOT_INCREMENT_ADDRESS 1
+#define PACKET3_WRITE_DATA__WR_CONFIRM__DO_NOT_WAIT_FOR_WRITE_CONFIRMATION 0
+#define PACKET3_WRITE_DATA__WR_CONFIRM__WAIT_FOR_WRITE_CONFIRMATION 1
+#define PACKET3_WRITE_DATA__MODE__PF_VF_DISABLED 0
+#define PACKET3_WRITE_DATA__MODE__PF_VF_ENABLED 1
+#define PACKET3_WRITE_DATA__TEMPORAL__RT 0
+#define PACKET3_WRITE_DATA__TEMPORAL__NT 1
+#define PACKET3_WRITE_DATA__TEMPORAL__HT 2
+#define PACKET3_WRITE_DATA__TEMPORAL__LU 3
+#define PACKET3_WRITE_DATA__CACHE_POLICY__LRU 0
+#define PACKET3_WRITE_DATA__CACHE_POLICY__STREAM 1
+#define PACKET3_WRITE_DATA__CACHE_POLICY__NOA 2
+#define PACKET3_WRITE_DATA__CACHE_POLICY__BYPASS 3
#define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38
#define PACKET3_MEM_SEMAPHORE 0x39
# define PACKET3_SEM_USE_MAILBOX (0x1 << 16)
@@ -135,6 +185,42 @@
/* 0 - me
* 1 - pfp
*/
+#define PACKET3_WAIT_REG_MEM__FUNCTION(x) ((((unsigned)(x)) & 0x7) << 0)
+#define PACKET3_WAIT_REG_MEM__MEM_SPACE(x) ((((unsigned)(x)) & 0x3) << 4)
+#define PACKET3_WAIT_REG_MEM__OPERATION(x) ((((unsigned)(x)) & 0x3) << 6)
+#define PACKET3_WAIT_REG_MEM__MES_INTR_PIPE(x) ((((unsigned)(x)) & 0x3) << 22)
+#define PACKET3_WAIT_REG_MEM__MES_ACTION(x) ((((unsigned)(x)) & 0x1) << 24)
+#define PACKET3_WAIT_REG_MEM__CACHE_POLICY(x) ((((unsigned)(x)) & 0x3) << 25)
+#define PACKET3_WAIT_REG_MEM__TEMPORAL(x) ((((unsigned)(x)) & 0x3) << 25)
+#define PACKET3_WAIT_REG_MEM__MEM_POLL_ADDR_LO(x) ((((unsigned)(x)) & 0x3FFFFFFF) << 2)
+#define PACKET3_WAIT_REG_MEM__REG_POLL_ADDR(x) ((((unsigned)(x)) & 0x3FFFF) << 0)
+#define PACKET3_WAIT_REG_MEM__REG_WRITE_ADDR1(x) ((((unsigned)(x)) & 0x3FFFF) << 0)
+#define PACKET3_WAIT_REG_MEM__MEM_POLL_ADDR_HI(x) ((unsigned)(x))
+#define PACKET3_WAIT_REG_MEM__REG_WRITE_ADDR2(x) ((((unsigned)(x)) & 0x3FFFF) << 0)
+#define PACKET3_WAIT_REG_MEM__REFERENCE(x) ((unsigned)(x))
+#define PACKET3_WAIT_REG_MEM__MASK(x) ((unsigned)(x))
+#define PACKET3_WAIT_REG_MEM__POLL_INTERVAL(x) ((((unsigned)(x)) & 0xFFFF) << 0)
+#define PACKET3_WAIT_REG_MEM__OPTIMIZE_ACE_OFFLOAD_MODE(x) ((((unsigned)(x)) & 0x1) << 31)
+#define PACKET3_WAIT_REG_MEM__FUNCTION__ALWAYS_PASS 0
+#define PACKET3_WAIT_REG_MEM__FUNCTION__LESS_THAN_REF_VALUE 1
+#define PACKET3_WAIT_REG_MEM__FUNCTION__LESS_THAN_EQUAL_TO_THE_REF_VALUE 2
+#define PACKET3_WAIT_REG_MEM__FUNCTION__EQUAL_TO_THE_REFERENCE_VALUE 3
+#define PACKET3_WAIT_REG_MEM__FUNCTION__NOT_EQUAL_REFERENCE_VALUE 4
+#define PACKET3_WAIT_REG_MEM__FUNCTION__GREATER_THAN_OR_EQUAL_REFERENCE_VALUE 5
+#define PACKET3_WAIT_REG_MEM__FUNCTION__GREATER_THAN_REFERENCE_VALUE 6
+#define PACKET3_WAIT_REG_MEM__MEM_SPACE__REGISTER_SPACE 0
+#define PACKET3_WAIT_REG_MEM__MEM_SPACE__MEMORY_SPACE 1
+#define PACKET3_WAIT_REG_MEM__OPERATION__WAIT_REG_MEM 0
+#define PACKET3_WAIT_REG_MEM__OPERATION__WR_WAIT_WR_REG 1
+#define PACKET3_WAIT_REG_MEM__OPERATION__WAIT_MEM_PREEMPTABLE 3
+#define PACKET3_WAIT_REG_MEM__CACHE_POLICY__LRU 0
+#define PACKET3_WAIT_REG_MEM__CACHE_POLICY__STREAM 1
+#define PACKET3_WAIT_REG_MEM__CACHE_POLICY__NOA 2
+#define PACKET3_WAIT_REG_MEM__CACHE_POLICY__BYPASS 3
+#define PACKET3_WAIT_REG_MEM__TEMPORAL__RT 0
+#define PACKET3_WAIT_REG_MEM__TEMPORAL__NT 1
+#define PACKET3_WAIT_REG_MEM__TEMPORAL__HT 2
+#define PACKET3_WAIT_REG_MEM__TEMPORAL__LU 3
#define PACKET3_INDIRECT_BUFFER 0x3F
#define INDIRECT_BUFFER_VALID (1 << 23)
#define INDIRECT_BUFFER_CACHE_POLICY(x) ((x) << 28)
@@ -144,8 +230,94 @@
*/
#define INDIRECT_BUFFER_PRE_ENB(x) ((x) << 21)
#define INDIRECT_BUFFER_PRE_RESUME(x) ((x) << 30)
+#define PACKET3_INDIRECT_BUFFER__IB_BASE_LO(x) ((((unsigned)(x)) & 0x3FFFFFFF) << 2)
+#define PACKET3_INDIRECT_BUFFER__IB_BASE_HI(x) ((unsigned)(x))
+#define PACKET3_INDIRECT_BUFFER__IB_SIZE(x) ((((unsigned)(x)) & 0xFFFFF) << 0)
+#define PACKET3_INDIRECT_BUFFER__CHAIN(x) ((((unsigned)(x)) & 0x1) << 20)
+#define PACKET3_INDIRECT_BUFFER__OFFLOAD_POLLING(x) ((((unsigned)(x)) & 0x1) << 21)
+#define PACKET3_INDIRECT_BUFFER__VALID(x) ((((unsigned)(x)) & 0x1) << 23)
+#define PACKET3_INDIRECT_BUFFER__VMID(x) ((((unsigned)(x)) & 0xF) << 24)
+#define PACKET3_INDIRECT_BUFFER__CACHE_POLICY(x) ((((unsigned)(x)) & 0x3) << 28)
+#define PACKET3_INDIRECT_BUFFER__TEMPORAL(x) ((((unsigned)(x)) & 0x3) << 28)
+#define PACKET3_INDIRECT_BUFFER__PRIV(x) ((((unsigned)(x)) & 0x1) << 31)
+#define PACKET3_INDIRECT_BUFFER__TEMPORAL__RT 0
+#define PACKET3_INDIRECT_BUFFER__TEMPORAL__NT 1
+#define PACKET3_INDIRECT_BUFFER__TEMPORAL__HT 2
+#define PACKET3_INDIRECT_BUFFER__TEMPORAL__LU 3
+#define PACKET3_INDIRECT_BUFFER__CACHE_POLICY__LRU 0
+#define PACKET3_INDIRECT_BUFFER__CACHE_POLICY__STREAM 1
+#define PACKET3_INDIRECT_BUFFER__CACHE_POLICY__NOA 2
+#define PACKET3_INDIRECT_BUFFER__CACHE_POLICY__BYPASS 3
#define PACKET3_COND_INDIRECT_BUFFER 0x3F
#define PACKET3_COPY_DATA 0x40
+#define PACKET3_COPY_DATA__SRC_SEL(x) ((((unsigned)(x)) & 0xF) << 0)
+#define PACKET3_COPY_DATA__DST_SEL(x) ((((unsigned)(x)) & 0xF) << 8)
+#define PACKET3_COPY_DATA__SRC_CACHE_POLICY(x) ((((unsigned)(x)) & 0x3) << 13)
+#define PACKET3_COPY_DATA__SRC_TEMPORAL(x) ((((unsigned)(x)) & 0x3) << 13)
+#define PACKET3_COPY_DATA__COUNT_SEL(x) ((((unsigned)(x)) & 0x1) << 16)
+#define PACKET3_COPY_DATA__WR_CONFIRM(x) ((((unsigned)(x)) & 0x1) << 20)
+#define PACKET3_COPY_DATA__DST_CACHE_POLICY(x) ((((unsigned)(x)) & 0x3) << 25)
+#define PACKET3_COPY_DATA__PQ_EXE_STATUS(x) ((((unsigned)(x)) & 0x1) << 29)
+#define PACKET3_COPY_DATA__SRC_REG_OFFSET(x) ((((unsigned)(x)) & 0x3FFFF) << 0)
+#define PACKET3_COPY_DATA__SRC_32B_ADDR_LO(x) ((((unsigned)(x)) & 0x3FFFFFFF) << 2)
+#define PACKET3_COPY_DATA__SRC_64B_ADDR_LO(x) ((((unsigned)(x)) & 0x1FFFFFFF) << 3)
+#define PACKET3_COPY_DATA__SRC_GDS_ADDR_LO(x) ((((unsigned)(x)) & 0xFFFF) << 0)
+#define PACKET3_COPY_DATA__IMM_DATA(x) ((unsigned)(x))
+#define PACKET3_COPY_DATA__SRC_MEMTC_ADDR_HI(x) ((unsigned)(x))
+#define PACKET3_COPY_DATA__SRC_IMM_DATA(x) ((unsigned)(x))
+#define PACKET3_COPY_DATA__DST_REG_OFFSET(x) ((((unsigned)(x)) & 0x3FFFF) << 0)
+#define PACKET3_COPY_DATA__DST_32B_ADDR_LO(x) ((((unsigned)(x)) & 0x3FFFFFFF) << 2)
+#define PACKET3_COPY_DATA__DST_64B_ADDR_LO(x) ((((unsigned)(x)) & 0x1FFFFFFF) << 3)
+#define PACKET3_COPY_DATA__DST_GDS_ADDR_LO(x) ((((unsigned)(x)) & 0xFFFF) << 0)
+#define PACKET3_COPY_DATA__DST_ADDR_HI(x) ((unsigned)(x))
+#define PACKET3_COPY_DATA__MODE(x) ((((unsigned)(x)) & 0x1) << 21)
+#define PACKET3_COPY_DATA__AID_ID(x) ((((unsigned)(x)) & 0x3) << 23)
+#define PACKET3_COPY_DATA__DST_TEMPORAL(x) ((((unsigned)(x)) & 0x3) << 25)
+#define PACKET3_COPY_DATA__SRC_REG_OFFSET_LO(x) ((unsigned)(x))
+#define PACKET3_COPY_DATA__SRC_REG_OFFSET_HI(x) ((((unsigned)(x)) & 0xFF) << 0)
+#define PACKET3_COPY_DATA__DST_REG_OFFSET_LO(x) ((unsigned)(x))
+#define PACKET3_COPY_DATA__DST_REG_OFFSET_HI(x) ((((unsigned)(x)) & 0xFF) << 0)
+#define PACKET3_COPY_DATA__SRC_SEL__MEM_MAPPED_REGISTER 0
+#define PACKET3_COPY_DATA__SRC_SEL__TC_L2_OBSOLETE 1
+#define PACKET3_COPY_DATA__SRC_SEL__TC_L2 2
+#define PACKET3_COPY_DATA__SRC_SEL__GDS 3
+#define PACKET3_COPY_DATA__SRC_SEL__PERFCOUNTERS 4
+#define PACKET3_COPY_DATA__SRC_SEL__IMMEDIATE_DATA 5
+#define PACKET3_COPY_DATA__SRC_SEL__ATOMIC_RETURN_DATA 6
+#define PACKET3_COPY_DATA__SRC_SEL__GDS_ATOMIC_RETURN_DATA0 7
+#define PACKET3_COPY_DATA__SRC_SEL__GDS_ATOMIC_RETURN_DATA1 8
+#define PACKET3_COPY_DATA__SRC_SEL__GPU_CLOCK_COUNT 9
+#define PACKET3_COPY_DATA__SRC_SEL__SYSTEM_CLOCK_COUNT 10
+#define PACKET3_COPY_DATA__DST_SEL__MEM_MAPPED_REGISTER 0
+#define PACKET3_COPY_DATA__DST_SEL__TC_L2 2
+#define PACKET3_COPY_DATA__DST_SEL__GDS 3
+#define PACKET3_COPY_DATA__DST_SEL__PERFCOUNTERS 4
+#define PACKET3_COPY_DATA__DST_SEL__TC_L2_OBSOLETE 5
+#define PACKET3_COPY_DATA__DST_SEL__MEM_MAPPED_REG_DC 6
+#define PACKET3_COPY_DATA__SRC_TEMPORAL__RT 0
+#define PACKET3_COPY_DATA__SRC_TEMPORAL__NT 1
+#define PACKET3_COPY_DATA__SRC_TEMPORAL__HT 2
+#define PACKET3_COPY_DATA__SRC_TEMPORAL__LU 3
+#define PACKET3_COPY_DATA__SRC_CACHE_POLICY__LRU 0
+#define PACKET3_COPY_DATA__SRC_CACHE_POLICY__STREAM 1
+#define PACKET3_COPY_DATA__SRC_CACHE_POLICY__NOA 2
+#define PACKET3_COPY_DATA__SRC_CACHE_POLICY__BYPASS 3
+#define PACKET3_COPY_DATA__COUNT_SEL__32_BITS_OF_DATA 0
+#define PACKET3_COPY_DATA__COUNT_SEL__64_BITS_OF_DATA 1
+#define PACKET3_COPY_DATA__WR_CONFIRM__DO_NOT_WAIT_FOR_CONFIRMATION 0
+#define PACKET3_COPY_DATA__WR_CONFIRM__WAIT_FOR_CONFIRMATION 1
+#define PACKET3_COPY_DATA__MODE__PF_VF_DISABLED 0
+#define PACKET3_COPY_DATA__MODE__PF_VF_ENABLED 1
+#define PACKET3_COPY_DATA__DST_TEMPORAL__RT 0
+#define PACKET3_COPY_DATA__DST_TEMPORAL__NT 1
+#define PACKET3_COPY_DATA__DST_TEMPORAL__HT 2
+#define PACKET3_COPY_DATA__DST_TEMPORAL__LU 3
+#define PACKET3_COPY_DATA__DST_CACHE_POLICY__LRU 0
+#define PACKET3_COPY_DATA__DST_CACHE_POLICY__STREAM 1
+#define PACKET3_COPY_DATA__DST_CACHE_POLICY__NOA 2
+#define PACKET3_COPY_DATA__DST_CACHE_POLICY__BYPASS 3
+#define PACKET3_COPY_DATA__PQ_EXE_STATUS__DEFAULT 0
+#define PACKET3_COPY_DATA__PQ_EXE_STATUS__PHASE_UPDATE 1
#define PACKET3_CP_DMA 0x41
#define PACKET3_PFP_SYNC_ME 0x42
#define PACKET3_SURFACE_SYNC 0x43
@@ -160,6 +332,23 @@
* 3 - SAMPLE_STREAMOUTSTAT*
* 4 - *S_PARTIAL_FLUSH
*/
+#define PACKET3_EVENT_WRITE__EVENT_TYPE(x) ((((unsigned)(x)) & 0x3F) << 0)
+#define PACKET3_EVENT_WRITE__EVENT_INDEX(x) ((((unsigned)(x)) & 0xF) << 8)
+#define PACKET3_EVENT_WRITE__SAMP_PLST_CNTR_MODE(x) ((((unsigned)(x)) & 0x3) << 29)
+#define PACKET3_EVENT_WRITE__OFFLOAD_ENABLE(x) ((((unsigned)(x)) & 0x1) << 0)
+#define PACKET3_EVENT_WRITE__ADDRESS_LO(x) ((((unsigned)(x)) & 0x1FFFFFFF) << 3)
+#define PACKET3_EVENT_WRITE__ADDRESS_HI(x) ((unsigned)(x))
+#define PACKET3_EVENT_WRITE__EVENT_INDEX__OTHER 0
+#define PACKET3_EVENT_WRITE__EVENT_INDEX__SAMPLE_PIPELINESTAT 2
+#define PACKET3_EVENT_WRITE__EVENT_INDEX__CS_PARTIAL_FLUSH 4
+#define PACKET3_EVENT_WRITE__EVENT_INDEX__SAMPLE_STREAMOUTSTATS 8
+#define PACKET3_EVENT_WRITE__EVENT_INDEX__SAMPLE_STREAMOUTSTATS1 9
+#define PACKET3_EVENT_WRITE__EVENT_INDEX__SAMPLE_STREAMOUTSTATS2 10
+#define PACKET3_EVENT_WRITE__EVENT_INDEX__SAMPLE_STREAMOUTSTATS3 11
+#define PACKET3_EVENT_WRITE__SAMP_PLST_CNTR_MODE__LEGACY_MODE 0
+#define PACKET3_EVENT_WRITE__SAMP_PLST_CNTR_MODE__MIXED_MODE1 1
+#define PACKET3_EVENT_WRITE__SAMP_PLST_CNTR_MODE__NEW_MODE 2
+#define PACKET3_EVENT_WRITE__SAMP_PLST_CNTR_MODE__MIXED_MODE3 3
#define PACKET3_EVENT_WRITE_EOP 0x47
#define PACKET3_EVENT_WRITE_EOS 0x48
#define PACKET3_RELEASE_MEM 0x49
@@ -304,6 +493,12 @@
* 2: REVERSE
*/
#define PACKET3_ACQUIRE_MEM_GCR_RANGE_IS_PA (1 << 18)
+#define PACKET3_ACQUIRE_MEM__COHER_SIZE(x) ((unsigned)(x))
+#define PACKET3_ACQUIRE_MEM__COHER_SIZE_HI(x) ((((unsigned)(x)) & 0xFF) << 0)
+#define PACKET3_ACQUIRE_MEM__COHER_BASE_LO(x) ((unsigned)(x))
+#define PACKET3_ACQUIRE_MEM__COHER_BASE_HI(x) ((((unsigned)(x)) & 0xFFFFFF) << 0)
+#define PACKET3_ACQUIRE_MEM__POLL_INTERVAL(x) ((((unsigned)(x)) & 0xFFFF) << 0)
+#define PACKET3_ACQUIRE_MEM__GCR_CNTL(x) ((((unsigned)(x)) & 0x7FFFF) << 0)
#define PACKET3_REWIND 0x59
#define PACKET3_INTERRUPT 0x5A
#define PACKET3_GEN_PDEPTE 0x5B
@@ -330,11 +525,17 @@
#define PACKET3_SET_SH_REG 0x76
#define PACKET3_SET_SH_REG_START 0x00002c00
#define PACKET3_SET_SH_REG_END 0x00003000
+#define PACKET3_SET_SH_REG__REG_OFFSET(x) ((((unsigned)(x)) & 0xFFFF) << 0)
+#define PACKET3_SET_SH_REG__VMID_SHIFT(x) ((((unsigned)(x)) & 0x1F) << 23)
+#define PACKET3_SET_SH_REG__INDEX(x) ((((unsigned)(x)) & 0xF) << 28)
+#define PACKET3_SET_SH_REG__INDEX__DEFAULT 0
+#define PACKET3_SET_SH_REG__INDEX__INSERT_VMID 1
#define PACKET3_SET_SH_REG_OFFSET 0x77
#define PACKET3_SET_QUEUE_REG 0x78
#define PACKET3_SET_UCONFIG_REG 0x79
#define PACKET3_SET_UCONFIG_REG_START 0x0000c000
#define PACKET3_SET_UCONFIG_REG_END 0x0000c400
+#define PACKET3_SET_UCONFIG_REG__REG_OFFSET(x) ((((unsigned)(x)) & 0xFFFF) << 0)
#define PACKET3_SET_UCONFIG_REG_INDEX 0x7A
#define PACKET3_FORWARD_HEADER 0x7C
#define PACKET3_SCRATCH_RAM_WRITE 0x7D
@@ -369,6 +570,7 @@
# define PACKET3_INVALIDATE_TLBS_DST_SEL(x) ((x) << 0)
# define PACKET3_INVALIDATE_TLBS_ALL_HUB(x) ((x) << 4)
# define PACKET3_INVALIDATE_TLBS_PASID(x) ((x) << 5)
+# define PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(x) ((x) << 29)
#define PACKET3_AQL_PACKET 0x99
#define PACKET3_DMA_DATA_FILL_MULTI 0x9A
#define PACKET3_SET_SH_REG_INDEX 0x9B
@@ -462,6 +664,12 @@
# define PACKET3_QUERY_STATUS_ENG_SEL(x) ((x) << 25)
#define PACKET3_RUN_LIST 0xA5
#define PACKET3_MAP_PROCESS_VM 0xA6
+
+#define PACKET3_RUN_CLEANER_SHADER 0xD2
+/* 1. header
+ * 2. RESERVED [31:0]
+ */
+
/* GFX11 */
#define PACKET3_SET_Q_PREEMPTION_MODE 0xF0
# define PACKET3_SET_Q_PREEMPTION_MODE_IB_VMID(x) ((x) << 0)
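The PACKET3_*__FIELD(x) helpers added to nvd.h above each mask their argument and shift it into the field's bit position, so a complete control dword is built by OR-ing the encoded fields together. A minimal, hedged illustration follows (standalone userspace C, not kernel code; the three field macros and their value names are copied verbatim from the hunk above, while main() is ours):

#include <stdio.h>

/* Field helpers copied from the PACKET3_WRITE_DATA block added above. */
#define PACKET3_WRITE_DATA__DST_SEL(x)      ((((unsigned)(x)) & 0xF) << 8)
#define PACKET3_WRITE_DATA__WR_CONFIRM(x)   ((((unsigned)(x)) & 0x1) << 20)
#define PACKET3_WRITE_DATA__CACHE_POLICY(x) ((((unsigned)(x)) & 0x3) << 25)
#define PACKET3_WRITE_DATA__DST_SEL__MEMORY                          5
#define PACKET3_WRITE_DATA__WR_CONFIRM__WAIT_FOR_WRITE_CONFIRMATION  1
#define PACKET3_WRITE_DATA__CACHE_POLICY__LRU                        0

int main(void)
{
	/* Confirmed write to memory with the LRU cache policy. */
	unsigned int control =
		PACKET3_WRITE_DATA__DST_SEL(PACKET3_WRITE_DATA__DST_SEL__MEMORY) |
		PACKET3_WRITE_DATA__WR_CONFIRM(PACKET3_WRITE_DATA__WR_CONFIRM__WAIT_FOR_WRITE_CONFIRMATION) |
		PACKET3_WRITE_DATA__CACHE_POLICY(PACKET3_WRITE_DATA__CACHE_POLICY__LRU);

	printf("WRITE_DATA control dword: 0x%08x\n", control);	/* prints 0x00100500 */
	return 0;
}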
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 2395f1856962..bb5dfc410a66 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -129,6 +129,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
err = psp_init_ta_microcode(psp, ucode_prefix);
break;
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
err = psp_init_asd_microcode(psp, ucode_prefix);
if (err)
return err;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
index 4d33c95a5116..7c49c3f3c388 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
@@ -35,6 +35,8 @@ MODULE_FIRMWARE("amdgpu/psp_14_0_2_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_2_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_3_ta.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_5_toc.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_5_ta.bin");
/* For large FW files the time to complete can be very long */
#define USBC_PD_POLLING_LIMIT_S 240
@@ -72,6 +74,14 @@ static int psp_v14_0_init_microcode(struct psp_context *psp)
if (err)
return err;
break;
+ case IP_VERSION(14, 0, 5):
+ err = psp_init_toc_microcode(psp, ucode_prefix);
+ if (err)
+ return err;
+ err = psp_init_ta_microcode(psp, ucode_prefix);
+ if (err)
+ return err;
+ break;
default:
BUG();
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 135c5099bfb8..92ce580647cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -911,9 +911,9 @@ static int sdma_v2_4_resume(struct amdgpu_ip_block *ip_block)
return sdma_v2_4_hw_init(ip_block);
}
-static bool sdma_v2_4_is_idle(void *handle)
+static bool sdma_v2_4_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS2);
if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index c611328671ed..1c076bd1cf73 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1200,9 +1200,9 @@ static int sdma_v3_0_resume(struct amdgpu_ip_block *ip_block)
return sdma_v3_0_hw_init(ip_block);
}
-static bool sdma_v3_0_is_idle(void *handle)
+static bool sdma_v3_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS2);
if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
@@ -1514,9 +1514,9 @@ static int sdma_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void sdma_v3_0_get_clockgating_state(void *handle, u64 *flags)
+static void sdma_v3_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
if (amdgpu_sriov_vf(adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index b48d9c0b2e1c..33ed2b158fcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -2015,9 +2015,9 @@ static int sdma_v4_0_resume(struct amdgpu_ip_block *ip_block)
return sdma_v4_0_hw_init(ip_block);
}
-static bool sdma_v4_0_is_idle(void *handle)
+static bool sdma_v4_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -2331,9 +2331,9 @@ static int sdma_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void sdma_v4_0_get_clockgating_state(void *handle, u64 *flags)
+static void sdma_v4_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
if (amdgpu_sriov_vf(adev))
@@ -2381,7 +2381,6 @@ static void sdma_v4_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
if (!adev->sdma.ip_dump)
return;
- amdgpu_gfx_off_ctrl(adev, false);
for (i = 0; i < adev->sdma.num_instances; i++) {
instance_offset = i * reg_count;
for (j = 0; j < reg_count; j++)
@@ -2389,7 +2388,6 @@ static void sdma_v4_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
RREG32(sdma_v4_0_get_reg_offset(adev, i,
sdma_reg_list_4_0[j].reg_offset));
}
- amdgpu_gfx_off_ctrl(adev, true);
}
const struct amd_ip_funcs sdma_v4_0_ip_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 5e0066cd6c51..dc94d58d33a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -30,6 +30,8 @@
#include "amdgpu_xcp.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
+#include "amdgpu_reset.h"
+#include "gc/gc_9_0_sh_mask.h"
#include "sdma/sdma_4_4_2_offset.h"
#include "sdma/sdma_4_4_2_sh_mask.h"
@@ -105,6 +107,8 @@ static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev);
+static void sdma_v4_4_2_set_engine_reset_funcs(struct amdgpu_device *adev);
+static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev);
static u32 sdma_v4_4_2_get_reg_offset(struct amdgpu_device *adev,
u32 instance, u32 offset)
@@ -681,6 +685,7 @@ static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i, b
u32 doorbell;
u32 doorbell_offset;
u64 wptr_gpu_addr;
+ u64 rwptr;
wb_offset = (ring->rptr_offs * 4);
@@ -706,12 +711,20 @@ static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i, b
	/* before programming wptr to a smaller value, need to set minor_ptr_update first */
WREG32_SDMA(i, regSDMA_GFX_MINOR_PTR_UPDATE, 1);
+	/* For the guilty queue, set RPTR to the current wptr to skip the bad commands;
+	 * if it is not a guilty queue, restore cached_rptr and continue execution.
+ */
+ if (adev->sdma.instance[i].gfx_guilty)
+ rwptr = ring->wptr;
+ else
+ rwptr = ring->cached_rptr;
+
/* Initialize the ring buffer's read and write pointers */
if (restore) {
- WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, lower_32_bits(ring->wptr << 2));
- WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, upper_32_bits(ring->wptr << 2));
- WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, lower_32_bits(ring->wptr << 2));
- WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, upper_32_bits(ring->wptr << 2));
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, lower_32_bits(rwptr << 2));
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, upper_32_bits(rwptr << 2));
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, lower_32_bits(rwptr << 2));
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, upper_32_bits(rwptr << 2));
} else {
WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, 0);
WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, 0);
@@ -778,6 +791,7 @@ static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i,
u32 doorbell;
u32 doorbell_offset;
u64 wptr_gpu_addr;
+ u64 rwptr;
wb_offset = (ring->rptr_offs * 4);
@@ -785,12 +799,20 @@ static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i,
rb_cntl = sdma_v4_4_2_rb_cntl(ring, rb_cntl);
WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl);
+	/* For the guilty queue, set RPTR to the current wptr to skip the bad commands;
+	 * if it is not a guilty queue, restore cached_rptr and continue execution.
+ */
+ if (adev->sdma.instance[i].page_guilty)
+ rwptr = ring->wptr;
+ else
+ rwptr = ring->cached_rptr;
+
/* Initialize the ring buffer's read and write pointers */
if (restore) {
- WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, lower_32_bits(ring->wptr << 2));
- WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, upper_32_bits(ring->wptr << 2));
- WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, lower_32_bits(ring->wptr << 2));
- WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, upper_32_bits(ring->wptr << 2));
+ WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, lower_32_bits(rwptr << 2));
+ WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, upper_32_bits(rwptr << 2));
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR, lower_32_bits(rwptr << 2));
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_HI, upper_32_bits(rwptr << 2));
} else {
WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, 0);
WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, 0);
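Both resume paths above apply the same rule when restoring a ring: a queue flagged guilty is restarted at its current write pointer so the offending submission is skipped, while an innocent queue resumes from the read pointer cached before the reset. A hedged sketch of that selection, with illustrative names only:

#include <stdbool.h>
#include <stdint.h>

/* Value the RB read/write pointers are reprogrammed with on restore:
 * jump past the pending (bad) work for a guilty queue, otherwise continue
 * from where the engine had read before the reset. */
static uint64_t pick_restore_ptr(bool guilty, uint64_t wptr, uint64_t cached_rptr)
{
	return guilty ? wptr : cached_rptr;
}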
@@ -1269,21 +1291,71 @@ static void sdma_v4_4_2_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
seq, 0xffffffff, 4);
}
-
-/**
- * sdma_v4_4_2_ring_emit_vm_flush - vm flush using sDMA
+/*
+ * sdma_v4_4_2_get_invalidate_req - Construct the VM_INVALIDATE_ENG0_REQ register value
+ * @vmid: The VMID to invalidate
+ * @flush_type: The type of flush (0 = legacy, 1 = lightweight, 2 = heavyweight)
*
- * @ring: amdgpu_ring pointer
- * @vmid: vmid number to use
- * @pd_addr: address
+ * This function constructs the VM_INVALIDATE_ENG0_REQ register value for the specified VMID
+ * and flush type. It ensures that all relevant page table cache levels (L1 PTEs, L2 PTEs, and
+ * L2 PDEs) are invalidated.
+ */
+static uint32_t sdma_v4_4_2_get_invalidate_req(unsigned int vmid,
+ uint32_t flush_type)
+{
+ u32 req = 0;
+
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
+ PER_VMID_INVALIDATE_REQ, 1 << vmid);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
+ CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
+
+ return req;
+}
+
+/*
+ * sdma_v4_4_2_ring_emit_vm_flush - Emit VM flush commands for SDMA
+ * @ring: The SDMA ring
+ * @vmid: The VMID to flush
+ * @pd_addr: The page directory address
*
- * Update the page table base and flush the VM TLB
- * using sDMA.
+ * This function emits the necessary register writes and waits to perform a VM flush for the
+ * specified VMID. It updates the PTB address registers and issues a VM invalidation request
+ * using the specified VM invalidation engine.
*/
static void sdma_v4_4_2_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vmid, uint64_t pd_addr)
+ unsigned int vmid, uint64_t pd_addr)
{
- amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t req = sdma_v4_4_2_get_invalidate_req(vmid, 0);
+ unsigned int eng = ring->vm_inv_eng;
+ struct amdgpu_vmhub *hub = &adev->vmhub[ring->vm_hub];
+
+ amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
+ (hub->ctx_addr_distance * vmid),
+ lower_32_bits(pd_addr));
+
+ amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
+ (hub->ctx_addr_distance * vmid),
+ upper_32_bits(pd_addr));
+ /*
+ * Construct and emit the VM invalidation packet
+ */
+ amdgpu_ring_write(ring,
+ SDMA_PKT_VM_INVALIDATION_HEADER_OP(SDMA_OP_VM_INVALIDATE) |
+ SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(SDMA_SUBOP_VM_INVALIDATE) |
+ SDMA_PKT_VM_INVALIDATION_HEADER_XCC0_ENG_ID(0x1f) |
+ SDMA_PKT_VM_INVALIDATION_HEADER_XCC1_ENG_ID(0x1f) |
+ SDMA_PKT_VM_INVALIDATION_HEADER_MMHUB_ENG_ID(eng));
+ amdgpu_ring_write(ring, SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_INVALIDATEREQ(req));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_INVALIDATEACK(BIT(vmid)));
}
static void sdma_v4_4_2_ring_emit_wreg(struct amdgpu_ring *ring,
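For orientation, the request word assembled by sdma_v4_4_2_get_invalidate_req() above carries one bit per VMID plus flags selecting which page-table cache levels to drop. The sketch below is purely conceptual (field widths and layout are assumptions); the driver itself packs these fields into VM_INVALIDATE_ENG0_REQ with REG_SET_FIELD:

#include <stdbool.h>
#include <stdint.h>

/* Conceptual view of the invalidation request; not the hardware layout. */
struct vm_invalidate_req {
	uint16_t per_vmid_invalidate;	/* 1 << vmid                                    */
	uint8_t  flush_type;		/* 0 = legacy, 1 = lightweight, 2 = heavyweight */
	bool     invalidate_l2_ptes;
	bool     invalidate_l2_pde0;
	bool     invalidate_l2_pde1;
	bool     invalidate_l2_pde2;
	bool     invalidate_l1_ptes;
};

static struct vm_invalidate_req make_invalidate_req(unsigned int vmid, uint8_t flush_type)
{
	struct vm_invalidate_req req = {
		.per_vmid_invalidate = (uint16_t)(1u << vmid),
		.flush_type          = flush_type,
		.invalidate_l2_ptes  = true,
		.invalidate_l2_pde0  = true,
		.invalidate_l2_pde1  = true,
		.invalidate_l2_pde2  = true,
		.invalidate_l1_ptes  = true,
	};

	return req;
}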
@@ -1330,6 +1402,7 @@ static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block)
sdma_v4_4_2_set_vm_pte_funcs(adev);
sdma_v4_4_2_set_irq_funcs(adev);
sdma_v4_4_2_set_ras_funcs(adev);
+ sdma_v4_4_2_set_engine_reset_funcs(adev);
return 0;
}
@@ -1351,6 +1424,12 @@ static int sdma_v4_4_2_late_init(struct amdgpu_ip_block *ip_block)
if (!amdgpu_persistent_edc_harvesting_supported(adev))
amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__SDMA);
+ /* The initialization is done in the late_init stage to ensure that the SMU
+ * initialization and capability setup are completed before we check the SDMA
+ * reset capability
+ */
+ sdma_v4_4_2_update_reset_mask(adev);
+
return 0;
}
@@ -1415,6 +1494,11 @@ static int sdma_v4_4_2_sw_init(struct amdgpu_ip_block *ip_block)
}
for (i = 0; i < adev->sdma.num_instances; i++) {
+ mutex_init(&adev->sdma.instance[i].engine_reset_mutex);
+ /* Initialize guilty flags for GFX and PAGE queues */
+ adev->sdma.instance[i].gfx_guilty = false;
+ adev->sdma.instance[i].page_guilty = false;
+
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
ring->use_doorbell = true;
@@ -1458,7 +1542,6 @@ static int sdma_v4_4_2_sw_init(struct amdgpu_ip_block *ip_block)
}
}
- /* TODO: Add queue reset mask when FW fully supports it */
adev->sdma.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring);
@@ -1561,9 +1644,9 @@ static int sdma_v4_4_2_resume(struct amdgpu_ip_block *ip_block)
return sdma_v4_4_2_hw_init(ip_block);
}
-static bool sdma_v4_4_2_is_idle(void *handle)
+static bool sdma_v4_4_2_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1602,25 +1685,94 @@ static int sdma_v4_4_2_soft_reset(struct amdgpu_ip_block *ip_block)
return 0;
}
+static bool sdma_v4_4_2_is_queue_selected(struct amdgpu_device *adev, uint32_t instance_id, bool is_page_queue)
+{
+ uint32_t reg_offset = is_page_queue ? regSDMA_PAGE_CONTEXT_STATUS : regSDMA_GFX_CONTEXT_STATUS;
+ uint32_t context_status = RREG32(sdma_v4_4_2_get_reg_offset(adev, instance_id, reg_offset));
+
+ /* Check if the SELECTED bit is set */
+ return (context_status & SDMA_GFX_CONTEXT_STATUS__SELECTED_MASK) != 0;
+}
+
+static bool sdma_v4_4_2_ring_is_guilty(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t instance_id = ring->me;
+
+ return sdma_v4_4_2_is_queue_selected(adev, instance_id, false);
+}
+
+static bool sdma_v4_4_2_page_ring_is_guilty(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t instance_id = ring->me;
+
+ if (!adev->sdma.has_page_queue)
+ return false;
+
+ return sdma_v4_4_2_is_queue_selected(adev, instance_id, true);
+}
+
static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
- int i, r;
+ u32 id = GET_INST(SDMA0, ring->me);
+ int r;
+
+ if (!(adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
+ return -EOPNOTSUPP;
+
+ amdgpu_amdkfd_suspend(adev, false);
+ r = amdgpu_sdma_reset_engine(adev, id);
+ amdgpu_amdkfd_resume(adev, false);
+
+ return r;
+}
+
+static int sdma_v4_4_2_stop_queue(struct amdgpu_device *adev, uint32_t instance_id)
+{
u32 inst_mask;
+ uint64_t rptr;
+ struct amdgpu_ring *ring = &adev->sdma.instance[instance_id].ring;
if (amdgpu_sriov_vf(adev))
return -EINVAL;
+ /* Check if this queue is the guilty one */
+ adev->sdma.instance[instance_id].gfx_guilty =
+ sdma_v4_4_2_is_queue_selected(adev, instance_id, false);
+ if (adev->sdma.has_page_queue)
+ adev->sdma.instance[instance_id].page_guilty =
+ sdma_v4_4_2_is_queue_selected(adev, instance_id, true);
+
+	/* Cache the rptr before the reset; after the reset,
+	 * all of the registers will be reset to 0.
+ */
+ rptr = amdgpu_ring_get_rptr(ring);
+ ring->cached_rptr = rptr;
+ /* Cache the rptr for the page queue if it exists */
+ if (adev->sdma.has_page_queue) {
+ struct amdgpu_ring *page_ring = &adev->sdma.instance[instance_id].page;
+ rptr = amdgpu_ring_get_rptr(page_ring);
+ page_ring->cached_rptr = rptr;
+ }
+
/* stop queue */
inst_mask = 1 << ring->me;
sdma_v4_4_2_inst_gfx_stop(adev, inst_mask);
if (adev->sdma.has_page_queue)
sdma_v4_4_2_inst_page_stop(adev, inst_mask);
- r = amdgpu_dpm_reset_sdma(adev, 1 << GET_INST(SDMA0, ring->me));
- if (r)
- return r;
+ return 0;
+}
+
+static int sdma_v4_4_2_restore_queue(struct amdgpu_device *adev, uint32_t instance_id)
+{
+ int i;
+ u32 inst_mask;
+ struct amdgpu_ring *ring = &adev->sdma.instance[instance_id].ring;
+ inst_mask = 1 << ring->me;
udelay(50);
for (i = 0; i < adev->usec_timeout; i++) {
@@ -1638,6 +1790,16 @@ static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
return sdma_v4_4_2_inst_start(adev, inst_mask, true);
}
+static struct sdma_on_reset_funcs sdma_v4_4_2_engine_reset_funcs = {
+ .pre_reset = sdma_v4_4_2_stop_queue,
+ .post_reset = sdma_v4_4_2_restore_queue,
+};
+
+static void sdma_v4_4_2_set_engine_reset_funcs(struct amdgpu_device *adev)
+{
+ amdgpu_sdma_register_on_reset_callbacks(adev, &sdma_v4_4_2_engine_reset_funcs);
+}
+
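The registration above wires SDMA 4.4.2 into the shared engine-reset helper used by sdma_v4_4_2_reset_queue(): pre_reset quiesces the queues and caches their read pointers, the engine reset itself runs, and post_reset brings the rings back up. A hedged sketch of that flow; the orchestrator function and its error handling are illustrative, and only the pre_reset/post_reset pairing mirrors sdma_v4_4_2_engine_reset_funcs:

#include <stdint.h>

struct sdma_reset_hooks {
	int (*pre_reset)(void *ctx, uint32_t instance_id);	/* stop queues, cache rptr   */
	int (*post_reset)(void *ctx, uint32_t instance_id);	/* restart from cached state */
};

/* Illustrative orchestration of a per-engine reset around the two hooks. */
static int run_engine_reset(void *ctx, uint32_t instance_id,
			    const struct sdma_reset_hooks *hooks,
			    int (*do_hw_reset)(void *ctx, uint32_t instance_id))
{
	int r;

	r = hooks->pre_reset(ctx, instance_id);
	if (r)
		return r;

	r = do_hw_reset(ctx, instance_id);	/* e.g. a firmware-driven engine reset */
	if (r)
		return r;

	return hooks->post_reset(ctx, instance_id);
}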
static int sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
@@ -1683,6 +1845,9 @@ static int sdma_v4_4_2_process_trap_irq(struct amdgpu_device *adev,
case 0:
amdgpu_fence_process(&adev->sdma.instance[i].ring);
break;
+ case 1:
+ amdgpu_fence_process(&adev->sdma.instance[i].page);
+ break;
default:
break;
}
@@ -1920,9 +2085,9 @@ static int sdma_v4_4_2_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void sdma_v4_4_2_get_clockgating_state(void *handle, u64 *flags)
+static void sdma_v4_4_2_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
if (amdgpu_sriov_vf(adev))
@@ -2012,8 +2177,7 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
3 + /* hdp invalidate */
6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */
/* sdma_v4_4_2_ring_emit_vm_flush */
- SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
- SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
+ 4 + 2 * 3 +
10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */
.emit_ib = sdma_v4_4_2_ring_emit_ib,
@@ -2029,6 +2193,7 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
.emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
.reset = sdma_v4_4_2_reset_queue,
+ .is_guilty = sdma_v4_4_2_ring_is_guilty,
};
static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
@@ -2044,8 +2209,7 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
3 + /* hdp invalidate */
6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */
/* sdma_v4_4_2_ring_emit_vm_flush */
- SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
- SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
+ 4 + 2 * 3 +
10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */
.emit_ib = sdma_v4_4_2_ring_emit_ib,
@@ -2060,6 +2224,8 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
.emit_wreg = sdma_v4_4_2_ring_emit_wreg,
.emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = sdma_v4_4_2_reset_queue,
+ .is_guilty = sdma_v4_4_2_page_ring_is_guilty,
};
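Both ring-funcs tables above replace the generic GMC flush budget with the dword count of the SDMA-native flush: assuming one emit_wreg packet is 3 dwords and the VM_INVALIDATION packet emitted in sdma_v4_4_2_ring_emit_vm_flush() is 4 dwords (header, request, range low, range high/ack), the "4 + 2 * 3" term accounts for the whole emit_vm_flush sequence. A sketch with our own constant names:

/* Dword budget behind the "4 + 2 * 3" emit_frame_size term (sketch). */
enum {
	SDMA_WREG_PACKET_DW   = 3,				/* one register-write packet          */
	SDMA_VM_INV_PACKET_DW = 4,				/* header + req + range lo + range hi */
	SDMA_VM_FLUSH_DW      = SDMA_VM_INV_PACKET_DW +
				2 * SDMA_WREG_PACKET_DW,	/* = 10 dwords                        */
};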
static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev)
@@ -2231,6 +2397,38 @@ static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
+/**
+ * sdma_v4_4_2_update_reset_mask - update reset mask for SDMA
+ * @adev: Pointer to the AMDGPU device structure
+ *
+ * This function updates the reset mask for SDMA and sets the supported
+ * reset types based on the IP version and firmware versions.
+ *
+ */
+static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev)
+{
+ /* per queue reset not supported for SRIOV */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ /*
+	 * The user queue relies on the MEC FW and PMFW when the SDMA queue does a reset,
+	 * so both need to be checked here to skip old MEC and PMFW versions.
+ */
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 4, 3):
+ case IP_VERSION(9, 4, 4):
+ if ((adev->gfx.mec_fw_version >= 0xb0) && amdgpu_dpm_reset_sdma_is_supported(adev))
+ adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ break;
+ case IP_VERSION(9, 5, 0):
+		/* TODO: enable the queue reset flag once FW support is available */
+ default:
+ break;
+ }
+
+}
+
const struct amdgpu_ip_block_version sdma_v4_4_2_ip_block = {
.type = AMD_IP_BLOCK_TYPE_SDMA,
.major = 4,
@@ -2392,11 +2590,13 @@ static int sdma_v4_4_2_aca_bank_parser(struct aca_handle *handle, struct aca_ban
misc0 = bank->regs[ACA_REG_IDX_MISC0];
switch (type) {
case ACA_SMU_TYPE_UE:
+ bank->aca_err_type = ACA_ERROR_TYPE_UE;
ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
1ULL);
break;
case ACA_SMU_TYPE_CE:
- ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE,
+ bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank);
+ ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
ACA_REG__MISC0__ERRCNT(misc0));
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index b764550834a0..0dce59f4f6e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -1530,9 +1530,9 @@ static int sdma_v5_0_resume(struct amdgpu_ip_block *ip_block)
return sdma_v5_0_hw_init(ip_block);
}
-static bool sdma_v5_0_is_idle(void *handle)
+static bool sdma_v5_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1883,9 +1883,9 @@ static int sdma_v5_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void sdma_v5_0_get_clockgating_state(void *handle, u64 *flags)
+static void sdma_v5_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
if (amdgpu_sriov_vf(adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index b1818e87889a..2b39a03ff0c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -1435,9 +1435,9 @@ static int sdma_v5_2_resume(struct amdgpu_ip_block *ip_block)
return sdma_v5_2_hw_init(ip_block);
}
-static bool sdma_v5_2_is_idle(void *handle)
+static bool sdma_v5_2_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1847,9 +1847,9 @@ static int sdma_v5_2_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void sdma_v5_2_get_clockgating_state(void *handle, u64 *flags)
+static void sdma_v5_2_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
if (amdgpu_sriov_vf(adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index 1a023b45f0be..c214c3d2149b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -51,6 +51,7 @@ MODULE_FIRMWARE("amdgpu/sdma_6_0_3.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_1_0.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_1_1.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_1_2.bin");
+MODULE_FIRMWARE("amdgpu/sdma_6_1_3.bin");
#define SDMA1_REG_OFFSET 0x600
#define SDMA0_HYP_DEC_REG_START 0x5880
@@ -1428,9 +1429,9 @@ static int sdma_v6_0_resume(struct amdgpu_ip_block *ip_block)
return sdma_v6_0_hw_init(ip_block);
}
-static bool sdma_v6_0_is_idle(void *handle)
+static bool sdma_v6_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1613,7 +1614,7 @@ static int sdma_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void sdma_v6_0_get_clockgating_state(void *handle, u64 *flags)
+static void sdma_v6_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index 7e10e94624e3..b2706221df99 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -1430,9 +1430,9 @@ static int sdma_v7_0_resume(struct amdgpu_ip_block *ip_block)
return sdma_v7_0_hw_init(ip_block);
}
-static bool sdma_v7_0_is_idle(void *handle)
+static bool sdma_v7_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1592,7 +1592,7 @@ static int sdma_v7_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void sdma_v7_0_get_clockgating_state(void *handle, u64 *flags)
+static void sdma_v7_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 77ef7da2e4fe..2247f6a94858 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -909,7 +909,7 @@ static const u32 hainan_mgcg_cgcg_init[] =
/* XXX: update when we support VCE */
#if 0
-/* tahiti, pitcarin, verde */
+/* tahiti, pitcairn, verde */
static const struct amdgpu_video_codec_info tahiti_video_codecs_encode_array[] =
{
{
@@ -940,7 +940,7 @@ static const struct amdgpu_video_codecs hainan_video_codecs_encode =
.codec_array = NULL,
};
-/* tahiti, pitcarin, verde, oland */
+/* tahiti, pitcairn, verde, oland */
static const struct amdgpu_video_codec_info tahiti_video_codecs_decode_array[] =
{
{
@@ -1124,41 +1124,41 @@ static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
{mmCP_STALLED_STAT3},
{GB_ADDR_CONFIG},
{MC_ARB_RAMCFG},
- {GB_TILE_MODE0},
- {GB_TILE_MODE1},
- {GB_TILE_MODE2},
- {GB_TILE_MODE3},
- {GB_TILE_MODE4},
- {GB_TILE_MODE5},
- {GB_TILE_MODE6},
- {GB_TILE_MODE7},
- {GB_TILE_MODE8},
- {GB_TILE_MODE9},
- {GB_TILE_MODE10},
- {GB_TILE_MODE11},
- {GB_TILE_MODE12},
- {GB_TILE_MODE13},
- {GB_TILE_MODE14},
- {GB_TILE_MODE15},
- {GB_TILE_MODE16},
- {GB_TILE_MODE17},
- {GB_TILE_MODE18},
- {GB_TILE_MODE19},
- {GB_TILE_MODE20},
- {GB_TILE_MODE21},
- {GB_TILE_MODE22},
- {GB_TILE_MODE23},
- {GB_TILE_MODE24},
- {GB_TILE_MODE25},
- {GB_TILE_MODE26},
- {GB_TILE_MODE27},
- {GB_TILE_MODE28},
- {GB_TILE_MODE29},
- {GB_TILE_MODE30},
- {GB_TILE_MODE31},
+ {mmGB_TILE_MODE0},
+ {mmGB_TILE_MODE1},
+ {mmGB_TILE_MODE2},
+ {mmGB_TILE_MODE3},
+ {mmGB_TILE_MODE4},
+ {mmGB_TILE_MODE5},
+ {mmGB_TILE_MODE6},
+ {mmGB_TILE_MODE7},
+ {mmGB_TILE_MODE8},
+ {mmGB_TILE_MODE9},
+ {mmGB_TILE_MODE10},
+ {mmGB_TILE_MODE11},
+ {mmGB_TILE_MODE12},
+ {mmGB_TILE_MODE13},
+ {mmGB_TILE_MODE14},
+ {mmGB_TILE_MODE15},
+ {mmGB_TILE_MODE16},
+ {mmGB_TILE_MODE17},
+ {mmGB_TILE_MODE18},
+ {mmGB_TILE_MODE19},
+ {mmGB_TILE_MODE20},
+ {mmGB_TILE_MODE21},
+ {mmGB_TILE_MODE22},
+ {mmGB_TILE_MODE23},
+ {mmGB_TILE_MODE24},
+ {mmGB_TILE_MODE25},
+ {mmGB_TILE_MODE26},
+ {mmGB_TILE_MODE27},
+ {mmGB_TILE_MODE28},
+ {mmGB_TILE_MODE29},
+ {mmGB_TILE_MODE30},
+ {mmGB_TILE_MODE31},
{CC_RB_BACKEND_DISABLE, true},
- {GC_USER_RB_BACKEND_DISABLE, true},
- {PA_SC_RASTER_CONFIG, true},
+ {mmGC_USER_RB_BACKEND_DISABLE, true},
+ {mmPA_SC_RASTER_CONFIG, true},
};
static uint32_t si_get_register_value(struct amdgpu_device *adev,
@@ -1888,7 +1888,7 @@ static int si_vce_send_vcepll_ctlreq(struct amdgpu_device *adev)
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
if (i == SI_MAX_CTLACKS_ASSERTION_WAIT) {
- DRM_ERROR("Timeout setting UVD clocks!\n");
+ DRM_ERROR("Timeout setting VCE clocks!\n");
return -ETIMEDOUT;
}
@@ -2644,7 +2644,7 @@ static int si_common_resume(struct amdgpu_ip_block *ip_block)
return si_common_hw_init(ip_block);
}
-static bool si_common_is_idle(void *handle)
+static bool si_common_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index dbd78d5345a4..e2089c8da71b 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -541,9 +541,9 @@ static int si_dma_resume(struct amdgpu_ip_block *ip_block)
return si_dma_hw_init(ip_block);
}
-static bool si_dma_is_idle(void *handle)
+static bool si_dma_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(SRBM_STATUS2);
@@ -559,7 +559,7 @@ static int si_dma_wait_for_idle(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (si_dma_is_idle(adev))
+ if (si_dma_is_idle(ip_block))
return 0;
udelay(1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_enums.h b/drivers/gpu/drm/amd/amdgpu/si_enums.h
index 4e935baa7b91..d656ef1fa6e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_enums.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_enums.h
@@ -121,15 +121,7 @@
#define CURSOR_UPDATE_LOCK (1 << 16)
#define CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
-#define SI_CRTC0_REGISTER_OFFSET 0
-#define SI_CRTC1_REGISTER_OFFSET 0x300
-#define SI_CRTC2_REGISTER_OFFSET 0x2600
-#define SI_CRTC3_REGISTER_OFFSET 0x2900
-#define SI_CRTC4_REGISTER_OFFSET 0x2c00
-#define SI_CRTC5_REGISTER_OFFSET 0x2f00
-#define DMA0_REGISTER_OFFSET 0x000
-#define DMA1_REGISTER_OFFSET 0x200
#define ES_AND_GS_AUTO 3
#define RADEON_PACKET_TYPE3 3
#define CE_PARTITION_BASE 3
@@ -161,10 +153,6 @@
#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
-#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
-#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x02010002
-#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02011003
-
#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
(((op) & 0xFF) << 8) | \
((n) & 0x3FFF) << 16)
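si_enums.h keeps the PACKET3(op, n) helper shown above, which packs the type-3 marker, the opcode, and the trailing dword-count field into one header word. A hedged standalone example (the opcode and count values below are illustrative only):

#include <stdio.h>

#define RADEON_PACKET_TYPE3 3
#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
			(((op) & 0xFF) << 8) |        \
			((n) & 0x3FFF) << 16)

int main(void)
{
	unsigned int hdr = PACKET3(0x40, 4);		/* illustrative opcode and count */

	printf("PACKET3 header: 0x%08x\n", hdr);	/* prints 0xc0044000 */
	return 0;
}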
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index a32b6243c1f8..5c38e1fb1dca 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -210,9 +210,9 @@ static int si_ih_resume(struct amdgpu_ip_block *ip_block)
return si_ih_hw_init(ip_block);
}
-static bool si_ih_is_idle(void *handle)
+static bool si_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(SRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
@@ -227,7 +227,7 @@ static int si_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (si_ih_is_idle(adev))
+ if (si_ih_is_idle(ip_block))
return 0;
udelay(1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sid.h b/drivers/gpu/drm/amd/amdgpu/sid.h
index 9a39cbfe6db9..cbf232f5235b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sid.h
+++ b/drivers/gpu/drm/amd/amdgpu/sid.h
@@ -26,10 +26,6 @@
#define TAHITI_RB_BITMAP_WIDTH_PER_SH 2
-#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
-#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
-#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001
-
#define SI_MAX_SH_GPRS 256
#define SI_MAX_TEMP_GPRS 16
#define SI_MAX_SH_THREADS 256
@@ -696,18 +692,6 @@
#define HDP_REG_COHERENCY_FLUSH_CNTL 0x1528
/* DCE6 ELD audio interface */
-#define AZ_F0_CODEC_ENDPOINT_INDEX 0x1780
-# define AZ_ENDPOINT_REG_INDEX(x) (((x) & 0xff) << 0)
-# define AZ_ENDPOINT_REG_WRITE_EN (1 << 8)
-#define AZ_F0_CODEC_ENDPOINT_DATA 0x1781
-
-#define AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x25
-#define SPEAKER_ALLOCATION(x) (((x) & 0x7f) << 0)
-#define SPEAKER_ALLOCATION_MASK (0x7f << 0)
-#define SPEAKER_ALLOCATION_SHIFT 0
-#define HDMI_CONNECTION (1 << 16)
-#define DP_CONNECTION (1 << 17)
-
#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x28 /* LPCM */
#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x29 /* AC3 */
#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x2A /* MPEG1 */
@@ -909,26 +893,11 @@
#define CRTC_STATUS_FRAME_COUNT 0x1BA6
/* Audio clocks */
-#define DCCG_AUDIO_DTO_SOURCE 0x05ac
-# define DCCG_AUDIO_DTO0_SOURCE_SEL(x) ((x) << 0) /* crtc0 - crtc5 */
-# define DCCG_AUDIO_DTO_SEL (1 << 4) /* 0=dto0 1=dto1 */
-
#define DCCG_AUDIO_DTO0_PHASE 0x05b0
#define DCCG_AUDIO_DTO0_MODULE 0x05b4
#define DCCG_AUDIO_DTO1_PHASE 0x05c0
#define DCCG_AUDIO_DTO1_MODULE 0x05c4
-#define AFMT_AUDIO_SRC_CONTROL 0x1c4f
-#define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0)
-/* AFMT_AUDIO_SRC_SELECT
- * 0 = stream0
- * 1 = stream1
- * 2 = stream2
- * 3 = stream3
- * 4 = stream4
- * 5 = stream5
- */
-
#define GRBM_CNTL 0x2000
#define GRBM_READ_TIMEOUT(x) ((x) << 0)
@@ -977,30 +946,6 @@
#define SE_DB_BUSY (1 << 30)
#define SE_CB_BUSY (1 << 31)
-#define GRBM_SOFT_RESET 0x2008
-#define SOFT_RESET_CP (1 << 0)
-#define SOFT_RESET_CB (1 << 1)
-#define SOFT_RESET_RLC (1 << 2)
-#define SOFT_RESET_DB (1 << 3)
-#define SOFT_RESET_GDS (1 << 4)
-#define SOFT_RESET_PA (1 << 5)
-#define SOFT_RESET_SC (1 << 6)
-#define SOFT_RESET_BCI (1 << 7)
-#define SOFT_RESET_SPI (1 << 8)
-#define SOFT_RESET_SX (1 << 10)
-#define SOFT_RESET_TC (1 << 11)
-#define SOFT_RESET_TA (1 << 12)
-#define SOFT_RESET_VGT (1 << 14)
-#define SOFT_RESET_IA (1 << 15)
-
-#define GRBM_GFX_INDEX 0x200B
-#define INSTANCE_INDEX(x) ((x) << 0)
-#define SH_INDEX(x) ((x) << 8)
-#define SE_INDEX(x) ((x) << 16)
-#define SH_BROADCAST_WRITES (1 << 29)
-#define INSTANCE_BROADCAST_WRITES (1 << 30)
-#define SE_BROADCAST_WRITES (1 << 31)
-
#define GRBM_INT_CNTL 0x2018
# define RDERR_INT_ENABLE (1 << 0)
# define GUI_IDLE_INT_ENABLE (1 << 19)
@@ -1045,16 +990,6 @@
#define VGT_VTX_VECT_EJECT_REG 0x222C
-#define VGT_CACHE_INVALIDATION 0x2231
-#define CACHE_INVALIDATION(x) ((x) << 0)
-#define VC_ONLY 0
-#define TC_ONLY 1
-#define VC_AND_TC 2
-#define AUTO_INVLD_EN(x) ((x) << 6)
-#define NO_AUTO 0
-#define ES_AUTO 1
-#define GS_AUTO 2
-#define ES_AND_GS_AUTO 3
#define VGT_ESGS_RING_SIZE 0x2232
#define VGT_GSVS_RING_SIZE 0x2233
@@ -1072,11 +1007,6 @@
#define VGT_TF_MEMORY_BASE 0x226E
-#define CC_GC_SHADER_ARRAY_CONFIG 0x226F
-#define INACTIVE_CUS_MASK 0xFFFF0000
-#define INACTIVE_CUS_SHIFT 16
-#define GC_USER_SHADER_ARRAY_CONFIG 0x2270
-
#define PA_CL_ENHANCE 0x2285
#define CLIP_VTX_REORDER_ENA (1 << 0)
#define NUM_CLIP_SEQ(x) ((x) << 1)
@@ -1169,89 +1099,6 @@
#define ROW_SIZE_MASK 0x30000000
#define ROW_SIZE_SHIFT 28
-#define GB_TILE_MODE0 0x2644
-# define MICRO_TILE_MODE(x) ((x) << 0)
-# define ADDR_SURF_DISPLAY_MICRO_TILING 0
-# define ADDR_SURF_THIN_MICRO_TILING 1
-# define ADDR_SURF_DEPTH_MICRO_TILING 2
-# define ARRAY_MODE(x) ((x) << 2)
-# define ARRAY_LINEAR_GENERAL 0
-# define ARRAY_LINEAR_ALIGNED 1
-# define ARRAY_1D_TILED_THIN1 2
-# define ARRAY_2D_TILED_THIN1 4
-# define PIPE_CONFIG(x) ((x) << 6)
-# define ADDR_SURF_P2 0
-# define ADDR_SURF_P4_8x16 4
-# define ADDR_SURF_P4_16x16 5
-# define ADDR_SURF_P4_16x32 6
-# define ADDR_SURF_P4_32x32 7
-# define ADDR_SURF_P8_16x16_8x16 8
-# define ADDR_SURF_P8_16x32_8x16 9
-# define ADDR_SURF_P8_32x32_8x16 10
-# define ADDR_SURF_P8_16x32_16x16 11
-# define ADDR_SURF_P8_32x32_16x16 12
-# define ADDR_SURF_P8_32x32_16x32 13
-# define ADDR_SURF_P8_32x64_32x32 14
-# define TILE_SPLIT(x) ((x) << 11)
-# define ADDR_SURF_TILE_SPLIT_64B 0
-# define ADDR_SURF_TILE_SPLIT_128B 1
-# define ADDR_SURF_TILE_SPLIT_256B 2
-# define ADDR_SURF_TILE_SPLIT_512B 3
-# define ADDR_SURF_TILE_SPLIT_1KB 4
-# define ADDR_SURF_TILE_SPLIT_2KB 5
-# define ADDR_SURF_TILE_SPLIT_4KB 6
-# define BANK_WIDTH(x) ((x) << 14)
-# define ADDR_SURF_BANK_WIDTH_1 0
-# define ADDR_SURF_BANK_WIDTH_2 1
-# define ADDR_SURF_BANK_WIDTH_4 2
-# define ADDR_SURF_BANK_WIDTH_8 3
-# define BANK_HEIGHT(x) ((x) << 16)
-# define ADDR_SURF_BANK_HEIGHT_1 0
-# define ADDR_SURF_BANK_HEIGHT_2 1
-# define ADDR_SURF_BANK_HEIGHT_4 2
-# define ADDR_SURF_BANK_HEIGHT_8 3
-# define MACRO_TILE_ASPECT(x) ((x) << 18)
-# define ADDR_SURF_MACRO_ASPECT_1 0
-# define ADDR_SURF_MACRO_ASPECT_2 1
-# define ADDR_SURF_MACRO_ASPECT_4 2
-# define ADDR_SURF_MACRO_ASPECT_8 3
-# define NUM_BANKS(x) ((x) << 20)
-# define ADDR_SURF_2_BANK 0
-# define ADDR_SURF_4_BANK 1
-# define ADDR_SURF_8_BANK 2
-# define ADDR_SURF_16_BANK 3
-#define GB_TILE_MODE1 0x2645
-#define GB_TILE_MODE2 0x2646
-#define GB_TILE_MODE3 0x2647
-#define GB_TILE_MODE4 0x2648
-#define GB_TILE_MODE5 0x2649
-#define GB_TILE_MODE6 0x264a
-#define GB_TILE_MODE7 0x264b
-#define GB_TILE_MODE8 0x264c
-#define GB_TILE_MODE9 0x264d
-#define GB_TILE_MODE10 0x264e
-#define GB_TILE_MODE11 0x264f
-#define GB_TILE_MODE12 0x2650
-#define GB_TILE_MODE13 0x2651
-#define GB_TILE_MODE14 0x2652
-#define GB_TILE_MODE15 0x2653
-#define GB_TILE_MODE16 0x2654
-#define GB_TILE_MODE17 0x2655
-#define GB_TILE_MODE18 0x2656
-#define GB_TILE_MODE19 0x2657
-#define GB_TILE_MODE20 0x2658
-#define GB_TILE_MODE21 0x2659
-#define GB_TILE_MODE22 0x265a
-#define GB_TILE_MODE23 0x265b
-#define GB_TILE_MODE24 0x265c
-#define GB_TILE_MODE25 0x265d
-#define GB_TILE_MODE26 0x265e
-#define GB_TILE_MODE27 0x265f
-#define GB_TILE_MODE28 0x2660
-#define GB_TILE_MODE29 0x2661
-#define GB_TILE_MODE30 0x2662
-#define GB_TILE_MODE31 0x2663
-
#define CB_PERFCOUNTER0_SELECT0 0x2688
#define CB_PERFCOUNTER0_SELECT1 0x2689
#define CB_PERFCOUNTER1_SELECT0 0x268A
@@ -1263,10 +1110,6 @@
#define CB_CGTT_SCLK_CTRL 0x2698
-#define GC_USER_RB_BACKEND_DISABLE 0x26DF
-#define BACKEND_DISABLE_MASK 0x00FF0000
-#define BACKEND_DISABLE_SHIFT 16
-
#define TCP_CHAN_STEER_LO 0x2B03
#define TCP_CHAN_STEER_HI 0x2B94
@@ -1320,101 +1163,12 @@
# define CP_RINGID1_INT_STAT (1 << 30)
# define CP_RINGID0_INT_STAT (1 << 31)
-#define CP_MEM_SLP_CNTL 0x3079
-# define CP_MEM_LS_EN (1 << 0)
-
-#define CP_DEBUG 0x307F
-
-#define RLC_CNTL 0x30C0
-# define RLC_ENABLE (1 << 0)
-#define RLC_RL_BASE 0x30C1
-#define RLC_RL_SIZE 0x30C2
-#define RLC_LB_CNTL 0x30C3
-# define LOAD_BALANCE_ENABLE (1 << 0)
-#define RLC_SAVE_AND_RESTORE_BASE 0x30C4
-#define RLC_LB_CNTR_MAX 0x30C5
-#define RLC_LB_CNTR_INIT 0x30C6
-
-#define RLC_CLEAR_STATE_RESTORE_BASE 0x30C8
-
-#define RLC_UCODE_ADDR 0x30CB
-#define RLC_UCODE_DATA 0x30CC
-
-#define RLC_GPU_CLOCK_COUNT_LSB 0x30CE
-#define RLC_GPU_CLOCK_COUNT_MSB 0x30CF
-#define RLC_CAPTURE_GPU_CLOCK_COUNT 0x30D0
-#define RLC_MC_CNTL 0x30D1
-#define RLC_UCODE_CNTL 0x30D2
-#define RLC_STAT 0x30D3
-# define RLC_BUSY_STATUS (1 << 0)
-# define GFX_POWER_STATUS (1 << 1)
-# define GFX_CLOCK_STATUS (1 << 2)
-# define GFX_LS_STATUS (1 << 3)
-
-#define RLC_PG_CNTL 0x30D7
-# define GFX_PG_ENABLE (1 << 0)
-# define GFX_PG_SRC (1 << 1)
-
-#define RLC_CGTT_MGCG_OVERRIDE 0x3100
-#define RLC_CGCG_CGLS_CTRL 0x3101
-# define CGCG_EN (1 << 0)
-# define CGLS_EN (1 << 1)
-
-#define RLC_TTOP_D 0x3105
-# define RLC_PUD(x) ((x) << 0)
-# define RLC_PUD_MASK (0xff << 0)
-# define RLC_PDD(x) ((x) << 8)
-# define RLC_PDD_MASK (0xff << 8)
-# define RLC_TTPD(x) ((x) << 16)
-# define RLC_TTPD_MASK (0xff << 16)
-# define RLC_MSD(x) ((x) << 24)
-# define RLC_MSD_MASK (0xff << 24)
-
-#define RLC_LB_INIT_CU_MASK 0x3107
-
-#define RLC_PG_AO_CU_MASK 0x310B
-#define RLC_MAX_PG_CU 0x310C
-# define MAX_PU_CU(x) ((x) << 0)
-# define MAX_PU_CU_MASK (0xff << 0)
-#define RLC_AUTO_PG_CTRL 0x310C
-# define AUTO_PG_EN (1 << 0)
-# define GRBM_REG_SGIT(x) ((x) << 3)
-# define GRBM_REG_SGIT_MASK (0xffff << 3)
-# define PG_AFTER_GRBM_REG_ST(x) ((x) << 19)
-# define PG_AFTER_GRBM_REG_ST_MASK (0x1fff << 19)
-
-#define RLC_SERDES_WR_MASTER_MASK_0 0x3115
-#define RLC_SERDES_WR_MASTER_MASK_1 0x3116
-#define RLC_SERDES_WR_CTRL 0x3117
-
-#define RLC_SERDES_MASTER_BUSY_0 0x3119
-#define RLC_SERDES_MASTER_BUSY_1 0x311A
-
-#define RLC_GCPM_GENERAL_3 0x311E
-
-#define DB_RENDER_CONTROL 0xA000
-
-#define DB_DEPTH_INFO 0xA00F
-
-#define PA_SC_RASTER_CONFIG 0xA0D4
-# define RB_MAP_PKR0(x) ((x) << 0)
-# define RB_MAP_PKR0_MASK (0x3 << 0)
-# define RB_MAP_PKR1(x) ((x) << 2)
-# define RB_MAP_PKR1_MASK (0x3 << 2)
-# define RASTER_CONFIG_RB_MAP_0 0
-# define RASTER_CONFIG_RB_MAP_1 1
-# define RASTER_CONFIG_RB_MAP_2 2
-# define RASTER_CONFIG_RB_MAP_3 3
+// #define PA_SC_RASTER_CONFIG 0xA0D4
# define RB_XSEL2(x) ((x) << 4)
# define RB_XSEL2_MASK (0x3 << 4)
# define RB_XSEL (1 << 6)
# define RB_YSEL (1 << 7)
# define PKR_MAP(x) ((x) << 8)
-# define PKR_MAP_MASK (0x3 << 8)
-# define RASTER_CONFIG_PKR_MAP_0 0
-# define RASTER_CONFIG_PKR_MAP_1 1
-# define RASTER_CONFIG_PKR_MAP_2 2
-# define RASTER_CONFIG_PKR_MAP_3 3
# define PKR_XSEL(x) ((x) << 10)
# define PKR_XSEL_MASK (0x3 << 10)
# define PKR_YSEL(x) ((x) << 12)
@@ -1426,56 +1180,11 @@
# define SC_YSEL(x) ((x) << 20)
# define SC_YSEL_MASK (0x3 << 20)
# define SE_MAP(x) ((x) << 24)
-# define SE_MAP_MASK (0x3 << 24)
-# define RASTER_CONFIG_SE_MAP_0 0
-# define RASTER_CONFIG_SE_MAP_1 1
-# define RASTER_CONFIG_SE_MAP_2 2
-# define RASTER_CONFIG_SE_MAP_3 3
# define SE_XSEL(x) ((x) << 26)
# define SE_XSEL_MASK (0x3 << 26)
# define SE_YSEL(x) ((x) << 28)
# define SE_YSEL_MASK (0x3 << 28)
-
-#define VGT_EVENT_INITIATOR 0xA2A4
-# define SAMPLE_STREAMOUTSTATS1 (1 << 0)
-# define SAMPLE_STREAMOUTSTATS2 (2 << 0)
-# define SAMPLE_STREAMOUTSTATS3 (3 << 0)
-# define CACHE_FLUSH_TS (4 << 0)
-# define CACHE_FLUSH (6 << 0)
-# define CS_PARTIAL_FLUSH (7 << 0)
-# define VGT_STREAMOUT_RESET (10 << 0)
-# define END_OF_PIPE_INCR_DE (11 << 0)
-# define END_OF_PIPE_IB_END (12 << 0)
-# define RST_PIX_CNT (13 << 0)
-# define VS_PARTIAL_FLUSH (15 << 0)
-# define PS_PARTIAL_FLUSH (16 << 0)
-# define CACHE_FLUSH_AND_INV_TS_EVENT (20 << 0)
-# define ZPASS_DONE (21 << 0)
-# define CACHE_FLUSH_AND_INV_EVENT (22 << 0)
-# define PERFCOUNTER_START (23 << 0)
-# define PERFCOUNTER_STOP (24 << 0)
-# define PIPELINESTAT_START (25 << 0)
-# define PIPELINESTAT_STOP (26 << 0)
-# define PERFCOUNTER_SAMPLE (27 << 0)
-# define SAMPLE_PIPELINESTAT (30 << 0)
-# define SAMPLE_STREAMOUTSTATS (32 << 0)
-# define RESET_VTX_CNT (33 << 0)
-# define VGT_FLUSH (36 << 0)
-# define BOTTOM_OF_PIPE_TS (40 << 0)
-# define DB_CACHE_FLUSH_AND_INV (42 << 0)
-# define FLUSH_AND_INV_DB_DATA_TS (43 << 0)
-# define FLUSH_AND_INV_DB_META (44 << 0)
-# define FLUSH_AND_INV_CB_DATA_TS (45 << 0)
-# define FLUSH_AND_INV_CB_META (46 << 0)
-# define CS_DONE (47 << 0)
-# define PS_DONE (48 << 0)
-# define FLUSH_AND_INV_CB_PIXEL_DATA (49 << 0)
-# define THREAD_TRACE_START (51 << 0)
-# define THREAD_TRACE_STOP (52 << 0)
-# define THREAD_TRACE_FLUSH (54 << 0)
-# define THREAD_TRACE_FINISH (55 << 0)
-
/* PIF PHY0 registers idx/data 0x8/0xc */
#define PB0_PIF_CNTL 0x10
# define LS2_EXIT_TIME(x) ((x) << 17)
@@ -1991,12 +1700,29 @@
//#dce stupp
/* display controller offsets used for crtc/cur/lut/grph/viewport/etc. */
-#define SI_CRTC0_REGISTER_OFFSET 0 //(0x6df0 - 0x6df0)/4
-#define SI_CRTC1_REGISTER_OFFSET 0x300 //(0x79f0 - 0x6df0)/4
-#define SI_CRTC2_REGISTER_OFFSET 0x2600 //(0x105f0 - 0x6df0)/4
-#define SI_CRTC3_REGISTER_OFFSET 0x2900 //(0x111f0 - 0x6df0)/4
-#define SI_CRTC4_REGISTER_OFFSET 0x2c00 //(0x11df0 - 0x6df0)/4
-#define SI_CRTC5_REGISTER_OFFSET 0x2f00 //(0x129f0 - 0x6df0)/4
+#define CRTC0_REGISTER_OFFSET (0x1b7c - 0x1b7c) //(0x6df0 - 0x6df0)/4
+#define CRTC1_REGISTER_OFFSET (0x1e7c - 0x1b7c) //(0x79f0 - 0x6df0)/4
+#define CRTC2_REGISTER_OFFSET (0x417c - 0x1b7c) //(0x105f0 - 0x6df0)/4
+#define CRTC3_REGISTER_OFFSET (0x447c - 0x1b7c) //(0x111f0 - 0x6df0)/4
+#define CRTC4_REGISTER_OFFSET (0x477c - 0x1b7c) //(0x11df0 - 0x6df0)/4
+#define CRTC5_REGISTER_OFFSET (0x4a7c - 0x1b7c) //(0x129f0 - 0x6df0)/4
+
+/* hpd instance offsets */
+#define HPD0_REGISTER_OFFSET (0x1807 - 0x1807)
+#define HPD1_REGISTER_OFFSET (0x180a - 0x1807)
+#define HPD2_REGISTER_OFFSET (0x180d - 0x1807)
+#define HPD3_REGISTER_OFFSET (0x1810 - 0x1807)
+#define HPD4_REGISTER_OFFSET (0x1813 - 0x1807)
+#define HPD5_REGISTER_OFFSET (0x1816 - 0x1807)
+
+/* audio endpt instance offsets */
+#define AUD0_REGISTER_OFFSET (0x1780 - 0x1780)
+#define AUD1_REGISTER_OFFSET (0x1786 - 0x1780)
+#define AUD2_REGISTER_OFFSET (0x178c - 0x1780)
+#define AUD3_REGISTER_OFFSET (0x1792 - 0x1780)
+#define AUD4_REGISTER_OFFSET (0x1798 - 0x1780)
+#define AUD5_REGISTER_OFFSET (0x179d - 0x1780)
+#define AUD6_REGISTER_OFFSET (0x17a4 - 0x1780)
#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64
@@ -2036,9 +1762,6 @@
#define EVERGREEN_DATA_FORMAT 0x1ac0
# define EVERGREEN_INTERLEAVE_EN (1 << 0)
-#define MC_SHARED_CHMAP__NOOFCHAN_MASK 0xf000
-#define MC_SHARED_CHMAP__NOOFCHAN__SHIFT 0xc
-
#define R600_D1GRPH_ARRAY_MODE_LINEAR_GENERAL (0 << 20)
#define R600_D1GRPH_ARRAY_MODE_LINEAR_ALIGNED (1 << 20)
#define R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1 (2 << 20)
@@ -2050,32 +1773,6 @@
#define R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1847
#define R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a47
-#define DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK 0x8
-#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK 0x8
-#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK 0x8
-#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK 0x8
-#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK 0x8
-#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK 0x8
-
-#define DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK 0x4
-#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK 0x4
-#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK 0x4
-#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK 0x4
-#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK 0x4
-#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK 0x4
-
-#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK 0x20000
-#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK 0x20000
-#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK 0x20000
-#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK 0x20000
-#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK 0x20000
-#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK 0x20000
-
-#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK 0x1
-#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK 0x100
-
-#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK 0x1
-
#define R600_D1GRPH_SWAP_CONTROL 0x1843
#define R600_D1GRPH_SWAP_ENDIAN_NONE (0 << 0)
#define R600_D1GRPH_SWAP_ENDIAN_16BIT (1 << 0)
@@ -2099,8 +1796,6 @@
# define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
# define R600_SCK_PRESCALE_CRYSTAL_CLK_MASK (0xf << 28)
-#define GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK 0x1
-
#define FMT_BIT_DEPTH_CONTROL 0x1bf2
#define FMT_TRUNCATE_EN (1 << 0)
#define FMT_TRUNCATE_DEPTH (1 << 4)
@@ -2404,19 +2099,6 @@
#define mmSRBM_SOFT_RESET__xxSOFT_RESET_MC_MASK 0x800
#define mmSRBM_SOFT_RESET__xxSOFT_RESET_MC__SHIFT 0xb
-#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x8
-#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x3
-#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x40
-#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x6
-#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x200
-#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
-#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x1000
-#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xc
-#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x8000
-#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
-#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x40000
-#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x12
-
#define MC_SEQ_MISC0__MT__MASK 0xf0000000
#define MC_SEQ_MISC0__MT__GDDR1 0x10000000
#define MC_SEQ_MISC0__MT__DDR2 0x20000000
@@ -2426,10 +2108,7 @@
#define MC_SEQ_MISC0__MT__HBM 0x60000000
#define MC_SEQ_MISC0__MT__DDR3 0xB0000000
-#define GRBM_STATUS__GUI_ACTIVE_MASK 0x80000000
#define CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK 0x4000000
-#define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK 0x800000
-#define CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK 0x400000
#define PACKET3_SEM_WAIT_ON_SIGNAL (0x1 << 12)
#define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
#define PACKET3_SEM_SEL_WAIT (0x7 << 29)
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index a59b4c36cad7..659eab9b90be 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -28,7 +28,6 @@
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
@@ -103,12 +102,11 @@ static const struct amdgpu_video_codecs vega_video_codecs_encode =
/* Vega */
static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] =
{
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
};
static const struct amdgpu_video_codecs vega_video_codecs_decode =
@@ -120,12 +118,12 @@ static const struct amdgpu_video_codecs vega_video_codecs_decode =
/* Raven */
static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] =
{
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 8192, 8192, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)},
};
@@ -138,10 +136,10 @@ static const struct amdgpu_video_codecs rv_video_codecs_decode =
/* Renoir, Arcturus */
static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] =
{
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
@@ -605,12 +603,10 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
{
/* Will reset for the following suspend abort cases.
- * 1) Only reset on APU side, dGPU hasn't checked yet.
- * 2) S3 suspend aborted in the normal S3 suspend or
- * performing pm core test.
+ * 1) S3 suspend aborted in the normal S3 suspend
+ * 2) S3 suspend aborted in performing pm core test.
*/
- if (adev->flags & AMD_IS_APU && adev->in_s3 &&
- !pm_resume_via_firmware())
+ if (adev->in_s3 && !pm_resume_via_firmware())
return true;
else
return false;
@@ -1363,7 +1359,7 @@ static int soc15_common_resume(struct amdgpu_ip_block *ip_block)
return soc15_common_hw_init(ip_block);
}
-static bool soc15_common_is_idle(void *handle)
+static bool soc15_common_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
@@ -1464,9 +1460,9 @@ static int soc15_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void soc15_common_get_clockgating_state(void *handle, u64 *flags)
+static void soc15_common_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
if (amdgpu_sriov_vf(adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h
index b9cbeb389edc..a5000c171c02 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
@@ -93,11 +93,25 @@
#define PACKET3_DISPATCH_INDIRECT 0x16
#define PACKET3_ATOMIC_GDS 0x1D
#define PACKET3_ATOMIC_MEM 0x1E
+#define PACKET3_ATOMIC_MEM__ATOMIC(x) ((((unsigned)(x)) & 0x3F) << 0)
+#define PACKET3_ATOMIC_MEM__COMMAND(x) ((((unsigned)(x)) & 0xF) << 8)
+#define PACKET3_ATOMIC_MEM__CACHE_POLICY(x) ((((unsigned)(x)) & 0x3) << 25)
+#define PACKET3_ATOMIC_MEM__ADDR_LO(x) (((unsigned)(x)) << 0)
+#define PACKET3_ATOMIC_MEM__ADDR_HI(x) (((unsigned)(x)) << 0)
+#define PACKET3_ATOMIC_MEM__SRC_DATA_LO(x) (((unsigned)(x)) << 0)
+#define PACKET3_ATOMIC_MEM__SRC_DATA_HI(x) (((unsigned)(x)) << 0)
+#define PACKET3_ATOMIC_MEM__CMP_DATA_LO(x) (((unsigned)(x)) << 0)
+#define PACKET3_ATOMIC_MEM__CMP_DATA_HI(x) (((unsigned)(x)) << 0)
+#define PACKET3_ATOMIC_MEM__LOOP_INTERVAL(x) ((((unsigned)(x)) & 0x1FFF) << 0)
+#define PACKET3_ATOMIC_MEM__COMMAND__SINGLE_PASS_ATOMIC 0
+#define PACKET3_ATOMIC_MEM__COMMAND__LOOP_UNTIL_COMPARE_SATISFIED 1
#define PACKET3_OCCLUSION_QUERY 0x1F
#define PACKET3_SET_PREDICATION 0x20
#define PACKET3_REG_RMW 0x21
#define PACKET3_COND_EXEC 0x22
#define PACKET3_PRED_EXEC 0x23
+#define PACKET3_PRED_EXEC__EXEC_COUNT(x) ((((unsigned)(x)) & 0x3FFF) << 0)
+#define PACKET3_PRED_EXEC__VIRTUAL_XCC_ID_SELECT(x) ((((unsigned)(x)) & 0xFF) << 24)
#define PACKET3_DRAW_INDIRECT 0x24
#define PACKET3_DRAW_INDEX_INDIRECT 0x25
#define PACKET3_INDEX_BASE 0x26
@@ -132,6 +146,28 @@
* 1 - pfp
* 2 - ce
*/
+#define PACKET3_WRITE_DATA__DST_SEL(x) ((((unsigned)(x)) & 0xF) << 8)
+#define PACKET3_WRITE_DATA__ADDR_INCR(x) ((((unsigned)(x)) & 0x1) << 16)
+#define PACKET3_WRITE_DATA__RESUME_VF_MI300(x) ((((unsigned)(x)) & 0x1) << 19)
+#define PACKET3_WRITE_DATA__WR_CONFIRM(x) ((((unsigned)(x)) & 0x1) << 20)
+#define PACKET3_WRITE_DATA__CACHE_POLICY(x) ((((unsigned)(x)) & 0x3) << 25)
+#define PACKET3_WRITE_DATA__DST_MMREG_ADDR(x) ((((unsigned)(x)) & 0x3FFFF) << 0)
+#define PACKET3_WRITE_DATA__DST_GDS_ADDR(x) ((((unsigned)(x)) & 0xFFFF) << 0)
+#define PACKET3_WRITE_DATA__DST_MEM_ADDR_LO(x) ((((unsigned)(x)) & 0x3FFFFFFF) << 2)
+#define PACKET3_WRITE_DATA__DST_MEM_ADDR_HI(x) ((unsigned)(x))
+#define PACKET3_WRITE_DATA__DST_SEL__MEM_MAPPED_REGISTER 0
+#define PACKET3_WRITE_DATA__DST_SEL__TC_L2 2
+#define PACKET3_WRITE_DATA__DST_SEL__GDS 3
+#define PACKET3_WRITE_DATA__DST_SEL__MEMORY 5
+#define PACKET3_WRITE_DATA__DST_SEL__MEMORY_MAPPED_ADC_PERSISTENT_STATE 6
+#define PACKET3_WRITE_DATA__ADDR_INCR__INCREMENT_ADDRESS 0
+#define PACKET3_WRITE_DATA__ADDR_INCR__DO_NOT_INCREMENT_ADDRESS 1
+#define PACKET3_WRITE_DATA__WR_CONFIRM__DO_NOT_WAIT_FOR_WRITE_CONFIRMATION 0
+#define PACKET3_WRITE_DATA__WR_CONFIRM__WAIT_FOR_WRITE_CONFIRMATION 1
+#define PACKET3_WRITE_DATA__CACHE_POLICY__LRU 0
+#define PACKET3_WRITE_DATA__CACHE_POLICY__STREAM 1
+#define PACKET3_WRITE_DATA__CACHE_POLICY__NOA 2
+#define PACKET3_WRITE_DATA__CACHE_POLICY__BYPASS 3
#define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38
#define PACKET3_MEM_SEMAPHORE 0x39
# define PACKET3_SEM_USE_MAILBOX (0x1 << 16)
@@ -160,6 +196,33 @@
/* 0 - me
* 1 - pfp
*/
+#define PACKET3_WAIT_REG_MEM__FUNCTION(x) ((((unsigned)(x)) & 0x7) << 0)
+#define PACKET3_WAIT_REG_MEM__MEM_SPACE(x) ((((unsigned)(x)) & 0x3) << 4)
+#define PACKET3_WAIT_REG_MEM__OPERATION(x) ((((unsigned)(x)) & 0x3) << 6)
+#define PACKET3_WAIT_REG_MEM__MES_INTR_PIPE(x) ((((unsigned)(x)) & 0x3) << 22)
+#define PACKET3_WAIT_REG_MEM__MES_ACTION(x) ((((unsigned)(x)) & 0x1) << 24)
+#define PACKET3_WAIT_REG_MEM__CACHE_POLICY(x) ((((unsigned)(x)) & 0x3) << 25)
+#define PACKET3_WAIT_REG_MEM__MEM_POLL_ADDR_LO(x) ((((unsigned)(x)) & 0x3FFFFFFF) << 2)
+#define PACKET3_WAIT_REG_MEM__REG_POLL_ADDR(x) ((((unsigned)(x)) & 0x3FFFF) << 0)
+#define PACKET3_WAIT_REG_MEM__REG_WRITE_ADDR1(x) ((((unsigned)(x)) & 0x3FFFF) << 0)
+#define PACKET3_WAIT_REG_MEM__MEM_POLL_ADDR_HI(x) ((unsigned)(x))
+#define PACKET3_WAIT_REG_MEM__REG_WRITE_ADDR2(x) ((((unsigned)(x)) & 0x3FFFF) << 0)
+#define PACKET3_WAIT_REG_MEM__REFERENCE(x) ((unsigned)(x))
+#define PACKET3_WAIT_REG_MEM__MASK(x) ((unsigned)(x))
+#define PACKET3_WAIT_REG_MEM__POLL_INTERVAL(x) ((((unsigned)(x)) & 0xFFFF) << 0)
+#define PACKET3_WAIT_REG_MEM__OPTIMIZE_ACE_OFFLOAD_MODE(x) ((((unsigned)(x)) & 0x1) << 31)
+#define PACKET3_WAIT_REG_MEM__FUNCTION__ALWAYS_PASS 0
+#define PACKET3_WAIT_REG_MEM__FUNCTION__LESS_THAN_REF_VALUE 1
+#define PACKET3_WAIT_REG_MEM__FUNCTION__LESS_THAN_EQUAL_TO_THE_REF_VALUE 2
+#define PACKET3_WAIT_REG_MEM__FUNCTION__EQUAL_TO_THE_REFERENCE_VALUE 3
+#define PACKET3_WAIT_REG_MEM__FUNCTION__NOT_EQUAL_REFERENCE_VALUE 4
+#define PACKET3_WAIT_REG_MEM__FUNCTION__GREATER_THAN_OR_EQUAL_REFERENCE_VALUE 5
+#define PACKET3_WAIT_REG_MEM__FUNCTION__GREATER_THAN_REFERENCE_VALUE 6
+#define PACKET3_WAIT_REG_MEM__MEM_SPACE__REGISTER_SPACE 0
+#define PACKET3_WAIT_REG_MEM__MEM_SPACE__MEMORY_SPACE 1
+#define PACKET3_WAIT_REG_MEM__OPERATION__WAIT_REG_MEM 0
+#define PACKET3_WAIT_REG_MEM__OPERATION__WR_WAIT_WR_REG 1
+#define PACKET3_WAIT_REG_MEM__OPERATION__WAIT_MEM_PREEMPTABLE 3
#define PACKET3_INDIRECT_BUFFER 0x3F
#define INDIRECT_BUFFER_VALID (1 << 23)
#define INDIRECT_BUFFER_CACHE_POLICY(x) ((x) << 28)
@@ -169,7 +232,63 @@
*/
#define INDIRECT_BUFFER_PRE_ENB(x) ((x) << 21)
#define INDIRECT_BUFFER_PRE_RESUME(x) ((x) << 30)
+#define PACKET3_INDIRECT_BUFFER__IB_BASE_LO(x) ((((unsigned)(x)) & 0x3FFFFFFF) << 2)
+#define PACKET3_INDIRECT_BUFFER__IB_BASE_HI(x) ((unsigned)(x))
+#define PACKET3_INDIRECT_BUFFER__IB_SIZE(x) ((((unsigned)(x)) & 0xFFFFF) << 0)
+#define PACKET3_INDIRECT_BUFFER__CHAIN(x) ((((unsigned)(x)) & 0x1) << 20)
+#define PACKET3_INDIRECT_BUFFER__OFFLOAD_POLLING(x) ((((unsigned)(x)) & 0x1) << 21)
+#define PACKET3_INDIRECT_BUFFER__VALID(x) ((((unsigned)(x)) & 0x1) << 23)
+#define PACKET3_INDIRECT_BUFFER__VMID(x) ((((unsigned)(x)) & 0xF) << 24)
+#define PACKET3_INDIRECT_BUFFER__CACHE_POLICY(x) ((((unsigned)(x)) & 0x3) << 28)
+#define PACKET3_INDIRECT_BUFFER__PRIV(x) ((((unsigned)(x)) & 0x1) << 31)
+#define PACKET3_INDIRECT_BUFFER__CACHE_POLICY__LRU 0
+#define PACKET3_INDIRECT_BUFFER__CACHE_POLICY__STREAM 1
#define PACKET3_COPY_DATA 0x40
+#define PACKET3_COPY_DATA__SRC_SEL(x) ((((unsigned)(x)) & 0xF) << 0)
+#define PACKET3_COPY_DATA__DST_SEL(x) ((((unsigned)(x)) & 0xF) << 8)
+#define PACKET3_COPY_DATA__SRC_CACHE_POLICY(x) ((((unsigned)(x)) & 0x3) << 13)
+#define PACKET3_COPY_DATA__COUNT_SEL(x) ((((unsigned)(x)) & 0x1) << 16)
+#define PACKET3_COPY_DATA__WR_CONFIRM(x) ((((unsigned)(x)) & 0x1) << 20)
+#define PACKET3_COPY_DATA__DST_CACHE_POLICY(x) ((((unsigned)(x)) & 0x3) << 25)
+#define PACKET3_COPY_DATA__PQ_EXE_STATUS(x) ((((unsigned)(x)) & 0x1) << 29)
+#define PACKET3_COPY_DATA__SRC_REG_OFFSET(x) ((((unsigned)(x)) & 0x3FFFF) << 0)
+#define PACKET3_COPY_DATA__SRC_32B_ADDR_LO(x) ((((unsigned)(x)) & 0x3FFFFFFF) << 2)
+#define PACKET3_COPY_DATA__SRC_64B_ADDR_LO(x) ((((unsigned)(x)) & 0x1FFFFFFF) << 3)
+#define PACKET3_COPY_DATA__SRC_GDS_ADDR_LO(x) ((((unsigned)(x)) & 0xFFFF) << 0)
+#define PACKET3_COPY_DATA__IMM_DATA(x) ((unsigned)(x))
+#define PACKET3_COPY_DATA__SRC_MEMTC_ADDR_HI(x) ((unsigned)(x))
+#define PACKET3_COPY_DATA__SRC_IMM_DATA(x) ((unsigned)(x))
+#define PACKET3_COPY_DATA__DST_REG_OFFSET(x) ((((unsigned)(x)) & 0x3FFFF) << 0)
+#define PACKET3_COPY_DATA__DST_32B_ADDR_LO(x) ((((unsigned)(x)) & 0x3FFFFFFF) << 2)
+#define PACKET3_COPY_DATA__DST_64B_ADDR_LO(x) ((((unsigned)(x)) & 0x1FFFFFFF) << 3)
+#define PACKET3_COPY_DATA__DST_GDS_ADDR_LO(x) ((((unsigned)(x)) & 0xFFFF) << 0)
+#define PACKET3_COPY_DATA__DST_ADDR_HI(x) ((unsigned)(x))
+#define PACKET3_COPY_DATA__SRC_SEL__MEM_MAPPED_REGISTER 0
+#define PACKET3_COPY_DATA__SRC_SEL__MEMORY 1
+#define PACKET3_COPY_DATA__SRC_SEL__TC_L2 2
+#define PACKET3_COPY_DATA__SRC_SEL__GDS 3
+#define PACKET3_COPY_DATA__SRC_SEL__PERFCOUNTERS 4
+#define PACKET3_COPY_DATA__SRC_SEL__IMMEDIATE_DATA 5
+#define PACKET3_COPY_DATA__SRC_SEL__ATOMIC_RETURN_DATA 6
+#define PACKET3_COPY_DATA__SRC_SEL__GDS_ATOMIC_RETURN_DATA0 7
+#define PACKET3_COPY_DATA__SRC_SEL__GDS_ATOMIC_RETURN_DATA1 8
+#define PACKET3_COPY_DATA__SRC_SEL__GPU_CLOCK_COUNT 9
+#define PACKET3_COPY_DATA__DST_SEL__MEM_MAPPED_REGISTER 0
+#define PACKET3_COPY_DATA__DST_SEL__TC_L2 2
+#define PACKET3_COPY_DATA__DST_SEL__GDS 3
+#define PACKET3_COPY_DATA__DST_SEL__PERFCOUNTERS 4
+#define PACKET3_COPY_DATA__DST_SEL__MEMORY 5
+#define PACKET3_COPY_DATA__DST_SEL__MEM_MAPPED_REG_DC 6
+#define PACKET3_COPY_DATA__SRC_CACHE_POLICY__LRU 0
+#define PACKET3_COPY_DATA__SRC_CACHE_POLICY__STREAM 1
+#define PACKET3_COPY_DATA__COUNT_SEL__32_BITS_OF_DATA 0
+#define PACKET3_COPY_DATA__COUNT_SEL__64_BITS_OF_DATA 1
+#define PACKET3_COPY_DATA__WR_CONFIRM__DO_NOT_WAIT_FOR_CONFIRMATION 0
+#define PACKET3_COPY_DATA__WR_CONFIRM__WAIT_FOR_CONFIRMATION 1
+#define PACKET3_COPY_DATA__DST_CACHE_POLICY__LRU 0
+#define PACKET3_COPY_DATA__DST_CACHE_POLICY__STREAM 1
+#define PACKET3_COPY_DATA__PQ_EXE_STATUS__DEFAULT 0
+#define PACKET3_COPY_DATA__PQ_EXE_STATUS__PHASE_UPDATE 1
#define PACKET3_PFP_SYNC_ME 0x42
#define PACKET3_COND_WRITE 0x45
#define PACKET3_EVENT_WRITE 0x46
@@ -181,6 +300,15 @@
* 3 - SAMPLE_STREAMOUTSTAT*
* 4 - *S_PARTIAL_FLUSH
*/
+#define PACKET3_EVENT_WRITE__EVENT_TYPE(x) ((((unsigned)(x)) & 0x3F) << 0)
+#define PACKET3_EVENT_WRITE__EVENT_INDEX(x) ((((unsigned)(x)) & 0xF) << 8)
+#define PACKET3_EVENT_WRITE__OFFLOAD_ENABLE(x) ((((unsigned)(x)) & 0x1) << 31)
+#define PACKET3_EVENT_WRITE__SAMP_PLST_CNTR_MODE(x) ((((unsigned)(x)) & 0x3) << 29)
+#define PACKET3_EVENT_WRITE__ADDRESS_LO(x) ((((unsigned)(x)) & 0x1FFFFFFF) << 3)
+#define PACKET3_EVENT_WRITE__ADDRESS_HI(x) (((unsigned)(x)) << 0)
+#define PACKET3_EVENT_WRITE__EVENT_INDEX__OTHER 0
+#define PACKET3_EVENT_WRITE__EVENT_INDEX__SAMPLE_PIPELINESTATS 2
+#define PACKET3_EVENT_WRITE__EVENT_INDEX__CS_PARTIAL_FLUSH 4
#define PACKET3_RELEASE_MEM 0x49
#define EVENT_TYPE(x) ((x) << 0)
#define EVENT_INDEX(x) ((x) << 8)
@@ -286,6 +414,13 @@
#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(x) ((x) << 29)
#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_WB_ACTION_ENA(x) ((x) << 30)
#define PACKET3_REWIND 0x59
+#define PACKET3_ACQUIRE_MEM__COHER_SIZE(x) ((unsigned)(x))
+#define PACKET3_ACQUIRE_MEM__COHER_SIZE_HI(x) ((((unsigned)(x)) & 0xFF) << 0)
+#define PACKET3_ACQUIRE_MEM__COHER_SIZE_HI_VG10(x) ((((unsigned)(x)) & 0xFFFFFF) << 0)
+#define PACKET3_ACQUIRE_MEM__COHER_BASE_LO(x) ((unsigned)(x))
+#define PACKET3_ACQUIRE_MEM__COHER_BASE_HI(x) ((((unsigned)(x)) & 0xFFFFFF) << 0)
+#define PACKET3_ACQUIRE_MEM__POLL_INTERVAL(x) ((((unsigned)(x)) & 0xFFFF) << 0)
+#define PACKET3_ACQUIRE_MEM__GCR_CNTL(x) ((((unsigned)(x)) & 0x7FF) << 0)
#define PACKET3_LOAD_UCONFIG_REG 0x5E
#define PACKET3_LOAD_SH_REG 0x5F
#define PACKET3_LOAD_CONFIG_REG 0x60
@@ -300,12 +435,16 @@
#define PACKET3_SET_SH_REG 0x76
#define PACKET3_SET_SH_REG_START 0x00002c00
#define PACKET3_SET_SH_REG_END 0x00003000
+#define PACKET3_SET_SH_REG__REG_OFFSET(x) ((((unsigned)(x)) & 0xFFFF) << 0)
+#define PACKET3_SET_SH_REG__VMID_SHIFT(x) ((((unsigned)(x)) & 0x1F) << 23)
+#define PACKET3_SET_SH_REG__INDEX(x) ((((unsigned)(x)) & 0xF) << 28)
#define PACKET3_SET_SH_REG_OFFSET 0x77
#define PACKET3_SET_QUEUE_REG 0x78
#define PACKET3_SET_UCONFIG_REG 0x79
#define PACKET3_SET_UCONFIG_REG_START 0x0000c000
#define PACKET3_SET_UCONFIG_REG_END 0x0000c400
#define PACKET3_SET_UCONFIG_REG_INDEX_TYPE (2 << 28)
+#define PACKET3_SET_UCONFIG_REG__REG_OFFSET(x) ((((unsigned)(x)) & 0xFFFF) << 0)
#define PACKET3_SCRATCH_RAM_WRITE 0x7D
#define PACKET3_SCRATCH_RAM_READ 0x7E
#define PACKET3_LOAD_CONST_RAM 0x80
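
The PACKET3_*__FIELD(x) helpers added above encode individual fields of a type-3 packet's dwords. As a rough illustration of how they compose, the sketch below emits a confirmed WRITE_DATA to GPU memory; the dword ordering (header, control, address low/high, payload) follows the conventional WRITE_DATA layout used by existing amdgpu ring code, and amdgpu_ring_write() plus the PACKET3() header macro are assumed from the surrounding driver, so treat it as illustrative rather than a drop-in helper.

/* Sketch: write one dword to GPU memory and wait for write confirmation. */
static void foo_ring_emit_mem_write(struct amdgpu_ring *ring,
                                    u64 gpu_addr, u32 value)
{
        /* header: opcode plus count of dwords after the control dword */
        amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
        /* control dword built from the named field encoders */
        amdgpu_ring_write(ring,
                PACKET3_WRITE_DATA__DST_SEL(PACKET3_WRITE_DATA__DST_SEL__MEMORY) |
                PACKET3_WRITE_DATA__WR_CONFIRM(PACKET3_WRITE_DATA__WR_CONFIRM__WAIT_FOR_WRITE_CONFIRMATION));
        amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
        amdgpu_ring_write(ring, value);
}

Packing the selector and confirmation bits through named helpers keeps the control dword readable compared to open-coded shifts and magic masks.
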
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 62ad67d0b598..ad36c96478a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -117,23 +117,17 @@ static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn1 = {
};
static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn0[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn1[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
@@ -390,6 +384,7 @@ soc21_asic_reset_method(struct amdgpu_device *adev)
case IP_VERSION(14, 0, 0):
case IP_VERSION(14, 0, 1):
case IP_VERSION(14, 0, 4):
+ case IP_VERSION(14, 0, 5):
return AMD_RESET_METHOD_MODE2;
default:
if (amdgpu_dpm_is_baco_supported(adev))
@@ -781,6 +776,34 @@ static int soc21_common_early_init(struct amdgpu_ip_block *ip_block)
AMD_PG_SUPPORT_GFX_PG;
adev->external_rev_id = adev->rev_id + 0x40;
break;
+ case IP_VERSION(11, 5, 3):
+ adev->cg_flags = AMD_CG_SUPPORT_VCN_MGCG |
+ AMD_CG_SUPPORT_JPEG_MGCG |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_FGCG |
+ AMD_CG_SUPPORT_REPEATER_FGCG |
+ AMD_CG_SUPPORT_GFX_PERF_CLK |
+ AMD_CG_SUPPORT_GFX_3D_CGCG |
+ AMD_CG_SUPPORT_GFX_3D_CGLS |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_HDP_DS |
+ AMD_CG_SUPPORT_HDP_SD |
+ AMD_CG_SUPPORT_ATHUB_MGCG |
+ AMD_CG_SUPPORT_ATHUB_LS |
+ AMD_CG_SUPPORT_IH_CG |
+ AMD_CG_SUPPORT_BIF_MGCG |
+ AMD_CG_SUPPORT_BIF_LS;
+ adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG |
+ AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_JPEG_DPG |
+ AMD_PG_SUPPORT_JPEG |
+ AMD_PG_SUPPORT_GFX_PG;
+ adev->external_rev_id = adev->rev_id + 0x50;
+ break;
default:
/* FIXME: not supported yet */
return -EINVAL;
@@ -923,7 +946,7 @@ static int soc21_common_resume(struct amdgpu_ip_block *ip_block)
return soc21_common_hw_init(ip_block);
}
-static bool soc21_common_is_idle(void *handle)
+static bool soc21_common_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
@@ -940,6 +963,7 @@ static int soc21_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
case IP_VERSION(7, 7, 1):
case IP_VERSION(7, 11, 0):
case IP_VERSION(7, 11, 1):
+ case IP_VERSION(7, 11, 2):
case IP_VERSION(7, 11, 3):
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
@@ -972,9 +996,9 @@ static int soc21_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void soc21_common_get_clockgating_state(void *handle, u64 *flags)
+static void soc21_common_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->nbio.funcs->get_clockgating_state(adev, flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc24.c b/drivers/gpu/drm/amd/amdgpu/soc24.c
index 6b8e078ee7c7..972b449ab89f 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc24.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc24.c
@@ -26,7 +26,6 @@
#include <linux/pci.h>
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
@@ -532,7 +531,7 @@ static int soc24_common_resume(struct amdgpu_ip_block *ip_block)
return soc24_common_hw_init(ip_block);
}
-static bool soc24_common_is_idle(void *handle)
+static bool soc24_common_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
@@ -575,9 +574,9 @@ static int soc24_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void soc24_common_get_clockgating_state(void *handle, u64 *flags)
+static void soc24_common_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->nbio.funcs->get_clockgating_state(adev, flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
index 64891f099366..a3b5fda22432 100644
--- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
@@ -151,6 +151,7 @@ struct ta_ras_init_flags {
uint16_t xcc_mask;
uint8_t channel_dis_num;
uint8_t nps_mode;
+ uint32_t active_umc_mask;
};
struct ta_ras_mca_addr {
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 0968e551f7b5..7d17ae56f901 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -353,9 +353,9 @@ static int tonga_ih_resume(struct amdgpu_ip_block *ip_block)
return tonga_ih_hw_init(ip_block);
}
-static bool tonga_ih_is_idle(void *handle)
+static bool tonga_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
index a7b9c358a2d4..74f57b2d30a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
@@ -415,6 +415,7 @@ static int umc_v12_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank
err_type = ACA_ERROR_TYPE_CE;
else
return 0;
+ bank->aca_err_type = err_type;
ret = aca_bank_info_decode(bank, &info);
if (ret)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
index 5830e799c0a3..5dbaebb592b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
@@ -98,7 +98,7 @@ static void uvd_v3_1_ring_emit_ib(struct amdgpu_ring *ring,
}
/**
- * uvd_v3_1_ring_emit_fence - emit an fence & trap command
+ * uvd_v3_1_ring_emit_fence - emit a fence & trap command
*
* @ring: amdgpu_ring pointer
* @addr: address
@@ -242,7 +242,7 @@ static void uvd_v3_1_mc_resume(struct amdgpu_device *adev)
uint64_t addr;
uint32_t size;
- /* programm the VCPU memory controller bits 0-27 */
+ /* program the VCPU memory controller bits 0-27 */
addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
@@ -416,7 +416,7 @@ static int uvd_v3_1_start(struct amdgpu_device *adev)
/* Set the write pointer delay */
WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);
- /* programm the 4GB memory segment for rptr and ring buffer */
+ /* Program the 4GB memory segment for rptr and ring buffer */
WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
(0x7 << 16) | (0x1 << 31));
@@ -758,9 +758,9 @@ static int uvd_v3_1_resume(struct amdgpu_ip_block *ip_block)
return uvd_v3_1_hw_init(ip_block);
}
-static bool uvd_v3_1_is_idle(void *handle)
+static bool uvd_v3_1_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index f93079e09215..4b96fd583772 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -302,7 +302,7 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
/* enable VCPU clock */
WREG32(mmUVD_VCPU_CNTL, 1 << 9);
- /* disable interupt */
+ /* disable interrupt */
WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
#ifdef __BIG_ENDIAN
@@ -312,6 +312,7 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
#endif
WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
+
/* initialize UVD memory controller */
WREG32(mmUVD_LMI_CTRL, 0x203108);
@@ -658,9 +659,9 @@ static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}
-static bool uvd_v4_2_is_idle(void *handle)
+static bool uvd_v4_2_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 050a0f309390..71409ad8b7ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -580,9 +580,9 @@ static void uvd_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
}
}
-static bool uvd_v5_0_is_idle(void *handle)
+static bool uvd_v5_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}
@@ -837,9 +837,9 @@ out:
return ret;
}
-static void uvd_v5_0_get_clockgating_state(void *handle, u64 *flags)
+static void uvd_v5_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
mutex_lock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index d9d036ee51fb..1c07b701d0e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -1143,9 +1143,9 @@ static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, vmid);
}
-static bool uvd_v6_0_is_idle(void *handle)
+static bool uvd_v6_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}
@@ -1156,7 +1156,7 @@ static int uvd_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (uvd_v6_0_is_idle(adev))
+ if (uvd_v6_0_is_idle(ip_block))
return 0;
}
return -ETIMEDOUT;
@@ -1498,9 +1498,9 @@ out:
return ret;
}
-static void uvd_v6_0_get_clockgating_state(void *handle, u64 *flags)
+static void uvd_v6_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
mutex_lock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index c633b7ff2943..bee3e904a6bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -201,9 +201,9 @@ static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}
-static bool vce_v2_0_is_idle(void *handle)
+static bool vce_v2_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
}
@@ -214,7 +214,7 @@ static int vce_v2_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
unsigned i;
for (i = 0; i < adev->usec_timeout; i++) {
- if (vce_v2_0_is_idle(adev))
+ if (vce_v2_0_is_idle(ip_block))
return 0;
}
return -ETIMEDOUT;
@@ -280,11 +280,11 @@ static int vce_v2_0_stop(struct amdgpu_device *adev)
if (vce_v2_0_lmi_clean(adev)) {
- DRM_INFO("vce is not idle \n");
+ DRM_INFO("VCE is not idle \n");
return 0;
}
- ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCN);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE);
if (!ip_block)
return -EINVAL;
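
The vce_v2_0_stop() hunk above also corrects the IP-block lookup to request the VCE block instead of the VCN block. The guarded-lookup pattern it depends on is sketched below, reusing only symbols visible in the hunk; the wrapper name is hypothetical.

/* Sketch of the corrected lookup; foo_vce_get_own_block() is not real code. */
static int foo_vce_get_own_block(struct amdgpu_device *adev)
{
        struct amdgpu_ip_block *ip_block;

        /* Ask for this engine's own block type; a mismatched type can
         * return NULL, which is why the explicit check matters.
         */
        ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE);
        if (!ip_block)
                return -EINVAL;

        return 0;
}
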
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index f8bddcd19b68..708123899c41 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -597,9 +597,9 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}
-static bool vce_v3_0_is_idle(void *handle)
+static bool vce_v3_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 mask = 0;
mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
@@ -614,7 +614,7 @@ static int vce_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++)
- if (vce_v3_0_is_idle(adev))
+ if (vce_v3_0_is_idle(ip_block))
return 0;
return -ETIMEDOUT;
@@ -828,9 +828,9 @@ out:
return ret;
}
-static void vce_v3_0_get_clockgating_state(void *handle, u64 *flags)
+static void vce_v3_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
mutex_lock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 5ea96c983517..21b57c29bf7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -81,14 +81,14 @@ static const struct amdgpu_hwip_reg_entry vcn_reg_list_1_0[] = {
SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE)
};
-static int vcn_v1_0_stop(struct amdgpu_device *adev);
+static int vcn_v1_0_stop(struct amdgpu_vcn_inst *vinst);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v1_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state);
-static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v1_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+static int vcn_v1_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
static void vcn_v1_0_idle_work_handler(struct work_struct *work);
static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);
@@ -105,7 +105,8 @@ static int vcn_v1_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- adev->vcn.num_enc_rings = 2;
+ adev->vcn.inst[0].num_enc_rings = 2;
+ adev->vcn.inst[0].set_pg_state = vcn_v1_0_set_pg_state;
vcn_v1_0_set_dec_ring_funcs(adev);
vcn_v1_0_set_enc_ring_funcs(adev);
@@ -113,7 +114,7 @@ static int vcn_v1_0_early_init(struct amdgpu_ip_block *ip_block)
jpeg_v1_0_early_init(ip_block);
- return amdgpu_vcn_early_init(adev);
+ return amdgpu_vcn_early_init(adev, 0);
}
/**
@@ -138,23 +139,23 @@ static int vcn_v1_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
/* VCN ENC TRAP */
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
&adev->vcn.inst->irq);
if (r)
return r;
}
- r = amdgpu_vcn_sw_init(adev);
+ r = amdgpu_vcn_sw_init(adev, 0);
if (r)
return r;
/* Override the work func */
- adev->vcn.idle_work.work.func = vcn_v1_0_idle_work_handler;
+ adev->vcn.inst[0].idle_work.work.func = vcn_v1_0_idle_work_handler;
- amdgpu_vcn_setup_ucode(adev);
+ amdgpu_vcn_setup_ucode(adev, 0);
- r = amdgpu_vcn_resume(adev);
+ r = amdgpu_vcn_resume(adev, 0);
if (r)
return r;
@@ -166,18 +167,18 @@ static int vcn_v1_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- adev->vcn.internal.scratch9 = adev->vcn.inst->external.scratch9 =
+ adev->vcn.inst[0].internal.scratch9 = adev->vcn.inst->external.scratch9 =
SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
- adev->vcn.internal.data0 = adev->vcn.inst->external.data0 =
+ adev->vcn.inst[0].internal.data0 = adev->vcn.inst->external.data0 =
SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
- adev->vcn.internal.data1 = adev->vcn.inst->external.data1 =
+ adev->vcn.inst[0].internal.data1 = adev->vcn.inst->external.data1 =
SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
- adev->vcn.internal.cmd = adev->vcn.inst->external.cmd =
+ adev->vcn.inst[0].internal.cmd = adev->vcn.inst->external.cmd =
SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
- adev->vcn.internal.nop = adev->vcn.inst->external.nop =
+ adev->vcn.inst[0].internal.nop = adev->vcn.inst->external.nop =
SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);
ring = &adev->vcn.inst->ring_enc[i];
@@ -189,7 +190,7 @@ static int vcn_v1_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode;
+ adev->vcn.inst[0].pause_dpg_mode = vcn_v1_0_pause_dpg_mode;
if (amdgpu_vcnfw_log) {
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
@@ -223,13 +224,13 @@ static int vcn_v1_0_sw_fini(struct amdgpu_ip_block *ip_block)
int r;
struct amdgpu_device *adev = ip_block->adev;
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(adev, 0);
if (r)
return r;
jpeg_v1_0_sw_fini(ip_block);
- r = amdgpu_vcn_sw_fini(adev);
+ r = amdgpu_vcn_sw_fini(adev, 0);
kfree(adev->vcn.ip_dump);
@@ -253,7 +254,7 @@ static int vcn_v1_0_hw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
ring = &adev->vcn.inst->ring_enc[i];
r = amdgpu_ring_test_helper(ring);
if (r)
@@ -276,13 +277,14 @@ static int vcn_v1_0_hw_init(struct amdgpu_ip_block *ip_block)
static int vcn_v1_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_vcn_inst *vinst = adev->vcn.inst;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
+ cancel_delayed_work_sync(&vinst->idle_work);
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
- RREG32_SOC15(VCN, 0, mmUVD_STATUS))) {
- vcn_v1_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ (vinst->cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(VCN, 0, mmUVD_STATUS))) {
+ vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
return 0;
@@ -301,7 +303,7 @@ static int vcn_v1_0_suspend(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
bool idle_work_unexecuted;
- idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work);
+ idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work);
if (idle_work_unexecuted) {
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_vcn(adev, false, 0);
@@ -311,7 +313,7 @@ static int vcn_v1_0_suspend(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(adev, 0);
return r;
}
@@ -327,7 +329,7 @@ static int vcn_v1_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- r = amdgpu_vcn_resume(ip_block->adev);
+ r = amdgpu_vcn_resume(ip_block->adev, 0);
if (r)
return r;
@@ -339,12 +341,13 @@ static int vcn_v1_0_resume(struct amdgpu_ip_block *ip_block)
/**
* vcn_v1_0_mc_resume_spg_mode - memory controller programming
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Let the VCN memory controller know it's offsets
*/
-static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
+static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
uint32_t offset;
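
The same conversion repeats through the rest of vcn_v1_0.c: helpers that used to take the device now take a struct amdgpu_vcn_inst and recover adev from it, which is what later lets callers dispatch per instance (vinst->set_pg_state(), vinst->pause_dpg_mode(), and so on). A reduced sketch of that convention, with a placeholder body:

/* Sketch only: the body is a placeholder, the parameter convention is the point. */
static void foo_vcn_helper(struct amdgpu_vcn_inst *vinst)
{
        struct amdgpu_device *adev = vinst->adev;       /* shared device state */

        /* Device-wide flags and registers are still reached through adev ... */
        if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
                dev_dbg(adev->dev, "helper running on a DPG-capable instance\n");

        /* ... while rings, work items and power-gating state hang off vinst,
         * so multi-instance ASICs can drive each VCN independently.
         */
}
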
@@ -410,8 +413,9 @@ static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
adev->gfx.config.gb_addr_config);
}
-static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
+static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
uint32_t offset;
@@ -485,12 +489,13 @@ static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
/**
* vcn_v1_0_disable_clock_gating - disable VCN clock gating
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Disable clock gating for VCN block
*/
-static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev)
+static void vcn_v1_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t data;
/* JPEG disable CGC */
@@ -611,12 +616,13 @@ static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev)
/**
* vcn_v1_0_enable_clock_gating - enable VCN clock gating
*
- * @adev: amdgpu_device pointer
+ * @vinst: Pointer to the VCN instance structure
*
* Enable clock gating for VCN block
*/
-static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
+static void vcn_v1_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t data = 0;
/* enable JPEG CGC */
@@ -680,8 +686,10 @@ static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}
-static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel)
+static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ uint8_t sram_sel)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t reg_data = 0;
/* disable JPEG CGC */
@@ -734,8 +742,9 @@ static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t s
WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SUVD_CGC_CTRL, 0, 0xFFFFFFFF, sram_sel);
}
-static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
+static void vcn_1_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t data = 0;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -779,8 +788,9 @@ static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}
-static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
+static void vcn_1_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t data = 0;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -823,12 +833,13 @@ static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
/**
* vcn_v1_0_start_spg_mode - start VCN block
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Setup and start the VCN block
*/
-static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
+static int vcn_v1_0_start_spg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
uint32_t lmi_swap_cntl;
@@ -837,13 +848,13 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
/* disable byte swapping */
lmi_swap_cntl = 0;
- vcn_1_0_disable_static_power_gating(adev);
+ vcn_1_0_disable_static_power_gating(vinst);
tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);
/* disable clock gating */
- vcn_v1_0_disable_clock_gating(adev);
+ vcn_v1_0_disable_clock_gating(vinst);
/* disable interupt */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
@@ -885,7 +896,7 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
- vcn_v1_0_mc_resume_spg_mode(adev);
+ vcn_v1_0_mc_resume_spg_mode(vinst);
WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK_1_0, 0x10);
WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0,
@@ -1001,8 +1012,9 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
return 0;
}
-static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
+static int vcn_v1_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
uint32_t lmi_swap_cntl;
@@ -1010,7 +1022,7 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
/* disable byte swapping */
lmi_swap_cntl = 0;
- vcn_1_0_enable_static_power_gating(adev);
+ vcn_1_0_enable_static_power_gating(vinst);
/* enable dynamic power gating mode */
tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
@@ -1019,7 +1031,7 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);
/* enable clock gating */
- vcn_v1_0_clock_gating_dpg_mode(adev, 0);
+ vcn_v1_0_clock_gating_dpg_mode(vinst, 0);
/* enable VCPU clock */
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
@@ -1068,7 +1080,7 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0xFFFFFFFF, 0);
- vcn_v1_0_mc_resume_dpg_mode(adev);
+ vcn_v1_0_mc_resume_dpg_mode(vinst);
WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_REG_XX_MASK, 0x10, 0xFFFFFFFF, 0);
WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, 0x3, 0xFFFFFFFF, 0);
@@ -1085,7 +1097,7 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MASTINT_EN,
UVD_MASTINT_EN__VCPU_EN_MASK, UVD_MASTINT_EN__VCPU_EN_MASK, 0);
- vcn_v1_0_clock_gating_dpg_mode(adev, 1);
+ vcn_v1_0_clock_gating_dpg_mode(vinst, 1);
/* setup mmUVD_LMI_CTRL */
WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL,
(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
@@ -1145,21 +1157,24 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
return 0;
}
-static int vcn_v1_0_start(struct amdgpu_device *adev)
+static int vcn_v1_0_start(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+
return (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ?
- vcn_v1_0_start_dpg_mode(adev) : vcn_v1_0_start_spg_mode(adev);
+ vcn_v1_0_start_dpg_mode(vinst) : vcn_v1_0_start_spg_mode(vinst);
}
/**
* vcn_v1_0_stop_spg_mode - stop VCN block
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* stop the VCN block
*/
-static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
+static int vcn_v1_0_stop_spg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
int tmp;
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
@@ -1199,13 +1214,14 @@ static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_STATUS, 0);
- vcn_v1_0_enable_clock_gating(adev);
- vcn_1_0_enable_static_power_gating(adev);
+ vcn_v1_0_enable_clock_gating(vinst);
+ vcn_1_0_enable_static_power_gating(vinst);
return 0;
}
-static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
+static int vcn_v1_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t tmp;
/* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */
@@ -1237,21 +1253,24 @@ static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
return 0;
}
-static int vcn_v1_0_stop(struct amdgpu_device *adev)
+static int vcn_v1_0_stop(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
int r;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- r = vcn_v1_0_stop_dpg_mode(adev);
+ r = vcn_v1_0_stop_dpg_mode(vinst);
else
- r = vcn_v1_0_stop_spg_mode(adev);
+ r = vcn_v1_0_stop_spg_mode(vinst);
return r;
}
-static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state)
+static int vcn_v1_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
int ret_code;
uint32_t reg_data = 0;
uint32_t reg_data2 = 0;
@@ -1377,9 +1396,9 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
return 0;
}
-static bool vcn_v1_0_is_idle(void *handle)
+static bool vcn_v1_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}
@@ -1399,16 +1418,17 @@ static int vcn_v1_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_vcn_inst *vinst = adev->vcn.inst;
bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
/* wait for STATUS to clear */
- if (!vcn_v1_0_is_idle(adev))
+ if (!vcn_v1_0_is_idle(ip_block))
return -EBUSY;
- vcn_v1_0_enable_clock_gating(adev);
+ vcn_v1_0_enable_clock_gating(vinst);
} else {
/* disable HW gating and enable Sw gating */
- vcn_v1_0_disable_clock_gating(adev);
+ vcn_v1_0_disable_clock_gating(vinst);
}
return 0;
}
@@ -1800,8 +1820,8 @@ static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t coun
}
}
-static int vcn_v1_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
+static int vcn_v1_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state)
{
/* This doesn't actually powergate the VCN block.
* That's done in the dpm code via the SMC. This
@@ -1811,28 +1831,29 @@ static int vcn_v1_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
* the smc and the hw blocks
*/
int ret;
- struct amdgpu_device *adev = ip_block->adev;
- if (state == adev->vcn.cur_state)
+ if (state == vinst->cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
- ret = vcn_v1_0_stop(adev);
+ ret = vcn_v1_0_stop(vinst);
else
- ret = vcn_v1_0_start(adev);
+ ret = vcn_v1_0_start(vinst);
if (!ret)
- adev->vcn.cur_state = state;
+ vinst->cur_state = state;
+
return ret;
}
static void vcn_v1_0_idle_work_handler(struct work_struct *work)
{
- struct amdgpu_device *adev =
- container_of(work, struct amdgpu_device, vcn.idle_work.work);
+ struct amdgpu_vcn_inst *vcn_inst =
+ container_of(work, struct amdgpu_vcn_inst, idle_work.work);
+ struct amdgpu_device *adev = vcn_inst->adev;
unsigned int fences = 0, i;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i)
fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
@@ -1848,7 +1869,7 @@ static void vcn_v1_0_idle_work_handler(struct work_struct *work)
else
new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
- adev->vcn.pause_dpg_mode(adev, 0, &new_state);
+ adev->vcn.inst->pause_dpg_mode(vcn_inst, &new_state);
}
fences += amdgpu_fence_count_emitted(adev->jpeg.inst->ring_dec);
@@ -1862,16 +1883,16 @@ static void vcn_v1_0_idle_work_handler(struct work_struct *work)
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_GATE);
} else {
- schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
+ schedule_delayed_work(&adev->vcn.inst[0].idle_work, VCN_IDLE_TIMEOUT);
}
}
static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
+ bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work);
- mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);
+ mutex_lock(&adev->vcn.inst[0].vcn1_jpeg1_workaround);
if (amdgpu_fence_wait_empty(ring->adev->jpeg.inst->ring_dec))
DRM_ERROR("VCN dec: jpeg dec ring may not be empty\n");
@@ -1897,7 +1918,7 @@ void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks)
struct dpg_pause_state new_state;
unsigned int fences = 0, i;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i)
fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
if (fences)
@@ -1915,14 +1936,14 @@ void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks)
else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
new_state.jpeg = VCN_DPG_STATE__PAUSE;
- adev->vcn.pause_dpg_mode(adev, 0, &new_state);
+ adev->vcn.inst->pause_dpg_mode(adev->vcn.inst, &new_state);
}
}
void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring)
{
- schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
- mutex_unlock(&ring->adev->vcn.vcn1_jpeg1_workaround);
+ schedule_delayed_work(&ring->adev->vcn.inst[0].idle_work, VCN_IDLE_TIMEOUT);
+ mutex_unlock(&ring->adev->vcn.inst[0].vcn1_jpeg1_workaround);
}
static void vcn_v1_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
@@ -1997,7 +2018,7 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
.is_idle = vcn_v1_0_is_idle,
.wait_for_idle = vcn_v1_0_wait_for_idle,
.set_clockgating_state = vcn_v1_0_set_clockgating_state,
- .set_powergating_state = vcn_v1_0_set_powergating_state,
+ .set_powergating_state = vcn_set_powergating_state,
.dump_ip_state = vcn_v1_0_dump_ip_state,
.print_ip_state = vcn_v1_0_print_ip_state,
};
@@ -2056,11 +2077,11 @@ static int vcn_v1_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
uint32_t reg = amdgpu_ib_get_value(ib, i);
uint32_t val = amdgpu_ib_get_value(ib, i + 1);
- if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
+ if (reg == PACKET0(p->adev->vcn.inst[0].internal.data0, 0)) {
msg_lo = val;
- } else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) {
+ } else if (reg == PACKET0(p->adev->vcn.inst[0].internal.data1, 0)) {
msg_hi = val;
- } else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0)) {
+ } else if (reg == PACKET0(p->adev->vcn.inst[0].internal.cmd, 0)) {
r = vcn_v1_0_validate_bo(p, job,
((u64)msg_hi) << 32 | msg_lo);
if (r)
@@ -2145,7 +2166,7 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i)
adev->vcn.inst->ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;
}
@@ -2156,7 +2177,7 @@ static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 2;
+ adev->vcn.inst->irq.num_types = adev->vcn.inst[0].num_enc_rings + 2;
adev->vcn.inst->irq.funcs = &vcn_v1_0_irq_funcs;
}
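
The vcn_v1_0.c hunks above all follow the same refactor: helpers that previously took a struct amdgpu_device * now take the per-instance struct amdgpu_vcn_inst * and recover the device through a back-pointer, so per-instance state (cur_state, idle_work, ring lists) moves onto the instance. A minimal, self-contained sketch of that back-pointer pattern follows; the struct and field names are simplified stand-ins for the real amdgpu definitions, not the driver's actual types.

#include <stdio.h>

/* Simplified stand-ins for amdgpu_device / amdgpu_vcn_inst (hypothetical). */
struct device_ctx {
	unsigned int pg_flags;
};

struct vcn_inst {
	struct device_ctx *adev;  /* back-pointer to the owning device */
	int inst;                 /* instance index */
	int cur_state;            /* per-instance powergating state */
};

/* Helpers take the instance; the device is recovered from the back-pointer. */
static int inst_start(struct vcn_inst *vinst)
{
	struct device_ctx *adev = vinst->adev;

	printf("start VCN instance %d (pg_flags=0x%x)\n",
	       vinst->inst, adev->pg_flags);
	return 0;
}

int main(void)
{
	struct device_ctx dev = { .pg_flags = 0x1 };
	struct vcn_inst inst0 = { .adev = &dev, .inst = 0, .cur_state = 0 };

	return inst_start(&inst0);
}
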
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index e42cfc731ad8..8e7a36f26e9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -92,10 +92,10 @@ static const struct amdgpu_hwip_reg_entry vcn_reg_list_2_0[] = {
static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v2_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state);
-static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v2_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);
/**
* vcn_v2_0_early_init - set function pointers and load microcode
@@ -110,15 +110,16 @@ static int vcn_v2_0_early_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
- adev->vcn.num_enc_rings = 1;
+ adev->vcn.inst[0].num_enc_rings = 1;
else
- adev->vcn.num_enc_rings = 2;
+ adev->vcn.inst[0].num_enc_rings = 2;
+ adev->vcn.inst->set_pg_state = vcn_v2_0_set_pg_state;
vcn_v2_0_set_dec_ring_funcs(adev);
vcn_v2_0_set_enc_ring_funcs(adev);
vcn_v2_0_set_irq_funcs(adev);
- return amdgpu_vcn_early_init(adev);
+ return amdgpu_vcn_early_init(adev, 0);
}
/**
@@ -145,7 +146,7 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
/* VCN ENC TRAP */
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
&adev->vcn.inst->irq);
@@ -153,13 +154,13 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- r = amdgpu_vcn_sw_init(adev);
+ r = amdgpu_vcn_sw_init(adev, 0);
if (r)
return r;
- amdgpu_vcn_setup_ucode(adev);
+ amdgpu_vcn_setup_ucode(adev, 0);
- r = amdgpu_vcn_resume(adev);
+ r = amdgpu_vcn_resume(adev, 0);
if (r)
return r;
@@ -175,25 +176,25 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
- adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
- adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
- adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
- adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
- adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
- adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
adev->vcn.inst->external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
- adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
adev->vcn.inst->external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
- adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
adev->vcn.inst->external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
- adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
adev->vcn.inst->external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
- adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
+ adev->vcn.inst[0].internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
adev->vcn.inst->external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);
ring = &adev->vcn.inst->ring_enc[i];
@@ -210,7 +211,7 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode;
+ adev->vcn.inst[0].pause_dpg_mode = vcn_v2_0_pause_dpg_mode;
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
@@ -254,11 +255,11 @@ static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_virt_free_mm_table(adev);
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(adev, 0);
if (r)
return r;
- r = amdgpu_vcn_sw_fini(adev);
+ r = amdgpu_vcn_sw_fini(adev, 0);
kfree(adev->vcn.ip_dump);
@@ -292,7 +293,7 @@ static int vcn_v2_0_hw_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
ring->sched.ready = false;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
ring = &adev->vcn.inst->ring_enc[i];
r = amdgpu_ring_test_helper(ring);
if (r)
@@ -312,13 +313,14 @@ static int vcn_v2_0_hw_init(struct amdgpu_ip_block *ip_block)
static int vcn_v2_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_vcn_inst *vinst = adev->vcn.inst;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
+ cancel_delayed_work_sync(&vinst->idle_work);
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
- RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
- vcn_v2_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ (vinst->cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
+ vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
return 0;
}
@@ -338,7 +340,7 @@ static int vcn_v2_0_suspend(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- r = amdgpu_vcn_suspend(ip_block->adev);
+ r = amdgpu_vcn_suspend(ip_block->adev, 0);
return r;
}
@@ -354,7 +356,7 @@ static int vcn_v2_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- r = amdgpu_vcn_resume(ip_block->adev);
+ r = amdgpu_vcn_resume(ip_block->adev, 0);
if (r)
return r;
@@ -366,12 +368,13 @@ static int vcn_v2_0_resume(struct amdgpu_ip_block *ip_block)
/**
* vcn_v2_0_mc_resume - memory controller programming
*
- * @adev: amdgpu_device pointer
+ * @vinst: Pointer to the VCN instance structure
*
* Let the VCN memory controller know its offsets
*/
-static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
+static void vcn_v2_0_mc_resume(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
uint32_t offset;
@@ -426,8 +429,10 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}
-static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect)
+static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
uint32_t offset;
@@ -525,12 +530,13 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirec
/**
* vcn_v2_0_disable_clock_gating - disable VCN clock gating
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Disable clock gating for VCN block
*/
-static void vcn_v2_0_disable_clock_gating(struct amdgpu_device *adev)
+static void vcn_v2_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t data;
if (amdgpu_sriov_vf(adev))
@@ -634,9 +640,10 @@ static void vcn_v2_0_disable_clock_gating(struct amdgpu_device *adev)
WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}
-static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
+static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
uint8_t sram_sel, uint8_t indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t reg_data = 0;
/* enable sw clock gating control */
@@ -685,12 +692,13 @@ static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
/**
* vcn_v2_0_enable_clock_gating - enable VCN clock gating
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Enable clock gating for VCN block
*/
-static void vcn_v2_0_enable_clock_gating(struct amdgpu_device *adev)
+static void vcn_v2_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t data = 0;
if (amdgpu_sriov_vf(adev))
@@ -743,8 +751,9 @@ static void vcn_v2_0_enable_clock_gating(struct amdgpu_device *adev)
WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}
-static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev)
+static void vcn_v2_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t data = 0;
if (amdgpu_sriov_vf(adev))
@@ -792,8 +801,9 @@ static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev)
WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}
-static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
+static void vcn_v2_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t data = 0;
if (amdgpu_sriov_vf(adev))
@@ -834,13 +844,14 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
}
}
-static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
+static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
- vcn_v2_0_enable_static_power_gating(adev);
+ vcn_v2_0_enable_static_power_gating(vinst);
/* enable dynamic power gating mode */
tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
@@ -852,7 +863,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
adev->vcn.inst->dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst->dpg_sram_cpu_addr;
/* enable clock gating */
- vcn_v2_0_clock_gating_dpg_mode(adev, 0, indirect);
+ vcn_v2_0_clock_gating_dpg_mode(vinst, 0, indirect);
/* enable VCPU clock */
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
@@ -901,7 +912,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
- vcn_v2_0_mc_resume_dpg_mode(adev, indirect);
+ vcn_v2_0_mc_resume_dpg_mode(vinst, indirect);
WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
@@ -969,8 +980,9 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
return 0;
}
-static int vcn_v2_0_start(struct amdgpu_device *adev)
+static int vcn_v2_0_start(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
@@ -981,16 +993,16 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
amdgpu_dpm_enable_vcn(adev, true, 0);
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- return vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram);
+ return vcn_v2_0_start_dpg_mode(vinst, adev->vcn.inst->indirect_sram);
- vcn_v2_0_disable_static_power_gating(adev);
+ vcn_v2_0_disable_static_power_gating(vinst);
/* set uvd status busy */
tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);
/* SW clock gating */
- vcn_v2_0_disable_clock_gating(adev);
+ vcn_v2_0_disable_clock_gating(vinst);
/* enable VCPU clock */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
@@ -1034,7 +1046,7 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
- vcn_v2_0_mc_resume(adev);
+ vcn_v2_0_mc_resume(vinst);
/* release VCPU reset to boot */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
@@ -1142,12 +1154,13 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
return 0;
}
-static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev)
+static int vcn_v2_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
uint32_t tmp;
- vcn_v2_0_pause_dpg_mode(adev, 0, &state);
+ vcn_v2_0_pause_dpg_mode(vinst, &state);
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
@@ -1172,13 +1185,14 @@ static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev)
return 0;
}
-static int vcn_v2_0_stop(struct amdgpu_device *adev)
+static int vcn_v2_0_stop(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
uint32_t tmp;
int r;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v2_0_stop_dpg_mode(adev);
+ r = vcn_v2_0_stop_dpg_mode(vinst);
if (r)
return r;
goto power_off;
@@ -1230,8 +1244,8 @@ static int vcn_v2_0_stop(struct amdgpu_device *adev)
/* clear status */
WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0);
- vcn_v2_0_enable_clock_gating(adev);
- vcn_v2_0_enable_static_power_gating(adev);
+ vcn_v2_0_enable_clock_gating(vinst);
+ vcn_v2_0_enable_static_power_gating(vinst);
power_off:
if (adev->pm.dpm_enabled)
@@ -1240,9 +1254,11 @@ power_off:
return 0;
}
-static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state)
+static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
struct amdgpu_ring *ring;
uint32_t reg_data = 0;
int ret_code;
@@ -1317,9 +1333,9 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
return 0;
}
-static bool vcn_v2_0_is_idle(void *handle)
+static bool vcn_v2_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}
@@ -1346,12 +1362,12 @@ static int vcn_v2_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
if (enable) {
/* wait for STATUS to clear */
- if (!vcn_v2_0_is_idle(adev))
+ if (!vcn_v2_0_is_idle(ip_block))
return -EBUSY;
- vcn_v2_0_enable_clock_gating(adev);
+ vcn_v2_0_enable_clock_gating(&adev->vcn.inst[0]);
} else {
/* disable HW gating and enable Sw gating */
- vcn_v2_0_disable_clock_gating(adev);
+ vcn_v2_0_disable_clock_gating(&adev->vcn.inst[0]);
}
return 0;
}
@@ -1421,9 +1437,9 @@ void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data0, 0));
amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));
amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
}
@@ -1438,7 +1454,7 @@ void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[0].internal.cmd, 0));
amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_END << 1));
}
@@ -1458,7 +1474,7 @@ void vcn_v2_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
WARN_ON(ring->wptr % 2 || count % 2);
for (i = 0; i < count / 2; i++) {
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.nop, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.nop, 0));
amdgpu_ring_write(ring, 0);
}
}
@@ -1479,25 +1495,25 @@ void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
struct amdgpu_device *adev = ring->adev;
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.context_id, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.context_id, 0));
amdgpu_ring_write(ring, seq);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data0, 0));
amdgpu_ring_write(ring, addr & 0xffffffff);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data1, 0));
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));
amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_FENCE << 1));
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data0, 0));
amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data1, 0));
amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));
amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_TRAP << 1));
}
@@ -1520,14 +1536,14 @@ void vcn_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_device *adev = ring->adev;
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_vmid, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.ib_vmid, 0));
amdgpu_ring_write(ring, vmid);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_bar_low, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.ib_bar_low, 0));
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_bar_high, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.ib_bar_high, 0));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_size, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.ib_size, 0));
amdgpu_ring_write(ring, ib->length_dw);
}
@@ -1536,16 +1552,16 @@ void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
{
struct amdgpu_device *adev = ring->adev;
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data0, 0));
amdgpu_ring_write(ring, reg << 2);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data1, 0));
amdgpu_ring_write(ring, val);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.gp_scratch8, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.gp_scratch8, 0));
amdgpu_ring_write(ring, mask);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));
amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_REG_READ_COND_WAIT << 1));
}
@@ -1570,13 +1586,13 @@ void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
{
struct amdgpu_device *adev = ring->adev;
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data0, 0));
amdgpu_ring_write(ring, reg << 2);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data1, 0));
amdgpu_ring_write(ring, val);
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));
amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_WRITE_REG << 1));
}
@@ -1777,9 +1793,9 @@ int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 4);
if (r)
return r;
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));
amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
- amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.scratch9, 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
@@ -1796,8 +1812,8 @@ int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
}
-static int vcn_v2_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
+static int vcn_v2_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state)
{
/* This doesn't actually powergate the VCN block.
* That's done in the dpm code via the SMC. This
@@ -1807,23 +1823,24 @@ static int vcn_v2_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
* the smc and the hw blocks
*/
int ret;
- struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_device *adev = vinst->adev;
if (amdgpu_sriov_vf(adev)) {
- adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
+ vinst->cur_state = AMD_PG_STATE_UNGATE;
return 0;
}
- if (state == adev->vcn.cur_state)
+ if (state == vinst->cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
- ret = vcn_v2_0_stop(adev);
+ ret = vcn_v2_0_stop(vinst);
else
- ret = vcn_v2_0_start(adev);
+ ret = vcn_v2_0_start(vinst);
if (!ret)
- adev->vcn.cur_state = state;
+ vinst->cur_state = state;
+
return ret;
}
@@ -1862,7 +1879,7 @@ static int vcn_v2_0_start_mmsch(struct amdgpu_device *adev,
adev->vcn.inst->ring_dec.wptr_old = 0;
vcn_v2_0_dec_ring_set_wptr(&adev->vcn.inst->ring_dec);
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
adev->vcn.inst->ring_enc[i].wptr = 0;
adev->vcn.inst->ring_enc[i].wptr_old = 0;
vcn_v2_0_enc_ring_set_wptr(&adev->vcn.inst->ring_enc[i]);
@@ -1988,7 +2005,7 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
AMDGPU_VCN_CONTEXT_SIZE);
- for (r = 0; r < adev->vcn.num_enc_rings; ++r) {
+ for (r = 0; r < adev->vcn.inst[0].num_enc_rings; ++r) {
ring = &adev->vcn.inst->ring_enc[r];
ring->wptr = 0;
MMSCH_V2_0_INSERT_DIRECT_WT(
@@ -2104,7 +2121,7 @@ static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.is_idle = vcn_v2_0_is_idle,
.wait_for_idle = vcn_v2_0_wait_for_idle,
.set_clockgating_state = vcn_v2_0_set_clockgating_state,
- .set_powergating_state = vcn_v2_0_set_powergating_state,
+ .set_powergating_state = vcn_set_powergating_state,
.dump_ip_state = vcn_v2_0_dump_ip_state,
.print_ip_state = vcn_v2_0_print_ip_state,
};
@@ -2177,7 +2194,7 @@ static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+ for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i)
adev->vcn.inst->ring_enc[i].funcs = &vcn_v2_0_enc_ring_vm_funcs;
}
@@ -2188,7 +2205,7 @@ static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = {
static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst->irq.num_types = adev->vcn.inst[0].num_enc_rings + 1;
adev->vcn.inst->irq.funcs = &vcn_v2_0_irq_funcs;
}
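
In both files above, the version-specific set_powergating_state() callbacks become per-instance set_pg_state() hooks installed at early init and dispatched through the instance, while a common vcn_set_powergating_state() wrapper now fills the amd_ip_funcs slot. The sketch below illustrates that callback-on-the-instance dispatch under simplified, hypothetical names; it is not the driver's actual wrapper.

#include <stdio.h>

enum pg_state { PG_STATE_GATE, PG_STATE_UNGATE };

struct vcn_inst {
	int inst;
	enum pg_state cur_state;
	int (*set_pg_state)(struct vcn_inst *vinst, enum pg_state state);
};

static int v2_0_set_pg_state(struct vcn_inst *vinst, enum pg_state state)
{
	if (state == vinst->cur_state)
		return 0;
	printf("instance %d: %s\n", vinst->inst,
	       state == PG_STATE_GATE ? "gate" : "ungate");
	vinst->cur_state = state;
	return 0;
}

/* Generic wrapper: walks the instances and calls each one's own callback. */
static int set_powergating_state(struct vcn_inst *insts, int count,
				 enum pg_state state)
{
	int i, r;

	for (i = 0; i < count; i++) {
		r = insts[i].set_pg_state(&insts[i], state);
		if (r)
			return r;
	}
	return 0;
}

int main(void)
{
	struct vcn_inst insts[2] = {
		{ 0, PG_STATE_UNGATE, v2_0_set_pg_state },
		{ 1, PG_STATE_UNGATE, v2_0_set_pg_state },
	};

	return set_powergating_state(insts, 2, PG_STATE_GATE);
}
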
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index b518202955ca..d716510b8dd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -95,10 +95,10 @@ static const struct amdgpu_hwip_reg_entry vcn_reg_list_2_5[] = {
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v2_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state);
-static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v2_5_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+static int vcn_v2_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev);
@@ -107,6 +107,133 @@ static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN1
};
+static void vcn_v2_5_idle_work_handler(struct work_struct *work)
+{
+ struct amdgpu_vcn_inst *vcn_inst =
+ container_of(work, struct amdgpu_vcn_inst, idle_work.work);
+ struct amdgpu_device *adev = vcn_inst->adev;
+ unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
+ unsigned int i, j;
+ int r = 0;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *v = &adev->vcn.inst[i];
+
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ for (j = 0; j < v->num_enc_rings; ++j)
+ fence[i] += amdgpu_fence_count_emitted(&v->ring_enc[j]);
+
+ /* Only set DPG pause for VCN3 or below; VCN4 and above are handled by FW */
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+ !v->using_unified_queue) {
+ struct dpg_pause_state new_state;
+
+ if (fence[i] ||
+ unlikely(atomic_read(&v->dpg_enc_submission_cnt)))
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+
+ v->pause_dpg_mode(v, &new_state);
+ }
+
+ fence[i] += amdgpu_fence_count_emitted(&v->ring_dec);
+ fences += fence[i];
+
+ }
+
+ if (!fences && !atomic_read(&adev->vcn.inst[0].total_submission_cnt)) {
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+ AMD_PG_STATE_GATE);
+ mutex_lock(&adev->vcn.workload_profile_mutex);
+ if (adev->vcn.workload_profile_active) {
+ r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
+ false);
+ if (r)
+ dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
+ adev->vcn.workload_profile_active = false;
+ }
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+ } else {
+ schedule_delayed_work(&adev->vcn.inst[0].idle_work, VCN_IDLE_TIMEOUT);
+ }
+}
+
+static void vcn_v2_5_ring_begin_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *v = &adev->vcn.inst[ring->me];
+ int r = 0;
+
+ atomic_inc(&adev->vcn.inst[0].total_submission_cnt);
+
+ cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work);
+
+ /* We can safely return early here because we've cancelled the
+ * delayed work so there is no one else to set it to false
+ * and we don't care if someone else sets it to true.
+ */
+ if (adev->vcn.workload_profile_active)
+ goto pg_lock;
+
+ mutex_lock(&adev->vcn.workload_profile_mutex);
+ if (!adev->vcn.workload_profile_active) {
+ r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
+ true);
+ if (r)
+ dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
+ adev->vcn.workload_profile_active = true;
+ }
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+
+pg_lock:
+ mutex_lock(&adev->vcn.inst[0].vcn_pg_lock);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+ AMD_PG_STATE_UNGATE);
+
+ /* Only set DPG pause for VCN3 or below; VCN4 and above are handled by FW */
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+ !v->using_unified_queue) {
+ struct dpg_pause_state new_state;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
+ atomic_inc(&v->dpg_enc_submission_cnt);
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ } else {
+ unsigned int fences = 0;
+ unsigned int i;
+
+ for (i = 0; i < v->num_enc_rings; ++i)
+ fences += amdgpu_fence_count_emitted(&v->ring_enc[i]);
+
+ if (fences || atomic_read(&v->dpg_enc_submission_cnt))
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+ }
+ v->pause_dpg_mode(v, &new_state);
+ }
+ mutex_unlock(&adev->vcn.inst[0].vcn_pg_lock);
+}
+
+static void vcn_v2_5_ring_end_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ /* Only set DPG pause for VCN3 or below; VCN4 and above are handled by FW */
+ if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC &&
+ !adev->vcn.inst[ring->me].using_unified_queue)
+ atomic_dec(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
+
+ atomic_dec(&adev->vcn.inst[0].total_submission_cnt);
+
+ schedule_delayed_work(&adev->vcn.inst[0].idle_work,
+ VCN_IDLE_TIMEOUT);
+}
+
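
The new vcn_v2_5_ring_begin_use()/vcn_v2_5_ring_end_use() pair above keeps the block powered while work is outstanding: begin_use bumps a submission counter and cancels the pending idle work, end_use drops the counter and re-arms the work, and the idle handler only gates power once both the counter and the emitted-fence counts are zero. A self-contained sketch of that counting scheme follows; the names are hypothetical, and the delayed work is modeled as a plain flag instead of a real timer.

#include <stdio.h>

struct vcn_ctx {
	int total_submissions;  /* outstanding ring submissions */
	int idle_work_pending;  /* stands in for the delayed idle work */
	int powered;
};

static void ring_begin_use(struct vcn_ctx *v)
{
	v->total_submissions++;
	v->idle_work_pending = 0;   /* cancel_delayed_work_sync() stand-in */
	v->powered = 1;             /* ungate before touching the block */
}

static void ring_end_use(struct vcn_ctx *v)
{
	v->total_submissions--;
	v->idle_work_pending = 1;   /* schedule_delayed_work() stand-in */
}

/* Runs "later"; only gates power if nothing was submitted in the meantime. */
static void idle_work_handler(struct vcn_ctx *v)
{
	if (!v->idle_work_pending)
		return;
	if (v->total_submissions == 0)
		v->powered = 0;
	v->idle_work_pending = 0;
}

int main(void)
{
	struct vcn_ctx v = { 0, 0, 0 };

	ring_begin_use(&v);
	ring_end_use(&v);
	idle_work_handler(&v);
	printf("powered after idle check: %d\n", v.powered);
	return 0;
}
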
/**
* vcn_v2_5_early_init - set function pointers and load microcode
*
@@ -118,11 +245,13 @@ static int amdgpu_ih_clientid_vcns[] = {
static int vcn_v2_5_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int i, r;
if (amdgpu_sriov_vf(adev)) {
adev->vcn.num_vcn_inst = 2;
adev->vcn.harvest_config = 0;
- adev->vcn.num_enc_rings = 1;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ adev->vcn.inst[i].num_enc_rings = 1;
} else {
u32 harvest;
int i;
@@ -131,13 +260,12 @@ static int vcn_v2_5_early_init(struct amdgpu_ip_block *ip_block)
harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
adev->vcn.harvest_config |= 1 << i;
+ adev->vcn.inst[i].num_enc_rings = 2;
}
if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
- AMDGPU_VCN_HARVEST_VCN1))
+ AMDGPU_VCN_HARVEST_VCN1))
/* both instances are harvested, disable the block */
return -ENOENT;
-
- adev->vcn.num_enc_rings = 2;
}
vcn_v2_5_set_dec_ring_funcs(adev);
@@ -145,7 +273,15 @@ static int vcn_v2_5_early_init(struct amdgpu_ip_block *ip_block)
vcn_v2_5_set_irq_funcs(adev);
vcn_v2_5_set_ras_funcs(adev);
- return amdgpu_vcn_early_init(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ adev->vcn.inst[i].set_pg_state = vcn_v2_5_set_pg_state;
+
+ r = amdgpu_vcn_early_init(adev, i);
+ if (r)
+ return r;
+ }
+
+ return 0;
}
/**
@@ -164,6 +300,8 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
+ volatile struct amdgpu_fw_shared *fw_shared;
+
if (adev->vcn.harvest_config & (1 << j))
continue;
/* VCN DEC TRAP */
@@ -173,7 +311,7 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
return r;
/* VCN ENC TRAP */
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[j].num_enc_rings; ++i) {
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
if (r)
@@ -185,39 +323,36 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].ras_poison_irq);
if (r)
return r;
- }
- r = amdgpu_vcn_sw_init(adev);
- if (r)
- return r;
+ r = amdgpu_vcn_sw_init(adev, j);
+ if (r)
+ return r;
- amdgpu_vcn_setup_ucode(adev);
+ /* Override the work func */
+ adev->vcn.inst[j].idle_work.work.func = vcn_v2_5_idle_work_handler;
- r = amdgpu_vcn_resume(adev);
- if (r)
- return r;
+ amdgpu_vcn_setup_ucode(adev, j);
- for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
- volatile struct amdgpu_fw_shared *fw_shared;
+ r = amdgpu_vcn_resume(adev, j);
+ if (r)
+ return r;
- if (adev->vcn.harvest_config & (1 << j))
- continue;
- adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
- adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
- adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
- adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
- adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
- adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
-
- adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
+
+ adev->vcn.inst[j].internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9);
- adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0);
- adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1);
- adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD);
- adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
+ adev->vcn.inst[j].internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP);
ring = &adev->vcn.inst[j].ring_dec;
@@ -237,7 +372,7 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[j].num_enc_rings; ++i) {
enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);
ring = &adev->vcn.inst[j].ring_enc[i];
@@ -265,6 +400,9 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ adev->vcn.inst[j].pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
}
if (amdgpu_sriov_vf(adev)) {
@@ -273,9 +411,6 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
-
r = amdgpu_vcn_ras_sw_init(adev);
if (r)
return r;
@@ -319,15 +454,18 @@ static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
- r = amdgpu_vcn_suspend(adev);
- if (r)
- return r;
-
- r = amdgpu_vcn_sw_fini(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(adev, i);
+ if (r)
+ return r;
+ r = amdgpu_vcn_sw_fini(adev, i);
+ if (r)
+ return r;
+ }
kfree(adev->vcn.ip_dump);
- return r;
+ return 0;
}
/**
@@ -366,7 +504,7 @@ static int vcn_v2_5_hw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[j].num_enc_rings; ++i) {
ring = &adev->vcn.inst[j].ring_enc[i];
r = amdgpu_ring_test_helper(ring);
if (r)
@@ -390,19 +528,21 @@ static int vcn_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int i;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
-
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (adev->vcn.harvest_config & (1 << i))
continue;
+ cancel_delayed_work_sync(&vinst->idle_work);
+
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+ (vinst->cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, i, mmUVD_STATUS)))
- vcn_v2_5_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
- amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
+ amdgpu_irq_put(adev, &vinst->ras_poison_irq, 0);
}
return 0;
@@ -417,15 +557,20 @@ static int vcn_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v2_5_suspend(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
r = vcn_v2_5_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(ip_block->adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(ip_block->adev, i);
+ if (r)
+ return r;
+ }
- return r;
+ return 0;
}
/**
@@ -437,11 +582,14 @@ static int vcn_v2_5_suspend(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v2_5_resume(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
- r = amdgpu_vcn_resume(ip_block->adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_resume(ip_block->adev, i);
+ if (r)
+ return r;
+ }
r = vcn_v2_5_hw_init(ip_block);
@@ -451,69 +599,71 @@ static int vcn_v2_5_resume(struct amdgpu_ip_block *ip_block)
/**
* vcn_v2_5_mc_resume - memory controller programming
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Let the VCN memory controller know its offsets
*/
-static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
+static void vcn_v2_5_mc_resume(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
uint32_t size;
uint32_t offset;
- int i;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
- /* cache window 0: fw */
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
- (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
- WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
- (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
- WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
- offset = 0;
- } else {
- WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.inst[i].gpu_addr));
- WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.inst[i].gpu_addr));
- offset = size;
- WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0,
- AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
- }
- WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE0, size);
+ if (adev->vcn.harvest_config & (1 << i))
+ return;
- /* cache window 1: stack */
- WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
- WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
- WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
- WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
-
- /* cache window 2: context */
- WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
- WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
- WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
- WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
-
- /* non-cache window */
- WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
- WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
- WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
- WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_SIZE0,
- AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
+ /* cache window 0: fw */
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
+ offset = 0;
+ } else {
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[i].gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[i].gpu_addr));
+ offset = size;
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0,
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
}
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE0, size);
+
+ /* cache window 1: stack */
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
+
+ /* cache window 2: context */
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
+
+ /* non-cache window */
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_SIZE0,
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
}
-static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst_idx].fw->size + 4);
uint32_t offset;
@@ -611,123 +761,124 @@ static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
/**
* vcn_v2_5_disable_clock_gating - disable VCN clock gating
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Disable clock gating for VCN block
*/
-static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
+static void vcn_v2_5_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
uint32_t data;
- int i;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- /* UVD disable CGC */
- data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
- if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
- data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
- else
- data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
- data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
- data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
- WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
-
- data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
- data &= ~(UVD_CGC_GATE__SYS_MASK
- | UVD_CGC_GATE__UDEC_MASK
- | UVD_CGC_GATE__MPEG2_MASK
- | UVD_CGC_GATE__REGS_MASK
- | UVD_CGC_GATE__RBC_MASK
- | UVD_CGC_GATE__LMI_MC_MASK
- | UVD_CGC_GATE__LMI_UMC_MASK
- | UVD_CGC_GATE__IDCT_MASK
- | UVD_CGC_GATE__MPRD_MASK
- | UVD_CGC_GATE__MPC_MASK
- | UVD_CGC_GATE__LBSI_MASK
- | UVD_CGC_GATE__LRBBM_MASK
- | UVD_CGC_GATE__UDEC_RE_MASK
- | UVD_CGC_GATE__UDEC_CM_MASK
- | UVD_CGC_GATE__UDEC_IT_MASK
- | UVD_CGC_GATE__UDEC_DB_MASK
- | UVD_CGC_GATE__UDEC_MP_MASK
- | UVD_CGC_GATE__WCB_MASK
- | UVD_CGC_GATE__VCPU_MASK
- | UVD_CGC_GATE__MMSCH_MASK);
-
- WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);
-
- SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF);
-
- data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
- data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
- | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
- | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
- | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
- | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
- | UVD_CGC_CTRL__SYS_MODE_MASK
- | UVD_CGC_CTRL__UDEC_MODE_MASK
- | UVD_CGC_CTRL__MPEG2_MODE_MASK
- | UVD_CGC_CTRL__REGS_MODE_MASK
- | UVD_CGC_CTRL__RBC_MODE_MASK
- | UVD_CGC_CTRL__LMI_MC_MODE_MASK
- | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
- | UVD_CGC_CTRL__IDCT_MODE_MASK
- | UVD_CGC_CTRL__MPRD_MODE_MASK
- | UVD_CGC_CTRL__MPC_MODE_MASK
- | UVD_CGC_CTRL__LBSI_MODE_MASK
- | UVD_CGC_CTRL__LRBBM_MODE_MASK
- | UVD_CGC_CTRL__WCB_MODE_MASK
- | UVD_CGC_CTRL__VCPU_MODE_MASK
- | UVD_CGC_CTRL__MMSCH_MODE_MASK);
- WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
-
- /* turn on */
- data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
- data |= (UVD_SUVD_CGC_GATE__SRE_MASK
- | UVD_SUVD_CGC_GATE__SIT_MASK
- | UVD_SUVD_CGC_GATE__SMP_MASK
- | UVD_SUVD_CGC_GATE__SCM_MASK
- | UVD_SUVD_CGC_GATE__SDB_MASK
- | UVD_SUVD_CGC_GATE__SRE_H264_MASK
- | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
- | UVD_SUVD_CGC_GATE__SIT_H264_MASK
- | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
- | UVD_SUVD_CGC_GATE__SCM_H264_MASK
- | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
- | UVD_SUVD_CGC_GATE__SDB_H264_MASK
- | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
- | UVD_SUVD_CGC_GATE__SCLR_MASK
- | UVD_SUVD_CGC_GATE__UVD_SC_MASK
- | UVD_SUVD_CGC_GATE__ENT_MASK
- | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
- | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
- | UVD_SUVD_CGC_GATE__SITE_MASK
- | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
- | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
- | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
- | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
- | UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
- WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);
-
- data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
- data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
- | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
- | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
- | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
- | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
- | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
- | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
- | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
+ if (adev->vcn.harvest_config & (1 << i))
+ return;
+ /* UVD disable CGC */
+ data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
+ if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
+ data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ else
+ data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
+ data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+ data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+ WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
+
+ data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
+ data &= ~(UVD_CGC_GATE__SYS_MASK
+ | UVD_CGC_GATE__UDEC_MASK
+ | UVD_CGC_GATE__MPEG2_MASK
+ | UVD_CGC_GATE__REGS_MASK
+ | UVD_CGC_GATE__RBC_MASK
+ | UVD_CGC_GATE__LMI_MC_MASK
+ | UVD_CGC_GATE__LMI_UMC_MASK
+ | UVD_CGC_GATE__IDCT_MASK
+ | UVD_CGC_GATE__MPRD_MASK
+ | UVD_CGC_GATE__MPC_MASK
+ | UVD_CGC_GATE__LBSI_MASK
+ | UVD_CGC_GATE__LRBBM_MASK
+ | UVD_CGC_GATE__UDEC_RE_MASK
+ | UVD_CGC_GATE__UDEC_CM_MASK
+ | UVD_CGC_GATE__UDEC_IT_MASK
+ | UVD_CGC_GATE__UDEC_DB_MASK
+ | UVD_CGC_GATE__UDEC_MP_MASK
+ | UVD_CGC_GATE__WCB_MASK
+ | UVD_CGC_GATE__VCPU_MASK
+ | UVD_CGC_GATE__MMSCH_MASK);
+
+ WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);
+
+ SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF);
+
+ data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
+ data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
+ | UVD_CGC_CTRL__SYS_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_MODE_MASK
+ | UVD_CGC_CTRL__MPEG2_MODE_MASK
+ | UVD_CGC_CTRL__REGS_MODE_MASK
+ | UVD_CGC_CTRL__RBC_MODE_MASK
+ | UVD_CGC_CTRL__LMI_MC_MODE_MASK
+ | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
+ | UVD_CGC_CTRL__IDCT_MODE_MASK
+ | UVD_CGC_CTRL__MPRD_MODE_MASK
+ | UVD_CGC_CTRL__MPC_MODE_MASK
+ | UVD_CGC_CTRL__LBSI_MODE_MASK
+ | UVD_CGC_CTRL__LRBBM_MODE_MASK
+ | UVD_CGC_CTRL__WCB_MODE_MASK
+ | UVD_CGC_CTRL__VCPU_MODE_MASK
+ | UVD_CGC_CTRL__MMSCH_MODE_MASK);
+ WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
+
+ /* turn on */
+ data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
+ data |= (UVD_SUVD_CGC_GATE__SRE_MASK
+ | UVD_SUVD_CGC_GATE__SIT_MASK
+ | UVD_SUVD_CGC_GATE__SMP_MASK
+ | UVD_SUVD_CGC_GATE__SCM_MASK
+ | UVD_SUVD_CGC_GATE__SDB_MASK
+ | UVD_SUVD_CGC_GATE__SRE_H264_MASK
+ | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
+ | UVD_SUVD_CGC_GATE__SIT_H264_MASK
+ | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
+ | UVD_SUVD_CGC_GATE__SCM_H264_MASK
+ | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
+ | UVD_SUVD_CGC_GATE__SDB_H264_MASK
+ | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
+ | UVD_SUVD_CGC_GATE__SCLR_MASK
+ | UVD_SUVD_CGC_GATE__UVD_SC_MASK
+ | UVD_SUVD_CGC_GATE__ENT_MASK
+ | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
+ | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
+ | UVD_SUVD_CGC_GATE__SITE_MASK
+ | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
+ | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
+ | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
+ | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
+ | UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
+ WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);
+
+ data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
+ data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
- | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
- WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
- }
+ | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
+ WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
}
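The hunks above and below all follow one conversion pattern: each handler now takes a struct amdgpu_vcn_inst pointer and derives the device pointer and instance index locally, so the loop over instances moves out to the caller. A minimal sketch of that shape, with a hypothetical helper name that is not part of the patch:

static void vcn_example_per_inst_op(struct amdgpu_vcn_inst *vinst)
{
        struct amdgpu_device *adev = vinst->adev;       /* owning device */
        int i = vinst->inst;                            /* this instance's index */

        if (adev->vcn.harvest_config & (1 << i))        /* skip harvested instances */
                return;

        /* ... program only VCN[i] registers via RREG32_SOC15()/WREG32_SOC15() ... */
}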
-static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
- uint8_t sram_sel, int inst_idx, uint8_t indirect)
+static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ uint8_t sram_sel, uint8_t indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t reg_data = 0;
/* enable sw clock gating control */
@@ -776,68 +927,69 @@ static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
/**
* vcn_v2_5_enable_clock_gating - enable VCN clock gating
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Enable clock gating for VCN block
*/
-static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
+static void vcn_v2_5_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
uint32_t data = 0;
- int i;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- /* enable UVD CGC */
- data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
- if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
- data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
- else
- data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
- data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
- data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
- WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
-
- data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
- data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
- | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
- | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
- | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
- | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
- | UVD_CGC_CTRL__SYS_MODE_MASK
- | UVD_CGC_CTRL__UDEC_MODE_MASK
- | UVD_CGC_CTRL__MPEG2_MODE_MASK
- | UVD_CGC_CTRL__REGS_MODE_MASK
- | UVD_CGC_CTRL__RBC_MODE_MASK
- | UVD_CGC_CTRL__LMI_MC_MODE_MASK
- | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
- | UVD_CGC_CTRL__IDCT_MODE_MASK
- | UVD_CGC_CTRL__MPRD_MODE_MASK
- | UVD_CGC_CTRL__MPC_MODE_MASK
- | UVD_CGC_CTRL__LBSI_MODE_MASK
- | UVD_CGC_CTRL__LRBBM_MODE_MASK
- | UVD_CGC_CTRL__WCB_MODE_MASK
- | UVD_CGC_CTRL__VCPU_MODE_MASK);
- WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
-
- data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
- data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
- | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
- | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
- | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
- | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
- | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
- | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
- | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
- | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
- | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
- WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
- }
+ if (adev->vcn.harvest_config & (1 << i))
+ return;
+ /* enable UVD CGC */
+ data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
+ if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
+ data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ else
+ data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+ data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+ WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
+
+ data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
+ data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
+ | UVD_CGC_CTRL__SYS_MODE_MASK
+ | UVD_CGC_CTRL__UDEC_MODE_MASK
+ | UVD_CGC_CTRL__MPEG2_MODE_MASK
+ | UVD_CGC_CTRL__REGS_MODE_MASK
+ | UVD_CGC_CTRL__RBC_MODE_MASK
+ | UVD_CGC_CTRL__LMI_MC_MODE_MASK
+ | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
+ | UVD_CGC_CTRL__IDCT_MODE_MASK
+ | UVD_CGC_CTRL__MPRD_MODE_MASK
+ | UVD_CGC_CTRL__MPC_MODE_MASK
+ | UVD_CGC_CTRL__LBSI_MODE_MASK
+ | UVD_CGC_CTRL__LRBBM_MODE_MASK
+ | UVD_CGC_CTRL__WCB_MODE_MASK
+ | UVD_CGC_CTRL__VCPU_MODE_MASK);
+ WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
+
+ data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
+ data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
+ WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
}
-static void vcn_v2_6_enable_ras(struct amdgpu_device *adev, int inst_idx,
+static void vcn_v2_6_enable_ras(struct amdgpu_vcn_inst *vinst,
bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t tmp;
if (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(2, 6, 0))
@@ -862,8 +1014,10 @@ static void vcn_v2_6_enable_ras(struct amdgpu_device *adev, int inst_idx,
tmp, 0, indirect);
}
-static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static int vcn_v2_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
@@ -881,7 +1035,7 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
/* enable clock gating */
- vcn_v2_5_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
+ vcn_v2_5_clock_gating_dpg_mode(vinst, 0, indirect);
/* enable VCPU clock */
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
@@ -930,7 +1084,7 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
- vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);
+ vcn_v2_5_mc_resume_dpg_mode(vinst, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
@@ -941,7 +1095,7 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);
- vcn_v2_6_enable_ras(adev, inst_idx, indirect);
+ vcn_v2_6_enable_ras(vinst, indirect);
/* unblock VCPU register access */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
@@ -1006,197 +1160,187 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
return 0;
}
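The *_dpg_mode helpers above thread an "indirect" flag through every register write; conceptually it chooses between programming the register immediately and queueing register/value pairs into the instance's DPG SRAM buffer for a later firmware-driven replay. A rough sketch under that assumption (a simplified illustration, not the real WREG32_SOC15_DPG_MODE macro):

static void vcn_dpg_write_sketch(struct amdgpu_vcn_inst *vinst, u32 reg, u32 val,
                                 bool indirect)
{
        struct amdgpu_device *adev = vinst->adev;

        if (!indirect) {
                WREG32(reg, val);                       /* program the register now */
        } else {
                /* queue the pair; firmware replays it out of DPG SRAM later */
                *vinst->dpg_sram_curr_addr++ = reg;
                *vinst->dpg_sram_curr_addr++ = val;
        }
}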
-static int vcn_v2_5_start(struct amdgpu_device *adev)
+static int vcn_v2_5_start(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
+ volatile struct amdgpu_fw_shared *fw_shared =
+ adev->vcn.inst[i].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
- int i, j, k, r;
+ int j, k, r;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, true, i);
- }
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
- continue;
- }
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ return vcn_v2_5_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);
- /* disable register anti-hang mechanism */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0,
- ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+ /* disable register anti-hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0,
+ ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
- /* set uvd status busy */
- tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
- WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
- }
+ /* set uvd status busy */
+ tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
return 0;
- /*SW clock gating */
- vcn_v2_5_disable_clock_gating(adev);
+ /* SW clock gating */
+ vcn_v2_5_disable_clock_gating(vinst);
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- /* enable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
-
- /* disable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
-
- /* setup mmUVD_LMI_CTRL */
- tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
- tmp &= ~0xff;
- WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | 0x8|
- UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
- UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
- UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
- UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
-
- /* setup mmUVD_MPC_CNTL */
- tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
- tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
- tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
- WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);
-
- /* setup UVD_MPC_SET_MUXA0 */
- WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
- ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
-
- /* setup UVD_MPC_SET_MUXB0 */
- WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
- ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
-
- /* setup mmUVD_MPC_SET_MUX */
- WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
- ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
- (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
- }
+ /* enable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
- vcn_v2_5_mc_resume(adev);
+ /* disable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- /* VCN global tiling registers */
- WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
- WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
+ /* setup mmUVD_LMI_CTRL */
+ tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
+ tmp &= ~0xff;
+ WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | 0x8|
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+ /* setup mmUVD_MPC_CNTL */
+ tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
+ tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
+ tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
+ WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);
+
+ /* setup UVD_MPC_SET_MUXA0 */
+ WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
+ ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
+
+ /* setup UVD_MPC_SET_MUXB0 */
+ WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
+ ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
+
+ /* setup mmUVD_MPC_SET_MUX */
+ WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
+ ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
+ (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
+
+ vcn_v2_5_mc_resume(vinst);
- /* enable LMI MC and UMC channels */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+ /* VCN global tiling registers */
+ WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+ WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
- /* unblock VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+ /* enable LMI MC and UMC channels */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- for (k = 0; k < 10; ++k) {
- uint32_t status;
-
- for (j = 0; j < 100; ++j) {
- status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
- if (status & 2)
- break;
- if (amdgpu_emu_mode == 1)
- msleep(500);
- else
- mdelay(10);
- }
- r = 0;
+ /* unblock VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ for (k = 0; k < 10; ++k) {
+ uint32_t status;
+
+ for (j = 0; j < 100; ++j) {
+ status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
if (status & 2)
break;
+ if (amdgpu_emu_mode == 1)
+ msleep(500);
+ else
+ mdelay(10);
+ }
+ r = 0;
+ if (status & 2)
+ break;
- DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
- mdelay(10);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ mdelay(10);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
- mdelay(10);
- r = -1;
- }
+ mdelay(10);
+ r = -1;
+ }
- if (r) {
- DRM_ERROR("VCN decode not responding, giving up!!!\n");
- return r;
- }
+ if (r) {
+ DRM_ERROR("VCN decode not responding, giving up!!!\n");
+ return r;
+ }
- /* enable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
- UVD_MASTINT_EN__VCPU_EN_MASK,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- /* clear the busy bit of VCN_STATUS */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
- ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+ /* clear the busy bit of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
- WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);
+ WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);
- ring = &adev->vcn.inst[i].ring_dec;
- /* force RBC into idle state */
- rb_bufsz = order_base_2(ring->ring_size);
- tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
- WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
+ ring = &adev->vcn.inst[i].ring_dec;
+ /* force RBC into idle state */
+ rb_bufsz = order_base_2(ring->ring_size);
+ tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+ WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
- fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
- /* program the RB_BASE for ring buffer */
- WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
- lower_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
- upper_32_bits(ring->gpu_addr));
+ fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
+ /* program the RB_BASE for ring buffer */
+ WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+ upper_32_bits(ring->gpu_addr));
- /* Initialize the ring buffer's read and write pointers */
- WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);
- ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
- WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
- lower_32_bits(ring->wptr));
- fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
+ ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
+ WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
+ lower_32_bits(ring->wptr));
+ fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
- fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
- ring = &adev->vcn.inst[i].ring_enc[0];
- WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
- WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
- WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
- WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
- fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
-
- fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
- ring = &adev->vcn.inst[i].ring_enc[1];
- WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
- WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
- WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
- WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
- fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
- }
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
+
+ fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
+ ring = &adev->vcn.inst[i].ring_enc[1];
+ WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
+ fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
return 0;
}
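With vcn_v2_5_start() now operating on a single instance (and returning early on harvested ones), iterating over instances becomes the caller's job; a hypothetical caller, shown only to illustrate the new calling convention, would look like:

static int vcn_v2_5_start_all(struct amdgpu_device *adev)
{
        int i, r;

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                r = vcn_v2_5_start(&adev->vcn.inst[i]);
                if (r)
                        return r;
        }
        return 0;
}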
@@ -1397,8 +1541,10 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table);
}
-static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static int vcn_v2_5_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t tmp;
/* Wait for power status to be 1 */
@@ -1425,79 +1571,81 @@ static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
return 0;
}
-static int vcn_v2_5_stop(struct amdgpu_device *adev)
+static int vcn_v2_5_stop(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
uint32_t tmp;
- int i, r = 0;
+ int r;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v2_5_stop_dpg_mode(adev, i);
- continue;
- }
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- /* wait for vcn idle */
- r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
- if (r)
- return r;
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ r = vcn_v2_5_stop_dpg_mode(vinst);
+ goto done;
+ }
- tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__READ_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ /* wait for vcn idle */
+ r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
+ if (r)
+ goto done;
- /* block LMI UMC channel */
- tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
- tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
- WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
- tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
- UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ /* block LMI UMC channel */
+ tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
+ tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+ WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
- /* block VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
- UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
- /* reset VCPU */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ /* block VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
+ UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
- /* disable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
- ~(UVD_VCPU_CNTL__CLK_EN_MASK));
+ /* reset VCPU */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
- /* clear status */
- WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);
+ /* disable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
+ ~(UVD_VCPU_CNTL__CLK_EN_MASK));
- vcn_v2_5_enable_clock_gating(adev);
+ /* clear status */
+ WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);
- /* enable register anti-hang mechanism */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS),
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
- ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
- }
+ vcn_v2_5_enable_clock_gating(vinst);
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, false, i);
- }
+ /* enable register anti-hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS),
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
+ ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
- return 0;
+done:
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
+
+ return r;
}
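vcn_v2_5_stop() now funnels every exit through the done: label so the DPM vote taken when the instance was started is always dropped, even when an idle wait fails. The idiom in isolation, with placeholder helper names:

static int vcn_stop_skeleton(struct amdgpu_vcn_inst *vinst)
{
        struct amdgpu_device *adev = vinst->adev;
        int r;

        r = wait_for_vcn_idle(vinst);           /* placeholder for the idle polls */
        if (r)
                goto done;

        r = power_down_vcn(vinst);              /* placeholder for the register sequence */
done:
        if (adev->pm.dpm_enabled)
                amdgpu_dpm_enable_vcn(adev, false, vinst->inst);
        return r;
}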
-static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state)
+static int vcn_v2_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
struct amdgpu_ring *ring;
uint32_t reg_data = 0;
int ret_code = 0;
@@ -1643,8 +1791,8 @@ static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
.insert_start = vcn_v2_0_dec_ring_insert_start,
.insert_end = vcn_v2_0_dec_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .begin_use = amdgpu_vcn_ring_begin_use,
- .end_use = amdgpu_vcn_ring_end_use,
+ .begin_use = vcn_v2_5_ring_begin_use,
+ .end_use = vcn_v2_5_ring_end_use,
.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
@@ -1741,8 +1889,8 @@ static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
.insert_nop = amdgpu_ring_insert_nop,
.insert_end = vcn_v2_0_enc_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .begin_use = amdgpu_vcn_ring_begin_use,
- .end_use = amdgpu_vcn_ring_end_use,
+ .begin_use = vcn_v2_5_ring_begin_use,
+ .end_use = vcn_v2_5_ring_end_use,
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
@@ -1767,16 +1915,16 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
if (adev->vcn.harvest_config & (1 << j))
continue;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.inst[j].num_enc_rings; ++i) {
adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
adev->vcn.inst[j].ring_enc[i].me = j;
}
}
}
-static bool vcn_v2_5_is_idle(void *handle)
+static bool vcn_v2_5_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 1;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1811,40 +1959,45 @@ static int vcn_v2_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
{
struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
+ int i;
if (amdgpu_sriov_vf(adev))
return 0;
- if (enable) {
- if (!vcn_v2_5_is_idle(adev))
- return -EBUSY;
- vcn_v2_5_enable_clock_gating(adev);
- } else {
- vcn_v2_5_disable_clock_gating(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
+ if (enable) {
+ if (!vcn_v2_5_is_idle(ip_block))
+ return -EBUSY;
+ vcn_v2_5_enable_clock_gating(vinst);
+ } else {
+ vcn_v2_5_disable_clock_gating(vinst);
+ }
}
return 0;
}
-static int vcn_v2_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
+static int vcn_v2_5_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_device *adev = vinst->adev;
int ret;
if (amdgpu_sriov_vf(adev))
return 0;
- if(state == adev->vcn.cur_state)
+ if (state == vinst->cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
- ret = vcn_v2_5_stop(adev);
+ ret = vcn_v2_5_stop(vinst);
else
- ret = vcn_v2_5_start(adev);
+ ret = vcn_v2_5_start(vinst);
- if(!ret)
- adev->vcn.cur_state = state;
+ if (!ret)
+ vinst->cur_state = state;
return ret;
}
@@ -1921,10 +2074,10 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
- adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
- adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v2_6_ras_irq_funcs;
}
}
@@ -2001,7 +2154,7 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.is_idle = vcn_v2_5_is_idle,
.wait_for_idle = vcn_v2_5_wait_for_idle,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
- .set_powergating_state = vcn_v2_5_set_powergating_state,
+ .set_powergating_state = vcn_set_powergating_state,
.dump_ip_state = vcn_v2_5_dump_ip_state,
.print_ip_state = vcn_v2_5_print_ip_state,
};
@@ -2018,7 +2171,7 @@ static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
.is_idle = vcn_v2_5_is_idle,
.wait_for_idle = vcn_v2_5_wait_for_idle,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
- .set_powergating_state = vcn_v2_5_set_powergating_state,
+ .set_powergating_state = vcn_set_powergating_state,
.dump_ip_state = vcn_v2_5_dump_ip_state,
.print_ip_state = vcn_v2_5_print_ip_state,
};
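Both IP-funcs tables now point .set_powergating_state at the shared vcn_set_powergating_state() helper instead of a v2.5-specific handler. That helper is expected to fan out to the per-instance ->set_pg_state() callbacks installed at early-init time, roughly as in the sketch below; the real body lives in amdgpu_vcn.c and may differ in details:

static int vcn_set_powergating_state_sketch(struct amdgpu_ip_block *ip_block,
                                            enum amd_powergating_state state)
{
        struct amdgpu_device *adev = ip_block->adev;
        int i, r;

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

                r = vinst->set_pg_state(vinst, state);  /* e.g. vcn_v2_5_set_pg_state() */
                if (r)
                        return r;
        }
        return 0;
}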
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 63ddd4cca910..22ae1939476f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -105,10 +105,10 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state);
-static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v3_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);
@@ -124,11 +124,13 @@ static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);
static int vcn_v3_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int i, r;
if (amdgpu_sriov_vf(adev)) {
adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
adev->vcn.harvest_config = 0;
- adev->vcn.num_enc_rings = 1;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ adev->vcn.inst[i].num_enc_rings = 1;
} else {
if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
@@ -136,18 +138,27 @@ static int vcn_v3_0_early_init(struct amdgpu_ip_block *ip_block)
/* both instances are harvested, disable the block */
return -ENOENT;
- if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
- IP_VERSION(3, 0, 33))
- adev->vcn.num_enc_rings = 0;
- else
- adev->vcn.num_enc_rings = 2;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
+ IP_VERSION(3, 0, 33))
+ adev->vcn.inst[i].num_enc_rings = 0;
+ else
+ adev->vcn.inst[i].num_enc_rings = 2;
+ }
}
vcn_v3_0_set_dec_ring_funcs(adev);
vcn_v3_0_set_enc_ring_funcs(adev);
vcn_v3_0_set_irq_funcs(adev);
- return amdgpu_vcn_early_init(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ adev->vcn.inst[i].set_pg_state = vcn_v3_0_set_pg_state;
+
+ r = amdgpu_vcn_early_init(adev, i);
+ if (r)
+ return r;
+ }
+ return 0;
}
/**
@@ -166,16 +177,6 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
uint32_t *ptr;
struct amdgpu_device *adev = ip_block->adev;
- r = amdgpu_vcn_sw_init(adev);
- if (r)
- return r;
-
- amdgpu_vcn_setup_ucode(adev);
-
- r = amdgpu_vcn_resume(adev);
- if (r)
- return r;
-
/*
* Note: doorbell assignment is fixed for SRIOV multiple VCN engines
* Formula:
@@ -195,22 +196,32 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
if (adev->vcn.harvest_config & (1 << i))
continue;
- adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
- adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
- adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
- adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
- adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
- adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
+ r = amdgpu_vcn_sw_init(adev, i);
+ if (r)
+ return r;
+
+ amdgpu_vcn_setup_ucode(adev, i);
+
+ r = amdgpu_vcn_resume(adev, i);
+ if (r)
+ return r;
+
+ adev->vcn.inst[i].internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
- adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
adev->vcn.inst[i].external.scratch9 = SOC15_REG_OFFSET(VCN, i, mmUVD_SCRATCH9);
- adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
adev->vcn.inst[i].external.data0 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA0);
- adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
adev->vcn.inst[i].external.data1 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA1);
- adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
adev->vcn.inst[i].external.cmd = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_CMD);
- adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
+ adev->vcn.inst[i].internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
adev->vcn.inst[i].external.nop = SOC15_REG_OFFSET(VCN, i, mmUVD_NO_OP);
/* VCN DEC TRAP */
@@ -224,7 +235,7 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
ring = &adev->vcn.inst[i].ring_dec;
ring->use_doorbell = true;
if (amdgpu_sriov_vf(adev)) {
- ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1);
+ ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.inst[i].num_enc_rings + 1);
} else {
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i;
}
@@ -236,7 +247,7 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(j);
/* VCN ENC TRAP */
@@ -248,7 +259,7 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
ring = &adev->vcn.inst[i].ring_enc[j];
ring->use_doorbell = true;
if (amdgpu_sriov_vf(adev)) {
- ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1) + 1 + j;
+ ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.inst[i].num_enc_rings + 1) + 1 + j;
} else {
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i;
}
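The doorbell indices assigned above follow a fixed per-instance layout on bare metal: the decode ring sits at the instance base and encode ring j at base + 2 + j. Distilled from the visible assignments into an illustrative helper (the SR-IOV path uses the vcn_doorbell_index formula instead):

static u32 vcn_v3_0_doorbell_sketch(struct amdgpu_device *adev, int inst, int enc_ring)
{
        u32 base = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * inst;

        if (enc_ring < 0)
                return base;                    /* decode ring */
        return base + 2 + enc_ring;             /* encode rings 0..n-1 */
}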
@@ -274,6 +285,9 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ adev->vcn.inst[i].pause_dpg_mode = vcn_v3_0_pause_dpg_mode;
}
if (amdgpu_sriov_vf(adev)) {
@@ -281,8 +295,6 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
}
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- adev->vcn.pause_dpg_mode = vcn_v3_0_pause_dpg_mode;
/* Allocate memory for VCN IP Dump buffer */
ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
@@ -325,14 +337,18 @@ static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
- r = amdgpu_vcn_suspend(adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(adev, i);
+ if (r)
+ return r;
- r = amdgpu_vcn_sw_fini(adev);
+ r = amdgpu_vcn_sw_fini(adev, i);
+ if (r)
+ return r;
+ }
kfree(adev->vcn.ip_dump);
- return r;
+ return 0;
}
/**
@@ -370,7 +386,7 @@ static int vcn_v3_0_hw_init(struct amdgpu_ip_block *ip_block)
ring->sched.ready = true;
}
- for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
ring = &adev->vcn.inst[i].ring_enc[j];
if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
ring->sched.ready = false;
@@ -398,7 +414,7 @@ static int vcn_v3_0_hw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
ring = &adev->vcn.inst[i].ring_enc[j];
r = amdgpu_ring_test_helper(ring);
if (r)
@@ -422,17 +438,19 @@ static int vcn_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int i;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
-
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (adev->vcn.harvest_config & (1 << i))
continue;
+ cancel_delayed_work_sync(&vinst->idle_work);
+
if (!amdgpu_sriov_vf(adev)) {
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
- RREG32_SOC15(VCN, i, mmUVD_STATUS))) {
- vcn_v3_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ (vinst->cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(VCN, i, mmUVD_STATUS))) {
+ vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
}
}
@@ -449,15 +467,20 @@ static int vcn_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v3_0_suspend(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
r = vcn_v3_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(ip_block->adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(ip_block->adev, i);
+ if (r)
+ return r;
+ }
- return r;
+ return 0;
}
/**
@@ -469,11 +492,14 @@ static int vcn_v3_0_suspend(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v3_0_resume(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
- r = amdgpu_vcn_resume(ip_block->adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_resume(ip_block->adev, i);
+ if (r)
+ return r;
+ }
r = vcn_v3_0_hw_init(ip_block);
@@ -483,13 +509,14 @@ static int vcn_v3_0_resume(struct amdgpu_ip_block *ip_block)
/**
* vcn_v3_0_mc_resume - memory controller programming
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Let the VCN memory controller know its offsets
*/
-static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
+static void vcn_v3_0_mc_resume(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst].fw->size + 4);
uint32_t offset;
@@ -538,8 +565,11 @@ static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
}
-static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst_idx].fw->size + 4);
uint32_t offset;
@@ -634,8 +664,10 @@ static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
UVD, inst_idx, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
-static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v3_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data = 0;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -685,8 +717,10 @@ static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int
WREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS, data);
}
-static void vcn_v3_0_enable_static_power_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v3_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -733,13 +767,14 @@ static void vcn_v3_0_enable_static_power_gating(struct amdgpu_device *adev, int
/**
* vcn_v3_0_disable_clock_gating - disable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: Pointer to the VCN instance structure
*
* Disable clock gating for VCN block
*/
-static void vcn_v3_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v3_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
/* VCN disable CGC */
@@ -866,9 +901,12 @@ static void vcn_v3_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data);
}
-static void vcn_v3_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
- uint8_t sram_sel, int inst_idx, uint8_t indirect)
+static void vcn_v3_0_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ uint8_t sram_sel,
+ uint8_t indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t reg_data = 0;
/* enable sw clock gating control */
@@ -917,13 +955,14 @@ static void vcn_v3_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
/**
* vcn_v3_0_enable_clock_gating - enable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: Pointer to the VCN instance structure
*
* Enable clock gating for VCN block
*/
-static void vcn_v3_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v3_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
/* enable VCN CGC */
@@ -982,8 +1021,10 @@ static void vcn_v3_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data);
}
-static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static int vcn_v3_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
@@ -1001,7 +1042,7 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
/* enable clock gating */
- vcn_v3_0_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
+ vcn_v3_0_clock_gating_dpg_mode(vinst, 0, indirect);
/* enable VCPU clock */
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
@@ -1050,7 +1091,7 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
- vcn_v3_0_mc_resume_dpg_mode(adev, inst_idx, indirect);
+ vcn_v3_0_mc_resume_dpg_mode(vinst, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, inst_idx, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
@@ -1134,192 +1175,188 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
return 0;
}
-static int vcn_v3_0_start(struct amdgpu_device *adev)
+static int vcn_v3_0_start(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
volatile struct amdgpu_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
- int i, j, k, r;
+ int j, k, r;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, true, i);
- }
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v3_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
- continue;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ return vcn_v3_0_start_dpg_mode(vinst, vinst->indirect_sram);
- /* disable VCN power gating */
- vcn_v3_0_disable_static_power_gating(adev, i);
+ /* disable VCN power gating */
+ vcn_v3_0_disable_static_power_gating(vinst);
- /* set VCN status busy */
- tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
- WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
+ /* set VCN status busy */
+ tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
- /*SW clock gating */
- vcn_v3_0_disable_clock_gating(adev, i);
+ /* SW clock gating */
+ vcn_v3_0_disable_clock_gating(vinst);
- /* enable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
-
- /* disable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
-
- /* enable LMI MC and UMC channels */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
-
- tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
- tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
-
- /* setup mmUVD_LMI_CTRL */
- tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
- WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp |
- UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
- UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
- UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
- UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
-
- /* setup mmUVD_MPC_CNTL */
- tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
- tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
- tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
- WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);
-
- /* setup UVD_MPC_SET_MUXA0 */
- WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
- ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
-
- /* setup UVD_MPC_SET_MUXB0 */
- WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
- ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
-
- /* setup mmUVD_MPC_SET_MUX */
- WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
- ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
- (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
-
- vcn_v3_0_mc_resume(adev, i);
-
- /* VCN global tiling registers */
- WREG32_SOC15(VCN, i, mmUVD_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
-
- /* unblock VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
-
- /* release VCPU reset to boot */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ /* enable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
- for (j = 0; j < 10; ++j) {
- uint32_t status;
+ /* disable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- for (k = 0; k < 100; ++k) {
- status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
- if (status & 2)
- break;
- mdelay(10);
- }
- r = 0;
- if (status & 2)
- break;
+ /* enable LMI MC and UMC channels */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
- DRM_ERROR("VCN[%d] decode not responding, trying to reset the VCPU!!!\n", i);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
- mdelay(10);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
+ tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
+ /* setup mmUVD_LMI_CTRL */
+ tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
+ WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+ /* setup mmUVD_MPC_CNTL */
+ tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
+ tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
+ tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
+ WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);
+
+ /* setup UVD_MPC_SET_MUXA0 */
+ WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
+ ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
+
+ /* setup UVD_MPC_SET_MUXB0 */
+ WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
+ ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
+
+ /* setup mmUVD_MPC_SET_MUX */
+ WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
+ ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
+ (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
+
+ vcn_v3_0_mc_resume(vinst);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15(VCN, i, mmUVD_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* unblock VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* release VCPU reset to boot */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ for (j = 0; j < 10; ++j) {
+ uint32_t status;
+
+ for (k = 0; k < 100; ++k) {
+ status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
+ if (status & 2)
+ break;
mdelay(10);
- r = -1;
}
+ r = 0;
+ if (status & 2)
+ break;
- if (r) {
- DRM_ERROR("VCN[%d] decode not responding, giving up!!!\n", i);
- return r;
- }
+ DRM_ERROR("VCN[%d] decode not responding, trying to reset the VCPU!!!\n", i);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ mdelay(10);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
- /* enable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
- UVD_MASTINT_EN__VCPU_EN_MASK,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
+ mdelay(10);
+ r = -1;
+ }
- /* clear the busy bit of VCN_STATUS */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
- ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+ if (r) {
+ DRM_ERROR("VCN[%d] decode not responding, giving up!!!\n", i);
+ return r;
+ }
- WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- ring = &adev->vcn.inst[i].ring_dec;
- /* force RBC into idle state */
- rb_bufsz = order_base_2(ring->ring_size);
- tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
- WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
+ /* clear the busy bit of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
+ WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);
- /* programm the RB_BASE for ring buffer */
- WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
- lower_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
- upper_32_bits(ring->gpu_addr));
+ ring = &adev->vcn.inst[i].ring_dec;
+ /* force RBC into idle state */
+ rb_bufsz = order_base_2(ring->ring_size);
+ tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+ WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
- /* Initialize the ring buffer's read and write pointers */
- WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
- WREG32_SOC15(VCN, i, mmUVD_SCRATCH2, 0);
- ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
- WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
- lower_32_bits(ring->wptr));
- fw_shared->rb.wptr = lower_32_bits(ring->wptr);
- fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
-
- if (amdgpu_ip_version(adev, UVD_HWIP, 0) !=
- IP_VERSION(3, 0, 33)) {
- fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
- ring = &adev->vcn.inst[i].ring_enc[0];
- WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
- WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
- WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
- WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
- fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
-
- fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
- ring = &adev->vcn.inst[i].ring_enc[1];
- WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
- WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
- WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
- WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
- fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
- }
+ /* program the RB_BASE for ring buffer */
+ WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+ upper_32_bits(ring->gpu_addr));
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);
+
+ WREG32_SOC15(VCN, i, mmUVD_SCRATCH2, 0);
+ ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
+ WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
+ lower_32_bits(ring->wptr));
+ fw_shared->rb.wptr = lower_32_bits(ring->wptr);
+ fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
+
+ if (amdgpu_ip_version(adev, UVD_HWIP, 0) !=
+ IP_VERSION(3, 0, 33)) {
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
+
+ fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
+ ring = &adev->vcn.inst[i].ring_enc[1];
+ WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
+ fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
}
return 0;
@@ -1434,7 +1471,7 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
mmUVD_VCPU_CACHE_SIZE2),
AMDGPU_VCN_CONTEXT_SIZE);
- for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
ring = &adev->vcn.inst[i].ring_enc[j];
ring->wptr = 0;
rb_addr = ring->gpu_addr;
@@ -1534,12 +1571,14 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
return 0;
}
-static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static int vcn_v3_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
uint32_t tmp;
- vcn_v3_0_pause_dpg_mode(adev, inst_idx, &state);
+ vcn_v3_0_pause_dpg_mode(vinst, &state);
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
@@ -1565,86 +1604,87 @@ static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
return 0;
}
-static int vcn_v3_0_stop(struct amdgpu_device *adev)
+static int vcn_v3_0_stop(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
uint32_t tmp;
- int i, r = 0;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ int r = 0;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v3_0_stop_dpg_mode(adev, i);
- continue;
- }
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- /* wait for vcn idle */
- r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
- if (r)
- return r;
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ r = vcn_v3_0_stop_dpg_mode(vinst);
+ goto done;
+ }
- tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__READ_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ /* wait for vcn idle */
+ r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
+ if (r)
+ goto done;
- /* disable LMI UMC channel */
- tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
- tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
- WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
- tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
- UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
+
+ /* disable LMI UMC channel */
+ tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
+ tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+ WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
- /* block VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
- UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+ /* block VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
+ UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
- /* reset VCPU */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ /* reset VCPU */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
- /* disable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
- ~(UVD_VCPU_CNTL__CLK_EN_MASK));
+ /* disable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
+ ~(UVD_VCPU_CNTL__CLK_EN_MASK));
- /* apply soft reset */
- tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
+ /* apply soft reset */
+ tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
- /* clear status */
- WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);
+ /* clear status */
+ WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);
- /* apply HW clock gating */
- vcn_v3_0_enable_clock_gating(adev, i);
+ /* apply HW clock gating */
+ vcn_v3_0_enable_clock_gating(vinst);
- /* enable VCN power gating */
- vcn_v3_0_enable_static_power_gating(adev, i);
- }
+ /* enable VCN power gating */
+ vcn_v3_0_enable_static_power_gating(vinst);
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, false, i);
- }
+done:
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
- return 0;
+ return r;
}
-static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state)
+static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
volatile struct amdgpu_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t reg_data = 0;
@@ -1928,11 +1968,11 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
uint32_t reg = amdgpu_ib_get_value(ib, i);
uint32_t val = amdgpu_ib_get_value(ib, i + 1);
- if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
+ if (reg == PACKET0(p->adev->vcn.inst[ring->me].internal.data0, 0)) {
msg_lo = val;
- } else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) {
+ } else if (reg == PACKET0(p->adev->vcn.inst[ring->me].internal.data1, 0)) {
msg_hi = val;
- } else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) &&
+ } else if (reg == PACKET0(p->adev->vcn.inst[ring->me].internal.cmd, 0) &&
val == 0) {
r = vcn_v3_0_dec_msg(p, job,
((u64)msg_hi) << 32 | msg_lo);
@@ -2096,16 +2136,16 @@ static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
- for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
adev->vcn.inst[i].ring_enc[j].funcs = &vcn_v3_0_enc_ring_vm_funcs;
adev->vcn.inst[i].ring_enc[j].me = i;
}
}
}
-static bool vcn_v3_0_is_idle(void *handle)
+static bool vcn_v3_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 1;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -2144,46 +2184,47 @@ static int vcn_v3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
if (adev->vcn.harvest_config & (1 << i))
continue;
if (enable) {
if (RREG32_SOC15(VCN, i, mmUVD_STATUS) != UVD_STATUS__IDLE)
return -EBUSY;
- vcn_v3_0_enable_clock_gating(adev, i);
+ vcn_v3_0_enable_clock_gating(vinst);
} else {
- vcn_v3_0_disable_clock_gating(adev, i);
+ vcn_v3_0_disable_clock_gating(vinst);
}
}
return 0;
}
-static int vcn_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
+static int vcn_v3_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = ip_block->adev;
- int ret;
+ struct amdgpu_device *adev = vinst->adev;
+ int ret = 0;
/* for SRIOV, guest should not control VCN Power-gating
* MMSCH FW should control Power-gating and clock-gating
* guest should avoid touching CGC and PG
*/
if (amdgpu_sriov_vf(adev)) {
- adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
+ vinst->cur_state = AMD_PG_STATE_UNGATE;
return 0;
}
- if (state == adev->vcn.cur_state)
+ if (state == vinst->cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
- ret = vcn_v3_0_stop(adev);
+ ret = vcn_v3_0_stop(vinst);
else
- ret = vcn_v3_0_start(adev);
+ ret = vcn_v3_0_start(vinst);
if (!ret)
- adev->vcn.cur_state = state;
+ vinst->cur_state = state;
return ret;
}
@@ -2248,7 +2289,7 @@ static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
- adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
adev->vcn.inst[i].irq.funcs = &vcn_v3_0_irq_funcs;
}
}
@@ -2326,7 +2367,7 @@ static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
.is_idle = vcn_v3_0_is_idle,
.wait_for_idle = vcn_v3_0_wait_for_idle,
.set_clockgating_state = vcn_v3_0_set_clockgating_state,
- .set_powergating_state = vcn_v3_0_set_powergating_state,
+ .set_powergating_state = vcn_set_powergating_state,
.dump_ip_state = vcn_v3_0_dump_ip_state,
.print_ip_state = vcn_v3_0_print_ip_state,
};
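A hedged illustration of the pattern the vcn_v3_0 hunks above adopt: helpers that used to take an (adev, inst_idx) pair now take a single struct amdgpu_vcn_inst *vinst handle that carries a back-pointer to the device, its own index, its power-gating state and its callbacks. The plain-C model below is compilable on its own and is only a sketch of that idiom — the struct layouts are abbreviated stand-ins, and vcn_stop_one() is a hypothetical callback, not a kernel symbol.

#include <stdio.h>

struct amdgpu_device;                     /* forward declaration */

struct amdgpu_vcn_inst {
        struct amdgpu_device *adev;       /* back-pointer to the device */
        int inst;                         /* instance index */
        int cur_state;                    /* last programmed PG state */
        int (*set_pg_state)(struct amdgpu_vcn_inst *vinst, int state);
};

struct amdgpu_device {
        int num_vcn_inst;
        unsigned int harvest_config;
        struct amdgpu_vcn_inst inst[4];
};

/* Per-instance stop: derive adev and the index from the handle, as the diff does. */
static int vcn_stop_one(struct amdgpu_vcn_inst *vinst, int state)
{
        struct amdgpu_device *adev = vinst->adev;
        int i = vinst->inst;

        if (adev->harvest_config & (1u << i))
                return 0;                 /* harvested instance: nothing to do */

        printf("stopping VCN instance %d\n", i);
        vinst->cur_state = state;
        return 0;
}

int main(void)
{
        struct amdgpu_device adev = { .num_vcn_inst = 2 };

        for (int i = 0; i < adev.num_vcn_inst; i++) {
                adev.inst[i].adev = &adev;
                adev.inst[i].inst = i;
                adev.inst[i].set_pg_state = vcn_stop_one;
        }

        /* Callers now pass the handle instead of an (adev, index) pair. */
        for (int i = 0; i < adev.num_vcn_inst; i++)
                adev.inst[i].set_pg_state(&adev.inst[i], 1 /* GATE */);

        return 0;
}

The point mirrored here is that once every helper takes only the handle, per-instance error paths (the goto done / dpm-disable sequence in vcn_v3_0_stop above) no longer need to thread instance indices or loop over all instances themselves.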
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 00551d6f0370..c6f6392c1c20 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -96,10 +96,10 @@ static int amdgpu_ih_clientid_vcns[] = {
static int vcn_v4_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state);
-static int vcn_v4_0_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v4_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+static int vcn_v4_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
static void vcn_v4_0_unified_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev);
@@ -114,7 +114,7 @@ static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev);
static int vcn_v4_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- int i;
+ int i, r;
if (amdgpu_sriov_vf(adev)) {
adev->vcn.harvest_config = VCN_HARVEST_MMSCH;
@@ -126,14 +126,23 @@ static int vcn_v4_0_early_init(struct amdgpu_ip_block *ip_block)
}
}
- /* re-use enc ring as unified ring */
- adev->vcn.num_enc_rings = 1;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
+ /* re-use enc ring as unified ring */
+ adev->vcn.inst[i].num_enc_rings = 1;
vcn_v4_0_set_unified_ring_funcs(adev);
vcn_v4_0_set_irq_funcs(adev);
vcn_v4_0_set_ras_funcs(adev);
- return amdgpu_vcn_early_init(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ adev->vcn.inst[i].set_pg_state = vcn_v4_0_set_pg_state;
+
+ r = amdgpu_vcn_early_init(adev, i);
+ if (r)
+ return r;
+ }
+
+ return 0;
}
static int vcn_v4_0_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
@@ -176,20 +185,20 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
uint32_t *ptr;
- r = amdgpu_vcn_sw_init(adev);
- if (r)
- return r;
-
- amdgpu_vcn_setup_ucode(adev);
-
- r = amdgpu_vcn_resume(adev);
- if (r)
- return r;
-
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
if (adev->vcn.harvest_config & (1 << i))
continue;
+ r = amdgpu_vcn_sw_init(adev, i);
+ if (r)
+ return r;
+
+ amdgpu_vcn_setup_ucode(adev, i);
+
+ r = amdgpu_vcn_resume(adev, i);
+ if (r)
+ return r;
+
/* Init instance 0 sched_score to 1, so it's scheduled after other instances */
if (i == 0)
atomic_set(&adev->vcn.inst[i].sched_score, 1);
@@ -211,7 +220,8 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
ring = &adev->vcn.inst[i].ring_enc[0];
ring->use_doorbell = true;
if (amdgpu_sriov_vf(adev))
- ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + i * (adev->vcn.num_enc_rings + 1) + 1;
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + i *
+ (adev->vcn.inst[i].num_enc_rings + 1) + 1;
else
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;
ring->vm_hub = AMDGPU_MMHUB0(0);
@@ -223,6 +233,9 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
vcn_v4_0_fw_shared_init(adev, i);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_pause_dpg_mode;
}
/* TODO: Add queue reset mask when FW fully supports it */
@@ -235,8 +248,6 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- adev->vcn.pause_dpg_mode = vcn_v4_0_pause_dpg_mode;
r = amdgpu_vcn_ras_sw_init(adev);
if (r)
@@ -288,16 +299,23 @@ static int vcn_v4_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
- r = amdgpu_vcn_suspend(adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(adev, i);
+ if (r)
+ return r;
+ }
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- r = amdgpu_vcn_sw_fini(adev);
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_sw_fini(adev, i);
+ if (r)
+ return r;
+ }
kfree(adev->vcn.ip_dump);
- return r;
+ return 0;
}
/**
@@ -359,20 +377,23 @@ static int vcn_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int i;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
-
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (adev->vcn.harvest_config & (1 << i))
continue;
+
+ cancel_delayed_work_sync(&vinst->idle_work);
+
if (!amdgpu_sriov_vf(adev)) {
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+ (vinst->cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, i, regUVD_STATUS))) {
- vcn_v4_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
}
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
- amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
+ amdgpu_irq_put(adev, &vinst->ras_poison_irq, 0);
}
return 0;
@@ -387,15 +408,20 @@ static int vcn_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v4_0_suspend(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
r = vcn_v4_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(ip_block->adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(ip_block->adev, i);
+ if (r)
+ return r;
+ }
- return r;
+ return 0;
}
/**
@@ -407,11 +433,14 @@ static int vcn_v4_0_suspend(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v4_0_resume(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
- r = amdgpu_vcn_resume(ip_block->adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_resume(ip_block->adev, i);
+ if (r)
+ return r;
+ }
r = vcn_v4_0_hw_init(ip_block);
@@ -421,13 +450,14 @@ static int vcn_v4_0_resume(struct amdgpu_ip_block *ip_block)
/**
* vcn_v4_0_mc_resume - memory controller programming
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
 * Let the VCN memory controller know its offsets
*/
-static void vcn_v4_0_mc_resume(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_mc_resume(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t offset, size;
const struct common_firmware_header *hdr;
@@ -481,14 +511,16 @@ static void vcn_v4_0_mc_resume(struct amdgpu_device *adev, int inst)
/**
* vcn_v4_0_mc_resume_dpg_mode - memory controller programming for dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
 * Let the VCN memory controller know its offsets with dpg mode
*/
-static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t offset, size;
const struct common_firmware_header *hdr;
hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
@@ -588,13 +620,14 @@ static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
/**
* vcn_v4_0_disable_static_power_gating - disable VCN static power gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Disable static power gating for VCN block
*/
-static void vcn_v4_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data = 0;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -653,13 +686,14 @@ static void vcn_v4_0_disable_static_power_gating(struct amdgpu_device *adev, int
/**
* vcn_v4_0_enable_static_power_gating - enable VCN static power gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Enable static power gating for VCN block
*/
-static void vcn_v4_0_enable_static_power_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -708,13 +742,14 @@ static void vcn_v4_0_enable_static_power_gating(struct amdgpu_device *adev, int
/**
* vcn_v4_0_disable_clock_gating - disable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Disable clock gating for VCN block
*/
-static void vcn_v4_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -819,16 +854,18 @@ static void vcn_v4_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
/**
* vcn_v4_0_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
* @sram_sel: sram select
- * @inst_idx: instance number index
* @indirect: indirectly write sram
*
* Disable clock gating for VCN block with dpg mode
*/
-static void vcn_v4_0_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
- int inst_idx, uint8_t indirect)
+static void vcn_v4_0_disable_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ uint8_t sram_sel,
+ uint8_t indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t reg_data = 0;
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -876,13 +913,14 @@ static void vcn_v4_0_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, u
/**
* vcn_v4_0_enable_clock_gating - enable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Enable clock gating for VCN block
*/
-static void vcn_v4_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -932,9 +970,11 @@ static void vcn_v4_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL, data);
}
-static void vcn_v4_0_enable_ras(struct amdgpu_device *adev, int inst_idx,
+static void vcn_v4_0_enable_ras(struct amdgpu_vcn_inst *vinst,
bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t tmp;
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
@@ -957,14 +997,15 @@ static void vcn_v4_0_enable_ras(struct amdgpu_device *adev, int inst_idx,
/**
* vcn_v4_0_start_dpg_mode - VCN start with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
* Start VCN block with dpg mode
*/
-static int vcn_v4_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static int vcn_v4_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
@@ -982,7 +1023,7 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
/* enable clock gating */
- vcn_v4_0_disable_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
+ vcn_v4_0_disable_clock_gating_dpg_mode(vinst, 0, indirect);
/* enable VCPU clock */
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
@@ -1030,7 +1071,7 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
- vcn_v4_0_mc_resume_dpg_mode(adev, inst_idx, indirect);
+ vcn_v4_0_mc_resume_dpg_mode(vinst, indirect);
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
@@ -1042,7 +1083,7 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, inst_idx, regUVD_LMI_CTRL2), tmp, 0, indirect);
- vcn_v4_0_enable_ras(adev, inst_idx, indirect);
+ vcn_v4_0_enable_ras(vinst, indirect);
/* enable master interrupt */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
@@ -1086,183 +1127,179 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
/**
* vcn_v4_0_start - VCN start
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Start VCN block
*/
-static int vcn_v4_0_start(struct amdgpu_device *adev)
+static int vcn_v4_0_start(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
- int i, j, k, r;
+ int j, k, r;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, true, i);
- }
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v4_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
- continue;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ return vcn_v4_0_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);
- /* disable VCN power gating */
- vcn_v4_0_disable_static_power_gating(adev, i);
-
- /* set VCN status busy */
- tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
- WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);
-
- /*SW clock gating */
- vcn_v4_0_disable_clock_gating(adev, i);
-
- /* enable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
-
- /* disable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
-
- /* enable LMI MC and UMC channels */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
-
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
-
- /* setup regUVD_LMI_CTRL */
- tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
- WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
- UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
- UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
- UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
- UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
-
- /* setup regUVD_MPC_CNTL */
- tmp = RREG32_SOC15(VCN, i, regUVD_MPC_CNTL);
- tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
- tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
- WREG32_SOC15(VCN, i, regUVD_MPC_CNTL, tmp);
-
- /* setup UVD_MPC_SET_MUXA0 */
- WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXA0,
- ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
-
- /* setup UVD_MPC_SET_MUXB0 */
- WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXB0,
- ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
-
- /* setup UVD_MPC_SET_MUX */
- WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUX,
- ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
- (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
-
- vcn_v4_0_mc_resume(adev, i);
-
- /* VCN global tiling registers */
- WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
-
- /* unblock VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
-
- /* release VCPU reset to boot */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- for (j = 0; j < 10; ++j) {
- uint32_t status;
-
- for (k = 0; k < 100; ++k) {
- status = RREG32_SOC15(VCN, i, regUVD_STATUS);
- if (status & 2)
- break;
- mdelay(10);
- if (amdgpu_emu_mode == 1)
- msleep(1);
- }
+ /* disable VCN power gating */
+ vcn_v4_0_disable_static_power_gating(vinst);
+
+ /* set VCN status busy */
+ tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);
+
+ /* SW clock gating */
+ vcn_v4_0_disable_clock_gating(vinst);
- if (amdgpu_emu_mode == 1) {
- r = -1;
- if (status & 2) {
- r = 0;
- break;
- }
- } else {
+ /* enable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
+
+ /* disable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+ /* enable LMI MC and UMC channels */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+
+ /* setup regUVD_LMI_CTRL */
+ tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
+ WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+ /* setup regUVD_MPC_CNTL */
+ tmp = RREG32_SOC15(VCN, i, regUVD_MPC_CNTL);
+ tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
+ tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
+ WREG32_SOC15(VCN, i, regUVD_MPC_CNTL, tmp);
+
+ /* setup UVD_MPC_SET_MUXA0 */
+ WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXA0,
+ ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
+
+ /* setup UVD_MPC_SET_MUXB0 */
+ WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXB0,
+ ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
+
+ /* setup UVD_MPC_SET_MUX */
+ WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUX,
+ ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
+ (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
+
+ vcn_v4_0_mc_resume(vinst);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* unblock VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* release VCPU reset to boot */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ for (j = 0; j < 10; ++j) {
+ uint32_t status;
+
+ for (k = 0; k < 100; ++k) {
+ status = RREG32_SOC15(VCN, i, regUVD_STATUS);
+ if (status & 2)
+ break;
+ mdelay(10);
+ if (amdgpu_emu_mode == 1)
+ msleep(1);
+ }
+
+ if (amdgpu_emu_mode == 1) {
+ r = -1;
+ if (status & 2) {
r = 0;
- if (status & 2)
- break;
-
- dev_err(adev->dev, "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
- mdelay(10);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- mdelay(10);
- r = -1;
+ break;
}
+ } else {
+ r = 0;
+ if (status & 2)
+ break;
+
+ dev_err(adev->dev, "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ mdelay(10);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ mdelay(10);
+ r = -1;
}
+ }
- if (r) {
- dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
- return r;
- }
+ if (r) {
+ dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
+ return r;
+ }
- /* enable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
- UVD_MASTINT_EN__VCPU_EN_MASK,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- /* clear the busy bit of VCN_STATUS */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
- ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+ /* clear the busy bit of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
- ring = &adev->vcn.inst[i].ring_enc[0];
- WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
- ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
- VCN_RB1_DB_CTRL__EN_MASK);
-
- WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
- WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);
-
- tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
- tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
- WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
- WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
- WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);
-
- tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
- WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
- ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
-
- tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
- tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
- WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
- }
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
+ ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
+
+ WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);
+
+ tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
+ tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
+ WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+ WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
+ WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);
+
+ tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
+ WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
+ ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
+
+ tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
+ tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
+ WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
return 0;
}
@@ -1518,17 +1555,18 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
/**
* vcn_v4_0_stop_dpg_mode - VCN stop with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
*
* Stop VCN block with dpg mode
*/
-static void vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v4_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
uint32_t tmp;
- vcn_v4_0_pause_dpg_mode(adev, inst_idx, &state);
+ vcn_v4_0_pause_dpg_mode(vinst, &state);
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
@@ -1548,87 +1586,87 @@ static void vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
/**
* vcn_v4_0_stop - VCN stop
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Stop VCN block
*/
-static int vcn_v4_0_stop(struct amdgpu_device *adev)
+static int vcn_v4_0_stop(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
uint32_t tmp;
- int i, r = 0;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ int r = 0;
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- vcn_v4_0_stop_dpg_mode(adev, i);
- continue;
- }
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
- /* wait for vcn idle */
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
- if (r)
- return r;
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ vcn_v4_0_stop_dpg_mode(vinst);
+ r = 0;
+ goto done;
+ }
- tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__READ_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ /* wait for vcn idle */
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
+ if (r)
+ goto done;
- /* disable LMI UMC channel */
- tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
- tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
- WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
- tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
- UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
+
+ /* disable LMI UMC channel */
+ tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
+ tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+ WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
- /* block VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
- UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+ /* block VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
+ UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
- /* reset VCPU */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ /* reset VCPU */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
- /* disable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~(UVD_VCPU_CNTL__CLK_EN_MASK));
+ /* disable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~(UVD_VCPU_CNTL__CLK_EN_MASK));
- /* apply soft reset */
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+ /* apply soft reset */
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
- /* clear status */
- WREG32_SOC15(VCN, i, regUVD_STATUS, 0);
+ /* clear status */
+ WREG32_SOC15(VCN, i, regUVD_STATUS, 0);
- /* apply HW clock gating */
- vcn_v4_0_enable_clock_gating(adev, i);
+ /* apply HW clock gating */
+ vcn_v4_0_enable_clock_gating(vinst);
- /* enable VCN power gating */
- vcn_v4_0_enable_static_power_gating(adev, i);
- }
+ /* enable VCN power gating */
+ vcn_v4_0_enable_static_power_gating(vinst);
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, false, i);
- }
+done:
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
return 0;
}
@@ -1636,15 +1674,16 @@ static int vcn_v4_0_stop(struct amdgpu_device *adev)
/**
* vcn_v4_0_pause_dpg_mode - VCN pause with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @new_state: pause state
*
* Pause dpg mode for VCN block
*/
-static int vcn_v4_0_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
- struct dpg_pause_state *new_state)
+static int vcn_v4_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t reg_data = 0;
int ret_code;
@@ -1964,13 +2003,13 @@ static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev)
/**
* vcn_v4_0_is_idle - check VCN block is idle
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block structure
*
* Check whether VCN block is idle
*/
-static bool vcn_v4_0_is_idle(void *handle)
+static bool vcn_v4_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 1;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -2024,54 +2063,48 @@ static int vcn_v4_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (adev->vcn.harvest_config & (1 << i))
continue;
if (enable) {
if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
return -EBUSY;
- vcn_v4_0_enable_clock_gating(adev, i);
+ vcn_v4_0_enable_clock_gating(vinst);
} else {
- vcn_v4_0_disable_clock_gating(adev, i);
+ vcn_v4_0_disable_clock_gating(vinst);
}
}
return 0;
}
-/**
- * vcn_v4_0_set_powergating_state - set VCN block powergating state
- *
- * @ip_block: amdgpu_ip_block pointer
- * @state: power gating state
- *
- * Set VCN block powergating state
- */
-static int vcn_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
+static int vcn_v4_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = ip_block->adev;
- int ret;
+ struct amdgpu_device *adev = vinst->adev;
+ int ret = 0;
/* for SRIOV, guest should not control VCN Power-gating
* MMSCH FW should control Power-gating and clock-gating
* guest should avoid touching CGC and PG
*/
if (amdgpu_sriov_vf(adev)) {
- adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
+ vinst->cur_state = AMD_PG_STATE_UNGATE;
return 0;
}
- if (state == adev->vcn.cur_state)
+ if (state == vinst->cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
- ret = vcn_v4_0_stop(adev);
+ ret = vcn_v4_0_stop(vinst);
else
- ret = vcn_v4_0_start(adev);
+ ret = vcn_v4_0_start(vinst);
if (!ret)
- adev->vcn.cur_state = state;
+ vinst->cur_state = state;
return ret;
}
@@ -2163,10 +2196,10 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
- adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
adev->vcn.inst[i].irq.funcs = &vcn_v4_0_irq_funcs;
- adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v4_0_ras_irq_funcs;
}
}
@@ -2244,7 +2277,7 @@ static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
.is_idle = vcn_v4_0_is_idle,
.wait_for_idle = vcn_v4_0_wait_for_idle,
.set_clockgating_state = vcn_v4_0_set_clockgating_state,
- .set_powergating_state = vcn_v4_0_set_powergating_state,
+ .set_powergating_state = vcn_set_powergating_state,
.dump_ip_state = vcn_v4_0_dump_ip_state,
.print_ip_state = vcn_v4_0_print_ip_state,
};
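The vcn_v4_0 ip_funcs table above now points .set_powergating_state at the shared vcn_set_powergating_state() helper, whose body is outside this diff. The snippet below is only a guess at its general shape, written as self-contained plain C with stand-in types: an IP-level dispatcher that fans the request out to each instance's set_pg_state callback (vcn_v4_0_set_pg_state in the hunks above) and reports the first failure. vcn_set_powergating_state_sketch() and demo_pg() are hypothetical names; the real amdgpu_vcn.c implementation may differ in details.

#include <stdio.h>

enum amd_powergating_state { AMD_PG_STATE_GATE, AMD_PG_STATE_UNGATE };

struct amdgpu_vcn_inst {
        int inst;
        int (*set_pg_state)(struct amdgpu_vcn_inst *vinst,
                            enum amd_powergating_state state);
};

struct amdgpu_vcn {
        int num_vcn_inst;
        struct amdgpu_vcn_inst inst[4];
};

/*
 * Sketch of a shared dispatcher: iterate every instance and let its own
 * callback do the version-specific start/stop.  The first error wins.
 */
static int vcn_set_powergating_state_sketch(struct amdgpu_vcn *vcn,
                                            enum amd_powergating_state state)
{
        int ret = 0;

        for (int i = 0; i < vcn->num_vcn_inst; i++) {
                struct amdgpu_vcn_inst *vinst = &vcn->inst[i];
                int r = vinst->set_pg_state(vinst, state);

                if (r && !ret)
                        ret = r;          /* remember the first failure */
        }
        return ret;
}

static int demo_pg(struct amdgpu_vcn_inst *vinst, enum amd_powergating_state state)
{
        printf("instance %d -> %s\n", vinst->inst,
               state == AMD_PG_STATE_GATE ? "gate" : "ungate");
        return 0;
}

int main(void)
{
        struct amdgpu_vcn vcn = { .num_vcn_inst = 2 };

        for (int i = 0; i < vcn.num_vcn_inst; i++) {
                vcn.inst[i].inst = i;
                vcn.inst[i].set_pg_state = demo_pg;
        }
        return vcn_set_powergating_state_sketch(&vcn, AMD_PG_STATE_GATE);
}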
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index ecdc027f8220..7446ecc55714 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -31,6 +31,7 @@
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"
+#include "vcn_v4_0_3.h"
#include "mmsch_v4_0_3.h"
#include "vcn/vcn_4_0_3_offset.h"
@@ -87,10 +88,10 @@ static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_3[] = {
static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev);
static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v4_0_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state);
-static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v4_0_3_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v4_0_3_set_ras_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_3_enable_ras(struct amdgpu_device *adev,
@@ -98,8 +99,7 @@ static void vcn_v4_0_3_enable_ras(struct amdgpu_device *adev,
static inline bool vcn_v4_0_3_normalizn_reqd(struct amdgpu_device *adev)
{
- return (amdgpu_sriov_vf(adev) ||
- (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)));
+ return (adev->vcn.caps & AMDGPU_VCN_CAPS(RRMT_ENABLED)) == 0;
}
/**
@@ -112,15 +112,25 @@ static inline bool vcn_v4_0_3_normalizn_reqd(struct amdgpu_device *adev)
static int vcn_v4_0_3_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int i, r;
- /* re-use enc ring as unified ring */
- adev->vcn.num_enc_rings = 1;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
+ /* re-use enc ring as unified ring */
+ adev->vcn.inst[i].num_enc_rings = 1;
vcn_v4_0_3_set_unified_ring_funcs(adev);
vcn_v4_0_3_set_irq_funcs(adev);
vcn_v4_0_3_set_ras_funcs(adev);
- return amdgpu_vcn_early_init(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ adev->vcn.inst[i].set_pg_state = vcn_v4_0_3_set_pg_state;
+
+ r = amdgpu_vcn_early_init(adev, i);
+ if (r)
+ return r;
+ }
+
+ return 0;
}
static int vcn_v4_0_3_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
@@ -152,16 +162,6 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
uint32_t *ptr;
- r = amdgpu_vcn_sw_init(adev);
- if (r)
- return r;
-
- amdgpu_vcn_setup_ucode(adev);
-
- r = amdgpu_vcn_resume(adev);
- if (r)
- return r;
-
/* VCN DEC TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);
@@ -169,6 +169,17 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
return r;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+
+ r = amdgpu_vcn_sw_init(adev, i);
+ if (r)
+ return r;
+
+ amdgpu_vcn_setup_ucode(adev, i);
+
+ r = amdgpu_vcn_resume(adev, i);
+ if (r)
+ return r;
+
vcn_inst = GET_INST(VCN, i);
ring = &adev->vcn.inst[i].ring_enc[0];
@@ -192,6 +203,9 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
return r;
vcn_v4_0_3_fw_shared_init(adev, i);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode;
}
/* TODO: Add queue reset mask when FW fully supports it */
@@ -204,9 +218,6 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- adev->vcn.pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode;
-
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) {
r = amdgpu_vcn_ras_sw_init(adev);
if (r) {
@@ -257,16 +268,23 @@ static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
- r = amdgpu_vcn_suspend(adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(adev, i);
+ if (r)
+ return r;
+ }
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- r = amdgpu_vcn_sw_fini(adev);
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_sw_fini(adev, i);
+ if (r)
+ return r;
+ }
kfree(adev->vcn.ip_dump);
- return r;
+ return 0;
}
/**
@@ -295,6 +313,11 @@ static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
ring->sched.ready = true;
}
} else {
+ /* This flag is not set for VF, assumed to be disabled always */
+ if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) &
+ 0x100)
+ adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
+
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
struct amdgpu_vcn4_fw_shared *fw_shared;
@@ -345,11 +368,16 @@ static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int i;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
- if (adev->vcn.cur_state != AMD_PG_STATE_GATE)
- vcn_v4_0_3_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ cancel_delayed_work_sync(&vinst->idle_work);
+
+ if (vinst->cur_state != AMD_PG_STATE_GATE)
+ vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
+ }
return 0;
}
@@ -363,15 +391,20 @@ static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v4_0_3_suspend(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
r = vcn_v4_0_3_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(ip_block->adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(adev, i);
+ if (r)
+ return r;
+ }
- return r;
+ return 0;
}
/**
@@ -383,11 +416,14 @@ static int vcn_v4_0_3_suspend(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v4_0_3_resume(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
- r = amdgpu_vcn_resume(ip_block->adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_resume(ip_block->adev, i);
+ if (r)
+ return r;
+ }
r = vcn_v4_0_3_hw_init(ip_block);
@@ -397,13 +433,14 @@ static int vcn_v4_0_3_resume(struct amdgpu_ip_block *ip_block)
/**
* vcn_v4_0_3_mc_resume - memory controller programming
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number
+ * @vinst: VCN instance
*
 * Let the VCN memory controller know its offsets
*/
-static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v4_0_3_mc_resume(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t offset, size, vcn_inst;
const struct common_firmware_header *hdr;
@@ -471,14 +508,16 @@ static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx)
/**
* vcn_v4_0_3_mc_resume_dpg_mode - memory controller programming for dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
 * Let the VCN memory controller know its offsets with dpg mode
*/
-static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t offset, size;
const struct common_firmware_header *hdr;
@@ -585,13 +624,14 @@ static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
/**
* vcn_v4_0_3_disable_clock_gating - disable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number
+ * @vinst: VCN instance
*
* Disable clock gating for VCN block
*/
-static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t data;
int vcn_inst;
@@ -678,16 +718,18 @@ static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst
/**
* vcn_v4_0_3_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
* @sram_sel: sram select
- * @inst_idx: instance number index
* @indirect: indirectly write sram
*
* Disable clock gating for VCN block with dpg mode
*/
-static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
- int inst_idx, uint8_t indirect)
+static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ uint8_t sram_sel,
+ uint8_t indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t reg_data = 0;
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -729,13 +771,14 @@ static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_device *adev,
/**
* vcn_v4_0_3_enable_clock_gating - enable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number
+ * @vinst: VCN instance
*
* Enable clock gating for VCN block
*/
-static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t data;
int vcn_inst;
@@ -780,14 +823,16 @@ static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_
/**
* vcn_v4_0_3_start_dpg_mode - VCN start with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
* Start VCN block with dpg mode
*/
-static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
volatile struct amdgpu_vcn4_fw_shared *fw_shared =
adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
@@ -815,7 +860,7 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
}
/* enable clock gating */
- vcn_v4_0_3_disable_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
+ vcn_v4_0_3_disable_clock_gating_dpg_mode(vinst, 0, indirect);
/* enable VCPU clock */
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
@@ -865,7 +910,7 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
- vcn_v4_0_3_mc_resume_dpg_mode(adev, inst_idx, indirect);
+ vcn_v4_0_3_mc_resume_dpg_mode(vinst, indirect);
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
@@ -1112,191 +1157,185 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
/**
* vcn_v4_0_3_start - VCN start
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Start VCN block
*/
-static int vcn_v4_0_3_start(struct amdgpu_device *adev)
+static int vcn_v4_0_3_start(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_ring *ring;
- int i, j, k, r, vcn_inst;
+ int j, k, r, vcn_inst;
uint32_t tmp;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, true, i);
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ return vcn_v4_0_3_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v4_0_3_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
- continue;
- }
+ vcn_inst = GET_INST(VCN, i);
+ /* set VCN status busy */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) |
+ UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);
- vcn_inst = GET_INST(VCN, i);
- /* set VCN status busy */
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) |
- UVD_STATUS__UVD_BUSY;
- WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);
-
- /*SW clock gating */
- vcn_v4_0_3_disable_clock_gating(adev, i);
-
- /* enable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__CLK_EN_MASK,
- ~UVD_VCPU_CNTL__CLK_EN_MASK);
-
- /* disable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
-
- /* enable LMI MC and UMC channels */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
-
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
- tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
-
- /* setup regUVD_LMI_CTRL */
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
- WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL,
- tmp | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
- UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
- UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
- UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
-
- /* setup regUVD_MPC_CNTL */
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL);
- tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
- tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
- WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL, tmp);
-
- /* setup UVD_MPC_SET_MUXA0 */
- WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXA0,
- ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
-
- /* setup UVD_MPC_SET_MUXB0 */
- WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXB0,
- ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
-
- /* setup UVD_MPC_SET_MUX */
- WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUX,
- ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
- (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
-
- vcn_v4_0_3_mc_resume(adev, i);
-
- /* VCN global tiling registers */
- WREG32_SOC15(VCN, vcn_inst, regUVD_GFX8_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
- WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
-
- /* unblock VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
-
- /* release VCPU reset to boot */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ /* SW clock gating */
+ vcn_v4_0_3_disable_clock_gating(vinst);
- for (j = 0; j < 10; ++j) {
- uint32_t status;
+ /* enable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK,
+ ~UVD_VCPU_CNTL__CLK_EN_MASK);
- for (k = 0; k < 100; ++k) {
- status = RREG32_SOC15(VCN, vcn_inst,
- regUVD_STATUS);
- if (status & 2)
- break;
- mdelay(10);
- }
- r = 0;
- if (status & 2)
- break;
+ /* disable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- DRM_DEV_ERROR(adev->dev,
- "VCN decode not responding, trying to reset the VCPU!!!\n");
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
- regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
- mdelay(10);
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
- regUVD_VCPU_CNTL),
- 0, ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ /* enable LMI MC and UMC channels */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+
+ /* setup regUVD_LMI_CTRL */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL,
+ tmp | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+ /* setup regUVD_MPC_CNTL */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL);
+ tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
+ tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL, tmp);
+
+ /* setup UVD_MPC_SET_MUXA0 */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXA0,
+ ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
+
+ /* setup UVD_MPC_SET_MUXB0 */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXB0,
+ ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
+
+ /* setup UVD_MPC_SET_MUX */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUX,
+ ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
+ (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
+
+ vcn_v4_0_3_mc_resume(vinst);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_GFX8_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* unblock VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+ /* release VCPU reset to boot */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ for (j = 0; j < 10; ++j) {
+ uint32_t status;
+
+ for (k = 0; k < 100; ++k) {
+ status = RREG32_SOC15(VCN, vcn_inst,
+ regUVD_STATUS);
+ if (status & 2)
+ break;
mdelay(10);
- r = -1;
}
+ r = 0;
+ if (status & 2)
+ break;
- if (r) {
- DRM_DEV_ERROR(adev->dev, "VCN decode not responding, giving up!!!\n");
- return r;
- }
+ DRM_DEV_ERROR(adev->dev,
+ "VCN decode not responding, trying to reset the VCPU!!!\n");
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
+ regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ mdelay(10);
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
+ regUVD_VCPU_CNTL),
+ 0, ~UVD_VCPU_CNTL__BLK_RST_MASK);
- /* enable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
- UVD_MASTINT_EN__VCPU_EN_MASK,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
+ mdelay(10);
+ r = -1;
+ }
- /* clear the busy bit of VCN_STATUS */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
- ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+ if (r) {
+ DRM_DEV_ERROR(adev->dev, "VCN decode not responding, giving up!!!\n");
+ return r;
+ }
- ring = &adev->vcn.inst[i].ring_enc[0];
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- /* program the RB_BASE for ring buffer */
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO,
- lower_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI,
- upper_32_bits(ring->gpu_addr));
+ /* clear the busy bit of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE,
- ring->ring_size / sizeof(uint32_t));
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- /* resetting ring, fw should not check RB ring */
- tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
- tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
- WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ /* program the RB_BASE for ring buffer */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI,
+ upper_32_bits(ring->gpu_addr));
+
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE,
+ ring->ring_size / sizeof(uint32_t));
+
+ /* resetting ring, fw should not check RB ring */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+ tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
- /* Initialize the ring buffer's read and write pointers */
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
- tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
- tmp |= VCN_RB_ENABLE__RB_EN_MASK;
- WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+ tmp |= VCN_RB_ENABLE__RB_EN_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
- ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
- fw_shared->sq.queue_mode &=
- cpu_to_le32(~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF));
+ ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
+ fw_shared->sq.queue_mode &=
+ cpu_to_le32(~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF));
- }
return 0;
}
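
For illustration, a minimal self-contained model of the calling pattern this conversion enables: start/stop now operate on one amdgpu_vcn_inst, so iteration over instances happens in the caller through per-instance callbacks. The struct and function names below are stand-ins for the sketch, not the real amdgpu definitions.

/* Stand-in model of per-instance dispatch; not the real amdgpu structs. */
#include <stdio.h>

struct model_vcn_inst {
	int inst;
	int (*start)(struct model_vcn_inst *vinst); /* plays the role of vcn_v4_0_3_start() */
};

static int model_start(struct model_vcn_inst *vinst)
{
	printf("starting VCN instance %d\n", vinst->inst);
	return 0;
}

int main(void)
{
	struct model_vcn_inst inst[] = {
		{ .inst = 0, .start = model_start },
		{ .inst = 1, .start = model_start },
	};
	int i, r;

	/* The caller now loops over instances; each start handles exactly one. */
	for (i = 0; i < 2; i++) {
		r = inst[i].start(&inst[i]);
		if (r)
			return r;
	}
	return 0;
}
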
/**
* vcn_v4_0_3_stop_dpg_mode - VCN stop with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
*
* Stop VCN block with dpg mode
*/
-static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t tmp;
int vcn_inst;
@@ -1322,102 +1361,97 @@ static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
/**
* vcn_v4_0_3_stop - VCN stop
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Stop VCN block
*/
-static int vcn_v4_0_3_stop(struct amdgpu_device *adev)
+static int vcn_v4_0_3_stop(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
- int i, r = 0, vcn_inst;
+ int r = 0, vcn_inst;
uint32_t tmp;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- vcn_inst = GET_INST(VCN, i);
+ vcn_inst = GET_INST(VCN, i);
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- vcn_v4_0_3_stop_dpg_mode(adev, i);
- continue;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ vcn_v4_0_3_stop_dpg_mode(vinst);
+ goto Done;
+ }
- /* wait for vcn idle */
- r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS,
- UVD_STATUS__IDLE, 0x7);
- if (r)
- goto Done;
-
- tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__READ_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
- tmp);
- if (r)
- goto Done;
-
- /* stall UMC channel */
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
- tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
- WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
- tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
- UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
- tmp);
- if (r)
- goto Done;
+ /* wait for vcn idle */
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS,
+ UVD_STATUS__IDLE, 0x7);
+ if (r)
+ goto Done;
+
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
+ tmp);
+ if (r)
+ goto Done;
+
+ /* stall UMC channel */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
+ tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
+ tmp);
+ if (r)
+ goto Done;
- /* Unblock VCPU Register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
- UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+ /* Unblock VCPU Register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
+ UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
- /* release VCPU reset to boot */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ /* release VCPU reset to boot */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
- /* disable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
- ~(UVD_VCPU_CNTL__CLK_EN_MASK));
+ /* disable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
+ ~(UVD_VCPU_CNTL__CLK_EN_MASK));
- /* reset LMI UMC/LMI/VCPU */
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+ /* reset LMI UMC/LMI/VCPU */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
- /* clear VCN status */
- WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
+ /* clear VCN status */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
- /* apply HW clock gating */
- vcn_v4_0_3_enable_clock_gating(adev, i);
- }
-Done:
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, false, i);
- }
+ /* apply HW clock gating */
+ vcn_v4_0_3_enable_clock_gating(vinst);
+Done:
return 0;
}
/**
* vcn_v4_0_3_pause_dpg_mode - VCN pause with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @new_state: pause state
*
* Pause dpg mode for VCN block
*/
-static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
- struct dpg_pause_state *new_state)
+static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state)
{
return 0;
@@ -1461,8 +1495,8 @@ static uint64_t vcn_v4_0_3_unified_ring_get_wptr(struct amdgpu_ring *ring)
regUVD_RB_WPTR);
}
-static void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
- uint32_t val, uint32_t mask)
+void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask)
{
/* Use normalized offsets when required */
if (vcn_v4_0_3_normalizn_reqd(ring->adev))
@@ -1474,7 +1508,8 @@ static void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t
amdgpu_ring_write(ring, val);
}
-static void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
+void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val)
{
/* Use normalized offsets when required */
if (vcn_v4_0_3_normalizn_reqd(ring->adev))
@@ -1485,8 +1520,8 @@ static void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg
amdgpu_ring_write(ring, val);
}
-static void vcn_v4_0_3_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned int vmid, uint64_t pd_addr)
+void vcn_v4_0_3_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned int vmid, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
@@ -1498,7 +1533,7 @@ static void vcn_v4_0_3_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
lower_32_bits(pd_addr), 0xffffffff);
}
-static void vcn_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+void vcn_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
/* VCN engine access for HDP flush doesn't work when RRMT is enabled.
* This is a workaround to avoid any HDP flush through VCN ring.
@@ -1581,13 +1616,13 @@ static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev)
/**
* vcn_v4_0_3_is_idle - check VCN block is idle
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block structure
*
* Check whether VCN block is idle
*/
-static bool vcn_v4_0_3_is_idle(void *handle)
+static bool vcn_v4_0_3_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 1;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1635,51 +1670,45 @@ static int vcn_v4_0_3_set_clockgating_state(struct amdgpu_ip_block *ip_block,
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (enable) {
if (RREG32_SOC15(VCN, GET_INST(VCN, i),
regUVD_STATUS) != UVD_STATUS__IDLE)
return -EBUSY;
- vcn_v4_0_3_enable_clock_gating(adev, i);
+ vcn_v4_0_3_enable_clock_gating(vinst);
} else {
- vcn_v4_0_3_disable_clock_gating(adev, i);
+ vcn_v4_0_3_disable_clock_gating(vinst);
}
}
return 0;
}
-/**
- * vcn_v4_0_3_set_powergating_state - set VCN block powergating state
- *
- * @ip_block: amdgpu_ip_block pointer
- * @state: power gating state
- *
- * Set VCN block powergating state
- */
-static int vcn_v4_0_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
+static int vcn_v4_0_3_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = ip_block->adev;
- int ret;
+ struct amdgpu_device *adev = vinst->adev;
+ int ret = 0;
/* for SRIOV, guest should not control VCN Power-gating
* MMSCH FW should control Power-gating and clock-gating
* guest should avoid touching CGC and PG
*/
if (amdgpu_sriov_vf(adev)) {
- adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
+ vinst->cur_state = AMD_PG_STATE_UNGATE;
return 0;
}
- if (state == adev->vcn.cur_state)
+ if (state == vinst->cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
- ret = vcn_v4_0_3_stop(adev);
+ ret = vcn_v4_0_3_stop(vinst);
else
- ret = vcn_v4_0_3_start(adev);
+ ret = vcn_v4_0_3_start(vinst);
if (!ret)
- adev->vcn.cur_state = state;
+ vinst->cur_state = state;
return ret;
}
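
The ip_funcs hook now points at the shared vcn_set_powergating_state helper (see the ip_funcs change below); its body is not part of this hunk, so the following is only a plausible sketch of its shape, assuming it forwards the request to each instance's set_pg_state callback installed at early_init.

/* Hedged sketch only: the real vcn_set_powergating_state() lives in common
 * VCN code and may differ. It is assumed here to fan out to the per-instance
 * set_pg_state callbacks that this patch installs. */
static int vcn_set_powergating_state_sketch(struct amdgpu_ip_block *ip_block,
					    enum amd_powergating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, ret;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

		ret = vinst->set_pg_state(vinst, state);
		if (ret)
			return ret;
	}
	return 0;
}
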
@@ -1841,7 +1870,7 @@ static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
.is_idle = vcn_v4_0_3_is_idle,
.wait_for_idle = vcn_v4_0_3_wait_for_idle,
.set_clockgating_state = vcn_v4_0_3_set_clockgating_state,
- .set_powergating_state = vcn_v4_0_3_set_powergating_state,
+ .set_powergating_state = vcn_set_powergating_state,
.dump_ip_state = vcn_v4_0_3_dump_ip_state,
.print_ip_state = vcn_v4_0_3_print_ip_state,
};
@@ -1931,11 +1960,13 @@ static int vcn_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank
misc0 = bank->regs[ACA_REG_IDX_MISC0];
switch (type) {
case ACA_SMU_TYPE_UE:
+ bank->aca_err_type = ACA_ERROR_TYPE_UE;
ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
1ULL);
break;
case ACA_SMU_TYPE_CE:
- ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE,
+ bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank);
+ ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
ACA_REG__MISC0__ERRCNT(misc0));
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h
index 0b046114373a..03572a1d0c9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h
@@ -26,4 +26,13 @@
extern const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block;
+void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask);
+
+void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val);
+void vcn_v4_0_3_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned int vmid, uint64_t pd_addr);
+void vcn_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring);
+
#endif /* __VCN_V4_0_3_H__ */
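
For illustration, the point of dropping static and exporting these prototypes is that another VCN variant can wire the same emit helpers into its own ring funcs table instead of duplicating the normalized-offset handling. A hedged sketch of such a consumer follows; the table name is hypothetical and the callback fields follow the usual amdgpu_ring_funcs layout.

/* Hypothetical consumer of the exported helpers; the table name is illustrative. */
#include "vcn_v4_0_3.h"

static const struct amdgpu_ring_funcs example_vcn_enc_ring_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait,
	.emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg,
	.emit_vm_flush = vcn_v4_0_3_enc_ring_emit_vm_flush,
	.emit_hdp_flush = vcn_v4_0_3_ring_emit_hdp_flush,
	/* remaining callbacks elided */
};
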
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index 23d3c16c9d9f..ba603b2246e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -95,10 +95,10 @@ static int amdgpu_ih_clientid_vcns[] = {
static void vcn_v4_0_5_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v4_0_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state);
-static int vcn_v4_0_5_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v4_0_5_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+static int vcn_v4_0_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring);
/**
@@ -112,13 +112,26 @@ static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring);
static int vcn_v4_0_5_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int i, r;
+
+ if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 6))
+ adev->vcn.per_inst_fw = true;
- /* re-use enc ring as unified ring */
- adev->vcn.num_enc_rings = 1;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
+ /* re-use enc ring as unified ring */
+ adev->vcn.inst[i].num_enc_rings = 1;
vcn_v4_0_5_set_unified_ring_funcs(adev);
vcn_v4_0_5_set_irq_funcs(adev);
- return amdgpu_vcn_early_init(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ adev->vcn.inst[i].set_pg_state = vcn_v4_0_5_set_pg_state;
+
+ r = amdgpu_vcn_early_init(adev, i);
+ if (r)
+ return r;
+ }
+
+ return 0;
}
/**
@@ -136,15 +149,6 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
uint32_t *ptr;
- r = amdgpu_vcn_sw_init(adev);
- if (r)
- return r;
-
- amdgpu_vcn_setup_ucode(adev);
-
- r = amdgpu_vcn_resume(adev);
- if (r)
- return r;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
@@ -152,6 +156,16 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
if (adev->vcn.harvest_config & (1 << i))
continue;
+ r = amdgpu_vcn_sw_init(adev, i);
+ if (r)
+ return r;
+
+ amdgpu_vcn_setup_ucode(adev, i);
+
+ r = amdgpu_vcn_resume(adev, i);
+ if (r)
+ return r;
+
atomic_set(&adev->vcn.inst[i].sched_score, 0);
/* VCN UNIFIED TRAP */
@@ -170,7 +184,7 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
ring->use_doorbell = true;
if (amdgpu_sriov_vf(adev))
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
- i * (adev->vcn.num_enc_rings + 1) + 1;
+ i * (adev->vcn.inst[i].num_enc_rings + 1) + 1;
else
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
2 + 8 * i;
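
A worked example of the SR-IOV doorbell index computation above, now reading the per-instance num_enc_rings; the base value is made up for illustration, not taken from a real doorbell map.

/* Standalone worked example of the doorbell formula; numbers are illustrative. */
#include <stdio.h>

int main(void)
{
	unsigned int vcn_ring0_1 = 4;   /* assumed doorbell base index */
	unsigned int num_enc_rings = 1; /* unified ring only, as set in early_init */
	unsigned int i;

	for (i = 0; i < 2; i++)
		printf("inst %u -> doorbell %u\n", i,
		       (vcn_ring0_1 << 1) + i * (num_enc_rings + 1) + 1);
	/* prints: inst 0 -> doorbell 9, inst 1 -> doorbell 11 */
	return 0;
}
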
@@ -195,6 +209,9 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_5_pause_dpg_mode;
}
if (amdgpu_sriov_vf(adev)) {
@@ -203,9 +220,6 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- adev->vcn.pause_dpg_mode = vcn_v4_0_5_pause_dpg_mode;
-
/* Allocate memory for VCN IP Dump buffer */
ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
if (!ptr) {
@@ -247,15 +261,19 @@ static int vcn_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
- r = amdgpu_vcn_suspend(adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(adev, i);
+ if (r)
+ return r;
- r = amdgpu_vcn_sw_fini(adev);
+ r = amdgpu_vcn_sw_fini(adev, i);
+ if (r)
+ return r;
+ }
kfree(adev->vcn.ip_dump);
- return r;
+ return 0;
}
/**
@@ -300,16 +318,19 @@ static int vcn_v4_0_5_hw_fini(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int i;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
-
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (adev->vcn.harvest_config & (1 << i))
continue;
+
+ cancel_delayed_work_sync(&vinst->idle_work);
+
if (!amdgpu_sriov_vf(adev)) {
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
- RREG32_SOC15(VCN, i, regUVD_STATUS))) {
- vcn_v4_0_5_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ (vinst->cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(VCN, i, regUVD_STATUS))) {
+ vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
}
}
@@ -326,13 +347,18 @@ static int vcn_v4_0_5_hw_fini(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v4_0_5_suspend(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
r = vcn_v4_0_5_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(ip_block->adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(ip_block->adev, i);
+ if (r)
+ return r;
+ }
return r;
}
@@ -346,11 +372,14 @@ static int vcn_v4_0_5_suspend(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v4_0_5_resume(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
- r = amdgpu_vcn_resume(ip_block->adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_resume(ip_block->adev, i);
+ if (r)
+ return r;
+ }
r = vcn_v4_0_5_hw_init(ip_block);
@@ -360,13 +389,14 @@ static int vcn_v4_0_5_resume(struct amdgpu_ip_block *ip_block)
/**
* vcn_v4_0_5_mc_resume - memory controller programming
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Let the VCN memory controller know its offsets
*/
-static void vcn_v4_0_5_mc_resume(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_5_mc_resume(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t offset, size;
const struct common_firmware_header *hdr;
@@ -420,14 +450,16 @@ static void vcn_v4_0_5_mc_resume(struct amdgpu_device *adev, int inst)
/**
* vcn_v4_0_5_mc_resume_dpg_mode - memory controller programming for dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
* Let the VCN memory controller know its offsets with dpg mode
*/
-static void vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static void vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t offset, size;
const struct common_firmware_header *hdr;
@@ -534,13 +566,14 @@ static void vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
/**
* vcn_v4_0_5_disable_static_power_gating - disable VCN static power gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Disable static power gating for VCN block
*/
-static void vcn_v4_0_5_disable_static_power_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_5_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data = 0;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -593,13 +626,14 @@ static void vcn_v4_0_5_disable_static_power_gating(struct amdgpu_device *adev, i
/**
* vcn_v4_0_5_enable_static_power_gating - enable VCN static power gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Enable static power gating for VCN block
*/
-static void vcn_v4_0_5_enable_static_power_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_5_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -635,13 +669,14 @@ static void vcn_v4_0_5_enable_static_power_gating(struct amdgpu_device *adev, in
/**
* vcn_v4_0_5_disable_clock_gating - disable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Disable clock gating for VCN block
*/
-static void vcn_v4_0_5_disable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_5_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -746,16 +781,18 @@ static void vcn_v4_0_5_disable_clock_gating(struct amdgpu_device *adev, int inst
/**
* vcn_v4_0_5_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
* @sram_sel: sram select
- * @inst_idx: instance number index
* @indirect: indirectly write sram
*
* Disable clock gating for VCN block with dpg mode
*/
-static void vcn_v4_0_5_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
- int inst_idx, uint8_t indirect)
+static void vcn_v4_0_5_disable_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ uint8_t sram_sel,
+ uint8_t indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t reg_data = 0;
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -803,13 +840,14 @@ static void vcn_v4_0_5_disable_clock_gating_dpg_mode(struct amdgpu_device *adev,
/**
* vcn_v4_0_5_enable_clock_gating - enable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Enable clock gating for VCN block
*/
-static void vcn_v4_0_5_enable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v4_0_5_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -862,14 +900,16 @@ static void vcn_v4_0_5_enable_clock_gating(struct amdgpu_device *adev, int inst)
/**
* vcn_v4_0_5_start_dpg_mode - VCN start with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
* Start VCN block with dpg mode
*/
-static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
@@ -888,7 +928,7 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
(uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
/* enable clock gating */
- vcn_v4_0_5_disable_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
+ vcn_v4_0_5_disable_clock_gating_dpg_mode(vinst, 0, indirect);
/* enable VCPU clock */
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
@@ -936,7 +976,7 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
- vcn_v4_0_5_mc_resume_dpg_mode(adev, inst_idx, indirect);
+ vcn_v4_0_5_mc_resume_dpg_mode(vinst, indirect);
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
@@ -989,184 +1029,180 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
/**
* vcn_v4_0_5_start - VCN start
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Start VCN block
*/
-static int vcn_v4_0_5_start(struct amdgpu_device *adev)
+static int vcn_v4_0_5_start(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
- int i, j, k, r;
+ int j, k, r;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, true, i);
- }
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v4_0_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
- continue;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ return vcn_v4_0_5_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);
- /* disable VCN power gating */
- vcn_v4_0_5_disable_static_power_gating(adev, i);
-
- /* set VCN status busy */
- tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
- WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);
-
- /*SW clock gating */
- vcn_v4_0_5_disable_clock_gating(adev, i);
-
- /* enable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
-
- /* disable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
-
- /* enable LMI MC and UMC channels */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
-
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
-
- /* setup regUVD_LMI_CTRL */
- tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
- WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
- UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
- UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
- UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
- UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
-
- /* setup regUVD_MPC_CNTL */
- tmp = RREG32_SOC15(VCN, i, regUVD_MPC_CNTL);
- tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
- tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
- WREG32_SOC15(VCN, i, regUVD_MPC_CNTL, tmp);
-
- /* setup UVD_MPC_SET_MUXA0 */
- WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXA0,
- ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
-
- /* setup UVD_MPC_SET_MUXB0 */
- WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXB0,
- ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
-
- /* setup UVD_MPC_SET_MUX */
- WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUX,
- ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
- (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
-
- vcn_v4_0_5_mc_resume(adev, i);
-
- /* VCN global tiling registers */
- WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
-
- /* unblock VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
-
- /* release VCPU reset to boot */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- for (j = 0; j < 10; ++j) {
- uint32_t status;
-
- for (k = 0; k < 100; ++k) {
- status = RREG32_SOC15(VCN, i, regUVD_STATUS);
- if (status & 2)
- break;
- mdelay(10);
- if (amdgpu_emu_mode == 1)
- msleep(1);
- }
+ /* disable VCN power gating */
+ vcn_v4_0_5_disable_static_power_gating(vinst);
+
+ /* set VCN status busy */
+ tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);
- if (amdgpu_emu_mode == 1) {
- r = -1;
- if (status & 2) {
- r = 0;
- break;
- }
- } else {
+ /* SW clock gating */
+ vcn_v4_0_5_disable_clock_gating(vinst);
+
+ /* enable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
+
+ /* disable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+ /* enable LMI MC and UMC channels */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+
+ /* setup regUVD_LMI_CTRL */
+ tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
+ WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+ /* setup regUVD_MPC_CNTL */
+ tmp = RREG32_SOC15(VCN, i, regUVD_MPC_CNTL);
+ tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
+ tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
+ WREG32_SOC15(VCN, i, regUVD_MPC_CNTL, tmp);
+
+ /* setup UVD_MPC_SET_MUXA0 */
+ WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXA0,
+ ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
+
+ /* setup UVD_MPC_SET_MUXB0 */
+ WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXB0,
+ ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
+
+ /* setup UVD_MPC_SET_MUX */
+ WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUX,
+ ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
+ (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
+
+ vcn_v4_0_5_mc_resume(vinst);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* unblock VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* release VCPU reset to boot */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ for (j = 0; j < 10; ++j) {
+ uint32_t status;
+
+ for (k = 0; k < 100; ++k) {
+ status = RREG32_SOC15(VCN, i, regUVD_STATUS);
+ if (status & 2)
+ break;
+ mdelay(10);
+ if (amdgpu_emu_mode == 1)
+ msleep(1);
+ }
+
+ if (amdgpu_emu_mode == 1) {
+ r = -1;
+ if (status & 2) {
r = 0;
- if (status & 2)
- break;
-
- dev_err(adev->dev,
- "VCN[%d] is not responding, trying to reset VCPU!!!\n", i);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
- mdelay(10);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- mdelay(10);
- r = -1;
+ break;
}
+ } else {
+ r = 0;
+ if (status & 2)
+ break;
+
+ dev_err(adev->dev,
+ "VCN[%d] is not responding, trying to reset VCPU!!!\n", i);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ mdelay(10);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ mdelay(10);
+ r = -1;
}
+ }
- if (r) {
- dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
- return r;
- }
+ if (r) {
+ dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
+ return r;
+ }
- /* enable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
- UVD_MASTINT_EN__VCPU_EN_MASK,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- /* clear the busy bit of VCN_STATUS */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
- ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+ /* clear the busy bit of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
- ring = &adev->vcn.inst[i].ring_enc[0];
- WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
- ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
- VCN_RB1_DB_CTRL__EN_MASK);
-
- WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
- WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);
-
- tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
- tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
- WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
- WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
- WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);
-
- tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
- WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
- ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
-
- tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
- tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
- WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
- }
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
+ ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
+
+ WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);
+
+ tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
+ tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
+ WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+ WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
+ WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);
+
+ tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
+ WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
+ ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
+
+ tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
+ tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
+ WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
return 0;
}
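
The bring-up above keeps the same handshake as before the refactor: poll the status register up to 100 times, and if the ready bit never appears, reset the VCPU and retry, up to 10 attempts. Below is a self-contained model of that bounded poll-and-retry idiom; the three helpers are trivial stand-ins for the register accessors and mdelay(), not amdgpu calls.

/* Model of the poll-and-retry handshake; stand-ins, not amdgpu accessors. */
static unsigned int read_status(void)      { return 2; } /* pretend "ready" */
static void reset_vcpu(void)               { }
static void delay_ms(unsigned int ms)      { (void)ms; }

static int wait_for_vcpu_ready(void)
{
	int j, k, r = -1;

	for (j = 0; j < 10; ++j) {              /* up to 10 reset attempts */
		unsigned int status = 0;

		for (k = 0; k < 100; ++k) {     /* poll for ~1s per attempt */
			status = read_status();
			if (status & 2)         /* same readiness check as above */
				break;
			delay_ms(10);
		}
		if (status & 2) {
			r = 0;
			break;
		}
		reset_vcpu();                   /* toggle BLK_RST, then retry */
		delay_ms(10);
	}
	return r;                               /* 0 on success, -1 on giving up */
}
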
@@ -1174,13 +1210,14 @@ static int vcn_v4_0_5_start(struct amdgpu_device *adev)
/**
* vcn_v4_0_5_stop_dpg_mode - VCN stop with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
*
* Stop VCN block with dpg mode
*/
-static void vcn_v4_0_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v4_0_5_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t tmp;
/* Wait for power status to be 1 */
@@ -1202,103 +1239,104 @@ static void vcn_v4_0_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
/**
* vcn_v4_0_5_stop - VCN stop
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Stop VCN block
*/
-static int vcn_v4_0_5_stop(struct amdgpu_device *adev)
+static int vcn_v4_0_5_stop(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
uint32_t tmp;
- int i, r = 0;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ int r = 0;
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- vcn_v4_0_5_stop_dpg_mode(adev, i);
- continue;
- }
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
- /* wait for vcn idle */
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
- if (r)
- return r;
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ vcn_v4_0_5_stop_dpg_mode(vinst);
+ r = 0;
+ goto done;
+ }
- tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__READ_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ /* wait for vcn idle */
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
+ if (r)
+ goto done;
- /* disable LMI UMC channel */
- tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
- tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
- WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
- tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
- UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
+
+ /* disable LMI UMC channel */
+ tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
+ tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+ WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
- /* block VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
- UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+ /* block VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
+ UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
- /* reset VCPU */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ /* reset VCPU */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
- /* disable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~(UVD_VCPU_CNTL__CLK_EN_MASK));
+ /* disable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~(UVD_VCPU_CNTL__CLK_EN_MASK));
- /* apply soft reset */
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+ /* apply soft reset */
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
- /* clear status */
- WREG32_SOC15(VCN, i, regUVD_STATUS, 0);
+ /* clear status */
+ WREG32_SOC15(VCN, i, regUVD_STATUS, 0);
- /* apply HW clock gating */
- vcn_v4_0_5_enable_clock_gating(adev, i);
+ /* apply HW clock gating */
+ vcn_v4_0_5_enable_clock_gating(vinst);
- /* enable VCN power gating */
- vcn_v4_0_5_enable_static_power_gating(adev, i);
- }
+ /* enable VCN power gating */
+ vcn_v4_0_5_enable_static_power_gating(vinst);
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, false, i);
- }
+done:
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
- return 0;
+ return r;
}
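
The rewritten stop path funnels both the DPG early-out and every wait failure through the done: label, so amdgpu_dpm_enable_vcn(adev, false, i) now runs for this instance on all exits rather than only on full success. A minimal generic illustration of that single-exit cleanup idiom, with stand-in helpers:

/* Generic single-exit cleanup pattern mirrored by vcn_v4_0_5_stop(). */
static int wait_idle(void)        { return 0; }
static int quiesce_memory(void)   { return 0; }
static void release_power(void)   { }

static int stop_one_instance(void)
{
	int r;

	r = wait_idle();
	if (r)
		goto done;

	r = quiesce_memory();
	if (r)
		goto done;

	/* ...register teardown... */
done:
	release_power();        /* runs on success and on every early error */
	return r;
}
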
/**
* vcn_v4_0_5_pause_dpg_mode - VCN pause with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @new_state: pause state
*
* Pause dpg mode for VCN block
*/
-static int vcn_v4_0_5_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
- struct dpg_pause_state *new_state)
+static int vcn_v4_0_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t reg_data = 0;
int ret_code;
@@ -1397,7 +1435,7 @@ static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static const struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = {
+static struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
.nop = VCN_ENC_CMD_NO_OP,
@@ -1441,6 +1479,9 @@ static void vcn_v4_0_5_set_unified_ring_funcs(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
+ if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 5))
+ vcn_v4_0_5_unified_ring_vm_funcs.secure_submission_supported = true;
+
adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v4_0_5_unified_ring_vm_funcs;
adev->vcn.inst[i].ring_enc[0].me = i;
}
@@ -1449,13 +1490,13 @@ static void vcn_v4_0_5_set_unified_ring_funcs(struct amdgpu_device *adev)
/**
* vcn_v4_0_5_is_idle - check VCN block is idle
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block structure
*
* Check whether VCN block is idle
*/
-static bool vcn_v4_0_5_is_idle(void *handle)
+static bool vcn_v4_0_5_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 1;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1509,45 +1550,38 @@ static int vcn_v4_0_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (adev->vcn.harvest_config & (1 << i))
continue;
if (enable) {
if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
return -EBUSY;
- vcn_v4_0_5_enable_clock_gating(adev, i);
+ vcn_v4_0_5_enable_clock_gating(vinst);
} else {
- vcn_v4_0_5_disable_clock_gating(adev, i);
+ vcn_v4_0_5_disable_clock_gating(vinst);
}
}
return 0;
}
-/**
- * vcn_v4_0_5_set_powergating_state - set VCN block powergating state
- *
- * @ip_block: amdgpu_ip_block pointer
- * @state: power gating state
- *
- * Set VCN block powergating state
- */
-static int vcn_v4_0_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
+static int vcn_v4_0_5_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = ip_block->adev;
- int ret;
+ int ret = 0;
- if (state == adev->vcn.cur_state)
+ if (state == vinst->cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
- ret = vcn_v4_0_5_stop(adev);
+ ret = vcn_v4_0_5_stop(vinst);
else
- ret = vcn_v4_0_5_start(adev);
+ ret = vcn_v4_0_5_start(vinst);
if (!ret)
- adev->vcn.cur_state = state;
+ vinst->cur_state = state;
return ret;
}
@@ -1615,7 +1649,7 @@ static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
- adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
adev->vcn.inst[i].irq.funcs = &vcn_v4_0_5_irq_funcs;
}
}
@@ -1693,7 +1727,7 @@ static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = {
.is_idle = vcn_v4_0_5_is_idle,
.wait_for_idle = vcn_v4_0_5_wait_for_idle,
.set_clockgating_state = vcn_v4_0_5_set_clockgating_state,
- .set_powergating_state = vcn_v4_0_5_set_powergating_state,
+ .set_powergating_state = vcn_set_powergating_state,
.dump_ip_state = vcn_v4_0_5_dump_ip_state,
.print_ip_state = vcn_v4_0_5_print_ip_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index b6d78381ebfb..d99d05f42f1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -78,10 +78,10 @@ static int amdgpu_ih_clientid_vcns[] = {
static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v5_0_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state);
-static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_device *adev,
- int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v5_0_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring);
/**
@@ -95,14 +95,24 @@ static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring);
static int vcn_v5_0_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int i, r;
- /* re-use enc ring as unified ring */
- adev->vcn.num_enc_rings = 1;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
+ /* re-use enc ring as unified ring */
+ adev->vcn.inst[i].num_enc_rings = 1;
vcn_v5_0_0_set_unified_ring_funcs(adev);
vcn_v5_0_0_set_irq_funcs(adev);
- return amdgpu_vcn_early_init(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ adev->vcn.inst[i].set_pg_state = vcn_v5_0_0_set_pg_state;
+
+ r = amdgpu_vcn_early_init(adev, i);
+ if (r)
+ return r;
+ }
+
+ return 0;
}
void vcn_v5_0_0_alloc_ip_dump(struct amdgpu_device *adev)
@@ -133,22 +143,22 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int i, r;
- r = amdgpu_vcn_sw_init(adev);
- if (r)
- return r;
-
- amdgpu_vcn_setup_ucode(adev);
-
- r = amdgpu_vcn_resume(adev);
- if (r)
- return r;
-
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
volatile struct amdgpu_vcn5_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
+ r = amdgpu_vcn_sw_init(adev, i);
+ if (r)
+ return r;
+
+ amdgpu_vcn_setup_ucode(adev, i);
+
+ r = amdgpu_vcn_resume(adev, i);
+ if (r)
+ return r;
+
atomic_set(&adev->vcn.inst[i].sched_score, 0);
/* VCN UNIFIED TRAP */
@@ -181,15 +191,15 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ adev->vcn.inst[i].pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode;
}
/* TODO: Add queue reset mask when FW fully supports it */
adev->vcn.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- adev->vcn.pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode;
-
vcn_v5_0_0_alloc_ip_dump(adev);
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
@@ -226,16 +236,23 @@ static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block)
drm_dev_exit(idx);
}
- r = amdgpu_vcn_suspend(adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(adev, i);
+ if (r)
+ return r;
+ }
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- r = amdgpu_vcn_sw_fini(adev);
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_sw_fini(adev, i);
+ if (r)
+ return r;
+ }
kfree(adev->vcn.ip_dump);
- return r;
+ return 0;
}
/**
@@ -280,16 +297,19 @@ static int vcn_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int i;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
-
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (adev->vcn.harvest_config & (1 << i))
continue;
+
+ cancel_delayed_work_sync(&vinst->idle_work);
+
if (!amdgpu_sriov_vf(adev)) {
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
- RREG32_SOC15(VCN, i, regUVD_STATUS))) {
- vcn_v5_0_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ (vinst->cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(VCN, i, regUVD_STATUS))) {
+ vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
}
}
@@ -306,13 +326,18 @@ static int vcn_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v5_0_0_suspend(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
r = vcn_v5_0_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(ip_block->adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(ip_block->adev, i);
+ if (r)
+ return r;
+ }
return r;
}
@@ -326,11 +351,14 @@ static int vcn_v5_0_0_suspend(struct amdgpu_ip_block *ip_block)
*/
static int vcn_v5_0_0_resume(struct amdgpu_ip_block *ip_block)
{
- int r;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r, i;
- r = amdgpu_vcn_resume(ip_block->adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_resume(ip_block->adev, i);
+ if (r)
+ return r;
+ }
r = vcn_v5_0_0_hw_init(ip_block);
@@ -340,13 +368,14 @@ static int vcn_v5_0_0_resume(struct amdgpu_ip_block *ip_block)
/**
* vcn_v5_0_0_mc_resume - memory controller programming
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Let the VCN memory controller know its offsets
*/
-static void vcn_v5_0_0_mc_resume(struct amdgpu_device *adev, int inst)
+static void vcn_v5_0_0_mc_resume(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t offset, size;
const struct common_firmware_header *hdr;
@@ -400,14 +429,16 @@ static void vcn_v5_0_0_mc_resume(struct amdgpu_device *adev, int inst)
/**
* vcn_v5_0_0_mc_resume_dpg_mode - memory controller programming for dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
 * Let the VCN memory controller know its offsets with dpg mode
*/
-static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t offset, size;
const struct common_firmware_header *hdr;
@@ -510,13 +541,14 @@ static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
/**
* vcn_v5_0_0_disable_static_power_gating - disable VCN static power gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Disable static power gating for VCN block
*/
-static void vcn_v5_0_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v5_0_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data = 0;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -577,13 +609,14 @@ static void vcn_v5_0_0_disable_static_power_gating(struct amdgpu_device *adev, i
/**
* vcn_v5_0_0_enable_static_power_gating - enable VCN static power gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Enable static power gating for VCN block
*/
-static void vcn_v5_0_0_enable_static_power_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v5_0_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t data;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
@@ -623,12 +656,11 @@ static void vcn_v5_0_0_enable_static_power_gating(struct amdgpu_device *adev, in
/**
* vcn_v5_0_0_disable_clock_gating - disable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Disable clock gating for VCN block
*/
-static void vcn_v5_0_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v5_0_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
return;
}
@@ -637,15 +669,15 @@ static void vcn_v5_0_0_disable_clock_gating(struct amdgpu_device *adev, int inst
/**
* vcn_v5_0_0_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
* @sram_sel: sram select
- * @inst_idx: instance number index
* @indirect: indirectly write sram
*
* Disable clock gating for VCN block with dpg mode
*/
-static void vcn_v5_0_0_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
- int inst_idx, uint8_t indirect)
+static void vcn_v5_0_0_disable_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ uint8_t sram_sel,
+ uint8_t indirect)
{
return;
}
@@ -654,12 +686,11 @@ static void vcn_v5_0_0_disable_clock_gating_dpg_mode(struct amdgpu_device *adev,
/**
* vcn_v5_0_0_enable_clock_gating - enable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Enable clock gating for VCN block
*/
-static void vcn_v5_0_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v5_0_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
return;
}
@@ -667,14 +698,16 @@ static void vcn_v5_0_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
/**
* vcn_v5_0_0_start_dpg_mode - VCN start with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
* Start VCN block with dpg mode
*/
-static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
volatile struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
@@ -714,7 +747,7 @@ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
VCN, inst_idx, regUVD_LMI_CTRL), tmp, 0, indirect);
- vcn_v5_0_0_mc_resume_dpg_mode(adev, inst_idx, indirect);
+ vcn_v5_0_0_mc_resume_dpg_mode(vinst, indirect);
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
@@ -766,155 +799,151 @@ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
/**
* vcn_v5_0_0_start - VCN start
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Start VCN block
*/
-static int vcn_v5_0_0_start(struct amdgpu_device *adev)
+static int vcn_v5_0_0_start(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
volatile struct amdgpu_vcn5_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
- int i, j, k, r;
+ int j, k, r;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, true, i);
- }
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v5_0_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
- continue;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ return vcn_v5_0_0_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);
- /* disable VCN power gating */
- vcn_v5_0_0_disable_static_power_gating(adev, i);
-
- /* set VCN status busy */
- tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
- WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);
-
- /* enable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
-
- /* disable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
-
- /* enable LMI MC and UMC channels */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
-
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
-
- /* setup regUVD_LMI_CTRL */
- tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
- WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
- UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
- UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
- UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
- UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
-
- vcn_v5_0_0_mc_resume(adev, i);
-
- /* VCN global tiling registers */
- WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
-
- /* unblock VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
-
- /* release VCPU reset to boot */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- for (j = 0; j < 10; ++j) {
- uint32_t status;
-
- for (k = 0; k < 100; ++k) {
- status = RREG32_SOC15(VCN, i, regUVD_STATUS);
- if (status & 2)
- break;
- mdelay(10);
- if (amdgpu_emu_mode == 1)
- msleep(1);
- }
+ /* disable VCN power gating */
+ vcn_v5_0_0_disable_static_power_gating(vinst);
+
+ /* set VCN status busy */
+ tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);
+
+ /* enable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
+
+ /* disable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+ /* enable LMI MC and UMC channels */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+
+ /* setup regUVD_LMI_CTRL */
+ tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
+ WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+ vcn_v5_0_0_mc_resume(vinst);
- if (amdgpu_emu_mode == 1) {
- r = -1;
- if (status & 2) {
- r = 0;
- break;
- }
- } else {
+ /* VCN global tiling registers */
+ WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* unblock VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* release VCPU reset to boot */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ for (j = 0; j < 10; ++j) {
+ uint32_t status;
+
+ for (k = 0; k < 100; ++k) {
+ status = RREG32_SOC15(VCN, i, regUVD_STATUS);
+ if (status & 2)
+ break;
+ mdelay(10);
+ if (amdgpu_emu_mode == 1)
+ msleep(1);
+ }
+
+ if (amdgpu_emu_mode == 1) {
+ r = -1;
+ if (status & 2) {
r = 0;
- if (status & 2)
- break;
-
- dev_err(adev->dev,
- "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
- mdelay(10);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- mdelay(10);
- r = -1;
+ break;
}
+ } else {
+ r = 0;
+ if (status & 2)
+ break;
+
+ dev_err(adev->dev,
+ "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ mdelay(10);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ mdelay(10);
+ r = -1;
}
+ }
- if (r) {
- dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
- return r;
- }
+ if (r) {
+ dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
+ return r;
+ }
- /* enable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
- UVD_MASTINT_EN__VCPU_EN_MASK,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- /* clear the busy bit of VCN_STATUS */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
- ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+ /* clear the busy bit of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
- ring = &adev->vcn.inst[i].ring_enc[0];
- WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
- ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
- VCN_RB1_DB_CTRL__EN_MASK);
-
- WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
- WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);
-
- tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
- tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
- WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
- WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
- WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);
-
- tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
- WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
- ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
-
- tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
- tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
- WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
- }
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
+ ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
+
+ WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);
+
+ tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
+ tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
+ WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+ WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
+ WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);
+
+ tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
+ WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
+ ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
+
+ tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
+ tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
+ WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
return 0;
}
@@ -922,17 +951,18 @@ static int vcn_v5_0_0_start(struct amdgpu_device *adev)
/**
* vcn_v5_0_0_stop_dpg_mode - VCN stop with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
*
* Stop VCN block with dpg mode
*/
-static void vcn_v5_0_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v5_0_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
uint32_t tmp;
- vcn_v5_0_0_pause_dpg_mode(adev, inst_idx, &state);
+ vcn_v5_0_0_pause_dpg_mode(vinst, &state);
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
@@ -952,100 +982,101 @@ static void vcn_v5_0_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
/**
* vcn_v5_0_0_stop - VCN stop
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Stop VCN block
*/
-static int vcn_v5_0_0_stop(struct amdgpu_device *adev)
+static int vcn_v5_0_0_stop(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
volatile struct amdgpu_vcn5_fw_shared *fw_shared;
uint32_t tmp;
- int i, r = 0;
+ int r = 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- vcn_v5_0_0_stop_dpg_mode(adev, i);
- continue;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ vcn_v5_0_0_stop_dpg_mode(vinst);
+ r = 0;
+ goto done;
+ }
- /* wait for vcn idle */
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
- if (r)
- return r;
+ /* wait for vcn idle */
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
+ if (r)
+ goto done;
- tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__READ_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
+
+ /* disable LMI UMC channel */
+ tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
+ tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+ WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
- /* disable LMI UMC channel */
- tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
- tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
- WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
- tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
- UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ /* block VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
+ UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
- /* block VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
- UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
-
- /* reset VCPU */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- /* disable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~(UVD_VCPU_CNTL__CLK_EN_MASK));
-
- /* apply soft reset */
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
-
- /* clear status */
- WREG32_SOC15(VCN, i, regUVD_STATUS, 0);
-
- /* enable VCN power gating */
- vcn_v5_0_0_enable_static_power_gating(adev, i);
- }
+ /* reset VCPU */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, false, i);
- }
+ /* disable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~(UVD_VCPU_CNTL__CLK_EN_MASK));
- return 0;
+ /* apply soft reset */
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+
+ /* clear status */
+ WREG32_SOC15(VCN, i, regUVD_STATUS, 0);
+
+ /* enable VCN power gating */
+ vcn_v5_0_0_enable_static_power_gating(vinst);
+
+done:
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
+
+ return r;
}
/**
* vcn_v5_0_0_pause_dpg_mode - VCN pause with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @new_state: pause state
*
* Pause dpg mode for VCN block
*/
-static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
- struct dpg_pause_state *new_state)
+static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t reg_data = 0;
int ret_code;
@@ -1192,13 +1223,13 @@ static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev)
/**
* vcn_v5_0_0_is_idle - check VCN block is idle
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block structure
*
* Check whether VCN block is idle
*/
-static bool vcn_v5_0_0_is_idle(void *handle)
+static bool vcn_v5_0_0_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 1;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1252,45 +1283,38 @@ static int vcn_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (adev->vcn.harvest_config & (1 << i))
continue;
if (enable) {
if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
return -EBUSY;
- vcn_v5_0_0_enable_clock_gating(adev, i);
+ vcn_v5_0_0_enable_clock_gating(vinst);
} else {
- vcn_v5_0_0_disable_clock_gating(adev, i);
+ vcn_v5_0_0_disable_clock_gating(vinst);
}
}
return 0;
}
-/**
- * vcn_v5_0_0_set_powergating_state - set VCN block powergating state
- *
- * @ip_block: amdgpu_ip_block pointer
- * @state: power gating state
- *
- * Set VCN block powergating state
- */
-static int vcn_v5_0_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
+static int vcn_v5_0_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = ip_block->adev;
- int ret;
+ int ret = 0;
- if (state == adev->vcn.cur_state)
+ if (state == vinst->cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
- ret = vcn_v5_0_0_stop(adev);
+ ret = vcn_v5_0_0_stop(vinst);
else
- ret = vcn_v5_0_0_start(adev);
+ ret = vcn_v5_0_0_start(vinst);
if (!ret)
- adev->vcn.cur_state = state;
+ vinst->cur_state = state;
return ret;
}
@@ -1358,7 +1382,7 @@ static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
- adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
adev->vcn.inst[i].irq.funcs = &vcn_v5_0_0_irq_funcs;
}
}
@@ -1436,7 +1460,7 @@ static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
.is_idle = vcn_v5_0_0_is_idle,
.wait_for_idle = vcn_v5_0_0_wait_for_idle,
.set_clockgating_state = vcn_v5_0_0_set_clockgating_state,
- .set_powergating_state = vcn_v5_0_0_set_powergating_state,
+ .set_powergating_state = vcn_set_powergating_state,
.dump_ip_state = vcn_v5_0_0_dump_ip_state,
.print_ip_state = vcn_v5_0_0_print_ip_state,
};
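
Editor's note: the hunks above convert vcn_v5_0_0 from device-wide start/stop to per-instance handlers reached through vinst->set_pg_state, and point .set_powergating_state at the shared vcn_set_powergating_state helper, which is not shown in this diff (it lives in amdgpu_vcn.c). A simplified, hedged sketch of what such a dispatch wrapper does is given below for orientation only; the name is suffixed _sketch because the real helper may differ in details such as error handling.

	/* Sketch only: per-instance power-gating dispatch of the kind the
	 * patch switches to.  All fields used here (num_vcn_inst, inst[],
	 * set_pg_state) appear in the patch; the body is illustrative. */
	static int vcn_set_powergating_state_sketch(struct amdgpu_ip_block *ip_block,
						    enum amd_powergating_state state)
	{
		struct amdgpu_device *adev = ip_block->adev;
		int i, ret;

		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

			/* each instance gates or ungates itself via its callback */
			ret = vinst->set_pg_state(vinst, state);
			if (ret)
				return ret;
		}

		return 0;
	}
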
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
index 8b463c977d08..581d8629b9d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
@@ -29,6 +29,7 @@
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"
+#include "vcn_v4_0_3.h"
#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
@@ -40,8 +41,8 @@
static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state);
+static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring);
/**
@@ -55,14 +56,40 @@ static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring);
static int vcn_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int i, r;
- /* re-use enc ring as unified ring */
- adev->vcn.num_enc_rings = 1;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
+ /* re-use enc ring as unified ring */
+ adev->vcn.inst[i].num_enc_rings = 1;
vcn_v5_0_1_set_unified_ring_funcs(adev);
vcn_v5_0_1_set_irq_funcs(adev);
- return amdgpu_vcn_early_init(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ adev->vcn.inst[i].set_pg_state = vcn_v5_0_1_set_pg_state;
+
+ r = amdgpu_vcn_early_init(adev, i);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static void vcn_v5_0_1_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
+{
+ struct amdgpu_vcn5_fw_shared *fw_shared;
+
+ fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+
+ if (fw_shared->sq.is_enabled)
+ return;
+ fw_shared->present_flag_0 =
+ cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
+ fw_shared->sq.is_enabled = 1;
+
+ if (amdgpu_vcnfw_log)
+ amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]);
}
/**
@@ -78,16 +105,6 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
int i, r, vcn_inst;
- r = amdgpu_vcn_sw_init(adev);
- if (r)
- return r;
-
- amdgpu_vcn_setup_ucode(adev);
-
- r = amdgpu_vcn_resume(adev);
- if (r)
- return r;
-
/* VCN UNIFIED TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);
@@ -95,10 +112,18 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
return r;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
-
vcn_inst = GET_INST(VCN, i);
+ r = amdgpu_vcn_sw_init(adev, i);
+ if (r)
+ return r;
+
+ amdgpu_vcn_setup_ucode(adev, i);
+
+ r = amdgpu_vcn_resume(adev, i);
+ if (r)
+ return r;
+
ring = &adev->vcn.inst[i].ring_enc[0];
ring->use_doorbell = true;
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * vcn_inst;
@@ -111,12 +136,7 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
- fw_shared->sq.is_enabled = true;
-
- if (amdgpu_vcnfw_log)
- amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+ vcn_v5_0_1_fw_shared_init(adev, i);
}
/* TODO: Add queue reset mask when FW fully supports it */
@@ -142,7 +162,7 @@ static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared;
fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
fw_shared->present_flag_0 = 0;
@@ -152,17 +172,23 @@ static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
drm_dev_exit(idx);
}
- r = amdgpu_vcn_suspend(adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(adev, i);
+ if (r)
+ return r;
+ }
- r = amdgpu_vcn_sw_fini(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_sw_fini(adev, i);
+ if (r)
+ return r;
+ }
amdgpu_vcn_sysfs_reset_mask_fini(adev);
kfree(adev->vcn.ip_dump);
- return r;
+ return 0;
}
/**
@@ -178,6 +204,8 @@ static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
int i, r, vcn_inst;
+ if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
+ adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
vcn_inst = GET_INST(VCN, i);
ring = &adev->vcn.inst[i].ring_enc[0];
@@ -188,6 +216,9 @@ static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
9 * vcn_inst),
adev->vcn.inst[i].aid_id);
+ /* Re-init fw_shared, if required */
+ vcn_v5_0_1_fw_shared_init(adev, i);
+
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
@@ -206,8 +237,15 @@ static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
static int vcn_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int i;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
+ cancel_delayed_work_sync(&adev->vcn.inst[i].idle_work);
+ if (vinst->cur_state != AMD_PG_STATE_GATE)
+ vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
+ }
return 0;
}
@@ -222,13 +260,17 @@ static int vcn_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
static int vcn_v5_0_1_suspend(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- int r;
+ int r, i;
r = vcn_v5_0_1_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ r = amdgpu_vcn_suspend(ip_block->adev, i);
+ if (r)
+ return r;
+ }
return r;
}
@@ -243,11 +285,18 @@ static int vcn_v5_0_1_suspend(struct amdgpu_ip_block *ip_block)
static int vcn_v5_0_1_resume(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- int r;
+ int r, i;
- r = amdgpu_vcn_resume(adev);
- if (r)
- return r;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
+ if (amdgpu_in_reset(adev))
+ vinst->cur_state = AMD_PG_STATE_GATE;
+
+ r = amdgpu_vcn_resume(ip_block->adev, i);
+ if (r)
+ return r;
+ }
r = vcn_v5_0_1_hw_init(ip_block);
@@ -257,13 +306,14 @@ static int vcn_v5_0_1_resume(struct amdgpu_ip_block *ip_block)
/**
* vcn_v5_0_1_mc_resume - memory controller programming
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
 * Let the VCN memory controller know its offsets
*/
-static void vcn_v5_0_1_mc_resume(struct amdgpu_device *adev, int inst)
+static void vcn_v5_0_1_mc_resume(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst = vinst->inst;
uint32_t offset, size, vcn_inst;
const struct common_firmware_header *hdr;
@@ -313,20 +363,22 @@ static void vcn_v5_0_1_mc_resume(struct amdgpu_device *adev, int inst)
upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_SIZE0,
- AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)));
}
/**
* vcn_v5_0_1_mc_resume_dpg_mode - memory controller programming for dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
 * Let the VCN memory controller know its offsets with dpg mode
*/
-static void vcn_v5_0_1_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static void vcn_v5_0_1_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t offset, size;
const struct common_firmware_header *hdr;
@@ -421,7 +473,7 @@ static void vcn_v5_0_1_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
VCN, 0, regUVD_VCPU_NONCACHE_SIZE0),
- AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)), 0, indirect);
/* VCN global tiling registers */
WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
@@ -431,39 +483,39 @@ static void vcn_v5_0_1_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
/**
* vcn_v5_0_1_disable_clock_gating - disable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Disable clock gating for VCN block
*/
-static void vcn_v5_0_1_disable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v5_0_1_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
}
/**
* vcn_v5_0_1_enable_clock_gating - enable VCN clock gating
*
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
*
* Enable clock gating for VCN block
*/
-static void vcn_v5_0_1_enable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v5_0_1_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
}
/**
* vcn_v5_0_1_start_dpg_mode - VCN start with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
* @indirect: indirectly write sram
*
* Start VCN block with dpg mode
*/
-static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
+ bool indirect)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared =
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared =
adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
int vcn_inst;
@@ -510,7 +562,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect);
- vcn_v5_0_1_mc_resume_dpg_mode(adev, inst_idx, indirect);
+ vcn_v5_0_1_mc_resume_dpg_mode(vinst, indirect);
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
@@ -564,153 +616,148 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
/**
* vcn_v5_0_1_start - VCN start
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Start VCN block
*/
-static int vcn_v5_0_1_start(struct amdgpu_device *adev)
+static int vcn_v5_0_1_start(struct amdgpu_vcn_inst *vinst)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
- int i, j, k, r, vcn_inst;
+ int j, k, r, vcn_inst;
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ return vcn_v5_0_1_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v5_0_1_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
- continue;
- }
+ vcn_inst = GET_INST(VCN, i);
- vcn_inst = GET_INST(VCN, i);
+ /* set VCN status busy */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);
- /* set VCN status busy */
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
- WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);
-
- /* enable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
-
- /* disable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
-
- /* enable LMI MC and UMC channels */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
-
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
- tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
-
- /* setup regUVD_LMI_CTRL */
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
- WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL, tmp |
- UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
- UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
- UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
- UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
-
- vcn_v5_0_1_mc_resume(adev, i);
-
- /* VCN global tiling registers */
- WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
-
- /* unblock VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
-
- /* release VCPU reset to boot */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- for (j = 0; j < 10; ++j) {
- uint32_t status;
-
- for (k = 0; k < 100; ++k) {
- status = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
- if (status & 2)
- break;
- mdelay(100);
- if (amdgpu_emu_mode == 1)
- msleep(20);
- }
+ /* enable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
+
+ /* disable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+ /* enable LMI MC and UMC channels */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+
+ /* setup regUVD_LMI_CTRL */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL, tmp |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+ vcn_v5_0_1_mc_resume(vinst);
- if (amdgpu_emu_mode == 1) {
- r = -1;
- if (status & 2) {
- r = 0;
- break;
- }
- } else {
+ /* VCN global tiling registers */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* unblock VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* release VCPU reset to boot */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ for (j = 0; j < 10; ++j) {
+ uint32_t status;
+
+ for (k = 0; k < 100; ++k) {
+ status = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
+ if (status & 2)
+ break;
+ mdelay(100);
+ if (amdgpu_emu_mode == 1)
+ msleep(20);
+ }
+
+ if (amdgpu_emu_mode == 1) {
+ r = -1;
+ if (status & 2) {
r = 0;
- if (status & 2)
- break;
-
- dev_err(adev->dev,
- "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
- mdelay(10);
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- mdelay(10);
- r = -1;
+ break;
}
+ } else {
+ r = 0;
+ if (status & 2)
+ break;
+
+ dev_err(adev->dev,
+ "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ mdelay(10);
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ mdelay(10);
+ r = -1;
}
+ }
- if (r) {
- dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
- return r;
- }
+ if (r) {
+ dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
+ return r;
+ }
- /* enable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
- UVD_MASTINT_EN__VCPU_EN_MASK,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- /* clear the busy bit of VCN_STATUS */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
- ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+ /* clear the busy bit of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
- ring = &adev->vcn.inst[i].ring_enc[0];
+ ring = &adev->vcn.inst[i].ring_enc[0];
- WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
- ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
- VCN_RB1_DB_CTRL__EN_MASK);
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
+ ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
- /* Read DB_CTRL to flush the write DB_CTRL command. */
- RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);
+ /* Read DB_CTRL to flush the write DB_CTRL command. */
+ RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, ring->gpu_addr);
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / 4);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / 4);
- tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
- tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
- WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
+ tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+ tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR);
- WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, tmp);
- ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, tmp);
+ ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
- tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
- tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
- WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
- }
+ tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+ tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
return 0;
}
@@ -718,13 +765,14 @@ static int vcn_v5_0_1_start(struct amdgpu_device *adev)
/**
* vcn_v5_0_1_stop_dpg_mode - VCN stop with dpg mode
*
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
*
* Stop VCN block with dpg mode
*/
-static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
+ struct amdgpu_device *adev = vinst->adev;
+ int inst_idx = vinst->inst;
uint32_t tmp;
int vcn_inst;
@@ -746,78 +794,75 @@ static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
/**
* vcn_v5_0_1_stop - VCN stop
*
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
*
* Stop VCN block
*/
-static int vcn_v5_0_1_stop(struct amdgpu_device *adev)
+static int vcn_v5_0_1_stop(struct amdgpu_vcn_inst *vinst)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_device *adev = vinst->adev;
+ int i = vinst->inst;
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared;
uint32_t tmp;
- int i, r = 0, vcn_inst;
+ int r = 0, vcn_inst;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- vcn_inst = GET_INST(VCN, i);
+ vcn_inst = GET_INST(VCN, i);
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- vcn_v5_0_1_stop_dpg_mode(adev, i);
- continue;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ vcn_v5_0_1_stop_dpg_mode(vinst);
+ return 0;
+ }
- /* wait for vcn idle */
- r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
- if (r)
- return r;
+ /* wait for vcn idle */
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
+ if (r)
+ return r;
- tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__READ_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ return r;
- /* disable LMI UMC channel */
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
- tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
- WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
- tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
- UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ /* disable LMI UMC channel */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
+ tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ return r;
- /* block VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
- UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
-
- /* reset VCPU */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- /* disable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
- ~(UVD_VCPU_CNTL__CLK_EN_MASK));
-
- /* apply soft reset */
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
-
- /* clear status */
- WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
- }
+ /* block VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
+ UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* reset VCPU */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ /* disable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
+ ~(UVD_VCPU_CNTL__CLK_EN_MASK));
+
+ /* apply soft reset */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ /* clear status */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
return 0;
}
@@ -889,16 +934,17 @@ static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = {
.get_rptr = vcn_v5_0_1_unified_ring_get_rptr,
.get_wptr = vcn_v5_0_1_unified_ring_get_wptr,
.set_wptr = vcn_v5_0_1_unified_ring_set_wptr,
- .emit_frame_size =
- SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
- SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
- 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
- 5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
- 1, /* vcn_v2_0_enc_ring_insert_end */
+ .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
+ 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
+ 5 +
+ 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
+ 1, /* vcn_v2_0_enc_ring_insert_end */
.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
.emit_ib = vcn_v2_0_enc_ring_emit_ib,
.emit_fence = vcn_v2_0_enc_ring_emit_fence,
- .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
+ .emit_vm_flush = vcn_v4_0_3_enc_ring_emit_vm_flush,
+ .emit_hdp_flush = vcn_v4_0_3_ring_emit_hdp_flush,
.test_ring = amdgpu_vcn_enc_ring_test_ring,
.test_ib = amdgpu_vcn_unified_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
@@ -906,8 +952,8 @@ static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vcn_ring_begin_use,
.end_use = amdgpu_vcn_ring_end_use,
- .emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
- .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
+ .emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg,
+ .emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
@@ -933,13 +979,13 @@ static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev)
/**
* vcn_v5_0_1_is_idle - check VCN block is idle
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block structure
*
* Check whether VCN block is idle
*/
-static bool vcn_v5_0_1_is_idle(void *handle)
+static bool vcn_v5_0_1_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 1;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
@@ -986,42 +1032,35 @@ static int vcn_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
if (enable) {
if (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) != UVD_STATUS__IDLE)
return -EBUSY;
- vcn_v5_0_1_enable_clock_gating(adev, i);
+ vcn_v5_0_1_enable_clock_gating(vinst);
} else {
- vcn_v5_0_1_disable_clock_gating(adev, i);
+ vcn_v5_0_1_disable_clock_gating(vinst);
}
}
return 0;
}
-/**
- * vcn_v5_0_1_set_powergating_state - set VCN block powergating state
- *
- * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
- * @state: power gating state
- *
- * Set VCN block powergating state
- */
-static int vcn_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
+static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = ip_block->adev;
- int ret;
+ int ret = 0;
- if (state == adev->vcn.cur_state)
+ if (state == vinst->cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
- ret = vcn_v5_0_1_stop(adev);
+ ret = vcn_v5_0_1_stop(vinst);
else
- ret = vcn_v5_0_1_start(adev);
+ ret = vcn_v5_0_1_start(vinst);
if (!ret)
- adev->vcn.cur_state = state;
+ vinst->cur_state = state;
return ret;
}
@@ -1104,7 +1143,7 @@ static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = {
.soft_reset = NULL,
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v5_0_1_set_clockgating_state,
- .set_powergating_state = vcn_v5_0_1_set_powergating_state,
+ .set_powergating_state = vcn_set_powergating_state,
.dump_ip_state = vcn_v5_0_0_dump_ip_state,
.print_ip_state = vcn_v5_0_0_print_ip_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h
index 82ac709f44bf..8fd90bd10807 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h
@@ -24,6 +24,9 @@
#ifndef __VCN_v5_0_1_H__
#define __VCN_v5_0_1_H__
+#define regVCN_RRMT_CNTL 0x0940
+#define regVCN_RRMT_CNTL_BASE_IDX 1
+
extern const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block;
#endif /* __VCN_v5_0_1_H__ */
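
Editor's note: the new regVCN_RRMT_CNTL define pairs with the hw_init change earlier in this patch, which latches bit 8 of that register into adev->vcn.caps as RRMT_ENABLED. No consumer of that capability is part of this diff; as a hedged illustration only, a hypothetical helper testing it could look like this.

	/* Hypothetical helper, not in the patch: checks the capability bit
	 * latched from regVCN_RRMT_CNTL during hw_init. */
	static bool vcn_v5_0_1_rrmt_enabled(struct amdgpu_device *adev)
	{
		return !!(adev->vcn.caps & AMDGPU_VCN_CAPS(RRMT_ENABLED));
	}
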
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 98fc6941159e..eb16916c6473 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -555,7 +555,7 @@ static int vega10_ih_resume(struct amdgpu_ip_block *ip_block)
return vega10_ih_hw_init(ip_block);
}
-static bool vega10_ih_is_idle(void *handle)
+static bool vega10_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/vega10_sdma_pkt_open.h
index 8de4ccce5e38..3ca8a417c6d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_sdma_pkt_open.h
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_sdma_pkt_open.h
@@ -64,6 +64,9 @@
#define HEADER_BARRIER 5
#define SDMA_OP_AQL_COPY 0
#define SDMA_OP_AQL_BARRIER_OR 0
+/* vm invalidation is only available for GC9.4.3/GC9.4.4/GC9.5.0 */
+#define SDMA_OP_VM_INVALIDATE 8
+#define SDMA_SUBOP_VM_INVALIDATE 4
/*define for op field*/
#define SDMA_PKT_HEADER_op_offset 0
@@ -3331,5 +3334,72 @@
#define SDMA_AQL_PKT_BARRIER_OR_COMPLETION_SIGNAL_HI_completion_signal_63_32_shift 0
#define SDMA_AQL_PKT_BARRIER_OR_COMPLETION_SIGNAL_HI_COMPLETION_SIGNAL_63_32(x) (((x) & SDMA_AQL_PKT_BARRIER_OR_COMPLETION_SIGNAL_HI_completion_signal_63_32_mask) << SDMA_AQL_PKT_BARRIER_OR_COMPLETION_SIGNAL_HI_completion_signal_63_32_shift)
+/*
+** Definitions for SDMA_PKT_VM_INVALIDATION packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_VM_INVALIDATION_HEADER_op_offset 0
+#define SDMA_PKT_VM_INVALIDATION_HEADER_op_mask 0x000000FF
+#define SDMA_PKT_VM_INVALIDATION_HEADER_op_shift 0
+#define SDMA_PKT_VM_INVALIDATION_HEADER_OP(x) ((x & SDMA_PKT_VM_INVALIDATION_HEADER_op_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_offset 0
+#define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_mask 0x000000FF
+#define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_shift 8
+#define SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(x) ((x & SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_shift)
+
+/*define for xcc0_eng_id field*/
+#define SDMA_PKT_VM_INVALIDATION_HEADER_xcc0_eng_id_offset 0
+#define SDMA_PKT_VM_INVALIDATION_HEADER_xcc0_eng_id_mask 0x0000001F
+#define SDMA_PKT_VM_INVALIDATION_HEADER_xcc0_eng_id_shift 16
+#define SDMA_PKT_VM_INVALIDATION_HEADER_XCC0_ENG_ID(x) ((x & SDMA_PKT_VM_INVALIDATION_HEADER_xcc0_eng_id_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_xcc0_eng_id_shift)
+
+/*define for xcc1_eng_id field*/
+#define SDMA_PKT_VM_INVALIDATION_HEADER_xcc1_eng_id_offset 0
+#define SDMA_PKT_VM_INVALIDATION_HEADER_xcc1_eng_id_mask 0x0000001F
+#define SDMA_PKT_VM_INVALIDATION_HEADER_xcc1_eng_id_shift 21
+#define SDMA_PKT_VM_INVALIDATION_HEADER_XCC1_ENG_ID(x) ((x & SDMA_PKT_VM_INVALIDATION_HEADER_xcc1_eng_id_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_xcc1_eng_id_shift)
+
+/*define for mmhub_eng_id field*/
+#define SDMA_PKT_VM_INVALIDATION_HEADER_mmhub_eng_id_offset 0
+#define SDMA_PKT_VM_INVALIDATION_HEADER_mmhub_eng_id_mask 0x0000001F
+#define SDMA_PKT_VM_INVALIDATION_HEADER_mmhub_eng_id_shift 26
+#define SDMA_PKT_VM_INVALIDATION_HEADER_MMHUB_ENG_ID(x) ((x & SDMA_PKT_VM_INVALIDATION_HEADER_mmhub_eng_id_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_mmhub_eng_id_shift)
+
+/*define for INVALIDATEREQ word*/
+/*define for invalidatereq field*/
+#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_offset 1
+#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_mask 0xFFFFFFFF
+#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_shift 0
+#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_INVALIDATEREQ(x) ((x & SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_mask) << SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_shift)
+
+/*define for ADDRESSRANGELO word*/
+/*define for addressrangelo field*/
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_offset 2
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_mask 0xFFFFFFFF
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_shift 0
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_ADDRESSRANGELO(x) ((x & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_shift)
+
+/*define for ADDRESSRANGEHI word*/
+/*define for invalidateack field*/
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_offset 3
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_mask 0x0000FFFF
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_shift 0
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_INVALIDATEACK(x) ((x & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_shift)
+
+/*define for addressrangehi field*/
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_offset 3
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_mask 0x0000001F
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_shift 16
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_ADDRESSRANGEHI(x) ((x & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_shift)
+
+/*define for reserved field*/
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_offset 3
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_mask 0x000001FF
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_shift 23
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_RESERVED(x) ((x & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_shift)
#endif /* __SDMA_PKT_OPEN_H_ */
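
Editor's note: the block above defines the wire layout of the new SDMA VM-invalidation packet (opcode 8, sub-op 4) as offset/mask/shift triples per field. As a hedged illustration of how those macros compose, a hypothetical helper that packs the header dword is sketched below; the helper name and signature are assumptions, and the actual packet emission lives in the SDMA engine code, not in this header.

	/* Hypothetical helper, not part of the patch: packs the first dword
	 * of an SDMA VM-invalidation packet from the field macros above. */
	static inline uint32_t sdma_vm_inv_pkt_header(uint32_t xcc0_eng_id,
						      uint32_t mmhub_eng_id)
	{
		return SDMA_PKT_VM_INVALIDATION_HEADER_OP(SDMA_OP_VM_INVALIDATE) |
		       SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(SDMA_SUBOP_VM_INVALIDATE) |
		       SDMA_PKT_VM_INVALIDATION_HEADER_XCC0_ENG_ID(xcc0_eng_id) |
		       SDMA_PKT_VM_INVALIDATION_HEADER_MMHUB_ENG_ID(mmhub_eng_id);
	}
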
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index e9e3b2ed4b7b..faa0dd75dd6d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -651,7 +651,7 @@ static int vega20_ih_resume(struct amdgpu_ip_block *ip_block)
return vega20_ih_hw_init(ip_block);
}
-static bool vega20_ih_is_idle(void *handle)
+static bool vega20_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 06615f160331..86d8bc10d90a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -167,16 +167,16 @@ static const struct amdgpu_video_codec_info tonga_video_codecs_decode_array[] =
{
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 3,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 5,
},
{
@@ -188,9 +188,9 @@ static const struct amdgpu_video_codec_info tonga_video_codecs_decode_array[] =
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 4,
},
};
@@ -206,16 +206,16 @@ static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
{
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 3,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 5,
},
{
@@ -227,9 +227,9 @@ static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 4,
},
{
@@ -239,13 +239,6 @@ static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
.max_pixels_per_frame = 4096 * 4096,
.max_level = 186,
},
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 0,
- },
};
static const struct amdgpu_video_codecs cz_video_codecs_decode =
@@ -1736,7 +1729,7 @@ static int vi_common_resume(struct amdgpu_ip_block *ip_block)
return vi_common_hw_init(ip_block);
}
-static bool vi_common_is_idle(void *handle)
+static bool vi_common_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
@@ -1994,9 +1987,9 @@ static int vi_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
-static void vi_common_get_clockgating_state(void *handle, u64 *flags)
+static void vi_common_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int data;
if (amdgpu_sriov_vf(adev))
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index 0d3d8972240d..0ce08113c9f0 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -27,7 +27,6 @@ AMDKFD_FILES := $(AMDKFD_PATH)/kfd_module.o \
$(AMDKFD_PATH)/kfd_device.o \
$(AMDKFD_PATH)/kfd_chardev.o \
$(AMDKFD_PATH)/kfd_topology.o \
- $(AMDKFD_PATH)/kfd_pasid.o \
$(AMDKFD_PATH)/kfd_doorbell.o \
$(AMDKFD_PATH)/kfd_flat_memory.o \
$(AMDKFD_PATH)/kfd_process.o \
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index 795382b55e0a..981d9adcc5e1 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -107,20 +107,30 @@ static void cik_event_interrupt_wq(struct kfd_node *dev,
kfd_signal_hw_exception_event(pasid);
else if (ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) {
+ struct kfd_process_device *pdd = NULL;
struct kfd_vm_fault_info info;
+ struct kfd_process *p;
kfd_smi_event_update_vmfault(dev, pasid);
- kfd_dqm_evict_pasid(dev->dqm, pasid);
+ p = kfd_lookup_process_by_pasid(pasid, &pdd);
+ if (!pdd)
+ return;
+
+ kfd_evict_process_device(pdd);
memset(&info, 0, sizeof(info));
amdgpu_amdkfd_gpuvm_get_vm_fault_info(dev->adev, &info);
- if (!info.page_addr && !info.status)
+ if (!info.page_addr && !info.status) {
+ kfd_unref_process(p);
return;
+ }
if (info.vmid == vmid)
- kfd_signal_vm_fault_event(dev, pasid, &info, NULL);
+ kfd_signal_vm_fault_event(pdd, &info, NULL);
else
- kfd_signal_vm_fault_event(dev, pasid, NULL, NULL);
+ kfd_signal_vm_fault_event(pdd, NULL, NULL);
+
+ kfd_unref_process(p);
}
}
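The hunk above switches from kfd_dqm_evict_pasid() to a per-device lookup that returns both a referenced process and its process-device data. A minimal sketch of that caller-side pattern, assuming the signatures shown in this series (the wrapper name is illustrative):

/* Hypothetical wrapper, for illustration only */
static void evict_queues_for_pasid(u32 pasid)
{
	struct kfd_process_device *pdd = NULL;
	struct kfd_process *p;

	/* Returns a referenced process plus the pdd that owns this PASID */
	p = kfd_lookup_process_by_pasid(pasid, &pdd);
	if (!pdd)
		return;

	/* Evict only this process's queues on the faulting device */
	kfd_evict_process_device(pdd);

	/* Drop the reference taken by the lookup */
	kfd_unref_process(p);
}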
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index 651660958e5b..0320163b6e74 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -3644,7 +3644,7 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
};
static const uint32_t cwsr_trap_gfx12_hex[] = {
- 0xbfa00001, 0xbfa0024b,
+ 0xbfa00001, 0xbfa002a2,
0xb0804009, 0xb8f8f804,
0x9178ff78, 0x00008c00,
0xb8fbf811, 0x8b6eff78,
@@ -3718,7 +3718,15 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
0x00011677, 0xd7610000,
0x00011a79, 0xd7610000,
0x00011c7e, 0xd7610000,
- 0x00011e7f, 0xbefe00ff,
+ 0x00011e7f, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xbefe00ff,
0x00003fff, 0xbeff0080,
0xee0a407a, 0x000c0000,
0x00004000, 0xd760007a,
@@ -3755,38 +3763,46 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
0x00000200, 0xbef600ff,
0x01000000, 0x7e000280,
0x7e020280, 0x7e040280,
- 0xbefd0080, 0xbe804ec2,
- 0xbf94fffe, 0xb8faf804,
- 0x8b7a847a, 0x91788478,
- 0x8c787a78, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xd7610002, 0x0000fa6c,
- 0x807d817d, 0x917aff6d,
- 0x80000000, 0xd7610002,
- 0x0000fa7a, 0x807d817d,
- 0xd7610002, 0x0000fa6e,
- 0x807d817d, 0xd7610002,
- 0x0000fa6f, 0x807d817d,
- 0xd7610002, 0x0000fa78,
- 0x807d817d, 0xb8faf811,
- 0xd7610002, 0x0000fa7a,
- 0x807d817d, 0xd7610002,
- 0x0000fa7b, 0x807d817d,
- 0xb8f1f801, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xb8f1f814, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xb8f1f815, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xb8f1f812, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xb8f1f813, 0xd7610002,
- 0x0000fa71, 0x807d817d,
+ 0xbe804ec2, 0xbf94fffe,
+ 0xb8faf804, 0x8b7a847a,
+ 0x91788478, 0x8c787a78,
+ 0x917aff6d, 0x80000000,
+ 0xd7610002, 0x00010071,
+ 0xd7610002, 0x0001026c,
+ 0xd7610002, 0x0001047a,
+ 0xd7610002, 0x0001066e,
+ 0xd7610002, 0x0001086f,
+ 0xd7610002, 0x00010a78,
+ 0xd7610002, 0x00010e7b,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xb8faf811, 0xd7610002,
+ 0x00010c7a, 0xb8faf801,
+ 0xd7610002, 0x0001107a,
+ 0xb8faf814, 0xd7610002,
+ 0x0001127a, 0xb8faf815,
+ 0xd7610002, 0x0001147a,
+ 0xb8faf812, 0xd7610002,
+ 0x0001167a, 0xb8faf813,
+ 0xd7610002, 0x0001187a,
0xb8faf802, 0xd7610002,
- 0x0000fa7a, 0x807d817d,
- 0xbefa50c1, 0xbfc70000,
- 0xd7610002, 0x0000fa7a,
- 0x807d817d, 0xbefe00ff,
+ 0x00011a7a, 0xbefa50c1,
+ 0xbfc70000, 0xd7610002,
+ 0x00011c7a, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xbefe00ff,
0x0000ffff, 0xbeff0080,
0xc4068070, 0x008ce802,
0x00000000, 0xbefe00c1,
@@ -3801,331 +3817,358 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
0xbe824102, 0xbe844104,
0xbe864106, 0xbe884108,
0xbe8a410a, 0xbe8c410c,
- 0xbe8e410e, 0xd7610002,
- 0x0000f200, 0x80798179,
- 0xd7610002, 0x0000f201,
- 0x80798179, 0xd7610002,
- 0x0000f202, 0x80798179,
- 0xd7610002, 0x0000f203,
- 0x80798179, 0xd7610002,
- 0x0000f204, 0x80798179,
- 0xd7610002, 0x0000f205,
- 0x80798179, 0xd7610002,
- 0x0000f206, 0x80798179,
- 0xd7610002, 0x0000f207,
- 0x80798179, 0xd7610002,
- 0x0000f208, 0x80798179,
- 0xd7610002, 0x0000f209,
- 0x80798179, 0xd7610002,
- 0x0000f20a, 0x80798179,
- 0xd7610002, 0x0000f20b,
- 0x80798179, 0xd7610002,
- 0x0000f20c, 0x80798179,
- 0xd7610002, 0x0000f20d,
- 0x80798179, 0xd7610002,
- 0x0000f20e, 0x80798179,
- 0xd7610002, 0x0000f20f,
- 0x80798179, 0xbf06a079,
- 0xbfa10007, 0xc4068070,
+ 0xbe8e410e, 0xbf068079,
+ 0xbfa10032, 0xd7610002,
+ 0x00010000, 0xd7610002,
+ 0x00010201, 0xd7610002,
+ 0x00010402, 0xd7610002,
+ 0x00010603, 0xd7610002,
+ 0x00010804, 0xd7610002,
+ 0x00010a05, 0xd7610002,
+ 0x00010c06, 0xd7610002,
+ 0x00010e07, 0xd7610002,
+ 0x00011008, 0xd7610002,
+ 0x00011209, 0xd7610002,
+ 0x0001140a, 0xd7610002,
+ 0x0001160b, 0xd7610002,
+ 0x0001180c, 0xd7610002,
+ 0x00011a0d, 0xd7610002,
+ 0x00011c0e, 0xd7610002,
+ 0x00011e0f, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0x80799079,
+ 0xbfa00038, 0xd7610002,
+ 0x00012000, 0xd7610002,
+ 0x00012201, 0xd7610002,
+ 0x00012402, 0xd7610002,
+ 0x00012603, 0xd7610002,
+ 0x00012804, 0xd7610002,
+ 0x00012a05, 0xd7610002,
+ 0x00012c06, 0xd7610002,
+ 0x00012e07, 0xd7610002,
+ 0x00013008, 0xd7610002,
+ 0x00013209, 0xd7610002,
+ 0x0001340a, 0xd7610002,
+ 0x0001360b, 0xd7610002,
+ 0x0001380c, 0xd7610002,
+ 0x00013a0d, 0xd7610002,
+ 0x00013c0e, 0xd7610002,
+ 0x00013e0f, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0x80799079,
+ 0xc4068070, 0x008ce802,
+ 0x00000000, 0x8070ff70,
+ 0x00000080, 0xbef90080,
+ 0x7e040280, 0x807d907d,
+ 0xbf0aff7d, 0x00000060,
+ 0xbfa2ff88, 0xbe804100,
+ 0xbe824102, 0xbe844104,
+ 0xbe864106, 0xbe884108,
+ 0xbe8a410a, 0xd7610002,
+ 0x00010000, 0xd7610002,
+ 0x00010201, 0xd7610002,
+ 0x00010402, 0xd7610002,
+ 0x00010603, 0xd7610002,
+ 0x00010804, 0xd7610002,
+ 0x00010a05, 0xd7610002,
+ 0x00010c06, 0xd7610002,
+ 0x00010e07, 0xd7610002,
+ 0x00011008, 0xd7610002,
+ 0x00011209, 0xd7610002,
+ 0x0001140a, 0xd7610002,
+ 0x0001160b, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xc4068070,
0x008ce802, 0x00000000,
+ 0xbefe00c1, 0x857d9973,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa20002, 0xbeff0080,
+ 0xbfa00001, 0xbeff00c1,
+ 0xb8fb4306, 0x8b7bc17b,
+ 0xbfa10044, 0x8b7aff6d,
+ 0x80000000, 0xbfa10041,
+ 0x847b897b, 0xbef6007b,
+ 0xb8f03b05, 0x80708170,
+ 0xbf0d9973, 0xbfa20002,
+ 0x84708970, 0xbfa00001,
+ 0x84708a70, 0xb8fa1e06,
+ 0x847a8a7a, 0x80707a70,
+ 0x8070ff70, 0x00000200,
0x8070ff70, 0x00000080,
- 0xbef90080, 0x7e040280,
- 0x807d907d, 0xbf0aff7d,
- 0x00000060, 0xbfa2ffbb,
- 0xbe804100, 0xbe824102,
- 0xbe844104, 0xbe864106,
- 0xbe884108, 0xbe8a410a,
- 0xd7610002, 0x0000f200,
- 0x80798179, 0xd7610002,
- 0x0000f201, 0x80798179,
- 0xd7610002, 0x0000f202,
- 0x80798179, 0xd7610002,
- 0x0000f203, 0x80798179,
- 0xd7610002, 0x0000f204,
- 0x80798179, 0xd7610002,
- 0x0000f205, 0x80798179,
- 0xd7610002, 0x0000f206,
- 0x80798179, 0xd7610002,
- 0x0000f207, 0x80798179,
- 0xd7610002, 0x0000f208,
- 0x80798179, 0xd7610002,
- 0x0000f209, 0x80798179,
- 0xd7610002, 0x0000f20a,
- 0x80798179, 0xd7610002,
- 0x0000f20b, 0x80798179,
- 0xc4068070, 0x008ce802,
- 0x00000000, 0xbefe00c1,
- 0x857d9973, 0x8b7d817d,
- 0xbf06817d, 0xbfa20002,
- 0xbeff0080, 0xbfa00001,
- 0xbeff00c1, 0xb8fb4306,
- 0x8b7bc17b, 0xbfa10044,
- 0x8b7aff6d, 0x80000000,
- 0xbfa10041, 0x847b897b,
- 0xbef6007b, 0xb8f03b05,
- 0x80708170, 0xbf0d9973,
- 0xbfa20002, 0x84708970,
- 0xbfa00001, 0x84708a70,
- 0xb8fa1e06, 0x847a8a7a,
- 0x80707a70, 0x8070ff70,
- 0x00000200, 0x8070ff70,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xd71f0000,
- 0x000100c1, 0xd7200000,
- 0x000200c1, 0x16000084,
- 0x857d9973, 0x8b7d817d,
- 0xbf06817d, 0xbefd0080,
- 0xbfa20013, 0xbe8300ff,
- 0x00000080, 0xbf800000,
- 0xbf800000, 0xbf800000,
- 0xd8d80000, 0x01000000,
- 0xbf8a0000, 0xc4068070,
- 0x008ce801, 0x00000000,
- 0x807d037d, 0x80700370,
- 0xd5250000, 0x0001ff00,
- 0x00000080, 0xbf0a7b7d,
- 0xbfa2fff3, 0xbfa00012,
- 0xbe8300ff, 0x00000100,
+ 0xbef600ff, 0x01000000,
+ 0xd71f0000, 0x000100c1,
+ 0xd7200000, 0x000200c1,
+ 0x16000084, 0x857d9973,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbefd0080, 0xbfa20013,
+ 0xbe8300ff, 0x00000080,
0xbf800000, 0xbf800000,
0xbf800000, 0xd8d80000,
0x01000000, 0xbf8a0000,
0xc4068070, 0x008ce801,
0x00000000, 0x807d037d,
0x80700370, 0xd5250000,
- 0x0001ff00, 0x00000100,
+ 0x0001ff00, 0x00000080,
0xbf0a7b7d, 0xbfa2fff3,
- 0xbefe00c1, 0x857d9973,
- 0x8b7d817d, 0xbf06817d,
- 0xbfa20004, 0xbef000ff,
- 0x00000200, 0xbeff0080,
- 0xbfa00003, 0xbef000ff,
- 0x00000400, 0xbeff00c1,
- 0xb8fb3b05, 0x807b817b,
- 0x847b827b, 0x857d9973,
- 0x8b7d817d, 0xbf06817d,
- 0xbfa2001b, 0xbef600ff,
- 0x01000000, 0xbefd0084,
- 0xbf0a7b7d, 0xbfa10040,
- 0x7e008700, 0x7e028701,
- 0x7e048702, 0x7e068703,
- 0xc4068070, 0x008ce800,
- 0x00000000, 0xc4068070,
- 0x008ce801, 0x00008000,
- 0xc4068070, 0x008ce802,
- 0x00010000, 0xc4068070,
- 0x008ce803, 0x00018000,
- 0x807d847d, 0x8070ff70,
- 0x00000200, 0xbf0a7b7d,
- 0xbfa2ffeb, 0xbfa0002a,
+ 0xbfa00012, 0xbe8300ff,
+ 0x00000100, 0xbf800000,
+ 0xbf800000, 0xbf800000,
+ 0xd8d80000, 0x01000000,
+ 0xbf8a0000, 0xc4068070,
+ 0x008ce801, 0x00000000,
+ 0x807d037d, 0x80700370,
+ 0xd5250000, 0x0001ff00,
+ 0x00000100, 0xbf0a7b7d,
+ 0xbfa2fff3, 0xbefe00c1,
+ 0x857d9973, 0x8b7d817d,
+ 0xbf06817d, 0xbfa20004,
+ 0xbef000ff, 0x00000200,
+ 0xbeff0080, 0xbfa00003,
+ 0xbef000ff, 0x00000400,
+ 0xbeff00c1, 0xb8fb3b05,
+ 0x807b817b, 0x847b827b,
+ 0x857d9973, 0x8b7d817d,
+ 0xbf06817d, 0xbfa2001b,
0xbef600ff, 0x01000000,
0xbefd0084, 0xbf0a7b7d,
- 0xbfa10015, 0x7e008700,
+ 0xbfa10040, 0x7e008700,
0x7e028701, 0x7e048702,
0x7e068703, 0xc4068070,
0x008ce800, 0x00000000,
0xc4068070, 0x008ce801,
- 0x00010000, 0xc4068070,
- 0x008ce802, 0x00020000,
+ 0x00008000, 0xc4068070,
+ 0x008ce802, 0x00010000,
0xc4068070, 0x008ce803,
- 0x00030000, 0x807d847d,
- 0x8070ff70, 0x00000400,
+ 0x00018000, 0x807d847d,
+ 0x8070ff70, 0x00000200,
0xbf0a7b7d, 0xbfa2ffeb,
- 0xb8fb1e06, 0x8b7bc17b,
- 0xbfa1000d, 0x847b837b,
- 0x807b7d7b, 0xbefe00c1,
- 0xbeff0080, 0x7e008700,
+ 0xbfa0002a, 0xbef600ff,
+ 0x01000000, 0xbefd0084,
+ 0xbf0a7b7d, 0xbfa10015,
+ 0x7e008700, 0x7e028701,
+ 0x7e048702, 0x7e068703,
0xc4068070, 0x008ce800,
- 0x00000000, 0x807d817d,
- 0x8070ff70, 0x00000080,
- 0xbf0a7b7d, 0xbfa2fff7,
- 0xbfa0016e, 0xbef4007e,
- 0x8b75ff7f, 0x0000ffff,
- 0x8c75ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x10807fac, 0xbef1007f,
- 0xb8f20742, 0x84729972,
- 0x8b6eff7f, 0x04000000,
- 0xbfa1003b, 0xbefe00c1,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbfa20002,
- 0xbeff0080, 0xbfa00001,
- 0xbeff00c1, 0xb8ef4306,
- 0x8b6fc16f, 0xbfa10030,
- 0x846f896f, 0xbef6006f,
+ 0x00000000, 0xc4068070,
+ 0x008ce801, 0x00010000,
+ 0xc4068070, 0x008ce802,
+ 0x00020000, 0xc4068070,
+ 0x008ce803, 0x00030000,
+ 0x807d847d, 0x8070ff70,
+ 0x00000400, 0xbf0a7b7d,
+ 0xbfa2ffeb, 0xb8fb1e06,
+ 0x8b7bc17b, 0xbfa1000d,
+ 0x847b837b, 0x807b7d7b,
+ 0xbefe00c1, 0xbeff0080,
+ 0x7e008700, 0xc4068070,
+ 0x008ce800, 0x00000000,
+ 0x807d817d, 0x8070ff70,
+ 0x00000080, 0xbf0a7b7d,
+ 0xbfa2fff7, 0xbfa0016e,
+ 0xbef4007e, 0x8b75ff7f,
+ 0x0000ffff, 0x8c75ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x10807fac,
+ 0xbef1007f, 0xb8f20742,
+ 0x84729972, 0x8b6eff7f,
+ 0x04000000, 0xbfa1003b,
+ 0xbefe00c1, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa20002, 0xbeff0080,
+ 0xbfa00001, 0xbeff00c1,
+ 0xb8ef4306, 0x8b6fc16f,
+ 0xbfa10030, 0x846f896f,
+ 0xbef6006f, 0xb8f83b05,
+ 0x80788178, 0xbf0d9972,
+ 0xbfa20002, 0x84788978,
+ 0xbfa00001, 0x84788a78,
+ 0xb8ee1e06, 0x846e8a6e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000200, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbefd0080, 0xbfa2000d,
+ 0xc4050078, 0x0080e800,
+ 0x00000000, 0xbf8a0000,
+ 0xdac00000, 0x00000000,
+ 0x807dff7d, 0x00000080,
+ 0x8078ff78, 0x00000080,
+ 0xbf0a6f7d, 0xbfa2fff4,
+ 0xbfa0000c, 0xc4050078,
+ 0x0080e800, 0x00000000,
+ 0xbf8a0000, 0xdac00000,
+ 0x00000000, 0x807dff7d,
+ 0x00000100, 0x8078ff78,
+ 0x00000100, 0xbf0a6f7d,
+ 0xbfa2fff4, 0xbef80080,
+ 0xbefe00c1, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa20002, 0xbeff0080,
+ 0xbfa00001, 0xbeff00c1,
+ 0xb8ef3b05, 0x806f816f,
+ 0x846f826f, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa2002c, 0xbef600ff,
+ 0x01000000, 0xbeee0078,
+ 0x8078ff78, 0x00000200,
+ 0xbefd0084, 0xbf0a6f7d,
+ 0xbfa10061, 0xc4050078,
+ 0x008ce800, 0x00000000,
+ 0xc4050078, 0x008ce801,
+ 0x00008000, 0xc4050078,
+ 0x008ce802, 0x00010000,
+ 0xc4050078, 0x008ce803,
+ 0x00018000, 0xbf8a0000,
+ 0x7e008500, 0x7e028501,
+ 0x7e048502, 0x7e068503,
+ 0x807d847d, 0x8078ff78,
+ 0x00000200, 0xbf0a6f7d,
+ 0xbfa2ffea, 0xc405006e,
+ 0x008ce800, 0x00000000,
+ 0xc405006e, 0x008ce801,
+ 0x00008000, 0xc405006e,
+ 0x008ce802, 0x00010000,
+ 0xc405006e, 0x008ce803,
+ 0x00018000, 0xbf8a0000,
+ 0xbfa0003d, 0xbef600ff,
+ 0x01000000, 0xbeee0078,
+ 0x8078ff78, 0x00000400,
+ 0xbefd0084, 0xbf0a6f7d,
+ 0xbfa10016, 0xc4050078,
+ 0x008ce800, 0x00000000,
+ 0xc4050078, 0x008ce801,
+ 0x00010000, 0xc4050078,
+ 0x008ce802, 0x00020000,
+ 0xc4050078, 0x008ce803,
+ 0x00030000, 0xbf8a0000,
+ 0x7e008500, 0x7e028501,
+ 0x7e048502, 0x7e068503,
+ 0x807d847d, 0x8078ff78,
+ 0x00000400, 0xbf0a6f7d,
+ 0xbfa2ffea, 0xb8ef1e06,
+ 0x8b6fc16f, 0xbfa1000f,
+ 0x846f836f, 0x806f7d6f,
+ 0xbefe00c1, 0xbeff0080,
+ 0xc4050078, 0x008ce800,
+ 0x00000000, 0xbf8a0000,
+ 0x7e008500, 0x807d817d,
+ 0x8078ff78, 0x00000080,
+ 0xbf0a6f7d, 0xbfa2fff6,
+ 0xbeff00c1, 0xc405006e,
+ 0x008ce800, 0x00000000,
+ 0xc405006e, 0x008ce801,
+ 0x00010000, 0xc405006e,
+ 0x008ce802, 0x00020000,
+ 0xc405006e, 0x008ce803,
+ 0x00030000, 0xbf8a0000,
0xb8f83b05, 0x80788178,
0xbf0d9972, 0xbfa20002,
0x84788978, 0xbfa00001,
0x84788a78, 0xb8ee1e06,
0x846e8a6e, 0x80786e78,
0x8078ff78, 0x00000200,
- 0x8078ff78, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbefd0080,
- 0xbfa2000d, 0xc4050078,
- 0x0080e800, 0x00000000,
- 0xbf8a0000, 0xdac00000,
- 0x00000000, 0x807dff7d,
- 0x00000080, 0x8078ff78,
- 0x00000080, 0xbf0a6f7d,
- 0xbfa2fff4, 0xbfa0000c,
- 0xc4050078, 0x0080e800,
- 0x00000000, 0xbf8a0000,
- 0xdac00000, 0x00000000,
- 0x807dff7d, 0x00000100,
- 0x8078ff78, 0x00000100,
- 0xbf0a6f7d, 0xbfa2fff4,
- 0xbef80080, 0xbefe00c1,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbfa20002,
- 0xbeff0080, 0xbfa00001,
- 0xbeff00c1, 0xb8ef3b05,
- 0x806f816f, 0x846f826f,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbfa2002c,
+ 0x80f8ff78, 0x00000050,
0xbef600ff, 0x01000000,
- 0xbeee0078, 0x8078ff78,
- 0x00000200, 0xbefd0084,
- 0xbf0a6f7d, 0xbfa10061,
- 0xc4050078, 0x008ce800,
- 0x00000000, 0xc4050078,
- 0x008ce801, 0x00008000,
- 0xc4050078, 0x008ce802,
- 0x00010000, 0xc4050078,
- 0x008ce803, 0x00018000,
- 0xbf8a0000, 0x7e008500,
- 0x7e028501, 0x7e048502,
- 0x7e068503, 0x807d847d,
+ 0xbefd00ff, 0x0000006c,
+ 0x80f89078, 0xf462403a,
+ 0xf0000000, 0xbf8a0000,
+ 0x80fd847d, 0xbf800000,
+ 0xbe804300, 0xbe824302,
+ 0x80f8a078, 0xf462603a,
+ 0xf0000000, 0xbf8a0000,
+ 0x80fd887d, 0xbf800000,
+ 0xbe804300, 0xbe824302,
+ 0xbe844304, 0xbe864306,
+ 0x80f8c078, 0xf462803a,
+ 0xf0000000, 0xbf8a0000,
+ 0x80fd907d, 0xbf800000,
+ 0xbe804300, 0xbe824302,
+ 0xbe844304, 0xbe864306,
+ 0xbe884308, 0xbe8a430a,
+ 0xbe8c430c, 0xbe8e430e,
+ 0xbf06807d, 0xbfa1fff0,
+ 0xb980f801, 0x00000000,
+ 0xb8f83b05, 0x80788178,
+ 0xbf0d9972, 0xbfa20002,
+ 0x84788978, 0xbfa00001,
+ 0x84788a78, 0xb8ee1e06,
+ 0x846e8a6e, 0x80786e78,
0x8078ff78, 0x00000200,
- 0xbf0a6f7d, 0xbfa2ffea,
- 0xc405006e, 0x008ce800,
- 0x00000000, 0xc405006e,
- 0x008ce801, 0x00008000,
- 0xc405006e, 0x008ce802,
- 0x00010000, 0xc405006e,
- 0x008ce803, 0x00018000,
- 0xbf8a0000, 0xbfa0003d,
0xbef600ff, 0x01000000,
- 0xbeee0078, 0x8078ff78,
- 0x00000400, 0xbefd0084,
- 0xbf0a6f7d, 0xbfa10016,
- 0xc4050078, 0x008ce800,
- 0x00000000, 0xc4050078,
- 0x008ce801, 0x00010000,
- 0xc4050078, 0x008ce802,
- 0x00020000, 0xc4050078,
- 0x008ce803, 0x00030000,
- 0xbf8a0000, 0x7e008500,
- 0x7e028501, 0x7e048502,
- 0x7e068503, 0x807d847d,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7d, 0xbfa2ffea,
- 0xb8ef1e06, 0x8b6fc16f,
- 0xbfa1000f, 0x846f836f,
- 0x806f7d6f, 0xbefe00c1,
- 0xbeff0080, 0xc4050078,
- 0x008ce800, 0x00000000,
- 0xbf8a0000, 0x7e008500,
- 0x807d817d, 0x8078ff78,
- 0x00000080, 0xbf0a6f7d,
- 0xbfa2fff6, 0xbeff00c1,
- 0xc405006e, 0x008ce800,
- 0x00000000, 0xc405006e,
- 0x008ce801, 0x00010000,
- 0xc405006e, 0x008ce802,
- 0x00020000, 0xc405006e,
- 0x008ce803, 0x00030000,
- 0xbf8a0000, 0xb8f83b05,
- 0x80788178, 0xbf0d9972,
- 0xbfa20002, 0x84788978,
- 0xbfa00001, 0x84788a78,
- 0xb8ee1e06, 0x846e8a6e,
- 0x80786e78, 0x8078ff78,
- 0x00000200, 0x80f8ff78,
- 0x00000050, 0xbef600ff,
- 0x01000000, 0xbefd00ff,
- 0x0000006c, 0x80f89078,
- 0xf462403a, 0xf0000000,
- 0xbf8a0000, 0x80fd847d,
- 0xbf800000, 0xbe804300,
- 0xbe824302, 0x80f8a078,
- 0xf462603a, 0xf0000000,
- 0xbf8a0000, 0x80fd887d,
- 0xbf800000, 0xbe804300,
- 0xbe824302, 0xbe844304,
- 0xbe864306, 0x80f8c078,
- 0xf462803a, 0xf0000000,
- 0xbf8a0000, 0x80fd907d,
- 0xbf800000, 0xbe804300,
- 0xbe824302, 0xbe844304,
- 0xbe864306, 0xbe884308,
- 0xbe8a430a, 0xbe8c430c,
- 0xbe8e430e, 0xbf06807d,
- 0xbfa1fff0, 0xb980f801,
- 0x00000000, 0xb8f83b05,
- 0x80788178, 0xbf0d9972,
- 0xbfa20002, 0x84788978,
- 0xbfa00001, 0x84788a78,
- 0xb8ee1e06, 0x846e8a6e,
- 0x80786e78, 0x8078ff78,
- 0x00000200, 0xbef600ff,
- 0x01000000, 0xbeff0071,
- 0xf4621bfa, 0xf0000000,
- 0x80788478, 0xf4621b3a,
+ 0xbeff0071, 0xf4621bfa,
0xf0000000, 0x80788478,
- 0xf4621b7a, 0xf0000000,
- 0x80788478, 0xf4621c3a,
+ 0xf4621b3a, 0xf0000000,
+ 0x80788478, 0xf4621b7a,
0xf0000000, 0x80788478,
- 0xf4621c7a, 0xf0000000,
- 0x80788478, 0xf4621eba,
+ 0xf4621c3a, 0xf0000000,
+ 0x80788478, 0xf4621c7a,
0xf0000000, 0x80788478,
- 0xf4621efa, 0xf0000000,
- 0x80788478, 0xf4621e7a,
+ 0xf4621eba, 0xf0000000,
+ 0x80788478, 0xf4621efa,
0xf0000000, 0x80788478,
- 0xf4621cfa, 0xf0000000,
- 0x80788478, 0xf4621bba,
+ 0xf4621e7a, 0xf0000000,
+ 0x80788478, 0xf4621cfa,
0xf0000000, 0x80788478,
- 0xbf8a0000, 0xb96ef814,
0xf4621bba, 0xf0000000,
0x80788478, 0xbf8a0000,
- 0xb96ef815, 0xf4621bba,
+ 0xb96ef814, 0xf4621bba,
0xf0000000, 0x80788478,
- 0xbf8a0000, 0xb96ef812,
+ 0xbf8a0000, 0xb96ef815,
0xf4621bba, 0xf0000000,
0x80788478, 0xbf8a0000,
- 0xb96ef813, 0x8b6eff7f,
- 0x04000000, 0xbfa1000d,
- 0x80788478, 0xf4621bba,
+ 0xb96ef812, 0xf4621bba,
0xf0000000, 0x80788478,
- 0xbf8a0000, 0xbf0d806e,
- 0xbfa10006, 0x856e906e,
- 0x8b6e6e6e, 0xbfa10003,
- 0xbe804ec1, 0x816ec16e,
- 0xbfa0fffb, 0xbefd006f,
- 0xbefe0070, 0xbeff0071,
- 0xb97b2011, 0x857b867b,
- 0xb97b0191, 0x857b827b,
- 0xb97bba11, 0xb973f801,
- 0xb8ee3b05, 0x806e816e,
- 0xbf0d9972, 0xbfa20002,
- 0x846e896e, 0xbfa00001,
- 0x846e8a6e, 0xb8ef1e06,
- 0x846f8a6f, 0x806e6f6e,
- 0x806eff6e, 0x00000200,
- 0x806e746e, 0x826f8075,
- 0x8b6fff6f, 0x0000ffff,
- 0xf4605c37, 0xf8000050,
- 0xf4605d37, 0xf8000060,
- 0xf4601e77, 0xf8000074,
- 0xbf8a0000, 0x8b6dff6d,
- 0x0000ffff, 0x8bfe7e7e,
- 0x8bea6a6a, 0xb97af804,
+ 0xbf8a0000, 0xb96ef813,
+ 0x8b6eff7f, 0x04000000,
+ 0xbfa1000d, 0x80788478,
+ 0xf4621bba, 0xf0000000,
+ 0x80788478, 0xbf8a0000,
+ 0xbf0d806e, 0xbfa10006,
+ 0x856e906e, 0x8b6e6e6e,
+ 0xbfa10003, 0xbe804ec1,
+ 0x816ec16e, 0xbfa0fffb,
+ 0xbefd006f, 0xbefe0070,
+ 0xbeff0071, 0xb97b2011,
+ 0x857b867b, 0xb97b0191,
+ 0x857b827b, 0xb97bba11,
+ 0xb973f801, 0xb8ee3b05,
+ 0x806e816e, 0xbf0d9972,
+ 0xbfa20002, 0x846e896e,
+ 0xbfa00001, 0x846e8a6e,
+ 0xb8ef1e06, 0x846f8a6f,
+ 0x806e6f6e, 0x806eff6e,
+ 0x00000200, 0x806e746e,
+ 0x826f8075, 0x8b6fff6f,
+ 0x0000ffff, 0xf4605c37,
+ 0xf8000050, 0xf4605d37,
+ 0xf8000060, 0xf4601e77,
+ 0xf8000074, 0xbf8a0000,
+ 0x8b6dff6d, 0x0000ffff,
+ 0x8bfe7e7e, 0x8bea6a6a,
+ 0xb97af804, 0xbe804ec2,
+ 0xbf94fffe, 0xbe804a6c,
0xbe804ec2, 0xbf94fffe,
- 0xbe804a6c, 0xbe804ec2,
- 0xbf94fffe, 0xbfb10000,
+ 0xbfb10000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
- 0xbf9f0000, 0x00000000,
};
static const uint32_t cwsr_trap_gfx9_5_0_hex[] = {
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
index 7b9d36e5fa43..5a1a1b1f897f 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
@@ -30,6 +30,7 @@
#define CHIP_GFX12 37
#define SINGLE_STEP_MISSED_WORKAROUND 1 //workaround for lost TRAP_AFTER_INST exception when SAVECTX raised
+#define HAVE_VALU_SGPR_HAZARD (ASIC_FAMILY == CHIP_GFX12)
var SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK = 0x4
var SQ_WAVE_STATE_PRIV_SCC_SHIFT = 9
@@ -351,6 +352,7 @@ L_HAVE_VGPRS:
v_writelane_b32 v0, ttmp13, 0xD
v_writelane_b32 v0, exec_lo, 0xE
v_writelane_b32 v0, exec_hi, 0xF
+ valu_sgpr_hazard()
s_mov_b32 exec_lo, 0x3FFF
s_mov_b32 exec_hi, 0x0
@@ -417,7 +419,6 @@ L_SAVE_HWREG:
v_mov_b32 v0, 0x0 //Offset[31:0] from buffer resource
v_mov_b32 v1, 0x0 //Offset[63:32] from buffer resource
v_mov_b32 v2, 0x0 //Set of SGPRs for TCP store
- s_mov_b32 m0, 0x0 //Next lane of v2 to write to
// Ensure no further changes to barrier or LDS state.
// STATE_PRIV.BARRIER_COMPLETE may change up to this point.
@@ -430,40 +431,41 @@ L_SAVE_HWREG:
s_andn2_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
s_or_b32 s_save_state_priv, s_save_state_priv, s_save_tmp
- write_hwreg_to_v2(s_save_m0)
- write_hwreg_to_v2(s_save_pc_lo)
s_andn2_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
- write_hwreg_to_v2(s_save_tmp)
- write_hwreg_to_v2(s_save_exec_lo)
- write_hwreg_to_v2(s_save_exec_hi)
- write_hwreg_to_v2(s_save_state_priv)
+ v_writelane_b32 v2, s_save_m0, 0x0
+ v_writelane_b32 v2, s_save_pc_lo, 0x1
+ v_writelane_b32 v2, s_save_tmp, 0x2
+ v_writelane_b32 v2, s_save_exec_lo, 0x3
+ v_writelane_b32 v2, s_save_exec_hi, 0x4
+ v_writelane_b32 v2, s_save_state_priv, 0x5
+ v_writelane_b32 v2, s_save_xnack_mask, 0x7
+ valu_sgpr_hazard()
s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
- write_hwreg_to_v2(s_save_tmp)
+ v_writelane_b32 v2, s_save_tmp, 0x6
- write_hwreg_to_v2(s_save_xnack_mask)
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_MODE)
+ v_writelane_b32 v2, s_save_tmp, 0x8
- s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_MODE)
- write_hwreg_to_v2(s_save_m0)
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_SCRATCH_BASE_LO)
+ v_writelane_b32 v2, s_save_tmp, 0x9
- s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_SCRATCH_BASE_LO)
- write_hwreg_to_v2(s_save_m0)
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_SCRATCH_BASE_HI)
+ v_writelane_b32 v2, s_save_tmp, 0xA
- s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_SCRATCH_BASE_HI)
- write_hwreg_to_v2(s_save_m0)
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
+ v_writelane_b32 v2, s_save_tmp, 0xB
- s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
- write_hwreg_to_v2(s_save_m0)
-
- s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_TRAP_CTRL)
- write_hwreg_to_v2(s_save_m0)
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_TRAP_CTRL)
+ v_writelane_b32 v2, s_save_tmp, 0xC
s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
- write_hwreg_to_v2(s_save_tmp)
+ v_writelane_b32 v2, s_save_tmp, 0xD
s_get_barrier_state s_save_tmp, -1
s_wait_kmcnt (0)
- write_hwreg_to_v2(s_save_tmp)
+ v_writelane_b32 v2, s_save_tmp, 0xE
+ valu_sgpr_hazard()
// Write HWREGs with 16 VGPR lanes. TTMPs occupy space after this.
s_mov_b32 exec_lo, 0xFFFF
@@ -497,10 +499,12 @@ L_SAVE_SGPR_LOOP:
s_movrels_b64 s12, s12 //s12 = s[12+m0], s13 = s[13+m0]
s_movrels_b64 s14, s14 //s14 = s[14+m0], s15 = s[15+m0]
- write_16sgpr_to_v2(s0)
-
- s_cmp_eq_u32 ttmp13, 0x20 //have 32 VGPR lanes filled?
- s_cbranch_scc0 L_SAVE_SGPR_SKIP_TCP_STORE
+ s_cmp_eq_u32 ttmp13, 0x0
+ s_cbranch_scc0 L_WRITE_V2_SECOND_HALF
+ write_16sgpr_to_v2(s0, 0x0)
+ s_branch L_SAVE_SGPR_SKIP_TCP_STORE
+L_WRITE_V2_SECOND_HALF:
+ write_16sgpr_to_v2(s0, 0x10)
buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
s_add_u32 s_save_mem_offset, s_save_mem_offset, 0x80
@@ -1056,27 +1060,21 @@ L_END_PGM:
s_endpgm_saved
end
-function write_hwreg_to_v2(s)
- // Copy into VGPR for later TCP store.
- v_writelane_b32 v2, s, m0
- s_add_u32 m0, m0, 0x1
-end
-
-
-function write_16sgpr_to_v2(s)
+function write_16sgpr_to_v2(s, lane_offset)
// Copy into VGPR for later TCP store.
for var sgpr_idx = 0; sgpr_idx < 16; sgpr_idx ++
- v_writelane_b32 v2, s[sgpr_idx], ttmp13
- s_add_u32 ttmp13, ttmp13, 0x1
+ v_writelane_b32 v2, s[sgpr_idx], sgpr_idx + lane_offset
end
+ valu_sgpr_hazard()
+ s_add_u32 ttmp13, ttmp13, 0x10
end
function write_12sgpr_to_v2(s)
// Copy into VGPR for later TCP store.
for var sgpr_idx = 0; sgpr_idx < 12; sgpr_idx ++
- v_writelane_b32 v2, s[sgpr_idx], ttmp13
- s_add_u32 ttmp13, ttmp13, 0x1
+ v_writelane_b32 v2, s[sgpr_idx], sgpr_idx
end
+ valu_sgpr_hazard()
end
function read_hwreg_from_mem(s, s_rsrc, s_mem_offset)
@@ -1128,3 +1126,11 @@ function get_wave_size2(s_reg)
s_getreg_b32 s_reg, hwreg(HW_REG_WAVE_STATUS,SQ_WAVE_STATUS_WAVE64_SHIFT,SQ_WAVE_STATUS_WAVE64_SIZE)
s_lshl_b32 s_reg, s_reg, S_WAVE_SIZE
end
+
+function valu_sgpr_hazard
+#if HAVE_VALU_SGPR_HAZARD
+ for var rep = 0; rep < 8; rep ++
+ ds_nop
+ end
+#endif
+end
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 065d87841459..1e9dd00620bf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -155,8 +155,8 @@ static int kfd_open(struct inode *inode, struct file *filep)
/* filep now owns the reference returned by kfd_create_process */
filep->private_data = process;
- dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
- process->pasid, process->is_32bit_user_mode);
+ dev_dbg(kfd_device, "process pid %d opened kfd node, compat mode (32 bit) - %d\n",
+ process->lead_thread->pid, process->is_32bit_user_mode);
return 0;
}
@@ -212,6 +212,11 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
return -EINVAL;
}
+ if (args->ring_size < KFD_MIN_QUEUE_RING_SIZE) {
+ args->ring_size = KFD_MIN_QUEUE_RING_SIZE;
+ pr_debug("Ring size too small, clamped to KFD_MIN_QUEUE_RING_SIZE\n");
+ }
+
if (!access_ok((const void __user *) args->read_pointer_address,
sizeof(uint32_t))) {
pr_err("Can't access read pointer\n");
@@ -361,8 +366,8 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
goto err_acquire_queue_buf;
}
- pr_debug("Creating queue for PASID 0x%x on gpu 0x%x\n",
- p->pasid,
+ pr_debug("Creating queue for process pid %d on gpu 0x%x\n",
+ p->lead_thread->pid,
dev->id);
err = pqm_create_queue(&p->pqm, dev, &q_properties, &queue_id,
@@ -415,9 +420,9 @@ static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
int retval;
struct kfd_ioctl_destroy_queue_args *args = data;
- pr_debug("Destroying queue id %d for pasid 0x%x\n",
+ pr_debug("Destroying queue id %d for process pid %d\n",
args->queue_id,
- p->pasid);
+ p->lead_thread->pid);
mutex_lock(&p->mutex);
@@ -461,6 +466,11 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
return -EINVAL;
}
+ if (args->ring_size < KFD_MIN_QUEUE_RING_SIZE) {
+ args->ring_size = KFD_MIN_QUEUE_RING_SIZE;
+ pr_debug("Ring size too small, clamped to KFD_MIN_QUEUE_RING_SIZE\n");
+ }
+
properties.queue_address = args->ring_base_address;
properties.queue_size = args->ring_size;
properties.queue_percent = args->queue_percentage & 0xFF;
@@ -468,8 +478,8 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
properties.pm4_target_xcc = (args->queue_percentage >> 8) & 0xFF;
properties.priority = args->queue_priority;
- pr_debug("Updating queue id %d for pasid 0x%x\n",
- args->queue_id, p->pasid);
+ pr_debug("Updating queue id %d for process pid %d\n",
+ args->queue_id, p->lead_thread->pid);
mutex_lock(&p->mutex);
@@ -596,7 +606,8 @@ static int kfd_ioctl_set_memory_policy(struct file *filep,
default_policy,
alternate_policy,
(void __user *)args->alternate_aperture_base,
- args->alternate_aperture_size))
+ args->alternate_aperture_size,
+ args->misc_process_flag))
err = -EINVAL;
out:
@@ -695,7 +706,7 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
struct kfd_process_device_apertures *pAperture;
int i;
- dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
+ dev_dbg(kfd_device, "get apertures for process pid %d", p->lead_thread->pid);
args->num_of_nodes = 0;
@@ -747,7 +758,8 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
int ret;
int i;
- dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
+ dev_dbg(kfd_device, "get apertures for process pid %d",
+ p->lead_thread->pid);
if (args->num_of_nodes == 0) {
/* Return number of nodes, so that user space can allocate
@@ -3365,12 +3377,12 @@ static int kfd_mmio_mmap(struct kfd_node *dev, struct kfd_process *process,
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- pr_debug("pasid 0x%x mapping mmio page\n"
+ pr_debug("process pid %d mapping mmio page\n"
" target user address == 0x%08llX\n"
" physical address == 0x%08llX\n"
" vm_flags == 0x%04lX\n"
" size == 0x%04lX\n",
- process->pasid, (unsigned long long) vma->vm_start,
+ process->lead_thread->pid, (unsigned long long) vma->vm_start,
address, vma->vm_flags, PAGE_SIZE);
return io_remap_pfn_range(vma,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 693469c18c60..4a7180b46b71 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -1704,6 +1704,7 @@ int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pc
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
/* Cacheline size not available in IP discovery for gc11.
* kfd_fill_gpu_cache_info_from_gfx_config to hard code it
*/
@@ -2132,9 +2133,6 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
bool ext_cpu = KFD_GC_VERSION(kdev) != IP_VERSION(9, 4, 3);
int mem_bw = 819200, weight = ext_cpu ? KFD_CRAT_XGMI_WEIGHT :
KFD_CRAT_INTRA_SOCKET_WEIGHT;
- uint32_t bandwidth = ext_cpu ? amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(
- kdev->adev, NULL, true) : mem_bw;
-
/*
* with host gpu xgmi link, host can access gpu memory whether
* or not pcie bar type is large, so always create bidirectional
@@ -2143,8 +2141,16 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
sub_type_hdr->weight_xgmi = weight;
- sub_type_hdr->minimum_bandwidth_mbs = bandwidth;
- sub_type_hdr->maximum_bandwidth_mbs = bandwidth;
+ if (ext_cpu) {
+ amdgpu_xgmi_get_bandwidth(kdev->adev, NULL,
+ AMDGPU_XGMI_BW_MODE_PER_LINK,
+ AMDGPU_XGMI_BW_UNIT_MBYTES,
+ &sub_type_hdr->minimum_bandwidth_mbs,
+ &sub_type_hdr->maximum_bandwidth_mbs);
+ } else {
+ sub_type_hdr->minimum_bandwidth_mbs = mem_bw;
+ sub_type_hdr->maximum_bandwidth_mbs = mem_bw;
+ }
} else {
sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS;
sub_type_hdr->minimum_bandwidth_mbs =
@@ -2197,12 +2203,12 @@ static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
if (use_ta_info) {
sub_type_hdr->weight_xgmi = KFD_CRAT_XGMI_WEIGHT *
- amdgpu_amdkfd_get_xgmi_hops_count(kdev->adev, peer_kdev->adev);
- sub_type_hdr->maximum_bandwidth_mbs =
- amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev,
- peer_kdev->adev, false);
- sub_type_hdr->minimum_bandwidth_mbs = sub_type_hdr->maximum_bandwidth_mbs ?
- amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, NULL, true) : 0;
+ amdgpu_xgmi_get_hops_count(kdev->adev, peer_kdev->adev);
+ amdgpu_xgmi_get_bandwidth(kdev->adev, peer_kdev->adev,
+ AMDGPU_XGMI_BW_MODE_PER_PEER,
+ AMDGPU_XGMI_BW_UNIT_MBYTES,
+ &sub_type_hdr->minimum_bandwidth_mbs,
+ &sub_type_hdr->maximum_bandwidth_mbs);
} else {
bool is_single_hop = kdev->kfd == peer_kdev->kfd;
int weight = is_single_hop ? KFD_CRAT_INTRA_SOCKET_WEIGHT :
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
index a8abc3091801..ba99e0f258ae 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
@@ -204,11 +204,12 @@ bool kfd_set_dbg_ev_from_interrupt(struct kfd_node *dev,
size_t exception_data_size)
{
struct kfd_process *p;
+ struct kfd_process_device *pdd = NULL;
bool signaled_to_debugger_or_runtime = false;
- p = kfd_lookup_process_by_pasid(pasid);
+ p = kfd_lookup_process_by_pasid(pasid, &pdd);
- if (!p)
+ if (!pdd)
return false;
if (!kfd_dbg_ev_raise(trap_mask, p, dev, doorbell_id, true,
@@ -238,9 +239,8 @@ bool kfd_set_dbg_ev_from_interrupt(struct kfd_node *dev,
mutex_unlock(&p->mutex);
} else if (trap_mask & KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION)) {
- kfd_dqm_evict_pasid(dev->dqm, p->pasid);
- kfd_signal_vm_fault_event(dev, p->pasid, NULL,
- exception_data);
+ kfd_evict_process_device(pdd);
+ kfd_signal_vm_fault_event(pdd, NULL, exception_data);
signaled_to_debugger_or_runtime = true;
}
@@ -276,8 +276,8 @@ int kfd_dbg_send_exception_to_runtime(struct kfd_process *p,
data = (struct kfd_hsa_memory_exception_data *)
pdd->vm_fault_exc_data;
- kfd_dqm_evict_pasid(pdd->dev->dqm, p->pasid);
- kfd_signal_vm_fault_event(pdd->dev, p->pasid, NULL, data);
+ kfd_evict_process_device(pdd);
+ kfd_signal_vm_fault_event(pdd, NULL, data);
error_reason &= ~KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION);
}
@@ -357,12 +357,12 @@ int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd, bool sq_trap_en)
return 0;
if (!pdd->proc_ctx_cpu_ptr) {
- r = amdgpu_amdkfd_alloc_gtt_mem(adev,
- AMDGPU_MES_PROC_CTX_SIZE,
- &pdd->proc_ctx_bo,
- &pdd->proc_ctx_gpu_addr,
- &pdd->proc_ctx_cpu_ptr,
- false);
+ r = amdgpu_amdkfd_alloc_gtt_mem(adev,
+ AMDGPU_MES_PROC_CTX_SIZE,
+ &pdd->proc_ctx_bo,
+ &pdd->proc_ctx_gpu_addr,
+ &pdd->proc_ctx_cpu_ptr,
+ false);
if (r) {
dev_err(adev->dev,
"failed to allocate process context bo\n");
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index a29374c86405..b9c82be6ce13 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -101,6 +101,7 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
case IP_VERSION(6, 1, 0):
case IP_VERSION(6, 1, 1):
case IP_VERSION(6, 1, 2):
+ case IP_VERSION(6, 1, 3):
case IP_VERSION(7, 0, 0):
case IP_VERSION(7, 0, 1):
kfd->device_info.num_sdma_queues_per_engine = 8;
@@ -122,6 +123,7 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
case IP_VERSION(6, 1, 0):
case IP_VERSION(6, 1, 1):
case IP_VERSION(6, 1, 2):
+ case IP_VERSION(6, 1, 3):
case IP_VERSION(7, 0, 0):
case IP_VERSION(7, 0, 1):
/* Reserve 1 for paging and 1 for gfx */
@@ -180,6 +182,7 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
kfd->device_info.event_interrupt_class = &event_interrupt_class_v11;
break;
case IP_VERSION(12, 0, 0):
@@ -454,6 +457,10 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
gfx_target_version = 110502;
f2g = &gfx_v11_kfd2kgd;
break;
+ case IP_VERSION(11, 5, 3):
+ gfx_target_version = 110503;
+ f2g = &gfx_v11_kfd2kgd;
+ break;
case IP_VERSION(12, 0, 0):
gfx_target_version = 120000;
f2g = &gfx_v12_kfd2kgd;
@@ -583,9 +590,13 @@ static int kfd_gws_init(struct kfd_node *node)
&& kfd->mec2_fw_version >= 0x6b) ||
(KFD_GC_VERSION(node) >= IP_VERSION(11, 0, 0)
&& KFD_GC_VERSION(node) < IP_VERSION(12, 0, 0)
- && mes_rev >= 68))))
+ && mes_rev >= 68) ||
+ (KFD_GC_VERSION(node) >= IP_VERSION(12, 0, 0))))) {
+ if (KFD_GC_VERSION(node) >= IP_VERSION(12, 0, 0))
+ node->adev->gds.gws_size = 64;
ret = amdgpu_amdkfd_alloc_gws(node->adev,
node->adev->gds.gws_size, &node->gws);
+ }
return ret;
}
@@ -1558,7 +1569,7 @@ bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entr
u32 cam_index;
if (entry->ih == &adev->irq.ih_soft || entry->ih == &adev->irq.ih1) {
- p = kfd_lookup_process_by_pasid(entry->pasid);
+ p = kfd_lookup_process_by_pasid(entry->pasid, NULL);
if (!p)
return true;
@@ -1593,6 +1604,11 @@ int kfd_debugfs_hang_hws(struct kfd_node *dev)
return -EINVAL;
}
+ if (dev->kfd->shared_resources.enable_mes) {
+ dev_err(dev->adev->dev, "Inducing MES hang is not supported\n");
+ return -EINVAL;
+ }
+
return dqm_debugfs_hang_hws(dev->dqm);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index d4593374e7a1..c610e172a2b8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -36,12 +36,15 @@
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_reset.h"
+#include "amdgpu_sdma.h"
#include "mes_v11_api_def.h"
#include "kfd_debug.h"
/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
+/* See unmap_queues_cpsch() */
+#define USE_DEFAULT_GRACE_PERIOD 0xffffffff
static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
u32 pasid, unsigned int vmid);
@@ -66,7 +69,8 @@ static inline void deallocate_hqd(struct device_queue_manager *dqm,
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
static int allocate_sdma_queue(struct device_queue_manager *dqm,
struct queue *q, const uint32_t *restore_sdma_id);
-static void kfd_process_hw_exception(struct work_struct *work);
+
+static int reset_queues_on_hws_hang(struct device_queue_manager *dqm, bool is_sdma);
static inline
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
@@ -170,7 +174,7 @@ static void kfd_hws_hang(struct device_queue_manager *dqm)
/*
* Issue a GPU reset if HWS is unresponsive
*/
- schedule_work(&dqm->hw_exception_work);
+ amdgpu_amdkfd_gpu_reset(dqm->dev->adev);
}
static int convert_to_mes_queue_type(int queue_type)
@@ -207,23 +211,8 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
if (!down_read_trylock(&adev->reset_domain->sem))
return -EIO;
- if (!pdd->proc_ctx_cpu_ptr) {
- r = amdgpu_amdkfd_alloc_gtt_mem(adev,
- AMDGPU_MES_PROC_CTX_SIZE,
- &pdd->proc_ctx_bo,
- &pdd->proc_ctx_gpu_addr,
- &pdd->proc_ctx_cpu_ptr,
- false);
- if (r) {
- dev_err(adev->dev,
- "failed to allocate process context bo\n");
- return r;
- }
- memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
- }
-
memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
- queue_input.process_id = qpd->pqm->process->pasid;
+ queue_input.process_id = pdd->pasid;
queue_input.page_table_base_addr = qpd->page_table_base;
queue_input.process_va_start = 0;
queue_input.process_va_end = adev->vm_manager.max_pfn - 1;
@@ -542,6 +531,7 @@ static int allocate_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
+ struct kfd_process_device *pdd = qpd_to_pdd(qpd);
struct device *dev = dqm->dev->adev->dev;
int allocated_vmid = -1, i;
@@ -560,9 +550,9 @@ static int allocate_vmid(struct device_queue_manager *dqm,
pr_debug("vmid allocated: %d\n", allocated_vmid);
- dqm->vmid_pasid[allocated_vmid] = q->process->pasid;
+ dqm->vmid_pasid[allocated_vmid] = pdd->pasid;
- set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);
+ set_pasid_vmid_mapping(dqm, pdd->pasid, allocated_vmid);
qpd->vmid = allocated_vmid;
q->properties.vmid = allocated_vmid;
@@ -814,6 +804,11 @@ static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process
return -EOPNOTSUPP;
}
+ /* Take the VMID for that process the safe way, via the PDD */
+ pdd = kfd_get_process_device_data(dev, p);
+ if (!pdd)
+ return -EFAULT;
+
/* Scan all registers in the range ATC_VMID8_PASID_MAPPING ..
* ATC_VMID15_PASID_MAPPING
* to check which VMID the current process is mapped to.
@@ -823,23 +818,19 @@ static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process
status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info
(dev->adev, vmid, &queried_pasid);
- if (status && queried_pasid == p->pasid) {
- pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n",
- vmid, p->pasid);
+ if (status && queried_pasid == pdd->pasid) {
+ pr_debug("Killing wave fronts of vmid %d and process pid %d\n",
+ vmid, p->lead_thread->pid);
break;
}
}
if (vmid > last_vmid_to_scan) {
- dev_err(dev->adev->dev, "Didn't find vmid for pasid 0x%x\n", p->pasid);
+ dev_err(dev->adev->dev, "Didn't find vmid for process pid %d\n",
+ p->lead_thread->pid);
return -EFAULT;
}
- /* taking the VMID for that process on the safe way using PDD */
- pdd = kfd_get_process_device_data(dev, p);
- if (!pdd)
- return -EFAULT;
-
reg_gfx_index.bits.sh_broadcast_writes = 1;
reg_gfx_index.bits.se_broadcast_writes = 1;
reg_gfx_index.bits.instance_broadcast_writes = 1;
@@ -1075,8 +1066,8 @@ static int suspend_single_queue(struct device_queue_manager *dqm,
if (q->properties.is_suspended)
return 0;
- pr_debug("Suspending PASID %u queue [%i]\n",
- pdd->process->pasid,
+ pr_debug("Suspending process pid %d queue [%i]\n",
+ pdd->process->lead_thread->pid,
q->properties.queue_id);
is_new = q->properties.exception_status & KFD_EC_MASK(EC_QUEUE_NEW);
@@ -1123,8 +1114,8 @@ static int resume_single_queue(struct device_queue_manager *dqm,
pdd = qpd_to_pdd(qpd);
- pr_debug("Restoring from suspend PASID %u queue [%i]\n",
- pdd->process->pasid,
+ pr_debug("Restoring from suspend process pid %d queue [%i]\n",
+ pdd->process->lead_thread->pid,
q->properties.queue_id);
q->properties.is_suspended = false;
@@ -1157,8 +1148,8 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
goto out;
pdd = qpd_to_pdd(qpd);
- pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
- pdd->process->pasid);
+ pr_debug_ratelimited("Evicting process pid %d queues\n",
+ pdd->process->lead_thread->pid);
pdd->last_evict_timestamp = get_jiffies_64();
/* Mark all queues as evicted. Deactivate all active queues on
@@ -1215,8 +1206,8 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
if (!pdd->drm_priv)
goto out;
- pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
- pdd->process->pasid);
+ pr_debug_ratelimited("Evicting process pid %d queues\n",
+ pdd->process->lead_thread->pid);
/* Mark all queues as evicted. Deactivate all active queues on
* the qpd.
@@ -1230,11 +1221,13 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
decrement_queue_count(dqm, qpd, q);
if (dqm->dev->kfd->shared_resources.enable_mes) {
- retval = remove_queue_mes(dqm, q, qpd);
- if (retval) {
+ int err;
+
+ err = remove_queue_mes(dqm, q, qpd);
+ if (err) {
dev_err(dev, "Failed to evict queue %d\n",
q->properties.queue_id);
- goto out;
+ retval = err;
}
}
}
@@ -1274,8 +1267,8 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
goto out;
}
- pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
- pdd->process->pasid);
+ pr_debug_ratelimited("Restoring process pid %d queues\n",
+ pdd->process->lead_thread->pid);
/* Update PD Base in QPD */
qpd->page_table_base = pd_base;
@@ -1358,8 +1351,8 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
if (!pdd->drm_priv)
goto vm_not_acquired;
- pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
- pdd->process->pasid);
+ pr_debug_ratelimited("Restoring process pid %d queues\n",
+ pdd->process->lead_thread->pid);
/* Update PD Base in QPD */
qpd->page_table_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
@@ -1753,15 +1746,11 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
dqm->active_cp_queue_count = 0;
dqm->gws_queue_count = 0;
dqm->active_runlist = false;
- INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
dqm->trap_debug_vmid = 0;
init_sdma_bitmaps(dqm);
- if (dqm->dev->kfd2kgd->get_iq_wait_times)
- dqm->dev->kfd2kgd->get_iq_wait_times(dqm->dev->adev,
- &dqm->wait_times,
- ffs(dqm->dev->xcc_mask) - 1);
+ update_dqm_wait_times(dqm);
return 0;
}
@@ -1857,25 +1846,11 @@ static int start_cpsch(struct device_queue_manager *dqm)
/* clear hang status when driver try to start the hw scheduler */
dqm->sched_running = true;
- if (!dqm->dev->kfd->shared_resources.enable_mes)
+ if (!dqm->dev->kfd->shared_resources.enable_mes) {
+ if (pm_config_dequeue_wait_counts(&dqm->packet_mgr,
+ KFD_DEQUEUE_WAIT_INIT, 0 /* unused */))
+ dev_err(dev, "Setting optimized dequeue wait failed. Using default values\n");
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);
-
- /* Set CWSR grace period to 1x1000 cycle for GFX9.4.3 APU */
- if (amdgpu_emu_mode == 0 && dqm->dev->adev->gmc.is_app_apu &&
- (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3))) {
- uint32_t reg_offset = 0;
- uint32_t grace_period = 1;
-
- retval = pm_update_grace_period(&dqm->packet_mgr,
- grace_period);
- if (retval)
- dev_err(dev, "Setting grace timeout failed\n");
- else if (dqm->dev->kfd2kgd->build_grace_period_packet_info)
- /* Update dqm->wait_times maintained in software */
- dqm->dev->kfd2kgd->build_grace_period_packet_info(
- dqm->dev->adev, dqm->wait_times,
- grace_period, &reg_offset,
- &dqm->wait_times);
}
/* setup per-queue reset detection buffer */
@@ -2150,8 +2125,8 @@ static void set_queue_as_reset(struct device_queue_manager *dqm, struct queue *q
{
struct kfd_process_device *pdd = qpd_to_pdd(qpd);
- dev_err(dqm->dev->adev->dev, "queue id 0x%0x at pasid 0x%0x is reset\n",
- q->properties.queue_id, q->process->pasid);
+ dev_err(dqm->dev->adev->dev, "queue id 0x%0x at process pid %d is reset\n",
+ q->properties.queue_id, pdd->process->lead_thread->pid);
pdd->has_reset_queue = true;
if (q->properties.is_active) {
@@ -2220,8 +2195,7 @@ static struct queue *find_queue_by_address(struct device_queue_manager *dqm, uin
return NULL;
}
-/* only for compute queue */
-static int reset_queues_on_hws_hang(struct device_queue_manager *dqm)
+static int reset_hung_queues(struct device_queue_manager *dqm)
{
int r = 0, reset_count = 0, i;
@@ -2274,7 +2248,112 @@ reset_fail:
return r;
}
-/* dqm->lock mutex has to be locked before calling this function */
+static bool sdma_has_hang(struct device_queue_manager *dqm)
+{
+ int engine_start = dqm->dev->node_id * get_num_all_sdma_engines(dqm);
+ int engine_end = engine_start + get_num_all_sdma_engines(dqm);
+ int num_queues_per_eng = dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
+ int i, j;
+
+ for (i = engine_start; i < engine_end; i++) {
+ for (j = 0; j < num_queues_per_eng; j++) {
+ if (!dqm->dev->kfd2kgd->hqd_sdma_get_doorbell(dqm->dev->adev, i, j))
+ continue;
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool set_sdma_queue_as_reset(struct device_queue_manager *dqm,
+ uint32_t doorbell_off)
+{
+ struct device_process_node *cur;
+ struct qcm_process_device *qpd;
+ struct queue *q;
+
+ list_for_each_entry(cur, &dqm->queues, list) {
+ qpd = cur->qpd;
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ if ((q->properties.type == KFD_QUEUE_TYPE_SDMA ||
+ q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) &&
+ q->properties.doorbell_off == doorbell_off) {
+ set_queue_as_reset(dqm, q, qpd);
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static int reset_hung_queues_sdma(struct device_queue_manager *dqm)
+{
+ int engine_start = dqm->dev->node_id * get_num_all_sdma_engines(dqm);
+ int engine_end = engine_start + get_num_all_sdma_engines(dqm);
+ int num_queues_per_eng = dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
+ int r = 0, i, j;
+
+ if (dqm->is_hws_hang)
+ return -EIO;
+
+ /* Scan for hung HW queues and reset engine. */
+ dqm->detect_hang_count = 0;
+ for (i = engine_start; i < engine_end; i++) {
+ for (j = 0; j < num_queues_per_eng; j++) {
+ uint32_t doorbell_off =
+ dqm->dev->kfd2kgd->hqd_sdma_get_doorbell(dqm->dev->adev, i, j);
+
+ if (!doorbell_off)
+ continue;
+
+ /* Reset engine and check. */
+ if (amdgpu_sdma_reset_engine(dqm->dev->adev, i) ||
+ dqm->dev->kfd2kgd->hqd_sdma_get_doorbell(dqm->dev->adev, i, j) ||
+ !set_sdma_queue_as_reset(dqm, doorbell_off)) {
+ r = -ENOTRECOVERABLE;
+ goto reset_fail;
+ }
+
+ /* Should only expect one queue active per engine */
+ dqm->detect_hang_count++;
+ break;
+ }
+ }
+
+ /* Signal process reset */
+ if (dqm->detect_hang_count)
+ kfd_signal_reset_event(dqm->dev);
+ else
+ r = -ENOTRECOVERABLE;
+
+reset_fail:
+ dqm->detect_hang_count = 0;
+
+ return r;
+}
+
+static int reset_queues_on_hws_hang(struct device_queue_manager *dqm, bool is_sdma)
+{
+ while (halt_if_hws_hang)
+ schedule();
+
+ if (!amdgpu_gpu_recovery)
+ return -ENOTRECOVERABLE;
+
+ return is_sdma ? reset_hung_queues_sdma(dqm) : reset_hung_queues(dqm);
+}
+
+/* dqm->lock mutex has to be locked before calling this function
+ *
+ * @grace_period: if USE_DEFAULT_GRACE_PERIOD, use the default wait time
+ * for context-switch latency. The debugger uses lower values, since it
+ * triggers context switches at high frequency.
+ * This is configured by setting CP_IQ_WAIT_TIME2.SCH_WAVE.
+ *
+ */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
uint32_t filter_param,
@@ -2293,7 +2372,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
return -EIO;
if (grace_period != USE_DEFAULT_GRACE_PERIOD) {
- retval = pm_update_grace_period(&dqm->packet_mgr, grace_period);
+ retval = pm_config_dequeue_wait_counts(&dqm->packet_mgr,
+ KFD_DEQUEUE_WAIT_SET_SCH_WAVE, grace_period);
if (retval)
goto out;
}
@@ -2324,30 +2404,32 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
* check those fields
*/
mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
- if (mqd_mgr->check_preemption_failed(mqd_mgr, dqm->packet_mgr.priv_queue->queue->mqd)) {
- while (halt_if_hws_hang)
- schedule();
- if (reset_queues_on_hws_hang(dqm)) {
- dqm->is_hws_hang = true;
- kfd_hws_hang(dqm);
- retval = -ETIME;
- goto out;
- }
- }
+ if (mqd_mgr->check_preemption_failed(mqd_mgr, dqm->packet_mgr.priv_queue->queue->mqd) &&
+ reset_queues_on_hws_hang(dqm, false))
+ goto reset_fail;
+
+ /* Check for SDMA hang and attempt SDMA reset */
+ if (sdma_has_hang(dqm) && reset_queues_on_hws_hang(dqm, true))
+ goto reset_fail;
/* We need to reset the grace period value for this device */
if (grace_period != USE_DEFAULT_GRACE_PERIOD) {
- if (pm_update_grace_period(&dqm->packet_mgr,
- USE_DEFAULT_GRACE_PERIOD))
+ if (pm_config_dequeue_wait_counts(&dqm->packet_mgr,
+ KFD_DEQUEUE_WAIT_RESET, 0 /* unused */))
dev_err(dev, "Failed to reset grace period\n");
}
pm_release_ib(&dqm->packet_mgr);
dqm->active_runlist = false;
-
out:
up_read(&dqm->dev->adev->reset_domain->sem);
return retval;
+
+reset_fail:
+ dqm->is_hws_hang = true;
+ kfd_hws_hang(dqm);
+ up_read(&dqm->dev->adev->reset_domain->sem);
+ return -ETIME;
}
/* only for compute queue */
@@ -2504,20 +2586,13 @@ failed_try_destroy_debugged_queue:
return retval;
}
-/*
- * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
- * stay in user mode.
- */
-#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
-/* APE1 limit is inclusive and 64K aligned. */
-#define APE1_LIMIT_ALIGNMENT 0xFFFF
-
static bool set_cache_memory_policy(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
- uint64_t alternate_aperture_size)
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties)
{
bool retval = true;
@@ -2526,41 +2601,17 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
dqm_lock(dqm);
- if (alternate_aperture_size == 0) {
- /* base > limit disables APE1 */
- qpd->sh_mem_ape1_base = 1;
- qpd->sh_mem_ape1_limit = 0;
- } else {
- /*
- * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
- * SH_MEM_APE1_BASE[31:0], 0x0000 }
- * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
- * SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
- * Verify that the base and size parameters can be
- * represented in this format and convert them.
- * Additionally restrict APE1 to user-mode addresses.
- */
-
- uint64_t base = (uintptr_t)alternate_aperture_base;
- uint64_t limit = base + alternate_aperture_size - 1;
-
- if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
- (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
- retval = false;
- goto out;
- }
-
- qpd->sh_mem_ape1_base = base >> 16;
- qpd->sh_mem_ape1_limit = limit >> 16;
- }
-
retval = dqm->asic_ops.set_cache_memory_policy(
dqm,
qpd,
default_policy,
alternate_policy,
alternate_aperture_base,
- alternate_aperture_size);
+ alternate_aperture_size,
+ misc_process_properties);
+
+ if (retval)
+ goto out;
if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
program_sh_mem_settings(dqm, qpd);
@@ -2987,20 +3038,19 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm)
int kfd_dqm_suspend_bad_queue_mes(struct kfd_node *knode, u32 pasid, u32 doorbell_id)
{
- struct kfd_process_device *pdd;
- struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct kfd_process_device *pdd = NULL;
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid, &pdd);
struct device_queue_manager *dqm = knode->dqm;
struct device *dev = dqm->dev->adev->dev;
struct qcm_process_device *qpd;
struct queue *q = NULL;
int ret = 0;
- if (!p)
+ if (!pdd)
return -EINVAL;
dqm_lock(dqm);
- pdd = kfd_get_process_device_data(dqm->dev, p);
if (pdd) {
qpd = &pdd->qpd;
@@ -3033,6 +3083,7 @@ int kfd_dqm_suspend_bad_queue_mes(struct kfd_node *knode, u32 pasid, u32 doorbel
out:
dqm_unlock(dqm);
+ kfd_unref_process(p);
return ret;
}
@@ -3074,35 +3125,25 @@ out:
return ret;
}
-int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid)
+int kfd_evict_process_device(struct kfd_process_device *pdd)
{
- struct kfd_process_device *pdd;
- struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct device_queue_manager *dqm;
+ struct kfd_process *p;
int ret = 0;
- if (!p)
- return -EINVAL;
+ p = pdd->process;
+ dqm = pdd->dev->dqm;
+
WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
- pdd = kfd_get_process_device_data(dqm->dev, p);
- if (pdd) {
- if (dqm->dev->kfd->shared_resources.enable_mes)
- ret = kfd_dqm_evict_pasid_mes(dqm, &pdd->qpd);
- else
- ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
- }
- kfd_unref_process(p);
+ if (dqm->dev->kfd->shared_resources.enable_mes)
+ ret = kfd_dqm_evict_pasid_mes(dqm, &pdd->qpd);
+ else
+ ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
return ret;
}
-static void kfd_process_hw_exception(struct work_struct *work)
-{
- struct device_queue_manager *dqm = container_of(work,
- struct device_queue_manager, hw_exception_work);
- amdgpu_amdkfd_gpu_reset(dqm->dev->adev);
-}
-
int reserve_debug_trap_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 09ab36f8e8c6..74a61b5b2f0b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -37,7 +37,6 @@
#define KFD_MES_PROCESS_QUANTUM 100000
#define KFD_MES_GANG_QUANTUM 10000
-#define USE_DEFAULT_GRACE_PERIOD 0xffffffff
struct device_process_node {
struct qcm_process_device *qpd;
@@ -174,7 +173,8 @@ struct device_queue_manager_ops {
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
- uint64_t alternate_aperture_size);
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties);
int (*process_termination)(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
@@ -210,7 +210,8 @@ struct device_queue_manager_asic_ops {
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
- uint64_t alternate_aperture_size);
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties);
void (*init_sdma_vm)(struct device_queue_manager *dqm,
struct queue *q,
struct qcm_process_device *qpd);
@@ -269,7 +270,6 @@ struct device_queue_manager {
/* hw exception */
bool is_hws_hang;
bool is_resetting;
- struct work_struct hw_exception_work;
struct kfd_mem_obj hiq_sdma_mqd;
bool sched_running;
bool sched_halt;
@@ -359,4 +359,14 @@ static inline int read_sdma_queue_counter(uint64_t __user *q_rptr, uint64_t *val
/* SDMA activity counter is stored at queue's RPTR + 0x8 location. */
return get_user(*val, q_rptr + 1);
}
+
+static inline void update_dqm_wait_times(struct device_queue_manager *dqm)
+{
+ if (dqm->dev->kfd2kgd->get_iq_wait_times)
+ dqm->dev->kfd2kgd->get_iq_wait_times(dqm->dev->adev,
+ &dqm->wait_times,
+ ffs(dqm->dev->xcc_mask) - 1);
+}
+
+
#endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */
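The new update_dqm_wait_times() helper above caches the hardware wait-time value through get_iq_wait_times(), addressing the first enabled XCC with ffs(dqm->dev->xcc_mask) - 1. A stand-alone sketch of just that index arithmetic, with a made-up mask value (user-space C, not driver code):

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	int main(void)
	{
		unsigned int xcc_mask = 0x0c;	/* example: XCC 2 and XCC 3 enabled */

		/* ffs() returns the 1-based index of the lowest set bit, so
		 * ffs(mask) - 1 is the id of the first enabled XCC instance. */
		printf("first enabled XCC: %d\n", ffs(xcc_mask) - 1);	/* prints 2 */
		return 0;
	}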
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index d4d95c7f2e5d..0508ef5a41d7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -27,12 +27,21 @@
#include "oss/oss_2_4_sh_mask.h"
#include "gca/gfx_7_2_sh_mask.h"
+/*
+ * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
+ * stay in user mode.
+ */
+#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
+/* APE1 limit is inclusive and 64K aligned. */
+#define APE1_LIMIT_ALIGNMENT 0xFFFF
+
static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
- uint64_t alternate_aperture_size);
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties);
static int update_qpd_cik(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static void init_sdma_vm(struct device_queue_manager *dqm,
@@ -80,10 +89,41 @@ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
- uint64_t alternate_aperture_size)
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties)
{
uint32_t default_mtype;
uint32_t ape1_mtype;
+ unsigned int temp;
+ bool retval = true;
+
+ if (alternate_aperture_size == 0) {
+ /* base > limit disables APE1 */
+ qpd->sh_mem_ape1_base = 1;
+ qpd->sh_mem_ape1_limit = 0;
+ } else {
+ /*
+ * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
+ * SH_MEM_APE1_BASE[31:0], 0x0000 }
+ * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
+ * SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
+ * Verify that the base and size parameters can be
+ * represented in this format and convert them.
+ * Additionally restrict APE1 to user-mode addresses.
+ */
+
+ uint64_t base = (uintptr_t)alternate_aperture_base;
+ uint64_t limit = base + alternate_aperture_size - 1;
+
+ if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
+ (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
+ retval = false;
+ goto out;
+ }
+
+ qpd->sh_mem_ape1_base = base >> 16;
+ qpd->sh_mem_ape1_limit = limit >> 16;
+ }
default_mtype = (default_policy == cache_policy_coherent) ?
MTYPE_NONCACHED :
@@ -97,37 +137,22 @@ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
| ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED)
| DEFAULT_MTYPE(default_mtype)
| APE1_MTYPE(ape1_mtype);
-
- return true;
-}
-
-static int update_qpd_cik(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
-{
- struct kfd_process_device *pdd;
- unsigned int temp;
-
- pdd = qpd_to_pdd(qpd);
-
- /* check if sh_mem_config register already configured */
- if (qpd->sh_mem_config == 0) {
- qpd->sh_mem_config =
- ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) |
- DEFAULT_MTYPE(MTYPE_NONCACHED) |
- APE1_MTYPE(MTYPE_NONCACHED);
- qpd->sh_mem_ape1_limit = 0;
- qpd->sh_mem_ape1_base = 0;
- }
-
/* On dGPU we're always in GPUVM64 addressing mode with 64-bit
* aperture addresses.
*/
- temp = get_sh_mem_bases_nybble_64(pdd);
+ temp = get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd));
qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
+out:
+ return retval;
+}
+
+static int update_qpd_cik(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
return 0;
}
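The APE1 validation that moved into this file encodes a user-mode aperture as 64 KiB-aligned, inclusive base/limit values: the low 16 bits of the base must be zero, the low 16 bits of the inclusive limit must be 0xFFFF, and the high bits must be clear so the aperture stays in user mode. A stand-alone sketch of the conversion with a made-up aperture, mirroring (but not reusing) the kernel check above:

	#include <stdio.h>
	#include <stdint.h>

	#define APE1_FIXED_BITS_MASK	0xFFFF80000000FFFFULL
	#define APE1_LIMIT_ALIGNMENT	0xFFFF

	int main(void)
	{
		uint64_t base  = 0x100000000ULL;	/* 64 KiB aligned, user-mode range */
		uint64_t size  = 0x200000000ULL;	/* 8 GiB aperture */
		uint64_t limit = base + size - 1;	/* inclusive, so it ends in ...FFFF */

		if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
		    (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
			printf("rejected\n");
			return 1;
		}

		/* Only address bits [47:16] are programmed; the hardware
		 * sign-extends register bit 31 into bits [63:48]. */
		printf("SH_MEM_APE1_BASE  = 0x%llx\n", (unsigned long long)(base >> 16));
		printf("SH_MEM_APE1_LIMIT = 0x%llx\n", (unsigned long long)(limit >> 16));
		return 0;
	}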
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
index 245a90dfc2f6..ba6e3d747ccd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
@@ -31,10 +31,18 @@ static int update_qpd_v10(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd);
+static bool set_cache_memory_policy_v10(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties);
void device_queue_manager_init_v10(
struct device_queue_manager_asic_ops *asic_ops)
{
+ asic_ops->set_cache_memory_policy = set_cache_memory_policy_v10;
asic_ops->update_qpd = update_qpd_v10;
asic_ops->init_sdma_vm = init_sdma_vm_v10;
asic_ops->mqd_manager_init = mqd_manager_init_v10;
@@ -49,27 +57,28 @@ static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
private_base;
}
-static int update_qpd_v10(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
+static bool set_cache_memory_policy_v10(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties)
{
- struct kfd_process_device *pdd;
-
- pdd = qpd_to_pdd(qpd);
-
- /* check if sh_mem_config register already configured */
- if (qpd->sh_mem_config == 0) {
- qpd->sh_mem_config =
- (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
- SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
- (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
- qpd->sh_mem_ape1_limit = 0;
- qpd->sh_mem_ape1_base = 0;
- }
-
- qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);
+ qpd->sh_mem_config = (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
+ (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
+ qpd->sh_mem_ape1_limit = 0;
+ qpd->sh_mem_ape1_base = 0;
+ qpd->sh_mem_bases = compute_sh_mem_bases_64bit(qpd_to_pdd(qpd));
pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases);
+ return true;
+}
+static int update_qpd_v10(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c
index 2e129da7acb4..8b447d04558f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c
@@ -30,10 +30,18 @@ static int update_qpd_v11(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static void init_sdma_vm_v11(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd);
+static bool set_cache_memory_policy_v11(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties);
void device_queue_manager_init_v11(
struct device_queue_manager_asic_ops *asic_ops)
{
+ asic_ops->set_cache_memory_policy = set_cache_memory_policy_v11;
asic_ops->update_qpd = update_qpd_v11;
asic_ops->init_sdma_vm = init_sdma_vm_v11;
asic_ops->mqd_manager_init = mqd_manager_init_v11;
@@ -48,28 +56,29 @@ static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
private_base;
}
-static int update_qpd_v11(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
+static bool set_cache_memory_policy_v11(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties)
{
- struct kfd_process_device *pdd;
-
- pdd = qpd_to_pdd(qpd);
-
- /* check if sh_mem_config register already configured */
- if (qpd->sh_mem_config == 0) {
- qpd->sh_mem_config =
- (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
- SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
- (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
-
- qpd->sh_mem_ape1_limit = 0;
- qpd->sh_mem_ape1_base = 0;
- }
+ qpd->sh_mem_config = (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
+ (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
- qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);
+ qpd->sh_mem_ape1_limit = 0;
+ qpd->sh_mem_ape1_base = 0;
+ qpd->sh_mem_bases = compute_sh_mem_bases_64bit(qpd_to_pdd(qpd));
pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases);
+ return true;
+}
+static int update_qpd_v11(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c
index 4f3295b29dfb..3550da3a46f9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c
@@ -30,10 +30,18 @@ static int update_qpd_v12(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static void init_sdma_vm_v12(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd);
+static bool set_cache_memory_policy_v12(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties);
void device_queue_manager_init_v12(
struct device_queue_manager_asic_ops *asic_ops)
{
+ asic_ops->set_cache_memory_policy = set_cache_memory_policy_v12;
asic_ops->update_qpd = update_qpd_v12;
asic_ops->init_sdma_vm = init_sdma_vm_v12;
asic_ops->mqd_manager_init = mqd_manager_init_v12;
@@ -48,28 +56,29 @@ static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
private_base;
}
-static int update_qpd_v12(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
+static bool set_cache_memory_policy_v12(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties)
{
- struct kfd_process_device *pdd;
-
- pdd = qpd_to_pdd(qpd);
-
- /* check if sh_mem_config register already configured */
- if (qpd->sh_mem_config == 0) {
- qpd->sh_mem_config =
- (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
- SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
- (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
-
- qpd->sh_mem_ape1_limit = 0;
- qpd->sh_mem_ape1_base = 0;
- }
+ qpd->sh_mem_config = (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
+ (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
- qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);
+ qpd->sh_mem_ape1_limit = 0;
+ qpd->sh_mem_ape1_base = 0;
+ qpd->sh_mem_bases = compute_sh_mem_bases_64bit(qpd_to_pdd(qpd));
pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases);
+ return true;
+}
+static int update_qpd_v12(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
index 67137e674f1d..9fcc8c6e57b7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
@@ -30,10 +30,18 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd);
+static bool set_cache_memory_policy_v9(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties);
void device_queue_manager_init_v9(
struct device_queue_manager_asic_ops *asic_ops)
{
+ asic_ops->set_cache_memory_policy = set_cache_memory_policy_v9;
asic_ops->update_qpd = update_qpd_v9;
asic_ops->init_sdma_vm = init_sdma_vm_v9;
asic_ops->mqd_manager_init = mqd_manager_init_v9;
@@ -48,10 +56,42 @@ static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
private_base;
}
+static bool set_cache_memory_policy_v9(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties)
+{
+ qpd->sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
+
+ if (dqm->dev->kfd->noretry)
+ qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
+
+ if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3) ||
+ KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 4))
+ qpd->sh_mem_config |= (1 << SH_MEM_CONFIG__F8_MODE__SHIFT);
+
+ if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 5, 0)) {
+ if (misc_process_properties & KFD_PROC_FLAG_MFMA_HIGH_PRECISION)
+ qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__PRECISION_MODE__SHIFT;
+ }
+
+ qpd->sh_mem_ape1_limit = 0;
+ qpd->sh_mem_ape1_base = 0;
+ qpd->sh_mem_bases = compute_sh_mem_bases_64bit(qpd_to_pdd(qpd));
+
+ pr_debug("sh_mem_bases 0x%X sh_mem_config 0x%X\n", qpd->sh_mem_bases,
+ qpd->sh_mem_config);
+ return true;
+}
+
static int update_qpd_v9(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
- struct kfd_process_device *pdd;
+ struct kfd_process_device *pdd = qpd_to_pdd(qpd);
-	pdd = qpd_to_pdd(qpd);
@@ -64,8 +104,7 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 4) ||
- KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 5, 0))
+ KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 4))
qpd->sh_mem_config |=
(1 << SH_MEM_CONFIG__F8_MODE__SHIFT);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
index b291ee0fab94..dad83356e976 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
@@ -27,12 +27,21 @@
#include "gca/gfx_8_0_sh_mask.h"
#include "oss/oss_3_0_sh_mask.h"
+/*
+ * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
+ * stay in user mode.
+ */
+#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
+/* APE1 limit is inclusive and 64K aligned. */
+#define APE1_LIMIT_ALIGNMENT 0xFFFF
+
static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
- uint64_t alternate_aperture_size);
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties);
static int update_qpd_vi(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static void init_sdma_vm(struct device_queue_manager *dqm,
@@ -81,10 +90,41 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
- uint64_t alternate_aperture_size)
+ uint64_t alternate_aperture_size,
+ u32 misc_process_properties)
{
uint32_t default_mtype;
uint32_t ape1_mtype;
+ unsigned int temp;
+ bool retval = true;
+
+ if (alternate_aperture_size == 0) {
+ /* base > limit disables APE1 */
+ qpd->sh_mem_ape1_base = 1;
+ qpd->sh_mem_ape1_limit = 0;
+ } else {
+ /*
+ * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
+ * SH_MEM_APE1_BASE[31:0], 0x0000 }
+ * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
+ * SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
+ * Verify that the base and size parameters can be
+ * represented in this format and convert them.
+ * Additionally restrict APE1 to user-mode addresses.
+ */
+
+ uint64_t base = (uintptr_t)alternate_aperture_base;
+ uint64_t limit = base + alternate_aperture_size - 1;
+
+ if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
+ (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
+ retval = false;
+ goto out;
+ }
+
+ qpd->sh_mem_ape1_base = base >> 16;
+ qpd->sh_mem_ape1_limit = limit >> 16;
+ }
default_mtype = (default_policy == cache_policy_coherent) ?
MTYPE_UC :
@@ -100,40 +140,21 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
default_mtype << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
ape1_mtype << SH_MEM_CONFIG__APE1_MTYPE__SHIFT;
- return true;
-}
-
-static int update_qpd_vi(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
-{
- struct kfd_process_device *pdd;
- unsigned int temp;
-
- pdd = qpd_to_pdd(qpd);
-
- /* check if sh_mem_config register already configured */
- if (qpd->sh_mem_config == 0) {
- qpd->sh_mem_config =
- SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
- SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
- MTYPE_UC <<
- SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
- MTYPE_UC <<
- SH_MEM_CONFIG__APE1_MTYPE__SHIFT;
-
- qpd->sh_mem_ape1_limit = 0;
- qpd->sh_mem_ape1_base = 0;
- }
-
/* On dGPU we're always in GPUVM64 addressing mode with 64-bit
* aperture addresses.
*/
- temp = get_sh_mem_bases_nybble_64(pdd);
+ temp = get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd));
qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
pr_debug("sh_mem_bases nybble: 0x%X and register 0x%X\n",
temp, qpd->sh_mem_bases);
+out:
+ return retval;
+}
+static int update_qpd_vi(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index d075f24e5f9f..fecdb6794075 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -727,7 +727,7 @@ void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
* to process context, kfd_process could attempt to exit while we are
* running so the lookup function increments the process ref count.
*/
- struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid, NULL);
if (!p)
return; /* Presumably process exited. */
@@ -1139,8 +1139,8 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
if (type == KFD_EVENT_TYPE_MEMORY) {
dev_warn(kfd_device,
- "Sending SIGSEGV to process %d (pasid 0x%x)",
- p->lead_thread->pid, p->pasid);
+ "Sending SIGSEGV to process pid %d",
+ p->lead_thread->pid);
send_sig(SIGSEGV, p->lead_thread, 0);
}
@@ -1148,13 +1148,13 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
if (send_signal) {
if (send_sigterm) {
dev_warn(kfd_device,
- "Sending SIGTERM to process %d (pasid 0x%x)",
- p->lead_thread->pid, p->pasid);
+ "Sending SIGTERM to process pid %d",
+ p->lead_thread->pid);
send_sig(SIGTERM, p->lead_thread, 0);
} else {
dev_err(kfd_device,
- "Process %d (pasid 0x%x) got unhandled exception",
- p->lead_thread->pid, p->pasid);
+ "Process pid %d got unhandled exception",
+ p->lead_thread->pid);
}
}
@@ -1168,7 +1168,7 @@ void kfd_signal_hw_exception_event(u32 pasid)
* to process context, kfd_process could attempt to exit while we are
* running so the lookup function increments the process ref count.
*/
- struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid, NULL);
if (!p)
return; /* Presumably process exited. */
@@ -1177,22 +1177,20 @@ void kfd_signal_hw_exception_event(u32 pasid)
kfd_unref_process(p);
}
-void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid,
+void kfd_signal_vm_fault_event(struct kfd_process_device *pdd,
struct kfd_vm_fault_info *info,
struct kfd_hsa_memory_exception_data *data)
{
struct kfd_event *ev;
uint32_t id;
- struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct kfd_process *p = pdd->process;
struct kfd_hsa_memory_exception_data memory_exception_data;
int user_gpu_id;
- if (!p)
- return; /* Presumably process exited. */
-
- user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
+ user_gpu_id = kfd_process_get_user_gpu_id(p, pdd->dev->id);
if (unlikely(user_gpu_id == -EINVAL)) {
- WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
+ WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n",
+ pdd->dev->id);
return;
}
@@ -1229,7 +1227,6 @@ void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid,
}
rcu_read_unlock();
- kfd_unref_process(p);
}
void kfd_signal_reset_event(struct kfd_node *dev)
@@ -1264,7 +1261,8 @@ void kfd_signal_reset_event(struct kfd_node *dev)
}
if (unlikely(!pdd)) {
- WARN_ONCE(1, "Could not get device data from pasid:0x%x\n", p->pasid);
+ WARN_ONCE(1, "Could not get device data from process pid:%d\n",
+ p->lead_thread->pid);
continue;
}
@@ -1273,8 +1271,15 @@ void kfd_signal_reset_event(struct kfd_node *dev)
if (dev->dqm->detect_hang_count) {
struct amdgpu_task_info *ti;
+ struct amdgpu_fpriv *drv_priv;
+
+ if (unlikely(amdgpu_file_to_fpriv(pdd->drm_file, &drv_priv))) {
+ WARN_ONCE(1, "Could not get vm for device %x from pid:%d\n",
+ dev->id, p->lead_thread->pid);
+ continue;
+ }
- ti = amdgpu_vm_get_task_info_pasid(dev->adev, p->pasid);
+ ti = amdgpu_vm_get_task_info_vm(&drv_priv->vm);
if (ti) {
dev_err(dev->adev->dev,
"Queues reset on process %s tid %d thread %s pid %d\n",
@@ -1311,7 +1316,7 @@ void kfd_signal_reset_event(struct kfd_node *dev)
void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid)
{
- struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid, NULL);
struct kfd_hsa_memory_exception_data memory_exception_data;
struct kfd_hsa_hw_exception_data hw_exception_data;
struct kfd_event *ev;
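kfd_signal_vm_fault_event() now takes the faulting device's kfd_process_device instead of a device/PASID pair, so a fault handler resolves the pdd once and the event code reuses its process and kfd_node. A hedged sketch of such a caller under the new signatures; the handler name is hypothetical and the fault info is assumed to be filled in already:

	static void example_report_vm_fault(u32 pasid, struct kfd_vm_fault_info *info)
	{
		struct kfd_process_device *pdd = NULL;
		struct kfd_process *p = kfd_lookup_process_by_pasid(pasid, &pdd);

		if (!pdd)
			return;		/* process already gone, or PASID unknown */

		/* The pdd carries both the process and the faulting device, so no
		 * separate dev/pasid arguments are needed any more. */
		kfd_signal_vm_fault_event(pdd, info, NULL);

		kfd_unref_process(p);
	}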
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
index b3f988b275a8..c5f97e6e36ff 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
@@ -194,7 +194,7 @@ static void event_interrupt_poison_consumption_v11(struct kfd_node *dev,
enum amdgpu_ras_block block = 0;
int ret = -EINVAL;
uint32_t reset = 0;
- struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid, NULL);
if (!p)
return;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 0cb5c582ce7d..b8a91bf4ef30 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -146,7 +146,7 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
{
enum amdgpu_ras_block block = 0;
uint32_t reset = 0;
- struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid, NULL);
enum ras_event_type type = RAS_EVENT_TYPE_POISON_CONSUMPTION;
u64 event_id;
int old_poison, ret;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index d05d199b5e44..79251f22b702 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -1027,7 +1027,7 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 1))
return -EINVAL;
- if (adev->flags & AMD_IS_APU)
+ if (adev->apu_prefer_gtt)
return 0;
pgmap = &kfddev->pgmap;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 3014925d95ff..80320a6c8854 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -554,7 +554,7 @@ static void init_mqd_hiq_v9_4_3(struct mqd_manager *mm, void **mqd,
m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
- if (amdgpu_sriov_vf(mm->dev->adev))
+ if (amdgpu_sriov_multi_vf_mode(mm->dev->adev))
m->cp_hqd_pq_doorbell_control |= 1 <<
CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT;
m->cp_mqd_stride_size = kfd_hiq_mqd_stride(mm->dev);
@@ -667,7 +667,9 @@ static void init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd,
get_xcc_mqd(mqd_mem_obj, &xcc_mqd_mem_obj, offset*xcc);
init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q);
-
+ if (amdgpu_sriov_multi_vf_mode(mm->dev->adev))
+ m->cp_hqd_pq_doorbell_control |= 1 <<
+ CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT;
m->cp_mqd_stride_size = offset;
/*
@@ -727,6 +729,9 @@ static void update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd + size * xcc);
update_mqd(mm, m, q, minfo);
+ if (amdgpu_sriov_multi_vf_mode(mm->dev->adev))
+ m->cp_hqd_pq_doorbell_control |= 1 <<
+ CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT;
update_cu_mask(mm, m, minfo, xcc);
if (q->format == KFD_QUEUE_FORMAT_AQL) {
@@ -749,6 +754,21 @@ static void update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
}
}
+static void restore_mqd_v9_4_3(struct mqd_manager *mm, void **mqd,
+ struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
+ struct queue_properties *qp,
+ const void *mqd_src,
+ const void *ctl_stack_src, u32 ctl_stack_size)
+{
+ restore_mqd(mm, mqd, mqd_mem_obj, gart_addr, qp, mqd_src, ctl_stack_src, ctl_stack_size);
+ if (amdgpu_sriov_multi_vf_mode(mm->dev->adev)) {
+ struct v9_mqd *m;
+
+ m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr;
+ m->cp_hqd_pq_doorbell_control |= 1 <<
+ CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT;
+ }
+}
static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
enum kfd_preempt_type type, unsigned int timeout,
uint32_t pipe_id, uint32_t queue_id)
@@ -883,7 +903,6 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
mqd->is_occupied = kfd_is_occupied_cp;
mqd->get_checkpoint_info = get_checkpoint_info;
mqd->checkpoint_mqd = checkpoint_mqd;
- mqd->restore_mqd = restore_mqd;
mqd->mqd_size = sizeof(struct v9_mqd);
mqd->mqd_stride = mqd_stride_v9;
#if defined(CONFIG_DEBUG_FS)
@@ -895,12 +914,14 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
mqd->init_mqd = init_mqd_v9_4_3;
mqd->load_mqd = load_mqd_v9_4_3;
mqd->update_mqd = update_mqd_v9_4_3;
+ mqd->restore_mqd = restore_mqd_v9_4_3;
mqd->destroy_mqd = destroy_mqd_v9_4_3;
mqd->get_wave_state = get_wave_state_v9_4_3;
} else {
mqd->init_mqd = init_mqd;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd;
+ mqd->restore_mqd = restore_mqd;
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->get_wave_state = get_wave_state;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 4984b41cd372..271c567242ab 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -396,14 +396,33 @@ out:
return retval;
}
-int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period)
+/* pm_config_dequeue_wait_counts: Configure dequeue timer wait counts
+ * by writing to the CP_IQ_WAIT_TIME2 registers.
+ *
+ * @cmd: See enum kfd_config_dequeue_wait_counts_cmd definition
+ * @value: Depends on the cmd. This parameter is unused for
+ *         KFD_DEQUEUE_WAIT_INIT and KFD_DEQUEUE_WAIT_RESET. For
+ *         KFD_DEQUEUE_WAIT_SET_SCH_WAVE it holds the value to be set.
+ *
+ */
+int pm_config_dequeue_wait_counts(struct packet_manager *pm,
+ enum kfd_config_dequeue_wait_counts_cmd cmd,
+ uint32_t value)
{
struct kfd_node *node = pm->dqm->dev;
struct device *dev = node->adev->dev;
int retval = 0;
uint32_t *buffer, size;
- size = pm->pmf->set_grace_period_size;
+ if (!pm->pmf->config_dequeue_wait_counts ||
+ !pm->pmf->config_dequeue_wait_counts_size)
+ return 0;
+
+ if (cmd == KFD_DEQUEUE_WAIT_INIT && (KFD_GC_VERSION(pm->dqm->dev) < IP_VERSION(9, 4, 1) ||
+ KFD_GC_VERSION(pm->dqm->dev) >= IP_VERSION(10, 0, 0)))
+ return 0;
+
+ size = pm->pmf->config_dequeue_wait_counts_size;
mutex_lock(&pm->lock);
@@ -419,13 +438,18 @@ int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period)
goto out;
}
- retval = pm->pmf->set_grace_period(pm, buffer, grace_period);
- if (!retval)
+ retval = pm->pmf->config_dequeue_wait_counts(pm, buffer,
+ cmd, value);
+ if (!retval) {
retval = kq_submit_packet(pm->priv_queue);
- else
+
+ /* If default value is modified, cache that in dqm->wait_times */
+ if (!retval && cmd == KFD_DEQUEUE_WAIT_INIT)
+ update_dqm_wait_times(pm->dqm);
+ } else {
kq_rollback_packet(pm->priv_queue);
+ }
}
-
out:
mutex_unlock(&pm->lock);
return retval;
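pm_config_dequeue_wait_counts() replaces pm_update_grace_period() with a small command interface. The hunk above shows only the RESET call; below is a hedged sketch of how the three commands are intended to be issued, inferred from the command documentation in this patch (the surrounding function is illustrative, not a real call site):

	static void example_dequeue_wait_count_usage(struct device_queue_manager *dqm,
						     uint32_t grace_period)
	{
		/* Once at scheduler start: program the tuned QUE_SLEEP/SCH_WAVE
		 * defaults on the ASICs that want them (gfx 9.4.1+ but pre-gfx10);
		 * a no-op everywhere else. The value argument is ignored. */
		pm_config_dequeue_wait_counts(&dqm->packet_mgr,
					      KFD_DEQUEUE_WAIT_INIT, 0);

		/* Debugger-style override: value is the SCH_WAVE wait count
		 * (the v9 backend clamps 0 to 1). */
		pm_config_dequeue_wait_counts(&dqm->packet_mgr,
					      KFD_DEQUEUE_WAIT_SET_SCH_WAVE,
					      grace_period);

		/* Back to the cached defaults, as the unmap path above does. */
		pm_config_dequeue_wait_counts(&dqm->packet_mgr,
					      KFD_DEQUEUE_WAIT_RESET, 0 /* unused */);
	}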
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
index 1f9f5bfeaf86..2893fd5e5d00 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -47,7 +47,7 @@ static int pm_map_process_v9(struct packet_manager *pm,
packet->bitfields2.exec_cleaner_shader = 1;
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 10;
- packet->bitfields2.pasid = qpd->pqm->process->pasid;
+ packet->bitfields2.pasid = pdd->pasid;
packet->bitfields14.gds_size = qpd->gds_size & 0x3F;
packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF;
packet->bitfields14.num_gws = (qpd->mapped_gws_queue) ? qpd->num_gws : 0;
@@ -106,7 +106,7 @@ static int pm_map_process_aldebaran(struct packet_manager *pm,
packet->bitfields2.exec_cleaner_shader = 1;
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 10;
- packet->bitfields2.pasid = qpd->pqm->process->pasid;
+ packet->bitfields2.pasid = pdd->pasid;
packet->bitfields14.gds_size = qpd->gds_size & 0x3F;
packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF;
packet->bitfields14.num_gws = (qpd->mapped_gws_queue) ? qpd->num_gws : 0;
@@ -297,23 +297,79 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
return 0;
}
-static int pm_set_grace_period_v9(struct packet_manager *pm,
+static inline void pm_build_dequeue_wait_counts_packet_info(struct packet_manager *pm,
+ uint32_t sch_value, uint32_t que_sleep, uint32_t *reg_offset,
+ uint32_t *reg_data)
+{
+ pm->dqm->dev->kfd2kgd->build_dequeue_wait_counts_packet_info(
+ pm->dqm->dev->adev,
+ pm->dqm->wait_times,
+ sch_value,
+ que_sleep,
+ reg_offset,
+ reg_data);
+}
+
+/* pm_config_dequeue_wait_counts_v9: Builds a WRITE_DATA packet with the
+ * register/value pair that configures the dequeue wait counts
+ *
+ * @return: negative value on failure, 0 on success; on success the
+ *          buffer is filled in with the packet
+ *
+ **/
+static int pm_config_dequeue_wait_counts_v9(struct packet_manager *pm,
uint32_t *buffer,
- uint32_t grace_period)
+ enum kfd_config_dequeue_wait_counts_cmd cmd,
+ uint32_t value)
{
struct pm4_mec_write_data_mmio *packet;
uint32_t reg_offset = 0;
uint32_t reg_data = 0;
- pm->dqm->dev->kfd2kgd->build_grace_period_packet_info(
- pm->dqm->dev->adev,
- pm->dqm->wait_times,
- grace_period,
- &reg_offset,
- &reg_data);
+ switch (cmd) {
+ case KFD_DEQUEUE_WAIT_INIT: {
+ uint32_t sch_wave = 0, que_sleep = 1;
+
+		/* For all gfx9 ASICs newer than gfx941:
+		 * reduce CP_IQ_WAIT_TIME2.QUE_SLEEP from its default of 0x40
+		 * to 0x1. On a 1 GHz machine this is roughly 1 microsecond,
+		 * which is about how long it takes to load data out of memory
+		 * during queue connect.
+		 * QUE_SLEEP: wait count for dequeue retry.
+		 *
+		 * Set the CWSR grace period to 1x1000 cycles for the GFX9.4.3 APU.
+		 */
+ if (KFD_GC_VERSION(pm->dqm->dev) < IP_VERSION(9, 4, 1) ||
+ KFD_GC_VERSION(pm->dqm->dev) >= IP_VERSION(10, 0, 0))
+ return -EPERM;
+
+ if (amdgpu_emu_mode == 0 && pm->dqm->dev->adev->gmc.is_app_apu &&
+ (KFD_GC_VERSION(pm->dqm->dev) == IP_VERSION(9, 4, 3)))
+ sch_wave = 1;
- if (grace_period == USE_DEFAULT_GRACE_PERIOD)
- reg_data = pm->dqm->wait_times;
+ pm_build_dequeue_wait_counts_packet_info(pm, sch_wave, que_sleep,
+ &reg_offset, &reg_data);
+
+ break;
+ }
+ case KFD_DEQUEUE_WAIT_RESET:
+ /* reg_data would be set to dqm->wait_times */
+ pm_build_dequeue_wait_counts_packet_info(pm, 0, 0, &reg_offset, &reg_data);
+ break;
+
+ case KFD_DEQUEUE_WAIT_SET_SCH_WAVE:
+		/* The CP cannot handle a value of 0: it would result in an
+		 * infinite grace period, so clamp it to 1. This also avoids
+		 * breaking the debugger API, which passes 0 but expects a low value.
+		 */
+ if (!value)
+ value = 1;
+ pm_build_dequeue_wait_counts_packet_info(pm, value, 0, &reg_offset, &reg_data);
+ break;
+ default:
+ pr_err("Invalid dequeue wait cmd\n");
+ return -EINVAL;
+ }
packet = (struct pm4_mec_write_data_mmio *)buffer;
memset(buffer, 0, sizeof(struct pm4_mec_write_data_mmio));
@@ -415,7 +471,7 @@ const struct packet_manager_funcs kfd_v9_pm_funcs = {
.set_resources = pm_set_resources_v9,
.map_queues = pm_map_queues_v9,
.unmap_queues = pm_unmap_queues_v9,
- .set_grace_period = pm_set_grace_period_v9,
+ .config_dequeue_wait_counts = pm_config_dequeue_wait_counts_v9,
.query_status = pm_query_status_v9,
.release_mem = NULL,
.map_process_size = sizeof(struct pm4_mes_map_process),
@@ -423,7 +479,7 @@ const struct packet_manager_funcs kfd_v9_pm_funcs = {
.set_resources_size = sizeof(struct pm4_mes_set_resources),
.map_queues_size = sizeof(struct pm4_mes_map_queues),
.unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
- .set_grace_period_size = sizeof(struct pm4_mec_write_data_mmio),
+ .config_dequeue_wait_counts_size = sizeof(struct pm4_mec_write_data_mmio),
.query_status_size = sizeof(struct pm4_mes_query_status),
.release_mem_size = 0,
};
@@ -434,7 +490,7 @@ const struct packet_manager_funcs kfd_aldebaran_pm_funcs = {
.set_resources = pm_set_resources_v9,
.map_queues = pm_map_queues_v9,
.unmap_queues = pm_unmap_queues_v9,
- .set_grace_period = pm_set_grace_period_v9,
+ .config_dequeue_wait_counts = pm_config_dequeue_wait_counts_v9,
.query_status = pm_query_status_v9,
.release_mem = NULL,
.map_process_size = sizeof(struct pm4_mes_map_process_aldebaran),
@@ -442,7 +498,7 @@ const struct packet_manager_funcs kfd_aldebaran_pm_funcs = {
.set_resources_size = sizeof(struct pm4_mes_set_resources),
.map_queues_size = sizeof(struct pm4_mes_map_queues),
.unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
- .set_grace_period_size = sizeof(struct pm4_mec_write_data_mmio),
+ .config_dequeue_wait_counts_size = sizeof(struct pm4_mec_write_data_mmio),
.query_status_size = sizeof(struct pm4_mes_query_status),
.release_mem_size = 0,
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
index c1199d06d131..a1de5d7e173a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
@@ -42,6 +42,7 @@ unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size)
static int pm_map_process_vi(struct packet_manager *pm, uint32_t *buffer,
struct qcm_process_device *qpd)
{
+ struct kfd_process_device *pdd = qpd_to_pdd(qpd);
struct pm4_mes_map_process *packet;
packet = (struct pm4_mes_map_process *)buffer;
@@ -52,7 +53,7 @@ static int pm_map_process_vi(struct packet_manager *pm, uint32_t *buffer,
sizeof(struct pm4_mes_map_process));
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 10;
- packet->bitfields2.pasid = qpd->pqm->process->pasid;
+ packet->bitfields2.pasid = pdd->pasid;
packet->bitfields3.page_table_base = qpd->page_table_base;
packet->bitfields10.gds_size = qpd->gds_size;
packet->bitfields10.num_gws = qpd->num_gws;
@@ -303,7 +304,7 @@ const struct packet_manager_funcs kfd_vi_pm_funcs = {
.set_resources = pm_set_resources_vi,
.map_queues = pm_map_queues_vi,
.unmap_queues = pm_unmap_queues_vi,
- .set_grace_period = NULL,
+ .config_dequeue_wait_counts = NULL,
.query_status = pm_query_status_vi,
.release_mem = pm_release_mem_vi,
.map_process_size = sizeof(struct pm4_mes_map_process),
@@ -311,7 +312,7 @@ const struct packet_manager_funcs kfd_vi_pm_funcs = {
.set_resources_size = sizeof(struct pm4_mes_set_resources),
.map_queues_size = sizeof(struct pm4_mes_map_queues),
.unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
- .set_grace_period_size = 0,
+ .config_dequeue_wait_counts_size = 0,
.query_status_size = sizeof(struct pm4_mes_query_status),
.release_mem_size = sizeof(struct pm4_mec_release_mem)
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
deleted file mode 100644
index e3b250918f39..000000000000
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ /dev/null
@@ -1,70 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/*
- * Copyright 2014-2022 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/types.h>
-#include "kfd_priv.h"
-#include "amdgpu_ids.h"
-
-static unsigned int pasid_bits = 16;
-static bool pasids_allocated; /* = false */
-
-bool kfd_set_pasid_limit(unsigned int new_limit)
-{
- if (new_limit < 2)
- return false;
-
- if (new_limit < (1U << pasid_bits)) {
- if (pasids_allocated)
- /* We've already allocated user PASIDs, too late to
- * change the limit
- */
- return false;
-
- while (new_limit < (1U << pasid_bits))
- pasid_bits--;
- }
-
- return true;
-}
-
-unsigned int kfd_get_pasid_limit(void)
-{
- return 1U << pasid_bits;
-}
-
-u32 kfd_pasid_alloc(void)
-{
- int r = amdgpu_pasid_alloc(pasid_bits);
-
- if (r > 0) {
- pasids_allocated = true;
- return r;
- }
-
- return 0;
-}
-
-void kfd_pasid_free(u32 pasid)
-{
- amdgpu_pasid_free(pasid);
-}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index d8cd913aa772..f6aedf69c644 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -289,7 +289,6 @@ struct kfd_node {
/* Global GWS resource shared between processes */
void *gws;
- bool gws_debug_workaround;
/* Clients watching SMI events */
struct list_head smi_clients;
@@ -851,6 +850,8 @@ struct kfd_process_device {
/* Tracks queue reset status */
bool has_reset_queue;
+
+ u32 pasid;
};
#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
@@ -910,8 +911,6 @@ struct kfd_process {
/* We want to receive a notification when the mm_struct is destroyed */
struct mmu_notifier mmu_notifier;
- u32 pasid;
-
/*
* Array of kfd_process_device pointers,
* one for each device the process is using.
@@ -1039,7 +1038,8 @@ void kfd_process_destroy_wq(void);
void kfd_cleanup_processes(void);
struct kfd_process *kfd_create_process(struct task_struct *thread);
struct kfd_process *kfd_get_process(const struct task_struct *task);
-struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
+struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid,
+ struct kfd_process_device **pdd);
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id);
@@ -1091,8 +1091,6 @@ struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid);
/* PASIDs */
int kfd_pasid_init(void);
void kfd_pasid_exit(void);
-bool kfd_set_pasid_limit(unsigned int new_limit);
-unsigned int kfd_get_pasid_limit(void);
u32 kfd_pasid_alloc(void);
void kfd_pasid_free(u32 pasid);
@@ -1142,7 +1140,6 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain_no_lock(
uint32_t proximity_domain);
struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
struct kfd_node *kfd_device_by_id(uint32_t gpu_id);
-struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev);
static inline bool kfd_irq_is_from_node(struct kfd_node *node, uint32_t node_id,
uint32_t vmid)
{
@@ -1337,7 +1334,7 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_node *dev,
enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq);
-int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid);
+int kfd_evict_process_device(struct kfd_process_device *pdd);
int kfd_dqm_suspend_bad_queue_mes(struct kfd_node *knode, u32 pasid, u32 doorbell_id);
/* Process Queue Manager */
@@ -1366,8 +1363,6 @@ int pqm_update_mqd(struct process_queue_manager *pqm, unsigned int qid,
struct mqd_update_info *minfo);
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
void *gws);
-struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
- unsigned int qid);
struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
unsigned int qid);
int pqm_get_wave_state(struct process_queue_manager *pqm,
@@ -1394,6 +1389,24 @@ int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
#define KFD_FENCE_COMPLETED (100)
#define KFD_FENCE_INIT (10)
+/**
+ * enum kfd_config_dequeue_wait_counts_cmd - Command for configuring
+ * dequeue wait counts.
+ *
+ * @KFD_DEQUEUE_WAIT_INIT: Set optimized dequeue wait counts for
+ * certain ASICs. For these ASICs, this is the default value used by RESET.
+ * @KFD_DEQUEUE_WAIT_RESET: Reset dequeue wait counts to the optimized value
+ * for certain ASICs; for others, reset to the hardware default.
+ * @KFD_DEQUEUE_WAIT_SET_SCH_WAVE: Set the context switch latency wait count.
+ *
+ */
+enum kfd_config_dequeue_wait_counts_cmd {
+ KFD_DEQUEUE_WAIT_INIT = 1,
+ KFD_DEQUEUE_WAIT_RESET = 2,
+ KFD_DEQUEUE_WAIT_SET_SCH_WAVE = 3
+};
+
+
struct packet_manager {
struct device_queue_manager *dqm;
struct kernel_queue *priv_queue;
@@ -1419,8 +1432,8 @@ struct packet_manager_funcs {
int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer,
enum kfd_unmap_queues_filter mode,
uint32_t filter_param, bool reset);
- int (*set_grace_period)(struct packet_manager *pm, uint32_t *buffer,
- uint32_t grace_period);
+ int (*config_dequeue_wait_counts)(struct packet_manager *pm, uint32_t *buffer,
+ enum kfd_config_dequeue_wait_counts_cmd cmd, uint32_t value);
int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
uint64_t fence_address, uint64_t fence_value);
int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
@@ -1431,7 +1444,7 @@ struct packet_manager_funcs {
int set_resources_size;
int map_queues_size;
int unmap_queues_size;
- int set_grace_period_size;
+ int config_dequeue_wait_counts_size;
int query_status_size;
int release_mem_size;
};
@@ -1454,7 +1467,9 @@ int pm_send_unmap_queue(struct packet_manager *pm,
void pm_release_ib(struct packet_manager *pm);
-int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period);
+int pm_config_dequeue_wait_counts(struct packet_manager *pm,
+ enum kfd_config_dequeue_wait_counts_cmd cmd,
+ uint32_t wait_counts_config);
/* Following PM funcs can be shared among VI and AI */
unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
@@ -1492,7 +1507,7 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
int kfd_get_num_events(struct kfd_process *p);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
-void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid,
+void kfd_signal_vm_fault_event(struct kfd_process_device *pdd,
struct kfd_vm_fault_info *info,
struct kfd_hsa_memory_exception_data *data);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 083f83c94531..7c0c24732481 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -35,6 +35,7 @@
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu.h"
+#include "amdgpu_reset.h"
struct mm_struct;
@@ -282,8 +283,8 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
cu_cnt = 0;
proc = pdd->process;
if (pdd->qpd.queue_count == 0) {
- pr_debug("Gpu-Id: %d has no active queues for process %d\n",
- dev->id, proc->pasid);
+ pr_debug("Gpu-Id: %d has no active queues for process pid %d\n",
+ dev->id, (int)proc->lead_thread->pid);
return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
}
@@ -327,12 +328,9 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
- if (strcmp(attr->name, "pasid") == 0) {
- struct kfd_process *p = container_of(attr, struct kfd_process,
- attr_pasid);
-
- return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
- } else if (strncmp(attr->name, "vram_", 5) == 0) {
+ if (strcmp(attr->name, "pasid") == 0)
+ return snprintf(buffer, PAGE_SIZE, "%d\n", 0);
+ else if (strncmp(attr->name, "vram_", 5) == 0) {
struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
attr_vram);
return snprintf(buffer, PAGE_SIZE, "%llu\n", atomic64_read(&pdd->vram_usage));
@@ -841,6 +839,14 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
return ERR_PTR(-EINVAL);
}
+ /* If the process just called exec(3), it is possible that the
+ * cleanup of the kfd_process (following the release of the mm
+ * of the old process image) is still in the cleanup work queue.
+ * Make sure to drain any job before trying to recreate any
+ * resource for this process.
+ */
+ flush_workqueue(kfd_process_wq);
+
/*
* take kfd processes mutex before starting of process creation
* so there won't be a case where two threads of the same process
@@ -861,14 +867,6 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
if (process) {
pr_debug("Process already found\n");
} else {
- /* If the process just called exec(3), it is possible that the
- * cleanup of the kfd_process (following the release of the mm
- * of the old process image) is still in the cleanup work queue.
- * Make sure to drain any job before trying to recreate any
- * resource for this process.
- */
- flush_workqueue(kfd_process_wq);
-
process = create_process(thread);
if (IS_ERR(process))
goto out;
@@ -1056,17 +1054,13 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
- pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
- pdd->dev->id, p->pasid);
-
+ pr_debug("Releasing pdd (topology id %d, for pid %d)\n",
+ pdd->dev->id, p->lead_thread->pid);
kfd_process_device_destroy_cwsr_dgpu(pdd);
kfd_process_device_destroy_ib_mem(pdd);
- if (pdd->drm_file) {
- amdgpu_amdkfd_gpuvm_release_process_vm(
- pdd->dev->adev, pdd->drm_priv);
+ if (pdd->drm_file)
fput(pdd->drm_file);
- }
if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
@@ -1140,6 +1134,17 @@ static void kfd_process_remove_sysfs(struct kfd_process *p)
p->kobj = NULL;
}
+/*
+ * If any GPU reset is in progress, wait for it to complete.
+ */
+static void kfd_process_wait_gpu_reset_complete(struct kfd_process *p)
+{
+ int i;
+
+ for (i = 0; i < p->n_pdds; i++)
+ flush_workqueue(p->pdds[i]->dev->adev->reset_domain->wq);
+}
+
/* No process locking is needed in this function, because the process
* is not findable any more. We must assume that no other thread is
* using it any more, otherwise we couldn't safely free the process
@@ -1154,6 +1159,11 @@ static void kfd_process_wq_release(struct work_struct *work)
kfd_process_dequeue_from_all_devices(p);
pqm_uninit(&p->pqm);
+ /*
+	 * If a GPU is in reset, user queues may still be running; wait for the reset to complete.
+ */
+ kfd_process_wait_gpu_reset_complete(p);
+
/* Signal the eviction fence after user mode queues are
* destroyed. This allows any BOs to be freed without
* triggering pointless evictions or waiting for fences.
@@ -1174,7 +1184,6 @@ static void kfd_process_wq_release(struct work_struct *work)
kfd_event_free_process(p);
- kfd_pasid_free(p->pasid);
mutex_destroy(&p->mutex);
put_task_struct(p->lead_thread);
@@ -1525,12 +1534,6 @@ static struct kfd_process *create_process(const struct task_struct *thread)
atomic_set(&process->debugged_process_count, 0);
sema_init(&process->runtime_enable_sema, 0);
- process->pasid = kfd_pasid_alloc();
- if (process->pasid == 0) {
- err = -ENOSPC;
- goto err_alloc_pasid;
- }
-
err = pqm_init(&process->pqm, process);
if (err != 0)
goto err_process_pqm_init;
@@ -1584,8 +1587,6 @@ err_init_svm_range_list:
err_init_apertures:
pqm_uninit(&process->pqm);
err_process_pqm_init:
- kfd_pasid_free(process->pasid);
-err_alloc_pasid:
kfd_event_free_process(process);
err_event_init:
mutex_destroy(&process->mutex);
@@ -1704,15 +1705,19 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
if (ret)
goto err_init_cwsr;
- ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, avm, p->pasid);
- if (ret)
- goto err_set_pasid;
+ if (unlikely(!avm->pasid)) {
+ dev_warn(pdd->dev->adev->dev, "WARN: vm %p has no pasid associated",
+ avm);
+ ret = -EINVAL;
+ goto err_get_pasid;
+ }
+ pdd->pasid = avm->pasid;
pdd->drm_file = drm_file;
return 0;
-err_set_pasid:
+err_get_pasid:
kfd_process_device_destroy_cwsr_dgpu(pdd);
err_init_cwsr:
kfd_process_device_destroy_ib_mem(pdd);
@@ -1798,25 +1803,50 @@ void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
idr_remove(&pdd->alloc_idr, handle);
}
-/* This increments the process->ref counter. */
-struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid)
+static struct kfd_process_device *kfd_lookup_process_device_by_pasid(u32 pasid)
{
- struct kfd_process *p, *ret_p = NULL;
+ struct kfd_process_device *ret_p = NULL;
+ struct kfd_process *p;
unsigned int temp;
-
- int idx = srcu_read_lock(&kfd_processes_srcu);
+ int i;
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
- if (p->pasid == pasid) {
- kref_get(&p->ref);
- ret_p = p;
- break;
+ for (i = 0; i < p->n_pdds; i++) {
+ if (p->pdds[i]->pasid == pasid) {
+ ret_p = p->pdds[i];
+ break;
+ }
}
+ if (ret_p)
+ break;
+ }
+ return ret_p;
+}
+
+/* This increments the process->ref counter. */
+struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid,
+ struct kfd_process_device **pdd)
+{
+ struct kfd_process_device *ret_p;
+
+ int idx = srcu_read_lock(&kfd_processes_srcu);
+
+ ret_p = kfd_lookup_process_device_by_pasid(pasid);
+ if (ret_p) {
+ if (pdd)
+ *pdd = ret_p;
+ kref_get(&ret_p->process->ref);
+
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+ return ret_p->process;
}
srcu_read_unlock(&kfd_processes_srcu, idx);
- return ret_p;
+ if (pdd)
+ *pdd = NULL;
+
+ return NULL;
}
/* This increments the process->ref counter. */
@@ -1972,7 +2002,7 @@ static void evict_process_worker(struct work_struct *work)
*/
p = container_of(dwork, struct kfd_process, eviction_work);
- pr_debug("Started evicting pasid 0x%x\n", p->pasid);
+ pr_debug("Started evicting process pid %d\n", p->lead_thread->pid);
ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_TTM);
if (!ret) {
/* If another thread already signaled the eviction fence,
@@ -1984,9 +2014,9 @@ static void evict_process_worker(struct work_struct *work)
msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
kfd_process_restore_queues(p);
- pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
+ pr_debug("Finished evicting process pid %d\n", p->lead_thread->pid);
} else
- pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
+ pr_err("Failed to evict queues of process pid %d\n", p->lead_thread->pid);
}
static int restore_process_helper(struct kfd_process *p)
@@ -2003,9 +2033,11 @@ static int restore_process_helper(struct kfd_process *p)
ret = kfd_process_restore_queues(p);
if (!ret)
- pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
+ pr_debug("Finished restoring process pid %d\n",
+ p->lead_thread->pid);
else
- pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
+ pr_err("Failed to restore queues of process pid %d\n",
+ p->lead_thread->pid);
return ret;
}
@@ -2022,7 +2054,7 @@ static void restore_process_worker(struct work_struct *work)
* lifetime of this thread, kfd_process p will be valid
*/
p = container_of(dwork, struct kfd_process, restore_work);
- pr_debug("Started restoring pasid 0x%x\n", p->pasid);
+	pr_debug("Started restoring process pid %d\n", p->lead_thread->pid);
/* Setting last_restore_timestamp before successful restoration.
* Otherwise this would have to be set by KGD (restore_process_bos)
@@ -2038,8 +2070,8 @@ static void restore_process_worker(struct work_struct *work)
ret = restore_process_helper(p);
if (ret) {
- pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
- p->pasid, PROCESS_BACK_OFF_TIME_MS);
+ pr_debug("Failed to restore BOs of process pid %d, retry after %d ms\n",
+ p->lead_thread->pid, PROCESS_BACK_OFF_TIME_MS);
if (mod_delayed_work(kfd_restore_wq, &p->restore_work,
msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
kfd_process_restore_queues(p);
@@ -2055,7 +2087,7 @@ void kfd_suspend_all_processes(void)
WARN(debug_evictions, "Evicting all processes");
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
if (kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND))
- pr_err("Failed to suspend process 0x%x\n", p->pasid);
+ pr_err("Failed to suspend process pid %d\n", p->lead_thread->pid);
signal_eviction_fence(p);
}
srcu_read_unlock(&kfd_processes_srcu, idx);
@@ -2069,8 +2101,8 @@ int kfd_resume_all_processes(void)
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
if (restore_process_helper(p)) {
- pr_err("Restore process %d failed during resume\n",
- p->pasid);
+ pr_err("Restore process pid %d failed during resume\n",
+ p->lead_thread->pid);
ret = -EFAULT;
}
}
@@ -2125,7 +2157,7 @@ int kfd_process_drain_interrupts(struct kfd_process_device *pdd)
memset(irq_drain_fence, 0, sizeof(irq_drain_fence));
irq_drain_fence[0] = (KFD_IRQ_FENCE_SOURCEID << 8) |
KFD_IRQ_FENCE_CLIENTID;
- irq_drain_fence[3] = pdd->process->pasid;
+ irq_drain_fence[3] = pdd->pasid;
/*
* For GFX 9.4.3/9.5.0, send the NodeId also in IH cookie DW[3]
@@ -2156,7 +2188,7 @@ void kfd_process_close_interrupt_drain(unsigned int pasid)
{
struct kfd_process *p;
- p = kfd_lookup_process_by_pasid(pasid);
+ p = kfd_lookup_process_by_pasid(pasid, NULL);
if (!p)
return;
@@ -2277,8 +2309,8 @@ int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
int idx = srcu_read_lock(&kfd_processes_srcu);
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
- seq_printf(m, "Process %d PASID 0x%x:\n",
- p->lead_thread->tgid, p->pasid);
+ seq_printf(m, "Process %d PASID %d:\n",
+ p->lead_thread->tgid, p->lead_thread->pid);
mutex_lock(&p->mutex);
r = pqm_debugfs_mqds(m, &p->pqm);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index bd36a75309e1..7eb370b68159 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -69,8 +69,8 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
pr_debug("The new slot id %lu\n", found);
if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
- pr_info("Cannot open more queues for process with pasid 0x%x\n",
- pqm->process->pasid);
+ pr_info("Cannot open more queues for process with pid %d\n",
+ pqm->process->lead_thread->pid);
return -ENOMEM;
}
@@ -363,10 +363,26 @@ int pqm_create_queue(struct process_queue_manager *pqm,
if (retval != 0)
return retval;
+ /* Register process if this is the first queue */
if (list_empty(&pdd->qpd.queues_list) &&
list_empty(&pdd->qpd.priv_queue_list))
dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);
+ /* Allocate proc_ctx_bo only if MES is enabled and this is the first queue */
+ if (!pdd->proc_ctx_cpu_ptr && dev->kfd->shared_resources.enable_mes) {
+ retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
+ AMDGPU_MES_PROC_CTX_SIZE,
+ &pdd->proc_ctx_bo,
+ &pdd->proc_ctx_gpu_addr,
+ &pdd->proc_ctx_cpu_ptr,
+ false);
+ if (retval) {
+ dev_err(dev->adev->dev, "failed to allocate process context bo\n");
+ return retval;
+ }
+ memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
+ }
+
pqn = kzalloc(sizeof(*pqn), GFP_KERNEL);
if (!pqn) {
retval = -ENOMEM;
@@ -435,8 +451,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
}
if (retval != 0) {
- pr_err("Pasid 0x%x DQM create queue type %d failed. ret %d\n",
- pqm->process->pasid, type, retval);
+ pr_err("process pid %d DQM create queue type %d failed. ret %d\n",
+ pqm->process->lead_thread->pid, type, retval);
goto err_create_queue;
}
@@ -530,9 +546,9 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
if (retval) {
pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n",
- pqm->process->pasid,
+ pdd->pasid,
pqn->q->properties.queue_id, retval);
- if (retval != -ETIME)
+ if (retval != -ETIME && retval != -EIO)
goto err_destroy_queue;
}
kfd_procfs_del_queue(pqn->q);
@@ -652,19 +668,6 @@ int pqm_update_mqd(struct process_queue_manager *pqm,
return 0;
}
-struct kernel_queue *pqm_get_kernel_queue(
- struct process_queue_manager *pqm,
- unsigned int qid)
-{
- struct process_queue_node *pqn;
-
- pqn = get_queue_by_qid(pqm, qid);
- if (pqn && pqn->kq)
- return pqn->kq;
-
- return NULL;
-}
-
struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
unsigned int qid)
{
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index 24396a2c77bd..4afff7094caf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -233,6 +233,7 @@ void kfd_queue_buffer_put(struct amdgpu_bo **bo)
int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_properties *properties)
{
struct kfd_topology_device *topo_dev;
+ u64 expected_queue_size;
struct amdgpu_vm *vm;
u32 total_cwsr_size;
int err;
@@ -241,6 +242,15 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
if (!topo_dev)
return -EINVAL;
+ /* AQL queues on GFX7 and GFX8 appear at twice their actual size */
+ if (properties->type == KFD_QUEUE_TYPE_COMPUTE &&
+ properties->format == KFD_QUEUE_FORMAT_AQL &&
+ topo_dev->node_props.gfx_target_version >= 70000 &&
+ topo_dev->node_props.gfx_target_version < 90000)
+ expected_queue_size = properties->queue_size / 2;
+ else
+ expected_queue_size = properties->queue_size;
+
vm = drm_priv_to_vm(pdd->drm_priv);
err = amdgpu_bo_reserve(vm->root.bo, false);
if (err)
@@ -255,7 +265,7 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
goto out_err_unreserve;
err = kfd_queue_buffer_get(vm, (void *)properties->queue_address,
- &properties->ring_bo, properties->queue_size);
+ &properties->ring_bo, expected_queue_size);
if (err)
goto out_err_unreserve;
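
The hunk above validates the user ring buffer against half of the reported size on GFX7/GFX8 AQL compute queues, since those queues report double their real size. A standalone sketch of the same adjustment; the gfx_target_version window is taken from the hunk, and the boolean parameter stands in for the KFD queue type/format checks.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* GFX7/8 (gfx_target_version in [70000, 90000)) AQL compute queues report
 * twice their real ring size, so validation must use half the reported size.
 */
static uint64_t expected_ring_size(uint64_t reported_size, bool is_aql_compute,
				   uint32_t gfx_target_version)
{
	if (is_aql_compute &&
	    gfx_target_version >= 70000 && gfx_target_version < 90000)
		return reported_size / 2;
	return reported_size;
}

int main(void)
{
	/* A 1 MiB AQL ring on GFX8 is reported as 2 MiB by user space. */
	printf("%llu\n", (unsigned long long)
	       expected_ring_size(2u << 20, true, 80002));	/* prints 1048576 */
	return 0;
}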
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index bd3e20d981e0..100717a98ec1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -563,7 +563,8 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
int r;
p = container_of(prange->svms, struct kfd_process, svms);
- pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
+ pr_debug("process pid: %d svms 0x%p [0x%lx 0x%lx]\n",
+ p->lead_thread->pid, prange->svms,
prange->start, prange->last);
if (svm_range_validate_svm_bo(node, prange))
@@ -1286,13 +1287,7 @@ svm_range_get_pte_flags(struct kfd_node *node,
break;
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
- if (domain == SVM_RANGE_VRAM_DOMAIN) {
- if (bo_node != node)
- mapping_flags |= AMDGPU_VM_MTYPE_NC;
- } else {
- mapping_flags |= coherent ?
- AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
- }
+ mapping_flags |= AMDGPU_VM_MTYPE_NC;
break;
default:
mapping_flags |= coherent ?
@@ -2691,7 +2686,7 @@ svm_range_best_restore_location(struct svm_range *prange,
return -1;
}
- if (node->adev->flags & AMD_IS_APU)
+ if (node->adev->apu_prefer_gtt)
return 0;
if (prange->preferred_loc == gpuid ||
@@ -2979,7 +2974,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
return -EFAULT;
}
- p = kfd_lookup_process_by_pasid(pasid);
+ p = kfd_lookup_process_by_pasid(pasid, NULL);
if (!p) {
pr_debug("kfd process not founded pasid 0x%x\n", pasid);
return 0;
@@ -3008,19 +3003,6 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
goto out;
}
- /* check if this page fault time stamp is before svms->checkpoint_ts */
- if (svms->checkpoint_ts[gpuidx] != 0) {
- if (amdgpu_ih_ts_after(ts, svms->checkpoint_ts[gpuidx])) {
- pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
- r = 0;
- goto out;
- } else
- /* ts is after svms->checkpoint_ts now, reset svms->checkpoint_ts
- * to zero to avoid following ts wrap around give wrong comparing
- */
- svms->checkpoint_ts[gpuidx] = 0;
- }
-
if (!p->xnack_enabled) {
pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
r = -EFAULT;
@@ -3040,6 +3022,21 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
mmap_read_lock(mm);
retry_write_locked:
mutex_lock(&svms->lock);
+
+ /* check if this page fault time stamp is before svms->checkpoint_ts */
+ if (svms->checkpoint_ts[gpuidx] != 0) {
+ if (amdgpu_ih_ts_after_or_equal(ts, svms->checkpoint_ts[gpuidx])) {
+ pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
+ r = -EAGAIN;
+ goto out_unlock_svms;
+ } else {
+ /* ts is after svms->checkpoint_ts now; reset svms->checkpoint_ts
+ * to zero so a later ts wrap-around cannot give a wrong comparison
+ */
+ svms->checkpoint_ts[gpuidx] = 0;
+ }
+ }
+
prange = svm_range_from_addr(svms, addr, NULL);
if (!prange) {
pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
@@ -3165,7 +3162,8 @@ out_unlock_svms:
mutex_unlock(&svms->lock);
mmap_read_unlock(mm);
- svm_range_count_fault(node, p, gpuidx);
+ if (r != -EAGAIN)
+ svm_range_count_fault(node, p, gpuidx);
mmput(mm);
out:
@@ -3242,7 +3240,8 @@ void svm_range_list_fini(struct kfd_process *p)
struct svm_range *prange;
struct svm_range *next;
- pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
+ pr_debug("process pid %d svms 0x%p\n", p->lead_thread->pid,
+ &p->svms);
cancel_delayed_work_sync(&p->svms.restore_work);
@@ -3265,7 +3264,8 @@ void svm_range_list_fini(struct kfd_process *p)
mutex_destroy(&p->svms.lock);
- pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms);
+ pr_debug("process pid %d svms 0x%p done\n",
+ p->lead_thread->pid, &p->svms);
}
int svm_range_list_init(struct kfd_process *p)
@@ -3438,7 +3438,7 @@ svm_range_best_prefetch_location(struct svm_range *prange)
goto out;
}
- if (bo_node->adev->flags & AMD_IS_APU) {
+ if (bo_node->adev->apu_prefer_gtt) {
best_loc = 0;
goto out;
}
@@ -3628,8 +3628,8 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
bool flush_tlb;
int r, ret = 0;
- pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
- p->pasid, &p->svms, start, start + size - 1, size);
+ pr_debug("process pid %d svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
+ p->lead_thread->pid, &p->svms, start, start + size - 1, size);
r = svm_range_check_attr(p, nattr, attrs);
if (r)
@@ -3737,8 +3737,8 @@ out_unlock_range:
out:
mutex_unlock(&process_info->lock);
- pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
- &p->svms, start, start + size - 1, r);
+ pr_debug("process pid %d svms 0x%p [0x%llx 0x%llx] done, r=%d\n",
+ p->lead_thread->pid, &p->svms, start, start + size - 1, r);
return ret ? ret : r;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index bddd24f04669..6ea23c78009c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -202,7 +202,7 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s
* is initialized to not 0 when page migration register device memory.
*/
#define KFD_IS_SVM_API_SUPPORTED(adev) ((adev)->kfd.pgmap.type != 0 ||\
- ((adev)->flags & AMD_IS_APU))
+ ((adev)->apu_prefer_gtt))
void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index ceb9fb475ef1..e477d7509646 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -108,24 +108,6 @@ struct kfd_node *kfd_device_by_id(uint32_t gpu_id)
return top_dev->gpu;
}
-struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev)
-{
- struct kfd_topology_device *top_dev;
- struct kfd_node *device = NULL;
-
- down_read(&topology_lock);
-
- list_for_each_entry(top_dev, &topology_device_list, list)
- if (top_dev->gpu && top_dev->gpu->adev->pdev == pdev) {
- device = top_dev->gpu;
- break;
- }
-
- up_read(&topology_lock);
-
- return device;
-}
-
/* Called with write topology_lock acquired */
static void kfd_release_topology_device(struct kfd_topology_device *dev)
{
@@ -537,6 +519,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->gpu->kfd->mec_fw_version);
sysfs_show_32bit_prop(buffer, offs, "capability",
dev->node_props.capability);
+ sysfs_show_32bit_prop(buffer, offs, "capability2",
+ dev->node_props.capability2);
sysfs_show_64bit_prop(buffer, offs, "debug_prop",
dev->node_props.debug_prop);
sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version",
@@ -968,24 +952,23 @@ static void kfd_update_system_properties(void)
up_read(&topology_lock);
}
-static void find_system_memory(const struct dmi_header *dm,
- void *private)
+static void find_system_memory(const struct dmi_header *dm, void *private)
{
+ struct dmi_mem_device *memdev = container_of(dm, struct dmi_mem_device, header);
struct kfd_mem_properties *mem;
- u16 mem_width, mem_clock;
struct kfd_topology_device *kdev =
(struct kfd_topology_device *)private;
- const u8 *dmi_data = (const u8 *)(dm + 1);
-
- if (dm->type == DMI_ENTRY_MEM_DEVICE && dm->length >= 0x15) {
- mem_width = (u16)(*(const u16 *)(dmi_data + 0x6));
- mem_clock = (u16)(*(const u16 *)(dmi_data + 0x11));
- list_for_each_entry(mem, &kdev->mem_props, list) {
- if (mem_width != 0xFFFF && mem_width != 0)
- mem->width = mem_width;
- if (mem_clock != 0)
- mem->mem_clk_max = mem_clock;
- }
+
+ if (memdev->header.type != DMI_ENTRY_MEM_DEVICE)
+ return;
+ if (memdev->header.length < sizeof(struct dmi_mem_device))
+ return;
+
+ list_for_each_entry(mem, &kdev->mem_props, list) {
+ if (memdev->total_width != 0xFFFF && memdev->total_width != 0)
+ mem->width = memdev->total_width;
+ if (memdev->speed != 0)
+ mem->mem_clk_max = memdev->speed;
}
}
@@ -1683,17 +1666,32 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
int cache_type, unsigned int cu_processor_id,
struct kfd_node *knode)
{
- unsigned int cu_sibling_map_mask;
+ unsigned int cu_sibling_map_mask = 0;
int first_active_cu;
int i, j, k, xcc, start, end;
int num_xcc = NUM_XCC(knode->xcc_mask);
struct kfd_cache_properties *pcache = NULL;
enum amdgpu_memory_partition mode;
struct amdgpu_device *adev = knode->adev;
+ bool found = false;
start = ffs(knode->xcc_mask) - 1;
end = start + num_xcc;
- cu_sibling_map_mask = cu_info->bitmap[start][0][0];
+
+ /* Find the bitmap of the first active cu in the first
+ * xcc. This relies on the assumption that every xcc must
+ * have at least one active cu.
+ */
+ for (i = 0; i < gfx_info->max_shader_engines && !found; i++) {
+ for (j = 0; j < gfx_info->max_sh_per_se && !found; j++) {
+ if (cu_info->bitmap[start][i % 4][j % 4]) {
+ cu_sibling_map_mask =
+ cu_info->bitmap[start][i % 4][j % 4];
+ found = true;
+ }
+ }
+ }
+
cu_sibling_map_mask &=
((1 << pcache_info[cache_type].num_cu_shared) - 1);
first_active_cu = ffs(cu_sibling_map_mask);
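
A standalone sketch of the bitmap search introduced above: scan shader engines and shader arrays until the first non-zero CU bitmap word is found, then derive the sibling mask from it. The 4x4 bitmap shape and the % 4 wrapping mirror the cu_info->bitmap indexing in the hunk and are assumptions of the sketch.

#include <stdbool.h>
#include <stdio.h>

/* Return the first non-zero CU bitmap word, or 0 if no CU is active. */
static unsigned int first_active_cu_mask(unsigned int bitmap[4][4],
					 int max_se, int max_sh)
{
	bool found = false;
	unsigned int mask = 0;

	for (int i = 0; i < max_se && !found; i++) {
		for (int j = 0; j < max_sh && !found; j++) {
			if (bitmap[i % 4][j % 4]) {
				mask = bitmap[i % 4][j % 4];
				found = true;
			}
		}
	}
	return mask;
}

int main(void)
{
	unsigned int bitmap[4][4] = { { 0, 0, 0, 0 }, { 0, 0x3, 0, 0 } };

	printf("0x%x\n", first_active_cu_mask(bitmap, 4, 4));	/* prints 0x3 */
	return 0;
}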
@@ -1985,6 +1983,9 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
if (kfd_dbg_has_ttmps_always_setup(dev->gpu))
dev->node_props.debug_prop |= HSA_DBG_DISPATCH_INFO_ALWAYS_VALID;
+ if (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
+ dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED;
+
if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0)) {
if (KFD_GC_VERSION(dev->gpu) == IP_VERSION(9, 4, 3) ||
KFD_GC_VERSION(dev->gpu) == IP_VERSION(9, 4, 4))
@@ -2005,10 +2006,6 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 |
HSA_DBG_WATCH_ADDR_MASK_HI_BIT;
- if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(11, 0, 0))
- dev->node_props.capability |=
- HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED;
-
if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(12, 0, 0))
dev->node_props.capability |=
HSA_CAP_TRAP_DEBUG_PRECISE_ALU_OPERATIONS_SUPPORTED;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 155b5c410af1..3de8ec0043bb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -24,6 +24,7 @@
#ifndef __KFD_TOPOLOGY_H__
#define __KFD_TOPOLOGY_H__
+#include <linux/dmi.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/kfd_sysfs.h>
@@ -50,6 +51,7 @@ struct kfd_node_properties {
uint32_t cpu_core_id_base;
uint32_t simd_id_base;
uint32_t capability;
+ uint32_t capability2;
uint64_t debug_prop;
uint32_t max_waves_per_simd;
uint32_t lds_size_in_kb;
@@ -179,6 +181,22 @@ struct kfd_system_properties {
struct attribute attr_props;
};
+struct dmi_mem_device {
+ struct dmi_header header;
+ u16 physical_handle;
+ u16 error_handle;
+ u16 total_width;
+ u16 data_width;
+ u16 size;
+ u8 form_factor;
+ u8 device_set;
+ u8 device_locator;
+ u8 bank_locator;
+ u8 memory_type;
+ u16 type_detail;
+ u16 speed;
+} __packed;
+
struct kfd_topology_device *kfd_create_topology_device(
struct list_head *device_list);
void kfd_release_topology_device_list(struct list_head *device_list);
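
For context on the struct added above: it overlays the fixed-offset head of an SMBIOS Type 17 (Memory Device) record, and a decoder such as find_system_memory() is typically driven by dmi_walk() from <linux/dmi.h>. A minimal sketch of that wiring; the helper name here is illustrative, not part of the patch.

/* Sketch only: drive the Type 17 decoder with dmi_walk(). */
static void example_fill_dimm_props(struct kfd_topology_device *kdev)
{
	/* find_system_memory() filters on DMI_ENTRY_MEM_DEVICE itself. */
	if (dmi_walk(find_system_memory, kdev))
		pr_debug("no DMI table available\n");
}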
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 9d9645a2d18e..bae83a129b5f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -155,6 +155,9 @@ MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);
#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB);
+#define FIRMWARE_DCN_36_DMUB "amdgpu/dcn_3_6_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_36_DMUB);
+
#define FIRMWARE_DCN_401_DMUB "amdgpu/dcn_4_0_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_401_DMUB);
@@ -179,6 +182,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
static void reset_freesync_config_for_crtc(struct dm_crtc_state *new_crtc_state);
+static struct amdgpu_i2c_adapter *
+create_i2c(struct ddc_service *ddc_service, bool oem);
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
@@ -245,6 +250,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);
+static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
+ int bl_idx,
+ u32 user_brightness);
+
static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
struct drm_crtc_state *new_crtc_state);
@@ -316,7 +325,7 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
return 0;
}
-static bool dm_is_idle(void *handle)
+static bool dm_is_idle(struct amdgpu_ip_block *ip_block)
{
/* XXX todo */
return true;
@@ -1267,6 +1276,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
case IP_VERSION(3, 1, 4):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 0, 1):
hw_params.dpia_supported = true;
hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
@@ -1278,6 +1288,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 6, 0):
hw_params.ips_sequential_ono = adev->external_rev_id > 0x10;
break;
default:
@@ -1741,7 +1752,7 @@ static void retrieve_dmi_info(struct amdgpu_display_manager *dm, struct dc_init_
}
if (quirk_entries.support_edp0_on_dp1) {
init_data->flags.support_edp0_on_dp1 = true;
- drm_info(dev, "aux_hpd_discon_quirk attached\n");
+ drm_info(dev, "support_edp0_on_dp1 attached\n");
}
}
@@ -1887,6 +1898,7 @@ static enum dmub_ips_disable_type dm_get_default_ips_mode(
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 6, 0):
/*
* On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to
* cause a hard hang. A fix exists for newer PMFW.
@@ -2023,7 +2035,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_device_seamless_boot_supported(adev)) {
init_data.flags.seamless_boot_edp_requested = true;
init_data.flags.allow_seamless_boot_optimization = true;
- DRM_INFO("Seamless boot condition check passed\n");
+ drm_dbg(adev->dm.ddev, "Seamless boot requested\n");
}
init_data.flags.enable_mipi_converter_optimization = true;
@@ -2390,6 +2402,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 0, 1):
return 0;
default:
@@ -2515,6 +2528,9 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
case IP_VERSION(3, 5, 1):
dmub_asic = DMUB_ASIC_DCN35;
break;
+ case IP_VERSION(3, 6, 0):
+ dmub_asic = DMUB_ASIC_DCN36;
+ break;
case IP_VERSION(4, 0, 1):
dmub_asic = DMUB_ASIC_DCN401;
break;
@@ -2948,6 +2964,33 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
return 0;
}
+static int dm_oem_i2c_hw_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct amdgpu_i2c_adapter *oem_i2c;
+ struct ddc_service *oem_ddc_service;
+ int r;
+
+ oem_ddc_service = dc_get_oem_i2c_device(adev->dm.dc);
+ if (oem_ddc_service) {
+ oem_i2c = create_i2c(oem_ddc_service, true);
+ if (!oem_i2c) {
+ dev_info(adev->dev, "Failed to create oem i2c adapter data\n");
+ return -ENOMEM;
+ }
+
+ r = i2c_add_adapter(&oem_i2c->base);
+ if (r) {
+ dev_info(adev->dev, "Failed to register oem i2c\n");
+ kfree(oem_i2c);
+ return r;
+ }
+ dm->oem_i2c = oem_i2c;
+ }
+
+ return 0;
+}
+
/**
* dm_hw_init() - Initialize DC device
* @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
@@ -2979,6 +3022,10 @@ static int dm_hw_init(struct amdgpu_ip_block *ip_block)
return r;
amdgpu_dm_hpd_init(adev);
+ r = dm_oem_i2c_hw_init(adev);
+ if (r)
+ dev_info(adev->dev, "Failed to add OEM i2c bus\n");
+
return 0;
}
@@ -2994,6 +3041,8 @@ static int dm_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ kfree(adev->dm.oem_i2c);
+
amdgpu_dm_hpd_fini(adev);
amdgpu_dm_irq_fini(adev);
@@ -3041,10 +3090,11 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
}
+DEFINE_FREE(state_release, struct dc_state *, if (_T) dc_state_release(_T))
+
static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
- struct dc_state *context = NULL;
- enum dc_status res = DC_ERROR_UNEXPECTED;
+ struct dc_state *context __free(state_release) = NULL;
int i;
struct dc_stream_state *del_streams[MAX_PIPES];
int del_streams_count = 0;
@@ -3054,7 +3104,7 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
context = dc_state_create_current_copy(dc);
if (context == NULL)
- goto context_alloc_fail;
+ return DC_ERROR_UNEXPECTED;
/* First remove from context all streams */
for (i = 0; i < context->stream_count; i++) {
@@ -3065,25 +3115,20 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
/* Remove all planes for removed streams and then remove the streams */
for (i = 0; i < del_streams_count; i++) {
- if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) {
- res = DC_FAIL_DETACH_SURFACES;
- goto fail;
- }
+ enum dc_status res;
+
+ if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context))
+ return DC_FAIL_DETACH_SURFACES;
res = dc_state_remove_stream(dc, context, del_streams[i]);
if (res != DC_OK)
- goto fail;
+ return res;
}
params.streams = context->streams;
params.stream_count = context->stream_count;
- res = dc_commit_streams(dc, &params);
-fail:
- dc_state_release(context);
-
-context_alloc_fail:
- return res;
+ return dc_commit_streams(dc, &params);
}
static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
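
The DEFINE_FREE()/__free() conversion above uses the scope-based cleanup helpers from <linux/cleanup.h>: the named cleanup expression runs automatically when the annotated local goes out of scope, which is what lets the goto labels disappear. A minimal sketch of the pattern; example_ctx_get/put/valid/commit are illustrative stand-ins, not real APIs.

/* Requires <linux/cleanup.h>; names below are stand-ins for the sketch. */
struct example_ctx;
struct example_ctx *example_ctx_get(void);
void example_ctx_put(struct example_ctx *ctx);
bool example_ctx_valid(struct example_ctx *ctx);
int example_ctx_commit(struct example_ctx *ctx);

DEFINE_FREE(example_release, struct example_ctx *, if (_T) example_ctx_put(_T))

static int example_use_ctx(void)
{
	struct example_ctx *ctx __free(example_release) = example_ctx_get();

	if (!ctx)
		return -ENOMEM;
	if (!example_ctx_valid(ctx))
		return -EINVAL;		/* example_ctx_put() runs here */

	return example_ctx_commit(ctx);	/* ...and after this call as well */
}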
@@ -3096,13 +3141,29 @@ static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
}
}
+static int dm_prepare_suspend(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ if (amdgpu_in_reset(adev))
+ return 0;
+
+ WARN_ON(adev->dm.cached_state);
+ adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
+ if (IS_ERR(adev->dm.cached_state))
+ return PTR_ERR(adev->dm.cached_state);
+
+ return 0;
+}
+
static int dm_suspend(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_display_manager *dm = &adev->dm;
- int ret = 0;
if (amdgpu_in_reset(adev)) {
+ enum dc_status res;
+
mutex_lock(&dm->dc_lock);
dc_allow_idle_optimizations(adev->dm.dc, false);
@@ -3112,19 +3173,24 @@ static int dm_suspend(struct amdgpu_ip_block *ip_block)
if (dm->cached_dc_state)
dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
- amdgpu_dm_commit_zero_streams(dm->dc);
+ res = amdgpu_dm_commit_zero_streams(dm->dc);
+ if (res != DC_OK) {
+ drm_err(adev_to_drm(adev), "Failed to commit zero streams: %d\n", res);
+ return -EINVAL;
+ }
amdgpu_dm_irq_suspend(adev);
hpd_rx_irq_work_suspend(dm);
- return ret;
+ return 0;
}
- WARN_ON(adev->dm.cached_state);
- adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
- if (IS_ERR(adev->dm.cached_state))
- return PTR_ERR(adev->dm.cached_state);
+ if (!adev->dm.cached_state) {
+ adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
+ if (IS_ERR(adev->dm.cached_state))
+ return PTR_ERR(adev->dm.cached_state);
+ }
s3_handle_hdmi_cec(adev_to_drm(adev), true);
@@ -3255,14 +3321,14 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
struct dc_scaling_info scaling_infos[MAX_SURFACES];
struct dc_flip_addrs flip_addrs[MAX_SURFACES];
struct dc_stream_update stream_update;
- } *bundle;
+ } *bundle __free(kfree);
int k, m;
bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
if (!bundle) {
drm_err(dm->ddev, "Failed to allocate update bundle\n");
- goto cleanup;
+ return;
}
for (k = 0; k < dc_state->stream_count; k++) {
@@ -3282,9 +3348,24 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
&bundle->stream_update,
bundle->surface_updates);
}
+}
-cleanup:
- kfree(bundle);
+static void apply_delay_after_dpcd_poweroff(struct amdgpu_device *adev,
+ struct dc_sink *sink)
+{
+ struct dc_panel_patch *ppatch = NULL;
+
+ if (!sink)
+ return;
+
+ ppatch = &sink->edid_caps.panel_patch;
+ if (ppatch->wait_after_dpcd_poweroff_ms) {
+ msleep(ppatch->wait_after_dpcd_poweroff_ms);
+ drm_dbg_driver(adev_to_drm(adev),
+ "%s: adding a %ds delay as w/a for panel\n",
+ __func__,
+ ppatch->wait_after_dpcd_poweroff_ms / 1000);
+ }
}
static int dm_resume(struct amdgpu_ip_block *ip_block)
@@ -3333,7 +3414,7 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
r = dm_dmub_hw_init(adev);
if (r)
- DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+ drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
@@ -3371,8 +3452,19 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
mutex_unlock(&dm->dc_lock);
+ /* set the backlight after a reset */
+ for (i = 0; i < dm->num_of_edps; i++) {
+ if (dm->backlight_dev[i])
+ amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
+ }
+
return 0;
}
+
+ /* leave display off for S4 sequence */
+ if (adev->in_s4)
+ return 0;
+
/* Recreate dc_state - DC invalidates it when setting power state to S3. */
dc_state_release(dm_state->context);
dm_state->context = dc_state_create(dm->dc, NULL);
@@ -3408,6 +3500,7 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
/* Do detection*/
drm_connector_list_iter_begin(ddev, &iter);
drm_for_each_connector_iter(connector, &iter) {
+ bool ret;
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
@@ -3424,17 +3517,20 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
if (aconnector->mst_root)
continue;
- mutex_lock(&aconnector->hpd_lock);
+ guard(mutex)(&aconnector->hpd_lock);
if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
- DRM_ERROR("KMS: Failed to detect connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(aconnector->dc_link);
} else {
- mutex_lock(&dm->dc_lock);
+ guard(mutex)(&dm->dc_lock);
dc_exit_ips_for_hw_access(dm->dc);
- dc_link_detect(aconnector->dc_link, DETECT_REASON_RESUMEFROMS3S4);
- mutex_unlock(&dm->dc_lock);
+ ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_RESUMEFROMS3S4);
+ if (ret) {
+ /* w/a delay for certain panels */
+ apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink);
+ }
}
if (aconnector->fake_enable && aconnector->dc_link->local_sink)
@@ -3444,7 +3540,6 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
amdgpu_dm_update_connector_after_detect(aconnector);
- mutex_unlock(&aconnector->hpd_lock);
}
drm_connector_list_iter_end(&iter);
@@ -3527,6 +3622,7 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {
.early_fini = amdgpu_dm_early_fini,
.hw_init = dm_hw_init,
.hw_fini = dm_hw_fini,
+ .prepare_suspend = dm_prepare_suspend,
.suspend = dm_suspend,
.resume = dm_resume,
.is_idle = dm_is_idle,
@@ -3613,12 +3709,14 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
caps->min_input_signal = min_input_signal_override;
}
+DEFINE_FREE(sink_release, struct dc_sink *, if (_T) dc_sink_release(_T))
+
void amdgpu_dm_update_connector_after_detect(
struct amdgpu_dm_connector *aconnector)
{
struct drm_connector *connector = &aconnector->base;
+ struct dc_sink *sink __free(sink_release) = NULL;
struct drm_device *dev = connector->dev;
- struct dc_sink *sink;
/* MST handled by drm_mst framework */
if (aconnector->mst_mgr.mst_state == true)
@@ -3640,7 +3738,7 @@ void amdgpu_dm_update_connector_after_detect(
* For S3 resume with headless use eml_sink to fake stream
* because on resume connector->sink is set to NULL
*/
- mutex_lock(&dev->mode_config.mutex);
+ guard(mutex)(&dev->mode_config.mutex);
if (sink) {
if (aconnector->dc_sink) {
@@ -3665,10 +3763,6 @@ void amdgpu_dm_update_connector_after_detect(
}
}
- mutex_unlock(&dev->mode_config.mutex);
-
- if (sink)
- dc_sink_release(sink);
return;
}
@@ -3676,10 +3770,8 @@ void amdgpu_dm_update_connector_after_detect(
* TODO: temporary guard to look for proper fix
* if this sink is MST sink, we should not do anything
*/
- if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- dc_sink_release(sink);
+ if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
return;
- }
if (aconnector->dc_sink == sink) {
/*
@@ -3688,15 +3780,13 @@ void amdgpu_dm_update_connector_after_detect(
*/
drm_dbg_kms(dev, "DCHPD: connector_id=%d: dc_sink didn't change.\n",
aconnector->connector_id);
- if (sink)
- dc_sink_release(sink);
return;
}
drm_dbg_kms(dev, "DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
aconnector->connector_id, aconnector->dc_sink, sink);
- mutex_lock(&dev->mode_config.mutex);
+ guard(mutex)(&dev->mode_config.mutex);
/*
* 1. Update status of the drm connector
@@ -3758,12 +3848,7 @@ void amdgpu_dm_update_connector_after_detect(
connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
}
- mutex_unlock(&dev->mode_config.mutex);
-
update_subconnector_property(aconnector);
-
- if (sink)
- dc_sink_release(sink);
}
static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
@@ -3783,7 +3868,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
* In case of failure or MST no need to update connector status or notify the OS
* since (for MST case) MST does this in its own context.
*/
- mutex_lock(&aconnector->hpd_lock);
+ guard(mutex)(&aconnector->hpd_lock);
if (adev->dm.hdcp_workqueue) {
hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
@@ -3795,7 +3880,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
aconnector->timing_changed = false;
if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
- DRM_ERROR("KMS: Failed to detect connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(aconnector->dc_link);
@@ -3807,11 +3892,13 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
drm_kms_helper_connector_hotplug_event(connector);
} else {
- mutex_lock(&adev->dm.dc_lock);
- dc_exit_ips_for_hw_access(dc);
- ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
- mutex_unlock(&adev->dm.dc_lock);
+ scoped_guard(mutex, &adev->dm.dc_lock) {
+ dc_exit_ips_for_hw_access(dc);
+ ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ }
if (ret) {
+ /* w/a delay for certain panels */
+ apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink);
amdgpu_dm_update_connector_after_detect(aconnector);
drm_modeset_lock_all(dev);
@@ -3822,8 +3909,6 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
drm_kms_helper_connector_hotplug_event(connector);
}
}
- mutex_unlock(&aconnector->hpd_lock);
-
}
static void handle_hpd_irq(void *param)
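
The guard()/scoped_guard() conversions above come from the same <linux/cleanup.h> machinery: guard(mutex)(&m) keeps the mutex held until the enclosing scope ends, while scoped_guard(mutex, &m) limits it to one block, so the explicit mutex_unlock() calls on every exit path can be dropped. A small sketch with illustrative names.

/* guard()/scoped_guard() come from <linux/cleanup.h>, the mutex class from
 * <linux/mutex.h>. example_lock/example_state are stand-ins for the sketch.
 */
static DEFINE_MUTEX(example_lock);
static int example_state;

static int example_set(int value)
{
	guard(mutex)(&example_lock);	/* unlocked automatically on return */

	if (value < 0)
		return -EINVAL;
	example_state = value;
	return 0;
}

static int example_get(void)
{
	int v;

	scoped_guard(mutex, &example_lock)
		v = example_state;	/* lock held only for this statement */
	return v;
}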
@@ -4663,48 +4748,40 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
int bl_idx)
{
-#if defined(CONFIG_ACPI)
- struct amdgpu_dm_backlight_caps caps;
-
- memset(&caps, 0, sizeof(caps));
+ struct amdgpu_dm_backlight_caps *caps = &dm->backlight_caps[bl_idx];
- if (dm->backlight_caps[bl_idx].caps_valid)
+ if (caps->caps_valid)
return;
- amdgpu_acpi_get_backlight_caps(&caps);
+#if defined(CONFIG_ACPI)
+ amdgpu_acpi_get_backlight_caps(caps);
/* validate the firmware value is sane */
- if (caps.caps_valid) {
- int spread = caps.max_input_signal - caps.min_input_signal;
+ if (caps->caps_valid) {
+ int spread = caps->max_input_signal - caps->min_input_signal;
- if (caps.max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
- caps.min_input_signal < 0 ||
+ if (caps->max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
+ caps->min_input_signal < 0 ||
spread > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
spread < AMDGPU_DM_MIN_SPREAD) {
DRM_DEBUG_KMS("DM: Invalid backlight caps: min=%d, max=%d\n",
- caps.min_input_signal, caps.max_input_signal);
- caps.caps_valid = false;
+ caps->min_input_signal, caps->max_input_signal);
+ caps->caps_valid = false;
}
}
- if (caps.caps_valid) {
- dm->backlight_caps[bl_idx].caps_valid = true;
- if (caps.aux_support)
- return;
- dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
- dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
- } else {
- dm->backlight_caps[bl_idx].min_input_signal =
- AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
- dm->backlight_caps[bl_idx].max_input_signal =
- AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+ if (!caps->caps_valid) {
+ caps->min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
+ caps->max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+ caps->caps_valid = true;
}
#else
- if (dm->backlight_caps[bl_idx].aux_support)
+ if (caps->aux_support)
return;
- dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
- dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+ caps->min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
+ caps->max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+ caps->caps_valid = true;
#endif
}
@@ -4730,10 +4807,38 @@ static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *c
uint32_t brightness)
{
unsigned int min, max;
+ u8 prev_signal = 0, prev_lum = 0;
if (!get_brightness_range(caps, &min, &max))
return brightness;
+ for (int i = 0; i < caps->data_points; i++) {
+ u8 signal, lum;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)
+ break;
+
+ signal = caps->luminance_data[i].input_signal;
+ lum = caps->luminance_data[i].luminance;
+
+ /*
+ * brightness == signal: luminance is percent numerator
+ * brightness < signal: interpolate between previous and current luminance numerator
+ * brightness > signal: find next data point
+ */
+ if (brightness < signal)
+ lum = prev_lum + DIV_ROUND_CLOSEST((lum - prev_lum) *
+ (brightness - prev_signal),
+ signal - prev_signal);
+ else if (brightness > signal) {
+ prev_signal = signal;
+ prev_lum = lum;
+ continue;
+ }
+ brightness = DIV_ROUND_CLOSEST(lum * brightness, 101);
+ break;
+ }
+
// Rescale 0..255 to min..max
return min + DIV_ROUND_CLOSEST((max - min) * brightness,
AMDGPU_MAX_BL_LEVEL);
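
The loop above walks the custom (input_signal, luminance) points, linearly interpolates between the surrounding pair, and scales the requested brightness by that luminance percentage before the 0..255 to min..max rescale. A standalone sketch of the same math; the data points in main() are invented for illustration and the curve is assumed to be monotonically non-decreasing, as panel data is expected to be.

#include <stdint.h>
#include <stdio.h>

/* Round-to-nearest integer division, like the kernel's DIV_ROUND_CLOSEST(). */
#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

struct lum_point { uint8_t input_signal; uint8_t luminance; };

static uint32_t apply_luminance_curve(const struct lum_point *pts, int n,
				      uint32_t brightness)
{
	uint32_t prev_signal = 0, prev_lum = 0;

	for (int i = 0; i < n; i++) {
		uint32_t signal = pts[i].input_signal;
		uint32_t lum = pts[i].luminance;

		if (brightness < signal)
			/* Interpolate between the previous and current point. */
			lum = prev_lum + DIV_ROUND_CLOSEST((lum - prev_lum) *
							   (brightness - prev_signal),
							   signal - prev_signal);
		else if (brightness > signal) {
			prev_signal = signal;
			prev_lum = lum;
			continue;
		}
		return DIV_ROUND_CLOSEST(lum * brightness, 101);
	}
	return brightness;	/* past the last point: leave unscaled */
}

int main(void)
{
	const struct lum_point pts[] = { { 64, 10 }, { 128, 40 }, { 255, 100 } };

	/* 96 sits halfway between 64 and 128, so luminance works out to 25%. */
	printf("%u\n", apply_luminance_curve(pts, 3, 96));	/* prints 24 */
	return 0;
}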
@@ -4758,19 +4863,19 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
int bl_idx,
u32 user_brightness)
{
- struct amdgpu_dm_backlight_caps caps;
+ struct amdgpu_dm_backlight_caps *caps;
struct dc_link *link;
u32 brightness;
bool rc, reallow_idle = false;
amdgpu_dm_update_backlight_caps(dm, bl_idx);
- caps = dm->backlight_caps[bl_idx];
+ caps = &dm->backlight_caps[bl_idx];
dm->brightness[bl_idx] = user_brightness;
/* update scratch register */
if (bl_idx == 0)
amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
- brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
+ brightness = convert_brightness_from_user(caps, dm->brightness[bl_idx]);
link = (struct dc_link *)dm->backlight_link[bl_idx];
/* Change brightness based on AUX property */
@@ -4780,7 +4885,7 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
reallow_idle = true;
}
- if (caps.aux_support) {
+ if (caps->aux_support) {
rc = dc_link_set_backlight_level_nits(link, true, brightness,
AUX_BL_DEFAULT_TRANSITION_TIME_MS);
if (!rc)
@@ -4897,6 +5002,8 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
} else
props.brightness = AMDGPU_MAX_BL_LEVEL;
+ if (caps.data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE))
+ drm_info(drm, "Using custom brightness curve\n");
props.max_brightness = AMDGPU_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;
@@ -4906,6 +5013,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
dm->backlight_dev[aconnector->bl_idx] =
backlight_device_register(bl_name, aconnector->base.kdev, dm,
&amdgpu_dm_backlight_ops, &props);
+ dm->brightness[aconnector->bl_idx] = props.brightness;
if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
DRM_ERROR("DM: Backlight registration failed!\n");
@@ -4973,7 +5081,6 @@ static void setup_backlight_device(struct amdgpu_display_manager *dm,
aconnector->bl_idx = bl_idx;
amdgpu_dm_update_backlight_caps(dm, bl_idx);
- dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL;
dm->backlight_link[bl_idx] = link;
dm->num_of_edps++;
@@ -5089,6 +5196,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(2, 1, 0):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 0, 1):
if (register_outbox_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
@@ -5112,6 +5220,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 0, 1):
psr_feature_enabled = true;
break;
@@ -5129,6 +5238,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 6, 0):
replay_feature_enabled = true;
break;
@@ -5278,6 +5388,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 0, 1):
if (dcn10_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
@@ -5420,6 +5531,9 @@ static int dm_init_microcode(struct amdgpu_device *adev)
case IP_VERSION(3, 5, 1):
fw_name_dmub = FIRMWARE_DCN_351_DMUB;
break;
+ case IP_VERSION(3, 6, 0):
+ fw_name_dmub = FIRMWARE_DCN_36_DMUB;
+ break;
case IP_VERSION(4, 0, 1):
fw_name_dmub = FIRMWARE_DCN_401_DMUB;
break;
@@ -5548,6 +5662,7 @@ static int dm_early_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 0, 1):
adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 4;
@@ -5626,9 +5741,9 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state,
case DRM_COLOR_YCBCR_BT2020:
if (full_range)
- *color_space = COLOR_SPACE_2020_YCBCR;
+ *color_space = COLOR_SPACE_2020_YCBCR_FULL;
else
- return -EINVAL;
+ *color_space = COLOR_SPACE_2020_YCBCR_LIMITED;
break;
default:
@@ -6124,12 +6239,14 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing,
if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
color_space = COLOR_SPACE_2020_RGB_FULLRANGE;
else
- color_space = COLOR_SPACE_2020_YCBCR;
+ color_space = COLOR_SPACE_2020_YCBCR_LIMITED;
break;
case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601
default:
if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) {
color_space = COLOR_SPACE_SRGB;
+ if (connector_state->hdmi.broadcast_rgb == DRM_HDMI_BROADCAST_RGB_LIMITED)
+ color_space = COLOR_SPACE_SRGB_LIMITED;
/*
* 27030khz is the separation point between HDTV and SDTV
* according to HDMI spec, we use YCbCr709 and YCbCr601
@@ -7469,12 +7586,12 @@ cleanup:
}
struct dc_stream_state *
-create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+create_validate_stream_for_sink(struct drm_connector *connector,
const struct drm_display_mode *drm_mode,
const struct dm_connector_state *dm_state,
const struct dc_stream_state *old_stream)
{
- struct drm_connector *connector = &aconnector->base;
+ struct amdgpu_dm_connector *aconnector = NULL;
struct amdgpu_device *adev = drm_to_adev(connector->dev);
struct dc_stream_state *stream;
const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
@@ -7485,8 +7602,12 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (!dm_state)
return NULL;
- if (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A ||
- aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ if (aconnector &&
+ (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A ||
+ aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER))
bpc_limit = 8;
do {
@@ -7498,10 +7619,11 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
break;
}
- if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ dc_result = dc_validate_stream(adev->dm.dc, stream);
+
+ if (!aconnector) /* writeback connector */
return stream;
- dc_result = dc_validate_stream(adev->dm.dc, stream);
if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
@@ -7531,7 +7653,7 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
__func__, __LINE__);
aconnector->force_yuv420_output = true;
- stream = create_validate_stream_for_sink(aconnector, drm_mode,
+ stream = create_validate_stream_for_sink(connector, drm_mode,
dm_state, old_stream);
aconnector->force_yuv420_output = false;
}
@@ -7540,12 +7662,16 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
}
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
int result = MODE_ERROR;
struct dc_sink *dc_sink;
+ struct drm_display_mode *test_mode;
/* TODO: Unhardcode stream count */
struct dc_stream_state *stream;
+ /* we always have an amdgpu_dm_connector here since we got
+ * here via the amdgpu_dm_connector_helper_funcs
+ */
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
@@ -7568,11 +7694,16 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
goto fail;
}
- drm_mode_set_crtcinfo(mode, 0);
+ test_mode = drm_mode_duplicate(connector->dev, mode);
+ if (!test_mode)
+ goto fail;
+
+ drm_mode_set_crtcinfo(test_mode, 0);
- stream = create_validate_stream_for_sink(aconnector, mode,
+ stream = create_validate_stream_for_sink(connector, test_mode,
to_dm_connector_state(connector->state),
NULL);
+ drm_mode_destroy(connector->dev, test_mode);
if (stream) {
dc_stream_release(stream);
result = MODE_OK;
@@ -8298,6 +8429,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
dm->ddev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA
+ || (connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root))
+ drm_connector_attach_broadcast_rgb_property(&aconnector->base);
+
drm_object_attach_property(&aconnector->base.base,
adev->mode_info.underscan_property,
UNDERSCAN_OFF);
@@ -8350,7 +8485,7 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
int i;
int result = -EIO;
- if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported)
+ if (!ddc_service->ddc_pin)
return result;
cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
@@ -8369,11 +8504,18 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
cmd.payloads[i].data = msgs[i].buf;
}
- if (dc_submit_i2c(
- ddc_service->ctx->dc,
- ddc_service->link->link_index,
- &cmd))
- result = num;
+ if (i2c->oem) {
+ if (dc_submit_i2c_oem(
+ ddc_service->ctx->dc,
+ &cmd))
+ result = num;
+ } else {
+ if (dc_submit_i2c(
+ ddc_service->ctx->dc,
+ ddc_service->link->link_index,
+ &cmd))
+ result = num;
+ }
kfree(cmd.payloads);
return result;
@@ -8390,9 +8532,7 @@ static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
};
static struct amdgpu_i2c_adapter *
-create_i2c(struct ddc_service *ddc_service,
- int link_index,
- int *res)
+create_i2c(struct ddc_service *ddc_service, bool oem)
{
struct amdgpu_device *adev = ddc_service->ctx->driver_context;
struct amdgpu_i2c_adapter *i2c;
@@ -8403,9 +8543,14 @@ create_i2c(struct ddc_service *ddc_service,
i2c->base.owner = THIS_MODULE;
i2c->base.dev.parent = &adev->pdev->dev;
i2c->base.algo = &amdgpu_dm_i2c_algo;
- snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
+ if (oem)
+ snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c OEM bus");
+ else
+ snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d",
+ ddc_service->link->link_index);
i2c_set_adapdata(&i2c->base, i2c);
i2c->ddc_service = ddc_service;
+ i2c->oem = oem;
return i2c;
}
@@ -8451,7 +8596,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
link->priv = aconnector;
- i2c = create_i2c(link->ddc, link->link_index, &res);
+ i2c = create_i2c(link->ddc, false);
if (!i2c) {
DRM_ERROR("Failed to create i2c adapter data\n");
return -ENOMEM;
@@ -10058,7 +10203,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct dc_stream_update stream_update;
struct dc_info_packet hdr_packet;
struct dc_stream_status *status = NULL;
- bool abm_changed, hdr_changed, scaling_changed;
+ bool abm_changed, hdr_changed, scaling_changed, output_color_space_changed = false;
memset(&stream_update, 0, sizeof(stream_update));
@@ -10077,13 +10222,18 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
scaling_changed = is_scaling_state_different(dm_new_con_state,
dm_old_con_state);
+ if ((new_con_state->hdmi.broadcast_rgb != old_con_state->hdmi.broadcast_rgb) &&
+ (dm_old_crtc_state->stream->output_color_space !=
+ get_output_color_space(&dm_new_crtc_state->stream->timing, new_con_state)))
+ output_color_space_changed = true;
+
abm_changed = dm_new_crtc_state->abm_level !=
dm_old_crtc_state->abm_level;
hdr_changed =
!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
- if (!scaling_changed && !abm_changed && !hdr_changed)
+ if (!scaling_changed && !abm_changed && !hdr_changed && !output_color_space_changed)
continue;
stream_update.stream = dm_new_crtc_state->stream;
@@ -10095,6 +10245,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
stream_update.dst = dm_new_crtc_state->stream->dst;
}
+ if (output_color_space_changed) {
+ dm_new_crtc_state->stream->output_color_space
+ = get_output_color_space(&dm_new_crtc_state->stream->timing, new_con_state);
+
+ stream_update.output_color_space = &dm_new_crtc_state->stream->output_color_space;
+ }
+
if (abm_changed) {
dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
@@ -10595,7 +10752,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
goto skip_modeset;
- new_stream = create_validate_stream_for_sink(aconnector,
+ new_stream = create_validate_stream_for_sink(connector,
&new_crtc_state->mode,
dm_new_conn_state,
dm_old_crtc_state->stream);
@@ -12628,3 +12785,10 @@ bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count,
{
return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type);
}
+
+void dm_acpi_process_phy_transition_interlock(
+ const struct dc_context *ctx,
+ struct dm_process_phy_transition_init_params process_phy_transition_init_params)
+{
+ // Not yet implemented
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index d2703ca7dff3..385faaca6e26 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -151,6 +151,18 @@ struct idle_workqueue {
bool running;
};
+#define MAX_LUMINANCE_DATA_POINTS 99
+
+/**
+ * struct amdgpu_dm_luminance_data - Custom luminance data
+ * @luminance: Luminance in percent
+ * @input_signal: Input signal in range 0-255
+ */
+struct amdgpu_dm_luminance_data {
+ u8 luminance;
+ u8 input_signal;
+} __packed;
+
/**
* struct amdgpu_dm_backlight_caps - Information about backlight
*
@@ -195,6 +207,14 @@ struct amdgpu_dm_backlight_caps {
* @dc_level: the default brightness if booted on DC
*/
u8 dc_level;
+ /**
+ * @data_points: the number of custom luminance data points
+ */
+ u8 data_points;
+ /**
+ * @luminance_data: custom luminance data
+ */
+ struct amdgpu_dm_luminance_data luminance_data[MAX_LUMINANCE_DATA_POINTS];
};
/**
@@ -606,6 +626,13 @@ struct amdgpu_display_manager {
* Bounding box data read from dmub during early initialization for DCN4+
*/
struct dml2_soc_bb *bb_from_dmub;
+
+ /**
+ * @oem_i2c:
+ *
+ * OEM i2c bus
+ */
+ struct amdgpu_i2c_adapter *oem_i2c;
};
enum dsc_clock_force_state {
@@ -949,7 +976,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
int link_index);
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
void dm_restore_drm_connector_state(struct drm_device *dev,
struct drm_connector *connector);
@@ -989,7 +1016,7 @@ int amdgpu_dm_process_dmub_set_config_sync(struct dc_context *ctx, unsigned int
struct set_config_cmd_payload *payload, enum set_config_status *operation_result);
struct dc_stream_state *
- create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ create_validate_stream_for_sink(struct drm_connector *connector,
const struct drm_display_mode *drm_mode,
const struct dm_connector_state *dm_state,
const struct dc_stream_state *old_stream);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 049046c60462..c7d13e743e6c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -1169,7 +1169,7 @@ static int amdgpu_current_colorspace_show(struct seq_file *m, void *data)
case COLOR_SPACE_2020_RGB_FULLRANGE:
seq_puts(m, "BT2020_RGB");
break;
- case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_2020_YCBCR_LIMITED:
seq_puts(m, "BT2020_YCC");
break;
default:
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index e339c7a8d541..a3e93b2891f0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -172,7 +172,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
struct mod_hdcp_display_adjustment display_adjust;
unsigned int conn_index = aconnector->base.index;
- mutex_lock(&hdcp_w->mutex);
+ guard(mutex)(&hdcp_w->mutex);
hdcp_w->aconnector[conn_index] = aconnector;
memset(&link_adjust, 0, sizeof(link_adjust));
@@ -209,7 +209,6 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
mod_hdcp_update_display(&hdcp_w->hdcp, conn_index, &link_adjust, &display_adjust, &hdcp_w->output);
process_output(hdcp_w);
- mutex_unlock(&hdcp_w->mutex);
}
static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
@@ -220,7 +219,7 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
struct drm_connector_state *conn_state = aconnector->base.state;
unsigned int conn_index = aconnector->base.index;
- mutex_lock(&hdcp_w->mutex);
+ guard(mutex)(&hdcp_w->mutex);
hdcp_w->aconnector[conn_index] = aconnector;
/* the removal of display will invoke auth reset -> hdcp destroy and
@@ -239,7 +238,6 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);
process_output(hdcp_w);
- mutex_unlock(&hdcp_w->mutex);
}
void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
@@ -247,7 +245,7 @@ void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_inde
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
unsigned int conn_index;
- mutex_lock(&hdcp_w->mutex);
+ guard(mutex)(&hdcp_w->mutex);
mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output);
@@ -259,8 +257,6 @@ void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_inde
}
process_output(hdcp_w);
-
- mutex_unlock(&hdcp_w->mutex);
}
void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
@@ -277,7 +273,7 @@ static void event_callback(struct work_struct *work)
hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue,
callback_dwork);
- mutex_lock(&hdcp_work->mutex);
+ guard(mutex)(&hdcp_work->mutex);
cancel_delayed_work(&hdcp_work->callback_dwork);
@@ -285,8 +281,6 @@ static void event_callback(struct work_struct *work)
&hdcp_work->output);
process_output(hdcp_work);
-
- mutex_unlock(&hdcp_work->mutex);
}
static void event_property_update(struct work_struct *work)
@@ -323,7 +317,7 @@ static void event_property_update(struct work_struct *work)
continue;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- mutex_lock(&hdcp_work->mutex);
+ guard(mutex)(&hdcp_work->mutex);
if (conn_state->commit) {
ret = wait_for_completion_interruptible_timeout(&conn_state->commit->hw_done,
@@ -355,7 +349,6 @@ static void event_property_update(struct work_struct *work)
drm_hdcp_update_content_protection(connector,
DRM_MODE_CONTENT_PROTECTION_DESIRED);
}
- mutex_unlock(&hdcp_work->mutex);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
}
@@ -368,7 +361,7 @@ static void event_property_validate(struct work_struct *work)
struct amdgpu_dm_connector *aconnector;
unsigned int conn_index;
- mutex_lock(&hdcp_work->mutex);
+ guard(mutex)(&hdcp_work->mutex);
for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX;
conn_index++) {
@@ -408,8 +401,6 @@ static void event_property_validate(struct work_struct *work)
schedule_work(&hdcp_work->property_update_work);
}
}
-
- mutex_unlock(&hdcp_work->mutex);
}
static void event_watchdog_timer(struct work_struct *work)
@@ -420,7 +411,7 @@ static void event_watchdog_timer(struct work_struct *work)
struct hdcp_workqueue,
watchdog_timer_dwork);
- mutex_lock(&hdcp_work->mutex);
+ guard(mutex)(&hdcp_work->mutex);
cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);
@@ -429,8 +420,6 @@ static void event_watchdog_timer(struct work_struct *work)
&hdcp_work->output);
process_output(hdcp_work);
-
- mutex_unlock(&hdcp_work->mutex);
}
static void event_cpirq(struct work_struct *work)
@@ -439,13 +428,11 @@ static void event_cpirq(struct work_struct *work)
hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work);
- mutex_lock(&hdcp_work->mutex);
+ guard(mutex)(&hdcp_work->mutex);
mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output);
process_output(hdcp_work);
-
- mutex_unlock(&hdcp_work->mutex);
}
void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
@@ -455,6 +442,7 @@ void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
for (i = 0; i < hdcp_work->max_link; i++) {
cancel_delayed_work_sync(&hdcp_work[i].callback_dwork);
cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
+ cancel_delayed_work_sync(&hdcp_work[i].property_validate_dwork);
}
sysfs_remove_bin_file(kobj, &hdcp_work[0].attr);
@@ -469,7 +457,6 @@ static bool enable_assr(void *handle, struct dc_link *link)
struct mod_hdcp hdcp = hdcp_work->hdcp;
struct psp_context *psp = hdcp.config.psp.handle;
struct ta_dtm_shared_memory *dtm_cmd;
- bool res = true;
if (!psp->dtm_context.context.initialized) {
DRM_INFO("Failed to enable ASSR, DTM TA is not initialized.");
@@ -478,7 +465,7 @@ static bool enable_assr(void *handle, struct dc_link *link)
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;
- mutex_lock(&psp->dtm_context.mutex);
+ guard(mutex)(&psp->dtm_context.mutex);
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE;
@@ -490,12 +477,10 @@ static bool enable_assr(void *handle, struct dc_link *link)
if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
DRM_INFO("Failed to enable ASSR");
- res = false;
+ return false;
}
- mutex_unlock(&psp->dtm_context.mutex);
-
- return res;
+ return true;
}
static void update_config(void *handle, struct cp_psp_stream_config *config)
@@ -556,13 +541,11 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
(!!aconnector->base.state) ?
aconnector->base.state->hdcp_content_type : -1);
- mutex_lock(&hdcp_w->mutex);
+ guard(mutex)(&hdcp_w->mutex);
mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output);
process_output(hdcp_w);
- mutex_unlock(&hdcp_w->mutex);
-
}
/**
@@ -742,6 +725,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
dc->ctx->dce_version == DCN_VERSION_3_15 ||
dc->ctx->dce_version == DCN_VERSION_3_5 ||
dc->ctx->dce_version == DCN_VERSION_3_51 ||
+ dc->ctx->dce_version == DCN_VERSION_3_6 ||
dc->ctx->dce_version == DCN_VERSION_3_16)
hdcp_work[i].hdcp.config.psp.caps.dtm_v3_supported = 1;
hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i);
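For reference, a minimal sketch of the scope-based locking pattern used throughout the hdcp hunks above, assuming a hypothetical struct foo; guard(mutex) comes from <linux/cleanup.h> and drops the lock when the enclosing scope ends, which is why the explicit mutex_unlock() calls could be removed:

#include <linux/cleanup.h>
#include <linux/mutex.h>

struct foo {
	struct mutex lock;
	int value;
};

static int foo_read_value(struct foo *f)
{
	/* Unlocked automatically at every exit from this scope. */
	guard(mutex)(&f->lock);

	if (f->value < 0)
		return -EINVAL;	/* no explicit mutex_unlock() needed */

	return f->value;
}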
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index fbd80d8545a8..2cd35392e2da 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -55,16 +55,21 @@ static u32 edid_extract_panel_id(struct edid *edid)
(u32)EDID_PRODUCT_ID(edid);
}
-static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
+static void apply_edid_quirks(struct drm_device *dev, struct edid *edid, struct dc_edid_caps *edid_caps)
{
uint32_t panel_id = edid_extract_panel_id(edid);
switch (panel_id) {
+ /* Workaround for monitors that need a delay after detecting the link */
+ case drm_edid_encode_panel_id('G', 'B', 'T', 0x3215):
+ drm_dbg_driver(dev, "Add 10s delay for link detection for panel id %X\n", panel_id);
+ edid_caps->panel_patch.wait_after_dpcd_poweroff_ms = 10000;
+ break;
/* Workaround for some monitors which does not work well with FAMS */
case drm_edid_encode_panel_id('S', 'A', 'M', 0x0E5E):
case drm_edid_encode_panel_id('S', 'A', 'M', 0x7053):
case drm_edid_encode_panel_id('S', 'A', 'M', 0x71AC):
- DRM_DEBUG_DRIVER("Disabling FAMS on monitor with panel id %X\n", panel_id);
+ drm_dbg_driver(dev, "Disabling FAMS on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.disable_fams = true;
break;
/* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */
@@ -73,11 +78,11 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
case drm_edid_encode_panel_id('B', 'O', 'E', 0x092A):
case drm_edid_encode_panel_id('L', 'G', 'D', 0x06D1):
case drm_edid_encode_panel_id('M', 'S', 'F', 0x1003):
- DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
+ drm_dbg_driver(dev, "Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.remove_sink_ext_caps = true;
break;
case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154):
- DRM_DEBUG_DRIVER("Disabling VSC on monitor with panel id %X\n", panel_id);
+ drm_dbg_driver(dev, "Disabling VSC on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.disable_colorimetry = true;
break;
default:
@@ -101,6 +106,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
{
struct amdgpu_dm_connector *aconnector = link->priv;
struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
struct edid *edid_buf = edid ? (struct edid *) edid->raw_edid : NULL;
struct cea_sad *sads;
int sad_count = -1;
@@ -130,7 +136,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
edid_caps->edid_hdmi = connector->display_info.is_hdmi;
- apply_edid_quirks(edid_buf, edid_caps);
+ apply_edid_quirks(dev, edid_buf, edid_caps);
sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
if (sad_count <= 0)
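As a reference for the quirk table above, a minimal sketch of panel matching by EDID identity; the 'GBT' entry mirrors the hunk, while the helper name and bool return are illustrative only:

#include <drm/drm_edid.h>

/* drm_edid_encode_panel_id() packs the three-letter PNP vendor ID and the
 * 16-bit product code into one u32, so a single switch statement can match
 * an exact panel model.
 */
static bool needs_dpcd_poweroff_delay(u32 panel_id)
{
	switch (panel_id) {
	case drm_edid_encode_panel_id('G', 'B', 'T', 0x3215):
		return true;	/* apply the 10 s link-detection delay */
	default:
		return false;
	}
}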
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index c4a7fd453e5f..b61e210f6246 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -473,7 +473,7 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
unregister_all_irq_handlers(adev);
}
-int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
+void amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
{
int src;
struct list_head *hnd_list_h;
@@ -511,10 +511,9 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
}
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
- return 0;
}
-int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
+void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
{
int src;
struct list_head *hnd_list_h, *hnd_list_l;
@@ -522,7 +521,7 @@ int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
- DRM_DEBUG_KMS("DM_IRQ: early resume\n");
+ drm_dbg(adev_to_drm(adev), "DM_IRQ: early resume\n");
/* re-enable short pulse interrupts HW interrupt */
for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
@@ -533,11 +532,9 @@ int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
}
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
-
- return 0;
}
-int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
+void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
{
int src;
struct list_head *hnd_list_h, *hnd_list_l;
@@ -545,7 +542,7 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
- DRM_DEBUG_KMS("DM_IRQ: resume\n");
+ drm_dbg(adev_to_drm(adev), "DM_IRQ: resume\n");
/**
* Renable HW interrupt for HPD and only since FLIP and VBLANK
@@ -559,7 +556,6 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
}
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
- return 0;
}
/*
@@ -894,8 +890,16 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
+ int irq_type;
int i;
+ /* First, clear all hpd and hpdrx interrupts */
+ for (i = DC_IRQ_SOURCE_HPD1; i <= DC_IRQ_SOURCE_HPD6RX; i++) {
+ if (!dc_interrupt_set(adev->dm.dc, i, false))
+ drm_err(dev, "Failed to clear hpd(rx) source=%d on init\n",
+ i);
+ }
+
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_dm_connector *amdgpu_dm_connector;
@@ -908,10 +912,31 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
dc_link = amdgpu_dm_connector->dc_link;
+ /*
+ * Get a base driver irq reference for hpd ints for the lifetime
+ * of dm. Note that only hpd interrupt types are registered with
+ * base driver; hpd_rx types aren't. IOW, amdgpu_irq_get/put on
+ * hpd_rx isn't available. DM currently controls hpd_rx
+ * explicitly with dc_interrupt_set()
+ */
if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
- dc_interrupt_set(adev->dm.dc,
- dc_link->irq_source_hpd,
- true);
+ irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1;
+ /*
+ * TODO: There's a mismatch between mode_info.num_hpd
+ * and what bios reports as the # of connectors with hpd
+ * sources. Since the # of hpd source types registered
+ * with base driver == mode_info.num_hpd, we have to
+ * fallback to dc_interrupt_set for the remaining types.
+ */
+ if (irq_type < adev->mode_info.num_hpd) {
+ if (amdgpu_irq_get(adev, &adev->hpd_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n",
+ dc_link->irq_source_hpd);
+ } else {
+ dc_interrupt_set(adev->dm.dc,
+ dc_link->irq_source_hpd,
+ true);
+ }
}
if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
@@ -921,12 +946,6 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
-
- /* Update reference counts for HPDs */
- for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
- if (amdgpu_irq_get(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
- drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n", i);
- }
}
/**
@@ -942,7 +961,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
- int i;
+ int irq_type;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
@@ -956,9 +975,18 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
dc_link = amdgpu_dm_connector->dc_link;
if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
- dc_interrupt_set(adev->dm.dc,
- dc_link->irq_source_hpd,
- false);
+ irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1;
+
+ /* TODO: See same TODO in amdgpu_dm_hpd_init() */
+ if (irq_type < adev->mode_info.num_hpd) {
+ if (amdgpu_irq_put(adev, &adev->hpd_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n",
+ dc_link->irq_source_hpd);
+ } else {
+ dc_interrupt_set(adev->dm.dc,
+ dc_link->irq_source_hpd,
+ false);
+ }
}
if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
@@ -968,10 +996,4 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
-
- /* Update reference counts for HPDs */
- for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
- if (amdgpu_irq_put(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
- drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n", i);
- }
}
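A minimal sketch of the index arithmetic behind the hpd init/fini hunks above; dm_hpd_ref() is a hypothetical helper and return values are ignored for brevity:

static void dm_hpd_ref(struct amdgpu_device *adev, struct dc_link *dc_link,
		       bool enable)
{
	/* DC enumerates HPD sources from DC_IRQ_SOURCE_HPD1, while the base
	 * driver indexes its hpd_irq types from 0.
	 */
	int irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1;

	if (irq_type < adev->mode_info.num_hpd) {
		/* Source registered with the base driver: refcounted get/put. */
		if (enable)
			amdgpu_irq_get(adev, &adev->hpd_irq, irq_type);
		else
			amdgpu_irq_put(adev, &adev->hpd_irq, irq_type);
	} else {
		/* BIOS reports more HPD-capable connectors than
		 * mode_info.num_hpd, so fall back to direct DC control.
		 */
		dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd, enable);
	}
}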
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
index 2349238a626b..ba17c23b2706 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
@@ -90,14 +90,14 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev);
* amdgpu_dm_irq_suspend - disable ASIC interrupt during suspend.
*
*/
-int amdgpu_dm_irq_suspend(struct amdgpu_device *adev);
+void amdgpu_dm_irq_suspend(struct amdgpu_device *adev);
/**
* amdgpu_dm_irq_resume_early - enable HPDRX ASIC interrupts during resume.
* amdgpu_dm_irq_resume - enable ASIC interrupt during resume.
*
*/
-int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev);
-int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev);
+void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev);
+void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev);
#endif /* __AMDGPU_DM_IRQ_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 07e744da7bf4..7ceedf626d23 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -1625,7 +1625,6 @@ int pre_validate_dsc(struct drm_atomic_state *state,
if (ind >= 0) {
struct drm_connector *connector;
- struct amdgpu_dm_connector *aconnector;
struct drm_connector_state *drm_new_conn_state;
struct dm_connector_state *dm_new_conn_state;
struct dm_crtc_state *dm_old_crtc_state;
@@ -1633,15 +1632,17 @@ int pre_validate_dsc(struct drm_atomic_state *state,
connector =
amdgpu_dm_find_first_crtc_matching_connector(state,
state->crtcs[ind].ptr);
- aconnector = to_amdgpu_dm_connector(connector);
+ if (!connector)
+ continue;
+
drm_new_conn_state =
drm_atomic_get_new_connector_state(state,
- &aconnector->base);
+ connector);
dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
dm_old_crtc_state = to_dm_crtc_state(state->crtcs[ind].old_state);
local_dc_state->streams[i] =
- create_validate_stream_for_sink(aconnector,
+ create_validate_stream_for_sink(connector,
&state->crtcs[ind].new_state->mode,
dm_new_conn_state,
dm_old_crtc_state->stream);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 774cc3f4f3fd..3e0f45f1711c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -277,8 +277,11 @@ static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev,
if (!dcc->enable)
return 0;
- if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
- !dc->cap_funcs.get_dcc_compression_cap)
+ if (adev->family < AMDGPU_FAMILY_GC_12_0_0 &&
+ format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return -EINVAL;
+
+ if (!dc->cap_funcs.get_dcc_compression_cap)
return -EINVAL;
input.format = format;
@@ -697,7 +700,7 @@ static void amdgpu_dm_plane_add_gfx12_modifiers(struct amdgpu_device *adev,
uint64_t mod_4k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_4K_2D);
uint64_t mod_256b = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256B_2D);
uint64_t dcc = ver | AMD_FMT_MOD_SET(DCC, 1);
- uint8_t max_comp_block[] = {1, 0};
+ uint8_t max_comp_block[] = {2, 1, 0};
uint64_t max_comp_block_mod[ARRAY_SIZE(max_comp_block)] = {0};
uint8_t i = 0, j = 0;
uint64_t gfx12_modifiers[] = {mod_256k, mod_64k, mod_4k, mod_256b, DRM_FORMAT_MOD_LINEAR};
@@ -1258,21 +1261,24 @@ static int amdgpu_dm_plane_atomic_check(struct drm_plane *plane,
}
static int amdgpu_dm_plane_atomic_async_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
+ struct drm_atomic_state *state, bool flip)
{
struct drm_crtc_state *new_crtc_state;
struct drm_plane_state *new_plane_state;
struct dm_crtc_state *dm_new_crtc_state;
- /* Only support async updates on cursor planes. */
- if (plane->type != DRM_PLANE_TYPE_CURSOR)
+ if (flip) {
+ if (plane->type != DRM_PLANE_TYPE_OVERLAY)
+ return -EINVAL;
+ } else if (plane->type != DRM_PLANE_TYPE_CURSOR) {
return -EINVAL;
+ }
new_plane_state = drm_atomic_get_new_plane_state(state, plane);
new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
/* Reject overlay cursors for now*/
- if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE)
+ if (!flip && dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE)
return -EINVAL;
return 0;
@@ -1430,7 +1436,7 @@ static void amdgpu_dm_plane_panic_flush(struct drm_plane *plane)
dc_plane_state = dm_plane_state->dc_state;
- dc_plane_force_update_for_panic(dc_plane_state, fb->modifier ? true : false);
+ dc_plane_force_dcc_and_tiling_disable(dc_plane_state, fb->modifier ? true : false);
}
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
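The plane hunk above threads a new flip flag into the async check; a minimal sketch of the resulting policy, with async_update_allowed() as an illustrative helper rather than part of the patch:

static bool async_update_allowed(enum drm_plane_type type, bool flip,
				 bool cursor_in_overlay_mode)
{
	/* Async flips are only accepted on overlay planes; async non-flip
	 * updates stay cursor-only, and cursors emulated through an overlay
	 * plane are still rejected.
	 */
	if (flip)
		return type == DRM_PLANE_TYPE_OVERLAY;

	return type == DRM_PLANE_TYPE_CURSOR && !cursor_in_overlay_mode;
}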
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 8992e697759f..3e1f5b689718 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -52,7 +52,7 @@ endif
DC_LIBS += hdcp
ifdef CONFIG_DRM_AMD_DC_FP
-DC_LIBS += spl
+DC_LIBS += sspl
DC_SPL_TRANS += dc_spl_translate.o
endif
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dc_common.c b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
index b2fc4f8e6482..a51c2701da24 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
@@ -40,7 +40,8 @@ bool is_rgb_cspace(enum dc_color_space output_color_space)
case COLOR_SPACE_YCBCR709:
case COLOR_SPACE_YCBCR601_LIMITED:
case COLOR_SPACE_YCBCR709_LIMITED:
- case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_2020_YCBCR_LIMITED:
+ case COLOR_SPACE_2020_YCBCR_FULL:
return false;
default:
/* Add a case to switch */
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index a62f6c51301c..04eb647acc4e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1778,6 +1778,7 @@ static enum bp_result get_firmware_info_v3_1(
struct dc_firmware_info *info)
{
struct atom_firmware_info_v3_1 *firmware_info;
+ struct atom_firmware_info_v3_2 *firmware_info32;
struct atom_display_controller_info_v4_1 *dce_info = NULL;
if (!info)
@@ -1785,11 +1786,13 @@ static enum bp_result get_firmware_info_v3_1(
firmware_info = GET_IMAGE(struct atom_firmware_info_v3_1,
DATA_TABLES(firmwareinfo));
+ firmware_info32 = GET_IMAGE(struct atom_firmware_info_v3_2,
+ DATA_TABLES(firmwareinfo));
dce_info = GET_IMAGE(struct atom_display_controller_info_v4_1,
DATA_TABLES(dce_info));
- if (!firmware_info || !dce_info)
+ if (!firmware_info || !firmware_info32 || !dce_info)
return BP_RESULT_BADBIOSTABLE;
memset(info, 0, sizeof(*info));
@@ -1817,7 +1820,15 @@ static enum bp_result get_firmware_info_v3_1(
bp->cmd_tbl.get_smu_clock_info(bp, SMU9_SYSPLL0_ID) * 10;
}
- info->oem_i2c_present = false;
+ /* These fields are marked as reserved in v3_1, but they appear to be populated
+ * properly.
+ */
+ if (firmware_info32 && firmware_info32->board_i2c_feature_id == 0x2) {
+ info->oem_i2c_present = true;
+ info->oem_i2c_obj_id = firmware_info32->board_i2c_feature_gpio_id;
+ } else {
+ info->oem_i2c_present = false;
+ }
return BP_RESULT_OK;
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 7d18f372ce7a..2c645dffec18 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -101,7 +101,6 @@ static void init_dig_encoder_control(struct bios_parser *bp)
bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v1_5;
break;
default:
- dm_output_to_console("Don't have dig_encoder_control for v%d\n", version);
bp->cmd_tbl.dig_encoder_control = encoder_control_fallback;
break;
}
@@ -210,6 +209,7 @@ static enum bp_result encoder_control_fallback(
******************************************************************************
*****************************************************************************/
+
static enum bp_result transmitter_control_v1_6(
struct bios_parser *bp,
struct bp_transmitter_control *cntl);
@@ -238,7 +238,6 @@ static void init_transmitter_control(struct bios_parser *bp)
bp->cmd_tbl.transmitter_control = transmitter_control_v1_7;
break;
default:
- dm_output_to_console("Don't have transmitter_control for v%d\n", crev);
bp->cmd_tbl.transmitter_control = transmitter_control_fallback;
break;
}
@@ -325,6 +324,21 @@ static void transmitter_control_dmcub_v1_7(
dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
+static struct dc_link *get_link_by_phy_id(struct dc *p_dc, uint32_t phy_id)
+{
+ struct dc_link *link = NULL;
+
+ // Find the dc_link whose link encoder transmitter matches the given PHY id
+ for (uint8_t link_id = 0; link_id < MAX_LINKS; link_id++) {
+ if (phy_id == p_dc->links[link_id]->link_enc->transmitter) {
+ link = p_dc->links[link_id];
+ break;
+ }
+ }
+
+ return link;
+}
+
static enum bp_result transmitter_control_v1_7(
struct bios_parser *bp,
struct bp_transmitter_control *cntl)
@@ -363,7 +377,37 @@ static enum bp_result transmitter_control_v1_7(
if (bp->base.ctx->dc->ctx->dmub_srv &&
bp->base.ctx->dc->debug.dmub_command_table) {
+ struct dm_process_phy_transition_init_params process_phy_transition_init_params = {0};
+ struct dc_link *link = get_link_by_phy_id(bp->base.ctx->dc, dig_v1_7.phyid);
+ bool is_phy_transition_interlock_allowed = false;
+ uint8_t action = dig_v1_7.action;
+
+ if (link) {
+ if (link->phy_transition_bitmask &&
+ (action == TRANSMITTER_CONTROL_ENABLE || action == TRANSMITTER_CONTROL_DISABLE)) {
+ is_phy_transition_interlock_allowed = true;
+
+ // Prepare input parameters for processing ACPI retimers
+ process_phy_transition_init_params.action = action;
+ process_phy_transition_init_params.display_port_lanes_count = cntl->lanes_number;
+ process_phy_transition_init_params.phy_id = dig_v1_7.phyid;
+ process_phy_transition_init_params.signal = cntl->signal;
+ process_phy_transition_init_params.sym_clock_10khz = dig_v1_7.symclk_units.symclk_10khz;
+ process_phy_transition_init_params.display_port_link_rate = link->cur_link_settings.link_rate;
+ process_phy_transition_init_params.transition_bitmask = link->phy_transition_bitmask;
+ }
+ }
+
+ // Handle PRE_OFF_TO_ON: Process ACPI PHY Transition Interlock
+ if (is_phy_transition_interlock_allowed && action == TRANSMITTER_CONTROL_ENABLE)
+ dm_acpi_process_phy_transition_interlock(bp->base.ctx, process_phy_transition_init_params);
+
transmitter_control_dmcub_v1_7(bp->base.ctx->dmub_srv, &dig_v1_7);
+
+ // Handle POST_ON_TO_OFF: Process ACPI PHY Transition Interlock
+ if (is_phy_transition_interlock_allowed && action == TRANSMITTER_CONTROL_DISABLE)
+ dm_acpi_process_phy_transition_interlock(bp->base.ctx, process_phy_transition_init_params);
+
return BP_RESULT_OK;
}
@@ -408,8 +452,6 @@ static void init_set_pixel_clock(struct bios_parser *bp)
bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7;
break;
default:
- dm_output_to_console("Don't have set_pixel_clock for v%d\n",
- BIOS_CMD_TABLE_PARA_REVISION(setpixelclock));
bp->cmd_tbl.set_pixel_clock = set_pixel_clock_fallback;
break;
}
@@ -554,7 +596,6 @@ static void init_set_crtc_timing(struct bios_parser *bp)
set_crtc_using_dtd_timing_v3;
break;
default:
- dm_output_to_console("Don't have set_crtc_timing for v%d\n", dtd_version);
bp->cmd_tbl.set_crtc_timing = NULL;
break;
}
@@ -671,8 +712,6 @@ static void init_enable_crtc(struct bios_parser *bp)
bp->cmd_tbl.enable_crtc = enable_crtc_v1;
break;
default:
- dm_output_to_console("Don't have enable_crtc for v%d\n",
- BIOS_CMD_TABLE_PARA_REVISION(enablecrtc));
bp->cmd_tbl.enable_crtc = NULL;
break;
}
@@ -864,8 +903,6 @@ static void init_set_dce_clock(struct bios_parser *bp)
bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1;
break;
default:
- dm_output_to_console("Don't have set_dce_clock for v%d\n",
- BIOS_CMD_TABLE_PARA_REVISION(setdceclock));
bp->cmd_tbl.set_dce_clock = NULL;
break;
}
@@ -1046,3 +1083,4 @@ void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
init_enable_lvtma_control(bp);
}
+
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
index e317a3615147..91bc8a06e2cf 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
@@ -293,3 +293,107 @@ uint8_t dal_cmd_table_helper_encoder_id_to_atom(
return ENCODER_OBJECT_ID_NONE;
}
}
+
+uint8_t phy_id_to_atom(enum transmitter t)
+{
+ uint8_t atom_phy_id;
+
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYF;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYG;
+ break;
+ default:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ }
+ return atom_phy_id;
+}
+
+uint8_t clock_source_id_to_atom_phy_clk_src_id(
+ enum clock_source_id id)
+{
+ uint8_t atom_phy_clk_src_id = 0;
+
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL0:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL1:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL2:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
+ break;
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
+ break;
+ default:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ }
+
+ return atom_phy_clk_src_id >> 2;
+}
+
+bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
+{
+ bool result = false;
+
+ if (atom_engine_id != NULL)
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGB:
+ *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGC:
+ *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGD:
+ *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGE:
+ *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGF:
+ *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGG:
+ *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DACA:
+ *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
+ result = true;
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h
index dfd30aaf4032..547700e119a6 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h
@@ -59,4 +59,12 @@ uint8_t dal_cmd_table_helper_transmitter_bp_to_atom(
uint8_t dal_cmd_table_helper_encoder_id_to_atom(
enum encoder_id id);
+
+uint8_t phy_id_to_atom(enum transmitter t);
+
+uint8_t clock_source_id_to_atom_phy_clk_src_id(
+ enum clock_source_id id);
+
+bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index 73458e295103..268e2414b34f 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -82,13 +82,13 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
case DCN_VERSION_3_21:
case DCN_VERSION_3_5:
case DCN_VERSION_3_51:
+ case DCN_VERSION_3_6:
case DCN_VERSION_4_01:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
default:
- /* Unsupported DCE */
- BREAK_TO_DEBUGGER();
+ *h = dal_cmd_tbl_helper_dce112_get_table2();
return false;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c b/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c
index 11bf247bb180..3099128223df 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c
@@ -31,39 +31,6 @@
#include "../command_table_helper.h"
-static uint8_t phy_id_to_atom(enum transmitter t)
-{
- uint8_t atom_phy_id;
-
- switch (t) {
- case TRANSMITTER_UNIPHY_A:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- case TRANSMITTER_UNIPHY_B:
- atom_phy_id = ATOM_PHY_ID_UNIPHYB;
- break;
- case TRANSMITTER_UNIPHY_C:
- atom_phy_id = ATOM_PHY_ID_UNIPHYC;
- break;
- case TRANSMITTER_UNIPHY_D:
- atom_phy_id = ATOM_PHY_ID_UNIPHYD;
- break;
- case TRANSMITTER_UNIPHY_E:
- atom_phy_id = ATOM_PHY_ID_UNIPHYE;
- break;
- case TRANSMITTER_UNIPHY_F:
- atom_phy_id = ATOM_PHY_ID_UNIPHYF;
- break;
- case TRANSMITTER_UNIPHY_G:
- atom_phy_id = ATOM_PHY_ID_UNIPHYG;
- break;
- default:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- }
- return atom_phy_id;
-}
-
static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
{
uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
@@ -94,32 +61,6 @@ static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
return atom_dig_mode;
}
-static uint8_t clock_source_id_to_atom_phy_clk_src_id(
- enum clock_source_id id)
-{
- uint8_t atom_phy_clk_src_id = 0;
-
- switch (id) {
- case CLOCK_SOURCE_ID_PLL0:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
- break;
- case CLOCK_SOURCE_ID_PLL1:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- case CLOCK_SOURCE_ID_PLL2:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
- break;
- case CLOCK_SOURCE_ID_EXTERNAL:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
- break;
- default:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- }
-
- return atom_phy_clk_src_id >> 2;
-}
-
static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
{
uint8_t atom_hpd_sel = 0;
@@ -207,51 +148,6 @@ static bool clock_source_id_to_atom(
return result;
}
-static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
-{
- bool result = false;
-
- if (atom_engine_id != NULL)
- switch (id) {
- case ENGINE_ID_DIGA:
- *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGB:
- *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGC:
- *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGD:
- *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGE:
- *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGF:
- *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGG:
- *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DACA:
- *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
- result = true;
- break;
- default:
- break;
- }
-
- return result;
-}
-
static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
{
uint8_t atom_action = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
index 755b6e33140a..349f0e5d5856 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
@@ -29,40 +29,9 @@
#include "include/bios_parser_types.h"
-#include "../command_table_helper2.h"
-
-static uint8_t phy_id_to_atom(enum transmitter t)
-{
- uint8_t atom_phy_id;
+#include "../command_table_helper.h"
- switch (t) {
- case TRANSMITTER_UNIPHY_A:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- case TRANSMITTER_UNIPHY_B:
- atom_phy_id = ATOM_PHY_ID_UNIPHYB;
- break;
- case TRANSMITTER_UNIPHY_C:
- atom_phy_id = ATOM_PHY_ID_UNIPHYC;
- break;
- case TRANSMITTER_UNIPHY_D:
- atom_phy_id = ATOM_PHY_ID_UNIPHYD;
- break;
- case TRANSMITTER_UNIPHY_E:
- atom_phy_id = ATOM_PHY_ID_UNIPHYE;
- break;
- case TRANSMITTER_UNIPHY_F:
- atom_phy_id = ATOM_PHY_ID_UNIPHYF;
- break;
- case TRANSMITTER_UNIPHY_G:
- atom_phy_id = ATOM_PHY_ID_UNIPHYG;
- break;
- default:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- }
- return atom_phy_id;
-}
+#include "../command_table_helper2.h"
static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
{
@@ -91,32 +60,6 @@ static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
return atom_dig_mode;
}
-static uint8_t clock_source_id_to_atom_phy_clk_src_id(
- enum clock_source_id id)
-{
- uint8_t atom_phy_clk_src_id = 0;
-
- switch (id) {
- case CLOCK_SOURCE_ID_PLL0:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
- break;
- case CLOCK_SOURCE_ID_PLL1:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- case CLOCK_SOURCE_ID_PLL2:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
- break;
- case CLOCK_SOURCE_ID_EXTERNAL:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
- break;
- default:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- }
-
- return atom_phy_clk_src_id >> 2;
-}
-
static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
{
uint8_t atom_hpd_sel = 0;
@@ -209,51 +152,6 @@ static bool clock_source_id_to_atom(
return result;
}
-static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
-{
- bool result = false;
-
- if (atom_engine_id != NULL)
- switch (id) {
- case ENGINE_ID_DIGA:
- *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGB:
- *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGC:
- *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGD:
- *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGE:
- *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGF:
- *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGG:
- *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DACA:
- *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
- result = true;
- break;
- default:
- break;
- }
-
- return result;
-}
-
static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
{
uint8_t atom_action = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c
index 06b4f7fa4a50..1a5fefcde8af 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c
@@ -31,39 +31,6 @@
#include "../command_table_helper.h"
-static uint8_t phy_id_to_atom(enum transmitter t)
-{
- uint8_t atom_phy_id;
-
- switch (t) {
- case TRANSMITTER_UNIPHY_A:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- case TRANSMITTER_UNIPHY_B:
- atom_phy_id = ATOM_PHY_ID_UNIPHYB;
- break;
- case TRANSMITTER_UNIPHY_C:
- atom_phy_id = ATOM_PHY_ID_UNIPHYC;
- break;
- case TRANSMITTER_UNIPHY_D:
- atom_phy_id = ATOM_PHY_ID_UNIPHYD;
- break;
- case TRANSMITTER_UNIPHY_E:
- atom_phy_id = ATOM_PHY_ID_UNIPHYE;
- break;
- case TRANSMITTER_UNIPHY_F:
- atom_phy_id = ATOM_PHY_ID_UNIPHYF;
- break;
- case TRANSMITTER_UNIPHY_G:
- atom_phy_id = ATOM_PHY_ID_UNIPHYG;
- break;
- default:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- }
- return atom_phy_id;
-}
-
static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
{
uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP;
@@ -91,32 +58,6 @@ static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
return atom_dig_mode;
}
-static uint8_t clock_source_id_to_atom_phy_clk_src_id(
- enum clock_source_id id)
-{
- uint8_t atom_phy_clk_src_id = 0;
-
- switch (id) {
- case CLOCK_SOURCE_ID_PLL0:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
- break;
- case CLOCK_SOURCE_ID_PLL1:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- case CLOCK_SOURCE_ID_PLL2:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
- break;
- case CLOCK_SOURCE_ID_EXTERNAL:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
- break;
- default:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- }
-
- return atom_phy_clk_src_id >> 2;
-}
-
static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
{
uint8_t atom_hpd_sel = 0;
@@ -209,51 +150,6 @@ static bool clock_source_id_to_atom(
return result;
}
-static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
-{
- bool result = false;
-
- if (atom_engine_id != NULL)
- switch (id) {
- case ENGINE_ID_DIGA:
- *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGB:
- *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGC:
- *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGD:
- *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGE:
- *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGF:
- *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGG:
- *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DACA:
- *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
- result = true;
- break;
- default:
- break;
- }
-
- return result;
-}
-
static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
{
uint8_t atom_action = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.c b/drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.c
index 710221b4f5c5..01ccc803040c 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.c
@@ -58,51 +58,6 @@ static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
return atom_action;
}
-static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
-{
- bool result = false;
-
- if (atom_engine_id != NULL)
- switch (id) {
- case ENGINE_ID_DIGA:
- *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGB:
- *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGC:
- *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGD:
- *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGE:
- *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGF:
- *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGG:
- *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DACA:
- *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
- result = true;
- break;
- default:
- break;
- }
-
- return result;
-}
-
static bool clock_source_id_to_atom(
enum clock_source_id id,
uint32_t *atom_pll_id)
@@ -149,32 +104,6 @@ static bool clock_source_id_to_atom(
return result;
}
-static uint8_t clock_source_id_to_atom_phy_clk_src_id(
- enum clock_source_id id)
-{
- uint8_t atom_phy_clk_src_id = 0;
-
- switch (id) {
- case CLOCK_SOURCE_ID_PLL0:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
- break;
- case CLOCK_SOURCE_ID_PLL1:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- case CLOCK_SOURCE_ID_PLL2:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
- break;
- case CLOCK_SOURCE_ID_EXTERNAL:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
- break;
- default:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- }
-
- return atom_phy_clk_src_id >> 2;
-}
-
static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
{
uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
@@ -270,39 +199,6 @@ static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
return atom_dig_encoder_sel;
}
-static uint8_t phy_id_to_atom(enum transmitter t)
-{
- uint8_t atom_phy_id;
-
- switch (t) {
- case TRANSMITTER_UNIPHY_A:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- case TRANSMITTER_UNIPHY_B:
- atom_phy_id = ATOM_PHY_ID_UNIPHYB;
- break;
- case TRANSMITTER_UNIPHY_C:
- atom_phy_id = ATOM_PHY_ID_UNIPHYC;
- break;
- case TRANSMITTER_UNIPHY_D:
- atom_phy_id = ATOM_PHY_ID_UNIPHYD;
- break;
- case TRANSMITTER_UNIPHY_E:
- atom_phy_id = ATOM_PHY_ID_UNIPHYE;
- break;
- case TRANSMITTER_UNIPHY_F:
- atom_phy_id = ATOM_PHY_ID_UNIPHYF;
- break;
- case TRANSMITTER_UNIPHY_G:
- atom_phy_id = ATOM_PHY_ID_UNIPHYG;
- break;
- default:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- }
- return atom_phy_id;
-}
-
static uint8_t disp_power_gating_action_to_atom(
enum bp_pipe_control_action action)
{
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c b/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c
index 8b30b558cf1f..2ec5264536c7 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c
@@ -58,51 +58,6 @@ static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
return atom_action;
}
-static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
-{
- bool result = false;
-
- if (atom_engine_id != NULL)
- switch (id) {
- case ENGINE_ID_DIGA:
- *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGB:
- *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGC:
- *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGD:
- *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGE:
- *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGF:
- *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DIGG:
- *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
- result = true;
- break;
- case ENGINE_ID_DACA:
- *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
- result = true;
- break;
- default:
- break;
- }
-
- return result;
-}
-
static bool clock_source_id_to_atom(
enum clock_source_id id,
uint32_t *atom_pll_id)
@@ -149,32 +104,6 @@ static bool clock_source_id_to_atom(
return result;
}
-static uint8_t clock_source_id_to_atom_phy_clk_src_id(
- enum clock_source_id id)
-{
- uint8_t atom_phy_clk_src_id = 0;
-
- switch (id) {
- case CLOCK_SOURCE_ID_PLL0:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
- break;
- case CLOCK_SOURCE_ID_PLL1:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- case CLOCK_SOURCE_ID_PLL2:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
- break;
- case CLOCK_SOURCE_ID_EXTERNAL:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
- break;
- default:
- atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
- break;
- }
-
- return atom_phy_clk_src_id >> 2;
-}
-
static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
{
uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
@@ -270,39 +199,6 @@ static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
return atom_dig_encoder_sel;
}
-static uint8_t phy_id_to_atom(enum transmitter t)
-{
- uint8_t atom_phy_id;
-
- switch (t) {
- case TRANSMITTER_UNIPHY_A:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- case TRANSMITTER_UNIPHY_B:
- atom_phy_id = ATOM_PHY_ID_UNIPHYB;
- break;
- case TRANSMITTER_UNIPHY_C:
- atom_phy_id = ATOM_PHY_ID_UNIPHYC;
- break;
- case TRANSMITTER_UNIPHY_D:
- atom_phy_id = ATOM_PHY_ID_UNIPHYD;
- break;
- case TRANSMITTER_UNIPHY_E:
- atom_phy_id = ATOM_PHY_ID_UNIPHYE;
- break;
- case TRANSMITTER_UNIPHY_F:
- atom_phy_id = ATOM_PHY_ID_UNIPHYF;
- break;
- case TRANSMITTER_UNIPHY_G:
- atom_phy_id = ATOM_PHY_ID_UNIPHYG;
- break;
- default:
- atom_phy_id = ATOM_PHY_ID_UNIPHYA;
- break;
- }
- return atom_phy_id;
-}
-
static uint8_t disp_power_gating_action_to_atom(
enum bp_pipe_control_action action)
{
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index a0fb4481d2f1..e4d22f74f986 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -130,7 +130,7 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
- int display_count;
+ int display_count = 0;
bool update_dppclk = false;
bool update_dispclk = false;
bool dpp_clock_lowered = false;
@@ -194,8 +194,6 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
// workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
if (new_clocks->dppclk_khz < MIN_DPP_DISP_CLK)
new_clocks->dppclk_khz = MIN_DPP_DISP_CLK;
- if (new_clocks->dispclk_khz < MIN_DPP_DISP_CLK)
- new_clocks->dispclk_khz = MIN_DPP_DISP_CLK;
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
@@ -204,15 +202,19 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
update_dppclk = true;
}
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- /* No need to apply the w/a if we haven't taken over from bios yet */
- if (clk_mgr_base->clks.dispclk_khz)
- dcn315_disable_otg_wa(clk_mgr_base, context, true);
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) &&
+ (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) {
+ int requested_dispclk_khz = new_clocks->dispclk_khz;
+ dcn315_disable_otg_wa(clk_mgr_base, context, true);
+
+ /* Clamp the requested clock to PMFW based on their limit. */
+ if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz)
+ requested_dispclk_khz = dc->debug.min_disp_clk_khz;
+
+ dcn315_smu_set_dispclk(clk_mgr, requested_dispclk_khz);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
- dcn315_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
- if (clk_mgr_base->clks.dispclk_khz)
- dcn315_disable_otg_wa(clk_mgr_base, context, false);
+ dcn315_disable_otg_wa(clk_mgr_base, context, false);
update_dispclk = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index c3e50c3aaa60..49efea0c8fcf 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -140,7 +140,7 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
- int display_count;
+ int display_count = 0;
bool update_dppclk = false;
bool update_dispclk = false;
bool dpp_clock_lowered = false;
@@ -201,8 +201,6 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
// workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
if (new_clocks->dppclk_khz < 100000)
new_clocks->dppclk_khz = 100000;
- if (new_clocks->dispclk_khz < 100000)
- new_clocks->dispclk_khz = 100000;
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
@@ -211,11 +209,18 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
update_dppclk = true;
}
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) &&
+ (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) {
+ int requested_dispclk_khz = new_clocks->dispclk_khz;
+
dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
+ /* Clamp the requested clock to PMFW based on their limit. */
+ if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz)
+ requested_dispclk_khz = dc->debug.min_disp_clk_khz;
+
+ dcn316_smu_set_dispclk(clk_mgr, requested_dispclk_khz);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
- dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
update_dispclk = true;
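The dcn315 and dcn316 hunks above (and the dcn35 hunk below) apply the same request clamping before calling the SMU; a minimal sketch of that rule, with clamp_dispclk_khz() as an illustrative helper:

static int clamp_dispclk_khz(int requested_khz, int debug_min_khz)
{
	/* The value sent to PMFW never drops below the optional debug
	 * minimum, while clks.dispclk_khz still caches the originally
	 * requested value.
	 */
	if (debug_min_khz > 0 && requested_khz < debug_min_khz)
		return debug_min_khz;

	return requested_khz;
}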
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index 1648226586e2..142de8938d7c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -201,34 +201,41 @@ static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *
struct pipe_ctx *pipe = safe_to_lower
? &context->res_ctx.pipe_ctx[i]
: &dc->current_state->res_ctx.pipe_ctx[i];
+ struct link_encoder *new_pipe_link_enc = new_pipe->link_res.dio_link_enc;
+ struct link_encoder *pipe_link_enc = pipe->link_res.dio_link_enc;
bool stream_changed_otg_dig_on = false;
+ bool has_active_hpo = false;
+
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
+
+ if (!dc->config.unify_link_enc_assignment) {
+ if (new_pipe->stream)
+ new_pipe_link_enc = new_pipe->stream->link_enc;
+ if (pipe->stream)
+ pipe_link_enc = pipe->stream->link_enc;
+ }
+
stream_changed_otg_dig_on = old_pipe->stream && new_pipe->stream &&
old_pipe->stream != new_pipe->stream &&
old_pipe->stream_res.tg == new_pipe->stream_res.tg &&
- new_pipe->stream->link_enc && !new_pipe->stream->dpms_off &&
- new_pipe->stream->link_enc->funcs->is_dig_enabled &&
- new_pipe->stream->link_enc->funcs->is_dig_enabled(
- new_pipe->stream->link_enc) &&
+ new_pipe_link_enc && !new_pipe->stream->dpms_off &&
+ new_pipe_link_enc->funcs->is_dig_enabled &&
+ new_pipe_link_enc->funcs->is_dig_enabled(
+ new_pipe_link_enc) &&
new_pipe->stream_res.stream_enc &&
new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled &&
new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled(new_pipe->stream_res.stream_enc);
- bool has_active_hpo = false;
-
if (old_pipe->stream && new_pipe->stream && old_pipe->stream == new_pipe->stream) {
has_active_hpo = dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(old_pipe) &&
dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(new_pipe);
- }
-
-
- if (!has_active_hpo && !dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe) &&
- (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
- !pipe->stream->link_enc) && !stream_changed_otg_dig_on)) {
-
+ }
+ if (!has_active_hpo && !stream_changed_otg_dig_on && pipe->stream &&
+ (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) || !pipe_link_enc) &&
+ !dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe)) {
/* This w/a should not trigger when we have a dig active */
if (disable) {
if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
@@ -467,14 +474,19 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
update_dppclk = true;
}
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) &&
+ (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) {
+ int requested_dispclk_khz = new_clocks->dispclk_khz;
+
dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
- if (dc->debug.min_disp_clk_khz > 0 && new_clocks->dispclk_khz < dc->debug.min_disp_clk_khz)
- new_clocks->dispclk_khz = dc->debug.min_disp_clk_khz;
+ /* Clamp the requested clock to PMFW based on their limit. */
+ if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz)
+ requested_dispclk_khz = dc->debug.min_disp_clk_khz;
+ dcn35_smu_set_dispclk(clk_mgr, requested_dispclk_khz);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
- dcn35_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
+
dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
update_dispclk = true;
@@ -1249,7 +1261,7 @@ void dcn35_clk_mgr_construct(
clk_mgr->base.dprefclk_ss_divider = 1000;
clk_mgr->base.ss_on_dprefclk = false;
clk_mgr->base.dfs_ref_freq_khz = 48000;
- if (ctx->dce_version == DCN_VERSION_3_5) {
+ if (ctx->dce_version != DCN_VERSION_3_51) {
clk_mgr->base.regs = &clk_mgr_regs_dcn35;
clk_mgr->base.clk_mgr_shift = &clk_mgr_shift_dcn35;
clk_mgr->base.clk_mgr_mask = &clk_mgr_mask_dcn35;
@@ -1401,7 +1413,7 @@ void dcn35_clk_mgr_construct(
/* Disable dynamic IPS2 in older PMFW (93.12) for Z8 interop. */
if (ctx->dc->config.disable_ips == DMUB_IPS_ENABLE &&
- ctx->dce_version == DCN_VERSION_3_5 &&
+ ctx->dce_version != DCN_VERSION_3_51 &&
((clk_mgr->base.smu_ver & 0x00FFFFFF) <= 0x005d0c00))
ctx->dc->config.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
index 8082bb877611..a3b8e3d4a429 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
@@ -24,6 +24,8 @@
#include "dml/dcn401/dcn401_fpu.h"
+#define DCN_BASE__INST0_SEG1 0x000000C0
+
#define mmCLK01_CLK0_CLK_PLL_REQ 0x16E37
#define mmCLK01_CLK0_CLK0_DFS_CNTL 0x16E69
#define mmCLK01_CLK0_CLK1_DFS_CNTL 0x16E6C
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index f84e795e35f5..28d1353f403d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -276,6 +276,7 @@ static bool create_links(
link->link_id.type = OBJECT_TYPE_CONNECTOR;
link->link_id.id = CONNECTOR_ID_VIRTUAL;
link->link_id.enum_id = ENUM_ID_1;
+ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
if (!link->link_enc) {
@@ -452,6 +453,7 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
if (dc->caps.max_v_total != 0 &&
(adjust->v_total_max > dc->caps.max_v_total || adjust->v_total_min > dc->caps.max_v_total)) {
+ stream->adjust.timing_adjust_pending = false;
if (adjust->allow_otg_v_count_halt)
return set_long_vtotal(dc, stream, adjust);
else
@@ -465,7 +467,7 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
dc->hwss.set_drr(&pipe,
1,
*adjust);
-
+ stream->adjust.timing_adjust_pending = false;
return true;
}
}
@@ -515,33 +517,6 @@ bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
return status;
}
-bool dc_stream_get_crtc_position(struct dc *dc,
- struct dc_stream_state **streams, int num_streams,
- unsigned int *v_pos, unsigned int *nom_v_pos)
-{
- /* TODO: Support multiple streams */
- const struct dc_stream_state *stream = streams[0];
- int i;
- bool ret = false;
- struct crtc_position position;
-
- dc_exit_ips_for_hw_access(dc);
-
- for (i = 0; i < MAX_PIPES; i++) {
- struct pipe_ctx *pipe =
- &dc->current_state->res_ctx.pipe_ctx[i];
-
- if (pipe->stream == stream && pipe->stream_res.stream_enc) {
- dc->hwss.get_position(&pipe, 1, &position);
-
- *v_pos = position.vertical_count;
- *nom_v_pos = position.nominal_vcount;
- ret = true;
- }
- }
- return ret;
-}
-
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static inline void
dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
@@ -931,7 +906,8 @@ void dc_stream_set_static_screen_params(struct dc *dc,
static void dc_destruct(struct dc *dc)
{
// reset link encoder assignment table on destruct
- if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
+ if (dc->res_pool && dc->res_pool->funcs->link_encs_assign &&
+ !dc->config.unify_link_enc_assignment)
link_enc_cfg_init(dc, dc->current_state);
if (dc->current_state) {
@@ -1227,6 +1203,8 @@ static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *conte
get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
else if (dc->debug.visual_confirm == VISUAL_CONFIRM_HW_CURSOR)
get_cursor_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ else if (dc->debug.visual_confirm == VISUAL_CONFIRM_DCC)
+ get_dcc_visual_confirm_color(dc, pipe_ctx, &(pipe_ctx->visual_confirm_color));
else {
if (dc->ctx->dce_version < DCN_VERSION_2_0)
color_space_to_black_color(
@@ -1720,17 +1698,23 @@ bool dc_validate_boot_timing(const struct dc *dc,
return false;
}
- if (dc->debug.force_odm_combine)
+ if (dc->debug.force_odm_combine) {
+ DC_LOG_DEBUG("boot timing validation failed due to force_odm_combine\n");
return false;
+ }
/* Check for enabled DIG to identify enabled display */
- if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
+ DC_LOG_DEBUG("boot timing validation failed due to disabled DIG\n");
return false;
+ }
enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
- if (enc_inst == ENGINE_ID_UNKNOWN)
+ if (enc_inst == ENGINE_ID_UNKNOWN) {
+ DC_LOG_DEBUG("boot timing validation failed due to unknown DIG engine ID\n");
return false;
+ }
for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
if (dc->res_pool->stream_enc[i]->id == enc_inst) {
@@ -1744,62 +1728,98 @@ bool dc_validate_boot_timing(const struct dc *dc,
}
// tg_inst not found
- if (i == dc->res_pool->stream_enc_count)
+ if (i == dc->res_pool->stream_enc_count) {
+ DC_LOG_DEBUG("boot timing validation failed due to timing generator instance not found\n");
return false;
+ }
- if (tg_inst >= dc->res_pool->timing_generator_count)
+ if (tg_inst >= dc->res_pool->timing_generator_count) {
+ DC_LOG_DEBUG("boot timing validation failed due to invalid timing generator count\n");
return false;
+ }
- if (tg_inst != link->link_enc->preferred_engine)
+ if (tg_inst != link->link_enc->preferred_engine) {
+ DC_LOG_DEBUG("boot timing validation failed due to non-preferred timing generator\n");
return false;
+ }
tg = dc->res_pool->timing_generators[tg_inst];
- if (!tg->funcs->get_hw_timing)
+ if (!tg->funcs->get_hw_timing) {
+ DC_LOG_DEBUG("boot timing validation failed due to missing get_hw_timing callback\n");
return false;
+ }
- if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
+ if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing)) {
+ DC_LOG_DEBUG("boot timing validation failed due to failed get_hw_timing return\n");
return false;
+ }
- if (crtc_timing->h_total != hw_crtc_timing.h_total)
+ if (crtc_timing->h_total != hw_crtc_timing.h_total) {
+ DC_LOG_DEBUG("boot timing validation failed due to h_total mismatch\n");
return false;
+ }
- if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
+ if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left) {
+ DC_LOG_DEBUG("boot timing validation failed due to h_border_left mismatch\n");
return false;
+ }
- if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
+ if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable) {
+ DC_LOG_DEBUG("boot timing validation failed due to h_addressable mismatch\n");
return false;
+ }
- if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
+ if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right) {
+ DC_LOG_DEBUG("boot timing validation failed due to h_border_right mismatch\n");
return false;
+ }
- if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
+ if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch) {
+ DC_LOG_DEBUG("boot timing validation failed due to h_front_porch mismatch\n");
return false;
+ }
- if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
+ if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width) {
+ DC_LOG_DEBUG("boot timing validation failed due to h_sync_width mismatch\n");
return false;
+ }
- if (crtc_timing->v_total != hw_crtc_timing.v_total)
+ if (crtc_timing->v_total != hw_crtc_timing.v_total) {
+ DC_LOG_DEBUG("boot timing validation failed due to v_total mismatch\n");
return false;
+ }
- if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
+ if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top) {
+ DC_LOG_DEBUG("boot timing validation failed due to v_border_top mismatch\n");
return false;
+ }
- if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
+ if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable) {
+ DC_LOG_DEBUG("boot timing validation failed due to v_addressable mismatch\n");
return false;
+ }
- if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
+ if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom) {
+ DC_LOG_DEBUG("boot timing validation failed due to v_border_bottom mismatch\n");
return false;
+ }
- if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
+ if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch) {
+ DC_LOG_DEBUG("boot timing validation failed due to v_front_porch mismatch\n");
return false;
+ }
- if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
+ if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width) {
+ DC_LOG_DEBUG("boot timing validation failed due to v_sync_width mismatch\n");
return false;
+ }
/* block DSC for now, as VBIOS does not currently support DSC timings */
- if (crtc_timing->flags.DSC)
+ if (crtc_timing->flags.DSC) {
+ DC_LOG_DEBUG("boot timing validation failed due to DSC\n");
return false;
+ }
if (dc_is_dp_signal(link->connector_signal)) {
unsigned int pix_clk_100hz = 0;
@@ -1821,39 +1841,55 @@ bool dc_validate_boot_timing(const struct dc *dc,
} else if (se && se->funcs->get_pixels_per_cycle) {
uint32_t pixels_per_cycle = se->funcs->get_pixels_per_cycle(se);
- if (pixels_per_cycle != 1 && !dc->debug.enable_dp_dig_pixel_rate_div_policy)
+ if (pixels_per_cycle != 1 && !dc->debug.enable_dp_dig_pixel_rate_div_policy) {
+ DC_LOG_DEBUG("boot timing validation failed due to pixels_per_cycle\n");
return false;
+ }
pix_clk_100hz *= pixels_per_cycle;
}
// Note: In rare cases, HW pixclk may differ from crtc's pixclk
// slightly due to rounding issues in 10 kHz units.
- if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
+ if (crtc_timing->pix_clk_100hz != pix_clk_100hz) {
+ DC_LOG_DEBUG("boot timing validation failed due to pix_clk_100hz mismatch\n");
return false;
+ }
- if (!se || !se->funcs->dp_get_pixel_format)
+ if (!se || !se->funcs->dp_get_pixel_format) {
+ DC_LOG_DEBUG("boot timing validation failed due to missing dp_get_pixel_format\n");
return false;
+ }
if (!se->funcs->dp_get_pixel_format(
se,
&hw_crtc_timing.pixel_encoding,
- &hw_crtc_timing.display_color_depth))
+ &hw_crtc_timing.display_color_depth)) {
+ DC_LOG_DEBUG("boot timing validation failed due to dp_get_pixel_format failure\n");
return false;
+ }
- if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
+ if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth) {
+ DC_LOG_DEBUG("boot timing validation failed due to display_color_depth mismatch\n");
return false;
+ }
- if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
+ if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding) {
+ DC_LOG_DEBUG("boot timing validation failed due to pixel_encoding mismatch\n");
return false;
+ }
}
+
if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
+ DC_LOG_DEBUG("boot timing validation failed due to VSC SDP colorimetry\n");
return false;
}
- if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)
+ if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
+ DC_LOG_DEBUG("boot timing validation failed due to DP 128b/132b\n");
return false;
+ }
if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
@@ -2266,7 +2302,7 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params
/*
* Only update link encoder to stream assignment after bandwidth validation passed.
*/
- if (res == DC_OK && dc->res_pool->funcs->link_encs_assign)
+ if (res == DC_OK && dc->res_pool->funcs->link_encs_assign && !dc->config.unify_link_enc_assignment)
dc->res_pool->funcs->link_encs_assign(
dc, context, context->streams, context->stream_count);
@@ -2834,10 +2870,13 @@ static enum surface_update_type check_update_surfaces_for_stream(
if (stream_update->sharpening_required)
su_flags->bits.sharpening_required = 1;
+ if (stream_update->output_color_space)
+ su_flags->bits.out_csc = 1;
+
if (su_flags->raw != 0)
overall_type = UPDATE_TYPE_FULL;
- if (stream_update->output_csc_transform || stream_update->output_color_space)
+ if (stream_update->output_csc_transform)
su_flags->bits.out_csc = 1;
/* Output transfer function changes do not require bandwidth recalculation,
@@ -3127,8 +3166,13 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->vrr_active_fixed)
stream->vrr_active_fixed = *update->vrr_active_fixed;
- if (update->crtc_timing_adjust)
+ if (update->crtc_timing_adjust) {
+ if (stream->adjust.v_total_min != update->crtc_timing_adjust->v_total_min ||
+ stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max)
+ update->crtc_timing_adjust->timing_adjust_pending = true;
stream->adjust = *update->crtc_timing_adjust;
+ update->crtc_timing_adjust->timing_adjust_pending = false;
+ }
if (update->dpms_off)
stream->dpms_off = *update->dpms_off;
@@ -3920,6 +3964,9 @@ static void commit_planes_for_stream(struct dc *dc,
if (update_type == UPDATE_TYPE_FULL && dc->optimized_required)
hwss_process_outstanding_hw_updates(dc, dc->current_state);
+ if (update_type != UPDATE_TYPE_FAST && dc->res_pool->funcs->prepare_mcache_programming)
+ dc->res_pool->funcs->prepare_mcache_programming(dc, context);
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -3978,9 +4025,6 @@ static void commit_planes_for_stream(struct dc *dc,
odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
}
- if (update_type != UPDATE_TYPE_FAST && dc->res_pool->funcs->prepare_mcache_programming)
- dc->res_pool->funcs->prepare_mcache_programming(dc, context);
-
if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program &&
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
@@ -4902,7 +4946,8 @@ static bool full_update_required(struct dc *dc,
stream_update->lut3d_func ||
stream_update->pending_test_pattern ||
stream_update->crtc_timing_adjust ||
- stream_update->scaler_sharpener_update))
+ stream_update->scaler_sharpener_update ||
+ stream_update->hw_cursor_req))
return true;
if (stream) {
@@ -5414,18 +5459,6 @@ bool dc_is_dmcu_initialized(struct dc *dc)
return false;
}
-void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
-{
- info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
- info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
- info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
- info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
- info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
- info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
- info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
- info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
- info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
-}
enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
{
if (dc->hwss.set_clock)
@@ -5549,9 +5582,11 @@ void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const
if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->get_hard_min_memclk)
idle_dramclk_khz = dc->clk_mgr->funcs->get_hard_min_memclk(dc->clk_mgr);
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- pipe = &context->res_ctx.pipe_ctx[i];
- subvp_pipe_type[i] = dc_state_get_pipe_subvp_type(context, pipe);
+ if (dc->res_pool && context) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ subvp_pipe_type[i] = dc_state_get_pipe_subvp_type(context, pipe);
+ }
}
DC_LOG_DC("%s: allow_idle=%d\n HardMinUClk_Khz=%d HardMinDramclk_Khz=%d\n Pipe_0=%d Pipe_1=%d Pipe_2=%d Pipe_3=%d Pipe_4=%d Pipe_5=%d (caller=%s)\n",
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index af1ea5792560..650e89825968 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -51,126 +51,6 @@
DC_LOG_BANDWIDTH_CALCS(__VA_ARGS__); \
} while (0)
-void pre_surface_trace(
- struct dc *dc,
- const struct dc_plane_state *const *plane_states,
- int surface_count)
-{
- int i;
- DC_LOGGER_INIT(dc->ctx->logger);
-
- for (i = 0; i < surface_count; i++) {
- const struct dc_plane_state *plane_state = plane_states[i];
-
- SURFACE_TRACE("Planes %d:\n", i);
-
- SURFACE_TRACE(
- "plane_state->visible = %d;\n"
- "plane_state->flip_immediate = %d;\n"
- "plane_state->address.type = %d;\n"
- "plane_state->address.grph.addr.quad_part = 0x%llX;\n"
- "plane_state->address.grph.meta_addr.quad_part = 0x%llX;\n"
- "plane_state->scaling_quality.h_taps = %d;\n"
- "plane_state->scaling_quality.v_taps = %d;\n"
- "plane_state->scaling_quality.h_taps_c = %d;\n"
- "plane_state->scaling_quality.v_taps_c = %d;\n",
- plane_state->visible,
- plane_state->flip_immediate,
- plane_state->address.type,
- plane_state->address.grph.addr.quad_part,
- plane_state->address.grph.meta_addr.quad_part,
- plane_state->scaling_quality.h_taps,
- plane_state->scaling_quality.v_taps,
- plane_state->scaling_quality.h_taps_c,
- plane_state->scaling_quality.v_taps_c);
-
- SURFACE_TRACE(
- "plane_state->src_rect.x = %d;\n"
- "plane_state->src_rect.y = %d;\n"
- "plane_state->src_rect.width = %d;\n"
- "plane_state->src_rect.height = %d;\n"
- "plane_state->dst_rect.x = %d;\n"
- "plane_state->dst_rect.y = %d;\n"
- "plane_state->dst_rect.width = %d;\n"
- "plane_state->dst_rect.height = %d;\n"
- "plane_state->clip_rect.x = %d;\n"
- "plane_state->clip_rect.y = %d;\n"
- "plane_state->clip_rect.width = %d;\n"
- "plane_state->clip_rect.height = %d;\n",
- plane_state->src_rect.x,
- plane_state->src_rect.y,
- plane_state->src_rect.width,
- plane_state->src_rect.height,
- plane_state->dst_rect.x,
- plane_state->dst_rect.y,
- plane_state->dst_rect.width,
- plane_state->dst_rect.height,
- plane_state->clip_rect.x,
- plane_state->clip_rect.y,
- plane_state->clip_rect.width,
- plane_state->clip_rect.height);
-
- SURFACE_TRACE(
- "plane_state->plane_size.surface_size.x = %d;\n"
- "plane_state->plane_size.surface_size.y = %d;\n"
- "plane_state->plane_size.surface_size.width = %d;\n"
- "plane_state->plane_size.surface_size.height = %d;\n"
- "plane_state->plane_size.surface_pitch = %d;\n",
- plane_state->plane_size.surface_size.x,
- plane_state->plane_size.surface_size.y,
- plane_state->plane_size.surface_size.width,
- plane_state->plane_size.surface_size.height,
- plane_state->plane_size.surface_pitch);
-
-
- SURFACE_TRACE(
- "plane_state->tiling_info.gfx8.num_banks = %d;\n"
- "plane_state->tiling_info.gfx8.bank_width = %d;\n"
- "plane_state->tiling_info.gfx8.bank_width_c = %d;\n"
- "plane_state->tiling_info.gfx8.bank_height = %d;\n"
- "plane_state->tiling_info.gfx8.bank_height_c = %d;\n"
- "plane_state->tiling_info.gfx8.tile_aspect = %d;\n"
- "plane_state->tiling_info.gfx8.tile_aspect_c = %d;\n"
- "plane_state->tiling_info.gfx8.tile_split = %d;\n"
- "plane_state->tiling_info.gfx8.tile_split_c = %d;\n"
- "plane_state->tiling_info.gfx8.tile_mode = %d;\n"
- "plane_state->tiling_info.gfx8.tile_mode_c = %d;\n",
- plane_state->tiling_info.gfx8.num_banks,
- plane_state->tiling_info.gfx8.bank_width,
- plane_state->tiling_info.gfx8.bank_width_c,
- plane_state->tiling_info.gfx8.bank_height,
- plane_state->tiling_info.gfx8.bank_height_c,
- plane_state->tiling_info.gfx8.tile_aspect,
- plane_state->tiling_info.gfx8.tile_aspect_c,
- plane_state->tiling_info.gfx8.tile_split,
- plane_state->tiling_info.gfx8.tile_split_c,
- plane_state->tiling_info.gfx8.tile_mode,
- plane_state->tiling_info.gfx8.tile_mode_c);
-
- SURFACE_TRACE(
- "plane_state->tiling_info.gfx8.pipe_config = %d;\n"
- "plane_state->tiling_info.gfx8.array_mode = %d;\n"
- "plane_state->color_space = %d;\n"
- "plane_state->dcc.enable = %d;\n"
- "plane_state->format = %d;\n"
- "plane_state->rotation = %d;\n"
- "plane_state->stereo_format = %d;\n",
- plane_state->tiling_info.gfx8.pipe_config,
- plane_state->tiling_info.gfx8.array_mode,
- plane_state->color_space,
- plane_state->dcc.enable,
- plane_state->format,
- plane_state->rotation,
- plane_state->stereo_format);
-
- SURFACE_TRACE("plane_state->tiling_info.gfx9.swizzle = %d;\n",
- plane_state->tiling_info.gfx9.swizzle);
-
- SURFACE_TRACE("\n");
- }
- SURFACE_TRACE("\n");
-}
-
void update_surface_trace(
struct dc *dc,
const struct dc_surface_update *updates,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 6eb9bae3af91..55b32dfbfdd6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -34,6 +34,7 @@
#include "dc_state_priv.h"
#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+#define MAX_NUM_MCACHE 8
/* used as index in array of black_color_format */
enum black_color_format {
@@ -176,7 +177,7 @@ static bool is_ycbcr2020_type(
{
bool ret = false;
- if (color_space == COLOR_SPACE_2020_YCBCR)
+ if (color_space == COLOR_SPACE_2020_YCBCR_LIMITED || color_space == COLOR_SPACE_2020_YCBCR_FULL)
ret = true;
return ret;
}
@@ -247,7 +248,8 @@ void color_space_to_black_color(
case COLOR_SPACE_YCBCR709_BLACK:
case COLOR_SPACE_YCBCR601_LIMITED:
case COLOR_SPACE_YCBCR709_LIMITED:
- case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_2020_YCBCR_LIMITED:
+ case COLOR_SPACE_2020_YCBCR_FULL:
*black_color = black_color_format[BLACK_COLOR_FORMAT_YUV_CV];
break;
@@ -552,6 +554,53 @@ void get_cursor_visual_confirm_color(
}
}
+void get_dcc_visual_confirm_color(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ const uint32_t MCACHE_ID_UNASSIGNED = 0xF;
+
+ if (!pipe_ctx->plane_state->dcc.enable) {
+ color->color_r_cr = 0; /* black - DCC disabled */
+ color->color_g_y = 0;
+ color->color_b_cb = 0;
+ return;
+ }
+
+ if (dc->ctx->dce_version < DCN_VERSION_4_01) {
+ color->color_r_cr = MAX_TG_COLOR_VALUE; /* red - DCC enabled */
+ color->color_g_y = 0;
+ color->color_b_cb = 0;
+ return;
+ }
+
+ uint32_t first_id = pipe_ctx->mcache_regs.main.p0.mcache_id_first;
+ uint32_t second_id = pipe_ctx->mcache_regs.main.p0.mcache_id_second;
+
+ if (first_id != MCACHE_ID_UNASSIGNED && second_id != MCACHE_ID_UNASSIGNED && first_id != second_id) {
+ color->color_r_cr = MAX_TG_COLOR_VALUE/2; /* grey - 2 mcache */
+ color->color_g_y = MAX_TG_COLOR_VALUE/2;
+ color->color_b_cb = MAX_TG_COLOR_VALUE/2;
+ }
+
+ else if (first_id != MCACHE_ID_UNASSIGNED || second_id != MCACHE_ID_UNASSIGNED) {
+ const struct tg_color id_colors[MAX_NUM_MCACHE] = {
+ {0, MAX_TG_COLOR_VALUE, 0}, /* green */
+ {0, 0, MAX_TG_COLOR_VALUE}, /* blue */
+ {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE, 0}, /* yellow */
+ {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, /* magenta */
+ {0, MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE}, /* cyan */
+ {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE}, /* white */
+ {MAX_TG_COLOR_VALUE/2, 0, 0}, /* dark red */
+ {0, MAX_TG_COLOR_VALUE/2, 0}, /* dark green */
+ };
+
+ uint32_t assigned_id = (first_id != MCACHE_ID_UNASSIGNED) ? first_id : second_id;
+ *color = id_colors[assigned_id];
+ }
+}
+
void set_p_state_switch_method(
struct dc *dc,
struct dc_state *context,
@@ -563,6 +612,7 @@ void set_p_state_switch_method(
if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !vba)
return;
+ pipe_ctx->p_state_type = P_STATE_UNKNOWN;
if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] !=
dm_dram_clock_change_unsupported) {
/* MCLK switching is supported */
@@ -609,6 +659,21 @@ void set_p_state_switch_method(
}
}
+void set_drr_and_clear_adjust_pending(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_state *stream,
+ struct drr_params *params)
+{
+	/* params can be NULL. */
+ if (pipe_ctx && pipe_ctx->stream_res.tg &&
+ pipe_ctx->stream_res.tg->funcs->set_drr)
+ pipe_ctx->stream_res.tg->funcs->set_drr(
+ pipe_ctx->stream_res.tg, params);
+
+ if (stream)
+ stream->adjust.timing_adjust_pending = false;
+}
+
void get_fams2_visual_confirm_color(
struct dc *dc,
struct dc_state *context,
@@ -752,7 +817,14 @@ void hwss_build_fast_sequence(struct dc *dc,
block_sequence[*num_steps].func = DPP_SET_OUTPUT_TRANSFER_FUNC;
(*num_steps)++;
}
-
+ if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE &&
+ dc->hwss.update_visual_confirm_color) {
+ block_sequence[*num_steps].params.update_visual_confirm_params.dc = dc;
+ block_sequence[*num_steps].params.update_visual_confirm_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].params.update_visual_confirm_params.mpcc_id = current_mpc_pipe->plane_res.hubp->inst;
+ block_sequence[*num_steps].func = MPC_UPDATE_VISUAL_CONFIRM;
+ (*num_steps)++;
+ }
if (current_mpc_pipe->stream->update_flags.bits.out_csc) {
block_sequence[*num_steps].params.power_on_mpc_mem_pwr_params.mpc = dc->res_pool->mpc;
block_sequence[*num_steps].params.power_on_mpc_mem_pwr_params.mpcc_id = current_mpc_pipe->plane_res.hubp->inst;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
index 039b176e086d..814f68d76257 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
@@ -44,20 +44,8 @@ static bool is_dig_link_enc_stream(struct dc_stream_state *stream)
* yet match.
*/
if (link_enc && ((uint32_t)stream->link->connector_signal & link_enc->output_signals)) {
- if (dc_is_dp_signal(stream->signal)) {
- /* DIGs do not support DP2.0 streams with 128b/132b encoding. */
- struct dc_link_settings link_settings = {0};
-
- stream->ctx->dc->link_srv->dp_decide_link_settings(stream, &link_settings);
- if ((link_settings.link_rate >= LINK_RATE_LOW) &&
- link_settings.link_rate <= LINK_RATE_HIGH3) {
- is_dig_stream = true;
- break;
- }
- } else {
- is_dig_stream = true;
- break;
- }
+ is_dig_stream = true;
+ break;
}
}
}
@@ -559,17 +547,6 @@ struct link_encoder *link_enc_cfg_get_next_avail_link_enc(struct dc *dc)
return link_enc;
}
-struct link_encoder *link_enc_cfg_get_link_enc_used_by_stream(
- struct dc *dc,
- const struct dc_stream_state *stream)
-{
- struct link_encoder *link_enc;
-
- link_enc = link_enc_cfg_get_link_enc_used_by_link(dc, stream->link);
-
- return link_enc;
-}
-
struct link_encoder *link_enc_cfg_get_link_enc(
const struct dc_link *link)
{
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
index c1b79b379447..71e15da4bb69 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
@@ -150,6 +150,12 @@ bool dc_link_update_dsc_config(struct pipe_ctx *pipe_ctx)
return link->dc->link_srv->update_dsc_config(pipe_ctx);
}
+struct ddc_service *
+dc_get_oem_i2c_device(struct dc *dc)
+{
+ return dc->res_pool->oem_device;
+}
+
bool dc_is_oem_i2c_device_present(
struct dc *dc,
size_t slave_address)
@@ -364,15 +370,10 @@ bool dc_link_should_enable_fec(const struct dc_link *link)
return link->dc->link_srv->dp_should_enable_fec(link);
}
-int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
+void dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
struct dc_link *link, int peak_bw)
{
- return link->dc->link_srv->dpia_handle_usb4_bandwidth_allocation_for_link(link, peak_bw);
-}
-
-void dc_link_handle_usb4_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t result)
-{
- link->dc->link_srv->dpia_handle_bw_alloc_response(link, bw, result);
+ link->dc->link_srv->dpia_handle_usb4_bandwidth_allocation_for_link(link, peak_bw);
}
bool dc_link_check_link_loss_status(
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index a45037cb4cc0..313a32248cd7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -76,6 +76,7 @@
#include "dcn321/dcn321_resource.h"
#include "dcn35/dcn35_resource.h"
#include "dcn351/dcn351_resource.h"
+#include "dcn36/dcn36_resource.h"
#include "dcn401/dcn401_resource.h"
#if defined(CONFIG_DRM_AMD_DC_FP)
#include "dc_spl_translate.h"
@@ -204,6 +205,8 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
dc_version = DCN_VERSION_3_5;
if (ASICREV_IS_GC_11_0_4(asic_id.hw_internal_rev))
dc_version = DCN_VERSION_3_51;
+ if (ASICREV_IS_DCN36(asic_id.hw_internal_rev))
+ dc_version = DCN_VERSION_3_6;
break;
case AMDGPU_FAMILY_GC_12_0_0:
if (ASICREV_IS_GC_12_0_1_A0(asic_id.hw_internal_rev) ||
@@ -320,6 +323,9 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
case DCN_VERSION_3_51:
res_pool = dcn351_create_resource_pool(init_data, dc);
break;
+ case DCN_VERSION_3_6:
+ res_pool = dcn36_create_resource_pool(init_data, dc);
+ break;
case DCN_VERSION_4_01:
res_pool = dcn401_create_resource_pool(init_data, dc);
break;
@@ -941,6 +947,17 @@ static void calculate_adjust_recout_for_visual_confirm(struct pipe_ctx *pipe_ctx
*base_offset = VISUAL_CONFIRM_BASE_DEFAULT;
}
+static void reverse_adjust_recout_for_visual_confirm(struct rect *recout,
+ struct pipe_ctx *pipe_ctx)
+{
+ int dpp_offset, base_offset;
+
+ calculate_adjust_recout_for_visual_confirm(pipe_ctx, &base_offset,
+ &dpp_offset);
+ recout->height += base_offset;
+ recout->height += dpp_offset;
+}
+
static void adjust_recout_for_visual_confirm(struct rect *recout,
struct pipe_ctx *pipe_ctx)
{
@@ -1642,6 +1659,62 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
return res;
}
+bool resource_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
+{
+ struct pipe_ctx *test_pipe, *split_pipe;
+ struct rect r1 = pipe_ctx->plane_res.scl_data.recout;
+ int r1_right, r1_bottom;
+ int cur_layer = pipe_ctx->plane_state->layer_index;
+
+ reverse_adjust_recout_for_visual_confirm(&r1, pipe_ctx);
+ r1_right = r1.x + r1.width;
+ r1_bottom = r1.y + r1.height;
+
+ /**
+ * Disable the cursor if there's another pipe above this with a
+ * plane that contains this pipe's viewport to prevent double cursor
+ * and incorrect scaling artifacts.
+ */
+ for (test_pipe = pipe_ctx->top_pipe; test_pipe;
+ test_pipe = test_pipe->top_pipe) {
+ struct rect r2;
+ int r2_right, r2_bottom;
+ // Skip invisible layer and pipe-split plane on same layer
+ if (!test_pipe->plane_state ||
+ !test_pipe->plane_state->visible ||
+ test_pipe->plane_state->layer_index == cur_layer)
+ continue;
+
+ r2 = test_pipe->plane_res.scl_data.recout;
+ reverse_adjust_recout_for_visual_confirm(&r2, test_pipe);
+ r2_right = r2.x + r2.width;
+ r2_bottom = r2.y + r2.height;
+
+ /**
+	 * There may be another half plane on the same layer because of
+	 * pipe-split; merge the two halves before the containment check.
+ */
+ for (split_pipe = pipe_ctx->top_pipe; split_pipe;
+ split_pipe = split_pipe->top_pipe)
+ if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
+ struct rect r2_half;
+
+ r2_half = split_pipe->plane_res.scl_data.recout;
+ reverse_adjust_recout_for_visual_confirm(&r2_half, split_pipe);
+ r2.x = min(r2_half.x, r2.x);
+ r2.width = r2.width + r2_half.width;
+ r2_right = r2.x + r2.width;
+ r2_bottom = min(r2_bottom, r2_half.y + r2_half.height);
+ break;
+ }
+
+ if (r1.x >= r2.x && r1.y >= r2.y && r1_right <= r2_right && r1_bottom <= r2_bottom)
+ return true;
+ }
+
+ return false;
+}
+
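The new helper above reduces the cursor-disable decision to a rectangle-containment test: the pipe's recout r1 must lie entirely inside the covering plane's recout r2 (after undoing the visual-confirm adjustment). A minimal standalone sketch of that containment check, using a simplified rect type rather than the DC structures (illustrative only, not part of the patch):

#include <stdbool.h>

struct demo_rect {
	int x, y, width, height;
};

/* r1 is fully contained in r2 iff all four edges of r1 fall inside r2. */
static bool demo_rect_contained(const struct demo_rect *r1, const struct demo_rect *r2)
{
	return r1->x >= r2->x &&
	       r1->y >= r2->y &&
	       r1->x + r1->width  <= r2->x + r2->width &&
	       r1->y + r1->height <= r2->y + r2->height;
}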
enum dc_status resource_build_scaling_params_for_context(
const struct dc *dc,
@@ -2617,6 +2690,162 @@ static void remove_hpo_dp_link_enc_from_ctx(struct resource_context *res_ctx,
}
}
+static inline int find_acquired_dio_link_enc_for_link(
+ const struct resource_context *res_ctx,
+ const struct dc_link *link)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(res_ctx->dio_link_enc_ref_cnts); i++)
+ if (res_ctx->dio_link_enc_ref_cnts[i] > 0 &&
+ res_ctx->dio_link_enc_to_link_idx[i] == link->link_index)
+ return i;
+
+ return -1;
+}
+
+static inline int find_fixed_dio_link_enc(const struct dc_link *link)
+{
+ /* the 8b10b dp phy can only use fixed link encoder */
+ return link->eng_id;
+}
+
+static inline int find_free_dio_link_enc(const struct resource_context *res_ctx,
+ const struct dc_link *link, const struct resource_pool *pool)
+{
+ int i;
+ int enc_count = pool->dig_link_enc_count;
+
+ /* for dpia, check preferred encoder first and then the next one */
+ for (i = 0; i < enc_count; i++)
+ if (res_ctx->dio_link_enc_ref_cnts[(link->dpia_preferred_eng_id + i) % enc_count] == 0)
+ break;
+
+ return (i >= 0 && i < enc_count) ? (link->dpia_preferred_eng_id + i) % enc_count : -1;
+}
+
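find_free_dio_link_enc() walks the reference counts starting at the DPIA's preferred engine and wraps around with a modulo, so the preferred encoder is tried first and the remaining ones in order. A standalone sketch of the same wrap-around probe, with a plain array in place of the resource context (illustrative only, hypothetical names):

/* Return the first free slot starting at 'preferred', wrapping around; -1 if none. */
static int demo_find_free_slot(const int ref_cnts[], int slot_count, int preferred)
{
	int i;

	for (i = 0; i < slot_count; i++)
		if (ref_cnts[(preferred + i) % slot_count] == 0)
			return (preferred + i) % slot_count;

	return -1;
}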
+static inline void acquire_dio_link_enc(
+ struct resource_context *res_ctx,
+ unsigned int link_index,
+ int enc_index)
+{
+ res_ctx->dio_link_enc_to_link_idx[enc_index] = link_index;
+ res_ctx->dio_link_enc_ref_cnts[enc_index] = 1;
+}
+
+static inline void retain_dio_link_enc(
+ struct resource_context *res_ctx,
+ int enc_index)
+{
+ res_ctx->dio_link_enc_ref_cnts[enc_index]++;
+}
+
+static inline void release_dio_link_enc(
+ struct resource_context *res_ctx,
+ int enc_index)
+{
+ ASSERT(res_ctx->dio_link_enc_ref_cnts[enc_index] > 0);
+ res_ctx->dio_link_enc_ref_cnts[enc_index]--;
+}
+
+static bool is_dio_enc_acquired_by_other_link(const struct dc_link *link,
+ int enc_index,
+ int *link_index)
+{
+ const struct dc *dc = link->dc;
+ const struct resource_context *res_ctx = &dc->current_state->res_ctx;
+
+ /* pass the link_index that acquired the enc_index */
+ if (res_ctx->dio_link_enc_ref_cnts[enc_index] > 0 &&
+ res_ctx->dio_link_enc_to_link_idx[enc_index] != link->link_index) {
+ *link_index = res_ctx->dio_link_enc_to_link_idx[enc_index];
+ return true;
+ }
+
+ return false;
+}
+
+static void swap_dio_link_enc_to_muxable_ctx(struct dc_state *context,
+ const struct resource_pool *pool,
+ int new_encoder,
+ int old_encoder)
+{
+ struct resource_context *res_ctx = &context->res_ctx;
+ int stream_count = context->stream_count;
+ int i = 0;
+
+ res_ctx->dio_link_enc_ref_cnts[new_encoder] = res_ctx->dio_link_enc_ref_cnts[old_encoder];
+ res_ctx->dio_link_enc_to_link_idx[new_encoder] = res_ctx->dio_link_enc_to_link_idx[old_encoder];
+ res_ctx->dio_link_enc_ref_cnts[old_encoder] = 0;
+
+ for (i = 0; i < stream_count; i++) {
+ struct dc_stream_state *stream = context->streams[i];
+ struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
+
+ if (pipe_ctx && pipe_ctx->link_res.dio_link_enc == pool->link_encoders[old_encoder])
+ pipe_ctx->link_res.dio_link_enc = pool->link_encoders[new_encoder];
+ }
+}
+
+static bool add_dio_link_enc_to_ctx(const struct dc *dc,
+ struct dc_state *context,
+ const struct resource_pool *pool,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_state *stream)
+{
+ struct resource_context *res_ctx = &context->res_ctx;
+ int enc_index;
+
+ enc_index = find_acquired_dio_link_enc_for_link(res_ctx, stream->link);
+
+ if (enc_index >= 0) {
+ retain_dio_link_enc(res_ctx, enc_index);
+ } else {
+ if (stream->link->is_dig_mapping_flexible)
+ enc_index = find_free_dio_link_enc(res_ctx, stream->link, pool);
+ else {
+ int link_index = 0;
+
+ enc_index = find_fixed_dio_link_enc(stream->link);
+ /* Fixed mapping link can only use its fixed link encoder.
+			 * If the encoder is acquired by another link, get a new free encoder and swap the new
+ * one into the acquiring link.
+ */
+ if (enc_index >= 0 && is_dio_enc_acquired_by_other_link(stream->link, enc_index, &link_index)) {
+ int new_enc_index = find_free_dio_link_enc(res_ctx, dc->links[link_index], pool);
+
+ if (new_enc_index >= 0)
+ swap_dio_link_enc_to_muxable_ctx(context, pool, new_enc_index, enc_index);
+ else
+ return false;
+ }
+ }
+
+ if (enc_index >= 0)
+ acquire_dio_link_enc(res_ctx, stream->link->link_index, enc_index);
+ }
+
+ if (enc_index >= 0)
+ pipe_ctx->link_res.dio_link_enc = pool->link_encoders[enc_index];
+
+ return pipe_ctx->link_res.dio_link_enc != NULL;
+}
+
+static void remove_dio_link_enc_from_ctx(struct resource_context *res_ctx,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_state *stream)
+{
+ int enc_index = -1;
+
+ if (stream->link)
+ enc_index = find_acquired_dio_link_enc_for_link(res_ctx, stream->link);
+
+ if (enc_index >= 0) {
+ release_dio_link_enc(res_ctx, enc_index);
+ pipe_ctx->link_res.dio_link_enc = NULL;
+ }
+}
+
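Taken together, acquire/retain/release above implement a small per-encoder reference count keyed by link index: the first stream on a link acquires a DIO encoder, additional streams on the same link only bump the count, and each removal drops it until the encoder is free again. A compact standalone sketch of that lifecycle with plain arrays instead of resource_context (illustrative only, hypothetical names):

#define DEMO_NUM_ENCODERS 4

static int demo_ref_cnts[DEMO_NUM_ENCODERS];
static unsigned int demo_enc_to_link[DEMO_NUM_ENCODERS];

static void demo_acquire(int enc, unsigned int link_index)
{
	demo_enc_to_link[enc] = link_index;	/* remember which link owns it */
	demo_ref_cnts[enc] = 1;			/* first stream on this link */
}

static void demo_retain(int enc)
{
	demo_ref_cnts[enc]++;			/* another stream on the same link */
}

static void demo_release(int enc)
{
	if (demo_ref_cnts[enc] > 0)
		demo_ref_cnts[enc]--;		/* free again once the count hits zero */
}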
static int get_num_of_free_pipes(const struct resource_pool *pool, const struct dc_state *context)
{
int i;
@@ -2664,6 +2893,10 @@ void resource_remove_otg_master_for_stream_output(struct dc_state *context,
remove_hpo_dp_link_enc_from_ctx(
&context->res_ctx, otg_master, stream);
}
+
+ if (stream->ctx->dc->config.unify_link_enc_assignment)
+ remove_dio_link_enc_from_ctx(&context->res_ctx, otg_master, stream);
+
if (otg_master->stream_res.audio)
update_audio_usage(
&context->res_ctx,
@@ -2678,6 +2911,7 @@ void resource_remove_otg_master_for_stream_output(struct dc_state *context,
if (pool->funcs->remove_stream_from_ctx)
pool->funcs->remove_stream_from_ctx(
stream->ctx->dc, context, stream);
+
memset(otg_master, 0, sizeof(*otg_master));
}
@@ -3389,10 +3623,13 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
break;
case COLOR_DEPTH_121212:
normalized_pix_clk = (pix_clk * 36) / 24;
- break;
+ break;
+ case COLOR_DEPTH_141414:
+ normalized_pix_clk = (pix_clk * 42) / 24;
+ break;
case COLOR_DEPTH_161616:
normalized_pix_clk = (pix_clk * 48) / 24;
- break;
+ break;
default:
ASSERT(0);
break;
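The normalization above scales the pixel clock by bits per pixel relative to the 24-bit (8 bpc RGB) baseline; the new COLOR_DEPTH_141414 case therefore uses 14 x 3 = 42 bits per pixel, e.g. a 600000 kHz clock normalizes to 600000 * 42 / 24 = 1050000 kHz. A one-line sketch of the general formula (illustrative only, not part of the patch):

/* 3 components per pixel; 24 bpp (8 bpc) is the reference rate. */
static int demo_normalize_pix_clk(int pix_clk, int bits_per_component)
{
	return (pix_clk * bits_per_component * 3) / 24;
}
/* demo_normalize_pix_clk(600000, 14) == 1050000 */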
@@ -3526,16 +3763,22 @@ static int acquire_resource_from_hw_enabled_state(
return -1;
}
-static void mark_seamless_boot_stream(
- const struct dc *dc,
- struct dc_stream_state *stream)
+static void mark_seamless_boot_stream(const struct dc *dc,
+ struct dc_stream_state *stream)
{
struct dc_bios *dcb = dc->ctx->dc_bios;
- if (dc->config.allow_seamless_boot_optimization &&
- !dcb->funcs->is_accelerated_mode(dcb)) {
- if (dc_validate_boot_timing(dc, stream->sink, &stream->timing))
- stream->apply_seamless_boot_optimization = true;
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ if (stream->apply_seamless_boot_optimization)
+ return;
+ if (!dc->config.allow_seamless_boot_optimization)
+ return;
+ if (dcb->funcs->is_accelerated_mode(dcb))
+ return;
+ if (dc_validate_boot_timing(dc, stream->sink, &stream->timing)) {
+ stream->apply_seamless_boot_optimization = true;
+ DC_LOG_DC("Marked stream for seamless boot optimization\n");
}
}
@@ -3646,6 +3889,7 @@ enum dc_status resource_map_pool_resources(
struct pipe_ctx *pipe_ctx = NULL;
int pipe_idx = -1;
bool acquired = false;
+ bool is_dio_encoder = true;
calculate_phy_pix_clks(stream);
@@ -3711,6 +3955,10 @@ enum dc_status resource_map_pool_resources(
}
}
+ if (dc->config.unify_link_enc_assignment && is_dio_encoder)
+ if (!add_dio_link_enc_to_ctx(dc, context, pool, pipe_ctx, stream))
+ return DC_NO_LINK_ENC_RESOURCE;
+
/* TODO: Add check if ASIC support and EDID audio */
if (!stream->converter_disable_audio &&
dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
@@ -4244,7 +4492,7 @@ static void set_avi_info_frame(
break;
case COLOR_SPACE_2020_RGB_FULLRANGE:
case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
- case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_2020_YCBCR_LIMITED:
hdmi_info.bits.EC0_EC2 = COLORIMETRYEX_BT2020RGBYCBCR;
hdmi_info.bits.C0_C1 = COLORIMETRY_EXTENDED;
break;
@@ -4258,7 +4506,7 @@ static void set_avi_info_frame(
break;
}
- if (pixel_encoding && color_space == COLOR_SPACE_2020_YCBCR &&
+ if (pixel_encoding && color_space == COLOR_SPACE_2020_YCBCR_LIMITED &&
stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) {
hdmi_info.bits.EC0_EC2 = 0;
hdmi_info.bits.C0_C1 = COLORIMETRY_ITU709;
@@ -4681,7 +4929,10 @@ bool pipe_need_reprogram(
return true;
/* DIG link encoder resource assignment for stream changed. */
- if (pipe_ctx_old->stream->ctx->dc->res_pool->funcs->link_encs_assign) {
+ if (pipe_ctx_old->stream->ctx->dc->config.unify_link_enc_assignment) {
+ if (pipe_ctx_old->link_res.dio_link_enc != pipe_ctx->link_res.dio_link_enc)
+ return true;
+ } else if (pipe_ctx_old->stream->ctx->dc->res_pool->funcs->link_encs_assign) {
bool need_reprogram = false;
struct dc *dc = pipe_ctx_old->stream->ctx->dc;
struct link_encoder *link_enc_prev =
@@ -4947,6 +5198,28 @@ void get_audio_check(struct audio_info *aud_modes,
}
}
+struct link_encoder *get_temp_dio_link_enc(
+ const struct resource_context *res_ctx,
+ const struct resource_pool *const pool,
+ const struct dc_link *link)
+{
+ struct link_encoder *link_enc = NULL;
+ int enc_index;
+
+ if (link->is_dig_mapping_flexible)
+ enc_index = find_acquired_dio_link_enc_for_link(res_ctx, link);
+ else
+ enc_index = link->eng_id;
+
+ if (enc_index < 0)
+ enc_index = find_free_dio_link_enc(res_ctx, link, pool);
+
+ if (enc_index >= 0)
+ link_enc = pool->link_encoders[enc_index];
+
+ return link_enc;
+}
+
static struct hpo_dp_link_encoder *get_temp_hpo_dp_link_enc(
const struct resource_context *res_ctx,
const struct resource_pool *const pool,
@@ -4976,11 +5249,17 @@ bool get_temp_dp_link_res(struct dc_link *link,
memset(link_res, 0, sizeof(*link_res));
if (dc->link_srv->dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
- link_res->hpo_dp_link_enc = get_temp_hpo_dp_link_enc(res_ctx,
- dc->res_pool, link);
+ link_res->hpo_dp_link_enc = get_temp_hpo_dp_link_enc(res_ctx, dc->res_pool, link);
if (!link_res->hpo_dp_link_enc)
return false;
+ } else if (dc->link_srv->dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING &&
+ dc->config.unify_link_enc_assignment) {
+ link_res->dio_link_enc = get_temp_dio_link_enc(res_ctx,
+ dc->res_pool, link);
+ if (!link_res->dio_link_enc)
+ return false;
}
+
return true;
}
@@ -5252,6 +5531,10 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
remove_hpo_dp_link_enc_from_ctx(&context->res_ctx, pipe_ctx, pipe_ctx->stream);
}
+ if (pipe_ctx->link_res.dio_link_enc == NULL && dc->config.unify_link_enc_assignment)
+ if (!add_dio_link_enc_to_ctx(dc, context, dc->res_pool, pipe_ctx, pipe_ctx->stream))
+ return DC_NO_LINK_ENC_RESOURCE;
+
return DC_OK;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index e8134c47fe0d..0478dd856d8c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -201,7 +201,8 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
dc_stream_assign_stream_id(new_stream);
/* If using dynamic encoder assignment, wait till stream committed to assign encoder. */
- if (new_stream->ctx->dc->res_pool->funcs->link_encs_assign)
+ if (new_stream->ctx->dc->res_pool->funcs->link_encs_assign &&
+ !new_stream->ctx->dc->config.unify_link_enc_assignment)
new_stream->link_enc = NULL;
kref_init(&new_stream->refcount);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index f3471d45b312..e6fcc21bb9bc 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -270,8 +270,8 @@ void dc_3dlut_func_retain(struct dc_3dlut *lut)
kref_get(&lut->refcount);
}
-void dc_plane_force_update_for_panic(struct dc_plane_state *plane_state,
- bool clear_tiling)
+void dc_plane_force_dcc_and_tiling_disable(struct dc_plane_state *plane_state,
+ bool clear_tiling)
{
struct dc *dc;
int i;
@@ -290,30 +290,7 @@ void dc_plane_force_update_for_panic(struct dc_plane_state *plane_state,
if (!pipe_ctx)
continue;
- if (dc->ctx->dce_version >= DCE_VERSION_MAX) {
- struct hubp *hubp = pipe_ctx->plane_res.hubp;
- if (!hubp)
- continue;
- /* if framebuffer is tiled, disable tiling */
- if (clear_tiling && hubp->funcs->hubp_clear_tiling)
- hubp->funcs->hubp_clear_tiling(hubp);
-
- /* force page flip to see the new content of the framebuffer */
- hubp->funcs->hubp_program_surface_flip_and_addr(hubp,
- &plane_state->address,
- true);
- } else {
- struct mem_input *mi = pipe_ctx->plane_res.mi;
- if (!mi)
- continue;
- /* if framebuffer is tiled, disable tiling */
- if (clear_tiling && mi->funcs->mem_input_clear_tiling)
- mi->funcs->mem_input_clear_tiling(mi);
-
- /* force page flip to see the new content of the framebuffer */
- mi->funcs->mem_input_program_surface_flip_and_addr(mi,
- &plane_state->address,
- true);
- }
+ if (dc->hwss.clear_surface_dcc_and_tiling)
+ dc->hwss.clear_surface_dcc_and_tiling(pipe_ctx, plane_state, clear_tiling);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 053481ab69ef..7c2ee0526926 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -46,8 +46,6 @@
#include "dmub/inc/dmub_cmd.h"
-#include "spl/dc_spl_types.h"
-
struct abm_save_restore;
/* forward declaration */
@@ -55,9 +53,15 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.316"
+#define DC_VER "3.2.325"
+/**
+ * MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC
+ */
#define MAX_SURFACES 4
+/**
+ * MAX_PLANES - representative of the upper bound of planes that are supported by the HW
+ */
#define MAX_PLANES 6
#define MAX_STREAMS 6
#define MIN_VIEWPORT_SIZE 12
@@ -473,6 +477,7 @@ struct dc_config {
bool consolidated_dpia_dp_lt;
bool set_pipe_unlock_order;
bool enable_dpia_pre_training;
+ bool unify_link_enc_assignment;
};
enum visual_confirm {
@@ -490,6 +495,7 @@ enum visual_confirm {
VISUAL_CONFIRM_FAMS2 = 19,
VISUAL_CONFIRM_HW_CURSOR = 20,
VISUAL_CONFIRM_VABC = 21,
+ VISUAL_CONFIRM_DCC = 22,
};
enum dc_psr_power_opts {
@@ -778,6 +784,7 @@ union dpia_debug_options {
uint32_t disable_usb4_pm_support:1; /* bit 5 */
uint32_t enable_consolidated_dpia_dp_lt:1; /* bit 6 */
uint32_t enable_dpia_pre_training:1; /* bit 7 */
+ uint32_t unify_link_enc_assignment:1; /* bit 8 */
uint32_t reserved:24;
} bits;
uint32_t raw;
@@ -1077,6 +1084,7 @@ struct dc_debug_options {
unsigned int enable_oled_edp_power_up_opt;
bool enable_hblank_borrow;
bool force_subvp_df_throttle;
+ uint32_t acpi_transition_bitmasks[MAX_PIPES];
};
@@ -1582,8 +1590,6 @@ bool dc_validate_boot_timing(const struct dc *dc,
enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *plane_state);
-void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info);
-
enum dc_status dc_validate_with_context(struct dc *dc,
const struct dc_validation_set set[],
int set_count,
@@ -1788,7 +1794,9 @@ struct dc_link {
bool dongle_mode_timing_override;
bool blank_stream_on_ocs_change;
bool read_dpcd204h_on_irq_hpd;
+ bool force_dp_ffe_preset;
} wa_flags;
+ union dc_dp_ffe_preset forced_dp_ffe_preset;
struct link_mst_stream_allocation_table mst_stream_alloc_table;
struct dc_link_status link_status;
@@ -1800,6 +1808,7 @@ struct dc_link {
struct dc_panel_config panel_config;
struct phy_state phy_state;
+ uint32_t phy_transition_bitmask;
	// BW ALLOCATION USB4 ONLY
struct dc_dpia_bw_alloc dpia_bw_alloc_config;
bool skip_implict_edp_power_control;
@@ -1947,6 +1956,9 @@ int dc_link_aux_transfer_raw(struct ddc_service *ddc,
struct aux_payload *payload,
enum aux_return_code_type *operation_result);
+struct ddc_service *
+dc_get_oem_i2c_device(struct dc *dc);
+
bool dc_is_oem_i2c_device_present(
struct dc *dc,
size_t slave_address
@@ -2342,19 +2354,6 @@ unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link);
void dc_link_set_usb4_req_bw_req(struct dc_link *link, int req_bw);
/*
- * Handle function for when the status of the Request above is complete.
- * We will find out the result of allocating on CM and update structs.
- *
- * @link: pointer to the dc_link struct instance
- * @bw: Allocated or Estimated BW depending on the result
- * @result: Response type
- *
- * return: none
- */
-void dc_link_handle_usb4_bw_alloc_response(struct dc_link *link,
- uint8_t bw, uint8_t result);
-
-/*
* Handle the USB4 BW Allocation related functionality here:
* Plug => Try to allocate max bw from timing parameters supported by the sink
* Unplug => de-allocate bw
@@ -2362,9 +2361,8 @@ void dc_link_handle_usb4_bw_alloc_response(struct dc_link *link,
* @link: pointer to the dc_link struct instance
* @peak_bw: Peak bw used by the link/sink
*
- * return: allocated bw else return 0
*/
-int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
+void dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
struct dc_link *link, int peak_bw);
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 44ff9abe2880..614e03bfd598 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -200,10 +200,10 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
if (status != DMUB_STATUS_OK) {
DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
- if (!dmub->debug.timeout_occured) {
- dmub->debug.timeout_occured = true;
- dmub->debug.timeout_cmd = *cmd_list;
- dmub->debug.timestamp = dm_get_timestamp(dc_dmub_srv->ctx);
+ if (!dmub->debug.timeout_info.timeout_occured) {
+ dmub->debug.timeout_info.timeout_occured = true;
+ dmub->debug.timeout_info.timeout_cmd = *cmd_list;
+ dmub->debug.timeout_info.timestamp = dm_get_timestamp(dc_dmub_srv->ctx);
}
dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
return false;
@@ -927,16 +927,15 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
-bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data)
+bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
{
- if (!dc_dmub_srv || !dc_dmub_srv->dmub || !diag_data)
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
return false;
- return dmub_srv_get_diagnostic_data(dc_dmub_srv->dmub, diag_data);
+ return dmub_srv_get_diagnostic_data(dc_dmub_srv->dmub);
}
void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
{
- struct dmub_diagnostic_data diag_data = {0};
uint32_t i;
if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
@@ -946,102 +945,56 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
DC_LOG_ERROR("%s: DMCUB error - collecting diagnostic data\n", __func__);
- if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv, &diag_data)) {
+ if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv)) {
DC_LOG_ERROR("%s: dc_dmub_srv_get_diagnostic_data failed.", __func__);
return;
}
DC_LOG_DEBUG("DMCUB STATE:");
- DC_LOG_DEBUG(" dmcub_version : %08x", diag_data.dmcub_version);
- DC_LOG_DEBUG(" scratch [0] : %08x", diag_data.scratch[0]);
- DC_LOG_DEBUG(" scratch [1] : %08x", diag_data.scratch[1]);
- DC_LOG_DEBUG(" scratch [2] : %08x", diag_data.scratch[2]);
- DC_LOG_DEBUG(" scratch [3] : %08x", diag_data.scratch[3]);
- DC_LOG_DEBUG(" scratch [4] : %08x", diag_data.scratch[4]);
- DC_LOG_DEBUG(" scratch [5] : %08x", diag_data.scratch[5]);
- DC_LOG_DEBUG(" scratch [6] : %08x", diag_data.scratch[6]);
- DC_LOG_DEBUG(" scratch [7] : %08x", diag_data.scratch[7]);
- DC_LOG_DEBUG(" scratch [8] : %08x", diag_data.scratch[8]);
- DC_LOG_DEBUG(" scratch [9] : %08x", diag_data.scratch[9]);
- DC_LOG_DEBUG(" scratch [10] : %08x", diag_data.scratch[10]);
- DC_LOG_DEBUG(" scratch [11] : %08x", diag_data.scratch[11]);
- DC_LOG_DEBUG(" scratch [12] : %08x", diag_data.scratch[12]);
- DC_LOG_DEBUG(" scratch [13] : %08x", diag_data.scratch[13]);
- DC_LOG_DEBUG(" scratch [14] : %08x", diag_data.scratch[14]);
- DC_LOG_DEBUG(" scratch [15] : %08x", diag_data.scratch[15]);
+ DC_LOG_DEBUG(" dmcub_version : %08x", dc_dmub_srv->dmub->debug.dmcub_version);
+ DC_LOG_DEBUG(" scratch [0] : %08x", dc_dmub_srv->dmub->debug.scratch[0]);
+ DC_LOG_DEBUG(" scratch [1] : %08x", dc_dmub_srv->dmub->debug.scratch[1]);
+ DC_LOG_DEBUG(" scratch [2] : %08x", dc_dmub_srv->dmub->debug.scratch[2]);
+ DC_LOG_DEBUG(" scratch [3] : %08x", dc_dmub_srv->dmub->debug.scratch[3]);
+ DC_LOG_DEBUG(" scratch [4] : %08x", dc_dmub_srv->dmub->debug.scratch[4]);
+ DC_LOG_DEBUG(" scratch [5] : %08x", dc_dmub_srv->dmub->debug.scratch[5]);
+ DC_LOG_DEBUG(" scratch [6] : %08x", dc_dmub_srv->dmub->debug.scratch[6]);
+ DC_LOG_DEBUG(" scratch [7] : %08x", dc_dmub_srv->dmub->debug.scratch[7]);
+ DC_LOG_DEBUG(" scratch [8] : %08x", dc_dmub_srv->dmub->debug.scratch[8]);
+ DC_LOG_DEBUG(" scratch [9] : %08x", dc_dmub_srv->dmub->debug.scratch[9]);
+ DC_LOG_DEBUG(" scratch [10] : %08x", dc_dmub_srv->dmub->debug.scratch[10]);
+ DC_LOG_DEBUG(" scratch [11] : %08x", dc_dmub_srv->dmub->debug.scratch[11]);
+ DC_LOG_DEBUG(" scratch [12] : %08x", dc_dmub_srv->dmub->debug.scratch[12]);
+ DC_LOG_DEBUG(" scratch [13] : %08x", dc_dmub_srv->dmub->debug.scratch[13]);
+ DC_LOG_DEBUG(" scratch [14] : %08x", dc_dmub_srv->dmub->debug.scratch[14]);
+ DC_LOG_DEBUG(" scratch [15] : %08x", dc_dmub_srv->dmub->debug.scratch[15]);
for (i = 0; i < DMUB_PC_SNAPSHOT_COUNT; i++)
- DC_LOG_DEBUG(" pc[%d] : %08x", i, diag_data.pc[i]);
- DC_LOG_DEBUG(" unk_fault_addr : %08x", diag_data.undefined_address_fault_addr);
- DC_LOG_DEBUG(" inst_fault_addr : %08x", diag_data.inst_fetch_fault_addr);
- DC_LOG_DEBUG(" data_fault_addr : %08x", diag_data.data_write_fault_addr);
- DC_LOG_DEBUG(" inbox1_rptr : %08x", diag_data.inbox1_rptr);
- DC_LOG_DEBUG(" inbox1_wptr : %08x", diag_data.inbox1_wptr);
- DC_LOG_DEBUG(" inbox1_size : %08x", diag_data.inbox1_size);
- DC_LOG_DEBUG(" inbox0_rptr : %08x", diag_data.inbox0_rptr);
- DC_LOG_DEBUG(" inbox0_wptr : %08x", diag_data.inbox0_wptr);
- DC_LOG_DEBUG(" inbox0_size : %08x", diag_data.inbox0_size);
- DC_LOG_DEBUG(" outbox1_rptr : %08x", diag_data.outbox1_rptr);
- DC_LOG_DEBUG(" outbox1_wptr : %08x", diag_data.outbox1_wptr);
- DC_LOG_DEBUG(" outbox1_size : %08x", diag_data.outbox1_size);
- DC_LOG_DEBUG(" is_enabled : %d", diag_data.is_dmcub_enabled);
- DC_LOG_DEBUG(" is_soft_reset : %d", diag_data.is_dmcub_soft_reset);
- DC_LOG_DEBUG(" is_secure_reset : %d", diag_data.is_dmcub_secure_reset);
- DC_LOG_DEBUG(" is_traceport_en : %d", diag_data.is_traceport_en);
- DC_LOG_DEBUG(" is_cw0_en : %d", diag_data.is_cw0_enabled);
- DC_LOG_DEBUG(" is_cw6_en : %d", diag_data.is_cw6_enabled);
-}
-
-static bool dc_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
-{
- struct pipe_ctx *test_pipe, *split_pipe;
- const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
- struct rect r1 = scl_data->recout, r2, r2_half;
- int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
- int cur_layer = pipe_ctx->plane_state->layer_index;
-
- /**
- * Disable the cursor if there's another pipe above this with a
- * plane that contains this pipe's viewport to prevent double cursor
- * and incorrect scaling artifacts.
- */
- for (test_pipe = pipe_ctx->top_pipe; test_pipe;
- test_pipe = test_pipe->top_pipe) {
- // Skip invisible layer and pipe-split plane on same layer
- if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer)
- continue;
-
- r2 = test_pipe->plane_res.scl_data.recout;
- r2_r = r2.x + r2.width;
- r2_b = r2.y + r2.height;
-
- /**
- * There is another half plane on same layer because of
- * pipe-split, merge together per same height.
- */
- for (split_pipe = pipe_ctx->top_pipe; split_pipe;
- split_pipe = split_pipe->top_pipe)
- if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
- r2_half = split_pipe->plane_res.scl_data.recout;
- r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
- r2.width = r2.width + r2_half.width;
- r2_r = r2.x + r2.width;
- break;
- }
-
- if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
- return true;
- }
-
- return false;
+ DC_LOG_DEBUG(" pc[%d] : %08x", i, dc_dmub_srv->dmub->debug.pc[i]);
+ DC_LOG_DEBUG(" unk_fault_addr : %08x", dc_dmub_srv->dmub->debug.undefined_address_fault_addr);
+ DC_LOG_DEBUG(" inst_fault_addr : %08x", dc_dmub_srv->dmub->debug.inst_fetch_fault_addr);
+ DC_LOG_DEBUG(" data_fault_addr : %08x", dc_dmub_srv->dmub->debug.data_write_fault_addr);
+ DC_LOG_DEBUG(" inbox1_rptr : %08x", dc_dmub_srv->dmub->debug.inbox1_rptr);
+ DC_LOG_DEBUG(" inbox1_wptr : %08x", dc_dmub_srv->dmub->debug.inbox1_wptr);
+ DC_LOG_DEBUG(" inbox1_size : %08x", dc_dmub_srv->dmub->debug.inbox1_size);
+ DC_LOG_DEBUG(" inbox0_rptr : %08x", dc_dmub_srv->dmub->debug.inbox0_rptr);
+ DC_LOG_DEBUG(" inbox0_wptr : %08x", dc_dmub_srv->dmub->debug.inbox0_wptr);
+ DC_LOG_DEBUG(" inbox0_size : %08x", dc_dmub_srv->dmub->debug.inbox0_size);
+ DC_LOG_DEBUG(" outbox1_rptr : %08x", dc_dmub_srv->dmub->debug.outbox1_rptr);
+ DC_LOG_DEBUG(" outbox1_wptr : %08x", dc_dmub_srv->dmub->debug.outbox1_wptr);
+ DC_LOG_DEBUG(" outbox1_size : %08x", dc_dmub_srv->dmub->debug.outbox1_size);
+ DC_LOG_DEBUG(" is_enabled : %d", dc_dmub_srv->dmub->debug.is_dmcub_enabled);
+ DC_LOG_DEBUG(" is_soft_reset : %d", dc_dmub_srv->dmub->debug.is_dmcub_soft_reset);
+ DC_LOG_DEBUG(" is_secure_reset : %d", dc_dmub_srv->dmub->debug.is_dmcub_secure_reset);
+ DC_LOG_DEBUG(" is_traceport_en : %d", dc_dmub_srv->dmub->debug.is_traceport_en);
+ DC_LOG_DEBUG(" is_cw0_en : %d", dc_dmub_srv->dmub->debug.is_cw0_enabled);
+ DC_LOG_DEBUG(" is_cw6_en : %d", dc_dmub_srv->dmub->debug.is_cw6_enabled);
}
static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx)
{
if (pipe_ctx->plane_state != NULL) {
- if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
- return false;
-
- if (dc_can_pipe_disable_cursor(pipe_ctx))
+ if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE ||
+ resource_can_pipe_disable_cursor(pipe_ctx))
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 10b48198b7a6..a636f4c3f01d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -94,7 +94,7 @@ void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv);
void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv);
void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_data_register data);
-bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *dmub_oca);
+bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv);
void dc_dmub_setup_subvp_dmub_command(struct dc *dc, struct dc_state *context, bool enable);
void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 94ce8fe74481..77c87ad57220 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -300,6 +300,19 @@ union lane_align_status_updated {
uint8_t raw;
};
+union link_service_irq_vector_esi0 {
+ struct {
+ uint8_t DP_LINK_RX_CAP_CHANGED:1;
+ uint8_t DP_LINK_STATUS_CHANGED:1;
+ uint8_t DP_LINK_STREAM_STATUS_CHANGED:1;
+ uint8_t DP_LINK_HDMI_LINK_STATUS_CHANGED:1;
+ uint8_t DP_LINK_CONNECTED_OFF_ENTRY_REQUESTED:1;
+ uint8_t DP_LINK_TUNNELING_IRQ:1;
+ uint8_t reserved:2;
+ } bits;
+ uint8_t raw;
+};
+
union lane_adjust {
struct {
uint8_t VOLTAGE_SWING_LANE:2;
@@ -410,14 +423,6 @@ union dwnstream_port_caps_byte3_hdmi {
uint8_t raw;
};
-union hdmi_sink_encoded_link_bw_support {
- struct {
- uint8_t HDMI_SINK_ENCODED_LINK_BW_SUPPORT:3;
- uint8_t RESERVED:5;
- } bits;
- uint8_t raw;
-};
-
union hdmi_encoded_link_bw {
struct {
uint8_t FRL_MODE:1; // Bit 0
@@ -427,7 +432,28 @@ union hdmi_encoded_link_bw {
uint8_t BW_32Gbps:1;
uint8_t BW_40Gbps:1;
uint8_t BW_48Gbps:1;
- uint8_t RESERVED:1; // Bit 7
+ uint8_t FRL_LINK_TRAINING_FINISHED:1; // Bit 7
+ } bits;
+ uint8_t raw;
+};
+
+union hdmi_tx_link_status {
+ struct {
+ uint8_t HDMI_TX_LINK_ACTIVE_STATUS:1;
+ uint8_t HDMI_TX_READY_STATUS:1;
+ uint8_t RESERVED:6;
+ } bits;
+ uint8_t raw;
+};
+
+union autonomous_mode_and_frl_link_status {
+ struct {
+ uint8_t FRL_LT_IN_PROGRESS_STATUS:1;
+ uint8_t FRL_LT_LINK_CONFIG_IN_PROGRESS:3;
+ uint8_t RESERVED:1;
+ uint8_t FALLBACK_POLICY:1;
+ uint8_t FALLBACK_POLICY_VALID:1;
+ uint8_t REGULATED_AUTONOMOUS_MODE_SUPPORTED:1;
} bits;
uint8_t raw;
};
@@ -470,8 +496,10 @@ union sink_status {
uint8_t raw;
};
-/*6-byte structure corresponding to 6 registers (200h-205h)
-read during handling of HPD-IRQ*/
+/* 7-byte structure corresponding to 6 registers (200h-205h)
+ * and LINK_SERVICE_IRQ_ESI0 (2005h) for tunneling IRQ
+ * read during handling of HPD-IRQ
+ */
union hpd_irq_data {
struct {
union sink_count sink_cnt;/* 200h */
@@ -479,9 +507,10 @@ union hpd_irq_data {
union lane_status lane01_status;/* 202h */
union lane_status lane23_status;/* 203h */
union lane_align_status_updated lane_status_updated;/* 204h */
- union sink_status sink_status;
+ union sink_status sink_status;/* 205h */
+ union link_service_irq_vector_esi0 link_service_irq_esi0;/* 2005h */
} bytes;
- uint8_t raw[6];
+ uint8_t raw[7];
};
union down_stream_port_count {
@@ -959,6 +988,14 @@ union dp_128b_132b_supported_lttpr_link_rates {
uint8_t raw;
};
+union dp_alpm_lttpr_cap {
+ struct {
+ uint8_t AUX_LESS_ALPM_SUPPORTED :1;
+ uint8_t RESERVED :7;
+ } bits;
+ uint8_t raw;
+};
+
union dp_sink_video_fallback_formats {
struct {
uint8_t dp_1024x768_60Hz_24bpp_support :1;
@@ -1118,7 +1155,10 @@ struct dc_lttpr_caps {
uint8_t max_ext_timeout;
union dp_main_link_channel_coding_lttpr_cap main_link_channel_coding;
union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates;
+ union dp_alpm_lttpr_cap alpm;
uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1];
+ uint8_t lttpr_ieee_oui[3];
+ uint8_t lttpr_device_id[6];
};
struct dc_dongle_dfp_cap_ext {
@@ -1147,6 +1187,7 @@ struct dc_dongle_caps {
uint32_t dp_hdmi_max_bpc;
uint32_t dp_hdmi_max_pixel_clk_in_khz;
uint32_t dp_hdmi_frl_max_link_bw_in_kbps;
+ uint32_t dp_hdmi_regulated_autonomous_mode_support;
struct dc_dongle_dfp_cap_ext dfp_cap_ext;
};
@@ -1209,6 +1250,8 @@ struct dpcd_caps {
struct replay_info pr_info;
uint16_t edp_oled_emission_rate;
union dp_receive_port0_cap receive_port0_cap;
+ /* Indicates the number of SST links supported by MSO (Multi-Stream Output) */
+ uint8_t mso_cap_sst_links_supported;
};
union dpcd_sink_ext_caps {
@@ -1370,6 +1413,12 @@ struct dp_trace {
#ifndef DPCD_MAX_UNCOMPRESSED_PIXEL_RATE_CAP
#define DPCD_MAX_UNCOMPRESSED_PIXEL_RATE_CAP 0x221c
#endif
+#ifndef DP_LTTPR_ALPM_CAPABILITIES
+#define DP_LTTPR_ALPM_CAPABILITIES 0xF0009
+#endif
+#ifndef DP_REGULATED_AUTONOMOUS_MODE_SUPPORTED_AND_HDMI_LINK_TRAINING_STATUS
+#define DP_REGULATED_AUTONOMOUS_MODE_SUPPORTED_AND_HDMI_LINK_TRAINING_STATUS 0x303C
+#endif
#ifndef DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE
#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
#endif
@@ -1379,6 +1428,12 @@ struct dp_trace {
#ifndef DP_BRANCH_VENDOR_SPECIFIC_START
#define DP_BRANCH_VENDOR_SPECIFIC_START 0x50C
#endif
+#ifndef DP_LTTPR_IEEE_OUI
+#define DP_LTTPR_IEEE_OUI 0xF003D
+#endif
+#ifndef DP_LTTPR_DEVICE_ID
+#define DP_LTTPR_DEVICE_ID 0xF0040
+#endif
/** USB4 DPCD BW Allocation Registers Chapter 10.7 **/
#ifndef DP_TUNNELING_CAPABILITIES
#define DP_TUNNELING_CAPABILITIES 0xE000D /* 1.4a */
@@ -1416,4 +1471,20 @@ struct dp_trace {
#ifndef REQUESTED_BW
#define REQUESTED_BW 0xE0031 /* 1.4a */
#endif
+# ifndef DP_TUNNELING_BW_ALLOC_BITS_MASK
+# define DP_TUNNELING_BW_ALLOC_BITS_MASK (0x0F << 0)
+# endif
+# ifndef DP_TUNNELING_BW_REQUEST_FAILED
+# define DP_TUNNELING_BW_REQUEST_FAILED (1 << 0)
+# endif
+# ifndef DP_TUNNELING_BW_REQUEST_SUCCEEDED
+# define DP_TUNNELING_BW_REQUEST_SUCCEEDED (1 << 1)
+# endif
+# ifndef DP_TUNNELING_ESTIMATED_BW_CHANGED
+# define DP_TUNNELING_ESTIMATED_BW_CHANGED (1 << 2)
+# endif
+# ifndef DP_TUNNELING_BW_ALLOC_CAP_CHANGED
+# define DP_TUNNELING_BW_ALLOC_CAP_CHANGED (1 << 3)
+# endif
+
#endif /* DC_DP_TYPES_H */
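A note on the hpd_irq_data change above: byte 6 maps to LINK_SERVICE_IRQ_VECTOR_ESI0 at DPCD 2005h, which is not contiguous with the 200h-205h block, so the 7-byte union cannot be filled by a single AUX read. A minimal sketch of how a caller might populate it, assuming a generic read_dpcd() helper (a stand-in for whatever DPCD read routine the caller already uses, not a function added by this patch):

static bool read_hpd_irq_data_sketch(struct dc_link *link,
				     union hpd_irq_data *irq_data)
{
	/* bytes 0-5: contiguous SINK_COUNT..SINK_STATUS block at 200h */
	if (!read_dpcd(link, 0x200, &irq_data->raw[0], 6))
		return false;

	/* byte 6: LINK_SERVICE_IRQ_VECTOR_ESI0 lives apart, at 2005h */
	return read_dpcd(link, 0x2005, &irq_data->raw[6], 1);
}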
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index b402be59b2c8..8f077e15b4f0 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -741,6 +741,8 @@ char *dce_version_to_string(const int version)
return "DCN 3.5";
case DCN_VERSION_3_51:
return "DCN 3.5.1";
+ case DCN_VERSION_3_6:
+ return "DCN 3.6";
case DCN_VERSION_4_01:
return "DCN 4.0.1";
default:
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 5ac55601a6da..d562ddeca512 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -653,7 +653,8 @@ enum dc_color_space {
COLOR_SPACE_YCBCR709_LIMITED,
COLOR_SPACE_2020_RGB_FULLRANGE,
COLOR_SPACE_2020_RGB_LIMITEDRANGE,
- COLOR_SPACE_2020_YCBCR,
+ COLOR_SPACE_2020_YCBCR_LIMITED,
+ COLOR_SPACE_2020_YCBCR_FULL,
COLOR_SPACE_ADOBERGB,
COLOR_SPACE_DCIP3,
COLOR_SPACE_DISPLAYNATIVE,
@@ -661,6 +662,7 @@ enum dc_color_space {
COLOR_SPACE_APPCTRL,
COLOR_SPACE_CUSTOMPOINTS,
COLOR_SPACE_YCBCR709_BLACK,
+ COLOR_SPACE_2020_YCBCR = COLOR_SPACE_2020_YCBCR_LIMITED,
};
enum dc_dither_option {
@@ -1015,6 +1017,7 @@ struct dc_crtc_timing_adjust {
uint32_t v_total_mid;
uint32_t v_total_mid_frame_num;
uint32_t allow_otg_v_count_halt;
+ uint8_t timing_adjust_pending;
};
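One point worth noting on the dc_hw_types.h hunk above: COLOR_SPACE_2020_YCBCR is kept as an alias of the new limited-range enumerator, so existing references keep compiling while call sites (for example the stream encoders later in this diff) migrate to the explicit names. A purely illustrative check, not part of the patch:

_Static_assert(COLOR_SPACE_2020_YCBCR == COLOR_SPACE_2020_YCBCR_LIMITED,
	       "legacy BT.2020 YCbCr name maps to the limited-range entry");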
diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h
index fabcefeda288..e9413685ed4f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_plane.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_plane.h
@@ -34,7 +34,7 @@ const struct dc_plane_status *dc_plane_get_status(
void dc_plane_state_retain(struct dc_plane_state *plane_state);
void dc_plane_state_release(struct dc_plane_state *plane_state);
-void dc_plane_force_update_for_panic(struct dc_plane_state *plane_state,
- bool clear_tiling);
+void dc_plane_force_dcc_and_tiling_disable(struct dc_plane_state *plane_state,
+ bool clear_tiling);
#endif /* _DC_PLANE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
index 3518eb1b8cd1..e3a8283b4098 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
@@ -3,7 +3,6 @@
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dc_spl_translate.h"
-#include "spl/dc_spl_types.h"
#include "dcn20/dcn20_dpp.h"
#include "dcn32/dcn32_dpp.h"
#include "dcn401/dcn401_dpp.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 3e303c7808fb..e0bfddaa23e3 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -528,12 +528,6 @@ bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
struct dc_stream_state *stream,
uint32_t *refresh_rate);
-bool dc_stream_get_crtc_position(struct dc *dc,
- struct dc_stream_state **stream,
- int num_streams,
- unsigned int *v_pos,
- unsigned int *nom_v_pos);
-
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
bool dc_stream_forward_crc_window(struct dc_stream_state *stream,
struct rect *rect,
@@ -578,12 +572,6 @@ bool dc_stream_set_gamut_remap(struct dc *dc,
bool dc_stream_program_csc_matrix(struct dc *dc,
struct dc_stream_state *stream);
-bool dc_stream_get_crtc_position(struct dc *dc,
- struct dc_stream_state **stream,
- int num_streams,
- unsigned int *v_pos,
- unsigned int *nom_v_pos);
-
struct pipe_ctx *dc_stream_get_pipe_ctx(struct dc_stream_state *stream);
void dc_dmub_update_dirty_rect(struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 0c2aa91f0a11..83ffaae9f439 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -181,6 +181,7 @@ struct dc_panel_patch {
uint8_t blankstream_before_otg_off;
bool oled_optimize_display_on;
unsigned int force_mst_blocked_discovery;
+ unsigned int wait_after_dpcd_poweroff_ms;
};
struct dc_edid_caps {
@@ -1033,6 +1034,13 @@ struct psr_settings {
unsigned int psr_sdp_transmit_line_num_deadline;
uint8_t force_ffu_mode;
unsigned int psr_power_opt;
+
+ /**
+ * Some panels cannot handle the idle pattern during PSR entry.
+ * Power down the PHY before disabling the stream to avoid
+ * sending the idle pattern.
+ */
+ uint8_t power_down_phy_before_disable_stream;
};
enum replay_coasting_vtotal_type {
@@ -1216,7 +1224,6 @@ struct dc_dpia_bw_alloc {
int bw_granularity; // BW Granularity
int dp_overhead; // DP overhead in dp tunneling
bool bw_alloc_enabled; // The BW Alloc Mode Support is turned ON for all 3: DP-Tx & Dpia & CM
- bool response_ready; // Response ready from the CM side
uint8_t nrd_max_lane_count; // Non-reduced max lane count
uint8_t nrd_max_link_rate; // Non-reduced max link rate
};
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
index 160c299419b7..a9b88f5e0c04 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
@@ -379,53 +379,55 @@ struct dccg_mask {
DCCG401_REG_FIELD_LIST(uint32_t)
};
+#define DCCG_REG_VARIABLE_LIST \
+ uint32_t DPPCLK_DTO_CTRL; \
+ uint32_t DPPCLK_DTO_PARAM[6]; \
+ uint32_t REFCLK_CNTL; \
+ uint32_t DISPCLK_FREQ_CHANGE_CNTL; \
+ uint32_t OTG_PIXEL_RATE_CNTL[MAX_PIPES]; \
+ uint32_t HDMICHARCLK_CLOCK_CNTL[6]; \
+ uint32_t PHYASYMCLK_CLOCK_CNTL; \
+ uint32_t PHYBSYMCLK_CLOCK_CNTL; \
+ uint32_t PHYCSYMCLK_CLOCK_CNTL; \
+ uint32_t PHYDSYMCLK_CLOCK_CNTL; \
+ uint32_t PHYESYMCLK_CLOCK_CNTL; \
+ uint32_t DTBCLK_DTO_MODULO[MAX_PIPES]; \
+ uint32_t DTBCLK_DTO_PHASE[MAX_PIPES]; \
+ uint32_t DCCG_AUDIO_DTBCLK_DTO_MODULO; \
+ uint32_t DCCG_AUDIO_DTBCLK_DTO_PHASE; \
+ uint32_t DCCG_AUDIO_DTO_SOURCE; \
+ uint32_t DPSTREAMCLK_CNTL; \
+ uint32_t HDMISTREAMCLK_CNTL; \
+ uint32_t SYMCLK32_SE_CNTL; \
+ uint32_t SYMCLK32_LE_CNTL; \
+ uint32_t DENTIST_DISPCLK_CNTL; \
+ uint32_t DSCCLK_DTO_CTRL; \
+ uint32_t DSCCLK0_DTO_PARAM; \
+ uint32_t DSCCLK1_DTO_PARAM; \
+ uint32_t DSCCLK2_DTO_PARAM; \
+ uint32_t DSCCLK3_DTO_PARAM; \
+ uint32_t DPSTREAMCLK_ROOT_GATE_DISABLE; \
+ uint32_t DPSTREAMCLK_GATE_DISABLE; \
+ uint32_t DCCG_GATE_DISABLE_CNTL; \
+ uint32_t DCCG_GATE_DISABLE_CNTL2; \
+ uint32_t DCCG_GATE_DISABLE_CNTL3; \
+ uint32_t HDMISTREAMCLK0_DTO_PARAM; \
+ uint32_t DCCG_GATE_DISABLE_CNTL4; \
+ uint32_t OTG_PIXEL_RATE_DIV; \
+ uint32_t DTBCLK_P_CNTL; \
+ uint32_t DPPCLK_CTRL; \
+ uint32_t DCCG_GATE_DISABLE_CNTL5; \
+ uint32_t DCCG_GATE_DISABLE_CNTL6; \
+ uint32_t DCCG_GLOBAL_FGCG_REP_CNTL; \
+ uint32_t SYMCLKA_CLOCK_ENABLE; \
+ uint32_t SYMCLKB_CLOCK_ENABLE; \
+ uint32_t SYMCLKC_CLOCK_ENABLE; \
+ uint32_t SYMCLKD_CLOCK_ENABLE; \
+ uint32_t SYMCLKE_CLOCK_ENABLE; \
+ uint32_t DP_DTO_MODULO[MAX_PIPES]; \
+ uint32_t DP_DTO_PHASE[MAX_PIPES]
struct dccg_registers {
- uint32_t DPPCLK_DTO_CTRL;
- uint32_t DPPCLK_DTO_PARAM[6];
- uint32_t REFCLK_CNTL;
- uint32_t DISPCLK_FREQ_CHANGE_CNTL;
- uint32_t OTG_PIXEL_RATE_CNTL[MAX_PIPES];
- uint32_t HDMICHARCLK_CLOCK_CNTL[6];
- uint32_t PHYASYMCLK_CLOCK_CNTL;
- uint32_t PHYBSYMCLK_CLOCK_CNTL;
- uint32_t PHYCSYMCLK_CLOCK_CNTL;
- uint32_t PHYDSYMCLK_CLOCK_CNTL;
- uint32_t PHYESYMCLK_CLOCK_CNTL;
- uint32_t DTBCLK_DTO_MODULO[MAX_PIPES];
- uint32_t DTBCLK_DTO_PHASE[MAX_PIPES];
- uint32_t DCCG_AUDIO_DTBCLK_DTO_MODULO;
- uint32_t DCCG_AUDIO_DTBCLK_DTO_PHASE;
- uint32_t DCCG_AUDIO_DTO_SOURCE;
- uint32_t DPSTREAMCLK_CNTL;
- uint32_t HDMISTREAMCLK_CNTL;
- uint32_t SYMCLK32_SE_CNTL;
- uint32_t SYMCLK32_LE_CNTL;
- uint32_t DENTIST_DISPCLK_CNTL;
- uint32_t DSCCLK_DTO_CTRL;
- uint32_t DSCCLK0_DTO_PARAM;
- uint32_t DSCCLK1_DTO_PARAM;
- uint32_t DSCCLK2_DTO_PARAM;
- uint32_t DSCCLK3_DTO_PARAM;
- uint32_t DPSTREAMCLK_ROOT_GATE_DISABLE;
- uint32_t DPSTREAMCLK_GATE_DISABLE;
- uint32_t DCCG_GATE_DISABLE_CNTL;
- uint32_t DCCG_GATE_DISABLE_CNTL2;
- uint32_t DCCG_GATE_DISABLE_CNTL3;
- uint32_t HDMISTREAMCLK0_DTO_PARAM;
- uint32_t DCCG_GATE_DISABLE_CNTL4;
- uint32_t OTG_PIXEL_RATE_DIV;
- uint32_t DTBCLK_P_CNTL;
- uint32_t DPPCLK_CTRL;
- uint32_t DCCG_GATE_DISABLE_CNTL5;
- uint32_t DCCG_GATE_DISABLE_CNTL6;
- uint32_t DCCG_GLOBAL_FGCG_REP_CNTL;
- uint32_t SYMCLKA_CLOCK_ENABLE;
- uint32_t SYMCLKB_CLOCK_ENABLE;
- uint32_t SYMCLKC_CLOCK_ENABLE;
- uint32_t SYMCLKD_CLOCK_ENABLE;
- uint32_t SYMCLKE_CLOCK_ENABLE;
- uint32_t DP_DTO_MODULO[MAX_PIPES];
- uint32_t DP_DTO_PHASE[MAX_PIPES];
+ DCCG_REG_VARIABLE_LIST;
};
struct dcn_dccg {
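The point of folding the field list into DCCG_REG_VARIABLE_LIST above is reuse: struct dccg_registers keeps its exact layout, while other DCCG register structs can expand the same list and append their own entries. A hypothetical sketch (the struct name and extra field are illustrative, not from this patch):

struct dccg_registers_example {
	DCCG_REG_VARIABLE_LIST;       /* same fields as struct dccg_registers */
	uint32_t EXAMPLE_EXTRA_CNTL;  /* ASIC-specific additions go after the list */
};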
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
index d3e46c3cfa57..ffd172231fdf 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
@@ -116,7 +116,7 @@ static void dccg401_wait_for_dentist_change_done(
REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
}
-static void dccg401_get_pixel_rate_div(
+void dccg401_get_pixel_rate_div(
struct dccg *dccg,
uint32_t otg_inst,
uint32_t *tmds_div,
@@ -154,7 +154,7 @@ static void dccg401_get_pixel_rate_div(
*tmds_div = val_tmds_div == 0 ? PIXEL_RATE_DIV_BY_2 : PIXEL_RATE_DIV_BY_4;
}
-static void dccg401_set_pixel_rate_div(
+void dccg401_set_pixel_rate_div(
struct dccg *dccg,
uint32_t otg_inst,
enum pixel_rate_div tmds_div,
@@ -209,7 +209,7 @@ static void dccg401_set_pixel_rate_div(
}
-static void dccg401_set_dtbclk_p_src(
+void dccg401_set_dtbclk_p_src(
struct dccg *dccg,
enum streamclk_source src,
uint32_t otg_inst)
@@ -348,7 +348,7 @@ void dccg401_set_physymclk(
}
}
-static void dccg401_get_dccg_ref_freq(struct dccg *dccg,
+void dccg401_get_dccg_ref_freq(struct dccg *dccg,
unsigned int xtalin_freq_inKhz,
unsigned int *dccg_ref_freq_inKhz)
{
@@ -378,7 +378,7 @@ static void dccg401_otg_drop_pixel(struct dccg *dccg,
OTG_DROP_PIXEL[otg_inst], 1);
}
-static void dccg401_enable_symclk32_le(
+void dccg401_enable_symclk32_le(
struct dccg *dccg,
int hpo_le_inst,
enum phyd32clk_clock_source phyd32clk)
@@ -429,7 +429,7 @@ static void dccg401_enable_symclk32_le(
}
}
-static void dccg401_disable_symclk32_le(
+void dccg401_disable_symclk32_le(
struct dccg *dccg,
int hpo_le_inst)
{
@@ -531,7 +531,7 @@ static void dccg401_enable_dpstreamclk(struct dccg *dccg, int otg_inst, int dp_h
DPSTREAMCLK_ROOT_GATE_DISABLE, 1);
}
-static void dccg401_disable_dpstreamclk(struct dccg *dccg, int dp_hpo_inst)
+void dccg401_disable_dpstreamclk(struct dccg *dccg, int dp_hpo_inst)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
@@ -574,7 +574,7 @@ static void dccg401_disable_dpstreamclk(struct dccg *dccg, int dp_hpo_inst)
}
}
-static void dccg401_set_dpstreamclk(
+void dccg401_set_dpstreamclk(
struct dccg *dccg,
enum streamclk_source src,
int otg_inst,
@@ -587,7 +587,7 @@ static void dccg401_set_dpstreamclk(
dccg401_enable_dpstreamclk(dccg, otg_inst, dp_hpo_inst);
}
-static void dccg401_set_dp_dto(
+void dccg401_set_dp_dto(
struct dccg *dccg,
const struct dp_dto_params *params)
{
@@ -727,7 +727,7 @@ void dccg401_init(struct dccg *dccg)
}
}
-static void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst)
+void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
@@ -763,7 +763,7 @@ static void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst)
}
}
-static void dccg401_set_ref_dscclk(struct dccg *dccg,
+void dccg401_set_ref_dscclk(struct dccg *dccg,
uint32_t dsc_inst)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
@@ -798,7 +798,7 @@ static void dccg401_set_ref_dscclk(struct dccg *dccg,
}
}
-static void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst)
+void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
@@ -834,7 +834,7 @@ static void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst
}
}
-static void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst)
+void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h
index a196ce9e8127..55e8718aad22 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h
@@ -193,11 +193,48 @@
void dccg401_init(struct dccg *dccg);
void dccg401_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk);
-
+void dccg401_get_dccg_ref_freq(struct dccg *dccg,
+ unsigned int xtalin_freq_inKhz,
+ unsigned int *dccg_ref_freq_inKhz);
+void dccg401_set_dpstreamclk(
+ struct dccg *dccg,
+ enum streamclk_source src,
+ int otg_inst,
+ int dp_hpo_inst);
+void dccg401_enable_symclk32_le(
+ struct dccg *dccg,
+ int hpo_le_inst,
+ enum phyd32clk_clock_source phyd32clk);
+void dccg401_disable_symclk32_le(
+ struct dccg *dccg,
+ int hpo_le_inst);
+void dccg401_disable_dpstreamclk(struct dccg *dccg, int dp_hpo_inst);
+void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst);
+void dccg401_set_ref_dscclk(struct dccg *dccg,
+ uint32_t dsc_inst);
void dccg401_set_src_sel(
struct dccg *dccg,
const struct dtbclk_dto_params *params);
-
+void dccg401_set_pixel_rate_div(
+ struct dccg *dccg,
+ uint32_t otg_inst,
+ enum pixel_rate_div tmds_div,
+ enum pixel_rate_div unused);
+void dccg401_get_pixel_rate_div(
+ struct dccg *dccg,
+ uint32_t otg_inst,
+ uint32_t *tmds_div,
+ uint32_t *dp_dto_int);
+void dccg401_set_dp_dto(
+ struct dccg *dccg,
+ const struct dp_dto_params *params);
+void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst);
+void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst);
+void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst);
+void dccg401_set_dtbclk_p_src(
+ struct dccg *dccg,
+ enum streamclk_source src,
+ uint32_t otg_inst);
struct dccg *dccg401_create(
struct dc_context *ctx,
const struct dccg_registers *regs,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index d199e4ed2e59..1130d7619b26 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -418,7 +418,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
dynamic_range_rgb = 1; /*limited range*/
break;
case COLOR_SPACE_2020_RGB_FULLRANGE:
- case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_2020_YCBCR_LIMITED:
case COLOR_SPACE_XR_RGB:
case COLOR_SPACE_MSREF_SCRGB:
case COLOR_SPACE_ADOBERGB:
@@ -430,6 +430,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
case COLOR_SPACE_APPCTRL:
case COLOR_SPACE_CUSTOMPOINTS:
case COLOR_SPACE_UNKNOWN:
+ default:
/* do nothing */
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index bf636b28e3e1..d37ecfdde4f1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -63,11 +63,26 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
bool should_use_dmub_lock(struct dc_link *link)
{
+ /* ASIC doesn't support DMUB */
+ if (!link->ctx->dmub_srv)
+ return false;
+
if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
return true;
if (link->replay_settings.replay_feature_enabled)
return true;
+ /* only use HW lock for PSR1 on single eDP */
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_1) {
+ struct dc_link *edp_links[MAX_NUM_EDP];
+ int edp_num;
+
+ dc_get_edp_links(link->dc, edp_links, &edp_num);
+
+ if (edp_num == 1)
+ return true;
+ }
+
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 88c75c243bf8..ff3b8244ba3d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -418,6 +418,10 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
copy_settings_data->relock_delay_frame_cnt = 0;
if (link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8)
copy_settings_data->relock_delay_frame_cnt = 2;
+
+ copy_settings_data->power_down_phy_before_disable_stream =
+ link->psr_settings.power_down_phy_before_disable_stream;
+
copy_settings_data->dsc_slice_height = psr_context->dsc_slice_height;
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
index d241ee13b293..59a0961b49da 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
@@ -409,19 +409,6 @@ void dce110_compressor_destroy(struct compressor **compressor)
*compressor = NULL;
}
-void get_max_support_fbc_buffersize(unsigned int *max_x, unsigned int *max_y)
-{
- *max_x = FBC_MAX_X;
- *max_y = FBC_MAX_Y;
-
- /* if (m_smallLocalFrameBufferMemory == 1)
- * {
- * *max_x = FBC_MAX_X_SG;
- * *max_y = FBC_MAX_Y_SG;
- * }
- */
-}
-
static const struct compressor_funcs dce110_compressor_funcs = {
.power_up_fbc = dce110_compressor_power_up_fbc,
.enable_fbc = dce110_compressor_enable_fbc,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h
index 26c7335a1cbf..223c57941e92 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h
@@ -75,7 +75,5 @@ void dce110_compressor_program_lpt_control(struct compressor *cp,
bool dce110_compressor_is_lpt_enabled_in_hw(struct compressor *cp);
-void get_max_support_fbc_buffersize(unsigned int *max_x, unsigned int *max_y);
-
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c
index 1fdeef47e4dc..44b56490e152 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c
@@ -428,5 +428,6 @@ void dce60_hw_sequencer_construct(struct dc *dc)
dc->hwss.pipe_control_lock = dce60_pipe_control_lock;
dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
+ dc->hwss.clear_surface_dcc_and_tiling = dce100_reset_surface_dcc_and_tiling;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
index e5fb0e8333e4..e691a1cf3356 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
@@ -239,6 +239,7 @@ static const struct timing_generator_funcs dce60_tg_funcs = {
dce60_timing_generator_enable_advanced_request,
.configure_crc = dce60_configure_crc,
.get_crc = dce110_get_crc,
+ .is_two_pixels_per_container = dce110_is_two_pixels_per_container,
};
void dce60_timing_generator_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
index 88cf47a5ea75..baf663b661c8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
@@ -429,7 +429,9 @@ static unsigned int dcn10_get_otg_states(struct dc *dc, char *pBuf, unsigned int
struct dcn_otg_state s = {0};
int pix_clk = 0;
- optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
+ if (tg->funcs->read_otg_state)
+ tg->funcs->read_otg_state(tg, &s);
+
pix_clk = dc->current_state->res_ctx.pipe_ctx[i].stream_res.pix_clk_params.requested_pix_clk_100hz / 10;
//only print if OTG master is enabled
@@ -495,7 +497,8 @@ static void dcn10_clear_otpc_underflow(struct dc *dc)
struct timing_generator *tg = pool->timing_generators[i];
struct dcn_otg_state s = {0};
- optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
+ if (tg->funcs->read_otg_state)
+ tg->funcs->read_otg_state(tg, &s);
if (s.otg_enabled & 1)
tg->funcs->clear_optc_underflow(tg);
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
index d01a8b8f9595..22e66b375a7f 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
@@ -391,7 +391,7 @@ void enc1_stream_encoder_dp_set_stream_attribute(
break;
case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
case COLOR_SPACE_2020_RGB_FULLRANGE:
- case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_2020_YCBCR_LIMITED:
case COLOR_SPACE_XR_RGB:
case COLOR_SPACE_MSREF_SCRGB:
case COLOR_SPACE_ADOBERGB:
@@ -404,6 +404,7 @@ void enc1_stream_encoder_dp_set_stream_attribute(
case COLOR_SPACE_CUSTOMPOINTS:
case COLOR_SPACE_UNKNOWN:
case COLOR_SPACE_YCBCR709_BLACK:
+ default:
/* do nothing */
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_stream_encoder.c
index 425b830b88d2..e93be7b6d9b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_stream_encoder.c
@@ -47,7 +47,7 @@
enc1->base.ctx
-static void enc3_update_hdmi_info_packet(
+void enc3_update_hdmi_info_packet(
struct dcn10_stream_encoder *enc1,
uint32_t packet_index,
const struct dc_info_packet *info_packet)
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_stream_encoder.h
index 06310973ded2..830ce7e47035 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_stream_encoder.h
@@ -322,6 +322,10 @@ void enc3_dp_set_dsc_pps_info_packet(
struct stream_encoder *enc,
bool enable,
uint8_t *dsc_packed_pps,
- bool immediate_update);
+ bool immediate_update);
+void enc3_update_hdmi_info_packet(
+ struct dcn10_stream_encoder *enc1,
+ uint32_t packet_index,
+ const struct dc_info_packet *info_packet);
#endif /* __DC_DIO_STREAM_ENCODER_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c
index ea0c9a9d0bd6..9972911330b6 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c
@@ -137,9 +137,9 @@ static const struct link_encoder_funcs dcn35_link_enc_funcs = {
.hw_init = dcn35_link_encoder_init,
.setup = dcn35_link_encoder_setup,
.enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
- .enable_dp_output = dcn31_link_encoder_enable_dp_output,
- .enable_dp_mst_output = dcn31_link_encoder_enable_dp_mst_output,
- .disable_output = dcn31_link_encoder_disable_output,
+ .enable_dp_output = dcn35_link_encoder_enable_dp_output,
+ .enable_dp_mst_output = dcn35_link_encoder_enable_dp_mst_output,
+ .disable_output = dcn35_link_encoder_disable_output,
.dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings,
.dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern,
.update_mst_stream_allocation_table =
@@ -297,6 +297,50 @@ static void link_encoder_disable(struct dcn10_link_encoder *enc10)
REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, 0);
}
+void dcn35_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+ if (!enc->ctx->dc->config.unify_link_enc_assignment)
+ dcn31_link_encoder_enable_dp_output(enc, link_settings, clock_source);
+ else {
+ DC_LOG_DEBUG("%s: enc_id(%d)\n", __func__, enc->preferred_engine);
+ dcn20_link_encoder_enable_dp_output(enc, link_settings, clock_source);
+ }
+}
+
+void dcn35_link_encoder_enable_dp_mst_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+ if (!enc->ctx->dc->config.unify_link_enc_assignment)
+ dcn31_link_encoder_enable_dp_mst_output(enc, link_settings, clock_source);
+ else {
+ DC_LOG_DEBUG("%s: enc_id(%d)\n", __func__, enc->preferred_engine);
+ dcn10_link_encoder_enable_dp_mst_output(enc, link_settings, clock_source);
+ }
+}
+
+void dcn35_link_encoder_disable_output(
+ struct link_encoder *enc,
+ enum signal_type signal)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+ if (!enc->ctx->dc->config.unify_link_enc_assignment)
+ dcn31_link_encoder_disable_output(enc, signal);
+ else {
+ DC_LOG_DEBUG("%s: enc_id(%d)\n", __func__, enc->preferred_engine);
+ dcn10_link_encoder_disable_output(enc, signal);
+ }
+}
+
void dcn35_link_encoder_enable_dpia_output(
struct link_encoder *enc,
const struct dc_link_settings *link_settings,
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h
index f9d4221f4b43..5712e6553fab 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h
@@ -145,6 +145,29 @@ enum signal_type dcn35_get_dig_mode(struct link_encoder *enc);
void dcn35_link_encoder_setup(struct link_encoder *enc, enum signal_type signal);
/*
+ * Enable DP transmitter and its encoder.
+ */
+void dcn35_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source);
+
+/*
+ * Enable DP transmitter and its encoder in MST mode.
+ */
+void dcn35_link_encoder_enable_dp_mst_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source);
+
+/*
+ * Disable transmitter and its encoder.
+ */
+void dcn35_link_encoder_disable_output(
+ struct link_encoder *enc,
+ enum signal_type signal);
+
+/*
* Enable DP transmitter and its encoder for dpia port.
*/
void dcn35_link_encoder_enable_dpia_output(
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
index 098c2a01a850..d5fa551dd3c9 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
@@ -60,7 +60,7 @@ static void enc401_dp_set_odm_combine(
}
/* setup stream encoder in dvi mode */
-static void enc401_stream_encoder_dvi_set_stream_attribute(
+void enc401_stream_encoder_dvi_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
bool is_dual_link)
@@ -100,7 +100,7 @@ static void enc401_stream_encoder_dvi_set_stream_attribute(
}
/* setup stream encoder in hdmi mode */
-static void enc401_stream_encoder_hdmi_set_stream_attribute(
+void enc401_stream_encoder_hdmi_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
int actual_pix_clk_khz,
@@ -229,7 +229,7 @@ static void enc401_stream_encoder_hdmi_set_stream_attribute(
REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, 0);
}
-static void enc401_set_dig_input_mode(struct stream_encoder *enc, unsigned int pix_per_container)
+void enc401_set_dig_input_mode(struct stream_encoder *enc, unsigned int pix_per_container)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
@@ -260,7 +260,7 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
return two_pix;
}
-static void enc401_stream_encoder_dp_unblank(
+void enc401_stream_encoder_dp_unblank(
struct dc_link *link,
struct stream_encoder *enc,
const struct encoder_unblank_param *param)
@@ -376,7 +376,7 @@ static void enc401_stream_encoder_dp_unblank(
/* this function read dsc related register fields to be logged later in dcn10_log_hw_state
* into a dcn_dsc_state struct.
*/
-static void enc401_read_state(struct stream_encoder *enc, struct enc_state *s)
+void enc401_read_state(struct stream_encoder *enc, struct enc_state *s)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
@@ -394,7 +394,7 @@ static void enc401_read_state(struct stream_encoder *enc, struct enc_state *s)
}
}
-static void enc401_stream_encoder_enable(
+void enc401_stream_encoder_enable(
struct stream_encoder *enc,
enum signal_type signal,
bool enable)
@@ -632,7 +632,7 @@ void enc401_stream_encoder_dp_set_stream_attribute(
break;
case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
case COLOR_SPACE_2020_RGB_FULLRANGE:
- case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_2020_YCBCR_LIMITED:
case COLOR_SPACE_XR_RGB:
case COLOR_SPACE_MSREF_SCRGB:
case COLOR_SPACE_ADOBERGB:
@@ -645,6 +645,7 @@ void enc401_stream_encoder_dp_set_stream_attribute(
case COLOR_SPACE_CUSTOMPOINTS:
case COLOR_SPACE_UNKNOWN:
case COLOR_SPACE_YCBCR709_BLACK:
+ default:
/* do nothing */
break;
}
@@ -704,7 +705,7 @@ void enc401_stream_encoder_dp_set_stream_attribute(
DP_SST_SDP_SPLITTING, enable_sdp_splitting);
}
-static void enc401_stream_encoder_map_to_link(
+void enc401_stream_encoder_map_to_link(
struct stream_encoder *enc,
uint32_t stream_enc_inst,
uint32_t link_enc_inst)
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.h
index d751839598f8..d6b00cd246b1 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.h
@@ -214,4 +214,27 @@ void enc401_stream_encoder_dp_set_stream_attribute(
enum dc_color_space output_color_space,
bool use_vsc_sdp_for_colorimetry,
uint32_t enable_sdp_splitting);
+void enc401_stream_encoder_dvi_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ bool is_dual_link);
+void enc401_stream_encoder_dp_unblank(
+ struct dc_link *link,
+ struct stream_encoder *enc,
+ const struct encoder_unblank_param *param);
+void enc401_stream_encoder_enable(
+ struct stream_encoder *enc,
+ enum signal_type signal,
+ bool enable);
+void enc401_set_dig_input_mode(struct stream_encoder *enc, unsigned int pix_per_container);
+void enc401_stream_encoder_map_to_link(
+ struct stream_encoder *enc,
+ uint32_t stream_enc_inst,
+ uint32_t link_enc_inst);
+void enc401_read_state(struct stream_encoder *enc, struct enc_state *s);
+void enc401_stream_encoder_hdmi_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ int actual_pix_clk_khz,
+ bool enable_audio);
#endif /* __DC_DIO_STREAM_ENCODER_DCN401_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index f81e5a4e1d6d..7b9c22c45453 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -291,6 +291,13 @@ bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, e
bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type);
/*
+ * ACPI Interfaces
+ */
+void dm_acpi_process_phy_transition_interlock(
+ const struct dc_context *ctx,
+ struct dm_process_phy_transition_init_params process_phy_transition_init_params);
+
+/*
* Debug and verification hooks
*/
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index facf269c4326..bf63da266a18 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -275,4 +275,30 @@ enum dm_dmub_wait_type {
DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY,
};
+enum dm_acpi_transition_link_type {
+ hdmi_tmds,
+ hdmi_frl,
+ dp_8b_10b,
+ dp_128b_132b,
+ none
+};
+
+struct dm_process_phy_transition_init_params {
+ uint32_t phy_id;
+ uint8_t action;
+ uint32_t sym_clock_10khz;
+ enum signal_type signal;
+ enum dc_lane_count display_port_lanes_count;
+ enum dc_link_rate display_port_link_rate;
+ uint32_t transition_bitmask;
+ uint8_t hdmi_frl_num_lanes;
+};
+
+struct dm_process_phy_transition_input_params {
+ uint32_t phy_id;
+ uint32_t transition_id;
+ uint32_t phy_configuration;
+ uint32_t data_rate;
+};
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
index aac0a0ae2966..88789987bdbc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
@@ -178,82 +178,6 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc = {
};
-void optc3_fpu_set_vrr_m_const(struct timing_generator *optc,
- double vtotal_avg)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
- double vtotal_min, vtotal_max;
- double ratio, modulo, phase;
- uint32_t vblank_start;
- uint32_t v_total_mask_value = 0;
-
- dc_assert_fp_enabled();
-
- /* Compute VTOTAL_MIN and VTOTAL_MAX, so that
- * VOTAL_MAX - VTOTAL_MIN = 1
- */
- v_total_mask_value = 16;
- vtotal_min = dcn_bw_floor(vtotal_avg);
- vtotal_max = dcn_bw_ceil(vtotal_avg);
-
- /* Check that bottom VBLANK is at least 2 lines tall when running with
- * VTOTAL_MIN. Note that VTOTAL registers are defined as 'total number
- * of lines in a frame - 1'.
- */
- REG_GET(OTG_V_BLANK_START_END, OTG_V_BLANK_START,
- &vblank_start);
- ASSERT(vtotal_min >= vblank_start + 1);
-
- /* Special case where the average frame rate can be achieved
- * without using the DTO
- */
- if (vtotal_min == vtotal_max) {
- REG_SET(OTG_V_TOTAL, 0, OTG_V_TOTAL, (uint32_t)vtotal_min);
-
- optc->funcs->set_vtotal_min_max(optc, 0, 0);
- REG_SET(OTG_M_CONST_DTO0, 0, OTG_M_CONST_DTO_PHASE, 0);
- REG_SET(OTG_M_CONST_DTO1, 0, OTG_M_CONST_DTO_MODULO, 0);
- REG_UPDATE_3(OTG_V_TOTAL_CONTROL,
- OTG_V_TOTAL_MIN_SEL, 0,
- OTG_V_TOTAL_MAX_SEL, 0,
- OTG_SET_V_TOTAL_MIN_MASK_EN, 0);
- return;
- }
-
- ratio = vtotal_max - vtotal_avg;
- modulo = 65536.0 * 65536.0 - 1.0; /* 2^32 - 1 */
- phase = ratio * modulo;
-
- /* Special cases where the DTO phase gets rounded to 0 or
- * to DTO modulo
- */
- if (phase <= 0 || phase >= modulo) {
- REG_SET(OTG_V_TOTAL, 0, OTG_V_TOTAL,
- phase <= 0 ?
- (uint32_t)vtotal_max : (uint32_t)vtotal_min);
- REG_SET(OTG_V_TOTAL_MIN, 0, OTG_V_TOTAL_MIN, 0);
- REG_SET(OTG_V_TOTAL_MAX, 0, OTG_V_TOTAL_MAX, 0);
- REG_SET(OTG_M_CONST_DTO0, 0, OTG_M_CONST_DTO_PHASE, 0);
- REG_SET(OTG_M_CONST_DTO1, 0, OTG_M_CONST_DTO_MODULO, 0);
- REG_UPDATE_3(OTG_V_TOTAL_CONTROL,
- OTG_V_TOTAL_MIN_SEL, 0,
- OTG_V_TOTAL_MAX_SEL, 0,
- OTG_SET_V_TOTAL_MIN_MASK_EN, 0);
- return;
- }
- REG_UPDATE_6(OTG_V_TOTAL_CONTROL,
- OTG_V_TOTAL_MIN_SEL, 1,
- OTG_V_TOTAL_MAX_SEL, 1,
- OTG_SET_V_TOTAL_MIN_MASK_EN, 1,
- OTG_SET_V_TOTAL_MIN_MASK, v_total_mask_value,
- OTG_VTOTAL_MID_REPLACING_MIN_EN, 0,
- OTG_VTOTAL_MID_REPLACING_MAX_EN, 0);
- REG_SET(OTG_V_TOTAL, 0, OTG_V_TOTAL, (uint32_t)vtotal_min);
- optc->funcs->set_vtotal_min_max(optc, vtotal_min, vtotal_max);
- REG_SET(OTG_M_CONST_DTO0, 0, OTG_M_CONST_DTO_PHASE, (uint32_t)phase);
- REG_SET(OTG_M_CONST_DTO1, 0, OTG_M_CONST_DTO_MODULO, (uint32_t)modulo);
-}
-
void dcn30_fpu_populate_dml_writeback_from_context(
struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h
index cab864095ce7..e3b6ad6a8784 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h
@@ -29,9 +29,6 @@
#include "core_types.h"
#include "dcn20/dcn20_optc.h"
-void optc3_fpu_set_vrr_m_const(struct timing_generator *optc,
- double vtotal_avg);
-
void dcn30_fpu_populate_dml_writeback_from_context(
struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index cee1b351e105..f1fe49401bc0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -281,10 +281,10 @@ static void CalculateDynamicMetadataParameters(
double DISPCLK,
double DCFClkDeepSleep,
double PixelClock,
- long HTotal,
- long VBlank,
- long DynamicMetadataTransmittedBytes,
- long DynamicMetadataLinesBeforeActiveRequired,
+ unsigned int HTotal,
+ unsigned int VBlank,
+ unsigned int DynamicMetadataTransmittedBytes,
+ int DynamicMetadataLinesBeforeActiveRequired,
int InterlaceEnable,
bool ProgressiveToInterlaceUnitInOPP,
double *Tsetup,
@@ -3265,8 +3265,8 @@ static double CalculateWriteBackDelay(
static void CalculateDynamicMetadataParameters(int MaxInterDCNTileRepeaters, double DPPCLK, double DISPCLK,
- double DCFClkDeepSleep, double PixelClock, long HTotal, long VBlank, long DynamicMetadataTransmittedBytes,
- long DynamicMetadataLinesBeforeActiveRequired, int InterlaceEnable, bool ProgressiveToInterlaceUnitInOPP,
+ double DCFClkDeepSleep, double PixelClock, unsigned int HTotal, unsigned int VBlank, unsigned int DynamicMetadataTransmittedBytes,
+ int DynamicMetadataLinesBeforeActiveRequired, int InterlaceEnable, bool ProgressiveToInterlaceUnitInOPP,
double *Tsetup, double *Tdmbf, double *Tdmec, double *Tdmsks)
{
double TotalRepeaterDelayTime = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index 21f637ae4add..5ed117e11aa2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -409,6 +409,9 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
}
+ if (dc->debug.force_odm_combine_4to1)
+ context->bw_ctx.dml.ip.odm_combine_4to1_supported = true;
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 6f490d8d7038..56dda686e299 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -626,6 +626,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
* - Not TMZ surface
*/
if (pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe && !dcn32_is_center_timing(pipe) &&
+ !pipe->stream->hw_cursor_req &&
!(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&
(!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE &&
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index 47d785204f29..92f0a099d089 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -159,7 +159,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.dppclk_mhz = 1200.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 417.0,
+ .dscclk_mhz = 400.0,
.dtbclk_mhz = 600.0,
},
},
@@ -367,6 +367,8 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc,
clock_limits[i].socclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio;
+
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dram_speed_mts = clock_limits[i].dram_speed_mts;
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
clock_limits[i].dtbclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
index d9e63c4fdd95..17d0b4923b0c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
@@ -401,6 +401,7 @@ void dcn351_update_bw_bounding_box_fpu(struct dc *dc,
clock_limits[i].socclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dram_speed_mts = clock_limits[i].dram_speed_mts;
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
clock_limits[i].dtbclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
index 412e75eb4704..12ff65b6a7e5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
@@ -122,17 +122,6 @@ void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, const stru
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
-void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_dlg_params_st *rq_dlg_param)
-{
- dml_print("DML_RQ_DLG_CALC: =====================================\n");
- dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_DLG_PARAM_ST\n");
- dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
- print__data_rq_dlg_params_st(mode_lib, &rq_dlg_param->rq_l);
- dml_print("DML_RQ_DLG_CALC: <CHROMA>\n");
- print__data_rq_dlg_params_st(mode_lib, &rq_dlg_param->rq_c);
- dml_print("DML_RQ_DLG_CALC: =====================================\n");
-}
-
void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
index ebcd717744e5..2bc64c4081dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
@@ -35,7 +35,6 @@ void print__rq_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dp
void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_sizing_params_st *rq_sizing);
void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_dlg_params_st *rq_dlg_param);
void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_misc_params_st *rq_misc_param);
-void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_dlg_params_st *rq_dlg_param);
void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param);
void print__data_rq_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_regs_st *rq_regs);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
index 84a2de9a76d4..7ae9c0ba0c9e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
@@ -32,6 +32,7 @@
#define DML2_MAX_FMT_420_BUFFER_WIDTH 4096
#define TB_BORROWED_MAX 400
+#define DML_MAX_VSTARTUP_START 1023
// ---------------------------
// Declaration Begins
@@ -6210,6 +6211,7 @@ static dml_uint_t CalculateMaxVStartup(
dml_print("DML::%s: vblank_avail = %u\n", __func__, vblank_avail);
dml_print("DML::%s: max_vstartup_lines = %u\n", __func__, max_vstartup_lines);
#endif
+ max_vstartup_lines = (dml_uint_t) dml_min(max_vstartup_lines, DML_MAX_VSTARTUP_START);
return max_vstartup_lines;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
index dd3f43181a6e..0670e4dc4fd9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
@@ -38,6 +38,7 @@ enum dml_project_id {
dml_project_dcn35 = 3,
dml_project_dcn351 = 4,
dml_project_dcn401 = 5,
+ dml_project_dcn36 = 6,
};
enum dml_prefetch_modes {
dml_prefetch_support_uclk_fclk_and_stutter_if_possible = 0,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
index 1e56d995cd0e..930e86cdb88a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
@@ -232,7 +232,6 @@ void dml21_program_dc_pipe(struct dml2_context *dml_ctx, struct dc_state *contex
context->bw_ctx.bw.dcn.clk.dppclk_khz = pipe_ctx->plane_res.bw.dppclk_khz;
dml21_populate_mall_allocation_size(context, dml_ctx, pln_prog, pipe_ctx);
- memcpy(&context->bw_ctx.bw.dcn.mcache_allocations[pipe_ctx->pipe_idx], &pln_prog->mcache_allocation, sizeof(struct dml2_mcache_surface_allocation));
bool sub_vp_enabled = is_sub_vp_enabled(pipe_ctx->stream->ctx->dc, context);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
index fb80ba9287b6..be54f0e696ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
@@ -124,6 +124,7 @@ static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_sta
struct pipe_ctx *dc_main_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__];
struct pipe_ctx *dc_phantom_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0};
int num_pipes;
+ unsigned int dml_phantom_prog_idx;
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
@@ -137,6 +138,9 @@ static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_sta
context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes = 0;
context->bw_ctx.bw.dcn.mall_subvp_size_bytes = 0;
+ /* phantoms start after the main planes */
+ dml_phantom_prog_idx = in_ctx->v21.mode_programming.programming->display_config.num_planes;
+
for (dml_prog_idx = 0; dml_prog_idx < DML2_MAX_PLANES; dml_prog_idx++) {
pln_prog = &in_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx];
@@ -162,6 +166,16 @@ static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_sta
dml21_program_dc_pipe(in_ctx, context, dc_phantom_pipes[dc_pipe_index], pln_prog, stream_prog);
}
}
+
+ /* copy per plane mcache allocation */
+ memcpy(&context->bw_ctx.bw.dcn.mcache_allocations[dml_prog_idx], &pln_prog->mcache_allocation, sizeof(struct dml2_mcache_surface_allocation));
+ if (pln_prog->phantom_plane.valid) {
+ memcpy(&context->bw_ctx.bw.dcn.mcache_allocations[dml_phantom_prog_idx],
+ &pln_prog->phantom_plane.mcache_allocation,
+ sizeof(struct dml2_mcache_surface_allocation));
+
+ dml_phantom_prog_idx++;
+ }
}
/* assign global clocks */
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
index d2d053f2354d..0dbf886d8926 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
@@ -14,11 +14,6 @@
struct dml2_instance;
-enum dml2_status {
- dml2_success = 0,
- dml2_error_generic = 1
-};
-
enum dml2_project_id {
dml2_project_invalid = 0,
dml2_project_dcn4x_stage1 = 1,
@@ -245,6 +240,7 @@ struct dml2_per_plane_programming {
struct {
bool valid;
struct dml2_plane_parameters descriptor;
+ struct dml2_mcache_surface_allocation mcache_allocation;
struct dml2_dchub_per_pipe_register_set *pipe_regs[DML2_MAX_PLANES];
} phantom_plane;
};
@@ -458,6 +454,10 @@ struct dml2_display_cfg_programming {
} plane_info[DML2_MAX_PLANES];
struct {
+ unsigned int total_num_dpps_required;
+ } dpp;
+
+ struct {
unsigned long long total_surface_size_in_mall_bytes;
unsigned int subviewport_lines_needed_in_mall[DML2_MAX_PLANES];
} mall;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
index d68b4567e218..bb863c8c6b39 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
@@ -141,9 +141,8 @@ bool core_dcn4_initialize(struct dml2_core_initialize_in_out *in_out)
core->clean_me_up.mode_lib.ip.subvp_fw_processing_delay_us = core_dcn4_ip_caps_base.subvp_pstate_allow_width_us;
core->clean_me_up.mode_lib.ip.subvp_swath_height_margin_lines = core_dcn4_ip_caps_base.subvp_swath_height_margin_lines;
} else {
- memcpy(&core->clean_me_up.mode_lib.ip, &core_dcn4_ip_caps_base, sizeof(struct dml2_core_ip_params));
+ memcpy(&core->clean_me_up.mode_lib.ip, &core_dcn4_ip_caps_base, sizeof(struct dml2_core_ip_params));
patch_ip_params_with_ip_caps(&core->clean_me_up.mode_lib.ip, in_out->ip_caps);
-
core->clean_me_up.mode_lib.ip.imall_supported = false;
}
@@ -254,7 +253,8 @@ static void expand_implict_subvp(const struct display_configuation_with_meta *di
static void pack_mode_programming_params_with_implicit_subvp(struct dml2_core_instance *core, const struct display_configuation_with_meta *display_cfg,
const struct dml2_display_cfg *svp_expanded_display_cfg, struct dml2_display_cfg_programming *programming, struct dml2_core_scratch *scratch)
{
- unsigned int stream_index, plane_index, pipe_offset, stream_already_populated_mask, main_plane_index;
+ unsigned int stream_index, plane_index, pipe_offset, stream_already_populated_mask, main_plane_index, mcache_index;
+ unsigned int total_main_mcaches_required = 0;
int total_pipe_regs_copied = 0;
int dml_internal_pipe_index = 0;
const struct dml2_plane_parameters *main_plane;
@@ -325,6 +325,13 @@ static void pack_mode_programming_params_with_implicit_subvp(struct dml2_core_in
dml2_core_calcs_get_mall_allocation(&core->clean_me_up.mode_lib, &programming->plane_programming[plane_index].surface_size_mall_bytes, dml_internal_pipe_index);
+ memcpy(&programming->plane_programming[plane_index].mcache_allocation,
+ &display_cfg->stage2.mcache_allocations[plane_index],
+ sizeof(struct dml2_mcache_surface_allocation));
+ total_main_mcaches_required += programming->plane_programming[plane_index].mcache_allocation.num_mcaches_plane0 +
+ programming->plane_programming[plane_index].mcache_allocation.num_mcaches_plane1 -
+ (programming->plane_programming[plane_index].mcache_allocation.last_slice_sharing.plane0_plane1 ? 1 : 0);
+
for (pipe_offset = 0; pipe_offset < programming->plane_programming[plane_index].num_dpps_required; pipe_offset++) {
// Assign storage for this pipe's register values
programming->plane_programming[plane_index].pipe_regs[pipe_offset] = &programming->pipe_regs[total_pipe_regs_copied];
@@ -363,6 +370,22 @@ static void pack_mode_programming_params_with_implicit_subvp(struct dml2_core_in
memcpy(&programming->plane_programming[main_plane_index].phantom_plane.descriptor, phantom_plane, sizeof(struct dml2_plane_parameters));
dml2_core_calcs_get_mall_allocation(&core->clean_me_up.mode_lib, &programming->plane_programming[main_plane_index].svp_size_mall_bytes, dml_internal_pipe_index);
+
+ /* generate mcache allocation; phantoms reuse the main plane's mcache configuration, but in the MALL set and with unique mcache IDs beginning after all main-plane IDs */
+ memcpy(&programming->plane_programming[main_plane_index].phantom_plane.mcache_allocation,
+ &programming->plane_programming[main_plane_index].mcache_allocation,
+ sizeof(struct dml2_mcache_surface_allocation));
+ for (mcache_index = 0; mcache_index < programming->plane_programming[main_plane_index].phantom_plane.mcache_allocation.num_mcaches_plane0; mcache_index++) {
+ programming->plane_programming[main_plane_index].phantom_plane.mcache_allocation.global_mcache_ids_plane0[mcache_index] += total_main_mcaches_required;
+ programming->plane_programming[main_plane_index].phantom_plane.mcache_allocation.global_mcache_ids_mall_plane0[mcache_index] =
+ programming->plane_programming[main_plane_index].phantom_plane.mcache_allocation.global_mcache_ids_plane0[mcache_index];
+ }
+ for (mcache_index = 0; mcache_index < programming->plane_programming[main_plane_index].phantom_plane.mcache_allocation.num_mcaches_plane1; mcache_index++) {
+ programming->plane_programming[main_plane_index].phantom_plane.mcache_allocation.global_mcache_ids_plane1[mcache_index] += total_main_mcaches_required;
+ programming->plane_programming[main_plane_index].phantom_plane.mcache_allocation.global_mcache_ids_mall_plane1[mcache_index] =
+ programming->plane_programming[main_plane_index].phantom_plane.mcache_allocation.global_mcache_ids_plane1[mcache_index];
+ }
+
for (pipe_offset = 0; pipe_offset < programming->plane_programming[main_plane_index].num_dpps_required; pipe_offset++) {
// Assign storage for this pipe's register values
programming->plane_programming[main_plane_index].phantom_plane.pipe_regs[pipe_offset] = &programming->pipe_regs[total_pipe_regs_copied];
@@ -572,6 +595,10 @@ bool core_dcn4_mode_programming(struct dml2_core_mode_programming_in_out *in_out
dml2_core_calcs_get_mall_allocation(&core->clean_me_up.mode_lib, &in_out->programming->plane_programming[plane_index].surface_size_mall_bytes, dml_internal_pipe_index);
+ memcpy(&in_out->programming->plane_programming[plane_index].mcache_allocation,
+ &in_out->display_cfg->stage2.mcache_allocations[plane_index],
+ sizeof(struct dml2_mcache_surface_allocation));
+
for (pipe_offset = 0; pipe_offset < in_out->programming->plane_programming[plane_index].num_dpps_required; pipe_offset++) {
in_out->programming->plane_programming[plane_index].plane_descriptor = &in_out->programming->display_config.plane_descriptors[plane_index];
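For the mcache bookkeeping added above, a worked example with illustrative numbers (not taken from the patch) may help:

/*
 * A main plane with num_mcaches_plane0 = 3, num_mcaches_plane1 = 2 and
 * last_slice_sharing.plane0_plane1 = true contributes 3 + 2 - 1 = 4 to
 * total_main_mcaches_required. A phantom copied from that plane then has
 * total_main_mcaches_required added to each global mcache ID, so plane0
 * IDs {0, 1, 2} become {4, 5, 6} and never collide with main-plane IDs.
 */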
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h
index e62b2d3eeee6..a68bb001a346 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h
@@ -9,7 +9,4 @@ bool core_dcn4_mode_support(struct dml2_core_mode_support_in_out *in_out);
bool core_dcn4_mode_programming(struct dml2_core_mode_programming_in_out *in_out);
bool core_dcn4_populate_informative(struct dml2_core_populate_informative_in_out *in_out);
bool core_dcn4_calculate_mcache_allocation(struct dml2_calculate_mcache_allocation_in_out *in_out);
-
-bool core_dcn4_unit_test(void);
-
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index 8ed49a9df378..4c504cb0e1c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -15,6 +15,7 @@
//#define DML_MODE_SUPPORT_USE_DPM_DRAM_BW
//#define DML_GLOBAL_PREFETCH_CHECK
#define ALLOW_SDPIF_RATE_LIMIT_PRE_CSTATE
+#define DML_MAX_VSTARTUP_START 1023
const char *dml2_core_internal_bw_type_str(enum dml2_core_internal_bw_type bw_type)
{
@@ -2352,6 +2353,7 @@ static void calculate_mcache_row_bytes(
if (p->full_vp_height == 0 && p->full_vp_width == 0) {
*p->num_mcaches = 0;
*p->mcache_row_bytes = 0;
+ *p->mcache_row_bytes_per_channel = 0;
} else {
blk_bytes = dml_get_tile_block_size_bytes(p->tiling_mode);
@@ -2420,15 +2422,18 @@ static void calculate_mcache_row_bytes(
// If this mcache_row_bytes for the full viewport of the surface is less than or equal to mcache_bytes,
// then one mcache can be used for this request stream. If not, it is useful to know the width of the viewport that can be supported in the mcache_bytes.
- if (p->gpuvm_enable || !p->surf_vert) {
- *p->mcache_row_bytes = mvmpg_per_row_ub * meta_per_mvmpg_per_channel_ub;
+ if (p->gpuvm_enable || p->surf_vert) {
+ *p->mcache_row_bytes_per_channel = mvmpg_per_row_ub * meta_per_mvmpg_per_channel_ub;
+ *p->mcache_row_bytes = *p->mcache_row_bytes_per_channel * p->num_chans;
} else { // horizontal and gpuvm disable
*p->mcache_row_bytes = *p->meta_row_width_ub * p->blk_height * p->bytes_per_pixel / 256;
- *p->mcache_row_bytes = (unsigned int)math_ceil2((double)*p->mcache_row_bytes / p->num_chans, p->mcache_line_size_bytes);
+ if (p->mcache_line_size_bytes != 0)
+ *p->mcache_row_bytes_per_channel = (unsigned int)math_ceil2((double)*p->mcache_row_bytes / p->num_chans, p->mcache_line_size_bytes);
}
*p->dcc_dram_bw_pref_overhead_factor = 1 + math_max2(1.0 / 256.0, *p->mcache_row_bytes / p->full_swath_bytes); // dcc_dr_oh_pref
- *p->num_mcaches = (unsigned int)math_ceil2((double)*p->mcache_row_bytes / p->mcache_size_bytes, 1);
+ if (p->mcache_size_bytes != 0)
+ *p->num_mcaches = (unsigned int)math_ceil2((double)*p->mcache_row_bytes_per_channel / p->mcache_size_bytes, 1);
mvmpg_per_mcache = p->mcache_size_bytes / meta_per_mvmpg_per_channel_ub;
*p->mvmpg_per_mcache_lb = (unsigned int)math_floor2(mvmpg_per_mcache, 1);
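
A sketch (not the driver code) of the per-channel accounting introduced above, restricted to the horizontal, GPUVM-disabled branch: mcache_row_bytes is now also tracked per memory channel, and the number of mcaches needed is the per-channel row bytes divided by the mcache size, rounded up. The zero checks mirror the new "!= 0" guards in the hunk; the inputs in main are hypothetical.

#include <stdio.h>
#include <math.h>

static unsigned int num_mcaches_needed(unsigned int mcache_row_bytes,
				       unsigned int num_chans,
				       unsigned int mcache_line_size_bytes,
				       unsigned int mcache_size_bytes)
{
	unsigned int row_bytes_per_channel = 0;

	/* split the row across channels, rounded up to the mcache line size */
	if (mcache_line_size_bytes != 0)
		row_bytes_per_channel = (unsigned int)(ceil((double)mcache_row_bytes / num_chans /
							    mcache_line_size_bytes) * mcache_line_size_bytes);
	if (mcache_size_bytes == 0)
		return 0;
	/* one mcache per mcache_size_bytes of per-channel row data */
	return (unsigned int)ceil((double)row_bytes_per_channel / mcache_size_bytes);
}

int main(void)
{
	/* hypothetical: 1 MiB row over 16 channels, 64 B lines, 32 KiB mcaches -> 2 */
	printf("%u\n", num_mcaches_needed(1 << 20, 16, 64, 32 << 10));
	return 0;
}
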
@@ -2449,6 +2454,7 @@ static void calculate_mcache_row_bytes(
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: mcache_row_bytes = %u\n", __func__, *p->mcache_row_bytes);
+ dml2_printf("DML::%s: mcache_row_bytes_per_channel = %u\n", __func__, *p->mcache_row_bytes_per_channel);
dml2_printf("DML::%s: num_mcaches = %u\n", __func__, *p->num_mcaches);
#endif
DML2_ASSERT(*p->num_mcaches > 0);
@@ -2465,11 +2471,13 @@ static void calculate_mcache_setting(
*p->num_mcaches_l = 0;
*p->mcache_row_bytes_l = 0;
+ *p->mcache_row_bytes_per_channel_l = 0;
*p->dcc_dram_bw_nom_overhead_factor_l = 1.0;
*p->dcc_dram_bw_pref_overhead_factor_l = 1.0;
*p->num_mcaches_c = 0;
*p->mcache_row_bytes_c = 0;
+ *p->mcache_row_bytes_per_channel_c = 0;
*p->dcc_dram_bw_nom_overhead_factor_c = 1.0;
*p->dcc_dram_bw_pref_overhead_factor_c = 1.0;
@@ -2505,6 +2513,7 @@ static void calculate_mcache_setting(
// output
l->l_p.num_mcaches = p->num_mcaches_l;
l->l_p.mcache_row_bytes = p->mcache_row_bytes_l;
+ l->l_p.mcache_row_bytes_per_channel = p->mcache_row_bytes_per_channel_l;
l->l_p.dcc_dram_bw_nom_overhead_factor = p->dcc_dram_bw_nom_overhead_factor_l;
l->l_p.dcc_dram_bw_pref_overhead_factor = p->dcc_dram_bw_pref_overhead_factor_l;
l->l_p.mvmpg_width = &l->mvmpg_width_l;
@@ -2514,7 +2523,7 @@ static void calculate_mcache_setting(
l->l_p.mvmpg_per_mcache_lb = &l->mvmpg_per_mcache_lb_l;
calculate_mcache_row_bytes(scratch, &l->l_p);
- dml2_assert(*p->num_mcaches_l > 0);
+ DML2_ASSERT(*p->num_mcaches_l > 0);
if (l->is_dual_plane) {
l->c_p.num_chans = p->num_chans;
@@ -2540,6 +2549,7 @@ static void calculate_mcache_setting(
// output
l->c_p.num_mcaches = p->num_mcaches_c;
l->c_p.mcache_row_bytes = p->mcache_row_bytes_c;
+ l->c_p.mcache_row_bytes_per_channel = p->mcache_row_bytes_per_channel_c;
l->c_p.dcc_dram_bw_nom_overhead_factor = p->dcc_dram_bw_nom_overhead_factor_c;
l->c_p.dcc_dram_bw_pref_overhead_factor = p->dcc_dram_bw_pref_overhead_factor_c;
l->c_p.mvmpg_width = &l->mvmpg_width_c;
@@ -2549,12 +2559,12 @@ static void calculate_mcache_setting(
l->c_p.mvmpg_per_mcache_lb = &l->mvmpg_per_mcache_lb_c;
calculate_mcache_row_bytes(scratch, &l->c_p);
- dml2_assert(*p->num_mcaches_c > 0);
+ DML2_ASSERT(*p->num_mcaches_c > 0);
}
// Sharing for iMALL access
- l->mcache_remainder_l = *p->mcache_row_bytes_l % p->mcache_size_bytes;
- l->mcache_remainder_c = *p->mcache_row_bytes_c % p->mcache_size_bytes;
+ l->mcache_remainder_l = *p->mcache_row_bytes_per_channel_l % p->mcache_size_bytes;
+ l->mcache_remainder_c = *p->mcache_row_bytes_per_channel_c % p->mcache_size_bytes;
l->mvmpg_access_width_l = p->surf_vert ? l->mvmpg_height_l : l->mvmpg_width_l;
l->mvmpg_access_width_c = p->surf_vert ? l->mvmpg_height_c : l->mvmpg_width_c;
@@ -2577,11 +2587,14 @@ static void calculate_mcache_setting(
if (l->is_dual_plane) {
l->avg_mcache_element_size_c = l->meta_row_width_c / *p->num_mcaches_c;
- if (!p->imall_enable || (*p->mall_comb_mcache_l == *p->mall_comb_mcache_c)) {
- l->lc_comb_last_mcache_size = (unsigned int)((l->mcache_remainder_l * (*p->mall_comb_mcache_l ? 2 : 1) * l->luma_time_factor) +
- (l->mcache_remainder_c * (*p->mall_comb_mcache_c ? 2 : 1)));
+ /* if either remainder is 0, then mcache sharing is not needed or not possible due to full utilization */
+ if (l->mcache_remainder_l && l->mcache_remainder_c) {
+ if (!p->imall_enable || (*p->mall_comb_mcache_l == *p->mall_comb_mcache_c)) {
+ l->lc_comb_last_mcache_size = (unsigned int)((l->mcache_remainder_l * (*p->mall_comb_mcache_l ? 2 : 1) * l->luma_time_factor) +
+ (l->mcache_remainder_c * (*p->mall_comb_mcache_c ? 2 : 1)));
+ }
+ *p->lc_comb_mcache = (l->lc_comb_last_mcache_size <= p->mcache_size_bytes) && (*p->mall_comb_mcache_l == *p->mall_comb_mcache_c);
}
- *p->lc_comb_mcache = (l->lc_comb_last_mcache_size <= p->mcache_size_bytes) && (*p->mall_comb_mcache_l == *p->mall_comb_mcache_c);
}
#ifdef __DML_VBA_DEBUG__
@@ -3397,7 +3410,7 @@ static void calculate_cursor_req_attributes(
} else {
if (cursor_width > 0) {
dml2_printf("DML::%s: Invalid cursor_bpp = %d\n", __func__, cursor_bpp);
- dml2_assert(0);
+ DML2_ASSERT(0);
}
}
@@ -3440,7 +3453,7 @@ static void calculate_cursor_urgent_burst_factor(
CursorBufferSizeInTime = LinesInCursorBuffer * LineTime;
if (CursorBufferSizeInTime - UrgentLatency <= 0) {
*NotEnoughUrgentLatencyHiding = 1;
- *UrgentBurstFactorCursor = 0;
+ *UrgentBurstFactorCursor = 1;
} else {
*NotEnoughUrgentLatencyHiding = 0;
*UrgentBurstFactorCursor = CursorBufferSizeInTime / (CursorBufferSizeInTime - UrgentLatency);
@@ -3503,7 +3516,7 @@ static void CalculateUrgentBurstFactor(
DETBufferSizeInTimeLuma = math_floor2(LinesInDETLuma, SwathHeightY) * LineTime / VRatio;
if (DETBufferSizeInTimeLuma - UrgentLatency <= 0) {
*NotEnoughUrgentLatencyHiding = 1;
- *UrgentBurstFactorLuma = 0;
+ *UrgentBurstFactorLuma = 1;
} else {
*UrgentBurstFactorLuma = DETBufferSizeInTimeLuma / (DETBufferSizeInTimeLuma - UrgentLatency);
}
@@ -3514,7 +3527,7 @@ static void CalculateUrgentBurstFactor(
DETBufferSizeInTimeChroma = math_floor2(LinesInDETChroma, SwathHeightC) * LineTime / VRatioC;
if (DETBufferSizeInTimeChroma - UrgentLatency <= 0) {
*NotEnoughUrgentLatencyHiding = 1;
- *UrgentBurstFactorChroma = 0;
+ *UrgentBurstFactorChroma = 1;
} else {
*UrgentBurstFactorChroma = DETBufferSizeInTimeChroma / (DETBufferSizeInTimeChroma - UrgentLatency);
}
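
The hunks above change the fallback value of the urgent burst factors from 0 to 1 when the buffer drains faster than the urgent latency can be hidden. A small sketch of that pattern, with hypothetical inputs; the fallback of 1 is neutral, since these factors multiply bandwidth terms elsewhere and a factor of 0 would drop that surface's contribution.

#include <stdio.h>
#include <stdbool.h>

static double urgent_burst_factor(double buffer_time_us, double urgent_latency_us,
				  bool *not_enough_hiding)
{
	if (buffer_time_us - urgent_latency_us <= 0.0) {
		/* buffer cannot hide the urgent latency: flag it, keep a neutral factor */
		*not_enough_hiding = true;
		return 1.0; /* was 0 before this change */
	}
	*not_enough_hiding = false;
	return buffer_time_us / (buffer_time_us - urgent_latency_us);
}

int main(void)
{
	bool flag;

	printf("%f\n", urgent_burst_factor(20.0, 4.0, &flag)); /* 1.25 */
	printf("%f\n", urgent_burst_factor(3.0, 4.0, &flag));  /* 1.0, flag set */
	return 0;
}
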
@@ -3709,13 +3722,12 @@ static unsigned int CalculateMaxVStartup(
double line_time_us = (double)timing->h_total / ((double)timing->pixel_clock_khz / 1000);
unsigned int vblank_actual = timing->v_total - timing->v_active;
unsigned int vblank_nom_default_in_line = (unsigned int)math_floor2((double)vblank_nom_default_us / line_time_us, 1.0);
- unsigned int vblank_nom_input = (unsigned int)math_min2(timing->vblank_nom, vblank_nom_default_in_line);
- unsigned int vblank_avail = (vblank_nom_input == 0) ? vblank_nom_default_in_line : vblank_nom_input;
+ unsigned int vblank_avail = (timing->vblank_nom == 0) ? vblank_nom_default_in_line : (unsigned int)timing->vblank_nom;
vblank_size = (unsigned int)math_min2(vblank_actual, vblank_avail);
if (timing->interlaced && !ptoi_supported)
- max_vstartup_lines = (unsigned int)(math_floor2(vblank_size / 2.0, 1.0));
+ max_vstartup_lines = (unsigned int)(math_floor2((vblank_size - 1) / 2.0, 1.0));
else
max_vstartup_lines = vblank_size - (unsigned int)math_max2(1.0, math_ceil2(write_back_delay_us / line_time_us, 1.0));
#ifdef __DML_VBA_DEBUG__
@@ -3726,6 +3738,7 @@ static unsigned int CalculateMaxVStartup(
dml2_printf("DML::%s: vblank_avail = %u\n", __func__, vblank_avail);
dml2_printf("DML::%s: max_vstartup_lines = %u\n", __func__, max_vstartup_lines);
#endif
+ max_vstartup_lines = (unsigned int)math_min2(max_vstartup_lines, DML_MAX_VSTARTUP_START);
return max_vstartup_lines;
}
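
A sketch (not the driver code) of CalculateMaxVStartup as changed above: a non-zero vblank_nom is now used directly instead of being clamped to the default, interlaced timings without PTOI lose one extra line, and the result is capped at DML_MAX_VSTARTUP_START (1023, from the new #define earlier in this file). The timing numbers in main are hypothetical.

#include <stdio.h>
#include <math.h>

#define DML_MAX_VSTARTUP_START 1023

static unsigned int max_vstartup_lines(unsigned int v_total, unsigned int v_active,
				       unsigned int vblank_nom,
				       unsigned int vblank_nom_default_in_line,
				       int interlaced_no_ptoi,
				       double write_back_delay_us, double line_time_us)
{
	unsigned int vblank_actual = v_total - v_active;
	unsigned int vblank_avail = vblank_nom == 0 ? vblank_nom_default_in_line : vblank_nom;
	unsigned int vblank_size = vblank_actual < vblank_avail ? vblank_actual : vblank_avail;
	unsigned int lines;

	if (interlaced_no_ptoi)
		lines = (unsigned int)floor((vblank_size - 1) / 2.0);
	else
		lines = vblank_size - (unsigned int)fmax(1.0, ceil(write_back_delay_us / line_time_us));

	/* new: clamp to the maximum programmable vstartup */
	return lines < DML_MAX_VSTARTUP_START ? lines : DML_MAX_VSTARTUP_START;
}

int main(void)
{
	/* hypothetical timing: v_total=2250, v_active=2160, vblank_nom=60 -> 59 lines */
	printf("%u\n", max_vstartup_lines(2250, 2160, 60, 80, 0, 0.0, 7.4));
	return 0;
}
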
@@ -3894,8 +3907,8 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
p->SwathHeightC[k] = MaximumSwathHeightC[k] / 2;
RoundedUpSwathSizeBytesY[k] = p->full_swath_bytes_l[k] / 2;
RoundedUpSwathSizeBytesC[k] = p->full_swath_bytes_c[k] / 2;
- p->request_size_bytes_luma[k] = ((p->BytePerPixY[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;;
- p->request_size_bytes_chroma[k] = ((p->BytePerPixC[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;;
+ p->request_size_bytes_luma[k] = ((p->BytePerPixY[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
+ p->request_size_bytes_chroma[k] = ((p->BytePerPixC[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
}
if (p->SwathHeightC[k] == 0)
@@ -4910,6 +4923,7 @@ static double get_urgent_bandwidth_required(
double ReadBandwidthChroma[],
double PrefetchBandwidthLuma[],
double PrefetchBandwidthChroma[],
+ double PrefetchBandwidthOto[],
double excess_vactive_fill_bw_l[],
double excess_vactive_fill_bw_c[],
double cursor_bw[],
@@ -4973,8 +4987,9 @@ static double get_urgent_bandwidth_required(
l->vm_row_bw = NumberOfDPP[k] * prefetch_vmrow_bw[k];
l->flip_and_active_bw = l->per_plane_flip_bw[k] + ReadBandwidthLuma[k] * l->adj_factor_p0 + ReadBandwidthChroma[k] * l->adj_factor_p1 + cursor_bw[k] * l->adj_factor_cur;
l->flip_and_prefetch_bw = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthLuma[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre;
+ l->flip_and_prefetch_bw_oto = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthOto[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre;
l->active_and_excess_bw = (ReadBandwidthLuma[k] + excess_vactive_fill_bw_l[k]) * l->tmp_nom_adj_factor_p0 + (ReadBandwidthChroma[k] + excess_vactive_fill_bw_c[k]) * l->tmp_nom_adj_factor_p1 + dpte_row_bw[k] + meta_row_bw[k];
- surface_required_bw[k] = math_max4(l->vm_row_bw, l->flip_and_active_bw, l->flip_and_prefetch_bw, l->active_and_excess_bw);
+ surface_required_bw[k] = math_max5(l->vm_row_bw, l->flip_and_active_bw, l->flip_and_prefetch_bw, l->active_and_excess_bw, l->flip_and_prefetch_bw_oto);
/* export peak required bandwidth for the surface */
surface_peak_required_bw[k] = math_max2(surface_required_bw[k], surface_peak_required_bw[k]);
@@ -5045,7 +5060,7 @@ static void CalculateExtraLatency(
double HostVMInefficiencyFactorPrefetch,
unsigned int HostVMMinPageSize,
enum dml2_qos_param_type qos_type,
- bool max_oustanding_when_urgent_expected,
+ bool max_outstanding_when_urgent_expected,
unsigned int max_outstanding_requests,
unsigned int request_size_bytes_luma[],
unsigned int request_size_bytes_chroma[],
@@ -5093,7 +5108,7 @@ static void CalculateExtraLatency(
if (qos_type == dml2_qos_param_type_dcn4x) {
*ExtraLatency_sr = dchub_arb_to_ret_delay / DCFCLK;
*ExtraLatency = *ExtraLatency_sr;
- if (max_oustanding_when_urgent_expected)
+ if (max_outstanding_when_urgent_expected)
*ExtraLatency = *ExtraLatency + (ROBBufferSizeInKByte * 1024 - max_outstanding_requests * max_request_size_bytes) / ReturnBW;
} else {
*ExtraLatency_sr = dchub_arb_to_ret_delay / DCFCLK + RoundTripPingLatencyCycles / FabricClock + ReorderingBytes / ReturnBW;
@@ -5108,7 +5123,7 @@ static void CalculateExtraLatency(
dml2_printf("DML::%s: qos_type=%u\n", __func__, qos_type);
dml2_printf("DML::%s: hostvm_mode=%u\n", __func__, hostvm_mode);
dml2_printf("DML::%s: Tex_trips=%u\n", __func__, Tex_trips);
- dml2_printf("DML::%s: max_oustanding_when_urgent_expected=%u\n", __func__, max_oustanding_when_urgent_expected);
+ dml2_printf("DML::%s: max_outstanding_when_urgent_expected=%u\n", __func__, max_outstanding_when_urgent_expected);
dml2_printf("DML::%s: FabricClock=%f\n", __func__, FabricClock);
dml2_printf("DML::%s: DCFCLK=%f\n", __func__, DCFCLK);
dml2_printf("DML::%s: ReturnBW=%f\n", __func__, ReturnBW);
@@ -5172,6 +5187,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->Tsw_est3 = 0.0;
s->cursor_prefetch_bytes = 0;
*p->prefetch_cursor_bw = 0;
+ *p->RequiredPrefetchBWOTO = 0.0;
dcc_mrq_enable = (p->dcc_enable && p->mrq_present);
@@ -5385,6 +5401,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->prefetch_bw_oto += (p->swath_width_chroma_ub * p->myPipe->BytePerPixelC) / s->LineTime;
}
+ /* oto prefetch bw should always be less than total vactive bw */
+ //DML2_ASSERT(s->prefetch_bw_oto < s->per_pipe_vactive_sw_bw * p->myPipe->DPPPerSurface);
+
s->prefetch_bw_oto = math_max2(s->per_pipe_vactive_sw_bw, s->prefetch_bw_oto) * p->mall_prefetch_sdp_overhead_factor;
s->prefetch_bw_oto = math_min2(s->prefetch_bw_oto, *p->prefetch_sw_bytes/(s->min_Lsw_oto*s->LineTime));
@@ -5395,6 +5414,12 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
p->vm_bytes * p->HostVMInefficiencyFactor / (31 * s->LineTime) - *p->Tno_bw,
(p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / (15 * s->LineTime));
+ /* the oto bw needs to be output even if the oto schedule isn't being used, to avoid an ms/mp mismatch:
+ * mp will fail if ms decides to use the equ schedule, mp decides to use the oto schedule,
+ * and the required bandwidth increases when going from ms to mp
+ */
+ *p->RequiredPrefetchBWOTO = s->prefetch_bw_oto;
+
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: vactive_sw_bw_l = %f\n", __func__, p->vactive_sw_bw_l);
dml2_printf("DML::%s: vactive_sw_bw_c = %f\n", __func__, p->vactive_sw_bw_c);
@@ -5787,7 +5812,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML::%s: cursor_prefetch_bytes = %d\n", __func__, s->cursor_prefetch_bytes);
dml2_printf("DML::%s: prefetch_cursor_bw = %f\n", __func__, *p->prefetch_cursor_bw);
#endif
- dml2_assert(*p->dst_y_prefetch < 64);
+ DML2_ASSERT(*p->dst_y_prefetch < 64);
unsigned int min_lsw_required = (unsigned int)math_max2(2, p->tdlut_drain_time / s->LineTime);
if (s->LinesToRequestPrefetchPixelData >= min_lsw_required && s->prefetch_bw_equ > 0) {
@@ -5980,7 +6005,7 @@ static unsigned int find_max_impact_plane(unsigned int this_plane_idx, unsigned
}
}
if (max_idx <= 0) {
- dml2_assert(max_idx >= 0);
+ DML2_ASSERT(max_idx >= 0);
max_idx = this_plane_idx;
}
@@ -6155,6 +6180,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
l->zero_array, //PrefetchBandwidthLuma,
l->zero_array, //PrefetchBandwidthChroma,
+ l->zero_array, //PrefetchBWOTO
l->zero_array,
l->zero_array,
l->zero_array,
@@ -6191,6 +6217,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
l->zero_array, //PrefetchBandwidthLuma,
l->zero_array, //PrefetchBandwidthChroma,
+ l->zero_array, //PrefetchBWOTO
p->excess_vactive_fill_bw_l,
p->excess_vactive_fill_bw_c,
p->cursor_bw,
@@ -6227,6 +6254,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
p->prefetch_bandwidth_l,
p->prefetch_bandwidth_c,
+ p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw
p->excess_vactive_fill_bw_l,
p->excess_vactive_fill_bw_c,
p->cursor_bw,
@@ -6263,6 +6291,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
p->prefetch_bandwidth_l,
p->prefetch_bandwidth_c,
+ p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw
p->excess_vactive_fill_bw_l,
p->excess_vactive_fill_bw_c,
p->cursor_bw,
@@ -6299,6 +6328,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
p->prefetch_bandwidth_l,
p->prefetch_bandwidth_c,
+ p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw
p->excess_vactive_fill_bw_l,
p->excess_vactive_fill_bw_c,
p->cursor_bw,
@@ -6322,7 +6352,7 @@ static void calculate_peak_bandwidth_required(
dml2_printf("DML::%s: urg_bandwidth_required_qual[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_bandwidth_required[m][n]);
dml2_printf("DML::%s: non_urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->non_urg_bandwidth_required[m][n]);
#endif
- dml2_assert(p->urg_bandwidth_required[m][n] >= p->non_urg_bandwidth_required[m][n]);
+ DML2_ASSERT(p->urg_bandwidth_required[m][n] >= p->non_urg_bandwidth_required[m][n]);
}
}
}
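
A sketch (not the driver code) of the change to get_urgent_bandwidth_required above: the per-surface required bandwidth used to be the maximum of four terms and now also considers the flip-plus-prefetch bandwidth computed with the OTO prefetch schedule, so mode support and mode programming cannot disagree when the OTO bandwidth exceeds the other terms. The numbers in main are hypothetical.

#include <stdio.h>

static double max2(double a, double b) { return a > b ? a : b; }

static double surface_required_bw(double vm_row_bw, double flip_and_active_bw,
				  double flip_and_prefetch_bw,
				  double active_and_excess_bw,
				  double flip_and_prefetch_bw_oto)
{
	double m = max2(vm_row_bw, flip_and_active_bw);

	m = max2(m, flip_and_prefetch_bw);
	m = max2(m, active_and_excess_bw);
	return max2(m, flip_and_prefetch_bw_oto); /* new fifth term */
}

int main(void)
{
	/* the OTO term dominates here, so it raises the required bandwidth */
	printf("%f\n", surface_required_bw(1200.0, 3400.0, 3600.0, 3500.0, 3900.0));
	return 0;
}
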
@@ -6454,7 +6484,7 @@ static void calculate_immediate_flip_bandwidth_support(
dml2_printf("DML::%s: urg_bandwidth_required_flip = %f\n", __func__, urg_bandwidth_required_flip[eval_state][n]);
dml2_printf("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok);
#endif
- dml2_assert(urg_bandwidth_required_flip[eval_state][n] >= non_urg_bandwidth_required_flip[eval_state][n]);
+ DML2_ASSERT(urg_bandwidth_required_flip[eval_state][n] >= non_urg_bandwidth_required_flip[eval_state][n]);
}
*frac_urg_bandwidth_flip = (frac_urg_bw_flip_sdp > frac_urg_bw_flip_dram) ? frac_urg_bw_flip_sdp : frac_urg_bw_flip_dram;
@@ -6568,7 +6598,7 @@ static void CalculateFlipSchedule(
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: min_row_time = %f\n", __func__, l->min_row_time);
#endif
- dml2_assert(l->min_row_time > 0);
+ DML2_ASSERT(l->min_row_time > 0);
if (use_lb_flip_bw) {
// For mode check, calculation the flip bw requirement with worst case flip time
@@ -7144,7 +7174,8 @@ static unsigned int get_active_min_uclk_dpm_index(unsigned long uclk_freq_khz, c
}
}
- dml2_assert(clk_entry_found);
+ if (!clk_entry_found)
+ DML2_ASSERT(clk_entry_found);
#if defined(__DML_VBA_DEBUG__)
dml2_printf("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
dml2_printf("DML::%s: index = %d\n", __func__, i);
@@ -8753,11 +8784,13 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
calculate_mcache_setting_params->num_mcaches_l = &mode_lib->ms.num_mcaches_l[k];
calculate_mcache_setting_params->mcache_row_bytes_l = &mode_lib->ms.mcache_row_bytes_l[k];
+ calculate_mcache_setting_params->mcache_row_bytes_per_channel_l = &mode_lib->ms.mcache_row_bytes_per_channel_l[k];
calculate_mcache_setting_params->mcache_offsets_l = mode_lib->ms.mcache_offsets_l[k];
calculate_mcache_setting_params->mcache_shift_granularity_l = &mode_lib->ms.mcache_shift_granularity_l[k];
calculate_mcache_setting_params->num_mcaches_c = &mode_lib->ms.num_mcaches_c[k];
calculate_mcache_setting_params->mcache_row_bytes_c = &mode_lib->ms.mcache_row_bytes_c[k];
+ calculate_mcache_setting_params->mcache_row_bytes_per_channel_c = &mode_lib->ms.mcache_row_bytes_per_channel_c[k];
calculate_mcache_setting_params->mcache_offsets_c = mode_lib->ms.mcache_offsets_c[k];
calculate_mcache_setting_params->mcache_shift_granularity_c = &mode_lib->ms.mcache_shift_granularity_c[k];
@@ -9061,6 +9094,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->ms.VRatioPreC[k];
CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->ms.RequiredPrefetchPixelDataBWLuma[k]; // prefetch_sw_bw_l
CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->ms.RequiredPrefetchPixelDataBWChroma[k]; // prefetch_sw_bw_c
+ CalculatePrefetchSchedule_params->RequiredPrefetchBWOTO = &mode_lib->ms.RequiredPrefetchBWOTO[k];
CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->ms.NoTimeForDynamicMetadata[k];
CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->ms.Tno_bw[k];
CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->ms.Tno_bw_flip[k];
@@ -9205,6 +9239,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
@@ -9371,6 +9406,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
@@ -10408,13 +10444,13 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
for (k = 0; k < s->num_active_planes; ++k) {
unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index;
- dml2_assert(cfg_support_info->stream_support_info[stream_index].odms_used <= 4);
- dml2_assert(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 4 ||
+ DML2_ASSERT(cfg_support_info->stream_support_info[stream_index].odms_used <= 4);
+ DML2_ASSERT(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 4 ||
cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 2 ||
cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 1);
if (cfg_support_info->stream_support_info[stream_index].odms_used > 1)
- dml2_assert(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 1);
+ DML2_ASSERT(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 1);
switch (cfg_support_info->stream_support_info[stream_index].odms_used) {
case (4):
@@ -10440,7 +10476,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
for (k = 0; k < s->num_active_planes; ++k) {
mode_lib->mp.NoOfDPP[k] = cfg_support_info->plane_support_info[k].dpps_used;
mode_lib->mp.Dppclk[k] = programming->plane_programming[k].min_clocks.dcn4x.dppclk_khz / 1000.0;
- dml2_assert(mode_lib->mp.Dppclk[k] > 0);
+ DML2_ASSERT(mode_lib->mp.Dppclk[k] > 0);
}
for (k = 0; k < s->num_active_planes; ++k) {
@@ -10452,14 +10488,14 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.Dispclk = programming->min_clocks.dcn4x.dispclk_khz / 1000.0;
mode_lib->mp.DCFCLKDeepSleep = programming->min_clocks.dcn4x.deepsleep_dcfclk_khz / 1000.0;
- dml2_assert(mode_lib->mp.Dcfclk > 0);
- dml2_assert(mode_lib->mp.FabricClock > 0);
- dml2_assert(mode_lib->mp.dram_bw_mbps > 0);
- dml2_assert(mode_lib->mp.uclk_freq_mhz > 0);
- dml2_assert(mode_lib->mp.GlobalDPPCLK > 0);
- dml2_assert(mode_lib->mp.Dispclk > 0);
- dml2_assert(mode_lib->mp.DCFCLKDeepSleep > 0);
- dml2_assert(s->SOCCLK > 0);
+ DML2_ASSERT(mode_lib->mp.Dcfclk > 0);
+ DML2_ASSERT(mode_lib->mp.FabricClock > 0);
+ DML2_ASSERT(mode_lib->mp.dram_bw_mbps > 0);
+ DML2_ASSERT(mode_lib->mp.uclk_freq_mhz > 0);
+ DML2_ASSERT(mode_lib->mp.GlobalDPPCLK > 0);
+ DML2_ASSERT(mode_lib->mp.Dispclk > 0);
+ DML2_ASSERT(mode_lib->mp.DCFCLKDeepSleep > 0);
+ DML2_ASSERT(s->SOCCLK > 0);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: num_active_planes = %u\n", __func__, s->num_active_planes);
@@ -10847,11 +10883,13 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_mcache_setting_params->num_mcaches_l = &mode_lib->mp.num_mcaches_l[k];
calculate_mcache_setting_params->mcache_row_bytes_l = &mode_lib->mp.mcache_row_bytes_l[k];
+ calculate_mcache_setting_params->mcache_row_bytes_per_channel_l = &mode_lib->mp.mcache_row_bytes_per_channel_l[k];
calculate_mcache_setting_params->mcache_offsets_l = mode_lib->mp.mcache_offsets_l[k];
calculate_mcache_setting_params->mcache_shift_granularity_l = &mode_lib->mp.mcache_shift_granularity_l[k];
calculate_mcache_setting_params->num_mcaches_c = &mode_lib->mp.num_mcaches_c[k];
calculate_mcache_setting_params->mcache_row_bytes_c = &mode_lib->mp.mcache_row_bytes_c[k];
+ calculate_mcache_setting_params->mcache_row_bytes_per_channel_c = &mode_lib->mp.mcache_row_bytes_per_channel_c[k];
calculate_mcache_setting_params->mcache_offsets_c = mode_lib->mp.mcache_offsets_c[k];
calculate_mcache_setting_params->mcache_shift_granularity_c = &mode_lib->mp.mcache_shift_granularity_c[k];
@@ -11287,6 +11325,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->mp.VRatioPrefetchC[k];
CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->mp.RequiredPrefetchPixelDataBWLuma[k];
CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->mp.RequiredPrefetchPixelDataBWChroma[k];
+ CalculatePrefetchSchedule_params->RequiredPrefetchBWOTO = &s->dummy_single_array[0][k];
CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->mp.NotEnoughTimeForDynamicMetadata[k];
CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->mp.Tno_bw[k];
CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->mp.Tno_bw_flip[k];
@@ -11429,6 +11468,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->mp.vactive_sw_bw_c;
calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->mp.RequiredPrefetchPixelDataBWLuma;
calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->mp.RequiredPrefetchPixelDataBWChroma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_oto = s->dummy_single_array[0];
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->mp.excess_vactive_fill_bw_l;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->mp.excess_vactive_fill_bw_c;
calculate_peak_bandwidth_params->cursor_bw = mode_lib->mp.cursor_bw;
@@ -11568,6 +11608,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_peak_bandwidth_params->meta_row_bw = mode_lib->mp.meta_row_bw;
calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->mp.prefetch_cursor_bw;
calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->mp.prefetch_vmrow_bw;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_oto = s->dummy_single_array[0];
calculate_peak_bandwidth_params->flip_bw = mode_lib->mp.final_flip_bw;
calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->mp.UrgentBurstFactorLuma;
calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->mp.UrgentBurstFactorChroma;
@@ -12373,7 +12414,7 @@ static void rq_dlg_get_dlg_reg(
dml2_printf("DML_DLG::%s: Calculation for pipe_idx=%d\n", __func__, pipe_idx);
l->plane_idx = dml_get_plane_idx(mode_lib, pipe_idx);
- dml2_assert(l->plane_idx < DML2_MAX_PLANES);
+ DML2_ASSERT(l->plane_idx < DML2_MAX_PLANES);
l->source_format = dml2_444_8;
l->odm_mode = dml2_odm_mode_bypass;
@@ -13108,8 +13149,11 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.watermarks.temp_read_or_ppt_watermark_us = dml_get_wm_temp_read_or_ppt(mode_lib);
out->informative.mall.total_surface_size_in_mall_bytes = 0;
- for (k = 0; k < out->display_config.num_planes; ++k)
+ out->informative.dpp.total_num_dpps_required = 0;
+ for (k = 0; k < out->display_config.num_planes; ++k) {
out->informative.mall.total_surface_size_in_mall_bytes += mode_lib->mp.SurfaceSizeInTheMALL[k];
+ out->informative.dpp.total_num_dpps_required += mode_lib->mp.NoOfDPP[k];
+ }
out->informative.qos.min_return_latency_in_dcfclk = mode_lib->mp.min_return_latency_in_dcfclk;
out->informative.qos.urgent_latency_us = dml_get_urgent_latency(mode_lib);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c
deleted file mode 100644
index 8f3c1c0b1cc1..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c
+++ /dev/null
@@ -1,12413 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-
-#include "dml2_internal_shared_types.h"
-#include "dml2_core_shared.h"
-#include "dml2_debug.h"
-#include "lib_float_math.h"
-
-double dml2_core_shared_div_rem(double dividend, unsigned int divisor, unsigned int *remainder)
-{
- *remainder = ((dividend / divisor) - (int)(dividend / divisor) > 0);
- return dividend / divisor;
-
-}
-
-/*
- * START OF STATIC HELPERS
- * These static methods are baseline implemenations from DCN4. These should NEVER
- * be modified when developing new DCNs. New DCN code should replace the static helpers
- * using the function pointer pattern.
- */
-
-static void dml2_print_dml_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only);
-static void get_stream_output_bpp(double *out_bpp, const struct dml2_display_cfg *display_cfg);
-static unsigned int dml_round_to_multiple(unsigned int num, unsigned int multiple, bool up);
-static unsigned int dml_get_num_active_pipes(int unsigned num_planes, const struct core_display_cfg_support_info *cfg_support_info);
-static void dml_calc_pipe_plane_mapping(const struct core_display_cfg_support_info *cfg_support_info, unsigned int *pipe_plane);
-static bool dml_is_phantom_pipe(const struct dml2_plane_parameters *plane_cfg);
-static bool dml_get_is_phantom_pipe(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, unsigned int pipe_idx);
-static void CalculateMaxDETAndMinCompressedBufferSize(unsigned int ConfigReturnBufferSizeInKByte,
- unsigned int ConfigReturnBufferSegmentSizeInKByte,
- unsigned int ROBBufferSizeInKByte,
- unsigned int MaxNumDPP,
- unsigned int nomDETInKByteOverrideEnable, // VBA_DELTA, allow DV to override default DET size
- unsigned int nomDETInKByteOverrideValue, // VBA_DELTA
- bool is_mrq_present,
-
- // Output
- unsigned int *MaxTotalDETInKByte,
- unsigned int *nomDETInKByte,
- unsigned int *MinCompressedBufferSizeInKByte);
- static void PixelClockAdjustmentForProgressiveToInterlaceUnit(const struct dml2_display_cfg *display_cfg, bool ptoi_supported, double *PixelClockBackEnd);
-static unsigned int dml_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode);
-static bool dml_is_vertical_rotation(enum dml2_rotation_angle Scan);
-static int unsigned dml_get_gfx_version(enum dml2_swizzle_mode sw_mode);
-static void CalculateBytePerPixelAndBlockSizes(enum dml2_source_format_class SourcePixelFormat,
- enum dml2_swizzle_mode SurfaceTiling,
- unsigned int pitch_y,
- unsigned int pitch_c,
-
- // Output
- unsigned int *BytePerPixelY,
- unsigned int *BytePerPixelC,
- double *BytePerPixelDETY,
- double *BytePerPixelDETC,
- unsigned int *BlockHeight256BytesY,
- unsigned int *BlockHeight256BytesC,
- unsigned int *BlockWidth256BytesY,
- unsigned int *BlockWidth256BytesC,
- unsigned int *MacroTileHeightY,
- unsigned int *MacroTileHeightC,
- unsigned int *MacroTileWidthY,
- unsigned int *MacroTileWidthC,
- bool *surf_linear128_l,
- bool *surf_linear128_c);
-static void CalculateSinglePipeDPPCLKAndSCLThroughput(
- double HRatio,
- double HRatioChroma,
- double VRatio,
- double VRatioChroma,
- double MaxDCHUBToPSCLThroughput,
- double MaxPSCLToLBThroughput,
- double PixelClock,
- enum dml2_source_format_class SourcePixelFormat,
- unsigned int HTaps,
- unsigned int HTapsChroma,
- unsigned int VTaps,
- unsigned int VTapsChroma,
-
- // Output
- double *PSCL_THROUGHPUT,
- double *PSCL_THROUGHPUT_CHROMA,
- double *DPPCLKUsingSingleDPP);
-static void CalculateSwathWidth(
- const struct dml2_display_cfg *display_cfg,
- bool ForceSingleDPP,
- unsigned int NumberOfActiveSurfaces,
- enum dml2_odm_mode ODMMode[],
- unsigned int BytePerPixY[],
- unsigned int BytePerPixC[],
- unsigned int Read256BytesBlockHeightY[],
- unsigned int Read256BytesBlockHeightC[],
- unsigned int Read256BytesBlockWidthY[],
- unsigned int Read256BytesBlockWidthC[],
- bool surf_linear128_l[],
- bool surf_linear128_c[],
- unsigned int DPPPerSurface[],
-
- // Output
- unsigned int req_per_swath_ub_l[],
- unsigned int req_per_swath_ub_c[],
- unsigned int SwathWidthSingleDPPY[],
- unsigned int SwathWidthSingleDPPC[],
- unsigned int SwathWidthY[], // per-pipe
- unsigned int SwathWidthC[], // per-pipe
- unsigned int MaximumSwathHeightY[],
- unsigned int MaximumSwathHeightC[],
- unsigned int swath_width_luma_ub[], // per-pipe
- unsigned int swath_width_chroma_ub[]); // per-pipe
-static bool UnboundedRequest(bool unb_req_force_en, bool unb_req_force_val, unsigned int TotalNumberOfActiveDPP, bool NoChromaOrLinear);
-static void CalculateDETBufferSize(struct dml2_core_shared_calculate_det_buffer_size_params *p);
-static double CalculateRequiredDispclk(enum dml2_odm_mode ODMMode, double PixelClock);
-static double TruncToValidBPP(
- struct dml2_core_shared_TruncToValidBPP_locals *l,
- double LinkBitRate,
- unsigned int Lanes,
- unsigned int HTotal,
- unsigned int HActive,
- double PixelClock,
- double DesiredBPP,
- bool DSCEnable,
- enum dml2_output_encoder_class Output,
- enum dml2_output_format_class Format,
- unsigned int DSCInputBitPerComponent,
- unsigned int DSCSlices,
- unsigned int AudioRate,
- unsigned int AudioLayout,
- enum dml2_odm_mode ODMModeNoDSC,
- enum dml2_odm_mode ODMModeDSC,
-
- // Output
- unsigned int *RequiredSlots);
-static unsigned int dscceComputeDelay(
- unsigned int bpc,
- double BPP,
- unsigned int sliceWidth,
- unsigned int numSlices,
- enum dml2_output_format_class pixelFormat,
- enum dml2_output_encoder_class Output);
-static unsigned int dscComputeDelay(enum dml2_output_format_class pixelFormat, enum dml2_output_encoder_class Output);
-static unsigned int CalculateHostVMDynamicLevels(bool GPUVMEnable, bool HostVMEnable, unsigned int HostVMMinPageSize, unsigned int HostVMMaxNonCachedPageTableLevels);
-static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_and_row_bytes_params *p);
-static unsigned int CalculatePrefetchSourceLines(
- double VRatio,
- unsigned int VTaps,
- bool Interlace,
- bool ProgressiveToInterlaceUnitInOPP,
- unsigned int SwathHeight,
- enum dml2_rotation_angle RotationAngle,
- bool mirrored,
- bool ViewportStationary,
- unsigned int SwathWidth,
- unsigned int ViewportHeight,
- unsigned int ViewportXStart,
- unsigned int ViewportYStart,
-
- // Output
- unsigned int *VInitPreFill,
- unsigned int *MaxNumSwath);
-static void CalculateRowBandwidth(
- bool GPUVMEnable,
- bool use_one_row_for_frame,
- enum dml2_source_format_class SourcePixelFormat,
- double VRatio,
- double VRatioChroma,
- bool DCCEnable,
- double LineTime,
- unsigned int PixelPTEBytesPerRowLuma,
- unsigned int PixelPTEBytesPerRowChroma,
- unsigned int dpte_row_height_luma,
- unsigned int dpte_row_height_chroma,
-
- bool mrq_present,
- unsigned int meta_row_bytes_per_row_ub_l,
- unsigned int meta_row_bytes_per_row_ub_c,
- unsigned int meta_row_height_luma,
- unsigned int meta_row_height_chroma,
-
- // Output
- double *dpte_row_bw,
- double *meta_row_bw);
-static void CalculateMALLUseForStaticScreen(
- const struct dml2_display_cfg *display_cfg,
- unsigned int NumberOfActiveSurfaces,
- unsigned int MALLAllocatedForDCN,
- unsigned int SurfaceSizeInMALL[],
- bool one_row_per_frame_fits_in_buffer[],
-
- // Output
- bool is_using_mall_for_ss[]);
-static void CalculateDCCConfiguration(
- bool DCCEnabled,
- bool DCCProgrammingAssumesScanDirectionUnknown,
- enum dml2_source_format_class SourcePixelFormat,
- unsigned int SurfaceWidthLuma,
- unsigned int SurfaceWidthChroma,
- unsigned int SurfaceHeightLuma,
- unsigned int SurfaceHeightChroma,
- unsigned int nomDETInKByte,
- unsigned int RequestHeight256ByteLuma,
- unsigned int RequestHeight256ByteChroma,
- enum dml2_swizzle_mode TilingFormat,
- unsigned int BytePerPixelY,
- unsigned int BytePerPixelC,
- double BytePerPixelDETY,
- double BytePerPixelDETC,
- enum dml2_rotation_angle RotationAngle,
-
- // Output
- enum dml2_core_internal_request_type *RequestLuma,
- enum dml2_core_internal_request_type *RequestChroma,
- unsigned int *MaxUncompressedBlockLuma,
- unsigned int *MaxUncompressedBlockChroma,
- unsigned int *MaxCompressedBlockLuma,
- unsigned int *MaxCompressedBlockChroma,
- unsigned int *IndependentBlockLuma,
- unsigned int *IndependentBlockChroma);
-static void calculate_mcache_row_bytes(struct dml2_core_internal_scratch *scratch, struct dml2_core_calcs_calculate_mcache_row_bytes_params *p);
-static void calculate_mcache_setting(struct dml2_core_internal_scratch *scratch, struct dml2_core_calcs_calculate_mcache_setting_params *p);
-static void calculate_mall_bw_overhead_factor(
- double mall_prefetch_sdp_overhead_factor[],
- double mall_prefetch_dram_overhead_factor[],
-
- // input
- const struct dml2_display_cfg *display_cfg,
- unsigned int num_active_planes);
-static double dml_get_return_bandwidth_available(
- const struct dml2_soc_bb *soc,
- enum dml2_core_internal_soc_state_type state_type,
- enum dml2_core_internal_bw_type bw_type,
- bool is_avg_bw,
- bool is_hvm_en,
- bool is_hvm_only,
- double dcflk_mhz,
- double fclk_mhz,
- double dram_bw_mbps);
-static void calculate_bandwidth_available(
- double avg_bandwidth_available_min[dml2_core_internal_soc_state_max],
- double avg_bandwidth_available[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double urg_bandwidth_available_min[dml2_core_internal_soc_state_max], // min between SDP and DRAM
- double urg_bandwidth_available[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double urg_bandwidth_available_vm_only[dml2_core_internal_soc_state_max],
- double urg_bandwidth_available_pixel_and_vm[dml2_core_internal_soc_state_max],
-
- const struct dml2_soc_bb *soc,
- bool HostVMEnable,
- double dcfclk_mhz,
- double fclk_mhz,
- double dram_bw_mbps);
-static void calculate_avg_bandwidth_required(
- double avg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
-
- // input
- const struct dml2_display_cfg *display_cfg,
- unsigned int num_active_planes,
- double ReadBandwidthLuma[],
- double ReadBandwidthChroma[],
- double cursor_bw[],
- double dcc_dram_bw_nom_overhead_factor_p0[],
- double dcc_dram_bw_nom_overhead_factor_p1[],
- double mall_prefetch_dram_overhead_factor[],
- double mall_prefetch_sdp_overhead_factor[]);
-static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch, struct dml2_core_calcs_CalculateVMRowAndSwath_params *p);
-static double CalculateUrgentLatency(
- double UrgentLatencyPixelDataOnly,
- double UrgentLatencyPixelMixedWithVMData,
- double UrgentLatencyVMDataOnly,
- bool DoUrgentLatencyAdjustment,
- double UrgentLatencyAdjustmentFabricClockComponent,
- double UrgentLatencyAdjustmentFabricClockReference,
- double FabricClock,
- double uclk_freq_mhz,
- enum dml2_qos_param_type qos_type,
- unsigned int urgent_ramp_uclk_cycles,
- unsigned int df_qos_response_time_fclk_cycles,
- unsigned int max_round_trip_to_furthest_cs_fclk_cycles,
- unsigned int mall_overhead_fclk_cycles,
- double umc_urgent_ramp_latency_margin,
- double fabric_max_transport_latency_margin);
-static double CalculateTripToMemory(
- double UrgLatency,
- double FabricClock,
- double uclk_freq_mhz,
- enum dml2_qos_param_type qos_type,
- unsigned int trip_to_memory_uclk_cycles,
- unsigned int max_round_trip_to_furthest_cs_fclk_cycles,
- unsigned int mall_overhead_fclk_cycles,
- double umc_max_latency_margin,
- double fabric_max_transport_latency_margin);
-static double CalculateMetaTripToMemory(
- double UrgLatency,
- double FabricClock,
- double uclk_freq_mhz,
- enum dml2_qos_param_type qos_type,
- unsigned int meta_trip_to_memory_uclk_cycles,
- unsigned int meta_trip_to_memory_fclk_cycles,
- double umc_max_latency_margin,
- double fabric_max_transport_latency_margin);
-static void calculate_cursor_req_attributes(
- unsigned int cursor_width,
- unsigned int cursor_bpp,
-
- // output
- unsigned int *cursor_lines_per_chunk,
- unsigned int *cursor_bytes_per_line,
- unsigned int *cursor_bytes_per_chunk,
- unsigned int *cursor_bytes);
-static void calculate_cursor_urgent_burst_factor(
- unsigned int CursorBufferSize,
- unsigned int CursorWidth,
- unsigned int cursor_bytes_per_chunk,
- unsigned int cursor_lines_per_chunk,
- double LineTime,
- double UrgentLatency,
-
- double *UrgentBurstFactorCursor,
- bool *NotEnoughUrgentLatencyHiding);
-static void CalculateUrgentBurstFactor(
- const struct dml2_plane_parameters *plane_cfg,
- unsigned int swath_width_luma_ub,
- unsigned int swath_width_chroma_ub,
- unsigned int SwathHeightY,
- unsigned int SwathHeightC,
- double LineTime,
- double UrgentLatency,
- double VRatio,
- double VRatioC,
- double BytePerPixelInDETY,
- double BytePerPixelInDETC,
- unsigned int DETBufferSizeY,
- unsigned int DETBufferSizeC,
- // Output
- double *UrgentBurstFactorLuma,
- double *UrgentBurstFactorChroma,
- bool *NotEnoughUrgentLatencyHiding);
-static void CalculateDCFCLKDeepSleep(
- const struct dml2_display_cfg *display_cfg,
- unsigned int NumberOfActiveSurfaces,
- unsigned int BytePerPixelY[],
- unsigned int BytePerPixelC[],
- unsigned int SwathWidthY[],
- unsigned int SwathWidthC[],
- unsigned int DPPPerSurface[],
- double PSCL_THROUGHPUT[],
- double PSCL_THROUGHPUT_CHROMA[],
- double Dppclk[],
- double ReadBandwidthLuma[],
- double ReadBandwidthChroma[],
- unsigned int ReturnBusWidth,
-
- // Output
- double *DCFClkDeepSleep);
-static double CalculateWriteBackDelay(
- enum dml2_source_format_class WritebackPixelFormat,
- double WritebackHRatio,
- double WritebackVRatio,
- unsigned int WritebackVTaps,
- unsigned int WritebackDestinationWidth,
- unsigned int WritebackDestinationHeight,
- unsigned int WritebackSourceHeight,
- unsigned int HTotal);
-static unsigned int CalculateMaxVStartup(
- bool ptoi_supported,
- unsigned int vblank_nom_default_us,
- const struct dml2_timing_cfg *timing,
- double write_back_delay_us);
-static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch *scratch, struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params *p);
-static void CalculateODMMode(
- unsigned int MaximumPixelsPerLinePerDSCUnit,
- unsigned int HActive,
- enum dml2_output_encoder_class Output,
- enum dml2_odm_mode ODMUse,
- double MaxDispclk,
- bool DSCEnable,
- unsigned int TotalNumberOfActiveDPP,
- unsigned int MaxNumDPP,
- double PixelClock,
-
- // Output
- bool *TotalAvailablePipesSupport,
- unsigned int *NumberOfDPP,
- enum dml2_odm_mode *ODMMode,
- double *RequiredDISPCLKPerSurface);
-static void CalculateOutputLink(
- struct dml2_core_internal_scratch *s,
- double PHYCLK,
- double PHYCLKD18,
- double PHYCLKD32,
- double Downspreading,
- bool IsMainSurfaceUsingTheIndicatedTiming,
- enum dml2_output_encoder_class Output,
- enum dml2_output_format_class OutputFormat,
- unsigned int HTotal,
- unsigned int HActive,
- double PixelClockBackEnd,
- double ForcedOutputLinkBPP,
- unsigned int DSCInputBitPerComponent,
- unsigned int NumberOfDSCSlices,
- double AudioSampleRate,
- unsigned int AudioSampleLayout,
- enum dml2_odm_mode ODMModeNoDSC,
- enum dml2_odm_mode ODMModeDSC,
- enum dml2_dsc_enable_option DSCEnable,
- unsigned int OutputLinkDPLanes,
- enum dml2_output_link_dp_rate OutputLinkDPRate,
-
- // Output
- bool *RequiresDSC,
- bool *RequiresFEC,
- double *OutBpp,
- enum dml2_core_internal_output_type *OutputType,
- enum dml2_core_internal_output_type_rate *OutputRate,
- unsigned int *RequiredSlots);
-static double CalculateWriteBackDISPCLK(
- enum dml2_source_format_class WritebackPixelFormat,
- double PixelClock,
- double WritebackHRatio,
- double WritebackVRatio,
- unsigned int WritebackHTaps,
- unsigned int WritebackVTaps,
- unsigned int WritebackSourceWidth,
- unsigned int WritebackDestinationWidth,
- unsigned int HTotal,
- unsigned int WritebackLineBufferSize);
-static double RequiredDTBCLK(
- bool DSCEnable,
- double PixelClock,
- enum dml2_output_format_class OutputFormat,
- double OutputBpp,
- unsigned int DSCSlices,
- unsigned int HTotal,
- unsigned int HActive,
- unsigned int AudioRate,
- unsigned int AudioLayout);
-static unsigned int DSCDelayRequirement(
- bool DSCEnabled,
- enum dml2_odm_mode ODMMode,
- unsigned int DSCInputBitPerComponent,
- double OutputBpp,
- unsigned int HActive,
- unsigned int HTotal,
- unsigned int NumberOfDSCSlices,
- enum dml2_output_format_class OutputFormat,
- enum dml2_output_encoder_class Output,
- double PixelClock,
- double PixelClockBackEnd);
-static void CalculateSurfaceSizeInMall(
- const struct dml2_display_cfg *display_cfg,
- unsigned int NumberOfActiveSurfaces,
- unsigned int MALLAllocatedForDCN,
- unsigned int BytesPerPixelY[],
- unsigned int BytesPerPixelC[],
- unsigned int Read256BytesBlockWidthY[],
- unsigned int Read256BytesBlockWidthC[],
- unsigned int Read256BytesBlockHeightY[],
- unsigned int Read256BytesBlockHeightC[],
- unsigned int ReadBlockWidthY[],
- unsigned int ReadBlockWidthC[],
- unsigned int ReadBlockHeightY[],
- unsigned int ReadBlockHeightC[],
-
- // Output
- unsigned int SurfaceSizeInMALL[],
- bool *ExceededMALLSize);
-static void calculate_tdlut_setting(struct dml2_core_internal_scratch *scratch, struct dml2_core_calcs_calculate_tdlut_setting_params *p);
-static void CalculateTarb(
- const struct dml2_display_cfg *display_cfg,
- unsigned int PixelChunkSizeInKByte,
- unsigned int NumberOfActiveSurfaces,
- unsigned int NumberOfDPP[],
- unsigned int dpte_group_bytes[],
- unsigned int tdlut_bytes_per_group[],
- double HostVMInefficiencyFactor,
- double HostVMInefficiencyFactorPrefetch,
- unsigned int HostVMMinPageSize,
- double ReturnBW,
-
- unsigned int MetaChunkSize,
-
- // output
- double *Tarb,
- double *Tarb_prefetch);
-static double CalculateTWait(long reserved_vblank_time_ns, double UrgentLatency, double Ttrip);
-static void CalculateVUpdateAndDynamicMetadataParameters(
- unsigned int MaxInterDCNTileRepeaters,
- double Dppclk,
- double Dispclk,
- double DCFClkDeepSleep,
- double PixelClock,
- unsigned int HTotal,
- unsigned int VBlank,
- unsigned int DynamicMetadataTransmittedBytes,
- unsigned int DynamicMetadataLinesBeforeActiveRequired,
- unsigned int InterlaceEnable,
- bool ProgressiveToInterlaceUnitInOPP,
-
- // Output
- double *TSetup,
- double *Tdmbf,
- double *Tdmec,
- double *Tdmsks,
- unsigned int *VUpdateOffsetPix,
- unsigned int *VUpdateWidthPix,
- unsigned int *VReadyOffsetPix);
-static double get_urgent_bandwidth_required(
- struct dml2_core_shared_get_urgent_bandwidth_required_locals *l,
- const struct dml2_display_cfg *display_cfg,
- enum dml2_core_internal_soc_state_type state_type,
- enum dml2_core_internal_bw_type bw_type,
- bool inc_flip_bw, // including flip bw
- unsigned int NumberOfActiveSurfaces,
- unsigned int NumberOfDPP[],
- double dcc_dram_bw_nom_overhead_factor_p0[],
- double dcc_dram_bw_nom_overhead_factor_p1[],
- double dcc_dram_bw_pref_overhead_factor_p0[],
- double dcc_dram_bw_pref_overhead_factor_p1[],
- double mall_prefetch_sdp_overhead_factor[],
- double mall_prefetch_dram_overhead_factor[],
- double ReadBandwidthLuma[],
- double ReadBandwidthChroma[],
- double PrefetchBandwidthLuma[],
- double PrefetchBandwidthChroma[],
- double cursor_bw[],
- double dpte_row_bw[],
- double meta_row_bw[],
- double prefetch_cursor_bw[],
- double prefetch_vmrow_bw[],
- double flip_bw[],
- double UrgentBurstFactorLuma[],
- double UrgentBurstFactorChroma[],
- double UrgentBurstFactorCursor[],
- double UrgentBurstFactorLumaPre[],
- double UrgentBurstFactorChromaPre[],
- double UrgentBurstFactorCursorPre[]);
-static void CalculateExtraLatency(
- const struct dml2_display_cfg *display_cfg,
- unsigned int ROBBufferSizeInKByte,
- unsigned int RoundTripPingLatencyCycles,
- unsigned int ReorderingBytes,
- double DCFCLK,
- double FabricClock,
- unsigned int PixelChunkSizeInKByte,
- double ReturnBW,
- unsigned int NumberOfActiveSurfaces,
- unsigned int NumberOfDPP[],
- unsigned int dpte_group_bytes[],
- unsigned int tdlut_bytes_per_group[],
- double HostVMInefficiencyFactor,
- double HostVMInefficiencyFactorPrefetch,
- unsigned int HostVMMinPageSize,
- enum dml2_qos_param_type qos_type,
- bool max_oustanding_when_urgent_expected,
- unsigned int max_outstanding_requests,
- unsigned int request_size_bytes_luma[],
- unsigned int request_size_bytes_chroma[],
- unsigned int MetaChunkSize,
- unsigned int dchub_arb_to_ret_delay,
- double Ttrip,
- unsigned int hostvm_mode,
-
- // output
- double *ExtraLatency, // Tex
- double *ExtraLatency_sr, // Tex_sr
- double *ExtraLatencyPrefetch);
-static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch, struct dml2_core_calcs_CalculatePrefetchSchedule_params *p);
-static void calculate_peak_bandwidth_required(
- struct dml2_core_internal_scratch *s,
-
- // output
- double urg_vactive_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double non_urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
-
- // input
- const struct dml2_display_cfg *display_cfg,
- unsigned int inc_flip_bw,
- unsigned int NumberOfActiveSurfaces,
- unsigned int NumberOfDPP[],
- double dcc_dram_bw_nom_overhead_factor_p0[],
- double dcc_dram_bw_nom_overhead_factor_p1[],
- double dcc_dram_bw_pref_overhead_factor_p0[],
- double dcc_dram_bw_pref_overhead_factor_p1[],
- double mall_prefetch_sdp_overhead_factor[],
- double mall_prefetch_dram_overhead_factor[],
- double ReadBandwidthLuma[],
- double ReadBandwidthChroma[],
- double PrefetchBandwidthLuma[],
- double PrefetchBandwidthChroma[],
- double cursor_bw[],
- double dpte_row_bw[],
- double meta_row_bw[],
- double prefetch_cursor_bw[],
- double prefetch_vmrow_bw[],
- double flip_bw[],
- double UrgentBurstFactorLuma[],
- double UrgentBurstFactorChroma[],
- double UrgentBurstFactorCursor[],
- double UrgentBurstFactorLumaPre[],
- double UrgentBurstFactorChromaPre[],
- double UrgentBurstFactorCursorPre[]);
-static void check_urgent_bandwidth_support(
- double *frac_urg_bandwidth_nom,
- double *frac_urg_bandwidth_mall,
- bool *vactive_bandwidth_support_ok, // vactive ok
- bool *bandwidth_support_ok, // max of vm, prefetch, vactive all ok
-
- unsigned int mall_allocated_for_dcn_mbytes,
- double non_urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double urg_vactive_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double urg_bandwidth_available[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max]);
-static double get_bandwidth_available_for_immediate_flip(
- enum dml2_core_internal_soc_state_type eval_state,
- double urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max], // no flip
- double urg_bandwidth_available[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max]);
-static void calculate_immediate_flip_bandwidth_support(
- // Output
- double *frac_urg_bandwidth_flip,
- bool *flip_bandwidth_support_ok,
-
- // Input
- enum dml2_core_internal_soc_state_type eval_state,
- double urg_bandwidth_required_flip[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double urg_bandwidth_available[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max]);
-static void CalculateFlipSchedule(
- struct dml2_core_internal_scratch *s,
- bool iflip_enable,
- bool use_lb_flip_bw,
- double HostVMInefficiencyFactor,
- double Tvm_trips_flip,
- double Tr0_trips_flip,
- double Tvm_trips_flip_rounded,
- double Tr0_trips_flip_rounded,
- bool GPUVMEnable,
- double vm_bytes, // vm_bytes
- double DPTEBytesPerRow, // dpte_row_bytes
- double BandwidthAvailableForImmediateFlip,
- unsigned int TotImmediateFlipBytes,
- enum dml2_source_format_class SourcePixelFormat,
- double LineTime,
- double VRatio,
- double VRatioChroma,
- double Tno_bw_flip,
- unsigned int dpte_row_height,
- unsigned int dpte_row_height_chroma,
- bool use_one_row_for_frame_flip,
- unsigned int max_flip_time_us,
- unsigned int per_pipe_flip_bytes,
- unsigned int meta_row_bytes,
- unsigned int meta_row_height,
- unsigned int meta_row_height_chroma,
- bool dcc_mrq_enable,
-
- // Output
- double *dst_y_per_vm_flip,
- double *dst_y_per_row_flip,
- double *final_flip_bw,
- bool *ImmediateFlipSupportedForPipe);
-static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- struct dml2_core_internal_scratch *scratch,
- struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params *p);
-static double uclk_khz_to_dram_bw_mbps(unsigned long uclk_khz, const struct dml2_dram_params *dram_config);
-static double dram_bw_kbps_to_uclk_mhz(unsigned long long bw_kbps, const struct dml2_dram_params *dram_config);
-static unsigned int get_qos_param_index(unsigned long uclk_freq_khz, const struct dml2_dcn4_uclk_dpm_dependent_qos_params *per_uclk_dpm_params);
-static unsigned int get_active_min_uclk_dpm_index(unsigned long uclk_freq_khz, const struct dml2_soc_state_table *clk_table);
-static unsigned int get_pipe_flip_bytes(
- double hostvm_inefficiency_factor,
- unsigned int vm_bytes,
- unsigned int dpte_row_bytes,
- unsigned int meta_row_bytes);
-static void calculate_hostvm_inefficiency_factor(
- double *HostVMInefficiencyFactor,
- double *HostVMInefficiencyFactorPrefetch,
-
- bool gpuvm_enable,
- bool hostvm_enable,
- unsigned int remote_iommu_outstanding_translations,
- unsigned int max_outstanding_reqs,
- double urg_bandwidth_avail_active_pixel_and_vm,
- double urg_bandwidth_avail_active_vm_only);
-static void CalculatePixelDeliveryTimes(
- const struct dml2_display_cfg *display_cfg,
- const struct core_display_cfg_support_info *cfg_support_info,
- unsigned int NumberOfActiveSurfaces,
- double VRatioPrefetchY[],
- double VRatioPrefetchC[],
- unsigned int swath_width_luma_ub[],
- unsigned int swath_width_chroma_ub[],
- double PSCL_THROUGHPUT[],
- double PSCL_THROUGHPUT_CHROMA[],
- double Dppclk[],
- unsigned int BytePerPixelC[],
- unsigned int req_per_swath_ub_l[],
- unsigned int req_per_swath_ub_c[],
-
- // Output
- double DisplayPipeLineDeliveryTimeLuma[],
- double DisplayPipeLineDeliveryTimeChroma[],
- double DisplayPipeLineDeliveryTimeLumaPrefetch[],
- double DisplayPipeLineDeliveryTimeChromaPrefetch[],
- double DisplayPipeRequestDeliveryTimeLuma[],
- double DisplayPipeRequestDeliveryTimeChroma[],
- double DisplayPipeRequestDeliveryTimeLumaPrefetch[],
- double DisplayPipeRequestDeliveryTimeChromaPrefetch[]);
-static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTETimes_params *p);
-static void CalculateVMGroupAndRequestTimes(
- const struct dml2_display_cfg *display_cfg,
- unsigned int NumberOfActiveSurfaces,
- unsigned int BytePerPixelC[],
- double dst_y_per_vm_vblank[],
- double dst_y_per_vm_flip[],
- unsigned int dpte_row_width_luma_ub[],
- unsigned int dpte_row_width_chroma_ub[],
- unsigned int vm_group_bytes[],
- unsigned int dpde0_bytes_per_frame_ub_l[],
- unsigned int dpde0_bytes_per_frame_ub_c[],
- unsigned int tdlut_pte_bytes_per_frame[],
- unsigned int meta_pte_bytes_per_frame_ub_l[],
- unsigned int meta_pte_bytes_per_frame_ub_c[],
- bool mrq_present,
-
- // Output
- double TimePerVMGroupVBlank[],
- double TimePerVMGroupFlip[],
- double TimePerVMRequestVBlank[],
- double TimePerVMRequestFlip[]);
-static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratch, struct dml2_core_calcs_CalculateStutterEfficiency_params *p);
-static bool dml_is_dual_plane(enum dml2_source_format_class source_format);
-static unsigned int dml_get_plane_idx(const struct dml2_core_internal_display_mode_lib *mode_lib, unsigned int pipe_idx);
-static void rq_dlg_get_wm_regs(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_dchub_watermark_regs *wm_regs);
-static unsigned int log_and_substract_if_non_zero(unsigned int a, unsigned int subtrahend);
-static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs,
- const struct dml2_display_cfg *display_cfg,
- const struct dml2_core_internal_display_mode_lib *mode_lib,
- unsigned int pipe_idx);
-static void rq_dlg_get_dlg_reg(struct dml2_core_internal_scratch *s,
- struct dml2_display_dlg_regs *disp_dlg_regs,
- struct dml2_display_ttu_regs *disp_ttu_regs,
- const struct dml2_display_cfg *display_cfg,
- const struct dml2_core_internal_display_mode_lib *mode_lib,
- const unsigned int pipe_idx);
-static void rq_dlg_get_arb_params(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_display_arb_regs *arb_param);
-
-/*
- * END OF STATIC HELPERS
- */
-
-bool dml2_core_shared_mode_support(struct dml2_core_calcs_mode_support_ex *in_out_params)
-{
- struct dml2_core_internal_display_mode_lib *mode_lib = in_out_params->mode_lib;
- const struct dml2_display_cfg *display_cfg = in_out_params->in_display_cfg;
- const struct dml2_mcg_min_clock_table *min_clk_table = in_out_params->min_clk_table;
-
- struct dml2_core_calcs_mode_support_locals *s = &mode_lib->scratch.dml_core_mode_support_locals;
- struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params *CalculateWatermarks_params = &mode_lib->scratch.CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params;
- struct dml2_core_calcs_CalculateVMRowAndSwath_params *CalculateVMRowAndSwath_params = &mode_lib->scratch.CalculateVMRowAndSwath_params;
- struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params *CalculateSwathAndDETConfiguration_params = &mode_lib->scratch.CalculateSwathAndDETConfiguration_params;
- struct dml2_core_calcs_CalculatePrefetchSchedule_params *CalculatePrefetchSchedule_params = &mode_lib->scratch.CalculatePrefetchSchedule_params;
- struct dml2_core_calcs_calculate_tdlut_setting_params *calculate_tdlut_setting_params = &mode_lib->scratch.calculate_tdlut_setting_params;
- struct dml2_core_calcs_calculate_mcache_setting_params *calculate_mcache_setting_params = &mode_lib->scratch.calculate_mcache_setting_params;
- unsigned int k, m, n;
-
- memset(&mode_lib->ms, 0, sizeof(struct dml2_core_internal_mode_support));
-
- mode_lib->ms.num_active_planes = display_cfg->num_planes;
- get_stream_output_bpp(s->OutputBpp, display_cfg);
-
- mode_lib->ms.state_idx = in_out_params->min_clk_index;
- mode_lib->ms.SOCCLK = ((double)mode_lib->soc.clk_table.socclk.clk_values_khz[0] / 1000);
- mode_lib->ms.DCFCLK = ((double)min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_dcfclk_khz / 1000);
- mode_lib->ms.FabricClock = ((double)min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_fclk_khz / 1000);
- mode_lib->ms.MaxDCFCLK = (double)min_clk_table->max_clocks_khz.dcfclk / 1000;
- mode_lib->ms.MaxFabricClock = (double)min_clk_table->max_clocks_khz.fclk / 1000;
- mode_lib->ms.max_dispclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dispclk / 1000;
- mode_lib->ms.max_dscclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dscclk / 1000;
- mode_lib->ms.max_dppclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dppclk / 1000;
- mode_lib->ms.uclk_freq_mhz = dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config);
- mode_lib->ms.dram_bw_mbps = ((double)min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps / 1000);
- mode_lib->ms.qos_param_index = get_qos_param_index((unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000.0), mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params);
- mode_lib->ms.active_min_uclk_dpm_index = get_active_min_uclk_dpm_index((unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000.0), &mode_lib->soc.clk_table);
-
-#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: --- START --- \n", __func__);
- dml2_printf("DML::%s: num_active_planes = %u\n", __func__, mode_lib->ms.num_active_planes);
- dml2_printf("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index);
- dml2_printf("DML::%s: qos_param_index = %0d\n", __func__, mode_lib->ms.qos_param_index);
- dml2_printf("DML::%s: SOCCLK = %f\n", __func__, mode_lib->ms.SOCCLK);
- dml2_printf("DML::%s: dram_bw_mbps = %f\n", __func__, mode_lib->ms.dram_bw_mbps);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz);
- dml2_printf("DML::%s: DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK);
- dml2_printf("DML::%s: FabricClock = %f\n", __func__, mode_lib->ms.FabricClock);
- dml2_printf("DML::%s: MaxDCFCLK = %f\n", __func__, mode_lib->ms.MaxDCFCLK);
- dml2_printf("DML::%s: max_dispclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dispclk_freq_mhz);
- dml2_printf("DML::%s: max_dscclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dscclk_freq_mhz);
- dml2_printf("DML::%s: max_dppclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dppclk_freq_mhz);
- dml2_printf("DML::%s: MaxFabricClock = %f\n", __func__, mode_lib->ms.MaxFabricClock);
- dml2_printf("DML::%s: ip.compressed_buffer_segment_size_in_kbytes = %u\n", __func__, mode_lib->ip.compressed_buffer_segment_size_in_kbytes);
- dml2_printf("DML::%s: ip.dcn_mrq_present = %u\n", __func__, mode_lib->ip.dcn_mrq_present);
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++)
- dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
-
- // dml2_printf_dml_policy(&mode_lib->ms.policy);
- // dml2_printf_dml_display_cfg_timing(&display_cfg->timing, mode_lib->ms.num_active_planes);
- // dml2_printf_dml_display_cfg_plane(&display_cfg->plane, mode_lib->ms.num_active_planes);
- // dml2_printf_dml_display_cfg_surface(&display_cfg->surface, mode_lib->ms.num_active_planes);
- // dml2_printf_dml_display_cfg_output(&display_cfg->output, mode_lib->ms.num_active_planes);
-#endif
-
- CalculateMaxDETAndMinCompressedBufferSize(
- mode_lib->ip.config_return_buffer_size_in_kbytes,
- mode_lib->ip.config_return_buffer_segment_size_in_kbytes,
- mode_lib->ip.rob_buffer_size_kbytes,
- mode_lib->ip.max_num_dpp,
- display_cfg->overrides.hw.force_nom_det_size_kbytes.enable,
- display_cfg->overrides.hw.force_nom_det_size_kbytes.value,
- mode_lib->ip.dcn_mrq_present,
-
- /* Output */
- &mode_lib->ms.MaxTotalDETInKByte,
- &mode_lib->ms.NomDETInKByte,
- &mode_lib->ms.MinCompressedBufferSizeInKByte);
-
- PixelClockAdjustmentForProgressiveToInterlaceUnit(display_cfg, mode_lib->ip.ptoi_supported, s->PixelClockBackEnd);
-
- /*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/
-
- /*Scale Ratio, taps Support Check*/
- mode_lib->ms.support.ScaleRatioAndTapsSupport = true;
- // Many core tests are still setting scaling parameters "incorrectly"
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->plane_descriptors[k].composition.scaler_info.enabled == false
- && (dml2_core_shared_is_420(display_cfg->plane_descriptors[k].pixel_format)
- || display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio != 1.0
- || display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_taps != 1.0
- || display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio != 1.0
- || display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps != 1.0)) {
- mode_lib->ms.support.ScaleRatioAndTapsSupport = false;
- } else if (display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps < 1.0 || display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps > 8.0
- || display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_taps < 1.0 || display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_taps > 8.0
- || (display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_taps > 1.0 && (display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_taps % 2) == 1)
- || display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio > mode_lib->ip.max_hscl_ratio
- || display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio > mode_lib->ip.max_vscl_ratio
- || display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio > display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_taps
- || display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio > display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps
- || (dml2_core_shared_is_420(display_cfg->plane_descriptors[k].pixel_format)
- && (display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps < 1 || display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps > 8 ||
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_taps < 1 || display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_taps > 8 ||
- (display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_taps > 1 && display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_taps % 2 == 1) ||
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio > mode_lib->ip.max_hscl_ratio ||
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio > mode_lib->ip.max_vscl_ratio ||
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio > display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_taps ||
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio > display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps))) {
- mode_lib->ms.support.ScaleRatioAndTapsSupport = false;
- }
- }
-
- /*Source Format, Pixel Format and Scan Support Check*/
- mode_lib->ms.support.SourceFormatPixelAndScanSupport = true;
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->plane_descriptors[k].surface.tiling == dml2_sw_linear && dml_is_vertical_rotation(display_cfg->plane_descriptors[k].composition.rotation_angle)) {
- mode_lib->ms.support.SourceFormatPixelAndScanSupport = false;
- }
- }
-
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- CalculateBytePerPixelAndBlockSizes(
- display_cfg->plane_descriptors[k].pixel_format,
- display_cfg->plane_descriptors[k].surface.tiling,
- display_cfg->plane_descriptors[k].surface.plane0.pitch,
- display_cfg->plane_descriptors[k].surface.plane1.pitch,
-
- /* Output */
- &mode_lib->ms.BytePerPixelY[k],
- &mode_lib->ms.BytePerPixelC[k],
- &mode_lib->ms.BytePerPixelInDETY[k],
- &mode_lib->ms.BytePerPixelInDETC[k],
- &mode_lib->ms.Read256BlockHeightY[k],
- &mode_lib->ms.Read256BlockHeightC[k],
- &mode_lib->ms.Read256BlockWidthY[k],
- &mode_lib->ms.Read256BlockWidthC[k],
- &mode_lib->ms.MacroTileHeightY[k],
- &mode_lib->ms.MacroTileHeightC[k],
- &mode_lib->ms.MacroTileWidthY[k],
- &mode_lib->ms.MacroTileWidthC[k],
- &mode_lib->ms.surf_linear128_l[k],
- &mode_lib->ms.surf_linear128_c[k]);
- }
-
- /*Bandwidth Support Check*/
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (!dml_is_vertical_rotation(display_cfg->plane_descriptors[k].composition.rotation_angle)) {
- mode_lib->ms.SwathWidthYSingleDPP[k] = display_cfg->plane_descriptors[k].composition.viewport.plane0.width;
- mode_lib->ms.SwathWidthCSingleDPP[k] = display_cfg->plane_descriptors[k].composition.viewport.plane1.width;
- } else {
- mode_lib->ms.SwathWidthYSingleDPP[k] = display_cfg->plane_descriptors[k].composition.viewport.plane0.height;
- mode_lib->ms.SwathWidthCSingleDPP[k] = display_cfg->plane_descriptors[k].composition.viewport.plane1.height;
- }
- }
-
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
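-		// Average read bandwidth per plane: swath width (pixels) times bytes per pixel, divided by
-		// the line time (h_total / pixel_clock_mhz, in us) and scaled by the vertical scale ratio.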
- mode_lib->ms.SurfaceReadBandwidthLuma[k] = mode_lib->ms.SwathWidthYSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelY[k], 1.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- mode_lib->ms.SurfaceReadBandwidthChroma[k] = mode_lib->ms.SwathWidthCSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelC[k], 2.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
-
- mode_lib->ms.cursor_bw[k] = display_cfg->plane_descriptors[k].cursor.num_cursors * display_cfg->plane_descriptors[k].cursor.cursor_width *
- display_cfg->plane_descriptors[k].cursor.cursor_bpp / 8.0 / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000));
-
-#ifdef __DML_VBA_DEBUG__
- double old_ReadBandwidthLuma = mode_lib->ms.SwathWidthYSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelInDETY[k], 1.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- double old_ReadBandwidthChroma = mode_lib->ms.SwathWidthYSingleDPP[k] / 2 * math_ceil2(mode_lib->ms.BytePerPixelInDETC[k], 2.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio / 2.0;
- dml2_printf("DML::%s: k=%u, old_ReadBandwidthLuma = %f\n", __func__, k, old_ReadBandwidthLuma);
- dml2_printf("DML::%s: k=%u, old_ReadBandwidthChroma = %f\n", __func__, k, old_ReadBandwidthChroma);
- dml2_printf("DML::%s: k=%u, ReadBandwidthLuma = %f\n", __func__, k, mode_lib->ms.SurfaceReadBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%u, ReadBandwidthChroma = %f\n", __func__, k, mode_lib->ms.SurfaceReadBandwidthChroma[k]);
-#endif
- }
-
- // Writeback bandwidth
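-	// dml2_444_64 output writes 8 bytes per pixel, other enabled writeback formats 4 bytes per pixel;
-	// the rate is averaged over the time needed to scan the writeback input (input_height line times).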
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format == dml2_444_64) {
- mode_lib->ms.WriteBandwidth[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height
- * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width
- / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height
- * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total
- / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * 8.0;
- } else if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
- mode_lib->ms.WriteBandwidth[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height
- * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width
- / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height
- * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total
- / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * 4.0;
- } else {
- mode_lib->ms.WriteBandwidth[k] = 0.0;
- }
- }
-
- /*Writeback Latency support check*/
- mode_lib->ms.support.WritebackLatencySupport = true;
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true &&
- (mode_lib->ms.WriteBandwidth[k] > mode_lib->ip.writeback_interface_buffer_size_kbytes * 1024.0 / mode_lib->soc.qos_parameters.writeback.base_latency_us)) {
- mode_lib->ms.support.WritebackLatencySupport = false;
- }
- }
-
- /* Writeback Mode Support Check */
- s->TotalNumberOfActiveWriteback = 0;
- for (k = 0; k <= (unsigned int)mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true
- && (display_cfg->plane_descriptors[k].stream_index == k)) {
- s->TotalNumberOfActiveWriteback = s->TotalNumberOfActiveWriteback + 1;
- }
- }
-
-	mode_lib->ms.support.EnoughWritebackUnits = true;
- if (s->TotalNumberOfActiveWriteback > (unsigned int)mode_lib->ip.max_num_wb) {
- mode_lib->ms.support.EnoughWritebackUnits = false;
- }
-
- /* Writeback Scale Ratio and Taps Support Check */
- mode_lib->ms.support.WritebackScaleRatioAndTapsSupport = true;
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio > mode_lib->ip.writeback_max_hscl_ratio
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio > mode_lib->ip.writeback_max_vscl_ratio
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio < mode_lib->ip.writeback_min_hscl_ratio
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio < mode_lib->ip.writeback_min_vscl_ratio
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_taps > (unsigned int) mode_lib->ip.writeback_max_hscl_taps
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps > (unsigned int) mode_lib->ip.writeback_max_vscl_taps
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio > (unsigned int)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_taps
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio > (unsigned int)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps
- || (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_taps > 2.0 && ((display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_taps % 2) == 1))) {
- mode_lib->ms.support.WritebackScaleRatioAndTapsSupport = false;
- }
- if (2.0 * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height * (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps - 1) * 57 > mode_lib->ip.writeback_line_buffer_buffer_size) {
- mode_lib->ms.support.WritebackScaleRatioAndTapsSupport = false;
- }
- }
- }
-
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- CalculateSinglePipeDPPCLKAndSCLThroughput(
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
- mode_lib->ip.max_dchub_pscl_bw_pix_per_clk,
- mode_lib->ip.max_pscl_lb_bw_pix_per_clk,
- ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
- display_cfg->plane_descriptors[k].pixel_format,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_taps,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_taps,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps,
- /* Output */
- &mode_lib->ms.PSCL_FACTOR[k],
- &mode_lib->ms.PSCL_FACTOR_CHROMA[k],
- &mode_lib->ms.MinDPPCLKUsingSingleDPP[k]);
- }
-
- // Max Viewport Size support
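-	// The per-plane swath width limit depends on tiling, rotation, chroma presence, bpp and DCC, and is
-	// further capped by what fits in the line buffer for the configured taps and scale ratios.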
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (display_cfg->plane_descriptors[k].surface.tiling == dml2_sw_linear) {
- s->MaximumSwathWidthSupportLuma = 15360;
- } else if (!dml_is_vertical_rotation(display_cfg->plane_descriptors[k].composition.rotation_angle) && mode_lib->ms.BytePerPixelC[k] > 0 && display_cfg->plane_descriptors[k].pixel_format != dml2_rgbe_alpha) { // horz video
- s->MaximumSwathWidthSupportLuma = 7680 + 16;
- } else if (dml_is_vertical_rotation(display_cfg->plane_descriptors[k].composition.rotation_angle) && mode_lib->ms.BytePerPixelC[k] > 0 && display_cfg->plane_descriptors[k].pixel_format != dml2_rgbe_alpha) { // vert video
- s->MaximumSwathWidthSupportLuma = 4320 + 16;
- } else if (display_cfg->plane_descriptors[k].pixel_format == dml2_rgbe_alpha) { // rgbe + alpha
- s->MaximumSwathWidthSupportLuma = 5120 + 16;
- } else if (dml_is_vertical_rotation(display_cfg->plane_descriptors[k].composition.rotation_angle) && mode_lib->ms.BytePerPixelY[k] == 8 && display_cfg->plane_descriptors[k].surface.dcc.enable == true) { // vert 64bpp
- s->MaximumSwathWidthSupportLuma = 3072 + 16;
- } else {
- s->MaximumSwathWidthSupportLuma = 6144 + 16;
- }
-
- if (dml2_core_shared_is_420(display_cfg->plane_descriptors[k].pixel_format)) {
- s->MaximumSwathWidthSupportChroma = (unsigned int)(s->MaximumSwathWidthSupportLuma / 2.0);
- } else {
- s->MaximumSwathWidthSupportChroma = s->MaximumSwathWidthSupportLuma;
- }
- mode_lib->ms.MaximumSwathWidthInLineBufferLuma = mode_lib->ip.line_buffer_size_bits * math_max2(display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio, 1.0) / 57 /*FIXME_STAGE2 was: LBBitPerPixel*/ /
- (display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps + math_max2(math_ceil2(display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio, 1.0) - 2, 0.0));
- if (mode_lib->ms.BytePerPixelC[k] == 0.0) {
- mode_lib->ms.MaximumSwathWidthInLineBufferChroma = 0;
- } else {
- mode_lib->ms.MaximumSwathWidthInLineBufferChroma = mode_lib->ip.line_buffer_size_bits * math_max2(display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio, 1.0) / 57 /*FIXME_STAGE2 was: LBBitPerPixel*/ /
- (display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps + math_max2(math_ceil2(display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio, 1.0) - 2, 0.0));
- }
- mode_lib->ms.MaximumSwathWidthLuma[k] = math_min2(s->MaximumSwathWidthSupportLuma, mode_lib->ms.MaximumSwathWidthInLineBufferLuma);
- mode_lib->ms.MaximumSwathWidthChroma[k] = math_min2(s->MaximumSwathWidthSupportChroma, mode_lib->ms.MaximumSwathWidthInLineBufferChroma);
- }
-
- /* Cursor Support Check */
- mode_lib->ms.support.CursorSupport = true;
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (display_cfg->plane_descriptors[k].cursor.cursor_width > 0.0) {
- if (display_cfg->plane_descriptors[k].cursor.cursor_bpp == 64 && mode_lib->ip.cursor_64bpp_support == false) {
- mode_lib->ms.support.CursorSupport = false;
- }
- }
- }
-
- /* Valid Pitch Check */
- mode_lib->ms.support.PitchSupport = true;
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
-
- // data pitch
- unsigned int alignment_l = mode_lib->ms.MacroTileWidthY[k];
-
- if (mode_lib->ms.surf_linear128_l[k])
- alignment_l = alignment_l / 2;
-
- mode_lib->ms.support.AlignedYPitch[k] = (unsigned int)math_ceil2(math_max2(display_cfg->plane_descriptors[k].surface.plane0.pitch, display_cfg->plane_descriptors[k].surface.plane0.width), alignment_l);
- if (dml2_core_shared_is_420(display_cfg->plane_descriptors[k].pixel_format) || display_cfg->plane_descriptors[k].pixel_format == dml2_rgbe_alpha) {
- unsigned int alignment_c = mode_lib->ms.MacroTileWidthC[k];
-
- if (mode_lib->ms.surf_linear128_c[k])
- alignment_c = alignment_c / 2;
- mode_lib->ms.support.AlignedCPitch[k] = (unsigned int)math_ceil2(math_max2(display_cfg->plane_descriptors[k].surface.plane1.pitch, display_cfg->plane_descriptors[k].surface.plane1.width), alignment_c);
- } else {
- mode_lib->ms.support.AlignedCPitch[k] = display_cfg->plane_descriptors[k].surface.plane1.pitch;
- }
-
- if (mode_lib->ms.support.AlignedYPitch[k] > display_cfg->plane_descriptors[k].surface.plane0.pitch ||
- mode_lib->ms.support.AlignedCPitch[k] > display_cfg->plane_descriptors[k].surface.plane1.pitch) {
- mode_lib->ms.support.PitchSupport = false;
-#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%u AlignedYPitch = %d\n", __func__, k, mode_lib->ms.support.AlignedYPitch[k]);
- dml2_printf("DML::%s: k=%u PitchY = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.pitch);
- dml2_printf("DML::%s: k=%u AlignedCPitch = %d\n", __func__, k, mode_lib->ms.support.AlignedCPitch[k]);
- dml2_printf("DML::%s: k=%u PitchC = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane1.pitch);
- dml2_printf("DML::%s: k=%u PitchSupport = %d\n", __func__, k, mode_lib->ms.support.PitchSupport);
-#endif
- }
-
- // meta pitch
- if (mode_lib->ip.dcn_mrq_present && display_cfg->plane_descriptors[k].surface.dcc.enable) {
- mode_lib->ms.support.AlignedDCCMetaPitchY[k] = (unsigned int)math_ceil2(math_max2(display_cfg->plane_descriptors[k].surface.dcc.plane0.pitch,
- display_cfg->plane_descriptors[k].surface.plane0.width), 64.0 * mode_lib->ms.Read256BlockWidthY[k]);
-
- if (mode_lib->ms.support.AlignedDCCMetaPitchY[k] > display_cfg->plane_descriptors[k].surface.dcc.plane0.pitch)
- mode_lib->ms.support.PitchSupport = false;
-
- if (dml2_core_shared_is_420(display_cfg->plane_descriptors[k].pixel_format) || display_cfg->plane_descriptors[k].pixel_format == dml2_rgbe_alpha) {
- mode_lib->ms.support.AlignedDCCMetaPitchC[k] = (unsigned int)math_ceil2(math_max2(display_cfg->plane_descriptors[k].surface.dcc.plane1.pitch,
- display_cfg->plane_descriptors[k].surface.plane1.width), 64.0 * mode_lib->ms.Read256BlockWidthC[k]);
-
- if (mode_lib->ms.support.AlignedDCCMetaPitchC[k] > display_cfg->plane_descriptors[k].surface.dcc.plane1.pitch)
- mode_lib->ms.support.PitchSupport = false;
- }
- } else {
- mode_lib->ms.support.AlignedDCCMetaPitchY[k] = 0;
- mode_lib->ms.support.AlignedDCCMetaPitchC[k] = 0;
- }
- }
-
- mode_lib->ms.support.ViewportExceedsSurface = false;
- if (!display_cfg->overrides.hw.surface_viewport_size_check_disable) {
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (display_cfg->plane_descriptors[k].composition.viewport.plane0.width > display_cfg->plane_descriptors[k].surface.plane0.width || display_cfg->plane_descriptors[k].composition.viewport.plane0.height > display_cfg->plane_descriptors[k].surface.plane0.height) {
- mode_lib->ms.support.ViewportExceedsSurface = true;
-#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%u ViewportWidth = %d\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width);
- dml2_printf("DML::%s: k=%u SurfaceWidthY = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.width);
- dml2_printf("DML::%s: k=%u ViewportHeight = %d\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.height);
- dml2_printf("DML::%s: k=%u SurfaceHeightY = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.height);
- dml2_printf("DML::%s: k=%u ViewportExceedsSurface = %d\n", __func__, k, mode_lib->ms.support.ViewportExceedsSurface);
-#endif
- if (dml2_core_shared_is_420(display_cfg->plane_descriptors[k].pixel_format) || display_cfg->plane_descriptors[k].pixel_format == dml2_rgbe_alpha) {
- if (display_cfg->plane_descriptors[k].composition.viewport.plane1.width > display_cfg->plane_descriptors[k].surface.plane1.width ||
- display_cfg->plane_descriptors[k].composition.viewport.plane1.height > display_cfg->plane_descriptors[k].surface.plane1.height) {
- mode_lib->ms.support.ViewportExceedsSurface = true;
- }
- }
- }
- }
- }
-
- CalculateSwathAndDETConfiguration_params->display_cfg = display_cfg;
- CalculateSwathAndDETConfiguration_params->ConfigReturnBufferSizeInKByte = mode_lib->ip.config_return_buffer_size_in_kbytes;
- CalculateSwathAndDETConfiguration_params->MaxTotalDETInKByte = mode_lib->ms.MaxTotalDETInKByte;
- CalculateSwathAndDETConfiguration_params->MinCompressedBufferSizeInKByte = mode_lib->ms.MinCompressedBufferSizeInKByte;
- CalculateSwathAndDETConfiguration_params->rob_buffer_size_kbytes = mode_lib->ip.rob_buffer_size_kbytes;
- CalculateSwathAndDETConfiguration_params->pixel_chunk_size_kbytes = mode_lib->ip.pixel_chunk_size_kbytes;
- CalculateSwathAndDETConfiguration_params->ForceSingleDPP = 1;
- CalculateSwathAndDETConfiguration_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes;
- CalculateSwathAndDETConfiguration_params->nomDETInKByte = mode_lib->ms.NomDETInKByte;
- CalculateSwathAndDETConfiguration_params->ConfigReturnBufferSegmentSizeInkByte = mode_lib->ip.config_return_buffer_segment_size_in_kbytes;
- CalculateSwathAndDETConfiguration_params->CompressedBufferSegmentSizeInkByte = mode_lib->ip.compressed_buffer_segment_size_in_kbytes;
- CalculateSwathAndDETConfiguration_params->ReadBandwidthLuma = mode_lib->ms.SurfaceReadBandwidthLuma;
- CalculateSwathAndDETConfiguration_params->ReadBandwidthChroma = mode_lib->ms.SurfaceReadBandwidthChroma;
- CalculateSwathAndDETConfiguration_params->MaximumSwathWidthLuma = mode_lib->ms.MaximumSwathWidthLuma;
- CalculateSwathAndDETConfiguration_params->MaximumSwathWidthChroma = mode_lib->ms.MaximumSwathWidthChroma;
- CalculateSwathAndDETConfiguration_params->Read256BytesBlockHeightY = mode_lib->ms.Read256BlockHeightY;
- CalculateSwathAndDETConfiguration_params->Read256BytesBlockHeightC = mode_lib->ms.Read256BlockHeightC;
- CalculateSwathAndDETConfiguration_params->Read256BytesBlockWidthY = mode_lib->ms.Read256BlockWidthY;
- CalculateSwathAndDETConfiguration_params->Read256BytesBlockWidthC = mode_lib->ms.Read256BlockWidthC;
- CalculateSwathAndDETConfiguration_params->surf_linear128_l = mode_lib->ms.surf_linear128_l;
- CalculateSwathAndDETConfiguration_params->surf_linear128_c = mode_lib->ms.surf_linear128_c;
- CalculateSwathAndDETConfiguration_params->ODMMode = s->dummy_odm_mode;
- CalculateSwathAndDETConfiguration_params->BytePerPixY = mode_lib->ms.BytePerPixelY;
- CalculateSwathAndDETConfiguration_params->BytePerPixC = mode_lib->ms.BytePerPixelC;
- CalculateSwathAndDETConfiguration_params->BytePerPixDETY = mode_lib->ms.BytePerPixelInDETY;
- CalculateSwathAndDETConfiguration_params->BytePerPixDETC = mode_lib->ms.BytePerPixelInDETC;
- CalculateSwathAndDETConfiguration_params->DPPPerSurface = s->dummy_integer_array[2];
- CalculateSwathAndDETConfiguration_params->mrq_present = mode_lib->ip.dcn_mrq_present;
-
- // output
- CalculateSwathAndDETConfiguration_params->req_per_swath_ub_l = s->dummy_integer_array[0];
- CalculateSwathAndDETConfiguration_params->req_per_swath_ub_c = s->dummy_integer_array[1];
- CalculateSwathAndDETConfiguration_params->swath_width_luma_ub = s->dummy_integer_array[3];
- CalculateSwathAndDETConfiguration_params->swath_width_chroma_ub = s->dummy_integer_array[4];
- CalculateSwathAndDETConfiguration_params->SwathWidth = s->dummy_integer_array[5];
- CalculateSwathAndDETConfiguration_params->SwathWidthChroma = s->dummy_integer_array[6];
- CalculateSwathAndDETConfiguration_params->SwathHeightY = s->dummy_integer_array[7];
- CalculateSwathAndDETConfiguration_params->SwathHeightC = s->dummy_integer_array[8];
- CalculateSwathAndDETConfiguration_params->request_size_bytes_luma = s->dummy_integer_array[26];
- CalculateSwathAndDETConfiguration_params->request_size_bytes_chroma = s->dummy_integer_array[27];
- CalculateSwathAndDETConfiguration_params->DETBufferSizeInKByte = s->dummy_integer_array[9];
- CalculateSwathAndDETConfiguration_params->DETBufferSizeY = s->dummy_integer_array[10];
- CalculateSwathAndDETConfiguration_params->DETBufferSizeC = s->dummy_integer_array[11];
- CalculateSwathAndDETConfiguration_params->full_swath_bytes_l = s->full_swath_bytes_l;
- CalculateSwathAndDETConfiguration_params->full_swath_bytes_c = s->full_swath_bytes_c;
- CalculateSwathAndDETConfiguration_params->UnboundedRequestEnabled = &s->dummy_boolean[0];
- CalculateSwathAndDETConfiguration_params->compbuf_reserved_space_64b = &s->dummy_integer[1];
- CalculateSwathAndDETConfiguration_params->hw_debug5 = &s->dummy_boolean[2];
- CalculateSwathAndDETConfiguration_params->CompressedBufferSizeInkByte = &s->dummy_integer[0];
- CalculateSwathAndDETConfiguration_params->ViewportSizeSupportPerSurface = mode_lib->ms.SingleDPPViewportSizeSupportPerSurface;
- CalculateSwathAndDETConfiguration_params->ViewportSizeSupport = &s->dummy_boolean[1];
- CalculateSwathAndDETConfiguration_params->funcs = &mode_lib->funcs;
-
-	// This call is just to find out if there is enough DET space to support the full viewport in one pipe.
- CalculateSwathAndDETConfiguration(&mode_lib->scratch, CalculateSwathAndDETConfiguration_params);
-
- {
- mode_lib->ms.TotalNumberOfActiveDPP = 0;
- mode_lib->ms.support.TotalAvailablePipesSupport = true;
-
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
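-			// Evaluate ODM requirements twice, with and without DSC; the matching result is selected
-			// below once CalculateOutputLink decides whether DSC is actually required.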
- CalculateODMMode(
- mode_lib->ip.maximum_pixels_per_line_per_dsc_unit,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].overrides.odm_mode,
- mode_lib->ms.max_dispclk_freq_mhz,
- false, // DSCEnable
- mode_lib->ms.TotalNumberOfActiveDPP,
- mode_lib->ip.max_num_dpp,
- ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
-
- /* Output */
- &s->TotalAvailablePipesSupportNoDSC,
- &s->NumberOfDPPNoDSC,
- &s->ODMModeNoDSC,
- &s->RequiredDISPCLKPerSurfaceNoDSC);
-
- CalculateODMMode(
- mode_lib->ip.maximum_pixels_per_line_per_dsc_unit,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].overrides.odm_mode,
- mode_lib->ms.max_dispclk_freq_mhz,
- true, // DSCEnable
- mode_lib->ms.TotalNumberOfActiveDPP,
- mode_lib->ip.max_num_dpp,
- ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
-
- /* Output */
- &s->TotalAvailablePipesSupportDSC,
- &s->NumberOfDPPDSC,
- &s->ODMModeDSC,
- &s->RequiredDISPCLKPerSurfaceDSC);
-
- /*Number Of DSC Slices*/
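-			// Minimum slice count scales with the backend pixel rate (MHz): 1 slice up to 340, 2 up to
-			// 1200, 4 up to 2400, 8 up to 4800, then ceil(clk / 600) rounded up to a multiple of 4.
-			// ODM combine later rounds the count up to a multiple of the combine factor (12 for 3:1).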
- if (display_cfg->plane_descriptors[k].stream_index == k) {
- if (s->PixelClockBackEnd[k] > 4800) {
- mode_lib->ms.support.NumberOfDSCSlices[k] = (unsigned int)(math_ceil2(s->PixelClockBackEnd[k] / 600, 4));
- } else if (s->PixelClockBackEnd[k] > 2400) {
- mode_lib->ms.support.NumberOfDSCSlices[k] = 8;
- } else if (s->PixelClockBackEnd[k] > 1200) {
- mode_lib->ms.support.NumberOfDSCSlices[k] = 4;
- } else if (s->PixelClockBackEnd[k] > 340) {
- mode_lib->ms.support.NumberOfDSCSlices[k] = 2;
- } else {
- mode_lib->ms.support.NumberOfDSCSlices[k] = 1;
- }
- } else {
- mode_lib->ms.support.NumberOfDSCSlices[k] = 0;
- }
-
- if (s->ODMModeDSC == dml2_odm_mode_combine_2to1)
- mode_lib->ms.support.NumberOfDSCSlices[k] = 2 * (unsigned int)math_ceil2(mode_lib->ms.support.NumberOfDSCSlices[k] / 2.0, 1.0);
- else if (s->ODMModeDSC == dml2_odm_mode_combine_3to1)
- mode_lib->ms.support.NumberOfDSCSlices[k] = 12;
- else if (s->ODMModeDSC == dml2_odm_mode_combine_4to1)
- mode_lib->ms.support.NumberOfDSCSlices[k] = 4 * (unsigned int)math_ceil2(mode_lib->ms.support.NumberOfDSCSlices[k] / 4.0, 1.0);
-
- CalculateOutputLink(
- &mode_lib->scratch,
- ((double)mode_lib->soc.clk_table.phyclk.clk_values_khz[0] / 1000),
- ((double)mode_lib->soc.clk_table.phyclk_d18.clk_values_khz[0] / 1000),
- ((double)mode_lib->soc.clk_table.phyclk_d32.clk_values_khz[0] / 1000),
- mode_lib->soc.phy_downspread_percent,
- (display_cfg->plane_descriptors[k].stream_index == k),
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active,
- s->PixelClockBackEnd[k],
- s->OutputBpp[k],
- mode_lib->ip.maximum_dsc_bits_per_component,
- mode_lib->ms.support.NumberOfDSCSlices[k],
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.audio_sample_rate,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.audio_sample_layout,
- s->ODMModeNoDSC,
- s->ODMModeDSC,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_dp_lane_count,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_dp_link_rate,
-
- /* Output */
- &mode_lib->ms.RequiresDSC[k],
- &mode_lib->ms.RequiresFEC[k],
- &mode_lib->ms.OutputBpp[k],
-				&mode_lib->ms.OutputType[k], // VBA_DELTA: VBA uses a string to represent type and rate, but DML uses an enum; don't want to rely on strings
- &mode_lib->ms.OutputRate[k],
- &mode_lib->ms.RequiredSlots[k]);
-
- if (mode_lib->ms.RequiresDSC[k] == false) {
- mode_lib->ms.ODMMode[k] = s->ODMModeNoDSC;
- mode_lib->ms.RequiredDISPCLKPerSurface[k] = s->RequiredDISPCLKPerSurfaceNoDSC;
- if (!s->TotalAvailablePipesSupportNoDSC)
- mode_lib->ms.support.TotalAvailablePipesSupport = false;
- mode_lib->ms.TotalNumberOfActiveDPP = mode_lib->ms.TotalNumberOfActiveDPP + s->NumberOfDPPNoDSC;
- } else {
- mode_lib->ms.ODMMode[k] = s->ODMModeDSC;
- mode_lib->ms.RequiredDISPCLKPerSurface[k] = s->RequiredDISPCLKPerSurfaceDSC;
- if (!s->TotalAvailablePipesSupportDSC)
- mode_lib->ms.support.TotalAvailablePipesSupport = false;
- mode_lib->ms.TotalNumberOfActiveDPP = mode_lib->ms.TotalNumberOfActiveDPP + s->NumberOfDPPDSC;
- }
- dml2_printf("DML::%s: k=%d RequiresDSC = %d\n", __func__, k, mode_lib->ms.RequiresDSC[k]);
- dml2_printf("DML::%s: k=%d ODMMode = %d\n", __func__, k, mode_lib->ms.ODMMode[k]);
- }
-
- // FIXME_DCN4 - add odm vs mpc use check
-
- // FIXME_DCN4 - add imall cap check
- mode_lib->ms.support.incorrect_imall_usage = 0;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (mode_lib->ip.imall_supported && display_cfg->plane_descriptors[k].overrides.legacy_svp_config == dml2_svp_mode_override_imall)
- mode_lib->ms.support.incorrect_imall_usage = 1;
- }
-
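-		// Choose the DPP count per plane: ODM combine dictates 2/3/4 pipes with no MPC combine;
-		// otherwise honor the mpcc_combine_factor override, or fall back to MPC combine when a single
-		// DPP cannot meet the DPPCLK or viewport-width requirement.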
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- mode_lib->ms.MPCCombine[k] = false;
- mode_lib->ms.NoOfDPP[k] = 1;
-
- if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_4to1) {
- mode_lib->ms.MPCCombine[k] = false;
- mode_lib->ms.NoOfDPP[k] = 4;
- } else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_3to1) {
- mode_lib->ms.MPCCombine[k] = false;
- mode_lib->ms.NoOfDPP[k] = 3;
- } else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_2to1) {
- mode_lib->ms.MPCCombine[k] = false;
- mode_lib->ms.NoOfDPP[k] = 2;
- } else if (display_cfg->plane_descriptors[k].overrides.mpcc_combine_factor == 2) {
- mode_lib->ms.MPCCombine[k] = true;
- mode_lib->ms.NoOfDPP[k] = 2;
- mode_lib->ms.TotalNumberOfActiveDPP++;
- } else if (display_cfg->plane_descriptors[k].overrides.mpcc_combine_factor == 1) {
- mode_lib->ms.MPCCombine[k] = false;
- mode_lib->ms.NoOfDPP[k] = 1;
- if (!mode_lib->ms.SingleDPPViewportSizeSupportPerSurface[k]) {
-				dml2_printf("ERROR: DML::%s: MPCC combine is overridden to disabled, but the viewport is too large to be supported with a single pipe!\n", __func__);
- }
- } else {
- if ((mode_lib->ms.MinDPPCLKUsingSingleDPP[k] > mode_lib->ms.max_dppclk_freq_mhz) || !mode_lib->ms.SingleDPPViewportSizeSupportPerSurface[k]) {
- mode_lib->ms.MPCCombine[k] = true;
- mode_lib->ms.NoOfDPP[k] = 2;
- mode_lib->ms.TotalNumberOfActiveDPP++;
- }
- }
-#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%d, NoOfDPP = %d\n", __func__, k, mode_lib->ms.NoOfDPP[k]);
-#endif
- }
-
- if (mode_lib->ms.TotalNumberOfActiveDPP > (unsigned int)mode_lib->ip.max_num_dpp)
- mode_lib->ms.support.TotalAvailablePipesSupport = false;
-
-
- mode_lib->ms.TotalNumberOfSingleDPPSurfaces = 0;
- for (k = 0; k < (unsigned int)mode_lib->ms.num_active_planes; ++k) {
- if (mode_lib->ms.NoOfDPP[k] == 1)
- mode_lib->ms.TotalNumberOfSingleDPPSurfaces = mode_lib->ms.TotalNumberOfSingleDPPSurfaces + 1;
- }
-
- //DISPCLK/DPPCLK
- mode_lib->ms.WritebackRequiredDISPCLK = 0;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable) {
- mode_lib->ms.WritebackRequiredDISPCLK = math_max2(mode_lib->ms.WritebackRequiredDISPCLK,
- CalculateWriteBackDISPCLK(display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format,
- ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_taps,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_width,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total,
- mode_lib->ip.writeback_line_buffer_buffer_size));
- }
- }
-
- mode_lib->ms.RequiredDISPCLK = mode_lib->ms.WritebackRequiredDISPCLK;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- mode_lib->ms.RequiredDISPCLK = math_max2(mode_lib->ms.RequiredDISPCLK, mode_lib->ms.RequiredDISPCLKPerSurface[k]);
- }
-
- mode_lib->ms.GlobalDPPCLK = 0;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- mode_lib->ms.RequiredDPPCLK[k] = mode_lib->ms.MinDPPCLKUsingSingleDPP[k] / mode_lib->ms.NoOfDPP[k];
- mode_lib->ms.GlobalDPPCLK = math_max2(mode_lib->ms.GlobalDPPCLK, mode_lib->ms.RequiredDPPCLK[k]);
- }
-
- mode_lib->ms.support.DISPCLK_DPPCLK_Support = !((mode_lib->ms.RequiredDISPCLK > mode_lib->ms.max_dispclk_freq_mhz) || (mode_lib->ms.GlobalDPPCLK > mode_lib->ms.max_dppclk_freq_mhz));
- }
-
- /* Total Available OTG, HDMIFRL, DP Support Check */
- s->TotalNumberOfActiveOTG = 0;
- s->TotalNumberOfActiveHDMIFRL = 0;
- s->TotalNumberOfActiveDP2p0 = 0;
- s->TotalNumberOfActiveDP2p0Outputs = 0;
-
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (display_cfg->plane_descriptors[k].stream_index == k) {
- s->TotalNumberOfActiveOTG = s->TotalNumberOfActiveOTG + 1;
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_hdmifrl)
- s->TotalNumberOfActiveHDMIFRL = s->TotalNumberOfActiveHDMIFRL + 1;
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_dp2p0) {
- s->TotalNumberOfActiveDP2p0 = s->TotalNumberOfActiveDP2p0 + 1;
- // FIXME_STAGE2: SW not using backend related stuff, need mapping for mst setup
- //if (display_cfg->output.OutputMultistreamId[k] == k || display_cfg->output.OutputMultistreamEn[k] == false) {
- s->TotalNumberOfActiveDP2p0Outputs = s->TotalNumberOfActiveDP2p0Outputs + 1;
- //}
- }
- }
- }
-
- mode_lib->ms.support.NumberOfOTGSupport = (s->TotalNumberOfActiveOTG <= (unsigned int)mode_lib->ip.max_num_otg);
- mode_lib->ms.support.NumberOfHDMIFRLSupport = (s->TotalNumberOfActiveHDMIFRL <= (unsigned int)mode_lib->ip.max_num_hdmi_frl_outputs);
- mode_lib->ms.support.NumberOfDP2p0Support = (s->TotalNumberOfActiveDP2p0 <= (unsigned int)mode_lib->ip.max_num_dp2p0_streams && s->TotalNumberOfActiveDP2p0Outputs <= (unsigned int)mode_lib->ip.max_num_dp2p0_outputs);
-
- mode_lib->ms.support.ExceededMultistreamSlots = false;
- mode_lib->ms.support.LinkCapacitySupport = true;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_disabled == false &&
- display_cfg->plane_descriptors[k].stream_index == k && (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_dp || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_dp2p0 || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_edp ||
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_hdmi || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_hdmifrl) && mode_lib->ms.OutputBpp[k] == 0) {
- mode_lib->ms.support.LinkCapacitySupport = false;
- }
- }
-
- mode_lib->ms.support.P2IWith420 = false;
- mode_lib->ms.support.DSCOnlyIfNecessaryWithBPP = false;
- mode_lib->ms.support.DSC422NativeNotSupported = false;
- mode_lib->ms.support.LinkRateDoesNotMatchDPVersion = false;
- mode_lib->ms.support.LinkRateForMultistreamNotIndicated = false;
- mode_lib->ms.support.BPPForMultistreamNotIndicated = false;
- mode_lib->ms.support.MultistreamWithHDMIOreDP = false;
- mode_lib->ms.support.MSOOrODMSplitWithNonDPLink = false;
- mode_lib->ms.support.NotEnoughLanesForMSO = false;
-
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (display_cfg->plane_descriptors[k].stream_index == k && (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_dp || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_dp2p0 || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_edp ||
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_hdmi || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_hdmifrl)) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format == dml2_420 && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced == 1 && mode_lib->ip.ptoi_supported == true)
- mode_lib->ms.support.P2IWith420 = true;
-
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable == dml2_dsc_enable_if_necessary && s->OutputBpp[k] != 0)
- mode_lib->ms.support.DSCOnlyIfNecessaryWithBPP = true;
- if ((display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable == dml2_dsc_enable || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable == dml2_dsc_enable_if_necessary) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format == dml2_n422 && !mode_lib->ip.dsc422_native_support)
- mode_lib->ms.support.DSC422NativeNotSupported = true;
-
- if (((display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_dp_link_rate == dml2_dp_rate_hbr || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_dp_link_rate == dml2_dp_rate_hbr2 ||
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_dp_link_rate == dml2_dp_rate_hbr3) &&
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder != dml2_dp && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder != dml2_edp) ||
- ((display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_dp_link_rate == dml2_dp_rate_uhbr10 || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_dp_link_rate == dml2_dp_rate_uhbr13p5 ||
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_dp_link_rate == dml2_dp_rate_uhbr20) &&
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder != dml2_dp2p0))
- mode_lib->ms.support.LinkRateDoesNotMatchDPVersion = true;
-
- // FIXME_STAGE2
- //if (display_cfg->output.OutputMultistreamEn[k] == 1) {
- // if (display_cfg->output.OutputMultistreamId[k] == k && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_dp_link_rate == dml2_dp_rate_na)
- // mode_lib->ms.support.LinkRateForMultistreamNotIndicated = true;
- // if (display_cfg->output.OutputMultistreamId[k] == k && s->OutputBpp[k] == 0)
- // mode_lib->ms.support.BPPForMultistreamNotIndicated = true;
- // for (n = 0; n < mode_lib->ms.num_active_planes; ++n) {
- // if (display_cfg->output.OutputMultistreamId[k] == n && s->OutputBpp[k] == 0)
- // mode_lib->ms.support.BPPForMultistreamNotIndicated = true;
- // }
- //}
-
- if ((display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_edp ||
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_hdmi ||
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_hdmifrl)) {
- // FIXME_STAGE2
- //if (display_cfg->output.OutputMultistreamEn[k] == 1 && display_cfg->output.OutputMultistreamId[k] == k)
- // mode_lib->ms.support.MultistreamWithHDMIOreDP = true;
- //for (n = 0; n < mode_lib->ms.num_active_planes; ++n) {
- // if (display_cfg->output.OutputMultistreamEn[k] == 1 && display_cfg->output.OutputMultistreamId[k] == n)
- // mode_lib->ms.support.MultistreamWithHDMIOreDP = true;
- //}
- }
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder != dml2_dp && (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].overrides.odm_mode == dml2_odm_mode_split_1to2 ||
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].overrides.odm_mode == dml2_odm_mode_mso_1to2 || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].overrides.odm_mode == dml2_odm_mode_mso_1to4))
- mode_lib->ms.support.MSOOrODMSplitWithNonDPLink = true;
-
- if ((display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].overrides.odm_mode == dml2_odm_mode_mso_1to2 && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_dp_lane_count < 2) ||
- (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].overrides.odm_mode == dml2_odm_mode_mso_1to4 && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_dp_lane_count < 4))
- mode_lib->ms.support.NotEnoughLanesForMSO = true;
- }
- }
-
- mode_lib->ms.support.DTBCLKRequiredMoreThanSupported = false;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (display_cfg->plane_descriptors[k].stream_index == k &&
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_hdmifrl &&
- RequiredDTBCLK(
- mode_lib->ms.RequiresDSC[k],
- s->PixelClockBackEnd[k],
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format,
- mode_lib->ms.OutputBpp[k],
- mode_lib->ms.support.NumberOfDSCSlices[k],
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.audio_sample_rate,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.audio_sample_layout) > ((double)mode_lib->soc.clk_table.dtbclk.clk_values_khz[0] / 1000)) {
- mode_lib->ms.support.DTBCLKRequiredMoreThanSupported = true;
- }
- }
-
- mode_lib->ms.support.DSCCLKRequiredMoreThanSupported = false;
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->plane_descriptors[k].stream_index == k) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_dp ||
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_dp2p0 ||
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_edp ||
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_hdmifrl) {
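-				// A format factor of 2 (4:2:0, native 4:2:2, HDMI FRL) halves the required DSCCLK.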
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format == dml2_420) {
- s->DSCFormatFactor = 2;
- } else if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format == dml2_444) {
- s->DSCFormatFactor = 1;
- } else if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format == dml2_n422 || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_hdmifrl) {
- s->DSCFormatFactor = 2;
- } else {
- s->DSCFormatFactor = 1;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, RequiresDSC = %u\n", __func__, k, mode_lib->ms.RequiresDSC[k]);
-#endif
- if (mode_lib->ms.RequiresDSC[k] == true) {
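-					// Each DSC engine runs at roughly 1/3 of its share of the pixel rate; ODM combine
-					// splits the stream across 2/3/4 engines, scaling the divisor to 6/9/12.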
- s->PixelClockBackEndFactor = 3.0;
-
- if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_4to1)
- s->PixelClockBackEndFactor = 12.0;
- else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_3to1)
- s->PixelClockBackEndFactor = 9.0;
- else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_2to1)
- s->PixelClockBackEndFactor = 6.0;
-
- mode_lib->ms.required_dscclk_freq_mhz[k] = s->PixelClockBackEnd[k] / s->PixelClockBackEndFactor / (double)s->DSCFormatFactor;
- if (mode_lib->ms.required_dscclk_freq_mhz[k] > mode_lib->ms.max_dscclk_freq_mhz) {
- mode_lib->ms.support.DSCCLKRequiredMoreThanSupported = true;
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, PixelClockBackEnd = %f\n", __func__, k, s->PixelClockBackEnd[k]);
- dml2_printf("DML::%s: k=%u, required_dscclk_freq_mhz = %f\n", __func__, k, mode_lib->ms.required_dscclk_freq_mhz[k]);
- dml2_printf("DML::%s: k=%u, DSCFormatFactor = %u\n", __func__, k, s->DSCFormatFactor);
- dml2_printf("DML::%s: k=%u, DSCCLKRequiredMoreThanSupported = %u\n", __func__, k, mode_lib->ms.support.DSCCLKRequiredMoreThanSupported);
-#endif
- }
- }
- }
- }
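
As a minimal sketch of the required-DSCCLK test performed in the loop above (a standalone helper of my own, not a driver function): each DSC engine processes three pixels per DSCCLK cycle, ODM combine spreads the stream across two, three, or four engines, and the format factor halves the effective rate for 4:2:0 or native 4:2:2 output.

#include <stdbool.h>

/* Illustrative only: mirrors PixelClockBackEndFactor = 3 * ODM segments and
 * the division by DSCFormatFactor used above. */
static bool dscclk_requirement_exceeded(double pixel_clock_backend_mhz,
					unsigned int odm_segments,      /* 1, 2, 3 or 4 */
					unsigned int dsc_format_factor, /* 1 or 2 */
					double max_dscclk_mhz)
{
	double required_dscclk_mhz = pixel_clock_backend_mhz /
				     (3.0 * odm_segments) /
				     (double)dsc_format_factor;

	return required_dscclk_mhz > max_dscclk_mhz;
}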
-
- /* Check DSC Unit and Slices Support */
- mode_lib->ms.support.NotEnoughDSCSlices = false;
- s->TotalDSCUnitsRequired = 0;
- mode_lib->ms.support.PixelsPerLinePerDSCUnitSupport = true;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (mode_lib->ms.RequiresDSC[k] == true) {
- s->NumDSCUnitRequired = 1;
-
- if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_4to1)
- s->NumDSCUnitRequired = 4;
- else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_3to1)
- s->NumDSCUnitRequired = 3;
- else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_2to1)
- s->NumDSCUnitRequired = 2;
-
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active > s->NumDSCUnitRequired * (unsigned int)mode_lib->ip.maximum_pixels_per_line_per_dsc_unit)
- mode_lib->ms.support.PixelsPerLinePerDSCUnitSupport = false;
- s->TotalDSCUnitsRequired = s->TotalDSCUnitsRequired + s->NumDSCUnitRequired;
- if (mode_lib->ms.support.NumberOfDSCSlices[k] > 4 * s->NumDSCUnitRequired)
- mode_lib->ms.support.NotEnoughDSCSlices = true;
- }
- }
-
- mode_lib->ms.support.NotEnoughDSCUnits = false;
- if (s->TotalDSCUnitsRequired > (unsigned int)mode_lib->ip.num_dsc) {
- mode_lib->ms.support.NotEnoughDSCUnits = true;
- }
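
A compact sketch of the per-plane DSC resource constraints checked above, with a purely illustrative helper: one DSC engine per ODM segment, each engine limited to maximum_pixels_per_line_per_dsc_unit pixels of h_active and at most four slices, with the per-plane engine counts later summed against ip.num_dsc.

#include <stdbool.h>

/* Illustrative helper, not part of the DML API. */
static bool dsc_plane_resources_ok(unsigned int odm_segments,
				   unsigned int h_active,
				   unsigned int max_pixels_per_line_per_dsc_unit,
				   unsigned int num_dsc_slices)
{
	bool line_width_ok = h_active <= odm_segments * max_pixels_per_line_per_dsc_unit;
	bool slice_count_ok = num_dsc_slices <= 4 * odm_segments;

	return line_width_ok && slice_count_ok;
}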
-
- /*DSC Delay per state*/
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- mode_lib->ms.DSCDelay[k] = DSCDelayRequirement(mode_lib->ms.RequiresDSC[k],
- mode_lib->ms.ODMMode[k],
- mode_lib->ip.maximum_dsc_bits_per_component,
- mode_lib->ms.OutputBpp[k],
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total,
- mode_lib->ms.support.NumberOfDSCSlices[k],
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder,
- ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
- s->PixelClockBackEnd[k]);
- }
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- for (m = 0; m < mode_lib->ms.num_active_planes; m++) {
- if (display_cfg->plane_descriptors[k].stream_index == m && mode_lib->ms.RequiresDSC[m] == true) {
- mode_lib->ms.DSCDelay[k] = mode_lib->ms.DSCDelay[m];
- }
- }
- }
-
- // Determine the swath and DET configuration once the number of DPPs per plane is known
- CalculateSwathAndDETConfiguration_params->ForceSingleDPP = false;
- CalculateSwathAndDETConfiguration_params->ODMMode = mode_lib->ms.ODMMode;
- CalculateSwathAndDETConfiguration_params->DPPPerSurface = mode_lib->ms.NoOfDPP;
-
- // output
- CalculateSwathAndDETConfiguration_params->req_per_swath_ub_l = s->dummy_integer_array[0];
- CalculateSwathAndDETConfiguration_params->req_per_swath_ub_c = s->dummy_integer_array[1];
- CalculateSwathAndDETConfiguration_params->swath_width_luma_ub = mode_lib->ms.swath_width_luma_ub;
- CalculateSwathAndDETConfiguration_params->swath_width_chroma_ub = mode_lib->ms.swath_width_chroma_ub;
- CalculateSwathAndDETConfiguration_params->SwathWidth = mode_lib->ms.SwathWidthY;
- CalculateSwathAndDETConfiguration_params->SwathWidthChroma = mode_lib->ms.SwathWidthC;
- CalculateSwathAndDETConfiguration_params->SwathHeightY = mode_lib->ms.SwathHeightY;
- CalculateSwathAndDETConfiguration_params->SwathHeightC = mode_lib->ms.SwathHeightC;
- CalculateSwathAndDETConfiguration_params->request_size_bytes_luma = mode_lib->ms.support.request_size_bytes_luma;
- CalculateSwathAndDETConfiguration_params->request_size_bytes_chroma = mode_lib->ms.support.request_size_bytes_chroma;
- CalculateSwathAndDETConfiguration_params->DETBufferSizeInKByte = mode_lib->ms.DETBufferSizeInKByte; // FIXME: This is per pipe, but the pipes in the plane will use it
- CalculateSwathAndDETConfiguration_params->DETBufferSizeY = mode_lib->ms.DETBufferSizeY;
- CalculateSwathAndDETConfiguration_params->DETBufferSizeC = mode_lib->ms.DETBufferSizeC;
- CalculateSwathAndDETConfiguration_params->UnboundedRequestEnabled = &mode_lib->ms.UnboundedRequestEnabled;
- CalculateSwathAndDETConfiguration_params->compbuf_reserved_space_64b = s->dummy_integer_array[3];
- CalculateSwathAndDETConfiguration_params->hw_debug5 = s->dummy_boolean_array[1];
- CalculateSwathAndDETConfiguration_params->CompressedBufferSizeInkByte = &mode_lib->ms.CompressedBufferSizeInkByte;
- CalculateSwathAndDETConfiguration_params->ViewportSizeSupportPerSurface = s->dummy_boolean_array[0];
- CalculateSwathAndDETConfiguration_params->ViewportSizeSupport = &mode_lib->ms.support.ViewportSizeSupport;
- CalculateSwathAndDETConfiguration_params->funcs = &mode_lib->funcs;
-
- CalculateSwathAndDETConfiguration(&mode_lib->scratch, CalculateSwathAndDETConfiguration_params);
-
- if (mode_lib->soc.mall_allocated_for_dcn_mbytes == 0) {
- for (k = 0; k < mode_lib->ms.num_active_planes; k++)
- mode_lib->ms.SurfaceSizeInMALL[k] = 0;
- mode_lib->ms.support.ExceededMALLSize = 0;
- } else {
- CalculateSurfaceSizeInMall(
- display_cfg,
- mode_lib->ms.num_active_planes,
- mode_lib->soc.mall_allocated_for_dcn_mbytes,
-
- mode_lib->ms.BytePerPixelY,
- mode_lib->ms.BytePerPixelC,
- mode_lib->ms.Read256BlockWidthY,
- mode_lib->ms.Read256BlockWidthC,
- mode_lib->ms.Read256BlockHeightY,
- mode_lib->ms.Read256BlockHeightC,
- mode_lib->ms.MacroTileWidthY,
- mode_lib->ms.MacroTileWidthC,
- mode_lib->ms.MacroTileHeightY,
- mode_lib->ms.MacroTileHeightC,
-
- /* Output */
- mode_lib->ms.SurfaceSizeInMALL,
- &mode_lib->ms.support.ExceededMALLSize);
- }
-
- mode_lib->ms.TotalNumberOfDCCActiveDPP = 0;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (display_cfg->plane_descriptors[k].surface.dcc.enable == true) {
- mode_lib->ms.TotalNumberOfDCCActiveDPP = mode_lib->ms.TotalNumberOfDCCActiveDPP + mode_lib->ms.NoOfDPP[k];
- }
- }
-
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- s->SurfParameters[k].PixelClock = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- s->SurfParameters[k].DPPPerSurface = mode_lib->ms.NoOfDPP[k];
- s->SurfParameters[k].RotationAngle = display_cfg->plane_descriptors[k].composition.rotation_angle;
- s->SurfParameters[k].ViewportHeight = display_cfg->plane_descriptors[k].composition.viewport.plane0.height;
- s->SurfParameters[k].ViewportHeightC = display_cfg->plane_descriptors[k].composition.viewport.plane1.height;
- s->SurfParameters[k].BlockWidth256BytesY = mode_lib->ms.Read256BlockWidthY[k];
- s->SurfParameters[k].BlockHeight256BytesY = mode_lib->ms.Read256BlockHeightY[k];
- s->SurfParameters[k].BlockWidth256BytesC = mode_lib->ms.Read256BlockWidthC[k];
- s->SurfParameters[k].BlockHeight256BytesC = mode_lib->ms.Read256BlockHeightC[k];
- s->SurfParameters[k].BlockWidthY = mode_lib->ms.MacroTileWidthY[k];
- s->SurfParameters[k].BlockHeightY = mode_lib->ms.MacroTileHeightY[k];
- s->SurfParameters[k].BlockWidthC = mode_lib->ms.MacroTileWidthC[k];
- s->SurfParameters[k].BlockHeightC = mode_lib->ms.MacroTileHeightC[k];
- s->SurfParameters[k].InterlaceEnable = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced;
- s->SurfParameters[k].HTotal = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total;
- s->SurfParameters[k].DCCEnable = display_cfg->plane_descriptors[k].surface.dcc.enable;
- s->SurfParameters[k].SourcePixelFormat = display_cfg->plane_descriptors[k].pixel_format;
- s->SurfParameters[k].SurfaceTiling = display_cfg->plane_descriptors[k].surface.tiling;
- s->SurfParameters[k].BytePerPixelY = mode_lib->ms.BytePerPixelY[k];
- s->SurfParameters[k].BytePerPixelC = mode_lib->ms.BytePerPixelC[k];
- s->SurfParameters[k].ProgressiveToInterlaceUnitInOPP = mode_lib->ip.ptoi_supported;
- s->SurfParameters[k].VRatio = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- s->SurfParameters[k].VRatioChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- s->SurfParameters[k].VTaps = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps;
- s->SurfParameters[k].VTapsChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps;
- s->SurfParameters[k].PitchY = display_cfg->plane_descriptors[k].surface.plane0.pitch;
- s->SurfParameters[k].PitchC = display_cfg->plane_descriptors[k].surface.plane1.pitch;
- s->SurfParameters[k].ViewportStationary = display_cfg->plane_descriptors[k].composition.viewport.stationary;
- s->SurfParameters[k].ViewportXStart = display_cfg->plane_descriptors[k].composition.viewport.plane0.x_start;
- s->SurfParameters[k].ViewportYStart = display_cfg->plane_descriptors[k].composition.viewport.plane0.y_start;
- s->SurfParameters[k].ViewportXStartC = display_cfg->plane_descriptors[k].composition.viewport.plane1.x_start;
- s->SurfParameters[k].ViewportYStartC = display_cfg->plane_descriptors[k].composition.viewport.plane1.y_start;
- s->SurfParameters[k].FORCE_ONE_ROW_FOR_FRAME = display_cfg->plane_descriptors[k].overrides.hw.force_one_row_for_frame;
- s->SurfParameters[k].SwathHeightY = mode_lib->ms.SwathHeightY[k];
- s->SurfParameters[k].SwathHeightC = mode_lib->ms.SwathHeightC[k];
-
- s->SurfParameters[k].DCCMetaPitchY = display_cfg->plane_descriptors[k].surface.dcc.plane0.pitch;
- s->SurfParameters[k].DCCMetaPitchC = display_cfg->plane_descriptors[k].surface.dcc.plane1.pitch;
- }
-
- CalculateVMRowAndSwath_params->display_cfg = display_cfg;
- CalculateVMRowAndSwath_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes;
- CalculateVMRowAndSwath_params->myPipe = s->SurfParameters;
- CalculateVMRowAndSwath_params->SurfaceSizeInMALL = mode_lib->ms.SurfaceSizeInMALL;
- CalculateVMRowAndSwath_params->PTEBufferSizeInRequestsLuma = mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma;
- CalculateVMRowAndSwath_params->PTEBufferSizeInRequestsChroma = mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma;
- CalculateVMRowAndSwath_params->MALLAllocatedForDCN = mode_lib->soc.mall_allocated_for_dcn_mbytes;
- CalculateVMRowAndSwath_params->SwathWidthY = mode_lib->ms.SwathWidthY;
- CalculateVMRowAndSwath_params->SwathWidthC = mode_lib->ms.SwathWidthC;
- CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes;
- CalculateVMRowAndSwath_params->DCCMetaBufferSizeBytes = mode_lib->ip.dcc_meta_buffer_size_bytes;
- CalculateVMRowAndSwath_params->mrq_present = mode_lib->ip.dcn_mrq_present;
-
- // output
- CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = mode_lib->ms.PTEBufferSizeNotExceeded;
- CalculateVMRowAndSwath_params->dpte_row_width_luma_ub = s->dummy_integer_array[12];
- CalculateVMRowAndSwath_params->dpte_row_width_chroma_ub = s->dummy_integer_array[13];
- CalculateVMRowAndSwath_params->dpte_row_height_luma = mode_lib->ms.dpte_row_height;
- CalculateVMRowAndSwath_params->dpte_row_height_chroma = mode_lib->ms.dpte_row_height_chroma;
- CalculateVMRowAndSwath_params->dpte_row_height_linear_luma = s->dummy_integer_array[14]; // VBA_DELTA
- CalculateVMRowAndSwath_params->dpte_row_height_linear_chroma = s->dummy_integer_array[15]; // VBA_DELTA
- CalculateVMRowAndSwath_params->vm_group_bytes = s->dummy_integer_array[16];
- CalculateVMRowAndSwath_params->dpte_group_bytes = mode_lib->ms.dpte_group_bytes;
- CalculateVMRowAndSwath_params->PixelPTEReqWidthY = s->dummy_integer_array[17];
- CalculateVMRowAndSwath_params->PixelPTEReqHeightY = s->dummy_integer_array[18];
- CalculateVMRowAndSwath_params->PTERequestSizeY = s->dummy_integer_array[19];
- CalculateVMRowAndSwath_params->PixelPTEReqWidthC = s->dummy_integer_array[20];
- CalculateVMRowAndSwath_params->PixelPTEReqHeightC = s->dummy_integer_array[21];
- CalculateVMRowAndSwath_params->PTERequestSizeC = s->dummy_integer_array[22];
- CalculateVMRowAndSwath_params->vmpg_width_y = s->vmpg_width_y;
- CalculateVMRowAndSwath_params->vmpg_height_y = s->vmpg_height_y;
- CalculateVMRowAndSwath_params->vmpg_width_c = s->vmpg_width_c;
- CalculateVMRowAndSwath_params->vmpg_height_c = s->vmpg_height_c;
- CalculateVMRowAndSwath_params->dpde0_bytes_per_frame_ub_l = s->dummy_integer_array[23];
- CalculateVMRowAndSwath_params->dpde0_bytes_per_frame_ub_c = s->dummy_integer_array[24];
- CalculateVMRowAndSwath_params->PrefetchSourceLinesY = mode_lib->ms.PrefetchLinesY;
- CalculateVMRowAndSwath_params->PrefetchSourceLinesC = mode_lib->ms.PrefetchLinesC;
- CalculateVMRowAndSwath_params->VInitPreFillY = mode_lib->ms.PrefillY;
- CalculateVMRowAndSwath_params->VInitPreFillC = mode_lib->ms.PrefillC;
- CalculateVMRowAndSwath_params->MaxNumSwathY = mode_lib->ms.MaxNumSwathY;
- CalculateVMRowAndSwath_params->MaxNumSwathC = mode_lib->ms.MaxNumSwathC;
- CalculateVMRowAndSwath_params->dpte_row_bw = mode_lib->ms.dpte_row_bw;
- CalculateVMRowAndSwath_params->PixelPTEBytesPerRow = mode_lib->ms.DPTEBytesPerRow;
- CalculateVMRowAndSwath_params->vm_bytes = mode_lib->ms.vm_bytes;
- CalculateVMRowAndSwath_params->use_one_row_for_frame = mode_lib->ms.use_one_row_for_frame;
- CalculateVMRowAndSwath_params->use_one_row_for_frame_flip = mode_lib->ms.use_one_row_for_frame_flip;
- CalculateVMRowAndSwath_params->is_using_mall_for_ss = s->dummy_boolean_array[0];
- CalculateVMRowAndSwath_params->PTE_BUFFER_MODE = s->dummy_boolean_array[1];
- CalculateVMRowAndSwath_params->BIGK_FRAGMENT_SIZE = s->dummy_integer_array[25];
- CalculateVMRowAndSwath_params->DCCMetaBufferSizeNotExceeded = mode_lib->ms.DCCMetaBufferSizeNotExceeded;
- CalculateVMRowAndSwath_params->meta_row_bw = mode_lib->ms.meta_row_bw;
- CalculateVMRowAndSwath_params->meta_row_bytes = mode_lib->ms.meta_row_bytes;
- CalculateVMRowAndSwath_params->meta_req_width_luma = s->dummy_integer_array[26];
- CalculateVMRowAndSwath_params->meta_req_height_luma = s->dummy_integer_array[27];
- CalculateVMRowAndSwath_params->meta_row_width_luma = s->dummy_integer_array[28];
- CalculateVMRowAndSwath_params->meta_row_height_luma = s->meta_row_height_luma;
- CalculateVMRowAndSwath_params->meta_pte_bytes_per_frame_ub_l = s->dummy_integer_array[29];
- CalculateVMRowAndSwath_params->meta_req_width_chroma = s->dummy_integer_array[30];
- CalculateVMRowAndSwath_params->meta_req_height_chroma = s->dummy_integer_array[31];
- CalculateVMRowAndSwath_params->meta_row_width_chroma = s->dummy_integer_array[32];
- CalculateVMRowAndSwath_params->meta_row_height_chroma = s->meta_row_height_chroma;
- CalculateVMRowAndSwath_params->meta_pte_bytes_per_frame_ub_c = s->dummy_integer_array[33];
-
- CalculateVMRowAndSwath(&mode_lib->scratch, CalculateVMRowAndSwath_params);
-
- mode_lib->ms.support.PTEBufferSizeNotExceeded = true;
- mode_lib->ms.support.DCCMetaBufferSizeNotExceeded = true;
-
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (mode_lib->ms.PTEBufferSizeNotExceeded[k] == false)
- mode_lib->ms.support.PTEBufferSizeNotExceeded = false;
-
- if (mode_lib->ms.DCCMetaBufferSizeNotExceeded[k] == false)
- mode_lib->ms.support.DCCMetaBufferSizeNotExceeded = false;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, PTEBufferSizeNotExceeded = %u\n", __func__, k, mode_lib->ms.PTEBufferSizeNotExceeded[k]);
- dml2_printf("DML::%s: k=%u, DCCMetaBufferSizeNotExceeded = %u\n", __func__, k, mode_lib->ms.DCCMetaBufferSizeNotExceeded[k]);
-#endif
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: PTEBufferSizeNotExceeded = %u\n", __func__, mode_lib->ms.support.PTEBufferSizeNotExceeded);
- dml2_printf("DML::%s: DCCMetaBufferSizeNotExceeded = %u\n", __func__, mode_lib->ms.support.DCCMetaBufferSizeNotExceeded);
-#endif
-
- mode_lib->ms.UrgLatency = CalculateUrgentLatency(
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_us,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_pixel_vm_us,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_vm_us,
- mode_lib->soc.do_urgent_latency_adjustment,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_fclk_us,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_mhz,
- mode_lib->ms.FabricClock,
- mode_lib->ms.uclk_freq_mhz,
- mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].urgent_ramp_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_urgent_ramp_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
-
- mode_lib->ms.TripToMemory = CalculateTripToMemory(
- mode_lib->ms.UrgLatency,
- mode_lib->ms.FabricClock,
- mode_lib->ms.uclk_freq_mhz,
- mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].trip_to_memory_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
-
- mode_lib->ms.TripToMemory = math_max2(mode_lib->ms.UrgLatency, mode_lib->ms.TripToMemory);
-
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- double line_time_us = (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- calculate_cursor_req_attributes(
- display_cfg->plane_descriptors[k].cursor.cursor_width,
- display_cfg->plane_descriptors[k].cursor.cursor_bpp,
-
- // output
- &s->cursor_lines_per_chunk[k],
- &s->cursor_bytes_per_line[k],
- &s->cursor_bytes_per_chunk[k],
- &s->cursor_bytes[k]);
-
- bool cursor_not_enough_urgent_latency_hiding = false;
- calculate_cursor_urgent_burst_factor(
- mode_lib->ip.cursor_buffer_size,
- display_cfg->plane_descriptors[k].cursor.cursor_width,
- s->cursor_bytes_per_chunk[k],
- s->cursor_lines_per_chunk[k],
- line_time_us,
- mode_lib->ms.UrgLatency,
-
- // output
- &mode_lib->ms.UrgentBurstFactorCursor[k],
- &cursor_not_enough_urgent_latency_hiding);
- mode_lib->ms.UrgentBurstFactorCursorPre[k] = mode_lib->ms.UrgentBurstFactorCursor[k];
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, Calling CalculateUrgentBurstFactor\n", __func__, k);
- dml2_printf("DML::%s: k=%d, VRatio=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
- dml2_printf("DML::%s: k=%d, VRatioChroma=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio);
-#endif
-
- CalculateUrgentBurstFactor(
- &display_cfg->plane_descriptors[k],
- mode_lib->ms.swath_width_luma_ub[k],
- mode_lib->ms.swath_width_chroma_ub[k],
- mode_lib->ms.SwathHeightY[k],
- mode_lib->ms.SwathHeightC[k],
- line_time_us,
- mode_lib->ms.UrgLatency,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
- mode_lib->ms.BytePerPixelInDETY[k],
- mode_lib->ms.BytePerPixelInDETC[k],
- mode_lib->ms.DETBufferSizeY[k],
- mode_lib->ms.DETBufferSizeC[k],
-
- // Output
- &mode_lib->ms.UrgentBurstFactorLuma[k],
- &mode_lib->ms.UrgentBurstFactorChroma[k],
- &mode_lib->ms.NotEnoughUrgentLatencyHiding[k]);
-
- mode_lib->ms.NotEnoughUrgentLatencyHiding[k] = mode_lib->ms.NotEnoughUrgentLatencyHiding[k] || cursor_not_enough_urgent_latency_hiding;
- }
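
The line_time_us value fed into the burst-factor calculations above reduces to a simple unit conversion; a minimal sketch, assuming h_total in pixels and the pixel clock in kHz:

/* One scan line in microseconds: pixels per line divided by pixels per us. */
static double line_time_us(unsigned int h_total, unsigned int pixel_clock_khz)
{
	double pixel_clock_mhz = (double)pixel_clock_khz / 1000.0;

	return (double)h_total / pixel_clock_mhz;
}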
-
- CalculateDCFCLKDeepSleep(
- display_cfg,
- mode_lib->ms.num_active_planes,
- mode_lib->ms.BytePerPixelY,
- mode_lib->ms.BytePerPixelC,
- mode_lib->ms.SwathWidthY,
- mode_lib->ms.SwathWidthC,
- mode_lib->ms.NoOfDPP,
- mode_lib->ms.PSCL_FACTOR,
- mode_lib->ms.PSCL_FACTOR_CHROMA,
- mode_lib->ms.RequiredDPPCLK,
- mode_lib->ms.SurfaceReadBandwidthLuma,
- mode_lib->ms.SurfaceReadBandwidthChroma,
- mode_lib->soc.return_bus_width_bytes,
-
- /* Output */
- &mode_lib->ms.dcfclk_deepsleep);
-
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->plane_descriptors[k].stream_index == k) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
- mode_lib->ms.WritebackDelayTime[k] = mode_lib->soc.qos_parameters.writeback.base_latency_us + CalculateWriteBackDelay(
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total) / mode_lib->ms.RequiredDISPCLK;
- } else {
- mode_lib->ms.WritebackDelayTime[k] = 0.0;
- }
- for (m = 0; m <= mode_lib->ms.num_active_planes - 1; m++) {
- if (display_cfg->plane_descriptors[m].stream_index == k && display_cfg->stream_descriptors[display_cfg->plane_descriptors[m].stream_index].writeback.enable == true) {
- mode_lib->ms.WritebackDelayTime[k] = math_max2(mode_lib->ms.WritebackDelayTime[k],
- mode_lib->soc.qos_parameters.writeback.base_latency_us + CalculateWriteBackDelay(
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[m].stream_index].writeback.pixel_format,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[m].stream_index].writeback.scaling_info.h_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[m].stream_index].writeback.scaling_info.v_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[m].stream_index].writeback.scaling_info.v_taps,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[m].stream_index].writeback.scaling_info.output_width,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[m].stream_index].writeback.scaling_info.output_height,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[m].stream_index].writeback.scaling_info.input_height,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[m].stream_index].timing.h_total) / mode_lib->ms.RequiredDISPCLK);
- }
- }
- }
- }
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- for (m = 0; m <= mode_lib->ms.num_active_planes - 1; m++) {
- if (display_cfg->plane_descriptors[k].stream_index == m) {
- mode_lib->ms.WritebackDelayTime[k] = mode_lib->ms.WritebackDelayTime[m];
- }
- }
- }
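
The second loop above is a per-stream broadcast: the writeback delay is computed once on the plane whose index equals its stream index, then copied to every plane that references that stream. A hedged sketch with invented names:

/* Illustrative helper: copy a per-stream value (stored on the stream-owning
 * plane) to every plane attached to that stream. */
static void broadcast_per_stream(double *per_plane_value,
				 const unsigned int *stream_index_of_plane,
				 unsigned int num_planes)
{
	for (unsigned int k = 0; k < num_planes; k++)
		per_plane_value[k] = per_plane_value[stream_index_of_plane[k]];
}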
-
- // MaximumVStartup is actually Tvstartup_min in DCN4 programming guide
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- bool isInterlaceTiming = (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced && !mode_lib->ip.ptoi_supported);
- s->MaximumVStartup[k] = CalculateMaxVStartup(
- mode_lib->ip.ptoi_supported,
- mode_lib->ip.vblank_nom_default_us,
- &display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing,
- mode_lib->ms.WritebackDelayTime[k]);
- mode_lib->ms.MaxVStartupLines[k] = (isInterlaceTiming ? (2 * s->MaximumVStartup[k]) : s->MaximumVStartup[k]);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, MaximumVStartup = %u\n", __func__, k, s->MaximumVStartup[k]);
-#endif
- }
-
-
- /* Immediate Flip and MALL parameters */
- s->ImmediateFlipRequired = false;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- s->ImmediateFlipRequired = s->ImmediateFlipRequired || display_cfg->plane_descriptors[k].immediate_flip;
- }
-
- mode_lib->ms.support.ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = false;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- mode_lib->ms.support.ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe =
- mode_lib->ms.support.ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe ||
- ((display_cfg->hostvm_enable == true || display_cfg->plane_descriptors[k].immediate_flip == true) &&
- (display_cfg->plane_descriptors[k].overrides.uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_mall_full_frame || dml_is_phantom_pipe(&display_cfg->plane_descriptors[k])));
- }
-
- mode_lib->ms.support.InvalidCombinationOfMALLUseForPStateAndStaticScreen = false;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- mode_lib->ms.support.InvalidCombinationOfMALLUseForPStateAndStaticScreen = mode_lib->ms.support.InvalidCombinationOfMALLUseForPStateAndStaticScreen ||
- ((display_cfg->plane_descriptors[k].overrides.refresh_from_mall == dml2_refresh_from_mall_mode_override_force_enable || display_cfg->plane_descriptors[k].overrides.refresh_from_mall == dml2_refresh_from_mall_mode_override_auto) && (dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]))) ||
- ((display_cfg->plane_descriptors[k].overrides.refresh_from_mall == dml2_refresh_from_mall_mode_override_force_disable || display_cfg->plane_descriptors[k].overrides.refresh_from_mall == dml2_refresh_from_mall_mode_override_auto) && (display_cfg->plane_descriptors[k].overrides.uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_mall_full_frame));
- }
-
- s->FullFrameMALLPStateMethod = false;
- s->SubViewportMALLPStateMethod = false;
- s->PhantomPipeMALLPStateMethod = false;
- s->SubViewportMALLRefreshGreaterThan120Hz = false;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (display_cfg->plane_descriptors[k].overrides.uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_mall_full_frame)
- s->FullFrameMALLPStateMethod = true;
- if (display_cfg->plane_descriptors[k].overrides.legacy_svp_config == dml2_svp_mode_override_main_pipe) {
- s->SubViewportMALLPStateMethod = true;
- if (!display_cfg->overrides.enable_subvp_implicit_pmo) {
- // For DV, small-frame tests will have a very high refresh rate
- unsigned long long refresh_rate = (unsigned long long) ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz * 1000 /
- (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total /
- (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total);
- if (refresh_rate > 120)
- s->SubViewportMALLRefreshGreaterThan120Hz = true;
- }
- }
- if (dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]))
- s->PhantomPipeMALLPStateMethod = true;
- }
- mode_lib->ms.support.InvalidCombinationOfMALLUseForPState = (s->SubViewportMALLPStateMethod != s->PhantomPipeMALLPStateMethod) ||
- (s->SubViewportMALLPStateMethod && s->FullFrameMALLPStateMethod) || s->SubViewportMALLRefreshGreaterThan120Hz;
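
The refresh-rate estimate driving the SubViewportMALLRefreshGreaterThan120Hz flag is the pixel rate divided by the pixel count of one full frame; a minimal sketch (the helper name is mine):

/* Frames per second from pixel clock (kHz) and frame dimensions. */
static double refresh_rate_hz(unsigned int pixel_clock_khz,
			      unsigned int h_total, unsigned int v_total)
{
	return ((double)pixel_clock_khz * 1000.0) /
	       ((double)h_total * (double)v_total);
}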
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SubViewportMALLPStateMethod = %u\n", __func__, s->SubViewportMALLPStateMethod);
- dml2_printf("DML::%s: PhantomPipeMALLPStateMethod = %u\n", __func__, s->PhantomPipeMALLPStateMethod);
- dml2_printf("DML::%s: FullFrameMALLPStateMethod = %u\n", __func__, s->FullFrameMALLPStateMethod);
- dml2_printf("DML::%s: SubViewportMALLRefreshGreaterThan120Hz = %u\n", __func__, s->SubViewportMALLRefreshGreaterThan120Hz);
- dml2_printf("DML::%s: InvalidCombinationOfMALLUseForPState = %u\n", __func__, mode_lib->ms.support.InvalidCombinationOfMALLUseForPState);
- dml2_printf("DML::%s: in_out_params->min_clk_index = %u\n", __func__, in_out_params->min_clk_index);
- dml2_printf("DML::%s: mode_lib->ms.DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK);
- dml2_printf("DML::%s: mode_lib->ms.FabricClock = %f\n", __func__, mode_lib->ms.FabricClock);
- dml2_printf("DML::%s: mode_lib->ms.uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz);
- dml2_printf("DML::%s: max_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.max_urgent_latency_us);
- dml2_printf("DML::%s: urgent latency tolerance = %f\n", __func__, ((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024 / (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes)));
-#endif
-
- mode_lib->ms.support.OutstandingRequestsSupport = true;
- mode_lib->ms.support.OutstandingRequestsUrgencyAvoidance = true;
-
- mode_lib->ms.support.avg_urgent_latency_us
- = (mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].average_latency_when_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_average_latency_margin / 100.0)
- + mode_lib->soc.qos_parameters.qos_params.dcn4x.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock)
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_average_transport_latency_margin / 100.0);
-
- mode_lib->ms.support.avg_non_urgent_latency_us
- = (mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].average_latency_when_non_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_average_latency_margin / 100.0)
- + mode_lib->soc.qos_parameters.qos_params.dcn4x.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock)
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_average_transport_latency_margin / 100.0);
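
Both average-latency expressions above follow the same shape: UMC cycles converted to microseconds at the current UCLK with a UMC margin, plus fabric transport cycles at FCLK, all scaled by the fabric margin. A sketch with assumed parameter names:

/* Illustrative only; mirrors the dcn4x average (non-)urgent latency terms. */
static double avg_latency_us(double latency_uclk_cycles, double uclk_mhz,
			     double umc_margin_pct,
			     double transport_fclk_cycles, double fclk_mhz,
			     double fabric_margin_pct)
{
	return (latency_uclk_cycles / uclk_mhz * (1.0 + umc_margin_pct / 100.0) +
		transport_fclk_cycles / fclk_mhz) *
	       (1.0 + fabric_margin_pct / 100.0);
}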
-
- double outstanding_latency_us = 0;
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
-
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) {
- outstanding_latency_us = (mode_lib->soc.max_outstanding_reqs * mode_lib->ms.support.request_size_bytes_luma[k]
- / (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes));
-
- if (outstanding_latency_us < mode_lib->ms.support.avg_urgent_latency_us) {
- mode_lib->ms.support.OutstandingRequestsSupport = false;
- }
-
- if (outstanding_latency_us < mode_lib->ms.support.avg_non_urgent_latency_us) {
- mode_lib->ms.support.OutstandingRequestsUrgencyAvoidance = false;
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.avg_urgent_latency_us);
- dml2_printf("DML::%s: avg_non_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.avg_non_urgent_latency_us);
- dml2_printf("DML::%s: k=%d, request_size_bytes_luma = %d\n", __func__, k, mode_lib->ms.support.request_size_bytes_luma[k]);
- dml2_printf("DML::%s: k=%d, outstanding_latency_us = %f (luma)\n", __func__, k, outstanding_latency_us);
-#endif
- }
-
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x && mode_lib->ms.BytePerPixelC[k] > 0) {
- outstanding_latency_us = (mode_lib->soc.max_outstanding_reqs * mode_lib->ms.support.request_size_bytes_chroma[k]
- / (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes));
-
- if (outstanding_latency_us < mode_lib->ms.support.avg_urgent_latency_us) {
- mode_lib->ms.support.OutstandingRequestsSupport = false;
- }
-
- if (outstanding_latency_us < mode_lib->ms.support.avg_non_urgent_latency_us) {
- mode_lib->ms.support.OutstandingRequestsUrgencyAvoidance = false;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, request_size_bytes_chroma = %d\n", __func__, k, mode_lib->ms.support.request_size_bytes_chroma[k]);
- dml2_printf("DML::%s: k=%d, outstanding_latency_us = %f (chroma)\n", __func__, k, outstanding_latency_us);
-#endif
- }
- }
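
The per-plane test in the loop above checks whether the maximum number of outstanding requests can keep the return bus busy for at least the average latency; a standalone sketch under the same dcn4x assumptions:

#include <stdbool.h>

/* Time covered by max_outstanding_reqs requests of request_size_bytes each,
 * drained at DCFCLK * return_bus_width bytes per microsecond, compared
 * against the average latency it must hide. */
static bool outstanding_reqs_cover_latency(unsigned int max_outstanding_reqs,
					   unsigned int request_size_bytes,
					   double dcfclk_mhz,
					   unsigned int return_bus_width_bytes,
					   double avg_latency_us)
{
	double covered_us = (double)max_outstanding_reqs * request_size_bytes /
			    (dcfclk_mhz * return_bus_width_bytes);

	return covered_us >= avg_latency_us;
}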
-
- memset(calculate_mcache_setting_params, 0, sizeof(struct dml2_core_calcs_calculate_mcache_setting_params));
- if (mode_lib->soc.mall_allocated_for_dcn_mbytes == 0 || mode_lib->ip.dcn_mrq_present) {
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- mode_lib->ms.mall_prefetch_sdp_overhead_factor[k] = 1.0;
- mode_lib->ms.mall_prefetch_dram_overhead_factor[k] = 1.0;
- mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0[k] = 1.0;
- mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0[k] = 1.0;
- mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1[k] = 1.0;
- mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1[k] = 1.0;
- }
- } else {
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- calculate_mcache_setting_params->dcc_enable = display_cfg->plane_descriptors[k].surface.dcc.enable;
- calculate_mcache_setting_params->num_chans = mode_lib->soc.clk_table.dram_config.channel_count;
- calculate_mcache_setting_params->mem_word_bytes = mode_lib->soc.mem_word_bytes;
- calculate_mcache_setting_params->mcache_size_bytes = mode_lib->soc.mcache_size_bytes;
- calculate_mcache_setting_params->mcache_line_size_bytes = mode_lib->soc.mcache_line_size_bytes;
- calculate_mcache_setting_params->gpuvm_enable = display_cfg->gpuvm_enable;
- calculate_mcache_setting_params->gpuvm_page_size_kbytes = display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes;
-
- calculate_mcache_setting_params->source_format = display_cfg->plane_descriptors[k].pixel_format;
- calculate_mcache_setting_params->surf_vert = dml_is_vertical_rotation(display_cfg->plane_descriptors[k].composition.rotation_angle);
- calculate_mcache_setting_params->vp_stationary = display_cfg->plane_descriptors[k].composition.viewport.stationary;
- calculate_mcache_setting_params->tiling_mode = display_cfg->plane_descriptors[k].surface.tiling;
- calculate_mcache_setting_params->imall_enable = mode_lib->ip.imall_supported && display_cfg->plane_descriptors[k].overrides.legacy_svp_config == dml2_svp_mode_override_imall;
-
- calculate_mcache_setting_params->vp_start_x_l = display_cfg->plane_descriptors[k].composition.viewport.plane0.x_start;
- calculate_mcache_setting_params->vp_start_y_l = display_cfg->plane_descriptors[k].composition.viewport.plane0.y_start;
- calculate_mcache_setting_params->full_vp_width_l = display_cfg->plane_descriptors[k].composition.viewport.plane0.width;
- calculate_mcache_setting_params->full_vp_height_l = display_cfg->plane_descriptors[k].composition.viewport.plane0.height;
- calculate_mcache_setting_params->blk_width_l = mode_lib->ms.MacroTileWidthY[k];
- calculate_mcache_setting_params->blk_height_l = mode_lib->ms.MacroTileHeightY[k];
- calculate_mcache_setting_params->vmpg_width_l = s->vmpg_width_y[k];
- calculate_mcache_setting_params->vmpg_height_l = s->vmpg_height_y[k];
- calculate_mcache_setting_params->full_swath_bytes_l = s->full_swath_bytes_l[k];
- calculate_mcache_setting_params->bytes_per_pixel_l = mode_lib->ms.BytePerPixelY[k];
-
- calculate_mcache_setting_params->vp_start_x_c = display_cfg->plane_descriptors[k].composition.viewport.plane1.x_start;
- calculate_mcache_setting_params->vp_start_y_c = display_cfg->plane_descriptors[k].composition.viewport.plane1.y_start;
- calculate_mcache_setting_params->full_vp_width_c = display_cfg->plane_descriptors[k].composition.viewport.plane1.width;
- calculate_mcache_setting_params->full_vp_height_c = display_cfg->plane_descriptors[k].composition.viewport.plane1.height;
- calculate_mcache_setting_params->blk_width_c = mode_lib->ms.MacroTileWidthC[k];
- calculate_mcache_setting_params->blk_height_c = mode_lib->ms.MacroTileHeightC[k];
- calculate_mcache_setting_params->vmpg_width_c = s->vmpg_width_c[k];
- calculate_mcache_setting_params->vmpg_height_c = s->vmpg_height_c[k];
- calculate_mcache_setting_params->full_swath_bytes_c = s->full_swath_bytes_c[k];
- calculate_mcache_setting_params->bytes_per_pixel_c = mode_lib->ms.BytePerPixelC[k];
-
- // output
- calculate_mcache_setting_params->dcc_dram_bw_nom_overhead_factor_l = &mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0[k];
- calculate_mcache_setting_params->dcc_dram_bw_pref_overhead_factor_l = &mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0[k];
- calculate_mcache_setting_params->dcc_dram_bw_nom_overhead_factor_c = &mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1[k];
- calculate_mcache_setting_params->dcc_dram_bw_pref_overhead_factor_c = &mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1[k];
-
- calculate_mcache_setting_params->num_mcaches_l = &mode_lib->ms.num_mcaches_l[k];
- calculate_mcache_setting_params->mcache_row_bytes_l = &mode_lib->ms.mcache_row_bytes_l[k];
- calculate_mcache_setting_params->mcache_offsets_l = mode_lib->ms.mcache_offsets_l[k];
- calculate_mcache_setting_params->mcache_shift_granularity_l = &mode_lib->ms.mcache_shift_granularity_l[k];
-
- calculate_mcache_setting_params->num_mcaches_c = &mode_lib->ms.num_mcaches_c[k];
- calculate_mcache_setting_params->mcache_row_bytes_c = &mode_lib->ms.mcache_row_bytes_c[k];
- calculate_mcache_setting_params->mcache_offsets_c = mode_lib->ms.mcache_offsets_c[k];
- calculate_mcache_setting_params->mcache_shift_granularity_c = &mode_lib->ms.mcache_shift_granularity_c[k];
-
- calculate_mcache_setting_params->mall_comb_mcache_l = &mode_lib->ms.mall_comb_mcache_l[k];
- calculate_mcache_setting_params->mall_comb_mcache_c = &mode_lib->ms.mall_comb_mcache_c[k];
- calculate_mcache_setting_params->lc_comb_mcache = &mode_lib->ms.lc_comb_mcache[k];
-
- calculate_mcache_setting(&mode_lib->scratch, calculate_mcache_setting_params);
- }
-
- calculate_mall_bw_overhead_factor(
- mode_lib->ms.mall_prefetch_sdp_overhead_factor,
- mode_lib->ms.mall_prefetch_dram_overhead_factor,
-
- // input
- display_cfg,
- mode_lib->ms.num_active_planes);
- }
-
- // Calculate all the bandwidth available
- // Need another bw for latency evaluation
- calculate_bandwidth_available(
- mode_lib->ms.support.avg_bandwidth_available_min, // not used
- mode_lib->ms.support.avg_bandwidth_available, // not used
- mode_lib->ms.support.urg_bandwidth_available_min_latency,
- mode_lib->ms.support.urg_bandwidth_available, // not used
- mode_lib->ms.support.urg_bandwidth_available_vm_only, // not used
- mode_lib->ms.support.urg_bandwidth_available_pixel_and_vm, // not used
-
- &mode_lib->soc,
- display_cfg->hostvm_enable,
- mode_lib->ms.DCFCLK,
- mode_lib->ms.FabricClock,
- mode_lib->ms.dram_bw_mbps);
-
- calculate_bandwidth_available(
- mode_lib->ms.support.avg_bandwidth_available_min,
- mode_lib->ms.support.avg_bandwidth_available,
- mode_lib->ms.support.urg_bandwidth_available_min,
- mode_lib->ms.support.urg_bandwidth_available,
- mode_lib->ms.support.urg_bandwidth_available_vm_only,
- mode_lib->ms.support.urg_bandwidth_available_pixel_and_vm,
-
- &mode_lib->soc,
- display_cfg->hostvm_enable,
- mode_lib->ms.MaxDCFCLK,
- mode_lib->ms.MaxFabricClock,
- mode_lib->ms.dram_bw_mbps);
-
-
- // Average BW support check
- calculate_avg_bandwidth_required(
- mode_lib->ms.support.avg_bandwidth_required,
- // input
- display_cfg,
- mode_lib->ms.num_active_planes,
- mode_lib->ms.SurfaceReadBandwidthLuma,
- mode_lib->ms.SurfaceReadBandwidthChroma,
- mode_lib->ms.cursor_bw,
- mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0,
- mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1,
- mode_lib->ms.mall_prefetch_dram_overhead_factor,
- mode_lib->ms.mall_prefetch_sdp_overhead_factor);
-
- for (m = 0; m < dml2_core_internal_bw_max; m++) { // check sdp and dram
- mode_lib->ms.support.avg_bandwidth_support_ok[dml2_core_internal_soc_state_sys_idle][m] = 1;
- mode_lib->ms.support.avg_bandwidth_support_ok[dml2_core_internal_soc_state_sys_active][m] = (mode_lib->ms.support.avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][m] <= mode_lib->ms.support.avg_bandwidth_available[dml2_core_internal_soc_state_sys_active][m]);
- mode_lib->ms.support.avg_bandwidth_support_ok[dml2_core_internal_soc_state_svp_prefetch][m] = (mode_lib->ms.support.avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][m] <= mode_lib->ms.support.avg_bandwidth_available[dml2_core_internal_soc_state_svp_prefetch][m]);
- }
-
- mode_lib->ms.support.AvgBandwidthSupport = true;
- mode_lib->ms.support.EnoughUrgentLatencyHidingSupport = true;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (mode_lib->ms.NotEnoughUrgentLatencyHiding[k]) {
- mode_lib->ms.support.EnoughUrgentLatencyHidingSupport = false;
- dml2_printf("DML::%s: k=%u NotEnoughUrgentLatencyHiding set\n", __func__, k);
-
- }
- }
- for (m = 0; m < dml2_core_internal_soc_state_max; m++) {
- for (n = 0; n < dml2_core_internal_bw_max; n++) { // check sdp and dram
- if (!mode_lib->ms.support.avg_bandwidth_support_ok[m][n] && (m == dml2_core_internal_soc_state_sys_active || mode_lib->soc.mall_allocated_for_dcn_mbytes > 0)) {
- mode_lib->ms.support.AvgBandwidthSupport = false;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_bandwidth_support_ok[%s][%s] not ok\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n));
-#endif
- }
- }
- }
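
The nested loop above collapses the per-state, per-bus results into one flag: a shortfall in the sys_active state always fails the check, while shortfalls in other states (such as SVP prefetch) only count when MALL is actually allocated to DCN. A hedged sketch with invented names:

#include <stdbool.h>

/* ok[state][bus] is true when the required average bandwidth fits the budget. */
static bool avg_bandwidth_supported(const bool (*ok)[2], unsigned int num_states,
				    unsigned int sys_active_state,
				    unsigned int mall_allocated_mbytes)
{
	for (unsigned int m = 0; m < num_states; m++)
		for (unsigned int n = 0; n < 2; n++)
			if (!ok[m][n] &&
			    (m == sys_active_state || mall_allocated_mbytes > 0))
				return false;

	return true;
}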
-
- /* Prefetch Check */
- {
- mode_lib->ms.TimeCalc = 24 / mode_lib->ms.dcfclk_deepsleep;
-
-
- calculate_hostvm_inefficiency_factor(
- &s->HostVMInefficiencyFactor,
- &s->HostVMInefficiencyFactorPrefetch,
-
- display_cfg->gpuvm_enable,
- display_cfg->hostvm_enable,
- mode_lib->ip.remote_iommu_outstanding_translations,
- mode_lib->soc.max_outstanding_reqs,
- mode_lib->ms.support.urg_bandwidth_available_pixel_and_vm[dml2_core_internal_soc_state_sys_active],
- mode_lib->ms.support.urg_bandwidth_available_vm_only[dml2_core_internal_soc_state_sys_active]);
-
- mode_lib->ms.Total3dlutActive = 0;
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut)
- mode_lib->ms.Total3dlutActive = mode_lib->ms.Total3dlutActive + 1;
-
- // Calculate tdlut schedule related terms
- calculate_tdlut_setting_params->dispclk_mhz = mode_lib->ms.RequiredDISPCLK;
- calculate_tdlut_setting_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut;
- calculate_tdlut_setting_params->tdlut_width_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_width_mode;
- calculate_tdlut_setting_params->tdlut_addressing_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_addressing_mode;
- calculate_tdlut_setting_params->cursor_buffer_size = mode_lib->ip.cursor_buffer_size;
- calculate_tdlut_setting_params->gpuvm_enable = display_cfg->gpuvm_enable;
- calculate_tdlut_setting_params->gpuvm_page_size_kbytes = display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes;
- calculate_tdlut_setting_params->tdlut_mpc_width_flag = display_cfg->plane_descriptors[k].tdlut.tdlut_mpc_width_flag;
- calculate_tdlut_setting_params->is_gfx11 = dml_get_gfx_version(display_cfg->plane_descriptors[k].surface.tiling);
-
- // output
- calculate_tdlut_setting_params->tdlut_pte_bytes_per_frame = &s->tdlut_pte_bytes_per_frame[k];
- calculate_tdlut_setting_params->tdlut_bytes_per_frame = &s->tdlut_bytes_per_frame[k];
- calculate_tdlut_setting_params->tdlut_groups_per_2row_ub = &s->tdlut_groups_per_2row_ub[k];
- calculate_tdlut_setting_params->tdlut_opt_time = &s->tdlut_opt_time[k];
- calculate_tdlut_setting_params->tdlut_drain_time = &s->tdlut_drain_time[k];
- calculate_tdlut_setting_params->tdlut_bytes_per_group = &s->tdlut_bytes_per_group[k];
-
- calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params);
- }
-
- double min_return_bw_for_latency = mode_lib->ms.support.urg_bandwidth_available_min_latency[dml2_core_internal_soc_state_sys_active];
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn3)
- s->ReorderingBytes = (unsigned int)(mode_lib->soc.clk_table.dram_config.channel_count * math_max3(mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_only_bytes,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_vm_only_bytes));
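
For the DCN3.x QoS model, the reordering allowance above is the worst of the three per-channel out-of-order return sizes scaled by the DRAM channel count; a minimal sketch that avoids the math_max3() helper:

/* Illustrative: per-channel worst case times channel count. */
static unsigned int reordering_bytes(unsigned int num_channels,
				     double pixel_only_bytes,
				     double pixel_and_vm_bytes,
				     double vm_only_bytes)
{
	double worst = pixel_only_bytes;

	if (pixel_and_vm_bytes > worst)
		worst = pixel_and_vm_bytes;
	if (vm_only_bytes > worst)
		worst = vm_only_bytes;

	return (unsigned int)(num_channels * worst);
}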
-
- CalculateExtraLatency(
- display_cfg,
- mode_lib->ip.rob_buffer_size_kbytes,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles,
- s->ReorderingBytes,
- mode_lib->ms.DCFCLK,
- mode_lib->ms.FabricClock,
- mode_lib->ip.pixel_chunk_size_kbytes,
- min_return_bw_for_latency,
- mode_lib->ms.num_active_planes,
- mode_lib->ms.NoOfDPP,
- mode_lib->ms.dpte_group_bytes,
- s->tdlut_bytes_per_group,
- s->HostVMInefficiencyFactor,
- s->HostVMInefficiencyFactorPrefetch,
- mode_lib->soc.hostvm_min_page_size_kbytes,
- mode_lib->soc.qos_parameters.qos_type,
- !(display_cfg->overrides.max_outstanding_when_urgent_expected_disable),
- mode_lib->soc.max_outstanding_reqs,
- mode_lib->ms.support.request_size_bytes_luma,
- mode_lib->ms.support.request_size_bytes_chroma,
- mode_lib->ip.meta_chunk_size_kbytes,
- mode_lib->ip.dchub_arb_to_ret_delay,
- mode_lib->ms.TripToMemory,
- mode_lib->ip.hostvm_mode,
-
- // output
- &mode_lib->ms.ExtraLatency,
- &mode_lib->ms.ExtraLatency_sr,
- &mode_lib->ms.ExtraLatencyPrefetch);
-
- {
- mode_lib->ms.support.PrefetchSupported = true;
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
-
- mode_lib->ms.TWait[k] = CalculateTWait(
- display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
- mode_lib->ms.UrgLatency,
- mode_lib->ms.TripToMemory);
-
- struct dml2_core_internal_DmlPipe *myPipe = &s->myPipe;
- myPipe->Dppclk = mode_lib->ms.RequiredDPPCLK[k];
- myPipe->Dispclk = mode_lib->ms.RequiredDISPCLK;
- myPipe->PixelClock = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- myPipe->DCFClkDeepSleep = mode_lib->ms.dcfclk_deepsleep;
- myPipe->DPPPerSurface = mode_lib->ms.NoOfDPP[k];
- myPipe->ScalerEnabled = display_cfg->plane_descriptors[k].composition.scaler_info.enabled;
- myPipe->VRatio = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- myPipe->VRatioChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- myPipe->VTaps = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps;
- myPipe->VTapsChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps;
- myPipe->RotationAngle = display_cfg->plane_descriptors[k].composition.rotation_angle;
- myPipe->mirrored = display_cfg->plane_descriptors[k].composition.mirrored;
- myPipe->BlockWidth256BytesY = mode_lib->ms.Read256BlockWidthY[k];
- myPipe->BlockHeight256BytesY = mode_lib->ms.Read256BlockHeightY[k];
- myPipe->BlockWidth256BytesC = mode_lib->ms.Read256BlockWidthC[k];
- myPipe->BlockHeight256BytesC = mode_lib->ms.Read256BlockHeightC[k];
- myPipe->InterlaceEnable = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced;
- myPipe->NumberOfCursors = display_cfg->plane_descriptors[k].cursor.num_cursors;
- myPipe->VBlank = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active;
- myPipe->HTotal = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total;
- myPipe->HActive = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active;
- myPipe->DCCEnable = display_cfg->plane_descriptors[k].surface.dcc.enable;
- myPipe->ODMMode = mode_lib->ms.ODMMode[k];
- myPipe->SourcePixelFormat = display_cfg->plane_descriptors[k].pixel_format;
- myPipe->BytePerPixelY = mode_lib->ms.BytePerPixelY[k];
- myPipe->BytePerPixelC = mode_lib->ms.BytePerPixelC[k];
- myPipe->ProgressiveToInterlaceUnitInOPP = mode_lib->ip.ptoi_supported;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Calling CalculatePrefetchSchedule for k=%u\n", __func__, k);
- dml2_printf("DML::%s: MaximumVStartup = %u\n", __func__, s->MaximumVStartup[k]);
-#endif
- CalculatePrefetchSchedule_params->display_cfg = display_cfg;
- CalculatePrefetchSchedule_params->HostVMInefficiencyFactor = s->HostVMInefficiencyFactorPrefetch;
- CalculatePrefetchSchedule_params->myPipe = myPipe;
- CalculatePrefetchSchedule_params->DSCDelay = mode_lib->ms.DSCDelay[k];
- CalculatePrefetchSchedule_params->DPPCLKDelaySubtotalPlusCNVCFormater = mode_lib->ip.dppclk_delay_subtotal + mode_lib->ip.dppclk_delay_cnvc_formatter;
- CalculatePrefetchSchedule_params->DPPCLKDelaySCL = mode_lib->ip.dppclk_delay_scl;
- CalculatePrefetchSchedule_params->DPPCLKDelaySCLLBOnly = mode_lib->ip.dppclk_delay_scl_lb_only;
- CalculatePrefetchSchedule_params->DPPCLKDelayCNVCCursor = mode_lib->ip.dppclk_delay_cnvc_cursor;
- CalculatePrefetchSchedule_params->DISPCLKDelaySubtotal = mode_lib->ip.dispclk_delay_subtotal;
- CalculatePrefetchSchedule_params->DPP_RECOUT_WIDTH = (unsigned int)(mode_lib->ms.SwathWidthY[k] / display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
- CalculatePrefetchSchedule_params->OutputFormat = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format;
- CalculatePrefetchSchedule_params->MaxInterDCNTileRepeaters = mode_lib->ip.max_inter_dcn_tile_repeaters;
- CalculatePrefetchSchedule_params->VStartup = s->MaximumVStartup[k];
- CalculatePrefetchSchedule_params->MaxVStartup = s->MaximumVStartup[k];
- CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes;
- CalculatePrefetchSchedule_params->DynamicMetadataEnable = display_cfg->plane_descriptors[k].dynamic_meta_data.enable;
- CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ip.dynamic_metadata_vm_enabled;
- CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = display_cfg->plane_descriptors[k].dynamic_meta_data.lines_before_active_required;
- CalculatePrefetchSchedule_params->DynamicMetadataTransmittedBytes = display_cfg->plane_descriptors[k].dynamic_meta_data.transmitted_bytes;
- CalculatePrefetchSchedule_params->UrgentLatency = mode_lib->ms.UrgLatency;
- CalculatePrefetchSchedule_params->ExtraLatencyPrefetch = mode_lib->ms.ExtraLatencyPrefetch;
- CalculatePrefetchSchedule_params->TCalc = mode_lib->ms.TimeCalc;
- CalculatePrefetchSchedule_params->vm_bytes = mode_lib->ms.vm_bytes[k];
- CalculatePrefetchSchedule_params->PixelPTEBytesPerRow = mode_lib->ms.DPTEBytesPerRow[k];
- CalculatePrefetchSchedule_params->PrefetchSourceLinesY = mode_lib->ms.PrefetchLinesY[k];
- CalculatePrefetchSchedule_params->VInitPreFillY = mode_lib->ms.PrefillY[k];
- CalculatePrefetchSchedule_params->MaxNumSwathY = mode_lib->ms.MaxNumSwathY[k];
- CalculatePrefetchSchedule_params->PrefetchSourceLinesC = mode_lib->ms.PrefetchLinesC[k];
- CalculatePrefetchSchedule_params->VInitPreFillC = mode_lib->ms.PrefillC[k];
- CalculatePrefetchSchedule_params->MaxNumSwathC = mode_lib->ms.MaxNumSwathC[k];
- CalculatePrefetchSchedule_params->swath_width_luma_ub = mode_lib->ms.swath_width_luma_ub[k];
- CalculatePrefetchSchedule_params->swath_width_chroma_ub = mode_lib->ms.swath_width_chroma_ub[k];
- CalculatePrefetchSchedule_params->SwathHeightY = mode_lib->ms.SwathHeightY[k];
- CalculatePrefetchSchedule_params->SwathHeightC = mode_lib->ms.SwathHeightC[k];
- CalculatePrefetchSchedule_params->TWait = mode_lib->ms.TWait[k];
- CalculatePrefetchSchedule_params->Ttrip = mode_lib->ms.TripToMemory;
- CalculatePrefetchSchedule_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut;
- CalculatePrefetchSchedule_params->tdlut_pte_bytes_per_frame = s->tdlut_pte_bytes_per_frame[k];
- CalculatePrefetchSchedule_params->tdlut_bytes_per_frame = s->tdlut_bytes_per_frame[k];
- CalculatePrefetchSchedule_params->tdlut_opt_time = s->tdlut_opt_time[k];
- CalculatePrefetchSchedule_params->tdlut_drain_time = s->tdlut_drain_time[k];
- CalculatePrefetchSchedule_params->num_cursors = (display_cfg->plane_descriptors[k].cursor.cursor_width > 0);
- CalculatePrefetchSchedule_params->cursor_bytes_per_chunk = s->cursor_bytes_per_chunk[k];
- CalculatePrefetchSchedule_params->cursor_bytes_per_line = s->cursor_bytes_per_line[k];
- CalculatePrefetchSchedule_params->dcc_enable = display_cfg->plane_descriptors[k].surface.dcc.enable;
- CalculatePrefetchSchedule_params->mrq_present = mode_lib->ip.dcn_mrq_present;
- CalculatePrefetchSchedule_params->meta_row_bytes = mode_lib->ms.meta_row_bytes[k];
- CalculatePrefetchSchedule_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor[k];
-
- // output
- CalculatePrefetchSchedule_params->DSTXAfterScaler = &s->DSTXAfterScaler[k];
- CalculatePrefetchSchedule_params->DSTYAfterScaler = &s->DSTYAfterScaler[k];
- CalculatePrefetchSchedule_params->dst_y_prefetch = &mode_lib->ms.dst_y_prefetch[k];
- CalculatePrefetchSchedule_params->dst_y_per_vm_vblank = &mode_lib->ms.LinesForVM[k];
- CalculatePrefetchSchedule_params->dst_y_per_row_vblank = &mode_lib->ms.LinesForDPTERow[k];
- CalculatePrefetchSchedule_params->VRatioPrefetchY = &mode_lib->ms.VRatioPreY[k];
- CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->ms.VRatioPreC[k];
- CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->ms.RequiredPrefetchPixelDataBWLuma[k]; // prefetch_sw_bw_l
- CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->ms.RequiredPrefetchPixelDataBWChroma[k]; // prefetch_sw_bw_c
- CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->ms.NoTimeForDynamicMetadata[k];
- CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->ms.Tno_bw[k];
- CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->ms.Tno_bw_flip[k];
- CalculatePrefetchSchedule_params->prefetch_vmrow_bw = &mode_lib->ms.prefetch_vmrow_bw[k];
- CalculatePrefetchSchedule_params->Tdmdl_vm = &s->dummy_single[0];
- CalculatePrefetchSchedule_params->Tdmdl = &s->dummy_single[1];
- CalculatePrefetchSchedule_params->TSetup = &s->dummy_single[2];
- CalculatePrefetchSchedule_params->Tvm_trips = &s->Tvm_trips[k];
- CalculatePrefetchSchedule_params->Tr0_trips = &s->Tr0_trips[k];
- CalculatePrefetchSchedule_params->Tvm_trips_flip = &s->Tvm_trips_flip[k];
- CalculatePrefetchSchedule_params->Tr0_trips_flip = &s->Tr0_trips_flip[k];
- CalculatePrefetchSchedule_params->Tvm_trips_flip_rounded = &s->Tvm_trips_flip_rounded[k];
- CalculatePrefetchSchedule_params->Tr0_trips_flip_rounded = &s->Tr0_trips_flip_rounded[k];
- CalculatePrefetchSchedule_params->VUpdateOffsetPix = &s->dummy_integer[0];
- CalculatePrefetchSchedule_params->VUpdateWidthPix = &s->dummy_integer[1];
- CalculatePrefetchSchedule_params->VReadyOffsetPix = &s->dummy_integer[2];
- CalculatePrefetchSchedule_params->prefetch_cursor_bw = &mode_lib->ms.prefetch_cursor_bw[k];
-
- mode_lib->ms.NoTimeForPrefetch[k] = CalculatePrefetchSchedule(&mode_lib->scratch, CalculatePrefetchSchedule_params);
-
- mode_lib->ms.support.PrefetchSupported &= !mode_lib->ms.NoTimeForPrefetch[k];
- dml2_printf("DML::%s: k=%d, dst_y_per_vm_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_vm_vblank);
- dml2_printf("DML::%s: k=%d, dst_y_per_row_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_row_vblank);
- } // for k num_planes
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (mode_lib->ms.dst_y_prefetch[k] < 2.0
- || mode_lib->ms.LinesForVM[k] >= 32.0
- || mode_lib->ms.LinesForDPTERow[k] >= 16.0
- || mode_lib->ms.NoTimeForPrefetch[k] == true
- || s->DSTYAfterScaler[k] > 8) {
- mode_lib->ms.support.PrefetchSupported = false;
- dml2_printf("DML::%s: k=%d, dst_y_prefetch=%f (should not be < 2)\n", __func__, k, mode_lib->ms.dst_y_prefetch[k]);
- dml2_printf("DML::%s: k=%d, LinesForVM=%f (should not be >= 32)\n", __func__, k, mode_lib->ms.LinesForVM[k]);
- dml2_printf("DML::%s: k=%d, LinesForDPTERow=%f (should not be >= 16)\n", __func__, k, mode_lib->ms.LinesForDPTERow[k]);
- dml2_printf("DML::%s: k=%d, NoTimeForPrefetch=%d\n", __func__, k, mode_lib->ms.NoTimeForPrefetch[k]);
- dml2_printf("DML::%s: k=%d, DSTYAfterScaler=%d (should be <= 8)\n", __func__, k, s->DSTYAfterScaler[k]);
- }
- }
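
The loop above rejects a prefetch schedule that violates any of the fixed bounds named in the messages; a compact sketch of the same predicate (the helper name is mine):

#include <stdbool.h>

/* True when a plane's prefetch schedule stays inside the hard limits:
 * dst_y_prefetch >= 2 lines, LinesForVM < 32, LinesForDPTERow < 16,
 * DSTYAfterScaler <= 8, and the schedule itself fit in time. */
static bool prefetch_schedule_within_limits(double dst_y_prefetch,
					    double lines_for_vm,
					    double lines_for_dpte_row,
					    bool no_time_for_prefetch,
					    unsigned int dst_y_after_scaler)
{
	return dst_y_prefetch >= 2.0 &&
	       lines_for_vm < 32.0 &&
	       lines_for_dpte_row < 16.0 &&
	       !no_time_for_prefetch &&
	       dst_y_after_scaler <= 8;
}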
-
- mode_lib->ms.support.DynamicMetadataSupported = true;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (mode_lib->ms.NoTimeForDynamicMetadata[k] == true) {
- mode_lib->ms.support.DynamicMetadataSupported = false;
- }
- }
-
- mode_lib->ms.support.VRatioInPrefetchSupported = true;
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (mode_lib->ms.VRatioPreY[k] > __DML2_CALCS_MAX_VRATIO_PRE_ENHANCE_PREFETCH_ACC__ ||
- mode_lib->ms.VRatioPreC[k] > __DML2_CALCS_MAX_VRATIO_PRE_ENHANCE_PREFETCH_ACC__) {
- mode_lib->ms.support.VRatioInPrefetchSupported = false;
- dml2_printf("DML::%s: VRatioInPrefetchSupported = %u\n", __func__, mode_lib->ms.support.VRatioInPrefetchSupported);
- }
- }
-
- s->AnyLinesForVMOrRowTooLarge = false;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (mode_lib->ms.LinesForDPTERow[k] >= 16 || mode_lib->ms.LinesForVM[k] >= 32) {
- s->AnyLinesForVMOrRowTooLarge = true;
- }
- }
-
- // Only do the urgent vs. prefetch bandwidth check, the flip schedule check and the power-saving feature support check IF the prefetch schedule check is ok
- if (mode_lib->ms.support.PrefetchSupported) {
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- // Calculate Urgent burst factor for prefetch
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, Calling CalculateUrgentBurstFactor (for prefetch)\n", __func__, k);
- dml2_printf("DML::%s: k=%d, VRatioPreY=%f\n", __func__, k, mode_lib->ms.VRatioPreY[k]);
- dml2_printf("DML::%s: k=%d, VRatioPreC=%f\n", __func__, k, mode_lib->ms.VRatioPreC[k]);
-#endif
- double line_time_us = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- CalculateUrgentBurstFactor(
- &display_cfg->plane_descriptors[k],
- mode_lib->ms.swath_width_luma_ub[k],
- mode_lib->ms.swath_width_chroma_ub[k],
- mode_lib->ms.SwathHeightY[k],
- mode_lib->ms.SwathHeightC[k],
- line_time_us,
- mode_lib->ms.UrgLatency,
- mode_lib->ms.VRatioPreY[k],
- mode_lib->ms.VRatioPreC[k],
- mode_lib->ms.BytePerPixelInDETY[k],
- mode_lib->ms.BytePerPixelInDETC[k],
- mode_lib->ms.DETBufferSizeY[k],
- mode_lib->ms.DETBufferSizeC[k],
- /* Output */
- &mode_lib->ms.UrgentBurstFactorLumaPre[k],
- &mode_lib->ms.UrgentBurstFactorChromaPre[k],
- &mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]);
- }
-
- // Calculate urgent bandwidth required, both urgent and non-urgent peak bandwidth
- // assume flip bw is 0 at this point
- for (k = 0; k < mode_lib->ms.num_active_planes; k++)
- mode_lib->ms.final_flip_bw[k] = 0;
-
- calculate_peak_bandwidth_required(
- &mode_lib->scratch,
- mode_lib->ms.support.urg_vactive_bandwidth_required,
- mode_lib->ms.support.urg_bandwidth_required,
- mode_lib->ms.support.non_urg_bandwidth_required,
-
- display_cfg,
- 0, // inc_flip_bw
- mode_lib->ms.num_active_planes,
- mode_lib->ms.NoOfDPP,
- mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0,
- mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1,
- mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0,
- mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1,
- mode_lib->ms.mall_prefetch_sdp_overhead_factor,
- mode_lib->ms.mall_prefetch_dram_overhead_factor,
-
- mode_lib->ms.SurfaceReadBandwidthLuma,
- mode_lib->ms.SurfaceReadBandwidthChroma,
- mode_lib->ms.RequiredPrefetchPixelDataBWLuma,
- mode_lib->ms.RequiredPrefetchPixelDataBWChroma,
- mode_lib->ms.cursor_bw,
- mode_lib->ms.dpte_row_bw,
- mode_lib->ms.meta_row_bw,
- mode_lib->ms.prefetch_cursor_bw,
- mode_lib->ms.prefetch_vmrow_bw,
- mode_lib->ms.final_flip_bw,
- mode_lib->ms.UrgentBurstFactorLuma,
- mode_lib->ms.UrgentBurstFactorChroma,
- mode_lib->ms.UrgentBurstFactorCursor,
- mode_lib->ms.UrgentBurstFactorLumaPre,
- mode_lib->ms.UrgentBurstFactorChromaPre,
- mode_lib->ms.UrgentBurstFactorCursorPre);
-
- // Check urg peak bandwidth against available urg bw
- // check at SDP and DRAM, for all soc states (SVP prefetch and Sys Active)
- check_urgent_bandwidth_support(
- &s->dummy_single[0], // double* frac_urg_bandwidth
- &s->dummy_single[1], // double* frac_urg_bandwidth_mall
- &mode_lib->ms.support.UrgVactiveBandwidthSupport,
- &mode_lib->ms.support.PrefetchBandwidthSupported,
-
- mode_lib->soc.mall_allocated_for_dcn_mbytes,
- mode_lib->ms.support.non_urg_bandwidth_required,
- mode_lib->ms.support.urg_vactive_bandwidth_required,
- mode_lib->ms.support.urg_bandwidth_required,
- mode_lib->ms.support.urg_bandwidth_available);
-
- mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.PrefetchBandwidthSupported;
- dml2_printf("DML::%s: PrefetchBandwidthSupported=%0d\n", __func__, mode_lib->ms.support.PrefetchBandwidthSupported);
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]) {
- mode_lib->ms.support.PrefetchSupported = false;
- dml2_printf("DML::%s: k=%d, NotEnoughUrgentLatencyHidingPre=%d\n", __func__, k, mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]);
- }
- }
-
-
- // Both prefetch schedule and BW okay
- if (mode_lib->ms.support.PrefetchSupported == true && mode_lib->ms.support.VRatioInPrefetchSupported == true) {
- mode_lib->ms.BandwidthAvailableForImmediateFlip =
- get_bandwidth_available_for_immediate_flip(dml2_core_internal_soc_state_sys_active,
- mode_lib->ms.support.urg_bandwidth_required, // no flip
- mode_lib->ms.support.urg_bandwidth_available);
-
- mode_lib->ms.TotImmediateFlipBytes = 0;
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (display_cfg->plane_descriptors[k].immediate_flip) {
- s->per_pipe_flip_bytes[k] = get_pipe_flip_bytes(
- s->HostVMInefficiencyFactor,
- mode_lib->ms.vm_bytes[k],
- mode_lib->ms.DPTEBytesPerRow[k],
- mode_lib->ms.meta_row_bytes[k]);
- } else {
- s->per_pipe_flip_bytes[k] = 0;
- }
- mode_lib->ms.TotImmediateFlipBytes += s->per_pipe_flip_bytes[k] * mode_lib->ms.NoOfDPP[k];
-
- }
-
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- CalculateFlipSchedule(
- &mode_lib->scratch,
- display_cfg->plane_descriptors[k].immediate_flip,
- 1, // use_lb_flip_bw
- s->HostVMInefficiencyFactor,
- s->Tvm_trips_flip[k],
- s->Tr0_trips_flip[k],
- s->Tvm_trips_flip_rounded[k],
- s->Tr0_trips_flip_rounded[k],
- display_cfg->gpuvm_enable,
- mode_lib->ms.vm_bytes[k],
- mode_lib->ms.DPTEBytesPerRow[k],
- mode_lib->ms.BandwidthAvailableForImmediateFlip,
- mode_lib->ms.TotImmediateFlipBytes,
- display_cfg->plane_descriptors[k].pixel_format,
- (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)),
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
- mode_lib->ms.Tno_bw_flip[k],
- mode_lib->ms.dpte_row_height[k],
- mode_lib->ms.dpte_row_height_chroma[k],
- mode_lib->ms.use_one_row_for_frame_flip[k],
- mode_lib->ip.max_flip_time_us,
- s->per_pipe_flip_bytes[k],
- mode_lib->ms.meta_row_bytes[k],
- s->meta_row_height_luma[k],
- s->meta_row_height_chroma[k],
- mode_lib->ip.dcn_mrq_present && display_cfg->plane_descriptors[k].surface.dcc.enable,
-
- /* Output */
- &mode_lib->ms.dst_y_per_vm_flip[k],
- &mode_lib->ms.dst_y_per_row_flip[k],
- &mode_lib->ms.final_flip_bw[k],
- &mode_lib->ms.ImmediateFlipSupportedForPipe[k]);
- }
-
- calculate_peak_bandwidth_required(
- &mode_lib->scratch,
- s->dummy_bw,
- mode_lib->ms.support.urg_bandwidth_required_flip,
- mode_lib->ms.support.non_urg_bandwidth_required_flip,
-
- // Input
- display_cfg,
- 1, // inc_flip_bw
- mode_lib->ms.num_active_planes,
- mode_lib->ms.NoOfDPP,
-
- mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0,
- mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1,
- mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0,
- mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1,
- mode_lib->ms.mall_prefetch_sdp_overhead_factor,
- mode_lib->ms.mall_prefetch_dram_overhead_factor,
-
- mode_lib->ms.SurfaceReadBandwidthLuma,
- mode_lib->ms.SurfaceReadBandwidthChroma,
- mode_lib->ms.RequiredPrefetchPixelDataBWLuma,
- mode_lib->ms.RequiredPrefetchPixelDataBWChroma,
- mode_lib->ms.cursor_bw,
- mode_lib->ms.dpte_row_bw,
- mode_lib->ms.meta_row_bw,
- mode_lib->ms.prefetch_cursor_bw,
- mode_lib->ms.prefetch_vmrow_bw,
- mode_lib->ms.final_flip_bw,
- mode_lib->ms.UrgentBurstFactorLuma,
- mode_lib->ms.UrgentBurstFactorChroma,
- mode_lib->ms.UrgentBurstFactorCursor,
- mode_lib->ms.UrgentBurstFactorLumaPre,
- mode_lib->ms.UrgentBurstFactorChromaPre,
- mode_lib->ms.UrgentBurstFactorCursorPre);
-
- calculate_immediate_flip_bandwidth_support(
- &s->dummy_single[0], // double* frac_urg_bandwidth_flip
- &mode_lib->ms.support.ImmediateFlipSupport,
-
- dml2_core_internal_soc_state_sys_active,
- mode_lib->ms.support.urg_bandwidth_required_flip,
- mode_lib->ms.support.non_urg_bandwidth_required_flip,
- mode_lib->ms.support.urg_bandwidth_available);
-
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->plane_descriptors[k].immediate_flip == true && mode_lib->ms.ImmediateFlipSupportedForPipe[k] == false)
- mode_lib->ms.support.ImmediateFlipSupport = false;
- }
-
- } else { // if prefetch is not supported, assume iflip is not supported either
- mode_lib->ms.support.ImmediateFlipSupport = false;
- }
- } // prefetch schedule
- }
-
- s->mSOCParameters.UrgentLatency = mode_lib->ms.UrgLatency;
- s->mSOCParameters.ExtraLatency = mode_lib->ms.ExtraLatency;
- s->mSOCParameters.ExtraLatency_sr = mode_lib->ms.ExtraLatency_sr;
- s->mSOCParameters.WritebackLatency = mode_lib->soc.qos_parameters.writeback.base_latency_us;
- s->mSOCParameters.DRAMClockChangeLatency = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
- s->mSOCParameters.FCLKChangeLatency = mode_lib->soc.power_management_parameters.fclk_change_blackout_us;
- s->mSOCParameters.SRExitTime = mode_lib->soc.power_management_parameters.stutter_exit_latency_us;
- s->mSOCParameters.SREnterPlusExitTime = mode_lib->soc.power_management_parameters.stutter_enter_plus_exit_latency_us;
- s->mSOCParameters.SRExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_exit_latency_us;
- s->mSOCParameters.SREnterPlusExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_enter_plus_exit_latency_us;
- s->mSOCParameters.USRRetrainingLatency = 0; // FIXME_STAGE2: no USR related bbox value
- s->mSOCParameters.SMNLatency = 0; // FIXME_STAGE2
-
- CalculateWatermarks_params->display_cfg = display_cfg;
- CalculateWatermarks_params->USRRetrainingRequired = false /*FIXME_STAGE2 was: mode_lib->ms.policy.USRRetrainingRequired, no new dml2 replacement*/;
- CalculateWatermarks_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes;
- CalculateWatermarks_params->MaxLineBufferLines = mode_lib->ip.max_line_buffer_lines;
- CalculateWatermarks_params->LineBufferSize = mode_lib->ip.line_buffer_size_bits;
- CalculateWatermarks_params->WritebackInterfaceBufferSize = mode_lib->ip.writeback_interface_buffer_size_kbytes;
- CalculateWatermarks_params->DCFCLK = mode_lib->ms.DCFCLK;
- CalculateWatermarks_params->SynchronizeTimings = display_cfg->overrides.synchronize_timings;
- CalculateWatermarks_params->SynchronizeDRRDisplaysForUCLKPStateChange = display_cfg->overrides.synchronize_ddr_displays_for_uclk_pstate_change;
- CalculateWatermarks_params->dpte_group_bytes = mode_lib->ms.dpte_group_bytes;
- CalculateWatermarks_params->mmSOCParameters = s->mSOCParameters;
- CalculateWatermarks_params->WritebackChunkSize = mode_lib->ip.writeback_chunk_size_kbytes;
- CalculateWatermarks_params->SOCCLK = mode_lib->ms.SOCCLK;
- CalculateWatermarks_params->DCFClkDeepSleep = mode_lib->ms.dcfclk_deepsleep;
- CalculateWatermarks_params->DETBufferSizeY = mode_lib->ms.DETBufferSizeY;
- CalculateWatermarks_params->DETBufferSizeC = mode_lib->ms.DETBufferSizeC;
- CalculateWatermarks_params->SwathHeightY = mode_lib->ms.SwathHeightY;
- CalculateWatermarks_params->SwathHeightC = mode_lib->ms.SwathHeightC;
- //CalculateWatermarks_params->LBBitPerPixel = 57; // FIXME_STAGE2, need a new ip param?
- CalculateWatermarks_params->SwathWidthY = mode_lib->ms.SwathWidthY;
- CalculateWatermarks_params->SwathWidthC = mode_lib->ms.SwathWidthC;
- CalculateWatermarks_params->DPPPerSurface = mode_lib->ms.NoOfDPP;
- CalculateWatermarks_params->BytePerPixelDETY = mode_lib->ms.BytePerPixelInDETY;
- CalculateWatermarks_params->BytePerPixelDETC = mode_lib->ms.BytePerPixelInDETC;
- CalculateWatermarks_params->DSTXAfterScaler = s->DSTXAfterScaler;
- CalculateWatermarks_params->DSTYAfterScaler = s->DSTYAfterScaler;
- CalculateWatermarks_params->UnboundedRequestEnabled = mode_lib->ms.UnboundedRequestEnabled;
- CalculateWatermarks_params->CompressedBufferSizeInkByte = mode_lib->ms.CompressedBufferSizeInkByte;
- CalculateWatermarks_params->meta_row_height_l = s->meta_row_height_luma;
- CalculateWatermarks_params->meta_row_height_c = s->meta_row_height_chroma;
-
- // Output
- CalculateWatermarks_params->Watermark = &s->dummy_watermark; // Watermarks *Watermark
- CalculateWatermarks_params->DRAMClockChangeSupport = mode_lib->ms.support.DRAMClockChangeSupport;
- CalculateWatermarks_params->global_dram_clock_change_supported = &mode_lib->ms.support.global_dram_clock_change_supported;
- CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = &s->dummy_single_array[0]; // double *MaxActiveDRAMClockChangeLatencySupported[]
- CalculateWatermarks_params->SubViewportLinesNeededInMALL = mode_lib->ms.SubViewportLinesNeededInMALL; // unsigned int SubViewportLinesNeededInMALL[]
- CalculateWatermarks_params->FCLKChangeSupport = mode_lib->ms.support.FCLKChangeSupport;
- CalculateWatermarks_params->global_fclk_change_supported = &mode_lib->ms.support.global_fclk_change_supported;
- CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &s->dummy_single[0]; // double *MaxActiveFCLKChangeLatencySupported
- CalculateWatermarks_params->USRRetrainingSupport = &mode_lib->ms.support.USRRetrainingSupport;
- CalculateWatermarks_params->VActiveLatencyHidingMargin = mode_lib->ms.VActiveLatencyHidingMargin;
- CalculateWatermarks_params->VActiveLatencyHidingUs = mode_lib->ms.VActiveLatencyHidingUs;
-
- CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(&mode_lib->scratch, CalculateWatermarks_params);
- }
-
- // End of Prefetch Check
-
- dml2_printf("DML::%s: Done prefetch calculation\n", __func__);
-
- //Re-ordering Buffer Support Check
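- // max_urgent_latency_us: UCLK-domain urgent latency (with UMC margin) plus MALL overhead and the round trip to the furthest CS, both in the fabric clock domain (the latter with fabric transport margin)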
- mode_lib->ms.support.max_urgent_latency_us
- = mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].maximum_latency_when_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin / 100.0)
- + mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles / mode_lib->ms.FabricClock
- + mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->ms.FabricClock
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin / 100.0);
-
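- // ROB support: for dcn4x QoS the re-ordering buffer (minus one pixel chunk) must be able to absorb at least max_urgent_latency_us worth of data at the required SDP bandwidth; for older QoS types the ROB must cover the loaded round-trip latency worth of fabric return data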
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) {
- if (((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024
- / mode_lib->ms.support.non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]) >= mode_lib->ms.support.max_urgent_latency_us) {
- mode_lib->ms.support.ROBSupport = true;
- } else {
- mode_lib->ms.support.ROBSupport = false;
- }
- } else {
- if (mode_lib->ip.rob_buffer_size_kbytes * 1024 >= mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles * mode_lib->soc.fabric_datapath_to_dcn_data_return_bytes) {
- mode_lib->ms.support.ROBSupport = true;
- } else {
- mode_lib->ms.support.ROBSupport = false;
- }
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.max_urgent_latency_us);
- dml2_printf("DML::%s: ROBSupport = %u\n", __func__, mode_lib->ms.support.ROBSupport);
-#endif
-
- /*Mode Support, Voltage State and SOC Configuration*/
- {
- // s->dram_clock_change_support = 1;
- // s->f_clock_change_support = 1;
-
- if (mode_lib->ms.support.ScaleRatioAndTapsSupport
- && mode_lib->ms.support.SourceFormatPixelAndScanSupport
- && mode_lib->ms.support.ViewportSizeSupport
- && !mode_lib->ms.support.LinkRateDoesNotMatchDPVersion
- && !mode_lib->ms.support.LinkRateForMultistreamNotIndicated
- && !mode_lib->ms.support.BPPForMultistreamNotIndicated
- && !mode_lib->ms.support.MultistreamWithHDMIOreDP
- && !mode_lib->ms.support.ExceededMultistreamSlots
- && !mode_lib->ms.support.MSOOrODMSplitWithNonDPLink
- && !mode_lib->ms.support.NotEnoughLanesForMSO
- //&& mode_lib->ms.support.LinkCapacitySupport == true // FIXME_STAGE2
- && !mode_lib->ms.support.P2IWith420
- && !mode_lib->ms.support.DSCOnlyIfNecessaryWithBPP
- && !mode_lib->ms.support.DSC422NativeNotSupported
- && !mode_lib->ms.support.NotEnoughDSCUnits
- && !mode_lib->ms.support.NotEnoughDSCSlices
- && !mode_lib->ms.support.ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe
- && !mode_lib->ms.support.InvalidCombinationOfMALLUseForPStateAndStaticScreen
- && !mode_lib->ms.support.DSCCLKRequiredMoreThanSupported
- && mode_lib->ms.support.PixelsPerLinePerDSCUnitSupport
- && !mode_lib->ms.support.DTBCLKRequiredMoreThanSupported
- && !mode_lib->ms.support.InvalidCombinationOfMALLUseForPState
- && mode_lib->ms.support.ROBSupport
- && mode_lib->ms.support.OutstandingRequestsSupport
- && mode_lib->ms.support.OutstandingRequestsUrgencyAvoidance
- && mode_lib->ms.support.DISPCLK_DPPCLK_Support
- && mode_lib->ms.support.TotalAvailablePipesSupport
- && mode_lib->ms.support.NumberOfOTGSupport
- && mode_lib->ms.support.NumberOfHDMIFRLSupport
- && mode_lib->ms.support.NumberOfDP2p0Support
- && mode_lib->ms.support.EnoughWritebackUnits
- && mode_lib->ms.support.WritebackLatencySupport
- && mode_lib->ms.support.WritebackScaleRatioAndTapsSupport
- && mode_lib->ms.support.CursorSupport
- && mode_lib->ms.support.PitchSupport
- && !mode_lib->ms.support.ViewportExceedsSurface
- && mode_lib->ms.support.PrefetchSupported
- && mode_lib->ms.support.EnoughUrgentLatencyHidingSupport
- && mode_lib->ms.support.AvgBandwidthSupport
- && mode_lib->ms.support.DynamicMetadataSupported
- && mode_lib->ms.support.VRatioInPrefetchSupported
- && mode_lib->ms.support.PTEBufferSizeNotExceeded
- && mode_lib->ms.support.DCCMetaBufferSizeNotExceeded
- && !mode_lib->ms.support.ExceededMALLSize
- && ((!display_cfg->hostvm_enable && !s->ImmediateFlipRequired) || mode_lib->ms.support.ImmediateFlipSupport)) {
- // && s->dram_clock_change_support == true
- // && s->f_clock_change_support == true
- // && (/*FIXME_STAGE2 was: mode_lib->ms.policy.USRRetrainingRequired, no new dml2 replacement || */ mode_lib->ms.support.USRRetrainingSupport)) {
- dml2_printf("DML::%s: mode is supported\n", __func__);
- mode_lib->ms.support.ModeSupport = true;
- } else {
- dml2_printf("DML::%s: mode is NOT supported\n", __func__);
- mode_lib->ms.support.ModeSupport = false;
- }
- }
-
- // Since mode_support now works on one particular power state, there is only one state index (index 0).
- dml2_printf("DML::%s: ModeSupport = %u\n", __func__, mode_lib->ms.support.ModeSupport);
- dml2_printf("DML::%s: ImmediateFlipSupport = %u\n", __func__, mode_lib->ms.support.ImmediateFlipSupport);
-
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- mode_lib->ms.support.MPCCombineEnable[k] = mode_lib->ms.MPCCombine[k];
- mode_lib->ms.support.DPPPerSurface[k] = mode_lib->ms.NoOfDPP[k];
- }
-
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->plane_descriptors[k].stream_index == k) {
- mode_lib->ms.support.ODMMode[k] = mode_lib->ms.ODMMode[k];
- } else {
- mode_lib->ms.support.ODMMode[k] = dml2_odm_mode_bypass;
- }
-
- mode_lib->ms.support.DSCEnabled[k] = mode_lib->ms.RequiresDSC[k];
- mode_lib->ms.support.FECEnabled[k] = mode_lib->ms.RequiresFEC[k];
- mode_lib->ms.support.OutputBpp[k] = mode_lib->ms.OutputBpp[k];
- mode_lib->ms.support.OutputType[k] = mode_lib->ms.OutputType[k];
- mode_lib->ms.support.OutputRate[k] = mode_lib->ms.OutputRate[k];
-
-#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%d, ODMMode = %u\n", __func__, k, mode_lib->ms.support.ODMMode[k]);
- dml2_printf("DML::%s: k=%d, DSCEnabled = %u\n", __func__, k, mode_lib->ms.support.DSCEnabled[k]);
-#endif
- }
-
-#if defined(__DML_VBA_DEBUG__)
- if (!mode_lib->ms.support.ModeSupport)
- dml2_print_dml_mode_support_info(&mode_lib->ms.support, true);
- dml2_printf("DML::%s: is_mode_support = %u (min_clk_index=%d)\n", __func__, mode_lib->ms.support.ModeSupport, in_out_params->min_clk_index);
- dml2_printf("DML::%s: --- DONE --- \n", __func__);
-#endif
-
- if (mode_lib->ms.support.ModeSupport) {
- *in_out_params->out_evaluation_info = in_out_params->mode_lib->ms.support;
- return true;
- } else {
- return false;
- }
-}
-
-static void dml2_print_dml_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only)
-{
- dml2_printf("DML: ===================================== \n");
- dml2_printf("DML: DML_MODE_SUPPORT_INFO_ST\n");
- if (!fail_only || support->ImmediateFlipSupport == 0)
- dml2_printf("DML: support: ImmediateFlipSupport = 0x%x\n", support->ImmediateFlipSupport);
- if (!fail_only || support->WritebackLatencySupport == 0)
- dml2_printf("DML: support: WritebackLatencySupport = 0x%x\n", support->WritebackLatencySupport);
- if (!fail_only || support->ScaleRatioAndTapsSupport == 0)
- dml2_printf("DML: support: ScaleRatioAndTapsSupport = 0x%x\n", support->ScaleRatioAndTapsSupport);
- if (!fail_only || support->SourceFormatPixelAndScanSupport == 0)
- dml2_printf("DML: support: SourceFormatPixelAndScanSupport = 0x%x\n", support->SourceFormatPixelAndScanSupport);
- if (!fail_only || support->P2IWith420 == 1)
- dml2_printf("DML: support: P2IWith420 = 0x%x\n", support->P2IWith420);
- if (!fail_only || support->DSCOnlyIfNecessaryWithBPP == 1)
- dml2_printf("DML: support: DSCOnlyIfNecessaryWithBPP = 0x%x\n", support->DSCOnlyIfNecessaryWithBPP);
- if (!fail_only || support->DSC422NativeNotSupported == 1)
- dml2_printf("DML: support: DSC422NativeNotSupported = 0x%x\n", support->DSC422NativeNotSupported);
- if (!fail_only || support->LinkRateDoesNotMatchDPVersion == 1)
- dml2_printf("DML: support: LinkRateDoesNotMatchDPVersion = 0x%x\n", support->LinkRateDoesNotMatchDPVersion);
- if (!fail_only || support->LinkRateForMultistreamNotIndicated == 1)
- dml2_printf("DML: support: LinkRateForMultistreamNotIndicated = 0x%x\n", support->LinkRateForMultistreamNotIndicated);
- if (!fail_only || support->BPPForMultistreamNotIndicated == 1)
- dml2_printf("DML: support: BPPForMultistreamNotIndicated = 0x%x\n", support->BPPForMultistreamNotIndicated);
- if (!fail_only || support->MultistreamWithHDMIOreDP == 1)
- dml2_printf("DML: support: MultistreamWithHDMIOreDP = 0x%x\n", support->MultistreamWithHDMIOreDP);
- if (!fail_only || support->MSOOrODMSplitWithNonDPLink == 1)
- dml2_printf("DML: support: MSOOrODMSplitWithNonDPLink = 0x%x\n", support->MSOOrODMSplitWithNonDPLink);
- if (!fail_only || support->NotEnoughLanesForMSO == 1)
- dml2_printf("DML: support: NotEnoughLanesForMSO = 0x%x\n", support->NotEnoughLanesForMSO);
- if (!fail_only || support->NumberOfOTGSupport == 0)
- dml2_printf("DML: support: NumberOfOTGSupport = 0x%x\n", support->NumberOfOTGSupport);
- if (!fail_only || support->NumberOfHDMIFRLSupport == 0)
- dml2_printf("DML: support: NumberOfHDMIFRLSupport = 0x%x\n", support->NumberOfHDMIFRLSupport);
- if (!fail_only || support->NumberOfDP2p0Support == 0)
- dml2_printf("DML: support: NumberOfDP2p0Support = 0x%x\n", support->NumberOfDP2p0Support);
- if (!fail_only || support->WritebackScaleRatioAndTapsSupport == 0)
- dml2_printf("DML: support: WritebackScaleRatioAndTapsSupport = 0x%x\n", support->WritebackScaleRatioAndTapsSupport);
- if (!fail_only || support->CursorSupport == 0)
- dml2_printf("DML: support: CursorSupport = 0x%x\n", support->CursorSupport);
- if (!fail_only || support->PitchSupport == 0)
- dml2_printf("DML: support: PitchSupport = 0x%x\n", support->PitchSupport);
- if (!fail_only || support->ViewportExceedsSurface == 1)
- dml2_printf("DML: support: ViewportExceedsSurface = 0x%x\n", support->ViewportExceedsSurface);
- if (!fail_only || support->ExceededMALLSize == 1)
- dml2_printf("DML: support: ExceededMALLSize = 0x%x\n", support->ExceededMALLSize);
- if (!fail_only || support->EnoughWritebackUnits == 0)
- dml2_printf("DML: support: EnoughWritebackUnits = 0x%x\n", support->EnoughWritebackUnits);
- if (!fail_only || support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe == 1)
- dml2_printf("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = 0x%x\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
- if (!fail_only || support->InvalidCombinationOfMALLUseForPStateAndStaticScreen == 1)
- dml2_printf("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = 0x%x\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
- if (!fail_only || support->InvalidCombinationOfMALLUseForPState == 1)
- dml2_printf("DML: support: InvalidCombinationOfMALLUseForPState = 0x%x\n", support->InvalidCombinationOfMALLUseForPState);
- if (!fail_only || support->ExceededMultistreamSlots == 1)
- dml2_printf("DML: support: ExceededMultistreamSlots = 0x%x\n", support->ExceededMultistreamSlots);
- if (!fail_only || support->NotEnoughDSCUnits == 1)
- dml2_printf("DML: support: NotEnoughDSCUnits = 0x%x\n", support->NotEnoughDSCUnits);
- if (!fail_only || support->NotEnoughDSCSlices == 1)
- dml2_printf("DML: support: NotEnoughDSCSlices = 0x%x\n", support->NotEnoughDSCSlices);
- if (!fail_only || support->PixelsPerLinePerDSCUnitSupport == 0)
- dml2_printf("DML: support: PixelsPerLinePerDSCUnitSupport = 0x%x\n", support->PixelsPerLinePerDSCUnitSupport);
- if (!fail_only || support->DSCCLKRequiredMoreThanSupported == 1)
- dml2_printf("DML: support: DSCCLKRequiredMoreThanSupported = 0x%x\n", support->DSCCLKRequiredMoreThanSupported);
- if (!fail_only || support->DTBCLKRequiredMoreThanSupported == 1)
- dml2_printf("DML: support: DTBCLKRequiredMoreThanSupported = 0x%x\n", support->DTBCLKRequiredMoreThanSupported);
- if (!fail_only || support->LinkCapacitySupport == 0)
- dml2_printf("DML: support: LinkCapacitySupport = 0x%x\n", support->LinkCapacitySupport);
- if (!fail_only || support->ROBSupport == 0)
- dml2_printf("DML: support: ROBSupport = %d\n", support->ROBSupport);
- if (!fail_only || support->OutstandingRequestsSupport == 0)
- dml2_printf("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport);
- if (!fail_only || support->OutstandingRequestsUrgencyAvoidance == 0)
- dml2_printf("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance);
- if (!fail_only || support->PTEBufferSizeNotExceeded == 0)
- dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
- if (!fail_only || support->AvgBandwidthSupport == 0)
- dml2_printf("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport);
- if (!fail_only || support->EnoughUrgentLatencyHidingSupport == 0)
- dml2_printf("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport);
- if (!fail_only || support->PrefetchSupported == 0)
- dml2_printf("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported);
- if (!fail_only || support->DynamicMetadataSupported == 0)
- dml2_printf("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
- if (!fail_only || support->VRatioInPrefetchSupported == 0)
- dml2_printf("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
- if (!fail_only || support->DISPCLK_DPPCLK_Support == 0)
- dml2_printf("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support);
- if (!fail_only || support->TotalAvailablePipesSupport == 0)
- dml2_printf("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport);
- if (!fail_only || support->ModeSupport == 0)
- dml2_printf("DML: support: ModeSupport = %d\n", support->ModeSupport);
- if (!fail_only || support->ViewportSizeSupport == 0)
- dml2_printf("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport);
- dml2_printf("DML: ===================================== \n");
-}
-
-static void get_stream_output_bpp(double *out_bpp, const struct dml2_display_cfg *display_cfg)
-{
- for (unsigned int k = 0; k < display_cfg->num_planes; k++) {
- double bpc = (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.bpc;
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable == dml2_dsc_disable) {
- switch (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format) {
- case dml2_444:
- out_bpp[k] = bpc * 3;
- break;
- case dml2_s422:
- out_bpp[k] = bpc * 2;
- break;
- case dml2_n422:
- out_bpp[k] = bpc * 2;
- break;
- case dml2_420:
- default:
- out_bpp[k] = bpc * 1.5;
- break;
- }
- } else if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable == dml2_dsc_enable) {
- out_bpp[k] = (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.dsc_compressed_bpp_x16 / 16;
- } else {
- out_bpp[k] = 0;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d bpc=%f\n", __func__, k, bpc);
- dml2_printf("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable);
- dml2_printf("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]);
-#endif
- }
-}
-
-static unsigned int dml_round_to_multiple(unsigned int num, unsigned int multiple, bool up)
-{
- unsigned int remainder;
-
- if (multiple == 0)
- return num;
-
- remainder = num % multiple;
- if (remainder == 0)
- return num;
-
- if (up)
- return (num + multiple - remainder);
- else
- return (num - remainder);
-}
-
-static unsigned int dml_get_num_active_pipes(unsigned int num_planes, const struct core_display_cfg_support_info *cfg_support_info)
-{
- unsigned int num_active_pipes = 0;
-
- for (unsigned int k = 0; k < num_planes; k++) {
- num_active_pipes = num_active_pipes + (unsigned int)cfg_support_info->plane_support_info[k].dpps_used;
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes);
-#endif
- return num_active_pipes;
-}
-
-static void dml_calc_pipe_plane_mapping(const struct core_display_cfg_support_info *cfg_support_info, unsigned int *pipe_plane)
-{
- unsigned int pipe_idx = 0;
-
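- // Default every pipe to "no plane", then hand out consecutive pipe indices to each plane according to how many DPPs it uses.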
- for (unsigned int k = 0; k < DML2_MAX_PLANES; ++k) {
- pipe_plane[k] = __DML2_CALCS_PIPE_NO_PLANE__;
- }
-
- for (unsigned int plane_idx = 0; plane_idx < DML2_MAX_PLANES; plane_idx++) {
- for (int i = 0; i < cfg_support_info->plane_support_info[plane_idx].dpps_used; i++) {
- pipe_plane[pipe_idx] = plane_idx;
- pipe_idx++;
- }
- }
-}
-
-static bool dml_is_phantom_pipe(const struct dml2_plane_parameters *plane_cfg)
-{
- bool is_phantom = false;
-
- if (plane_cfg->overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe ||
- plane_cfg->overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe_no_data_return) {
- is_phantom = true;
- }
-
- return is_phantom;
-}
-
-static bool dml_get_is_phantom_pipe(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, unsigned int pipe_idx)
-{
- unsigned int plane_idx = mode_lib->mp.pipe_plane[pipe_idx];
-
- bool is_phantom = dml_is_phantom_pipe(&display_cfg->plane_descriptors[plane_idx]);
- dml2_printf("DML::%s: pipe_idx=%d legacy_svp_config=%0d is_phantom=%d\n", __func__, pipe_idx, display_cfg->plane_descriptors[plane_idx].overrides.legacy_svp_config, is_phantom);
- return is_phantom;
-}
-
-static void CalculateMaxDETAndMinCompressedBufferSize(
- unsigned int ConfigReturnBufferSizeInKByte,
- unsigned int ConfigReturnBufferSegmentSizeInKByte,
- unsigned int ROBBufferSizeInKByte,
- unsigned int MaxNumDPP,
- unsigned int nomDETInKByteOverrideEnable, // VBA_DELTA, allow DV to override default DET size
- unsigned int nomDETInKByteOverrideValue, // VBA_DELTA
- bool is_mrq_present,
-
- // Output
- unsigned int *MaxTotalDETInKByte,
- unsigned int *nomDETInKByte,
- unsigned int *MinCompressedBufferSizeInKByte)
-{
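- // With MRQ present, up to 4/5 of the combined return buffer and ROB can be allocated as DET (rounded up to a 64KB multiple); otherwise one return buffer segment is held back from the DET pool.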
- if (is_mrq_present)
- *MaxTotalDETInKByte = (unsigned int)math_ceil2((double)(ConfigReturnBufferSizeInKByte + ROBBufferSizeInKByte) * 4 / 5, 64);
- else
- *MaxTotalDETInKByte = ConfigReturnBufferSizeInKByte - ConfigReturnBufferSegmentSizeInKByte;
-
- *nomDETInKByte = (unsigned int)(math_floor2((double)*MaxTotalDETInKByte / (double)MaxNumDPP, ConfigReturnBufferSegmentSizeInKByte));
- *MinCompressedBufferSizeInKByte = ConfigReturnBufferSizeInKByte - *MaxTotalDETInKByte;
-
-#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: is_mrq_present = %u\n", __func__, is_mrq_present);
- dml2_printf("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, ConfigReturnBufferSizeInKByte);
- dml2_printf("DML::%s: ROBBufferSizeInKByte = %u\n", __func__, ROBBufferSizeInKByte);
- dml2_printf("DML::%s: MaxNumDPP = %u\n", __func__, MaxNumDPP);
- dml2_printf("DML::%s: MaxTotalDETInKByte = %u\n", __func__, *MaxTotalDETInKByte);
- dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, *nomDETInKByte);
- dml2_printf("DML::%s: MinCompressedBufferSizeInKByte = %u\n", __func__, *MinCompressedBufferSizeInKByte);
-#endif
-
- if (nomDETInKByteOverrideEnable) {
- *nomDETInKByte = nomDETInKByteOverrideValue;
- dml2_printf("DML::%s: nomDETInKByte = %u (overrided)\n", __func__, *nomDETInKByte);
- }
-}
-
-static void PixelClockAdjustmentForProgressiveToInterlaceUnit(const struct dml2_display_cfg *display_cfg, bool ptoi_supported, double *PixelClockBackEnd)
-{
- //unsigned int num_active_planes = display_cfg->num_planes;
-
- //Progressive To Interlace Unit Effect
- for (unsigned int k = 0; k < display_cfg->num_planes; ++k) {
- PixelClockBackEnd[k] = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced == 1 && ptoi_supported == true) {
- // FIXME_STAGE2... can sw pass the pixel rate for interlaced directly
- //display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz = 2 * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz;
- }
- }
-}
-
-bool dml2_core_shared_is_420(enum dml2_source_format_class source_format)
-{
- bool val = false;
-
- switch (source_format) {
- case dml2_444_8:
- val = 0;
- break;
- case dml2_444_16:
- val = 0;
- break;
- case dml2_444_32:
- val = 0;
- break;
- case dml2_444_64:
- val = 0;
- break;
- case dml2_420_8:
- val = 1;
- break;
- case dml2_420_10:
- val = 1;
- break;
- case dml2_420_12:
- val = 1;
- break;
- case dml2_rgbe_alpha:
- val = 0;
- break;
- case dml2_rgbe:
- val = 0;
- break;
- case dml2_mono_8:
- val = 0;
- break;
- case dml2_mono_16:
- val = 0;
- break;
- default:
- DML2_ASSERT(0);
- break;
- }
- return val;
-}
-
-static unsigned int dml_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode)
-{
- switch (sw_mode) {
- case (dml2_sw_linear):
- return 256;
- case (dml2_sw_256b_2d):
- return 256;
- case (dml2_sw_4kb_2d):
- return 4096;
- case (dml2_sw_64kb_2d):
- return 65536;
- case (dml2_sw_256kb_2d):
- return 262144;
- case (dml2_gfx11_sw_linear):
- return 256;
- case (dml2_gfx11_sw_64kb_d):
- return 65536;
- case (dml2_gfx11_sw_64kb_d_t):
- return 65536;
- case (dml2_gfx11_sw_64kb_d_x):
- return 65536;
- case (dml2_gfx11_sw_64kb_r_x):
- return 65536;
- case (dml2_gfx11_sw_256kb_d_x):
- return 262144;
- case (dml2_gfx11_sw_256kb_r_x):
- return 262144;
- default:
- DML2_ASSERT(0);
- return 256;
- }
-}
-
-const char *dml2_core_internal_bw_type_str(enum dml2_core_internal_bw_type bw_type)
-{
- switch (bw_type) {
- case (dml2_core_internal_bw_sdp):
- return("dml2_core_internal_bw_sdp");
- case (dml2_core_internal_bw_dram):
- return("dml2_core_internal_bw_dram");
- case (dml2_core_internal_bw_max):
- return("dml2_core_internal_bw_max");
- default:
- return("dml2_core_internal_bw_unknown");
- }
-}
-
-const char *dml2_core_internal_soc_state_type_str(enum dml2_core_internal_soc_state_type dml2_core_internal_soc_state_type)
-{
- switch (dml2_core_internal_soc_state_type) {
- case (dml2_core_internal_soc_state_sys_idle):
- return("dml2_core_internal_soc_state_sys_idle");
- case (dml2_core_internal_soc_state_sys_active):
- return("dml2_core_internal_soc_state_sys_active");
- case (dml2_core_internal_soc_state_svp_prefetch):
- return("dml2_core_internal_soc_state_svp_prefetch");
- case dml2_core_internal_soc_state_max:
- default:
- return("dml2_core_internal_soc_state_unknown");
- }
-}
-
-static bool dml_is_vertical_rotation(enum dml2_rotation_angle Scan)
-{
- bool is_vert = false;
- if (Scan == dml2_rotation_90 || Scan == dml2_rotation_270) {
- is_vert = true;
- } else {
- is_vert = false;
- }
- return is_vert;
-}
-
-static unsigned int dml_get_gfx_version(enum dml2_swizzle_mode sw_mode)
-{
- unsigned int version = 0;
-
- if (sw_mode == dml2_sw_linear ||
- sw_mode == dml2_sw_256b_2d ||
- sw_mode == dml2_sw_4kb_2d ||
- sw_mode == dml2_sw_64kb_2d ||
- sw_mode == dml2_sw_256kb_2d) {
- version = 12;
- } else if (sw_mode == dml2_gfx11_sw_linear ||
- sw_mode == dml2_gfx11_sw_64kb_d ||
- sw_mode == dml2_gfx11_sw_64kb_d_t ||
- sw_mode == dml2_gfx11_sw_64kb_d_x ||
- sw_mode == dml2_gfx11_sw_64kb_r_x ||
- sw_mode == dml2_gfx11_sw_256kb_d_x ||
- sw_mode == dml2_gfx11_sw_256kb_r_x) {
- version = 11;
- } else {
- dml2_printf("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
- DML2_ASSERT(0);
- }
-
- return version;
-}
-
-static void CalculateBytePerPixelAndBlockSizes(
- enum dml2_source_format_class SourcePixelFormat,
- enum dml2_swizzle_mode SurfaceTiling,
- unsigned int pitch_y,
- unsigned int pitch_c,
-
- // Output
- unsigned int *BytePerPixelY,
- unsigned int *BytePerPixelC,
- double *BytePerPixelDETY,
- double *BytePerPixelDETC,
- unsigned int *BlockHeight256BytesY,
- unsigned int *BlockHeight256BytesC,
- unsigned int *BlockWidth256BytesY,
- unsigned int *BlockWidth256BytesC,
- unsigned int *MacroTileHeightY,
- unsigned int *MacroTileHeightC,
- unsigned int *MacroTileWidthY,
- unsigned int *MacroTileWidthC,
- bool *surf_linear128_l,
- bool *surf_linear128_c)
-{
- *BytePerPixelDETY = 0;
- *BytePerPixelDETC = 0;
- *BytePerPixelY = 0;
- *BytePerPixelC = 0;
-
- if (SourcePixelFormat == dml2_444_64) {
- *BytePerPixelDETY = 8;
- *BytePerPixelDETC = 0;
- *BytePerPixelY = 8;
- *BytePerPixelC = 0;
- } else if (SourcePixelFormat == dml2_444_32 || SourcePixelFormat == dml2_rgbe) {
- *BytePerPixelDETY = 4;
- *BytePerPixelDETC = 0;
- *BytePerPixelY = 4;
- *BytePerPixelC = 0;
- } else if (SourcePixelFormat == dml2_444_16 || SourcePixelFormat == dml2_mono_16) {
- *BytePerPixelDETY = 2;
- *BytePerPixelDETC = 0;
- *BytePerPixelY = 2;
- *BytePerPixelC = 0;
- } else if (SourcePixelFormat == dml2_444_8 || SourcePixelFormat == dml2_mono_8) {
- *BytePerPixelDETY = 1;
- *BytePerPixelDETC = 0;
- *BytePerPixelY = 1;
- *BytePerPixelC = 0;
- } else if (SourcePixelFormat == dml2_rgbe_alpha) {
- *BytePerPixelDETY = 4;
- *BytePerPixelDETC = 1;
- *BytePerPixelY = 4;
- *BytePerPixelC = 1;
- } else if (SourcePixelFormat == dml2_420_8) {
- *BytePerPixelDETY = 1;
- *BytePerPixelDETC = 2;
- *BytePerPixelY = 1;
- *BytePerPixelC = 2;
- } else if (SourcePixelFormat == dml2_420_12) {
- *BytePerPixelDETY = 2;
- *BytePerPixelDETC = 4;
- *BytePerPixelY = 2;
- *BytePerPixelC = 4;
- } else if (SourcePixelFormat == dml2_420_10) {
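- // 10-bit 4:2:0: DET occupancy is modeled at 4/3 (luma) and 8/3 (chroma pair) bytes per pixel, i.e. three 10-bit samples packed per 32 bits, while the memory fetch uses 16-bit containers (2/4 bytes per pixel).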
- *BytePerPixelDETY = (double)(4.0 / 3);
- *BytePerPixelDETC = (double)(8.0 / 3);
- *BytePerPixelY = 2;
- *BytePerPixelC = 4;
- } else {
- dml2_printf("ERROR: DML::%s: SourcePixelFormat = %u not supported!\n", __func__, SourcePixelFormat);
- DML2_ASSERT(0);
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SourcePixelFormat = %u\n", __func__, SourcePixelFormat);
- dml2_printf("DML::%s: BytePerPixelDETY = %f\n", __func__, *BytePerPixelDETY);
- dml2_printf("DML::%s: BytePerPixelDETC = %f\n", __func__, *BytePerPixelDETC);
- dml2_printf("DML::%s: BytePerPixelY = %u\n", __func__, *BytePerPixelY);
- dml2_printf("DML::%s: BytePerPixelC = %u\n", __func__, *BytePerPixelC);
- dml2_printf("DML::%s: pitch_y = %u\n", __func__, pitch_y);
- dml2_printf("DML::%s: pitch_c = %u\n", __func__, pitch_c);
- dml2_printf("DML::%s: surf_linear128_l = %u\n", __func__, *surf_linear128_l);
- dml2_printf("DML::%s: surf_linear128_c = %u\n", __func__, *surf_linear128_c);
-#endif
-
- if (dml_get_gfx_version(SurfaceTiling) == 11) {
- *surf_linear128_l = 0;
- *surf_linear128_c = 0;
- } else {
- if (SurfaceTiling == dml2_sw_linear) {
- *surf_linear128_l = (((pitch_y * *BytePerPixelY) % 256) != 0);
-
- if (dml2_core_shared_is_420(SourcePixelFormat) || SourcePixelFormat == dml2_rgbe_alpha)
- *surf_linear128_c = (((pitch_c * *BytePerPixelC) % 256) != 0);
- }
- }
-
- if (!(dml2_core_shared_is_420(SourcePixelFormat) || SourcePixelFormat == dml2_rgbe_alpha)) {
- if (SurfaceTiling == dml2_sw_linear) {
- *BlockHeight256BytesY = 1;
- } else if (SourcePixelFormat == dml2_444_64) {
- *BlockHeight256BytesY = 4;
- } else if (SourcePixelFormat == dml2_444_8) {
- *BlockHeight256BytesY = 16;
- } else {
- *BlockHeight256BytesY = 8;
- }
- *BlockWidth256BytesY = 256U / *BytePerPixelY / *BlockHeight256BytesY;
- *BlockHeight256BytesC = 0;
- *BlockWidth256BytesC = 0;
- } else { // dual plane
- if (SurfaceTiling == dml2_sw_linear) {
- *BlockHeight256BytesY = 1;
- *BlockHeight256BytesC = 1;
- } else if (SourcePixelFormat == dml2_rgbe_alpha) {
- *BlockHeight256BytesY = 8;
- *BlockHeight256BytesC = 16;
- } else if (SourcePixelFormat == dml2_420_8) {
- *BlockHeight256BytesY = 16;
- *BlockHeight256BytesC = 8;
- } else {
- *BlockHeight256BytesY = 8;
- *BlockHeight256BytesC = 8;
- }
- *BlockWidth256BytesY = 256U / *BytePerPixelY / *BlockHeight256BytesY;
- *BlockWidth256BytesC = 256U / *BytePerPixelC / *BlockHeight256BytesC;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: BlockWidth256BytesY = %u\n", __func__, *BlockWidth256BytesY);
- dml2_printf("DML::%s: BlockHeight256BytesY = %u\n", __func__, *BlockHeight256BytesY);
- dml2_printf("DML::%s: BlockWidth256BytesC = %u\n", __func__, *BlockWidth256BytesC);
- dml2_printf("DML::%s: BlockHeight256BytesC = %u\n", __func__, *BlockHeight256BytesC);
-#endif
-
- if (dml_get_gfx_version(SurfaceTiling) == 11) {
- if (SurfaceTiling == dml2_gfx11_sw_linear) {
- *MacroTileHeightY = *BlockHeight256BytesY;
- *MacroTileWidthY = 256 / *BytePerPixelY / *MacroTileHeightY;
- *MacroTileHeightC = *BlockHeight256BytesC;
- if (*MacroTileHeightC == 0) {
- *MacroTileWidthC = 0;
- } else {
- *MacroTileWidthC = 256 / *BytePerPixelC / *MacroTileHeightC;
- }
- } else if (SurfaceTiling == dml2_gfx11_sw_64kb_d || SurfaceTiling == dml2_gfx11_sw_64kb_d_t || SurfaceTiling == dml2_gfx11_sw_64kb_d_x || SurfaceTiling == dml2_gfx11_sw_64kb_r_x) {
- *MacroTileHeightY = 16 * *BlockHeight256BytesY;
- *MacroTileWidthY = 65536 / *BytePerPixelY / *MacroTileHeightY;
- *MacroTileHeightC = 16 * *BlockHeight256BytesC;
- if (*MacroTileHeightC == 0) {
- *MacroTileWidthC = 0;
- } else {
- *MacroTileWidthC = 65536 / *BytePerPixelC / *MacroTileHeightC;
- }
- } else {
- *MacroTileHeightY = 32 * *BlockHeight256BytesY;
- *MacroTileWidthY = 65536 * 4 / *BytePerPixelY / *MacroTileHeightY;
- *MacroTileHeightC = 32 * *BlockHeight256BytesC;
- if (*MacroTileHeightC == 0) {
- *MacroTileWidthC = 0;
- } else {
- *MacroTileWidthC = 65536 * 4 / *BytePerPixelC / *MacroTileHeightC;
- }
- }
- } else {
- unsigned int macro_tile_size_bytes = dml_get_tile_block_size_bytes(SurfaceTiling);
- unsigned int macro_tile_scale = 1; // macro tile to 256B req scaling
-
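- // macro_tile_scale is the per-dimension ratio of the macro tile to a 256B request block (a 4KB tile is 4x4 requests, 64KB is 16x16, 256KB is 32x32)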
- if (SurfaceTiling == dml2_sw_linear) {
- macro_tile_scale = 1;
- } else if (SurfaceTiling == dml2_sw_4kb_2d) {
- macro_tile_scale = 4;
- } else if (SurfaceTiling == dml2_sw_64kb_2d) {
- macro_tile_scale = 16;
- } else if (SurfaceTiling == dml2_sw_256kb_2d) {
- macro_tile_scale = 32;
- } else {
- dml2_printf("ERROR: Invalid SurfaceTiling setting! val=%u\n", SurfaceTiling);
- DML2_ASSERT(0);
- }
-
- *MacroTileHeightY = macro_tile_scale * *BlockHeight256BytesY;
- *MacroTileWidthY = macro_tile_size_bytes / *BytePerPixelY / *MacroTileHeightY;
- *MacroTileHeightC = macro_tile_scale * *BlockHeight256BytesC;
- if (*MacroTileHeightC == 0) {
- *MacroTileWidthC = 0;
- } else {
- *MacroTileWidthC = macro_tile_size_bytes / *BytePerPixelC / *MacroTileHeightC;
- }
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: MacroTileWidthY = %u\n", __func__, *MacroTileWidthY);
- dml2_printf("DML::%s: MacroTileHeightY = %u\n", __func__, *MacroTileHeightY);
- dml2_printf("DML::%s: MacroTileWidthC = %u\n", __func__, *MacroTileWidthC);
- dml2_printf("DML::%s: MacroTileHeightC = %u\n", __func__, *MacroTileHeightC);
-#endif
-}
-
-static void CalculateSinglePipeDPPCLKAndSCLThroughput(
- double HRatio,
- double HRatioChroma,
- double VRatio,
- double VRatioChroma,
- double MaxDCHUBToPSCLThroughput,
- double MaxPSCLToLBThroughput,
- double PixelClock,
- enum dml2_source_format_class SourcePixelFormat,
- unsigned int HTaps,
- unsigned int HTapsChroma,
- unsigned int VTaps,
- unsigned int VTapsChroma,
-
- // Output
- double *PSCL_THROUGHPUT,
- double *PSCL_THROUGHPUT_CHROMA,
- double *DPPCLKUsingSingleDPP)
-{
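- // Horizontal downscale (HRatio > 1) scales the PSCL-to-LB throughput with HRatio, derated by the number of 6-tap groups; both cases are capped by the DCHUB-to-PSCL limit.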
- if (HRatio > 1) {
- *PSCL_THROUGHPUT = math_min2(MaxDCHUBToPSCLThroughput, MaxPSCLToLBThroughput * HRatio / math_ceil2((double)HTaps / 6.0, 1.0));
- } else {
- *PSCL_THROUGHPUT = math_min2(MaxDCHUBToPSCLThroughput, MaxPSCLToLBThroughput);
- }
-
- double DPPCLKUsingSingleDPPLuma;
- double DPPCLKUsingSingleDPPChroma;
-
- DPPCLKUsingSingleDPPLuma = PixelClock * math_max3(VTaps / 6 * math_min2(1, HRatio), HRatio * VRatio / *PSCL_THROUGHPUT, 1);
-
- if ((HTaps > 6 || VTaps > 6) && DPPCLKUsingSingleDPPLuma < 2 * PixelClock)
- DPPCLKUsingSingleDPPLuma = 2 * PixelClock;
-
- if (!dml2_core_shared_is_420(SourcePixelFormat) && SourcePixelFormat != dml2_rgbe_alpha) {
- *PSCL_THROUGHPUT_CHROMA = 0;
- *DPPCLKUsingSingleDPP = DPPCLKUsingSingleDPPLuma;
- } else {
- if (HRatioChroma > 1) {
- *PSCL_THROUGHPUT_CHROMA = math_min2(MaxDCHUBToPSCLThroughput, MaxPSCLToLBThroughput * HRatioChroma / math_ceil2((double)HTapsChroma / 6.0, 1.0));
- } else {
- *PSCL_THROUGHPUT_CHROMA = math_min2(MaxDCHUBToPSCLThroughput, MaxPSCLToLBThroughput);
- }
- DPPCLKUsingSingleDPPChroma = PixelClock * math_max3(VTapsChroma / 6 * math_min2(1, HRatioChroma),
- HRatioChroma * VRatioChroma / *PSCL_THROUGHPUT_CHROMA, 1);
- if ((HTapsChroma > 6 || VTapsChroma > 6) && DPPCLKUsingSingleDPPChroma < 2 * PixelClock)
- DPPCLKUsingSingleDPPChroma = 2 * PixelClock;
- *DPPCLKUsingSingleDPP = math_max2(DPPCLKUsingSingleDPPLuma, DPPCLKUsingSingleDPPChroma);
- }
-}
-
-static void CalculateSwathWidth(
- const struct dml2_display_cfg *display_cfg,
- bool ForceSingleDPP,
- unsigned int NumberOfActiveSurfaces,
- enum dml2_odm_mode ODMMode[],
- unsigned int BytePerPixY[],
- unsigned int BytePerPixC[],
- unsigned int Read256BytesBlockHeightY[],
- unsigned int Read256BytesBlockHeightC[],
- unsigned int Read256BytesBlockWidthY[],
- unsigned int Read256BytesBlockWidthC[],
- bool surf_linear128_l[],
- bool surf_linear128_c[],
- unsigned int DPPPerSurface[],
-
- // Output
- unsigned int req_per_swath_ub_l[],
- unsigned int req_per_swath_ub_c[],
- unsigned int SwathWidthSingleDPPY[],
- unsigned int SwathWidthSingleDPPC[],
- unsigned int SwathWidthY[], // per-pipe
- unsigned int SwathWidthC[], // per-pipe
- unsigned int MaximumSwathHeightY[],
- unsigned int MaximumSwathHeightC[],
- unsigned int swath_width_luma_ub[], // per-pipe
- unsigned int swath_width_chroma_ub[]) // per-pipe
-{
- enum dml2_odm_mode MainSurfaceODMMode;
- double odm_hactive_factor = 1.0;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ForceSingleDPP = %u\n", __func__, ForceSingleDPP);
- dml2_printf("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
-#endif
-
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (!dml_is_vertical_rotation(display_cfg->plane_descriptors[k].composition.rotation_angle)) {
- SwathWidthSingleDPPY[k] = (unsigned int)display_cfg->plane_descriptors[k].composition.viewport.plane0.width;
- } else {
- SwathWidthSingleDPPY[k] = (unsigned int)display_cfg->plane_descriptors[k].composition.viewport.plane0.height;
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u ViewportWidth=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width);
- dml2_printf("DML::%s: k=%u ViewportHeight=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.height);
- dml2_printf("DML::%s: k=%u DPPPerSurface=%u\n", __func__, k, DPPPerSurface[k]);
-#endif
-
- MainSurfaceODMMode = ODMMode[k];
- for (unsigned int j = 0; j < NumberOfActiveSurfaces; ++j) {
- if (display_cfg->plane_descriptors[k].stream_index == j) {
- MainSurfaceODMMode = ODMMode[j];
- }
- }
-
- if (ForceSingleDPP) {
- SwathWidthY[k] = SwathWidthSingleDPPY[k];
- } else {
- if (MainSurfaceODMMode == dml2_odm_mode_combine_4to1)
- odm_hactive_factor = 4.0;
- else if (MainSurfaceODMMode == dml2_odm_mode_combine_3to1)
- odm_hactive_factor = 3.0;
- else if (MainSurfaceODMMode == dml2_odm_mode_combine_2to1)
- odm_hactive_factor = 2.0;
-
- if (MainSurfaceODMMode == dml2_odm_mode_combine_4to1 || MainSurfaceODMMode == dml2_odm_mode_combine_3to1 || MainSurfaceODMMode == dml2_odm_mode_combine_2to1) {
- SwathWidthY[k] = (unsigned int)(math_min2((double)SwathWidthSingleDPPY[k], math_round((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active / odm_hactive_factor * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio)));
- } else if (DPPPerSurface[k] == 2) {
- SwathWidthY[k] = SwathWidthSingleDPPY[k] / 2;
- } else {
- SwathWidthY[k] = SwathWidthSingleDPPY[k];
- }
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u HActive=%u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active);
- dml2_printf("DML::%s: k=%u HRatio=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
- dml2_printf("DML::%s: k=%u MainSurfaceODMMode=%u\n", __func__, k, MainSurfaceODMMode);
- dml2_printf("DML::%s: k=%u SwathWidthSingleDPPY=%u\n", __func__, k, SwathWidthSingleDPPY[k]);
- dml2_printf("DML::%s: k=%u SwathWidthY=%u\n", __func__, k, SwathWidthY[k]);
-#endif
-
- if (dml2_core_shared_is_420(display_cfg->plane_descriptors[k].pixel_format)) {
- SwathWidthC[k] = SwathWidthY[k] / 2;
- SwathWidthSingleDPPC[k] = SwathWidthSingleDPPY[k] / 2;
- } else {
- SwathWidthC[k] = SwathWidthY[k];
- SwathWidthSingleDPPC[k] = SwathWidthSingleDPPY[k];
- }
-
- if (ForceSingleDPP == true) {
- SwathWidthY[k] = SwathWidthSingleDPPY[k];
- SwathWidthC[k] = SwathWidthSingleDPPC[k];
- }
-
- unsigned int req_width_horz_y = Read256BytesBlockWidthY[k];
- unsigned int req_width_horz_c = Read256BytesBlockWidthC[k];
-
- if (surf_linear128_l[k])
- req_width_horz_y = req_width_horz_y / 2;
-
- if (surf_linear128_c[k])
- req_width_horz_c = req_width_horz_c / 2;
-
- unsigned int surface_width_ub_l = (unsigned int)math_ceil2((double)display_cfg->plane_descriptors[k].surface.plane0.width, req_width_horz_y);
- unsigned int surface_height_ub_l = (unsigned int)math_ceil2((double)display_cfg->plane_descriptors[k].surface.plane0.height, Read256BytesBlockHeightY[k]);
- unsigned int surface_width_ub_c = (unsigned int)math_ceil2((double)display_cfg->plane_descriptors[k].surface.plane1.width, req_width_horz_c);
- unsigned int surface_height_ub_c = (unsigned int)math_ceil2((double)display_cfg->plane_descriptors[k].surface.plane1.height, Read256BytesBlockHeightC[k]);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u surface_width_ub_l=%u\n", __func__, k, surface_width_ub_l);
- dml2_printf("DML::%s: k=%u surface_height_ub_l=%u\n", __func__, k, surface_height_ub_l);
- dml2_printf("DML::%s: k=%u surface_width_ub_c=%u\n", __func__, k, surface_width_ub_c);
- dml2_printf("DML::%s: k=%u surface_height_ub_c=%u\n", __func__, k, surface_height_ub_c);
- dml2_printf("DML::%s: k=%u req_width_horz_y=%u\n", __func__, k, req_width_horz_y);
- dml2_printf("DML::%s: k=%u req_width_horz_c=%u\n", __func__, k, req_width_horz_c);
- dml2_printf("DML::%s: k=%u Read256BytesBlockWidthY=%u\n", __func__, k, Read256BytesBlockWidthY[k]);
- dml2_printf("DML::%s: k=%u Read256BytesBlockHeightY=%u\n", __func__, k, Read256BytesBlockHeightY[k]);
- dml2_printf("DML::%s: k=%u Read256BytesBlockWidthC=%u\n", __func__, k, Read256BytesBlockWidthC[k]);
- dml2_printf("DML::%s: k=%u Read256BytesBlockHeightC=%u\n", __func__, k, Read256BytesBlockHeightC[k]);
- dml2_printf("DML::%s: k=%u req_width_horz_y=%u\n", __func__, k, req_width_horz_y);
- dml2_printf("DML::%s: k=%u req_width_horz_c=%u\n", __func__, k, req_width_horz_c);
- dml2_printf("DML::%s: k=%u ViewportStationary=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.stationary);
- dml2_printf("DML::%s: k=%u DPPPerSurface=%u\n", __func__, k, DPPPerSurface[k]);
-#endif
-
- req_per_swath_ub_l[k] = 0;
- req_per_swath_ub_c[k] = 0;
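- // Round the swath width up to whole 256B request blocks; for a stationary viewport on a single DPP the rounding is anchored to the actual viewport start, otherwise worst-case alignment (one extra request block) is assumed.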
- if (!dml_is_vertical_rotation(display_cfg->plane_descriptors[k].composition.rotation_angle)) {
- MaximumSwathHeightY[k] = Read256BytesBlockHeightY[k];
- MaximumSwathHeightC[k] = Read256BytesBlockHeightC[k];
- if (display_cfg->plane_descriptors[k].composition.viewport.stationary && DPPPerSurface[k] == 1) {
- swath_width_luma_ub[k] = (unsigned int)(math_min2(surface_width_ub_l, math_floor2(display_cfg->plane_descriptors[k].composition.viewport.plane0.x_start + SwathWidthY[k] + req_width_horz_y - 1, req_width_horz_y) - math_floor2(display_cfg->plane_descriptors[k].composition.viewport.plane0.x_start, req_width_horz_y)));
- } else {
- swath_width_luma_ub[k] = (unsigned int)(math_min2(surface_width_ub_l, math_ceil2((double)SwathWidthY[k] - 1, req_width_horz_y) + req_width_horz_y));
- }
- req_per_swath_ub_l[k] = swath_width_luma_ub[k] / req_width_horz_y;
-
- if (BytePerPixC[k] > 0) {
- if (display_cfg->plane_descriptors[k].composition.viewport.stationary && DPPPerSurface[k] == 1) {
- swath_width_chroma_ub[k] = (unsigned int)(math_min2(surface_width_ub_c, math_floor2(display_cfg->plane_descriptors[k].composition.viewport.plane1.y_start + SwathWidthC[k] + req_width_horz_c - 1, req_width_horz_c) - math_floor2(display_cfg->plane_descriptors[k].composition.viewport.plane1.y_start, req_width_horz_c)));
- } else {
- swath_width_chroma_ub[k] = (unsigned int)(math_min2(surface_width_ub_c, math_ceil2((double)SwathWidthC[k] - 1, req_width_horz_c) + req_width_horz_c));
- }
- req_per_swath_ub_c[k] = swath_width_chroma_ub[k] / req_width_horz_c;
- } else {
- swath_width_chroma_ub[k] = 0;
- }
- } else {
- MaximumSwathHeightY[k] = Read256BytesBlockWidthY[k];
- MaximumSwathHeightC[k] = Read256BytesBlockWidthC[k];
-
- if (display_cfg->plane_descriptors[k].composition.viewport.stationary && DPPPerSurface[k] == 1) {
- swath_width_luma_ub[k] = (unsigned int)(math_min2(surface_height_ub_l, math_floor2(display_cfg->plane_descriptors[k].composition.viewport.plane0.y_start + SwathWidthY[k] + Read256BytesBlockHeightY[k] - 1, Read256BytesBlockHeightY[k]) - math_floor2(display_cfg->plane_descriptors[k].composition.viewport.plane0.y_start, Read256BytesBlockHeightY[k])));
- } else {
- swath_width_luma_ub[k] = (unsigned int)(math_min2(surface_height_ub_l, math_ceil2((double)SwathWidthY[k] - 1, Read256BytesBlockHeightY[k]) + Read256BytesBlockHeightY[k]));
- }
- req_per_swath_ub_l[k] = swath_width_luma_ub[k] / Read256BytesBlockHeightY[k];
- if (BytePerPixC[k] > 0) {
- if (display_cfg->plane_descriptors[k].composition.viewport.stationary && DPPPerSurface[k] == 1) {
- swath_width_chroma_ub[k] = (unsigned int)(math_min2(surface_height_ub_c, math_floor2(display_cfg->plane_descriptors[k].composition.viewport.plane1.y_start + SwathWidthC[k] + Read256BytesBlockHeightC[k] - 1, Read256BytesBlockHeightC[k]) - math_floor2(display_cfg->plane_descriptors[k].composition.viewport.plane1.y_start, Read256BytesBlockHeightC[k])));
- } else {
- swath_width_chroma_ub[k] = (unsigned int)(math_min2(surface_height_ub_c, math_ceil2((double)SwathWidthC[k] - 1, Read256BytesBlockHeightC[k]) + Read256BytesBlockHeightC[k]));
- }
- req_per_swath_ub_c[k] = swath_width_chroma_ub[k] / Read256BytesBlockHeightC[k];
- } else {
- swath_width_chroma_ub[k] = 0;
- }
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u swath_width_luma_ub=%u\n", __func__, k, swath_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u swath_width_chroma_ub=%u\n", __func__, k, swath_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathHeightY=%u\n", __func__, k, MaximumSwathHeightY[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathHeightC=%u\n", __func__, k, MaximumSwathHeightC[k]);
- dml2_printf("DML::%s: k=%u req_per_swath_ub_l=%u\n", __func__, k, req_per_swath_ub_l[k]);
- dml2_printf("DML::%s: k=%u req_per_swath_ub_c=%u\n", __func__, k, req_per_swath_ub_c[k]);
-#endif
-
- }
-}
-
-static bool UnboundedRequest(bool unb_req_force_en, bool unb_req_force_val, unsigned int TotalNumberOfActiveDPP, bool NoChromaOrLinear)
-{
- bool unb_req_ok = false;
- bool unb_req_en = false;
-
- unb_req_ok = (TotalNumberOfActiveDPP == 1 && NoChromaOrLinear);
- unb_req_en = unb_req_ok;
-
- if (unb_req_force_en) {
- unb_req_en = unb_req_force_val && unb_req_ok;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: unb_req_force_en = %u\n", __func__, unb_req_force_en);
- dml2_printf("DML::%s: unb_req_force_val = %u\n", __func__, unb_req_force_val);
- dml2_printf("DML::%s: unb_req_ok = %u\n", __func__, unb_req_ok);
- dml2_printf("DML::%s: unb_req_en = %u\n", __func__, unb_req_en);
-#endif
- return (unb_req_en);
-}
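-
-// Illustrative example (not part of the original source): unbounded requesting is only
-// eligible when a single DPP is active and the surface has no chroma plane and is not
-// linear (unb_req_ok). With unb_req_force_en = 1 and unb_req_force_val = 0 the override
-// disables it even when eligible; the override can never enable it when unb_req_ok is false.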
-
-static void CalculateDETBufferSize(struct dml2_core_shared_calculate_det_buffer_size_params *p)
-{
- unsigned int DETBufferSizePoolInKByte;
- unsigned int NextDETBufferPieceInKByte;
- bool DETPieceAssignedToThisSurfaceAlready[DML2_MAX_PLANES];
- bool NextPotentialSurfaceToAssignDETPieceFound;
- unsigned int NextSurfaceToAssignDETPiece;
- double TotalBandwidth;
- double BandwidthOfSurfacesNotAssignedDETPiece;
- unsigned int max_minDET;
- unsigned int minDET;
- unsigned int minDET_pipe;
- unsigned int TotalBandwidthPerStream[DML2_MAX_PLANES] = { 0 };
- unsigned int TotalPixelRate = 0;
- unsigned int DETBudgetPerStream[DML2_MAX_PLANES] = { 0 };
- unsigned int RemainingDETBudgetPerStream[DML2_MAX_PLANES] = { 0 };
- unsigned int IdealDETBudget, DeltaDETBudget;
- bool MinimizeReallocationSuccess = false;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ForceSingleDPP = %u\n", __func__, p->ForceSingleDPP);
- dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, p->nomDETInKByte);
- dml2_printf("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, p->NumberOfActiveSurfaces);
- dml2_printf("DML::%s: UnboundedRequestEnabled = %u\n", __func__, p->UnboundedRequestEnabled);
- dml2_printf("DML::%s: MaxTotalDETInKByte = %u\n", __func__, p->MaxTotalDETInKByte);
- dml2_printf("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, p->ConfigReturnBufferSizeInKByte);
- dml2_printf("DML::%s: MinCompressedBufferSizeInKByte = %u\n", __func__, p->MinCompressedBufferSizeInKByte);
- dml2_printf("DML::%s: CompressedBufferSegmentSizeInkByte = %u\n", __func__, p->CompressedBufferSegmentSizeInkByte);
-#endif
-
- // Note: Will use default det size if that fits 2 swaths
- if (p->UnboundedRequestEnabled) {
- if (p->display_cfg->plane_descriptors[0].overrides.det_size_override_kb > 0) {
- p->DETBufferSizeInKByte[0] = p->display_cfg->plane_descriptors[0].overrides.det_size_override_kb;
- } else {
- p->DETBufferSizeInKByte[0] = (unsigned int)math_max2(128.0, math_ceil2(2.0 * ((double)p->full_swath_bytes_l[0] + (double)p->full_swath_bytes_c[0]) / 1024.0, p->ConfigReturnBufferSegmentSizeInkByte));
- }
- *p->CompressedBufferSizeInkByte = p->ConfigReturnBufferSizeInKByte - p->DETBufferSizeInKByte[0];
- } else {
- DETBufferSizePoolInKByte = p->MaxTotalDETInKByte;
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- p->DETBufferSizeInKByte[k] = 0;
- if (dml2_core_shared_is_420(p->display_cfg->plane_descriptors[k].pixel_format)) {
- max_minDET = p->nomDETInKByte - p->ConfigReturnBufferSegmentSizeInkByte;
- } else {
- max_minDET = p->nomDETInKByte;
- }
- minDET = 128;
- minDET_pipe = 0;
-
-			// add DET resource until it can hold 2 full swaths
- while (minDET <= max_minDET && minDET_pipe == 0) {
- if (2.0 * ((double)p->full_swath_bytes_l[k] + (double)p->full_swath_bytes_c[k]) / 1024.0 <= minDET)
- minDET_pipe = minDET;
- minDET = minDET + p->ConfigReturnBufferSegmentSizeInkByte;
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u minDET = %u\n", __func__, k, minDET);
- dml2_printf("DML::%s: k=%u max_minDET = %u\n", __func__, k, max_minDET);
- dml2_printf("DML::%s: k=%u minDET_pipe = %u\n", __func__, k, minDET_pipe);
- dml2_printf("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]);
-#endif
-
- if (minDET_pipe == 0) {
- minDET_pipe = (unsigned int)(math_max2(128, math_ceil2(((double)p->full_swath_bytes_l[k] + (double)p->full_swath_bytes_c[k]) / 1024.0, p->ConfigReturnBufferSegmentSizeInkByte)));
-#ifdef __DML_VBA_DEBUG__
-				dml2_printf("DML::%s: k=%u minDET_pipe = %u (assume each plane takes half DET)\n", __func__, k, minDET_pipe);
-#endif
- }
-
- if (dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) {
- p->DETBufferSizeInKByte[k] = 0;
- } else if (p->display_cfg->plane_descriptors[k].overrides.det_size_override_kb > 0) {
- p->DETBufferSizeInKByte[k] = p->display_cfg->plane_descriptors[k].overrides.det_size_override_kb;
- DETBufferSizePoolInKByte = DETBufferSizePoolInKByte - (p->ForceSingleDPP ? 1 : p->DPPPerSurface[k]) * p->display_cfg->plane_descriptors[k].overrides.det_size_override_kb;
- } else if ((p->ForceSingleDPP ? 1 : p->DPPPerSurface[k]) * minDET_pipe <= DETBufferSizePoolInKByte) {
- p->DETBufferSizeInKByte[k] = minDET_pipe;
- DETBufferSizePoolInKByte = DETBufferSizePoolInKByte - (p->ForceSingleDPP ? 1 : p->DPPPerSurface[k]) * minDET_pipe;
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u DPPPerSurface = %u\n", __func__, k, p->DPPPerSurface[k]);
- dml2_printf("DML::%s: k=%u DETSizeOverride = %u\n", __func__, k, p->display_cfg->plane_descriptors[k].overrides.det_size_override_kb);
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, p->DETBufferSizeInKByte[k]);
- dml2_printf("DML::%s: DETBufferSizePoolInKByte = %u\n", __func__, DETBufferSizePoolInKByte);
-#endif
- }
-
- if (p->display_cfg->minimize_det_reallocation) {
- MinimizeReallocationSuccess = true;
-			// To minimize DET reallocation, we don't distribute based on each surface's bandwidth proportional to the global
-			// total, but rather distribute DET across streams proportionally based on pixel rate, and only distribute based on
-			// bandwidth between the planes on the same stream. This ensures that large-scale redistribution happens only on a
-			// stream count and/or pixel rate change, which is much less likely than general bandwidth changes per plane.
-
- // Calculate total pixel rate
- for (unsigned int k = 0; k < p->display_cfg->num_streams; ++k) {
- TotalPixelRate += p->display_cfg->stream_descriptors[k].timing.pixel_clock_khz;
- }
-
- // Calculate per stream DET budget
- for (unsigned int k = 0; k < p->display_cfg->num_streams; ++k) {
- DETBudgetPerStream[k] = (unsigned int)((double)p->display_cfg->stream_descriptors[k].timing.pixel_clock_khz * p->MaxTotalDETInKByte / TotalPixelRate);
- RemainingDETBudgetPerStream[k] = DETBudgetPerStream[k];
- }
-
- // Calculate the per stream total bandwidth
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (!dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) {
- TotalBandwidthPerStream[p->display_cfg->plane_descriptors[k].stream_index] += (unsigned int)(p->ReadBandwidthLuma[k] + p->ReadBandwidthChroma[k]);
-
- // Check the minimum can be satisfied by budget
- if (RemainingDETBudgetPerStream[p->display_cfg->plane_descriptors[k].stream_index] >= p->DETBufferSizeInKByte[k]) {
- RemainingDETBudgetPerStream[p->display_cfg->plane_descriptors[k].stream_index] -= p->DETBufferSizeInKByte[k];
- } else {
- MinimizeReallocationSuccess = false;
- break;
- }
- }
- }
-
- if (MinimizeReallocationSuccess) {
-				// Since a fixed budget per stream is sufficient to satisfy the minimums, just redistribute each stream's
-				// budget proportionally across its planes
-
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (!dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) {
- IdealDETBudget = (unsigned int)(((p->ReadBandwidthLuma[k] + p->ReadBandwidthChroma[k]) / TotalBandwidthPerStream[p->display_cfg->plane_descriptors[k].stream_index])
- * DETBudgetPerStream[p->display_cfg->plane_descriptors[k].stream_index]);
-
- if (IdealDETBudget > p->DETBufferSizeInKByte[k]) {
- DeltaDETBudget = IdealDETBudget - p->DETBufferSizeInKByte[k];
- if (DeltaDETBudget > RemainingDETBudgetPerStream[p->display_cfg->plane_descriptors[k].stream_index])
- DeltaDETBudget = RemainingDETBudgetPerStream[p->display_cfg->plane_descriptors[k].stream_index];
-
- p->DETBufferSizeInKByte[k] += DeltaDETBudget;
- RemainingDETBudgetPerStream[p->display_cfg->plane_descriptors[k].stream_index] -= DeltaDETBudget;
- }
-
- // Split among the pipes per the plane
- p->DETBufferSizeInKByte[k] = (unsigned int)((double)p->DETBufferSizeInKByte[k] / (p->ForceSingleDPP ? 1 : p->DPPPerSurface[k]));
-
- // Round down to segment size
- p->DETBufferSizeInKByte[k] = (p->DETBufferSizeInKByte[k] / p->CompressedBufferSegmentSizeInkByte) * p->CompressedBufferSegmentSizeInkByte;
- }
- }
- }
- }
-
- if (!MinimizeReallocationSuccess) {
- TotalBandwidth = 0;
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (!dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) {
- TotalBandwidth = TotalBandwidth + p->ReadBandwidthLuma[k] + p->ReadBandwidthChroma[k];
- }
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: --- Before bandwidth adjustment ---\n", __func__);
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, p->DETBufferSizeInKByte[k]);
- }
- dml2_printf("DML::%s: --- DET allocation with bandwidth ---\n", __func__);
-#endif
- dml2_printf("DML::%s: TotalBandwidth = %f\n", __func__, TotalBandwidth);
- BandwidthOfSurfacesNotAssignedDETPiece = TotalBandwidth;
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
-
- if (dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) {
- DETPieceAssignedToThisSurfaceAlready[k] = true;
- } else if (p->display_cfg->plane_descriptors[k].overrides.det_size_override_kb > 0 || (((double)(p->ForceSingleDPP ? 1 : p->DPPPerSurface[k]) * (double)p->DETBufferSizeInKByte[k] / (double)p->MaxTotalDETInKByte) >= ((p->ReadBandwidthLuma[k] + p->ReadBandwidthChroma[k]) / TotalBandwidth))) {
- DETPieceAssignedToThisSurfaceAlready[k] = true;
- BandwidthOfSurfacesNotAssignedDETPiece = BandwidthOfSurfacesNotAssignedDETPiece - p->ReadBandwidthLuma[k] - p->ReadBandwidthChroma[k];
- } else {
- DETPieceAssignedToThisSurfaceAlready[k] = false;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u DETPieceAssignedToThisSurfaceAlready = %u\n", __func__, k, DETPieceAssignedToThisSurfaceAlready[k]);
- dml2_printf("DML::%s: k=%u BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, k, BandwidthOfSurfacesNotAssignedDETPiece);
-#endif
- }
-
- for (unsigned int j = 0; j < p->NumberOfActiveSurfaces; ++j) {
- NextPotentialSurfaceToAssignDETPieceFound = false;
- NextSurfaceToAssignDETPiece = 0;
-
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthLuma[k] = %f\n", __func__, j, k, p->ReadBandwidthLuma[k]);
- dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthChroma[k] = %f\n", __func__, j, k, p->ReadBandwidthChroma[k]);
- dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthLuma[Next] = %f\n", __func__, j, k, p->ReadBandwidthLuma[NextSurfaceToAssignDETPiece]);
- dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthChroma[Next] = %f\n", __func__, j, k, p->ReadBandwidthChroma[NextSurfaceToAssignDETPiece]);
- dml2_printf("DML::%s: j=%u k=%u, NextSurfaceToAssignDETPiece = %u\n", __func__, j, k, NextSurfaceToAssignDETPiece);
-#endif
- if (!DETPieceAssignedToThisSurfaceAlready[k] && (!NextPotentialSurfaceToAssignDETPieceFound ||
- p->ReadBandwidthLuma[k] + p->ReadBandwidthChroma[k] < p->ReadBandwidthLuma[NextSurfaceToAssignDETPiece] + p->ReadBandwidthChroma[NextSurfaceToAssignDETPiece])) {
- NextSurfaceToAssignDETPiece = k;
- NextPotentialSurfaceToAssignDETPieceFound = true;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: j=%u k=%u, DETPieceAssignedToThisSurfaceAlready = %u\n", __func__, j, k, DETPieceAssignedToThisSurfaceAlready[k]);
- dml2_printf("DML::%s: j=%u k=%u, NextPotentialSurfaceToAssignDETPieceFound = %u\n", __func__, j, k, NextPotentialSurfaceToAssignDETPieceFound);
-#endif
- }
-
- if (NextPotentialSurfaceToAssignDETPieceFound) {
- NextDETBufferPieceInKByte = (unsigned int)(math_min2(
- math_round((double)DETBufferSizePoolInKByte * (p->ReadBandwidthLuma[NextSurfaceToAssignDETPiece] + p->ReadBandwidthChroma[NextSurfaceToAssignDETPiece]) / BandwidthOfSurfacesNotAssignedDETPiece /
- ((p->ForceSingleDPP ? 1 : p->DPPPerSurface[NextSurfaceToAssignDETPiece]) * p->ConfigReturnBufferSegmentSizeInkByte))
- * (p->ForceSingleDPP ? 1 : p->DPPPerSurface[NextSurfaceToAssignDETPiece]) * p->ConfigReturnBufferSegmentSizeInkByte,
- math_floor2((double)DETBufferSizePoolInKByte, (p->ForceSingleDPP ? 1 : p->DPPPerSurface[NextSurfaceToAssignDETPiece]) * p->ConfigReturnBufferSegmentSizeInkByte)));
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: j=%u, DETBufferSizePoolInKByte = %u\n", __func__, j, DETBufferSizePoolInKByte);
- dml2_printf("DML::%s: j=%u, NextSurfaceToAssignDETPiece = %u\n", __func__, j, NextSurfaceToAssignDETPiece);
- dml2_printf("DML::%s: j=%u, ReadBandwidthLuma[%u] = %f\n", __func__, j, NextSurfaceToAssignDETPiece, p->ReadBandwidthLuma[NextSurfaceToAssignDETPiece]);
- dml2_printf("DML::%s: j=%u, ReadBandwidthChroma[%u] = %f\n", __func__, j, NextSurfaceToAssignDETPiece, p->ReadBandwidthChroma[NextSurfaceToAssignDETPiece]);
- dml2_printf("DML::%s: j=%u, BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, j, BandwidthOfSurfacesNotAssignedDETPiece);
- dml2_printf("DML::%s: j=%u, NextDETBufferPieceInKByte = %u\n", __func__, j, NextDETBufferPieceInKByte);
- dml2_printf("DML::%s: j=%u, DETBufferSizeInKByte[%u] increases from %u ", __func__, j, NextSurfaceToAssignDETPiece, p->DETBufferSizeInKByte[NextSurfaceToAssignDETPiece]);
-#endif
-
- p->DETBufferSizeInKByte[NextSurfaceToAssignDETPiece] = p->DETBufferSizeInKByte[NextSurfaceToAssignDETPiece] + NextDETBufferPieceInKByte / (p->ForceSingleDPP ? 1 : p->DPPPerSurface[NextSurfaceToAssignDETPiece]);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("to %u\n", p->DETBufferSizeInKByte[NextSurfaceToAssignDETPiece]);
-#endif
-
- DETBufferSizePoolInKByte = DETBufferSizePoolInKByte - NextDETBufferPieceInKByte;
- DETPieceAssignedToThisSurfaceAlready[NextSurfaceToAssignDETPiece] = true;
- BandwidthOfSurfacesNotAssignedDETPiece = BandwidthOfSurfacesNotAssignedDETPiece - (p->ReadBandwidthLuma[NextSurfaceToAssignDETPiece] + p->ReadBandwidthChroma[NextSurfaceToAssignDETPiece]);
- }
- }
- }
- *p->CompressedBufferSizeInkByte = p->MinCompressedBufferSizeInKByte;
- }
- *p->CompressedBufferSizeInkByte = *p->CompressedBufferSizeInkByte * p->CompressedBufferSegmentSizeInkByte / p->ConfigReturnBufferSegmentSizeInkByte;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: --- After bandwidth adjustment ---\n", __func__);
- dml2_printf("DML::%s: CompressedBufferSizeInkByte = %u\n", __func__, *p->CompressedBufferSizeInkByte);
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u (TotalReadBandWidth=%f)\n", __func__, k, p->DETBufferSizeInKByte[k], p->ReadBandwidthLuma[k] + p->ReadBandwidthChroma[k]);
- }
-#endif
-}
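-
-// Illustrative example of the bandwidth-proportional DET assignment above (not part of the
-// original source, assumed values): with DETBufferSizePoolInKByte = 1024 left after the
-// per-surface minimums, a 64KB segment size, one DPP per surface and two unassigned surfaces
-// with read bandwidths of 1 and 2 (any unit), the lower-bandwidth surface is picked first:
-//   piece = min(round(1024 * 1/3 / 64) * 64, floor2(1024, 64)) = min(5 * 64, 1024) = 320
-// leaving 704KB in the pool, all of which then goes to the remaining surface:
-//   piece = min(round(704 * 2/2 / 64) * 64, floor2(704, 64)) = min(704, 704) = 704
-// Each piece is added on top of the minimum DET already reserved for that surface.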
-
-static double CalculateRequiredDispclk(
- enum dml2_odm_mode ODMMode,
- double PixelClock)
-{
-
- if (ODMMode == dml2_odm_mode_combine_4to1) {
- return PixelClock / 4.0;
- } else if (ODMMode == dml2_odm_mode_combine_3to1) {
- return PixelClock / 3.0;
- } else if (ODMMode == dml2_odm_mode_combine_2to1) {
- return PixelClock / 2.0;
- } else {
- return PixelClock;
- }
-}
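-
-// Illustrative example (not part of the original source): with ODM combine 2:1 each ODM
-// segment only has to process half of the pixels per clock, so a 1188MHz pixel clock needs
-// a DISPCLK of 1188 / 2.0 = 594MHz; without ODM combine the full 1188MHz is required.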
-
-static double TruncToValidBPP(
- struct dml2_core_shared_TruncToValidBPP_locals *l,
- double LinkBitRate,
- unsigned int Lanes,
- unsigned int HTotal,
- unsigned int HActive,
- double PixelClock,
- double DesiredBPP,
- bool DSCEnable,
- enum dml2_output_encoder_class Output,
- enum dml2_output_format_class Format,
- unsigned int DSCInputBitPerComponent,
- unsigned int DSCSlices,
- unsigned int AudioRate,
- unsigned int AudioLayout,
- enum dml2_odm_mode ODMModeNoDSC,
- enum dml2_odm_mode ODMModeDSC,
-
- // Output
- unsigned int *RequiredSlots)
-{
- double MaxLinkBPP;
- unsigned int MinDSCBPP;
- double MaxDSCBPP;
- unsigned int NonDSCBPP0;
- unsigned int NonDSCBPP1;
- unsigned int NonDSCBPP2;
- enum dml2_odm_mode ODMMode;
-
- if (Format == dml2_420) {
- NonDSCBPP0 = 12;
- NonDSCBPP1 = 15;
- NonDSCBPP2 = 18;
- MinDSCBPP = 6;
- MaxDSCBPP = 16;
- } else if (Format == dml2_444) {
- NonDSCBPP0 = 24;
- NonDSCBPP1 = 30;
- NonDSCBPP2 = 36;
- MinDSCBPP = 8;
- MaxDSCBPP = 16;
- } else {
- if (Output == dml2_hdmi || Output == dml2_hdmifrl) {
- NonDSCBPP0 = 24;
- NonDSCBPP1 = 24;
- NonDSCBPP2 = 24;
- } else {
- NonDSCBPP0 = 16;
- NonDSCBPP1 = 20;
- NonDSCBPP2 = 24;
- }
- if (Format == dml2_n422 || Output == dml2_hdmifrl) {
- MinDSCBPP = 7;
- MaxDSCBPP = 16;
- } else {
- MinDSCBPP = 8;
- MaxDSCBPP = 16;
- }
- }
- if (Output == dml2_dp2p0) {
- MaxLinkBPP = LinkBitRate * Lanes / PixelClock * 128.0 / 132.0 * 383.0 / 384.0 * 65536.0 / 65540.0;
- } else if (DSCEnable && Output == dml2_dp) {
- MaxLinkBPP = LinkBitRate / 10.0 * 8.0 * Lanes / PixelClock * (1 - 2.4 / 100);
- } else {
- MaxLinkBPP = LinkBitRate / 10.0 * 8.0 * Lanes / PixelClock;
- }
-
- ODMMode = DSCEnable ? ODMModeDSC : ODMModeNoDSC;
-
- if (ODMMode == dml2_odm_mode_split_1to2) {
- MaxLinkBPP = 2 * MaxLinkBPP;
- }
-
- if (DesiredBPP == 0) {
- if (DSCEnable) {
- if (MaxLinkBPP < MinDSCBPP) {
- return __DML2_CALCS_DPP_INVALID__;
- } else if (MaxLinkBPP >= MaxDSCBPP) {
- return MaxDSCBPP;
- } else {
- return math_floor2(16.0 * MaxLinkBPP, 1.0) / 16.0;
- }
- } else {
- if (MaxLinkBPP >= NonDSCBPP2) {
- return NonDSCBPP2;
- } else if (MaxLinkBPP >= NonDSCBPP1) {
- return NonDSCBPP1;
- } else if (MaxLinkBPP >= NonDSCBPP0) {
- return NonDSCBPP0;
- } else {
- return __DML2_CALCS_DPP_INVALID__;
- }
- }
- } else {
- if (!((DSCEnable == false && (DesiredBPP == NonDSCBPP2 || DesiredBPP == NonDSCBPP1 || DesiredBPP == NonDSCBPP0)) ||
- (DSCEnable && DesiredBPP >= MinDSCBPP && DesiredBPP <= MaxDSCBPP))) {
- return __DML2_CALCS_DPP_INVALID__;
- } else {
- return DesiredBPP;
- }
- }
-}
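-
-// Illustrative example (not part of the original source, assumed values): for a 444 stream
-// with DesiredBPP == 0 and DSC disabled, the function simply snaps the link-limited bpp down
-// to the nearest supported non-DSC depth; e.g. MaxLinkBPP = 28.5 falls between NonDSCBPP0 = 24
-// and NonDSCBPP1 = 30, so 24 (8bpc RGB) is returned. If MaxLinkBPP were below 24 the function
-// would return __DML2_CALCS_DPP_INVALID__ instead.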
-
-// updated for dcn4
-static unsigned int dscceComputeDelay(
- unsigned int bpc,
- double BPP,
- unsigned int sliceWidth,
- unsigned int numSlices,
- enum dml2_output_format_class pixelFormat,
- enum dml2_output_encoder_class Output)
-{
- // valid bpc = source bits per component in the set of {8, 10, 12}
- // valid bpp = increments of 1/16 of a bit
- // min = 6/7/8 in N420/N422/444, respectively
- // max = such that compression is 1:1
- //valid sliceWidth = number of pixels per slice line, must be less than or equal to 5184/numSlices (or 4096/numSlices in 420 mode)
-	//valid numSlices = number of slices in the horizontal direction per DSC engine in the set of {1, 2, 3, 4}
- //valid pixelFormat = pixel/color format in the set of {:N444_RGB, :S422, :N422, :N420}
-
- // fixed value
- unsigned int rcModelSize = 8192;
-
- // N422/N420 operate at 2 pixels per clock
- unsigned int pixelsPerClock, padding_pixels, ssm_group_priming_delay, ssm_pipeline_delay, obsm_pipeline_delay, slice_padded_pixels, ixd_plus_padding, ixd_plus_padding_groups, cycles_per_group, group_delay, pipeline_delay, pixels, additional_group_delay, lines_to_reach_ixd, groups_to_reach_ixd, slice_width_groups, initial_xmit_delay, number_of_lines_to_reach_ixd, slice_width_modified;
-
-
- if (pixelFormat == dml2_420)
- pixelsPerClock = 2;
-	// all other modes operate at 1 pixel per clock
- else if (pixelFormat == dml2_444)
- pixelsPerClock = 1;
- else if (pixelFormat == dml2_n422 || Output == dml2_hdmifrl)
- pixelsPerClock = 2;
- else
- pixelsPerClock = 1;
-
- //initial transmit delay as per PPS
- initial_xmit_delay = (unsigned int)(math_round(rcModelSize / 2.0 / BPP / pixelsPerClock));
-
-	//slice width as seen by dscc_bcl in pixels or pixel pairs (depending on the number of pixels per pixel container based on pixel format)
- slice_width_modified = (pixelFormat == dml2_444 || pixelFormat == dml2_420 || Output == dml2_hdmifrl) ? sliceWidth / 2 : sliceWidth;
-
- padding_pixels = ((slice_width_modified % 3) != 0) ? (3 - (slice_width_modified % 3)) * (initial_xmit_delay / slice_width_modified) : 0;
-
- if ((3.0 * pixelsPerClock * BPP) >= ((double)((initial_xmit_delay + 2) / 3) * (double)(3 + (pixelFormat == dml2_n422)))) {
- if ((initial_xmit_delay + padding_pixels) % 3 == 1) {
- initial_xmit_delay++;
- }
- }
-
-
- //sub-stream multiplexer balance fifo priming delay in groups as per dsc standard
- if (bpc == 8)
- ssm_group_priming_delay = 83;
- else if (bpc == 10)
- ssm_group_priming_delay = 91;
- else if (bpc == 12)
- ssm_group_priming_delay = 115;
- else if (bpc == 14)
- ssm_group_priming_delay = 123;
- else
- ssm_group_priming_delay = 128;
-
-	//slice width in groups is rounded up to the nearest group as DSC adds padded pixels such that there is an integer number of groups per slice
- slice_width_groups = (slice_width_modified + 2) / 3;
-
-	//determine the number of padded pixels in the last group of a slice line, computed as follows
- slice_padded_pixels = 3 * slice_width_groups - slice_width_modified;
-
-
-
-
- //determine integer number of complete slice lines required to reach initial transmit delay without ssm delay considered
- number_of_lines_to_reach_ixd = initial_xmit_delay / slice_width_modified;
-
-	//increase initial transmit delay by the number of padded pixels added to a slice line multiplied by the integer number of complete lines to reach initial transmit delay
- //this step is necessary as each padded pixel added takes up a clock cycle and, therefore, adds to the overall delay
- ixd_plus_padding = initial_xmit_delay + slice_padded_pixels * number_of_lines_to_reach_ixd;
-
- //convert the padded initial transmit delay from pixels to groups by rounding up to the nearest group as DSC processes in groups of pixels
- ixd_plus_padding_groups = (ixd_plus_padding + 2) / 3;
-
- //number of groups required for a slice to reach initial transmit delay is the sum of the padded initial transmit delay plus the ssm group priming delay
- groups_to_reach_ixd = ixd_plus_padding_groups + ssm_group_priming_delay;
-
-
- //number of lines required to reach padded initial transmit delay in groups in slices to the left of the last horizontal slice
-	//needs to be rounded up as complete slice lines are buffered prior to initial transmit delay being reached in the last horizontal slice
- lines_to_reach_ixd = (groups_to_reach_ixd + slice_width_groups - 1) / slice_width_groups; //round up lines to reach ixd to next
-
-	//determine if there is a non-zero number of pixels reached in the group where initial transmit delay is reached
- //an additional group time (i.e., 3 pixel times) is required before the first output if there are no additional pixels beyond initial transmit delay
- additional_group_delay = ((initial_xmit_delay - number_of_lines_to_reach_ixd * slice_width_modified) % 3) == 0 ? 1 : 0;
-
- //number of pipeline delay cycles in the ssm block (can be determined empirically or analytically by inspecting the ssm block)
- ssm_pipeline_delay = 2;
-
- //number of pipe delay cycles in the obsm block (can be determined empirically or analytically by inspecting the obsm block)
- obsm_pipeline_delay = 1;
-
- //a group of pixels is worth 6 pixels in N422/N420 mode or 3 pixels in all other modes
- if (pixelFormat == dml2_420 || pixelFormat == dml2_444 || pixelFormat == dml2_n422 || Output == dml2_hdmifrl)
- cycles_per_group = 6;
- else
- cycles_per_group = 3;
-	//delay of the bit stream construction layer in pixels is the sum of:
-	//1. number of pixel containers in a slice line multiplied by the number of lines required to reach initial transmit delay multiplied by the number of slices to the left of the last horizontal slice
- //2. number of pixel containers required to reach initial transmit delay (specifically, in the last horizontal slice)
- //3. additional group of delay if initial transmit delay is reached exactly in a group
- //4. ssm and obsm pipeline delay (i.e., clock cycles of delay)
- group_delay = (lines_to_reach_ixd * slice_width_groups * (numSlices - 1)) + groups_to_reach_ixd + additional_group_delay;
- pipeline_delay = ssm_pipeline_delay + obsm_pipeline_delay;
-
-	//pixel delay is group_delay (converted to pixels) + pipeline delay; however, the first group is a special case since it is processed as soon as it arrives (i.e., in 3 cycles regardless of pixel format)
- pixels = (group_delay - 1) * cycles_per_group + 3 + pipeline_delay;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: bpc: %u\n", __func__, bpc);
- dml2_printf("DML::%s: BPP: %f\n", __func__, BPP);
- dml2_printf("DML::%s: sliceWidth: %u\n", __func__, sliceWidth);
- dml2_printf("DML::%s: numSlices: %u\n", __func__, numSlices);
- dml2_printf("DML::%s: pixelFormat: %u\n", __func__, pixelFormat);
- dml2_printf("DML::%s: Output: %u\n", __func__, Output);
- dml2_printf("DML::%s: pixels: %u\n", __func__, pixels);
-#endif
- return pixels;
-}
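-
-// Illustrative example (not part of the original source, assumed values): for 8bpc 444 at
-// BPP = 12, pixelsPerClock = 1 and the fixed rcModelSize of 8192, the initial transmit delay
-// as per PPS works out to round(8192 / 2.0 / 12 / 1) = round(341.33) = 341 pixels, which is
-// then padded, converted to groups of 3 and combined with the 83-group ssm priming delay for 8bpc.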
-
-
-//updated in dcn4
-static unsigned int dscComputeDelay(enum dml2_output_format_class pixelFormat, enum dml2_output_encoder_class Output)
-{
- unsigned int Delay = 0;
- unsigned int dispclk_per_dscclk = 3;
-
- // sfr
- Delay = Delay + 2;
-
- if (pixelFormat == dml2_420 || pixelFormat == dml2_n422 || (Output == dml2_hdmifrl && pixelFormat != dml2_444)) {
- dispclk_per_dscclk = 3 * 2;
- }
-
- if (pixelFormat == dml2_420) {
- //dscc top delay for pixel compression layer
- Delay = Delay + 16 * dispclk_per_dscclk;
-
- // dscc - input deserializer
- Delay = Delay + 5;
-
- // dscc - input cdc fifo
- Delay = Delay + 1 + 4 * dispclk_per_dscclk;
-
- // dscc - output cdc fifo
- Delay = Delay + 3 + 1 * dispclk_per_dscclk;
-
- // dscc - cdc uncertainty
- Delay = Delay + 3 + 3 * dispclk_per_dscclk;
- } else if (pixelFormat == dml2_n422 || (Output == dml2_hdmifrl && pixelFormat != dml2_444)) {
- //dscc top delay for pixel compression layer
- Delay = Delay + 16 * dispclk_per_dscclk;
- // dsccif
- Delay = Delay + 1;
- // dscc - input deserializer
- Delay = Delay + 5;
- // dscc - input cdc fifo
- Delay = Delay + 1 + 4 * dispclk_per_dscclk;
-
-
- // dscc - output cdc fifo
- Delay = Delay + 3 + 1 * dispclk_per_dscclk;
- // dscc - cdc uncertainty
- Delay = Delay + 3 + 3 * dispclk_per_dscclk;
- } else if (pixelFormat == dml2_s422) {
- //dscc top delay for pixel compression layer
- Delay = Delay + 17 * dispclk_per_dscclk;
-
- // dscc - input deserializer
- Delay = Delay + 3;
- // dscc - input cdc fifo
- Delay = Delay + 1 + 4 * dispclk_per_dscclk;
- // dscc - output cdc fifo
- Delay = Delay + 3 + 1 * dispclk_per_dscclk;
- // dscc - cdc uncertainty
- Delay = Delay + 3 + 3 * dispclk_per_dscclk;
- } else {
- //dscc top delay for pixel compression layer
- Delay = Delay + 16 * dispclk_per_dscclk;
- // dscc - input deserializer
- Delay = Delay + 3;
- // dscc - input cdc fifo
- Delay = Delay + 1 + 4 * dispclk_per_dscclk;
- // dscc - output cdc fifo
- Delay = Delay + 3 + 1 * dispclk_per_dscclk;
-
- // dscc - cdc uncertainty
- Delay = Delay + 3 + 3 * dispclk_per_dscclk;
- }
-
- // sft
- Delay = Delay + 1;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: pixelFormat = %u\n", __func__, pixelFormat);
- dml2_printf("DML::%s: Delay = %u\n", __func__, Delay);
-#endif
-
- return Delay;
-}
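-
-// Illustrative example (not part of the original source): for dml2_444 output the final else
-// branch applies with dispclk_per_dscclk = 3, giving
-//   Delay = 2 + 16*3 + 3 + (1 + 4*3) + (3 + 1*3) + (3 + 3*3) + 1 = 85 dispclk cycles.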
-
-static unsigned int CalculateHostVMDynamicLevels(
- bool GPUVMEnable,
- bool HostVMEnable,
- unsigned int HostVMMinPageSize,
- unsigned int HostVMMaxNonCachedPageTableLevels)
-{
- unsigned int HostVMDynamicLevels = 0;
-
- if (GPUVMEnable && HostVMEnable) {
- if (HostVMMinPageSize < 2048)
- HostVMDynamicLevels = HostVMMaxNonCachedPageTableLevels;
- else if (HostVMMinPageSize >= 2048 && HostVMMinPageSize < 1048576)
- HostVMDynamicLevels = (unsigned int)math_max2(0, (double)HostVMMaxNonCachedPageTableLevels - 1);
- else
- HostVMDynamicLevels = (unsigned int)math_max2(0, (double)HostVMMaxNonCachedPageTableLevels - 2);
- } else {
- HostVMDynamicLevels = 0;
- }
- return HostVMDynamicLevels;
-}
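-
-// Illustrative example (not part of the original source): with both GPUVM and HostVM enabled
-// and HostVMMinPageSize in the middle range (>= 2048 and < 1048576), the dynamic level count
-// is reduced by one: HostVMDynamicLevels = max(0, HostVMMaxNonCachedPageTableLevels - 1), so
-// HostVMMaxNonCachedPageTableLevels = 4 yields 3 dynamic levels; with either VM disabled it is 0.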
-
-static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_and_row_bytes_params *p)
-{
- unsigned int extra_dpde_bytes;
- unsigned int extra_mpde_bytes;
- unsigned int MacroTileSizeBytes;
- unsigned int vp_height_dpte_ub;
-
- unsigned int meta_surface_bytes;
- unsigned int vm_bytes;
- unsigned int vp_height_meta_ub;
-
- *p->MetaRequestHeight = 8 * p->BlockHeight256Bytes;
- *p->MetaRequestWidth = 8 * p->BlockWidth256Bytes;
- if (p->SurfaceTiling == dml2_sw_linear) {
- *p->meta_row_height = 32;
- *p->meta_row_width = (unsigned int)(math_floor2(p->ViewportXStart + p->SwathWidth + *p->MetaRequestWidth - 1, *p->MetaRequestWidth) - math_floor2(p->ViewportXStart, *p->MetaRequestWidth));
- *p->meta_row_bytes = (unsigned int)(*p->meta_row_width * *p->MetaRequestHeight * p->BytePerPixel / 256.0); // FIXME_DCN4SW missing in old code but no dcc for linear anyways?
- } else if (!dml_is_vertical_rotation(p->RotationAngle)) {
- *p->meta_row_height = *p->MetaRequestHeight;
- if (p->ViewportStationary && p->NumberOfDPPs == 1) {
- *p->meta_row_width = (unsigned int)(math_floor2(p->ViewportXStart + p->SwathWidth + *p->MetaRequestWidth - 1, *p->MetaRequestWidth) - math_floor2(p->ViewportXStart, *p->MetaRequestWidth));
- } else {
- *p->meta_row_width = (unsigned int)(math_ceil2(p->SwathWidth - 1, *p->MetaRequestWidth) + *p->MetaRequestWidth);
- }
- *p->meta_row_bytes = (unsigned int)(*p->meta_row_width * *p->MetaRequestHeight * p->BytePerPixel / 256.0);
- } else {
- *p->meta_row_height = *p->MetaRequestWidth;
- if (p->ViewportStationary && p->NumberOfDPPs == 1) {
- *p->meta_row_width = (unsigned int)(math_floor2(p->ViewportYStart + p->ViewportHeight + *p->MetaRequestHeight - 1, *p->MetaRequestHeight) - math_floor2(p->ViewportYStart, *p->MetaRequestHeight));
- } else {
- *p->meta_row_width = (unsigned int)(math_ceil2(p->SwathWidth - 1, *p->MetaRequestHeight) + *p->MetaRequestHeight);
- }
- *p->meta_row_bytes = (unsigned int)(*p->meta_row_width * *p->MetaRequestWidth * p->BytePerPixel / 256.0);
- }
-
- if (p->ViewportStationary && p->is_phantom && (p->NumberOfDPPs == 1 || !dml_is_vertical_rotation(p->RotationAngle))) {
- vp_height_meta_ub = (unsigned int)(math_floor2(p->ViewportYStart + p->ViewportHeight + 64 * p->BlockHeight256Bytes - 1, 64 * p->BlockHeight256Bytes) - math_floor2(p->ViewportYStart, 64 * p->BlockHeight256Bytes));
- } else if (!dml_is_vertical_rotation(p->RotationAngle)) {
- vp_height_meta_ub = (unsigned int)(math_ceil2(p->ViewportHeight - 1, 64 * p->BlockHeight256Bytes) + 64 * p->BlockHeight256Bytes);
- } else {
- vp_height_meta_ub = (unsigned int)(math_ceil2(p->SwathWidth - 1, 64 * p->BlockHeight256Bytes) + 64 * p->BlockHeight256Bytes);
- }
-
- meta_surface_bytes = (unsigned int)(p->DCCMetaPitch * vp_height_meta_ub * p->BytePerPixel / 256.0);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DCCMetaPitch = %u\n", __func__, p->DCCMetaPitch);
- dml2_printf("DML::%s: meta_surface_bytes = %u\n", __func__, meta_surface_bytes);
-#endif
- if (p->GPUVMEnable == true) {
- double meta_vmpg_bytes = 4.0 * 1024.0;
- *p->meta_pte_bytes_per_frame_ub = (unsigned int)((math_ceil2((double)(meta_surface_bytes - meta_vmpg_bytes) / (8 * meta_vmpg_bytes), 1) + 1) * 64);
- extra_mpde_bytes = 128 * (p->GPUVMMaxPageTableLevels - 1);
- } else {
- *p->meta_pte_bytes_per_frame_ub = 0;
- extra_mpde_bytes = 0;
- }
-
- if (!p->DCCEnable || !p->mrq_present) {
- *p->meta_pte_bytes_per_frame_ub = 0;
- extra_mpde_bytes = 0;
- *p->meta_row_bytes = 0;
- }
-
- if (!p->GPUVMEnable) {
- *p->PixelPTEBytesPerRow = 0;
- *p->PixelPTEBytesPerRowStorage = 0;
- *p->dpte_row_width_ub = 0;
- *p->dpte_row_height = 0;
- *p->dpte_row_height_linear = 0;
- *p->PixelPTEBytesPerRow_one_row_per_frame = 0;
- *p->dpte_row_width_ub_one_row_per_frame = 0;
- *p->dpte_row_height_one_row_per_frame = 0;
- *p->vmpg_width = 0;
- *p->vmpg_height = 0;
- *p->PixelPTEReqWidth = 0;
- *p->PixelPTEReqHeight = 0;
- *p->PTERequestSize = 0;
- *p->dpde0_bytes_per_frame_ub = 0;
- return 0;
- }
-
- MacroTileSizeBytes = p->MacroTileWidth * p->BytePerPixel * p->MacroTileHeight;
-
- if (p->ViewportStationary && p->is_phantom && (p->NumberOfDPPs == 1 || !dml_is_vertical_rotation(p->RotationAngle))) {
- vp_height_dpte_ub = (unsigned int)(math_floor2(p->ViewportYStart + p->ViewportHeight + p->MacroTileHeight - 1, p->MacroTileHeight) - math_floor2(p->ViewportYStart, p->MacroTileHeight));
- } else if (!dml_is_vertical_rotation(p->RotationAngle)) {
- vp_height_dpte_ub = (unsigned int)(math_ceil2((double)p->ViewportHeight - 1, p->MacroTileHeight) + p->MacroTileHeight);
- } else {
- vp_height_dpte_ub = (unsigned int)(math_ceil2((double)p->SwathWidth - 1, p->MacroTileHeight) + p->MacroTileHeight);
- }
-
- if (p->GPUVMEnable == true && p->GPUVMMaxPageTableLevels > 1) {
- *p->dpde0_bytes_per_frame_ub = (unsigned int)(64 * (math_ceil2((double)(p->Pitch * vp_height_dpte_ub * p->BytePerPixel - MacroTileSizeBytes) / (double)(8 * 2097152), 1) + 1));
- extra_dpde_bytes = 128 * (p->GPUVMMaxPageTableLevels - 2);
- } else {
- *p->dpde0_bytes_per_frame_ub = 0;
- extra_dpde_bytes = 0;
- }
-
- vm_bytes = *p->meta_pte_bytes_per_frame_ub + extra_mpde_bytes + *p->dpde0_bytes_per_frame_ub + extra_dpde_bytes;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DCCEnable = %u\n", __func__, p->DCCEnable);
- dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, p->GPUVMEnable);
- dml2_printf("DML::%s: SwModeLinear = %u\n", __func__, p->SurfaceTiling == dml2_sw_linear);
- dml2_printf("DML::%s: BytePerPixel = %u\n", __func__, p->BytePerPixel);
- dml2_printf("DML::%s: GPUVMMaxPageTableLevels = %u\n", __func__, p->GPUVMMaxPageTableLevels);
- dml2_printf("DML::%s: BlockHeight256Bytes = %u\n", __func__, p->BlockHeight256Bytes);
- dml2_printf("DML::%s: BlockWidth256Bytes = %u\n", __func__, p->BlockWidth256Bytes);
- dml2_printf("DML::%s: MacroTileHeight = %u\n", __func__, p->MacroTileHeight);
- dml2_printf("DML::%s: MacroTileWidth = %u\n", __func__, p->MacroTileWidth);
- dml2_printf("DML::%s: meta_pte_bytes_per_frame_ub = %u\n", __func__, *p->meta_pte_bytes_per_frame_ub);
- dml2_printf("DML::%s: dpde0_bytes_per_frame_ub = %u\n", __func__, *p->dpde0_bytes_per_frame_ub);
- dml2_printf("DML::%s: extra_mpde_bytes = %u\n", __func__, extra_mpde_bytes);
- dml2_printf("DML::%s: extra_dpde_bytes = %u\n", __func__, extra_dpde_bytes);
- dml2_printf("DML::%s: vm_bytes = %u\n", __func__, vm_bytes);
- dml2_printf("DML::%s: ViewportHeight = %u\n", __func__, p->ViewportHeight);
- dml2_printf("DML::%s: SwathWidth = %u\n", __func__, p->SwathWidth);
- dml2_printf("DML::%s: vp_height_dpte_ub = %u\n", __func__, vp_height_dpte_ub);
-#endif
-
- unsigned int PixelPTEReqWidth_linear = 0; // VBA_DELTA. VBA doesn't calculate this
-
- if (p->SurfaceTiling == dml2_sw_linear) {
- *p->PixelPTEReqHeight = 1;
- *p->PixelPTEReqWidth = p->GPUVMMinPageSizeKBytes * 1024 * 8 / p->BytePerPixel;
- PixelPTEReqWidth_linear = p->GPUVMMinPageSizeKBytes * 1024 * 8 / p->BytePerPixel;
- *p->PTERequestSize = 64;
-
- *p->vmpg_height = 1;
- *p->vmpg_width = p->GPUVMMinPageSizeKBytes * 1024 / p->BytePerPixel;
- } else if (p->GPUVMMinPageSizeKBytes * 1024 >= dml_get_tile_block_size_bytes(p->SurfaceTiling)) { // 1 64B 8x1 PTE
- *p->PixelPTEReqHeight = p->MacroTileHeight;
- *p->PixelPTEReqWidth = 8 * 1024 * p->GPUVMMinPageSizeKBytes / (p->MacroTileHeight * p->BytePerPixel);
- *p->PTERequestSize = 64;
-
- *p->vmpg_height = p->MacroTileHeight;
- *p->vmpg_width = 1024 * p->GPUVMMinPageSizeKBytes / (p->MacroTileHeight * p->BytePerPixel);
-
- } else if (p->GPUVMMinPageSizeKBytes == 4 && dml_get_tile_block_size_bytes(p->SurfaceTiling) == 65536) { // 2 64B PTE requests to get 16 PTEs to cover the 64K tile
- // one 64KB tile, is 16x16x256B req
- *p->PixelPTEReqHeight = 16 * p->BlockHeight256Bytes;
- *p->PixelPTEReqWidth = 16 * p->BlockWidth256Bytes;
- *p->PTERequestSize = 128;
-
- *p->vmpg_height = *p->PixelPTEReqHeight;
- *p->vmpg_width = *p->PixelPTEReqWidth;
- } else {
-		// default for the rest of the calculation to go through; when vm is disabled, the calculated pte-related values shouldn't be used anyway
- *p->PixelPTEReqHeight = p->MacroTileHeight;
- *p->PixelPTEReqWidth = 8 * 1024 * p->GPUVMMinPageSizeKBytes / (p->MacroTileHeight * p->BytePerPixel);
- *p->PTERequestSize = 64;
-
- *p->vmpg_height = p->MacroTileHeight;
- *p->vmpg_width = 1024 * p->GPUVMMinPageSizeKBytes / (p->MacroTileHeight * p->BytePerPixel);
-
- if (p->GPUVMEnable == true) {
- dml2_printf("DML::%s: GPUVMMinPageSizeKBytes=%u and sw_mode=%u (tile_size=%d) not supported!\n",
- __func__, p->GPUVMMinPageSizeKBytes, p->SurfaceTiling, dml_get_tile_block_size_bytes(p->SurfaceTiling));
- DML2_ASSERT(0);
- }
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: GPUVMMinPageSizeKBytes = %u\n", __func__, p->GPUVMMinPageSizeKBytes);
- dml2_printf("DML::%s: PixelPTEReqHeight = %u\n", __func__, *p->PixelPTEReqHeight);
- dml2_printf("DML::%s: PixelPTEReqWidth = %u\n", __func__, *p->PixelPTEReqWidth);
- dml2_printf("DML::%s: PixelPTEReqWidth_linear = %u\n", __func__, PixelPTEReqWidth_linear);
- dml2_printf("DML::%s: PTERequestSize = %u\n", __func__, *p->PTERequestSize);
- dml2_printf("DML::%s: Pitch = %u\n", __func__, p->Pitch);
- dml2_printf("DML::%s: vmpg_width = %u\n", __func__, *p->vmpg_width);
- dml2_printf("DML::%s: vmpg_height = %u\n", __func__, *p->vmpg_height);
-#endif
-
- *p->dpte_row_height_one_row_per_frame = vp_height_dpte_ub;
- *p->dpte_row_width_ub_one_row_per_frame = (unsigned int)((math_ceil2(((double)p->Pitch * (double)*p->dpte_row_height_one_row_per_frame / (double)*p->PixelPTEReqHeight - 1) / (double)*p->PixelPTEReqWidth, 1) + 1) * (double)*p->PixelPTEReqWidth);
- *p->PixelPTEBytesPerRow_one_row_per_frame = (unsigned int)((double)*p->dpte_row_width_ub_one_row_per_frame / (double)*p->PixelPTEReqWidth * *p->PTERequestSize);
-
- if (p->SurfaceTiling == dml2_sw_linear) {
- *p->dpte_row_height = (unsigned int)(math_min2(128, (double)(1ULL << (unsigned int)math_floor2(math_log((float)(p->PTEBufferSizeInRequests * *p->PixelPTEReqWidth / p->Pitch), 2.0), 1))));
- *p->dpte_row_width_ub = (unsigned int)(math_ceil2(((double)p->Pitch * (double)*p->dpte_row_height - 1), (double)*p->PixelPTEReqWidth) + *p->PixelPTEReqWidth);
- *p->PixelPTEBytesPerRow = (unsigned int)((double)*p->dpte_row_width_ub / (double)*p->PixelPTEReqWidth * *p->PTERequestSize);
- *p->dpte_row_height_linear = 0;
-
- // VBA_DELTA, VBA doesn't have programming value for pte row height linear.
- *p->dpte_row_height_linear = (unsigned int)1 << (unsigned int)math_floor2(math_log((float)(p->PTEBufferSizeInRequests * PixelPTEReqWidth_linear / p->Pitch), 2.0), 1);
- if (*p->dpte_row_height_linear > 128)
- *p->dpte_row_height_linear = 128;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dpte_row_width_ub = %u (linear)\n", __func__, *p->dpte_row_width_ub);
-#endif
-
- } else if (!dml_is_vertical_rotation(p->RotationAngle)) {
- *p->dpte_row_height = *p->PixelPTEReqHeight;
-
- if (p->GPUVMMinPageSizeKBytes > 64) {
- *p->dpte_row_width_ub = (unsigned int)((math_ceil2(((double)p->Pitch * (double)*p->dpte_row_height / (double)*p->PixelPTEReqHeight - 1) / (double)*p->PixelPTEReqWidth, 1) + 1) * *p->PixelPTEReqWidth);
- } else if (p->ViewportStationary && (p->NumberOfDPPs == 1)) {
- *p->dpte_row_width_ub = (unsigned int)(math_floor2(p->ViewportXStart + p->SwathWidth + *p->PixelPTEReqWidth - 1, *p->PixelPTEReqWidth) - math_floor2(p->ViewportXStart, *p->PixelPTEReqWidth));
- } else {
- *p->dpte_row_width_ub = (unsigned int)((math_ceil2((double)(p->SwathWidth - 1) / (double)*p->PixelPTEReqWidth, 1) + 1.0) * *p->PixelPTEReqWidth);
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dpte_row_width_ub = %u (tiled horz)\n", __func__, *p->dpte_row_width_ub);
-#endif
-
- *p->PixelPTEBytesPerRow = *p->dpte_row_width_ub / *p->PixelPTEReqWidth * *p->PTERequestSize;
- } else {
- *p->dpte_row_height = (unsigned int)(math_min2(*p->PixelPTEReqWidth, p->MacroTileWidth));
-
- if (p->ViewportStationary && (p->NumberOfDPPs == 1)) {
- *p->dpte_row_width_ub = (unsigned int)(math_floor2(p->ViewportYStart + p->ViewportHeight + *p->PixelPTEReqHeight - 1, *p->PixelPTEReqHeight) - math_floor2(p->ViewportYStart, *p->PixelPTEReqHeight));
- } else {
- *p->dpte_row_width_ub = (unsigned int)((math_ceil2((double)(p->SwathWidth - 1) / (double)*p->PixelPTEReqHeight, 1) + 1) * *p->PixelPTEReqHeight);
- }
-
- *p->PixelPTEBytesPerRow = (unsigned int)((double)*p->dpte_row_width_ub / (double)*p->PixelPTEReqHeight * *p->PTERequestSize);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dpte_row_width_ub = %u (tiled vert)\n", __func__, *p->dpte_row_width_ub);
-#endif
- }
-
- if (p->GPUVMEnable != true) {
- *p->PixelPTEBytesPerRow = 0;
- *p->PixelPTEBytesPerRow_one_row_per_frame = 0;
- }
-
- *p->PixelPTEBytesPerRowStorage = *p->PixelPTEBytesPerRow;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: GPUVMMinPageSizeKBytes = %u\n", __func__, p->GPUVMMinPageSizeKBytes);
- dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, p->GPUVMEnable);
- dml2_printf("DML::%s: dpte_row_height = %u\n", __func__, *p->dpte_row_height);
- dml2_printf("DML::%s: dpte_row_height_linear = %u\n", __func__, *p->dpte_row_height_linear);
- dml2_printf("DML::%s: dpte_row_width_ub = %u\n", __func__, *p->dpte_row_width_ub);
- dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, *p->PixelPTEBytesPerRow);
- dml2_printf("DML::%s: PixelPTEBytesPerRowStorage = %u\n", __func__, *p->PixelPTEBytesPerRowStorage);
- dml2_printf("DML::%s: PTEBufferSizeInRequests = %u\n", __func__, p->PTEBufferSizeInRequests);
- dml2_printf("DML::%s: dpte_row_height_one_row_per_frame = %u\n", __func__, *p->dpte_row_height_one_row_per_frame);
- dml2_printf("DML::%s: dpte_row_width_ub_one_row_per_frame = %u\n", __func__, *p->dpte_row_width_ub_one_row_per_frame);
- dml2_printf("DML::%s: PixelPTEBytesPerRow_one_row_per_frame = %u\n", __func__, *p->PixelPTEBytesPerRow_one_row_per_frame);
-#endif
-
- return vm_bytes;
-} // CalculateVMAndRowBytes
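-
-// Illustrative example of the PTE request sizing above (not part of the original source,
-// assumed values): for a linear surface with GPUVMMinPageSizeKBytes = 4 and BytePerPixel = 4,
-//   PixelPTEReqWidth = 4 * 1024 * 8 / 4 = 8192 pixels per 64B PTE request and
-//   vmpg_width       = 4 * 1024 / 4     = 1024 pixels per VM page,
-// i.e. one 64B PTE request covers eight 4KB pages worth of a linear scanline.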
-
-static unsigned int CalculatePrefetchSourceLines(
- double VRatio,
- unsigned int VTaps,
- bool Interlace,
- bool ProgressiveToInterlaceUnitInOPP,
- unsigned int SwathHeight,
- enum dml2_rotation_angle RotationAngle,
- bool mirrored,
- bool ViewportStationary,
- unsigned int SwathWidth,
- unsigned int ViewportHeight,
- unsigned int ViewportXStart,
- unsigned int ViewportYStart,
-
- // Output
- unsigned int *VInitPreFill,
- unsigned int *MaxNumSwath)
-{
-
- unsigned int vp_start_rot = 0;
- unsigned int sw0_tmp = 0;
- unsigned int MaxPartialSwath = 0;
- double numLines = 0;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatio = %f\n", __func__, VRatio);
- dml2_printf("DML::%s: VTaps = %u\n", __func__, VTaps);
- dml2_printf("DML::%s: ViewportXStart = %u\n", __func__, ViewportXStart);
- dml2_printf("DML::%s: ViewportYStart = %u\n", __func__, ViewportYStart);
- dml2_printf("DML::%s: ViewportStationary = %u\n", __func__, ViewportStationary);
- dml2_printf("DML::%s: SwathHeight = %u\n", __func__, SwathHeight);
-#endif
- if (ProgressiveToInterlaceUnitInOPP)
- *VInitPreFill = (unsigned int)(math_floor2((VRatio + (double)VTaps + 1) / 2.0, 1));
- else
- *VInitPreFill = (unsigned int)(math_floor2((VRatio + (double)VTaps + 1 + (Interlace ? 1 : 0) * 0.5 * VRatio) / 2.0, 1));
-
- if (ViewportStationary) {
- if (RotationAngle == dml2_rotation_180) {
- vp_start_rot = SwathHeight - (((unsigned int)(ViewportYStart + ViewportHeight - 1) % SwathHeight) + 1);
- } else if ((RotationAngle == dml2_rotation_270 && !mirrored) || (RotationAngle == dml2_rotation_90 && mirrored)) {
- vp_start_rot = ViewportXStart;
- } else if ((RotationAngle == dml2_rotation_90 && !mirrored) || (RotationAngle == dml2_rotation_270 && mirrored)) {
- vp_start_rot = SwathHeight - (((unsigned int)(ViewportYStart + SwathWidth - 1) % SwathHeight) + 1);
- } else {
- vp_start_rot = ViewportYStart;
- }
- sw0_tmp = SwathHeight - (vp_start_rot % SwathHeight);
- if (sw0_tmp < *VInitPreFill) {
- *MaxNumSwath = (unsigned int)(math_ceil2((*VInitPreFill - sw0_tmp) / (double)SwathHeight, 1) + 1);
- } else {
- *MaxNumSwath = 1;
- }
- MaxPartialSwath = (unsigned int)(math_max2(1, (unsigned int)(vp_start_rot + *VInitPreFill - 1) % SwathHeight));
- } else {
- *MaxNumSwath = (unsigned int)(math_ceil2((*VInitPreFill - 1.0) / (double)SwathHeight, 1) + 1);
- if (*VInitPreFill > 1) {
- MaxPartialSwath = (unsigned int)(math_max2(1, (unsigned int)(*VInitPreFill - 2) % SwathHeight));
- } else {
- MaxPartialSwath = (unsigned int)(math_max2(1, (unsigned int)(*VInitPreFill + SwathHeight - 2) % SwathHeight));
- }
- }
- numLines = *MaxNumSwath * SwathHeight + MaxPartialSwath;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: vp_start_rot = %u\n", __func__, vp_start_rot);
- dml2_printf("DML::%s: VInitPreFill = %u\n", __func__, *VInitPreFill);
- dml2_printf("DML::%s: MaxPartialSwath = %u\n", __func__, MaxPartialSwath);
- dml2_printf("DML::%s: MaxNumSwath = %u\n", __func__, *MaxNumSwath);
- dml2_printf("DML::%s: Prefetch source lines = %3.2f\n", __func__, numLines);
-#endif
- return (unsigned int)(numLines);
-
-}
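-
-// Illustrative example (not part of the original source, assumed values): progressive output
-// with VRatio = 1.0 and VTaps = 4 gives VInitPreFill = floor((1.0 + 4 + 1) / 2) = 3. For a
-// non-stationary viewport with SwathHeight = 4 this yields MaxNumSwath = ceil(2 / 4) + 1 = 2
-// and MaxPartialSwath = max(1, (3 - 2) % 4) = 1, so 2 * 4 + 1 = 9 prefetch source lines.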
-
-static void CalculateRowBandwidth(
- bool GPUVMEnable,
- bool use_one_row_for_frame,
- enum dml2_source_format_class SourcePixelFormat,
- double VRatio,
- double VRatioChroma,
- bool DCCEnable,
- double LineTime,
- unsigned int PixelPTEBytesPerRowLuma,
- unsigned int PixelPTEBytesPerRowChroma,
- unsigned int dpte_row_height_luma,
- unsigned int dpte_row_height_chroma,
-
- bool mrq_present,
- unsigned int meta_row_bytes_per_row_ub_l,
- unsigned int meta_row_bytes_per_row_ub_c,
- unsigned int meta_row_height_luma,
- unsigned int meta_row_height_chroma,
-
- // Output
- double *dpte_row_bw,
- double *meta_row_bw)
-{
- if (!DCCEnable || !mrq_present) {
- *meta_row_bw = 0;
- } else if (dml2_core_shared_is_420(SourcePixelFormat) || SourcePixelFormat == dml2_rgbe_alpha) {
- *meta_row_bw = VRatio * meta_row_bytes_per_row_ub_l / (meta_row_height_luma * LineTime)
- + VRatioChroma * meta_row_bytes_per_row_ub_c / (meta_row_height_chroma * LineTime);
- } else {
- *meta_row_bw = VRatio * meta_row_bytes_per_row_ub_l / (meta_row_height_luma * LineTime);
- }
-
- if (GPUVMEnable != true) {
- *dpte_row_bw = 0;
- } else if (dml2_core_shared_is_420(SourcePixelFormat) || SourcePixelFormat == dml2_rgbe_alpha) {
- *dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime)
- + VRatioChroma * PixelPTEBytesPerRowChroma / (dpte_row_height_chroma * LineTime);
- } else {
- *dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime);
- }
-}
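-
-// Illustrative example (not part of the original source, assumed values): for a luma-only
-// surface with GPUVM enabled, VRatio = 1.0, 2048 PTE bytes per row, a dpte row height of
-// 128 lines and a line time of 10us, the PTE fetch bandwidth is
-//   dpte_row_bw = 1.0 * 2048 / (128 * 10us) = 1.6 bytes/us.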
-
-static void CalculateMALLUseForStaticScreen(
- const struct dml2_display_cfg *display_cfg,
- unsigned int NumberOfActiveSurfaces,
- unsigned int MALLAllocatedForDCN,
- unsigned int SurfaceSizeInMALL[],
- bool one_row_per_frame_fits_in_buffer[],
-
- // Output
- bool is_using_mall_for_ss[])
-{
-
- unsigned int SurfaceToAddToMALL;
- bool CanAddAnotherSurfaceToMALL;
- unsigned int TotalSurfaceSizeInMALL;
-
- TotalSurfaceSizeInMALL = 0;
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- is_using_mall_for_ss[k] = (display_cfg->plane_descriptors[k].overrides.refresh_from_mall == dml2_refresh_from_mall_mode_override_force_enable);
- if (is_using_mall_for_ss[k])
- TotalSurfaceSizeInMALL = TotalSurfaceSizeInMALL + SurfaceSizeInMALL[k];
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, is_using_mall_for_ss = %u\n", __func__, k, is_using_mall_for_ss[k]);
- dml2_printf("DML::%s: k=%u, TotalSurfaceSizeInMALL = %u\n", __func__, k, TotalSurfaceSizeInMALL);
-#endif
- }
-
- SurfaceToAddToMALL = 0;
- CanAddAnotherSurfaceToMALL = true;
- while (CanAddAnotherSurfaceToMALL) {
- CanAddAnotherSurfaceToMALL = false;
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (TotalSurfaceSizeInMALL + SurfaceSizeInMALL[k] <= MALLAllocatedForDCN * 1024 * 1024 &&
- !is_using_mall_for_ss[k] && display_cfg->plane_descriptors[k].overrides.refresh_from_mall != dml2_refresh_from_mall_mode_override_force_disable && one_row_per_frame_fits_in_buffer[k] &&
- (!CanAddAnotherSurfaceToMALL || SurfaceSizeInMALL[k] < SurfaceSizeInMALL[SurfaceToAddToMALL])) {
- CanAddAnotherSurfaceToMALL = true;
- SurfaceToAddToMALL = k;
- dml2_printf("DML::%s: k=%u, UseMALLForStaticScreen = %u (dis, en, optimize)\n", __func__, k, display_cfg->plane_descriptors[k].overrides.refresh_from_mall);
- }
- }
- if (CanAddAnotherSurfaceToMALL) {
- is_using_mall_for_ss[SurfaceToAddToMALL] = true;
- TotalSurfaceSizeInMALL = TotalSurfaceSizeInMALL + SurfaceSizeInMALL[SurfaceToAddToMALL];
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SurfaceToAddToMALL = %u\n", __func__, SurfaceToAddToMALL);
- dml2_printf("DML::%s: TotalSurfaceSizeInMALL = %u\n", __func__, TotalSurfaceSizeInMALL);
-#endif
- }
- }
-}
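-
-// Illustrative example (not part of the original source, assumed values): with
-// MALLAllocatedForDCN = 64 (MB) and three eligible surfaces of 20MB, 30MB and 50MB, the greedy
-// loop above admits the smallest remaining surface each pass: 20MB, then 30MB (total 50MB),
-// and stops because adding the 50MB surface would exceed the 64MB allocation.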
-
-static void CalculateDCCConfiguration(
- bool DCCEnabled,
- bool DCCProgrammingAssumesScanDirectionUnknown,
- enum dml2_source_format_class SourcePixelFormat,
- unsigned int SurfaceWidthLuma,
- unsigned int SurfaceWidthChroma,
- unsigned int SurfaceHeightLuma,
- unsigned int SurfaceHeightChroma,
- unsigned int nomDETInKByte,
- unsigned int RequestHeight256ByteLuma,
- unsigned int RequestHeight256ByteChroma,
- enum dml2_swizzle_mode TilingFormat,
- unsigned int BytePerPixelY,
- unsigned int BytePerPixelC,
- double BytePerPixelDETY,
- double BytePerPixelDETC,
- enum dml2_rotation_angle RotationAngle,
-
- // Output
- enum dml2_core_internal_request_type *RequestLuma,
- enum dml2_core_internal_request_type *RequestChroma,
- unsigned int *MaxUncompressedBlockLuma,
- unsigned int *MaxUncompressedBlockChroma,
- unsigned int *MaxCompressedBlockLuma,
- unsigned int *MaxCompressedBlockChroma,
- unsigned int *IndependentBlockLuma,
- unsigned int *IndependentBlockChroma)
-{
- unsigned int DETBufferSizeForDCC = nomDETInKByte * 1024;
-
- unsigned int yuv420;
- unsigned int horz_div_l;
- unsigned int horz_div_c;
- unsigned int vert_div_l;
- unsigned int vert_div_c;
-
- unsigned int swath_buf_size;
- double detile_buf_vp_horz_limit;
- double detile_buf_vp_vert_limit;
-
- yuv420 = dml2_core_shared_is_420(SourcePixelFormat);
- horz_div_l = 1;
- horz_div_c = 1;
- vert_div_l = 1;
- vert_div_c = 1;
-
- if (BytePerPixelY == 1)
- vert_div_l = 0;
- if (BytePerPixelC == 1)
- vert_div_c = 0;
-
- if (BytePerPixelC == 0) {
- swath_buf_size = DETBufferSizeForDCC / 2 - 2 * 256;
- detile_buf_vp_horz_limit = (double)swath_buf_size / ((double)RequestHeight256ByteLuma * BytePerPixelY / (1 + horz_div_l));
- detile_buf_vp_vert_limit = (double)swath_buf_size / (256.0 / RequestHeight256ByteLuma / (1 + vert_div_l));
- } else {
- swath_buf_size = DETBufferSizeForDCC / 2 - 2 * 2 * 256;
- detile_buf_vp_horz_limit = (double)swath_buf_size / ((double)RequestHeight256ByteLuma * BytePerPixelY / (1 + horz_div_l) + (double)RequestHeight256ByteChroma * BytePerPixelC / (1 + horz_div_c) / (1 + yuv420));
- detile_buf_vp_vert_limit = (double)swath_buf_size / (256.0 / RequestHeight256ByteLuma / (1 + vert_div_l) + 256.0 / RequestHeight256ByteChroma / (1 + vert_div_c) / (1 + yuv420));
- }
-
- if (SourcePixelFormat == dml2_420_10) {
- detile_buf_vp_horz_limit = 1.5 * detile_buf_vp_horz_limit;
- detile_buf_vp_vert_limit = 1.5 * detile_buf_vp_vert_limit;
- }
-
- detile_buf_vp_horz_limit = math_floor2(detile_buf_vp_horz_limit - 1, 16);
- detile_buf_vp_vert_limit = math_floor2(detile_buf_vp_vert_limit - 1, 16);
-
- unsigned int MAS_vp_horz_limit;
- unsigned int MAS_vp_vert_limit;
- unsigned int max_vp_horz_width;
- unsigned int max_vp_vert_height;
- unsigned int eff_surf_width_l;
- unsigned int eff_surf_width_c;
- unsigned int eff_surf_height_l;
- unsigned int eff_surf_height_c;
-
- unsigned int full_swath_bytes_horz_wc_l;
- unsigned int full_swath_bytes_horz_wc_c;
- unsigned int full_swath_bytes_vert_wc_l;
- unsigned int full_swath_bytes_vert_wc_c;
-
- MAS_vp_horz_limit = SourcePixelFormat == dml2_rgbe_alpha ? 3840 : 6144;
- MAS_vp_vert_limit = SourcePixelFormat == dml2_rgbe_alpha ? 3840 : (BytePerPixelY == 8 ? 3072 : 6144);
- max_vp_horz_width = (unsigned int)(math_min2((double)MAS_vp_horz_limit, detile_buf_vp_horz_limit));
- max_vp_vert_height = (unsigned int)(math_min2((double)MAS_vp_vert_limit, detile_buf_vp_vert_limit));
- eff_surf_width_l = (SurfaceWidthLuma > max_vp_horz_width ? max_vp_horz_width : SurfaceWidthLuma);
- eff_surf_width_c = eff_surf_width_l / (1 + yuv420);
- eff_surf_height_l = (SurfaceHeightLuma > max_vp_vert_height ? max_vp_vert_height : SurfaceHeightLuma);
- eff_surf_height_c = eff_surf_height_l / (1 + yuv420);
-
- full_swath_bytes_horz_wc_l = eff_surf_width_l * RequestHeight256ByteLuma * BytePerPixelY;
- full_swath_bytes_vert_wc_l = eff_surf_height_l * 256 / RequestHeight256ByteLuma;
- if (BytePerPixelC > 0) {
- full_swath_bytes_horz_wc_c = eff_surf_width_c * RequestHeight256ByteChroma * BytePerPixelC;
- full_swath_bytes_vert_wc_c = eff_surf_height_c * 256 / RequestHeight256ByteChroma;
- } else {
- full_swath_bytes_horz_wc_c = 0;
- full_swath_bytes_vert_wc_c = 0;
- }
-
- if (SourcePixelFormat == dml2_420_10) {
- full_swath_bytes_horz_wc_l = (unsigned int)(math_ceil2((double)full_swath_bytes_horz_wc_l * 2.0 / 3.0, 256.0));
- full_swath_bytes_horz_wc_c = (unsigned int)(math_ceil2((double)full_swath_bytes_horz_wc_c * 2.0 / 3.0, 256.0));
- full_swath_bytes_vert_wc_l = (unsigned int)(math_ceil2((double)full_swath_bytes_vert_wc_l * 2.0 / 3.0, 256.0));
- full_swath_bytes_vert_wc_c = (unsigned int)(math_ceil2((double)full_swath_bytes_vert_wc_c * 2.0 / 3.0, 256.0));
- }
-
- unsigned int req128_horz_wc_l;
- unsigned int req128_horz_wc_c;
- unsigned int req128_vert_wc_l;
- unsigned int req128_vert_wc_c;
-
- if (2 * full_swath_bytes_horz_wc_l + 2 * full_swath_bytes_horz_wc_c <= DETBufferSizeForDCC) {
- req128_horz_wc_l = 0;
- req128_horz_wc_c = 0;
- } else if (full_swath_bytes_horz_wc_l < 1.5 * full_swath_bytes_horz_wc_c && 2 * full_swath_bytes_horz_wc_l + full_swath_bytes_horz_wc_c <= DETBufferSizeForDCC) {
- req128_horz_wc_l = 0;
- req128_horz_wc_c = 1;
- } else if (full_swath_bytes_horz_wc_l >= 1.5 * full_swath_bytes_horz_wc_c && full_swath_bytes_horz_wc_l + 2 * full_swath_bytes_horz_wc_c <= DETBufferSizeForDCC) {
- req128_horz_wc_l = 1;
- req128_horz_wc_c = 0;
- } else {
- req128_horz_wc_l = 1;
- req128_horz_wc_c = 1;
- }
-
- if (2 * full_swath_bytes_vert_wc_l + 2 * full_swath_bytes_vert_wc_c <= DETBufferSizeForDCC) {
- req128_vert_wc_l = 0;
- req128_vert_wc_c = 0;
- } else if (full_swath_bytes_vert_wc_l < 1.5 * full_swath_bytes_vert_wc_c && 2 * full_swath_bytes_vert_wc_l + full_swath_bytes_vert_wc_c <= DETBufferSizeForDCC) {
- req128_vert_wc_l = 0;
- req128_vert_wc_c = 1;
- } else if (full_swath_bytes_vert_wc_l >= 1.5 * full_swath_bytes_vert_wc_c && full_swath_bytes_vert_wc_l + 2 * full_swath_bytes_vert_wc_c <= DETBufferSizeForDCC) {
- req128_vert_wc_l = 1;
- req128_vert_wc_c = 0;
- } else {
- req128_vert_wc_l = 1;
- req128_vert_wc_c = 1;
- }
-
- unsigned int segment_order_horz_contiguous_luma;
- unsigned int segment_order_horz_contiguous_chroma;
- unsigned int segment_order_vert_contiguous_luma;
- unsigned int segment_order_vert_contiguous_chroma;
-
- if (BytePerPixelY == 2) {
- segment_order_horz_contiguous_luma = 0;
- segment_order_vert_contiguous_luma = 1;
- } else {
- segment_order_horz_contiguous_luma = 1;
- segment_order_vert_contiguous_luma = 0;
- }
-
- if (BytePerPixelC == 2) {
- segment_order_horz_contiguous_chroma = 0;
- segment_order_vert_contiguous_chroma = 1;
- } else {
- segment_order_horz_contiguous_chroma = 1;
- segment_order_vert_contiguous_chroma = 0;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DCCEnabled = %u\n", __func__, DCCEnabled);
- dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, nomDETInKByte);
- dml2_printf("DML::%s: DETBufferSizeForDCC = %u\n", __func__, DETBufferSizeForDCC);
- dml2_printf("DML::%s: req128_horz_wc_l = %u\n", __func__, req128_horz_wc_l);
- dml2_printf("DML::%s: req128_horz_wc_c = %u\n", __func__, req128_horz_wc_c);
- dml2_printf("DML::%s: full_swath_bytes_horz_wc_l = %u\n", __func__, full_swath_bytes_horz_wc_l);
- dml2_printf("DML::%s: full_swath_bytes_vert_wc_c = %u\n", __func__, full_swath_bytes_vert_wc_c);
- dml2_printf("DML::%s: segment_order_horz_contiguous_luma = %u\n", __func__, segment_order_horz_contiguous_luma);
- dml2_printf("DML::%s: segment_order_horz_contiguous_chroma = %u\n", __func__, segment_order_horz_contiguous_chroma);
-#endif
- if (DCCProgrammingAssumesScanDirectionUnknown == true) {
- if (req128_horz_wc_l == 0 && req128_vert_wc_l == 0) {
- *RequestLuma = dml2_core_internal_request_type_256_bytes;
- } else if ((req128_horz_wc_l == 1 && segment_order_horz_contiguous_luma == 0) || (req128_vert_wc_l == 1 && segment_order_vert_contiguous_luma == 0)) {
- *RequestLuma = dml2_core_internal_request_type_128_bytes_non_contiguous;
- } else {
- *RequestLuma = dml2_core_internal_request_type_128_bytes_contiguous;
- }
- if (req128_horz_wc_c == 0 && req128_vert_wc_c == 0) {
- *RequestChroma = dml2_core_internal_request_type_256_bytes;
- } else if ((req128_horz_wc_c == 1 && segment_order_horz_contiguous_chroma == 0) || (req128_vert_wc_c == 1 && segment_order_vert_contiguous_chroma == 0)) {
- *RequestChroma = dml2_core_internal_request_type_128_bytes_non_contiguous;
- } else {
- *RequestChroma = dml2_core_internal_request_type_128_bytes_contiguous;
- }
- } else if (!dml_is_vertical_rotation(RotationAngle)) {
- if (req128_horz_wc_l == 0) {
- *RequestLuma = dml2_core_internal_request_type_256_bytes;
- } else if (segment_order_horz_contiguous_luma == 0) {
- *RequestLuma = dml2_core_internal_request_type_128_bytes_non_contiguous;
- } else {
- *RequestLuma = dml2_core_internal_request_type_128_bytes_contiguous;
- }
- if (req128_horz_wc_c == 0) {
- *RequestChroma = dml2_core_internal_request_type_256_bytes;
- } else if (segment_order_horz_contiguous_chroma == 0) {
- *RequestChroma = dml2_core_internal_request_type_128_bytes_non_contiguous;
- } else {
- *RequestChroma = dml2_core_internal_request_type_128_bytes_contiguous;
- }
- } else {
- if (req128_vert_wc_l == 0) {
- *RequestLuma = dml2_core_internal_request_type_256_bytes;
- } else if (segment_order_vert_contiguous_luma == 0) {
- *RequestLuma = dml2_core_internal_request_type_128_bytes_non_contiguous;
- } else {
- *RequestLuma = dml2_core_internal_request_type_128_bytes_contiguous;
- }
- if (req128_vert_wc_c == 0) {
- *RequestChroma = dml2_core_internal_request_type_256_bytes;
- } else if (segment_order_vert_contiguous_chroma == 0) {
- *RequestChroma = dml2_core_internal_request_type_128_bytes_non_contiguous;
- } else {
- *RequestChroma = dml2_core_internal_request_type_128_bytes_contiguous;
- }
- }
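-	/*
-	 * Editor's note (summary of the mapping below, not from the original source): a 128-byte
-	 * request is only treated as "contiguous" when the segment order for the active scan
-	 * direction keeps the two 128-byte halves of a 256-byte block adjacent; the non-contiguous
-	 * case forces the smaller 64-byte compressed/independent block sizes assigned below.
-	 */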
-
- if (*RequestLuma == dml2_core_internal_request_type_256_bytes) {
- *MaxUncompressedBlockLuma = 256;
- *MaxCompressedBlockLuma = 256;
- *IndependentBlockLuma = 0;
- } else if (*RequestLuma == dml2_core_internal_request_type_128_bytes_contiguous) {
- *MaxUncompressedBlockLuma = 256;
- *MaxCompressedBlockLuma = 128;
- *IndependentBlockLuma = 128;
- } else {
- *MaxUncompressedBlockLuma = 256;
- *MaxCompressedBlockLuma = 64;
- *IndependentBlockLuma = 64;
- }
-
- if (*RequestChroma == dml2_core_internal_request_type_256_bytes) {
- *MaxUncompressedBlockChroma = 256;
- *MaxCompressedBlockChroma = 256;
- *IndependentBlockChroma = 0;
- } else if (*RequestChroma == dml2_core_internal_request_type_128_bytes_contiguous) {
- *MaxUncompressedBlockChroma = 256;
- *MaxCompressedBlockChroma = 128;
- *IndependentBlockChroma = 128;
- } else {
- *MaxUncompressedBlockChroma = 256;
- *MaxCompressedBlockChroma = 64;
- *IndependentBlockChroma = 64;
- }
-
- if (DCCEnabled != true || BytePerPixelC == 0) {
- *MaxUncompressedBlockChroma = 0;
- *MaxCompressedBlockChroma = 0;
- *IndependentBlockChroma = 0;
- }
-
- if (DCCEnabled != true) {
- *MaxUncompressedBlockLuma = 0;
- *MaxCompressedBlockLuma = 0;
- *IndependentBlockLuma = 0;
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: MaxUncompressedBlockLuma = %u\n", __func__, *MaxUncompressedBlockLuma);
- dml2_printf("DML::%s: MaxCompressedBlockLuma = %u\n", __func__, *MaxCompressedBlockLuma);
- dml2_printf("DML::%s: IndependentBlockLuma = %u\n", __func__, *IndependentBlockLuma);
- dml2_printf("DML::%s: MaxUncompressedBlockChroma = %u\n", __func__, *MaxUncompressedBlockChroma);
- dml2_printf("DML::%s: MaxCompressedBlockChroma = %u\n", __func__, *MaxCompressedBlockChroma);
- dml2_printf("DML::%s: IndependentBlockChroma = %u\n", __func__, *IndependentBlockChroma);
-#endif
-
-}
-
-static void calculate_mcache_row_bytes(
- struct dml2_core_internal_scratch *scratch,
- struct dml2_core_calcs_calculate_mcache_row_bytes_params *p)
-{
- unsigned int vmpg_bytes = 0;
- unsigned int blk_bytes = 0;
- float meta_per_mvmpg_per_channel = 0;
- unsigned int est_blk_per_vmpg = 2;
- unsigned int mvmpg_per_row_ub = 0;
- unsigned int full_vp_width_mvmpg_aligned = 0;
- unsigned int full_vp_height_mvmpg_aligned = 0;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: num_chans = %u\n", __func__, p->num_chans);
- dml2_printf("DML::%s: mem_word_bytes = %u\n", __func__, p->mem_word_bytes);
- dml2_printf("DML::%s: mcache_line_size_bytes = %u\n", __func__, p->mcache_line_size_bytes);
- dml2_printf("DML::%s: mcache_size_bytes = %u\n", __func__, p->mcache_size_bytes);
- dml2_printf("DML::%s: gpuvm_enable = %u\n", __func__, p->gpuvm_enable);
- dml2_printf("DML::%s: gpuvm_page_size_kbytes = %u\n", __func__, p->gpuvm_page_size_kbytes);
- dml2_printf("DML::%s: vp_stationary = %u\n", __func__, p->vp_stationary);
- dml2_printf("DML::%s: tiling_mode = %u\n", __func__, p->tiling_mode);
- dml2_printf("DML::%s: vp_start_x = %u\n", __func__, p->vp_start_x);
- dml2_printf("DML::%s: vp_start_y = %u\n", __func__, p->vp_start_y);
- dml2_printf("DML::%s: full_vp_width = %u\n", __func__, p->full_vp_width);
- dml2_printf("DML::%s: full_vp_height = %u\n", __func__, p->full_vp_height);
- dml2_printf("DML::%s: blk_width = %u\n", __func__, p->blk_width);
- dml2_printf("DML::%s: blk_height = %u\n", __func__, p->blk_height);
- dml2_printf("DML::%s: vmpg_width = %u\n", __func__, p->vmpg_width);
- dml2_printf("DML::%s: vmpg_height = %u\n", __func__, p->vmpg_height);
- dml2_printf("DML::%s: full_swath_bytes = %u\n", __func__, p->full_swath_bytes);
-#endif
- DML2_ASSERT(p->mcache_line_size_bytes != 0);
- DML2_ASSERT(p->mcache_size_bytes != 0);
-
- *p->mvmpg_width = 0;
- *p->mvmpg_height = 0;
-
- if (p->full_vp_height == 0 && p->full_vp_width == 0) {
- *p->num_mcaches = 0;
- *p->mcache_row_bytes = 0;
- } else {
- blk_bytes = dml_get_tile_block_size_bytes(p->tiling_mode);
-
- // if gpuvm is not enabled, the alignment boundary should be in terms of the tiling block size
- vmpg_bytes = p->gpuvm_page_size_kbytes * 1024;
-
- // With vmpg_bytes >= tile blk_bytes, the meta_row_width alignment equations are relative to the vmpg_width/height.
- // But for a 4KB page with a 64KB tile block, we need the meta for all pages in the tile block.
- // Therefore, the alignment is relative to the blk_width/height. The factor of 16 vmpg per 64KB tile block is applied at the end.
- *p->mvmpg_width = p->blk_width;
- *p->mvmpg_height = p->blk_height;
- if (p->gpuvm_enable) {
- if (vmpg_bytes >= blk_bytes) {
- *p->mvmpg_width = p->vmpg_width;
- *p->mvmpg_height = p->vmpg_height;
- } else if (!((blk_bytes == 65536) && (vmpg_bytes == 4096))) {
- dml2_printf("ERROR: DML::%s: Tiling size and vm page size combination not supported\n", __func__);
- DML2_ASSERT(0);
- }
- }
-
- //For plane0 & 1, first calculate full_vp_width/height_l/c aligned to vmpg_width/height_l/c
- full_vp_width_mvmpg_aligned = (unsigned int)(math_floor2((p->vp_start_x + p->full_vp_width) + *p->mvmpg_width - 1, *p->mvmpg_width) - math_floor2(p->vp_start_x, *p->mvmpg_width));
- full_vp_height_mvmpg_aligned = (unsigned int)(math_floor2((p->vp_start_y + p->full_vp_height) + *p->mvmpg_height - 1, *p->mvmpg_height) - math_floor2(p->vp_start_y, *p->mvmpg_height));
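-	/*
-	 * Worked example (editor's sketch, assumed values): with *p->mvmpg_width = 256,
-	 * vp_start_x = 100 and full_vp_width = 1920, the viewport spans x = 100..2019, i.e.
-	 * mvmpg columns 0..7, so full_vp_width_mvmpg_aligned = floor2(2275, 256) - floor2(100, 256)
-	 * = 2048 - 0 = 2048 (8 mvmpg widths).
-	 */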
-
- *p->full_vp_access_width_mvmpg_aligned = p->surf_vert ? full_vp_height_mvmpg_aligned : full_vp_width_mvmpg_aligned;
-
- //Use the equation for the exact alignment when possible. Note that the exact alignment cannot be used for horizontal access if vmpg_bytes > blk_bytes.
- if (!p->surf_vert) { //horizontal access
- if (p->vp_stationary == 1 && vmpg_bytes <= blk_bytes)
- *p->meta_row_width_ub = full_vp_width_mvmpg_aligned;
- else
- *p->meta_row_width_ub = (unsigned int)math_ceil2((double)p->full_vp_width - 1, *p->mvmpg_width) + *p->mvmpg_width;
- mvmpg_per_row_ub = *p->meta_row_width_ub / *p->mvmpg_width;
- } else { //vertical access
- if (p->vp_stationary == 1)
- *p->meta_row_width_ub = full_vp_height_mvmpg_aligned;
- else
- *p->meta_row_width_ub = (unsigned int)math_ceil2((double)p->full_vp_height - 1, *p->mvmpg_height) + *p->mvmpg_height;
- mvmpg_per_row_ub = *p->meta_row_width_ub / *p->mvmpg_height;
- }
-
- unsigned int meta_per_mvmpg_per_channel_ub = 0;
-
- if (p->gpuvm_enable) {
- meta_per_mvmpg_per_channel = (float)vmpg_bytes / (float)256 / p->num_chans;
-
- // but use est_blk_per_vmpg (between 2 and 4) so the estimate is not as pessimistic
- if (p->surf_vert && vmpg_bytes > blk_bytes) {
- meta_per_mvmpg_per_channel = (float)est_blk_per_vmpg * blk_bytes / 256 / p->num_chans;
- }
-
- *p->dcc_dram_bw_nom_overhead_factor = 1 + math_max2(1.0 / 256.0, math_ceil2(meta_per_mvmpg_per_channel, p->mem_word_bytes) / (256 * meta_per_mvmpg_per_channel)); // dcc_dr_oh_nom
- } else {
- meta_per_mvmpg_per_channel = (float)blk_bytes / (float)256 / p->num_chans;
-
- if (!p->surf_vert)
- *p->dcc_dram_bw_nom_overhead_factor = 1 + 1.0 / 256.0;
- else
- *p->dcc_dram_bw_nom_overhead_factor = 1 + math_max2(1.0 / 256.0, math_ceil2(meta_per_mvmpg_per_channel, p->mem_word_bytes) / (256 * meta_per_mvmpg_per_channel));
- }
-
- meta_per_mvmpg_per_channel_ub = (unsigned int)math_ceil2((double)meta_per_mvmpg_per_channel, p->mcache_line_size_bytes);
-
- // but for a 4KB vmpg with a 64KB tile block, scale up by the 16 pages per tile block
- if (p->gpuvm_enable && (blk_bytes == 65536) && (vmpg_bytes == 4096))
- meta_per_mvmpg_per_channel_ub = 16 * meta_per_mvmpg_per_channel_ub;
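-		/*
-		 * Worked example (editor's sketch, assumed values num_chans = 8, mcache_line_size_bytes = 32,
-		 * using the 1:256 meta ratio implied by the divide-by-256 above): a 64KB vmpg gives
-		 * 65536 / 256 / 8 = 32 bytes of meta per vmpg per channel. For the 4KB-page / 64KB-tile case,
-		 * 4096 / 256 / 8 = 2 bytes rounds up to one 32-byte mcache line, and the x16 factor then
-		 * covers all 16 pages in the tile block (512 bytes).
-		 */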
-
- // If this mcache_row_bytes for the full viewport of the surface is less than or equal to mcache_bytes,
- // then one mcache can be used for this request stream. If not, it is useful to know the width of the viewport that can be supported in the mcache_bytes.
- if (p->gpuvm_enable || !p->surf_vert) {
- *p->mcache_row_bytes = mvmpg_per_row_ub * meta_per_mvmpg_per_channel_ub;
- } else { // vertical access with gpuvm disabled
- *p->mcache_row_bytes = *p->meta_row_width_ub * p->blk_height * p->bytes_per_pixel / 256;
- *p->mcache_row_bytes = (unsigned int)math_ceil2((double)*p->mcache_row_bytes / p->num_chans, p->mcache_line_size_bytes);
- }
-
- *p->dcc_dram_bw_pref_overhead_factor = 1 + math_max2(1.0 / 256.0, *p->mcache_row_bytes / p->full_swath_bytes); // dcc_dr_oh_pref
- *p->num_mcaches = (unsigned int)math_ceil2((double)*p->mcache_row_bytes / p->mcache_size_bytes, 1);
-
- unsigned int mvmpg_per_mcache = p->mcache_size_bytes / meta_per_mvmpg_per_channel_ub;
- *p->mvmpg_per_mcache_lb = (unsigned int)math_floor2(mvmpg_per_mcache, 1);
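-		/*
-		 * Editor's note (illustrative, assumed values): with mcache_row_bytes = 1536 and
-		 * mcache_size_bytes = 1024, num_mcaches = ceil(1536 / 1024) = 2, and with
-		 * meta_per_mvmpg_per_channel_ub = 32 each mcache can hold at most
-		 * floor(1024 / 32) = 32 mvmpgs (mvmpg_per_mcache_lb).
-		 */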
-
- DML2_ASSERT(*p->num_mcaches > 0);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: gpuvm_enable = %u\n", __func__, p->gpuvm_enable);
- dml2_printf("DML::%s: vmpg_bytes = %u\n", __func__, vmpg_bytes);
- dml2_printf("DML::%s: blk_bytes = %u\n", __func__, blk_bytes);
- dml2_printf("DML::%s: meta_per_mvmpg_per_channel = %f\n", __func__, meta_per_mvmpg_per_channel);
- dml2_printf("DML::%s: mvmpg_per_row_ub = %u\n", __func__, mvmpg_per_row_ub);
- dml2_printf("DML::%s: meta_row_width_ub = %u\n", __func__, *p->meta_row_width_ub);
- dml2_printf("DML::%s: mvmpg_width = %u\n", __func__, *p->mvmpg_width);
- dml2_printf("DML::%s: mvmpg_height = %u\n", __func__, *p->mvmpg_height);
- dml2_printf("DML::%s: num_mcaches = %u\n", __func__, *p->num_mcaches);
- dml2_printf("DML::%s: dcc_dram_bw_nom_overhead_factor = %f\n", __func__, *p->dcc_dram_bw_nom_overhead_factor);
- dml2_printf("DML::%s: dcc_dram_bw_pref_overhead_factor = %f\n", __func__, *p->dcc_dram_bw_pref_overhead_factor);
-#endif
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: mcache_row_bytes = %u\n", __func__, *p->mcache_row_bytes);
- dml2_printf("DML::%s: num_mcaches = %u\n", __func__, *p->num_mcaches);
-#endif
-}
-
-static void calculate_mcache_setting(
- struct dml2_core_internal_scratch *scratch,
- struct dml2_core_calcs_calculate_mcache_setting_params *p)
-{
- unsigned int n;
-
- struct dml2_core_shared_calculate_mcache_setting_locals *l = &scratch->calculate_mcache_setting_locals;
- memset(l, 0, sizeof(struct dml2_core_shared_calculate_mcache_setting_locals));
-
- *p->num_mcaches_l = 0;
- *p->mcache_row_bytes_l = 0;
- *p->dcc_dram_bw_nom_overhead_factor_l = 1.0;
- *p->dcc_dram_bw_pref_overhead_factor_l = 1.0;
-
- *p->num_mcaches_c = 0;
- *p->mcache_row_bytes_c = 0;
- *p->dcc_dram_bw_nom_overhead_factor_c = 1.0;
- *p->dcc_dram_bw_pref_overhead_factor_c = 1.0;
-
- *p->mall_comb_mcache_l = 0;
- *p->mall_comb_mcache_c = 0;
- *p->lc_comb_mcache = 0;
-
- if (!p->dcc_enable)
- return;
-
- l->is_dual_plane = dml2_core_shared_is_420(p->source_format) || p->source_format == dml2_rgbe_alpha;
-
- l->l_p.num_chans = p->num_chans;
- l->l_p.mem_word_bytes = p->mem_word_bytes;
- l->l_p.mcache_size_bytes = p->mcache_size_bytes;
- l->l_p.mcache_line_size_bytes = p->mcache_line_size_bytes;
- l->l_p.gpuvm_enable = p->gpuvm_enable;
- l->l_p.gpuvm_page_size_kbytes = p->gpuvm_page_size_kbytes;
- l->l_p.surf_vert = p->surf_vert;
- l->l_p.vp_stationary = p->vp_stationary;
- l->l_p.tiling_mode = p->tiling_mode;
- l->l_p.vp_start_x = p->vp_start_x_l;
- l->l_p.vp_start_y = p->vp_start_y_l;
- l->l_p.full_vp_width = p->full_vp_width_l;
- l->l_p.full_vp_height = p->full_vp_height_l;
- l->l_p.blk_width = p->blk_width_l;
- l->l_p.blk_height = p->blk_height_l;
- l->l_p.vmpg_width = p->vmpg_width_l;
- l->l_p.vmpg_height = p->vmpg_height_l;
- l->l_p.full_swath_bytes = p->full_swath_bytes_l;
- l->l_p.bytes_per_pixel = p->bytes_per_pixel_l;
-
- // output
- l->l_p.num_mcaches = p->num_mcaches_l;
- l->l_p.mcache_row_bytes = p->mcache_row_bytes_l;
- l->l_p.dcc_dram_bw_nom_overhead_factor = p->dcc_dram_bw_nom_overhead_factor_l;
- l->l_p.dcc_dram_bw_pref_overhead_factor = p->dcc_dram_bw_pref_overhead_factor_l;
- l->l_p.mvmpg_width = &l->mvmpg_width_l;
- l->l_p.mvmpg_height = &l->mvmpg_height_l;
- l->l_p.full_vp_access_width_mvmpg_aligned = &l->full_vp_access_width_mvmpg_aligned_l;
- l->l_p.meta_row_width_ub = &l->meta_row_width_l;
- l->l_p.mvmpg_per_mcache_lb = &l->mvmpg_per_mcache_lb_l;
-
- calculate_mcache_row_bytes(scratch, &l->l_p);
- dml2_assert(*p->num_mcaches_l > 0);
-
- if (l->is_dual_plane) {
- l->c_p.num_chans = p->num_chans;
- l->c_p.mem_word_bytes = p->mem_word_bytes;
- l->c_p.mcache_size_bytes = p->mcache_size_bytes;
- l->c_p.mcache_line_size_bytes = p->mcache_line_size_bytes;
- l->c_p.gpuvm_enable = p->gpuvm_enable;
- l->c_p.gpuvm_page_size_kbytes = p->gpuvm_page_size_kbytes;
- l->c_p.surf_vert = p->surf_vert;
- l->c_p.vp_stationary = p->vp_stationary;
- l->c_p.tiling_mode = p->tiling_mode;
- l->c_p.vp_start_x = p->vp_start_x_c;
- l->c_p.vp_start_y = p->vp_start_y_c;
- l->c_p.full_vp_width = p->full_vp_width_c;
- l->c_p.full_vp_height = p->full_vp_height_c;
- l->c_p.blk_width = p->blk_width_c;
- l->c_p.blk_height = p->blk_height_c;
- l->c_p.vmpg_width = p->vmpg_width_c;
- l->c_p.vmpg_height = p->vmpg_height_c;
- l->c_p.full_swath_bytes = p->full_swath_bytes_c;
- l->c_p.bytes_per_pixel = p->bytes_per_pixel_c;
-
- // output
- l->c_p.num_mcaches = p->num_mcaches_c;
- l->c_p.mcache_row_bytes = p->mcache_row_bytes_c;
- l->c_p.dcc_dram_bw_nom_overhead_factor = p->dcc_dram_bw_nom_overhead_factor_c;
- l->c_p.dcc_dram_bw_pref_overhead_factor = p->dcc_dram_bw_pref_overhead_factor_c;
- l->c_p.mvmpg_width = &l->mvmpg_width_c;
- l->c_p.mvmpg_height = &l->mvmpg_height_c;
- l->c_p.full_vp_access_width_mvmpg_aligned = &l->full_vp_access_width_mvmpg_aligned_c;
- l->c_p.meta_row_width_ub = &l->meta_row_width_c;
- l->c_p.mvmpg_per_mcache_lb = &l->mvmpg_per_mcache_lb_c;
-
- calculate_mcache_row_bytes(scratch, &l->c_p);
- dml2_assert(*p->num_mcaches_c > 0);
- }
-
- // Sharing for iMALL access
- l->mcache_remainder_l = *p->mcache_row_bytes_l % p->mcache_size_bytes;
- l->mcache_remainder_c = *p->mcache_row_bytes_c % p->mcache_size_bytes;
- l->mvmpg_access_width_l = p->surf_vert ? l->mvmpg_height_l : l->mvmpg_width_l;
- l->mvmpg_access_width_c = p->surf_vert ? l->mvmpg_height_c : l->mvmpg_width_c;
-
- if (p->imall_enable) {
- *p->mall_comb_mcache_l = (2 * l->mcache_remainder_l <= p->mcache_size_bytes);
-
- if (l->is_dual_plane)
- *p->mall_comb_mcache_c = (2 * l->mcache_remainder_c <= p->mcache_size_bytes);
- }
-
- if (!p->surf_vert) // horizontal access
- l->luma_time_factor = (double)l->mvmpg_height_c / l->mvmpg_height_l * 2;
- else // vertical access
- l->luma_time_factor = (double)l->mvmpg_width_c / l->mvmpg_width_l * 2;
-
- // The algorithm starts with computing a non-integer avg_mcache_element_size_l/c:
- l->avg_mcache_element_size_l = l->meta_row_width_l / *p->num_mcaches_l;
- if (l->is_dual_plane) {
- l->avg_mcache_element_size_c = l->meta_row_width_c / *p->num_mcaches_c;
-
- if (!p->imall_enable || (*p->mall_comb_mcache_l == *p->mall_comb_mcache_c)) {
- l->lc_comb_last_mcache_size = (unsigned int)((l->mcache_remainder_l * (*p->mall_comb_mcache_l ? 2 : 1) * l->luma_time_factor) +
- (l->mcache_remainder_c * (*p->mall_comb_mcache_c ? 2 : 1)));
- }
- *p->lc_comb_mcache = (l->lc_comb_last_mcache_size <= p->mcache_size_bytes) && (*p->mall_comb_mcache_l == *p->mall_comb_mcache_c);
- }
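-	/*
-	 * Worked example (editor's sketch, assumed values): with mcache_size_bytes = 1024,
-	 * mcache_remainder_l = 300, mcache_remainder_c = 200, no iMALL combining and
-	 * luma_time_factor = 2, the combined last-mcache footprint is 300 * 2 + 200 = 800 <= 1024,
-	 * so lc_comb_mcache is set and luma/chroma share the final mcache.
-	 */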
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: imall_enable = %u\n", __func__, p->imall_enable);
- dml2_printf("DML::%s: is_dual_plane = %u\n", __func__, l->is_dual_plane);
- dml2_printf("DML::%s: surf_vert = %u\n", __func__, p->surf_vert);
- dml2_printf("DML::%s: mvmpg_width_l = %u\n", __func__, l->mvmpg_width_l);
- dml2_printf("DML::%s: mvmpg_height_l = %u\n", __func__, l->mvmpg_height_l);
- dml2_printf("DML::%s: mcache_remainder_l = %f\n", __func__, l->mcache_remainder_l);
- dml2_printf("DML::%s: num_mcaches_l = %u\n", __func__, *p->num_mcaches_l);
- dml2_printf("DML::%s: avg_mcache_element_size_l = %u\n", __func__, l->avg_mcache_element_size_l);
- dml2_printf("DML::%s: mvmpg_access_width_l = %u\n", __func__, l->mvmpg_access_width_l);
- dml2_printf("DML::%s: mall_comb_mcache_l = %u\n", __func__, *p->mall_comb_mcache_l);
-
- if (l->is_dual_plane) {
- dml2_printf("DML::%s: mvmpg_width_c = %u\n", __func__, l->mvmpg_width_c);
- dml2_printf("DML::%s: mvmpg_height_c = %u\n", __func__, l->mvmpg_height_c);
- dml2_printf("DML::%s: mcache_remainder_c = %f\n", __func__, l->mcache_remainder_c);
- dml2_printf("DML::%s: luma_time_factor = %f\n", __func__, l->luma_time_factor);
- dml2_printf("DML::%s: num_mcaches_c = %u\n", __func__, *p->num_mcaches_c);
- dml2_printf("DML::%s: avg_mcache_element_size_c = %u\n", __func__, l->avg_mcache_element_size_c);
- dml2_printf("DML::%s: mvmpg_access_width_c = %u\n", __func__, l->mvmpg_access_width_c);
- dml2_printf("DML::%s: mall_comb_mcache_c = %u\n", __func__, *p->mall_comb_mcache_c);
- dml2_printf("DML::%s: lc_comb_last_mcache_size = %u\n", __func__, l->lc_comb_last_mcache_size);
- dml2_printf("DML::%s: lc_comb_mcache = %u\n", __func__, *p->lc_comb_mcache);
- }
-#endif
- // calculate split_coordinate
- l->full_vp_access_width_l = p->surf_vert ? p->full_vp_height_l : p->full_vp_width_l;
- l->full_vp_access_width_c = p->surf_vert ? p->full_vp_height_c : p->full_vp_width_c;
-
- for (n = 0; n < *p->num_mcaches_l - 1; n++) {
- p->mcache_offsets_l[n] = (unsigned int)(math_floor2((n + 1) * l->avg_mcache_element_size_l / l->mvmpg_access_width_l, 1)) * l->mvmpg_access_width_l;
- }
- p->mcache_offsets_l[*p->num_mcaches_l - 1] = l->full_vp_access_width_l;
-
- if (l->is_dual_plane) {
- for (n = 0; n < *p->num_mcaches_c - 1; n++) {
- p->mcache_offsets_c[n] = (unsigned int)(math_floor2((n + 1) * l->avg_mcache_element_size_c / l->mvmpg_access_width_c, 1)) * l->mvmpg_access_width_c;
- }
- p->mcache_offsets_c[*p->num_mcaches_c - 1] = l->full_vp_access_width_c;
- }
-#ifdef __DML_VBA_DEBUG__
- for (n = 0; n < *p->num_mcaches_l; n++)
- dml2_printf("DML::%s: mcache_offsets_l[%u] = %u\n", __func__, n, p->mcache_offsets_l[n]);
-
- if (l->is_dual_plane) {
- for (n = 0; n < *p->num_mcaches_c; n++)
- dml2_printf("DML::%s: mcache_offsets_c[%u] = %u\n", __func__, n, p->mcache_offsets_c[n]);
- }
-#endif
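-	/*
-	 * Worked example of the split coordinates above (editor's sketch, assumed values):
-	 * with num_mcaches_l = 3, avg_mcache_element_size_l = 800, mvmpg_access_width_l = 256 and
-	 * full_vp_access_width_l = 2048, the offsets become floor(800 / 256) * 256 = 768,
-	 * floor(1600 / 256) * 256 = 1536, and the last entry is pinned to 2048.
-	 */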
-
- // Luma/Chroma combine in the last mcache
- // In the case of Luma/Chroma combine-mCache (with lc_comb_mcache == 1), all mCaches except the last segment are filled as much as possible while staying aligned to the mvmpg boundary
- if (*p->lc_comb_mcache && l->is_dual_plane) {
- for (n = 0; n < *p->num_mcaches_l - 1; n++)
- p->mcache_offsets_l[n] = (n + 1) * l->mvmpg_per_mcache_lb_l * l->mvmpg_access_width_l;
- p->mcache_offsets_l[*p->num_mcaches_l - 1] = l->full_vp_access_width_l;
-
- for (n = 0; n < *p->num_mcaches_c - 1; n++)
- p->mcache_offsets_c[n] = (n + 1) * l->mvmpg_per_mcache_lb_c * l->mvmpg_access_width_c;
- p->mcache_offsets_c[*p->num_mcaches_c - 1] = l->full_vp_access_width_c;
-
-#ifdef __DML_VBA_DEBUG__
- for (n = 0; n < *p->num_mcaches_l; n++)
- dml2_printf("DML::%s: mcache_offsets_l[%u] = %u\n", __func__, n, p->mcache_offsets_l[n]);
-
- for (n = 0; n < *p->num_mcaches_c; n++)
- dml2_printf("DML::%s: mcache_offsets_c[%u] = %u\n", __func__, n, p->mcache_offsets_c[n]);
-#endif
- }
-
- *p->mcache_shift_granularity_l = l->mvmpg_access_width_l;
- *p->mcache_shift_granularity_c = l->mvmpg_access_width_c;
-}
-
-static void calculate_mall_bw_overhead_factor(
- double mall_prefetch_sdp_overhead_factor[], //mall_sdp_oh_nom/pref
- double mall_prefetch_dram_overhead_factor[], //mall_dram_oh_nom/pref
-
- // input
- const struct dml2_display_cfg *display_cfg,
- unsigned int num_active_planes)
-{
- for (unsigned int k = 0; k < num_active_planes; ++k) {
- mall_prefetch_sdp_overhead_factor[k] = 1.0;
- mall_prefetch_dram_overhead_factor[k] = 1.0;
-
- // SDP - on the return side
- if (display_cfg->plane_descriptors[k].overrides.legacy_svp_config == dml2_svp_mode_override_imall) // always no data return
- mall_prefetch_sdp_overhead_factor[k] = 1.25;
- else if (display_cfg->plane_descriptors[k].overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe_no_data_return)
- mall_prefetch_sdp_overhead_factor[k] = 0.25;
-
- // DRAM
- if (display_cfg->plane_descriptors[k].overrides.legacy_svp_config == dml2_svp_mode_override_imall)
- mall_prefetch_dram_overhead_factor[k] = 2.0;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, mall_prefetch_sdp_overhead_factor = %f\n", __func__, k, mall_prefetch_sdp_overhead_factor[k]);
- dml2_printf("DML::%s: k=%u, mall_prefetch_dram_overhead_factor = %f\n", __func__, k, mall_prefetch_dram_overhead_factor[k]);
-#endif
- }
-}
-
-static double dml_get_return_bandwidth_available(
- const struct dml2_soc_bb *soc,
- enum dml2_core_internal_soc_state_type state_type,
- enum dml2_core_internal_bw_type bw_type,
- bool is_avg_bw,
- bool is_hvm_en,
- bool is_hvm_only,
- double dcflk_mhz,
- double fclk_mhz,
- double dram_bw_mbps)
-{
- double return_bw_mbps = 0.;
- double ideal_sdp_bandwidth = (double)soc->return_bus_width_bytes * dcflk_mhz;
- double ideal_fabric_bandwidth = fclk_mhz * (double)soc->fabric_datapath_to_dcn_data_return_bytes;
- double ideal_dram_bandwidth = dram_bw_mbps; //dram_speed_mts * soc->clk_table.dram_config.channel_count * soc->clk_table.dram_config.channel_width_bytes;
-
- double derate_sdp_factor = 1;
- double derate_fabric_factor = 1;
- double derate_dram_factor = 1;
-
- if (is_avg_bw) {
- if (state_type == dml2_core_internal_soc_state_svp_prefetch) {
- derate_sdp_factor = soc->qos_parameters.derate_table.dcn_mall_prefetch_average.dcfclk_derate_percent / 100.0;
- derate_fabric_factor = soc->qos_parameters.derate_table.dcn_mall_prefetch_average.fclk_derate_percent / 100.0;
- derate_dram_factor = soc->qos_parameters.derate_table.dcn_mall_prefetch_average.dram_derate_percent_pixel / 100.0;
- } else { // just assume sys_active
- derate_sdp_factor = soc->qos_parameters.derate_table.system_active_average.dcfclk_derate_percent / 100.0;
- derate_fabric_factor = soc->qos_parameters.derate_table.system_active_average.fclk_derate_percent / 100.0;
- derate_dram_factor = soc->qos_parameters.derate_table.system_active_average.dram_derate_percent_pixel / 100.0;
- }
- } else { // urgent bw
- if (state_type == dml2_core_internal_soc_state_svp_prefetch) {
- derate_sdp_factor = soc->qos_parameters.derate_table.dcn_mall_prefetch_urgent.dcfclk_derate_percent / 100.0;
- derate_fabric_factor = soc->qos_parameters.derate_table.dcn_mall_prefetch_urgent.fclk_derate_percent / 100.0;
- derate_dram_factor = soc->qos_parameters.derate_table.dcn_mall_prefetch_urgent.dram_derate_percent_pixel / 100.0;
-
- if (is_hvm_en) {
- if (is_hvm_only)
- derate_dram_factor = soc->qos_parameters.derate_table.dcn_mall_prefetch_urgent.dram_derate_percent_vm / 100.0;
- else
- derate_dram_factor = soc->qos_parameters.derate_table.dcn_mall_prefetch_urgent.dram_derate_percent_pixel_and_vm / 100.0;
- } else {
- derate_dram_factor = soc->qos_parameters.derate_table.dcn_mall_prefetch_urgent.dram_derate_percent_pixel / 100.0;
- }
- } else { // just assume sys_active
- derate_sdp_factor = soc->qos_parameters.derate_table.system_active_urgent.dcfclk_derate_percent / 100.0;
- derate_fabric_factor = soc->qos_parameters.derate_table.system_active_urgent.fclk_derate_percent / 100.0;
-
- if (is_hvm_en) {
- if (is_hvm_only)
- derate_dram_factor = soc->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_vm / 100.0;
- else
- derate_dram_factor = soc->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_pixel_and_vm / 100.0;
- } else {
- derate_dram_factor = soc->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_pixel / 100.0;
- }
- }
- }
-
- double derate_sdp_bandwidth = ideal_sdp_bandwidth * derate_sdp_factor;
- double derate_fabric_bandwidth = ideal_fabric_bandwidth * derate_fabric_factor;
- double derate_dram_bandwidth = ideal_dram_bandwidth * derate_dram_factor;
-
- if (bw_type == dml2_core_internal_bw_sdp)
- return_bw_mbps = math_min2(derate_sdp_bandwidth, derate_fabric_bandwidth);
- else // dml2_core_internal_bw_dram
- return_bw_mbps = derate_dram_bandwidth;
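-	/*
-	 * Worked example (editor's sketch, assumed values): with return_bus_width_bytes = 64 and
-	 * dcflk_mhz = 1000, ideal_sdp_bandwidth = 64000 MB/s; a 90% dcfclk derate gives 57600 MB/s,
-	 * and the reported SDP bandwidth is the smaller of that and the derated fabric bandwidth.
-	 */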
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: is_avg_bw = %u\n", __func__, is_avg_bw);
- dml2_printf("DML::%s: is_hvm_en = %u\n", __func__, is_hvm_en);
- dml2_printf("DML::%s: is_hvm_only = %u\n", __func__, is_hvm_only);
- dml2_printf("DML::%s: state_type = %s\n", __func__, dml2_core_internal_soc_state_type_str(state_type));
- dml2_printf("DML::%s: bw_type = %s\n", __func__, dml2_core_internal_bw_type_str(bw_type));
- dml2_printf("DML::%s: dcflk_mhz = %f\n", __func__, dcflk_mhz);
- dml2_printf("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz);
- dml2_printf("DML::%s: ideal_sdp_bandwidth = %f\n", __func__, ideal_sdp_bandwidth);
- dml2_printf("DML::%s: ideal_fabric_bandwidth = %f\n", __func__, ideal_fabric_bandwidth);
- dml2_printf("DML::%s: ideal_dram_bandwidth = %f\n", __func__, ideal_dram_bandwidth);
- dml2_printf("DML::%s: derate_sdp_bandwidth = %f (derate %f)\n", __func__, derate_sdp_bandwidth, derate_sdp_factor);
- dml2_printf("DML::%s: derate_fabric_bandwidth = %f (derate %f)\n", __func__, derate_fabric_bandwidth, derate_fabric_factor);
- dml2_printf("DML::%s: derate_dram_bandwidth = %f (derate %f)\n", __func__, derate_dram_bandwidth, derate_dram_factor);
- dml2_printf("DML::%s: return_bw_mbps = %f\n", __func__, return_bw_mbps);
-#endif
- return return_bw_mbps;
-}
-
-static void calculate_bandwidth_available(
- double avg_bandwidth_available_min[dml2_core_internal_soc_state_max],
- double avg_bandwidth_available[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double urg_bandwidth_available_min[dml2_core_internal_soc_state_max], // min between SDP and DRAM
- double urg_bandwidth_available[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double urg_bandwidth_available_vm_only[dml2_core_internal_soc_state_max],
- double urg_bandwidth_available_pixel_and_vm[dml2_core_internal_soc_state_max],
-
- const struct dml2_soc_bb *soc,
- bool HostVMEnable,
- double dcfclk_mhz,
- double fclk_mhz,
- double dram_bw_mbps)
-{
- unsigned int n, m;
-
- dml2_printf("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz);
- dml2_printf("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz);
- dml2_printf("DML::%s: dram_bw_mbps = %f\n", __func__, dram_bw_mbps);
-
- // Calculate all the available bandwidths
- for (m = 0; m < dml2_core_internal_soc_state_max; m++) {
- for (n = 0; n < dml2_core_internal_bw_max; n++) {
- avg_bandwidth_available[m][n] = dml_get_return_bandwidth_available(soc,
- m, // soc_state
- n, // bw_type
- 1, // avg_bw
- HostVMEnable,
- 0, // hvm_only
- dcfclk_mhz,
- fclk_mhz,
- dram_bw_mbps);
-
- urg_bandwidth_available[m][n] = dml_get_return_bandwidth_available(soc, m, n, 0, HostVMEnable, 0, dcfclk_mhz, fclk_mhz, dram_bw_mbps);
-
-
- dml2_printf("DML::%s: avg_bandwidth_available[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), avg_bandwidth_available[m][n]);
- dml2_printf("DML::%s: urg_bandwidth_available[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), urg_bandwidth_available[m][n]);
-
- // urg_bandwidth_available_vm_only is indexed by soc_state
- if (n == dml2_core_internal_bw_dram) {
- urg_bandwidth_available_vm_only[m] = dml_get_return_bandwidth_available(soc, m, n, 0, HostVMEnable, 1, dcfclk_mhz, fclk_mhz, dram_bw_mbps);
- urg_bandwidth_available_pixel_and_vm[m] = dml_get_return_bandwidth_available(soc, m, n, 0, HostVMEnable, 0, dcfclk_mhz, fclk_mhz, dram_bw_mbps);
- }
- }
-
- avg_bandwidth_available_min[m] = math_min2(avg_bandwidth_available[m][dml2_core_internal_bw_dram], avg_bandwidth_available[m][dml2_core_internal_bw_sdp]);
- urg_bandwidth_available_min[m] = math_min2(urg_bandwidth_available[m][dml2_core_internal_bw_dram], urg_bandwidth_available[m][dml2_core_internal_bw_sdp]);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_bandwidth_available_min[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), avg_bandwidth_available_min[m]);
- dml2_printf("DML::%s: urg_bandwidth_available_min[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), urg_bandwidth_available_min[m]);
- dml2_printf("DML::%s: urg_bandwidth_available_vm_only[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), urg_bandwidth_available_vm_only[n]);
-#endif
- }
-}
-
-static void calculate_avg_bandwidth_required(
- double avg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
-
- // input
- const struct dml2_display_cfg *display_cfg,
- unsigned int num_active_planes,
- double ReadBandwidthLuma[],
- double ReadBandwidthChroma[],
- double cursor_bw[],
- double dcc_dram_bw_nom_overhead_factor_p0[],
- double dcc_dram_bw_nom_overhead_factor_p1[],
- double mall_prefetch_dram_overhead_factor[],
- double mall_prefetch_sdp_overhead_factor[])
-{
- unsigned int n, m, k;
-
- // Average BW support check
- for (m = 0; m < dml2_core_internal_soc_state_max; m++) {
- for (n = 0; n < dml2_core_internal_bw_max; n++) { // sdp, dram
- avg_bandwidth_required[m][n] = 0;
- }
- }
-
- // SysActive and SVP Prefetch AVG bandwidth Check
- for (k = 0; k < num_active_planes; ++k) {
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: plane %0d\n", __func__, k);
- dml2_printf("DML::%s: ReadBandwidthLuma=%f\n", __func__, ReadBandwidthLuma[k]);
- dml2_printf("DML::%s: ReadBandwidthChroma=%f\n", __func__, ReadBandwidthChroma[k]);
- dml2_printf("DML::%s: dcc_dram_bw_nom_overhead_factor_p0=%f\n", __func__, dcc_dram_bw_nom_overhead_factor_p0[k]);
- dml2_printf("DML::%s: dcc_dram_bw_nom_overhead_factor_p1=%f\n", __func__, dcc_dram_bw_nom_overhead_factor_p1[k]);
- dml2_printf("DML::%s: mall_prefetch_dram_overhead_factor=%f\n", __func__, mall_prefetch_dram_overhead_factor[k]);
- dml2_printf("DML::%s: mall_prefetch_sdp_overhead_factor=%f\n", __func__, mall_prefetch_sdp_overhead_factor[k]);
-#endif
-
- double sdp_overhead_factor = mall_prefetch_sdp_overhead_factor[k];
- double dram_overhead_factor_p0 = dcc_dram_bw_nom_overhead_factor_p0[k] * mall_prefetch_dram_overhead_factor[k];
- double dram_overhead_factor_p1 = dcc_dram_bw_nom_overhead_factor_p1[k] * mall_prefetch_dram_overhead_factor[k];
-
- // FIXME_DCN4: cursor_bw was missing here; is it (and the tdlut bw) actually needed for the average bandwidth calculation?
- // active avg bw does not include phantom pipes, but svp_prefetch avg bw should include them
- if (!dml_is_phantom_pipe(&display_cfg->plane_descriptors[k])) {
- avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp] += sdp_overhead_factor * (ReadBandwidthLuma[k] + ReadBandwidthChroma[k]) + cursor_bw[k];
- avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram] += dram_overhead_factor_p0 * ReadBandwidthLuma[k] + dram_overhead_factor_p1 * ReadBandwidthChroma[k] + cursor_bw[k];
- }
- avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp] += sdp_overhead_factor * (ReadBandwidthLuma[k] + ReadBandwidthChroma[k]) + cursor_bw[k];
- avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram] += dram_overhead_factor_p0 * ReadBandwidthLuma[k] + dram_overhead_factor_p1 * ReadBandwidthChroma[k] + cursor_bw[k];
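-		/*
-		 * Worked example (editor's sketch, assumed values): a non-phantom plane with
-		 * ReadBandwidthLuma = 2000, ReadBandwidthChroma = 500, cursor_bw = 50,
-		 * sdp_overhead_factor = 1.25 and dram overhead factors of 1.02 adds
-		 * 1.25 * 2500 + 50 = 3175 to the sys_active SDP total and
-		 * 1.02 * 2000 + 1.02 * 500 + 50 = 2600 to the sys_active DRAM total.
-		 */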
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_sys_active), dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp), avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]);
- dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_sys_active), dml2_core_internal_bw_type_str(dml2_core_internal_bw_dram), avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram]);
- dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_svp_prefetch), dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp), avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp]);
- dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_svp_prefetch), dml2_core_internal_bw_type_str(dml2_core_internal_bw_dram), avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram]);
-#endif
- }
-}
-
-static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
- struct dml2_core_calcs_CalculateVMRowAndSwath_params *p)
-{
- struct dml2_core_calcs_CalculateVMRowAndSwath_locals *s = &scratch->CalculateVMRowAndSwath_locals;
-
- s->HostVMDynamicLevels = CalculateHostVMDynamicLevels(p->display_cfg->gpuvm_enable, p->display_cfg->hostvm_enable, p->HostVMMinPageSize, p->display_cfg->hostvm_max_non_cached_page_table_levels);
-
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (p->display_cfg->hostvm_enable == true) {
- p->vm_group_bytes[k] = 512;
- p->dpte_group_bytes[k] = 512;
- } else if (p->display_cfg->gpuvm_enable == true) {
- p->vm_group_bytes[k] = 2048;
- if (p->display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes >= 64 && dml_is_vertical_rotation(p->myPipe[k].RotationAngle)) {
- p->dpte_group_bytes[k] = 512;
- } else {
- p->dpte_group_bytes[k] = 2048;
- }
- } else {
- p->vm_group_bytes[k] = 0;
- p->dpte_group_bytes[k] = 0;
- }
-
- if (dml2_core_shared_is_420(p->myPipe[k].SourcePixelFormat) || p->myPipe[k].SourcePixelFormat == dml2_rgbe_alpha) {
- if ((p->myPipe[k].SourcePixelFormat == dml2_420_10 || p->myPipe[k].SourcePixelFormat == dml2_420_12) && !dml_is_vertical_rotation(p->myPipe[k].RotationAngle)) {
- s->PTEBufferSizeInRequestsForLuma[k] = (p->PTEBufferSizeInRequestsLuma + p->PTEBufferSizeInRequestsChroma) / 2;
- s->PTEBufferSizeInRequestsForChroma[k] = s->PTEBufferSizeInRequestsForLuma[k];
- } else {
- s->PTEBufferSizeInRequestsForLuma[k] = p->PTEBufferSizeInRequestsLuma;
- s->PTEBufferSizeInRequestsForChroma[k] = p->PTEBufferSizeInRequestsChroma;
- }
-
- scratch->calculate_vm_and_row_bytes_params.ViewportStationary = p->myPipe[k].ViewportStationary;
- scratch->calculate_vm_and_row_bytes_params.DCCEnable = p->myPipe[k].DCCEnable;
- scratch->calculate_vm_and_row_bytes_params.NumberOfDPPs = p->myPipe[k].DPPPerSurface;
- scratch->calculate_vm_and_row_bytes_params.BlockHeight256Bytes = p->myPipe[k].BlockHeight256BytesC;
- scratch->calculate_vm_and_row_bytes_params.BlockWidth256Bytes = p->myPipe[k].BlockWidth256BytesC;
- scratch->calculate_vm_and_row_bytes_params.SourcePixelFormat = p->myPipe[k].SourcePixelFormat;
- scratch->calculate_vm_and_row_bytes_params.SurfaceTiling = p->myPipe[k].SurfaceTiling;
- scratch->calculate_vm_and_row_bytes_params.BytePerPixel = p->myPipe[k].BytePerPixelC;
- scratch->calculate_vm_and_row_bytes_params.RotationAngle = p->myPipe[k].RotationAngle;
- scratch->calculate_vm_and_row_bytes_params.SwathWidth = p->SwathWidthC[k];
- scratch->calculate_vm_and_row_bytes_params.ViewportHeight = p->myPipe[k].ViewportHeightC;
- scratch->calculate_vm_and_row_bytes_params.ViewportXStart = p->myPipe[k].ViewportXStartC;
- scratch->calculate_vm_and_row_bytes_params.ViewportYStart = p->myPipe[k].ViewportYStartC;
- scratch->calculate_vm_and_row_bytes_params.GPUVMEnable = p->display_cfg->gpuvm_enable;
- scratch->calculate_vm_and_row_bytes_params.GPUVMMaxPageTableLevels = p->display_cfg->gpuvm_max_page_table_levels;
- scratch->calculate_vm_and_row_bytes_params.GPUVMMinPageSizeKBytes = p->display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes;
- scratch->calculate_vm_and_row_bytes_params.PTEBufferSizeInRequests = s->PTEBufferSizeInRequestsForChroma[k];
- scratch->calculate_vm_and_row_bytes_params.Pitch = p->myPipe[k].PitchC;
- scratch->calculate_vm_and_row_bytes_params.MacroTileWidth = p->myPipe[k].BlockWidthC;
- scratch->calculate_vm_and_row_bytes_params.MacroTileHeight = p->myPipe[k].BlockHeightC;
- scratch->calculate_vm_and_row_bytes_params.is_phantom = dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k]);
- scratch->calculate_vm_and_row_bytes_params.DCCMetaPitch = p->myPipe[k].DCCMetaPitchC;
- scratch->calculate_vm_and_row_bytes_params.mrq_present = p->mrq_present;
-
- scratch->calculate_vm_and_row_bytes_params.PixelPTEBytesPerRow = &s->PixelPTEBytesPerRowC[k];
- scratch->calculate_vm_and_row_bytes_params.PixelPTEBytesPerRowStorage = &s->PixelPTEBytesPerRowStorageC[k];
- scratch->calculate_vm_and_row_bytes_params.dpte_row_width_ub = &p->dpte_row_width_chroma_ub[k];
- scratch->calculate_vm_and_row_bytes_params.dpte_row_height = &p->dpte_row_height_chroma[k];
- scratch->calculate_vm_and_row_bytes_params.dpte_row_height_linear = &p->dpte_row_height_linear_chroma[k];
- scratch->calculate_vm_and_row_bytes_params.PixelPTEBytesPerRow_one_row_per_frame = &s->PixelPTEBytesPerRowC_one_row_per_frame[k];
- scratch->calculate_vm_and_row_bytes_params.dpte_row_width_ub_one_row_per_frame = &s->dpte_row_width_chroma_ub_one_row_per_frame[k];
- scratch->calculate_vm_and_row_bytes_params.dpte_row_height_one_row_per_frame = &s->dpte_row_height_chroma_one_row_per_frame[k];
- scratch->calculate_vm_and_row_bytes_params.vmpg_width = &p->vmpg_width_c[k];
- scratch->calculate_vm_and_row_bytes_params.vmpg_height = &p->vmpg_height_c[k];
- scratch->calculate_vm_and_row_bytes_params.PixelPTEReqWidth = &p->PixelPTEReqWidthC[k];
- scratch->calculate_vm_and_row_bytes_params.PixelPTEReqHeight = &p->PixelPTEReqHeightC[k];
- scratch->calculate_vm_and_row_bytes_params.PTERequestSize = &p->PTERequestSizeC[k];
- scratch->calculate_vm_and_row_bytes_params.dpde0_bytes_per_frame_ub = &p->dpde0_bytes_per_frame_ub_c[k];
-
- scratch->calculate_vm_and_row_bytes_params.meta_row_bytes = &s->meta_row_bytes_per_row_ub_c[k];
- scratch->calculate_vm_and_row_bytes_params.MetaRequestWidth = &p->meta_req_width_chroma[k];
- scratch->calculate_vm_and_row_bytes_params.MetaRequestHeight = &p->meta_req_height_chroma[k];
- scratch->calculate_vm_and_row_bytes_params.meta_row_width = &p->meta_row_width_chroma[k];
- scratch->calculate_vm_and_row_bytes_params.meta_row_height = &p->meta_row_height_chroma[k];
- scratch->calculate_vm_and_row_bytes_params.meta_pte_bytes_per_frame_ub = &p->meta_pte_bytes_per_frame_ub_c[k];
-
- s->vm_bytes_c = CalculateVMAndRowBytes(&scratch->calculate_vm_and_row_bytes_params);
-
- p->PrefetchSourceLinesC[k] = CalculatePrefetchSourceLines(
- p->myPipe[k].VRatioChroma,
- p->myPipe[k].VTapsChroma,
- p->myPipe[k].InterlaceEnable,
- p->myPipe[k].ProgressiveToInterlaceUnitInOPP,
- p->myPipe[k].SwathHeightC,
- p->myPipe[k].RotationAngle,
- p->myPipe[k].mirrored,
- p->myPipe[k].ViewportStationary,
- p->SwathWidthC[k],
- p->myPipe[k].ViewportHeightC,
- p->myPipe[k].ViewportXStartC,
- p->myPipe[k].ViewportYStartC,
-
- // Output
- &p->VInitPreFillC[k],
- &p->MaxNumSwathC[k]);
- } else {
- s->PTEBufferSizeInRequestsForLuma[k] = p->PTEBufferSizeInRequestsLuma + p->PTEBufferSizeInRequestsChroma;
- s->PTEBufferSizeInRequestsForChroma[k] = 0;
- s->PixelPTEBytesPerRowC[k] = 0;
- s->PixelPTEBytesPerRowStorageC[k] = 0;
- s->vm_bytes_c = 0;
- p->MaxNumSwathC[k] = 0;
- p->PrefetchSourceLinesC[k] = 0;
- s->dpte_row_height_chroma_one_row_per_frame[k] = 0;
- s->dpte_row_width_chroma_ub_one_row_per_frame[k] = 0;
- s->PixelPTEBytesPerRowC_one_row_per_frame[k] = 0;
- }
-
- scratch->calculate_vm_and_row_bytes_params.ViewportStationary = p->myPipe[k].ViewportStationary;
- scratch->calculate_vm_and_row_bytes_params.DCCEnable = p->myPipe[k].DCCEnable;
- scratch->calculate_vm_and_row_bytes_params.NumberOfDPPs = p->myPipe[k].DPPPerSurface;
- scratch->calculate_vm_and_row_bytes_params.BlockHeight256Bytes = p->myPipe[k].BlockHeight256BytesY;
- scratch->calculate_vm_and_row_bytes_params.BlockWidth256Bytes = p->myPipe[k].BlockWidth256BytesY;
- scratch->calculate_vm_and_row_bytes_params.SourcePixelFormat = p->myPipe[k].SourcePixelFormat;
- scratch->calculate_vm_and_row_bytes_params.SurfaceTiling = p->myPipe[k].SurfaceTiling;
- scratch->calculate_vm_and_row_bytes_params.BytePerPixel = p->myPipe[k].BytePerPixelY;
- scratch->calculate_vm_and_row_bytes_params.RotationAngle = p->myPipe[k].RotationAngle;
- scratch->calculate_vm_and_row_bytes_params.SwathWidth = p->SwathWidthY[k];
- scratch->calculate_vm_and_row_bytes_params.ViewportHeight = p->myPipe[k].ViewportHeight;
- scratch->calculate_vm_and_row_bytes_params.ViewportXStart = p->myPipe[k].ViewportXStart;
- scratch->calculate_vm_and_row_bytes_params.ViewportYStart = p->myPipe[k].ViewportYStart;
- scratch->calculate_vm_and_row_bytes_params.GPUVMEnable = p->display_cfg->gpuvm_enable;
- scratch->calculate_vm_and_row_bytes_params.GPUVMMaxPageTableLevels = p->display_cfg->gpuvm_max_page_table_levels;
- scratch->calculate_vm_and_row_bytes_params.GPUVMMinPageSizeKBytes = p->display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes;
- scratch->calculate_vm_and_row_bytes_params.PTEBufferSizeInRequests = s->PTEBufferSizeInRequestsForLuma[k];
- scratch->calculate_vm_and_row_bytes_params.Pitch = p->myPipe[k].PitchY;
- scratch->calculate_vm_and_row_bytes_params.MacroTileWidth = p->myPipe[k].BlockWidthY;
- scratch->calculate_vm_and_row_bytes_params.MacroTileHeight = p->myPipe[k].BlockHeightY;
- scratch->calculate_vm_and_row_bytes_params.is_phantom = dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k]);
- scratch->calculate_vm_and_row_bytes_params.DCCMetaPitch = p->myPipe[k].DCCMetaPitchY;
- scratch->calculate_vm_and_row_bytes_params.mrq_present = p->mrq_present;
-
- scratch->calculate_vm_and_row_bytes_params.PixelPTEBytesPerRow = &s->PixelPTEBytesPerRowY[k];
- scratch->calculate_vm_and_row_bytes_params.PixelPTEBytesPerRowStorage = &s->PixelPTEBytesPerRowStorageY[k];
- scratch->calculate_vm_and_row_bytes_params.dpte_row_width_ub = &p->dpte_row_width_luma_ub[k];
- scratch->calculate_vm_and_row_bytes_params.dpte_row_height = &p->dpte_row_height_luma[k];
- scratch->calculate_vm_and_row_bytes_params.dpte_row_height_linear = &p->dpte_row_height_linear_luma[k];
- scratch->calculate_vm_and_row_bytes_params.PixelPTEBytesPerRow_one_row_per_frame = &s->PixelPTEBytesPerRowY_one_row_per_frame[k];
- scratch->calculate_vm_and_row_bytes_params.dpte_row_width_ub_one_row_per_frame = &s->dpte_row_width_luma_ub_one_row_per_frame[k];
- scratch->calculate_vm_and_row_bytes_params.dpte_row_height_one_row_per_frame = &s->dpte_row_height_luma_one_row_per_frame[k];
- scratch->calculate_vm_and_row_bytes_params.vmpg_width = &p->vmpg_width_y[k];
- scratch->calculate_vm_and_row_bytes_params.vmpg_height = &p->vmpg_height_y[k];
- scratch->calculate_vm_and_row_bytes_params.PixelPTEReqWidth = &p->PixelPTEReqWidthY[k];
- scratch->calculate_vm_and_row_bytes_params.PixelPTEReqHeight = &p->PixelPTEReqHeightY[k];
- scratch->calculate_vm_and_row_bytes_params.PTERequestSize = &p->PTERequestSizeY[k];
- scratch->calculate_vm_and_row_bytes_params.dpde0_bytes_per_frame_ub = &p->dpde0_bytes_per_frame_ub_l[k];
-
- scratch->calculate_vm_and_row_bytes_params.meta_row_bytes = &s->meta_row_bytes_per_row_ub_l[k];
- scratch->calculate_vm_and_row_bytes_params.MetaRequestWidth = &p->meta_req_width_luma[k];
- scratch->calculate_vm_and_row_bytes_params.MetaRequestHeight = &p->meta_req_height_luma[k];
- scratch->calculate_vm_and_row_bytes_params.meta_row_width = &p->meta_row_width_luma[k];
- scratch->calculate_vm_and_row_bytes_params.meta_row_height = &p->meta_row_height_luma[k];
- scratch->calculate_vm_and_row_bytes_params.meta_pte_bytes_per_frame_ub = &p->meta_pte_bytes_per_frame_ub_l[k];
-
- s->vm_bytes_l = CalculateVMAndRowBytes(&scratch->calculate_vm_and_row_bytes_params);
-
- p->PrefetchSourceLinesY[k] = CalculatePrefetchSourceLines(
- p->myPipe[k].VRatio,
- p->myPipe[k].VTaps,
- p->myPipe[k].InterlaceEnable,
- p->myPipe[k].ProgressiveToInterlaceUnitInOPP,
- p->myPipe[k].SwathHeightY,
- p->myPipe[k].RotationAngle,
- p->myPipe[k].mirrored,
- p->myPipe[k].ViewportStationary,
- p->SwathWidthY[k],
- p->myPipe[k].ViewportHeight,
- p->myPipe[k].ViewportXStart,
- p->myPipe[k].ViewportYStart,
-
- // Output
- &p->VInitPreFillY[k],
- &p->MaxNumSwathY[k]);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, vm_bytes_l = %u (before hvm level)\n", __func__, k, s->vm_bytes_l);
- dml2_printf("DML::%s: k=%u, vm_bytes_c = %u (before hvm level)\n", __func__, k, s->vm_bytes_c);
- dml2_printf("DML::%s: k=%u, meta_row_bytes_per_row_ub_l = %u\n", __func__, k, s->meta_row_bytes_per_row_ub_l[k]);
- dml2_printf("DML::%s: k=%u, meta_row_bytes_per_row_ub_c = %u\n", __func__, k, s->meta_row_bytes_per_row_ub_c[k]);
-#endif
- p->vm_bytes[k] = (s->vm_bytes_l + s->vm_bytes_c) * (1 + 8 * s->HostVMDynamicLevels);
- p->meta_row_bytes[k] = s->meta_row_bytes_per_row_ub_l[k] + s->meta_row_bytes_per_row_ub_c[k];
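-		/*
-		 * Editor's note (illustrative): with host VM enabled and, say, HostVMDynamicLevels = 2,
-		 * the VM bytes are scaled by 1 + 8 * 2 = 17, presumably to cover the additional host VM
-		 * page-table fetches per dynamic level; with host VM disabled the level count is 0 and
-		 * the factor collapses to 1.
-		 */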
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, meta_row_bytes = %u\n", __func__, k, p->meta_row_bytes[k]);
- dml2_printf("DML::%s: k=%u, vm_bytes = %u (after hvm level)\n", __func__, k, p->vm_bytes[k]);
-#endif
- if (s->PixelPTEBytesPerRowStorageY[k] <= 64 * s->PTEBufferSizeInRequestsForLuma[k] && s->PixelPTEBytesPerRowStorageC[k] <= 64 * s->PTEBufferSizeInRequestsForChroma[k]) {
- p->PTEBufferSizeNotExceeded[k] = true;
- } else {
- p->PTEBufferSizeNotExceeded[k] = false;
- }
-
- s->one_row_per_frame_fits_in_buffer[k] = (s->PixelPTEBytesPerRowY_one_row_per_frame[k] <= 64 * 2 * s->PTEBufferSizeInRequestsForLuma[k] &&
- s->PixelPTEBytesPerRowC_one_row_per_frame[k] <= 64 * 2 * s->PTEBufferSizeInRequestsForChroma[k]);
-#ifdef __DML_VBA_DEBUG__
- if (p->PTEBufferSizeNotExceeded[k] == 0 || s->one_row_per_frame_fits_in_buffer[k] == 0) {
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowY = %u (before hvm level)\n", __func__, k, s->PixelPTEBytesPerRowY[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowC = %u (before hvm level)\n", __func__, k, s->PixelPTEBytesPerRowC[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowStorageY = %u\n", __func__, k, s->PixelPTEBytesPerRowStorageY[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowStorageC = %u\n", __func__, k, s->PixelPTEBytesPerRowStorageC[k]);
- dml2_printf("DML::%s: k=%u, PTEBufferSizeInRequestsForLuma = %u\n", __func__, k, s->PTEBufferSizeInRequestsForLuma[k]);
- dml2_printf("DML::%s: k=%u, PTEBufferSizeInRequestsForChroma = %u\n", __func__, k, s->PTEBufferSizeInRequestsForChroma[k]);
- dml2_printf("DML::%s: k=%u, PTEBufferSizeNotExceeded (not one_row_per_frame) = %u\n", __func__, k, p->PTEBufferSizeNotExceeded[k]);
-
- dml2_printf("DML::%s: k=%u, HostVMDynamicLevels = %u\n", __func__, k, s->HostVMDynamicLevels);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowY_one_row_per_frame = %u\n", __func__, k, s->PixelPTEBytesPerRowY_one_row_per_frame[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowC_one_row_per_frame = %u\n", __func__, k, s->PixelPTEBytesPerRowC_one_row_per_frame[k]);
- dml2_printf("DML::%s: k=%u, one_row_per_frame_fits_in_buffer = %u\n", __func__, k, s->one_row_per_frame_fits_in_buffer[k]);
- }
-#endif
- }
-
- CalculateMALLUseForStaticScreen(
- p->display_cfg,
- p->NumberOfActiveSurfaces,
- p->MALLAllocatedForDCN,
- p->SurfaceSizeInMALL,
- s->one_row_per_frame_fits_in_buffer,
- // Output
- p->is_using_mall_for_ss);
-
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (p->display_cfg->gpuvm_enable) {
- if (p->display_cfg->plane_descriptors[k].overrides.hw.force_pte_buffer_mode.enable == 1) {
- p->PTE_BUFFER_MODE[k] = p->display_cfg->plane_descriptors[k].overrides.hw.force_pte_buffer_mode.value;
- } else {
- p->PTE_BUFFER_MODE[k] = p->myPipe[k].FORCE_ONE_ROW_FOR_FRAME || p->is_using_mall_for_ss[k] || (p->display_cfg->plane_descriptors[k].overrides.legacy_svp_config == dml2_svp_mode_override_main_pipe) ||
- dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k]) || (p->display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes > 64);
- }
- p->BIGK_FRAGMENT_SIZE[k] = (unsigned int)(math_log((float)p->display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes * 1024, 2) - 12);
- } else {
- p->PTE_BUFFER_MODE[k] = 0;
- p->BIGK_FRAGMENT_SIZE[k] = 0;
- }
- }
-
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- p->DCCMetaBufferSizeNotExceeded[k] = true;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, SurfaceSizeInMALL = %u\n", __func__, k, p->SurfaceSizeInMALL[k]);
- dml2_printf("DML::%s: k=%u, is_using_mall_for_ss = %u\n", __func__, k, p->is_using_mall_for_ss[k]);
-#endif
- p->use_one_row_for_frame[k] = p->myPipe[k].FORCE_ONE_ROW_FOR_FRAME || p->is_using_mall_for_ss[k] || (p->display_cfg->plane_descriptors[k].overrides.legacy_svp_config == dml2_svp_mode_override_main_pipe) ||
- (dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) || (p->display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes > 64 && dml_is_vertical_rotation(p->myPipe[k].RotationAngle));
-
- p->use_one_row_for_frame_flip[k] = p->use_one_row_for_frame[k] && !(p->display_cfg->plane_descriptors[k].overrides.uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_mall_full_frame);
-
- if (p->use_one_row_for_frame[k]) {
- p->dpte_row_height_luma[k] = s->dpte_row_height_luma_one_row_per_frame[k];
- p->dpte_row_width_luma_ub[k] = s->dpte_row_width_luma_ub_one_row_per_frame[k];
- s->PixelPTEBytesPerRowY[k] = s->PixelPTEBytesPerRowY_one_row_per_frame[k];
- p->dpte_row_height_chroma[k] = s->dpte_row_height_chroma_one_row_per_frame[k];
- p->dpte_row_width_chroma_ub[k] = s->dpte_row_width_chroma_ub_one_row_per_frame[k];
- s->PixelPTEBytesPerRowC[k] = s->PixelPTEBytesPerRowC_one_row_per_frame[k];
- p->PTEBufferSizeNotExceeded[k] = s->one_row_per_frame_fits_in_buffer[k];
- }
-
- if (p->meta_row_bytes[k] <= p->DCCMetaBufferSizeBytes) {
- p->DCCMetaBufferSizeNotExceeded[k] = true;
- } else {
- p->DCCMetaBufferSizeNotExceeded[k] = false;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, meta_row_bytes = %d\n", __func__, k, p->meta_row_bytes[k]);
- dml2_printf("DML::%s: k=%d, DCCMetaBufferSizeBytes = %d\n", __func__, k, p->DCCMetaBufferSizeBytes);
- dml2_printf("DML::%s: k=%d, DCCMetaBufferSizeNotExceeded = %d\n", __func__, k, p->DCCMetaBufferSizeNotExceeded[k]);
-#endif
- }
-
- s->PixelPTEBytesPerRowY[k] = s->PixelPTEBytesPerRowY[k] * (1 + 8 * s->HostVMDynamicLevels);
- s->PixelPTEBytesPerRowC[k] = s->PixelPTEBytesPerRowC[k] * (1 + 8 * s->HostVMDynamicLevels);
- p->PixelPTEBytesPerRow[k] = s->PixelPTEBytesPerRowY[k] + s->PixelPTEBytesPerRowC[k];
-
- // if one row of dPTEs is meant to span the entire frame, then for these calculations we will pretend that one big row is fetched in two halves
- if (p->use_one_row_for_frame[k])
- p->PixelPTEBytesPerRow[k] = p->PixelPTEBytesPerRow[k] / 2;
-
- CalculateRowBandwidth(
- p->display_cfg->gpuvm_enable,
- p->use_one_row_for_frame[k],
- p->myPipe[k].SourcePixelFormat,
- p->myPipe[k].VRatio,
- p->myPipe[k].VRatioChroma,
- p->myPipe[k].DCCEnable,
- p->myPipe[k].HTotal / p->myPipe[k].PixelClock,
- s->PixelPTEBytesPerRowY[k],
- s->PixelPTEBytesPerRowC[k],
- p->dpte_row_height_luma[k],
- p->dpte_row_height_chroma[k],
-
- p->mrq_present,
- s->meta_row_bytes_per_row_ub_l[k],
- s->meta_row_bytes_per_row_ub_c[k],
- p->meta_row_height_luma[k],
- p->meta_row_height_chroma[k],
-
- // Output
- &p->dpte_row_bw[k],
- &p->meta_row_bw[k]);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]);
- dml2_printf("DML::%s: k=%u, use_one_row_for_frame_flip = %u\n", __func__, k, p->use_one_row_for_frame_flip[k]);
- dml2_printf("DML::%s: k=%u, UseMALLForPStateChange = %u\n", __func__, k, p->display_cfg->plane_descriptors[k].overrides.legacy_svp_config);
- dml2_printf("DML::%s: k=%u, dpte_row_height_luma = %u\n", __func__, k, p->dpte_row_height_luma[k]);
- dml2_printf("DML::%s: k=%u, dpte_row_width_luma_ub = %u\n", __func__, k, p->dpte_row_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowY = %u (after hvm level)\n", __func__, k, s->PixelPTEBytesPerRowY[k]);
- dml2_printf("DML::%s: k=%u, dpte_row_height_chroma = %u\n", __func__, k, p->dpte_row_height_chroma[k]);
- dml2_printf("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowC = %u (after hvm level)\n", __func__, k, s->PixelPTEBytesPerRowC[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRow = %u\n", __func__, k, p->PixelPTEBytesPerRow[k]);
- dml2_printf("DML::%s: k=%u, PTEBufferSizeNotExceeded = %u\n", __func__, k, p->PTEBufferSizeNotExceeded[k]);
- dml2_printf("DML::%s: k=%u, gpuvm_enable = %u\n", __func__, k, p->display_cfg->gpuvm_enable);
- dml2_printf("DML::%s: k=%u, PTE_BUFFER_MODE = %u\n", __func__, k, p->PTE_BUFFER_MODE[k]);
- dml2_printf("DML::%s: k=%u, BIGK_FRAGMENT_SIZE = %u\n", __func__, k, p->BIGK_FRAGMENT_SIZE[k]);
-#endif
- }
-}
-
-static double CalculateUrgentLatency(
- double UrgentLatencyPixelDataOnly,
- double UrgentLatencyPixelMixedWithVMData,
- double UrgentLatencyVMDataOnly,
- bool DoUrgentLatencyAdjustment,
- double UrgentLatencyAdjustmentFabricClockComponent,
- double UrgentLatencyAdjustmentFabricClockReference,
- double FabricClock,
- double uclk_freq_mhz,
- enum dml2_qos_param_type qos_type,
- unsigned int urgent_ramp_uclk_cycles,
- unsigned int df_qos_response_time_fclk_cycles,
- unsigned int max_round_trip_to_furthest_cs_fclk_cycles,
- unsigned int mall_overhead_fclk_cycles,
- double umc_urgent_ramp_latency_margin,
- double fabric_max_transport_latency_margin)
-{
- double urgent_latency = 0;
- if (qos_type == dml2_qos_param_type_dcn4x) {
- urgent_latency = (df_qos_response_time_fclk_cycles + mall_overhead_fclk_cycles) / FabricClock
- + max_round_trip_to_furthest_cs_fclk_cycles / FabricClock * (1 + fabric_max_transport_latency_margin / 100.0)
- + urgent_ramp_uclk_cycles / uclk_freq_mhz * (1 + umc_urgent_ramp_latency_margin / 100.0);
- } else {
- urgent_latency = math_max3(UrgentLatencyPixelDataOnly, UrgentLatencyPixelMixedWithVMData, UrgentLatencyVMDataOnly);
- if (DoUrgentLatencyAdjustment == true) {
- urgent_latency = urgent_latency + UrgentLatencyAdjustmentFabricClockComponent * (UrgentLatencyAdjustmentFabricClockReference / FabricClock - 1);
- }
- }
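-	/*
-	 * Worked example for the dcn4x branch (editor's sketch, assumed values): with
-	 * df_qos_response_time_fclk_cycles = 300, mall_overhead_fclk_cycles = 50 and
-	 * FabricClock = 1000 MHz, the first term is 0.35 us; 350 round-trip fclk cycles with a
-	 * 10% margin add 0.385 us; 600 urgent-ramp uclk cycles at uclk_freq_mhz = 1200 with a
-	 * 50% margin add 0.75 us, for an urgent latency of roughly 1.49 us.
-	 */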
-#ifdef __DML_VBA_DEBUG__
- if (qos_type == dml2_qos_param_type_dcn4x) {
- dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
- dml2_printf("DML::%s: urgent_ramp_uclk_cycles = %d\n", __func__, urgent_ramp_uclk_cycles);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
- dml2_printf("DML::%s: umc_urgent_ramp_latency_margin = %f\n", __func__, umc_urgent_ramp_latency_margin);
- } else {
- dml2_printf("DML::%s: UrgentLatencyPixelDataOnly = %f\n", __func__, UrgentLatencyPixelDataOnly);
- dml2_printf("DML::%s: UrgentLatencyPixelMixedWithVMData = %f\n", __func__, UrgentLatencyPixelMixedWithVMData);
- dml2_printf("DML::%s: UrgentLatencyVMDataOnly = %f\n", __func__, UrgentLatencyVMDataOnly);
- dml2_printf("DML::%s: UrgentLatencyAdjustmentFabricClockComponent = %f\n", __func__, UrgentLatencyAdjustmentFabricClockComponent);
- dml2_printf("DML::%s: UrgentLatencyAdjustmentFabricClockReference = %f\n", __func__, UrgentLatencyAdjustmentFabricClockReference);
- }
- dml2_printf("DML::%s: FabricClock = %f\n", __func__, FabricClock);
- dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, urgent_latency);
-#endif
- return urgent_latency;
-}
-
-static double CalculateTripToMemory(
- double UrgLatency,
- double FabricClock,
- double uclk_freq_mhz,
- enum dml2_qos_param_type qos_type,
- unsigned int trip_to_memory_uclk_cycles,
- unsigned int max_round_trip_to_furthest_cs_fclk_cycles,
- unsigned int mall_overhead_fclk_cycles,
- double umc_max_latency_margin,
- double fabric_max_transport_latency_margin)
-{
- double trip_to_memory_us;
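- // dcn4x QoS expresses the memory trip as MALL overhead plus the worst-case
- // round trip to the furthest CS (with fabric transport margin) plus the uclk
- // trip-to-memory cycles (with UMC margin); legacy QoS reuses the urgent latency.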
- if (qos_type == dml2_qos_param_type_dcn4x) {
- trip_to_memory_us = mall_overhead_fclk_cycles / FabricClock
- + max_round_trip_to_furthest_cs_fclk_cycles / FabricClock * (1.0 + fabric_max_transport_latency_margin / 100.0)
- + trip_to_memory_uclk_cycles / uclk_freq_mhz * (1.0 + umc_max_latency_margin / 100.0);
- } else {
- trip_to_memory_us = UrgLatency;
- }
-
-#ifdef __DML_VBA_DEBUG__
- if (qos_type == dml2_qos_param_type_dcn4x) {
- dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
- dml2_printf("DML::%s: max_round_trip_to_furthest_cs_fclk_cycles = %d\n", __func__, max_round_trip_to_furthest_cs_fclk_cycles);
- dml2_printf("DML::%s: mall_overhead_fclk_cycles = %d\n", __func__, mall_overhead_fclk_cycles);
- dml2_printf("DML::%s: trip_to_memory_uclk_cycles = %d\n", __func__, trip_to_memory_uclk_cycles);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
- dml2_printf("DML::%s: FabricClock = %f\n", __func__, FabricClock);
- dml2_printf("DML::%s: fabric_max_transport_latency_margin = %f\n", __func__, fabric_max_transport_latency_margin);
- dml2_printf("DML::%s: umc_max_latency_margin = %f\n", __func__, umc_max_latency_margin);
- } else {
- dml2_printf("DML::%s: UrgLatency = %f\n", __func__, UrgLatency);
- }
- dml2_printf("DML::%s: trip_to_memory_us = %f\n", __func__, trip_to_memory_us);
-#endif
-
- return trip_to_memory_us;
-}
-
-static double CalculateMetaTripToMemory(
- double UrgLatency,
- double FabricClock,
- double uclk_freq_mhz,
- enum dml2_qos_param_type qos_type,
- unsigned int meta_trip_to_memory_uclk_cycles,
- unsigned int meta_trip_to_memory_fclk_cycles,
- double umc_max_latency_margin,
- double fabric_max_transport_latency_margin)
-{
- double meta_trip_to_memory_us;
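- // Same model as CalculateTripToMemory, applied to the metadata return path:
- // fclk and uclk trip cycles with their margins, but no MALL overhead term.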
- if (qos_type == dml2_qos_param_type_dcn4x) {
- meta_trip_to_memory_us = meta_trip_to_memory_fclk_cycles / FabricClock * (1.0 + fabric_max_transport_latency_margin / 100.0)
- + meta_trip_to_memory_uclk_cycles / uclk_freq_mhz * (1.0 + umc_max_latency_margin / 100.0);
- } else {
- meta_trip_to_memory_us = UrgLatency;
- }
-
-#ifdef __DML_VBA_DEBUG__
- if (qos_type == dml2_qos_param_type_dcn4x) {
- dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
- dml2_printf("DML::%s: meta_trip_to_memory_fclk_cycles = %d\n", __func__, meta_trip_to_memory_fclk_cycles);
- dml2_printf("DML::%s: meta_trip_to_memory_uclk_cycles = %d\n", __func__, meta_trip_to_memory_uclk_cycles);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
- } else {
- dml2_printf("DML::%s: UrgLatency = %f\n", __func__, UrgLatency);
- }
- dml2_printf("DML::%s: meta_trip_to_memory_us = %f\n", __func__, meta_trip_to_memory_us);
-#endif
-
- return meta_trip_to_memory_us;
-}
-
-static void calculate_cursor_req_attributes(
- unsigned int cursor_width,
- unsigned int cursor_bpp,
-
- // output
- unsigned int *cursor_lines_per_chunk,
- unsigned int *cursor_bytes_per_line,
- unsigned int *cursor_bytes_per_chunk,
- unsigned int *cursor_bytes)
-{
- unsigned int cursor_pitch = 0;
- unsigned int cursor_bytes_per_req = 0;
- unsigned int cursor_width_bytes = 0;
- unsigned int cursor_height = 0;
-
- //SW determines the cursor pitch to support the maximum cursor_width that will be used, but the following restrictions apply.
- //- For 2bpp, cursor_pitch = 256 pixels due to min cursor request size of 64B
- //- For 32 or 64 bpp, cursor_pitch = 64, 128 or 256 pixels depending on the cursor width
- if (cursor_bpp == 2)
- cursor_pitch = 256;
- else
- cursor_pitch = (unsigned int)1 << (unsigned int)math_ceil2(math_log((float)cursor_width, 2), 1);
-
- //The cursor requestor uses a cursor request size of 64B, 128B, or 256B depending on the cursor_width and cursor_bpp as follows.
-
- cursor_width_bytes = (unsigned int)math_ceil2((double)cursor_width * cursor_bpp / 8, 1);
- if (cursor_width_bytes <= 64)
- cursor_bytes_per_req = 64;
- else if (cursor_width_bytes <= 128)
- cursor_bytes_per_req = 128;
- else
- cursor_bytes_per_req = 256;
-
- //If cursor_width_bytes is greater than 256B, then multiple 256B requests are issued to fetch the entire cursor line.
- *cursor_bytes_per_line = (unsigned int)math_ceil2((double)cursor_width_bytes, cursor_bytes_per_req);
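- // e.g. a 64-pixel-wide 32bpp cursor is 256 bytes per line and is fetched with a single 256B request.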
-
- //Nominally, the cursor chunk is 1KB or 2KB but it is restricted to a power of 2 number of lines with a maximum of 16 lines.
- if (cursor_bpp == 2) {
- *cursor_lines_per_chunk = 16;
- } else if (cursor_bpp == 32) {
- if (cursor_width <= 32)
- *cursor_lines_per_chunk = 16;
- else if (cursor_width <= 64)
- *cursor_lines_per_chunk = 8;
- else if (cursor_width <= 128)
- *cursor_lines_per_chunk = 4;
- else
- *cursor_lines_per_chunk = 2;
- } else if (cursor_bpp == 64) {
- if (cursor_width <= 16)
- *cursor_lines_per_chunk = 16;
- else if (cursor_width <= 32)
- *cursor_lines_per_chunk = 8;
- else if (cursor_width <= 64)
- *cursor_lines_per_chunk = 4;
- else if (cursor_width <= 128)
- *cursor_lines_per_chunk = 2;
- else
- *cursor_lines_per_chunk = 1;
- } else {
- if (cursor_width > 0) {
- dml2_printf("DML::%s: Invalid cursor_bpp = %d\n", __func__, cursor_bpp);
- dml2_assert(0);
- }
- }
-
- *cursor_bytes_per_chunk = *cursor_bytes_per_line * *cursor_lines_per_chunk;
-
- // For the cursor implementation, all requested data is stored in the return buffer. Given this fact, the cursor_bytes can be directly compared with the CursorBufferSize.
- // Only cursor_width is provided for worst-case sizing, so assume that the cursor is square
- cursor_height = cursor_width;
- *cursor_bytes = *cursor_bytes_per_line * cursor_height;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: cursor_bpp = %d\n", __func__, cursor_bpp);
- dml2_printf("DML::%s: cursor_width = %d\n", __func__, cursor_width);
- dml2_printf("DML::%s: cursor_width_bytes = %d\n", __func__, cursor_width_bytes);
- dml2_printf("DML::%s: cursor_bytes_per_req = %d\n", __func__, cursor_bytes_per_req);
- dml2_printf("DML::%s: cursor_lines_per_chunk = %d\n", __func__, *cursor_lines_per_chunk);
- dml2_printf("DML::%s: cursor_bytes_per_line = %d\n", __func__, *cursor_bytes_per_line);
- dml2_printf("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, *cursor_bytes_per_chunk);
- dml2_printf("DML::%s: cursor_bytes = %d\n", __func__, *cursor_bytes);
- dml2_printf("DML::%s: cursor_pitch = %d\n", __func__, cursor_pitch);
-#endif
-}
-
-static void calculate_cursor_urgent_burst_factor(
- unsigned int CursorBufferSize,
- unsigned int CursorWidth,
- unsigned int cursor_bytes_per_chunk,
- unsigned int cursor_lines_per_chunk,
- double LineTime,
- double UrgentLatency,
-
- double *UrgentBurstFactorCursor,
- bool *NotEnoughUrgentLatencyHiding)
-{
- unsigned int LinesInCursorBuffer = 0;
- double CursorBufferSizeInTime = 0;
-
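- // The urgent burst factor is the ratio of the time the cursor buffer can
- // cover to that time minus one urgent latency; if the buffer cannot cover
- // even one urgent latency, there is no latency hiding at all.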
- if (CursorWidth > 0) {
- LinesInCursorBuffer = (unsigned int)math_floor2(CursorBufferSize * 1024.0 / (double)cursor_bytes_per_chunk, 1) * cursor_lines_per_chunk;
-
- CursorBufferSizeInTime = LinesInCursorBuffer * LineTime;
- if (CursorBufferSizeInTime - UrgentLatency <= 0) {
- *NotEnoughUrgentLatencyHiding = 1;
- *UrgentBurstFactorCursor = 0;
- } else {
- *NotEnoughUrgentLatencyHiding = 0;
- *UrgentBurstFactorCursor = CursorBufferSizeInTime / (CursorBufferSizeInTime - UrgentLatency);
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: LinesInCursorBuffer = %u\n", __func__, LinesInCursorBuffer);
- dml2_printf("DML::%s: CursorBufferSizeInTime = %f\n", __func__, CursorBufferSizeInTime);
- dml2_printf("DML::%s: CursorBufferSize = %u (kbytes)\n", __func__, CursorBufferSize);
- dml2_printf("DML::%s: cursor_bytes_per_chunk = %u\n", __func__, cursor_bytes_per_chunk);
- dml2_printf("DML::%s: cursor_lines_per_chunk = %u\n", __func__, cursor_lines_per_chunk);
- dml2_printf("DML::%s: UrgentBurstFactorCursor = %f\n", __func__, *UrgentBurstFactorCursor);
- dml2_printf("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding);
-#endif
-
- }
-}
-
-static void CalculateUrgentBurstFactor(
- const struct dml2_plane_parameters *plane_cfg,
- unsigned int swath_width_luma_ub,
- unsigned int swath_width_chroma_ub,
- unsigned int SwathHeightY,
- unsigned int SwathHeightC,
- double LineTime,
- double UrgentLatency,
- double VRatio,
- double VRatioC,
- double BytePerPixelInDETY,
- double BytePerPixelInDETC,
- unsigned int DETBufferSizeY,
- unsigned int DETBufferSizeC,
- // Output
- double *UrgentBurstFactorLuma,
- double *UrgentBurstFactorChroma,
- bool *NotEnoughUrgentLatencyHiding)
-{
- double LinesInDETLuma;
- double LinesInDETChroma;
- double DETBufferSizeInTimeLuma;
- double DETBufferSizeInTimeChroma;
-
- *NotEnoughUrgentLatencyHiding = 0;
- *UrgentBurstFactorLuma = 0;
- *UrgentBurstFactorChroma = 0;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatio = %f\n", __func__, VRatio);
- dml2_printf("DML::%s: VRatioC = %f\n", __func__, VRatioC);
- dml2_printf("DML::%s: DETBufferSizeY = %d\n", __func__, DETBufferSizeY);
- dml2_printf("DML::%s: DETBufferSizeC = %d\n", __func__, DETBufferSizeC);
- dml2_printf("DML::%s: BytePerPixelInDETY = %f\n", __func__, BytePerPixelInDETY);
- dml2_printf("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, LineTime);
-#endif
- DML2_ASSERT(VRatio > 0);
-
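- // Same construction as the cursor burst factor, but per plane using the DET:
- // the DET depth in lines is rounded down to whole swaths, and the time those
- // lines take to scan out at the plane's vertical ratio must exceed the
- // urgent latency for any hiding to be possible.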
- LinesInDETLuma = (dml_is_phantom_pipe(plane_cfg) ? 1024 * 1024 : DETBufferSizeY) / BytePerPixelInDETY / swath_width_luma_ub;
-
- DETBufferSizeInTimeLuma = math_floor2(LinesInDETLuma, SwathHeightY) * LineTime / VRatio;
- if (DETBufferSizeInTimeLuma - UrgentLatency <= 0) {
- *NotEnoughUrgentLatencyHiding = 1;
- *UrgentBurstFactorLuma = 0;
- } else {
- *UrgentBurstFactorLuma = DETBufferSizeInTimeLuma / (DETBufferSizeInTimeLuma - UrgentLatency);
- }
-
- if (BytePerPixelInDETC > 0) {
- LinesInDETChroma = (dml_is_phantom_pipe(plane_cfg) ? 1024 * 1024 : DETBufferSizeC) / BytePerPixelInDETC / swath_width_chroma_ub;
-
- DETBufferSizeInTimeChroma = math_floor2(LinesInDETChroma, SwathHeightC) * LineTime / VRatioC;
- if (DETBufferSizeInTimeChroma - UrgentLatency <= 0) {
- *NotEnoughUrgentLatencyHiding = 1;
- *UrgentBurstFactorChroma = 0;
- } else {
- *UrgentBurstFactorChroma = DETBufferSizeInTimeChroma / (DETBufferSizeInTimeChroma - UrgentLatency);
- }
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: LinesInDETLuma = %f\n", __func__, LinesInDETLuma);
- dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
- dml2_printf("DML::%s: DETBufferSizeInTimeLuma = %f\n", __func__, DETBufferSizeInTimeLuma);
- dml2_printf("DML::%s: UrgentBurstFactorLuma = %f\n", __func__, *UrgentBurstFactorLuma);
- dml2_printf("DML::%s: UrgentBurstFactorChroma = %f\n", __func__, *UrgentBurstFactorChroma);
- dml2_printf("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding);
-#endif
-
-}
-
-static void CalculateDCFCLKDeepSleep(
- const struct dml2_display_cfg *display_cfg,
- unsigned int NumberOfActiveSurfaces,
- unsigned int BytePerPixelY[],
- unsigned int BytePerPixelC[],
- unsigned int SwathWidthY[],
- unsigned int SwathWidthC[],
- unsigned int DPPPerSurface[],
- double PSCL_THROUGHPUT[],
- double PSCL_THROUGHPUT_CHROMA[],
- double Dppclk[],
- double ReadBandwidthLuma[],
- double ReadBandwidthChroma[],
- unsigned int ReturnBusWidth,
-
- // Output
- double *DCFClkDeepSleep)
-{
- double DisplayPipeLineDeliveryTimeLuma;
- double DisplayPipeLineDeliveryTimeChroma;
- double DCFClkDeepSleepPerSurface[DML2_MAX_PLANES];
-
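- // Per-surface deep-sleep DCFCLK: the swath read rate (swath width x bytes
- // per pixel over the pipe's line delivery time) scaled by the DCFCLK factor,
- // divided by 32 for dual-plane and 64 for single-plane formats, and floored
- // at 1/16 of the pixel rate. The final value is the largest of these and the
- // total read bandwidth over the return bus.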
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- double pixel_rate_mhz = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
-
- if (display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio <= 1) {
- DisplayPipeLineDeliveryTimeLuma = SwathWidthY[k] * DPPPerSurface[k] / display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio / pixel_rate_mhz;
- } else {
- DisplayPipeLineDeliveryTimeLuma = SwathWidthY[k] / PSCL_THROUGHPUT[k] / Dppclk[k];
- }
- if (BytePerPixelC[k] == 0) {
- DisplayPipeLineDeliveryTimeChroma = 0;
- } else {
- if (display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio <= 1) {
- DisplayPipeLineDeliveryTimeChroma = SwathWidthC[k] * DPPPerSurface[k] / display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio / pixel_rate_mhz;
- } else {
- DisplayPipeLineDeliveryTimeChroma = SwathWidthC[k] / PSCL_THROUGHPUT_CHROMA[k] / Dppclk[k];
- }
- }
-
- if (BytePerPixelC[k] > 0) {
- DCFClkDeepSleepPerSurface[k] = math_max2(__DML2_CALCS_DCFCLK_FACTOR__ * SwathWidthY[k] * BytePerPixelY[k] / 32.0 / DisplayPipeLineDeliveryTimeLuma,
- __DML2_CALCS_DCFCLK_FACTOR__ * SwathWidthC[k] * BytePerPixelC[k] / 32.0 / DisplayPipeLineDeliveryTimeChroma);
- } else {
- DCFClkDeepSleepPerSurface[k] = __DML2_CALCS_DCFCLK_FACTOR__ * SwathWidthY[k] * BytePerPixelY[k] / 64.0 / DisplayPipeLineDeliveryTimeLuma;
- }
- DCFClkDeepSleepPerSurface[k] = math_max2(DCFClkDeepSleepPerSurface[k], pixel_rate_mhz / 16);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, PixelClock = %f\n", __func__, k, pixel_rate_mhz);
- dml2_printf("DML::%s: k=%u, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]);
-#endif
- }
-
- double ReadBandwidth = 0.0;
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- ReadBandwidth = ReadBandwidth + ReadBandwidthLuma[k] + ReadBandwidthChroma[k];
- }
-
- *DCFClkDeepSleep = math_max2(8.0, __DML2_CALCS_DCFCLK_FACTOR__ * ReadBandwidth / (double)ReturnBusWidth);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: __DML2_CALCS_DCFCLK_FACTOR__ = %f\n", __func__, __DML2_CALCS_DCFCLK_FACTOR__);
- dml2_printf("DML::%s: ReadBandwidth = %f\n", __func__, ReadBandwidth);
- dml2_printf("DML::%s: ReturnBusWidth = %u\n", __func__, ReturnBusWidth);
- dml2_printf("DML::%s: DCFClkDeepSleep = %f\n", __func__, *DCFClkDeepSleep);
-#endif
-
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- *DCFClkDeepSleep = math_max2(*DCFClkDeepSleep, DCFClkDeepSleepPerSurface[k]);
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DCFClkDeepSleep = %f (final)\n", __func__, *DCFClkDeepSleep);
-#endif
-}
-
-static double CalculateWriteBackDelay(
- enum dml2_source_format_class WritebackPixelFormat,
- double WritebackHRatio,
- double WritebackVRatio,
- unsigned int WritebackVTaps,
- unsigned int WritebackDestinationWidth,
- unsigned int WritebackDestinationHeight,
- unsigned int WritebackSourceHeight,
- unsigned int HTotal)
-{
- double CalculateWriteBackDelay;
- double Line_length;
- double Output_lines_last_notclamped;
- double WritebackVInit;
-
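- // Writeback delay: the number of destination lines generated after the
- // source has been fully consumed, times the scaler line length, plus the
- // horizontal blank and a fixed pad of 80; zero when the count comes out
- // negative.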
- WritebackVInit = (WritebackVRatio + WritebackVTaps + 1) / 2;
- Line_length = math_max2((double)WritebackDestinationWidth, math_ceil2((double)WritebackDestinationWidth / 6.0, 1.0) * WritebackVTaps);
- Output_lines_last_notclamped = WritebackDestinationHeight - 1 - math_ceil2(((double)WritebackSourceHeight - (double)WritebackVInit) / (double)WritebackVRatio, 1.0);
- if (Output_lines_last_notclamped < 0) {
- CalculateWriteBackDelay = 0;
- } else {
- CalculateWriteBackDelay = Output_lines_last_notclamped * Line_length + (HTotal - WritebackDestinationWidth) + 80;
- }
- return CalculateWriteBackDelay;
-}
-
-static unsigned int CalculateMaxVStartup(
- bool ptoi_supported,
- unsigned int vblank_nom_default_us,
- const struct dml2_timing_cfg *timing,
- double write_back_delay_us)
-{
- unsigned int vblank_size = 0;
- unsigned int max_vstartup_lines = 0;
-
- double line_time_us = (double)timing->h_total / ((double)timing->pixel_clock_khz / 1000);
- unsigned int vblank_actual = timing->v_total - timing->v_active;
- unsigned int vblank_nom_default_in_line = (unsigned int)math_floor2((double)vblank_nom_default_us / line_time_us, 1.0);
- unsigned int vblank_nom_input = (unsigned int)math_min2(timing->vblank_nom, vblank_nom_default_in_line);
- unsigned int vblank_avail = (vblank_nom_input == 0) ? vblank_nom_default_in_line : vblank_nom_input;
-
- vblank_size = (unsigned int)math_min2(vblank_actual, vblank_avail);
-
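- // Max vstartup is the usable vblank: the smaller of the actual vblank and
- // the nominal/default vblank, minus room for the writeback delay (or halved
- // for interlaced timings without PTOI support).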
- if (timing->interlaced && !ptoi_supported)
- max_vstartup_lines = (unsigned int)(math_floor2(vblank_size / 2.0, 1.0));
- else
- max_vstartup_lines = vblank_size - (unsigned int)math_max2(1.0, math_ceil2(write_back_delay_us / line_time_us, 1.0));
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VBlankNom = %u\n", __func__, timing->vblank_nom);
- dml2_printf("DML::%s: vblank_nom_default_us = %u\n", __func__, vblank_nom_default_us);
- dml2_printf("DML::%s: line_time_us = %f\n", __func__, line_time_us);
- dml2_printf("DML::%s: vblank_actual = %u\n", __func__, vblank_actual);
- dml2_printf("DML::%s: vblank_avail = %u\n", __func__, vblank_avail);
- dml2_printf("DML::%s: max_vstartup_lines = %u\n", __func__, max_vstartup_lines);
-#endif
- return max_vstartup_lines;
-}
-
-static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch *scratch,
- struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params *p)
-{
- struct dml2_core_shared_CalculateSwathAndDETConfiguration_locals *l = &scratch->CalculateSwathAndDETConfiguration_locals;
- memset(l, 0, sizeof(struct dml2_core_shared_CalculateSwathAndDETConfiguration_locals));
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ForceSingleDPP = %u\n", __func__, p->ForceSingleDPP);
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- dml2_printf("DML::%s: DPPPerSurface[%u] = %u\n", __func__, k, p->DPPPerSurface[k]);
- }
-#endif
- CalculateSwathWidth(
- p->display_cfg,
- p->ForceSingleDPP,
- p->NumberOfActiveSurfaces,
- p->ODMMode,
- p->BytePerPixY,
- p->BytePerPixC,
- p->Read256BytesBlockHeightY,
- p->Read256BytesBlockHeightC,
- p->Read256BytesBlockWidthY,
- p->Read256BytesBlockWidthC,
- p->surf_linear128_l,
- p->surf_linear128_c,
- p->DPPPerSurface,
-
- // Output
- p->req_per_swath_ub_l,
- p->req_per_swath_ub_c,
- l->SwathWidthSingleDPP,
- l->SwathWidthSingleDPPChroma,
- p->SwathWidth,
- p->SwathWidthChroma,
- l->MaximumSwathHeightY,
- l->MaximumSwathHeightC,
- p->swath_width_luma_ub,
- p->swath_width_chroma_ub);
-
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- p->full_swath_bytes_l[k] = (unsigned int)(p->swath_width_luma_ub[k] * p->BytePerPixDETY[k] * l->MaximumSwathHeightY[k]);
- p->full_swath_bytes_c[k] = (unsigned int)(p->swath_width_chroma_ub[k] * p->BytePerPixDETC[k] * l->MaximumSwathHeightC[k]);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u DPPPerSurface = %u\n", __func__, k, p->DPPPerSurface[k]);
- dml2_printf("DML::%s: k=%u swath_width_luma_ub = %u\n", __func__, k, p->swath_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u BytePerPixDETY = %f\n", __func__, k, p->BytePerPixDETY[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathHeightY = %u\n", __func__, k, l->MaximumSwathHeightY[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]);
- dml2_printf("DML::%s: k=%u swath_width_chroma_ub = %u\n", __func__, k, p->swath_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u BytePerPixDETC = %f\n", __func__, k, p->BytePerPixDETC[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathHeightC = %u\n", __func__, k, l->MaximumSwathHeightC[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]);
-#endif
- if (p->display_cfg->plane_descriptors[k].pixel_format == dml2_420_10) {
- p->full_swath_bytes_l[k] = (unsigned int)(math_ceil2((double)p->full_swath_bytes_l[k], 256));
- p->full_swath_bytes_c[k] = (unsigned int)(math_ceil2((double)p->full_swath_bytes_c[k], 256));
- }
- }
-
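- // Count the active DPPs, remember the last surface with an active DPP as the
- // unbounded-request candidate, and note whether any plane is 4:2:0,
- // rgbe_alpha or linear; that flag feeds the unbounded-request decision below.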
- unsigned int TotalActiveDPP = 0;
- bool NoChromaOrLinear = true;
- unsigned int SurfaceDoingUnboundedRequest = 0;
-
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- TotalActiveDPP = TotalActiveDPP + (p->ForceSingleDPP ? 1 : p->DPPPerSurface[k]);
- if (p->DPPPerSurface[k] > 0)
- SurfaceDoingUnboundedRequest = k;
- if (dml2_core_shared_is_420(p->display_cfg->plane_descriptors[k].pixel_format) || p->display_cfg->plane_descriptors[k].pixel_format == dml2_rgbe_alpha
- || p->display_cfg->plane_descriptors[k].surface.tiling == dml2_sw_linear) {
- NoChromaOrLinear = false;
- }
- l->SwathTimeValueUs[k] = (unsigned int) ((double)l->MaximumSwathHeightY[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total
- / p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz * 1000);
- }
-
- *p->UnboundedRequestEnabled = UnboundedRequest(p->display_cfg->overrides.hw.force_unbounded_requesting.enable, p->display_cfg->overrides.hw.force_unbounded_requesting.value, TotalActiveDPP, NoChromaOrLinear);
-
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.display_cfg = p->display_cfg;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.ForceSingleDPP = p->ForceSingleDPP;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.NumberOfActiveSurfaces = p->NumberOfActiveSurfaces;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.UnboundedRequestEnabled = *p->UnboundedRequestEnabled;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.nomDETInKByte = p->nomDETInKByte;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.MaxTotalDETInKByte = p->MaxTotalDETInKByte;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.ConfigReturnBufferSizeInKByte = p->ConfigReturnBufferSizeInKByte;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.MinCompressedBufferSizeInKByte = p->MinCompressedBufferSizeInKByte;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.ConfigReturnBufferSegmentSizeInkByte = p->ConfigReturnBufferSegmentSizeInkByte;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.CompressedBufferSegmentSizeInkByte = p->CompressedBufferSegmentSizeInkByte;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.ReadBandwidthLuma = p->ReadBandwidthLuma;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.ReadBandwidthChroma = p->ReadBandwidthChroma;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.full_swath_bytes_l = p->full_swath_bytes_l;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.full_swath_bytes_c = p->full_swath_bytes_c;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.DPPPerSurface = p->DPPPerSurface;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.DETBufferSizeInKByte = p->DETBufferSizeInKByte;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.CompressedBufferSizeInkByte = p->CompressedBufferSizeInkByte;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.swath_time_value_us = l->SwathTimeValueUs;
- scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params.bestEffortMinActiveLatencyHidingUs = p->display_cfg->overrides.best_effort_min_active_latency_hiding_us;
- if (p->funcs->calculate_det_buffer_size) {
- p->funcs->calculate_det_buffer_size(&scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params);
- } else {
- CalculateDETBufferSize(&scratch->CalculateSwathAndDETConfiguration_locals.calculate_det_buffer_size_params);
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: TotalActiveDPP = %u\n", __func__, TotalActiveDPP);
- dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, p->nomDETInKByte);
- dml2_printf("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, p->ConfigReturnBufferSizeInKByte);
- dml2_printf("DML::%s: UnboundedRequestEnabled = %u\n", __func__, *p->UnboundedRequestEnabled);
- dml2_printf("DML::%s: CompressedBufferSizeInkByte = %u\n", __func__, *p->CompressedBufferSizeInkByte);
-#endif
-
- unsigned int DETBufferSizeInKByteForSwathCalculation;
- *p->ViewportSizeSupport = true;
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
-
- DETBufferSizeInKByteForSwathCalculation = (dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k]) ? 1024 : p->DETBufferSizeInKByte[k]);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByteForSwathCalculation = %u\n", __func__, k, DETBufferSizeInKByteForSwathCalculation);
-#endif
-
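- // Pick swath heights: keep full-height swaths if one luma plus one chroma
- // swath fits in half the DET; otherwise halve the dominant plane's swath
- // (luma if it is at least 1.5x the chroma swath, chroma otherwise), and as a
- // last resort halve both. Request sizes drop from 256B to 128B or 64B when a
- // swath is halved, depending on rotation and bytes per pixel.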
- if (p->full_swath_bytes_l[k] + p->full_swath_bytes_c[k] <= DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
- p->SwathHeightY[k] = l->MaximumSwathHeightY[k];
- p->SwathHeightC[k] = l->MaximumSwathHeightC[k];
- l->RoundedUpSwathSizeBytesY[k] = p->full_swath_bytes_l[k];
- l->RoundedUpSwathSizeBytesC[k] = p->full_swath_bytes_c[k];
- p->request_size_bytes_luma[k] = 256;
- p->request_size_bytes_chroma[k] = 256;
-
- } else if (p->full_swath_bytes_l[k] >= 1.5 * p->full_swath_bytes_c[k] && p->full_swath_bytes_l[k] / 2 + p->full_swath_bytes_c[k] <= DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
- p->SwathHeightY[k] = l->MaximumSwathHeightY[k] / 2;
- p->SwathHeightC[k] = l->MaximumSwathHeightC[k];
- l->RoundedUpSwathSizeBytesY[k] = p->full_swath_bytes_l[k] / 2;
- l->RoundedUpSwathSizeBytesC[k] = p->full_swath_bytes_c[k];
- p->request_size_bytes_luma[k] = ((p->BytePerPixY[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
- p->request_size_bytes_chroma[k] = 256;
-
- } else if (p->full_swath_bytes_l[k] < 1.5 * p->full_swath_bytes_c[k] && p->full_swath_bytes_l[k] + p->full_swath_bytes_c[k] / 2 <= DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
- p->SwathHeightY[k] = l->MaximumSwathHeightY[k];
- p->SwathHeightC[k] = l->MaximumSwathHeightC[k] / 2;
- l->RoundedUpSwathSizeBytesY[k] = p->full_swath_bytes_l[k];
- l->RoundedUpSwathSizeBytesC[k] = p->full_swath_bytes_c[k] / 2;
- p->request_size_bytes_luma[k] = 256;
- p->request_size_bytes_chroma[k] = ((p->BytePerPixC[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
-
- } else {
- p->SwathHeightY[k] = l->MaximumSwathHeightY[k] / 2;
- p->SwathHeightC[k] = l->MaximumSwathHeightC[k] / 2;
- l->RoundedUpSwathSizeBytesY[k] = p->full_swath_bytes_l[k] / 2;
- l->RoundedUpSwathSizeBytesC[k] = p->full_swath_bytes_c[k] / 2;
- p->request_size_bytes_luma[k] = ((p->BytePerPixY[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
- p->request_size_bytes_chroma[k] = ((p->BytePerPixC[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
- }
-
- if (p->SwathHeightC[k] == 0)
- p->request_size_bytes_chroma[k] = 0;
-
- if ((p->full_swath_bytes_l[k] / 2 + p->full_swath_bytes_c[k] / 2 > DETBufferSizeInKByteForSwathCalculation * 1024 / 2) ||
- p->SwathWidth[k] > p->MaximumSwathWidthLuma[k] || (p->SwathHeightC[k] > 0 && p->SwathWidthChroma[k] > p->MaximumSwathWidthChroma[k])) {
- *p->ViewportSizeSupport = false;
- p->ViewportSizeSupportPerSurface[k] = false;
- } else {
- p->ViewportSizeSupportPerSurface[k] = true;
- }
-
- if (p->SwathHeightC[k] == 0) {
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, All DET will be used for plane0\n", __func__, k);
-#endif
- p->DETBufferSizeY[k] = p->DETBufferSizeInKByte[k] * 1024;
- p->DETBufferSizeC[k] = 0;
- } else if (l->RoundedUpSwathSizeBytesY[k] <= 1.5 * l->RoundedUpSwathSizeBytesC[k]) {
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, Half DET will be used for plane0, and half for plane1\n", __func__, k);
-#endif
- p->DETBufferSizeY[k] = p->DETBufferSizeInKByte[k] * 1024 / 2;
- p->DETBufferSizeC[k] = p->DETBufferSizeInKByte[k] * 1024 / 2;
- } else {
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, 2/3 DET will be used for plane0, and 1/3 for plane1\n", __func__, k);
-#endif
- p->DETBufferSizeY[k] = (unsigned int)(math_floor2(p->DETBufferSizeInKByte[k] * 1024 * 2 / 3, 1024));
- p->DETBufferSizeC[k] = p->DETBufferSizeInKByte[k] * 1024 - p->DETBufferSizeY[k];
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u SwathHeightY = %u\n", __func__, k, p->SwathHeightY[k]);
- dml2_printf("DML::%s: k=%u SwathHeightC = %u\n", __func__, k, p->SwathHeightC[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]);
- dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesY = %u\n", __func__, k, l->RoundedUpSwathSizeBytesY[k]);
- dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, l->RoundedUpSwathSizeBytesC[k]);
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, p->DETBufferSizeInKByte[k]);
- dml2_printf("DML::%s: k=%u DETBufferSizeY = %u\n", __func__, k, p->DETBufferSizeY[k]);
- dml2_printf("DML::%s: k=%u DETBufferSizeC = %u\n", __func__, k, p->DETBufferSizeC[k]);
- dml2_printf("DML::%s: k=%u ViewportSizeSupportPerSurface = %u\n", __func__, k, p->ViewportSizeSupportPerSurface[k]);
-#endif
-
- }
-
- const long TTUFIFODEPTH = 8;
- const long MAXIMUMCOMPRESSION = 4;
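- // Reserve compressed-buffer space: two pixel chunks in 64B units, raised
- // when unbounded requesting is enabled to at least the ROB size minus
- // TTUFIFODEPTH swaths of the unbounded surface.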
- *p->compbuf_reserved_space_64b = 2 * p->pixel_chunk_size_kbytes * 1024 / 64;
- if (*p->UnboundedRequestEnabled) {
- *p->compbuf_reserved_space_64b = (unsigned int)math_ceil2(math_max2(*p->compbuf_reserved_space_64b,
- (double)(p->rob_buffer_size_kbytes * 1024 / 64) - (double)(l->RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest] * TTUFIFODEPTH / 64)), 1.0);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: RoundedUpSwathSizeBytesY[%d] = %u\n", __func__, SurfaceDoingUnboundedRequest, l->RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest]);
- dml2_printf("DML::%s: rob_buffer_size_kbytes = %u\n", __func__, p->rob_buffer_size_kbytes);
-#endif
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: compbuf_reserved_space_64b = %u\n", __func__, *p->compbuf_reserved_space_64b);
-#endif
-
- *p->hw_debug5 = false;
- if (!p->mrq_present) {
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (!(*p->UnboundedRequestEnabled)
- && p->display_cfg->plane_descriptors[k].surface.dcc.enable
- && ((p->rob_buffer_size_kbytes * 1024 + *p->CompressedBufferSizeInkByte * MAXIMUMCOMPRESSION * 1024) > TTUFIFODEPTH * (l->RoundedUpSwathSizeBytesY[k] + l->RoundedUpSwathSizeBytesC[k])))
- *p->hw_debug5 = true;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u UnboundedRequestEnabled = %u\n", __func__, k, *p->UnboundedRequestEnabled);
- dml2_printf("DML::%s: k=%u MAXIMUMCOMPRESSION = %lu\n", __func__, k, MAXIMUMCOMPRESSION);
- dml2_printf("DML::%s: k=%u TTUFIFODEPTH = %lu\n", __func__, k, TTUFIFODEPTH);
- dml2_printf("DML::%s: k=%u CompressedBufferSizeInkByte = %u\n", __func__, k, *p->CompressedBufferSizeInkByte);
- dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, l->RoundedUpSwathSizeBytesC[k]);
- dml2_printf("DML::%s: k=%u hw_debug5 = %u\n", __func__, k, *p->hw_debug5);
-#endif
- }
- }
-}
-
-static void CalculateODMMode(
- unsigned int MaximumPixelsPerLinePerDSCUnit,
- unsigned int HActive,
- enum dml2_output_encoder_class Output,
- enum dml2_odm_mode ODMUse,
- double MaxDispclk,
- bool DSCEnable,
- unsigned int TotalNumberOfActiveDPP,
- unsigned int MaxNumDPP,
- double PixelClock,
-
- // Output
- bool *TotalAvailablePipesSupport,
- unsigned int *NumberOfDPP,
- enum dml2_odm_mode *ODMMode,
- double *RequiredDISPCLKPerSurface)
-{
- double SurfaceRequiredDISPCLKWithoutODMCombine;
- double SurfaceRequiredDISPCLKWithODMCombineTwoToOne;
- double SurfaceRequiredDISPCLKWithODMCombineThreeToOne;
- double SurfaceRequiredDISPCLKWithODMCombineFourToOne;
-
- SurfaceRequiredDISPCLKWithoutODMCombine = CalculateRequiredDispclk(dml2_odm_mode_bypass, PixelClock);
- SurfaceRequiredDISPCLKWithODMCombineTwoToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_2to1, PixelClock);
- SurfaceRequiredDISPCLKWithODMCombineThreeToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_3to1, PixelClock);
- SurfaceRequiredDISPCLKWithODMCombineFourToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_4to1, PixelClock);
- *TotalAvailablePipesSupport = true;
-
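- // Forced ODM modes are taken as-is; in auto mode the smallest combine factor
- // is chosen whose required DISPCLK fits under MaxDispclk and whose DSC slice
- // width limit (MaximumPixelsPerLinePerDSCUnit) is not exceeded, provided
- // enough DPPs remain.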
- if (ODMUse == dml2_odm_mode_bypass || ODMUse == dml2_odm_mode_auto)
- *ODMMode = dml2_odm_mode_bypass;
- else if (ODMUse == dml2_odm_mode_combine_2to1)
- *ODMMode = dml2_odm_mode_combine_2to1;
- else if (ODMUse == dml2_odm_mode_combine_3to1)
- *ODMMode = dml2_odm_mode_combine_3to1;
- else if (ODMUse == dml2_odm_mode_combine_4to1)
- *ODMMode = dml2_odm_mode_combine_4to1;
- else if (ODMUse == dml2_odm_mode_split_1to2)
- *ODMMode = dml2_odm_mode_split_1to2;
- else if (ODMUse == dml2_odm_mode_mso_1to2)
- *ODMMode = dml2_odm_mode_mso_1to2;
- else if (ODMUse == dml2_odm_mode_mso_1to4)
- *ODMMode = dml2_odm_mode_mso_1to4;
-
- *RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithoutODMCombine;
- *NumberOfDPP = 0;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ODMUse = %d\n", __func__, ODMUse);
- dml2_printf("DML::%s: Output = %d\n", __func__, Output);
- dml2_printf("DML::%s: DSCEnable = %d\n", __func__, DSCEnable);
- dml2_printf("DML::%s: MaxDispclk = %f\n", __func__, MaxDispclk);
- dml2_printf("DML::%s: MaximumPixelsPerLinePerDSCUnit = %d\n", __func__, MaximumPixelsPerLinePerDSCUnit);
- dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithoutODMCombine = %f\n", __func__, SurfaceRequiredDISPCLKWithoutODMCombine);
- dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithODMCombineTwoToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineTwoToOne);
- dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithODMCombineThreeToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineThreeToOne);
- dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithODMCombineFourToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineFourToOne);
-#endif
-
- if (ODMUse == dml2_odm_mode_combine_4to1 || (ODMUse == dml2_odm_mode_auto &&
- (SurfaceRequiredDISPCLKWithODMCombineThreeToOne > MaxDispclk || (DSCEnable && (HActive > 3 * MaximumPixelsPerLinePerDSCUnit))))) {
- if (TotalNumberOfActiveDPP + 4 <= MaxNumDPP) {
- *ODMMode = dml2_odm_mode_combine_4to1;
- *RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineFourToOne;
- *NumberOfDPP = 4;
- } else {
- *TotalAvailablePipesSupport = false;
- }
- } else if (ODMUse == dml2_odm_mode_combine_3to1 || (ODMUse == dml2_odm_mode_auto &&
- ((SurfaceRequiredDISPCLKWithODMCombineTwoToOne > MaxDispclk && SurfaceRequiredDISPCLKWithODMCombineThreeToOne <= MaxDispclk) ||
- (DSCEnable && (HActive > 2 * MaximumPixelsPerLinePerDSCUnit))))) {
- if (TotalNumberOfActiveDPP + 3 <= MaxNumDPP) {
- *ODMMode = dml2_odm_mode_combine_3to1;
- *RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineThreeToOne;
- *NumberOfDPP = 3;
- } else {
- *TotalAvailablePipesSupport = false;
- }
-
- } else if (ODMUse == dml2_odm_mode_combine_2to1 || (ODMUse == dml2_odm_mode_auto &&
- ((SurfaceRequiredDISPCLKWithoutODMCombine > MaxDispclk && SurfaceRequiredDISPCLKWithODMCombineTwoToOne <= MaxDispclk) ||
- (DSCEnable && (HActive > MaximumPixelsPerLinePerDSCUnit))))) {
- if (TotalNumberOfActiveDPP + 2 <= MaxNumDPP) {
- *ODMMode = dml2_odm_mode_combine_2to1;
- *RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineTwoToOne;
- *NumberOfDPP = 2;
- } else {
- *TotalAvailablePipesSupport = false;
- }
-
- } else {
- if (TotalNumberOfActiveDPP + 1 <= MaxNumDPP) {
- *NumberOfDPP = 1;
- } else {
- *TotalAvailablePipesSupport = false;
- }
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ODMMode = %d\n", __func__, *ODMMode);
- dml2_printf("DML::%s: NumberOfDPP = %d\n", __func__, *NumberOfDPP);
- dml2_printf("DML::%s: TotalAvailablePipesSupport = %d\n", __func__, *TotalAvailablePipesSupport);
- dml2_printf("DML::%s: RequiredDISPCLKPerSurface = %f\n", __func__, *RequiredDISPCLKPerSurface);
-#endif
-
-}
-
-static void CalculateOutputLink(
- struct dml2_core_internal_scratch *s,
- double PHYCLK,
- double PHYCLKD18,
- double PHYCLKD32,
- double Downspreading,
- bool IsMainSurfaceUsingTheIndicatedTiming,
- enum dml2_output_encoder_class Output,
- enum dml2_output_format_class OutputFormat,
- unsigned int HTotal,
- unsigned int HActive,
- double PixelClockBackEnd,
- double ForcedOutputLinkBPP,
- unsigned int DSCInputBitPerComponent,
- unsigned int NumberOfDSCSlices,
- double AudioSampleRate,
- unsigned int AudioSampleLayout,
- enum dml2_odm_mode ODMModeNoDSC,
- enum dml2_odm_mode ODMModeDSC,
- enum dml2_dsc_enable_option DSCEnable,
- unsigned int OutputLinkDPLanes,
- enum dml2_output_link_dp_rate OutputLinkDPRate,
-
- // Output
- bool *RequiresDSC,
- bool *RequiresFEC,
- double *OutBpp,
- enum dml2_core_internal_output_type *OutputType,
- enum dml2_core_internal_output_type_rate *OutputRate,
- unsigned int *RequiredSlots)
-{
- bool LinkDSCEnable;
- unsigned int dummy;
- *RequiresDSC = false;
- *RequiresFEC = false;
- *OutBpp = 0;
-
- *OutputType = dml2_core_internal_output_type_unknown;
- *OutputRate = dml2_core_internal_output_rate_unknown;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DSCEnable = %u (dis, en, en_if_necessary)\n", __func__, DSCEnable);
- dml2_printf("DML::%s: IsMainSurfaceUsingTheIndicatedTiming = %u\n", __func__, IsMainSurfaceUsingTheIndicatedTiming);
- dml2_printf("DML::%s: PHYCLK = %f\n", __func__, PHYCLK);
- dml2_printf("DML::%s: PixelClockBackEnd = %f\n", __func__, PixelClockBackEnd);
- dml2_printf("DML::%s: AudioSampleRate = %f\n", __func__, AudioSampleRate);
- dml2_printf("DML::%s: HActive = %u\n", __func__, HActive);
- dml2_printf("DML::%s: HTotal = %u\n", __func__, HTotal);
- dml2_printf("DML::%s: ODMModeNoDSC = %u\n", __func__, ODMModeNoDSC);
- dml2_printf("DML::%s: ODMModeDSC = %u\n", __func__, ODMModeDSC);
- dml2_printf("DML::%s: ForcedOutputLinkBPP = %f\n", __func__, ForcedOutputLinkBPP);
- dml2_printf("DML::%s: Output (encoder) = %u\n", __func__, Output);
- dml2_printf("DML::%s: OutputLinkDPRate = %u\n", __func__, OutputLinkDPRate);
-#endif
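- // Walk the candidate link rates for the selected encoder from lowest to
- // highest and keep the first that yields a nonzero valid OutBpp. DSC is
- // enabled up front when requested, or retried at a given rate when
- // dml2_dsc_enable_if_necessary is set and the stream cannot otherwise fit
- // within what the PHY clock allows.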
- if (IsMainSurfaceUsingTheIndicatedTiming) {
- if (Output == dml2_hdmi) {
- *RequiresDSC = false;
- *RequiresFEC = false;
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, math_min2(600, PHYCLK) * 10, 3, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, false, Output,
- OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
- //OutputTypeAndRate = "HDMI";
- *OutputType = dml2_core_internal_output_type_hdmi;
- } else if (Output == dml2_dp || Output == dml2_dp2p0 || Output == dml2_edp) {
- if (DSCEnable == dml2_dsc_enable) {
- *RequiresDSC = true;
- LinkDSCEnable = true;
- if (Output == dml2_dp || Output == dml2_dp2p0) {
- *RequiresFEC = true;
- } else {
- *RequiresFEC = false;
- }
- } else {
- *RequiresDSC = false;
- LinkDSCEnable = false;
- if (Output == dml2_dp2p0) {
- *RequiresFEC = true;
- } else {
- *RequiresFEC = false;
- }
- }
- if (Output == dml2_dp2p0) {
- *OutBpp = 0;
- if ((OutputLinkDPRate == dml2_dp_rate_na || OutputLinkDPRate == dml2_dp_rate_uhbr10) && PHYCLKD32 >= 10000.0 / 32) {
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 10000, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
- OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
- if (*OutBpp == 0 && PHYCLKD32 < 13500.0 / 32 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
- *RequiresDSC = true;
- LinkDSCEnable = true;
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 10000, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
- OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
- }
- //OutputTypeAndRate = Output & " UHBR10";
- *OutputType = dml2_core_internal_output_type_dp2p0;
- *OutputRate = dml2_core_internal_output_rate_dp_rate_uhbr10;
- }
- if ((OutputLinkDPRate == dml2_dp_rate_na || OutputLinkDPRate == dml2_dp_rate_uhbr13p5) && *OutBpp == 0 && PHYCLKD32 >= 13500.0 / 32) {
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 13500, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
- OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
-
- if (*OutBpp == 0 && PHYCLKD32 < 20000 / 32 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
- *RequiresDSC = true;
- LinkDSCEnable = true;
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 13500, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
- OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
- }
- //OutputTypeAndRate = Output & " UHBR13p5";
- *OutputType = dml2_core_internal_output_type_dp2p0;
- *OutputRate = dml2_core_internal_output_rate_dp_rate_uhbr13p5;
- }
- if ((OutputLinkDPRate == dml2_dp_rate_na || OutputLinkDPRate == dml2_dp_rate_uhbr20) && *OutBpp == 0 && PHYCLKD32 >= 20000 / 32) {
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 20000, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
- OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
- if (*OutBpp == 0 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
- *RequiresDSC = true;
- LinkDSCEnable = true;
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 20000, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
- OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
- }
- //OutputTypeAndRate = Output & " UHBR20";
- *OutputType = dml2_core_internal_output_type_dp2p0;
- *OutputRate = dml2_core_internal_output_rate_dp_rate_uhbr20;
- }
- } else { // output is dp or edp
- *OutBpp = 0;
- if ((OutputLinkDPRate == dml2_dp_rate_na || OutputLinkDPRate == dml2_dp_rate_hbr) && PHYCLK >= 270) {
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 2700, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
- OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
- if (*OutBpp == 0 && PHYCLK < 540 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
- *RequiresDSC = true;
- LinkDSCEnable = true;
- if (Output == dml2_dp) {
- *RequiresFEC = true;
- }
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 2700, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
- OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
- }
- //OutputTypeAndRate = Output & " HBR";
- *OutputType = (Output == dml2_dp) ? dml2_core_internal_output_type_dp : dml2_core_internal_output_type_edp;
- *OutputRate = dml2_core_internal_output_rate_dp_rate_hbr;
- }
- if ((OutputLinkDPRate == dml2_dp_rate_na || OutputLinkDPRate == dml2_dp_rate_hbr2) && *OutBpp == 0 && PHYCLK >= 540) {
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 5400, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
- OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
-
- if (*OutBpp == 0 && PHYCLK < 810 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
- *RequiresDSC = true;
- LinkDSCEnable = true;
- if (Output == dml2_dp) {
- *RequiresFEC = true;
- }
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 5400, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
- OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
- }
- //OutputTypeAndRate = Output & " HBR2";
- *OutputType = (Output == dml2_dp) ? dml2_core_internal_output_type_dp : dml2_core_internal_output_type_edp;
- *OutputRate = dml2_core_internal_output_rate_dp_rate_hbr2;
- }
- if ((OutputLinkDPRate == dml2_dp_rate_na || OutputLinkDPRate == dml2_dp_rate_hbr3) && *OutBpp == 0 && PHYCLK >= 810) { // VBA_ERROR, vba code doesn't have hbr3 check
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 8100, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
- OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
-
- if (*OutBpp == 0 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
- *RequiresDSC = true;
- LinkDSCEnable = true;
- if (Output == dml2_dp) {
- *RequiresFEC = true;
- }
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, (1 - Downspreading / 100) * 8100, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
- OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
- }
- //OutputTypeAndRate = Output & " HBR3";
- *OutputType = (Output == dml2_dp) ? dml2_core_internal_output_type_dp : dml2_core_internal_output_type_edp;
- *OutputRate = dml2_core_internal_output_rate_dp_rate_hbr3;
- }
- }
- } else if (Output == dml2_hdmifrl) {
- if (DSCEnable == dml2_dsc_enable) {
- *RequiresDSC = true;
- LinkDSCEnable = true;
- *RequiresFEC = true;
- } else {
- *RequiresDSC = false;
- LinkDSCEnable = false;
- *RequiresFEC = false;
- }
- *OutBpp = 0;
- if (PHYCLKD18 >= 3000.0 / 18) {
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, 3000, 3, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
- //OutputTypeAndRate = Output & "3x3";
- *OutputType = dml2_core_internal_output_type_hdmifrl;
- *OutputRate = dml2_core_internal_output_rate_hdmi_rate_3x3;
- }
- if (*OutBpp == 0 && PHYCLKD18 >= 6000.0 / 18) {
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, 6000, 3, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
- //OutputTypeAndRate = Output & "6x3";
- *OutputType = dml2_core_internal_output_type_hdmifrl;
- *OutputRate = dml2_core_internal_output_rate_hdmi_rate_6x3;
- }
- if (*OutBpp == 0 && PHYCLKD18 >= 6000.0 / 18) {
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, 6000, 4, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
- //OutputTypeAndRate = Output & "6x4";
- *OutputType = dml2_core_internal_output_type_hdmifrl;
- *OutputRate = dml2_core_internal_output_rate_hdmi_rate_6x4;
- }
- if (*OutBpp == 0 && PHYCLKD18 >= 8000.0 / 18) {
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, 8000, 4, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
- //OutputTypeAndRate = Output & "8x4";
- *OutputType = dml2_core_internal_output_type_hdmifrl;
- *OutputRate = dml2_core_internal_output_rate_hdmi_rate_8x4;
- }
- if (*OutBpp == 0 && PHYCLKD18 >= 10000.0 / 18) {
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, 10000, 4, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
- if (*OutBpp == 0 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0 && PHYCLKD18 < 12000.0 / 18) {
- *RequiresDSC = true;
- LinkDSCEnable = true;
- *RequiresFEC = true;
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, 10000, 4, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
- }
- //OutputTypeAndRate = Output & "10x4";
- *OutputType = dml2_core_internal_output_type_hdmifrl;
- *OutputRate = dml2_core_internal_output_rate_hdmi_rate_10x4;
- }
- if (*OutBpp == 0 && PHYCLKD18 >= 12000.0 / 18) {
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, 12000, 4, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
- if (*OutBpp == 0 && DSCEnable == dml2_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
- *RequiresDSC = true;
- LinkDSCEnable = true;
- *RequiresFEC = true;
- *OutBpp = TruncToValidBPP(&s->TruncToValidBPP_locals, 12000, 4, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (unsigned int)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, &dummy);
- }
- //OutputTypeAndRate = Output & "12x4";
- *OutputType = dml2_core_internal_output_type_hdmifrl;
- *OutputRate = dml2_core_internal_output_rate_hdmi_rate_12x4;
- }
- }
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: RequiresDSC = %u\n", __func__, *RequiresDSC);
- dml2_printf("DML::%s: RequiresFEC = %u\n", __func__, *RequiresFEC);
- dml2_printf("DML::%s: OutBpp = %f\n", __func__, *OutBpp);
-#endif
-}
-
-static double CalculateWriteBackDISPCLK(
- enum dml2_source_format_class WritebackPixelFormat,
- double PixelClock,
- double WritebackHRatio,
- double WritebackVRatio,
- unsigned int WritebackHTaps,
- unsigned int WritebackVTaps,
- unsigned int WritebackSourceWidth,
- unsigned int WritebackDestinationWidth,
- unsigned int HTotal,
- unsigned int WritebackLineBufferSize)
-{
- double DISPCLK_H, DISPCLK_V, DISPCLK_HB;
-
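- // Writeback DISPCLK must satisfy three limits: horizontal taps throughput,
- // vertical taps per output line, and the line-buffer-limited case; take the
- // largest of the three.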
- DISPCLK_H = PixelClock * math_ceil2((double)WritebackHTaps / 8.0, 1) / WritebackHRatio;
- DISPCLK_V = PixelClock * (WritebackVTaps * math_ceil2((double)WritebackDestinationWidth / 6.0, 1) + 8.0) / (double)HTotal;
- DISPCLK_HB = PixelClock * WritebackVTaps * (WritebackDestinationWidth * WritebackVTaps - WritebackLineBufferSize / 57.0) / 6.0 / (double)WritebackSourceWidth;
- return math_max3(DISPCLK_H, DISPCLK_V, DISPCLK_HB);
-}
-
-static double RequiredDTBCLK(
- bool DSCEnable,
- double PixelClock,
- enum dml2_output_format_class OutputFormat,
- double OutputBpp,
- unsigned int DSCSlices,
- unsigned int HTotal,
- unsigned int HActive,
- unsigned int AudioRate,
- unsigned int AudioLayout)
-{
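- // Without DSC, DTBCLK tracks the pixel rate scaled by the output bpp (with a
- // floor of 25). With DSC, it is set by the compressed tribyte rates over the
- // active and blanking periods, again floored at 25, plus a 0.2% margin.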
- if (DSCEnable != true) {
- return math_max2(PixelClock / 4.0 * OutputBpp / 24.0, 25.0);
- } else {
- double PixelWordRate = PixelClock / (OutputFormat == dml2_444 ? 1 : 2);
- double HCActive = math_ceil2(DSCSlices * math_ceil2(OutputBpp * math_ceil2(HActive / DSCSlices, 1) / 8.0, 1) / 3.0, 1);
- double HCBlank = 64 + 32 * math_ceil2(AudioRate * (AudioLayout == 1 ? 1 : 0.25) * HTotal / (PixelClock * 1000), 1);
- double AverageTribyteRate = PixelWordRate * (HCActive + HCBlank) / HTotal;
- double HActiveTribyteRate = PixelWordRate * HCActive / HActive;
- return math_max4(PixelWordRate / 4.0, AverageTribyteRate / 4.0, HActiveTribyteRate / 4.0, 25.0) * 1.002;
- }
-}
-
-static unsigned int DSCDelayRequirement(
- bool DSCEnabled,
- enum dml2_odm_mode ODMMode,
- unsigned int DSCInputBitPerComponent,
- double OutputBpp,
- unsigned int HActive,
- unsigned int HTotal,
- unsigned int NumberOfDSCSlices,
- enum dml2_output_format_class OutputFormat,
- enum dml2_output_encoder_class Output,
- double PixelClock,
- double PixelClockBackEnd)
-{
- unsigned int DSCDelayRequirement_val = 0;
- unsigned int NumberOfDSCSlicesFactor = 1;
-
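- // The DSC delay is the per-slice dscce delay plus the dsc delay, scaled by
- // the ODM combine factor, inflated to account for horizontal blanking, and
- // converted from the back-end to the front-end pixel clock domain.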
- if (DSCEnabled == true && OutputBpp != 0) {
-
- if (ODMMode == dml2_odm_mode_combine_4to1)
- NumberOfDSCSlicesFactor = 4;
- else if (ODMMode == dml2_odm_mode_combine_3to1)
- NumberOfDSCSlicesFactor = 3;
- else if (ODMMode == dml2_odm_mode_combine_2to1)
- NumberOfDSCSlicesFactor = 2;
-
- DSCDelayRequirement_val = NumberOfDSCSlicesFactor * (dscceComputeDelay(DSCInputBitPerComponent, OutputBpp, (unsigned int)(math_ceil2((double)HActive / (double)NumberOfDSCSlices, 1.0)),
- (NumberOfDSCSlices / NumberOfDSCSlicesFactor), OutputFormat, Output) + dscComputeDelay(OutputFormat, Output));
-
- DSCDelayRequirement_val = (unsigned int)(DSCDelayRequirement_val + (HTotal - HActive) * math_ceil2((double)DSCDelayRequirement_val / (double)HActive, 1.0));
- DSCDelayRequirement_val = (unsigned int)(DSCDelayRequirement_val * PixelClock / PixelClockBackEnd);
-
- } else {
- DSCDelayRequirement_val = 0;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DSCEnabled= %u\n", __func__, DSCEnabled);
- dml2_printf("DML::%s: ODMMode = %u\n", __func__, ODMMode);
- dml2_printf("DML::%s: OutputBpp = %f\n", __func__, OutputBpp);
- dml2_printf("DML::%s: HActive = %u\n", __func__, HActive);
- dml2_printf("DML::%s: HTotal = %u\n", __func__, HTotal);
- dml2_printf("DML::%s: PixelClock = %f\n", __func__, PixelClock);
- dml2_printf("DML::%s: PixelClockBackEnd = %f\n", __func__, PixelClockBackEnd);
- dml2_printf("DML::%s: OutputFormat = %u\n", __func__, OutputFormat);
- dml2_printf("DML::%s: DSCInputBitPerComponent = %u\n", __func__, DSCInputBitPerComponent);
- dml2_printf("DML::%s: NumberOfDSCSlices = %u\n", __func__, NumberOfDSCSlices);
- dml2_printf("DML::%s: DSCDelayRequirement_val = %u\n", __func__, DSCDelayRequirement_val);
-#endif
-
- return DSCDelayRequirement_val;
-}
-
-static void CalculateSurfaceSizeInMall(
- const struct dml2_display_cfg *display_cfg,
- unsigned int NumberOfActiveSurfaces,
- unsigned int MALLAllocatedForDCN,
- unsigned int BytesPerPixelY[],
- unsigned int BytesPerPixelC[],
- unsigned int Read256BytesBlockWidthY[],
- unsigned int Read256BytesBlockWidthC[],
- unsigned int Read256BytesBlockHeightY[],
- unsigned int Read256BytesBlockHeightC[],
- unsigned int ReadBlockWidthY[],
- unsigned int ReadBlockWidthC[],
- unsigned int ReadBlockHeightY[],
- unsigned int ReadBlockHeightC[],
-
- // Output
- unsigned int SurfaceSizeInMALL[],
- bool *ExceededMALLSize)
-{
- unsigned int TotalSurfaceSizeInMALLForSS = 0;
- unsigned int TotalSurfaceSizeInMALLForSubVP = 0;
- unsigned int MALLAllocatedForDCNInBytes = MALLAllocatedForDCN * 1024 * 1024;
-
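- // For stationary viewports only the blocks actually covered by the viewport
- // (clamped to the surface size) count toward MALL; otherwise the whole
- // viewport rounded up to block size counts. DCC adds its metadata at 1/256
- // of the surface bytes plus a fixed 64KB term.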
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- const struct dml2_composition_cfg *composition = &display_cfg->plane_descriptors[k].composition;
- const struct dml2_surface_cfg *surface = &display_cfg->plane_descriptors[k].surface;
-
- if (composition->viewport.stationary) {
- SurfaceSizeInMALL[k] = (unsigned int)(math_min2(math_ceil2((double)surface->plane0.width, ReadBlockWidthY[k]),
- math_floor2(composition->viewport.plane0.x_start + composition->viewport.plane0.width + ReadBlockWidthY[k] - 1, ReadBlockWidthY[k]) -
- math_floor2((double)composition->viewport.plane0.x_start, ReadBlockWidthY[k])) *
- math_min2(math_ceil2((double)surface->plane0.height, ReadBlockHeightY[k]),
- math_floor2((double)composition->viewport.plane0.y_start + composition->viewport.plane0.height + ReadBlockHeightY[k] - 1, ReadBlockHeightY[k]) -
- math_floor2((double)composition->viewport.plane0.y_start, ReadBlockHeightY[k])) * BytesPerPixelY[k]);
-
- if (ReadBlockWidthC[k] > 0) {
- SurfaceSizeInMALL[k] = (unsigned int)(SurfaceSizeInMALL[k] +
- math_min2(math_ceil2((double)surface->plane1.width, ReadBlockWidthC[k]),
- math_floor2((double)composition->viewport.plane1.y_start + composition->viewport.plane1.width + ReadBlockWidthC[k] - 1, ReadBlockWidthC[k]) -
- math_floor2((double)composition->viewport.plane1.y_start, ReadBlockWidthC[k])) *
- math_min2(math_ceil2((double)surface->plane1.height, ReadBlockHeightC[k]),
- math_floor2((double)composition->viewport.plane1.y_start + composition->viewport.plane1.height + ReadBlockHeightC[k] - 1, ReadBlockHeightC[k]) -
- math_floor2(composition->viewport.plane1.y_start, ReadBlockHeightC[k])) * BytesPerPixelC[k]);
- }
- if (surface->dcc.enable) {
- SurfaceSizeInMALL[k] = (unsigned int)(SurfaceSizeInMALL[k] +
- math_min2(math_ceil2(surface->plane0.width, 8 * Read256BytesBlockWidthY[k]),
- math_floor2(composition->viewport.plane0.x_start + composition->viewport.plane0.width + 8 * Read256BytesBlockWidthY[k] - 1, 8 * Read256BytesBlockWidthY[k]) -
- math_floor2(composition->viewport.plane0.x_start, 8 * Read256BytesBlockWidthY[k])) *
- math_min2(math_ceil2(surface->plane0.height, 8 * Read256BytesBlockHeightY[k]),
- math_floor2(composition->viewport.plane0.y_start + composition->viewport.plane0.height + 8 * Read256BytesBlockHeightY[k] - 1, 8 * Read256BytesBlockHeightY[k]) -
- math_floor2(composition->viewport.plane0.y_start, 8 * Read256BytesBlockHeightY[k])) * BytesPerPixelY[k] / 256) + (64 * 1024);
- if (Read256BytesBlockWidthC[k] > 0) {
- SurfaceSizeInMALL[k] = (unsigned int)(SurfaceSizeInMALL[k] +
- math_min2(math_ceil2(surface->plane1.width, 8 * Read256BytesBlockWidthC[k]),
-						math_floor2(composition->viewport.plane1.x_start + composition->viewport.plane1.width + 8 * Read256BytesBlockWidthC[k] - 1, 8 * Read256BytesBlockWidthC[k]) -
-						math_floor2(composition->viewport.plane1.x_start, 8 * Read256BytesBlockWidthC[k])) *
- math_min2(math_ceil2(surface->plane1.height, 8 * Read256BytesBlockHeightC[k]),
- math_floor2(composition->viewport.plane1.y_start + composition->viewport.plane1.height + 8 * Read256BytesBlockHeightC[k] - 1, 8 * Read256BytesBlockHeightC[k]) -
- math_floor2(composition->viewport.plane1.y_start, 8 * Read256BytesBlockHeightC[k])) * BytesPerPixelC[k] / 256);
- }
- }
- } else {
- SurfaceSizeInMALL[k] = (unsigned int)(math_ceil2(math_min2(surface->plane0.width, composition->viewport.plane0.width + ReadBlockWidthY[k] - 1), ReadBlockWidthY[k]) *
- math_ceil2(math_min2(surface->plane0.height, composition->viewport.plane0.height + ReadBlockHeightY[k] - 1), ReadBlockHeightY[k]) * BytesPerPixelY[k]);
- if (ReadBlockWidthC[k] > 0) {
- SurfaceSizeInMALL[k] = (unsigned int)(SurfaceSizeInMALL[k] +
- math_ceil2(math_min2(surface->plane1.width, composition->viewport.plane1.width + ReadBlockWidthC[k] - 1), ReadBlockWidthC[k]) *
- math_ceil2(math_min2(surface->plane1.height, composition->viewport.plane1.height + ReadBlockHeightC[k] - 1), ReadBlockHeightC[k]) * BytesPerPixelC[k]);
- }
- if (surface->dcc.enable) {
- SurfaceSizeInMALL[k] = (unsigned int)(SurfaceSizeInMALL[k] +
- math_ceil2(math_min2(surface->plane0.width, composition->viewport.plane0.width + 8 * Read256BytesBlockWidthY[k] - 1), 8 * Read256BytesBlockWidthY[k]) *
- math_ceil2(math_min2(surface->plane0.height, composition->viewport.plane0.height + 8 * Read256BytesBlockHeightY[k] - 1), 8 * Read256BytesBlockHeightY[k]) * BytesPerPixelY[k] / 256) + (64 * 1024);
-
- if (Read256BytesBlockWidthC[k] > 0) {
- SurfaceSizeInMALL[k] = (unsigned int)(SurfaceSizeInMALL[k] +
- math_ceil2(math_min2(surface->plane1.width, composition->viewport.plane1.width + 8 * Read256BytesBlockWidthC[k] - 1), 8 * Read256BytesBlockWidthC[k]) *
- math_ceil2(math_min2(surface->plane1.height, composition->viewport.plane1.height + 8 * Read256BytesBlockHeightC[k] - 1), 8 * Read256BytesBlockHeightC[k]) * BytesPerPixelC[k] / 256);
- }
- }
- }
- }
-
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
-		/* SS and SubVP are counted separately as they are never used at the same time */
- if (dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]))
- TotalSurfaceSizeInMALLForSubVP += SurfaceSizeInMALL[k];
- else if (display_cfg->plane_descriptors[k].overrides.refresh_from_mall == dml2_refresh_from_mall_mode_override_force_enable)
- TotalSurfaceSizeInMALLForSS += SurfaceSizeInMALL[k];
- }
-
- *ExceededMALLSize = (TotalSurfaceSizeInMALLForSS > MALLAllocatedForDCNInBytes) ||
- (TotalSurfaceSizeInMALLForSubVP > MALLAllocatedForDCNInBytes);
-
-#ifdef __DML_VBA_DEBUG__
-	dml2_printf("DML::%s: MALLAllocatedForDCNInBytes = %u\n", __func__, MALLAllocatedForDCNInBytes);
- dml2_printf("DML::%s: TotalSurfaceSizeInMALLForSubVP = %u\n", __func__, TotalSurfaceSizeInMALLForSubVP);
- dml2_printf("DML::%s: TotalSurfaceSizeInMALLForSS = %u\n", __func__, TotalSurfaceSizeInMALLForSS);
- dml2_printf("DML::%s: ExceededMALLSize = %u\n", __func__, *ExceededMALLSize);
-#endif
-}
-
-static void calculate_tdlut_setting(
- struct dml2_core_internal_scratch *scratch,
- struct dml2_core_calcs_calculate_tdlut_setting_params *p)
-{
- if (!p->setup_for_tdlut) {
- *p->tdlut_groups_per_2row_ub = 0;
- *p->tdlut_opt_time = 0;
- *p->tdlut_drain_time = 0;
- *p->tdlut_bytes_per_group = 0;
- *p->tdlut_pte_bytes_per_frame = 0;
- *p->tdlut_bytes_per_frame = 0;
- return;
- }
-
- // locals
- unsigned int tdlut_bpe = 8;
- unsigned int tdlut_width;
- unsigned int tdlut_pitch_bytes;
- unsigned int tdlut_footprint_bytes;
- unsigned int vmpg_bytes;
- unsigned int tdlut_vmpg_per_frame;
- unsigned int tdlut_pte_req_per_frame;
- unsigned int tdlut_bytes_per_line;
- unsigned int tdlut_delivery_cycles;
- double tdlut_drain_rate;
- unsigned int tdlut_mpc_width;
- unsigned int tdlut_bytes_per_group_simple;
-
- if (p->tdlut_mpc_width_flag) {
- tdlut_mpc_width = 33;
- tdlut_bytes_per_group_simple = 39 * 256;
- } else {
- tdlut_mpc_width = 17;
- tdlut_bytes_per_group_simple = 10 * 256;
- }
-
- vmpg_bytes = p->gpuvm_page_size_kbytes * 1024;
-
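-	// In simple/linear addressing the 3D LUT is treated as a flat array of
-	// 17^3 = 4913 or 33^3 = 35937 entries, rounded up to a multiple of 4
-	// (4916 / 35940); otherwise tdlut_width is the cube edge (17 or 33).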
- if (p->tdlut_addressing_mode == dml2_tdlut_simple_linear) {
- if (p->tdlut_width_mode == dml2_tdlut_width_17_cube)
- tdlut_width = 4916;
- else
- tdlut_width = 35940;
- } else {
- if (p->tdlut_width_mode == dml2_tdlut_width_17_cube)
- tdlut_width = 17;
- else // dml2_tdlut_width_33_cube
- tdlut_width = 33;
- }
-
- if (p->is_gfx11)
- tdlut_pitch_bytes = (unsigned int)math_ceil2(tdlut_width * tdlut_bpe, 256); //256B alignment
- else
- tdlut_pitch_bytes = (unsigned int)math_ceil2(tdlut_width * tdlut_bpe, 128); //128B alignment
-
- if (p->tdlut_addressing_mode == dml2_tdlut_sw_linear)
- tdlut_footprint_bytes = tdlut_pitch_bytes * tdlut_width * tdlut_width;
- else
- tdlut_footprint_bytes = tdlut_pitch_bytes;
-
- if (!p->gpuvm_enable) {
- tdlut_vmpg_per_frame = 0;
- tdlut_pte_req_per_frame = 0;
- } else {
- tdlut_vmpg_per_frame = (unsigned int)math_ceil2(tdlut_footprint_bytes - 1, vmpg_bytes) / vmpg_bytes + 1;
- tdlut_pte_req_per_frame = (unsigned int)math_ceil2(tdlut_vmpg_per_frame - 1, 8) / 8 + 1;
- }
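-	// Each PTE request covers 8 page-table entries (8 x 8B = 64B returned),
-	// hence the divide-by-8 above and the *64 below.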
- tdlut_bytes_per_line = (unsigned int)math_ceil2(tdlut_width * tdlut_bpe, 64); //64b request
- *p->tdlut_pte_bytes_per_frame = tdlut_pte_req_per_frame * 64;
-
- if (p->tdlut_addressing_mode == dml2_tdlut_sw_linear) {
- //the tdlut_width is either 17 or 33 but the 33x33x33 is subsampled every other line/slice
- *p->tdlut_bytes_per_frame = tdlut_bytes_per_line * tdlut_mpc_width * tdlut_mpc_width;
- *p->tdlut_bytes_per_group = tdlut_bytes_per_line * tdlut_mpc_width;
-		//the delivery cycles are DispClk cycles per line * number of lines * number of slices
- tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_mpc_width / 2.0, 1) * tdlut_mpc_width * tdlut_mpc_width;
- tdlut_drain_rate = tdlut_bytes_per_line * p->dispclk_mhz / math_ceil2(tdlut_mpc_width/2.0, 1);
- } else {
- //tdlut_addressing_mode = tdlut_simple_linear, 3dlut width should be 4*1229=4916 elements
- *p->tdlut_bytes_per_frame = (unsigned int)math_ceil2(tdlut_width * tdlut_bpe, 256);
- *p->tdlut_bytes_per_group = tdlut_bytes_per_group_simple;
- tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_width / 2.0, 1);
- tdlut_drain_rate = 2 * tdlut_bpe * p->dispclk_mhz;
- }
-
- //the tdlut is fetched during the 2 row times of prefetch.
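-	//tdlut_opt_time covers (tdlut_bytes_per_frame - cursor buffer) at the drain
-	//rate; the remaining cursor-buffer-sized portion accounts for tdlut_drain_time.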
- if (p->setup_for_tdlut) {
- *p->tdlut_groups_per_2row_ub = (unsigned int)math_ceil2(*p->tdlut_bytes_per_frame / *p->tdlut_bytes_per_group, 1);
- *p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate;
- *p->tdlut_drain_time = p->cursor_buffer_size * 1024 / tdlut_drain_rate;
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: gpuvm_enable = %d\n", __func__, p->gpuvm_enable);
- dml2_printf("DML::%s: vmpg_bytes = %d\n", __func__, vmpg_bytes);
- dml2_printf("DML::%s: tdlut_vmpg_per_frame = %d\n", __func__, tdlut_vmpg_per_frame);
- dml2_printf("DML::%s: tdlut_pte_req_per_frame = %d\n", __func__, tdlut_pte_req_per_frame);
- dml2_printf("DML::%s: dispclk_mhz = %f\n", __func__, p->dispclk_mhz);
- dml2_printf("DML::%s: tdlut_width = %u\n", __func__, tdlut_width);
- dml2_printf("DML::%s: tdlut_addressing_mode = %u\n", __func__, p->tdlut_addressing_mode);
- dml2_printf("DML::%s: tdlut_pitch_bytes = %u\n", __func__, tdlut_pitch_bytes);
- dml2_printf("DML::%s: tdlut_footprint_bytes = %u\n", __func__, tdlut_footprint_bytes);
- dml2_printf("DML::%s: tdlut_bytes_per_frame = %u\n", __func__, *p->tdlut_bytes_per_frame);
- dml2_printf("DML::%s: tdlut_bytes_per_line = %u\n", __func__, tdlut_bytes_per_line);
- dml2_printf("DML::%s: tdlut_bytes_per_group = %u\n", __func__, *p->tdlut_bytes_per_group);
- dml2_printf("DML::%s: tdlut_drain_rate = %f\n", __func__, tdlut_drain_rate);
- dml2_printf("DML::%s: tdlut_delivery_cycles = %u\n", __func__, tdlut_delivery_cycles);
- dml2_printf("DML::%s: tdlut_opt_time = %f\n", __func__, *p->tdlut_opt_time);
- dml2_printf("DML::%s: tdlut_drain_time = %f\n", __func__, *p->tdlut_drain_time);
- dml2_printf("DML::%s: tdlut_groups_per_2row_ub = %d\n", __func__, *p->tdlut_groups_per_2row_ub);
-#endif
-}
-
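-/*
- * Tarb is the time for the "extra" (non-pixel) bytes to return at the current
- * return bandwidth: per-DPP pixel chunks, DCC metadata chunks, 3D LUT groups
- * and, with GPUVM, the dpte group bytes scaled by the host VM inefficiency
- * factor. The prefetch variant differs only in that inefficiency factor.
- */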
-static void CalculateTarb(
- const struct dml2_display_cfg *display_cfg,
- unsigned int PixelChunkSizeInKByte,
- unsigned int NumberOfActiveSurfaces,
- unsigned int NumberOfDPP[],
- unsigned int dpte_group_bytes[],
- unsigned int tdlut_bytes_per_group[],
- double HostVMInefficiencyFactor,
- double HostVMInefficiencyFactorPrefetch,
- unsigned int HostVMMinPageSize,
- double ReturnBW,
- unsigned int MetaChunkSize,
-
- // output
- double *Tarb,
- double *Tarb_prefetch)
-{
- double extra_bytes = 0;
- double extra_bytes_prefetch = 0;
- double HostVMDynamicLevels = CalculateHostVMDynamicLevels(display_cfg->gpuvm_enable, display_cfg->hostvm_enable, HostVMMinPageSize, display_cfg->hostvm_max_non_cached_page_table_levels);
-
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- extra_bytes = extra_bytes + (NumberOfDPP[k] * PixelChunkSizeInKByte * 1024);
-
- if (display_cfg->plane_descriptors[k].surface.dcc.enable)
- extra_bytes = extra_bytes + (MetaChunkSize * 1024);
-
- if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut)
- extra_bytes = extra_bytes + tdlut_bytes_per_group[k];
- }
-
- extra_bytes_prefetch = extra_bytes;
-
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (display_cfg->gpuvm_enable == true) {
- extra_bytes = extra_bytes + NumberOfDPP[k] * dpte_group_bytes[k] * (1 + 8 * HostVMDynamicLevels) * HostVMInefficiencyFactor;
- extra_bytes_prefetch = extra_bytes_prefetch + NumberOfDPP[k] * dpte_group_bytes[k] * (1 + 8 * HostVMDynamicLevels) * HostVMInefficiencyFactorPrefetch;
- }
- }
- *Tarb = extra_bytes / ReturnBW;
- *Tarb_prefetch = extra_bytes_prefetch / ReturnBW;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: PixelChunkSizeInKByte = %d\n", __func__, PixelChunkSizeInKByte);
- dml2_printf("DML::%s: MetaChunkSize = %d\n", __func__, MetaChunkSize);
- dml2_printf("DML::%s: extra_bytes = %f\n", __func__, extra_bytes);
- dml2_printf("DML::%s: extra_bytes_prefetch = %f\n", __func__, extra_bytes_prefetch);
-#endif
-}
-
-static double CalculateTWait(
- long reserved_vblank_time_ns,
- double UrgentLatency,
- double Ttrip)
-{
- double TWait;
- double t_urg_trip = math_max2(UrgentLatency, Ttrip);
- TWait = reserved_vblank_time_ns / 1000.0 + t_urg_trip;
-
-#ifdef __DML_VBA_DEBUG__
-	dml2_printf("DML::%s: reserved_vblank_time_ns = %ld\n", __func__, reserved_vblank_time_ns);
- dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
- dml2_printf("DML::%s: Ttrip = %f\n", __func__, Ttrip);
- dml2_printf("DML::%s: TWait = %f\n", __func__, TWait);
-#endif
- return TWait;
-}
-
-
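-/*
- * Derives the VUPDATE/VREADY window from the clock ratios and repeater delay,
- * plus the dynamic metadata budget: Tdmbf (transfer from DCHUB to the DIO
- * output buffer), Tdmec (one line time of DIO transmission) and Tdmsks (the
- * deadline before active by which transmission must complete).
- */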
-static void CalculateVUpdateAndDynamicMetadataParameters(
- unsigned int MaxInterDCNTileRepeaters,
- double Dppclk,
- double Dispclk,
- double DCFClkDeepSleep,
- double PixelClock,
- unsigned int HTotal,
- unsigned int VBlank,
- unsigned int DynamicMetadataTransmittedBytes,
- unsigned int DynamicMetadataLinesBeforeActiveRequired,
- unsigned int InterlaceEnable,
- bool ProgressiveToInterlaceUnitInOPP,
-
- // Output
- double *TSetup,
- double *Tdmbf,
- double *Tdmec,
- double *Tdmsks,
- unsigned int *VUpdateOffsetPix,
- unsigned int *VUpdateWidthPix,
- unsigned int *VReadyOffsetPix)
-{
- double TotalRepeaterDelayTime;
- TotalRepeaterDelayTime = MaxInterDCNTileRepeaters * (2 / Dppclk + 3 / Dispclk);
- *VUpdateWidthPix = (unsigned int)(math_ceil2((14.0 / DCFClkDeepSleep + 12.0 / Dppclk + TotalRepeaterDelayTime) * PixelClock, 1.0));
- *VReadyOffsetPix = (unsigned int)(math_ceil2(math_max2(150.0 / Dppclk, TotalRepeaterDelayTime + 20.0 / DCFClkDeepSleep + 10.0 / Dppclk) * PixelClock, 1.0));
- *VUpdateOffsetPix = (unsigned int)(math_ceil2(HTotal / 4.0, 1.0));
- *TSetup = (*VUpdateOffsetPix + *VUpdateWidthPix + *VReadyOffsetPix) / PixelClock;
- *Tdmbf = DynamicMetadataTransmittedBytes / 4.0 / Dispclk;
- *Tdmec = HTotal / PixelClock;
-
- if (DynamicMetadataLinesBeforeActiveRequired == 0) {
- *Tdmsks = VBlank * HTotal / PixelClock / 2.0;
- } else {
- *Tdmsks = DynamicMetadataLinesBeforeActiveRequired * HTotal / PixelClock;
- }
- if (InterlaceEnable == 1 && ProgressiveToInterlaceUnitInOPP == false) {
- *Tdmsks = *Tdmsks / 2;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DynamicMetadataLinesBeforeActiveRequired = %u\n", __func__, DynamicMetadataLinesBeforeActiveRequired);
- dml2_printf("DML::%s: VBlank = %u\n", __func__, VBlank);
- dml2_printf("DML::%s: HTotal = %u\n", __func__, HTotal);
- dml2_printf("DML::%s: PixelClock = %f\n", __func__, PixelClock);
- dml2_printf("DML::%s: Dppclk = %f\n", __func__, Dppclk);
- dml2_printf("DML::%s: DCFClkDeepSleep = %f\n", __func__, DCFClkDeepSleep);
- dml2_printf("DML::%s: MaxInterDCNTileRepeaters = %u\n", __func__, MaxInterDCNTileRepeaters);
- dml2_printf("DML::%s: TotalRepeaterDelayTime = %f\n", __func__, TotalRepeaterDelayTime);
-
- dml2_printf("DML::%s: VUpdateWidthPix = %u\n", __func__, *VUpdateWidthPix);
- dml2_printf("DML::%s: VReadyOffsetPix = %u\n", __func__, *VReadyOffsetPix);
- dml2_printf("DML::%s: VUpdateOffsetPix = %u\n", __func__, *VUpdateOffsetPix);
-
- dml2_printf("DML::%s: Tdmsks = %f\n", __func__, *Tdmsks);
-#endif
-}
-
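-/*
- * Sums, over all non-excluded planes, the worst of: vm/row prefetch bandwidth,
- * active read bandwidth (luma + chroma + cursor, with urgent burst and
- * DRAM/MALL overhead factors applied) plus flip bandwidth, and prefetch
- * bandwidth plus flip bandwidth. Phantom (SubVP) planes are only counted in
- * the svp_prefetch state.
- */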
-static double get_urgent_bandwidth_required(
- struct dml2_core_shared_get_urgent_bandwidth_required_locals *l,
- const struct dml2_display_cfg *display_cfg,
- enum dml2_core_internal_soc_state_type state_type,
- enum dml2_core_internal_bw_type bw_type,
- bool inc_flip_bw, // including flip bw
- unsigned int NumberOfActiveSurfaces,
- unsigned int NumberOfDPP[],
- double dcc_dram_bw_nom_overhead_factor_p0[],
- double dcc_dram_bw_nom_overhead_factor_p1[],
- double dcc_dram_bw_pref_overhead_factor_p0[],
- double dcc_dram_bw_pref_overhead_factor_p1[],
- double mall_prefetch_sdp_overhead_factor[],
- double mall_prefetch_dram_overhead_factor[],
- double ReadBandwidthLuma[],
- double ReadBandwidthChroma[],
- double PrefetchBandwidthLuma[],
- double PrefetchBandwidthChroma[],
- double cursor_bw[],
- double dpte_row_bw[],
- double meta_row_bw[],
- double prefetch_cursor_bw[],
- double prefetch_vmrow_bw[],
- double flip_bw[],
- double UrgentBurstFactorLuma[],
- double UrgentBurstFactorChroma[],
- double UrgentBurstFactorCursor[],
- double UrgentBurstFactorLumaPre[],
- double UrgentBurstFactorChromaPre[],
- double UrgentBurstFactorCursorPre[])
-{
- memset(l, 0, sizeof(struct dml2_core_shared_get_urgent_bandwidth_required_locals));
-
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- l->mall_svp_prefetch_factor = (state_type == dml2_core_internal_soc_state_svp_prefetch) ? (bw_type == dml2_core_internal_bw_dram ? mall_prefetch_dram_overhead_factor[k] : mall_prefetch_sdp_overhead_factor[k]) : 1.0;
- l->tmp_nom_adj_factor_p0 = (bw_type == dml2_core_internal_bw_dram ? dcc_dram_bw_nom_overhead_factor_p0[k] : 1.0) * l->mall_svp_prefetch_factor;
- l->tmp_nom_adj_factor_p1 = (bw_type == dml2_core_internal_bw_dram ? dcc_dram_bw_nom_overhead_factor_p1[k] : 1.0) * l->mall_svp_prefetch_factor;
- l->tmp_pref_adj_factor_p0 = (bw_type == dml2_core_internal_bw_dram ? dcc_dram_bw_pref_overhead_factor_p0[k] : 1.0) * l->mall_svp_prefetch_factor;
- l->tmp_pref_adj_factor_p1 = (bw_type == dml2_core_internal_bw_dram ? dcc_dram_bw_pref_overhead_factor_p1[k] : 1.0) * l->mall_svp_prefetch_factor;
-
- l->adj_factor_p0 = UrgentBurstFactorLuma[k] * l->tmp_nom_adj_factor_p0;
- l->adj_factor_p1 = UrgentBurstFactorChroma[k] * l->tmp_nom_adj_factor_p1;
- l->adj_factor_cur = UrgentBurstFactorCursor[k];
- l->adj_factor_p0_pre = UrgentBurstFactorLumaPre[k] * l->tmp_pref_adj_factor_p0;
- l->adj_factor_p1_pre = UrgentBurstFactorChromaPre[k] * l->tmp_pref_adj_factor_p1;
- l->adj_factor_cur_pre = UrgentBurstFactorCursorPre[k];
-
-		// both dchub_urgent_bw_at_sdp_noflip and dchub_urgent_bw_at_dram_noflip don't include the phantom_pipe because iflips don't occur while phantom_pipe is active
- bool is_phantom = dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]);
- bool exclude_this_plane = 0;
-
- // Exclude phantom pipe in bw calculation for non svp prefetch state
- if (state_type != dml2_core_internal_soc_state_svp_prefetch && is_phantom)
- exclude_this_plane = 1;
-
- if (display_cfg->plane_descriptors[k].immediate_flip == false || !inc_flip_bw)
- l->per_plane_flip_bw[k] = NumberOfDPP[k] * (dpte_row_bw[k] + meta_row_bw[k]);
- else
- l->per_plane_flip_bw[k] = NumberOfDPP[k] * flip_bw[k];
-
-
- if (!exclude_this_plane) {
- l->required_bandwidth_mbps_this_surface = math_max3(NumberOfDPP[k] * prefetch_vmrow_bw[k],
- l->per_plane_flip_bw[k] + ReadBandwidthLuma[k] * l->adj_factor_p0 + ReadBandwidthChroma[k] * l->adj_factor_p1 + cursor_bw[k] * l->adj_factor_cur,
- l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthLuma[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre);
-
- l->required_bandwidth_mbps = l->required_bandwidth_mbps + l->required_bandwidth_mbps_this_surface;
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, NumberOfDPP=%d\n", __func__, k, NumberOfDPP[k]);
- dml2_printf("DML::%s: k=%d, mall_svp_prefetch_factor=%f\n", __func__, k, l->mall_svp_prefetch_factor);
- dml2_printf("DML::%s: k=%d, adj_factor_p0=%f\n", __func__, k, l->adj_factor_p0);
- dml2_printf("DML::%s: k=%d, adj_factor_p1=%f\n", __func__, k, l->adj_factor_p1);
- dml2_printf("DML::%s: k=%d, adj_factor_cur=%f\n", __func__, k, l->adj_factor_cur);
-
- dml2_printf("DML::%s: k=%d, adj_factor_p0_pre=%f\n", __func__, k, l->adj_factor_p0_pre);
- dml2_printf("DML::%s: k=%d, adj_factor_p1_pre=%f\n", __func__, k, l->adj_factor_p1_pre);
- dml2_printf("DML::%s: k=%d, adj_factor_cur_pre=%f\n", __func__, k, l->adj_factor_cur_pre);
-
- dml2_printf("DML::%s: k=%d, per_plane_flip_bw=%f\n", __func__, k, l->per_plane_flip_bw[k]);
- dml2_printf("DML::%s: k=%d, prefetch_vmrow_bw=%f\n", __func__, k, prefetch_vmrow_bw[k]);
- dml2_printf("DML::%s: k=%d, ReadBandwidthLuma=%f\n", __func__, k, ReadBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%d, ReadBandwidthChroma=%f\n", __func__, k, ReadBandwidthChroma[k]);
- dml2_printf("DML::%s: k=%d, cursor_bw=%f\n", __func__, k, cursor_bw[k]);
-
- dml2_printf("DML::%s: k=%d, meta_row_bw=%f\n", __func__, k, meta_row_bw[k]);
- dml2_printf("DML::%s: k=%d, dpte_row_bw=%f\n", __func__, k, dpte_row_bw[k]);
- dml2_printf("DML::%s: k=%d, PrefetchBandwidthLuma=%f\n", __func__, k, PrefetchBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%d, PrefetchBandwidthChroma=%f\n", __func__, k, PrefetchBandwidthChroma[k]);
- dml2_printf("DML::%s: k=%d, prefetch_cursor_bw=%f\n", __func__, k, prefetch_cursor_bw[k]);
- dml2_printf("DML::%s: k=%d, required_bandwidth_mbps=%f (total), inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, inc_flip_bw, is_phantom, exclude_this_plane);
-#endif
- }
-
- return l->required_bandwidth_mbps;
-}
-
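-/*
- * ExtraLatency is the arbitration-to-return delay (in DCFCLK cycles) plus, for
- * the non-dcn4x QoS model, round-trip ping latency and reordering-buffer drain
- * time, plus the host VM trip time (2 x Ttrip) when applicable, plus Tarb from
- * CalculateTarb().
- */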
-static void CalculateExtraLatency(
- const struct dml2_display_cfg *display_cfg,
- unsigned int ROBBufferSizeInKByte,
- unsigned int RoundTripPingLatencyCycles,
- unsigned int ReorderingBytes,
- double DCFCLK,
- double FabricClock,
- unsigned int PixelChunkSizeInKByte,
- double ReturnBW,
- unsigned int NumberOfActiveSurfaces,
- unsigned int NumberOfDPP[],
- unsigned int dpte_group_bytes[],
- unsigned int tdlut_bytes_per_group[],
- double HostVMInefficiencyFactor,
- double HostVMInefficiencyFactorPrefetch,
- unsigned int HostVMMinPageSize,
- enum dml2_qos_param_type qos_type,
- bool max_oustanding_when_urgent_expected,
- unsigned int max_outstanding_requests,
- unsigned int request_size_bytes_luma[],
- unsigned int request_size_bytes_chroma[],
- unsigned int MetaChunkSize,
- unsigned int dchub_arb_to_ret_delay,
- double Ttrip,
- unsigned int hostvm_mode,
-
- // output
- double *ExtraLatency,
- double *ExtraLatency_sr,
- double *ExtraLatencyPrefetch)
-{
- double Tarb;
- double Tarb_prefetch;
-
- CalculateTarb(
- display_cfg,
- PixelChunkSizeInKByte,
- NumberOfActiveSurfaces,
- NumberOfDPP,
- dpte_group_bytes,
- tdlut_bytes_per_group,
- HostVMInefficiencyFactor,
- HostVMInefficiencyFactorPrefetch,
- HostVMMinPageSize,
- ReturnBW,
- MetaChunkSize,
- // output
- &Tarb,
- &Tarb_prefetch);
-
- unsigned int max_request_size_bytes = 0;
- double Tex_trips = (display_cfg->hostvm_enable && hostvm_mode == 1) ? (2.0 * Ttrip) : 0.0;
-
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (request_size_bytes_luma[k] > max_request_size_bytes)
- max_request_size_bytes = request_size_bytes_luma[k];
- if (request_size_bytes_chroma[k] > max_request_size_bytes)
- max_request_size_bytes = request_size_bytes_chroma[k];
- }
-
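-	// dcn4x QoS: base latency is the arb-to-return delay; if
-	// max_oustanding_when_urgent_expected, also add the time to drain the part
-	// of the ROB not already covered by the maximum outstanding requests.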
- if (qos_type == dml2_qos_param_type_dcn4x) {
- *ExtraLatency_sr = dchub_arb_to_ret_delay / DCFCLK;
- *ExtraLatency = *ExtraLatency_sr;
- if (max_oustanding_when_urgent_expected)
- *ExtraLatency = *ExtraLatency + (ROBBufferSizeInKByte * 1024 - max_outstanding_requests * max_request_size_bytes) / ReturnBW;
- } else {
- *ExtraLatency_sr = dchub_arb_to_ret_delay / DCFCLK + RoundTripPingLatencyCycles / FabricClock + ReorderingBytes / ReturnBW;
- *ExtraLatency = *ExtraLatency_sr;
- }
- *ExtraLatency = *ExtraLatency + Tex_trips;
- *ExtraLatencyPrefetch = *ExtraLatency + Tarb_prefetch;
- *ExtraLatency = *ExtraLatency + Tarb;
- *ExtraLatency_sr = *ExtraLatency_sr + Tarb;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: qos_type=%u\n", __func__, qos_type);
- dml2_printf("DML::%s: hostvm_mode=%u\n", __func__, hostvm_mode);
-	dml2_printf("DML::%s: Tex_trips=%f\n", __func__, Tex_trips);
- dml2_printf("DML::%s: max_oustanding_when_urgent_expected=%u\n", __func__, max_oustanding_when_urgent_expected);
- dml2_printf("DML::%s: FabricClock=%f\n", __func__, FabricClock);
- dml2_printf("DML::%s: DCFCLK=%f\n", __func__, DCFCLK);
- dml2_printf("DML::%s: ReturnBW=%f\n", __func__, ReturnBW);
- dml2_printf("DML::%s: RoundTripPingLatencyCycles=%u\n", __func__, RoundTripPingLatencyCycles);
- dml2_printf("DML::%s: ReorderingBytes=%u\n", __func__, ReorderingBytes);
- dml2_printf("DML::%s: Tarb=%f\n", __func__, Tarb);
- dml2_printf("DML::%s: ExtraLatency=%f\n", __func__, *ExtraLatency);
- dml2_printf("DML::%s: ExtraLatency_sr=%f\n", __func__, *ExtraLatency_sr);
- dml2_printf("DML::%s: ExtraLatencyPrefetch=%f\n", __func__, *ExtraLatencyPrefetch);
-#endif
-}
-
-static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch, struct dml2_core_calcs_CalculatePrefetchSchedule_params *p)
-{
- struct dml2_core_calcs_CalculatePrefetchSchedule_locals *s = &scratch->CalculatePrefetchSchedule_locals;
-
- s->NoTimeToPrefetch = false;
- s->DPPCycles = 0;
- s->DISPCLKCycles = 0;
- s->DSTTotalPixelsAfterScaler = 0.0;
- s->LineTime = 0.0;
- s->dst_y_prefetch_equ = 0.0;
- s->prefetch_bw_oto = 0.0;
- s->Tvm_oto = 0.0;
- s->Tr0_oto = 0.0;
- s->Tvm_oto_lines = 0.0;
- s->Tr0_oto_lines = 0.0;
- s->dst_y_prefetch_oto = 0.0;
- s->TimeForFetchingVM = 0.0;
- s->TimeForFetchingRowInVBlank = 0.0;
- s->LinesToRequestPrefetchPixelData = 0.0;
- s->HostVMDynamicLevelsTrips = 0;
- s->trip_to_mem = 0.0;
- *p->Tvm_trips = 0.0;
- *p->Tr0_trips = 0.0;
- s->Tvm_trips_rounded = 0.0;
- s->Tr0_trips_rounded = 0.0;
- s->max_Tsw = 0.0;
- s->Lsw_oto = 0.0;
- s->Tpre_rounded = 0.0;
- s->prefetch_bw_equ = 0.0;
- s->Tvm_equ = 0.0;
- s->Tr0_equ = 0.0;
- s->Tdmbf = 0.0;
- s->Tdmec = 0.0;
- s->Tdmsks = 0.0;
- s->prefetch_sw_bytes = 0.0;
- s->prefetch_bw_pr = 0.0;
- s->bytes_pp = 0.0;
- s->dep_bytes = 0.0;
- s->min_Lsw_oto = 0.0;
- s->Tsw_est1 = 0.0;
- s->Tsw_est3 = 0.0;
- s->cursor_prefetch_bytes = 0;
- *p->prefetch_cursor_bw = 0;
- bool dcc_mrq_enable = (p->dcc_enable && p->mrq_present);
-
- s->TWait_p = p->TWait - p->Ttrip; // TWait includes max(Turg, Ttrip)
-
- if (p->display_cfg->gpuvm_enable == true && p->display_cfg->hostvm_enable == true) {
- s->HostVMDynamicLevelsTrips = p->display_cfg->hostvm_max_non_cached_page_table_levels;
- } else {
- s->HostVMDynamicLevelsTrips = 0;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dcc_enable = %u\n", __func__, p->dcc_enable);
- dml2_printf("DML::%s: mrq_present = %u\n", __func__, p->mrq_present);
- dml2_printf("DML::%s: dcc_mrq_enable = %u\n", __func__, dcc_mrq_enable);
- dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, p->display_cfg->gpuvm_enable);
- dml2_printf("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels);
- dml2_printf("DML::%s: DCCEnable = %u\n", __func__, p->myPipe->DCCEnable);
- dml2_printf("DML::%s: VStartup = %u\n", __func__, p->VStartup);
- dml2_printf("DML::%s: MaxVStartup = %u\n", __func__, p->MaxVStartup);
- dml2_printf("DML::%s: HostVMEnable = %u\n", __func__, p->display_cfg->hostvm_enable);
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
- dml2_printf("DML::%s: TWait = %f\n", __func__, p->TWait);
- dml2_printf("DML::%s: TWait_p = %f\n", __func__, s->TWait_p);
- dml2_printf("DML::%s: Ttrip = %f\n", __func__, p->Ttrip);
- dml2_printf("DML::%s: myPipe->Dppclk = %f\n", __func__, p->myPipe->Dppclk);
- dml2_printf("DML::%s: myPipe->Dispclk = %f\n", __func__, p->myPipe->Dispclk);
-#endif
- CalculateVUpdateAndDynamicMetadataParameters(
- p->MaxInterDCNTileRepeaters,
- p->myPipe->Dppclk,
- p->myPipe->Dispclk,
- p->myPipe->DCFClkDeepSleep,
- p->myPipe->PixelClock,
- p->myPipe->HTotal,
- p->myPipe->VBlank,
- p->DynamicMetadataTransmittedBytes,
- p->DynamicMetadataLinesBeforeActiveRequired,
- p->myPipe->InterlaceEnable,
- p->myPipe->ProgressiveToInterlaceUnitInOPP,
- p->TSetup,
-
- // Output
- &s->Tdmbf,
- &s->Tdmec,
- &s->Tdmsks,
- p->VUpdateOffsetPix,
- p->VUpdateWidthPix,
- p->VReadyOffsetPix);
-
- s->LineTime = p->myPipe->HTotal / p->myPipe->PixelClock;
- s->trip_to_mem = p->Ttrip;
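-	// Tvm_trips: prefetch extra latency plus one memory trip per GPUVM page
-	// table level (times HostVM trips + 1). Tr0_trips: the row (PTE/meta) fetch
-	// trips, but at least tdlut_opt_time / 2 when a 3D LUT is set up.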
- *p->Tvm_trips = p->ExtraLatencyPrefetch + s->trip_to_mem * (p->display_cfg->gpuvm_max_page_table_levels * (s->HostVMDynamicLevelsTrips + 1));
- if (dcc_mrq_enable)
- *p->Tvm_trips_flip = *p->Tvm_trips;
- else
- *p->Tvm_trips_flip = *p->Tvm_trips - s->trip_to_mem;
- *p->Tr0_trips_flip = s->trip_to_mem * (s->HostVMDynamicLevelsTrips + 1);
- *p->Tr0_trips = math_max2(*p->Tr0_trips_flip, p->tdlut_opt_time / 2);
-
- if (p->DynamicMetadataVMEnabled == true) {
- *p->Tdmdl_vm = s->TWait_p + *p->Tvm_trips;
- *p->Tdmdl = *p->Tdmdl_vm + p->Ttrip;
- } else {
- *p->Tdmdl_vm = 0;
- *p->Tdmdl = p->TWait + p->ExtraLatencyPrefetch; // Tex
- }
-
- if (p->DynamicMetadataEnable == true) {
- if (p->VStartup * s->LineTime < *p->TSetup + *p->Tdmdl + s->Tdmbf + s->Tdmec + s->Tdmsks) {
- *p->NotEnoughTimeForDynamicMetadata = true;
- dml2_printf("DML::%s: Not Enough Time for Dynamic Meta!\n", __func__);
- dml2_printf("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, s->Tdmbf);
- dml2_printf("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, s->Tdmec);
- dml2_printf("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, s->Tdmsks);
- dml2_printf("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *p->Tdmdl);
- } else {
- *p->NotEnoughTimeForDynamicMetadata = false;
- }
- } else {
- *p->NotEnoughTimeForDynamicMetadata = false;
- }
-
- if (p->myPipe->ScalerEnabled)
- s->DPPCycles = (unsigned int)(p->DPPCLKDelaySubtotalPlusCNVCFormater + p->DPPCLKDelaySCL);
- else
- s->DPPCycles = (unsigned int)(p->DPPCLKDelaySubtotalPlusCNVCFormater + p->DPPCLKDelaySCLLBOnly);
-
- s->DPPCycles = (unsigned int)(s->DPPCycles + p->myPipe->NumberOfCursors * p->DPPCLKDelayCNVCCursor);
-
- s->DISPCLKCycles = (unsigned int)p->DISPCLKDelaySubtotal;
-
- if (p->myPipe->Dppclk == 0.0 || p->myPipe->Dispclk == 0.0)
- return true;
-
- *p->DSTXAfterScaler = (unsigned int)math_round(s->DPPCycles * p->myPipe->PixelClock / p->myPipe->Dppclk + s->DISPCLKCycles * p->myPipe->PixelClock / p->myPipe->Dispclk + p->DSCDelay);
- *p->DSTXAfterScaler = (unsigned int)math_round(*p->DSTXAfterScaler + (p->myPipe->ODMMode != dml2_odm_mode_bypass ? 18 : 0) + (p->myPipe->DPPPerSurface - 1) * p->DPP_RECOUT_WIDTH +
- ((p->myPipe->ODMMode == dml2_odm_mode_split_1to2 || p->myPipe->ODMMode == dml2_odm_mode_mso_1to2) ? (double)p->myPipe->HActive / 2.0 : 0) +
- ((p->myPipe->ODMMode == dml2_odm_mode_mso_1to4) ? (double)p->myPipe->HActive * 3.0 / 4.0 : 0));
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DynamicMetadataVMEnabled = %u\n", __func__, p->DynamicMetadataVMEnabled);
- dml2_printf("DML::%s: DPPCycles = %u\n", __func__, s->DPPCycles);
- dml2_printf("DML::%s: PixelClock = %f\n", __func__, p->myPipe->PixelClock);
- dml2_printf("DML::%s: Dppclk = %f\n", __func__, p->myPipe->Dppclk);
- dml2_printf("DML::%s: DISPCLKCycles = %u\n", __func__, s->DISPCLKCycles);
- dml2_printf("DML::%s: DISPCLK = %f\n", __func__, p->myPipe->Dispclk);
- dml2_printf("DML::%s: DSCDelay = %u\n", __func__, p->DSCDelay);
- dml2_printf("DML::%s: ODMMode = %u\n", __func__, p->myPipe->ODMMode);
- dml2_printf("DML::%s: DPP_RECOUT_WIDTH = %u\n", __func__, p->DPP_RECOUT_WIDTH);
- dml2_printf("DML::%s: DSTXAfterScaler = %u\n", __func__, *p->DSTXAfterScaler);
-
- dml2_printf("DML::%s: setup_for_tdlut = %u\n", __func__, p->setup_for_tdlut);
- dml2_printf("DML::%s: tdlut_opt_time = %f\n", __func__, p->tdlut_opt_time);
- dml2_printf("DML::%s: tdlut_pte_bytes_per_frame = %u\n", __func__, p->tdlut_pte_bytes_per_frame);
-#endif
-
- if (p->OutputFormat == dml2_420 || (p->myPipe->InterlaceEnable && p->myPipe->ProgressiveToInterlaceUnitInOPP))
- *p->DSTYAfterScaler = 1;
- else
- *p->DSTYAfterScaler = 0;
-
- s->DSTTotalPixelsAfterScaler = *p->DSTYAfterScaler * p->myPipe->HTotal + *p->DSTXAfterScaler;
- *p->DSTYAfterScaler = (unsigned int)(math_floor2(s->DSTTotalPixelsAfterScaler / p->myPipe->HTotal, 1));
- *p->DSTXAfterScaler = (unsigned int)(s->DSTTotalPixelsAfterScaler - ((double)(*p->DSTYAfterScaler * p->myPipe->HTotal)));
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DSTXAfterScaler = %u (final)\n", __func__, *p->DSTXAfterScaler);
- dml2_printf("DML::%s: DSTYAfterScaler = %u (final)\n", __func__, *p->DSTYAfterScaler);
-#endif
-
- s->NoTimeToPrefetch = false;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips);
- dml2_printf("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips);
- dml2_printf("DML::%s: trip_to_mem = %f\n", __func__, s->trip_to_mem);
- dml2_printf("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch);
- dml2_printf("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels);
- dml2_printf("DML::%s: HostVMDynamicLevelsTrips = %u\n", __func__, s->HostVMDynamicLevelsTrips);
-#endif
- if (p->display_cfg->gpuvm_enable) {
- s->Tvm_trips_rounded = math_ceil2(4.0 * *p->Tvm_trips / s->LineTime, 1.0) / 4.0 * s->LineTime;
- *p->Tvm_trips_flip_rounded = math_ceil2(4.0 * *p->Tvm_trips_flip / s->LineTime, 1.0) / 4.0 * s->LineTime;
- } else {
- s->Tvm_trips_rounded = s->LineTime / 4.0;
- *p->Tvm_trips_flip_rounded = s->LineTime / 4.0;
- }
- s->Tvm_trips_rounded = math_max2(s->Tvm_trips_rounded, s->LineTime / 4.0);
- *p->Tvm_trips_flip_rounded = math_max2(*p->Tvm_trips_flip_rounded, s->LineTime / 4.0);
-
- if (p->display_cfg->gpuvm_enable == true || p->setup_for_tdlut || dcc_mrq_enable) {
- s->Tr0_trips_rounded = math_ceil2(4.0 * *p->Tr0_trips / s->LineTime, 1.0) / 4.0 * s->LineTime;
- *p->Tr0_trips_flip_rounded = math_ceil2(4.0 * *p->Tr0_trips_flip / s->LineTime, 1.0) / 4.0 * s->LineTime;
- } else {
- s->Tr0_trips_rounded = s->LineTime / 4.0;
- *p->Tr0_trips_flip_rounded = s->LineTime / 4.0;
- }
- s->Tr0_trips_rounded = math_max2(s->Tr0_trips_rounded, s->LineTime / 4.0);
- *p->Tr0_trips_flip_rounded = math_max2(*p->Tr0_trips_flip_rounded, s->LineTime / 4.0);
-
- *p->Tno_bw_flip = 0;
- if (p->display_cfg->gpuvm_enable == true) {
- if (p->display_cfg->gpuvm_max_page_table_levels >= 3) {
- *p->Tno_bw = p->ExtraLatencyPrefetch + s->trip_to_mem * (double)((p->display_cfg->gpuvm_max_page_table_levels - 2) * (s->HostVMDynamicLevelsTrips + 1));
- } else if (p->display_cfg->gpuvm_max_page_table_levels == 1 && !dcc_mrq_enable && !p->setup_for_tdlut) {
- *p->Tno_bw = p->ExtraLatencyPrefetch;
- } else {
- *p->Tno_bw = 0;
- }
- *p->Tno_bw_flip = *p->Tno_bw;
- } else {
- *p->Tno_bw = 0;
- }
-
- if (dml2_core_shared_is_420(p->myPipe->SourcePixelFormat)) {
- s->bytes_pp = p->myPipe->BytePerPixelY + p->myPipe->BytePerPixelC / 4.0;
- } else {
- s->bytes_pp = p->myPipe->BytePerPixelY + p->myPipe->BytePerPixelC;
- }
-
- s->prefetch_bw_pr = s->bytes_pp * p->myPipe->PixelClock / (double)p->myPipe->DPPPerSurface;
- if (p->myPipe->VRatio < 1.0)
- s->prefetch_bw_pr = p->myPipe->VRatio * s->prefetch_bw_pr;
- s->max_Tsw = (math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) * s->LineTime);
-
- s->prefetch_sw_bytes = p->PrefetchSourceLinesY * p->swath_width_luma_ub * p->myPipe->BytePerPixelY + p->PrefetchSourceLinesC * p->swath_width_chroma_ub * p->myPipe->BytePerPixelC;
- s->prefetch_bw_pr = s->prefetch_bw_pr * p->mall_prefetch_sdp_overhead_factor;
- s->prefetch_sw_bytes = s->prefetch_sw_bytes * p->mall_prefetch_sdp_overhead_factor;
- s->prefetch_bw_oto = math_max2(s->prefetch_bw_pr, s->prefetch_sw_bytes / s->max_Tsw);
-
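-	// Minimum swath lines for the oto schedule: bounded by the maximum prefetch
-	// VRATIO, never fewer than 2 lines, and long enough to cover the 3D LUT
-	// drain time.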
- s->min_Lsw_oto = math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) / __DML2_CALCS_MAX_VRATIO_PRE_OTO__;
- s->min_Lsw_oto = math_max2(s->min_Lsw_oto, 2.0);
- s->min_Lsw_oto = math_max2(s->min_Lsw_oto, p->tdlut_drain_time / s->LineTime);
-
- unsigned int vm_bytes = p->vm_bytes; // vm_bytes is dpde0_bytes_per_frame_ub_l + dpde0_bytes_per_frame_ub_c + 2*extra_dpde_bytes;
- unsigned int extra_tdpe_bytes = (unsigned int)math_max2(0, (p->display_cfg->gpuvm_max_page_table_levels - 1) * 128);
-
- if (p->setup_for_tdlut)
- vm_bytes = vm_bytes + p->tdlut_pte_bytes_per_frame + (p->display_cfg->gpuvm_enable ? extra_tdpe_bytes : 0);
-
- unsigned long tdlut_row_bytes = (unsigned long) math_ceil2(p->tdlut_bytes_per_frame/2.0, 1.0);
- s->prefetch_bw_oto = math_max3(s->prefetch_bw_oto,
- p->vm_bytes * p->HostVMInefficiencyFactor / (31 * s->LineTime) - *p->Tno_bw,
- (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / (15 * s->LineTime));
- s->Lsw_oto = math_ceil2(4.0 * math_max2(s->prefetch_sw_bytes / s->prefetch_bw_oto / s->LineTime, s->min_Lsw_oto), 1.0) / 4.0;
-
- if (p->display_cfg->gpuvm_enable == true) {
- s->Tvm_oto = math_max3(
- *p->Tvm_trips,
- *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto,
- s->LineTime / 4.0);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tvm_oto max0 = %f\n", __func__, *p->Tvm_trips);
- dml2_printf("DML::%s: Tvm_oto max1 = %f\n", __func__, *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto);
- dml2_printf("DML::%s: Tvm_oto max2 = %f\n", __func__, s->LineTime / 4);
-#endif
-
- } else
- s->Tvm_oto = s->LineTime / 4.0;
-
- if ((p->display_cfg->gpuvm_enable == true || p->setup_for_tdlut || dcc_mrq_enable)) {
- s->Tr0_oto = math_max3(
- *p->Tr0_trips,
- (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto,
- s->LineTime / 4.0);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tr0_oto max0 = %f\n", __func__, *p->Tr0_trips);
- dml2_printf("DML::%s: Tr0_oto max1 = %f\n", __func__, (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto);
- dml2_printf("DML::%s: Tr0_oto max2 = %f\n", __func__, s->LineTime / 4);
-#endif
- } else
- s->Tr0_oto = (s->LineTime - s->Tvm_oto) / 4.0;
-
- s->Tvm_oto_lines = math_ceil2(4.0 * s->Tvm_oto / s->LineTime, 1) / 4.0;
- s->Tr0_oto_lines = math_ceil2(4.0 * s->Tr0_oto / s->LineTime, 1) / 4.0;
- s->dst_y_prefetch_oto = s->Tvm_oto_lines + 2 * s->Tr0_oto_lines + s->Lsw_oto;
-
- //To (time for delay after scaler) in line time
- unsigned int Lo = (unsigned int)(*p->DSTYAfterScaler + (double)*p->DSTXAfterScaler / (double)p->myPipe->HTotal);
-
- //Tpre_equ in line time
- s->dst_y_prefetch_equ = p->VStartup - (*p->TSetup + math_max2(s->TWait_p + p->TCalc, *p->Tdmdl - p->Ttrip)) / s->LineTime - Lo;
- s->dst_y_prefetch_equ = math_min2(s->dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: HTotal = %u\n", __func__, p->myPipe->HTotal);
- dml2_printf("DML::%s: min_Lsw_oto = %f\n", __func__, s->min_Lsw_oto);
- dml2_printf("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Tno_bw_flip = %f\n", __func__, *p->Tno_bw_flip);
- dml2_printf("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch);
- dml2_printf("DML::%s: trip_to_mem = %f\n", __func__, s->trip_to_mem);
- dml2_printf("DML::%s: mall_prefetch_sdp_overhead_factor = %f\n", __func__, p->mall_prefetch_sdp_overhead_factor);
- dml2_printf("DML::%s: BytePerPixelY = %u\n", __func__, p->myPipe->BytePerPixelY);
- dml2_printf("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
- dml2_printf("DML::%s: swath_width_luma_ub = %u\n", __func__, p->swath_width_luma_ub);
- dml2_printf("DML::%s: BytePerPixelC = %u\n", __func__, p->myPipe->BytePerPixelC);
- dml2_printf("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC);
- dml2_printf("DML::%s: swath_width_chroma_ub = %u\n", __func__, p->swath_width_chroma_ub);
- dml2_printf("DML::%s: prefetch_sw_bytes = %f\n", __func__, s->prefetch_sw_bytes);
- dml2_printf("DML::%s: max_Tsw = %f\n", __func__, s->max_Tsw);
- dml2_printf("DML::%s: bytes_pp = %f\n", __func__, s->bytes_pp);
- dml2_printf("DML::%s: vm_bytes = %u\n", __func__, vm_bytes);
- dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
- dml2_printf("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips);
- dml2_printf("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips);
- dml2_printf("DML::%s: Tvm_trips_flip = %f\n", __func__, *p->Tvm_trips_flip);
- dml2_printf("DML::%s: Tr0_trips_flip = %f\n", __func__, *p->Tr0_trips_flip);
- dml2_printf("DML::%s: prefetch_bw_pr = %f\n", __func__, s->prefetch_bw_pr);
- dml2_printf("DML::%s: prefetch_bw_oto = %f\n", __func__, s->prefetch_bw_oto);
- dml2_printf("DML::%s: Tr0_oto = %f\n", __func__, s->Tr0_oto);
- dml2_printf("DML::%s: Tvm_oto = %f\n", __func__, s->Tvm_oto);
- dml2_printf("DML::%s: Tvm_oto_lines = %f\n", __func__, s->Tvm_oto_lines);
- dml2_printf("DML::%s: Tr0_oto_lines = %f\n", __func__, s->Tr0_oto_lines);
- dml2_printf("DML::%s: Lsw_oto = %f\n", __func__, s->Lsw_oto);
- dml2_printf("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto);
- dml2_printf("DML::%s: dst_y_prefetch_equ = %f\n", __func__, s->dst_y_prefetch_equ);
-	dml2_printf("DML::%s: tdlut_row_bytes = %lu\n", __func__, tdlut_row_bytes);
- dml2_printf("DML::%s: meta_row_bytes = %d\n", __func__, p->meta_row_bytes);
-#endif
-
- s->dst_y_prefetch_equ = math_floor2(4.0 * (s->dst_y_prefetch_equ + 0.125), 1) / 4.0;
- s->Tpre_rounded = s->dst_y_prefetch_equ * s->LineTime;
-
- dml2_printf("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, s->dst_y_prefetch_equ);
- dml2_printf("DML::%s: LineTime: %f\n", __func__, s->LineTime);
- dml2_printf("DML::%s: VStartup: %u\n", __func__, p->VStartup);
- dml2_printf("DML::%s: Tvstartup: %fus - time between vstartup and first pixel of active\n", __func__, p->VStartup * s->LineTime);
- dml2_printf("DML::%s: TSetup: %fus - time from vstartup to vready\n", __func__, *p->TSetup);
- dml2_printf("DML::%s: TCalc: %fus - time for calculations in dchub starting at vready\n", __func__, p->TCalc);
- dml2_printf("DML::%s: TWait: %fus - time for fabric to become ready max(pstate exit,cstate enter/exit, urgent latency) after TCalc\n", __func__, p->TWait);
- dml2_printf("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, s->Tdmbf);
- dml2_printf("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, s->Tdmec);
- dml2_printf("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, s->Tdmsks);
- dml2_printf("DML::%s: Tdmdl_vm: %fus - time for vm stages of dmd \n", __func__, *p->Tdmdl_vm);
- dml2_printf("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *p->Tdmdl);
- dml2_printf("DML::%s: TWait_p: %fus\n", __func__, s->TWait_p);
- dml2_printf("DML::%s: Ttrip: %fus\n", __func__, p->Ttrip);
- dml2_printf("DML::%s: DSTXAfterScaler: %u pixels - number of pixel clocks pipeline and buffer delay after scaler \n", __func__, *p->DSTXAfterScaler);
- dml2_printf("DML::%s: DSTYAfterScaler: %u lines - number of lines of pipeline and buffer delay after scaler \n", __func__, *p->DSTYAfterScaler);
-
- s->dep_bytes = math_max2(vm_bytes * p->HostVMInefficiencyFactor, p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes);
-
- dml2_printf("DML::%s: dep_bytes: %f\n", __func__, s->dep_bytes);
- dml2_printf("DML::%s: prefetch_sw_bytes: %f\n", __func__, s->prefetch_sw_bytes);
- dml2_printf("DML::%s: vm_bytes: %f (hvm inefficiency scaled)\n", __func__, vm_bytes * p->HostVMInefficiencyFactor);
- dml2_printf("DML::%s: row_bytes: %f (hvm inefficiency scaled, 1 row)\n", __func__, p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes);
-
- if (s->prefetch_sw_bytes < s->dep_bytes) {
- s->prefetch_sw_bytes = 2 * s->dep_bytes;
- dml2_printf("DML::%s: bump prefetch_sw_bytes to %f\n", __func__, s->prefetch_sw_bytes);
- }
-
- *p->dst_y_per_vm_vblank = 0;
- *p->dst_y_per_row_vblank = 0;
- *p->VRatioPrefetchY = 0;
- *p->VRatioPrefetchC = 0;
- *p->RequiredPrefetchPixelDataBWLuma = 0;
-
- if (s->dst_y_prefetch_equ > 1) {
- s->prefetch_bw1 = 0.;
- s->prefetch_bw2 = 0.;
- s->prefetch_bw3 = 0.;
- s->prefetch_bw4 = 0.;
-
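-		// Four candidate prefetch bandwidths over the rounded Tpre budget:
-		// bw1: vm, 2x row and swath data all bandwidth-limited;
-		// bw2: row fetches pinned at their trip time (2 * Tr0_trips_rounded);
-		// bw3: vm fetch pinned at its trip time (Tvm_trips_rounded);
-		// bw4: both vm and row fetches pinned, only swath data remains.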
- if (s->Tpre_rounded - *p->Tno_bw > 0) {
- s->prefetch_bw1 = (vm_bytes * p->HostVMInefficiencyFactor
- + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)
- + s->prefetch_sw_bytes)
- / (s->Tpre_rounded - *p->Tno_bw);
- s->Tsw_est1 = s->prefetch_sw_bytes / s->prefetch_bw1;
- } else
- s->prefetch_bw1 = 0;
-
- dml2_printf("DML::%s: prefetch_bw1: %f\n", __func__, s->prefetch_bw1);
- if (p->VStartup == p->MaxVStartup && (s->Tsw_est1 / s->LineTime < s->min_Lsw_oto) && s->Tpre_rounded - s->min_Lsw_oto * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw > 0) {
- s->prefetch_bw1 = (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) /
- (s->Tpre_rounded - s->min_Lsw_oto * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw);
- dml2_printf("DML::%s: prefetch_bw1: %f (updated)\n", __func__, s->prefetch_bw1);
- }
-
- if (s->Tpre_rounded - *p->Tno_bw - 2 * s->Tr0_trips_rounded > 0)
- s->prefetch_bw2 = (vm_bytes * p->HostVMInefficiencyFactor + s->prefetch_sw_bytes) /
- (s->Tpre_rounded - *p->Tno_bw - 2 * s->Tr0_trips_rounded);
- else
- s->prefetch_bw2 = 0;
-
- if (s->Tpre_rounded - s->Tvm_trips_rounded > 0) {
- s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) + s->prefetch_sw_bytes) /
- (s->Tpre_rounded - s->Tvm_trips_rounded);
- s->Tsw_est3 = s->prefetch_sw_bytes / s->prefetch_bw3;
- } else
- s->prefetch_bw3 = 0;
-
-
- dml2_printf("DML::%s: prefetch_bw3: %f\n", __func__, s->prefetch_bw3);
- if (p->VStartup == p->MaxVStartup && (s->Tsw_est3 / s->LineTime < s->min_Lsw_oto) && s->Tpre_rounded - s->min_Lsw_oto * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded > 0) {
- s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) / (s->Tpre_rounded - s->min_Lsw_oto * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded);
- dml2_printf("DML::%s: prefetch_bw3: %f (updated)\n", __func__, s->prefetch_bw3);
- }
-
- if (s->Tpre_rounded - s->Tvm_trips_rounded - 2 * s->Tr0_trips_rounded > 0)
- s->prefetch_bw4 = s->prefetch_sw_bytes / (s->Tpre_rounded - s->Tvm_trips_rounded - 2 * s->Tr0_trips_rounded);
- else
- s->prefetch_bw4 = 0;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tpre_rounded: %f\n", __func__, s->Tpre_rounded);
- dml2_printf("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Tvm_trips_rounded: %f\n", __func__, s->Tvm_trips_rounded);
- dml2_printf("DML::%s: Tr0_trips_rounded: %f\n", __func__, 2 * s->Tr0_trips_rounded);
- dml2_printf("DML::%s: Tsw_est1: %f\n", __func__, s->Tsw_est1);
- dml2_printf("DML::%s: Tsw_est3: %f\n", __func__, s->Tsw_est3);
- dml2_printf("DML::%s: prefetch_bw1: %f (final)\n", __func__, s->prefetch_bw1);
- dml2_printf("DML::%s: prefetch_bw2: %f (final)\n", __func__, s->prefetch_bw2);
- dml2_printf("DML::%s: prefetch_bw3: %f (final)\n", __func__, s->prefetch_bw3);
- dml2_printf("DML::%s: prefetch_bw4: %f (final)\n", __func__, s->prefetch_bw4);
-#endif
-
- {
- bool Case1OK = false;
- bool Case2OK = false;
- bool Case3OK = false;
-
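-			// Pick the candidate whose implied Tvm/Tr0 are consistent with the
-			// rounded trip times: Case1 = both bandwidth-limited, Case2 = row
-			// latency-limited, Case3 = vm latency-limited; otherwise fall back
-			// to prefetch_bw4.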
- if (s->prefetch_bw1 > 0) {
- if (*p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw1 >= s->Tvm_trips_rounded &&
- (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw1 >= s->Tr0_trips_rounded) {
- Case1OK = true;
- }
- }
-
- if (s->prefetch_bw2 > 0) {
- if (*p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw2 >= s->Tvm_trips_rounded &&
- (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw2 < s->Tr0_trips_rounded) {
- Case2OK = true;
- }
- }
-
- if (s->prefetch_bw3 > 0) {
- if (*p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw3 < s->Tvm_trips_rounded &&
- (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw3 >= s->Tr0_trips_rounded) {
- Case3OK = true;
- }
- }
-
- if (Case1OK) {
- s->prefetch_bw_equ = s->prefetch_bw1;
- } else if (Case2OK) {
- s->prefetch_bw_equ = s->prefetch_bw2;
- } else if (Case3OK) {
- s->prefetch_bw_equ = s->prefetch_bw3;
- } else {
- s->prefetch_bw_equ = s->prefetch_bw4;
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Case1OK: %u\n", __func__, Case1OK);
- dml2_printf("DML::%s: Case2OK: %u\n", __func__, Case2OK);
- dml2_printf("DML::%s: Case3OK: %u\n", __func__, Case3OK);
- dml2_printf("DML::%s: prefetch_bw_equ: %f\n", __func__, s->prefetch_bw_equ);
-#endif
- s->prefetch_bw_equ = math_max3(s->prefetch_bw_equ,
- p->vm_bytes * p->HostVMInefficiencyFactor / (31 * s->LineTime) - *p->Tno_bw,
- (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / (15 * s->LineTime));
-
- if (s->prefetch_bw_equ > 0) {
- if (p->display_cfg->gpuvm_enable == true) {
- s->Tvm_equ = math_max3(*p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_equ, *p->Tvm_trips, s->LineTime / 4);
- } else {
- s->Tvm_equ = s->LineTime / 4;
- }
-
- if (p->display_cfg->gpuvm_enable == true || dcc_mrq_enable || p->setup_for_tdlut) {
- s->Tr0_equ = math_max3((p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_equ, // PixelPTEBytesPerRow is dpte_row_bytes
- *p->Tr0_trips,
- s->LineTime / 4);
- } else {
- s->Tr0_equ = s->LineTime / 4;
- }
- } else {
- s->Tvm_equ = 0;
- s->Tr0_equ = 0;
- dml2_printf("DML::%s: prefetch_bw_equ equals 0!\n", __func__);
- }
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tvm_equ = %f\n", __func__, s->Tvm_equ);
- dml2_printf("DML::%s: Tr0_equ = %f\n", __func__, s->Tr0_equ);
-#endif
-
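-	// Use whichever schedule needs the smaller prefetch span: the
-	// bandwidth-derived "oto" schedule or the Tpre-equation-based "equ" one.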
- if (s->dst_y_prefetch_oto < s->dst_y_prefetch_equ) {
- *p->dst_y_prefetch = s->dst_y_prefetch_oto;
- s->TimeForFetchingVM = s->Tvm_oto;
- s->TimeForFetchingRowInVBlank = s->Tr0_oto;
-
- *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
- *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Using oto bw scheduling for prefetch\n", __func__);
-#endif
-
- } else {
- *p->dst_y_prefetch = s->dst_y_prefetch_equ;
- s->TimeForFetchingVM = s->Tvm_equ;
- s->TimeForFetchingRowInVBlank = s->Tr0_equ;
-
- if (p->VStartup == p->MaxVStartup) {
- *p->dst_y_per_vm_vblank = math_floor2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
- *p->dst_y_per_row_vblank = math_floor2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
- } else {
- *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
- *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Using equ bw scheduling for prefetch\n", __func__);
-#endif
- }
- dml2_assert(*p->dst_y_prefetch < 64);
-
- // Lsw = dst_y_prefetch - (dst_y_per_vm_vblank + 2*dst_y_per_row_vblank)
- s->LinesToRequestPrefetchPixelData = *p->dst_y_prefetch - *p->dst_y_per_vm_vblank - 2 * *p->dst_y_per_row_vblank; // Lsw
-
- s->cursor_prefetch_bytes = (unsigned int)math_max2(p->cursor_bytes_per_chunk, 4 * p->cursor_bytes_per_line);
- *p->prefetch_cursor_bw = p->num_cursors * s->cursor_prefetch_bytes / (s->LinesToRequestPrefetchPixelData * s->LineTime);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: TimeForFetchingVM = %f\n", __func__, s->TimeForFetchingVM);
- dml2_printf("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, s->TimeForFetchingRowInVBlank);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
- dml2_printf("DML::%s: dst_y_prefetch = %f\n", __func__, *p->dst_y_prefetch);
- dml2_printf("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
- dml2_printf("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank);
- dml2_printf("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, s->LinesToRequestPrefetchPixelData);
- dml2_printf("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
-
- dml2_printf("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, p->cursor_bytes_per_chunk);
- dml2_printf("DML::%s: cursor_bytes_per_line = %d\n", __func__, p->cursor_bytes_per_line);
- dml2_printf("DML::%s: cursor_prefetch_bytes = %d\n", __func__, s->cursor_prefetch_bytes);
- dml2_printf("DML::%s: prefetch_cursor_bw = %f\n", __func__, *p->prefetch_cursor_bw);
-#endif
- unsigned int min_lsw_required = (unsigned int)math_max2(2, p->tdlut_drain_time / s->LineTime);
-
- if (s->LinesToRequestPrefetchPixelData >= min_lsw_required && s->prefetch_bw_equ > 0) {
- *p->VRatioPrefetchY = (double)p->PrefetchSourceLinesY / s->LinesToRequestPrefetchPixelData;
- *p->VRatioPrefetchY = math_max2(*p->VRatioPrefetchY, 1.0);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatioPrefetchY = %f\n", __func__, *p->VRatioPrefetchY);
- dml2_printf("DML::%s: SwathHeightY = %u\n", __func__, p->SwathHeightY);
- dml2_printf("DML::%s: VInitPreFillY = %u\n", __func__, p->VInitPreFillY);
-#endif
- if ((p->SwathHeightY > 4) && (p->VInitPreFillY > 3)) {
- if (s->LinesToRequestPrefetchPixelData > (p->VInitPreFillY - 3.0) / 2.0) {
- *p->VRatioPrefetchY = math_max2(*p->VRatioPrefetchY,
- (double)p->MaxNumSwathY * p->SwathHeightY / (s->LinesToRequestPrefetchPixelData - (p->VInitPreFillY - 3.0) / 2.0));
- } else {
- s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: MyErr set. LinesToRequestPrefetchPixelData=%f VinitPreFillY=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillY);
- *p->VRatioPrefetchY = 0;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatioPrefetchY = %f\n", __func__, *p->VRatioPrefetchY);
- dml2_printf("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
- dml2_printf("DML::%s: MaxNumSwathY = %u\n", __func__, p->MaxNumSwathY);
-#endif
- }
-
- *p->VRatioPrefetchC = (double)p->PrefetchSourceLinesC / s->LinesToRequestPrefetchPixelData;
- *p->VRatioPrefetchC = math_max2(*p->VRatioPrefetchC, 1.0);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatioPrefetchC = %f\n", __func__, *p->VRatioPrefetchC);
- dml2_printf("DML::%s: SwathHeightC = %u\n", __func__, p->SwathHeightC);
- dml2_printf("DML::%s: VInitPreFillC = %u\n", __func__, p->VInitPreFillC);
-#endif
- if ((p->SwathHeightC > 4) && (p->VInitPreFillC > 3)) {
- if (s->LinesToRequestPrefetchPixelData > (p->VInitPreFillC - 3.0) / 2.0) {
- *p->VRatioPrefetchC = math_max2(*p->VRatioPrefetchC, (double)p->MaxNumSwathC * p->SwathHeightC / (s->LinesToRequestPrefetchPixelData - (p->VInitPreFillC - 3.0) / 2.0));
- } else {
- s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: MyErr set. LinesToRequestPrefetchPixelData=%f VInitPreFillC=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillC);
- *p->VRatioPrefetchC = 0;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatioPrefetchC = %f\n", __func__, *p->VRatioPrefetchC);
- dml2_printf("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC);
- dml2_printf("DML::%s: MaxNumSwathC = %u\n", __func__, p->MaxNumSwathC);
-#endif
- }
-
- *p->RequiredPrefetchPixelDataBWLuma = (double)p->PrefetchSourceLinesY / s->LinesToRequestPrefetchPixelData * p->myPipe->BytePerPixelY * p->swath_width_luma_ub / s->LineTime;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: BytePerPixelY = %u\n", __func__, p->myPipe->BytePerPixelY);
- dml2_printf("DML::%s: swath_width_luma_ub = %u\n", __func__, p->swath_width_luma_ub);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
- dml2_printf("DML::%s: RequiredPrefetchPixelDataBWLuma = %f\n", __func__, *p->RequiredPrefetchPixelDataBWLuma);
-#endif
- *p->RequiredPrefetchPixelDataBWChroma = (double)p->PrefetchSourceLinesC / s->LinesToRequestPrefetchPixelData * p->myPipe->BytePerPixelC * p->swath_width_chroma_ub / s->LineTime;
- } else {
- s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: MyErr set, LinesToRequestPrefetchPixelData: %f, should be >= %d\n", __func__, s->LinesToRequestPrefetchPixelData, min_lsw_required);
- dml2_printf("DML::%s: MyErr set, prefetch_bw_equ: %f, should be > 0\n", __func__, s->prefetch_bw_equ);
- *p->VRatioPrefetchY = 0;
- *p->VRatioPrefetchC = 0;
- *p->RequiredPrefetchPixelDataBWLuma = 0;
- *p->RequiredPrefetchPixelDataBWChroma = 0;
- }
-
- dml2_printf("DML: Tpre: %fus - sum of time to request 2 x data pte, swaths\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime + 2.0 * s->TimeForFetchingRowInVBlank + s->TimeForFetchingVM);
- dml2_printf("DML: Tvm: %fus - time to fetch vm\n", s->TimeForFetchingVM);
- dml2_printf("DML: Tr0: %fus - time to fetch first row of data pagetables\n", s->TimeForFetchingRowInVBlank);
- dml2_printf("DML: Tsw: %fus = time to fetch enough pixel data and cursor data to feed the scalers init position and detile\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime);
- dml2_printf("DML: To: %fus - time for propagation from scaler to optc\n", (*p->DSTYAfterScaler + ((double)(*p->DSTXAfterScaler) / (double)p->myPipe->HTotal)) * s->LineTime);
- dml2_printf("DML: Tvstartup - TSetup - Tcalc - TWait - Tpre - To > 0\n");
- dml2_printf("DML: Tslack(pre): %fus - time left over in schedule\n", p->VStartup * s->LineTime - s->TimeForFetchingVM - 2 * s->TimeForFetchingRowInVBlank - (*p->DSTYAfterScaler + ((double)(*p->DSTXAfterScaler) / (double)p->myPipe->HTotal)) * s->LineTime - p->TWait - p->TCalc - *p->TSetup);
- dml2_printf("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %u\n", p->PixelPTEBytesPerRow);
-
- } else {
- dml2_printf("DML::%s: MyErr set, dst_y_prefetch_equ = %f (should be > 1)\n", __func__, s->dst_y_prefetch_equ);
- s->NoTimeToPrefetch = true;
- s->TimeForFetchingVM = 0;
- s->TimeForFetchingRowInVBlank = 0;
- *p->dst_y_per_vm_vblank = 0;
- *p->dst_y_per_row_vblank = 0;
- s->LinesToRequestPrefetchPixelData = 0;
- *p->VRatioPrefetchY = 0;
- *p->VRatioPrefetchC = 0;
- *p->RequiredPrefetchPixelDataBWLuma = 0;
- *p->RequiredPrefetchPixelDataBWChroma = 0;
- }
-
- {
- double prefetch_vm_bw;
- double prefetch_row_bw;
-
- if (vm_bytes == 0) {
- prefetch_vm_bw = 0;
- } else if (*p->dst_y_per_vm_vblank > 0) {
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
- dml2_printf("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
-#endif
- prefetch_vm_bw = vm_bytes * p->HostVMInefficiencyFactor / (*p->dst_y_per_vm_vblank * s->LineTime);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw);
-#endif
- } else {
- prefetch_vm_bw = 0;
- s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: MyErr set. dst_y_per_vm_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_vm_vblank);
- }
-
- if (p->PixelPTEBytesPerRow == 0 && tdlut_row_bytes == 0) {
- prefetch_row_bw = 0;
- } else if (*p->dst_y_per_row_vblank > 0) {
- prefetch_row_bw = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + tdlut_row_bytes) / (*p->dst_y_per_row_vblank * s->LineTime);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
- dml2_printf("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank);
- dml2_printf("DML::%s: prefetch_row_bw = %f\n", __func__, prefetch_row_bw);
-#endif
- } else {
- prefetch_row_bw = 0;
- s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: MyErr set. dst_y_per_row_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_row_vblank);
- }
-
- *p->prefetch_vmrow_bw = math_max2(prefetch_vm_bw, prefetch_row_bw);
- }
-
- if (s->NoTimeToPrefetch) {
- s->TimeForFetchingVM = 0;
- s->TimeForFetchingRowInVBlank = 0;
- *p->dst_y_per_vm_vblank = 0;
- *p->dst_y_per_row_vblank = 0;
- *p->dst_y_prefetch = 0;
- s->LinesToRequestPrefetchPixelData = 0;
- *p->VRatioPrefetchY = 0;
- *p->VRatioPrefetchC = 0;
- *p->RequiredPrefetchPixelDataBWLuma = 0;
- *p->RequiredPrefetchPixelDataBWChroma = 0;
- }
-
- dml2_printf("DML::%s: dst_y_per_vm_vblank = %f (final)\n", __func__, *p->dst_y_per_vm_vblank);
- dml2_printf("DML::%s: dst_y_per_row_vblank = %f (final)\n", __func__, *p->dst_y_per_row_vblank);
- dml2_printf("DML::%s: NoTimeToPrefetch=%d\n", __func__, s->NoTimeToPrefetch);
- return s->NoTimeToPrefetch;
-}
-
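-/*
- * Fill the per-[soc state][bw type] bandwidth tables used by the support checks:
- * urg_vactive ignores flip and prefetch demand (zero arrays), urg includes the
- * full demand with the urgent burst factors applied, and non_urg re-evaluates
- * the same full demand with unity burst factors.
- */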
-static void calculate_peak_bandwidth_required(
- struct dml2_core_internal_scratch *s,
-
- // output
- double urg_vactive_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double non_urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
-
- // input
- const struct dml2_display_cfg *display_cfg,
- unsigned int inc_flip_bw,
- unsigned int NumberOfActiveSurfaces,
- unsigned int NumberOfDPP[],
- double dcc_dram_bw_nom_overhead_factor_p0[],
- double dcc_dram_bw_nom_overhead_factor_p1[],
- double dcc_dram_bw_pref_overhead_factor_p0[],
- double dcc_dram_bw_pref_overhead_factor_p1[],
- double mall_prefetch_sdp_overhead_factor[],
- double mall_prefetch_dram_overhead_factor[],
- double ReadBandwidthLuma[],
- double ReadBandwidthChroma[],
- double PrefetchBandwidthLuma[],
- double PrefetchBandwidthChroma[],
- double cursor_bw[],
- double dpte_row_bw[],
- double meta_row_bw[],
- double prefetch_cursor_bw[],
- double prefetch_vmrow_bw[],
- double flip_bw[],
- double UrgentBurstFactorLuma[],
- double UrgentBurstFactorChroma[],
- double UrgentBurstFactorCursor[],
- double UrgentBurstFactorLumaPre[],
- double UrgentBurstFactorChromaPre[],
- double UrgentBurstFactorCursorPre[])
-{
- unsigned int n;
- unsigned int m;
-
- struct dml2_core_shared_calculate_peak_bandwidth_required_locals *l = &s->calculate_peak_bandwidth_required_locals;
-
- memset(l, 0, sizeof(struct dml2_core_shared_calculate_peak_bandwidth_required_locals));
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: inc_flip_bw = %d\n", __func__, inc_flip_bw);
- dml2_printf("DML::%s: NumberOfActiveSurfaces = %d\n", __func__, NumberOfActiveSurfaces);
-#endif
-
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- l->unity_array[k] = 1.0;
- l->zero_array[k] = 0.0;
- }
-
- for (m = 0; m < dml2_core_internal_soc_state_max; m++) {
- for (n = 0; n < dml2_core_internal_bw_max; n++) {
- urg_vactive_bandwidth_required[m][n] = get_urgent_bandwidth_required(
- &s->get_urgent_bandwidth_required_locals,
- display_cfg,
- m,
- n,
- 0, //inc_flip_bw,
- NumberOfActiveSurfaces,
- NumberOfDPP,
- dcc_dram_bw_nom_overhead_factor_p0,
- dcc_dram_bw_nom_overhead_factor_p1,
- dcc_dram_bw_pref_overhead_factor_p0,
- dcc_dram_bw_pref_overhead_factor_p1,
- mall_prefetch_sdp_overhead_factor,
- mall_prefetch_dram_overhead_factor,
- ReadBandwidthLuma,
- ReadBandwidthChroma,
- l->zero_array, //PrefetchBandwidthLuma,
- l->zero_array, //PrefetchBandwidthChroma,
- cursor_bw,
- dpte_row_bw,
- meta_row_bw,
- l->zero_array, //prefetch_cursor_bw,
- l->zero_array, //prefetch_vmrow_bw,
- l->zero_array, //flip_bw,
- UrgentBurstFactorLuma,
- UrgentBurstFactorChroma,
- UrgentBurstFactorCursor,
- UrgentBurstFactorLumaPre,
- UrgentBurstFactorChromaPre,
- UrgentBurstFactorCursorPre);
-
-
- urg_bandwidth_required[m][n] = get_urgent_bandwidth_required(
- &s->get_urgent_bandwidth_required_locals,
- display_cfg,
- m,
- n,
- inc_flip_bw,
- NumberOfActiveSurfaces,
- NumberOfDPP,
- dcc_dram_bw_nom_overhead_factor_p0,
- dcc_dram_bw_nom_overhead_factor_p1,
- dcc_dram_bw_pref_overhead_factor_p0,
- dcc_dram_bw_pref_overhead_factor_p1,
- mall_prefetch_sdp_overhead_factor,
- mall_prefetch_dram_overhead_factor,
- ReadBandwidthLuma,
- ReadBandwidthChroma,
- PrefetchBandwidthLuma,
- PrefetchBandwidthChroma,
- cursor_bw,
- dpte_row_bw,
- meta_row_bw,
- prefetch_cursor_bw,
- prefetch_vmrow_bw,
- flip_bw,
- UrgentBurstFactorLuma,
- UrgentBurstFactorChroma,
- UrgentBurstFactorCursor,
- UrgentBurstFactorLumaPre,
- UrgentBurstFactorChromaPre,
- UrgentBurstFactorCursorPre);
-
- non_urg_bandwidth_required[m][n] = get_urgent_bandwidth_required(
- &s->get_urgent_bandwidth_required_locals,
- display_cfg,
- m,
- n,
- inc_flip_bw,
- NumberOfActiveSurfaces,
- NumberOfDPP,
- dcc_dram_bw_nom_overhead_factor_p0,
- dcc_dram_bw_nom_overhead_factor_p1,
- dcc_dram_bw_pref_overhead_factor_p0,
- dcc_dram_bw_pref_overhead_factor_p1,
- mall_prefetch_sdp_overhead_factor,
- mall_prefetch_dram_overhead_factor,
- ReadBandwidthLuma,
- ReadBandwidthChroma,
- PrefetchBandwidthLuma,
- PrefetchBandwidthChroma,
- cursor_bw,
- dpte_row_bw,
- meta_row_bw,
- prefetch_cursor_bw,
- prefetch_vmrow_bw,
- flip_bw,
- l->unity_array,
- l->unity_array,
- l->unity_array,
- l->unity_array,
- l->unity_array,
- l->unity_array);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: urg_vactive_bandwidth_required%s[%s][%s]=%f\n", __func__, (inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), urg_vactive_bandwidth_required[m][n]);
- dml2_printf("DML::%s: urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), urg_bandwidth_required[m][n]);
- dml2_printf("DML::%s: non_urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), non_urg_bandwidth_required[m][n]);
-#endif
- dml2_assert(urg_bandwidth_required[m][n] >= non_urg_bandwidth_required[m][n]);
- }
- }
-}
-
-static void check_urgent_bandwidth_support(
- double *frac_urg_bandwidth_nom,
- double *frac_urg_bandwidth_mall,
- bool *vactive_bandwidth_support_ok, // vactive ok
- bool *bandwidth_support_ok, // max of vm, prefetch, vactive all ok
-
- unsigned int mall_allocated_for_dcn_mbytes,
- double non_urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double urg_vactive_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double urg_bandwidth_available[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max])
-{
- double frac_urg_bandwidth_nom_sdp = non_urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp] / urg_bandwidth_available[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
- double frac_urg_bandwidth_nom_dram = non_urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram] / urg_bandwidth_available[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram];
- double frac_urg_bandwidth_mall_sdp = non_urg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp] / urg_bandwidth_available[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp];
- double frac_urg_bandwidth_mall_dram = non_urg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram] / urg_bandwidth_available[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram];
-
- *bandwidth_support_ok = 1;
- *vactive_bandwidth_support_ok = 1;
-
- // Check urgent bandwidth required at sdp vs urgent bandwidth avail at sdp -> FractionOfUrgentBandwidth
- // Check urgent bandwidth required at dram vs urgent bandwidth avail at dram
- // Check urgent bandwidth required at sdp vs urgent bandwidth avail at sdp, svp_prefetch -> FractionOfUrgentBandwidthMALL
- // Check urgent bandwidth required at dram vs urgent bandwidth avail at dram, svp_prefetch
-
- *bandwidth_support_ok &= urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp] <= urg_bandwidth_available[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
- *bandwidth_support_ok &= urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram] <= urg_bandwidth_available[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram];
-
- if (mall_allocated_for_dcn_mbytes > 0) {
- *bandwidth_support_ok &= urg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp] <= urg_bandwidth_available[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp];
- *bandwidth_support_ok &= urg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram] <= urg_bandwidth_available[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram];
- }
-
- *frac_urg_bandwidth_nom = math_max2(frac_urg_bandwidth_nom_sdp, frac_urg_bandwidth_nom_dram);
- *frac_urg_bandwidth_mall = math_max2(frac_urg_bandwidth_mall_sdp, frac_urg_bandwidth_mall_dram);
-
- *bandwidth_support_ok &= (*frac_urg_bandwidth_nom <= 1.0);
-
- if (mall_allocated_for_dcn_mbytes > 0)
- *bandwidth_support_ok &= (*frac_urg_bandwidth_mall <= 1.0);
-
- *vactive_bandwidth_support_ok &= urg_vactive_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp] <= urg_bandwidth_available[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
- *vactive_bandwidth_support_ok &= urg_vactive_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram] <= urg_bandwidth_available[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram];
- if (mall_allocated_for_dcn_mbytes > 0) {
- *vactive_bandwidth_support_ok &= urg_vactive_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp] <= urg_bandwidth_available[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp];
- *vactive_bandwidth_support_ok &= urg_vactive_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram] <= urg_bandwidth_available[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram];
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: frac_urg_bandwidth_nom_sdp = %f\n", __func__, frac_urg_bandwidth_nom_sdp);
- dml2_printf("DML::%s: frac_urg_bandwidth_nom_dram = %f\n", __func__, frac_urg_bandwidth_nom_dram);
- dml2_printf("DML::%s: frac_urg_bandwidth_nom = %f\n", __func__, *frac_urg_bandwidth_nom);
-
- dml2_printf("DML::%s: frac_urg_bandwidth_mall_sdp = %f\n", __func__, frac_urg_bandwidth_mall_sdp);
- dml2_printf("DML::%s: frac_urg_bandwidth_mall_dram = %f\n", __func__, frac_urg_bandwidth_mall_dram);
- dml2_printf("DML::%s: frac_urg_bandwidth_mall = %f\n", __func__, *frac_urg_bandwidth_mall);
- dml2_printf("DML::%s: bandwidth_support_ok = %d\n", __func__, *bandwidth_support_ok);
-#endif
-
-}
-
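-/*
- * Bandwidth left over for immediate flips in the given soc state: the smaller
- * of the SDP and DRAM headrooms (available minus required-without-flip).
- */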
-static double get_bandwidth_available_for_immediate_flip(enum dml2_core_internal_soc_state_type eval_state,
- double urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max], // no flip
- double urg_bandwidth_available[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max])
-{
- double flip_bw_available_mbps;
- double flip_bw_available_sdp_mbps;
- double flip_bw_available_dram_mbps;
-
- flip_bw_available_sdp_mbps = urg_bandwidth_available[eval_state][dml2_core_internal_bw_sdp] - urg_bandwidth_required[eval_state][dml2_core_internal_bw_sdp];
- flip_bw_available_dram_mbps = urg_bandwidth_available[eval_state][dml2_core_internal_bw_dram] - urg_bandwidth_required[eval_state][dml2_core_internal_bw_dram];
- flip_bw_available_mbps = flip_bw_available_sdp_mbps < flip_bw_available_dram_mbps ? flip_bw_available_sdp_mbps : flip_bw_available_dram_mbps;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: eval_state = %s\n", __func__, dml2_core_internal_soc_state_type_str(eval_state));
- dml2_printf("DML::%s: urg_bandwidth_available_sdp_mbps = %f\n", __func__, urg_bandwidth_available[eval_state][dml2_core_internal_bw_sdp]);
- dml2_printf("DML::%s: urg_bandwidth_available_dram_mbps = %f\n", __func__, urg_bandwidth_available[eval_state][dml2_core_internal_bw_dram]);
- dml2_printf("DML::%s: urg_bandwidth_required_sdp_mbps = %f\n", __func__, urg_bandwidth_required[eval_state][dml2_core_internal_bw_sdp]);
- dml2_printf("DML::%s: urg_bandwidth_required_dram_mbps = %f\n", __func__, urg_bandwidth_required[eval_state][dml2_core_internal_bw_dram]);
- dml2_printf("DML::%s: flip_bw_available_sdp_mbps = %f\n", __func__, flip_bw_available_sdp_mbps);
- dml2_printf("DML::%s: flip_bw_available_dram_mbps = %f\n", __func__, flip_bw_available_dram_mbps);
- dml2_printf("DML::%s: flip_bw_available_mbps = %f\n", __func__, flip_bw_available_mbps);
-#endif
-
- return flip_bw_available_mbps;
-}
-
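-/*
- * Immediate flip is supportable only if the urgent demand including flips fits
- * within the available bandwidth on both SDP and DRAM, and the non-urgent
- * fraction of the available bandwidth stays at or below 1.
- */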
-static void calculate_immediate_flip_bandwidth_support(
- // Output
- double *frac_urg_bandwidth_flip,
- bool *flip_bandwidth_support_ok,
-
- // Input
- enum dml2_core_internal_soc_state_type eval_state,
- double urg_bandwidth_required_flip[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
- double urg_bandwidth_available[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max])
-{
- double frac_urg_bw_flip_sdp = non_urg_bandwidth_required_flip[eval_state][dml2_core_internal_bw_sdp] / urg_bandwidth_available[eval_state][dml2_core_internal_bw_sdp];
- double frac_urg_bw_flip_dram = non_urg_bandwidth_required_flip[eval_state][dml2_core_internal_bw_dram] / urg_bandwidth_available[eval_state][dml2_core_internal_bw_dram];
-
- *flip_bandwidth_support_ok = true;
- for (unsigned int n = 0; n < dml2_core_internal_bw_max; n++) { // check sdp and dram
- *flip_bandwidth_support_ok &= urg_bandwidth_available[eval_state][n] >= urg_bandwidth_required_flip[eval_state][n];
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: n = %s\n", __func__, dml2_core_internal_bw_type_str((enum dml2_core_internal_bw_type) eval_state));
- dml2_printf("DML::%s: urg_bandwidth_available = %f\n", __func__, urg_bandwidth_available[eval_state][n]);
- dml2_printf("DML::%s: non_urg_bandwidth_required_flip = %f\n", __func__, non_urg_bandwidth_required_flip[eval_state][n]);
- dml2_printf("DML::%s: urg_bandwidth_required_flip = %f\n", __func__, urg_bandwidth_required_flip[eval_state][n]);
- dml2_printf("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok);
-#endif
- dml2_assert(urg_bandwidth_required_flip[eval_state][n] >= non_urg_bandwidth_required_flip[eval_state][n]);
- }
-
- *frac_urg_bandwidth_flip = (frac_urg_bw_flip_sdp > frac_urg_bw_flip_dram) ? frac_urg_bw_flip_sdp : frac_urg_bw_flip_dram;
- *flip_bandwidth_support_ok &= (*frac_urg_bandwidth_flip <= 1);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: eval_state = %s\n", __func__, dml2_core_internal_soc_state_type_str(eval_state));
- dml2_printf("DML::%s: frac_urg_bw_flip_sdp = %f\n", __func__, frac_urg_bw_flip_sdp);
- dml2_printf("DML::%s: frac_urg_bw_flip_dram = %f\n", __func__, frac_urg_bw_flip_dram);
- dml2_printf("DML::%s: frac_urg_bandwidth_flip = %f\n", __func__, *frac_urg_bandwidth_flip);
- dml2_printf("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok);
-
- for (unsigned int m = 0; m < dml2_core_internal_soc_state_max; m++) {
- for (unsigned int n = 0; n < dml2_core_internal_bw_max; n++) {
- dml2_printf("DML::%s: state:%s bw_type:%s, urg_bandwidth_available=%f %s urg_bandwidth_required=%f\n",
- __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n),
- urg_bandwidth_available[m][n], (urg_bandwidth_available[m][n] < urg_bandwidth_required_flip[m][n]) ? "<" : ">=", urg_bandwidth_required_flip[m][n]);
- }
- }
-#endif
-}
-
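-/*
- * Two modes: with use_lb_flip_bw (mode support) only a lower-bound flip
- * bandwidth is derived from the worst-case flip time; otherwise the available
- * flip bandwidth is split per pipe and converted into dst_y_per_vm_flip /
- * dst_y_per_row_flip, which must stay below 32 and 16 lines respectively and
- * fit within the minimum row time.
- */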
-static void CalculateFlipSchedule(
- struct dml2_core_internal_scratch *s,
- bool iflip_enable,
- bool use_lb_flip_bw,
- double HostVMInefficiencyFactor,
- double Tvm_trips_flip,
- double Tr0_trips_flip,
- double Tvm_trips_flip_rounded,
- double Tr0_trips_flip_rounded,
- bool GPUVMEnable,
- double vm_bytes, // vm_bytes
- double DPTEBytesPerRow, // dpte_row_bytes
- double BandwidthAvailableForImmediateFlip,
- unsigned int TotImmediateFlipBytes,
- enum dml2_source_format_class SourcePixelFormat,
- double LineTime,
- double VRatio,
- double VRatioChroma,
- double Tno_bw_flip,
- unsigned int dpte_row_height,
- unsigned int dpte_row_height_chroma,
- bool use_one_row_for_frame_flip,
- unsigned int max_flip_time_us,
- unsigned int per_pipe_flip_bytes,
- unsigned int meta_row_bytes,
- unsigned int meta_row_height,
- unsigned int meta_row_height_chroma,
- bool dcc_mrq_enable,
-
- // Output
- double *dst_y_per_vm_flip,
- double *dst_y_per_row_flip,
- double *final_flip_bw,
- bool *ImmediateFlipSupportedForPipe)
-{
- struct dml2_core_shared_CalculateFlipSchedule_locals *l = &s->CalculateFlipSchedule_locals;
-
- l->dual_plane = dml2_core_shared_is_420(SourcePixelFormat) || SourcePixelFormat == dml2_rgbe_alpha;
- l->dpte_row_bytes = DPTEBytesPerRow;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, GPUVMEnable);
- dml2_printf("DML::%s: ip.max_flip_time_us = %d\n", __func__, max_flip_time_us);
- dml2_printf("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip);
- dml2_printf("DML::%s: TotImmediateFlipBytes = %u\n", __func__, TotImmediateFlipBytes);
- dml2_printf("DML::%s: use_lb_flip_bw = %u\n", __func__, use_lb_flip_bw);
- dml2_printf("DML::%s: iflip_enable = %u\n", __func__, iflip_enable);
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, LineTime);
- dml2_printf("DML::%s: Tno_bw_flip = %f\n", __func__, Tno_bw_flip);
- dml2_printf("DML::%s: Tvm_trips_flip = %f\n", __func__, Tvm_trips_flip);
- dml2_printf("DML::%s: Tr0_trips_flip = %f\n", __func__, Tr0_trips_flip);
- dml2_printf("DML::%s: Tvm_trips_flip_rounded = %f\n", __func__, Tvm_trips_flip_rounded);
- dml2_printf("DML::%s: Tr0_trips_flip_rounded = %f\n", __func__, Tr0_trips_flip_rounded);
- dml2_printf("DML::%s: vm_bytes = %f\n", __func__, vm_bytes);
- dml2_printf("DML::%s: DPTEBytesPerRow = %f\n", __func__, DPTEBytesPerRow);
- dml2_printf("DML::%s: meta_row_bytes = %d\n", __func__, meta_row_bytes);
- dml2_printf("DML::%s: dpte_row_bytes = %f\n", __func__, l->dpte_row_bytes);
- dml2_printf("DML::%s: dpte_row_height = %d\n", __func__, dpte_row_height);
- dml2_printf("DML::%s: meta_row_height = %d\n", __func__, meta_row_height);
- dml2_printf("DML::%s: VRatio = %f\n", __func__, VRatio);
-#endif
-
- if (TotImmediateFlipBytes > 0 && (GPUVMEnable || dcc_mrq_enable)) {
- if (l->dual_plane) {
- if (dcc_mrq_enable && GPUVMEnable) {
- l->min_row_height = math_min2(dpte_row_height, meta_row_height);
- l->min_row_height_chroma = math_min2(dpte_row_height_chroma, meta_row_height_chroma);
- } else if (GPUVMEnable) {
- l->min_row_height = dpte_row_height;
- l->min_row_height_chroma = dpte_row_height_chroma;
- } else {
- l->min_row_height = meta_row_height;
- l->min_row_height_chroma = meta_row_height_chroma;
- }
- l->min_row_time = math_min2(l->min_row_height * LineTime / VRatio, l->min_row_height_chroma * LineTime / VRatioChroma);
- } else {
- if (dcc_mrq_enable && GPUVMEnable)
- l->min_row_height = math_min2(dpte_row_height, meta_row_height);
- else if (GPUVMEnable)
- l->min_row_height = dpte_row_height;
- else
- l->min_row_height = meta_row_height;
-
- l->min_row_time = l->min_row_height * LineTime / VRatio;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: min_row_time = %f\n", __func__, l->min_row_time);
-#endif
- dml2_assert(l->min_row_time > 0);
-
- if (use_lb_flip_bw) {
- // For mode check, calculate the flip bw requirement with the worst-case flip time
- l->max_flip_time = math_min2(l->min_row_time, math_max2(Tvm_trips_flip_rounded + 2 * Tr0_trips_flip_rounded, (double)max_flip_time_us));
-
- // The lower bound on flip bandwidth
- // Note: get_urgent_bandwidth_required() already considers dpte_row_bw and meta_row_bw in the bandwidth calculation, so leave final_flip_bw = 0 if iflip is not required
- l->lb_flip_bw = 0;
-
- if (iflip_enable) {
- l->hvm_scaled_vm_bytes = vm_bytes * HostVMInefficiencyFactor;
- l->num_rows = 2;
- l->hvm_scaled_row_bytes = (l->num_rows * l->dpte_row_bytes * HostVMInefficiencyFactor + l->num_rows * meta_row_bytes);
- l->hvm_scaled_vm_row_bytes = l->hvm_scaled_vm_bytes + l->hvm_scaled_row_bytes;
- l->lb_flip_bw = math_max3(
- l->hvm_scaled_vm_row_bytes / (l->max_flip_time - Tno_bw_flip),
- l->hvm_scaled_vm_bytes / (l->max_flip_time - Tno_bw_flip - 2 * Tr0_trips_flip_rounded),
- l->hvm_scaled_row_bytes / (l->max_flip_time - Tvm_trips_flip_rounded));
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_flip_time = %f\n", __func__, l->max_flip_time);
- dml2_printf("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_bytes);
- dml2_printf("DML::%s: total row bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_row_bytes);
- dml2_printf("DML::%s: total vm+row bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_row_bytes);
- dml2_printf("DML::%s: lb_flip_bw for vm and row = %f\n", __func__, l->hvm_scaled_vm_row_bytes / (l->max_flip_time - Tno_bw_flip));
- dml2_printf("DML::%s: lb_flip_bw for vm = %f\n", __func__, l->hvm_scaled_vm_bytes / (l->max_flip_time - Tno_bw_flip - 2 * Tr0_trips_flip_rounded));
- dml2_printf("DML::%s: lb_flip_bw for row = %f\n", __func__, l->hvm_scaled_row_bytes / (l->max_flip_time - Tvm_trips_flip_rounded));
-
- if (l->lb_flip_bw > 0) {
- dml2_printf("DML::%s: mode_support est Tvm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw);
- dml2_printf("DML::%s: mode_support est Tr0_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / l->num_rows);
- dml2_printf("DML::%s: mode_support est dst_y_per_vm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw / LineTime);
- dml2_printf("DML::%s: mode_support est dst_y_per_row_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / LineTime / l->num_rows);
- }
-#endif
- l->lb_flip_bw = math_max3(l->lb_flip_bw,
- l->hvm_scaled_vm_bytes / (31 * LineTime - Tno_bw_flip),
- (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / (15 * LineTime));
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: lb_flip_bw for vm reg limit = %f\n", __func__, l->hvm_scaled_vm_bytes / (31 * LineTime) - Tno_bw_flip);
- dml2_printf("DML::%s: lb_flip_bw for row reg limit = %f\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / (15 * LineTime));
-#endif
- }
-
- *final_flip_bw = l->lb_flip_bw;
-
- *dst_y_per_vm_flip = 1; // not used
- *dst_y_per_row_flip = 1; // not used
- *ImmediateFlipSupportedForPipe = true;
- } else {
- if (iflip_enable) {
- l->ImmediateFlipBW = (double)per_pipe_flip_bytes * BandwidthAvailableForImmediateFlip / (double)TotImmediateFlipBytes; // flip_bw(i)
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: per_pipe_flip_bytes = %d\n", __func__, per_pipe_flip_bytes);
- dml2_printf("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip);
- dml2_printf("DML::%s: ImmediateFlipBW = %f\n", __func__, l->ImmediateFlipBW);
-#endif
- if (l->ImmediateFlipBW == 0) {
- l->Tvm_flip = 0;
- l->Tr0_flip = 0;
- } else {
- l->Tvm_flip = math_max3(Tvm_trips_flip,
- Tno_bw_flip + vm_bytes * HostVMInefficiencyFactor / l->ImmediateFlipBW,
- LineTime / 4.0);
-
- l->Tr0_flip = math_max3(Tr0_trips_flip,
- (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / l->ImmediateFlipBW,
- LineTime / 4.0);
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, vm_bytes * HostVMInefficiencyFactor);
- dml2_printf("DML::%s: total row bytes (hvm ineff scaled, one row) = %f\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes));
-
- dml2_printf("DML::%s: Tvm_flip = %f (bw-based), Tvm_trips_flip = %f (latency-based)\n", __func__, Tno_bw_flip + vm_bytes * HostVMInefficiencyFactor / l->ImmediateFlipBW, Tvm_trips_flip);
- dml2_printf("DML::%s: Tr0_flip = %f (bw-based), Tr0_trips_flip = %f (latency-based)\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / l->ImmediateFlipBW, Tr0_trips_flip);
-#endif
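- /* Round the VM and row flip times up to quarter-line granularity before deriving the final flip bandwidth. */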
- *dst_y_per_vm_flip = math_ceil2(4.0 * (l->Tvm_flip / LineTime), 1.0) / 4.0;
- *dst_y_per_row_flip = math_ceil2(4.0 * (l->Tr0_flip / LineTime), 1.0) / 4.0;
-
- *final_flip_bw = math_max2(vm_bytes * HostVMInefficiencyFactor / (*dst_y_per_vm_flip * LineTime),
- (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / (*dst_y_per_row_flip * LineTime));
-
- if (*dst_y_per_vm_flip >= 32 || *dst_y_per_row_flip >= 16 || l->Tvm_flip + 2 * l->Tr0_flip > l->min_row_time) {
- *ImmediateFlipSupportedForPipe = false;
- } else {
- *ImmediateFlipSupportedForPipe = iflip_enable;
- }
- } else {
- l->Tvm_flip = 0;
- l->Tr0_flip = 0;
- *dst_y_per_vm_flip = 0;
- *dst_y_per_row_flip = 0;
- *final_flip_bw = 0;
- *ImmediateFlipSupportedForPipe = iflip_enable;
- }
- }
- } else {
- l->Tvm_flip = 0;
- l->Tr0_flip = 0;
- *dst_y_per_vm_flip = 0;
- *dst_y_per_row_flip = 0;
- *final_flip_bw = 0;
- *ImmediateFlipSupportedForPipe = iflip_enable;
- }
-
-#ifdef __DML_VBA_DEBUG__
- if (!use_lb_flip_bw) {
- dml2_printf("DML::%s: dst_y_per_vm_flip = %f (should be < 32)\n", __func__, *dst_y_per_vm_flip);
- dml2_printf("DML::%s: dst_y_per_row_flip = %f (should be < 16)\n", __func__, *dst_y_per_row_flip);
- dml2_printf("DML::%s: Tvm_flip = %f (final)\n", __func__, l->Tvm_flip);
- dml2_printf("DML::%s: Tr0_flip = %f (final)\n", __func__, l->Tr0_flip);
- }
- dml2_printf("DML::%s: final_flip_bw = %f\n", __func__, *final_flip_bw);
- dml2_printf("DML::%s: ImmediateFlipSupportedForPipe = %u\n", __func__, *ImmediateFlipSupportedForPipe);
-#endif
-}
-
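-/*
- * Derives the urgent, p-state, FCLK-change and stutter watermarks from the SoC
- * latencies, computes each plane's clock-change latency hiding from line-buffer
- * and DET buffering, and classifies DRAM/FCLK change support (vactive, vblank,
- * DRR, MALL) per plane and globally.
- */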
-static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- struct dml2_core_internal_scratch *scratch,
- struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params *p)
-{
- struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_locals *s = &scratch->CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_locals;
-
- s->TotalActiveWriteback = 0;
- p->Watermark->UrgentWatermark = p->mmSOCParameters.UrgentLatency + p->mmSOCParameters.ExtraLatency;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: UrgentWatermark = %f\n", __func__, p->Watermark->UrgentWatermark);
-#endif
-
- p->Watermark->USRRetrainingWatermark = p->mmSOCParameters.UrgentLatency + p->mmSOCParameters.ExtraLatency + p->mmSOCParameters.USRRetrainingLatency + p->mmSOCParameters.SMNLatency;
- p->Watermark->DRAMClockChangeWatermark = p->mmSOCParameters.DRAMClockChangeLatency + p->Watermark->UrgentWatermark;
- p->Watermark->FCLKChangeWatermark = p->mmSOCParameters.FCLKChangeLatency + p->Watermark->UrgentWatermark;
- p->Watermark->StutterExitWatermark = p->mmSOCParameters.SRExitTime + p->mmSOCParameters.ExtraLatency_sr + 10 / p->DCFClkDeepSleep;
- p->Watermark->StutterEnterPlusExitWatermark = p->mmSOCParameters.SREnterPlusExitTime + p->mmSOCParameters.ExtraLatency_sr + 10 / p->DCFClkDeepSleep;
- p->Watermark->Z8StutterExitWatermark = p->mmSOCParameters.SRExitZ8Time + p->mmSOCParameters.ExtraLatency_sr + 10 / p->DCFClkDeepSleep;
- p->Watermark->Z8StutterEnterPlusExitWatermark = p->mmSOCParameters.SREnterPlusExitZ8Time + p->mmSOCParameters.ExtraLatency_sr + 10 / p->DCFClkDeepSleep;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, p->mmSOCParameters.UrgentLatency);
- dml2_printf("DML::%s: ExtraLatency = %f\n", __func__, p->mmSOCParameters.ExtraLatency);
- dml2_printf("DML::%s: DRAMClockChangeLatency = %f\n", __func__, p->mmSOCParameters.DRAMClockChangeLatency);
- dml2_printf("DML::%s: SREnterPlusExitZ8Time = %f\n", __func__, p->mmSOCParameters.SREnterPlusExitZ8Time);
- dml2_printf("DML::%s: SREnterPlusExitTime = %f\n", __func__, p->mmSOCParameters.SREnterPlusExitTime);
- dml2_printf("DML::%s: UrgentWatermark = %f\n", __func__, p->Watermark->UrgentWatermark);
- dml2_printf("DML::%s: USRRetrainingWatermark = %f\n", __func__, p->Watermark->USRRetrainingWatermark);
- dml2_printf("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, p->Watermark->DRAMClockChangeWatermark);
- dml2_printf("DML::%s: FCLKChangeWatermark = %f\n", __func__, p->Watermark->FCLKChangeWatermark);
- dml2_printf("DML::%s: StutterExitWatermark = %f\n", __func__, p->Watermark->StutterExitWatermark);
- dml2_printf("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->StutterEnterPlusExitWatermark);
- dml2_printf("DML::%s: Z8StutterExitWatermark = %f\n", __func__, p->Watermark->Z8StutterExitWatermark);
- dml2_printf("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->Z8StutterEnterPlusExitWatermark);
-#endif
-
- s->TotalActiveWriteback = 0;
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
- s->TotalActiveWriteback = s->TotalActiveWriteback + 1;
- }
- }
-
- if (s->TotalActiveWriteback <= 1) {
- p->Watermark->WritebackUrgentWatermark = p->mmSOCParameters.WritebackLatency;
- } else {
- p->Watermark->WritebackUrgentWatermark = p->mmSOCParameters.WritebackLatency + p->WritebackChunkSize * 1024.0 / 32.0 / p->SOCCLK;
- }
- if (p->USRRetrainingRequired)
- p->Watermark->WritebackUrgentWatermark = p->Watermark->WritebackUrgentWatermark + p->mmSOCParameters.USRRetrainingLatency;
-
- if (s->TotalActiveWriteback <= 1) {
- p->Watermark->WritebackDRAMClockChangeWatermark = p->mmSOCParameters.DRAMClockChangeLatency + p->mmSOCParameters.WritebackLatency;
- p->Watermark->WritebackFCLKChangeWatermark = p->mmSOCParameters.FCLKChangeLatency + p->mmSOCParameters.WritebackLatency;
- } else {
- p->Watermark->WritebackDRAMClockChangeWatermark = p->mmSOCParameters.DRAMClockChangeLatency + p->mmSOCParameters.WritebackLatency + p->WritebackChunkSize * 1024.0 / 32.0 / p->SOCCLK;
- p->Watermark->WritebackFCLKChangeWatermark = p->mmSOCParameters.FCLKChangeLatency + p->mmSOCParameters.WritebackLatency + p->WritebackChunkSize * 1024.0 / 32.0 / p->SOCCLK;
- }
-
- if (p->USRRetrainingRequired)
- p->Watermark->WritebackDRAMClockChangeWatermark = p->Watermark->WritebackDRAMClockChangeWatermark + p->mmSOCParameters.USRRetrainingLatency;
-
- if (p->USRRetrainingRequired)
- p->Watermark->WritebackFCLKChangeWatermark = p->Watermark->WritebackFCLKChangeWatermark + p->mmSOCParameters.USRRetrainingLatency;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: WritebackDRAMClockChangeWatermark = %f\n", __func__, p->Watermark->WritebackDRAMClockChangeWatermark);
- dml2_printf("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, p->Watermark->WritebackFCLKChangeWatermark);
- dml2_printf("DML::%s: WritebackUrgentWatermark = %f\n", __func__, p->Watermark->WritebackUrgentWatermark);
- dml2_printf("DML::%s: USRRetrainingRequired = %u\n", __func__, p->USRRetrainingRequired);
- dml2_printf("DML::%s: USRRetrainingLatency = %f\n", __func__, p->mmSOCParameters.USRRetrainingLatency);
-#endif
-
- s->TotalPixelBW = 0.0;
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- double h_total = (double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total;
- double pixel_clock_mhz = p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000.0;
- double v_ratio = p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- double v_ratio_c = p->display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
-
- s->TotalPixelBW = s->TotalPixelBW + p->DPPPerSurface[k]
- * (p->SwathWidthY[k] * p->BytePerPixelDETY[k] * v_ratio + p->SwathWidthC[k] * p->BytePerPixelDETC[k] * v_ratio_c) / (h_total / pixel_clock_mhz);
- }
-
- *p->global_fclk_change_supported = true;
- *p->global_dram_clock_change_supported = true;
-
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- double h_total = (double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total;
- double pixel_clock_mhz = p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000.0;
- double v_ratio = p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- double v_ratio_c = p->display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- double v_taps = p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps;
- double v_taps_c = p->display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps;
- double h_ratio = p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio;
- double h_ratio_c = p->display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio;
- double LBBitPerPixel = 57;
-
- s->LBLatencyHidingSourceLinesY[k] = (unsigned int)(math_min2((double)p->MaxLineBufferLines, math_floor2((double)p->LineBufferSize / LBBitPerPixel / ((double)p->SwathWidthY[k] / math_max2(h_ratio, 1.0)), 1)) - (v_taps - 1));
- s->LBLatencyHidingSourceLinesC[k] = (unsigned int)(math_min2((double)p->MaxLineBufferLines, math_floor2((double)p->LineBufferSize / LBBitPerPixel / ((double)p->SwathWidthC[k] / math_max2(h_ratio_c, 1.0)), 1)) - (v_taps_c - 1));
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, MaxLineBufferLines= %u\n", __func__, k, p->MaxLineBufferLines);
- dml2_printf("DML::%s: k=%u, LineBufferSize = %u\n", __func__, k, p->LineBufferSize);
- dml2_printf("DML::%s: k=%u, LBBitPerPixel = %u\n", __func__, k, LBBitPerPixel);
- dml2_printf("DML::%s: k=%u, HRatio = %f\n", __func__, k, h_ratio);
- dml2_printf("DML::%s: k=%u, VTaps = %f\n", __func__, k, v_taps);
-#endif
-
- s->EffectiveLBLatencyHidingY = s->LBLatencyHidingSourceLinesY[k] / v_ratio * (h_total / pixel_clock_mhz);
- s->EffectiveLBLatencyHidingC = s->LBLatencyHidingSourceLinesC[k] / v_ratio_c * (h_total / pixel_clock_mhz);
-
- s->EffectiveDETBufferSizeY = p->DETBufferSizeY[k];
- if (p->UnboundedRequestEnabled) {
- s->EffectiveDETBufferSizeY = s->EffectiveDETBufferSizeY + p->CompressedBufferSizeInkByte * 1024 * (p->SwathWidthY[k] * p->BytePerPixelDETY[k] * v_ratio) / (h_total / pixel_clock_mhz) / s->TotalPixelBW;
- }
-
- s->LinesInDETY[k] = (double)s->EffectiveDETBufferSizeY / p->BytePerPixelDETY[k] / p->SwathWidthY[k];
- s->LinesInDETYRoundedDownToSwath[k] = (unsigned int)(math_floor2(s->LinesInDETY[k], p->SwathHeightY[k]));
- s->FullDETBufferingTimeY = s->LinesInDETYRoundedDownToSwath[k] * (h_total / pixel_clock_mhz) / v_ratio;
-
- s->ActiveClockChangeLatencyHidingY = s->EffectiveLBLatencyHidingY + s->FullDETBufferingTimeY - ((double)p->DSTXAfterScaler[k] / h_total + (double)p->DSTYAfterScaler[k]) * h_total / pixel_clock_mhz;
-
- if (p->NumberOfActiveSurfaces > 1) {
- s->ActiveClockChangeLatencyHidingY = s->ActiveClockChangeLatencyHidingY - (1.0 - 1.0 / (double)p->NumberOfActiveSurfaces) * (double)p->SwathHeightY[k] * (double)h_total / pixel_clock_mhz / v_ratio;
- }
-
- if (p->BytePerPixelDETC[k] > 0) {
- s->LinesInDETC[k] = p->DETBufferSizeC[k] / p->BytePerPixelDETC[k] / p->SwathWidthC[k];
- s->LinesInDETCRoundedDownToSwath[k] = (unsigned int)(math_floor2(s->LinesInDETC[k], p->SwathHeightC[k]));
- s->FullDETBufferingTimeC = s->LinesInDETCRoundedDownToSwath[k] * (h_total / pixel_clock_mhz) / v_ratio_c;
- s->ActiveClockChangeLatencyHidingC = s->EffectiveLBLatencyHidingC + s->FullDETBufferingTimeC - ((double)p->DSTXAfterScaler[k] / (double)h_total + (double)p->DSTYAfterScaler[k]) * (double)h_total / pixel_clock_mhz;
- if (p->NumberOfActiveSurfaces > 1) {
- s->ActiveClockChangeLatencyHidingC = s->ActiveClockChangeLatencyHidingC - (1.0 - 1.0 / (double)p->NumberOfActiveSurfaces) * (double)p->SwathHeightC[k] * (double)h_total / pixel_clock_mhz / v_ratio_c;
- }
- s->ActiveClockChangeLatencyHiding = math_min2(s->ActiveClockChangeLatencyHidingY, s->ActiveClockChangeLatencyHidingC);
- } else {
- s->ActiveClockChangeLatencyHiding = s->ActiveClockChangeLatencyHidingY;
- }
-
- s->ActiveDRAMClockChangeLatencyMargin[k] = s->ActiveClockChangeLatencyHiding - p->Watermark->DRAMClockChangeWatermark;
- s->ActiveFCLKChangeLatencyMargin[k] = s->ActiveClockChangeLatencyHiding - p->Watermark->FCLKChangeWatermark;
- s->USRRetrainingLatencyMargin[k] = s->ActiveClockChangeLatencyHiding - p->Watermark->USRRetrainingWatermark;
-
- if (p->VActiveLatencyHidingMargin)
- p->VActiveLatencyHidingMargin[k] = s->ActiveDRAMClockChangeLatencyMargin[k];
-
- p->VActiveLatencyHidingUs[k] = s->ActiveClockChangeLatencyHiding;
-
- if (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.enable) {
- s->WritebackLatencyHiding = (double)p->WritebackInterfaceBufferSize * 1024.0 / ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height * (double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width / ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height * (double)h_total / pixel_clock_mhz) * 4.0);
- if (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format == dml2_444_64) {
- s->WritebackLatencyHiding = s->WritebackLatencyHiding / 2;
- }
- s->WritebackDRAMClockChangeLatencyMargin = s->WritebackLatencyHiding - p->Watermark->WritebackDRAMClockChangeWatermark;
-
- s->WritebackFCLKChangeLatencyMargin = s->WritebackLatencyHiding - p->Watermark->WritebackFCLKChangeWatermark;
-
- s->ActiveDRAMClockChangeLatencyMargin[k] = math_min2(s->ActiveDRAMClockChangeLatencyMargin[k], s->WritebackDRAMClockChangeLatencyMargin);
- s->ActiveFCLKChangeLatencyMargin[k] = math_min2(s->ActiveFCLKChangeLatencyMargin[k], s->WritebackFCLKChangeLatencyMargin);
- }
- p->MaxActiveDRAMClockChangeLatencySupported[k] = dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k]) ? 0 : (s->ActiveDRAMClockChangeLatencyMargin[k] + p->mmSOCParameters.DRAMClockChangeLatency);
-
- enum dml2_uclk_pstate_change_strategy uclk_pstate_change_strategy = p->display_cfg->plane_descriptors[k].overrides.uclk_pstate_change_strategy;
- double reserved_vblank_time_us = (double)p->display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns / 1000;
-
- p->FCLKChangeSupport[k] = dml2_fclock_change_unsupported;
- if (s->ActiveFCLKChangeLatencyMargin[k] > 0)
- p->FCLKChangeSupport[k] = dml2_fclock_change_vactive;
- else if (reserved_vblank_time_us >= p->mmSOCParameters.FCLKChangeLatency)
- p->FCLKChangeSupport[k] = dml2_fclock_change_vblank;
-
- if (p->FCLKChangeSupport[k] == dml2_fclock_change_unsupported)
- *p->global_fclk_change_supported = false;
-
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_unsupported;
- if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_auto) {
- if (s->ActiveDRAMClockChangeLatencyMargin[k] > 0 && reserved_vblank_time_us >= p->mmSOCParameters.DRAMClockChangeLatency)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vblank_and_vactive;
- else if (s->ActiveDRAMClockChangeLatencyMargin[k] > 0)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vactive;
- else if (reserved_vblank_time_us >= p->mmSOCParameters.DRAMClockChangeLatency)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vblank;
- } else if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_vactive && s->ActiveDRAMClockChangeLatencyMargin[k] > 0)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vactive;
- else if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_vblank && reserved_vblank_time_us >= p->mmSOCParameters.DRAMClockChangeLatency)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vblank;
- else if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_drr)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_drr;
- else if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_mall_svp)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_mall_svp;
- else if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_mall_full_frame)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_mall_full_frame;
-
- if (p->DRAMClockChangeSupport[k] == dml2_dram_clock_change_unsupported)
- *p->global_dram_clock_change_supported = false;
-
- s->dst_y_pstate = (unsigned int)(math_ceil2((p->mmSOCParameters.DRAMClockChangeLatency + p->mmSOCParameters.UrgentLatency) / (h_total / pixel_clock_mhz), 1));
- s->src_y_pstate_l = (unsigned int)(math_ceil2(s->dst_y_pstate * v_ratio, p->SwathHeightY[k]));
- s->src_y_ahead_l = (unsigned int)(math_floor2(p->DETBufferSizeY[k] / p->BytePerPixelDETY[k] / p->SwathWidthY[k], p->SwathHeightY[k]) + s->LBLatencyHidingSourceLinesY[k]);
- s->sub_vp_lines_l = s->src_y_pstate_l + s->src_y_ahead_l + p->meta_row_height_l[k];
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, DETBufferSizeY = %u\n", __func__, k, p->DETBufferSizeY[k]);
- dml2_printf("DML::%s: k=%u, BytePerPixelDETY = %f\n", __func__, k, p->BytePerPixelDETY[k]);
- dml2_printf("DML::%s: k=%u, SwathWidthY = %u\n", __func__, k, p->SwathWidthY[k]);
- dml2_printf("DML::%s: k=%u, SwathHeightY = %u\n", __func__, k, p->SwathHeightY[k]);
- dml2_printf("DML::%s: k=%u, LBLatencyHidingSourceLinesY = %u\n", __func__, k, s->LBLatencyHidingSourceLinesY[k]);
- dml2_printf("DML::%s: k=%u, dst_y_pstate = %u\n", __func__, k, s->dst_y_pstate);
- dml2_printf("DML::%s: k=%u, src_y_pstate_l = %u\n", __func__, k, s->src_y_pstate_l);
- dml2_printf("DML::%s: k=%u, src_y_ahead_l = %u\n", __func__, k, s->src_y_ahead_l);
- dml2_printf("DML::%s: k=%u, meta_row_height_l = %u\n", __func__, p->meta_row_height_l[k]);
- dml2_printf("DML::%s: k=%u, sub_vp_lines_l = %u\n", __func__, k, s->sub_vp_lines_l);
-#endif
- p->SubViewportLinesNeededInMALL[k] = s->sub_vp_lines_l;
-
- if (p->BytePerPixelDETC[k] > 0) {
- s->src_y_pstate_c = (unsigned int)(math_ceil2(s->dst_y_pstate * v_ratio_c, p->SwathHeightC[k]));
- s->src_y_ahead_c = (unsigned int)(math_floor2(p->DETBufferSizeC[k] / p->BytePerPixelDETC[k] / p->SwathWidthC[k], p->SwathHeightC[k]) + s->LBLatencyHidingSourceLinesC[k]);
- s->sub_vp_lines_c = s->src_y_pstate_c + s->src_y_ahead_c + p->meta_row_height_c[k];
-
- if (dml2_core_shared_is_420(p->display_cfg->plane_descriptors[k].pixel_format))
- p->SubViewportLinesNeededInMALL[k] = (unsigned int)(math_max2(s->sub_vp_lines_l, 2 * s->sub_vp_lines_c));
- else
- p->SubViewportLinesNeededInMALL[k] = (unsigned int)(math_max2(s->sub_vp_lines_l, s->sub_vp_lines_c));
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, meta_row_height_c = %u\n", __func__, p->meta_row_height_c[k]);
- dml2_printf("DML::%s: k=%u, src_y_pstate_c = %u\n", __func__, k, s->src_y_pstate_c);
- dml2_printf("DML::%s: k=%u, src_y_ahead_c = %u\n", __func__, k, s->src_y_ahead_c);
- dml2_printf("DML::%s: k=%u, sub_vp_lines_c = %u\n", __func__, k, s->sub_vp_lines_c);
-#endif
- }
- }
-
- bool FoundCriticalSurface = false;
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if ((!dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) && ((!FoundCriticalSurface)
- || ((s->ActiveFCLKChangeLatencyMargin[k] + p->mmSOCParameters.FCLKChangeLatency) < *p->MaxActiveFCLKChangeLatencySupported))) {
- FoundCriticalSurface = true;
- *p->MaxActiveFCLKChangeLatencySupported = s->ActiveFCLKChangeLatencyMargin[k] + p->mmSOCParameters.FCLKChangeLatency;
- }
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DRAMClockChangeSupport = %u\n", __func__, *p->global_dram_clock_change_supported);
- dml2_printf("DML::%s: FCLKChangeSupport = %u\n", __func__, *p->global_fclk_change_supported);
- dml2_printf("DML::%s: MaxActiveFCLKChangeLatencySupported = %f\n", __func__, *p->MaxActiveFCLKChangeLatencySupported);
- dml2_printf("DML::%s: USRRetrainingSupport = %u\n", __func__, *p->USRRetrainingSupport);
-#endif
-}
-
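-/*
- * DRAM bandwidth in MB/s = uclk_khz * channel_count * channel_width_bytes *
- * transactions_per_clock / 1000. For example (illustrative numbers only), a
- * 1,000,000 kHz uclk with 16 channels, 2 bytes per channel and 2 transactions
- * per clock gives 64,000 MB/s.
- */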
-static double uclk_khz_to_dram_bw_mbps(unsigned long uclk_khz, const struct dml2_dram_params *dram_config)
-{
- double bw_mbps = 0;
- bw_mbps = ((double)uclk_khz * dram_config->channel_count * dram_config->channel_width_bytes * dram_config->transactions_per_clock) / 1000.0;
-
- return bw_mbps;
-}
-
-static double dram_bw_kbps_to_uclk_mhz(unsigned long long bw_kbps, const struct dml2_dram_params *dram_config)
-{
- double uclk_mhz = 0;
-
- uclk_mhz = (double)bw_kbps / (dram_config->channel_count * dram_config->channel_width_bytes * dram_config->transactions_per_clock) / 1000.0;
-
- return uclk_mhz;
-}
-
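-/*
- * Return the index of the highest uclk DPM level whose minimum_uclk_khz does
- * not exceed the requested frequency; a zero minimum_uclk_khz entry terminates
- * the table.
- */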
-static unsigned int get_qos_param_index(unsigned long uclk_freq_khz, const struct dml2_dcn4_uclk_dpm_dependent_qos_params *per_uclk_dpm_params)
-{
- unsigned int i;
- unsigned int index = 0;
-
- for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
- dml2_printf("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %d\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz);
-
- if (i == 0)
- index = 0;
- else
- index = i - 1;
-
- if (uclk_freq_khz < per_uclk_dpm_params[i].minimum_uclk_khz ||
- per_uclk_dpm_params[i].minimum_uclk_khz == 0) {
- break;
- }
- }
-#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: uclk_freq_khz = %d\n", __func__, uclk_freq_khz);
- dml2_printf("DML::%s: index = %d\n", __func__, index);
-#endif
- return index;
-}
-
-static unsigned int get_active_min_uclk_dpm_index(unsigned long uclk_freq_khz, const struct dml2_soc_state_table *clk_table)
-{
- unsigned int i;
- bool clk_entry_found = 0;
-
- for (i = 0; i < clk_table->uclk.num_clk_values; i++) {
- dml2_printf("DML::%s: clk_table.uclk.clk_values_khz[%d] = %d\n", __func__, i, clk_table->uclk.clk_values_khz[i]);
-
- if (uclk_freq_khz == clk_table->uclk.clk_values_khz[i]) {
- clk_entry_found = 1;
- break;
- }
- }
-
- dml2_assert(clk_entry_found);
-#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
- dml2_printf("DML::%s: index = %d\n", __func__, i);
-#endif
- return i;
-}
-
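-/*
- * Bytes a single pipe must move on an immediate flip: the VM PTEs and two DPTE
- * rows (both scaled by the host-VM inefficiency factor) plus two meta rows.
- */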
-static unsigned int get_pipe_flip_bytes(
- double hostvm_inefficiency_factor,
- unsigned int vm_bytes,
- unsigned int dpte_row_bytes,
- unsigned int meta_row_bytes)
-{
- unsigned int flip_bytes = 0;
-
- flip_bytes += (unsigned int)((vm_bytes * hostvm_inefficiency_factor) + 2 * meta_row_bytes);
- flip_bytes += (unsigned int)(2 * dpte_row_bytes * hostvm_inefficiency_factor);
-
- return flip_bytes;
-}
-
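-/*
- * With both GPUVM and HostVM enabled, the inefficiency factor is the ratio of
- * urgent bandwidth available for pixel-and-VM traffic to that available for
- * VM-only traffic; the prefetch variant is raised to a floor of 4 when the
- * remote IOMMU's outstanding translations are below the max outstanding requests.
- */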
-static void calculate_hostvm_inefficiency_factor(
- double *HostVMInefficiencyFactor,
- double *HostVMInefficiencyFactorPrefetch,
-
- bool gpuvm_enable,
- bool hostvm_enable,
- unsigned int remote_iommu_outstanding_translations,
- unsigned int max_outstanding_reqs,
- double urg_bandwidth_avail_active_pixel_and_vm,
- double urg_bandwidth_avail_active_vm_only)
-{
- *HostVMInefficiencyFactor = 1;
- *HostVMInefficiencyFactorPrefetch = 1;
-
- if (gpuvm_enable && hostvm_enable) {
- *HostVMInefficiencyFactor = urg_bandwidth_avail_active_pixel_and_vm / urg_bandwidth_avail_active_vm_only;
- *HostVMInefficiencyFactorPrefetch = *HostVMInefficiencyFactor;
-
- if ((*HostVMInefficiencyFactorPrefetch < 4) && (remote_iommu_outstanding_translations < max_outstanding_reqs))
- *HostVMInefficiencyFactorPrefetch = 4;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: urg_bandwidth_avail_active_pixel_and_vm = %f\n", __func__, urg_bandwidth_avail_active_pixel_and_vm);
- dml2_printf("DML::%s: urg_bandwidth_avail_active_vm_only = %f\n", __func__, urg_bandwidth_avail_active_vm_only);
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, *HostVMInefficiencyFactor);
- dml2_printf("DML::%s: HostVMInefficiencyFactorPrefetch = %f\n", __func__, *HostVMInefficiencyFactorPrefetch);
-#endif
- }
-}
-
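-/*
- * Line delivery time is the swath width divided by the effective fetch rate:
- * display-limited (h_ratio * pixel clock across the DPPs used) when the vertical
- * ratio is <= 1, scaler-limited (PSCL throughput * DPPCLK) otherwise. Request
- * delivery time divides the line time by the number of requests per swath.
- */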
-static void CalculatePixelDeliveryTimes(
- const struct dml2_display_cfg *display_cfg,
- const struct core_display_cfg_support_info *cfg_support_info,
- unsigned int NumberOfActiveSurfaces,
- double VRatioPrefetchY[],
- double VRatioPrefetchC[],
- unsigned int swath_width_luma_ub[],
- unsigned int swath_width_chroma_ub[],
- double PSCL_THROUGHPUT[],
- double PSCL_THROUGHPUT_CHROMA[],
- double Dppclk[],
- unsigned int BytePerPixelC[],
- unsigned int req_per_swath_ub_l[],
- unsigned int req_per_swath_ub_c[],
-
- // Output
- double DisplayPipeLineDeliveryTimeLuma[],
- double DisplayPipeLineDeliveryTimeChroma[],
- double DisplayPipeLineDeliveryTimeLumaPrefetch[],
- double DisplayPipeLineDeliveryTimeChromaPrefetch[],
- double DisplayPipeRequestDeliveryTimeLuma[],
- double DisplayPipeRequestDeliveryTimeChroma[],
- double DisplayPipeRequestDeliveryTimeLumaPrefetch[],
- double DisplayPipeRequestDeliveryTimeChromaPrefetch[])
-{
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- double pixel_clock_mhz = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u : HRatio = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
- dml2_printf("DML::%s: k=%u : VRatio = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
- dml2_printf("DML::%s: k=%u : HRatioChroma = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio);
- dml2_printf("DML::%s: k=%u : VRatioChroma = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio);
- dml2_printf("DML::%s: k=%u : VRatioPrefetchY = %f\n", __func__, k, VRatioPrefetchY[k]);
- dml2_printf("DML::%s: k=%u : VRatioPrefetchC = %f\n", __func__, k, VRatioPrefetchC[k]);
- dml2_printf("DML::%s: k=%u : swath_width_luma_ub = %u\n", __func__, k, swath_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u : swath_width_chroma_ub = %u\n", __func__, k, swath_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u : PSCL_THROUGHPUT = %f\n", __func__, k, PSCL_THROUGHPUT[k]);
- dml2_printf("DML::%s: k=%u : PSCL_THROUGHPUT_CHROMA = %f\n", __func__, k, PSCL_THROUGHPUT_CHROMA[k]);
- dml2_printf("DML::%s: k=%u : DPPPerSurface = %u\n", __func__, k, cfg_support_info->plane_support_info[k].dpps_used);
- dml2_printf("DML::%s: k=%u : pixel_clock_mhz = %f\n", __func__, k, pixel_clock_mhz);
- dml2_printf("DML::%s: k=%u : Dppclk = %f\n", __func__, k, Dppclk[k]);
-#endif
- if (display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio <= 1) {
- DisplayPipeLineDeliveryTimeLuma[k] = swath_width_luma_ub[k] * cfg_support_info->plane_support_info[k].dpps_used / display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio / pixel_clock_mhz;
- } else {
- DisplayPipeLineDeliveryTimeLuma[k] = swath_width_luma_ub[k] / PSCL_THROUGHPUT[k] / Dppclk[k];
- }
-
- if (BytePerPixelC[k] == 0) {
- DisplayPipeLineDeliveryTimeChroma[k] = 0;
- } else {
- if (display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio <= 1) {
- DisplayPipeLineDeliveryTimeChroma[k] = swath_width_chroma_ub[k] * cfg_support_info->plane_support_info[k].dpps_used / display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio / pixel_clock_mhz;
- } else {
- DisplayPipeLineDeliveryTimeChroma[k] = swath_width_chroma_ub[k] / PSCL_THROUGHPUT_CHROMA[k] / Dppclk[k];
- }
- }
-
- if (VRatioPrefetchY[k] <= 1) {
- DisplayPipeLineDeliveryTimeLumaPrefetch[k] = swath_width_luma_ub[k] * cfg_support_info->plane_support_info[k].dpps_used / display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio / pixel_clock_mhz;
- } else {
- DisplayPipeLineDeliveryTimeLumaPrefetch[k] = swath_width_luma_ub[k] / PSCL_THROUGHPUT[k] / Dppclk[k];
- }
-
- if (BytePerPixelC[k] == 0) {
- DisplayPipeLineDeliveryTimeChromaPrefetch[k] = 0;
- } else {
- if (VRatioPrefetchC[k] <= 1) {
- DisplayPipeLineDeliveryTimeChromaPrefetch[k] = swath_width_chroma_ub[k] * cfg_support_info->plane_support_info[k].dpps_used / display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio / pixel_clock_mhz;
- } else {
- DisplayPipeLineDeliveryTimeChromaPrefetch[k] = swath_width_chroma_ub[k] / PSCL_THROUGHPUT_CHROMA[k] / Dppclk[k];
- }
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLuma[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLumaPrefetch[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChroma[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChromaPrefetch[k]);
-#endif
- }
-
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
-
- DisplayPipeRequestDeliveryTimeLuma[k] = DisplayPipeLineDeliveryTimeLuma[k] / req_per_swath_ub_l[k];
- DisplayPipeRequestDeliveryTimeLumaPrefetch[k] = DisplayPipeLineDeliveryTimeLumaPrefetch[k] / req_per_swath_ub_l[k];
- if (BytePerPixelC[k] == 0) {
- DisplayPipeRequestDeliveryTimeChroma[k] = 0;
- DisplayPipeRequestDeliveryTimeChromaPrefetch[k] = 0;
- } else {
- DisplayPipeRequestDeliveryTimeChroma[k] = DisplayPipeLineDeliveryTimeChroma[k] / req_per_swath_ub_c[k];
- DisplayPipeRequestDeliveryTimeChromaPrefetch[k] = DisplayPipeLineDeliveryTimeChromaPrefetch[k] / req_per_swath_ub_c[k];
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLuma[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLumaPrefetch[k]);
- dml2_printf("DML::%s: k=%u : req_per_swath_ub_l = %d\n", __func__, k, req_per_swath_ub_l[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChroma[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChromaPrefetch[k]);
- dml2_printf("DML::%s: k=%u : req_per_swath_ub_c = %d\n", __func__, k, req_per_swath_ub_c[k]);
-#endif
- }
-}
-
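-/*
- * Converts DPTE and meta row heights into destination lines per row (height /
- * v_ratio) and divides each row's nominal, vblank and flip fetch time by the
- * number of meta chunks per row; the DPTE group variables declared below
- * presumably serve the same purpose for the PTE rows.
- */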
-static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTETimes_params *p)
-{
- unsigned int meta_chunk_width;
- unsigned int min_meta_chunk_width;
- unsigned int meta_chunk_per_row_int;
- unsigned int meta_row_remainder;
- unsigned int meta_chunk_threshold;
- unsigned int meta_chunks_per_row_ub;
- unsigned int meta_chunk_width_chroma;
- unsigned int min_meta_chunk_width_chroma;
- unsigned int meta_chunk_per_row_int_chroma;
- unsigned int meta_row_remainder_chroma;
- unsigned int meta_chunk_threshold_chroma;
- unsigned int meta_chunks_per_row_ub_chroma;
- unsigned int dpte_group_width_luma;
- unsigned int dpte_groups_per_row_luma_ub;
- unsigned int dpte_group_width_chroma;
- unsigned int dpte_groups_per_row_chroma_ub;
- double pixel_clock_mhz;
-
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- p->DST_Y_PER_PTE_ROW_NOM_L[k] = p->dpte_row_height[k] / p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- if (p->BytePerPixelC[k] == 0) {
- p->DST_Y_PER_PTE_ROW_NOM_C[k] = 0;
- } else {
- p->DST_Y_PER_PTE_ROW_NOM_C[k] = p->dpte_row_height_chroma[k] / p->display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- }
- p->DST_Y_PER_META_ROW_NOM_L[k] = p->meta_row_height[k] / p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- if (p->BytePerPixelC[k] == 0) {
- p->DST_Y_PER_META_ROW_NOM_C[k] = 0;
- } else {
- p->DST_Y_PER_META_ROW_NOM_C[k] = p->meta_row_height_chroma[k] / p->display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- }
- }
-
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (p->display_cfg->plane_descriptors[k].surface.dcc.enable == true && p->mrq_present) {
- meta_chunk_width = p->MetaChunkSize * 1024 * 256 / p->BytePerPixelY[k] / p->meta_row_height[k];
- min_meta_chunk_width = p->MinMetaChunkSizeBytes * 256 / p->BytePerPixelY[k] / p->meta_row_height[k];
- meta_chunk_per_row_int = p->meta_row_width[k] / meta_chunk_width;
- meta_row_remainder = p->meta_row_width[k] % meta_chunk_width;
- if (!dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) {
- meta_chunk_threshold = 2 * min_meta_chunk_width - p->meta_req_width[k];
- } else {
- meta_chunk_threshold = 2 * min_meta_chunk_width - p->meta_req_height[k];
- }
- if (meta_row_remainder <= meta_chunk_threshold) {
- meta_chunks_per_row_ub = meta_chunk_per_row_int + 1;
- } else {
- meta_chunks_per_row_ub = meta_chunk_per_row_int + 2;
- }
- p->TimePerMetaChunkNominal[k] = p->meta_row_height[k] / p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio *
- p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total /
- (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000) / meta_chunks_per_row_ub;
- p->TimePerMetaChunkVBlank[k] = p->dst_y_per_row_vblank[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total /
- (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000) / meta_chunks_per_row_ub;
- p->TimePerMetaChunkFlip[k] = p->dst_y_per_row_flip[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total /
- (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000) / meta_chunks_per_row_ub;
- if (p->BytePerPixelC[k] == 0) {
- p->TimePerChromaMetaChunkNominal[k] = 0;
- p->TimePerChromaMetaChunkVBlank[k] = 0;
- p->TimePerChromaMetaChunkFlip[k] = 0;
- } else {
- meta_chunk_width_chroma = p->MetaChunkSize * 1024 * 256 / p->BytePerPixelC[k] / p->meta_row_height_chroma[k];
- min_meta_chunk_width_chroma = p->MinMetaChunkSizeBytes * 256 / p->BytePerPixelC[k] / p->meta_row_height_chroma[k];
- meta_chunk_per_row_int_chroma = (unsigned int)((double)p->meta_row_width_chroma[k] / meta_chunk_width_chroma);
- meta_row_remainder_chroma = p->meta_row_width_chroma[k] % meta_chunk_width_chroma;
- if (!dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) {
- meta_chunk_threshold_chroma = 2 * min_meta_chunk_width_chroma - p->meta_req_width_chroma[k];
- } else {
- meta_chunk_threshold_chroma = 2 * min_meta_chunk_width_chroma - p->meta_req_height_chroma[k];
- }
- if (meta_row_remainder_chroma <= meta_chunk_threshold_chroma) {
- meta_chunks_per_row_ub_chroma = meta_chunk_per_row_int_chroma + 1;
- } else {
- meta_chunks_per_row_ub_chroma = meta_chunk_per_row_int_chroma + 2;
- }
- p->TimePerChromaMetaChunkNominal[k] = p->meta_row_height_chroma[k] / p->display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000) / meta_chunks_per_row_ub_chroma;
- p->TimePerChromaMetaChunkVBlank[k] = p->dst_y_per_row_vblank[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000) / meta_chunks_per_row_ub_chroma;
- p->TimePerChromaMetaChunkFlip[k] = p->dst_y_per_row_flip[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000) / meta_chunks_per_row_ub_chroma;
- }
- } else {
- p->TimePerMetaChunkNominal[k] = 0;
- p->TimePerMetaChunkVBlank[k] = 0;
- p->TimePerMetaChunkFlip[k] = 0;
- p->TimePerChromaMetaChunkNominal[k] = 0;
- p->TimePerChromaMetaChunkVBlank[k] = 0;
- p->TimePerChromaMetaChunkFlip[k] = 0;
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, DST_Y_PER_META_ROW_NOM_L = %f\n", __func__, k, p->DST_Y_PER_META_ROW_NOM_L[k]);
- dml2_printf("DML::%s: k=%d, DST_Y_PER_META_ROW_NOM_C = %f\n", __func__, k, p->DST_Y_PER_META_ROW_NOM_C[k]);
- dml2_printf("DML::%s: k=%d, TimePerMetaChunkNominal = %f\n", __func__, k, p->TimePerMetaChunkNominal[k]);
- dml2_printf("DML::%s: k=%d, TimePerMetaChunkVBlank = %f\n", __func__, k, p->TimePerMetaChunkVBlank[k]);
- dml2_printf("DML::%s: k=%d, TimePerMetaChunkFlip = %f\n", __func__, k, p->TimePerMetaChunkFlip[k]);
- dml2_printf("DML::%s: k=%d, TimePerChromaMetaChunkNominal = %f\n", __func__, k, p->TimePerChromaMetaChunkNominal[k]);
- dml2_printf("DML::%s: k=%d, TimePerChromaMetaChunkVBlank = %f\n", __func__, k, p->TimePerChromaMetaChunkVBlank[k]);
- dml2_printf("DML::%s: k=%d, TimePerChromaMetaChunkFlip = %f\n", __func__, k, p->TimePerChromaMetaChunkFlip[k]);
-#endif
- }
-
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- p->DST_Y_PER_PTE_ROW_NOM_L[k] = p->dpte_row_height[k] / p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- if (p->BytePerPixelC[k] == 0) {
- p->DST_Y_PER_PTE_ROW_NOM_C[k] = 0;
- } else {
- p->DST_Y_PER_PTE_ROW_NOM_C[k] = p->dpte_row_height_chroma[k] / p->display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- }
- }
-
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- pixel_clock_mhz = ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
-
- if (p->display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut)
- p->time_per_tdlut_group[k] = 2 * p->dst_y_per_row_vblank[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / p->tdlut_groups_per_2row_ub[k];
- else
- p->time_per_tdlut_group[k] = 0;
-
- dml2_printf("DML::%s: k=%u, time_per_tdlut_group = %f\n", __func__, k, p->time_per_tdlut_group[k]);
-
- if (p->display_cfg->gpuvm_enable == true) {
- if (!dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) {
- dpte_group_width_luma = (unsigned int)((double)p->dpte_group_bytes[k] / (double)p->PTERequestSizeY[k] * p->PixelPTEReqWidthY[k]);
- } else {
- dpte_group_width_luma = (unsigned int)((double)p->dpte_group_bytes[k] / (double)p->PTERequestSizeY[k] * p->PixelPTEReqHeightY[k]);
- }
- if (p->use_one_row_for_frame[k]) {
- dpte_groups_per_row_luma_ub = (unsigned int)(math_ceil2((double)p->dpte_row_width_luma_ub[k] / (double)dpte_group_width_luma / 2.0, 1.0));
- } else {
- dpte_groups_per_row_luma_ub = (unsigned int)(math_ceil2((double)p->dpte_row_width_luma_ub[k] / (double)dpte_group_width_luma, 1.0));
- }
-
- if (dpte_groups_per_row_luma_ub <= 2) {
- dpte_groups_per_row_luma_ub = dpte_groups_per_row_luma_ub + 1;
- }
-
- dml2_printf("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]);
- dml2_printf("DML::%s: k=%u, dpte_group_bytes = %u\n", __func__, k, p->dpte_group_bytes[k]);
- dml2_printf("DML::%s: k=%u, PTERequestSizeY = %u\n", __func__, k, p->PTERequestSizeY[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEReqWidthY = %u\n", __func__, k, p->PixelPTEReqWidthY[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEReqHeightY = %u\n", __func__, k, p->PixelPTEReqHeightY[k]);
- dml2_printf("DML::%s: k=%u, dpte_row_width_luma_ub = %u\n", __func__, k, p->dpte_row_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u, dpte_group_width_luma = %u\n", __func__, k, dpte_group_width_luma);
- dml2_printf("DML::%s: k=%u, dpte_groups_per_row_luma_ub = %u\n", __func__, k, dpte_groups_per_row_luma_ub);
-
- p->time_per_pte_group_nom_luma[k] = p->DST_Y_PER_PTE_ROW_NOM_L[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_luma_ub;
- p->time_per_pte_group_vblank_luma[k] = p->dst_y_per_row_vblank[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_luma_ub;
- p->time_per_pte_group_flip_luma[k] = p->dst_y_per_row_flip[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_luma_ub;
- if (p->BytePerPixelC[k] == 0) {
- p->time_per_pte_group_nom_chroma[k] = 0;
- p->time_per_pte_group_vblank_chroma[k] = 0;
- p->time_per_pte_group_flip_chroma[k] = 0;
- } else {
- if (!dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) {
- dpte_group_width_chroma = (unsigned int)((double)p->dpte_group_bytes[k] / (double)p->PTERequestSizeC[k] * p->PixelPTEReqWidthC[k]);
- } else {
- dpte_group_width_chroma = (unsigned int)((double)p->dpte_group_bytes[k] / (double)p->PTERequestSizeC[k] * p->PixelPTEReqHeightC[k]);
- }
-
- if (p->use_one_row_for_frame[k]) {
- dpte_groups_per_row_chroma_ub = (unsigned int)(math_ceil2((double)p->dpte_row_width_chroma_ub[k] / (double)dpte_group_width_chroma / 2.0, 1.0));
- } else {
- dpte_groups_per_row_chroma_ub = (unsigned int)(math_ceil2((double)p->dpte_row_width_chroma_ub[k] / (double)dpte_group_width_chroma, 1.0));
- }
- if (dpte_groups_per_row_chroma_ub <= 2) {
- dpte_groups_per_row_chroma_ub = dpte_groups_per_row_chroma_ub + 1;
- }
- dml2_printf("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u, dpte_group_width_chroma = %u\n", __func__, k, dpte_group_width_chroma);
- dml2_printf("DML::%s: k=%u, dpte_groups_per_row_chroma_ub = %u\n", __func__, k, dpte_groups_per_row_chroma_ub);
-
- p->time_per_pte_group_nom_chroma[k] = p->DST_Y_PER_PTE_ROW_NOM_C[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_chroma_ub;
- p->time_per_pte_group_vblank_chroma[k] = p->dst_y_per_row_vblank[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_chroma_ub;
- p->time_per_pte_group_flip_chroma[k] = p->dst_y_per_row_flip[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_chroma_ub;
- }
- } else {
- p->time_per_pte_group_nom_luma[k] = 0;
- p->time_per_pte_group_vblank_luma[k] = 0;
- p->time_per_pte_group_flip_luma[k] = 0;
- p->time_per_pte_group_nom_chroma[k] = 0;
- p->time_per_pte_group_vblank_chroma[k] = 0;
- p->time_per_pte_group_flip_chroma[k] = 0;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, dst_y_per_row_vblank = %f\n", __func__, k, p->dst_y_per_row_vblank[k]);
- dml2_printf("DML::%s: k=%u, dst_y_per_row_flip = %f\n", __func__, k, p->dst_y_per_row_flip[k]);
-
- dml2_printf("DML::%s: k=%u, DST_Y_PER_PTE_ROW_NOM_L = %f\n", __func__, k, p->DST_Y_PER_PTE_ROW_NOM_L[k]);
- dml2_printf("DML::%s: k=%u, DST_Y_PER_PTE_ROW_NOM_C = %f\n", __func__, k, p->DST_Y_PER_PTE_ROW_NOM_C[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_nom_luma = %f\n", __func__, k, p->time_per_pte_group_nom_luma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_vblank_luma = %f\n", __func__, k, p->time_per_pte_group_vblank_luma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_flip_luma = %f\n", __func__, k, p->time_per_pte_group_flip_luma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_nom_chroma = %f\n", __func__, k, p->time_per_pte_group_nom_chroma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_vblank_chroma = %f\n", __func__, k, p->time_per_pte_group_vblank_chroma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_flip_chroma = %f\n", __func__, k, p->time_per_pte_group_flip_chroma[k]);
-#endif
- }
-} // CalculateMetaAndPTETimes
-
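For the nominal time-per-meta-chunk computed in CalculateMetaAndPTETimes above, the arithmetic is: one meta row covers meta_row_height / v_ratio destination lines, each line takes h_total / pixel_clock microseconds, and that budget is split over the upper-bound number of meta chunks in the row. A minimal worked example with assumed inputs (not taken from any real timing) is sketched below.

/* Worked example only; all inputs are assumed, not from a real configuration. */
#include <stdio.h>

int main(void)
{
	double meta_row_height = 32.0;		/* source lines covered by one meta row */
	double v_ratio = 1.0;			/* plane0 vertical scale ratio */
	double h_total = 2200.0;		/* pixels per line incl. blanking */
	double pixel_clock_mhz = 594.0;		/* ~4k60 pixel clock */
	double meta_chunks_per_row_ub = 9.0;	/* upper-bound chunks per meta row */

	double line_time_us = h_total / pixel_clock_mhz;
	double time_per_meta_chunk_us =
		meta_row_height / v_ratio * line_time_us / meta_chunks_per_row_ub;

	printf("time per meta chunk = %.3f us\n", time_per_meta_chunk_us);
	return 0;
}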
-static void CalculateVMGroupAndRequestTimes(
- const struct dml2_display_cfg *display_cfg,
- unsigned int NumberOfActiveSurfaces,
- unsigned int BytePerPixelC[],
- double dst_y_per_vm_vblank[],
- double dst_y_per_vm_flip[],
- unsigned int dpte_row_width_luma_ub[],
- unsigned int dpte_row_width_chroma_ub[],
- unsigned int vm_group_bytes[],
- unsigned int dpde0_bytes_per_frame_ub_l[],
- unsigned int dpde0_bytes_per_frame_ub_c[],
- unsigned int tdlut_pte_bytes_per_frame[],
- unsigned int meta_pte_bytes_per_frame_ub_l[],
- unsigned int meta_pte_bytes_per_frame_ub_c[],
- bool mrq_present,
-
- // Output
- double TimePerVMGroupVBlank[],
- double TimePerVMGroupFlip[],
- double TimePerVMRequestVBlank[],
- double TimePerVMRequestFlip[])
-{
- unsigned int num_group_per_lower_vm_stage = 1;
- unsigned int num_req_per_lower_vm_stage = 1;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
-#endif
- for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- double pixel_clock_mhz = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- bool dcc_mrq_enable = display_cfg->plane_descriptors[k].surface.dcc.enable && mrq_present;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, dcc_mrq_enable = %u\n", __func__, k, dcc_mrq_enable);
- dml2_printf("DML::%s: k=%u, vm_group_bytes = %u\n", __func__, k, vm_group_bytes[k]);
- dml2_printf("DML::%s: k=%u, dpde0_bytes_per_frame_ub_l = %u\n", __func__, k, dpde0_bytes_per_frame_ub_l[k]);
- dml2_printf("DML::%s: k=%u, dpde0_bytes_per_frame_ub_c = %u\n", __func__, k, dpde0_bytes_per_frame_ub_c[k]);
- dml2_printf("DML::%s: k=%d, meta_pte_bytes_per_frame_ub_l = %d\n", __func__, k, meta_pte_bytes_per_frame_ub_l[k]);
- dml2_printf("DML::%s: k=%d, meta_pte_bytes_per_frame_ub_c = %d\n", __func__, k, meta_pte_bytes_per_frame_ub_c[k]);
-#endif
-
- if (display_cfg->gpuvm_enable) {
- if (display_cfg->gpuvm_max_page_table_levels >= 2) {
- num_group_per_lower_vm_stage += (unsigned int)math_ceil2((double)(dpde0_bytes_per_frame_ub_l[k]) / (double)(vm_group_bytes[k]), 1);
-
- if (BytePerPixelC[k] > 0)
- num_group_per_lower_vm_stage += (unsigned int)math_ceil2((double)(dpde0_bytes_per_frame_ub_c[k]) / (double)(vm_group_bytes[k]), 1);
- }
-
- if (dcc_mrq_enable) {
- if (BytePerPixelC[k] > 0) {
- num_group_per_lower_vm_stage += (unsigned int)(2.0 /*for each mpde0 group*/ + math_ceil2((double)(meta_pte_bytes_per_frame_ub_l[k]) / (double)(vm_group_bytes[k]), 1) +
- math_ceil2((double)(meta_pte_bytes_per_frame_ub_c[k]) / (double)(vm_group_bytes[k]), 1));
- } else {
- num_group_per_lower_vm_stage += (unsigned int)(1.0 + math_ceil2((double)(meta_pte_bytes_per_frame_ub_l[k]) / (double)(vm_group_bytes[k]), 1));
- }
- }
-
- unsigned int num_group_per_lower_vm_stage_flip = num_group_per_lower_vm_stage;
- unsigned int num_group_per_lower_vm_stage_pref = num_group_per_lower_vm_stage;
-
- if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut && display_cfg->gpuvm_enable) {
- num_group_per_lower_vm_stage_pref += (unsigned int)math_ceil2((double)tdlut_pte_bytes_per_frame[k] / vm_group_bytes[k], 1);
- if (display_cfg->gpuvm_max_page_table_levels >= 2)
- num_group_per_lower_vm_stage_pref += 1; // tdpe0 group
- }
-
- if (display_cfg->gpuvm_max_page_table_levels >= 2) {
- num_req_per_lower_vm_stage += dpde0_bytes_per_frame_ub_l[k] / 64;
- if (BytePerPixelC[k] > 0)
- num_req_per_lower_vm_stage += dpde0_bytes_per_frame_ub_c[k] / 64;
- }
-
- if (dcc_mrq_enable) {
- num_req_per_lower_vm_stage += meta_pte_bytes_per_frame_ub_l[k] / 64;
- if (BytePerPixelC[k] > 0)
- num_req_per_lower_vm_stage += meta_pte_bytes_per_frame_ub_c[k] / 64;
- }
-
- unsigned int num_req_per_lower_vm_stage_flip = num_req_per_lower_vm_stage;
- unsigned int num_req_per_lower_vm_stage_pref = num_req_per_lower_vm_stage;
-
- if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut && display_cfg->gpuvm_enable) {
- num_req_per_lower_vm_stage_pref += tdlut_pte_bytes_per_frame[k] / 64;
- }
-
- double line_time = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz;
-
- if (num_group_per_lower_vm_stage_flip <= 2) {
- num_group_per_lower_vm_stage_flip = num_group_per_lower_vm_stage_flip + 1;
- }
-
- if (num_group_per_lower_vm_stage_pref <= 2) {
- num_group_per_lower_vm_stage_pref = num_group_per_lower_vm_stage_pref + 1;
- }
-
- TimePerVMGroupVBlank[k] = dst_y_per_vm_vblank[k] * line_time / num_group_per_lower_vm_stage_pref;
- TimePerVMGroupFlip[k] = dst_y_per_vm_flip[k] * line_time / num_group_per_lower_vm_stage_flip;
- TimePerVMRequestVBlank[k] = dst_y_per_vm_vblank[k] * line_time / num_req_per_lower_vm_stage_pref;
- TimePerVMRequestFlip[k] = dst_y_per_vm_flip[k] * line_time / num_req_per_lower_vm_stage_flip;
-
- dml2_printf("DML::%s: k=%u, dst_y_per_vm_vblank = %f\n", __func__, k, dst_y_per_vm_vblank[k]);
- dml2_printf("DML::%s: k=%u, dst_y_per_vm_flip = %f\n", __func__, k, dst_y_per_vm_flip[k]);
- dml2_printf("DML::%s: k=%u, line_time = %f\n", __func__, k, line_time);
- dml2_printf("DML::%s: k=%u, num_group_per_lower_vm_stage_pref = %f\n", __func__, k, num_group_per_lower_vm_stage_pref);
- dml2_printf("DML::%s: k=%u, num_group_per_lower_vm_stage_flip = %f\n", __func__, k, num_group_per_lower_vm_stage_flip);
- dml2_printf("DML::%s: k=%u, num_req_per_lower_vm_stage_pref = %f\n", __func__, k, num_req_per_lower_vm_stage_pref);
- dml2_printf("DML::%s: k=%u, num_req_per_lower_vm_stage_flip = %f\n", __func__, k, num_req_per_lower_vm_stage_flip);
-
- if (display_cfg->gpuvm_max_page_table_levels > 2) {
- TimePerVMGroupVBlank[k] = TimePerVMGroupVBlank[k] / 2;
- TimePerVMGroupFlip[k] = TimePerVMGroupFlip[k] / 2;
- TimePerVMRequestVBlank[k] = TimePerVMRequestVBlank[k] / 2;
- TimePerVMRequestFlip[k] = TimePerVMRequestFlip[k] / 2;
- }
-
- } else {
- TimePerVMGroupVBlank[k] = 0;
- TimePerVMGroupFlip[k] = 0;
- TimePerVMRequestVBlank[k] = 0;
- TimePerVMRequestFlip[k] = 0;
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, TimePerVMGroupVBlank = %f\n", __func__, k, TimePerVMGroupVBlank[k]);
- dml2_printf("DML::%s: k=%u, TimePerVMGroupFlip = %f\n", __func__, k, TimePerVMGroupFlip[k]);
- dml2_printf("DML::%s: k=%u, TimePerVMRequestVBlank = %f\n", __func__, k, TimePerVMRequestVBlank[k]);
- dml2_printf("DML::%s: k=%u, TimePerVMRequestFlip = %f\n", __func__, k, TimePerVMRequestFlip[k]);
-#endif
- }
-}
-
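CalculateVMGroupAndRequestTimes above spreads the VM fetch budget (dst_y_per_vm_* destination lines, converted to time through the line time h_total / pixel_clock) over the number of VM groups or requests in the lower page-table stage, and halves the result when more than two page-table levels are used. The toy calculation below reproduces only that final step, with invented values.

/* Simplified sketch with invented values; mirrors only the final time split above. */
#include <stdio.h>

int main(void)
{
	double dst_y_per_vm_vblank = 8.0;	/* destination lines budgeted for the VM fetch */
	double h_total = 2200.0;
	double pixel_clock_mhz = 594.0;
	unsigned int num_group_per_lower_vm_stage = 6;
	unsigned int gpuvm_max_page_table_levels = 4;

	double line_time_us = h_total / pixel_clock_mhz;
	double time_per_vm_group_us =
		dst_y_per_vm_vblank * line_time_us / num_group_per_lower_vm_stage;

	/* with more than two page-table levels the budget is split in half */
	if (gpuvm_max_page_table_levels > 2)
		time_per_vm_group_us /= 2.0;

	printf("time per VM group = %.3f us\n", time_per_vm_group_us);
	return 0;
}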
-static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratch,
- struct dml2_core_calcs_CalculateStutterEfficiency_params *p)
-{
- struct dml2_core_calcs_CalculateStutterEfficiency_locals *l = &scratch->CalculateStutterEfficiency_locals;
-
- memset(l, 0, sizeof(struct dml2_core_calcs_CalculateStutterEfficiency_locals));
-
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (!dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) {
- if (p->display_cfg->plane_descriptors[k].surface.dcc.enable == true) {
- if ((dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle) && p->BlockWidth256BytesY[k] > p->SwathHeightY[k]) || (!dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle) && p->BlockHeight256BytesY[k] > p->SwathHeightY[k]) || p->DCCYMaxUncompressedBlock[k] < 256) {
- l->MaximumEffectiveCompressionLuma = 2;
- } else {
- l->MaximumEffectiveCompressionLuma = 4;
- }
- l->TotalCompressedReadBandwidth = l->TotalCompressedReadBandwidth + p->ReadBandwidthSurfaceLuma[k] / math_min2(p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane0, l->MaximumEffectiveCompressionLuma);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, p->ReadBandwidthSurfaceLuma[k]);
- dml2_printf("DML::%s: k=%u, NetDCCRateLuma = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane0);
- dml2_printf("DML::%s: k=%u, MaximumEffectiveCompressionLuma = %f\n", __func__, k, l->MaximumEffectiveCompressionLuma);
-#endif
- l->TotalZeroSizeRequestReadBandwidth = l->TotalZeroSizeRequestReadBandwidth + p->ReadBandwidthSurfaceLuma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane0;
- l->TotalZeroSizeCompressedReadBandwidth = l->TotalZeroSizeCompressedReadBandwidth + p->ReadBandwidthSurfaceLuma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane0 / l->MaximumEffectiveCompressionLuma;
-
- if (p->ReadBandwidthSurfaceChroma[k] > 0) {
- if ((dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle) && p->BlockWidth256BytesC[k] > p->SwathHeightC[k]) || (!dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle) && p->BlockHeight256BytesC[k] > p->SwathHeightC[k]) || p->DCCCMaxUncompressedBlock[k] < 256) {
- l->MaximumEffectiveCompressionChroma = 2;
- } else {
- l->MaximumEffectiveCompressionChroma = 4;
- }
- l->TotalCompressedReadBandwidth = l->TotalCompressedReadBandwidth + p->ReadBandwidthSurfaceChroma[k] / math_min2(p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane1, l->MaximumEffectiveCompressionChroma);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceChroma = %f\n", __func__, k, p->ReadBandwidthSurfaceChroma[k]);
- dml2_printf("DML::%s: k=%u, NetDCCRateChroma = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane1);
- dml2_printf("DML::%s: k=%u, MaximumEffectiveCompressionChroma = %f\n", __func__, k, l->MaximumEffectiveCompressionChroma);
-#endif
- l->TotalZeroSizeRequestReadBandwidth = l->TotalZeroSizeRequestReadBandwidth + p->ReadBandwidthSurfaceChroma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane1;
- l->TotalZeroSizeCompressedReadBandwidth = l->TotalZeroSizeCompressedReadBandwidth + p->ReadBandwidthSurfaceChroma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane1 / l->MaximumEffectiveCompressionChroma;
- }
- } else {
- l->TotalCompressedReadBandwidth = l->TotalCompressedReadBandwidth + p->ReadBandwidthSurfaceLuma[k] + p->ReadBandwidthSurfaceChroma[k];
- }
- l->TotalRowReadBandwidth = l->TotalRowReadBandwidth + p->DPPPerSurface[k] * (p->meta_row_bw[k] + p->dpte_row_bw[k]);
- }
- }
-
- l->AverageDCCCompressionRate = p->TotalDataReadBandwidth / l->TotalCompressedReadBandwidth;
- l->AverageDCCZeroSizeFraction = l->TotalZeroSizeRequestReadBandwidth / p->TotalDataReadBandwidth;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: UnboundedRequestEnabled = %u\n", __func__, p->UnboundedRequestEnabled);
- dml2_printf("DML::%s: TotalCompressedReadBandwidth = %f\n", __func__, l->TotalCompressedReadBandwidth);
- dml2_printf("DML::%s: TotalZeroSizeRequestReadBandwidth = %f\n", __func__, l->TotalZeroSizeRequestReadBandwidth);
- dml2_printf("DML::%s: TotalZeroSizeCompressedReadBandwidth = %f\n", __func__, l->TotalZeroSizeCompressedReadBandwidth);
- dml2_printf("DML::%s: MaximumEffectiveCompressionLuma = %f\n", __func__, l->MaximumEffectiveCompressionLuma);
- dml2_printf("DML::%s: MaximumEffectiveCompressionChroma = %f\n", __func__, l->MaximumEffectiveCompressionChroma);
- dml2_printf("DML::%s: AverageDCCCompressionRate = %f\n", __func__, l->AverageDCCCompressionRate);
- dml2_printf("DML::%s: AverageDCCZeroSizeFraction = %f\n", __func__, l->AverageDCCZeroSizeFraction);
-
- dml2_printf("DML::%s: CompbufReservedSpace64B = %u (%f kbytes)\n", __func__, p->CompbufReservedSpace64B, p->CompbufReservedSpace64B * 64 / 1024.0);
- dml2_printf("DML::%s: CompbufReservedSpaceZs = %u\n", __func__, p->CompbufReservedSpaceZs);
- dml2_printf("DML::%s: CompressedBufferSizeInkByte = %u kbytes\n", __func__, p->CompressedBufferSizeInkByte);
- dml2_printf("DML::%s: ROBBufferSizeInKByte = %u kbytes\n", __func__, p->ROBBufferSizeInKByte);
-#endif
- if (l->AverageDCCZeroSizeFraction == 1) {
- l->AverageZeroSizeCompressionRate = l->TotalZeroSizeRequestReadBandwidth / l->TotalZeroSizeCompressedReadBandwidth;
- l->EffectiveCompressedBufferSize = (double)p->MetaFIFOSizeInKEntries * 1024 * 64 * l->AverageZeroSizeCompressionRate + ((double)p->ZeroSizeBufferEntries - p->CompbufReservedSpaceZs) * 64 * l->AverageZeroSizeCompressionRate;
-
-
- } else if (l->AverageDCCZeroSizeFraction > 0) {
- l->AverageZeroSizeCompressionRate = l->TotalZeroSizeRequestReadBandwidth / l->TotalZeroSizeCompressedReadBandwidth;
- l->EffectiveCompressedBufferSize = math_min2((double)p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate,
- (double)p->MetaFIFOSizeInKEntries * 1024 * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate + 1 / l->AverageDCCCompressionRate)) +
- (p->rob_alloc_compressed ? math_min2(((double)p->ROBBufferSizeInKByte * 1024 - p->CompbufReservedSpace64B * 64) * l->AverageDCCCompressionRate,
- ((double)p->ZeroSizeBufferEntries - p->CompbufReservedSpaceZs) * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate))
- : ((double)p->ROBBufferSizeInKByte * 1024 - p->CompbufReservedSpace64B * 64));
-
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: min 1 = %f\n", __func__, p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate);
- dml2_printf("DML::%s: min 2 = %f\n", __func__, p->MetaFIFOSizeInKEntries * 1024 * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate + 1 / l->AverageDCCCompressionRate));
- dml2_printf("DML::%s: min 3 = %d\n", __func__, (p->ROBBufferSizeInKByte * 1024 - p->CompbufReservedSpace64B * 64));
- dml2_printf("DML::%s: min 4 = %f\n", __func__, (p->ZeroSizeBufferEntries - p->CompbufReservedSpaceZs) * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate));
-#endif
- } else {
- l->EffectiveCompressedBufferSize = math_min2((double)p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate,
- (double)p->MetaFIFOSizeInKEntries * 1024 * 64 * l->AverageDCCCompressionRate) +
- ((double)p->ROBBufferSizeInKByte * 1024 - p->CompbufReservedSpace64B * 64) * (p->rob_alloc_compressed ? l->AverageDCCCompressionRate : 1.0);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: min 1 = %f\n", __func__, p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate);
- dml2_printf("DML::%s: min 2 = %f\n", __func__, p->MetaFIFOSizeInKEntries * 1024 * 64 * l->AverageDCCCompressionRate);
-#endif
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: MetaFIFOSizeInKEntries = %u\n", __func__, p->MetaFIFOSizeInKEntries);
- dml2_printf("DML::%s: AverageZeroSizeCompressionRate = %f\n", __func__, l->AverageZeroSizeCompressionRate);
- dml2_printf("DML::%s: EffectiveCompressedBufferSize = %f (%f kbytes)\n", __func__, l->EffectiveCompressedBufferSize, l->EffectiveCompressedBufferSize / 1024.0);
-#endif
-
- bool FoundCriticalSurface = false;
- *p->StutterPeriod = 0;
-
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (!dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) {
- l->LinesInDETY = ((double)p->DETBufferSizeY[k] + (p->UnboundedRequestEnabled == true ? l->EffectiveCompressedBufferSize : 0) * p->ReadBandwidthSurfaceLuma[k] / p->TotalDataReadBandwidth) / p->BytePerPixelDETY[k] / p->SwathWidthY[k];
- l->LinesInDETYRoundedDownToSwath = math_floor2(l->LinesInDETY, p->SwathHeightY[k]);
- l->DETBufferingTimeY = l->LinesInDETYRoundedDownToSwath * ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) / p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, DETBufferSizeY = %u (%u kbytes)\n", __func__, k, p->DETBufferSizeY[k], p->DETBufferSizeY[k] / 1024);
- dml2_printf("DML::%s: k=%u, BytePerPixelDETY = %f\n", __func__, k, p->BytePerPixelDETY[k]);
- dml2_printf("DML::%s: k=%u, SwathWidthY = %u\n", __func__, k, p->SwathWidthY[k]);
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, p->ReadBandwidthSurfaceLuma[k]);
- dml2_printf("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, p->TotalDataReadBandwidth);
- dml2_printf("DML::%s: k=%u, LinesInDETY = %f\n", __func__, k, l->LinesInDETY);
- dml2_printf("DML::%s: k=%u, LinesInDETYRoundedDownToSwath = %f\n", __func__, k, l->LinesInDETYRoundedDownToSwath);
- dml2_printf("DML::%s: k=%u, VRatio = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
- dml2_printf("DML::%s: k=%u, DETBufferingTimeY = %f\n", __func__, k, l->DETBufferingTimeY);
-#endif
-
- if (!FoundCriticalSurface || l->DETBufferingTimeY < *p->StutterPeriod) {
- bool isInterlaceTiming = p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.interlaced && !p->ProgressiveToInterlaceUnitInOPP;
-
- FoundCriticalSurface = true;
- *p->StutterPeriod = l->DETBufferingTimeY;
- l->FrameTimeCriticalSurface = (isInterlaceTiming ? math_floor2((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.v_total / 2.0, 1.0) : p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.v_total) * (double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- l->VActiveTimeCriticalSurface = (isInterlaceTiming ? math_floor2((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.v_active / 2.0, 1.0) : p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.v_active) * (double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- l->BytePerPixelYCriticalSurface = p->BytePerPixelY[k];
- l->SwathWidthYCriticalSurface = p->SwathWidthY[k];
- l->SwathHeightYCriticalSurface = p->SwathHeightY[k];
- l->BlockWidth256BytesYCriticalSurface = p->BlockWidth256BytesY[k];
- l->DETBufferSizeYCriticalSurface = p->DETBufferSizeY[k];
- l->MinTTUVBlankCriticalSurface = p->MinTTUVBlank[k];
- l->SinglePlaneCriticalSurface = (p->ReadBandwidthSurfaceChroma[k] == 0);
- l->SinglePipeCriticalSurface = (p->DPPPerSurface[k] == 1);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, FoundCriticalSurface = %u\n", __func__, k, FoundCriticalSurface);
- dml2_printf("DML::%s: k=%u, StutterPeriod = %f\n", __func__, k, *p->StutterPeriod);
- dml2_printf("DML::%s: k=%u, MinTTUVBlankCriticalSurface = %f\n", __func__, k, l->MinTTUVBlankCriticalSurface);
- dml2_printf("DML::%s: k=%u, FrameTimeCriticalSurface= %f\n", __func__, k, l->FrameTimeCriticalSurface);
- dml2_printf("DML::%s: k=%u, VActiveTimeCriticalSurface = %f\n", __func__, k, l->VActiveTimeCriticalSurface);
- dml2_printf("DML::%s: k=%u, BytePerPixelYCriticalSurface = %u\n", __func__, k, l->BytePerPixelYCriticalSurface);
- dml2_printf("DML::%s: k=%u, SwathWidthYCriticalSurface = %f\n", __func__, k, l->SwathWidthYCriticalSurface);
- dml2_printf("DML::%s: k=%u, SwathHeightYCriticalSurface = %f\n", __func__, k, l->SwathHeightYCriticalSurface);
- dml2_printf("DML::%s: k=%u, BlockWidth256BytesYCriticalSurface = %u\n", __func__, k, l->BlockWidth256BytesYCriticalSurface);
- dml2_printf("DML::%s: k=%u, SinglePlaneCriticalSurface = %u\n", __func__, k, l->SinglePlaneCriticalSurface);
- dml2_printf("DML::%s: k=%u, SinglePipeCriticalSurface = %u\n", __func__, k, l->SinglePipeCriticalSurface);
-#endif
- }
- }
- }
-
- // For bounded requests, the stutter period is calculated based only on the DET size, but during the burst some data can also be returned from the ROB/compressed buffer.
- // The stutter period is therefore based only on the DET sizing:
- // if (cdb + rob >= det), the stutter burst is absorbed by the CDB + ROB, which sits before the decompressor
- // else
- //   the CDB + ROB part is returned at the compressed rate with the urgent bandwidth (ideal bandwidth)
- //   the DET part is returned at the uncompressed rate of 64B/dcfclk
- //
- // For unbounded requests, the stutter period should be calculated as the total of CDB + ROB + DET, so the term "PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer"
- // should equal EffectiveCompressedBufferSize, which is returned at the compressed rate; the rest of the stutter period comes from the DET and is returned at the uncompressed rate of 64B/dcfclk.
-
- l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer = math_min2(*p->StutterPeriod * p->TotalDataReadBandwidth, l->EffectiveCompressedBufferSize);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: AverageDCCCompressionRate = %f\n", __func__, l->AverageDCCCompressionRate);
- dml2_printf("DML::%s: StutterPeriod*TotalDataReadBandwidth = %f (%f kbytes)\n", __func__, *p->StutterPeriod * p->TotalDataReadBandwidth, (*p->StutterPeriod * p->TotalDataReadBandwidth) / 1024.0);
- dml2_printf("DML::%s: EffectiveCompressedBufferSize = %f (%f kbytes)\n", __func__, l->EffectiveCompressedBufferSize, l->EffectiveCompressedBufferSize / 1024.0);
- dml2_printf("DML::%s: PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer = %f (%f kbytes)\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / 1024);
- dml2_printf("DML::%s: ReturnBW = %f\n", __func__, p->ReturnBW);
- dml2_printf("DML::%s: TotalDataReadBandwidth = %f\n", __func__, p->TotalDataReadBandwidth);
- dml2_printf("DML::%s: TotalRowReadBandwidth = %f\n", __func__, l->TotalRowReadBandwidth);
- dml2_printf("DML::%s: DCFCLK = %f\n", __func__, p->DCFCLK);
-#endif
-
- l->StutterBurstTime = l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer
- / (p->ReturnBW * (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate)) +
- (*p->StutterPeriod * p->TotalDataReadBandwidth - l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer)
- / math_max2(p->DCFCLK * 64, p->ReturnBW * (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate)) +
- *p->StutterPeriod * l->TotalRowReadBandwidth / p->ReturnBW;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Part 1 = %f\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / p->ReturnBW / (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate));
- dml2_printf("DML::%s: Part 2 = %f\n", __func__, (*p->StutterPeriod * p->TotalDataReadBandwidth - l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer) / (p->DCFCLK * 64));
- dml2_printf("DML::%s: Part 3 = %f\n", __func__, *p->StutterPeriod * l->TotalRowReadBandwidth / p->ReturnBW);
- dml2_printf("DML::%s: StutterBurstTime = %f\n", __func__, l->StutterBurstTime);
-#endif
-
- l->TotalActiveWriteback = 0;
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.enable) {
- l->TotalActiveWriteback = l->TotalActiveWriteback + 1;
- }
- }
-
- if (l->TotalActiveWriteback == 0) {
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SRExitTime = %f\n", __func__, p->SRExitTime);
- dml2_printf("DML::%s: SRExitZ8Time = %f\n", __func__, p->SRExitZ8Time);
- dml2_printf("DML::%s: StutterPeriod = %f\n", __func__, *p->StutterPeriod);
-#endif
- *p->StutterEfficiencyNotIncludingVBlank = math_max2(0., 1 - (p->SRExitTime + l->StutterBurstTime) / *p->StutterPeriod) * 100;
- *p->Z8StutterEfficiencyNotIncludingVBlank = math_max2(0., 1 - (p->SRExitZ8Time + l->StutterBurstTime) / *p->StutterPeriod) * 100;
- *p->NumberOfStutterBurstsPerFrame = (*p->StutterEfficiencyNotIncludingVBlank > 0 ? (unsigned int)(math_ceil2(l->VActiveTimeCriticalSurface / *p->StutterPeriod, 1)) : 0);
- *p->Z8NumberOfStutterBurstsPerFrame = (*p->Z8StutterEfficiencyNotIncludingVBlank > 0 ? (unsigned int)(math_ceil2(l->VActiveTimeCriticalSurface / *p->StutterPeriod, 1)) : 0);
- } else {
- *p->StutterEfficiencyNotIncludingVBlank = 0.;
- *p->Z8StutterEfficiencyNotIncludingVBlank = 0.;
- *p->NumberOfStutterBurstsPerFrame = 0;
- *p->Z8NumberOfStutterBurstsPerFrame = 0;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VActiveTimeCriticalSurface = %f\n", __func__, l->VActiveTimeCriticalSurface);
- dml2_printf("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->StutterEfficiencyNotIncludingVBlank);
- dml2_printf("DML::%s: Z8StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->Z8StutterEfficiencyNotIncludingVBlank);
- dml2_printf("DML::%s: NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->NumberOfStutterBurstsPerFrame);
- dml2_printf("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame);
-#endif
-
- unsigned int TotalNumberOfActiveOTG = 0;
- double SinglePixelClock = 0;
- unsigned int SingleHTotal = 0;
- unsigned int SingleVTotal = 0;
- bool SameTiming = true;
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (!dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) {
- if (p->display_cfg->plane_descriptors[k].stream_index == k) {
- if (TotalNumberOfActiveOTG == 0) {
- SinglePixelClock = ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- SingleHTotal = p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total;
- SingleVTotal = p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.v_total;
- } else if (SinglePixelClock != ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000) || SingleHTotal != p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total || SingleVTotal != p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.v_total) {
- SameTiming = false;
- }
- TotalNumberOfActiveOTG = TotalNumberOfActiveOTG + 1;
- }
- }
- }
-
- if (*p->StutterEfficiencyNotIncludingVBlank > 0) {
- if (!((p->SynchronizeTimings || TotalNumberOfActiveOTG == 1) && SameTiming)) {
- *p->StutterEfficiency = *p->StutterEfficiencyNotIncludingVBlank;
- } else {
- *p->StutterEfficiency = (1 - (*p->NumberOfStutterBurstsPerFrame * p->SRExitTime + l->StutterBurstTime * l->VActiveTimeCriticalSurface / *p->StutterPeriod) / l->FrameTimeCriticalSurface) * 100;
- }
- } else {
- *p->StutterEfficiency = 0;
- *p->NumberOfStutterBurstsPerFrame = 0;
- }
-
- double LastZ8StutterPeriod = 0.0;
-
- if (*p->Z8StutterEfficiencyNotIncludingVBlank > 0) {
- LastZ8StutterPeriod = l->VActiveTimeCriticalSurface - (*p->Z8NumberOfStutterBurstsPerFrame - 1) * *p->StutterPeriod;
- if (!((p->SynchronizeTimings || TotalNumberOfActiveOTG == 1) && SameTiming)) {
- *p->Z8StutterEfficiency = *p->Z8StutterEfficiencyNotIncludingVBlank;
- } else {
- *p->Z8StutterEfficiency = (1 - (*p->Z8NumberOfStutterBurstsPerFrame * p->SRExitZ8Time + l->StutterBurstTime * l->VActiveTimeCriticalSurface / *p->StutterPeriod) / l->FrameTimeCriticalSurface) * 100;
- }
- } else {
- *p->Z8StutterEfficiency = 0.;
- *p->Z8NumberOfStutterBurstsPerFrame = 0;
- }
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: LastZ8StutterPeriod = %f\n", __func__, LastZ8StutterPeriod);
- dml2_printf("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Z8StutterEnterPlusExitWatermark);
- dml2_printf("DML::%s: StutterBurstTime = %f\n", __func__, l->StutterBurstTime);
- dml2_printf("DML::%s: StutterPeriod = %f\n", __func__, *p->StutterPeriod);
- dml2_printf("DML::%s: StutterEfficiency = %f\n", __func__, *p->StutterEfficiency);
- dml2_printf("DML::%s: Z8StutterEfficiency = %f\n", __func__, *p->Z8StutterEfficiency);
- dml2_printf("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->StutterEfficiencyNotIncludingVBlank);
- dml2_printf("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame);
-#endif
-
-
- unsigned int SwathSizeCriticalSurface;
- unsigned int LastChunkOfSwathSize;
- unsigned int MissingPartOfLastSwathOfDETSize;
-
- SwathSizeCriticalSurface = (unsigned int)(l->BytePerPixelYCriticalSurface * l->SwathHeightYCriticalSurface * math_ceil2(l->SwathWidthYCriticalSurface, l->BlockWidth256BytesYCriticalSurface));
- LastChunkOfSwathSize = SwathSizeCriticalSurface % (p->PixelChunkSizeInKByte * 1024);
- MissingPartOfLastSwathOfDETSize = (unsigned int)(math_ceil2(l->DETBufferSizeYCriticalSurface, SwathSizeCriticalSurface) - l->DETBufferSizeYCriticalSurface);
-
- *p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = !(!p->UnboundedRequestEnabled && (p->NumberOfActiveSurfaces == 1) && l->SinglePlaneCriticalSurface && l->SinglePipeCriticalSurface && (LastChunkOfSwathSize > 0) &&
- (LastChunkOfSwathSize <= 4096) && (MissingPartOfLastSwathOfDETSize > 0) && (MissingPartOfLastSwathOfDETSize <= LastChunkOfSwathSize));
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SwathSizeCriticalSurface = %u\n", __func__, SwathSizeCriticalSurface);
- dml2_printf("DML::%s: DETBufferSizeYCriticalSurface = %u\n", __func__, l->DETBufferSizeYCriticalSurface);
- dml2_printf("DML::%s: PixelChunkSizeInKByte = %u\n", __func__, p->PixelChunkSizeInKByte);
- dml2_printf("DML::%s: LastChunkOfSwathSize = %u\n", __func__, LastChunkOfSwathSize);
- dml2_printf("DML::%s: MissingPartOfLastSwathOfDETSize = %u\n", __func__, MissingPartOfLastSwathOfDETSize);
- dml2_printf("DML::%s: DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = %u\n", __func__, *p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE);
-#endif
-}
-
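The stutter burst time in CalculateStutterEfficiency above is the sum of three terms: the part of the burst that fits in the ROB/compressed buffer, returned at the compressed return bandwidth; the remainder, returned at the slower of DCFCLK * 64 B/clk and the compressed return bandwidth; and the row (PTE/meta) traffic over the return bandwidth. Stutter efficiency is then 1 minus (self-refresh exit time + burst time) over the stutter period. The self-contained example below walks through that arithmetic with invented numbers and simplified helpers; it omits the row term and is not the DML implementation itself.

/* Toy walk-through of the stutter burst/efficiency arithmetic; values are invented. */
#include <stdio.h>

static double min2(double a, double b) { return a < b ? a : b; }
static double max2(double a, double b) { return a > b ? a : b; }

int main(void)
{
	double stutter_period_us = 100.0;	/* critical-surface DET buffering time */
	double total_read_bw_mbps = 8000.0;	/* total surface read bandwidth */
	double effective_comp_buf_bytes = 256.0 * 1024.0;
	double return_bw_mbps = 64000.0;	/* urgent return bandwidth */
	double avg_dcc_rate = 2.0;		/* average DCC compression rate */
	double dcfclk_mhz = 1200.0;
	double sr_exit_us = 10.0;

	/* us * MB/s = bytes, so this is the data read during one stutter period */
	double burst_bytes = stutter_period_us * total_read_bw_mbps;
	double in_buf_bytes = min2(burst_bytes, effective_comp_buf_bytes);

	double burst_us =
		in_buf_bytes / (return_bw_mbps * avg_dcc_rate) +
		(burst_bytes - in_buf_bytes) /
		max2(dcfclk_mhz * 64.0, return_bw_mbps * avg_dcc_rate);

	double efficiency_pct =
		max2(0.0, 1.0 - (sr_exit_us + burst_us) / stutter_period_us) * 100.0;

	printf("burst = %.2f us, stutter efficiency = %.1f %%\n", burst_us, efficiency_pct);
	return 0;
}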
-bool dml2_core_shared_mode_programming(struct dml2_core_calcs_mode_programming_ex *in_out_params)
-{
- const struct dml2_display_cfg *display_cfg = in_out_params->in_display_cfg;
- const struct dml2_mcg_min_clock_table *min_clk_table = in_out_params->min_clk_table;
- const struct core_display_cfg_support_info *cfg_support_info = in_out_params->cfg_support_info;
- struct dml2_core_internal_display_mode_lib *mode_lib = in_out_params->mode_lib;
- struct dml2_display_cfg_programming *programming = in_out_params->programming;
-
- struct dml2_core_calcs_mode_programming_locals *s = &mode_lib->scratch.dml_core_mode_programming_locals;
- struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params *CalculateWatermarks_params = &mode_lib->scratch.CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params;
- struct dml2_core_calcs_CalculateVMRowAndSwath_params *CalculateVMRowAndSwath_params = &mode_lib->scratch.CalculateVMRowAndSwath_params;
- struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params *CalculateSwathAndDETConfiguration_params = &mode_lib->scratch.CalculateSwathAndDETConfiguration_params;
- struct dml2_core_calcs_CalculateStutterEfficiency_params *CalculateStutterEfficiency_params = &mode_lib->scratch.CalculateStutterEfficiency_params;
- struct dml2_core_calcs_CalculatePrefetchSchedule_params *CalculatePrefetchSchedule_params = &mode_lib->scratch.CalculatePrefetchSchedule_params;
- struct dml2_core_calcs_calculate_mcache_setting_params *calculate_mcache_setting_params = &mode_lib->scratch.calculate_mcache_setting_params;
- struct dml2_core_calcs_calculate_tdlut_setting_params *calculate_tdlut_setting_params = &mode_lib->scratch.calculate_tdlut_setting_params;
- struct dml2_core_shared_CalculateMetaAndPTETimes_params *CalculateMetaAndPTETimes_params = &mode_lib->scratch.CalculateMetaAndPTETimes_params;
-
- unsigned int j, k;
-
- dml2_printf("DML::%s: --- START --- \n", __func__);
-
- memset(&mode_lib->mp, 0, sizeof(struct dml2_core_internal_mode_program));
-
- s->num_active_planes = display_cfg->num_planes;
- get_stream_output_bpp(s->OutputBpp, display_cfg);
-
- mode_lib->mp.num_active_pipes = dml_get_num_active_pipes(display_cfg->num_planes, cfg_support_info);
- dml_calc_pipe_plane_mapping(cfg_support_info, mode_lib->mp.pipe_plane);
-
- mode_lib->mp.Dcfclk = programming->min_clocks.dcn4x.active.dcfclk_khz / 1000.0;
- mode_lib->mp.FabricClock = programming->min_clocks.dcn4x.active.fclk_khz / 1000.0;
- mode_lib->mp.dram_bw_mbps = uclk_khz_to_dram_bw_mbps(programming->min_clocks.dcn4x.active.uclk_khz, &mode_lib->soc.clk_table.dram_config);
- mode_lib->mp.uclk_freq_mhz = programming->min_clocks.dcn4x.active.uclk_khz / 1000.0;
- mode_lib->mp.GlobalDPPCLK = programming->min_clocks.dcn4x.dpprefclk_khz / 1000.0;
- s->SOCCLK = (double)programming->min_clocks.dcn4x.socclk_khz / 1000;
- mode_lib->mp.qos_param_index = get_qos_param_index(programming->min_clocks.dcn4x.active.uclk_khz, mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params);
- mode_lib->mp.active_min_uclk_dpm_index = get_active_min_uclk_dpm_index(programming->min_clocks.dcn4x.active.uclk_khz, &mode_lib->soc.clk_table);
-
- for (k = 0; k < s->num_active_planes; ++k) {
- unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index;
- dml2_assert(cfg_support_info->stream_support_info[stream_index].odms_used <= 4);
- dml2_assert(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 4 ||
- cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 2 ||
- cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 1);
-
- if (cfg_support_info->stream_support_info[stream_index].odms_used > 1)
- dml2_assert(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 1);
-
- switch (cfg_support_info->stream_support_info[stream_index].odms_used) {
- case (4):
- mode_lib->mp.ODMMode[k] = dml2_odm_mode_combine_4to1;
- break;
- case (3):
- mode_lib->mp.ODMMode[k] = dml2_odm_mode_combine_3to1;
- break;
- case (2):
- mode_lib->mp.ODMMode[k] = dml2_odm_mode_combine_2to1;
- break;
- default:
- if (cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 4)
- mode_lib->mp.ODMMode[k] = dml2_odm_mode_mso_1to4;
- else if (cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 2)
- mode_lib->mp.ODMMode[k] = dml2_odm_mode_mso_1to2;
- else
- mode_lib->mp.ODMMode[k] = dml2_odm_mode_bypass;
- break;
- }
- }
-
- for (k = 0; k < s->num_active_planes; ++k) {
- mode_lib->mp.NoOfDPP[k] = cfg_support_info->plane_support_info[k].dpps_used;
- mode_lib->mp.Dppclk[k] = programming->plane_programming[k].min_clocks.dcn4x.dppclk_khz / 1000.0;
- dml2_assert(mode_lib->mp.Dppclk[k] > 0);
- }
-
- for (k = 0; k < s->num_active_planes; ++k) {
- unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index;
- mode_lib->mp.DSCCLK[k] = programming->stream_programming[stream_index].min_clocks.dcn4x.dscclk_khz / 1000.0;
- dml2_printf("DML::%s: k=%d stream_index=%d, mode_lib->mp.DSCCLK = %f\n", __func__, k, stream_index, mode_lib->mp.DSCCLK[k]);
- }
-
- mode_lib->mp.Dispclk = programming->min_clocks.dcn4x.dispclk_khz / 1000.0;
- mode_lib->mp.DCFCLKDeepSleep = programming->min_clocks.dcn4x.deepsleep_dcfclk_khz / 1000.0;
-
- dml2_assert(mode_lib->mp.Dcfclk > 0);
- dml2_assert(mode_lib->mp.FabricClock > 0);
- dml2_assert(mode_lib->mp.dram_bw_mbps > 0);
- dml2_assert(mode_lib->mp.uclk_freq_mhz > 0);
- dml2_assert(mode_lib->mp.GlobalDPPCLK > 0);
- dml2_assert(mode_lib->mp.Dispclk > 0);
- dml2_assert(mode_lib->mp.DCFCLKDeepSleep > 0);
- dml2_assert(s->SOCCLK > 0);
-
-#ifdef __DML_VBA_DEBUG__
- // dml2_printf_dml_display_cfg_timing(&display_cfg->timing, s->num_active_planes);
- // dml2_printf_dml_display_cfg_plane(&display_cfg->plane, s->num_active_planes);
- // dml2_printf_dml_display_cfg_surface(&display_cfg->surface, s->num_active_planes);
- // dml2_printf_dml_display_cfg_output(&display_cfg->output, s->num_active_planes);
- // dml2_printf_dml_display_cfg_hw_resource(&display_cfg->hw, s->num_active_planes);
-
- dml2_printf("DML::%s: num_active_planes = %u\n", __func__, s->num_active_planes);
- dml2_printf("DML::%s: num_active_pipes = %u\n", __func__, mode_lib->mp.num_active_pipes);
- dml2_printf("DML::%s: Dcfclk = %f\n", __func__, mode_lib->mp.Dcfclk);
- dml2_printf("DML::%s: FabricClock = %f\n", __func__, mode_lib->mp.FabricClock);
- dml2_printf("DML::%s: dram_bw_mbps = %f\n", __func__, mode_lib->mp.dram_bw_mbps);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, mode_lib->mp.uclk_freq_mhz);
- dml2_printf("DML::%s: Dispclk = %f\n", __func__, mode_lib->mp.Dispclk);
- for (k = 0; k < s->num_active_planes; ++k) {
- dml2_printf("DML::%s: Dppclk[%0d] = %f\n", __func__, k, mode_lib->mp.Dppclk[k]);
- }
- dml2_printf("DML::%s: GlobalDPPCLK = %f\n", __func__, mode_lib->mp.GlobalDPPCLK);
- dml2_printf("DML::%s: DCFCLKDeepSleep = %f\n", __func__, mode_lib->mp.DCFCLKDeepSleep);
- dml2_printf("DML::%s: SOCCLK = %f\n", __func__, s->SOCCLK);
- dml2_printf("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index);
- dml2_printf("DML::%s: min_clk_table min_fclk_khz = %d\n", __func__, min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_fclk_khz);
- dml2_printf("DML::%s: min_clk_table uclk_mhz = %f\n", __func__, dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config));
- for (k = 0; k < mode_lib->mp.num_active_pipes; ++k) {
- dml2_printf("DML::%s: pipe=%d is in plane=%d\n", __func__, k, mode_lib->mp.pipe_plane[k]);
- dml2_printf("DML::%s: Per-plane DPPPerSurface[%0d] = %d\n", __func__, k, mode_lib->mp.NoOfDPP[k]);
- }
-
- for (k = 0; k < s->num_active_planes; k++)
- dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
-#endif
-
- CalculateMaxDETAndMinCompressedBufferSize(
- mode_lib->ip.config_return_buffer_size_in_kbytes,
- mode_lib->ip.config_return_buffer_segment_size_in_kbytes,
- mode_lib->ip.rob_buffer_size_kbytes,
- mode_lib->ip.max_num_dpp,
- display_cfg->overrides.hw.force_nom_det_size_kbytes.enable,
- display_cfg->overrides.hw.force_nom_det_size_kbytes.value,
- mode_lib->ip.dcn_mrq_present,
-
- /* Output */
- &s->MaxTotalDETInKByte,
- &s->NomDETInKByte,
- &s->MinCompressedBufferSizeInKByte);
-
-
- PixelClockAdjustmentForProgressiveToInterlaceUnit(display_cfg, mode_lib->ip.ptoi_supported, s->PixelClockBackEnd);
-
- for (k = 0; k < s->num_active_planes; ++k) {
- CalculateSinglePipeDPPCLKAndSCLThroughput(
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
- mode_lib->ip.max_dchub_pscl_bw_pix_per_clk,
- mode_lib->ip.max_pscl_lb_bw_pix_per_clk,
- ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
- display_cfg->plane_descriptors[k].pixel_format,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_taps,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_taps,
-
- /* Output */
- &mode_lib->mp.PSCL_THROUGHPUT[k],
- &mode_lib->mp.PSCL_THROUGHPUT_CHROMA[k],
- &mode_lib->mp.DPPCLKUsingSingleDPP[k]);
- }
-
- for (k = 0; k < s->num_active_planes; ++k) {
- CalculateBytePerPixelAndBlockSizes(
- display_cfg->plane_descriptors[k].pixel_format,
- display_cfg->plane_descriptors[k].surface.tiling,
- display_cfg->plane_descriptors[k].surface.plane0.pitch,
- display_cfg->plane_descriptors[k].surface.plane1.pitch,
-
- // Output
- &mode_lib->mp.BytePerPixelY[k],
- &mode_lib->mp.BytePerPixelC[k],
- &mode_lib->mp.BytePerPixelInDETY[k],
- &mode_lib->mp.BytePerPixelInDETC[k],
- &mode_lib->mp.Read256BlockHeightY[k],
- &mode_lib->mp.Read256BlockHeightC[k],
- &mode_lib->mp.Read256BlockWidthY[k],
- &mode_lib->mp.Read256BlockWidthC[k],
- &mode_lib->mp.MacroTileHeightY[k],
- &mode_lib->mp.MacroTileHeightC[k],
- &mode_lib->mp.MacroTileWidthY[k],
- &mode_lib->mp.MacroTileWidthC[k],
- &mode_lib->mp.surf_linear128_l[k],
- &mode_lib->mp.surf_linear128_c[k]);
- }
-
- CalculateSwathWidth(
- display_cfg,
- false, // ForceSingleDPP
- s->num_active_planes,
- mode_lib->mp.ODMMode,
- mode_lib->mp.BytePerPixelY,
- mode_lib->mp.BytePerPixelC,
- mode_lib->mp.Read256BlockHeightY,
- mode_lib->mp.Read256BlockHeightC,
- mode_lib->mp.Read256BlockWidthY,
- mode_lib->mp.Read256BlockWidthC,
- mode_lib->mp.surf_linear128_l,
- mode_lib->mp.surf_linear128_c,
- mode_lib->mp.NoOfDPP,
-
- /* Output */
- mode_lib->mp.req_per_swath_ub_l,
- mode_lib->mp.req_per_swath_ub_c,
- mode_lib->mp.SwathWidthSingleDPPY,
- mode_lib->mp.SwathWidthSingleDPPC,
- mode_lib->mp.SwathWidthY,
- mode_lib->mp.SwathWidthC,
- s->dummy_integer_array[0], // unsigned int MaximumSwathHeightY[]
- s->dummy_integer_array[1], // unsigned int MaximumSwathHeightC[]
- mode_lib->mp.swath_width_luma_ub,
- mode_lib->mp.swath_width_chroma_ub);
-
- for (k = 0; k < s->num_active_planes; ++k) {
- mode_lib->mp.cursor_bw[k] = display_cfg->plane_descriptors[k].cursor.num_cursors * display_cfg->plane_descriptors[k].cursor.cursor_width * display_cfg->plane_descriptors[k].cursor.cursor_bpp / 8.0 /
- ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000));
- mode_lib->mp.SurfaceReadBandwidthLuma[k] = mode_lib->mp.SwathWidthSingleDPPY[k] * mode_lib->mp.BytePerPixelY[k] / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- mode_lib->mp.SurfaceReadBandwidthChroma[k] = mode_lib->mp.SwathWidthSingleDPPC[k] * mode_lib->mp.BytePerPixelC[k] / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- dml2_printf("DML::%s: ReadBandwidthSurfaceLuma[%i] = %fBps\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthLuma[k]);
- dml2_printf("DML::%s: ReadBandwidthSurfaceChroma[%i] = %fBps\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthChroma[k]);
- }
-
- CalculateSwathAndDETConfiguration_params->display_cfg = display_cfg;
- CalculateSwathAndDETConfiguration_params->ConfigReturnBufferSizeInKByte = mode_lib->ip.config_return_buffer_size_in_kbytes;
- CalculateSwathAndDETConfiguration_params->MaxTotalDETInKByte = s->MaxTotalDETInKByte;
- CalculateSwathAndDETConfiguration_params->MinCompressedBufferSizeInKByte = s->MinCompressedBufferSizeInKByte;
- CalculateSwathAndDETConfiguration_params->rob_buffer_size_kbytes = mode_lib->ip.rob_buffer_size_kbytes;
- CalculateSwathAndDETConfiguration_params->pixel_chunk_size_kbytes = mode_lib->ip.pixel_chunk_size_kbytes;
-
- CalculateSwathAndDETConfiguration_params->ForceSingleDPP = false;
- CalculateSwathAndDETConfiguration_params->NumberOfActiveSurfaces = s->num_active_planes;
- CalculateSwathAndDETConfiguration_params->nomDETInKByte = s->NomDETInKByte;
- CalculateSwathAndDETConfiguration_params->ConfigReturnBufferSegmentSizeInkByte = mode_lib->ip.config_return_buffer_segment_size_in_kbytes;
- CalculateSwathAndDETConfiguration_params->CompressedBufferSegmentSizeInkByte = mode_lib->ip.compressed_buffer_segment_size_in_kbytes;
- CalculateSwathAndDETConfiguration_params->ReadBandwidthLuma = mode_lib->mp.SurfaceReadBandwidthLuma;
- CalculateSwathAndDETConfiguration_params->ReadBandwidthChroma = mode_lib->mp.SurfaceReadBandwidthChroma;
- CalculateSwathAndDETConfiguration_params->MaximumSwathWidthLuma = s->dummy_single_array[0];
- CalculateSwathAndDETConfiguration_params->MaximumSwathWidthChroma = s->dummy_single_array[1];
- CalculateSwathAndDETConfiguration_params->Read256BytesBlockHeightY = mode_lib->mp.Read256BlockHeightY;
- CalculateSwathAndDETConfiguration_params->Read256BytesBlockHeightC = mode_lib->mp.Read256BlockHeightC;
- CalculateSwathAndDETConfiguration_params->Read256BytesBlockWidthY = mode_lib->mp.Read256BlockWidthY;
- CalculateSwathAndDETConfiguration_params->Read256BytesBlockWidthC = mode_lib->mp.Read256BlockWidthC;
- CalculateSwathAndDETConfiguration_params->surf_linear128_l = mode_lib->mp.surf_linear128_l;
- CalculateSwathAndDETConfiguration_params->surf_linear128_c = mode_lib->mp.surf_linear128_c;
- CalculateSwathAndDETConfiguration_params->ODMMode = mode_lib->mp.ODMMode;
- CalculateSwathAndDETConfiguration_params->DPPPerSurface = mode_lib->mp.NoOfDPP;
- CalculateSwathAndDETConfiguration_params->BytePerPixY = mode_lib->mp.BytePerPixelY;
- CalculateSwathAndDETConfiguration_params->BytePerPixC = mode_lib->mp.BytePerPixelC;
- CalculateSwathAndDETConfiguration_params->BytePerPixDETY = mode_lib->mp.BytePerPixelInDETY;
- CalculateSwathAndDETConfiguration_params->BytePerPixDETC = mode_lib->mp.BytePerPixelInDETC;
-
- // output
- CalculateSwathAndDETConfiguration_params->req_per_swath_ub_l = mode_lib->mp.req_per_swath_ub_l;
- CalculateSwathAndDETConfiguration_params->req_per_swath_ub_c = mode_lib->mp.req_per_swath_ub_c;
- CalculateSwathAndDETConfiguration_params->swath_width_luma_ub = s->dummy_long_array[0];
- CalculateSwathAndDETConfiguration_params->swath_width_chroma_ub = s->dummy_long_array[1];
- CalculateSwathAndDETConfiguration_params->SwathWidth = s->dummy_long_array[2];
- CalculateSwathAndDETConfiguration_params->SwathWidthChroma = s->dummy_long_array[3];
- CalculateSwathAndDETConfiguration_params->SwathHeightY = mode_lib->mp.SwathHeightY;
- CalculateSwathAndDETConfiguration_params->SwathHeightC = mode_lib->mp.SwathHeightC;
- CalculateSwathAndDETConfiguration_params->request_size_bytes_luma = mode_lib->mp.request_size_bytes_luma;
- CalculateSwathAndDETConfiguration_params->request_size_bytes_chroma = mode_lib->mp.request_size_bytes_chroma;
- CalculateSwathAndDETConfiguration_params->DETBufferSizeInKByte = mode_lib->mp.DETBufferSizeInKByte;
- CalculateSwathAndDETConfiguration_params->DETBufferSizeY = mode_lib->mp.DETBufferSizeY;
- CalculateSwathAndDETConfiguration_params->DETBufferSizeC = mode_lib->mp.DETBufferSizeC;
- CalculateSwathAndDETConfiguration_params->full_swath_bytes_l = s->full_swath_bytes_l;
- CalculateSwathAndDETConfiguration_params->full_swath_bytes_c = s->full_swath_bytes_c;
- CalculateSwathAndDETConfiguration_params->UnboundedRequestEnabled = &mode_lib->mp.UnboundedRequestEnabled;
- CalculateSwathAndDETConfiguration_params->compbuf_reserved_space_64b = &mode_lib->mp.compbuf_reserved_space_64b;
- CalculateSwathAndDETConfiguration_params->hw_debug5 = &mode_lib->mp.hw_debug5;
- CalculateSwathAndDETConfiguration_params->CompressedBufferSizeInkByte = &mode_lib->mp.CompressedBufferSizeInkByte;
- CalculateSwathAndDETConfiguration_params->ViewportSizeSupportPerSurface = &s->dummy_boolean_array[0][0];
- CalculateSwathAndDETConfiguration_params->ViewportSizeSupport = &s->dummy_boolean[0];
- CalculateSwathAndDETConfiguration_params->funcs = &mode_lib->funcs;
-
- // VBA_DELTA
- // Calculate DET size, swath height here. In VBA, they are calculated in mode check stage
- CalculateSwathAndDETConfiguration(&mode_lib->scratch, CalculateSwathAndDETConfiguration_params);
-
- // DSCCLK
- /*
- s->DSCFormatFactor = 0;
- for (k = 0; k < s->num_active_planes; ++k) {
- if ((display_cfg->plane_descriptors[k].stream_index != k) || !cfg_support_info->stream_support_info[display_cfg->plane_descriptors[k].stream_index].dsc_enable) {
- } else {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format == dml2_420)
- s->DSCFormatFactor = 2;
- else if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format == dml2_444)
- s->DSCFormatFactor = 1;
- else if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format == dml2_n422 ||
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder == dml2_hdmifrl)
- s->DSCFormatFactor = 2;
- else
- s->DSCFormatFactor = 1;
-
- s->PixelClockBackEndFactor = 3.0;
-
- if (mode_lib->mp.ODMMode[k] == dml2_odm_mode_combine_4to1)
- s->PixelClockBackEndFactor = 12.0;
- else if (mode_lib->mp.ODMMode[k] == dml2_odm_mode_combine_3to1)
- s->PixelClockBackEndFactor = 9.0;
- else if (mode_lib->mp.ODMMode[k] == dml2_odm_mode_combine_2to1)
- s->PixelClockBackEndFactor = 6.0;
-
- }
- #ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, DSCEnabled = %u\n", __func__, k, cfg_support_info->stream_support_info[display_cfg->plane_descriptors[k].stream_index].dsc_enable);
- dml2_printf("DML::%s: k=%u, BlendingAndTiming = %u\n", __func__, k, display_cfg->plane_descriptors[k].stream_index);
- dml2_printf("DML::%s: k=%u, PixelClockBackEndFactor = %f\n", __func__, k, s->PixelClockBackEndFactor);
- dml2_printf("DML::%s: k=%u, PixelClockBackEnd = %f\n", __func__, k, s->PixelClockBackEnd[k]);
- dml2_printf("DML::%s: k=%u, DSCFormatFactor = %u\n", __func__, k, s->DSCFormatFactor);
- dml2_printf("DML::%s: k=%u, DSCCLK = %f\n", __func__, k, mode_lib->mp.DSCCLK[k]);
- #endif
- }
- */
-
- // DSC Delay
- for (k = 0; k < s->num_active_planes; ++k) {
- mode_lib->mp.DSCDelay[k] = DSCDelayRequirement(cfg_support_info->stream_support_info[display_cfg->plane_descriptors[k].stream_index].dsc_enable,
- mode_lib->mp.ODMMode[k],
- mode_lib->ip.maximum_dsc_bits_per_component,
- s->OutputBpp[k],
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total,
- cfg_support_info->stream_support_info[display_cfg->plane_descriptors[k].stream_index].num_dsc_slices,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_encoder,
- ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
- s->PixelClockBackEnd[k]);
- }
-
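- // Propagate the DSC delay: planes sharing a DSC-enabled stream take the delay
- // computed for that stream's primary plane.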
- for (k = 0; k < s->num_active_planes; ++k)
- for (j = 0; j < s->num_active_planes; ++j) // NumberOfSurfaces
- if (j != k && display_cfg->plane_descriptors[k].stream_index == j && cfg_support_info->stream_support_info[display_cfg->plane_descriptors[j].stream_index].dsc_enable)
- mode_lib->mp.DSCDelay[k] = mode_lib->mp.DSCDelay[j];
-
- // Prefetch
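- // Surface footprint in MALL is zero when no MALL is allocated for DCN;
- // otherwise it is computed per surface by CalculateSurfaceSizeInMall().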
- if (mode_lib->soc.mall_allocated_for_dcn_mbytes == 0) {
- for (k = 0; k < s->num_active_planes; ++k)
- mode_lib->mp.SurfaceSizeInTheMALL[k] = 0;
- } else {
- CalculateSurfaceSizeInMall(
- display_cfg,
- s->num_active_planes,
- mode_lib->soc.mall_allocated_for_dcn_mbytes,
- mode_lib->mp.BytePerPixelY,
- mode_lib->mp.BytePerPixelC,
- mode_lib->mp.Read256BlockWidthY,
- mode_lib->mp.Read256BlockWidthC,
- mode_lib->mp.Read256BlockHeightY,
- mode_lib->mp.Read256BlockHeightC,
- mode_lib->mp.MacroTileWidthY,
- mode_lib->mp.MacroTileWidthC,
- mode_lib->mp.MacroTileHeightY,
- mode_lib->mp.MacroTileHeightC,
-
- /* Output */
- mode_lib->mp.SurfaceSizeInTheMALL,
- &s->dummy_boolean[0]); /* bool *ExceededMALLSize */
- }
-
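- // Gather per-plane surface/pipe parameters consumed by CalculateVMRowAndSwath below.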
- for (k = 0; k < s->num_active_planes; ++k) {
- s->SurfaceParameters[k].PixelClock = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- s->SurfaceParameters[k].DPPPerSurface = mode_lib->mp.NoOfDPP[k];
- s->SurfaceParameters[k].RotationAngle = display_cfg->plane_descriptors[k].composition.rotation_angle;
- s->SurfaceParameters[k].ViewportHeight = display_cfg->plane_descriptors[k].composition.viewport.plane0.height;
- s->SurfaceParameters[k].ViewportHeightC = display_cfg->plane_descriptors[k].composition.viewport.plane1.height;
- s->SurfaceParameters[k].BlockWidth256BytesY = mode_lib->mp.Read256BlockWidthY[k];
- s->SurfaceParameters[k].BlockHeight256BytesY = mode_lib->mp.Read256BlockHeightY[k];
- s->SurfaceParameters[k].BlockWidth256BytesC = mode_lib->mp.Read256BlockWidthC[k];
- s->SurfaceParameters[k].BlockHeight256BytesC = mode_lib->mp.Read256BlockHeightC[k];
- s->SurfaceParameters[k].BlockWidthY = mode_lib->mp.MacroTileWidthY[k];
- s->SurfaceParameters[k].BlockHeightY = mode_lib->mp.MacroTileHeightY[k];
- s->SurfaceParameters[k].BlockWidthC = mode_lib->mp.MacroTileWidthC[k];
- s->SurfaceParameters[k].BlockHeightC = mode_lib->mp.MacroTileHeightC[k];
- s->SurfaceParameters[k].InterlaceEnable = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced;
- s->SurfaceParameters[k].HTotal = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total;
- s->SurfaceParameters[k].DCCEnable = display_cfg->plane_descriptors[k].surface.dcc.enable;
- s->SurfaceParameters[k].SourcePixelFormat = display_cfg->plane_descriptors[k].pixel_format;
- s->SurfaceParameters[k].SurfaceTiling = display_cfg->plane_descriptors[k].surface.tiling;
- s->SurfaceParameters[k].BytePerPixelY = mode_lib->mp.BytePerPixelY[k];
- s->SurfaceParameters[k].BytePerPixelC = mode_lib->mp.BytePerPixelC[k];
- s->SurfaceParameters[k].ProgressiveToInterlaceUnitInOPP = mode_lib->ip.ptoi_supported;
- s->SurfaceParameters[k].VRatio = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- s->SurfaceParameters[k].VRatioChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- s->SurfaceParameters[k].VTaps = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps;
- s->SurfaceParameters[k].VTapsChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps;
- s->SurfaceParameters[k].PitchY = display_cfg->plane_descriptors[k].surface.plane0.pitch;
- s->SurfaceParameters[k].PitchC = display_cfg->plane_descriptors[k].surface.plane1.pitch;
- s->SurfaceParameters[k].ViewportStationary = display_cfg->plane_descriptors[k].composition.viewport.stationary;
- s->SurfaceParameters[k].ViewportXStart = display_cfg->plane_descriptors[k].composition.viewport.plane0.x_start;
- s->SurfaceParameters[k].ViewportYStart = display_cfg->plane_descriptors[k].composition.viewport.plane0.y_start;
- s->SurfaceParameters[k].ViewportXStartC = display_cfg->plane_descriptors[k].composition.viewport.plane1.x_start;
- s->SurfaceParameters[k].ViewportYStartC = display_cfg->plane_descriptors[k].composition.viewport.plane1.y_start;
- s->SurfaceParameters[k].FORCE_ONE_ROW_FOR_FRAME = display_cfg->plane_descriptors[k].overrides.hw.force_one_row_for_frame;
- s->SurfaceParameters[k].SwathHeightY = mode_lib->mp.SwathHeightY[k];
- s->SurfaceParameters[k].SwathHeightC = mode_lib->mp.SwathHeightC[k];
- s->SurfaceParameters[k].DCCMetaPitchY = display_cfg->plane_descriptors[k].surface.dcc.plane0.pitch;
- s->SurfaceParameters[k].DCCMetaPitchC = display_cfg->plane_descriptors[k].surface.dcc.plane1.pitch;
- }
-
- CalculateVMRowAndSwath_params->display_cfg = display_cfg;
- CalculateVMRowAndSwath_params->NumberOfActiveSurfaces = s->num_active_planes;
- CalculateVMRowAndSwath_params->myPipe = s->SurfaceParameters;
- CalculateVMRowAndSwath_params->SurfaceSizeInMALL = mode_lib->mp.SurfaceSizeInTheMALL;
- CalculateVMRowAndSwath_params->PTEBufferSizeInRequestsLuma = mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma;
- CalculateVMRowAndSwath_params->PTEBufferSizeInRequestsChroma = mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma;
- CalculateVMRowAndSwath_params->MALLAllocatedForDCN = mode_lib->soc.mall_allocated_for_dcn_mbytes;
- CalculateVMRowAndSwath_params->SwathWidthY = mode_lib->mp.SwathWidthY;
- CalculateVMRowAndSwath_params->SwathWidthC = mode_lib->mp.SwathWidthC;
- CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes;
- CalculateVMRowAndSwath_params->DCCMetaBufferSizeBytes = mode_lib->ip.dcc_meta_buffer_size_bytes;
- CalculateVMRowAndSwath_params->mrq_present = mode_lib->ip.dcn_mrq_present;
-
- // output
- CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = s->dummy_boolean_array[0];
- CalculateVMRowAndSwath_params->dpte_row_width_luma_ub = mode_lib->mp.dpte_row_width_luma_ub;
- CalculateVMRowAndSwath_params->dpte_row_width_chroma_ub = mode_lib->mp.dpte_row_width_chroma_ub;
- CalculateVMRowAndSwath_params->dpte_row_height_luma = mode_lib->mp.dpte_row_height;
- CalculateVMRowAndSwath_params->dpte_row_height_chroma = mode_lib->mp.dpte_row_height_chroma;
- CalculateVMRowAndSwath_params->dpte_row_height_linear_luma = mode_lib->mp.dpte_row_height_linear;
- CalculateVMRowAndSwath_params->dpte_row_height_linear_chroma = mode_lib->mp.dpte_row_height_linear_chroma;
- CalculateVMRowAndSwath_params->vm_group_bytes = mode_lib->mp.vm_group_bytes;
- CalculateVMRowAndSwath_params->dpte_group_bytes = mode_lib->mp.dpte_group_bytes;
- CalculateVMRowAndSwath_params->PixelPTEReqWidthY = mode_lib->mp.PixelPTEReqWidthY;
- CalculateVMRowAndSwath_params->PixelPTEReqHeightY = mode_lib->mp.PixelPTEReqHeightY;
- CalculateVMRowAndSwath_params->PTERequestSizeY = mode_lib->mp.PTERequestSizeY;
- CalculateVMRowAndSwath_params->PixelPTEReqWidthC = mode_lib->mp.PixelPTEReqWidthC;
- CalculateVMRowAndSwath_params->PixelPTEReqHeightC = mode_lib->mp.PixelPTEReqHeightC;
- CalculateVMRowAndSwath_params->PTERequestSizeC = mode_lib->mp.PTERequestSizeC;
- CalculateVMRowAndSwath_params->vmpg_width_y = s->vmpg_width_y;
- CalculateVMRowAndSwath_params->vmpg_height_y = s->vmpg_height_y;
- CalculateVMRowAndSwath_params->vmpg_width_c = s->vmpg_width_c;
- CalculateVMRowAndSwath_params->vmpg_height_c = s->vmpg_height_c;
- CalculateVMRowAndSwath_params->dpde0_bytes_per_frame_ub_l = mode_lib->mp.dpde0_bytes_per_frame_ub_l;
- CalculateVMRowAndSwath_params->dpde0_bytes_per_frame_ub_c = mode_lib->mp.dpde0_bytes_per_frame_ub_c;
- CalculateVMRowAndSwath_params->PrefetchSourceLinesY = mode_lib->mp.PrefetchSourceLinesY;
- CalculateVMRowAndSwath_params->PrefetchSourceLinesC = mode_lib->mp.PrefetchSourceLinesC;
- CalculateVMRowAndSwath_params->VInitPreFillY = mode_lib->mp.VInitPreFillY;
- CalculateVMRowAndSwath_params->VInitPreFillC = mode_lib->mp.VInitPreFillC;
- CalculateVMRowAndSwath_params->MaxNumSwathY = mode_lib->mp.MaxNumSwathY;
- CalculateVMRowAndSwath_params->MaxNumSwathC = mode_lib->mp.MaxNumSwathC;
- CalculateVMRowAndSwath_params->dpte_row_bw = mode_lib->mp.dpte_row_bw;
- CalculateVMRowAndSwath_params->PixelPTEBytesPerRow = mode_lib->mp.PixelPTEBytesPerRow;
- CalculateVMRowAndSwath_params->vm_bytes = mode_lib->mp.vm_bytes;
- CalculateVMRowAndSwath_params->use_one_row_for_frame = mode_lib->mp.use_one_row_for_frame;
- CalculateVMRowAndSwath_params->use_one_row_for_frame_flip = mode_lib->mp.use_one_row_for_frame_flip;
- CalculateVMRowAndSwath_params->is_using_mall_for_ss = mode_lib->mp.is_using_mall_for_ss;
- CalculateVMRowAndSwath_params->PTE_BUFFER_MODE = mode_lib->mp.PTE_BUFFER_MODE;
- CalculateVMRowAndSwath_params->BIGK_FRAGMENT_SIZE = mode_lib->mp.BIGK_FRAGMENT_SIZE;
- CalculateVMRowAndSwath_params->DCCMetaBufferSizeNotExceeded = s->dummy_boolean_array[1];
- CalculateVMRowAndSwath_params->meta_row_bw = mode_lib->mp.meta_row_bw;
- CalculateVMRowAndSwath_params->meta_row_bytes = mode_lib->mp.meta_row_bytes;
- CalculateVMRowAndSwath_params->meta_req_width_luma = mode_lib->mp.meta_req_width;
- CalculateVMRowAndSwath_params->meta_req_height_luma = mode_lib->mp.meta_req_height;
- CalculateVMRowAndSwath_params->meta_row_width_luma = mode_lib->mp.meta_row_width;
- CalculateVMRowAndSwath_params->meta_row_height_luma = mode_lib->mp.meta_row_height;
- CalculateVMRowAndSwath_params->meta_pte_bytes_per_frame_ub_l = mode_lib->mp.meta_pte_bytes_per_frame_ub_l;
- CalculateVMRowAndSwath_params->meta_req_width_chroma = mode_lib->mp.meta_req_width_chroma;
- CalculateVMRowAndSwath_params->meta_row_height_chroma = mode_lib->mp.meta_row_height_chroma;
- CalculateVMRowAndSwath_params->meta_row_width_chroma = mode_lib->mp.meta_row_width_chroma;
- CalculateVMRowAndSwath_params->meta_req_height_chroma = mode_lib->mp.meta_req_height_chroma;
- CalculateVMRowAndSwath_params->meta_pte_bytes_per_frame_ub_c = mode_lib->mp.meta_pte_bytes_per_frame_ub_c;
-
- CalculateVMRowAndSwath(&mode_lib->scratch, CalculateVMRowAndSwath_params);
-
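- // mcache settings: with no MALL allocation or with MRQ present, the DCC/MALL DRAM
- // bandwidth overhead factors default to 1.0; otherwise they are derived per plane
- // from the mcache configuration.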
- memset(calculate_mcache_setting_params, 0, sizeof(struct dml2_core_calcs_calculate_mcache_setting_params));
- if (mode_lib->soc.mall_allocated_for_dcn_mbytes == 0 || mode_lib->ip.dcn_mrq_present) {
- for (k = 0; k < s->num_active_planes; k++) {
- mode_lib->mp.mall_prefetch_sdp_overhead_factor[k] = 1.0;
- mode_lib->mp.mall_prefetch_dram_overhead_factor[k] = 1.0;
- mode_lib->mp.dcc_dram_bw_nom_overhead_factor_p0[k] = 1.0;
- mode_lib->mp.dcc_dram_bw_pref_overhead_factor_p0[k] = 1.0;
- mode_lib->mp.dcc_dram_bw_nom_overhead_factor_p1[k] = 1.0;
- mode_lib->mp.dcc_dram_bw_pref_overhead_factor_p1[k] = 1.0;
- }
- } else {
- for (k = 0; k < s->num_active_planes; k++) {
- calculate_mcache_setting_params->dcc_enable = display_cfg->plane_descriptors[k].surface.dcc.enable;
- calculate_mcache_setting_params->num_chans = mode_lib->soc.clk_table.dram_config.channel_count;
- calculate_mcache_setting_params->mem_word_bytes = mode_lib->soc.mem_word_bytes;
- calculate_mcache_setting_params->mcache_size_bytes = mode_lib->soc.mcache_size_bytes;
- calculate_mcache_setting_params->mcache_line_size_bytes = mode_lib->soc.mcache_line_size_bytes;
- calculate_mcache_setting_params->gpuvm_enable = display_cfg->gpuvm_enable;
- calculate_mcache_setting_params->gpuvm_page_size_kbytes = display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes;
-
- calculate_mcache_setting_params->source_format = display_cfg->plane_descriptors[k].pixel_format;
- calculate_mcache_setting_params->surf_vert = dml_is_vertical_rotation(display_cfg->plane_descriptors[k].composition.rotation_angle);
- calculate_mcache_setting_params->vp_stationary = display_cfg->plane_descriptors[k].composition.viewport.stationary;
- calculate_mcache_setting_params->tiling_mode = display_cfg->plane_descriptors[k].surface.tiling;
- calculate_mcache_setting_params->imall_enable = mode_lib->ip.imall_supported && display_cfg->plane_descriptors[k].overrides.legacy_svp_config == dml2_svp_mode_override_imall;
-
- calculate_mcache_setting_params->vp_start_x_l = display_cfg->plane_descriptors[k].composition.viewport.plane0.x_start;
- calculate_mcache_setting_params->vp_start_y_l = display_cfg->plane_descriptors[k].composition.viewport.plane0.y_start;
- calculate_mcache_setting_params->full_vp_width_l = display_cfg->plane_descriptors[k].composition.viewport.plane0.width;
- calculate_mcache_setting_params->full_vp_height_l = display_cfg->plane_descriptors[k].composition.viewport.plane0.height;
- calculate_mcache_setting_params->blk_width_l = mode_lib->mp.MacroTileWidthY[k];
- calculate_mcache_setting_params->blk_height_l = mode_lib->mp.MacroTileHeightY[k];
- calculate_mcache_setting_params->vmpg_width_l = s->vmpg_width_y[k];
- calculate_mcache_setting_params->vmpg_height_l = s->vmpg_height_y[k];
- calculate_mcache_setting_params->full_swath_bytes_l = s->full_swath_bytes_l[k];
- calculate_mcache_setting_params->bytes_per_pixel_l = mode_lib->mp.BytePerPixelY[k];
-
- calculate_mcache_setting_params->vp_start_x_c = display_cfg->plane_descriptors[k].composition.viewport.plane1.x_start;
- calculate_mcache_setting_params->vp_start_y_c = display_cfg->plane_descriptors[k].composition.viewport.plane1.y_start;
- calculate_mcache_setting_params->full_vp_width_c = display_cfg->plane_descriptors[k].composition.viewport.plane1.width;
- calculate_mcache_setting_params->full_vp_height_c = display_cfg->plane_descriptors[k].composition.viewport.plane1.height;
- calculate_mcache_setting_params->blk_width_c = mode_lib->mp.MacroTileWidthC[k];
- calculate_mcache_setting_params->blk_height_c = mode_lib->mp.MacroTileHeightC[k];
- calculate_mcache_setting_params->vmpg_width_c = s->vmpg_width_c[k];
- calculate_mcache_setting_params->vmpg_height_c = s->vmpg_height_c[k];
- calculate_mcache_setting_params->full_swath_bytes_c = s->full_swath_bytes_c[k];
- calculate_mcache_setting_params->bytes_per_pixel_c = mode_lib->mp.BytePerPixelC[k];
-
- // output
- calculate_mcache_setting_params->dcc_dram_bw_nom_overhead_factor_l = &mode_lib->mp.dcc_dram_bw_nom_overhead_factor_p0[k];
- calculate_mcache_setting_params->dcc_dram_bw_pref_overhead_factor_l = &mode_lib->mp.dcc_dram_bw_pref_overhead_factor_p0[k];
- calculate_mcache_setting_params->dcc_dram_bw_nom_overhead_factor_c = &mode_lib->mp.dcc_dram_bw_nom_overhead_factor_p1[k];
- calculate_mcache_setting_params->dcc_dram_bw_pref_overhead_factor_c = &mode_lib->mp.dcc_dram_bw_pref_overhead_factor_p1[k];
-
- calculate_mcache_setting_params->num_mcaches_l = &mode_lib->mp.num_mcaches_l[k];
- calculate_mcache_setting_params->mcache_row_bytes_l = &mode_lib->mp.mcache_row_bytes_l[k];
- calculate_mcache_setting_params->mcache_offsets_l = mode_lib->mp.mcache_offsets_l[k];
- calculate_mcache_setting_params->mcache_shift_granularity_l = &mode_lib->mp.mcache_shift_granularity_l[k];
-
- calculate_mcache_setting_params->num_mcaches_c = &mode_lib->mp.num_mcaches_c[k];
- calculate_mcache_setting_params->mcache_row_bytes_c = &mode_lib->mp.mcache_row_bytes_c[k];
- calculate_mcache_setting_params->mcache_offsets_c = mode_lib->mp.mcache_offsets_c[k];
- calculate_mcache_setting_params->mcache_shift_granularity_c = &mode_lib->mp.mcache_shift_granularity_c[k];
-
- calculate_mcache_setting_params->mall_comb_mcache_l = &mode_lib->mp.mall_comb_mcache_l[k];
- calculate_mcache_setting_params->mall_comb_mcache_c = &mode_lib->mp.mall_comb_mcache_c[k];
- calculate_mcache_setting_params->lc_comb_mcache = &mode_lib->mp.lc_comb_mcache[k];
- calculate_mcache_setting(&mode_lib->scratch, calculate_mcache_setting_params);
- }
-
- calculate_mall_bw_overhead_factor(
- mode_lib->mp.mall_prefetch_sdp_overhead_factor,
- mode_lib->mp.mall_prefetch_dram_overhead_factor,
-
- // input
- display_cfg,
- s->num_active_planes);
- }
-
- // Calculate all the available bandwidth
- calculate_bandwidth_available(
- mode_lib->mp.avg_bandwidth_available_min,
- mode_lib->mp.avg_bandwidth_available,
- mode_lib->mp.urg_bandwidth_available_min,
- mode_lib->mp.urg_bandwidth_available,
- mode_lib->mp.urg_bandwidth_available_vm_only,
- mode_lib->mp.urg_bandwidth_available_pixel_and_vm,
-
- &mode_lib->soc,
- display_cfg->hostvm_enable,
- mode_lib->mp.Dcfclk,
- mode_lib->mp.FabricClock,
- mode_lib->mp.dram_bw_mbps);
-
-
- calculate_hostvm_inefficiency_factor(
- &s->HostVMInefficiencyFactor,
- &s->HostVMInefficiencyFactorPrefetch,
-
- display_cfg->gpuvm_enable,
- display_cfg->hostvm_enable,
- mode_lib->ip.remote_iommu_outstanding_translations,
- mode_lib->soc.max_outstanding_reqs,
- mode_lib->mp.urg_bandwidth_available_pixel_and_vm[dml2_core_internal_soc_state_sys_active],
- mode_lib->mp.urg_bandwidth_available_vm_only[dml2_core_internal_soc_state_sys_active]);
-
- s->TotalDCCActiveDPP = 0;
- s->TotalActiveDPP = 0;
- for (k = 0; k < s->num_active_planes; ++k) {
- s->TotalActiveDPP = s->TotalActiveDPP + mode_lib->mp.NoOfDPP[k];
- if (display_cfg->plane_descriptors[k].surface.dcc.enable)
- s->TotalDCCActiveDPP = s->TotalDCCActiveDPP + mode_lib->mp.NoOfDPP[k];
- }
- // Calculate tdlut schedule related terms
- for (k = 0; k < s->num_active_planes; k++) {
- calculate_tdlut_setting_params->dispclk_mhz = mode_lib->mp.Dispclk;
- calculate_tdlut_setting_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut;
- calculate_tdlut_setting_params->tdlut_width_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_width_mode;
- calculate_tdlut_setting_params->tdlut_addressing_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_addressing_mode;
- calculate_tdlut_setting_params->cursor_buffer_size = mode_lib->ip.cursor_buffer_size;
- calculate_tdlut_setting_params->gpuvm_enable = display_cfg->gpuvm_enable;
- calculate_tdlut_setting_params->gpuvm_page_size_kbytes = display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes;
-
- // output
- calculate_tdlut_setting_params->tdlut_pte_bytes_per_frame = &s->tdlut_pte_bytes_per_frame[k];
- calculate_tdlut_setting_params->tdlut_bytes_per_frame = &s->tdlut_bytes_per_frame[k];
- calculate_tdlut_setting_params->tdlut_groups_per_2row_ub = &s->tdlut_groups_per_2row_ub[k];
- calculate_tdlut_setting_params->tdlut_opt_time = &s->tdlut_opt_time[k];
- calculate_tdlut_setting_params->tdlut_drain_time = &s->tdlut_drain_time[k];
- calculate_tdlut_setting_params->tdlut_bytes_per_group = &s->tdlut_bytes_per_group[k];
-
- calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params);
- }
-
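- // DCN3.x QoS only: worst-case urgent out-of-order return bytes, scaled by the
- // DRAM channel count, used as extra reordering latency below.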
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn3)
- s->ReorderingBytes = (unsigned int)(mode_lib->soc.clk_table.dram_config.channel_count * math_max3(mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_only_bytes,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_vm_only_bytes));
-
- CalculateExtraLatency(
- display_cfg,
- mode_lib->ip.rob_buffer_size_kbytes,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles,
- s->ReorderingBytes,
- mode_lib->mp.Dcfclk,
- mode_lib->mp.FabricClock,
- mode_lib->ip.pixel_chunk_size_kbytes,
- mode_lib->mp.urg_bandwidth_available_min[dml2_core_internal_soc_state_sys_active],
- s->num_active_planes,
- mode_lib->mp.NoOfDPP,
- mode_lib->mp.dpte_group_bytes,
- s->tdlut_bytes_per_group,
- s->HostVMInefficiencyFactor,
- s->HostVMInefficiencyFactorPrefetch,
- mode_lib->soc.hostvm_min_page_size_kbytes,
- mode_lib->soc.qos_parameters.qos_type,
- !(display_cfg->overrides.max_outstanding_when_urgent_expected_disable),
- mode_lib->soc.max_outstanding_reqs,
- mode_lib->mp.request_size_bytes_luma,
- mode_lib->mp.request_size_bytes_chroma,
- mode_lib->ip.meta_chunk_size_kbytes,
- mode_lib->ip.dchub_arb_to_ret_delay,
- mode_lib->mp.TripToMemory,
- mode_lib->ip.hostvm_mode,
-
- // output
- &mode_lib->mp.ExtraLatency,
- &mode_lib->mp.ExtraLatency_sr,
- &mode_lib->mp.ExtraLatencyPrefetch);
-
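- // TCalc: time for 24 DCFCLK deep-sleep cycles (clock in MHz, result in us).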
- mode_lib->mp.TCalc = 24.0 / mode_lib->mp.DCFCLKDeepSleep;
-
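- // Writeback delay per stream: base writeback latency plus the scaled writeback
- // delay normalized by DISPCLK, maximized over all planes writing back to the stream.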
- for (k = 0; k < s->num_active_planes; ++k) {
- if (display_cfg->plane_descriptors[k].stream_index == k) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
- mode_lib->mp.WritebackDelay[k] =
- mode_lib->soc.qos_parameters.writeback.base_latency_us
- + CalculateWriteBackDelay(
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total) / mode_lib->mp.Dispclk;
- } else
- mode_lib->mp.WritebackDelay[k] = 0;
-
- for (j = 0; j < s->num_active_planes; ++j) {
- if (display_cfg->plane_descriptors[j].stream_index == k
- && display_cfg->stream_descriptors[display_cfg->plane_descriptors[j].stream_index].writeback.enable == true) {
- mode_lib->mp.WritebackDelay[k] =
- math_max2(
- mode_lib->mp.WritebackDelay[k],
- mode_lib->soc.qos_parameters.writeback.base_latency_us
- + CalculateWriteBackDelay(
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[j].stream_index].writeback.pixel_format,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[j].stream_index].writeback.scaling_info.h_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[j].stream_index].writeback.scaling_info.v_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[j].stream_index].writeback.scaling_info.v_taps,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[j].stream_index].writeback.scaling_info.output_width,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[j].stream_index].writeback.scaling_info.output_height,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[j].stream_index].writeback.scaling_info.input_height,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total) / mode_lib->mp.Dispclk);
- }
- }
- }
- }
-
- for (k = 0; k < s->num_active_planes; ++k)
- for (j = 0; j < s->num_active_planes; ++j)
- if (display_cfg->plane_descriptors[k].stream_index == j)
- mode_lib->mp.WritebackDelay[k] = mode_lib->mp.WritebackDelay[j];
-
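- // Urgent latency, trip-to-memory and meta trip-to-memory come from the SoC QoS
- // parameters for the current uclk DPM state; trip-to-memory is never allowed to be
- // less than the urgent latency.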
- mode_lib->mp.UrgentLatency = CalculateUrgentLatency(
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_us,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_pixel_vm_us,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_vm_us,
- mode_lib->soc.do_urgent_latency_adjustment,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_fclk_us,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_mhz,
- mode_lib->mp.FabricClock,
- mode_lib->mp.uclk_freq_mhz,
- mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].urgent_ramp_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_urgent_ramp_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
-
- mode_lib->mp.TripToMemory = CalculateTripToMemory(
- mode_lib->mp.UrgentLatency,
- mode_lib->mp.FabricClock,
- mode_lib->mp.uclk_freq_mhz,
- mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].trip_to_memory_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
-
- mode_lib->mp.TripToMemory = math_max2(mode_lib->mp.UrgentLatency, mode_lib->mp.TripToMemory);
-
- mode_lib->mp.MetaTripToMemory = CalculateMetaTripToMemory(
- mode_lib->mp.UrgentLatency,
- mode_lib->mp.FabricClock,
- mode_lib->mp.uclk_freq_mhz,
- mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].meta_trip_to_memory_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.meta_trip_adder_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
-
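- // Per-plane cursor request attributes and urgent burst factors; the prefetch cursor
- // burst factor starts out equal to the active one.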
- for (k = 0; k < s->num_active_planes; ++k) {
- calculate_cursor_req_attributes(
- display_cfg->plane_descriptors[k].cursor.cursor_width,
- display_cfg->plane_descriptors[k].cursor.cursor_bpp,
-
- // output
- &s->cursor_lines_per_chunk[k],
- &s->cursor_bytes_per_line[k],
- &s->cursor_bytes_per_chunk[k],
- &s->cursor_bytes[k]);
-
- bool cursor_not_enough_urgent_latency_hiding = false;
- double line_time_us = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
-
- calculate_cursor_urgent_burst_factor(
- mode_lib->ip.cursor_buffer_size,
- display_cfg->plane_descriptors[k].cursor.cursor_width,
- s->cursor_bytes_per_chunk[k],
- s->cursor_lines_per_chunk[k],
- line_time_us,
- mode_lib->mp.UrgentLatency,
-
- // output
- &mode_lib->mp.UrgentBurstFactorCursor[k],
- &cursor_not_enough_urgent_latency_hiding);
- mode_lib->mp.UrgentBurstFactorCursorPre[k] = mode_lib->mp.UrgentBurstFactorCursor[k];
-
- CalculateUrgentBurstFactor(
- &display_cfg->plane_descriptors[k],
- mode_lib->mp.swath_width_luma_ub[k],
- mode_lib->mp.swath_width_chroma_ub[k],
- mode_lib->mp.SwathHeightY[k],
- mode_lib->mp.SwathHeightC[k],
- line_time_us,
- mode_lib->mp.UrgentLatency,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
- mode_lib->mp.BytePerPixelInDETY[k],
- mode_lib->mp.BytePerPixelInDETC[k],
- mode_lib->mp.DETBufferSizeY[k],
- mode_lib->mp.DETBufferSizeC[k],
-
- /* output */
- &mode_lib->mp.UrgentBurstFactorLuma[k],
- &mode_lib->mp.UrgentBurstFactorChroma[k],
- &mode_lib->mp.NotEnoughUrgentLatencyHiding[k]);
-
- mode_lib->mp.NotEnoughUrgentLatencyHiding[k] = mode_lib->mp.NotEnoughUrgentLatencyHiding[k] || cursor_not_enough_urgent_latency_hiding;
- }
-
- for (k = 0; k < s->num_active_planes; ++k) {
- s->MaxVStartupLines[k] = CalculateMaxVStartup(
- mode_lib->ip.ptoi_supported,
- mode_lib->ip.vblank_nom_default_us,
- &display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing,
- mode_lib->mp.WritebackDelay[k]);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
- dml2_printf("DML::%s: k=%u WritebackDelay = %f\n", __func__, k, mode_lib->mp.WritebackDelay[k]);
-#endif
- }
-
- s->immediate_flip_required = false;
- for (k = 0; k < s->num_active_planes; ++k) {
- s->immediate_flip_required = s->immediate_flip_required || display_cfg->plane_descriptors[k].immediate_flip;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: immediate_flip_required = %u\n", __func__, s->immediate_flip_required);
-#endif
-
- {
- s->DestinationLineTimesForPrefetchLessThan2 = false;
- s->VRatioPrefetchMoreThanMax = false;
-
- dml2_printf("DML::%s: Start one iteration of prefetch schedule evaluation\n", __func__);
-
- for (k = 0; k < s->num_active_planes; ++k) {
- dml2_printf("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
-
- mode_lib->mp.TWait[k] = CalculateTWait(
- display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
- mode_lib->mp.UrgentLatency,
- mode_lib->mp.TripToMemory);
-
- struct dml2_core_internal_DmlPipe *myPipe = &s->myPipe;
- myPipe->Dppclk = mode_lib->mp.Dppclk[k];
- myPipe->Dispclk = mode_lib->mp.Dispclk;
- myPipe->PixelClock = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- myPipe->DCFClkDeepSleep = mode_lib->mp.DCFCLKDeepSleep;
- myPipe->DPPPerSurface = mode_lib->mp.NoOfDPP[k];
- myPipe->ScalerEnabled = display_cfg->plane_descriptors[k].composition.scaler_info.enabled;
- myPipe->VRatio = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- myPipe->VRatioChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- myPipe->VTaps = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps;
- myPipe->VTapsChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps;
- myPipe->RotationAngle = display_cfg->plane_descriptors[k].composition.rotation_angle;
- myPipe->mirrored = display_cfg->plane_descriptors[k].composition.mirrored;
- myPipe->BlockWidth256BytesY = mode_lib->mp.Read256BlockWidthY[k];
- myPipe->BlockHeight256BytesY = mode_lib->mp.Read256BlockHeightY[k];
- myPipe->BlockWidth256BytesC = mode_lib->mp.Read256BlockWidthC[k];
- myPipe->BlockHeight256BytesC = mode_lib->mp.Read256BlockHeightC[k];
- myPipe->InterlaceEnable = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced;
- myPipe->NumberOfCursors = display_cfg->plane_descriptors[k].cursor.num_cursors;
- myPipe->VBlank = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active;
- myPipe->HTotal = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total;
- myPipe->HActive = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active;
- myPipe->DCCEnable = display_cfg->plane_descriptors[k].surface.dcc.enable;
- myPipe->ODMMode = mode_lib->mp.ODMMode[k];
- myPipe->SourcePixelFormat = display_cfg->plane_descriptors[k].pixel_format;
- myPipe->BytePerPixelY = mode_lib->mp.BytePerPixelY[k];
- myPipe->BytePerPixelC = mode_lib->mp.BytePerPixelC[k];
- myPipe->ProgressiveToInterlaceUnitInOPP = mode_lib->ip.ptoi_supported;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Calling CalculatePrefetchSchedule for k=%u\n", __func__, k);
-#endif
- CalculatePrefetchSchedule_params->display_cfg = display_cfg;
- CalculatePrefetchSchedule_params->HostVMInefficiencyFactor = s->HostVMInefficiencyFactorPrefetch;
- CalculatePrefetchSchedule_params->myPipe = myPipe;
- CalculatePrefetchSchedule_params->DSCDelay = mode_lib->mp.DSCDelay[k];
- CalculatePrefetchSchedule_params->DPPCLKDelaySubtotalPlusCNVCFormater = mode_lib->ip.dppclk_delay_subtotal + mode_lib->ip.dppclk_delay_cnvc_formatter;
- CalculatePrefetchSchedule_params->DPPCLKDelaySCL = mode_lib->ip.dppclk_delay_scl;
- CalculatePrefetchSchedule_params->DPPCLKDelaySCLLBOnly = mode_lib->ip.dppclk_delay_scl_lb_only;
- CalculatePrefetchSchedule_params->DPPCLKDelayCNVCCursor = mode_lib->ip.dppclk_delay_cnvc_cursor;
- CalculatePrefetchSchedule_params->DISPCLKDelaySubtotal = mode_lib->ip.dispclk_delay_subtotal;
- CalculatePrefetchSchedule_params->DPP_RECOUT_WIDTH = (unsigned int)(mode_lib->mp.SwathWidthY[k] / display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
- CalculatePrefetchSchedule_params->OutputFormat = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format;
- CalculatePrefetchSchedule_params->MaxInterDCNTileRepeaters = mode_lib->ip.max_inter_dcn_tile_repeaters;
- CalculatePrefetchSchedule_params->VStartup = s->MaxVStartupLines[k];
- CalculatePrefetchSchedule_params->MaxVStartup = s->MaxVStartupLines[k];
- CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes;
- CalculatePrefetchSchedule_params->DynamicMetadataEnable = display_cfg->plane_descriptors[k].dynamic_meta_data.enable;
- CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ip.dynamic_metadata_vm_enabled;
- CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = display_cfg->plane_descriptors[k].dynamic_meta_data.lines_before_active_required;
- CalculatePrefetchSchedule_params->DynamicMetadataTransmittedBytes = display_cfg->plane_descriptors[k].dynamic_meta_data.transmitted_bytes;
- CalculatePrefetchSchedule_params->UrgentLatency = mode_lib->mp.UrgentLatency;
- CalculatePrefetchSchedule_params->ExtraLatencyPrefetch = mode_lib->mp.ExtraLatencyPrefetch;
- CalculatePrefetchSchedule_params->TCalc = mode_lib->mp.TCalc;
- CalculatePrefetchSchedule_params->vm_bytes = mode_lib->mp.vm_bytes[k];
- CalculatePrefetchSchedule_params->PixelPTEBytesPerRow = mode_lib->mp.PixelPTEBytesPerRow[k];
- CalculatePrefetchSchedule_params->PrefetchSourceLinesY = mode_lib->mp.PrefetchSourceLinesY[k];
- CalculatePrefetchSchedule_params->VInitPreFillY = mode_lib->mp.VInitPreFillY[k];
- CalculatePrefetchSchedule_params->MaxNumSwathY = mode_lib->mp.MaxNumSwathY[k];
- CalculatePrefetchSchedule_params->PrefetchSourceLinesC = mode_lib->mp.PrefetchSourceLinesC[k];
- CalculatePrefetchSchedule_params->VInitPreFillC = mode_lib->mp.VInitPreFillC[k];
- CalculatePrefetchSchedule_params->MaxNumSwathC = mode_lib->mp.MaxNumSwathC[k];
- CalculatePrefetchSchedule_params->swath_width_luma_ub = mode_lib->mp.swath_width_luma_ub[k];
- CalculatePrefetchSchedule_params->swath_width_chroma_ub = mode_lib->mp.swath_width_chroma_ub[k];
- CalculatePrefetchSchedule_params->SwathHeightY = mode_lib->mp.SwathHeightY[k];
- CalculatePrefetchSchedule_params->SwathHeightC = mode_lib->mp.SwathHeightC[k];
- CalculatePrefetchSchedule_params->TWait = mode_lib->mp.TWait[k];
- CalculatePrefetchSchedule_params->Ttrip = mode_lib->mp.TripToMemory;
- CalculatePrefetchSchedule_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut;
- CalculatePrefetchSchedule_params->tdlut_pte_bytes_per_frame = s->tdlut_pte_bytes_per_frame[k];
- CalculatePrefetchSchedule_params->tdlut_bytes_per_frame = s->tdlut_bytes_per_frame[k];
- CalculatePrefetchSchedule_params->tdlut_opt_time = s->tdlut_opt_time[k];
- CalculatePrefetchSchedule_params->tdlut_drain_time = s->tdlut_drain_time[k];
- CalculatePrefetchSchedule_params->num_cursors = (display_cfg->plane_descriptors[k].cursor.cursor_width > 0);
- CalculatePrefetchSchedule_params->cursor_bytes_per_chunk = s->cursor_bytes_per_chunk[k];
- CalculatePrefetchSchedule_params->cursor_bytes_per_line = s->cursor_bytes_per_line[k];
- CalculatePrefetchSchedule_params->dcc_enable = display_cfg->plane_descriptors[k].surface.dcc.enable;
- CalculatePrefetchSchedule_params->mrq_present = mode_lib->ip.dcn_mrq_present;
- CalculatePrefetchSchedule_params->meta_row_bytes = mode_lib->mp.meta_row_bytes[k];
- CalculatePrefetchSchedule_params->mall_prefetch_sdp_overhead_factor = mode_lib->mp.mall_prefetch_sdp_overhead_factor[k];
-
- // output
- CalculatePrefetchSchedule_params->DSTXAfterScaler = &mode_lib->mp.DSTXAfterScaler[k];
- CalculatePrefetchSchedule_params->DSTYAfterScaler = &mode_lib->mp.DSTYAfterScaler[k];
- CalculatePrefetchSchedule_params->dst_y_prefetch = &mode_lib->mp.dst_y_prefetch[k];
- CalculatePrefetchSchedule_params->dst_y_per_vm_vblank = &mode_lib->mp.dst_y_per_vm_vblank[k];
- CalculatePrefetchSchedule_params->dst_y_per_row_vblank = &mode_lib->mp.dst_y_per_row_vblank[k];
- CalculatePrefetchSchedule_params->VRatioPrefetchY = &mode_lib->mp.VRatioPrefetchY[k];
- CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->mp.VRatioPrefetchC[k];
- CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->mp.RequiredPrefetchPixelDataBWLuma[k];
- CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->mp.RequiredPrefetchPixelDataBWChroma[k];
- CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->mp.NotEnoughTimeForDynamicMetadata[k];
- CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->mp.Tno_bw[k];
- CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->mp.Tno_bw_flip[k];
- CalculatePrefetchSchedule_params->prefetch_vmrow_bw = &mode_lib->mp.prefetch_vmrow_bw[k];
- CalculatePrefetchSchedule_params->Tdmdl_vm = &mode_lib->mp.Tdmdl_vm[k];
- CalculatePrefetchSchedule_params->Tdmdl = &mode_lib->mp.Tdmdl[k];
- CalculatePrefetchSchedule_params->TSetup = &mode_lib->mp.TSetup[k];
- CalculatePrefetchSchedule_params->Tvm_trips = &s->Tvm_trips[k];
- CalculatePrefetchSchedule_params->Tr0_trips = &s->Tr0_trips[k];
- CalculatePrefetchSchedule_params->Tvm_trips_flip = &s->Tvm_trips_flip[k];
- CalculatePrefetchSchedule_params->Tr0_trips_flip = &s->Tr0_trips_flip[k];
- CalculatePrefetchSchedule_params->Tvm_trips_flip_rounded = &s->Tvm_trips_flip_rounded[k];
- CalculatePrefetchSchedule_params->Tr0_trips_flip_rounded = &s->Tr0_trips_flip_rounded[k];
- CalculatePrefetchSchedule_params->VUpdateOffsetPix = &mode_lib->mp.VUpdateOffsetPix[k];
- CalculatePrefetchSchedule_params->VUpdateWidthPix = &mode_lib->mp.VUpdateWidthPix[k];
- CalculatePrefetchSchedule_params->VReadyOffsetPix = &mode_lib->mp.VReadyOffsetPix[k];
- CalculatePrefetchSchedule_params->prefetch_cursor_bw = &mode_lib->mp.prefetch_cursor_bw[k];
-
- mode_lib->mp.NoTimeToPrefetch[k] = CalculatePrefetchSchedule(&mode_lib->scratch, CalculatePrefetchSchedule_params);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%0u NoTimeToPrefetch=%0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]);
-#endif
- mode_lib->mp.VStartupMin[k] = s->MaxVStartupLines[k];
- } // for k
-
- mode_lib->mp.PrefetchModeSupported = true;
- for (k = 0; k < s->num_active_planes; ++k) {
- if (mode_lib->mp.NoTimeToPrefetch[k] == true ||
- mode_lib->mp.NotEnoughTimeForDynamicMetadata[k] ||
- mode_lib->mp.DSTYAfterScaler[k] > 8) {
- dml2_printf("DML::%s: k=%u, NoTimeToPrefetch = %0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]);
- dml2_printf("DML::%s: k=%u, NotEnoughTimeForDynamicMetadata=%u\n", __func__, k, mode_lib->mp.NotEnoughTimeForDynamicMetadata[k]);
- dml2_printf("DML::%s: k=%u, DSTYAfterScaler=%u (should be <= 8)\n", __func__, k, mode_lib->mp.DSTYAfterScaler[k]);
- mode_lib->mp.PrefetchModeSupported = false;
- }
- if (mode_lib->mp.dst_y_prefetch[k] < 2)
- s->DestinationLineTimesForPrefetchLessThan2 = true;
-
- if (mode_lib->mp.VRatioPrefetchY[k] > __DML2_CALCS_MAX_VRATIO_PRE_ENHANCE_PREFETCH_ACC__ ||
- mode_lib->mp.VRatioPrefetchC[k] > __DML2_CALCS_MAX_VRATIO_PRE_ENHANCE_PREFETCH_ACC__)
- s->VRatioPrefetchMoreThanMax = true;
-
- if (mode_lib->mp.NotEnoughUrgentLatencyHiding[k]) {
- dml2_printf("DML::%s: k=%u, NotEnoughUrgentLatencyHiding = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHiding[k]);
- mode_lib->mp.PrefetchModeSupported = false;
- }
- }
-
- if (s->VRatioPrefetchMoreThanMax == true || s->DestinationLineTimesForPrefetchLessThan2 == true) {
- dml2_printf("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
- dml2_printf("DML::%s: DestinationLineTimesForPrefetchLessThan2 = %u\n", __func__, s->DestinationLineTimesForPrefetchLessThan2);
- mode_lib->mp.PrefetchModeSupported = false;
- }
-
- dml2_printf("DML::%s: Prefetch schedule is %sOK at vstartup = %u\n", __func__,
- mode_lib->mp.PrefetchModeSupported ? "" : "NOT ", CalculatePrefetchSchedule_params->VStartup);
-
- // Prefetch schedule OK, now check prefetch bw
- if (mode_lib->mp.PrefetchModeSupported == true) {
- for (k = 0; k < s->num_active_planes; ++k) {
- double line_time_us = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total /
- ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- CalculateUrgentBurstFactor(
- &display_cfg->plane_descriptors[k],
- mode_lib->mp.swath_width_luma_ub[k],
- mode_lib->mp.swath_width_chroma_ub[k],
- mode_lib->mp.SwathHeightY[k],
- mode_lib->mp.SwathHeightC[k],
- line_time_us,
- mode_lib->mp.UrgentLatency,
- mode_lib->mp.VRatioPrefetchY[k],
- mode_lib->mp.VRatioPrefetchC[k],
- mode_lib->mp.BytePerPixelInDETY[k],
- mode_lib->mp.BytePerPixelInDETC[k],
- mode_lib->mp.DETBufferSizeY[k],
- mode_lib->mp.DETBufferSizeC[k],
- /* Output */
- &mode_lib->mp.UrgentBurstFactorLumaPre[k],
- &mode_lib->mp.UrgentBurstFactorChromaPre[k],
- &mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%0u DPPPerSurface=%u\n", __func__, k, mode_lib->mp.NoOfDPP[k]);
- dml2_printf("DML::%s: k=%0u UrgentBurstFactorLuma=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorLuma[k]);
- dml2_printf("DML::%s: k=%0u UrgentBurstFactorChroma=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorChroma[k]);
- dml2_printf("DML::%s: k=%0u UrgentBurstFactorLumaPre=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorLumaPre[k]);
- dml2_printf("DML::%s: k=%0u UrgentBurstFactorChromaPre=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorChromaPre[k]);
-
- dml2_printf("DML::%s: k=%0u VRatioPrefetchY=%f\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k]);
- dml2_printf("DML::%s: k=%0u VRatioY=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
-
- dml2_printf("DML::%s: k=%0u prefetch_vmrow_bw=%f\n", __func__, k, mode_lib->mp.prefetch_vmrow_bw[k]);
- dml2_printf("DML::%s: k=%0u ReadBandwidthSurfaceLuma=%f\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%0u ReadBandwidthSurfaceChroma=%f\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthChroma[k]);
- dml2_printf("DML::%s: k=%0u cursor_bw=%f\n", __func__, k, mode_lib->mp.cursor_bw[k]);
- dml2_printf("DML::%s: k=%0u dpte_row_bw=%f\n", __func__, k, mode_lib->mp.dpte_row_bw[k]);
- dml2_printf("DML::%s: k=%0u meta_row_bw=%f\n", __func__, k, mode_lib->mp.meta_row_bw[k]);
- dml2_printf("DML::%s: k=%0u RequiredPrefetchPixelDataBWLuma=%f\n", __func__, k, mode_lib->mp.RequiredPrefetchPixelDataBWLuma[k]);
- dml2_printf("DML::%s: k=%0u RequiredPrefetchPixelDataBWChroma=%f\n", __func__, k, mode_lib->mp.RequiredPrefetchPixelDataBWChroma[k]);
- dml2_printf("DML::%s: k=%0u prefetch_cursor_bw=%f\n", __func__, k, mode_lib->mp.prefetch_cursor_bw[k]);
-#endif
- }
-
- for (k = 0; k < s->num_active_planes; k++)
- mode_lib->mp.final_flip_bw[k] = 0;
-
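- // Peak required bandwidth with prefetch but no flip contribution
- // (final_flip_bw is zeroed above and inc_flip_bw is 0).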
- calculate_peak_bandwidth_required(
- &mode_lib->scratch,
- mode_lib->mp.urg_vactive_bandwidth_required,
- mode_lib->mp.urg_bandwidth_required,
- mode_lib->mp.non_urg_bandwidth_required,
-
- // Input
- display_cfg,
- 0, // inc_flip_bw
- s->num_active_planes,
- mode_lib->mp.NoOfDPP,
- mode_lib->mp.dcc_dram_bw_nom_overhead_factor_p0,
- mode_lib->mp.dcc_dram_bw_nom_overhead_factor_p1,
- mode_lib->mp.dcc_dram_bw_pref_overhead_factor_p0,
- mode_lib->mp.dcc_dram_bw_pref_overhead_factor_p1,
- mode_lib->mp.mall_prefetch_sdp_overhead_factor,
- mode_lib->mp.mall_prefetch_dram_overhead_factor,
- mode_lib->mp.SurfaceReadBandwidthLuma,
- mode_lib->mp.SurfaceReadBandwidthChroma,
- mode_lib->mp.RequiredPrefetchPixelDataBWLuma,
- mode_lib->mp.RequiredPrefetchPixelDataBWChroma,
- mode_lib->mp.cursor_bw,
- mode_lib->mp.dpte_row_bw,
- mode_lib->mp.meta_row_bw,
- mode_lib->mp.prefetch_cursor_bw,
- mode_lib->mp.prefetch_vmrow_bw,
- mode_lib->mp.final_flip_bw,
- mode_lib->mp.UrgentBurstFactorLuma,
- mode_lib->mp.UrgentBurstFactorChroma,
- mode_lib->mp.UrgentBurstFactorCursor,
- mode_lib->mp.UrgentBurstFactorLumaPre,
- mode_lib->mp.UrgentBurstFactorChromaPre,
- mode_lib->mp.UrgentBurstFactorCursorPre);
-
- // Check urg peak bandwidth against available urg bw
- // check at SDP and DRAM, for all soc states (SVP prefetch and Sys Active)
- check_urgent_bandwidth_support(
- &mode_lib->mp.FractionOfUrgentBandwidth, // double* frac_urg_bandwidth
- &mode_lib->mp.FractionOfUrgentBandwidthMALL, // double* frac_urg_bandwidth_mall
- &s->dummy_boolean[1], // vactive bw ok
- &mode_lib->mp.PrefetchModeSupported, // prefetch bw ok
-
- mode_lib->soc.mall_allocated_for_dcn_mbytes,
- mode_lib->mp.non_urg_bandwidth_required,
- mode_lib->mp.urg_vactive_bandwidth_required,
- mode_lib->mp.urg_bandwidth_required,
- mode_lib->mp.urg_bandwidth_available);
-
- for (k = 0; k < s->num_active_planes; ++k) {
- if (mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]) {
- dml2_printf("DML::%s: k=%u, NotEnoughUrgentLatencyHidingPre = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]);
- mode_lib->mp.PrefetchModeSupported = false;
- }
- }
- } // prefetch schedule ok
-
- // Prefetch schedule and prefetch bw ok, now check flip bw
- if (mode_lib->mp.PrefetchModeSupported == true) { // prefetch schedule and prefetch bw ok, now check flip bw
-
- mode_lib->mp.BandwidthAvailableForImmediateFlip =
- get_bandwidth_available_for_immediate_flip(dml2_core_internal_soc_state_sys_active,
- mode_lib->mp.urg_bandwidth_required, // no flip
- mode_lib->mp.urg_bandwidth_available);
- mode_lib->mp.TotImmediateFlipBytes = 0;
- for (k = 0; k < s->num_active_planes; ++k) {
- if (display_cfg->plane_descriptors[k].immediate_flip) {
- s->per_pipe_flip_bytes[k] = get_pipe_flip_bytes(
- s->HostVMInefficiencyFactor,
- mode_lib->mp.vm_bytes[k],
- mode_lib->mp.PixelPTEBytesPerRow[k],
- mode_lib->mp.meta_row_bytes[k]);
- } else {
- s->per_pipe_flip_bytes[k] = 0;
- }
- mode_lib->mp.TotImmediateFlipBytes += s->per_pipe_flip_bytes[k] * mode_lib->mp.NoOfDPP[k];
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k = %u\n", __func__, k);
- dml2_printf("DML::%s: DPPPerSurface = %u\n", __func__, mode_lib->mp.NoOfDPP[k]);
- dml2_printf("DML::%s: vm_bytes = %u\n", __func__, mode_lib->mp.vm_bytes[k]);
- dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, mode_lib->mp.PixelPTEBytesPerRow[k]);
- dml2_printf("DML::%s: meta_row_bytes = %u\n", __func__, mode_lib->mp.meta_row_bytes[k]);
- dml2_printf("DML::%s: TotImmediateFlipBytes = %u\n", __func__, mode_lib->mp.TotImmediateFlipBytes);
-#endif
- }
- for (k = 0; k < s->num_active_planes; ++k) {
- CalculateFlipSchedule(
- &mode_lib->scratch,
- display_cfg->plane_descriptors[k].immediate_flip,
- 0, // use_lb_flip_bw
- s->HostVMInefficiencyFactor,
- s->Tvm_trips_flip[k],
- s->Tr0_trips_flip[k],
- s->Tvm_trips_flip_rounded[k],
- s->Tr0_trips_flip_rounded[k],
- display_cfg->gpuvm_enable,
- mode_lib->mp.vm_bytes[k],
- mode_lib->mp.PixelPTEBytesPerRow[k],
- mode_lib->mp.BandwidthAvailableForImmediateFlip,
- mode_lib->mp.TotImmediateFlipBytes,
- display_cfg->plane_descriptors[k].pixel_format,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
- mode_lib->mp.Tno_bw[k],
- mode_lib->mp.dpte_row_height[k],
- mode_lib->mp.dpte_row_height_chroma[k],
- mode_lib->mp.use_one_row_for_frame_flip[k],
- mode_lib->ip.max_flip_time_us,
- s->per_pipe_flip_bytes[k],
- mode_lib->mp.meta_row_bytes[k],
- mode_lib->mp.meta_row_height[k],
- mode_lib->mp.meta_row_height_chroma[k],
- mode_lib->ip.dcn_mrq_present && display_cfg->plane_descriptors[k].surface.dcc.enable,
-
- // Output
- &mode_lib->mp.dst_y_per_vm_flip[k],
- &mode_lib->mp.dst_y_per_row_flip[k],
- &mode_lib->mp.final_flip_bw[k],
- &mode_lib->mp.ImmediateFlipSupportedForPipe[k]);
- }
-
- calculate_peak_bandwidth_required(
- &mode_lib->scratch,
- s->dummy_bw,
- mode_lib->mp.urg_bandwidth_required_flip,
- mode_lib->mp.non_urg_bandwidth_required_flip,
-
- // Input
- display_cfg,
- 1, // inc_flip_bw
- s->num_active_planes,
- mode_lib->mp.NoOfDPP,
- mode_lib->mp.dcc_dram_bw_nom_overhead_factor_p0,
- mode_lib->mp.dcc_dram_bw_nom_overhead_factor_p1,
- mode_lib->mp.dcc_dram_bw_pref_overhead_factor_p0,
- mode_lib->mp.dcc_dram_bw_pref_overhead_factor_p1,
- mode_lib->mp.mall_prefetch_sdp_overhead_factor,
- mode_lib->mp.mall_prefetch_dram_overhead_factor,
- mode_lib->mp.SurfaceReadBandwidthLuma,
- mode_lib->mp.SurfaceReadBandwidthChroma,
- mode_lib->mp.RequiredPrefetchPixelDataBWLuma,
- mode_lib->mp.RequiredPrefetchPixelDataBWChroma,
- mode_lib->mp.cursor_bw,
- mode_lib->mp.dpte_row_bw,
- mode_lib->mp.meta_row_bw,
- mode_lib->mp.prefetch_cursor_bw,
- mode_lib->mp.prefetch_vmrow_bw,
- mode_lib->mp.final_flip_bw,
- mode_lib->mp.UrgentBurstFactorLuma,
- mode_lib->mp.UrgentBurstFactorChroma,
- mode_lib->mp.UrgentBurstFactorCursor,
- mode_lib->mp.UrgentBurstFactorLumaPre,
- mode_lib->mp.UrgentBurstFactorChromaPre,
- mode_lib->mp.UrgentBurstFactorCursorPre);
-
- calculate_immediate_flip_bandwidth_support(
- &mode_lib->mp.FractionOfUrgentBandwidthImmediateFlip, // double* frac_urg_bandwidth_flip
- &mode_lib->mp.ImmediateFlipSupported, // bool* flip_bandwidth_support_ok
-
- dml2_core_internal_soc_state_sys_active,
- mode_lib->mp.urg_bandwidth_required_flip,
- mode_lib->mp.non_urg_bandwidth_required_flip,
- mode_lib->mp.urg_bandwidth_available);
-
- for (k = 0; k < s->num_active_planes; ++k) {
- if (display_cfg->plane_descriptors[k].immediate_flip && mode_lib->mp.ImmediateFlipSupportedForPipe[k] == false) {
- mode_lib->mp.ImmediateFlipSupported = false;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Pipe %0d not supporting iflip!\n", __func__, k);
-#endif
- }
- }
- } else { // flip or prefetch not support
- mode_lib->mp.ImmediateFlipSupported = false;
- }
-
-	// consider flip support okay if the flip bw is ok or (when the user doesn't require an iflip and there is no host vm)
- bool must_support_iflip = display_cfg->hostvm_enable || s->immediate_flip_required;
- mode_lib->mp.PrefetchAndImmediateFlipSupported = (mode_lib->mp.PrefetchModeSupported == true && (!must_support_iflip || mode_lib->mp.ImmediateFlipSupported));
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: PrefetchModeSupported = %u\n", __func__, mode_lib->mp.PrefetchModeSupported);
- for (k = 0; k < s->num_active_planes; ++k)
- dml2_printf("DML::%s: immediate_flip_required[%u] = %u\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip);
- dml2_printf("DML::%s: HostVMEnable = %u\n", __func__, display_cfg->hostvm_enable);
- dml2_printf("DML::%s: ImmediateFlipSupported = %u\n", __func__, mode_lib->mp.ImmediateFlipSupported);
- dml2_printf("DML::%s: PrefetchAndImmediateFlipSupported = %u\n", __func__, mode_lib->mp.PrefetchAndImmediateFlipSupported);
-#endif
- dml2_printf("DML::%s: Done one iteration: k=%d, MaxVStartupLines=%u\n", __func__, k, s->MaxVStartupLines[k]);
- }
-
- for (k = 0; k < s->num_active_planes; ++k)
- dml2_printf("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
-
- if (!mode_lib->mp.PrefetchAndImmediateFlipSupported) {
- dml2_printf("DML::%s: Bad, Prefetch and flip scheduling solution NOT found!\n", __func__);
- } else {
- dml2_printf("DML::%s: Good, Prefetch and flip scheduling solution found\n", __func__);
-
- // DCC Configuration
- for (k = 0; k < s->num_active_planes; ++k) {
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Calculate DCC configuration for surface k=%u\n", __func__, k);
-#endif
- CalculateDCCConfiguration(
- display_cfg->plane_descriptors[k].surface.dcc.enable,
- display_cfg->overrides.dcc_programming_assumes_scan_direction_unknown,
- display_cfg->plane_descriptors[k].pixel_format,
- display_cfg->plane_descriptors[k].surface.plane0.width,
- display_cfg->plane_descriptors[k].surface.plane1.width,
- display_cfg->plane_descriptors[k].surface.plane0.height,
- display_cfg->plane_descriptors[k].surface.plane1.height,
- s->NomDETInKByte,
- mode_lib->mp.Read256BlockHeightY[k],
- mode_lib->mp.Read256BlockHeightC[k],
- display_cfg->plane_descriptors[k].surface.tiling,
- mode_lib->mp.BytePerPixelY[k],
- mode_lib->mp.BytePerPixelC[k],
- mode_lib->mp.BytePerPixelInDETY[k],
- mode_lib->mp.BytePerPixelInDETC[k],
- display_cfg->plane_descriptors[k].composition.rotation_angle,
-
- /* Output */
- &mode_lib->mp.RequestLuma[k],
- &mode_lib->mp.RequestChroma[k],
- &mode_lib->mp.DCCYMaxUncompressedBlock[k],
- &mode_lib->mp.DCCCMaxUncompressedBlock[k],
- &mode_lib->mp.DCCYMaxCompressedBlock[k],
- &mode_lib->mp.DCCCMaxCompressedBlock[k],
- &mode_lib->mp.DCCYIndependentBlock[k],
- &mode_lib->mp.DCCCIndependentBlock[k]);
- }
-
- //Watermarks and NB P-State/DRAM Clock Change Support
- s->mmSOCParameters.UrgentLatency = mode_lib->mp.UrgentLatency;
- s->mmSOCParameters.ExtraLatency = mode_lib->mp.ExtraLatency;
- s->mmSOCParameters.ExtraLatency_sr = mode_lib->mp.ExtraLatency_sr;
- s->mmSOCParameters.WritebackLatency = mode_lib->soc.qos_parameters.writeback.base_latency_us;
- s->mmSOCParameters.DRAMClockChangeLatency = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
- s->mmSOCParameters.FCLKChangeLatency = mode_lib->soc.power_management_parameters.fclk_change_blackout_us;
- s->mmSOCParameters.SRExitTime = mode_lib->soc.power_management_parameters.stutter_exit_latency_us;
- s->mmSOCParameters.SREnterPlusExitTime = mode_lib->soc.power_management_parameters.stutter_enter_plus_exit_latency_us;
- s->mmSOCParameters.SRExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_exit_latency_us;
- s->mmSOCParameters.SREnterPlusExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_enter_plus_exit_latency_us;
- s->mmSOCParameters.USRRetrainingLatency = 0; //0; //FIXME_STAGE2
- s->mmSOCParameters.SMNLatency = 0; //mode_lib->soc.smn_latency_us; //FIXME_STAGE2
-
- CalculateWatermarks_params->display_cfg = display_cfg;
- CalculateWatermarks_params->USRRetrainingRequired = false/*FIXME_STAGE2 was: mode_lib->ms.policy.USRRetrainingRequired, no new dml2 replacement*/;
- CalculateWatermarks_params->NumberOfActiveSurfaces = s->num_active_planes;
- CalculateWatermarks_params->MaxLineBufferLines = mode_lib->ip.max_line_buffer_lines;
- CalculateWatermarks_params->LineBufferSize = mode_lib->ip.line_buffer_size_bits;
- CalculateWatermarks_params->WritebackInterfaceBufferSize = mode_lib->ip.writeback_interface_buffer_size_kbytes;
- CalculateWatermarks_params->DCFCLK = mode_lib->mp.Dcfclk;
- CalculateWatermarks_params->SynchronizeTimings = display_cfg->overrides.synchronize_timings;
- CalculateWatermarks_params->SynchronizeDRRDisplaysForUCLKPStateChange = display_cfg->overrides.synchronize_ddr_displays_for_uclk_pstate_change;
- CalculateWatermarks_params->dpte_group_bytes = mode_lib->mp.dpte_group_bytes;
- CalculateWatermarks_params->mmSOCParameters = s->mmSOCParameters;
- CalculateWatermarks_params->WritebackChunkSize = mode_lib->ip.writeback_chunk_size_kbytes;
- CalculateWatermarks_params->SOCCLK = s->SOCCLK;
- CalculateWatermarks_params->DCFClkDeepSleep = mode_lib->mp.DCFCLKDeepSleep;
- CalculateWatermarks_params->DETBufferSizeY = mode_lib->mp.DETBufferSizeY;
- CalculateWatermarks_params->DETBufferSizeC = mode_lib->mp.DETBufferSizeC;
- CalculateWatermarks_params->SwathHeightY = mode_lib->mp.SwathHeightY;
- CalculateWatermarks_params->SwathHeightC = mode_lib->mp.SwathHeightC;
- //CalculateWatermarks_params->LBBitPerPixel = 57; //FIXME_STAGE2
- CalculateWatermarks_params->SwathWidthY = mode_lib->mp.SwathWidthY;
- CalculateWatermarks_params->SwathWidthC = mode_lib->mp.SwathWidthC;
- CalculateWatermarks_params->BytePerPixelDETY = mode_lib->mp.BytePerPixelInDETY;
- CalculateWatermarks_params->BytePerPixelDETC = mode_lib->mp.BytePerPixelInDETC;
- CalculateWatermarks_params->DSTXAfterScaler = mode_lib->mp.DSTXAfterScaler;
- CalculateWatermarks_params->DSTYAfterScaler = mode_lib->mp.DSTYAfterScaler;
- CalculateWatermarks_params->UnboundedRequestEnabled = mode_lib->mp.UnboundedRequestEnabled;
- CalculateWatermarks_params->CompressedBufferSizeInkByte = mode_lib->mp.CompressedBufferSizeInkByte;
- CalculateWatermarks_params->meta_row_height_l = mode_lib->mp.meta_row_height;
- CalculateWatermarks_params->meta_row_height_c = mode_lib->mp.meta_row_height_chroma;
-
- // Output
- CalculateWatermarks_params->Watermark = &mode_lib->mp.Watermark;
- CalculateWatermarks_params->DRAMClockChangeSupport = mode_lib->mp.DRAMClockChangeSupport;
- CalculateWatermarks_params->global_dram_clock_change_supported = &mode_lib->mp.global_dram_clock_change_supported;
- CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = mode_lib->mp.MaxActiveDRAMClockChangeLatencySupported;
- CalculateWatermarks_params->SubViewportLinesNeededInMALL = mode_lib->mp.SubViewportLinesNeededInMALL;
- CalculateWatermarks_params->FCLKChangeSupport = mode_lib->mp.FCLKChangeSupport;
- CalculateWatermarks_params->global_fclk_change_supported = &mode_lib->mp.global_fclk_change_supported;
- CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &mode_lib->mp.MaxActiveFCLKChangeLatencySupported;
- CalculateWatermarks_params->USRRetrainingSupport = &mode_lib->mp.USRRetrainingSupport;
- CalculateWatermarks_params->VActiveLatencyHidingMargin = 0;
-
- CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(&mode_lib->scratch, CalculateWatermarks_params);
-
- for (k = 0; k < s->num_active_planes; ++k) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
- mode_lib->mp.WritebackAllowDRAMClockChangeEndPosition[k] = math_max2(0, mode_lib->mp.VStartupMin[k] * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total /
- ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000) - mode_lib->mp.Watermark.WritebackDRAMClockChangeWatermark);
- mode_lib->mp.WritebackAllowFCLKChangeEndPosition[k] = math_max2(0, mode_lib->mp.VStartupMin[k] * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total /
- ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000) - mode_lib->mp.Watermark.WritebackFCLKChangeWatermark);
- } else {
- mode_lib->mp.WritebackAllowDRAMClockChangeEndPosition[k] = 0;
- mode_lib->mp.WritebackAllowFCLKChangeEndPosition[k] = 0;
- }
- }
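A minimal sketch (editorial, not part of the diff) of the end-position computation in the loop above: the minimum vstartup is converted to time through the line time h_total / pixel_clock_mhz and the matching writeback watermark is subtracted, floored at zero. The helper name is invented and the logic is simplified to a single stream.

#include <math.h>

/* end position (us) = vstartup_min worth of line time minus the watermark, never negative */
static double wb_allow_change_end_position_us(unsigned int vstartup_min,
					      unsigned int h_total,
					      double pixel_clock_mhz,
					      double writeback_watermark_us)
{
	double line_time_us = (double)h_total / pixel_clock_mhz;

	return fmax(0.0, (double)vstartup_min * line_time_us - writeback_watermark_us);
}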
-
- dml2_printf("DML::%s: DEBUG stream_index = %0d\n", __func__, display_cfg->plane_descriptors[0].stream_index);
- dml2_printf("DML::%s: DEBUG PixelClock = %d kHz\n", __func__, (display_cfg->stream_descriptors[display_cfg->plane_descriptors[0].stream_index].timing.pixel_clock_khz));
-
- //Display Pipeline Delivery Time in Prefetch, Groups
- CalculatePixelDeliveryTimes(
- display_cfg,
- cfg_support_info,
- s->num_active_planes,
- mode_lib->mp.VRatioPrefetchY,
- mode_lib->mp.VRatioPrefetchC,
- mode_lib->mp.swath_width_luma_ub,
- mode_lib->mp.swath_width_chroma_ub,
- mode_lib->mp.PSCL_THROUGHPUT,
- mode_lib->mp.PSCL_THROUGHPUT_CHROMA,
- mode_lib->mp.Dppclk,
- mode_lib->mp.BytePerPixelC,
- mode_lib->mp.req_per_swath_ub_l,
- mode_lib->mp.req_per_swath_ub_c,
-
- /* Output */
- mode_lib->mp.DisplayPipeLineDeliveryTimeLuma,
- mode_lib->mp.DisplayPipeLineDeliveryTimeChroma,
- mode_lib->mp.DisplayPipeLineDeliveryTimeLumaPrefetch,
- mode_lib->mp.DisplayPipeLineDeliveryTimeChromaPrefetch,
- mode_lib->mp.DisplayPipeRequestDeliveryTimeLuma,
- mode_lib->mp.DisplayPipeRequestDeliveryTimeChroma,
- mode_lib->mp.DisplayPipeRequestDeliveryTimeLumaPrefetch,
- mode_lib->mp.DisplayPipeRequestDeliveryTimeChromaPrefetch);
-
- CalculateMetaAndPTETimes_params->scratch = &mode_lib->scratch;
- CalculateMetaAndPTETimes_params->display_cfg = display_cfg;
- CalculateMetaAndPTETimes_params->NumberOfActiveSurfaces = s->num_active_planes;
- CalculateMetaAndPTETimes_params->use_one_row_for_frame = mode_lib->mp.use_one_row_for_frame;
- CalculateMetaAndPTETimes_params->dst_y_per_row_vblank = mode_lib->mp.dst_y_per_row_vblank;
- CalculateMetaAndPTETimes_params->dst_y_per_row_flip = mode_lib->mp.dst_y_per_row_flip;
- CalculateMetaAndPTETimes_params->BytePerPixelY = mode_lib->mp.BytePerPixelY;
- CalculateMetaAndPTETimes_params->BytePerPixelC = mode_lib->mp.BytePerPixelC;
- CalculateMetaAndPTETimes_params->dpte_row_height = mode_lib->mp.dpte_row_height;
- CalculateMetaAndPTETimes_params->dpte_row_height_chroma = mode_lib->mp.dpte_row_height_chroma;
- CalculateMetaAndPTETimes_params->dpte_group_bytes = mode_lib->mp.dpte_group_bytes;
- CalculateMetaAndPTETimes_params->PTERequestSizeY = mode_lib->mp.PTERequestSizeY;
- CalculateMetaAndPTETimes_params->PTERequestSizeC = mode_lib->mp.PTERequestSizeC;
- CalculateMetaAndPTETimes_params->PixelPTEReqWidthY = mode_lib->mp.PixelPTEReqWidthY;
- CalculateMetaAndPTETimes_params->PixelPTEReqHeightY = mode_lib->mp.PixelPTEReqHeightY;
- CalculateMetaAndPTETimes_params->PixelPTEReqWidthC = mode_lib->mp.PixelPTEReqWidthC;
- CalculateMetaAndPTETimes_params->PixelPTEReqHeightC = mode_lib->mp.PixelPTEReqHeightC;
- CalculateMetaAndPTETimes_params->dpte_row_width_luma_ub = mode_lib->mp.dpte_row_width_luma_ub;
- CalculateMetaAndPTETimes_params->dpte_row_width_chroma_ub = mode_lib->mp.dpte_row_width_chroma_ub;
- CalculateMetaAndPTETimes_params->tdlut_groups_per_2row_ub = s->tdlut_groups_per_2row_ub;
- CalculateMetaAndPTETimes_params->mrq_present = mode_lib->ip.dcn_mrq_present;
-
- CalculateMetaAndPTETimes_params->MetaChunkSize = mode_lib->ip.meta_chunk_size_kbytes;
- CalculateMetaAndPTETimes_params->MinMetaChunkSizeBytes = mode_lib->ip.min_meta_chunk_size_bytes;
- CalculateMetaAndPTETimes_params->meta_row_width = mode_lib->mp.meta_row_width;
- CalculateMetaAndPTETimes_params->meta_row_width_chroma = mode_lib->mp.meta_row_width_chroma;
- CalculateMetaAndPTETimes_params->meta_row_height = mode_lib->mp.meta_row_height;
- CalculateMetaAndPTETimes_params->meta_row_height_chroma = mode_lib->mp.meta_row_height_chroma;
- CalculateMetaAndPTETimes_params->meta_req_width = mode_lib->mp.meta_req_width;
- CalculateMetaAndPTETimes_params->meta_req_width_chroma = mode_lib->mp.meta_req_width_chroma;
- CalculateMetaAndPTETimes_params->meta_req_height = mode_lib->mp.meta_req_height;
- CalculateMetaAndPTETimes_params->meta_req_height_chroma = mode_lib->mp.meta_req_height_chroma;
-
- CalculateMetaAndPTETimes_params->time_per_tdlut_group = mode_lib->mp.time_per_tdlut_group;
- CalculateMetaAndPTETimes_params->DST_Y_PER_PTE_ROW_NOM_L = mode_lib->mp.DST_Y_PER_PTE_ROW_NOM_L;
- CalculateMetaAndPTETimes_params->DST_Y_PER_PTE_ROW_NOM_C = mode_lib->mp.DST_Y_PER_PTE_ROW_NOM_C;
- CalculateMetaAndPTETimes_params->time_per_pte_group_nom_luma = mode_lib->mp.time_per_pte_group_nom_luma;
- CalculateMetaAndPTETimes_params->time_per_pte_group_vblank_luma = mode_lib->mp.time_per_pte_group_vblank_luma;
- CalculateMetaAndPTETimes_params->time_per_pte_group_flip_luma = mode_lib->mp.time_per_pte_group_flip_luma;
- CalculateMetaAndPTETimes_params->time_per_pte_group_nom_chroma = mode_lib->mp.time_per_pte_group_nom_chroma;
- CalculateMetaAndPTETimes_params->time_per_pte_group_vblank_chroma = mode_lib->mp.time_per_pte_group_vblank_chroma;
- CalculateMetaAndPTETimes_params->time_per_pte_group_flip_chroma = mode_lib->mp.time_per_pte_group_flip_chroma;
- CalculateMetaAndPTETimes_params->DST_Y_PER_META_ROW_NOM_L = mode_lib->mp.DST_Y_PER_META_ROW_NOM_L;
- CalculateMetaAndPTETimes_params->DST_Y_PER_META_ROW_NOM_C = mode_lib->mp.DST_Y_PER_META_ROW_NOM_C;
- CalculateMetaAndPTETimes_params->TimePerMetaChunkNominal = mode_lib->mp.TimePerMetaChunkNominal;
- CalculateMetaAndPTETimes_params->TimePerChromaMetaChunkNominal = mode_lib->mp.TimePerChromaMetaChunkNominal;
- CalculateMetaAndPTETimes_params->TimePerMetaChunkVBlank = mode_lib->mp.TimePerMetaChunkVBlank;
- CalculateMetaAndPTETimes_params->TimePerChromaMetaChunkVBlank = mode_lib->mp.TimePerChromaMetaChunkVBlank;
- CalculateMetaAndPTETimes_params->TimePerMetaChunkFlip = mode_lib->mp.TimePerMetaChunkFlip;
- CalculateMetaAndPTETimes_params->TimePerChromaMetaChunkFlip = mode_lib->mp.TimePerChromaMetaChunkFlip;
-
- CalculateMetaAndPTETimes(CalculateMetaAndPTETimes_params);
-
- CalculateVMGroupAndRequestTimes(
- display_cfg,
- s->num_active_planes,
- mode_lib->mp.BytePerPixelC,
- mode_lib->mp.dst_y_per_vm_vblank,
- mode_lib->mp.dst_y_per_vm_flip,
- mode_lib->mp.dpte_row_width_luma_ub,
- mode_lib->mp.dpte_row_width_chroma_ub,
- mode_lib->mp.vm_group_bytes,
- mode_lib->mp.dpde0_bytes_per_frame_ub_l,
- mode_lib->mp.dpde0_bytes_per_frame_ub_c,
- s->tdlut_pte_bytes_per_frame,
- mode_lib->mp.meta_pte_bytes_per_frame_ub_l,
- mode_lib->mp.meta_pte_bytes_per_frame_ub_c,
- mode_lib->ip.dcn_mrq_present,
-
- /* Output */
- mode_lib->mp.TimePerVMGroupVBlank,
- mode_lib->mp.TimePerVMGroupFlip,
- mode_lib->mp.TimePerVMRequestVBlank,
- mode_lib->mp.TimePerVMRequestFlip);
-
- // VStartup Adjustment
- for (k = 0; k < s->num_active_planes; ++k) {
-
- mode_lib->mp.MinTTUVBlank[k] = mode_lib->mp.TWait[k] + mode_lib->mp.ExtraLatency;
- if (!display_cfg->plane_descriptors[k].dynamic_meta_data.enable)
- mode_lib->mp.MinTTUVBlank[k] = mode_lib->mp.TCalc + mode_lib->mp.MinTTUVBlank[k];
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, MinTTUVBlank = %f (before vstartup margin)\n", __func__, k, mode_lib->mp.MinTTUVBlank[k]);
-#endif
- s->Tvstartup_margin = (s->MaxVStartupLines[k] - mode_lib->mp.VStartupMin[k]) * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- mode_lib->mp.MinTTUVBlank[k] = mode_lib->mp.MinTTUVBlank[k] + s->Tvstartup_margin;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, Tvstartup_margin = %f\n", __func__, k, s->Tvstartup_margin);
- dml2_printf("DML::%s: k=%u, MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
- dml2_printf("DML::%s: k=%u, MinTTUVBlank = %f\n", __func__, k, mode_lib->mp.MinTTUVBlank[k]);
-#endif
-
- mode_lib->mp.Tdmdl[k] = mode_lib->mp.Tdmdl[k] + s->Tvstartup_margin;
- if (display_cfg->plane_descriptors[k].dynamic_meta_data.enable && mode_lib->ip.dynamic_metadata_vm_enabled) {
- mode_lib->mp.Tdmdl_vm[k] = mode_lib->mp.Tdmdl_vm[k] + s->Tvstartup_margin;
- }
-
- bool isInterlaceTiming = (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced && !mode_lib->ip.ptoi_supported);
-
- // The actual positioning of the vstartup
- mode_lib->mp.VStartup[k] = (isInterlaceTiming ? (2 * s->MaxVStartupLines[k]) : s->MaxVStartupLines[k]);
-
- s->dlg_vblank_start = ((isInterlaceTiming ? math_floor2((display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_front_porch) / 2.0, 1.0) :
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total) - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_front_porch);
- s->LSetup = math_floor2(4.0 * mode_lib->mp.TSetup[k] / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)), 1.0) / 4.0;
- s->blank_lines_remaining = (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active) - mode_lib->mp.VStartup[k];
-
- if (s->blank_lines_remaining < 0) {
- dml2_printf("ERROR: Vstartup is larger than vblank!?\n");
- s->blank_lines_remaining = 0;
- DML2_ASSERT(0);
- }
- mode_lib->mp.MIN_DST_Y_NEXT_START[k] = s->dlg_vblank_start + s->blank_lines_remaining + s->LSetup;
-
- // debug only
- if (((mode_lib->mp.VUpdateOffsetPix[k] + mode_lib->mp.VUpdateWidthPix[k] + (double) mode_lib->mp.VReadyOffsetPix[k]) / display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total) <=
- (isInterlaceTiming ?
- math_floor2((display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_front_porch - mode_lib->mp.VStartup[k]) / 2.0, 1.0) :
- (int)(display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_front_porch - mode_lib->mp.VStartup[k]))) {
- mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[k] = true;
- } else {
- mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[k] = false;
- }
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, VStartup = %u (max)\n", __func__, k, mode_lib->mp.VStartup[k]);
- dml2_printf("DML::%s: k=%u, VStartupMin = %u (max)\n", __func__, k, mode_lib->mp.VStartupMin[k]);
- dml2_printf("DML::%s: k=%u, VUpdateOffsetPix = %u\n", __func__, k, mode_lib->mp.VUpdateOffsetPix[k]);
- dml2_printf("DML::%s: k=%u, VUpdateWidthPix = %u\n", __func__, k, mode_lib->mp.VUpdateWidthPix[k]);
- dml2_printf("DML::%s: k=%u, VReadyOffsetPix = %u\n", __func__, k, mode_lib->mp.VReadyOffsetPix[k]);
- dml2_printf("DML::%s: k=%u, HTotal = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total);
- dml2_printf("DML::%s: k=%u, VTotal = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total);
- dml2_printf("DML::%s: k=%u, VActive = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active);
- dml2_printf("DML::%s: k=%u, VFrontPorch = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_front_porch);
- dml2_printf("DML::%s: k=%u, TSetup = %f\n", __func__, k, mode_lib->mp.TSetup[k]);
- dml2_printf("DML::%s: k=%u, MIN_DST_Y_NEXT_START = %f\n", __func__, k, mode_lib->mp.MIN_DST_Y_NEXT_START[k]);
- dml2_printf("DML::%s: k=%u, VREADY_AT_OR_AFTER_VSYNC = %u\n", __func__, k, mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[k]);
-#endif
- }
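A minimal sketch of the vstartup margin folded into MinTTUVBlank and Tdmdl in the loop above: the extra lines between the maximum and the minimum vstartup are converted to microseconds through the line time. The helper name and the example numbers in the comment are invented.

/*
 * Example (invented 4k60-like timing): (12 - 9) lines * (2200 / 594.0) us/line
 * ~= 11.1 us of extra margin.
 */
static double tvstartup_margin_us(unsigned int max_vstartup_lines,
				  unsigned int vstartup_min,
				  unsigned int h_total,
				  double pixel_clock_mhz)
{
	double line_time_us = (double)h_total / pixel_clock_mhz;

	return (double)(max_vstartup_lines - vstartup_min) * line_time_us;
}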
-
- //Maximum Bandwidth Used
- s->TotalWRBandwidth = 0;
- s->WRBandwidth = 0;
- for (k = 0; k < s->num_active_planes; ++k) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format == dml2_444_32) {
- s->WRBandwidth = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width /
- (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * 4;
- } else if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
- s->WRBandwidth = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width /
- (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * 8;
- }
- s->TotalWRBandwidth = s->TotalWRBandwidth + s->WRBandwidth;
- }
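A minimal sketch of the per-stream writeback bandwidth term summed above: output pixels divided by the time the writeback takes to consume its input (h_total * input_height line times), scaled by bytes per pixel (4 for dml2_444_32, 8 otherwise). The helper name is invented and the result is in bytes per microsecond.

static double writeback_bandwidth_bytes_per_us(double output_width,
					       double output_height,
					       double input_height,
					       double h_total,
					       double pixel_clock_mhz,
					       double bytes_per_pixel)
{
	/* time to consume the writeback input, in microseconds */
	double consume_time_us = h_total * input_height / pixel_clock_mhz;

	return output_width * output_height / consume_time_us * bytes_per_pixel;
}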
-
- mode_lib->mp.TotalDataReadBandwidth = 0;
- for (k = 0; k < s->num_active_planes; ++k) {
- mode_lib->mp.TotalDataReadBandwidth = mode_lib->mp.TotalDataReadBandwidth + mode_lib->mp.SurfaceReadBandwidthLuma[k] + mode_lib->mp.SurfaceReadBandwidthChroma[k];
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, mode_lib->mp.TotalDataReadBandwidth);
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceChroma = %f\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthChroma[k]);
-#endif
- }
-
- CalculateStutterEfficiency_params->display_cfg = display_cfg;
- CalculateStutterEfficiency_params->CompressedBufferSizeInkByte = mode_lib->mp.CompressedBufferSizeInkByte;
- CalculateStutterEfficiency_params->UnboundedRequestEnabled = mode_lib->mp.UnboundedRequestEnabled;
- CalculateStutterEfficiency_params->MetaFIFOSizeInKEntries = mode_lib->ip.meta_fifo_size_in_kentries;
- CalculateStutterEfficiency_params->ZeroSizeBufferEntries = mode_lib->ip.zero_size_buffer_entries;
- CalculateStutterEfficiency_params->PixelChunkSizeInKByte = mode_lib->ip.pixel_chunk_size_kbytes;
- CalculateStutterEfficiency_params->NumberOfActiveSurfaces = s->num_active_planes;
- CalculateStutterEfficiency_params->ROBBufferSizeInKByte = mode_lib->ip.rob_buffer_size_kbytes;
- CalculateStutterEfficiency_params->TotalDataReadBandwidth = mode_lib->mp.TotalDataReadBandwidth;
- CalculateStutterEfficiency_params->DCFCLK = mode_lib->mp.Dcfclk;
- CalculateStutterEfficiency_params->ReturnBW = mode_lib->mp.urg_bandwidth_available_min[dml2_core_internal_soc_state_sys_active];
- CalculateStutterEfficiency_params->CompbufReservedSpace64B = mode_lib->mp.compbuf_reserved_space_64b;
- CalculateStutterEfficiency_params->CompbufReservedSpaceZs = mode_lib->ip.compbuf_reserved_space_zs;
- CalculateStutterEfficiency_params->SRExitTime = mode_lib->soc.power_management_parameters.stutter_exit_latency_us;
- CalculateStutterEfficiency_params->SRExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_exit_latency_us;
- CalculateStutterEfficiency_params->SynchronizeTimings = display_cfg->overrides.synchronize_timings;
- CalculateStutterEfficiency_params->StutterEnterPlusExitWatermark = mode_lib->mp.Watermark.StutterEnterPlusExitWatermark;
- CalculateStutterEfficiency_params->Z8StutterEnterPlusExitWatermark = mode_lib->mp.Watermark.Z8StutterEnterPlusExitWatermark;
- CalculateStutterEfficiency_params->ProgressiveToInterlaceUnitInOPP = mode_lib->ip.ptoi_supported;
- CalculateStutterEfficiency_params->MinTTUVBlank = mode_lib->mp.MinTTUVBlank;
- CalculateStutterEfficiency_params->DPPPerSurface = mode_lib->mp.NoOfDPP;
- CalculateStutterEfficiency_params->DETBufferSizeY = mode_lib->mp.DETBufferSizeY;
- CalculateStutterEfficiency_params->BytePerPixelY = mode_lib->mp.BytePerPixelY;
- CalculateStutterEfficiency_params->BytePerPixelDETY = mode_lib->mp.BytePerPixelInDETY;
- CalculateStutterEfficiency_params->SwathWidthY = mode_lib->mp.SwathWidthY;
- CalculateStutterEfficiency_params->SwathHeightY = mode_lib->mp.SwathHeightY;
- CalculateStutterEfficiency_params->SwathHeightC = mode_lib->mp.SwathHeightC;
- CalculateStutterEfficiency_params->BlockHeight256BytesY = mode_lib->mp.Read256BlockHeightY;
- CalculateStutterEfficiency_params->BlockWidth256BytesY = mode_lib->mp.Read256BlockWidthY;
- CalculateStutterEfficiency_params->BlockHeight256BytesC = mode_lib->mp.Read256BlockHeightC;
- CalculateStutterEfficiency_params->BlockWidth256BytesC = mode_lib->mp.Read256BlockWidthC;
- CalculateStutterEfficiency_params->DCCYMaxUncompressedBlock = mode_lib->mp.DCCYMaxUncompressedBlock;
- CalculateStutterEfficiency_params->DCCCMaxUncompressedBlock = mode_lib->mp.DCCCMaxUncompressedBlock;
- CalculateStutterEfficiency_params->ReadBandwidthSurfaceLuma = mode_lib->mp.SurfaceReadBandwidthLuma;
- CalculateStutterEfficiency_params->ReadBandwidthSurfaceChroma = mode_lib->mp.SurfaceReadBandwidthChroma;
- CalculateStutterEfficiency_params->dpte_row_bw = mode_lib->mp.dpte_row_bw;
- CalculateStutterEfficiency_params->meta_row_bw = mode_lib->mp.meta_row_bw;
- CalculateStutterEfficiency_params->rob_alloc_compressed = mode_lib->ip.dcn_mrq_present;
-
- // output
- CalculateStutterEfficiency_params->StutterEfficiencyNotIncludingVBlank = &mode_lib->mp.StutterEfficiencyNotIncludingVBlank;
- CalculateStutterEfficiency_params->StutterEfficiency = &mode_lib->mp.StutterEfficiency;
- CalculateStutterEfficiency_params->NumberOfStutterBurstsPerFrame = &mode_lib->mp.NumberOfStutterBurstsPerFrame;
- CalculateStutterEfficiency_params->Z8StutterEfficiencyNotIncludingVBlank = &mode_lib->mp.Z8StutterEfficiencyNotIncludingVBlank;
- CalculateStutterEfficiency_params->Z8StutterEfficiency = &mode_lib->mp.Z8StutterEfficiency;
- CalculateStutterEfficiency_params->Z8NumberOfStutterBurstsPerFrame = &mode_lib->mp.Z8NumberOfStutterBurstsPerFrame;
- CalculateStutterEfficiency_params->StutterPeriod = &mode_lib->mp.StutterPeriod;
- CalculateStutterEfficiency_params->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = &mode_lib->mp.DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE;
-
- // Stutter Efficiency
- CalculateStutterEfficiency(&mode_lib->scratch, CalculateStutterEfficiency_params);
-
-#ifdef __DML_VBA_ALLOW_DELTA__
- // Calculate z8 stutter eff assuming 0 reserved space
- CalculateStutterEfficiency_params->CompbufReservedSpace64B = 0;
- CalculateStutterEfficiency_params->CompbufReservedSpaceZs = 0;
-
- CalculateStutterEfficiency_params->Z8StutterEfficiencyNotIncludingVBlank = &mode_lib->mp.Z8StutterEfficiencyNotIncludingVBlankBestCase;
- CalculateStutterEfficiency_params->Z8StutterEfficiency = &mode_lib->mp.Z8StutterEfficiencyBestCase;
- CalculateStutterEfficiency_params->Z8NumberOfStutterBurstsPerFrame = &mode_lib->mp.Z8NumberOfStutterBurstsPerFrameBestCase;
- CalculateStutterEfficiency_params->StutterPeriod = &mode_lib->mp.StutterPeriodBestCase;
-
- // Stutter Efficiency
- CalculateStutterEfficiency(&mode_lib->scratch, CalculateStutterEfficiency_params);
-#else
- mode_lib->mp.Z8StutterEfficiencyNotIncludingVBlankBestCase = mode_lib->mp.Z8StutterEfficiencyNotIncludingVBlank;
- mode_lib->mp.Z8StutterEfficiencyBestCase = mode_lib->mp.Z8StutterEfficiency;
- mode_lib->mp.Z8NumberOfStutterBurstsPerFrameBestCase = mode_lib->mp.Z8NumberOfStutterBurstsPerFrame;
- mode_lib->mp.StutterPeriodBestCase = mode_lib->mp.StutterPeriod;
-#endif
- } // PrefetchAndImmediateFlipSupported
-
- const long min_return_uclk_cycles = 83;
- const long min_return_fclk_cycles = 75;
- double max_fclk_mhz = min_clk_table->max_clocks_khz.fclk / 1000.0;
- double max_uclk_mhz = mode_lib->soc.clk_table.uclk.clk_values_khz[mode_lib->soc.clk_table.uclk.num_clk_values - 1] / 1000.0;
- double hard_minimum_dcfclk_mhz = (double)min_clk_table->dram_bw_table.entries[0].min_dcfclk_khz / 1000.0;
- double min_return_latency_in_DCFCLK_cycles = (min_return_uclk_cycles / max_uclk_mhz + min_return_fclk_cycles / max_fclk_mhz) * hard_minimum_dcfclk_mhz;
- mode_lib->mp.min_return_latency_in_dcfclk = (unsigned int)min_return_latency_in_DCFCLK_cycles;
- mode_lib->mp.dcfclk_deep_sleep_hysteresis = (unsigned int)math_max2(32, (double)mode_lib->ip.pixel_chunk_size_kbytes * 1024 * 3 / 4 / 64 - min_return_latency_in_DCFCLK_cycles);
- mode_lib->mp.dcfclk_deep_sleep_hysteresis = 255;
- DML2_ASSERT(mode_lib->mp.dcfclk_deep_sleep_hysteresis < 256);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_fclk_mhz = %f\n", __func__, max_fclk_mhz);
- dml2_printf("DML::%s: max_uclk_mhz = %f\n", __func__, max_uclk_mhz);
- dml2_printf("DML::%s: hard_minimum_dcfclk_mhz = %f\n", __func__, hard_minimum_dcfclk_mhz);
- dml2_printf("DML::%s: min_return_uclk_cycles = %d\n", __func__, min_return_uclk_cycles);
- dml2_printf("DML::%s: min_return_fclk_cycles = %d\n", __func__, min_return_fclk_cycles);
- dml2_printf("DML::%s: min_return_latency_in_DCFCLK_cycles = %f\n", __func__, min_return_latency_in_DCFCLK_cycles);
- dml2_printf("DML::%s: dcfclk_deep_sleep_hysteresis = %d \n", __func__, mode_lib->mp.dcfclk_deep_sleep_hysteresis);
- dml2_printf("DML::%s: --- END --- \n", __func__);
-#endif
-
- return (in_out_params->mode_lib->mp.PrefetchAndImmediateFlipSupported);
-}
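A standalone sketch (editorial) of the minimum-return-latency and deep-sleep-hysteresis arithmetic at the end of the function above. The clock and chunk-size values are invented examples, not taken from any SoC table; note the driver subsequently hard-codes the hysteresis to 255.

#include <math.h>
#include <stdio.h>

int main(void)
{
	const long min_return_uclk_cycles = 83;
	const long min_return_fclk_cycles = 75;
	double max_fclk_mhz = 2000.0;             /* assumed example */
	double max_uclk_mhz = 1200.0;             /* assumed example */
	double hard_minimum_dcfclk_mhz = 400.0;   /* assumed example */
	unsigned int pixel_chunk_size_kbytes = 8; /* assumed example */

	double min_return_latency_in_dcfclk_cycles =
		(min_return_uclk_cycles / max_uclk_mhz +
		 min_return_fclk_cycles / max_fclk_mhz) * hard_minimum_dcfclk_mhz;

	/* hysteresis sized to 3/4 of a pixel chunk in 64-byte units, minus the
	 * return latency, floored at 32 cycles */
	double hysteresis = fmax(32.0,
		(double)pixel_chunk_size_kbytes * 1024.0 * 3.0 / 4.0 / 64.0 -
		min_return_latency_in_dcfclk_cycles);

	printf("min return latency = %u dcfclk cycles\n",
	       (unsigned int)min_return_latency_in_dcfclk_cycles);
	printf("deep sleep hysteresis = %u\n", (unsigned int)hysteresis);
	return 0;
}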
-
-static bool dml_is_dual_plane(enum dml2_source_format_class source_format)
-{
- bool ret_val = 0;
-
- if ((source_format == dml2_420_12) || (source_format == dml2_420_8) || (source_format == dml2_420_10) || (source_format == dml2_rgbe_alpha))
- ret_val = 1;
-
- return ret_val;
-}
-
-static unsigned int dml_get_plane_idx(const struct dml2_core_internal_display_mode_lib *mode_lib, unsigned int pipe_idx)
-{
- unsigned int plane_idx = mode_lib->mp.pipe_plane[pipe_idx];
- return plane_idx;
-}
-
-static void rq_dlg_get_wm_regs(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_dchub_watermark_regs *wm_regs)
-{
- double refclk_freq_in_mhz = (display_cfg->overrides.hw.dlg_ref_clk_mhz > 0) ? (double)display_cfg->overrides.hw.dlg_ref_clk_mhz : mode_lib->soc.dchub_refclk_mhz;
-
- wm_regs->fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
- wm_regs->sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
- wm_regs->sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
- wm_regs->temp_read_or_ppt = 0;
- wm_regs->uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
- wm_regs->urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
-}
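A minimal sketch of the conversion rq_dlg_get_wm_regs() performs above: a watermark expressed in microseconds is programmed as DLG refclk cycles, i.e. cycles = time_us * refclk_mhz. The helper name and the example in the comment are invented.

/* e.g. a 9.0 us urgent watermark at a 24.0 MHz DLG refclk programs 216 cycles */
static unsigned int wm_us_to_refclk_cycles(double watermark_us, double refclk_freq_in_mhz)
{
	return (unsigned int)(watermark_us * refclk_freq_in_mhz);
}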
-
-static unsigned int log_and_substract_if_non_zero(unsigned int a, unsigned int subtrahend)
-{
- if (a == 0)
- return 0;
-
- return (math_log2_approx(a) - subtrahend);
-}
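A minimal sketch of the register encoding log_and_substract_if_non_zero() implements above: sizes are programmed as log2(bytes) minus a field-specific offset, and zero stays zero. The helper name is invented; the example assumes an 8 KiB pixel chunk, where log2(8192) = 13 with offset 10 gives chunk_size = 3.

static unsigned int encode_size_field(unsigned int bytes, unsigned int offset)
{
	unsigned int log2_val = 0;

	if (bytes == 0)
		return 0;

	/* integer log2; matches math_log2_approx for power-of-two sizes */
	while (bytes >>= 1)
		log2_val++;

	return log2_val - offset;
}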
-
-void dml2_core_shared_cursor_dlg_reg(struct dml2_cursor_dlg_regs *cursor_dlg_regs, const struct dml2_get_cursor_dlg_reg *p)
-{
- int dst_x_offset = (int)((p->cursor_x_position + (p->cursor_stereo_en == 0 ? 0 : math_max2(p->cursor_primary_offset, p->cursor_secondary_offset)) -
- (p->cursor_hotspot_x * (p->cursor_2x_magnify == 0 ? 1 : 2))) * p->dlg_refclk_mhz / p->pixel_rate_mhz / p->hratio);
- cursor_dlg_regs->dst_x_offset = (unsigned int)((dst_x_offset > 0) ? dst_x_offset : 0);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG::%s: cursor_x_position=%d\n", __func__, p->cursor_x_position);
- dml2_printf("DML_DLG::%s: dlg_refclk_mhz=%f\n", __func__, p->dlg_refclk_mhz);
- dml2_printf("DML_DLG::%s: pixel_rate_mhz=%f\n", __func__, p->pixel_rate_mhz);
- dml2_printf("DML_DLG::%s: dst_x_offset=%d\n", __func__, dst_x_offset);
- dml2_printf("DML_DLG::%s: dst_x_offset=%d (reg)\n", __func__, cursor_dlg_regs->dst_x_offset);
-#endif
-
- cursor_dlg_regs->chunk_hdl_adjust = 3;
- cursor_dlg_regs->dst_y_offset = 0;
-
- cursor_dlg_regs->qos_level_fixed = 8;
- cursor_dlg_regs->qos_ramp_disable = 0;
-}
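A minimal sketch of the dst_x_offset computation above: the cursor x position (minus the magnified hotspot) is translated from pixels into DLG refclk cycles and clamped at zero. The stereo offset term is dropped here for brevity and the helper name is invented.

static unsigned int cursor_dst_x_offset(int cursor_x_position,
					int cursor_hotspot_x,
					int magnify_2x,
					double dlg_refclk_mhz,
					double pixel_rate_mhz,
					double hratio)
{
	/* stereo cursor offsets omitted in this simplified sketch */
	int dst_x_offset = (int)((cursor_x_position -
				  cursor_hotspot_x * (magnify_2x ? 2 : 1)) *
				 dlg_refclk_mhz / pixel_rate_mhz / hratio);

	return dst_x_offset > 0 ? (unsigned int)dst_x_offset : 0;
}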
-
-static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs,
- const struct dml2_display_cfg *display_cfg,
- const struct dml2_core_internal_display_mode_lib *mode_lib,
- unsigned int pipe_idx)
-{
- dml2_printf("DML_DLG::%s: Calculation for pipe[%d] start\n", __func__, pipe_idx);
-
- unsigned int plane_idx = dml_get_plane_idx(mode_lib, pipe_idx);
- enum dml2_source_format_class source_format = display_cfg->plane_descriptors[plane_idx].pixel_format;
- enum dml2_swizzle_mode sw_mode = display_cfg->plane_descriptors[plane_idx].surface.tiling;
- bool dual_plane = dml_is_dual_plane((enum dml2_source_format_class)(source_format));
-
- unsigned int pixel_chunk_bytes = 0;
- unsigned int min_pixel_chunk_bytes = 0;
- unsigned int dpte_group_bytes = 0;
- unsigned int mpte_group_bytes = 0;
-
- unsigned int p1_pixel_chunk_bytes = 0;
- unsigned int p1_min_pixel_chunk_bytes = 0;
- unsigned int p1_dpte_group_bytes = 0;
- unsigned int p1_mpte_group_bytes = 0;
-
- pixel_chunk_bytes = (unsigned int)(mode_lib->ip.pixel_chunk_size_kbytes * 1024);
- min_pixel_chunk_bytes = (unsigned int)(mode_lib->ip.min_pixel_chunk_size_bytes);
-
- if (pixel_chunk_bytes == 64 * 1024)
- min_pixel_chunk_bytes = 0;
-
- dpte_group_bytes = (unsigned int)(mode_lib->mp.dpte_group_bytes[mode_lib->mp.pipe_plane[pipe_idx]]);
- mpte_group_bytes = (unsigned int)(mode_lib->mp.vm_group_bytes[mode_lib->mp.pipe_plane[pipe_idx]]);
-
- p1_pixel_chunk_bytes = pixel_chunk_bytes;
- p1_min_pixel_chunk_bytes = min_pixel_chunk_bytes;
- p1_dpte_group_bytes = dpte_group_bytes;
- p1_mpte_group_bytes = mpte_group_bytes;
-
- if (source_format == dml2_rgbe_alpha)
- p1_pixel_chunk_bytes = (unsigned int)(mode_lib->ip.alpha_pixel_chunk_size_kbytes * 1024);
-
- rq_regs->unbounded_request_enabled = mode_lib->mp.UnboundedRequestEnabled;
- rq_regs->rq_regs_l.chunk_size = log_and_substract_if_non_zero(pixel_chunk_bytes, 10);
- rq_regs->rq_regs_c.chunk_size = log_and_substract_if_non_zero(p1_pixel_chunk_bytes, 10);
-
- if (min_pixel_chunk_bytes == 0)
- rq_regs->rq_regs_l.min_chunk_size = 0;
- else
- rq_regs->rq_regs_l.min_chunk_size = log_and_substract_if_non_zero(min_pixel_chunk_bytes, 8 - 1);
-
- if (p1_min_pixel_chunk_bytes == 0)
- rq_regs->rq_regs_c.min_chunk_size = 0;
- else
- rq_regs->rq_regs_c.min_chunk_size = log_and_substract_if_non_zero(p1_min_pixel_chunk_bytes, 8 - 1);
-
- rq_regs->rq_regs_l.dpte_group_size = log_and_substract_if_non_zero(dpte_group_bytes, 6);
- rq_regs->rq_regs_l.mpte_group_size = log_and_substract_if_non_zero(mpte_group_bytes, 6);
- rq_regs->rq_regs_c.dpte_group_size = log_and_substract_if_non_zero(p1_dpte_group_bytes, 6);
- rq_regs->rq_regs_c.mpte_group_size = log_and_substract_if_non_zero(p1_mpte_group_bytes, 6);
-
- unsigned int detile_buf_size_in_bytes = (unsigned int)(mode_lib->mp.DETBufferSizeInKByte[mode_lib->mp.pipe_plane[pipe_idx]] * 1024);
- unsigned int detile_buf_plane1_addr = 0;
-
- if (sw_mode == dml2_sw_linear && display_cfg->gpuvm_enable) {
- unsigned int p0_pte_row_height_linear = (unsigned int)(mode_lib->mp.dpte_row_height_linear[mode_lib->mp.pipe_plane[pipe_idx]]);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: p0_pte_row_height_linear = %u\n", __func__, p0_pte_row_height_linear);
-#endif
- DML2_ASSERT(p0_pte_row_height_linear >= 8);
-
- rq_regs->rq_regs_l.pte_row_height_linear = math_log2_approx(p0_pte_row_height_linear) - 3;
- if (dual_plane) {
- unsigned int p1_pte_row_height_linear = (unsigned int)(mode_lib->mp.dpte_row_height_linear_chroma[mode_lib->mp.pipe_plane[pipe_idx]]);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: p1_pte_row_height_linear = %u\n", __func__, p1_pte_row_height_linear);
-#endif
- if (sw_mode == dml2_sw_linear) {
- DML2_ASSERT(p1_pte_row_height_linear >= 8);
- }
-
- rq_regs->rq_regs_c.pte_row_height_linear = math_log2_approx(p1_pte_row_height_linear) - 3;
- }
- } else {
- rq_regs->rq_regs_l.pte_row_height_linear = 0;
- rq_regs->rq_regs_c.pte_row_height_linear = 0;
- }
-
- rq_regs->rq_regs_l.swath_height = log_and_substract_if_non_zero(mode_lib->mp.SwathHeightY[mode_lib->mp.pipe_plane[pipe_idx]], 0);
- rq_regs->rq_regs_c.swath_height = log_and_substract_if_non_zero(mode_lib->mp.SwathHeightC[mode_lib->mp.pipe_plane[pipe_idx]], 0);
-
- // FIXME_DCN4, programming guide has dGPU condition
- if (pixel_chunk_bytes >= 32 * 1024 || (dual_plane && p1_pixel_chunk_bytes >= 32 * 1024)) { //32kb
- rq_regs->drq_expansion_mode = 0;
- } else {
- rq_regs->drq_expansion_mode = 2;
- }
- rq_regs->prq_expansion_mode = 1;
- rq_regs->crq_expansion_mode = 1;
- rq_regs->mrq_expansion_mode = 1;
-
- double stored_swath_l_bytes = mode_lib->mp.DETBufferSizeY[mode_lib->mp.pipe_plane[pipe_idx]];
- double stored_swath_c_bytes = mode_lib->mp.DETBufferSizeC[mode_lib->mp.pipe_plane[pipe_idx]];
- bool is_phantom_pipe = dml_get_is_phantom_pipe(display_cfg, mode_lib, pipe_idx);
-
-	// Note: detile_buf_plane1_addr is in units of 1KB
- if (dual_plane) {
- if (is_phantom_pipe) {
- detile_buf_plane1_addr = (unsigned int)((1024.0 * 1024.0) / 2.0 / 1024.0); // half to chroma
- } else {
- if (stored_swath_l_bytes / stored_swath_c_bytes <= 1.5) {
- detile_buf_plane1_addr = (unsigned int)(detile_buf_size_in_bytes / 2.0 / 1024.0); // half to chroma
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: detile_buf_plane1_addr = %d (1/2 to chroma)\n", __func__, detile_buf_plane1_addr);
-#endif
- } else {
- detile_buf_plane1_addr = (unsigned int)(dml_round_to_multiple((unsigned int)((2.0 * detile_buf_size_in_bytes) / 3.0), 1024, 0) / 1024.0); // 2/3 to luma
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: detile_buf_plane1_addr = %d (1/3 chroma)\n", __func__, detile_buf_plane1_addr);
-#endif
- }
- }
- }
- rq_regs->plane1_base_address = detile_buf_plane1_addr;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: is_phantom_pipe = %d\n", __func__, is_phantom_pipe);
- dml2_printf("DML_DLG: %s: stored_swath_l_bytes = %f\n", __func__, stored_swath_l_bytes);
- dml2_printf("DML_DLG: %s: stored_swath_c_bytes = %f\n", __func__, stored_swath_c_bytes);
- dml2_printf("DML_DLG: %s: detile_buf_size_in_bytes = %d\n", __func__, detile_buf_size_in_bytes);
- dml2_printf("DML_DLG: %s: detile_buf_plane1_addr = %d\n", __func__, detile_buf_plane1_addr);
- dml2_printf("DML_DLG: %s: plane1_base_address = %d\n", __func__, rq_regs->plane1_base_address);
-#endif
- //dml2_printf_rq_regs_st(rq_regs);
- dml2_printf("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx);
-}
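A minimal sketch of the detile-buffer split decision in rq_dlg_get_rq_reg() above for the non-phantom dual-plane case: chroma gets half the buffer when the luma and chroma swaths are of similar size, otherwise luma keeps roughly 2/3. The helper name is invented; the return value is in 1KB units, as the register expects.

static unsigned int plane1_base_address_kb(unsigned int detile_buf_size_in_bytes,
					   double stored_swath_l_bytes,
					   double stored_swath_c_bytes)
{
	if (stored_swath_l_bytes / stored_swath_c_bytes <= 1.5)
		return detile_buf_size_in_bytes / 2 / 1024;	/* half to chroma */

	/* 2/3 of the buffer to luma, rounded down to a 1KB multiple */
	return (unsigned int)(2.0 * detile_buf_size_in_bytes / 3.0) / 1024;
}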
-
-static void rq_dlg_get_dlg_reg(
- struct dml2_core_internal_scratch *s,
- struct dml2_display_dlg_regs *disp_dlg_regs,
- struct dml2_display_ttu_regs *disp_ttu_regs,
- const struct dml2_display_cfg *display_cfg,
- const struct dml2_core_internal_display_mode_lib *mode_lib,
- const unsigned int pipe_idx)
-{
- struct dml2_core_shared_rq_dlg_get_dlg_reg_locals *l = &s->rq_dlg_get_dlg_reg_locals;
-
- memset(l, 0, sizeof(struct dml2_core_shared_rq_dlg_get_dlg_reg_locals));
-
- dml2_printf("DML_DLG::%s: Calculation for pipe_idx=%d\n", __func__, pipe_idx);
-
- l->plane_idx = dml_get_plane_idx(mode_lib, pipe_idx);
- dml2_assert(l->plane_idx < DML2_MAX_PLANES);
-
- l->source_format = dml2_444_8;
- l->dual_plane = dml_is_dual_plane(l->source_format);
- l->odm_mode = dml2_odm_mode_bypass;
-
- l->htotal = 0;
- l->hactive = 0;
- l->hblank_end = 0;
- l->vblank_end = 0;
- l->interlaced = false;
- l->pclk_freq_in_mhz = 0.0;
- l->refclk_freq_in_mhz = (display_cfg->overrides.hw.dlg_ref_clk_mhz > 0) ? (double)display_cfg->overrides.hw.dlg_ref_clk_mhz : mode_lib->soc.dchub_refclk_mhz;
- l->ref_freq_to_pix_freq = 0.0;
-
- if (l->plane_idx < DML2_MAX_PLANES) {
-
- l->timing = &display_cfg->stream_descriptors[display_cfg->plane_descriptors[l->plane_idx].stream_index].timing;
- l->source_format = display_cfg->plane_descriptors[l->plane_idx].pixel_format;
- l->odm_mode = mode_lib->mp.ODMMode[l->plane_idx];
-
- l->htotal = l->timing->h_total;
- l->hactive = l->timing->h_active;
- l->hblank_end = l->timing->h_blank_end;
- l->vblank_end = l->timing->v_blank_end;
- l->interlaced = l->timing->interlaced;
- l->pclk_freq_in_mhz = (double)l->timing->pixel_clock_khz / 1000;
- l->ref_freq_to_pix_freq = l->refclk_freq_in_mhz / l->pclk_freq_in_mhz;
-
- dml2_printf("DML_DLG::%s: plane_idx = %d\n", __func__, l->plane_idx);
- dml2_printf("DML_DLG: %s: htotal = %d\n", __func__, l->htotal);
- dml2_printf("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, l->refclk_freq_in_mhz);
- dml2_printf("DML_DLG: %s: dlg_ref_clk_mhz = %3.2f\n", __func__, display_cfg->overrides.hw.dlg_ref_clk_mhz);
- dml2_printf("DML_DLG: %s: soc.refclk_mhz = %3.2f\n", __func__, mode_lib->soc.dchub_refclk_mhz);
- dml2_printf("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, l->pclk_freq_in_mhz);
- dml2_printf("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, l->ref_freq_to_pix_freq);
- dml2_printf("DML_DLG: %s: interlaced = %d\n", __func__, l->interlaced);
-
- DML2_ASSERT(l->refclk_freq_in_mhz != 0);
- DML2_ASSERT(l->pclk_freq_in_mhz != 0);
- DML2_ASSERT(l->ref_freq_to_pix_freq < 4.0);
-
-		// Need to figure out which side of the odm combine we're in
-		// Assume the pipe instances under the same plane are in order
-
- if (l->odm_mode == dml2_odm_mode_bypass) {
- disp_dlg_regs->refcyc_h_blank_end = (unsigned int)((double)l->hblank_end * l->ref_freq_to_pix_freq);
- } else if (l->odm_mode == dml2_odm_mode_combine_2to1 || l->odm_mode == dml2_odm_mode_combine_3to1 || l->odm_mode == dml2_odm_mode_combine_4to1) {
- // find out how many pipe are in this plane
- l->num_active_pipes = mode_lib->mp.num_active_pipes;
- l->first_pipe_idx_in_plane = DML2_MAX_PLANES;
- l->pipe_idx_in_combine = 0; // pipe index within the plane
- l->odm_combine_factor = 2;
-
- if (l->odm_mode == dml2_odm_mode_combine_3to1)
- l->odm_combine_factor = 3;
- else if (l->odm_mode == dml2_odm_mode_combine_4to1)
- l->odm_combine_factor = 4;
-
- for (unsigned int i = 0; i < l->num_active_pipes; i++) {
- if (dml_get_plane_idx(mode_lib, i) == l->plane_idx) {
- if (i < l->first_pipe_idx_in_plane) {
- l->first_pipe_idx_in_plane = i;
- }
- }
- }
-			l->pipe_idx_in_combine = pipe_idx - l->first_pipe_idx_in_plane; // DML assumes the pipes in the same plane have contiguous indexing (i.e. plane 0 uses pipes 0 and 1, plane 1 uses pipes 2 and 3, etc.)
-
- disp_dlg_regs->refcyc_h_blank_end = (unsigned int)(((double)l->hblank_end + (double)l->pipe_idx_in_combine * (double)l->hactive / (double)l->odm_combine_factor) * l->ref_freq_to_pix_freq);
- dml2_printf("DML_DLG: %s: pipe_idx = %d\n", __func__, pipe_idx);
- dml2_printf("DML_DLG: %s: first_pipe_idx_in_plane = %d\n", __func__, l->first_pipe_idx_in_plane);
- dml2_printf("DML_DLG: %s: pipe_idx_in_combine = %d\n", __func__, l->pipe_idx_in_combine);
- dml2_printf("DML_DLG: %s: odm_combine_factor = %d\n", __func__, l->odm_combine_factor);
- }
- dml2_printf("DML_DLG: %s: refcyc_h_blank_end = %d\n", __func__, disp_dlg_regs->refcyc_h_blank_end);
-
- DML2_ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int)math_pow(2, 13));
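A minimal sketch of the per-pipe refcyc_h_blank_end derivation above for an N-to-1 ODM combine: each pipe's h_blank_end is shifted by its share of the active width before being scaled into refclk cycles. The helper name is invented.

static unsigned int odm_refcyc_h_blank_end(unsigned int hblank_end,
					   unsigned int hactive,
					   unsigned int pipe_idx_in_combine,
					   unsigned int odm_combine_factor,
					   double ref_freq_to_pix_freq)
{
	double shifted_hblank_end = (double)hblank_end +
		(double)pipe_idx_in_combine * (double)hactive / (double)odm_combine_factor;

	return (unsigned int)(shifted_hblank_end * ref_freq_to_pix_freq);
}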
-
- disp_dlg_regs->ref_freq_to_pix_freq = (unsigned int)(l->ref_freq_to_pix_freq * math_pow(2, 19));
- disp_dlg_regs->refcyc_per_htotal = (unsigned int)(l->ref_freq_to_pix_freq * (double)l->htotal * math_pow(2, 8));
- disp_dlg_regs->dlg_vblank_end = l->interlaced ? (l->vblank_end / 2) : l->vblank_end; // 15 bits
-
- l->min_ttu_vblank = mode_lib->mp.MinTTUVBlank[mode_lib->mp.pipe_plane[pipe_idx]];
- l->min_dst_y_next_start = (unsigned int)(mode_lib->mp.MIN_DST_Y_NEXT_START[mode_lib->mp.pipe_plane[pipe_idx]]);
-
- dml2_printf("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, l->min_ttu_vblank);
- dml2_printf("DML_DLG: %s: min_dst_y_next_start = %d\n", __func__, l->min_dst_y_next_start);
- dml2_printf("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, l->ref_freq_to_pix_freq);
-
- l->vready_after_vcount0 = (unsigned int)(mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[mode_lib->mp.pipe_plane[pipe_idx]]);
- disp_dlg_regs->vready_after_vcount0 = l->vready_after_vcount0;
-
- dml2_printf("DML_DLG: %s: vready_after_vcount0 = %d\n", __func__, disp_dlg_regs->vready_after_vcount0);
-
- l->dst_x_after_scaler = (unsigned int)(mode_lib->mp.DSTXAfterScaler[mode_lib->mp.pipe_plane[pipe_idx]]);
- l->dst_y_after_scaler = (unsigned int)(mode_lib->mp.DSTYAfterScaler[mode_lib->mp.pipe_plane[pipe_idx]]);
-
- dml2_printf("DML_DLG: %s: dst_x_after_scaler = %d\n", __func__, l->dst_x_after_scaler);
- dml2_printf("DML_DLG: %s: dst_y_after_scaler = %d\n", __func__, l->dst_y_after_scaler);
-
- l->dst_y_prefetch = mode_lib->mp.dst_y_prefetch[mode_lib->mp.pipe_plane[pipe_idx]];
- l->dst_y_per_vm_vblank = mode_lib->mp.dst_y_per_vm_vblank[mode_lib->mp.pipe_plane[pipe_idx]];
- l->dst_y_per_row_vblank = mode_lib->mp.dst_y_per_row_vblank[mode_lib->mp.pipe_plane[pipe_idx]];
- l->dst_y_per_vm_flip = mode_lib->mp.dst_y_per_vm_flip[mode_lib->mp.pipe_plane[pipe_idx]];
- l->dst_y_per_row_flip = mode_lib->mp.dst_y_per_row_flip[mode_lib->mp.pipe_plane[pipe_idx]];
-
- l->max_dst_y_per_vm_vblank = 32.0; //U5.2
- l->max_dst_y_per_row_vblank = 16.0; //U4.2
-
- // magic!
- if (l->htotal <= 75) {
- l->max_dst_y_per_vm_vblank = 100.0;
- l->max_dst_y_per_row_vblank = 100.0;
- }
-
- dml2_printf("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, l->dst_y_prefetch);
- dml2_printf("DML_DLG: %s: dst_y_per_vm_flip = %3.2f\n", __func__, l->dst_y_per_vm_flip);
- dml2_printf("DML_DLG: %s: dst_y_per_row_flip = %3.2f\n", __func__, l->dst_y_per_row_flip);
- dml2_printf("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, l->dst_y_per_vm_vblank);
- dml2_printf("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, l->dst_y_per_row_vblank);
-
- DML2_ASSERT(l->dst_y_per_vm_vblank < l->max_dst_y_per_vm_vblank);
- DML2_ASSERT(l->dst_y_per_row_vblank < l->max_dst_y_per_row_vblank);
- if (l->dst_y_prefetch > 0 && l->dst_y_per_vm_vblank > 0 && l->dst_y_per_row_vblank > 0) {
- DML2_ASSERT(l->dst_y_prefetch > (l->dst_y_per_vm_vblank + l->dst_y_per_row_vblank));
- }
-
- l->vratio_pre_l = mode_lib->mp.VRatioPrefetchY[mode_lib->mp.pipe_plane[pipe_idx]];
- l->vratio_pre_c = mode_lib->mp.VRatioPrefetchC[mode_lib->mp.pipe_plane[pipe_idx]];
-
- dml2_printf("DML_DLG: %s: vratio_pre_l = %3.2f\n", __func__, l->vratio_pre_l);
- dml2_printf("DML_DLG: %s: vratio_pre_c = %3.2f\n", __func__, l->vratio_pre_c);
-
- // Active
- l->refcyc_per_line_delivery_pre_l = mode_lib->mp.DisplayPipeLineDeliveryTimeLumaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_line_delivery_l = mode_lib->mp.DisplayPipeLineDeliveryTimeLuma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
-
- dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n", __func__, l->refcyc_per_line_delivery_pre_l);
- dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n", __func__, l->refcyc_per_line_delivery_l);
-
- l->refcyc_per_line_delivery_pre_c = 0.0;
- l->refcyc_per_line_delivery_c = 0.0;
-
- if (l->dual_plane) {
- l->refcyc_per_line_delivery_pre_c = mode_lib->mp.DisplayPipeLineDeliveryTimeChromaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_line_delivery_c = mode_lib->mp.DisplayPipeLineDeliveryTimeChroma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
-
- dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n", __func__, l->refcyc_per_line_delivery_pre_c);
- dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n", __func__, l->refcyc_per_line_delivery_c);
- }
-
- disp_dlg_regs->refcyc_per_vm_dmdata = (unsigned int)(mode_lib->mp.Tdmdl_vm[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz);
- disp_dlg_regs->dmdata_dl_delta = (unsigned int)(mode_lib->mp.Tdmdl[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz);
-
- l->refcyc_per_req_delivery_pre_l = mode_lib->mp.DisplayPipeRequestDeliveryTimeLumaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_req_delivery_l = mode_lib->mp.DisplayPipeRequestDeliveryTimeLuma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
-
- dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n", __func__, l->refcyc_per_req_delivery_pre_l);
- dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n", __func__, l->refcyc_per_req_delivery_l);
-
- l->refcyc_per_req_delivery_pre_c = 0.0;
- l->refcyc_per_req_delivery_c = 0.0;
- if (l->dual_plane) {
- l->refcyc_per_req_delivery_pre_c = mode_lib->mp.DisplayPipeRequestDeliveryTimeChromaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_req_delivery_c = mode_lib->mp.DisplayPipeRequestDeliveryTimeChroma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
-
- dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n", __func__, l->refcyc_per_req_delivery_pre_c);
- dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n", __func__, l->refcyc_per_req_delivery_c);
- }
-
- // TTU - Cursor
- DML2_ASSERT(display_cfg->plane_descriptors[l->plane_idx].cursor.num_cursors <= 1);
-
- // Assign to register structures
- disp_dlg_regs->min_dst_y_next_start = (unsigned int)((double)l->min_dst_y_next_start * math_pow(2, 2));
- DML2_ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)math_pow(2, 18));
-
- disp_dlg_regs->dst_y_after_scaler = l->dst_y_after_scaler; // in terms of line
- disp_dlg_regs->refcyc_x_after_scaler = (unsigned int)((double)l->dst_x_after_scaler * l->ref_freq_to_pix_freq); // in terms of refclk
- disp_dlg_regs->dst_y_prefetch = (unsigned int)(l->dst_y_prefetch * math_pow(2, 2));
- disp_dlg_regs->dst_y_per_vm_vblank = (unsigned int)(l->dst_y_per_vm_vblank * math_pow(2, 2));
- disp_dlg_regs->dst_y_per_row_vblank = (unsigned int)(l->dst_y_per_row_vblank * math_pow(2, 2));
- disp_dlg_regs->dst_y_per_vm_flip = (unsigned int)(l->dst_y_per_vm_flip * math_pow(2, 2));
- disp_dlg_regs->dst_y_per_row_flip = (unsigned int)(l->dst_y_per_row_flip * math_pow(2, 2));
-
- disp_dlg_regs->vratio_prefetch = (unsigned int)(l->vratio_pre_l * math_pow(2, 19));
- disp_dlg_regs->vratio_prefetch_c = (unsigned int)(l->vratio_pre_c * math_pow(2, 19));
-
- dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_vblank);
- dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_vblank);
- dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip);
- dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip);
-
- disp_dlg_regs->refcyc_per_vm_group_vblank = (unsigned int)(mode_lib->mp.TimePerVMGroupVBlank[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz);
- disp_dlg_regs->refcyc_per_vm_group_flip = (unsigned int)(mode_lib->mp.TimePerVMGroupFlip[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz);
- disp_dlg_regs->refcyc_per_vm_req_vblank = (unsigned int)(mode_lib->mp.TimePerVMRequestVBlank[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz * math_pow(2, 10));
- disp_dlg_regs->refcyc_per_vm_req_flip = (unsigned int)(mode_lib->mp.TimePerVMRequestFlip[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz * math_pow(2, 10));
-
- l->dst_y_per_pte_row_nom_l = mode_lib->mp.DST_Y_PER_PTE_ROW_NOM_L[mode_lib->mp.pipe_plane[pipe_idx]];
- l->dst_y_per_pte_row_nom_c = mode_lib->mp.DST_Y_PER_PTE_ROW_NOM_C[mode_lib->mp.pipe_plane[pipe_idx]];
- l->refcyc_per_pte_group_nom_l = mode_lib->mp.time_per_pte_group_nom_luma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_pte_group_nom_c = mode_lib->mp.time_per_pte_group_nom_chroma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_pte_group_vblank_l = mode_lib->mp.time_per_pte_group_vblank_luma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_pte_group_vblank_c = mode_lib->mp.time_per_pte_group_vblank_chroma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_pte_group_flip_l = mode_lib->mp.time_per_pte_group_flip_luma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_pte_group_flip_c = mode_lib->mp.time_per_pte_group_flip_chroma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_tdlut_group = mode_lib->mp.time_per_tdlut_group[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
-
- disp_dlg_regs->dst_y_per_pte_row_nom_l = (unsigned int)(l->dst_y_per_pte_row_nom_l * math_pow(2, 2));
- disp_dlg_regs->dst_y_per_pte_row_nom_c = (unsigned int)(l->dst_y_per_pte_row_nom_c * math_pow(2, 2));
-
- disp_dlg_regs->refcyc_per_pte_group_nom_l = (unsigned int)(l->refcyc_per_pte_group_nom_l);
- disp_dlg_regs->refcyc_per_pte_group_nom_c = (unsigned int)(l->refcyc_per_pte_group_nom_c);
- disp_dlg_regs->refcyc_per_pte_group_vblank_l = (unsigned int)(l->refcyc_per_pte_group_vblank_l);
- disp_dlg_regs->refcyc_per_pte_group_vblank_c = (unsigned int)(l->refcyc_per_pte_group_vblank_c);
- disp_dlg_regs->refcyc_per_pte_group_flip_l = (unsigned int)(l->refcyc_per_pte_group_flip_l);
- disp_dlg_regs->refcyc_per_pte_group_flip_c = (unsigned int)(l->refcyc_per_pte_group_flip_c);
- disp_dlg_regs->refcyc_per_line_delivery_pre_l = (unsigned int)math_floor2(l->refcyc_per_line_delivery_pre_l, 1);
- disp_dlg_regs->refcyc_per_line_delivery_l = (unsigned int)math_floor2(l->refcyc_per_line_delivery_l, 1);
- disp_dlg_regs->refcyc_per_line_delivery_pre_c = (unsigned int)math_floor2(l->refcyc_per_line_delivery_pre_c, 1);
- disp_dlg_regs->refcyc_per_line_delivery_c = (unsigned int)math_floor2(l->refcyc_per_line_delivery_c, 1);
-
- l->dst_y_per_meta_row_nom_l = mode_lib->mp.DST_Y_PER_META_ROW_NOM_L[mode_lib->mp.pipe_plane[pipe_idx]];
- l->dst_y_per_meta_row_nom_c = mode_lib->mp.DST_Y_PER_META_ROW_NOM_C[mode_lib->mp.pipe_plane[pipe_idx]];
- l->refcyc_per_meta_chunk_nom_l = mode_lib->mp.TimePerMetaChunkNominal[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_meta_chunk_nom_c = mode_lib->mp.TimePerChromaMetaChunkNominal[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_meta_chunk_vblank_l = mode_lib->mp.TimePerMetaChunkVBlank[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_meta_chunk_vblank_c = mode_lib->mp.TimePerChromaMetaChunkVBlank[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_meta_chunk_flip_l = mode_lib->mp.TimePerMetaChunkFlip[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- l->refcyc_per_meta_chunk_flip_c = mode_lib->mp.TimePerChromaMetaChunkFlip[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
-
- disp_dlg_regs->dst_y_per_meta_row_nom_l = (unsigned int)(l->dst_y_per_meta_row_nom_l * math_pow(2, 2));
- disp_dlg_regs->dst_y_per_meta_row_nom_c = (unsigned int)(l->dst_y_per_meta_row_nom_c * math_pow(2, 2));
- disp_dlg_regs->refcyc_per_meta_chunk_nom_l = (unsigned int)(l->refcyc_per_meta_chunk_nom_l);
- disp_dlg_regs->refcyc_per_meta_chunk_nom_c = (unsigned int)(l->refcyc_per_meta_chunk_nom_c);
- disp_dlg_regs->refcyc_per_meta_chunk_vblank_l = (unsigned int)(l->refcyc_per_meta_chunk_vblank_l);
- disp_dlg_regs->refcyc_per_meta_chunk_vblank_c = (unsigned int)(l->refcyc_per_meta_chunk_vblank_c);
- disp_dlg_regs->refcyc_per_meta_chunk_flip_l = (unsigned int)(l->refcyc_per_meta_chunk_flip_l);
- disp_dlg_regs->refcyc_per_meta_chunk_flip_c = (unsigned int)(l->refcyc_per_meta_chunk_flip_c);
-
- disp_dlg_regs->refcyc_per_tdlut_group = (unsigned int)(l->refcyc_per_tdlut_group);
- disp_dlg_regs->dst_y_delta_drq_limit = 0x7fff; // off
-
- disp_ttu_regs->refcyc_per_req_delivery_pre_l = (unsigned int)(l->refcyc_per_req_delivery_pre_l * math_pow(2, 10));
- disp_ttu_regs->refcyc_per_req_delivery_l = (unsigned int)(l->refcyc_per_req_delivery_l * math_pow(2, 10));
- disp_ttu_regs->refcyc_per_req_delivery_pre_c = (unsigned int)(l->refcyc_per_req_delivery_pre_c * math_pow(2, 10));
- disp_ttu_regs->refcyc_per_req_delivery_c = (unsigned int)(l->refcyc_per_req_delivery_c * math_pow(2, 10));
- disp_ttu_regs->qos_level_low_wm = 0;
-
- disp_ttu_regs->qos_level_high_wm = (unsigned int)(4.0 * (double)l->htotal * l->ref_freq_to_pix_freq);
-
- disp_ttu_regs->qos_level_flip = 14;
- disp_ttu_regs->qos_level_fixed_l = 8;
- disp_ttu_regs->qos_level_fixed_c = 8;
- disp_ttu_regs->qos_ramp_disable_l = 0;
- disp_ttu_regs->qos_ramp_disable_c = 0;
- disp_ttu_regs->min_ttu_vblank = (unsigned int)(l->min_ttu_vblank * l->refclk_freq_in_mhz);
-
- // CHECK for HW registers' range, DML2_ASSERT or clamp
- DML2_ASSERT(l->refcyc_per_req_delivery_pre_l < math_pow(2, 13));
- DML2_ASSERT(l->refcyc_per_req_delivery_l < math_pow(2, 13));
- DML2_ASSERT(l->refcyc_per_req_delivery_pre_c < math_pow(2, 13));
- DML2_ASSERT(l->refcyc_per_req_delivery_c < math_pow(2, 13));
- if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int)math_pow(2, 23))
- disp_dlg_regs->refcyc_per_vm_group_vblank = (unsigned int)(math_pow(2, 23) - 1);
-
- if (disp_dlg_regs->refcyc_per_vm_group_flip >= (unsigned int)math_pow(2, 23))
- disp_dlg_regs->refcyc_per_vm_group_flip = (unsigned int)(math_pow(2, 23) - 1);
-
- if (disp_dlg_regs->refcyc_per_vm_req_vblank >= (unsigned int)math_pow(2, 23))
- disp_dlg_regs->refcyc_per_vm_req_vblank = (unsigned int)(math_pow(2, 23) - 1);
-
- if (disp_dlg_regs->refcyc_per_vm_req_flip >= (unsigned int)math_pow(2, 23))
- disp_dlg_regs->refcyc_per_vm_req_flip = (unsigned int)(math_pow(2, 23) - 1);
-
-
- DML2_ASSERT(disp_dlg_regs->dst_y_after_scaler < (unsigned int)8);
- DML2_ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int)math_pow(2, 13));
-
- if (disp_dlg_regs->dst_y_per_pte_row_nom_l >= (unsigned int)math_pow(2, 17)) {
- dml2_printf("DML_DLG: %s: Warning DST_Y_PER_PTE_ROW_NOM_L %u > register max U15.2 %u, clamp to max\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_l, (unsigned int)math_pow(2, 17) - 1);
- l->dst_y_per_pte_row_nom_l = (unsigned int)math_pow(2, 17) - 1;
- }
- if (l->dual_plane) {
- if (disp_dlg_regs->dst_y_per_pte_row_nom_c >= (unsigned int)math_pow(2, 17)) {
- dml2_printf("DML_DLG: %s: Warning DST_Y_PER_PTE_ROW_NOM_C %u > register max U15.2 %u, clamp to max\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_c, (unsigned int)math_pow(2, 17) - 1);
- l->dst_y_per_pte_row_nom_c = (unsigned int)math_pow(2, 17) - 1;
- }
- }
-
- if (disp_dlg_regs->refcyc_per_pte_group_nom_l >= (unsigned int)math_pow(2, 23))
- disp_dlg_regs->refcyc_per_pte_group_nom_l = (unsigned int)(math_pow(2, 23) - 1);
- if (l->dual_plane) {
- if (disp_dlg_regs->refcyc_per_pte_group_nom_c >= (unsigned int)math_pow(2, 23))
- disp_dlg_regs->refcyc_per_pte_group_nom_c = (unsigned int)(math_pow(2, 23) - 1);
- }
- DML2_ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)math_pow(2, 13));
- if (l->dual_plane) {
- DML2_ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c < (unsigned int)math_pow(2, 13));
- }
-
- DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int)math_pow(2, 13));
- DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int)math_pow(2, 13));
- DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int)math_pow(2, 13));
- DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int)math_pow(2, 13));
- DML2_ASSERT(disp_ttu_regs->qos_level_low_wm < (unsigned int)math_pow(2, 14));
- DML2_ASSERT(disp_ttu_regs->qos_level_high_wm < (unsigned int)math_pow(2, 14));
- DML2_ASSERT(disp_ttu_regs->min_ttu_vblank < (unsigned int)math_pow(2, 24));
-
- dml2_printf("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx);
-
- }
-}
-
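The register packing above stores fractional DLG/TTU values as unsigned fixed-point: the double is scaled by math_pow(2, N) and truncated, with N = 2 fraction bits for dst_y_per_pte_row_nom (the U15.2 format named in the clamp warnings) and N = 10 for refcyc_per_req_delivery. A minimal stand-alone sketch of that conversion; the helper name is hypothetical, not driver API:

/* Sketch only: pack a non-negative double into an unsigned fixed-point field. */
static unsigned int pack_u_fixed(double value, unsigned int frac_bits)
{
	/* scale by 2^frac_bits and truncate, as the math_pow() expressions above do */
	return (unsigned int)(value * (double)(1u << frac_bits));
}

/* e.g. dst_y_per_pte_row_nom_l uses 2 fraction bits, refcyc_per_req_delivery_l uses 10 */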
-static void rq_dlg_get_arb_params(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_display_arb_regs *arb_param)
-{
- arb_param->max_req_outstanding = mode_lib->soc.max_outstanding_reqs;
-	arb_param->min_req_outstanding = mode_lib->soc.max_outstanding_reqs; // turn off the sat level feature if this is set to max
- arb_param->sdpif_request_rate_limit = (3 * mode_lib->ip.words_per_channel * mode_lib->soc.clk_table.dram_config.channel_count) / 4;
- arb_param->sdpif_request_rate_limit = arb_param->sdpif_request_rate_limit < 96 ? 96 : arb_param->sdpif_request_rate_limit;
- arb_param->sat_level_us = 60;
- arb_param->hvm_max_qos_commit_threshold = 0xf;
- arb_param->hvm_min_req_outstand_commit_threshold = 0xa;
- arb_param->compbuf_reserved_space_kbytes = mode_lib->mp.compbuf_reserved_space_64b * 64 / 1024;
- arb_param->allow_sdpif_rate_limit_when_cstate_req = mode_lib->mp.hw_debug5;
- arb_param->dcfclk_deep_sleep_hysteresis = mode_lib->mp.dcfclk_deep_sleep_hysteresis;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_req_outstanding = %d\n", __func__, arb_param->max_req_outstanding);
- dml2_printf("DML::%s: sdpif_request_rate_limit = %d\n", __func__, arb_param->sdpif_request_rate_limit);
- dml2_printf("DML::%s: compbuf_reserved_space_kbytes = %d\n", __func__, arb_param->compbuf_reserved_space_kbytes);
- dml2_printf("DML::%s: allow_sdpif_rate_limit_when_cstate_req = %d\n", __func__, arb_param->allow_sdpif_rate_limit_when_cstate_req);
- dml2_printf("DML::%s: dcfclk_deep_sleep_hysteresis = %d\n", __func__, arb_param->dcfclk_deep_sleep_hysteresis);
-#endif
-
-}
-
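rq_dlg_get_arb_params() above derives the SDPIF request rate limit as three quarters of words_per_channel times the DRAM channel count, floored at 96 requests. A self-contained restatement of that arithmetic; the free-standing helper is illustrative only:

static unsigned int sdpif_request_rate_limit(unsigned int words_per_channel,
					     unsigned int channel_count)
{
	unsigned int limit = (3 * words_per_channel * channel_count) / 4;

	/* never let the limit drop below 96 outstanding requests */
	return limit < 96 ? 96 : limit;
}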
-void dml2_core_shared_get_watermarks(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_dchub_watermark_regs *out)
-{
- rq_dlg_get_wm_regs(display_cfg, mode_lib, out);
-}
-
-void dml2_core_shared_get_arb_params(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_display_arb_regs *out)
-{
- rq_dlg_get_arb_params(mode_lib, out);
-}
-
-void dml2_core_shared_get_pipe_regs(const struct dml2_display_cfg *display_cfg,
- struct dml2_core_internal_display_mode_lib *mode_lib,
- struct dml2_dchub_per_pipe_register_set *out, int pipe_index)
-{
- rq_dlg_get_rq_reg(&out->rq_regs, display_cfg, mode_lib, pipe_index);
- rq_dlg_get_dlg_reg(&mode_lib->scratch, &out->dlg_regs, &out->ttu_regs, display_cfg, mode_lib, pipe_index);
- out->det_size = mode_lib->mp.DETBufferSizeInKByte[mode_lib->mp.pipe_plane[pipe_index]] / mode_lib->ip.config_return_buffer_segment_size_in_kbytes;
-}
-
-void dml2_core_shared_get_stream_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_per_stream_programming *out, int pipe_index)
-{
- // out->min_clocks.dcn4x.dscclk_khz = (unsigned int)(dml_get_dscclk_calculated(mode_lib, pipe_index) * 1000); // FIXME_STAGE2
- // out->min_clocks.dcn4x.dtbclk_khz = (unsigned int)(dml_get_dscclk_calculated(mode_lib, pipe_index) * 1000);
- // out->min_clocks.dcn4x.phyclk_khz = (unsigned int)(dml_get_dscclk_calculated(mode_lib, pipe_index) * 1000);
-
- out->global_sync.dcn4x.vready_offset_pixels = mode_lib->mp.VReadyOffsetPix[mode_lib->mp.pipe_plane[pipe_index]];
- out->global_sync.dcn4x.vstartup_lines = mode_lib->mp.VStartup[mode_lib->mp.pipe_plane[pipe_index]];
- out->global_sync.dcn4x.vupdate_offset_pixels = mode_lib->mp.VUpdateOffsetPix[mode_lib->mp.pipe_plane[pipe_index]];
- out->global_sync.dcn4x.vupdate_vupdate_width_pixels = mode_lib->mp.VUpdateWidthPix[mode_lib->mp.pipe_plane[pipe_index]];
-}
-
-void dml2_core_shared_get_mcache_allocation(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_mcache_surface_allocation *out, int plane_idx)
-{
- unsigned int n;
-
- out->num_mcaches_plane0 = mode_lib->ms.num_mcaches_l[plane_idx];
- out->num_mcaches_plane1 = mode_lib->ms.num_mcaches_c[plane_idx];
- out->shift_granularity.p0 = mode_lib->ms.mcache_shift_granularity_l[plane_idx];
- out->shift_granularity.p1 = mode_lib->ms.mcache_shift_granularity_c[plane_idx];
-
- for (n = 0; n < out->num_mcaches_plane0; n++)
- out->mcache_x_offsets_plane0[n] = mode_lib->ms.mcache_offsets_l[plane_idx][n];
-
- for (n = 0; n < out->num_mcaches_plane1; n++)
-		out->mcache_x_offsets_plane1[n] = mode_lib->ms.mcache_offsets_c[plane_idx][n];
-
- out->last_slice_sharing.mall_comb_mcache_p0 = mode_lib->ms.mall_comb_mcache_l[plane_idx];
- out->last_slice_sharing.mall_comb_mcache_p1 = mode_lib->ms.mall_comb_mcache_c[plane_idx];
- out->last_slice_sharing.plane0_plane1 = mode_lib->ms.lc_comb_mcache[plane_idx];
- out->informative.meta_row_bytes_plane0 = mode_lib->ms.mcache_row_bytes_l[plane_idx];
- out->informative.meta_row_bytes_plane1 = mode_lib->ms.mcache_row_bytes_c[plane_idx];
-
- out->valid = true;
-}
-
-void dml2_core_shared_get_mall_allocation(struct dml2_core_internal_display_mode_lib *mode_lib, unsigned int *out, int pipe_index)
-{
- *out = mode_lib->mp.SurfaceSizeInTheMALL[mode_lib->mp.pipe_plane[pipe_index]];
-}
-
-void dml2_core_shared_get_plane_support_info(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct core_plane_support_info *out, int plane_idx)
-{
- out->mall_svp_size_requirement_ways = 0;
-
- out->nominal_vblank_pstate_latency_hiding_us =
- (int)(display_cfg->stream_descriptors[display_cfg->plane_descriptors[plane_idx].stream_index].timing.h_total /
- ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[plane_idx].stream_index].timing.pixel_clock_khz / 1000) * mode_lib->ms.TWait[plane_idx]);
-
- out->dram_change_latency_hiding_margin_in_active = (int)mode_lib->ms.VActiveLatencyHidingMargin[plane_idx];
-
- out->active_latency_hiding_us = (int)mode_lib->ms.VActiveLatencyHidingUs[plane_idx];
-}
-
-void dml2_core_shared_get_stream_support_info(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct core_stream_support_info *out, int plane_index)
-{
- double phantom_processing_delay_pix;
- unsigned int phantom_processing_delay_lines;
- unsigned int phantom_v_active_lines;
- unsigned int phantom_v_startup_lines;
- unsigned int phantom_v_blank_lines;
- unsigned int main_v_blank_lines;
- unsigned int rem;
-
- phantom_processing_delay_pix = (double)((mode_lib->ip.subvp_fw_processing_delay_us + mode_lib->ip.subvp_pstate_allow_width_us) *
- ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[plane_index].stream_index].timing.pixel_clock_khz / 1000));
- phantom_processing_delay_lines = (unsigned int)(phantom_processing_delay_pix / (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[plane_index].stream_index].timing.h_total);
- dml2_core_shared_div_rem(phantom_processing_delay_pix, display_cfg->stream_descriptors[display_cfg->plane_descriptors[plane_index].stream_index].timing.h_total, &rem);
- if (rem)
- phantom_processing_delay_lines++;
-
- phantom_v_startup_lines = mode_lib->ms.MaxVStartupLines[plane_index];
- phantom_v_active_lines = phantom_processing_delay_lines + mode_lib->ms.SubViewportLinesNeededInMALL[plane_index] + mode_lib->ip.subvp_swath_height_margin_lines;
-
- // phantom_vblank = max(vbp(vstartup) + vactive + vfp(always 1) + vsync(can be 1), main_vblank)
- phantom_v_blank_lines = phantom_v_startup_lines + 1 + 1;
- main_v_blank_lines = display_cfg->stream_descriptors[display_cfg->plane_descriptors[plane_index].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[plane_index].stream_index].timing.v_active;
-	if (main_v_blank_lines > phantom_v_blank_lines)
-		phantom_v_blank_lines = main_v_blank_lines;
-
- out->phantom_v_active = phantom_v_active_lines;
- // phantom_vtotal = vactive + vblank
- out->phantom_v_total = phantom_v_active_lines + phantom_v_blank_lines;
-
- out->phantom_min_v_active = mode_lib->ms.SubViewportLinesNeededInMALL[plane_index];
- out->phantom_v_startup = mode_lib->ms.MaxVStartupLines[plane_index];
-
- out->vblank_reserved_time_us = display_cfg->plane_descriptors[plane_index].overrides.reserved_vblank_time_ns / 1000;
-#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: subvp_fw_processing_delay_us = %d\n", __func__, mode_lib->ip.subvp_fw_processing_delay_us);
- dml2_printf("DML::%s: subvp_pstate_allow_width_us = %d\n", __func__, mode_lib->ip.subvp_pstate_allow_width_us);
- dml2_printf("DML::%s: subvp_swath_height_margin_lines = %d\n", __func__, mode_lib->ip.subvp_swath_height_margin_lines);
- dml2_printf("DML::%s: vblank_reserved_time_us = %f\n", __func__, out->vblank_reserved_time_us);
-#endif
-}
-
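The pixel-to-line conversion in dml2_core_shared_get_stream_support_info() above is a ceiling division: the quotient is bumped by one whenever dml2_core_shared_div_rem() reports a remainder. A stand-alone sketch of the same conversion, assuming a plain floating-point remainder check instead of the driver helper:

static unsigned int phantom_delay_pix_to_lines(double delay_pix, unsigned int h_total)
{
	unsigned int lines = (unsigned int)(delay_pix / (double)h_total);

	/* any leftover pixels still occupy one more full line */
	if (delay_pix > (double)lines * (double)h_total)
		lines++;

	return lines;
}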
-void dml2_core_shared_get_informative(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_display_cfg_programming *out)
-{
- unsigned int k, n;
-
- out->informative.mode_support_info.ModeIsSupported = mode_lib->ms.support.ModeSupport;
- out->informative.mode_support_info.ImmediateFlipSupport = mode_lib->ms.support.ImmediateFlipSupport;
- out->informative.mode_support_info.WritebackLatencySupport = mode_lib->ms.support.WritebackLatencySupport;
- out->informative.mode_support_info.ScaleRatioAndTapsSupport = mode_lib->ms.support.ScaleRatioAndTapsSupport;
- out->informative.mode_support_info.SourceFormatPixelAndScanSupport = mode_lib->ms.support.SourceFormatPixelAndScanSupport;
- out->informative.mode_support_info.P2IWith420 = mode_lib->ms.support.P2IWith420;
- out->informative.mode_support_info.DSCOnlyIfNecessaryWithBPP = mode_lib->ms.support.DSCOnlyIfNecessaryWithBPP;
- out->informative.mode_support_info.DSC422NativeNotSupported = mode_lib->ms.support.DSC422NativeNotSupported;
- out->informative.mode_support_info.LinkRateDoesNotMatchDPVersion = mode_lib->ms.support.LinkRateDoesNotMatchDPVersion;
- out->informative.mode_support_info.LinkRateForMultistreamNotIndicated = mode_lib->ms.support.LinkRateForMultistreamNotIndicated;
- out->informative.mode_support_info.BPPForMultistreamNotIndicated = mode_lib->ms.support.BPPForMultistreamNotIndicated;
- out->informative.mode_support_info.MultistreamWithHDMIOreDP = mode_lib->ms.support.MultistreamWithHDMIOreDP;
- out->informative.mode_support_info.MSOOrODMSplitWithNonDPLink = mode_lib->ms.support.MSOOrODMSplitWithNonDPLink;
- out->informative.mode_support_info.NotEnoughLanesForMSO = mode_lib->ms.support.NotEnoughLanesForMSO;
- out->informative.mode_support_info.NumberOfOTGSupport = mode_lib->ms.support.NumberOfOTGSupport;
- out->informative.mode_support_info.NumberOfHDMIFRLSupport = mode_lib->ms.support.NumberOfHDMIFRLSupport;
- out->informative.mode_support_info.NumberOfDP2p0Support = mode_lib->ms.support.NumberOfDP2p0Support;
- out->informative.mode_support_info.WritebackScaleRatioAndTapsSupport = mode_lib->ms.support.WritebackScaleRatioAndTapsSupport;
- out->informative.mode_support_info.CursorSupport = mode_lib->ms.support.CursorSupport;
- out->informative.mode_support_info.PitchSupport = mode_lib->ms.support.PitchSupport;
- out->informative.mode_support_info.ViewportExceedsSurface = mode_lib->ms.support.ViewportExceedsSurface;
- out->informative.mode_support_info.ImmediateFlipRequiredButTheRequirementForEachSurfaceIsNotSpecified = false;
- out->informative.mode_support_info.ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = mode_lib->ms.support.ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe;
- out->informative.mode_support_info.InvalidCombinationOfMALLUseForPStateAndStaticScreen = mode_lib->ms.support.InvalidCombinationOfMALLUseForPStateAndStaticScreen;
- out->informative.mode_support_info.InvalidCombinationOfMALLUseForPState = mode_lib->ms.support.InvalidCombinationOfMALLUseForPState;
- out->informative.mode_support_info.ExceededMALLSize = mode_lib->ms.support.ExceededMALLSize;
- out->informative.mode_support_info.EnoughWritebackUnits = mode_lib->ms.support.EnoughWritebackUnits;
-
- out->informative.mode_support_info.ExceededMultistreamSlots = mode_lib->ms.support.ExceededMultistreamSlots;
- out->informative.mode_support_info.NotEnoughDSCUnits = mode_lib->ms.support.NotEnoughDSCUnits;
- out->informative.mode_support_info.NotEnoughDSCSlices = mode_lib->ms.support.NotEnoughDSCSlices;
- out->informative.mode_support_info.PixelsPerLinePerDSCUnitSupport = mode_lib->ms.support.PixelsPerLinePerDSCUnitSupport;
- out->informative.mode_support_info.DSCCLKRequiredMoreThanSupported = mode_lib->ms.support.DSCCLKRequiredMoreThanSupported;
- out->informative.mode_support_info.DTBCLKRequiredMoreThanSupported = mode_lib->ms.support.DTBCLKRequiredMoreThanSupported;
- out->informative.mode_support_info.LinkCapacitySupport = mode_lib->ms.support.LinkCapacitySupport;
-
- out->informative.mode_support_info.ROBSupport = mode_lib->ms.support.ROBSupport;
- out->informative.mode_support_info.OutstandingRequestsSupport = mode_lib->ms.support.OutstandingRequestsSupport;
- out->informative.mode_support_info.OutstandingRequestsUrgencyAvoidance = mode_lib->ms.support.OutstandingRequestsUrgencyAvoidance;
- out->informative.mode_support_info.PTEBufferSizeNotExceeded = mode_lib->ms.support.PTEBufferSizeNotExceeded;
- out->informative.mode_support_info.DCCMetaBufferSizeNotExceeded = mode_lib->ms.support.DCCMetaBufferSizeNotExceeded;
-
- out->informative.mode_support_info.TotalVerticalActiveBandwidthSupport = mode_lib->ms.support.AvgBandwidthSupport;
- out->informative.mode_support_info.VActiveBandwidthSupport = mode_lib->ms.support.UrgVactiveBandwidthSupport;
- out->informative.mode_support_info.USRRetrainingSupport = mode_lib->ms.support.USRRetrainingSupport;
-
- out->informative.mode_support_info.PrefetchSupported = mode_lib->ms.support.PrefetchSupported;
- out->informative.mode_support_info.DynamicMetadataSupported = mode_lib->ms.support.DynamicMetadataSupported;
- out->informative.mode_support_info.VRatioInPrefetchSupported = mode_lib->ms.support.VRatioInPrefetchSupported;
- out->informative.mode_support_info.DISPCLK_DPPCLK_Support = mode_lib->ms.support.DISPCLK_DPPCLK_Support;
- out->informative.mode_support_info.TotalAvailablePipesSupport = mode_lib->ms.support.TotalAvailablePipesSupport;
- out->informative.mode_support_info.ViewportSizeSupport = mode_lib->ms.support.ViewportSizeSupport;
-
- for (k = 0; k < out->display_config.num_planes; k++) {
-
- out->informative.mode_support_info.FCLKChangeSupport[k] = mode_lib->ms.support.FCLKChangeSupport[k];
- out->informative.mode_support_info.MPCCombineEnable[k] = mode_lib->ms.support.MPCCombineEnable[k];
- out->informative.mode_support_info.ODMMode[k] = mode_lib->ms.support.ODMMode[k];
- out->informative.mode_support_info.DPPPerSurface[k] = mode_lib->ms.support.DPPPerSurface[k];
- out->informative.mode_support_info.DSCEnabled[k] = mode_lib->ms.support.DSCEnabled[k];
- out->informative.mode_support_info.FECEnabled[k] = mode_lib->ms.support.FECEnabled[k];
- out->informative.mode_support_info.NumberOfDSCSlices[k] = mode_lib->ms.support.NumberOfDSCSlices[k];
- out->informative.mode_support_info.OutputBpp[k] = mode_lib->ms.support.OutputBpp[k];
-
- if (mode_lib->ms.support.OutputType[k] == dml2_core_internal_output_type_unknown)
- out->informative.mode_support_info.OutputType[k] = dml2_output_type_unknown;
- else if (mode_lib->ms.support.OutputType[k] == dml2_core_internal_output_type_dp)
- out->informative.mode_support_info.OutputType[k] = dml2_output_type_dp;
- else if (mode_lib->ms.support.OutputType[k] == dml2_core_internal_output_type_edp)
- out->informative.mode_support_info.OutputType[k] = dml2_output_type_edp;
- else if (mode_lib->ms.support.OutputType[k] == dml2_core_internal_output_type_dp2p0)
- out->informative.mode_support_info.OutputType[k] = dml2_output_type_dp2p0;
- else if (mode_lib->ms.support.OutputType[k] == dml2_core_internal_output_type_hdmi)
- out->informative.mode_support_info.OutputType[k] = dml2_output_type_hdmi;
- else if (mode_lib->ms.support.OutputType[k] == dml2_core_internal_output_type_hdmifrl)
- out->informative.mode_support_info.OutputType[k] = dml2_output_type_hdmifrl;
-
- if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_unknown)
- out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_unknown;
- else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_dp_rate_hbr)
- out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_dp_rate_hbr;
- else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_dp_rate_hbr2)
- out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_dp_rate_hbr2;
- else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_dp_rate_hbr3)
- out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_dp_rate_hbr3;
- else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_dp_rate_uhbr10)
- out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_dp_rate_uhbr10;
- else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_dp_rate_uhbr13p5)
- out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_dp_rate_uhbr13p5;
- else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_dp_rate_uhbr20)
- out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_dp_rate_uhbr20;
- else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_3x3)
- out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_3x3;
- else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_6x3)
- out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_6x3;
- else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_6x4)
- out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_6x4;
- else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_8x4)
- out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_8x4;
- else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_10x4)
- out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_10x4;
- else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_12x4)
- out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_12x4;
-
- out->informative.mode_support_info.AlignedYPitch[k] = mode_lib->ms.support.AlignedYPitch[k];
- out->informative.mode_support_info.AlignedCPitch[k] = mode_lib->ms.support.AlignedCPitch[k];
- }
-
- out->informative.watermarks.urgent_us = mode_lib->mp.Watermark.UrgentWatermark;
- out->informative.watermarks.writeback_urgent_us = mode_lib->mp.Watermark.WritebackUrgentWatermark;
- out->informative.watermarks.writeback_pstate_us = mode_lib->mp.Watermark.WritebackDRAMClockChangeWatermark;
- out->informative.watermarks.writeback_fclk_pstate_us = mode_lib->mp.Watermark.WritebackFCLKChangeWatermark;
-
- out->informative.watermarks.cstate_exit_us = mode_lib->mp.Watermark.StutterExitWatermark;
- out->informative.watermarks.cstate_enter_plus_exit_us = mode_lib->mp.Watermark.StutterEnterPlusExitWatermark;
- out->informative.watermarks.z8_cstate_exit_us = mode_lib->mp.Watermark.Z8StutterExitWatermark;
- out->informative.watermarks.z8_cstate_enter_plus_exit_us = mode_lib->mp.Watermark.Z8StutterEnterPlusExitWatermark;
- out->informative.watermarks.pstate_change_us = mode_lib->mp.Watermark.DRAMClockChangeWatermark;
- out->informative.watermarks.fclk_pstate_change_us = mode_lib->mp.Watermark.FCLKChangeWatermark;
- out->informative.watermarks.usr_retraining_us = mode_lib->mp.Watermark.USRRetrainingWatermark;
-
- out->informative.mall.total_surface_size_in_mall_bytes = 0;
- for (k = 0; k < out->display_config.num_planes; ++k)
- out->informative.mall.total_surface_size_in_mall_bytes += mode_lib->mp.SurfaceSizeInTheMALL[k];
-
- out->informative.qos.min_return_latency_in_dcfclk = mode_lib->mp.min_return_latency_in_dcfclk;
- out->informative.qos.urgent_latency_us = mode_lib->mp.UrgentLatency;
-
- out->informative.qos.max_urgent_latency_us = mode_lib->ms.support.max_urgent_latency_us;
- out->informative.qos.avg_non_urgent_latency_us = mode_lib->ms.support.avg_non_urgent_latency_us;
- out->informative.qos.avg_urgent_latency_us = mode_lib->ms.support.avg_urgent_latency_us;
-
- out->informative.qos.wm_memory_trip_us = mode_lib->mp.UrgentLatency;
- out->informative.qos.meta_trip_memory_us = mode_lib->mp.MetaTripToMemory;
- out->informative.qos.fraction_of_urgent_bandwidth = mode_lib->mp.FractionOfUrgentBandwidth;
- out->informative.qos.fraction_of_urgent_bandwidth_immediate_flip = mode_lib->mp.FractionOfUrgentBandwidthImmediateFlip;
- out->informative.qos.fraction_of_urgent_bandwidth_mall = mode_lib->mp.FractionOfUrgentBandwidthMALL;
-
- out->informative.qos.avg_bw_required.sys_active.sdp_bw_mbps =
- mode_lib->ms.support.avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
- out->informative.qos.avg_bw_required.sys_active.dram_bw_mbps =
- mode_lib->ms.support.avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram];
- out->informative.qos.avg_bw_required.svp_prefetch.sdp_bw_mbps =
- mode_lib->ms.support.avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp];
- out->informative.qos.avg_bw_required.svp_prefetch.dram_bw_mbps =
- mode_lib->ms.support.avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram];
-
- out->informative.qos.avg_bw_available.sys_active.sdp_bw_mbps =
- mode_lib->mp.avg_bandwidth_available[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
- out->informative.qos.avg_bw_available.sys_active.dram_bw_mbps =
- mode_lib->mp.avg_bandwidth_available[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram];
- out->informative.qos.avg_bw_available.svp_prefetch.sdp_bw_mbps =
- mode_lib->mp.avg_bandwidth_available[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp];
- out->informative.qos.avg_bw_available.svp_prefetch.dram_bw_mbps =
- mode_lib->mp.avg_bandwidth_available[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram];
-
- out->informative.qos.urg_bw_available.sys_active.sdp_bw_mbps =
- mode_lib->mp.urg_bandwidth_available[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
- out->informative.qos.urg_bw_available.sys_active.dram_bw_mbps =
- mode_lib->mp.urg_bandwidth_available[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram];
- out->informative.qos.urg_bw_available.sys_active.dram_vm_only_bw_mbps =
- mode_lib->mp.urg_bandwidth_available_vm_only[dml2_core_internal_soc_state_sys_active];
-
- out->informative.qos.urg_bw_available.svp_prefetch.sdp_bw_mbps =
- mode_lib->mp.urg_bandwidth_available[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp];
- out->informative.qos.urg_bw_available.svp_prefetch.dram_bw_mbps =
- mode_lib->mp.urg_bandwidth_available[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram];
- out->informative.qos.urg_bw_available.svp_prefetch.dram_vm_only_bw_mbps =
- mode_lib->mp.urg_bandwidth_available_vm_only[dml2_core_internal_soc_state_svp_prefetch];
-
- out->informative.qos.urg_bw_required.sys_active.sdp_bw_mbps = mode_lib->mp.urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
- out->informative.qos.urg_bw_required.sys_active.dram_bw_mbps = mode_lib->mp.urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram];
- out->informative.qos.urg_bw_required.svp_prefetch.sdp_bw_mbps = mode_lib->mp.urg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp];
- out->informative.qos.urg_bw_required.svp_prefetch.dram_bw_mbps = mode_lib->mp.urg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram];
-
- out->informative.qos.non_urg_bw_required.sys_active.sdp_bw_mbps = mode_lib->mp.non_urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
- out->informative.qos.non_urg_bw_required.sys_active.dram_bw_mbps = mode_lib->mp.non_urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram];
- out->informative.qos.non_urg_bw_required.svp_prefetch.sdp_bw_mbps = mode_lib->mp.non_urg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp];
- out->informative.qos.non_urg_bw_required.svp_prefetch.dram_bw_mbps = mode_lib->mp.non_urg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram];
-
- out->informative.qos.urg_bw_required_with_flip.sys_active.sdp_bw_mbps = mode_lib->mp.urg_bandwidth_required_flip[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
- out->informative.qos.urg_bw_required_with_flip.sys_active.dram_bw_mbps = mode_lib->mp.urg_bandwidth_required_flip[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram];
- out->informative.qos.urg_bw_required_with_flip.svp_prefetch.sdp_bw_mbps = mode_lib->mp.urg_bandwidth_required_flip[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp];
- out->informative.qos.urg_bw_required_with_flip.svp_prefetch.dram_bw_mbps = mode_lib->mp.urg_bandwidth_required_flip[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram];
-
- out->informative.qos.non_urg_bw_required_with_flip.sys_active.sdp_bw_mbps = mode_lib->mp.non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
- out->informative.qos.non_urg_bw_required_with_flip.sys_active.dram_bw_mbps = mode_lib->mp.non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram];
- out->informative.qos.non_urg_bw_required_with_flip.svp_prefetch.sdp_bw_mbps = mode_lib->mp.non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp];
- out->informative.qos.non_urg_bw_required_with_flip.svp_prefetch.dram_bw_mbps = mode_lib->mp.non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram];
-
- out->informative.crb.comp_buffer_size_kbytes = mode_lib->mp.CompressedBufferSizeInkByte;
- out->informative.crb.UnboundedRequestEnabled = mode_lib->mp.UnboundedRequestEnabled;
-
- out->informative.crb.compbuf_reserved_space_64b = mode_lib->mp.compbuf_reserved_space_64b;
- out->informative.misc.hw_debug5 = mode_lib->mp.hw_debug5;
- out->informative.misc.dcfclk_deep_sleep_hysteresis = mode_lib->mp.dcfclk_deep_sleep_hysteresis;
-
- out->informative.power_management.stutter_efficiency = mode_lib->mp.StutterEfficiencyNotIncludingVBlank;
- out->informative.power_management.stutter_efficiency_with_vblank = mode_lib->mp.StutterEfficiency;
- out->informative.power_management.stutter_num_bursts = mode_lib->mp.NumberOfStutterBurstsPerFrame;
-
- out->informative.power_management.z8.stutter_efficiency = mode_lib->mp.Z8StutterEfficiency;
- out->informative.power_management.z8.stutter_efficiency_with_vblank = mode_lib->mp.StutterEfficiency;
- out->informative.power_management.z8.stutter_num_bursts = mode_lib->mp.Z8NumberOfStutterBurstsPerFrame;
- out->informative.power_management.z8.stutter_period = mode_lib->mp.StutterPeriod;
-
- out->informative.power_management.z8.bestcase.stutter_efficiency = mode_lib->mp.Z8StutterEfficiencyBestCase;
- out->informative.power_management.z8.bestcase.stutter_num_bursts = mode_lib->mp.Z8NumberOfStutterBurstsPerFrameBestCase;
- out->informative.power_management.z8.bestcase.stutter_period = mode_lib->mp.StutterPeriodBestCase;
-
- out->informative.misc.cstate_max_cap_mode = mode_lib->mp.DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE;
-
-	out->min_clocks.dcn4x.dpprefclk_khz = (unsigned int)(mode_lib->mp.GlobalDPPCLK * 1000.0);
-
- out->informative.qos.max_active_fclk_change_latency_supported = mode_lib->mp.MaxActiveFCLKChangeLatencySupported;
-
- for (k = 0; k < out->display_config.num_planes; k++) {
-
- if ((out->display_config.plane_descriptors->overrides.reserved_vblank_time_ns >= 1000.0 * mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us)
- && (out->display_config.plane_descriptors->overrides.reserved_vblank_time_ns >= 1000.0 * mode_lib->soc.power_management_parameters.fclk_change_blackout_us)
- && (out->display_config.plane_descriptors->overrides.reserved_vblank_time_ns >= 1000.0 * mode_lib->soc.power_management_parameters.stutter_enter_plus_exit_latency_us))
- out->informative.misc.PrefetchMode[k] = 0;
- else if ((out->display_config.plane_descriptors->overrides.reserved_vblank_time_ns >= 1000.0 * mode_lib->soc.power_management_parameters.fclk_change_blackout_us)
- && (out->display_config.plane_descriptors->overrides.reserved_vblank_time_ns >= 1000.0 * mode_lib->soc.power_management_parameters.stutter_enter_plus_exit_latency_us))
- out->informative.misc.PrefetchMode[k] = 1;
- else if (out->display_config.plane_descriptors->overrides.reserved_vblank_time_ns >= 1000.0 * mode_lib->soc.power_management_parameters.stutter_enter_plus_exit_latency_us)
- out->informative.misc.PrefetchMode[k] = 2;
- else
- out->informative.misc.PrefetchMode[k] = 3;
-
- out->informative.misc.min_ttu_vblank_us[k] = mode_lib->mp.MinTTUVBlank[k];
- out->informative.mall.subviewport_lines_needed_in_mall[k] = mode_lib->mp.SubViewportLinesNeededInMALL[k];
- out->informative.crb.det_size_in_kbytes[k] = mode_lib->mp.DETBufferSizeInKByte[k];
- out->informative.crb.DETBufferSizeY[k] = mode_lib->mp.DETBufferSizeY[k];
- out->informative.misc.ImmediateFlipSupportedForPipe[k] = mode_lib->mp.ImmediateFlipSupportedForPipe[k];
- out->informative.misc.UsesMALLForStaticScreen[k] = mode_lib->mp.is_using_mall_for_ss[k];
- out->informative.plane_info[k].dpte_row_height_plane0 = mode_lib->mp.dpte_row_height[k];
- out->informative.plane_info[k].dpte_row_height_plane1 = mode_lib->mp.dpte_row_height_chroma[k];
- out->informative.plane_info[k].meta_row_height_plane0 = mode_lib->mp.meta_row_height[k];
- out->informative.plane_info[k].meta_row_height_plane1 = mode_lib->mp.meta_row_height_chroma[k];
- out->informative.dcc_control[k].max_uncompressed_block_plane0 = mode_lib->mp.DCCYMaxUncompressedBlock[k];
- out->informative.dcc_control[k].max_compressed_block_plane0 = mode_lib->mp.DCCYMaxCompressedBlock[k];
- out->informative.dcc_control[k].independent_block_plane0 = mode_lib->mp.DCCYIndependentBlock[k];
- out->informative.dcc_control[k].max_uncompressed_block_plane1 = mode_lib->mp.DCCCMaxUncompressedBlock[k];
- out->informative.dcc_control[k].max_compressed_block_plane1 = mode_lib->mp.DCCCMaxCompressedBlock[k];
- out->informative.dcc_control[k].independent_block_plane1 = mode_lib->mp.DCCCIndependentBlock[k];
- out->informative.misc.dst_x_after_scaler[k] = mode_lib->mp.DSTXAfterScaler[k];
- out->informative.misc.dst_y_after_scaler[k] = mode_lib->mp.DSTYAfterScaler[k];
- out->informative.misc.prefetch_source_lines_plane0[k] = mode_lib->mp.PrefetchSourceLinesY[k];
- out->informative.misc.prefetch_source_lines_plane1[k] = mode_lib->mp.PrefetchSourceLinesC[k];
- out->informative.misc.vready_at_or_after_vsync[k] = mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[k];
- out->informative.misc.min_dst_y_next_start[k] = mode_lib->mp.MIN_DST_Y_NEXT_START[k];
- out->informative.plane_info[k].swath_width_plane0 = mode_lib->mp.SwathWidthY[k];
- out->informative.plane_info[k].swath_height_plane0 = mode_lib->mp.SwathHeightY[k];
- out->informative.plane_info[k].swath_height_plane1 = mode_lib->mp.SwathHeightC[k];
- out->informative.misc.CursorDstXOffset[k] = mode_lib->mp.CursorDstXOffset[k];
- out->informative.misc.CursorDstYOffset[k] = mode_lib->mp.CursorDstYOffset[k];
- out->informative.misc.CursorChunkHDLAdjust[k] = mode_lib->mp.CursorChunkHDLAdjust[k];
- out->informative.misc.dpte_group_bytes[k] = mode_lib->mp.dpte_group_bytes[k];
- out->informative.misc.vm_group_bytes[k] = mode_lib->mp.vm_group_bytes[k];
- out->informative.misc.DisplayPipeRequestDeliveryTimeLuma[k] = mode_lib->mp.DisplayPipeRequestDeliveryTimeLuma[k];
- out->informative.misc.DisplayPipeRequestDeliveryTimeChroma[k] = mode_lib->mp.DisplayPipeRequestDeliveryTimeChroma[k];
- out->informative.misc.DisplayPipeRequestDeliveryTimeLumaPrefetch[k] = mode_lib->mp.DisplayPipeRequestDeliveryTimeLumaPrefetch[k];
- out->informative.misc.DisplayPipeRequestDeliveryTimeChromaPrefetch[k] = mode_lib->mp.DisplayPipeRequestDeliveryTimeChromaPrefetch[k];
- out->informative.misc.TimePerVMGroupVBlank[k] = mode_lib->mp.TimePerVMGroupVBlank[k];
- out->informative.misc.TimePerVMGroupFlip[k] = mode_lib->mp.TimePerVMGroupFlip[k];
- out->informative.misc.TimePerVMRequestVBlank[k] = mode_lib->mp.TimePerVMRequestVBlank[k];
- out->informative.misc.TimePerVMRequestFlip[k] = mode_lib->mp.TimePerVMRequestFlip[k];
- out->informative.misc.Tdmdl_vm[k] = mode_lib->mp.Tdmdl_vm[k];
- out->informative.misc.Tdmdl[k] = mode_lib->mp.Tdmdl[k];
- out->informative.misc.VStartup[k] = mode_lib->mp.VStartup[k];
- out->informative.misc.VUpdateOffsetPix[k] = mode_lib->mp.VUpdateOffsetPix[k];
- out->informative.misc.VUpdateWidthPix[k] = mode_lib->mp.VUpdateWidthPix[k];
- out->informative.misc.VReadyOffsetPix[k] = mode_lib->mp.VReadyOffsetPix[k];
-
- out->informative.misc.DST_Y_PER_PTE_ROW_NOM_L[k] = mode_lib->mp.DST_Y_PER_PTE_ROW_NOM_L[k];
- out->informative.misc.DST_Y_PER_PTE_ROW_NOM_C[k] = mode_lib->mp.DST_Y_PER_PTE_ROW_NOM_C[k];
- out->informative.misc.time_per_pte_group_nom_luma[k] = mode_lib->mp.time_per_pte_group_nom_luma[k];
- out->informative.misc.time_per_pte_group_nom_chroma[k] = mode_lib->mp.time_per_pte_group_nom_chroma[k];
- out->informative.misc.time_per_pte_group_vblank_luma[k] = mode_lib->mp.time_per_pte_group_vblank_luma[k];
- out->informative.misc.time_per_pte_group_vblank_chroma[k] = mode_lib->mp.time_per_pte_group_vblank_chroma[k];
- out->informative.misc.time_per_pte_group_flip_luma[k] = mode_lib->mp.time_per_pte_group_flip_luma[k];
- out->informative.misc.time_per_pte_group_flip_chroma[k] = mode_lib->mp.time_per_pte_group_flip_chroma[k];
- out->informative.misc.VRatioPrefetchY[k] = mode_lib->mp.VRatioPrefetchY[k];
- out->informative.misc.VRatioPrefetchC[k] = mode_lib->mp.VRatioPrefetchC[k];
- out->informative.misc.DestinationLinesForPrefetch[k] = mode_lib->mp.dst_y_prefetch[k];
- out->informative.misc.DestinationLinesToRequestVMInVBlank[k] = mode_lib->mp.dst_y_per_vm_vblank[k];
- out->informative.misc.DestinationLinesToRequestRowInVBlank[k] = mode_lib->mp.dst_y_per_row_vblank[k];
- out->informative.misc.DestinationLinesToRequestVMInImmediateFlip[k] = mode_lib->mp.dst_y_per_vm_flip[k];
- out->informative.misc.DestinationLinesToRequestRowInImmediateFlip[k] = mode_lib->mp.dst_y_per_row_flip[k];
- out->informative.misc.DisplayPipeLineDeliveryTimeLuma[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeLuma[k];
- out->informative.misc.DisplayPipeLineDeliveryTimeChroma[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeChroma[k];
- out->informative.misc.DisplayPipeLineDeliveryTimeLumaPrefetch[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeLumaPrefetch[k];
- out->informative.misc.DisplayPipeLineDeliveryTimeChromaPrefetch[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeChromaPrefetch[k];
-
- out->informative.misc.WritebackAllowDRAMClockChangeEndPosition[k] = mode_lib->mp.WritebackAllowDRAMClockChangeEndPosition[k];
- out->informative.misc.WritebackAllowFCLKChangeEndPosition[k] = mode_lib->mp.WritebackAllowFCLKChangeEndPosition[k];
- out->informative.misc.DSCCLK_calculated[k] = mode_lib->mp.DSCCLK[k];
- out->informative.misc.BIGK_FRAGMENT_SIZE[k] = mode_lib->mp.BIGK_FRAGMENT_SIZE[k];
- out->informative.misc.PTE_BUFFER_MODE[k] = mode_lib->mp.PTE_BUFFER_MODE[k];
- out->informative.misc.DSCDelay[k] = mode_lib->mp.DSCDelay[k];
- out->informative.misc.MaxActiveDRAMClockChangeLatencySupported[k] = mode_lib->mp.MaxActiveDRAMClockChangeLatencySupported[k];
- }
-
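The per-plane loop above derives PrefetchMode from how many power-management budgets the reserved vblank time covers: all of DRAM p-state, FCLK p-state and stutter gives mode 0, and progressively fewer give modes 1, 2 and 3. A compact restatement with the same ordering; the helper is hypothetical:

static int classify_prefetch_mode(double reserved_vblank_ns, double dram_blackout_us,
				  double fclk_blackout_us, double stutter_us)
{
	if (reserved_vblank_ns >= 1000.0 * dram_blackout_us &&
	    reserved_vblank_ns >= 1000.0 * fclk_blackout_us &&
	    reserved_vblank_ns >= 1000.0 * stutter_us)
		return 0;	/* vblank hides DRAM p-state, FCLK p-state and stutter entry/exit */
	if (reserved_vblank_ns >= 1000.0 * fclk_blackout_us &&
	    reserved_vblank_ns >= 1000.0 * stutter_us)
		return 1;	/* vblank hides FCLK p-state and stutter entry/exit */
	if (reserved_vblank_ns >= 1000.0 * stutter_us)
		return 2;	/* vblank hides stutter entry/exit only */
	return 3;		/* vblank hides none of the three budgets */
}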
-	// For this DV informative layer, all pipes in the same plane will just use the same id.
-	// The optimization and helper layer will be added later on.
-	// This only works when the mcache capacity is high enough to fit everything without thrashing the cache.
- for (k = 0; k < out->display_config.num_planes; k++) {
- out->informative.non_optimized_mcache_allocation[k].num_mcaches_plane0 = mode_lib->ms.num_mcaches_l[k];
- out->informative.non_optimized_mcache_allocation[k].informative.meta_row_bytes_plane0 = mode_lib->ms.mcache_row_bytes_l[k];
-
- for (n = 0; n < out->informative.non_optimized_mcache_allocation[k].num_mcaches_plane0; n++) {
- out->informative.non_optimized_mcache_allocation[k].mcache_x_offsets_plane0[n] = mode_lib->ms.mcache_offsets_l[k][n];
- out->informative.non_optimized_mcache_allocation[k].global_mcache_ids_plane0[n] = k;
- }
-
- out->informative.non_optimized_mcache_allocation[k].num_mcaches_plane1 = mode_lib->ms.num_mcaches_c[k];
- out->informative.non_optimized_mcache_allocation[k].informative.meta_row_bytes_plane1 = mode_lib->ms.mcache_row_bytes_c[k];
-
- for (n = 0; n < out->informative.non_optimized_mcache_allocation[k].num_mcaches_plane1; n++) {
- out->informative.non_optimized_mcache_allocation[k].mcache_x_offsets_plane1[n] = mode_lib->ms.mcache_offsets_c[k][n];
- out->informative.non_optimized_mcache_allocation[k].global_mcache_ids_plane1[n] = k;
- }
- }
-
- out->informative.qos.max_non_urgent_latency_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].maximum_latency_when_non_urgent_uclk_cycles
- / mode_lib->mp.uclk_freq_mhz * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin / 100.0)
- + mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles / mode_lib->mp.FabricClock
- + mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->mp.FabricClock
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin / 100.0);
-
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) {
- if (((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024
- / mode_lib->mp.non_urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]) >= out->informative.qos.max_non_urgent_latency_us) {
- out->informative.misc.ROBUrgencyAvoidance = true;
- } else {
- out->informative.misc.ROBUrgencyAvoidance = false;
- }
- } else {
- out->informative.misc.ROBUrgencyAvoidance = true;
- }
-}
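The dcn4x branch above sets ROBUrgencyAvoidance when the ROB, minus one pixel chunk, can hide the worst-case non-urgent latency at the required SDP bandwidth. A stand-alone restatement of that test; it relies on 1 MB/s being numerically equal to 1 byte per microsecond, and the helper is illustrative only:

#include <stdbool.h>	/* for a stand-alone build of this sketch */

static bool rob_urgency_avoidance_possible(unsigned int rob_kbytes,
					   unsigned int pixel_chunk_kbytes,
					   double required_sdp_bw_mbps,
					   double max_non_urgent_latency_us)
{
	/* bytes the ROB can buffer once one pixel chunk is set aside */
	double rob_bytes = (double)(rob_kbytes - pixel_chunk_kbytes) * 1024.0;

	/* 1 MB/s == 1 byte/us, so this quotient is the time the ROB can hide */
	double drain_time_us = rob_bytes / required_sdp_bw_mbps;

	return drain_time_us >= max_non_urgent_latency_us;
}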
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
index 23c0fca5515f..4e502f0a6d20 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
@@ -484,6 +484,8 @@ struct dml2_core_internal_mode_support {
double WriteBandwidth[DML2_MAX_PLANES][DML2_MAX_WRITEBACK];
double RequiredPrefetchPixelDataBWLuma[DML2_MAX_PLANES];
double RequiredPrefetchPixelDataBWChroma[DML2_MAX_PLANES];
+	/* oto bw should also be considered when calculating peak urgent bw to avoid situations where oto/equ mismatches occur between ms and mp */
+ double RequiredPrefetchBWOTO[DML2_MAX_PLANES];
double cursor_bw[DML2_MAX_PLANES];
double prefetch_cursor_bw[DML2_MAX_PLANES];
double prefetch_vmrow_bw[DML2_MAX_PLANES];
@@ -522,11 +524,13 @@ struct dml2_core_internal_mode_support {
unsigned int num_mcaches_l[DML2_MAX_PLANES];
unsigned int mcache_row_bytes_l[DML2_MAX_PLANES];
+ unsigned int mcache_row_bytes_per_channel_l[DML2_MAX_PLANES];
unsigned int mcache_offsets_l[DML2_MAX_PLANES][DML2_MAX_MCACHES + 1];
unsigned int mcache_shift_granularity_l[DML2_MAX_PLANES];
unsigned int num_mcaches_c[DML2_MAX_PLANES];
unsigned int mcache_row_bytes_c[DML2_MAX_PLANES];
+ unsigned int mcache_row_bytes_per_channel_c[DML2_MAX_PLANES];
unsigned int mcache_offsets_c[DML2_MAX_PLANES][DML2_MAX_MCACHES + 1];
unsigned int mcache_shift_granularity_c[DML2_MAX_PLANES];
@@ -839,11 +843,13 @@ struct dml2_core_internal_mode_program {
unsigned int num_mcaches_l[DML2_MAX_PLANES];
unsigned int mcache_row_bytes_l[DML2_MAX_PLANES];
+ unsigned int mcache_row_bytes_per_channel_l[DML2_MAX_PLANES];
unsigned int mcache_offsets_l[DML2_MAX_PLANES][DML2_MAX_MCACHES + 1];
unsigned int mcache_shift_granularity_l[DML2_MAX_PLANES];
unsigned int num_mcaches_c[DML2_MAX_PLANES];
unsigned int mcache_row_bytes_c[DML2_MAX_PLANES];
+ unsigned int mcache_row_bytes_per_channel_c[DML2_MAX_PLANES];
unsigned int mcache_offsets_c[DML2_MAX_PLANES][DML2_MAX_MCACHES + 1];
unsigned int mcache_shift_granularity_c[DML2_MAX_PLANES];
@@ -1381,6 +1387,7 @@ struct dml2_core_shared_get_urgent_bandwidth_required_locals {
double vm_row_bw;
double flip_and_active_bw;
double flip_and_prefetch_bw;
+ double flip_and_prefetch_bw_oto;
double active_and_excess_bw;
};
@@ -1564,7 +1571,7 @@ struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_param
unsigned int *DSTYAfterScaler;
bool UnboundedRequestEnabled;
unsigned int CompressedBufferSizeInkByte;
- bool max_oustanding_when_urgent_expected;
+ bool max_outstanding_when_urgent_expected;
unsigned int max_outstanding_requests;
unsigned int max_request_size_bytes;
unsigned int *meta_row_height_l;
@@ -1792,6 +1799,7 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_params {
double *VRatioPrefetchC;
double *RequiredPrefetchPixelDataBWLuma;
double *RequiredPrefetchPixelDataBWChroma;
+ double *RequiredPrefetchBWOTO;
bool *NotEnoughTimeForDynamicMetadata;
double *Tno_bw;
double *Tno_bw_flip;
@@ -1883,6 +1891,7 @@ struct dml2_core_calcs_calculate_mcache_row_bytes_params {
// output
unsigned int *num_mcaches;
unsigned int *mcache_row_bytes;
+ unsigned int *mcache_row_bytes_per_channel;
unsigned int *meta_row_width_ub;
double *dcc_dram_bw_nom_overhead_factor;
double *dcc_dram_bw_pref_overhead_factor;
@@ -1962,6 +1971,7 @@ struct dml2_core_calcs_calculate_mcache_setting_params {
// output
unsigned int *num_mcaches_l;
unsigned int *mcache_row_bytes_l;
+ unsigned int *mcache_row_bytes_per_channel_l;
unsigned int *mcache_offsets_l;
unsigned int *mcache_shift_granularity_l;
double *dcc_dram_bw_nom_overhead_factor_l;
@@ -1969,6 +1979,7 @@ struct dml2_core_calcs_calculate_mcache_setting_params {
unsigned int *num_mcaches_c;
unsigned int *mcache_row_bytes_c;
+ unsigned int *mcache_row_bytes_per_channel_c;
unsigned int *mcache_offsets_c;
unsigned int *mcache_shift_granularity_c;
double *dcc_dram_bw_nom_overhead_factor_c;
@@ -2025,6 +2036,7 @@ struct dml2_core_calcs_calculate_peak_bandwidth_required_params {
double *surface_read_bandwidth_c;
double *prefetch_bandwidth_l;
double *prefetch_bandwidth_c;
+ double *prefetch_bandwidth_oto;
double *excess_vactive_fill_bw_l;
double *excess_vactive_fill_bw_c;
double *cursor_bw;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
index 456b3f8a6d38..2504d9c2ec34 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
@@ -544,7 +544,8 @@ unsigned int dml2_core_utils_get_active_min_uclk_dpm_index(unsigned long uclk_fr
}
}
- dml2_assert(clk_entry_found);
+ if (!clk_entry_found)
+ DML2_ASSERT(clk_entry_found);
#if defined(__DML_VBA_DEBUG__)
dml2_printf("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
dml2_printf("DML::%s: index = %d\n", __func__, i);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
index fc77fb34a19a..15507926f3a4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
@@ -307,8 +307,8 @@ static bool map_soc_min_clocks_to_dpm_fine_grained(struct dml2_display_cfg_progr
/* these clocks are optional, so they can fail to map, in which case map all to 0 */
if (result) {
if (!round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.dcfclk_khz, &state_table->dcfclk) ||
- !round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.fclk_khz, &state_table->fclk) ||
- !round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.uclk_khz, &state_table->uclk)) {
+ !round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.fclk_khz, &state_table->fclk) ||
+ !round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.uclk_khz, &state_table->uclk)) {
display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.dcfclk_khz = 0;
display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.fclk_khz = 0;
display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.uclk_khz = 0;
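The hunk above treats the svp_prefetch_no_throttle clocks as optional: if round_up_to_next_dpm() cannot place any of dcfclk, fclk or uclk on a DPM level, all three are zeroed. A sketch of the round-up idea, assuming a simple ascending array of khz levels rather than the driver's clock-table type:

#include <stdbool.h>	/* for a stand-alone build of this sketch */

static bool round_up_to_next_dpm_sketch(unsigned long *clk_khz,
					const unsigned long *levels_khz,
					unsigned int num_levels)
{
	unsigned int i;

	for (i = 0; i < num_levels; i++) {
		if (levels_khz[i] >= *clk_khz) {
			/* snap the request to the first DPM level that covers it */
			*clk_khz = levels_khz[i];
			return true;
		}
	}

	return false;	/* no level is high enough; the caller zeroes the optional clocks */
}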
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
index b165c58dfd11..e7b58f2efda4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
@@ -11,6 +11,4 @@ bool dpmm_dcn3_map_mode_to_soc_dpm(struct dml2_dpmm_map_mode_to_soc_dpm_params_i
bool dpmm_dcn4_map_mode_to_soc_dpm(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out);
bool dpmm_dcn4_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_out);
-bool dpmm_dcn4_unit_test(void);
-
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c
index c60b8fe90819..cd3fbc0591d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c
@@ -15,7 +15,7 @@ bool dml2_mcg_create(enum dml2_project_id project_id, struct dml2_mcg_instance *
{
bool result = false;
- if (!out)
+ if (out == 0)
return false;
memset(out, 0, sizeof(struct dml2_mcg_instance));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
index a3324f7b9ba6..f50662b83296 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
@@ -1082,12 +1082,21 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
const struct dml2_fams2_meta *stream_fams2_meta;
unsigned int microschedule_vlines;
unsigned int i;
+ unsigned int mcaches_per_plane;
+ unsigned int total_mcaches_required = 0;
unsigned int num_planes_per_stream[DML2_MAX_PLANES] = { 0 };
 	/* confirm the timing is not a centered timing */
for (i = 0; i < display_config->display_config.num_planes; i++) {
plane_descriptor = &display_config->display_config.plane_descriptors[i];
+ mcaches_per_plane = 0;
+
+ if (plane_descriptor->surface.dcc.enable) {
+ mcaches_per_plane += display_config->stage2.mcache_allocations[i].num_mcaches_plane0 +
+ display_config->stage2.mcache_allocations[i].num_mcaches_plane1 -
+ (display_config->stage2.mcache_allocations[i].last_slice_sharing.plane0_plane1 ? 1 : 0);
+ }
if (is_bit_set_in_bitfield(mask, (unsigned char)plane_descriptor->stream_index)) {
num_planes_per_stream[plane_descriptor->stream_index]++;
@@ -1098,7 +1107,18 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
plane_descriptor->composition.rotation_angle != dml2_rotation_0) {
return false;
}
+
+ /* phantom requires same number of mcaches as main */
+ if (plane_descriptor->surface.dcc.enable) {
+ mcaches_per_plane *= 2;
+ }
}
+ total_mcaches_required += mcaches_per_plane;
+ }
+
+ if (total_mcaches_required > pmo->soc_bb->num_dcc_mcaches) {
+ /* too many mcaches required */
+ return false;
}
for (i = 0; i < DML2_MAX_PLANES; i++) {
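The additions to all_timings_support_svp() above budget DCC mcaches: a DCC-enabled plane needs its plane0 plus plane1 mcaches, minus one when the last slice is shared, SubVP planes need that amount again for the phantom pipe, and the running total must not exceed soc_bb->num_dcc_mcaches. A compact restatement of the per-plane count; the helper name is illustrative:

#include <stdbool.h>	/* for a stand-alone build of this sketch */

static unsigned int mcaches_required_for_plane(unsigned int num_mcaches_plane0,
					       unsigned int num_mcaches_plane1,
					       bool plane0_plane1_share_last_slice,
					       bool dcc_enabled, bool gets_phantom_pipe)
{
	unsigned int count;

	if (!dcc_enabled)
		return 0;	/* only DCC surfaces consume mcaches in this check */

	count = num_mcaches_plane0 + num_mcaches_plane1 -
		(plane0_plane1_share_last_slice ? 1 : 0);

	if (gets_phantom_pipe)
		count *= 2;	/* the phantom pipe needs the same number of mcaches as main */

	return count;
}

The caller sums this value over all planes and rejects the SVP configuration when the total exceeds the SoC's DCC mcache count.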
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c
index f88931ccbc5e..5a33e2f357f4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c
@@ -47,4 +47,3 @@ bool dml2_build_mcache_programming(struct dml2_build_mcache_programming_in_out *
return in_out->dml2_instance->funcs.build_mcache_programming(in_out);
}
-
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
index a8f58f8448e4..dc2ce5e77f57 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
@@ -831,7 +831,6 @@ static bool dml2_top_soc15_build_mode_programming(struct dml2_build_mode_program
bool uclk_pstate_success = false;
bool vmin_success = false;
bool stutter_success = false;
- unsigned int i;
memset(l, 0, sizeof(struct dml2_build_mode_programming_locals));
memset(in_out->programming, 0, sizeof(struct dml2_display_cfg_programming));
@@ -977,13 +976,6 @@ static bool dml2_top_soc15_build_mode_programming(struct dml2_build_mode_program
}
/*
- * Populate mcache programming
- */
- for (i = 0; i < in_out->display_config->num_planes; i++) {
- in_out->programming->plane_programming[i].mcache_allocation = l->base_display_config_with_meta.stage2.mcache_allocations[i];
- }
-
- /*
* Call DPMM to map all requirements to minimum clock state
*/
if (result) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c
deleted file mode 100644
index f9f8869cd8b8..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c
+++ /dev/null
@@ -1,354 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#include "dml2_internal_shared_types.h"
-#include "dml_top.h"
-#include "dml2_mcg_factory.h"
-#include "dml2_core_factory.h"
-#include "dml2_dpmm_factory.h"
-#include "dml2_pmo_factory.h"
-#include "dml_top_mcache.h"
-#include "dml2_top_optimization.h"
-#include "dml2_external_lib_deps.h"
-
-unsigned int dml2_get_instance_size_bytes(void)
-{
- return sizeof(struct dml2_instance);
-}
-
-bool dml2_initialize_instance(struct dml2_initialize_instance_in_out *in_out)
-{
- struct dml2_instance *dml = (struct dml2_instance *)in_out->dml2_instance;
- struct dml2_initialize_instance_locals *l = &dml->scratch.initialize_instance_locals;
- struct dml2_core_initialize_in_out core_init_params = { 0 };
- struct dml2_mcg_build_min_clock_table_params_in_out mcg_build_min_clk_params = { 0 };
- struct dml2_pmo_initialize_in_out pmo_init_params = { 0 };
- bool result = false;
-
- memset(l, 0, sizeof(struct dml2_initialize_instance_locals));
- memset(dml, 0, sizeof(struct dml2_instance));
-
- memcpy(&dml->ip_caps, &in_out->ip_caps, sizeof(struct dml2_ip_capabilities));
- memcpy(&dml->soc_bbox, &in_out->soc_bb, sizeof(struct dml2_soc_bb));
-
- dml->project_id = in_out->options.project_id;
- dml->pmo_options = in_out->options.pmo_options;
-
- // Initialize All Components
- result = dml2_mcg_create(in_out->options.project_id, &dml->mcg_instance);
-
- if (result)
- result = dml2_dpmm_create(in_out->options.project_id, &dml->dpmm_instance);
-
- if (result)
- result = dml2_core_create(in_out->options.project_id, &dml->core_instance);
-
- if (result) {
- mcg_build_min_clk_params.soc_bb = &in_out->soc_bb;
- mcg_build_min_clk_params.min_clk_table = &dml->min_clk_table;
- result = dml->mcg_instance.build_min_clock_table(&mcg_build_min_clk_params);
- }
-
- if (result) {
- core_init_params.project_id = in_out->options.project_id;
- core_init_params.instance = &dml->core_instance;
- core_init_params.minimum_clock_table = &dml->min_clk_table;
- core_init_params.explicit_ip_bb = in_out->overrides.explicit_ip_bb;
- core_init_params.explicit_ip_bb_size = in_out->overrides.explicit_ip_bb_size;
- core_init_params.ip_caps = &in_out->ip_caps;
- core_init_params.soc_bb = &in_out->soc_bb;
- result = dml->core_instance.initialize(&core_init_params);
-
- if (core_init_params.explicit_ip_bb && core_init_params.explicit_ip_bb_size > 0) {
- memcpy(&dml->ip_caps, &in_out->ip_caps, sizeof(struct dml2_ip_capabilities));
- }
- }
-
- if (result)
- result = dml2_pmo_create(in_out->options.project_id, &dml->pmo_instance);
-
- if (result) {
- pmo_init_params.instance = &dml->pmo_instance;
- pmo_init_params.soc_bb = &dml->soc_bbox;
- pmo_init_params.ip_caps = &dml->ip_caps;
- pmo_init_params.mcg_clock_table_size = dml->min_clk_table.dram_bw_table.num_entries;
- pmo_init_params.options = &dml->pmo_options;
- dml->pmo_instance.initialize(&pmo_init_params);
- }
-
- return result;
-}
-
-static void setup_unoptimized_display_config_with_meta(const struct dml2_instance *dml, struct display_configuation_with_meta *out, const struct dml2_display_cfg *display_config)
-{
- memcpy(&out->display_config, display_config, sizeof(struct dml2_display_cfg));
- out->stage1.min_clk_index_for_latency = dml->min_clk_table.dram_bw_table.num_entries - 1; //dml->min_clk_table.clean_me_up.soc_bb.num_states - 1;
-}
-
-static void setup_speculative_display_config_with_meta(const struct dml2_instance *dml, struct display_configuation_with_meta *out, const struct dml2_display_cfg *display_config)
-{
- memcpy(&out->display_config, display_config, sizeof(struct dml2_display_cfg));
- out->stage1.min_clk_index_for_latency = 0;
-}
-
-bool dml2_check_mode_supported(struct dml2_check_mode_supported_in_out *in_out)
-{
- struct dml2_instance *dml = (struct dml2_instance *)in_out->dml2_instance;
- struct dml2_check_mode_supported_locals *l = &dml->scratch.check_mode_supported_locals;
- struct dml2_display_cfg_programming *dpmm_programming = &dml->dpmm_instance.dpmm_scratch.programming;
-
- bool result = false;
- bool mcache_success = false;
-
- memset(dpmm_programming, 0, sizeof(struct dml2_display_cfg_programming));
-
- setup_unoptimized_display_config_with_meta(dml, &l->base_display_config_with_meta, in_out->display_config);
-
- l->mode_support_params.instance = &dml->core_instance;
- l->mode_support_params.display_cfg = &l->base_display_config_with_meta;
- l->mode_support_params.min_clk_table = &dml->min_clk_table;
- l->mode_support_params.min_clk_index = l->base_display_config_with_meta.stage1.min_clk_index_for_latency;
-
- result = dml->core_instance.mode_support(&l->mode_support_params);
- l->base_display_config_with_meta.mode_support_result = l->mode_support_params.mode_support_result;
-
- if (result) {
- struct optimization_phase_params mcache_phase = {
- .dml = dml,
- .display_config = &l->base_display_config_with_meta,
- .test_function = dml2_top_optimization_test_function_mcache,
- .optimize_function = dml2_top_optimization_optimize_function_mcache,
- .optimized_display_config = &l->optimized_display_config_with_meta,
- .all_or_nothing = false,
- };
- mcache_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &mcache_phase);
- }
-
- /*
- * Call DPMM to map all requirements to minimum clock state
- */
- if (result) {
- l->dppm_map_mode_params.min_clk_table = &dml->min_clk_table;
- l->dppm_map_mode_params.display_cfg = &l->base_display_config_with_meta;
- l->dppm_map_mode_params.programming = dpmm_programming;
- l->dppm_map_mode_params.soc_bb = &dml->soc_bbox;
- l->dppm_map_mode_params.ip = &dml->core_instance.clean_me_up.mode_lib.ip;
- result = dml->dpmm_instance.map_mode_to_soc_dpm(&l->dppm_map_mode_params);
- }
-
- in_out->is_supported = mcache_success;
- result = result && in_out->is_supported;
-
- return result;
-}
-
-bool dml2_build_mode_programming(struct dml2_build_mode_programming_in_out *in_out)
-{
- struct dml2_instance *dml = (struct dml2_instance *)in_out->dml2_instance;
- struct dml2_build_mode_programming_locals *l = &dml->scratch.build_mode_programming_locals;
-
- bool result = false;
- bool mcache_success = false;
- bool uclk_pstate_success = false;
- bool vmin_success = false;
- bool stutter_success = false;
- unsigned int i;
-
- memset(l, 0, sizeof(struct dml2_build_mode_programming_locals));
- memset(in_out->programming, 0, sizeof(struct dml2_display_cfg_programming));
-
- memcpy(&in_out->programming->display_config, in_out->display_config, sizeof(struct dml2_display_cfg));
-
- setup_speculative_display_config_with_meta(dml, &l->base_display_config_with_meta, in_out->display_config);
-
- l->mode_support_params.instance = &dml->core_instance;
- l->mode_support_params.display_cfg = &l->base_display_config_with_meta;
- l->mode_support_params.min_clk_table = &dml->min_clk_table;
- l->mode_support_params.min_clk_index = l->base_display_config_with_meta.stage1.min_clk_index_for_latency;
-
- result = dml->core_instance.mode_support(&l->mode_support_params);
- l->base_display_config_with_meta.mode_support_result = l->mode_support_params.mode_support_result;
-
- if (!result) {
- setup_unoptimized_display_config_with_meta(dml, &l->base_display_config_with_meta, in_out->display_config);
-
- l->mode_support_params.instance = &dml->core_instance;
- l->mode_support_params.display_cfg = &l->base_display_config_with_meta;
- l->mode_support_params.min_clk_table = &dml->min_clk_table;
- l->mode_support_params.min_clk_index = l->base_display_config_with_meta.stage1.min_clk_index_for_latency;
-
- result = dml->core_instance.mode_support(&l->mode_support_params);
- l->base_display_config_with_meta.mode_support_result = l->mode_support_params.mode_support_result;
-
- if (!result) {
- l->informative_params.instance = &dml->core_instance;
- l->informative_params.programming = in_out->programming;
- l->informative_params.mode_is_supported = false;
- dml->core_instance.populate_informative(&l->informative_params);
-
- return false;
- }
-
- /*
- * Phase 1: Determine minimum clocks to satisfy latency requirements for this mode
- */
- memset(&l->min_clock_for_latency_phase, 0, sizeof(struct optimization_phase_params));
- l->min_clock_for_latency_phase.dml = dml;
- l->min_clock_for_latency_phase.display_config = &l->base_display_config_with_meta;
- l->min_clock_for_latency_phase.init_function = dml2_top_optimization_init_function_min_clk_for_latency;
- l->min_clock_for_latency_phase.test_function = dml2_top_optimization_test_function_min_clk_for_latency;
- l->min_clock_for_latency_phase.optimize_function = dml2_top_optimization_optimize_function_min_clk_for_latency;
- l->min_clock_for_latency_phase.optimized_display_config = &l->optimized_display_config_with_meta;
- l->min_clock_for_latency_phase.all_or_nothing = false;
-
- dml2_top_optimization_perform_optimization_phase_1(&l->optimization_phase_locals, &l->min_clock_for_latency_phase);
-
- memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
- }
-
- /*
- * Phase 2: Satisfy DCC mcache requirements
- */
- memset(&l->mcache_phase, 0, sizeof(struct optimization_phase_params));
- l->mcache_phase.dml = dml;
- l->mcache_phase.display_config = &l->base_display_config_with_meta;
- l->mcache_phase.test_function = dml2_top_optimization_test_function_mcache;
- l->mcache_phase.optimize_function = dml2_top_optimization_optimize_function_mcache;
- l->mcache_phase.optimized_display_config = &l->optimized_display_config_with_meta;
- l->mcache_phase.all_or_nothing = true;
-
- mcache_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &l->mcache_phase);
-
- if (!mcache_success) {
- l->informative_params.instance = &dml->core_instance;
- l->informative_params.programming = in_out->programming;
- l->informative_params.mode_is_supported = false;
-
- dml->core_instance.populate_informative(&l->informative_params);
-
- in_out->programming->informative.failed_mcache_validation = true;
- return false;
- }
-
- memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
-
- /*
- * Phase 3: Optimize for Pstate
- */
- memset(&l->uclk_pstate_phase, 0, sizeof(struct optimization_phase_params));
- l->uclk_pstate_phase.dml = dml;
- l->uclk_pstate_phase.display_config = &l->base_display_config_with_meta;
- l->uclk_pstate_phase.init_function = dml2_top_optimization_init_function_uclk_pstate;
- l->uclk_pstate_phase.test_function = dml2_top_optimization_test_function_uclk_pstate;
- l->uclk_pstate_phase.optimize_function = dml2_top_optimization_optimize_function_uclk_pstate;
- l->uclk_pstate_phase.optimized_display_config = &l->optimized_display_config_with_meta;
- l->uclk_pstate_phase.all_or_nothing = true;
-
- uclk_pstate_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &l->uclk_pstate_phase);
-
- if (uclk_pstate_success) {
- memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
- l->base_display_config_with_meta.stage3.success = true;
- }
-
- /*
- * Phase 4: Optimize for Vmin
- */
- memset(&l->vmin_phase, 0, sizeof(struct optimization_phase_params));
- l->vmin_phase.dml = dml;
- l->vmin_phase.display_config = &l->base_display_config_with_meta;
- l->vmin_phase.init_function = dml2_top_optimization_init_function_vmin;
- l->vmin_phase.test_function = dml2_top_optimization_test_function_vmin;
- l->vmin_phase.optimize_function = dml2_top_optimization_optimize_function_vmin;
- l->vmin_phase.optimized_display_config = &l->optimized_display_config_with_meta;
- l->vmin_phase.all_or_nothing = false;
-
- vmin_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &l->vmin_phase);
-
- if (l->optimized_display_config_with_meta.stage4.performed) {
- /*
- * when performed is true, optimization has applied to
- * optimized_display_config_with_meta and it has passed mode
- * support. However it may or may not pass the test function to
- * reach actual Vmin. As long as voltage is optimized even if it
- * doesn't reach Vmin level, there is still power benefit so in
- * this case we will still copy this optimization into base
- * display config.
- */
- memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
- l->base_display_config_with_meta.stage4.success = vmin_success;
- }
-
- /*
- * Phase 5: Optimize for Stutter
- */
- memset(&l->stutter_phase, 0, sizeof(struct optimization_phase_params));
- l->stutter_phase.dml = dml;
- l->stutter_phase.display_config = &l->base_display_config_with_meta;
- l->stutter_phase.init_function = dml2_top_optimization_init_function_stutter;
- l->stutter_phase.test_function = dml2_top_optimization_test_function_stutter;
- l->stutter_phase.optimize_function = dml2_top_optimization_optimize_function_stutter;
- l->stutter_phase.optimized_display_config = &l->optimized_display_config_with_meta;
- l->stutter_phase.all_or_nothing = true;
-
- stutter_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &l->stutter_phase);
-
- if (stutter_success) {
- memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
- l->base_display_config_with_meta.stage5.success = true;
- }
-
- /*
- * Populate mcache programming
- */
- for (i = 0; i < in_out->display_config->num_planes; i++) {
- in_out->programming->plane_programming[i].mcache_allocation = l->base_display_config_with_meta.stage2.mcache_allocations[i];
- }
-
- /*
- * Call DPMM to map all requirements to minimum clock state
- */
- if (result) {
- l->dppm_map_mode_params.min_clk_table = &dml->min_clk_table;
- l->dppm_map_mode_params.display_cfg = &l->base_display_config_with_meta;
- l->dppm_map_mode_params.programming = in_out->programming;
- l->dppm_map_mode_params.soc_bb = &dml->soc_bbox;
- l->dppm_map_mode_params.ip = &dml->core_instance.clean_me_up.mode_lib.ip;
- result = dml->dpmm_instance.map_mode_to_soc_dpm(&l->dppm_map_mode_params);
- if (!result)
- in_out->programming->informative.failed_dpmm = true;
- }
-
- if (result) {
- l->mode_programming_params.instance = &dml->core_instance;
- l->mode_programming_params.display_cfg = &l->base_display_config_with_meta;
- l->mode_programming_params.cfg_support_info = &l->base_display_config_with_meta.mode_support_result.cfg_support_info;
- l->mode_programming_params.programming = in_out->programming;
-
- result = dml->core_instance.mode_programming(&l->mode_programming_params);
- if (!result)
- in_out->programming->informative.failed_mode_programming = true;
- }
-
- if (result) {
- l->dppm_map_watermarks_params.core = &dml->core_instance;
- l->dppm_map_watermarks_params.display_cfg = &l->base_display_config_with_meta;
- l->dppm_map_watermarks_params.programming = in_out->programming;
- result = dml->dpmm_instance.map_watermarks(&l->dppm_map_watermarks_params);
- }
-
- l->informative_params.instance = &dml->core_instance;
- l->informative_params.programming = in_out->programming;
- l->informative_params.mode_is_supported = result;
-
- dml->core_instance.populate_informative(&l->informative_params);
-
- return result;
-}
-
-bool dml2_build_mcache_programming(struct dml2_build_mcache_programming_in_out *in_out)
-{
- return dml2_top_mcache_build_mcache_programming(in_out);
-}
-
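The deleted dml_top.c above drove mode programming as a chain of optimization phases (minimum clock for latency, mcache, UCLK p-state, Vmin, stutter), each described by a test function, an optimize function and an all_or_nothing flag. A minimal standalone sketch of that phase pattern follows; the names (demo_config, phase_params, run_phase) are invented for illustration and are not the real DML2 types, only the overall shape mirrors the diffed code.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct demo_config {
        int min_clk_index;
        bool stutter_enabled;
};

struct phase_params {
        const char *name;
        bool (*test)(const struct demo_config *cfg);
        bool (*optimize)(struct demo_config *cfg);
        bool all_or_nothing;    /* discard partial progress on failure */
};

static bool run_phase(struct demo_config *base, const struct phase_params *p)
{
        struct demo_config candidate = *base;  /* optimize a working copy */
        bool success = p->test(&candidate);
        int guard = 8;                         /* iteration cap for the sketch */

        while (!success && guard-- > 0 && p->optimize(&candidate))
                success = p->test(&candidate);

        /* all_or_nothing phases only commit a fully successful result */
        if (success || !p->all_or_nothing)
                memcpy(base, &candidate, sizeof(candidate));

        return success;
}

static bool test_stutter(const struct demo_config *cfg)
{
        return cfg->stutter_enabled;
}

static bool optimize_stutter(struct demo_config *cfg)
{
        cfg->stutter_enabled = true;
        return true;
}

int main(void)
{
        struct demo_config cfg = { .min_clk_index = 3, .stutter_enabled = false };
        struct phase_params stutter = {
                .name = "stutter",
                .test = test_stutter,
                .optimize = optimize_stutter,
                .all_or_nothing = true,
        };

        printf("%s phase %s, stutter_enabled=%d\n", stutter.name,
               run_phase(&cfg, &stutter) ? "succeeded" : "failed",
               cfg.stutter_enabled);
        return 0;
}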
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c
index f95c7ff56f15..c506667897c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c
@@ -29,8 +29,3 @@ int dml2_printf(const char *format, ...)
return 0;
#endif
}
-
-void dml2_assert(int condition)
-{
- //ASSERT(condition);
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
index a27792b56f7e..bfe6f236d2e4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
@@ -5,11 +5,10 @@
#ifndef __DML2_DEBUG_H__
#define __DML2_DEBUG_H__
-#ifdef _DEBUG
-#define DML2_ASSERT(condition) dml2_assert(condition)
-#else
+#ifndef DML2_ASSERT
#define DML2_ASSERT(condition) ((void)0)
#endif
+
/*
* DML_LOG_FATAL - fatal errors for unrecoverable DML states until a restart.
* DML_LOG_ERROR - unexpected but recoverable failures inside DML
@@ -56,6 +55,5 @@
int dml2_log_internal(const char *format, ...);
int dml2_printf(const char *format, ...);
-void dml2_assert(int condition);
#endif
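With the hunk above, DML2_ASSERT becomes a no-op unless something defines it before this header is included; the out-of-line dml2_assert() stub is gone. A minimal standalone sketch of that #ifndef override pattern, using an invented MY_ASSERT macro rather than the real DML2 header:

#include <assert.h>
#include <stdio.h>

/* A build that wants real checking defines the macro before the header ... */
#define MY_ASSERT(condition) assert(condition)

/* ... so the header-style fallback below never fires. */
#ifndef MY_ASSERT
#define MY_ASSERT(condition) ((void)0)
#endif

int main(void)
{
        MY_ASSERT(1 + 1 == 2);  /* expands to a real assert() here */
        puts("assert path active");
        return 0;
}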
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
index 7fb6026bcb49..d8d01dceacdd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
@@ -64,7 +64,6 @@ struct dml2_mcg_build_min_clock_table_params_in_out {
};
struct dml2_mcg_instance {
bool (*build_min_clock_table)(struct dml2_mcg_build_min_clock_table_params_in_out *in_out);
- bool (*unit_test)(void);
};
/*
@@ -110,7 +109,6 @@ struct dml2_dpmm_scratch {
struct dml2_dpmm_instance {
bool (*map_mode_to_soc_dpm)(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out);
bool (*map_watermarks)(struct dml2_dpmm_map_watermarks_params_in_out *in_out);
- bool (*unit_test)(void);
struct dml2_dpmm_scratch dpmm_scratch;
};
@@ -473,7 +471,6 @@ struct dml2_core_instance {
bool (*mode_programming)(struct dml2_core_mode_programming_in_out *in_out);
bool (*populate_informative)(struct dml2_core_populate_informative_in_out *in_out);
bool (*calculate_mcache_allocation)(struct dml2_calculate_mcache_allocation_in_out *in_out);
- bool (*unit_test)(void);
struct {
struct dml2_core_internal_display_mode_lib mode_lib;
@@ -721,8 +718,6 @@ struct dml2_pmo_instance {
bool (*test_for_stutter)(struct dml2_pmo_test_for_stutter_in_out *in_out);
bool (*optimize_for_stutter)(struct dml2_pmo_optimize_for_stutter_in_out *in_out);
- bool (*unit_test)(void);
-
struct dml2_pmo_init_data init_data;
struct dml2_pmo_scratch scratch;
};
@@ -947,7 +942,6 @@ struct dml2_top_funcs {
bool (*check_mode_supported)(struct dml2_check_mode_supported_in_out *in_out);
bool (*build_mode_programming)(struct dml2_build_mode_programming_in_out *in_out);
bool (*build_mcache_programming)(struct dml2_build_mcache_programming_in_out *in_out);
- bool (*unit_test)(void);
};
struct dml2_instance {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
index 1ed21c1b86a5..a966abd40788 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
@@ -532,26 +532,6 @@ static void calculate_odm_slices(const struct dc_stream_state *stream, unsigned
odm_slice_end_x[odm_factor - 1] = stream->src.width - 1;
}
-static bool is_plane_in_odm_slice(const struct dc_plane_state *plane, unsigned int slice_index, unsigned int *odm_slice_end_x, unsigned int num_slices)
-{
- unsigned int slice_start_x, slice_end_x;
-
- if (slice_index == 0)
- slice_start_x = 0;
- else
- slice_start_x = odm_slice_end_x[slice_index - 1] + 1;
-
- slice_end_x = odm_slice_end_x[slice_index];
-
- if (plane->clip_rect.x + plane->clip_rect.width < slice_start_x)
- return false;
-
- if (plane->clip_rect.x > slice_end_x)
- return false;
-
- return true;
-}
-
static void add_odm_slice_to_odm_tree(struct dml2_context *ctx,
struct dc_state *state,
struct dc_pipe_mapping_scratch *scratch,
@@ -791,12 +771,6 @@ static void map_pipes_for_plane(struct dml2_context *ctx, struct dc_state *state
sort_pipes_for_splitting(&scratch->pipe_pool);
for (odm_slice_index = 0; odm_slice_index < scratch->odm_info.odm_factor; odm_slice_index++) {
- // We build the tree for one ODM slice at a time.
- // Each ODM slice shares a common OPP
- if (!is_plane_in_odm_slice(plane, odm_slice_index, scratch->odm_info.odm_slice_end_x, scratch->odm_info.odm_factor)) {
- continue;
- }
-
// Now we have a list of all pipes to be used for this plane/stream, now setup the tree.
scratch->odm_info.next_higher_pipe_for_odm_slice[odm_slice_index] = add_plane_to_blend_tree(ctx, state,
plane,
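The removed is_plane_in_odm_slice() helper was a plain interval-overlap test between the plane's clip rectangle and one ODM slice; with it gone, map_pipes_for_plane() now builds the blend tree for every slice. For reference, a standalone restatement of the dropped check, with generic names in place of the real dc_plane_state/scratch types:

#include <stdbool.h>
#include <stdio.h>

struct demo_rect { unsigned int x, width; };

static bool rect_overlaps_slice(const struct demo_rect *clip,
                                unsigned int slice_start_x, unsigned int slice_end_x)
{
        if (clip->x + clip->width < slice_start_x)  /* entirely left of the slice */
                return false;
        if (clip->x > slice_end_x)                  /* entirely right of the slice */
                return false;
        return true;
}

int main(void)
{
        struct demo_rect clip = { .x = 1920, .width = 960 };

        /* two ODM slices of a 3840-wide stream: [0, 1919] and [1920, 3839] */
        printf("slice0 overlap: %d\n", rect_overlaps_slice(&clip, 0, 1919));
        printf("slice1 overlap: %d\n", rect_overlaps_slice(&clip, 1920, 3839));
        return 0;
}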
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c
index c4c52173ef22..ef693f608d59 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c
@@ -301,6 +301,7 @@ void build_unoptimized_policy_settings(enum dml_project_id project, struct dml_m
policy->AssumeModeSupportAtMaxPwrStateEvenDRAMClockChangeNotSupported = true; // TOREVIEW: What does this mean?
policy->AssumeModeSupportAtMaxPwrStateEvenFClockChangeNotSupported = true; // TOREVIEW: What does this mean?
if (project == dml_project_dcn35 ||
+ project == dml_project_dcn36 ||
project == dml_project_dcn351) {
policy->DCCProgrammingAssumesScanDirectionUnknownFinal = false;
policy->EnhancedPrefetchScheduleAccelerationFinal = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index b8a34abaf519..70c39df62533 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -107,6 +107,7 @@ void dml2_init_ip_params(struct dml2_context *dml2, const struct dc *in_dc, stru
case dml_project_dcn35:
case dml_project_dcn351:
+ case dml_project_dcn36:
out->rob_buffer_size_kbytes = 64;
out->config_return_buffer_size_in_kbytes = 1792;
out->compressed_buffer_segment_size_in_kbytes = 64;
@@ -292,6 +293,7 @@ void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, s
case dml_project_dcn35:
case dml_project_dcn351:
+ case dml_project_dcn36:
out->num_chans = 4;
out->round_trip_ping_latency_dcfclk_cycles = 106;
out->smn_latency_us = 2;
@@ -506,6 +508,7 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
p->dcfclk_stas_mhz[3] = 1324;
p->dcfclk_stas_mhz[4] = p->in_states->state_array[1].dcfclk_mhz;
} else if (dml2->v20.dml_core_ctx.project != dml_project_dcn35 &&
+ dml2->v20.dml_core_ctx.project != dml_project_dcn36 &&
dml2->v20.dml_core_ctx.project != dml_project_dcn351) {
p->dcfclk_stas_mhz[0] = 300;
p->dcfclk_stas_mhz[1] = 615;
@@ -554,6 +557,7 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
}
if (dml2->v20.dml_core_ctx.project == dml_project_dcn35 ||
+ dml2->v20.dml_core_ctx.project == dml_project_dcn36 ||
dml2->v20.dml_core_ctx.project == dml_project_dcn351) {
int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0,
max_dtbclk_mhz = 0, max_fclk_mhz = 0, max_uclk_mhz = 0, max_socclk_mhz = 0;
@@ -586,11 +590,11 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
p->out_states->state_array[i].dtbclk_mhz = max_dtbclk_mhz;
p->out_states->state_array[i].phyclk_mhz = max_phyclk_mhz;
- p->out_states->state_array[i].dscclk_mhz = max_dispclk_mhz / 3.0;
p->out_states->state_array[i].phyclk_mhz = max_phyclk_mhz;
p->out_states->state_array[i].dtbclk_mhz = max_dtbclk_mhz;
/* Dependent states. */
+ p->out_states->state_array[i].dscclk_mhz = p->in_states->state_array[i].dscclk_mhz;
p->out_states->state_array[i].dram_speed_mts = p->in_states->state_array[i].dram_speed_mts;
p->out_states->state_array[i].fabricclk_mhz = p->in_states->state_array[i].fabricclk_mhz;
p->out_states->state_array[i].socclk_mhz = p->in_states->state_array[i].socclk_mhz;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 68b882d28195..939ee0708bd2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -33,7 +33,6 @@
#include "dml2_dc_resource_mgmt.h"
#include "dml21_wrapper.h"
-
static void initialize_dml2_ip_params(struct dml2_context *dml2, const struct dc *in_dc, struct ip_params_st *out)
{
if (dml2->config.use_native_soc_bb_construction)
@@ -72,6 +71,7 @@ static void map_hw_resources(struct dml2_context *dml2,
in_out_display_cfg->hw.NumberOfDSCSlices[i] = mode_support_info->NumberOfDSCSlices[i];
in_out_display_cfg->hw.DLGRefClkFreqMHz = 24;
if (dml2->v20.dml_core_ctx.project != dml_project_dcn35 &&
+ dml2->v20.dml_core_ctx.project != dml_project_dcn36 &&
dml2->v20.dml_core_ctx.project != dml_project_dcn351) {
/*dGPU default as 50Mhz*/
in_out_display_cfg->hw.DLGRefClkFreqMHz = 50;
@@ -762,6 +762,9 @@ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_op
case DCN_VERSION_3_51:
(*dml2)->v20.dml_core_ctx.project = dml_project_dcn351;
break;
+ case DCN_VERSION_3_6:
+ (*dml2)->v20.dml_core_ctx.project = dml_project_dcn36;
+ break;
case DCN_VERSION_3_2:
(*dml2)->v20.dml_core_ctx.project = dml_project_dcn32;
break;
@@ -785,7 +788,10 @@ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_op
bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
- if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01))
+ // TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete.
+ if ((in_dc->debug.using_dml21) &&
+     (in_dc->ctx->dce_version == DCN_VERSION_4_01))
return dml21_create(in_dc, dml2, config);
// Allocate Mode Lib Ctx
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
index 0f944fcfd5a5..785226945699 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
@@ -159,6 +159,7 @@ struct dml2_clks_table_entry {
unsigned int dtbclk_mhz;
unsigned int dispclk_mhz;
unsigned int dppclk_mhz;
+ unsigned int dram_speed_mts; /* based on wck_ratio */
};
struct dml2_clks_num_entries {
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h
index f09cba8e29cc..85f359b5da67 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h
@@ -26,7 +26,6 @@
#define __DCN20_DPP_H__
#include "dcn10/dcn10_dpp.h"
-#include "spl/dc_spl_types.h"
#define TO_DCN20_DPP(dpp)\
container_of(dpp, struct dcn20_dpp, base)
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
index 40acebd13e46..abf439e743f2 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
@@ -425,11 +425,6 @@ bool dpp3_get_optimal_number_of_taps(
int min_taps_y, min_taps_c;
enum lb_memory_config lb_config;
- if (scl_data->viewport.width > scl_data->h_active &&
- dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
- scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
- return false;
-
/*
* Set default taps if none are provided
* From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling
@@ -467,6 +462,12 @@ bool dpp3_get_optimal_number_of_taps(
else
scl_data->taps.h_taps_c = in_taps->h_taps_c;
+ // Avoid null data in the scl data with this early return; proceed with the non-adaptive calculation first
+ if (scl_data->viewport.width > scl_data->h_active &&
+ dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
+ scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
+ return false;
+
/*Ensure we can support the requested number of vtaps*/
min_taps_y = dc_fixpt_ceil(scl_data->ratios.vert);
min_taps_c = dc_fixpt_ceil(scl_data->ratios.vert_c);
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h
index 992df172378c..f33dddbfcc31 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h
@@ -27,7 +27,6 @@
#include "dcn20/dcn20_dpp.h"
#include "dcn30/dcn30_dpp.h"
-#include "spl/dc_spl_types.h"
bool dpp32_construct(struct dcn3_dpp *dpp3,
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
index 4bc85aaf17da..ecaa976e1f52 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
@@ -567,80 +567,82 @@
type ISHARP_NLDELTA_SCLIP_PIVOT_N; \
type ISHARP_NLDELTA_SCLIP_SLOPE_N
+#define DPP_REG_VARIABLE_LIST_DCN401 \
+ DPP_DCN3_REG_VARIABLE_LIST_COMMON; \
+ uint32_t CURSOR0_FP_SCALE_BIAS_G_Y; \
+ uint32_t CURSOR0_FP_SCALE_BIAS_RB_CRCB; \
+ uint32_t CUR0_MATRIX_MODE; \
+ uint32_t CUR0_MATRIX_C11_C12_A; \
+ uint32_t CUR0_MATRIX_C13_C14_A; \
+ uint32_t CUR0_MATRIX_C21_C22_A; \
+ uint32_t CUR0_MATRIX_C23_C24_A; \
+ uint32_t CUR0_MATRIX_C31_C32_A; \
+ uint32_t CUR0_MATRIX_C33_C34_A; \
+ uint32_t CUR0_MATRIX_C11_C12_B; \
+ uint32_t CUR0_MATRIX_C13_C14_B; \
+ uint32_t CUR0_MATRIX_C21_C22_B; \
+ uint32_t CUR0_MATRIX_C23_C24_B; \
+ uint32_t CUR0_MATRIX_C31_C32_B; \
+ uint32_t CUR0_MATRIX_C33_C34_B; \
+ uint32_t DSCL_SC_MODE; \
+ uint32_t DSCL_EASF_H_MODE; \
+ uint32_t DSCL_EASF_H_BF_CNTL; \
+ uint32_t DSCL_EASF_H_RINGEST_EVENTAP_REDUCE; \
+ uint32_t DSCL_EASF_H_RINGEST_EVENTAP_GAIN; \
+ uint32_t DSCL_EASF_H_BF_FINAL_MAX_MIN; \
+ uint32_t DSCL_EASF_H_BF1_PWL_SEG0; \
+ uint32_t DSCL_EASF_H_BF1_PWL_SEG1; \
+ uint32_t DSCL_EASF_H_BF1_PWL_SEG2; \
+ uint32_t DSCL_EASF_H_BF1_PWL_SEG3; \
+ uint32_t DSCL_EASF_H_BF1_PWL_SEG4; \
+ uint32_t DSCL_EASF_H_BF1_PWL_SEG5; \
+ uint32_t DSCL_EASF_H_BF1_PWL_SEG6; \
+ uint32_t DSCL_EASF_H_BF1_PWL_SEG7; \
+ uint32_t DSCL_EASF_H_BF3_PWL_SEG0; \
+ uint32_t DSCL_EASF_H_BF3_PWL_SEG1; \
+ uint32_t DSCL_EASF_H_BF3_PWL_SEG2; \
+ uint32_t DSCL_EASF_H_BF3_PWL_SEG3; \
+ uint32_t DSCL_EASF_H_BF3_PWL_SEG4; \
+ uint32_t DSCL_EASF_H_BF3_PWL_SEG5; \
+ uint32_t DSCL_EASF_V_MODE; \
+ uint32_t DSCL_EASF_V_BF_CNTL; \
+ uint32_t DSCL_EASF_V_RINGEST_3TAP_CNTL1; \
+ uint32_t DSCL_EASF_V_RINGEST_3TAP_CNTL2; \
+ uint32_t DSCL_EASF_V_RINGEST_3TAP_CNTL3; \
+ uint32_t DSCL_EASF_V_RINGEST_EVENTAP_REDUCE; \
+ uint32_t DSCL_EASF_V_RINGEST_EVENTAP_GAIN; \
+ uint32_t DSCL_EASF_V_BF_FINAL_MAX_MIN; \
+ uint32_t DSCL_EASF_V_BF1_PWL_SEG0; \
+ uint32_t DSCL_EASF_V_BF1_PWL_SEG1; \
+ uint32_t DSCL_EASF_V_BF1_PWL_SEG2; \
+ uint32_t DSCL_EASF_V_BF1_PWL_SEG3; \
+ uint32_t DSCL_EASF_V_BF1_PWL_SEG4; \
+ uint32_t DSCL_EASF_V_BF1_PWL_SEG5; \
+ uint32_t DSCL_EASF_V_BF1_PWL_SEG6; \
+ uint32_t DSCL_EASF_V_BF1_PWL_SEG7; \
+ uint32_t DSCL_EASF_V_BF3_PWL_SEG0; \
+ uint32_t DSCL_EASF_V_BF3_PWL_SEG1; \
+ uint32_t DSCL_EASF_V_BF3_PWL_SEG2; \
+ uint32_t DSCL_EASF_V_BF3_PWL_SEG3; \
+ uint32_t DSCL_EASF_V_BF3_PWL_SEG4; \
+ uint32_t DSCL_EASF_V_BF3_PWL_SEG5; \
+ uint32_t DSCL_SC_MATRIX_C0C1; \
+ uint32_t DSCL_SC_MATRIX_C2C3; \
+ uint32_t ISHARP_MODE; \
+ uint32_t ISHARP_NOISEDET_THRESHOLD; \
+ uint32_t ISHARP_NOISE_GAIN_PWL; \
+ uint32_t ISHARP_LBA_PWL_SEG0; \
+ uint32_t ISHARP_LBA_PWL_SEG1; \
+ uint32_t ISHARP_LBA_PWL_SEG2; \
+ uint32_t ISHARP_LBA_PWL_SEG3; \
+ uint32_t ISHARP_LBA_PWL_SEG4; \
+ uint32_t ISHARP_LBA_PWL_SEG5; \
+ uint32_t ISHARP_DELTA_CTRL; \
+ uint32_t ISHARP_DELTA_DATA; \
+ uint32_t ISHARP_DELTA_INDEX; \
+ uint32_t ISHARP_NLDELTA_SOFT_CLIP
struct dcn401_dpp_registers {
- DPP_DCN3_REG_VARIABLE_LIST_COMMON;
- uint32_t CURSOR0_FP_SCALE_BIAS_G_Y;
- uint32_t CURSOR0_FP_SCALE_BIAS_RB_CRCB;
- uint32_t CUR0_MATRIX_MODE;
- uint32_t CUR0_MATRIX_C11_C12_A;
- uint32_t CUR0_MATRIX_C13_C14_A;
- uint32_t CUR0_MATRIX_C21_C22_A;
- uint32_t CUR0_MATRIX_C23_C24_A;
- uint32_t CUR0_MATRIX_C31_C32_A;
- uint32_t CUR0_MATRIX_C33_C34_A;
- uint32_t CUR0_MATRIX_C11_C12_B;
- uint32_t CUR0_MATRIX_C13_C14_B;
- uint32_t CUR0_MATRIX_C21_C22_B;
- uint32_t CUR0_MATRIX_C23_C24_B;
- uint32_t CUR0_MATRIX_C31_C32_B;
- uint32_t CUR0_MATRIX_C33_C34_B;
- uint32_t DSCL_SC_MODE;
- uint32_t DSCL_EASF_H_MODE;
- uint32_t DSCL_EASF_H_BF_CNTL;
- uint32_t DSCL_EASF_H_RINGEST_EVENTAP_REDUCE;
- uint32_t DSCL_EASF_H_RINGEST_EVENTAP_GAIN;
- uint32_t DSCL_EASF_H_BF_FINAL_MAX_MIN;
- uint32_t DSCL_EASF_H_BF1_PWL_SEG0;
- uint32_t DSCL_EASF_H_BF1_PWL_SEG1;
- uint32_t DSCL_EASF_H_BF1_PWL_SEG2;
- uint32_t DSCL_EASF_H_BF1_PWL_SEG3;
- uint32_t DSCL_EASF_H_BF1_PWL_SEG4;
- uint32_t DSCL_EASF_H_BF1_PWL_SEG5;
- uint32_t DSCL_EASF_H_BF1_PWL_SEG6;
- uint32_t DSCL_EASF_H_BF1_PWL_SEG7;
- uint32_t DSCL_EASF_H_BF3_PWL_SEG0;
- uint32_t DSCL_EASF_H_BF3_PWL_SEG1;
- uint32_t DSCL_EASF_H_BF3_PWL_SEG2;
- uint32_t DSCL_EASF_H_BF3_PWL_SEG3;
- uint32_t DSCL_EASF_H_BF3_PWL_SEG4;
- uint32_t DSCL_EASF_H_BF3_PWL_SEG5;
- uint32_t DSCL_EASF_V_MODE;
- uint32_t DSCL_EASF_V_BF_CNTL;
- uint32_t DSCL_EASF_V_RINGEST_3TAP_CNTL1;
- uint32_t DSCL_EASF_V_RINGEST_3TAP_CNTL2;
- uint32_t DSCL_EASF_V_RINGEST_3TAP_CNTL3;
- uint32_t DSCL_EASF_V_RINGEST_EVENTAP_REDUCE;
- uint32_t DSCL_EASF_V_RINGEST_EVENTAP_GAIN;
- uint32_t DSCL_EASF_V_BF_FINAL_MAX_MIN;
- uint32_t DSCL_EASF_V_BF1_PWL_SEG0;
- uint32_t DSCL_EASF_V_BF1_PWL_SEG1;
- uint32_t DSCL_EASF_V_BF1_PWL_SEG2;
- uint32_t DSCL_EASF_V_BF1_PWL_SEG3;
- uint32_t DSCL_EASF_V_BF1_PWL_SEG4;
- uint32_t DSCL_EASF_V_BF1_PWL_SEG5;
- uint32_t DSCL_EASF_V_BF1_PWL_SEG6;
- uint32_t DSCL_EASF_V_BF1_PWL_SEG7;
- uint32_t DSCL_EASF_V_BF3_PWL_SEG0;
- uint32_t DSCL_EASF_V_BF3_PWL_SEG1;
- uint32_t DSCL_EASF_V_BF3_PWL_SEG2;
- uint32_t DSCL_EASF_V_BF3_PWL_SEG3;
- uint32_t DSCL_EASF_V_BF3_PWL_SEG4;
- uint32_t DSCL_EASF_V_BF3_PWL_SEG5;
- uint32_t DSCL_SC_MATRIX_C0C1;
- uint32_t DSCL_SC_MATRIX_C2C3;
- uint32_t ISHARP_MODE;
- uint32_t ISHARP_NOISEDET_THRESHOLD;
- uint32_t ISHARP_NOISE_GAIN_PWL;
- uint32_t ISHARP_LBA_PWL_SEG0;
- uint32_t ISHARP_LBA_PWL_SEG1;
- uint32_t ISHARP_LBA_PWL_SEG2;
- uint32_t ISHARP_LBA_PWL_SEG3;
- uint32_t ISHARP_LBA_PWL_SEG4;
- uint32_t ISHARP_LBA_PWL_SEG5;
- uint32_t ISHARP_DELTA_CTRL;
- uint32_t ISHARP_DELTA_DATA;
- uint32_t ISHARP_DELTA_INDEX;
- uint32_t ISHARP_NLDELTA_SOFT_CLIP;
+ DPP_REG_VARIABLE_LIST_DCN401;
};
struct dcn401_dpp_shift {
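The dcn401_dpp_registers change above folds the per-field uint32_t members into a single DPP_REG_VARIABLE_LIST_DCN401 macro so the list is written once and expanded where needed. A minimal sketch of that register-list-macro pattern; the names (DEMO_REG_LIST, demo_registers, demo_shift) are invented, and unlike DPP_REG_VARIABLE_LIST_DCN401 the demo macro takes the field type as a parameter, matching the "type ..." field lists used elsewhere in these headers.

#include <stdint.h>
#include <stdio.h>

#define DEMO_REG_LIST(type) \
        type DSCL_SC_MODE;  \
        type ISHARP_MODE

struct demo_registers { DEMO_REG_LIST(uint32_t); };
struct demo_shift     { DEMO_REG_LIST(uint8_t);  };

int main(void)
{
        struct demo_registers regs = { .DSCL_SC_MODE = 0x1, .ISHARP_MODE = 0x2 };
        struct demo_shift shift = { .DSCL_SC_MODE = 0, .ISHARP_MODE = 4 };

        printf("DSCL_SC_MODE raw=0x%x  ISHARP_MODE raw=0x%x shift=%u\n",
               (unsigned int)regs.DSCL_SC_MODE,
               (unsigned int)regs.ISHARP_MODE,
               (unsigned int)shift.ISHARP_MODE);
        return 0;
}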
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
index 61678b0a5a1e..4893b793fec0 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
@@ -16,14 +16,7 @@ static void dsc_write_to_registers(struct display_stream_compressor *dsc, const
/* Object I/F functions */
//static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz);
-static void dsc401_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s);
-static bool dsc401_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg);
-static void dsc401_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
- struct dsc_optc_config *dsc_optc_cfg);
//static bool dsc401_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps);
-static void dsc401_enable(struct display_stream_compressor *dsc, int opp_pipe);
-static void dsc401_disable(struct display_stream_compressor *dsc);
-static void dsc401_disconnect(struct display_stream_compressor *dsc);
static void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc);
static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz);
@@ -117,7 +110,7 @@ static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clo
/* this function reads DSC-related register fields to be logged later in dcn10_log_hw_state
* into a dcn_dsc_state struct.
*/
-static void dsc401_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s)
+void dsc401_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s)
{
struct dcn401_dsc *dsc401 = TO_DCN401_DSC(dsc);
@@ -134,7 +127,7 @@ static void dsc401_read_state(struct display_stream_compressor *dsc, struct dcn_
}
-static bool dsc401_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg)
+bool dsc401_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg)
{
struct dsc_optc_config dsc_optc_cfg;
struct dcn401_dsc *dsc401 = TO_DCN401_DSC(dsc);
@@ -145,7 +138,7 @@ static bool dsc401_validate_stream(struct display_stream_compressor *dsc, const
return dsc_prepare_config(dsc_cfg, &dsc401->reg_vals, &dsc_optc_cfg);
}
-static void dsc401_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
+void dsc401_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
struct dsc_optc_config *dsc_optc_cfg)
{
bool is_config_ok;
@@ -160,7 +153,7 @@ static void dsc401_set_config(struct display_stream_compressor *dsc, const struc
dsc_write_to_registers(dsc, &dsc401->reg_vals);
}
-static void dsc401_enable(struct display_stream_compressor *dsc, int opp_pipe)
+void dsc401_enable(struct display_stream_compressor *dsc, int opp_pipe)
{
struct dcn401_dsc *dsc401 = TO_DCN401_DSC(dsc);
int dsc_clock_en;
@@ -185,7 +178,7 @@ static void dsc401_enable(struct display_stream_compressor *dsc, int opp_pipe)
}
-static void dsc401_disable(struct display_stream_compressor *dsc)
+void dsc401_disable(struct display_stream_compressor *dsc)
{
struct dcn401_dsc *dsc401 = TO_DCN401_DSC(dsc);
int dsc_clock_en;
@@ -211,7 +204,7 @@ static void dsc401_wait_disconnect_pending_clear(struct display_stream_compresso
REG_WAIT(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN_STATUS, 0, 2, 50000);
}
-static void dsc401_disconnect(struct display_stream_compressor *dsc)
+void dsc401_disconnect(struct display_stream_compressor *dsc)
{
struct dcn401_dsc *dsc401 = TO_DCN401_DSC(dsc);
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h
index 3c9fa8988974..e3ca70058e64 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h
@@ -334,5 +334,12 @@ void dsc401_construct(struct dcn401_dsc *dsc,
void dsc401_set_fgcg(struct dcn401_dsc *dsc401, bool enable);
+void dsc401_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s);
+bool dsc401_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg);
+void dsc401_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
+ struct dsc_optc_config *dsc_optc_cfg);
+void dsc401_enable(struct display_stream_compressor *dsc, int opp_pipe);
+void dsc401_disable(struct display_stream_compressor *dsc);
+void dsc401_disconnect(struct display_stream_compressor *dsc);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index 9a0952f9004f..8bc67ca42197 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -112,6 +112,7 @@ bool dal_hw_factory_init(
case DCN_VERSION_3_21:
case DCN_VERSION_3_5:
case DCN_VERSION_3_51:
+ case DCN_VERSION_3_6:
dal_hw_factory_dcn32_init(factory);
return true;
case DCN_VERSION_4_01:
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index 9832247ee739..cb79a2832287 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -113,6 +113,7 @@ bool dal_hw_translate_init(
case DCN_VERSION_3_21:
case DCN_VERSION_3_5:
case DCN_VERSION_3_51:
+ case DCN_VERSION_3_6:
dal_hw_translate_dcn32_init(translate);
return true;
case DCN_VERSION_4_01:
diff --git a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c
index 03b4ac2f1991..0d2ae21abbdd 100644
--- a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c
@@ -262,7 +262,7 @@ void dcn31_hpo_dp_link_enc_set_link_test_pattern(
}
}
-static void fill_stream_allocation_row_info(
+void dcn31_fill_stream_allocation_row_info(
const struct link_mst_stream_allocation *stream_allocation,
uint32_t *src,
uint32_t *slots)
@@ -296,7 +296,7 @@ void dcn31_hpo_dp_link_enc_update_stream_allocation_table(
/* we should clean-up table each time */
if (table->stream_count >= 1) {
- fill_stream_allocation_row_info(
+ dcn31_fill_stream_allocation_row_info(
&table->stream_allocations[0],
&src,
&slots);
@@ -310,7 +310,7 @@ void dcn31_hpo_dp_link_enc_update_stream_allocation_table(
SAT_SLOT_COUNT, slots);
if (table->stream_count >= 2) {
- fill_stream_allocation_row_info(
+ dcn31_fill_stream_allocation_row_info(
&table->stream_allocations[1],
&src,
&slots);
@@ -324,7 +324,7 @@ void dcn31_hpo_dp_link_enc_update_stream_allocation_table(
SAT_SLOT_COUNT, slots);
if (table->stream_count >= 3) {
- fill_stream_allocation_row_info(
+ dcn31_fill_stream_allocation_row_info(
&table->stream_allocations[2],
&src,
&slots);
@@ -338,7 +338,7 @@ void dcn31_hpo_dp_link_enc_update_stream_allocation_table(
SAT_SLOT_COUNT, slots);
if (table->stream_count >= 4) {
- fill_stream_allocation_row_info(
+ dcn31_fill_stream_allocation_row_info(
&table->stream_allocations[3],
&src,
&slots);
diff --git a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.h b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.h
index 51f5781325e8..40859660e4dc 100644
--- a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.h
@@ -226,4 +226,10 @@ void dcn31_hpo_dp_link_enc_set_ffe(
const struct dc_link_settings *link_settings,
uint8_t ffe_preset);
+
+void dcn31_fill_stream_allocation_row_info(
+ const struct link_mst_stream_allocation *stream_allocation,
+ uint32_t *src,
+ uint32_t *slots);
+
#endif // __DAL_DCN31_HPO_LINK_ENCODER_H__
diff --git a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c
index 678db949cfe3..759b453385c4 100644
--- a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -323,7 +323,7 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute(
break;
case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
case COLOR_SPACE_2020_RGB_FULLRANGE:
- case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_2020_YCBCR_LIMITED:
case COLOR_SPACE_XR_RGB:
case COLOR_SPACE_MSREF_SCRGB:
case COLOR_SPACE_ADOBERGB:
@@ -336,6 +336,7 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute(
case COLOR_SPACE_CUSTOMPOINTS:
case COLOR_SPACE_UNKNOWN:
case COLOR_SPACE_YCBCR709_BLACK:
+ default:
/* do nothing */
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.c
index 8af01f579690..de3ec4fcade2 100644
--- a/drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.c
@@ -41,7 +41,7 @@
#define CTX \
enc3->base.ctx
-static bool dcn32_hpo_dp_link_enc_is_in_alt_mode(
+bool dcn32_hpo_dp_link_enc_is_in_alt_mode(
struct hpo_dp_link_encoder *enc)
{
struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
diff --git a/drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.h b/drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.h
index 176b1537d2a1..bea4e1a8ff90 100644
--- a/drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.h
@@ -54,6 +54,7 @@
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0, STREAM_VC_RATE_Y, mask_sh),\
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_UPDATE, SAT_UPDATE, mask_sh)
+bool dcn32_hpo_dp_link_enc_is_in_alt_mode(struct hpo_dp_link_encoder *enc);
void hpo_dp_link_encoder32_construct(struct dcn31_hpo_dp_link_encoder *enc31,
struct dc_context *ctx,
uint32_t inst,
@@ -61,4 +62,7 @@ void hpo_dp_link_encoder32_construct(struct dcn31_hpo_dp_link_encoder *enc31,
const struct dcn31_hpo_dp_link_encoder_shift *hpo_le_shift,
const struct dcn31_hpo_dp_link_encoder_mask *hpo_le_mask);
#endif // __DAL_DCN32_HPO_DP_LINK_ENCODER_H__
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.c
index d738a36f2132..7847c1c4927b 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.c
@@ -679,24 +679,6 @@ void hubbub1_update_dchub(
dh_data->dchub_info_valid = false;
}
-void hubbub1_toggle_watermark_change_req(struct hubbub *hubbub)
-{
- struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
-
- uint32_t watermark_change_req;
-
- REG_GET(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
- DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, &watermark_change_req);
-
- if (watermark_change_req)
- watermark_change_req = 0;
- else
- watermark_change_req = 1;
-
- REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
- DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, watermark_change_req);
-}
-
void hubbub1_soft_reset(struct hubbub *hubbub, bool reset)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h
index 9fbd45c7dfef..fa5c4c18ed59 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h
@@ -489,9 +489,6 @@ void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow);
bool hubbub1_is_allow_self_refresh_enabled(struct hubbub *hubub);
-void hubbub1_toggle_watermark_change_req(
- struct hubbub *hubbub);
-
void hubbub1_wm_read_state(struct hubbub *hubbub,
struct dcn_hubbub_wm *wm);
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
index dce7269959ce..6d41953011f5 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
@@ -46,7 +46,7 @@
#define DCN35_CRB_SEGMENT_SIZE_KB 64
-static void dcn35_init_crb(struct hubbub *hubbub)
+void dcn35_init_crb(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
@@ -71,7 +71,7 @@ static void dcn35_init_crb(struct hubbub *hubbub)
REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x5FF);
}
-static void dcn35_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase)
+void dcn35_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
unsigned int compbuf_size_segments = (compbuf_size_kb + DCN35_CRB_SEGMENT_SIZE_KB - 1) / DCN35_CRB_SEGMENT_SIZE_KB;
@@ -255,7 +255,7 @@ static bool hubbub35_program_stutter_z8_watermarks(
return wm_pending;
}
-static void hubbub35_get_dchub_ref_freq(struct hubbub *hubbub,
+void hubbub35_get_dchub_ref_freq(struct hubbub *hubbub,
unsigned int dccg_ref_freq_inKhz,
unsigned int *dchub_ref_freq_inKhz)
{
@@ -295,7 +295,7 @@ static void hubbub35_get_dchub_ref_freq(struct hubbub *hubbub,
}
-static bool hubbub35_program_watermarks(
+bool hubbub35_program_watermarks(
struct hubbub *hubbub,
union dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -335,7 +335,7 @@ static bool hubbub35_program_watermarks(
}
/* Copy values from WM set A to all other sets */
-static void hubbub35_init_watermarks(struct hubbub *hubbub)
+void hubbub35_init_watermarks(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
uint32_t reg;
@@ -397,7 +397,7 @@ static void hubbub35_init_watermarks(struct hubbub *hubbub)
}
-static void hubbub35_wm_read_state(struct hubbub *hubbub,
+void hubbub35_wm_read_state(struct hubbub *hubbub,
struct dcn_hubbub_wm *wm)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
@@ -514,7 +514,7 @@ static void hubbub35_set_fgcg(struct dcn20_hubbub *hubbub2, bool enable)
REG_UPDATE(DCHUBBUB_CLOCK_CNTL, DCHUBBUB_FGCG_REP_DIS, !enable);
}
-static void hubbub35_init(struct hubbub *hubbub)
+void hubbub35_init(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
/*Enable clock gaters*/
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h
index 54cf00ffceb8..23fecf88556c 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h
@@ -152,4 +152,20 @@ void hubbub35_construct(struct dcn20_hubbub *hubbub2,
int det_size_kb,
int pixel_chunk_size_kb,
int config_return_buffer_size_kb);
+
+void hubbub35_wm_read_state(struct hubbub *hubbub,
+ struct dcn_hubbub_wm *wm);
+void hubbub35_get_dchub_ref_freq(struct hubbub *hubbub,
+ unsigned int dccg_ref_freq_inKhz,
+ unsigned int *dchub_ref_freq_inKhz);
+bool hubbub35_program_watermarks(
+ struct hubbub *hubbub,
+ union dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+void hubbub35_init_watermarks(struct hubbub *hubbub);
+void dcn35_program_compbuf_size(struct hubbub *hubbub,
+ unsigned int compbuf_size_kb, bool safe_to_increase);
+void dcn35_init_crb(struct hubbub *hubbub);
+void hubbub35_init(struct hubbub *hubbub);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
index 6968087a3605..62369be070ea 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
@@ -280,9 +280,8 @@
type MCACHEID_MALL_PREF_1H_P0;\
type MCACHEID_MALL_PREF_2H_P0;\
type MCACHEID_MALL_PREF_1H_P1;\
- type MCACHEID_MALL_PREF_2H_P1
-
-
+ type MCACHEID_MALL_PREF_2H_P1;\
+ type HUBP_FGCG_REP_DIS
struct dcn_hubp2_registers {
DCN401_HUBP_REG_COMMON_VARIABLE_LIST;
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
index c2900c79a2d3..7fd582a8a4ba 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
@@ -44,7 +44,7 @@ void hubp31_set_unbounded_requesting(struct hubp *hubp, bool enable)
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE(DCHUBP_CNTL, HUBP_UNBOUNDED_REQ_MODE, enable);
- REG_UPDATE(CURSOR_CONTROL, CURSOR_REQ_MODE, enable);
+ REG_UPDATE(CURSOR_CONTROL, CURSOR_REQ_MODE, 1);
}
void hubp31_soft_reset(struct hubp *hubp, bool reset)
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
index 5661d7a80d54..6d060ba12da8 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
@@ -45,7 +45,7 @@ void hubp35_set_fgcg(struct hubp *hubp, bool enable)
REG_UPDATE(HUBP_CLK_CNTL, HUBP_FGCG_REP_DIS, !enable);
}
-static void hubp35_init(struct hubp *hubp)
+void hubp35_init(struct hubp *hubp)
{
hubp3_init(hubp);
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.h
index d913f80b3130..934836717f32 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.h
@@ -72,4 +72,5 @@ void hubp35_program_surface_config(
bool horizontal_mirror,
unsigned int compat_level);
+void hubp35_init(struct hubp *hubp);
#endif /* __DC_HUBP_DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h
index 84c8f8707c5d..f66a38f43a09 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h
@@ -644,10 +644,18 @@ struct dce_hwseq_registers {
uint32_t DPP_TOP0_DPP_CRC_CTRL;
uint32_t DPP_TOP0_DPP_CRC_VAL_R_G;
uint32_t DPP_TOP0_DPP_CRC_VAL_B_A;
+ uint32_t DPP_TOP0_DPP_CRC_VAL_R;
+ uint32_t DPP_TOP0_DPP_CRC_VAL_G;
+ uint32_t DPP_TOP0_DPP_CRC_VAL_B;
+ uint32_t DPP_TOP0_DPP_CRC_VAL_A;
uint32_t MPC_CRC_CTRL;
uint32_t MPC_CRC_RESULT_GB;
uint32_t MPC_CRC_RESULT_C;
uint32_t MPC_CRC_RESULT_AR;
+ uint32_t MPC_CRC_RESULT_R;
+ uint32_t MPC_CRC_RESULT_G;
+ uint32_t MPC_CRC_RESULT_B;
+ uint32_t MPC_CRC_RESULT_A;
uint32_t D1VGA_CONTROL;
uint32_t D2VGA_CONTROL;
uint32_t D3VGA_CONTROL;
@@ -1236,6 +1244,7 @@ struct dce_hwseq_registers {
type DOMAIN24_PGFSM_PWR_STATUS; \
type DOMAIN25_PGFSM_PWR_STATUS; \
type DOMAIN_DESIRED_PWR_STATE;
+
struct dce_hwseq_shift {
HWSEQ_REG_FIELD_LIST(uint8_t)
HWSEQ_DCN_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce100/dce100_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce100/dce100_hwseq.c
index f1f14796a3da..0d7e28260db1 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce100/dce100_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce100/dce100_hwseq.c
@@ -138,5 +138,35 @@ void dce100_hw_sequencer_construct(struct dc *dc)
dc->hwseq->funcs.enable_display_power_gating = dce100_enable_display_power_gating;
dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
+ dc->hwss.clear_surface_dcc_and_tiling = dce100_reset_surface_dcc_and_tiling;
+}
+
+/**
+ * dce100_reset_surface_dcc_and_tiling - Set DCC and tiling in DCE to their disabled mode.
+ *
+ * @pipe_ctx: Pointer to the pipe context structure.
+ * @plane_state: Surface state
+ * @clear_tiling: If true, set tiling to linear; otherwise leave tiling unchanged
+ *
+ * This function is responsible for calling the memory input (MI) block to
+ * disable DCC and set tiling to the linear mode.
+ */
+void dce100_reset_surface_dcc_and_tiling(struct pipe_ctx *pipe_ctx,
+ struct dc_plane_state *plane_state,
+ bool clear_tiling)
+{
+ struct mem_input *mi = pipe_ctx->plane_res.mi;
+
+ if (!mi)
+ return;
+
+ /* if framebuffer is tiled, disable tiling */
+ if (clear_tiling && mi->funcs->mem_input_clear_tiling)
+ mi->funcs->mem_input_clear_tiling(mi);
+
+ /* force page flip to see the new content of the framebuffer */
+ mi->funcs->mem_input_program_surface_flip_and_addr(mi,
+ &plane_state->address,
+ true);
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce100/dce100_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce100/dce100_hwseq.h
index 34518da20009..fadfa794f96b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce100/dce100_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce100/dce100_hwseq.h
@@ -46,5 +46,9 @@ bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id,
struct dc_bios *dcb,
enum pipe_gating_control power_gating);
+void dce100_reset_surface_dcc_and_tiling(struct pipe_ctx *pipe_ctx,
+ struct dc_plane_state *plane_state,
+ bool clear_tiling);
+
#endif /* __DC_HWSS_DCE100_H__ */
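dce100_reset_surface_dcc_and_tiling() is wired into dc->hwss.clear_surface_dcc_and_tiling for the DCE paths in the hunks above. A hedged sketch of how such an optional hw_sequencer hook is typically consumed, using simplified stand-in structs rather than the real dc/hwss definitions:

#include <stdbool.h>
#include <stdio.h>

struct demo_pipe_ctx { int inst; };
struct demo_plane_state { unsigned long long address; };

struct demo_hwss_funcs {
        void (*clear_surface_dcc_and_tiling)(struct demo_pipe_ctx *pipe_ctx,
                                             struct demo_plane_state *plane_state,
                                             bool clear_tiling);
};

/* Stand-in for the DCE100 implementation: disable DCC/tiling, then reflip. */
static void demo_clear_dcc_and_tiling(struct demo_pipe_ctx *pipe_ctx,
                                      struct demo_plane_state *plane_state,
                                      bool clear_tiling)
{
        printf("pipe %d: disable DCC%s, reflip to 0x%llx\n", pipe_ctx->inst,
               clear_tiling ? " and clear tiling" : "", plane_state->address);
}

int main(void)
{
        struct demo_hwss_funcs hwss = {
                .clear_surface_dcc_and_tiling = demo_clear_dcc_and_tiling,
        };
        struct demo_pipe_ctx pipe = { .inst = 0 };
        struct demo_plane_state plane = { .address = 0x1000 };

        /* optional hook: guard the function pointer before calling it */
        if (hwss.clear_surface_dcc_and_tiling)
                hwss.clear_surface_dcc_and_tiling(&pipe, &plane, true);
        return 0;
}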
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 81f4c386c287..5656d10368ad 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -33,6 +33,7 @@
#include "dce110_hwseq.h"
#include "dce110/dce110_timing_generator.h"
#include "dce/dce_hwseq.h"
+#include "dce100/dce100_hwseq.h"
#include "gpio_service_interface.h"
#include "dce110/dce110_compressor.h"
@@ -1065,7 +1066,8 @@ void dce110_edp_backlight_control(
DC_LOG_DC("edp_receiver_ready_T9 skipped\n");
}
- if (!enable && link->dpcd_sink_ext_caps.bits.oled) {
+ if (!enable) {
+ /* follow the OEM panel config's requirement */
pre_T11_delay += link->panel_config.pps.extra_pre_t11_ms;
msleep(pre_T11_delay);
}
@@ -1152,9 +1154,12 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
struct timing_generator *tg = pipe_ctx->stream_res.tg;
struct dtbclk_dto_params dto_params = {0};
int dp_hpo_inst;
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
+ if (!dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) {
pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
pipe_ctx->stream_res.stream_enc);
@@ -1654,9 +1659,7 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
params.vertical_total_min = stream->adjust.v_total_min;
params.vertical_total_max = stream->adjust.v_total_max;
- if (pipe_ctx->stream_res.tg->funcs->set_drr)
- pipe_ctx->stream_res.tg->funcs->set_drr(
- pipe_ctx->stream_res.tg, &params);
+ set_drr_and_clear_adjust_pending(pipe_ctx, stream, &params);
// DRR should set trigger event to monitor surface update event
if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
@@ -1834,10 +1837,10 @@ static void clean_up_dsc_blocks(struct dc *dc)
struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
int i;
- if (dc->ctx->dce_version != DCN_VERSION_3_5 &&
- dc->ctx->dce_version != DCN_VERSION_3_51)
+ if (!dc->caps.is_apu ||
+ dc->ctx->dce_version < DCN_VERSION_3_15)
return;
-
+ /* VBIOS supports DSC starting from DCN 3.1.5 */
for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
struct dcn_dsc_state s = {0};
@@ -2104,8 +2107,7 @@ static void set_drr(struct pipe_ctx **pipe_ctx,
struct timing_generator *tg = pipe_ctx[i]->stream_res.tg;
if ((tg != NULL) && tg->funcs) {
- if (tg->funcs->set_drr)
- tg->funcs->set_drr(tg, &params);
+ set_drr_and_clear_adjust_pending(pipe_ctx[i], pipe_ctx[i]->stream, &params);
if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
if (tg->funcs->set_static_screen_control)
tg->funcs->set_static_screen_control(
@@ -3331,6 +3333,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.post_unlock_program_front_end = dce110_post_unlock_program_front_end,
.update_plane_addr = update_plane_addr,
.update_pending_status = dce110_update_pending_status,
+ .clear_surface_dcc_and_tiling = dce100_reset_surface_dcc_and_tiling,
.enable_accelerated_mode = dce110_enable_accelerated_mode,
.enable_timing_synchronization = dce110_enable_timing_synchronization,
.enable_per_frame_crtc_position_reset = dce110_enable_per_frame_crtc_position_reset,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce120/dce120_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce120/dce120_hwseq.c
index 22ee304ef9cf..2a62f63d0357 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce120/dce120_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce120/dce120_hwseq.c
@@ -29,6 +29,7 @@
#include "dce120_hwseq.h"
#include "dce/dce_hwseq.h"
+#include "dce100/dce100_hwseq.h"
#include "dce110/dce110_hwseq.h"
#include "dce/dce_12_0_offset.h"
@@ -264,5 +265,6 @@ void dce120_hw_sequencer_construct(struct dc *dc)
dce110_hw_sequencer_construct(dc);
dc->hwseq->funcs.enable_display_power_gating = dce120_enable_display_power_gating;
dc->hwss.update_dchub = dce120_update_dchub;
+ dc->hwss.clear_surface_dcc_and_tiling = dce100_reset_surface_dcc_and_tiling;
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce80/dce80_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce80/dce80_hwseq.c
index 0a054e880801..76fd45550c5e 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce80/dce80_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce80/dce80_hwseq.c
@@ -50,5 +50,6 @@ void dce80_hw_sequencer_construct(struct dc *dc)
dc->hwss.pipe_control_lock = dce_pipe_control_lock;
dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
+ dc->hwss.clear_surface_dcc_and_tiling = dce100_reset_surface_dcc_and_tiling;
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 44e405e9bc97..912f96323ed6 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -415,7 +415,8 @@ void dcn10_log_hw_state(struct dc *dc,
struct timing_generator *tg = pool->timing_generators[i];
struct dcn_otg_state s = {0};
/* Read shared OTG state registers for all DCNx */
- optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
+ if (tg->funcs->read_otg_state)
+ tg->funcs->read_otg_state(tg, &s);
/*
* For DCN2 and greater, a register on the OPP is used to
@@ -1112,9 +1113,7 @@ static void dcn10_reset_back_end_for_pipe(
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
- if (pipe_ctx->stream_res.tg->funcs->set_drr)
- pipe_ctx->stream_res.tg->funcs->set_drr(
- pipe_ctx->stream_res.tg, NULL);
+ set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
}
@@ -1992,20 +1991,11 @@ static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
dc->hwss.get_position(&pipe_ctx, 1, &position);
vpos = position.vertical_count;
- /* Avoid wraparound calculation issues */
- vupdate_start += stream->timing.v_total;
- vupdate_end += stream->timing.v_total;
- vpos += stream->timing.v_total;
-
if (vpos <= vupdate_start) {
/* VPOS is in VACTIVE or back porch. */
lines_to_vupdate = vupdate_start - vpos;
- } else if (vpos > vupdate_end) {
- /* VPOS is in the front porch. */
- return;
} else {
- /* VPOS is in VUPDATE. */
- lines_to_vupdate = 0;
+ lines_to_vupdate = stream->timing.v_total - vpos + vupdate_start;
}
/* Calculate time until VUPDATE in microseconds. */
@@ -2013,13 +2003,18 @@ static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
us_to_vupdate = lines_to_vupdate * us_per_line;
+ /* Stall out until the cursor update completes. */
+ if (vupdate_end < vupdate_start)
+ vupdate_end += stream->timing.v_total;
+
+ /* Position is already within the VUPDATE start/end window */
+ if (lines_to_vupdate > stream->timing.v_total - vupdate_end + vupdate_start)
+ us_to_vupdate = 0;
+
/* 70 us is a conservative estimate of cursor update time*/
if (us_to_vupdate > 70)
return;
- /* Stall out until the cursor update completes. */
- if (vupdate_end < vupdate_start)
- vupdate_end += stream->timing.v_total;
us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
udelay(us_to_vupdate + us_vupdate);
}
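The rework above drops the blanket wraparound offsets and the early return in the front porch: lines_to_vupdate now wraps explicitly, and the wait is zeroed when the position already falls inside the VUPDATE window. A standalone sketch of the new arithmetic with made-up timing numbers (everything below is illustrative, not taken from the patch):

#include <stdio.h>

/*
 * Standalone illustration of the reworked wait computation; the numbers
 * are invented for the example and are not taken from the patch.
 */
static unsigned int lines_until_vupdate(unsigned int vpos,
					unsigned int vupdate_start,
					unsigned int v_total)
{
	/* VPOS in VACTIVE or back porch: simple difference. */
	if (vpos <= vupdate_start)
		return vupdate_start - vpos;
	/* Otherwise wrap around the end of the frame. */
	return v_total - vpos + vupdate_start;
}

int main(void)
{
	unsigned int v_total = 1125, vupdate_start = 1084, vupdate_end = 1089;
	unsigned int vpos = 1086;	/* inside the VUPDATE window this frame */
	unsigned int us_per_line = 14;	/* made-up line time for the example */

	unsigned int lines = lines_until_vupdate(vpos, vupdate_start, v_total);
	unsigned int us_to_vupdate = lines * us_per_line;

	/* Position already sits inside the VUPDATE window: no wait needed. */
	if (lines > v_total - vupdate_end + vupdate_start)
		us_to_vupdate = 0;

	printf("lines=%u us_to_vupdate=%u\n", lines, us_to_vupdate);
	return 0;
}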
@@ -3221,8 +3216,7 @@ void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
struct timing_generator *tg = pipe_ctx[i]->stream_res.tg;
if ((tg != NULL) && tg->funcs) {
- if (tg->funcs->set_drr)
- tg->funcs->set_drr(tg, &params);
+ set_drr_and_clear_adjust_pending(pipe_ctx[i], pipe_ctx[i]->stream, &params);
if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
if (tg->funcs->set_static_screen_control)
tg->funcs->set_static_screen_control(
@@ -3431,52 +3425,6 @@ void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
hubbub->funcs->update_dchub(hubbub, dh_data);
}
-static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
-{
- struct pipe_ctx *test_pipe, *split_pipe;
- const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
- struct rect r1 = scl_data->recout, r2, r2_half;
- int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
- int cur_layer = pipe_ctx->plane_state->layer_index;
-
- /**
- * Disable the cursor if there's another pipe above this with a
- * plane that contains this pipe's viewport to prevent double cursor
- * and incorrect scaling artifacts.
- */
- for (test_pipe = pipe_ctx->top_pipe; test_pipe;
- test_pipe = test_pipe->top_pipe) {
- // Skip invisible layer and pipe-split plane on same layer
- if (!test_pipe->plane_state ||
- !test_pipe->plane_state->visible ||
- test_pipe->plane_state->layer_index == cur_layer)
- continue;
-
- r2 = test_pipe->plane_res.scl_data.recout;
- r2_r = r2.x + r2.width;
- r2_b = r2.y + r2.height;
-
- /**
- * There is another half plane on same layer because of
- * pipe-split, merge together per same height.
- */
- for (split_pipe = pipe_ctx->top_pipe; split_pipe;
- split_pipe = split_pipe->top_pipe)
- if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
- r2_half = split_pipe->plane_res.scl_data.recout;
- r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
- r2.width = r2.width + r2_half.width;
- r2_r = r2.x + r2.width;
- break;
- }
-
- if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
- return true;
- }
-
- return false;
-}
-
void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
@@ -3576,7 +3524,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
pos_cpy.enable = false;
- if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
+ if (pos_cpy.enable && resource_can_pipe_disable_cursor(pipe_ctx))
pos_cpy.enable = false;
@@ -3969,3 +3917,32 @@ void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
dcc_en_bits[i] = s->dcc_en ? 1 : 0;
}
}
+
+/**
+ * dcn10_reset_surface_dcc_and_tiling - Set DCC and tiling in DCN to their disabled mode.
+ *
+ * @pipe_ctx: Pointer to the pipe context structure.
+ * @plane_state: Surface state
+ * @clear_tiling: If true, set tiling to linear; otherwise leave tiling unchanged
+ *
+ * This function is responsible for calling the HUBP block to disable DCC and
+ * set tiling to the linear mode.
+ */
+void dcn10_reset_surface_dcc_and_tiling(struct pipe_ctx *pipe_ctx,
+ struct dc_plane_state *plane_state,
+ bool clear_tiling)
+{
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+
+ if (!hubp)
+ return;
+
+ /* if framebuffer is tiled, disable tiling */
+ if (clear_tiling && hubp->funcs->hubp_clear_tiling)
+ hubp->funcs->hubp_clear_tiling(hubp);
+
+ /* force page flip to see the new content of the framebuffer */
+ hubp->funcs->hubp_program_surface_flip_and_addr(hubp,
+ &plane_state->address,
+ true);
+}
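dcn10_reset_surface_dcc_and_tiling() is registered throughout this series as the new .clear_surface_dcc_and_tiling hw_sequencer hook. A minimal caller sketch, assuming only the hook and fields visible in these hunks; the pipe iteration and the decision to clear tiling are illustrative, not part of the patch:

/* Illustrative caller only; assumes the hook as declared in this series. */
static void clear_dcc_and_tiling_for_plane(struct dc *dc,
					   struct dc_plane_state *plane_state,
					   bool clear_tiling)
{
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx =
			&dc->current_state->res_ctx.pipe_ctx[i];

		/* Only touch pipes that scan out this surface. */
		if (pipe_ctx->plane_state != plane_state)
			continue;

		if (dc->hwss.clear_surface_dcc_and_tiling)
			dc->hwss.clear_surface_dcc_and_tiling(pipe_ctx,
							      plane_state,
							      clear_tiling);
	}
}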
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
index bc5dd68a2408..42ffd1e1299c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
@@ -207,4 +207,8 @@ void dcn10_update_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
int mpcc_id);
+void dcn10_reset_surface_dcc_and_tiling(struct pipe_ctx *pipe_ctx,
+ struct dc_plane_state *plane_state,
+ bool clear_tiling);
+
#endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c
index 5e51e1761707..079c226c1097 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c
@@ -40,6 +40,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.update_plane_addr = dcn10_update_plane_addr,
.update_dchub = dcn10_update_dchub,
.update_pending_status = dcn10_update_pending_status,
+ .clear_surface_dcc_and_tiling = dcn10_reset_surface_dcc_and_tiling,
.program_output_csc = dcn10_program_output_csc,
.enable_accelerated_mode = dce110_enable_accelerated_mode,
.enable_timing_synchronization = dcn10_enable_timing_synchronization,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index a5e18ab72394..926c08e790c1 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -952,9 +952,7 @@ enum dc_status dcn20_enable_stream_timing(
params.vertical_total_max = stream->adjust.v_total_max;
params.vertical_total_mid = stream->adjust.v_total_mid;
params.vertical_total_mid_frame_num = stream->adjust.v_total_mid_frame_num;
- if (pipe_ctx->stream_res.tg->funcs->set_drr)
- pipe_ctx->stream_res.tg->funcs->set_drr(
- pipe_ctx->stream_res.tg, &params);
+ set_drr_and_clear_adjust_pending(pipe_ctx, stream, &params);
// DRR should set trigger event to monitor surface update event
if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
@@ -1266,14 +1264,18 @@ static void dcn20_power_on_plane_resources(
struct dce_hwseq *hws,
struct pipe_ctx *pipe_ctx)
{
+ uint32_t org_ip_request_cntl = 0;
+
DC_LOGGER_INIT(hws->ctx->logger);
if (hws->funcs.dpp_root_clock_control)
hws->funcs.dpp_root_clock_control(hws, pipe_ctx->plane_res.dpp->inst, true);
if (REG(DC_IP_REQUEST_CNTL)) {
- REG_SET(DC_IP_REQUEST_CNTL, 0,
- IP_REQUEST_EN, 1);
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 1);
if (hws->funcs.dpp_pg_control)
hws->funcs.dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
@@ -1281,8 +1283,10 @@ static void dcn20_power_on_plane_resources(
if (hws->funcs.hubp_pg_control)
hws->funcs.hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);
- REG_SET(DC_IP_REQUEST_CNTL, 0,
- IP_REQUEST_EN, 0);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 0);
+
DC_LOG_DEBUG(
"Un-gated front end for pipe %d\n", pipe_ctx->plane_res.hubp->inst);
}
@@ -2772,7 +2776,8 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
if (is_two_pixels_per_container || params.opp_cnt > 1)
params.timing.pix_clk_100hz /= 2;
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine(
+ if (pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine)
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine(
pipe_ctx->stream_res.stream_enc, params.opp_cnt > 1);
pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
}
@@ -2849,9 +2854,7 @@ void dcn20_reset_back_end_for_pipe(
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- if (pipe_ctx->stream_res.tg->funcs->set_drr)
- pipe_ctx->stream_res.tg->funcs->set_drr(
- pipe_ctx->stream_res.tg, NULL);
+ set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);
/* TODO - convert symclk_ref_cnts for otg to a bit map to solve
* the case where the same symclk is shared across multiple otg
* instances
@@ -3013,9 +3016,12 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
enum phyd32clk_clock_source phyd32clk;
int dp_hpo_inst;
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
+ if (!dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+
if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
dto_params.otg_inst = tg->inst;
dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
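The same encoder-selection pattern recurs in dce110_disable_stream(), dcn20_enable_stream(), dcn401_enable_stream() and the link_hwss_dio.c hunks further down: use the DIO encoder recorded in the link resource, and fall back to the link_enc_cfg lookup only while unify_link_enc_assignment is disabled. A sketch of that pattern as a helper (the helper itself is hypothetical; the patch open-codes it at every call site):

/* Hypothetical helper; the series open-codes this at each call site. */
static struct link_encoder *get_dio_link_enc(const struct dc *dc,
					     const struct link_resource *link_res,
					     const struct dc_link *link)
{
	struct link_encoder *link_enc = link_res->dio_link_enc;

	/* Legacy path: resolve the encoder via link_enc_cfg tracking. */
	if (!dc->config.unify_link_enc_assignment)
		link_enc = link_enc_cfg_get_link_enc(link);

	ASSERT(link_enc);
	return link_enc;
}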
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c
index 32707b344f0b..ad253c586ea1 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c
@@ -36,6 +36,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .clear_surface_dcc_and_tiling = dcn10_reset_surface_dcc_and_tiling,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c
index 78351408e864..dec57fb4c05c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c
@@ -36,6 +36,7 @@ static const struct hw_sequencer_funcs dcn201_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .clear_surface_dcc_and_tiling = dcn10_reset_surface_dcc_and_tiling,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.post_unlock_program_front_end = dcn10_post_unlock_program_front_end,
.update_plane_addr = dcn201_update_plane_addr,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c
index e044e9e0a3a1..c7701a8b574a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c
@@ -37,6 +37,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .clear_surface_dcc_and_tiling = dcn10_reset_surface_dcc_and_tiling,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
index c32764aef884..2ac5d54d1626 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
@@ -37,6 +37,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .clear_surface_dcc_and_tiling = dcn10_reset_surface_dcc_and_tiling,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
index dcb27cdbce73..8d7ceb7b32b8 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
@@ -39,6 +39,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .clear_surface_dcc_and_tiling = dcn10_reset_surface_dcc_and_tiling,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index 03ba01f4ace1..f38340aa3f15 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -396,6 +396,11 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
else if (pipe_ctx->stream->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
+ if (pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->update_dp_info_packets_sdp_line_num)
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->update_dp_info_packets_sdp_line_num(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &pipe_ctx->stream_res.encoder_info_frame);
+
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->update_dp_info_packets(
pipe_ctx->stream_res.hpo_dp_stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
@@ -538,9 +543,7 @@ static void dcn31_reset_back_end_for_pipe(
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
- if (pipe_ctx->stream_res.tg->funcs->set_drr)
- pipe_ctx->stream_res.tg->funcs->set_drr(
- pipe_ctx->stream_res.tg, NULL);
+ set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);
/* DPMS may already disable or */
/* dpms_off status is incorrect due to fastboot
@@ -616,7 +619,8 @@ void dcn31_reset_hw_ctx_wrap(
}
/* New dc_state in the process of being applied to hardware. */
- link_enc_cfg_set_transient_mode(dc, dc->current_state, context);
+ if (!dc->config.unify_link_enc_assignment)
+ link_enc_cfg_set_transient_mode(dc, dc->current_state, context);
}
void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
index fb2ffb637931..556f4fe57eda 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
@@ -40,6 +40,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .clear_surface_dcc_and_tiling = dcn10_reset_surface_dcc_and_tiling,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
index 21ef03a76229..f5112742edf9 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
@@ -42,6 +42,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .clear_surface_dcc_and_tiling = dcn10_reset_surface_dcc_and_tiling,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index ee4de9ddfef4..cd0adf72b223 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -316,10 +316,12 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
cmd.cab.cab_alloc_ways = (uint8_t)ways;
dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+ DC_LOG_MALL("enable scanout from MALL");
return true;
}
+ DC_LOG_MALL("surface cannot fit in CAB, disabling scanout from MALL\n");
return false;
}
@@ -1322,7 +1324,8 @@ void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,
params.timing.pix_clk_100hz /= 2;
params.pix_per_cycle = 2;
}
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine(
+ if (pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine)
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine(
pipe_ctx->stream_res.stream_enc, params.pix_per_cycle > 1);
pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
index e4d149eff10f..b971356d30b1 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
@@ -39,6 +39,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .clear_surface_dcc_and_tiling = dcn10_reset_surface_dcc_and_tiling,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index b907ad1acedd..922b8d71cf1a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -1473,8 +1473,7 @@ void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
num_frames = 2 * (frame_rate % 60);
}
}
- if (tg->funcs->set_drr)
- tg->funcs->set_drr(tg, &params);
+ set_drr_and_clear_adjust_pending(pipe_ctx[i], pipe_ctx[i]->stream, &params);
if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
if (tg->funcs->set_static_screen_control)
tg->funcs->set_static_screen_control(
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index c7acaf97974c..6a82a865209c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -44,6 +44,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .clear_surface_dcc_and_tiling = dcn10_reset_surface_dcc_and_tiling,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
index 4f73e7f551ac..902a96940a01 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
@@ -43,6 +43,7 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .clear_surface_dcc_and_tiling = dcn10_reset_surface_dcc_and_tiling,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 555a9f590cd7..8f5da0ded850 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -830,10 +830,7 @@ enum dc_status dcn401_enable_stream_timing(
}
hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);
-
- if (pipe_ctx->stream_res.tg->funcs->set_drr)
- pipe_ctx->stream_res.tg->funcs->set_drr(
- pipe_ctx->stream_res.tg, &params);
+ set_drr_and_clear_adjust_pending(pipe_ctx, stream, &params);
/* Event triggers and num frames initialized for DRR, but can be
* later updated for PSR use. Note DRR trigger events are generated
@@ -927,9 +924,12 @@ void dcn401_enable_stream(struct pipe_ctx *pipe_ctx)
int dp_hpo_inst = 0;
unsigned int tmds_div = PIXEL_RATE_DIV_NA;
unsigned int unused_div = PIXEL_RATE_DIV_NA;
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
+ if (!dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+
dcn401_enable_stream_calc(pipe_ctx, &dp_hpo_inst, &phyd32clk,
&tmds_div, &early_control);
@@ -972,52 +972,6 @@ void dcn401_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, enable);
}
-static bool dcn401_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
-{
- struct pipe_ctx *test_pipe, *split_pipe;
- const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
- struct rect r1 = scl_data->recout, r2, r2_half;
- int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
- int cur_layer = pipe_ctx->plane_state->layer_index;
-
- /**
- * Disable the cursor if there's another pipe above this with a
- * plane that contains this pipe's viewport to prevent double cursor
- * and incorrect scaling artifacts.
- */
- for (test_pipe = pipe_ctx->top_pipe; test_pipe;
- test_pipe = test_pipe->top_pipe) {
- // Skip invisible layer and pipe-split plane on same layer
- if (!test_pipe->plane_state ||
- !test_pipe->plane_state->visible ||
- test_pipe->plane_state->layer_index == cur_layer)
- continue;
-
- r2 = test_pipe->plane_res.scl_data.recout;
- r2_r = r2.x + r2.width;
- r2_b = r2.y + r2.height;
-
- /**
- * There is another half plane on same layer because of
- * pipe-split, merge together per same height.
- */
- for (split_pipe = pipe_ctx->top_pipe; split_pipe;
- split_pipe = split_pipe->top_pipe)
- if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
- r2_half = split_pipe->plane_res.scl_data.recout;
- r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
- r2.width = r2.width + r2_half.width;
- r2_r = r2.x + r2.width;
- break;
- }
-
- if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
- return true;
- }
-
- return false;
-}
-
void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy)
{
if (cursor_width <= 128) {
@@ -1208,7 +1162,7 @@ void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
pos_cpy.x = (uint32_t)x_pos;
pos_cpy.y = (uint32_t)y_pos;
- if (pos_cpy.enable && dcn401_can_pipe_disable_cursor(pipe_ctx))
+ if (pos_cpy.enable && resource_can_pipe_disable_cursor(pipe_ctx))
pos_cpy.enable = false;
x_pos = pos_cpy.x - param.recout.x;
@@ -1863,9 +1817,8 @@ void dcn401_reset_back_end_for_pipe(
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- if (pipe_ctx->stream_res.tg->funcs->set_drr)
- pipe_ctx->stream_res.tg->funcs->set_drr(
- pipe_ctx->stream_res.tg, NULL);
+ set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);
+
/* TODO - convert symclk_ref_cnts for otg to a bit map to solve
* the case where the same symclk is shared across multiple otg
* instances
@@ -1980,7 +1933,7 @@ static void dcn401_program_tg(
hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
}
-static void dcn401_program_pipe(
+void dcn401_program_pipe(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
@@ -2656,3 +2609,37 @@ void dcn401_detect_pipe_changes(struct dc_state *old_state,
new_pipe->update_flags.bits.test_pattern_changed = 1;
}
}
+
+void dcn401_plane_atomic_power_down(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ uint32_t org_ip_request_cntl = 0;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 1);
+
+ if (hws->funcs.dpp_pg_control)
+ hws->funcs.dpp_pg_control(hws, dpp->inst, false);
+
+ if (hws->funcs.hubp_pg_control)
+ hws->funcs.hubp_pg_control(hws, hubp->inst, false);
+
+ hubp->funcs->hubp_reset(hubp);
+ dpp->funcs->dpp_reset(dpp);
+
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 0);
+
+ DC_LOG_DEBUG(
+ "Power gated front end %d\n", hubp->inst);
+
+ if (hws->funcs.dpp_root_clock_control)
+ hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
+}
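dcn20_power_on_plane_resources() above and this new dcn401_plane_atomic_power_down() share the same register discipline: read IP_REQUEST_EN first, raise it only if it was clear, and drop it again only if this function raised it, so a caller that already holds the request enable is left undisturbed. The pattern, condensed (register names as in the hunks; the middle section is a placeholder):

	uint32_t org_ip_request_cntl = 0;

	/* Remember whether IP_REQUEST_EN was already asserted by a caller. */
	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);

	/* ... program or power-gate the front end here ... */

	/* Only release the request enable if this function asserted it. */
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);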
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
index 17cea748789e..781cf0efccc6 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
@@ -93,6 +93,10 @@ void dcn401_reset_back_end_for_pipe(
void dcn401_reset_hw_ctx_wrap(
struct dc *dc,
struct dc_state *context);
+void dcn401_program_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx);
void dcn401_program_front_end_for_ctx(struct dc *dc, struct dc_state *context);
void dcn401_post_unlock_program_front_end(struct dc *dc, struct dc_state *context);
@@ -102,4 +106,7 @@ void dcn401_detect_pipe_changes(
struct dc_state *new_state,
struct pipe_ctx *old_pipe,
struct pipe_ctx *new_pipe);
+void dcn401_plane_atomic_power_down(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp);
#endif /* __DC_HWSS_DCN401_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
index 44cb376f97c1..fe7aceb2f510 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
@@ -18,6 +18,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn401_program_front_end_for_ctx,
+ .clear_surface_dcc_and_tiling = dcn10_reset_surface_dcc_and_tiling,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.post_unlock_program_front_end = dcn401_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
@@ -123,7 +124,7 @@ static const struct hwseq_private_funcs dcn401_private_funcs = {
.disable_vga = dcn20_disable_vga,
.bios_golden_init = dcn10_bios_golden_init,
.plane_atomic_disable = dcn20_plane_atomic_disable,
- .plane_atomic_power_down = dcn10_plane_atomic_power_down,
+ .plane_atomic_power_down = dcn401_plane_atomic_power_down,
.enable_power_gating_plane = dcn32_enable_power_gating_plane,
.hubp_pg_control = dcn32_hubp_pg_control,
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index a7d66cfd93c9..c8b5ed834579 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -46,6 +46,7 @@ struct dce_hwseq;
struct link_resource;
struct dc_dmub_cmd;
struct pg_block_update;
+struct drr_params;
struct subvp_pipe_control_lock_fast_params {
struct dc *dc;
@@ -240,6 +241,7 @@ struct hw_sequencer_funcs {
struct pipe_ctx *pipe_ctx, bool enableTripleBuffer);
void (*update_pending_status)(struct pipe_ctx *pipe_ctx);
void (*update_dsc_pg)(struct dc *dc, struct dc_state *context, bool safe_to_disable);
+ void (*clear_surface_dcc_and_tiling)(struct pipe_ctx *pipe_ctx, struct dc_plane_state *plane_state, bool clear_tiling);
/* Pipe Lock Related */
void (*pipe_control_lock)(struct dc *dc,
@@ -516,11 +518,21 @@ void get_cursor_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
+void get_dcc_visual_confirm_color(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color);
+
void set_p_state_switch_method(
struct dc *dc,
struct dc_state *context,
struct pipe_ctx *pipe_ctx);
+void set_drr_and_clear_adjust_pending(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_state *stream,
+ struct drr_params *params);
+
void hwss_execute_sequence(struct dc *dc,
struct block_sequence block_sequence[],
int num_steps);
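set_drr_and_clear_adjust_pending() is what the earlier hunks substitute for every open-coded tg->funcs->set_drr() call; its definition is not part of this excerpt. A sketch of what such a wrapper is assumed to do, inferred only from its name and call sites (the pending-flag field name is an assumption for illustration):

/* Sketch only; the real definition lives elsewhere in the series. */
void set_drr_and_clear_adjust_pending(struct pipe_ctx *pipe_ctx,
				      struct dc_stream_state *stream,
				      struct drr_params *params)
{
	struct timing_generator *tg = pipe_ctx->stream_res.tg;

	if (tg && tg->funcs && tg->funcs->set_drr)
		tg->funcs->set_drr(tg, params);

	/* Assumed: mark any pending DRR timing adjust on the stream as applied. */
	if (stream)
		stream->adjust.timing_adjust_pending = false;
}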
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index d558efc6e12f..d0021f25f3d8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -39,7 +39,7 @@
#include "panel_cntl.h"
#include "dmub/inc/dmub_cmd.h"
#include "pg_cntl.h"
-#include "spl/dc_spl.h"
+#include "sspl/dc_spl.h"
#define MAX_CLOCK_SOURCES 7
#define MAX_SVP_PHANTOM_STREAMS 2
@@ -376,6 +376,7 @@ struct plane_resource {
/* all mappable hardware resources used to enable a link */
struct link_resource {
+ struct link_encoder *dio_link_enc;
struct hpo_dp_link_encoder *hpo_dp_link_enc;
};
@@ -500,6 +501,8 @@ struct resource_context {
uint8_t dp_clock_source_ref_count;
bool is_dsc_acquired[MAX_PIPES];
struct link_enc_cfg_context link_enc_cfg_ctx;
+ unsigned int dio_link_enc_to_link_idx[MAX_DIG_LINK_ENCODERS];
+ int dio_link_enc_ref_cnts[MAX_DIG_LINK_ENCODERS];
bool is_hpo_dp_stream_enc_acquired[MAX_HPO_DP2_ENCODERS];
unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS];
int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS];
@@ -627,7 +630,7 @@ struct dc_state {
*/
struct bw_context bw_ctx;
- struct block_sequence block_sequence[50];
+ struct block_sequence block_sequence[100];
unsigned int block_sequence_steps;
struct dc_dmub_cmd dc_dmub_cmd[10];
unsigned int dmub_cmd_count;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 7a1ca1e98059..221645c023b5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -221,6 +221,7 @@ enum dentist_divider_range {
CLK_SF(CLK0_CLK_PLL_REQ, FbMult_frac, mask_sh)
#define CLK_REG_LIST_DCN401() \
+ SR(DENTIST_DISPCLK_CNTL), \
CLK_SR_DCN401(CLK0_CLK_PLL_REQ, CLK01, 0), \
CLK_SR_DCN401(CLK0_CLK0_DFS_CNTL, CLK01, 0), \
CLK_SR_DCN401(CLK0_CLK1_DFS_CNTL, CLK01, 0), \
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 0150f2581ee4..0c5675d1c593 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -119,10 +119,14 @@ static const struct dpp_input_csc_matrix __maybe_unused dpp_input_csc_matrix[] =
{ 0x39a6, 0x2568, 0, 0xe0d6,
0xeedd, 0x2568, 0xf925, 0x9a8,
0, 0x2568, 0x43ee, 0xdbb2 } },
- { COLOR_SPACE_2020_YCBCR,
+ { COLOR_SPACE_2020_YCBCR_FULL,
{ 0x2F30, 0x2000, 0, 0xE869,
0xEDB7, 0x2000, 0xFABC, 0xBC6,
0, 0x2000, 0x3C34, 0xE1E6 } },
+ { COLOR_SPACE_2020_YCBCR_LIMITED,
+ { 0x35B9, 0x2543, 0, 0xE2B2,
+ 0xEB2F, 0x2543, 0xFA01, 0x0B1F,
+ 0, 0x2543, 0x4489, 0xDB42 } },
{ COLOR_SPACE_2020_RGB_LIMITEDRANGE,
{ 0x35E0, 0x255F, 0, 0xE2B3,
0xEB20, 0x255F, 0xF9FD, 0xB1E,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
index 6fdc9809280c..7f371cbb35cd 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
@@ -70,35 +70,7 @@ struct optc {
enum signal_type signal;
};
-struct dcn_otg_state {
- uint32_t v_blank_start;
- uint32_t v_blank_end;
- uint32_t v_sync_a_pol;
- uint32_t v_total;
- uint32_t v_total_max;
- uint32_t v_total_min;
- uint32_t v_total_min_sel;
- uint32_t v_total_max_sel;
- uint32_t v_sync_a_start;
- uint32_t v_sync_a_end;
- uint32_t h_blank_start;
- uint32_t h_blank_end;
- uint32_t h_sync_a_start;
- uint32_t h_sync_a_end;
- uint32_t h_sync_a_pol;
- uint32_t h_total;
- uint32_t underflow_occurred_status;
- uint32_t otg_enabled;
- uint32_t blank_enabled;
- uint32_t vertical_interrupt1_en;
- uint32_t vertical_interrupt1_line;
- uint32_t vertical_interrupt2_en;
- uint32_t vertical_interrupt2_line;
- uint32_t otg_master_update_lock;
- uint32_t otg_double_buffer_control;
-};
-
-void optc1_read_otg_state(struct optc *optc1, struct dcn_otg_state *s);
+void optc1_read_otg_state(struct timing_generator *optc, struct dcn_otg_state *s);
bool optc1_get_hw_timing(struct timing_generator *tg, struct dc_crtc_timing *hw_crtc_timing);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 9885cb3c310f..267ace4eef8a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -146,6 +146,35 @@ struct crc_params {
bool reset;
};
+struct dcn_otg_state {
+ uint32_t v_blank_start;
+ uint32_t v_blank_end;
+ uint32_t v_sync_a_pol;
+ uint32_t v_total;
+ uint32_t v_total_max;
+ uint32_t v_total_min;
+ uint32_t v_total_min_sel;
+ uint32_t v_total_max_sel;
+ uint32_t v_sync_a_start;
+ uint32_t v_sync_a_end;
+ uint32_t h_blank_start;
+ uint32_t h_blank_end;
+ uint32_t h_sync_a_start;
+ uint32_t h_sync_a_end;
+ uint32_t h_sync_a_pol;
+ uint32_t h_total;
+ uint32_t underflow_occurred_status;
+ uint32_t otg_enabled;
+ uint32_t blank_enabled;
+ uint32_t vertical_interrupt1_en;
+ uint32_t vertical_interrupt1_line;
+ uint32_t vertical_interrupt2_en;
+ uint32_t vertical_interrupt2_line;
+ uint32_t vertical_interrupt2_dest;
+ uint32_t otg_master_update_lock;
+ uint32_t otg_double_buffer_control;
+};
+
/**
* struct timing_generator - Entry point to Output Timing Generator feature.
*/
@@ -350,6 +379,7 @@ struct timing_generator_funcs {
bool (*get_pipe_update_pending)(struct timing_generator *tg);
void (*set_vupdate_keepout)(struct timing_generator *tg, bool enable);
bool (*wait_update_lock_status)(struct timing_generator *tg, bool locked);
+ void (*read_otg_state)(struct timing_generator *tg, struct dcn_otg_state *s);
};
#endif
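With dcn_otg_state relocated here and a read_otg_state hook added to timing_generator_funcs, dcn10_log_hw_state() no longer downcasts to struct optc. A minimal caller mirroring that change (purely illustrative):

/* Illustrative caller mirroring the dcn10_log_hw_state() change above. */
static uint32_t read_otg_v_total(struct timing_generator *tg)
{
	struct dcn_otg_state s = {0};

	if (tg->funcs && tg->funcs->read_otg_state)
		tg->funcs->read_otg_state(tg, &s);

	return s.v_total;
}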
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
index 45262cba675e..5a1d9b708a9d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
@@ -29,7 +29,7 @@
#include "hw_shared.h"
#include "dc_hw_types.h"
#include "fixed31_32.h"
-#include "spl/dc_spl_types.h"
+#include "sspl/dc_spl_types.h"
#define CSC_TEMPERATURE_MATRIX_SIZE 12
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
index fd1f9d3db039..2948a696ee12 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
@@ -218,10 +218,8 @@ struct link_service {
/*************************** DP DPIA/PHY ******************************/
- int (*dpia_handle_usb4_bandwidth_allocation_for_link)(
+ void (*dpia_handle_usb4_bandwidth_allocation_for_link)(
struct dc_link *link, int peak_bw);
- void (*dpia_handle_bw_alloc_response)(
- struct dc_link *link, uint8_t bw, uint8_t result);
void (*dp_set_drive_settings)(
struct dc_link *link,
const struct link_resource *link_res,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
index dc650be3837e..f1afb31ac70b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
@@ -96,11 +96,6 @@ struct link_encoder *link_enc_cfg_get_link_enc_used_by_link(
/* Return next available DIG link encoder. NULL if none available. */
struct link_encoder *link_enc_cfg_get_next_avail_link_enc(struct dc *dc);
-/* Return DIG link encoder used by stream. NULL if unused. */
-struct link_encoder *link_enc_cfg_get_link_enc_used_by_stream(
- struct dc *dc,
- const struct dc_stream_state *stream);
-
/* Return DIG link encoder. NULL if unused. */
struct link_encoder *link_enc_cfg_get_link_enc(const struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index cd1157d225ab..9458187b834d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -29,7 +29,6 @@
#include "core_status.h"
#include "dal_asic_id.h"
#include "dm_pp_smu.h"
-#include "spl/dc_spl.h"
#define MEMORY_TYPE_MULTIPLIER_CZ 4
#define MEMORY_TYPE_HBM 2
@@ -152,6 +151,8 @@ bool resource_attach_surfaces_to_context(
struct dc_state *context,
const struct resource_pool *pool);
+bool resource_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx);
+
#define FREE_PIPE_INDEX_NOT_FOUND -1
/*
@@ -646,4 +647,9 @@ void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuratio
int resource_calculate_det_for_stream(struct dc_state *state, struct pipe_ctx *otg_master);
bool resource_is_hpo_acquired(struct dc_state *context);
+
+struct link_encoder *get_temp_dio_link_enc(
+ const struct resource_context *res_ctx,
+ const struct resource_pool *const pool,
+ const struct dc_link *link);
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
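resource_can_pipe_disable_cursor() replaces the two identical static helpers deleted above from dcn10_hwseq.c and dcn401_hwseq.c. Its core test is plain rectangle containment of this pipe's recout inside a higher pipe's recout; a reduced standalone sketch of that predicate (struct and field names simplified for the example):

struct recout_rect { int x, y, width, height; };

/* True if r1 (this pipe's viewport) is fully contained in r2 (a pipe above). */
static bool recout_contained(struct recout_rect r1, struct recout_rect r2)
{
	return r1.x >= r2.x && r1.y >= r2.y &&
	       r1.x + r1.width  <= r2.x + r2.width &&
	       r1.y + r1.height <= r2.y + r2.height;
}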
diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile
index 8ac36bdd4e1e..b5e14d792378 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile
@@ -182,6 +182,15 @@ AMD_DAL_IRQ_DCN351= $(addprefix $(AMDDALPATH)/dc/irq/dcn351/,$(IRQ_DCN351))
AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN351)
###############################################################################
+# DCN 36
+###############################################################################
+IRQ_DCN36 = irq_service_dcn36.o
+
+AMD_DAL_IRQ_DCN36= $(addprefix $(AMDDALPATH)/dc/irq/dcn36/,$(IRQ_DCN36))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN36)
+
+###############################################################################
# DCN 401
###############################################################################
IRQ_DCN401 = irq_service_dcn401.o
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c b/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c
new file mode 100644
index 000000000000..ea958628f8b8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c
@@ -0,0 +1,408 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2025 Advanced Micro Devices, Inc. */
+
+#include "dm_services.h"
+#include "include/logger_interface.h"
+#include "../dce110/irq_service_dce110.h"
+
+#include "dcn/dcn_3_6_0_offset.h"
+#include "dcn/dcn_3_6_0_sh_mask.h"
+
+#include "irq_service_dcn36.h"
+
+#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
+
+static enum dc_irq_source to_dal_irq_source_dcn36(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
+{
+ switch (src_id) {
+ case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK1;
+ case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK2;
+ case DCN_1_0__SRCID__DC_D3_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK3;
+ case DCN_1_0__SRCID__DC_D4_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK4;
+ case DCN_1_0__SRCID__DC_D5_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK5;
+ case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK6;
+ case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC1_VLINE0;
+ case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC2_VLINE0;
+ case DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC3_VLINE0;
+ case DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC4_VLINE0;
+ case DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC5_VLINE0;
+ case DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC6_VLINE0;
+ case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP1;
+ case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP2;
+ case DCN_1_0__SRCID__HUBP2_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP3;
+ case DCN_1_0__SRCID__HUBP3_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP4;
+ case DCN_1_0__SRCID__HUBP4_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP5;
+ case DCN_1_0__SRCID__HUBP5_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP6;
+ case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE1;
+ case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE2;
+ case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE3;
+ case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE4;
+ case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE5;
+ case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE6;
+ case DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT:
+ return DC_IRQ_SOURCE_DMCUB_OUTBOX;
+ case DCN_1_0__SRCID__DC_HPD1_INT:
+ /* generic src_id for all HPD and HPDRX interrupts */
+ switch (ext_id) {
+ case DCN_1_0__CTXID__DC_HPD1_INT:
+ return DC_IRQ_SOURCE_HPD1;
+ case DCN_1_0__CTXID__DC_HPD2_INT:
+ return DC_IRQ_SOURCE_HPD2;
+ case DCN_1_0__CTXID__DC_HPD3_INT:
+ return DC_IRQ_SOURCE_HPD3;
+ case DCN_1_0__CTXID__DC_HPD4_INT:
+ return DC_IRQ_SOURCE_HPD4;
+ case DCN_1_0__CTXID__DC_HPD5_INT:
+ return DC_IRQ_SOURCE_HPD5;
+ case DCN_1_0__CTXID__DC_HPD6_INT:
+ return DC_IRQ_SOURCE_HPD6;
+ case DCN_1_0__CTXID__DC_HPD1_RX_INT:
+ return DC_IRQ_SOURCE_HPD1RX;
+ case DCN_1_0__CTXID__DC_HPD2_RX_INT:
+ return DC_IRQ_SOURCE_HPD2RX;
+ case DCN_1_0__CTXID__DC_HPD3_RX_INT:
+ return DC_IRQ_SOURCE_HPD3RX;
+ case DCN_1_0__CTXID__DC_HPD4_RX_INT:
+ return DC_IRQ_SOURCE_HPD4RX;
+ case DCN_1_0__CTXID__DC_HPD5_RX_INT:
+ return DC_IRQ_SOURCE_HPD5RX;
+ case DCN_1_0__CTXID__DC_HPD6_RX_INT:
+ return DC_IRQ_SOURCE_HPD6RX;
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+ break;
+
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+}
+
+static bool hpd_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ HPD0_DC_HPD_INT_STATUS,
+ DC_HPD_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ HPD0_DC_HPD_INT_CONTROL,
+ DC_HPD_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
+
+static struct irq_source_info_funcs hpd_irq_info_funcs = {
+ .set = NULL,
+ .ack = hpd_ack
+};
+
+static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs pflip_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs vblank_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs outbox_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs vline0_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+#undef BASE_INNER
+#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]
+
+/* compile time expand base address. */
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define SRI(reg_name, block, id)\
+ BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI_DMUB(reg_name)\
+ BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+
+#define IRQ_REG_ENTRY(base, block, reg_num, reg1, mask1, reg2, mask2)\
+ REG_STRUCT[base + reg_num].enable_reg = SRI(reg1, block, reg_num),\
+ REG_STRUCT[base + reg_num].enable_mask = \
+ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
+ REG_STRUCT[base + reg_num].enable_value[0] = \
+ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
+ REG_STRUCT[base + reg_num].enable_value[1] = \
+ ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK, \
+ REG_STRUCT[base + reg_num].ack_reg = SRI(reg2, block, reg_num),\
+ REG_STRUCT[base + reg_num].ack_mask = \
+ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
+ REG_STRUCT[base + reg_num].ack_value = \
+ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
+
+#define IRQ_REG_ENTRY_DMUB(base, reg1, mask1, reg2, mask2)\
+ REG_STRUCT[base].enable_reg = SRI_DMUB(reg1),\
+ REG_STRUCT[base].enable_mask = \
+ reg1 ## __ ## mask1 ## _MASK,\
+ REG_STRUCT[base].enable_value[0] = \
+ reg1 ## __ ## mask1 ## _MASK,\
+ REG_STRUCT[base].enable_value[1] = \
+ ~reg1 ## __ ## mask1 ## _MASK, \
+ REG_STRUCT[base].ack_reg = SRI_DMUB(reg2),\
+ REG_STRUCT[base].ack_mask = \
+ reg2 ## __ ## mask2 ## _MASK,\
+ REG_STRUCT[base].ack_value = \
+ reg2 ## __ ## mask2 ## _MASK \
+
+#define hpd_int_entry(reg_num)\
+ IRQ_REG_ENTRY(DC_IRQ_SOURCE_HPD1, HPD, reg_num,\
+ DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
+ DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
+ REG_STRUCT[DC_IRQ_SOURCE_HPD1 + reg_num].funcs = &hpd_irq_info_funcs;\
+ REG_STRUCT[DC_IRQ_SOURCE_HPD1 + reg_num].status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num);\
+
+#define hpd_rx_int_entry(reg_num)\
+ IRQ_REG_ENTRY(DC_IRQ_SOURCE_HPD1RX, HPD, reg_num,\
+ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
+ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
+ REG_STRUCT[DC_IRQ_SOURCE_HPD1RX + reg_num].status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num);\
+ REG_STRUCT[DC_IRQ_SOURCE_HPD1RX + reg_num].funcs = &hpd_rx_irq_info_funcs;\
+
+#define pflip_int_entry(reg_num)\
+ IRQ_REG_ENTRY(DC_IRQ_SOURCE_PFLIP1, HUBPREQ, reg_num,\
+ DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\
+ DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\
+ REG_STRUCT[DC_IRQ_SOURCE_PFLIP1 + reg_num].funcs = &pflip_irq_info_funcs\
+
+/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match the
+ * semantics of DCE's DC_IRQ_SOURCE_VUPDATEx.
+ */
+#define vupdate_no_lock_int_entry(reg_num)\
+ IRQ_REG_ENTRY(DC_IRQ_SOURCE_VUPDATE1, OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
+ REG_STRUCT[DC_IRQ_SOURCE_VUPDATE1 + reg_num].funcs = &vupdate_no_lock_irq_info_funcs\
+
+#define vblank_int_entry(reg_num)\
+ IRQ_REG_ENTRY(DC_IRQ_SOURCE_VBLANK1, OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
+ REG_STRUCT[DC_IRQ_SOURCE_VBLANK1 + reg_num].funcs = &vblank_irq_info_funcs\
+
+#define vline0_int_entry(reg_num)\
+ IRQ_REG_ENTRY(DC_IRQ_SOURCE_DC1_VLINE0, OTG, reg_num,\
+ OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\
+ OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
+ REG_STRUCT[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num].funcs = &vline0_irq_info_funcs\
+
+#define dmub_outbox_int_entry()\
+ IRQ_REG_ENTRY_DMUB(DC_IRQ_SOURCE_DMCUB_OUTBOX, \
+ DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX1_READY_INT_EN,\
+ DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX1_READY_INT_ACK),\
+ REG_STRUCT[DC_IRQ_SOURCE_DMCUB_OUTBOX].funcs = &outbox_irq_info_funcs
+
+#define dummy_irq_entry(irqno) \
+ REG_STRUCT[irqno].funcs = &dummy_irq_info_funcs\
+
+#define i2c_int_entry(reg_num) \
+ dummy_irq_entry(DC_IRQ_SOURCE_I2C_DDC ## reg_num)
+
+#define dp_sink_int_entry(reg_num) \
+ dummy_irq_entry(DC_IRQ_SOURCE_DPSINK ## reg_num)
+
+#define gpio_pad_int_entry(reg_num) \
+ dummy_irq_entry(DC_IRQ_SOURCE_GPIOPAD ## reg_num)
+
+#define dc_underflow_int_entry(reg_num) \
+ dummy_irq_entry(DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW)
+
+static struct irq_source_info_funcs dummy_irq_info_funcs = {
+ .set = dal_irq_service_dummy_set,
+ .ack = dal_irq_service_dummy_ack
+};
+
+#define dcn36_irq_init_part_1() {\
+ dummy_irq_entry(DC_IRQ_SOURCE_INVALID); \
+ hpd_int_entry(0); \
+ hpd_int_entry(1); \
+ hpd_int_entry(2); \
+ hpd_int_entry(3); \
+ hpd_int_entry(4); \
+ hpd_rx_int_entry(0); \
+ hpd_rx_int_entry(1); \
+ hpd_rx_int_entry(2); \
+ hpd_rx_int_entry(3); \
+ hpd_rx_int_entry(4); \
+ i2c_int_entry(1); \
+ i2c_int_entry(2); \
+ i2c_int_entry(3); \
+ i2c_int_entry(4); \
+ i2c_int_entry(5); \
+ i2c_int_entry(6); \
+ dp_sink_int_entry(1); \
+ dp_sink_int_entry(2); \
+ dp_sink_int_entry(3); \
+ dp_sink_int_entry(4); \
+ dp_sink_int_entry(5); \
+ dp_sink_int_entry(6); \
+ dummy_irq_entry(DC_IRQ_SOURCE_TIMER); \
+ pflip_int_entry(0); \
+ pflip_int_entry(1); \
+ pflip_int_entry(2); \
+ pflip_int_entry(3); \
+ dummy_irq_entry(DC_IRQ_SOURCE_PFLIP5); \
+ dummy_irq_entry(DC_IRQ_SOURCE_PFLIP6); \
+ dummy_irq_entry(DC_IRQ_SOURCE_PFLIP_UNDERLAY0); \
+ gpio_pad_int_entry(0); \
+ gpio_pad_int_entry(1); \
+ gpio_pad_int_entry(2); \
+ gpio_pad_int_entry(3); \
+ gpio_pad_int_entry(4); \
+ gpio_pad_int_entry(5); \
+ gpio_pad_int_entry(6); \
+ gpio_pad_int_entry(7); \
+ gpio_pad_int_entry(8); \
+ gpio_pad_int_entry(9); \
+ gpio_pad_int_entry(10); \
+ gpio_pad_int_entry(11); \
+ gpio_pad_int_entry(12); \
+ gpio_pad_int_entry(13); \
+ gpio_pad_int_entry(14); \
+ gpio_pad_int_entry(15); \
+ gpio_pad_int_entry(16); \
+ gpio_pad_int_entry(17); \
+ gpio_pad_int_entry(18); \
+ gpio_pad_int_entry(19); \
+ gpio_pad_int_entry(20); \
+ gpio_pad_int_entry(21); \
+ gpio_pad_int_entry(22); \
+ gpio_pad_int_entry(23); \
+ gpio_pad_int_entry(24); \
+ gpio_pad_int_entry(25); \
+ gpio_pad_int_entry(26); \
+ gpio_pad_int_entry(27); \
+ gpio_pad_int_entry(28); \
+ gpio_pad_int_entry(29); \
+ gpio_pad_int_entry(30); \
+ dc_underflow_int_entry(1); \
+ dc_underflow_int_entry(2); \
+ dc_underflow_int_entry(3); \
+ dc_underflow_int_entry(4); \
+ dc_underflow_int_entry(5); \
+ dc_underflow_int_entry(6); \
+ dummy_irq_entry(DC_IRQ_SOURCE_DMCU_SCP); \
+ dummy_irq_entry(DC_IRQ_SOURCE_VBIOS_SW); \
+}
+
+#define dcn36_irq_init_part_2() {\
+ vupdate_no_lock_int_entry(0); \
+ vupdate_no_lock_int_entry(1); \
+ vupdate_no_lock_int_entry(2); \
+ vupdate_no_lock_int_entry(3); \
+ vblank_int_entry(0); \
+ vblank_int_entry(1); \
+ vblank_int_entry(2); \
+ vblank_int_entry(3); \
+ vline0_int_entry(0); \
+ vline0_int_entry(1); \
+ vline0_int_entry(2); \
+ vline0_int_entry(3); \
+ dummy_irq_entry(DC_IRQ_SOURCE_DC5_VLINE1); \
+ dummy_irq_entry(DC_IRQ_SOURCE_DC6_VLINE1); \
+ dmub_outbox_int_entry(); \
+}
+
+#define dcn36_irq_init() {\
+ dcn36_irq_init_part_1(); \
+ dcn36_irq_init_part_2(); \
+}
+
+static struct irq_source_info irq_source_info_dcn36[DAL_IRQ_SOURCES_NUMBER] = {0};
+
+static struct irq_service_funcs irq_service_funcs_dcn36 = {
+ .to_dal_irq_source = to_dal_irq_source_dcn36
+};
+
+static void dcn36_irq_construct(
+ struct irq_service *irq_service,
+ struct irq_service_init_data *init_data)
+{
+ struct dc_context *ctx = init_data->ctx;
+
+#define REG_STRUCT irq_source_info_dcn36
+ dcn36_irq_init();
+
+ dal_irq_service_construct(irq_service, init_data);
+
+ irq_service->info = irq_source_info_dcn36;
+ irq_service->funcs = &irq_service_funcs_dcn36;
+}
+
+struct irq_service *dal_irq_service_dcn36_create(
+ struct irq_service_init_data *init_data)
+{
+ struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
+ GFP_KERNEL);
+
+ if (!irq_service)
+ return NULL;
+
+ dcn36_irq_construct(irq_service, init_data);
+ return irq_service;
+}
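The whole DCN 3.6 IRQ table is populated by the IRQ_REG_ENTRY()-style macros above. For one concrete entry, hpd_int_entry(0) conceptually fills its slot as follows (expanded by hand for readability; this is not literal preprocessor output, which assigns the fields one at a time):

/* Hand-expanded illustration of hpd_int_entry(0). */
irq_source_info_dcn36[DC_IRQ_SOURCE_HPD1 + 0] = (struct irq_source_info) {
	.enable_reg   = SRI(DC_HPD_INT_CONTROL, HPD, 0),
	.enable_mask  = HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK,
	.enable_value = { HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK,
			  ~HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK },
	.ack_reg      = SRI(DC_HPD_INT_CONTROL, HPD, 0),
	.ack_mask     = HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK,
	.ack_value    = HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK,
	.status_reg   = SRI(DC_HPD_INT_STATUS, HPD, 0),
	.funcs        = &hpd_irq_info_funcs,
};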
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.h b/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.h
new file mode 100644
index 000000000000..21ff95f6562d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2025 Advanced Micro Devices, Inc. */
+
+#ifndef __DAL_IRQ_SERVICE_DCN36_H__
+#define __DAL_IRQ_SERVICE_DCN36_H__
+
+#include "../irq_service.h"
+
+struct irq_service *dal_irq_service_dcn36_create(
+ struct irq_service_init_data *init_data);
+
+#endif /* __DAL_IRQ_SERVICE_DCN36_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h
index e962c426beda..110f656d43ae 100644
--- a/drivers/gpu/drm/amd/display/dc/irq_types.h
+++ b/drivers/gpu/drm/amd/display/dc/irq_types.h
@@ -170,6 +170,7 @@ enum irq_type
IRQ_TYPE_VUPDATE = DC_IRQ_SOURCE_VUPDATE1,
IRQ_TYPE_VBLANK = DC_IRQ_SOURCE_VBLANK1,
IRQ_TYPE_VLINE0 = DC_IRQ_SOURCE_DC1_VLINE0,
+ IRQ_TYPE_DCUNDERFLOW = DC_IRQ_SOURCE_DC1UNDERFLOW,
};
#define DAL_VALID_IRQ_SRC_NUM(src) \
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
index 06faa461067b..b68bcc9fca0a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
@@ -48,9 +48,16 @@ void set_dio_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
+ if (!pipe_ctx->stream->ctx->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ if (!link_enc) {
+ ASSERT(link_enc);
+ return;
+ }
+
link_enc->funcs->connect_dig_be_to_fe(link_enc,
pipe_ctx->stream_res.stream_enc->id, true);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
@@ -71,9 +78,16 @@ void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
+ if (!pipe_ctx->stream->ctx->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ if (!link_enc) {
+ ASSERT(link_enc);
+ return;
+ }
+
if (!stream_enc)
return;
@@ -142,7 +156,14 @@ void enable_dio_dp_link_output(struct dc_link *link,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings)
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ struct link_encoder *link_enc = link_res->dio_link_enc;
+
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+ if (!link_enc) {
+ ASSERT(link_enc);
+ return;
+ }
if (dc_is_dp_sst_signal(signal))
link_enc->funcs->enable_dp_output(
@@ -162,11 +183,16 @@ void disable_dio_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal)
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ struct link_encoder *link_enc = link_res->dio_link_enc;
- if (link_enc != NULL)
- link_enc->funcs->disable_output(link_enc, signal);
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+ if (!link_enc) {
+ ASSERT(link_enc);
+ return;
+ }
+ link_enc->funcs->disable_output(link_enc, signal);
link->dc->link_srv->dp_trace_source_sequence(link,
DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
}
@@ -175,7 +201,14 @@ void set_dio_dp_link_test_pattern(struct dc_link *link,
const struct link_resource *link_res,
struct encoder_set_dp_phy_pattern_param *tp_params)
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ struct link_encoder *link_enc = link_res->dio_link_enc;
+
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+ if (!link_enc) {
+ ASSERT(link_enc);
+ return;
+ }
link_enc->funcs->dp_set_phy_pattern(link_enc, tp_params);
link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
@@ -186,7 +219,14 @@ void set_dio_dp_lane_settings(struct dc_link *link,
const struct dc_link_settings *link_settings,
const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ struct link_encoder *link_enc = link_res->dio_link_enc;
+
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+ if (!link_enc) {
+ ASSERT(link_enc);
+ return;
+ }
link_enc->funcs->dp_set_lane_settings(link_enc, link_settings, lane_settings);
}
@@ -195,9 +235,15 @@ void update_dio_stream_allocation_table(struct dc_link *link,
const struct link_resource *link_res,
const struct link_mst_stream_allocation_table *table)
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ struct link_encoder *link_enc = link_res->dio_link_enc;
+
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+ if (!link_enc) {
+ ASSERT(link_enc);
+ return;
+ }
- ASSERT(link_enc);
link_enc->funcs->update_mst_stream_allocation_table(link_enc, table);
}
@@ -282,7 +328,10 @@ static const struct link_hwss dio_link_hwss = {
bool can_use_dio_link_hwss(const struct dc_link *link,
const struct link_resource *link_res)
{
- return link->link_enc != NULL;
+ if (!link->dc->config.unify_link_enc_assignment)
+ return link->link_enc != NULL;
+ else
+ return link_res->dio_link_enc != NULL;
}
/**
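
The recurring change across the DIO/DPIA link_hwss files is the encoder lookup: when dc->config.unify_link_enc_assignment is set, the pre-assigned encoder in link_res->dio_link_enc is used directly; otherwise the code falls back to link_enc_cfg_get_link_enc(), and each caller now bails out (after an ASSERT) when neither path yields an encoder. A minimal sketch of that selection pattern, with get_assigned_link_enc() as a hypothetical helper name, not a function this patch adds:

    static struct link_encoder *get_assigned_link_enc(const struct dc_link *link,
            const struct link_resource *link_res)
    {
        struct link_encoder *link_enc = link_res->dio_link_enc;

        if (!link->dc->config.unify_link_enc_assignment)
            link_enc = link_enc_cfg_get_link_enc(link);

        ASSERT(link_enc); /* callers above return early when this stays NULL */
        return link_enc;
    }
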
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
index a6d1d7641ab4..e1dff4e3f446 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
@@ -127,7 +127,10 @@ static void set_dio_fixed_vs_pe_retimer_dp_link_test_pattern(struct dc_link *lin
const struct link_resource *link_res,
struct encoder_set_dp_phy_pattern_param *tp_params)
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ struct link_encoder *link_enc = link_res->dio_link_enc;
+
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
if (!set_dio_fixed_vs_pe_retimer_dp_link_test_pattern_override(
link, link_res, tp_params, get_dio_link_hwss())) {
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
index 36adf95744fe..81bf3c5e1fdf 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
@@ -35,12 +35,15 @@ static void update_dpia_stream_allocation_table(struct dc_link *link,
const struct link_resource *link_res,
const struct link_mst_stream_allocation_table *table)
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ struct link_encoder *link_enc = link_res->dio_link_enc;
static enum dc_status status;
uint8_t mst_alloc_slots = 0, prev_mst_slots_in_use = 0xFF;
int i;
DC_LOGGER_INIT(link->ctx->logger);
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+
for (i = 0; i < table->stream_count; i++)
mst_alloc_slots += table->stream_allocations[i].slot_count;
@@ -61,7 +64,10 @@ static void set_dio_dpia_link_test_pattern(struct dc_link *link,
if (tp_params->dp_phy_pattern != DP_TEST_PATTERN_VIDEO_MODE)
return;
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ struct link_encoder *link_enc = link_res->dio_link_enc;
+
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
if (!link_enc)
return;
@@ -83,31 +89,28 @@ static void enable_dpia_link_output(struct dc_link *link,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings)
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ struct link_encoder *link_enc = link_res->dio_link_enc;
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
if (link_enc != NULL) {
- if (link->dc->config.enable_dpia_pre_training && link_enc->funcs->enable_dpia_output) {
+ if (link->dc->config.enable_dpia_pre_training || link->dc->config.unify_link_enc_assignment) {
uint8_t fec_rdy = link->dc->link_srv->dp_should_enable_fec(link);
uint8_t digmode = dc_is_dp_sst_signal(signal) ? DIG_SST_MODE : DIG_MST_MODE;
- link_enc->funcs->enable_dpia_output(
- link_enc,
- link_settings,
- link->ddc_hw_inst,
- digmode,
- fec_rdy);
- } else {
- if (dc_is_dp_sst_signal(signal))
- link_enc->funcs->enable_dp_output(
+ if (link_enc->funcs->enable_dpia_output)
+ link_enc->funcs->enable_dpia_output(
link_enc,
link_settings,
- clock_source);
+ link->ddc_hw_inst,
+ digmode,
+ fec_rdy);
else
- link_enc->funcs->enable_dp_mst_output(
- link_enc,
- link_settings,
- clock_source);
- }
+ DC_LOG_ERROR("%s: link encoder does not support enable_dpia_output\n", __func__);
+ } else
+ enable_dio_dp_link_output(link, link_res, signal, clock_source, link_settings);
}
@@ -119,13 +122,20 @@ static void disable_dpia_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal)
{
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ struct link_encoder *link_enc = link_res->dio_link_enc;
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
if (link_enc != NULL) {
- if (link->dc->config.enable_dpia_pre_training && link_enc->funcs->disable_dpia_output) {
+ if (link->dc->config.enable_dpia_pre_training || link->dc->config.unify_link_enc_assignment) {
uint8_t digmode = dc_is_dp_sst_signal(signal) ? DIG_SST_MODE : DIG_MST_MODE;
- link_enc->funcs->disable_dpia_output(link_enc, link->ddc_hw_inst, digmode);
+ if (link_enc->funcs->disable_dpia_output)
+ link_enc->funcs->disable_dpia_output(link_enc, link->ddc_hw_inst, digmode);
+ else
+ DC_LOG_ERROR("%s: link encoder does not support disable_dpia_output\n", __func__);
} else
link_enc->funcs->disable_output(link_enc, signal);
}
@@ -154,8 +164,10 @@ static const struct link_hwss dpia_link_hwss = {
bool can_use_dpia_link_hwss(const struct dc_link *link,
const struct link_resource *link_res)
{
- return link->is_dig_mapping_flexible &&
- link->dc->res_pool->funcs->link_encs_assign;
+ if (!link->dc->config.unify_link_enc_assignment)
+ return link->is_dig_mapping_flexible && link->dc->res_pool->funcs->link_encs_assign;
+ else
+ return link->is_dig_mapping_flexible && link_res->dio_link_enc != NULL;
}
const struct link_hwss *get_dpia_link_hwss(void)
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index 550e1a098fa2..cc9191a5c9e6 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -816,7 +816,10 @@ static bool should_verify_link_capability_destructively(struct dc_link *link,
{
bool destrictive = false;
struct dc_link_settings max_link_cap;
- bool is_link_enc_unavailable = link->link_enc &&
+ bool is_link_enc_unavailable = false;
+
+ if (!link->dc->config.unify_link_enc_assignment)
+ is_link_enc_unavailable = link->link_enc &&
link->dc->res_pool->funcs->link_encs_assign &&
!link_enc_cfg_is_link_enc_avail(
link->ctx->dc,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index ec7de9c01fab..268626e73c54 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -652,15 +652,15 @@ static void write_i2c_redriver_setting(
static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
{
struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
- struct link_encoder *link_enc = NULL;
+ struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
struct cp_psp_stream_config config = {0};
enum dp_panel_mode panel_mode =
dp_get_panel_mode(pipe_ctx->stream->link);
if (cp_psp == NULL || cp_psp->funcs.update_stream_config == NULL)
return;
-
- link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ if (!pipe_ctx->stream->ctx->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
ASSERT(link_enc);
if (link_enc == NULL)
return;
@@ -1924,7 +1924,7 @@ static void disable_link_dp(struct dc_link *link,
if (link_dp_get_encoding_format(&link_settings) ==
DP_8b_10b_ENCODING) {
- dp_set_fec_enable(link, false);
+ dp_set_fec_enable(link, link_res, false);
dp_set_fec_ready(link, link_res, false);
}
}
@@ -2122,7 +2122,7 @@ static enum dc_status enable_link_dp(struct dc_state *state,
fec_enable = true;
if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING)
- dp_set_fec_enable(link, fec_enable);
+ dp_set_fec_enable(link, &pipe_ctx->link_res, fec_enable);
// during mode set we do DP_SET_POWER off then on, aux writes are lost
if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
@@ -2291,22 +2291,7 @@ static bool allocate_usb4_bandwidth_for_stream(struct dc_stream_state *stream, i
link->dpia_bw_alloc_config.dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(link);
req_bw += link->dpia_bw_alloc_config.dp_overhead;
- if (link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, req_bw)) {
- if (req_bw <= link->dpia_bw_alloc_config.allocated_bw) {
- DC_LOG_DEBUG("%s, Success in allocate bw for link(%d), allocated_bw(%d), dp_overhead(%d)\n",
- __func__, link->link_index, link->dpia_bw_alloc_config.allocated_bw,
- link->dpia_bw_alloc_config.dp_overhead);
- } else {
- // Cannot get the required bandwidth.
- DC_LOG_ERROR("%s, Failed to allocate bw for link(%d), allocated_bw(%d), dp_overhead(%d)\n",
- __func__, link->link_index, link->dpia_bw_alloc_config.allocated_bw,
- link->dpia_bw_alloc_config.dp_overhead);
- return false;
- }
- } else {
- DC_LOG_DEBUG("%s, usb4 request bw timeout\n", __func__);
- return false;
- }
+ link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, req_bw);
if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
int i = 0;
@@ -2461,7 +2446,7 @@ void link_set_dpms_on(
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->sink->link;
enum dc_status status;
- struct link_encoder *link_enc;
+ struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO;
struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
@@ -2486,7 +2471,8 @@ void link_set_dpms_on(
}
}
- link_enc = link_enc_cfg_get_link_enc(link);
+ if (!dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
ASSERT(link_enc);
if (!dc_is_virtual_signal(pipe_ctx->stream->signal)
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index a7877d57a00f..f6b6b19e7481 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -175,7 +175,6 @@ static void construct_link_service_dp_phy_or_dpia(struct link_service *link_srv)
{
link_srv->dpia_handle_usb4_bandwidth_allocation_for_link =
dpia_handle_usb4_bandwidth_allocation_for_link;
- link_srv->dpia_handle_bw_alloc_response = dpia_handle_bw_alloc_response;
link_srv->dp_set_drive_settings = dp_set_drive_settings;
link_srv->dpcd_write_rx_power_ctrl = dpcd_write_rx_power_ctrl;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index 44c3023a7731..21ee0d96c9d4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -250,21 +250,23 @@ static uint32_t intersect_frl_link_bw_support(
{
uint32_t supported_bw_in_kbps = max_supported_frl_bw_in_kbps;
- // HDMI_ENCODED_LINK_BW bits are only valid if HDMI Link Configuration bit is 1 (FRL mode)
- if (hdmi_encoded_link_bw.bits.FRL_MODE) {
- if (hdmi_encoded_link_bw.bits.BW_48Gbps)
- supported_bw_in_kbps = 48000000;
- else if (hdmi_encoded_link_bw.bits.BW_40Gbps)
- supported_bw_in_kbps = 40000000;
- else if (hdmi_encoded_link_bw.bits.BW_32Gbps)
- supported_bw_in_kbps = 32000000;
- else if (hdmi_encoded_link_bw.bits.BW_24Gbps)
- supported_bw_in_kbps = 24000000;
- else if (hdmi_encoded_link_bw.bits.BW_18Gbps)
- supported_bw_in_kbps = 18000000;
- else if (hdmi_encoded_link_bw.bits.BW_9Gbps)
- supported_bw_in_kbps = 9000000;
- }
+ /* Skip checking FRL_MODE bit, as certain PCON will clear
+ * it despite supporting the link BW indicated in the other bits.
+ */
+ if (hdmi_encoded_link_bw.bits.BW_48Gbps)
+ supported_bw_in_kbps = 48000000;
+ else if (hdmi_encoded_link_bw.bits.BW_40Gbps)
+ supported_bw_in_kbps = 40000000;
+ else if (hdmi_encoded_link_bw.bits.BW_32Gbps)
+ supported_bw_in_kbps = 32000000;
+ else if (hdmi_encoded_link_bw.bits.BW_24Gbps)
+ supported_bw_in_kbps = 24000000;
+ else if (hdmi_encoded_link_bw.bits.BW_18Gbps)
+ supported_bw_in_kbps = 18000000;
+ else if (hdmi_encoded_link_bw.bits.BW_9Gbps)
+ supported_bw_in_kbps = 9000000;
+ else if (hdmi_encoded_link_bw.bits.FRL_LINK_TRAINING_FINISHED)
+ supported_bw_in_kbps = 0; /* This case should only get hit in regulated autonomous mode. */
return supported_bw_in_kbps;
}
@@ -330,9 +332,12 @@ bool dp_is_fec_supported(const struct dc_link *link)
/* TODO - use asic cap instead of link_enc->features
* we no longer know which link enc to use for this link before commit
*/
- struct link_encoder *link_enc = NULL;
+ struct resource_context *res_ctx = &link->dc->current_state->res_ctx;
+ struct resource_pool *res_pool = link->dc->res_pool;
+ struct link_encoder *link_enc = get_temp_dio_link_enc(res_ctx, res_pool, link);
- link_enc = link_enc_cfg_get_link_enc(link);
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
ASSERT(link_enc);
return (dc_is_dp_signal(link->connector_signal) && link_enc &&
@@ -945,6 +950,9 @@ bool link_decide_link_settings(struct dc_stream_state *stream,
* TODO: add MST specific link training routine
*/
decide_mst_link_settings(link, link_setting);
+ } else if (stream->signal == SIGNAL_TYPE_VIRTUAL) {
+ link_setting->lane_count = LANE_COUNT_FOUR;
+ link_setting->link_rate = LINK_RATE_HIGH3;
} else if (link->connector_signal == SIGNAL_TYPE_EDP) {
/* enable edp link optimization for DSC eDP case */
if (stream->timing.flags.DSC) {
@@ -967,9 +975,6 @@ bool link_decide_link_settings(struct dc_stream_state *stream,
} else {
edp_decide_link_settings(link, link_setting, req_bw);
}
- } else if (stream->signal == SIGNAL_TYPE_VIRTUAL) {
- link_setting->lane_count = LANE_COUNT_FOUR;
- link_setting->link_rate = LINK_RATE_HIGH3;
} else {
decide_dp_link_settings(link, link_setting, req_bw);
}
@@ -1072,6 +1077,48 @@ static enum dc_status wake_up_aux_channel(struct dc_link *link)
return DC_OK;
}
+static void read_and_intersect_post_frl_lt_status(
+ struct dc_link *link)
+{
+ union autonomous_mode_and_frl_link_status autonomous_mode_caps = {0};
+ union hdmi_tx_link_status hdmi_tx_link_status = {0};
+ union hdmi_encoded_link_bw hdmi_encoded_link_bw = {0};
+
+ /* Check if dongle supports regulated autonomous mode. */
+ core_link_read_dpcd(link, DP_REGULATED_AUTONOMOUS_MODE_SUPPORTED_AND_HDMI_LINK_TRAINING_STATUS,
+ &autonomous_mode_caps.raw, sizeof(autonomous_mode_caps));
+
+ link->dpcd_caps.dongle_caps.dp_hdmi_regulated_autonomous_mode_support =
+ autonomous_mode_caps.bits.REGULATED_AUTONOMOUS_MODE_SUPPORTED;
+
+ if (link->dpcd_caps.dongle_caps.dp_hdmi_regulated_autonomous_mode_support) {
+ DC_LOG_DC("%s: PCON supports regulated autonomous mode.\n", __func__);
+
+ core_link_read_dpcd(link, DP_PCON_HDMI_TX_LINK_STATUS,
+ &hdmi_tx_link_status.raw, sizeof(hdmi_tx_link_status));
+ }
+
+ // Intersect reported max link bw support with the supported link rate post FRL link training
+ if (core_link_read_dpcd(link, DP_PCON_HDMI_POST_FRL_STATUS,
+ &hdmi_encoded_link_bw.raw, sizeof(hdmi_encoded_link_bw)) == DC_OK) {
+
+ if (link->dpcd_caps.dongle_caps.dp_hdmi_regulated_autonomous_mode_support &&
+ (!hdmi_tx_link_status.bits.HDMI_TX_READY_STATUS ||
+ !hdmi_encoded_link_bw.bits.FRL_LINK_TRAINING_FINISHED)) {
+ DC_LOG_WARNING("%s: PCON TX link training has not finished.\n", __func__);
+
+ /* Link training not finished, ignore values from this DPCD reg. */
+ return;
+ }
+
+ link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = intersect_frl_link_bw_support(
+ link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps,
+ hdmi_encoded_link_bw);
+ DC_LOG_DC("%s: pcon frl link bw = %u\n", __func__,
+ link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps);
+ }
+}
+
static void get_active_converter_info(
uint8_t data, struct dc_link *link)
{
@@ -1160,21 +1207,12 @@ static void get_active_converter_info(
hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
if (link->dc->caps.dp_hdmi21_pcon_support) {
- union hdmi_encoded_link_bw hdmi_encoded_link_bw;
link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps =
link_bw_kbps_from_raw_frl_link_rate_data(
hdmi_color_caps.bits.MAX_ENCODED_LINK_BW_SUPPORT);
- // Intersect reported max link bw support with the supported link rate post FRL link training
- if (core_link_read_dpcd(link, DP_PCON_HDMI_POST_FRL_STATUS,
- &hdmi_encoded_link_bw.raw, sizeof(hdmi_encoded_link_bw)) == DC_OK) {
- link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = intersect_frl_link_bw_support(
- link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps,
- hdmi_encoded_link_bw);
- DC_LOG_DC("%s: pcon frl link bw = %u\n", __func__,
- link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps);
- }
+ read_and_intersect_post_frl_lt_status(link);
if (link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps > 0)
link->dpcd_caps.dongle_caps.extendedCapValid = true;
@@ -1502,7 +1540,7 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
{
- uint8_t lttpr_dpcd_data[8] = {0};
+ uint8_t lttpr_dpcd_data[10] = {0};
enum dc_status status;
bool is_lttpr_present;
@@ -1552,6 +1590,10 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+ link->dpcd_caps.lttpr_caps.alpm.raw =
+ lttpr_dpcd_data[DP_LTTPR_ALPM_CAPABILITIES -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
/* If this chip cap is set, at least one retimer must exist in the chain
* Override count to 1 if we receive a known bad count (0 or an invalid value) */
if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
@@ -1568,10 +1610,18 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
/* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
is_lttpr_present = dp_is_lttpr_present(link);
- if (is_lttpr_present)
+ DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present);
+
+ if (is_lttpr_present) {
CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
- DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present);
+ core_link_read_dpcd(link, DP_LTTPR_IEEE_OUI, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui));
+ CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui), "LTTPR IEEE OUI: ");
+
+ core_link_read_dpcd(link, DP_LTTPR_DEVICE_ID, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id));
+ CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id), "LTTPR Device ID: ");
+ }
+
return status;
}
@@ -2085,18 +2135,32 @@ void detect_edp_sink_caps(struct dc_link *link)
core_link_read_dpcd(link, DP_SINK_EMISSION_RATE,
(uint8_t *)&link->dpcd_caps.edp_oled_emission_rate,
sizeof(link->dpcd_caps.edp_oled_emission_rate));
+
+ /*
+ * Read Multi-SST (Single Stream Transport) capability
+ * for eDP version 1.4 or higher.
+ */
+ if (link->dpcd_caps.dpcd_rev.raw >= DP_EDP_14)
+ core_link_read_dpcd(
+ link,
+ DP_EDP_MSO_LINK_CAPABILITIES,
+ (uint8_t *)&link->dpcd_caps.mso_cap_sst_links_supported,
+ sizeof(link->dpcd_caps.mso_cap_sst_links_supported));
}
bool dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap)
{
- struct link_encoder *link_enc = NULL;
+ struct resource_context *res_ctx = &link->dc->current_state->res_ctx;
+ struct resource_pool *res_pool = link->dc->res_pool;
+ struct link_encoder *link_enc = get_temp_dio_link_enc(res_ctx, res_pool, link);
if (!max_link_enc_cap) {
DC_LOG_ERROR("%s: Could not return max link encoder caps", __func__);
return false;
}
- link_enc = link_enc_cfg_get_link_enc(link);
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
ASSERT(link_enc);
if (link_enc && link_enc->funcs->get_max_link_cap) {
@@ -2124,10 +2188,13 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
struct dc_link_settings max_link_cap = {0};
enum dc_link_rate lttpr_max_link_rate;
enum dc_link_rate cable_max_link_rate;
- struct link_encoder *link_enc = NULL;
+ struct resource_context *res_ctx = &link->dc->current_state->res_ctx;
+ struct resource_pool *res_pool = link->dc->res_pool;
+ struct link_encoder *link_enc = get_temp_dio_link_enc(res_ctx, res_pool, link);
bool is_uhbr13_5_supported = true;
- link_enc = link_enc_cfg_get_link_enc(link);
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
ASSERT(link_enc);
/* get max link encoder capability */
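
One behavioral subtlety in the capability changes above: for a PCON that advertises regulated autonomous mode, the post-FRL-training status is only trusted once both HDMI_TX_READY_STATUS and FRL_LINK_TRAINING_FINISHED are set; until then the previously advertised max FRL bandwidth is kept. A condensed, illustrative sketch of that gate using the fields read in read_and_intersect_post_frl_lt_status() (regulated_autonomous_mode_supported and max_bw_kbps stand in for the corresponding dongle_caps fields):

    if (regulated_autonomous_mode_supported &&
        (!hdmi_tx_link_status.bits.HDMI_TX_READY_STATUS ||
         !hdmi_encoded_link_bw.bits.FRL_LINK_TRAINING_FINISHED))
        return; /* FRL link training not finished: keep the advertised cap */

    max_bw_kbps = intersect_frl_link_bw_support(max_bw_kbps, hdmi_encoded_link_bw);
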
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
index 8f0ce97f2362..0ce0af3ddbeb 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
@@ -67,6 +67,7 @@ bool dp_is_128b_132b_signal(struct pipe_ctx *pipe_ctx);
/* Initialize output parameter lt_settings. */
void dp_decide_training_settings(
struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_setting,
struct link_training_settings *lt_settings);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
index 0f1c411523a2..a254ead2f7e8 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
@@ -24,7 +24,7 @@
*
*/
/*********************************************************************/
-// USB4 DPIA BANDWIDTH ALLOCATION LOGIC
+// USB4 DPIA BANDWIDTH ALLOCATION LOGIC
/*********************************************************************/
#include "link_dp_dpia_bw.h"
#include "link_dpcd.h"
@@ -36,7 +36,7 @@
#define Kbps_TO_Gbps (1000 * 1000)
// ------------------------------------------------------------------
-// PRIVATE FUNCTIONS
+// PRIVATE FUNCTIONS
// ------------------------------------------------------------------
/*
* Always Check the following:
@@ -44,11 +44,11 @@
* - Is HPD HIGH?
* - Is BW Allocation Support Mode enabled on DP-Tx?
*/
-static bool get_bw_alloc_proceed_flag(struct dc_link *tmp)
+static bool link_dp_is_bw_alloc_available(struct dc_link *link)
{
- return (tmp && DISPLAY_ENDPOINT_USB4_DPIA == tmp->ep_type
- && tmp->hpd_status
- && tmp->dpia_bw_alloc_config.bw_alloc_enabled);
+ return (link && link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA
+ && link->hpd_status
+ && link->dpia_bw_alloc_config.bw_alloc_enabled);
}
static void reset_bw_alloc_struct(struct dc_link *link)
@@ -60,7 +60,6 @@ static void reset_bw_alloc_struct(struct dc_link *link)
link->dpia_bw_alloc_config.estimated_bw = 0;
link->dpia_bw_alloc_config.bw_granularity = 0;
link->dpia_bw_alloc_config.dp_overhead = 0;
- link->dpia_bw_alloc_config.response_ready = false;
link->dpia_bw_alloc_config.nrd_max_lane_count = 0;
link->dpia_bw_alloc_config.nrd_max_link_rate = 0;
for (int i = 0; i < MAX_SINKS_PER_LINK; i++)
@@ -243,20 +242,20 @@ static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_in
static void dpia_bw_alloc_unplug(struct dc_link *link)
{
if (link) {
- DC_LOG_DEBUG("%s: resetting bw alloc config for link(%d)\n",
+ DC_LOG_DEBUG("%s: resetting BW alloc config for link(%d)\n",
__func__, link->link_index);
reset_bw_alloc_struct(link);
}
}
-static void set_usb4_req_bw_req(struct dc_link *link, int req_bw)
+static void link_dpia_send_bw_alloc_request(struct dc_link *link, int req_bw)
{
uint8_t requested_bw;
uint32_t temp;
/* Error check whether request bw greater than allocated */
if (req_bw > link->dpia_bw_alloc_config.estimated_bw) {
- DC_LOG_ERROR("%s: Request bw greater than estimated bw for link(%d)\n",
+ DC_LOG_ERROR("%s: Request BW greater than estimated BW for link(%d)\n",
__func__, link->link_index);
req_bw = link->dpia_bw_alloc_config.estimated_bw;
}
@@ -271,32 +270,17 @@ static void set_usb4_req_bw_req(struct dc_link *link, int req_bw)
/* Error check whether requested and allocated are equal */
req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
if (req_bw && (req_bw == link->dpia_bw_alloc_config.allocated_bw)) {
- DC_LOG_ERROR("%s: Request bw equals to allocated bw for link(%d)\n",
+ DC_LOG_ERROR("%s: Request BW equals to allocated BW for link(%d)\n",
__func__, link->link_index);
}
- link->dpia_bw_alloc_config.response_ready = false; // Reset flag
- core_link_write_dpcd(
- link,
- REQUESTED_BW,
+ core_link_write_dpcd(link, REQUESTED_BW,
&requested_bw,
sizeof(uint8_t));
}
-/*
- * Return the response_ready flag from dc_link struct
- *
- * @link: pointer to the dc_link struct instance
- *
- * return: response_ready flag from dc_link struct
- */
-static bool get_cm_response_ready_flag(struct dc_link *link)
-{
- return link->dpia_bw_alloc_config.response_ready;
-}
-
// ------------------------------------------------------------------
-// PUBLIC FUNCTIONS
+// PUBLIC FUNCTIONS
// ------------------------------------------------------------------
bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link)
{
@@ -356,141 +340,68 @@ out:
return ret;
}
-void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t result)
+/*
+ * Handle DP BW allocation status register
+ *
+ * @link: pointer to the dc_link struct instance
+ * @status: content of DP tunneling status DPCD register
+ *
+ * return: none
+ */
+void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status)
{
- int bw_needed = 0;
- int estimated = 0;
-
- if (!get_bw_alloc_proceed_flag((link)))
- return;
-
- switch (result) {
-
- case DPIA_BW_REQ_FAILED:
-
- /*
- * Ideally, we shouldn't run into this case as we always validate available
- * bandwidth and request within that limit
- */
- estimated = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
-
- DC_LOG_ERROR("%s: BW REQ FAILURE for DP-TX Request for link(%d)\n",
- __func__, link->link_index);
- DC_LOG_ERROR("%s: current estimated_bw(%d), new estimated_bw(%d)\n",
- __func__, link->dpia_bw_alloc_config.estimated_bw, estimated);
-
- /* Update the new Estimated BW value updated by CM */
- link->dpia_bw_alloc_config.estimated_bw = estimated;
-
- /* Allocate the previously requested bandwidth */
- set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.estimated_bw);
-
- /*
- * If FAIL then it is either:
- * 1. Due to DP-Tx trying to allocate more than available i.e. it failed locally
- * => get estimated and allocate that
- * 2. Due to the fact that DP-Tx tried to allocated ESTIMATED BW and failed then
- * CM will have to update 0xE0023 with new ESTIMATED BW value.
- */
- break;
-
- case DPIA_BW_REQ_SUCCESS:
-
- bw_needed = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
-
- DC_LOG_DEBUG("%s: BW REQ SUCCESS for DP-TX Request for link(%d)\n",
- __func__, link->link_index);
- DC_LOG_DEBUG("%s: current allocated_bw(%d), new allocated_bw(%d)\n",
- __func__, link->dpia_bw_alloc_config.allocated_bw, bw_needed);
-
- link->dpia_bw_alloc_config.allocated_bw = bw_needed;
-
- link->dpia_bw_alloc_config.response_ready = true;
- break;
-
- case DPIA_EST_BW_CHANGED:
+ if (status & DP_TUNNELING_BW_REQUEST_SUCCEEDED) {
+ DC_LOG_DEBUG("%s: BW Allocation request succeeded on link(%d)",
+ __func__, link->link_index);
+ } else if (status & DP_TUNNELING_BW_REQUEST_FAILED) {
+ link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
- estimated = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+ DC_LOG_DEBUG("%s: BW Allocation request failed on link(%d) allocated/estimated BW=%d",
+ __func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw);
- DC_LOG_DEBUG("%s: ESTIMATED BW CHANGED for link(%d)\n",
- __func__, link->link_index);
- DC_LOG_DEBUG("%s: current estimated_bw(%d), new estimated_bw(%d)\n",
- __func__, link->dpia_bw_alloc_config.estimated_bw, estimated);
-
- link->dpia_bw_alloc_config.estimated_bw = estimated;
- break;
+ link_dpia_send_bw_alloc_request(link, link->dpia_bw_alloc_config.estimated_bw);
+ } else if (status & DP_TUNNELING_ESTIMATED_BW_CHANGED) {
+ link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
- case DPIA_BW_ALLOC_CAPS_CHANGED:
-
- DC_LOG_ERROR("%s: BW ALLOC CAPABILITY CHANGED to Disabled for link(%d)\n",
- __func__, link->link_index);
- link->dpia_bw_alloc_config.bw_alloc_enabled = false;
- break;
+ DC_LOG_DEBUG("%s: Estimated BW changed on link(%d) new estimated BW=%d",
+ __func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw);
}
-}
-int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw)
-{
- int ret = 0;
- uint8_t timeout = 10;
-
- if (!(link && DISPLAY_ENDPOINT_USB4_DPIA == link->ep_type
- && link->dpia_bw_alloc_config.bw_alloc_enabled))
- goto out;
-
- //1. Hot Plug
- if (link->hpd_status && peak_bw > 0) {
- // If DP over USB4 then we need to check BW allocation
- link->dpia_bw_alloc_config.link_max_bw = peak_bw;
- set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.link_max_bw);
+ core_link_write_dpcd(
+ link, DP_TUNNELING_STATUS,
+ &status, sizeof(status));
+}
- do {
- if (timeout > 0)
- timeout--;
- else
- break;
- msleep(10);
- } while (!get_cm_response_ready_flag(link));
+/*
+ * Handle the DP Bandwidth allocation for DPIA
+ *
+ */
+void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw)
+{
+ if (link && link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->dpia_bw_alloc_config.bw_alloc_enabled) {
+ //1. Hot Plug
+ if (link->hpd_status && peak_bw > 0) {
+ // If DP over USB4 then we need to check BW allocation
+ link->dpia_bw_alloc_config.link_max_bw = peak_bw;
- if (!timeout)
- ret = 0;// ERROR TIMEOUT waiting for response for allocating bw
- else if (link->dpia_bw_alloc_config.allocated_bw > 0)
- ret = link->dpia_bw_alloc_config.allocated_bw;
+ link_dpia_send_bw_alloc_request(link, peak_bw);
+ }
+ //2. Cold Unplug
+ else if (!link->hpd_status)
+ dpia_bw_alloc_unplug(link);
}
- //2. Cold Unplug
- else if (!link->hpd_status)
- dpia_bw_alloc_unplug(link);
-
-out:
- return ret;
}
-bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw)
-{
- bool ret = false;
- uint8_t timeout = 10;
+void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw)
+{
DC_LOG_DEBUG("%s: ENTER: link(%d), hpd_status(%d), current allocated_bw(%d), req_bw(%d)\n",
__func__, link->link_index, link->hpd_status,
link->dpia_bw_alloc_config.allocated_bw, req_bw);
- if (!get_bw_alloc_proceed_flag(link))
- goto out;
-
- set_usb4_req_bw_req(link, req_bw);
- do {
- if (timeout > 0)
- timeout--;
- else
- break;
- msleep(10);
- } while (!get_cm_response_ready_flag(link));
-
- if (timeout)
- ret = true;
-
-out:
- DC_LOG_DEBUG("%s: EXIT: timeout(%d), ret(%d)\n", __func__, timeout, ret);
- return ret;
+ if (link_dp_is_bw_alloc_available(link))
+ link_dpia_send_bw_alloc_request(link, req_bw);
+ else
+ DC_LOG_DEBUG("%s: Not able to send the BW Allocation request", __func__);
}
bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const unsigned int num_dpias)
@@ -541,7 +452,7 @@ int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link)
{
int dp_overhead = 0, link_mst_overhead = 0;
- if (!get_bw_alloc_proceed_flag((link)))
+ if (!link_dp_is_bw_alloc_available(link))
return dp_overhead;
/* if its mst link, add MTPH overhead */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
index 3b6d8494f9d5..6df9b946b00f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
@@ -59,9 +59,8 @@ bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link);
* @link: pointer to the dc_link struct instance
* @req_bw: Bw requested by the stream
*
- * return: true if allocated successfully
*/
-bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw);
+void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw);
/*
* Handle the USB4 BW Allocation related functionality here:
@@ -71,21 +70,8 @@ bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int r
* @link: pointer to the dc_link struct instance
* @peak_bw: Peak bw used by the link/sink
*
- * return: allocated bw else return 0
*/
-int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw);
-
-/*
- * Handle function for when the status of the Request above is complete.
- * We will find out the result of allocating on CM and update structs.
- *
- * @link: pointer to the dc_link struct instance
- * @bw: Allocated or Estimated BW depending on the result
- * @result: Response type
- *
- * return: none
- */
-void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t result);
+void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw);
/*
* Handle the validation of total BW here and confirm that the bw used by each
@@ -108,4 +94,14 @@ bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed, const unsigned
*/
int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link);
+/*
+ * Handle DP BW allocation status register
+ *
+ * @link: pointer to the dc_link struct instance
+ * @status: content of DP tunneling status register
+ *
+ * return: none
+ */
+void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status);
+
#endif /* DC_INC_LINK_DP_DPIA_BW_H_ */
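
The net effect of the dpia_bw rework is that bandwidth allocation is no longer a blocking request-and-poll: the response_ready flag and the msleep() loops are gone, the driver simply writes REQUESTED_BW via link_dpia_send_bw_alloc_request(), and the outcome arrives later through the DP tunneling IRQ, which forwards the DP_TUNNELING_STATUS bits to link_dp_dpia_handle_bw_alloc_status() (see link_dp_irq_handler.c below). A condensed sketch of that asynchronous round trip, with the DPCD accesses elided:

    /* request path (hot plug / stream enable): fire and forget */
    link_dpia_send_bw_alloc_request(link, req_bw);

    /* completion path, invoked from the HPD_RX tunneling IRQ with the status byte */
    if (status & DP_TUNNELING_BW_REQUEST_SUCCEEDED) {
        /* requested BW was granted, nothing to adjust */
    } else if (status & DP_TUNNELING_BW_REQUEST_FAILED) {
        link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
        link_dpia_send_bw_alloc_request(link, link->dpia_bw_alloc_config.estimated_bw);
    } else if (status & DP_TUNNELING_ESTIMATED_BW_CHANGED) {
        link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
    }
    /* the handler then writes the status byte back to DP_TUNNELING_STATUS to clear it */
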
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index a08403c022ea..5be00e4ce10b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -37,6 +37,7 @@
#include "link/accessories/link_dp_trace.h"
#include "link/link_dpms.h"
#include "dm_helpers.h"
+#include "link_dp_dpia_bw.h"
#define DC_LOGGER \
link->ctx->logger
@@ -286,6 +287,30 @@ void dp_handle_link_loss(struct dc_link *link)
}
}
+static void dp_handle_tunneling_irq(struct dc_link *link)
+{
+ enum dc_status retval;
+ uint8_t tunneling_status = 0;
+
+ retval = core_link_read_dpcd(
+ link, DP_TUNNELING_STATUS,
+ &tunneling_status,
+ sizeof(tunneling_status));
+
+ if (retval == DC_OK) {
+ DC_LOG_HW_HPD_IRQ("%s: Got DP tunneling status on link %d status=0x%x",
+ __func__, link->link_index, tunneling_status);
+
+ if (tunneling_status & DP_TUNNELING_BW_ALLOC_BITS_MASK)
+ link_dp_dpia_handle_bw_alloc_status(link, tunneling_status);
+ }
+
+ tunneling_status = DP_TUNNELING_IRQ;
+ core_link_write_dpcd(
+ link, DP_LINK_SERVICE_IRQ_VECTOR_ESI0,
+ &tunneling_status, 1);
+}
+
static void read_dpcd204h_on_irq_hpd(struct dc_link *link, union hpd_irq_data *irq_data)
{
enum dc_status retval;
@@ -319,13 +344,19 @@ enum dc_status dp_read_hpd_rx_irq_data(
*
* For DP 1.4 we need to read those from 2002h range.
*/
- if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14)
+ if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14) {
retval = core_link_read_dpcd(
link,
DP_SINK_COUNT,
irq_data->raw,
- sizeof(union hpd_irq_data));
- else {
+ DP_SINK_STATUS - DP_SINK_COUNT + 1);
+
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ retval = core_link_read_dpcd(
+ link, DP_LINK_SERVICE_IRQ_VECTOR_ESI0,
+ &irq_data->bytes.link_service_irq_esi0.raw, 1);
+ }
+ } else {
/* Read 14 bytes in a single read and then copy only the required fields.
* This is more efficient than doing it in two separate AUX reads. */
@@ -346,6 +377,7 @@ enum dc_status dp_read_hpd_rx_irq_data(
irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI];
irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI];
irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.link_service_irq_esi0.raw = tmp[DP_LINK_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI];
/*
* This display doesn't have correct values in DPCD200Eh.
@@ -488,6 +520,11 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
dp_trace_link_loss_increment(link);
}
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ if (hpd_irq_dpcd_data.bytes.link_service_irq_esi0.bits.DP_LINK_TUNNELING_IRQ)
+ dp_handle_tunneling_irq(link);
+ }
+
if (link->type == dc_connection_sst_branch &&
hpd_irq_dpcd_data.bytes.sink_cnt.bits.SINK_COUNT
!= link->dpcd_sink_count)
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
index 2c73ac87cd66..49521ac4b0e8 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
@@ -75,7 +75,8 @@ void dp_disable_link_phy(struct dc_link *link,
struct dc *dc = link->ctx->dc;
if (!link->wa_flags.dp_keep_receiver_powered &&
- !link->skip_implict_edp_power_control)
+ !link->skip_implict_edp_power_control &&
+ link->type != dc_connection_none)
dpcd_write_rx_power_ctrl(link, false);
dc->hwss.disable_link_output(link, link_res, signal);
@@ -141,11 +142,12 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource
* if the sink supports it and leave it enabled on link.
* If FEC is not supported, disable it.
*/
- struct link_encoder *link_enc = NULL;
+ struct link_encoder *link_enc = link_res->dio_link_enc;
enum dc_status status = DC_OK;
uint8_t fec_config = 0;
- link_enc = link_enc_cfg_get_link_enc(link);
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
ASSERT(link_enc);
if (link_enc->funcs->fec_set_ready == NULL)
return DC_NOT_SUPPORTED;
@@ -163,8 +165,9 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource
} else {
if (link->fec_state == dc_link_fec_ready) {
fec_config = 0;
- core_link_write_dpcd(link, DP_FEC_CONFIGURATION,
- &fec_config, sizeof(fec_config));
+ if (link->type != dc_connection_none)
+ core_link_write_dpcd(link, DP_FEC_CONFIGURATION,
+ &fec_config, sizeof(fec_config));
link_enc->funcs->fec_set_ready(link_enc, false);
link->fec_state = dc_link_fec_not_ready;
@@ -174,13 +177,14 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource
return status;
}
-void dp_set_fec_enable(struct dc_link *link, bool enable)
+void dp_set_fec_enable(struct dc_link *link, const struct link_resource *link_res, bool enable)
{
- struct link_encoder *link_enc = NULL;
+ struct link_encoder *link_enc = link_res->dio_link_enc;
- link_enc = link_enc_cfg_get_link_enc(link);
- ASSERT(link_enc);
- if (link_enc->funcs->fec_set_enable == NULL)
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+
+ if (link_enc == NULL || link_enc->funcs == NULL || link_enc->funcs->fec_set_enable == NULL)
return;
if (enable && dp_should_enable_fec(link)) {
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
index 1eb0619d6710..ab1c1f8f1f8b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
@@ -52,7 +52,8 @@ void dp_set_drive_settings(
enum dc_status dp_set_fec_ready(struct dc_link *link,
const struct link_resource *link_res, bool ready);
-void dp_set_fec_enable(struct dc_link *link, bool enable);
+void dp_set_fec_enable(struct dc_link *link,
+ const struct link_resource *link_res, bool enable);
void dpcd_write_rx_power_ctrl(struct dc_link *link, bool on);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index 88d4288cde0f..ef358afdfb65 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -736,6 +736,8 @@ void override_training_settings(
lt_settings->pre_emphasis = overrides->pre_emphasis;
if (overrides->post_cursor2 != NULL)
lt_settings->post_cursor2 = overrides->post_cursor2;
+ if (link->wa_flags.force_dp_ffe_preset && !dp_is_lttpr_present(link))
+ lt_settings->ffe_preset = &link->forced_dp_ffe_preset;
if (overrides->ffe_preset != NULL)
lt_settings->ffe_preset = overrides->ffe_preset;
/* Override HW lane settings with BIOS forced values if present */
@@ -799,19 +801,23 @@ enum dc_dp_training_pattern decide_cr_training_pattern(
}
enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_settings)
{
- struct link_encoder *link_enc;
+ struct link_encoder *link_enc = link_res->dio_link_enc;
struct encoder_feature_support *enc_caps;
struct dpcd_caps *rx_caps = &link->dpcd_caps;
enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_2;
- link_enc = link_enc_cfg_get_link_enc(link);
- ASSERT(link_enc);
- enc_caps = &link_enc->features;
-
switch (link_dp_get_encoding_format(link_settings)) {
case DP_8b_10b_ENCODING:
+ if (!link->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
+
+ if (!link_enc)
+ break;
+
+ enc_caps = &link_enc->features;
if (enc_caps->flags.bits.IS_TPS4_CAPABLE &&
rx_caps->max_down_spread.bits.TPS4_SUPPORTED)
pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
@@ -884,13 +890,14 @@ void dp_decide_lane_settings(
void dp_decide_training_settings(
struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_settings,
struct link_training_settings *lt_settings)
{
if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING)
- decide_8b_10b_training_settings(link, link_settings, lt_settings);
+ decide_8b_10b_training_settings(link, link_res, link_settings, lt_settings);
else if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING)
- decide_128b_132b_training_settings(link, link_settings, lt_settings);
+ decide_128b_132b_training_settings(link, link_res, link_settings, lt_settings);
}
@@ -1554,6 +1561,7 @@ enum link_training_result dp_perform_link_training(
/* decide training settings */
dp_decide_training_settings(
link,
+ link_res,
link_settings,
&lt_settings);
@@ -1567,7 +1575,8 @@ enum link_training_result dp_perform_link_training(
/* configure link prior to entering training mode */
dpcd_configure_lttpr_mode(link, &lt_settings);
- dp_set_fec_ready(link, link_res, lt_settings.should_set_fec_ready);
+ if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING)
+ dp_set_fec_ready(link, link_res, lt_settings.should_set_fec_ready);
dpcd_configure_channel_coding(link, &lt_settings);
/* enter training mode:
@@ -1780,13 +1789,10 @@ bool perform_link_training_with_retries(
is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) &&
(cur_link_settings.lane_count <= LANE_COUNT_ONE));
- if (is_link_bw_low) {
+ if (is_link_bw_low)
DC_LOG_WARNING(
"%s: Link(%d) bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n",
__func__, link->link_index, req_bw, link_bw);
-
- return false;
- }
}
msleep(delay_between_attempts);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
index 0b18aa35c33c..574b083e0936 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
@@ -104,6 +104,7 @@ void start_clock_recovery_pattern_early(struct dc_link *link,
void dp_decide_training_settings(
struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_settings,
struct link_training_settings *lt_settings);
@@ -117,6 +118,7 @@ enum dc_dp_training_pattern decide_cr_training_pattern(
const struct dc_link_settings *link_settings);
enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_settings);
enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c
index db87cfe37b5c..11565f187ac7 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c
@@ -204,6 +204,7 @@ enum link_training_result dp_perform_128b_132b_link_training(
struct link_training_settings legacy_settings;
decide_8b_10b_training_settings(link,
+ link_res,
&lt_settings->link_settings,
&legacy_settings);
return dp_perform_8b_10b_link_training(link, link_res, &legacy_settings);
@@ -227,6 +228,7 @@ enum link_training_result dp_perform_128b_132b_link_training(
}
void decide_128b_132b_training_settings(struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_settings,
struct link_training_settings *lt_settings)
{
@@ -238,7 +240,7 @@ void decide_128b_132b_training_settings(struct dc_link *link,
LINK_SPREAD_05_DOWNSPREAD_30KHZ;
lt_settings->pattern_for_cr = decide_cr_training_pattern(link_settings);
- lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_settings);
+ lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_res, link_settings);
lt_settings->eq_pattern_time = 2500;
lt_settings->eq_wait_time_limit = 400000;
lt_settings->eq_loop_count_limit = 20;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h
index 2147f24efc8b..901a42edafa1 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h
@@ -34,6 +34,7 @@ enum link_training_result dp_perform_128b_132b_link_training(
struct link_training_settings *lt_settings);
void decide_128b_132b_training_settings(struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_settings,
struct link_training_settings *lt_settings);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
index 3bdce32a85e3..34d2e097ca2e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
@@ -36,7 +36,8 @@
link->ctx->logger
static int32_t get_cr_training_aux_rd_interval(struct dc_link *link,
- const struct dc_link_settings *link_settings)
+ const struct dc_link_settings *link_settings,
+ enum lttpr_mode lttpr_mode)
{
union training_aux_rd_interval training_rd_interval;
uint32_t wait_in_micro_secs = 100;
@@ -49,6 +50,8 @@ static int32_t get_cr_training_aux_rd_interval(struct dc_link *link,
DP_TRAINING_AUX_RD_INTERVAL,
(uint8_t *)&training_rd_interval,
sizeof(training_rd_interval));
+ if (lttpr_mode != LTTPR_MODE_NON_TRANSPARENT)
+ wait_in_micro_secs = 400;
if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
}
@@ -90,7 +93,8 @@ static uint32_t get_eq_training_aux_rd_interval(
}
void decide_8b_10b_training_settings(
- struct dc_link *link,
+ struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_setting,
struct link_training_settings *lt_settings)
{
@@ -110,15 +114,15 @@ void decide_8b_10b_training_settings(
*/
lt_settings->link_settings.link_spread = link->dp_ss_off ?
LINK_SPREAD_DISABLED : LINK_SPREAD_05_DOWNSPREAD_30KHZ;
- lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting);
lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting);
lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting);
- lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting);
+ lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_res, link_setting);
lt_settings->enhanced_framing = 1;
lt_settings->should_set_fec_ready = true;
lt_settings->disallow_per_lane_settings = true;
lt_settings->always_match_dpcd_with_hw_lane_settings = true;
lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link);
+ lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting, lt_settings->lttpr_mode);
dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
}
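
The key detail in the 8b/10b hunk above is that the clock-recovery AUX read interval now depends on the LTTPR mode, which is why cr_pattern_time is computed only after dp_decide_8b_10b_lttpr_mode() has filled in lt_settings->lttpr_mode. A minimal sketch of the resulting priority order, assuming the DP_TRAINING_AUX_RD_INTERVAL read into training_rd_interval succeeded:

    uint32_t wait_us = 100;                    /* default per-iteration CR wait */

    if (lttpr_mode != LTTPR_MODE_NON_TRANSPARENT)
        wait_us = 400;                         /* new: longer wait unless in non-transparent LTTPR mode */
    if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
        wait_us = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;   /* DPCD-reported value still wins */
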
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h
index d26de15ce954..ea0de701d83f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h
@@ -54,7 +54,8 @@ enum link_training_result perform_8b_10b_channel_equalization_sequence(
enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link);
void decide_8b_10b_training_settings(
- struct dc_link *link,
+ struct dc_link *link,
+ const struct link_resource *link_res,
const struct dc_link_settings *link_setting,
struct link_training_settings *lt_settings);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c
index 4c6b886a9da8..f99d26290bc0 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c
@@ -39,6 +39,7 @@ bool dp_perform_link_training_skip_aux(
dp_decide_training_settings(
link,
+ link_res,
link_setting,
&lt_settings);
override_training_settings(
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
index 39e4b7dc9588..603537ffd128 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
@@ -110,6 +110,7 @@ static enum link_training_result dpia_configure_link(
dp_decide_training_settings(
link,
+ link_res,
link_setting,
lt_settings);
@@ -129,11 +130,14 @@ static enum link_training_result dpia_configure_link(
if (status != DC_OK && link->is_hpd_pending)
return LINK_TRAINING_ABORT;
- if (link->preferred_training_settings.fec_enable != NULL)
- fec_enable = *link->preferred_training_settings.fec_enable;
- else
- fec_enable = true;
- status = dp_set_fec_ready(link, link_res, fec_enable);
+ if (link_dp_get_encoding_format(link_setting) == DP_8b_10b_ENCODING) {
+ if (link->preferred_training_settings.fec_enable != NULL)
+ fec_enable = *link->preferred_training_settings.fec_enable;
+ else
+ fec_enable = true;
+ status = dp_set_fec_ready(link, link_res, fec_enable);
+ }
+
if (status != DC_OK && link->is_hpd_pending)
return LINK_TRAINING_ABORT;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
index ccf8096dde29..ce174ce5579c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
@@ -270,7 +270,8 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
rate = get_dpcd_link_rate(&lt_settings->link_settings);
- if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED) {
+ // Only perform toggle if FIXED_VS LTTPR reports no IEEE OUI
+ if (memcmp("\x0,\x0,\x0", &link->dpcd_caps.lttpr_caps.lttpr_ieee_oui[0], 3) == 0) {
/* Vendor specific: Toggle link rate */
toggle_rate = (rate == 0x6) ? 0xA : 0x6;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index e0e3bb865359..1e4adbc764ea 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -675,6 +675,18 @@ bool edp_setup_psr(struct dc_link *link,
if (!link)
return false;
+ //Clear PSR cfg
+ memset(&psr_configuration, 0, sizeof(psr_configuration));
+ dm_helpers_dp_write_dpcd(
+ link->ctx,
+ link,
+ DP_PSR_EN_CFG,
+ &psr_configuration.raw,
+ sizeof(psr_configuration.raw));
+
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED)
+ return false;
+
dc = link->ctx->dc;
dmcu = dc->res_pool->dmcu;
psr = dc->res_pool->psr;
@@ -685,9 +697,6 @@ bool edp_setup_psr(struct dc_link *link,
if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
return false;
-
- memset(&psr_configuration, 0, sizeof(psr_configuration));
-
psr_configuration.bits.ENABLE = 1;
psr_configuration.bits.CRC_VERIFICATION = 1;
psr_configuration.bits.FRAME_CAPTURE_INDICATION =
@@ -950,6 +959,16 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
if (!link)
return false;
+ //Clear Replay config
+ dm_helpers_dp_write_dpcd(link->ctx, link,
+ DP_SINK_PR_ENABLE_AND_CONFIGURATION,
+ (uint8_t *)&(replay_config.raw), sizeof(uint8_t));
+
+ if (!(link->replay_settings.config.replay_supported))
+ return false;
+
+ link->replay_settings.config.replay_error_status.raw = 0;
+
dc = link->ctx->dc;
replay = dc->res_pool->replay;
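
Both edp_setup_psr() and edp_setup_replay() above now write a zeroed configuration to the sink before bailing out on an unsupported panel, so stale enable bits from a previous session are always cleared. A small sketch of that ordering follows; dpcd_write() and the register address are hypothetical stand-ins for the AUX/DPCD helpers used in the patch.

    /* Sketch of "clear sink config first, then check support" (hypothetical helpers). */
    #include <stdbool.h>
    #include <stdio.h>

    #define SINK_FEATURE_EN_CFG 0x170       /* hypothetical DPCD-style config register */

    static void dpcd_write(unsigned int addr, unsigned char val)
    {
            printf("write 0x%03x <- 0x%02x\n", addr, val);  /* stand-in for an AUX write */
    }

    static bool setup_feature(bool supported)
    {
            /* Always clear any stale enable bits left over from a previous session... */
            dpcd_write(SINK_FEATURE_EN_CFG, 0);

            /* ...and only then bail out if the feature is not supported on this link. */
            if (!supported)
                    return false;

            dpcd_write(SINK_FEATURE_EN_CFG, 0x01);  /* enable with the real configuration */
            return true;
    }

    int main(void)
    {
            setup_feature(false);
            setup_feature(true);
            return 0;
    }
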
diff --git a/drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile b/drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile
index eab196c57c6c..2d4b7a85847d 100644
--- a/drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile
@@ -50,5 +50,5 @@ MMHUBBUB_DCN35 = dcn35_mmhubbub.o
AMD_DAL_MMHUBBUB_DCN35 = $(addprefix $(AMDDALPATH)/dc/mmhubbub/dcn35/,$(MMHUBBUB_DCN35))
AMD_DISPLAY_FILES += $(AMD_DAL_MMHUBBUB_DCN35)
-
endif
+
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/Makefile b/drivers/gpu/drm/amd/display/dc/mpc/Makefile
index 5402c3529f5e..1e2e66508192 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/mpc/Makefile
@@ -68,5 +68,5 @@ MPC_DCN401 = dcn401_mpc.o
AMD_DAL_MPC_DCN401 = $(addprefix $(AMDDALPATH)/dc/mpc/dcn401/,$(MPC_DCN401))
AMD_DISPLAY_FILES += $(AMD_DAL_MPC_DCN401)
-
endif
+
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c
index f2f55565e98a..b23c64004dd5 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c
@@ -142,22 +142,6 @@ struct mpcc *mpc1_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
return NULL;
}
-bool mpc1_is_mpcc_idle(struct mpc *mpc, int mpcc_id)
-{
- struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
- unsigned int top_sel;
- unsigned int opp_id;
- unsigned int idle;
-
- REG_GET(MPCC_TOP_SEL[mpcc_id], MPCC_TOP_SEL, &top_sel);
- REG_GET(MPCC_OPP_ID[mpcc_id], MPCC_OPP_ID, &opp_id);
- REG_GET(MPCC_STATUS[mpcc_id], MPCC_IDLE, &idle);
- if (top_sel == 0xf && opp_id == 0xf && idle)
- return true;
- else
- return false;
-}
-
void mpc1_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.h
index dbfffc6383dc..874e36e39e1b 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.h
@@ -173,10 +173,6 @@ void mpc1_update_stereo_mix(
struct mpcc_sm_cfg *sm_cfg,
int mpcc_id);
-bool mpc1_is_mpcc_idle(
- struct mpc *mpc,
- int mpcc_id);
-
void mpc1_assert_mpcc_idle_before_connect(
struct mpc *mpc,
int mpcc_id);
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
index 37ab5a4eefc7..ad67197557ca 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
@@ -40,14 +40,14 @@
#define FN(reg_name, field_name) \
mpc401->mpc_shift->field_name, mpc401->mpc_mask->field_name
-static void mpc401_update_3dlut_fast_load_select(struct mpc *mpc, int mpcc_id, int hubp_idx)
+void mpc401_update_3dlut_fast_load_select(struct mpc *mpc, int mpcc_id, int hubp_idx)
{
struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);
REG_SET(MPCC_MCM_3DLUT_FAST_LOAD_SELECT[mpcc_id], 0, MPCC_MCM_3DLUT_FL_SEL, hubp_idx);
}
-static void mpc401_get_3dlut_fast_load_status(struct mpc *mpc, int mpcc_id, uint32_t *done, uint32_t *soft_underflow, uint32_t *hard_underflow)
+void mpc401_get_3dlut_fast_load_status(struct mpc *mpc, int mpcc_id, uint32_t *done, uint32_t *soft_underflow, uint32_t *hard_underflow)
{
struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
index af44054c2477..ce6fbcf14d7a 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
@@ -63,7 +63,7 @@
uint32_t MPC_MCM_SECOND_GAMUT_REMAP_C31_C32_B[MAX_MPCC]; \
uint32_t MPC_MCM_SECOND_GAMUT_REMAP_C33_C34_B[MAX_MPCC]; \
uint32_t MPCC_MCM_3DLUT_FAST_LOAD_SELECT[MAX_MPCC]; \
- uint32_t MPCC_MCM_3DLUT_FAST_LOAD_STATUS[MAX_MPCC]
+ uint32_t MPCC_MCM_3DLUT_FAST_LOAD_STATUS[MAX_MPCC];
#define MPC_COMMON_MASK_SH_LIST_DCN4_01(mask_sh) \
MPC_COMMON_MASK_SH_LIST_DCN32(mask_sh), \
@@ -183,7 +183,7 @@ struct dcn401_mpc_mask {
};
struct dcn401_mpc_registers {
- MPC_REG_VARIABLE_LIST_DCN4_01;
+ MPC_REG_VARIABLE_LIST_DCN4_01
};
struct dcn401_mpc {
@@ -236,4 +236,28 @@ void mpc401_get_gamut_remap(
int mpcc_id,
struct mpc_grph_gamut_adjustment *adjust);
+void mpc401_update_3dlut_fast_load_select(
+ struct mpc *mpc,
+ int mpcc_id,
+ int hubp_idx);
+
+void mpc401_get_3dlut_fast_load_status(
+ struct mpc *mpc,
+ int mpcc_id,
+ uint32_t *done,
+ uint32_t *soft_underflow,
+ uint32_t *hard_underflow);
+
+void mpc401_update_3dlut_fast_load_select(
+ struct mpc *mpc,
+ int mpcc_id,
+ int hubp_idx);
+
+void mpc401_get_3dlut_fast_load_status(
+ struct mpc *mpc,
+ int mpcc_id,
+ uint32_t *done,
+ uint32_t *soft_underflow,
+ uint32_t *hard_underflow);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
index 19d5ebc6763c..6f7b0f816f2a 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
@@ -1312,7 +1312,7 @@ bool optc1_get_hw_timing(struct timing_generator *tg,
if (tg == NULL || hw_crtc_timing == NULL)
return false;
- optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
+ optc1_read_otg_state(tg, &s);
hw_crtc_timing->h_total = s.h_total + 1;
hw_crtc_timing->h_addressable = s.h_total - ((s.h_total - s.h_blank_start) + s.h_blank_end);
@@ -1328,9 +1328,11 @@ bool optc1_get_hw_timing(struct timing_generator *tg,
}
-void optc1_read_otg_state(struct optc *optc1,
+void optc1_read_otg_state(struct timing_generator *optc,
struct dcn_otg_state *s)
{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
REG_GET(OTG_CONTROL,
OTG_MASTER_EN, &s->otg_enabled);
@@ -1663,6 +1665,7 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.setup_manual_trigger = optc1_setup_manual_trigger,
.get_hw_timing = optc1_get_hw_timing,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
+ .read_otg_state = optc1_read_otg_state,
};
void dcn10_timing_generator_init(struct optc *optc1)
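
The change above retypes optc1_read_otg_state() to take the generic struct timing_generator pointer, recovers the struct optc implementation inside via DCN10TG_FROM_TG(), and exposes the function through the .read_otg_state entry of the funcs table. A minimal sketch of that ops-table pattern is shown below; all names are hypothetical and only mirror the shape of the code, not the DC interfaces.

    /* Minimal sketch of the ops-table + container_of pattern (hypothetical names). */
    #include <stddef.h>
    #include <stdio.h>

    struct tg;                              /* opaque "base" handle passed through the ops table */

    struct tg_funcs {
            void (*read_state)(struct tg *tg, unsigned int *frame_count);
    };

    struct tg {
            const struct tg_funcs *funcs;
    };

    struct my_tg {                          /* "derived" implementation wrapping the base */
            struct tg base;
            unsigned int frames;
    };

    #define MY_TG_FROM_TG(p) ((struct my_tg *)((char *)(p) - offsetof(struct my_tg, base)))

    static void my_read_state(struct tg *tg, unsigned int *frame_count)
    {
            struct my_tg *me = MY_TG_FROM_TG(tg);   /* recover the implementation, as DCN10TG_FROM_TG does */

            *frame_count = me->frames;
    }

    static const struct tg_funcs my_funcs = { .read_state = my_read_state };

    int main(void)
    {
            struct my_tg t = { .base.funcs = &my_funcs, .frames = 42 };
            unsigned int fc;

            t.base.funcs->read_state(&t.base, &fc); /* callers only see the base + ops table */
            printf("%u\n", fc);
            return 0;
    }
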
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
index 159172178d51..8b2a8455eb56 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
@@ -104,111 +104,115 @@
SRI(OTG_MANUAL_FLOW_CONTROL, OTG, inst)
+#define OPTC_REG_VARIABLE_LIST_DCN \
+ uint32_t OTG_GLOBAL_CONTROL1; \
+ uint32_t OTG_GLOBAL_CONTROL2; \
+ uint32_t OTG_VERT_SYNC_CONTROL; \
+ uint32_t OTG_MASTER_UPDATE_MODE; \
+ uint32_t OTG_GSL_CONTROL; \
+ uint32_t OTG_VSTARTUP_PARAM; \
+ uint32_t OTG_VUPDATE_PARAM; \
+ uint32_t OTG_VREADY_PARAM; \
+ uint32_t OTG_BLANK_CONTROL; \
+ uint32_t OTG_MASTER_UPDATE_LOCK; \
+ uint32_t OTG_GLOBAL_CONTROL0; \
+ uint32_t OTG_DOUBLE_BUFFER_CONTROL; \
+ uint32_t OTG_H_TOTAL; \
+ uint32_t OTG_H_BLANK_START_END; \
+ uint32_t OTG_H_SYNC_A; \
+ uint32_t OTG_H_SYNC_A_CNTL; \
+ uint32_t OTG_H_TIMING_CNTL; \
+ uint32_t OTG_V_TOTAL; \
+ uint32_t OTG_V_BLANK_START_END; \
+ uint32_t OTG_V_SYNC_A; \
+ uint32_t OTG_V_SYNC_A_CNTL; \
+ uint32_t OTG_INTERLACE_CONTROL; \
+ uint32_t OTG_CONTROL; \
+ uint32_t OTG_STEREO_CONTROL; \
+ uint32_t OTG_3D_STRUCTURE_CONTROL; \
+ uint32_t OTG_STEREO_STATUS; \
+ uint32_t OTG_V_TOTAL_MAX; \
+ uint32_t OTG_V_TOTAL_MID; \
+ uint32_t OTG_V_TOTAL_MIN; \
+ uint32_t OTG_V_TOTAL_CONTROL; \
+ uint32_t OTG_V_COUNT_STOP_CONTROL; \
+ uint32_t OTG_V_COUNT_STOP_CONTROL2; \
+ uint32_t OTG_TRIGA_CNTL; \
+ uint32_t OTG_TRIGA_MANUAL_TRIG; \
+ uint32_t OTG_MANUAL_FLOW_CONTROL; \
+ uint32_t OTG_FORCE_COUNT_NOW_CNTL; \
+ uint32_t OTG_STATIC_SCREEN_CONTROL; \
+ uint32_t OTG_STATUS_FRAME_COUNT; \
+ uint32_t OTG_STATUS; \
+ uint32_t OTG_STATUS_POSITION; \
+ uint32_t OTG_NOM_VERT_POSITION; \
+ uint32_t OTG_BLACK_COLOR; \
+ uint32_t OTG_TEST_PATTERN_PARAMETERS; \
+ uint32_t OTG_TEST_PATTERN_CONTROL; \
+ uint32_t OTG_TEST_PATTERN_COLOR; \
+ uint32_t OTG_CLOCK_CONTROL; \
+ uint32_t OTG_VERTICAL_INTERRUPT0_CONTROL; \
+ uint32_t OTG_VERTICAL_INTERRUPT0_POSITION; \
+ uint32_t OTG_VERTICAL_INTERRUPT1_CONTROL; \
+ uint32_t OTG_VERTICAL_INTERRUPT1_POSITION; \
+ uint32_t OTG_VERTICAL_INTERRUPT2_CONTROL; \
+ uint32_t OTG_VERTICAL_INTERRUPT2_POSITION; \
+ uint32_t OPTC_INPUT_CLOCK_CONTROL; \
+ uint32_t OPTC_DATA_SOURCE_SELECT; \
+ uint32_t OPTC_MEMORY_CONFIG; \
+ uint32_t OPTC_INPUT_GLOBAL_CONTROL; \
+ uint32_t CONTROL; \
+ uint32_t OTG_GSL_WINDOW_X; \
+ uint32_t OTG_GSL_WINDOW_Y; \
+ uint32_t OTG_VUPDATE_KEEPOUT; \
+ uint32_t OTG_CRC_CNTL; \
+ uint32_t OTG_CRC_CNTL2; \
+ uint32_t OTG_CRC0_DATA_RG; \
+ uint32_t OTG_CRC0_DATA_B; \
+ uint32_t OTG_CRC1_DATA_B; \
+ uint32_t OTG_CRC2_DATA_B; \
+ uint32_t OTG_CRC3_DATA_B; \
+ uint32_t OTG_CRC1_DATA_RG; \
+ uint32_t OTG_CRC2_DATA_RG; \
+ uint32_t OTG_CRC3_DATA_RG; \
+ uint32_t OTG_CRC0_WINDOWA_X_CONTROL; \
+ uint32_t OTG_CRC0_WINDOWA_Y_CONTROL; \
+ uint32_t OTG_CRC0_WINDOWB_X_CONTROL; \
+ uint32_t OTG_CRC0_WINDOWB_Y_CONTROL; \
+ uint32_t OTG_CRC1_WINDOWA_X_CONTROL; \
+ uint32_t OTG_CRC1_WINDOWA_Y_CONTROL; \
+ uint32_t OTG_CRC1_WINDOWB_X_CONTROL; \
+ uint32_t OTG_CRC1_WINDOWB_Y_CONTROL; \
+ uint32_t GSL_SOURCE_SELECT; \
+ uint32_t DWB_SOURCE_SELECT; \
+ uint32_t OTG_DSC_START_POSITION; \
+ uint32_t OPTC_DATA_FORMAT_CONTROL; \
+ uint32_t OPTC_BYTES_PER_PIXEL; \
+ uint32_t OPTC_WIDTH_CONTROL; \
+ uint32_t OTG_DRR_CONTROL; \
+ uint32_t OTG_BLANK_DATA_COLOR; \
+ uint32_t OTG_BLANK_DATA_COLOR_EXT; \
+ uint32_t OTG_DRR_TRIGGER_WINDOW; \
+ uint32_t OTG_M_CONST_DTO0; \
+ uint32_t OTG_M_CONST_DTO1; \
+ uint32_t OTG_DRR_V_TOTAL_CHANGE; \
+ uint32_t OTG_GLOBAL_CONTROL4; \
+ uint32_t OTG_CRC0_WINDOWA_X_CONTROL_READBACK; \
+ uint32_t OTG_CRC0_WINDOWA_Y_CONTROL_READBACK; \
+ uint32_t OTG_CRC0_WINDOWB_X_CONTROL_READBACK; \
+ uint32_t OTG_CRC0_WINDOWB_Y_CONTROL_READBACK; \
+ uint32_t OTG_CRC1_WINDOWA_X_CONTROL_READBACK; \
+ uint32_t OTG_CRC1_WINDOWA_Y_CONTROL_READBACK; \
+ uint32_t OTG_CRC1_WINDOWB_X_CONTROL_READBACK; \
+ uint32_t OTG_CRC1_WINDOWB_Y_CONTROL_READBACK; \
+ uint32_t OPTC_CLOCK_CONTROL; \
+ uint32_t OPTC_WIDTH_CONTROL2; \
+ uint32_t OTG_PSTATE_REGISTER; \
+ uint32_t OTG_PIPE_UPDATE_STATUS; \
+ uint32_t INTERRUPT_DEST
+
struct dcn_optc_registers {
- uint32_t OTG_GLOBAL_CONTROL1;
- uint32_t OTG_GLOBAL_CONTROL2;
- uint32_t OTG_VERT_SYNC_CONTROL;
- uint32_t OTG_MASTER_UPDATE_MODE;
- uint32_t OTG_GSL_CONTROL;
- uint32_t OTG_VSTARTUP_PARAM;
- uint32_t OTG_VUPDATE_PARAM;
- uint32_t OTG_VREADY_PARAM;
- uint32_t OTG_BLANK_CONTROL;
- uint32_t OTG_MASTER_UPDATE_LOCK;
- uint32_t OTG_GLOBAL_CONTROL0;
- uint32_t OTG_DOUBLE_BUFFER_CONTROL;
- uint32_t OTG_H_TOTAL;
- uint32_t OTG_H_BLANK_START_END;
- uint32_t OTG_H_SYNC_A;
- uint32_t OTG_H_SYNC_A_CNTL;
- uint32_t OTG_H_TIMING_CNTL;
- uint32_t OTG_V_TOTAL;
- uint32_t OTG_V_BLANK_START_END;
- uint32_t OTG_V_SYNC_A;
- uint32_t OTG_V_SYNC_A_CNTL;
- uint32_t OTG_INTERLACE_CONTROL;
- uint32_t OTG_CONTROL;
- uint32_t OTG_STEREO_CONTROL;
- uint32_t OTG_3D_STRUCTURE_CONTROL;
- uint32_t OTG_STEREO_STATUS;
- uint32_t OTG_V_TOTAL_MAX;
- uint32_t OTG_V_TOTAL_MID;
- uint32_t OTG_V_TOTAL_MIN;
- uint32_t OTG_V_TOTAL_CONTROL;
- uint32_t OTG_V_COUNT_STOP_CONTROL;
- uint32_t OTG_V_COUNT_STOP_CONTROL2;
- uint32_t OTG_TRIGA_CNTL;
- uint32_t OTG_TRIGA_MANUAL_TRIG;
- uint32_t OTG_MANUAL_FLOW_CONTROL;
- uint32_t OTG_FORCE_COUNT_NOW_CNTL;
- uint32_t OTG_STATIC_SCREEN_CONTROL;
- uint32_t OTG_STATUS_FRAME_COUNT;
- uint32_t OTG_STATUS;
- uint32_t OTG_STATUS_POSITION;
- uint32_t OTG_NOM_VERT_POSITION;
- uint32_t OTG_BLACK_COLOR;
- uint32_t OTG_TEST_PATTERN_PARAMETERS;
- uint32_t OTG_TEST_PATTERN_CONTROL;
- uint32_t OTG_TEST_PATTERN_COLOR;
- uint32_t OTG_CLOCK_CONTROL;
- uint32_t OTG_VERTICAL_INTERRUPT0_CONTROL;
- uint32_t OTG_VERTICAL_INTERRUPT0_POSITION;
- uint32_t OTG_VERTICAL_INTERRUPT1_CONTROL;
- uint32_t OTG_VERTICAL_INTERRUPT1_POSITION;
- uint32_t OTG_VERTICAL_INTERRUPT2_CONTROL;
- uint32_t OTG_VERTICAL_INTERRUPT2_POSITION;
- uint32_t OPTC_INPUT_CLOCK_CONTROL;
- uint32_t OPTC_DATA_SOURCE_SELECT;
- uint32_t OPTC_MEMORY_CONFIG;
- uint32_t OPTC_INPUT_GLOBAL_CONTROL;
- uint32_t CONTROL;
- uint32_t OTG_GSL_WINDOW_X;
- uint32_t OTG_GSL_WINDOW_Y;
- uint32_t OTG_VUPDATE_KEEPOUT;
- uint32_t OTG_CRC_CNTL;
- uint32_t OTG_CRC_CNTL2;
- uint32_t OTG_CRC0_DATA_RG;
- uint32_t OTG_CRC0_DATA_B;
- uint32_t OTG_CRC1_DATA_B;
- uint32_t OTG_CRC2_DATA_B;
- uint32_t OTG_CRC3_DATA_B;
- uint32_t OTG_CRC1_DATA_RG;
- uint32_t OTG_CRC2_DATA_RG;
- uint32_t OTG_CRC3_DATA_RG;
- uint32_t OTG_CRC0_WINDOWA_X_CONTROL;
- uint32_t OTG_CRC0_WINDOWA_Y_CONTROL;
- uint32_t OTG_CRC0_WINDOWB_X_CONTROL;
- uint32_t OTG_CRC0_WINDOWB_Y_CONTROL;
- uint32_t OTG_CRC1_WINDOWA_X_CONTROL;
- uint32_t OTG_CRC1_WINDOWA_Y_CONTROL;
- uint32_t OTG_CRC1_WINDOWB_X_CONTROL;
- uint32_t OTG_CRC1_WINDOWB_Y_CONTROL;
- uint32_t GSL_SOURCE_SELECT;
- uint32_t DWB_SOURCE_SELECT;
- uint32_t OTG_DSC_START_POSITION;
- uint32_t OPTC_DATA_FORMAT_CONTROL;
- uint32_t OPTC_BYTES_PER_PIXEL;
- uint32_t OPTC_WIDTH_CONTROL;
- uint32_t OTG_DRR_CONTROL;
- uint32_t OTG_BLANK_DATA_COLOR;
- uint32_t OTG_BLANK_DATA_COLOR_EXT;
- uint32_t OTG_DRR_TRIGGER_WINDOW;
- uint32_t OTG_M_CONST_DTO0;
- uint32_t OTG_M_CONST_DTO1;
- uint32_t OTG_DRR_V_TOTAL_CHANGE;
- uint32_t OTG_GLOBAL_CONTROL4;
- uint32_t OTG_CRC0_WINDOWA_X_CONTROL_READBACK;
- uint32_t OTG_CRC0_WINDOWA_Y_CONTROL_READBACK;
- uint32_t OTG_CRC0_WINDOWB_X_CONTROL_READBACK;
- uint32_t OTG_CRC0_WINDOWB_Y_CONTROL_READBACK;
- uint32_t OTG_CRC1_WINDOWA_X_CONTROL_READBACK;
- uint32_t OTG_CRC1_WINDOWA_Y_CONTROL_READBACK;
- uint32_t OTG_CRC1_WINDOWB_X_CONTROL_READBACK;
- uint32_t OTG_CRC1_WINDOWB_Y_CONTROL_READBACK;
- uint32_t OPTC_CLOCK_CONTROL;
- uint32_t OPTC_WIDTH_CONTROL2;
- uint32_t OTG_PSTATE_REGISTER;
- uint32_t OTG_PIPE_UPDATE_STATUS;
+ OPTC_REG_VARIABLE_LIST_DCN;
};
#define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\
@@ -591,6 +595,7 @@ struct dcn_optc_registers {
type OTG_DC_REG_UPDATE_PENDING;\
type OTG_CURSOR_UPDATE_PENDING;\
type OTG_VUPDATE_KEEPOUT_STATUS;\
+ type OTG0_IHC_OTG_VERTICAL_INTERRUPT2_DEST;
#define TG_REG_FIELD_LIST_DCN3_2(type) \
type OTG_H_TIMING_DIV_MODE_MANUAL;
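
The header change above folds the individual register members of struct dcn_optc_registers into a single OPTC_REG_VARIABLE_LIST_DCN macro so the same field list (now including INTERRUPT_DEST) can be reused without duplicating a hundred declarations. A compact sketch of the pattern, with hypothetical register names:

    /* Sketch of the "register variable list" macro pattern (hypothetical registers). */
    #include <stdint.h>
    #include <stdio.h>

    /* One list of fields, reused by every struct that needs the same registers. */
    #define COMMON_REG_VARIABLE_LIST \
            uint32_t CONTROL;        \
            uint32_t STATUS;         \
            uint32_t H_TOTAL;        \
            uint32_t V_TOTAL

    struct gen1_registers {
            COMMON_REG_VARIABLE_LIST;
    };

    struct gen2_registers {
            COMMON_REG_VARIABLE_LIST;       /* shared fields ...                       */
            uint32_t INTERRUPT_DEST;        /* ... plus generation-specific additions  */
    };

    int main(void)
    {
            struct gen2_registers regs = { .CONTROL = 1, .INTERRUPT_DEST = 0x10 };

            printf("%zu %zu %u\n", sizeof(struct gen1_registers),
                   sizeof(struct gen2_registers), regs.INTERRUPT_DEST);
            return 0;
    }
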
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
index b4694985a40a..81857ce6d68d 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
@@ -562,6 +562,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
.get_hw_timing = optc1_get_hw_timing,
.align_vblanks = optc2_align_vblanks,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
+ .read_otg_state = optc1_read_otg_state,
};
void dcn20_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c
index 49c2efdfa403..f2415eebdc09 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c
@@ -180,6 +180,7 @@ static struct timing_generator_funcs dcn201_tg_funcs = {
.setup_manual_trigger = optc2_setup_manual_trigger,
.get_hw_timing = optc1_get_hw_timing,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
+ .read_otg_state = optc1_read_otg_state,
};
void dcn201_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c
index 4c95c0958612..78b58a449fa4 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c
@@ -420,6 +420,7 @@ static struct timing_generator_funcs dcn30_tg_funcs = {
.get_optc_double_buffer_pending = optc3_get_optc_double_buffer_pending,
.get_otg_double_buffer_pending = optc3_get_otg_update_pending,
.get_pipe_update_pending = optc3_get_pipe_update_pending,
+ .read_otg_state = optc1_read_otg_state,
};
void dcn30_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c
index d7a45ef2d01b..65e9089b7f31 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c
@@ -172,6 +172,7 @@ static struct timing_generator_funcs dcn30_tg_funcs = {
.get_optc_double_buffer_pending = optc3_get_optc_double_buffer_pending,
.get_otg_double_buffer_pending = optc3_get_otg_update_pending,
.get_pipe_update_pending = optc3_get_pipe_update_pending,
+ .read_otg_state = optc1_read_otg_state,
};
void dcn301_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
index 4b6446ed4ce4..ef536f37b4ed 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
@@ -245,6 +245,76 @@ void optc3_init_odm(struct timing_generator *optc)
optc1->opp_count = 1;
}
+void optc31_read_otg_state(struct timing_generator *optc,
+ struct dcn_otg_state *s)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_GET(OTG_CONTROL,
+ OTG_MASTER_EN, &s->otg_enabled);
+
+ REG_GET_2(OTG_V_BLANK_START_END,
+ OTG_V_BLANK_START, &s->v_blank_start,
+ OTG_V_BLANK_END, &s->v_blank_end);
+
+ REG_GET(OTG_V_SYNC_A_CNTL,
+ OTG_V_SYNC_A_POL, &s->v_sync_a_pol);
+
+ REG_GET(OTG_V_TOTAL,
+ OTG_V_TOTAL, &s->v_total);
+
+ REG_GET(OTG_V_TOTAL_MAX,
+ OTG_V_TOTAL_MAX, &s->v_total_max);
+
+ REG_GET(OTG_V_TOTAL_MIN,
+ OTG_V_TOTAL_MIN, &s->v_total_min);
+
+ REG_GET(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MAX_SEL, &s->v_total_max_sel);
+
+ REG_GET(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, &s->v_total_min_sel);
+
+ REG_GET_2(OTG_V_SYNC_A,
+ OTG_V_SYNC_A_START, &s->v_sync_a_start,
+ OTG_V_SYNC_A_END, &s->v_sync_a_end);
+
+ REG_GET_2(OTG_H_BLANK_START_END,
+ OTG_H_BLANK_START, &s->h_blank_start,
+ OTG_H_BLANK_END, &s->h_blank_end);
+
+ REG_GET_2(OTG_H_SYNC_A,
+ OTG_H_SYNC_A_START, &s->h_sync_a_start,
+ OTG_H_SYNC_A_END, &s->h_sync_a_end);
+
+ REG_GET(OTG_H_SYNC_A_CNTL,
+ OTG_H_SYNC_A_POL, &s->h_sync_a_pol);
+
+ REG_GET(OTG_H_TOTAL,
+ OTG_H_TOTAL, &s->h_total);
+
+ REG_GET(OPTC_INPUT_GLOBAL_CONTROL,
+ OPTC_UNDERFLOW_OCCURRED_STATUS, &s->underflow_occurred_status);
+
+ REG_GET(OTG_VERTICAL_INTERRUPT1_CONTROL,
+ OTG_VERTICAL_INTERRUPT1_INT_ENABLE, &s->vertical_interrupt1_en);
+
+ REG_GET(OTG_VERTICAL_INTERRUPT1_POSITION,
+ OTG_VERTICAL_INTERRUPT1_LINE_START, &s->vertical_interrupt1_line);
+
+ REG_GET(OTG_VERTICAL_INTERRUPT2_CONTROL,
+ OTG_VERTICAL_INTERRUPT2_INT_ENABLE, &s->vertical_interrupt2_en);
+
+ REG_GET(OTG_VERTICAL_INTERRUPT2_POSITION,
+ OTG_VERTICAL_INTERRUPT2_LINE_START, &s->vertical_interrupt2_line);
+
+ REG_GET(INTERRUPT_DEST,
+ OTG0_IHC_OTG_VERTICAL_INTERRUPT2_DEST, &s->vertical_interrupt2_dest);
+
+ s->otg_master_update_lock = REG_READ(OTG_MASTER_UPDATE_LOCK);
+ s->otg_double_buffer_control = REG_READ(OTG_DOUBLE_BUFFER_CONTROL);
+}
+
static struct timing_generator_funcs dcn31_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
@@ -306,6 +376,7 @@ static struct timing_generator_funcs dcn31_tg_funcs = {
.get_hw_timing = optc1_get_hw_timing,
.init_odm = optc3_init_odm,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
+ .read_otg_state = optc31_read_otg_state,
};
void dcn31_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h
index fbbe86d00c2e..0f72c274f40b 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h
@@ -100,7 +100,8 @@
SRI(OTG_CRC_CNTL2, OTG, inst),\
SR(DWB_SOURCE_SELECT),\
SRI(OTG_DRR_CONTROL, OTG, inst),\
- SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst)
+ SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst),\
+ SRI(INTERRUPT_DEST, OTG, inst)
#define OPTC_COMMON_MASK_SH_LIST_DCN3_1(mask_sh)\
SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\
@@ -260,6 +261,7 @@
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\
+ SF(OTG0_INTERRUPT_DEST, OTG0_IHC_OTG_VERTICAL_INTERRUPT2_DEST, mask_sh)
void dcn31_timing_generator_init(struct optc *optc1);
@@ -269,4 +271,7 @@ void optc31_set_drr(struct timing_generator *optc, const struct drr_params *para
void optc3_init_odm(struct timing_generator *optc);
+void optc31_read_otg_state(struct timing_generator *optc,
+ struct dcn_otg_state *s);
+
#endif /* __DC_OPTC_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
index 633d62addd4d..0e603bad0d12 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
@@ -255,6 +255,7 @@ static struct timing_generator_funcs dcn314_tg_funcs = {
.set_odm_combine = optc314_set_odm_combine,
.set_h_timing_div_manual_mode = optc314_set_h_timing_div_manual_mode,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
+ .read_otg_state = optc31_read_otg_state,
};
void dcn314_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h
index 0ff72b97b465..6bfdee3fcf5f 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h
@@ -99,7 +99,8 @@
SRI(OPTC_WIDTH_CONTROL, ODM, inst),\
SRI(OPTC_MEMORY_CONFIG, ODM, inst),\
SRI(OTG_DRR_CONTROL, OTG, inst),\
- SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst)
+ SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst),\
+ SRI(INTERRUPT_DEST, OTG, inst)
#define OPTC_COMMON_MASK_SH_LIST_DCN3_14(mask_sh)\
SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\
@@ -254,6 +255,7 @@
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\
+ SF(OTG0_INTERRUPT_DEST, OTG0_IHC_OTG_VERTICAL_INTERRUPT2_DEST, mask_sh)
void dcn314_timing_generator_init(struct optc *optc1);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
index c217f653b3c8..2cdd19ba634b 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
@@ -364,6 +364,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = {
.get_optc_double_buffer_pending = optc3_get_optc_double_buffer_pending,
.get_otg_double_buffer_pending = optc3_get_otg_update_pending,
.get_pipe_update_pending = optc3_get_pipe_update_pending,
+ .read_otg_state = optc31_read_otg_state,
};
void dcn32_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
index 0b0964a9da74..d159e3ed3bb3 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
@@ -181,7 +181,8 @@
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\
- SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh)
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\
+ SF(OTG0_INTERRUPT_DEST, OTG0_IHC_OTG_VERTICAL_INTERRUPT2_DEST, mask_sh)
void dcn32_timing_generator_init(struct optc *optc1);
void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
index d21e82b927d0..b86fe2b094f8 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
@@ -492,6 +492,7 @@ static struct timing_generator_funcs dcn35_tg_funcs = {
.init_odm = optc3_init_odm,
.set_long_vtotal = optc35_set_long_vtotal,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
+ .read_otg_state = optc31_read_otg_state,
};
void dcn35_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
index be749ab41dce..733a2f149d9a 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
@@ -71,7 +71,8 @@
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\
- SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh)
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\
+ SF(OTG0_INTERRUPT_DEST, OTG0_IHC_OTG_VERTICAL_INTERRUPT2_DEST, mask_sh)
void dcn35_timing_generator_init(struct optc *optc1);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
index 338a0cad23a5..382ac18e7854 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
@@ -101,7 +101,7 @@ static uint32_t decide_odm_mem_bit_map(int *opp_id, int opp_cnt, int h_active)
return memory_bit_map;
}
-static void optc401_set_odm_combine(struct timing_generator *optc, int *opp_id,
+void optc401_set_odm_combine(struct timing_generator *optc, int *opp_id,
int opp_cnt, int segment_width, int last_segment_width)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -162,7 +162,7 @@ static void optc401_set_odm_combine(struct timing_generator *optc, int *opp_id,
optc1->opp_count = opp_cnt;
}
-static void optc401_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode)
+void optc401_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -177,7 +177,7 @@ static void optc401_set_h_timing_div_manual_mode(struct timing_generator *optc,
*
* Return: Always returns true
*/
-static bool optc401_enable_crtc(struct timing_generator *optc)
+bool optc401_enable_crtc(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -203,7 +203,7 @@ static bool optc401_enable_crtc(struct timing_generator *optc)
}
/* disable_crtc */
-static bool optc401_disable_crtc(struct timing_generator *optc)
+bool optc401_disable_crtc(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -234,7 +234,7 @@ static bool optc401_disable_crtc(struct timing_generator *optc)
return true;
}
-static void optc401_phantom_crtc_post_enable(struct timing_generator *optc)
+void optc401_phantom_crtc_post_enable(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -245,7 +245,7 @@ static void optc401_phantom_crtc_post_enable(struct timing_generator *optc)
REG_WAIT(OTG_CLOCK_CONTROL, OTG_BUSY, 0, 1, 100000);
}
-static void optc401_disable_phantom_otg(struct timing_generator *optc)
+void optc401_disable_phantom_otg(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -259,7 +259,7 @@ static void optc401_disable_phantom_otg(struct timing_generator *optc)
REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0);
}
-static void optc401_set_odm_bypass(struct timing_generator *optc,
+void optc401_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -365,7 +365,7 @@ void optc401_set_drr(
}
}
-static void optc401_set_out_mux(struct timing_generator *optc, enum otg_out_mux_dest dest)
+void optc401_set_out_mux(struct timing_generator *optc, enum otg_out_mux_dest dest)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -396,7 +396,7 @@ void optc401_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, i
}
}
-static void optc401_program_global_sync(
+void optc401_program_global_sync(
struct timing_generator *optc,
int vready_offset,
int vstartup_start,
@@ -430,7 +430,7 @@ static void optc401_program_global_sync(
REG_UPDATE(OTG_PSTATE_REGISTER, OTG_PSTATE_KEEPOUT_START, pstate_keepout);
}
-static void optc401_set_vupdate_keepout(struct timing_generator *tg, bool enable)
+void optc401_set_vupdate_keepout(struct timing_generator *tg, bool enable)
{
struct optc *optc1 = DCN10TG_FROM_TG(tg);
@@ -442,7 +442,7 @@ static void optc401_set_vupdate_keepout(struct timing_generator *tg, bool enable
return;
}
-static bool optc401_wait_update_lock_status(struct timing_generator *tg, bool locked)
+bool optc401_wait_update_lock_status(struct timing_generator *tg, bool locked)
{
struct optc *optc1 = DCN10TG_FROM_TG(tg);
uint32_t lock_status = 0;
@@ -527,6 +527,7 @@ static struct timing_generator_funcs dcn401_tg_funcs = {
.get_pipe_update_pending = optc3_get_pipe_update_pending,
.set_vupdate_keepout = optc401_set_vupdate_keepout,
.wait_update_lock_status = optc401_wait_update_lock_status,
+ .read_otg_state = optc31_read_otg_state,
};
void dcn401_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h
index 1be89571986f..fa62737b5b1b 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h
@@ -163,7 +163,8 @@
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\
SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\
- SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh)
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\
+ SF(OTG0_INTERRUPT_DEST, OTG0_IHC_OTG_VERTICAL_INTERRUPT2_DEST, mask_sh)
void dcn401_timing_generator_init(struct optc *optc1);
@@ -172,5 +173,24 @@ void optc401_set_drr(
const struct drr_params *params);
void optc401_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max);
void optc401_setup_manual_trigger(struct timing_generator *optc);
+void optc401_program_global_sync(
+ struct timing_generator *optc,
+ int vready_offset,
+ int vstartup_start,
+ int vupdate_offset,
+ int vupdate_width,
+ int pstate_keepout);
+bool optc401_enable_crtc(struct timing_generator *optc);
+bool optc401_disable_crtc(struct timing_generator *optc);
+void optc401_phantom_crtc_post_enable(struct timing_generator *optc);
+void optc401_disable_phantom_otg(struct timing_generator *optc);
+void optc401_set_odm_bypass(struct timing_generator *optc,
+ const struct dc_crtc_timing *dc_crtc_timing);
+void optc401_set_odm_combine(struct timing_generator *optc, int *opp_id,
+ int opp_cnt, int segment_width, int last_segment_width);
+void optc401_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode);
+void optc401_set_out_mux(struct timing_generator *optc, enum otg_out_mux_dest dest);
+bool optc401_wait_update_lock_status(struct timing_generator *tg, bool locked);
+void optc401_set_vupdate_keepout(struct timing_generator *tg, bool enable);
#endif /* __DC_OPTC_DCN401_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/Makefile b/drivers/gpu/drm/amd/display/dc/resource/Makefile
index 09320344d8e9..b8cddef6b3d2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/resource/Makefile
@@ -198,6 +198,14 @@ AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN351)
###############################################################################
+RESOURCE_DCN36 = dcn36_resource.o
+
+AMD_DAL_RESOURCE_DCN36 = $(addprefix $(AMDDALPATH)/dc/resource/dcn36/,$(RESOURCE_DCN36))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN36)
+
+###############################################################################
+
RESOURCE_DCN401 = dcn401_resource.o
AMD_DAL_RESOURCE_DCN401 = $(addprefix $(AMDDALPATH)/dc/resource/dcn401/,$(RESOURCE_DCN401))
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
index c63c59623433..eb1e158d3436 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
@@ -67,6 +67,7 @@
#include "reg_helper.h"
#include "dce100/dce100_resource.h"
+#include "link.h"
#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
@@ -659,6 +660,12 @@ static void dce120_resource_destruct(struct dce110_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
+
+ if (pool->base.oem_device != NULL) {
+ struct dc *dc = pool->base.oem_device->ctx->dc;
+
+ dc->link_srv->destroy_ddc_service(&pool->base.oem_device);
+ }
}
static void read_dce_straps(
@@ -1054,6 +1061,7 @@ static bool dce120_resource_construct(
struct dc *dc,
struct dce110_resource_pool *pool)
{
+ struct ddc_service_init_data ddc_init_data = {0};
unsigned int i;
int j;
struct dc_context *ctx = dc->ctx;
@@ -1257,6 +1265,15 @@ static bool dce120_resource_construct(
bw_calcs_data_update_from_pplib(dc);
+ if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
+ ddc_init_data.ctx = dc->ctx;
+ ddc_init_data.link = NULL;
+ ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
+ ddc_init_data.id.enum_id = 0;
+ ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
+ pool->base.oem_device = dc->link_srv->create_ddc_service(&ddc_init_data);
+ }
+
return true;
irqs_create_fail:
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index 5c6dc710e96c..e4eca3e32c1b 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -1220,7 +1220,7 @@ static void get_pixel_clock_parameters(
struct pipe_ctx *odm_pipe;
int opp_cnt = 1;
struct dc_link *link = stream->link;
- struct link_encoder *link_enc = NULL;
+ struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dce_hwseq *hws = dc->hwseq;
@@ -1229,7 +1229,8 @@ static void get_pixel_clock_parameters(
pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
- link_enc = link_enc_cfg_get_link_enc(link);
+ if (!dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
if (link_enc)
pixel_clk_params->encoder_object_id = link_enc->id;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
index 2615c36d5ffe..4bd5c2278596 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
@@ -1413,9 +1413,9 @@ static bool dcn21_resource_construct(
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
- dc->caps.max_slave_planes = 1;
- dc->caps.max_slave_yuv_planes = 1;
- dc->caps.max_slave_rgb_planes = 1;
+ dc->caps.max_slave_planes = 3;
+ dc->caps.max_slave_yuv_planes = 3;
+ dc->caps.max_slave_rgb_planes = 3;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.extended_aux_timeout_support = true;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index 13202ce30d66..f01ced015072 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -2047,7 +2047,8 @@ bool dcn30_validate_bandwidth(struct dc *dc,
int vlevel = 0;
int pipe_cnt = 0;
- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ display_e2e_pipe_params_st *pipes = kcalloc(dc->res_pool->pipe_count,
+ sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
DC_LOGGER_INIT(dc->ctx->logger);
BW_VAL_TRACE_COUNT();
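
This hunk (and the matching ones in the dcn31/dcn314/dcn32 resource files below) replaces kzalloc(count * size, ...) with kcalloc(count, size, ...), the kernel's zeroing array allocator that checks the count * size multiplication for overflow instead of letting it wrap silently. A userspace analogue of the same change, using calloc in place of kcalloc:

    /* Userspace analogue of the kzalloc(n * size) -> kcalloc(n, size) change above. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct pipe_params { uint32_t a, b, c; };       /* stand-in for display_e2e_pipe_params_st */

    int main(void)
    {
            size_t n = 6;

            /* Risky form: the multiplication can overflow before the allocator sees it. */
            struct pipe_params *p1 = malloc(n * sizeof(*p1));

            /* Safer form: calloc (like kcalloc) checks n * size for overflow and zeroes the block. */
            struct pipe_params *p2 = calloc(n, sizeof(*p2));

            if (!p1 || !p2) {
                    free(p1);
                    free(p2);
                    return 1;
            }
            printf("%u\n", p2[0].a);        /* zero-initialized */
            free(p1);
            free(p2);
            return 0;
    }
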
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index 911bd60d4fbc..dddddbfef85f 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -890,7 +890,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_z10 = true,
.enable_legacy_fast_update = true,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
- .dml_hostvm_override = DML_HOSTVM_NO_OVERRIDE,
+ .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE,
.using_dml2 = false,
};
@@ -1768,7 +1768,8 @@ bool dcn31_validate_bandwidth(struct dc *dc,
int vlevel = 0;
int pipe_cnt = 0;
- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ display_e2e_pipe_params_st *pipes = kcalloc(dc->res_pool->pipe_count,
+ sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
DC_LOGGER_INIT(dc->ctx->logger);
BW_VAL_TRACE_COUNT();
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
index e3ba105034f8..26becc4cb804 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
@@ -1704,7 +1704,8 @@ bool dcn314_validate_bandwidth(struct dc *dc,
int vlevel = 0;
int pipe_cnt = 0;
- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ display_e2e_pipe_params_st *pipes = kcalloc(dc->res_pool->pipe_count,
+ sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
DC_LOGGER_INIT(dc->ctx->logger);
BW_VAL_TRACE_COUNT();
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
index 14acef036b5a..6c2bb3f63be1 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
@@ -1698,7 +1698,7 @@ static int dcn315_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.dsc_input_bpc = 0;
DC_FP_START();
dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
- if (pixel_rate_crb && !pipe->top_pipe && !pipe->prev_odm_pipe) {
+ if (pixel_rate_crb) {
int bpp = source_format_to_bpp(pipes[pipe_cnt].pipe.src.source_format);
/* Ceil to crb segment size */
int approx_det_segs_required_for_pstate = dcn_get_approx_det_segs_required_for_pstate(
@@ -1755,28 +1755,26 @@ static int dcn315_populate_dml_pipes_from_context(
continue;
}
- if (!pipe->top_pipe && !pipe->prev_odm_pipe) {
- bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)
- || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120);
-
- if (remaining_det_segs > MIN_RESERVED_DET_SEGS && crb_pipes != 0)
- pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes +
- (crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0);
- if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) {
- /* Clamp to 2 pipe split max det segments */
- remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS);
- pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS;
- }
- if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) {
- /* If we are splitting we must have an even number of segments */
- remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2;
- pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2;
- }
- /* Convert segments into size for DML use */
- pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB;
-
- crb_idx++;
+ bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)
+ || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120);
+
+ if (remaining_det_segs > MIN_RESERVED_DET_SEGS && crb_pipes != 0)
+ pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes +
+ (crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0);
+ if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) {
+ /* Clamp to 2 pipe split max det segments */
+ remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS);
+ pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS;
+ }
+ if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) {
+ /* If we are splitting we must have an even number of segments */
+ remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2;
+ pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2;
}
+ /* Convert segments into size for DML use */
+ pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB;
+
+ crb_idx++;
pipe_cnt++;
}
}
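
The dcn315 hunk above drops the top-pipe/ODM guard but keeps the same arithmetic for handing out spare CRB/DET segments: every pipe gets an equal base share, and the first (remainder) pipes get one extra segment so nothing is left over. The sketch below isolates that split-with-remainder rule; the function and variable names are hypothetical.

    /* Sketch of the even-split-with-remainder rule used for DET segments above. */
    #include <stdio.h>

    /* Give pipe 'idx' its share of 'extra' segments across 'pipes' consumers:
     * everyone gets extra / pipes, and the first (extra % pipes) pipes get one more.
     */
    static int det_share(int extra, int pipes, int idx)
    {
            if (pipes <= 0 || extra <= 0)
                    return 0;
            return extra / pipes + (idx < extra % pipes ? 1 : 0);
    }

    int main(void)
    {
            int extra = 10, pipes = 4, total = 0;

            for (int i = 0; i < pipes; i++) {
                    int s = det_share(extra, pipes, i);

                    total += s;
                    printf("pipe %d: +%d segments\n", i, s);        /* 3, 3, 2, 2 */
            }
            printf("total %d\n", total);    /* always equals extra */
            return 0;
    }
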
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index 664302876019..2a59cc61ed8c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -1749,7 +1749,8 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
int vlevel = 0;
int pipe_cnt = 0;
- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ display_e2e_pipe_params_st *pipes = kcalloc(dc->res_pool->pipe_count,
+ sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
/* To handle Freesync properly, setting FreeSync DML parameters
* to its default state for the first stage of validation
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
index 86c6e5e8c42e..1aa4ced29291 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
@@ -1055,7 +1055,8 @@ unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned
SRI_ARR(OPTC_WIDTH_CONTROL, ODM, inst), \
SRI_ARR(OPTC_MEMORY_CONFIG, ODM, inst), \
SRI_ARR(OTG_DRR_CONTROL, OTG, inst), \
- SRI_ARR(OTG_PIPE_UPDATE_STATUS, OTG, inst)
+ SRI_ARR(OTG_PIPE_UPDATE_STATUS, OTG, inst), \
+ SRI_ARR(INTERRUPT_DEST, OTG, inst)
/* HUBP */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index 8ee3d99ea2aa..ffd2b816cd02 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -1839,9 +1839,9 @@ static bool dcn35_resource_construct(
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
- dc->caps.max_slave_planes = 2;
- dc->caps.max_slave_yuv_planes = 2;
- dc->caps.max_slave_rgb_planes = 2;
+ dc->caps.max_slave_planes = 3;
+ dc->caps.max_slave_yuv_planes = 3;
+ dc->caps.max_slave_rgb_planes = 3;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
if (dc->config.forceHBR2CP2520)
@@ -1909,6 +1909,7 @@ static bool dcn35_resource_construct(
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
+
dc->config.disable_hbr_audio_dp2 = true;
/* read VBIOS LTTPR caps */
{
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
index 9d03a55d90cf..9c56ae76e0c7 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
@@ -305,7 +305,8 @@ struct resource_pool *dcn35_create_resource_pool(
SRI_ARR(OPTC_WIDTH_CONTROL, ODM, inst),\
SRI_ARR(OPTC_MEMORY_CONFIG, ODM, inst),\
SRI_ARR(OTG_DRR_CONTROL, OTG, inst),\
- SRI2_ARR(OPTC_CLOCK_CONTROL, OPTC, inst)
+ SRI2_ARR(OPTC_CLOCK_CONTROL, OPTC, inst),\
+ SRI_ARR(INTERRUPT_DEST, OTG, inst)
/* DPP */
#define DPP_REG_LIST_DCN35_RI(id)\
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index 14f7c3acdc96..98f5bc1b929e 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -1811,9 +1811,9 @@ static bool dcn351_resource_construct(
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
- dc->caps.max_slave_planes = 2;
- dc->caps.max_slave_yuv_planes = 2;
- dc->caps.max_slave_rgb_planes = 2;
+ dc->caps.max_slave_planes = 3;
+ dc->caps.max_slave_yuv_planes = 3;
+ dc->caps.max_slave_rgb_planes = 3;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
if (dc->config.forceHBR2CP2520)
@@ -1877,6 +1877,7 @@ static bool dcn351_resource_construct(
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
+
/* Use psp mailbox to enable assr */
dc->config.use_assr_psp_message = true;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
new file mode 100644
index 000000000000..b6468573dc33
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
@@ -0,0 +1,2171 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2025 Advanced Micro Devices, Inc. */
+
+#include "dm_services.h"
+#include "dc.h"
+
+#include "dcn31/dcn31_init.h"
+#include "dcn35/dcn35_init.h"
+#include "dcn36/dcn36_resource.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "dcn36_resource.h"
+#include "dml2/dml2_wrapper.h"
+
+#include "dcn20/dcn20_resource.h"
+#include "dcn30/dcn30_resource.h"
+#include "dcn31/dcn31_resource.h"
+#include "dcn32/dcn32_resource.h"
+#include "dcn35/dcn35_resource.h"
+
+#include "dcn10/dcn10_ipp.h"
+#include "dcn30/dcn30_hubbub.h"
+#include "dcn31/dcn31_hubbub.h"
+#include "dcn35/dcn35_hubbub.h"
+#include "dcn32/dcn32_mpc.h"
+#include "dcn35/dcn35_hubp.h"
+#include "irq/dcn36/irq_service_dcn36.h"
+#include "dcn35/dcn35_dpp.h"
+#include "dcn35/dcn35_optc.h"
+#include "dcn20/dcn20_hwseq.h"
+#include "dcn30/dcn30_hwseq.h"
+#include "dce110/dce110_hwseq.h"
+#include "dcn35/dcn35_opp.h"
+#include "dcn35/dcn35_dsc.h"
+#include "dcn30/dcn30_vpg.h"
+#include "dcn30/dcn30_afmt.h"
+#include "dcn31/dcn31_dio_link_encoder.h"
+#include "dcn35/dcn35_dio_stream_encoder.h"
+#include "dcn31/dcn31_hpo_dp_stream_encoder.h"
+#include "dcn31/dcn31_hpo_dp_link_encoder.h"
+#include "dcn32/dcn32_hpo_dp_link_encoder.h"
+#include "link.h"
+#include "dcn31/dcn31_apg.h"
+#include "dcn32/dcn32_dio_link_encoder.h"
+#include "dcn31/dcn31_vpg.h"
+#include "dcn31/dcn31_afmt.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_audio.h"
+#include "dce/dce_hwseq.h"
+#include "clk_mgr.h"
+#include "virtual/virtual_stream_encoder.h"
+#include "dce110/dce110_resource.h"
+#include "dml/display_mode_vba.h"
+#include "dcn35/dcn35_dccg.h"
+#include "dcn35/dcn35_pg_cntl.h"
+#include "dcn10/dcn10_resource.h"
+#include "dcn31/dcn31_panel_cntl.h"
+#include "dcn35/dcn35_hwseq.h"
+#include "dcn35/dcn35_dio_link_encoder.h"
+#include "dml/dcn31/dcn31_fpu.h" /*todo*/
+#include "dml/dcn35/dcn35_fpu.h"
+#include "dcn35/dcn35_dwb.h"
+#include "dcn35/dcn35_mmhubbub.h"
+
+#include "dcn/dcn_3_6_0_offset.h"
+#include "dcn/dcn_3_6_0_sh_mask.h"
+
+#define regBIF_BX2_BIOS_SCRATCH_2 0x2ffc004e
+#define regBIF_BX2_BIOS_SCRATCH_2_BASE_IDX 5
+
+#define regBIF_BX2_BIOS_SCRATCH_3 0x2ffc004f
+#define regBIF_BX2_BIOS_SCRATCH_3_BASE_IDX 5
+
+#define regBIF_BX2_BIOS_SCRATCH_6 0x2ffc0052
+#define regBIF_BX2_BIOS_SCRATCH_6_BASE_IDX 5
+
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
+
+#include "reg_helper.h"
+#include "dce/dmub_abm.h"
+#include "dce/dmub_psr.h"
+#include "dce/dmub_replay.h"
+#include "dce/dce_aux.h"
+#include "dce/dce_i2c.h"
+#include "dml/dcn31/display_mode_vba_31.h" /*temp*/
+#include "vm_helper.h"
+#include "dcn20/dcn20_vmid.h"
+
+#include "dc_state_priv.h"
+
+#include "link_enc_cfg.h"
+#define DC_LOGGER_INIT(logger)
+
+enum dcn36_clk_src_array_id {
+ DCN36_CLK_SRC_PLL0,
+ DCN36_CLK_SRC_PLL1,
+ DCN36_CLK_SRC_PLL2,
+ DCN36_CLK_SRC_PLL3,
+ DCN36_CLK_SRC_PLL4,
+ DCN36_CLK_SRC_TOTAL
+};
+
+/* begin *********************
+ * macros to expand register list macro defined in HW object header file
+ */
+
+/* DCN */
+/* TODO awful hack. fixup dcn20_dwb.h */
+#undef BASE_INNER
+#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]
+
+#define BASE(seg) BASE_INNER(seg)
+
+#define SR(reg_name)\
+ REG_STRUCT.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+
+#define SR_ARR(reg_name, id) \
+ REG_STRUCT[id].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name
+
+#define SR_ARR_INIT(reg_name, id, value) \
+ REG_STRUCT[id].reg_name = value
+
+#define SRI(reg_name, block, id)\
+ REG_STRUCT.reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI_ARR(reg_name, block, id)\
+ REG_STRUCT[id].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SR_ARR_I2C(reg_name, id) \
+ REG_STRUCT[id-1].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name
+
+#define SRI_ARR_I2C(reg_name, block, id)\
+ REG_STRUCT[id-1].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI_ARR_ALPHABET(reg_name, block, index, id)\
+ REG_STRUCT[index].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI2(reg_name, block, id)\
+ .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+
+#define SRI2_ARR(reg_name, block, id)\
+ REG_STRUCT[id].reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+
+#define SRIR(var_name, reg_name, block, id)\
+ .var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRII(reg_name, block, id)\
+ REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRII_ARR_2(reg_name, block, id, inst)\
+ REG_STRUCT[inst].reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRII_MPC_RMU(reg_name, block, id)\
+ .RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRII_DWB(reg_name, temp_name, block, id)\
+ REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## temp_name
+
+#define SF_DWB2(reg_name, block, id, field_name, post_fix) \
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define DCCG_SRII(reg_name, block, id)\
+ REG_STRUCT.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define VUPDATE_SRII(reg_name, block, id)\
+ REG_STRUCT.reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+ reg ## reg_name ## _ ## block ## id
+
+/* NBIO */
+#define NBIO_BASE_INNER(seg) ctx->nbio_reg_offsets[seg]
+
+#define NBIO_BASE(seg) \
+ NBIO_BASE_INNER(seg)
+
+#define NBIO_SR(reg_name)\
+ REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX2_ ## reg_name
+
+#define NBIO_SR_ARR(reg_name, id)\
+ REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX2_ ## reg_name
+
+#define bios_regs_init() \
+ ( \
+ NBIO_SR(BIOS_SCRATCH_3),\
+ NBIO_SR(BIOS_SCRATCH_6)\
+ )
+
+static struct bios_registers bios_regs;
+
+#define clk_src_regs_init(index, pllid)\
+ CS_COMMON_REG_LIST_DCN3_0_RI(index, pllid)
+
+static struct dce110_clk_src_regs clk_src_regs[5];
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCN3_1_4(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCN3_1_4(_MASK)
+};
+
+#define abm_regs_init(id)\
+ ABM_DCN32_REG_LIST_RI(id)
+
+static struct dce_abm_registers abm_regs[4];
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCN35(_MASK)
+};
+
+#define audio_regs_init(id)\
+ AUD_COMMON_REG_LIST_RI(id)
+
+static struct dce_audio_registers audio_regs[7];
+
+
+#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
+ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
+
+static const struct dce_audio_shift audio_shift = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_audio_mask audio_mask = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define vpg_regs_init(id)\
+ VPG_DCN31_REG_LIST_RI(id)
+
+static struct dcn31_vpg_registers vpg_regs[10];
+
+static const struct dcn31_vpg_shift vpg_shift = {
+ DCN31_VPG_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_vpg_mask vpg_mask = {
+ DCN31_VPG_MASK_SH_LIST(_MASK)
+};
+
+#define afmt_regs_init(id)\
+ AFMT_DCN31_REG_LIST_RI(id)
+
+static struct dcn31_afmt_registers afmt_regs[6];
+
+static const struct dcn31_afmt_shift afmt_shift = {
+ DCN31_AFMT_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_afmt_mask afmt_mask = {
+ DCN31_AFMT_MASK_SH_LIST(_MASK)
+};
+
+#define apg_regs_init(id)\
+ APG_DCN31_REG_LIST_RI(id)
+
+static struct dcn31_apg_registers apg_regs[4];
+
+static const struct dcn31_apg_shift apg_shift = {
+ DCN31_APG_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_apg_mask apg_mask = {
+ DCN31_APG_MASK_SH_LIST(_MASK)
+};
+
+#define stream_enc_regs_init(id)\
+ SE_DCN35_REG_LIST_RI(id)
+
+static struct dcn10_stream_enc_registers stream_enc_regs[5];
+
+static const struct dcn10_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dcn10_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCN35(_MASK)
+};
+
+#define aux_regs_init(id)\
+ DCN2_AUX_REG_LIST_RI(id)
+
+static struct dcn10_link_enc_aux_registers link_enc_aux_regs[5];
+
+#define hpd_regs_init(id)\
+ HPD_REG_LIST_RI(id)
+
+static struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[5];
+
+
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCN_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCN_AUX_MASK_SH_LIST(_MASK)
+};
+
+#define link_regs_init(id, phyid)\
+ ( \
+ LE_DCN35_REG_LIST_RI(id), \
+ UNIPHY_DCN2_REG_LIST_RI(id, phyid)\
+ )
+
+static struct dcn10_link_enc_registers link_enc_regs[5];
+
+static const struct dcn10_link_enc_shift le_shift = {
+ LINK_ENCODER_MASK_SH_LIST_DCN35(__SHIFT), \
+ //DPCS_DCN31_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn10_link_enc_mask le_mask = {
+ LINK_ENCODER_MASK_SH_LIST_DCN35(_MASK), \
+ //DPCS_DCN31_MASK_SH_LIST(_MASK)
+};
+
+#define hpo_dp_stream_encoder_reg_init(id)\
+ DCN3_1_HPO_DP_STREAM_ENC_REG_LIST_RI(id)
+
+static struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[4];
+
+static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = {
+ DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = {
+ DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK)
+};
+
+#define hpo_dp_link_encoder_reg_init(id)\
+ DCN3_1_HPO_DP_LINK_ENC_REG_LIST_RI(id)
+
+static struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[2];
+
+static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = {
+ DCN3_1_HPO_DP_LINK_ENC_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = {
+ DCN3_1_HPO_DP_LINK_ENC_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define dpp_regs_init(id)\
+ DPP_REG_LIST_DCN35_RI(id)
+
+static struct dcn3_dpp_registers dpp_regs[4];
+
+static const struct dcn35_dpp_shift tf_shift = {
+ DPP_REG_LIST_SH_MASK_DCN35(__SHIFT)
+};
+
+static const struct dcn35_dpp_mask tf_mask = {
+ DPP_REG_LIST_SH_MASK_DCN35(_MASK)
+};
+
+#define opp_regs_init(id)\
+ OPP_REG_LIST_DCN35_RI(id)
+
+static struct dcn35_opp_registers opp_regs[4];
+
+static const struct dcn35_opp_shift opp_shift = {
+ OPP_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dcn35_opp_mask opp_mask = {
+ OPP_MASK_SH_LIST_DCN35(_MASK)
+};
+
+#define aux_engine_regs_init(id)\
+ ( \
+ AUX_COMMON_REG_LIST0_RI(id), \
+ SR_ARR_INIT(AUXN_IMPCAL, id, 0), \
+ SR_ARR_INIT(AUXP_IMPCAL, id, 0), \
+ SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK) \
+ )
+
+static struct dce110_aux_registers aux_engine_regs[5];
+
+#define dwbc_regs_dcn3_init(id)\
+ DWBC_COMMON_REG_LIST_DCN30_RI(id)
+
+static struct dcn30_dwbc_registers dwbc35_regs[1];
+
+static const struct dcn35_dwbc_shift dwbc35_shift = {
+ DWBC_COMMON_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dcn35_dwbc_mask dwbc35_mask = {
+ DWBC_COMMON_MASK_SH_LIST_DCN35(_MASK)
+};
+
+#define mcif_wb_regs_dcn3_init(id)\
+ MCIF_WB_COMMON_REG_LIST_DCN3_5_RI(id)
+
+static struct dcn35_mmhubbub_registers mcif_wb35_regs[1];
+
+static const struct dcn35_mmhubbub_shift mcif_wb35_shift = {
+ MCIF_WB_COMMON_MASK_SH_LIST_DCN3_5(__SHIFT)
+};
+
+static const struct dcn35_mmhubbub_mask mcif_wb35_mask = {
+ MCIF_WB_COMMON_MASK_SH_LIST_DCN3_5(_MASK)
+};
+
+#define dsc_regsDCN35_init(id)\
+ DSC_REG_LIST_DCN20_RI(id)
+
+static struct dcn20_dsc_registers dsc_regs[4];
+
+static const struct dcn35_dsc_shift dsc_shift = {
+ DSC_REG_LIST_SH_MASK_DCN35(__SHIFT)
+};
+
+static const struct dcn35_dsc_mask dsc_mask = {
+ DSC_REG_LIST_SH_MASK_DCN35(_MASK)
+};
+
+static struct dcn30_mpc_registers mpc_regs;
+
+#define dcn_mpc_regs_init() \
+ MPC_REG_LIST_DCN3_2_RI(0),\
+ MPC_REG_LIST_DCN3_2_RI(1),\
+ MPC_REG_LIST_DCN3_2_RI(2),\
+ MPC_REG_LIST_DCN3_2_RI(3),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(0),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(1),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(2),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(3),\
+ MPC_DWB_MUX_REG_LIST_DCN3_0_RI(0)
+
+static const struct dcn30_mpc_shift mpc_shift = {
+ MPC_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
+};
+
+static const struct dcn30_mpc_mask mpc_mask = {
+ MPC_COMMON_MASK_SH_LIST_DCN32(_MASK)
+};
+
+#define optc_regs_init(id)\
+ OPTC_COMMON_REG_LIST_DCN3_5_RI(id)
+
+static struct dcn_optc_registers optc_regs[4];
+
+static const struct dcn_optc_shift optc_shift = {
+ OPTC_COMMON_MASK_SH_LIST_DCN3_5(__SHIFT)
+};
+
+static const struct dcn_optc_mask optc_mask = {
+ OPTC_COMMON_MASK_SH_LIST_DCN3_5(_MASK)
+};
+
+#define hubp_regs_init(id)\
+ HUBP_REG_LIST_DCN30_RI(id)
+
+static struct dcn_hubp2_registers hubp_regs[4];
+
+
+static const struct dcn35_hubp2_shift hubp_shift = {
+ HUBP_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dcn35_hubp2_mask hubp_mask = {
+ HUBP_MASK_SH_LIST_DCN35(_MASK)
+};
+
+static struct dcn_hubbub_registers hubbub_reg;
+
+#define hubbub_reg_init()\
+ HUBBUB_REG_LIST_DCN35(0)
+
+static const struct dcn_hubbub_shift hubbub_shift = {
+ HUBBUB_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dcn_hubbub_mask hubbub_mask = {
+ HUBBUB_MASK_SH_LIST_DCN35(_MASK)
+};
+
+static struct dccg_registers dccg_regs;
+
+#define dccg_regs_init()\
+ DCCG_REG_LIST_DCN35()
+
+static const struct dccg_shift dccg_shift = {
+ DCCG_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dccg_mask dccg_mask = {
+ DCCG_MASK_SH_LIST_DCN35(_MASK)
+};
+
+static struct pg_cntl_registers pg_cntl_regs;
+
+#define pg_cntl_dcn35_regs_init() \
+ PG_CNTL_REG_LIST_DCN35()
+
+static const struct pg_cntl_shift pg_cntl_shift = {
+ PG_CNTL_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct pg_cntl_mask pg_cntl_mask = {
+ PG_CNTL_MASK_SH_LIST_DCN35(_MASK)
+};
+
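+/*
+ * SRII2: initialize the <pre>_<post>[id] entry from BASE(<base index define>)
+ * plus the per-instance register offset; the symbol names are composed by
+ * token pasting. (Descriptive note; BASE() semantics follow the usual DC
+ * register-init convention.)
+ */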
+#define SRII2(reg_name_pre, reg_name_post, id)\
+ .reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \
+ ## id ## _ ## reg_name_post ## _BASE_IDX) + \
+ reg ## reg_name_pre ## id ## _ ## reg_name_post
+
+static struct dce_hwseq_registers hwseq_reg;
+
+#define hwseq_reg_init()\
+ HWSEQ_DCN36_REG_LIST()
+
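+/*
+ * Shift/mask field list for the DCN3.6 HW sequencer: power-gating domain
+ * controls and status, memory low-power modes, and clock/root-clock gating
+ * disable fields.
+ */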
+#define HWSEQ_DCN36_MASK_SH_LIST(mask_sh)\
+ HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
+ HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \
+ HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN22_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN22_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN23_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN23_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN24_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN24_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN25_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN25_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN19_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN22_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN23_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN24_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN25_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
+ HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \
+ HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \
+ HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \
+ HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \
+ HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \
+ HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DISPCLK_R_DMU_GATE_DIS, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DISPCLK_G_RBBMIF_GATE_DIS, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, RBBMIF_FGCG_REP_DIS, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DPREFCLK_ALLOW_DS_CLKSTOP, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DISPCLK_ALLOW_DS_CLKSTOP, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DPPCLK_ALLOW_DS_CLKSTOP, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DTBCLK_ALLOW_DS_CLKSTOP, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DCFCLK_ALLOW_DS_CLKSTOP, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, DPIACLK_ALLOW_DS_CLKSTOP, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, LONO_FGCG_REP_DIS, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, LONO_DISPCLK_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, LONO_SOCCLK_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DMU_CLK_CNTL, LONO_DMCUBCLK_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKA_FE_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKB_FE_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKC_FE_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKD_FE_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKE_FE_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKA_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKB_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKC_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKD_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKE_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_ROOT_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_ROOT_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_ROOT_GATE_DISABLE, mask_sh), \
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_ROOT_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK0_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK1_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK2_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK3_GATE_DISABLE, mask_sh)
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCN36_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCN36_MASK_SH_LIST(_MASK)
+};
+
+#define vmid_regs_init(id)\
+ DCN20_VMID_REG_LIST_RI(id)
+
+static struct dcn_vmid_registers vmid_regs[16];
+
+static const struct dcn20_vmid_shift vmid_shifts = {
+ DCN20_VMID_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn20_vmid_mask vmid_masks = {
+ DCN20_VMID_MASK_SH_LIST(_MASK)
+};
+
+static const struct resource_caps res_cap_dcn36 = {
+ .num_timing_generator = 4,
+ .num_opp = 4,
+ .num_video_plane = 4,
+ .num_audio = 5,
+ .num_stream_encoder = 5,
+ .num_dig_link_enc = 5,
+ .num_hpo_dp_stream_encoder = 4,
+ .num_hpo_dp_link_encoder = 2,
+ .num_pll = 4, /* 1 C10 eDP, 3x C20 combo PHY */
+ .num_dwb = 1,
+ .num_ddc = 5,
+ .num_vmid = 16,
+ .num_mpc_3dlut = 2,
+ .num_dsc = 4,
+};
+
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCN_UNIVERSAL,
+ .per_pixel_alpha = true,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = true,
+ .fp16 = true,
+ .p010 = true,
+ .ayuv = false,
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 16000,
+ .fp16 = 16000
+ },
+
+ // 6:1 downscaling ratio: 1000/6 = 166.666
+ .max_downscale_factor = {
+ .argb8888 = 250,
+ .nv12 = 167,
+ .fp16 = 167
+ },
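+ /* trailing positional initializers, presumably the minimum plane width/height */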
+ 64,
+ 64
+};
+
+static const struct dc_debug_options debug_defaults_drv = {
+ .disable_dmcu = true,
+ .force_abm_enable = false,
+ .clock_trace = true,
+ .disable_pplib_clock_request = false,
+ .pipe_split_policy = MPC_SPLIT_AVOID,
+ .force_single_disp_pipe_split = false,
+ .disable_dcc = DCC_ENABLE,
+ .disable_dpp_power_gate = true,
+ .disable_hubp_power_gate = true,
+ .disable_optc_power_gate = true, /* should be the same as the two options above */
+ .disable_hpo_power_gate = true, /* DMUB FW forces domain25 on */
+ .disable_clock_gate = false,
+ .disable_dsc_power_gate = true,
+ .vsr_support = true,
+ .performance_trace = false,
+ .max_downscale_src_width = 4096, /* up to true 4K */
+ .disable_pplib_wm_range = false,
+ .scl_reset_length10 = true,
+ .sanity_checks = false,
+ .underflow_assert_delay_us = 0xFFFFFFFF,
+ .dwb_fi_phase = -1, // -1 = disable
+ .dmub_command_table = true,
+ .pstate_enabled = true,
+ .use_max_lb = true,
+ .enable_mem_low_power = {
+ .bits = {
+ .vga = false,
+ .i2c = true,
+ .dmcu = false, // This was previously known to cause a hang on S3 cycles if enabled
+ .dscl = true,
+ .cm = true,
+ .mpc = true,
+ .optc = true,
+ .vpg = true,
+ .afmt = true,
+ }
+ },
+ .root_clock_optimization = {
+ .bits = {
+ .dpp = true,
+ .dsc = true, /* DSCCLK and DSC PG */
+ .hdmistream = true,
+ .hdmichar = true,
+ .dpstream = true,
+ .symclk32_se = true,
+ .symclk32_le = true,
+ .symclk_fe = true,
+ .physymclk = false,
+ .dpiasymclk = true,
+ }
+ },
+ .seamless_boot_odm_combine = DML_FAIL_SOURCE_PIXEL_FORMAT,
+ .enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disabling Z9 */
+ .minimum_z8_residency_time = 1, /* Always allow when other conditions are met */
+ .using_dml2 = true,
+ .support_eDP1_5 = true,
+ .enable_hpo_pg_support = false,
+ .enable_legacy_fast_update = true,
+ .enable_single_display_2to1_odm_policy = true,
+ .disable_idle_power_optimizations = false,
+ .dmcub_emulation = false,
+ .disable_boot_optimizations = false,
+ .disable_unbounded_requesting = false,
+ .disable_mem_low_power = false,
+ //must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions
+ .enable_double_buffered_dsc_pg_support = true,
+ .enable_dp_dig_pixel_rate_div_policy = 1,
+ .disable_z10 = false,
+ .ignore_pg = true,
+ .psp_disabled_wa = true,
+ .ips2_eval_delay_us = 2000,
+ .ips2_entry_delay_us = 800,
+ .disable_dmub_reallow_idle = false,
+ .static_screen_wait_frames = 2,
+ .disable_timeout = true,
+ .min_disp_clk_khz = 50000,
+};
+
+static const struct dc_panel_config panel_config_defaults = {
+ .psr = {
+ .disable_psr = false,
+ .disallow_psrsu = false,
+ .disallow_replay = false,
+ },
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
+static void dcn35_dpp_destroy(struct dpp **dpp)
+{
+ kfree(TO_DCN20_DPP(*dpp));
+ *dpp = NULL;
+}
+
+static struct dpp *dcn35_dpp_create(struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn3_dpp *dpp = kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL);
+ bool success = (dpp != NULL);
+
+ if (!success)
+ return NULL;
+
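+ /* The *_regs_init() macros expand against REG_STRUCT, so retarget it at this
+ * block's register array before populating every instance. */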
+#undef REG_STRUCT
+#define REG_STRUCT dpp_regs
+ dpp_regs_init(0),
+ dpp_regs_init(1),
+ dpp_regs_init(2),
+ dpp_regs_init(3);
+
+ success = dpp35_construct(dpp, ctx, inst, &dpp_regs[inst], &tf_shift,
+ &tf_mask);
+ if (success) {
+ dpp35_set_fgcg(
+ dpp,
+ ctx->dc->debug.enable_fine_grain_clock_gating.bits.dpp);
+ return &dpp->base;
+ }
+
+ BREAK_TO_DEBUGGER();
+ kfree(dpp);
+ return NULL;
+}
+
+static struct output_pixel_processor *dcn35_opp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn20_opp *opp =
+ kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
+
+ if (!opp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+#undef REG_STRUCT
+#define REG_STRUCT opp_regs
+ opp_regs_init(0),
+ opp_regs_init(1),
+ opp_regs_init(2),
+ opp_regs_init(3);
+
+ dcn35_opp_construct(opp, ctx, inst,
+ &opp_regs[inst], &opp_shift, &opp_mask);
+
+ dcn35_opp_set_fgcg(opp, ctx->dc->debug.enable_fine_grain_clock_gating.bits.opp);
+
+ return &opp->base;
+}
+
+static struct dce_aux *dcn31_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT aux_engine_regs
+ aux_engine_regs_init(0),
+ aux_engine_regs_init(1),
+ aux_engine_regs_init(2),
+ aux_engine_regs_init(3),
+ aux_engine_regs_init(4);
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
+
+ return &aux_engine->base;
+}
+
+#define i2c_inst_regs_init(id)\
+ I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id)
+
+static struct dce_i2c_registers i2c_hw_regs[5];
+
+static const struct dce_i2c_shift i2c_shifts = {
+ I2C_COMMON_MASK_SH_LIST_DCN35(__SHIFT)
+};
+
+static const struct dce_i2c_mask i2c_masks = {
+ I2C_COMMON_MASK_SH_LIST_DCN35(_MASK)
+};
+
+/* ========================================================== */
+
+/*
+ * DPIA index |  Preferred Encoder  | Host Router
+ *     0      |          C          |      0
+ *     1      |   First Available   |      0
+ *     2      |          D          |      1
+ *     3      |   First Available   |      1
+ */
+/* ========================================================== */
+static const enum engine_id dpia_to_preferred_enc_id_table[] = {
+ ENGINE_ID_DIGC,
+ ENGINE_ID_DIGC,
+ ENGINE_ID_DIGD,
+ ENGINE_ID_DIGD
+};
+
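+/* resource_funcs.get_preferred_eng_id_dpia: map a DPIA index to its preferred
+ * DIG engine per the table above.
+ */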
+static enum engine_id dcn36_get_preferred_eng_id_dpia(unsigned int dpia_index)
+{
+ return dpia_to_preferred_enc_id_table[dpia_index];
+}
+
+static struct dce_i2c_hw *dcn31_i2c_hw_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_i2c_hw *dce_i2c_hw =
+ kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
+
+ if (!dce_i2c_hw)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT i2c_hw_regs
+ i2c_inst_regs_init(1),
+ i2c_inst_regs_init(2),
+ i2c_inst_regs_init(3),
+ i2c_inst_regs_init(4),
+ i2c_inst_regs_init(5);
+
+ dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
+ &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
+
+ return dce_i2c_hw;
+}
+static struct mpc *dcn35_mpc_create(
+ struct dc_context *ctx,
+ int num_mpcc,
+ int num_rmu)
+{
+ struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), GFP_KERNEL);
+
+ if (!mpc30)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT mpc_regs
+ dcn_mpc_regs_init();
+
+ dcn32_mpc_construct(mpc30, ctx,
+ &mpc_regs,
+ &mpc_shift,
+ &mpc_mask,
+ num_mpcc,
+ num_rmu);
+
+ return &mpc30->base;
+}
+
+static struct hubbub *dcn35_hubbub_create(struct dc_context *ctx)
+{
+ int i;
+
+ struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub),
+ GFP_KERNEL);
+
+ if (!hubbub3)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT hubbub_reg
+ hubbub_reg_init();
+
+#undef REG_STRUCT
+#define REG_STRUCT vmid_regs
+ vmid_regs_init(0),
+ vmid_regs_init(1),
+ vmid_regs_init(2),
+ vmid_regs_init(3),
+ vmid_regs_init(4),
+ vmid_regs_init(5),
+ vmid_regs_init(6),
+ vmid_regs_init(7),
+ vmid_regs_init(8),
+ vmid_regs_init(9),
+ vmid_regs_init(10),
+ vmid_regs_init(11),
+ vmid_regs_init(12),
+ vmid_regs_init(13),
+ vmid_regs_init(14),
+ vmid_regs_init(15);
+
+ hubbub35_construct(hubbub3, ctx,
+ &hubbub_reg,
+ &hubbub_shift,
+ &hubbub_mask,
+ 384,/*ctx->dc->dml.ip.det_buffer_size_kbytes,*/
+ 8, /*ctx->dc->dml.ip.pixel_chunk_size_kbytes,*/
+ 1792 /*ctx->dc->dml.ip.config_return_buffer_size_in_kbytes*/);
+
+
+ for (i = 0; i < res_cap_dcn36.num_vmid; i++) {
+ struct dcn20_vmid *vmid = &hubbub3->vmid[i];
+
+ vmid->ctx = ctx;
+
+ vmid->regs = &vmid_regs[i];
+ vmid->shifts = &vmid_shifts;
+ vmid->masks = &vmid_masks;
+ }
+
+ return &hubbub3->base;
+}
+
+static struct timing_generator *dcn35_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance)
+{
+ struct optc *tgn10 =
+ kzalloc(sizeof(struct optc), GFP_KERNEL);
+
+ if (!tgn10)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT optc_regs
+ optc_regs_init(0),
+ optc_regs_init(1),
+ optc_regs_init(2),
+ optc_regs_init(3);
+
+ tgn10->base.inst = instance;
+ tgn10->base.ctx = ctx;
+
+ tgn10->tg_regs = &optc_regs[instance];
+ tgn10->tg_shift = &optc_shift;
+ tgn10->tg_mask = &optc_mask;
+
+ dcn35_timing_generator_init(tgn10);
+
+ return &tgn10->base;
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 600000,
+ .hdmi_ycbcr420_supported = true,
+ .dp_ycbcr420_supported = true,
+ .fec_supported = true,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_TPS4_CAPABLE = true
+};
+
+static struct link_encoder *dcn35_link_encoder_create(
+ struct dc_context *ctx,
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dcn20_link_encoder *enc20 =
+ kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
+
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_aux_regs
+ aux_regs_init(0),
+ aux_regs_init(1),
+ aux_regs_init(2),
+ aux_regs_init(3),
+ aux_regs_init(4);
+
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_hpd_regs
+ hpd_regs_init(0),
+ hpd_regs_init(1),
+ hpd_regs_init(2),
+ hpd_regs_init(3),
+ hpd_regs_init(4);
+
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_regs
+ link_regs_init(0, A),
+ link_regs_init(1, B),
+ link_regs_init(2, C),
+ link_regs_init(3, D),
+ link_regs_init(4, E);
+
+ dcn35_link_encoder_construct(enc20,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source],
+ &le_shift,
+ &le_mask);
+
+ return &enc20->enc10.base;
+}
+
+/* Create a minimal link encoder object not associated with a particular
+ * physical connector.
+ * resource_funcs.link_enc_create_minimal
+ */
+static struct link_encoder *dcn31_link_enc_create_minimal(
+ struct dc_context *ctx, enum engine_id eng_id)
+{
+ struct dcn20_link_encoder *enc20;
+
+ if ((eng_id - ENGINE_ID_DIGA) > ctx->dc->res_pool->res_cap->num_dig_link_enc)
+ return NULL;
+
+ enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
+ if (!enc20)
+ return NULL;
+
+ dcn31_link_encoder_construct_minimal(
+ enc20,
+ ctx,
+ &link_enc_feature,
+ &link_enc_regs[eng_id - ENGINE_ID_DIGA],
+ eng_id);
+
+ return &enc20->enc10.base;
+}
+
+static struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dcn31_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dcn31_panel_cntl_construct(panel_cntl, init_data);
+
+ return &panel_cntl->base;
+}
+
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
+ generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX),
+ FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
+
+}
+
+static struct audio *dcn31_create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+
+#undef REG_STRUCT
+#define REG_STRUCT audio_regs
+ audio_regs_init(0),
+ audio_regs_init(1),
+ audio_regs_init(2),
+ audio_regs_init(3),
+ audio_regs_init(4),
+ audio_regs_init(5),
+ audio_regs_init(6);
+
+ return dce_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+static struct vpg *dcn31_vpg_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn31_vpg *vpg31 = kzalloc(sizeof(struct dcn31_vpg), GFP_KERNEL);
+
+ if (!vpg31)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT vpg_regs
+ vpg_regs_init(0),
+ vpg_regs_init(1),
+ vpg_regs_init(2),
+ vpg_regs_init(3),
+ vpg_regs_init(4),
+ vpg_regs_init(5),
+ vpg_regs_init(6),
+ vpg_regs_init(7),
+ vpg_regs_init(8),
+ vpg_regs_init(9);
+
+ vpg31_construct(vpg31, ctx, inst,
+ &vpg_regs[inst],
+ &vpg_shift,
+ &vpg_mask);
+
+ return &vpg31->base;
+}
+
+static struct afmt *dcn31_afmt_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn31_afmt *afmt31 = kzalloc(sizeof(struct dcn31_afmt), GFP_KERNEL);
+
+ if (!afmt31)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT afmt_regs
+ afmt_regs_init(0),
+ afmt_regs_init(1),
+ afmt_regs_init(2),
+ afmt_regs_init(3),
+ afmt_regs_init(4),
+ afmt_regs_init(5);
+
+ afmt31_construct(afmt31, ctx, inst,
+ &afmt_regs[inst],
+ &afmt_shift,
+ &afmt_mask);
+
+ // Light sleep by default, no need to power down here
+
+ return &afmt31->base;
+}
+
+static struct apg *dcn31_apg_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL);
+
+ if (!apg31)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT apg_regs
+ apg_regs_init(0),
+ apg_regs_init(1),
+ apg_regs_init(2),
+ apg_regs_init(3);
+
+ apg31_construct(apg31, ctx, inst,
+ &apg_regs[inst],
+ &apg_shift,
+ &apg_mask);
+
+ return &apg31->base;
+}
+
+static struct stream_encoder *dcn35_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dcn10_stream_encoder *enc1;
+ struct vpg *vpg;
+ struct afmt *afmt;
+ int vpg_inst;
+ int afmt_inst;
+
+ /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */
+ if (eng_id <= ENGINE_ID_DIGF) {
+ vpg_inst = eng_id;
+ afmt_inst = eng_id;
+ } else
+ return NULL;
+
+ enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
+ vpg = dcn31_vpg_create(ctx, vpg_inst);
+ afmt = dcn31_afmt_create(ctx, afmt_inst);
+
+ if (!enc1 || !vpg || !afmt) {
+ kfree(enc1);
+ kfree(vpg);
+ kfree(afmt);
+ return NULL;
+ }
+
+#undef REG_STRUCT
+#define REG_STRUCT stream_enc_regs
+ stream_enc_regs_init(0),
+ stream_enc_regs_init(1),
+ stream_enc_regs_init(2),
+ stream_enc_regs_init(3),
+ stream_enc_regs_init(4);
+
+ dcn35_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
+ eng_id, vpg, afmt,
+ &stream_enc_regs[eng_id],
+ &se_shift, &se_mask);
+
+ return &enc1->base;
+}
+
+static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31;
+ struct vpg *vpg;
+ struct apg *apg;
+ uint32_t hpo_dp_inst;
+ uint32_t vpg_inst;
+ uint32_t apg_inst;
+
+ ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3));
+ hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0;
+
+ /* Mapping of VPG register blocks to HPO DP block instance:
+ * VPG[6] -> HPO_DP[0]
+ * VPG[7] -> HPO_DP[1]
+ * VPG[8] -> HPO_DP[2]
+ * VPG[9] -> HPO_DP[3]
+ */
+ vpg_inst = hpo_dp_inst + 6;
+
+ /* Mapping of APG register blocks to HPO DP block instance:
+ * APG[0] -> HPO_DP[0]
+ * APG[1] -> HPO_DP[1]
+ * APG[2] -> HPO_DP[2]
+ * APG[3] -> HPO_DP[3]
+ */
+ apg_inst = hpo_dp_inst;
+
+ /* allocate HPO stream encoder and create VPG sub-block */
+ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL);
+ vpg = dcn31_vpg_create(ctx, vpg_inst);
+ apg = dcn31_apg_create(ctx, apg_inst);
+
+ if (!hpo_dp_enc31 || !vpg || !apg) {
+ kfree(hpo_dp_enc31);
+ kfree(vpg);
+ kfree(apg);
+ return NULL;
+ }
+
+#undef REG_STRUCT
+#define REG_STRUCT hpo_dp_stream_enc_regs
+ hpo_dp_stream_encoder_reg_init(0),
+ hpo_dp_stream_encoder_reg_init(1),
+ hpo_dp_stream_encoder_reg_init(2),
+ hpo_dp_stream_encoder_reg_init(3);
+
+ dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios,
+ hpo_dp_inst, eng_id, vpg, apg,
+ &hpo_dp_stream_enc_regs[hpo_dp_inst],
+ &hpo_dp_se_shift, &hpo_dp_se_mask);
+
+ return &hpo_dp_enc31->base;
+}
+
+static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
+ uint8_t inst,
+ struct dc_context *ctx)
+{
+ struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31;
+
+ /* allocate HPO link encoder */
+ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+ if (!hpo_dp_enc31)
+ return NULL; /* out of memory */
+
+#undef REG_STRUCT
+#define REG_STRUCT hpo_dp_link_enc_regs
+ hpo_dp_link_encoder_reg_init(0),
+ hpo_dp_link_encoder_reg_init(1);
+
+ hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
+ &hpo_dp_link_enc_regs[inst],
+ &hpo_dp_le_shift, &hpo_dp_le_mask);
+
+ return &hpo_dp_enc31->base;
+}
+
+static struct dce_hwseq *dcn36_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
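+ /* hwseq_reg is a file-scope static, so it is safe to populate it before the
+ * allocation check below. */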
+#undef REG_STRUCT
+#define REG_STRUCT hwseq_reg
+ hwseq_reg_init();
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = &hwseq_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ }
+ return hws;
+}
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = dcn31_create_audio,
+ .create_stream_encoder = dcn35_stream_encoder_create,
+ .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create,
+ .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create,
+ .create_hwseq = dcn36_hwseq_create,
+};
+
+static void dcn36_resource_destruct(struct dcn36_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL) {
+ if (pool->base.stream_enc[i]->vpg != NULL) {
+ kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg));
+ pool->base.stream_enc[i]->vpg = NULL;
+ }
+ if (pool->base.stream_enc[i]->afmt != NULL) {
+ kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt));
+ pool->base.stream_enc[i]->afmt = NULL;
+ }
+ kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+ pool->base.stream_enc[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) {
+ if (pool->base.hpo_dp_stream_enc[i] != NULL) {
+ if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) {
+ kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg));
+ pool->base.hpo_dp_stream_enc[i]->vpg = NULL;
+ }
+ if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) {
+ kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg));
+ pool->base.hpo_dp_stream_enc[i]->apg = NULL;
+ }
+ kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i]));
+ pool->base.hpo_dp_stream_enc[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) {
+ if (pool->base.hpo_dp_link_enc[i] != NULL) {
+ kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i]));
+ pool->base.hpo_dp_link_enc[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
+ if (pool->base.dscs[i] != NULL)
+ dcn20_dsc_destroy(&pool->base.dscs[i]);
+ }
+
+ if (pool->base.mpc != NULL) {
+ kfree(TO_DCN20_MPC(pool->base.mpc));
+ pool->base.mpc = NULL;
+ }
+ if (pool->base.hubbub != NULL) {
+ kfree(pool->base.hubbub);
+ pool->base.hubbub = NULL;
+ }
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.dpps[i] != NULL)
+ dcn35_dpp_destroy(&pool->base.dpps[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.hubps[i] != NULL) {
+ kfree(TO_DCN20_HUBP(pool->base.hubps[i]));
+ pool->base.hubps[i] = NULL;
+ }
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+ if (pool->base.hw_i2cs[i] != NULL) {
+ kfree(pool->base.hw_i2cs[i]);
+ pool->base.hw_i2cs[i] = NULL;
+ }
+ if (pool->base.sw_i2cs[i] != NULL) {
+ kfree(pool->base.sw_i2cs[i]);
+ pool->base.sw_i2cs[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_opp; i++) {
+ if (pool->base.opps[i] != NULL)
+ pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_dwb; i++) {
+ if (pool->base.dwbc[i] != NULL) {
+ kfree(TO_DCN30_DWBC(pool->base.dwbc[i]));
+ pool->base.dwbc[i] = NULL;
+ }
+ if (pool->base.mcif_wb[i] != NULL) {
+ kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i]));
+ pool->base.mcif_wb[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i])
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL) {
+ dcn20_clock_source_destroy(&pool->base.clock_sources[i]);
+ pool->base.clock_sources[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) {
+ if (pool->base.mpc_lut[i] != NULL) {
+ dc_3dlut_func_release(pool->base.mpc_lut[i]);
+ pool->base.mpc_lut[i] = NULL;
+ }
+ if (pool->base.mpc_shaper[i] != NULL) {
+ dc_transfer_func_release(pool->base.mpc_shaper[i]);
+ pool->base.mpc_shaper[i] = NULL;
+ }
+ }
+
+ if (pool->base.dp_clock_source != NULL) {
+ dcn20_clock_source_destroy(&pool->base.dp_clock_source);
+ pool->base.dp_clock_source = NULL;
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ if (pool->base.multiple_abms[i] != NULL)
+ dce_abm_destroy(&pool->base.multiple_abms[i]);
+ }
+
+ if (pool->base.psr != NULL)
+ dmub_psr_destroy(&pool->base.psr);
+
+ if (pool->base.replay != NULL)
+ dmub_replay_destroy(&pool->base.replay);
+
+ if (pool->base.pg_cntl != NULL)
+ dcn_pg_cntl_destroy(&pool->base.pg_cntl);
+
+ if (pool->base.dccg != NULL)
+ dcn_dccg_destroy(&pool->base.dccg);
+}
+
+static struct hubp *dcn35_hubp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn20_hubp *hubp2 =
+ kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
+
+ if (!hubp2)
+ return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT hubp_regs
+ hubp_regs_init(0),
+ hubp_regs_init(1),
+ hubp_regs_init(2),
+ hubp_regs_init(3);
+
+ if (hubp35_construct(hubp2, ctx, inst,
+ &hubp_regs[inst], &hubp_shift, &hubp_mask))
+ return &hubp2->base;
+
+ BREAK_TO_DEBUGGER();
+ kfree(hubp2);
+ return NULL;
+}
+
+static void dcn35_dwbc_init(struct dcn30_dwbc *dwbc30, struct dc_context *ctx)
+{
+ dcn35_dwbc_set_fgcg(
+ dwbc30, ctx->dc->debug.enable_fine_grain_clock_gating.bits.dwb);
+}
+
+static bool dcn35_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
+{
+ int i;
+ uint32_t pipe_count = pool->res_cap->num_dwb;
+
+ for (i = 0; i < pipe_count; i++) {
+ struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc),
+ GFP_KERNEL);
+
+ if (!dwbc30) {
+ dm_error("DC: failed to create dwbc30!\n");
+ return false;
+ }
+
+#undef REG_STRUCT
+#define REG_STRUCT dwbc35_regs
+ dwbc_regs_dcn3_init(0);
+
+ dcn35_dwbc_construct(dwbc30, ctx,
+ &dwbc35_regs[i],
+ &dwbc35_shift,
+ &dwbc35_mask,
+ i);
+
+ pool->dwbc[i] = &dwbc30->base;
+
+ dcn35_dwbc_init(dwbc30, ctx);
+ }
+ return true;
+}
+
+static void dcn35_mmhubbub_init(struct dcn30_mmhubbub *mcif_wb30,
+ struct dc_context *ctx)
+{
+ dcn35_mmhubbub_set_fgcg(
+ mcif_wb30,
+ ctx->dc->debug.enable_fine_grain_clock_gating.bits.mmhubbub);
+}
+
+static bool dcn35_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
+{
+ int i;
+ uint32_t pipe_count = pool->res_cap->num_dwb;
+
+ for (i = 0; i < pipe_count; i++) {
+ struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub),
+ GFP_KERNEL);
+
+ if (!mcif_wb30) {
+ dm_error("DC: failed to create mcif_wb30!\n");
+ return false;
+ }
+
+#undef REG_STRUCT
+#define REG_STRUCT mcif_wb35_regs
+ mcif_wb_regs_dcn3_init(0);
+
+ dcn35_mmhubbub_construct(mcif_wb30, ctx,
+ &mcif_wb35_regs[i],
+ &mcif_wb35_shift,
+ &mcif_wb35_mask,
+ i);
+
+ dcn35_mmhubbub_init(mcif_wb30, ctx);
+
+ pool->mcif_wb[i] = &mcif_wb30->base;
+ }
+ return true;
+}
+
+static struct display_stream_compressor *dcn35_dsc_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn20_dsc *dsc =
+ kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
+
+ if (!dsc) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+#undef REG_STRUCT
+#define REG_STRUCT dsc_regs
+ dsc_regsDCN35_init(0),
+ dsc_regsDCN35_init(1),
+ dsc_regsDCN35_init(2),
+ dsc_regsDCN35_init(3);
+
+ dsc35_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
+ dsc35_set_fgcg(dsc,
+ ctx->dc->debug.enable_fine_grain_clock_gating.bits.dsc);
+ return &dsc->base;
+}
+
+static void dcn36_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dcn36_resource_pool *dcn36_pool = TO_DCN36_RES_POOL(*pool);
+
+ dcn36_resource_destruct(dcn36_pool);
+ kfree(dcn36_pool);
+ *pool = NULL;
+}
+
+static struct clock_source *dcn35_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dcn31_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+
+ kfree(clk_src);
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+static struct dc_cap_funcs cap_funcs = {
+ .get_dcc_compression_cap = dcn20_get_dcc_compression_cap
+};
+
+static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
+
+static bool dcn35_validate_bandwidth(struct dc *dc,
+ struct dc_state *context,
+ bool fast_validate)
+{
+ bool out = false;
+
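+ /* Select the DC-power DML2 context when running on DC power, otherwise use
+ * the default DML2 context. */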
+ out = dml2_validate(dc, context,
+ context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
+ fast_validate);
+
+ if (fast_validate)
+ return out;
+
+ DC_FP_START();
+ dcn35_decide_zstate_support(dc, context);
+ DC_FP_END();
+
+ return out;
+}
+
+
+static struct resource_funcs dcn36_res_pool_funcs = {
+ .destroy = dcn36_destroy_resource_pool,
+ .link_enc_create = dcn35_link_encoder_create,
+ .link_enc_create_minimal = dcn31_link_enc_create_minimal,
+ .link_encs_assign = link_enc_cfg_link_encs_assign,
+ .link_enc_unassign = link_enc_cfg_link_enc_unassign,
+ .panel_cntl_create = dcn31_panel_cntl_create,
+ .validate_bandwidth = dcn35_validate_bandwidth,
+ .calculate_wm_and_dlg = NULL,
+ .update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
+ .populate_dml_pipes = dcn35_populate_dml_pipes_from_context_fpu,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
+ .release_pipe = dcn20_release_pipe,
+ .add_stream_to_ctx = dcn30_add_stream_to_ctx,
+ .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
+ .remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
+ .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context,
+ .set_mcif_arb_params = dcn30_set_mcif_arb_params,
+ .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
+ .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
+ .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
+ .update_bw_bounding_box = dcn35_update_bw_bounding_box_fpu,
+ .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_panel_config_defaults = dcn35_get_panel_config_defaults,
+ .get_preferred_eng_id_dpia = dcn36_get_preferred_eng_id_dpia,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+};
+
+static bool dcn36_resource_construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dcn36_resource_pool *pool)
+{
+ int i;
+ struct dc_context *ctx = dc->ctx;
+ struct irq_service_init_data init_data;
+
+#undef REG_STRUCT
+#define REG_STRUCT bios_regs
+ bios_regs_init();
+
+#undef REG_STRUCT
+#define REG_STRUCT clk_src_regs
+ clk_src_regs_init(0, A),
+ clk_src_regs_init(1, B),
+ clk_src_regs_init(2, C),
+ clk_src_regs_init(3, D),
+ clk_src_regs_init(4, E);
+
+#undef REG_STRUCT
+#define REG_STRUCT abm_regs
+ abm_regs_init(0),
+ abm_regs_init(1),
+ abm_regs_init(2),
+ abm_regs_init(3);
+
+#undef REG_STRUCT
+#define REG_STRUCT dccg_regs
+ dccg_regs_init();
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap_dcn36;
+
+ pool->base.funcs = &dcn36_res_pool_funcs;
+
+ /*************************************************
+ * Resource + ASIC cap hardcoding *
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+ pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
+ dc->caps.max_downscale_ratio = 600;
+ dc->caps.i2c_speed_in_khz = 100;
+ dc->caps.i2c_speed_in_khz_hdcp = 100;
+ dc->caps.max_cursor_size = 256;
+ dc->caps.min_horizontal_blanking_period = 80;
+ dc->caps.dmdata_alloc_size = 2048;
+ dc->caps.max_slave_planes = 3;
+ dc->caps.max_slave_yuv_planes = 3;
+ dc->caps.max_slave_rgb_planes = 3;
+ dc->caps.post_blend_color_processing = true;
+ dc->caps.force_dp_tps4_for_cp2520 = true;
+ if (dc->config.forceHBR2CP2520)
+ dc->caps.force_dp_tps4_for_cp2520 = false;
+ dc->caps.dp_hpo = true;
+ dc->caps.dp_hdmi21_pcon_support = true;
+
+ dc->caps.edp_dsc_support = true;
+ dc->caps.extended_aux_timeout_support = true;
+ dc->caps.dmcub_support = true;
+ dc->caps.is_apu = true;
+ dc->caps.seamless_odm = true;
+
+ dc->caps.zstate_support = true;
+ dc->caps.ips_support = true;
+ dc->caps.max_v_total = (1 << 15) - 1;
+ dc->caps.vtotal_limited_by_fp2 = true;
+
+ /* Color pipeline capabilities */
+ dc->caps.color.dpp.dcn_arch = 1;
+ dc->caps.color.dpp.input_lut_shared = 0;
+ dc->caps.color.dpp.icsc = 1;
+ dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr
+ dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+ dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+ dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1;
+ dc->caps.color.dpp.dgam_rom_caps.pq = 1;
+ dc->caps.color.dpp.dgam_rom_caps.hlg = 1;
+ dc->caps.color.dpp.post_csc = 1;
+ dc->caps.color.dpp.gamma_corr = 1;
+ dc->caps.color.dpp.dgam_rom_for_yuv = 0;
+
+ dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1
+ // no OGAM ROM on DCN301
+ dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
+ dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.pq = 0;
+ dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.ocsc = 0;
+
+ dc->caps.color.mpc.gamut_remap = 1;
+ dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //2
+ dc->caps.color.mpc.ogam_ram = 1;
+ dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
+ dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.pq = 0;
+ dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+ dc->caps.color.mpc.ocsc = 1;
+
+ /* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
+ * to provide some margin.
+ * Future ASICs are expected to have an equal or higher value, so that the
+ * power improvement is deterministic from generation to generation
+ * (i.e., we should not expect a new ASIC generation with a lower vmin rate).
+ */
+ dc->caps.max_disp_clock_khz_at_vmin = 650000;
+
+ /* Sequential ONO is based on ASIC. */
+ if (dc->ctx->asic_id.hw_internal_rev > 0x10)
+ dc->caps.sequential_ono = true;
+
+ /* Use pipe context based otg sync logic */
+ dc->config.use_pipe_ctx_sync_logic = true;
+
+ dc->config.disable_hbr_audio_dp2 = true;
+ /* read VBIOS LTTPR caps */
+ {
+ if (ctx->dc_bios->funcs->get_lttpr_caps) {
+ enum bp_result bp_query_result;
+ uint8_t is_vbios_lttpr_enable = 0;
+
+ bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
+ dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
+ }
+
+ /* interop bit is implicit */
+ {
+ dc->caps.vbios_lttpr_aware = true;
+ }
+ }
+
+ if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
+ dc->debug = debug_defaults_drv;
+
+ /* HW default is to have all FGCG enabled; SW does not need to program them */
+ dc->debug.enable_fine_grain_clock_gating.u32All = 0xFFFF;
+ // Init the vm_helper
+ if (dc->vm_helper)
+ vm_helper_init(dc->vm_helper, 16);
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ /* Clock Sources for Pixel Clock*/
+ pool->base.clock_sources[DCN36_CLK_SRC_PLL0] =
+ dcn35_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL0,
+ &clk_src_regs[0], false);
+ pool->base.clock_sources[DCN36_CLK_SRC_PLL1] =
+ dcn35_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL1,
+ &clk_src_regs[1], false);
+ pool->base.clock_sources[DCN36_CLK_SRC_PLL2] =
+ dcn35_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL2,
+ &clk_src_regs[2], false);
+ pool->base.clock_sources[DCN36_CLK_SRC_PLL3] =
+ dcn35_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ &clk_src_regs[3], false);
+ pool->base.clock_sources[DCN36_CLK_SRC_PLL4] =
+ dcn35_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL4,
+ &clk_src_regs[4], false);
+
+ pool->base.clk_src_count = DCN36_CLK_SRC_TOTAL;
+
+ /* TODO: do not reuse the phy_pll registers */
+ pool->base.dp_clock_source =
+ dcn35_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_ID_DP_DTO,
+ &clk_src_regs[0], true);
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+ }
+ /* temporary until DML2 fully works without DML1 */
+ dml_init_instance(&dc->dml, &dcn3_5_soc, &dcn3_5_ip, DML_PROJECT_DCN31);
+
+ /* TODO: DCCG */
+ pool->base.dccg = dccg35_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
+ if (pool->base.dccg == NULL) {
+ dm_error("DC: failed to create dccg!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+#undef REG_STRUCT
+#define REG_STRUCT pg_cntl_regs
+ pg_cntl_dcn35_regs_init();
+
+ pool->base.pg_cntl = pg_cntl35_create(ctx, &pg_cntl_regs, &pg_cntl_shift, &pg_cntl_mask);
+ if (pool->base.pg_cntl == NULL) {
+ dm_error("DC: failed to create power gate control!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+ /* TODO: IRQ */
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dcn36_create(&init_data);
+ if (!pool->base.irqs)
+ goto create_fail;
+
+ /* HUBBUB */
+ pool->base.hubbub = dcn35_hubbub_create(ctx);
+ if (pool->base.hubbub == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create hubbub!\n");
+ goto create_fail;
+ }
+
+ /* HUBPs, DPPs, OPPs and TGs */
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.hubps[i] = dcn35_hubp_create(ctx, i);
+ if (pool->base.hubps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create hubps!\n");
+ goto create_fail;
+ }
+
+ pool->base.dpps[i] = dcn35_dpp_create(ctx, i);
+ if (pool->base.dpps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create dpps!\n");
+ goto create_fail;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_opp; i++) {
+ pool->base.opps[i] = dcn35_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ goto create_fail;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ pool->base.timing_generators[i] = dcn35_timing_generator_create(
+ ctx, i);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto create_fail;
+ }
+ }
+ pool->base.timing_generator_count = i;
+
+ /* PSR */
+ pool->base.psr = dmub_psr_create(ctx);
+ if (pool->base.psr == NULL) {
+ dm_error("DC: failed to create psr obj!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+ /* Replay */
+ pool->base.replay = dmub_replay_create(ctx);
+ if (pool->base.replay == NULL) {
+ dm_error("DC: failed to create replay obj!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+ /* ABM */
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ pool->base.multiple_abms[i] = dmub_abm_create(ctx,
+ &abm_regs[i],
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.multiple_abms[i] == NULL) {
+ dm_error("DC: failed to create abm for pipe %d!\n", i);
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+ }
+
+ /* MPC and DSC */
+ pool->base.mpc = dcn35_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut);
+ if (pool->base.mpc == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create mpc!\n");
+ goto create_fail;
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
+ pool->base.dscs[i] = dcn35_dsc_create(ctx, i);
+ if (pool->base.dscs[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create display stream compressor %d!\n", i);
+ goto create_fail;
+ }
+ }
+
+ /* DWB and MMHUBBUB */
+ if (!dcn35_dwbc_create(ctx, &pool->base)) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create dwbc!\n");
+ goto create_fail;
+ }
+
+ if (!dcn35_mmhubbub_create(ctx, &pool->base)) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create mcif_wb!\n");
+ goto create_fail;
+ }
+
+ /* AUX and I2C */
+ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ pool->base.engines[i] = dcn31_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create aux engine!!\n");
+ goto create_fail;
+ }
+ pool->base.hw_i2cs[i] = dcn31_i2c_hw_create(ctx, i);
+ if (pool->base.hw_i2cs[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create hw i2c!!\n");
+ goto create_fail;
+ }
+ pool->base.sw_i2cs[i] = NULL;
+ }
+
+ /* DCN3.5 has 6 DPIAs; only 4 are enabled here */
+ pool->base.usb4_dpia_count = 4;
+ if (dc->debug.dpia_debug.bits.disable_dpia)
+ pool->base.usb4_dpia_count = 0;
+
+ /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto create_fail;
+
+ /* HW Sequencer and Plane caps */
+ dcn35_hw_sequencer_construct(dc);
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
+ dc->cap_funcs = cap_funcs;
+
+ dc->dcn_ip->max_num_dpp = pool->base.pipe_count;
+
+ dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
+ dc->dml2_options.use_native_pstate_optimization = true;
+ dc->dml2_options.use_native_soc_bb_construction = true;
+ dc->dml2_options.minimize_dispclk_using_odm = false;
+ if (dc->config.EnableMinDispClkODM)
+ dc->dml2_options.minimize_dispclk_using_odm = true;
+ dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm;
+
+ resource_init_common_dml2_callbacks(dc, &dc->dml2_options);
+ dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch;
+
+ dc->dml2_options.max_segments_per_hubp = 24;
+ dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/
+ dc->dml2_options.override_det_buffer_size_kbytes = true;
+
+ if (dc->config.sdpif_request_limit_words_per_umc == 0)
+ dc->config.sdpif_request_limit_words_per_umc = 16;/*todo*/
+
+ return true;
+
+create_fail:
+
+ dcn36_resource_destruct(pool);
+
+ return false;
+}
+
+struct resource_pool *dcn36_create_resource_pool(
+ const struct dc_init_data *init_data,
+ struct dc *dc)
+{
+ struct dcn36_resource_pool *pool =
+ kzalloc(sizeof(struct dcn36_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (dcn36_resource_construct(init_data->num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ kfree(pool);
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.h
new file mode 100644
index 000000000000..5490c9975e23
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2025 Advanced Micro Devices, Inc. */
+
+#ifndef _DCN36_RESOURCE_H_
+#define _DCN36_RESOURCE_H_
+
+#include "core_types.h"
+
+extern struct _vcs_dpi_ip_params_st dcn3_6_ip;
+extern struct _vcs_dpi_soc_bounding_box_st dcn3_6_soc;
+
+#define TO_DCN36_RES_POOL(pool)\
+ container_of(pool, struct dcn36_resource_pool, base)
+
+struct dcn36_resource_pool {
+ struct resource_pool base;
+};
+
+struct resource_pool *dcn36_create_resource_pool(
+ const struct dc_init_data *init_data,
+ struct dc *dc);
+
+#define HWSEQ_DCN36_REG_LIST()\
+ SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
+ SR(DCHUBBUB_ARB_HOSTVM_CNTL), \
+ SR(DIO_MEM_PWR_CTRL), \
+ SR(ODM_MEM_PWR_CTRL3), \
+ SR(MMHUBBUB_MEM_PWR_CNTL), \
+ SR(DCCG_GATE_DISABLE_CNTL), \
+ SR(DCCG_GATE_DISABLE_CNTL2), \
+ SR(DCCG_GATE_DISABLE_CNTL4), \
+ SR(DCCG_GATE_DISABLE_CNTL5), \
+ SR(DCFCLK_CNTL),\
+ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \
+ SRII(PIXEL_RATE_CNTL, OTG, 0), \
+ SRII(PIXEL_RATE_CNTL, OTG, 1),\
+ SRII(PIXEL_RATE_CNTL, OTG, 2),\
+ SRII(PIXEL_RATE_CNTL, OTG, 3),\
+ SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 0),\
+ SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 1),\
+ SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 2),\
+ SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 3),\
+ SR(MICROSECOND_TIME_BASE_DIV), \
+ SR(MILLISECOND_TIME_BASE_DIV), \
+ SR(DISPCLK_FREQ_CHANGE_CNTL), \
+ SR(RBBMIF_TIMEOUT_DIS), \
+ SR(RBBMIF_TIMEOUT_DIS_2), \
+ SR(DCHUBBUB_CRC_CTRL), \
+ SR(DPP_TOP0_DPP_CRC_CTRL), \
+ SR(MPC_CRC_CTRL), \
+ SR(DOMAIN0_PG_CONFIG), \
+ SR(DOMAIN1_PG_CONFIG), \
+ SR(DOMAIN2_PG_CONFIG), \
+ SR(DOMAIN3_PG_CONFIG), \
+ SR(DOMAIN16_PG_CONFIG), \
+ SR(DOMAIN17_PG_CONFIG), \
+ SR(DOMAIN18_PG_CONFIG), \
+ SR(DOMAIN19_PG_CONFIG), \
+ SR(DOMAIN0_PG_STATUS), \
+ SR(DOMAIN1_PG_STATUS), \
+ SR(DOMAIN2_PG_STATUS), \
+ SR(DOMAIN3_PG_STATUS), \
+ SR(DOMAIN16_PG_STATUS), \
+ SR(DOMAIN17_PG_STATUS), \
+ SR(DOMAIN18_PG_STATUS), \
+ SR(DOMAIN19_PG_STATUS), \
+ SR(DC_IP_REQUEST_CNTL), \
+ SR(AZALIA_AUDIO_DTO), \
+ SR(AZALIA_CONTROLLER_CLOCK_GATING), \
+ SR(HPO_TOP_HW_CONTROL),\
+ SR(DMU_CLK_CNTL)
+
+#endif /* _DCN36_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
index c1ebc6b1c937..7436dfbdf927 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
@@ -76,9 +76,6 @@
#include "dml2/dml2_wrapper.h"
-#include "spl/dc_spl_scl_easf_filters.h"
-#include "spl/dc_spl_isharp_filters.h"
-
#define DC_LOGGER_INIT(logger)
enum dcn401_clk_src_array_id {
@@ -1669,12 +1666,13 @@ static void dcn401_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx)
{
const struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
- struct link_encoder *link_enc = NULL;
+ struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
struct pixel_clk_params *pixel_clk_params = &pipe_ctx->stream_res.pix_clk_params;
pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
- link_enc = link_enc_cfg_get_link_enc(link);
+ if (!pipe_ctx->stream->ctx->dc->config.unify_link_enc_assignment)
+ link_enc = link_enc_cfg_get_link_enc(link);
if (link_enc)
pixel_clk_params->encoder_object_id = link_enc->id;
@@ -1872,9 +1870,9 @@ static bool dcn401_resource_construct(
dc->caps.subvp_vertical_int_margin_us = 30;
dc->caps.subvp_drr_vblank_start_margin_us = 100; // 100us margin
- dc->caps.max_slave_planes = 2;
- dc->caps.max_slave_yuv_planes = 2;
- dc->caps.max_slave_rgb_planes = 2;
+ dc->caps.max_slave_planes = 3;
+ dc->caps.max_slave_yuv_planes = 3;
+ dc->caps.max_slave_rgb_planes = 3;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.dp_hpo = true;
@@ -1926,6 +1924,7 @@ static bool dcn401_resource_construct(
dc->config.dc_mode_clk_limit_support = true;
dc->config.enable_windowed_mpo_odm = true;
dc->config.set_pipe_unlock_order = true; /* Need to ensure DET gets freed before allocating */
+
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
@@ -2189,8 +2188,6 @@ static bool dcn401_resource_construct(
dc->dml2_options.det_segment_size = DCN4_01_CRB_SEGMENT_SIZE_KB;
/* SPL */
- spl_init_easf_filter_coeffs();
- spl_init_blur_scale_coeffs();
dc->caps.scl_caps.sharpener_support = true;
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
index 19568c359669..4c259745d519 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
@@ -538,7 +538,8 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context);
SRI_ARR(OPTC_MEMORY_CONFIG, ODM, inst), \
SRI_ARR(OTG_DRR_CONTROL, OTG, inst), \
SRI_ARR(OTG_PSTATE_REGISTER, OTG, inst), \
- SRI_ARR(OTG_PIPE_UPDATE_STATUS, OTG, inst)
+ SRI_ARR(OTG_PIPE_UPDATE_STATUS, OTG, inst), \
+ SRI_ARR(INTERRUPT_DEST, OTG, inst)
/* HUBBUB */
#define HUBBUB_REG_LIST_DCN4_01_RI(id) \
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.h
deleted file mode 100644
index 02a2d6725ed5..000000000000
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.h
+++ /dev/null
@@ -1,18 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#ifndef __DC_SPL_H__
-#define __DC_SPL_H__
-
-#include "dc_spl_types.h"
-#define BLACK_OFFSET_RGB_Y 0x0
-#define BLACK_OFFSET_CBCR 0x8000
-
-/* SPL interfaces */
-
-bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out);
-
-bool spl_get_number_of_taps(struct spl_in *spl_in, struct spl_out *spl_out);
-
-#endif /* __DC_SPL_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.h
deleted file mode 100644
index 48202bc4f81e..000000000000
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.h
+++ /dev/null
@@ -1,22 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#ifndef __DC_SPL_SCL_FILTERS_H__
-#define __DC_SPL_SCL_FILTERS_H__
-
-#include "dc_spl_types.h"
-
-const uint16_t *spl_get_filter_3tap_16p(struct spl_fixed31_32 ratio);
-const uint16_t *spl_get_filter_3tap_64p(struct spl_fixed31_32 ratio);
-const uint16_t *spl_get_filter_4tap_16p(struct spl_fixed31_32 ratio);
-const uint16_t *spl_get_filter_4tap_64p(struct spl_fixed31_32 ratio);
-const uint16_t *spl_get_filter_5tap_64p(struct spl_fixed31_32 ratio);
-const uint16_t *spl_get_filter_6tap_64p(struct spl_fixed31_32 ratio);
-const uint16_t *spl_get_filter_7tap_64p(struct spl_fixed31_32 ratio);
-const uint16_t *spl_get_filter_8tap_64p(struct spl_fixed31_32 ratio);
-const uint16_t *spl_get_filter_2tap_16p(void);
-const uint16_t *spl_get_filter_2tap_64p(void);
-const uint16_t *spl_dscl_get_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio);
-
-#endif /* __DC_SPL_SCL_FILTERS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/Makefile b/drivers/gpu/drm/amd/display/dc/sspl/Makefile
index 5edf3c6cf3e2..5e3e4aa13820 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/sspl/Makefile
@@ -25,7 +25,7 @@
SPL = dc_spl.o dc_spl_scl_filters.o dc_spl_scl_easf_filters.o dc_spl_isharp_filters.o dc_spl_filters.o spl_fixpt31_32.o spl_custom_float.o
-AMD_DAL_SPL = $(addprefix $(AMDDALPATH)/dc/spl/,$(SPL))
+AMD_DAL_SPL = $(addprefix $(AMDDALPATH)/dc/sspl/,$(SPL))
AMD_DISPLAY_FILES += $(AMD_DAL_SPL)
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
index 38a9a0d68058..28348734d900 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
@@ -3,12 +3,11 @@
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dc_spl.h"
-#include "dc_spl_scl_filters.h"
#include "dc_spl_scl_easf_filters.h"
#include "dc_spl_isharp_filters.h"
#include "spl_debug.h"
-#define IDENTITY_RATIO(ratio) (spl_fixpt_u2d19(ratio) == (1 << 19))
+#define IDENTITY_RATIO(ratio) (spl_fixpt_u3d19(ratio) == (1 << 19))
#define MIN_VIEWPORT_SIZE 12
static bool spl_is_yuv420(enum spl_pixel_format format)
@@ -76,6 +75,21 @@ static struct spl_rect shift_rec(const struct spl_rect *rec_in, int x, int y)
return rec_out;
}
+static void spl_opp_adjust_rect(struct spl_rect *rec, const struct spl_opp_adjust *adjust)
+{
+ if ((rec->x + adjust->x) >= 0)
+ rec->x += adjust->x;
+
+ if ((rec->y + adjust->y) >= 0)
+ rec->y += adjust->y;
+
+ if ((rec->width + adjust->width) >= 1)
+ rec->width += adjust->width;
+
+ if ((rec->height + adjust->height) >= 1)
+ rec->height += adjust->height;
+}
+
static struct spl_rect calculate_plane_rec_in_timing_active(
struct spl_in *spl_in,
const struct spl_rect *rec_in)
@@ -723,13 +737,15 @@ static void spl_handle_3d_recout(struct spl_in *spl_in, struct spl_rect *recout)
}
}
-static void spl_clamp_viewport(struct spl_rect *viewport)
+static void spl_clamp_viewport(struct spl_rect *viewport, int min_viewport_size)
{
+ if (min_viewport_size == 0)
+ min_viewport_size = MIN_VIEWPORT_SIZE;
/* Clamp minimum viewport size */
- if (viewport->height < MIN_VIEWPORT_SIZE)
- viewport->height = MIN_VIEWPORT_SIZE;
- if (viewport->width < MIN_VIEWPORT_SIZE)
- viewport->width = MIN_VIEWPORT_SIZE;
+ if (viewport->height < min_viewport_size)
+ viewport->height = min_viewport_size;
+ if (viewport->width < min_viewport_size)
+ viewport->width = min_viewport_size;
}
static enum scl_mode spl_get_dscl_mode(const struct spl_in *spl_in,
@@ -767,25 +783,13 @@ static enum scl_mode spl_get_dscl_mode(const struct spl_in *spl_in,
return SCL_MODE_SCALING_420_YCBCR_ENABLE;
}
-static bool spl_choose_lls_policy(enum spl_pixel_format format,
- enum spl_transfer_func_type tf_type,
- enum spl_transfer_func_predefined tf_predefined_type,
+static void spl_choose_lls_policy(enum spl_pixel_format format,
enum linear_light_scaling *lls_pref)
{
- if (spl_is_video_format(format)) {
+ if (spl_is_subsampled_format(format))
*lls_pref = LLS_PREF_NO;
- if ((tf_type == SPL_TF_TYPE_PREDEFINED) ||
- (tf_type == SPL_TF_TYPE_DISTRIBUTED_POINTS))
- return true;
- } else { /* RGB or YUV444 */
- if ((tf_type == SPL_TF_TYPE_PREDEFINED) ||
- (tf_type == SPL_TF_TYPE_BYPASS)) {
- *lls_pref = LLS_PREF_YES;
- return true;
- }
- }
- *lls_pref = LLS_PREF_NO;
- return false;
+ else /* RGB or YUV444 */
+ *lls_pref = LLS_PREF_YES;
}
/* Enable EASF? */
@@ -794,7 +798,6 @@ static bool enable_easf(struct spl_in *spl_in, struct spl_scratch *spl_scratch)
int vratio = 0;
int hratio = 0;
bool skip_easf = false;
- bool lls_enable_easf = true;
if (spl_in->disable_easf)
skip_easf = true;
@@ -810,17 +813,13 @@ static bool enable_easf(struct spl_in *spl_in, struct spl_scratch *spl_scratch)
skip_easf = true;
/*
- * If lls_pref is LLS_PREF_DONT_CARE, then use pixel format and transfer
- * function to determine whether to use LINEAR or NONLINEAR scaling
+ * If lls_pref is LLS_PREF_DONT_CARE, then use pixel format
+ * to determine whether to use LINEAR or NONLINEAR scaling
*/
if (spl_in->lls_pref == LLS_PREF_DONT_CARE)
- lls_enable_easf = spl_choose_lls_policy(spl_in->basic_in.format,
- spl_in->basic_in.tf_type, spl_in->basic_in.tf_predefined_type,
+ spl_choose_lls_policy(spl_in->basic_in.format,
&spl_in->lls_pref);
- if (!lls_enable_easf)
- skip_easf = true;
-
/* Check for linear scaling or EASF preferred */
if (spl_in->lls_pref != LLS_PREF_YES && !spl_in->prefer_easf)
skip_easf = true;
@@ -887,6 +886,8 @@ static bool spl_get_isharp_en(struct spl_in *spl_in,
static void spl_get_taps_non_adaptive_scaler(
struct spl_scratch *spl_scratch, const struct spl_taps *in_taps)
{
+ bool check_max_downscale = false;
+
if (in_taps->h_taps == 0) {
if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz) > 1)
spl_scratch->scl_data.taps.h_taps = spl_min(2 * spl_fixpt_ceil(
@@ -926,6 +927,23 @@ static void spl_get_taps_non_adaptive_scaler(
else
spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c;
+
+ /*
+ * Max downscale supported is 6.0x. Add ASSERT to catch if we go beyond that
+ */
+ check_max_downscale = spl_fixpt_le(spl_scratch->scl_data.ratios.horz,
+ spl_fixpt_from_fraction(6, 1));
+ SPL_ASSERT(check_max_downscale);
+ check_max_downscale = spl_fixpt_le(spl_scratch->scl_data.ratios.vert,
+ spl_fixpt_from_fraction(6, 1));
+ SPL_ASSERT(check_max_downscale);
+ check_max_downscale = spl_fixpt_le(spl_scratch->scl_data.ratios.horz_c,
+ spl_fixpt_from_fraction(6, 1));
+ SPL_ASSERT(check_max_downscale);
+ check_max_downscale = spl_fixpt_le(spl_scratch->scl_data.ratios.vert_c,
+ spl_fixpt_from_fraction(6, 1));
+ SPL_ASSERT(check_max_downscale);
+
if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz))
spl_scratch->scl_data.taps.h_taps = 1;
if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert))
@@ -944,8 +962,8 @@ static bool spl_get_optimal_number_of_taps(
bool *enable_isharp)
{
int num_part_y, num_part_c;
- int max_taps_y, max_taps_c;
- int min_taps_y, min_taps_c;
+ unsigned int max_taps_y, max_taps_c;
+ unsigned int min_taps_y, min_taps_c;
enum lb_memory_config lb_config;
bool skip_easf = false;
bool is_subsampled = spl_is_subsampled_format(spl_in->basic_in.format);
@@ -1007,12 +1025,18 @@ static bool spl_get_optimal_number_of_taps(
lb_config, &num_part_y, &num_part_c);
/* MAX_V_TAPS = MIN (NUM_LINES - MAX(CEILING(V_RATIO,1)-2, 0), 8) */
if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) > 2)
- max_taps_y = num_part_y - (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) - 2);
+ if ((spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) - 2) > num_part_y)
+ max_taps_y = 0;
+ else
+ max_taps_y = num_part_y - (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) - 2);
else
max_taps_y = num_part_y;
if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) > 2)
- max_taps_c = num_part_c - (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) - 2);
+ if ((spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) - 2) > num_part_c)
+ max_taps_c = 0;
+ else
+ max_taps_c = num_part_c - (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) - 2);
else
max_taps_c = num_part_c;
@@ -1781,6 +1805,8 @@ static bool spl_calculate_number_of_taps(struct spl_in *spl_in, struct spl_scrat
spl_calculate_recout(spl_in, spl_scratch, spl_out);
/* depends on pixel format */
spl_calculate_scaling_ratios(spl_in, spl_scratch, spl_out);
+ /* Adjust recout for opp if needed */
+ spl_opp_adjust_rect(&spl_scratch->scl_data.recout, &spl_in->basic_in.opp_recout_adjust);
/* depends on scaling ratios and recout, does not calculate offset yet */
spl_calculate_viewport_size(spl_in, spl_scratch);
@@ -1792,7 +1818,7 @@ static bool spl_calculate_number_of_taps(struct spl_in *spl_in, struct spl_scrat
}
/* Calculate scaler parameters */
-bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out)
+bool SPL_NAMESPACE(spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out))
{
bool res = false;
bool enable_easf_v = false;
@@ -1817,7 +1843,7 @@ bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out)
// Handle 3d recout
spl_handle_3d_recout(spl_in, &spl_scratch.scl_data.recout);
// Clamp
- spl_clamp_viewport(&spl_scratch.scl_data.viewport);
+ spl_clamp_viewport(&spl_scratch.scl_data.viewport, spl_in->min_viewport_size);
// Save all calculated parameters in dscl_prog_data structure to program hw registers
spl_set_dscl_prog_data(spl_in, &spl_scratch, spl_out, enable_easf_v, enable_easf_h, enable_isharp);
@@ -1857,7 +1883,7 @@ bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out)
}
/* External interface to get number of taps only */
-bool spl_get_number_of_taps(struct spl_in *spl_in, struct spl_out *spl_out)
+bool SPL_NAMESPACE(spl_get_number_of_taps(struct spl_in *spl_in, struct spl_out *spl_out))
{
bool res = false;
bool enable_easf_v = false;
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.h
new file mode 100644
index 000000000000..d621c42a237e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.h
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#ifndef __DC_SPL_H__
+#define __DC_SPL_H__
+
+#include "dc_spl_types.h"
+#define BLACK_OFFSET_RGB_Y 0x0
+#define BLACK_OFFSET_CBCR 0x8000
+
+#ifndef SPL_PFX_
+#define SPL_PFX_
+#endif
+
+#define SPL_EXPAND2(a, b) a##b
+#define SPL_EXPAND(a, b) SPL_EXPAND2(a, b)
+#define SPL_NAMESPACE(symbol) SPL_EXPAND(SPL_PFX_, symbol)
+
+
+/* SPL interfaces */
+
+bool SPL_NAMESPACE(spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out));
+
+bool SPL_NAMESPACE(spl_get_number_of_taps(struct spl_in *spl_in, struct spl_out *spl_out));
+
+#endif /* __DC_SPL_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_filters.c
index 99238644e0a1..99238644e0a1 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_filters.c
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_filters.h
index 20439cdbdb10..20439cdbdb10 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_filters.h
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_isharp_filters.c
index e0572252c640..12acdd34e6a6 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_isharp_filters.c
@@ -11,232 +11,6 @@
// LUT content is packed as 4-bytes into one DWORD/entry
// A_start = 0.000000
// A_end = 10.000000
-// A_gain = 2.000000
-// B_start = 11.000000
-// B_end = 86.000000
-// C_start = 40.000000
-// C_end = 64.000000
-//========================================
-static const uint32_t filter_isharp_1D_lut_0[ISHARP_LUT_TABLE_SIZE] = {
-0x02010000,
-0x0A070503,
-0x1614100D,
-0x1C1B1918,
-0x22211F1E,
-0x27262423,
-0x2A2A2928,
-0x2D2D2C2B,
-0x302F2F2E,
-0x31313030,
-0x31313131,
-0x31313131,
-0x30303031,
-0x292D2F2F,
-0x191D2125,
-0x050A0F14,
-0x00000000,
-0x00000000,
-0x00000000,
-0x00000000,
-0x00000000,
-0x00000000,
-0x00000000,
-0x00000000,
-0x00000000,
-0x00000000,
-0x00000000,
-0x00000000,
-0x00000000,
-0x00000000,
-0x00000000,
-0x00000000,
-};
-//========================================
-// Delta Gain 1DLUT
-// LUT content is packed as 4-bytes into one DWORD/entry
-// A_start = 0.000000
-// A_end = 10.000000
-// A_gain = 0.500000
-// B_start = 11.000000
-// B_end = 127.000000
-// C_start = 96.000000
-// C_end = 127.000000
-//========================================
-
-static const uint32_t filter_isharp_1D_lut_0p5x[ISHARP_LUT_TABLE_SIZE] = {
-0x00000000,
-0x02020101,
-0x06050403,
-0x07070606,
-0x09080808,
-0x0A0A0A09,
-0x0C0B0B0B,
-0x0D0D0C0C,
-0x0E0E0D0D,
-0x0F0F0E0E,
-0x100F0F0F,
-0x10101010,
-0x11111010,
-0x11111111,
-0x11111111,
-0x11111111,
-0x11111111,
-0x11111111,
-0x11111111,
-0x10101111,
-0x10101010,
-0x0F0F0F10,
-0x0E0E0F0F,
-0x0D0D0E0E,
-0x0C0C0D0D,
-0x0B0B0B0C,
-0x090A0A0A,
-0x08080809,
-0x06060707,
-0x04050506,
-0x02030304,
-0x00010102,
-};
-//========================================
-// Delta Gain 1DLUT
-// LUT content is packed as 4-bytes into one DWORD/entry
-// A_start = 0.000000
-// A_end = 10.000000
-// A_gain = 1.000000
-// B_start = 11.000000
-// B_end = 127.000000
-// C_start = 96.000000
-// C_end = 127.000000
-//========================================
-static const uint32_t filter_isharp_1D_lut_1p0x[ISHARP_LUT_TABLE_SIZE] = {
-0x01000000,
-0x05040302,
-0x0B0A0806,
-0x0E0E0D0C,
-0x1211100F,
-0x15141312,
-0x17171615,
-0x1A191918,
-0x1C1B1B1A,
-0x1E1D1D1C,
-0x1F1F1E1E,
-0x2020201F,
-0x21212121,
-0x22222222,
-0x23232222,
-0x23232323,
-0x23232323,
-0x22222323,
-0x22222222,
-0x21212121,
-0x1F202020,
-0x1E1E1F1F,
-0x1C1D1D1E,
-0x1A1B1B1C,
-0x1819191A,
-0x15161717,
-0x12131415,
-0x0F101112,
-0x0C0D0E0E,
-0x08090A0B,
-0x04050607,
-0x00010203,
-};
-//========================================
-// Delta Gain 1DLUT
-// LUT content is packed as 4-bytes into one DWORD/entry
-// A_start = 0.000000
-// A_end = 10.000000
-// A_gain = 1.500000
-// B_start = 11.000000
-// B_end = 127.000000
-// C_start = 96.000000
-// C_end = 127.000000
-//========================================
-static const uint32_t filter_isharp_1D_lut_1p5x[ISHARP_LUT_TABLE_SIZE] = {
-0x01010000,
-0x07050402,
-0x110F0C0A,
-0x16141312,
-0x1B191817,
-0x1F1E1D1C,
-0x23222120,
-0x26262524,
-0x2A292827,
-0x2C2C2B2A,
-0x2F2E2E2D,
-0x3130302F,
-0x32323131,
-0x33333332,
-0x34343433,
-0x34343434,
-0x34343434,
-0x33343434,
-0x32333333,
-0x31313232,
-0x2F303031,
-0x2D2E2E2F,
-0x2A2B2C2C,
-0x2728292A,
-0x24252626,
-0x20212223,
-0x1C1D1E1F,
-0x1718191B,
-0x12131416,
-0x0C0E0F10,
-0x0608090B,
-0x00020305
-};
-//========================================
-// Delta Gain 1DLUT
-// LUT content is packed as 4-bytes into one DWORD/entry
-// A_start = 0.000000
-// A_end = 10.000000
-// A_gain = 2.000000
-// B_start = 11.000000
-// B_end = 127.000000
-// C_start = 40.000000
-// C_end = 127.000000
-//========================================
-static const uint32_t filter_isharp_1D_lut_2p0x[ISHARP_LUT_TABLE_SIZE] = {
-0x02010000,
-0x0A070503,
-0x1614100D,
-0x1D1B1A18,
-0x2322201F,
-0x29282625,
-0x2F2D2C2B,
-0x33323130,
-0x38373534,
-0x3B3A3938,
-0x3E3E3D3C,
-0x4140403F,
-0x43424241,
-0x44444443,
-0x45454545,
-0x46454545,
-0x45454546,
-0x45454545,
-0x43444444,
-0x41424243,
-0x3F404041,
-0x3C3D3E3E,
-0x38393A3B,
-0x34353738,
-0x30313233,
-0x2B2C2D2F,
-0x25262829,
-0x1F202223,
-0x181A1B1D,
-0x10121416,
-0x080B0D0E,
-0x00020406,
-};
-//========================================
-// Delta Gain 1DLUT
-// LUT content is packed as 4-bytes into one DWORD/entry
-// A_start = 0.000000
-// A_end = 10.000000
// A_gain = 3.000000
// B_start = 11.000000
// B_end = 127.000000
@@ -278,52 +52,6 @@ static const uint32_t filter_isharp_1D_lut_3p0x[ISHARP_LUT_TABLE_SIZE] = {
0x0003060A,
};
-//========================================
-// Wide scaler coefficients
-//========================================================
-// <using> gen_scaler_coeffs.m
-// <date> 15-Dec-2021
-// <coeffDescrip> 6t_64p_LanczosEd_p_1_p_10qb_
-// <num_taps> 6
-// <num_phases> 64
-// <CoefType> LanczosEd
-// <CoefQuant> S1.10
-//========================================================
-static const uint16_t filter_isharp_wide_6tap_64p[198] = {
-0x0000, 0x0000, 0x0400, 0x0000, 0x0000, 0x0000,
-0x0003, 0x0FF3, 0x0400, 0x000D, 0x0FFD, 0x0000,
-0x0006, 0x0FE7, 0x03FE, 0x001C, 0x0FF9, 0x0000,
-0x0009, 0x0FDB, 0x03FC, 0x002B, 0x0FF5, 0x0000,
-0x000C, 0x0FD0, 0x03F9, 0x003A, 0x0FF1, 0x0000,
-0x000E, 0x0FC5, 0x03F5, 0x004A, 0x0FED, 0x0001,
-0x0011, 0x0FBB, 0x03F0, 0x005A, 0x0FE9, 0x0001,
-0x0013, 0x0FB2, 0x03EB, 0x006A, 0x0FE5, 0x0001,
-0x0015, 0x0FA9, 0x03E4, 0x007B, 0x0FE1, 0x0002,
-0x0017, 0x0FA1, 0x03DD, 0x008D, 0x0FDC, 0x0002,
-0x0018, 0x0F99, 0x03D4, 0x00A0, 0x0FD8, 0x0003,
-0x001A, 0x0F92, 0x03CB, 0x00B2, 0x0FD3, 0x0004,
-0x001B, 0x0F8C, 0x03C1, 0x00C6, 0x0FCE, 0x0004,
-0x001C, 0x0F86, 0x03B7, 0x00D9, 0x0FC9, 0x0005,
-0x001D, 0x0F80, 0x03AB, 0x00EE, 0x0FC4, 0x0006,
-0x001E, 0x0F7C, 0x039F, 0x0101, 0x0FBF, 0x0007,
-0x001F, 0x0F78, 0x0392, 0x0115, 0x0FBA, 0x0008,
-0x001F, 0x0F74, 0x0385, 0x012B, 0x0FB5, 0x0008,
-0x0020, 0x0F71, 0x0376, 0x0140, 0x0FB0, 0x0009,
-0x0020, 0x0F6E, 0x0367, 0x0155, 0x0FAB, 0x000B,
-0x0020, 0x0F6C, 0x0357, 0x016B, 0x0FA6, 0x000C,
-0x0020, 0x0F6A, 0x0347, 0x0180, 0x0FA2, 0x000D,
-0x0020, 0x0F69, 0x0336, 0x0196, 0x0F9D, 0x000E,
-0x0020, 0x0F69, 0x0325, 0x01AB, 0x0F98, 0x000F,
-0x001F, 0x0F68, 0x0313, 0x01C3, 0x0F93, 0x0010,
-0x001F, 0x0F69, 0x0300, 0x01D8, 0x0F8F, 0x0011,
-0x001E, 0x0F69, 0x02ED, 0x01EF, 0x0F8B, 0x0012,
-0x001D, 0x0F6A, 0x02D9, 0x0205, 0x0F87, 0x0014,
-0x001D, 0x0F6C, 0x02C5, 0x021A, 0x0F83, 0x0015,
-0x001C, 0x0F6E, 0x02B1, 0x0230, 0x0F7F, 0x0016,
-0x001B, 0x0F70, 0x029C, 0x0247, 0x0F7B, 0x0017,
-0x001A, 0x0F72, 0x0287, 0x025D, 0x0F78, 0x0018,
-0x0019, 0x0F75, 0x0272, 0x0272, 0x0F75, 0x0019
-};
// Blur and scale coefficients
//========================================================
// <using> gen_BlurScale_coeffs.m
@@ -456,9 +184,113 @@ static const uint16_t filter_isharp_bs_3tap_64p[99] = {
};
/* Converted Blur & Scale coeff tables from S1.10 to S1.12 */
-static uint16_t filter_isharp_bs_4tap_in_6_64p_s1_12[198];
-static uint16_t filter_isharp_bs_4tap_64p_s1_12[132];
-static uint16_t filter_isharp_bs_3tap_64p_s1_12[99];
+static const uint16_t filter_isharp_bs_4tap_in_6_64p_s1_12[198] = {
+0x0000, 0x0394, 0x08dc, 0x0390, 0x0000, 0x0000,
+0x0000, 0x0378, 0x08dc, 0x03ac, 0x0000, 0x0000,
+0x0000, 0x035c, 0x08d8, 0x03c8, 0x0004, 0x0000,
+0x0000, 0x0340, 0x08d4, 0x03e8, 0x0004, 0x0000,
+0x0000, 0x0324, 0x08d0, 0x0404, 0x0008, 0x0000,
+0x0000, 0x0308, 0x08cc, 0x0420, 0x000c, 0x0000,
+0x0000, 0x02ec, 0x08c8, 0x0440, 0x000c, 0x0000,
+0x0000, 0x02d4, 0x08c0, 0x045c, 0x0010, 0x0000,
+0x0000, 0x02b8, 0x08b8, 0x047c, 0x0014, 0x0000,
+0x0000, 0x02a0, 0x08b0, 0x0498, 0x0018, 0x0000,
+0x0000, 0x0288, 0x08a8, 0x04b4, 0x001c, 0x0000,
+0x0000, 0x0270, 0x08a0, 0x04d0, 0x0020, 0x0000,
+0x0000, 0x0258, 0x0894, 0x04f0, 0x0024, 0x0000,
+0x0000, 0x0240, 0x0888, 0x050c, 0x002c, 0x0000,
+0x0000, 0x0228, 0x087c, 0x052c, 0x0030, 0x0000,
+0x0000, 0x0214, 0x0870, 0x0544, 0x0038, 0x0000,
+0x0000, 0x01fc, 0x0860, 0x0568, 0x003c, 0x0000,
+0x0000, 0x01e8, 0x0854, 0x0580, 0x0044, 0x0000,
+0x0000, 0x01d0, 0x0844, 0x05a0, 0x004c, 0x0000,
+0x0000, 0x01bc, 0x0834, 0x05bc, 0x0054, 0x0000,
+0x0000, 0x01a8, 0x0824, 0x05d8, 0x005c, 0x0000,
+0x0000, 0x0194, 0x0810, 0x05f8, 0x0064, 0x0000,
+0x0000, 0x0180, 0x0800, 0x0614, 0x006c, 0x0000,
+0x0000, 0x0170, 0x07ec, 0x0630, 0x0074, 0x0000,
+0x0000, 0x015c, 0x07d8, 0x064c, 0x0080, 0x0000,
+0x0000, 0x014c, 0x07c4, 0x0668, 0x0088, 0x0000,
+0x0000, 0x0138, 0x07b0, 0x0684, 0x0094, 0x0000,
+0x0000, 0x0128, 0x0798, 0x06a0, 0x00a0, 0x0000,
+0x0000, 0x0118, 0x0784, 0x06bc, 0x00a8, 0x0000,
+0x0000, 0x0108, 0x076c, 0x06d8, 0x00b4, 0x0000,
+0x0000, 0x00fc, 0x0754, 0x06ec, 0x00c4, 0x0000,
+0x0000, 0x00ec, 0x073c, 0x0708, 0x00d0, 0x0000,
+0x0000, 0x00dc, 0x0724, 0x0724, 0x00dc, 0x0000,
+};
+
+static const uint16_t filter_isharp_bs_4tap_64p_s1_12[132] = {
+0x0394, 0x08dc, 0x0390, 0x0000,
+0x0378, 0x08dc, 0x03ac, 0x0000,
+0x035c, 0x08d8, 0x03c8, 0x0004,
+0x0340, 0x08d4, 0x03e8, 0x0004,
+0x0324, 0x08d0, 0x0404, 0x0008,
+0x0308, 0x08cc, 0x0420, 0x000c,
+0x02ec, 0x08c8, 0x0440, 0x000c,
+0x02d4, 0x08c0, 0x045c, 0x0010,
+0x02b8, 0x08b8, 0x047c, 0x0014,
+0x02a0, 0x08b0, 0x0498, 0x0018,
+0x0288, 0x08a8, 0x04b4, 0x001c,
+0x0270, 0x08a0, 0x04d0, 0x0020,
+0x0258, 0x0894, 0x04f0, 0x0024,
+0x0240, 0x0888, 0x050c, 0x002c,
+0x0228, 0x087c, 0x052c, 0x0030,
+0x0214, 0x0870, 0x0544, 0x0038,
+0x01fc, 0x0860, 0x0568, 0x003c,
+0x01e8, 0x0854, 0x0580, 0x0044,
+0x01d0, 0x0844, 0x05a0, 0x004c,
+0x01bc, 0x0834, 0x05bc, 0x0054,
+0x01a8, 0x0824, 0x05d8, 0x005c,
+0x0194, 0x0810, 0x05f8, 0x0064,
+0x0180, 0x0800, 0x0614, 0x006c,
+0x0170, 0x07ec, 0x0630, 0x0074,
+0x015c, 0x07d8, 0x064c, 0x0080,
+0x014c, 0x07c4, 0x0668, 0x0088,
+0x0138, 0x07b0, 0x0684, 0x0094,
+0x0128, 0x0798, 0x06a0, 0x00a0,
+0x0118, 0x0784, 0x06bc, 0x00a8,
+0x0108, 0x076c, 0x06d8, 0x00b4,
+0x00fc, 0x0754, 0x06ec, 0x00c4,
+0x00ec, 0x073c, 0x0708, 0x00d0,
+0x00dc, 0x0724, 0x0724, 0x00dc,
+};
+
+static const uint16_t filter_isharp_bs_3tap_64p_s1_12[99] = {
+0x0800, 0x0800, 0x0000,
+0x07d8, 0x0818, 0x0010,
+0x07b0, 0x082c, 0x0024,
+0x0788, 0x0844, 0x0034,
+0x0760, 0x0858, 0x0048,
+0x0738, 0x0870, 0x0058,
+0x0710, 0x0884, 0x006c,
+0x06e8, 0x0898, 0x0080,
+0x06c0, 0x08a8, 0x0098,
+0x0698, 0x08bc, 0x00ac,
+0x0670, 0x08cc, 0x00c4,
+0x0648, 0x08e0, 0x00d8,
+0x0620, 0x08f0, 0x00f0,
+0x05f8, 0x0900, 0x0108,
+0x05d0, 0x0910, 0x0120,
+0x05a8, 0x0920, 0x0138,
+0x0584, 0x0928, 0x0154,
+0x055c, 0x0938, 0x016c,
+0x0534, 0x0944, 0x0188,
+0x0510, 0x094c, 0x01a4,
+0x04e8, 0x0958, 0x01c0,
+0x04c4, 0x0960, 0x01dc,
+0x049c, 0x096c, 0x01f8,
+0x0478, 0x0970, 0x0218,
+0x0454, 0x0978, 0x0234,
+0x042c, 0x0980, 0x0254,
+0x0408, 0x0988, 0x0270,
+0x03e4, 0x098c, 0x0290,
+0x03c0, 0x0990, 0x02b0,
+0x039c, 0x0994, 0x02d0,
+0x037c, 0x0990, 0x02f4,
+0x0358, 0x0994, 0x0314,
+0x0334, 0x0998, 0x0334,
+};
/* Pre-generated 1DLUT for given setup and sharpness level */
struct isharp_1D_lut_pregen filter_isharp_1D_lut_pregen[NUM_SHARPNESS_SETUPS] = {
@@ -509,47 +341,6 @@ struct scale_ratio_to_sharpness_level_adj sharpness_level_adj[NUM_SHARPNESS_ADJ_
{1, 1, 5},
};
-const uint32_t *spl_get_filter_isharp_1D_lut_0(void)
-{
- return filter_isharp_1D_lut_0;
-}
-const uint32_t *spl_get_filter_isharp_1D_lut_0p5x(void)
-{
- return filter_isharp_1D_lut_0p5x;
-}
-const uint32_t *spl_get_filter_isharp_1D_lut_1p0x(void)
-{
- return filter_isharp_1D_lut_1p0x;
-}
-const uint32_t *spl_get_filter_isharp_1D_lut_1p5x(void)
-{
- return filter_isharp_1D_lut_1p5x;
-}
-const uint32_t *spl_get_filter_isharp_1D_lut_2p0x(void)
-{
- return filter_isharp_1D_lut_2p0x;
-}
-const uint32_t *spl_get_filter_isharp_1D_lut_3p0x(void)
-{
- return filter_isharp_1D_lut_3p0x;
-}
-const uint16_t *spl_get_filter_isharp_wide_6tap_64p(void)
-{
- return filter_isharp_wide_6tap_64p;
-}
-uint16_t *spl_get_filter_isharp_bs_4tap_in_6_64p(void)
-{
- return filter_isharp_bs_4tap_in_6_64p_s1_12;
-}
-uint16_t *spl_get_filter_isharp_bs_4tap_64p(void)
-{
- return filter_isharp_bs_4tap_64p_s1_12;
-}
-uint16_t *spl_get_filter_isharp_bs_3tap_64p(void)
-{
- return filter_isharp_bs_3tap_64p_s1_12;
-}
-
static unsigned int spl_calculate_sharpness_level_adj(struct spl_fixed31_32 ratio)
{
int j;
@@ -589,7 +380,7 @@ static unsigned int spl_calculate_sharpness_level_adj(struct spl_fixed31_32 rati
}
static unsigned int spl_calculate_sharpness_level(struct spl_fixed31_32 ratio,
- int discrete_sharpness_level, enum system_setup setup,
+ unsigned int discrete_sharpness_level, enum system_setup setup,
struct spl_sharpness_range sharpness_range,
enum scale_to_sharpness_policy scale_to_sharpness_policy)
{
@@ -720,24 +511,29 @@ uint32_t *spl_get_pregen_filter_isharp_1D_lut(enum system_setup setup)
return filter_isharp_1D_lut_pregen[setup].value;
}
-void spl_init_blur_scale_coeffs(void)
+const uint16_t *spl_dscl_get_blur_scale_coeffs_64p(int taps)
{
- convert_filter_s1_10_to_s1_12(filter_isharp_bs_3tap_64p,
- filter_isharp_bs_3tap_64p_s1_12, 3);
- convert_filter_s1_10_to_s1_12(filter_isharp_bs_4tap_64p,
- filter_isharp_bs_4tap_64p_s1_12, 4);
- convert_filter_s1_10_to_s1_12(filter_isharp_bs_4tap_in_6_64p,
- filter_isharp_bs_4tap_in_6_64p_s1_12, 6);
+ if (taps == 3)
+ return filter_isharp_bs_3tap_64p_s1_12;
+ else if (taps == 4)
+ return filter_isharp_bs_4tap_64p_s1_12;
+ else if (taps == 6)
+ return filter_isharp_bs_4tap_in_6_64p_s1_12;
+ else {
+ /* should never happen, bug */
+ SPL_BREAK_TO_DEBUGGER();
+ return NULL;
+ }
}
-uint16_t *spl_dscl_get_blur_scale_coeffs_64p(int taps)
+const uint16_t *spl_dscl_get_blur_scale_coeffs_64p_s1_10(int taps)
{
if (taps == 3)
- return spl_get_filter_isharp_bs_3tap_64p();
+ return filter_isharp_bs_3tap_64p;
else if (taps == 4)
- return spl_get_filter_isharp_bs_4tap_64p();
+ return filter_isharp_bs_4tap_64p;
else if (taps == 6)
- return spl_get_filter_isharp_bs_4tap_in_6_64p();
+ return filter_isharp_bs_4tap_in_6_64p;
else {
/* should never happen, bug */
SPL_BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_isharp_filters.h
index 89af91e19b6c..f5e3d3ecc913 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_isharp_filters.h
@@ -7,18 +7,6 @@
#include "dc_spl_types.h"
-const uint32_t *spl_get_filter_isharp_1D_lut_0(void);
-const uint32_t *spl_get_filter_isharp_1D_lut_0p5x(void);
-const uint32_t *spl_get_filter_isharp_1D_lut_1p0x(void);
-const uint32_t *spl_get_filter_isharp_1D_lut_1p5x(void);
-const uint32_t *spl_get_filter_isharp_1D_lut_2p0x(void);
-const uint32_t *spl_get_filter_isharp_1D_lut_3p0x(void);
-uint16_t *spl_get_filter_isharp_bs_4tap_in_6_64p(void);
-uint16_t *spl_get_filter_isharp_bs_4tap_64p(void);
-uint16_t *spl_get_filter_isharp_bs_3tap_64p(void);
-const uint16_t *spl_get_filter_isharp_wide_6tap_64p(void);
-uint16_t *spl_dscl_get_blur_scale_coeffs_64p(int taps);
-
#define NUM_SHARPNESS_ADJ_LEVELS 6
struct scale_ratio_to_sharpness_level_adj {
unsigned int ratio_numer;
@@ -40,11 +28,15 @@ enum system_setup {
NUM_SHARPNESS_SETUPS
};
-void spl_init_blur_scale_coeffs(void);
void spl_set_blur_scale_data(struct dscl_prog_data *dscl_prog_data,
const struct spl_scaler_data *data);
void spl_build_isharp_1dlut_from_reference_curve(struct spl_fixed31_32 ratio, enum system_setup setup,
struct adaptive_sharpness sharpness, enum scale_to_sharpness_policy scale_to_sharpness_policy);
uint32_t *spl_get_pregen_filter_isharp_1D_lut(enum system_setup setup);
+
+// public API
+const uint16_t *spl_dscl_get_blur_scale_coeffs_64p(int taps);
+const uint16_t *spl_dscl_get_blur_scale_coeffs_64p_s1_10(int taps);
+
#endif /* __DC_SPL_ISHARP_FILTERS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_easf_filters.c
index 09bf82f7d468..0d1bd81ff04a 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_easf_filters.c
@@ -1136,32 +1136,871 @@ static const uint16_t easf_filter_6tap_64p_ratio_1_00[198] = {
};
/* Converted scaler coeff tables from S1.10 to S1.12 */
-static uint16_t easf_filter_3tap_64p_ratio_0_30_s1_12[99];
-static uint16_t easf_filter_3tap_64p_ratio_0_40_s1_12[99];
-static uint16_t easf_filter_3tap_64p_ratio_0_50_s1_12[99];
-static uint16_t easf_filter_3tap_64p_ratio_0_60_s1_12[99];
-static uint16_t easf_filter_3tap_64p_ratio_0_70_s1_12[99];
-static uint16_t easf_filter_3tap_64p_ratio_0_80_s1_12[99];
-static uint16_t easf_filter_3tap_64p_ratio_0_90_s1_12[99];
-static uint16_t easf_filter_3tap_64p_ratio_1_00_s1_12[99];
-static uint16_t easf_filter_4tap_64p_ratio_0_30_s1_12[132];
-static uint16_t easf_filter_4tap_64p_ratio_0_40_s1_12[132];
-static uint16_t easf_filter_4tap_64p_ratio_0_50_s1_12[132];
-static uint16_t easf_filter_4tap_64p_ratio_0_60_s1_12[132];
-static uint16_t easf_filter_4tap_64p_ratio_0_70_s1_12[132];
-static uint16_t easf_filter_4tap_64p_ratio_0_80_s1_12[132];
-static uint16_t easf_filter_4tap_64p_ratio_0_90_s1_12[132];
-static uint16_t easf_filter_4tap_64p_ratio_1_00_s1_12[132];
-static uint16_t easf_filter_6tap_64p_ratio_0_30_s1_12[198];
-static uint16_t easf_filter_6tap_64p_ratio_0_40_s1_12[198];
-static uint16_t easf_filter_6tap_64p_ratio_0_50_s1_12[198];
-static uint16_t easf_filter_6tap_64p_ratio_0_60_s1_12[198];
-static uint16_t easf_filter_6tap_64p_ratio_0_70_s1_12[198];
-static uint16_t easf_filter_6tap_64p_ratio_0_80_s1_12[198];
-static uint16_t easf_filter_6tap_64p_ratio_0_90_s1_12[198];
-static uint16_t easf_filter_6tap_64p_ratio_1_00_s1_12[198];
-
-struct scale_ratio_to_reg_value_lookup easf_v_bf3_mode_lookup[] = {
+static const uint16_t easf_filter_3tap_64p_ratio_0_30_s1_12[99] = {
+0x0800, 0x0800, 0x0000,
+0x07d8, 0x0818, 0x0010,
+0x07b0, 0x082c, 0x0024,
+0x0788, 0x0844, 0x0034,
+0x0760, 0x0858, 0x0048,
+0x0738, 0x0870, 0x0058,
+0x0710, 0x0884, 0x006c,
+0x06e8, 0x0898, 0x0080,
+0x06c0, 0x08a8, 0x0098,
+0x0698, 0x08bc, 0x00ac,
+0x0670, 0x08cc, 0x00c4,
+0x0648, 0x08e0, 0x00d8,
+0x0620, 0x08f0, 0x00f0,
+0x05f8, 0x0900, 0x0108,
+0x05d0, 0x0910, 0x0120,
+0x05a8, 0x0920, 0x0138,
+0x0584, 0x0928, 0x0154,
+0x055c, 0x0938, 0x016c,
+0x0534, 0x0944, 0x0188,
+0x0510, 0x094c, 0x01a4,
+0x04e8, 0x0958, 0x01c0,
+0x04c4, 0x0960, 0x01dc,
+0x049c, 0x096c, 0x01f8,
+0x0478, 0x0970, 0x0218,
+0x0454, 0x0978, 0x0234,
+0x042c, 0x0980, 0x0254,
+0x0408, 0x0988, 0x0270,
+0x03e4, 0x098c, 0x0290,
+0x03c0, 0x0990, 0x02b0,
+0x039c, 0x0994, 0x02d0,
+0x037c, 0x0990, 0x02f4,
+0x0358, 0x0994, 0x0314,
+0x0334, 0x0998, 0x0334,
+};
+
+static const uint16_t easf_filter_3tap_64p_ratio_0_40_s1_12[99] = {
+0x0800, 0x0800, 0x0000,
+0x07d8, 0x0818, 0x0010,
+0x07ac, 0x0838, 0x001c,
+0x0784, 0x0850, 0x002c,
+0x075c, 0x0868, 0x003c,
+0x0734, 0x0880, 0x004c,
+0x0708, 0x0898, 0x0060,
+0x06e0, 0x08b0, 0x0070,
+0x06b8, 0x08c4, 0x0084,
+0x068c, 0x08dc, 0x0098,
+0x0664, 0x08f0, 0x00ac,
+0x063c, 0x0900, 0x00c4,
+0x0614, 0x0914, 0x00d8,
+0x05e8, 0x0928, 0x00f0,
+0x05c0, 0x093c, 0x0104,
+0x0598, 0x094c, 0x011c,
+0x0570, 0x095c, 0x0134,
+0x0548, 0x0968, 0x0150,
+0x0520, 0x0978, 0x0168,
+0x04f8, 0x0984, 0x0184,
+0x04d0, 0x0990, 0x01a0,
+0x04ac, 0x0998, 0x01bc,
+0x0484, 0x09a4, 0x01d8,
+0x045c, 0x09b0, 0x01f4,
+0x0438, 0x09b8, 0x0210,
+0x0410, 0x09c0, 0x0230,
+0x03ec, 0x09c4, 0x0250,
+0x03c8, 0x09c8, 0x0270,
+0x03a4, 0x09cc, 0x0290,
+0x0380, 0x09d0, 0x02b0,
+0x035c, 0x09d4, 0x02d0,
+0x0338, 0x09d4, 0x02f4,
+0x0314, 0x09d8, 0x0314,
+};
+
+static const uint16_t easf_filter_3tap_64p_ratio_0_50_s1_12[99] = {
+0x0800, 0x0800, 0x0000,
+0x07d4, 0x0824, 0x0008,
+0x07a8, 0x0844, 0x0014,
+0x077c, 0x0868, 0x001c,
+0x0750, 0x0888, 0x0028,
+0x0724, 0x08a8, 0x0034,
+0x06f8, 0x08c8, 0x0040,
+0x06cc, 0x08e4, 0x0050,
+0x06a0, 0x0904, 0x005c,
+0x0674, 0x0920, 0x006c,
+0x0648, 0x093c, 0x007c,
+0x061c, 0x0954, 0x0090,
+0x05f0, 0x0970, 0x00a0,
+0x05c4, 0x0988, 0x00b4,
+0x0598, 0x09a0, 0x00c8,
+0x056c, 0x09b8, 0x00dc,
+0x0540, 0x09cc, 0x00f4,
+0x0518, 0x09e0, 0x0108,
+0x04ec, 0x09f4, 0x0120,
+0x04c0, 0x0a08, 0x0138,
+0x0498, 0x0a18, 0x0150,
+0x046c, 0x0a28, 0x016c,
+0x0444, 0x0a34, 0x0188,
+0x041c, 0x0a40, 0x01a4,
+0x03f4, 0x0a4c, 0x01c0,
+0x03cc, 0x0a58, 0x01dc,
+0x03a4, 0x0a60, 0x01fc,
+0x037c, 0x0a68, 0x021c,
+0x0354, 0x0a70, 0x023c,
+0x0330, 0x0a74, 0x025c,
+0x030c, 0x0a78, 0x027c,
+0x02e8, 0x0a78, 0x02a0,
+0x02c4, 0x0a78, 0x02c4,
+};
+
+static const uint16_t easf_filter_3tap_64p_ratio_0_60_s1_12[99] = {
+0x0800, 0x0800, 0x0000,
+0x07d0, 0x082c, 0x0004,
+0x07a0, 0x0858, 0x0008,
+0x0770, 0x0884, 0x000c,
+0x0740, 0x08ac, 0x0014,
+0x0710, 0x08d4, 0x001c,
+0x06e0, 0x0900, 0x0020,
+0x06b0, 0x0924, 0x002c,
+0x0680, 0x094c, 0x0034,
+0x0650, 0x0970, 0x0040,
+0x0620, 0x0994, 0x004c,
+0x05f0, 0x09b8, 0x0058,
+0x05c0, 0x09dc, 0x0064,
+0x0590, 0x09fc, 0x0074,
+0x0560, 0x0a1c, 0x0084,
+0x0530, 0x0a3c, 0x0094,
+0x0500, 0x0a5c, 0x00a4,
+0x04d4, 0x0a74, 0x00b8,
+0x04a4, 0x0a90, 0x00cc,
+0x0474, 0x0aac, 0x00e0,
+0x0448, 0x0ac0, 0x00f8,
+0x041c, 0x0ad4, 0x0110,
+0x03f0, 0x0ae8, 0x0128,
+0x03c4, 0x0afc, 0x0140,
+0x0398, 0x0b0c, 0x015c,
+0x036c, 0x0b1c, 0x0178,
+0x0344, 0x0b28, 0x0194,
+0x031c, 0x0b30, 0x01b4,
+0x02f4, 0x0b38, 0x01d4,
+0x02cc, 0x0b40, 0x01f4,
+0x02a4, 0x0b48, 0x0214,
+0x0280, 0x0b48, 0x0238,
+0x025c, 0x0b48, 0x025c,
+};
+
+static const uint16_t easf_filter_3tap_64p_ratio_0_70_s1_12[99] = {
+0x0800, 0x0800, 0x0000,
+0x07cc, 0x0834, 0x0000,
+0x0794, 0x086c, 0x0000,
+0x0760, 0x08a0, 0x0000,
+0x072c, 0x08d4, 0x0000,
+0x06f4, 0x090c, 0x0000,
+0x06c0, 0x093c, 0x0004,
+0x0688, 0x0970, 0x0008,
+0x0654, 0x09a0, 0x000c,
+0x061c, 0x09d4, 0x0010,
+0x05e8, 0x0a00, 0x0018,
+0x05b4, 0x0a30, 0x001c,
+0x057c, 0x0a60, 0x0024,
+0x0548, 0x0a88, 0x0030,
+0x0514, 0x0ab4, 0x0038,
+0x04e0, 0x0adc, 0x0044,
+0x04ac, 0x0b00, 0x0054,
+0x0478, 0x0b28, 0x0060,
+0x0444, 0x0b4c, 0x0070,
+0x0414, 0x0b6c, 0x0080,
+0x03e0, 0x0b8c, 0x0094,
+0x03b0, 0x0ba8, 0x00a8,
+0x0380, 0x0bc4, 0x00bc,
+0x0354, 0x0bd8, 0x00d4,
+0x0324, 0x0bf0, 0x00ec,
+0x02f8, 0x0c04, 0x0104,
+0x02cc, 0x0c14, 0x0120,
+0x02a0, 0x0c24, 0x013c,
+0x0278, 0x0c30, 0x0158,
+0x0250, 0x0c38, 0x0178,
+0x0228, 0x0c40, 0x0198,
+0x0204, 0x0c40, 0x01bc,
+0x01dc, 0x0c48, 0x01dc,
+};
+
+static const uint16_t easf_filter_3tap_64p_ratio_0_80_s1_12[99] = {
+0x0800, 0x0800, 0x0000,
+0x07c4, 0x0840, 0x3ffc,
+0x0788, 0x0880, 0x3ff8,
+0x0748, 0x08c8, 0x3ff0,
+0x070c, 0x0904, 0x3ff0,
+0x06d0, 0x0944, 0x3fec,
+0x0690, 0x0988, 0x3fe8,
+0x0654, 0x09c4, 0x3fe8,
+0x0618, 0x0a04, 0x3fe4,
+0x05d8, 0x0a44, 0x3fe4,
+0x059c, 0x0a80, 0x3fe4,
+0x0560, 0x0ab8, 0x3fe8,
+0x0524, 0x0af4, 0x3fe8,
+0x04e8, 0x0b2c, 0x3fec,
+0x04b0, 0x0b5c, 0x3ff4,
+0x0474, 0x0b94, 0x3ff8,
+0x043c, 0x0bc4, 0x0000,
+0x0404, 0x0bf4, 0x0008,
+0x03cc, 0x0c20, 0x0014,
+0x0394, 0x0c4c, 0x0020,
+0x0360, 0x0c74, 0x002c,
+0x032c, 0x0c98, 0x003c,
+0x02f8, 0x0cbc, 0x004c,
+0x02c8, 0x0cdc, 0x005c,
+0x0298, 0x0cf8, 0x0070,
+0x0268, 0x0d14, 0x0084,
+0x023c, 0x0d28, 0x009c,
+0x0210, 0x0d3c, 0x00b4,
+0x01e4, 0x0d4c, 0x00d0,
+0x01bc, 0x0d58, 0x00ec,
+0x0194, 0x0d60, 0x010c,
+0x0170, 0x0d64, 0x012c,
+0x014c, 0x0d68, 0x014c,
+};
+
+static const uint16_t easf_filter_3tap_64p_ratio_0_90_s1_12[99] = {
+0x0800, 0x0800, 0x0000,
+0x07b8, 0x0850, 0x3ff8,
+0x0770, 0x08a0, 0x3ff0,
+0x0728, 0x08f0, 0x3fe8,
+0x06e4, 0x093c, 0x3fe0,
+0x069c, 0x0988, 0x3fdc,
+0x0654, 0x09d8, 0x3fd4,
+0x060c, 0x0a28, 0x3fcc,
+0x05c8, 0x0a70, 0x3fc8,
+0x0580, 0x0abc, 0x3fc4,
+0x053c, 0x0b08, 0x3fbc,
+0x04f8, 0x0b50, 0x3fb8,
+0x04b4, 0x0b94, 0x3fb8,
+0x0470, 0x0bdc, 0x3fb4,
+0x0430, 0x0c1c, 0x3fb4,
+0x03ec, 0x0c60, 0x3fb4,
+0x03b0, 0x0c9c, 0x3fb4,
+0x0370, 0x0cd8, 0x3fb8,
+0x0334, 0x0d10, 0x3fbc,
+0x02f8, 0x0d48, 0x3fc0,
+0x02c0, 0x0d78, 0x3fc8,
+0x0288, 0x0da8, 0x3fd0,
+0x0254, 0x0dd4, 0x3fd8,
+0x0220, 0x0dfc, 0x3fe4,
+0x01ec, 0x0e20, 0x3ff4,
+0x01bc, 0x0e44, 0x0000,
+0x0190, 0x0e5c, 0x0014,
+0x0164, 0x0e74, 0x0028,
+0x0138, 0x0e8c, 0x003c,
+0x0114, 0x0e98, 0x0054,
+0x00ec, 0x0ea4, 0x0070,
+0x00cc, 0x0ea8, 0x008c,
+0x00a8, 0x0eb0, 0x00a8,
+};
+
+static const uint16_t easf_filter_3tap_64p_ratio_1_00_s1_12[99] = {
+0x0800, 0x0800, 0x0000,
+0x07ac, 0x085c, 0x3ff8,
+0x0754, 0x08bc, 0x3ff0,
+0x0700, 0x091c, 0x3fe4,
+0x06ac, 0x0978, 0x3fdc,
+0x0658, 0x09d8, 0x3fd0,
+0x0604, 0x0a34, 0x3fc8,
+0x05b0, 0x0a94, 0x3fbc,
+0x0560, 0x0aec, 0x3fb4,
+0x0510, 0x0b44, 0x3fac,
+0x04c0, 0x0ba0, 0x3fa0,
+0x0470, 0x0bf8, 0x3f98,
+0x0424, 0x0c4c, 0x3f90,
+0x03d8, 0x0ca0, 0x3f88,
+0x0390, 0x0cf0, 0x3f80,
+0x0348, 0x0d3c, 0x3f7c,
+0x0300, 0x0d8c, 0x3f74,
+0x02c0, 0x0dd0, 0x3f70,
+0x027c, 0x0e14, 0x3f70,
+0x0240, 0x0e54, 0x3f6c,
+0x0204, 0x0e90, 0x3f6c,
+0x01c8, 0x0ecc, 0x3f6c,
+0x0190, 0x0f00, 0x3f70,
+0x015c, 0x0f30, 0x3f74,
+0x012c, 0x0f58, 0x3f7c,
+0x00fc, 0x0f80, 0x3f84,
+0x00d0, 0x0fa0, 0x3f90,
+0x00a8, 0x0fbc, 0x3f9c,
+0x0080, 0x0fd4, 0x3fac,
+0x005c, 0x0fe8, 0x3fbc,
+0x003c, 0x0ff4, 0x3fd0,
+0x001c, 0x0ffc, 0x3fe8,
+0x0000, 0x1000, 0x0000,
+};
+
+static const uint16_t easf_filter_4tap_64p_ratio_0_30_s1_12[132] = {
+0x0410, 0x07e0, 0x0410, 0x0000,
+0x03f8, 0x07dc, 0x0428, 0x0004,
+0x03e0, 0x07d8, 0x043c, 0x000c,
+0x03c8, 0x07d4, 0x0450, 0x0014,
+0x03ac, 0x07d0, 0x046c, 0x0018,
+0x0394, 0x07cc, 0x0480, 0x0020,
+0x037c, 0x07c8, 0x0494, 0x0028,
+0x0368, 0x07c0, 0x04a8, 0x0030,
+0x0350, 0x07b8, 0x04c0, 0x0038,
+0x0338, 0x07b4, 0x04d4, 0x0040,
+0x0320, 0x07ac, 0x04e8, 0x004c,
+0x0308, 0x07a4, 0x0500, 0x0054,
+0x02f4, 0x079c, 0x0514, 0x005c,
+0x02dc, 0x0794, 0x0528, 0x0068,
+0x02c4, 0x0788, 0x0544, 0x0070,
+0x02b0, 0x0780, 0x0554, 0x007c,
+0x029c, 0x0774, 0x0568, 0x0088,
+0x0284, 0x076c, 0x057c, 0x0094,
+0x0270, 0x0760, 0x0594, 0x009c,
+0x025c, 0x0754, 0x05a8, 0x00a8,
+0x0248, 0x0748, 0x05b8, 0x00b8,
+0x0230, 0x073c, 0x05d0, 0x00c4,
+0x021c, 0x0730, 0x05e4, 0x00d0,
+0x020c, 0x0724, 0x05f4, 0x00dc,
+0x01f8, 0x0714, 0x0608, 0x00ec,
+0x01e4, 0x0708, 0x061c, 0x00f8,
+0x01d0, 0x06f8, 0x0630, 0x0108,
+0x01c0, 0x06e8, 0x0640, 0x0118,
+0x01ac, 0x06dc, 0x0654, 0x0124,
+0x0198, 0x06cc, 0x0668, 0x0134,
+0x0188, 0x06bc, 0x0678, 0x0144,
+0x0178, 0x06ac, 0x0688, 0x0154,
+0x0168, 0x0698, 0x0698, 0x0168,
+};
+
+static const uint16_t easf_filter_4tap_64p_ratio_0_40_s1_12[132] = {
+0x03ec, 0x0824, 0x03f0, 0x0000,
+0x03d4, 0x0824, 0x0404, 0x0004,
+0x03b8, 0x0820, 0x0420, 0x0008,
+0x03a0, 0x081c, 0x0438, 0x000c,
+0x0388, 0x0818, 0x0450, 0x0010,
+0x036c, 0x0814, 0x0468, 0x0018,
+0x0354, 0x0810, 0x0480, 0x001c,
+0x033c, 0x080c, 0x0494, 0x0024,
+0x0324, 0x0804, 0x04b0, 0x0028,
+0x030c, 0x07fc, 0x04c8, 0x0030,
+0x02f4, 0x07f4, 0x04e0, 0x0038,
+0x02dc, 0x07ec, 0x04f8, 0x0040,
+0x02c4, 0x07e4, 0x0510, 0x0048,
+0x02b0, 0x07dc, 0x0524, 0x0050,
+0x0298, 0x07d0, 0x0540, 0x0058,
+0x0280, 0x07c8, 0x0558, 0x0060,
+0x026c, 0x07bc, 0x0570, 0x0068,
+0x0254, 0x07b0, 0x0588, 0x0074,
+0x0240, 0x07a4, 0x05a0, 0x007c,
+0x022c, 0x0798, 0x05b4, 0x0088,
+0x0214, 0x078c, 0x05cc, 0x0094,
+0x0200, 0x077c, 0x05e4, 0x00a0,
+0x01ec, 0x0770, 0x05f8, 0x00ac,
+0x01d8, 0x0760, 0x0610, 0x00b8,
+0x01c4, 0x0750, 0x0628, 0x00c4,
+0x01b4, 0x0744, 0x0638, 0x00d0,
+0x01a0, 0x0734, 0x064c, 0x00e0,
+0x018c, 0x0720, 0x0668, 0x00ec,
+0x017c, 0x0710, 0x0678, 0x00fc,
+0x016c, 0x0700, 0x068c, 0x0108,
+0x0158, 0x06ec, 0x06a4, 0x0118,
+0x0148, 0x06dc, 0x06b4, 0x0128,
+0x0138, 0x06c8, 0x06c8, 0x0138,
+};
+
+static const uint16_t easf_filter_4tap_64p_ratio_0_50_s1_12[132] = {
+0x0394, 0x08d8, 0x0394, 0x0000,
+0x0378, 0x08d4, 0x03b4, 0x0000,
+0x035c, 0x08d4, 0x03d0, 0x0000,
+0x0340, 0x08d4, 0x03ec, 0x0000,
+0x0324, 0x08d0, 0x0408, 0x0004,
+0x0308, 0x08cc, 0x0428, 0x0004,
+0x02f0, 0x08c8, 0x0444, 0x0004,
+0x02d4, 0x08c0, 0x0464, 0x0008,
+0x02b8, 0x08bc, 0x0484, 0x0008,
+0x02a0, 0x08b4, 0x04a0, 0x000c,
+0x0288, 0x08ac, 0x04bc, 0x0010,
+0x026c, 0x08a4, 0x04dc, 0x0014,
+0x0254, 0x0898, 0x04fc, 0x0018,
+0x023c, 0x0890, 0x0518, 0x001c,
+0x0224, 0x0884, 0x0538, 0x0020,
+0x020c, 0x0878, 0x0554, 0x0028,
+0x01f8, 0x086c, 0x0570, 0x002c,
+0x01e0, 0x085c, 0x0590, 0x0034,
+0x01c8, 0x084c, 0x05b4, 0x0038,
+0x01b4, 0x0840, 0x05cc, 0x0040,
+0x01a0, 0x0830, 0x05e8, 0x0048,
+0x018c, 0x081c, 0x0608, 0x0050,
+0x0178, 0x080c, 0x0624, 0x0058,
+0x0164, 0x07f8, 0x0644, 0x0060,
+0x0150, 0x07e4, 0x0660, 0x006c,
+0x0140, 0x07d0, 0x067c, 0x0074,
+0x012c, 0x07bc, 0x0698, 0x0080,
+0x011c, 0x07a8, 0x06b0, 0x008c,
+0x010c, 0x0790, 0x06cc, 0x0098,
+0x00fc, 0x077c, 0x06e4, 0x00a4,
+0x00ec, 0x0764, 0x0700, 0x00b0,
+0x00dc, 0x074c, 0x0718, 0x00c0,
+0x00cc, 0x0734, 0x0734, 0x00cc,
+};
+
+static const uint16_t easf_filter_4tap_64p_ratio_0_60_s1_12[132] = {
+0x0320, 0x09bc, 0x0324, 0x0000,
+0x0300, 0x09c0, 0x0344, 0x3ffc,
+0x02e0, 0x09c0, 0x0364, 0x3ffc,
+0x02c4, 0x09c0, 0x0384, 0x3ff8,
+0x02a4, 0x09bc, 0x03ac, 0x3ff4,
+0x0288, 0x09b8, 0x03cc, 0x3ff4,
+0x0268, 0x09b4, 0x03f4, 0x3ff0,
+0x024c, 0x09b0, 0x0414, 0x3ff0,
+0x0230, 0x09a8, 0x043c, 0x3fec,
+0x0214, 0x09a0, 0x0460, 0x3fec,
+0x01f8, 0x0994, 0x0488, 0x3fec,
+0x01e0, 0x098c, 0x04a8, 0x3fec,
+0x01c4, 0x0980, 0x04d0, 0x3fec,
+0x01ac, 0x0970, 0x04f8, 0x3fec,
+0x0194, 0x0964, 0x051c, 0x3fec,
+0x017c, 0x0954, 0x0544, 0x3fec,
+0x0164, 0x0944, 0x0568, 0x3ff0,
+0x0150, 0x0934, 0x058c, 0x3ff0,
+0x0138, 0x0920, 0x05b4, 0x3ff4,
+0x0124, 0x090c, 0x05d8, 0x3ff8,
+0x0110, 0x08f8, 0x05fc, 0x3ffc,
+0x00fc, 0x08e0, 0x0624, 0x0000,
+0x00e8, 0x08c8, 0x064c, 0x0004,
+0x00d8, 0x08b0, 0x0670, 0x0008,
+0x00c4, 0x0898, 0x0694, 0x0010,
+0x00b4, 0x087c, 0x06bc, 0x0014,
+0x00a4, 0x0860, 0x06e0, 0x001c,
+0x0094, 0x0844, 0x0704, 0x0024,
+0x0088, 0x0828, 0x0724, 0x002c,
+0x0078, 0x080c, 0x0748, 0x0034,
+0x006c, 0x07ec, 0x0768, 0x0040,
+0x0060, 0x07cc, 0x078c, 0x0048,
+0x0054, 0x07ac, 0x07ac, 0x0054,
+};
+
+static const uint16_t easf_filter_4tap_64p_ratio_0_70_s1_12[132] = {
+0x028c, 0x0ae4, 0x0290, 0x0000,
+0x0268, 0x0ae8, 0x02b4, 0x3ffc,
+0x0248, 0x0ae8, 0x02d8, 0x3ff8,
+0x0224, 0x0ae8, 0x0304, 0x3ff0,
+0x0204, 0x0ae4, 0x032c, 0x3fec,
+0x01e4, 0x0ae0, 0x0354, 0x3fe8,
+0x01c4, 0x0adc, 0x037c, 0x3fe4,
+0x01a4, 0x0ad4, 0x03a8, 0x3fe0,
+0x0188, 0x0acc, 0x03d0, 0x3fdc,
+0x016c, 0x0ac0, 0x03fc, 0x3fd8,
+0x0150, 0x0ab4, 0x042c, 0x3fd0,
+0x0134, 0x0aa4, 0x045c, 0x3fcc,
+0x0118, 0x0a94, 0x048c, 0x3fc8,
+0x0100, 0x0a84, 0x04b4, 0x3fc8,
+0x00e8, 0x0a70, 0x04e4, 0x3fc4,
+0x00d0, 0x0a5c, 0x0514, 0x3fc0,
+0x00bc, 0x0a48, 0x0540, 0x3fbc,
+0x00a4, 0x0a30, 0x0570, 0x3fbc,
+0x0090, 0x0a14, 0x05a4, 0x3fb8,
+0x007c, 0x09fc, 0x05d0, 0x3fb8,
+0x006c, 0x09e0, 0x05fc, 0x3fb8,
+0x0058, 0x09c0, 0x0634, 0x3fb4,
+0x0048, 0x09a0, 0x0664, 0x3fb4,
+0x0038, 0x0980, 0x0690, 0x3fb8,
+0x002c, 0x0960, 0x06bc, 0x3fb8,
+0x001c, 0x093c, 0x06f0, 0x3fb8,
+0x0010, 0x0918, 0x071c, 0x3fbc,
+0x0004, 0x08f4, 0x074c, 0x3fbc,
+0x3ff8, 0x08cc, 0x077c, 0x3fc0,
+0x3ff0, 0x08a4, 0x07a8, 0x3fc4,
+0x3fe8, 0x087c, 0x07d0, 0x3fcc,
+0x3fe0, 0x0854, 0x07fc, 0x3fd0,
+0x3fd8, 0x0828, 0x0828, 0x3fd8,
+};
+
+static const uint16_t easf_filter_4tap_64p_ratio_0_80_s1_12[132] = {
+0x01d4, 0x0c54, 0x01d8, 0x0000,
+0x01b0, 0x0c58, 0x01fc, 0x3ffc,
+0x0188, 0x0c58, 0x0228, 0x3ff8,
+0x0164, 0x0c54, 0x0258, 0x3ff0,
+0x0140, 0x0c50, 0x0284, 0x3fec,
+0x0120, 0x0c48, 0x02b4, 0x3fe4,
+0x0100, 0x0c40, 0x02e0, 0x3fe0,
+0x00e0, 0x0c34, 0x0314, 0x3fd8,
+0x00c0, 0x0c28, 0x0344, 0x3fd4,
+0x00a4, 0x0c18, 0x0378, 0x3fcc,
+0x0088, 0x0c04, 0x03ac, 0x3fc8,
+0x0070, 0x0bf0, 0x03e0, 0x3fc0,
+0x0054, 0x0bdc, 0x0418, 0x3fb8,
+0x0040, 0x0bc4, 0x0448, 0x3fb4,
+0x0028, 0x0ba8, 0x0484, 0x3fac,
+0x0014, 0x0b8c, 0x04bc, 0x3fa4,
+0x0000, 0x0b6c, 0x04f4, 0x3fa0,
+0x3fec, 0x0b4c, 0x0530, 0x3f98,
+0x3fdc, 0x0b28, 0x0568, 0x3f94,
+0x3fcc, 0x0b04, 0x05a4, 0x3f8c,
+0x3fc0, 0x0adc, 0x05dc, 0x3f88,
+0x3fb0, 0x0ab4, 0x0618, 0x3f84,
+0x3fa4, 0x0a88, 0x0658, 0x3f7c,
+0x3f9c, 0x0a5c, 0x0690, 0x3f78,
+0x3f90, 0x0a30, 0x06cc, 0x3f74,
+0x3f88, 0x0a00, 0x0708, 0x3f70,
+0x3f80, 0x09d0, 0x0740, 0x3f70,
+0x3f7c, 0x09a0, 0x0778, 0x3f6c,
+0x3f74, 0x096c, 0x07b8, 0x3f68,
+0x3f70, 0x0938, 0x07f0, 0x3f68,
+0x3f6c, 0x0904, 0x0828, 0x3f68,
+0x3f6c, 0x08cc, 0x0860, 0x3f68,
+0x3f68, 0x0898, 0x0898, 0x3f68,
+};
+
+static const uint16_t easf_filter_4tap_64p_ratio_0_90_s1_12[132] = {
+0x00fc, 0x0e0c, 0x00f8, 0x0000,
+0x00d0, 0x0e0c, 0x0128, 0x3ffc,
+0x00ac, 0x0e0c, 0x0150, 0x3ff8,
+0x0084, 0x0e04, 0x0184, 0x3ff4,
+0x0064, 0x0dfc, 0x01b0, 0x3ff0,
+0x0040, 0x0df0, 0x01e4, 0x3fec,
+0x0020, 0x0de0, 0x0218, 0x3fe8,
+0x0004, 0x0dd0, 0x024c, 0x3fe0,
+0x3fe8, 0x0db8, 0x0284, 0x3fdc,
+0x3fcc, 0x0da0, 0x02c0, 0x3fd4,
+0x3fb4, 0x0d84, 0x02fc, 0x3fcc,
+0x3fa0, 0x0d68, 0x0334, 0x3fc4,
+0x3f88, 0x0d48, 0x0370, 0x3fc0,
+0x3f78, 0x0d24, 0x03ac, 0x3fb8,
+0x3f64, 0x0cfc, 0x03f0, 0x3fb0,
+0x3f54, 0x0cd4, 0x0434, 0x3fa4,
+0x3f48, 0x0ca8, 0x0474, 0x3f9c,
+0x3f3c, 0x0c78, 0x04b8, 0x3f94,
+0x3f30, 0x0c48, 0x04fc, 0x3f8c,
+0x3f28, 0x0c14, 0x0540, 0x3f84,
+0x3f20, 0x0be0, 0x0588, 0x3f78,
+0x3f18, 0x0ba8, 0x05d0, 0x3f70,
+0x3f14, 0x0b70, 0x0614, 0x3f68,
+0x3f10, 0x0b34, 0x065c, 0x3f60,
+0x3f0c, 0x0af8, 0x06a8, 0x3f54,
+0x3f0c, 0x0abc, 0x06ec, 0x3f4c,
+0x3f0c, 0x0a7c, 0x0734, 0x3f44,
+0x3f0c, 0x0a38, 0x0780, 0x3f3c,
+0x3f0c, 0x09f8, 0x07c8, 0x3f34,
+0x3f10, 0x09b4, 0x080c, 0x3f30,
+0x3f14, 0x0970, 0x0854, 0x3f28,
+0x3f18, 0x092c, 0x089c, 0x3f20,
+0x3f1c, 0x08e4, 0x08e4, 0x3f1c,
+};
+
+static const uint16_t easf_filter_4tap_64p_ratio_1_00_s1_12[132] = {
+0x0000, 0x1000, 0x0000, 0x0000,
+0x3fd8, 0x0ffc, 0x002c, 0x0000,
+0x3fb4, 0x0ff8, 0x0054, 0x0000,
+0x3f90, 0x0fec, 0x0088, 0x3ffc,
+0x3f70, 0x0fdc, 0x00b8, 0x3ffc,
+0x3f54, 0x0fc8, 0x00ec, 0x3ff8,
+0x3f38, 0x0fb0, 0x0120, 0x3ff8,
+0x3f20, 0x0f94, 0x0158, 0x3ff4,
+0x3f0c, 0x0f70, 0x0194, 0x3ff0,
+0x3ef8, 0x0f4c, 0x01d4, 0x3fe8,
+0x3ee4, 0x0f24, 0x0214, 0x3fe4,
+0x3ed8, 0x0ef8, 0x0250, 0x3fe0,
+0x3ec8, 0x0ec8, 0x0298, 0x3fd8,
+0x3ec0, 0x0e94, 0x02dc, 0x3fd0,
+0x3eb4, 0x0e5c, 0x0328, 0x3fc8,
+0x3eac, 0x0e24, 0x0370, 0x3fc0,
+0x3ea8, 0x0de4, 0x03bc, 0x3fb8,
+0x3ea4, 0x0da4, 0x0408, 0x3fb0,
+0x3ea4, 0x0d64, 0x0454, 0x3fa4,
+0x3ea4, 0x0d20, 0x04a4, 0x3f98,
+0x3ea4, 0x0cd8, 0x04f4, 0x3f90,
+0x3ea4, 0x0c8c, 0x054c, 0x3f84,
+0x3ea8, 0x0c40, 0x05a0, 0x3f78,
+0x3eb0, 0x0bf4, 0x05f0, 0x3f6c,
+0x3eb4, 0x0ba4, 0x0648, 0x3f60,
+0x3ebc, 0x0b54, 0x069c, 0x3f54,
+0x3ec4, 0x0b00, 0x06f4, 0x3f48,
+0x3ecc, 0x0ab0, 0x0748, 0x3f3c,
+0x3ed4, 0x0a58, 0x07a4, 0x3f30,
+0x3ee0, 0x0a04, 0x07f8, 0x3f24,
+0x3ee8, 0x09b0, 0x0850, 0x3f18,
+0x3ef4, 0x0958, 0x08a8, 0x3f0c,
+0x3f00, 0x0900, 0x0900, 0x3f00,
+};
+
+static const uint16_t easf_filter_6tap_64p_ratio_0_30_s1_12[198] = {
+0x012c, 0x0400, 0x05a4, 0x0404, 0x012c, 0x0000,
+0x0124, 0x03f4, 0x05a4, 0x040c, 0x0138, 0x0000,
+0x011c, 0x03e8, 0x05a4, 0x0418, 0x0140, 0x0000,
+0x0114, 0x03dc, 0x05a0, 0x0424, 0x0148, 0x0004,
+0x010c, 0x03d4, 0x05a0, 0x042c, 0x0150, 0x0004,
+0x0100, 0x03c8, 0x05a0, 0x0438, 0x015c, 0x0004,
+0x00f8, 0x03bc, 0x05a0, 0x0440, 0x0164, 0x0008,
+0x00f0, 0x03b0, 0x059c, 0x044c, 0x0170, 0x0008,
+0x00e8, 0x03a4, 0x059c, 0x0458, 0x0178, 0x0008,
+0x00e0, 0x0398, 0x0598, 0x0460, 0x0184, 0x000c,
+0x00d8, 0x038c, 0x0594, 0x0470, 0x018c, 0x000c,
+0x00d0, 0x0380, 0x0594, 0x0474, 0x0198, 0x0010,
+0x00cc, 0x0374, 0x0590, 0x0480, 0x01a0, 0x0010,
+0x00c4, 0x0368, 0x058c, 0x0488, 0x01ac, 0x0014,
+0x00bc, 0x035c, 0x058c, 0x0494, 0x01b4, 0x0014,
+0x00b4, 0x034c, 0x0588, 0x04a0, 0x01c0, 0x0018,
+0x00ac, 0x0340, 0x0584, 0x04a8, 0x01cc, 0x001c,
+0x00a8, 0x0334, 0x0580, 0x04b4, 0x01d4, 0x001c,
+0x00a0, 0x0328, 0x057c, 0x04bc, 0x01e0, 0x0020,
+0x0098, 0x031c, 0x0578, 0x04c4, 0x01ec, 0x0024,
+0x0094, 0x0310, 0x0574, 0x04cc, 0x01f8, 0x0024,
+0x008c, 0x0304, 0x0570, 0x04d8, 0x0200, 0x0028,
+0x0088, 0x02f8, 0x0568, 0x04e0, 0x020c, 0x002c,
+0x0080, 0x02ec, 0x0564, 0x04e8, 0x0218, 0x0030,
+0x007c, 0x02e0, 0x0560, 0x04ec, 0x0224, 0x0034,
+0x0078, 0x02d4, 0x0558, 0x04f8, 0x0230, 0x0034,
+0x0070, 0x02c8, 0x0554, 0x0500, 0x023c, 0x0038,
+0x006c, 0x02bc, 0x054c, 0x050c, 0x0244, 0x003c,
+0x0064, 0x02b0, 0x0548, 0x0514, 0x0250, 0x0040,
+0x0060, 0x02a4, 0x0540, 0x051c, 0x025c, 0x0044,
+0x005c, 0x0298, 0x053c, 0x0520, 0x0268, 0x0048,
+0x0058, 0x028c, 0x0534, 0x0524, 0x0274, 0x0050,
+0x0054, 0x0280, 0x052c, 0x052c, 0x0280, 0x0054,
+};
+
+static const uint16_t easf_filter_6tap_64p_ratio_0_40_s1_12[198] = {
+0x00a0, 0x0418, 0x068c, 0x041c, 0x00a0, 0x0000,
+0x0098, 0x0408, 0x068c, 0x0428, 0x00ac, 0x0000,
+0x0090, 0x03f8, 0x068c, 0x043c, 0x00b4, 0x3ffc,
+0x0088, 0x03e8, 0x068c, 0x044c, 0x00bc, 0x3ffc,
+0x0084, 0x03d8, 0x068c, 0x0458, 0x00c4, 0x3ffc,
+0x007c, 0x03c8, 0x0688, 0x046c, 0x00d0, 0x3ff8,
+0x0074, 0x03b8, 0x0688, 0x047c, 0x00d8, 0x3ff8,
+0x006c, 0x03a8, 0x0684, 0x048c, 0x00e4, 0x3ff8,
+0x0064, 0x0398, 0x0684, 0x049c, 0x00ec, 0x3ff8,
+0x0060, 0x0388, 0x0680, 0x04a8, 0x00f8, 0x3ff8,
+0x0058, 0x0378, 0x0680, 0x04b8, 0x0104, 0x3ff4,
+0x0054, 0x0368, 0x067c, 0x04c8, 0x010c, 0x3ff4,
+0x004c, 0x0358, 0x0678, 0x04d8, 0x0118, 0x3ff4,
+0x0048, 0x0348, 0x0674, 0x04e4, 0x0124, 0x3ff4,
+0x0040, 0x0338, 0x0670, 0x04f4, 0x0130, 0x3ff4,
+0x003c, 0x0328, 0x0668, 0x0504, 0x013c, 0x3ff4,
+0x0038, 0x0318, 0x0664, 0x0510, 0x0148, 0x3ff4,
+0x0034, 0x0308, 0x065c, 0x0520, 0x0154, 0x3ff4,
+0x002c, 0x02f8, 0x0658, 0x0530, 0x0160, 0x3ff4,
+0x0028, 0x02e8, 0x0654, 0x053c, 0x016c, 0x3ff4,
+0x0024, 0x02d8, 0x064c, 0x054c, 0x0178, 0x3ff4,
+0x0020, 0x02c8, 0x0644, 0x055c, 0x0184, 0x3ff4,
+0x001c, 0x02b8, 0x0640, 0x0568, 0x0190, 0x3ff4,
+0x0018, 0x02a8, 0x0638, 0x0574, 0x01a0, 0x3ff4,
+0x0014, 0x0298, 0x0630, 0x0584, 0x01ac, 0x3ff4,
+0x0014, 0x0288, 0x0624, 0x0590, 0x01bc, 0x3ff4,
+0x0010, 0x0278, 0x061c, 0x059c, 0x01c8, 0x3ff8,
+0x000c, 0x0268, 0x0614, 0x05ac, 0x01d4, 0x3ff8,
+0x0008, 0x0258, 0x060c, 0x05b8, 0x01e4, 0x3ff8,
+0x0008, 0x024c, 0x0600, 0x05bc, 0x01f4, 0x3ffc,
+0x0004, 0x023c, 0x05f8, 0x05cc, 0x0200, 0x3ffc,
+0x0004, 0x022c, 0x05ec, 0x05d4, 0x0210, 0x0000,
+0x0000, 0x021c, 0x05e4, 0x05e4, 0x021c, 0x0000,
+};
+
+static const uint16_t easf_filter_6tap_64p_ratio_0_50_s1_12[198] = {
+0x0000, 0x041c, 0x07cc, 0x0418, 0x0000, 0x0000,
+0x3ff8, 0x0404, 0x07cc, 0x0434, 0x0008, 0x3ffc,
+0x3ff4, 0x03ec, 0x07cc, 0x044c, 0x000c, 0x3ffc,
+0x3ff0, 0x03d8, 0x07cc, 0x0460, 0x0014, 0x3ff8,
+0x3fe8, 0x03c0, 0x07cc, 0x0478, 0x001c, 0x3ff8,
+0x3fe4, 0x03ac, 0x07c8, 0x0490, 0x0024, 0x3ff4,
+0x3fe0, 0x0394, 0x07c8, 0x04a4, 0x002c, 0x3ff4,
+0x3fdc, 0x0380, 0x07c4, 0x04bc, 0x0034, 0x3ff0,
+0x3fd8, 0x0368, 0x07c0, 0x04d4, 0x0040, 0x3fec,
+0x3fd4, 0x0350, 0x07bc, 0x04ec, 0x0048, 0x3fec,
+0x3fd0, 0x033c, 0x07b8, 0x0504, 0x0050, 0x3fe8,
+0x3fcc, 0x0324, 0x07b4, 0x051c, 0x005c, 0x3fe4,
+0x3fc8, 0x0310, 0x07ac, 0x0530, 0x0068, 0x3fe4,
+0x3fc4, 0x02fc, 0x07a8, 0x0548, 0x0070, 0x3fe0,
+0x3fc4, 0x02e4, 0x07a0, 0x055c, 0x007c, 0x3fe0,
+0x3fc0, 0x02d0, 0x0798, 0x0574, 0x0088, 0x3fdc,
+0x3fc0, 0x02b8, 0x0790, 0x058c, 0x0094, 0x3fd8,
+0x3fbc, 0x02a4, 0x0788, 0x05a0, 0x00a0, 0x3fd8,
+0x3fbc, 0x0290, 0x077c, 0x05b8, 0x00ac, 0x3fd4,
+0x3fbc, 0x027c, 0x0774, 0x05c8, 0x00b8, 0x3fd4,
+0x3fb8, 0x0268, 0x0768, 0x05e0, 0x00c8, 0x3fd0,
+0x3fb8, 0x0250, 0x0760, 0x05f8, 0x00d4, 0x3fcc,
+0x3fb8, 0x023c, 0x0754, 0x0608, 0x00e4, 0x3fcc,
+0x3fb8, 0x0228, 0x0748, 0x0620, 0x00f0, 0x3fc8,
+0x3fb8, 0x0214, 0x073c, 0x0630, 0x0100, 0x3fc8,
+0x3fb8, 0x0204, 0x072c, 0x0644, 0x0110, 0x3fc4,
+0x3fb8, 0x01f0, 0x0720, 0x0658, 0x011c, 0x3fc4,
+0x3fb8, 0x01dc, 0x0710, 0x0670, 0x012c, 0x3fc0,
+0x3fb8, 0x01c8, 0x0704, 0x0680, 0x013c, 0x3fc0,
+0x3fb8, 0x01b8, 0x06f4, 0x0690, 0x014c, 0x3fc0,
+0x3fb8, 0x01a4, 0x06e4, 0x06a4, 0x0160, 0x3fbc,
+0x3fb8, 0x0194, 0x06d4, 0x06b4, 0x0170, 0x3fbc,
+0x3fbc, 0x0180, 0x06c4, 0x06c4, 0x0180, 0x3fbc,
+};
+
+static const uint16_t easf_filter_6tap_64p_ratio_0_60_s1_12[198] = {
+0x3f64, 0x03ec, 0x0960, 0x03ec, 0x3f64, 0x0000,
+0x3f64, 0x03cc, 0x0960, 0x0408, 0x3f68, 0x0000,
+0x3f60, 0x03ac, 0x0960, 0x042c, 0x3f6c, 0x3ffc,
+0x3f60, 0x038c, 0x0960, 0x0448, 0x3f70, 0x3ffc,
+0x3f60, 0x0370, 0x095c, 0x046c, 0x3f70, 0x3ff8,
+0x3f5c, 0x0350, 0x0958, 0x048c, 0x3f78, 0x3ff8,
+0x3f5c, 0x0334, 0x0954, 0x04ac, 0x3f7c, 0x3ff4,
+0x3f5c, 0x0314, 0x0950, 0x04cc, 0x3f80, 0x3ff4,
+0x3f5c, 0x02f8, 0x0948, 0x04f0, 0x3f84, 0x3ff0,
+0x3f5c, 0x02d8, 0x0944, 0x050c, 0x3f8c, 0x3ff0,
+0x3f60, 0x02bc, 0x093c, 0x052c, 0x3f90, 0x3fec,
+0x3f60, 0x02a0, 0x0930, 0x0550, 0x3f98, 0x3fe8,
+0x3f60, 0x0284, 0x0928, 0x056c, 0x3fa0, 0x3fe8,
+0x3f64, 0x0268, 0x091c, 0x058c, 0x3fa8, 0x3fe4,
+0x3f64, 0x024c, 0x0910, 0x05b0, 0x3fb0, 0x3fe0,
+0x3f64, 0x0230, 0x0904, 0x05d0, 0x3fbc, 0x3fdc,
+0x3f68, 0x0214, 0x08f8, 0x05ec, 0x3fc4, 0x3fdc,
+0x3f6c, 0x01fc, 0x08e8, 0x060c, 0x3fcc, 0x3fd8,
+0x3f6c, 0x01e0, 0x08dc, 0x062c, 0x3fd8, 0x3fd4,
+0x3f70, 0x01c8, 0x08cc, 0x0648, 0x3fe4, 0x3fd0,
+0x3f74, 0x01b0, 0x08bc, 0x0664, 0x3ff0, 0x3fcc,
+0x3f74, 0x0194, 0x08a8, 0x068c, 0x3ffc, 0x3fc8,
+0x3f78, 0x017c, 0x0898, 0x06a8, 0x0008, 0x3fc4,
+0x3f7c, 0x0168, 0x0884, 0x06c0, 0x0018, 0x3fc0,
+0x3f80, 0x0150, 0x0870, 0x06dc, 0x0024, 0x3fc0,
+0x3f84, 0x0138, 0x085c, 0x06f8, 0x0034, 0x3fbc,
+0x3f88, 0x0120, 0x0848, 0x0718, 0x0040, 0x3fb8,
+0x3f8c, 0x010c, 0x0830, 0x0734, 0x0050, 0x3fb4,
+0x3f90, 0x00f8, 0x081c, 0x074c, 0x0060, 0x3fb0,
+0x3f94, 0x00e4, 0x0800, 0x0768, 0x0074, 0x3fac,
+0x3f98, 0x00d0, 0x07e8, 0x0784, 0x0084, 0x3fa8,
+0x3f9c, 0x00bc, 0x07d4, 0x079c, 0x0094, 0x3fa4,
+0x3fa0, 0x00a8, 0x07b8, 0x07b8, 0x00a8, 0x3fa0,
+};
+
+static const uint16_t easf_filter_6tap_64p_ratio_0_70_s1_12[198] = {
+0x3f00, 0x0368, 0x0b30, 0x0368, 0x3f00, 0x0000,
+0x3f04, 0x0340, 0x0b30, 0x0390, 0x3efc, 0x0000,
+0x3f08, 0x0318, 0x0b2c, 0x03bc, 0x3ef8, 0x0000,
+0x3f0c, 0x02f0, 0x0b28, 0x03e4, 0x3ef8, 0x0000,
+0x3f10, 0x02c8, 0x0b24, 0x0410, 0x3ef4, 0x0000,
+0x3f14, 0x02a0, 0x0b1c, 0x043c, 0x3ef4, 0x0000,
+0x3f1c, 0x027c, 0x0b14, 0x0464, 0x3ef0, 0x0000,
+0x3f20, 0x0254, 0x0b0c, 0x0490, 0x3ef0, 0x0000,
+0x3f24, 0x0230, 0x0b00, 0x04bc, 0x3ef0, 0x0000,
+0x3f2c, 0x020c, 0x0af4, 0x04e4, 0x3ef0, 0x0000,
+0x3f30, 0x01e8, 0x0ae8, 0x0510, 0x3ef0, 0x0000,
+0x3f38, 0x01c8, 0x0ad8, 0x0534, 0x3ef4, 0x0000,
+0x3f40, 0x01a4, 0x0ac8, 0x0564, 0x3ef4, 0x3ffc,
+0x3f44, 0x0184, 0x0ab4, 0x0590, 0x3ef8, 0x3ffc,
+0x3f4c, 0x0164, 0x0aa4, 0x05b8, 0x3efc, 0x3ff8,
+0x3f50, 0x0144, 0x0a90, 0x05e8, 0x3efc, 0x3ff8,
+0x3f58, 0x0124, 0x0a78, 0x0610, 0x3f04, 0x3ff8,
+0x3f60, 0x0108, 0x0a64, 0x0638, 0x3f08, 0x3ff4,
+0x3f64, 0x00e8, 0x0a4c, 0x066c, 0x3f0c, 0x3ff0,
+0x3f6c, 0x00cc, 0x0a34, 0x0690, 0x3f14, 0x3ff0,
+0x3f70, 0x00b4, 0x0a18, 0x06bc, 0x3f1c, 0x3fec,
+0x3f78, 0x0098, 0x0a00, 0x06e8, 0x3f20, 0x3fe8,
+0x3f80, 0x007c, 0x09e4, 0x0710, 0x3f2c, 0x3fe4,
+0x3f84, 0x0064, 0x09c8, 0x0738, 0x3f34, 0x3fe4,
+0x3f8c, 0x004c, 0x09a8, 0x0764, 0x3f3c, 0x3fe0,
+0x3f90, 0x0034, 0x098c, 0x078c, 0x3f48, 0x3fdc,
+0x3f98, 0x0020, 0x096c, 0x07b0, 0x3f54, 0x3fd8,
+0x3f9c, 0x0008, 0x094c, 0x07dc, 0x3f60, 0x3fd4,
+0x3fa4, 0x3ff4, 0x0928, 0x0808, 0x3f6c, 0x3fcc,
+0x3fa8, 0x3fe0, 0x0908, 0x082c, 0x3f7c, 0x3fc8,
+0x3fb0, 0x3fcc, 0x08e4, 0x0854, 0x3f88, 0x3fc4,
+0x3fb4, 0x3fbc, 0x08c0, 0x0878, 0x3f98, 0x3fc0,
+0x3fbc, 0x3fac, 0x0898, 0x0898, 0x3fac, 0x3fbc,
+};
+
+static const uint16_t easf_filter_6tap_64p_ratio_0_80_s1_12[198] = {
+0x3efc, 0x0284, 0x0d00, 0x0284, 0x3efc, 0x0000,
+0x3f04, 0x0254, 0x0d00, 0x02b4, 0x3ef0, 0x0004,
+0x3f10, 0x0224, 0x0cf8, 0x02e8, 0x3ee8, 0x0004,
+0x3f18, 0x01f4, 0x0cf4, 0x0318, 0x3ee0, 0x0008,
+0x3f24, 0x01c8, 0x0ce8, 0x034c, 0x3ed8, 0x0008,
+0x3f30, 0x019c, 0x0ce0, 0x037c, 0x3ecc, 0x000c,
+0x3f38, 0x0170, 0x0cd0, 0x03b8, 0x3ec4, 0x000c,
+0x3f44, 0x0144, 0x0cc4, 0x03e8, 0x3ebc, 0x0010,
+0x3f4c, 0x011c, 0x0cb4, 0x0420, 0x3eb4, 0x0010,
+0x3f58, 0x00f4, 0x0ca0, 0x0458, 0x3eac, 0x0010,
+0x3f60, 0x00cc, 0x0c8c, 0x048c, 0x3ea8, 0x0014,
+0x3f6c, 0x00a8, 0x0c74, 0x04c4, 0x3ea0, 0x0014,
+0x3f74, 0x0084, 0x0c5c, 0x04fc, 0x3e9c, 0x0014,
+0x3f7c, 0x0060, 0x0c44, 0x0534, 0x3e94, 0x0018,
+0x3f88, 0x0040, 0x0c28, 0x0568, 0x3e90, 0x0018,
+0x3f90, 0x0020, 0x0c08, 0x05a4, 0x3e8c, 0x0018,
+0x3f98, 0x0000, 0x0bec, 0x05dc, 0x3e88, 0x0018,
+0x3fa0, 0x3fe4, 0x0bcc, 0x0614, 0x3e84, 0x0018,
+0x3fac, 0x3fc4, 0x0ba8, 0x064c, 0x3e84, 0x0018,
+0x3fb4, 0x3fac, 0x0b84, 0x0684, 0x3e80, 0x0018,
+0x3fb8, 0x3f90, 0x0b60, 0x06c0, 0x3e80, 0x0018,
+0x3fc0, 0x3f78, 0x0b38, 0x06f8, 0x3e80, 0x0018,
+0x3fc8, 0x3f60, 0x0b14, 0x072c, 0x3e80, 0x0018,
+0x3fd0, 0x3f4c, 0x0ae8, 0x0760, 0x3e84, 0x0018,
+0x3fd8, 0x3f34, 0x0ac0, 0x079c, 0x3e84, 0x0014,
+0x3fdc, 0x3f20, 0x0a94, 0x07d4, 0x3e88, 0x0014,
+0x3fe4, 0x3f10, 0x0a68, 0x0808, 0x3e8c, 0x0010,
+0x3fe8, 0x3f00, 0x0a38, 0x0840, 0x3e90, 0x0010,
+0x3fec, 0x3ef0, 0x0a0c, 0x0874, 0x3e98, 0x000c,
+0x3ff4, 0x3ee0, 0x09d8, 0x08a8, 0x3ea0, 0x000c,
+0x3ff8, 0x3ed0, 0x09ac, 0x08dc, 0x3ea8, 0x0008,
+0x3ffc, 0x3ec4, 0x0978, 0x0914, 0x3eb0, 0x0004,
+0x0000, 0x3eb8, 0x0948, 0x0948, 0x3eb8, 0x0000,
+};
+
+static const uint16_t easf_filter_6tap_64p_ratio_0_90_s1_12[198] = {
+0x3f60, 0x0154, 0x0e9c, 0x0150, 0x3f60, 0x0000,
+0x3f6c, 0x011c, 0x0e9c, 0x018c, 0x3f50, 0x0000,
+0x3f7c, 0x00ec, 0x0e94, 0x01bc, 0x3f44, 0x0004,
+0x3f88, 0x00b8, 0x0e8c, 0x01f8, 0x3f34, 0x0008,
+0x3f94, 0x0088, 0x0e80, 0x0234, 0x3f28, 0x0008,
+0x3fa0, 0x005c, 0x0e74, 0x026c, 0x3f18, 0x000c,
+0x3fac, 0x0030, 0x0e60, 0x02b0, 0x3f08, 0x000c,
+0x3fb8, 0x0004, 0x0e50, 0x02e8, 0x3efc, 0x0010,
+0x3fc4, 0x3fdc, 0x0e38, 0x0328, 0x3eec, 0x0014,
+0x3fd0, 0x3fb4, 0x0e20, 0x0368, 0x3ee0, 0x0014,
+0x3fd8, 0x3f90, 0x0e04, 0x03ac, 0x3ed0, 0x0018,
+0x3fe4, 0x3f6c, 0x0de8, 0x03e8, 0x3ec4, 0x001c,
+0x3fec, 0x3f4c, 0x0dc8, 0x042c, 0x3eb4, 0x0020,
+0x3ff4, 0x3f2c, 0x0da4, 0x0474, 0x3ea8, 0x0020,
+0x0000, 0x3f0c, 0x0d80, 0x04b8, 0x3e98, 0x0024,
+0x0008, 0x3ef0, 0x0d58, 0x04fc, 0x3e8c, 0x0028,
+0x000c, 0x3ed8, 0x0d30, 0x0540, 0x3e80, 0x002c,
+0x0014, 0x3ec0, 0x0d04, 0x0588, 0x3e74, 0x002c,
+0x001c, 0x3ea8, 0x0cd8, 0x05cc, 0x3e68, 0x0030,
+0x0020, 0x3e94, 0x0ca8, 0x0614, 0x3e5c, 0x0034,
+0x0028, 0x3e80, 0x0c78, 0x065c, 0x3e50, 0x0034,
+0x002c, 0x3e6c, 0x0c44, 0x06a4, 0x3e48, 0x0038,
+0x0030, 0x3e5c, 0x0c0c, 0x06f0, 0x3e3c, 0x003c,
+0x0034, 0x3e50, 0x0bd8, 0x0734, 0x3e34, 0x003c,
+0x0038, 0x3e44, 0x0ba0, 0x0778, 0x3e2c, 0x0040,
+0x003c, 0x3e38, 0x0b64, 0x07c4, 0x3e24, 0x0040,
+0x0040, 0x3e2c, 0x0b28, 0x0808, 0x3e20, 0x0044,
+0x0040, 0x3e24, 0x0aec, 0x0850, 0x3e1c, 0x0044,
+0x0044, 0x3e1c, 0x0aac, 0x0898, 0x3e18, 0x0044,
+0x0044, 0x3e18, 0x0a70, 0x08d8, 0x3e14, 0x0048,
+0x0044, 0x3e14, 0x0a2c, 0x0924, 0x3e10, 0x0048,
+0x0048, 0x3e10, 0x09ec, 0x0964, 0x3e10, 0x0048,
+0x0048, 0x3e10, 0x09a8, 0x09a8, 0x3e10, 0x0048,
+};
+
+static const uint16_t easf_filter_6tap_64p_ratio_1_00_s1_12[198] = {
+0x0000, 0x0000, 0x1000, 0x0000, 0x0000, 0x0000,
+0x000c, 0x3fcc, 0x1000, 0x0034, 0x3ff4, 0x0000,
+0x0018, 0x3f9c, 0x0ff8, 0x0070, 0x3fe4, 0x0000,
+0x0024, 0x3f6c, 0x0ff0, 0x00ac, 0x3fd4, 0x0000,
+0x0030, 0x3f40, 0x0fe4, 0x00e8, 0x3fc4, 0x0000,
+0x0038, 0x3f14, 0x0fd4, 0x0128, 0x3fb4, 0x0004,
+0x0044, 0x3eec, 0x0fc0, 0x0168, 0x3fa4, 0x0004,
+0x004c, 0x3ec8, 0x0fac, 0x01a8, 0x3f94, 0x0004,
+0x0054, 0x3ea4, 0x0f90, 0x01ec, 0x3f84, 0x0008,
+0x005c, 0x3e84, 0x0f74, 0x0234, 0x3f70, 0x0008,
+0x0060, 0x3e64, 0x0f50, 0x0280, 0x3f60, 0x000c,
+0x0068, 0x3e48, 0x0f2c, 0x02c8, 0x3f4c, 0x0010,
+0x006c, 0x3e30, 0x0f04, 0x0318, 0x3f38, 0x0010,
+0x0070, 0x3e18, 0x0edc, 0x0364, 0x3f24, 0x0014,
+0x0074, 0x3e00, 0x0eac, 0x03b8, 0x3f10, 0x0018,
+0x0078, 0x3df0, 0x0e7c, 0x0404, 0x3efc, 0x001c,
+0x007c, 0x3de0, 0x0e48, 0x0454, 0x3ee8, 0x0020,
+0x007c, 0x3dd0, 0x0e14, 0x04ac, 0x3ed4, 0x0020,
+0x0080, 0x3dc4, 0x0dd8, 0x0500, 0x3ec0, 0x0024,
+0x0080, 0x3db8, 0x0d9c, 0x0554, 0x3eac, 0x002c,
+0x0080, 0x3db0, 0x0d5c, 0x05ac, 0x3e98, 0x0030,
+0x0080, 0x3da8, 0x0d1c, 0x0600, 0x3e88, 0x0034,
+0x0080, 0x3da4, 0x0cd8, 0x0658, 0x3e74, 0x0038,
+0x0080, 0x3da4, 0x0c94, 0x06ac, 0x3e60, 0x003c,
+0x007c, 0x3da0, 0x0c4c, 0x070c, 0x3e4c, 0x0040,
+0x007c, 0x3da4, 0x0c00, 0x0760, 0x3e3c, 0x0044,
+0x0078, 0x3da4, 0x0bb4, 0x07bc, 0x3e2c, 0x0048,
+0x0074, 0x3da8, 0x0b64, 0x0814, 0x3e1c, 0x0050,
+0x0074, 0x3db0, 0x0b14, 0x0868, 0x3e0c, 0x0054,
+0x0070, 0x3db8, 0x0ac4, 0x08c0, 0x3dfc, 0x0058,
+0x006c, 0x3dc0, 0x0a70, 0x091c, 0x3dec, 0x005c,
+0x0068, 0x3dc8, 0x0a1c, 0x0974, 0x3de0, 0x0060,
+0x0064, 0x3dd4, 0x09c8, 0x09c8, 0x3dd4, 0x0064,
+};
+
+static struct scale_ratio_to_reg_value_lookup easf_v_bf3_mode_lookup[] = {
{3, 10, 0x0000},
{4, 10, 0x0000},
{5, 10, 0x0000},
@@ -1173,7 +2012,7 @@ struct scale_ratio_to_reg_value_lookup easf_v_bf3_mode_lookup[] = {
{-1, -1, 0x0002},
};
-struct scale_ratio_to_reg_value_lookup easf_h_bf3_mode_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_h_bf3_mode_lookup[] = {
{3, 10, 0x0000},
{4, 10, 0x0000},
{5, 10, 0x0000},
@@ -1185,7 +2024,7 @@ struct scale_ratio_to_reg_value_lookup easf_h_bf3_mode_lookup[] = {
{-1, -1, 0x0002},
};
-struct scale_ratio_to_reg_value_lookup easf_reducer_gain6_6tap_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_reducer_gain6_6tap_lookup[] = {
{3, 10, 0x4100},
{4, 10, 0x4100},
{5, 10, 0x4100},
@@ -1197,7 +2036,7 @@ struct scale_ratio_to_reg_value_lookup easf_reducer_gain6_6tap_lookup[] = {
{-1, -1, 0x4100},
};
-struct scale_ratio_to_reg_value_lookup easf_reducer_gain4_6tap_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_reducer_gain4_6tap_lookup[] = {
{3, 10, 0x4000},
{4, 10, 0x4000},
{5, 10, 0x4000},
@@ -1209,7 +2048,7 @@ struct scale_ratio_to_reg_value_lookup easf_reducer_gain4_6tap_lookup[] = {
{-1, -1, 0x4000},
};
-struct scale_ratio_to_reg_value_lookup easf_gain_ring6_6tap_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_gain_ring6_6tap_lookup[] = {
{3, 10, 0x0000},
{4, 10, 0x251F},
{5, 10, 0x291F},
@@ -1221,7 +2060,7 @@ struct scale_ratio_to_reg_value_lookup easf_gain_ring6_6tap_lookup[] = {
{-1, -1, 0xA640},
};
-struct scale_ratio_to_reg_value_lookup easf_gain_ring4_6tap_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_gain_ring4_6tap_lookup[] = {
{3, 10, 0x0000},
{4, 10, 0x9600},
{5, 10, 0xA460},
@@ -1233,7 +2072,7 @@ struct scale_ratio_to_reg_value_lookup easf_gain_ring4_6tap_lookup[] = {
{-1, -1, 0xB058},
};
-struct scale_ratio_to_reg_value_lookup easf_reducer_gain6_4tap_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_reducer_gain6_4tap_lookup[] = {
{3, 10, 0x4100},
{4, 10, 0x4100},
{5, 10, 0x4100},
@@ -1245,7 +2084,7 @@ struct scale_ratio_to_reg_value_lookup easf_reducer_gain6_4tap_lookup[] = {
{-1, -1, 0x4100},
};
-struct scale_ratio_to_reg_value_lookup easf_reducer_gain4_4tap_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_reducer_gain4_4tap_lookup[] = {
{3, 10, 0x4000},
{4, 10, 0x4000},
{5, 10, 0x4000},
@@ -1257,7 +2096,7 @@ struct scale_ratio_to_reg_value_lookup easf_reducer_gain4_4tap_lookup[] = {
{-1, -1, 0x4000},
};
-struct scale_ratio_to_reg_value_lookup easf_gain_ring6_4tap_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_gain_ring6_4tap_lookup[] = {
{3, 10, 0x0000},
{4, 10, 0x0000},
{5, 10, 0x0000},
@@ -1269,7 +2108,7 @@ struct scale_ratio_to_reg_value_lookup easf_gain_ring6_4tap_lookup[] = {
{-1, -1, 0x0000},
};
-struct scale_ratio_to_reg_value_lookup easf_gain_ring4_4tap_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_gain_ring4_4tap_lookup[] = {
{3, 10, 0x0000},
{4, 10, 0x0000},
{5, 10, 0x0000},
@@ -1281,7 +2120,7 @@ struct scale_ratio_to_reg_value_lookup easf_gain_ring4_4tap_lookup[] = {
{-1, -1, 0xAC00},
};
-struct scale_ratio_to_reg_value_lookup easf_3tap_dntilt_uptilt_offset_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_3tap_dntilt_uptilt_offset_lookup[] = {
{3, 10, 0x0000},
{4, 10, 0x0000},
{5, 10, 0x0000},
@@ -1293,7 +2132,7 @@ struct scale_ratio_to_reg_value_lookup easf_3tap_dntilt_uptilt_offset_lookup[] =
{-1, -1, 0xA8D8},
};
-struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt_maxval_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt_maxval_lookup[] = {
{3, 10, 0x0000},
{4, 10, 0x0000},
{5, 10, 0x0000},
@@ -1305,7 +2144,7 @@ struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt_maxval_lookup[] = {
{-1, -1, 0x3ADB},
};
-struct scale_ratio_to_reg_value_lookup easf_3tap_dntilt_slope_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_3tap_dntilt_slope_lookup[] = {
{3, 10, 0x3800},
{4, 10, 0x3800},
{5, 10, 0x3800},
@@ -1317,7 +2156,7 @@ struct scale_ratio_to_reg_value_lookup easf_3tap_dntilt_slope_lookup[] = {
{-1, -1, 0x3B66},
};
-struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt1_slope_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt1_slope_lookup[] = {
{3, 10, 0x3800},
{4, 10, 0x3800},
{5, 10, 0x3800},
@@ -1329,7 +2168,7 @@ struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt1_slope_lookup[] = {
{-1, -1, 0x2F20},
};
-struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt2_slope_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt2_slope_lookup[] = {
{3, 10, 0x0000},
{4, 10, 0x0000},
{5, 10, 0x0000},
@@ -1341,7 +2180,7 @@ struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt2_slope_lookup[] = {
{-1, -1, 0x1F00},
};
-struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt2_offset_lookup[] = {
+static struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt2_offset_lookup[] = {
{3, 10, 0x0000},
{4, 10, 0x0000},
{5, 10, 0x0000},
@@ -1353,61 +2192,7 @@ struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt2_offset_lookup[] = {
{-1, -1, 0x9E00},
};
-void spl_init_easf_filter_coeffs(void)
-{
- convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_30,
- easf_filter_3tap_64p_ratio_0_30_s1_12, 3);
- convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_40,
- easf_filter_3tap_64p_ratio_0_40_s1_12, 3);
- convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_50,
- easf_filter_3tap_64p_ratio_0_50_s1_12, 3);
- convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_60,
- easf_filter_3tap_64p_ratio_0_60_s1_12, 3);
- convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_70,
- easf_filter_3tap_64p_ratio_0_70_s1_12, 3);
- convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_80,
- easf_filter_3tap_64p_ratio_0_80_s1_12, 3);
- convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_90,
- easf_filter_3tap_64p_ratio_0_90_s1_12, 3);
- convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_1_00,
- easf_filter_3tap_64p_ratio_1_00_s1_12, 3);
-
- convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_30,
- easf_filter_4tap_64p_ratio_0_30_s1_12, 4);
- convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_40,
- easf_filter_4tap_64p_ratio_0_40_s1_12, 4);
- convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_50,
- easf_filter_4tap_64p_ratio_0_50_s1_12, 4);
- convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_60,
- easf_filter_4tap_64p_ratio_0_60_s1_12, 4);
- convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_70,
- easf_filter_4tap_64p_ratio_0_70_s1_12, 4);
- convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_80,
- easf_filter_4tap_64p_ratio_0_80_s1_12, 4);
- convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_90,
- easf_filter_4tap_64p_ratio_0_90_s1_12, 4);
- convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_1_00,
- easf_filter_4tap_64p_ratio_1_00_s1_12, 4);
-
- convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_30,
- easf_filter_6tap_64p_ratio_0_30_s1_12, 6);
- convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_40,
- easf_filter_6tap_64p_ratio_0_40_s1_12, 6);
- convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_50,
- easf_filter_6tap_64p_ratio_0_50_s1_12, 6);
- convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_60,
- easf_filter_6tap_64p_ratio_0_60_s1_12, 6);
- convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_70,
- easf_filter_6tap_64p_ratio_0_70_s1_12, 6);
- convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_80,
- easf_filter_6tap_64p_ratio_0_80_s1_12, 6);
- convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_90,
- easf_filter_6tap_64p_ratio_0_90_s1_12, 6);
- convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_1_00,
- easf_filter_6tap_64p_ratio_1_00_s1_12, 6);
-}
-
-uint16_t *spl_get_easf_filter_3tap_64p(struct spl_fixed31_32 ratio)
+static const uint16_t *spl_get_easf_filter_3tap_64p(struct spl_fixed31_32 ratio)
{
if (ratio.value < spl_fixpt_from_fraction(3, 10).value)
return easf_filter_3tap_64p_ratio_0_30_s1_12;
@@ -1427,7 +2212,7 @@ uint16_t *spl_get_easf_filter_3tap_64p(struct spl_fixed31_32 ratio)
return easf_filter_3tap_64p_ratio_1_00_s1_12;
}
-uint16_t *spl_get_easf_filter_4tap_64p(struct spl_fixed31_32 ratio)
+static const uint16_t *spl_get_easf_filter_4tap_64p(struct spl_fixed31_32 ratio)
{
if (ratio.value < spl_fixpt_from_fraction(3, 10).value)
return easf_filter_4tap_64p_ratio_0_30_s1_12;
@@ -1447,7 +2232,7 @@ uint16_t *spl_get_easf_filter_4tap_64p(struct spl_fixed31_32 ratio)
return easf_filter_4tap_64p_ratio_1_00_s1_12;
}
-uint16_t *spl_get_easf_filter_6tap_64p(struct spl_fixed31_32 ratio)
+static const uint16_t *spl_get_easf_filter_6tap_64p(struct spl_fixed31_32 ratio)
{
if (ratio.value < spl_fixpt_from_fraction(3, 10).value)
return easf_filter_6tap_64p_ratio_0_30_s1_12;
@@ -1467,7 +2252,7 @@ uint16_t *spl_get_easf_filter_6tap_64p(struct spl_fixed31_32 ratio)
return easf_filter_6tap_64p_ratio_1_00_s1_12;
}
-uint16_t *spl_dscl_get_easf_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio)
+const uint16_t *spl_dscl_get_easf_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio)
{
if (taps == 6)
return spl_get_easf_filter_6tap_64p(ratio);
@@ -1482,6 +2267,81 @@ uint16_t *spl_dscl_get_easf_filter_coeffs_64p(int taps, struct spl_fixed31_32 ra
}
}
+static const uint16_t *spl_get_easf_filter_3tap_64p_s1_10(struct spl_fixed31_32 ratio)
+{
+ if (ratio.value < spl_fixpt_from_fraction(3, 10).value)
+ return easf_filter_3tap_64p_ratio_0_30;
+ else if (ratio.value < spl_fixpt_from_fraction(4, 10).value)
+ return easf_filter_3tap_64p_ratio_0_40;
+ else if (ratio.value < spl_fixpt_from_fraction(5, 10).value)
+ return easf_filter_3tap_64p_ratio_0_50;
+ else if (ratio.value < spl_fixpt_from_fraction(6, 10).value)
+ return easf_filter_3tap_64p_ratio_0_60;
+ else if (ratio.value < spl_fixpt_from_fraction(7, 10).value)
+ return easf_filter_3tap_64p_ratio_0_70;
+ else if (ratio.value < spl_fixpt_from_fraction(8, 10).value)
+ return easf_filter_3tap_64p_ratio_0_80;
+ else if (ratio.value < spl_fixpt_from_fraction(9, 10).value)
+ return easf_filter_3tap_64p_ratio_0_90;
+ else
+ return easf_filter_3tap_64p_ratio_1_00;
+}
+
+static const uint16_t *spl_get_easf_filter_4tap_64p_s1_10(struct spl_fixed31_32 ratio)
+{
+ if (ratio.value < spl_fixpt_from_fraction(3, 10).value)
+ return easf_filter_4tap_64p_ratio_0_30;
+ else if (ratio.value < spl_fixpt_from_fraction(4, 10).value)
+ return easf_filter_4tap_64p_ratio_0_40;
+ else if (ratio.value < spl_fixpt_from_fraction(5, 10).value)
+ return easf_filter_4tap_64p_ratio_0_50;
+ else if (ratio.value < spl_fixpt_from_fraction(6, 10).value)
+ return easf_filter_4tap_64p_ratio_0_60;
+ else if (ratio.value < spl_fixpt_from_fraction(7, 10).value)
+ return easf_filter_4tap_64p_ratio_0_70;
+ else if (ratio.value < spl_fixpt_from_fraction(8, 10).value)
+ return easf_filter_4tap_64p_ratio_0_80;
+ else if (ratio.value < spl_fixpt_from_fraction(9, 10).value)
+ return easf_filter_4tap_64p_ratio_0_90;
+ else
+ return easf_filter_4tap_64p_ratio_1_00;
+}
+
+static const uint16_t *spl_get_easf_filter_6tap_64p_s1_10(struct spl_fixed31_32 ratio)
+{
+ if (ratio.value < spl_fixpt_from_fraction(3, 10).value)
+ return easf_filter_6tap_64p_ratio_0_30;
+ else if (ratio.value < spl_fixpt_from_fraction(4, 10).value)
+ return easf_filter_6tap_64p_ratio_0_40;
+ else if (ratio.value < spl_fixpt_from_fraction(5, 10).value)
+ return easf_filter_6tap_64p_ratio_0_50;
+ else if (ratio.value < spl_fixpt_from_fraction(6, 10).value)
+ return easf_filter_6tap_64p_ratio_0_60;
+ else if (ratio.value < spl_fixpt_from_fraction(7, 10).value)
+ return easf_filter_6tap_64p_ratio_0_70;
+ else if (ratio.value < spl_fixpt_from_fraction(8, 10).value)
+ return easf_filter_6tap_64p_ratio_0_80;
+ else if (ratio.value < spl_fixpt_from_fraction(9, 10).value)
+ return easf_filter_6tap_64p_ratio_0_90;
+ else
+ return easf_filter_6tap_64p_ratio_1_00;
+}
+
+const uint16_t *spl_dscl_get_easf_filter_coeffs_64p_s1_10(int taps, struct spl_fixed31_32 ratio)
+{
+ if (taps == 6)
+ return spl_get_easf_filter_6tap_64p_s1_10(ratio);
+ else if (taps == 4)
+ return spl_get_easf_filter_4tap_64p_s1_10(ratio);
+ else if (taps == 3)
+ return spl_get_easf_filter_3tap_64p_s1_10(ratio);
+ else {
+ /* should never happen, bug */
+ SPL_BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+}
+
void spl_set_filters_data(struct dscl_prog_data *dscl_prog_data,
const struct spl_scaler_data *data, bool enable_easf_v,
bool enable_easf_h)
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_easf_filters.h
index 8bb2b8108e38..321ae22a04d4 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_easf_filters.h
@@ -13,11 +13,6 @@ struct scale_ratio_to_reg_value_lookup {
const uint32_t reg_value;
};
-void spl_init_easf_filter_coeffs(void);
-uint16_t *spl_get_easf_filter_3tap_64p(struct spl_fixed31_32 ratio);
-uint16_t *spl_get_easf_filter_4tap_64p(struct spl_fixed31_32 ratio);
-uint16_t *spl_get_easf_filter_6tap_64p(struct spl_fixed31_32 ratio);
-uint16_t *spl_dscl_get_easf_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio);
void spl_set_filters_data(struct dscl_prog_data *dscl_prog_data,
const struct spl_scaler_data *data, bool enable_easf_v,
bool enable_easf_h);
@@ -35,4 +30,8 @@ uint32_t spl_get_3tap_uptilt1_slope(int taps, struct spl_fixed31_32 ratio);
uint32_t spl_get_3tap_uptilt2_slope(int taps, struct spl_fixed31_32 ratio);
uint32_t spl_get_3tap_uptilt2_offset(int taps, struct spl_fixed31_32 ratio);
+/* public API */
+const uint16_t *spl_dscl_get_easf_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio);
+const uint16_t *spl_dscl_get_easf_filter_coeffs_64p_s1_10(int taps, struct spl_fixed31_32 ratio);
+
#endif /* __DC_SPL_SCL_EASF_FILTERS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_filters.c
index b02c7b0b262b..5e52bdf1ad44 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_filters.c
@@ -4,194 +4,6 @@
#include "spl_debug.h"
#include "dc_spl_scl_filters.h"
-//=========================================
-// <num_taps> = 2
-// <num_phases> = 16
-// <scale_ratio> = 0.833333 (input/output)
-// <sharpness> = 0
-// <CoefType> = ModifiedLanczos
-// <CoefQuant> = s1.10
-// <CoefOut> = s1.12
-//=========================================
-static const uint16_t filter_2tap_16p[18] = {
- 0x1000, 0x0000,
- 0x0FF0, 0x0010,
- 0x0FB0, 0x0050,
- 0x0F34, 0x00CC,
- 0x0E68, 0x0198,
- 0x0D44, 0x02BC,
- 0x0BC4, 0x043C,
- 0x09FC, 0x0604,
- 0x0800, 0x0800
-};
-
-//=========================================
-// <num_taps> = 3
-// <num_phases> = 16
-// <scale_ratio> = 0.83333 (input/output)
-// <sharpness> = 0
-// <CoefType> = ModifiedLanczos
-// <CoefQuant> = 1.10
-// <CoefOut> = 1.12
-//=========================================
-static const uint16_t filter_3tap_16p_upscale[27] = {
- 0x0804, 0x07FC, 0x0000,
- 0x06AC, 0x0978, 0x3FDC,
- 0x055C, 0x0AF0, 0x3FB4,
- 0x0420, 0x0C50, 0x3F90,
- 0x0300, 0x0D88, 0x3F78,
- 0x0200, 0x0E90, 0x3F70,
- 0x0128, 0x0F5C, 0x3F7C,
- 0x007C, 0x0FD8, 0x3FAC,
- 0x0000, 0x1000, 0x0000
-};
-
-//=========================================
-// <num_taps> = 3
-// <num_phases> = 16
-// <scale_ratio> = 1.16666 (input/output)
-// <sharpness> = 0
-// <CoefType> = ModifiedLanczos
-// <CoefQuant> = 1.10
-// <CoefOut> = 1.12
-//=========================================
-static const uint16_t filter_3tap_16p_116[27] = {
- 0x0804, 0x07FC, 0x0000,
- 0x0700, 0x0914, 0x3FEC,
- 0x0604, 0x0A1C, 0x3FE0,
- 0x050C, 0x0B14, 0x3FE0,
- 0x041C, 0x0BF4, 0x3FF0,
- 0x0340, 0x0CB0, 0x0010,
- 0x0274, 0x0D3C, 0x0050,
- 0x01C0, 0x0D94, 0x00AC,
- 0x0128, 0x0DB4, 0x0124
-};
-
-//=========================================
-// <num_taps> = 3
-// <num_phases> = 16
-// <scale_ratio> = 1.49999 (input/output)
-// <sharpness> = 0
-// <CoefType> = ModifiedLanczos
-// <CoefQuant> = 1.10
-// <CoefOut> = 1.12
-//=========================================
-static const uint16_t filter_3tap_16p_149[27] = {
- 0x0804, 0x07FC, 0x0000,
- 0x0730, 0x08CC, 0x0004,
- 0x0660, 0x098C, 0x0014,
- 0x0590, 0x0A3C, 0x0034,
- 0x04C4, 0x0AD4, 0x0068,
- 0x0400, 0x0B54, 0x00AC,
- 0x0348, 0x0BB0, 0x0108,
- 0x029C, 0x0BEC, 0x0178,
- 0x0200, 0x0C00, 0x0200
-};
-
-//=========================================
-// <num_taps> = 3
-// <num_phases> = 16
-// <scale_ratio> = 1.83332 (input/output)
-// <sharpness> = 0
-// <CoefType> = ModifiedLanczos
-// <CoefQuant> = 1.10
-// <CoefOut> = 1.12
-//=========================================
-static const uint16_t filter_3tap_16p_183[27] = {
- 0x0804, 0x07FC, 0x0000,
- 0x0754, 0x0880, 0x002C,
- 0x06A8, 0x08F0, 0x0068,
- 0x05FC, 0x0954, 0x00B0,
- 0x0550, 0x09AC, 0x0104,
- 0x04A8, 0x09F0, 0x0168,
- 0x0408, 0x0A20, 0x01D8,
- 0x036C, 0x0A40, 0x0254,
- 0x02DC, 0x0A48, 0x02DC
-};
-
-//=========================================
-// <num_taps> = 4
-// <num_phases> = 16
-// <scale_ratio> = 0.83333 (input/output)
-// <sharpness> = 0
-// <CoefType> = ModifiedLanczos
-// <CoefQuant> = 1.10
-// <CoefOut> = 1.12
-//=========================================
-static const uint16_t filter_4tap_16p_upscale[36] = {
- 0x0000, 0x1000, 0x0000, 0x0000,
- 0x3F74, 0x0FDC, 0x00B4, 0x3FFC,
- 0x3F0C, 0x0F70, 0x0194, 0x3FF0,
- 0x3ECC, 0x0EC4, 0x0298, 0x3FD8,
- 0x3EAC, 0x0DE4, 0x03B8, 0x3FB8,
- 0x3EA4, 0x0CD8, 0x04F4, 0x3F90,
- 0x3EB8, 0x0BA0, 0x0644, 0x3F64,
- 0x3ED8, 0x0A54, 0x07A0, 0x3F34,
- 0x3F00, 0x08FC, 0x0900, 0x3F04
-};
-
-//=========================================
-// <num_taps> = 4
-// <num_phases> = 16
-// <scale_ratio> = 1.16666 (input/output)
-// <sharpness> = 0
-// <CoefType> = ModifiedLanczos
-// <CoefQuant> = 1.10
-// <CoefOut> = 1.12
-//=========================================
-static const uint16_t filter_4tap_16p_116[36] = {
- 0x01A8, 0x0CB4, 0x01A4, 0x0000,
- 0x0110, 0x0CB0, 0x0254, 0x3FEC,
- 0x0090, 0x0C80, 0x031C, 0x3FD4,
- 0x0024, 0x0C2C, 0x03F4, 0x3FBC,
- 0x3FD8, 0x0BAC, 0x04DC, 0x3FA0,
- 0x3F9C, 0x0B14, 0x05CC, 0x3F84,
- 0x3F70, 0x0A60, 0x06C4, 0x3F6C,
- 0x3F5C, 0x098C, 0x07BC, 0x3F5C,
- 0x3F54, 0x08AC, 0x08AC, 0x3F54
-};
-
-//=========================================
-// <num_taps> = 4
-// <num_phases> = 16
-// <scale_ratio> = 1.49999 (input/output)
-// <sharpness> = 0
-// <CoefType> = ModifiedLanczos
-// <CoefQuant> = 1.10
-// <CoefOut> = 1.12
-//=========================================
-static const uint16_t filter_4tap_16p_149[36] = {
- 0x02B8, 0x0A90, 0x02B8, 0x0000,
- 0x0230, 0x0A90, 0x0350, 0x3FF0,
- 0x01B8, 0x0A78, 0x03F0, 0x3FE0,
- 0x0148, 0x0A48, 0x049C, 0x3FD4,
- 0x00E8, 0x0A00, 0x054C, 0x3FCC,
- 0x0098, 0x09A0, 0x0600, 0x3FC8,
- 0x0054, 0x0928, 0x06B4, 0x3FD0,
- 0x001C, 0x08A4, 0x0760, 0x3FE0,
- 0x3FFC, 0x0804, 0x0804, 0x3FFC
-};
-
-//=========================================
-// <num_taps> = 4
-// <num_phases> = 16
-// <scale_ratio> = 1.83332 (input/output)
-// <sharpness> = 0
-// <CoefType> = ModifiedLanczos
-// <CoefQuant> = 1.10
-// <CoefOut> = 1.12
-//=========================================
-static const uint16_t filter_4tap_16p_183[36] = {
- 0x03B0, 0x08A0, 0x03B0, 0x0000,
- 0x0348, 0x0898, 0x041C, 0x0004,
- 0x02DC, 0x0884, 0x0490, 0x0010,
- 0x0278, 0x0864, 0x0500, 0x0024,
- 0x021C, 0x0838, 0x0570, 0x003C,
- 0x01C8, 0x07FC, 0x05E0, 0x005C,
- 0x0178, 0x07B8, 0x064C, 0x0084,
- 0x0130, 0x076C, 0x06B0, 0x00B4,
- 0x00F0, 0x0714, 0x0710, 0x00EC
-};
//=========================================
// <num_taps> = 2
@@ -1318,19 +1130,7 @@ static const uint16_t filter_8tap_64p_183[264] = {
0x3FD4, 0x3F84, 0x0214, 0x0694, 0x0694, 0x0214, 0x3F84, 0x3FD4
};
-const uint16_t *spl_get_filter_3tap_16p(struct spl_fixed31_32 ratio)
-{
- if (ratio.value < spl_fixpt_one.value)
- return filter_3tap_16p_upscale;
- else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
- return filter_3tap_16p_116;
- else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
- return filter_3tap_16p_149;
- else
- return filter_3tap_16p_183;
-}
-
-const uint16_t *spl_get_filter_3tap_64p(struct spl_fixed31_32 ratio)
+static const uint16_t *spl_get_filter_3tap_64p(struct spl_fixed31_32 ratio)
{
if (ratio.value < spl_fixpt_one.value)
return filter_3tap_64p_upscale;
@@ -1342,19 +1142,7 @@ const uint16_t *spl_get_filter_3tap_64p(struct spl_fixed31_32 ratio)
return filter_3tap_64p_183;
}
-const uint16_t *spl_get_filter_4tap_16p(struct spl_fixed31_32 ratio)
-{
- if (ratio.value < spl_fixpt_one.value)
- return filter_4tap_16p_upscale;
- else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
- return filter_4tap_16p_116;
- else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
- return filter_4tap_16p_149;
- else
- return filter_4tap_16p_183;
-}
-
-const uint16_t *spl_get_filter_4tap_64p(struct spl_fixed31_32 ratio)
+static const uint16_t *spl_get_filter_4tap_64p(struct spl_fixed31_32 ratio)
{
if (ratio.value < spl_fixpt_one.value)
return filter_4tap_64p_upscale;
@@ -1366,7 +1154,7 @@ const uint16_t *spl_get_filter_4tap_64p(struct spl_fixed31_32 ratio)
return filter_4tap_64p_183;
}
-const uint16_t *spl_get_filter_5tap_64p(struct spl_fixed31_32 ratio)
+static const uint16_t *spl_get_filter_5tap_64p(struct spl_fixed31_32 ratio)
{
if (ratio.value < spl_fixpt_one.value)
return filter_5tap_64p_upscale;
@@ -1378,7 +1166,7 @@ const uint16_t *spl_get_filter_5tap_64p(struct spl_fixed31_32 ratio)
return filter_5tap_64p_183;
}
-const uint16_t *spl_get_filter_6tap_64p(struct spl_fixed31_32 ratio)
+static const uint16_t *spl_get_filter_6tap_64p(struct spl_fixed31_32 ratio)
{
if (ratio.value < spl_fixpt_one.value)
return filter_6tap_64p_upscale;
@@ -1390,7 +1178,7 @@ const uint16_t *spl_get_filter_6tap_64p(struct spl_fixed31_32 ratio)
return filter_6tap_64p_183;
}
-const uint16_t *spl_get_filter_7tap_64p(struct spl_fixed31_32 ratio)
+static const uint16_t *spl_get_filter_7tap_64p(struct spl_fixed31_32 ratio)
{
if (ratio.value < spl_fixpt_one.value)
return filter_7tap_64p_upscale;
@@ -1402,7 +1190,7 @@ const uint16_t *spl_get_filter_7tap_64p(struct spl_fixed31_32 ratio)
return filter_7tap_64p_183;
}
-const uint16_t *spl_get_filter_8tap_64p(struct spl_fixed31_32 ratio)
+static const uint16_t *spl_get_filter_8tap_64p(struct spl_fixed31_32 ratio)
{
if (ratio.value < spl_fixpt_one.value)
return filter_8tap_64p_upscale;
@@ -1414,12 +1202,7 @@ const uint16_t *spl_get_filter_8tap_64p(struct spl_fixed31_32 ratio)
return filter_8tap_64p_183;
}
-const uint16_t *spl_get_filter_2tap_16p(void)
-{
- return filter_2tap_16p;
-}
-
-const uint16_t *spl_get_filter_2tap_64p(void)
+static const uint16_t *spl_get_filter_2tap_64p(void)
{
return filter_2tap_64p;
}
@@ -1448,4 +1231,3 @@ const uint16_t *spl_dscl_get_filter_coeffs_64p(int taps, struct spl_fixed31_32 r
return NULL;
}
}
-
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_filters.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_filters.h
new file mode 100644
index 000000000000..c315a438d064
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_filters.h
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#ifndef __DC_SPL_SCL_FILTERS_H__
+#define __DC_SPL_SCL_FILTERS_H__
+
+#include "dc_spl_types.h"
+
+/* public API */
+const uint16_t *spl_dscl_get_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio);
+
+#endif /* __DC_SPL_SCL_FILTERS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
index 467af9dd90de..1c3949b24611 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
@@ -427,6 +427,14 @@ struct spl_out {
// SPL inputs
+// opp extra adjustment for rect
+struct spl_opp_adjust {
+ int x;
+ int y;
+ int width;
+ int height;
+};
+
// Basic input information
struct basic_in {
enum spl_pixel_format format; // Pixel Format
@@ -444,6 +452,7 @@ struct basic_in {
} num_slices_recout_width;
} num_h_slices_recout_width_align;
int mpc_h_slice_index; // previous mpc_combine_v - split_idx
+ struct spl_opp_adjust opp_recout_adjust;
// Inputs for adaptive scaler - TODO
enum spl_transfer_func_type tf_type; /* Transfer function type */
enum spl_transfer_func_predefined tf_predefined_type; /* Transfer function predefined type */
@@ -484,7 +493,7 @@ struct spl_sharpness_range {
};
struct adaptive_sharpness {
bool enable;
- int sharpness_level;
+ unsigned int sharpness_level;
struct spl_sharpness_range sharpness_range;
};
enum linear_light_scaling { // convert it in translation logic
@@ -535,6 +544,7 @@ struct spl_in {
bool is_hdr_on;
int h_active;
int v_active;
+ int min_viewport_size;
int sdr_white_level_nits;
enum sharpen_policy sharpen_policy;
};
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.c b/drivers/gpu/drm/amd/display/dc/sspl/spl_custom_float.c
index be2f34d034c5..be2f34d034c5 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/spl_custom_float.c
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.h b/drivers/gpu/drm/amd/display/dc/sspl/spl_custom_float.h
index cdc4e107b9de..cdc4e107b9de 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/spl_custom_float.h
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h b/drivers/gpu/drm/amd/display/dc/sspl/spl_debug.h
index a6f6132df241..a6f6132df241 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/spl_debug.h
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c
index 131f1e3949d3..52d97918a3bd 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c
@@ -346,7 +346,7 @@ struct spl_fixed31_32 spl_fixpt_exp(struct spl_fixed31_32 arg)
if (m > 0)
return spl_fixpt_shl(
spl_fixed31_32_exp_from_taylor_series(r),
- (unsigned char)m);
+ (unsigned int)m);
else
return spl_fixpt_div_int(
spl_fixed31_32_exp_from_taylor_series(r),
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h b/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.h
index ed2647f9a099..9f349ffe9148 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.h
@@ -189,7 +189,7 @@ static inline struct spl_fixed31_32 spl_fixpt_clamp(
* @brief
* result = arg << shift
*/
-static inline struct spl_fixed31_32 spl_fixpt_shl(struct spl_fixed31_32 arg, unsigned char shift)
+static inline struct spl_fixed31_32 spl_fixpt_shl(struct spl_fixed31_32 arg, unsigned int shift)
{
SPL_ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
((arg.value < 0) && (arg.value >= ~(LLONG_MAX >> shift))));
@@ -203,7 +203,7 @@ static inline struct spl_fixed31_32 spl_fixpt_shl(struct spl_fixed31_32 arg, uns
* @brief
* result = arg >> shift
*/
-static inline struct spl_fixed31_32 spl_fixpt_shr(struct spl_fixed31_32 arg, unsigned char shift)
+static inline struct spl_fixed31_32 spl_fixpt_shr(struct spl_fixed31_32 arg, unsigned int shift)
{
bool negative = arg.value < 0;
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h b/drivers/gpu/drm/amd/display/dc/sspl/spl_os_types.h
index 2e6ba71960ac..2e6ba71960ac 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/spl_os_types.h
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 4b3ccbca0da2..4e0efff92dca 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -114,6 +114,7 @@ enum dmub_asic {
DMUB_ASIC_DCN321,
DMUB_ASIC_DCN35,
DMUB_ASIC_DCN351,
+ DMUB_ASIC_DCN36,
DMUB_ASIC_DCN401,
DMUB_ASIC_MAX,
};
@@ -312,7 +313,7 @@ struct dmub_srv_hw_params {
 * @timeout_occured: Indicates a timeout occurred on any message from driver to dmub
* @timeout_cmd: first cmd sent from driver that timed out - subsequent timeouts are not stored
*/
-struct dmub_srv_debug {
+struct dmub_timeout_info {
bool timeout_occured;
union dmub_rb_cmd timeout_cmd;
unsigned long long timestamp;
@@ -339,7 +340,7 @@ struct dmub_diagnostic_data {
uint32_t outbox1_wptr;
uint32_t outbox1_size;
uint32_t gpint_datain0;
- struct dmub_srv_debug timeout_info;
+ struct dmub_timeout_info timeout_info;
uint8_t is_dmcub_enabled : 1;
uint8_t is_dmcub_soft_reset : 1;
uint8_t is_dmcub_secure_reset : 1;
@@ -455,7 +456,7 @@ struct dmub_srv_hw_funcs {
void (*send_inbox0_cmd)(struct dmub_srv *dmub, union dmub_inbox0_data_register data);
uint32_t (*get_current_time)(struct dmub_srv *dmub);
- void (*get_diagnostic_data)(struct dmub_srv *dmub, struct dmub_diagnostic_data *dmub_oca);
+ void (*get_diagnostic_data)(struct dmub_srv *dmub);
bool (*should_detect)(struct dmub_srv *dmub);
void (*init_reg_offsets)(struct dmub_srv *dmub, struct dc_context *ctx);
@@ -518,7 +519,6 @@ struct dmub_srv {
struct dmub_srv_dcn32_regs *regs_dcn32;
struct dmub_srv_dcn35_regs *regs_dcn35;
const struct dmub_srv_dcn401_regs *regs_dcn401;
-
struct dmub_srv_base_funcs funcs;
struct dmub_srv_hw_funcs hw_funcs;
struct dmub_rb inbox1_rb;
@@ -544,7 +544,7 @@ struct dmub_srv {
struct dmub_visual_confirm_color visual_confirm_color;
enum dmub_srv_power_state_type power_state;
- struct dmub_srv_debug debug;
+ struct dmub_diagnostic_data debug;
};
/**
@@ -900,7 +900,7 @@ enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub,
bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entry *entry);
-bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data);
+bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub);
bool dmub_srv_should_detect(struct dmub_srv *dmub);
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index d0fe324cb537..1f5f4e3e49d4 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -161,6 +161,13 @@
#endif
/**
+ * OS/FW agnostic memcmp
+ */
+#ifndef dmub_memcmp
+#define dmub_memcmp(lhs, rhs, bytes) memcmp((lhs), (rhs), (bytes))
+#endif
+
+/**
* OS/FW agnostic udelay
*/
#ifndef dmub_udelay
@@ -1325,6 +1332,16 @@ enum dmub_inbox0_command {
#define DMUB_RB_SIZE (DMUB_RB_CMD_SIZE * DMUB_RB_MAX_ENTRY)
/**
+ * Maximum number of items in the DMUB REG INBOX0 internal ringbuffer.
+ */
+#define DMUB_REG_INBOX0_RB_MAX_ENTRY 16
+
+/**
+ * Ringbuffer size in bytes.
+ */
+#define DMUB_REG_INBOX0_RB_SIZE (DMUB_RB_CMD_SIZE * DMUB_REG_INBOX0_RB_MAX_ENTRY)
+
+/**
* REG_SET mask for reg offload.
*/
#define REG_SET_MASK 0xFFFF
@@ -1460,6 +1477,11 @@ enum dmub_cmd_type {
*/
DMUB_CMD__PSP = 88,
+ /**
+ * Command type used for all Fused IO commands.
+ */
+ DMUB_CMD__FUSED_IO = 89,
+
DMUB_CMD__VBIOS = 128,
};
@@ -1491,6 +1513,10 @@ enum dmub_out_cmd_type {
* Command type used for HPD redetect notification
*/
DMUB_OUT_CMD__HPD_SENSE_NOTIFY = 6,
+ /**
+ * Command type used for Fused IO notification
+ */
+ DMUB_OUT_CMD__FUSED_IO = 7,
};
/* DMUB_CMD__DPIA command sub-types. */
@@ -1517,7 +1543,8 @@ struct dmub_cmd_header {
unsigned int sub_type : 8; /**< command sub type */
unsigned int ret_status : 1; /**< 1 if returned data, 0 otherwise */
unsigned int multi_cmd_pending : 1; /**< 1 if multiple commands chained together */
- unsigned int reserved0 : 6; /**< reserved bits */
+ unsigned int is_reg_based : 1; /**< 1 if register based mailbox cmd, 0 if FB based cmd */
+ unsigned int reserved0 : 5; /**< reserved bits */
unsigned int payload_bytes : 6; /* payload excluding header - up to 60 bytes */
unsigned int reserved1 : 2; /**< reserved bits */
};
@@ -3118,6 +3145,12 @@ struct dmub_cmd_psr_copy_settings_data {
* Some panels request main link off before xth vertical line
*/
uint16_t poweroff_before_vertical_line;
+ /**
+ * Some panels cannot handle idle pattern during PSR entry.
+ * Power down the PHY before disabling the stream to avoid
+ * sending the idle pattern.
+ */
+ uint8_t power_down_phy_before_disable_stream;
};
/**
@@ -5319,6 +5352,63 @@ struct dmub_rb_cmd_get_usbc_cable_id {
} data;
};
+enum dmub_cmd_fused_io_sub_type {
+ DMUB_CMD__FUSED_IO_EXECUTE = 0,
+ DMUB_CMD__FUSED_IO_ABORT = 1,
+};
+
+enum dmub_cmd_fused_request_type {
+ FUSED_REQUEST_READ,
+ FUSED_REQUEST_WRITE,
+ FUSED_REQUEST_POLL,
+};
+
+enum dmub_cmd_fused_request_status {
+ FUSED_REQUEST_STATUS_SUCCESS,
+ FUSED_REQUEST_STATUS_BEGIN,
+ FUSED_REQUEST_STATUS_SUBMIT,
+ FUSED_REQUEST_STATUS_REPLY,
+ FUSED_REQUEST_STATUS_POLL,
+ FUSED_REQUEST_STATUS_ABORTED,
+ FUSED_REQUEST_STATUS_FAILED = 0x80,
+ FUSED_REQUEST_STATUS_INVALID,
+ FUSED_REQUEST_STATUS_BUSY,
+ FUSED_REQUEST_STATUS_TIMEOUT,
+ FUSED_REQUEST_STATUS_POLL_TIMEOUT,
+};
+
+struct dmub_cmd_fused_request {
+ uint8_t status;
+ uint8_t type : 2;
+ uint8_t _reserved0 : 3;
+ uint8_t poll_mask_msb : 3; // Number of MSB to zero out from last byte before comparing
+ uint8_t identifier;
+ uint8_t _reserved1;
+ uint32_t timeout_us;
+ union dmub_cmd_fused_request_location {
+ struct dmub_cmd_fused_request_location_i2c {
+ uint8_t is_aux : 1; // False
+ uint8_t ddc_line : 3;
+ uint8_t _reserved0 : 4;
+ uint8_t address;
+ uint8_t offset;
+ uint8_t length;
+ } i2c;
+ struct dmub_cmd_fused_request_location_aux {
+ uint32_t is_aux : 1; // True
+ uint32_t ddc_line : 3;
+ uint32_t address : 20;
+ uint32_t length : 8; // Automatically split into 16B transactions
+ } aux;
+ } u;
+ uint8_t buffer[0x30]; // Read: out, write: in, poll: expected
+};
+
+struct dmub_rb_cmd_fused_io {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_fused_request request;
+};
+
/**
* Command type of a DMUB_CMD__SECURE_DISPLAY command
*/
@@ -5732,6 +5822,8 @@ union dmub_rb_cmd {
struct dmub_rb_cmd_fams2_drr_update fams2_drr_update;
struct dmub_rb_cmd_fams2_flip fams2_flip;
+
+ struct dmub_rb_cmd_fused_io fused_io;
};
/**
@@ -5762,6 +5854,7 @@ union dmub_rb_out_cmd {
* HPD sense notification command.
*/
struct dmub_rb_cmd_hpd_sense_notify hpd_sense_notify;
+ struct dmub_rb_cmd_fused_io fused_io;
};
#pragma pack(pop)
@@ -5809,6 +5902,42 @@ static inline bool dmub_rb_empty(struct dmub_rb *rb)
}
/**
+ * @brief gets number of outstanding requests in the RB
+ *
+ * @param rb DMUB Ringbuffer
+ * @return true if full
+ */
+static inline uint32_t dmub_rb_num_outstanding(struct dmub_rb *rb)
+{
+ uint32_t data_count;
+
+ if (rb->wrpt >= rb->rptr)
+ data_count = rb->wrpt - rb->rptr;
+ else
+ data_count = rb->capacity - (rb->rptr - rb->wrpt);
+
+ return data_count / DMUB_RB_CMD_SIZE;
+}
+
+/**
+ * @brief gets number of free buffers in the RB
+ *
+ * @param rb DMUB Ringbuffer
+ * @return true if full
+ */
+static inline uint32_t dmub_rb_num_free(struct dmub_rb *rb)
+{
+ uint32_t data_count;
+
+ if (rb->wrpt >= rb->rptr)
+ data_count = rb->wrpt - rb->rptr;
+ else
+ data_count = rb->capacity - (rb->rptr - rb->wrpt);
+
+ return (rb->capacity - data_count) / DMUB_RB_CMD_SIZE;
+}
+
+/**
* @brief Checks if the ringbuffer is full
*
* @param rb DMUB Ringbuffer
diff --git a/drivers/gpu/drm/amd/display/dmub/src/Makefile b/drivers/gpu/drm/amd/display/dmub/src/Makefile
index a00b9e992292..468b768c11ae 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/Makefile
+++ b/drivers/gpu/drm/amd/display/dmub/src/Makefile
@@ -26,6 +26,7 @@ DMUB += dmub_dcn31.o dmub_dcn314.o dmub_dcn315.o dmub_dcn316.o
DMUB += dmub_dcn32.o
DMUB += dmub_dcn35.o
DMUB += dmub_dcn351.o
+DMUB += dmub_dcn36.o
DMUB += dmub_dcn401.o
AMD_DAL_DMUB = $(addprefix $(AMDDALPATH)/dmub/src/,$(DMUB))
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index e500ca9ae09c..73888c1bea93 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -414,63 +414,66 @@ uint32_t dmub_dcn20_get_current_time(struct dmub_srv *dmub)
return REG_READ(DMCUB_TIMER_CURRENT);
}
-void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
+void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub)
{
uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;
+ struct dmub_timeout_info timeout = {0};
- if (!dmub || !diag_data)
+ if (!dmub)
return;
- memset(diag_data, 0, sizeof(*diag_data));
-
- diag_data->dmcub_version = dmub->fw_version;
-
- diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0);
- diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1);
- diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2);
- diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3);
- diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4);
- diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5);
- diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6);
- diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7);
- diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8);
- diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9);
- diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10);
- diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11);
- diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12);
- diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13);
- diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14);
- diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15);
-
- diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
- diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
- diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
-
- diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
- diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
- diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
-
- diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
- diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
- diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
+ /* timeout data filled externally, cache before resetting memory */
+ timeout = dmub->debug.timeout_info;
+ memset(&dmub->debug, 0, sizeof(dmub->debug));
+ dmub->debug.timeout_info = timeout;
+
+ dmub->debug.dmcub_version = dmub->fw_version;
+
+ dmub->debug.scratch[0] = REG_READ(DMCUB_SCRATCH0);
+ dmub->debug.scratch[1] = REG_READ(DMCUB_SCRATCH1);
+ dmub->debug.scratch[2] = REG_READ(DMCUB_SCRATCH2);
+ dmub->debug.scratch[3] = REG_READ(DMCUB_SCRATCH3);
+ dmub->debug.scratch[4] = REG_READ(DMCUB_SCRATCH4);
+ dmub->debug.scratch[5] = REG_READ(DMCUB_SCRATCH5);
+ dmub->debug.scratch[6] = REG_READ(DMCUB_SCRATCH6);
+ dmub->debug.scratch[7] = REG_READ(DMCUB_SCRATCH7);
+ dmub->debug.scratch[8] = REG_READ(DMCUB_SCRATCH8);
+ dmub->debug.scratch[9] = REG_READ(DMCUB_SCRATCH9);
+ dmub->debug.scratch[10] = REG_READ(DMCUB_SCRATCH10);
+ dmub->debug.scratch[11] = REG_READ(DMCUB_SCRATCH11);
+ dmub->debug.scratch[12] = REG_READ(DMCUB_SCRATCH12);
+ dmub->debug.scratch[13] = REG_READ(DMCUB_SCRATCH13);
+ dmub->debug.scratch[14] = REG_READ(DMCUB_SCRATCH14);
+ dmub->debug.scratch[15] = REG_READ(DMCUB_SCRATCH15);
+
+ dmub->debug.undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
+ dmub->debug.inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
+ dmub->debug.data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
+
+ dmub->debug.inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
+ dmub->debug.inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
+ dmub->debug.inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
+
+ dmub->debug.inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
+ dmub->debug.inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
+ dmub->debug.inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
- diag_data->is_dmcub_enabled = is_dmub_enabled;
+ dmub->debug.is_dmcub_enabled = is_dmub_enabled;
REG_GET(DMCUB_CNTL, DMCUB_SOFT_RESET, &is_soft_reset);
- diag_data->is_dmcub_soft_reset = is_soft_reset;
+ dmub->debug.is_dmcub_soft_reset = is_soft_reset;
REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset);
- diag_data->is_dmcub_secure_reset = is_sec_reset;
+ dmub->debug.is_dmcub_secure_reset = is_sec_reset;
REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
- diag_data->is_traceport_en = is_traceport_enabled;
+ dmub->debug.is_traceport_en = is_traceport_enabled;
REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled);
- diag_data->is_cw0_enabled = is_cw0_enabled;
+ dmub->debug.is_cw0_enabled = is_cw0_enabled;
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
- diag_data->is_cw6_enabled = is_cw6_enabled;
- diag_data->timeout_info = dmub->debug;
+ dmub->debug.is_cw6_enabled = is_cw6_enabled;
}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
index de287b101848..42c1fb4bc73f 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
@@ -247,6 +247,6 @@ bool dmub_dcn20_use_cached_trace_buffer(struct dmub_srv *dmub);
uint32_t dmub_dcn20_get_current_time(struct dmub_srv *dmub);
-void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *dmub_oca);
+void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub);
#endif /* _DMUB_DCN20_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index d9f31b191c69..a308bd604677 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -83,8 +83,8 @@ static inline void dmub_dcn31_translate_addr(const union dmub_addr *addr_in,
void dmub_dcn31_reset(struct dmub_srv *dmub)
{
union dmub_gpint_data_register cmd;
- const uint32_t timeout = 100;
- uint32_t in_reset, scratch, i, pwait_mode;
+ const uint32_t timeout = 100000;
+ uint32_t in_reset, is_enabled, scratch, i, pwait_mode;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
@@ -108,7 +108,7 @@ void dmub_dcn31_reset(struct dmub_srv *dmub)
}
for (i = 0; i < timeout; ++i) {
- scratch = dmub->hw_funcs.get_gpint_response(dmub);
+ scratch = REG_READ(DMCUB_SCRATCH7);
if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
break;
@@ -125,9 +125,14 @@ void dmub_dcn31_reset(struct dmub_srv *dmub)
/* Force reset in case we timed out, DMCUB is likely hung. */
}
- REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
- REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
- REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+ REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled);
+
+ if (is_enabled) {
+ REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
+ REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
+ }
+
REG_WRITE(DMCUB_INBOX1_RPTR, 0);
REG_WRITE(DMCUB_INBOX1_WPTR, 0);
REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
@@ -371,6 +376,7 @@ void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
boot_options.bits.usb4_cm_version = params->usb4_cm_version;
boot_options.bits.dpia_hpd_int_enable_supported = params->dpia_hpd_int_enable_supported;
boot_options.bits.power_optimization = params->power_optimization;
+ boot_options.bits.lower_hbr3_phy_ssc = params->lower_hbr3_phy_ssc;
boot_options.bits.sel_mux_phy_c_d_phy_f_g = (dmub->asic == DMUB_ASIC_DCN31B) ? 1 : 0;
@@ -408,69 +414,72 @@ uint32_t dmub_dcn31_get_current_time(struct dmub_srv *dmub)
return REG_READ(DMCUB_TIMER_CURRENT);
}
-void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
+void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub)
{
uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;
+ struct dmub_timeout_info timeout = {0};
- if (!dmub || !diag_data)
+ if (!dmub)
return;
- memset(diag_data, 0, sizeof(*diag_data));
-
- diag_data->dmcub_version = dmub->fw_version;
-
- diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0);
- diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1);
- diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2);
- diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3);
- diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4);
- diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5);
- diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6);
- diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7);
- diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8);
- diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9);
- diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10);
- diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11);
- diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12);
- diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13);
- diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14);
- diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15);
-
- diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
- diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
- diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
-
- diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
- diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
- diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
-
- diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
- diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
- diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
-
- diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
- diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
- diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
+ /* timeout data filled externally, cache before resetting memory */
+ timeout = dmub->debug.timeout_info;
+ memset(&dmub->debug, 0, sizeof(dmub->debug));
+ dmub->debug.timeout_info = timeout;
+
+ dmub->debug.dmcub_version = dmub->fw_version;
+
+ dmub->debug.scratch[0] = REG_READ(DMCUB_SCRATCH0);
+ dmub->debug.scratch[1] = REG_READ(DMCUB_SCRATCH1);
+ dmub->debug.scratch[2] = REG_READ(DMCUB_SCRATCH2);
+ dmub->debug.scratch[3] = REG_READ(DMCUB_SCRATCH3);
+ dmub->debug.scratch[4] = REG_READ(DMCUB_SCRATCH4);
+ dmub->debug.scratch[5] = REG_READ(DMCUB_SCRATCH5);
+ dmub->debug.scratch[6] = REG_READ(DMCUB_SCRATCH6);
+ dmub->debug.scratch[7] = REG_READ(DMCUB_SCRATCH7);
+ dmub->debug.scratch[8] = REG_READ(DMCUB_SCRATCH8);
+ dmub->debug.scratch[9] = REG_READ(DMCUB_SCRATCH9);
+ dmub->debug.scratch[10] = REG_READ(DMCUB_SCRATCH10);
+ dmub->debug.scratch[11] = REG_READ(DMCUB_SCRATCH11);
+ dmub->debug.scratch[12] = REG_READ(DMCUB_SCRATCH12);
+ dmub->debug.scratch[13] = REG_READ(DMCUB_SCRATCH13);
+ dmub->debug.scratch[14] = REG_READ(DMCUB_SCRATCH14);
+ dmub->debug.scratch[15] = REG_READ(DMCUB_SCRATCH15);
+
+ dmub->debug.undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
+ dmub->debug.inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
+ dmub->debug.data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
+
+ dmub->debug.inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
+ dmub->debug.inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
+ dmub->debug.inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
+
+ dmub->debug.inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
+ dmub->debug.inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
+ dmub->debug.inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
+
+ dmub->debug.outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
+ dmub->debug.outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
+ dmub->debug.outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
- diag_data->is_dmcub_enabled = is_dmub_enabled;
+ dmub->debug.is_dmcub_enabled = is_dmub_enabled;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
- diag_data->is_dmcub_soft_reset = is_soft_reset;
+ dmub->debug.is_dmcub_soft_reset = is_soft_reset;
REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset);
- diag_data->is_dmcub_secure_reset = is_sec_reset;
+ dmub->debug.is_dmcub_secure_reset = is_sec_reset;
REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
- diag_data->is_traceport_en = is_traceport_enabled;
+ dmub->debug.is_traceport_en = is_traceport_enabled;
REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled);
- diag_data->is_cw0_enabled = is_cw0_enabled;
+ dmub->debug.is_cw0_enabled = is_cw0_enabled;
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
- diag_data->is_cw6_enabled = is_cw6_enabled;
- diag_data->timeout_info = dmub->debug;
+ dmub->debug.is_cw6_enabled = is_cw6_enabled;
}
bool dmub_dcn31_should_detect(struct dmub_srv *dmub)
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
index eccdab4986ce..1c43ef2bca66 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
@@ -251,7 +251,7 @@ void dmub_dcn31_set_outbox0_rptr(struct dmub_srv *dmub, uint32_t rptr_offset);
uint32_t dmub_dcn31_get_current_time(struct dmub_srv *dmub);
-void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data);
+void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub);
bool dmub_dcn31_should_detect(struct dmub_srv *dmub);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
index 9600b7f858b0..e7056205b050 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
@@ -417,73 +417,75 @@ uint32_t dmub_dcn32_get_current_time(struct dmub_srv *dmub)
return REG_READ(DMCUB_TIMER_CURRENT);
}
-void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
+void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub)
{
uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;
+ struct dmub_timeout_info timeout = {0};
- if (!dmub || !diag_data)
+ if (!dmub)
return;
- memset(diag_data, 0, sizeof(*diag_data));
-
- diag_data->dmcub_version = dmub->fw_version;
-
- diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0);
- diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1);
- diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2);
- diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3);
- diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4);
- diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5);
- diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6);
- diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7);
- diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8);
- diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9);
- diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10);
- diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11);
- diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12);
- diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13);
- diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14);
- diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15);
- diag_data->scratch[16] = REG_READ(DMCUB_SCRATCH16);
-
- diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
- diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
- diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
-
- diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
- diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
- diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
-
- diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
- diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
- diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
-
- diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
- diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
- diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
+ /* timeout data filled externally, cache before resetting memory */
+ timeout = dmub->debug.timeout_info;
+ memset(&dmub->debug, 0, sizeof(dmub->debug));
+ dmub->debug.timeout_info = timeout;
+
+ dmub->debug.dmcub_version = dmub->fw_version;
+
+ dmub->debug.scratch[0] = REG_READ(DMCUB_SCRATCH0);
+ dmub->debug.scratch[1] = REG_READ(DMCUB_SCRATCH1);
+ dmub->debug.scratch[2] = REG_READ(DMCUB_SCRATCH2);
+ dmub->debug.scratch[3] = REG_READ(DMCUB_SCRATCH3);
+ dmub->debug.scratch[4] = REG_READ(DMCUB_SCRATCH4);
+ dmub->debug.scratch[5] = REG_READ(DMCUB_SCRATCH5);
+ dmub->debug.scratch[6] = REG_READ(DMCUB_SCRATCH6);
+ dmub->debug.scratch[7] = REG_READ(DMCUB_SCRATCH7);
+ dmub->debug.scratch[8] = REG_READ(DMCUB_SCRATCH8);
+ dmub->debug.scratch[9] = REG_READ(DMCUB_SCRATCH9);
+ dmub->debug.scratch[10] = REG_READ(DMCUB_SCRATCH10);
+ dmub->debug.scratch[11] = REG_READ(DMCUB_SCRATCH11);
+ dmub->debug.scratch[12] = REG_READ(DMCUB_SCRATCH12);
+ dmub->debug.scratch[13] = REG_READ(DMCUB_SCRATCH13);
+ dmub->debug.scratch[14] = REG_READ(DMCUB_SCRATCH14);
+ dmub->debug.scratch[15] = REG_READ(DMCUB_SCRATCH15);
+ dmub->debug.scratch[16] = REG_READ(DMCUB_SCRATCH16);
+
+ dmub->debug.undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
+ dmub->debug.inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
+ dmub->debug.data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
+
+ dmub->debug.inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
+ dmub->debug.inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
+ dmub->debug.inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
+
+ dmub->debug.inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
+ dmub->debug.inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
+ dmub->debug.inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
+
+ dmub->debug.outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
+ dmub->debug.outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
+ dmub->debug.outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
- diag_data->is_dmcub_enabled = is_dmub_enabled;
+ dmub->debug.is_dmcub_enabled = is_dmub_enabled;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
- diag_data->is_dmcub_soft_reset = is_soft_reset;
+ dmub->debug.is_dmcub_soft_reset = is_soft_reset;
REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset);
- diag_data->is_dmcub_secure_reset = is_sec_reset;
+ dmub->debug.is_dmcub_secure_reset = is_sec_reset;
REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
- diag_data->is_traceport_en = is_traceport_enabled;
+ dmub->debug.is_traceport_en = is_traceport_enabled;
REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled);
- diag_data->is_cw0_enabled = is_cw0_enabled;
+ dmub->debug.is_cw0_enabled = is_cw0_enabled;
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
- diag_data->is_cw6_enabled = is_cw6_enabled;
+ dmub->debug.is_cw6_enabled = is_cw6_enabled;
- diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
-
- diag_data->timeout_info = dmub->debug;
+ dmub->debug.gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
}
void dmub_dcn32_configure_dmub_in_system_memory(struct dmub_srv *dmub)
{
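The dcn32 hunk above drops the caller-provided diag_data buffer and captures the snapshot into the embedded dmub->debug state, preserving only timeout_info across the memset since that field is filled in elsewhere. A minimal standalone sketch of this cache-and-restore pattern, using hypothetical struct names rather than the real dmub_srv layout:

#include <string.h>

/* Hypothetical stand-ins for dmub_timeout_info and dmub->debug. */
struct timeout_info { unsigned int wait_us; };
struct debug_state {
	unsigned int version;
	unsigned int scratch[17];
	struct timeout_info timeout_info; /* filled externally */
};

/* Clear the snapshot but keep the externally filled timeout data. */
static void refresh_debug_state(struct debug_state *dbg, unsigned int fw_version)
{
	struct timeout_info saved = dbg->timeout_info;

	memset(dbg, 0, sizeof(*dbg));
	dbg->timeout_info = saved;
	dbg->version = fw_version;
	/* ...re-read scratch registers and mailbox pointers here... */
}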
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
index 29c1132951af..1a229450c53d 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
@@ -254,7 +254,7 @@ void dmub_dcn32_set_outbox0_rptr(struct dmub_srv *dmub, uint32_t rptr_offset);
uint32_t dmub_dcn32_get_current_time(struct dmub_srv *dmub);
-void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data);
+void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub);
void dmub_dcn32_configure_dmub_in_system_memory(struct dmub_srv *dmub);
void dmub_dcn32_send_inbox0_cmd(struct dmub_srv *dmub, union dmub_inbox0_data_register data);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
index e5e77bd3c31e..72a0f078cd1a 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
@@ -88,7 +88,7 @@ static inline void dmub_dcn35_translate_addr(const union dmub_addr *addr_in,
void dmub_dcn35_reset(struct dmub_srv *dmub)
{
union dmub_gpint_data_register cmd;
- const uint32_t timeout = 100;
+ const uint32_t timeout = 100000;
uint32_t in_reset, is_enabled, scratch, i, pwait_mode;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
@@ -113,7 +113,7 @@ void dmub_dcn35_reset(struct dmub_srv *dmub)
}
for (i = 0; i < timeout; ++i) {
- scratch = dmub->hw_funcs.get_gpint_response(dmub);
+ scratch = REG_READ(DMCUB_SCRATCH7);
if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
break;
@@ -462,66 +462,69 @@ uint32_t dmub_dcn35_get_current_time(struct dmub_srv *dmub)
return REG_READ(DMCUB_TIMER_CURRENT);
}
-void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
+void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub)
{
uint32_t is_dmub_enabled, is_soft_reset;
uint32_t is_traceport_enabled, is_cw6_enabled;
+ struct dmub_timeout_info timeout = {0};
- if (!dmub || !diag_data)
+ if (!dmub)
return;
- memset(diag_data, 0, sizeof(*diag_data));
-
- diag_data->dmcub_version = dmub->fw_version;
-
- diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0);
- diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1);
- diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2);
- diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3);
- diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4);
- diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5);
- diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6);
- diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7);
- diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8);
- diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9);
- diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10);
- diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11);
- diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12);
- diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13);
- diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14);
- diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15);
- diag_data->scratch[16] = REG_READ(DMCUB_SCRATCH16);
-
- diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
- diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
- diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
-
- diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
- diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
- diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
-
- diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
- diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
- diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
-
- diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
- diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
- diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
+ /* timeout data filled externally, cache before resetting memory */
+ timeout = dmub->debug.timeout_info;
+ memset(&dmub->debug, 0, sizeof(dmub->debug));
+ dmub->debug.timeout_info = timeout;
+
+ dmub->debug.dmcub_version = dmub->fw_version;
+
+ dmub->debug.scratch[0] = REG_READ(DMCUB_SCRATCH0);
+ dmub->debug.scratch[1] = REG_READ(DMCUB_SCRATCH1);
+ dmub->debug.scratch[2] = REG_READ(DMCUB_SCRATCH2);
+ dmub->debug.scratch[3] = REG_READ(DMCUB_SCRATCH3);
+ dmub->debug.scratch[4] = REG_READ(DMCUB_SCRATCH4);
+ dmub->debug.scratch[5] = REG_READ(DMCUB_SCRATCH5);
+ dmub->debug.scratch[6] = REG_READ(DMCUB_SCRATCH6);
+ dmub->debug.scratch[7] = REG_READ(DMCUB_SCRATCH7);
+ dmub->debug.scratch[8] = REG_READ(DMCUB_SCRATCH8);
+ dmub->debug.scratch[9] = REG_READ(DMCUB_SCRATCH9);
+ dmub->debug.scratch[10] = REG_READ(DMCUB_SCRATCH10);
+ dmub->debug.scratch[11] = REG_READ(DMCUB_SCRATCH11);
+ dmub->debug.scratch[12] = REG_READ(DMCUB_SCRATCH12);
+ dmub->debug.scratch[13] = REG_READ(DMCUB_SCRATCH13);
+ dmub->debug.scratch[14] = REG_READ(DMCUB_SCRATCH14);
+ dmub->debug.scratch[15] = REG_READ(DMCUB_SCRATCH15);
+ dmub->debug.scratch[16] = REG_READ(DMCUB_SCRATCH16);
+
+ dmub->debug.undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
+ dmub->debug.inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
+ dmub->debug.data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
+
+ dmub->debug.inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
+ dmub->debug.inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
+ dmub->debug.inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
+
+ dmub->debug.inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
+ dmub->debug.inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
+ dmub->debug.inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
+
+ dmub->debug.outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
+ dmub->debug.outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
+ dmub->debug.outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
- diag_data->is_dmcub_enabled = is_dmub_enabled;
+ dmub->debug.is_dmcub_enabled = is_dmub_enabled;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
- diag_data->is_dmcub_soft_reset = is_soft_reset;
+ dmub->debug.is_dmcub_soft_reset = is_soft_reset;
REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
- diag_data->is_traceport_en = is_traceport_enabled;
+ dmub->debug.is_traceport_en = is_traceport_enabled;
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
- diag_data->is_cw6_enabled = is_cw6_enabled;
+ dmub->debug.is_cw6_enabled = is_cw6_enabled;
- diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
- diag_data->timeout_info = dmub->debug;
+ dmub->debug.gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
}
void dmub_dcn35_configure_dmub_in_system_memory(struct dmub_srv *dmub)
{
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h
index 686e97c00ccc..39fcb7275da5 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h
@@ -269,7 +269,7 @@ void dmub_dcn35_set_outbox0_rptr(struct dmub_srv *dmub, uint32_t rptr_offset);
uint32_t dmub_dcn35_get_current_time(struct dmub_srv *dmub);
-void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data);
+void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub);
void dmub_dcn35_configure_dmub_in_system_memory(struct dmub_srv *dmub);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn36.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn36.c
new file mode 100644
index 000000000000..b1ce09d48920
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn36.c
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2025 Advanced Micro Devices, Inc. */
+
+#include "../dmub_srv.h"
+#include "dmub_reg.h"
+#include "dmub_dcn36.h"
+
+#include "dcn/dcn_3_6_0_offset.h"
+#include "dcn/dcn_3_6_0_sh_mask.h"
+
+#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]
+#define CTX dmub
+#define REGS dmub->regs_dcn35
+#define REG_OFFSET_EXP(reg_name) BASE(reg##reg_name##_BASE_IDX) + reg##reg_name
+
+void dmub_srv_dcn36_regs_init(struct dmub_srv *dmub, struct dc_context *ctx)
+{
+ struct dmub_srv_dcn35_regs *regs = dmub->regs_dcn35;
+#define REG_STRUCT regs
+
+#define DMUB_SR(reg) REG_STRUCT->offset.reg = REG_OFFSET_EXP(reg);
+ DMUB_DCN35_REGS()
+ DMCUB_INTERNAL_REGS()
+#undef DMUB_SR
+
+#define DMUB_SF(reg, field) REG_STRUCT->mask.reg##__##field = FD_MASK(reg, field);
+ DMUB_DCN35_FIELDS()
+#undef DMUB_SF
+
+#define DMUB_SF(reg, field) REG_STRUCT->shift.reg##__##field = FD_SHIFT(reg, field);
+ DMUB_DCN35_FIELDS()
+#undef DMUB_SF
+#undef REG_STRUCT
+}
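dmub_dcn36.c above reuses the DCN35 register and field lists, redefining DMUB_SR and DMUB_SF before each expansion so the same X-macro table emits offset, mask, and shift assignments in turn. A small self-contained illustration of that X-macro technique, with made-up names (DEMO_REGS, DEMO_OFFSET_*) standing in for the real DMUB lists:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical register list; the real driver uses DMUB_DCN35_REGS(). */
#define DEMO_REGS() \
	DEMO_REG(CNTL) \
	DEMO_REG(STATUS)

/* First expansion: declare one uint32_t per listed register. */
struct demo_offsets {
#define DEMO_REG(r) uint32_t r;
	DEMO_REGS()
#undef DEMO_REG
};

/* Illustrative offsets standing in for reg##name + BASE(reg##name##_BASE_IDX). */
#define DEMO_OFFSET_CNTL   0x100u
#define DEMO_OFFSET_STATUS 0x104u

/* Second expansion: the same list now emits one assignment per register. */
static void demo_regs_init(struct demo_offsets *offs)
{
#define DEMO_REG(r) offs->r = DEMO_OFFSET_##r;
	DEMO_REGS()
#undef DEMO_REG
}

int main(void)
{
	struct demo_offsets offs;

	demo_regs_init(&offs);
	printf("CNTL=0x%x STATUS=0x%x\n", (unsigned)offs.CNTL, (unsigned)offs.STATUS);
	return 0;
}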
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn36.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn36.h
new file mode 100644
index 000000000000..57850550f682
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn36.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2025 Advanced Micro Devices, Inc. */
+
+#ifndef _DMUB_DCN36_H_
+#define _DMUB_DCN36_H_
+
+#include "dmub_dcn35.h"
+
+struct dmub_srv;
+
+void dmub_srv_dcn36_regs_init(struct dmub_srv *dmub, struct dc_context *ctx);
+
+#endif /* _DMUB_DCN36_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
index 39a8cb6d7523..e67f7c4784eb 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
@@ -63,8 +63,10 @@ static inline void dmub_dcn401_translate_addr(const union dmub_addr *addr_in,
void dmub_dcn401_reset(struct dmub_srv *dmub)
{
union dmub_gpint_data_register cmd;
- const uint32_t timeout = 30;
- uint32_t in_reset, scratch, i;
+ const uint32_t timeout_us = 1 * 1000 * 1000; //1s
+ const uint32_t poll_delay_us = 1; //1us
+ uint32_t i = 0;
+ uint32_t in_reset, scratch, pwait_mode;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
@@ -75,32 +77,35 @@ void dmub_dcn401_reset(struct dmub_srv *dmub)
dmub->hw_funcs.set_gpint(dmub, cmd);
- /**
- * Timeout covers both the ACK and the wait
- * for remaining work to finish.
- *
- * This is mostly bound by the PHY disable sequence.
- * Each register check will be greater than 1us, so
- * don't bother using udelay.
- */
-
- for (i = 0; i < timeout; ++i) {
+ for (i = 0; i < timeout_us; i++) {
if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
break;
+
+ udelay(poll_delay_us);
}
- for (i = 0; i < timeout; ++i) {
+ for (; i < timeout_us; i++) {
scratch = dmub->hw_funcs.get_gpint_response(dmub);
if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
break;
+
+ udelay(poll_delay_us);
}
- /* Force reset in case we timed out, DMCUB is likely hung. */
+ for (; i < timeout_us; i++) {
+ REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &pwait_mode);
+ if (pwait_mode & (1 << 0))
+ break;
+
+ udelay(poll_delay_us);
+ }
+ }
+
+ if (i >= timeout_us) {
+ /* timeout should never occur */
+ BREAK_TO_DEBUGGER();
}
- REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
- REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
- REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
REG_WRITE(DMCUB_INBOX1_RPTR, 0);
REG_WRITE(DMCUB_INBOX1_WPTR, 0);
REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
@@ -131,7 +136,10 @@ void dmub_dcn401_backdoor_load(struct dmub_srv *dmub,
dmub_dcn401_get_fb_base_offset(dmub, &fb_base, &fb_offset);
+ /* reset and disable DMCUB and MMHUBBUB DMUIF */
REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
+ REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
dmub_dcn401_translate_addr(&cw0->offset, fb_base, fb_offset, &offset);
@@ -151,6 +159,7 @@ void dmub_dcn401_backdoor_load(struct dmub_srv *dmub,
DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top,
DMCUB_REGION3_CW1_ENABLE, 1);
+ /* release DMCUB reset only to prevent premature execution */
REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID,
0x20);
}
@@ -161,7 +170,10 @@ void dmub_dcn401_backdoor_load_zfb_mode(struct dmub_srv *dmub,
{
union dmub_addr offset;
+ /* reset and disable DMCUB and MMHUBBUB DMUIF */
REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
+ REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
offset = cw0->offset;
@@ -181,6 +193,7 @@ void dmub_dcn401_backdoor_load_zfb_mode(struct dmub_srv *dmub,
DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top,
DMCUB_REGION3_CW1_ENABLE, 1);
+ /* release DMCUB reset only to prevent premature execution */
REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID,
0x20);
}
@@ -402,72 +415,75 @@ uint32_t dmub_dcn401_get_current_time(struct dmub_srv *dmub)
return REG_READ(DMCUB_TIMER_CURRENT);
}
-void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
+void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub)
{
uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;
+ struct dmub_timeout_info timeout = {0};
- if (!dmub || !diag_data)
+ if (!dmub)
return;
- memset(diag_data, 0, sizeof(*diag_data));
-
- diag_data->dmcub_version = dmub->fw_version;
-
- diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0);
- diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1);
- diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2);
- diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3);
- diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4);
- diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5);
- diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6);
- diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7);
- diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8);
- diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9);
- diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10);
- diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11);
- diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12);
- diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13);
- diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14);
- diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15);
- diag_data->scratch[16] = REG_READ(DMCUB_SCRATCH16);
-
- diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
- diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
- diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
-
- diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
- diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
- diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
-
- diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
- diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
- diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
-
- diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
- diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
- diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
+ /* timeout data filled externally, cache before resetting memory */
+ timeout = dmub->debug.timeout_info;
+ memset(&dmub->debug, 0, sizeof(dmub->debug));
+ dmub->debug.timeout_info = timeout;
+
+ dmub->debug.dmcub_version = dmub->fw_version;
+
+ dmub->debug.scratch[0] = REG_READ(DMCUB_SCRATCH0);
+ dmub->debug.scratch[1] = REG_READ(DMCUB_SCRATCH1);
+ dmub->debug.scratch[2] = REG_READ(DMCUB_SCRATCH2);
+ dmub->debug.scratch[3] = REG_READ(DMCUB_SCRATCH3);
+ dmub->debug.scratch[4] = REG_READ(DMCUB_SCRATCH4);
+ dmub->debug.scratch[5] = REG_READ(DMCUB_SCRATCH5);
+ dmub->debug.scratch[6] = REG_READ(DMCUB_SCRATCH6);
+ dmub->debug.scratch[7] = REG_READ(DMCUB_SCRATCH7);
+ dmub->debug.scratch[8] = REG_READ(DMCUB_SCRATCH8);
+ dmub->debug.scratch[9] = REG_READ(DMCUB_SCRATCH9);
+ dmub->debug.scratch[10] = REG_READ(DMCUB_SCRATCH10);
+ dmub->debug.scratch[11] = REG_READ(DMCUB_SCRATCH11);
+ dmub->debug.scratch[12] = REG_READ(DMCUB_SCRATCH12);
+ dmub->debug.scratch[13] = REG_READ(DMCUB_SCRATCH13);
+ dmub->debug.scratch[14] = REG_READ(DMCUB_SCRATCH14);
+ dmub->debug.scratch[15] = REG_READ(DMCUB_SCRATCH15);
+ dmub->debug.scratch[16] = REG_READ(DMCUB_SCRATCH16);
+
+ dmub->debug.undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
+ dmub->debug.inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
+ dmub->debug.data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
+
+ dmub->debug.inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
+ dmub->debug.inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
+ dmub->debug.inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
+
+ dmub->debug.inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
+ dmub->debug.inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
+ dmub->debug.inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
+
+ dmub->debug.outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
+ dmub->debug.outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
+ dmub->debug.outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
- diag_data->is_dmcub_enabled = is_dmub_enabled;
+ dmub->debug.is_dmcub_enabled = is_dmub_enabled;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
- diag_data->is_dmcub_soft_reset = is_soft_reset;
+ dmub->debug.is_dmcub_soft_reset = is_soft_reset;
REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset);
- diag_data->is_dmcub_secure_reset = is_sec_reset;
+ dmub->debug.is_dmcub_secure_reset = is_sec_reset;
REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
- diag_data->is_traceport_en = is_traceport_enabled;
+ dmub->debug.is_traceport_en = is_traceport_enabled;
REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled);
- diag_data->is_cw0_enabled = is_cw0_enabled;
+ dmub->debug.is_cw0_enabled = is_cw0_enabled;
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
- diag_data->is_cw6_enabled = is_cw6_enabled;
+ dmub->debug.is_cw6_enabled = is_cw6_enabled;
- diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
- diag_data->timeout_info = dmub->debug;
+ dmub->debug.gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
}
void dmub_dcn401_configure_dmub_in_system_memory(struct dmub_srv *dmub)
{
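The reworked dmub_dcn401_reset above replaces the fixed 30-iteration loop with polling phases that share a single 1 s budget: the loop index i carries over from one phase to the next, and each iteration sleeps for poll_delay_us. A compilable sketch of that shared-budget pattern, with hypothetical poll_ack()/poll_done() helpers and a stubbed udelay in place of the real GPINT and PWAIT_MODE checks:

#include <stdbool.h>

#define TIMEOUT_US    (1 * 1000 * 1000) /* 1 s */
#define POLL_DELAY_US 1

static void udelay_stub(unsigned int us) { (void)us; /* busy-wait in real code */ }
static bool poll_ack(void)  { return true; } /* hypothetical ACK check */
static bool poll_done(void) { return true; } /* hypothetical completion check */

static bool wait_for_stop(void)
{
	unsigned int i;

	for (i = 0; i < TIMEOUT_US; i++) {   /* phase 1: wait for ACK */
		if (poll_ack())
			break;
		udelay_stub(POLL_DELAY_US);
	}

	for (; i < TIMEOUT_US; i++) {        /* phase 2: same budget, wait for response */
		if (poll_done())
			break;
		udelay_stub(POLL_DELAY_US);
	}

	return i < TIMEOUT_US;               /* false means the shared 1 s budget ran out */
}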
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
index 4c8843b79695..c35be52676f6 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
@@ -169,7 +169,8 @@ struct dmub_srv;
DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_EN) \
DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_ACK) \
DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_STAT) \
- DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_EN)
+ DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_EN) \
+ DMUB_SF(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS)
struct dmub_srv_dcn401_reg_offset {
#define DMUB_SR(reg) uint32_t reg;
@@ -263,7 +264,7 @@ void dmub_dcn401_set_outbox0_rptr(struct dmub_srv *dmub, uint32_t rptr_offset);
uint32_t dmub_dcn401_get_current_time(struct dmub_srv *dmub);
-void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data);
+void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub);
void dmub_dcn401_configure_dmub_in_system_memory(struct dmub_srv *dmub);
void dmub_dcn401_send_inbox0_cmd(struct dmub_srv *dmub, union dmub_inbox0_data_register data);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 15ea216e903d..ae8133816b43 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -38,6 +38,7 @@
#include "dmub_dcn32.h"
#include "dmub_dcn35.h"
#include "dmub_dcn351.h"
+#include "dmub_dcn36.h"
#include "dmub_dcn401.h"
#include "os_types.h"
/*
@@ -314,6 +315,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
case DMUB_ASIC_DCN35:
case DMUB_ASIC_DCN351:
+ case DMUB_ASIC_DCN36:
dmub->regs_dcn35 = &dmub_srv_dcn35_regs;
funcs->configure_dmub_in_system_memory = dmub_dcn35_configure_dmub_in_system_memory;
funcs->send_inbox0_cmd = dmub_dcn35_send_inbox0_cmd;
@@ -351,7 +353,9 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->init_reg_offsets = dmub_srv_dcn35_regs_init;
if (asic == DMUB_ASIC_DCN351)
- funcs->init_reg_offsets = dmub_srv_dcn351_regs_init;
+ funcs->init_reg_offsets = dmub_srv_dcn351_regs_init;
+ if (asic == DMUB_ASIC_DCN36)
+ funcs->init_reg_offsets = dmub_srv_dcn36_regs_init;
funcs->is_hw_powered_up = dmub_dcn35_is_hw_powered_up;
funcs->should_detect = dmub_dcn35_should_detect;
@@ -704,7 +708,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
cw6.region.base = DMUB_CW6_BASE;
cw6.region.top = cw6.region.base + fw_state_fb->size;
- dmub->fw_state = fw_state_fb->cpu_addr;
+ dmub->fw_state = (void *)((uintptr_t)(fw_state_fb->cpu_addr) + DMUB_DEBUG_FW_STATE_OFFSET);
region6.offset.quad_part = shared_state_fb->gpu_addr;
region6.region.base = DMUB_CW6_BASE;
@@ -846,7 +850,7 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
flush_rb.rptr = dmub->inbox1_last_wptr;
dmub_rb_flush_pending(&flush_rb);
- dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
+ dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
@@ -915,7 +919,7 @@ enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
return DMUB_STATUS_INVALID;
for (i = 0; i <= timeout_us; ++i) {
- rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+ rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
if (rptr > dmub->inbox1_rb.capacity)
return DMUB_STATUS_HW_FAILURE;
@@ -1095,11 +1099,11 @@ bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entr
return dmub_rb_out_trace_buffer_front(&dmub->outbox0_rb, (void *)entry);
}
-bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
+bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub)
{
- if (!dmub || !dmub->hw_funcs.get_diagnostic_data || !diag_data)
+ if (!dmub || !dmub->hw_funcs.get_diagnostic_data)
return false;
- dmub->hw_funcs.get_diagnostic_data(dmub, diag_data);
+ dmub->hw_funcs.get_diagnostic_data(dmub);
return true;
}
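With the hunk above, dmub_srv_get_diagnostic_data() no longer takes an output buffer; callers check the return value and then read the snapshot from the server object itself. A hedged, self-contained mock of that shape (the struct and hook names here are illustrative, not the real dmub_srv API):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical mirror of the reworked API: diagnostics are captured into
 * state owned by the server object instead of a caller-supplied buffer. */
struct diag_state { unsigned int version; unsigned int enabled; };
struct srv {
	struct diag_state debug;
	void (*get_diagnostic_data)(struct srv *srv); /* per-ASIC hook */
};

static bool srv_get_diagnostic_data(struct srv *srv)
{
	if (!srv || !srv->get_diagnostic_data)
		return false;
	srv->get_diagnostic_data(srv);
	return true;
}

static void hw_capture(struct srv *srv)
{
	srv->debug.version = 0x123; /* would be register reads in the driver */
	srv->debug.enabled = 1;
}

int main(void)
{
	struct srv s = { .get_diagnostic_data = hw_capture };

	if (srv_get_diagnostic_data(&s))
		printf("fw 0x%x enabled=%u\n", s.debug.version, s.debug.enabled);
	return 0;
}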
@@ -1156,6 +1160,7 @@ void dmub_srv_subvp_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_
}
}
+
enum dmub_status dmub_srv_send_reg_inbox0_cmd(
struct dmub_srv *dmub,
union dmub_rb_cmd *cmd,
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 090230d29df8..5fc29164e4b4 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -257,6 +257,7 @@ enum {
#define ASICREV_IS_GC_11_0_2(eChipRev) (eChipRev >= GC_11_0_2_A0 && eChipRev < GC_11_0_3_A0)
#define ASICREV_IS_GC_11_0_3(eChipRev) (eChipRev >= GC_11_0_3_A0 && eChipRev < GC_11_UNKNOWN)
#define ASICREV_IS_GC_11_0_4(eChipRev) (eChipRev >= GC_11_0_4_A0 && eChipRev < GC_11_UNKNOWN)
+#define ASICREV_IS_DCN36(eChipRev) ((eChipRev) >= 0x50 && (eChipRev) < 0xC0)
#define AMDGPU_FAMILY_GC_12_0_0 152 /* GC 12.0.0 */
diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h
index 654387cf057f..a021d12acd74 100644
--- a/drivers/gpu/drm/amd/display/include/dal_types.h
+++ b/drivers/gpu/drm/amd/display/include/dal_types.h
@@ -63,6 +63,7 @@ enum dce_version {
DCN_VERSION_3_21,
DCN_VERSION_3_5,
DCN_VERSION_3_51,
+ DCN_VERSION_3_6,
DCN_VERSION_4_01,
DCN_VERSION_MAX
};
diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h
index 058f882d5bdd..4c01514b926c 100644
--- a/drivers/gpu/drm/amd/display/include/logger_interface.h
+++ b/drivers/gpu/drm/amd/display/include/logger_interface.h
@@ -40,11 +40,6 @@ struct dc_state;
*
*/
-void pre_surface_trace(
- struct dc *dc,
- const struct dc_plane_state *const *plane_states,
- int surface_count);
-
void update_surface_trace(
struct dc *dc,
const struct dc_surface_update *updates,
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index 4d68c1c6e210..177acb0574f1 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -32,6 +32,7 @@
#define DC_LOG_WARNING(...) drm_warn((DC_LOGGER)->dev, __VA_ARGS__)
#define DC_LOG_DEBUG(...) drm_dbg((DC_LOGGER)->dev, __VA_ARGS__)
#define DC_LOG_DC(...) drm_dbg((DC_LOGGER)->dev, __VA_ARGS__)
+#define DC_LOG_INFO(...) drm_info((DC_LOGGER)->dev, __VA_ARGS__)
#define DC_LOG_SURFACE(...) pr_debug("[SURFACE]:"__VA_ARGS__)
#define DC_LOG_HW_HOTPLUG(...) drm_dbg((DC_LOGGER)->dev, __VA_ARGS__)
#define DC_LOG_HW_LINK_TRAINING(...) pr_debug("[HW_LINK_TRAINING]:"__VA_ARGS__)
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 2b3964529539..3ba9b62ba70b 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -552,43 +552,6 @@ static bool vrr_settings_require_update(struct core_freesync *core_freesync,
return false;
}
-bool mod_freesync_get_vmin_vmax(struct mod_freesync *mod_freesync,
- const struct dc_stream_state *stream,
- unsigned int *vmin,
- unsigned int *vmax)
-{
- *vmin = stream->adjust.v_total_min;
- *vmax = stream->adjust.v_total_max;
-
- return true;
-}
-
-bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync,
- struct dc_stream_state *stream,
- unsigned int *nom_v_pos,
- unsigned int *v_pos)
-{
- struct core_freesync *core_freesync = NULL;
- struct crtc_position position;
-
- if (mod_freesync == NULL)
- return false;
-
- core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
-
- if (dc_stream_get_crtc_position(core_freesync->dc, &stream, 1,
- &position.vertical_count,
- &position.nominal_vcount)) {
-
- *nom_v_pos = position.nominal_vcount;
- *v_pos = position.vertical_count;
-
- return true;
- }
-
- return false;
-}
-
static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr,
struct dc_info_packet *infopacket,
bool freesync_on_desktop)
@@ -1291,28 +1254,6 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
}
}
-void mod_freesync_get_settings(struct mod_freesync *mod_freesync,
- const struct mod_vrr_params *vrr,
- unsigned int *v_total_min, unsigned int *v_total_max,
- unsigned int *event_triggers,
- unsigned int *window_min, unsigned int *window_max,
- unsigned int *lfc_mid_point_in_us,
- unsigned int *inserted_frames,
- unsigned int *inserted_duration_in_us)
-{
- if (mod_freesync == NULL)
- return;
-
- if (vrr->supported) {
- *v_total_min = vrr->adjust.v_total_min;
- *v_total_max = vrr->adjust.v_total_max;
- *event_triggers = 0;
- *lfc_mid_point_in_us = vrr->btr.mid_point_in_us;
- *inserted_frames = vrr->btr.frames_to_insert;
- *inserted_duration_in_us = vrr->btr.inserted_duration_in_us;
- }
-}
-
unsigned long long mod_freesync_calc_nominal_field_rate(
const struct dc_stream_state *stream)
{
@@ -1328,85 +1269,7 @@ unsigned long long mod_freesync_calc_nominal_field_rate(
return nominal_field_rate_in_uhz;
}
-unsigned long long mod_freesync_calc_field_rate_from_timing(
- unsigned int vtotal, unsigned int htotal, unsigned int pix_clk)
-{
- unsigned long long field_rate_in_uhz = 0;
- unsigned int total = htotal * vtotal;
-
- /* Calculate nominal field rate for stream, rounded up to nearest integer */
- field_rate_in_uhz = pix_clk;
- field_rate_in_uhz *= 1000000ULL;
-
- field_rate_in_uhz = div_u64(field_rate_in_uhz, total);
-
- return field_rate_in_uhz;
-}
-
bool mod_freesync_get_freesync_enabled(struct mod_vrr_params *pVrr)
{
return (pVrr->state != VRR_STATE_UNSUPPORTED) && (pVrr->state != VRR_STATE_DISABLED);
}
-
-bool mod_freesync_is_valid_range(uint32_t min_refresh_cap_in_uhz,
- uint32_t max_refresh_cap_in_uhz,
- uint32_t nominal_field_rate_in_uhz)
-{
-
- /* Typically nominal refresh calculated can have some fractional part.
- * Allow for some rounding error of actual video timing by taking floor
- * of caps and request. Round the nominal refresh rate.
- *
- * Dividing will convert everything to units in Hz although input
- * variable name is in uHz!
- *
- * Also note, this takes care of rounding error on the nominal refresh
- * so by rounding error we only expect it to be off by a small amount,
- * such as < 0.1 Hz. i.e. 143.9xxx or 144.1xxx.
- *
- * Example 1. Caps Min = 40 Hz, Max = 144 Hz
- * Request Min = 40 Hz, Max = 144 Hz
- * Nominal = 143.5x Hz rounded to 144 Hz
- * This function should allow this as valid request
- *
- * Example 2. Caps Min = 40 Hz, Max = 144 Hz
- * Request Min = 40 Hz, Max = 144 Hz
- * Nominal = 144.4x Hz rounded to 144 Hz
- * This function should allow this as valid request
- *
- * Example 3. Caps Min = 40 Hz, Max = 144 Hz
- * Request Min = 40 Hz, Max = 144 Hz
- * Nominal = 120.xx Hz rounded to 120 Hz
- * This function should return NOT valid since the requested
- * max is greater than current timing's nominal
- *
- * Example 4. Caps Min = 40 Hz, Max = 120 Hz
- * Request Min = 40 Hz, Max = 120 Hz
- * Nominal = 144.xx Hz rounded to 144 Hz
- * This function should return NOT valid since the nominal
- * is greater than the capability's max refresh
- */
- nominal_field_rate_in_uhz =
- div_u64(nominal_field_rate_in_uhz + 500000, 1000000);
- min_refresh_cap_in_uhz /= 1000000;
- max_refresh_cap_in_uhz /= 1000000;
-
- /* Check nominal is within range */
- if (nominal_field_rate_in_uhz > max_refresh_cap_in_uhz ||
- nominal_field_rate_in_uhz < min_refresh_cap_in_uhz)
- return false;
-
- /* If nominal is less than max, limit the max allowed refresh rate */
- if (nominal_field_rate_in_uhz < max_refresh_cap_in_uhz)
- max_refresh_cap_in_uhz = nominal_field_rate_in_uhz;
-
- /* Check min is within range */
- if (min_refresh_cap_in_uhz > max_refresh_cap_in_uhz)
- return false;
-
- /* For variable range, check for at least 10 Hz range */
- if (nominal_field_rate_in_uhz - min_refresh_cap_in_uhz < 10)
- return false;
-
- return true;
-}
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index cc3dc9b589f6..57916ed98c86 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -110,25 +110,6 @@ struct mod_vrr_params {
struct mod_freesync *mod_freesync_create(struct dc *dc);
void mod_freesync_destroy(struct mod_freesync *mod_freesync);
-bool mod_freesync_get_vmin_vmax(struct mod_freesync *mod_freesync,
- const struct dc_stream_state *stream,
- unsigned int *vmin,
- unsigned int *vmax);
-
-bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync,
- struct dc_stream_state *stream,
- unsigned int *nom_v_pos,
- unsigned int *v_pos);
-
-void mod_freesync_get_settings(struct mod_freesync *mod_freesync,
- const struct mod_vrr_params *vrr,
- unsigned int *v_total_min, unsigned int *v_total_max,
- unsigned int *event_triggers,
- unsigned int *window_min, unsigned int *window_max,
- unsigned int *lfc_mid_point_in_us,
- unsigned int *inserted_frames,
- unsigned int *inserted_duration_in_us);
-
void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
const struct dc_stream_state *stream,
const struct mod_vrr_params *vrr,
@@ -155,13 +136,6 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
unsigned long long mod_freesync_calc_nominal_field_rate(
const struct dc_stream_state *stream);
-unsigned long long mod_freesync_calc_field_rate_from_timing(
- unsigned int vtotal, unsigned int htotal, unsigned int pix_clk);
-
-bool mod_freesync_is_valid_range(uint32_t min_refresh_cap_in_uhz,
- uint32_t max_refresh_cap_in_uhz,
- uint32_t nominal_field_rate_in_uhz);
-
unsigned int mod_freesync_calc_v_total_from_refresh(
const struct dc_stream_state *stream,
unsigned int refresh_in_uhz);
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index a344e2e49b0e..b3d55cac3569 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -383,10 +383,10 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
colorimetryFormat = ColorimetryYCC_DP_ITU709;
else if (cs == COLOR_SPACE_ADOBERGB)
colorimetryFormat = ColorimetryYCC_DP_AdobeYCC;
- else if (cs == COLOR_SPACE_2020_YCBCR)
+ else if (cs == COLOR_SPACE_2020_YCBCR_LIMITED)
colorimetryFormat = ColorimetryYCC_DP_ITU2020YCbCr;
- if (cs == COLOR_SPACE_2020_YCBCR && tf == TRANSFER_FUNC_GAMMA_22)
+ if (cs == COLOR_SPACE_2020_YCBCR_LIMITED && tf == TRANSFER_FUNC_GAMMA_22)
colorimetryFormat = ColorimetryYCC_DP_ITU709;
break;
diff --git a/drivers/gpu/drm/amd/include/amd_acpi.h b/drivers/gpu/drm/amd/include/amd_acpi.h
index 2d089d30518f..06badbf0c5b9 100644
--- a/drivers/gpu/drm/amd/include/amd_acpi.h
+++ b/drivers/gpu/drm/amd/include/amd_acpi.h
@@ -61,7 +61,7 @@ struct atif_qbtc_arguments {
struct atif_qbtc_data_point {
u8 luminance; /* luminance in percent */
- u8 ipnut_signal; /* input signal in range 0-255 */
+ u8 input_signal; /* input signal in range 0-255 */
} __packed;
struct atif_qbtc_output {
@@ -75,6 +75,8 @@ struct atif_qbtc_output {
u8 number_of_points; /* number of data points */
struct atif_qbtc_data_point data_points[ATIF_QBTC_MAX_DATA_POINTS];
} __packed;
+static_assert(ATIF_QBTC_MAX_DATA_POINTS == MAX_LUMINANCE_DATA_POINTS);
+static_assert(sizeof(struct atif_qbtc_data_point) == sizeof(struct amdgpu_dm_luminance_data));
#define ATIF_NOTIFY_MASK 0x3
#define ATIF_NOTIFY_NONE 0
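The two static_asserts added above pin the firmware-defined ATIF data-point layout to the driver-side structure it is copied into, so a mismatch fails at build time rather than corrupting the brightness curve at runtime. A tiny sketch of the same compile-time layout check, with hypothetical struct names:

#include <assert.h>
#include <stdint.h>

/* Two independently maintained definitions that must stay byte-compatible. */
struct fw_point  { uint8_t luminance; uint8_t input_signal; } __attribute__((packed));
struct drv_point { uint8_t luminance; uint8_t input_signal; } __attribute__((packed));

static_assert(sizeof(struct fw_point) == sizeof(struct drv_point),
	      "firmware and driver luminance points must stay in sync");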
diff --git a/drivers/gpu/drm/amd/include/amd_cper.h b/drivers/gpu/drm/amd/include/amd_cper.h
new file mode 100644
index 000000000000..086869264425
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/amd_cper.h
@@ -0,0 +1,269 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMD_CPER_H__
+#define __AMD_CPER_H__
+
+#include <linux/uuid.h>
+
+#define CPER_HDR_REV_1 (0x100)
+#define CPER_SEC_MINOR_REV_1 (0x01)
+#define CPER_SEC_MAJOR_REV_22 (0x22)
+#define CPER_MAX_OAM_COUNT (8)
+
+#define CPER_CTX_TYPE_CRASH (1)
+#define CPER_CTX_TYPE_BOOT (9)
+
+#define CPER_CREATOR_ID_AMDGPU "amdgpu"
+
+#define CPER_NOTIFY_MCE \
+ GUID_INIT(0xE8F56FFE, 0x919C, 0x4cc5, 0xBA, 0x88, 0x65, 0xAB, \
+ 0xE1, 0x49, 0x13, 0xBB)
+#define CPER_NOTIFY_CMC \
+ GUID_INIT(0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, \
+ 0xEB, 0xD4, 0xF8, 0x90)
+#define BOOT_TYPE \
+ GUID_INIT(0x3D61A466, 0xAB40, 0x409a, 0xA6, 0x98, 0xF3, 0x62, \
+ 0xD4, 0x64, 0xB3, 0x8F)
+
+#define AMD_CRASHDUMP \
+ GUID_INIT(0x32AC0C78, 0x2623, 0x48F6, 0xB0, 0xD0, 0x73, 0x65, \
+ 0x72, 0x5F, 0xD6, 0xAE)
+#define AMD_GPU_NONSTANDARD_ERROR \
+ GUID_INIT(0x32AC0C78, 0x2623, 0x48F6, 0x81, 0xA2, 0xAC, 0x69, \
+ 0x17, 0x80, 0x55, 0x1D)
+#define PROC_ERR_SECTION_TYPE \
+ GUID_INIT(0xDC3EA0B0, 0xA144, 0x4797, 0xB9, 0x5B, 0x53, 0xFA, \
+ 0x24, 0x2B, 0x6E, 0x1D)
+
+enum cper_error_severity {
+ CPER_SEV_NON_FATAL_UNCORRECTED = 0,
+ CPER_SEV_FATAL = 1,
+ CPER_SEV_NON_FATAL_CORRECTED = 2,
+ CPER_SEV_NUM = 3,
+
+ CPER_SEV_UNUSED = 10,
+};
+
+enum cper_aca_reg {
+ CPER_ACA_REG_CTL_LO = 0,
+ CPER_ACA_REG_CTL_HI = 1,
+ CPER_ACA_REG_STATUS_LO = 2,
+ CPER_ACA_REG_STATUS_HI = 3,
+ CPER_ACA_REG_ADDR_LO = 4,
+ CPER_ACA_REG_ADDR_HI = 5,
+ CPER_ACA_REG_MISC0_LO = 6,
+ CPER_ACA_REG_MISC0_HI = 7,
+ CPER_ACA_REG_CONFIG_LO = 8,
+ CPER_ACA_REG_CONFIG_HI = 9,
+ CPER_ACA_REG_IPID_LO = 10,
+ CPER_ACA_REG_IPID_HI = 11,
+ CPER_ACA_REG_SYND_LO = 12,
+ CPER_ACA_REG_SYND_HI = 13,
+
+ CPER_ACA_REG_COUNT = 32,
+};
+
+#pragma pack(push, 1)
+
+struct cper_timestamp {
+ uint8_t seconds;
+ uint8_t minutes;
+ uint8_t hours;
+ uint8_t flag;
+ uint8_t day;
+ uint8_t month;
+ uint8_t year;
+ uint8_t century;
+};
+
+struct cper_hdr {
+ char signature[4]; /* "CPER" */
+ uint16_t revision;
+ uint32_t signature_end; /* 0xFFFFFFFF */
+ uint16_t sec_cnt;
+ enum cper_error_severity error_severity;
+ union {
+ struct {
+ uint32_t platform_id : 1;
+ uint32_t timestamp : 1;
+ uint32_t partition_id : 1;
+ uint32_t reserved : 29;
+ } valid_bits;
+ uint32_t valid_mask;
+ };
+ uint32_t record_length; /* Total size of CPER Entry */
+ struct cper_timestamp timestamp;
+ char platform_id[16];
+ guid_t partition_id; /* Reserved */
+ char creator_id[16];
+ guid_t notify_type; /* CMC, MCE */
+ char record_id[8]; /* Unique CPER Entry ID */
+ uint32_t flags; /* Reserved */
+ uint64_t persistence_info; /* Reserved */
+ uint8_t reserved[12]; /* Reserved */
+};
+
+struct cper_sec_desc {
+ uint32_t sec_offset; /* Offset from the start of CPER entry */
+ uint32_t sec_length;
+ uint8_t revision_minor; /* CPER_SEC_MINOR_REV_1 */
+ uint8_t revision_major; /* CPER_SEC_MAJOR_REV_22 */
+ union {
+ struct {
+ uint8_t fru_id : 1;
+ uint8_t fru_text : 1;
+ uint8_t reserved : 6;
+ } valid_bits;
+ uint8_t valid_mask;
+ };
+ uint8_t reserved;
+ union {
+ struct {
+ uint32_t primary : 1;
+ uint32_t reserved1 : 2;
+ uint32_t exceed_err_threshold : 1;
+ uint32_t latent_err : 1;
+ uint32_t reserved2 : 27;
+ } flag_bits;
+ uint32_t flag_mask;
+ };
+ guid_t sec_type;
+ char fru_id[16];
+ enum cper_error_severity severity;
+ char fru_text[20];
+};
+
+struct cper_sec_nonstd_err_hdr {
+ union {
+ struct {
+ uint64_t apic_id : 1;
+ uint64_t fw_id : 1;
+ uint64_t err_info_cnt : 6;
+ uint64_t err_context_cnt : 6;
+ } valid_bits;
+ uint64_t valid_mask;
+ };
+ uint64_t apic_id;
+ char fw_id[48];
+};
+
+struct cper_sec_nonstd_err_info {
+ guid_t error_type;
+ union {
+ struct {
+ uint64_t ms_chk : 1;
+ uint64_t target_addr_id : 1;
+ uint64_t req_id : 1;
+ uint64_t resp_id : 1;
+ uint64_t instr_ptr : 1;
+ uint64_t reserved : 59;
+ } valid_bits;
+ uint64_t valid_mask;
+ };
+ union {
+ struct {
+ uint64_t err_type_valid : 1;
+ uint64_t pcc_valid : 1;
+ uint64_t uncorr_valid : 1;
+ uint64_t precise_ip_valid : 1;
+ uint64_t restartable_ip_valid : 1;
+ uint64_t overflow_valid : 1;
+ uint64_t reserved1 : 10;
+ uint64_t err_type : 2;
+ uint64_t pcc : 1;
+ uint64_t uncorr : 1;
+ uint64_t precised_ip : 1;
+ uint64_t restartable_ip : 1;
+ uint64_t overflow : 1;
+ uint64_t reserved2 : 41;
+ } ms_chk_bits;
+ uint64_t ms_chk_mask;
+ };
+ uint64_t target_addr_id;
+ uint64_t req_id;
+ uint64_t resp_id;
+ uint64_t instr_ptr;
+};
+
+struct cper_sec_nonstd_err_ctx {
+ uint16_t reg_ctx_type;
+ uint16_t reg_arr_size;
+ uint32_t msr_addr;
+ uint64_t mm_reg_addr;
+ uint32_t reg_dump[CPER_ACA_REG_COUNT];
+};
+
+struct cper_sec_nonstd_err {
+ struct cper_sec_nonstd_err_hdr hdr;
+ struct cper_sec_nonstd_err_info info;
+ struct cper_sec_nonstd_err_ctx ctx;
+};
+
+struct cper_sec_crashdump_hdr {
+ uint64_t reserved1;
+ uint64_t reserved2;
+ char fw_id[48];
+ uint64_t reserved3[8];
+};
+
+struct cper_sec_crashdump_reg_data {
+ uint32_t status_lo;
+ uint32_t status_hi;
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint32_t ipid_lo;
+ uint32_t ipid_hi;
+ uint32_t synd_lo;
+ uint32_t synd_hi;
+};
+
+struct cper_sec_crashdump_body_fatal {
+ uint16_t reg_ctx_type;
+ uint16_t reg_arr_size;
+ uint32_t reserved1;
+ uint64_t reserved2;
+ struct cper_sec_crashdump_reg_data data;
+};
+
+struct cper_sec_crashdump_body_boot {
+ uint16_t reg_ctx_type;
+ uint16_t reg_arr_size;
+ uint32_t reserved1;
+ uint64_t reserved2;
+ uint64_t msg[CPER_MAX_OAM_COUNT];
+};
+
+struct cper_sec_crashdump_fatal {
+ struct cper_sec_crashdump_hdr hdr;
+ struct cper_sec_crashdump_body_fatal body;
+};
+
+struct cper_sec_crashdump_boot {
+ struct cper_sec_crashdump_hdr hdr;
+ struct cper_sec_crashdump_body_boot body;
+};
+
+#pragma pack(pop)
+
+#endif
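amd_cper.h above only declares the packed record layouts; producers still have to fill them in. A hedged sketch of populating the fixed parts of struct cper_hdr using the constants from this header (the helper name and the section count are illustrative, and the timestamp itself is left to the caller):

#include <linux/string.h>
#include "amd_cper.h"

/* Hypothetical helper: fill the invariant fields of a CPER record header. */
static void demo_fill_cper_hdr(struct cper_hdr *hdr, uint16_t sec_cnt,
			       enum cper_error_severity sev)
{
	memset(hdr, 0, sizeof(*hdr));

	memcpy(hdr->signature, "CPER", 4);
	hdr->revision = CPER_HDR_REV_1;
	hdr->signature_end = 0xFFFFFFFF;
	hdr->sec_cnt = sec_cnt;
	hdr->error_severity = sev;

	/* Mark the timestamp as valid; the caller fills hdr->timestamp. */
	hdr->valid_bits.timestamp = 1;

	strscpy(hdr->creator_id, CPER_CREATOR_ID_AMDGPU, sizeof(hdr->creator_id));
	hdr->notify_type = CPER_NOTIFY_MCE;

	/* Grows as section descriptors and bodies are appended. */
	hdr->record_length = sizeof(*hdr);
}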
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 6dccee403a3d..4c95b885d1d0 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -354,6 +354,22 @@ enum DC_DEBUG_MASK {
* @DC_DISABLE_SUBVP: If set, disable DCN Sub-Viewport feature in amdgpu driver.
*/
DC_DISABLE_SUBVP = 0x20000,
+ /**
+ * @DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE: If set, disable support for custom brightness curves
+ */
+ DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE = 0x40000,
+
+ /**
+ * @DC_HDCP_LC_FORCE_FW_ENABLE: If set, use HDCP Locality Check FW
+ * path regardless of reported HW capabilities.
+ */
+ DC_HDCP_LC_FORCE_FW_ENABLE = 0x80000,
+
+ /**
+ * @DC_HDCP_LC_ENABLE_SW_FALLBACK: If set, upon HDCP Locality Check FW
+ * path failure, retry using legacy SW path.
+ */
+ DC_HDCP_LC_ENABLE_SW_FALLBACK = 0x100000,
};
enum amd_dpm_forced_level;
@@ -405,7 +421,7 @@ struct amd_ip_funcs {
int (*prepare_suspend)(struct amdgpu_ip_block *ip_block);
int (*suspend)(struct amdgpu_ip_block *ip_block);
int (*resume)(struct amdgpu_ip_block *ip_block);
- bool (*is_idle)(void *handle);
+ bool (*is_idle)(struct amdgpu_ip_block *ip_block);
int (*wait_for_idle)(struct amdgpu_ip_block *ip_block);
bool (*check_soft_reset)(struct amdgpu_ip_block *ip_block);
int (*pre_soft_reset)(struct amdgpu_ip_block *ip_block);
@@ -415,7 +431,7 @@ struct amd_ip_funcs {
enum amd_clockgating_state state);
int (*set_powergating_state)(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
- void (*get_clockgating_state)(void *handle, u64 *flags);
+ void (*get_clockgating_state)(struct amdgpu_ip_block *ip_block, u64 *flags);
void (*dump_ip_state)(struct amdgpu_ip_block *ip_block);
void (*print_ip_state)(struct amdgpu_ip_block *ip_block, struct drm_printer *p);
};
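The amd_shared.h hunk above adds three DC_DEBUG_MASK bits; like the existing ones they are meant to be OR-ed into the dc_debug_mask module parameter and tested with bitwise AND. A minimal sketch of how the two HDCP locality-check bits might be consulted (the helper functions are hypothetical; only the bit values come from the enum above):

#include <stdbool.h>
#include <stdint.h>

/* Re-declared here only to keep the sketch standalone. */
enum {
	DEMO_DC_HDCP_LC_FORCE_FW_ENABLE    = 0x80000,
	DEMO_DC_HDCP_LC_ENABLE_SW_FALLBACK = 0x100000,
};

static bool hdcp_lc_use_fw(uint64_t dc_debug_mask, bool hw_supports_fw_lc)
{
	/* Force the FW path when the debug bit is set, otherwise follow HW caps. */
	return hw_supports_fw_lc || (dc_debug_mask & DEMO_DC_HDCP_LC_FORCE_FW_ENABLE);
}

static bool hdcp_lc_allow_sw_fallback(uint64_t dc_debug_mask)
{
	return (dc_debug_mask & DEMO_DC_HDCP_LC_ENABLE_SW_FALLBACK) != 0;
}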
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_6_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_6_0_offset.h
new file mode 100644
index 000000000000..b070f9b91f17
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_6_0_offset.h
@@ -0,0 +1,15485 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2025 Advanced Micro Devices, Inc. */
+
+
+#ifndef _dcn_3_6_0_OFFSET_HEADER
+#define _dcn_3_6_0_OFFSET_HEADER
+
+
+
+// addressBlock: dce_dc_hda_azcontroller_azdec
+// base address: 0x1300000
+#define regGLOBAL_CAPABILITIES 0x4b7000
+#define regGLOBAL_CAPABILITIES_BASE_IDX 3
+#define regMINOR_VERSION 0x4b7000
+#define regMINOR_VERSION_BASE_IDX 3
+#define regMAJOR_VERSION 0x4b7000
+#define regMAJOR_VERSION_BASE_IDX 3
+#define regOUTPUT_PAYLOAD_CAPABILITY 0x4b7001
+#define regOUTPUT_PAYLOAD_CAPABILITY_BASE_IDX 3
+#define regINPUT_PAYLOAD_CAPABILITY 0x4b7001
+#define regINPUT_PAYLOAD_CAPABILITY_BASE_IDX 3
+#define regGLOBAL_CONTROL 0x4b7002
+#define regGLOBAL_CONTROL_BASE_IDX 3
+#define regWAKE_ENABLE 0x4b7003
+#define regWAKE_ENABLE_BASE_IDX 3
+#define regSTATE_CHANGE_STATUS 0x4b7003
+#define regSTATE_CHANGE_STATUS_BASE_IDX 3
+#define regGLOBAL_STATUS 0x4b7004
+#define regGLOBAL_STATUS_BASE_IDX 3
+#define regOUTPUT_STREAM_PAYLOAD_CAPABILITY 0x4b7006
+#define regOUTPUT_STREAM_PAYLOAD_CAPABILITY_BASE_IDX 3
+#define regINPUT_STREAM_PAYLOAD_CAPABILITY 0x4b7006
+#define regINPUT_STREAM_PAYLOAD_CAPABILITY_BASE_IDX 3
+#define regINTERRUPT_CONTROL 0x4b7008
+#define regINTERRUPT_CONTROL_BASE_IDX 3
+#define regINTERRUPT_STATUS 0x4b7009
+#define regINTERRUPT_STATUS_BASE_IDX 3
+#define regWALL_CLOCK_COUNTER 0x4b700c
+#define regWALL_CLOCK_COUNTER_BASE_IDX 3
+#define regSTREAM_SYNCHRONIZATION 0x4b700e
+#define regSTREAM_SYNCHRONIZATION_BASE_IDX 3
+#define regCORB_LOWER_BASE_ADDRESS 0x4b7010
+#define regCORB_LOWER_BASE_ADDRESS_BASE_IDX 3
+#define regCORB_UPPER_BASE_ADDRESS 0x4b7011
+#define regCORB_UPPER_BASE_ADDRESS_BASE_IDX 3
+#define regAZCONTROLLER0_CORB_WRITE_POINTER 0x4b7012
+#define regAZCONTROLLER0_CORB_WRITE_POINTER_BASE_IDX 3
+#define regAZCONTROLLER0_CORB_READ_POINTER 0x4b7012
+#define regAZCONTROLLER0_CORB_READ_POINTER_BASE_IDX 3
+#define regAZCONTROLLER0_CORB_CONTROL 0x4b7013
+#define regAZCONTROLLER0_CORB_CONTROL_BASE_IDX 3
+#define regAZCONTROLLER0_CORB_STATUS 0x4b7013
+#define regAZCONTROLLER0_CORB_STATUS_BASE_IDX 3
+#define regAZCONTROLLER0_CORB_SIZE 0x4b7013
+#define regAZCONTROLLER0_CORB_SIZE_BASE_IDX 3
+#define regAZCONTROLLER0_RIRB_LOWER_BASE_ADDRESS 0x4b7014
+#define regAZCONTROLLER0_RIRB_LOWER_BASE_ADDRESS_BASE_IDX 3
+#define regAZCONTROLLER0_RIRB_UPPER_BASE_ADDRESS 0x4b7015
+#define regAZCONTROLLER0_RIRB_UPPER_BASE_ADDRESS_BASE_IDX 3
+#define regAZCONTROLLER0_RIRB_WRITE_POINTER 0x4b7016
+#define regAZCONTROLLER0_RIRB_WRITE_POINTER_BASE_IDX 3
+#define regAZCONTROLLER0_RESPONSE_INTERRUPT_COUNT 0x4b7016
+#define regAZCONTROLLER0_RESPONSE_INTERRUPT_COUNT_BASE_IDX 3
+#define regAZCONTROLLER0_RIRB_CONTROL 0x4b7017
+#define regAZCONTROLLER0_RIRB_CONTROL_BASE_IDX 3
+#define regAZCONTROLLER0_RIRB_STATUS 0x4b7017
+#define regAZCONTROLLER0_RIRB_STATUS_BASE_IDX 3
+#define regAZCONTROLLER0_RIRB_SIZE 0x4b7017
+#define regAZCONTROLLER0_RIRB_SIZE_BASE_IDX 3
+#define regAZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE 0x4b7018
+#define regAZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_BASE_IDX 3
+#define regAZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA 0x4b7018
+#define regAZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA_BASE_IDX 3
+#define regAZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX 0x4b7018
+#define regAZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX_BASE_IDX 3
+#define regAZCONTROLLER0_IMMEDIATE_RESPONSE_INPUT_INTERFACE 0x4b7019
+#define regAZCONTROLLER0_IMMEDIATE_RESPONSE_INPUT_INTERFACE_BASE_IDX 3
+#define regAZCONTROLLER0_IMMEDIATE_COMMAND_STATUS 0x4b701a
+#define regAZCONTROLLER0_IMMEDIATE_COMMAND_STATUS_BASE_IDX 3
+#define regAZCONTROLLER0_DMA_POSITION_LOWER_BASE_ADDRESS 0x4b701c
+#define regAZCONTROLLER0_DMA_POSITION_LOWER_BASE_ADDRESS_BASE_IDX 3
+#define regAZCONTROLLER0_DMA_POSITION_UPPER_BASE_ADDRESS 0x4b701d
+#define regAZCONTROLLER0_DMA_POSITION_UPPER_BASE_ADDRESS_BASE_IDX 3
+#define regAZCONTROLLER0_WALL_CLOCK_COUNTER_ALIAS 0x4b780c
+#define regAZCONTROLLER0_WALL_CLOCK_COUNTER_ALIAS_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hda_azendpoint_azdec
+// base address: 0x1300000
+#define regAZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA 0x4b7018
+#define regAZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA_BASE_IDX 3
+#define regAZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX 0x4b7018
+#define regAZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hda_azinputendpoint_azdec
+// base address: 0x1300000
+#define regAZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA 0x4b7018
+#define regAZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA_BASE_IDX 3
+#define regAZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX 0x4b7018
+#define regAZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hda_azcontroller_azdec
+// base address: 0x0
+#define regAZCONTROLLER1_CORB_WRITE_POINTER 0x0000
+#define regAZCONTROLLER1_CORB_WRITE_POINTER_BASE_IDX 0
+#define regAZCONTROLLER1_CORB_READ_POINTER 0x0000
+#define regAZCONTROLLER1_CORB_READ_POINTER_BASE_IDX 0
+#define regAZCONTROLLER1_CORB_CONTROL 0x0001
+#define regAZCONTROLLER1_CORB_CONTROL_BASE_IDX 0
+#define regAZCONTROLLER1_CORB_STATUS 0x0001
+#define regAZCONTROLLER1_CORB_STATUS_BASE_IDX 0
+#define regAZCONTROLLER1_CORB_SIZE 0x0001
+#define regAZCONTROLLER1_CORB_SIZE_BASE_IDX 0
+#define regAZCONTROLLER1_RIRB_LOWER_BASE_ADDRESS 0x0002
+#define regAZCONTROLLER1_RIRB_LOWER_BASE_ADDRESS_BASE_IDX 0
+#define regAZCONTROLLER1_RIRB_UPPER_BASE_ADDRESS 0x0003
+#define regAZCONTROLLER1_RIRB_UPPER_BASE_ADDRESS_BASE_IDX 0
+#define regAZCONTROLLER1_RIRB_WRITE_POINTER 0x0004
+#define regAZCONTROLLER1_RIRB_WRITE_POINTER_BASE_IDX 0
+#define regAZCONTROLLER1_RESPONSE_INTERRUPT_COUNT 0x0004
+#define regAZCONTROLLER1_RESPONSE_INTERRUPT_COUNT_BASE_IDX 0
+#define regAZCONTROLLER1_RIRB_CONTROL 0x0005
+#define regAZCONTROLLER1_RIRB_CONTROL_BASE_IDX 0
+#define regAZCONTROLLER1_RIRB_STATUS 0x0005
+#define regAZCONTROLLER1_RIRB_STATUS_BASE_IDX 0
+#define regAZCONTROLLER1_RIRB_SIZE 0x0005
+#define regAZCONTROLLER1_RIRB_SIZE_BASE_IDX 0
+#define regAZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE 0x0006
+#define regAZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_BASE_IDX 0
+#define regAZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA 0x0006
+#define regAZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA_BASE_IDX 0
+#define regAZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX 0x0006
+#define regAZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX_BASE_IDX 0
+#define regAZCONTROLLER1_IMMEDIATE_RESPONSE_INPUT_INTERFACE 0x0007
+#define regAZCONTROLLER1_IMMEDIATE_RESPONSE_INPUT_INTERFACE_BASE_IDX 0
+#define regAZCONTROLLER1_IMMEDIATE_COMMAND_STATUS 0x0008
+#define regAZCONTROLLER1_IMMEDIATE_COMMAND_STATUS_BASE_IDX 0
+#define regAZCONTROLLER1_DMA_POSITION_LOWER_BASE_ADDRESS 0x000a
+#define regAZCONTROLLER1_DMA_POSITION_LOWER_BASE_ADDRESS_BASE_IDX 0
+#define regAZCONTROLLER1_DMA_POSITION_UPPER_BASE_ADDRESS 0x000b
+#define regAZCONTROLLER1_DMA_POSITION_UPPER_BASE_ADDRESS_BASE_IDX 0
+#define regAZCONTROLLER1_WALL_CLOCK_COUNTER_ALIAS 0x074c
+#define regAZCONTROLLER1_WALL_CLOCK_COUNTER_ALIAS_BASE_IDX 1
+
+
+// addressBlock: dce_dc_hda_azendpoint_azdec
+// base address: 0x0
+#define regAZENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA 0x0006
+#define regAZENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA_BASE_IDX 0
+#define regAZENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX 0x0006
+#define regAZENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX_BASE_IDX 0
+
+
+// addressBlock: dce_dc_hda_azinputendpoint_azdec
+// base address: 0x0
+#define regAZINPUTENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA 0x0006
+#define regAZINPUTENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA_BASE_IDX 0
+#define regAZINPUTENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX 0x0006
+#define regAZINPUTENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX_BASE_IDX 0
+
+
+// addressBlock: dce_dc_dccg_dccg_dfs_dispdec
+// base address: 0x0
+#define regDENTIST_DISPCLK_CNTL 0x0064
+#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1
+
+
+// addressBlock: dce_dc_dccg_dccg_dispdec
+// base address: 0x0
+#define regPHYPLLA_PIXCLK_RESYNC_CNTL 0x0040
+#define regPHYPLLA_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define regPHYPLLB_PIXCLK_RESYNC_CNTL 0x0041
+#define regPHYPLLB_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define regPHYPLLC_PIXCLK_RESYNC_CNTL 0x0042
+#define regPHYPLLC_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define regPHYPLLD_PIXCLK_RESYNC_CNTL 0x0043
+#define regPHYPLLD_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define regDP_DTO_DBUF_EN 0x0044
+#define regDP_DTO_DBUF_EN_BASE_IDX 1
+#define regDSCCLK3_DTO_PARAM 0x0045
+#define regDSCCLK3_DTO_PARAM_BASE_IDX 1
+#define regDPREFCLK_CGTT_BLK_CTRL_REG 0x0048
+#define regDPREFCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define regDCCG_GATE_DISABLE_CNTL4 0x0049
+#define regDCCG_GATE_DISABLE_CNTL4_BASE_IDX 1
+#define regDPSTREAMCLK_CNTL 0x004a
+#define regDPSTREAMCLK_CNTL_BASE_IDX 1
+#define regREFCLK_CGTT_BLK_CTRL_REG 0x004b
+#define regREFCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define regPHYPLLE_PIXCLK_RESYNC_CNTL 0x004c
+#define regPHYPLLE_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define regDCCG_PERFMON_CNTL2 0x004e
+#define regDCCG_PERFMON_CNTL2_BASE_IDX 1
+#define regDCCG_GLOBAL_FGCG_REP_CNTL 0x0050
+#define regDCCG_GLOBAL_FGCG_REP_CNTL_BASE_IDX 1
+#define regDCCG_DS_DTO_INCR 0x0053
+#define regDCCG_DS_DTO_INCR_BASE_IDX 1
+#define regDCCG_DS_DTO_MODULO 0x0054
+#define regDCCG_DS_DTO_MODULO_BASE_IDX 1
+#define regDCCG_DS_CNTL 0x0055
+#define regDCCG_DS_CNTL_BASE_IDX 1
+#define regDCCG_DS_HW_CAL_INTERVAL 0x0056
+#define regDCCG_DS_HW_CAL_INTERVAL_BASE_IDX 1
+#define regDPREFCLK_CNTL 0x0058
+#define regDPREFCLK_CNTL_BASE_IDX 1
+#define regDCE_VERSION 0x005e
+#define regDCE_VERSION_BASE_IDX 1
+#define regDCCG_GTC_CNTL 0x0060
+#define regDCCG_GTC_CNTL_BASE_IDX 1
+#define regDCCG_GTC_DTO_INCR 0x0061
+#define regDCCG_GTC_DTO_INCR_BASE_IDX 1
+#define regDCCG_GTC_DTO_MODULO 0x0062
+#define regDCCG_GTC_DTO_MODULO_BASE_IDX 1
+#define regDCCG_GTC_CURRENT 0x0063
+#define regDCCG_GTC_CURRENT_BASE_IDX 1
+#define regSYMCLK32_SE_CNTL 0x0065
+#define regSYMCLK32_SE_CNTL_BASE_IDX 1
+#define regSYMCLK32_LE_CNTL 0x0066
+#define regSYMCLK32_LE_CNTL_BASE_IDX 1
+#define regDTBCLK_P_CNTL 0x0068
+#define regDTBCLK_P_CNTL_BASE_IDX 1
+#define regDCCG_GATE_DISABLE_CNTL5 0x0069
+#define regDCCG_GATE_DISABLE_CNTL5_BASE_IDX 1
+#define regDSCCLK0_DTO_PARAM 0x006c
+#define regDSCCLK0_DTO_PARAM_BASE_IDX 1
+#define regDSCCLK1_DTO_PARAM 0x006d
+#define regDSCCLK1_DTO_PARAM_BASE_IDX 1
+#define regDSCCLK2_DTO_PARAM 0x006e
+#define regDSCCLK2_DTO_PARAM_BASE_IDX 1
+#define regOTG_PIXEL_RATE_DIV 0x006f
+#define regOTG_PIXEL_RATE_DIV_BASE_IDX 1
+#define regMILLISECOND_TIME_BASE_DIV 0x0070
+#define regMILLISECOND_TIME_BASE_DIV_BASE_IDX 1
+#define regDISPCLK_FREQ_CHANGE_CNTL 0x0071
+#define regDISPCLK_FREQ_CHANGE_CNTL_BASE_IDX 1
+#define regDC_MEM_GLOBAL_PWR_REQ_CNTL 0x0072
+#define regDC_MEM_GLOBAL_PWR_REQ_CNTL_BASE_IDX 1
+#define regDCCG_PERFMON_CNTL 0x0073
+#define regDCCG_PERFMON_CNTL_BASE_IDX 1
+#define regDCCG_GATE_DISABLE_CNTL 0x0074
+#define regDCCG_GATE_DISABLE_CNTL_BASE_IDX 1
+#define regDISPCLK_CGTT_BLK_CTRL_REG 0x0075
+#define regDISPCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define regSOCCLK_CGTT_BLK_CTRL_REG 0x0076
+#define regSOCCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define regDCCG_CAC_STATUS 0x0077
+#define regDCCG_CAC_STATUS_BASE_IDX 1
+#define regMICROSECOND_TIME_BASE_DIV 0x007b
+#define regMICROSECOND_TIME_BASE_DIV_BASE_IDX 1
+#define regDCCG_GATE_DISABLE_CNTL2 0x007c
+#define regDCCG_GATE_DISABLE_CNTL2_BASE_IDX 1
+#define regSYMCLK_CGTT_BLK_CTRL_REG 0x007d
+#define regSYMCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define regDCCG_DISP_CNTL_REG 0x007f
+#define regDCCG_DISP_CNTL_REG_BASE_IDX 1
+#define regOTG0_PIXEL_RATE_CNTL 0x0080
+#define regOTG0_PIXEL_RATE_CNTL_BASE_IDX 1
+#define regDP_DTO0_PHASE 0x0081
+#define regDP_DTO0_PHASE_BASE_IDX 1
+#define regDP_DTO0_MODULO 0x0082
+#define regDP_DTO0_MODULO_BASE_IDX 1
+#define regOTG0_PHYPLL_PIXEL_RATE_CNTL 0x0083
+#define regOTG0_PHYPLL_PIXEL_RATE_CNTL_BASE_IDX 1
+#define regOTG1_PIXEL_RATE_CNTL 0x0084
+#define regOTG1_PIXEL_RATE_CNTL_BASE_IDX 1
+#define regDP_DTO1_PHASE 0x0085
+#define regDP_DTO1_PHASE_BASE_IDX 1
+#define regDP_DTO1_MODULO 0x0086
+#define regDP_DTO1_MODULO_BASE_IDX 1
+#define regOTG1_PHYPLL_PIXEL_RATE_CNTL 0x0087
+#define regOTG1_PHYPLL_PIXEL_RATE_CNTL_BASE_IDX 1
+#define regOTG2_PIXEL_RATE_CNTL 0x0088
+#define regOTG2_PIXEL_RATE_CNTL_BASE_IDX 1
+#define regDP_DTO2_PHASE 0x0089
+#define regDP_DTO2_PHASE_BASE_IDX 1
+#define regDP_DTO2_MODULO 0x008a
+#define regDP_DTO2_MODULO_BASE_IDX 1
+#define regOTG2_PHYPLL_PIXEL_RATE_CNTL 0x008b
+#define regOTG2_PHYPLL_PIXEL_RATE_CNTL_BASE_IDX 1
+#define regOTG3_PIXEL_RATE_CNTL 0x008c
+#define regOTG3_PIXEL_RATE_CNTL_BASE_IDX 1
+#define regDP_DTO3_PHASE 0x008d
+#define regDP_DTO3_PHASE_BASE_IDX 1
+#define regDP_DTO3_MODULO 0x008e
+#define regDP_DTO3_MODULO_BASE_IDX 1
+#define regOTG3_PHYPLL_PIXEL_RATE_CNTL 0x008f
+#define regOTG3_PHYPLL_PIXEL_RATE_CNTL_BASE_IDX 1
+#define regDPPCLK_CGTT_BLK_CTRL_REG 0x0098
+#define regDPPCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define regDPPCLK0_DTO_PARAM 0x0099
+#define regDPPCLK0_DTO_PARAM_BASE_IDX 1
+#define regDPPCLK1_DTO_PARAM 0x009a
+#define regDPPCLK1_DTO_PARAM_BASE_IDX 1
+#define regDPPCLK2_DTO_PARAM 0x009b
+#define regDPPCLK2_DTO_PARAM_BASE_IDX 1
+#define regDPPCLK3_DTO_PARAM 0x009c
+#define regDPPCLK3_DTO_PARAM_BASE_IDX 1
+#define regDCCG_CAC_STATUS2 0x009f
+#define regDCCG_CAC_STATUS2_BASE_IDX 1
+#define regSYMCLKA_CLOCK_ENABLE 0x00a0
+#define regSYMCLKA_CLOCK_ENABLE_BASE_IDX 1
+#define regSYMCLKB_CLOCK_ENABLE 0x00a1
+#define regSYMCLKB_CLOCK_ENABLE_BASE_IDX 1
+#define regSYMCLKC_CLOCK_ENABLE 0x00a2
+#define regSYMCLKC_CLOCK_ENABLE_BASE_IDX 1
+#define regSYMCLKD_CLOCK_ENABLE 0x00a3
+#define regSYMCLKD_CLOCK_ENABLE_BASE_IDX 1
+#define regSYMCLKE_CLOCK_ENABLE 0x00a4
+#define regSYMCLKE_CLOCK_ENABLE_BASE_IDX 1
+#define regDCCG_SOFT_RESET 0x00a6
+#define regDCCG_SOFT_RESET_BASE_IDX 1
+#define regDSCCLK_DTO_CTRL 0x00a7
+#define regDSCCLK_DTO_CTRL_BASE_IDX 1
+#define regDPPCLK_CTRL 0x00a8
+#define regDPPCLK_CTRL_BASE_IDX 1
+#define regDCCG_GATE_DISABLE_CNTL6 0x00a9
+#define regDCCG_GATE_DISABLE_CNTL6_BASE_IDX 1
+#define regSYMCLK_PSP_CNTL 0x00aa
+#define regSYMCLK_PSP_CNTL_BASE_IDX 1
+#define regDCCG_AUDIO_DTO_SOURCE 0x00ab
+#define regDCCG_AUDIO_DTO_SOURCE_BASE_IDX 1
+#define regDCCG_AUDIO_DTO0_PHASE 0x00ac
+#define regDCCG_AUDIO_DTO0_PHASE_BASE_IDX 1
+#define regDCCG_AUDIO_DTO0_MODULE 0x00ad
+#define regDCCG_AUDIO_DTO0_MODULE_BASE_IDX 1
+#define regDCCG_AUDIO_DTO1_PHASE 0x00ae
+#define regDCCG_AUDIO_DTO1_PHASE_BASE_IDX 1
+#define regDCCG_AUDIO_DTO1_MODULE 0x00af
+#define regDCCG_AUDIO_DTO1_MODULE_BASE_IDX 1
+#define regDCCG_VSYNC_OTG0_LATCH_VALUE 0x00b0
+#define regDCCG_VSYNC_OTG0_LATCH_VALUE_BASE_IDX 1
+#define regDCCG_VSYNC_OTG1_LATCH_VALUE 0x00b1
+#define regDCCG_VSYNC_OTG1_LATCH_VALUE_BASE_IDX 1
+#define regDCCG_VSYNC_OTG2_LATCH_VALUE 0x00b2
+#define regDCCG_VSYNC_OTG2_LATCH_VALUE_BASE_IDX 1
+#define regDCCG_VSYNC_OTG3_LATCH_VALUE 0x00b3
+#define regDCCG_VSYNC_OTG3_LATCH_VALUE_BASE_IDX 1
+#define regDCCG_VSYNC_OTG4_LATCH_VALUE 0x00b4
+#define regDCCG_VSYNC_OTG4_LATCH_VALUE_BASE_IDX 1
+#define regDCCG_VSYNC_OTG5_LATCH_VALUE 0x00b5
+#define regDCCG_VSYNC_OTG5_LATCH_VALUE_BASE_IDX 1
+#define regDPPCLK_DTO_CTRL 0x00b6
+#define regDPPCLK_DTO_CTRL_BASE_IDX 1
+#define regDCCG_VSYNC_CNT_CTRL 0x00b8
+#define regDCCG_VSYNC_CNT_CTRL_BASE_IDX 1
+#define regDCCG_VSYNC_CNT_INT_CTRL 0x00b9
+#define regDCCG_VSYNC_CNT_INT_CTRL_BASE_IDX 1
+#define regFORCE_SYMCLK_DISABLE 0x00ba
+#define regFORCE_SYMCLK_DISABLE_BASE_IDX 1
+#define regDTBCLK_DTO0_PHASE 0x0018
+#define regDTBCLK_DTO0_PHASE_BASE_IDX 2
+#define regDTBCLK_DTO1_PHASE 0x0019
+#define regDTBCLK_DTO1_PHASE_BASE_IDX 2
+#define regDTBCLK_DTO2_PHASE 0x001a
+#define regDTBCLK_DTO2_PHASE_BASE_IDX 2
+#define regDTBCLK_DTO3_PHASE 0x001b
+#define regDTBCLK_DTO3_PHASE_BASE_IDX 2
+#define regDTBCLK_DTO0_MODULO 0x001f
+#define regDTBCLK_DTO0_MODULO_BASE_IDX 2
+#define regDTBCLK_DTO1_MODULO 0x0020
+#define regDTBCLK_DTO1_MODULO_BASE_IDX 2
+#define regDTBCLK_DTO2_MODULO 0x0021
+#define regDTBCLK_DTO2_MODULO_BASE_IDX 2
+#define regDTBCLK_DTO3_MODULO 0x0022
+#define regDTBCLK_DTO3_MODULO_BASE_IDX 2
+#define regHDMICHARCLK0_CLOCK_CNTL 0x004a
+#define regHDMICHARCLK0_CLOCK_CNTL_BASE_IDX 2
+#define regPHYASYMCLK_CLOCK_CNTL 0x0052
+#define regPHYASYMCLK_CLOCK_CNTL_BASE_IDX 2
+#define regPHYBSYMCLK_CLOCK_CNTL 0x0053
+#define regPHYBSYMCLK_CLOCK_CNTL_BASE_IDX 2
+#define regPHYCSYMCLK_CLOCK_CNTL 0x0054
+#define regPHYCSYMCLK_CLOCK_CNTL_BASE_IDX 2
+#define regPHYDSYMCLK_CLOCK_CNTL 0x0055
+#define regPHYDSYMCLK_CLOCK_CNTL_BASE_IDX 2
+#define regPHYESYMCLK_CLOCK_CNTL 0x0056
+#define regPHYESYMCLK_CLOCK_CNTL_BASE_IDX 2
+#define regHDMISTREAMCLK_CNTL 0x0059
+#define regHDMISTREAMCLK_CNTL_BASE_IDX 2
+#define regDCCG_GATE_DISABLE_CNTL3 0x005a
+#define regDCCG_GATE_DISABLE_CNTL3_BASE_IDX 2
+#define regHDMISTREAMCLK0_DTO_PARAM 0x005b
+#define regHDMISTREAMCLK0_DTO_PARAM_BASE_IDX 2
+#define regDCCG_AUDIO_DTBCLK_DTO_PHASE 0x0061
+#define regDCCG_AUDIO_DTBCLK_DTO_PHASE_BASE_IDX 2
+#define regDCCG_AUDIO_DTBCLK_DTO_MODULO 0x0062
+#define regDCCG_AUDIO_DTBCLK_DTO_MODULO_BASE_IDX 2
+#define regDTBCLK_DTO_DBUF_EN 0x0063
+#define regDTBCLK_DTO_DBUF_EN_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dccg_dccg_dcperfmon0_dc_perfmon_dispdec
+// base address: 0x0
+#define regDC_PERFMON0_PERFCOUNTER_CNTL 0x0000
+#define regDC_PERFMON0_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON0_PERFCOUNTER_CNTL2 0x0001
+#define regDC_PERFMON0_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON0_PERFCOUNTER_STATE 0x0002
+#define regDC_PERFMON0_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON0_PERFMON_CNTL 0x0003
+#define regDC_PERFMON0_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON0_PERFMON_CNTL2 0x0004
+#define regDC_PERFMON0_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON0_PERFMON_CVALUE_INT_MISC 0x0005
+#define regDC_PERFMON0_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON0_PERFMON_CVALUE_LOW 0x0006
+#define regDC_PERFMON0_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON0_PERFMON_HI 0x0007
+#define regDC_PERFMON0_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON0_PERFMON_LOW 0x0008
+#define regDC_PERFMON0_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dccg_dccg_dcperfmon1_dc_perfmon_dispdec
+// base address: 0x30
+#define regDC_PERFMON1_PERFCOUNTER_CNTL 0x000c
+#define regDC_PERFMON1_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON1_PERFCOUNTER_CNTL2 0x000d
+#define regDC_PERFMON1_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON1_PERFCOUNTER_STATE 0x000e
+#define regDC_PERFMON1_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON1_PERFMON_CNTL 0x000f
+#define regDC_PERFMON1_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON1_PERFMON_CNTL2 0x0010
+#define regDC_PERFMON1_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON1_PERFMON_CVALUE_INT_MISC 0x0011
+#define regDC_PERFMON1_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON1_PERFMON_CVALUE_LOW 0x0012
+#define regDC_PERFMON1_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON1_PERFMON_HI 0x0013
+#define regDC_PERFMON1_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON1_PERFMON_LOW 0x0014
+#define regDC_PERFMON1_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dmu_fgsec_dispdec
+// base address: 0x0
+#define regDMCUB_RBBMIF_SEC_CNTL 0x017a
+#define regDMCUB_RBBMIF_SEC_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dmu_rbbmif_dispdec
+// base address: 0x0
+#define regRBBMIF_TIMEOUT 0x017f
+#define regRBBMIF_TIMEOUT_BASE_IDX 2
+#define regRBBMIF_STATUS 0x0180
+#define regRBBMIF_STATUS_BASE_IDX 2
+#define regRBBMIF_STATUS_2 0x0181
+#define regRBBMIF_STATUS_2_BASE_IDX 2
+#define regRBBMIF_INT_STATUS 0x0182
+#define regRBBMIF_INT_STATUS_BASE_IDX 2
+#define regRBBMIF_TIMEOUT_DIS 0x0183
+#define regRBBMIF_TIMEOUT_DIS_BASE_IDX 2
+#define regRBBMIF_TIMEOUT_DIS_2 0x0184
+#define regRBBMIF_TIMEOUT_DIS_2_BASE_IDX 2
+#define regRBBMIF_STATUS_FLAG 0x0185
+#define regRBBMIF_STATUS_FLAG_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dmu_dmu_dcperfmon_dc_perfmon_dispdec
+// base address: 0x2f8
+#define regDC_PERFMON2_PERFCOUNTER_CNTL 0x00be
+#define regDC_PERFMON2_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON2_PERFCOUNTER_CNTL2 0x00bf
+#define regDC_PERFMON2_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON2_PERFCOUNTER_STATE 0x00c0
+#define regDC_PERFMON2_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON2_PERFMON_CNTL 0x00c1
+#define regDC_PERFMON2_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON2_PERFMON_CNTL2 0x00c2
+#define regDC_PERFMON2_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON2_PERFMON_CVALUE_INT_MISC 0x00c3
+#define regDC_PERFMON2_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON2_PERFMON_CVALUE_LOW 0x00c4
+#define regDC_PERFMON2_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON2_PERFMON_HI 0x00c5
+#define regDC_PERFMON2_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON2_PERFMON_LOW 0x00c6
+#define regDC_PERFMON2_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dmu_ihc_dispdec
+// base address: 0x0
+#define regDC_GPU_TIMER_START_POSITION_V_UPDATE 0x0126
+#define regDC_GPU_TIMER_START_POSITION_V_UPDATE_BASE_IDX 2
+#define regDC_GPU_TIMER_START_POSITION_VSTARTUP 0x0127
+#define regDC_GPU_TIMER_START_POSITION_VSTARTUP_BASE_IDX 2
+#define regDC_GPU_TIMER_READ 0x0128
+#define regDC_GPU_TIMER_READ_BASE_IDX 2
+#define regDC_GPU_TIMER_READ_CNTL 0x0129
+#define regDC_GPU_TIMER_READ_CNTL_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS 0x012a
+#define regDISP_INTERRUPT_STATUS_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE 0x012b
+#define regDISP_INTERRUPT_STATUS_CONTINUE_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE2 0x012c
+#define regDISP_INTERRUPT_STATUS_CONTINUE2_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE3 0x012d
+#define regDISP_INTERRUPT_STATUS_CONTINUE3_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE4 0x012e
+#define regDISP_INTERRUPT_STATUS_CONTINUE4_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE5 0x012f
+#define regDISP_INTERRUPT_STATUS_CONTINUE5_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE6 0x0130
+#define regDISP_INTERRUPT_STATUS_CONTINUE6_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE7 0x0131
+#define regDISP_INTERRUPT_STATUS_CONTINUE7_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE8 0x0132
+#define regDISP_INTERRUPT_STATUS_CONTINUE8_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE9 0x0133
+#define regDISP_INTERRUPT_STATUS_CONTINUE9_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE10 0x0134
+#define regDISP_INTERRUPT_STATUS_CONTINUE10_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE11 0x0135
+#define regDISP_INTERRUPT_STATUS_CONTINUE11_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE12 0x0136
+#define regDISP_INTERRUPT_STATUS_CONTINUE12_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE13 0x0137
+#define regDISP_INTERRUPT_STATUS_CONTINUE13_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE14 0x0138
+#define regDISP_INTERRUPT_STATUS_CONTINUE14_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE15 0x0139
+#define regDISP_INTERRUPT_STATUS_CONTINUE15_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE16 0x013a
+#define regDISP_INTERRUPT_STATUS_CONTINUE16_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE17 0x013b
+#define regDISP_INTERRUPT_STATUS_CONTINUE17_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE18 0x013c
+#define regDISP_INTERRUPT_STATUS_CONTINUE18_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE19 0x013d
+#define regDISP_INTERRUPT_STATUS_CONTINUE19_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE20 0x013e
+#define regDISP_INTERRUPT_STATUS_CONTINUE20_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE21 0x013f
+#define regDISP_INTERRUPT_STATUS_CONTINUE21_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE22 0x0140
+#define regDISP_INTERRUPT_STATUS_CONTINUE22_BASE_IDX 2
+#define regDC_GPU_TIMER_START_POSITION_VREADY 0x0141
+#define regDC_GPU_TIMER_START_POSITION_VREADY_BASE_IDX 2
+#define regDC_GPU_TIMER_START_POSITION_FLIP 0x0142
+#define regDC_GPU_TIMER_START_POSITION_FLIP_BASE_IDX 2
+#define regDC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK 0x0143
+#define regDC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK_BASE_IDX 2
+#define regDC_GPU_TIMER_START_POSITION_FLIP_AWAY 0x0144
+#define regDC_GPU_TIMER_START_POSITION_FLIP_AWAY_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE23 0x0145
+#define regDISP_INTERRUPT_STATUS_CONTINUE23_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE24 0x0146
+#define regDISP_INTERRUPT_STATUS_CONTINUE24_BASE_IDX 2
+#define regDISP_INTERRUPT_STATUS_CONTINUE25 0x0147
+#define regDISP_INTERRUPT_STATUS_CONTINUE25_BASE_IDX 2
+#define regDCCG_INTERRUPT_DEST 0x0148
+#define regDCCG_INTERRUPT_DEST_BASE_IDX 2
+#define regDMU_INTERRUPT_DEST 0x0149
+#define regDMU_INTERRUPT_DEST_BASE_IDX 2
+#define regDMU_INTERRUPT_DEST2 0x014a
+#define regDMU_INTERRUPT_DEST2_BASE_IDX 2
+#define regDCPG_INTERRUPT_DEST 0x014b
+#define regDCPG_INTERRUPT_DEST_BASE_IDX 2
+#define regDCPG_INTERRUPT_DEST2 0x014c
+#define regDCPG_INTERRUPT_DEST2_BASE_IDX 2
+#define regMMHUBBUB_INTERRUPT_DEST 0x014d
+#define regMMHUBBUB_INTERRUPT_DEST_BASE_IDX 2
+#define regWB_INTERRUPT_DEST 0x014e
+#define regWB_INTERRUPT_DEST_BASE_IDX 2
+#define regDCHUB_INTERRUPT_DEST 0x014f
+#define regDCHUB_INTERRUPT_DEST_BASE_IDX 2
+#define regDCHUB_PERFCOUNTER_INTERRUPT_DEST 0x0150
+#define regDCHUB_PERFCOUNTER_INTERRUPT_DEST_BASE_IDX 2
+#define regDCHUB_INTERRUPT_DEST2 0x0151
+#define regDCHUB_INTERRUPT_DEST2_BASE_IDX 2
+#define regDPP_PERFCOUNTER_INTERRUPT_DEST 0x0152
+#define regDPP_PERFCOUNTER_INTERRUPT_DEST_BASE_IDX 2
+#define regMPC_INTERRUPT_DEST 0x0153
+#define regMPC_INTERRUPT_DEST_BASE_IDX 2
+#define regOPP_INTERRUPT_DEST 0x0154
+#define regOPP_INTERRUPT_DEST_BASE_IDX 2
+#define regOPTC_INTERRUPT_DEST 0x0155
+#define regOPTC_INTERRUPT_DEST_BASE_IDX 2
+#define regOTG0_INTERRUPT_DEST 0x0156
+#define regOTG0_INTERRUPT_DEST_BASE_IDX 2
+#define regOTG1_INTERRUPT_DEST 0x0157
+#define regOTG1_INTERRUPT_DEST_BASE_IDX 2
+#define regOTG2_INTERRUPT_DEST 0x0158
+#define regOTG2_INTERRUPT_DEST_BASE_IDX 2
+#define regOTG3_INTERRUPT_DEST 0x0159
+#define regOTG3_INTERRUPT_DEST_BASE_IDX 2
+#define regOTG4_INTERRUPT_DEST 0x015a
+#define regOTG4_INTERRUPT_DEST_BASE_IDX 2
+#define regOTG5_INTERRUPT_DEST 0x015b
+#define regOTG5_INTERRUPT_DEST_BASE_IDX 2
+#define regDIG_INTERRUPT_DEST 0x015c
+#define regDIG_INTERRUPT_DEST_BASE_IDX 2
+#define regI2C_DDC_HPD_INTERRUPT_DEST 0x015d
+#define regI2C_DDC_HPD_INTERRUPT_DEST_BASE_IDX 2
+#define regDIO_INTERRUPT_DEST 0x015f
+#define regDIO_INTERRUPT_DEST_BASE_IDX 2
+#define regDCIO_INTERRUPT_DEST 0x0160
+#define regDCIO_INTERRUPT_DEST_BASE_IDX 2
+#define regHPD_INTERRUPT_DEST 0x0161
+#define regHPD_INTERRUPT_DEST_BASE_IDX 2
+#define regAZ_INTERRUPT_DEST 0x0162
+#define regAZ_INTERRUPT_DEST_BASE_IDX 2
+#define regAUX_INTERRUPT_DEST 0x0163
+#define regAUX_INTERRUPT_DEST_BASE_IDX 2
+#define regDSC_INTERRUPT_DEST 0x0164
+#define regDSC_INTERRUPT_DEST_BASE_IDX 2
+#define regHPO_INTERRUPT_DEST 0x0165
+#define regHPO_INTERRUPT_DEST_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dmu_dmu_misc_dispdec
+// base address: 0x0
+#define regCC_DC_PIPE_DIS 0x00ca
+#define regCC_DC_PIPE_DIS_BASE_IDX 2
+#define regDMU_CLK_CNTL 0x00cb
+#define regDMU_CLK_CNTL_BASE_IDX 2
+#define regDMCUB_SMU_INTERRUPT_CNTL 0x00cd
+#define regDMCUB_SMU_INTERRUPT_CNTL_BASE_IDX 2
+#define regSMU_INTERRUPT_CONTROL 0x00ce
+#define regSMU_INTERRUPT_CONTROL_BASE_IDX 2
+#define regZSC_CNTL 0x00cf
+#define regZSC_CNTL_BASE_IDX 2
+#define regZSC_CNTL2 0x00d0
+#define regZSC_CNTL2_BASE_IDX 2
+#define regDMU_MISC_ALLOW_DS_FORCE 0x00d6
+#define regDMU_MISC_ALLOW_DS_FORCE_BASE_IDX 2
+#define regZSC_STATUS 0x00d7
+#define regZSC_STATUS_BASE_IDX 2
+#define regDMU_DISPCLK_CGTT_BLK_CTRL_REG 0x00d8
+#define regDMU_DISPCLK_CGTT_BLK_CTRL_REG_BASE_IDX 2
+#define regDMU_SOCCLK_CGTT_BLK_CTRL_REG 0x00d9
+#define regDMU_SOCCLK_CGTT_BLK_CTRL_REG_BASE_IDX 2
+#define regZPR_CLK_UNGATE_DELAY 0x00da
+#define regZPR_CLK_UNGATE_DELAY_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dmu_dc_pg_dispdec
+// base address: 0x0
+#define regDOMAIN0_PG_CONFIG 0x0080
+#define regDOMAIN0_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN0_PG_STATUS 0x0081
+#define regDOMAIN0_PG_STATUS_BASE_IDX 2
+#define regDOMAIN1_PG_CONFIG 0x0082
+#define regDOMAIN1_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN1_PG_STATUS 0x0083
+#define regDOMAIN1_PG_STATUS_BASE_IDX 2
+#define regDOMAIN2_PG_CONFIG 0x0084
+#define regDOMAIN2_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN2_PG_STATUS 0x0085
+#define regDOMAIN2_PG_STATUS_BASE_IDX 2
+#define regDOMAIN3_PG_CONFIG 0x0086
+#define regDOMAIN3_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN3_PG_STATUS 0x0087
+#define regDOMAIN3_PG_STATUS_BASE_IDX 2
+#define regDOMAIN16_PG_CONFIG 0x0089
+#define regDOMAIN16_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN16_PG_STATUS 0x008a
+#define regDOMAIN16_PG_STATUS_BASE_IDX 2
+#define regDOMAIN17_PG_CONFIG 0x008b
+#define regDOMAIN17_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN17_PG_STATUS 0x008c
+#define regDOMAIN17_PG_STATUS_BASE_IDX 2
+#define regDOMAIN18_PG_CONFIG 0x008d
+#define regDOMAIN18_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN18_PG_STATUS 0x008e
+#define regDOMAIN18_PG_STATUS_BASE_IDX 2
+#define regDOMAIN19_PG_CONFIG 0x008f
+#define regDOMAIN19_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN19_PG_STATUS 0x0090
+#define regDOMAIN19_PG_STATUS_BASE_IDX 2
+#define regDOMAIN22_PG_CONFIG 0x0092
+#define regDOMAIN22_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN22_PG_STATUS 0x0093
+#define regDOMAIN22_PG_STATUS_BASE_IDX 2
+#define regDOMAIN23_PG_CONFIG 0x0094
+#define regDOMAIN23_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN23_PG_STATUS 0x0095
+#define regDOMAIN23_PG_STATUS_BASE_IDX 2
+#define regDOMAIN24_PG_CONFIG 0x0096
+#define regDOMAIN24_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN24_PG_STATUS 0x0097
+#define regDOMAIN24_PG_STATUS_BASE_IDX 2
+#define regDOMAIN25_PG_CONFIG 0x0098
+#define regDOMAIN25_PG_CONFIG_BASE_IDX 2
+#define regDOMAIN25_PG_STATUS 0x0099
+#define regDOMAIN25_PG_STATUS_BASE_IDX 2
+#define regDCPG_INTERRUPT_STATUS 0x009a
+#define regDCPG_INTERRUPT_STATUS_BASE_IDX 2
+#define regDCPG_INTERRUPT_STATUS_2 0x009b
+#define regDCPG_INTERRUPT_STATUS_2_BASE_IDX 2
+#define regDCPG_INTERRUPT_STATUS_3 0x009c
+#define regDCPG_INTERRUPT_STATUS_3_BASE_IDX 2
+#define regDCPG_INTERRUPT_CONTROL_1 0x009d
+#define regDCPG_INTERRUPT_CONTROL_1_BASE_IDX 2
+#define regDCPG_INTERRUPT_CONTROL_2 0x009e
+#define regDCPG_INTERRUPT_CONTROL_2_BASE_IDX 2
+#define regDCPG_INTERRUPT_CONTROL_3 0x009f
+#define regDCPG_INTERRUPT_CONTROL_3_BASE_IDX 2
+#define regDC_IP_REQUEST_CNTL 0x00a0
+#define regDC_IP_REQUEST_CNTL_BASE_IDX 2
+#define regLONO_MEM_PWR_REQ_CNTL 0x00a4
+#define regLONO_MEM_PWR_REQ_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dmu_dmcub_dispdec
+// base address: 0x0
+#define regDMCUB_REGION0_OFFSET 0x018e
+#define regDMCUB_REGION0_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION0_OFFSET_HIGH 0x018f
+#define regDMCUB_REGION0_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION1_OFFSET 0x0190
+#define regDMCUB_REGION1_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION1_OFFSET_HIGH 0x0191
+#define regDMCUB_REGION1_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION2_OFFSET 0x0192
+#define regDMCUB_REGION2_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION2_OFFSET_HIGH 0x0193
+#define regDMCUB_REGION2_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION4_OFFSET 0x0196
+#define regDMCUB_REGION4_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION4_OFFSET_HIGH 0x0197
+#define regDMCUB_REGION4_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION5_OFFSET 0x0198
+#define regDMCUB_REGION5_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION5_OFFSET_HIGH 0x0199
+#define regDMCUB_REGION5_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION6_OFFSET 0x019a
+#define regDMCUB_REGION6_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION6_OFFSET_HIGH 0x019b
+#define regDMCUB_REGION6_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION7_OFFSET 0x019c
+#define regDMCUB_REGION7_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION7_OFFSET_HIGH 0x019d
+#define regDMCUB_REGION7_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION0_TOP_ADDRESS 0x019e
+#define regDMCUB_REGION0_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION1_TOP_ADDRESS 0x019f
+#define regDMCUB_REGION1_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION2_TOP_ADDRESS 0x01a0
+#define regDMCUB_REGION2_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION4_TOP_ADDRESS 0x01a1
+#define regDMCUB_REGION4_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION5_TOP_ADDRESS 0x01a2
+#define regDMCUB_REGION5_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION6_TOP_ADDRESS 0x01a3
+#define regDMCUB_REGION6_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION7_TOP_ADDRESS 0x01a4
+#define regDMCUB_REGION7_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW0_BASE_ADDRESS 0x01a5
+#define regDMCUB_REGION3_CW0_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW1_BASE_ADDRESS 0x01a6
+#define regDMCUB_REGION3_CW1_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW2_BASE_ADDRESS 0x01a7
+#define regDMCUB_REGION3_CW2_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW3_BASE_ADDRESS 0x01a8
+#define regDMCUB_REGION3_CW3_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW4_BASE_ADDRESS 0x01a9
+#define regDMCUB_REGION3_CW4_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW5_BASE_ADDRESS 0x01aa
+#define regDMCUB_REGION3_CW5_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW6_BASE_ADDRESS 0x01ab
+#define regDMCUB_REGION3_CW6_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW7_BASE_ADDRESS 0x01ac
+#define regDMCUB_REGION3_CW7_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW0_TOP_ADDRESS 0x01ad
+#define regDMCUB_REGION3_CW0_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW1_TOP_ADDRESS 0x01ae
+#define regDMCUB_REGION3_CW1_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW2_TOP_ADDRESS 0x01af
+#define regDMCUB_REGION3_CW2_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW3_TOP_ADDRESS 0x01b0
+#define regDMCUB_REGION3_CW3_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW4_TOP_ADDRESS 0x01b1
+#define regDMCUB_REGION3_CW4_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW5_TOP_ADDRESS 0x01b2
+#define regDMCUB_REGION3_CW5_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW6_TOP_ADDRESS 0x01b3
+#define regDMCUB_REGION3_CW6_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW7_TOP_ADDRESS 0x01b4
+#define regDMCUB_REGION3_CW7_TOP_ADDRESS_BASE_IDX 2
+#define regDMCUB_REGION3_CW0_OFFSET 0x01b5
+#define regDMCUB_REGION3_CW0_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION3_CW0_OFFSET_HIGH 0x01b6
+#define regDMCUB_REGION3_CW0_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION3_CW1_OFFSET 0x01b7
+#define regDMCUB_REGION3_CW1_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION3_CW1_OFFSET_HIGH 0x01b8
+#define regDMCUB_REGION3_CW1_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION3_CW2_OFFSET 0x01b9
+#define regDMCUB_REGION3_CW2_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION3_CW2_OFFSET_HIGH 0x01ba
+#define regDMCUB_REGION3_CW2_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION3_CW3_OFFSET 0x01bb
+#define regDMCUB_REGION3_CW3_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION3_CW3_OFFSET_HIGH 0x01bc
+#define regDMCUB_REGION3_CW3_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION3_CW4_OFFSET 0x01bd
+#define regDMCUB_REGION3_CW4_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION3_CW4_OFFSET_HIGH 0x01be
+#define regDMCUB_REGION3_CW4_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION3_CW5_OFFSET 0x01bf
+#define regDMCUB_REGION3_CW5_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION3_CW5_OFFSET_HIGH 0x01c0
+#define regDMCUB_REGION3_CW5_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION3_CW6_OFFSET 0x01c1
+#define regDMCUB_REGION3_CW6_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION3_CW6_OFFSET_HIGH 0x01c2
+#define regDMCUB_REGION3_CW6_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_REGION3_CW7_OFFSET 0x01c3
+#define regDMCUB_REGION3_CW7_OFFSET_BASE_IDX 2
+#define regDMCUB_REGION3_CW7_OFFSET_HIGH 0x01c4
+#define regDMCUB_REGION3_CW7_OFFSET_HIGH_BASE_IDX 2
+#define regDMCUB_INTERRUPT_ENABLE 0x01c5
+#define regDMCUB_INTERRUPT_ENABLE_BASE_IDX 2
+#define regDMCUB_INTERRUPT_ACK 0x01c6
+#define regDMCUB_INTERRUPT_ACK_BASE_IDX 2
+#define regDMCUB_INTERRUPT_STATUS 0x01c7
+#define regDMCUB_INTERRUPT_STATUS_BASE_IDX 2
+#define regDMCUB_INTERRUPT_TYPE 0x01c8
+#define regDMCUB_INTERRUPT_TYPE_BASE_IDX 2
+#define regDMCUB_EXT_INTERRUPT_STATUS 0x01c9
+#define regDMCUB_EXT_INTERRUPT_STATUS_BASE_IDX 2
+#define regDMCUB_EXT_INTERRUPT_CTXID 0x01ca
+#define regDMCUB_EXT_INTERRUPT_CTXID_BASE_IDX 2
+#define regDMCUB_EXT_INTERRUPT_ACK 0x01cb
+#define regDMCUB_EXT_INTERRUPT_ACK_BASE_IDX 2
+#define regDMCUB_INST_FETCH_FAULT_ADDR 0x01cc
+#define regDMCUB_INST_FETCH_FAULT_ADDR_BASE_IDX 2
+#define regDMCUB_DATA_WRITE_FAULT_ADDR 0x01cd
+#define regDMCUB_DATA_WRITE_FAULT_ADDR_BASE_IDX 2
+#define regDMCUB_SEC_CNTL 0x01ce
+#define regDMCUB_SEC_CNTL_BASE_IDX 2
+#define regDMCUB_MEM_CNTL 0x01cf
+#define regDMCUB_MEM_CNTL_BASE_IDX 2
+#define regDMCUB_INBOX0_BASE_ADDRESS 0x01d0
+#define regDMCUB_INBOX0_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_INBOX0_SIZE 0x01d1
+#define regDMCUB_INBOX0_SIZE_BASE_IDX 2
+#define regDMCUB_INBOX0_WPTR 0x01d2
+#define regDMCUB_INBOX0_WPTR_BASE_IDX 2
+#define regDMCUB_INBOX0_RPTR 0x01d3
+#define regDMCUB_INBOX0_RPTR_BASE_IDX 2
+#define regDMCUB_INBOX1_BASE_ADDRESS 0x01d4
+#define regDMCUB_INBOX1_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_INBOX1_SIZE 0x01d5
+#define regDMCUB_INBOX1_SIZE_BASE_IDX 2
+#define regDMCUB_INBOX1_WPTR 0x01d6
+#define regDMCUB_INBOX1_WPTR_BASE_IDX 2
+#define regDMCUB_INBOX1_RPTR 0x01d7
+#define regDMCUB_INBOX1_RPTR_BASE_IDX 2
+#define regDMCUB_OUTBOX0_BASE_ADDRESS 0x01d8
+#define regDMCUB_OUTBOX0_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_OUTBOX0_SIZE 0x01d9
+#define regDMCUB_OUTBOX0_SIZE_BASE_IDX 2
+#define regDMCUB_OUTBOX0_WPTR 0x01da
+#define regDMCUB_OUTBOX0_WPTR_BASE_IDX 2
+#define regDMCUB_OUTBOX0_RPTR 0x01db
+#define regDMCUB_OUTBOX0_RPTR_BASE_IDX 2
+#define regDMCUB_OUTBOX1_BASE_ADDRESS 0x01dc
+#define regDMCUB_OUTBOX1_BASE_ADDRESS_BASE_IDX 2
+#define regDMCUB_OUTBOX1_SIZE 0x01dd
+#define regDMCUB_OUTBOX1_SIZE_BASE_IDX 2
+#define regDMCUB_OUTBOX1_WPTR 0x01de
+#define regDMCUB_OUTBOX1_WPTR_BASE_IDX 2
+#define regDMCUB_OUTBOX1_RPTR 0x01df
+#define regDMCUB_OUTBOX1_RPTR_BASE_IDX 2
+#define regDMCUB_TIMER_TRIGGER0 0x01e0
+#define regDMCUB_TIMER_TRIGGER0_BASE_IDX 2
+#define regDMCUB_TIMER_TRIGGER1 0x01e1
+#define regDMCUB_TIMER_TRIGGER1_BASE_IDX 2
+#define regDMCUB_TIMER_WINDOW 0x01e2
+#define regDMCUB_TIMER_WINDOW_BASE_IDX 2
+#define regDMCUB_SCRATCH0 0x01e3
+#define regDMCUB_SCRATCH0_BASE_IDX 2
+#define regDMCUB_SCRATCH1 0x01e4
+#define regDMCUB_SCRATCH1_BASE_IDX 2
+#define regDMCUB_SCRATCH2 0x01e5
+#define regDMCUB_SCRATCH2_BASE_IDX 2
+#define regDMCUB_SCRATCH3 0x01e6
+#define regDMCUB_SCRATCH3_BASE_IDX 2
+#define regDMCUB_SCRATCH4 0x01e7
+#define regDMCUB_SCRATCH4_BASE_IDX 2
+#define regDMCUB_SCRATCH5 0x01e8
+#define regDMCUB_SCRATCH5_BASE_IDX 2
+#define regDMCUB_SCRATCH6 0x01e9
+#define regDMCUB_SCRATCH6_BASE_IDX 2
+#define regDMCUB_SCRATCH7 0x01ea
+#define regDMCUB_SCRATCH7_BASE_IDX 2
+#define regDMCUB_SCRATCH8 0x01eb
+#define regDMCUB_SCRATCH8_BASE_IDX 2
+#define regDMCUB_SCRATCH9 0x01ec
+#define regDMCUB_SCRATCH9_BASE_IDX 2
+#define regDMCUB_SCRATCH10 0x01ed
+#define regDMCUB_SCRATCH10_BASE_IDX 2
+#define regDMCUB_SCRATCH11 0x01ee
+#define regDMCUB_SCRATCH11_BASE_IDX 2
+#define regDMCUB_SCRATCH12 0x01ef
+#define regDMCUB_SCRATCH12_BASE_IDX 2
+#define regDMCUB_SCRATCH13 0x01f0
+#define regDMCUB_SCRATCH13_BASE_IDX 2
+#define regDMCUB_SCRATCH14 0x01f1
+#define regDMCUB_SCRATCH14_BASE_IDX 2
+#define regDMCUB_SCRATCH15 0x01f2
+#define regDMCUB_SCRATCH15_BASE_IDX 2
+#define regDMCUB_SCRATCH16 0x01f3
+#define regDMCUB_SCRATCH16_BASE_IDX 2
+#define regDMCUB_SCRATCH17 0x01f4
+#define regDMCUB_SCRATCH17_BASE_IDX 2
+#define regDMCUB_SCRATCH18 0x01f5
+#define regDMCUB_SCRATCH18_BASE_IDX 2
+#define regDMCUB_CNTL 0x01f6
+#define regDMCUB_CNTL_BASE_IDX 2
+#define regDMCUB_GPINT_DATAIN0 0x01f7
+#define regDMCUB_GPINT_DATAIN0_BASE_IDX 2
+#define regDMCUB_GPINT_DATAIN1 0x01f8
+#define regDMCUB_GPINT_DATAIN1_BASE_IDX 2
+#define regDMCUB_GPINT_DATAOUT 0x01f9
+#define regDMCUB_GPINT_DATAOUT_BASE_IDX 2
+#define regDMCUB_UNDEFINED_ADDRESS_FAULT_ADDR 0x01fa
+#define regDMCUB_UNDEFINED_ADDRESS_FAULT_ADDR_BASE_IDX 2
+#define regDMCUB_LS_WAKE_INT_ENABLE 0x01fb
+#define regDMCUB_LS_WAKE_INT_ENABLE_BASE_IDX 2
+#define regDMCUB_MEM_PWR_CNTL 0x01fc
+#define regDMCUB_MEM_PWR_CNTL_BASE_IDX 2
+#define regDMCUB_TIMER_CURRENT 0x01fd
+#define regDMCUB_TIMER_CURRENT_BASE_IDX 2
+#define regDMCUB_PROC_ID 0x01ff
+#define regDMCUB_PROC_ID_BASE_IDX 2
+#define regDMCUB_CNTL2 0x0200
+#define regDMCUB_CNTL2_BASE_IDX 2
+#define regDMCUB_GPINT_DATAIN2 0x0215
+#define regDMCUB_GPINT_DATAIN2_BASE_IDX 2
+#define regDMCUB_GPINT_DATAIN3 0x0216
+#define regDMCUB_GPINT_DATAIN3_BASE_IDX 2
+#define regDMCUB_GPINT_DATAIN4 0x0217
+#define regDMCUB_GPINT_DATAIN4_BASE_IDX 2
+#define regDMCUB_GPINT_DATAIN5 0x0218
+#define regDMCUB_GPINT_DATAIN5_BASE_IDX 2
+#define regDMCUB_GPINT_DATAIN6 0x0219
+#define regDMCUB_GPINT_DATAIN6_BASE_IDX 2
+#define regDMCUB_REGION3_TMR_AXI_SPACE 0x021a
+#define regDMCUB_REGION3_TMR_AXI_SPACE_BASE_IDX 2
+#define regDMCUB_SCRATCH19 0x022e
+#define regDMCUB_SCRATCH19_BASE_IDX 2
+#define regDMCUB_SCRATCH20 0x022f
+#define regDMCUB_SCRATCH20_BASE_IDX 2
+#define regDMCUB_SCRATCH21 0x0230
+#define regDMCUB_SCRATCH21_BASE_IDX 2
+#define regDMCUB_SCRATCH22 0x0231
+#define regDMCUB_SCRATCH22_BASE_IDX 2
+#define regDMCUB_SCRATCH23 0x0232
+#define regDMCUB_SCRATCH23_BASE_IDX 2
+
+
+// addressBlock: dce_dc_wb0_dispdec_dwb_top_dispdec
+// base address: 0x0
+#define regDWB_ENABLE_CLK_CTRL 0x3228
+#define regDWB_ENABLE_CLK_CTRL_BASE_IDX 2
+#define regDWB_MEM_PWR_CTRL 0x3229
+#define regDWB_MEM_PWR_CTRL_BASE_IDX 2
+#define regFC_MODE_CTRL 0x322a
+#define regFC_MODE_CTRL_BASE_IDX 2
+#define regFC_FLOW_CTRL 0x322b
+#define regFC_FLOW_CTRL_BASE_IDX 2
+#define regFC_WINDOW_START 0x322c
+#define regFC_WINDOW_START_BASE_IDX 2
+#define regFC_WINDOW_SIZE 0x322d
+#define regFC_WINDOW_SIZE_BASE_IDX 2
+#define regFC_SOURCE_SIZE 0x322e
+#define regFC_SOURCE_SIZE_BASE_IDX 2
+#define regDWB_UPDATE_CTRL 0x322f
+#define regDWB_UPDATE_CTRL_BASE_IDX 2
+#define regDWB_CRC_CTRL 0x3230
+#define regDWB_CRC_CTRL_BASE_IDX 2
+#define regDWB_CRC_MASK_R_G 0x3231
+#define regDWB_CRC_MASK_R_G_BASE_IDX 2
+#define regDWB_CRC_MASK_B_A 0x3232
+#define regDWB_CRC_MASK_B_A_BASE_IDX 2
+#define regDWB_CRC_VAL_R_G 0x3233
+#define regDWB_CRC_VAL_R_G_BASE_IDX 2
+#define regDWB_CRC_VAL_B_A 0x3234
+#define regDWB_CRC_VAL_B_A_BASE_IDX 2
+#define regDWB_OUT_CTRL 0x3235
+#define regDWB_OUT_CTRL_BASE_IDX 2
+#define regDWB_MMHUBBUB_BACKPRESSURE_CNT_EN 0x3236
+#define regDWB_MMHUBBUB_BACKPRESSURE_CNT_EN_BASE_IDX 2
+#define regDWB_MMHUBBUB_BACKPRESSURE_CNT 0x3237
+#define regDWB_MMHUBBUB_BACKPRESSURE_CNT_BASE_IDX 2
+#define regDWB_HOST_READ_CONTROL 0x3238
+#define regDWB_HOST_READ_CONTROL_BASE_IDX 2
+#define regDWB_OVERFLOW_STATUS 0x3239
+#define regDWB_OVERFLOW_STATUS_BASE_IDX 2
+#define regDWB_OVERFLOW_COUNTER 0x323a
+#define regDWB_OVERFLOW_COUNTER_BASE_IDX 2
+#define regDWB_SOFT_RESET 0x323b
+#define regDWB_SOFT_RESET_BASE_IDX 2
+
+
+// addressBlock: dce_dc_wb0_dispdec_dwbcp_dispdec
+// base address: 0x0
+#define regDWB_HDR_MULT_COEF 0x3294
+#define regDWB_HDR_MULT_COEF_BASE_IDX 2
+#define regDWB_GAMUT_REMAP_MODE 0x3295
+#define regDWB_GAMUT_REMAP_MODE_BASE_IDX 2
+#define regDWB_GAMUT_REMAP_COEF_FORMAT 0x3296
+#define regDWB_GAMUT_REMAP_COEF_FORMAT_BASE_IDX 2
+#define regDWB_GAMUT_REMAPA_C11_C12 0x3297
+#define regDWB_GAMUT_REMAPA_C11_C12_BASE_IDX 2
+#define regDWB_GAMUT_REMAPA_C13_C14 0x3298
+#define regDWB_GAMUT_REMAPA_C13_C14_BASE_IDX 2
+#define regDWB_GAMUT_REMAPA_C21_C22 0x3299
+#define regDWB_GAMUT_REMAPA_C21_C22_BASE_IDX 2
+#define regDWB_GAMUT_REMAPA_C23_C24 0x329a
+#define regDWB_GAMUT_REMAPA_C23_C24_BASE_IDX 2
+#define regDWB_GAMUT_REMAPA_C31_C32 0x329b
+#define regDWB_GAMUT_REMAPA_C31_C32_BASE_IDX 2
+#define regDWB_GAMUT_REMAPA_C33_C34 0x329c
+#define regDWB_GAMUT_REMAPA_C33_C34_BASE_IDX 2
+#define regDWB_GAMUT_REMAPB_C11_C12 0x329d
+#define regDWB_GAMUT_REMAPB_C11_C12_BASE_IDX 2
+#define regDWB_GAMUT_REMAPB_C13_C14 0x329e
+#define regDWB_GAMUT_REMAPB_C13_C14_BASE_IDX 2
+#define regDWB_GAMUT_REMAPB_C21_C22 0x329f
+#define regDWB_GAMUT_REMAPB_C21_C22_BASE_IDX 2
+#define regDWB_GAMUT_REMAPB_C23_C24 0x32a0
+#define regDWB_GAMUT_REMAPB_C23_C24_BASE_IDX 2
+#define regDWB_GAMUT_REMAPB_C31_C32 0x32a1
+#define regDWB_GAMUT_REMAPB_C31_C32_BASE_IDX 2
+#define regDWB_GAMUT_REMAPB_C33_C34 0x32a2
+#define regDWB_GAMUT_REMAPB_C33_C34_BASE_IDX 2
+#define regDWB_OGAM_CONTROL 0x32a3
+#define regDWB_OGAM_CONTROL_BASE_IDX 2
+#define regDWB_OGAM_LUT_INDEX 0x32a4
+#define regDWB_OGAM_LUT_INDEX_BASE_IDX 2
+#define regDWB_OGAM_LUT_DATA 0x32a5
+#define regDWB_OGAM_LUT_DATA_BASE_IDX 2
+#define regDWB_OGAM_LUT_CONTROL 0x32a6
+#define regDWB_OGAM_LUT_CONTROL_BASE_IDX 2
+#define regDWB_OGAM_RAMA_START_CNTL_B 0x32a7
+#define regDWB_OGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define regDWB_OGAM_RAMA_START_CNTL_G 0x32a8
+#define regDWB_OGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define regDWB_OGAM_RAMA_START_CNTL_R 0x32a9
+#define regDWB_OGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define regDWB_OGAM_RAMA_START_BASE_CNTL_B 0x32aa
+#define regDWB_OGAM_RAMA_START_BASE_CNTL_B_BASE_IDX 2
+#define regDWB_OGAM_RAMA_START_SLOPE_CNTL_B 0x32ab
+#define regDWB_OGAM_RAMA_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regDWB_OGAM_RAMA_START_BASE_CNTL_G 0x32ac
+#define regDWB_OGAM_RAMA_START_BASE_CNTL_G_BASE_IDX 2
+#define regDWB_OGAM_RAMA_START_SLOPE_CNTL_G 0x32ad
+#define regDWB_OGAM_RAMA_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regDWB_OGAM_RAMA_START_BASE_CNTL_R 0x32ae
+#define regDWB_OGAM_RAMA_START_BASE_CNTL_R_BASE_IDX 2
+#define regDWB_OGAM_RAMA_START_SLOPE_CNTL_R 0x32af
+#define regDWB_OGAM_RAMA_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regDWB_OGAM_RAMA_END_CNTL1_B 0x32b0
+#define regDWB_OGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define regDWB_OGAM_RAMA_END_CNTL2_B 0x32b1
+#define regDWB_OGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define regDWB_OGAM_RAMA_END_CNTL1_G 0x32b2
+#define regDWB_OGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define regDWB_OGAM_RAMA_END_CNTL2_G 0x32b3
+#define regDWB_OGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define regDWB_OGAM_RAMA_END_CNTL1_R 0x32b4
+#define regDWB_OGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define regDWB_OGAM_RAMA_END_CNTL2_R 0x32b5
+#define regDWB_OGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define regDWB_OGAM_RAMA_OFFSET_B 0x32b6
+#define regDWB_OGAM_RAMA_OFFSET_B_BASE_IDX 2
+#define regDWB_OGAM_RAMA_OFFSET_G 0x32b7
+#define regDWB_OGAM_RAMA_OFFSET_G_BASE_IDX 2
+#define regDWB_OGAM_RAMA_OFFSET_R 0x32b8
+#define regDWB_OGAM_RAMA_OFFSET_R_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_0_1 0x32b9
+#define regDWB_OGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_2_3 0x32ba
+#define regDWB_OGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_4_5 0x32bb
+#define regDWB_OGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_6_7 0x32bc
+#define regDWB_OGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_8_9 0x32bd
+#define regDWB_OGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_10_11 0x32be
+#define regDWB_OGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_12_13 0x32bf
+#define regDWB_OGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_14_15 0x32c0
+#define regDWB_OGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_16_17 0x32c1
+#define regDWB_OGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_18_19 0x32c2
+#define regDWB_OGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_20_21 0x32c3
+#define regDWB_OGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_22_23 0x32c4
+#define regDWB_OGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_24_25 0x32c5
+#define regDWB_OGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_26_27 0x32c6
+#define regDWB_OGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_28_29 0x32c7
+#define regDWB_OGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_30_31 0x32c8
+#define regDWB_OGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define regDWB_OGAM_RAMA_REGION_32_33 0x32c9
+#define regDWB_OGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define regDWB_OGAM_RAMB_START_CNTL_B 0x32ca
+#define regDWB_OGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define regDWB_OGAM_RAMB_START_CNTL_G 0x32cb
+#define regDWB_OGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define regDWB_OGAM_RAMB_START_CNTL_R 0x32cc
+#define regDWB_OGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define regDWB_OGAM_RAMB_START_BASE_CNTL_B 0x32cd
+#define regDWB_OGAM_RAMB_START_BASE_CNTL_B_BASE_IDX 2
+#define regDWB_OGAM_RAMB_START_SLOPE_CNTL_B 0x32ce
+#define regDWB_OGAM_RAMB_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regDWB_OGAM_RAMB_START_BASE_CNTL_G 0x32cf
+#define regDWB_OGAM_RAMB_START_BASE_CNTL_G_BASE_IDX 2
+#define regDWB_OGAM_RAMB_START_SLOPE_CNTL_G 0x32d0
+#define regDWB_OGAM_RAMB_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regDWB_OGAM_RAMB_START_BASE_CNTL_R 0x32d1
+#define regDWB_OGAM_RAMB_START_BASE_CNTL_R_BASE_IDX 2
+#define regDWB_OGAM_RAMB_START_SLOPE_CNTL_R 0x32d2
+#define regDWB_OGAM_RAMB_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regDWB_OGAM_RAMB_END_CNTL1_B 0x32d3
+#define regDWB_OGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define regDWB_OGAM_RAMB_END_CNTL2_B 0x32d4
+#define regDWB_OGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define regDWB_OGAM_RAMB_END_CNTL1_G 0x32d5
+#define regDWB_OGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define regDWB_OGAM_RAMB_END_CNTL2_G 0x32d6
+#define regDWB_OGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define regDWB_OGAM_RAMB_END_CNTL1_R 0x32d7
+#define regDWB_OGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define regDWB_OGAM_RAMB_END_CNTL2_R 0x32d8
+#define regDWB_OGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define regDWB_OGAM_RAMB_OFFSET_B 0x32d9
+#define regDWB_OGAM_RAMB_OFFSET_B_BASE_IDX 2
+#define regDWB_OGAM_RAMB_OFFSET_G 0x32da
+#define regDWB_OGAM_RAMB_OFFSET_G_BASE_IDX 2
+#define regDWB_OGAM_RAMB_OFFSET_R 0x32db
+#define regDWB_OGAM_RAMB_OFFSET_R_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_0_1 0x32dc
+#define regDWB_OGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_2_3 0x32dd
+#define regDWB_OGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_4_5 0x32de
+#define regDWB_OGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_6_7 0x32df
+#define regDWB_OGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_8_9 0x32e0
+#define regDWB_OGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_10_11 0x32e1
+#define regDWB_OGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_12_13 0x32e2
+#define regDWB_OGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_14_15 0x32e3
+#define regDWB_OGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_16_17 0x32e4
+#define regDWB_OGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_18_19 0x32e5
+#define regDWB_OGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_20_21 0x32e6
+#define regDWB_OGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_22_23 0x32e7
+#define regDWB_OGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_24_25 0x32e8
+#define regDWB_OGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_26_27 0x32e9
+#define regDWB_OGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_28_29 0x32ea
+#define regDWB_OGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_30_31 0x32eb
+#define regDWB_OGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define regDWB_OGAM_RAMB_REGION_32_33 0x32ec
+#define regDWB_OGAM_RAMB_REGION_32_33_BASE_IDX 2
+
+
+// addressBlock: dce_dc_wb0_dispdec_wb_dcperfmon_dc_perfmon_dispdec
+// base address: 0xca20
+#define regDC_PERFMON3_PERFCOUNTER_CNTL 0x3288
+#define regDC_PERFMON3_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON3_PERFCOUNTER_CNTL2 0x3289
+#define regDC_PERFMON3_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON3_PERFCOUNTER_STATE 0x328a
+#define regDC_PERFMON3_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON3_PERFMON_CNTL 0x328b
+#define regDC_PERFMON3_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON3_PERFMON_CNTL2 0x328c
+#define regDC_PERFMON3_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON3_PERFMON_CVALUE_INT_MISC 0x328d
+#define regDC_PERFMON3_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON3_PERFMON_CVALUE_LOW 0x328e
+#define regDC_PERFMON3_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON3_PERFMON_HI 0x328f
+#define regDC_PERFMON3_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON3_PERFMON_LOW 0x3290
+#define regDC_PERFMON3_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mmhubbub_mcif_wb0_dispdec
+// base address: 0x0
+#define regMCIF_WB_BUFMGR_SW_CONTROL 0x0272
+#define regMCIF_WB_BUFMGR_SW_CONTROL_BASE_IDX 2
+#define regMCIF_WB_BUFMGR_STATUS 0x0274
+#define regMCIF_WB_BUFMGR_STATUS_BASE_IDX 2
+#define regMCIF_WB_BUF_PITCH 0x0275
+#define regMCIF_WB_BUF_PITCH_BASE_IDX 2
+#define regMCIF_WB_BUF_1_STATUS 0x0276
+#define regMCIF_WB_BUF_1_STATUS_BASE_IDX 2
+#define regMCIF_WB_BUF_1_STATUS2 0x0277
+#define regMCIF_WB_BUF_1_STATUS2_BASE_IDX 2
+#define regMCIF_WB_BUF_2_STATUS 0x0278
+#define regMCIF_WB_BUF_2_STATUS_BASE_IDX 2
+#define regMCIF_WB_BUF_2_STATUS2 0x0279
+#define regMCIF_WB_BUF_2_STATUS2_BASE_IDX 2
+#define regMCIF_WB_BUF_3_STATUS 0x027a
+#define regMCIF_WB_BUF_3_STATUS_BASE_IDX 2
+#define regMCIF_WB_BUF_3_STATUS2 0x027b
+#define regMCIF_WB_BUF_3_STATUS2_BASE_IDX 2
+#define regMCIF_WB_BUF_4_STATUS 0x027c
+#define regMCIF_WB_BUF_4_STATUS_BASE_IDX 2
+#define regMCIF_WB_BUF_4_STATUS2 0x027d
+#define regMCIF_WB_BUF_4_STATUS2_BASE_IDX 2
+#define regMCIF_WB_ARBITRATION_CONTROL 0x027e
+#define regMCIF_WB_ARBITRATION_CONTROL_BASE_IDX 2
+#define regMCIF_WB_SCLK_CHANGE 0x027f
+#define regMCIF_WB_SCLK_CHANGE_BASE_IDX 2
+#define regMCIF_WB_BUF_1_ADDR_Y 0x0282
+#define regMCIF_WB_BUF_1_ADDR_Y_BASE_IDX 2
+#define regMCIF_WB_BUF_1_ADDR_C 0x0284
+#define regMCIF_WB_BUF_1_ADDR_C_BASE_IDX 2
+#define regMCIF_WB_BUF_2_ADDR_Y 0x0286
+#define regMCIF_WB_BUF_2_ADDR_Y_BASE_IDX 2
+#define regMCIF_WB_BUF_2_ADDR_C 0x0288
+#define regMCIF_WB_BUF_2_ADDR_C_BASE_IDX 2
+#define regMCIF_WB_BUF_3_ADDR_Y 0x028a
+#define regMCIF_WB_BUF_3_ADDR_Y_BASE_IDX 2
+#define regMCIF_WB_BUF_3_ADDR_C 0x028c
+#define regMCIF_WB_BUF_3_ADDR_C_BASE_IDX 2
+#define regMCIF_WB_BUF_4_ADDR_Y 0x028e
+#define regMCIF_WB_BUF_4_ADDR_Y_BASE_IDX 2
+#define regMCIF_WB_BUF_4_ADDR_C 0x0290
+#define regMCIF_WB_BUF_4_ADDR_C_BASE_IDX 2
+#define regMCIF_WB_BUFMGR_VCE_CONTROL 0x0292
+#define regMCIF_WB_BUFMGR_VCE_CONTROL_BASE_IDX 2
+#define regMCIF_WB_NB_PSTATE_CONTROL 0x0293
+#define regMCIF_WB_NB_PSTATE_CONTROL_BASE_IDX 2
+#define regMCIF_WB_CLOCK_GATER_CONTROL 0x0294
+#define regMCIF_WB_CLOCK_GATER_CONTROL_BASE_IDX 2
+#define regMCIF_WB_SELF_REFRESH_CONTROL 0x0296
+#define regMCIF_WB_SELF_REFRESH_CONTROL_BASE_IDX 2
+#define regMULTI_LEVEL_QOS_CTRL 0x0297
+#define regMULTI_LEVEL_QOS_CTRL_BASE_IDX 2
+#define regMCIF_WB_SECURITY_LEVEL 0x0298
+#define regMCIF_WB_SECURITY_LEVEL_BASE_IDX 2
+#define regMCIF_WB_BUF_LUMA_SIZE 0x0299
+#define regMCIF_WB_BUF_LUMA_SIZE_BASE_IDX 2
+#define regMCIF_WB_BUF_CHROMA_SIZE 0x029a
+#define regMCIF_WB_BUF_CHROMA_SIZE_BASE_IDX 2
+#define regMCIF_WB_BUF_1_ADDR_Y_HIGH 0x029b
+#define regMCIF_WB_BUF_1_ADDR_Y_HIGH_BASE_IDX 2
+#define regMCIF_WB_BUF_1_ADDR_C_HIGH 0x029c
+#define regMCIF_WB_BUF_1_ADDR_C_HIGH_BASE_IDX 2
+#define regMCIF_WB_BUF_2_ADDR_Y_HIGH 0x029d
+#define regMCIF_WB_BUF_2_ADDR_Y_HIGH_BASE_IDX 2
+#define regMCIF_WB_BUF_2_ADDR_C_HIGH 0x029e
+#define regMCIF_WB_BUF_2_ADDR_C_HIGH_BASE_IDX 2
+#define regMCIF_WB_BUF_3_ADDR_Y_HIGH 0x029f
+#define regMCIF_WB_BUF_3_ADDR_Y_HIGH_BASE_IDX 2
+#define regMCIF_WB_BUF_3_ADDR_C_HIGH 0x02a0
+#define regMCIF_WB_BUF_3_ADDR_C_HIGH_BASE_IDX 2
+#define regMCIF_WB_BUF_4_ADDR_Y_HIGH 0x02a1
+#define regMCIF_WB_BUF_4_ADDR_Y_HIGH_BASE_IDX 2
+#define regMCIF_WB_BUF_4_ADDR_C_HIGH 0x02a2
+#define regMCIF_WB_BUF_4_ADDR_C_HIGH_BASE_IDX 2
+#define regMCIF_WB_BUF_1_RESOLUTION 0x02a3
+#define regMCIF_WB_BUF_1_RESOLUTION_BASE_IDX 2
+#define regMCIF_WB_BUF_2_RESOLUTION 0x02a4
+#define regMCIF_WB_BUF_2_RESOLUTION_BASE_IDX 2
+#define regMCIF_WB_BUF_3_RESOLUTION 0x02a5
+#define regMCIF_WB_BUF_3_RESOLUTION_BASE_IDX 2
+#define regMCIF_WB_BUF_4_RESOLUTION 0x02a6
+#define regMCIF_WB_BUF_4_RESOLUTION_BASE_IDX 2
+#define regMCIF_WB_PSTATE_CHANGE_DURATION_VBI 0x02a7
+#define regMCIF_WB_PSTATE_CHANGE_DURATION_VBI_BASE_IDX 2
+#define regMCIF_WB_VMID_CONTROL 0x02a8
+#define regMCIF_WB_VMID_CONTROL_BASE_IDX 2
+#define regMCIF_WB_MIN_TTO 0x02a9
+#define regMCIF_WB_MIN_TTO_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mmhubbub_mmhubbub_dcperfmon_dc_perfmon_dispdec
+// base address: 0xd48
+#define regDC_PERFMON4_PERFCOUNTER_CNTL 0x0352
+#define regDC_PERFMON4_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON4_PERFCOUNTER_CNTL2 0x0353
+#define regDC_PERFMON4_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON4_PERFCOUNTER_STATE 0x0354
+#define regDC_PERFMON4_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON4_PERFMON_CNTL 0x0355
+#define regDC_PERFMON4_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON4_PERFMON_CNTL2 0x0356
+#define regDC_PERFMON4_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON4_PERFMON_CVALUE_INT_MISC 0x0357
+#define regDC_PERFMON4_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON4_PERFMON_CVALUE_LOW 0x0358
+#define regDC_PERFMON4_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON4_PERFMON_HI 0x0359
+#define regDC_PERFMON4_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON4_PERFMON_LOW 0x035a
+#define regDC_PERFMON4_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mmhubbub_mmhubbub_dispdec
+// base address: 0x0
+#define regMCIF_WB_NB_PSTATE_LATENCY_WATERMARK 0x02aa
+#define regMCIF_WB_NB_PSTATE_LATENCY_WATERMARK_BASE_IDX 2
+#define regMCIF_WB_WATERMARK 0x02ab
+#define regMCIF_WB_WATERMARK_BASE_IDX 2
+#define regMMHUBBUB_WARMUP_CONFIG 0x02ac
+#define regMMHUBBUB_WARMUP_CONFIG_BASE_IDX 2
+#define regMMHUBBUB_WARMUP_CONTROL_STATUS 0x02ad
+#define regMMHUBBUB_WARMUP_CONTROL_STATUS_BASE_IDX 2
+#define regMMHUBBUB_WARMUP_BASE_ADDR_LOW 0x02ae
+#define regMMHUBBUB_WARMUP_BASE_ADDR_LOW_BASE_IDX 2
+#define regMMHUBBUB_WARMUP_BASE_ADDR_HIGH 0x02af
+#define regMMHUBBUB_WARMUP_BASE_ADDR_HIGH_BASE_IDX 2
+#define regMMHUBBUB_WARMUP_ADDR_REGION 0x02b0
+#define regMMHUBBUB_WARMUP_ADDR_REGION_BASE_IDX 2
+#define regMMHUBBUB_MIN_TTO 0x02b1
+#define regMMHUBBUB_MIN_TTO_BASE_IDX 2
+#define regMMHUBBUB_CTRL 0x0333
+#define regMMHUBBUB_CTRL_BASE_IDX 2
+#define regWBIF_SMU_WM_CONTROL 0x0334
+#define regWBIF_SMU_WM_CONTROL_BASE_IDX 2
+#define regWBIF0_MISC_CTRL 0x0335
+#define regWBIF0_MISC_CTRL_BASE_IDX 2
+#define regWBIF0_PHASE0_OUTSTANDING_COUNTER 0x0336
+#define regWBIF0_PHASE0_OUTSTANDING_COUNTER_BASE_IDX 2
+#define regWBIF0_PHASE1_OUTSTANDING_COUNTER 0x0337
+#define regWBIF0_PHASE1_OUTSTANDING_COUNTER_BASE_IDX 2
+#define regMMHUBBUB_MEM_PWR_STATUS 0x033e
+#define regMMHUBBUB_MEM_PWR_STATUS_BASE_IDX 2
+#define regMMHUBBUB_MEM_PWR_CNTL 0x033f
+#define regMMHUBBUB_MEM_PWR_CNTL_BASE_IDX 2
+#define regMMHUBBUB_CLOCK_CNTL 0x0340
+#define regMMHUBBUB_CLOCK_CNTL_BASE_IDX 2
+#define regMMHUBBUB_SOFT_RESET 0x0341
+#define regMMHUBBUB_SOFT_RESET_BASE_IDX 2
+#define regDMU_IF_ERR_STATUS 0x0345
+#define regDMU_IF_ERR_STATUS_BASE_IDX 2
+#define regMMHUBBUB_CLIENT_UNIT_ID 0x0346
+#define regMMHUBBUB_CLIENT_UNIT_ID_BASE_IDX 2
+#define regMMHUBBUB_WARMUP_VMID_CONTROL 0x0348
+#define regMMHUBBUB_WARMUP_VMID_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0controller_dispdec
+// base address: 0x0
+#define regAZALIA_CONTROLLER_CLOCK_GATING 0x03c2
+#define regAZALIA_CONTROLLER_CLOCK_GATING_BASE_IDX 2
+#define regAZALIA_AUDIO_DTO 0x03c3
+#define regAZALIA_AUDIO_DTO_BASE_IDX 2
+#define regAZALIA_AUDIO_DTO_CONTROL 0x03c4
+#define regAZALIA_AUDIO_DTO_CONTROL_BASE_IDX 2
+#define regAZALIA_SOCCLK_CONTROL 0x03c5
+#define regAZALIA_SOCCLK_CONTROL_BASE_IDX 2
+#define regAZALIA_UNDERFLOW_FILLER_SAMPLE 0x03c6
+#define regAZALIA_UNDERFLOW_FILLER_SAMPLE_BASE_IDX 2
+#define regAZALIA_DATA_DMA_CONTROL 0x03c7
+#define regAZALIA_DATA_DMA_CONTROL_BASE_IDX 2
+#define regAZALIA_BDL_DMA_CONTROL 0x03c8
+#define regAZALIA_BDL_DMA_CONTROL_BASE_IDX 2
+#define regAZALIA_RIRB_AND_DP_CONTROL 0x03c9
+#define regAZALIA_RIRB_AND_DP_CONTROL_BASE_IDX 2
+#define regAZALIA_CORB_DMA_CONTROL 0x03ca
+#define regAZALIA_CORB_DMA_CONTROL_BASE_IDX 2
+#define regAZALIA_GLOBAL_CAPABILITIES 0x03d3
+#define regAZALIA_GLOBAL_CAPABILITIES_BASE_IDX 2
+#define regAZALIA_OUTPUT_PAYLOAD_CAPABILITY 0x03d4
+#define regAZALIA_OUTPUT_PAYLOAD_CAPABILITY_BASE_IDX 2
+#define regAZALIA_OUTPUT_STREAM_ARBITER_CONTROL 0x03d5
+#define regAZALIA_OUTPUT_STREAM_ARBITER_CONTROL_BASE_IDX 2
+#define regAZALIA_INPUT_PAYLOAD_CAPABILITY 0x03d6
+#define regAZALIA_INPUT_PAYLOAD_CAPABILITY_BASE_IDX 2
+#define regAZALIA_INPUT_CRC0_CONTROL0 0x03d9
+#define regAZALIA_INPUT_CRC0_CONTROL0_BASE_IDX 2
+#define regAZALIA_INPUT_CRC0_CONTROL1 0x03da
+#define regAZALIA_INPUT_CRC0_CONTROL1_BASE_IDX 2
+#define regAZALIA_INPUT_CRC0_CONTROL2 0x03db
+#define regAZALIA_INPUT_CRC0_CONTROL2_BASE_IDX 2
+#define regAZALIA_INPUT_CRC0_CONTROL3 0x03dc
+#define regAZALIA_INPUT_CRC0_CONTROL3_BASE_IDX 2
+#define regAZALIA_INPUT_CRC0_RESULT 0x03dd
+#define regAZALIA_INPUT_CRC0_RESULT_BASE_IDX 2
+#define regAZALIA_INPUT_CRC1_CONTROL0 0x03de
+#define regAZALIA_INPUT_CRC1_CONTROL0_BASE_IDX 2
+#define regAZALIA_INPUT_CRC1_CONTROL1 0x03df
+#define regAZALIA_INPUT_CRC1_CONTROL1_BASE_IDX 2
+#define regAZALIA_INPUT_CRC1_CONTROL2 0x03e0
+#define regAZALIA_INPUT_CRC1_CONTROL2_BASE_IDX 2
+#define regAZALIA_INPUT_CRC1_CONTROL3 0x03e1
+#define regAZALIA_INPUT_CRC1_CONTROL3_BASE_IDX 2
+#define regAZALIA_INPUT_CRC1_RESULT 0x03e2
+#define regAZALIA_INPUT_CRC1_RESULT_BASE_IDX 2
+#define regAZALIA_CRC0_CONTROL0 0x03e3
+#define regAZALIA_CRC0_CONTROL0_BASE_IDX 2
+#define regAZALIA_CRC0_CONTROL1 0x03e4
+#define regAZALIA_CRC0_CONTROL1_BASE_IDX 2
+#define regAZALIA_CRC0_CONTROL2 0x03e5
+#define regAZALIA_CRC0_CONTROL2_BASE_IDX 2
+#define regAZALIA_CRC0_CONTROL3 0x03e6
+#define regAZALIA_CRC0_CONTROL3_BASE_IDX 2
+#define regAZALIA_CRC0_RESULT 0x03e7
+#define regAZALIA_CRC0_RESULT_BASE_IDX 2
+#define regAZALIA_CRC1_CONTROL0 0x03e8
+#define regAZALIA_CRC1_CONTROL0_BASE_IDX 2
+#define regAZALIA_CRC1_CONTROL1 0x03e9
+#define regAZALIA_CRC1_CONTROL1_BASE_IDX 2
+#define regAZALIA_CRC1_CONTROL2 0x03ea
+#define regAZALIA_CRC1_CONTROL2_BASE_IDX 2
+#define regAZALIA_CRC1_CONTROL3 0x03eb
+#define regAZALIA_CRC1_CONTROL3_BASE_IDX 2
+#define regAZALIA_CRC1_RESULT 0x03ec
+#define regAZALIA_CRC1_RESULT_BASE_IDX 2
+#define regAZALIA_MEM_PWR_CTRL 0x03ee
+#define regAZALIA_MEM_PWR_CTRL_BASE_IDX 2
+#define regAZALIA_MEM_PWR_STATUS 0x03ef
+#define regAZALIA_MEM_PWR_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0root_dispdec
+// base address: 0x0
+#define regAZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID 0x0406
+#define regAZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_BASE_IDX 2
+#define regAZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID 0x0407
+#define regAZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID_BASE_IDX 2
+#define regAZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL 0x0408
+#define regAZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL_BASE_IDX 2
+#define regAZALIA_F0_CODEC_RESYNC_FIFO_CONTROL 0x0409
+#define regAZALIA_F0_CODEC_RESYNC_FIFO_CONTROL_BASE_IDX 2
+#define regAZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE 0x040a
+#define regAZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_BASE_IDX 2
+#define regAZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES 0x040b
+#define regAZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES_BASE_IDX 2
+#define regAZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS 0x040c
+#define regAZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_BASE_IDX 2
+#define regAZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES 0x040d
+#define regAZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES_BASE_IDX 2
+#define regAZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE 0x040e
+#define regAZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE_BASE_IDX 2
+#define regAZALIA_F0_CODEC_FUNCTION_CONTROL_RESET 0x040f
+#define regAZALIA_F0_CODEC_FUNCTION_CONTROL_RESET_BASE_IDX 2
+#define regAZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID 0x0410
+#define regAZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_BASE_IDX 2
+#define regAZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION 0x0411
+#define regAZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION_BASE_IDX 2
+#define regCC_RCU_DC_AUDIO_PORT_CONNECTIVITY 0x0412
+#define regCC_RCU_DC_AUDIO_PORT_CONNECTIVITY_BASE_IDX 2
+#define regCC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY 0x0413
+#define regCC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY_BASE_IDX 2
+#define regAZALIA_F0_GTC_GROUP_OFFSET0 0x0415
+#define regAZALIA_F0_GTC_GROUP_OFFSET0_BASE_IDX 2
+#define regAZALIA_F0_GTC_GROUP_OFFSET1 0x0416
+#define regAZALIA_F0_GTC_GROUP_OFFSET1_BASE_IDX 2
+#define regAZALIA_F0_GTC_GROUP_OFFSET2 0x0417
+#define regAZALIA_F0_GTC_GROUP_OFFSET2_BASE_IDX 2
+#define regAZALIA_F0_GTC_GROUP_OFFSET3 0x0418
+#define regAZALIA_F0_GTC_GROUP_OFFSET3_BASE_IDX 2
+#define regAZALIA_F0_GTC_GROUP_OFFSET4 0x0419
+#define regAZALIA_F0_GTC_GROUP_OFFSET4_BASE_IDX 2
+#define regAZALIA_F0_GTC_GROUP_OFFSET5 0x041a
+#define regAZALIA_F0_GTC_GROUP_OFFSET5_BASE_IDX 2
+#define regAZALIA_F0_GTC_GROUP_OFFSET6 0x041b
+#define regAZALIA_F0_GTC_GROUP_OFFSET6_BASE_IDX 2
+#define regREG_DC_AUDIO_PORT_CONNECTIVITY 0x041c
+#define regREG_DC_AUDIO_PORT_CONNECTIVITY_BASE_IDX 2
+#define regREG_DC_AUDIO_INPUT_PORT_CONNECTIVITY 0x041d
+#define regREG_DC_AUDIO_INPUT_PORT_CONNECTIVITY_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_az_misc_dispdec
+// base address: 0x0
+#define regAZ_CLOCK_CNTL 0x0372
+#define regAZ_CLOCK_CNTL_BASE_IDX 2
+#define regAZ_MEM_GLOBAL_PWR_REQ_CNTL 0x0373
+#define regAZ_MEM_GLOBAL_PWR_REQ_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_az_dcperfmon_dc_perfmon_dispdec
+// base address: 0xde8
+#define regDC_PERFMON5_PERFCOUNTER_CNTL 0x037a
+#define regDC_PERFMON5_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON5_PERFCOUNTER_CNTL2 0x037b
+#define regDC_PERFMON5_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON5_PERFCOUNTER_STATE 0x037c
+#define regDC_PERFMON5_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON5_PERFMON_CNTL 0x037d
+#define regDC_PERFMON5_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON5_PERFMON_CNTL2 0x037e
+#define regDC_PERFMON5_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON5_PERFMON_CVALUE_INT_MISC 0x037f
+#define regDC_PERFMON5_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON5_PERFMON_CVALUE_LOW 0x0380
+#define regDC_PERFMON5_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON5_PERFMON_HI 0x0381
+#define regDC_PERFMON5_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON5_PERFMON_LOW 0x0382
+#define regDC_PERFMON5_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream0_dispdec
+// base address: 0x0
+#define regAZF0STREAM0_AZALIA_STREAM_INDEX 0x035e
+#define regAZF0STREAM0_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM0_AZALIA_STREAM_DATA 0x035f
+#define regAZF0STREAM0_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream1_dispdec
+// base address: 0x8
+#define regAZF0STREAM1_AZALIA_STREAM_INDEX 0x0360
+#define regAZF0STREAM1_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM1_AZALIA_STREAM_DATA 0x0361
+#define regAZF0STREAM1_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream2_dispdec
+// base address: 0x10
+#define regAZF0STREAM2_AZALIA_STREAM_INDEX 0x0362
+#define regAZF0STREAM2_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM2_AZALIA_STREAM_DATA 0x0363
+#define regAZF0STREAM2_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream3_dispdec
+// base address: 0x18
+#define regAZF0STREAM3_AZALIA_STREAM_INDEX 0x0364
+#define regAZF0STREAM3_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM3_AZALIA_STREAM_DATA 0x0365
+#define regAZF0STREAM3_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream4_dispdec
+// base address: 0x20
+#define regAZF0STREAM4_AZALIA_STREAM_INDEX 0x0366
+#define regAZF0STREAM4_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM4_AZALIA_STREAM_DATA 0x0367
+#define regAZF0STREAM4_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream5_dispdec
+// base address: 0x28
+#define regAZF0STREAM5_AZALIA_STREAM_INDEX 0x0368
+#define regAZF0STREAM5_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM5_AZALIA_STREAM_DATA 0x0369
+#define regAZF0STREAM5_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream6_dispdec
+// base address: 0x30
+#define regAZF0STREAM6_AZALIA_STREAM_INDEX 0x036a
+#define regAZF0STREAM6_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM6_AZALIA_STREAM_DATA 0x036b
+#define regAZF0STREAM6_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream7_dispdec
+// base address: 0x38
+#define regAZF0STREAM7_AZALIA_STREAM_INDEX 0x036c
+#define regAZF0STREAM7_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM7_AZALIA_STREAM_DATA 0x036d
+#define regAZF0STREAM7_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream8_dispdec
+// base address: 0x320
+#define regAZF0STREAM8_AZALIA_STREAM_INDEX 0x0426
+#define regAZF0STREAM8_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM8_AZALIA_STREAM_DATA 0x0427
+#define regAZF0STREAM8_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream9_dispdec
+// base address: 0x328
+#define regAZF0STREAM9_AZALIA_STREAM_INDEX 0x0428
+#define regAZF0STREAM9_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM9_AZALIA_STREAM_DATA 0x0429
+#define regAZF0STREAM9_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream10_dispdec
+// base address: 0x330
+#define regAZF0STREAM10_AZALIA_STREAM_INDEX 0x042a
+#define regAZF0STREAM10_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM10_AZALIA_STREAM_DATA 0x042b
+#define regAZF0STREAM10_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream11_dispdec
+// base address: 0x338
+#define regAZF0STREAM11_AZALIA_STREAM_INDEX 0x042c
+#define regAZF0STREAM11_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM11_AZALIA_STREAM_DATA 0x042d
+#define regAZF0STREAM11_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream12_dispdec
+// base address: 0x340
+#define regAZF0STREAM12_AZALIA_STREAM_INDEX 0x042e
+#define regAZF0STREAM12_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM12_AZALIA_STREAM_DATA 0x042f
+#define regAZF0STREAM12_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream13_dispdec
+// base address: 0x348
+#define regAZF0STREAM13_AZALIA_STREAM_INDEX 0x0430
+#define regAZF0STREAM13_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM13_AZALIA_STREAM_DATA 0x0431
+#define regAZF0STREAM13_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream14_dispdec
+// base address: 0x350
+#define regAZF0STREAM14_AZALIA_STREAM_INDEX 0x0432
+#define regAZF0STREAM14_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM14_AZALIA_STREAM_DATA 0x0433
+#define regAZF0STREAM14_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0stream15_dispdec
+// base address: 0x358
+#define regAZF0STREAM15_AZALIA_STREAM_INDEX 0x0434
+#define regAZF0STREAM15_AZALIA_STREAM_INDEX_BASE_IDX 2
+#define regAZF0STREAM15_AZALIA_STREAM_DATA 0x0435
+#define regAZF0STREAM15_AZALIA_STREAM_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint0_dispdec
+// base address: 0x0
+#define regAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x0386
+#define regAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA 0x0387
+#define regAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint1_dispdec
+// base address: 0x18
+#define regAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x038c
+#define regAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA 0x038d
+#define regAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint2_dispdec
+// base address: 0x30
+#define regAZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x0392
+#define regAZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_DATA 0x0393
+#define regAZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint3_dispdec
+// base address: 0x48
+#define regAZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x0398
+#define regAZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_DATA 0x0399
+#define regAZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint4_dispdec
+// base address: 0x60
+#define regAZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x039e
+#define regAZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_DATA 0x039f
+#define regAZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint5_dispdec
+// base address: 0x78
+#define regAZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x03a4
+#define regAZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_DATA 0x03a5
+#define regAZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint6_dispdec
+// base address: 0x90
+#define regAZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x03aa
+#define regAZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_DATA 0x03ab
+#define regAZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint7_dispdec
+// base address: 0xa8
+#define regAZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x03b0
+#define regAZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_DATA 0x03b1
+#define regAZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint0_dispdec
+// base address: 0x0
+#define regAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x043a
+#define regAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x043b
+#define regAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint1_dispdec
+// base address: 0x10
+#define regAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x043e
+#define regAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x043f
+#define regAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint2_dispdec
+// base address: 0x20
+#define regAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x0442
+#define regAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x0443
+#define regAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint3_dispdec
+// base address: 0x30
+#define regAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x0446
+#define regAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x0447
+#define regAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint4_dispdec
+// base address: 0x40
+#define regAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x044a
+#define regAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x044b
+#define regAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint5_dispdec
+// base address: 0x50
+#define regAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x044e
+#define regAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x044f
+#define regAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint6_dispdec
+// base address: 0x60
+#define regAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x0452
+#define regAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x0453
+#define regAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint7_dispdec
+// base address: 0x70
+#define regAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x0456
+#define regAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define regAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x0457
+#define regAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dchubbubl_hubbub_dispdec
+// base address: 0x0
+#define regDCHUBBUB_ARB_DF_REQ_OUTSTAND 0x04f9
+#define regDCHUBBUB_ARB_DF_REQ_OUTSTAND_BASE_IDX 2
+#define regDCHUBBUB_ARB_SAT_LEVEL 0x04fa
+#define regDCHUBBUB_ARB_SAT_LEVEL_BASE_IDX 2
+#define regDCHUBBUB_ARB_QOS_FORCE 0x04fb
+#define regDCHUBBUB_ARB_QOS_FORCE_BASE_IDX 2
+#define regDCHUBBUB_ARB_DRAM_STATE_CNTL 0x04fc
+#define regDCHUBBUB_ARB_DRAM_STATE_CNTL_BASE_IDX 2
+#define regDCHUBBUB_ARB_USR_RETRAINING_CNTL 0x04fd
+#define regDCHUBBUB_ARB_USR_RETRAINING_CNTL_BASE_IDX 2
+#define regDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A 0x04fe
+#define regDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A 0x04ff
+#define regDCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A 0x0500
+#define regDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A 0x0501
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A 0x0502
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A 0x0503
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A 0x0504
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A 0x0505
+#define regDCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A 0x0506
+#define regDCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_FRAC_URG_BW_NOM_A 0x0507
+#define regDCHUBBUB_ARB_FRAC_URG_BW_NOM_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_FRAC_URG_BW_FLIP_A 0x0508
+#define regDCHUBBUB_ARB_FRAC_URG_BW_FLIP_A_BASE_IDX 2
+#define regDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B 0x0509
+#define regDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B 0x050a
+#define regDCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B 0x050b
+#define regDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B 0x050c
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B 0x050d
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B 0x050e
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B 0x050f
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B 0x0510
+#define regDCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B 0x0511
+#define regDCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_FRAC_URG_BW_NOM_B 0x0512
+#define regDCHUBBUB_ARB_FRAC_URG_BW_NOM_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_FRAC_URG_BW_FLIP_B 0x0513
+#define regDCHUBBUB_ARB_FRAC_URG_BW_FLIP_B_BASE_IDX 2
+#define regDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C 0x0514
+#define regDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C 0x0515
+#define regDCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C 0x0516
+#define regDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C 0x0517
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C 0x0518
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C 0x0519
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C 0x051a
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C 0x051b
+#define regDCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C 0x051c
+#define regDCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_FRAC_URG_BW_NOM_C 0x051d
+#define regDCHUBBUB_ARB_FRAC_URG_BW_NOM_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_FRAC_URG_BW_FLIP_C 0x051e
+#define regDCHUBBUB_ARB_FRAC_URG_BW_FLIP_C_BASE_IDX 2
+#define regDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D 0x051f
+#define regDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D 0x0520
+#define regDCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D 0x0521
+#define regDCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D 0x0522
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D 0x0523
+#define regDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D 0x0524
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D 0x0525
+#define regDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D 0x0526
+#define regDCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D 0x0527
+#define regDCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_FRAC_URG_BW_NOM_D 0x0528
+#define regDCHUBBUB_ARB_FRAC_URG_BW_NOM_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_FRAC_URG_BW_FLIP_D 0x0529
+#define regDCHUBBUB_ARB_FRAC_URG_BW_FLIP_D_BASE_IDX 2
+#define regDCHUBBUB_ARB_HOSTVM_CNTL 0x052a
+#define regDCHUBBUB_ARB_HOSTVM_CNTL_BASE_IDX 2
+#define regDCHUBBUB_ARB_WATERMARK_CHANGE_CNTL 0x052b
+#define regDCHUBBUB_ARB_WATERMARK_CHANGE_CNTL_BASE_IDX 2
+#define regDCHUBBUB_ARB_MALL_CNTL 0x052c
+#define regDCHUBBUB_ARB_MALL_CNTL_BASE_IDX 2
+#define regDCHUBBUB_ARB_TIMEOUT_ENABLE 0x052d
+#define regDCHUBBUB_ARB_TIMEOUT_ENABLE_BASE_IDX 2
+#define regDCHUBBUB_GLOBAL_TIMER_CNTL 0x052e
+#define regDCHUBBUB_GLOBAL_TIMER_CNTL_BASE_IDX 2
+#define regSURFACE_CHECK0_ADDRESS_LSB 0x052f
+#define regSURFACE_CHECK0_ADDRESS_LSB_BASE_IDX 2
+#define regSURFACE_CHECK0_ADDRESS_MSB 0x0530
+#define regSURFACE_CHECK0_ADDRESS_MSB_BASE_IDX 2
+#define regSURFACE_CHECK1_ADDRESS_LSB 0x0531
+#define regSURFACE_CHECK1_ADDRESS_LSB_BASE_IDX 2
+#define regSURFACE_CHECK1_ADDRESS_MSB 0x0532
+#define regSURFACE_CHECK1_ADDRESS_MSB_BASE_IDX 2
+#define regSURFACE_CHECK2_ADDRESS_LSB 0x0533
+#define regSURFACE_CHECK2_ADDRESS_LSB_BASE_IDX 2
+#define regSURFACE_CHECK2_ADDRESS_MSB 0x0534
+#define regSURFACE_CHECK2_ADDRESS_MSB_BASE_IDX 2
+#define regSURFACE_CHECK3_ADDRESS_LSB 0x0535
+#define regSURFACE_CHECK3_ADDRESS_LSB_BASE_IDX 2
+#define regSURFACE_CHECK3_ADDRESS_MSB 0x0536
+#define regSURFACE_CHECK3_ADDRESS_MSB_BASE_IDX 2
+#define regVTG0_CONTROL 0x0537
+#define regVTG0_CONTROL_BASE_IDX 2
+#define regVTG1_CONTROL 0x0538
+#define regVTG1_CONTROL_BASE_IDX 2
+#define regVTG2_CONTROL 0x0539
+#define regVTG2_CONTROL_BASE_IDX 2
+#define regVTG3_CONTROL 0x053a
+#define regVTG3_CONTROL_BASE_IDX 2
+#define regDCHUBBUB_SOFT_RESET 0x053b
+#define regDCHUBBUB_SOFT_RESET_BASE_IDX 2
+#define regDCHUBBUB_CLOCK_CNTL 0x053c
+#define regDCHUBBUB_CLOCK_CNTL_BASE_IDX 2
+#define regDCFCLK_CNTL 0x053d
+#define regDCFCLK_CNTL_BASE_IDX 2
+#define regDCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL 0x053e
+#define regDCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL_BASE_IDX 2
+#define regDCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2 0x053f
+#define regDCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2_BASE_IDX 2
+#define regDCHUBBUB_VLINE_SNAPSHOT 0x0540
+#define regDCHUBBUB_VLINE_SNAPSHOT_BASE_IDX 2
+#define regDCHUBBUB_CTRL_STATUS 0x0541
+#define regDCHUBBUB_CTRL_STATUS_BASE_IDX 2
+#define regDCHUBBUB_TIMEOUT_DETECTION_CTRL1 0x0547
+#define regDCHUBBUB_TIMEOUT_DETECTION_CTRL1_BASE_IDX 2
+#define regDCHUBBUB_TIMEOUT_DETECTION_CTRL2 0x0548
+#define regDCHUBBUB_TIMEOUT_DETECTION_CTRL2_BASE_IDX 2
+#define regDCHUBBUB_TIMEOUT_INTERRUPT_STATUS 0x0549
+#define regDCHUBBUB_TIMEOUT_INTERRUPT_STATUS_BASE_IDX 2
+#define regFMON_CTRL 0x054a
+#define regFMON_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dchubbubl_hubbub_sdpif_dispdec
+// base address: 0x0
+#define regDCHUBBUB_SDPIF_CFG0 0x046f
+#define regDCHUBBUB_SDPIF_CFG0_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_CFG1 0x0470
+#define regDCHUBBUB_SDPIF_CFG1_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_CFG2 0x0471
+#define regDCHUBBUB_SDPIF_CFG2_BASE_IDX 2
+#define regVM_REQUEST_PHYSICAL 0x0472
+#define regVM_REQUEST_PHYSICAL_BASE_IDX 2
+#define regDCHUBBUB_FORCE_IO_STATUS_0 0x0473
+#define regDCHUBBUB_FORCE_IO_STATUS_0_BASE_IDX 2
+#define regDCHUBBUB_FORCE_IO_STATUS_1 0x0474
+#define regDCHUBBUB_FORCE_IO_STATUS_1_BASE_IDX 2
+#define regDCN_VM_FB_LOCATION_BASE 0x0475
+#define regDCN_VM_FB_LOCATION_BASE_BASE_IDX 2
+#define regDCN_VM_FB_LOCATION_TOP 0x0476
+#define regDCN_VM_FB_LOCATION_TOP_BASE_IDX 2
+#define regDCN_VM_FB_OFFSET 0x0477
+#define regDCN_VM_FB_OFFSET_BASE_IDX 2
+#define regDCN_VM_AGP_BOT 0x0478
+#define regDCN_VM_AGP_BOT_BASE_IDX 2
+#define regDCN_VM_AGP_TOP 0x0479
+#define regDCN_VM_AGP_TOP_BASE_IDX 2
+#define regDCN_VM_AGP_BASE 0x047a
+#define regDCN_VM_AGP_BASE_BASE_IDX 2
+#define regDCN_VM_LOCAL_HBM_ADDRESS_START 0x047b
+#define regDCN_VM_LOCAL_HBM_ADDRESS_START_BASE_IDX 2
+#define regDCN_VM_LOCAL_HBM_ADDRESS_END 0x047c
+#define regDCN_VM_LOCAL_HBM_ADDRESS_END_BASE_IDX 2
+#define regDCN_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL 0x047d
+#define regDCN_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_PIPE_SEC_LVL 0x047e
+#define regDCHUBBUB_SDPIF_PIPE_SEC_LVL_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_PIPE_NOALLOC 0x047f
+#define regDCHUBBUB_SDPIF_PIPE_NOALLOC_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL 0x0480
+#define regDCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL 0x0481
+#define regDCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL 0x0482
+#define regDCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL 0x0483
+#define regDCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL_BASE_IDX 2
+#define regSDPIF_REQUEST_RATE_LIMIT 0x0484
+#define regSDPIF_REQUEST_RATE_LIMIT_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_MEM_PWR_CTRL 0x0485
+#define regDCHUBBUB_SDPIF_MEM_PWR_CTRL_BASE_IDX 2
+#define regDCHUBBUB_SDPIF_MEM_PWR_STATUS 0x0486
+#define regDCHUBBUB_SDPIF_MEM_PWR_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dchubbubl_hubbub_ret_path_dispdec
+// base address: 0x0
+#define regDCHUBBUB_RET_PATH_MEM_PWR_CTRL 0x04af
+#define regDCHUBBUB_RET_PATH_MEM_PWR_CTRL_BASE_IDX 2
+#define regDCHUBBUB_RET_PATH_MEM_PWR_STATUS 0x04b0
+#define regDCHUBBUB_RET_PATH_MEM_PWR_STATUS_BASE_IDX 2
+#define regDCHUBBUB_CRC_CTRL 0x04b1
+#define regDCHUBBUB_CRC_CTRL_BASE_IDX 2
+#define regDCHUBBUB_CRC0_VAL_R 0x04b2
+#define regDCHUBBUB_CRC0_VAL_R_BASE_IDX 2
+#define regDCHUBBUB_CRC0_VAL_G 0x04b3
+#define regDCHUBBUB_CRC0_VAL_G_BASE_IDX 2
+#define regDCHUBBUB_CRC0_VAL_B 0x04b4
+#define regDCHUBBUB_CRC0_VAL_B_BASE_IDX 2
+#define regDCHUBBUB_CRC0_VAL_A 0x04b5
+#define regDCHUBBUB_CRC0_VAL_A_BASE_IDX 2
+#define regDCHUBBUB_DCC_STAT_CNTL 0x04b6
+#define regDCHUBBUB_DCC_STAT_CNTL_BASE_IDX 2
+#define regDCHUBBUB_DCC_STAT0 0x04b7
+#define regDCHUBBUB_DCC_STAT0_BASE_IDX 2
+#define regDCHUBBUB_DCC_STAT1 0x04b8
+#define regDCHUBBUB_DCC_STAT1_BASE_IDX 2
+#define regDCHUBBUB_DCC_STAT2 0x04b9
+#define regDCHUBBUB_DCC_STAT2_BASE_IDX 2
+#define regDCHUBBUB_COMPBUF_CTRL 0x04ba
+#define regDCHUBBUB_COMPBUF_CTRL_BASE_IDX 2
+#define regDCHUBBUB_DET0_CTRL 0x04bb
+#define regDCHUBBUB_DET0_CTRL_BASE_IDX 2
+#define regDCHUBBUB_DET1_CTRL 0x04bc
+#define regDCHUBBUB_DET1_CTRL_BASE_IDX 2
+#define regDCHUBBUB_DET2_CTRL 0x04bd
+#define regDCHUBBUB_DET2_CTRL_BASE_IDX 2
+#define regDCHUBBUB_DET3_CTRL 0x04be
+#define regDCHUBBUB_DET3_CTRL_BASE_IDX 2
+#define regDCHUBBUB_MEM_PWR_MODE_CTRL 0x04c0
+#define regDCHUBBUB_MEM_PWR_MODE_CTRL_BASE_IDX 2
+#define regCOMPBUF_MEM_PWR_CTRL_1 0x04c1
+#define regCOMPBUF_MEM_PWR_CTRL_1_BASE_IDX 2
+#define regCOMPBUF_MEM_PWR_CTRL_2 0x04c2
+#define regCOMPBUF_MEM_PWR_CTRL_2_BASE_IDX 2
+#define regDCHUBBUB_MEM_PWR_STATUS 0x04c3
+#define regDCHUBBUB_MEM_PWR_STATUS_BASE_IDX 2
+#define regCOMPBUF_RESERVED_SPACE 0x04c4
+#define regCOMPBUF_RESERVED_SPACE_BASE_IDX 2
+#define regDCHUBBUB_DEBUG_CTRL_0 0x04c5
+#define regDCHUBBUB_DEBUG_CTRL_0_BASE_IDX 2
+#define regDCHUBBUB_CRC1_VAL_R 0x04ca
+#define regDCHUBBUB_CRC1_VAL_R_BASE_IDX 2
+#define regDCHUBBUB_CRC1_VAL_G 0x04cb
+#define regDCHUBBUB_CRC1_VAL_G_BASE_IDX 2
+#define regDCHUBBUB_CRC1_VAL_B 0x04cc
+#define regDCHUBBUB_CRC1_VAL_B_BASE_IDX 2
+#define regDCHUBBUB_CRC1_VAL_A 0x04cd
+#define regDCHUBBUB_CRC1_VAL_A_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dchubbubl_hubbub_vmrq_if_dispdec
+// base address: 0x0
+#define regDCN_VM_CONTEXT0_CNTL 0x0559
+#define regDCN_VM_CONTEXT0_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 0x055a
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 0x055b
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 0x055c
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 0x055d
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 0x055e
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 0x055f
+#define regDCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT1_CNTL 0x0560
+#define regDCN_VM_CONTEXT1_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32 0x0561
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 0x0562
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32 0x0563
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32 0x0564
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32 0x0565
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32 0x0566
+#define regDCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT2_CNTL 0x0567
+#define regDCN_VM_CONTEXT2_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32 0x0568
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32 0x0569
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32 0x056a
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32 0x056b
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32 0x056c
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32 0x056d
+#define regDCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT3_CNTL 0x056e
+#define regDCN_VM_CONTEXT3_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32 0x056f
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32 0x0570
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32 0x0571
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32 0x0572
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32 0x0573
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32 0x0574
+#define regDCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT4_CNTL 0x0575
+#define regDCN_VM_CONTEXT4_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32 0x0576
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32 0x0577
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32 0x0578
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32 0x0579
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32 0x057a
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32 0x057b
+#define regDCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT5_CNTL 0x057c
+#define regDCN_VM_CONTEXT5_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32 0x057d
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32 0x057e
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32 0x057f
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32 0x0580
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32 0x0581
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32 0x0582
+#define regDCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT6_CNTL 0x0583
+#define regDCN_VM_CONTEXT6_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32 0x0584
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32 0x0585
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32 0x0586
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32 0x0587
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32 0x0588
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32 0x0589
+#define regDCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT7_CNTL 0x058a
+#define regDCN_VM_CONTEXT7_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32 0x058b
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32 0x058c
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32 0x058d
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32 0x058e
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32 0x058f
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32 0x0590
+#define regDCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT8_CNTL 0x0591
+#define regDCN_VM_CONTEXT8_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32 0x0592
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32 0x0593
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32 0x0594
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32 0x0595
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32 0x0596
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32 0x0597
+#define regDCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT9_CNTL 0x0598
+#define regDCN_VM_CONTEXT9_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32 0x0599
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32 0x059a
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32 0x059b
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32 0x059c
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32 0x059d
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32 0x059e
+#define regDCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT10_CNTL 0x059f
+#define regDCN_VM_CONTEXT10_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32 0x05a0
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32 0x05a1
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32 0x05a2
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32 0x05a3
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32 0x05a4
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32 0x05a5
+#define regDCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT11_CNTL 0x05a6
+#define regDCN_VM_CONTEXT11_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32 0x05a7
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32 0x05a8
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32 0x05a9
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32 0x05aa
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32 0x05ab
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32 0x05ac
+#define regDCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT12_CNTL 0x05ad
+#define regDCN_VM_CONTEXT12_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32 0x05ae
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32 0x05af
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32 0x05b0
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32 0x05b1
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32 0x05b2
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32 0x05b3
+#define regDCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT13_CNTL 0x05b4
+#define regDCN_VM_CONTEXT13_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32 0x05b5
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32 0x05b6
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32 0x05b7
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32 0x05b8
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32 0x05b9
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32 0x05ba
+#define regDCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT14_CNTL 0x05bb
+#define regDCN_VM_CONTEXT14_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32 0x05bc
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32 0x05bd
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32 0x05be
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32 0x05bf
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32 0x05c0
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32 0x05c1
+#define regDCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT15_CNTL 0x05c2
+#define regDCN_VM_CONTEXT15_CNTL_BASE_IDX 2
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32 0x05c3
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32 0x05c4
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32 0x05c5
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32 0x05c6
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32 0x05c7
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 2
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32 0x05c8
+#define regDCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 2
+#define regDCN_VM_DEFAULT_ADDR_MSB 0x05c9
+#define regDCN_VM_DEFAULT_ADDR_MSB_BASE_IDX 2
+#define regDCN_VM_DEFAULT_ADDR_LSB 0x05ca
+#define regDCN_VM_DEFAULT_ADDR_LSB_BASE_IDX 2
+#define regDCN_VM_FAULT_CNTL 0x05cb
+#define regDCN_VM_FAULT_CNTL_BASE_IDX 2
+#define regDCN_VM_FAULT_STATUS 0x05cc
+#define regDCN_VM_FAULT_STATUS_BASE_IDX 2
+#define regDCN_VM_FAULT_ADDR_MSB 0x05cd
+#define regDCN_VM_FAULT_ADDR_MSB_BASE_IDX 2
+#define regDCN_VM_FAULT_ADDR_LSB 0x05ce
+#define regDCN_VM_FAULT_ADDR_LSB_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dchubbubl_dchubbub_dcperfmon_dc_perfmon_dispdec
+// base address: 0x1534
+#define regDC_PERFMON6_PERFCOUNTER_CNTL 0x054d
+#define regDC_PERFMON6_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON6_PERFCOUNTER_CNTL2 0x054e
+#define regDC_PERFMON6_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON6_PERFCOUNTER_STATE 0x054f
+#define regDC_PERFMON6_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON6_PERFMON_CNTL 0x0550
+#define regDC_PERFMON6_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON6_PERFMON_CNTL2 0x0551
+#define regDC_PERFMON6_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON6_PERFMON_CVALUE_INT_MISC 0x0552
+#define regDC_PERFMON6_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON6_PERFMON_CVALUE_LOW 0x0553
+#define regDC_PERFMON6_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON6_PERFMON_HI 0x0554
+#define regDC_PERFMON6_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON6_PERFMON_LOW 0x0555
+#define regDC_PERFMON6_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubp_dispdec
+// base address: 0x0
+#define regHUBP0_DCSURF_SURFACE_CONFIG 0x05e5
+#define regHUBP0_DCSURF_SURFACE_CONFIG_BASE_IDX 2
+#define regHUBP0_DCSURF_ADDR_CONFIG 0x05e6
+#define regHUBP0_DCSURF_ADDR_CONFIG_BASE_IDX 2
+#define regHUBP0_DCSURF_TILING_CONFIG 0x05e7
+#define regHUBP0_DCSURF_TILING_CONFIG_BASE_IDX 2
+#define regHUBP0_DCSURF_PRI_VIEWPORT_START 0x05e9
+#define regHUBP0_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2
+#define regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x05ea
+#define regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
+#define regHUBP0_DCSURF_PRI_VIEWPORT_START_C 0x05eb
+#define regHUBP0_DCSURF_PRI_VIEWPORT_START_C_BASE_IDX 2
+#define regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C 0x05ec
+#define regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define regHUBP0_DCSURF_SEC_VIEWPORT_START 0x05ed
+#define regHUBP0_DCSURF_SEC_VIEWPORT_START_BASE_IDX 2
+#define regHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION 0x05ee
+#define regHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_BASE_IDX 2
+#define regHUBP0_DCSURF_SEC_VIEWPORT_START_C 0x05ef
+#define regHUBP0_DCSURF_SEC_VIEWPORT_START_C_BASE_IDX 2
+#define regHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C 0x05f0
+#define regHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define regHUBP0_DCHUBP_REQ_SIZE_CONFIG 0x05f1
+#define regHUBP0_DCHUBP_REQ_SIZE_CONFIG_BASE_IDX 2
+#define regHUBP0_DCHUBP_REQ_SIZE_CONFIG_C 0x05f2
+#define regHUBP0_DCHUBP_REQ_SIZE_CONFIG_C_BASE_IDX 2
+#define regHUBP0_DCHUBP_CNTL 0x05f3
+#define regHUBP0_DCHUBP_CNTL_BASE_IDX 2
+#define regHUBP0_HUBP_CLK_CNTL 0x05f4
+#define regHUBP0_HUBP_CLK_CNTL_BASE_IDX 2
+#define regHUBP0_DCHUBP_VMPG_CONFIG 0x05f5
+#define regHUBP0_DCHUBP_VMPG_CONFIG_BASE_IDX 2
+#define regHUBP0_DCHUBP_MALL_CONFIG 0x05f6
+#define regHUBP0_DCHUBP_MALL_CONFIG_BASE_IDX 2
+#define regHUBP0_DCHUBP_MALL_SUB_VP 0x05f7
+#define regHUBP0_DCHUBP_MALL_SUB_VP_BASE_IDX 2
+#define regHUBP0_HUBPREQ_DEBUG_DB 0x05f8
+#define regHUBP0_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define regHUBP0_HUBPREQ_DEBUG 0x05f9
+#define regHUBP0_HUBPREQ_DEBUG_BASE_IDX 2
+#define regHUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x05fd
+#define regHUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
+#define regHUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x05fe
+#define regHUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK_BASE_IDX 2
+#define regHUBP0_HUBP_MALL_STATUS 0x05ff
+#define regHUBP0_HUBP_MALL_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubpreq_dispdec
+// base address: 0x0
+#define regHUBPREQ0_DCSURF_SURFACE_PITCH 0x0607
+#define regHUBPREQ0_DCSURF_SURFACE_PITCH_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_PITCH_C 0x0608
+#define regHUBPREQ0_DCSURF_SURFACE_PITCH_C_BASE_IDX 2
+#define regHUBPREQ0_VMID_SETTINGS_0 0x0609
+#define regHUBPREQ0_VMID_SETTINGS_0_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS 0x060a
+#define regHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x060b
+#define regHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C 0x060c
+#define regHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x060d
+#define regHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS 0x060e
+#define regHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH 0x060f
+#define regHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C 0x0610
+#define regHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x0611
+#define regHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS 0x0612
+#define regHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH 0x0613
+#define regHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C 0x0614
+#define regHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C 0x0615
+#define regHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS 0x0616
+#define regHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH 0x0617
+#define regHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C 0x0618
+#define regHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C 0x0619
+#define regHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_CONTROL 0x061a
+#define regHUBPREQ0_DCSURF_SURFACE_CONTROL_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_FLIP_CONTROL 0x061b
+#define regHUBPREQ0_DCSURF_FLIP_CONTROL_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_FLIP_CONTROL2 0x061c
+#define regHUBPREQ0_DCSURF_FLIP_CONTROL2_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT 0x061f
+#define regHUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_INUSE 0x0620
+#define regHUBPREQ0_DCSURF_SURFACE_INUSE_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH 0x0621
+#define regHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_INUSE_C 0x0622
+#define regHUBPREQ0_DCSURF_SURFACE_INUSE_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C 0x0623
+#define regHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE 0x0624
+#define regHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH 0x0625
+#define regHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C 0x0626
+#define regHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C_BASE_IDX 2
+#define regHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C 0x0627
+#define regHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_BASE_IDX 2
+#define regHUBPREQ0_DCN_EXPANSION_MODE 0x0628
+#define regHUBPREQ0_DCN_EXPANSION_MODE_BASE_IDX 2
+#define regHUBPREQ0_DCN_TTU_QOS_WM 0x0629
+#define regHUBPREQ0_DCN_TTU_QOS_WM_BASE_IDX 2
+#define regHUBPREQ0_DCN_GLOBAL_TTU_CNTL 0x062a
+#define regHUBPREQ0_DCN_GLOBAL_TTU_CNTL_BASE_IDX 2
+#define regHUBPREQ0_DCN_SURF0_TTU_CNTL0 0x062b
+#define regHUBPREQ0_DCN_SURF0_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ0_DCN_SURF0_TTU_CNTL1 0x062c
+#define regHUBPREQ0_DCN_SURF0_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ0_DCN_SURF1_TTU_CNTL0 0x062d
+#define regHUBPREQ0_DCN_SURF1_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ0_DCN_SURF1_TTU_CNTL1 0x062e
+#define regHUBPREQ0_DCN_SURF1_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ0_DCN_CUR0_TTU_CNTL0 0x062f
+#define regHUBPREQ0_DCN_CUR0_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ0_DCN_CUR0_TTU_CNTL1 0x0630
+#define regHUBPREQ0_DCN_CUR0_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ0_DCN_CUR1_TTU_CNTL0 0x0631
+#define regHUBPREQ0_DCN_CUR1_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ0_DCN_CUR1_TTU_CNTL1 0x0632
+#define regHUBPREQ0_DCN_CUR1_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ0_DCN_DMDATA_VM_CNTL 0x0633
+#define regHUBPREQ0_DCN_DMDATA_VM_CNTL_BASE_IDX 2
+#define regHUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR 0x0634
+#define regHUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_BASE_IDX 2
+#define regHUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR 0x0635
+#define regHUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_BASE_IDX 2
+#define regHUBPREQ0_DCN_VM_MX_L1_TLB_CNTL 0x0642
+#define regHUBPREQ0_DCN_VM_MX_L1_TLB_CNTL_BASE_IDX 2
+#define regHUBPREQ0_BLANK_OFFSET_0 0x0643
+#define regHUBPREQ0_BLANK_OFFSET_0_BASE_IDX 2
+#define regHUBPREQ0_BLANK_OFFSET_1 0x0644
+#define regHUBPREQ0_BLANK_OFFSET_1_BASE_IDX 2
+#define regHUBPREQ0_DST_DIMENSIONS 0x0645
+#define regHUBPREQ0_DST_DIMENSIONS_BASE_IDX 2
+#define regHUBPREQ0_DST_AFTER_SCALER 0x0646
+#define regHUBPREQ0_DST_AFTER_SCALER_BASE_IDX 2
+#define regHUBPREQ0_PREFETCH_SETTINGS 0x0647
+#define regHUBPREQ0_PREFETCH_SETTINGS_BASE_IDX 2
+#define regHUBPREQ0_PREFETCH_SETTINGS_C 0x0648
+#define regHUBPREQ0_PREFETCH_SETTINGS_C_BASE_IDX 2
+#define regHUBPREQ0_VBLANK_PARAMETERS_0 0x0649
+#define regHUBPREQ0_VBLANK_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ0_VBLANK_PARAMETERS_1 0x064a
+#define regHUBPREQ0_VBLANK_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ0_VBLANK_PARAMETERS_2 0x064b
+#define regHUBPREQ0_VBLANK_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ0_VBLANK_PARAMETERS_3 0x064c
+#define regHUBPREQ0_VBLANK_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ0_VBLANK_PARAMETERS_4 0x064d
+#define regHUBPREQ0_VBLANK_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ0_FLIP_PARAMETERS_0 0x064e
+#define regHUBPREQ0_FLIP_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ0_FLIP_PARAMETERS_1 0x064f
+#define regHUBPREQ0_FLIP_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ0_FLIP_PARAMETERS_2 0x0650
+#define regHUBPREQ0_FLIP_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ0_NOM_PARAMETERS_0 0x0651
+#define regHUBPREQ0_NOM_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ0_NOM_PARAMETERS_1 0x0652
+#define regHUBPREQ0_NOM_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ0_NOM_PARAMETERS_2 0x0653
+#define regHUBPREQ0_NOM_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ0_NOM_PARAMETERS_3 0x0654
+#define regHUBPREQ0_NOM_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ0_NOM_PARAMETERS_4 0x0655
+#define regHUBPREQ0_NOM_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ0_NOM_PARAMETERS_5 0x0656
+#define regHUBPREQ0_NOM_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ0_NOM_PARAMETERS_6 0x0657
+#define regHUBPREQ0_NOM_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ0_NOM_PARAMETERS_7 0x0658
+#define regHUBPREQ0_NOM_PARAMETERS_7_BASE_IDX 2
+#define regHUBPREQ0_PER_LINE_DELIVERY_PRE 0x0659
+#define regHUBPREQ0_PER_LINE_DELIVERY_PRE_BASE_IDX 2
+#define regHUBPREQ0_PER_LINE_DELIVERY 0x065a
+#define regHUBPREQ0_PER_LINE_DELIVERY_BASE_IDX 2
+#define regHUBPREQ0_CURSOR_SETTINGS 0x065b
+#define regHUBPREQ0_CURSOR_SETTINGS_BASE_IDX 2
+#define regHUBPREQ0_REF_FREQ_TO_PIX_FREQ 0x065c
+#define regHUBPREQ0_REF_FREQ_TO_PIX_FREQ_BASE_IDX 2
+#define regHUBPREQ0_DST_Y_DELTA_DRQ_LIMIT 0x065d
+#define regHUBPREQ0_DST_Y_DELTA_DRQ_LIMIT_BASE_IDX 2
+#define regHUBPREQ0_HUBPREQ_MEM_PWR_CTRL 0x065e
+#define regHUBPREQ0_HUBPREQ_MEM_PWR_CTRL_BASE_IDX 2
+#define regHUBPREQ0_HUBPREQ_MEM_PWR_STATUS 0x065f
+#define regHUBPREQ0_HUBPREQ_MEM_PWR_STATUS_BASE_IDX 2
+#define regHUBPREQ0_VBLANK_PARAMETERS_5 0x0662
+#define regHUBPREQ0_VBLANK_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ0_VBLANK_PARAMETERS_6 0x0663
+#define regHUBPREQ0_VBLANK_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ0_FLIP_PARAMETERS_3 0x0664
+#define regHUBPREQ0_FLIP_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ0_FLIP_PARAMETERS_4 0x0665
+#define regHUBPREQ0_FLIP_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ0_FLIP_PARAMETERS_5 0x0666
+#define regHUBPREQ0_FLIP_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ0_FLIP_PARAMETERS_6 0x0667
+#define regHUBPREQ0_FLIP_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ0_UCLK_PSTATE_FORCE 0x0668
+#define regHUBPREQ0_UCLK_PSTATE_FORCE_BASE_IDX 2
+#define regHUBPREQ0_HUBPREQ_STATUS_REG0 0x0669
+#define regHUBPREQ0_HUBPREQ_STATUS_REG0_BASE_IDX 2
+#define regHUBPREQ0_HUBPREQ_STATUS_REG1 0x066a
+#define regHUBPREQ0_HUBPREQ_STATUS_REG1_BASE_IDX 2
+#define regHUBPREQ0_HUBPREQ_STATUS_REG2 0x066b
+#define regHUBPREQ0_HUBPREQ_STATUS_REG2_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubpret_dispdec
+// base address: 0x0
+#define regHUBPRET0_HUBPRET_CONTROL 0x066c
+#define regHUBPRET0_HUBPRET_CONTROL_BASE_IDX 2
+#define regHUBPRET0_HUBPRET_MEM_PWR_CTRL 0x066d
+#define regHUBPRET0_HUBPRET_MEM_PWR_CTRL_BASE_IDX 2
+#define regHUBPRET0_HUBPRET_MEM_PWR_STATUS 0x066e
+#define regHUBPRET0_HUBPRET_MEM_PWR_STATUS_BASE_IDX 2
+#define regHUBPRET0_HUBPRET_READ_LINE_CTRL0 0x066f
+#define regHUBPRET0_HUBPRET_READ_LINE_CTRL0_BASE_IDX 2
+#define regHUBPRET0_HUBPRET_READ_LINE_CTRL1 0x0670
+#define regHUBPRET0_HUBPRET_READ_LINE_CTRL1_BASE_IDX 2
+#define regHUBPRET0_HUBPRET_READ_LINE0 0x0671
+#define regHUBPRET0_HUBPRET_READ_LINE0_BASE_IDX 2
+#define regHUBPRET0_HUBPRET_READ_LINE1 0x0672
+#define regHUBPRET0_HUBPRET_READ_LINE1_BASE_IDX 2
+#define regHUBPRET0_HUBPRET_INTERRUPT 0x0673
+#define regHUBPRET0_HUBPRET_INTERRUPT_BASE_IDX 2
+#define regHUBPRET0_HUBPRET_READ_LINE_VALUE 0x0674
+#define regHUBPRET0_HUBPRET_READ_LINE_VALUE_BASE_IDX 2
+#define regHUBPRET0_HUBPRET_READ_LINE_STATUS 0x0675
+#define regHUBPRET0_HUBPRET_READ_LINE_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_cursor0_dispdec
+// base address: 0x0
+#define regCURSOR0_0_CURSOR_CONTROL 0x0678
+#define regCURSOR0_0_CURSOR_CONTROL_BASE_IDX 2
+#define regCURSOR0_0_CURSOR_SURFACE_ADDRESS 0x0679
+#define regCURSOR0_0_CURSOR_SURFACE_ADDRESS_BASE_IDX 2
+#define regCURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH 0x067a
+#define regCURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regCURSOR0_0_CURSOR_SIZE 0x067b
+#define regCURSOR0_0_CURSOR_SIZE_BASE_IDX 2
+#define regCURSOR0_0_CURSOR_POSITION 0x067c
+#define regCURSOR0_0_CURSOR_POSITION_BASE_IDX 2
+#define regCURSOR0_0_CURSOR_HOT_SPOT 0x067d
+#define regCURSOR0_0_CURSOR_HOT_SPOT_BASE_IDX 2
+#define regCURSOR0_0_CURSOR_STEREO_CONTROL 0x067e
+#define regCURSOR0_0_CURSOR_STEREO_CONTROL_BASE_IDX 2
+#define regCURSOR0_0_CURSOR_DST_OFFSET 0x067f
+#define regCURSOR0_0_CURSOR_DST_OFFSET_BASE_IDX 2
+#define regCURSOR0_0_CURSOR_MEM_PWR_CTRL 0x0680
+#define regCURSOR0_0_CURSOR_MEM_PWR_CTRL_BASE_IDX 2
+#define regCURSOR0_0_CURSOR_MEM_PWR_STATUS 0x0681
+#define regCURSOR0_0_CURSOR_MEM_PWR_STATUS_BASE_IDX 2
+#define regCURSOR0_0_DMDATA_ADDRESS_HIGH 0x0682
+#define regCURSOR0_0_DMDATA_ADDRESS_HIGH_BASE_IDX 2
+#define regCURSOR0_0_DMDATA_ADDRESS_LOW 0x0683
+#define regCURSOR0_0_DMDATA_ADDRESS_LOW_BASE_IDX 2
+#define regCURSOR0_0_DMDATA_CNTL 0x0684
+#define regCURSOR0_0_DMDATA_CNTL_BASE_IDX 2
+#define regCURSOR0_0_DMDATA_QOS_CNTL 0x0685
+#define regCURSOR0_0_DMDATA_QOS_CNTL_BASE_IDX 2
+#define regCURSOR0_0_DMDATA_STATUS 0x0686
+#define regCURSOR0_0_DMDATA_STATUS_BASE_IDX 2
+#define regCURSOR0_0_DMDATA_SW_CNTL 0x0687
+#define regCURSOR0_0_DMDATA_SW_CNTL_BASE_IDX 2
+#define regCURSOR0_0_DMDATA_SW_DATA 0x0688
+#define regCURSOR0_0_DMDATA_SW_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x1a74
+#define regDC_PERFMON7_PERFCOUNTER_CNTL 0x069d
+#define regDC_PERFMON7_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON7_PERFCOUNTER_CNTL2 0x069e
+#define regDC_PERFMON7_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON7_PERFCOUNTER_STATE 0x069f
+#define regDC_PERFMON7_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON7_PERFMON_CNTL 0x06a0
+#define regDC_PERFMON7_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON7_PERFMON_CNTL2 0x06a1
+#define regDC_PERFMON7_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON7_PERFMON_CVALUE_INT_MISC 0x06a2
+#define regDC_PERFMON7_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON7_PERFMON_CVALUE_LOW 0x06a3
+#define regDC_PERFMON7_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON7_PERFMON_HI 0x06a4
+#define regDC_PERFMON7_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON7_PERFMON_LOW 0x06a5
+#define regDC_PERFMON7_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubp_dispdec
+// base address: 0x370
+#define regHUBP1_DCSURF_SURFACE_CONFIG 0x06c1
+#define regHUBP1_DCSURF_SURFACE_CONFIG_BASE_IDX 2
+#define regHUBP1_DCSURF_ADDR_CONFIG 0x06c2
+#define regHUBP1_DCSURF_ADDR_CONFIG_BASE_IDX 2
+#define regHUBP1_DCSURF_TILING_CONFIG 0x06c3
+#define regHUBP1_DCSURF_TILING_CONFIG_BASE_IDX 2
+#define regHUBP1_DCSURF_PRI_VIEWPORT_START 0x06c5
+#define regHUBP1_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2
+#define regHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION 0x06c6
+#define regHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
+#define regHUBP1_DCSURF_PRI_VIEWPORT_START_C 0x06c7
+#define regHUBP1_DCSURF_PRI_VIEWPORT_START_C_BASE_IDX 2
+#define regHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C 0x06c8
+#define regHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define regHUBP1_DCSURF_SEC_VIEWPORT_START 0x06c9
+#define regHUBP1_DCSURF_SEC_VIEWPORT_START_BASE_IDX 2
+#define regHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION 0x06ca
+#define regHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_BASE_IDX 2
+#define regHUBP1_DCSURF_SEC_VIEWPORT_START_C 0x06cb
+#define regHUBP1_DCSURF_SEC_VIEWPORT_START_C_BASE_IDX 2
+#define regHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C 0x06cc
+#define regHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define regHUBP1_DCHUBP_REQ_SIZE_CONFIG 0x06cd
+#define regHUBP1_DCHUBP_REQ_SIZE_CONFIG_BASE_IDX 2
+#define regHUBP1_DCHUBP_REQ_SIZE_CONFIG_C 0x06ce
+#define regHUBP1_DCHUBP_REQ_SIZE_CONFIG_C_BASE_IDX 2
+#define regHUBP1_DCHUBP_CNTL 0x06cf
+#define regHUBP1_DCHUBP_CNTL_BASE_IDX 2
+#define regHUBP1_HUBP_CLK_CNTL 0x06d0
+#define regHUBP1_HUBP_CLK_CNTL_BASE_IDX 2
+#define regHUBP1_DCHUBP_VMPG_CONFIG 0x06d1
+#define regHUBP1_DCHUBP_VMPG_CONFIG_BASE_IDX 2
+#define regHUBP1_DCHUBP_MALL_CONFIG 0x06d2
+#define regHUBP1_DCHUBP_MALL_CONFIG_BASE_IDX 2
+#define regHUBP1_DCHUBP_MALL_SUB_VP 0x06d3
+#define regHUBP1_DCHUBP_MALL_SUB_VP_BASE_IDX 2
+#define regHUBP1_HUBPREQ_DEBUG_DB 0x06d4
+#define regHUBP1_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define regHUBP1_HUBPREQ_DEBUG 0x06d5
+#define regHUBP1_HUBPREQ_DEBUG_BASE_IDX 2
+#define regHUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x06d9
+#define regHUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
+#define regHUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x06da
+#define regHUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK_BASE_IDX 2
+#define regHUBP1_HUBP_MALL_STATUS 0x06db
+#define regHUBP1_HUBP_MALL_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubpreq_dispdec
+// base address: 0x370
+#define regHUBPREQ1_DCSURF_SURFACE_PITCH 0x06e3
+#define regHUBPREQ1_DCSURF_SURFACE_PITCH_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_PITCH_C 0x06e4
+#define regHUBPREQ1_DCSURF_SURFACE_PITCH_C_BASE_IDX 2
+#define regHUBPREQ1_VMID_SETTINGS_0 0x06e5
+#define regHUBPREQ1_VMID_SETTINGS_0_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS 0x06e6
+#define regHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x06e7
+#define regHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C 0x06e8
+#define regHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x06e9
+#define regHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS 0x06ea
+#define regHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH 0x06eb
+#define regHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C 0x06ec
+#define regHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x06ed
+#define regHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS 0x06ee
+#define regHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH 0x06ef
+#define regHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C 0x06f0
+#define regHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C 0x06f1
+#define regHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS 0x06f2
+#define regHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH 0x06f3
+#define regHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C 0x06f4
+#define regHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C 0x06f5
+#define regHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_CONTROL 0x06f6
+#define regHUBPREQ1_DCSURF_SURFACE_CONTROL_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_FLIP_CONTROL 0x06f7
+#define regHUBPREQ1_DCSURF_FLIP_CONTROL_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_FLIP_CONTROL2 0x06f8
+#define regHUBPREQ1_DCSURF_FLIP_CONTROL2_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT 0x06fb
+#define regHUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_INUSE 0x06fc
+#define regHUBPREQ1_DCSURF_SURFACE_INUSE_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH 0x06fd
+#define regHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_INUSE_C 0x06fe
+#define regHUBPREQ1_DCSURF_SURFACE_INUSE_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C 0x06ff
+#define regHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE 0x0700
+#define regHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH 0x0701
+#define regHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C 0x0702
+#define regHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C_BASE_IDX 2
+#define regHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C 0x0703
+#define regHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_BASE_IDX 2
+#define regHUBPREQ1_DCN_EXPANSION_MODE 0x0704
+#define regHUBPREQ1_DCN_EXPANSION_MODE_BASE_IDX 2
+#define regHUBPREQ1_DCN_TTU_QOS_WM 0x0705
+#define regHUBPREQ1_DCN_TTU_QOS_WM_BASE_IDX 2
+#define regHUBPREQ1_DCN_GLOBAL_TTU_CNTL 0x0706
+#define regHUBPREQ1_DCN_GLOBAL_TTU_CNTL_BASE_IDX 2
+#define regHUBPREQ1_DCN_SURF0_TTU_CNTL0 0x0707
+#define regHUBPREQ1_DCN_SURF0_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ1_DCN_SURF0_TTU_CNTL1 0x0708
+#define regHUBPREQ1_DCN_SURF0_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ1_DCN_SURF1_TTU_CNTL0 0x0709
+#define regHUBPREQ1_DCN_SURF1_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ1_DCN_SURF1_TTU_CNTL1 0x070a
+#define regHUBPREQ1_DCN_SURF1_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ1_DCN_CUR0_TTU_CNTL0 0x070b
+#define regHUBPREQ1_DCN_CUR0_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ1_DCN_CUR0_TTU_CNTL1 0x070c
+#define regHUBPREQ1_DCN_CUR0_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ1_DCN_CUR1_TTU_CNTL0 0x070d
+#define regHUBPREQ1_DCN_CUR1_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ1_DCN_CUR1_TTU_CNTL1 0x070e
+#define regHUBPREQ1_DCN_CUR1_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ1_DCN_DMDATA_VM_CNTL 0x070f
+#define regHUBPREQ1_DCN_DMDATA_VM_CNTL_BASE_IDX 2
+#define regHUBPREQ1_DCN_VM_SYSTEM_APERTURE_LOW_ADDR 0x0710
+#define regHUBPREQ1_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_BASE_IDX 2
+#define regHUBPREQ1_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR 0x0711
+#define regHUBPREQ1_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_BASE_IDX 2
+#define regHUBPREQ1_DCN_VM_MX_L1_TLB_CNTL 0x071e
+#define regHUBPREQ1_DCN_VM_MX_L1_TLB_CNTL_BASE_IDX 2
+#define regHUBPREQ1_BLANK_OFFSET_0 0x071f
+#define regHUBPREQ1_BLANK_OFFSET_0_BASE_IDX 2
+#define regHUBPREQ1_BLANK_OFFSET_1 0x0720
+#define regHUBPREQ1_BLANK_OFFSET_1_BASE_IDX 2
+#define regHUBPREQ1_DST_DIMENSIONS 0x0721
+#define regHUBPREQ1_DST_DIMENSIONS_BASE_IDX 2
+#define regHUBPREQ1_DST_AFTER_SCALER 0x0722
+#define regHUBPREQ1_DST_AFTER_SCALER_BASE_IDX 2
+#define regHUBPREQ1_PREFETCH_SETTINGS 0x0723
+#define regHUBPREQ1_PREFETCH_SETTINGS_BASE_IDX 2
+#define regHUBPREQ1_PREFETCH_SETTINGS_C 0x0724
+#define regHUBPREQ1_PREFETCH_SETTINGS_C_BASE_IDX 2
+#define regHUBPREQ1_VBLANK_PARAMETERS_0 0x0725
+#define regHUBPREQ1_VBLANK_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ1_VBLANK_PARAMETERS_1 0x0726
+#define regHUBPREQ1_VBLANK_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ1_VBLANK_PARAMETERS_2 0x0727
+#define regHUBPREQ1_VBLANK_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ1_VBLANK_PARAMETERS_3 0x0728
+#define regHUBPREQ1_VBLANK_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ1_VBLANK_PARAMETERS_4 0x0729
+#define regHUBPREQ1_VBLANK_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ1_FLIP_PARAMETERS_0 0x072a
+#define regHUBPREQ1_FLIP_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ1_FLIP_PARAMETERS_1 0x072b
+#define regHUBPREQ1_FLIP_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ1_FLIP_PARAMETERS_2 0x072c
+#define regHUBPREQ1_FLIP_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ1_NOM_PARAMETERS_0 0x072d
+#define regHUBPREQ1_NOM_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ1_NOM_PARAMETERS_1 0x072e
+#define regHUBPREQ1_NOM_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ1_NOM_PARAMETERS_2 0x072f
+#define regHUBPREQ1_NOM_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ1_NOM_PARAMETERS_3 0x0730
+#define regHUBPREQ1_NOM_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ1_NOM_PARAMETERS_4 0x0731
+#define regHUBPREQ1_NOM_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ1_NOM_PARAMETERS_5 0x0732
+#define regHUBPREQ1_NOM_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ1_NOM_PARAMETERS_6 0x0733
+#define regHUBPREQ1_NOM_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ1_NOM_PARAMETERS_7 0x0734
+#define regHUBPREQ1_NOM_PARAMETERS_7_BASE_IDX 2
+#define regHUBPREQ1_PER_LINE_DELIVERY_PRE 0x0735
+#define regHUBPREQ1_PER_LINE_DELIVERY_PRE_BASE_IDX 2
+#define regHUBPREQ1_PER_LINE_DELIVERY 0x0736
+#define regHUBPREQ1_PER_LINE_DELIVERY_BASE_IDX 2
+#define regHUBPREQ1_CURSOR_SETTINGS 0x0737
+#define regHUBPREQ1_CURSOR_SETTINGS_BASE_IDX 2
+#define regHUBPREQ1_REF_FREQ_TO_PIX_FREQ 0x0738
+#define regHUBPREQ1_REF_FREQ_TO_PIX_FREQ_BASE_IDX 2
+#define regHUBPREQ1_DST_Y_DELTA_DRQ_LIMIT 0x0739
+#define regHUBPREQ1_DST_Y_DELTA_DRQ_LIMIT_BASE_IDX 2
+#define regHUBPREQ1_HUBPREQ_MEM_PWR_CTRL 0x073a
+#define regHUBPREQ1_HUBPREQ_MEM_PWR_CTRL_BASE_IDX 2
+#define regHUBPREQ1_HUBPREQ_MEM_PWR_STATUS 0x073b
+#define regHUBPREQ1_HUBPREQ_MEM_PWR_STATUS_BASE_IDX 2
+#define regHUBPREQ1_VBLANK_PARAMETERS_5 0x073e
+#define regHUBPREQ1_VBLANK_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ1_VBLANK_PARAMETERS_6 0x073f
+#define regHUBPREQ1_VBLANK_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ1_FLIP_PARAMETERS_3 0x0740
+#define regHUBPREQ1_FLIP_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ1_FLIP_PARAMETERS_4 0x0741
+#define regHUBPREQ1_FLIP_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ1_FLIP_PARAMETERS_5 0x0742
+#define regHUBPREQ1_FLIP_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ1_FLIP_PARAMETERS_6 0x0743
+#define regHUBPREQ1_FLIP_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ1_UCLK_PSTATE_FORCE 0x0744
+#define regHUBPREQ1_UCLK_PSTATE_FORCE_BASE_IDX 2
+#define regHUBPREQ1_HUBPREQ_STATUS_REG0 0x0745
+#define regHUBPREQ1_HUBPREQ_STATUS_REG0_BASE_IDX 2
+#define regHUBPREQ1_HUBPREQ_STATUS_REG1 0x0746
+#define regHUBPREQ1_HUBPREQ_STATUS_REG1_BASE_IDX 2
+#define regHUBPREQ1_HUBPREQ_STATUS_REG2 0x0747
+#define regHUBPREQ1_HUBPREQ_STATUS_REG2_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubpret_dispdec
+// base address: 0x370
+#define regHUBPRET1_HUBPRET_CONTROL 0x0748
+#define regHUBPRET1_HUBPRET_CONTROL_BASE_IDX 2
+#define regHUBPRET1_HUBPRET_MEM_PWR_CTRL 0x0749
+#define regHUBPRET1_HUBPRET_MEM_PWR_CTRL_BASE_IDX 2
+#define regHUBPRET1_HUBPRET_MEM_PWR_STATUS 0x074a
+#define regHUBPRET1_HUBPRET_MEM_PWR_STATUS_BASE_IDX 2
+#define regHUBPRET1_HUBPRET_READ_LINE_CTRL0 0x074b
+#define regHUBPRET1_HUBPRET_READ_LINE_CTRL0_BASE_IDX 2
+#define regHUBPRET1_HUBPRET_READ_LINE_CTRL1 0x074c
+#define regHUBPRET1_HUBPRET_READ_LINE_CTRL1_BASE_IDX 2
+#define regHUBPRET1_HUBPRET_READ_LINE0 0x074d
+#define regHUBPRET1_HUBPRET_READ_LINE0_BASE_IDX 2
+#define regHUBPRET1_HUBPRET_READ_LINE1 0x074e
+#define regHUBPRET1_HUBPRET_READ_LINE1_BASE_IDX 2
+#define regHUBPRET1_HUBPRET_INTERRUPT 0x074f
+#define regHUBPRET1_HUBPRET_INTERRUPT_BASE_IDX 2
+#define regHUBPRET1_HUBPRET_READ_LINE_VALUE 0x0750
+#define regHUBPRET1_HUBPRET_READ_LINE_VALUE_BASE_IDX 2
+#define regHUBPRET1_HUBPRET_READ_LINE_STATUS 0x0751
+#define regHUBPRET1_HUBPRET_READ_LINE_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_cursor0_dispdec
+// base address: 0x370
+#define regCURSOR0_1_CURSOR_CONTROL 0x0754
+#define regCURSOR0_1_CURSOR_CONTROL_BASE_IDX 2
+#define regCURSOR0_1_CURSOR_SURFACE_ADDRESS 0x0755
+#define regCURSOR0_1_CURSOR_SURFACE_ADDRESS_BASE_IDX 2
+#define regCURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH 0x0756
+#define regCURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regCURSOR0_1_CURSOR_SIZE 0x0757
+#define regCURSOR0_1_CURSOR_SIZE_BASE_IDX 2
+#define regCURSOR0_1_CURSOR_POSITION 0x0758
+#define regCURSOR0_1_CURSOR_POSITION_BASE_IDX 2
+#define regCURSOR0_1_CURSOR_HOT_SPOT 0x0759
+#define regCURSOR0_1_CURSOR_HOT_SPOT_BASE_IDX 2
+#define regCURSOR0_1_CURSOR_STEREO_CONTROL 0x075a
+#define regCURSOR0_1_CURSOR_STEREO_CONTROL_BASE_IDX 2
+#define regCURSOR0_1_CURSOR_DST_OFFSET 0x075b
+#define regCURSOR0_1_CURSOR_DST_OFFSET_BASE_IDX 2
+#define regCURSOR0_1_CURSOR_MEM_PWR_CTRL 0x075c
+#define regCURSOR0_1_CURSOR_MEM_PWR_CTRL_BASE_IDX 2
+#define regCURSOR0_1_CURSOR_MEM_PWR_STATUS 0x075d
+#define regCURSOR0_1_CURSOR_MEM_PWR_STATUS_BASE_IDX 2
+#define regCURSOR0_1_DMDATA_ADDRESS_HIGH 0x075e
+#define regCURSOR0_1_DMDATA_ADDRESS_HIGH_BASE_IDX 2
+#define regCURSOR0_1_DMDATA_ADDRESS_LOW 0x075f
+#define regCURSOR0_1_DMDATA_ADDRESS_LOW_BASE_IDX 2
+#define regCURSOR0_1_DMDATA_CNTL 0x0760
+#define regCURSOR0_1_DMDATA_CNTL_BASE_IDX 2
+#define regCURSOR0_1_DMDATA_QOS_CNTL 0x0761
+#define regCURSOR0_1_DMDATA_QOS_CNTL_BASE_IDX 2
+#define regCURSOR0_1_DMDATA_STATUS 0x0762
+#define regCURSOR0_1_DMDATA_STATUS_BASE_IDX 2
+#define regCURSOR0_1_DMDATA_SW_CNTL 0x0763
+#define regCURSOR0_1_DMDATA_SW_CNTL_BASE_IDX 2
+#define regCURSOR0_1_DMDATA_SW_DATA 0x0764
+#define regCURSOR0_1_DMDATA_SW_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x1de4
+#define regDC_PERFMON8_PERFCOUNTER_CNTL 0x0779
+#define regDC_PERFMON8_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON8_PERFCOUNTER_CNTL2 0x077a
+#define regDC_PERFMON8_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON8_PERFCOUNTER_STATE 0x077b
+#define regDC_PERFMON8_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON8_PERFMON_CNTL 0x077c
+#define regDC_PERFMON8_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON8_PERFMON_CNTL2 0x077d
+#define regDC_PERFMON8_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON8_PERFMON_CVALUE_INT_MISC 0x077e
+#define regDC_PERFMON8_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON8_PERFMON_CVALUE_LOW 0x077f
+#define regDC_PERFMON8_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON8_PERFMON_HI 0x0780
+#define regDC_PERFMON8_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON8_PERFMON_LOW 0x0781
+#define regDC_PERFMON8_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubp_dispdec
+// base address: 0x6e0
+#define regHUBP2_DCSURF_SURFACE_CONFIG 0x079d
+#define regHUBP2_DCSURF_SURFACE_CONFIG_BASE_IDX 2
+#define regHUBP2_DCSURF_ADDR_CONFIG 0x079e
+#define regHUBP2_DCSURF_ADDR_CONFIG_BASE_IDX 2
+#define regHUBP2_DCSURF_TILING_CONFIG 0x079f
+#define regHUBP2_DCSURF_TILING_CONFIG_BASE_IDX 2
+#define regHUBP2_DCSURF_PRI_VIEWPORT_START 0x07a1
+#define regHUBP2_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2
+#define regHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION 0x07a2
+#define regHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
+#define regHUBP2_DCSURF_PRI_VIEWPORT_START_C 0x07a3
+#define regHUBP2_DCSURF_PRI_VIEWPORT_START_C_BASE_IDX 2
+#define regHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C 0x07a4
+#define regHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define regHUBP2_DCSURF_SEC_VIEWPORT_START 0x07a5
+#define regHUBP2_DCSURF_SEC_VIEWPORT_START_BASE_IDX 2
+#define regHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION 0x07a6
+#define regHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_BASE_IDX 2
+#define regHUBP2_DCSURF_SEC_VIEWPORT_START_C 0x07a7
+#define regHUBP2_DCSURF_SEC_VIEWPORT_START_C_BASE_IDX 2
+#define regHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C 0x07a8
+#define regHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define regHUBP2_DCHUBP_REQ_SIZE_CONFIG 0x07a9
+#define regHUBP2_DCHUBP_REQ_SIZE_CONFIG_BASE_IDX 2
+#define regHUBP2_DCHUBP_REQ_SIZE_CONFIG_C 0x07aa
+#define regHUBP2_DCHUBP_REQ_SIZE_CONFIG_C_BASE_IDX 2
+#define regHUBP2_DCHUBP_CNTL 0x07ab
+#define regHUBP2_DCHUBP_CNTL_BASE_IDX 2
+#define regHUBP2_HUBP_CLK_CNTL 0x07ac
+#define regHUBP2_HUBP_CLK_CNTL_BASE_IDX 2
+#define regHUBP2_DCHUBP_VMPG_CONFIG 0x07ad
+#define regHUBP2_DCHUBP_VMPG_CONFIG_BASE_IDX 2
+#define regHUBP2_DCHUBP_MALL_CONFIG 0x07ae
+#define regHUBP2_DCHUBP_MALL_CONFIG_BASE_IDX 2
+#define regHUBP2_DCHUBP_MALL_SUB_VP 0x07af
+#define regHUBP2_DCHUBP_MALL_SUB_VP_BASE_IDX 2
+#define regHUBP2_HUBPREQ_DEBUG_DB 0x07b0
+#define regHUBP2_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define regHUBP2_HUBPREQ_DEBUG 0x07b1
+#define regHUBP2_HUBPREQ_DEBUG_BASE_IDX 2
+#define regHUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x07b5
+#define regHUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
+#define regHUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x07b6
+#define regHUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK_BASE_IDX 2
+#define regHUBP2_HUBP_MALL_STATUS 0x07b7
+#define regHUBP2_HUBP_MALL_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubpreq_dispdec
+// base address: 0x6e0
+#define regHUBPREQ2_DCSURF_SURFACE_PITCH 0x07bf
+#define regHUBPREQ2_DCSURF_SURFACE_PITCH_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_PITCH_C 0x07c0
+#define regHUBPREQ2_DCSURF_SURFACE_PITCH_C_BASE_IDX 2
+#define regHUBPREQ2_VMID_SETTINGS_0 0x07c1
+#define regHUBPREQ2_VMID_SETTINGS_0_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS 0x07c2
+#define regHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x07c3
+#define regHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C 0x07c4
+#define regHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x07c5
+#define regHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS 0x07c6
+#define regHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH 0x07c7
+#define regHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C 0x07c8
+#define regHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x07c9
+#define regHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS 0x07ca
+#define regHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH 0x07cb
+#define regHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C 0x07cc
+#define regHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C 0x07cd
+#define regHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS 0x07ce
+#define regHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH 0x07cf
+#define regHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C 0x07d0
+#define regHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C 0x07d1
+#define regHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_CONTROL 0x07d2
+#define regHUBPREQ2_DCSURF_SURFACE_CONTROL_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_FLIP_CONTROL 0x07d3
+#define regHUBPREQ2_DCSURF_FLIP_CONTROL_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_FLIP_CONTROL2 0x07d4
+#define regHUBPREQ2_DCSURF_FLIP_CONTROL2_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT 0x07d7
+#define regHUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_INUSE 0x07d8
+#define regHUBPREQ2_DCSURF_SURFACE_INUSE_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH 0x07d9
+#define regHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_INUSE_C 0x07da
+#define regHUBPREQ2_DCSURF_SURFACE_INUSE_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C 0x07db
+#define regHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE 0x07dc
+#define regHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH 0x07dd
+#define regHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C 0x07de
+#define regHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C_BASE_IDX 2
+#define regHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C 0x07df
+#define regHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_BASE_IDX 2
+#define regHUBPREQ2_DCN_EXPANSION_MODE 0x07e0
+#define regHUBPREQ2_DCN_EXPANSION_MODE_BASE_IDX 2
+#define regHUBPREQ2_DCN_TTU_QOS_WM 0x07e1
+#define regHUBPREQ2_DCN_TTU_QOS_WM_BASE_IDX 2
+#define regHUBPREQ2_DCN_GLOBAL_TTU_CNTL 0x07e2
+#define regHUBPREQ2_DCN_GLOBAL_TTU_CNTL_BASE_IDX 2
+#define regHUBPREQ2_DCN_SURF0_TTU_CNTL0 0x07e3
+#define regHUBPREQ2_DCN_SURF0_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ2_DCN_SURF0_TTU_CNTL1 0x07e4
+#define regHUBPREQ2_DCN_SURF0_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ2_DCN_SURF1_TTU_CNTL0 0x07e5
+#define regHUBPREQ2_DCN_SURF1_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ2_DCN_SURF1_TTU_CNTL1 0x07e6
+#define regHUBPREQ2_DCN_SURF1_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ2_DCN_CUR0_TTU_CNTL0 0x07e7
+#define regHUBPREQ2_DCN_CUR0_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ2_DCN_CUR0_TTU_CNTL1 0x07e8
+#define regHUBPREQ2_DCN_CUR0_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ2_DCN_CUR1_TTU_CNTL0 0x07e9
+#define regHUBPREQ2_DCN_CUR1_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ2_DCN_CUR1_TTU_CNTL1 0x07ea
+#define regHUBPREQ2_DCN_CUR1_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ2_DCN_DMDATA_VM_CNTL 0x07eb
+#define regHUBPREQ2_DCN_DMDATA_VM_CNTL_BASE_IDX 2
+#define regHUBPREQ2_DCN_VM_SYSTEM_APERTURE_LOW_ADDR 0x07ec
+#define regHUBPREQ2_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_BASE_IDX 2
+#define regHUBPREQ2_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR 0x07ed
+#define regHUBPREQ2_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_BASE_IDX 2
+#define regHUBPREQ2_DCN_VM_MX_L1_TLB_CNTL 0x07fa
+#define regHUBPREQ2_DCN_VM_MX_L1_TLB_CNTL_BASE_IDX 2
+#define regHUBPREQ2_BLANK_OFFSET_0 0x07fb
+#define regHUBPREQ2_BLANK_OFFSET_0_BASE_IDX 2
+#define regHUBPREQ2_BLANK_OFFSET_1 0x07fc
+#define regHUBPREQ2_BLANK_OFFSET_1_BASE_IDX 2
+#define regHUBPREQ2_DST_DIMENSIONS 0x07fd
+#define regHUBPREQ2_DST_DIMENSIONS_BASE_IDX 2
+#define regHUBPREQ2_DST_AFTER_SCALER 0x07fe
+#define regHUBPREQ2_DST_AFTER_SCALER_BASE_IDX 2
+#define regHUBPREQ2_PREFETCH_SETTINGS 0x07ff
+#define regHUBPREQ2_PREFETCH_SETTINGS_BASE_IDX 2
+#define regHUBPREQ2_PREFETCH_SETTINGS_C 0x0800
+#define regHUBPREQ2_PREFETCH_SETTINGS_C_BASE_IDX 2
+#define regHUBPREQ2_VBLANK_PARAMETERS_0 0x0801
+#define regHUBPREQ2_VBLANK_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ2_VBLANK_PARAMETERS_1 0x0802
+#define regHUBPREQ2_VBLANK_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ2_VBLANK_PARAMETERS_2 0x0803
+#define regHUBPREQ2_VBLANK_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ2_VBLANK_PARAMETERS_3 0x0804
+#define regHUBPREQ2_VBLANK_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ2_VBLANK_PARAMETERS_4 0x0805
+#define regHUBPREQ2_VBLANK_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ2_FLIP_PARAMETERS_0 0x0806
+#define regHUBPREQ2_FLIP_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ2_FLIP_PARAMETERS_1 0x0807
+#define regHUBPREQ2_FLIP_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ2_FLIP_PARAMETERS_2 0x0808
+#define regHUBPREQ2_FLIP_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ2_NOM_PARAMETERS_0 0x0809
+#define regHUBPREQ2_NOM_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ2_NOM_PARAMETERS_1 0x080a
+#define regHUBPREQ2_NOM_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ2_NOM_PARAMETERS_2 0x080b
+#define regHUBPREQ2_NOM_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ2_NOM_PARAMETERS_3 0x080c
+#define regHUBPREQ2_NOM_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ2_NOM_PARAMETERS_4 0x080d
+#define regHUBPREQ2_NOM_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ2_NOM_PARAMETERS_5 0x080e
+#define regHUBPREQ2_NOM_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ2_NOM_PARAMETERS_6 0x080f
+#define regHUBPREQ2_NOM_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ2_NOM_PARAMETERS_7 0x0810
+#define regHUBPREQ2_NOM_PARAMETERS_7_BASE_IDX 2
+#define regHUBPREQ2_PER_LINE_DELIVERY_PRE 0x0811
+#define regHUBPREQ2_PER_LINE_DELIVERY_PRE_BASE_IDX 2
+#define regHUBPREQ2_PER_LINE_DELIVERY 0x0812
+#define regHUBPREQ2_PER_LINE_DELIVERY_BASE_IDX 2
+#define regHUBPREQ2_CURSOR_SETTINGS 0x0813
+#define regHUBPREQ2_CURSOR_SETTINGS_BASE_IDX 2
+#define regHUBPREQ2_REF_FREQ_TO_PIX_FREQ 0x0814
+#define regHUBPREQ2_REF_FREQ_TO_PIX_FREQ_BASE_IDX 2
+#define regHUBPREQ2_DST_Y_DELTA_DRQ_LIMIT 0x0815
+#define regHUBPREQ2_DST_Y_DELTA_DRQ_LIMIT_BASE_IDX 2
+#define regHUBPREQ2_HUBPREQ_MEM_PWR_CTRL 0x0816
+#define regHUBPREQ2_HUBPREQ_MEM_PWR_CTRL_BASE_IDX 2
+#define regHUBPREQ2_HUBPREQ_MEM_PWR_STATUS 0x0817
+#define regHUBPREQ2_HUBPREQ_MEM_PWR_STATUS_BASE_IDX 2
+#define regHUBPREQ2_VBLANK_PARAMETERS_5 0x081a
+#define regHUBPREQ2_VBLANK_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ2_VBLANK_PARAMETERS_6 0x081b
+#define regHUBPREQ2_VBLANK_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ2_FLIP_PARAMETERS_3 0x081c
+#define regHUBPREQ2_FLIP_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ2_FLIP_PARAMETERS_4 0x081d
+#define regHUBPREQ2_FLIP_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ2_FLIP_PARAMETERS_5 0x081e
+#define regHUBPREQ2_FLIP_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ2_FLIP_PARAMETERS_6 0x081f
+#define regHUBPREQ2_FLIP_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ2_UCLK_PSTATE_FORCE 0x0820
+#define regHUBPREQ2_UCLK_PSTATE_FORCE_BASE_IDX 2
+#define regHUBPREQ2_HUBPREQ_STATUS_REG0 0x0821
+#define regHUBPREQ2_HUBPREQ_STATUS_REG0_BASE_IDX 2
+#define regHUBPREQ2_HUBPREQ_STATUS_REG1 0x0822
+#define regHUBPREQ2_HUBPREQ_STATUS_REG1_BASE_IDX 2
+#define regHUBPREQ2_HUBPREQ_STATUS_REG2 0x0823
+#define regHUBPREQ2_HUBPREQ_STATUS_REG2_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubpret_dispdec
+// base address: 0x6e0
+#define regHUBPRET2_HUBPRET_CONTROL 0x0824
+#define regHUBPRET2_HUBPRET_CONTROL_BASE_IDX 2
+#define regHUBPRET2_HUBPRET_MEM_PWR_CTRL 0x0825
+#define regHUBPRET2_HUBPRET_MEM_PWR_CTRL_BASE_IDX 2
+#define regHUBPRET2_HUBPRET_MEM_PWR_STATUS 0x0826
+#define regHUBPRET2_HUBPRET_MEM_PWR_STATUS_BASE_IDX 2
+#define regHUBPRET2_HUBPRET_READ_LINE_CTRL0 0x0827
+#define regHUBPRET2_HUBPRET_READ_LINE_CTRL0_BASE_IDX 2
+#define regHUBPRET2_HUBPRET_READ_LINE_CTRL1 0x0828
+#define regHUBPRET2_HUBPRET_READ_LINE_CTRL1_BASE_IDX 2
+#define regHUBPRET2_HUBPRET_READ_LINE0 0x0829
+#define regHUBPRET2_HUBPRET_READ_LINE0_BASE_IDX 2
+#define regHUBPRET2_HUBPRET_READ_LINE1 0x082a
+#define regHUBPRET2_HUBPRET_READ_LINE1_BASE_IDX 2
+#define regHUBPRET2_HUBPRET_INTERRUPT 0x082b
+#define regHUBPRET2_HUBPRET_INTERRUPT_BASE_IDX 2
+#define regHUBPRET2_HUBPRET_READ_LINE_VALUE 0x082c
+#define regHUBPRET2_HUBPRET_READ_LINE_VALUE_BASE_IDX 2
+#define regHUBPRET2_HUBPRET_READ_LINE_STATUS 0x082d
+#define regHUBPRET2_HUBPRET_READ_LINE_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_cursor0_dispdec
+// base address: 0x6e0
+#define regCURSOR0_2_CURSOR_CONTROL 0x0830
+#define regCURSOR0_2_CURSOR_CONTROL_BASE_IDX 2
+#define regCURSOR0_2_CURSOR_SURFACE_ADDRESS 0x0831
+#define regCURSOR0_2_CURSOR_SURFACE_ADDRESS_BASE_IDX 2
+#define regCURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH 0x0832
+#define regCURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regCURSOR0_2_CURSOR_SIZE 0x0833
+#define regCURSOR0_2_CURSOR_SIZE_BASE_IDX 2
+#define regCURSOR0_2_CURSOR_POSITION 0x0834
+#define regCURSOR0_2_CURSOR_POSITION_BASE_IDX 2
+#define regCURSOR0_2_CURSOR_HOT_SPOT 0x0835
+#define regCURSOR0_2_CURSOR_HOT_SPOT_BASE_IDX 2
+#define regCURSOR0_2_CURSOR_STEREO_CONTROL 0x0836
+#define regCURSOR0_2_CURSOR_STEREO_CONTROL_BASE_IDX 2
+#define regCURSOR0_2_CURSOR_DST_OFFSET 0x0837
+#define regCURSOR0_2_CURSOR_DST_OFFSET_BASE_IDX 2
+#define regCURSOR0_2_CURSOR_MEM_PWR_CTRL 0x0838
+#define regCURSOR0_2_CURSOR_MEM_PWR_CTRL_BASE_IDX 2
+#define regCURSOR0_2_CURSOR_MEM_PWR_STATUS 0x0839
+#define regCURSOR0_2_CURSOR_MEM_PWR_STATUS_BASE_IDX 2
+#define regCURSOR0_2_DMDATA_ADDRESS_HIGH 0x083a
+#define regCURSOR0_2_DMDATA_ADDRESS_HIGH_BASE_IDX 2
+#define regCURSOR0_2_DMDATA_ADDRESS_LOW 0x083b
+#define regCURSOR0_2_DMDATA_ADDRESS_LOW_BASE_IDX 2
+#define regCURSOR0_2_DMDATA_CNTL 0x083c
+#define regCURSOR0_2_DMDATA_CNTL_BASE_IDX 2
+#define regCURSOR0_2_DMDATA_QOS_CNTL 0x083d
+#define regCURSOR0_2_DMDATA_QOS_CNTL_BASE_IDX 2
+#define regCURSOR0_2_DMDATA_STATUS 0x083e
+#define regCURSOR0_2_DMDATA_STATUS_BASE_IDX 2
+#define regCURSOR0_2_DMDATA_SW_CNTL 0x083f
+#define regCURSOR0_2_DMDATA_SW_CNTL_BASE_IDX 2
+#define regCURSOR0_2_DMDATA_SW_DATA 0x0840
+#define regCURSOR0_2_DMDATA_SW_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x2154
+#define regDC_PERFMON9_PERFCOUNTER_CNTL 0x0855
+#define regDC_PERFMON9_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON9_PERFCOUNTER_CNTL2 0x0856
+#define regDC_PERFMON9_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON9_PERFCOUNTER_STATE 0x0857
+#define regDC_PERFMON9_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON9_PERFMON_CNTL 0x0858
+#define regDC_PERFMON9_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON9_PERFMON_CNTL2 0x0859
+#define regDC_PERFMON9_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON9_PERFMON_CVALUE_INT_MISC 0x085a
+#define regDC_PERFMON9_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON9_PERFMON_CVALUE_LOW 0x085b
+#define regDC_PERFMON9_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON9_PERFMON_HI 0x085c
+#define regDC_PERFMON9_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON9_PERFMON_LOW 0x085d
+#define regDC_PERFMON9_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubp_dispdec
+// base address: 0xa50
+#define regHUBP3_DCSURF_SURFACE_CONFIG 0x0879
+#define regHUBP3_DCSURF_SURFACE_CONFIG_BASE_IDX 2
+#define regHUBP3_DCSURF_ADDR_CONFIG 0x087a
+#define regHUBP3_DCSURF_ADDR_CONFIG_BASE_IDX 2
+#define regHUBP3_DCSURF_TILING_CONFIG 0x087b
+#define regHUBP3_DCSURF_TILING_CONFIG_BASE_IDX 2
+#define regHUBP3_DCSURF_PRI_VIEWPORT_START 0x087d
+#define regHUBP3_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2
+#define regHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION 0x087e
+#define regHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
+#define regHUBP3_DCSURF_PRI_VIEWPORT_START_C 0x087f
+#define regHUBP3_DCSURF_PRI_VIEWPORT_START_C_BASE_IDX 2
+#define regHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C 0x0880
+#define regHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define regHUBP3_DCSURF_SEC_VIEWPORT_START 0x0881
+#define regHUBP3_DCSURF_SEC_VIEWPORT_START_BASE_IDX 2
+#define regHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION 0x0882
+#define regHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_BASE_IDX 2
+#define regHUBP3_DCSURF_SEC_VIEWPORT_START_C 0x0883
+#define regHUBP3_DCSURF_SEC_VIEWPORT_START_C_BASE_IDX 2
+#define regHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C 0x0884
+#define regHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define regHUBP3_DCHUBP_REQ_SIZE_CONFIG 0x0885
+#define regHUBP3_DCHUBP_REQ_SIZE_CONFIG_BASE_IDX 2
+#define regHUBP3_DCHUBP_REQ_SIZE_CONFIG_C 0x0886
+#define regHUBP3_DCHUBP_REQ_SIZE_CONFIG_C_BASE_IDX 2
+#define regHUBP3_DCHUBP_CNTL 0x0887
+#define regHUBP3_DCHUBP_CNTL_BASE_IDX 2
+#define regHUBP3_HUBP_CLK_CNTL 0x0888
+#define regHUBP3_HUBP_CLK_CNTL_BASE_IDX 2
+#define regHUBP3_DCHUBP_VMPG_CONFIG 0x0889
+#define regHUBP3_DCHUBP_VMPG_CONFIG_BASE_IDX 2
+#define regHUBP3_DCHUBP_MALL_CONFIG 0x088a
+#define regHUBP3_DCHUBP_MALL_CONFIG_BASE_IDX 2
+#define regHUBP3_DCHUBP_MALL_SUB_VP 0x088b
+#define regHUBP3_DCHUBP_MALL_SUB_VP_BASE_IDX 2
+#define regHUBP3_HUBPREQ_DEBUG_DB 0x088c
+#define regHUBP3_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define regHUBP3_HUBPREQ_DEBUG 0x088d
+#define regHUBP3_HUBPREQ_DEBUG_BASE_IDX 2
+#define regHUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x0891
+#define regHUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
+#define regHUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x0892
+#define regHUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK_BASE_IDX 2
+#define regHUBP3_HUBP_MALL_STATUS 0x0893
+#define regHUBP3_HUBP_MALL_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubpreq_dispdec
+// base address: 0xa50
+#define regHUBPREQ3_DCSURF_SURFACE_PITCH 0x089b
+#define regHUBPREQ3_DCSURF_SURFACE_PITCH_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_PITCH_C 0x089c
+#define regHUBPREQ3_DCSURF_SURFACE_PITCH_C_BASE_IDX 2
+#define regHUBPREQ3_VMID_SETTINGS_0 0x089d
+#define regHUBPREQ3_VMID_SETTINGS_0_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS 0x089e
+#define regHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x089f
+#define regHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C 0x08a0
+#define regHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x08a1
+#define regHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS 0x08a2
+#define regHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH 0x08a3
+#define regHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C 0x08a4
+#define regHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x08a5
+#define regHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS 0x08a6
+#define regHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH 0x08a7
+#define regHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C 0x08a8
+#define regHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C 0x08a9
+#define regHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS 0x08aa
+#define regHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH 0x08ab
+#define regHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C 0x08ac
+#define regHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C 0x08ad
+#define regHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_CONTROL 0x08ae
+#define regHUBPREQ3_DCSURF_SURFACE_CONTROL_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_FLIP_CONTROL 0x08af
+#define regHUBPREQ3_DCSURF_FLIP_CONTROL_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_FLIP_CONTROL2 0x08b0
+#define regHUBPREQ3_DCSURF_FLIP_CONTROL2_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT 0x08b3
+#define regHUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_INUSE 0x08b4
+#define regHUBPREQ3_DCSURF_SURFACE_INUSE_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH 0x08b5
+#define regHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_INUSE_C 0x08b6
+#define regHUBPREQ3_DCSURF_SURFACE_INUSE_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C 0x08b7
+#define regHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE 0x08b8
+#define regHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH 0x08b9
+#define regHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C 0x08ba
+#define regHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C_BASE_IDX 2
+#define regHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C 0x08bb
+#define regHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_BASE_IDX 2
+#define regHUBPREQ3_DCN_EXPANSION_MODE 0x08bc
+#define regHUBPREQ3_DCN_EXPANSION_MODE_BASE_IDX 2
+#define regHUBPREQ3_DCN_TTU_QOS_WM 0x08bd
+#define regHUBPREQ3_DCN_TTU_QOS_WM_BASE_IDX 2
+#define regHUBPREQ3_DCN_GLOBAL_TTU_CNTL 0x08be
+#define regHUBPREQ3_DCN_GLOBAL_TTU_CNTL_BASE_IDX 2
+#define regHUBPREQ3_DCN_SURF0_TTU_CNTL0 0x08bf
+#define regHUBPREQ3_DCN_SURF0_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ3_DCN_SURF0_TTU_CNTL1 0x08c0
+#define regHUBPREQ3_DCN_SURF0_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ3_DCN_SURF1_TTU_CNTL0 0x08c1
+#define regHUBPREQ3_DCN_SURF1_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ3_DCN_SURF1_TTU_CNTL1 0x08c2
+#define regHUBPREQ3_DCN_SURF1_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ3_DCN_CUR0_TTU_CNTL0 0x08c3
+#define regHUBPREQ3_DCN_CUR0_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ3_DCN_CUR0_TTU_CNTL1 0x08c4
+#define regHUBPREQ3_DCN_CUR0_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ3_DCN_CUR1_TTU_CNTL0 0x08c5
+#define regHUBPREQ3_DCN_CUR1_TTU_CNTL0_BASE_IDX 2
+#define regHUBPREQ3_DCN_CUR1_TTU_CNTL1 0x08c6
+#define regHUBPREQ3_DCN_CUR1_TTU_CNTL1_BASE_IDX 2
+#define regHUBPREQ3_DCN_DMDATA_VM_CNTL 0x08c7
+#define regHUBPREQ3_DCN_DMDATA_VM_CNTL_BASE_IDX 2
+#define regHUBPREQ3_DCN_VM_SYSTEM_APERTURE_LOW_ADDR 0x08c8
+#define regHUBPREQ3_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_BASE_IDX 2
+#define regHUBPREQ3_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR 0x08c9
+#define regHUBPREQ3_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_BASE_IDX 2
+#define regHUBPREQ3_DCN_VM_MX_L1_TLB_CNTL 0x08d6
+#define regHUBPREQ3_DCN_VM_MX_L1_TLB_CNTL_BASE_IDX 2
+#define regHUBPREQ3_BLANK_OFFSET_0 0x08d7
+#define regHUBPREQ3_BLANK_OFFSET_0_BASE_IDX 2
+#define regHUBPREQ3_BLANK_OFFSET_1 0x08d8
+#define regHUBPREQ3_BLANK_OFFSET_1_BASE_IDX 2
+#define regHUBPREQ3_DST_DIMENSIONS 0x08d9
+#define regHUBPREQ3_DST_DIMENSIONS_BASE_IDX 2
+#define regHUBPREQ3_DST_AFTER_SCALER 0x08da
+#define regHUBPREQ3_DST_AFTER_SCALER_BASE_IDX 2
+#define regHUBPREQ3_PREFETCH_SETTINGS 0x08db
+#define regHUBPREQ3_PREFETCH_SETTINGS_BASE_IDX 2
+#define regHUBPREQ3_PREFETCH_SETTINGS_C 0x08dc
+#define regHUBPREQ3_PREFETCH_SETTINGS_C_BASE_IDX 2
+#define regHUBPREQ3_VBLANK_PARAMETERS_0 0x08dd
+#define regHUBPREQ3_VBLANK_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ3_VBLANK_PARAMETERS_1 0x08de
+#define regHUBPREQ3_VBLANK_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ3_VBLANK_PARAMETERS_2 0x08df
+#define regHUBPREQ3_VBLANK_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ3_VBLANK_PARAMETERS_3 0x08e0
+#define regHUBPREQ3_VBLANK_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ3_VBLANK_PARAMETERS_4 0x08e1
+#define regHUBPREQ3_VBLANK_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ3_FLIP_PARAMETERS_0 0x08e2
+#define regHUBPREQ3_FLIP_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ3_FLIP_PARAMETERS_1 0x08e3
+#define regHUBPREQ3_FLIP_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ3_FLIP_PARAMETERS_2 0x08e4
+#define regHUBPREQ3_FLIP_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ3_NOM_PARAMETERS_0 0x08e5
+#define regHUBPREQ3_NOM_PARAMETERS_0_BASE_IDX 2
+#define regHUBPREQ3_NOM_PARAMETERS_1 0x08e6
+#define regHUBPREQ3_NOM_PARAMETERS_1_BASE_IDX 2
+#define regHUBPREQ3_NOM_PARAMETERS_2 0x08e7
+#define regHUBPREQ3_NOM_PARAMETERS_2_BASE_IDX 2
+#define regHUBPREQ3_NOM_PARAMETERS_3 0x08e8
+#define regHUBPREQ3_NOM_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ3_NOM_PARAMETERS_4 0x08e9
+#define regHUBPREQ3_NOM_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ3_NOM_PARAMETERS_5 0x08ea
+#define regHUBPREQ3_NOM_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ3_NOM_PARAMETERS_6 0x08eb
+#define regHUBPREQ3_NOM_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ3_NOM_PARAMETERS_7 0x08ec
+#define regHUBPREQ3_NOM_PARAMETERS_7_BASE_IDX 2
+#define regHUBPREQ3_PER_LINE_DELIVERY_PRE 0x08ed
+#define regHUBPREQ3_PER_LINE_DELIVERY_PRE_BASE_IDX 2
+#define regHUBPREQ3_PER_LINE_DELIVERY 0x08ee
+#define regHUBPREQ3_PER_LINE_DELIVERY_BASE_IDX 2
+#define regHUBPREQ3_CURSOR_SETTINGS 0x08ef
+#define regHUBPREQ3_CURSOR_SETTINGS_BASE_IDX 2
+#define regHUBPREQ3_REF_FREQ_TO_PIX_FREQ 0x08f0
+#define regHUBPREQ3_REF_FREQ_TO_PIX_FREQ_BASE_IDX 2
+#define regHUBPREQ3_DST_Y_DELTA_DRQ_LIMIT 0x08f1
+#define regHUBPREQ3_DST_Y_DELTA_DRQ_LIMIT_BASE_IDX 2
+#define regHUBPREQ3_HUBPREQ_MEM_PWR_CTRL 0x08f2
+#define regHUBPREQ3_HUBPREQ_MEM_PWR_CTRL_BASE_IDX 2
+#define regHUBPREQ3_HUBPREQ_MEM_PWR_STATUS 0x08f3
+#define regHUBPREQ3_HUBPREQ_MEM_PWR_STATUS_BASE_IDX 2
+#define regHUBPREQ3_VBLANK_PARAMETERS_5 0x08f6
+#define regHUBPREQ3_VBLANK_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ3_VBLANK_PARAMETERS_6 0x08f7
+#define regHUBPREQ3_VBLANK_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ3_FLIP_PARAMETERS_3 0x08f8
+#define regHUBPREQ3_FLIP_PARAMETERS_3_BASE_IDX 2
+#define regHUBPREQ3_FLIP_PARAMETERS_4 0x08f9
+#define regHUBPREQ3_FLIP_PARAMETERS_4_BASE_IDX 2
+#define regHUBPREQ3_FLIP_PARAMETERS_5 0x08fa
+#define regHUBPREQ3_FLIP_PARAMETERS_5_BASE_IDX 2
+#define regHUBPREQ3_FLIP_PARAMETERS_6 0x08fb
+#define regHUBPREQ3_FLIP_PARAMETERS_6_BASE_IDX 2
+#define regHUBPREQ3_UCLK_PSTATE_FORCE 0x08fc
+#define regHUBPREQ3_UCLK_PSTATE_FORCE_BASE_IDX 2
+#define regHUBPREQ3_HUBPREQ_STATUS_REG0 0x08fd
+#define regHUBPREQ3_HUBPREQ_STATUS_REG0_BASE_IDX 2
+#define regHUBPREQ3_HUBPREQ_STATUS_REG1 0x08fe
+#define regHUBPREQ3_HUBPREQ_STATUS_REG1_BASE_IDX 2
+#define regHUBPREQ3_HUBPREQ_STATUS_REG2 0x08ff
+#define regHUBPREQ3_HUBPREQ_STATUS_REG2_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubpret_dispdec
+// base address: 0xa50
+#define regHUBPRET3_HUBPRET_CONTROL 0x0900
+#define regHUBPRET3_HUBPRET_CONTROL_BASE_IDX 2
+#define regHUBPRET3_HUBPRET_MEM_PWR_CTRL 0x0901
+#define regHUBPRET3_HUBPRET_MEM_PWR_CTRL_BASE_IDX 2
+#define regHUBPRET3_HUBPRET_MEM_PWR_STATUS 0x0902
+#define regHUBPRET3_HUBPRET_MEM_PWR_STATUS_BASE_IDX 2
+#define regHUBPRET3_HUBPRET_READ_LINE_CTRL0 0x0903
+#define regHUBPRET3_HUBPRET_READ_LINE_CTRL0_BASE_IDX 2
+#define regHUBPRET3_HUBPRET_READ_LINE_CTRL1 0x0904
+#define regHUBPRET3_HUBPRET_READ_LINE_CTRL1_BASE_IDX 2
+#define regHUBPRET3_HUBPRET_READ_LINE0 0x0905
+#define regHUBPRET3_HUBPRET_READ_LINE0_BASE_IDX 2
+#define regHUBPRET3_HUBPRET_READ_LINE1 0x0906
+#define regHUBPRET3_HUBPRET_READ_LINE1_BASE_IDX 2
+#define regHUBPRET3_HUBPRET_INTERRUPT 0x0907
+#define regHUBPRET3_HUBPRET_INTERRUPT_BASE_IDX 2
+#define regHUBPRET3_HUBPRET_READ_LINE_VALUE 0x0908
+#define regHUBPRET3_HUBPRET_READ_LINE_VALUE_BASE_IDX 2
+#define regHUBPRET3_HUBPRET_READ_LINE_STATUS 0x0909
+#define regHUBPRET3_HUBPRET_READ_LINE_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_cursor0_dispdec
+// base address: 0xa50
+#define regCURSOR0_3_CURSOR_CONTROL 0x090c
+#define regCURSOR0_3_CURSOR_CONTROL_BASE_IDX 2
+#define regCURSOR0_3_CURSOR_SURFACE_ADDRESS 0x090d
+#define regCURSOR0_3_CURSOR_SURFACE_ADDRESS_BASE_IDX 2
+#define regCURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH 0x090e
+#define regCURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define regCURSOR0_3_CURSOR_SIZE 0x090f
+#define regCURSOR0_3_CURSOR_SIZE_BASE_IDX 2
+#define regCURSOR0_3_CURSOR_POSITION 0x0910
+#define regCURSOR0_3_CURSOR_POSITION_BASE_IDX 2
+#define regCURSOR0_3_CURSOR_HOT_SPOT 0x0911
+#define regCURSOR0_3_CURSOR_HOT_SPOT_BASE_IDX 2
+#define regCURSOR0_3_CURSOR_STEREO_CONTROL 0x0912
+#define regCURSOR0_3_CURSOR_STEREO_CONTROL_BASE_IDX 2
+#define regCURSOR0_3_CURSOR_DST_OFFSET 0x0913
+#define regCURSOR0_3_CURSOR_DST_OFFSET_BASE_IDX 2
+#define regCURSOR0_3_CURSOR_MEM_PWR_CTRL 0x0914
+#define regCURSOR0_3_CURSOR_MEM_PWR_CTRL_BASE_IDX 2
+#define regCURSOR0_3_CURSOR_MEM_PWR_STATUS 0x0915
+#define regCURSOR0_3_CURSOR_MEM_PWR_STATUS_BASE_IDX 2
+#define regCURSOR0_3_DMDATA_ADDRESS_HIGH 0x0916
+#define regCURSOR0_3_DMDATA_ADDRESS_HIGH_BASE_IDX 2
+#define regCURSOR0_3_DMDATA_ADDRESS_LOW 0x0917
+#define regCURSOR0_3_DMDATA_ADDRESS_LOW_BASE_IDX 2
+#define regCURSOR0_3_DMDATA_CNTL 0x0918
+#define regCURSOR0_3_DMDATA_CNTL_BASE_IDX 2
+#define regCURSOR0_3_DMDATA_QOS_CNTL 0x0919
+#define regCURSOR0_3_DMDATA_QOS_CNTL_BASE_IDX 2
+#define regCURSOR0_3_DMDATA_STATUS 0x091a
+#define regCURSOR0_3_DMDATA_STATUS_BASE_IDX 2
+#define regCURSOR0_3_DMDATA_SW_CNTL 0x091b
+#define regCURSOR0_3_DMDATA_SW_CNTL_BASE_IDX 2
+#define regCURSOR0_3_DMDATA_SW_DATA 0x091c
+#define regCURSOR0_3_DMDATA_SW_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x24c4
+#define regDC_PERFMON10_PERFCOUNTER_CNTL 0x0931
+#define regDC_PERFMON10_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON10_PERFCOUNTER_CNTL2 0x0932
+#define regDC_PERFMON10_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON10_PERFCOUNTER_STATE 0x0933
+#define regDC_PERFMON10_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON10_PERFMON_CNTL 0x0934
+#define regDC_PERFMON10_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON10_PERFMON_CNTL2 0x0935
+#define regDC_PERFMON10_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON10_PERFMON_CVALUE_INT_MISC 0x0936
+#define regDC_PERFMON10_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON10_PERFMON_CVALUE_LOW 0x0937
+#define regDC_PERFMON10_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON10_PERFMON_HI 0x0938
+#define regDC_PERFMON10_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON10_PERFMON_LOW 0x0939
+#define regDC_PERFMON10_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cnvc_cfg_dispdec
+// base address: 0x0
+#define regCNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT 0x0ccf
+#define regCNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT_BASE_IDX 2
+#define regCNVC_CFG0_FORMAT_CONTROL 0x0cd0
+#define regCNVC_CFG0_FORMAT_CONTROL_BASE_IDX 2
+#define regCNVC_CFG0_FCNV_FP_BIAS_R 0x0cd1
+#define regCNVC_CFG0_FCNV_FP_BIAS_R_BASE_IDX 2
+#define regCNVC_CFG0_FCNV_FP_BIAS_G 0x0cd2
+#define regCNVC_CFG0_FCNV_FP_BIAS_G_BASE_IDX 2
+#define regCNVC_CFG0_FCNV_FP_BIAS_B 0x0cd3
+#define regCNVC_CFG0_FCNV_FP_BIAS_B_BASE_IDX 2
+#define regCNVC_CFG0_FCNV_FP_SCALE_R 0x0cd4
+#define regCNVC_CFG0_FCNV_FP_SCALE_R_BASE_IDX 2
+#define regCNVC_CFG0_FCNV_FP_SCALE_G 0x0cd5
+#define regCNVC_CFG0_FCNV_FP_SCALE_G_BASE_IDX 2
+#define regCNVC_CFG0_FCNV_FP_SCALE_B 0x0cd6
+#define regCNVC_CFG0_FCNV_FP_SCALE_B_BASE_IDX 2
+#define regCNVC_CFG0_COLOR_KEYER_CONTROL 0x0cd7
+#define regCNVC_CFG0_COLOR_KEYER_CONTROL_BASE_IDX 2
+#define regCNVC_CFG0_COLOR_KEYER_ALPHA 0x0cd8
+#define regCNVC_CFG0_COLOR_KEYER_ALPHA_BASE_IDX 2
+#define regCNVC_CFG0_COLOR_KEYER_RED 0x0cd9
+#define regCNVC_CFG0_COLOR_KEYER_RED_BASE_IDX 2
+#define regCNVC_CFG0_COLOR_KEYER_GREEN 0x0cda
+#define regCNVC_CFG0_COLOR_KEYER_GREEN_BASE_IDX 2
+#define regCNVC_CFG0_COLOR_KEYER_BLUE 0x0cdb
+#define regCNVC_CFG0_COLOR_KEYER_BLUE_BASE_IDX 2
+#define regCNVC_CFG0_ALPHA_2BIT_LUT 0x0cdd
+#define regCNVC_CFG0_ALPHA_2BIT_LUT_BASE_IDX 2
+#define regCNVC_CFG0_PRE_DEALPHA 0x0cde
+#define regCNVC_CFG0_PRE_DEALPHA_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_MODE 0x0cdf
+#define regCNVC_CFG0_PRE_CSC_MODE_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_C11_C12 0x0ce0
+#define regCNVC_CFG0_PRE_CSC_C11_C12_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_C13_C14 0x0ce1
+#define regCNVC_CFG0_PRE_CSC_C13_C14_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_C21_C22 0x0ce2
+#define regCNVC_CFG0_PRE_CSC_C21_C22_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_C23_C24 0x0ce3
+#define regCNVC_CFG0_PRE_CSC_C23_C24_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_C31_C32 0x0ce4
+#define regCNVC_CFG0_PRE_CSC_C31_C32_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_C33_C34 0x0ce5
+#define regCNVC_CFG0_PRE_CSC_C33_C34_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_B_C11_C12 0x0ce6
+#define regCNVC_CFG0_PRE_CSC_B_C11_C12_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_B_C13_C14 0x0ce7
+#define regCNVC_CFG0_PRE_CSC_B_C13_C14_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_B_C21_C22 0x0ce8
+#define regCNVC_CFG0_PRE_CSC_B_C21_C22_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_B_C23_C24 0x0ce9
+#define regCNVC_CFG0_PRE_CSC_B_C23_C24_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_B_C31_C32 0x0cea
+#define regCNVC_CFG0_PRE_CSC_B_C31_C32_BASE_IDX 2
+#define regCNVC_CFG0_PRE_CSC_B_C33_C34 0x0ceb
+#define regCNVC_CFG0_PRE_CSC_B_C33_C34_BASE_IDX 2
+#define regCNVC_CFG0_CNVC_COEF_FORMAT 0x0cec
+#define regCNVC_CFG0_CNVC_COEF_FORMAT_BASE_IDX 2
+#define regCNVC_CFG0_PRE_DEGAM 0x0ced
+#define regCNVC_CFG0_PRE_DEGAM_BASE_IDX 2
+#define regCNVC_CFG0_PRE_REALPHA 0x0cee
+#define regCNVC_CFG0_PRE_REALPHA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cnvc_cur_dispdec
+// base address: 0x0
+#define regCNVC_CUR0_CURSOR0_CONTROL 0x0cf1
+#define regCNVC_CUR0_CURSOR0_CONTROL_BASE_IDX 2
+#define regCNVC_CUR0_CURSOR0_COLOR0 0x0cf2
+#define regCNVC_CUR0_CURSOR0_COLOR0_BASE_IDX 2
+#define regCNVC_CUR0_CURSOR0_COLOR1 0x0cf3
+#define regCNVC_CUR0_CURSOR0_COLOR1_BASE_IDX 2
+#define regCNVC_CUR0_CURSOR0_FP_SCALE_BIAS 0x0cf4
+#define regCNVC_CUR0_CURSOR0_FP_SCALE_BIAS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_dscl_dispdec
+// base address: 0x0
+#define regDSCL0_SCL_COEF_RAM_TAP_SELECT 0x0cf9
+#define regDSCL0_SCL_COEF_RAM_TAP_SELECT_BASE_IDX 2
+#define regDSCL0_SCL_COEF_RAM_TAP_DATA 0x0cfa
+#define regDSCL0_SCL_COEF_RAM_TAP_DATA_BASE_IDX 2
+#define regDSCL0_SCL_MODE 0x0cfb
+#define regDSCL0_SCL_MODE_BASE_IDX 2
+#define regDSCL0_SCL_TAP_CONTROL 0x0cfc
+#define regDSCL0_SCL_TAP_CONTROL_BASE_IDX 2
+#define regDSCL0_DSCL_CONTROL 0x0cfd
+#define regDSCL0_DSCL_CONTROL_BASE_IDX 2
+#define regDSCL0_DSCL_2TAP_CONTROL 0x0cfe
+#define regDSCL0_DSCL_2TAP_CONTROL_BASE_IDX 2
+#define regDSCL0_SCL_MANUAL_REPLICATE_CONTROL 0x0cff
+#define regDSCL0_SCL_MANUAL_REPLICATE_CONTROL_BASE_IDX 2
+#define regDSCL0_SCL_HORZ_FILTER_SCALE_RATIO 0x0d00
+#define regDSCL0_SCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2
+#define regDSCL0_SCL_HORZ_FILTER_INIT 0x0d01
+#define regDSCL0_SCL_HORZ_FILTER_INIT_BASE_IDX 2
+#define regDSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C 0x0d02
+#define regDSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define regDSCL0_SCL_HORZ_FILTER_INIT_C 0x0d03
+#define regDSCL0_SCL_HORZ_FILTER_INIT_C_BASE_IDX 2
+#define regDSCL0_SCL_VERT_FILTER_SCALE_RATIO 0x0d04
+#define regDSCL0_SCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2
+#define regDSCL0_SCL_VERT_FILTER_INIT 0x0d05
+#define regDSCL0_SCL_VERT_FILTER_INIT_BASE_IDX 2
+#define regDSCL0_SCL_VERT_FILTER_INIT_BOT 0x0d06
+#define regDSCL0_SCL_VERT_FILTER_INIT_BOT_BASE_IDX 2
+#define regDSCL0_SCL_VERT_FILTER_SCALE_RATIO_C 0x0d07
+#define regDSCL0_SCL_VERT_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define regDSCL0_SCL_VERT_FILTER_INIT_C 0x0d08
+#define regDSCL0_SCL_VERT_FILTER_INIT_C_BASE_IDX 2
+#define regDSCL0_SCL_VERT_FILTER_INIT_BOT_C 0x0d09
+#define regDSCL0_SCL_VERT_FILTER_INIT_BOT_C_BASE_IDX 2
+#define regDSCL0_SCL_BLACK_COLOR 0x0d0a
+#define regDSCL0_SCL_BLACK_COLOR_BASE_IDX 2
+#define regDSCL0_DSCL_UPDATE 0x0d0b
+#define regDSCL0_DSCL_UPDATE_BASE_IDX 2
+#define regDSCL0_DSCL_AUTOCAL 0x0d0c
+#define regDSCL0_DSCL_AUTOCAL_BASE_IDX 2
+#define regDSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT 0x0d0d
+#define regDSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT_BASE_IDX 2
+#define regDSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM 0x0d0e
+#define regDSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM_BASE_IDX 2
+#define regDSCL0_OTG_H_BLANK 0x0d0f
+#define regDSCL0_OTG_H_BLANK_BASE_IDX 2
+#define regDSCL0_OTG_V_BLANK 0x0d10
+#define regDSCL0_OTG_V_BLANK_BASE_IDX 2
+#define regDSCL0_RECOUT_START 0x0d11
+#define regDSCL0_RECOUT_START_BASE_IDX 2
+#define regDSCL0_RECOUT_SIZE 0x0d12
+#define regDSCL0_RECOUT_SIZE_BASE_IDX 2
+#define regDSCL0_MPC_SIZE 0x0d13
+#define regDSCL0_MPC_SIZE_BASE_IDX 2
+#define regDSCL0_LB_DATA_FORMAT 0x0d14
+#define regDSCL0_LB_DATA_FORMAT_BASE_IDX 2
+#define regDSCL0_LB_MEMORY_CTRL 0x0d15
+#define regDSCL0_LB_MEMORY_CTRL_BASE_IDX 2
+#define regDSCL0_LB_V_COUNTER 0x0d16
+#define regDSCL0_LB_V_COUNTER_BASE_IDX 2
+#define regDSCL0_DSCL_MEM_PWR_CTRL 0x0d17
+#define regDSCL0_DSCL_MEM_PWR_CTRL_BASE_IDX 2
+#define regDSCL0_DSCL_MEM_PWR_STATUS 0x0d18
+#define regDSCL0_DSCL_MEM_PWR_STATUS_BASE_IDX 2
+#define regDSCL0_OBUF_CONTROL 0x0d19
+#define regDSCL0_OBUF_CONTROL_BASE_IDX 2
+#define regDSCL0_OBUF_MEM_PWR_CTRL 0x0d1a
+#define regDSCL0_OBUF_MEM_PWR_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cm_dispdec
+// base address: 0x0
+#define regCM0_CM_CONTROL 0x0d20
+#define regCM0_CM_CONTROL_BASE_IDX 2
+#define regCM0_CM_POST_CSC_CONTROL 0x0d21
+#define regCM0_CM_POST_CSC_CONTROL_BASE_IDX 2
+#define regCM0_CM_POST_CSC_C11_C12 0x0d22
+#define regCM0_CM_POST_CSC_C11_C12_BASE_IDX 2
+#define regCM0_CM_POST_CSC_C13_C14 0x0d23
+#define regCM0_CM_POST_CSC_C13_C14_BASE_IDX 2
+#define regCM0_CM_POST_CSC_C21_C22 0x0d24
+#define regCM0_CM_POST_CSC_C21_C22_BASE_IDX 2
+#define regCM0_CM_POST_CSC_C23_C24 0x0d25
+#define regCM0_CM_POST_CSC_C23_C24_BASE_IDX 2
+#define regCM0_CM_POST_CSC_C31_C32 0x0d26
+#define regCM0_CM_POST_CSC_C31_C32_BASE_IDX 2
+#define regCM0_CM_POST_CSC_C33_C34 0x0d27
+#define regCM0_CM_POST_CSC_C33_C34_BASE_IDX 2
+#define regCM0_CM_POST_CSC_B_C11_C12 0x0d28
+#define regCM0_CM_POST_CSC_B_C11_C12_BASE_IDX 2
+#define regCM0_CM_POST_CSC_B_C13_C14 0x0d29
+#define regCM0_CM_POST_CSC_B_C13_C14_BASE_IDX 2
+#define regCM0_CM_POST_CSC_B_C21_C22 0x0d2a
+#define regCM0_CM_POST_CSC_B_C21_C22_BASE_IDX 2
+#define regCM0_CM_POST_CSC_B_C23_C24 0x0d2b
+#define regCM0_CM_POST_CSC_B_C23_C24_BASE_IDX 2
+#define regCM0_CM_POST_CSC_B_C31_C32 0x0d2c
+#define regCM0_CM_POST_CSC_B_C31_C32_BASE_IDX 2
+#define regCM0_CM_POST_CSC_B_C33_C34 0x0d2d
+#define regCM0_CM_POST_CSC_B_C33_C34_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_CONTROL 0x0d2e
+#define regCM0_CM_GAMUT_REMAP_CONTROL_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_C11_C12 0x0d2f
+#define regCM0_CM_GAMUT_REMAP_C11_C12_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_C13_C14 0x0d30
+#define regCM0_CM_GAMUT_REMAP_C13_C14_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_C21_C22 0x0d31
+#define regCM0_CM_GAMUT_REMAP_C21_C22_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_C23_C24 0x0d32
+#define regCM0_CM_GAMUT_REMAP_C23_C24_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_C31_C32 0x0d33
+#define regCM0_CM_GAMUT_REMAP_C31_C32_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_C33_C34 0x0d34
+#define regCM0_CM_GAMUT_REMAP_C33_C34_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_B_C11_C12 0x0d35
+#define regCM0_CM_GAMUT_REMAP_B_C11_C12_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_B_C13_C14 0x0d36
+#define regCM0_CM_GAMUT_REMAP_B_C13_C14_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_B_C21_C22 0x0d37
+#define regCM0_CM_GAMUT_REMAP_B_C21_C22_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_B_C23_C24 0x0d38
+#define regCM0_CM_GAMUT_REMAP_B_C23_C24_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_B_C31_C32 0x0d39
+#define regCM0_CM_GAMUT_REMAP_B_C31_C32_BASE_IDX 2
+#define regCM0_CM_GAMUT_REMAP_B_C33_C34 0x0d3a
+#define regCM0_CM_GAMUT_REMAP_B_C33_C34_BASE_IDX 2
+#define regCM0_CM_BIAS_CR_R 0x0d3b
+#define regCM0_CM_BIAS_CR_R_BASE_IDX 2
+#define regCM0_CM_BIAS_Y_G_CB_B 0x0d3c
+#define regCM0_CM_BIAS_Y_G_CB_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_CONTROL 0x0d3d
+#define regCM0_CM_GAMCOR_CONTROL_BASE_IDX 2
+#define regCM0_CM_GAMCOR_LUT_INDEX 0x0d3e
+#define regCM0_CM_GAMCOR_LUT_INDEX_BASE_IDX 2
+#define regCM0_CM_GAMCOR_LUT_DATA 0x0d3f
+#define regCM0_CM_GAMCOR_LUT_DATA_BASE_IDX 2
+#define regCM0_CM_GAMCOR_LUT_CONTROL 0x0d40
+#define regCM0_CM_GAMCOR_LUT_CONTROL_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_START_CNTL_B 0x0d41
+#define regCM0_CM_GAMCOR_RAMA_START_CNTL_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_START_CNTL_G 0x0d42
+#define regCM0_CM_GAMCOR_RAMA_START_CNTL_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_START_CNTL_R 0x0d43
+#define regCM0_CM_GAMCOR_RAMA_START_CNTL_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B 0x0d44
+#define regCM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G 0x0d45
+#define regCM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R 0x0d46
+#define regCM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_START_BASE_CNTL_B 0x0d47
+#define regCM0_CM_GAMCOR_RAMA_START_BASE_CNTL_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_START_BASE_CNTL_G 0x0d48
+#define regCM0_CM_GAMCOR_RAMA_START_BASE_CNTL_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_START_BASE_CNTL_R 0x0d49
+#define regCM0_CM_GAMCOR_RAMA_START_BASE_CNTL_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL1_B 0x0d4a
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL1_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL2_B 0x0d4b
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL2_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL1_G 0x0d4c
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL1_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL2_G 0x0d4d
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL2_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL1_R 0x0d4e
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL1_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL2_R 0x0d4f
+#define regCM0_CM_GAMCOR_RAMA_END_CNTL2_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_OFFSET_B 0x0d50
+#define regCM0_CM_GAMCOR_RAMA_OFFSET_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_OFFSET_G 0x0d51
+#define regCM0_CM_GAMCOR_RAMA_OFFSET_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_OFFSET_R 0x0d52
+#define regCM0_CM_GAMCOR_RAMA_OFFSET_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_0_1 0x0d53
+#define regCM0_CM_GAMCOR_RAMA_REGION_0_1_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_2_3 0x0d54
+#define regCM0_CM_GAMCOR_RAMA_REGION_2_3_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_4_5 0x0d55
+#define regCM0_CM_GAMCOR_RAMA_REGION_4_5_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_6_7 0x0d56
+#define regCM0_CM_GAMCOR_RAMA_REGION_6_7_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_8_9 0x0d57
+#define regCM0_CM_GAMCOR_RAMA_REGION_8_9_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_10_11 0x0d58
+#define regCM0_CM_GAMCOR_RAMA_REGION_10_11_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_12_13 0x0d59
+#define regCM0_CM_GAMCOR_RAMA_REGION_12_13_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_14_15 0x0d5a
+#define regCM0_CM_GAMCOR_RAMA_REGION_14_15_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_16_17 0x0d5b
+#define regCM0_CM_GAMCOR_RAMA_REGION_16_17_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_18_19 0x0d5c
+#define regCM0_CM_GAMCOR_RAMA_REGION_18_19_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_20_21 0x0d5d
+#define regCM0_CM_GAMCOR_RAMA_REGION_20_21_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_22_23 0x0d5e
+#define regCM0_CM_GAMCOR_RAMA_REGION_22_23_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_24_25 0x0d5f
+#define regCM0_CM_GAMCOR_RAMA_REGION_24_25_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_26_27 0x0d60
+#define regCM0_CM_GAMCOR_RAMA_REGION_26_27_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_28_29 0x0d61
+#define regCM0_CM_GAMCOR_RAMA_REGION_28_29_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_30_31 0x0d62
+#define regCM0_CM_GAMCOR_RAMA_REGION_30_31_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMA_REGION_32_33 0x0d63
+#define regCM0_CM_GAMCOR_RAMA_REGION_32_33_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_START_CNTL_B 0x0d64
+#define regCM0_CM_GAMCOR_RAMB_START_CNTL_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_START_CNTL_G 0x0d65
+#define regCM0_CM_GAMCOR_RAMB_START_CNTL_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_START_CNTL_R 0x0d66
+#define regCM0_CM_GAMCOR_RAMB_START_CNTL_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B 0x0d67
+#define regCM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G 0x0d68
+#define regCM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R 0x0d69
+#define regCM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_START_BASE_CNTL_B 0x0d6a
+#define regCM0_CM_GAMCOR_RAMB_START_BASE_CNTL_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_START_BASE_CNTL_G 0x0d6b
+#define regCM0_CM_GAMCOR_RAMB_START_BASE_CNTL_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_START_BASE_CNTL_R 0x0d6c
+#define regCM0_CM_GAMCOR_RAMB_START_BASE_CNTL_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL1_B 0x0d6d
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL1_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL2_B 0x0d6e
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL2_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL1_G 0x0d6f
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL1_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL2_G 0x0d70
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL2_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL1_R 0x0d71
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL1_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL2_R 0x0d72
+#define regCM0_CM_GAMCOR_RAMB_END_CNTL2_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_OFFSET_B 0x0d73
+#define regCM0_CM_GAMCOR_RAMB_OFFSET_B_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_OFFSET_G 0x0d74
+#define regCM0_CM_GAMCOR_RAMB_OFFSET_G_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_OFFSET_R 0x0d75
+#define regCM0_CM_GAMCOR_RAMB_OFFSET_R_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_0_1 0x0d76
+#define regCM0_CM_GAMCOR_RAMB_REGION_0_1_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_2_3 0x0d77
+#define regCM0_CM_GAMCOR_RAMB_REGION_2_3_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_4_5 0x0d78
+#define regCM0_CM_GAMCOR_RAMB_REGION_4_5_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_6_7 0x0d79
+#define regCM0_CM_GAMCOR_RAMB_REGION_6_7_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_8_9 0x0d7a
+#define regCM0_CM_GAMCOR_RAMB_REGION_8_9_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_10_11 0x0d7b
+#define regCM0_CM_GAMCOR_RAMB_REGION_10_11_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_12_13 0x0d7c
+#define regCM0_CM_GAMCOR_RAMB_REGION_12_13_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_14_15 0x0d7d
+#define regCM0_CM_GAMCOR_RAMB_REGION_14_15_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_16_17 0x0d7e
+#define regCM0_CM_GAMCOR_RAMB_REGION_16_17_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_18_19 0x0d7f
+#define regCM0_CM_GAMCOR_RAMB_REGION_18_19_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_20_21 0x0d80
+#define regCM0_CM_GAMCOR_RAMB_REGION_20_21_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_22_23 0x0d81
+#define regCM0_CM_GAMCOR_RAMB_REGION_22_23_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_24_25 0x0d82
+#define regCM0_CM_GAMCOR_RAMB_REGION_24_25_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_26_27 0x0d83
+#define regCM0_CM_GAMCOR_RAMB_REGION_26_27_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_28_29 0x0d84
+#define regCM0_CM_GAMCOR_RAMB_REGION_28_29_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_30_31 0x0d85
+#define regCM0_CM_GAMCOR_RAMB_REGION_30_31_BASE_IDX 2
+#define regCM0_CM_GAMCOR_RAMB_REGION_32_33 0x0d86
+#define regCM0_CM_GAMCOR_RAMB_REGION_32_33_BASE_IDX 2
+#define regCM0_CM_HDR_MULT_COEF 0x0d87
+#define regCM0_CM_HDR_MULT_COEF_BASE_IDX 2
+#define regCM0_CM_MEM_PWR_CTRL 0x0d88
+#define regCM0_CM_MEM_PWR_CTRL_BASE_IDX 2
+#define regCM0_CM_MEM_PWR_STATUS 0x0d89
+#define regCM0_CM_MEM_PWR_STATUS_BASE_IDX 2
+#define regCM0_CM_DEALPHA 0x0d8b
+#define regCM0_CM_DEALPHA_BASE_IDX 2
+#define regCM0_CM_COEF_FORMAT 0x0d8c
+#define regCM0_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM0_CM_TEST_DEBUG_INDEX 0x0d8d
+#define regCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM0_CM_TEST_DEBUG_DATA 0x0d8e
+#define regCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2
+#define regCM0_DPP_CRC_VAL_R 0x0d8f
+#define regCM0_DPP_CRC_VAL_R_BASE_IDX 2
+#define regCM0_DPP_CRC_VAL_G 0x0d90
+#define regCM0_DPP_CRC_VAL_G_BASE_IDX 2
+#define regCM0_DPP_CRC_VAL_B 0x0d91
+#define regCM0_DPP_CRC_VAL_B_BASE_IDX 2
+#define regCM0_DPP_CRC_VAL_A 0x0d92
+#define regCM0_DPP_CRC_VAL_A_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_dpp_top_dispdec
+// base address: 0x0
+#define regDPP_TOP0_DPP_CONTROL 0x0cc5
+#define regDPP_TOP0_DPP_CONTROL_BASE_IDX 2
+#define regDPP_TOP0_DPP_SOFT_RESET 0x0cc6
+#define regDPP_TOP0_DPP_SOFT_RESET_BASE_IDX 2
+#define regDPP_TOP0_DPP_CRC_CTRL 0x0cc9
+#define regDPP_TOP0_DPP_CRC_CTRL_BASE_IDX 2
+#define regDPP_TOP0_HOST_READ_CONTROL 0x0cca
+#define regDPP_TOP0_HOST_READ_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x3890
+#define regDC_PERFMON11_PERFCOUNTER_CNTL 0x0e24
+#define regDC_PERFMON11_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON11_PERFCOUNTER_CNTL2 0x0e25
+#define regDC_PERFMON11_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON11_PERFCOUNTER_STATE 0x0e26
+#define regDC_PERFMON11_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON11_PERFMON_CNTL 0x0e27
+#define regDC_PERFMON11_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON11_PERFMON_CNTL2 0x0e28
+#define regDC_PERFMON11_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON11_PERFMON_CVALUE_INT_MISC 0x0e29
+#define regDC_PERFMON11_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON11_PERFMON_CVALUE_LOW 0x0e2a
+#define regDC_PERFMON11_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON11_PERFMON_HI 0x0e2b
+#define regDC_PERFMON11_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON11_PERFMON_LOW 0x0e2c
+#define regDC_PERFMON11_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cnvc_cfg_dispdec
+// base address: 0x5ac
+#define regCNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT 0x0e3a
+#define regCNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT_BASE_IDX 2
+#define regCNVC_CFG1_FORMAT_CONTROL 0x0e3b
+#define regCNVC_CFG1_FORMAT_CONTROL_BASE_IDX 2
+#define regCNVC_CFG1_FCNV_FP_BIAS_R 0x0e3c
+#define regCNVC_CFG1_FCNV_FP_BIAS_R_BASE_IDX 2
+#define regCNVC_CFG1_FCNV_FP_BIAS_G 0x0e3d
+#define regCNVC_CFG1_FCNV_FP_BIAS_G_BASE_IDX 2
+#define regCNVC_CFG1_FCNV_FP_BIAS_B 0x0e3e
+#define regCNVC_CFG1_FCNV_FP_BIAS_B_BASE_IDX 2
+#define regCNVC_CFG1_FCNV_FP_SCALE_R 0x0e3f
+#define regCNVC_CFG1_FCNV_FP_SCALE_R_BASE_IDX 2
+#define regCNVC_CFG1_FCNV_FP_SCALE_G 0x0e40
+#define regCNVC_CFG1_FCNV_FP_SCALE_G_BASE_IDX 2
+#define regCNVC_CFG1_FCNV_FP_SCALE_B 0x0e41
+#define regCNVC_CFG1_FCNV_FP_SCALE_B_BASE_IDX 2
+#define regCNVC_CFG1_COLOR_KEYER_CONTROL 0x0e42
+#define regCNVC_CFG1_COLOR_KEYER_CONTROL_BASE_IDX 2
+#define regCNVC_CFG1_COLOR_KEYER_ALPHA 0x0e43
+#define regCNVC_CFG1_COLOR_KEYER_ALPHA_BASE_IDX 2
+#define regCNVC_CFG1_COLOR_KEYER_RED 0x0e44
+#define regCNVC_CFG1_COLOR_KEYER_RED_BASE_IDX 2
+#define regCNVC_CFG1_COLOR_KEYER_GREEN 0x0e45
+#define regCNVC_CFG1_COLOR_KEYER_GREEN_BASE_IDX 2
+#define regCNVC_CFG1_COLOR_KEYER_BLUE 0x0e46
+#define regCNVC_CFG1_COLOR_KEYER_BLUE_BASE_IDX 2
+#define regCNVC_CFG1_ALPHA_2BIT_LUT 0x0e48
+#define regCNVC_CFG1_ALPHA_2BIT_LUT_BASE_IDX 2
+#define regCNVC_CFG1_PRE_DEALPHA 0x0e49
+#define regCNVC_CFG1_PRE_DEALPHA_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_MODE 0x0e4a
+#define regCNVC_CFG1_PRE_CSC_MODE_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_C11_C12 0x0e4b
+#define regCNVC_CFG1_PRE_CSC_C11_C12_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_C13_C14 0x0e4c
+#define regCNVC_CFG1_PRE_CSC_C13_C14_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_C21_C22 0x0e4d
+#define regCNVC_CFG1_PRE_CSC_C21_C22_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_C23_C24 0x0e4e
+#define regCNVC_CFG1_PRE_CSC_C23_C24_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_C31_C32 0x0e4f
+#define regCNVC_CFG1_PRE_CSC_C31_C32_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_C33_C34 0x0e50
+#define regCNVC_CFG1_PRE_CSC_C33_C34_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_B_C11_C12 0x0e51
+#define regCNVC_CFG1_PRE_CSC_B_C11_C12_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_B_C13_C14 0x0e52
+#define regCNVC_CFG1_PRE_CSC_B_C13_C14_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_B_C21_C22 0x0e53
+#define regCNVC_CFG1_PRE_CSC_B_C21_C22_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_B_C23_C24 0x0e54
+#define regCNVC_CFG1_PRE_CSC_B_C23_C24_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_B_C31_C32 0x0e55
+#define regCNVC_CFG1_PRE_CSC_B_C31_C32_BASE_IDX 2
+#define regCNVC_CFG1_PRE_CSC_B_C33_C34 0x0e56
+#define regCNVC_CFG1_PRE_CSC_B_C33_C34_BASE_IDX 2
+#define regCNVC_CFG1_CNVC_COEF_FORMAT 0x0e57
+#define regCNVC_CFG1_CNVC_COEF_FORMAT_BASE_IDX 2
+#define regCNVC_CFG1_PRE_DEGAM 0x0e58
+#define regCNVC_CFG1_PRE_DEGAM_BASE_IDX 2
+#define regCNVC_CFG1_PRE_REALPHA 0x0e59
+#define regCNVC_CFG1_PRE_REALPHA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cnvc_cur_dispdec
+// base address: 0x5ac
+#define regCNVC_CUR1_CURSOR0_CONTROL 0x0e5c
+#define regCNVC_CUR1_CURSOR0_CONTROL_BASE_IDX 2
+#define regCNVC_CUR1_CURSOR0_COLOR0 0x0e5d
+#define regCNVC_CUR1_CURSOR0_COLOR0_BASE_IDX 2
+#define regCNVC_CUR1_CURSOR0_COLOR1 0x0e5e
+#define regCNVC_CUR1_CURSOR0_COLOR1_BASE_IDX 2
+#define regCNVC_CUR1_CURSOR0_FP_SCALE_BIAS 0x0e5f
+#define regCNVC_CUR1_CURSOR0_FP_SCALE_BIAS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_dscl_dispdec
+// base address: 0x5ac
+#define regDSCL1_SCL_COEF_RAM_TAP_SELECT 0x0e64
+#define regDSCL1_SCL_COEF_RAM_TAP_SELECT_BASE_IDX 2
+#define regDSCL1_SCL_COEF_RAM_TAP_DATA 0x0e65
+#define regDSCL1_SCL_COEF_RAM_TAP_DATA_BASE_IDX 2
+#define regDSCL1_SCL_MODE 0x0e66
+#define regDSCL1_SCL_MODE_BASE_IDX 2
+#define regDSCL1_SCL_TAP_CONTROL 0x0e67
+#define regDSCL1_SCL_TAP_CONTROL_BASE_IDX 2
+#define regDSCL1_DSCL_CONTROL 0x0e68
+#define regDSCL1_DSCL_CONTROL_BASE_IDX 2
+#define regDSCL1_DSCL_2TAP_CONTROL 0x0e69
+#define regDSCL1_DSCL_2TAP_CONTROL_BASE_IDX 2
+#define regDSCL1_SCL_MANUAL_REPLICATE_CONTROL 0x0e6a
+#define regDSCL1_SCL_MANUAL_REPLICATE_CONTROL_BASE_IDX 2
+#define regDSCL1_SCL_HORZ_FILTER_SCALE_RATIO 0x0e6b
+#define regDSCL1_SCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2
+#define regDSCL1_SCL_HORZ_FILTER_INIT 0x0e6c
+#define regDSCL1_SCL_HORZ_FILTER_INIT_BASE_IDX 2
+#define regDSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C 0x0e6d
+#define regDSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define regDSCL1_SCL_HORZ_FILTER_INIT_C 0x0e6e
+#define regDSCL1_SCL_HORZ_FILTER_INIT_C_BASE_IDX 2
+#define regDSCL1_SCL_VERT_FILTER_SCALE_RATIO 0x0e6f
+#define regDSCL1_SCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2
+#define regDSCL1_SCL_VERT_FILTER_INIT 0x0e70
+#define regDSCL1_SCL_VERT_FILTER_INIT_BASE_IDX 2
+#define regDSCL1_SCL_VERT_FILTER_INIT_BOT 0x0e71
+#define regDSCL1_SCL_VERT_FILTER_INIT_BOT_BASE_IDX 2
+#define regDSCL1_SCL_VERT_FILTER_SCALE_RATIO_C 0x0e72
+#define regDSCL1_SCL_VERT_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define regDSCL1_SCL_VERT_FILTER_INIT_C 0x0e73
+#define regDSCL1_SCL_VERT_FILTER_INIT_C_BASE_IDX 2
+#define regDSCL1_SCL_VERT_FILTER_INIT_BOT_C 0x0e74
+#define regDSCL1_SCL_VERT_FILTER_INIT_BOT_C_BASE_IDX 2
+#define regDSCL1_SCL_BLACK_COLOR 0x0e75
+#define regDSCL1_SCL_BLACK_COLOR_BASE_IDX 2
+#define regDSCL1_DSCL_UPDATE 0x0e76
+#define regDSCL1_DSCL_UPDATE_BASE_IDX 2
+#define regDSCL1_DSCL_AUTOCAL 0x0e77
+#define regDSCL1_DSCL_AUTOCAL_BASE_IDX 2
+#define regDSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT 0x0e78
+#define regDSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT_BASE_IDX 2
+#define regDSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM 0x0e79
+#define regDSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM_BASE_IDX 2
+#define regDSCL1_OTG_H_BLANK 0x0e7a
+#define regDSCL1_OTG_H_BLANK_BASE_IDX 2
+#define regDSCL1_OTG_V_BLANK 0x0e7b
+#define regDSCL1_OTG_V_BLANK_BASE_IDX 2
+#define regDSCL1_RECOUT_START 0x0e7c
+#define regDSCL1_RECOUT_START_BASE_IDX 2
+#define regDSCL1_RECOUT_SIZE 0x0e7d
+#define regDSCL1_RECOUT_SIZE_BASE_IDX 2
+#define regDSCL1_MPC_SIZE 0x0e7e
+#define regDSCL1_MPC_SIZE_BASE_IDX 2
+#define regDSCL1_LB_DATA_FORMAT 0x0e7f
+#define regDSCL1_LB_DATA_FORMAT_BASE_IDX 2
+#define regDSCL1_LB_MEMORY_CTRL 0x0e80
+#define regDSCL1_LB_MEMORY_CTRL_BASE_IDX 2
+#define regDSCL1_LB_V_COUNTER 0x0e81
+#define regDSCL1_LB_V_COUNTER_BASE_IDX 2
+#define regDSCL1_DSCL_MEM_PWR_CTRL 0x0e82
+#define regDSCL1_DSCL_MEM_PWR_CTRL_BASE_IDX 2
+#define regDSCL1_DSCL_MEM_PWR_STATUS 0x0e83
+#define regDSCL1_DSCL_MEM_PWR_STATUS_BASE_IDX 2
+#define regDSCL1_OBUF_CONTROL 0x0e84
+#define regDSCL1_OBUF_CONTROL_BASE_IDX 2
+#define regDSCL1_OBUF_MEM_PWR_CTRL 0x0e85
+#define regDSCL1_OBUF_MEM_PWR_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cm_dispdec
+// base address: 0x5ac
+#define regCM1_CM_CONTROL 0x0e8b
+#define regCM1_CM_CONTROL_BASE_IDX 2
+#define regCM1_CM_POST_CSC_CONTROL 0x0e8c
+#define regCM1_CM_POST_CSC_CONTROL_BASE_IDX 2
+#define regCM1_CM_POST_CSC_C11_C12 0x0e8d
+#define regCM1_CM_POST_CSC_C11_C12_BASE_IDX 2
+#define regCM1_CM_POST_CSC_C13_C14 0x0e8e
+#define regCM1_CM_POST_CSC_C13_C14_BASE_IDX 2
+#define regCM1_CM_POST_CSC_C21_C22 0x0e8f
+#define regCM1_CM_POST_CSC_C21_C22_BASE_IDX 2
+#define regCM1_CM_POST_CSC_C23_C24 0x0e90
+#define regCM1_CM_POST_CSC_C23_C24_BASE_IDX 2
+#define regCM1_CM_POST_CSC_C31_C32 0x0e91
+#define regCM1_CM_POST_CSC_C31_C32_BASE_IDX 2
+#define regCM1_CM_POST_CSC_C33_C34 0x0e92
+#define regCM1_CM_POST_CSC_C33_C34_BASE_IDX 2
+#define regCM1_CM_POST_CSC_B_C11_C12 0x0e93
+#define regCM1_CM_POST_CSC_B_C11_C12_BASE_IDX 2
+#define regCM1_CM_POST_CSC_B_C13_C14 0x0e94
+#define regCM1_CM_POST_CSC_B_C13_C14_BASE_IDX 2
+#define regCM1_CM_POST_CSC_B_C21_C22 0x0e95
+#define regCM1_CM_POST_CSC_B_C21_C22_BASE_IDX 2
+#define regCM1_CM_POST_CSC_B_C23_C24 0x0e96
+#define regCM1_CM_POST_CSC_B_C23_C24_BASE_IDX 2
+#define regCM1_CM_POST_CSC_B_C31_C32 0x0e97
+#define regCM1_CM_POST_CSC_B_C31_C32_BASE_IDX 2
+#define regCM1_CM_POST_CSC_B_C33_C34 0x0e98
+#define regCM1_CM_POST_CSC_B_C33_C34_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_CONTROL 0x0e99
+#define regCM1_CM_GAMUT_REMAP_CONTROL_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_C11_C12 0x0e9a
+#define regCM1_CM_GAMUT_REMAP_C11_C12_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_C13_C14 0x0e9b
+#define regCM1_CM_GAMUT_REMAP_C13_C14_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_C21_C22 0x0e9c
+#define regCM1_CM_GAMUT_REMAP_C21_C22_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_C23_C24 0x0e9d
+#define regCM1_CM_GAMUT_REMAP_C23_C24_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_C31_C32 0x0e9e
+#define regCM1_CM_GAMUT_REMAP_C31_C32_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_C33_C34 0x0e9f
+#define regCM1_CM_GAMUT_REMAP_C33_C34_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_B_C11_C12 0x0ea0
+#define regCM1_CM_GAMUT_REMAP_B_C11_C12_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_B_C13_C14 0x0ea1
+#define regCM1_CM_GAMUT_REMAP_B_C13_C14_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_B_C21_C22 0x0ea2
+#define regCM1_CM_GAMUT_REMAP_B_C21_C22_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_B_C23_C24 0x0ea3
+#define regCM1_CM_GAMUT_REMAP_B_C23_C24_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_B_C31_C32 0x0ea4
+#define regCM1_CM_GAMUT_REMAP_B_C31_C32_BASE_IDX 2
+#define regCM1_CM_GAMUT_REMAP_B_C33_C34 0x0ea5
+#define regCM1_CM_GAMUT_REMAP_B_C33_C34_BASE_IDX 2
+#define regCM1_CM_BIAS_CR_R 0x0ea6
+#define regCM1_CM_BIAS_CR_R_BASE_IDX 2
+#define regCM1_CM_BIAS_Y_G_CB_B 0x0ea7
+#define regCM1_CM_BIAS_Y_G_CB_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_CONTROL 0x0ea8
+#define regCM1_CM_GAMCOR_CONTROL_BASE_IDX 2
+#define regCM1_CM_GAMCOR_LUT_INDEX 0x0ea9
+#define regCM1_CM_GAMCOR_LUT_INDEX_BASE_IDX 2
+#define regCM1_CM_GAMCOR_LUT_DATA 0x0eaa
+#define regCM1_CM_GAMCOR_LUT_DATA_BASE_IDX 2
+#define regCM1_CM_GAMCOR_LUT_CONTROL 0x0eab
+#define regCM1_CM_GAMCOR_LUT_CONTROL_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_START_CNTL_B 0x0eac
+#define regCM1_CM_GAMCOR_RAMA_START_CNTL_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_START_CNTL_G 0x0ead
+#define regCM1_CM_GAMCOR_RAMA_START_CNTL_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_START_CNTL_R 0x0eae
+#define regCM1_CM_GAMCOR_RAMA_START_CNTL_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B 0x0eaf
+#define regCM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G 0x0eb0
+#define regCM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R 0x0eb1
+#define regCM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_START_BASE_CNTL_B 0x0eb2
+#define regCM1_CM_GAMCOR_RAMA_START_BASE_CNTL_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_START_BASE_CNTL_G 0x0eb3
+#define regCM1_CM_GAMCOR_RAMA_START_BASE_CNTL_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_START_BASE_CNTL_R 0x0eb4
+#define regCM1_CM_GAMCOR_RAMA_START_BASE_CNTL_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL1_B 0x0eb5
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL1_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL2_B 0x0eb6
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL2_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL1_G 0x0eb7
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL1_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL2_G 0x0eb8
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL2_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL1_R 0x0eb9
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL1_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL2_R 0x0eba
+#define regCM1_CM_GAMCOR_RAMA_END_CNTL2_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_OFFSET_B 0x0ebb
+#define regCM1_CM_GAMCOR_RAMA_OFFSET_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_OFFSET_G 0x0ebc
+#define regCM1_CM_GAMCOR_RAMA_OFFSET_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_OFFSET_R 0x0ebd
+#define regCM1_CM_GAMCOR_RAMA_OFFSET_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_0_1 0x0ebe
+#define regCM1_CM_GAMCOR_RAMA_REGION_0_1_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_2_3 0x0ebf
+#define regCM1_CM_GAMCOR_RAMA_REGION_2_3_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_4_5 0x0ec0
+#define regCM1_CM_GAMCOR_RAMA_REGION_4_5_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_6_7 0x0ec1
+#define regCM1_CM_GAMCOR_RAMA_REGION_6_7_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_8_9 0x0ec2
+#define regCM1_CM_GAMCOR_RAMA_REGION_8_9_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_10_11 0x0ec3
+#define regCM1_CM_GAMCOR_RAMA_REGION_10_11_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_12_13 0x0ec4
+#define regCM1_CM_GAMCOR_RAMA_REGION_12_13_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_14_15 0x0ec5
+#define regCM1_CM_GAMCOR_RAMA_REGION_14_15_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_16_17 0x0ec6
+#define regCM1_CM_GAMCOR_RAMA_REGION_16_17_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_18_19 0x0ec7
+#define regCM1_CM_GAMCOR_RAMA_REGION_18_19_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_20_21 0x0ec8
+#define regCM1_CM_GAMCOR_RAMA_REGION_20_21_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_22_23 0x0ec9
+#define regCM1_CM_GAMCOR_RAMA_REGION_22_23_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_24_25 0x0eca
+#define regCM1_CM_GAMCOR_RAMA_REGION_24_25_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_26_27 0x0ecb
+#define regCM1_CM_GAMCOR_RAMA_REGION_26_27_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_28_29 0x0ecc
+#define regCM1_CM_GAMCOR_RAMA_REGION_28_29_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_30_31 0x0ecd
+#define regCM1_CM_GAMCOR_RAMA_REGION_30_31_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMA_REGION_32_33 0x0ece
+#define regCM1_CM_GAMCOR_RAMA_REGION_32_33_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_START_CNTL_B 0x0ecf
+#define regCM1_CM_GAMCOR_RAMB_START_CNTL_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_START_CNTL_G 0x0ed0
+#define regCM1_CM_GAMCOR_RAMB_START_CNTL_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_START_CNTL_R 0x0ed1
+#define regCM1_CM_GAMCOR_RAMB_START_CNTL_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B 0x0ed2
+#define regCM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G 0x0ed3
+#define regCM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R 0x0ed4
+#define regCM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_START_BASE_CNTL_B 0x0ed5
+#define regCM1_CM_GAMCOR_RAMB_START_BASE_CNTL_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_START_BASE_CNTL_G 0x0ed6
+#define regCM1_CM_GAMCOR_RAMB_START_BASE_CNTL_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_START_BASE_CNTL_R 0x0ed7
+#define regCM1_CM_GAMCOR_RAMB_START_BASE_CNTL_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL1_B 0x0ed8
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL1_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL2_B 0x0ed9
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL2_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL1_G 0x0eda
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL1_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL2_G 0x0edb
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL2_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL1_R 0x0edc
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL1_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL2_R 0x0edd
+#define regCM1_CM_GAMCOR_RAMB_END_CNTL2_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_OFFSET_B 0x0ede
+#define regCM1_CM_GAMCOR_RAMB_OFFSET_B_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_OFFSET_G 0x0edf
+#define regCM1_CM_GAMCOR_RAMB_OFFSET_G_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_OFFSET_R 0x0ee0
+#define regCM1_CM_GAMCOR_RAMB_OFFSET_R_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_0_1 0x0ee1
+#define regCM1_CM_GAMCOR_RAMB_REGION_0_1_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_2_3 0x0ee2
+#define regCM1_CM_GAMCOR_RAMB_REGION_2_3_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_4_5 0x0ee3
+#define regCM1_CM_GAMCOR_RAMB_REGION_4_5_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_6_7 0x0ee4
+#define regCM1_CM_GAMCOR_RAMB_REGION_6_7_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_8_9 0x0ee5
+#define regCM1_CM_GAMCOR_RAMB_REGION_8_9_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_10_11 0x0ee6
+#define regCM1_CM_GAMCOR_RAMB_REGION_10_11_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_12_13 0x0ee7
+#define regCM1_CM_GAMCOR_RAMB_REGION_12_13_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_14_15 0x0ee8
+#define regCM1_CM_GAMCOR_RAMB_REGION_14_15_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_16_17 0x0ee9
+#define regCM1_CM_GAMCOR_RAMB_REGION_16_17_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_18_19 0x0eea
+#define regCM1_CM_GAMCOR_RAMB_REGION_18_19_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_20_21 0x0eeb
+#define regCM1_CM_GAMCOR_RAMB_REGION_20_21_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_22_23 0x0eec
+#define regCM1_CM_GAMCOR_RAMB_REGION_22_23_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_24_25 0x0eed
+#define regCM1_CM_GAMCOR_RAMB_REGION_24_25_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_26_27 0x0eee
+#define regCM1_CM_GAMCOR_RAMB_REGION_26_27_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_28_29 0x0eef
+#define regCM1_CM_GAMCOR_RAMB_REGION_28_29_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_30_31 0x0ef0
+#define regCM1_CM_GAMCOR_RAMB_REGION_30_31_BASE_IDX 2
+#define regCM1_CM_GAMCOR_RAMB_REGION_32_33 0x0ef1
+#define regCM1_CM_GAMCOR_RAMB_REGION_32_33_BASE_IDX 2
+#define regCM1_CM_HDR_MULT_COEF 0x0ef2
+#define regCM1_CM_HDR_MULT_COEF_BASE_IDX 2
+#define regCM1_CM_MEM_PWR_CTRL 0x0ef3
+#define regCM1_CM_MEM_PWR_CTRL_BASE_IDX 2
+#define regCM1_CM_MEM_PWR_STATUS 0x0ef4
+#define regCM1_CM_MEM_PWR_STATUS_BASE_IDX 2
+#define regCM1_CM_DEALPHA 0x0ef6
+#define regCM1_CM_DEALPHA_BASE_IDX 2
+#define regCM1_CM_COEF_FORMAT 0x0ef7
+#define regCM1_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM1_CM_TEST_DEBUG_INDEX 0x0ef8
+#define regCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM1_CM_TEST_DEBUG_DATA 0x0ef9
+#define regCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2
+#define regCM1_DPP_CRC_VAL_R 0x0efa
+#define regCM1_DPP_CRC_VAL_R_BASE_IDX 2
+#define regCM1_DPP_CRC_VAL_G 0x0efb
+#define regCM1_DPP_CRC_VAL_G_BASE_IDX 2
+#define regCM1_DPP_CRC_VAL_B 0x0efc
+#define regCM1_DPP_CRC_VAL_B_BASE_IDX 2
+#define regCM1_DPP_CRC_VAL_A 0x0efd
+#define regCM1_DPP_CRC_VAL_A_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_dpp_top_dispdec
+// base address: 0x5ac
+#define regDPP_TOP1_DPP_CONTROL 0x0e30
+#define regDPP_TOP1_DPP_CONTROL_BASE_IDX 2
+#define regDPP_TOP1_DPP_SOFT_RESET 0x0e31
+#define regDPP_TOP1_DPP_SOFT_RESET_BASE_IDX 2
+#define regDPP_TOP1_DPP_CRC_CTRL 0x0e34
+#define regDPP_TOP1_DPP_CRC_CTRL_BASE_IDX 2
+#define regDPP_TOP1_HOST_READ_CONTROL 0x0e35
+#define regDPP_TOP1_HOST_READ_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x3e3c
+#define regDC_PERFMON12_PERFCOUNTER_CNTL 0x0f8f
+#define regDC_PERFMON12_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON12_PERFCOUNTER_CNTL2 0x0f90
+#define regDC_PERFMON12_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON12_PERFCOUNTER_STATE 0x0f91
+#define regDC_PERFMON12_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON12_PERFMON_CNTL 0x0f92
+#define regDC_PERFMON12_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON12_PERFMON_CNTL2 0x0f93
+#define regDC_PERFMON12_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON12_PERFMON_CVALUE_INT_MISC 0x0f94
+#define regDC_PERFMON12_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON12_PERFMON_CVALUE_LOW 0x0f95
+#define regDC_PERFMON12_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON12_PERFMON_HI 0x0f96
+#define regDC_PERFMON12_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON12_PERFMON_LOW 0x0f97
+#define regDC_PERFMON12_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cnvc_cfg_dispdec
+// base address: 0xb58
+#define regCNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT 0x0fa5
+#define regCNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT_BASE_IDX 2
+#define regCNVC_CFG2_FORMAT_CONTROL 0x0fa6
+#define regCNVC_CFG2_FORMAT_CONTROL_BASE_IDX 2
+#define regCNVC_CFG2_FCNV_FP_BIAS_R 0x0fa7
+#define regCNVC_CFG2_FCNV_FP_BIAS_R_BASE_IDX 2
+#define regCNVC_CFG2_FCNV_FP_BIAS_G 0x0fa8
+#define regCNVC_CFG2_FCNV_FP_BIAS_G_BASE_IDX 2
+#define regCNVC_CFG2_FCNV_FP_BIAS_B 0x0fa9
+#define regCNVC_CFG2_FCNV_FP_BIAS_B_BASE_IDX 2
+#define regCNVC_CFG2_FCNV_FP_SCALE_R 0x0faa
+#define regCNVC_CFG2_FCNV_FP_SCALE_R_BASE_IDX 2
+#define regCNVC_CFG2_FCNV_FP_SCALE_G 0x0fab
+#define regCNVC_CFG2_FCNV_FP_SCALE_G_BASE_IDX 2
+#define regCNVC_CFG2_FCNV_FP_SCALE_B 0x0fac
+#define regCNVC_CFG2_FCNV_FP_SCALE_B_BASE_IDX 2
+#define regCNVC_CFG2_COLOR_KEYER_CONTROL 0x0fad
+#define regCNVC_CFG2_COLOR_KEYER_CONTROL_BASE_IDX 2
+#define regCNVC_CFG2_COLOR_KEYER_ALPHA 0x0fae
+#define regCNVC_CFG2_COLOR_KEYER_ALPHA_BASE_IDX 2
+#define regCNVC_CFG2_COLOR_KEYER_RED 0x0faf
+#define regCNVC_CFG2_COLOR_KEYER_RED_BASE_IDX 2
+#define regCNVC_CFG2_COLOR_KEYER_GREEN 0x0fb0
+#define regCNVC_CFG2_COLOR_KEYER_GREEN_BASE_IDX 2
+#define regCNVC_CFG2_COLOR_KEYER_BLUE 0x0fb1
+#define regCNVC_CFG2_COLOR_KEYER_BLUE_BASE_IDX 2
+#define regCNVC_CFG2_ALPHA_2BIT_LUT 0x0fb3
+#define regCNVC_CFG2_ALPHA_2BIT_LUT_BASE_IDX 2
+#define regCNVC_CFG2_PRE_DEALPHA 0x0fb4
+#define regCNVC_CFG2_PRE_DEALPHA_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_MODE 0x0fb5
+#define regCNVC_CFG2_PRE_CSC_MODE_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_C11_C12 0x0fb6
+#define regCNVC_CFG2_PRE_CSC_C11_C12_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_C13_C14 0x0fb7
+#define regCNVC_CFG2_PRE_CSC_C13_C14_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_C21_C22 0x0fb8
+#define regCNVC_CFG2_PRE_CSC_C21_C22_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_C23_C24 0x0fb9
+#define regCNVC_CFG2_PRE_CSC_C23_C24_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_C31_C32 0x0fba
+#define regCNVC_CFG2_PRE_CSC_C31_C32_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_C33_C34 0x0fbb
+#define regCNVC_CFG2_PRE_CSC_C33_C34_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_B_C11_C12 0x0fbc
+#define regCNVC_CFG2_PRE_CSC_B_C11_C12_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_B_C13_C14 0x0fbd
+#define regCNVC_CFG2_PRE_CSC_B_C13_C14_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_B_C21_C22 0x0fbe
+#define regCNVC_CFG2_PRE_CSC_B_C21_C22_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_B_C23_C24 0x0fbf
+#define regCNVC_CFG2_PRE_CSC_B_C23_C24_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_B_C31_C32 0x0fc0
+#define regCNVC_CFG2_PRE_CSC_B_C31_C32_BASE_IDX 2
+#define regCNVC_CFG2_PRE_CSC_B_C33_C34 0x0fc1
+#define regCNVC_CFG2_PRE_CSC_B_C33_C34_BASE_IDX 2
+#define regCNVC_CFG2_CNVC_COEF_FORMAT 0x0fc2
+#define regCNVC_CFG2_CNVC_COEF_FORMAT_BASE_IDX 2
+#define regCNVC_CFG2_PRE_DEGAM 0x0fc3
+#define regCNVC_CFG2_PRE_DEGAM_BASE_IDX 2
+#define regCNVC_CFG2_PRE_REALPHA 0x0fc4
+#define regCNVC_CFG2_PRE_REALPHA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cnvc_cur_dispdec
+// base address: 0xb58
+#define regCNVC_CUR2_CURSOR0_CONTROL 0x0fc7
+#define regCNVC_CUR2_CURSOR0_CONTROL_BASE_IDX 2
+#define regCNVC_CUR2_CURSOR0_COLOR0 0x0fc8
+#define regCNVC_CUR2_CURSOR0_COLOR0_BASE_IDX 2
+#define regCNVC_CUR2_CURSOR0_COLOR1 0x0fc9
+#define regCNVC_CUR2_CURSOR0_COLOR1_BASE_IDX 2
+#define regCNVC_CUR2_CURSOR0_FP_SCALE_BIAS 0x0fca
+#define regCNVC_CUR2_CURSOR0_FP_SCALE_BIAS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_dscl_dispdec
+// base address: 0xb58
+#define regDSCL2_SCL_COEF_RAM_TAP_SELECT 0x0fcf
+#define regDSCL2_SCL_COEF_RAM_TAP_SELECT_BASE_IDX 2
+#define regDSCL2_SCL_COEF_RAM_TAP_DATA 0x0fd0
+#define regDSCL2_SCL_COEF_RAM_TAP_DATA_BASE_IDX 2
+#define regDSCL2_SCL_MODE 0x0fd1
+#define regDSCL2_SCL_MODE_BASE_IDX 2
+#define regDSCL2_SCL_TAP_CONTROL 0x0fd2
+#define regDSCL2_SCL_TAP_CONTROL_BASE_IDX 2
+#define regDSCL2_DSCL_CONTROL 0x0fd3
+#define regDSCL2_DSCL_CONTROL_BASE_IDX 2
+#define regDSCL2_DSCL_2TAP_CONTROL 0x0fd4
+#define regDSCL2_DSCL_2TAP_CONTROL_BASE_IDX 2
+#define regDSCL2_SCL_MANUAL_REPLICATE_CONTROL 0x0fd5
+#define regDSCL2_SCL_MANUAL_REPLICATE_CONTROL_BASE_IDX 2
+#define regDSCL2_SCL_HORZ_FILTER_SCALE_RATIO 0x0fd6
+#define regDSCL2_SCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2
+#define regDSCL2_SCL_HORZ_FILTER_INIT 0x0fd7
+#define regDSCL2_SCL_HORZ_FILTER_INIT_BASE_IDX 2
+#define regDSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C 0x0fd8
+#define regDSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define regDSCL2_SCL_HORZ_FILTER_INIT_C 0x0fd9
+#define regDSCL2_SCL_HORZ_FILTER_INIT_C_BASE_IDX 2
+#define regDSCL2_SCL_VERT_FILTER_SCALE_RATIO 0x0fda
+#define regDSCL2_SCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2
+#define regDSCL2_SCL_VERT_FILTER_INIT 0x0fdb
+#define regDSCL2_SCL_VERT_FILTER_INIT_BASE_IDX 2
+#define regDSCL2_SCL_VERT_FILTER_INIT_BOT 0x0fdc
+#define regDSCL2_SCL_VERT_FILTER_INIT_BOT_BASE_IDX 2
+#define regDSCL2_SCL_VERT_FILTER_SCALE_RATIO_C 0x0fdd
+#define regDSCL2_SCL_VERT_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define regDSCL2_SCL_VERT_FILTER_INIT_C 0x0fde
+#define regDSCL2_SCL_VERT_FILTER_INIT_C_BASE_IDX 2
+#define regDSCL2_SCL_VERT_FILTER_INIT_BOT_C 0x0fdf
+#define regDSCL2_SCL_VERT_FILTER_INIT_BOT_C_BASE_IDX 2
+#define regDSCL2_SCL_BLACK_COLOR 0x0fe0
+#define regDSCL2_SCL_BLACK_COLOR_BASE_IDX 2
+#define regDSCL2_DSCL_UPDATE 0x0fe1
+#define regDSCL2_DSCL_UPDATE_BASE_IDX 2
+#define regDSCL2_DSCL_AUTOCAL 0x0fe2
+#define regDSCL2_DSCL_AUTOCAL_BASE_IDX 2
+#define regDSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT 0x0fe3
+#define regDSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT_BASE_IDX 2
+#define regDSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM 0x0fe4
+#define regDSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM_BASE_IDX 2
+#define regDSCL2_OTG_H_BLANK 0x0fe5
+#define regDSCL2_OTG_H_BLANK_BASE_IDX 2
+#define regDSCL2_OTG_V_BLANK 0x0fe6
+#define regDSCL2_OTG_V_BLANK_BASE_IDX 2
+#define regDSCL2_RECOUT_START 0x0fe7
+#define regDSCL2_RECOUT_START_BASE_IDX 2
+#define regDSCL2_RECOUT_SIZE 0x0fe8
+#define regDSCL2_RECOUT_SIZE_BASE_IDX 2
+#define regDSCL2_MPC_SIZE 0x0fe9
+#define regDSCL2_MPC_SIZE_BASE_IDX 2
+#define regDSCL2_LB_DATA_FORMAT 0x0fea
+#define regDSCL2_LB_DATA_FORMAT_BASE_IDX 2
+#define regDSCL2_LB_MEMORY_CTRL 0x0feb
+#define regDSCL2_LB_MEMORY_CTRL_BASE_IDX 2
+#define regDSCL2_LB_V_COUNTER 0x0fec
+#define regDSCL2_LB_V_COUNTER_BASE_IDX 2
+#define regDSCL2_DSCL_MEM_PWR_CTRL 0x0fed
+#define regDSCL2_DSCL_MEM_PWR_CTRL_BASE_IDX 2
+#define regDSCL2_DSCL_MEM_PWR_STATUS 0x0fee
+#define regDSCL2_DSCL_MEM_PWR_STATUS_BASE_IDX 2
+#define regDSCL2_OBUF_CONTROL 0x0fef
+#define regDSCL2_OBUF_CONTROL_BASE_IDX 2
+#define regDSCL2_OBUF_MEM_PWR_CTRL 0x0ff0
+#define regDSCL2_OBUF_MEM_PWR_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cm_dispdec
+// base address: 0xb58
+#define regCM2_CM_CONTROL 0x0ff6
+#define regCM2_CM_CONTROL_BASE_IDX 2
+#define regCM2_CM_POST_CSC_CONTROL 0x0ff7
+#define regCM2_CM_POST_CSC_CONTROL_BASE_IDX 2
+#define regCM2_CM_POST_CSC_C11_C12 0x0ff8
+#define regCM2_CM_POST_CSC_C11_C12_BASE_IDX 2
+#define regCM2_CM_POST_CSC_C13_C14 0x0ff9
+#define regCM2_CM_POST_CSC_C13_C14_BASE_IDX 2
+#define regCM2_CM_POST_CSC_C21_C22 0x0ffa
+#define regCM2_CM_POST_CSC_C21_C22_BASE_IDX 2
+#define regCM2_CM_POST_CSC_C23_C24 0x0ffb
+#define regCM2_CM_POST_CSC_C23_C24_BASE_IDX 2
+#define regCM2_CM_POST_CSC_C31_C32 0x0ffc
+#define regCM2_CM_POST_CSC_C31_C32_BASE_IDX 2
+#define regCM2_CM_POST_CSC_C33_C34 0x0ffd
+#define regCM2_CM_POST_CSC_C33_C34_BASE_IDX 2
+#define regCM2_CM_POST_CSC_B_C11_C12 0x0ffe
+#define regCM2_CM_POST_CSC_B_C11_C12_BASE_IDX 2
+#define regCM2_CM_POST_CSC_B_C13_C14 0x0fff
+#define regCM2_CM_POST_CSC_B_C13_C14_BASE_IDX 2
+#define regCM2_CM_POST_CSC_B_C21_C22 0x1000
+#define regCM2_CM_POST_CSC_B_C21_C22_BASE_IDX 2
+#define regCM2_CM_POST_CSC_B_C23_C24 0x1001
+#define regCM2_CM_POST_CSC_B_C23_C24_BASE_IDX 2
+#define regCM2_CM_POST_CSC_B_C31_C32 0x1002
+#define regCM2_CM_POST_CSC_B_C31_C32_BASE_IDX 2
+#define regCM2_CM_POST_CSC_B_C33_C34 0x1003
+#define regCM2_CM_POST_CSC_B_C33_C34_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_CONTROL 0x1004
+#define regCM2_CM_GAMUT_REMAP_CONTROL_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_C11_C12 0x1005
+#define regCM2_CM_GAMUT_REMAP_C11_C12_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_C13_C14 0x1006
+#define regCM2_CM_GAMUT_REMAP_C13_C14_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_C21_C22 0x1007
+#define regCM2_CM_GAMUT_REMAP_C21_C22_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_C23_C24 0x1008
+#define regCM2_CM_GAMUT_REMAP_C23_C24_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_C31_C32 0x1009
+#define regCM2_CM_GAMUT_REMAP_C31_C32_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_C33_C34 0x100a
+#define regCM2_CM_GAMUT_REMAP_C33_C34_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_B_C11_C12 0x100b
+#define regCM2_CM_GAMUT_REMAP_B_C11_C12_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_B_C13_C14 0x100c
+#define regCM2_CM_GAMUT_REMAP_B_C13_C14_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_B_C21_C22 0x100d
+#define regCM2_CM_GAMUT_REMAP_B_C21_C22_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_B_C23_C24 0x100e
+#define regCM2_CM_GAMUT_REMAP_B_C23_C24_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_B_C31_C32 0x100f
+#define regCM2_CM_GAMUT_REMAP_B_C31_C32_BASE_IDX 2
+#define regCM2_CM_GAMUT_REMAP_B_C33_C34 0x1010
+#define regCM2_CM_GAMUT_REMAP_B_C33_C34_BASE_IDX 2
+#define regCM2_CM_BIAS_CR_R 0x1011
+#define regCM2_CM_BIAS_CR_R_BASE_IDX 2
+#define regCM2_CM_BIAS_Y_G_CB_B 0x1012
+#define regCM2_CM_BIAS_Y_G_CB_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_CONTROL 0x1013
+#define regCM2_CM_GAMCOR_CONTROL_BASE_IDX 2
+#define regCM2_CM_GAMCOR_LUT_INDEX 0x1014
+#define regCM2_CM_GAMCOR_LUT_INDEX_BASE_IDX 2
+#define regCM2_CM_GAMCOR_LUT_DATA 0x1015
+#define regCM2_CM_GAMCOR_LUT_DATA_BASE_IDX 2
+#define regCM2_CM_GAMCOR_LUT_CONTROL 0x1016
+#define regCM2_CM_GAMCOR_LUT_CONTROL_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_START_CNTL_B 0x1017
+#define regCM2_CM_GAMCOR_RAMA_START_CNTL_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_START_CNTL_G 0x1018
+#define regCM2_CM_GAMCOR_RAMA_START_CNTL_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_START_CNTL_R 0x1019
+#define regCM2_CM_GAMCOR_RAMA_START_CNTL_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B 0x101a
+#define regCM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G 0x101b
+#define regCM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R 0x101c
+#define regCM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_START_BASE_CNTL_B 0x101d
+#define regCM2_CM_GAMCOR_RAMA_START_BASE_CNTL_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_START_BASE_CNTL_G 0x101e
+#define regCM2_CM_GAMCOR_RAMA_START_BASE_CNTL_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_START_BASE_CNTL_R 0x101f
+#define regCM2_CM_GAMCOR_RAMA_START_BASE_CNTL_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL1_B 0x1020
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL1_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL2_B 0x1021
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL2_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL1_G 0x1022
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL1_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL2_G 0x1023
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL2_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL1_R 0x1024
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL1_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL2_R 0x1025
+#define regCM2_CM_GAMCOR_RAMA_END_CNTL2_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_OFFSET_B 0x1026
+#define regCM2_CM_GAMCOR_RAMA_OFFSET_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_OFFSET_G 0x1027
+#define regCM2_CM_GAMCOR_RAMA_OFFSET_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_OFFSET_R 0x1028
+#define regCM2_CM_GAMCOR_RAMA_OFFSET_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_0_1 0x1029
+#define regCM2_CM_GAMCOR_RAMA_REGION_0_1_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_2_3 0x102a
+#define regCM2_CM_GAMCOR_RAMA_REGION_2_3_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_4_5 0x102b
+#define regCM2_CM_GAMCOR_RAMA_REGION_4_5_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_6_7 0x102c
+#define regCM2_CM_GAMCOR_RAMA_REGION_6_7_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_8_9 0x102d
+#define regCM2_CM_GAMCOR_RAMA_REGION_8_9_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_10_11 0x102e
+#define regCM2_CM_GAMCOR_RAMA_REGION_10_11_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_12_13 0x102f
+#define regCM2_CM_GAMCOR_RAMA_REGION_12_13_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_14_15 0x1030
+#define regCM2_CM_GAMCOR_RAMA_REGION_14_15_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_16_17 0x1031
+#define regCM2_CM_GAMCOR_RAMA_REGION_16_17_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_18_19 0x1032
+#define regCM2_CM_GAMCOR_RAMA_REGION_18_19_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_20_21 0x1033
+#define regCM2_CM_GAMCOR_RAMA_REGION_20_21_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_22_23 0x1034
+#define regCM2_CM_GAMCOR_RAMA_REGION_22_23_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_24_25 0x1035
+#define regCM2_CM_GAMCOR_RAMA_REGION_24_25_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_26_27 0x1036
+#define regCM2_CM_GAMCOR_RAMA_REGION_26_27_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_28_29 0x1037
+#define regCM2_CM_GAMCOR_RAMA_REGION_28_29_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_30_31 0x1038
+#define regCM2_CM_GAMCOR_RAMA_REGION_30_31_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMA_REGION_32_33 0x1039
+#define regCM2_CM_GAMCOR_RAMA_REGION_32_33_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_START_CNTL_B 0x103a
+#define regCM2_CM_GAMCOR_RAMB_START_CNTL_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_START_CNTL_G 0x103b
+#define regCM2_CM_GAMCOR_RAMB_START_CNTL_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_START_CNTL_R 0x103c
+#define regCM2_CM_GAMCOR_RAMB_START_CNTL_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B 0x103d
+#define regCM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G 0x103e
+#define regCM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R 0x103f
+#define regCM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_START_BASE_CNTL_B 0x1040
+#define regCM2_CM_GAMCOR_RAMB_START_BASE_CNTL_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_START_BASE_CNTL_G 0x1041
+#define regCM2_CM_GAMCOR_RAMB_START_BASE_CNTL_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_START_BASE_CNTL_R 0x1042
+#define regCM2_CM_GAMCOR_RAMB_START_BASE_CNTL_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL1_B 0x1043
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL1_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL2_B 0x1044
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL2_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL1_G 0x1045
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL1_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL2_G 0x1046
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL2_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL1_R 0x1047
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL1_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL2_R 0x1048
+#define regCM2_CM_GAMCOR_RAMB_END_CNTL2_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_OFFSET_B 0x1049
+#define regCM2_CM_GAMCOR_RAMB_OFFSET_B_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_OFFSET_G 0x104a
+#define regCM2_CM_GAMCOR_RAMB_OFFSET_G_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_OFFSET_R 0x104b
+#define regCM2_CM_GAMCOR_RAMB_OFFSET_R_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_0_1 0x104c
+#define regCM2_CM_GAMCOR_RAMB_REGION_0_1_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_2_3 0x104d
+#define regCM2_CM_GAMCOR_RAMB_REGION_2_3_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_4_5 0x104e
+#define regCM2_CM_GAMCOR_RAMB_REGION_4_5_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_6_7 0x104f
+#define regCM2_CM_GAMCOR_RAMB_REGION_6_7_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_8_9 0x1050
+#define regCM2_CM_GAMCOR_RAMB_REGION_8_9_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_10_11 0x1051
+#define regCM2_CM_GAMCOR_RAMB_REGION_10_11_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_12_13 0x1052
+#define regCM2_CM_GAMCOR_RAMB_REGION_12_13_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_14_15 0x1053
+#define regCM2_CM_GAMCOR_RAMB_REGION_14_15_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_16_17 0x1054
+#define regCM2_CM_GAMCOR_RAMB_REGION_16_17_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_18_19 0x1055
+#define regCM2_CM_GAMCOR_RAMB_REGION_18_19_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_20_21 0x1056
+#define regCM2_CM_GAMCOR_RAMB_REGION_20_21_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_22_23 0x1057
+#define regCM2_CM_GAMCOR_RAMB_REGION_22_23_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_24_25 0x1058
+#define regCM2_CM_GAMCOR_RAMB_REGION_24_25_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_26_27 0x1059
+#define regCM2_CM_GAMCOR_RAMB_REGION_26_27_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_28_29 0x105a
+#define regCM2_CM_GAMCOR_RAMB_REGION_28_29_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_30_31 0x105b
+#define regCM2_CM_GAMCOR_RAMB_REGION_30_31_BASE_IDX 2
+#define regCM2_CM_GAMCOR_RAMB_REGION_32_33 0x105c
+#define regCM2_CM_GAMCOR_RAMB_REGION_32_33_BASE_IDX 2
+#define regCM2_CM_HDR_MULT_COEF 0x105d
+#define regCM2_CM_HDR_MULT_COEF_BASE_IDX 2
+#define regCM2_CM_MEM_PWR_CTRL 0x105e
+#define regCM2_CM_MEM_PWR_CTRL_BASE_IDX 2
+#define regCM2_CM_MEM_PWR_STATUS 0x105f
+#define regCM2_CM_MEM_PWR_STATUS_BASE_IDX 2
+#define regCM2_CM_DEALPHA 0x1061
+#define regCM2_CM_DEALPHA_BASE_IDX 2
+#define regCM2_CM_COEF_FORMAT 0x1062
+#define regCM2_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM2_CM_TEST_DEBUG_INDEX 0x1063
+#define regCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM2_CM_TEST_DEBUG_DATA 0x1064
+#define regCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2
+#define regCM2_DPP_CRC_VAL_R 0x1065
+#define regCM2_DPP_CRC_VAL_R_BASE_IDX 2
+#define regCM2_DPP_CRC_VAL_G 0x1066
+#define regCM2_DPP_CRC_VAL_G_BASE_IDX 2
+#define regCM2_DPP_CRC_VAL_B 0x1067
+#define regCM2_DPP_CRC_VAL_B_BASE_IDX 2
+#define regCM2_DPP_CRC_VAL_A 0x1068
+#define regCM2_DPP_CRC_VAL_A_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_dpp_top_dispdec
+// base address: 0xb58
+#define regDPP_TOP2_DPP_CONTROL 0x0f9b
+#define regDPP_TOP2_DPP_CONTROL_BASE_IDX 2
+#define regDPP_TOP2_DPP_SOFT_RESET 0x0f9c
+#define regDPP_TOP2_DPP_SOFT_RESET_BASE_IDX 2
+#define regDPP_TOP2_DPP_CRC_CTRL 0x0f9f
+#define regDPP_TOP2_DPP_CRC_CTRL_BASE_IDX 2
+#define regDPP_TOP2_HOST_READ_CONTROL 0x0fa0
+#define regDPP_TOP2_HOST_READ_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x43e8
+#define regDC_PERFMON13_PERFCOUNTER_CNTL 0x10fa
+#define regDC_PERFMON13_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON13_PERFCOUNTER_CNTL2 0x10fb
+#define regDC_PERFMON13_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON13_PERFCOUNTER_STATE 0x10fc
+#define regDC_PERFMON13_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON13_PERFMON_CNTL 0x10fd
+#define regDC_PERFMON13_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON13_PERFMON_CNTL2 0x10fe
+#define regDC_PERFMON13_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON13_PERFMON_CVALUE_INT_MISC 0x10ff
+#define regDC_PERFMON13_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON13_PERFMON_CVALUE_LOW 0x1100
+#define regDC_PERFMON13_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON13_PERFMON_HI 0x1101
+#define regDC_PERFMON13_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON13_PERFMON_LOW 0x1102
+#define regDC_PERFMON13_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cnvc_cfg_dispdec
+// base address: 0x1104
+#define regCNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT 0x1110
+#define regCNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT_BASE_IDX 2
+#define regCNVC_CFG3_FORMAT_CONTROL 0x1111
+#define regCNVC_CFG3_FORMAT_CONTROL_BASE_IDX 2
+#define regCNVC_CFG3_FCNV_FP_BIAS_R 0x1112
+#define regCNVC_CFG3_FCNV_FP_BIAS_R_BASE_IDX 2
+#define regCNVC_CFG3_FCNV_FP_BIAS_G 0x1113
+#define regCNVC_CFG3_FCNV_FP_BIAS_G_BASE_IDX 2
+#define regCNVC_CFG3_FCNV_FP_BIAS_B 0x1114
+#define regCNVC_CFG3_FCNV_FP_BIAS_B_BASE_IDX 2
+#define regCNVC_CFG3_FCNV_FP_SCALE_R 0x1115
+#define regCNVC_CFG3_FCNV_FP_SCALE_R_BASE_IDX 2
+#define regCNVC_CFG3_FCNV_FP_SCALE_G 0x1116
+#define regCNVC_CFG3_FCNV_FP_SCALE_G_BASE_IDX 2
+#define regCNVC_CFG3_FCNV_FP_SCALE_B 0x1117
+#define regCNVC_CFG3_FCNV_FP_SCALE_B_BASE_IDX 2
+#define regCNVC_CFG3_COLOR_KEYER_CONTROL 0x1118
+#define regCNVC_CFG3_COLOR_KEYER_CONTROL_BASE_IDX 2
+#define regCNVC_CFG3_COLOR_KEYER_ALPHA 0x1119
+#define regCNVC_CFG3_COLOR_KEYER_ALPHA_BASE_IDX 2
+#define regCNVC_CFG3_COLOR_KEYER_RED 0x111a
+#define regCNVC_CFG3_COLOR_KEYER_RED_BASE_IDX 2
+#define regCNVC_CFG3_COLOR_KEYER_GREEN 0x111b
+#define regCNVC_CFG3_COLOR_KEYER_GREEN_BASE_IDX 2
+#define regCNVC_CFG3_COLOR_KEYER_BLUE 0x111c
+#define regCNVC_CFG3_COLOR_KEYER_BLUE_BASE_IDX 2
+#define regCNVC_CFG3_ALPHA_2BIT_LUT 0x111e
+#define regCNVC_CFG3_ALPHA_2BIT_LUT_BASE_IDX 2
+#define regCNVC_CFG3_PRE_DEALPHA 0x111f
+#define regCNVC_CFG3_PRE_DEALPHA_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_MODE 0x1120
+#define regCNVC_CFG3_PRE_CSC_MODE_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_C11_C12 0x1121
+#define regCNVC_CFG3_PRE_CSC_C11_C12_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_C13_C14 0x1122
+#define regCNVC_CFG3_PRE_CSC_C13_C14_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_C21_C22 0x1123
+#define regCNVC_CFG3_PRE_CSC_C21_C22_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_C23_C24 0x1124
+#define regCNVC_CFG3_PRE_CSC_C23_C24_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_C31_C32 0x1125
+#define regCNVC_CFG3_PRE_CSC_C31_C32_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_C33_C34 0x1126
+#define regCNVC_CFG3_PRE_CSC_C33_C34_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_B_C11_C12 0x1127
+#define regCNVC_CFG3_PRE_CSC_B_C11_C12_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_B_C13_C14 0x1128
+#define regCNVC_CFG3_PRE_CSC_B_C13_C14_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_B_C21_C22 0x1129
+#define regCNVC_CFG3_PRE_CSC_B_C21_C22_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_B_C23_C24 0x112a
+#define regCNVC_CFG3_PRE_CSC_B_C23_C24_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_B_C31_C32 0x112b
+#define regCNVC_CFG3_PRE_CSC_B_C31_C32_BASE_IDX 2
+#define regCNVC_CFG3_PRE_CSC_B_C33_C34 0x112c
+#define regCNVC_CFG3_PRE_CSC_B_C33_C34_BASE_IDX 2
+#define regCNVC_CFG3_CNVC_COEF_FORMAT 0x112d
+#define regCNVC_CFG3_CNVC_COEF_FORMAT_BASE_IDX 2
+#define regCNVC_CFG3_PRE_DEGAM 0x112e
+#define regCNVC_CFG3_PRE_DEGAM_BASE_IDX 2
+#define regCNVC_CFG3_PRE_REALPHA 0x112f
+#define regCNVC_CFG3_PRE_REALPHA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cnvc_cur_dispdec
+// base address: 0x1104
+#define regCNVC_CUR3_CURSOR0_CONTROL 0x1132
+#define regCNVC_CUR3_CURSOR0_CONTROL_BASE_IDX 2
+#define regCNVC_CUR3_CURSOR0_COLOR0 0x1133
+#define regCNVC_CUR3_CURSOR0_COLOR0_BASE_IDX 2
+#define regCNVC_CUR3_CURSOR0_COLOR1 0x1134
+#define regCNVC_CUR3_CURSOR0_COLOR1_BASE_IDX 2
+#define regCNVC_CUR3_CURSOR0_FP_SCALE_BIAS 0x1135
+#define regCNVC_CUR3_CURSOR0_FP_SCALE_BIAS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_dscl_dispdec
+// base address: 0x1104
+#define regDSCL3_SCL_COEF_RAM_TAP_SELECT 0x113a
+#define regDSCL3_SCL_COEF_RAM_TAP_SELECT_BASE_IDX 2
+#define regDSCL3_SCL_COEF_RAM_TAP_DATA 0x113b
+#define regDSCL3_SCL_COEF_RAM_TAP_DATA_BASE_IDX 2
+#define regDSCL3_SCL_MODE 0x113c
+#define regDSCL3_SCL_MODE_BASE_IDX 2
+#define regDSCL3_SCL_TAP_CONTROL 0x113d
+#define regDSCL3_SCL_TAP_CONTROL_BASE_IDX 2
+#define regDSCL3_DSCL_CONTROL 0x113e
+#define regDSCL3_DSCL_CONTROL_BASE_IDX 2
+#define regDSCL3_DSCL_2TAP_CONTROL 0x113f
+#define regDSCL3_DSCL_2TAP_CONTROL_BASE_IDX 2
+#define regDSCL3_SCL_MANUAL_REPLICATE_CONTROL 0x1140
+#define regDSCL3_SCL_MANUAL_REPLICATE_CONTROL_BASE_IDX 2
+#define regDSCL3_SCL_HORZ_FILTER_SCALE_RATIO 0x1141
+#define regDSCL3_SCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2
+#define regDSCL3_SCL_HORZ_FILTER_INIT 0x1142
+#define regDSCL3_SCL_HORZ_FILTER_INIT_BASE_IDX 2
+#define regDSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C 0x1143
+#define regDSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define regDSCL3_SCL_HORZ_FILTER_INIT_C 0x1144
+#define regDSCL3_SCL_HORZ_FILTER_INIT_C_BASE_IDX 2
+#define regDSCL3_SCL_VERT_FILTER_SCALE_RATIO 0x1145
+#define regDSCL3_SCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2
+#define regDSCL3_SCL_VERT_FILTER_INIT 0x1146
+#define regDSCL3_SCL_VERT_FILTER_INIT_BASE_IDX 2
+#define regDSCL3_SCL_VERT_FILTER_INIT_BOT 0x1147
+#define regDSCL3_SCL_VERT_FILTER_INIT_BOT_BASE_IDX 2
+#define regDSCL3_SCL_VERT_FILTER_SCALE_RATIO_C 0x1148
+#define regDSCL3_SCL_VERT_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define regDSCL3_SCL_VERT_FILTER_INIT_C 0x1149
+#define regDSCL3_SCL_VERT_FILTER_INIT_C_BASE_IDX 2
+#define regDSCL3_SCL_VERT_FILTER_INIT_BOT_C 0x114a
+#define regDSCL3_SCL_VERT_FILTER_INIT_BOT_C_BASE_IDX 2
+#define regDSCL3_SCL_BLACK_COLOR 0x114b
+#define regDSCL3_SCL_BLACK_COLOR_BASE_IDX 2
+#define regDSCL3_DSCL_UPDATE 0x114c
+#define regDSCL3_DSCL_UPDATE_BASE_IDX 2
+#define regDSCL3_DSCL_AUTOCAL 0x114d
+#define regDSCL3_DSCL_AUTOCAL_BASE_IDX 2
+#define regDSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT 0x114e
+#define regDSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT_BASE_IDX 2
+#define regDSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM 0x114f
+#define regDSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM_BASE_IDX 2
+#define regDSCL3_OTG_H_BLANK 0x1150
+#define regDSCL3_OTG_H_BLANK_BASE_IDX 2
+#define regDSCL3_OTG_V_BLANK 0x1151
+#define regDSCL3_OTG_V_BLANK_BASE_IDX 2
+#define regDSCL3_RECOUT_START 0x1152
+#define regDSCL3_RECOUT_START_BASE_IDX 2
+#define regDSCL3_RECOUT_SIZE 0x1153
+#define regDSCL3_RECOUT_SIZE_BASE_IDX 2
+#define regDSCL3_MPC_SIZE 0x1154
+#define regDSCL3_MPC_SIZE_BASE_IDX 2
+#define regDSCL3_LB_DATA_FORMAT 0x1155
+#define regDSCL3_LB_DATA_FORMAT_BASE_IDX 2
+#define regDSCL3_LB_MEMORY_CTRL 0x1156
+#define regDSCL3_LB_MEMORY_CTRL_BASE_IDX 2
+#define regDSCL3_LB_V_COUNTER 0x1157
+#define regDSCL3_LB_V_COUNTER_BASE_IDX 2
+#define regDSCL3_DSCL_MEM_PWR_CTRL 0x1158
+#define regDSCL3_DSCL_MEM_PWR_CTRL_BASE_IDX 2
+#define regDSCL3_DSCL_MEM_PWR_STATUS 0x1159
+#define regDSCL3_DSCL_MEM_PWR_STATUS_BASE_IDX 2
+#define regDSCL3_OBUF_CONTROL 0x115a
+#define regDSCL3_OBUF_CONTROL_BASE_IDX 2
+#define regDSCL3_OBUF_MEM_PWR_CTRL 0x115b
+#define regDSCL3_OBUF_MEM_PWR_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cm_dispdec
+// base address: 0x1104
+#define regCM3_CM_CONTROL 0x1161
+#define regCM3_CM_CONTROL_BASE_IDX 2
+#define regCM3_CM_POST_CSC_CONTROL 0x1162
+#define regCM3_CM_POST_CSC_CONTROL_BASE_IDX 2
+#define regCM3_CM_POST_CSC_C11_C12 0x1163
+#define regCM3_CM_POST_CSC_C11_C12_BASE_IDX 2
+#define regCM3_CM_POST_CSC_C13_C14 0x1164
+#define regCM3_CM_POST_CSC_C13_C14_BASE_IDX 2
+#define regCM3_CM_POST_CSC_C21_C22 0x1165
+#define regCM3_CM_POST_CSC_C21_C22_BASE_IDX 2
+#define regCM3_CM_POST_CSC_C23_C24 0x1166
+#define regCM3_CM_POST_CSC_C23_C24_BASE_IDX 2
+#define regCM3_CM_POST_CSC_C31_C32 0x1167
+#define regCM3_CM_POST_CSC_C31_C32_BASE_IDX 2
+#define regCM3_CM_POST_CSC_C33_C34 0x1168
+#define regCM3_CM_POST_CSC_C33_C34_BASE_IDX 2
+#define regCM3_CM_POST_CSC_B_C11_C12 0x1169
+#define regCM3_CM_POST_CSC_B_C11_C12_BASE_IDX 2
+#define regCM3_CM_POST_CSC_B_C13_C14 0x116a
+#define regCM3_CM_POST_CSC_B_C13_C14_BASE_IDX 2
+#define regCM3_CM_POST_CSC_B_C21_C22 0x116b
+#define regCM3_CM_POST_CSC_B_C21_C22_BASE_IDX 2
+#define regCM3_CM_POST_CSC_B_C23_C24 0x116c
+#define regCM3_CM_POST_CSC_B_C23_C24_BASE_IDX 2
+#define regCM3_CM_POST_CSC_B_C31_C32 0x116d
+#define regCM3_CM_POST_CSC_B_C31_C32_BASE_IDX 2
+#define regCM3_CM_POST_CSC_B_C33_C34 0x116e
+#define regCM3_CM_POST_CSC_B_C33_C34_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_CONTROL 0x116f
+#define regCM3_CM_GAMUT_REMAP_CONTROL_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_C11_C12 0x1170
+#define regCM3_CM_GAMUT_REMAP_C11_C12_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_C13_C14 0x1171
+#define regCM3_CM_GAMUT_REMAP_C13_C14_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_C21_C22 0x1172
+#define regCM3_CM_GAMUT_REMAP_C21_C22_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_C23_C24 0x1173
+#define regCM3_CM_GAMUT_REMAP_C23_C24_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_C31_C32 0x1174
+#define regCM3_CM_GAMUT_REMAP_C31_C32_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_C33_C34 0x1175
+#define regCM3_CM_GAMUT_REMAP_C33_C34_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_B_C11_C12 0x1176
+#define regCM3_CM_GAMUT_REMAP_B_C11_C12_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_B_C13_C14 0x1177
+#define regCM3_CM_GAMUT_REMAP_B_C13_C14_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_B_C21_C22 0x1178
+#define regCM3_CM_GAMUT_REMAP_B_C21_C22_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_B_C23_C24 0x1179
+#define regCM3_CM_GAMUT_REMAP_B_C23_C24_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_B_C31_C32 0x117a
+#define regCM3_CM_GAMUT_REMAP_B_C31_C32_BASE_IDX 2
+#define regCM3_CM_GAMUT_REMAP_B_C33_C34 0x117b
+#define regCM3_CM_GAMUT_REMAP_B_C33_C34_BASE_IDX 2
+#define regCM3_CM_BIAS_CR_R 0x117c
+#define regCM3_CM_BIAS_CR_R_BASE_IDX 2
+#define regCM3_CM_BIAS_Y_G_CB_B 0x117d
+#define regCM3_CM_BIAS_Y_G_CB_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_CONTROL 0x117e
+#define regCM3_CM_GAMCOR_CONTROL_BASE_IDX 2
+#define regCM3_CM_GAMCOR_LUT_INDEX 0x117f
+#define regCM3_CM_GAMCOR_LUT_INDEX_BASE_IDX 2
+#define regCM3_CM_GAMCOR_LUT_DATA 0x1180
+#define regCM3_CM_GAMCOR_LUT_DATA_BASE_IDX 2
+#define regCM3_CM_GAMCOR_LUT_CONTROL 0x1181
+#define regCM3_CM_GAMCOR_LUT_CONTROL_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_START_CNTL_B 0x1182
+#define regCM3_CM_GAMCOR_RAMA_START_CNTL_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_START_CNTL_G 0x1183
+#define regCM3_CM_GAMCOR_RAMA_START_CNTL_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_START_CNTL_R 0x1184
+#define regCM3_CM_GAMCOR_RAMA_START_CNTL_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B 0x1185
+#define regCM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G 0x1186
+#define regCM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R 0x1187
+#define regCM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_START_BASE_CNTL_B 0x1188
+#define regCM3_CM_GAMCOR_RAMA_START_BASE_CNTL_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_START_BASE_CNTL_G 0x1189
+#define regCM3_CM_GAMCOR_RAMA_START_BASE_CNTL_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_START_BASE_CNTL_R 0x118a
+#define regCM3_CM_GAMCOR_RAMA_START_BASE_CNTL_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL1_B 0x118b
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL1_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL2_B 0x118c
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL2_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL1_G 0x118d
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL1_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL2_G 0x118e
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL2_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL1_R 0x118f
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL1_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL2_R 0x1190
+#define regCM3_CM_GAMCOR_RAMA_END_CNTL2_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_OFFSET_B 0x1191
+#define regCM3_CM_GAMCOR_RAMA_OFFSET_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_OFFSET_G 0x1192
+#define regCM3_CM_GAMCOR_RAMA_OFFSET_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_OFFSET_R 0x1193
+#define regCM3_CM_GAMCOR_RAMA_OFFSET_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_0_1 0x1194
+#define regCM3_CM_GAMCOR_RAMA_REGION_0_1_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_2_3 0x1195
+#define regCM3_CM_GAMCOR_RAMA_REGION_2_3_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_4_5 0x1196
+#define regCM3_CM_GAMCOR_RAMA_REGION_4_5_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_6_7 0x1197
+#define regCM3_CM_GAMCOR_RAMA_REGION_6_7_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_8_9 0x1198
+#define regCM3_CM_GAMCOR_RAMA_REGION_8_9_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_10_11 0x1199
+#define regCM3_CM_GAMCOR_RAMA_REGION_10_11_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_12_13 0x119a
+#define regCM3_CM_GAMCOR_RAMA_REGION_12_13_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_14_15 0x119b
+#define regCM3_CM_GAMCOR_RAMA_REGION_14_15_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_16_17 0x119c
+#define regCM3_CM_GAMCOR_RAMA_REGION_16_17_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_18_19 0x119d
+#define regCM3_CM_GAMCOR_RAMA_REGION_18_19_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_20_21 0x119e
+#define regCM3_CM_GAMCOR_RAMA_REGION_20_21_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_22_23 0x119f
+#define regCM3_CM_GAMCOR_RAMA_REGION_22_23_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_24_25 0x11a0
+#define regCM3_CM_GAMCOR_RAMA_REGION_24_25_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_26_27 0x11a1
+#define regCM3_CM_GAMCOR_RAMA_REGION_26_27_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_28_29 0x11a2
+#define regCM3_CM_GAMCOR_RAMA_REGION_28_29_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_30_31 0x11a3
+#define regCM3_CM_GAMCOR_RAMA_REGION_30_31_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMA_REGION_32_33 0x11a4
+#define regCM3_CM_GAMCOR_RAMA_REGION_32_33_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_START_CNTL_B 0x11a5
+#define regCM3_CM_GAMCOR_RAMB_START_CNTL_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_START_CNTL_G 0x11a6
+#define regCM3_CM_GAMCOR_RAMB_START_CNTL_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_START_CNTL_R 0x11a7
+#define regCM3_CM_GAMCOR_RAMB_START_CNTL_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B 0x11a8
+#define regCM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G 0x11a9
+#define regCM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R 0x11aa
+#define regCM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_START_BASE_CNTL_B 0x11ab
+#define regCM3_CM_GAMCOR_RAMB_START_BASE_CNTL_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_START_BASE_CNTL_G 0x11ac
+#define regCM3_CM_GAMCOR_RAMB_START_BASE_CNTL_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_START_BASE_CNTL_R 0x11ad
+#define regCM3_CM_GAMCOR_RAMB_START_BASE_CNTL_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL1_B 0x11ae
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL1_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL2_B 0x11af
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL2_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL1_G 0x11b0
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL1_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL2_G 0x11b1
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL2_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL1_R 0x11b2
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL1_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL2_R 0x11b3
+#define regCM3_CM_GAMCOR_RAMB_END_CNTL2_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_OFFSET_B 0x11b4
+#define regCM3_CM_GAMCOR_RAMB_OFFSET_B_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_OFFSET_G 0x11b5
+#define regCM3_CM_GAMCOR_RAMB_OFFSET_G_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_OFFSET_R 0x11b6
+#define regCM3_CM_GAMCOR_RAMB_OFFSET_R_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_0_1 0x11b7
+#define regCM3_CM_GAMCOR_RAMB_REGION_0_1_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_2_3 0x11b8
+#define regCM3_CM_GAMCOR_RAMB_REGION_2_3_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_4_5 0x11b9
+#define regCM3_CM_GAMCOR_RAMB_REGION_4_5_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_6_7 0x11ba
+#define regCM3_CM_GAMCOR_RAMB_REGION_6_7_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_8_9 0x11bb
+#define regCM3_CM_GAMCOR_RAMB_REGION_8_9_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_10_11 0x11bc
+#define regCM3_CM_GAMCOR_RAMB_REGION_10_11_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_12_13 0x11bd
+#define regCM3_CM_GAMCOR_RAMB_REGION_12_13_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_14_15 0x11be
+#define regCM3_CM_GAMCOR_RAMB_REGION_14_15_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_16_17 0x11bf
+#define regCM3_CM_GAMCOR_RAMB_REGION_16_17_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_18_19 0x11c0
+#define regCM3_CM_GAMCOR_RAMB_REGION_18_19_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_20_21 0x11c1
+#define regCM3_CM_GAMCOR_RAMB_REGION_20_21_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_22_23 0x11c2
+#define regCM3_CM_GAMCOR_RAMB_REGION_22_23_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_24_25 0x11c3
+#define regCM3_CM_GAMCOR_RAMB_REGION_24_25_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_26_27 0x11c4
+#define regCM3_CM_GAMCOR_RAMB_REGION_26_27_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_28_29 0x11c5
+#define regCM3_CM_GAMCOR_RAMB_REGION_28_29_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_30_31 0x11c6
+#define regCM3_CM_GAMCOR_RAMB_REGION_30_31_BASE_IDX 2
+#define regCM3_CM_GAMCOR_RAMB_REGION_32_33 0x11c7
+#define regCM3_CM_GAMCOR_RAMB_REGION_32_33_BASE_IDX 2
+#define regCM3_CM_HDR_MULT_COEF 0x11c8
+#define regCM3_CM_HDR_MULT_COEF_BASE_IDX 2
+#define regCM3_CM_MEM_PWR_CTRL 0x11c9
+#define regCM3_CM_MEM_PWR_CTRL_BASE_IDX 2
+#define regCM3_CM_MEM_PWR_STATUS 0x11ca
+#define regCM3_CM_MEM_PWR_STATUS_BASE_IDX 2
+#define regCM3_CM_DEALPHA 0x11cc
+#define regCM3_CM_DEALPHA_BASE_IDX 2
+#define regCM3_CM_COEF_FORMAT 0x11cd
+#define regCM3_CM_COEF_FORMAT_BASE_IDX 2
+#define regCM3_CM_TEST_DEBUG_INDEX 0x11ce
+#define regCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define regCM3_CM_TEST_DEBUG_DATA 0x11cf
+#define regCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2
+#define regCM3_DPP_CRC_VAL_R 0x11d0
+#define regCM3_DPP_CRC_VAL_R_BASE_IDX 2
+#define regCM3_DPP_CRC_VAL_G 0x11d1
+#define regCM3_DPP_CRC_VAL_G_BASE_IDX 2
+#define regCM3_DPP_CRC_VAL_B 0x11d2
+#define regCM3_DPP_CRC_VAL_B_BASE_IDX 2
+#define regCM3_DPP_CRC_VAL_A 0x11d3
+#define regCM3_DPP_CRC_VAL_A_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_dpp_top_dispdec
+// base address: 0x1104
+#define regDPP_TOP3_DPP_CONTROL 0x1106
+#define regDPP_TOP3_DPP_CONTROL_BASE_IDX 2
+#define regDPP_TOP3_DPP_SOFT_RESET 0x1107
+#define regDPP_TOP3_DPP_SOFT_RESET_BASE_IDX 2
+#define regDPP_TOP3_DPP_CRC_CTRL 0x110a
+#define regDPP_TOP3_DPP_CRC_CTRL_BASE_IDX 2
+#define regDPP_TOP3_HOST_READ_CONTROL 0x110b
+#define regDPP_TOP3_HOST_READ_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x4994
+#define regDC_PERFMON14_PERFCOUNTER_CNTL 0x1265
+#define regDC_PERFMON14_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON14_PERFCOUNTER_CNTL2 0x1266
+#define regDC_PERFMON14_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON14_PERFCOUNTER_STATE 0x1267
+#define regDC_PERFMON14_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON14_PERFMON_CNTL 0x1268
+#define regDC_PERFMON14_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON14_PERFMON_CNTL2 0x1269
+#define regDC_PERFMON14_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON14_PERFMON_CVALUE_INT_MISC 0x126a
+#define regDC_PERFMON14_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON14_PERFMON_CVALUE_LOW 0x126b
+#define regDC_PERFMON14_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON14_PERFMON_HI 0x126c
+#define regDC_PERFMON14_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON14_PERFMON_LOW 0x126d
+#define regDC_PERFMON14_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc0_dispdec
+// base address: 0x0
+#define regMPCC0_MPCC_TOP_SEL 0x0000
+#define regMPCC0_MPCC_TOP_SEL_BASE_IDX 3
+#define regMPCC0_MPCC_BOT_SEL 0x0001
+#define regMPCC0_MPCC_BOT_SEL_BASE_IDX 3
+#define regMPCC0_MPCC_OPP_ID 0x0002
+#define regMPCC0_MPCC_OPP_ID_BASE_IDX 3
+#define regMPCC0_MPCC_CONTROL 0x0003
+#define regMPCC0_MPCC_CONTROL_BASE_IDX 3
+#define regMPCC0_MPCC_SM_CONTROL 0x0004
+#define regMPCC0_MPCC_SM_CONTROL_BASE_IDX 3
+#define regMPCC0_MPCC_UPDATE_LOCK_SEL 0x0005
+#define regMPCC0_MPCC_UPDATE_LOCK_SEL_BASE_IDX 3
+#define regMPCC0_MPCC_TOP_GAIN 0x0006
+#define regMPCC0_MPCC_TOP_GAIN_BASE_IDX 3
+#define regMPCC0_MPCC_BOT_GAIN_INSIDE 0x0007
+#define regMPCC0_MPCC_BOT_GAIN_INSIDE_BASE_IDX 3
+#define regMPCC0_MPCC_BOT_GAIN_OUTSIDE 0x0008
+#define regMPCC0_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 3
+#define regMPCC0_MPCC_MOVABLE_CM_LOCATION_CONTROL 0x0009
+#define regMPCC0_MPCC_MOVABLE_CM_LOCATION_CONTROL_BASE_IDX 3
+#define regMPCC0_MPCC_BG_R_CR 0x000a
+#define regMPCC0_MPCC_BG_R_CR_BASE_IDX 3
+#define regMPCC0_MPCC_BG_G_Y 0x000b
+#define regMPCC0_MPCC_BG_G_Y_BASE_IDX 3
+#define regMPCC0_MPCC_BG_B_CB 0x000c
+#define regMPCC0_MPCC_BG_B_CB_BASE_IDX 3
+#define regMPCC0_MPCC_MEM_PWR_CTRL 0x000d
+#define regMPCC0_MPCC_MEM_PWR_CTRL_BASE_IDX 3
+#define regMPCC0_MPCC_STATUS 0x000e
+#define regMPCC0_MPCC_STATUS_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc1_dispdec
+// base address: 0x54
+#define regMPCC1_MPCC_TOP_SEL 0x0015
+#define regMPCC1_MPCC_TOP_SEL_BASE_IDX 3
+#define regMPCC1_MPCC_BOT_SEL 0x0016
+#define regMPCC1_MPCC_BOT_SEL_BASE_IDX 3
+#define regMPCC1_MPCC_OPP_ID 0x0017
+#define regMPCC1_MPCC_OPP_ID_BASE_IDX 3
+#define regMPCC1_MPCC_CONTROL 0x0018
+#define regMPCC1_MPCC_CONTROL_BASE_IDX 3
+#define regMPCC1_MPCC_SM_CONTROL 0x0019
+#define regMPCC1_MPCC_SM_CONTROL_BASE_IDX 3
+#define regMPCC1_MPCC_UPDATE_LOCK_SEL 0x001a
+#define regMPCC1_MPCC_UPDATE_LOCK_SEL_BASE_IDX 3
+#define regMPCC1_MPCC_TOP_GAIN 0x001b
+#define regMPCC1_MPCC_TOP_GAIN_BASE_IDX 3
+#define regMPCC1_MPCC_BOT_GAIN_INSIDE 0x001c
+#define regMPCC1_MPCC_BOT_GAIN_INSIDE_BASE_IDX 3
+#define regMPCC1_MPCC_BOT_GAIN_OUTSIDE 0x001d
+#define regMPCC1_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 3
+#define regMPCC1_MPCC_MOVABLE_CM_LOCATION_CONTROL 0x001e
+#define regMPCC1_MPCC_MOVABLE_CM_LOCATION_CONTROL_BASE_IDX 3
+#define regMPCC1_MPCC_BG_R_CR 0x001f
+#define regMPCC1_MPCC_BG_R_CR_BASE_IDX 3
+#define regMPCC1_MPCC_BG_G_Y 0x0020
+#define regMPCC1_MPCC_BG_G_Y_BASE_IDX 3
+#define regMPCC1_MPCC_BG_B_CB 0x0021
+#define regMPCC1_MPCC_BG_B_CB_BASE_IDX 3
+#define regMPCC1_MPCC_MEM_PWR_CTRL 0x0022
+#define regMPCC1_MPCC_MEM_PWR_CTRL_BASE_IDX 3
+#define regMPCC1_MPCC_STATUS 0x0023
+#define regMPCC1_MPCC_STATUS_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc2_dispdec
+// base address: 0xa8
+#define regMPCC2_MPCC_TOP_SEL 0x002a
+#define regMPCC2_MPCC_TOP_SEL_BASE_IDX 3
+#define regMPCC2_MPCC_BOT_SEL 0x002b
+#define regMPCC2_MPCC_BOT_SEL_BASE_IDX 3
+#define regMPCC2_MPCC_OPP_ID 0x002c
+#define regMPCC2_MPCC_OPP_ID_BASE_IDX 3
+#define regMPCC2_MPCC_CONTROL 0x002d
+#define regMPCC2_MPCC_CONTROL_BASE_IDX 3
+#define regMPCC2_MPCC_SM_CONTROL 0x002e
+#define regMPCC2_MPCC_SM_CONTROL_BASE_IDX 3
+#define regMPCC2_MPCC_UPDATE_LOCK_SEL 0x002f
+#define regMPCC2_MPCC_UPDATE_LOCK_SEL_BASE_IDX 3
+#define regMPCC2_MPCC_TOP_GAIN 0x0030
+#define regMPCC2_MPCC_TOP_GAIN_BASE_IDX 3
+#define regMPCC2_MPCC_BOT_GAIN_INSIDE 0x0031
+#define regMPCC2_MPCC_BOT_GAIN_INSIDE_BASE_IDX 3
+#define regMPCC2_MPCC_BOT_GAIN_OUTSIDE 0x0032
+#define regMPCC2_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 3
+#define regMPCC2_MPCC_MOVABLE_CM_LOCATION_CONTROL 0x0033
+#define regMPCC2_MPCC_MOVABLE_CM_LOCATION_CONTROL_BASE_IDX 3
+#define regMPCC2_MPCC_BG_R_CR 0x0034
+#define regMPCC2_MPCC_BG_R_CR_BASE_IDX 3
+#define regMPCC2_MPCC_BG_G_Y 0x0035
+#define regMPCC2_MPCC_BG_G_Y_BASE_IDX 3
+#define regMPCC2_MPCC_BG_B_CB 0x0036
+#define regMPCC2_MPCC_BG_B_CB_BASE_IDX 3
+#define regMPCC2_MPCC_MEM_PWR_CTRL 0x0037
+#define regMPCC2_MPCC_MEM_PWR_CTRL_BASE_IDX 3
+#define regMPCC2_MPCC_STATUS 0x0038
+#define regMPCC2_MPCC_STATUS_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc3_dispdec
+// base address: 0xfc
+#define regMPCC3_MPCC_TOP_SEL 0x003f
+#define regMPCC3_MPCC_TOP_SEL_BASE_IDX 3
+#define regMPCC3_MPCC_BOT_SEL 0x0040
+#define regMPCC3_MPCC_BOT_SEL_BASE_IDX 3
+#define regMPCC3_MPCC_OPP_ID 0x0041
+#define regMPCC3_MPCC_OPP_ID_BASE_IDX 3
+#define regMPCC3_MPCC_CONTROL 0x0042
+#define regMPCC3_MPCC_CONTROL_BASE_IDX 3
+#define regMPCC3_MPCC_SM_CONTROL 0x0043
+#define regMPCC3_MPCC_SM_CONTROL_BASE_IDX 3
+#define regMPCC3_MPCC_UPDATE_LOCK_SEL 0x0044
+#define regMPCC3_MPCC_UPDATE_LOCK_SEL_BASE_IDX 3
+#define regMPCC3_MPCC_TOP_GAIN 0x0045
+#define regMPCC3_MPCC_TOP_GAIN_BASE_IDX 3
+#define regMPCC3_MPCC_BOT_GAIN_INSIDE 0x0046
+#define regMPCC3_MPCC_BOT_GAIN_INSIDE_BASE_IDX 3
+#define regMPCC3_MPCC_BOT_GAIN_OUTSIDE 0x0047
+#define regMPCC3_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 3
+#define regMPCC3_MPCC_MOVABLE_CM_LOCATION_CONTROL 0x0048
+#define regMPCC3_MPCC_MOVABLE_CM_LOCATION_CONTROL_BASE_IDX 3
+#define regMPCC3_MPCC_BG_R_CR 0x0049
+#define regMPCC3_MPCC_BG_R_CR_BASE_IDX 3
+#define regMPCC3_MPCC_BG_G_Y 0x004a
+#define regMPCC3_MPCC_BG_G_Y_BASE_IDX 3
+#define regMPCC3_MPCC_BG_B_CB 0x004b
+#define regMPCC3_MPCC_BG_B_CB_BASE_IDX 3
+#define regMPCC3_MPCC_MEM_PWR_CTRL 0x004c
+#define regMPCC3_MPCC_MEM_PWR_CTRL_BASE_IDX 3
+#define regMPCC3_MPCC_STATUS 0x004d
+#define regMPCC3_MPCC_STATUS_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpc_cfg_dispdec
+// base address: 0x0
+#define regMPC_CLOCK_CONTROL 0x0398
+#define regMPC_CLOCK_CONTROL_BASE_IDX 3
+#define regMPC_SOFT_RESET 0x0399
+#define regMPC_SOFT_RESET_BASE_IDX 3
+#define regMPC_CRC_CTRL 0x039a
+#define regMPC_CRC_CTRL_BASE_IDX 3
+#define regMPC_CRC_SEL_CONTROL 0x039b
+#define regMPC_CRC_SEL_CONTROL_BASE_IDX 3
+#define regMPC_PERFMON_EVENT_CTRL 0x03a1
+#define regMPC_PERFMON_EVENT_CTRL_BASE_IDX 3
+#define regMPC_BYPASS_BG_AR 0x03a2
+#define regMPC_BYPASS_BG_AR_BASE_IDX 3
+#define regMPC_BYPASS_BG_GB 0x03a3
+#define regMPC_BYPASS_BG_GB_BASE_IDX 3
+#define regMPC_HOST_READ_CONTROL 0x03a4
+#define regMPC_HOST_READ_CONTROL_BASE_IDX 3
+#define regMPC_DPP_PENDING_STATUS 0x03a5
+#define regMPC_DPP_PENDING_STATUS_BASE_IDX 3
+#define regMPC_PENDING_STATUS_MISC 0x03a6
+#define regMPC_PENDING_STATUS_MISC_BASE_IDX 3
+#define regADR_CFG_CUR_VUPDATE_LOCK_SET0 0x03a7
+#define regADR_CFG_CUR_VUPDATE_LOCK_SET0_BASE_IDX 3
+#define regADR_CFG_VUPDATE_LOCK_SET0 0x03a8
+#define regADR_CFG_VUPDATE_LOCK_SET0_BASE_IDX 3
+#define regADR_VUPDATE_LOCK_SET0 0x03a9
+#define regADR_VUPDATE_LOCK_SET0_BASE_IDX 3
+#define regCFG_VUPDATE_LOCK_SET0 0x03aa
+#define regCFG_VUPDATE_LOCK_SET0_BASE_IDX 3
+#define regCUR_VUPDATE_LOCK_SET0 0x03ab
+#define regCUR_VUPDATE_LOCK_SET0_BASE_IDX 3
+#define regADR_CFG_CUR_VUPDATE_LOCK_SET1 0x03ac
+#define regADR_CFG_CUR_VUPDATE_LOCK_SET1_BASE_IDX 3
+#define regADR_CFG_VUPDATE_LOCK_SET1 0x03ad
+#define regADR_CFG_VUPDATE_LOCK_SET1_BASE_IDX 3
+#define regADR_VUPDATE_LOCK_SET1 0x03ae
+#define regADR_VUPDATE_LOCK_SET1_BASE_IDX 3
+#define regCFG_VUPDATE_LOCK_SET1 0x03af
+#define regCFG_VUPDATE_LOCK_SET1_BASE_IDX 3
+#define regCUR_VUPDATE_LOCK_SET1 0x03b0
+#define regCUR_VUPDATE_LOCK_SET1_BASE_IDX 3
+#define regADR_CFG_CUR_VUPDATE_LOCK_SET2 0x03b1
+#define regADR_CFG_CUR_VUPDATE_LOCK_SET2_BASE_IDX 3
+#define regADR_CFG_VUPDATE_LOCK_SET2 0x03b2
+#define regADR_CFG_VUPDATE_LOCK_SET2_BASE_IDX 3
+#define regADR_VUPDATE_LOCK_SET2 0x03b3
+#define regADR_VUPDATE_LOCK_SET2_BASE_IDX 3
+#define regCFG_VUPDATE_LOCK_SET2 0x03b4
+#define regCFG_VUPDATE_LOCK_SET2_BASE_IDX 3
+#define regCUR_VUPDATE_LOCK_SET2 0x03b5
+#define regCUR_VUPDATE_LOCK_SET2_BASE_IDX 3
+#define regADR_CFG_CUR_VUPDATE_LOCK_SET3 0x03b6
+#define regADR_CFG_CUR_VUPDATE_LOCK_SET3_BASE_IDX 3
+#define regADR_CFG_VUPDATE_LOCK_SET3 0x03b7
+#define regADR_CFG_VUPDATE_LOCK_SET3_BASE_IDX 3
+#define regADR_VUPDATE_LOCK_SET3 0x03b8
+#define regADR_VUPDATE_LOCK_SET3_BASE_IDX 3
+#define regCFG_VUPDATE_LOCK_SET3 0x03b9
+#define regCFG_VUPDATE_LOCK_SET3_BASE_IDX 3
+#define regCUR_VUPDATE_LOCK_SET3 0x03ba
+#define regCUR_VUPDATE_LOCK_SET3_BASE_IDX 3
+#define regMPC_CRC_RESULT_A 0x03c0
+#define regMPC_CRC_RESULT_A_BASE_IDX 3
+#define regMPC_CRC_RESULT_R 0x03c1
+#define regMPC_CRC_RESULT_R_BASE_IDX 3
+#define regMPC_CRC_RESULT_G 0x03c2
+#define regMPC_CRC_RESULT_G_BASE_IDX 3
+#define regMPC_CRC_RESULT_B 0x03c3
+#define regMPC_CRC_RESULT_B_BASE_IDX 3
+#define regMPC_DWB0_MUX 0x03c6
+#define regMPC_DWB0_MUX_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpc_dcperfmon_dc_perfmon_dispdec
+// base address: 0x17e1c
+#define regDC_PERFMON15_PERFCOUNTER_CNTL 0x0447
+#define regDC_PERFMON15_PERFCOUNTER_CNTL_BASE_IDX 3
+#define regDC_PERFMON15_PERFCOUNTER_CNTL2 0x0448
+#define regDC_PERFMON15_PERFCOUNTER_CNTL2_BASE_IDX 3
+#define regDC_PERFMON15_PERFCOUNTER_STATE 0x0449
+#define regDC_PERFMON15_PERFCOUNTER_STATE_BASE_IDX 3
+#define regDC_PERFMON15_PERFMON_CNTL 0x044a
+#define regDC_PERFMON15_PERFMON_CNTL_BASE_IDX 3
+#define regDC_PERFMON15_PERFMON_CNTL2 0x044b
+#define regDC_PERFMON15_PERFMON_CNTL2_BASE_IDX 3
+#define regDC_PERFMON15_PERFMON_CVALUE_INT_MISC 0x044c
+#define regDC_PERFMON15_PERFMON_CVALUE_INT_MISC_BASE_IDX 3
+#define regDC_PERFMON15_PERFMON_CVALUE_LOW 0x044d
+#define regDC_PERFMON15_PERFMON_CVALUE_LOW_BASE_IDX 3
+#define regDC_PERFMON15_PERFMON_HI 0x044e
+#define regDC_PERFMON15_PERFMON_HI_BASE_IDX 3
+#define regDC_PERFMON15_PERFMON_LOW 0x044f
+#define regDC_PERFMON15_PERFMON_LOW_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam0_dispdec
+// base address: 0x0
+#define regMPCC_OGAM0_MPCC_OGAM_CONTROL 0x00a8
+#define regMPCC_OGAM0_MPCC_OGAM_CONTROL_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_LUT_INDEX 0x00a9
+#define regMPCC_OGAM0_MPCC_OGAM_LUT_INDEX_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_LUT_DATA 0x00aa
+#define regMPCC_OGAM0_MPCC_OGAM_LUT_DATA_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_LUT_CONTROL 0x00ab
+#define regMPCC_OGAM0_MPCC_OGAM_LUT_CONTROL_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B 0x00ac
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G 0x00ad
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R 0x00ae
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B 0x00af
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G 0x00b0
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R 0x00b1
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_B 0x00b2
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_G 0x00b3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_R 0x00b4
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B 0x00b5
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B 0x00b6
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G 0x00b7
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G 0x00b8
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R 0x00b9
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R 0x00ba
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_B 0x00bb
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_G 0x00bc
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_R 0x00bd
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1 0x00be
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3 0x00bf
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5 0x00c0
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7 0x00c1
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9 0x00c2
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11 0x00c3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13 0x00c4
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15 0x00c5
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17 0x00c6
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19 0x00c7
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21 0x00c8
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23 0x00c9
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25 0x00ca
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27 0x00cb
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29 0x00cc
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31 0x00cd
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33 0x00ce
+#define regMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B 0x00cf
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G 0x00d0
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R 0x00d1
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B 0x00d2
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G 0x00d3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R 0x00d4
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_B 0x00d5
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_G 0x00d6
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_R 0x00d7
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B 0x00d8
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B 0x00d9
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G 0x00da
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G 0x00db
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R 0x00dc
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R 0x00dd
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_B 0x00de
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_G 0x00df
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_G_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_R 0x00e0
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_R_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1 0x00e1
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3 0x00e2
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5 0x00e3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7 0x00e4
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9 0x00e5
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11 0x00e6
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13 0x00e7
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15 0x00e8
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17 0x00e9
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19 0x00ea
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21 0x00eb
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23 0x00ec
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25 0x00ed
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27 0x00ee
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29 0x00ef
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31 0x00f0
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33 0x00f1
+#define regMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_GAMUT_REMAP_COEF_FORMAT 0x00f2
+#define regMPCC_OGAM0_MPCC_GAMUT_REMAP_COEF_FORMAT_BASE_IDX 3
+#define regMPCC_OGAM0_MPCC_GAMUT_REMAP_MODE 0x00f3
+#define regMPCC_OGAM0_MPCC_GAMUT_REMAP_MODE_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A 0x00f4
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_A 0x00f5
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_A_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_A 0x00f6
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_A_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_A 0x00f7
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_A_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_A 0x00f8
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_A_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_A 0x00f9
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_A_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_B 0x00fa
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_B 0x00fb
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_B 0x00fc
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_B 0x00fd
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_B 0x00fe
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_B_BASE_IDX 3
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_B 0x00ff
+#define regMPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_B_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam1_dispdec
+// base address: 0x178
+#define regMPCC_OGAM1_MPCC_OGAM_CONTROL 0x0106
+#define regMPCC_OGAM1_MPCC_OGAM_CONTROL_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_LUT_INDEX 0x0107
+#define regMPCC_OGAM1_MPCC_OGAM_LUT_INDEX_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_LUT_DATA 0x0108
+#define regMPCC_OGAM1_MPCC_OGAM_LUT_DATA_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_LUT_CONTROL 0x0109
+#define regMPCC_OGAM1_MPCC_OGAM_LUT_CONTROL_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B 0x010a
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G 0x010b
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R 0x010c
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B 0x010d
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G 0x010e
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R 0x010f
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_B 0x0110
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_G 0x0111
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_R 0x0112
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B 0x0113
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B 0x0114
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G 0x0115
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G 0x0116
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R 0x0117
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R 0x0118
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_B 0x0119
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_G 0x011a
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_R 0x011b
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1 0x011c
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3 0x011d
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5 0x011e
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7 0x011f
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9 0x0120
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11 0x0121
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13 0x0122
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15 0x0123
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17 0x0124
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19 0x0125
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21 0x0126
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23 0x0127
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25 0x0128
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27 0x0129
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29 0x012a
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31 0x012b
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33 0x012c
+#define regMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B 0x012d
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G 0x012e
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R 0x012f
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B 0x0130
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G 0x0131
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R 0x0132
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_B 0x0133
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_G 0x0134
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_R 0x0135
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B 0x0136
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B 0x0137
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G 0x0138
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G 0x0139
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R 0x013a
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R 0x013b
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_B 0x013c
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_G 0x013d
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_G_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_R 0x013e
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_R_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1 0x013f
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3 0x0140
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5 0x0141
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7 0x0142
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9 0x0143
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11 0x0144
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13 0x0145
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15 0x0146
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17 0x0147
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19 0x0148
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21 0x0149
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23 0x014a
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25 0x014b
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27 0x014c
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29 0x014d
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31 0x014e
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33 0x014f
+#define regMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_GAMUT_REMAP_COEF_FORMAT 0x0150
+#define regMPCC_OGAM1_MPCC_GAMUT_REMAP_COEF_FORMAT_BASE_IDX 3
+#define regMPCC_OGAM1_MPCC_GAMUT_REMAP_MODE 0x0151
+#define regMPCC_OGAM1_MPCC_GAMUT_REMAP_MODE_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_A 0x0152
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_A_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_A 0x0153
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_A_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_A 0x0154
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_A_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_A 0x0155
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_A_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_A 0x0156
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_A_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_A 0x0157
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_A_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_B 0x0158
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_B 0x0159
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_B 0x015a
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_B 0x015b
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_B 0x015c
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_B_BASE_IDX 3
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_B 0x015d
+#define regMPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_B_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam2_dispdec
+// base address: 0x2f0
+#define regMPCC_OGAM2_MPCC_OGAM_CONTROL 0x0164
+#define regMPCC_OGAM2_MPCC_OGAM_CONTROL_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_LUT_INDEX 0x0165
+#define regMPCC_OGAM2_MPCC_OGAM_LUT_INDEX_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_LUT_DATA 0x0166
+#define regMPCC_OGAM2_MPCC_OGAM_LUT_DATA_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_LUT_CONTROL 0x0167
+#define regMPCC_OGAM2_MPCC_OGAM_LUT_CONTROL_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B 0x0168
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G 0x0169
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R 0x016a
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B 0x016b
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G 0x016c
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R 0x016d
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_B 0x016e
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_G 0x016f
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_R 0x0170
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B 0x0171
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B 0x0172
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G 0x0173
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G 0x0174
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R 0x0175
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R 0x0176
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_B 0x0177
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_G 0x0178
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_R 0x0179
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1 0x017a
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3 0x017b
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5 0x017c
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7 0x017d
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9 0x017e
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11 0x017f
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13 0x0180
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15 0x0181
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17 0x0182
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19 0x0183
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21 0x0184
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23 0x0185
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25 0x0186
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27 0x0187
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29 0x0188
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31 0x0189
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33 0x018a
+#define regMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B 0x018b
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G 0x018c
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R 0x018d
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B 0x018e
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G 0x018f
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R 0x0190
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_B 0x0191
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_G 0x0192
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_R 0x0193
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B 0x0194
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B 0x0195
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G 0x0196
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G 0x0197
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R 0x0198
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R 0x0199
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_B 0x019a
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_G 0x019b
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_G_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_R 0x019c
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_R_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1 0x019d
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3 0x019e
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5 0x019f
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7 0x01a0
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9 0x01a1
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11 0x01a2
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13 0x01a3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15 0x01a4
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17 0x01a5
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19 0x01a6
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21 0x01a7
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23 0x01a8
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25 0x01a9
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27 0x01aa
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29 0x01ab
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31 0x01ac
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33 0x01ad
+#define regMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_GAMUT_REMAP_COEF_FORMAT 0x01ae
+#define regMPCC_OGAM2_MPCC_GAMUT_REMAP_COEF_FORMAT_BASE_IDX 3
+#define regMPCC_OGAM2_MPCC_GAMUT_REMAP_MODE 0x01af
+#define regMPCC_OGAM2_MPCC_GAMUT_REMAP_MODE_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_A 0x01b0
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_A_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_A 0x01b1
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_A_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_A 0x01b2
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_A_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_A 0x01b3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_A_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_A 0x01b4
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_A_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_A 0x01b5
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_A_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_B 0x01b6
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_B 0x01b7
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_B 0x01b8
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_B 0x01b9
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_B 0x01ba
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_B_BASE_IDX 3
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_B 0x01bb
+#define regMPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_B_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam3_dispdec
+// base address: 0x468
+#define regMPCC_OGAM3_MPCC_OGAM_CONTROL 0x01c2
+#define regMPCC_OGAM3_MPCC_OGAM_CONTROL_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_LUT_INDEX 0x01c3
+#define regMPCC_OGAM3_MPCC_OGAM_LUT_INDEX_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_LUT_DATA 0x01c4
+#define regMPCC_OGAM3_MPCC_OGAM_LUT_DATA_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_LUT_CONTROL 0x01c5
+#define regMPCC_OGAM3_MPCC_OGAM_LUT_CONTROL_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B 0x01c6
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G 0x01c7
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R 0x01c8
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B 0x01c9
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G 0x01ca
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R 0x01cb
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_B 0x01cc
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_G 0x01cd
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_R 0x01ce
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B 0x01cf
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B 0x01d0
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G 0x01d1
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G 0x01d2
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R 0x01d3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R 0x01d4
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_B 0x01d5
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_G 0x01d6
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_R 0x01d7
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1 0x01d8
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3 0x01d9
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5 0x01da
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7 0x01db
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9 0x01dc
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11 0x01dd
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13 0x01de
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15 0x01df
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17 0x01e0
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19 0x01e1
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21 0x01e2
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23 0x01e3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25 0x01e4
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27 0x01e5
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29 0x01e6
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31 0x01e7
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33 0x01e8
+#define regMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B 0x01e9
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G 0x01ea
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R 0x01eb
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B 0x01ec
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G 0x01ed
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R 0x01ee
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_B 0x01ef
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_G 0x01f0
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_R 0x01f1
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B 0x01f2
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B 0x01f3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G 0x01f4
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G 0x01f5
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R 0x01f6
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R 0x01f7
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_B 0x01f8
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_G 0x01f9
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_G_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_R 0x01fa
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_R_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1 0x01fb
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3 0x01fc
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5 0x01fd
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7 0x01fe
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9 0x01ff
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11 0x0200
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13 0x0201
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15 0x0202
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17 0x0203
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19 0x0204
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21 0x0205
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23 0x0206
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25 0x0207
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27 0x0208
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29 0x0209
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31 0x020a
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33 0x020b
+#define regMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_GAMUT_REMAP_COEF_FORMAT 0x020c
+#define regMPCC_OGAM3_MPCC_GAMUT_REMAP_COEF_FORMAT_BASE_IDX 3
+#define regMPCC_OGAM3_MPCC_GAMUT_REMAP_MODE 0x020d
+#define regMPCC_OGAM3_MPCC_GAMUT_REMAP_MODE_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_A 0x020e
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_A_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_A 0x020f
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_A_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_A 0x0210
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_A_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_A 0x0211
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_A_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_A 0x0212
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_A_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_A 0x0213
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_A_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_B 0x0214
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_B 0x0215
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_B 0x0216
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_B 0x0217
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_B 0x0218
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_B_BASE_IDX 3
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_B 0x0219
+#define regMPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_B_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc_mcm0_dispdec
+// base address: 0x0
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_CONTROL 0x0453
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_CONTROL_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_R 0x0454
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_G 0x0455
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_B 0x0456
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_SCALE_R 0x0457
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_SCALE_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_SCALE_G_B 0x0458
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_SCALE_G_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_LUT_INDEX 0x0459
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_LUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_LUT_DATA 0x045a
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_LUT_DATA_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK 0x045b
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_B 0x045c
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_G 0x045d
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_R 0x045e
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_B 0x045f
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_G 0x0460
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_R 0x0461
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1 0x0462
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3 0x0463
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5 0x0464
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7 0x0465
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9 0x0466
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11 0x0467
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13 0x0468
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15 0x0469
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17 0x046a
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19 0x046b
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21 0x046c
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23 0x046d
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25 0x046e
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27 0x046f
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29 0x0470
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31 0x0471
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33 0x0472
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_B 0x0473
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_G 0x0474
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_R 0x0475
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_B 0x0476
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_G 0x0477
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_R 0x0478
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1 0x0479
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3 0x047a
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5 0x047b
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7 0x047c
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9 0x047d
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11 0x047e
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13 0x047f
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15 0x0480
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17 0x0481
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19 0x0482
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21 0x0483
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23 0x0484
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25 0x0485
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27 0x0486
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29 0x0487
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31 0x0488
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33 0x0489
+#define regMPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_MODE 0x048a
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_MODE_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_INDEX 0x048b
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_DATA 0x048c
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_DATA_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_DATA_30BIT 0x048d
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_DATA_30BIT_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL 0x048e
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_OUT_NORM_FACTOR 0x048f
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_OUT_NORM_FACTOR_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_R 0x0490
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_G 0x0491
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_B 0x0492
+#define regMPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_CONTROL 0x0493
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_CONTROL_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_LUT_INDEX 0x0494
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_LUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_LUT_DATA 0x0495
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_LUT_DATA_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL 0x0496
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_B 0x0497
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_G 0x0498
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_R 0x0499
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B 0x049a
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G 0x049b
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R 0x049c
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B 0x049d
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G 0x049e
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R 0x049f
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B 0x04a0
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B 0x04a1
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G 0x04a2
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G 0x04a3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R 0x04a4
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R 0x04a5
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_B 0x04a6
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_G 0x04a7
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_R 0x04a8
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1 0x04a9
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3 0x04aa
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5 0x04ab
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7 0x04ac
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9 0x04ad
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11 0x04ae
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13 0x04af
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15 0x04b0
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17 0x04b1
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19 0x04b2
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21 0x04b3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23 0x04b4
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25 0x04b5
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27 0x04b6
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29 0x04b7
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31 0x04b8
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33 0x04b9
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_B 0x04ba
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_G 0x04bb
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_R 0x04bc
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B 0x04bd
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G 0x04be
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R 0x04bf
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B 0x04c0
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G 0x04c1
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R 0x04c2
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B 0x04c3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B 0x04c4
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G 0x04c5
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G 0x04c6
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R 0x04c7
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R 0x04c8
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_B 0x04c9
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_G 0x04ca
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_R 0x04cb
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1 0x04cc
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3 0x04cd
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5 0x04ce
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7 0x04cf
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9 0x04d0
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11 0x04d1
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13 0x04d2
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15 0x04d3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17 0x04d4
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19 0x04d5
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21 0x04d6
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23 0x04d7
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25 0x04d8
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27 0x04d9
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29 0x04da
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31 0x04db
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33 0x04dc
+#define regMPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL 0x04dd
+#define regMPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc_mcm1_dispdec
+// base address: 0x240
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_CONTROL 0x04e3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_CONTROL_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_R 0x04e4
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_G 0x04e5
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_B 0x04e6
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_SCALE_R 0x04e7
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_SCALE_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_SCALE_G_B 0x04e8
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_SCALE_G_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_LUT_INDEX 0x04e9
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_LUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_LUT_DATA 0x04ea
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_LUT_DATA_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK 0x04eb
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_B 0x04ec
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_G 0x04ed
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_R 0x04ee
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_B 0x04ef
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_G 0x04f0
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_R 0x04f1
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1 0x04f2
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3 0x04f3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5 0x04f4
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7 0x04f5
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9 0x04f6
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11 0x04f7
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13 0x04f8
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15 0x04f9
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17 0x04fa
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19 0x04fb
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21 0x04fc
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23 0x04fd
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25 0x04fe
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27 0x04ff
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29 0x0500
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31 0x0501
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33 0x0502
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_B 0x0503
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_G 0x0504
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_R 0x0505
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_B 0x0506
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_G 0x0507
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_R 0x0508
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1 0x0509
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3 0x050a
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5 0x050b
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7 0x050c
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9 0x050d
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11 0x050e
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13 0x050f
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15 0x0510
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17 0x0511
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19 0x0512
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21 0x0513
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23 0x0514
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25 0x0515
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27 0x0516
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29 0x0517
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31 0x0518
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33 0x0519
+#define regMPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_MODE 0x051a
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_MODE_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_INDEX 0x051b
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_DATA 0x051c
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_DATA_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_DATA_30BIT 0x051d
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_DATA_30BIT_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL 0x051e
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_OUT_NORM_FACTOR 0x051f
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_OUT_NORM_FACTOR_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_R 0x0520
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_G 0x0521
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_B 0x0522
+#define regMPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_CONTROL 0x0523
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_CONTROL_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_LUT_INDEX 0x0524
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_LUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_LUT_DATA 0x0525
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_LUT_DATA_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL 0x0526
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_B 0x0527
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_G 0x0528
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_R 0x0529
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B 0x052a
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G 0x052b
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R 0x052c
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B 0x052d
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G 0x052e
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R 0x052f
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B 0x0530
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B 0x0531
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G 0x0532
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G 0x0533
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R 0x0534
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R 0x0535
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_B 0x0536
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_G 0x0537
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_R 0x0538
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1 0x0539
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3 0x053a
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5 0x053b
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7 0x053c
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9 0x053d
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11 0x053e
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13 0x053f
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15 0x0540
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17 0x0541
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19 0x0542
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21 0x0543
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23 0x0544
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25 0x0545
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27 0x0546
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29 0x0547
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31 0x0548
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33 0x0549
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_B 0x054a
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_G 0x054b
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_R 0x054c
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B 0x054d
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G 0x054e
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R 0x054f
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B 0x0550
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G 0x0551
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R 0x0552
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B 0x0553
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B 0x0554
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G 0x0555
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G 0x0556
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R 0x0557
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R 0x0558
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_B 0x0559
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_G 0x055a
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_R 0x055b
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1 0x055c
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3 0x055d
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5 0x055e
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7 0x055f
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9 0x0560
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11 0x0561
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13 0x0562
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15 0x0563
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17 0x0564
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19 0x0565
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21 0x0566
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23 0x0567
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25 0x0568
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27 0x0569
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29 0x056a
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31 0x056b
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33 0x056c
+#define regMPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL 0x056d
+#define regMPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc_mcm2_dispdec
+// base address: 0x480
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_CONTROL 0x0573
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_CONTROL_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_R 0x0574
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_G 0x0575
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_B 0x0576
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_SCALE_R 0x0577
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_SCALE_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_SCALE_G_B 0x0578
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_SCALE_G_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_LUT_INDEX 0x0579
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_LUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_LUT_DATA 0x057a
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_LUT_DATA_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK 0x057b
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_B 0x057c
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_G 0x057d
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_R 0x057e
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_B 0x057f
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_G 0x0580
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_R 0x0581
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1 0x0582
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3 0x0583
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5 0x0584
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7 0x0585
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9 0x0586
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11 0x0587
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13 0x0588
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15 0x0589
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17 0x058a
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19 0x058b
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21 0x058c
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23 0x058d
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25 0x058e
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27 0x058f
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29 0x0590
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31 0x0591
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33 0x0592
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_B 0x0593
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_G 0x0594
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_R 0x0595
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_B 0x0596
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_G 0x0597
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_R 0x0598
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1 0x0599
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3 0x059a
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5 0x059b
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7 0x059c
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9 0x059d
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11 0x059e
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13 0x059f
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15 0x05a0
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17 0x05a1
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19 0x05a2
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21 0x05a3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23 0x05a4
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25 0x05a5
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27 0x05a6
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29 0x05a7
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31 0x05a8
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33 0x05a9
+#define regMPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_MODE 0x05aa
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_MODE_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_INDEX 0x05ab
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_DATA 0x05ac
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_DATA_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_DATA_30BIT 0x05ad
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_DATA_30BIT_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL 0x05ae
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_OUT_NORM_FACTOR 0x05af
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_OUT_NORM_FACTOR_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_R 0x05b0
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_G 0x05b1
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_B 0x05b2
+#define regMPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_CONTROL 0x05b3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_CONTROL_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_LUT_INDEX 0x05b4
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_LUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_LUT_DATA 0x05b5
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_LUT_DATA_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL 0x05b6
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_B 0x05b7
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_G 0x05b8
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_R 0x05b9
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B 0x05ba
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G 0x05bb
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R 0x05bc
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B 0x05bd
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G 0x05be
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R 0x05bf
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B 0x05c0
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B 0x05c1
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G 0x05c2
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G 0x05c3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R 0x05c4
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R 0x05c5
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_B 0x05c6
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_G 0x05c7
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_R 0x05c8
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1 0x05c9
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3 0x05ca
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5 0x05cb
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7 0x05cc
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9 0x05cd
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11 0x05ce
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13 0x05cf
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15 0x05d0
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17 0x05d1
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19 0x05d2
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21 0x05d3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23 0x05d4
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25 0x05d5
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27 0x05d6
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29 0x05d7
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31 0x05d8
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33 0x05d9
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_B 0x05da
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_G 0x05db
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_R 0x05dc
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B 0x05dd
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G 0x05de
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R 0x05df
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B 0x05e0
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G 0x05e1
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R 0x05e2
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B 0x05e3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B 0x05e4
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G 0x05e5
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G 0x05e6
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R 0x05e7
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R 0x05e8
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_B 0x05e9
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_G 0x05ea
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_R 0x05eb
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1 0x05ec
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3 0x05ed
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5 0x05ee
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7 0x05ef
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9 0x05f0
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11 0x05f1
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13 0x05f2
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15 0x05f3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17 0x05f4
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19 0x05f5
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21 0x05f6
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23 0x05f7
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25 0x05f8
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27 0x05f9
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29 0x05fa
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31 0x05fb
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33 0x05fc
+#define regMPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL 0x05fd
+#define regMPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpcc_mcm3_dispdec
+// base address: 0x6c0
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_CONTROL 0x0603
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_CONTROL_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_R 0x0604
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_G 0x0605
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_B 0x0606
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_SCALE_R 0x0607
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_SCALE_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_SCALE_G_B 0x0608
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_SCALE_G_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_LUT_INDEX 0x0609
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_LUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_LUT_DATA 0x060a
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_LUT_DATA_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK 0x060b
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_B 0x060c
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_G 0x060d
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_R 0x060e
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_B 0x060f
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_G 0x0610
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_R 0x0611
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1 0x0612
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3 0x0613
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5 0x0614
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7 0x0615
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9 0x0616
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11 0x0617
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13 0x0618
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15 0x0619
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17 0x061a
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19 0x061b
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21 0x061c
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23 0x061d
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25 0x061e
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27 0x061f
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29 0x0620
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31 0x0621
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33 0x0622
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_B 0x0623
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_G 0x0624
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_R 0x0625
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_B 0x0626
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_G 0x0627
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_R 0x0628
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1 0x0629
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3 0x062a
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5 0x062b
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7 0x062c
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9 0x062d
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11 0x062e
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13 0x062f
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15 0x0630
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17 0x0631
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19 0x0632
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21 0x0633
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23 0x0634
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25 0x0635
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27 0x0636
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29 0x0637
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31 0x0638
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33 0x0639
+#define regMPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_MODE 0x063a
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_MODE_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_INDEX 0x063b
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_DATA 0x063c
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_DATA_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_DATA_30BIT 0x063d
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_DATA_30BIT_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL 0x063e
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_OUT_NORM_FACTOR 0x063f
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_OUT_NORM_FACTOR_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_R 0x0640
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_G 0x0641
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_B 0x0642
+#define regMPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_CONTROL 0x0643
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_CONTROL_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_LUT_INDEX 0x0644
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_LUT_INDEX_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_LUT_DATA 0x0645
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_LUT_DATA_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL 0x0646
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_B 0x0647
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_G 0x0648
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_R 0x0649
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B 0x064a
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G 0x064b
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R 0x064c
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B 0x064d
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G 0x064e
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R 0x064f
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B 0x0650
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B 0x0651
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G 0x0652
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G 0x0653
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R 0x0654
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R 0x0655
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_B 0x0656
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_G 0x0657
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_R 0x0658
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1 0x0659
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3 0x065a
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5 0x065b
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7 0x065c
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9 0x065d
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11 0x065e
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13 0x065f
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15 0x0660
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17 0x0661
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19 0x0662
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21 0x0663
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23 0x0664
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25 0x0665
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27 0x0666
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29 0x0667
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31 0x0668
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33 0x0669
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_B 0x066a
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_G 0x066b
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_R 0x066c
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B 0x066d
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G 0x066e
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R 0x066f
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B 0x0670
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G 0x0671
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R 0x0672
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B 0x0673
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B 0x0674
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G 0x0675
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G 0x0676
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R 0x0677
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R 0x0678
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_B 0x0679
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_B_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_G 0x067a
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_G_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_R 0x067b
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_R_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1 0x067c
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3 0x067d
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5 0x067e
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7 0x067f
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9 0x0680
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11 0x0681
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13 0x0682
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15 0x0683
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17 0x0684
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19 0x0685
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21 0x0686
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23 0x0687
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25 0x0688
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27 0x0689
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29 0x068a
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31 0x068b
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33 0x068c
+#define regMPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33_BASE_IDX 3
+#define regMPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL 0x068d
+#define regMPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL_BASE_IDX 3
+
+
+// addressBlock: dce_dc_mpc_mpc_ocsc_dispdec
+// base address: 0x0
+#define regMPC_OUT0_MUX 0x03d8
+#define regMPC_OUT0_MUX_BASE_IDX 3
+#define regMPC_OUT0_DENORM_CONTROL 0x03d9
+#define regMPC_OUT0_DENORM_CONTROL_BASE_IDX 3
+#define regMPC_OUT0_DENORM_CLAMP_G_Y 0x03da
+#define regMPC_OUT0_DENORM_CLAMP_G_Y_BASE_IDX 3
+#define regMPC_OUT0_DENORM_CLAMP_B_CB 0x03db
+#define regMPC_OUT0_DENORM_CLAMP_B_CB_BASE_IDX 3
+#define regMPC_OUT1_MUX 0x03dc
+#define regMPC_OUT1_MUX_BASE_IDX 3
+#define regMPC_OUT1_DENORM_CONTROL 0x03dd
+#define regMPC_OUT1_DENORM_CONTROL_BASE_IDX 3
+#define regMPC_OUT1_DENORM_CLAMP_G_Y 0x03de
+#define regMPC_OUT1_DENORM_CLAMP_G_Y_BASE_IDX 3
+#define regMPC_OUT1_DENORM_CLAMP_B_CB 0x03df
+#define regMPC_OUT1_DENORM_CLAMP_B_CB_BASE_IDX 3
+#define regMPC_OUT2_MUX 0x03e0
+#define regMPC_OUT2_MUX_BASE_IDX 3
+#define regMPC_OUT2_DENORM_CONTROL 0x03e1
+#define regMPC_OUT2_DENORM_CONTROL_BASE_IDX 3
+#define regMPC_OUT2_DENORM_CLAMP_G_Y 0x03e2
+#define regMPC_OUT2_DENORM_CLAMP_G_Y_BASE_IDX 3
+#define regMPC_OUT2_DENORM_CLAMP_B_CB 0x03e3
+#define regMPC_OUT2_DENORM_CLAMP_B_CB_BASE_IDX 3
+#define regMPC_OUT3_MUX 0x03e4
+#define regMPC_OUT3_MUX_BASE_IDX 3
+#define regMPC_OUT3_DENORM_CONTROL 0x03e5
+#define regMPC_OUT3_DENORM_CONTROL_BASE_IDX 3
+#define regMPC_OUT3_DENORM_CLAMP_G_Y 0x03e6
+#define regMPC_OUT3_DENORM_CLAMP_G_Y_BASE_IDX 3
+#define regMPC_OUT3_DENORM_CLAMP_B_CB 0x03e7
+#define regMPC_OUT3_DENORM_CLAMP_B_CB_BASE_IDX 3
+#define regMPC_OUT_CSC_COEF_FORMAT 0x03f0
+#define regMPC_OUT_CSC_COEF_FORMAT_BASE_IDX 3
+#define regMPC_OUT0_CSC_MODE 0x03f1
+#define regMPC_OUT0_CSC_MODE_BASE_IDX 3
+#define regMPC_OUT0_CSC_C11_C12_A 0x03f2
+#define regMPC_OUT0_CSC_C11_C12_A_BASE_IDX 3
+#define regMPC_OUT0_CSC_C13_C14_A 0x03f3
+#define regMPC_OUT0_CSC_C13_C14_A_BASE_IDX 3
+#define regMPC_OUT0_CSC_C21_C22_A 0x03f4
+#define regMPC_OUT0_CSC_C21_C22_A_BASE_IDX 3
+#define regMPC_OUT0_CSC_C23_C24_A 0x03f5
+#define regMPC_OUT0_CSC_C23_C24_A_BASE_IDX 3
+#define regMPC_OUT0_CSC_C31_C32_A 0x03f6
+#define regMPC_OUT0_CSC_C31_C32_A_BASE_IDX 3
+#define regMPC_OUT0_CSC_C33_C34_A 0x03f7
+#define regMPC_OUT0_CSC_C33_C34_A_BASE_IDX 3
+#define regMPC_OUT0_CSC_C11_C12_B 0x03f8
+#define regMPC_OUT0_CSC_C11_C12_B_BASE_IDX 3
+#define regMPC_OUT0_CSC_C13_C14_B 0x03f9
+#define regMPC_OUT0_CSC_C13_C14_B_BASE_IDX 3
+#define regMPC_OUT0_CSC_C21_C22_B 0x03fa
+#define regMPC_OUT0_CSC_C21_C22_B_BASE_IDX 3
+#define regMPC_OUT0_CSC_C23_C24_B 0x03fb
+#define regMPC_OUT0_CSC_C23_C24_B_BASE_IDX 3
+#define regMPC_OUT0_CSC_C31_C32_B 0x03fc
+#define regMPC_OUT0_CSC_C31_C32_B_BASE_IDX 3
+#define regMPC_OUT0_CSC_C33_C34_B 0x03fd
+#define regMPC_OUT0_CSC_C33_C34_B_BASE_IDX 3
+#define regMPC_OUT1_CSC_MODE 0x03fe
+#define regMPC_OUT1_CSC_MODE_BASE_IDX 3
+#define regMPC_OUT1_CSC_C11_C12_A 0x03ff
+#define regMPC_OUT1_CSC_C11_C12_A_BASE_IDX 3
+#define regMPC_OUT1_CSC_C13_C14_A 0x0400
+#define regMPC_OUT1_CSC_C13_C14_A_BASE_IDX 3
+#define regMPC_OUT1_CSC_C21_C22_A 0x0401
+#define regMPC_OUT1_CSC_C21_C22_A_BASE_IDX 3
+#define regMPC_OUT1_CSC_C23_C24_A 0x0402
+#define regMPC_OUT1_CSC_C23_C24_A_BASE_IDX 3
+#define regMPC_OUT1_CSC_C31_C32_A 0x0403
+#define regMPC_OUT1_CSC_C31_C32_A_BASE_IDX 3
+#define regMPC_OUT1_CSC_C33_C34_A 0x0404
+#define regMPC_OUT1_CSC_C33_C34_A_BASE_IDX 3
+#define regMPC_OUT1_CSC_C11_C12_B 0x0405
+#define regMPC_OUT1_CSC_C11_C12_B_BASE_IDX 3
+#define regMPC_OUT1_CSC_C13_C14_B 0x0406
+#define regMPC_OUT1_CSC_C13_C14_B_BASE_IDX 3
+#define regMPC_OUT1_CSC_C21_C22_B 0x0407
+#define regMPC_OUT1_CSC_C21_C22_B_BASE_IDX 3
+#define regMPC_OUT1_CSC_C23_C24_B 0x0408
+#define regMPC_OUT1_CSC_C23_C24_B_BASE_IDX 3
+#define regMPC_OUT1_CSC_C31_C32_B 0x0409
+#define regMPC_OUT1_CSC_C31_C32_B_BASE_IDX 3
+#define regMPC_OUT1_CSC_C33_C34_B 0x040a
+#define regMPC_OUT1_CSC_C33_C34_B_BASE_IDX 3
+#define regMPC_OUT2_CSC_MODE 0x040b
+#define regMPC_OUT2_CSC_MODE_BASE_IDX 3
+#define regMPC_OUT2_CSC_C11_C12_A 0x040c
+#define regMPC_OUT2_CSC_C11_C12_A_BASE_IDX 3
+#define regMPC_OUT2_CSC_C13_C14_A 0x040d
+#define regMPC_OUT2_CSC_C13_C14_A_BASE_IDX 3
+#define regMPC_OUT2_CSC_C21_C22_A 0x040e
+#define regMPC_OUT2_CSC_C21_C22_A_BASE_IDX 3
+#define regMPC_OUT2_CSC_C23_C24_A 0x040f
+#define regMPC_OUT2_CSC_C23_C24_A_BASE_IDX 3
+#define regMPC_OUT2_CSC_C31_C32_A 0x0410
+#define regMPC_OUT2_CSC_C31_C32_A_BASE_IDX 3
+#define regMPC_OUT2_CSC_C33_C34_A 0x0411
+#define regMPC_OUT2_CSC_C33_C34_A_BASE_IDX 3
+#define regMPC_OUT2_CSC_C11_C12_B 0x0412
+#define regMPC_OUT2_CSC_C11_C12_B_BASE_IDX 3
+#define regMPC_OUT2_CSC_C13_C14_B 0x0413
+#define regMPC_OUT2_CSC_C13_C14_B_BASE_IDX 3
+#define regMPC_OUT2_CSC_C21_C22_B 0x0414
+#define regMPC_OUT2_CSC_C21_C22_B_BASE_IDX 3
+#define regMPC_OUT2_CSC_C23_C24_B 0x0415
+#define regMPC_OUT2_CSC_C23_C24_B_BASE_IDX 3
+#define regMPC_OUT2_CSC_C31_C32_B 0x0416
+#define regMPC_OUT2_CSC_C31_C32_B_BASE_IDX 3
+#define regMPC_OUT2_CSC_C33_C34_B 0x0417
+#define regMPC_OUT2_CSC_C33_C34_B_BASE_IDX 3
+#define regMPC_OUT3_CSC_MODE 0x0418
+#define regMPC_OUT3_CSC_MODE_BASE_IDX 3
+#define regMPC_OUT3_CSC_C11_C12_A 0x0419
+#define regMPC_OUT3_CSC_C11_C12_A_BASE_IDX 3
+#define regMPC_OUT3_CSC_C13_C14_A 0x041a
+#define regMPC_OUT3_CSC_C13_C14_A_BASE_IDX 3
+#define regMPC_OUT3_CSC_C21_C22_A 0x041b
+#define regMPC_OUT3_CSC_C21_C22_A_BASE_IDX 3
+#define regMPC_OUT3_CSC_C23_C24_A 0x041c
+#define regMPC_OUT3_CSC_C23_C24_A_BASE_IDX 3
+#define regMPC_OUT3_CSC_C31_C32_A 0x041d
+#define regMPC_OUT3_CSC_C31_C32_A_BASE_IDX 3
+#define regMPC_OUT3_CSC_C33_C34_A 0x041e
+#define regMPC_OUT3_CSC_C33_C34_A_BASE_IDX 3
+#define regMPC_OUT3_CSC_C11_C12_B 0x041f
+#define regMPC_OUT3_CSC_C11_C12_B_BASE_IDX 3
+#define regMPC_OUT3_CSC_C13_C14_B 0x0420
+#define regMPC_OUT3_CSC_C13_C14_B_BASE_IDX 3
+#define regMPC_OUT3_CSC_C21_C22_B 0x0421
+#define regMPC_OUT3_CSC_C21_C22_B_BASE_IDX 3
+#define regMPC_OUT3_CSC_C23_C24_B 0x0422
+#define regMPC_OUT3_CSC_C23_C24_B_BASE_IDX 3
+#define regMPC_OUT3_CSC_C31_C32_B 0x0423
+#define regMPC_OUT3_CSC_C31_C32_B_BASE_IDX 3
+#define regMPC_OUT3_CSC_C33_C34_B 0x0424
+#define regMPC_OUT3_CSC_C33_C34_B_BASE_IDX 3
+
+
+// addressBlock: dce_dc_opp_abm0_dispdec
+// base address: 0x0
+#define regABM0_BL1_PWM_AMBIENT_LIGHT_LEVEL 0x0e7a
+#define regABM0_BL1_PWM_AMBIENT_LIGHT_LEVEL_BASE_IDX 3
+#define regABM0_BL1_PWM_USER_LEVEL 0x0e7b
+#define regABM0_BL1_PWM_USER_LEVEL_BASE_IDX 3
+#define regABM0_BL1_PWM_TARGET_ABM_LEVEL 0x0e7c
+#define regABM0_BL1_PWM_TARGET_ABM_LEVEL_BASE_IDX 3
+#define regABM0_BL1_PWM_CURRENT_ABM_LEVEL 0x0e7d
+#define regABM0_BL1_PWM_CURRENT_ABM_LEVEL_BASE_IDX 3
+#define regABM0_BL1_PWM_FINAL_DUTY_CYCLE 0x0e7e
+#define regABM0_BL1_PWM_FINAL_DUTY_CYCLE_BASE_IDX 3
+#define regABM0_BL1_PWM_MINIMUM_DUTY_CYCLE 0x0e7f
+#define regABM0_BL1_PWM_MINIMUM_DUTY_CYCLE_BASE_IDX 3
+#define regABM0_BL1_PWM_ABM_CNTL 0x0e80
+#define regABM0_BL1_PWM_ABM_CNTL_BASE_IDX 3
+#define regABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE 0x0e81
+#define regABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE_BASE_IDX 3
+#define regABM0_BL1_PWM_GRP2_REG_LOCK 0x0e82
+#define regABM0_BL1_PWM_GRP2_REG_LOCK_BASE_IDX 3
+#define regABM0_DC_ABM1_CNTL 0x0e83
+#define regABM0_DC_ABM1_CNTL_BASE_IDX 3
+#define regABM0_DC_ABM1_IPCSC_COEFF_SEL 0x0e84
+#define regABM0_DC_ABM1_IPCSC_COEFF_SEL_BASE_IDX 3
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_0 0x0e85
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_0_BASE_IDX 3
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_1 0x0e86
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_1_BASE_IDX 3
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_2 0x0e87
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_2_BASE_IDX 3
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_3 0x0e88
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_3_BASE_IDX 3
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_4 0x0e89
+#define regABM0_DC_ABM1_ACE_OFFSET_SLOPE_4_BASE_IDX 3
+#define regABM0_DC_ABM1_ACE_THRES_12 0x0e8a
+#define regABM0_DC_ABM1_ACE_THRES_12_BASE_IDX 3
+#define regABM0_DC_ABM1_ACE_THRES_34 0x0e8b
+#define regABM0_DC_ABM1_ACE_THRES_34_BASE_IDX 3
+#define regABM0_DC_ABM1_ACE_CNTL_MISC 0x0e8c
+#define regABM0_DC_ABM1_ACE_CNTL_MISC_BASE_IDX 3
+#define regABM0_DC_ABM1_HGLS_REG_READ_PROGRESS 0x0e8e
+#define regABM0_DC_ABM1_HGLS_REG_READ_PROGRESS_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_MISC_CTRL 0x0e8f
+#define regABM0_DC_ABM1_HG_MISC_CTRL_BASE_IDX 3
+#define regABM0_DC_ABM1_LS_SUM_OF_LUMA 0x0e90
+#define regABM0_DC_ABM1_LS_SUM_OF_LUMA_BASE_IDX 3
+#define regABM0_DC_ABM1_LS_MIN_MAX_LUMA 0x0e91
+#define regABM0_DC_ABM1_LS_MIN_MAX_LUMA_BASE_IDX 3
+#define regABM0_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA 0x0e92
+#define regABM0_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA_BASE_IDX 3
+#define regABM0_DC_ABM1_LS_PIXEL_COUNT 0x0e93
+#define regABM0_DC_ABM1_LS_PIXEL_COUNT_BASE_IDX 3
+#define regABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES 0x0e94
+#define regABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES_BASE_IDX 3
+#define regABM0_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT 0x0e95
+#define regABM0_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT_BASE_IDX 3
+#define regABM0_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT 0x0e96
+#define regABM0_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_SAMPLE_RATE 0x0e97
+#define regABM0_DC_ABM1_HG_SAMPLE_RATE_BASE_IDX 3
+#define regABM0_DC_ABM1_LS_SAMPLE_RATE 0x0e98
+#define regABM0_DC_ABM1_LS_SAMPLE_RATE_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG 0x0e99
+#define regABM0_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX 0x0e9a
+#define regABM0_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX 0x0e9b
+#define regABM0_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX 0x0e9c
+#define regABM0_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX 0x0e9d
+#define regABM0_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_1 0x0e9e
+#define regABM0_DC_ABM1_HG_RESULT_1_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_2 0x0e9f
+#define regABM0_DC_ABM1_HG_RESULT_2_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_3 0x0ea0
+#define regABM0_DC_ABM1_HG_RESULT_3_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_4 0x0ea1
+#define regABM0_DC_ABM1_HG_RESULT_4_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_5 0x0ea2
+#define regABM0_DC_ABM1_HG_RESULT_5_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_6 0x0ea3
+#define regABM0_DC_ABM1_HG_RESULT_6_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_7 0x0ea4
+#define regABM0_DC_ABM1_HG_RESULT_7_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_8 0x0ea5
+#define regABM0_DC_ABM1_HG_RESULT_8_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_9 0x0ea6
+#define regABM0_DC_ABM1_HG_RESULT_9_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_10 0x0ea7
+#define regABM0_DC_ABM1_HG_RESULT_10_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_11 0x0ea8
+#define regABM0_DC_ABM1_HG_RESULT_11_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_12 0x0ea9
+#define regABM0_DC_ABM1_HG_RESULT_12_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_13 0x0eaa
+#define regABM0_DC_ABM1_HG_RESULT_13_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_14 0x0eab
+#define regABM0_DC_ABM1_HG_RESULT_14_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_15 0x0eac
+#define regABM0_DC_ABM1_HG_RESULT_15_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_16 0x0ead
+#define regABM0_DC_ABM1_HG_RESULT_16_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_17 0x0eae
+#define regABM0_DC_ABM1_HG_RESULT_17_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_18 0x0eaf
+#define regABM0_DC_ABM1_HG_RESULT_18_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_19 0x0eb0
+#define regABM0_DC_ABM1_HG_RESULT_19_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_20 0x0eb1
+#define regABM0_DC_ABM1_HG_RESULT_20_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_21 0x0eb2
+#define regABM0_DC_ABM1_HG_RESULT_21_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_22 0x0eb3
+#define regABM0_DC_ABM1_HG_RESULT_22_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_23 0x0eb4
+#define regABM0_DC_ABM1_HG_RESULT_23_BASE_IDX 3
+#define regABM0_DC_ABM1_HG_RESULT_24 0x0eb5
+#define regABM0_DC_ABM1_HG_RESULT_24_BASE_IDX 3
+#define regABM0_DC_ABM1_BL_MASTER_LOCK 0x0eb6
+#define regABM0_DC_ABM1_BL_MASTER_LOCK_BASE_IDX 3
+
+
+// addressBlock: dce_dc_opp_abm1_dispdec
+// base address: 0x104
+#define regABM1_BL1_PWM_AMBIENT_LIGHT_LEVEL 0x0ebb
+#define regABM1_BL1_PWM_AMBIENT_LIGHT_LEVEL_BASE_IDX 3
+#define regABM1_BL1_PWM_USER_LEVEL 0x0ebc
+#define regABM1_BL1_PWM_USER_LEVEL_BASE_IDX 3
+#define regABM1_BL1_PWM_TARGET_ABM_LEVEL 0x0ebd
+#define regABM1_BL1_PWM_TARGET_ABM_LEVEL_BASE_IDX 3
+#define regABM1_BL1_PWM_CURRENT_ABM_LEVEL 0x0ebe
+#define regABM1_BL1_PWM_CURRENT_ABM_LEVEL_BASE_IDX 3
+#define regABM1_BL1_PWM_FINAL_DUTY_CYCLE 0x0ebf
+#define regABM1_BL1_PWM_FINAL_DUTY_CYCLE_BASE_IDX 3
+#define regABM1_BL1_PWM_MINIMUM_DUTY_CYCLE 0x0ec0
+#define regABM1_BL1_PWM_MINIMUM_DUTY_CYCLE_BASE_IDX 3
+#define regABM1_BL1_PWM_ABM_CNTL 0x0ec1
+#define regABM1_BL1_PWM_ABM_CNTL_BASE_IDX 3
+#define regABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE 0x0ec2
+#define regABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE_BASE_IDX 3
+#define regABM1_BL1_PWM_GRP2_REG_LOCK 0x0ec3
+#define regABM1_BL1_PWM_GRP2_REG_LOCK_BASE_IDX 3
+#define regABM1_DC_ABM1_CNTL 0x0ec4
+#define regABM1_DC_ABM1_CNTL_BASE_IDX 3
+#define regABM1_DC_ABM1_IPCSC_COEFF_SEL 0x0ec5
+#define regABM1_DC_ABM1_IPCSC_COEFF_SEL_BASE_IDX 3
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_0 0x0ec6
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_0_BASE_IDX 3
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_1 0x0ec7
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_1_BASE_IDX 3
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_2 0x0ec8
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_2_BASE_IDX 3
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_3 0x0ec9
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_3_BASE_IDX 3
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_4 0x0eca
+#define regABM1_DC_ABM1_ACE_OFFSET_SLOPE_4_BASE_IDX 3
+#define regABM1_DC_ABM1_ACE_THRES_12 0x0ecb
+#define regABM1_DC_ABM1_ACE_THRES_12_BASE_IDX 3
+#define regABM1_DC_ABM1_ACE_THRES_34 0x0ecc
+#define regABM1_DC_ABM1_ACE_THRES_34_BASE_IDX 3
+#define regABM1_DC_ABM1_ACE_CNTL_MISC 0x0ecd
+#define regABM1_DC_ABM1_ACE_CNTL_MISC_BASE_IDX 3
+#define regABM1_DC_ABM1_HGLS_REG_READ_PROGRESS 0x0ecf
+#define regABM1_DC_ABM1_HGLS_REG_READ_PROGRESS_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_MISC_CTRL 0x0ed0
+#define regABM1_DC_ABM1_HG_MISC_CTRL_BASE_IDX 3
+#define regABM1_DC_ABM1_LS_SUM_OF_LUMA 0x0ed1
+#define regABM1_DC_ABM1_LS_SUM_OF_LUMA_BASE_IDX 3
+#define regABM1_DC_ABM1_LS_MIN_MAX_LUMA 0x0ed2
+#define regABM1_DC_ABM1_LS_MIN_MAX_LUMA_BASE_IDX 3
+#define regABM1_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA 0x0ed3
+#define regABM1_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA_BASE_IDX 3
+#define regABM1_DC_ABM1_LS_PIXEL_COUNT 0x0ed4
+#define regABM1_DC_ABM1_LS_PIXEL_COUNT_BASE_IDX 3
+#define regABM1_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES 0x0ed5
+#define regABM1_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES_BASE_IDX 3
+#define regABM1_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT 0x0ed6
+#define regABM1_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT_BASE_IDX 3
+#define regABM1_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT 0x0ed7
+#define regABM1_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_SAMPLE_RATE 0x0ed8
+#define regABM1_DC_ABM1_HG_SAMPLE_RATE_BASE_IDX 3
+#define regABM1_DC_ABM1_LS_SAMPLE_RATE 0x0ed9
+#define regABM1_DC_ABM1_LS_SAMPLE_RATE_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG 0x0eda
+#define regABM1_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX 0x0edb
+#define regABM1_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX 0x0edc
+#define regABM1_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX 0x0edd
+#define regABM1_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX 0x0ede
+#define regABM1_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_1 0x0edf
+#define regABM1_DC_ABM1_HG_RESULT_1_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_2 0x0ee0
+#define regABM1_DC_ABM1_HG_RESULT_2_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_3 0x0ee1
+#define regABM1_DC_ABM1_HG_RESULT_3_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_4 0x0ee2
+#define regABM1_DC_ABM1_HG_RESULT_4_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_5 0x0ee3
+#define regABM1_DC_ABM1_HG_RESULT_5_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_6 0x0ee4
+#define regABM1_DC_ABM1_HG_RESULT_6_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_7 0x0ee5
+#define regABM1_DC_ABM1_HG_RESULT_7_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_8 0x0ee6
+#define regABM1_DC_ABM1_HG_RESULT_8_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_9 0x0ee7
+#define regABM1_DC_ABM1_HG_RESULT_9_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_10 0x0ee8
+#define regABM1_DC_ABM1_HG_RESULT_10_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_11 0x0ee9
+#define regABM1_DC_ABM1_HG_RESULT_11_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_12 0x0eea
+#define regABM1_DC_ABM1_HG_RESULT_12_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_13 0x0eeb
+#define regABM1_DC_ABM1_HG_RESULT_13_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_14 0x0eec
+#define regABM1_DC_ABM1_HG_RESULT_14_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_15 0x0eed
+#define regABM1_DC_ABM1_HG_RESULT_15_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_16 0x0eee
+#define regABM1_DC_ABM1_HG_RESULT_16_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_17 0x0eef
+#define regABM1_DC_ABM1_HG_RESULT_17_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_18 0x0ef0
+#define regABM1_DC_ABM1_HG_RESULT_18_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_19 0x0ef1
+#define regABM1_DC_ABM1_HG_RESULT_19_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_20 0x0ef2
+#define regABM1_DC_ABM1_HG_RESULT_20_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_21 0x0ef3
+#define regABM1_DC_ABM1_HG_RESULT_21_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_22 0x0ef4
+#define regABM1_DC_ABM1_HG_RESULT_22_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_23 0x0ef5
+#define regABM1_DC_ABM1_HG_RESULT_23_BASE_IDX 3
+#define regABM1_DC_ABM1_HG_RESULT_24 0x0ef6
+#define regABM1_DC_ABM1_HG_RESULT_24_BASE_IDX 3
+#define regABM1_DC_ABM1_BL_MASTER_LOCK 0x0ef7
+#define regABM1_DC_ABM1_BL_MASTER_LOCK_BASE_IDX 3
+
+
+// addressBlock: dce_dc_opp_abm2_dispdec
+// base address: 0x208
+#define regABM2_BL1_PWM_AMBIENT_LIGHT_LEVEL 0x0efc
+#define regABM2_BL1_PWM_AMBIENT_LIGHT_LEVEL_BASE_IDX 3
+#define regABM2_BL1_PWM_USER_LEVEL 0x0efd
+#define regABM2_BL1_PWM_USER_LEVEL_BASE_IDX 3
+#define regABM2_BL1_PWM_TARGET_ABM_LEVEL 0x0efe
+#define regABM2_BL1_PWM_TARGET_ABM_LEVEL_BASE_IDX 3
+#define regABM2_BL1_PWM_CURRENT_ABM_LEVEL 0x0eff
+#define regABM2_BL1_PWM_CURRENT_ABM_LEVEL_BASE_IDX 3
+#define regABM2_BL1_PWM_FINAL_DUTY_CYCLE 0x0f00
+#define regABM2_BL1_PWM_FINAL_DUTY_CYCLE_BASE_IDX 3
+#define regABM2_BL1_PWM_MINIMUM_DUTY_CYCLE 0x0f01
+#define regABM2_BL1_PWM_MINIMUM_DUTY_CYCLE_BASE_IDX 3
+#define regABM2_BL1_PWM_ABM_CNTL 0x0f02
+#define regABM2_BL1_PWM_ABM_CNTL_BASE_IDX 3
+#define regABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE 0x0f03
+#define regABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE_BASE_IDX 3
+#define regABM2_BL1_PWM_GRP2_REG_LOCK 0x0f04
+#define regABM2_BL1_PWM_GRP2_REG_LOCK_BASE_IDX 3
+#define regABM2_DC_ABM1_CNTL 0x0f05
+#define regABM2_DC_ABM1_CNTL_BASE_IDX 3
+#define regABM2_DC_ABM1_IPCSC_COEFF_SEL 0x0f06
+#define regABM2_DC_ABM1_IPCSC_COEFF_SEL_BASE_IDX 3
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_0 0x0f07
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_0_BASE_IDX 3
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_1 0x0f08
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_1_BASE_IDX 3
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_2 0x0f09
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_2_BASE_IDX 3
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_3 0x0f0a
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_3_BASE_IDX 3
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_4 0x0f0b
+#define regABM2_DC_ABM1_ACE_OFFSET_SLOPE_4_BASE_IDX 3
+#define regABM2_DC_ABM1_ACE_THRES_12 0x0f0c
+#define regABM2_DC_ABM1_ACE_THRES_12_BASE_IDX 3
+#define regABM2_DC_ABM1_ACE_THRES_34 0x0f0d
+#define regABM2_DC_ABM1_ACE_THRES_34_BASE_IDX 3
+#define regABM2_DC_ABM1_ACE_CNTL_MISC 0x0f0e
+#define regABM2_DC_ABM1_ACE_CNTL_MISC_BASE_IDX 3
+#define regABM2_DC_ABM1_HGLS_REG_READ_PROGRESS 0x0f10
+#define regABM2_DC_ABM1_HGLS_REG_READ_PROGRESS_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_MISC_CTRL 0x0f11
+#define regABM2_DC_ABM1_HG_MISC_CTRL_BASE_IDX 3
+#define regABM2_DC_ABM1_LS_SUM_OF_LUMA 0x0f12
+#define regABM2_DC_ABM1_LS_SUM_OF_LUMA_BASE_IDX 3
+#define regABM2_DC_ABM1_LS_MIN_MAX_LUMA 0x0f13
+#define regABM2_DC_ABM1_LS_MIN_MAX_LUMA_BASE_IDX 3
+#define regABM2_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA 0x0f14
+#define regABM2_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA_BASE_IDX 3
+#define regABM2_DC_ABM1_LS_PIXEL_COUNT 0x0f15
+#define regABM2_DC_ABM1_LS_PIXEL_COUNT_BASE_IDX 3
+#define regABM2_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES 0x0f16
+#define regABM2_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES_BASE_IDX 3
+#define regABM2_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT 0x0f17
+#define regABM2_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT_BASE_IDX 3
+#define regABM2_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT 0x0f18
+#define regABM2_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_SAMPLE_RATE 0x0f19
+#define regABM2_DC_ABM1_HG_SAMPLE_RATE_BASE_IDX 3
+#define regABM2_DC_ABM1_LS_SAMPLE_RATE 0x0f1a
+#define regABM2_DC_ABM1_LS_SAMPLE_RATE_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG 0x0f1b
+#define regABM2_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX 0x0f1c
+#define regABM2_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX 0x0f1d
+#define regABM2_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX 0x0f1e
+#define regABM2_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX 0x0f1f
+#define regABM2_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_1 0x0f20
+#define regABM2_DC_ABM1_HG_RESULT_1_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_2 0x0f21
+#define regABM2_DC_ABM1_HG_RESULT_2_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_3 0x0f22
+#define regABM2_DC_ABM1_HG_RESULT_3_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_4 0x0f23
+#define regABM2_DC_ABM1_HG_RESULT_4_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_5 0x0f24
+#define regABM2_DC_ABM1_HG_RESULT_5_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_6 0x0f25
+#define regABM2_DC_ABM1_HG_RESULT_6_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_7 0x0f26
+#define regABM2_DC_ABM1_HG_RESULT_7_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_8 0x0f27
+#define regABM2_DC_ABM1_HG_RESULT_8_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_9 0x0f28
+#define regABM2_DC_ABM1_HG_RESULT_9_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_10 0x0f29
+#define regABM2_DC_ABM1_HG_RESULT_10_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_11 0x0f2a
+#define regABM2_DC_ABM1_HG_RESULT_11_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_12 0x0f2b
+#define regABM2_DC_ABM1_HG_RESULT_12_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_13 0x0f2c
+#define regABM2_DC_ABM1_HG_RESULT_13_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_14 0x0f2d
+#define regABM2_DC_ABM1_HG_RESULT_14_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_15 0x0f2e
+#define regABM2_DC_ABM1_HG_RESULT_15_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_16 0x0f2f
+#define regABM2_DC_ABM1_HG_RESULT_16_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_17 0x0f30
+#define regABM2_DC_ABM1_HG_RESULT_17_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_18 0x0f31
+#define regABM2_DC_ABM1_HG_RESULT_18_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_19 0x0f32
+#define regABM2_DC_ABM1_HG_RESULT_19_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_20 0x0f33
+#define regABM2_DC_ABM1_HG_RESULT_20_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_21 0x0f34
+#define regABM2_DC_ABM1_HG_RESULT_21_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_22 0x0f35
+#define regABM2_DC_ABM1_HG_RESULT_22_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_23 0x0f36
+#define regABM2_DC_ABM1_HG_RESULT_23_BASE_IDX 3
+#define regABM2_DC_ABM1_HG_RESULT_24 0x0f37
+#define regABM2_DC_ABM1_HG_RESULT_24_BASE_IDX 3
+#define regABM2_DC_ABM1_BL_MASTER_LOCK 0x0f38
+#define regABM2_DC_ABM1_BL_MASTER_LOCK_BASE_IDX 3
+
+
+// addressBlock: dce_dc_opp_abm3_dispdec
+// base address: 0x30c
+#define regABM3_BL1_PWM_AMBIENT_LIGHT_LEVEL 0x0f3d
+#define regABM3_BL1_PWM_AMBIENT_LIGHT_LEVEL_BASE_IDX 3
+#define regABM3_BL1_PWM_USER_LEVEL 0x0f3e
+#define regABM3_BL1_PWM_USER_LEVEL_BASE_IDX 3
+#define regABM3_BL1_PWM_TARGET_ABM_LEVEL 0x0f3f
+#define regABM3_BL1_PWM_TARGET_ABM_LEVEL_BASE_IDX 3
+#define regABM3_BL1_PWM_CURRENT_ABM_LEVEL 0x0f40
+#define regABM3_BL1_PWM_CURRENT_ABM_LEVEL_BASE_IDX 3
+#define regABM3_BL1_PWM_FINAL_DUTY_CYCLE 0x0f41
+#define regABM3_BL1_PWM_FINAL_DUTY_CYCLE_BASE_IDX 3
+#define regABM3_BL1_PWM_MINIMUM_DUTY_CYCLE 0x0f42
+#define regABM3_BL1_PWM_MINIMUM_DUTY_CYCLE_BASE_IDX 3
+#define regABM3_BL1_PWM_ABM_CNTL 0x0f43
+#define regABM3_BL1_PWM_ABM_CNTL_BASE_IDX 3
+#define regABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE 0x0f44
+#define regABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE_BASE_IDX 3
+#define regABM3_BL1_PWM_GRP2_REG_LOCK 0x0f45
+#define regABM3_BL1_PWM_GRP2_REG_LOCK_BASE_IDX 3
+#define regABM3_DC_ABM1_CNTL 0x0f46
+#define regABM3_DC_ABM1_CNTL_BASE_IDX 3
+#define regABM3_DC_ABM1_IPCSC_COEFF_SEL 0x0f47
+#define regABM3_DC_ABM1_IPCSC_COEFF_SEL_BASE_IDX 3
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_0 0x0f48
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_0_BASE_IDX 3
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_1 0x0f49
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_1_BASE_IDX 3
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_2 0x0f4a
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_2_BASE_IDX 3
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_3 0x0f4b
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_3_BASE_IDX 3
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_4 0x0f4c
+#define regABM3_DC_ABM1_ACE_OFFSET_SLOPE_4_BASE_IDX 3
+#define regABM3_DC_ABM1_ACE_THRES_12 0x0f4d
+#define regABM3_DC_ABM1_ACE_THRES_12_BASE_IDX 3
+#define regABM3_DC_ABM1_ACE_THRES_34 0x0f4e
+#define regABM3_DC_ABM1_ACE_THRES_34_BASE_IDX 3
+#define regABM3_DC_ABM1_ACE_CNTL_MISC 0x0f4f
+#define regABM3_DC_ABM1_ACE_CNTL_MISC_BASE_IDX 3
+#define regABM3_DC_ABM1_HGLS_REG_READ_PROGRESS 0x0f51
+#define regABM3_DC_ABM1_HGLS_REG_READ_PROGRESS_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_MISC_CTRL 0x0f52
+#define regABM3_DC_ABM1_HG_MISC_CTRL_BASE_IDX 3
+#define regABM3_DC_ABM1_LS_SUM_OF_LUMA 0x0f53
+#define regABM3_DC_ABM1_LS_SUM_OF_LUMA_BASE_IDX 3
+#define regABM3_DC_ABM1_LS_MIN_MAX_LUMA 0x0f54
+#define regABM3_DC_ABM1_LS_MIN_MAX_LUMA_BASE_IDX 3
+#define regABM3_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA 0x0f55
+#define regABM3_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA_BASE_IDX 3
+#define regABM3_DC_ABM1_LS_PIXEL_COUNT 0x0f56
+#define regABM3_DC_ABM1_LS_PIXEL_COUNT_BASE_IDX 3
+#define regABM3_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES 0x0f57
+#define regABM3_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES_BASE_IDX 3
+#define regABM3_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT 0x0f58
+#define regABM3_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT_BASE_IDX 3
+#define regABM3_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT 0x0f59
+#define regABM3_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_SAMPLE_RATE 0x0f5a
+#define regABM3_DC_ABM1_HG_SAMPLE_RATE_BASE_IDX 3
+#define regABM3_DC_ABM1_LS_SAMPLE_RATE 0x0f5b
+#define regABM3_DC_ABM1_LS_SAMPLE_RATE_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG 0x0f5c
+#define regABM3_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX 0x0f5d
+#define regABM3_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX 0x0f5e
+#define regABM3_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX 0x0f5f
+#define regABM3_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX 0x0f60
+#define regABM3_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_1 0x0f61
+#define regABM3_DC_ABM1_HG_RESULT_1_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_2 0x0f62
+#define regABM3_DC_ABM1_HG_RESULT_2_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_3 0x0f63
+#define regABM3_DC_ABM1_HG_RESULT_3_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_4 0x0f64
+#define regABM3_DC_ABM1_HG_RESULT_4_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_5 0x0f65
+#define regABM3_DC_ABM1_HG_RESULT_5_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_6 0x0f66
+#define regABM3_DC_ABM1_HG_RESULT_6_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_7 0x0f67
+#define regABM3_DC_ABM1_HG_RESULT_7_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_8 0x0f68
+#define regABM3_DC_ABM1_HG_RESULT_8_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_9 0x0f69
+#define regABM3_DC_ABM1_HG_RESULT_9_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_10 0x0f6a
+#define regABM3_DC_ABM1_HG_RESULT_10_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_11 0x0f6b
+#define regABM3_DC_ABM1_HG_RESULT_11_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_12 0x0f6c
+#define regABM3_DC_ABM1_HG_RESULT_12_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_13 0x0f6d
+#define regABM3_DC_ABM1_HG_RESULT_13_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_14 0x0f6e
+#define regABM3_DC_ABM1_HG_RESULT_14_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_15 0x0f6f
+#define regABM3_DC_ABM1_HG_RESULT_15_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_16 0x0f70
+#define regABM3_DC_ABM1_HG_RESULT_16_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_17 0x0f71
+#define regABM3_DC_ABM1_HG_RESULT_17_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_18 0x0f72
+#define regABM3_DC_ABM1_HG_RESULT_18_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_19 0x0f73
+#define regABM3_DC_ABM1_HG_RESULT_19_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_20 0x0f74
+#define regABM3_DC_ABM1_HG_RESULT_20_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_21 0x0f75
+#define regABM3_DC_ABM1_HG_RESULT_21_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_22 0x0f76
+#define regABM3_DC_ABM1_HG_RESULT_22_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_23 0x0f77
+#define regABM3_DC_ABM1_HG_RESULT_23_BASE_IDX 3
+#define regABM3_DC_ABM1_HG_RESULT_24 0x0f78
+#define regABM3_DC_ABM1_HG_RESULT_24_BASE_IDX 3
+#define regABM3_DC_ABM1_BL_MASTER_LOCK 0x0f79
+#define regABM3_DC_ABM1_BL_MASTER_LOCK_BASE_IDX 3
+
+
+// addressBlock: dce_dc_opp_dpg0_dispdec
+// base address: 0x0
+#define regDPG0_DPG_CONTROL 0x1854
+#define regDPG0_DPG_CONTROL_BASE_IDX 2
+#define regDPG0_DPG_RAMP_CONTROL 0x1855
+#define regDPG0_DPG_RAMP_CONTROL_BASE_IDX 2
+#define regDPG0_DPG_DIMENSIONS 0x1856
+#define regDPG0_DPG_DIMENSIONS_BASE_IDX 2
+#define regDPG0_DPG_COLOUR_R_CR 0x1857
+#define regDPG0_DPG_COLOUR_R_CR_BASE_IDX 2
+#define regDPG0_DPG_COLOUR_G_Y 0x1858
+#define regDPG0_DPG_COLOUR_G_Y_BASE_IDX 2
+#define regDPG0_DPG_COLOUR_B_CB 0x1859
+#define regDPG0_DPG_COLOUR_B_CB_BASE_IDX 2
+#define regDPG0_DPG_OFFSET_SEGMENT 0x185a
+#define regDPG0_DPG_OFFSET_SEGMENT_BASE_IDX 2
+#define regDPG0_DPG_STATUS 0x185b
+#define regDPG0_DPG_STATUS_BASE_IDX 2
+#define regDPG0_OPP_PIPE_CRC_RESULTA 0x185e
+#define regDPG0_OPP_PIPE_CRC_RESULTA_BASE_IDX 2
+#define regDPG0_OPP_PIPE_CRC_RESULTR 0x185f
+#define regDPG0_OPP_PIPE_CRC_RESULTR_BASE_IDX 2
+#define regDPG0_OPP_PIPE_CRC_RESULTG 0x1860
+#define regDPG0_OPP_PIPE_CRC_RESULTG_BASE_IDX 2
+#define regDPG0_OPP_PIPE_CRC_RESULTB 0x1861
+#define regDPG0_OPP_PIPE_CRC_RESULTB_BASE_IDX 2
+#define regDPG0_OPP_PIPE_CRC_RESULTC 0x1862
+#define regDPG0_OPP_PIPE_CRC_RESULTC_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_fmt0_dispdec
+// base address: 0x0
+#define regFMT0_FMT_CLAMP_COMPONENT_R 0x183c
+#define regFMT0_FMT_CLAMP_COMPONENT_R_BASE_IDX 2
+#define regFMT0_FMT_CLAMP_COMPONENT_G 0x183d
+#define regFMT0_FMT_CLAMP_COMPONENT_G_BASE_IDX 2
+#define regFMT0_FMT_CLAMP_COMPONENT_B 0x183e
+#define regFMT0_FMT_CLAMP_COMPONENT_B_BASE_IDX 2
+#define regFMT0_FMT_DYNAMIC_EXP_CNTL 0x183f
+#define regFMT0_FMT_DYNAMIC_EXP_CNTL_BASE_IDX 2
+#define regFMT0_FMT_CONTROL 0x1840
+#define regFMT0_FMT_CONTROL_BASE_IDX 2
+#define regFMT0_FMT_BIT_DEPTH_CONTROL 0x1841
+#define regFMT0_FMT_BIT_DEPTH_CONTROL_BASE_IDX 2
+#define regFMT0_FMT_DITHER_RAND_R_SEED 0x1842
+#define regFMT0_FMT_DITHER_RAND_R_SEED_BASE_IDX 2
+#define regFMT0_FMT_DITHER_RAND_G_SEED 0x1843
+#define regFMT0_FMT_DITHER_RAND_G_SEED_BASE_IDX 2
+#define regFMT0_FMT_DITHER_RAND_B_SEED 0x1844
+#define regFMT0_FMT_DITHER_RAND_B_SEED_BASE_IDX 2
+#define regFMT0_FMT_CLAMP_CNTL 0x1845
+#define regFMT0_FMT_CLAMP_CNTL_BASE_IDX 2
+#define regFMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x1846
+#define regFMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL_BASE_IDX 2
+#define regFMT0_FMT_MAP420_MEMORY_CONTROL 0x1847
+#define regFMT0_FMT_MAP420_MEMORY_CONTROL_BASE_IDX 2
+#define regFMT0_FMT_422_CONTROL 0x1849
+#define regFMT0_FMT_422_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_oppbuf0_dispdec
+// base address: 0x0
+#define regOPPBUF0_OPPBUF_CONTROL 0x1884
+#define regOPPBUF0_OPPBUF_CONTROL_BASE_IDX 2
+#define regOPPBUF0_OPPBUF_3D_PARAMETERS_0 0x1885
+#define regOPPBUF0_OPPBUF_3D_PARAMETERS_0_BASE_IDX 2
+#define regOPPBUF0_OPPBUF_3D_PARAMETERS_1 0x1886
+#define regOPPBUF0_OPPBUF_3D_PARAMETERS_1_BASE_IDX 2
+#define regOPPBUF0_OPPBUF_CONTROL1 0x1889
+#define regOPPBUF0_OPPBUF_CONTROL1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe0_dispdec
+// base address: 0x0
+#define regOPP_PIPE0_OPP_PIPE_CONTROL 0x188c
+#define regOPP_PIPE0_OPP_PIPE_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc0_dispdec
+// base address: 0x0
+#define regOPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL 0x1891
+#define regOPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL_BASE_IDX 2
+#define regOPP_PIPE_CRC0_OPP_PIPE_CRC_MASK 0x1892
+#define regOPP_PIPE_CRC0_OPP_PIPE_CRC_MASK_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dpg1_dispdec
+// base address: 0x168
+#define regDPG1_DPG_CONTROL 0x18ae
+#define regDPG1_DPG_CONTROL_BASE_IDX 2
+#define regDPG1_DPG_RAMP_CONTROL 0x18af
+#define regDPG1_DPG_RAMP_CONTROL_BASE_IDX 2
+#define regDPG1_DPG_DIMENSIONS 0x18b0
+#define regDPG1_DPG_DIMENSIONS_BASE_IDX 2
+#define regDPG1_DPG_COLOUR_R_CR 0x18b1
+#define regDPG1_DPG_COLOUR_R_CR_BASE_IDX 2
+#define regDPG1_DPG_COLOUR_G_Y 0x18b2
+#define regDPG1_DPG_COLOUR_G_Y_BASE_IDX 2
+#define regDPG1_DPG_COLOUR_B_CB 0x18b3
+#define regDPG1_DPG_COLOUR_B_CB_BASE_IDX 2
+#define regDPG1_DPG_OFFSET_SEGMENT 0x18b4
+#define regDPG1_DPG_OFFSET_SEGMENT_BASE_IDX 2
+#define regDPG1_DPG_STATUS 0x18b5
+#define regDPG1_DPG_STATUS_BASE_IDX 2
+#define regDPG1_OPP_PIPE_CRC_RESULTA 0x18b8
+#define regDPG1_OPP_PIPE_CRC_RESULTA_BASE_IDX 2
+#define regDPG1_OPP_PIPE_CRC_RESULTR 0x18b9
+#define regDPG1_OPP_PIPE_CRC_RESULTR_BASE_IDX 2
+#define regDPG1_OPP_PIPE_CRC_RESULTG 0x18ba
+#define regDPG1_OPP_PIPE_CRC_RESULTG_BASE_IDX 2
+#define regDPG1_OPP_PIPE_CRC_RESULTB 0x18bb
+#define regDPG1_OPP_PIPE_CRC_RESULTB_BASE_IDX 2
+#define regDPG1_OPP_PIPE_CRC_RESULTC 0x18bc
+#define regDPG1_OPP_PIPE_CRC_RESULTC_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_fmt1_dispdec
+// base address: 0x168
+#define regFMT1_FMT_CLAMP_COMPONENT_R 0x1896
+#define regFMT1_FMT_CLAMP_COMPONENT_R_BASE_IDX 2
+#define regFMT1_FMT_CLAMP_COMPONENT_G 0x1897
+#define regFMT1_FMT_CLAMP_COMPONENT_G_BASE_IDX 2
+#define regFMT1_FMT_CLAMP_COMPONENT_B 0x1898
+#define regFMT1_FMT_CLAMP_COMPONENT_B_BASE_IDX 2
+#define regFMT1_FMT_DYNAMIC_EXP_CNTL 0x1899
+#define regFMT1_FMT_DYNAMIC_EXP_CNTL_BASE_IDX 2
+#define regFMT1_FMT_CONTROL 0x189a
+#define regFMT1_FMT_CONTROL_BASE_IDX 2
+#define regFMT1_FMT_BIT_DEPTH_CONTROL 0x189b
+#define regFMT1_FMT_BIT_DEPTH_CONTROL_BASE_IDX 2
+#define regFMT1_FMT_DITHER_RAND_R_SEED 0x189c
+#define regFMT1_FMT_DITHER_RAND_R_SEED_BASE_IDX 2
+#define regFMT1_FMT_DITHER_RAND_G_SEED 0x189d
+#define regFMT1_FMT_DITHER_RAND_G_SEED_BASE_IDX 2
+#define regFMT1_FMT_DITHER_RAND_B_SEED 0x189e
+#define regFMT1_FMT_DITHER_RAND_B_SEED_BASE_IDX 2
+#define regFMT1_FMT_CLAMP_CNTL 0x189f
+#define regFMT1_FMT_CLAMP_CNTL_BASE_IDX 2
+#define regFMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x18a0
+#define regFMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL_BASE_IDX 2
+#define regFMT1_FMT_MAP420_MEMORY_CONTROL 0x18a1
+#define regFMT1_FMT_MAP420_MEMORY_CONTROL_BASE_IDX 2
+#define regFMT1_FMT_422_CONTROL 0x18a3
+#define regFMT1_FMT_422_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_oppbuf1_dispdec
+// base address: 0x168
+#define regOPPBUF1_OPPBUF_CONTROL 0x18de
+#define regOPPBUF1_OPPBUF_CONTROL_BASE_IDX 2
+#define regOPPBUF1_OPPBUF_3D_PARAMETERS_0 0x18df
+#define regOPPBUF1_OPPBUF_3D_PARAMETERS_0_BASE_IDX 2
+#define regOPPBUF1_OPPBUF_3D_PARAMETERS_1 0x18e0
+#define regOPPBUF1_OPPBUF_3D_PARAMETERS_1_BASE_IDX 2
+#define regOPPBUF1_OPPBUF_CONTROL1 0x18e3
+#define regOPPBUF1_OPPBUF_CONTROL1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe1_dispdec
+// base address: 0x168
+#define regOPP_PIPE1_OPP_PIPE_CONTROL 0x18e6
+#define regOPP_PIPE1_OPP_PIPE_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc1_dispdec
+// base address: 0x168
+#define regOPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL 0x18eb
+#define regOPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL_BASE_IDX 2
+#define regOPP_PIPE_CRC1_OPP_PIPE_CRC_MASK 0x18ec
+#define regOPP_PIPE_CRC1_OPP_PIPE_CRC_MASK_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dpg2_dispdec
+// base address: 0x2d0
+#define regDPG2_DPG_CONTROL 0x1908
+#define regDPG2_DPG_CONTROL_BASE_IDX 2
+#define regDPG2_DPG_RAMP_CONTROL 0x1909
+#define regDPG2_DPG_RAMP_CONTROL_BASE_IDX 2
+#define regDPG2_DPG_DIMENSIONS 0x190a
+#define regDPG2_DPG_DIMENSIONS_BASE_IDX 2
+#define regDPG2_DPG_COLOUR_R_CR 0x190b
+#define regDPG2_DPG_COLOUR_R_CR_BASE_IDX 2
+#define regDPG2_DPG_COLOUR_G_Y 0x190c
+#define regDPG2_DPG_COLOUR_G_Y_BASE_IDX 2
+#define regDPG2_DPG_COLOUR_B_CB 0x190d
+#define regDPG2_DPG_COLOUR_B_CB_BASE_IDX 2
+#define regDPG2_DPG_OFFSET_SEGMENT 0x190e
+#define regDPG2_DPG_OFFSET_SEGMENT_BASE_IDX 2
+#define regDPG2_DPG_STATUS 0x190f
+#define regDPG2_DPG_STATUS_BASE_IDX 2
+#define regDPG2_OPP_PIPE_CRC_RESULTA 0x1912
+#define regDPG2_OPP_PIPE_CRC_RESULTA_BASE_IDX 2
+#define regDPG2_OPP_PIPE_CRC_RESULTR 0x1913
+#define regDPG2_OPP_PIPE_CRC_RESULTR_BASE_IDX 2
+#define regDPG2_OPP_PIPE_CRC_RESULTG 0x1914
+#define regDPG2_OPP_PIPE_CRC_RESULTG_BASE_IDX 2
+#define regDPG2_OPP_PIPE_CRC_RESULTB 0x1915
+#define regDPG2_OPP_PIPE_CRC_RESULTB_BASE_IDX 2
+#define regDPG2_OPP_PIPE_CRC_RESULTC 0x1916
+#define regDPG2_OPP_PIPE_CRC_RESULTC_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_fmt2_dispdec
+// base address: 0x2d0
+#define regFMT2_FMT_CLAMP_COMPONENT_R 0x18f0
+#define regFMT2_FMT_CLAMP_COMPONENT_R_BASE_IDX 2
+#define regFMT2_FMT_CLAMP_COMPONENT_G 0x18f1
+#define regFMT2_FMT_CLAMP_COMPONENT_G_BASE_IDX 2
+#define regFMT2_FMT_CLAMP_COMPONENT_B 0x18f2
+#define regFMT2_FMT_CLAMP_COMPONENT_B_BASE_IDX 2
+#define regFMT2_FMT_DYNAMIC_EXP_CNTL 0x18f3
+#define regFMT2_FMT_DYNAMIC_EXP_CNTL_BASE_IDX 2
+#define regFMT2_FMT_CONTROL 0x18f4
+#define regFMT2_FMT_CONTROL_BASE_IDX 2
+#define regFMT2_FMT_BIT_DEPTH_CONTROL 0x18f5
+#define regFMT2_FMT_BIT_DEPTH_CONTROL_BASE_IDX 2
+#define regFMT2_FMT_DITHER_RAND_R_SEED 0x18f6
+#define regFMT2_FMT_DITHER_RAND_R_SEED_BASE_IDX 2
+#define regFMT2_FMT_DITHER_RAND_G_SEED 0x18f7
+#define regFMT2_FMT_DITHER_RAND_G_SEED_BASE_IDX 2
+#define regFMT2_FMT_DITHER_RAND_B_SEED 0x18f8
+#define regFMT2_FMT_DITHER_RAND_B_SEED_BASE_IDX 2
+#define regFMT2_FMT_CLAMP_CNTL 0x18f9
+#define regFMT2_FMT_CLAMP_CNTL_BASE_IDX 2
+#define regFMT2_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x18fa
+#define regFMT2_FMT_SIDE_BY_SIDE_STEREO_CONTROL_BASE_IDX 2
+#define regFMT2_FMT_MAP420_MEMORY_CONTROL 0x18fb
+#define regFMT2_FMT_MAP420_MEMORY_CONTROL_BASE_IDX 2
+#define regFMT2_FMT_422_CONTROL 0x18fd
+#define regFMT2_FMT_422_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_oppbuf2_dispdec
+// base address: 0x2d0
+#define regOPPBUF2_OPPBUF_CONTROL 0x1938
+#define regOPPBUF2_OPPBUF_CONTROL_BASE_IDX 2
+#define regOPPBUF2_OPPBUF_3D_PARAMETERS_0 0x1939
+#define regOPPBUF2_OPPBUF_3D_PARAMETERS_0_BASE_IDX 2
+#define regOPPBUF2_OPPBUF_3D_PARAMETERS_1 0x193a
+#define regOPPBUF2_OPPBUF_3D_PARAMETERS_1_BASE_IDX 2
+#define regOPPBUF2_OPPBUF_CONTROL1 0x193d
+#define regOPPBUF2_OPPBUF_CONTROL1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe2_dispdec
+// base address: 0x2d0
+#define regOPP_PIPE2_OPP_PIPE_CONTROL 0x1940
+#define regOPP_PIPE2_OPP_PIPE_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc2_dispdec
+// base address: 0x2d0
+#define regOPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL 0x1945
+#define regOPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL_BASE_IDX 2
+#define regOPP_PIPE_CRC2_OPP_PIPE_CRC_MASK 0x1946
+#define regOPP_PIPE_CRC2_OPP_PIPE_CRC_MASK_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dpg3_dispdec
+// base address: 0x438
+#define regDPG3_DPG_CONTROL 0x1962
+#define regDPG3_DPG_CONTROL_BASE_IDX 2
+#define regDPG3_DPG_RAMP_CONTROL 0x1963
+#define regDPG3_DPG_RAMP_CONTROL_BASE_IDX 2
+#define regDPG3_DPG_DIMENSIONS 0x1964
+#define regDPG3_DPG_DIMENSIONS_BASE_IDX 2
+#define regDPG3_DPG_COLOUR_R_CR 0x1965
+#define regDPG3_DPG_COLOUR_R_CR_BASE_IDX 2
+#define regDPG3_DPG_COLOUR_G_Y 0x1966
+#define regDPG3_DPG_COLOUR_G_Y_BASE_IDX 2
+#define regDPG3_DPG_COLOUR_B_CB 0x1967
+#define regDPG3_DPG_COLOUR_B_CB_BASE_IDX 2
+#define regDPG3_DPG_OFFSET_SEGMENT 0x1968
+#define regDPG3_DPG_OFFSET_SEGMENT_BASE_IDX 2
+#define regDPG3_DPG_STATUS 0x1969
+#define regDPG3_DPG_STATUS_BASE_IDX 2
+#define regDPG3_OPP_PIPE_CRC_RESULTA 0x196c
+#define regDPG3_OPP_PIPE_CRC_RESULTA_BASE_IDX 2
+#define regDPG3_OPP_PIPE_CRC_RESULTR 0x196d
+#define regDPG3_OPP_PIPE_CRC_RESULTR_BASE_IDX 2
+#define regDPG3_OPP_PIPE_CRC_RESULTG 0x196e
+#define regDPG3_OPP_PIPE_CRC_RESULTG_BASE_IDX 2
+#define regDPG3_OPP_PIPE_CRC_RESULTB 0x196f
+#define regDPG3_OPP_PIPE_CRC_RESULTB_BASE_IDX 2
+#define regDPG3_OPP_PIPE_CRC_RESULTC 0x1970
+#define regDPG3_OPP_PIPE_CRC_RESULTC_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_fmt3_dispdec
+// base address: 0x438
+#define regFMT3_FMT_CLAMP_COMPONENT_R 0x194a
+#define regFMT3_FMT_CLAMP_COMPONENT_R_BASE_IDX 2
+#define regFMT3_FMT_CLAMP_COMPONENT_G 0x194b
+#define regFMT3_FMT_CLAMP_COMPONENT_G_BASE_IDX 2
+#define regFMT3_FMT_CLAMP_COMPONENT_B 0x194c
+#define regFMT3_FMT_CLAMP_COMPONENT_B_BASE_IDX 2
+#define regFMT3_FMT_DYNAMIC_EXP_CNTL 0x194d
+#define regFMT3_FMT_DYNAMIC_EXP_CNTL_BASE_IDX 2
+#define regFMT3_FMT_CONTROL 0x194e
+#define regFMT3_FMT_CONTROL_BASE_IDX 2
+#define regFMT3_FMT_BIT_DEPTH_CONTROL 0x194f
+#define regFMT3_FMT_BIT_DEPTH_CONTROL_BASE_IDX 2
+#define regFMT3_FMT_DITHER_RAND_R_SEED 0x1950
+#define regFMT3_FMT_DITHER_RAND_R_SEED_BASE_IDX 2
+#define regFMT3_FMT_DITHER_RAND_G_SEED 0x1951
+#define regFMT3_FMT_DITHER_RAND_G_SEED_BASE_IDX 2
+#define regFMT3_FMT_DITHER_RAND_B_SEED 0x1952
+#define regFMT3_FMT_DITHER_RAND_B_SEED_BASE_IDX 2
+#define regFMT3_FMT_CLAMP_CNTL 0x1953
+#define regFMT3_FMT_CLAMP_CNTL_BASE_IDX 2
+#define regFMT3_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x1954
+#define regFMT3_FMT_SIDE_BY_SIDE_STEREO_CONTROL_BASE_IDX 2
+#define regFMT3_FMT_MAP420_MEMORY_CONTROL 0x1955
+#define regFMT3_FMT_MAP420_MEMORY_CONTROL_BASE_IDX 2
+#define regFMT3_FMT_422_CONTROL 0x1957
+#define regFMT3_FMT_422_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_oppbuf3_dispdec
+// base address: 0x438
+#define regOPPBUF3_OPPBUF_CONTROL 0x1992
+#define regOPPBUF3_OPPBUF_CONTROL_BASE_IDX 2
+#define regOPPBUF3_OPPBUF_3D_PARAMETERS_0 0x1993
+#define regOPPBUF3_OPPBUF_3D_PARAMETERS_0_BASE_IDX 2
+#define regOPPBUF3_OPPBUF_3D_PARAMETERS_1 0x1994
+#define regOPPBUF3_OPPBUF_3D_PARAMETERS_1_BASE_IDX 2
+#define regOPPBUF3_OPPBUF_CONTROL1 0x1997
+#define regOPPBUF3_OPPBUF_CONTROL1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe3_dispdec
+// base address: 0x438
+#define regOPP_PIPE3_OPP_PIPE_CONTROL 0x199a
+#define regOPP_PIPE3_OPP_PIPE_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc3_dispdec
+// base address: 0x438
+#define regOPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL 0x199f
+#define regOPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL_BASE_IDX 2
+#define regOPP_PIPE_CRC3_OPP_PIPE_CRC_MASK 0x19a0
+#define regOPP_PIPE_CRC3_OPP_PIPE_CRC_MASK_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dscrm0_dispdec
+// base address: 0x0
+#define regDSCRM0_DSCRM_DSC_FORWARD_CONFIG 0x1a64
+#define regDSCRM0_DSCRM_DSC_FORWARD_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dscrm1_dispdec
+// base address: 0x4
+#define regDSCRM1_DSCRM_DSC_FORWARD_CONFIG 0x1a65
+#define regDSCRM1_DSCRM_DSC_FORWARD_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dscrm2_dispdec
+// base address: 0x8
+#define regDSCRM2_DSCRM_DSC_FORWARD_CONFIG 0x1a66
+#define regDSCRM2_DSCRM_DSC_FORWARD_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dscrm3_dispdec
+// base address: 0xc
+#define regDSCRM3_DSCRM_DSC_FORWARD_CONFIG 0x1a67
+#define regDSCRM3_DSCRM_DSC_FORWARD_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_top_dispdec
+// base address: 0x0
+#define regOPP_TOP_CLK_CONTROL 0x1a5e
+#define regOPP_TOP_CLK_CONTROL_BASE_IDX 2
+#define regOPP_ABM_CONTROL 0x1a60
+#define regOPP_ABM_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_dcperfmon_dc_perfmon_dispdec
+// base address: 0x6af8
+#define regDC_PERFMON16_PERFCOUNTER_CNTL 0x1abe
+#define regDC_PERFMON16_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON16_PERFCOUNTER_CNTL2 0x1abf
+#define regDC_PERFMON16_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON16_PERFCOUNTER_STATE 0x1ac0
+#define regDC_PERFMON16_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON16_PERFMON_CNTL 0x1ac1
+#define regDC_PERFMON16_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON16_PERFMON_CNTL2 0x1ac2
+#define regDC_PERFMON16_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON16_PERFMON_CVALUE_INT_MISC 0x1ac3
+#define regDC_PERFMON16_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON16_PERFMON_CVALUE_LOW 0x1ac4
+#define regDC_PERFMON16_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON16_PERFMON_HI 0x1ac5
+#define regDC_PERFMON16_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON16_PERFMON_LOW 0x1ac6
+#define regDC_PERFMON16_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_odm0_dispdec
+// base address: 0x0
+#define regODM0_OPTC_INPUT_GLOBAL_CONTROL 0x1aca
+#define regODM0_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
+#define regODM0_OPTC_DATA_SOURCE_SELECT 0x1acb
+#define regODM0_OPTC_DATA_SOURCE_SELECT_BASE_IDX 2
+#define regODM0_OPTC_DATA_FORMAT_CONTROL 0x1acc
+#define regODM0_OPTC_DATA_FORMAT_CONTROL_BASE_IDX 2
+#define regODM0_OPTC_BYTES_PER_PIXEL 0x1acd
+#define regODM0_OPTC_BYTES_PER_PIXEL_BASE_IDX 2
+#define regODM0_OPTC_WIDTH_CONTROL 0x1ace
+#define regODM0_OPTC_WIDTH_CONTROL_BASE_IDX 2
+#define regODM0_OPTC_INPUT_CLOCK_CONTROL 0x1acf
+#define regODM0_OPTC_INPUT_CLOCK_CONTROL_BASE_IDX 2
+#define regODM0_OPTC_MEMORY_CONFIG 0x1ad0
+#define regODM0_OPTC_MEMORY_CONFIG_BASE_IDX 2
+#define regODM0_OPTC_INPUT_SPARE_REGISTER 0x1ad1
+#define regODM0_OPTC_INPUT_SPARE_REGISTER_BASE_IDX 2
+#define regODM0_OPTC_UNDERFLOW_THRESHOLD 0x1ad5
+#define regODM0_OPTC_UNDERFLOW_THRESHOLD_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_odm1_dispdec
+// base address: 0x40
+#define regODM1_OPTC_INPUT_GLOBAL_CONTROL 0x1ada
+#define regODM1_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
+#define regODM1_OPTC_DATA_SOURCE_SELECT 0x1adb
+#define regODM1_OPTC_DATA_SOURCE_SELECT_BASE_IDX 2
+#define regODM1_OPTC_DATA_FORMAT_CONTROL 0x1adc
+#define regODM1_OPTC_DATA_FORMAT_CONTROL_BASE_IDX 2
+#define regODM1_OPTC_BYTES_PER_PIXEL 0x1add
+#define regODM1_OPTC_BYTES_PER_PIXEL_BASE_IDX 2
+#define regODM1_OPTC_WIDTH_CONTROL 0x1ade
+#define regODM1_OPTC_WIDTH_CONTROL_BASE_IDX 2
+#define regODM1_OPTC_INPUT_CLOCK_CONTROL 0x1adf
+#define regODM1_OPTC_INPUT_CLOCK_CONTROL_BASE_IDX 2
+#define regODM1_OPTC_MEMORY_CONFIG 0x1ae0
+#define regODM1_OPTC_MEMORY_CONFIG_BASE_IDX 2
+#define regODM1_OPTC_INPUT_SPARE_REGISTER 0x1ae1
+#define regODM1_OPTC_INPUT_SPARE_REGISTER_BASE_IDX 2
+#define regODM1_OPTC_UNDERFLOW_THRESHOLD 0x1ae5
+#define regODM1_OPTC_UNDERFLOW_THRESHOLD_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_odm2_dispdec
+// base address: 0x80
+#define regODM2_OPTC_INPUT_GLOBAL_CONTROL 0x1aea
+#define regODM2_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
+#define regODM2_OPTC_DATA_SOURCE_SELECT 0x1aeb
+#define regODM2_OPTC_DATA_SOURCE_SELECT_BASE_IDX 2
+#define regODM2_OPTC_DATA_FORMAT_CONTROL 0x1aec
+#define regODM2_OPTC_DATA_FORMAT_CONTROL_BASE_IDX 2
+#define regODM2_OPTC_BYTES_PER_PIXEL 0x1aed
+#define regODM2_OPTC_BYTES_PER_PIXEL_BASE_IDX 2
+#define regODM2_OPTC_WIDTH_CONTROL 0x1aee
+#define regODM2_OPTC_WIDTH_CONTROL_BASE_IDX 2
+#define regODM2_OPTC_INPUT_CLOCK_CONTROL 0x1aef
+#define regODM2_OPTC_INPUT_CLOCK_CONTROL_BASE_IDX 2
+#define regODM2_OPTC_MEMORY_CONFIG 0x1af0
+#define regODM2_OPTC_MEMORY_CONFIG_BASE_IDX 2
+#define regODM2_OPTC_INPUT_SPARE_REGISTER 0x1af1
+#define regODM2_OPTC_INPUT_SPARE_REGISTER_BASE_IDX 2
+#define regODM2_OPTC_UNDERFLOW_THRESHOLD 0x1af5
+#define regODM2_OPTC_UNDERFLOW_THRESHOLD_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_odm3_dispdec
+// base address: 0xc0
+#define regODM3_OPTC_INPUT_GLOBAL_CONTROL 0x1afa
+#define regODM3_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
+#define regODM3_OPTC_DATA_SOURCE_SELECT 0x1afb
+#define regODM3_OPTC_DATA_SOURCE_SELECT_BASE_IDX 2
+#define regODM3_OPTC_DATA_FORMAT_CONTROL 0x1afc
+#define regODM3_OPTC_DATA_FORMAT_CONTROL_BASE_IDX 2
+#define regODM3_OPTC_BYTES_PER_PIXEL 0x1afd
+#define regODM3_OPTC_BYTES_PER_PIXEL_BASE_IDX 2
+#define regODM3_OPTC_WIDTH_CONTROL 0x1afe
+#define regODM3_OPTC_WIDTH_CONTROL_BASE_IDX 2
+#define regODM3_OPTC_INPUT_CLOCK_CONTROL 0x1aff
+#define regODM3_OPTC_INPUT_CLOCK_CONTROL_BASE_IDX 2
+#define regODM3_OPTC_MEMORY_CONFIG 0x1b00
+#define regODM3_OPTC_MEMORY_CONFIG_BASE_IDX 2
+#define regODM3_OPTC_INPUT_SPARE_REGISTER 0x1b01
+#define regODM3_OPTC_INPUT_SPARE_REGISTER_BASE_IDX 2
+#define regODM3_OPTC_UNDERFLOW_THRESHOLD 0x1b05
+#define regODM3_OPTC_UNDERFLOW_THRESHOLD_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_otg0_dispdec
+// base address: 0x0
+#define regOTG0_OTG_H_TOTAL 0x1b2a
+#define regOTG0_OTG_H_TOTAL_BASE_IDX 2
+#define regOTG0_OTG_H_BLANK_START_END 0x1b2b
+#define regOTG0_OTG_H_BLANK_START_END_BASE_IDX 2
+#define regOTG0_OTG_H_SYNC_A 0x1b2c
+#define regOTG0_OTG_H_SYNC_A_BASE_IDX 2
+#define regOTG0_OTG_H_SYNC_A_CNTL 0x1b2d
+#define regOTG0_OTG_H_SYNC_A_CNTL_BASE_IDX 2
+#define regOTG0_OTG_H_TIMING_CNTL 0x1b2e
+#define regOTG0_OTG_H_TIMING_CNTL_BASE_IDX 2
+#define regOTG0_OTG_V_TOTAL 0x1b2f
+#define regOTG0_OTG_V_TOTAL_BASE_IDX 2
+#define regOTG0_OTG_V_TOTAL_MIN 0x1b30
+#define regOTG0_OTG_V_TOTAL_MIN_BASE_IDX 2
+#define regOTG0_OTG_V_TOTAL_MAX 0x1b31
+#define regOTG0_OTG_V_TOTAL_MAX_BASE_IDX 2
+#define regOTG0_OTG_V_TOTAL_MID 0x1b32
+#define regOTG0_OTG_V_TOTAL_MID_BASE_IDX 2
+#define regOTG0_OTG_V_TOTAL_CONTROL 0x1b33
+#define regOTG0_OTG_V_TOTAL_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_V_COUNT_STOP_CONTROL 0x1b34
+#define regOTG0_OTG_V_COUNT_STOP_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_V_COUNT_STOP_CONTROL2 0x1b35
+#define regOTG0_OTG_V_COUNT_STOP_CONTROL2_BASE_IDX 2
+#define regOTG0_OTG_V_TOTAL_INT_STATUS 0x1b36
+#define regOTG0_OTG_V_TOTAL_INT_STATUS_BASE_IDX 2
+#define regOTG0_OTG_VSYNC_NOM_INT_STATUS 0x1b37
+#define regOTG0_OTG_VSYNC_NOM_INT_STATUS_BASE_IDX 2
+#define regOTG0_OTG_V_BLANK_START_END 0x1b38
+#define regOTG0_OTG_V_BLANK_START_END_BASE_IDX 2
+#define regOTG0_OTG_V_SYNC_A 0x1b39
+#define regOTG0_OTG_V_SYNC_A_BASE_IDX 2
+#define regOTG0_OTG_V_SYNC_A_CNTL 0x1b3a
+#define regOTG0_OTG_V_SYNC_A_CNTL_BASE_IDX 2
+#define regOTG0_OTG_TRIGA_CNTL 0x1b3b
+#define regOTG0_OTG_TRIGA_CNTL_BASE_IDX 2
+#define regOTG0_OTG_TRIGA_MANUAL_TRIG 0x1b3c
+#define regOTG0_OTG_TRIGA_MANUAL_TRIG_BASE_IDX 2
+#define regOTG0_OTG_TRIGB_CNTL 0x1b3d
+#define regOTG0_OTG_TRIGB_CNTL_BASE_IDX 2
+#define regOTG0_OTG_TRIGB_MANUAL_TRIG 0x1b3e
+#define regOTG0_OTG_TRIGB_MANUAL_TRIG_BASE_IDX 2
+#define regOTG0_OTG_FORCE_COUNT_NOW_CNTL 0x1b3f
+#define regOTG0_OTG_FORCE_COUNT_NOW_CNTL_BASE_IDX 2
+#define regOTG0_OTG_STEREO_FORCE_NEXT_EYE 0x1b41
+#define regOTG0_OTG_STEREO_FORCE_NEXT_EYE_BASE_IDX 2
+#define regOTG0_OTG_CONTROL 0x1b43
+#define regOTG0_OTG_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_DLPC_CONTROL 0x1b44
+#define regOTG0_OTG_DLPC_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_INTERLACE_CONTROL 0x1b45
+#define regOTG0_OTG_INTERLACE_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_INTERLACE_STATUS 0x1b46
+#define regOTG0_OTG_INTERLACE_STATUS_BASE_IDX 2
+#define regOTG0_OTG_PIXEL_DATA_READBACK0 0x1b47
+#define regOTG0_OTG_PIXEL_DATA_READBACK0_BASE_IDX 2
+#define regOTG0_OTG_PIXEL_DATA_READBACK1 0x1b48
+#define regOTG0_OTG_PIXEL_DATA_READBACK1_BASE_IDX 2
+#define regOTG0_OTG_STATUS 0x1b49
+#define regOTG0_OTG_STATUS_BASE_IDX 2
+#define regOTG0_OTG_STATUS_POSITION 0x1b4a
+#define regOTG0_OTG_STATUS_POSITION_BASE_IDX 2
+#define regOTG0_OTG_LONG_VBLANK_STATUS 0x1b4b
+#define regOTG0_OTG_LONG_VBLANK_STATUS_BASE_IDX 2
+#define regOTG0_OTG_NOM_VERT_POSITION 0x1b4c
+#define regOTG0_OTG_NOM_VERT_POSITION_BASE_IDX 2
+#define regOTG0_OTG_STATUS_FRAME_COUNT 0x1b4d
+#define regOTG0_OTG_STATUS_FRAME_COUNT_BASE_IDX 2
+#define regOTG0_OTG_STATUS_VF_COUNT 0x1b4e
+#define regOTG0_OTG_STATUS_VF_COUNT_BASE_IDX 2
+#define regOTG0_OTG_STATUS_HV_COUNT 0x1b4f
+#define regOTG0_OTG_STATUS_HV_COUNT_BASE_IDX 2
+#define regOTG0_OTG_COUNT_CONTROL 0x1b50
+#define regOTG0_OTG_COUNT_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_COUNT_RESET 0x1b51
+#define regOTG0_OTG_COUNT_RESET_BASE_IDX 2
+#define regOTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1b52
+#define regOTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_BASE_IDX 2
+#define regOTG0_OTG_VERT_SYNC_CONTROL 0x1b53
+#define regOTG0_OTG_VERT_SYNC_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_STEREO_STATUS 0x1b54
+#define regOTG0_OTG_STEREO_STATUS_BASE_IDX 2
+#define regOTG0_OTG_STEREO_CONTROL 0x1b55
+#define regOTG0_OTG_STEREO_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_SNAPSHOT_STATUS 0x1b56
+#define regOTG0_OTG_SNAPSHOT_STATUS_BASE_IDX 2
+#define regOTG0_OTG_SNAPSHOT_CONTROL 0x1b57
+#define regOTG0_OTG_SNAPSHOT_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_SNAPSHOT_POSITION 0x1b58
+#define regOTG0_OTG_SNAPSHOT_POSITION_BASE_IDX 2
+#define regOTG0_OTG_SNAPSHOT_FRAME 0x1b59
+#define regOTG0_OTG_SNAPSHOT_FRAME_BASE_IDX 2
+#define regOTG0_OTG_INTERRUPT_CONTROL 0x1b5a
+#define regOTG0_OTG_INTERRUPT_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_UPDATE_LOCK 0x1b5b
+#define regOTG0_OTG_UPDATE_LOCK_BASE_IDX 2
+#define regOTG0_OTG_DOUBLE_BUFFER_CONTROL 0x1b5c
+#define regOTG0_OTG_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_MASTER_EN 0x1b5d
+#define regOTG0_OTG_MASTER_EN_BASE_IDX 2
+#define regOTG0_OTG_VERTICAL_INTERRUPT0_POSITION 0x1b5f
+#define regOTG0_OTG_VERTICAL_INTERRUPT0_POSITION_BASE_IDX 2
+#define regOTG0_OTG_VERTICAL_INTERRUPT0_CONTROL 0x1b60
+#define regOTG0_OTG_VERTICAL_INTERRUPT0_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_VERTICAL_INTERRUPT1_POSITION 0x1b61
+#define regOTG0_OTG_VERTICAL_INTERRUPT1_POSITION_BASE_IDX 2
+#define regOTG0_OTG_VERTICAL_INTERRUPT1_CONTROL 0x1b62
+#define regOTG0_OTG_VERTICAL_INTERRUPT1_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_VERTICAL_INTERRUPT2_POSITION 0x1b63
+#define regOTG0_OTG_VERTICAL_INTERRUPT2_POSITION_BASE_IDX 2
+#define regOTG0_OTG_VERTICAL_INTERRUPT2_CONTROL 0x1b64
+#define regOTG0_OTG_VERTICAL_INTERRUPT2_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_CRC_CNTL 0x1b65
+#define regOTG0_OTG_CRC_CNTL_BASE_IDX 2
+#define regOTG0_OTG_CRC0_WINDOWA_X_CONTROL 0x1b66
+#define regOTG0_OTG_CRC0_WINDOWA_X_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_CRC0_WINDOWA_Y_CONTROL 0x1b67
+#define regOTG0_OTG_CRC0_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_CRC0_WINDOWB_X_CONTROL 0x1b68
+#define regOTG0_OTG_CRC0_WINDOWB_X_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_CRC0_WINDOWB_Y_CONTROL 0x1b69
+#define regOTG0_OTG_CRC0_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_CRC0_DATA_RG 0x1b6a
+#define regOTG0_OTG_CRC0_DATA_RG_BASE_IDX 2
+#define regOTG0_OTG_CRC0_DATA_B 0x1b6b
+#define regOTG0_OTG_CRC0_DATA_B_BASE_IDX 2
+#define regOTG0_OTG_CRC1_WINDOWA_X_CONTROL 0x1b6c
+#define regOTG0_OTG_CRC1_WINDOWA_X_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_CRC1_WINDOWA_Y_CONTROL 0x1b6d
+#define regOTG0_OTG_CRC1_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_CRC1_WINDOWB_X_CONTROL 0x1b6e
+#define regOTG0_OTG_CRC1_WINDOWB_X_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_CRC1_WINDOWB_Y_CONTROL 0x1b6f
+#define regOTG0_OTG_CRC1_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_CRC1_DATA_RG 0x1b70
+#define regOTG0_OTG_CRC1_DATA_RG_BASE_IDX 2
+#define regOTG0_OTG_CRC1_DATA_B 0x1b71
+#define regOTG0_OTG_CRC1_DATA_B_BASE_IDX 2
+#define regOTG0_OTG_CRC2_DATA_RG 0x1b72
+#define regOTG0_OTG_CRC2_DATA_RG_BASE_IDX 2
+#define regOTG0_OTG_CRC2_DATA_B 0x1b73
+#define regOTG0_OTG_CRC2_DATA_B_BASE_IDX 2
+#define regOTG0_OTG_CRC3_DATA_RG 0x1b74
+#define regOTG0_OTG_CRC3_DATA_RG_BASE_IDX 2
+#define regOTG0_OTG_CRC3_DATA_B 0x1b75
+#define regOTG0_OTG_CRC3_DATA_B_BASE_IDX 2
+#define regOTG0_OTG_CRC_SIG_RED_GREEN_MASK 0x1b76
+#define regOTG0_OTG_CRC_SIG_RED_GREEN_MASK_BASE_IDX 2
+#define regOTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK 0x1b77
+#define regOTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK_BASE_IDX 2
+#define regOTG0_OTG_CRC0_WINDOWA_X_CONTROL_READBACK 0x1b78
+#define regOTG0_OTG_CRC0_WINDOWA_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG0_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK 0x1b79
+#define regOTG0_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG0_OTG_CRC0_WINDOWB_X_CONTROL_READBACK 0x1b7a
+#define regOTG0_OTG_CRC0_WINDOWB_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG0_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK 0x1b7b
+#define regOTG0_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG0_OTG_CRC1_WINDOWA_X_CONTROL_READBACK 0x1b7c
+#define regOTG0_OTG_CRC1_WINDOWA_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG0_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK 0x1b7d
+#define regOTG0_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK 0x1b7e
+#define regOTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK 0x1b7f
+#define regOTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG0_OTG_STATIC_SCREEN_CONTROL 0x1b80
+#define regOTG0_OTG_STATIC_SCREEN_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_3D_STRUCTURE_CONTROL 0x1b81
+#define regOTG0_OTG_3D_STRUCTURE_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_GSL_VSYNC_GAP 0x1b82
+#define regOTG0_OTG_GSL_VSYNC_GAP_BASE_IDX 2
+#define regOTG0_OTG_MASTER_UPDATE_MODE 0x1b83
+#define regOTG0_OTG_MASTER_UPDATE_MODE_BASE_IDX 2
+#define regOTG0_OTG_CLOCK_CONTROL 0x1b84
+#define regOTG0_OTG_CLOCK_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_VSTARTUP_PARAM 0x1b85
+#define regOTG0_OTG_VSTARTUP_PARAM_BASE_IDX 2
+#define regOTG0_OTG_VUPDATE_PARAM 0x1b86
+#define regOTG0_OTG_VUPDATE_PARAM_BASE_IDX 2
+#define regOTG0_OTG_VREADY_PARAM 0x1b87
+#define regOTG0_OTG_VREADY_PARAM_BASE_IDX 2
+#define regOTG0_OTG_GLOBAL_SYNC_STATUS 0x1b88
+#define regOTG0_OTG_GLOBAL_SYNC_STATUS_BASE_IDX 2
+#define regOTG0_OTG_MASTER_UPDATE_LOCK 0x1b89
+#define regOTG0_OTG_MASTER_UPDATE_LOCK_BASE_IDX 2
+#define regOTG0_OTG_GSL_CONTROL 0x1b8a
+#define regOTG0_OTG_GSL_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_GSL_WINDOW_X 0x1b8b
+#define regOTG0_OTG_GSL_WINDOW_X_BASE_IDX 2
+#define regOTG0_OTG_GSL_WINDOW_Y 0x1b8c
+#define regOTG0_OTG_GSL_WINDOW_Y_BASE_IDX 2
+#define regOTG0_OTG_VUPDATE_KEEPOUT 0x1b8d
+#define regOTG0_OTG_VUPDATE_KEEPOUT_BASE_IDX 2
+#define regOTG0_OTG_GLOBAL_CONTROL0 0x1b8e
+#define regOTG0_OTG_GLOBAL_CONTROL0_BASE_IDX 2
+#define regOTG0_OTG_GLOBAL_CONTROL1 0x1b8f
+#define regOTG0_OTG_GLOBAL_CONTROL1_BASE_IDX 2
+#define regOTG0_OTG_GLOBAL_CONTROL2 0x1b90
+#define regOTG0_OTG_GLOBAL_CONTROL2_BASE_IDX 2
+#define regOTG0_OTG_GLOBAL_CONTROL3 0x1b91
+#define regOTG0_OTG_GLOBAL_CONTROL3_BASE_IDX 2
+#define regOTG0_OTG_GLOBAL_CONTROL4 0x1b92
+#define regOTG0_OTG_GLOBAL_CONTROL4_BASE_IDX 2
+#define regOTG0_OTG_TRIG_MANUAL_CONTROL 0x1b93
+#define regOTG0_OTG_TRIG_MANUAL_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_DRR_TIMING_INT_STATUS 0x1b95
+#define regOTG0_OTG_DRR_TIMING_INT_STATUS_BASE_IDX 2
+#define regOTG0_OTG_DRR_V_TOTAL_REACH_RANGE 0x1b96
+#define regOTG0_OTG_DRR_V_TOTAL_REACH_RANGE_BASE_IDX 2
+#define regOTG0_OTG_DRR_V_TOTAL_CHANGE 0x1b97
+#define regOTG0_OTG_DRR_V_TOTAL_CHANGE_BASE_IDX 2
+#define regOTG0_OTG_DRR_TRIGGER_WINDOW 0x1b98
+#define regOTG0_OTG_DRR_TRIGGER_WINDOW_BASE_IDX 2
+#define regOTG0_OTG_DRR_CONTROL 0x1b99
+#define regOTG0_OTG_DRR_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_DRR_CONTOL2 0x1b9a
+#define regOTG0_OTG_DRR_CONTOL2_BASE_IDX 2
+#define regOTG0_OTG_M_CONST_DTO0 0x1b9b
+#define regOTG0_OTG_M_CONST_DTO0_BASE_IDX 2
+#define regOTG0_OTG_M_CONST_DTO1 0x1b9c
+#define regOTG0_OTG_M_CONST_DTO1_BASE_IDX 2
+#define regOTG0_OTG_REQUEST_CONTROL 0x1b9d
+#define regOTG0_OTG_REQUEST_CONTROL_BASE_IDX 2
+#define regOTG0_OTG_DSC_START_POSITION 0x1b9e
+#define regOTG0_OTG_DSC_START_POSITION_BASE_IDX 2
+#define regOTG0_OTG_PIPE_UPDATE_STATUS 0x1b9f
+#define regOTG0_OTG_PIPE_UPDATE_STATUS_BASE_IDX 2
+#define regOTG0_OTG_SPARE_REGISTER 0x1ba0
+#define regOTG0_OTG_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_otg1_dispdec
+// base address: 0x200
+#define regOTG1_OTG_H_TOTAL 0x1baa
+#define regOTG1_OTG_H_TOTAL_BASE_IDX 2
+#define regOTG1_OTG_H_BLANK_START_END 0x1bab
+#define regOTG1_OTG_H_BLANK_START_END_BASE_IDX 2
+#define regOTG1_OTG_H_SYNC_A 0x1bac
+#define regOTG1_OTG_H_SYNC_A_BASE_IDX 2
+#define regOTG1_OTG_H_SYNC_A_CNTL 0x1bad
+#define regOTG1_OTG_H_SYNC_A_CNTL_BASE_IDX 2
+#define regOTG1_OTG_H_TIMING_CNTL 0x1bae
+#define regOTG1_OTG_H_TIMING_CNTL_BASE_IDX 2
+#define regOTG1_OTG_V_TOTAL 0x1baf
+#define regOTG1_OTG_V_TOTAL_BASE_IDX 2
+#define regOTG1_OTG_V_TOTAL_MIN 0x1bb0
+#define regOTG1_OTG_V_TOTAL_MIN_BASE_IDX 2
+#define regOTG1_OTG_V_TOTAL_MAX 0x1bb1
+#define regOTG1_OTG_V_TOTAL_MAX_BASE_IDX 2
+#define regOTG1_OTG_V_TOTAL_MID 0x1bb2
+#define regOTG1_OTG_V_TOTAL_MID_BASE_IDX 2
+#define regOTG1_OTG_V_TOTAL_CONTROL 0x1bb3
+#define regOTG1_OTG_V_TOTAL_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_V_COUNT_STOP_CONTROL 0x1bb4
+#define regOTG1_OTG_V_COUNT_STOP_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_V_COUNT_STOP_CONTROL2 0x1bb5
+#define regOTG1_OTG_V_COUNT_STOP_CONTROL2_BASE_IDX 2
+#define regOTG1_OTG_V_TOTAL_INT_STATUS 0x1bb6
+#define regOTG1_OTG_V_TOTAL_INT_STATUS_BASE_IDX 2
+#define regOTG1_OTG_VSYNC_NOM_INT_STATUS 0x1bb7
+#define regOTG1_OTG_VSYNC_NOM_INT_STATUS_BASE_IDX 2
+#define regOTG1_OTG_V_BLANK_START_END 0x1bb8
+#define regOTG1_OTG_V_BLANK_START_END_BASE_IDX 2
+#define regOTG1_OTG_V_SYNC_A 0x1bb9
+#define regOTG1_OTG_V_SYNC_A_BASE_IDX 2
+#define regOTG1_OTG_V_SYNC_A_CNTL 0x1bba
+#define regOTG1_OTG_V_SYNC_A_CNTL_BASE_IDX 2
+#define regOTG1_OTG_TRIGA_CNTL 0x1bbb
+#define regOTG1_OTG_TRIGA_CNTL_BASE_IDX 2
+#define regOTG1_OTG_TRIGA_MANUAL_TRIG 0x1bbc
+#define regOTG1_OTG_TRIGA_MANUAL_TRIG_BASE_IDX 2
+#define regOTG1_OTG_TRIGB_CNTL 0x1bbd
+#define regOTG1_OTG_TRIGB_CNTL_BASE_IDX 2
+#define regOTG1_OTG_TRIGB_MANUAL_TRIG 0x1bbe
+#define regOTG1_OTG_TRIGB_MANUAL_TRIG_BASE_IDX 2
+#define regOTG1_OTG_FORCE_COUNT_NOW_CNTL 0x1bbf
+#define regOTG1_OTG_FORCE_COUNT_NOW_CNTL_BASE_IDX 2
+#define regOTG1_OTG_STEREO_FORCE_NEXT_EYE 0x1bc1
+#define regOTG1_OTG_STEREO_FORCE_NEXT_EYE_BASE_IDX 2
+#define regOTG1_OTG_CONTROL 0x1bc3
+#define regOTG1_OTG_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_DLPC_CONTROL 0x1bc4
+#define regOTG1_OTG_DLPC_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_INTERLACE_CONTROL 0x1bc5
+#define regOTG1_OTG_INTERLACE_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_INTERLACE_STATUS 0x1bc6
+#define regOTG1_OTG_INTERLACE_STATUS_BASE_IDX 2
+#define regOTG1_OTG_PIXEL_DATA_READBACK0 0x1bc7
+#define regOTG1_OTG_PIXEL_DATA_READBACK0_BASE_IDX 2
+#define regOTG1_OTG_PIXEL_DATA_READBACK1 0x1bc8
+#define regOTG1_OTG_PIXEL_DATA_READBACK1_BASE_IDX 2
+#define regOTG1_OTG_STATUS 0x1bc9
+#define regOTG1_OTG_STATUS_BASE_IDX 2
+#define regOTG1_OTG_STATUS_POSITION 0x1bca
+#define regOTG1_OTG_STATUS_POSITION_BASE_IDX 2
+#define regOTG1_OTG_LONG_VBLANK_STATUS 0x1bcb
+#define regOTG1_OTG_LONG_VBLANK_STATUS_BASE_IDX 2
+#define regOTG1_OTG_NOM_VERT_POSITION 0x1bcc
+#define regOTG1_OTG_NOM_VERT_POSITION_BASE_IDX 2
+#define regOTG1_OTG_STATUS_FRAME_COUNT 0x1bcd
+#define regOTG1_OTG_STATUS_FRAME_COUNT_BASE_IDX 2
+#define regOTG1_OTG_STATUS_VF_COUNT 0x1bce
+#define regOTG1_OTG_STATUS_VF_COUNT_BASE_IDX 2
+#define regOTG1_OTG_STATUS_HV_COUNT 0x1bcf
+#define regOTG1_OTG_STATUS_HV_COUNT_BASE_IDX 2
+#define regOTG1_OTG_COUNT_CONTROL 0x1bd0
+#define regOTG1_OTG_COUNT_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_COUNT_RESET 0x1bd1
+#define regOTG1_OTG_COUNT_RESET_BASE_IDX 2
+#define regOTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1bd2
+#define regOTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_BASE_IDX 2
+#define regOTG1_OTG_VERT_SYNC_CONTROL 0x1bd3
+#define regOTG1_OTG_VERT_SYNC_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_STEREO_STATUS 0x1bd4
+#define regOTG1_OTG_STEREO_STATUS_BASE_IDX 2
+#define regOTG1_OTG_STEREO_CONTROL 0x1bd5
+#define regOTG1_OTG_STEREO_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_SNAPSHOT_STATUS 0x1bd6
+#define regOTG1_OTG_SNAPSHOT_STATUS_BASE_IDX 2
+#define regOTG1_OTG_SNAPSHOT_CONTROL 0x1bd7
+#define regOTG1_OTG_SNAPSHOT_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_SNAPSHOT_POSITION 0x1bd8
+#define regOTG1_OTG_SNAPSHOT_POSITION_BASE_IDX 2
+#define regOTG1_OTG_SNAPSHOT_FRAME 0x1bd9
+#define regOTG1_OTG_SNAPSHOT_FRAME_BASE_IDX 2
+#define regOTG1_OTG_INTERRUPT_CONTROL 0x1bda
+#define regOTG1_OTG_INTERRUPT_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_UPDATE_LOCK 0x1bdb
+#define regOTG1_OTG_UPDATE_LOCK_BASE_IDX 2
+#define regOTG1_OTG_DOUBLE_BUFFER_CONTROL 0x1bdc
+#define regOTG1_OTG_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_MASTER_EN 0x1bdd
+#define regOTG1_OTG_MASTER_EN_BASE_IDX 2
+#define regOTG1_OTG_VERTICAL_INTERRUPT0_POSITION 0x1bdf
+#define regOTG1_OTG_VERTICAL_INTERRUPT0_POSITION_BASE_IDX 2
+#define regOTG1_OTG_VERTICAL_INTERRUPT0_CONTROL 0x1be0
+#define regOTG1_OTG_VERTICAL_INTERRUPT0_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_VERTICAL_INTERRUPT1_POSITION 0x1be1
+#define regOTG1_OTG_VERTICAL_INTERRUPT1_POSITION_BASE_IDX 2
+#define regOTG1_OTG_VERTICAL_INTERRUPT1_CONTROL 0x1be2
+#define regOTG1_OTG_VERTICAL_INTERRUPT1_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_VERTICAL_INTERRUPT2_POSITION 0x1be3
+#define regOTG1_OTG_VERTICAL_INTERRUPT2_POSITION_BASE_IDX 2
+#define regOTG1_OTG_VERTICAL_INTERRUPT2_CONTROL 0x1be4
+#define regOTG1_OTG_VERTICAL_INTERRUPT2_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_CRC_CNTL 0x1be5
+#define regOTG1_OTG_CRC_CNTL_BASE_IDX 2
+#define regOTG1_OTG_CRC0_WINDOWA_X_CONTROL 0x1be6
+#define regOTG1_OTG_CRC0_WINDOWA_X_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_CRC0_WINDOWA_Y_CONTROL 0x1be7
+#define regOTG1_OTG_CRC0_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_CRC0_WINDOWB_X_CONTROL 0x1be8
+#define regOTG1_OTG_CRC0_WINDOWB_X_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_CRC0_WINDOWB_Y_CONTROL 0x1be9
+#define regOTG1_OTG_CRC0_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_CRC0_DATA_RG 0x1bea
+#define regOTG1_OTG_CRC0_DATA_RG_BASE_IDX 2
+#define regOTG1_OTG_CRC0_DATA_B 0x1beb
+#define regOTG1_OTG_CRC0_DATA_B_BASE_IDX 2
+#define regOTG1_OTG_CRC1_WINDOWA_X_CONTROL 0x1bec
+#define regOTG1_OTG_CRC1_WINDOWA_X_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_CRC1_WINDOWA_Y_CONTROL 0x1bed
+#define regOTG1_OTG_CRC1_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_CRC1_WINDOWB_X_CONTROL 0x1bee
+#define regOTG1_OTG_CRC1_WINDOWB_X_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_CRC1_WINDOWB_Y_CONTROL 0x1bef
+#define regOTG1_OTG_CRC1_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_CRC1_DATA_RG 0x1bf0
+#define regOTG1_OTG_CRC1_DATA_RG_BASE_IDX 2
+#define regOTG1_OTG_CRC1_DATA_B 0x1bf1
+#define regOTG1_OTG_CRC1_DATA_B_BASE_IDX 2
+#define regOTG1_OTG_CRC2_DATA_RG 0x1bf2
+#define regOTG1_OTG_CRC2_DATA_RG_BASE_IDX 2
+#define regOTG1_OTG_CRC2_DATA_B 0x1bf3
+#define regOTG1_OTG_CRC2_DATA_B_BASE_IDX 2
+#define regOTG1_OTG_CRC3_DATA_RG 0x1bf4
+#define regOTG1_OTG_CRC3_DATA_RG_BASE_IDX 2
+#define regOTG1_OTG_CRC3_DATA_B 0x1bf5
+#define regOTG1_OTG_CRC3_DATA_B_BASE_IDX 2
+#define regOTG1_OTG_CRC_SIG_RED_GREEN_MASK 0x1bf6
+#define regOTG1_OTG_CRC_SIG_RED_GREEN_MASK_BASE_IDX 2
+#define regOTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK 0x1bf7
+#define regOTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK_BASE_IDX 2
+#define regOTG1_OTG_CRC0_WINDOWA_X_CONTROL_READBACK 0x1bf8
+#define regOTG1_OTG_CRC0_WINDOWA_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG1_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK 0x1bf9
+#define regOTG1_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG1_OTG_CRC0_WINDOWB_X_CONTROL_READBACK 0x1bfa
+#define regOTG1_OTG_CRC0_WINDOWB_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG1_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK 0x1bfb
+#define regOTG1_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG1_OTG_CRC1_WINDOWA_X_CONTROL_READBACK 0x1bfc
+#define regOTG1_OTG_CRC1_WINDOWA_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG1_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK 0x1bfd
+#define regOTG1_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG1_OTG_CRC1_WINDOWB_X_CONTROL_READBACK 0x1bfe
+#define regOTG1_OTG_CRC1_WINDOWB_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG1_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK 0x1bff
+#define regOTG1_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG1_OTG_STATIC_SCREEN_CONTROL 0x1c00
+#define regOTG1_OTG_STATIC_SCREEN_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_3D_STRUCTURE_CONTROL 0x1c01
+#define regOTG1_OTG_3D_STRUCTURE_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_GSL_VSYNC_GAP 0x1c02
+#define regOTG1_OTG_GSL_VSYNC_GAP_BASE_IDX 2
+#define regOTG1_OTG_MASTER_UPDATE_MODE 0x1c03
+#define regOTG1_OTG_MASTER_UPDATE_MODE_BASE_IDX 2
+#define regOTG1_OTG_CLOCK_CONTROL 0x1c04
+#define regOTG1_OTG_CLOCK_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_VSTARTUP_PARAM 0x1c05
+#define regOTG1_OTG_VSTARTUP_PARAM_BASE_IDX 2
+#define regOTG1_OTG_VUPDATE_PARAM 0x1c06
+#define regOTG1_OTG_VUPDATE_PARAM_BASE_IDX 2
+#define regOTG1_OTG_VREADY_PARAM 0x1c07
+#define regOTG1_OTG_VREADY_PARAM_BASE_IDX 2
+#define regOTG1_OTG_GLOBAL_SYNC_STATUS 0x1c08
+#define regOTG1_OTG_GLOBAL_SYNC_STATUS_BASE_IDX 2
+#define regOTG1_OTG_MASTER_UPDATE_LOCK 0x1c09
+#define regOTG1_OTG_MASTER_UPDATE_LOCK_BASE_IDX 2
+#define regOTG1_OTG_GSL_CONTROL 0x1c0a
+#define regOTG1_OTG_GSL_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_GSL_WINDOW_X 0x1c0b
+#define regOTG1_OTG_GSL_WINDOW_X_BASE_IDX 2
+#define regOTG1_OTG_GSL_WINDOW_Y 0x1c0c
+#define regOTG1_OTG_GSL_WINDOW_Y_BASE_IDX 2
+#define regOTG1_OTG_VUPDATE_KEEPOUT 0x1c0d
+#define regOTG1_OTG_VUPDATE_KEEPOUT_BASE_IDX 2
+#define regOTG1_OTG_GLOBAL_CONTROL0 0x1c0e
+#define regOTG1_OTG_GLOBAL_CONTROL0_BASE_IDX 2
+#define regOTG1_OTG_GLOBAL_CONTROL1 0x1c0f
+#define regOTG1_OTG_GLOBAL_CONTROL1_BASE_IDX 2
+#define regOTG1_OTG_GLOBAL_CONTROL2 0x1c10
+#define regOTG1_OTG_GLOBAL_CONTROL2_BASE_IDX 2
+#define regOTG1_OTG_GLOBAL_CONTROL3 0x1c11
+#define regOTG1_OTG_GLOBAL_CONTROL3_BASE_IDX 2
+#define regOTG1_OTG_GLOBAL_CONTROL4 0x1c12
+#define regOTG1_OTG_GLOBAL_CONTROL4_BASE_IDX 2
+#define regOTG1_OTG_TRIG_MANUAL_CONTROL 0x1c13
+#define regOTG1_OTG_TRIG_MANUAL_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_DRR_TIMING_INT_STATUS 0x1c15
+#define regOTG1_OTG_DRR_TIMING_INT_STATUS_BASE_IDX 2
+#define regOTG1_OTG_DRR_V_TOTAL_REACH_RANGE 0x1c16
+#define regOTG1_OTG_DRR_V_TOTAL_REACH_RANGE_BASE_IDX 2
+#define regOTG1_OTG_DRR_V_TOTAL_CHANGE 0x1c17
+#define regOTG1_OTG_DRR_V_TOTAL_CHANGE_BASE_IDX 2
+#define regOTG1_OTG_DRR_TRIGGER_WINDOW 0x1c18
+#define regOTG1_OTG_DRR_TRIGGER_WINDOW_BASE_IDX 2
+#define regOTG1_OTG_DRR_CONTROL 0x1c19
+#define regOTG1_OTG_DRR_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_DRR_CONTOL2 0x1c1a
+#define regOTG1_OTG_DRR_CONTOL2_BASE_IDX 2
+#define regOTG1_OTG_M_CONST_DTO0 0x1c1b
+#define regOTG1_OTG_M_CONST_DTO0_BASE_IDX 2
+#define regOTG1_OTG_M_CONST_DTO1 0x1c1c
+#define regOTG1_OTG_M_CONST_DTO1_BASE_IDX 2
+#define regOTG1_OTG_REQUEST_CONTROL 0x1c1d
+#define regOTG1_OTG_REQUEST_CONTROL_BASE_IDX 2
+#define regOTG1_OTG_DSC_START_POSITION 0x1c1e
+#define regOTG1_OTG_DSC_START_POSITION_BASE_IDX 2
+#define regOTG1_OTG_PIPE_UPDATE_STATUS 0x1c1f
+#define regOTG1_OTG_PIPE_UPDATE_STATUS_BASE_IDX 2
+#define regOTG1_OTG_SPARE_REGISTER 0x1c20
+#define regOTG1_OTG_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_otg2_dispdec
+// base address: 0x400
+#define regOTG2_OTG_H_TOTAL 0x1c2a
+#define regOTG2_OTG_H_TOTAL_BASE_IDX 2
+#define regOTG2_OTG_H_BLANK_START_END 0x1c2b
+#define regOTG2_OTG_H_BLANK_START_END_BASE_IDX 2
+#define regOTG2_OTG_H_SYNC_A 0x1c2c
+#define regOTG2_OTG_H_SYNC_A_BASE_IDX 2
+#define regOTG2_OTG_H_SYNC_A_CNTL 0x1c2d
+#define regOTG2_OTG_H_SYNC_A_CNTL_BASE_IDX 2
+#define regOTG2_OTG_H_TIMING_CNTL 0x1c2e
+#define regOTG2_OTG_H_TIMING_CNTL_BASE_IDX 2
+#define regOTG2_OTG_V_TOTAL 0x1c2f
+#define regOTG2_OTG_V_TOTAL_BASE_IDX 2
+#define regOTG2_OTG_V_TOTAL_MIN 0x1c30
+#define regOTG2_OTG_V_TOTAL_MIN_BASE_IDX 2
+#define regOTG2_OTG_V_TOTAL_MAX 0x1c31
+#define regOTG2_OTG_V_TOTAL_MAX_BASE_IDX 2
+#define regOTG2_OTG_V_TOTAL_MID 0x1c32
+#define regOTG2_OTG_V_TOTAL_MID_BASE_IDX 2
+#define regOTG2_OTG_V_TOTAL_CONTROL 0x1c33
+#define regOTG2_OTG_V_TOTAL_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_V_COUNT_STOP_CONTROL 0x1c34
+#define regOTG2_OTG_V_COUNT_STOP_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_V_COUNT_STOP_CONTROL2 0x1c35
+#define regOTG2_OTG_V_COUNT_STOP_CONTROL2_BASE_IDX 2
+#define regOTG2_OTG_V_TOTAL_INT_STATUS 0x1c36
+#define regOTG2_OTG_V_TOTAL_INT_STATUS_BASE_IDX 2
+#define regOTG2_OTG_VSYNC_NOM_INT_STATUS 0x1c37
+#define regOTG2_OTG_VSYNC_NOM_INT_STATUS_BASE_IDX 2
+#define regOTG2_OTG_V_BLANK_START_END 0x1c38
+#define regOTG2_OTG_V_BLANK_START_END_BASE_IDX 2
+#define regOTG2_OTG_V_SYNC_A 0x1c39
+#define regOTG2_OTG_V_SYNC_A_BASE_IDX 2
+#define regOTG2_OTG_V_SYNC_A_CNTL 0x1c3a
+#define regOTG2_OTG_V_SYNC_A_CNTL_BASE_IDX 2
+#define regOTG2_OTG_TRIGA_CNTL 0x1c3b
+#define regOTG2_OTG_TRIGA_CNTL_BASE_IDX 2
+#define regOTG2_OTG_TRIGA_MANUAL_TRIG 0x1c3c
+#define regOTG2_OTG_TRIGA_MANUAL_TRIG_BASE_IDX 2
+#define regOTG2_OTG_TRIGB_CNTL 0x1c3d
+#define regOTG2_OTG_TRIGB_CNTL_BASE_IDX 2
+#define regOTG2_OTG_TRIGB_MANUAL_TRIG 0x1c3e
+#define regOTG2_OTG_TRIGB_MANUAL_TRIG_BASE_IDX 2
+#define regOTG2_OTG_FORCE_COUNT_NOW_CNTL 0x1c3f
+#define regOTG2_OTG_FORCE_COUNT_NOW_CNTL_BASE_IDX 2
+#define regOTG2_OTG_STEREO_FORCE_NEXT_EYE 0x1c41
+#define regOTG2_OTG_STEREO_FORCE_NEXT_EYE_BASE_IDX 2
+#define regOTG2_OTG_CONTROL 0x1c43
+#define regOTG2_OTG_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_DLPC_CONTROL 0x1c44
+#define regOTG2_OTG_DLPC_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_INTERLACE_CONTROL 0x1c45
+#define regOTG2_OTG_INTERLACE_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_INTERLACE_STATUS 0x1c46
+#define regOTG2_OTG_INTERLACE_STATUS_BASE_IDX 2
+#define regOTG2_OTG_PIXEL_DATA_READBACK0 0x1c47
+#define regOTG2_OTG_PIXEL_DATA_READBACK0_BASE_IDX 2
+#define regOTG2_OTG_PIXEL_DATA_READBACK1 0x1c48
+#define regOTG2_OTG_PIXEL_DATA_READBACK1_BASE_IDX 2
+#define regOTG2_OTG_STATUS 0x1c49
+#define regOTG2_OTG_STATUS_BASE_IDX 2
+#define regOTG2_OTG_STATUS_POSITION 0x1c4a
+#define regOTG2_OTG_STATUS_POSITION_BASE_IDX 2
+#define regOTG2_OTG_LONG_VBLANK_STATUS 0x1c4b
+#define regOTG2_OTG_LONG_VBLANK_STATUS_BASE_IDX 2
+#define regOTG2_OTG_NOM_VERT_POSITION 0x1c4c
+#define regOTG2_OTG_NOM_VERT_POSITION_BASE_IDX 2
+#define regOTG2_OTG_STATUS_FRAME_COUNT 0x1c4d
+#define regOTG2_OTG_STATUS_FRAME_COUNT_BASE_IDX 2
+#define regOTG2_OTG_STATUS_VF_COUNT 0x1c4e
+#define regOTG2_OTG_STATUS_VF_COUNT_BASE_IDX 2
+#define regOTG2_OTG_STATUS_HV_COUNT 0x1c4f
+#define regOTG2_OTG_STATUS_HV_COUNT_BASE_IDX 2
+#define regOTG2_OTG_COUNT_CONTROL 0x1c50
+#define regOTG2_OTG_COUNT_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_COUNT_RESET 0x1c51
+#define regOTG2_OTG_COUNT_RESET_BASE_IDX 2
+#define regOTG2_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1c52
+#define regOTG2_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_BASE_IDX 2
+#define regOTG2_OTG_VERT_SYNC_CONTROL 0x1c53
+#define regOTG2_OTG_VERT_SYNC_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_STEREO_STATUS 0x1c54
+#define regOTG2_OTG_STEREO_STATUS_BASE_IDX 2
+#define regOTG2_OTG_STEREO_CONTROL 0x1c55
+#define regOTG2_OTG_STEREO_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_SNAPSHOT_STATUS 0x1c56
+#define regOTG2_OTG_SNAPSHOT_STATUS_BASE_IDX 2
+#define regOTG2_OTG_SNAPSHOT_CONTROL 0x1c57
+#define regOTG2_OTG_SNAPSHOT_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_SNAPSHOT_POSITION 0x1c58
+#define regOTG2_OTG_SNAPSHOT_POSITION_BASE_IDX 2
+#define regOTG2_OTG_SNAPSHOT_FRAME 0x1c59
+#define regOTG2_OTG_SNAPSHOT_FRAME_BASE_IDX 2
+#define regOTG2_OTG_INTERRUPT_CONTROL 0x1c5a
+#define regOTG2_OTG_INTERRUPT_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_UPDATE_LOCK 0x1c5b
+#define regOTG2_OTG_UPDATE_LOCK_BASE_IDX 2
+#define regOTG2_OTG_DOUBLE_BUFFER_CONTROL 0x1c5c
+#define regOTG2_OTG_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_MASTER_EN 0x1c5d
+#define regOTG2_OTG_MASTER_EN_BASE_IDX 2
+#define regOTG2_OTG_VERTICAL_INTERRUPT0_POSITION 0x1c5f
+#define regOTG2_OTG_VERTICAL_INTERRUPT0_POSITION_BASE_IDX 2
+#define regOTG2_OTG_VERTICAL_INTERRUPT0_CONTROL 0x1c60
+#define regOTG2_OTG_VERTICAL_INTERRUPT0_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_VERTICAL_INTERRUPT1_POSITION 0x1c61
+#define regOTG2_OTG_VERTICAL_INTERRUPT1_POSITION_BASE_IDX 2
+#define regOTG2_OTG_VERTICAL_INTERRUPT1_CONTROL 0x1c62
+#define regOTG2_OTG_VERTICAL_INTERRUPT1_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_VERTICAL_INTERRUPT2_POSITION 0x1c63
+#define regOTG2_OTG_VERTICAL_INTERRUPT2_POSITION_BASE_IDX 2
+#define regOTG2_OTG_VERTICAL_INTERRUPT2_CONTROL 0x1c64
+#define regOTG2_OTG_VERTICAL_INTERRUPT2_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_CRC_CNTL 0x1c65
+#define regOTG2_OTG_CRC_CNTL_BASE_IDX 2
+#define regOTG2_OTG_CRC0_WINDOWA_X_CONTROL 0x1c66
+#define regOTG2_OTG_CRC0_WINDOWA_X_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_CRC0_WINDOWA_Y_CONTROL 0x1c67
+#define regOTG2_OTG_CRC0_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_CRC0_WINDOWB_X_CONTROL 0x1c68
+#define regOTG2_OTG_CRC0_WINDOWB_X_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_CRC0_WINDOWB_Y_CONTROL 0x1c69
+#define regOTG2_OTG_CRC0_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_CRC0_DATA_RG 0x1c6a
+#define regOTG2_OTG_CRC0_DATA_RG_BASE_IDX 2
+#define regOTG2_OTG_CRC0_DATA_B 0x1c6b
+#define regOTG2_OTG_CRC0_DATA_B_BASE_IDX 2
+#define regOTG2_OTG_CRC1_WINDOWA_X_CONTROL 0x1c6c
+#define regOTG2_OTG_CRC1_WINDOWA_X_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_CRC1_WINDOWA_Y_CONTROL 0x1c6d
+#define regOTG2_OTG_CRC1_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_CRC1_WINDOWB_X_CONTROL 0x1c6e
+#define regOTG2_OTG_CRC1_WINDOWB_X_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_CRC1_WINDOWB_Y_CONTROL 0x1c6f
+#define regOTG2_OTG_CRC1_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_CRC1_DATA_RG 0x1c70
+#define regOTG2_OTG_CRC1_DATA_RG_BASE_IDX 2
+#define regOTG2_OTG_CRC1_DATA_B 0x1c71
+#define regOTG2_OTG_CRC1_DATA_B_BASE_IDX 2
+#define regOTG2_OTG_CRC2_DATA_RG 0x1c72
+#define regOTG2_OTG_CRC2_DATA_RG_BASE_IDX 2
+#define regOTG2_OTG_CRC2_DATA_B 0x1c73
+#define regOTG2_OTG_CRC2_DATA_B_BASE_IDX 2
+#define regOTG2_OTG_CRC3_DATA_RG 0x1c74
+#define regOTG2_OTG_CRC3_DATA_RG_BASE_IDX 2
+#define regOTG2_OTG_CRC3_DATA_B 0x1c75
+#define regOTG2_OTG_CRC3_DATA_B_BASE_IDX 2
+#define regOTG2_OTG_CRC_SIG_RED_GREEN_MASK 0x1c76
+#define regOTG2_OTG_CRC_SIG_RED_GREEN_MASK_BASE_IDX 2
+#define regOTG2_OTG_CRC_SIG_BLUE_CONTROL_MASK 0x1c77
+#define regOTG2_OTG_CRC_SIG_BLUE_CONTROL_MASK_BASE_IDX 2
+#define regOTG2_OTG_CRC0_WINDOWA_X_CONTROL_READBACK 0x1c78
+#define regOTG2_OTG_CRC0_WINDOWA_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG2_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK 0x1c79
+#define regOTG2_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG2_OTG_CRC0_WINDOWB_X_CONTROL_READBACK 0x1c7a
+#define regOTG2_OTG_CRC0_WINDOWB_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG2_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK 0x1c7b
+#define regOTG2_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG2_OTG_CRC1_WINDOWA_X_CONTROL_READBACK 0x1c7c
+#define regOTG2_OTG_CRC1_WINDOWA_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG2_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK 0x1c7d
+#define regOTG2_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG2_OTG_CRC1_WINDOWB_X_CONTROL_READBACK 0x1c7e
+#define regOTG2_OTG_CRC1_WINDOWB_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG2_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK 0x1c7f
+#define regOTG2_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG2_OTG_STATIC_SCREEN_CONTROL 0x1c80
+#define regOTG2_OTG_STATIC_SCREEN_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_3D_STRUCTURE_CONTROL 0x1c81
+#define regOTG2_OTG_3D_STRUCTURE_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_GSL_VSYNC_GAP 0x1c82
+#define regOTG2_OTG_GSL_VSYNC_GAP_BASE_IDX 2
+#define regOTG2_OTG_MASTER_UPDATE_MODE 0x1c83
+#define regOTG2_OTG_MASTER_UPDATE_MODE_BASE_IDX 2
+#define regOTG2_OTG_CLOCK_CONTROL 0x1c84
+#define regOTG2_OTG_CLOCK_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_VSTARTUP_PARAM 0x1c85
+#define regOTG2_OTG_VSTARTUP_PARAM_BASE_IDX 2
+#define regOTG2_OTG_VUPDATE_PARAM 0x1c86
+#define regOTG2_OTG_VUPDATE_PARAM_BASE_IDX 2
+#define regOTG2_OTG_VREADY_PARAM 0x1c87
+#define regOTG2_OTG_VREADY_PARAM_BASE_IDX 2
+#define regOTG2_OTG_GLOBAL_SYNC_STATUS 0x1c88
+#define regOTG2_OTG_GLOBAL_SYNC_STATUS_BASE_IDX 2
+#define regOTG2_OTG_MASTER_UPDATE_LOCK 0x1c89
+#define regOTG2_OTG_MASTER_UPDATE_LOCK_BASE_IDX 2
+#define regOTG2_OTG_GSL_CONTROL 0x1c8a
+#define regOTG2_OTG_GSL_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_GSL_WINDOW_X 0x1c8b
+#define regOTG2_OTG_GSL_WINDOW_X_BASE_IDX 2
+#define regOTG2_OTG_GSL_WINDOW_Y 0x1c8c
+#define regOTG2_OTG_GSL_WINDOW_Y_BASE_IDX 2
+#define regOTG2_OTG_VUPDATE_KEEPOUT 0x1c8d
+#define regOTG2_OTG_VUPDATE_KEEPOUT_BASE_IDX 2
+#define regOTG2_OTG_GLOBAL_CONTROL0 0x1c8e
+#define regOTG2_OTG_GLOBAL_CONTROL0_BASE_IDX 2
+#define regOTG2_OTG_GLOBAL_CONTROL1 0x1c8f
+#define regOTG2_OTG_GLOBAL_CONTROL1_BASE_IDX 2
+#define regOTG2_OTG_GLOBAL_CONTROL2 0x1c90
+#define regOTG2_OTG_GLOBAL_CONTROL2_BASE_IDX 2
+#define regOTG2_OTG_GLOBAL_CONTROL3 0x1c91
+#define regOTG2_OTG_GLOBAL_CONTROL3_BASE_IDX 2
+#define regOTG2_OTG_GLOBAL_CONTROL4 0x1c92
+#define regOTG2_OTG_GLOBAL_CONTROL4_BASE_IDX 2
+#define regOTG2_OTG_TRIG_MANUAL_CONTROL 0x1c93
+#define regOTG2_OTG_TRIG_MANUAL_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_DRR_TIMING_INT_STATUS 0x1c95
+#define regOTG2_OTG_DRR_TIMING_INT_STATUS_BASE_IDX 2
+#define regOTG2_OTG_DRR_V_TOTAL_REACH_RANGE 0x1c96
+#define regOTG2_OTG_DRR_V_TOTAL_REACH_RANGE_BASE_IDX 2
+#define regOTG2_OTG_DRR_V_TOTAL_CHANGE 0x1c97
+#define regOTG2_OTG_DRR_V_TOTAL_CHANGE_BASE_IDX 2
+#define regOTG2_OTG_DRR_TRIGGER_WINDOW 0x1c98
+#define regOTG2_OTG_DRR_TRIGGER_WINDOW_BASE_IDX 2
+#define regOTG2_OTG_DRR_CONTROL 0x1c99
+#define regOTG2_OTG_DRR_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_DRR_CONTOL2 0x1c9a
+#define regOTG2_OTG_DRR_CONTOL2_BASE_IDX 2
+#define regOTG2_OTG_M_CONST_DTO0 0x1c9b
+#define regOTG2_OTG_M_CONST_DTO0_BASE_IDX 2
+#define regOTG2_OTG_M_CONST_DTO1 0x1c9c
+#define regOTG2_OTG_M_CONST_DTO1_BASE_IDX 2
+#define regOTG2_OTG_REQUEST_CONTROL 0x1c9d
+#define regOTG2_OTG_REQUEST_CONTROL_BASE_IDX 2
+#define regOTG2_OTG_DSC_START_POSITION 0x1c9e
+#define regOTG2_OTG_DSC_START_POSITION_BASE_IDX 2
+#define regOTG2_OTG_PIPE_UPDATE_STATUS 0x1c9f
+#define regOTG2_OTG_PIPE_UPDATE_STATUS_BASE_IDX 2
+#define regOTG2_OTG_SPARE_REGISTER 0x1ca0
+#define regOTG2_OTG_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_otg3_dispdec
+// base address: 0x600
+#define regOTG3_OTG_H_TOTAL 0x1caa
+#define regOTG3_OTG_H_TOTAL_BASE_IDX 2
+#define regOTG3_OTG_H_BLANK_START_END 0x1cab
+#define regOTG3_OTG_H_BLANK_START_END_BASE_IDX 2
+#define regOTG3_OTG_H_SYNC_A 0x1cac
+#define regOTG3_OTG_H_SYNC_A_BASE_IDX 2
+#define regOTG3_OTG_H_SYNC_A_CNTL 0x1cad
+#define regOTG3_OTG_H_SYNC_A_CNTL_BASE_IDX 2
+#define regOTG3_OTG_H_TIMING_CNTL 0x1cae
+#define regOTG3_OTG_H_TIMING_CNTL_BASE_IDX 2
+#define regOTG3_OTG_V_TOTAL 0x1caf
+#define regOTG3_OTG_V_TOTAL_BASE_IDX 2
+#define regOTG3_OTG_V_TOTAL_MIN 0x1cb0
+#define regOTG3_OTG_V_TOTAL_MIN_BASE_IDX 2
+#define regOTG3_OTG_V_TOTAL_MAX 0x1cb1
+#define regOTG3_OTG_V_TOTAL_MAX_BASE_IDX 2
+#define regOTG3_OTG_V_TOTAL_MID 0x1cb2
+#define regOTG3_OTG_V_TOTAL_MID_BASE_IDX 2
+#define regOTG3_OTG_V_TOTAL_CONTROL 0x1cb3
+#define regOTG3_OTG_V_TOTAL_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_V_COUNT_STOP_CONTROL 0x1cb4
+#define regOTG3_OTG_V_COUNT_STOP_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_V_COUNT_STOP_CONTROL2 0x1cb5
+#define regOTG3_OTG_V_COUNT_STOP_CONTROL2_BASE_IDX 2
+#define regOTG3_OTG_V_TOTAL_INT_STATUS 0x1cb6
+#define regOTG3_OTG_V_TOTAL_INT_STATUS_BASE_IDX 2
+#define regOTG3_OTG_VSYNC_NOM_INT_STATUS 0x1cb7
+#define regOTG3_OTG_VSYNC_NOM_INT_STATUS_BASE_IDX 2
+#define regOTG3_OTG_V_BLANK_START_END 0x1cb8
+#define regOTG3_OTG_V_BLANK_START_END_BASE_IDX 2
+#define regOTG3_OTG_V_SYNC_A 0x1cb9
+#define regOTG3_OTG_V_SYNC_A_BASE_IDX 2
+#define regOTG3_OTG_V_SYNC_A_CNTL 0x1cba
+#define regOTG3_OTG_V_SYNC_A_CNTL_BASE_IDX 2
+#define regOTG3_OTG_TRIGA_CNTL 0x1cbb
+#define regOTG3_OTG_TRIGA_CNTL_BASE_IDX 2
+#define regOTG3_OTG_TRIGA_MANUAL_TRIG 0x1cbc
+#define regOTG3_OTG_TRIGA_MANUAL_TRIG_BASE_IDX 2
+#define regOTG3_OTG_TRIGB_CNTL 0x1cbd
+#define regOTG3_OTG_TRIGB_CNTL_BASE_IDX 2
+#define regOTG3_OTG_TRIGB_MANUAL_TRIG 0x1cbe
+#define regOTG3_OTG_TRIGB_MANUAL_TRIG_BASE_IDX 2
+#define regOTG3_OTG_FORCE_COUNT_NOW_CNTL 0x1cbf
+#define regOTG3_OTG_FORCE_COUNT_NOW_CNTL_BASE_IDX 2
+#define regOTG3_OTG_STEREO_FORCE_NEXT_EYE 0x1cc1
+#define regOTG3_OTG_STEREO_FORCE_NEXT_EYE_BASE_IDX 2
+#define regOTG3_OTG_CONTROL 0x1cc3
+#define regOTG3_OTG_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_DLPC_CONTROL 0x1cc4
+#define regOTG3_OTG_DLPC_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_INTERLACE_CONTROL 0x1cc5
+#define regOTG3_OTG_INTERLACE_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_INTERLACE_STATUS 0x1cc6
+#define regOTG3_OTG_INTERLACE_STATUS_BASE_IDX 2
+#define regOTG3_OTG_PIXEL_DATA_READBACK0 0x1cc7
+#define regOTG3_OTG_PIXEL_DATA_READBACK0_BASE_IDX 2
+#define regOTG3_OTG_PIXEL_DATA_READBACK1 0x1cc8
+#define regOTG3_OTG_PIXEL_DATA_READBACK1_BASE_IDX 2
+#define regOTG3_OTG_STATUS 0x1cc9
+#define regOTG3_OTG_STATUS_BASE_IDX 2
+#define regOTG3_OTG_STATUS_POSITION 0x1cca
+#define regOTG3_OTG_STATUS_POSITION_BASE_IDX 2
+#define regOTG3_OTG_LONG_VBLANK_STATUS 0x1ccb
+#define regOTG3_OTG_LONG_VBLANK_STATUS_BASE_IDX 2
+#define regOTG3_OTG_NOM_VERT_POSITION 0x1ccc
+#define regOTG3_OTG_NOM_VERT_POSITION_BASE_IDX 2
+#define regOTG3_OTG_STATUS_FRAME_COUNT 0x1ccd
+#define regOTG3_OTG_STATUS_FRAME_COUNT_BASE_IDX 2
+#define regOTG3_OTG_STATUS_VF_COUNT 0x1cce
+#define regOTG3_OTG_STATUS_VF_COUNT_BASE_IDX 2
+#define regOTG3_OTG_STATUS_HV_COUNT 0x1ccf
+#define regOTG3_OTG_STATUS_HV_COUNT_BASE_IDX 2
+#define regOTG3_OTG_COUNT_CONTROL 0x1cd0
+#define regOTG3_OTG_COUNT_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_COUNT_RESET 0x1cd1
+#define regOTG3_OTG_COUNT_RESET_BASE_IDX 2
+#define regOTG3_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1cd2
+#define regOTG3_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_BASE_IDX 2
+#define regOTG3_OTG_VERT_SYNC_CONTROL 0x1cd3
+#define regOTG3_OTG_VERT_SYNC_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_STEREO_STATUS 0x1cd4
+#define regOTG3_OTG_STEREO_STATUS_BASE_IDX 2
+#define regOTG3_OTG_STEREO_CONTROL 0x1cd5
+#define regOTG3_OTG_STEREO_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_SNAPSHOT_STATUS 0x1cd6
+#define regOTG3_OTG_SNAPSHOT_STATUS_BASE_IDX 2
+#define regOTG3_OTG_SNAPSHOT_CONTROL 0x1cd7
+#define regOTG3_OTG_SNAPSHOT_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_SNAPSHOT_POSITION 0x1cd8
+#define regOTG3_OTG_SNAPSHOT_POSITION_BASE_IDX 2
+#define regOTG3_OTG_SNAPSHOT_FRAME 0x1cd9
+#define regOTG3_OTG_SNAPSHOT_FRAME_BASE_IDX 2
+#define regOTG3_OTG_INTERRUPT_CONTROL 0x1cda
+#define regOTG3_OTG_INTERRUPT_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_UPDATE_LOCK 0x1cdb
+#define regOTG3_OTG_UPDATE_LOCK_BASE_IDX 2
+#define regOTG3_OTG_DOUBLE_BUFFER_CONTROL 0x1cdc
+#define regOTG3_OTG_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_MASTER_EN 0x1cdd
+#define regOTG3_OTG_MASTER_EN_BASE_IDX 2
+#define regOTG3_OTG_VERTICAL_INTERRUPT0_POSITION 0x1cdf
+#define regOTG3_OTG_VERTICAL_INTERRUPT0_POSITION_BASE_IDX 2
+#define regOTG3_OTG_VERTICAL_INTERRUPT0_CONTROL 0x1ce0
+#define regOTG3_OTG_VERTICAL_INTERRUPT0_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_VERTICAL_INTERRUPT1_POSITION 0x1ce1
+#define regOTG3_OTG_VERTICAL_INTERRUPT1_POSITION_BASE_IDX 2
+#define regOTG3_OTG_VERTICAL_INTERRUPT1_CONTROL 0x1ce2
+#define regOTG3_OTG_VERTICAL_INTERRUPT1_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_VERTICAL_INTERRUPT2_POSITION 0x1ce3
+#define regOTG3_OTG_VERTICAL_INTERRUPT2_POSITION_BASE_IDX 2
+#define regOTG3_OTG_VERTICAL_INTERRUPT2_CONTROL 0x1ce4
+#define regOTG3_OTG_VERTICAL_INTERRUPT2_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_CRC_CNTL 0x1ce5
+#define regOTG3_OTG_CRC_CNTL_BASE_IDX 2
+#define regOTG3_OTG_CRC0_WINDOWA_X_CONTROL 0x1ce6
+#define regOTG3_OTG_CRC0_WINDOWA_X_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_CRC0_WINDOWA_Y_CONTROL 0x1ce7
+#define regOTG3_OTG_CRC0_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_CRC0_WINDOWB_X_CONTROL 0x1ce8
+#define regOTG3_OTG_CRC0_WINDOWB_X_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_CRC0_WINDOWB_Y_CONTROL 0x1ce9
+#define regOTG3_OTG_CRC0_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_CRC0_DATA_RG 0x1cea
+#define regOTG3_OTG_CRC0_DATA_RG_BASE_IDX 2
+#define regOTG3_OTG_CRC0_DATA_B 0x1ceb
+#define regOTG3_OTG_CRC0_DATA_B_BASE_IDX 2
+#define regOTG3_OTG_CRC1_WINDOWA_X_CONTROL 0x1cec
+#define regOTG3_OTG_CRC1_WINDOWA_X_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_CRC1_WINDOWA_Y_CONTROL 0x1ced
+#define regOTG3_OTG_CRC1_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_CRC1_WINDOWB_X_CONTROL 0x1cee
+#define regOTG3_OTG_CRC1_WINDOWB_X_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_CRC1_WINDOWB_Y_CONTROL 0x1cef
+#define regOTG3_OTG_CRC1_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_CRC1_DATA_RG 0x1cf0
+#define regOTG3_OTG_CRC1_DATA_RG_BASE_IDX 2
+#define regOTG3_OTG_CRC1_DATA_B 0x1cf1
+#define regOTG3_OTG_CRC1_DATA_B_BASE_IDX 2
+#define regOTG3_OTG_CRC2_DATA_RG 0x1cf2
+#define regOTG3_OTG_CRC2_DATA_RG_BASE_IDX 2
+#define regOTG3_OTG_CRC2_DATA_B 0x1cf3
+#define regOTG3_OTG_CRC2_DATA_B_BASE_IDX 2
+#define regOTG3_OTG_CRC3_DATA_RG 0x1cf4
+#define regOTG3_OTG_CRC3_DATA_RG_BASE_IDX 2
+#define regOTG3_OTG_CRC3_DATA_B 0x1cf5
+#define regOTG3_OTG_CRC3_DATA_B_BASE_IDX 2
+#define regOTG3_OTG_CRC_SIG_RED_GREEN_MASK 0x1cf6
+#define regOTG3_OTG_CRC_SIG_RED_GREEN_MASK_BASE_IDX 2
+#define regOTG3_OTG_CRC_SIG_BLUE_CONTROL_MASK 0x1cf7
+#define regOTG3_OTG_CRC_SIG_BLUE_CONTROL_MASK_BASE_IDX 2
+#define regOTG3_OTG_CRC0_WINDOWA_X_CONTROL_READBACK 0x1cf8
+#define regOTG3_OTG_CRC0_WINDOWA_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG3_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK 0x1cf9
+#define regOTG3_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG3_OTG_CRC0_WINDOWB_X_CONTROL_READBACK 0x1cfa
+#define regOTG3_OTG_CRC0_WINDOWB_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG3_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK 0x1cfb
+#define regOTG3_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG3_OTG_CRC1_WINDOWA_X_CONTROL_READBACK 0x1cfc
+#define regOTG3_OTG_CRC1_WINDOWA_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG3_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK 0x1cfd
+#define regOTG3_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG3_OTG_CRC1_WINDOWB_X_CONTROL_READBACK 0x1cfe
+#define regOTG3_OTG_CRC1_WINDOWB_X_CONTROL_READBACK_BASE_IDX 2
+#define regOTG3_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK 0x1cff
+#define regOTG3_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK_BASE_IDX 2
+#define regOTG3_OTG_STATIC_SCREEN_CONTROL 0x1d00
+#define regOTG3_OTG_STATIC_SCREEN_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_3D_STRUCTURE_CONTROL 0x1d01
+#define regOTG3_OTG_3D_STRUCTURE_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_GSL_VSYNC_GAP 0x1d02
+#define regOTG3_OTG_GSL_VSYNC_GAP_BASE_IDX 2
+#define regOTG3_OTG_MASTER_UPDATE_MODE 0x1d03
+#define regOTG3_OTG_MASTER_UPDATE_MODE_BASE_IDX 2
+#define regOTG3_OTG_CLOCK_CONTROL 0x1d04
+#define regOTG3_OTG_CLOCK_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_VSTARTUP_PARAM 0x1d05
+#define regOTG3_OTG_VSTARTUP_PARAM_BASE_IDX 2
+#define regOTG3_OTG_VUPDATE_PARAM 0x1d06
+#define regOTG3_OTG_VUPDATE_PARAM_BASE_IDX 2
+#define regOTG3_OTG_VREADY_PARAM 0x1d07
+#define regOTG3_OTG_VREADY_PARAM_BASE_IDX 2
+#define regOTG3_OTG_GLOBAL_SYNC_STATUS 0x1d08
+#define regOTG3_OTG_GLOBAL_SYNC_STATUS_BASE_IDX 2
+#define regOTG3_OTG_MASTER_UPDATE_LOCK 0x1d09
+#define regOTG3_OTG_MASTER_UPDATE_LOCK_BASE_IDX 2
+#define regOTG3_OTG_GSL_CONTROL 0x1d0a
+#define regOTG3_OTG_GSL_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_GSL_WINDOW_X 0x1d0b
+#define regOTG3_OTG_GSL_WINDOW_X_BASE_IDX 2
+#define regOTG3_OTG_GSL_WINDOW_Y 0x1d0c
+#define regOTG3_OTG_GSL_WINDOW_Y_BASE_IDX 2
+#define regOTG3_OTG_VUPDATE_KEEPOUT 0x1d0d
+#define regOTG3_OTG_VUPDATE_KEEPOUT_BASE_IDX 2
+#define regOTG3_OTG_GLOBAL_CONTROL0 0x1d0e
+#define regOTG3_OTG_GLOBAL_CONTROL0_BASE_IDX 2
+#define regOTG3_OTG_GLOBAL_CONTROL1 0x1d0f
+#define regOTG3_OTG_GLOBAL_CONTROL1_BASE_IDX 2
+#define regOTG3_OTG_GLOBAL_CONTROL2 0x1d10
+#define regOTG3_OTG_GLOBAL_CONTROL2_BASE_IDX 2
+#define regOTG3_OTG_GLOBAL_CONTROL3 0x1d11
+#define regOTG3_OTG_GLOBAL_CONTROL3_BASE_IDX 2
+#define regOTG3_OTG_GLOBAL_CONTROL4 0x1d12
+#define regOTG3_OTG_GLOBAL_CONTROL4_BASE_IDX 2
+#define regOTG3_OTG_TRIG_MANUAL_CONTROL 0x1d13
+#define regOTG3_OTG_TRIG_MANUAL_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_DRR_TIMING_INT_STATUS 0x1d15
+#define regOTG3_OTG_DRR_TIMING_INT_STATUS_BASE_IDX 2
+#define regOTG3_OTG_DRR_V_TOTAL_REACH_RANGE 0x1d16
+#define regOTG3_OTG_DRR_V_TOTAL_REACH_RANGE_BASE_IDX 2
+#define regOTG3_OTG_DRR_V_TOTAL_CHANGE 0x1d17
+#define regOTG3_OTG_DRR_V_TOTAL_CHANGE_BASE_IDX 2
+#define regOTG3_OTG_DRR_TRIGGER_WINDOW 0x1d18
+#define regOTG3_OTG_DRR_TRIGGER_WINDOW_BASE_IDX 2
+#define regOTG3_OTG_DRR_CONTROL 0x1d19
+#define regOTG3_OTG_DRR_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_DRR_CONTOL2 0x1d1a
+#define regOTG3_OTG_DRR_CONTOL2_BASE_IDX 2
+#define regOTG3_OTG_M_CONST_DTO0 0x1d1b
+#define regOTG3_OTG_M_CONST_DTO0_BASE_IDX 2
+#define regOTG3_OTG_M_CONST_DTO1 0x1d1c
+#define regOTG3_OTG_M_CONST_DTO1_BASE_IDX 2
+#define regOTG3_OTG_REQUEST_CONTROL 0x1d1d
+#define regOTG3_OTG_REQUEST_CONTROL_BASE_IDX 2
+#define regOTG3_OTG_DSC_START_POSITION 0x1d1e
+#define regOTG3_OTG_DSC_START_POSITION_BASE_IDX 2
+#define regOTG3_OTG_PIPE_UPDATE_STATUS 0x1d1f
+#define regOTG3_OTG_PIPE_UPDATE_STATUS_BASE_IDX 2
+#define regOTG3_OTG_SPARE_REGISTER 0x1d20
+#define regOTG3_OTG_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_otg_crc320_dispdec
+// base address: 0x0
+#define regOTG_CRC320_OTG_CRC0_DATA_R32 0x1dd2
+#define regOTG_CRC320_OTG_CRC0_DATA_R32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC0_DATA_G32 0x1dd3
+#define regOTG_CRC320_OTG_CRC0_DATA_G32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC0_DATA_B32 0x1dd4
+#define regOTG_CRC320_OTG_CRC0_DATA_B32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC0_DATA_C32 0x1dd5
+#define regOTG_CRC320_OTG_CRC0_DATA_C32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC0_DATA_AES 0x1dd6
+#define regOTG_CRC320_OTG_CRC0_DATA_AES_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC1_DATA_R32 0x1dd7
+#define regOTG_CRC320_OTG_CRC1_DATA_R32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC1_DATA_G32 0x1dd8
+#define regOTG_CRC320_OTG_CRC1_DATA_G32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC1_DATA_B32 0x1dd9
+#define regOTG_CRC320_OTG_CRC1_DATA_B32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC1_DATA_C32 0x1dda
+#define regOTG_CRC320_OTG_CRC1_DATA_C32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC2_DATA_R32 0x1ddb
+#define regOTG_CRC320_OTG_CRC2_DATA_R32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC2_DATA_G32 0x1ddc
+#define regOTG_CRC320_OTG_CRC2_DATA_G32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC2_DATA_B32 0x1ddd
+#define regOTG_CRC320_OTG_CRC2_DATA_B32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC2_DATA_C32 0x1dde
+#define regOTG_CRC320_OTG_CRC2_DATA_C32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC3_DATA_R32 0x1ddf
+#define regOTG_CRC320_OTG_CRC3_DATA_R32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC3_DATA_G32 0x1de0
+#define regOTG_CRC320_OTG_CRC3_DATA_G32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC3_DATA_B32 0x1de1
+#define regOTG_CRC320_OTG_CRC3_DATA_B32_BASE_IDX 2
+#define regOTG_CRC320_OTG_CRC3_DATA_C32 0x1de2
+#define regOTG_CRC320_OTG_CRC3_DATA_C32_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_otg_crc321_dispdec
+// base address: 0x50
+#define regOTG_CRC321_OTG_CRC0_DATA_R32 0x1de6
+#define regOTG_CRC321_OTG_CRC0_DATA_R32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC0_DATA_G32 0x1de7
+#define regOTG_CRC321_OTG_CRC0_DATA_G32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC0_DATA_B32 0x1de8
+#define regOTG_CRC321_OTG_CRC0_DATA_B32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC0_DATA_C32 0x1de9
+#define regOTG_CRC321_OTG_CRC0_DATA_C32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC0_DATA_AES 0x1dea
+#define regOTG_CRC321_OTG_CRC0_DATA_AES_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC1_DATA_R32 0x1deb
+#define regOTG_CRC321_OTG_CRC1_DATA_R32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC1_DATA_G32 0x1dec
+#define regOTG_CRC321_OTG_CRC1_DATA_G32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC1_DATA_B32 0x1ded
+#define regOTG_CRC321_OTG_CRC1_DATA_B32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC1_DATA_C32 0x1dee
+#define regOTG_CRC321_OTG_CRC1_DATA_C32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC2_DATA_R32 0x1def
+#define regOTG_CRC321_OTG_CRC2_DATA_R32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC2_DATA_G32 0x1df0
+#define regOTG_CRC321_OTG_CRC2_DATA_G32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC2_DATA_B32 0x1df1
+#define regOTG_CRC321_OTG_CRC2_DATA_B32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC2_DATA_C32 0x1df2
+#define regOTG_CRC321_OTG_CRC2_DATA_C32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC3_DATA_R32 0x1df3
+#define regOTG_CRC321_OTG_CRC3_DATA_R32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC3_DATA_G32 0x1df4
+#define regOTG_CRC321_OTG_CRC3_DATA_G32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC3_DATA_B32 0x1df5
+#define regOTG_CRC321_OTG_CRC3_DATA_B32_BASE_IDX 2
+#define regOTG_CRC321_OTG_CRC3_DATA_C32 0x1df6
+#define regOTG_CRC321_OTG_CRC3_DATA_C32_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_otg_crc322_dispdec
+// base address: 0xa0
+#define regOTG_CRC322_OTG_CRC0_DATA_R32 0x1dfa
+#define regOTG_CRC322_OTG_CRC0_DATA_R32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC0_DATA_G32 0x1dfb
+#define regOTG_CRC322_OTG_CRC0_DATA_G32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC0_DATA_B32 0x1dfc
+#define regOTG_CRC322_OTG_CRC0_DATA_B32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC0_DATA_C32 0x1dfd
+#define regOTG_CRC322_OTG_CRC0_DATA_C32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC0_DATA_AES 0x1dfe
+#define regOTG_CRC322_OTG_CRC0_DATA_AES_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC1_DATA_R32 0x1dff
+#define regOTG_CRC322_OTG_CRC1_DATA_R32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC1_DATA_G32 0x1e00
+#define regOTG_CRC322_OTG_CRC1_DATA_G32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC1_DATA_B32 0x1e01
+#define regOTG_CRC322_OTG_CRC1_DATA_B32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC1_DATA_C32 0x1e02
+#define regOTG_CRC322_OTG_CRC1_DATA_C32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC2_DATA_R32 0x1e03
+#define regOTG_CRC322_OTG_CRC2_DATA_R32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC2_DATA_G32 0x1e04
+#define regOTG_CRC322_OTG_CRC2_DATA_G32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC2_DATA_B32 0x1e05
+#define regOTG_CRC322_OTG_CRC2_DATA_B32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC2_DATA_C32 0x1e06
+#define regOTG_CRC322_OTG_CRC2_DATA_C32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC3_DATA_R32 0x1e07
+#define regOTG_CRC322_OTG_CRC3_DATA_R32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC3_DATA_G32 0x1e08
+#define regOTG_CRC322_OTG_CRC3_DATA_G32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC3_DATA_B32 0x1e09
+#define regOTG_CRC322_OTG_CRC3_DATA_B32_BASE_IDX 2
+#define regOTG_CRC322_OTG_CRC3_DATA_C32 0x1e0a
+#define regOTG_CRC322_OTG_CRC3_DATA_C32_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_otg_crc323_dispdec
+// base address: 0xf0
+#define regOTG_CRC323_OTG_CRC0_DATA_R32 0x1e0e
+#define regOTG_CRC323_OTG_CRC0_DATA_R32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC0_DATA_G32 0x1e0f
+#define regOTG_CRC323_OTG_CRC0_DATA_G32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC0_DATA_B32 0x1e10
+#define regOTG_CRC323_OTG_CRC0_DATA_B32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC0_DATA_C32 0x1e11
+#define regOTG_CRC323_OTG_CRC0_DATA_C32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC0_DATA_AES 0x1e12
+#define regOTG_CRC323_OTG_CRC0_DATA_AES_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC1_DATA_R32 0x1e13
+#define regOTG_CRC323_OTG_CRC1_DATA_R32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC1_DATA_G32 0x1e14
+#define regOTG_CRC323_OTG_CRC1_DATA_G32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC1_DATA_B32 0x1e15
+#define regOTG_CRC323_OTG_CRC1_DATA_B32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC1_DATA_C32 0x1e16
+#define regOTG_CRC323_OTG_CRC1_DATA_C32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC2_DATA_R32 0x1e17
+#define regOTG_CRC323_OTG_CRC2_DATA_R32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC2_DATA_G32 0x1e18
+#define regOTG_CRC323_OTG_CRC2_DATA_G32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC2_DATA_B32 0x1e19
+#define regOTG_CRC323_OTG_CRC2_DATA_B32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC2_DATA_C32 0x1e1a
+#define regOTG_CRC323_OTG_CRC2_DATA_C32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC3_DATA_R32 0x1e1b
+#define regOTG_CRC323_OTG_CRC3_DATA_R32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC3_DATA_G32 0x1e1c
+#define regOTG_CRC323_OTG_CRC3_DATA_G32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC3_DATA_B32 0x1e1d
+#define regOTG_CRC323_OTG_CRC3_DATA_B32_BASE_IDX 2
+#define regOTG_CRC323_OTG_CRC3_DATA_C32 0x1e1e
+#define regOTG_CRC323_OTG_CRC3_DATA_C32_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_optc_misc_dispdec
+// base address: 0x0
+#define regGSL_SOURCE_SELECT 0x1e2b
+#define regGSL_SOURCE_SELECT_BASE_IDX 2
+#define regOPTC_DLPC_CONTROL 0x1e2c
+#define regOPTC_DLPC_CONTROL_BASE_IDX 2
+#define regOPTC_CLOCK_CONTROL 0x1e2d
+#define regOPTC_CLOCK_CONTROL_BASE_IDX 2
+#define regODM_MEM_PWR_CTRL 0x1e2e
+#define regODM_MEM_PWR_CTRL_BASE_IDX 2
+#define regODM_MEM_PWR_CTRL3 0x1e30
+#define regODM_MEM_PWR_CTRL3_BASE_IDX 2
+#define regODM_MEM_PWR_STATUS 0x1e31
+#define regODM_MEM_PWR_STATUS_BASE_IDX 2
+#define regOPTC_MISC_SPARE_REGISTER 0x1e32
+#define regOPTC_MISC_SPARE_REGISTER_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_optc_dcperfmon_dc_perfmon_dispdec
+// base address: 0x79a8
+#define regDC_PERFMON17_PERFCOUNTER_CNTL 0x1e6a
+#define regDC_PERFMON17_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON17_PERFCOUNTER_CNTL2 0x1e6b
+#define regDC_PERFMON17_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON17_PERFCOUNTER_STATE 0x1e6c
+#define regDC_PERFMON17_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON17_PERFMON_CNTL 0x1e6d
+#define regDC_PERFMON17_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON17_PERFMON_CNTL2 0x1e6e
+#define regDC_PERFMON17_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON17_PERFMON_CVALUE_INT_MISC 0x1e6f
+#define regDC_PERFMON17_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON17_PERFMON_CVALUE_LOW 0x1e70
+#define regDC_PERFMON17_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON17_PERFMON_HI 0x1e71
+#define regDC_PERFMON17_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON17_PERFMON_LOW 0x1e72
+#define regDC_PERFMON17_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_hpd0_dispdec
+// base address: 0x0
+#define regHPD0_DC_HPD_INT_STATUS 0x1f14
+#define regHPD0_DC_HPD_INT_STATUS_BASE_IDX 2
+#define regHPD0_DC_HPD_INT_CONTROL 0x1f15
+#define regHPD0_DC_HPD_INT_CONTROL_BASE_IDX 2
+#define regHPD0_DC_HPD_CONTROL 0x1f16
+#define regHPD0_DC_HPD_CONTROL_BASE_IDX 2
+#define regHPD0_DC_HPD_FAST_TRAIN_CNTL 0x1f17
+#define regHPD0_DC_HPD_FAST_TRAIN_CNTL_BASE_IDX 2
+#define regHPD0_DC_HPD_TOGGLE_FILT_CNTL 0x1f18
+#define regHPD0_DC_HPD_TOGGLE_FILT_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_hpd1_dispdec
+// base address: 0x20
+#define regHPD1_DC_HPD_INT_STATUS 0x1f1c
+#define regHPD1_DC_HPD_INT_STATUS_BASE_IDX 2
+#define regHPD1_DC_HPD_INT_CONTROL 0x1f1d
+#define regHPD1_DC_HPD_INT_CONTROL_BASE_IDX 2
+#define regHPD1_DC_HPD_CONTROL 0x1f1e
+#define regHPD1_DC_HPD_CONTROL_BASE_IDX 2
+#define regHPD1_DC_HPD_FAST_TRAIN_CNTL 0x1f1f
+#define regHPD1_DC_HPD_FAST_TRAIN_CNTL_BASE_IDX 2
+#define regHPD1_DC_HPD_TOGGLE_FILT_CNTL 0x1f20
+#define regHPD1_DC_HPD_TOGGLE_FILT_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_hpd2_dispdec
+// base address: 0x40
+#define regHPD2_DC_HPD_INT_STATUS 0x1f24
+#define regHPD2_DC_HPD_INT_STATUS_BASE_IDX 2
+#define regHPD2_DC_HPD_INT_CONTROL 0x1f25
+#define regHPD2_DC_HPD_INT_CONTROL_BASE_IDX 2
+#define regHPD2_DC_HPD_CONTROL 0x1f26
+#define regHPD2_DC_HPD_CONTROL_BASE_IDX 2
+#define regHPD2_DC_HPD_FAST_TRAIN_CNTL 0x1f27
+#define regHPD2_DC_HPD_FAST_TRAIN_CNTL_BASE_IDX 2
+#define regHPD2_DC_HPD_TOGGLE_FILT_CNTL 0x1f28
+#define regHPD2_DC_HPD_TOGGLE_FILT_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_hpd3_dispdec
+// base address: 0x60
+#define regHPD3_DC_HPD_INT_STATUS 0x1f2c
+#define regHPD3_DC_HPD_INT_STATUS_BASE_IDX 2
+#define regHPD3_DC_HPD_INT_CONTROL 0x1f2d
+#define regHPD3_DC_HPD_INT_CONTROL_BASE_IDX 2
+#define regHPD3_DC_HPD_CONTROL 0x1f2e
+#define regHPD3_DC_HPD_CONTROL_BASE_IDX 2
+#define regHPD3_DC_HPD_FAST_TRAIN_CNTL 0x1f2f
+#define regHPD3_DC_HPD_FAST_TRAIN_CNTL_BASE_IDX 2
+#define regHPD3_DC_HPD_TOGGLE_FILT_CNTL 0x1f30
+#define regHPD3_DC_HPD_TOGGLE_FILT_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_hpd4_dispdec
+// base address: 0x80
+#define regHPD4_DC_HPD_INT_STATUS 0x1f34
+#define regHPD4_DC_HPD_INT_STATUS_BASE_IDX 2
+#define regHPD4_DC_HPD_INT_CONTROL 0x1f35
+#define regHPD4_DC_HPD_INT_CONTROL_BASE_IDX 2
+#define regHPD4_DC_HPD_CONTROL 0x1f36
+#define regHPD4_DC_HPD_CONTROL_BASE_IDX 2
+#define regHPD4_DC_HPD_FAST_TRAIN_CNTL 0x1f37
+#define regHPD4_DC_HPD_FAST_TRAIN_CNTL_BASE_IDX 2
+#define regHPD4_DC_HPD_TOGGLE_FILT_CNTL 0x1f38
+#define regHPD4_DC_HPD_TOGGLE_FILT_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp0_dispdec
+// base address: 0x0
+#define regDP0_DP_LINK_CNTL 0x211e
+#define regDP0_DP_LINK_CNTL_BASE_IDX 2
+#define regDP0_DP_PIXEL_FORMAT 0x211f
+#define regDP0_DP_PIXEL_FORMAT_BASE_IDX 2
+#define regDP0_DP_MSA_COLORIMETRY 0x2120
+#define regDP0_DP_MSA_COLORIMETRY_BASE_IDX 2
+#define regDP0_DP_CONFIG 0x2121
+#define regDP0_DP_CONFIG_BASE_IDX 2
+#define regDP0_DP_VID_STREAM_CNTL 0x2122
+#define regDP0_DP_VID_STREAM_CNTL_BASE_IDX 2
+#define regDP0_DP_STEER_FIFO 0x2123
+#define regDP0_DP_STEER_FIFO_BASE_IDX 2
+#define regDP0_DP_MSA_MISC 0x2124
+#define regDP0_DP_MSA_MISC_BASE_IDX 2
+#define regDP0_DP_DPHY_INTERNAL_CTRL 0x2125
+#define regDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+#define regDP0_DP_VID_TIMING 0x2126
+#define regDP0_DP_VID_TIMING_BASE_IDX 2
+#define regDP0_DP_VID_N 0x2127
+#define regDP0_DP_VID_N_BASE_IDX 2
+#define regDP0_DP_VID_M 0x2128
+#define regDP0_DP_VID_M_BASE_IDX 2
+#define regDP0_DP_LINK_FRAMING_CNTL 0x2129
+#define regDP0_DP_LINK_FRAMING_CNTL_BASE_IDX 2
+#define regDP0_DP_HBR2_EYE_PATTERN 0x212a
+#define regDP0_DP_HBR2_EYE_PATTERN_BASE_IDX 2
+#define regDP0_DP_VID_MSA_VBID 0x212b
+#define regDP0_DP_VID_MSA_VBID_BASE_IDX 2
+#define regDP0_DP_VID_INTERRUPT_CNTL 0x212c
+#define regDP0_DP_VID_INTERRUPT_CNTL_BASE_IDX 2
+#define regDP0_DP_DPHY_CNTL 0x212d
+#define regDP0_DP_DPHY_CNTL_BASE_IDX 2
+#define regDP0_DP_DPHY_TRAINING_PATTERN_SEL 0x212e
+#define regDP0_DP_DPHY_TRAINING_PATTERN_SEL_BASE_IDX 2
+#define regDP0_DP_DPHY_SYM0 0x212f
+#define regDP0_DP_DPHY_SYM0_BASE_IDX 2
+#define regDP0_DP_DPHY_SYM1 0x2130
+#define regDP0_DP_DPHY_SYM1_BASE_IDX 2
+#define regDP0_DP_DPHY_SYM2 0x2131
+#define regDP0_DP_DPHY_SYM2_BASE_IDX 2
+#define regDP0_DP_DPHY_8B10B_CNTL 0x2132
+#define regDP0_DP_DPHY_8B10B_CNTL_BASE_IDX 2
+#define regDP0_DP_DPHY_PRBS_CNTL 0x2133
+#define regDP0_DP_DPHY_PRBS_CNTL_BASE_IDX 2
+#define regDP0_DP_DPHY_SCRAM_CNTL 0x2134
+#define regDP0_DP_DPHY_SCRAM_CNTL_BASE_IDX 2
+#define regDP0_DP_DPHY_CRC_CONTROL0 0x2135
+#define regDP0_DP_DPHY_CRC_CONTROL0_BASE_IDX 2
+#define regDP0_DP_DPHY_CRC_CONTROL1 0x2136
+#define regDP0_DP_DPHY_CRC_CONTROL1_BASE_IDX 2
+#define regDP0_DP_DPHY_CRC_RESULT0 0x2137
+#define regDP0_DP_DPHY_CRC_RESULT0_BASE_IDX 2
+#define regDP0_DP_DPHY_CRC_RESULT1 0x2138
+#define regDP0_DP_DPHY_CRC_RESULT1_BASE_IDX 2
+#define regDP0_DP_DPHY_CRC_STATUS 0x2139
+#define regDP0_DP_DPHY_CRC_STATUS_BASE_IDX 2
+#define regDP0_DP_DPHY_FAST_TRAINING 0x213a
+#define regDP0_DP_DPHY_FAST_TRAINING_BASE_IDX 2
+#define regDP0_DP_DPHY_FAST_TRAINING_STATUS 0x213b
+#define regDP0_DP_DPHY_FAST_TRAINING_STATUS_BASE_IDX 2
+#define regDP0_DP_DPHY_CRC_RESULT2 0x213c
+#define regDP0_DP_DPHY_CRC_RESULT2_BASE_IDX 2
+#define regDP0_DP_DPHY_CRC_RESULT3 0x213d
+#define regDP0_DP_DPHY_CRC_RESULT3_BASE_IDX 2
+#define regDP0_DP_SEC_CNTL 0x2141
+#define regDP0_DP_SEC_CNTL_BASE_IDX 2
+#define regDP0_DP_SEC_CNTL1 0x2142
+#define regDP0_DP_SEC_CNTL1_BASE_IDX 2
+#define regDP0_DP_SEC_FRAMING1 0x2143
+#define regDP0_DP_SEC_FRAMING1_BASE_IDX 2
+#define regDP0_DP_SEC_FRAMING2 0x2144
+#define regDP0_DP_SEC_FRAMING2_BASE_IDX 2
+#define regDP0_DP_SEC_FRAMING3 0x2145
+#define regDP0_DP_SEC_FRAMING3_BASE_IDX 2
+#define regDP0_DP_SEC_FRAMING4 0x2146
+#define regDP0_DP_SEC_FRAMING4_BASE_IDX 2
+#define regDP0_DP_SEC_AUD_N 0x2147
+#define regDP0_DP_SEC_AUD_N_BASE_IDX 2
+#define regDP0_DP_SEC_AUD_N_READBACK 0x2148
+#define regDP0_DP_SEC_AUD_N_READBACK_BASE_IDX 2
+#define regDP0_DP_SEC_AUD_M 0x2149
+#define regDP0_DP_SEC_AUD_M_BASE_IDX 2
+#define regDP0_DP_SEC_AUD_M_READBACK 0x214a
+#define regDP0_DP_SEC_AUD_M_READBACK_BASE_IDX 2
+#define regDP0_DP_SEC_TIMESTAMP 0x214b
+#define regDP0_DP_SEC_TIMESTAMP_BASE_IDX 2
+#define regDP0_DP_SEC_PACKET_CNTL 0x214c
+#define regDP0_DP_SEC_PACKET_CNTL_BASE_IDX 2
+#define regDP0_DP_MSE_RATE_CNTL 0x214d
+#define regDP0_DP_MSE_RATE_CNTL_BASE_IDX 2
+#define regDP0_DP_MSE_RATE_UPDATE 0x214f
+#define regDP0_DP_MSE_RATE_UPDATE_BASE_IDX 2
+#define regDP0_DP_MSE_SAT0 0x2150
+#define regDP0_DP_MSE_SAT0_BASE_IDX 2
+#define regDP0_DP_MSE_SAT1 0x2151
+#define regDP0_DP_MSE_SAT1_BASE_IDX 2
+#define regDP0_DP_MSE_SAT2 0x2152
+#define regDP0_DP_MSE_SAT2_BASE_IDX 2
+#define regDP0_DP_MSE_SAT_UPDATE 0x2153
+#define regDP0_DP_MSE_SAT_UPDATE_BASE_IDX 2
+#define regDP0_DP_MSE_LINK_TIMING 0x2154
+#define regDP0_DP_MSE_LINK_TIMING_BASE_IDX 2
+#define regDP0_DP_MSE_MISC_CNTL 0x2155
+#define regDP0_DP_MSE_MISC_CNTL_BASE_IDX 2
+#define regDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x215a
+#define regDP0_DP_DPHY_BS_SR_SWAP_CNTL_BASE_IDX 2
+#define regDP0_DP_DPHY_HBR2_PATTERN_CONTROL 0x215b
+#define regDP0_DP_DPHY_HBR2_PATTERN_CONTROL_BASE_IDX 2
+#define regDP0_DP_MSE_SAT0_STATUS 0x215d
+#define regDP0_DP_MSE_SAT0_STATUS_BASE_IDX 2
+#define regDP0_DP_MSE_SAT1_STATUS 0x215e
+#define regDP0_DP_MSE_SAT1_STATUS_BASE_IDX 2
+#define regDP0_DP_MSE_SAT2_STATUS 0x215f
+#define regDP0_DP_MSE_SAT2_STATUS_BASE_IDX 2
+#define regDP0_DP_DPIA_SPARE 0x2160
+#define regDP0_DP_DPIA_SPARE_BASE_IDX 2
+#define regDP0_DP_MSA_TIMING_PARAM1 0x2162
+#define regDP0_DP_MSA_TIMING_PARAM1_BASE_IDX 2
+#define regDP0_DP_MSA_TIMING_PARAM2 0x2163
+#define regDP0_DP_MSA_TIMING_PARAM2_BASE_IDX 2
+#define regDP0_DP_MSA_TIMING_PARAM3 0x2164
+#define regDP0_DP_MSA_TIMING_PARAM3_BASE_IDX 2
+#define regDP0_DP_MSA_TIMING_PARAM4 0x2165
+#define regDP0_DP_MSA_TIMING_PARAM4_BASE_IDX 2
+#define regDP0_DP_MSO_CNTL 0x2166
+#define regDP0_DP_MSO_CNTL_BASE_IDX 2
+#define regDP0_DP_MSO_CNTL1 0x2167
+#define regDP0_DP_MSO_CNTL1_BASE_IDX 2
+#define regDP0_DP_DSC_CNTL 0x2168
+#define regDP0_DP_DSC_CNTL_BASE_IDX 2
+#define regDP0_DP_SEC_CNTL2 0x2169
+#define regDP0_DP_SEC_CNTL2_BASE_IDX 2
+#define regDP0_DP_SEC_CNTL3 0x216a
+#define regDP0_DP_SEC_CNTL3_BASE_IDX 2
+#define regDP0_DP_SEC_CNTL4 0x216b
+#define regDP0_DP_SEC_CNTL4_BASE_IDX 2
+#define regDP0_DP_SEC_CNTL5 0x216c
+#define regDP0_DP_SEC_CNTL5_BASE_IDX 2
+#define regDP0_DP_SEC_CNTL6 0x216d
+#define regDP0_DP_SEC_CNTL6_BASE_IDX 2
+#define regDP0_DP_SEC_CNTL7 0x216e
+#define regDP0_DP_SEC_CNTL7_BASE_IDX 2
+#define regDP0_DP_DB_CNTL 0x216f
+#define regDP0_DP_DB_CNTL_BASE_IDX 2
+#define regDP0_DP_MSA_VBID_MISC 0x2170
+#define regDP0_DP_MSA_VBID_MISC_BASE_IDX 2
+#define regDP0_DP_SEC_METADATA_TRANSMISSION 0x2171
+#define regDP0_DP_SEC_METADATA_TRANSMISSION_BASE_IDX 2
+#define regDP0_DP_ALPM_CNTL 0x2173
+#define regDP0_DP_ALPM_CNTL_BASE_IDX 2
+#define regDP0_DP_GSP8_CNTL 0x2174
+#define regDP0_DP_GSP8_CNTL_BASE_IDX 2
+#define regDP0_DP_GSP9_CNTL 0x2175
+#define regDP0_DP_GSP9_CNTL_BASE_IDX 2
+#define regDP0_DP_GSP10_CNTL 0x2176
+#define regDP0_DP_GSP10_CNTL_BASE_IDX 2
+#define regDP0_DP_GSP11_CNTL 0x2177
+#define regDP0_DP_GSP11_CNTL_BASE_IDX 2
+#define regDP0_DP_GSP_EN_DB_STATUS 0x2178
+#define regDP0_DP_GSP_EN_DB_STATUS_BASE_IDX 2
+#define regDP0_DP_AUXLESS_ALPM_CNTL1 0x2179
+#define regDP0_DP_AUXLESS_ALPM_CNTL1_BASE_IDX 2
+#define regDP0_DP_AUXLESS_ALPM_CNTL2 0x217a
+#define regDP0_DP_AUXLESS_ALPM_CNTL2_BASE_IDX 2
+#define regDP0_DP_AUXLESS_ALPM_CNTL3 0x217b
+#define regDP0_DP_AUXLESS_ALPM_CNTL3_BASE_IDX 2
+#define regDP0_DP_AUXLESS_ALPM_CNTL4 0x217c
+#define regDP0_DP_AUXLESS_ALPM_CNTL4_BASE_IDX 2
+#define regDP0_DP_AUXLESS_ALPM_CNTL5 0x217d
+#define regDP0_DP_AUXLESS_ALPM_CNTL5_BASE_IDX 2
+#define regDP0_DP_STREAM_SYMBOL_COUNT_STATUS 0x217e
+#define regDP0_DP_STREAM_SYMBOL_COUNT_STATUS_BASE_IDX 2
+#define regDP0_DP_STREAM_SYMBOL_COUNT_CONTROL 0x217f
+#define regDP0_DP_STREAM_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP0_DP_LINK_SYMBOL_COUNT_STATUS0 0x2180
+#define regDP0_DP_LINK_SYMBOL_COUNT_STATUS0_BASE_IDX 2
+#define regDP0_DP_LINK_SYMBOL_COUNT_STATUS1 0x2181
+#define regDP0_DP_LINK_SYMBOL_COUNT_STATUS1_BASE_IDX 2
+#define regDP0_DP_LINK_SYMBOL_COUNT_CONTROL 0x2182
+#define regDP0_DP_LINK_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig0_dispdec
+// base address: 0x0
+#define regDIG0_DIG_FE_CNTL 0x2093
+#define regDIG0_DIG_FE_CNTL_BASE_IDX 2
+#define regDIG0_DIG_FE_CLK_CNTL 0x2094
+#define regDIG0_DIG_FE_CLK_CNTL_BASE_IDX 2
+#define regDIG0_DIG_FE_EN_CNTL 0x2095
+#define regDIG0_DIG_FE_EN_CNTL_BASE_IDX 2
+#define regDIG0_DIG_OUTPUT_CRC_CNTL 0x2096
+#define regDIG0_DIG_OUTPUT_CRC_CNTL_BASE_IDX 2
+#define regDIG0_DIG_OUTPUT_CRC_RESULT 0x2097
+#define regDIG0_DIG_OUTPUT_CRC_RESULT_BASE_IDX 2
+#define regDIG0_DIG_CLOCK_PATTERN 0x2098
+#define regDIG0_DIG_CLOCK_PATTERN_BASE_IDX 2
+#define regDIG0_DIG_TEST_PATTERN 0x2099
+#define regDIG0_DIG_TEST_PATTERN_BASE_IDX 2
+#define regDIG0_DIG_RANDOM_PATTERN_SEED 0x209a
+#define regDIG0_DIG_RANDOM_PATTERN_SEED_BASE_IDX 2
+#define regDIG0_DIG_FIFO_CTRL0 0x209b
+#define regDIG0_DIG_FIFO_CTRL0_BASE_IDX 2
+#define regDIG0_DIG_FIFO_CTRL1 0x209c
+#define regDIG0_DIG_FIFO_CTRL1_BASE_IDX 2
+#define regDIG0_HDMI_METADATA_PACKET_CONTROL 0x209d
+#define regDIG0_HDMI_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define regDIG0_HDMI_CONTROL 0x209e
+#define regDIG0_HDMI_CONTROL_BASE_IDX 2
+#define regDIG0_HDMI_STATUS 0x209f
+#define regDIG0_HDMI_STATUS_BASE_IDX 2
+#define regDIG0_HDMI_AUDIO_PACKET_CONTROL 0x20a0
+#define regDIG0_HDMI_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regDIG0_HDMI_ACR_PACKET_CONTROL 0x20a1
+#define regDIG0_HDMI_ACR_PACKET_CONTROL_BASE_IDX 2
+#define regDIG0_HDMI_VBI_PACKET_CONTROL 0x20a2
+#define regDIG0_HDMI_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regDIG0_HDMI_INFOFRAME_CONTROL0 0x20a3
+#define regDIG0_HDMI_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regDIG0_HDMI_INFOFRAME_CONTROL1 0x20a4
+#define regDIG0_HDMI_INFOFRAME_CONTROL1_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL0 0x20a5
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL0_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL6 0x20a6
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL6_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL5 0x20a7
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL5_BASE_IDX 2
+#define regDIG0_HDMI_GC 0x20a8
+#define regDIG0_HDMI_GC_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL1 0x20a9
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL1_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL2 0x20aa
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL2_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL3 0x20ab
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL3_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL4 0x20ac
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL4_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL7 0x20ad
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL7_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL8 0x20ae
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL8_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL9 0x20af
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL9_BASE_IDX 2
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL10 0x20b0
+#define regDIG0_HDMI_GENERIC_PACKET_CONTROL10_BASE_IDX 2
+#define regDIG0_HDMI_DB_CONTROL 0x20b1
+#define regDIG0_HDMI_DB_CONTROL_BASE_IDX 2
+#define regDIG0_HDMI_ACR_32_0 0x20b2
+#define regDIG0_HDMI_ACR_32_0_BASE_IDX 2
+#define regDIG0_HDMI_ACR_32_1 0x20b3
+#define regDIG0_HDMI_ACR_32_1_BASE_IDX 2
+#define regDIG0_HDMI_ACR_44_0 0x20b4
+#define regDIG0_HDMI_ACR_44_0_BASE_IDX 2
+#define regDIG0_HDMI_ACR_44_1 0x20b5
+#define regDIG0_HDMI_ACR_44_1_BASE_IDX 2
+#define regDIG0_HDMI_ACR_48_0 0x20b6
+#define regDIG0_HDMI_ACR_48_0_BASE_IDX 2
+#define regDIG0_HDMI_ACR_48_1 0x20b7
+#define regDIG0_HDMI_ACR_48_1_BASE_IDX 2
+#define regDIG0_HDMI_ACR_STATUS_0 0x20b8
+#define regDIG0_HDMI_ACR_STATUS_0_BASE_IDX 2
+#define regDIG0_HDMI_ACR_STATUS_1 0x20b9
+#define regDIG0_HDMI_ACR_STATUS_1_BASE_IDX 2
+#define regDIG0_AFMT_CNTL 0x20ba
+#define regDIG0_AFMT_CNTL_BASE_IDX 2
+#define regDIG0_DIG_BE_CLK_CNTL 0x20bb
+#define regDIG0_DIG_BE_CLK_CNTL_BASE_IDX 2
+#define regDIG0_DIG_BE_CNTL 0x20bc
+#define regDIG0_DIG_BE_CNTL_BASE_IDX 2
+#define regDIG0_DIG_BE_EN_CNTL 0x20bd
+#define regDIG0_DIG_BE_EN_CNTL_BASE_IDX 2
+#define regDIG0_TMDS_CNTL 0x20e4
+#define regDIG0_TMDS_CNTL_BASE_IDX 2
+#define regDIG0_TMDS_CONTROL_CHAR 0x20e5
+#define regDIG0_TMDS_CONTROL_CHAR_BASE_IDX 2
+#define regDIG0_TMDS_CONTROL0_FEEDBACK 0x20e6
+#define regDIG0_TMDS_CONTROL0_FEEDBACK_BASE_IDX 2
+#define regDIG0_TMDS_STEREOSYNC_CTL_SEL 0x20e7
+#define regDIG0_TMDS_STEREOSYNC_CTL_SEL_BASE_IDX 2
+#define regDIG0_TMDS_SYNC_CHAR_PATTERN_0_1 0x20e8
+#define regDIG0_TMDS_SYNC_CHAR_PATTERN_0_1_BASE_IDX 2
+#define regDIG0_TMDS_SYNC_CHAR_PATTERN_2_3 0x20e9
+#define regDIG0_TMDS_SYNC_CHAR_PATTERN_2_3_BASE_IDX 2
+#define regDIG0_TMDS_CTL_BITS 0x20eb
+#define regDIG0_TMDS_CTL_BITS_BASE_IDX 2
+#define regDIG0_TMDS_DCBALANCER_CONTROL 0x20ec
+#define regDIG0_TMDS_DCBALANCER_CONTROL_BASE_IDX 2
+#define regDIG0_TMDS_SYNC_DCBALANCE_CHAR 0x20ed
+#define regDIG0_TMDS_SYNC_DCBALANCE_CHAR_BASE_IDX 2
+#define regDIG0_TMDS_CTL0_1_GEN_CNTL 0x20ee
+#define regDIG0_TMDS_CTL0_1_GEN_CNTL_BASE_IDX 2
+#define regDIG0_TMDS_CTL2_3_GEN_CNTL 0x20ef
+#define regDIG0_TMDS_CTL2_3_GEN_CNTL_BASE_IDX 2
+#define regDIG0_DIG_VERSION 0x20f1
+#define regDIG0_DIG_VERSION_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp1_dispdec
+// base address: 0x490
+#define regDP1_DP_LINK_CNTL 0x2242
+#define regDP1_DP_LINK_CNTL_BASE_IDX 2
+#define regDP1_DP_PIXEL_FORMAT 0x2243
+#define regDP1_DP_PIXEL_FORMAT_BASE_IDX 2
+#define regDP1_DP_MSA_COLORIMETRY 0x2244
+#define regDP1_DP_MSA_COLORIMETRY_BASE_IDX 2
+#define regDP1_DP_CONFIG 0x2245
+#define regDP1_DP_CONFIG_BASE_IDX 2
+#define regDP1_DP_VID_STREAM_CNTL 0x2246
+#define regDP1_DP_VID_STREAM_CNTL_BASE_IDX 2
+#define regDP1_DP_STEER_FIFO 0x2247
+#define regDP1_DP_STEER_FIFO_BASE_IDX 2
+#define regDP1_DP_MSA_MISC 0x2248
+#define regDP1_DP_MSA_MISC_BASE_IDX 2
+#define regDP1_DP_DPHY_INTERNAL_CTRL 0x2249
+#define regDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+#define regDP1_DP_VID_TIMING 0x224a
+#define regDP1_DP_VID_TIMING_BASE_IDX 2
+#define regDP1_DP_VID_N 0x224b
+#define regDP1_DP_VID_N_BASE_IDX 2
+#define regDP1_DP_VID_M 0x224c
+#define regDP1_DP_VID_M_BASE_IDX 2
+#define regDP1_DP_LINK_FRAMING_CNTL 0x224d
+#define regDP1_DP_LINK_FRAMING_CNTL_BASE_IDX 2
+#define regDP1_DP_HBR2_EYE_PATTERN 0x224e
+#define regDP1_DP_HBR2_EYE_PATTERN_BASE_IDX 2
+#define regDP1_DP_VID_MSA_VBID 0x224f
+#define regDP1_DP_VID_MSA_VBID_BASE_IDX 2
+#define regDP1_DP_VID_INTERRUPT_CNTL 0x2250
+#define regDP1_DP_VID_INTERRUPT_CNTL_BASE_IDX 2
+#define regDP1_DP_DPHY_CNTL 0x2251
+#define regDP1_DP_DPHY_CNTL_BASE_IDX 2
+#define regDP1_DP_DPHY_TRAINING_PATTERN_SEL 0x2252
+#define regDP1_DP_DPHY_TRAINING_PATTERN_SEL_BASE_IDX 2
+#define regDP1_DP_DPHY_SYM0 0x2253
+#define regDP1_DP_DPHY_SYM0_BASE_IDX 2
+#define regDP1_DP_DPHY_SYM1 0x2254
+#define regDP1_DP_DPHY_SYM1_BASE_IDX 2
+#define regDP1_DP_DPHY_SYM2 0x2255
+#define regDP1_DP_DPHY_SYM2_BASE_IDX 2
+#define regDP1_DP_DPHY_8B10B_CNTL 0x2256
+#define regDP1_DP_DPHY_8B10B_CNTL_BASE_IDX 2
+#define regDP1_DP_DPHY_PRBS_CNTL 0x2257
+#define regDP1_DP_DPHY_PRBS_CNTL_BASE_IDX 2
+#define regDP1_DP_DPHY_SCRAM_CNTL 0x2258
+#define regDP1_DP_DPHY_SCRAM_CNTL_BASE_IDX 2
+#define regDP1_DP_DPHY_CRC_CONTROL0 0x2259
+#define regDP1_DP_DPHY_CRC_CONTROL0_BASE_IDX 2
+#define regDP1_DP_DPHY_CRC_CONTROL1 0x225a
+#define regDP1_DP_DPHY_CRC_CONTROL1_BASE_IDX 2
+#define regDP1_DP_DPHY_CRC_RESULT0 0x225b
+#define regDP1_DP_DPHY_CRC_RESULT0_BASE_IDX 2
+#define regDP1_DP_DPHY_CRC_RESULT1 0x225c
+#define regDP1_DP_DPHY_CRC_RESULT1_BASE_IDX 2
+#define regDP1_DP_DPHY_CRC_STATUS 0x225d
+#define regDP1_DP_DPHY_CRC_STATUS_BASE_IDX 2
+#define regDP1_DP_DPHY_FAST_TRAINING 0x225e
+#define regDP1_DP_DPHY_FAST_TRAINING_BASE_IDX 2
+#define regDP1_DP_DPHY_FAST_TRAINING_STATUS 0x225f
+#define regDP1_DP_DPHY_FAST_TRAINING_STATUS_BASE_IDX 2
+#define regDP1_DP_DPHY_CRC_RESULT2 0x2260
+#define regDP1_DP_DPHY_CRC_RESULT2_BASE_IDX 2
+#define regDP1_DP_DPHY_CRC_RESULT3 0x2261
+#define regDP1_DP_DPHY_CRC_RESULT3_BASE_IDX 2
+#define regDP1_DP_SEC_CNTL 0x2265
+#define regDP1_DP_SEC_CNTL_BASE_IDX 2
+#define regDP1_DP_SEC_CNTL1 0x2266
+#define regDP1_DP_SEC_CNTL1_BASE_IDX 2
+#define regDP1_DP_SEC_FRAMING1 0x2267
+#define regDP1_DP_SEC_FRAMING1_BASE_IDX 2
+#define regDP1_DP_SEC_FRAMING2 0x2268
+#define regDP1_DP_SEC_FRAMING2_BASE_IDX 2
+#define regDP1_DP_SEC_FRAMING3 0x2269
+#define regDP1_DP_SEC_FRAMING3_BASE_IDX 2
+#define regDP1_DP_SEC_FRAMING4 0x226a
+#define regDP1_DP_SEC_FRAMING4_BASE_IDX 2
+#define regDP1_DP_SEC_AUD_N 0x226b
+#define regDP1_DP_SEC_AUD_N_BASE_IDX 2
+#define regDP1_DP_SEC_AUD_N_READBACK 0x226c
+#define regDP1_DP_SEC_AUD_N_READBACK_BASE_IDX 2
+#define regDP1_DP_SEC_AUD_M 0x226d
+#define regDP1_DP_SEC_AUD_M_BASE_IDX 2
+#define regDP1_DP_SEC_AUD_M_READBACK 0x226e
+#define regDP1_DP_SEC_AUD_M_READBACK_BASE_IDX 2
+#define regDP1_DP_SEC_TIMESTAMP 0x226f
+#define regDP1_DP_SEC_TIMESTAMP_BASE_IDX 2
+#define regDP1_DP_SEC_PACKET_CNTL 0x2270
+#define regDP1_DP_SEC_PACKET_CNTL_BASE_IDX 2
+#define regDP1_DP_MSE_RATE_CNTL 0x2271
+#define regDP1_DP_MSE_RATE_CNTL_BASE_IDX 2
+#define regDP1_DP_MSE_RATE_UPDATE 0x2273
+#define regDP1_DP_MSE_RATE_UPDATE_BASE_IDX 2
+#define regDP1_DP_MSE_SAT0 0x2274
+#define regDP1_DP_MSE_SAT0_BASE_IDX 2
+#define regDP1_DP_MSE_SAT1 0x2275
+#define regDP1_DP_MSE_SAT1_BASE_IDX 2
+#define regDP1_DP_MSE_SAT2 0x2276
+#define regDP1_DP_MSE_SAT2_BASE_IDX 2
+#define regDP1_DP_MSE_SAT_UPDATE 0x2277
+#define regDP1_DP_MSE_SAT_UPDATE_BASE_IDX 2
+#define regDP1_DP_MSE_LINK_TIMING 0x2278
+#define regDP1_DP_MSE_LINK_TIMING_BASE_IDX 2
+#define regDP1_DP_MSE_MISC_CNTL 0x2279
+#define regDP1_DP_MSE_MISC_CNTL_BASE_IDX 2
+#define regDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x227e
+#define regDP1_DP_DPHY_BS_SR_SWAP_CNTL_BASE_IDX 2
+#define regDP1_DP_DPHY_HBR2_PATTERN_CONTROL 0x227f
+#define regDP1_DP_DPHY_HBR2_PATTERN_CONTROL_BASE_IDX 2
+#define regDP1_DP_MSE_SAT0_STATUS 0x2281
+#define regDP1_DP_MSE_SAT0_STATUS_BASE_IDX 2
+#define regDP1_DP_MSE_SAT1_STATUS 0x2282
+#define regDP1_DP_MSE_SAT1_STATUS_BASE_IDX 2
+#define regDP1_DP_MSE_SAT2_STATUS 0x2283
+#define regDP1_DP_MSE_SAT2_STATUS_BASE_IDX 2
+#define regDP1_DP_DPIA_SPARE 0x2284
+#define regDP1_DP_DPIA_SPARE_BASE_IDX 2
+#define regDP1_DP_MSA_TIMING_PARAM1 0x2286
+#define regDP1_DP_MSA_TIMING_PARAM1_BASE_IDX 2
+#define regDP1_DP_MSA_TIMING_PARAM2 0x2287
+#define regDP1_DP_MSA_TIMING_PARAM2_BASE_IDX 2
+#define regDP1_DP_MSA_TIMING_PARAM3 0x2288
+#define regDP1_DP_MSA_TIMING_PARAM3_BASE_IDX 2
+#define regDP1_DP_MSA_TIMING_PARAM4 0x2289
+#define regDP1_DP_MSA_TIMING_PARAM4_BASE_IDX 2
+#define regDP1_DP_MSO_CNTL 0x228a
+#define regDP1_DP_MSO_CNTL_BASE_IDX 2
+#define regDP1_DP_MSO_CNTL1 0x228b
+#define regDP1_DP_MSO_CNTL1_BASE_IDX 2
+#define regDP1_DP_DSC_CNTL 0x228c
+#define regDP1_DP_DSC_CNTL_BASE_IDX 2
+#define regDP1_DP_SEC_CNTL2 0x228d
+#define regDP1_DP_SEC_CNTL2_BASE_IDX 2
+#define regDP1_DP_SEC_CNTL3 0x228e
+#define regDP1_DP_SEC_CNTL3_BASE_IDX 2
+#define regDP1_DP_SEC_CNTL4 0x228f
+#define regDP1_DP_SEC_CNTL4_BASE_IDX 2
+#define regDP1_DP_SEC_CNTL5 0x2290
+#define regDP1_DP_SEC_CNTL5_BASE_IDX 2
+#define regDP1_DP_SEC_CNTL6 0x2291
+#define regDP1_DP_SEC_CNTL6_BASE_IDX 2
+#define regDP1_DP_SEC_CNTL7 0x2292
+#define regDP1_DP_SEC_CNTL7_BASE_IDX 2
+#define regDP1_DP_DB_CNTL 0x2293
+#define regDP1_DP_DB_CNTL_BASE_IDX 2
+#define regDP1_DP_MSA_VBID_MISC 0x2294
+#define regDP1_DP_MSA_VBID_MISC_BASE_IDX 2
+#define regDP1_DP_SEC_METADATA_TRANSMISSION 0x2295
+#define regDP1_DP_SEC_METADATA_TRANSMISSION_BASE_IDX 2
+#define regDP1_DP_ALPM_CNTL 0x2297
+#define regDP1_DP_ALPM_CNTL_BASE_IDX 2
+#define regDP1_DP_GSP8_CNTL 0x2298
+#define regDP1_DP_GSP8_CNTL_BASE_IDX 2
+#define regDP1_DP_GSP9_CNTL 0x2299
+#define regDP1_DP_GSP9_CNTL_BASE_IDX 2
+#define regDP1_DP_GSP10_CNTL 0x229a
+#define regDP1_DP_GSP10_CNTL_BASE_IDX 2
+#define regDP1_DP_GSP11_CNTL 0x229b
+#define regDP1_DP_GSP11_CNTL_BASE_IDX 2
+#define regDP1_DP_GSP_EN_DB_STATUS 0x229c
+#define regDP1_DP_GSP_EN_DB_STATUS_BASE_IDX 2
+#define regDP1_DP_AUXLESS_ALPM_CNTL1 0x229d
+#define regDP1_DP_AUXLESS_ALPM_CNTL1_BASE_IDX 2
+#define regDP1_DP_AUXLESS_ALPM_CNTL2 0x229e
+#define regDP1_DP_AUXLESS_ALPM_CNTL2_BASE_IDX 2
+#define regDP1_DP_AUXLESS_ALPM_CNTL3 0x229f
+#define regDP1_DP_AUXLESS_ALPM_CNTL3_BASE_IDX 2
+#define regDP1_DP_AUXLESS_ALPM_CNTL4 0x22a0
+#define regDP1_DP_AUXLESS_ALPM_CNTL4_BASE_IDX 2
+#define regDP1_DP_AUXLESS_ALPM_CNTL5 0x22a1
+#define regDP1_DP_AUXLESS_ALPM_CNTL5_BASE_IDX 2
+#define regDP1_DP_STREAM_SYMBOL_COUNT_STATUS 0x22a2
+#define regDP1_DP_STREAM_SYMBOL_COUNT_STATUS_BASE_IDX 2
+#define regDP1_DP_STREAM_SYMBOL_COUNT_CONTROL 0x22a3
+#define regDP1_DP_STREAM_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP1_DP_LINK_SYMBOL_COUNT_STATUS0 0x22a4
+#define regDP1_DP_LINK_SYMBOL_COUNT_STATUS0_BASE_IDX 2
+#define regDP1_DP_LINK_SYMBOL_COUNT_STATUS1 0x22a5
+#define regDP1_DP_LINK_SYMBOL_COUNT_STATUS1_BASE_IDX 2
+#define regDP1_DP_LINK_SYMBOL_COUNT_CONTROL 0x22a6
+#define regDP1_DP_LINK_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig1_dispdec
+// base address: 0x490
+#define regDIG1_DIG_FE_CNTL 0x21b7
+#define regDIG1_DIG_FE_CNTL_BASE_IDX 2
+#define regDIG1_DIG_FE_CLK_CNTL 0x21b8
+#define regDIG1_DIG_FE_CLK_CNTL_BASE_IDX 2
+#define regDIG1_DIG_FE_EN_CNTL 0x21b9
+#define regDIG1_DIG_FE_EN_CNTL_BASE_IDX 2
+#define regDIG1_DIG_OUTPUT_CRC_CNTL 0x21ba
+#define regDIG1_DIG_OUTPUT_CRC_CNTL_BASE_IDX 2
+#define regDIG1_DIG_OUTPUT_CRC_RESULT 0x21bb
+#define regDIG1_DIG_OUTPUT_CRC_RESULT_BASE_IDX 2
+#define regDIG1_DIG_CLOCK_PATTERN 0x21bc
+#define regDIG1_DIG_CLOCK_PATTERN_BASE_IDX 2
+#define regDIG1_DIG_TEST_PATTERN 0x21bd
+#define regDIG1_DIG_TEST_PATTERN_BASE_IDX 2
+#define regDIG1_DIG_RANDOM_PATTERN_SEED 0x21be
+#define regDIG1_DIG_RANDOM_PATTERN_SEED_BASE_IDX 2
+#define regDIG1_DIG_FIFO_CTRL0 0x21bf
+#define regDIG1_DIG_FIFO_CTRL0_BASE_IDX 2
+#define regDIG1_DIG_FIFO_CTRL1 0x21c0
+#define regDIG1_DIG_FIFO_CTRL1_BASE_IDX 2
+#define regDIG1_HDMI_METADATA_PACKET_CONTROL 0x21c1
+#define regDIG1_HDMI_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define regDIG1_HDMI_CONTROL 0x21c2
+#define regDIG1_HDMI_CONTROL_BASE_IDX 2
+#define regDIG1_HDMI_STATUS 0x21c3
+#define regDIG1_HDMI_STATUS_BASE_IDX 2
+#define regDIG1_HDMI_AUDIO_PACKET_CONTROL 0x21c4
+#define regDIG1_HDMI_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regDIG1_HDMI_ACR_PACKET_CONTROL 0x21c5
+#define regDIG1_HDMI_ACR_PACKET_CONTROL_BASE_IDX 2
+#define regDIG1_HDMI_VBI_PACKET_CONTROL 0x21c6
+#define regDIG1_HDMI_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regDIG1_HDMI_INFOFRAME_CONTROL0 0x21c7
+#define regDIG1_HDMI_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regDIG1_HDMI_INFOFRAME_CONTROL1 0x21c8
+#define regDIG1_HDMI_INFOFRAME_CONTROL1_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL0 0x21c9
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL0_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL6 0x21ca
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL6_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL5 0x21cb
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL5_BASE_IDX 2
+#define regDIG1_HDMI_GC 0x21cc
+#define regDIG1_HDMI_GC_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL1 0x21cd
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL1_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL2 0x21ce
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL2_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL3 0x21cf
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL3_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL4 0x21d0
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL4_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL7 0x21d1
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL7_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL8 0x21d2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL8_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL9 0x21d3
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL9_BASE_IDX 2
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL10 0x21d4
+#define regDIG1_HDMI_GENERIC_PACKET_CONTROL10_BASE_IDX 2
+#define regDIG1_HDMI_DB_CONTROL 0x21d5
+#define regDIG1_HDMI_DB_CONTROL_BASE_IDX 2
+#define regDIG1_HDMI_ACR_32_0 0x21d6
+#define regDIG1_HDMI_ACR_32_0_BASE_IDX 2
+#define regDIG1_HDMI_ACR_32_1 0x21d7
+#define regDIG1_HDMI_ACR_32_1_BASE_IDX 2
+#define regDIG1_HDMI_ACR_44_0 0x21d8
+#define regDIG1_HDMI_ACR_44_0_BASE_IDX 2
+#define regDIG1_HDMI_ACR_44_1 0x21d9
+#define regDIG1_HDMI_ACR_44_1_BASE_IDX 2
+#define regDIG1_HDMI_ACR_48_0 0x21da
+#define regDIG1_HDMI_ACR_48_0_BASE_IDX 2
+#define regDIG1_HDMI_ACR_48_1 0x21db
+#define regDIG1_HDMI_ACR_48_1_BASE_IDX 2
+#define regDIG1_HDMI_ACR_STATUS_0 0x21dc
+#define regDIG1_HDMI_ACR_STATUS_0_BASE_IDX 2
+#define regDIG1_HDMI_ACR_STATUS_1 0x21dd
+#define regDIG1_HDMI_ACR_STATUS_1_BASE_IDX 2
+#define regDIG1_AFMT_CNTL 0x21de
+#define regDIG1_AFMT_CNTL_BASE_IDX 2
+#define regDIG1_DIG_BE_CLK_CNTL 0x21df
+#define regDIG1_DIG_BE_CLK_CNTL_BASE_IDX 2
+#define regDIG1_DIG_BE_CNTL 0x21e0
+#define regDIG1_DIG_BE_CNTL_BASE_IDX 2
+#define regDIG1_DIG_BE_EN_CNTL 0x21e1
+#define regDIG1_DIG_BE_EN_CNTL_BASE_IDX 2
+#define regDIG1_TMDS_CNTL 0x2208
+#define regDIG1_TMDS_CNTL_BASE_IDX 2
+#define regDIG1_TMDS_CONTROL_CHAR 0x2209
+#define regDIG1_TMDS_CONTROL_CHAR_BASE_IDX 2
+#define regDIG1_TMDS_CONTROL0_FEEDBACK 0x220a
+#define regDIG1_TMDS_CONTROL0_FEEDBACK_BASE_IDX 2
+#define regDIG1_TMDS_STEREOSYNC_CTL_SEL 0x220b
+#define regDIG1_TMDS_STEREOSYNC_CTL_SEL_BASE_IDX 2
+#define regDIG1_TMDS_SYNC_CHAR_PATTERN_0_1 0x220c
+#define regDIG1_TMDS_SYNC_CHAR_PATTERN_0_1_BASE_IDX 2
+#define regDIG1_TMDS_SYNC_CHAR_PATTERN_2_3 0x220d
+#define regDIG1_TMDS_SYNC_CHAR_PATTERN_2_3_BASE_IDX 2
+#define regDIG1_TMDS_CTL_BITS 0x220f
+#define regDIG1_TMDS_CTL_BITS_BASE_IDX 2
+#define regDIG1_TMDS_DCBALANCER_CONTROL 0x2210
+#define regDIG1_TMDS_DCBALANCER_CONTROL_BASE_IDX 2
+#define regDIG1_TMDS_SYNC_DCBALANCE_CHAR 0x2211
+#define regDIG1_TMDS_SYNC_DCBALANCE_CHAR_BASE_IDX 2
+#define regDIG1_TMDS_CTL0_1_GEN_CNTL 0x2212
+#define regDIG1_TMDS_CTL0_1_GEN_CNTL_BASE_IDX 2
+#define regDIG1_TMDS_CTL2_3_GEN_CNTL 0x2213
+#define regDIG1_TMDS_CTL2_3_GEN_CNTL_BASE_IDX 2
+#define regDIG1_DIG_VERSION 0x2215
+#define regDIG1_DIG_VERSION_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp2_dispdec
+// base address: 0x920
+#define regDP2_DP_LINK_CNTL 0x2366
+#define regDP2_DP_LINK_CNTL_BASE_IDX 2
+#define regDP2_DP_PIXEL_FORMAT 0x2367
+#define regDP2_DP_PIXEL_FORMAT_BASE_IDX 2
+#define regDP2_DP_MSA_COLORIMETRY 0x2368
+#define regDP2_DP_MSA_COLORIMETRY_BASE_IDX 2
+#define regDP2_DP_CONFIG 0x2369
+#define regDP2_DP_CONFIG_BASE_IDX 2
+#define regDP2_DP_VID_STREAM_CNTL 0x236a
+#define regDP2_DP_VID_STREAM_CNTL_BASE_IDX 2
+#define regDP2_DP_STEER_FIFO 0x236b
+#define regDP2_DP_STEER_FIFO_BASE_IDX 2
+#define regDP2_DP_MSA_MISC 0x236c
+#define regDP2_DP_MSA_MISC_BASE_IDX 2
+#define regDP2_DP_DPHY_INTERNAL_CTRL 0x236d
+#define regDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+#define regDP2_DP_VID_TIMING 0x236e
+#define regDP2_DP_VID_TIMING_BASE_IDX 2
+#define regDP2_DP_VID_N 0x236f
+#define regDP2_DP_VID_N_BASE_IDX 2
+#define regDP2_DP_VID_M 0x2370
+#define regDP2_DP_VID_M_BASE_IDX 2
+#define regDP2_DP_LINK_FRAMING_CNTL 0x2371
+#define regDP2_DP_LINK_FRAMING_CNTL_BASE_IDX 2
+#define regDP2_DP_HBR2_EYE_PATTERN 0x2372
+#define regDP2_DP_HBR2_EYE_PATTERN_BASE_IDX 2
+#define regDP2_DP_VID_MSA_VBID 0x2373
+#define regDP2_DP_VID_MSA_VBID_BASE_IDX 2
+#define regDP2_DP_VID_INTERRUPT_CNTL 0x2374
+#define regDP2_DP_VID_INTERRUPT_CNTL_BASE_IDX 2
+#define regDP2_DP_DPHY_CNTL 0x2375
+#define regDP2_DP_DPHY_CNTL_BASE_IDX 2
+#define regDP2_DP_DPHY_TRAINING_PATTERN_SEL 0x2376
+#define regDP2_DP_DPHY_TRAINING_PATTERN_SEL_BASE_IDX 2
+#define regDP2_DP_DPHY_SYM0 0x2377
+#define regDP2_DP_DPHY_SYM0_BASE_IDX 2
+#define regDP2_DP_DPHY_SYM1 0x2378
+#define regDP2_DP_DPHY_SYM1_BASE_IDX 2
+#define regDP2_DP_DPHY_SYM2 0x2379
+#define regDP2_DP_DPHY_SYM2_BASE_IDX 2
+#define regDP2_DP_DPHY_8B10B_CNTL 0x237a
+#define regDP2_DP_DPHY_8B10B_CNTL_BASE_IDX 2
+#define regDP2_DP_DPHY_PRBS_CNTL 0x237b
+#define regDP2_DP_DPHY_PRBS_CNTL_BASE_IDX 2
+#define regDP2_DP_DPHY_SCRAM_CNTL 0x237c
+#define regDP2_DP_DPHY_SCRAM_CNTL_BASE_IDX 2
+#define regDP2_DP_DPHY_CRC_CONTROL0 0x237d
+#define regDP2_DP_DPHY_CRC_CONTROL0_BASE_IDX 2
+#define regDP2_DP_DPHY_CRC_CONTROL1 0x237e
+#define regDP2_DP_DPHY_CRC_CONTROL1_BASE_IDX 2
+#define regDP2_DP_DPHY_CRC_RESULT0 0x237f
+#define regDP2_DP_DPHY_CRC_RESULT0_BASE_IDX 2
+#define regDP2_DP_DPHY_CRC_RESULT1 0x2380
+#define regDP2_DP_DPHY_CRC_RESULT1_BASE_IDX 2
+#define regDP2_DP_DPHY_CRC_STATUS 0x2381
+#define regDP2_DP_DPHY_CRC_STATUS_BASE_IDX 2
+#define regDP2_DP_DPHY_FAST_TRAINING 0x2382
+#define regDP2_DP_DPHY_FAST_TRAINING_BASE_IDX 2
+#define regDP2_DP_DPHY_FAST_TRAINING_STATUS 0x2383
+#define regDP2_DP_DPHY_FAST_TRAINING_STATUS_BASE_IDX 2
+#define regDP2_DP_DPHY_CRC_RESULT2 0x2384
+#define regDP2_DP_DPHY_CRC_RESULT2_BASE_IDX 2
+#define regDP2_DP_DPHY_CRC_RESULT3 0x2385
+#define regDP2_DP_DPHY_CRC_RESULT3_BASE_IDX 2
+#define regDP2_DP_SEC_CNTL 0x2389
+#define regDP2_DP_SEC_CNTL_BASE_IDX 2
+#define regDP2_DP_SEC_CNTL1 0x238a
+#define regDP2_DP_SEC_CNTL1_BASE_IDX 2
+#define regDP2_DP_SEC_FRAMING1 0x238b
+#define regDP2_DP_SEC_FRAMING1_BASE_IDX 2
+#define regDP2_DP_SEC_FRAMING2 0x238c
+#define regDP2_DP_SEC_FRAMING2_BASE_IDX 2
+#define regDP2_DP_SEC_FRAMING3 0x238d
+#define regDP2_DP_SEC_FRAMING3_BASE_IDX 2
+#define regDP2_DP_SEC_FRAMING4 0x238e
+#define regDP2_DP_SEC_FRAMING4_BASE_IDX 2
+#define regDP2_DP_SEC_AUD_N 0x238f
+#define regDP2_DP_SEC_AUD_N_BASE_IDX 2
+#define regDP2_DP_SEC_AUD_N_READBACK 0x2390
+#define regDP2_DP_SEC_AUD_N_READBACK_BASE_IDX 2
+#define regDP2_DP_SEC_AUD_M 0x2391
+#define regDP2_DP_SEC_AUD_M_BASE_IDX 2
+#define regDP2_DP_SEC_AUD_M_READBACK 0x2392
+#define regDP2_DP_SEC_AUD_M_READBACK_BASE_IDX 2
+#define regDP2_DP_SEC_TIMESTAMP 0x2393
+#define regDP2_DP_SEC_TIMESTAMP_BASE_IDX 2
+#define regDP2_DP_SEC_PACKET_CNTL 0x2394
+#define regDP2_DP_SEC_PACKET_CNTL_BASE_IDX 2
+#define regDP2_DP_MSE_RATE_CNTL 0x2395
+#define regDP2_DP_MSE_RATE_CNTL_BASE_IDX 2
+#define regDP2_DP_MSE_RATE_UPDATE 0x2397
+#define regDP2_DP_MSE_RATE_UPDATE_BASE_IDX 2
+#define regDP2_DP_MSE_SAT0 0x2398
+#define regDP2_DP_MSE_SAT0_BASE_IDX 2
+#define regDP2_DP_MSE_SAT1 0x2399
+#define regDP2_DP_MSE_SAT1_BASE_IDX 2
+#define regDP2_DP_MSE_SAT2 0x239a
+#define regDP2_DP_MSE_SAT2_BASE_IDX 2
+#define regDP2_DP_MSE_SAT_UPDATE 0x239b
+#define regDP2_DP_MSE_SAT_UPDATE_BASE_IDX 2
+#define regDP2_DP_MSE_LINK_TIMING 0x239c
+#define regDP2_DP_MSE_LINK_TIMING_BASE_IDX 2
+#define regDP2_DP_MSE_MISC_CNTL 0x239d
+#define regDP2_DP_MSE_MISC_CNTL_BASE_IDX 2
+#define regDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x23a2
+#define regDP2_DP_DPHY_BS_SR_SWAP_CNTL_BASE_IDX 2
+#define regDP2_DP_DPHY_HBR2_PATTERN_CONTROL 0x23a3
+#define regDP2_DP_DPHY_HBR2_PATTERN_CONTROL_BASE_IDX 2
+#define regDP2_DP_MSE_SAT0_STATUS 0x23a5
+#define regDP2_DP_MSE_SAT0_STATUS_BASE_IDX 2
+#define regDP2_DP_MSE_SAT1_STATUS 0x23a6
+#define regDP2_DP_MSE_SAT1_STATUS_BASE_IDX 2
+#define regDP2_DP_MSE_SAT2_STATUS 0x23a7
+#define regDP2_DP_MSE_SAT2_STATUS_BASE_IDX 2
+#define regDP2_DP_DPIA_SPARE 0x23a8
+#define regDP2_DP_DPIA_SPARE_BASE_IDX 2
+#define regDP2_DP_MSA_TIMING_PARAM1 0x23aa
+#define regDP2_DP_MSA_TIMING_PARAM1_BASE_IDX 2
+#define regDP2_DP_MSA_TIMING_PARAM2 0x23ab
+#define regDP2_DP_MSA_TIMING_PARAM2_BASE_IDX 2
+#define regDP2_DP_MSA_TIMING_PARAM3 0x23ac
+#define regDP2_DP_MSA_TIMING_PARAM3_BASE_IDX 2
+#define regDP2_DP_MSA_TIMING_PARAM4 0x23ad
+#define regDP2_DP_MSA_TIMING_PARAM4_BASE_IDX 2
+#define regDP2_DP_MSO_CNTL 0x23ae
+#define regDP2_DP_MSO_CNTL_BASE_IDX 2
+#define regDP2_DP_MSO_CNTL1 0x23af
+#define regDP2_DP_MSO_CNTL1_BASE_IDX 2
+#define regDP2_DP_DSC_CNTL 0x23b0
+#define regDP2_DP_DSC_CNTL_BASE_IDX 2
+#define regDP2_DP_SEC_CNTL2 0x23b1
+#define regDP2_DP_SEC_CNTL2_BASE_IDX 2
+#define regDP2_DP_SEC_CNTL3 0x23b2
+#define regDP2_DP_SEC_CNTL3_BASE_IDX 2
+#define regDP2_DP_SEC_CNTL4 0x23b3
+#define regDP2_DP_SEC_CNTL4_BASE_IDX 2
+#define regDP2_DP_SEC_CNTL5 0x23b4
+#define regDP2_DP_SEC_CNTL5_BASE_IDX 2
+#define regDP2_DP_SEC_CNTL6 0x23b5
+#define regDP2_DP_SEC_CNTL6_BASE_IDX 2
+#define regDP2_DP_SEC_CNTL7 0x23b6
+#define regDP2_DP_SEC_CNTL7_BASE_IDX 2
+#define regDP2_DP_DB_CNTL 0x23b7
+#define regDP2_DP_DB_CNTL_BASE_IDX 2
+#define regDP2_DP_MSA_VBID_MISC 0x23b8
+#define regDP2_DP_MSA_VBID_MISC_BASE_IDX 2
+#define regDP2_DP_SEC_METADATA_TRANSMISSION 0x23b9
+#define regDP2_DP_SEC_METADATA_TRANSMISSION_BASE_IDX 2
+#define regDP2_DP_ALPM_CNTL 0x23bb
+#define regDP2_DP_ALPM_CNTL_BASE_IDX 2
+#define regDP2_DP_GSP8_CNTL 0x23bc
+#define regDP2_DP_GSP8_CNTL_BASE_IDX 2
+#define regDP2_DP_GSP9_CNTL 0x23bd
+#define regDP2_DP_GSP9_CNTL_BASE_IDX 2
+#define regDP2_DP_GSP10_CNTL 0x23be
+#define regDP2_DP_GSP10_CNTL_BASE_IDX 2
+#define regDP2_DP_GSP11_CNTL 0x23bf
+#define regDP2_DP_GSP11_CNTL_BASE_IDX 2
+#define regDP2_DP_GSP_EN_DB_STATUS 0x23c0
+#define regDP2_DP_GSP_EN_DB_STATUS_BASE_IDX 2
+#define regDP2_DP_AUXLESS_ALPM_CNTL1 0x23c1
+#define regDP2_DP_AUXLESS_ALPM_CNTL1_BASE_IDX 2
+#define regDP2_DP_AUXLESS_ALPM_CNTL2 0x23c2
+#define regDP2_DP_AUXLESS_ALPM_CNTL2_BASE_IDX 2
+#define regDP2_DP_AUXLESS_ALPM_CNTL3 0x23c3
+#define regDP2_DP_AUXLESS_ALPM_CNTL3_BASE_IDX 2
+#define regDP2_DP_AUXLESS_ALPM_CNTL4 0x23c4
+#define regDP2_DP_AUXLESS_ALPM_CNTL4_BASE_IDX 2
+#define regDP2_DP_AUXLESS_ALPM_CNTL5 0x23c5
+#define regDP2_DP_AUXLESS_ALPM_CNTL5_BASE_IDX 2
+#define regDP2_DP_STREAM_SYMBOL_COUNT_STATUS 0x23c6
+#define regDP2_DP_STREAM_SYMBOL_COUNT_STATUS_BASE_IDX 2
+#define regDP2_DP_STREAM_SYMBOL_COUNT_CONTROL 0x23c7
+#define regDP2_DP_STREAM_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP2_DP_LINK_SYMBOL_COUNT_STATUS0 0x23c8
+#define regDP2_DP_LINK_SYMBOL_COUNT_STATUS0_BASE_IDX 2
+#define regDP2_DP_LINK_SYMBOL_COUNT_STATUS1 0x23c9
+#define regDP2_DP_LINK_SYMBOL_COUNT_STATUS1_BASE_IDX 2
+#define regDP2_DP_LINK_SYMBOL_COUNT_CONTROL 0x23ca
+#define regDP2_DP_LINK_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig2_dispdec
+// base address: 0x920
+#define regDIG2_DIG_FE_CNTL 0x22db
+#define regDIG2_DIG_FE_CNTL_BASE_IDX 2
+#define regDIG2_DIG_FE_CLK_CNTL 0x22dc
+#define regDIG2_DIG_FE_CLK_CNTL_BASE_IDX 2
+#define regDIG2_DIG_FE_EN_CNTL 0x22dd
+#define regDIG2_DIG_FE_EN_CNTL_BASE_IDX 2
+#define regDIG2_DIG_OUTPUT_CRC_CNTL 0x22de
+#define regDIG2_DIG_OUTPUT_CRC_CNTL_BASE_IDX 2
+#define regDIG2_DIG_OUTPUT_CRC_RESULT 0x22df
+#define regDIG2_DIG_OUTPUT_CRC_RESULT_BASE_IDX 2
+#define regDIG2_DIG_CLOCK_PATTERN 0x22e0
+#define regDIG2_DIG_CLOCK_PATTERN_BASE_IDX 2
+#define regDIG2_DIG_TEST_PATTERN 0x22e1
+#define regDIG2_DIG_TEST_PATTERN_BASE_IDX 2
+#define regDIG2_DIG_RANDOM_PATTERN_SEED 0x22e2
+#define regDIG2_DIG_RANDOM_PATTERN_SEED_BASE_IDX 2
+#define regDIG2_DIG_FIFO_CTRL0 0x22e3
+#define regDIG2_DIG_FIFO_CTRL0_BASE_IDX 2
+#define regDIG2_DIG_FIFO_CTRL1 0x22e4
+#define regDIG2_DIG_FIFO_CTRL1_BASE_IDX 2
+#define regDIG2_HDMI_METADATA_PACKET_CONTROL 0x22e5
+#define regDIG2_HDMI_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define regDIG2_HDMI_CONTROL 0x22e6
+#define regDIG2_HDMI_CONTROL_BASE_IDX 2
+#define regDIG2_HDMI_STATUS 0x22e7
+#define regDIG2_HDMI_STATUS_BASE_IDX 2
+#define regDIG2_HDMI_AUDIO_PACKET_CONTROL 0x22e8
+#define regDIG2_HDMI_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regDIG2_HDMI_ACR_PACKET_CONTROL 0x22e9
+#define regDIG2_HDMI_ACR_PACKET_CONTROL_BASE_IDX 2
+#define regDIG2_HDMI_VBI_PACKET_CONTROL 0x22ea
+#define regDIG2_HDMI_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regDIG2_HDMI_INFOFRAME_CONTROL0 0x22eb
+#define regDIG2_HDMI_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regDIG2_HDMI_INFOFRAME_CONTROL1 0x22ec
+#define regDIG2_HDMI_INFOFRAME_CONTROL1_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL0 0x22ed
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL0_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL6 0x22ee
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL6_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL5 0x22ef
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL5_BASE_IDX 2
+#define regDIG2_HDMI_GC 0x22f0
+#define regDIG2_HDMI_GC_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL1 0x22f1
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL1_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL2 0x22f2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL2_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL3 0x22f3
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL3_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL4 0x22f4
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL4_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL7 0x22f5
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL7_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL8 0x22f6
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL8_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL9 0x22f7
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL9_BASE_IDX 2
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL10 0x22f8
+#define regDIG2_HDMI_GENERIC_PACKET_CONTROL10_BASE_IDX 2
+#define regDIG2_HDMI_DB_CONTROL 0x22f9
+#define regDIG2_HDMI_DB_CONTROL_BASE_IDX 2
+#define regDIG2_HDMI_ACR_32_0 0x22fa
+#define regDIG2_HDMI_ACR_32_0_BASE_IDX 2
+#define regDIG2_HDMI_ACR_32_1 0x22fb
+#define regDIG2_HDMI_ACR_32_1_BASE_IDX 2
+#define regDIG2_HDMI_ACR_44_0 0x22fc
+#define regDIG2_HDMI_ACR_44_0_BASE_IDX 2
+#define regDIG2_HDMI_ACR_44_1 0x22fd
+#define regDIG2_HDMI_ACR_44_1_BASE_IDX 2
+#define regDIG2_HDMI_ACR_48_0 0x22fe
+#define regDIG2_HDMI_ACR_48_0_BASE_IDX 2
+#define regDIG2_HDMI_ACR_48_1 0x22ff
+#define regDIG2_HDMI_ACR_48_1_BASE_IDX 2
+#define regDIG2_HDMI_ACR_STATUS_0 0x2300
+#define regDIG2_HDMI_ACR_STATUS_0_BASE_IDX 2
+#define regDIG2_HDMI_ACR_STATUS_1 0x2301
+#define regDIG2_HDMI_ACR_STATUS_1_BASE_IDX 2
+#define regDIG2_AFMT_CNTL 0x2302
+#define regDIG2_AFMT_CNTL_BASE_IDX 2
+#define regDIG2_DIG_BE_CLK_CNTL 0x2303
+#define regDIG2_DIG_BE_CLK_CNTL_BASE_IDX 2
+#define regDIG2_DIG_BE_CNTL 0x2304
+#define regDIG2_DIG_BE_CNTL_BASE_IDX 2
+#define regDIG2_DIG_BE_EN_CNTL 0x2305
+#define regDIG2_DIG_BE_EN_CNTL_BASE_IDX 2
+#define regDIG2_TMDS_CNTL 0x232c
+#define regDIG2_TMDS_CNTL_BASE_IDX 2
+#define regDIG2_TMDS_CONTROL_CHAR 0x232d
+#define regDIG2_TMDS_CONTROL_CHAR_BASE_IDX 2
+#define regDIG2_TMDS_CONTROL0_FEEDBACK 0x232e
+#define regDIG2_TMDS_CONTROL0_FEEDBACK_BASE_IDX 2
+#define regDIG2_TMDS_STEREOSYNC_CTL_SEL 0x232f
+#define regDIG2_TMDS_STEREOSYNC_CTL_SEL_BASE_IDX 2
+#define regDIG2_TMDS_SYNC_CHAR_PATTERN_0_1 0x2330
+#define regDIG2_TMDS_SYNC_CHAR_PATTERN_0_1_BASE_IDX 2
+#define regDIG2_TMDS_SYNC_CHAR_PATTERN_2_3 0x2331
+#define regDIG2_TMDS_SYNC_CHAR_PATTERN_2_3_BASE_IDX 2
+#define regDIG2_TMDS_CTL_BITS 0x2333
+#define regDIG2_TMDS_CTL_BITS_BASE_IDX 2
+#define regDIG2_TMDS_DCBALANCER_CONTROL 0x2334
+#define regDIG2_TMDS_DCBALANCER_CONTROL_BASE_IDX 2
+#define regDIG2_TMDS_SYNC_DCBALANCE_CHAR 0x2335
+#define regDIG2_TMDS_SYNC_DCBALANCE_CHAR_BASE_IDX 2
+#define regDIG2_TMDS_CTL0_1_GEN_CNTL 0x2336
+#define regDIG2_TMDS_CTL0_1_GEN_CNTL_BASE_IDX 2
+#define regDIG2_TMDS_CTL2_3_GEN_CNTL 0x2337
+#define regDIG2_TMDS_CTL2_3_GEN_CNTL_BASE_IDX 2
+#define regDIG2_DIG_VERSION 0x2339
+#define regDIG2_DIG_VERSION_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp3_dispdec
+// base address: 0xdb0
+#define regDP3_DP_LINK_CNTL 0x248a
+#define regDP3_DP_LINK_CNTL_BASE_IDX 2
+#define regDP3_DP_PIXEL_FORMAT 0x248b
+#define regDP3_DP_PIXEL_FORMAT_BASE_IDX 2
+#define regDP3_DP_MSA_COLORIMETRY 0x248c
+#define regDP3_DP_MSA_COLORIMETRY_BASE_IDX 2
+#define regDP3_DP_CONFIG 0x248d
+#define regDP3_DP_CONFIG_BASE_IDX 2
+#define regDP3_DP_VID_STREAM_CNTL 0x248e
+#define regDP3_DP_VID_STREAM_CNTL_BASE_IDX 2
+#define regDP3_DP_STEER_FIFO 0x248f
+#define regDP3_DP_STEER_FIFO_BASE_IDX 2
+#define regDP3_DP_MSA_MISC 0x2490
+#define regDP3_DP_MSA_MISC_BASE_IDX 2
+#define regDP3_DP_DPHY_INTERNAL_CTRL 0x2491
+#define regDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+#define regDP3_DP_VID_TIMING 0x2492
+#define regDP3_DP_VID_TIMING_BASE_IDX 2
+#define regDP3_DP_VID_N 0x2493
+#define regDP3_DP_VID_N_BASE_IDX 2
+#define regDP3_DP_VID_M 0x2494
+#define regDP3_DP_VID_M_BASE_IDX 2
+#define regDP3_DP_LINK_FRAMING_CNTL 0x2495
+#define regDP3_DP_LINK_FRAMING_CNTL_BASE_IDX 2
+#define regDP3_DP_HBR2_EYE_PATTERN 0x2496
+#define regDP3_DP_HBR2_EYE_PATTERN_BASE_IDX 2
+#define regDP3_DP_VID_MSA_VBID 0x2497
+#define regDP3_DP_VID_MSA_VBID_BASE_IDX 2
+#define regDP3_DP_VID_INTERRUPT_CNTL 0x2498
+#define regDP3_DP_VID_INTERRUPT_CNTL_BASE_IDX 2
+#define regDP3_DP_DPHY_CNTL 0x2499
+#define regDP3_DP_DPHY_CNTL_BASE_IDX 2
+#define regDP3_DP_DPHY_TRAINING_PATTERN_SEL 0x249a
+#define regDP3_DP_DPHY_TRAINING_PATTERN_SEL_BASE_IDX 2
+#define regDP3_DP_DPHY_SYM0 0x249b
+#define regDP3_DP_DPHY_SYM0_BASE_IDX 2
+#define regDP3_DP_DPHY_SYM1 0x249c
+#define regDP3_DP_DPHY_SYM1_BASE_IDX 2
+#define regDP3_DP_DPHY_SYM2 0x249d
+#define regDP3_DP_DPHY_SYM2_BASE_IDX 2
+#define regDP3_DP_DPHY_8B10B_CNTL 0x249e
+#define regDP3_DP_DPHY_8B10B_CNTL_BASE_IDX 2
+#define regDP3_DP_DPHY_PRBS_CNTL 0x249f
+#define regDP3_DP_DPHY_PRBS_CNTL_BASE_IDX 2
+#define regDP3_DP_DPHY_SCRAM_CNTL 0x24a0
+#define regDP3_DP_DPHY_SCRAM_CNTL_BASE_IDX 2
+#define regDP3_DP_DPHY_CRC_CONTROL0 0x24a1
+#define regDP3_DP_DPHY_CRC_CONTROL0_BASE_IDX 2
+#define regDP3_DP_DPHY_CRC_CONTROL1 0x24a2
+#define regDP3_DP_DPHY_CRC_CONTROL1_BASE_IDX 2
+#define regDP3_DP_DPHY_CRC_RESULT0 0x24a3
+#define regDP3_DP_DPHY_CRC_RESULT0_BASE_IDX 2
+#define regDP3_DP_DPHY_CRC_RESULT1 0x24a4
+#define regDP3_DP_DPHY_CRC_RESULT1_BASE_IDX 2
+#define regDP3_DP_DPHY_CRC_STATUS 0x24a5
+#define regDP3_DP_DPHY_CRC_STATUS_BASE_IDX 2
+#define regDP3_DP_DPHY_FAST_TRAINING 0x24a6
+#define regDP3_DP_DPHY_FAST_TRAINING_BASE_IDX 2
+#define regDP3_DP_DPHY_FAST_TRAINING_STATUS 0x24a7
+#define regDP3_DP_DPHY_FAST_TRAINING_STATUS_BASE_IDX 2
+#define regDP3_DP_DPHY_CRC_RESULT2 0x24a8
+#define regDP3_DP_DPHY_CRC_RESULT2_BASE_IDX 2
+#define regDP3_DP_DPHY_CRC_RESULT3 0x24a9
+#define regDP3_DP_DPHY_CRC_RESULT3_BASE_IDX 2
+#define regDP3_DP_SEC_CNTL 0x24ad
+#define regDP3_DP_SEC_CNTL_BASE_IDX 2
+#define regDP3_DP_SEC_CNTL1 0x24ae
+#define regDP3_DP_SEC_CNTL1_BASE_IDX 2
+#define regDP3_DP_SEC_FRAMING1 0x24af
+#define regDP3_DP_SEC_FRAMING1_BASE_IDX 2
+#define regDP3_DP_SEC_FRAMING2 0x24b0
+#define regDP3_DP_SEC_FRAMING2_BASE_IDX 2
+#define regDP3_DP_SEC_FRAMING3 0x24b1
+#define regDP3_DP_SEC_FRAMING3_BASE_IDX 2
+#define regDP3_DP_SEC_FRAMING4 0x24b2
+#define regDP3_DP_SEC_FRAMING4_BASE_IDX 2
+#define regDP3_DP_SEC_AUD_N 0x24b3
+#define regDP3_DP_SEC_AUD_N_BASE_IDX 2
+#define regDP3_DP_SEC_AUD_N_READBACK 0x24b4
+#define regDP3_DP_SEC_AUD_N_READBACK_BASE_IDX 2
+#define regDP3_DP_SEC_AUD_M 0x24b5
+#define regDP3_DP_SEC_AUD_M_BASE_IDX 2
+#define regDP3_DP_SEC_AUD_M_READBACK 0x24b6
+#define regDP3_DP_SEC_AUD_M_READBACK_BASE_IDX 2
+#define regDP3_DP_SEC_TIMESTAMP 0x24b7
+#define regDP3_DP_SEC_TIMESTAMP_BASE_IDX 2
+#define regDP3_DP_SEC_PACKET_CNTL 0x24b8
+#define regDP3_DP_SEC_PACKET_CNTL_BASE_IDX 2
+#define regDP3_DP_MSE_RATE_CNTL 0x24b9
+#define regDP3_DP_MSE_RATE_CNTL_BASE_IDX 2
+#define regDP3_DP_MSE_RATE_UPDATE 0x24bb
+#define regDP3_DP_MSE_RATE_UPDATE_BASE_IDX 2
+#define regDP3_DP_MSE_SAT0 0x24bc
+#define regDP3_DP_MSE_SAT0_BASE_IDX 2
+#define regDP3_DP_MSE_SAT1 0x24bd
+#define regDP3_DP_MSE_SAT1_BASE_IDX 2
+#define regDP3_DP_MSE_SAT2 0x24be
+#define regDP3_DP_MSE_SAT2_BASE_IDX 2
+#define regDP3_DP_MSE_SAT_UPDATE 0x24bf
+#define regDP3_DP_MSE_SAT_UPDATE_BASE_IDX 2
+#define regDP3_DP_MSE_LINK_TIMING 0x24c0
+#define regDP3_DP_MSE_LINK_TIMING_BASE_IDX 2
+#define regDP3_DP_MSE_MISC_CNTL 0x24c1
+#define regDP3_DP_MSE_MISC_CNTL_BASE_IDX 2
+#define regDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x24c6
+#define regDP3_DP_DPHY_BS_SR_SWAP_CNTL_BASE_IDX 2
+#define regDP3_DP_DPHY_HBR2_PATTERN_CONTROL 0x24c7
+#define regDP3_DP_DPHY_HBR2_PATTERN_CONTROL_BASE_IDX 2
+#define regDP3_DP_MSE_SAT0_STATUS 0x24c9
+#define regDP3_DP_MSE_SAT0_STATUS_BASE_IDX 2
+#define regDP3_DP_MSE_SAT1_STATUS 0x24ca
+#define regDP3_DP_MSE_SAT1_STATUS_BASE_IDX 2
+#define regDP3_DP_MSE_SAT2_STATUS 0x24cb
+#define regDP3_DP_MSE_SAT2_STATUS_BASE_IDX 2
+#define regDP3_DP_DPIA_SPARE 0x24cc
+#define regDP3_DP_DPIA_SPARE_BASE_IDX 2
+#define regDP3_DP_MSA_TIMING_PARAM1 0x24ce
+#define regDP3_DP_MSA_TIMING_PARAM1_BASE_IDX 2
+#define regDP3_DP_MSA_TIMING_PARAM2 0x24cf
+#define regDP3_DP_MSA_TIMING_PARAM2_BASE_IDX 2
+#define regDP3_DP_MSA_TIMING_PARAM3 0x24d0
+#define regDP3_DP_MSA_TIMING_PARAM3_BASE_IDX 2
+#define regDP3_DP_MSA_TIMING_PARAM4 0x24d1
+#define regDP3_DP_MSA_TIMING_PARAM4_BASE_IDX 2
+#define regDP3_DP_MSO_CNTL 0x24d2
+#define regDP3_DP_MSO_CNTL_BASE_IDX 2
+#define regDP3_DP_MSO_CNTL1 0x24d3
+#define regDP3_DP_MSO_CNTL1_BASE_IDX 2
+#define regDP3_DP_DSC_CNTL 0x24d4
+#define regDP3_DP_DSC_CNTL_BASE_IDX 2
+#define regDP3_DP_SEC_CNTL2 0x24d5
+#define regDP3_DP_SEC_CNTL2_BASE_IDX 2
+#define regDP3_DP_SEC_CNTL3 0x24d6
+#define regDP3_DP_SEC_CNTL3_BASE_IDX 2
+#define regDP3_DP_SEC_CNTL4 0x24d7
+#define regDP3_DP_SEC_CNTL4_BASE_IDX 2
+#define regDP3_DP_SEC_CNTL5 0x24d8
+#define regDP3_DP_SEC_CNTL5_BASE_IDX 2
+#define regDP3_DP_SEC_CNTL6 0x24d9
+#define regDP3_DP_SEC_CNTL6_BASE_IDX 2
+#define regDP3_DP_SEC_CNTL7 0x24da
+#define regDP3_DP_SEC_CNTL7_BASE_IDX 2
+#define regDP3_DP_DB_CNTL 0x24db
+#define regDP3_DP_DB_CNTL_BASE_IDX 2
+#define regDP3_DP_MSA_VBID_MISC 0x24dc
+#define regDP3_DP_MSA_VBID_MISC_BASE_IDX 2
+#define regDP3_DP_SEC_METADATA_TRANSMISSION 0x24dd
+#define regDP3_DP_SEC_METADATA_TRANSMISSION_BASE_IDX 2
+#define regDP3_DP_ALPM_CNTL 0x24df
+#define regDP3_DP_ALPM_CNTL_BASE_IDX 2
+#define regDP3_DP_GSP8_CNTL 0x24e0
+#define regDP3_DP_GSP8_CNTL_BASE_IDX 2
+#define regDP3_DP_GSP9_CNTL 0x24e1
+#define regDP3_DP_GSP9_CNTL_BASE_IDX 2
+#define regDP3_DP_GSP10_CNTL 0x24e2
+#define regDP3_DP_GSP10_CNTL_BASE_IDX 2
+#define regDP3_DP_GSP11_CNTL 0x24e3
+#define regDP3_DP_GSP11_CNTL_BASE_IDX 2
+#define regDP3_DP_GSP_EN_DB_STATUS 0x24e4
+#define regDP3_DP_GSP_EN_DB_STATUS_BASE_IDX 2
+#define regDP3_DP_AUXLESS_ALPM_CNTL1 0x24e5
+#define regDP3_DP_AUXLESS_ALPM_CNTL1_BASE_IDX 2
+#define regDP3_DP_AUXLESS_ALPM_CNTL2 0x24e6
+#define regDP3_DP_AUXLESS_ALPM_CNTL2_BASE_IDX 2
+#define regDP3_DP_AUXLESS_ALPM_CNTL3 0x24e7
+#define regDP3_DP_AUXLESS_ALPM_CNTL3_BASE_IDX 2
+#define regDP3_DP_AUXLESS_ALPM_CNTL4 0x24e8
+#define regDP3_DP_AUXLESS_ALPM_CNTL4_BASE_IDX 2
+#define regDP3_DP_AUXLESS_ALPM_CNTL5 0x24e9
+#define regDP3_DP_AUXLESS_ALPM_CNTL5_BASE_IDX 2
+#define regDP3_DP_STREAM_SYMBOL_COUNT_STATUS 0x24ea
+#define regDP3_DP_STREAM_SYMBOL_COUNT_STATUS_BASE_IDX 2
+#define regDP3_DP_STREAM_SYMBOL_COUNT_CONTROL 0x24eb
+#define regDP3_DP_STREAM_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP3_DP_LINK_SYMBOL_COUNT_STATUS0 0x24ec
+#define regDP3_DP_LINK_SYMBOL_COUNT_STATUS0_BASE_IDX 2
+#define regDP3_DP_LINK_SYMBOL_COUNT_STATUS1 0x24ed
+#define regDP3_DP_LINK_SYMBOL_COUNT_STATUS1_BASE_IDX 2
+#define regDP3_DP_LINK_SYMBOL_COUNT_CONTROL 0x24ee
+#define regDP3_DP_LINK_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig3_dispdec
+// base address: 0xdb0
+#define regDIG3_DIG_FE_CNTL 0x23ff
+#define regDIG3_DIG_FE_CNTL_BASE_IDX 2
+#define regDIG3_DIG_FE_CLK_CNTL 0x2400
+#define regDIG3_DIG_FE_CLK_CNTL_BASE_IDX 2
+#define regDIG3_DIG_FE_EN_CNTL 0x2401
+#define regDIG3_DIG_FE_EN_CNTL_BASE_IDX 2
+#define regDIG3_DIG_OUTPUT_CRC_CNTL 0x2402
+#define regDIG3_DIG_OUTPUT_CRC_CNTL_BASE_IDX 2
+#define regDIG3_DIG_OUTPUT_CRC_RESULT 0x2403
+#define regDIG3_DIG_OUTPUT_CRC_RESULT_BASE_IDX 2
+#define regDIG3_DIG_CLOCK_PATTERN 0x2404
+#define regDIG3_DIG_CLOCK_PATTERN_BASE_IDX 2
+#define regDIG3_DIG_TEST_PATTERN 0x2405
+#define regDIG3_DIG_TEST_PATTERN_BASE_IDX 2
+#define regDIG3_DIG_RANDOM_PATTERN_SEED 0x2406
+#define regDIG3_DIG_RANDOM_PATTERN_SEED_BASE_IDX 2
+#define regDIG3_DIG_FIFO_CTRL0 0x2407
+#define regDIG3_DIG_FIFO_CTRL0_BASE_IDX 2
+#define regDIG3_DIG_FIFO_CTRL1 0x2408
+#define regDIG3_DIG_FIFO_CTRL1_BASE_IDX 2
+#define regDIG3_HDMI_METADATA_PACKET_CONTROL 0x2409
+#define regDIG3_HDMI_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define regDIG3_HDMI_CONTROL 0x240a
+#define regDIG3_HDMI_CONTROL_BASE_IDX 2
+#define regDIG3_HDMI_STATUS 0x240b
+#define regDIG3_HDMI_STATUS_BASE_IDX 2
+#define regDIG3_HDMI_AUDIO_PACKET_CONTROL 0x240c
+#define regDIG3_HDMI_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regDIG3_HDMI_ACR_PACKET_CONTROL 0x240d
+#define regDIG3_HDMI_ACR_PACKET_CONTROL_BASE_IDX 2
+#define regDIG3_HDMI_VBI_PACKET_CONTROL 0x240e
+#define regDIG3_HDMI_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regDIG3_HDMI_INFOFRAME_CONTROL0 0x240f
+#define regDIG3_HDMI_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regDIG3_HDMI_INFOFRAME_CONTROL1 0x2410
+#define regDIG3_HDMI_INFOFRAME_CONTROL1_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL0 0x2411
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL0_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL6 0x2412
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL6_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL5 0x2413
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL5_BASE_IDX 2
+#define regDIG3_HDMI_GC 0x2414
+#define regDIG3_HDMI_GC_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL1 0x2415
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL1_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL2 0x2416
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL2_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL3 0x2417
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL3_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL4 0x2418
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL4_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL7 0x2419
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL7_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL8 0x241a
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL8_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL9 0x241b
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL9_BASE_IDX 2
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL10 0x241c
+#define regDIG3_HDMI_GENERIC_PACKET_CONTROL10_BASE_IDX 2
+#define regDIG3_HDMI_DB_CONTROL 0x241d
+#define regDIG3_HDMI_DB_CONTROL_BASE_IDX 2
+#define regDIG3_HDMI_ACR_32_0 0x241e
+#define regDIG3_HDMI_ACR_32_0_BASE_IDX 2
+#define regDIG3_HDMI_ACR_32_1 0x241f
+#define regDIG3_HDMI_ACR_32_1_BASE_IDX 2
+#define regDIG3_HDMI_ACR_44_0 0x2420
+#define regDIG3_HDMI_ACR_44_0_BASE_IDX 2
+#define regDIG3_HDMI_ACR_44_1 0x2421
+#define regDIG3_HDMI_ACR_44_1_BASE_IDX 2
+#define regDIG3_HDMI_ACR_48_0 0x2422
+#define regDIG3_HDMI_ACR_48_0_BASE_IDX 2
+#define regDIG3_HDMI_ACR_48_1 0x2423
+#define regDIG3_HDMI_ACR_48_1_BASE_IDX 2
+#define regDIG3_HDMI_ACR_STATUS_0 0x2424
+#define regDIG3_HDMI_ACR_STATUS_0_BASE_IDX 2
+#define regDIG3_HDMI_ACR_STATUS_1 0x2425
+#define regDIG3_HDMI_ACR_STATUS_1_BASE_IDX 2
+#define regDIG3_AFMT_CNTL 0x2426
+#define regDIG3_AFMT_CNTL_BASE_IDX 2
+#define regDIG3_DIG_BE_CLK_CNTL 0x2427
+#define regDIG3_DIG_BE_CLK_CNTL_BASE_IDX 2
+#define regDIG3_DIG_BE_CNTL 0x2428
+#define regDIG3_DIG_BE_CNTL_BASE_IDX 2
+#define regDIG3_DIG_BE_EN_CNTL 0x2429
+#define regDIG3_DIG_BE_EN_CNTL_BASE_IDX 2
+#define regDIG3_TMDS_CNTL 0x2450
+#define regDIG3_TMDS_CNTL_BASE_IDX 2
+#define regDIG3_TMDS_CONTROL_CHAR 0x2451
+#define regDIG3_TMDS_CONTROL_CHAR_BASE_IDX 2
+#define regDIG3_TMDS_CONTROL0_FEEDBACK 0x2452
+#define regDIG3_TMDS_CONTROL0_FEEDBACK_BASE_IDX 2
+#define regDIG3_TMDS_STEREOSYNC_CTL_SEL 0x2453
+#define regDIG3_TMDS_STEREOSYNC_CTL_SEL_BASE_IDX 2
+#define regDIG3_TMDS_SYNC_CHAR_PATTERN_0_1 0x2454
+#define regDIG3_TMDS_SYNC_CHAR_PATTERN_0_1_BASE_IDX 2
+#define regDIG3_TMDS_SYNC_CHAR_PATTERN_2_3 0x2455
+#define regDIG3_TMDS_SYNC_CHAR_PATTERN_2_3_BASE_IDX 2
+#define regDIG3_TMDS_CTL_BITS 0x2457
+#define regDIG3_TMDS_CTL_BITS_BASE_IDX 2
+#define regDIG3_TMDS_DCBALANCER_CONTROL 0x2458
+#define regDIG3_TMDS_DCBALANCER_CONTROL_BASE_IDX 2
+#define regDIG3_TMDS_SYNC_DCBALANCE_CHAR 0x2459
+#define regDIG3_TMDS_SYNC_DCBALANCE_CHAR_BASE_IDX 2
+#define regDIG3_TMDS_CTL0_1_GEN_CNTL 0x245a
+#define regDIG3_TMDS_CTL0_1_GEN_CNTL_BASE_IDX 2
+#define regDIG3_TMDS_CTL2_3_GEN_CNTL 0x245b
+#define regDIG3_TMDS_CTL2_3_GEN_CNTL_BASE_IDX 2
+#define regDIG3_DIG_VERSION 0x245d
+#define regDIG3_DIG_VERSION_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp4_dispdec
+// base address: 0x1240
+#define regDP4_DP_LINK_CNTL 0x25ae
+#define regDP4_DP_LINK_CNTL_BASE_IDX 2
+#define regDP4_DP_PIXEL_FORMAT 0x25af
+#define regDP4_DP_PIXEL_FORMAT_BASE_IDX 2
+#define regDP4_DP_MSA_COLORIMETRY 0x25b0
+#define regDP4_DP_MSA_COLORIMETRY_BASE_IDX 2
+#define regDP4_DP_CONFIG 0x25b1
+#define regDP4_DP_CONFIG_BASE_IDX 2
+#define regDP4_DP_VID_STREAM_CNTL 0x25b2
+#define regDP4_DP_VID_STREAM_CNTL_BASE_IDX 2
+#define regDP4_DP_STEER_FIFO 0x25b3
+#define regDP4_DP_STEER_FIFO_BASE_IDX 2
+#define regDP4_DP_MSA_MISC 0x25b4
+#define regDP4_DP_MSA_MISC_BASE_IDX 2
+#define regDP4_DP_DPHY_INTERNAL_CTRL 0x25b5
+#define regDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+#define regDP4_DP_VID_TIMING 0x25b6
+#define regDP4_DP_VID_TIMING_BASE_IDX 2
+#define regDP4_DP_VID_N 0x25b7
+#define regDP4_DP_VID_N_BASE_IDX 2
+#define regDP4_DP_VID_M 0x25b8
+#define regDP4_DP_VID_M_BASE_IDX 2
+#define regDP4_DP_LINK_FRAMING_CNTL 0x25b9
+#define regDP4_DP_LINK_FRAMING_CNTL_BASE_IDX 2
+#define regDP4_DP_HBR2_EYE_PATTERN 0x25ba
+#define regDP4_DP_HBR2_EYE_PATTERN_BASE_IDX 2
+#define regDP4_DP_VID_MSA_VBID 0x25bb
+#define regDP4_DP_VID_MSA_VBID_BASE_IDX 2
+#define regDP4_DP_VID_INTERRUPT_CNTL 0x25bc
+#define regDP4_DP_VID_INTERRUPT_CNTL_BASE_IDX 2
+#define regDP4_DP_DPHY_CNTL 0x25bd
+#define regDP4_DP_DPHY_CNTL_BASE_IDX 2
+#define regDP4_DP_DPHY_TRAINING_PATTERN_SEL 0x25be
+#define regDP4_DP_DPHY_TRAINING_PATTERN_SEL_BASE_IDX 2
+#define regDP4_DP_DPHY_SYM0 0x25bf
+#define regDP4_DP_DPHY_SYM0_BASE_IDX 2
+#define regDP4_DP_DPHY_SYM1 0x25c0
+#define regDP4_DP_DPHY_SYM1_BASE_IDX 2
+#define regDP4_DP_DPHY_SYM2 0x25c1
+#define regDP4_DP_DPHY_SYM2_BASE_IDX 2
+#define regDP4_DP_DPHY_8B10B_CNTL 0x25c2
+#define regDP4_DP_DPHY_8B10B_CNTL_BASE_IDX 2
+#define regDP4_DP_DPHY_PRBS_CNTL 0x25c3
+#define regDP4_DP_DPHY_PRBS_CNTL_BASE_IDX 2
+#define regDP4_DP_DPHY_SCRAM_CNTL 0x25c4
+#define regDP4_DP_DPHY_SCRAM_CNTL_BASE_IDX 2
+#define regDP4_DP_DPHY_CRC_CONTROL0 0x25c5
+#define regDP4_DP_DPHY_CRC_CONTROL0_BASE_IDX 2
+#define regDP4_DP_DPHY_CRC_CONTROL1 0x25c6
+#define regDP4_DP_DPHY_CRC_CONTROL1_BASE_IDX 2
+#define regDP4_DP_DPHY_CRC_RESULT0 0x25c7
+#define regDP4_DP_DPHY_CRC_RESULT0_BASE_IDX 2
+#define regDP4_DP_DPHY_CRC_RESULT1 0x25c8
+#define regDP4_DP_DPHY_CRC_RESULT1_BASE_IDX 2
+#define regDP4_DP_DPHY_CRC_STATUS 0x25c9
+#define regDP4_DP_DPHY_CRC_STATUS_BASE_IDX 2
+#define regDP4_DP_DPHY_FAST_TRAINING 0x25ca
+#define regDP4_DP_DPHY_FAST_TRAINING_BASE_IDX 2
+#define regDP4_DP_DPHY_FAST_TRAINING_STATUS 0x25cb
+#define regDP4_DP_DPHY_FAST_TRAINING_STATUS_BASE_IDX 2
+#define regDP4_DP_DPHY_CRC_RESULT2 0x25cc
+#define regDP4_DP_DPHY_CRC_RESULT2_BASE_IDX 2
+#define regDP4_DP_DPHY_CRC_RESULT3 0x25cd
+#define regDP4_DP_DPHY_CRC_RESULT3_BASE_IDX 2
+#define regDP4_DP_SEC_CNTL 0x25d1
+#define regDP4_DP_SEC_CNTL_BASE_IDX 2
+#define regDP4_DP_SEC_CNTL1 0x25d2
+#define regDP4_DP_SEC_CNTL1_BASE_IDX 2
+#define regDP4_DP_SEC_FRAMING1 0x25d3
+#define regDP4_DP_SEC_FRAMING1_BASE_IDX 2
+#define regDP4_DP_SEC_FRAMING2 0x25d4
+#define regDP4_DP_SEC_FRAMING2_BASE_IDX 2
+#define regDP4_DP_SEC_FRAMING3 0x25d5
+#define regDP4_DP_SEC_FRAMING3_BASE_IDX 2
+#define regDP4_DP_SEC_FRAMING4 0x25d6
+#define regDP4_DP_SEC_FRAMING4_BASE_IDX 2
+#define regDP4_DP_SEC_AUD_N 0x25d7
+#define regDP4_DP_SEC_AUD_N_BASE_IDX 2
+#define regDP4_DP_SEC_AUD_N_READBACK 0x25d8
+#define regDP4_DP_SEC_AUD_N_READBACK_BASE_IDX 2
+#define regDP4_DP_SEC_AUD_M 0x25d9
+#define regDP4_DP_SEC_AUD_M_BASE_IDX 2
+#define regDP4_DP_SEC_AUD_M_READBACK 0x25da
+#define regDP4_DP_SEC_AUD_M_READBACK_BASE_IDX 2
+#define regDP4_DP_SEC_TIMESTAMP 0x25db
+#define regDP4_DP_SEC_TIMESTAMP_BASE_IDX 2
+#define regDP4_DP_SEC_PACKET_CNTL 0x25dc
+#define regDP4_DP_SEC_PACKET_CNTL_BASE_IDX 2
+#define regDP4_DP_MSE_RATE_CNTL 0x25dd
+#define regDP4_DP_MSE_RATE_CNTL_BASE_IDX 2
+#define regDP4_DP_MSE_RATE_UPDATE 0x25df
+#define regDP4_DP_MSE_RATE_UPDATE_BASE_IDX 2
+#define regDP4_DP_MSE_SAT0 0x25e0
+#define regDP4_DP_MSE_SAT0_BASE_IDX 2
+#define regDP4_DP_MSE_SAT1 0x25e1
+#define regDP4_DP_MSE_SAT1_BASE_IDX 2
+#define regDP4_DP_MSE_SAT2 0x25e2
+#define regDP4_DP_MSE_SAT2_BASE_IDX 2
+#define regDP4_DP_MSE_SAT_UPDATE 0x25e3
+#define regDP4_DP_MSE_SAT_UPDATE_BASE_IDX 2
+#define regDP4_DP_MSE_LINK_TIMING 0x25e4
+#define regDP4_DP_MSE_LINK_TIMING_BASE_IDX 2
+#define regDP4_DP_MSE_MISC_CNTL 0x25e5
+#define regDP4_DP_MSE_MISC_CNTL_BASE_IDX 2
+#define regDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x25ea
+#define regDP4_DP_DPHY_BS_SR_SWAP_CNTL_BASE_IDX 2
+#define regDP4_DP_DPHY_HBR2_PATTERN_CONTROL 0x25eb
+#define regDP4_DP_DPHY_HBR2_PATTERN_CONTROL_BASE_IDX 2
+#define regDP4_DP_MSE_SAT0_STATUS 0x25ed
+#define regDP4_DP_MSE_SAT0_STATUS_BASE_IDX 2
+#define regDP4_DP_MSE_SAT1_STATUS 0x25ee
+#define regDP4_DP_MSE_SAT1_STATUS_BASE_IDX 2
+#define regDP4_DP_MSE_SAT2_STATUS 0x25ef
+#define regDP4_DP_MSE_SAT2_STATUS_BASE_IDX 2
+#define regDP4_DP_DPIA_SPARE 0x25f0
+#define regDP4_DP_DPIA_SPARE_BASE_IDX 2
+#define regDP4_DP_MSA_TIMING_PARAM1 0x25f2
+#define regDP4_DP_MSA_TIMING_PARAM1_BASE_IDX 2
+#define regDP4_DP_MSA_TIMING_PARAM2 0x25f3
+#define regDP4_DP_MSA_TIMING_PARAM2_BASE_IDX 2
+#define regDP4_DP_MSA_TIMING_PARAM3 0x25f4
+#define regDP4_DP_MSA_TIMING_PARAM3_BASE_IDX 2
+#define regDP4_DP_MSA_TIMING_PARAM4 0x25f5
+#define regDP4_DP_MSA_TIMING_PARAM4_BASE_IDX 2
+#define regDP4_DP_MSO_CNTL 0x25f6
+#define regDP4_DP_MSO_CNTL_BASE_IDX 2
+#define regDP4_DP_MSO_CNTL1 0x25f7
+#define regDP4_DP_MSO_CNTL1_BASE_IDX 2
+#define regDP4_DP_DSC_CNTL 0x25f8
+#define regDP4_DP_DSC_CNTL_BASE_IDX 2
+#define regDP4_DP_SEC_CNTL2 0x25f9
+#define regDP4_DP_SEC_CNTL2_BASE_IDX 2
+#define regDP4_DP_SEC_CNTL3 0x25fa
+#define regDP4_DP_SEC_CNTL3_BASE_IDX 2
+#define regDP4_DP_SEC_CNTL4 0x25fb
+#define regDP4_DP_SEC_CNTL4_BASE_IDX 2
+#define regDP4_DP_SEC_CNTL5 0x25fc
+#define regDP4_DP_SEC_CNTL5_BASE_IDX 2
+#define regDP4_DP_SEC_CNTL6 0x25fd
+#define regDP4_DP_SEC_CNTL6_BASE_IDX 2
+#define regDP4_DP_SEC_CNTL7 0x25fe
+#define regDP4_DP_SEC_CNTL7_BASE_IDX 2
+#define regDP4_DP_DB_CNTL 0x25ff
+#define regDP4_DP_DB_CNTL_BASE_IDX 2
+#define regDP4_DP_MSA_VBID_MISC 0x2600
+#define regDP4_DP_MSA_VBID_MISC_BASE_IDX 2
+#define regDP4_DP_SEC_METADATA_TRANSMISSION 0x2601
+#define regDP4_DP_SEC_METADATA_TRANSMISSION_BASE_IDX 2
+#define regDP4_DP_ALPM_CNTL 0x2603
+#define regDP4_DP_ALPM_CNTL_BASE_IDX 2
+#define regDP4_DP_GSP8_CNTL 0x2604
+#define regDP4_DP_GSP8_CNTL_BASE_IDX 2
+#define regDP4_DP_GSP9_CNTL 0x2605
+#define regDP4_DP_GSP9_CNTL_BASE_IDX 2
+#define regDP4_DP_GSP10_CNTL 0x2606
+#define regDP4_DP_GSP10_CNTL_BASE_IDX 2
+#define regDP4_DP_GSP11_CNTL 0x2607
+#define regDP4_DP_GSP11_CNTL_BASE_IDX 2
+#define regDP4_DP_GSP_EN_DB_STATUS 0x2608
+#define regDP4_DP_GSP_EN_DB_STATUS_BASE_IDX 2
+#define regDP4_DP_AUXLESS_ALPM_CNTL1 0x2609
+#define regDP4_DP_AUXLESS_ALPM_CNTL1_BASE_IDX 2
+#define regDP4_DP_AUXLESS_ALPM_CNTL2 0x260a
+#define regDP4_DP_AUXLESS_ALPM_CNTL2_BASE_IDX 2
+#define regDP4_DP_AUXLESS_ALPM_CNTL3 0x260b
+#define regDP4_DP_AUXLESS_ALPM_CNTL3_BASE_IDX 2
+#define regDP4_DP_AUXLESS_ALPM_CNTL4 0x260c
+#define regDP4_DP_AUXLESS_ALPM_CNTL4_BASE_IDX 2
+#define regDP4_DP_AUXLESS_ALPM_CNTL5 0x260d
+#define regDP4_DP_AUXLESS_ALPM_CNTL5_BASE_IDX 2
+#define regDP4_DP_STREAM_SYMBOL_COUNT_STATUS 0x260e
+#define regDP4_DP_STREAM_SYMBOL_COUNT_STATUS_BASE_IDX 2
+#define regDP4_DP_STREAM_SYMBOL_COUNT_CONTROL 0x260f
+#define regDP4_DP_STREAM_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP4_DP_LINK_SYMBOL_COUNT_STATUS0 0x2610
+#define regDP4_DP_LINK_SYMBOL_COUNT_STATUS0_BASE_IDX 2
+#define regDP4_DP_LINK_SYMBOL_COUNT_STATUS1 0x2611
+#define regDP4_DP_LINK_SYMBOL_COUNT_STATUS1_BASE_IDX 2
+#define regDP4_DP_LINK_SYMBOL_COUNT_CONTROL 0x2612
+#define regDP4_DP_LINK_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig4_dispdec
+// base address: 0x1240
+#define regDIG4_DIG_FE_CNTL 0x2523
+#define regDIG4_DIG_FE_CNTL_BASE_IDX 2
+#define regDIG4_DIG_FE_CLK_CNTL 0x2524
+#define regDIG4_DIG_FE_CLK_CNTL_BASE_IDX 2
+#define regDIG4_DIG_FE_EN_CNTL 0x2525
+#define regDIG4_DIG_FE_EN_CNTL_BASE_IDX 2
+#define regDIG4_DIG_OUTPUT_CRC_CNTL 0x2526
+#define regDIG4_DIG_OUTPUT_CRC_CNTL_BASE_IDX 2
+#define regDIG4_DIG_OUTPUT_CRC_RESULT 0x2527
+#define regDIG4_DIG_OUTPUT_CRC_RESULT_BASE_IDX 2
+#define regDIG4_DIG_CLOCK_PATTERN 0x2528
+#define regDIG4_DIG_CLOCK_PATTERN_BASE_IDX 2
+#define regDIG4_DIG_TEST_PATTERN 0x2529
+#define regDIG4_DIG_TEST_PATTERN_BASE_IDX 2
+#define regDIG4_DIG_RANDOM_PATTERN_SEED 0x252a
+#define regDIG4_DIG_RANDOM_PATTERN_SEED_BASE_IDX 2
+#define regDIG4_DIG_FIFO_CTRL0 0x252b
+#define regDIG4_DIG_FIFO_CTRL0_BASE_IDX 2
+#define regDIG4_DIG_FIFO_CTRL1 0x252c
+#define regDIG4_DIG_FIFO_CTRL1_BASE_IDX 2
+#define regDIG4_HDMI_METADATA_PACKET_CONTROL 0x252d
+#define regDIG4_HDMI_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define regDIG4_HDMI_CONTROL 0x252e
+#define regDIG4_HDMI_CONTROL_BASE_IDX 2
+#define regDIG4_HDMI_STATUS 0x252f
+#define regDIG4_HDMI_STATUS_BASE_IDX 2
+#define regDIG4_HDMI_AUDIO_PACKET_CONTROL 0x2530
+#define regDIG4_HDMI_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regDIG4_HDMI_ACR_PACKET_CONTROL 0x2531
+#define regDIG4_HDMI_ACR_PACKET_CONTROL_BASE_IDX 2
+#define regDIG4_HDMI_VBI_PACKET_CONTROL 0x2532
+#define regDIG4_HDMI_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regDIG4_HDMI_INFOFRAME_CONTROL0 0x2533
+#define regDIG4_HDMI_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regDIG4_HDMI_INFOFRAME_CONTROL1 0x2534
+#define regDIG4_HDMI_INFOFRAME_CONTROL1_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL0 0x2535
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL0_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL6 0x2536
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL6_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL5 0x2537
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL5_BASE_IDX 2
+#define regDIG4_HDMI_GC 0x2538
+#define regDIG4_HDMI_GC_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL1 0x2539
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL1_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL2 0x253a
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL2_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL3 0x253b
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL3_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL4 0x253c
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL4_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL7 0x253d
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL7_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL8 0x253e
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL8_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL9 0x253f
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL9_BASE_IDX 2
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL10 0x2540
+#define regDIG4_HDMI_GENERIC_PACKET_CONTROL10_BASE_IDX 2
+#define regDIG4_HDMI_DB_CONTROL 0x2541
+#define regDIG4_HDMI_DB_CONTROL_BASE_IDX 2
+#define regDIG4_HDMI_ACR_32_0 0x2542
+#define regDIG4_HDMI_ACR_32_0_BASE_IDX 2
+#define regDIG4_HDMI_ACR_32_1 0x2543
+#define regDIG4_HDMI_ACR_32_1_BASE_IDX 2
+#define regDIG4_HDMI_ACR_44_0 0x2544
+#define regDIG4_HDMI_ACR_44_0_BASE_IDX 2
+#define regDIG4_HDMI_ACR_44_1 0x2545
+#define regDIG4_HDMI_ACR_44_1_BASE_IDX 2
+#define regDIG4_HDMI_ACR_48_0 0x2546
+#define regDIG4_HDMI_ACR_48_0_BASE_IDX 2
+#define regDIG4_HDMI_ACR_48_1 0x2547
+#define regDIG4_HDMI_ACR_48_1_BASE_IDX 2
+#define regDIG4_HDMI_ACR_STATUS_0 0x2548
+#define regDIG4_HDMI_ACR_STATUS_0_BASE_IDX 2
+#define regDIG4_HDMI_ACR_STATUS_1 0x2549
+#define regDIG4_HDMI_ACR_STATUS_1_BASE_IDX 2
+#define regDIG4_AFMT_CNTL 0x254a
+#define regDIG4_AFMT_CNTL_BASE_IDX 2
+#define regDIG4_DIG_BE_CLK_CNTL 0x254b
+#define regDIG4_DIG_BE_CLK_CNTL_BASE_IDX 2
+#define regDIG4_DIG_BE_CNTL 0x254c
+#define regDIG4_DIG_BE_CNTL_BASE_IDX 2
+#define regDIG4_DIG_BE_EN_CNTL 0x254d
+#define regDIG4_DIG_BE_EN_CNTL_BASE_IDX 2
+#define regDIG4_TMDS_CNTL 0x2574
+#define regDIG4_TMDS_CNTL_BASE_IDX 2
+#define regDIG4_TMDS_CONTROL_CHAR 0x2575
+#define regDIG4_TMDS_CONTROL_CHAR_BASE_IDX 2
+#define regDIG4_TMDS_CONTROL0_FEEDBACK 0x2576
+#define regDIG4_TMDS_CONTROL0_FEEDBACK_BASE_IDX 2
+#define regDIG4_TMDS_STEREOSYNC_CTL_SEL 0x2577
+#define regDIG4_TMDS_STEREOSYNC_CTL_SEL_BASE_IDX 2
+#define regDIG4_TMDS_SYNC_CHAR_PATTERN_0_1 0x2578
+#define regDIG4_TMDS_SYNC_CHAR_PATTERN_0_1_BASE_IDX 2
+#define regDIG4_TMDS_SYNC_CHAR_PATTERN_2_3 0x2579
+#define regDIG4_TMDS_SYNC_CHAR_PATTERN_2_3_BASE_IDX 2
+#define regDIG4_TMDS_CTL_BITS 0x257b
+#define regDIG4_TMDS_CTL_BITS_BASE_IDX 2
+#define regDIG4_TMDS_DCBALANCER_CONTROL 0x257c
+#define regDIG4_TMDS_DCBALANCER_CONTROL_BASE_IDX 2
+#define regDIG4_TMDS_SYNC_DCBALANCE_CHAR 0x257d
+#define regDIG4_TMDS_SYNC_DCBALANCE_CHAR_BASE_IDX 2
+#define regDIG4_TMDS_CTL0_1_GEN_CNTL 0x257e
+#define regDIG4_TMDS_CTL0_1_GEN_CNTL_BASE_IDX 2
+#define regDIG4_TMDS_CTL2_3_GEN_CNTL 0x257f
+#define regDIG4_TMDS_CTL2_3_GEN_CNTL_BASE_IDX 2
+#define regDIG4_DIG_VERSION 0x2581
+#define regDIG4_DIG_VERSION_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig0_afmt_afmt_dispdec
+// base address: 0x154cc
+#define regAFMT0_AFMT_ACP 0x2073
+#define regAFMT0_AFMT_ACP_BASE_IDX 2
+#define regAFMT0_AFMT_VBI_PACKET_CONTROL 0x2074
+#define regAFMT0_AFMT_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT0_AFMT_AUDIO_PACKET_CONTROL2 0x2075
+#define regAFMT0_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 2
+#define regAFMT0_AFMT_AUDIO_INFO0 0x2076
+#define regAFMT0_AFMT_AUDIO_INFO0_BASE_IDX 2
+#define regAFMT0_AFMT_AUDIO_INFO1 0x2077
+#define regAFMT0_AFMT_AUDIO_INFO1_BASE_IDX 2
+#define regAFMT0_AFMT_60958_0 0x2078
+#define regAFMT0_AFMT_60958_0_BASE_IDX 2
+#define regAFMT0_AFMT_60958_1 0x2079
+#define regAFMT0_AFMT_60958_1_BASE_IDX 2
+#define regAFMT0_AFMT_AUDIO_CRC_CONTROL 0x207a
+#define regAFMT0_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define regAFMT0_AFMT_RAMP_CONTROL0 0x207b
+#define regAFMT0_AFMT_RAMP_CONTROL0_BASE_IDX 2
+#define regAFMT0_AFMT_RAMP_CONTROL1 0x207c
+#define regAFMT0_AFMT_RAMP_CONTROL1_BASE_IDX 2
+#define regAFMT0_AFMT_RAMP_CONTROL2 0x207d
+#define regAFMT0_AFMT_RAMP_CONTROL2_BASE_IDX 2
+#define regAFMT0_AFMT_RAMP_CONTROL3 0x207e
+#define regAFMT0_AFMT_RAMP_CONTROL3_BASE_IDX 2
+#define regAFMT0_AFMT_60958_2 0x207f
+#define regAFMT0_AFMT_60958_2_BASE_IDX 2
+#define regAFMT0_AFMT_AUDIO_CRC_RESULT 0x2080
+#define regAFMT0_AFMT_AUDIO_CRC_RESULT_BASE_IDX 2
+#define regAFMT0_AFMT_STATUS 0x2081
+#define regAFMT0_AFMT_STATUS_BASE_IDX 2
+#define regAFMT0_AFMT_AUDIO_PACKET_CONTROL 0x2082
+#define regAFMT0_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT0_AFMT_INFOFRAME_CONTROL0 0x2083
+#define regAFMT0_AFMT_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regAFMT0_AFMT_INTERRUPT_STATUS 0x2084
+#define regAFMT0_AFMT_INTERRUPT_STATUS_BASE_IDX 2
+#define regAFMT0_AFMT_AUDIO_SRC_CONTROL 0x2085
+#define regAFMT0_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 2
+#define regAFMT0_AFMT_MEM_PWR 0x2087
+#define regAFMT0_AFMT_MEM_PWR_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig1_afmt_afmt_dispdec
+// base address: 0x1595c
+#define regAFMT1_AFMT_ACP 0x2197
+#define regAFMT1_AFMT_ACP_BASE_IDX 2
+#define regAFMT1_AFMT_VBI_PACKET_CONTROL 0x2198
+#define regAFMT1_AFMT_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT1_AFMT_AUDIO_PACKET_CONTROL2 0x2199
+#define regAFMT1_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 2
+#define regAFMT1_AFMT_AUDIO_INFO0 0x219a
+#define regAFMT1_AFMT_AUDIO_INFO0_BASE_IDX 2
+#define regAFMT1_AFMT_AUDIO_INFO1 0x219b
+#define regAFMT1_AFMT_AUDIO_INFO1_BASE_IDX 2
+#define regAFMT1_AFMT_60958_0 0x219c
+#define regAFMT1_AFMT_60958_0_BASE_IDX 2
+#define regAFMT1_AFMT_60958_1 0x219d
+#define regAFMT1_AFMT_60958_1_BASE_IDX 2
+#define regAFMT1_AFMT_AUDIO_CRC_CONTROL 0x219e
+#define regAFMT1_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define regAFMT1_AFMT_RAMP_CONTROL0 0x219f
+#define regAFMT1_AFMT_RAMP_CONTROL0_BASE_IDX 2
+#define regAFMT1_AFMT_RAMP_CONTROL1 0x21a0
+#define regAFMT1_AFMT_RAMP_CONTROL1_BASE_IDX 2
+#define regAFMT1_AFMT_RAMP_CONTROL2 0x21a1
+#define regAFMT1_AFMT_RAMP_CONTROL2_BASE_IDX 2
+#define regAFMT1_AFMT_RAMP_CONTROL3 0x21a2
+#define regAFMT1_AFMT_RAMP_CONTROL3_BASE_IDX 2
+#define regAFMT1_AFMT_60958_2 0x21a3
+#define regAFMT1_AFMT_60958_2_BASE_IDX 2
+#define regAFMT1_AFMT_AUDIO_CRC_RESULT 0x21a4
+#define regAFMT1_AFMT_AUDIO_CRC_RESULT_BASE_IDX 2
+#define regAFMT1_AFMT_STATUS 0x21a5
+#define regAFMT1_AFMT_STATUS_BASE_IDX 2
+#define regAFMT1_AFMT_AUDIO_PACKET_CONTROL 0x21a6
+#define regAFMT1_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT1_AFMT_INFOFRAME_CONTROL0 0x21a7
+#define regAFMT1_AFMT_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regAFMT1_AFMT_INTERRUPT_STATUS 0x21a8
+#define regAFMT1_AFMT_INTERRUPT_STATUS_BASE_IDX 2
+#define regAFMT1_AFMT_AUDIO_SRC_CONTROL 0x21a9
+#define regAFMT1_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 2
+#define regAFMT1_AFMT_MEM_PWR 0x21ab
+#define regAFMT1_AFMT_MEM_PWR_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig2_afmt_afmt_dispdec
+// base address: 0x15dec
+#define regAFMT2_AFMT_ACP 0x22bb
+#define regAFMT2_AFMT_ACP_BASE_IDX 2
+#define regAFMT2_AFMT_VBI_PACKET_CONTROL 0x22bc
+#define regAFMT2_AFMT_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT2_AFMT_AUDIO_PACKET_CONTROL2 0x22bd
+#define regAFMT2_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 2
+#define regAFMT2_AFMT_AUDIO_INFO0 0x22be
+#define regAFMT2_AFMT_AUDIO_INFO0_BASE_IDX 2
+#define regAFMT2_AFMT_AUDIO_INFO1 0x22bf
+#define regAFMT2_AFMT_AUDIO_INFO1_BASE_IDX 2
+#define regAFMT2_AFMT_60958_0 0x22c0
+#define regAFMT2_AFMT_60958_0_BASE_IDX 2
+#define regAFMT2_AFMT_60958_1 0x22c1
+#define regAFMT2_AFMT_60958_1_BASE_IDX 2
+#define regAFMT2_AFMT_AUDIO_CRC_CONTROL 0x22c2
+#define regAFMT2_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define regAFMT2_AFMT_RAMP_CONTROL0 0x22c3
+#define regAFMT2_AFMT_RAMP_CONTROL0_BASE_IDX 2
+#define regAFMT2_AFMT_RAMP_CONTROL1 0x22c4
+#define regAFMT2_AFMT_RAMP_CONTROL1_BASE_IDX 2
+#define regAFMT2_AFMT_RAMP_CONTROL2 0x22c5
+#define regAFMT2_AFMT_RAMP_CONTROL2_BASE_IDX 2
+#define regAFMT2_AFMT_RAMP_CONTROL3 0x22c6
+#define regAFMT2_AFMT_RAMP_CONTROL3_BASE_IDX 2
+#define regAFMT2_AFMT_60958_2 0x22c7
+#define regAFMT2_AFMT_60958_2_BASE_IDX 2
+#define regAFMT2_AFMT_AUDIO_CRC_RESULT 0x22c8
+#define regAFMT2_AFMT_AUDIO_CRC_RESULT_BASE_IDX 2
+#define regAFMT2_AFMT_STATUS 0x22c9
+#define regAFMT2_AFMT_STATUS_BASE_IDX 2
+#define regAFMT2_AFMT_AUDIO_PACKET_CONTROL 0x22ca
+#define regAFMT2_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT2_AFMT_INFOFRAME_CONTROL0 0x22cb
+#define regAFMT2_AFMT_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regAFMT2_AFMT_INTERRUPT_STATUS 0x22cc
+#define regAFMT2_AFMT_INTERRUPT_STATUS_BASE_IDX 2
+#define regAFMT2_AFMT_AUDIO_SRC_CONTROL 0x22cd
+#define regAFMT2_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 2
+#define regAFMT2_AFMT_MEM_PWR 0x22cf
+#define regAFMT2_AFMT_MEM_PWR_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig3_afmt_afmt_dispdec
+// base address: 0x1627c
+#define regAFMT3_AFMT_ACP 0x23df
+#define regAFMT3_AFMT_ACP_BASE_IDX 2
+#define regAFMT3_AFMT_VBI_PACKET_CONTROL 0x23e0
+#define regAFMT3_AFMT_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT3_AFMT_AUDIO_PACKET_CONTROL2 0x23e1
+#define regAFMT3_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 2
+#define regAFMT3_AFMT_AUDIO_INFO0 0x23e2
+#define regAFMT3_AFMT_AUDIO_INFO0_BASE_IDX 2
+#define regAFMT3_AFMT_AUDIO_INFO1 0x23e3
+#define regAFMT3_AFMT_AUDIO_INFO1_BASE_IDX 2
+#define regAFMT3_AFMT_60958_0 0x23e4
+#define regAFMT3_AFMT_60958_0_BASE_IDX 2
+#define regAFMT3_AFMT_60958_1 0x23e5
+#define regAFMT3_AFMT_60958_1_BASE_IDX 2
+#define regAFMT3_AFMT_AUDIO_CRC_CONTROL 0x23e6
+#define regAFMT3_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define regAFMT3_AFMT_RAMP_CONTROL0 0x23e7
+#define regAFMT3_AFMT_RAMP_CONTROL0_BASE_IDX 2
+#define regAFMT3_AFMT_RAMP_CONTROL1 0x23e8
+#define regAFMT3_AFMT_RAMP_CONTROL1_BASE_IDX 2
+#define regAFMT3_AFMT_RAMP_CONTROL2 0x23e9
+#define regAFMT3_AFMT_RAMP_CONTROL2_BASE_IDX 2
+#define regAFMT3_AFMT_RAMP_CONTROL3 0x23ea
+#define regAFMT3_AFMT_RAMP_CONTROL3_BASE_IDX 2
+#define regAFMT3_AFMT_60958_2 0x23eb
+#define regAFMT3_AFMT_60958_2_BASE_IDX 2
+#define regAFMT3_AFMT_AUDIO_CRC_RESULT 0x23ec
+#define regAFMT3_AFMT_AUDIO_CRC_RESULT_BASE_IDX 2
+#define regAFMT3_AFMT_STATUS 0x23ed
+#define regAFMT3_AFMT_STATUS_BASE_IDX 2
+#define regAFMT3_AFMT_AUDIO_PACKET_CONTROL 0x23ee
+#define regAFMT3_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT3_AFMT_INFOFRAME_CONTROL0 0x23ef
+#define regAFMT3_AFMT_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regAFMT3_AFMT_INTERRUPT_STATUS 0x23f0
+#define regAFMT3_AFMT_INTERRUPT_STATUS_BASE_IDX 2
+#define regAFMT3_AFMT_AUDIO_SRC_CONTROL 0x23f1
+#define regAFMT3_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 2
+#define regAFMT3_AFMT_MEM_PWR 0x23f3
+#define regAFMT3_AFMT_MEM_PWR_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig4_afmt_afmt_dispdec
+// base address: 0x1670c
+#define regAFMT4_AFMT_ACP 0x2503
+#define regAFMT4_AFMT_ACP_BASE_IDX 2
+#define regAFMT4_AFMT_VBI_PACKET_CONTROL 0x2504
+#define regAFMT4_AFMT_VBI_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT4_AFMT_AUDIO_PACKET_CONTROL2 0x2505
+#define regAFMT4_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 2
+#define regAFMT4_AFMT_AUDIO_INFO0 0x2506
+#define regAFMT4_AFMT_AUDIO_INFO0_BASE_IDX 2
+#define regAFMT4_AFMT_AUDIO_INFO1 0x2507
+#define regAFMT4_AFMT_AUDIO_INFO1_BASE_IDX 2
+#define regAFMT4_AFMT_60958_0 0x2508
+#define regAFMT4_AFMT_60958_0_BASE_IDX 2
+#define regAFMT4_AFMT_60958_1 0x2509
+#define regAFMT4_AFMT_60958_1_BASE_IDX 2
+#define regAFMT4_AFMT_AUDIO_CRC_CONTROL 0x250a
+#define regAFMT4_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define regAFMT4_AFMT_RAMP_CONTROL0 0x250b
+#define regAFMT4_AFMT_RAMP_CONTROL0_BASE_IDX 2
+#define regAFMT4_AFMT_RAMP_CONTROL1 0x250c
+#define regAFMT4_AFMT_RAMP_CONTROL1_BASE_IDX 2
+#define regAFMT4_AFMT_RAMP_CONTROL2 0x250d
+#define regAFMT4_AFMT_RAMP_CONTROL2_BASE_IDX 2
+#define regAFMT4_AFMT_RAMP_CONTROL3 0x250e
+#define regAFMT4_AFMT_RAMP_CONTROL3_BASE_IDX 2
+#define regAFMT4_AFMT_60958_2 0x250f
+#define regAFMT4_AFMT_60958_2_BASE_IDX 2
+#define regAFMT4_AFMT_AUDIO_CRC_RESULT 0x2510
+#define regAFMT4_AFMT_AUDIO_CRC_RESULT_BASE_IDX 2
+#define regAFMT4_AFMT_STATUS 0x2511
+#define regAFMT4_AFMT_STATUS_BASE_IDX 2
+#define regAFMT4_AFMT_AUDIO_PACKET_CONTROL 0x2512
+#define regAFMT4_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define regAFMT4_AFMT_INFOFRAME_CONTROL0 0x2513
+#define regAFMT4_AFMT_INFOFRAME_CONTROL0_BASE_IDX 2
+#define regAFMT4_AFMT_INTERRUPT_STATUS 0x2514
+#define regAFMT4_AFMT_INTERRUPT_STATUS_BASE_IDX 2
+#define regAFMT4_AFMT_AUDIO_SRC_CONTROL 0x2515
+#define regAFMT4_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 2
+#define regAFMT4_AFMT_MEM_PWR 0x2517
+#define regAFMT4_AFMT_MEM_PWR_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig0_dme_dme_dispdec
+// base address: 0x15544
+#define regDME0_DME_CONTROL 0x2091
+#define regDME0_DME_CONTROL_BASE_IDX 2
+#define regDME0_DME_MEMORY_CONTROL 0x2092
+#define regDME0_DME_MEMORY_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig0_vpg_vpg_dispdec
+// base address: 0x154a0
+#define regVPG0_VPG_GENERIC_PACKET_ACCESS_CTRL 0x2068
+#define regVPG0_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 2
+#define regVPG0_VPG_GENERIC_PACKET_DATA 0x2069
+#define regVPG0_VPG_GENERIC_PACKET_DATA_BASE_IDX 2
+#define regVPG0_VPG_GSP_FRAME_UPDATE_CTRL 0x206a
+#define regVPG0_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 2
+#define regVPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x206b
+#define regVPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 2
+#define regVPG0_VPG_GENERIC_STATUS 0x206c
+#define regVPG0_VPG_GENERIC_STATUS_BASE_IDX 2
+#define regVPG0_VPG_MEM_PWR 0x206d
+#define regVPG0_VPG_MEM_PWR_BASE_IDX 2
+#define regVPG0_VPG_ISRC1_2_ACCESS_CTRL 0x206e
+#define regVPG0_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 2
+#define regVPG0_VPG_ISRC1_2_DATA 0x206f
+#define regVPG0_VPG_ISRC1_2_DATA_BASE_IDX 2
+#define regVPG0_VPG_MPEG_INFO0 0x2070
+#define regVPG0_VPG_MPEG_INFO0_BASE_IDX 2
+#define regVPG0_VPG_MPEG_INFO1 0x2071
+#define regVPG0_VPG_MPEG_INFO1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig1_dme_dme_dispdec
+// base address: 0x159d4
+#define regDME1_DME_CONTROL 0x21b5
+#define regDME1_DME_CONTROL_BASE_IDX 2
+#define regDME1_DME_MEMORY_CONTROL 0x21b6
+#define regDME1_DME_MEMORY_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig1_vpg_vpg_dispdec
+// base address: 0x15930
+#define regVPG1_VPG_GENERIC_PACKET_ACCESS_CTRL 0x218c
+#define regVPG1_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 2
+#define regVPG1_VPG_GENERIC_PACKET_DATA 0x218d
+#define regVPG1_VPG_GENERIC_PACKET_DATA_BASE_IDX 2
+#define regVPG1_VPG_GSP_FRAME_UPDATE_CTRL 0x218e
+#define regVPG1_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 2
+#define regVPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x218f
+#define regVPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 2
+#define regVPG1_VPG_GENERIC_STATUS 0x2190
+#define regVPG1_VPG_GENERIC_STATUS_BASE_IDX 2
+#define regVPG1_VPG_MEM_PWR 0x2191
+#define regVPG1_VPG_MEM_PWR_BASE_IDX 2
+#define regVPG1_VPG_ISRC1_2_ACCESS_CTRL 0x2192
+#define regVPG1_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 2
+#define regVPG1_VPG_ISRC1_2_DATA 0x2193
+#define regVPG1_VPG_ISRC1_2_DATA_BASE_IDX 2
+#define regVPG1_VPG_MPEG_INFO0 0x2194
+#define regVPG1_VPG_MPEG_INFO0_BASE_IDX 2
+#define regVPG1_VPG_MPEG_INFO1 0x2195
+#define regVPG1_VPG_MPEG_INFO1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig2_dme_dme_dispdec
+// base address: 0x15e64
+#define regDME2_DME_CONTROL 0x22d9
+#define regDME2_DME_CONTROL_BASE_IDX 2
+#define regDME2_DME_MEMORY_CONTROL 0x22da
+#define regDME2_DME_MEMORY_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig2_vpg_vpg_dispdec
+// base address: 0x15dc0
+#define regVPG2_VPG_GENERIC_PACKET_ACCESS_CTRL 0x22b0
+#define regVPG2_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 2
+#define regVPG2_VPG_GENERIC_PACKET_DATA 0x22b1
+#define regVPG2_VPG_GENERIC_PACKET_DATA_BASE_IDX 2
+#define regVPG2_VPG_GSP_FRAME_UPDATE_CTRL 0x22b2
+#define regVPG2_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 2
+#define regVPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x22b3
+#define regVPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 2
+#define regVPG2_VPG_GENERIC_STATUS 0x22b4
+#define regVPG2_VPG_GENERIC_STATUS_BASE_IDX 2
+#define regVPG2_VPG_MEM_PWR 0x22b5
+#define regVPG2_VPG_MEM_PWR_BASE_IDX 2
+#define regVPG2_VPG_ISRC1_2_ACCESS_CTRL 0x22b6
+#define regVPG2_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 2
+#define regVPG2_VPG_ISRC1_2_DATA 0x22b7
+#define regVPG2_VPG_ISRC1_2_DATA_BASE_IDX 2
+#define regVPG2_VPG_MPEG_INFO0 0x22b8
+#define regVPG2_VPG_MPEG_INFO0_BASE_IDX 2
+#define regVPG2_VPG_MPEG_INFO1 0x22b9
+#define regVPG2_VPG_MPEG_INFO1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig3_dme_dme_dispdec
+// base address: 0x162f4
+#define regDME3_DME_CONTROL 0x23fd
+#define regDME3_DME_CONTROL_BASE_IDX 2
+#define regDME3_DME_MEMORY_CONTROL 0x23fe
+#define regDME3_DME_MEMORY_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig3_vpg_vpg_dispdec
+// base address: 0x16250
+#define regVPG3_VPG_GENERIC_PACKET_ACCESS_CTRL 0x23d4
+#define regVPG3_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 2
+#define regVPG3_VPG_GENERIC_PACKET_DATA 0x23d5
+#define regVPG3_VPG_GENERIC_PACKET_DATA_BASE_IDX 2
+#define regVPG3_VPG_GSP_FRAME_UPDATE_CTRL 0x23d6
+#define regVPG3_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 2
+#define regVPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x23d7
+#define regVPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 2
+#define regVPG3_VPG_GENERIC_STATUS 0x23d8
+#define regVPG3_VPG_GENERIC_STATUS_BASE_IDX 2
+#define regVPG3_VPG_MEM_PWR 0x23d9
+#define regVPG3_VPG_MEM_PWR_BASE_IDX 2
+#define regVPG3_VPG_ISRC1_2_ACCESS_CTRL 0x23da
+#define regVPG3_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 2
+#define regVPG3_VPG_ISRC1_2_DATA 0x23db
+#define regVPG3_VPG_ISRC1_2_DATA_BASE_IDX 2
+#define regVPG3_VPG_MPEG_INFO0 0x23dc
+#define regVPG3_VPG_MPEG_INFO0_BASE_IDX 2
+#define regVPG3_VPG_MPEG_INFO1 0x23dd
+#define regVPG3_VPG_MPEG_INFO1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig4_dme_dme_dispdec
+// base address: 0x16784
+#define regDME4_DME_CONTROL 0x2521
+#define regDME4_DME_CONTROL_BASE_IDX 2
+#define regDME4_DME_MEMORY_CONTROL 0x2522
+#define regDME4_DME_MEMORY_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig4_vpg_vpg_dispdec
+// base address: 0x166e0
+#define regVPG4_VPG_GENERIC_PACKET_ACCESS_CTRL 0x24f8
+#define regVPG4_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 2
+#define regVPG4_VPG_GENERIC_PACKET_DATA 0x24f9
+#define regVPG4_VPG_GENERIC_PACKET_DATA_BASE_IDX 2
+#define regVPG4_VPG_GSP_FRAME_UPDATE_CTRL 0x24fa
+#define regVPG4_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 2
+#define regVPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x24fb
+#define regVPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 2
+#define regVPG4_VPG_GENERIC_STATUS 0x24fc
+#define regVPG4_VPG_GENERIC_STATUS_BASE_IDX 2
+#define regVPG4_VPG_MEM_PWR 0x24fd
+#define regVPG4_VPG_MEM_PWR_BASE_IDX 2
+#define regVPG4_VPG_ISRC1_2_ACCESS_CTRL 0x24fe
+#define regVPG4_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 2
+#define regVPG4_VPG_ISRC1_2_DATA 0x24ff
+#define regVPG4_VPG_ISRC1_2_DATA_BASE_IDX 2
+#define regVPG4_VPG_MPEG_INFO0 0x2500
+#define regVPG4_VPG_MPEG_INFO0_BASE_IDX 2
+#define regVPG4_VPG_MPEG_INFO1 0x2501
+#define regVPG4_VPG_MPEG_INFO1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp_aux0_dispdec
+// base address: 0x0
+#define regDP_AUX0_AUX_CONTROL 0x1f50
+#define regDP_AUX0_AUX_CONTROL_BASE_IDX 2
+#define regDP_AUX0_AUX_SW_CONTROL 0x1f51
+#define regDP_AUX0_AUX_SW_CONTROL_BASE_IDX 2
+#define regDP_AUX0_AUX_ARB_CONTROL 0x1f52
+#define regDP_AUX0_AUX_ARB_CONTROL_BASE_IDX 2
+#define regDP_AUX0_AUX_INTERRUPT_CONTROL 0x1f53
+#define regDP_AUX0_AUX_INTERRUPT_CONTROL_BASE_IDX 2
+#define regDP_AUX0_AUX_SW_STATUS 0x1f54
+#define regDP_AUX0_AUX_SW_STATUS_BASE_IDX 2
+#define regDP_AUX0_AUX_LS_STATUS 0x1f55
+#define regDP_AUX0_AUX_LS_STATUS_BASE_IDX 2
+#define regDP_AUX0_AUX_SW_DATA 0x1f56
+#define regDP_AUX0_AUX_SW_DATA_BASE_IDX 2
+#define regDP_AUX0_AUX_LS_DATA 0x1f57
+#define regDP_AUX0_AUX_LS_DATA_BASE_IDX 2
+#define regDP_AUX0_AUX_DPHY_TX_REF_CONTROL 0x1f58
+#define regDP_AUX0_AUX_DPHY_TX_REF_CONTROL_BASE_IDX 2
+#define regDP_AUX0_AUX_DPHY_TX_CONTROL 0x1f59
+#define regDP_AUX0_AUX_DPHY_TX_CONTROL_BASE_IDX 2
+#define regDP_AUX0_AUX_DPHY_RX_CONTROL0 0x1f5a
+#define regDP_AUX0_AUX_DPHY_RX_CONTROL0_BASE_IDX 2
+#define regDP_AUX0_AUX_DPHY_RX_CONTROL1 0x1f5b
+#define regDP_AUX0_AUX_DPHY_RX_CONTROL1_BASE_IDX 2
+#define regDP_AUX0_AUX_DPHY_TX_STATUS 0x1f5c
+#define regDP_AUX0_AUX_DPHY_TX_STATUS_BASE_IDX 2
+#define regDP_AUX0_AUX_DPHY_RX_STATUS 0x1f5d
+#define regDP_AUX0_AUX_DPHY_RX_STATUS_BASE_IDX 2
+#define regDP_AUX0_AUX_GTC_SYNC_CONTROL 0x1f5e
+#define regDP_AUX0_AUX_GTC_SYNC_CONTROL_BASE_IDX 2
+#define regDP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL 0x1f5f
+#define regDP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL_BASE_IDX 2
+#define regDP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS 0x1f60
+#define regDP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS_BASE_IDX 2
+#define regDP_AUX0_AUX_GTC_SYNC_STATUS 0x1f61
+#define regDP_AUX0_AUX_GTC_SYNC_STATUS_BASE_IDX 2
+#define regDP_AUX0_AUX_PHY_WAKE_CNTL 0x1f66
+#define regDP_AUX0_AUX_PHY_WAKE_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp_aux1_dispdec
+// base address: 0x70
+#define regDP_AUX1_AUX_CONTROL 0x1f6c
+#define regDP_AUX1_AUX_CONTROL_BASE_IDX 2
+#define regDP_AUX1_AUX_SW_CONTROL 0x1f6d
+#define regDP_AUX1_AUX_SW_CONTROL_BASE_IDX 2
+#define regDP_AUX1_AUX_ARB_CONTROL 0x1f6e
+#define regDP_AUX1_AUX_ARB_CONTROL_BASE_IDX 2
+#define regDP_AUX1_AUX_INTERRUPT_CONTROL 0x1f6f
+#define regDP_AUX1_AUX_INTERRUPT_CONTROL_BASE_IDX 2
+#define regDP_AUX1_AUX_SW_STATUS 0x1f70
+#define regDP_AUX1_AUX_SW_STATUS_BASE_IDX 2
+#define regDP_AUX1_AUX_LS_STATUS 0x1f71
+#define regDP_AUX1_AUX_LS_STATUS_BASE_IDX 2
+#define regDP_AUX1_AUX_SW_DATA 0x1f72
+#define regDP_AUX1_AUX_SW_DATA_BASE_IDX 2
+#define regDP_AUX1_AUX_LS_DATA 0x1f73
+#define regDP_AUX1_AUX_LS_DATA_BASE_IDX 2
+#define regDP_AUX1_AUX_DPHY_TX_REF_CONTROL 0x1f74
+#define regDP_AUX1_AUX_DPHY_TX_REF_CONTROL_BASE_IDX 2
+#define regDP_AUX1_AUX_DPHY_TX_CONTROL 0x1f75
+#define regDP_AUX1_AUX_DPHY_TX_CONTROL_BASE_IDX 2
+#define regDP_AUX1_AUX_DPHY_RX_CONTROL0 0x1f76
+#define regDP_AUX1_AUX_DPHY_RX_CONTROL0_BASE_IDX 2
+#define regDP_AUX1_AUX_DPHY_RX_CONTROL1 0x1f77
+#define regDP_AUX1_AUX_DPHY_RX_CONTROL1_BASE_IDX 2
+#define regDP_AUX1_AUX_DPHY_TX_STATUS 0x1f78
+#define regDP_AUX1_AUX_DPHY_TX_STATUS_BASE_IDX 2
+#define regDP_AUX1_AUX_DPHY_RX_STATUS 0x1f79
+#define regDP_AUX1_AUX_DPHY_RX_STATUS_BASE_IDX 2
+#define regDP_AUX1_AUX_GTC_SYNC_CONTROL 0x1f7a
+#define regDP_AUX1_AUX_GTC_SYNC_CONTROL_BASE_IDX 2
+#define regDP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL 0x1f7b
+#define regDP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL_BASE_IDX 2
+#define regDP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS 0x1f7c
+#define regDP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS_BASE_IDX 2
+#define regDP_AUX1_AUX_GTC_SYNC_STATUS 0x1f7d
+#define regDP_AUX1_AUX_GTC_SYNC_STATUS_BASE_IDX 2
+#define regDP_AUX1_AUX_PHY_WAKE_CNTL 0x1f82
+#define regDP_AUX1_AUX_PHY_WAKE_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp_aux2_dispdec
+// base address: 0xe0
+#define regDP_AUX2_AUX_CONTROL 0x1f88
+#define regDP_AUX2_AUX_CONTROL_BASE_IDX 2
+#define regDP_AUX2_AUX_SW_CONTROL 0x1f89
+#define regDP_AUX2_AUX_SW_CONTROL_BASE_IDX 2
+#define regDP_AUX2_AUX_ARB_CONTROL 0x1f8a
+#define regDP_AUX2_AUX_ARB_CONTROL_BASE_IDX 2
+#define regDP_AUX2_AUX_INTERRUPT_CONTROL 0x1f8b
+#define regDP_AUX2_AUX_INTERRUPT_CONTROL_BASE_IDX 2
+#define regDP_AUX2_AUX_SW_STATUS 0x1f8c
+#define regDP_AUX2_AUX_SW_STATUS_BASE_IDX 2
+#define regDP_AUX2_AUX_LS_STATUS 0x1f8d
+#define regDP_AUX2_AUX_LS_STATUS_BASE_IDX 2
+#define regDP_AUX2_AUX_SW_DATA 0x1f8e
+#define regDP_AUX2_AUX_SW_DATA_BASE_IDX 2
+#define regDP_AUX2_AUX_LS_DATA 0x1f8f
+#define regDP_AUX2_AUX_LS_DATA_BASE_IDX 2
+#define regDP_AUX2_AUX_DPHY_TX_REF_CONTROL 0x1f90
+#define regDP_AUX2_AUX_DPHY_TX_REF_CONTROL_BASE_IDX 2
+#define regDP_AUX2_AUX_DPHY_TX_CONTROL 0x1f91
+#define regDP_AUX2_AUX_DPHY_TX_CONTROL_BASE_IDX 2
+#define regDP_AUX2_AUX_DPHY_RX_CONTROL0 0x1f92
+#define regDP_AUX2_AUX_DPHY_RX_CONTROL0_BASE_IDX 2
+#define regDP_AUX2_AUX_DPHY_RX_CONTROL1 0x1f93
+#define regDP_AUX2_AUX_DPHY_RX_CONTROL1_BASE_IDX 2
+#define regDP_AUX2_AUX_DPHY_TX_STATUS 0x1f94
+#define regDP_AUX2_AUX_DPHY_TX_STATUS_BASE_IDX 2
+#define regDP_AUX2_AUX_DPHY_RX_STATUS 0x1f95
+#define regDP_AUX2_AUX_DPHY_RX_STATUS_BASE_IDX 2
+#define regDP_AUX2_AUX_GTC_SYNC_CONTROL 0x1f96
+#define regDP_AUX2_AUX_GTC_SYNC_CONTROL_BASE_IDX 2
+#define regDP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL 0x1f97
+#define regDP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL_BASE_IDX 2
+#define regDP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS 0x1f98
+#define regDP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS_BASE_IDX 2
+#define regDP_AUX2_AUX_GTC_SYNC_STATUS 0x1f99
+#define regDP_AUX2_AUX_GTC_SYNC_STATUS_BASE_IDX 2
+#define regDP_AUX2_AUX_PHY_WAKE_CNTL 0x1f9e
+#define regDP_AUX2_AUX_PHY_WAKE_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp_aux3_dispdec
+// base address: 0x150
+#define regDP_AUX3_AUX_CONTROL 0x1fa4
+#define regDP_AUX3_AUX_CONTROL_BASE_IDX 2
+#define regDP_AUX3_AUX_SW_CONTROL 0x1fa5
+#define regDP_AUX3_AUX_SW_CONTROL_BASE_IDX 2
+#define regDP_AUX3_AUX_ARB_CONTROL 0x1fa6
+#define regDP_AUX3_AUX_ARB_CONTROL_BASE_IDX 2
+#define regDP_AUX3_AUX_INTERRUPT_CONTROL 0x1fa7
+#define regDP_AUX3_AUX_INTERRUPT_CONTROL_BASE_IDX 2
+#define regDP_AUX3_AUX_SW_STATUS 0x1fa8
+#define regDP_AUX3_AUX_SW_STATUS_BASE_IDX 2
+#define regDP_AUX3_AUX_LS_STATUS 0x1fa9
+#define regDP_AUX3_AUX_LS_STATUS_BASE_IDX 2
+#define regDP_AUX3_AUX_SW_DATA 0x1faa
+#define regDP_AUX3_AUX_SW_DATA_BASE_IDX 2
+#define regDP_AUX3_AUX_LS_DATA 0x1fab
+#define regDP_AUX3_AUX_LS_DATA_BASE_IDX 2
+#define regDP_AUX3_AUX_DPHY_TX_REF_CONTROL 0x1fac
+#define regDP_AUX3_AUX_DPHY_TX_REF_CONTROL_BASE_IDX 2
+#define regDP_AUX3_AUX_DPHY_TX_CONTROL 0x1fad
+#define regDP_AUX3_AUX_DPHY_TX_CONTROL_BASE_IDX 2
+#define regDP_AUX3_AUX_DPHY_RX_CONTROL0 0x1fae
+#define regDP_AUX3_AUX_DPHY_RX_CONTROL0_BASE_IDX 2
+#define regDP_AUX3_AUX_DPHY_RX_CONTROL1 0x1faf
+#define regDP_AUX3_AUX_DPHY_RX_CONTROL1_BASE_IDX 2
+#define regDP_AUX3_AUX_DPHY_TX_STATUS 0x1fb0
+#define regDP_AUX3_AUX_DPHY_TX_STATUS_BASE_IDX 2
+#define regDP_AUX3_AUX_DPHY_RX_STATUS 0x1fb1
+#define regDP_AUX3_AUX_DPHY_RX_STATUS_BASE_IDX 2
+#define regDP_AUX3_AUX_GTC_SYNC_CONTROL 0x1fb2
+#define regDP_AUX3_AUX_GTC_SYNC_CONTROL_BASE_IDX 2
+#define regDP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL 0x1fb3
+#define regDP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL_BASE_IDX 2
+#define regDP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS 0x1fb4
+#define regDP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS_BASE_IDX 2
+#define regDP_AUX3_AUX_GTC_SYNC_STATUS 0x1fb5
+#define regDP_AUX3_AUX_GTC_SYNC_STATUS_BASE_IDX 2
+#define regDP_AUX3_AUX_PHY_WAKE_CNTL 0x1fba
+#define regDP_AUX3_AUX_PHY_WAKE_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp_aux4_dispdec
+// base address: 0x1c0
+#define regDP_AUX4_AUX_CONTROL 0x1fc0
+#define regDP_AUX4_AUX_CONTROL_BASE_IDX 2
+#define regDP_AUX4_AUX_SW_CONTROL 0x1fc1
+#define regDP_AUX4_AUX_SW_CONTROL_BASE_IDX 2
+#define regDP_AUX4_AUX_ARB_CONTROL 0x1fc2
+#define regDP_AUX4_AUX_ARB_CONTROL_BASE_IDX 2
+#define regDP_AUX4_AUX_INTERRUPT_CONTROL 0x1fc3
+#define regDP_AUX4_AUX_INTERRUPT_CONTROL_BASE_IDX 2
+#define regDP_AUX4_AUX_SW_STATUS 0x1fc4
+#define regDP_AUX4_AUX_SW_STATUS_BASE_IDX 2
+#define regDP_AUX4_AUX_LS_STATUS 0x1fc5
+#define regDP_AUX4_AUX_LS_STATUS_BASE_IDX 2
+#define regDP_AUX4_AUX_SW_DATA 0x1fc6
+#define regDP_AUX4_AUX_SW_DATA_BASE_IDX 2
+#define regDP_AUX4_AUX_LS_DATA 0x1fc7
+#define regDP_AUX4_AUX_LS_DATA_BASE_IDX 2
+#define regDP_AUX4_AUX_DPHY_TX_REF_CONTROL 0x1fc8
+#define regDP_AUX4_AUX_DPHY_TX_REF_CONTROL_BASE_IDX 2
+#define regDP_AUX4_AUX_DPHY_TX_CONTROL 0x1fc9
+#define regDP_AUX4_AUX_DPHY_TX_CONTROL_BASE_IDX 2
+#define regDP_AUX4_AUX_DPHY_RX_CONTROL0 0x1fca
+#define regDP_AUX4_AUX_DPHY_RX_CONTROL0_BASE_IDX 2
+#define regDP_AUX4_AUX_DPHY_RX_CONTROL1 0x1fcb
+#define regDP_AUX4_AUX_DPHY_RX_CONTROL1_BASE_IDX 2
+#define regDP_AUX4_AUX_DPHY_TX_STATUS 0x1fcc
+#define regDP_AUX4_AUX_DPHY_TX_STATUS_BASE_IDX 2
+#define regDP_AUX4_AUX_DPHY_RX_STATUS 0x1fcd
+#define regDP_AUX4_AUX_DPHY_RX_STATUS_BASE_IDX 2
+#define regDP_AUX4_AUX_GTC_SYNC_CONTROL 0x1fce
+#define regDP_AUX4_AUX_GTC_SYNC_CONTROL_BASE_IDX 2
+#define regDP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL 0x1fcf
+#define regDP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL_BASE_IDX 2
+#define regDP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS 0x1fd0
+#define regDP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS_BASE_IDX 2
+#define regDP_AUX4_AUX_GTC_SYNC_STATUS 0x1fd1
+#define regDP_AUX4_AUX_GTC_SYNC_STATUS_BASE_IDX 2
+#define regDP_AUX4_AUX_PHY_WAKE_CNTL 0x1fd6
+#define regDP_AUX4_AUX_PHY_WAKE_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dio_dpia_mux0_dispdec
+// base address: 0x14de0
+#define regDIO_DPIA_MUX0_DIO_DPIA_MUX_CONTROL 0x1eb8
+#define regDIO_DPIA_MUX0_DIO_DPIA_MUX_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dio_dpia_mux1_dispdec
+// base address: 0x14de4
+#define regDIO_DPIA_MUX1_DIO_DPIA_MUX_CONTROL 0x1eb9
+#define regDIO_DPIA_MUX1_DIO_DPIA_MUX_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dio_dpia_mux2_dispdec
+// base address: 0x14de8
+#define regDIO_DPIA_MUX2_DIO_DPIA_MUX_CONTROL 0x1eba
+#define regDIO_DPIA_MUX2_DIO_DPIA_MUX_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dio_dpia_mux3_dispdec
+// base address: 0x14dec
+#define regDIO_DPIA_MUX3_DIO_DPIA_MUX_CONTROL 0x1ebb
+#define regDIO_DPIA_MUX3_DIO_DPIA_MUX_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dout_i2c_dispdec
+// base address: 0x0
+#define regDC_I2C_CONTROL 0x1e98
+#define regDC_I2C_CONTROL_BASE_IDX 2
+#define regDC_I2C_ARBITRATION 0x1e99
+#define regDC_I2C_ARBITRATION_BASE_IDX 2
+#define regDC_I2C_INTERRUPT_CONTROL 0x1e9a
+#define regDC_I2C_INTERRUPT_CONTROL_BASE_IDX 2
+#define regDC_I2C_SW_STATUS 0x1e9b
+#define regDC_I2C_SW_STATUS_BASE_IDX 2
+#define regDC_I2C_DDC1_HW_STATUS 0x1e9c
+#define regDC_I2C_DDC1_HW_STATUS_BASE_IDX 2
+#define regDC_I2C_DDC2_HW_STATUS 0x1e9d
+#define regDC_I2C_DDC2_HW_STATUS_BASE_IDX 2
+#define regDC_I2C_DDC3_HW_STATUS 0x1e9e
+#define regDC_I2C_DDC3_HW_STATUS_BASE_IDX 2
+#define regDC_I2C_DDC4_HW_STATUS 0x1e9f
+#define regDC_I2C_DDC4_HW_STATUS_BASE_IDX 2
+#define regDC_I2C_DDC5_HW_STATUS 0x1ea0
+#define regDC_I2C_DDC5_HW_STATUS_BASE_IDX 2
+#define regDC_I2C_DDC1_SPEED 0x1ea2
+#define regDC_I2C_DDC1_SPEED_BASE_IDX 2
+#define regDC_I2C_DDC1_SETUP 0x1ea3
+#define regDC_I2C_DDC1_SETUP_BASE_IDX 2
+#define regDC_I2C_DDC2_SPEED 0x1ea4
+#define regDC_I2C_DDC2_SPEED_BASE_IDX 2
+#define regDC_I2C_DDC2_SETUP 0x1ea5
+#define regDC_I2C_DDC2_SETUP_BASE_IDX 2
+#define regDC_I2C_DDC3_SPEED 0x1ea6
+#define regDC_I2C_DDC3_SPEED_BASE_IDX 2
+#define regDC_I2C_DDC3_SETUP 0x1ea7
+#define regDC_I2C_DDC3_SETUP_BASE_IDX 2
+#define regDC_I2C_DDC4_SPEED 0x1ea8
+#define regDC_I2C_DDC4_SPEED_BASE_IDX 2
+#define regDC_I2C_DDC4_SETUP 0x1ea9
+#define regDC_I2C_DDC4_SETUP_BASE_IDX 2
+#define regDC_I2C_DDC5_SPEED 0x1eaa
+#define regDC_I2C_DDC5_SPEED_BASE_IDX 2
+#define regDC_I2C_DDC5_SETUP 0x1eab
+#define regDC_I2C_DDC5_SETUP_BASE_IDX 2
+#define regDC_I2C_TRANSACTION0 0x1eae
+#define regDC_I2C_TRANSACTION0_BASE_IDX 2
+#define regDC_I2C_TRANSACTION1 0x1eaf
+#define regDC_I2C_TRANSACTION1_BASE_IDX 2
+#define regDC_I2C_TRANSACTION2 0x1eb0
+#define regDC_I2C_TRANSACTION2_BASE_IDX 2
+#define regDC_I2C_TRANSACTION3 0x1eb1
+#define regDC_I2C_TRANSACTION3_BASE_IDX 2
+#define regDC_I2C_DATA 0x1eb2
+#define regDC_I2C_DATA_BASE_IDX 2
+#define regDC_I2C_EDID_DETECT_CTRL 0x1eb6
+#define regDC_I2C_EDID_DETECT_CTRL_BASE_IDX 2
+#define regDC_I2C_READ_REQUEST_INTERRUPT 0x1eb7
+#define regDC_I2C_READ_REQUEST_INTERRUPT_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dio_misc_dispdec
+// base address: 0x0
+#define regDIO_DCN_STATUS 0x1ec3
+#define regDIO_DCN_STATUS_BASE_IDX 2
+#define regDIO_SCRATCH0 0x1eca
+#define regDIO_SCRATCH0_BASE_IDX 2
+#define regDIO_SCRATCH1 0x1ecb
+#define regDIO_SCRATCH1_BASE_IDX 2
+#define regDIO_SCRATCH2 0x1ecc
+#define regDIO_SCRATCH2_BASE_IDX 2
+#define regDIO_SCRATCH3 0x1ecd
+#define regDIO_SCRATCH3_BASE_IDX 2
+#define regDIO_SCRATCH4 0x1ece
+#define regDIO_SCRATCH4_BASE_IDX 2
+#define regDIO_SCRATCH5 0x1ecf
+#define regDIO_SCRATCH5_BASE_IDX 2
+#define regDIO_SCRATCH6 0x1ed0
+#define regDIO_SCRATCH6_BASE_IDX 2
+#define regDIO_SCRATCH7 0x1ed1
+#define regDIO_SCRATCH7_BASE_IDX 2
+#define regDIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS 0x1ed3
+#define regDIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS_BASE_IDX 2
+#define regDIO_MEM_PWR_STATUS 0x1edd
+#define regDIO_MEM_PWR_STATUS_BASE_IDX 2
+#define regDIO_MEM_PWR_CTRL 0x1ede
+#define regDIO_MEM_PWR_CTRL_BASE_IDX 2
+#define regDIO_MEM_PWR_CTRL2 0x1edf
+#define regDIO_MEM_PWR_CTRL2_BASE_IDX 2
+#define regDIO_CLK_CNTL 0x1ee0
+#define regDIO_CLK_CNTL_BASE_IDX 2
+#define regDIO_POWER_MANAGEMENT_CNTL 0x1ee4
+#define regDIO_POWER_MANAGEMENT_CNTL_BASE_IDX 2
+#define regDIO_HDMI_RXSTATUS_TIMER_CONTROL 0x1eff
+#define regDIO_HDMI_RXSTATUS_TIMER_CONTROL_BASE_IDX 2
+#define regDIO_PSP_INTERRUPT_STATUS 0x1f00
+#define regDIO_PSP_INTERRUPT_STATUS_BASE_IDX 2
+#define regDIO_PSP_INTERRUPT_CLEAR 0x1f01
+#define regDIO_PSP_INTERRUPT_CLEAR_BASE_IDX 2
+#define regDIO_STATUS 0x1f02
+#define regDIO_STATUS_BASE_IDX 2
+#define regDIO_LINKA_CNTL 0x1f04
+#define regDIO_LINKA_CNTL_BASE_IDX 2
+#define regDIO_LINKB_CNTL 0x1f05
+#define regDIO_LINKB_CNTL_BASE_IDX 2
+#define regDIO_LINKC_CNTL 0x1f06
+#define regDIO_LINKC_CNTL_BASE_IDX 2
+#define regDIO_LINKD_CNTL 0x1f07
+#define regDIO_LINKD_CNTL_BASE_IDX 2
+#define regDIO_LINKE_CNTL 0x1f08
+#define regDIO_LINKE_CNTL_BASE_IDX 2
+#define regDIO_LINKF_CNTL 0x1f09
+#define regDIO_LINKF_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig_stream_mapper_dispdec
+// base address: 0x0
+#define regDIG0_STREAM_MAPPER_CONTROL 0x1f0d
+#define regDIG0_STREAM_MAPPER_CONTROL_BASE_IDX 2
+#define regDIG1_STREAM_MAPPER_CONTROL 0x1f0e
+#define regDIG1_STREAM_MAPPER_CONTROL_BASE_IDX 2
+#define regDIG2_STREAM_MAPPER_CONTROL 0x1f0f
+#define regDIG2_STREAM_MAPPER_CONTROL_BASE_IDX 2
+#define regDIG3_STREAM_MAPPER_CONTROL 0x1f10
+#define regDIG3_STREAM_MAPPER_CONTROL_BASE_IDX 2
+#define regDIG4_STREAM_MAPPER_CONTROL 0x1f11
+#define regDIG4_STREAM_MAPPER_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dio_dcperfmon_dc_perfmon_dispdec
+// base address: 0x7d10
+#define regDC_PERFMON18_PERFCOUNTER_CNTL 0x1f44
+#define regDC_PERFMON18_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON18_PERFCOUNTER_CNTL2 0x1f45
+#define regDC_PERFMON18_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON18_PERFCOUNTER_STATE 0x1f46
+#define regDC_PERFMON18_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON18_PERFMON_CNTL 0x1f47
+#define regDC_PERFMON18_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON18_PERFMON_CNTL2 0x1f48
+#define regDC_PERFMON18_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON18_PERFMON_CVALUE_INT_MISC 0x1f49
+#define regDC_PERFMON18_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON18_PERFMON_CVALUE_LOW 0x1f4a
+#define regDC_PERFMON18_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON18_PERFMON_HI 0x1f4b
+#define regDC_PERFMON18_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON18_PERFMON_LOW 0x1f4c
+#define regDC_PERFMON18_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcio_dcio_dispdec
+// base address: 0x0
+#define regDC_GENERICA 0x2868
+#define regDC_GENERICA_BASE_IDX 2
+#define regDC_GENERICB 0x2869
+#define regDC_GENERICB_BASE_IDX 2
+#define regDCIO_CLOCK_CNTL 0x286a
+#define regDCIO_CLOCK_CNTL_BASE_IDX 2
+#define regDC_REF_CLK_CNTL 0x286b
+#define regDC_REF_CLK_CNTL_BASE_IDX 2
+#define regUNIPHYA_LINK_CNTL 0x286d
+#define regUNIPHYA_LINK_CNTL_BASE_IDX 2
+#define regUNIPHYA_CHANNEL_XBAR_CNTL 0x286e
+#define regUNIPHYA_CHANNEL_XBAR_CNTL_BASE_IDX 2
+#define regUNIPHYB_LINK_CNTL 0x286f
+#define regUNIPHYB_LINK_CNTL_BASE_IDX 2
+#define regUNIPHYB_CHANNEL_XBAR_CNTL 0x2870
+#define regUNIPHYB_CHANNEL_XBAR_CNTL_BASE_IDX 2
+#define regUNIPHYC_LINK_CNTL 0x2871
+#define regUNIPHYC_LINK_CNTL_BASE_IDX 2
+#define regUNIPHYC_CHANNEL_XBAR_CNTL 0x2872
+#define regUNIPHYC_CHANNEL_XBAR_CNTL_BASE_IDX 2
+#define regUNIPHYD_CHANNEL_XBAR_CNTL 0x2874
+#define regUNIPHYD_CHANNEL_XBAR_CNTL_BASE_IDX 2
+#define regUNIPHYE_CHANNEL_XBAR_CNTL 0x2876
+#define regUNIPHYE_CHANNEL_XBAR_CNTL_BASE_IDX 2
+#define regDCIO_WRCMD_DELAY 0x287e
+#define regDCIO_WRCMD_DELAY_BASE_IDX 2
+#define regDC_PINSTRAPS 0x2880
+#define regDC_PINSTRAPS_BASE_IDX 2
+#define regDCIO_SPARE 0x2882
+#define regDCIO_SPARE_BASE_IDX 2
+#define regINTERCEPT_STATE 0x2884
+#define regINTERCEPT_STATE_BASE_IDX 2
+#define regDCIO_PATTERN_GEN_PAT 0x2886
+#define regDCIO_PATTERN_GEN_PAT_BASE_IDX 2
+#define regDCIO_PATTERN_GEN_EN 0x2887
+#define regDCIO_PATTERN_GEN_EN_BASE_IDX 2
+#define regDCIO_BL_PWM_FRAME_START_DISP_SEL 0x288b
+#define regDCIO_BL_PWM_FRAME_START_DISP_SEL_BASE_IDX 2
+#define regDCIO_GSL_GENLK_PAD_CNTL 0x288c
+#define regDCIO_GSL_GENLK_PAD_CNTL_BASE_IDX 2
+#define regDCIO_GSL_SWAPLOCK_PAD_CNTL 0x288d
+#define regDCIO_GSL_SWAPLOCK_PAD_CNTL_BASE_IDX 2
+#define regDCIO_SOFT_RESET 0x289e
+#define regDCIO_SOFT_RESET_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcio_dcio_chip_dispdec
+// base address: 0x0
+#define regDC_GPIO_GENERIC_MASK 0x28c8
+#define regDC_GPIO_GENERIC_MASK_BASE_IDX 2
+#define regDC_GPIO_GENERIC_A 0x28c9
+#define regDC_GPIO_GENERIC_A_BASE_IDX 2
+#define regDC_GPIO_GENERIC_EN 0x28ca
+#define regDC_GPIO_GENERIC_EN_BASE_IDX 2
+#define regDC_GPIO_GENERIC_Y 0x28cb
+#define regDC_GPIO_GENERIC_Y_BASE_IDX 2
+#define regDC_GPIO_DDC1_MASK 0x28d0
+#define regDC_GPIO_DDC1_MASK_BASE_IDX 2
+#define regDC_GPIO_DDC1_A 0x28d1
+#define regDC_GPIO_DDC1_A_BASE_IDX 2
+#define regDC_GPIO_DDC1_EN 0x28d2
+#define regDC_GPIO_DDC1_EN_BASE_IDX 2
+#define regDC_GPIO_DDC1_Y 0x28d3
+#define regDC_GPIO_DDC1_Y_BASE_IDX 2
+#define regDC_GPIO_DDC2_MASK 0x28d4
+#define regDC_GPIO_DDC2_MASK_BASE_IDX 2
+#define regDC_GPIO_DDC2_A 0x28d5
+#define regDC_GPIO_DDC2_A_BASE_IDX 2
+#define regDC_GPIO_DDC2_EN 0x28d6
+#define regDC_GPIO_DDC2_EN_BASE_IDX 2
+#define regDC_GPIO_DDC2_Y 0x28d7
+#define regDC_GPIO_DDC2_Y_BASE_IDX 2
+#define regDC_GPIO_DDC3_MASK 0x28d8
+#define regDC_GPIO_DDC3_MASK_BASE_IDX 2
+#define regDC_GPIO_DDC3_A 0x28d9
+#define regDC_GPIO_DDC3_A_BASE_IDX 2
+#define regDC_GPIO_DDC3_EN 0x28da
+#define regDC_GPIO_DDC3_EN_BASE_IDX 2
+#define regDC_GPIO_DDC3_Y 0x28db
+#define regDC_GPIO_DDC3_Y_BASE_IDX 2
+#define regDC_GPIO_DDC4_MASK 0x28dc
+#define regDC_GPIO_DDC4_MASK_BASE_IDX 2
+#define regDC_GPIO_DDC4_A 0x28dd
+#define regDC_GPIO_DDC4_A_BASE_IDX 2
+#define regDC_GPIO_DDC4_EN 0x28de
+#define regDC_GPIO_DDC4_EN_BASE_IDX 2
+#define regDC_GPIO_DDC4_Y 0x28df
+#define regDC_GPIO_DDC4_Y_BASE_IDX 2
+#define regDC_GPIO_DDC5_MASK 0x28e0
+#define regDC_GPIO_DDC5_MASK_BASE_IDX 2
+#define regDC_GPIO_DDC5_A 0x28e1
+#define regDC_GPIO_DDC5_A_BASE_IDX 2
+#define regDC_GPIO_DDC5_EN 0x28e2
+#define regDC_GPIO_DDC5_EN_BASE_IDX 2
+#define regDC_GPIO_DDC5_Y 0x28e3
+#define regDC_GPIO_DDC5_Y_BASE_IDX 2
+#define regDC_GPIO_DDCVGA_MASK 0x28e8
+#define regDC_GPIO_DDCVGA_MASK_BASE_IDX 2
+#define regDC_GPIO_DDCVGA_A 0x28e9
+#define regDC_GPIO_DDCVGA_A_BASE_IDX 2
+#define regDC_GPIO_DDCVGA_EN 0x28ea
+#define regDC_GPIO_DDCVGA_EN_BASE_IDX 2
+#define regDC_GPIO_DDCVGA_Y 0x28eb
+#define regDC_GPIO_DDCVGA_Y_BASE_IDX 2
+#define regDC_GPIO_GENLK_MASK 0x28f0
+#define regDC_GPIO_GENLK_MASK_BASE_IDX 2
+#define regDC_GPIO_GENLK_A 0x28f1
+#define regDC_GPIO_GENLK_A_BASE_IDX 2
+#define regDC_GPIO_GENLK_EN 0x28f2
+#define regDC_GPIO_GENLK_EN_BASE_IDX 2
+#define regDC_GPIO_GENLK_Y 0x28f3
+#define regDC_GPIO_GENLK_Y_BASE_IDX 2
+#define regDC_GPIO_HPD_MASK 0x28f4
+#define regDC_GPIO_HPD_MASK_BASE_IDX 2
+#define regDC_GPIO_HPD_A 0x28f5
+#define regDC_GPIO_HPD_A_BASE_IDX 2
+#define regDC_GPIO_HPD_EN 0x28f6
+#define regDC_GPIO_HPD_EN_BASE_IDX 2
+#define regDC_GPIO_HPD_Y 0x28f7
+#define regDC_GPIO_HPD_Y_BASE_IDX 2
+#define regDC_GPIO_DRIVE_STRENGTH_S0 0x28f8
+#define regDC_GPIO_DRIVE_STRENGTH_S0_BASE_IDX 2
+#define regDC_GPIO_DRIVE_STRENGTH_S1 0x28f9
+#define regDC_GPIO_DRIVE_STRENGTH_S1_BASE_IDX 2
+#define regDC_GPIO_PWRSEQ0_EN 0x28fa
+#define regDC_GPIO_PWRSEQ0_EN_BASE_IDX 2
+#define regDC_GPIO_PAD_STRENGTH_1 0x28fc
+#define regDC_GPIO_PAD_STRENGTH_1_BASE_IDX 2
+#define regDC_GPIO_PAD_STRENGTH_2 0x28fd
+#define regDC_GPIO_PAD_STRENGTH_2_BASE_IDX 2
+#define regPHY_AUX_CNTL 0x28ff
+#define regPHY_AUX_CNTL_BASE_IDX 2
+#define regDC_GPIO_DRIVE_TXIMPSEL 0x2900
+#define regDC_GPIO_DRIVE_TXIMPSEL_BASE_IDX 2
+#define regDC_GPIO_PWRSEQ1_EN 0x2902
+#define regDC_GPIO_PWRSEQ1_EN_BASE_IDX 2
+#define regDC_GPIO_TX12_EN 0x2915
+#define regDC_GPIO_TX12_EN_BASE_IDX 2
+#define regDC_GPIO_AUX_CTRL_0 0x2916
+#define regDC_GPIO_AUX_CTRL_0_BASE_IDX 2
+#define regDC_GPIO_AUX_CTRL_1 0x2917
+#define regDC_GPIO_AUX_CTRL_1_BASE_IDX 2
+#define regDC_GPIO_AUX_CTRL_2 0x2918
+#define regDC_GPIO_AUX_CTRL_2_BASE_IDX 2
+#define regDC_GPIO_RXEN 0x2919
+#define regDC_GPIO_RXEN_BASE_IDX 2
+#define regDC_GPIO_PULLUPEN 0x291a
+#define regDC_GPIO_PULLUPEN_BASE_IDX 2
+#define regDC_GPIO_AUX_CTRL_3 0x291b
+#define regDC_GPIO_AUX_CTRL_3_BASE_IDX 2
+#define regDC_GPIO_AUX_CTRL_4 0x291c
+#define regDC_GPIO_AUX_CTRL_4_BASE_IDX 2
+#define regDC_GPIO_AUX_CTRL_5 0x291d
+#define regDC_GPIO_AUX_CTRL_5_BASE_IDX 2
+#define regAUXI2C_PAD_ALL_PWR_OK 0x291e
+#define regAUXI2C_PAD_ALL_PWR_OK_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcio_dcio_uniphy1_dispdec
+// base address: 0x360
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED0 0x2a00
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED0_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED1 0x2a01
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED1_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED2 0x2a02
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED2_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED3 0x2a03
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED3_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED4 0x2a04
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED4_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED5 0x2a05
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED5_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED6 0x2a06
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED6_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED7 0x2a07
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED7_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED8 0x2a08
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED8_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED9 0x2a09
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED9_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED10 0x2a0a
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED10_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED11 0x2a0b
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED11_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED12 0x2a0c
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED12_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED13 0x2a0d
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED13_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED14 0x2a0e
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED14_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED15 0x2a0f
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED15_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED16 0x2a10
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED16_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED17 0x2a11
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED17_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED18 0x2a12
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED18_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED19 0x2a13
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED19_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED20 0x2a14
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED20_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED21 0x2a15
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED21_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED22 0x2a16
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED22_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED23 0x2a17
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED23_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED24 0x2a18
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED24_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED25 0x2a19
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED25_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED26 0x2a1a
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED26_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED27 0x2a1b
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED27_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED28 0x2a1c
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED28_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED29 0x2a1d
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED29_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED30 0x2a1e
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED30_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED31 0x2a1f
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED31_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED32 0x2a20
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED32_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED33 0x2a21
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED33_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED34 0x2a22
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED34_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED35 0x2a23
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED35_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED36 0x2a24
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED36_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED37 0x2a25
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED37_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED38 0x2a26
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED38_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED39 0x2a27
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED39_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED40 0x2a28
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED40_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED41 0x2a29
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED41_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED42 0x2a2a
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED42_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED43 0x2a2b
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED43_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED44 0x2a2c
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED44_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED45 0x2a2d
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED45_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED46 0x2a2e
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED46_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED47 0x2a2f
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED47_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED48 0x2a30
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED48_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED49 0x2a31
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED49_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED50 0x2a32
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED50_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED51 0x2a33
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED51_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED52 0x2a34
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED52_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED53 0x2a35
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED53_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED54 0x2a36
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED54_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED55 0x2a37
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED55_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED56 0x2a38
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED56_BASE_IDX 2
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED57 0x2a39
+#define regDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED57_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcio_dcio_uniphy2_dispdec
+// base address: 0x6c0
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED0 0x2ad8
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED0_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED1 0x2ad9
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED1_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED2 0x2ada
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED2_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED3 0x2adb
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED3_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED4 0x2adc
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED4_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED5 0x2add
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED5_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED6 0x2ade
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED6_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED7 0x2adf
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED7_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED8 0x2ae0
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED8_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED9 0x2ae1
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED9_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED10 0x2ae2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED10_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED11 0x2ae3
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED11_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED12 0x2ae4
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED12_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED13 0x2ae5
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED13_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED14 0x2ae6
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED14_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED15 0x2ae7
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED15_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED16 0x2ae8
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED16_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED17 0x2ae9
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED17_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED18 0x2aea
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED18_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED19 0x2aeb
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED19_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED20 0x2aec
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED20_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED21 0x2aed
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED21_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED22 0x2aee
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED22_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED23 0x2aef
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED23_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED24 0x2af0
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED24_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED25 0x2af1
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED25_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED26 0x2af2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED26_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED27 0x2af3
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED27_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED28 0x2af4
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED28_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED29 0x2af5
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED29_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED30 0x2af6
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED30_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED31 0x2af7
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED31_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED32 0x2af8
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED32_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED33 0x2af9
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED33_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED34 0x2afa
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED34_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED35 0x2afb
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED35_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED36 0x2afc
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED36_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED37 0x2afd
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED37_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED38 0x2afe
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED38_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED39 0x2aff
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED39_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED40 0x2b00
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED40_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED41 0x2b01
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED41_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED42 0x2b02
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED42_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED43 0x2b03
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED43_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED44 0x2b04
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED44_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED45 0x2b05
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED45_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED46 0x2b06
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED46_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED47 0x2b07
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED47_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED48 0x2b08
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED48_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED49 0x2b09
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED49_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED50 0x2b0a
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED50_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED51 0x2b0b
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED51_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED52 0x2b0c
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED52_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED53 0x2b0d
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED53_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED54 0x2b0e
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED54_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED55 0x2b0f
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED55_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED56 0x2b10
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED56_BASE_IDX 2
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED57 0x2b11
+#define regDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED57_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcio_dcio_uniphy3_dispdec
+// base address: 0xa20
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED0 0x2bb0
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED0_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED1 0x2bb1
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED1_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED2 0x2bb2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED2_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED3 0x2bb3
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED3_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED4 0x2bb4
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED4_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED5 0x2bb5
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED5_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED6 0x2bb6
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED6_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED7 0x2bb7
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED7_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED8 0x2bb8
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED8_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED9 0x2bb9
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED9_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED10 0x2bba
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED10_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED11 0x2bbb
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED11_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED12 0x2bbc
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED12_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED13 0x2bbd
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED13_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED14 0x2bbe
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED14_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED15 0x2bbf
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED15_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED16 0x2bc0
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED16_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED17 0x2bc1
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED17_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED18 0x2bc2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED18_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED19 0x2bc3
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED19_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED20 0x2bc4
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED20_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED21 0x2bc5
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED21_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED22 0x2bc6
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED22_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED23 0x2bc7
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED23_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED24 0x2bc8
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED24_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED25 0x2bc9
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED25_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED26 0x2bca
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED26_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED27 0x2bcb
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED27_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED28 0x2bcc
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED28_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED29 0x2bcd
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED29_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED30 0x2bce
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED30_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED31 0x2bcf
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED31_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED32 0x2bd0
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED32_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED33 0x2bd1
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED33_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED34 0x2bd2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED34_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED35 0x2bd3
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED35_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED36 0x2bd4
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED36_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED37 0x2bd5
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED37_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED38 0x2bd6
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED38_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED39 0x2bd7
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED39_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED40 0x2bd8
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED40_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED41 0x2bd9
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED41_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED42 0x2bda
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED42_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED43 0x2bdb
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED43_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED44 0x2bdc
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED44_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED45 0x2bdd
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED45_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED46 0x2bde
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED46_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED47 0x2bdf
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED47_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED48 0x2be0
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED48_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED49 0x2be1
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED49_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED50 0x2be2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED50_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED51 0x2be3
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED51_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED52 0x2be4
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED52_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED53 0x2be5
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED53_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED54 0x2be6
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED54_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED55 0x2be7
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED55_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED56 0x2be8
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED56_BASE_IDX 2
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED57 0x2be9
+#define regDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED57_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcio_dcio_uniphy4_dispdec
+// base address: 0xd80
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED0 0x2c88
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED0_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED1 0x2c89
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED1_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED2 0x2c8a
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED2_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED3 0x2c8b
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED3_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED4 0x2c8c
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED4_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED5 0x2c8d
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED5_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED6 0x2c8e
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED6_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED7 0x2c8f
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED7_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED8 0x2c90
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED8_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED9 0x2c91
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED9_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED10 0x2c92
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED10_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED11 0x2c93
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED11_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED12 0x2c94
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED12_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED13 0x2c95
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED13_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED14 0x2c96
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED14_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED15 0x2c97
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED15_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED16 0x2c98
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED16_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED17 0x2c99
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED17_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED18 0x2c9a
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED18_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED19 0x2c9b
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED19_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED20 0x2c9c
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED20_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED21 0x2c9d
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED21_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED22 0x2c9e
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED22_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED23 0x2c9f
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED23_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED24 0x2ca0
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED24_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED25 0x2ca1
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED25_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED26 0x2ca2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED26_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED27 0x2ca3
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED27_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED28 0x2ca4
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED28_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED29 0x2ca5
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED29_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED30 0x2ca6
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED30_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED31 0x2ca7
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED31_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED32 0x2ca8
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED32_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED33 0x2ca9
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED33_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED34 0x2caa
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED34_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED35 0x2cab
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED35_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED36 0x2cac
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED36_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED37 0x2cad
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED37_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED38 0x2cae
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED38_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED39 0x2caf
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED39_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED40 0x2cb0
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED40_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED41 0x2cb1
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED41_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED42 0x2cb2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED42_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED43 0x2cb3
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED43_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED44 0x2cb4
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED44_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED45 0x2cb5
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED45_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED46 0x2cb6
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED46_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED47 0x2cb7
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED47_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED48 0x2cb8
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED48_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED49 0x2cb9
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED49_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED50 0x2cba
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED50_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED51 0x2cbb
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED51_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED52 0x2cbc
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED52_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED53 0x2cbd
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED53_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED54 0x2cbe
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED54_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED55 0x2cbf
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED55_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED56 0x2cc0
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED56_BASE_IDX 2
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED57 0x2cc1
+#define regDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED57_BASE_IDX 2
+
+
+// addressBlock: dce_dc_pwrseq0_dispdec_pwrseq_dispdec
+// base address: 0x0
+#define regPWRSEQ0_DC_GPIO_PWRSEQ_EN 0x2f10
+#define regPWRSEQ0_DC_GPIO_PWRSEQ_EN_BASE_IDX 2
+#define regPWRSEQ0_DC_GPIO_PWRSEQ_CTRL 0x2f11
+#define regPWRSEQ0_DC_GPIO_PWRSEQ_CTRL_BASE_IDX 2
+#define regPWRSEQ0_DC_GPIO_PWRSEQ_MASK 0x2f12
+#define regPWRSEQ0_DC_GPIO_PWRSEQ_MASK_BASE_IDX 2
+#define regPWRSEQ0_DC_GPIO_PWRSEQ_A_Y 0x2f13
+#define regPWRSEQ0_DC_GPIO_PWRSEQ_A_Y_BASE_IDX 2
+#define regPWRSEQ0_PANEL_PWRSEQ_CNTL 0x2f14
+#define regPWRSEQ0_PANEL_PWRSEQ_CNTL_BASE_IDX 2
+#define regPWRSEQ0_PANEL_PWRSEQ_STATE 0x2f15
+#define regPWRSEQ0_PANEL_PWRSEQ_STATE_BASE_IDX 2
+#define regPWRSEQ0_PANEL_PWRSEQ_DELAY1 0x2f16
+#define regPWRSEQ0_PANEL_PWRSEQ_DELAY1_BASE_IDX 2
+#define regPWRSEQ0_PANEL_PWRSEQ_DELAY2 0x2f17
+#define regPWRSEQ0_PANEL_PWRSEQ_DELAY2_BASE_IDX 2
+#define regPWRSEQ0_PANEL_PWRSEQ_REF_DIV1 0x2f18
+#define regPWRSEQ0_PANEL_PWRSEQ_REF_DIV1_BASE_IDX 2
+#define regPWRSEQ0_BL_PWM_CNTL 0x2f19
+#define regPWRSEQ0_BL_PWM_CNTL_BASE_IDX 2
+#define regPWRSEQ0_BL_PWM_CNTL2 0x2f1a
+#define regPWRSEQ0_BL_PWM_CNTL2_BASE_IDX 2
+#define regPWRSEQ0_BL_PWM_PERIOD_CNTL 0x2f1b
+#define regPWRSEQ0_BL_PWM_PERIOD_CNTL_BASE_IDX 2
+#define regPWRSEQ0_BL_PWM_GRP1_REG_LOCK 0x2f1c
+#define regPWRSEQ0_BL_PWM_GRP1_REG_LOCK_BASE_IDX 2
+#define regPWRSEQ0_PANEL_PWRSEQ_REF_DIV2 0x2f1d
+#define regPWRSEQ0_PANEL_PWRSEQ_REF_DIV2_BASE_IDX 2
+#define regPWRSEQ0_PWRSEQ_SPARE 0x2f21
+#define regPWRSEQ0_PWRSEQ_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_pwrseq1_dispdec_pwrseq_dispdec
+// base address: 0x1b0
+#define regPWRSEQ1_DC_GPIO_PWRSEQ_EN 0x2f7c
+#define regPWRSEQ1_DC_GPIO_PWRSEQ_EN_BASE_IDX 2
+#define regPWRSEQ1_DC_GPIO_PWRSEQ_CTRL 0x2f7d
+#define regPWRSEQ1_DC_GPIO_PWRSEQ_CTRL_BASE_IDX 2
+#define regPWRSEQ1_DC_GPIO_PWRSEQ_MASK 0x2f7e
+#define regPWRSEQ1_DC_GPIO_PWRSEQ_MASK_BASE_IDX 2
+#define regPWRSEQ1_DC_GPIO_PWRSEQ_A_Y 0x2f7f
+#define regPWRSEQ1_DC_GPIO_PWRSEQ_A_Y_BASE_IDX 2
+#define regPWRSEQ1_PANEL_PWRSEQ_CNTL 0x2f80
+#define regPWRSEQ1_PANEL_PWRSEQ_CNTL_BASE_IDX 2
+#define regPWRSEQ1_PANEL_PWRSEQ_STATE 0x2f81
+#define regPWRSEQ1_PANEL_PWRSEQ_STATE_BASE_IDX 2
+#define regPWRSEQ1_PANEL_PWRSEQ_DELAY1 0x2f82
+#define regPWRSEQ1_PANEL_PWRSEQ_DELAY1_BASE_IDX 2
+#define regPWRSEQ1_PANEL_PWRSEQ_DELAY2 0x2f83
+#define regPWRSEQ1_PANEL_PWRSEQ_DELAY2_BASE_IDX 2
+#define regPWRSEQ1_PANEL_PWRSEQ_REF_DIV1 0x2f84
+#define regPWRSEQ1_PANEL_PWRSEQ_REF_DIV1_BASE_IDX 2
+#define regPWRSEQ1_BL_PWM_CNTL 0x2f85
+#define regPWRSEQ1_BL_PWM_CNTL_BASE_IDX 2
+#define regPWRSEQ1_BL_PWM_CNTL2 0x2f86
+#define regPWRSEQ1_BL_PWM_CNTL2_BASE_IDX 2
+#define regPWRSEQ1_BL_PWM_PERIOD_CNTL 0x2f87
+#define regPWRSEQ1_BL_PWM_PERIOD_CNTL_BASE_IDX 2
+#define regPWRSEQ1_BL_PWM_GRP1_REG_LOCK 0x2f88
+#define regPWRSEQ1_BL_PWM_GRP1_REG_LOCK_BASE_IDX 2
+#define regPWRSEQ1_PANEL_PWRSEQ_REF_DIV2 0x2f89
+#define regPWRSEQ1_PANEL_PWRSEQ_REF_DIV2_BASE_IDX 2
+#define regPWRSEQ1_PWRSEQ_SPARE 0x2f8d
+#define regPWRSEQ1_PWRSEQ_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc0_dispdec_dscc_dispdec
+// base address: 0x0
+#define regDSCC0_DSCC_CONFIG0 0x300a
+#define regDSCC0_DSCC_CONFIG0_BASE_IDX 2
+#define regDSCC0_DSCC_CONFIG1 0x300b
+#define regDSCC0_DSCC_CONFIG1_BASE_IDX 2
+#define regDSCC0_DSCC_STATUS 0x300c
+#define regDSCC0_DSCC_STATUS_BASE_IDX 2
+#define regDSCC0_DSCC_INTERRUPT_CONTROL_STATUS 0x300d
+#define regDSCC0_DSCC_INTERRUPT_CONTROL_STATUS_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG0 0x300e
+#define regDSCC0_DSCC_PPS_CONFIG0_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG1 0x300f
+#define regDSCC0_DSCC_PPS_CONFIG1_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG2 0x3010
+#define regDSCC0_DSCC_PPS_CONFIG2_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG3 0x3011
+#define regDSCC0_DSCC_PPS_CONFIG3_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG4 0x3012
+#define regDSCC0_DSCC_PPS_CONFIG4_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG5 0x3013
+#define regDSCC0_DSCC_PPS_CONFIG5_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG6 0x3014
+#define regDSCC0_DSCC_PPS_CONFIG6_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG7 0x3015
+#define regDSCC0_DSCC_PPS_CONFIG7_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG8 0x3016
+#define regDSCC0_DSCC_PPS_CONFIG8_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG9 0x3017
+#define regDSCC0_DSCC_PPS_CONFIG9_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG10 0x3018
+#define regDSCC0_DSCC_PPS_CONFIG10_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG11 0x3019
+#define regDSCC0_DSCC_PPS_CONFIG11_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG12 0x301a
+#define regDSCC0_DSCC_PPS_CONFIG12_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG13 0x301b
+#define regDSCC0_DSCC_PPS_CONFIG13_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG14 0x301c
+#define regDSCC0_DSCC_PPS_CONFIG14_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG15 0x301d
+#define regDSCC0_DSCC_PPS_CONFIG15_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG16 0x301e
+#define regDSCC0_DSCC_PPS_CONFIG16_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG17 0x301f
+#define regDSCC0_DSCC_PPS_CONFIG17_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG18 0x3020
+#define regDSCC0_DSCC_PPS_CONFIG18_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG19 0x3021
+#define regDSCC0_DSCC_PPS_CONFIG19_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG20 0x3022
+#define regDSCC0_DSCC_PPS_CONFIG20_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG21 0x3023
+#define regDSCC0_DSCC_PPS_CONFIG21_BASE_IDX 2
+#define regDSCC0_DSCC_PPS_CONFIG22 0x3024
+#define regDSCC0_DSCC_PPS_CONFIG22_BASE_IDX 2
+#define regDSCC0_DSCC_MEM_POWER_CONTROL 0x3025
+#define regDSCC0_DSCC_MEM_POWER_CONTROL_BASE_IDX 2
+#define regDSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER 0x3026
+#define regDSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER 0x3027
+#define regDSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER 0x3028
+#define regDSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER 0x3029
+#define regDSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER 0x302a
+#define regDSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER 0x302b
+#define regDSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC0_DSCC_MAX_ABS_ERROR0 0x302c
+#define regDSCC0_DSCC_MAX_ABS_ERROR0_BASE_IDX 2
+#define regDSCC0_DSCC_MAX_ABS_ERROR1 0x302d
+#define regDSCC0_DSCC_MAX_ABS_ERROR1_BASE_IDX 2
+#define regDSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL 0x302e
+#define regDSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL 0x302f
+#define regDSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL 0x3030
+#define regDSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL 0x3031
+#define regDSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL 0x3032
+#define regDSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL 0x3033
+#define regDSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL 0x3034
+#define regDSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3035
+#define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE 0x303a
+#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+
+// addressBlock: dce_dc_dsc0_dispdec_dsccif_dispdec
+// base address: 0x0
+#define regDSCCIF0_DSCCIF_CONFIG0 0x3005
+#define regDSCCIF0_DSCCIF_CONFIG0_BASE_IDX 2
+#define regDSCCIF0_DSCCIF_CONFIG1 0x3006
+#define regDSCCIF0_DSCCIF_CONFIG1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc0_dispdec_dsc_top_dispdec
+// base address: 0x0
+#define regDSC_TOP0_DSC_TOP_CONTROL 0x3000
+#define regDSC_TOP0_DSC_TOP_CONTROL_BASE_IDX 2
+#define regDSC_TOP0_DSC_DEBUG_CONTROL 0x3001
+#define regDSC_TOP0_DSC_DEBUG_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc0_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+// base address: 0xc140
+#define regDC_PERFMON19_PERFCOUNTER_CNTL 0x3050
+#define regDC_PERFMON19_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON19_PERFCOUNTER_CNTL2 0x3051
+#define regDC_PERFMON19_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON19_PERFCOUNTER_STATE 0x3052
+#define regDC_PERFMON19_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON19_PERFMON_CNTL 0x3053
+#define regDC_PERFMON19_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON19_PERFMON_CNTL2 0x3054
+#define regDC_PERFMON19_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON19_PERFMON_CVALUE_INT_MISC 0x3055
+#define regDC_PERFMON19_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON19_PERFMON_CVALUE_LOW 0x3056
+#define regDC_PERFMON19_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON19_PERFMON_HI 0x3057
+#define regDC_PERFMON19_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON19_PERFMON_LOW 0x3058
+#define regDC_PERFMON19_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc1_dispdec_dscc_dispdec
+// base address: 0x170
+#define regDSCC1_DSCC_CONFIG0 0x3066
+#define regDSCC1_DSCC_CONFIG0_BASE_IDX 2
+#define regDSCC1_DSCC_CONFIG1 0x3067
+#define regDSCC1_DSCC_CONFIG1_BASE_IDX 2
+#define regDSCC1_DSCC_STATUS 0x3068
+#define regDSCC1_DSCC_STATUS_BASE_IDX 2
+#define regDSCC1_DSCC_INTERRUPT_CONTROL_STATUS 0x3069
+#define regDSCC1_DSCC_INTERRUPT_CONTROL_STATUS_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG0 0x306a
+#define regDSCC1_DSCC_PPS_CONFIG0_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG1 0x306b
+#define regDSCC1_DSCC_PPS_CONFIG1_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG2 0x306c
+#define regDSCC1_DSCC_PPS_CONFIG2_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG3 0x306d
+#define regDSCC1_DSCC_PPS_CONFIG3_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG4 0x306e
+#define regDSCC1_DSCC_PPS_CONFIG4_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG5 0x306f
+#define regDSCC1_DSCC_PPS_CONFIG5_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG6 0x3070
+#define regDSCC1_DSCC_PPS_CONFIG6_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG7 0x3071
+#define regDSCC1_DSCC_PPS_CONFIG7_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG8 0x3072
+#define regDSCC1_DSCC_PPS_CONFIG8_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG9 0x3073
+#define regDSCC1_DSCC_PPS_CONFIG9_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG10 0x3074
+#define regDSCC1_DSCC_PPS_CONFIG10_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG11 0x3075
+#define regDSCC1_DSCC_PPS_CONFIG11_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG12 0x3076
+#define regDSCC1_DSCC_PPS_CONFIG12_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG13 0x3077
+#define regDSCC1_DSCC_PPS_CONFIG13_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG14 0x3078
+#define regDSCC1_DSCC_PPS_CONFIG14_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG15 0x3079
+#define regDSCC1_DSCC_PPS_CONFIG15_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG16 0x307a
+#define regDSCC1_DSCC_PPS_CONFIG16_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG17 0x307b
+#define regDSCC1_DSCC_PPS_CONFIG17_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG18 0x307c
+#define regDSCC1_DSCC_PPS_CONFIG18_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG19 0x307d
+#define regDSCC1_DSCC_PPS_CONFIG19_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG20 0x307e
+#define regDSCC1_DSCC_PPS_CONFIG20_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG21 0x307f
+#define regDSCC1_DSCC_PPS_CONFIG21_BASE_IDX 2
+#define regDSCC1_DSCC_PPS_CONFIG22 0x3080
+#define regDSCC1_DSCC_PPS_CONFIG22_BASE_IDX 2
+#define regDSCC1_DSCC_MEM_POWER_CONTROL 0x3081
+#define regDSCC1_DSCC_MEM_POWER_CONTROL_BASE_IDX 2
+#define regDSCC1_DSCC_R_Y_SQUARED_ERROR_LOWER 0x3082
+#define regDSCC1_DSCC_R_Y_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC1_DSCC_R_Y_SQUARED_ERROR_UPPER 0x3083
+#define regDSCC1_DSCC_R_Y_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC1_DSCC_G_CB_SQUARED_ERROR_LOWER 0x3084
+#define regDSCC1_DSCC_G_CB_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC1_DSCC_G_CB_SQUARED_ERROR_UPPER 0x3085
+#define regDSCC1_DSCC_G_CB_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC1_DSCC_B_CR_SQUARED_ERROR_LOWER 0x3086
+#define regDSCC1_DSCC_B_CR_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC1_DSCC_B_CR_SQUARED_ERROR_UPPER 0x3087
+#define regDSCC1_DSCC_B_CR_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC1_DSCC_MAX_ABS_ERROR0 0x3088
+#define regDSCC1_DSCC_MAX_ABS_ERROR0_BASE_IDX 2
+#define regDSCC1_DSCC_MAX_ABS_ERROR1 0x3089
+#define regDSCC1_DSCC_MAX_ABS_ERROR1_BASE_IDX 2
+#define regDSCC1_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL 0x308a
+#define regDSCC1_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL 0x308b
+#define regDSCC1_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL 0x308c
+#define regDSCC1_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL 0x308d
+#define regDSCC1_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL 0x308e
+#define regDSCC1_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL 0x308f
+#define regDSCC1_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL 0x3090
+#define regDSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3091
+#define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE 0x3096
+#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc1_dispdec_dsccif_dispdec
+// base address: 0x170
+#define regDSCCIF1_DSCCIF_CONFIG0 0x3061
+#define regDSCCIF1_DSCCIF_CONFIG0_BASE_IDX 2
+#define regDSCCIF1_DSCCIF_CONFIG1 0x3062
+#define regDSCCIF1_DSCCIF_CONFIG1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc1_dispdec_dsc_top_dispdec
+// base address: 0x170
+#define regDSC_TOP1_DSC_TOP_CONTROL 0x305c
+#define regDSC_TOP1_DSC_TOP_CONTROL_BASE_IDX 2
+#define regDSC_TOP1_DSC_DEBUG_CONTROL 0x305d
+#define regDSC_TOP1_DSC_DEBUG_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc1_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+// base address: 0xc2b0
+#define regDC_PERFMON20_PERFCOUNTER_CNTL 0x30ac
+#define regDC_PERFMON20_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON20_PERFCOUNTER_CNTL2 0x30ad
+#define regDC_PERFMON20_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON20_PERFCOUNTER_STATE 0x30ae
+#define regDC_PERFMON20_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON20_PERFMON_CNTL 0x30af
+#define regDC_PERFMON20_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON20_PERFMON_CNTL2 0x30b0
+#define regDC_PERFMON20_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON20_PERFMON_CVALUE_INT_MISC 0x30b1
+#define regDC_PERFMON20_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON20_PERFMON_CVALUE_LOW 0x30b2
+#define regDC_PERFMON20_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON20_PERFMON_HI 0x30b3
+#define regDC_PERFMON20_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON20_PERFMON_LOW 0x30b4
+#define regDC_PERFMON20_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc2_dispdec_dscc_dispdec
+// base address: 0x2e0
+#define regDSCC2_DSCC_CONFIG0 0x30c2
+#define regDSCC2_DSCC_CONFIG0_BASE_IDX 2
+#define regDSCC2_DSCC_CONFIG1 0x30c3
+#define regDSCC2_DSCC_CONFIG1_BASE_IDX 2
+#define regDSCC2_DSCC_STATUS 0x30c4
+#define regDSCC2_DSCC_STATUS_BASE_IDX 2
+#define regDSCC2_DSCC_INTERRUPT_CONTROL_STATUS 0x30c5
+#define regDSCC2_DSCC_INTERRUPT_CONTROL_STATUS_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG0 0x30c6
+#define regDSCC2_DSCC_PPS_CONFIG0_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG1 0x30c7
+#define regDSCC2_DSCC_PPS_CONFIG1_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG2 0x30c8
+#define regDSCC2_DSCC_PPS_CONFIG2_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG3 0x30c9
+#define regDSCC2_DSCC_PPS_CONFIG3_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG4 0x30ca
+#define regDSCC2_DSCC_PPS_CONFIG4_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG5 0x30cb
+#define regDSCC2_DSCC_PPS_CONFIG5_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG6 0x30cc
+#define regDSCC2_DSCC_PPS_CONFIG6_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG7 0x30cd
+#define regDSCC2_DSCC_PPS_CONFIG7_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG8 0x30ce
+#define regDSCC2_DSCC_PPS_CONFIG8_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG9 0x30cf
+#define regDSCC2_DSCC_PPS_CONFIG9_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG10 0x30d0
+#define regDSCC2_DSCC_PPS_CONFIG10_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG11 0x30d1
+#define regDSCC2_DSCC_PPS_CONFIG11_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG12 0x30d2
+#define regDSCC2_DSCC_PPS_CONFIG12_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG13 0x30d3
+#define regDSCC2_DSCC_PPS_CONFIG13_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG14 0x30d4
+#define regDSCC2_DSCC_PPS_CONFIG14_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG15 0x30d5
+#define regDSCC2_DSCC_PPS_CONFIG15_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG16 0x30d6
+#define regDSCC2_DSCC_PPS_CONFIG16_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG17 0x30d7
+#define regDSCC2_DSCC_PPS_CONFIG17_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG18 0x30d8
+#define regDSCC2_DSCC_PPS_CONFIG18_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG19 0x30d9
+#define regDSCC2_DSCC_PPS_CONFIG19_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG20 0x30da
+#define regDSCC2_DSCC_PPS_CONFIG20_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG21 0x30db
+#define regDSCC2_DSCC_PPS_CONFIG21_BASE_IDX 2
+#define regDSCC2_DSCC_PPS_CONFIG22 0x30dc
+#define regDSCC2_DSCC_PPS_CONFIG22_BASE_IDX 2
+#define regDSCC2_DSCC_MEM_POWER_CONTROL 0x30dd
+#define regDSCC2_DSCC_MEM_POWER_CONTROL_BASE_IDX 2
+#define regDSCC2_DSCC_R_Y_SQUARED_ERROR_LOWER 0x30de
+#define regDSCC2_DSCC_R_Y_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC2_DSCC_R_Y_SQUARED_ERROR_UPPER 0x30df
+#define regDSCC2_DSCC_R_Y_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC2_DSCC_G_CB_SQUARED_ERROR_LOWER 0x30e0
+#define regDSCC2_DSCC_G_CB_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC2_DSCC_G_CB_SQUARED_ERROR_UPPER 0x30e1
+#define regDSCC2_DSCC_G_CB_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC2_DSCC_B_CR_SQUARED_ERROR_LOWER 0x30e2
+#define regDSCC2_DSCC_B_CR_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC2_DSCC_B_CR_SQUARED_ERROR_UPPER 0x30e3
+#define regDSCC2_DSCC_B_CR_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC2_DSCC_MAX_ABS_ERROR0 0x30e4
+#define regDSCC2_DSCC_MAX_ABS_ERROR0_BASE_IDX 2
+#define regDSCC2_DSCC_MAX_ABS_ERROR1 0x30e5
+#define regDSCC2_DSCC_MAX_ABS_ERROR1_BASE_IDX 2
+#define regDSCC2_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL 0x30e6
+#define regDSCC2_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC2_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL 0x30e7
+#define regDSCC2_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC2_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL 0x30e8
+#define regDSCC2_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC2_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL 0x30e9
+#define regDSCC2_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC2_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL 0x30ea
+#define regDSCC2_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC2_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL 0x30eb
+#define regDSCC2_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL 0x30ec
+#define regDSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x30ed
+#define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE 0x30f2
+#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc2_dispdec_dsccif_dispdec
+// base address: 0x2e0
+#define regDSCCIF2_DSCCIF_CONFIG0 0x30bd
+#define regDSCCIF2_DSCCIF_CONFIG0_BASE_IDX 2
+#define regDSCCIF2_DSCCIF_CONFIG1 0x30be
+#define regDSCCIF2_DSCCIF_CONFIG1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc2_dispdec_dsc_top_dispdec
+// base address: 0x2e0
+#define regDSC_TOP2_DSC_TOP_CONTROL 0x30b8
+#define regDSC_TOP2_DSC_TOP_CONTROL_BASE_IDX 2
+#define regDSC_TOP2_DSC_DEBUG_CONTROL 0x30b9
+#define regDSC_TOP2_DSC_DEBUG_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc2_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+// base address: 0xc420
+#define regDC_PERFMON21_PERFCOUNTER_CNTL 0x3108
+#define regDC_PERFMON21_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON21_PERFCOUNTER_CNTL2 0x3109
+#define regDC_PERFMON21_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON21_PERFCOUNTER_STATE 0x310a
+#define regDC_PERFMON21_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON21_PERFMON_CNTL 0x310b
+#define regDC_PERFMON21_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON21_PERFMON_CNTL2 0x310c
+#define regDC_PERFMON21_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON21_PERFMON_CVALUE_INT_MISC 0x310d
+#define regDC_PERFMON21_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON21_PERFMON_CVALUE_LOW 0x310e
+#define regDC_PERFMON21_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON21_PERFMON_HI 0x310f
+#define regDC_PERFMON21_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON21_PERFMON_LOW 0x3110
+#define regDC_PERFMON21_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc3_dispdec_dscc_dispdec
+// base address: 0x450
+#define regDSCC3_DSCC_CONFIG0 0x311e
+#define regDSCC3_DSCC_CONFIG0_BASE_IDX 2
+#define regDSCC3_DSCC_CONFIG1 0x311f
+#define regDSCC3_DSCC_CONFIG1_BASE_IDX 2
+#define regDSCC3_DSCC_STATUS 0x3120
+#define regDSCC3_DSCC_STATUS_BASE_IDX 2
+#define regDSCC3_DSCC_INTERRUPT_CONTROL_STATUS 0x3121
+#define regDSCC3_DSCC_INTERRUPT_CONTROL_STATUS_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG0 0x3122
+#define regDSCC3_DSCC_PPS_CONFIG0_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG1 0x3123
+#define regDSCC3_DSCC_PPS_CONFIG1_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG2 0x3124
+#define regDSCC3_DSCC_PPS_CONFIG2_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG3 0x3125
+#define regDSCC3_DSCC_PPS_CONFIG3_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG4 0x3126
+#define regDSCC3_DSCC_PPS_CONFIG4_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG5 0x3127
+#define regDSCC3_DSCC_PPS_CONFIG5_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG6 0x3128
+#define regDSCC3_DSCC_PPS_CONFIG6_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG7 0x3129
+#define regDSCC3_DSCC_PPS_CONFIG7_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG8 0x312a
+#define regDSCC3_DSCC_PPS_CONFIG8_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG9 0x312b
+#define regDSCC3_DSCC_PPS_CONFIG9_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG10 0x312c
+#define regDSCC3_DSCC_PPS_CONFIG10_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG11 0x312d
+#define regDSCC3_DSCC_PPS_CONFIG11_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG12 0x312e
+#define regDSCC3_DSCC_PPS_CONFIG12_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG13 0x312f
+#define regDSCC3_DSCC_PPS_CONFIG13_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG14 0x3130
+#define regDSCC3_DSCC_PPS_CONFIG14_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG15 0x3131
+#define regDSCC3_DSCC_PPS_CONFIG15_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG16 0x3132
+#define regDSCC3_DSCC_PPS_CONFIG16_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG17 0x3133
+#define regDSCC3_DSCC_PPS_CONFIG17_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG18 0x3134
+#define regDSCC3_DSCC_PPS_CONFIG18_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG19 0x3135
+#define regDSCC3_DSCC_PPS_CONFIG19_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG20 0x3136
+#define regDSCC3_DSCC_PPS_CONFIG20_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG21 0x3137
+#define regDSCC3_DSCC_PPS_CONFIG21_BASE_IDX 2
+#define regDSCC3_DSCC_PPS_CONFIG22 0x3138
+#define regDSCC3_DSCC_PPS_CONFIG22_BASE_IDX 2
+#define regDSCC3_DSCC_MEM_POWER_CONTROL 0x3139
+#define regDSCC3_DSCC_MEM_POWER_CONTROL_BASE_IDX 2
+#define regDSCC3_DSCC_R_Y_SQUARED_ERROR_LOWER 0x313a
+#define regDSCC3_DSCC_R_Y_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC3_DSCC_R_Y_SQUARED_ERROR_UPPER 0x313b
+#define regDSCC3_DSCC_R_Y_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC3_DSCC_G_CB_SQUARED_ERROR_LOWER 0x313c
+#define regDSCC3_DSCC_G_CB_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC3_DSCC_G_CB_SQUARED_ERROR_UPPER 0x313d
+#define regDSCC3_DSCC_G_CB_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC3_DSCC_B_CR_SQUARED_ERROR_LOWER 0x313e
+#define regDSCC3_DSCC_B_CR_SQUARED_ERROR_LOWER_BASE_IDX 2
+#define regDSCC3_DSCC_B_CR_SQUARED_ERROR_UPPER 0x313f
+#define regDSCC3_DSCC_B_CR_SQUARED_ERROR_UPPER_BASE_IDX 2
+#define regDSCC3_DSCC_MAX_ABS_ERROR0 0x3140
+#define regDSCC3_DSCC_MAX_ABS_ERROR0_BASE_IDX 2
+#define regDSCC3_DSCC_MAX_ABS_ERROR1 0x3141
+#define regDSCC3_DSCC_MAX_ABS_ERROR1_BASE_IDX 2
+#define regDSCC3_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL 0x3142
+#define regDSCC3_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL 0x3143
+#define regDSCC3_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL 0x3144
+#define regDSCC3_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL 0x3145
+#define regDSCC3_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL 0x3146
+#define regDSCC3_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL 0x3147
+#define regDSCC3_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL 0x3148
+#define regDSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3149
+#define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2
+#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE 0x314e
+#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc3_dispdec_dsccif_dispdec
+// base address: 0x450
+#define regDSCCIF3_DSCCIF_CONFIG0 0x3119
+#define regDSCCIF3_DSCCIF_CONFIG0_BASE_IDX 2
+#define regDSCCIF3_DSCCIF_CONFIG1 0x311a
+#define regDSCCIF3_DSCCIF_CONFIG1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc3_dispdec_dsc_top_dispdec
+// base address: 0x450
+#define regDSC_TOP3_DSC_TOP_CONTROL 0x3114
+#define regDSC_TOP3_DSC_TOP_CONTROL_BASE_IDX 2
+#define regDSC_TOP3_DSC_DEBUG_CONTROL 0x3115
+#define regDSC_TOP3_DSC_DEBUG_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dsc3_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+// base address: 0xc590
+#define regDC_PERFMON22_PERFCOUNTER_CNTL 0x3164
+#define regDC_PERFMON22_PERFCOUNTER_CNTL_BASE_IDX 2
+#define regDC_PERFMON22_PERFCOUNTER_CNTL2 0x3165
+#define regDC_PERFMON22_PERFCOUNTER_CNTL2_BASE_IDX 2
+#define regDC_PERFMON22_PERFCOUNTER_STATE 0x3166
+#define regDC_PERFMON22_PERFCOUNTER_STATE_BASE_IDX 2
+#define regDC_PERFMON22_PERFMON_CNTL 0x3167
+#define regDC_PERFMON22_PERFMON_CNTL_BASE_IDX 2
+#define regDC_PERFMON22_PERFMON_CNTL2 0x3168
+#define regDC_PERFMON22_PERFMON_CNTL2_BASE_IDX 2
+#define regDC_PERFMON22_PERFMON_CVALUE_INT_MISC 0x3169
+#define regDC_PERFMON22_PERFMON_CVALUE_INT_MISC_BASE_IDX 2
+#define regDC_PERFMON22_PERFMON_CVALUE_LOW 0x316a
+#define regDC_PERFMON22_PERFMON_CVALUE_LOW_BASE_IDX 2
+#define regDC_PERFMON22_PERFMON_HI 0x316b
+#define regDC_PERFMON22_PERFMON_HI_BASE_IDX 2
+#define regDC_PERFMON22_PERFMON_LOW 0x316c
+#define regDC_PERFMON22_PERFMON_LOW_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_hpo_top_dispdec
+// base address: 0x2790c
+#define regHPO_TOP_CLOCK_CONTROL 0x0e43
+#define regHPO_TOP_CLOCK_CONTROL_BASE_IDX 3
+#define regHPO_TOP_HW_CONTROL 0x0e4a
+#define regHPO_TOP_HW_CONTROL_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_dp_stream_mapper_dispdec
+// base address: 0x27958
+#define regDP_STREAM_MAPPER_CONTROL0 0x0e56
+#define regDP_STREAM_MAPPER_CONTROL0_BASE_IDX 3
+#define regDP_STREAM_MAPPER_CONTROL1 0x0e57
+#define regDP_STREAM_MAPPER_CONTROL1_BASE_IDX 3
+#define regDP_STREAM_MAPPER_CONTROL2 0x0e58
+#define regDP_STREAM_MAPPER_CONTROL2_BASE_IDX 3
+#define regDP_STREAM_MAPPER_CONTROL3 0x0e59
+#define regDP_STREAM_MAPPER_CONTROL3_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_hpo_dcperfmon_dc_perfmon_dispdec
+// base address: 0x1a698
+#define regDC_PERFMON23_PERFCOUNTER_CNTL 0x0e66
+#define regDC_PERFMON23_PERFCOUNTER_CNTL_BASE_IDX 3
+#define regDC_PERFMON23_PERFCOUNTER_CNTL2 0x0e67
+#define regDC_PERFMON23_PERFCOUNTER_CNTL2_BASE_IDX 3
+#define regDC_PERFMON23_PERFCOUNTER_STATE 0x0e68
+#define regDC_PERFMON23_PERFCOUNTER_STATE_BASE_IDX 3
+#define regDC_PERFMON23_PERFMON_CNTL 0x0e69
+#define regDC_PERFMON23_PERFMON_CNTL_BASE_IDX 3
+#define regDC_PERFMON23_PERFMON_CNTL2 0x0e6a
+#define regDC_PERFMON23_PERFMON_CNTL2_BASE_IDX 3
+#define regDC_PERFMON23_PERFMON_CVALUE_INT_MISC 0x0e6b
+#define regDC_PERFMON23_PERFMON_CVALUE_INT_MISC_BASE_IDX 3
+#define regDC_PERFMON23_PERFMON_CVALUE_LOW 0x0e6c
+#define regDC_PERFMON23_PERFMON_CVALUE_LOW_BASE_IDX 3
+#define regDC_PERFMON23_PERFMON_HI 0x0e6d
+#define regDC_PERFMON23_PERFMON_HI_BASE_IDX 3
+#define regDC_PERFMON23_PERFMON_LOW 0x0e6e
+#define regDC_PERFMON23_PERFMON_LOW_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_hdmi_link_enc0_dispdec
+// base address: 0x2656c
+#define regHDMI_LINK_ENC_CONTROL 0x095b
+#define regHDMI_LINK_ENC_CONTROL_BASE_IDX 3
+#define regHDMI_LINK_ENC_CLK_CTRL 0x095c
+#define regHDMI_LINK_ENC_CLK_CTRL_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_hdmi_frl_enc0_dispdec
+// base address: 0x26594
+#define regHDMI_FRL_ENC_CONFIG 0x0965
+#define regHDMI_FRL_ENC_CONFIG_BASE_IDX 3
+#define regHDMI_FRL_ENC_CONFIG2 0x0966
+#define regHDMI_FRL_ENC_CONFIG2_BASE_IDX 3
+#define regHDMI_FRL_ENC_METER_BUFFER_STATUS 0x0967
+#define regHDMI_FRL_ENC_METER_BUFFER_STATUS_BASE_IDX 3
+#define regHDMI_FRL_ENC_MEM_CTRL 0x0968
+#define regHDMI_FRL_ENC_MEM_CTRL_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_hdmi_stream_enc0_dispdec
+// base address: 0x2634c
+#define regHDMI_STREAM_ENC_CLOCK_CONTROL 0x08d3
+#define regHDMI_STREAM_ENC_CLOCK_CONTROL_BASE_IDX 3
+#define regHDMI_STREAM_ENC_INPUT_MUX_CONTROL 0x08d5
+#define regHDMI_STREAM_ENC_INPUT_MUX_CONTROL_BASE_IDX 3
+#define regHDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0 0x08d6
+#define regHDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0_BASE_IDX 3
+#define regHDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1 0x08d7
+#define regHDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1_BASE_IDX 3
+#define regHDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2 0x08d8
+#define regHDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_hdmi_stream_enc0_afmt_afmt_dispdec
+// base address: 0x2646c
+#define regAFMT5_AFMT_ACP 0x091b
+#define regAFMT5_AFMT_ACP_BASE_IDX 3
+#define regAFMT5_AFMT_VBI_PACKET_CONTROL 0x091c
+#define regAFMT5_AFMT_VBI_PACKET_CONTROL_BASE_IDX 3
+#define regAFMT5_AFMT_AUDIO_PACKET_CONTROL2 0x091d
+#define regAFMT5_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 3
+#define regAFMT5_AFMT_AUDIO_INFO0 0x091e
+#define regAFMT5_AFMT_AUDIO_INFO0_BASE_IDX 3
+#define regAFMT5_AFMT_AUDIO_INFO1 0x091f
+#define regAFMT5_AFMT_AUDIO_INFO1_BASE_IDX 3
+#define regAFMT5_AFMT_60958_0 0x0920
+#define regAFMT5_AFMT_60958_0_BASE_IDX 3
+#define regAFMT5_AFMT_60958_1 0x0921
+#define regAFMT5_AFMT_60958_1_BASE_IDX 3
+#define regAFMT5_AFMT_AUDIO_CRC_CONTROL 0x0922
+#define regAFMT5_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 3
+#define regAFMT5_AFMT_RAMP_CONTROL0 0x0923
+#define regAFMT5_AFMT_RAMP_CONTROL0_BASE_IDX 3
+#define regAFMT5_AFMT_RAMP_CONTROL1 0x0924
+#define regAFMT5_AFMT_RAMP_CONTROL1_BASE_IDX 3
+#define regAFMT5_AFMT_RAMP_CONTROL2 0x0925
+#define regAFMT5_AFMT_RAMP_CONTROL2_BASE_IDX 3
+#define regAFMT5_AFMT_RAMP_CONTROL3 0x0926
+#define regAFMT5_AFMT_RAMP_CONTROL3_BASE_IDX 3
+#define regAFMT5_AFMT_60958_2 0x0927
+#define regAFMT5_AFMT_60958_2_BASE_IDX 3
+#define regAFMT5_AFMT_AUDIO_CRC_RESULT 0x0928
+#define regAFMT5_AFMT_AUDIO_CRC_RESULT_BASE_IDX 3
+#define regAFMT5_AFMT_STATUS 0x0929
+#define regAFMT5_AFMT_STATUS_BASE_IDX 3
+#define regAFMT5_AFMT_AUDIO_PACKET_CONTROL 0x092a
+#define regAFMT5_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 3
+#define regAFMT5_AFMT_INFOFRAME_CONTROL0 0x092b
+#define regAFMT5_AFMT_INFOFRAME_CONTROL0_BASE_IDX 3
+#define regAFMT5_AFMT_INTERRUPT_STATUS 0x092c
+#define regAFMT5_AFMT_INTERRUPT_STATUS_BASE_IDX 3
+#define regAFMT5_AFMT_AUDIO_SRC_CONTROL 0x092d
+#define regAFMT5_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 3
+#define regAFMT5_AFMT_MEM_PWR 0x092f
+#define regAFMT5_AFMT_MEM_PWR_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_hdmi_stream_enc0_dme_dme_dispdec
+// base address: 0x264f0
+#define regDME5_DME_CONTROL 0x093c
+#define regDME5_DME_CONTROL_BASE_IDX 3
+#define regDME5_DME_MEMORY_CONTROL 0x093d
+#define regDME5_DME_MEMORY_CONTROL_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_hdmi_stream_enc0_vpg_vpg_dispdec
+// base address: 0x264c4
+#define regVPG5_VPG_GENERIC_PACKET_ACCESS_CTRL 0x0931
+#define regVPG5_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 3
+#define regVPG5_VPG_GENERIC_PACKET_DATA 0x0932
+#define regVPG5_VPG_GENERIC_PACKET_DATA_BASE_IDX 3
+#define regVPG5_VPG_GSP_FRAME_UPDATE_CTRL 0x0933
+#define regVPG5_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 3
+#define regVPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x0934
+#define regVPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 3
+#define regVPG5_VPG_GENERIC_STATUS 0x0935
+#define regVPG5_VPG_GENERIC_STATUS_BASE_IDX 3
+#define regVPG5_VPG_MEM_PWR 0x0936
+#define regVPG5_VPG_MEM_PWR_BASE_IDX 3
+#define regVPG5_VPG_ISRC1_2_ACCESS_CTRL 0x0937
+#define regVPG5_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 3
+#define regVPG5_VPG_ISRC1_2_DATA 0x0938
+#define regVPG5_VPG_ISRC1_2_DATA_BASE_IDX 3
+#define regVPG5_VPG_MPEG_INFO0 0x0939
+#define regVPG5_VPG_MPEG_INFO0_BASE_IDX 3
+#define regVPG5_VPG_MPEG_INFO1 0x093a
+#define regVPG5_VPG_MPEG_INFO1_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_hdmi_tb_enc0_dispdec
+// base address: 0x2637c
+#define regHDMI_TB_ENC_CONTROL 0x08df
+#define regHDMI_TB_ENC_CONTROL_BASE_IDX 3
+#define regHDMI_TB_ENC_PIXEL_FORMAT 0x08e0
+#define regHDMI_TB_ENC_PIXEL_FORMAT_BASE_IDX 3
+#define regHDMI_TB_ENC_PACKET_CONTROL 0x08e1
+#define regHDMI_TB_ENC_PACKET_CONTROL_BASE_IDX 3
+#define regHDMI_TB_ENC_ACR_PACKET_CONTROL 0x08e2
+#define regHDMI_TB_ENC_ACR_PACKET_CONTROL_BASE_IDX 3
+#define regHDMI_TB_ENC_VBI_PACKET_CONTROL1 0x08e3
+#define regHDMI_TB_ENC_VBI_PACKET_CONTROL1_BASE_IDX 3
+#define regHDMI_TB_ENC_VBI_PACKET_CONTROL2 0x08e4
+#define regHDMI_TB_ENC_VBI_PACKET_CONTROL2_BASE_IDX 3
+#define regHDMI_TB_ENC_GC_CONTROL 0x08e5
+#define regHDMI_TB_ENC_GC_CONTROL_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET_CONTROL0 0x08e6
+#define regHDMI_TB_ENC_GENERIC_PACKET_CONTROL0_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET_CONTROL1 0x08e7
+#define regHDMI_TB_ENC_GENERIC_PACKET_CONTROL1_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET_CONTROL2 0x08e8
+#define regHDMI_TB_ENC_GENERIC_PACKET_CONTROL2_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET0_1_LINE 0x08e9
+#define regHDMI_TB_ENC_GENERIC_PACKET0_1_LINE_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET2_3_LINE 0x08ea
+#define regHDMI_TB_ENC_GENERIC_PACKET2_3_LINE_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET4_5_LINE 0x08eb
+#define regHDMI_TB_ENC_GENERIC_PACKET4_5_LINE_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET6_7_LINE 0x08ec
+#define regHDMI_TB_ENC_GENERIC_PACKET6_7_LINE_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET8_9_LINE 0x08ed
+#define regHDMI_TB_ENC_GENERIC_PACKET8_9_LINE_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET10_11_LINE 0x08ee
+#define regHDMI_TB_ENC_GENERIC_PACKET10_11_LINE_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET12_13_LINE 0x08ef
+#define regHDMI_TB_ENC_GENERIC_PACKET12_13_LINE_BASE_IDX 3
+#define regHDMI_TB_ENC_GENERIC_PACKET14_LINE 0x08f0
+#define regHDMI_TB_ENC_GENERIC_PACKET14_LINE_BASE_IDX 3
+#define regHDMI_TB_ENC_DB_CONTROL 0x08f1
+#define regHDMI_TB_ENC_DB_CONTROL_BASE_IDX 3
+#define regHDMI_TB_ENC_ACR_32_0 0x08f2
+#define regHDMI_TB_ENC_ACR_32_0_BASE_IDX 3
+#define regHDMI_TB_ENC_ACR_32_1 0x08f3
+#define regHDMI_TB_ENC_ACR_32_1_BASE_IDX 3
+#define regHDMI_TB_ENC_ACR_44_0 0x08f4
+#define regHDMI_TB_ENC_ACR_44_0_BASE_IDX 3
+#define regHDMI_TB_ENC_ACR_44_1 0x08f5
+#define regHDMI_TB_ENC_ACR_44_1_BASE_IDX 3
+#define regHDMI_TB_ENC_ACR_48_0 0x08f6
+#define regHDMI_TB_ENC_ACR_48_0_BASE_IDX 3
+#define regHDMI_TB_ENC_ACR_48_1 0x08f7
+#define regHDMI_TB_ENC_ACR_48_1_BASE_IDX 3
+#define regHDMI_TB_ENC_ACR_STATUS_0 0x08f8
+#define regHDMI_TB_ENC_ACR_STATUS_0_BASE_IDX 3
+#define regHDMI_TB_ENC_ACR_STATUS_1 0x08f9
+#define regHDMI_TB_ENC_ACR_STATUS_1_BASE_IDX 3
+#define regHDMI_TB_ENC_BUFFER_CONTROL 0x08fb
+#define regHDMI_TB_ENC_BUFFER_CONTROL_BASE_IDX 3
+#define regHDMI_TB_ENC_MEM_CTRL 0x08fe
+#define regHDMI_TB_ENC_MEM_CTRL_BASE_IDX 3
+#define regHDMI_TB_ENC_METADATA_PACKET_CONTROL 0x08ff
+#define regHDMI_TB_ENC_METADATA_PACKET_CONTROL_BASE_IDX 3
+#define regHDMI_TB_ENC_H_ACTIVE_BLANK 0x0900
+#define regHDMI_TB_ENC_H_ACTIVE_BLANK_BASE_IDX 3
+#define regHDMI_TB_ENC_HC_ACTIVE_BLANK 0x0901
+#define regHDMI_TB_ENC_HC_ACTIVE_BLANK_BASE_IDX 3
+#define regHDMI_TB_ENC_CRC_CNTL 0x0903
+#define regHDMI_TB_ENC_CRC_CNTL_BASE_IDX 3
+#define regHDMI_TB_ENC_CRC_RESULT_0 0x0904
+#define regHDMI_TB_ENC_CRC_RESULT_0_BASE_IDX 3
+#define regHDMI_TB_ENC_ENCRYPTION_CONTROL 0x0907
+#define regHDMI_TB_ENC_ENCRYPTION_CONTROL_BASE_IDX 3
+#define regHDMI_TB_ENC_MODE 0x0908
+#define regHDMI_TB_ENC_MODE_BASE_IDX 3
+#define regHDMI_TB_ENC_INPUT_FIFO_STATUS 0x0909
+#define regHDMI_TB_ENC_INPUT_FIFO_STATUS_BASE_IDX 3
+#define regHDMI_TB_ENC_CRC_RESULT_1 0x090a
+#define regHDMI_TB_ENC_CRC_RESULT_1_BASE_IDX 3
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc0_dispdec
+// base address: 0x1ab8c
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL 0x3623
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_INPUT_MUX_CONTROL 0x3624
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_INPUT_MUX_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_AUDIO_CONTROL 0x3625
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_AUDIO_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0 0x3626
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0_BASE_IDX 2
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1 0x3627
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1_BASE_IDX 2
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_SPARE 0x3628
+#define regDP_STREAM_ENC0_DP_STREAM_ENC_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc0_apg_apg_dispdec
+// base address: 0x1abc0
+#define regAPG0_APG_CONTROL 0x3630
+#define regAPG0_APG_CONTROL_BASE_IDX 2
+#define regAPG0_APG_CONTROL2 0x3631
+#define regAPG0_APG_CONTROL2_BASE_IDX 2
+#define regAPG0_APG_DBG_GEN_CONTROL 0x3632
+#define regAPG0_APG_DBG_GEN_CONTROL_BASE_IDX 2
+#define regAPG0_APG_PACKET_CONTROL 0x3633
+#define regAPG0_APG_PACKET_CONTROL_BASE_IDX 2
+#define regAPG0_APG_AUDIO_CRC_CONTROL 0x363a
+#define regAPG0_APG_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define regAPG0_APG_AUDIO_CRC_CONTROL2 0x363b
+#define regAPG0_APG_AUDIO_CRC_CONTROL2_BASE_IDX 2
+#define regAPG0_APG_AUDIO_CRC_RESULT 0x363c
+#define regAPG0_APG_AUDIO_CRC_RESULT_BASE_IDX 2
+#define regAPG0_APG_STATUS 0x3641
+#define regAPG0_APG_STATUS_BASE_IDX 2
+#define regAPG0_APG_STATUS2 0x3642
+#define regAPG0_APG_STATUS2_BASE_IDX 2
+#define regAPG0_APG_MEM_PWR 0x3644
+#define regAPG0_APG_MEM_PWR_BASE_IDX 2
+#define regAPG0_APG_SPARE 0x3646
+#define regAPG0_APG_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc0_dme_dme_dispdec
+// base address: 0x1ac38
+#define regDME6_DME_CONTROL 0x364e
+#define regDME6_DME_CONTROL_BASE_IDX 2
+#define regDME6_DME_MEMORY_CONTROL 0x364f
+#define regDME6_DME_MEMORY_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc0_vpg_vpg_dispdec
+// base address: 0x1ac44
+#define regVPG6_VPG_GENERIC_PACKET_ACCESS_CTRL 0x3651
+#define regVPG6_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 2
+#define regVPG6_VPG_GENERIC_PACKET_DATA 0x3652
+#define regVPG6_VPG_GENERIC_PACKET_DATA_BASE_IDX 2
+#define regVPG6_VPG_GSP_FRAME_UPDATE_CTRL 0x3653
+#define regVPG6_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 2
+#define regVPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x3654
+#define regVPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 2
+#define regVPG6_VPG_GENERIC_STATUS 0x3655
+#define regVPG6_VPG_GENERIC_STATUS_BASE_IDX 2
+#define regVPG6_VPG_MEM_PWR 0x3656
+#define regVPG6_VPG_MEM_PWR_BASE_IDX 2
+#define regVPG6_VPG_ISRC1_2_ACCESS_CTRL 0x3657
+#define regVPG6_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 2
+#define regVPG6_VPG_ISRC1_2_DATA 0x3658
+#define regVPG6_VPG_ISRC1_2_DATA_BASE_IDX 2
+#define regVPG6_VPG_MPEG_INFO0 0x3659
+#define regVPG6_VPG_MPEG_INFO0_BASE_IDX 2
+#define regVPG6_VPG_MPEG_INFO1 0x365a
+#define regVPG6_VPG_MPEG_INFO1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_sym32_enc0_dispdec
+// base address: 0x1ac74
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_CONTROL 0x365d
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL 0x365e
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL 0x365f
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL 0x3660
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT 0x3661
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA0 0x3662
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA0_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA1 0x3663
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA1_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA2 0x3664
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA2_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA3 0x3665
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA3_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA4 0x3666
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA4_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA5 0x3667
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA5_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA6 0x3668
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA6_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA7 0x3669
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA7_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA8 0x366a
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA8_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_HBLANK_CONTROL 0x366b
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_HBLANK_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0 0x366c
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1 0x366d
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2 0x366e
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3 0x366f
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4 0x3670
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5 0x3671
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6 0x3672
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7 0x3673
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8 0x3674
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9 0x3675
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10 0x3676
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11 0x3677
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12 0x3678
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13 0x3679
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14 0x367a
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL 0x367b
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0 0x367c
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1 0x367d
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL 0x367e
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_CONTROL 0x3683
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL 0x3684
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL 0x3685
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL 0x3686
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL 0x3687
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT0 0x3688
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT0_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT1 0x3689
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT1_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_STATUS 0x368a
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_STATUS_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_STATUS 0x368b
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_STATUS_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL 0x368c
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL 0x368d
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SPARE 0x368e
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_SPARE_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT2 0x3693
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT2_BASE_IDX 2
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT3 0x3694
+#define regDP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT3_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc1_dispdec
+// base address: 0x1aedc
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL 0x36f7
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_INPUT_MUX_CONTROL 0x36f8
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_INPUT_MUX_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_AUDIO_CONTROL 0x36f9
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_AUDIO_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0 0x36fa
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0_BASE_IDX 2
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1 0x36fb
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1_BASE_IDX 2
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_SPARE 0x36fc
+#define regDP_STREAM_ENC1_DP_STREAM_ENC_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc1_apg_apg_dispdec
+// base address: 0x1af10
+#define regAPG1_APG_CONTROL 0x3704
+#define regAPG1_APG_CONTROL_BASE_IDX 2
+#define regAPG1_APG_CONTROL2 0x3705
+#define regAPG1_APG_CONTROL2_BASE_IDX 2
+#define regAPG1_APG_DBG_GEN_CONTROL 0x3706
+#define regAPG1_APG_DBG_GEN_CONTROL_BASE_IDX 2
+#define regAPG1_APG_PACKET_CONTROL 0x3707
+#define regAPG1_APG_PACKET_CONTROL_BASE_IDX 2
+#define regAPG1_APG_AUDIO_CRC_CONTROL 0x370e
+#define regAPG1_APG_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define regAPG1_APG_AUDIO_CRC_CONTROL2 0x370f
+#define regAPG1_APG_AUDIO_CRC_CONTROL2_BASE_IDX 2
+#define regAPG1_APG_AUDIO_CRC_RESULT 0x3710
+#define regAPG1_APG_AUDIO_CRC_RESULT_BASE_IDX 2
+#define regAPG1_APG_STATUS 0x3715
+#define regAPG1_APG_STATUS_BASE_IDX 2
+#define regAPG1_APG_STATUS2 0x3716
+#define regAPG1_APG_STATUS2_BASE_IDX 2
+#define regAPG1_APG_MEM_PWR 0x3718
+#define regAPG1_APG_MEM_PWR_BASE_IDX 2
+#define regAPG1_APG_SPARE 0x371a
+#define regAPG1_APG_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc1_dme_dme_dispdec
+// base address: 0x1af88
+#define regDME7_DME_CONTROL 0x3722
+#define regDME7_DME_CONTROL_BASE_IDX 2
+#define regDME7_DME_MEMORY_CONTROL 0x3723
+#define regDME7_DME_MEMORY_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc1_vpg_vpg_dispdec
+// base address: 0x1af94
+#define regVPG7_VPG_GENERIC_PACKET_ACCESS_CTRL 0x3725
+#define regVPG7_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 2
+#define regVPG7_VPG_GENERIC_PACKET_DATA 0x3726
+#define regVPG7_VPG_GENERIC_PACKET_DATA_BASE_IDX 2
+#define regVPG7_VPG_GSP_FRAME_UPDATE_CTRL 0x3727
+#define regVPG7_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 2
+#define regVPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x3728
+#define regVPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 2
+#define regVPG7_VPG_GENERIC_STATUS 0x3729
+#define regVPG7_VPG_GENERIC_STATUS_BASE_IDX 2
+#define regVPG7_VPG_MEM_PWR 0x372a
+#define regVPG7_VPG_MEM_PWR_BASE_IDX 2
+#define regVPG7_VPG_ISRC1_2_ACCESS_CTRL 0x372b
+#define regVPG7_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 2
+#define regVPG7_VPG_ISRC1_2_DATA 0x372c
+#define regVPG7_VPG_ISRC1_2_DATA_BASE_IDX 2
+#define regVPG7_VPG_MPEG_INFO0 0x372d
+#define regVPG7_VPG_MPEG_INFO0_BASE_IDX 2
+#define regVPG7_VPG_MPEG_INFO1 0x372e
+#define regVPG7_VPG_MPEG_INFO1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_sym32_enc1_dispdec
+// base address: 0x1afc4
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_CONTROL 0x3731
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL 0x3732
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL 0x3733
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL 0x3734
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT 0x3735
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA0 0x3736
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA0_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA1 0x3737
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA1_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA2 0x3738
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA2_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA3 0x3739
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA3_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA4 0x373a
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA4_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA5 0x373b
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA5_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA6 0x373c
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA6_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA7 0x373d
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA7_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA8 0x373e
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA8_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_HBLANK_CONTROL 0x373f
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_HBLANK_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0 0x3740
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1 0x3741
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2 0x3742
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3 0x3743
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4 0x3744
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5 0x3745
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6 0x3746
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7 0x3747
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8 0x3748
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9 0x3749
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10 0x374a
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11 0x374b
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12 0x374c
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13 0x374d
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14 0x374e
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_CONTROL 0x374f
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0 0x3750
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1 0x3751
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL 0x3752
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_CONTROL 0x3757
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_VBID_CONTROL 0x3758
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_VBID_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_STREAM_CONTROL 0x3759
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_STREAM_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL 0x375a
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_CONTROL 0x375b
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT0 0x375c
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT0_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT1 0x375d
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT1_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_STATUS 0x375e
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_STATUS_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_STATUS 0x375f
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_STATUS_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL 0x3760
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL 0x3761
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SPARE 0x3762
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_SPARE_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT2 0x3767
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT2_BASE_IDX 2
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT3 0x3768
+#define regDP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT3_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc2_dispdec
+// base address: 0x1b22c
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL 0x37cb
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_INPUT_MUX_CONTROL 0x37cc
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_INPUT_MUX_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_AUDIO_CONTROL 0x37cd
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_AUDIO_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0 0x37ce
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0_BASE_IDX 2
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1 0x37cf
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1_BASE_IDX 2
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_SPARE 0x37d0
+#define regDP_STREAM_ENC2_DP_STREAM_ENC_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc2_apg_apg_dispdec
+// base address: 0x1b260
+#define regAPG2_APG_CONTROL 0x37d8
+#define regAPG2_APG_CONTROL_BASE_IDX 2
+#define regAPG2_APG_CONTROL2 0x37d9
+#define regAPG2_APG_CONTROL2_BASE_IDX 2
+#define regAPG2_APG_DBG_GEN_CONTROL 0x37da
+#define regAPG2_APG_DBG_GEN_CONTROL_BASE_IDX 2
+#define regAPG2_APG_PACKET_CONTROL 0x37db
+#define regAPG2_APG_PACKET_CONTROL_BASE_IDX 2
+#define regAPG2_APG_AUDIO_CRC_CONTROL 0x37e2
+#define regAPG2_APG_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define regAPG2_APG_AUDIO_CRC_CONTROL2 0x37e3
+#define regAPG2_APG_AUDIO_CRC_CONTROL2_BASE_IDX 2
+#define regAPG2_APG_AUDIO_CRC_RESULT 0x37e4
+#define regAPG2_APG_AUDIO_CRC_RESULT_BASE_IDX 2
+#define regAPG2_APG_STATUS 0x37e9
+#define regAPG2_APG_STATUS_BASE_IDX 2
+#define regAPG2_APG_STATUS2 0x37ea
+#define regAPG2_APG_STATUS2_BASE_IDX 2
+#define regAPG2_APG_MEM_PWR 0x37ec
+#define regAPG2_APG_MEM_PWR_BASE_IDX 2
+#define regAPG2_APG_SPARE 0x37ee
+#define regAPG2_APG_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc2_dme_dme_dispdec
+// base address: 0x1b2d8
+#define regDME8_DME_CONTROL 0x37f6
+#define regDME8_DME_CONTROL_BASE_IDX 2
+#define regDME8_DME_MEMORY_CONTROL 0x37f7
+#define regDME8_DME_MEMORY_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc2_vpg_vpg_dispdec
+// base address: 0x1b2e4
+#define regVPG8_VPG_GENERIC_PACKET_ACCESS_CTRL 0x37f9
+#define regVPG8_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 2
+#define regVPG8_VPG_GENERIC_PACKET_DATA 0x37fa
+#define regVPG8_VPG_GENERIC_PACKET_DATA_BASE_IDX 2
+#define regVPG8_VPG_GSP_FRAME_UPDATE_CTRL 0x37fb
+#define regVPG8_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 2
+#define regVPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x37fc
+#define regVPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 2
+#define regVPG8_VPG_GENERIC_STATUS 0x37fd
+#define regVPG8_VPG_GENERIC_STATUS_BASE_IDX 2
+#define regVPG8_VPG_MEM_PWR 0x37fe
+#define regVPG8_VPG_MEM_PWR_BASE_IDX 2
+#define regVPG8_VPG_ISRC1_2_ACCESS_CTRL 0x37ff
+#define regVPG8_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 2
+#define regVPG8_VPG_ISRC1_2_DATA 0x3800
+#define regVPG8_VPG_ISRC1_2_DATA_BASE_IDX 2
+#define regVPG8_VPG_MPEG_INFO0 0x3801
+#define regVPG8_VPG_MPEG_INFO0_BASE_IDX 2
+#define regVPG8_VPG_MPEG_INFO1 0x3802
+#define regVPG8_VPG_MPEG_INFO1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_sym32_enc2_dispdec
+// base address: 0x1b314
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_CONTROL 0x3805
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL 0x3806
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL 0x3807
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL 0x3808
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT 0x3809
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA0 0x380a
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA0_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA1 0x380b
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA1_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA2 0x380c
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA2_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA3 0x380d
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA3_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA4 0x380e
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA4_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA5 0x380f
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA5_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA6 0x3810
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA6_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA7 0x3811
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA7_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA8 0x3812
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA8_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_HBLANK_CONTROL 0x3813
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_HBLANK_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0 0x3814
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1 0x3815
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2 0x3816
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3 0x3817
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4 0x3818
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5 0x3819
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6 0x381a
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7 0x381b
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8 0x381c
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9 0x381d
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10 0x381e
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11 0x381f
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12 0x3820
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13 0x3821
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14 0x3822
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_CONTROL 0x3823
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0 0x3824
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1 0x3825
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL 0x3826
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_CONTROL 0x382b
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_VBID_CONTROL 0x382c
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_VBID_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_STREAM_CONTROL 0x382d
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_STREAM_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL 0x382e
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_CONTROL 0x382f
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT0 0x3830
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT0_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT1 0x3831
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT1_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_STATUS 0x3832
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_STATUS_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_STATUS 0x3833
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_STATUS_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL 0x3834
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL 0x3835
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SPARE 0x3836
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_SPARE_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT2 0x383b
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT2_BASE_IDX 2
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT3 0x383c
+#define regDP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT3_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc3_dispdec
+// base address: 0x1b57c
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL 0x389f
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_INPUT_MUX_CONTROL 0x38a0
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_INPUT_MUX_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_AUDIO_CONTROL 0x38a1
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_AUDIO_CONTROL_BASE_IDX 2
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0 0x38a2
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0_BASE_IDX 2
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1 0x38a3
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1_BASE_IDX 2
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_SPARE 0x38a4
+#define regDP_STREAM_ENC3_DP_STREAM_ENC_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc3_apg_apg_dispdec
+// base address: 0x1b5b0
+#define regAPG3_APG_CONTROL 0x38ac
+#define regAPG3_APG_CONTROL_BASE_IDX 2
+#define regAPG3_APG_CONTROL2 0x38ad
+#define regAPG3_APG_CONTROL2_BASE_IDX 2
+#define regAPG3_APG_DBG_GEN_CONTROL 0x38ae
+#define regAPG3_APG_DBG_GEN_CONTROL_BASE_IDX 2
+#define regAPG3_APG_PACKET_CONTROL 0x38af
+#define regAPG3_APG_PACKET_CONTROL_BASE_IDX 2
+#define regAPG3_APG_AUDIO_CRC_CONTROL 0x38b6
+#define regAPG3_APG_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define regAPG3_APG_AUDIO_CRC_CONTROL2 0x38b7
+#define regAPG3_APG_AUDIO_CRC_CONTROL2_BASE_IDX 2
+#define regAPG3_APG_AUDIO_CRC_RESULT 0x38b8
+#define regAPG3_APG_AUDIO_CRC_RESULT_BASE_IDX 2
+#define regAPG3_APG_STATUS 0x38bd
+#define regAPG3_APG_STATUS_BASE_IDX 2
+#define regAPG3_APG_STATUS2 0x38be
+#define regAPG3_APG_STATUS2_BASE_IDX 2
+#define regAPG3_APG_MEM_PWR 0x38c0
+#define regAPG3_APG_MEM_PWR_BASE_IDX 2
+#define regAPG3_APG_SPARE 0x38c2
+#define regAPG3_APG_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc3_dme_dme_dispdec
+// base address: 0x1b628
+#define regDME9_DME_CONTROL 0x38ca
+#define regDME9_DME_CONTROL_BASE_IDX 2
+#define regDME9_DME_MEMORY_CONTROL 0x38cb
+#define regDME9_DME_MEMORY_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc3_vpg_vpg_dispdec
+// base address: 0x1b634
+#define regVPG9_VPG_GENERIC_PACKET_ACCESS_CTRL 0x38cd
+#define regVPG9_VPG_GENERIC_PACKET_ACCESS_CTRL_BASE_IDX 2
+#define regVPG9_VPG_GENERIC_PACKET_DATA 0x38ce
+#define regVPG9_VPG_GENERIC_PACKET_DATA_BASE_IDX 2
+#define regVPG9_VPG_GSP_FRAME_UPDATE_CTRL 0x38cf
+#define regVPG9_VPG_GSP_FRAME_UPDATE_CTRL_BASE_IDX 2
+#define regVPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL 0x38d0
+#define regVPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL_BASE_IDX 2
+#define regVPG9_VPG_GENERIC_STATUS 0x38d1
+#define regVPG9_VPG_GENERIC_STATUS_BASE_IDX 2
+#define regVPG9_VPG_MEM_PWR 0x38d2
+#define regVPG9_VPG_MEM_PWR_BASE_IDX 2
+#define regVPG9_VPG_ISRC1_2_ACCESS_CTRL 0x38d3
+#define regVPG9_VPG_ISRC1_2_ACCESS_CTRL_BASE_IDX 2
+#define regVPG9_VPG_ISRC1_2_DATA 0x38d4
+#define regVPG9_VPG_ISRC1_2_DATA_BASE_IDX 2
+#define regVPG9_VPG_MPEG_INFO0 0x38d5
+#define regVPG9_VPG_MPEG_INFO0_BASE_IDX 2
+#define regVPG9_VPG_MPEG_INFO1 0x38d6
+#define regVPG9_VPG_MPEG_INFO1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_sym32_enc3_dispdec
+// base address: 0x1b664
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_CONTROL 0x38d9
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL 0x38da
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL 0x38db
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL 0x38dc
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT 0x38dd
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA0 0x38de
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA0_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA1 0x38df
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA1_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA2 0x38e0
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA2_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA3 0x38e1
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA3_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA4 0x38e2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA4_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA5 0x38e3
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA5_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA6 0x38e4
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA6_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA7 0x38e5
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA7_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA8 0x38e6
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA8_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_HBLANK_CONTROL 0x38e7
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_HBLANK_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0 0x38e8
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1 0x38e9
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2 0x38ea
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3 0x38eb
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4 0x38ec
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5 0x38ed
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6 0x38ee
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7 0x38ef
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8 0x38f0
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9 0x38f1
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10 0x38f2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11 0x38f3
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12 0x38f4
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13 0x38f5
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14 0x38f6
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_CONTROL 0x38f7
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0 0x38f8
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1 0x38f9
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL 0x38fa
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_CONTROL 0x38ff
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_VBID_CONTROL 0x3900
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_VBID_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_STREAM_CONTROL 0x3901
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_STREAM_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL 0x3902
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_CONTROL 0x3903
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT0 0x3904
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT0_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT1 0x3905
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT1_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_STATUS 0x3906
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_STATUS_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_STATUS 0x3907
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_STATUS_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL 0x3908
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL 0x3909
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SPARE 0x390a
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_SPARE_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT2 0x390f
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT2_BASE_IDX 2
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT3 0x3910
+#define regDP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT3_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_link_enc0_dispdec
+// base address: 0x1ad5c
+#define regDP_LINK_ENC0_DP_LINK_ENC_CLOCK_CONTROL 0x3697
+#define regDP_LINK_ENC0_DP_LINK_ENC_CLOCK_CONTROL_BASE_IDX 2
+#define regDP_LINK_ENC0_DP_LINK_ENC_SPARE 0x3698
+#define regDP_LINK_ENC0_DP_LINK_ENC_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_dphy_sym320_dispdec
+// base address: 0x1ae00
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL 0x36c0
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_STATUS 0x36c1
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_STATUS_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_UPDATE 0x36c4
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_UPDATE_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0 0x36c5
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL1 0x36c6
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL1_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL2 0x36c7
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL2_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL3 0x36c8
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL3_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0 0x36cb
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1 0x36cc
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2 0x36cd
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3 0x36ce
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0 0x36d1
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1 0x36d2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2 0x36d3
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3 0x36d4
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG 0x36d7
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED0 0x36d8
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED0_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED1 0x36d9
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED1_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED2 0x36da
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED2_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED3 0x36db
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED3_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_SQ_PULSE 0x36dc
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_SQ_PULSE_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM0 0x36dd
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM0_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM1 0x36de
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM1_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM2 0x36df
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM2_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM3 0x36e0
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM3_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM4 0x36e1
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM4_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM5 0x36e2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM5_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM6 0x36e3
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM6_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM7 0x36e4
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM7_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM8 0x36e5
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM8_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM9 0x36e6
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM9_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM10 0x36e7
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM10_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS 0x36e8
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE 0x36ea
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS0 0x36eb
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS0_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS1 0x36ec
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS1_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL 0x36ed
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0 0x36ee
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG1 0x36ef
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG1_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CRC_STATUS 0x36f0
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CRC_STATUS_BASE_IDX 2
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CRC_COUNT 0x36f1
+#define regDP_DPHY_SYM320_DP_DPHY_SYM32_CRC_COUNT_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_link_enc1_dispdec
+// base address: 0x1b0ac
+#define regDP_LINK_ENC1_DP_LINK_ENC_CLOCK_CONTROL 0x376b
+#define regDP_LINK_ENC1_DP_LINK_ENC_CLOCK_CONTROL_BASE_IDX 2
+#define regDP_LINK_ENC1_DP_LINK_ENC_SPARE 0x376c
+#define regDP_LINK_ENC1_DP_LINK_ENC_SPARE_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hpo_dp_dphy_sym321_dispdec
+// base address: 0x1b150
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL 0x3794
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_STATUS 0x3795
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_STATUS_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_UPDATE 0x3798
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_UPDATE_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL0 0x3799
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL0_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL1 0x379a
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL1_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL2 0x379b
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL2_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL3 0x379c
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL3_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0 0x379f
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1 0x37a0
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2 0x37a1
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3 0x37a2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0 0x37a5
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1 0x37a6
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2 0x37a7
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3 0x37a8
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG 0x37ab
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED0 0x37ac
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED0_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED1 0x37ad
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED1_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED2 0x37ae
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED2_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED3 0x37af
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED3_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_SQ_PULSE 0x37b0
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_SQ_PULSE_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM0 0x37b1
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM0_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM1 0x37b2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM1_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM2 0x37b3
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM2_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM3 0x37b4
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM3_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM4 0x37b5
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM4_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM5 0x37b6
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM5_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM6 0x37b7
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM6_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM7 0x37b8
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM7_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM8 0x37b9
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM8_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM9 0x37ba
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM9_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM10 0x37bb
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM10_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS 0x37bc
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE 0x37be
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS0 0x37bf
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS0_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS1 0x37c0
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS1_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL 0x37c1
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0 0x37c2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG1 0x37c3
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG1_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CRC_STATUS 0x37c4
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CRC_STATUS_BASE_IDX 2
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CRC_COUNT 0x37c5
+#define regDP_DPHY_SYM321_DP_DPHY_SYM32_CRC_COUNT_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dchvm_hvm_dispdec
+// base address: 0x0
+#define regDCHVM_CTRL0 0x3603
+#define regDCHVM_CTRL0_BASE_IDX 2
+#define regDCHVM_CTRL1 0x3604
+#define regDCHVM_CTRL1_BASE_IDX 2
+#define regDCHVM_CLK_CTRL 0x3605
+#define regDCHVM_CLK_CTRL_BASE_IDX 2
+#define regDCHVM_MEM_CTRL 0x3606
+#define regDCHVM_MEM_CTRL_BASE_IDX 2
+#define regDCHVM_RIOMMU_CTRL0 0x3607
+#define regDCHVM_RIOMMU_CTRL0_BASE_IDX 2
+#define regDCHVM_RIOMMU_STAT0 0x3608
+#define regDCHVM_RIOMMU_STAT0_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dlpc_dlpc_dispdec
+// base address: 0x0
+#define regDLPC_ENABLE 0x2fe8
+#define regDLPC_ENABLE_BASE_IDX 2
+#define regDLPC_CURRENT_COUNT 0x2fe9
+#define regDLPC_CURRENT_COUNT_BASE_IDX 2
+#define regDLPC_OPTC_SNAPSHOT 0x2fea
+#define regDLPC_OPTC_SNAPSHOT_BASE_IDX 2
+#define regDLPC_PWRUP 0x2feb
+#define regDLPC_PWRUP_BASE_IDX 2
+#define regDLPC_OTG_RESYNC 0x2fec
+#define regDLPC_OTG_RESYNC_BASE_IDX 2
+#define regDLPC_DCN_ZSC_LONO_PWRUP 0x2fed
+#define regDLPC_DCN_ZSC_LONO_PWRUP_BASE_IDX 2
+#define regDLPC_SPARE 0x2fee
+#define regDLPC_SPARE_BASE_IDX 2
+#define regDLPC_COUNTER_INIT_VALUE 0x2fef
+#define regDLPC_COUNTER_INIT_VALUE_BASE_IDX 2
+
+
+// addressBlock: dce_dpia_dpia_mu0_dpiadec
+// base address: 0x72000
+#define regDPIA_MU_CLOCK_CTRL 0x13800
+#define regDPIA_MU_CLOCK_CTRL_BASE_IDX 3
+#define regDPIA_MU_CLOCK_CTRL_DPIA_PORT0 0x13801
+#define regDPIA_MU_CLOCK_CTRL_DPIA_PORT0_BASE_IDX 3
+#define regDPIA_MU_RESET_CTRL_DPIA_PORT0 0x13802
+#define regDPIA_MU_RESET_CTRL_DPIA_PORT0_BASE_IDX 3
+#define regDPIA_MU_CLOCK_CTRL_DPIA_PORT1 0x13803
+#define regDPIA_MU_CLOCK_CTRL_DPIA_PORT1_BASE_IDX 3
+#define regDPIA_MU_RESET_CTRL_DPIA_PORT1 0x13804
+#define regDPIA_MU_RESET_CTRL_DPIA_PORT1_BASE_IDX 3
+#define regDPIA_MU_CLOCK_CTRL_DPIA_PORT2 0x13805
+#define regDPIA_MU_CLOCK_CTRL_DPIA_PORT2_BASE_IDX 3
+#define regDPIA_MU_RESET_CTRL_DPIA_PORT2 0x13806
+#define regDPIA_MU_RESET_CTRL_DPIA_PORT2_BASE_IDX 3
+#define regDPIA_MU_CLOCK_CTRL_DPIA_PORT3 0x13807
+#define regDPIA_MU_CLOCK_CTRL_DPIA_PORT3_BASE_IDX 3
+#define regDPIA_MU_RESET_CTRL_DPIA_PORT3 0x13808
+#define regDPIA_MU_RESET_CTRL_DPIA_PORT3_BASE_IDX 3
+#define regDPIA_MU_TPI_STATUS_DPIA_PORT0 0x13811
+#define regDPIA_MU_TPI_STATUS_DPIA_PORT0_BASE_IDX 3
+#define regDPIA_MU_TPI_STATUS_DPIA_PORT1 0x13812
+#define regDPIA_MU_TPI_STATUS_DPIA_PORT1_BASE_IDX 3
+#define regDPIA_MU_TPI_STATUS_DPIA_PORT2 0x13813
+#define regDPIA_MU_TPI_STATUS_DPIA_PORT2_BASE_IDX 3
+#define regDPIA_MU_TPI_STATUS_DPIA_PORT3 0x13814
+#define regDPIA_MU_TPI_STATUS_DPIA_PORT3_BASE_IDX 3
+#define regDPIA_MU_TPI_MAX_CREDIT_COUNT 0x13819
+#define regDPIA_MU_TPI_MAX_CREDIT_COUNT_BASE_IDX 3
+#define regDPIA_MU_INTERRUPT_STATUS 0x1381a
+#define regDPIA_MU_INTERRUPT_STATUS_BASE_IDX 3
+#define regDPIA_MU_INTERRUPT_CTRL 0x1381b
+#define regDPIA_MU_INTERRUPT_CTRL_BASE_IDX 3
+#define regDPIA_MU_LOCAL_INTERRUPT_CTRL 0x1381c
+#define regDPIA_MU_LOCAL_INTERRUPT_CTRL_BASE_IDX 3
+#define regDPIA_MU_LOCAL_INTERRUPT_ACK 0x1381d
+#define regDPIA_MU_LOCAL_INTERRUPT_ACK_BASE_IDX 3
+#define regDPIA_MU_RBBMIF_TIMEOUT_CTRL 0x1381e
+#define regDPIA_MU_RBBMIF_TIMEOUT_CTRL_BASE_IDX 3
+#define regDPIA_MU_RBBMIF_TIMEOUT_CTRL2 0x1381f
+#define regDPIA_MU_RBBMIF_TIMEOUT_CTRL2_BASE_IDX 3
+#define regDPIA_MU_RBBMIF_STATUS 0x13820
+#define regDPIA_MU_RBBMIF_STATUS_BASE_IDX 3
+#define regDPIA_MU_MICROSECOND_REF_CTRL 0x13821
+#define regDPIA_MU_MICROSECOND_REF_CTRL_BASE_IDX 3
+#define regDPIA_MU_PORT_ADP_STATUS 0x13822
+#define regDPIA_MU_PORT_ADP_STATUS_BASE_IDX 3
+#define regDPIA_GLUE_CTRL 0x13823
+#define regDPIA_GLUE_CTRL_BASE_IDX 3
+#define regDPIA_PERF_COUNT_CONTROL0 0x13825
+#define regDPIA_PERF_COUNT_CONTROL0_BASE_IDX 3
+#define regDPIA_PERF_COUNT_CONTROL1 0x13826
+#define regDPIA_PERF_COUNT_CONTROL1_BASE_IDX 3
+#define regDPIA_PERF_COUNT_CONTROL2 0x13827
+#define regDPIA_PERF_COUNT_CONTROL2_BASE_IDX 3
+#define regDPIA_PERF_COUNT_CONTROL3 0x13828
+#define regDPIA_PERF_COUNT_CONTROL3_BASE_IDX 3
+#define regDPIA_PERF_COUNT_CONTROL4 0x13829
+#define regDPIA_PERF_COUNT_CONTROL4_BASE_IDX 3
+#define regDPIA_PERF_COUNT_CONTROL5 0x1382a
+#define regDPIA_PERF_COUNT_CONTROL5_BASE_IDX 3
+#define regDPIA_PERF_COUNT_INDEX 0x1382b
+#define regDPIA_PERF_COUNT_INDEX_BASE_IDX 3
+#define regDPIA_PERF_COUNT_DATA_LO 0x1382c
+#define regDPIA_PERF_COUNT_DATA_LO_BASE_IDX 3
+#define regDPIA_MU_SPARE 0x1382d
+#define regDPIA_MU_SPARE_BASE_IDX 3
+
+
+// addressBlock: azendpoint_f2codecind
+// base address: 0x0
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x2200
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x2706
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x270d
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2 0x270e
+#define ixAZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL 0x2724
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3 0x273e
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x2770
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x2771
+#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x2f09
+#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x2f0a
+#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x2f0b
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY 0x3702
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x3707
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x3708
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x3709
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x371c
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2 0x371d
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3 0x371e
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4 0x371f
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION 0x3770
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION 0x3771
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO 0x3772
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR 0x3776
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA 0x3776
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE 0x3777
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE 0x3778
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE 0x3779
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE 0x377a
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC 0x377b
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_HBR 0x377c
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX 0x3780
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA 0x3781
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE 0x3785
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE 0x3786
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE 0x3787
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE 0x3788
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x3789
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x378a
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x378b
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x378c
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x378d
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x378e
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x378f
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x3790
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x3791
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x3792
+#define ixAZALIA_F2_CODEC_PIN_ASSOCIATION_INFO 0x3793
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x3797
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x3798
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_LPIB 0x3799
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x379a
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_CODING_TYPE 0x379b
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x379c
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x379d
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x379e
+#define ixAZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x3f09
+#define ixAZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES 0x3f0c
+#define ixAZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH 0x3f0e
+
+
+// addressBlock: azendpoint_descriptorind
+// base address: 0x0
+#define ixAUDIO_DESCRIPTOR0 0x0001
+#define ixAUDIO_DESCRIPTOR1 0x0002
+#define ixAUDIO_DESCRIPTOR2 0x0003
+#define ixAUDIO_DESCRIPTOR3 0x0004
+#define ixAUDIO_DESCRIPTOR4 0x0005
+#define ixAUDIO_DESCRIPTOR5 0x0006
+#define ixAUDIO_DESCRIPTOR6 0x0007
+#define ixAUDIO_DESCRIPTOR7 0x0008
+#define ixAUDIO_DESCRIPTOR8 0x0009
+#define ixAUDIO_DESCRIPTOR9 0x000a
+#define ixAUDIO_DESCRIPTOR10 0x000b
+#define ixAUDIO_DESCRIPTOR11 0x000c
+#define ixAUDIO_DESCRIPTOR12 0x000d
+#define ixAUDIO_DESCRIPTOR13 0x000e
+
+
+// addressBlock: azendpoint_sinkinfoind
+// base address: 0x0
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID 0x0000
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID 0x0001
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN 0x0002
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_PORTID0 0x0003
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_PORTID1 0x0004
+#define ixSINK_DESCRIPTION0 0x0005
+#define ixSINK_DESCRIPTION1 0x0006
+#define ixSINK_DESCRIPTION2 0x0007
+#define ixSINK_DESCRIPTION3 0x0008
+#define ixSINK_DESCRIPTION4 0x0009
+#define ixSINK_DESCRIPTION5 0x000a
+#define ixSINK_DESCRIPTION6 0x000b
+#define ixSINK_DESCRIPTION7 0x000c
+#define ixSINK_DESCRIPTION8 0x000d
+#define ixSINK_DESCRIPTION9 0x000e
+#define ixSINK_DESCRIPTION10 0x000f
+#define ixSINK_DESCRIPTION11 0x0010
+#define ixSINK_DESCRIPTION12 0x0011
+#define ixSINK_DESCRIPTION13 0x0012
+#define ixSINK_DESCRIPTION14 0x0013
+#define ixSINK_DESCRIPTION15 0x0014
+#define ixSINK_DESCRIPTION16 0x0015
+#define ixSINK_DESCRIPTION17 0x0016
+
+
+// addressBlock: azf0controller_azinputcrc0resultind
+// base address: 0x0
+#define ixAZALIA_INPUT_CRC0_CHANNEL0 0x0000
+#define ixAZALIA_INPUT_CRC0_CHANNEL1 0x0001
+#define ixAZALIA_INPUT_CRC0_CHANNEL2 0x0002
+#define ixAZALIA_INPUT_CRC0_CHANNEL3 0x0003
+#define ixAZALIA_INPUT_CRC0_CHANNEL4 0x0004
+#define ixAZALIA_INPUT_CRC0_CHANNEL5 0x0005
+#define ixAZALIA_INPUT_CRC0_CHANNEL6 0x0006
+#define ixAZALIA_INPUT_CRC0_CHANNEL7 0x0007
+
+
+// addressBlock: azf0controller_azinputcrc1resultind
+// base address: 0x0
+#define ixAZALIA_INPUT_CRC1_CHANNEL0 0x0000
+#define ixAZALIA_INPUT_CRC1_CHANNEL1 0x0001
+#define ixAZALIA_INPUT_CRC1_CHANNEL2 0x0002
+#define ixAZALIA_INPUT_CRC1_CHANNEL3 0x0003
+#define ixAZALIA_INPUT_CRC1_CHANNEL4 0x0004
+#define ixAZALIA_INPUT_CRC1_CHANNEL5 0x0005
+#define ixAZALIA_INPUT_CRC1_CHANNEL6 0x0006
+#define ixAZALIA_INPUT_CRC1_CHANNEL7 0x0007
+
+
+// addressBlock: azf0controller_azcrc0resultind
+// base address: 0x0
+#define ixAZALIA_CRC0_CHANNEL0 0x0000
+#define ixAZALIA_CRC0_CHANNEL1 0x0001
+#define ixAZALIA_CRC0_CHANNEL2 0x0002
+#define ixAZALIA_CRC0_CHANNEL3 0x0003
+#define ixAZALIA_CRC0_CHANNEL4 0x0004
+#define ixAZALIA_CRC0_CHANNEL5 0x0005
+#define ixAZALIA_CRC0_CHANNEL6 0x0006
+#define ixAZALIA_CRC0_CHANNEL7 0x0007
+
+
+// addressBlock: azf0controller_azcrc1resultind
+// base address: 0x0
+#define ixAZALIA_CRC1_CHANNEL0 0x0000
+#define ixAZALIA_CRC1_CHANNEL1 0x0001
+#define ixAZALIA_CRC1_CHANNEL2 0x0002
+#define ixAZALIA_CRC1_CHANNEL3 0x0003
+#define ixAZALIA_CRC1_CHANNEL4 0x0004
+#define ixAZALIA_CRC1_CHANNEL5 0x0005
+#define ixAZALIA_CRC1_CHANNEL6 0x0006
+#define ixAZALIA_CRC1_CHANNEL7 0x0007
+
+
+// addressBlock: azinputendpoint_f2codecind
+// base address: 0x0
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x6200
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x6706
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x670d
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x6f09
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x6f0a
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x6f0b
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x7707
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x7708
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE 0x7709
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x771c
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2 0x771d
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3 0x771e
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4 0x771f
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x7771
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE 0x7777
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE 0x7778
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE 0x7779
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE 0x777a
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR 0x777c
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE 0x7785
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE 0x7786
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE 0x7787
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE 0x7788
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x7798
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB 0x7799
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x779a
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x779b
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x779c
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_L 0x779d
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_H 0x779e
+#define ixAZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x7f09
+#define ixAZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x7f0c
+
+
+// addressBlock: azroot_f2codecind
+// base address: 0x0
+#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID 0x0f00
+#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID 0x0f02
+#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT 0x0f04
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE 0x1705
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID 0x1720
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2 0x1721
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3 0x1722
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4 0x1723
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION 0x1770
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESET 0x17ff
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT 0x1f04
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE 0x1f05
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES 0x1f0a
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS 0x1f0b
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES 0x1f0f
+
+
+// addressBlock: azf0stream0_streamind
+// base address: 0x0
+#define ixAZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM0_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM0_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM0_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM0_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream1_streamind
+// base address: 0x0
+#define ixAZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM1_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM1_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM1_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM1_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream2_streamind
+// base address: 0x0
+#define ixAZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM2_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM2_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM2_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM2_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream3_streamind
+// base address: 0x0
+#define ixAZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM3_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM3_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM3_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM3_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream4_streamind
+// base address: 0x0
+#define ixAZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM4_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM4_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM4_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM4_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream5_streamind
+// base address: 0x0
+#define ixAZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM5_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM5_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM5_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM5_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream6_streamind
+// base address: 0x0
+#define ixAZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM6_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM6_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM6_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM6_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream7_streamind
+// base address: 0x0
+#define ixAZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM7_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM7_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM7_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM7_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream8_streamind
+// base address: 0x0
+#define ixAZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM8_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM8_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM8_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM8_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream9_streamind
+// base address: 0x0
+#define ixAZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM9_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM9_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM9_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM9_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream10_streamind
+// base address: 0x0
+#define ixAZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM10_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM10_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM10_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM10_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream11_streamind
+// base address: 0x0
+#define ixAZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM11_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM11_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM11_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM11_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream12_streamind
+// base address: 0x0
+#define ixAZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM12_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM12_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM12_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM12_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream13_streamind
+// base address: 0x0
+#define ixAZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM13_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM13_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM13_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM13_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream14_streamind
+// base address: 0x0
+#define ixAZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM14_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM14_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM14_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM14_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0stream15_streamind
+// base address: 0x0
+#define ixAZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL 0x0000
+#define ixAZF0STREAM15_AZALIA_LATENCY_COUNTER_CONTROL 0x0001
+#define ixAZF0STREAM15_AZALIA_WORSTCASE_LATENCY_COUNT 0x0002
+#define ixAZF0STREAM15_AZALIA_CUMULATIVE_LATENCY_COUNT 0x0003
+#define ixAZF0STREAM15_AZALIA_CUMULATIVE_REQUEST_COUNT 0x0004
+
+
+// addressBlock: azf0endpoint0_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+#define ixAZF0ENDPOINT0_AZALIA_F0_ENDPOINT_FGCG_REP_DIS 0x0070
+
+
+// addressBlock: azf0endpoint1_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+#define ixAZF0ENDPOINT1_AZALIA_F0_ENDPOINT_FGCG_REP_DIS 0x0070
+
+
+// addressBlock: azf0endpoint2_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+#define ixAZF0ENDPOINT2_AZALIA_F0_ENDPOINT_FGCG_REP_DIS 0x0070
+
+
+// addressBlock: azf0endpoint3_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+#define ixAZF0ENDPOINT3_AZALIA_F0_ENDPOINT_FGCG_REP_DIS 0x0070
+
+
+// addressBlock: azf0endpoint4_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+#define ixAZF0ENDPOINT4_AZALIA_F0_ENDPOINT_FGCG_REP_DIS 0x0070
+
+
+// addressBlock: azf0endpoint5_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+#define ixAZF0ENDPOINT5_AZALIA_F0_ENDPOINT_FGCG_REP_DIS 0x0070
+
+
+// addressBlock: azf0endpoint6_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+#define ixAZF0ENDPOINT6_AZALIA_F0_ENDPOINT_FGCG_REP_DIS 0x0070
+
+
+// addressBlock: azf0endpoint7_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+#define ixAZF0ENDPOINT7_AZALIA_F0_ENDPOINT_FGCG_REP_DIS 0x0070
+
+
+// addressBlock: azf0inputendpoint0_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint1_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint2_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint3_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint4_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint5_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint6_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint7_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_6_0_sh_mask.h
new file mode 100644
index 000000000000..3ffb6ec8b40a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_6_0_sh_mask.h
@@ -0,0 +1,61940 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2025 Advanced Micro Devices, Inc. */
+
+
+#ifndef _dcn_3_6_0_SH_MASK_HEADER
+#define _dcn_3_6_0_SH_MASK_HEADER
+
+
+// addressBlock: dce_dc_hda_azcontroller_azdec
+//GLOBAL_CAPABILITIES
+#define GLOBAL_CAPABILITIES__SIXTY_FOUR_BIT_ADDRESS_SUPPORTED__SHIFT 0x0
+#define GLOBAL_CAPABILITIES__NUMBER_OF_SERIAL_DATA_OUTPUT_SIGNALS__SHIFT 0x1
+#define GLOBAL_CAPABILITIES__NUMBER_OF_BIDIRECTIONAL_STREAMS_SUPPORTED__SHIFT 0x3
+#define GLOBAL_CAPABILITIES__NUMBER_OF_INPUT_STREAMS_SUPPORTED__SHIFT 0x8
+#define GLOBAL_CAPABILITIES__NUMBER_OF_OUTPUT_STREAMS_SUPPORTED__SHIFT 0xc
+#define GLOBAL_CAPABILITIES__SIXTY_FOUR_BIT_ADDRESS_SUPPORTED_MASK 0x0001L
+#define GLOBAL_CAPABILITIES__NUMBER_OF_SERIAL_DATA_OUTPUT_SIGNALS_MASK 0x0006L
+#define GLOBAL_CAPABILITIES__NUMBER_OF_BIDIRECTIONAL_STREAMS_SUPPORTED_MASK 0x00F8L
+#define GLOBAL_CAPABILITIES__NUMBER_OF_INPUT_STREAMS_SUPPORTED_MASK 0x0F00L
+#define GLOBAL_CAPABILITIES__NUMBER_OF_OUTPUT_STREAMS_SUPPORTED_MASK 0xF000L
+//MINOR_VERSION
+#define MINOR_VERSION__MINOR_VERSION__SHIFT 0x0
+#define MINOR_VERSION__MINOR_VERSION_MASK 0xFFL
+//MAJOR_VERSION
+#define MAJOR_VERSION__MAJOR_VERSION__SHIFT 0x0
+#define MAJOR_VERSION__MAJOR_VERSION_MASK 0xFFL
+//OUTPUT_PAYLOAD_CAPABILITY
+#define OUTPUT_PAYLOAD_CAPABILITY__OUTPUT_PAYLOAD_CAPABILITY__SHIFT 0x0
+#define OUTPUT_PAYLOAD_CAPABILITY__OUTPUT_PAYLOAD_CAPABILITY_MASK 0xFFFFL
+//INPUT_PAYLOAD_CAPABILITY
+#define INPUT_PAYLOAD_CAPABILITY__INPUT_PAYLOAD_CAPABILITY__SHIFT 0x0
+#define INPUT_PAYLOAD_CAPABILITY__INPUT_PAYLOAD_CAPABILITY_MASK 0xFFFFL
+//GLOBAL_CONTROL
+#define GLOBAL_CONTROL__CONTROLLER_RESET__SHIFT 0x0
+#define GLOBAL_CONTROL__FLUSH_CONTROL__SHIFT 0x1
+#define GLOBAL_CONTROL__ACCEPT_UNSOLICITED_RESPONSE_ENABLE__SHIFT 0x8
+#define GLOBAL_CONTROL__CONTROLLER_RESET_MASK 0x00000001L
+#define GLOBAL_CONTROL__FLUSH_CONTROL_MASK 0x00000002L
+#define GLOBAL_CONTROL__ACCEPT_UNSOLICITED_RESPONSE_ENABLE_MASK 0x00000100L
+//WAKE_ENABLE
+#define WAKE_ENABLE__SDIN_WAKE_ENABLE_FLAG__SHIFT 0x0
+#define WAKE_ENABLE__SDIN_WAKE_ENABLE_FLAG_MASK 0x0001L
+//STATE_CHANGE_STATUS
+#define STATE_CHANGE_STATUS__STATE_CHANGE_STATUS__SHIFT 0x0
+#define STATE_CHANGE_STATUS__STATE_CHANGE_STATUS_MASK 0x0001L
+//GLOBAL_STATUS
+#define GLOBAL_STATUS__FLUSH_STATUS__SHIFT 0x1
+#define GLOBAL_STATUS__FLUSH_STATUS_MASK 0x00000002L
+//OUTPUT_STREAM_PAYLOAD_CAPABILITY
+#define OUTPUT_STREAM_PAYLOAD_CAPABILITY__OUTSTRMPAY__SHIFT 0x0
+#define OUTPUT_STREAM_PAYLOAD_CAPABILITY__OUTSTRMPAY_MASK 0xFFFFL
+//INPUT_STREAM_PAYLOAD_CAPABILITY
+#define INPUT_STREAM_PAYLOAD_CAPABILITY__INSTRMPAY__SHIFT 0x0
+#define INPUT_STREAM_PAYLOAD_CAPABILITY__INSTRMPAY_MASK 0xFFFFL
+//INTERRUPT_CONTROL
+#define INTERRUPT_CONTROL__STREAM_0_INTERRUPT_ENABLE__SHIFT 0x0
+#define INTERRUPT_CONTROL__STREAM_1_INTERRUPT_ENABLE__SHIFT 0x1
+#define INTERRUPT_CONTROL__STREAM_2_INTERRUPT_ENABLE__SHIFT 0x2
+#define INTERRUPT_CONTROL__STREAM_3_INTERRUPT_ENABLE__SHIFT 0x3
+#define INTERRUPT_CONTROL__STREAM_4_INTERRUPT_ENABLE__SHIFT 0x4
+#define INTERRUPT_CONTROL__STREAM_5_INTERRUPT_ENABLE__SHIFT 0x5
+#define INTERRUPT_CONTROL__STREAM_6_INTERRUPT_ENABLE__SHIFT 0x6
+#define INTERRUPT_CONTROL__STREAM_7_INTERRUPT_ENABLE__SHIFT 0x7
+#define INTERRUPT_CONTROL__STREAM_8_INTERRUPT_ENABLE__SHIFT 0x8
+#define INTERRUPT_CONTROL__STREAM_9_INTERRUPT_ENABLE__SHIFT 0x9
+#define INTERRUPT_CONTROL__STREAM_10_INTERRUPT_ENABLE__SHIFT 0xa
+#define INTERRUPT_CONTROL__STREAM_11_INTERRUPT_ENABLE__SHIFT 0xb
+#define INTERRUPT_CONTROL__STREAM_12_INTERRUPT_ENABLE__SHIFT 0xc
+#define INTERRUPT_CONTROL__STREAM_13_INTERRUPT_ENABLE__SHIFT 0xd
+#define INTERRUPT_CONTROL__STREAM_14_INTERRUPT_ENABLE__SHIFT 0xe
+#define INTERRUPT_CONTROL__STREAM_15_INTERRUPT_ENABLE__SHIFT 0xf
+#define INTERRUPT_CONTROL__CONTROLLER_INTERRUPT_ENABLE__SHIFT 0x1e
+#define INTERRUPT_CONTROL__GLOBAL_INTERRUPT_ENABLE__SHIFT 0x1f
+#define INTERRUPT_CONTROL__STREAM_0_INTERRUPT_ENABLE_MASK 0x00000001L
+#define INTERRUPT_CONTROL__STREAM_1_INTERRUPT_ENABLE_MASK 0x00000002L
+#define INTERRUPT_CONTROL__STREAM_2_INTERRUPT_ENABLE_MASK 0x00000004L
+#define INTERRUPT_CONTROL__STREAM_3_INTERRUPT_ENABLE_MASK 0x00000008L
+#define INTERRUPT_CONTROL__STREAM_4_INTERRUPT_ENABLE_MASK 0x00000010L
+#define INTERRUPT_CONTROL__STREAM_5_INTERRUPT_ENABLE_MASK 0x00000020L
+#define INTERRUPT_CONTROL__STREAM_6_INTERRUPT_ENABLE_MASK 0x00000040L
+#define INTERRUPT_CONTROL__STREAM_7_INTERRUPT_ENABLE_MASK 0x00000080L
+#define INTERRUPT_CONTROL__STREAM_8_INTERRUPT_ENABLE_MASK 0x00000100L
+#define INTERRUPT_CONTROL__STREAM_9_INTERRUPT_ENABLE_MASK 0x00000200L
+#define INTERRUPT_CONTROL__STREAM_10_INTERRUPT_ENABLE_MASK 0x00000400L
+#define INTERRUPT_CONTROL__STREAM_11_INTERRUPT_ENABLE_MASK 0x00000800L
+#define INTERRUPT_CONTROL__STREAM_12_INTERRUPT_ENABLE_MASK 0x00001000L
+#define INTERRUPT_CONTROL__STREAM_13_INTERRUPT_ENABLE_MASK 0x00002000L
+#define INTERRUPT_CONTROL__STREAM_14_INTERRUPT_ENABLE_MASK 0x00004000L
+#define INTERRUPT_CONTROL__STREAM_15_INTERRUPT_ENABLE_MASK 0x00008000L
+#define INTERRUPT_CONTROL__CONTROLLER_INTERRUPT_ENABLE_MASK 0x40000000L
+#define INTERRUPT_CONTROL__GLOBAL_INTERRUPT_ENABLE_MASK 0x80000000L
+//INTERRUPT_STATUS
+#define INTERRUPT_STATUS__STREAM_0_INTERRUPT_STATUS__SHIFT 0x0
+#define INTERRUPT_STATUS__STREAM_1_INTERRUPT_STATUS__SHIFT 0x1
+#define INTERRUPT_STATUS__STREAM_2_INTERRUPT_STATUS__SHIFT 0x2
+#define INTERRUPT_STATUS__STREAM_3_INTERRUPT_STATUS__SHIFT 0x3
+#define INTERRUPT_STATUS__STREAM_4_INTERRUPT_STATUS__SHIFT 0x4
+#define INTERRUPT_STATUS__STREAM_5_INTERRUPT_STATUS__SHIFT 0x5
+#define INTERRUPT_STATUS__STREAM_6_INTERRUPT_STATUS__SHIFT 0x6
+#define INTERRUPT_STATUS__STREAM_7_INTERRUPT_STATUS__SHIFT 0x7
+#define INTERRUPT_STATUS__STREAM_8_INTERRUPT_STATUS__SHIFT 0x8
+#define INTERRUPT_STATUS__STREAM_9_INTERRUPT_STATUS__SHIFT 0x9
+#define INTERRUPT_STATUS__STREAM_10_INTERRUPT_STATUS__SHIFT 0xa
+#define INTERRUPT_STATUS__STREAM_11_INTERRUPT_STATUS__SHIFT 0xb
+#define INTERRUPT_STATUS__STREAM_12_INTERRUPT_STATUS__SHIFT 0xc
+#define INTERRUPT_STATUS__STREAM_13_INTERRUPT_STATUS__SHIFT 0xd
+#define INTERRUPT_STATUS__STREAM_14_INTERRUPT_STATUS__SHIFT 0xe
+#define INTERRUPT_STATUS__STREAM_15_INTERRUPT_STATUS__SHIFT 0xf
+#define INTERRUPT_STATUS__CONTROLLER_INTERRUPT_STATUS__SHIFT 0x1e
+#define INTERRUPT_STATUS__GLOBAL_INTERRUPT_STATUS__SHIFT 0x1f
+#define INTERRUPT_STATUS__STREAM_0_INTERRUPT_STATUS_MASK 0x00000001L
+#define INTERRUPT_STATUS__STREAM_1_INTERRUPT_STATUS_MASK 0x00000002L
+#define INTERRUPT_STATUS__STREAM_2_INTERRUPT_STATUS_MASK 0x00000004L
+#define INTERRUPT_STATUS__STREAM_3_INTERRUPT_STATUS_MASK 0x00000008L
+#define INTERRUPT_STATUS__STREAM_4_INTERRUPT_STATUS_MASK 0x00000010L
+#define INTERRUPT_STATUS__STREAM_5_INTERRUPT_STATUS_MASK 0x00000020L
+#define INTERRUPT_STATUS__STREAM_6_INTERRUPT_STATUS_MASK 0x00000040L
+#define INTERRUPT_STATUS__STREAM_7_INTERRUPT_STATUS_MASK 0x00000080L
+#define INTERRUPT_STATUS__STREAM_8_INTERRUPT_STATUS_MASK 0x00000100L
+#define INTERRUPT_STATUS__STREAM_9_INTERRUPT_STATUS_MASK 0x00000200L
+#define INTERRUPT_STATUS__STREAM_10_INTERRUPT_STATUS_MASK 0x00000400L
+#define INTERRUPT_STATUS__STREAM_11_INTERRUPT_STATUS_MASK 0x00000800L
+#define INTERRUPT_STATUS__STREAM_12_INTERRUPT_STATUS_MASK 0x00001000L
+#define INTERRUPT_STATUS__STREAM_13_INTERRUPT_STATUS_MASK 0x00002000L
+#define INTERRUPT_STATUS__STREAM_14_INTERRUPT_STATUS_MASK 0x00004000L
+#define INTERRUPT_STATUS__STREAM_15_INTERRUPT_STATUS_MASK 0x00008000L
+#define INTERRUPT_STATUS__CONTROLLER_INTERRUPT_STATUS_MASK 0x40000000L
+#define INTERRUPT_STATUS__GLOBAL_INTERRUPT_STATUS_MASK 0x80000000L
+//WALL_CLOCK_COUNTER
+#define WALL_CLOCK_COUNTER__WALL_CLOCK_COUNTER__SHIFT 0x0
+#define WALL_CLOCK_COUNTER__WALL_CLOCK_COUNTER_MASK 0xFFFFFFFFL
+//STREAM_SYNCHRONIZATION
+#define STREAM_SYNCHRONIZATION__STREAM_0_SYNCHRONIZATION__SHIFT 0x0
+#define STREAM_SYNCHRONIZATION__STREAM_1_SYNCHRONIZATION__SHIFT 0x1
+#define STREAM_SYNCHRONIZATION__STREAM_2_SYNCHRONIZATION__SHIFT 0x2
+#define STREAM_SYNCHRONIZATION__STREAM_3_SYNCHRONIZATION__SHIFT 0x3
+#define STREAM_SYNCHRONIZATION__STREAM_4_SYNCHRONIZATION__SHIFT 0x4
+#define STREAM_SYNCHRONIZATION__STREAM_5_SYNCHRONIZATION__SHIFT 0x5
+#define STREAM_SYNCHRONIZATION__STREAM_6_SYNCHRONIZATION__SHIFT 0x6
+#define STREAM_SYNCHRONIZATION__STREAM_7_SYNCHRONIZATION__SHIFT 0x7
+#define STREAM_SYNCHRONIZATION__STREAM_8_SYNCHRONIZATION__SHIFT 0x8
+#define STREAM_SYNCHRONIZATION__STREAM_9_SYNCHRONIZATION__SHIFT 0x9
+#define STREAM_SYNCHRONIZATION__STREAM_10_SYNCHRONIZATION__SHIFT 0xa
+#define STREAM_SYNCHRONIZATION__STREAM_11_SYNCHRONIZATION__SHIFT 0xb
+#define STREAM_SYNCHRONIZATION__STREAM_12_SYNCHRONIZATION__SHIFT 0xc
+#define STREAM_SYNCHRONIZATION__STREAM_13_SYNCHRONIZATION__SHIFT 0xd
+#define STREAM_SYNCHRONIZATION__STREAM_14_SYNCHRONIZATION__SHIFT 0xe
+#define STREAM_SYNCHRONIZATION__STREAM_15_SYNCHRONIZATION__SHIFT 0xf
+#define STREAM_SYNCHRONIZATION__STREAM_0_SYNCHRONIZATION_MASK 0x00000001L
+#define STREAM_SYNCHRONIZATION__STREAM_1_SYNCHRONIZATION_MASK 0x00000002L
+#define STREAM_SYNCHRONIZATION__STREAM_2_SYNCHRONIZATION_MASK 0x00000004L
+#define STREAM_SYNCHRONIZATION__STREAM_3_SYNCHRONIZATION_MASK 0x00000008L
+#define STREAM_SYNCHRONIZATION__STREAM_4_SYNCHRONIZATION_MASK 0x00000010L
+#define STREAM_SYNCHRONIZATION__STREAM_5_SYNCHRONIZATION_MASK 0x00000020L
+#define STREAM_SYNCHRONIZATION__STREAM_6_SYNCHRONIZATION_MASK 0x00000040L
+#define STREAM_SYNCHRONIZATION__STREAM_7_SYNCHRONIZATION_MASK 0x00000080L
+#define STREAM_SYNCHRONIZATION__STREAM_8_SYNCHRONIZATION_MASK 0x00000100L
+#define STREAM_SYNCHRONIZATION__STREAM_9_SYNCHRONIZATION_MASK 0x00000200L
+#define STREAM_SYNCHRONIZATION__STREAM_10_SYNCHRONIZATION_MASK 0x00000400L
+#define STREAM_SYNCHRONIZATION__STREAM_11_SYNCHRONIZATION_MASK 0x00000800L
+#define STREAM_SYNCHRONIZATION__STREAM_12_SYNCHRONIZATION_MASK 0x00001000L
+#define STREAM_SYNCHRONIZATION__STREAM_13_SYNCHRONIZATION_MASK 0x00002000L
+#define STREAM_SYNCHRONIZATION__STREAM_14_SYNCHRONIZATION_MASK 0x00004000L
+#define STREAM_SYNCHRONIZATION__STREAM_15_SYNCHRONIZATION_MASK 0x00008000L
+//CORB_LOWER_BASE_ADDRESS
+#define CORB_LOWER_BASE_ADDRESS__CORB_LOWER_BASE_UNIMPLEMENTED_BITS__SHIFT 0x0
+#define CORB_LOWER_BASE_ADDRESS__CORB_LOWER_BASE_ADDRESS__SHIFT 0x7
+#define CORB_LOWER_BASE_ADDRESS__CORB_LOWER_BASE_UNIMPLEMENTED_BITS_MASK 0x0000007FL
+#define CORB_LOWER_BASE_ADDRESS__CORB_LOWER_BASE_ADDRESS_MASK 0xFFFFFF80L
+//CORB_UPPER_BASE_ADDRESS
+#define CORB_UPPER_BASE_ADDRESS__CORB_UPPER_BASE_ADDRESS__SHIFT 0x0
+#define CORB_UPPER_BASE_ADDRESS__CORB_UPPER_BASE_ADDRESS_MASK 0xFFFFFFFFL
+//AZCONTROLLER0_CORB_WRITE_POINTER
+#define AZCONTROLLER0_CORB_WRITE_POINTER__CORB_WRITE_POINTER__SHIFT 0x0
+#define AZCONTROLLER0_CORB_WRITE_POINTER__CORB_WRITE_POINTER_MASK 0x00FFL
+//AZCONTROLLER0_CORB_READ_POINTER
+#define AZCONTROLLER0_CORB_READ_POINTER__CORB_READ_POINTER__SHIFT 0x0
+#define AZCONTROLLER0_CORB_READ_POINTER__CORB_READ_POINTER_RESET__SHIFT 0xf
+#define AZCONTROLLER0_CORB_READ_POINTER__CORB_READ_POINTER_MASK 0x00FFL
+#define AZCONTROLLER0_CORB_READ_POINTER__CORB_READ_POINTER_RESET_MASK 0x8000L
+//AZCONTROLLER0_CORB_CONTROL
+#define AZCONTROLLER0_CORB_CONTROL__CORB_MEMORY_ERROR_INTERRUPT_ENABLE__SHIFT 0x0
+#define AZCONTROLLER0_CORB_CONTROL__ENABLE_CORB_DMA_ENGINE__SHIFT 0x1
+#define AZCONTROLLER0_CORB_CONTROL__CORB_MEMORY_ERROR_INTERRUPT_ENABLE_MASK 0x01L
+#define AZCONTROLLER0_CORB_CONTROL__ENABLE_CORB_DMA_ENGINE_MASK 0x02L
+//AZCONTROLLER0_CORB_STATUS
+#define AZCONTROLLER0_CORB_STATUS__CORB_MEMORY_ERROR_INDICATION__SHIFT 0x0
+#define AZCONTROLLER0_CORB_STATUS__CORB_MEMORY_ERROR_INDICATION_MASK 0x01L
+//AZCONTROLLER0_CORB_SIZE
+#define AZCONTROLLER0_CORB_SIZE__CORB_SIZE__SHIFT 0x0
+#define AZCONTROLLER0_CORB_SIZE__CORB_SIZE_CAPABILITY__SHIFT 0x4
+#define AZCONTROLLER0_CORB_SIZE__CORB_SIZE_MASK 0x0003L
+#define AZCONTROLLER0_CORB_SIZE__CORB_SIZE_CAPABILITY_MASK 0x00F0L
+//AZCONTROLLER0_RIRB_LOWER_BASE_ADDRESS
+#define AZCONTROLLER0_RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_UNIMPLEMENTED_BITS__SHIFT 0x0
+#define AZCONTROLLER0_RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_ADDRESS__SHIFT 0x7
+#define AZCONTROLLER0_RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_UNIMPLEMENTED_BITS_MASK 0x0000007FL
+#define AZCONTROLLER0_RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_ADDRESS_MASK 0xFFFFFF80L
+//AZCONTROLLER0_RIRB_UPPER_BASE_ADDRESS
+#define AZCONTROLLER0_RIRB_UPPER_BASE_ADDRESS__RIRB_UPPER_BASE_ADDRESS__SHIFT 0x0
+#define AZCONTROLLER0_RIRB_UPPER_BASE_ADDRESS__RIRB_UPPER_BASE_ADDRESS_MASK 0xFFFFFFFFL
+//AZCONTROLLER0_RIRB_WRITE_POINTER
+#define AZCONTROLLER0_RIRB_WRITE_POINTER__RIRB_WRITE_POINTER__SHIFT 0x0
+#define AZCONTROLLER0_RIRB_WRITE_POINTER__RIRB_WRITE_POINTER_RESET__SHIFT 0xf
+#define AZCONTROLLER0_RIRB_WRITE_POINTER__RIRB_WRITE_POINTER_MASK 0x00FFL
+#define AZCONTROLLER0_RIRB_WRITE_POINTER__RIRB_WRITE_POINTER_RESET_MASK 0x8000L
+//AZCONTROLLER0_RESPONSE_INTERRUPT_COUNT
+#define AZCONTROLLER0_RESPONSE_INTERRUPT_COUNT__N_RESPONSE_INTERRUPT_COUNT__SHIFT 0x0
+#define AZCONTROLLER0_RESPONSE_INTERRUPT_COUNT__N_RESPONSE_INTERRUPT_COUNT_MASK 0x00FFL
+//AZCONTROLLER0_RIRB_CONTROL
+#define AZCONTROLLER0_RIRB_CONTROL__RESPONSE_INTERRUPT_CONTROL__SHIFT 0x0
+#define AZCONTROLLER0_RIRB_CONTROL__RIRB_DMA_ENABLE__SHIFT 0x1
+#define AZCONTROLLER0_RIRB_CONTROL__RESPONSE_OVERRUN_INTERRUPT_CONTROL__SHIFT 0x2
+#define AZCONTROLLER0_RIRB_CONTROL__RESPONSE_INTERRUPT_CONTROL_MASK 0x01L
+#define AZCONTROLLER0_RIRB_CONTROL__RIRB_DMA_ENABLE_MASK 0x02L
+#define AZCONTROLLER0_RIRB_CONTROL__RESPONSE_OVERRUN_INTERRUPT_CONTROL_MASK 0x04L
+//AZCONTROLLER0_RIRB_STATUS
+#define AZCONTROLLER0_RIRB_STATUS__RESPONSE_INTERRUPT__SHIFT 0x0
+#define AZCONTROLLER0_RIRB_STATUS__RESPONSE_OVERRUN_INTERRUPT_STATUS__SHIFT 0x2
+#define AZCONTROLLER0_RIRB_STATUS__RESPONSE_INTERRUPT_MASK 0x01L
+#define AZCONTROLLER0_RIRB_STATUS__RESPONSE_OVERRUN_INTERRUPT_STATUS_MASK 0x04L
+//AZCONTROLLER0_RIRB_SIZE
+#define AZCONTROLLER0_RIRB_SIZE__RIRB_SIZE__SHIFT 0x0
+#define AZCONTROLLER0_RIRB_SIZE__RIRB_SIZE_CAPABILITY__SHIFT 0x4
+#define AZCONTROLLER0_RIRB_SIZE__RIRB_SIZE_MASK 0x0003L
+#define AZCONTROLLER0_RIRB_SIZE__RIRB_SIZE_CAPABILITY_MASK 0x00F0L
+//AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_VERB_AND_PAYLOAD__SHIFT 0x0
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_CODEC_ADDRESS__SHIFT 0x1c
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_VERB_AND_PAYLOAD_MASK 0x0FFFFFFFL
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_CODEC_ADDRESS_MASK 0xF0000000L
+//AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE_MASK 0xFFFFFFFFL
+//AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE_MASK 0x0000FFFFL
+//AZCONTROLLER0_IMMEDIATE_RESPONSE_INPUT_INTERFACE
+#define AZCONTROLLER0_IMMEDIATE_RESPONSE_INPUT_INTERFACE__IMMEDIATE_RESPONSE_READ__SHIFT 0x0
+#define AZCONTROLLER0_IMMEDIATE_RESPONSE_INPUT_INTERFACE__IMMEDIATE_RESPONSE_READ_MASK 0xFFFFFFFFL
+//AZCONTROLLER0_IMMEDIATE_COMMAND_STATUS
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_STATUS__IMMEDIATE_COMMAND_BUSY__SHIFT 0x0
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_STATUS__IMMEDIATE_RESULT_VALID__SHIFT 0x1
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_STATUS__IMMEDIATE_COMMAND_BUSY_MASK 0x00000001L
+#define AZCONTROLLER0_IMMEDIATE_COMMAND_STATUS__IMMEDIATE_RESULT_VALID_MASK 0x00000002L
+//AZCONTROLLER0_DMA_POSITION_LOWER_BASE_ADDRESS
+#define AZCONTROLLER0_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_BUFFER_ENABLE__SHIFT 0x0
+#define AZCONTROLLER0_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_UNIMPLEMENTED_BITS__SHIFT 0x1
+#define AZCONTROLLER0_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_ADDRESS__SHIFT 0x7
+#define AZCONTROLLER0_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_BUFFER_ENABLE_MASK 0x00000001L
+#define AZCONTROLLER0_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_UNIMPLEMENTED_BITS_MASK 0x0000007EL
+#define AZCONTROLLER0_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_ADDRESS_MASK 0xFFFFFF80L
+//AZCONTROLLER0_DMA_POSITION_UPPER_BASE_ADDRESS
+#define AZCONTROLLER0_DMA_POSITION_UPPER_BASE_ADDRESS__DMA_POSITION_UPPER_BASE_ADDRESS__SHIFT 0x0
+#define AZCONTROLLER0_DMA_POSITION_UPPER_BASE_ADDRESS__DMA_POSITION_UPPER_BASE_ADDRESS_MASK 0xFFFFFFFFL
+//AZCONTROLLER0_WALL_CLOCK_COUNTER_ALIAS
+#define AZCONTROLLER0_WALL_CLOCK_COUNTER_ALIAS__WALL_CLOCK_COUNTER_ALIAS__SHIFT 0x0
+#define AZCONTROLLER0_WALL_CLOCK_COUNTER_ALIAS__WALL_CLOCK_COUNTER_ALIAS_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azendpoint_azdec
+//AZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA
+#define AZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE_MASK 0xFFFFFFFFL
+//AZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX
+#define AZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE_MASK 0x0001FFFFL
+
+
+// addressBlock: dce_dc_hda_azinputendpoint_azdec
+//AZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA
+#define AZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE_MASK 0xFFFFFFFFL
+//AZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX
+#define AZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE_MASK 0x0001FFFFL
+
+
+// addressBlock: dce_dc_hda_azcontroller_azdec
+//AZCONTROLLER1_CORB_WRITE_POINTER
+#define AZCONTROLLER1_CORB_WRITE_POINTER__CORB_WRITE_POINTER__SHIFT 0x0
+#define AZCONTROLLER1_CORB_WRITE_POINTER__CORB_WRITE_POINTER_MASK 0x00FFL
+//AZCONTROLLER1_CORB_READ_POINTER
+#define AZCONTROLLER1_CORB_READ_POINTER__CORB_READ_POINTER__SHIFT 0x0
+#define AZCONTROLLER1_CORB_READ_POINTER__CORB_READ_POINTER_RESET__SHIFT 0xf
+#define AZCONTROLLER1_CORB_READ_POINTER__CORB_READ_POINTER_MASK 0x00FFL
+#define AZCONTROLLER1_CORB_READ_POINTER__CORB_READ_POINTER_RESET_MASK 0x8000L
+//AZCONTROLLER1_CORB_CONTROL
+#define AZCONTROLLER1_CORB_CONTROL__CORB_MEMORY_ERROR_INTERRUPT_ENABLE__SHIFT 0x0
+#define AZCONTROLLER1_CORB_CONTROL__ENABLE_CORB_DMA_ENGINE__SHIFT 0x1
+#define AZCONTROLLER1_CORB_CONTROL__CORB_MEMORY_ERROR_INTERRUPT_ENABLE_MASK 0x01L
+#define AZCONTROLLER1_CORB_CONTROL__ENABLE_CORB_DMA_ENGINE_MASK 0x02L
+//AZCONTROLLER1_CORB_STATUS
+#define AZCONTROLLER1_CORB_STATUS__CORB_MEMORY_ERROR_INDICATION__SHIFT 0x0
+#define AZCONTROLLER1_CORB_STATUS__CORB_MEMORY_ERROR_INDICATION_MASK 0x01L
+//AZCONTROLLER1_CORB_SIZE
+#define AZCONTROLLER1_CORB_SIZE__CORB_SIZE__SHIFT 0x0
+#define AZCONTROLLER1_CORB_SIZE__CORB_SIZE_CAPABILITY__SHIFT 0x4
+#define AZCONTROLLER1_CORB_SIZE__CORB_SIZE_MASK 0x0003L
+#define AZCONTROLLER1_CORB_SIZE__CORB_SIZE_CAPABILITY_MASK 0x00F0L
+//AZCONTROLLER1_RIRB_LOWER_BASE_ADDRESS
+#define AZCONTROLLER1_RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_UNIMPLEMENTED_BITS__SHIFT 0x0
+#define AZCONTROLLER1_RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_ADDRESS__SHIFT 0x7
+#define AZCONTROLLER1_RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_UNIMPLEMENTED_BITS_MASK 0x0000007FL
+#define AZCONTROLLER1_RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_ADDRESS_MASK 0xFFFFFF80L
+//AZCONTROLLER1_RIRB_UPPER_BASE_ADDRESS
+#define AZCONTROLLER1_RIRB_UPPER_BASE_ADDRESS__RIRB_UPPER_BASE_ADDRESS__SHIFT 0x0
+#define AZCONTROLLER1_RIRB_UPPER_BASE_ADDRESS__RIRB_UPPER_BASE_ADDRESS_MASK 0xFFFFFFFFL
+//AZCONTROLLER1_RIRB_WRITE_POINTER
+#define AZCONTROLLER1_RIRB_WRITE_POINTER__RIRB_WRITE_POINTER__SHIFT 0x0
+#define AZCONTROLLER1_RIRB_WRITE_POINTER__RIRB_WRITE_POINTER_RESET__SHIFT 0xf
+#define AZCONTROLLER1_RIRB_WRITE_POINTER__RIRB_WRITE_POINTER_MASK 0x00FFL
+#define AZCONTROLLER1_RIRB_WRITE_POINTER__RIRB_WRITE_POINTER_RESET_MASK 0x8000L
+//AZCONTROLLER1_RESPONSE_INTERRUPT_COUNT
+#define AZCONTROLLER1_RESPONSE_INTERRUPT_COUNT__N_RESPONSE_INTERRUPT_COUNT__SHIFT 0x0
+#define AZCONTROLLER1_RESPONSE_INTERRUPT_COUNT__N_RESPONSE_INTERRUPT_COUNT_MASK 0x00FFL
+//AZCONTROLLER1_RIRB_CONTROL
+#define AZCONTROLLER1_RIRB_CONTROL__RESPONSE_INTERRUPT_CONTROL__SHIFT 0x0
+#define AZCONTROLLER1_RIRB_CONTROL__RIRB_DMA_ENABLE__SHIFT 0x1
+#define AZCONTROLLER1_RIRB_CONTROL__RESPONSE_OVERRUN_INTERRUPT_CONTROL__SHIFT 0x2
+#define AZCONTROLLER1_RIRB_CONTROL__RESPONSE_INTERRUPT_CONTROL_MASK 0x01L
+#define AZCONTROLLER1_RIRB_CONTROL__RIRB_DMA_ENABLE_MASK 0x02L
+#define AZCONTROLLER1_RIRB_CONTROL__RESPONSE_OVERRUN_INTERRUPT_CONTROL_MASK 0x04L
+//AZCONTROLLER1_RIRB_STATUS
+#define AZCONTROLLER1_RIRB_STATUS__RESPONSE_INTERRUPT__SHIFT 0x0
+#define AZCONTROLLER1_RIRB_STATUS__RESPONSE_OVERRUN_INTERRUPT_STATUS__SHIFT 0x2
+#define AZCONTROLLER1_RIRB_STATUS__RESPONSE_INTERRUPT_MASK 0x01L
+#define AZCONTROLLER1_RIRB_STATUS__RESPONSE_OVERRUN_INTERRUPT_STATUS_MASK 0x04L
+//AZCONTROLLER1_RIRB_SIZE
+#define AZCONTROLLER1_RIRB_SIZE__RIRB_SIZE__SHIFT 0x0
+#define AZCONTROLLER1_RIRB_SIZE__RIRB_SIZE_CAPABILITY__SHIFT 0x4
+#define AZCONTROLLER1_RIRB_SIZE__RIRB_SIZE_MASK 0x0003L
+#define AZCONTROLLER1_RIRB_SIZE__RIRB_SIZE_CAPABILITY_MASK 0x00F0L
+//AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_VERB_AND_PAYLOAD__SHIFT 0x0
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_CODEC_ADDRESS__SHIFT 0x1c
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_VERB_AND_PAYLOAD_MASK 0x0FFFFFFFL
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_CODEC_ADDRESS_MASK 0xF0000000L
+//AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE_MASK 0xFFFFFFFFL
+//AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE_MASK 0x0000FFFFL
+//AZCONTROLLER1_IMMEDIATE_RESPONSE_INPUT_INTERFACE
+#define AZCONTROLLER1_IMMEDIATE_RESPONSE_INPUT_INTERFACE__IMMEDIATE_RESPONSE_READ__SHIFT 0x0
+#define AZCONTROLLER1_IMMEDIATE_RESPONSE_INPUT_INTERFACE__IMMEDIATE_RESPONSE_READ_MASK 0xFFFFFFFFL
+//AZCONTROLLER1_IMMEDIATE_COMMAND_STATUS
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_STATUS__IMMEDIATE_COMMAND_BUSY__SHIFT 0x0
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_STATUS__IMMEDIATE_RESULT_VALID__SHIFT 0x1
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_STATUS__IMMEDIATE_COMMAND_BUSY_MASK 0x00000001L
+#define AZCONTROLLER1_IMMEDIATE_COMMAND_STATUS__IMMEDIATE_RESULT_VALID_MASK 0x00000002L
+//AZCONTROLLER1_DMA_POSITION_LOWER_BASE_ADDRESS
+#define AZCONTROLLER1_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_BUFFER_ENABLE__SHIFT 0x0
+#define AZCONTROLLER1_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_UNIMPLEMENTED_BITS__SHIFT 0x1
+#define AZCONTROLLER1_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_ADDRESS__SHIFT 0x7
+#define AZCONTROLLER1_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_BUFFER_ENABLE_MASK 0x00000001L
+#define AZCONTROLLER1_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_UNIMPLEMENTED_BITS_MASK 0x0000007EL
+#define AZCONTROLLER1_DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_ADDRESS_MASK 0xFFFFFF80L
+//AZCONTROLLER1_DMA_POSITION_UPPER_BASE_ADDRESS
+#define AZCONTROLLER1_DMA_POSITION_UPPER_BASE_ADDRESS__DMA_POSITION_UPPER_BASE_ADDRESS__SHIFT 0x0
+#define AZCONTROLLER1_DMA_POSITION_UPPER_BASE_ADDRESS__DMA_POSITION_UPPER_BASE_ADDRESS_MASK 0xFFFFFFFFL
+//AZCONTROLLER1_WALL_CLOCK_COUNTER_ALIAS
+#define AZCONTROLLER1_WALL_CLOCK_COUNTER_ALIAS__WALL_CLOCK_COUNTER_ALIAS__SHIFT 0x0
+#define AZCONTROLLER1_WALL_CLOCK_COUNTER_ALIAS__WALL_CLOCK_COUNTER_ALIAS_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azendpoint_azdec
+//AZENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA
+#define AZENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE_MASK 0xFFFFFFFFL
+//AZENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX
+#define AZENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE_MASK 0x0001FFFFL
+
+
+// addressBlock: dce_dc_hda_azinputendpoint_azdec
+//AZINPUTENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA
+#define AZINPUTENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZINPUTENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE_MASK 0xFFFFFFFFL
+//AZINPUTENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX
+#define AZINPUTENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZINPUTENDPOINT1_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE_MASK 0x0001FFFFL
+
+
+// addressBlock: dce_dc_dccg_dccg_dfs_dispdec
+//DENTIST_DISPCLK_CNTL
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x0
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x8
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_MODE__SHIFT 0xf
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHGTOG__SHIFT 0x11
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_DONETOG__SHIFT 0x12
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x13
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE__SHIFT 0x14
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHGTOG__SHIFT 0x15
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_DONETOG__SHIFT 0x16
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER__SHIFT 0x18
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x0000007FL
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x00007F00L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_MODE_MASK 0x00018000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHGTOG_MASK 0x00020000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_DONETOG_MASK 0x00040000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x00080000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE_MASK 0x00100000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHGTOG_MASK 0x00200000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_DONETOG_MASK 0x00400000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L
+
+
+// addressBlock: dce_dc_dccg_dccg_dispdec
+//PHYPLLA_PIXCLK_RESYNC_CNTL
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_ENABLE_MASK 0x00000100L
+//PHYPLLB_PIXCLK_RESYNC_CNTL
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_ENABLE_MASK 0x00000100L
+//PHYPLLC_PIXCLK_RESYNC_CNTL
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_ENABLE_MASK 0x00000100L
+//PHYPLLD_PIXCLK_RESYNC_CNTL
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_ENABLE_MASK 0x00000100L
+//DP_DTO_DBUF_EN
+#define DP_DTO_DBUF_EN__DP_DTO0_DBUF_EN__SHIFT 0x0
+#define DP_DTO_DBUF_EN__DP_DTO1_DBUF_EN__SHIFT 0x1
+#define DP_DTO_DBUF_EN__DP_DTO2_DBUF_EN__SHIFT 0x2
+#define DP_DTO_DBUF_EN__DP_DTO3_DBUF_EN__SHIFT 0x3
+#define DP_DTO_DBUF_EN__DP_DTO4_DBUF_EN__SHIFT 0x4
+#define DP_DTO_DBUF_EN__DP_DTO5_DBUF_EN__SHIFT 0x5
+#define DP_DTO_DBUF_EN__DP_DTO6_DBUF_EN__SHIFT 0x6
+#define DP_DTO_DBUF_EN__DP_DTO7_DBUF_EN__SHIFT 0x7
+#define DP_DTO_DBUF_EN__DP_DTO0_DBUF_EN_MASK 0x00000001L
+#define DP_DTO_DBUF_EN__DP_DTO1_DBUF_EN_MASK 0x00000002L
+#define DP_DTO_DBUF_EN__DP_DTO2_DBUF_EN_MASK 0x00000004L
+#define DP_DTO_DBUF_EN__DP_DTO3_DBUF_EN_MASK 0x00000008L
+#define DP_DTO_DBUF_EN__DP_DTO4_DBUF_EN_MASK 0x00000010L
+#define DP_DTO_DBUF_EN__DP_DTO5_DBUF_EN_MASK 0x00000020L
+#define DP_DTO_DBUF_EN__DP_DTO6_DBUF_EN_MASK 0x00000040L
+#define DP_DTO_DBUF_EN__DP_DTO7_DBUF_EN_MASK 0x00000080L
+//DSCCLK3_DTO_PARAM
+#define DSCCLK3_DTO_PARAM__DSCCLK3_DTO_PHASE__SHIFT 0x0
+#define DSCCLK3_DTO_PARAM__DSCCLK3_DTO_MODULO__SHIFT 0x10
+#define DSCCLK3_DTO_PARAM__DSCCLK3_DTO_PHASE_MASK 0x000000FFL
+#define DSCCLK3_DTO_PARAM__DSCCLK3_DTO_MODULO_MASK 0x00FF0000L
+//DPREFCLK_CGTT_BLK_CTRL_REG
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//DCCG_GATE_DISABLE_CNTL4
+#define DCCG_GATE_DISABLE_CNTL4__PHYA_REFCLK_ROOT_GATE_DISABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL4__PHYB_REFCLK_ROOT_GATE_DISABLE__SHIFT 0x1
+#define DCCG_GATE_DISABLE_CNTL4__PHYC_REFCLK_ROOT_GATE_DISABLE__SHIFT 0x2
+#define DCCG_GATE_DISABLE_CNTL4__PHYD_REFCLK_ROOT_GATE_DISABLE__SHIFT 0x3
+#define DCCG_GATE_DISABLE_CNTL4__PHYE_REFCLK_ROOT_GATE_DISABLE__SHIFT 0x4
+#define DCCG_GATE_DISABLE_CNTL4__HDMICHARCLK0_ROOT_GATE_DISABLE__SHIFT 0x11
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK0_GATE_DISABLE__SHIFT 0x17
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK1_GATE_DISABLE__SHIFT 0x18
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK2_GATE_DISABLE__SHIFT 0x19
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK3_GATE_DISABLE__SHIFT 0x1a
+#define DCCG_GATE_DISABLE_CNTL4__PHYA_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000001L
+#define DCCG_GATE_DISABLE_CNTL4__PHYB_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000002L
+#define DCCG_GATE_DISABLE_CNTL4__PHYC_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000004L
+#define DCCG_GATE_DISABLE_CNTL4__PHYD_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000008L
+#define DCCG_GATE_DISABLE_CNTL4__PHYE_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000010L
+#define DCCG_GATE_DISABLE_CNTL4__HDMICHARCLK0_ROOT_GATE_DISABLE_MASK 0x00020000L
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK0_GATE_DISABLE_MASK 0x00800000L
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK1_GATE_DISABLE_MASK 0x01000000L
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK2_GATE_DISABLE_MASK 0x02000000L
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK3_GATE_DISABLE_MASK 0x04000000L
+//DPSTREAMCLK_CNTL
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK0_SRC_SEL__SHIFT 0x0
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK0_EN__SHIFT 0x3
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK1_SRC_SEL__SHIFT 0x4
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK1_EN__SHIFT 0x7
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK2_SRC_SEL__SHIFT 0x8
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK2_EN__SHIFT 0xb
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK3_SRC_SEL__SHIFT 0xc
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK3_EN__SHIFT 0xf
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK0_SRC_SEL_MASK 0x00000007L
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK0_EN_MASK 0x00000008L
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK1_SRC_SEL_MASK 0x00000070L
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK1_EN_MASK 0x00000080L
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK2_SRC_SEL_MASK 0x00000700L
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK2_EN_MASK 0x00000800L
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK3_SRC_SEL_MASK 0x00007000L
+#define DPSTREAMCLK_CNTL__DPSTREAMCLK3_EN_MASK 0x00008000L
+//REFCLK_CGTT_BLK_CTRL_REG
+#define REFCLK_CGTT_BLK_CTRL_REG__REFCLK_TURN_ON_DELAY__SHIFT 0x0
+#define REFCLK_CGTT_BLK_CTRL_REG__REFCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define REFCLK_CGTT_BLK_CTRL_REG__REFCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define REFCLK_CGTT_BLK_CTRL_REG__REFCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//PHYPLLE_PIXCLK_RESYNC_CNTL
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_ENABLE_MASK 0x00000100L
+//DCCG_PERFMON_CNTL2
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_DSICLK_ENABLE__SHIFT 0x0
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_REFCLK_ENABLE__SHIFT 0x1
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_PIXCLK1_ENABLE__SHIFT 0x2
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_PIXCLK2_ENABLE__SHIFT 0x3
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYC_PIXCLK_ENABLE__SHIFT 0x4
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYD_PIXCLK_ENABLE__SHIFT 0x5
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYE_PIXCLK_ENABLE__SHIFT 0x6
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYF_PIXCLK_ENABLE__SHIFT 0x7
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYG_PIXCLK_ENABLE__SHIFT 0x8
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_DTBCLK0_ENABLE__SHIFT 0x9
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_DSICLK_ENABLE_MASK 0x00000001L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_REFCLK_ENABLE_MASK 0x00000002L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_PIXCLK1_ENABLE_MASK 0x00000004L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_PIXCLK2_ENABLE_MASK 0x00000008L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYC_PIXCLK_ENABLE_MASK 0x00000010L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYD_PIXCLK_ENABLE_MASK 0x00000020L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYE_PIXCLK_ENABLE_MASK 0x00000040L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYF_PIXCLK_ENABLE_MASK 0x00000080L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYG_PIXCLK_ENABLE_MASK 0x00000100L
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_DTBCLK0_ENABLE_MASK 0x00000200L
+//DCCG_GLOBAL_FGCG_REP_CNTL
+#define DCCG_GLOBAL_FGCG_REP_CNTL__DCCG_GLOBAL_FGCG_REP_DIS__SHIFT 0x0
+#define DCCG_GLOBAL_FGCG_REP_CNTL__DCCG_GLOBAL_FGCG_REP_DIS_MASK 0x00000001L
+//DCCG_DS_DTO_INCR
+#define DCCG_DS_DTO_INCR__DCCG_DS_DTO_INCR__SHIFT 0x0
+#define DCCG_DS_DTO_INCR__DCCG_DS_DTO_INCR_MASK 0xFFFFFFFFL
+//DCCG_DS_DTO_MODULO
+#define DCCG_DS_DTO_MODULO__DCCG_DS_DTO_MODULO__SHIFT 0x0
+#define DCCG_DS_DTO_MODULO__DCCG_DS_DTO_MODULO_MASK 0xFFFFFFFFL
+//DCCG_DS_CNTL
+#define DCCG_DS_CNTL__DCCG_DS_ENABLE__SHIFT 0x0
+#define DCCG_DS_CNTL__DCCG_DS_REF_SRC__SHIFT 0x4
+#define DCCG_DS_CNTL__DCCG_DS_HW_CAL_ENABLE__SHIFT 0x8
+#define DCCG_DS_CNTL__DCCG_DS_ENABLED_STATUS__SHIFT 0x9
+#define DCCG_DS_CNTL__DCCG_DS_XTALIN_RATE_DIV__SHIFT 0x10
+#define DCCG_DS_CNTL__DCCG_DS_JITTER_REMOVE_DIS__SHIFT 0x18
+#define DCCG_DS_CNTL__DCCG_DS_DELAY_XTAL_SEL__SHIFT 0x19
+#define DCCG_DS_CNTL__DCCG_DS_ENABLE_MASK 0x00000001L
+#define DCCG_DS_CNTL__DCCG_DS_REF_SRC_MASK 0x00000030L
+#define DCCG_DS_CNTL__DCCG_DS_HW_CAL_ENABLE_MASK 0x00000100L
+#define DCCG_DS_CNTL__DCCG_DS_ENABLED_STATUS_MASK 0x00000200L
+#define DCCG_DS_CNTL__DCCG_DS_XTALIN_RATE_DIV_MASK 0x00030000L
+#define DCCG_DS_CNTL__DCCG_DS_JITTER_REMOVE_DIS_MASK 0x01000000L
+#define DCCG_DS_CNTL__DCCG_DS_DELAY_XTAL_SEL_MASK 0x02000000L
+//DCCG_DS_HW_CAL_INTERVAL
+#define DCCG_DS_HW_CAL_INTERVAL__DCCG_DS_HW_CAL_INTERVAL__SHIFT 0x0
+#define DCCG_DS_HW_CAL_INTERVAL__DCCG_DS_HW_CAL_INTERVAL_MASK 0xFFFFFFFFL
+//DPREFCLK_CNTL
+#define DPREFCLK_CNTL__DPREFCLK_SRC_SEL__SHIFT 0x0
+#define DPREFCLK_CNTL__DPREFCLK_SRC_SEL_MASK 0x00000007L
+//DCE_VERSION
+#define DCE_VERSION__MAJOR_VERSION__SHIFT 0x0
+#define DCE_VERSION__MINOR_VERSION__SHIFT 0x8
+#define DCE_VERSION__MAJOR_VERSION_MASK 0x000000FFL
+#define DCE_VERSION__MINOR_VERSION_MASK 0x0000FF00L
+//DCCG_GTC_CNTL
+#define DCCG_GTC_CNTL__DCCG_GTC_ENABLE__SHIFT 0x0
+#define DCCG_GTC_CNTL__DCCG_GTC_ENABLE_MASK 0x00000001L
+//DCCG_GTC_DTO_INCR
+#define DCCG_GTC_DTO_INCR__DCCG_GTC_DTO_INCR__SHIFT 0x0
+#define DCCG_GTC_DTO_INCR__DCCG_GTC_DTO_INCR_MASK 0xFFFFFFFFL
+//DCCG_GTC_DTO_MODULO
+#define DCCG_GTC_DTO_MODULO__DCCG_GTC_DTO_MODULO__SHIFT 0x0
+#define DCCG_GTC_DTO_MODULO__DCCG_GTC_DTO_MODULO_MASK 0xFFFFFFFFL
+//DCCG_GTC_CURRENT
+#define DCCG_GTC_CURRENT__DCCG_GTC_CURRENT__SHIFT 0x0
+#define DCCG_GTC_CURRENT__DCCG_GTC_CURRENT_MASK 0xFFFFFFFFL
+//SYMCLK32_SE_CNTL
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE0_SRC_SEL__SHIFT 0x0
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE0_EN__SHIFT 0x3
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE1_SRC_SEL__SHIFT 0x4
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE1_EN__SHIFT 0x7
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE2_SRC_SEL__SHIFT 0x8
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE2_EN__SHIFT 0xb
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE3_SRC_SEL__SHIFT 0xc
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE3_EN__SHIFT 0xf
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE0_SRC_SEL_MASK 0x00000007L
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE0_EN_MASK 0x00000008L
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE1_SRC_SEL_MASK 0x00000070L
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE1_EN_MASK 0x00000080L
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE2_SRC_SEL_MASK 0x00000700L
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE2_EN_MASK 0x00000800L
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE3_SRC_SEL_MASK 0x00007000L
+#define SYMCLK32_SE_CNTL__SYMCLK32_SE3_EN_MASK 0x00008000L
+//SYMCLK32_LE_CNTL
+#define SYMCLK32_LE_CNTL__SYMCLK32_LE0_SRC_SEL__SHIFT 0x0
+#define SYMCLK32_LE_CNTL__SYMCLK32_LE0_EN__SHIFT 0x3
+#define SYMCLK32_LE_CNTL__SYMCLK32_LE1_SRC_SEL__SHIFT 0x4
+#define SYMCLK32_LE_CNTL__SYMCLK32_LE1_EN__SHIFT 0x7
+#define SYMCLK32_LE_CNTL__SYMCLK32_LE0_SRC_SEL_MASK 0x00000007L
+#define SYMCLK32_LE_CNTL__SYMCLK32_LE0_EN_MASK 0x00000008L
+#define SYMCLK32_LE_CNTL__SYMCLK32_LE1_SRC_SEL_MASK 0x00000070L
+#define SYMCLK32_LE_CNTL__SYMCLK32_LE1_EN_MASK 0x00000080L
+//DTBCLK_P_CNTL
+#define DTBCLK_P_CNTL__DTBCLK_P0_SRC_SEL__SHIFT 0x0
+#define DTBCLK_P_CNTL__DTBCLK_P0_EN__SHIFT 0x2
+#define DTBCLK_P_CNTL__DTBCLK_P1_SRC_SEL__SHIFT 0x3
+#define DTBCLK_P_CNTL__DTBCLK_P1_EN__SHIFT 0x5
+#define DTBCLK_P_CNTL__DTBCLK_P2_SRC_SEL__SHIFT 0x6
+#define DTBCLK_P_CNTL__DTBCLK_P2_EN__SHIFT 0x8
+#define DTBCLK_P_CNTL__DTBCLK_P3_SRC_SEL__SHIFT 0x9
+#define DTBCLK_P_CNTL__DTBCLK_P3_EN__SHIFT 0xb
+#define DTBCLK_P_CNTL__DTBCLK_P0_SRC_SEL_MASK 0x00000003L
+#define DTBCLK_P_CNTL__DTBCLK_P0_EN_MASK 0x00000004L
+#define DTBCLK_P_CNTL__DTBCLK_P1_SRC_SEL_MASK 0x00000018L
+#define DTBCLK_P_CNTL__DTBCLK_P1_EN_MASK 0x00000020L
+#define DTBCLK_P_CNTL__DTBCLK_P2_SRC_SEL_MASK 0x000000C0L
+#define DTBCLK_P_CNTL__DTBCLK_P2_EN_MASK 0x00000100L
+#define DTBCLK_P_CNTL__DTBCLK_P3_SRC_SEL_MASK 0x00000600L
+#define DTBCLK_P_CNTL__DTBCLK_P3_EN_MASK 0x00000800L
+//DCCG_GATE_DISABLE_CNTL5
+#define DCCG_GATE_DISABLE_CNTL5__DTBCLK_P0_GATE_DISABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL5__DTBCLK_P1_GATE_DISABLE__SHIFT 0x1
+#define DCCG_GATE_DISABLE_CNTL5__DTBCLK_P2_GATE_DISABLE__SHIFT 0x2
+#define DCCG_GATE_DISABLE_CNTL5__DTBCLK_P3_GATE_DISABLE__SHIFT 0x3
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK0_ROOT_GATE_DISABLE__SHIFT 0x6
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK0_GATE_DISABLE__SHIFT 0x7
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK1_ROOT_GATE_DISABLE__SHIFT 0x8
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK1_GATE_DISABLE__SHIFT 0x9
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK2_ROOT_GATE_DISABLE__SHIFT 0xa
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK2_GATE_DISABLE__SHIFT 0xb
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK3_ROOT_GATE_DISABLE__SHIFT 0xc
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK3_GATE_DISABLE__SHIFT 0xd
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKA_ROOT_GATE_DISABLE__SHIFT 0x12
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKB_ROOT_GATE_DISABLE__SHIFT 0x13
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKC_ROOT_GATE_DISABLE__SHIFT 0x14
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKD_ROOT_GATE_DISABLE__SHIFT 0x15
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKE_ROOT_GATE_DISABLE__SHIFT 0x16
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKA_FE_ROOT_GATE_DISABLE__SHIFT 0x18
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKB_FE_ROOT_GATE_DISABLE__SHIFT 0x19
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKC_FE_ROOT_GATE_DISABLE__SHIFT 0x1a
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKD_FE_ROOT_GATE_DISABLE__SHIFT 0x1b
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKE_FE_ROOT_GATE_DISABLE__SHIFT 0x1c
+#define DCCG_GATE_DISABLE_CNTL5__DTBCLK_P0_GATE_DISABLE_MASK 0x00000001L
+#define DCCG_GATE_DISABLE_CNTL5__DTBCLK_P1_GATE_DISABLE_MASK 0x00000002L
+#define DCCG_GATE_DISABLE_CNTL5__DTBCLK_P2_GATE_DISABLE_MASK 0x00000004L
+#define DCCG_GATE_DISABLE_CNTL5__DTBCLK_P3_GATE_DISABLE_MASK 0x00000008L
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK0_ROOT_GATE_DISABLE_MASK 0x00000040L
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK0_GATE_DISABLE_MASK 0x00000080L
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK1_ROOT_GATE_DISABLE_MASK 0x00000100L
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK1_GATE_DISABLE_MASK 0x00000200L
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK2_ROOT_GATE_DISABLE_MASK 0x00000400L
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK2_GATE_DISABLE_MASK 0x00000800L
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK3_ROOT_GATE_DISABLE_MASK 0x00001000L
+#define DCCG_GATE_DISABLE_CNTL5__DPSTREAMCLK3_GATE_DISABLE_MASK 0x00002000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKA_ROOT_GATE_DISABLE_MASK 0x00040000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKB_ROOT_GATE_DISABLE_MASK 0x00080000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKC_ROOT_GATE_DISABLE_MASK 0x00100000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKD_ROOT_GATE_DISABLE_MASK 0x00200000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKE_ROOT_GATE_DISABLE_MASK 0x00400000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKA_FE_ROOT_GATE_DISABLE_MASK 0x01000000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKB_FE_ROOT_GATE_DISABLE_MASK 0x02000000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKC_FE_ROOT_GATE_DISABLE_MASK 0x04000000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKD_FE_ROOT_GATE_DISABLE_MASK 0x08000000L
+#define DCCG_GATE_DISABLE_CNTL5__SYMCLKE_FE_ROOT_GATE_DISABLE_MASK 0x10000000L
+//DSCCLK0_DTO_PARAM
+#define DSCCLK0_DTO_PARAM__DSCCLK0_DTO_PHASE__SHIFT 0x0
+#define DSCCLK0_DTO_PARAM__DSCCLK0_DTO_MODULO__SHIFT 0x10
+#define DSCCLK0_DTO_PARAM__DSCCLK0_DTO_PHASE_MASK 0x000000FFL
+#define DSCCLK0_DTO_PARAM__DSCCLK0_DTO_MODULO_MASK 0x00FF0000L
+//DSCCLK1_DTO_PARAM
+#define DSCCLK1_DTO_PARAM__DSCCLK1_DTO_PHASE__SHIFT 0x0
+#define DSCCLK1_DTO_PARAM__DSCCLK1_DTO_MODULO__SHIFT 0x10
+#define DSCCLK1_DTO_PARAM__DSCCLK1_DTO_PHASE_MASK 0x000000FFL
+#define DSCCLK1_DTO_PARAM__DSCCLK1_DTO_MODULO_MASK 0x00FF0000L
+//DSCCLK2_DTO_PARAM
+#define DSCCLK2_DTO_PARAM__DSCCLK2_DTO_PHASE__SHIFT 0x0
+#define DSCCLK2_DTO_PARAM__DSCCLK2_DTO_MODULO__SHIFT 0x10
+#define DSCCLK2_DTO_PARAM__DSCCLK2_DTO_PHASE_MASK 0x000000FFL
+#define DSCCLK2_DTO_PARAM__DSCCLK2_DTO_MODULO_MASK 0x00FF0000L
+//OTG_PIXEL_RATE_DIV
+#define OTG_PIXEL_RATE_DIV__OTG0_PIXEL_RATE_DIVK1__SHIFT 0x0
+#define OTG_PIXEL_RATE_DIV__OTG0_PIXEL_RATE_DIVK2__SHIFT 0x1
+#define OTG_PIXEL_RATE_DIV__OTG1_PIXEL_RATE_DIVK1__SHIFT 0x3
+#define OTG_PIXEL_RATE_DIV__OTG1_PIXEL_RATE_DIVK2__SHIFT 0x4
+#define OTG_PIXEL_RATE_DIV__OTG2_PIXEL_RATE_DIVK1__SHIFT 0x6
+#define OTG_PIXEL_RATE_DIV__OTG2_PIXEL_RATE_DIVK2__SHIFT 0x7
+#define OTG_PIXEL_RATE_DIV__OTG3_PIXEL_RATE_DIVK1__SHIFT 0x9
+#define OTG_PIXEL_RATE_DIV__OTG3_PIXEL_RATE_DIVK2__SHIFT 0xa
+#define OTG_PIXEL_RATE_DIV__OTG0_PIXEL_RATE_DIVK1_MASK 0x00000001L
+#define OTG_PIXEL_RATE_DIV__OTG0_PIXEL_RATE_DIVK2_MASK 0x00000006L
+#define OTG_PIXEL_RATE_DIV__OTG1_PIXEL_RATE_DIVK1_MASK 0x00000008L
+#define OTG_PIXEL_RATE_DIV__OTG1_PIXEL_RATE_DIVK2_MASK 0x00000030L
+#define OTG_PIXEL_RATE_DIV__OTG2_PIXEL_RATE_DIVK1_MASK 0x00000040L
+#define OTG_PIXEL_RATE_DIV__OTG2_PIXEL_RATE_DIVK2_MASK 0x00000180L
+#define OTG_PIXEL_RATE_DIV__OTG3_PIXEL_RATE_DIVK1_MASK 0x00000200L
+#define OTG_PIXEL_RATE_DIV__OTG3_PIXEL_RATE_DIVK2_MASK 0x00000C00L
+//MILLISECOND_TIME_BASE_DIV
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_DIV__SHIFT 0x0
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_CLOCK_SOURCE_SEL__SHIFT 0x14
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_DIV_MASK 0x0001FFFFL
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_CLOCK_SOURCE_SEL_MASK 0x00100000L
+//DISPCLK_FREQ_CHANGE_CNTL
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_DELAY__SHIFT 0x0
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_SIZE__SHIFT 0x10
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_FREQ_RAMP_DONE__SHIFT 0x14
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_MAX_ERRDET_CYCLES__SHIFT 0x19
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_RESET__SHIFT 0x1c
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_STATE__SHIFT 0x1d
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_OVR_EN__SHIFT 0x1e
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_CHG_FWD_CORR_DISABLE__SHIFT 0x1f
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_DELAY_MASK 0x00003FFFL
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_SIZE_MASK 0x000F0000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_FREQ_RAMP_DONE_MASK 0x00100000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_MAX_ERRDET_CYCLES_MASK 0x0E000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_RESET_MASK 0x10000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_STATE_MASK 0x20000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_OVR_EN_MASK 0x40000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_CHG_FWD_CORR_DISABLE_MASK 0x80000000L
+//DC_MEM_GLOBAL_PWR_REQ_CNTL
+#define DC_MEM_GLOBAL_PWR_REQ_CNTL__DC_MEM_GLOBAL_PWR_REQ_DIS__SHIFT 0x0
+#define DC_MEM_GLOBAL_PWR_REQ_CNTL__DC_MEM_GLOBAL_PWR_REQ_DIS_MASK 0x00000001L
+//DCCG_PERFMON_CNTL
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DISPCLK_ENABLE__SHIFT 0x0
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DPREFCLK_ENABLE__SHIFT 0x1
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYA_PIXCLK_ENABLE__SHIFT 0x2
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYB_PIXCLK_ENABLE__SHIFT 0x3
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK0_ENABLE__SHIFT 0x4
+#define DCCG_PERFMON_CNTL__DCCG_PERF_RUN__SHIFT 0x5
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_VSYNC__SHIFT 0x6
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_HSYNC__SHIFT 0x7
+#define DCCG_PERFMON_CNTL__DCCG_PERF_OTG_SEL__SHIFT 0x8
+#define DCCG_PERFMON_CNTL__DCCG_PERF_XTALIN_PULSE_DIV__SHIFT 0xb
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DISPCLK_ENABLE_MASK 0x00000001L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DPREFCLK_ENABLE_MASK 0x00000002L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYA_PIXCLK_ENABLE_MASK 0x00000004L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYB_PIXCLK_ENABLE_MASK 0x00000008L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK0_ENABLE_MASK 0x00000010L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_RUN_MASK 0x00000020L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_VSYNC_MASK 0x00000040L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_HSYNC_MASK 0x00000080L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_OTG_SEL_MASK 0x00000700L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_XTALIN_PULSE_DIV_MASK 0xFFFFF800L
+//DCCG_GATE_DISABLE_CNTL
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_DCCG_GATE_DISABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_R_DCCG_GATE_DISABLE__SHIFT 0x1
+#define DCCG_GATE_DISABLE_CNTL__SOCCLK_GATE_DISABLE__SHIFT 0x2
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GATE_DISABLE__SHIFT 0x3
+#define DCCG_GATE_DISABLE_CNTL__DACACLK_GATE_DISABLE__SHIFT 0x4
+#define DCCG_GATE_DISABLE_CNTL__DVOACLK_GATE_DISABLE__SHIFT 0x6
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_R_DCCG_GATE_DISABLE__SHIFT 0x8
+#define DCCG_GATE_DISABLE_CNTL__DPPCLK_GATE_DISABLE__SHIFT 0x9
+#define DCCG_GATE_DISABLE_CNTL__DPPCLK_R_DCCG_GATE_DISABLE__SHIFT 0xa
+#define DCCG_GATE_DISABLE_CNTL__DSCCLK_GATE_DISABLE__SHIFT 0xb
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK0_GATE_DISABLE__SHIFT 0x11
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK1_GATE_DISABLE__SHIFT 0x12
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK2_GATE_DISABLE__SHIFT 0x13
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GTC_GATE_DISABLE__SHIFT 0x16
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_GATE_DISABLE__SHIFT 0x1a
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_R_DIG_GATE_DISABLE__SHIFT 0x1b
+#define DCCG_GATE_DISABLE_CNTL__DSICLK_GATE_DISABLE__SHIFT 0x1c
+#define DCCG_GATE_DISABLE_CNTL__BYTECLK_GATE_DISABLE__SHIFT 0x1d
+#define DCCG_GATE_DISABLE_CNTL__ESCCLK_GATE_DISABLE__SHIFT 0x1e
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_DCCG_GATE_DISABLE_MASK 0x00000001L
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_R_DCCG_GATE_DISABLE_MASK 0x00000002L
+#define DCCG_GATE_DISABLE_CNTL__SOCCLK_GATE_DISABLE_MASK 0x00000004L
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GATE_DISABLE_MASK 0x00000008L
+#define DCCG_GATE_DISABLE_CNTL__DACACLK_GATE_DISABLE_MASK 0x00000010L
+#define DCCG_GATE_DISABLE_CNTL__DVOACLK_GATE_DISABLE_MASK 0x00000040L
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_R_DCCG_GATE_DISABLE_MASK 0x00000100L
+#define DCCG_GATE_DISABLE_CNTL__DPPCLK_GATE_DISABLE_MASK 0x00000200L
+#define DCCG_GATE_DISABLE_CNTL__DPPCLK_R_DCCG_GATE_DISABLE_MASK 0x00000400L
+#define DCCG_GATE_DISABLE_CNTL__DSCCLK_GATE_DISABLE_MASK 0x00000800L
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK0_GATE_DISABLE_MASK 0x00020000L
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK1_GATE_DISABLE_MASK 0x00040000L
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK2_GATE_DISABLE_MASK 0x00080000L
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GTC_GATE_DISABLE_MASK 0x00400000L
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_GATE_DISABLE_MASK 0x04000000L
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_R_DIG_GATE_DISABLE_MASK 0x08000000L
+#define DCCG_GATE_DISABLE_CNTL__DSICLK_GATE_DISABLE_MASK 0x10000000L
+#define DCCG_GATE_DISABLE_CNTL__BYTECLK_GATE_DISABLE_MASK 0x20000000L
+#define DCCG_GATE_DISABLE_CNTL__ESCCLK_GATE_DISABLE_MASK 0x40000000L
+//DISPCLK_CGTT_BLK_CTRL_REG
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//SOCCLK_CGTT_BLK_CTRL_REG
+#define SOCCLK_CGTT_BLK_CTRL_REG__SOCCLK_TURN_ON_DELAY__SHIFT 0x0
+#define SOCCLK_CGTT_BLK_CTRL_REG__SOCCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define SOCCLK_CGTT_BLK_CTRL_REG__SOCCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define SOCCLK_CGTT_BLK_CTRL_REG__SOCCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//DCCG_CAC_STATUS
+#define DCCG_CAC_STATUS__CAC_STATUS_RDDATA__SHIFT 0x0
+#define DCCG_CAC_STATUS__CAC_STATUS_RDDATA_MASK 0xFFFFFFFFL
+//MICROSECOND_TIME_BASE_DIV
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_DIV__SHIFT 0x0
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_DIV__SHIFT 0x8
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_SEL__SHIFT 0x10
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_CLOCK_SOURCE_SEL__SHIFT 0x11
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_CLOCK_SOURCE_SEL__SHIFT 0x14
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_DIV_MASK 0x0000007FL
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_DIV_MASK 0x00007F00L
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_SEL_MASK 0x00010000L
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_CLOCK_SOURCE_SEL_MASK 0x00020000L
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_CLOCK_SOURCE_SEL_MASK 0x00100000L
+//DCCG_GATE_DISABLE_CNTL2
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_FE_GATE_DISABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_FE_GATE_DISABLE__SHIFT 0x1
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_FE_GATE_DISABLE__SHIFT 0x2
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_FE_GATE_DISABLE__SHIFT 0x3
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_FE_GATE_DISABLE__SHIFT 0x4
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_FE_GATE_DISABLE__SHIFT 0x5
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_FE_GATE_DISABLE__SHIFT 0x6
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK0_GATE_DISABLE__SHIFT 0x8
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK1_GATE_DISABLE__SHIFT 0x9
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK2_GATE_DISABLE__SHIFT 0xa
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK3_GATE_DISABLE__SHIFT 0xb
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK4_GATE_DISABLE__SHIFT 0xc
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK5_GATE_DISABLE__SHIFT 0xd
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_GATE_DISABLE__SHIFT 0x10
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_GATE_DISABLE__SHIFT 0x11
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_GATE_DISABLE__SHIFT 0x12
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_GATE_DISABLE__SHIFT 0x13
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_GATE_DISABLE__SHIFT 0x14
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_GATE_DISABLE__SHIFT 0x15
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_GATE_DISABLE__SHIFT 0x16
+#define DCCG_GATE_DISABLE_CNTL2__PHYASYMCLK_ROOT_GATE_DISABLE__SHIFT 0x18
+#define DCCG_GATE_DISABLE_CNTL2__PHYBSYMCLK_ROOT_GATE_DISABLE__SHIFT 0x19
+#define DCCG_GATE_DISABLE_CNTL2__PHYCSYMCLK_ROOT_GATE_DISABLE__SHIFT 0x1a
+#define DCCG_GATE_DISABLE_CNTL2__PHYDSYMCLK_ROOT_GATE_DISABLE__SHIFT 0x1b
+#define DCCG_GATE_DISABLE_CNTL2__PHYESYMCLK_ROOT_GATE_DISABLE__SHIFT 0x1c
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_FE_GATE_DISABLE_MASK 0x00000001L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_FE_GATE_DISABLE_MASK 0x00000002L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_FE_GATE_DISABLE_MASK 0x00000004L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_FE_GATE_DISABLE_MASK 0x00000008L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_FE_GATE_DISABLE_MASK 0x00000010L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_FE_GATE_DISABLE_MASK 0x00000020L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_FE_GATE_DISABLE_MASK 0x00000040L
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK0_GATE_DISABLE_MASK 0x00000100L
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK1_GATE_DISABLE_MASK 0x00000200L
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK2_GATE_DISABLE_MASK 0x00000400L
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK3_GATE_DISABLE_MASK 0x00000800L
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK4_GATE_DISABLE_MASK 0x00001000L
+#define DCCG_GATE_DISABLE_CNTL2__HDMICHARCLK5_GATE_DISABLE_MASK 0x00002000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_GATE_DISABLE_MASK 0x00010000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_GATE_DISABLE_MASK 0x00020000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_GATE_DISABLE_MASK 0x00040000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_GATE_DISABLE_MASK 0x00080000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_GATE_DISABLE_MASK 0x00100000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_GATE_DISABLE_MASK 0x00200000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_GATE_DISABLE_MASK 0x00400000L
+#define DCCG_GATE_DISABLE_CNTL2__PHYASYMCLK_ROOT_GATE_DISABLE_MASK 0x01000000L
+#define DCCG_GATE_DISABLE_CNTL2__PHYBSYMCLK_ROOT_GATE_DISABLE_MASK 0x02000000L
+#define DCCG_GATE_DISABLE_CNTL2__PHYCSYMCLK_ROOT_GATE_DISABLE_MASK 0x04000000L
+#define DCCG_GATE_DISABLE_CNTL2__PHYDSYMCLK_ROOT_GATE_DISABLE_MASK 0x08000000L
+#define DCCG_GATE_DISABLE_CNTL2__PHYESYMCLK_ROOT_GATE_DISABLE_MASK 0x10000000L
+//SYMCLK_CGTT_BLK_CTRL_REG
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_ON_DELAY__SHIFT 0x0
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//DCCG_DISP_CNTL_REG
+#define DCCG_DISP_CNTL_REG__ALLOW_SR_ON_TRANS_REQ__SHIFT 0x8
+#define DCCG_DISP_CNTL_REG__ALLOW_SR_ON_TRANS_REQ_MASK 0x00000100L
+//OTG0_PIXEL_RATE_CNTL
+#define OTG0_PIXEL_RATE_CNTL__OTG0_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG0_PIXEL_RATE_CNTL__DTBCLK_DTO0_ENABLE__SHIFT 0x3
+#define OTG0_PIXEL_RATE_CNTL__DP_DTO0_ENABLE__SHIFT 0x4
+#define OTG0_PIXEL_RATE_CNTL__DP_DTO0_DS_DISABLE__SHIFT 0x5
+#define OTG0_PIXEL_RATE_CNTL__DTBCLKDTO0_ENABLE_STATUS__SHIFT 0x6
+#define OTG0_PIXEL_RATE_CNTL__DPDTO0_ENABLE_STATUS__SHIFT 0x7
+#define OTG0_PIXEL_RATE_CNTL__OTG0_ADD_PIXEL__SHIFT 0x8
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DROP_PIXEL__SHIFT 0x9
+#define OTG0_PIXEL_RATE_CNTL__PIPE0_DTO_SRC_SEL__SHIFT 0xc
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DIO_FIFO_ERROR__SHIFT 0xe
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DIO_ERROR_COUNT__SHIFT 0x10
+#define OTG0_PIXEL_RATE_CNTL__OTG0_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define OTG0_PIXEL_RATE_CNTL__DTBCLK_DTO0_ENABLE_MASK 0x00000008L
+#define OTG0_PIXEL_RATE_CNTL__DP_DTO0_ENABLE_MASK 0x00000010L
+#define OTG0_PIXEL_RATE_CNTL__DP_DTO0_DS_DISABLE_MASK 0x00000020L
+#define OTG0_PIXEL_RATE_CNTL__DTBCLKDTO0_ENABLE_STATUS_MASK 0x00000040L
+#define OTG0_PIXEL_RATE_CNTL__DPDTO0_ENABLE_STATUS_MASK 0x00000080L
+#define OTG0_PIXEL_RATE_CNTL__OTG0_ADD_PIXEL_MASK 0x00000100L
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DROP_PIXEL_MASK 0x00000200L
+#define OTG0_PIXEL_RATE_CNTL__PIPE0_DTO_SRC_SEL_MASK 0x00003000L
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DIO_FIFO_ERROR_MASK 0x0000C000L
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DIO_ERROR_COUNT_MASK 0x0FFF0000L
+//DP_DTO0_PHASE
+#define DP_DTO0_PHASE__DP_DTO0_PHASE__SHIFT 0x0
+#define DP_DTO0_PHASE__DP_DTO0_PHASE_MASK 0xFFFFFFFFL
+//DP_DTO0_MODULO
+#define DP_DTO0_MODULO__DP_DTO0_MODULO__SHIFT 0x0
+#define DP_DTO0_MODULO__DP_DTO0_MODULO_MASK 0xFFFFFFFFL
+//OTG0_PHYPLL_PIXEL_RATE_CNTL
+#define OTG0_PHYPLL_PIXEL_RATE_CNTL__OTG0_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG0_PHYPLL_PIXEL_RATE_CNTL__OTG0_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define OTG0_PHYPLL_PIXEL_RATE_CNTL__OTG0_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x00000007L
+#define OTG0_PHYPLL_PIXEL_RATE_CNTL__OTG0_PIXEL_RATE_PLL_SOURCE_MASK 0x00000010L
+//OTG1_PIXEL_RATE_CNTL
+#define OTG1_PIXEL_RATE_CNTL__OTG1_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG1_PIXEL_RATE_CNTL__DTBCLK_DTO1_ENABLE__SHIFT 0x3
+#define OTG1_PIXEL_RATE_CNTL__DP_DTO1_ENABLE__SHIFT 0x4
+#define OTG1_PIXEL_RATE_CNTL__DP_DTO1_DS_DISABLE__SHIFT 0x5
+#define OTG1_PIXEL_RATE_CNTL__DTBCLKDTO1_ENABLE_STATUS__SHIFT 0x6
+#define OTG1_PIXEL_RATE_CNTL__DPDTO1_ENABLE_STATUS__SHIFT 0x7
+#define OTG1_PIXEL_RATE_CNTL__OTG1_ADD_PIXEL__SHIFT 0x8
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DROP_PIXEL__SHIFT 0x9
+#define OTG1_PIXEL_RATE_CNTL__PIPE1_DTO_SRC_SEL__SHIFT 0xc
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DIO_FIFO_ERROR__SHIFT 0xe
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DIO_ERROR_COUNT__SHIFT 0x10
+#define OTG1_PIXEL_RATE_CNTL__OTG1_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define OTG1_PIXEL_RATE_CNTL__DTBCLK_DTO1_ENABLE_MASK 0x00000008L
+#define OTG1_PIXEL_RATE_CNTL__DP_DTO1_ENABLE_MASK 0x00000010L
+#define OTG1_PIXEL_RATE_CNTL__DP_DTO1_DS_DISABLE_MASK 0x00000020L
+#define OTG1_PIXEL_RATE_CNTL__DTBCLKDTO1_ENABLE_STATUS_MASK 0x00000040L
+#define OTG1_PIXEL_RATE_CNTL__DPDTO1_ENABLE_STATUS_MASK 0x00000080L
+#define OTG1_PIXEL_RATE_CNTL__OTG1_ADD_PIXEL_MASK 0x00000100L
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DROP_PIXEL_MASK 0x00000200L
+#define OTG1_PIXEL_RATE_CNTL__PIPE1_DTO_SRC_SEL_MASK 0x00003000L
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DIO_FIFO_ERROR_MASK 0x0000C000L
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DIO_ERROR_COUNT_MASK 0x0FFF0000L
+//DP_DTO1_PHASE
+#define DP_DTO1_PHASE__DP_DTO1_PHASE__SHIFT 0x0
+#define DP_DTO1_PHASE__DP_DTO1_PHASE_MASK 0xFFFFFFFFL
+//DP_DTO1_MODULO
+#define DP_DTO1_MODULO__DP_DTO1_MODULO__SHIFT 0x0
+#define DP_DTO1_MODULO__DP_DTO1_MODULO_MASK 0xFFFFFFFFL
+//OTG1_PHYPLL_PIXEL_RATE_CNTL
+#define OTG1_PHYPLL_PIXEL_RATE_CNTL__OTG1_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG1_PHYPLL_PIXEL_RATE_CNTL__OTG1_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define OTG1_PHYPLL_PIXEL_RATE_CNTL__OTG1_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x00000007L
+#define OTG1_PHYPLL_PIXEL_RATE_CNTL__OTG1_PIXEL_RATE_PLL_SOURCE_MASK 0x00000010L
+//OTG2_PIXEL_RATE_CNTL
+#define OTG2_PIXEL_RATE_CNTL__OTG2_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG2_PIXEL_RATE_CNTL__DTBCLK_DTO2_ENABLE__SHIFT 0x3
+#define OTG2_PIXEL_RATE_CNTL__DP_DTO2_ENABLE__SHIFT 0x4
+#define OTG2_PIXEL_RATE_CNTL__DP_DTO2_DS_DISABLE__SHIFT 0x5
+#define OTG2_PIXEL_RATE_CNTL__DTBCLKDTO2_ENABLE_STATUS__SHIFT 0x6
+#define OTG2_PIXEL_RATE_CNTL__DPDTO2_ENABLE_STATUS__SHIFT 0x7
+#define OTG2_PIXEL_RATE_CNTL__OTG2_ADD_PIXEL__SHIFT 0x8
+#define OTG2_PIXEL_RATE_CNTL__OTG2_DROP_PIXEL__SHIFT 0x9
+#define OTG2_PIXEL_RATE_CNTL__PIPE2_DTO_SRC_SEL__SHIFT 0xc
+#define OTG2_PIXEL_RATE_CNTL__OTG2_DIO_FIFO_ERROR__SHIFT 0xe
+#define OTG2_PIXEL_RATE_CNTL__OTG2_DIO_ERROR_COUNT__SHIFT 0x10
+#define OTG2_PIXEL_RATE_CNTL__OTG2_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define OTG2_PIXEL_RATE_CNTL__DTBCLK_DTO2_ENABLE_MASK 0x00000008L
+#define OTG2_PIXEL_RATE_CNTL__DP_DTO2_ENABLE_MASK 0x00000010L
+#define OTG2_PIXEL_RATE_CNTL__DP_DTO2_DS_DISABLE_MASK 0x00000020L
+#define OTG2_PIXEL_RATE_CNTL__DTBCLKDTO2_ENABLE_STATUS_MASK 0x00000040L
+#define OTG2_PIXEL_RATE_CNTL__DPDTO2_ENABLE_STATUS_MASK 0x00000080L
+#define OTG2_PIXEL_RATE_CNTL__OTG2_ADD_PIXEL_MASK 0x00000100L
+#define OTG2_PIXEL_RATE_CNTL__OTG2_DROP_PIXEL_MASK 0x00000200L
+#define OTG2_PIXEL_RATE_CNTL__PIPE2_DTO_SRC_SEL_MASK 0x00003000L
+#define OTG2_PIXEL_RATE_CNTL__OTG2_DIO_FIFO_ERROR_MASK 0x0000C000L
+#define OTG2_PIXEL_RATE_CNTL__OTG2_DIO_ERROR_COUNT_MASK 0x0FFF0000L
+//DP_DTO2_PHASE
+#define DP_DTO2_PHASE__DP_DTO2_PHASE__SHIFT 0x0
+#define DP_DTO2_PHASE__DP_DTO2_PHASE_MASK 0xFFFFFFFFL
+//DP_DTO2_MODULO
+#define DP_DTO2_MODULO__DP_DTO2_MODULO__SHIFT 0x0
+#define DP_DTO2_MODULO__DP_DTO2_MODULO_MASK 0xFFFFFFFFL
+//OTG2_PHYPLL_PIXEL_RATE_CNTL
+#define OTG2_PHYPLL_PIXEL_RATE_CNTL__OTG2_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG2_PHYPLL_PIXEL_RATE_CNTL__OTG2_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define OTG2_PHYPLL_PIXEL_RATE_CNTL__OTG2_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x00000007L
+#define OTG2_PHYPLL_PIXEL_RATE_CNTL__OTG2_PIXEL_RATE_PLL_SOURCE_MASK 0x00000010L
+//OTG3_PIXEL_RATE_CNTL
+#define OTG3_PIXEL_RATE_CNTL__OTG3_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG3_PIXEL_RATE_CNTL__DTBCLK_DTO3_ENABLE__SHIFT 0x3
+#define OTG3_PIXEL_RATE_CNTL__DP_DTO3_ENABLE__SHIFT 0x4
+#define OTG3_PIXEL_RATE_CNTL__DP_DTO3_DS_DISABLE__SHIFT 0x5
+#define OTG3_PIXEL_RATE_CNTL__DTBCLKDTO3_ENABLE_STATUS__SHIFT 0x6
+#define OTG3_PIXEL_RATE_CNTL__DPDTO3_ENABLE_STATUS__SHIFT 0x7
+#define OTG3_PIXEL_RATE_CNTL__OTG3_ADD_PIXEL__SHIFT 0x8
+#define OTG3_PIXEL_RATE_CNTL__OTG3_DROP_PIXEL__SHIFT 0x9
+#define OTG3_PIXEL_RATE_CNTL__PIPE3_DTO_SRC_SEL__SHIFT 0xc
+#define OTG3_PIXEL_RATE_CNTL__OTG3_DIO_FIFO_ERROR__SHIFT 0xe
+#define OTG3_PIXEL_RATE_CNTL__OTG3_DIO_ERROR_COUNT__SHIFT 0x10
+#define OTG3_PIXEL_RATE_CNTL__OTG3_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define OTG3_PIXEL_RATE_CNTL__DTBCLK_DTO3_ENABLE_MASK 0x00000008L
+#define OTG3_PIXEL_RATE_CNTL__DP_DTO3_ENABLE_MASK 0x00000010L
+#define OTG3_PIXEL_RATE_CNTL__DP_DTO3_DS_DISABLE_MASK 0x00000020L
+#define OTG3_PIXEL_RATE_CNTL__DTBCLKDTO3_ENABLE_STATUS_MASK 0x00000040L
+#define OTG3_PIXEL_RATE_CNTL__DPDTO3_ENABLE_STATUS_MASK 0x00000080L
+#define OTG3_PIXEL_RATE_CNTL__OTG3_ADD_PIXEL_MASK 0x00000100L
+#define OTG3_PIXEL_RATE_CNTL__OTG3_DROP_PIXEL_MASK 0x00000200L
+#define OTG3_PIXEL_RATE_CNTL__PIPE3_DTO_SRC_SEL_MASK 0x00003000L
+#define OTG3_PIXEL_RATE_CNTL__OTG3_DIO_FIFO_ERROR_MASK 0x0000C000L
+#define OTG3_PIXEL_RATE_CNTL__OTG3_DIO_ERROR_COUNT_MASK 0x0FFF0000L
+//DP_DTO3_PHASE
+#define DP_DTO3_PHASE__DP_DTO3_PHASE__SHIFT 0x0
+#define DP_DTO3_PHASE__DP_DTO3_PHASE_MASK 0xFFFFFFFFL
+//DP_DTO3_MODULO
+#define DP_DTO3_MODULO__DP_DTO3_MODULO__SHIFT 0x0
+#define DP_DTO3_MODULO__DP_DTO3_MODULO_MASK 0xFFFFFFFFL
+//OTG3_PHYPLL_PIXEL_RATE_CNTL
+#define OTG3_PHYPLL_PIXEL_RATE_CNTL__OTG3_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG3_PHYPLL_PIXEL_RATE_CNTL__OTG3_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define OTG3_PHYPLL_PIXEL_RATE_CNTL__OTG3_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x00000007L
+#define OTG3_PHYPLL_PIXEL_RATE_CNTL__OTG3_PIXEL_RATE_PLL_SOURCE_MASK 0x00000010L
+//DPPCLK_CGTT_BLK_CTRL_REG
+#define DPPCLK_CGTT_BLK_CTRL_REG__DPPCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DPPCLK_CGTT_BLK_CTRL_REG__DPPCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DPPCLK_CGTT_BLK_CTRL_REG__DPPCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DPPCLK_CGTT_BLK_CTRL_REG__DPPCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//DPPCLK0_DTO_PARAM
+#define DPPCLK0_DTO_PARAM__DPPCLK0_DTO_PHASE__SHIFT 0x0
+#define DPPCLK0_DTO_PARAM__DPPCLK0_DTO_MODULO__SHIFT 0x10
+#define DPPCLK0_DTO_PARAM__DPPCLK0_DTO_PHASE_MASK 0x000000FFL
+#define DPPCLK0_DTO_PARAM__DPPCLK0_DTO_MODULO_MASK 0x00FF0000L
+//DPPCLK1_DTO_PARAM
+#define DPPCLK1_DTO_PARAM__DPPCLK1_DTO_PHASE__SHIFT 0x0
+#define DPPCLK1_DTO_PARAM__DPPCLK1_DTO_MODULO__SHIFT 0x10
+#define DPPCLK1_DTO_PARAM__DPPCLK1_DTO_PHASE_MASK 0x000000FFL
+#define DPPCLK1_DTO_PARAM__DPPCLK1_DTO_MODULO_MASK 0x00FF0000L
+//DPPCLK2_DTO_PARAM
+#define DPPCLK2_DTO_PARAM__DPPCLK2_DTO_PHASE__SHIFT 0x0
+#define DPPCLK2_DTO_PARAM__DPPCLK2_DTO_MODULO__SHIFT 0x10
+#define DPPCLK2_DTO_PARAM__DPPCLK2_DTO_PHASE_MASK 0x000000FFL
+#define DPPCLK2_DTO_PARAM__DPPCLK2_DTO_MODULO_MASK 0x00FF0000L
+//DPPCLK3_DTO_PARAM
+#define DPPCLK3_DTO_PARAM__DPPCLK3_DTO_PHASE__SHIFT 0x0
+#define DPPCLK3_DTO_PARAM__DPPCLK3_DTO_MODULO__SHIFT 0x10
+#define DPPCLK3_DTO_PARAM__DPPCLK3_DTO_PHASE_MASK 0x000000FFL
+#define DPPCLK3_DTO_PARAM__DPPCLK3_DTO_MODULO_MASK 0x00FF0000L
+//DCCG_CAC_STATUS2
+#define DCCG_CAC_STATUS2__CAC_STATUS_RDDATA2__SHIFT 0x0
+#define DCCG_CAC_STATUS2__CAC_STATUS_RDDATA2_MASK 0x0007FFFFL
+//SYMCLKA_CLOCK_ENABLE
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_EN__SHIFT 0x4
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_SRC_SEL__SHIFT 0x5
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_SRC_SEL__SHIFT 0x8
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_EN_MASK 0x00000010L
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_SRC_SEL_MASK 0x000000E0L
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_SRC_SEL_MASK 0x00000700L
+//SYMCLKB_CLOCK_ENABLE
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_EN__SHIFT 0x4
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_SRC_SEL__SHIFT 0x5
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_SRC_SEL__SHIFT 0x8
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_EN_MASK 0x00000010L
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_SRC_SEL_MASK 0x000000E0L
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_SRC_SEL_MASK 0x00000700L
+//SYMCLKC_CLOCK_ENABLE
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_EN__SHIFT 0x4
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_SRC_SEL__SHIFT 0x5
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_SRC_SEL__SHIFT 0x8
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_EN_MASK 0x00000010L
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_SRC_SEL_MASK 0x000000E0L
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_SRC_SEL_MASK 0x00000700L
+//SYMCLKD_CLOCK_ENABLE
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_EN__SHIFT 0x4
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_SRC_SEL__SHIFT 0x5
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_SRC_SEL__SHIFT 0x8
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_EN_MASK 0x00000010L
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_SRC_SEL_MASK 0x000000E0L
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_SRC_SEL_MASK 0x00000700L
+//SYMCLKE_CLOCK_ENABLE
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_EN__SHIFT 0x4
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_SRC_SEL__SHIFT 0x5
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_SRC_SEL__SHIFT 0x8
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_EN_MASK 0x00000010L
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_SRC_SEL_MASK 0x000000E0L
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_SRC_SEL_MASK 0x00000700L
+//DCCG_SOFT_RESET
+#define DCCG_SOFT_RESET__REFCLK_SOFT_RESET__SHIFT 0x0
+#define DCCG_SOFT_RESET__SOFT_RESET_DVO__SHIFT 0x2
+#define DCCG_SOFT_RESET__DVO_ENABLE_RST__SHIFT 0x3
+#define DCCG_SOFT_RESET__AUDIO_DTO2_CLK_SOFT_RESET__SHIFT 0x4
+#define DCCG_SOFT_RESET__DPREFCLK_SOFT_RESET__SHIFT 0x8
+#define DCCG_SOFT_RESET__AMCLK0_SOFT_RESET__SHIFT 0xc
+#define DCCG_SOFT_RESET__AMCLK1_SOFT_RESET__SHIFT 0xd
+#define DCCG_SOFT_RESET__P0PLL_CFG_IF_SOFT_RESET__SHIFT 0xe
+#define DCCG_SOFT_RESET__P1PLL_CFG_IF_SOFT_RESET__SHIFT 0xf
+#define DCCG_SOFT_RESET__P2PLL_CFG_IF_SOFT_RESET__SHIFT 0x10
+#define DCCG_SOFT_RESET__A0PLL_CFG_IF_SOFT_RESET__SHIFT 0x11
+#define DCCG_SOFT_RESET__A1PLL_CFG_IF_SOFT_RESET__SHIFT 0x12
+#define DCCG_SOFT_RESET__C0PLL_CFG_IF_SOFT_RESET__SHIFT 0x13
+#define DCCG_SOFT_RESET__C1PLL_CFG_IF_SOFT_RESET__SHIFT 0x14
+#define DCCG_SOFT_RESET__C2PLL_CFG_IF_SOFT_RESET__SHIFT 0x15
+#define DCCG_SOFT_RESET__REFCLK_SOFT_RESET_MASK 0x00000001L
+#define DCCG_SOFT_RESET__SOFT_RESET_DVO_MASK 0x00000004L
+#define DCCG_SOFT_RESET__DVO_ENABLE_RST_MASK 0x00000008L
+#define DCCG_SOFT_RESET__AUDIO_DTO2_CLK_SOFT_RESET_MASK 0x00000010L
+#define DCCG_SOFT_RESET__DPREFCLK_SOFT_RESET_MASK 0x00000100L
+#define DCCG_SOFT_RESET__AMCLK0_SOFT_RESET_MASK 0x00001000L
+#define DCCG_SOFT_RESET__AMCLK1_SOFT_RESET_MASK 0x00002000L
+#define DCCG_SOFT_RESET__P0PLL_CFG_IF_SOFT_RESET_MASK 0x00004000L
+#define DCCG_SOFT_RESET__P1PLL_CFG_IF_SOFT_RESET_MASK 0x00008000L
+#define DCCG_SOFT_RESET__P2PLL_CFG_IF_SOFT_RESET_MASK 0x00010000L
+#define DCCG_SOFT_RESET__A0PLL_CFG_IF_SOFT_RESET_MASK 0x00020000L
+#define DCCG_SOFT_RESET__A1PLL_CFG_IF_SOFT_RESET_MASK 0x00040000L
+#define DCCG_SOFT_RESET__C0PLL_CFG_IF_SOFT_RESET_MASK 0x00080000L
+#define DCCG_SOFT_RESET__C1PLL_CFG_IF_SOFT_RESET_MASK 0x00100000L
+#define DCCG_SOFT_RESET__C2PLL_CFG_IF_SOFT_RESET_MASK 0x00200000L
+//DSCCLK_DTO_CTRL
+#define DSCCLK_DTO_CTRL__DSCCLK0_EN__SHIFT 0x0
+#define DSCCLK_DTO_CTRL__DSCCLK1_EN__SHIFT 0x1
+#define DSCCLK_DTO_CTRL__DSCCLK2_EN__SHIFT 0x2
+#define DSCCLK_DTO_CTRL__DSCCLK3_EN__SHIFT 0x3
+#define DSCCLK_DTO_CTRL__DSCCLK4_EN__SHIFT 0x4
+#define DSCCLK_DTO_CTRL__DSCCLK5_EN__SHIFT 0x5
+#define DSCCLK_DTO_CTRL__DSCCLK0_DTO_DB_EN__SHIFT 0x8
+#define DSCCLK_DTO_CTRL__DSCCLK1_DTO_DB_EN__SHIFT 0x9
+#define DSCCLK_DTO_CTRL__DSCCLK2_DTO_DB_EN__SHIFT 0xa
+#define DSCCLK_DTO_CTRL__DSCCLK3_DTO_DB_EN__SHIFT 0xb
+#define DSCCLK_DTO_CTRL__DSCCLK4_DTO_DB_EN__SHIFT 0xc
+#define DSCCLK_DTO_CTRL__DSCCLK5_DTO_DB_EN__SHIFT 0xd
+#define DSCCLK_DTO_CTRL__DSCCLK0_EN_MASK 0x00000001L
+#define DSCCLK_DTO_CTRL__DSCCLK1_EN_MASK 0x00000002L
+#define DSCCLK_DTO_CTRL__DSCCLK2_EN_MASK 0x00000004L
+#define DSCCLK_DTO_CTRL__DSCCLK3_EN_MASK 0x00000008L
+#define DSCCLK_DTO_CTRL__DSCCLK4_EN_MASK 0x00000010L
+#define DSCCLK_DTO_CTRL__DSCCLK5_EN_MASK 0x00000020L
+#define DSCCLK_DTO_CTRL__DSCCLK0_DTO_DB_EN_MASK 0x00000100L
+#define DSCCLK_DTO_CTRL__DSCCLK1_DTO_DB_EN_MASK 0x00000200L
+#define DSCCLK_DTO_CTRL__DSCCLK2_DTO_DB_EN_MASK 0x00000400L
+#define DSCCLK_DTO_CTRL__DSCCLK3_DTO_DB_EN_MASK 0x00000800L
+#define DSCCLK_DTO_CTRL__DSCCLK4_DTO_DB_EN_MASK 0x00001000L
+#define DSCCLK_DTO_CTRL__DSCCLK5_DTO_DB_EN_MASK 0x00002000L
+//DPPCLK_CTRL
+#define DPPCLK_CTRL__DPPCLK0_EN__SHIFT 0x0
+#define DPPCLK_CTRL__DPPCLK1_EN__SHIFT 0x3
+#define DPPCLK_CTRL__DPPCLK2_EN__SHIFT 0x6
+#define DPPCLK_CTRL__DPPCLK3_EN__SHIFT 0x9
+#define DPPCLK_CTRL__DPPCLK0_EN_MASK 0x00000001L
+#define DPPCLK_CTRL__DPPCLK1_EN_MASK 0x00000008L
+#define DPPCLK_CTRL__DPPCLK2_EN_MASK 0x00000040L
+#define DPPCLK_CTRL__DPPCLK3_EN_MASK 0x00000200L
+//DCCG_GATE_DISABLE_CNTL6
+#define DCCG_GATE_DISABLE_CNTL6__DPPCLK0_ROOT_GATE_DISABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL6__DPPCLK1_ROOT_GATE_DISABLE__SHIFT 0x1
+#define DCCG_GATE_DISABLE_CNTL6__DPPCLK2_ROOT_GATE_DISABLE__SHIFT 0x2
+#define DCCG_GATE_DISABLE_CNTL6__DPPCLK3_ROOT_GATE_DISABLE__SHIFT 0x3
+#define DCCG_GATE_DISABLE_CNTL6__DSCCLK0_ROOT_GATE_DISABLE__SHIFT 0x8
+#define DCCG_GATE_DISABLE_CNTL6__DSCCLK1_ROOT_GATE_DISABLE__SHIFT 0x9
+#define DCCG_GATE_DISABLE_CNTL6__DSCCLK2_ROOT_GATE_DISABLE__SHIFT 0xa
+#define DCCG_GATE_DISABLE_CNTL6__DSCCLK3_ROOT_GATE_DISABLE__SHIFT 0xb
+#define DCCG_GATE_DISABLE_CNTL6__HDMISTREAMCLK0_ROOT_GATE_DISABLE__SHIFT 0xf
+#define DCCG_GATE_DISABLE_CNTL6__DPPCLK0_ROOT_GATE_DISABLE_MASK 0x00000001L
+#define DCCG_GATE_DISABLE_CNTL6__DPPCLK1_ROOT_GATE_DISABLE_MASK 0x00000002L
+#define DCCG_GATE_DISABLE_CNTL6__DPPCLK2_ROOT_GATE_DISABLE_MASK 0x00000004L
+#define DCCG_GATE_DISABLE_CNTL6__DPPCLK3_ROOT_GATE_DISABLE_MASK 0x00000008L
+#define DCCG_GATE_DISABLE_CNTL6__DSCCLK0_ROOT_GATE_DISABLE_MASK 0x00000100L
+#define DCCG_GATE_DISABLE_CNTL6__DSCCLK1_ROOT_GATE_DISABLE_MASK 0x00000200L
+#define DCCG_GATE_DISABLE_CNTL6__DSCCLK2_ROOT_GATE_DISABLE_MASK 0x00000400L
+#define DCCG_GATE_DISABLE_CNTL6__DSCCLK3_ROOT_GATE_DISABLE_MASK 0x00000800L
+#define DCCG_GATE_DISABLE_CNTL6__HDMISTREAMCLK0_ROOT_GATE_DISABLE_MASK 0x00008000L
+//SYMCLK_PSP_CNTL
+#define SYMCLK_PSP_CNTL__SYMCLK_PSP_FORCE_ON__SHIFT 0x0
+#define SYMCLK_PSP_CNTL__SYMCLK_PSP_FORCE_ON_MASK 0x00000001L
+//DCCG_AUDIO_DTO_SOURCE
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT 0x0
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO_SEL__SHIFT 0x4
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_USE_512FBR_DTO__SHIFT 0x14
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_USE_512FBR_DTO__SHIFT 0x18
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO1_USE_512FBR_DTO__SHIFT 0x1c
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTBCLK_DTO_USE_512FBR_DTO__SHIFT 0x1d
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL_MASK 0x00000007L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO_SEL_MASK 0x00000070L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_USE_512FBR_DTO_MASK 0x00100000L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_USE_512FBR_DTO_MASK 0x01000000L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO1_USE_512FBR_DTO_MASK 0x10000000L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTBCLK_DTO_USE_512FBR_DTO_MASK 0x20000000L
+//DCCG_AUDIO_DTO0_PHASE
+#define DCCG_AUDIO_DTO0_PHASE__DCCG_AUDIO_DTO0_PHASE__SHIFT 0x0
+#define DCCG_AUDIO_DTO0_PHASE__DCCG_AUDIO_DTO0_PHASE_MASK 0xFFFFFFFFL
+//DCCG_AUDIO_DTO0_MODULE
+#define DCCG_AUDIO_DTO0_MODULE__DCCG_AUDIO_DTO0_MODULE__SHIFT 0x0
+#define DCCG_AUDIO_DTO0_MODULE__DCCG_AUDIO_DTO0_MODULE_MASK 0xFFFFFFFFL
+//DCCG_AUDIO_DTO1_PHASE
+#define DCCG_AUDIO_DTO1_PHASE__DCCG_AUDIO_DTO1_PHASE__SHIFT 0x0
+#define DCCG_AUDIO_DTO1_PHASE__DCCG_AUDIO_DTO1_PHASE_MASK 0xFFFFFFFFL
+//DCCG_AUDIO_DTO1_MODULE
+#define DCCG_AUDIO_DTO1_MODULE__DCCG_AUDIO_DTO1_MODULE__SHIFT 0x0
+#define DCCG_AUDIO_DTO1_MODULE__DCCG_AUDIO_DTO1_MODULE_MASK 0xFFFFFFFFL
+//DCCG_VSYNC_OTG0_LATCH_VALUE
+#define DCCG_VSYNC_OTG0_LATCH_VALUE__DCCG_VSYNC_CNT_OTG0_LATCH_VALUE__SHIFT 0x0
+#define DCCG_VSYNC_OTG0_LATCH_VALUE__DCCG_VSYNC_CNT_OTG0_LATCH_VALUE_MASK 0xFFFFFFFFL
+//DCCG_VSYNC_OTG1_LATCH_VALUE
+#define DCCG_VSYNC_OTG1_LATCH_VALUE__DCCG_VSYNC_CNT_OTG1_LATCH_VALUE__SHIFT 0x0
+#define DCCG_VSYNC_OTG1_LATCH_VALUE__DCCG_VSYNC_CNT_OTG1_LATCH_VALUE_MASK 0xFFFFFFFFL
+//DCCG_VSYNC_OTG2_LATCH_VALUE
+#define DCCG_VSYNC_OTG2_LATCH_VALUE__DCCG_VSYNC_CNT_OTG2_LATCH_VALUE__SHIFT 0x0
+#define DCCG_VSYNC_OTG2_LATCH_VALUE__DCCG_VSYNC_CNT_OTG2_LATCH_VALUE_MASK 0xFFFFFFFFL
+//DCCG_VSYNC_OTG3_LATCH_VALUE
+#define DCCG_VSYNC_OTG3_LATCH_VALUE__DCCG_VSYNC_CNT_OTG3_LATCH_VALUE__SHIFT 0x0
+#define DCCG_VSYNC_OTG3_LATCH_VALUE__DCCG_VSYNC_CNT_OTG3_LATCH_VALUE_MASK 0xFFFFFFFFL
+//DCCG_VSYNC_OTG4_LATCH_VALUE
+#define DCCG_VSYNC_OTG4_LATCH_VALUE__DCCG_VSYNC_CNT_OTG4_LATCH_VALUE__SHIFT 0x0
+#define DCCG_VSYNC_OTG4_LATCH_VALUE__DCCG_VSYNC_CNT_OTG4_LATCH_VALUE_MASK 0xFFFFFFFFL
+//DCCG_VSYNC_OTG5_LATCH_VALUE
+#define DCCG_VSYNC_OTG5_LATCH_VALUE__DCCG_VSYNC_CNT_OTG5_LATCH_VALUE__SHIFT 0x0
+#define DCCG_VSYNC_OTG5_LATCH_VALUE__DCCG_VSYNC_CNT_OTG5_LATCH_VALUE_MASK 0xFFFFFFFFL
+//DPPCLK_DTO_CTRL
+#define DPPCLK_DTO_CTRL__DPPCLK0_DTO_DB_EN__SHIFT 0x1
+#define DPPCLK_DTO_CTRL__DPPCLK1_DTO_DB_EN__SHIFT 0x5
+#define DPPCLK_DTO_CTRL__DPPCLK2_DTO_DB_EN__SHIFT 0x9
+#define DPPCLK_DTO_CTRL__DPPCLK3_DTO_DB_EN__SHIFT 0xd
+#define DPPCLK_DTO_CTRL__DPPCLK4_DTO_DB_EN__SHIFT 0x11
+#define DPPCLK_DTO_CTRL__DPPCLK5_DTO_DB_EN__SHIFT 0x15
+#define DPPCLK_DTO_CTRL__DPPCLK0_DTO_DB_EN_MASK 0x00000002L
+#define DPPCLK_DTO_CTRL__DPPCLK1_DTO_DB_EN_MASK 0x00000020L
+#define DPPCLK_DTO_CTRL__DPPCLK2_DTO_DB_EN_MASK 0x00000200L
+#define DPPCLK_DTO_CTRL__DPPCLK3_DTO_DB_EN_MASK 0x00002000L
+#define DPPCLK_DTO_CTRL__DPPCLK4_DTO_DB_EN_MASK 0x00020000L
+#define DPPCLK_DTO_CTRL__DPPCLK5_DTO_DB_EN_MASK 0x00200000L
+//DCCG_VSYNC_CNT_CTRL
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_ENABLE__SHIFT 0x0
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_SW_RESET__SHIFT 0x2
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_RESET_SEL__SHIFT 0x3
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_EXT_TRIG_SEL__SHIFT 0x4
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_FRAME_CNT__SHIFT 0x8
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG0_LATCH_EN__SHIFT 0x10
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG1_LATCH_EN__SHIFT 0x11
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG2_LATCH_EN__SHIFT 0x12
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG3_LATCH_EN__SHIFT 0x13
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG4_LATCH_EN__SHIFT 0x14
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG5_LATCH_EN__SHIFT 0x15
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG0_VSYNC_TRIG_SEL__SHIFT 0x18
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG1_VSYNC_TRIG_SEL__SHIFT 0x19
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG2_VSYNC_TRIG_SEL__SHIFT 0x1a
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG3_VSYNC_TRIG_SEL__SHIFT 0x1b
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG4_VSYNC_TRIG_SEL__SHIFT 0x1c
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG5_VSYNC_TRIG_SEL__SHIFT 0x1d
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_ENABLE_MASK 0x00000001L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_SW_RESET_MASK 0x00000004L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_RESET_SEL_MASK 0x00000008L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_EXT_TRIG_SEL_MASK 0x000000F0L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_FRAME_CNT_MASK 0x00000F00L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG0_LATCH_EN_MASK 0x00010000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG1_LATCH_EN_MASK 0x00020000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG2_LATCH_EN_MASK 0x00040000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG3_LATCH_EN_MASK 0x00080000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG4_LATCH_EN_MASK 0x00100000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG5_LATCH_EN_MASK 0x00200000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG0_VSYNC_TRIG_SEL_MASK 0x01000000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG1_VSYNC_TRIG_SEL_MASK 0x02000000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG2_VSYNC_TRIG_SEL_MASK 0x04000000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG3_VSYNC_TRIG_SEL_MASK 0x08000000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG4_VSYNC_TRIG_SEL_MASK 0x10000000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG5_VSYNC_TRIG_SEL_MASK 0x20000000L
+//DCCG_VSYNC_CNT_INT_CTRL
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_INTERRUPT__SHIFT 0x0
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_INTERRUPT_CLEAR__SHIFT 0x0
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_INTERRUPT__SHIFT 0x1
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_INTERRUPT_CLEAR__SHIFT 0x1
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG2_LATCH_INTERRUPT__SHIFT 0x2
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG2_LATCH_INTERRUPT_CLEAR__SHIFT 0x2
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG3_LATCH_INTERRUPT__SHIFT 0x3
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG3_LATCH_INTERRUPT_CLEAR__SHIFT 0x3
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG4_LATCH_INTERRUPT__SHIFT 0x4
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG4_LATCH_INTERRUPT_CLEAR__SHIFT 0x4
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG5_LATCH_INTERRUPT__SHIFT 0x5
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG5_LATCH_INTERRUPT_CLEAR__SHIFT 0x5
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_MASK__SHIFT 0x8
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_MASK__SHIFT 0x9
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG2_LATCH_MASK__SHIFT 0xa
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG3_LATCH_MASK__SHIFT 0xb
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG4_LATCH_MASK__SHIFT 0xc
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG5_LATCH_MASK__SHIFT 0xd
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_INTERRUPT_MASK 0x00000001L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_INTERRUPT_CLEAR_MASK 0x00000001L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_INTERRUPT_MASK 0x00000002L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_INTERRUPT_CLEAR_MASK 0x00000002L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG2_LATCH_INTERRUPT_MASK 0x00000004L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG2_LATCH_INTERRUPT_CLEAR_MASK 0x00000004L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG3_LATCH_INTERRUPT_MASK 0x00000008L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG3_LATCH_INTERRUPT_CLEAR_MASK 0x00000008L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG4_LATCH_INTERRUPT_MASK 0x00000010L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG4_LATCH_INTERRUPT_CLEAR_MASK 0x00000010L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG5_LATCH_INTERRUPT_MASK 0x00000020L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG5_LATCH_INTERRUPT_CLEAR_MASK 0x00000020L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_MASK_MASK 0x00000100L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_MASK_MASK 0x00000200L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG2_LATCH_MASK_MASK 0x00000400L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG3_LATCH_MASK_MASK 0x00000800L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG4_LATCH_MASK_MASK 0x00001000L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG5_LATCH_MASK_MASK 0x00002000L
+//FORCE_SYMCLK_DISABLE
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKA_DISABLE__SHIFT 0x0
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKB_DISABLE__SHIFT 0x1
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKC_DISABLE__SHIFT 0x2
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKD_DISABLE__SHIFT 0x3
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKE_DISABLE__SHIFT 0x4
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKF_DISABLE__SHIFT 0x5
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKG_DISABLE__SHIFT 0x6
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKA_DISABLE_MASK 0x00000001L
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKB_DISABLE_MASK 0x00000002L
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKC_DISABLE_MASK 0x00000004L
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKD_DISABLE_MASK 0x00000008L
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKE_DISABLE_MASK 0x00000010L
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKF_DISABLE_MASK 0x00000020L
+#define FORCE_SYMCLK_DISABLE__FORCE_SYMCLKG_DISABLE_MASK 0x00000040L
+//DTBCLK_DTO0_PHASE
+#define DTBCLK_DTO0_PHASE__DTBCLK_DTO0_PHASE__SHIFT 0x0
+#define DTBCLK_DTO0_PHASE__DTBCLK_DTO0_PHASE_MASK 0xFFFFFFFFL
+//DTBCLK_DTO1_PHASE
+#define DTBCLK_DTO1_PHASE__DTBCLK_DTO1_PHASE__SHIFT 0x0
+#define DTBCLK_DTO1_PHASE__DTBCLK_DTO1_PHASE_MASK 0xFFFFFFFFL
+//DTBCLK_DTO2_PHASE
+#define DTBCLK_DTO2_PHASE__DTBCLK_DTO2_PHASE__SHIFT 0x0
+#define DTBCLK_DTO2_PHASE__DTBCLK_DTO2_PHASE_MASK 0xFFFFFFFFL
+//DTBCLK_DTO3_PHASE
+#define DTBCLK_DTO3_PHASE__DTBCLK_DTO3_PHASE__SHIFT 0x0
+#define DTBCLK_DTO3_PHASE__DTBCLK_DTO3_PHASE_MASK 0xFFFFFFFFL
+//DTBCLK_DTO0_MODULO
+#define DTBCLK_DTO0_MODULO__DTBCLK_DTO0_MODULO__SHIFT 0x0
+#define DTBCLK_DTO0_MODULO__DTBCLK_DTO0_MODULO_MASK 0xFFFFFFFFL
+//DTBCLK_DTO1_MODULO
+#define DTBCLK_DTO1_MODULO__DTBCLK_DTO1_MODULO__SHIFT 0x0
+#define DTBCLK_DTO1_MODULO__DTBCLK_DTO1_MODULO_MASK 0xFFFFFFFFL
+//DTBCLK_DTO2_MODULO
+#define DTBCLK_DTO2_MODULO__DTBCLK_DTO2_MODULO__SHIFT 0x0
+#define DTBCLK_DTO2_MODULO__DTBCLK_DTO2_MODULO_MASK 0xFFFFFFFFL
+//DTBCLK_DTO3_MODULO
+#define DTBCLK_DTO3_MODULO__DTBCLK_DTO3_MODULO__SHIFT 0x0
+#define DTBCLK_DTO3_MODULO__DTBCLK_DTO3_MODULO_MASK 0xFFFFFFFFL
+//HDMICHARCLK0_CLOCK_CNTL
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN__SHIFT 0x0
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL__SHIFT 0x4
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN_MASK 0x00000001L
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL_MASK 0x00000070L
+//PHYASYMCLK_CLOCK_CNTL
+#define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_EN__SHIFT 0x0
+#define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_SRC_SEL__SHIFT 0x4
+#define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_EN_MASK 0x00000001L
+#define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_SRC_SEL_MASK 0x00000030L
+//PHYBSYMCLK_CLOCK_CNTL
+#define PHYBSYMCLK_CLOCK_CNTL__PHYBSYMCLK_EN__SHIFT 0x0
+#define PHYBSYMCLK_CLOCK_CNTL__PHYBSYMCLK_SRC_SEL__SHIFT 0x4
+#define PHYBSYMCLK_CLOCK_CNTL__PHYBSYMCLK_EN_MASK 0x00000001L
+#define PHYBSYMCLK_CLOCK_CNTL__PHYBSYMCLK_SRC_SEL_MASK 0x00000030L
+//PHYCSYMCLK_CLOCK_CNTL
+#define PHYCSYMCLK_CLOCK_CNTL__PHYCSYMCLK_EN__SHIFT 0x0
+#define PHYCSYMCLK_CLOCK_CNTL__PHYCSYMCLK_SRC_SEL__SHIFT 0x4
+#define PHYCSYMCLK_CLOCK_CNTL__PHYCSYMCLK_EN_MASK 0x00000001L
+#define PHYCSYMCLK_CLOCK_CNTL__PHYCSYMCLK_SRC_SEL_MASK 0x00000030L
+//PHYDSYMCLK_CLOCK_CNTL
+#define PHYDSYMCLK_CLOCK_CNTL__PHYDSYMCLK_EN__SHIFT 0x0
+#define PHYDSYMCLK_CLOCK_CNTL__PHYDSYMCLK_SRC_SEL__SHIFT 0x4
+#define PHYDSYMCLK_CLOCK_CNTL__PHYDSYMCLK_EN_MASK 0x00000001L
+#define PHYDSYMCLK_CLOCK_CNTL__PHYDSYMCLK_SRC_SEL_MASK 0x00000030L
+//PHYESYMCLK_CLOCK_CNTL
+#define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_EN__SHIFT 0x0
+#define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_SRC_SEL__SHIFT 0x4
+#define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_EN_MASK 0x00000001L
+#define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_SRC_SEL_MASK 0x00000030L
+//HDMISTREAMCLK_CNTL
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL__SHIFT 0x0
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_EN__SHIFT 0x3
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL_MASK 0x00000007L
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_EN_MASK 0x00000008L
+//DCCG_GATE_DISABLE_CNTL3
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK0_GATE_DISABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK1_GATE_DISABLE__SHIFT 0x1
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK2_GATE_DISABLE__SHIFT 0x2
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK3_GATE_DISABLE__SHIFT 0x3
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK4_GATE_DISABLE__SHIFT 0x4
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK5_GATE_DISABLE__SHIFT 0x5
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_SE0_GATE_DISABLE__SHIFT 0x8
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_SE0_GATE_DISABLE__SHIFT 0x9
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_SE1_GATE_DISABLE__SHIFT 0xa
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_SE1_GATE_DISABLE__SHIFT 0xb
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_SE2_GATE_DISABLE__SHIFT 0xc
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_SE2_GATE_DISABLE__SHIFT 0xd
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_SE3_GATE_DISABLE__SHIFT 0xe
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_SE3_GATE_DISABLE__SHIFT 0xf
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_LE0_GATE_DISABLE__SHIFT 0x14
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_LE0_GATE_DISABLE__SHIFT 0x15
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_LE1_GATE_DISABLE__SHIFT 0x16
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_LE1_GATE_DISABLE__SHIFT 0x17
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK0_GATE_DISABLE_MASK 0x00000001L
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK1_GATE_DISABLE_MASK 0x00000002L
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK2_GATE_DISABLE_MASK 0x00000004L
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK3_GATE_DISABLE_MASK 0x00000008L
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK4_GATE_DISABLE_MASK 0x00000010L
+#define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK5_GATE_DISABLE_MASK 0x00000020L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_SE0_GATE_DISABLE_MASK 0x00000100L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_SE0_GATE_DISABLE_MASK 0x00000200L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_SE1_GATE_DISABLE_MASK 0x00000400L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_SE1_GATE_DISABLE_MASK 0x00000800L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_SE2_GATE_DISABLE_MASK 0x00001000L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_SE2_GATE_DISABLE_MASK 0x00002000L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_SE3_GATE_DISABLE_MASK 0x00004000L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_SE3_GATE_DISABLE_MASK 0x00008000L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_LE0_GATE_DISABLE_MASK 0x00100000L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_LE0_GATE_DISABLE_MASK 0x00200000L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_LE1_GATE_DISABLE_MASK 0x00400000L
+#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_LE1_GATE_DISABLE_MASK 0x00800000L
+//HDMISTREAMCLK0_DTO_PARAM
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_PHASE__SHIFT 0x0
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_MODULO__SHIFT 0x8
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_EN__SHIFT 0x10
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_PHASE_MASK 0x000000FFL
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_MODULO_MASK 0x0000FF00L
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_EN_MASK 0x00010000L
+//DCCG_AUDIO_DTBCLK_DTO_PHASE
+#define DCCG_AUDIO_DTBCLK_DTO_PHASE__DCCG_AUDIO_DTBCLK_DTO_PHASE__SHIFT 0x0
+#define DCCG_AUDIO_DTBCLK_DTO_PHASE__DCCG_AUDIO_DTBCLK_DTO_PHASE_MASK 0xFFFFFFFFL
+//DCCG_AUDIO_DTBCLK_DTO_MODULO
+#define DCCG_AUDIO_DTBCLK_DTO_MODULO__DCCG_AUDIO_DTBCLK_DTO_MODULO__SHIFT 0x0
+#define DCCG_AUDIO_DTBCLK_DTO_MODULO__DCCG_AUDIO_DTBCLK_DTO_MODULO_MASK 0xFFFFFFFFL
+//DTBCLK_DTO_DBUF_EN
+#define DTBCLK_DTO_DBUF_EN__DTBCLK_DTO0_DBUF_EN__SHIFT 0x0
+#define DTBCLK_DTO_DBUF_EN__DTBCLK_DTO1_DBUF_EN__SHIFT 0x1
+#define DTBCLK_DTO_DBUF_EN__DTBCLK_DTO2_DBUF_EN__SHIFT 0x2
+#define DTBCLK_DTO_DBUF_EN__DTBCLK_DTO3_DBUF_EN__SHIFT 0x3
+#define DTBCLK_DTO_DBUF_EN__DTBCLK_DTO0_DBUF_EN_MASK 0x00000001L
+#define DTBCLK_DTO_DBUF_EN__DTBCLK_DTO1_DBUF_EN_MASK 0x00000002L
+#define DTBCLK_DTO_DBUF_EN__DTBCLK_DTO2_DBUF_EN_MASK 0x00000004L
+#define DTBCLK_DTO_DBUF_EN__DTBCLK_DTO3_DBUF_EN_MASK 0x00000008L
+
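[Editorial aside, not part of the patch: every definition in this generated header comes in a __SHIFT/_MASK pair per register field, and drivers consume them through generic field accessors built on the reg##__##field naming convention, in the style of the REG_GET_FIELD()/REG_SET_FIELD() helpers used elsewhere in amdgpu. The sketch below is a self-contained, hedged illustration of that pattern: the two field defines are copied from the HDMISTREAMCLK_CNTL block above, the accessor macros are re-declared locally so the example compiles on its own, and the programmed values (source select 2, enable 1) are arbitrary.]

    /* Illustrative only: how the generated __SHIFT/_MASK pairs are used. */
    #include <stdint.h>
    #include <stdio.h>

    /* Copied from the HDMISTREAMCLK_CNTL definitions in this header. */
    #define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL__SHIFT 0x0
    #define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_EN__SHIFT      0x3
    #define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL_MASK   0x00000007L
    #define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_EN_MASK        0x00000008L

    /* Generic accessors built on the reg##__##field naming convention;
     * redefined here so the example stands alone. */
    #define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
    #define REG_FIELD_MASK(reg, field)  reg##__##field##_MASK

    #define REG_GET_FIELD(value, reg, field) \
            (((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))

    #define REG_SET_FIELD(orig_val, reg, field, field_val) \
            (((orig_val) & ~REG_FIELD_MASK(reg, field)) | \
             (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))

    int main(void)
    {
            uint32_t reg = 0;

            /* Program an arbitrary source select and set the enable bit. */
            reg = REG_SET_FIELD(reg, HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_SRC_SEL, 2);
            reg = REG_SET_FIELD(reg, HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_EN, 1);

            printf("HDMISTREAMCLK_CNTL = 0x%08x (src_sel=%u, en=%u)\n",
                   reg,
                   (unsigned)REG_GET_FIELD(reg, HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_SRC_SEL),
                   (unsigned)REG_GET_FIELD(reg, HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_EN));
            return 0;
    }

[End of editorial aside; the remaining address blocks below follow the same convention.]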
+
+// addressBlock: dce_dc_dccg_dccg_dcperfmon0_dc_perfmon_dispdec
+//DC_PERFMON0_PERFCOUNTER_CNTL
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON0_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON0_PERFCOUNTER_CNTL2
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON0_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON0_PERFCOUNTER_STATE
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON0_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON0_PERFMON_CNTL
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON0_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON0_PERFMON_CNTL2
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON0_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON0_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON0_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON0_PERFMON_CVALUE_LOW
+#define DC_PERFMON0_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON0_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON0_PERFMON_HI
+#define DC_PERFMON0_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON0_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON0_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON0_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON0_PERFMON_LOW
+#define DC_PERFMON0_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON0_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dccg_dccg_dcperfmon1_dc_perfmon_dispdec
+//DC_PERFMON1_PERFCOUNTER_CNTL
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON1_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON1_PERFCOUNTER_CNTL2
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON1_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON1_PERFCOUNTER_STATE
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON1_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON1_PERFMON_CNTL
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON1_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON1_PERFMON_CNTL2
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON1_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON1_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON1_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON1_PERFMON_CVALUE_LOW
+#define DC_PERFMON1_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON1_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON1_PERFMON_HI
+#define DC_PERFMON1_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON1_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON1_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON1_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON1_PERFMON_LOW
+#define DC_PERFMON1_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON1_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+//DMCUB_RBBMIF_SEC_CNTL
+#define DMCUB_RBBMIF_SEC_CNTL__DMCUB_RBBMIF_SEC_LVL__SHIFT 0x0
+#define DMCUB_RBBMIF_SEC_CNTL__DMCUB_RBBMIF_TRUST_LVL__SHIFT 0x4
+#define DMCUB_RBBMIF_SEC_CNTL__DMCUB_RBBMIF_SOURCE_ID__SHIFT 0x8
+#define DMCUB_RBBMIF_SEC_CNTL__DMCUB_RBBMIF_SEC_LVL_MASK 0x00000007L
+#define DMCUB_RBBMIF_SEC_CNTL__DMCUB_RBBMIF_TRUST_LVL_MASK 0x00000070L
+#define DMCUB_RBBMIF_SEC_CNTL__DMCUB_RBBMIF_SOURCE_ID_MASK 0x01FFFF00L
+
+
+// addressBlock: dce_dc_dmu_rbbmif_dispdec
+//RBBMIF_TIMEOUT
+#define RBBMIF_TIMEOUT__RBBMIF_TIMEOUT_DELAY__SHIFT 0x0
+#define RBBMIF_TIMEOUT__RBBMIF_TIMEOUT_TO_REQ_HOLD__SHIFT 0x14
+#define RBBMIF_TIMEOUT__RBBMIF_TIMEOUT_DELAY_MASK 0x000FFFFFL
+#define RBBMIF_TIMEOUT__RBBMIF_TIMEOUT_TO_REQ_HOLD_MASK 0xFFF00000L
+//RBBMIF_STATUS
+#define RBBMIF_STATUS__RBBMIF_TIMEOUT_CLIENTS_DEC__SHIFT 0x0
+#define RBBMIF_STATUS__RBBMIF_TIMEOUT_CLIENTS_DEC_MASK 0xFFFFFFFFL
+//RBBMIF_STATUS_2
+#define RBBMIF_STATUS_2__RBBMIF_TIMEOUT_CLIENTS_DEC_2__SHIFT 0x0
+#define RBBMIF_STATUS_2__RBBMIF_TIMEOUT_CLIENTS_DEC_2_MASK 0x0000007FL
+//RBBMIF_INT_STATUS
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_ADDR__SHIFT 0x2
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_OP__SHIFT 0x1c
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_RDWR_STATUS__SHIFT 0x1d
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_ACK__SHIFT 0x1e
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_MASK__SHIFT 0x1f
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_ADDR_MASK 0x0003FFFCL
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_OP_MASK 0x10000000L
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_RDWR_STATUS_MASK 0x20000000L
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_ACK_MASK 0x40000000L
+#define RBBMIF_INT_STATUS__RBBMIF_TIMEOUT_MASK_MASK 0x80000000L
+//RBBMIF_TIMEOUT_DIS
+#define RBBMIF_TIMEOUT_DIS__CLIENT0_TIMEOUT_DIS__SHIFT 0x0
+#define RBBMIF_TIMEOUT_DIS__CLIENT1_TIMEOUT_DIS__SHIFT 0x1
+#define RBBMIF_TIMEOUT_DIS__CLIENT2_TIMEOUT_DIS__SHIFT 0x2
+#define RBBMIF_TIMEOUT_DIS__CLIENT3_TIMEOUT_DIS__SHIFT 0x3
+#define RBBMIF_TIMEOUT_DIS__CLIENT4_TIMEOUT_DIS__SHIFT 0x4
+#define RBBMIF_TIMEOUT_DIS__CLIENT5_TIMEOUT_DIS__SHIFT 0x5
+#define RBBMIF_TIMEOUT_DIS__CLIENT6_TIMEOUT_DIS__SHIFT 0x6
+#define RBBMIF_TIMEOUT_DIS__CLIENT7_TIMEOUT_DIS__SHIFT 0x7
+#define RBBMIF_TIMEOUT_DIS__CLIENT8_TIMEOUT_DIS__SHIFT 0x8
+#define RBBMIF_TIMEOUT_DIS__CLIENT9_TIMEOUT_DIS__SHIFT 0x9
+#define RBBMIF_TIMEOUT_DIS__CLIENT10_TIMEOUT_DIS__SHIFT 0xa
+#define RBBMIF_TIMEOUT_DIS__CLIENT11_TIMEOUT_DIS__SHIFT 0xb
+#define RBBMIF_TIMEOUT_DIS__CLIENT12_TIMEOUT_DIS__SHIFT 0xc
+#define RBBMIF_TIMEOUT_DIS__CLIENT13_TIMEOUT_DIS__SHIFT 0xd
+#define RBBMIF_TIMEOUT_DIS__CLIENT14_TIMEOUT_DIS__SHIFT 0xe
+#define RBBMIF_TIMEOUT_DIS__CLIENT15_TIMEOUT_DIS__SHIFT 0xf
+#define RBBMIF_TIMEOUT_DIS__CLIENT16_TIMEOUT_DIS__SHIFT 0x10
+#define RBBMIF_TIMEOUT_DIS__CLIENT17_TIMEOUT_DIS__SHIFT 0x11
+#define RBBMIF_TIMEOUT_DIS__CLIENT18_TIMEOUT_DIS__SHIFT 0x12
+#define RBBMIF_TIMEOUT_DIS__CLIENT19_TIMEOUT_DIS__SHIFT 0x13
+#define RBBMIF_TIMEOUT_DIS__CLIENT20_TIMEOUT_DIS__SHIFT 0x14
+#define RBBMIF_TIMEOUT_DIS__CLIENT21_TIMEOUT_DIS__SHIFT 0x15
+#define RBBMIF_TIMEOUT_DIS__CLIENT22_TIMEOUT_DIS__SHIFT 0x16
+#define RBBMIF_TIMEOUT_DIS__CLIENT23_TIMEOUT_DIS__SHIFT 0x17
+#define RBBMIF_TIMEOUT_DIS__CLIENT24_TIMEOUT_DIS__SHIFT 0x18
+#define RBBMIF_TIMEOUT_DIS__CLIENT25_TIMEOUT_DIS__SHIFT 0x19
+#define RBBMIF_TIMEOUT_DIS__CLIENT26_TIMEOUT_DIS__SHIFT 0x1a
+#define RBBMIF_TIMEOUT_DIS__CLIENT27_TIMEOUT_DIS__SHIFT 0x1b
+#define RBBMIF_TIMEOUT_DIS__CLIENT28_TIMEOUT_DIS__SHIFT 0x1c
+#define RBBMIF_TIMEOUT_DIS__CLIENT29_TIMEOUT_DIS__SHIFT 0x1d
+#define RBBMIF_TIMEOUT_DIS__CLIENT30_TIMEOUT_DIS__SHIFT 0x1e
+#define RBBMIF_TIMEOUT_DIS__CLIENT31_TIMEOUT_DIS__SHIFT 0x1f
+#define RBBMIF_TIMEOUT_DIS__CLIENT0_TIMEOUT_DIS_MASK 0x00000001L
+#define RBBMIF_TIMEOUT_DIS__CLIENT1_TIMEOUT_DIS_MASK 0x00000002L
+#define RBBMIF_TIMEOUT_DIS__CLIENT2_TIMEOUT_DIS_MASK 0x00000004L
+#define RBBMIF_TIMEOUT_DIS__CLIENT3_TIMEOUT_DIS_MASK 0x00000008L
+#define RBBMIF_TIMEOUT_DIS__CLIENT4_TIMEOUT_DIS_MASK 0x00000010L
+#define RBBMIF_TIMEOUT_DIS__CLIENT5_TIMEOUT_DIS_MASK 0x00000020L
+#define RBBMIF_TIMEOUT_DIS__CLIENT6_TIMEOUT_DIS_MASK 0x00000040L
+#define RBBMIF_TIMEOUT_DIS__CLIENT7_TIMEOUT_DIS_MASK 0x00000080L
+#define RBBMIF_TIMEOUT_DIS__CLIENT8_TIMEOUT_DIS_MASK 0x00000100L
+#define RBBMIF_TIMEOUT_DIS__CLIENT9_TIMEOUT_DIS_MASK 0x00000200L
+#define RBBMIF_TIMEOUT_DIS__CLIENT10_TIMEOUT_DIS_MASK 0x00000400L
+#define RBBMIF_TIMEOUT_DIS__CLIENT11_TIMEOUT_DIS_MASK 0x00000800L
+#define RBBMIF_TIMEOUT_DIS__CLIENT12_TIMEOUT_DIS_MASK 0x00001000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT13_TIMEOUT_DIS_MASK 0x00002000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT14_TIMEOUT_DIS_MASK 0x00004000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT15_TIMEOUT_DIS_MASK 0x00008000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT16_TIMEOUT_DIS_MASK 0x00010000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT17_TIMEOUT_DIS_MASK 0x00020000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT18_TIMEOUT_DIS_MASK 0x00040000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT19_TIMEOUT_DIS_MASK 0x00080000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT20_TIMEOUT_DIS_MASK 0x00100000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT21_TIMEOUT_DIS_MASK 0x00200000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT22_TIMEOUT_DIS_MASK 0x00400000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT23_TIMEOUT_DIS_MASK 0x00800000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT24_TIMEOUT_DIS_MASK 0x01000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT25_TIMEOUT_DIS_MASK 0x02000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT26_TIMEOUT_DIS_MASK 0x04000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT27_TIMEOUT_DIS_MASK 0x08000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT28_TIMEOUT_DIS_MASK 0x10000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT29_TIMEOUT_DIS_MASK 0x20000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT30_TIMEOUT_DIS_MASK 0x40000000L
+#define RBBMIF_TIMEOUT_DIS__CLIENT31_TIMEOUT_DIS_MASK 0x80000000L
+//RBBMIF_TIMEOUT_DIS_2
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT32_TIMEOUT_DIS__SHIFT 0x0
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT33_TIMEOUT_DIS__SHIFT 0x1
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT34_TIMEOUT_DIS__SHIFT 0x2
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT35_TIMEOUT_DIS__SHIFT 0x3
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT36_TIMEOUT_DIS__SHIFT 0x4
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT37_TIMEOUT_DIS__SHIFT 0x5
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT38_TIMEOUT_DIS__SHIFT 0x6
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT32_TIMEOUT_DIS_MASK 0x00000001L
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT33_TIMEOUT_DIS_MASK 0x00000002L
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT34_TIMEOUT_DIS_MASK 0x00000004L
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT35_TIMEOUT_DIS_MASK 0x00000008L
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT36_TIMEOUT_DIS_MASK 0x00000010L
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT37_TIMEOUT_DIS_MASK 0x00000020L
+#define RBBMIF_TIMEOUT_DIS_2__CLIENT38_TIMEOUT_DIS_MASK 0x00000040L
+//RBBMIF_STATUS_FLAG
+#define RBBMIF_STATUS_FLAG__RBBMIF_STATE__SHIFT 0x0
+#define RBBMIF_STATUS_FLAG__RBBMIF_READ_TIMEOUT__SHIFT 0x4
+#define RBBMIF_STATUS_FLAG__RBBMIF_FIFO_EMPTY__SHIFT 0x5
+#define RBBMIF_STATUS_FLAG__RBBMIF_FIFO_FULL__SHIFT 0x6
+#define RBBMIF_STATUS_FLAG__RBBMIF_INVALID_ACCESS_FLAG__SHIFT 0x8
+#define RBBMIF_STATUS_FLAG__RBBMIF_INVALID_ACCESS_TYPE__SHIFT 0x9
+#define RBBMIF_STATUS_FLAG__RBBMIF_INVALID_ACCESS_ADDR__SHIFT 0x10
+#define RBBMIF_STATUS_FLAG__RBBMIF_STATE_MASK 0x00000003L
+#define RBBMIF_STATUS_FLAG__RBBMIF_READ_TIMEOUT_MASK 0x00000010L
+#define RBBMIF_STATUS_FLAG__RBBMIF_FIFO_EMPTY_MASK 0x00000020L
+#define RBBMIF_STATUS_FLAG__RBBMIF_FIFO_FULL_MASK 0x00000040L
+#define RBBMIF_STATUS_FLAG__RBBMIF_INVALID_ACCESS_FLAG_MASK 0x00000100L
+#define RBBMIF_STATUS_FLAG__RBBMIF_INVALID_ACCESS_TYPE_MASK 0x00000E00L
+#define RBBMIF_STATUS_FLAG__RBBMIF_INVALID_ACCESS_ADDR_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dmu_dmu_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON2_PERFCOUNTER_CNTL
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON2_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON2_PERFCOUNTER_CNTL2
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON2_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON2_PERFCOUNTER_STATE
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON2_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON2_PERFMON_CNTL
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON2_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON2_PERFMON_CNTL2
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON2_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON2_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON2_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON2_PERFMON_CVALUE_LOW
+#define DC_PERFMON2_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON2_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON2_PERFMON_HI
+#define DC_PERFMON2_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON2_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON2_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON2_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON2_PERFMON_LOW
+#define DC_PERFMON2_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON2_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dmu_ihc_dispdec
+//DC_GPU_TIMER_START_POSITION_V_UPDATE
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D1_V_UPDATE__SHIFT 0x0
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D2_V_UPDATE__SHIFT 0x4
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D3_V_UPDATE__SHIFT 0x8
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D4_V_UPDATE__SHIFT 0xc
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D5_V_UPDATE__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D6_V_UPDATE__SHIFT 0x14
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D1_V_UPDATE_MASK 0x00000007L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D2_V_UPDATE_MASK 0x00000070L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D3_V_UPDATE_MASK 0x00000700L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D4_V_UPDATE_MASK 0x00007000L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D5_V_UPDATE_MASK 0x00070000L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D6_V_UPDATE_MASK 0x00700000L
+//DC_GPU_TIMER_START_POSITION_VSTARTUP
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D1_VSTARTUP__SHIFT 0x0
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D2_VSTARTUP__SHIFT 0x4
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D3_VSTARTUP__SHIFT 0x8
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D4_VSTARTUP__SHIFT 0xc
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D5_VSTARTUP__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D6_VSTARTUP__SHIFT 0x14
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D1_VSTARTUP_MASK 0x00000007L
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D2_VSTARTUP_MASK 0x00000070L
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D3_VSTARTUP_MASK 0x00000700L
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D4_VSTARTUP_MASK 0x00007000L
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D5_VSTARTUP_MASK 0x00070000L
+#define DC_GPU_TIMER_START_POSITION_VSTARTUP__DC_GPU_TIMER_START_POSITION_D6_VSTARTUP_MASK 0x00700000L
+//DC_GPU_TIMER_READ
+#define DC_GPU_TIMER_READ__DC_GPU_TIMER_READ__SHIFT 0x0
+#define DC_GPU_TIMER_READ__DC_GPU_TIMER_READ_MASK 0xFFFFFFFFL
+//DC_GPU_TIMER_READ_CNTL
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_READ_SELECT__SHIFT 0x0
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D1_VSYNC_NOM__SHIFT 0x8
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D2_VSYNC_NOM__SHIFT 0xb
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D3_VSYNC_NOM__SHIFT 0xe
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D4_VSYNC_NOM__SHIFT 0x11
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D5_VSYNC_NOM__SHIFT 0x14
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D6_VSYNC_NOM__SHIFT 0x17
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_READ_SELECT_MASK 0x0000007FL
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D1_VSYNC_NOM_MASK 0x00000700L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D2_VSYNC_NOM_MASK 0x00003800L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D3_VSYNC_NOM_MASK 0x0001C000L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D4_VSYNC_NOM_MASK 0x000E0000L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D5_VSYNC_NOM_MASK 0x00700000L
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D6_VSYNC_NOM_MASK 0x03800000L
+//DISP_INTERRUPT_STATUS
+#define DISP_INTERRUPT_STATUS__OPTC1_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS__DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS__DIGA_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS__DC_HPD1_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS__AUX1_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS__AUX1_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS__DIO_ALPM_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS__RBBMIF_IHC_TIMEOUT_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS__DC_I2C_SW_DONE_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS__DISP_INTERRUPT_STATUS_CONTINUE__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS__OPTC1_DATA_UNDERFLOW_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS__DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS__DIGA_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS__DC_HPD1_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS__AUX1_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS__AUX1_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS__DIO_ALPM_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS__RBBMIF_IHC_TIMEOUT_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS__DC_I2C_SW_DONE_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS__DISP_INTERRUPT_STATUS_CONTINUE_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE
+#define DISP_INTERRUPT_STATUS_CONTINUE__OPTC2_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE__DISP_INTERRUPT_STATUS_CONTINUE2__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE__OPTC2_DATA_UNDERFLOW_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG2_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_VERTICAL_INTERRUPT0_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_VERTICAL_INTERRUPT1_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__OTG1_IHC_VERTICAL_INTERRUPT2_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE__DISP_INTERRUPT_STATUS_CONTINUE2_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE2
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OPTC3_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DISP_INTERRUPT_STATUS_CONTINUE3__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OPTC3_DATA_UNDERFLOW_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG3_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_VERTICAL_INTERRUPT0_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_VERTICAL_INTERRUPT1_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__OTG2_IHC_VERTICAL_INTERRUPT2_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DISP_INTERRUPT_STATUS_CONTINUE3_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE3
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OPTC4_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE3__WBSCL0_DATA_OVERFLOW_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DISP_INTERRUPT_STATUS_CONTINUE4__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OPTC4_DATA_UNDERFLOW_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG4_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__WBSCL0_DATA_OVERFLOW_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_VERTICAL_INTERRUPT0_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_VERTICAL_INTERRUPT1_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__OTG3_IHC_VERTICAL_INTERRUPT2_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DISP_INTERRUPT_STATUS_CONTINUE4_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE4
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OPTC5_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OPTC6_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DISP_INTERRUPT_STATUS_CONTINUE5__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OPTC5_DATA_UNDERFLOW_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OPTC6_DATA_UNDERFLOW_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG5_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_VERTICAL_INTERRUPT0_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_VERTICAL_INTERRUPT1_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__OTG4_IHC_VERTICAL_INTERRUPT2_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DISP_INTERRUPT_STATUS_CONTINUE5_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE5
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG5_IHC_VERTICAL_INTERRUPT0__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG5_IHC_VERTICAL_INTERRUPT1__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG5_IHC_VERTICAL_INTERRUPT2__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DISP_INTERRUPT_STATUS_CONTINUE6__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG5_IHC_VERTICAL_INTERRUPT0_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG5_IHC_VERTICAL_INTERRUPT1_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG5_IHC_VERTICAL_INTERRUPT2_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VERTICAL_INTERRUPT0_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VERTICAL_INTERRUPT1_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__OTG6_IHC_VERTICAL_INTERRUPT2_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DISP_INTERRUPT_STATUS_CONTINUE6_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE6
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_CWB0_IHIF_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_CWB1_IHIF_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_DWB0_IHIF_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_DWB1_IHIF_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_DWB2_IHIF_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX1_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX1_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX2_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX2_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX3_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX3_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX4_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX4_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX5_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX5_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX6_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX6_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DISP_INTERRUPT_STATUS_CONTINUE7__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_CWB0_IHIF_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_CWB1_IHIF_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_DWB0_IHIF_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_DWB1_IHIF_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__MCIF_DWB2_IHIF_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX1_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX1_GTC_SYNC_ERROR_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX2_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX2_GTC_SYNC_ERROR_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX3_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX3_GTC_SYNC_ERROR_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX4_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX4_GTC_SYNC_ERROR_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX5_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX5_GTC_SYNC_ERROR_INTERRUPT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX6_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX6_GTC_SYNC_ERROR_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DISP_INTERRUPT_STATUS_CONTINUE7_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE7
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DMU_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DMU_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DIO_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DIO_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB0_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB0_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DISP_INTERRUPT_STATUS_CONTINUE8__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DMU_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DMU_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DIO_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DIO_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB0_PERFMON_COUNTER0_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB0_PERFMON_COUNTER1_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DISP_INTERRUPT_STATUS_CONTINUE8_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE8
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP0_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP0_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP1_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP1_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP2_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP2_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DISP_INTERRUPT_STATUS_CONTINUE9__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP0_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP0_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP1_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP1_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP2_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DPP2_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DISP_INTERRUPT_STATUS_CONTINUE9_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE9
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP3_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP3_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP4_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP4_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP5_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP5_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE9__WBSCL1_DATA_OVERFLOW_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE9__WBSCL2_DATA_OVERFLOW_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DISP_INTERRUPT_STATUS_CONTINUE10__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP3_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP3_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP4_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP4_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP5_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DPP5_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__WBSCL1_DATA_OVERFLOW_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__WBSCL2_DATA_OVERFLOW_INTERRUPT_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DISP_INTERRUPT_STATUS_CONTINUE10_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE10
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG0_LATCH_INT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG1_LATCH_INT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG2_LATCH_INT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG3_LATCH_INT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG4_LATCH_INT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG5_LATCH_INT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER0_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER1_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG1_IHC_DRR_TIMING_UPDATE__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG2_IHC_DRR_TIMING_UPDATE__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG3_IHC_DRR_TIMING_UPDATE__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG4_IHC_DRR_TIMING_UPDATE__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG5_IHC_DRR_TIMING_UPDATE__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG6_IHC_DRR_TIMING_UPDATE__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DISP_INTERRUPT_STATUS_CONTINUE11__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG0_LATCH_INT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG1_LATCH_INT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG2_LATCH_INT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG3_LATCH_INT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG4_LATCH_INT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_IHC_VSYNC_OTG5_LATCH_INT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER0_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER1_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG1_IHC_DRR_TIMING_UPDATE_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG2_IHC_DRR_TIMING_UPDATE_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG3_IHC_DRR_TIMING_UPDATE_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG4_IHC_DRR_TIMING_UPDATE_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG5_IHC_DRR_TIMING_UPDATE_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__OTG6_IHC_DRR_TIMING_UPDATE_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DISP_INTERRUPT_STATUS_CONTINUE11_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE11
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB1_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB1_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB2_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB2_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC0_STALL_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC1_STALL_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC2_STALL_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC3_STALL_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC4_STALL_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC5_STALL_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC6_STALL_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC7_STALL_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE11__DISP_INTERRUPT_STATUS_CONTINUE12__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB1_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB1_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB2_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__WB2_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC0_STALL_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC1_STALL_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC2_STALL_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC3_STALL_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC4_STALL_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC5_STALL_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC6_STALL_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__MPCC7_STALL_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE11__DISP_INTERRUPT_STATUS_CONTINUE12_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE12
+#define DISP_INTERRUPT_STATUS_CONTINUE12__MPC_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE12__MPC_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP6_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP6_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP7_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP7_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DISP_INTERRUPT_STATUS_CONTINUE13__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE12__MPC_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE12__MPC_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP6_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP6_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP7_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DPP7_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE12__DISP_INTERRUPT_STATUS_CONTINUE13_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE13
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_IHC_VM_FAULT_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_IHC_TIMEOUT_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_IHC_COMPBUF_SIZE_CHANGE_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN0_POWER_UP_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN1_POWER_UP_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN2_POWER_UP_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN3_POWER_UP_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN4_POWER_UP_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN5_POWER_UP_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN6_POWER_UP_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN7_POWER_UP_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_VBLANK_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_VLINE_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_VLINE2_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_TIMEOUT_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DISP_INTERRUPT_STATUS_CONTINUE14__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_IHC_VM_FAULT_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_IHC_TIMEOUT_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBBUB_IHC_COMPBUF_SIZE_CHANGE_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN0_POWER_UP_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN1_POWER_UP_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN2_POWER_UP_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN3_POWER_UP_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN4_POWER_UP_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN5_POWER_UP_INTERRUPT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN6_POWER_UP_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DCPG_IHC_DOMAIN7_POWER_UP_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_VBLANK_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_VLINE_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_VLINE2_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__HUBP0_IHC_TIMEOUT_INTERRUPT_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE13__DISP_INTERRUPT_STATUS_CONTINUE14_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE14
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP2_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP2_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP3_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP3_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_VBLANK_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_VLINE_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_VLINE2_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_TIMEOUT_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE14__DISP_INTERRUPT_STATUS_CONTINUE15__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP2_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP2_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP3_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP3_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_VBLANK_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_VLINE_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_VLINE2_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__HUBP1_IHC_TIMEOUT_INTERRUPT_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE14__DISP_INTERRUPT_STATUS_CONTINUE15_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE15
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP4_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP4_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP5_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP5_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP6_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP6_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_VBLANK_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_VLINE_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_VLINE2_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_TIMEOUT_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE15__DISP_INTERRUPT_STATUS_CONTINUE16__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP4_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP4_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP5_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP5_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP6_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP6_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_VBLANK_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_VLINE_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_VLINE2_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__HUBP2_IHC_TIMEOUT_INTERRUPT_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE15__DISP_INTERRUPT_STATUS_CONTINUE16_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE16
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_VBLANK_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_VLINE_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_VLINE2_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_VBLANK_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_VLINE_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_VLINE2_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_VBLANK_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_VLINE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_VLINE2_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_VBLANK_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_VLINE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_VLINE2_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_VBLANK_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_VLINE_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_VLINE2_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_TIMEOUT_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_TIMEOUT_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_TIMEOUT_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_TIMEOUT_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_TIMEOUT_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE16__DISP_INTERRUPT_STATUS_CONTINUE17__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_VBLANK_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_VLINE_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_VLINE2_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_VBLANK_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_VLINE_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_VLINE2_INTERRUPT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_VBLANK_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_VLINE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_VLINE2_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_VBLANK_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_VLINE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_VLINE2_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_VBLANK_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_VLINE_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_VLINE2_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP3_IHC_TIMEOUT_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP4_IHC_TIMEOUT_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP5_IHC_TIMEOUT_INTERRUPT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP6_IHC_TIMEOUT_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__HUBP7_IHC_TIMEOUT_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE16__DISP_INTERRUPT_STATUS_CONTINUE17_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE17
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPP_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPP_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP0_IHC_FLIP_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP1_IHC_FLIP_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP2_IHC_FLIP_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP3_IHC_FLIP_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP4_IHC_FLIP_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP5_IHC_FLIP_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP6_IHC_FLIP_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP7_IHC_FLIP_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPTC_PERFMON_COUNTER0_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPTC_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE17__MMHUBBUB_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE17__MMHUBBUB_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP0_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP1_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP2_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP3_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP4_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP5_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP6_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP7_IHC_FLIP_AWAY_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE17__DISP_INTERRUPT_STATUS_CONTINUE18__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPP_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPP_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP0_IHC_FLIP_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP1_IHC_FLIP_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP2_IHC_FLIP_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP3_IHC_FLIP_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP4_IHC_FLIP_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP5_IHC_FLIP_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP6_IHC_FLIP_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP7_IHC_FLIP_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPTC_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__OPTC_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__MMHUBBUB_PERFMON_COUNTER0_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__MMHUBBUB_PERFMON_COUNTER1_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP0_IHC_FLIP_AWAY_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP1_IHC_FLIP_AWAY_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP2_IHC_FLIP_AWAY_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP3_IHC_FLIP_AWAY_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP4_IHC_FLIP_AWAY_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP5_IHC_FLIP_AWAY_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP6_IHC_FLIP_AWAY_INTERRUPT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__HUBP7_IHC_FLIP_AWAY_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE17__DISP_INTERRUPT_STATUS_CONTINUE18_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE18
+#define DISP_INTERRUPT_STATUS_CONTINUE18__AZ_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE18__AZ_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXA_IHC_ERROR_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXB_IHC_ERROR_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXC_IHC_ERROR_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXD_IHC_ERROR_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXE_IHC_ERROR_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXF_IHC_ERROR_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXG_IHC_ERROR_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_RXA_IHC_ERROR_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN0_POWER_DOWN_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN1_POWER_DOWN_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN2_POWER_DOWN_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN3_POWER_DOWN_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN4_POWER_DOWN_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN5_POWER_DOWN_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN6_POWER_DOWN_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN7_POWER_DOWN_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DISP_INTERRUPT_STATUS_CONTINUE19__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE18__AZ_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__AZ_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXA_IHC_ERROR_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXB_IHC_ERROR_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXC_IHC_ERROR_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXD_IHC_ERROR_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXE_IHC_ERROR_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXF_IHC_ERROR_INTERRUPT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_TXG_IHC_ERROR_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCIO_DPCS_RXA_IHC_ERROR_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN0_POWER_DOWN_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN1_POWER_DOWN_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN2_POWER_DOWN_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN3_POWER_DOWN_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN4_POWER_DOWN_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN5_POWER_DOWN_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN6_POWER_DOWN_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DCPG_IHC_DOMAIN7_POWER_DOWN_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE18__DISP_INTERRUPT_STATUS_CONTINUE19_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE19
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT0_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT1_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT2_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT3_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT4_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT5_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT6_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT7_AUDIO_FORMAT_CHANGED_INT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT0_AUDIO_ENABLED_INT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT1_AUDIO_ENABLED_INT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT2_AUDIO_ENABLED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT3_AUDIO_ENABLED_INT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT4_AUDIO_ENABLED_INT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT5_AUDIO_ENABLED_INT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT6_AUDIO_ENABLED_INT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT7_AUDIO_ENABLED_INT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT0_AUDIO_DISABLED_INT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT1_AUDIO_DISABLED_INT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT2_AUDIO_DISABLED_INT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT3_AUDIO_DISABLED_INT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT4_AUDIO_DISABLED_INT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT5_AUDIO_DISABLED_INT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT6_AUDIO_DISABLED_INT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT7_AUDIO_DISABLED_INT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE19__DIGG_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE19__DIGG_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE19__DISP_INTERRUPT_STATUS_CONTINUE20__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT0_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT1_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT2_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT3_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT4_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT5_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT6_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT7_AUDIO_FORMAT_CHANGED_INT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT0_AUDIO_ENABLED_INT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT1_AUDIO_ENABLED_INT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT2_AUDIO_ENABLED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT3_AUDIO_ENABLED_INT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT4_AUDIO_ENABLED_INT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT5_AUDIO_ENABLED_INT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT6_AUDIO_ENABLED_INT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT7_AUDIO_ENABLED_INT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT0_AUDIO_DISABLED_INT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT1_AUDIO_DISABLED_INT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT2_AUDIO_DISABLED_INT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT3_AUDIO_DISABLED_INT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT4_AUDIO_DISABLED_INT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT5_AUDIO_DISABLED_INT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT6_AUDIO_DISABLED_INT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__AZ_IHC_ENDPOINT7_AUDIO_DISABLED_INT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__DIGG_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__DIGG_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE19__DISP_INTERRUPT_STATUS_CONTINUE20_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE20
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_CPU_SS_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_CPU_SS_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_CPU_SS_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_CPU_SS_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_CPU_SS_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_CPU_SS_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_V_UPDATE_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_V_UPDATE_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_V_UPDATE_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_V_UPDATE_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_V_UPDATE_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_V_UPDATE_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_GSL_VSYNC_GAP_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_GSL_VSYNC_GAP_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_GSL_VSYNC_GAP_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_GSL_VSYNC_GAP_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_GSL_VSYNC_GAP_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_GSL_VSYNC_GAP_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_VSTARTUP_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_VSTARTUP_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_VSTARTUP_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_VSTARTUP_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_VSTARTUP_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_VSTARTUP_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_VREADY_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_VREADY_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_VREADY_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_VREADY_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_VREADY_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_VREADY_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE20__DISP_INTERRUPT_STATUS_CONTINUE21__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_CPU_SS_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_CPU_SS_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_CPU_SS_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_CPU_SS_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_CPU_SS_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_CPU_SS_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_V_UPDATE_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_V_UPDATE_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_V_UPDATE_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_V_UPDATE_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_V_UPDATE_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_V_UPDATE_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_GSL_VSYNC_GAP_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_GSL_VSYNC_GAP_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_GSL_VSYNC_GAP_INTERRUPT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_GSL_VSYNC_GAP_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_GSL_VSYNC_GAP_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_GSL_VSYNC_GAP_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_VSTARTUP_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_VSTARTUP_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_VSTARTUP_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_VSTARTUP_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_VSTARTUP_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_VSTARTUP_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG1_IHC_VREADY_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG2_IHC_VREADY_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG3_IHC_VREADY_INTERRUPT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG4_IHC_VREADY_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG5_IHC_VREADY_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__OTG6_IHC_VREADY_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE20__DISP_INTERRUPT_STATUS_CONTINUE21_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE21
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC1_HW_DONE_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC2_HW_DONE_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC3_HW_DONE_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC4_HW_DONE_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC5_HW_DONE_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC6_HW_DONE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDCVGA_HW_DONE_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC1_READ_REQUEST_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC2_READ_REQUEST_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC3_READ_REQUEST_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC4_READ_REQUEST_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC5_READ_REQUEST_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC6_READ_REQUEST_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_VGA_READ_REQUEST_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DIGH_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DIGH_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DISP_INTERRUPT_STATUS_CONTINUE22__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC1_HW_DONE_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC2_HW_DONE_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC3_HW_DONE_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC4_HW_DONE_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC5_HW_DONE_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDC6_HW_DONE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DOUT_IHC_I2C_DDCVGA_HW_DONE_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC1_READ_REQUEST_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC2_READ_REQUEST_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC3_READ_REQUEST_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC4_READ_REQUEST_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC5_READ_REQUEST_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_DDC6_READ_REQUEST_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DC_I2C_VGA_READ_REQUEST_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DIGH_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DIGH_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE21__DISP_INTERRUPT_STATUS_CONTINUE22_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE22
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG0_DRR_V_TOTAL_REACH_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG1_DRR_V_TOTAL_REACH_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG2_DRR_V_TOTAL_REACH_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG3_DRR_V_TOTAL_REACH_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG4_DRR_V_TOTAL_REACH_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG5_DRR_V_TOTAL_REACH_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DISP_INTERRUPT_STATUS_CONTINUE23__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG0_DRR_V_TOTAL_REACH_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG1_DRR_V_TOTAL_REACH_INTERRUPT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG2_DRR_V_TOTAL_REACH_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG3_DRR_V_TOTAL_REACH_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG4_DRR_V_TOTAL_REACH_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__OTG5_DRR_V_TOTAL_REACH_INTERRUPT_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE22__DISP_INTERRUPT_STATUS_CONTINUE23_MASK 0x80000000L
+//DC_GPU_TIMER_START_POSITION_VREADY
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D1_VREADY__SHIFT 0x0
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D2_VREADY__SHIFT 0x4
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D3_VREADY__SHIFT 0x8
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D4_VREADY__SHIFT 0xc
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D5_VREADY__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D6_VREADY__SHIFT 0x14
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D1_VREADY_MASK 0x00000007L
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D2_VREADY_MASK 0x00000070L
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D3_VREADY_MASK 0x00000700L
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D4_VREADY_MASK 0x00007000L
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D5_VREADY_MASK 0x00070000L
+#define DC_GPU_TIMER_START_POSITION_VREADY__DC_GPU_TIMER_START_POSITION_D6_VREADY_MASK 0x00700000L
+//DC_GPU_TIMER_START_POSITION_FLIP
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D1_FLIP__SHIFT 0x0
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D2_FLIP__SHIFT 0x4
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D3_FLIP__SHIFT 0x8
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D4_FLIP__SHIFT 0xc
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D5_FLIP__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D6_FLIP__SHIFT 0x14
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D7_FLIP__SHIFT 0x18
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D8_FLIP__SHIFT 0x1c
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D1_FLIP_MASK 0x00000007L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D2_FLIP_MASK 0x00000070L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D3_FLIP_MASK 0x00000700L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D4_FLIP_MASK 0x00007000L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D5_FLIP_MASK 0x00070000L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D6_FLIP_MASK 0x00700000L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D7_FLIP_MASK 0x07000000L
+#define DC_GPU_TIMER_START_POSITION_FLIP__DC_GPU_TIMER_START_POSITION_D8_FLIP_MASK 0x70000000L
+//DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D1_V_UPDATE_NO_LOCK__SHIFT 0x0
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D2_V_UPDATE_NO_LOCK__SHIFT 0x4
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D3_V_UPDATE_NO_LOCK__SHIFT 0x8
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D4_V_UPDATE_NO_LOCK__SHIFT 0xc
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D5_V_UPDATE_NO_LOCK__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D6_V_UPDATE_NO_LOCK__SHIFT 0x14
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D1_V_UPDATE_NO_LOCK_MASK 0x00000007L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D2_V_UPDATE_NO_LOCK_MASK 0x00000070L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D3_V_UPDATE_NO_LOCK_MASK 0x00000700L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D4_V_UPDATE_NO_LOCK_MASK 0x00007000L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D5_V_UPDATE_NO_LOCK_MASK 0x00070000L
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK__DC_GPU_TIMER_START_POSITION_D6_V_UPDATE_NO_LOCK_MASK 0x00700000L
+//DC_GPU_TIMER_START_POSITION_FLIP_AWAY
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D1_FLIP_AWAY__SHIFT 0x0
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D2_FLIP_AWAY__SHIFT 0x4
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D3_FLIP_AWAY__SHIFT 0x8
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D4_FLIP_AWAY__SHIFT 0xc
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D5_FLIP_AWAY__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D6_FLIP_AWAY__SHIFT 0x14
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D7_FLIP_AWAY__SHIFT 0x18
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D8_FLIP_AWAY__SHIFT 0x1c
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D1_FLIP_AWAY_MASK 0x00000007L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D2_FLIP_AWAY_MASK 0x00000070L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D3_FLIP_AWAY_MASK 0x00000700L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D4_FLIP_AWAY_MASK 0x00007000L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D5_FLIP_AWAY_MASK 0x00070000L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D6_FLIP_AWAY_MASK 0x00700000L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D7_FLIP_AWAY_MASK 0x07000000L
+#define DC_GPU_TIMER_START_POSITION_FLIP_AWAY__DC_GPU_TIMER_START_POSITION_D8_FLIP_AWAY_MASK 0x70000000L
+//DISP_INTERRUPT_STATUS_CONTINUE23
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN16_POWER_UP_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN17_POWER_UP_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN18_POWER_UP_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN19_POWER_UP_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN20_POWER_UP_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN21_POWER_UP_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN22_POWER_UP_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN23_POWER_UP_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN24_POWER_UP_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN25_POWER_UP_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN16_POWER_DOWN_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN17_POWER_DOWN_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN18_POWER_DOWN_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN19_POWER_DOWN_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN20_POWER_DOWN_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN21_POWER_DOWN_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN22_POWER_DOWN_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN23_POWER_DOWN_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN24_POWER_DOWN_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN25_POWER_DOWN_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DISP_INTERRUPT_STATUS_CONTINUE24__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN16_POWER_UP_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN17_POWER_UP_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN18_POWER_UP_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN19_POWER_UP_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN20_POWER_UP_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN21_POWER_UP_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN22_POWER_UP_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN23_POWER_UP_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN24_POWER_UP_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN25_POWER_UP_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN16_POWER_DOWN_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN17_POWER_DOWN_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN18_POWER_DOWN_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN19_POWER_DOWN_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN20_POWER_DOWN_INTERRUPT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN21_POWER_DOWN_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN22_POWER_DOWN_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN23_POWER_DOWN_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN24_POWER_DOWN_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DCPG_IHC_DOMAIN25_POWER_DOWN_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE23__DISP_INTERRUPT_STATUS_CONTINUE24_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE24
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC0_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC0_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC1_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC1_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC2_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC2_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC3_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC3_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC4_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC4_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC5_PERFMON_COUNTER0_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC5_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_TIMER_HIGH_PRIORITY_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_TIMER_LOW_PRIORITY_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_HIGH_PRIORITY_READY_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_HIGH_PRIORITY_DONE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_LOW_PRIORITY_READY_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_LOW_PRIORITY_DONE_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_HIGH_PRIORITY_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_LOW_PRIORITY_READY_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_LOW_PRIORITY_DONE_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN0_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN1_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN2_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN3_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN4_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN5_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN6_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAOUT_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_UNDEFINED_ADDRESS_FAULT_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DISP_INTERRUPT_STATUS_CONTINUE25__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC0_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC0_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC1_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC1_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC2_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC2_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC3_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC3_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC4_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC4_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC5_PERFMON_COUNTER0_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DSC5_PERFMON_COUNTER1_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_TIMER_HIGH_PRIORITY_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_TIMER_LOW_PRIORITY_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_HIGH_PRIORITY_READY_INTERRUPT_MASK 0x00004000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_HIGH_PRIORITY_DONE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_LOW_PRIORITY_READY_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_INBOX_LOW_PRIORITY_DONE_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_HIGH_PRIORITY_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_LOW_PRIORITY_READY_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_OUTBOX_LOW_PRIORITY_DONE_INTERRUPT_MASK 0x00200000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN0_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN1_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN2_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN3_INTERRUPT_MASK 0x02000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN4_INTERRUPT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN5_INTERRUPT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAIN6_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_GENERAL_DATAOUT_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DMCUB_UNDEFINED_ADDRESS_FAULT_INTERRUPT_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE24__DISP_INTERRUPT_STATUS_CONTINUE25_MASK 0x80000000L
+//DISP_INTERRUPT_STATUS_CONTINUE25
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC0_IHC_INPUT_UNDERFLOW_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC1_IHC_INPUT_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC2_IHC_INPUT_UNDERFLOW_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC3_IHC_INPUT_UNDERFLOW_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC4_IHC_INPUT_UNDERFLOW_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC5_IHC_INPUT_UNDERFLOW_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC0_IHC_CORE_ERROR_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC1_IHC_CORE_ERROR_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC2_IHC_CORE_ERROR_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC3_IHC_CORE_ERROR_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC4_IHC_CORE_ERROR_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC5_IHC_CORE_ERROR_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DPIA_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DMCUB_WHITELIST_INVALID_ACCESS_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE25__HPO_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE25__HPO_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE25__MMHUBBUB_WARMUP_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC0_IHC_INPUT_UNDERFLOW_INTERRUPT_MASK 0x00000001L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC1_IHC_INPUT_UNDERFLOW_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC2_IHC_INPUT_UNDERFLOW_INTERRUPT_MASK 0x00000004L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC3_IHC_INPUT_UNDERFLOW_INTERRUPT_MASK 0x00000008L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC4_IHC_INPUT_UNDERFLOW_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC5_IHC_INPUT_UNDERFLOW_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC0_IHC_CORE_ERROR_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC1_IHC_CORE_ERROR_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC2_IHC_CORE_ERROR_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC3_IHC_CORE_ERROR_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC4_IHC_CORE_ERROR_INTERRUPT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DSC5_IHC_CORE_ERROR_INTERRUPT_MASK 0x00000800L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DPIA_INTERRUPT_MASK 0x00001000L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__DMCUB_WHITELIST_INVALID_ACCESS_INTERRUPT_MASK 0x00002000L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__HPO_PERFMON_COUNTER0_INTERRUPT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__HPO_PERFMON_COUNTER1_INTERRUPT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS_CONTINUE25__MMHUBBUB_WARMUP_INTERRUPT_MASK 0x40000000L
+//DCCG_INTERRUPT_DEST
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG0_LATCH_INT_DEST__SHIFT 0x0
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG1_LATCH_INT_DEST__SHIFT 0x1
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG2_LATCH_INT_DEST__SHIFT 0x2
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG3_LATCH_INT_DEST__SHIFT 0x3
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG4_LATCH_INT_DEST__SHIFT 0x4
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG5_LATCH_INT_DEST__SHIFT 0x5
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON2_COUNTER0_INTERRUPT_DEST__SHIFT 0xe
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON2_COUNTER1_INTERRUPT_DEST__SHIFT 0xf
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG0_LATCH_INT_DEST_MASK 0x00000001L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG1_LATCH_INT_DEST_MASK 0x00000002L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG2_LATCH_INT_DEST_MASK 0x00000004L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG3_LATCH_INT_DEST_MASK 0x00000008L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG4_LATCH_INT_DEST_MASK 0x00000010L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_VSYNC_OTG5_LATCH_INT_DEST_MASK 0x00000020L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON2_COUNTER0_INTERRUPT_DEST_MASK 0x00004000L
+#define DCCG_INTERRUPT_DEST__DCCG_IHC_PERFMON2_COUNTER1_INTERRUPT_DEST_MASK 0x00008000L
+//DMU_INTERRUPT_DEST
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_TIMER0_INT_DEST__SHIFT 0x0
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_TIMER1_INT_DEST__SHIFT 0x1
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT0_INT_DEST__SHIFT 0x2
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT1_INT_DEST__SHIFT 0x3
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT2_INT_DEST__SHIFT 0x4
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT3_INT_DEST__SHIFT 0x5
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT4_INT_DEST__SHIFT 0x6
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT5_INT_DEST__SHIFT 0x7
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT6_INT_DEST__SHIFT 0x8
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT_IH_INT_DEST__SHIFT 0x9
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX0_READY_INT_DEST__SHIFT 0xa
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX0_DONE_INT_DEST__SHIFT 0xb
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX1_READY_INT_DEST__SHIFT 0xc
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX1_DONE_INT_DEST__SHIFT 0xd
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX0_READY_INT_DEST__SHIFT 0xe
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX0_DONE_INT_DEST__SHIFT 0xf
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX1_READY_INT_DEST__SHIFT 0x10
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX1_DONE_INT_DEST__SHIFT 0x11
+#define DMU_INTERRUPT_DEST__DMU_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x12
+#define DMU_INTERRUPT_DEST__DMU_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x13
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_UNDEFINED_ADDRESS_FAULT_INT_DEST__SHIFT 0x1a
+#define DMU_INTERRUPT_DEST__RBBMIF_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x1b
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_TIMER0_INT_DEST_MASK 0x00000001L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_TIMER1_INT_DEST_MASK 0x00000002L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT0_INT_DEST_MASK 0x00000004L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT1_INT_DEST_MASK 0x00000008L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT2_INT_DEST_MASK 0x00000010L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT3_INT_DEST_MASK 0x00000020L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT4_INT_DEST_MASK 0x00000040L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT5_INT_DEST_MASK 0x00000080L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT6_INT_DEST_MASK 0x00000100L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_GPINT_IH_INT_DEST_MASK 0x00000200L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX0_READY_INT_DEST_MASK 0x00000400L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX0_DONE_INT_DEST_MASK 0x00000800L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX1_READY_INT_DEST_MASK 0x00001000L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_INBOX1_DONE_INT_DEST_MASK 0x00002000L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX0_READY_INT_DEST_MASK 0x00004000L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX0_DONE_INT_DEST_MASK 0x00008000L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX1_READY_INT_DEST_MASK 0x00010000L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_OUTBOX1_DONE_INT_DEST_MASK 0x00020000L
+#define DMU_INTERRUPT_DEST__DMU_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00040000L
+#define DMU_INTERRUPT_DEST__DMU_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00080000L
+#define DMU_INTERRUPT_DEST__DMCUB_IHC_UNDEFINED_ADDRESS_FAULT_INT_DEST_MASK 0x04000000L
+#define DMU_INTERRUPT_DEST__RBBMIF_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x08000000L
+//DMU_INTERRUPT_DEST2
+#define DMU_INTERRUPT_DEST2__DPIA_IHC_INTERRUPT_DEST__SHIFT 0xc
+#define DMU_INTERRUPT_DEST2__DMCUB_IHC_WHITELIST_INVALID_ACCESS_INTERRUPT_DEST__SHIFT 0xd
+#define DMU_INTERRUPT_DEST2__DPIA_IHC_INTERRUPT_DEST_MASK 0x00001000L
+#define DMU_INTERRUPT_DEST2__DMCUB_IHC_WHITELIST_INVALID_ACCESS_INTERRUPT_DEST_MASK 0x00002000L
+//DCPG_INTERRUPT_DEST
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN0_POWER_UP_INTERRUPT_DEST__SHIFT 0x0
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN1_POWER_UP_INTERRUPT_DEST__SHIFT 0x1
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN2_POWER_UP_INTERRUPT_DEST__SHIFT 0x2
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN3_POWER_UP_INTERRUPT_DEST__SHIFT 0x3
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN4_POWER_UP_INTERRUPT_DEST__SHIFT 0x4
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN5_POWER_UP_INTERRUPT_DEST__SHIFT 0x5
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN6_POWER_UP_INTERRUPT_DEST__SHIFT 0x6
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN7_POWER_UP_INTERRUPT_DEST__SHIFT 0x7
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN0_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x10
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN1_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x11
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN2_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x12
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN3_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x13
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN4_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x14
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN5_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x15
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN6_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x16
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN7_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x17
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN0_POWER_UP_INTERRUPT_DEST_MASK 0x00000001L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN1_POWER_UP_INTERRUPT_DEST_MASK 0x00000002L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN2_POWER_UP_INTERRUPT_DEST_MASK 0x00000004L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN3_POWER_UP_INTERRUPT_DEST_MASK 0x00000008L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN4_POWER_UP_INTERRUPT_DEST_MASK 0x00000010L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN5_POWER_UP_INTERRUPT_DEST_MASK 0x00000020L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN6_POWER_UP_INTERRUPT_DEST_MASK 0x00000040L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN7_POWER_UP_INTERRUPT_DEST_MASK 0x00000080L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN0_POWER_DOWN_INTERRUPT_DEST_MASK 0x00010000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN1_POWER_DOWN_INTERRUPT_DEST_MASK 0x00020000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN2_POWER_DOWN_INTERRUPT_DEST_MASK 0x00040000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN3_POWER_DOWN_INTERRUPT_DEST_MASK 0x00080000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN4_POWER_DOWN_INTERRUPT_DEST_MASK 0x00100000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN5_POWER_DOWN_INTERRUPT_DEST_MASK 0x00200000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN6_POWER_DOWN_INTERRUPT_DEST_MASK 0x00400000L
+#define DCPG_INTERRUPT_DEST__DCPG_IHC_DOMAIN7_POWER_DOWN_INTERRUPT_DEST_MASK 0x00800000L
+//DCPG_INTERRUPT_DEST2
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN16_POWER_UP_INTERRUPT_DEST__SHIFT 0x0
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN17_POWER_UP_INTERRUPT_DEST__SHIFT 0x1
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN18_POWER_UP_INTERRUPT_DEST__SHIFT 0x2
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN19_POWER_UP_INTERRUPT_DEST__SHIFT 0x3
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN20_POWER_UP_INTERRUPT_DEST__SHIFT 0x4
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN21_POWER_UP_INTERRUPT_DEST__SHIFT 0x5
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN22_POWER_UP_INTERRUPT_DEST__SHIFT 0x6
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN23_POWER_UP_INTERRUPT_DEST__SHIFT 0x7
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN24_POWER_UP_INTERRUPT_DEST__SHIFT 0x8
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN25_POWER_UP_INTERRUPT_DEST__SHIFT 0x9
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN16_POWER_DOWN_INTERRUPT_DEST__SHIFT 0xa
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN17_POWER_DOWN_INTERRUPT_DEST__SHIFT 0xb
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN18_POWER_DOWN_INTERRUPT_DEST__SHIFT 0xc
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN19_POWER_DOWN_INTERRUPT_DEST__SHIFT 0xd
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN20_POWER_DOWN_INTERRUPT_DEST__SHIFT 0xe
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN21_POWER_DOWN_INTERRUPT_DEST__SHIFT 0xf
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN22_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x10
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN23_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x11
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN24_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x12
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN25_POWER_DOWN_INTERRUPT_DEST__SHIFT 0x13
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN16_POWER_UP_INTERRUPT_DEST_MASK 0x00000001L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN17_POWER_UP_INTERRUPT_DEST_MASK 0x00000002L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN18_POWER_UP_INTERRUPT_DEST_MASK 0x00000004L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN19_POWER_UP_INTERRUPT_DEST_MASK 0x00000008L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN20_POWER_UP_INTERRUPT_DEST_MASK 0x00000010L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN21_POWER_UP_INTERRUPT_DEST_MASK 0x00000020L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN22_POWER_UP_INTERRUPT_DEST_MASK 0x00000040L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN23_POWER_UP_INTERRUPT_DEST_MASK 0x00000080L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN24_POWER_UP_INTERRUPT_DEST_MASK 0x00000100L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN25_POWER_UP_INTERRUPT_DEST_MASK 0x00000200L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN16_POWER_DOWN_INTERRUPT_DEST_MASK 0x00000400L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN17_POWER_DOWN_INTERRUPT_DEST_MASK 0x00000800L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN18_POWER_DOWN_INTERRUPT_DEST_MASK 0x00001000L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN19_POWER_DOWN_INTERRUPT_DEST_MASK 0x00002000L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN20_POWER_DOWN_INTERRUPT_DEST_MASK 0x00004000L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN21_POWER_DOWN_INTERRUPT_DEST_MASK 0x00008000L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN22_POWER_DOWN_INTERRUPT_DEST_MASK 0x00010000L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN23_POWER_DOWN_INTERRUPT_DEST_MASK 0x00020000L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN24_POWER_DOWN_INTERRUPT_DEST_MASK 0x00040000L
+#define DCPG_INTERRUPT_DEST2__DCPG_IHC_DOMAIN25_POWER_DOWN_INTERRUPT_DEST_MASK 0x00080000L
+//MMHUBBUB_INTERRUPT_DEST
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_CWB0_IHIF_INTERRUPT_DEST__SHIFT 0x1
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_CWB1_IHIF_INTERRUPT_DEST__SHIFT 0x2
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_DWB0_IHIF_INTERRUPT_DEST__SHIFT 0x3
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_DWB1_IHIF_INTERRUPT_DEST__SHIFT 0x4
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_DWB2_IHIF_INTERRUPT_DEST__SHIFT 0x5
+#define MMHUBBUB_INTERRUPT_DEST__MMHUBBUB_IHC_WARMUP_INTERRUPT_DEST__SHIFT 0x8
+#define MMHUBBUB_INTERRUPT_DEST__MMHUBBUB_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define MMHUBBUB_INTERRUPT_DEST__MMHUBBUB_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_CWB0_IHIF_INTERRUPT_DEST_MASK 0x00000002L
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_CWB1_IHIF_INTERRUPT_DEST_MASK 0x00000004L
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_DWB0_IHIF_INTERRUPT_DEST_MASK 0x00000008L
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_DWB1_IHIF_INTERRUPT_DEST_MASK 0x00000010L
+#define MMHUBBUB_INTERRUPT_DEST__BUFMGR_DWB2_IHIF_INTERRUPT_DEST_MASK 0x00000020L
+#define MMHUBBUB_INTERRUPT_DEST__MMHUBBUB_IHC_WARMUP_INTERRUPT_DEST_MASK 0x00000100L
+#define MMHUBBUB_INTERRUPT_DEST__MMHUBBUB_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define MMHUBBUB_INTERRUPT_DEST__MMHUBBUB_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+//WB_INTERRUPT_DEST
+#define WB_INTERRUPT_DEST__WBSCL0_IHIF_DATA_OVERFLOW_INTERRUPT_DEST__SHIFT 0x1
+#define WB_INTERRUPT_DEST__WBSCL1_IHIF_DATA_OVERFLOW_INTERRUPT_DEST__SHIFT 0x9
+#define WB_INTERRUPT_DEST__WBSCL2_IHIF_DATA_OVERFLOW_INTERRUPT_DEST__SHIFT 0xb
+#define WB_INTERRUPT_DEST__WB0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define WB_INTERRUPT_DEST__WB0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define WB_INTERRUPT_DEST__WB1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xe
+#define WB_INTERRUPT_DEST__WB1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xf
+#define WB_INTERRUPT_DEST__WB2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x10
+#define WB_INTERRUPT_DEST__WB2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x11
+#define WB_INTERRUPT_DEST__WBSCL0_IHIF_DATA_OVERFLOW_INTERRUPT_DEST_MASK 0x00000002L
+#define WB_INTERRUPT_DEST__WBSCL1_IHIF_DATA_OVERFLOW_INTERRUPT_DEST_MASK 0x00000200L
+#define WB_INTERRUPT_DEST__WBSCL2_IHIF_DATA_OVERFLOW_INTERRUPT_DEST_MASK 0x00000800L
+#define WB_INTERRUPT_DEST__WB0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define WB_INTERRUPT_DEST__WB0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+#define WB_INTERRUPT_DEST__WB1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00004000L
+#define WB_INTERRUPT_DEST__WB1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00008000L
+#define WB_INTERRUPT_DEST__WB2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00010000L
+#define WB_INTERRUPT_DEST__WB2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00020000L
+//DCHUB_INTERRUPT_DEST
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0x0
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_VLINE_INTERRUPT_DEST__SHIFT 0x1
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0x2
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x3
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0x4
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_VLINE_INTERRUPT_DEST__SHIFT 0x5
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0x6
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x7
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0x8
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_VLINE_INTERRUPT_DEST__SHIFT 0x9
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0xa
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0xb
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0xc
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_VLINE_INTERRUPT_DEST__SHIFT 0xd
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0xe
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0xf
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0x10
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_VLINE_INTERRUPT_DEST__SHIFT 0x11
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0x12
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x13
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0x14
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_VLINE_INTERRUPT_DEST__SHIFT 0x15
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0x16
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x17
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0x18
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_VLINE_INTERRUPT_DEST__SHIFT 0x19
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0x1a
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x1b
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_VBLANK_INTERRUPT_DEST__SHIFT 0x1c
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_VLINE_INTERRUPT_DEST__SHIFT 0x1d
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_VLINE2_INTERRUPT_DEST__SHIFT 0x1e
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x1f
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_VBLANK_INTERRUPT_DEST_MASK 0x00000001L
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_VLINE_INTERRUPT_DEST_MASK 0x00000002L
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_VLINE2_INTERRUPT_DEST_MASK 0x00000004L
+#define DCHUB_INTERRUPT_DEST__HUBP0_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x00000008L
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_VBLANK_INTERRUPT_DEST_MASK 0x00000010L
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_VLINE_INTERRUPT_DEST_MASK 0x00000020L
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_VLINE2_INTERRUPT_DEST_MASK 0x00000040L
+#define DCHUB_INTERRUPT_DEST__HUBP1_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x00000080L
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_VBLANK_INTERRUPT_DEST_MASK 0x00000100L
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_VLINE_INTERRUPT_DEST_MASK 0x00000200L
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_VLINE2_INTERRUPT_DEST_MASK 0x00000400L
+#define DCHUB_INTERRUPT_DEST__HUBP2_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x00000800L
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_VBLANK_INTERRUPT_DEST_MASK 0x00001000L
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_VLINE_INTERRUPT_DEST_MASK 0x00002000L
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_VLINE2_INTERRUPT_DEST_MASK 0x00004000L
+#define DCHUB_INTERRUPT_DEST__HUBP3_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x00008000L
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_VBLANK_INTERRUPT_DEST_MASK 0x00010000L
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_VLINE_INTERRUPT_DEST_MASK 0x00020000L
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_VLINE2_INTERRUPT_DEST_MASK 0x00040000L
+#define DCHUB_INTERRUPT_DEST__HUBP4_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x00080000L
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_VBLANK_INTERRUPT_DEST_MASK 0x00100000L
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_VLINE_INTERRUPT_DEST_MASK 0x00200000L
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_VLINE2_INTERRUPT_DEST_MASK 0x00400000L
+#define DCHUB_INTERRUPT_DEST__HUBP5_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x00800000L
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_VBLANK_INTERRUPT_DEST_MASK 0x01000000L
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_VLINE_INTERRUPT_DEST_MASK 0x02000000L
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_VLINE2_INTERRUPT_DEST_MASK 0x04000000L
+#define DCHUB_INTERRUPT_DEST__HUBP6_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x08000000L
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_VBLANK_INTERRUPT_DEST_MASK 0x10000000L
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_VLINE_INTERRUPT_DEST_MASK 0x20000000L
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_VLINE2_INTERRUPT_DEST_MASK 0x40000000L
+#define DCHUB_INTERRUPT_DEST__HUBP7_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x80000000L
+//DCHUB_PERFCOUNTER_INTERRUPT_DEST
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBBUB_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBBUB_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xe
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xf
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x10
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x11
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x12
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x13
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP3_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x14
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP3_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x15
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP4_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x16
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP4_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x17
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP5_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x18
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP5_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x19
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP6_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x1a
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP6_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x1b
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP7_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x1c
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP7_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x1d
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBBUB_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBBUB_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00004000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00008000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00010000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00020000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00040000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00080000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP3_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00100000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP3_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00200000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP4_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00400000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP4_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00800000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP5_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x01000000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP5_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x02000000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP6_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x04000000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP6_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x08000000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP7_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x10000000L
+#define DCHUB_PERFCOUNTER_INTERRUPT_DEST__HUBP7_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x20000000L
+//DCHUB_INTERRUPT_DEST2
+#define DCHUB_INTERRUPT_DEST2__HUBP0_IHC_FLIP_INTERRUPT_DEST__SHIFT 0x0
+#define DCHUB_INTERRUPT_DEST2__HUBP0_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0x1
+#define DCHUB_INTERRUPT_DEST2__HUBP1_IHC_FLIP_INTERRUPT_DEST__SHIFT 0x2
+#define DCHUB_INTERRUPT_DEST2__HUBP1_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0x3
+#define DCHUB_INTERRUPT_DEST2__HUBP2_IHC_FLIP_INTERRUPT_DEST__SHIFT 0x4
+#define DCHUB_INTERRUPT_DEST2__HUBP2_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0x5
+#define DCHUB_INTERRUPT_DEST2__HUBP3_IHC_FLIP_INTERRUPT_DEST__SHIFT 0x6
+#define DCHUB_INTERRUPT_DEST2__HUBP3_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0x7
+#define DCHUB_INTERRUPT_DEST2__HUBP4_IHC_FLIP_INTERRUPT_DEST__SHIFT 0x8
+#define DCHUB_INTERRUPT_DEST2__HUBP4_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0x9
+#define DCHUB_INTERRUPT_DEST2__HUBP5_IHC_FLIP_INTERRUPT_DEST__SHIFT 0xa
+#define DCHUB_INTERRUPT_DEST2__HUBP5_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0xb
+#define DCHUB_INTERRUPT_DEST2__HUBP6_IHC_FLIP_INTERRUPT_DEST__SHIFT 0xc
+#define DCHUB_INTERRUPT_DEST2__HUBP6_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0xd
+#define DCHUB_INTERRUPT_DEST2__HUBP7_IHC_FLIP_INTERRUPT_DEST__SHIFT 0xe
+#define DCHUB_INTERRUPT_DEST2__HUBP7_IHC_FLIP_AWAY_INTERRUPT_DEST__SHIFT 0xf
+#define DCHUB_INTERRUPT_DEST2__HUBBUB_IHC_VM_FAULT_INTERRUPT_DEST__SHIFT 0x18
+#define DCHUB_INTERRUPT_DEST2__HUBBUB_IHC_TIMEOUT_INTERRUPT_DEST__SHIFT 0x19
+#define DCHUB_INTERRUPT_DEST2__HUBBUB_IHC_COMPBUF_SIZE_CHANGE_INTERRUPT_DEST__SHIFT 0x1a
+#define DCHUB_INTERRUPT_DEST2__HUBP0_IHC_FLIP_INTERRUPT_DEST_MASK 0x00000001L
+#define DCHUB_INTERRUPT_DEST2__HUBP0_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00000002L
+#define DCHUB_INTERRUPT_DEST2__HUBP1_IHC_FLIP_INTERRUPT_DEST_MASK 0x00000004L
+#define DCHUB_INTERRUPT_DEST2__HUBP1_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00000008L
+#define DCHUB_INTERRUPT_DEST2__HUBP2_IHC_FLIP_INTERRUPT_DEST_MASK 0x00000010L
+#define DCHUB_INTERRUPT_DEST2__HUBP2_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00000020L
+#define DCHUB_INTERRUPT_DEST2__HUBP3_IHC_FLIP_INTERRUPT_DEST_MASK 0x00000040L
+#define DCHUB_INTERRUPT_DEST2__HUBP3_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00000080L
+#define DCHUB_INTERRUPT_DEST2__HUBP4_IHC_FLIP_INTERRUPT_DEST_MASK 0x00000100L
+#define DCHUB_INTERRUPT_DEST2__HUBP4_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00000200L
+#define DCHUB_INTERRUPT_DEST2__HUBP5_IHC_FLIP_INTERRUPT_DEST_MASK 0x00000400L
+#define DCHUB_INTERRUPT_DEST2__HUBP5_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00000800L
+#define DCHUB_INTERRUPT_DEST2__HUBP6_IHC_FLIP_INTERRUPT_DEST_MASK 0x00001000L
+#define DCHUB_INTERRUPT_DEST2__HUBP6_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00002000L
+#define DCHUB_INTERRUPT_DEST2__HUBP7_IHC_FLIP_INTERRUPT_DEST_MASK 0x00004000L
+#define DCHUB_INTERRUPT_DEST2__HUBP7_IHC_FLIP_AWAY_INTERRUPT_DEST_MASK 0x00008000L
+#define DCHUB_INTERRUPT_DEST2__HUBBUB_IHC_VM_FAULT_INTERRUPT_DEST_MASK 0x01000000L
+#define DCHUB_INTERRUPT_DEST2__HUBBUB_IHC_TIMEOUT_INTERRUPT_DEST_MASK 0x02000000L
+#define DCHUB_INTERRUPT_DEST2__HUBBUB_IHC_COMPBUF_SIZE_CHANGE_INTERRUPT_DEST_MASK 0x04000000L
+//DPP_PERFCOUNTER_INTERRUPT_DEST
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xe
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xf
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x10
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x11
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP3_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x12
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP3_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x13
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP4_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x14
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP4_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x15
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP5_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x16
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP5_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x17
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP6_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x18
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP6_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x19
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP7_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x1a
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP7_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x1b
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00004000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00008000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00010000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00020000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP3_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00040000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP3_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00080000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP4_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00100000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP4_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00200000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP5_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00400000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP5_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00800000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP6_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x01000000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP6_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x02000000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP7_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x04000000L
+#define DPP_PERFCOUNTER_INTERRUPT_DEST__DPP7_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x08000000L
+//MPC_INTERRUPT_DEST
+#define MPC_INTERRUPT_DEST__MPCC0_STALL_INTERRUPT_DEST__SHIFT 0x0
+#define MPC_INTERRUPT_DEST__MPCC1_STALL_INTERRUPT_DEST__SHIFT 0x1
+#define MPC_INTERRUPT_DEST__MPCC2_STALL_INTERRUPT_DEST__SHIFT 0x2
+#define MPC_INTERRUPT_DEST__MPCC3_STALL_INTERRUPT_DEST__SHIFT 0x3
+#define MPC_INTERRUPT_DEST__MPCC4_STALL_INTERRUPT_DEST__SHIFT 0x4
+#define MPC_INTERRUPT_DEST__MPCC5_STALL_INTERRUPT_DEST__SHIFT 0x5
+#define MPC_INTERRUPT_DEST__MPCC6_STALL_INTERRUPT_DEST__SHIFT 0x6
+#define MPC_INTERRUPT_DEST__MPCC7_STALL_INTERRUPT_DEST__SHIFT 0x7
+#define MPC_INTERRUPT_DEST__MPC_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define MPC_INTERRUPT_DEST__MPC_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define MPC_INTERRUPT_DEST__MPCC0_STALL_INTERRUPT_DEST_MASK 0x00000001L
+#define MPC_INTERRUPT_DEST__MPCC1_STALL_INTERRUPT_DEST_MASK 0x00000002L
+#define MPC_INTERRUPT_DEST__MPCC2_STALL_INTERRUPT_DEST_MASK 0x00000004L
+#define MPC_INTERRUPT_DEST__MPCC3_STALL_INTERRUPT_DEST_MASK 0x00000008L
+#define MPC_INTERRUPT_DEST__MPCC4_STALL_INTERRUPT_DEST_MASK 0x00000010L
+#define MPC_INTERRUPT_DEST__MPCC5_STALL_INTERRUPT_DEST_MASK 0x00000020L
+#define MPC_INTERRUPT_DEST__MPCC6_STALL_INTERRUPT_DEST_MASK 0x00000040L
+#define MPC_INTERRUPT_DEST__MPCC7_STALL_INTERRUPT_DEST_MASK 0x00000080L
+#define MPC_INTERRUPT_DEST__MPC_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define MPC_INTERRUPT_DEST__MPC_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+//OPP_INTERRUPT_DEST
+#define OPP_INTERRUPT_DEST__OPP_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define OPP_INTERRUPT_DEST__OPP_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define OPP_INTERRUPT_DEST__OPP_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define OPP_INTERRUPT_DEST__OPP_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+//OPTC_INTERRUPT_DEST
+#define OPTC_INTERRUPT_DEST__OPTC_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define OPTC_INTERRUPT_DEST__OPTC_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define OPTC_INTERRUPT_DEST__OPTC0_IHC_DATA_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x18
+#define OPTC_INTERRUPT_DEST__OPTC1_IHC_DATA_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x19
+#define OPTC_INTERRUPT_DEST__OPTC2_IHC_DATA_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x1a
+#define OPTC_INTERRUPT_DEST__OPTC3_IHC_DATA_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x1b
+#define OPTC_INTERRUPT_DEST__OPTC4_IHC_DATA_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x1c
+#define OPTC_INTERRUPT_DEST__OPTC5_IHC_DATA_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x1d
+#define OPTC_INTERRUPT_DEST__OPTC_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define OPTC_INTERRUPT_DEST__OPTC_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+#define OPTC_INTERRUPT_DEST__OPTC0_IHC_DATA_UNDERFLOW_INTERRUPT_DEST_MASK 0x01000000L
+#define OPTC_INTERRUPT_DEST__OPTC1_IHC_DATA_UNDERFLOW_INTERRUPT_DEST_MASK 0x02000000L
+#define OPTC_INTERRUPT_DEST__OPTC2_IHC_DATA_UNDERFLOW_INTERRUPT_DEST_MASK 0x04000000L
+#define OPTC_INTERRUPT_DEST__OPTC3_IHC_DATA_UNDERFLOW_INTERRUPT_DEST_MASK 0x08000000L
+#define OPTC_INTERRUPT_DEST__OPTC4_IHC_DATA_UNDERFLOW_INTERRUPT_DEST_MASK 0x10000000L
+#define OPTC_INTERRUPT_DEST__OPTC5_IHC_DATA_UNDERFLOW_INTERRUPT_DEST_MASK 0x20000000L
+//OTG0_INTERRUPT_DEST
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_CPU_SS_INTERRUPT_DEST__SHIFT 0x0
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_DRR_TIMING_INTERRUPT_DEST__SHIFT 0x1
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_V_UPDATE_INTERRUPT_DEST__SHIFT 0x2
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_SNAPSHOT_INTERRUPT_DEST__SHIFT 0x3
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST__SHIFT 0x4
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST__SHIFT 0x5
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_TRIGA_INTERRUPT_DEST__SHIFT 0x6
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_TRIGB_INTERRUPT_DEST__SHIFT 0x7
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST__SHIFT 0x8
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_VERTICAL_INTERRUPT0_DEST__SHIFT 0x9
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_VERTICAL_INTERRUPT1_DEST__SHIFT 0xa
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_VERTICAL_INTERRUPT2_DEST__SHIFT 0xb
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST__SHIFT 0xf
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_VSTARTUP_INTERRUPT_DEST__SHIFT 0x10
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_VREADY_INTERRUPT_DEST__SHIFT 0x11
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_VSYNC_NOM_INTERRUPT_DEST__SHIFT 0x12
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST__SHIFT 0x13
+#define OTG0_INTERRUPT_DEST__OTG0_DRR_V_TOTAL_REACH_INTERRUPT_DEST__SHIFT 0x14
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_CPU_SS_INTERRUPT_DEST_MASK 0x00000001L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_DRR_TIMING_INTERRUPT_DEST_MASK 0x00000002L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_V_UPDATE_INTERRUPT_DEST_MASK 0x00000004L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_SNAPSHOT_INTERRUPT_DEST_MASK 0x00000008L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST_MASK 0x00000010L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST_MASK 0x00000020L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_TRIGA_INTERRUPT_DEST_MASK 0x00000040L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_TRIGB_INTERRUPT_DEST_MASK 0x00000080L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST_MASK 0x00000100L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_VERTICAL_INTERRUPT0_DEST_MASK 0x00000200L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_VERTICAL_INTERRUPT1_DEST_MASK 0x00000400L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_OTG_VERTICAL_INTERRUPT2_DEST_MASK 0x00000800L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST_MASK 0x00008000L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_VSTARTUP_INTERRUPT_DEST_MASK 0x00010000L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_VREADY_INTERRUPT_DEST_MASK 0x00020000L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_VSYNC_NOM_INTERRUPT_DEST_MASK 0x00040000L
+#define OTG0_INTERRUPT_DEST__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST_MASK 0x00080000L
+#define OTG0_INTERRUPT_DEST__OTG0_DRR_V_TOTAL_REACH_INTERRUPT_DEST_MASK 0x00100000L
+//OTG1_INTERRUPT_DEST
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_CPU_SS_INTERRUPT_DEST__SHIFT 0x0
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_DRR_TIMING_INTERRUPT_DEST__SHIFT 0x1
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_V_UPDATE_INTERRUPT_DEST__SHIFT 0x2
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_SNAPSHOT_INTERRUPT_DEST__SHIFT 0x3
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST__SHIFT 0x4
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST__SHIFT 0x5
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_TRIGA_INTERRUPT_DEST__SHIFT 0x6
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_TRIGB_INTERRUPT_DEST__SHIFT 0x7
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST__SHIFT 0x8
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_VERTICAL_INTERRUPT0_DEST__SHIFT 0x9
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_VERTICAL_INTERRUPT1_DEST__SHIFT 0xa
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_VERTICAL_INTERRUPT2_DEST__SHIFT 0xb
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST__SHIFT 0xf
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_VSTARTUP_INTERRUPT_DEST__SHIFT 0x10
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_VREADY_INTERRUPT_DEST__SHIFT 0x11
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_VSYNC_NOM_INTERRUPT_DEST__SHIFT 0x12
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST__SHIFT 0x13
+#define OTG1_INTERRUPT_DEST__OTG1_DRR_V_TOTAL_REACH_INTERRUPT_DEST__SHIFT 0x14
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_CPU_SS_INTERRUPT_DEST_MASK 0x00000001L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_DRR_TIMING_INTERRUPT_DEST_MASK 0x00000002L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_V_UPDATE_INTERRUPT_DEST_MASK 0x00000004L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_SNAPSHOT_INTERRUPT_DEST_MASK 0x00000008L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST_MASK 0x00000010L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST_MASK 0x00000020L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_TRIGA_INTERRUPT_DEST_MASK 0x00000040L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_TRIGB_INTERRUPT_DEST_MASK 0x00000080L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST_MASK 0x00000100L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_VERTICAL_INTERRUPT0_DEST_MASK 0x00000200L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_VERTICAL_INTERRUPT1_DEST_MASK 0x00000400L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_OTG_VERTICAL_INTERRUPT2_DEST_MASK 0x00000800L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST_MASK 0x00008000L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_VSTARTUP_INTERRUPT_DEST_MASK 0x00010000L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_VREADY_INTERRUPT_DEST_MASK 0x00020000L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_VSYNC_NOM_INTERRUPT_DEST_MASK 0x00040000L
+#define OTG1_INTERRUPT_DEST__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST_MASK 0x00080000L
+#define OTG1_INTERRUPT_DEST__OTG1_DRR_V_TOTAL_REACH_INTERRUPT_DEST_MASK 0x00100000L
+//OTG2_INTERRUPT_DEST
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_CPU_SS_INTERRUPT_DEST__SHIFT 0x0
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_DRR_TIMING_INTERRUPT_DEST__SHIFT 0x1
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_V_UPDATE_INTERRUPT_DEST__SHIFT 0x2
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_SNAPSHOT_INTERRUPT_DEST__SHIFT 0x3
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST__SHIFT 0x4
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST__SHIFT 0x5
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_TRIGA_INTERRUPT_DEST__SHIFT 0x6
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_TRIGB_INTERRUPT_DEST__SHIFT 0x7
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST__SHIFT 0x8
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_VERTICAL_INTERRUPT0_DEST__SHIFT 0x9
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_VERTICAL_INTERRUPT1_DEST__SHIFT 0xa
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_VERTICAL_INTERRUPT2_DEST__SHIFT 0xb
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST__SHIFT 0xf
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_VSTARTUP_INTERRUPT_DEST__SHIFT 0x10
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_VREADY_INTERRUPT_DEST__SHIFT 0x11
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_VSYNC_NOM_INTERRUPT_DEST__SHIFT 0x12
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST__SHIFT 0x13
+#define OTG2_INTERRUPT_DEST__OTG2_DRR_V_TOTAL_REACH_INTERRUPT_DEST__SHIFT 0x14
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_CPU_SS_INTERRUPT_DEST_MASK 0x00000001L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_DRR_TIMING_INTERRUPT_DEST_MASK 0x00000002L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_V_UPDATE_INTERRUPT_DEST_MASK 0x00000004L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_SNAPSHOT_INTERRUPT_DEST_MASK 0x00000008L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST_MASK 0x00000010L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST_MASK 0x00000020L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_TRIGA_INTERRUPT_DEST_MASK 0x00000040L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_TRIGB_INTERRUPT_DEST_MASK 0x00000080L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST_MASK 0x00000100L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_VERTICAL_INTERRUPT0_DEST_MASK 0x00000200L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_VERTICAL_INTERRUPT1_DEST_MASK 0x00000400L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_OTG_VERTICAL_INTERRUPT2_DEST_MASK 0x00000800L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST_MASK 0x00008000L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_VSTARTUP_INTERRUPT_DEST_MASK 0x00010000L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_VREADY_INTERRUPT_DEST_MASK 0x00020000L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_VSYNC_NOM_INTERRUPT_DEST_MASK 0x00040000L
+#define OTG2_INTERRUPT_DEST__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST_MASK 0x00080000L
+#define OTG2_INTERRUPT_DEST__OTG2_DRR_V_TOTAL_REACH_INTERRUPT_DEST_MASK 0x00100000L
+//OTG3_INTERRUPT_DEST
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_CPU_SS_INTERRUPT_DEST__SHIFT 0x0
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_DRR_TIMING_INTERRUPT_DEST__SHIFT 0x1
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_V_UPDATE_INTERRUPT_DEST__SHIFT 0x2
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_SNAPSHOT_INTERRUPT_DEST__SHIFT 0x3
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST__SHIFT 0x4
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST__SHIFT 0x5
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_TRIGA_INTERRUPT_DEST__SHIFT 0x6
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_TRIGB_INTERRUPT_DEST__SHIFT 0x7
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST__SHIFT 0x8
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_VERTICAL_INTERRUPT0_DEST__SHIFT 0x9
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_VERTICAL_INTERRUPT1_DEST__SHIFT 0xa
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_VERTICAL_INTERRUPT2_DEST__SHIFT 0xb
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST__SHIFT 0xf
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_VSTARTUP_INTERRUPT_DEST__SHIFT 0x10
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_VREADY_INTERRUPT_DEST__SHIFT 0x11
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_VSYNC_NOM_INTERRUPT_DEST__SHIFT 0x12
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST__SHIFT 0x13
+#define OTG3_INTERRUPT_DEST__OTG3_DRR_V_TOTAL_REACH_INTERRUPT_DEST__SHIFT 0x14
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_CPU_SS_INTERRUPT_DEST_MASK 0x00000001L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_DRR_TIMING_INTERRUPT_DEST_MASK 0x00000002L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_V_UPDATE_INTERRUPT_DEST_MASK 0x00000004L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_SNAPSHOT_INTERRUPT_DEST_MASK 0x00000008L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST_MASK 0x00000010L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST_MASK 0x00000020L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_TRIGA_INTERRUPT_DEST_MASK 0x00000040L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_TRIGB_INTERRUPT_DEST_MASK 0x00000080L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST_MASK 0x00000100L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_VERTICAL_INTERRUPT0_DEST_MASK 0x00000200L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_VERTICAL_INTERRUPT1_DEST_MASK 0x00000400L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_OTG_VERTICAL_INTERRUPT2_DEST_MASK 0x00000800L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST_MASK 0x00008000L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_VSTARTUP_INTERRUPT_DEST_MASK 0x00010000L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_VREADY_INTERRUPT_DEST_MASK 0x00020000L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_VSYNC_NOM_INTERRUPT_DEST_MASK 0x00040000L
+#define OTG3_INTERRUPT_DEST__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST_MASK 0x00080000L
+#define OTG3_INTERRUPT_DEST__OTG3_DRR_V_TOTAL_REACH_INTERRUPT_DEST_MASK 0x00100000L
+//OTG4_INTERRUPT_DEST
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_CPU_SS_INTERRUPT_DEST__SHIFT 0x0
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_DRR_TIMING_INTERRUPT_DEST__SHIFT 0x1
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_V_UPDATE_INTERRUPT_DEST__SHIFT 0x2
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_SNAPSHOT_INTERRUPT_DEST__SHIFT 0x3
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST__SHIFT 0x4
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST__SHIFT 0x5
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_TRIGA_INTERRUPT_DEST__SHIFT 0x6
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_TRIGB_INTERRUPT_DEST__SHIFT 0x7
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST__SHIFT 0x8
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_VERTICAL_INTERRUPT0_DEST__SHIFT 0x9
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_VERTICAL_INTERRUPT1_DEST__SHIFT 0xa
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_VERTICAL_INTERRUPT2_DEST__SHIFT 0xb
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST__SHIFT 0xf
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_VSTARTUP_INTERRUPT_DEST__SHIFT 0x10
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_VREADY_INTERRUPT_DEST__SHIFT 0x11
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_VSYNC_NOM_INTERRUPT_DEST__SHIFT 0x12
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST__SHIFT 0x13
+#define OTG4_INTERRUPT_DEST__OTG4_DRR_V_TOTAL_REACH_INTERRUPT_DEST__SHIFT 0x14
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_CPU_SS_INTERRUPT_DEST_MASK 0x00000001L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_DRR_TIMING_INTERRUPT_DEST_MASK 0x00000002L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_V_UPDATE_INTERRUPT_DEST_MASK 0x00000004L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_SNAPSHOT_INTERRUPT_DEST_MASK 0x00000008L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST_MASK 0x00000010L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST_MASK 0x00000020L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_TRIGA_INTERRUPT_DEST_MASK 0x00000040L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_TRIGB_INTERRUPT_DEST_MASK 0x00000080L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST_MASK 0x00000100L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_VERTICAL_INTERRUPT0_DEST_MASK 0x00000200L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_VERTICAL_INTERRUPT1_DEST_MASK 0x00000400L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_OTG_VERTICAL_INTERRUPT2_DEST_MASK 0x00000800L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST_MASK 0x00008000L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_VSTARTUP_INTERRUPT_DEST_MASK 0x00010000L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_VREADY_INTERRUPT_DEST_MASK 0x00020000L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_VSYNC_NOM_INTERRUPT_DEST_MASK 0x00040000L
+#define OTG4_INTERRUPT_DEST__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST_MASK 0x00080000L
+#define OTG4_INTERRUPT_DEST__OTG4_DRR_V_TOTAL_REACH_INTERRUPT_DEST_MASK 0x00100000L
+//OTG5_INTERRUPT_DEST
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_CPU_SS_INTERRUPT_DEST__SHIFT 0x0
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_DRR_TIMING_INTERRUPT_DEST__SHIFT 0x1
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_V_UPDATE_INTERRUPT_DEST__SHIFT 0x2
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_SNAPSHOT_INTERRUPT_DEST__SHIFT 0x3
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST__SHIFT 0x4
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST__SHIFT 0x5
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_TRIGA_INTERRUPT_DEST__SHIFT 0x6
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_TRIGB_INTERRUPT_DEST__SHIFT 0x7
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST__SHIFT 0x8
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_VERTICAL_INTERRUPT0_DEST__SHIFT 0x9
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_VERTICAL_INTERRUPT1_DEST__SHIFT 0xa
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_VERTICAL_INTERRUPT2_DEST__SHIFT 0xb
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST__SHIFT 0xf
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_VSTARTUP_INTERRUPT_DEST__SHIFT 0x10
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_VREADY_INTERRUPT_DEST__SHIFT 0x11
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_VSYNC_NOM_INTERRUPT_DEST__SHIFT 0x12
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST__SHIFT 0x13
+#define OTG5_INTERRUPT_DEST__OTG5_DRR_V_TOTAL_REACH_INTERRUPT_DEST__SHIFT 0x14
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_CPU_SS_INTERRUPT_DEST_MASK 0x00000001L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_DRR_TIMING_INTERRUPT_DEST_MASK 0x00000002L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_V_UPDATE_INTERRUPT_DEST_MASK 0x00000004L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_SNAPSHOT_INTERRUPT_DEST_MASK 0x00000008L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_FORCE_COUNT_NOW_INTERRUPT_DEST_MASK 0x00000010L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_DEST_MASK 0x00000020L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_TRIGA_INTERRUPT_DEST_MASK 0x00000040L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_TRIGB_INTERRUPT_DEST_MASK 0x00000080L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_GSL_VSYNC_GAP_INTERRUPT_DEST_MASK 0x00000100L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_VERTICAL_INTERRUPT0_DEST_MASK 0x00000200L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_VERTICAL_INTERRUPT1_DEST_MASK 0x00000400L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_OTG_VERTICAL_INTERRUPT2_DEST_MASK 0x00000800L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INTERRUPT_DEST_MASK 0x00008000L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_VSTARTUP_INTERRUPT_DEST_MASK 0x00010000L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_VREADY_INTERRUPT_DEST_MASK 0x00020000L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_VSYNC_NOM_INTERRUPT_DEST_MASK 0x00040000L
+#define OTG5_INTERRUPT_DEST__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT_DEST_MASK 0x00080000L
+#define OTG5_INTERRUPT_DEST__OTG5_DRR_V_TOTAL_REACH_INTERRUPT_DEST_MASK 0x00100000L
+//DIG_INTERRUPT_DEST
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGA_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x0
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGB_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x1
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGC_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x2
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGD_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x3
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGE_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x4
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGF_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x5
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGG_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x6
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGH_VID_STREAM_DISABLE_INTERRUPT_DEST__SHIFT 0x7
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGA_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0x8
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGB_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0x9
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGC_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0xa
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGD_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0xb
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGE_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0xc
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGF_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0xd
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGG_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0xe
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGH_FAST_TRAINING_COMPLETE_INTERRUPT_DEST__SHIFT 0xf
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGA_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000001L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGB_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000002L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGC_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000004L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGD_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000008L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGE_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000010L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGF_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000020L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGG_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000040L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGH_VID_STREAM_DISABLE_INTERRUPT_DEST_MASK 0x00000080L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGA_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00000100L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGB_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00000200L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGC_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00000400L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGD_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00000800L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGE_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00001000L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGF_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00002000L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGG_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00004000L
+#define DIG_INTERRUPT_DEST__DOUT_IHC_DIGH_FAST_TRAINING_COMPLETE_INTERRUPT_DEST_MASK 0x00008000L
+//I2C_DDC_HPD_INTERRUPT_DEST
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_SW_DONE_INTERRUPT_DEST__SHIFT 0x0
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC1_HW_DONE_INTERRUPT_DEST__SHIFT 0x1
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC2_HW_DONE_INTERRUPT_DEST__SHIFT 0x2
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC3_HW_DONE_INTERRUPT_DEST__SHIFT 0x3
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC4_HW_DONE_INTERRUPT_DEST__SHIFT 0x4
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC5_HW_DONE_INTERRUPT_DEST__SHIFT 0x5
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC6_HW_DONE_INTERRUPT_DEST__SHIFT 0x6
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDCVGA_HW_DONE_INTERRUPT_DEST__SHIFT 0x7
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC1_READ_REQUEST_INTERRUPT_DEST__SHIFT 0x10
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC2_READ_REQUEST_INTERRUPT_DEST__SHIFT 0x11
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC3_READ_REQUEST_INTERRUPT_DEST__SHIFT 0x12
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC4_READ_REQUEST_INTERRUPT_DEST__SHIFT 0x13
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC5_READ_REQUEST_INTERRUPT_DEST__SHIFT 0x14
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC6_READ_REQUEST_INTERRUPT_DEST__SHIFT 0x15
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDCVGA_READ_REQUEST_INTERRPUT_DEST__SHIFT 0x16
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_SW_DONE_INTERRUPT_DEST_MASK 0x00000001L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC1_HW_DONE_INTERRUPT_DEST_MASK 0x00000002L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC2_HW_DONE_INTERRUPT_DEST_MASK 0x00000004L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC3_HW_DONE_INTERRUPT_DEST_MASK 0x00000008L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC4_HW_DONE_INTERRUPT_DEST_MASK 0x00000010L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC5_HW_DONE_INTERRUPT_DEST_MASK 0x00000020L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDC6_HW_DONE_INTERRUPT_DEST_MASK 0x00000040L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DOUT_IHC_I2C_DDCVGA_HW_DONE_INTERRUPT_DEST_MASK 0x00000080L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC1_READ_REQUEST_INTERRUPT_DEST_MASK 0x00010000L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC2_READ_REQUEST_INTERRUPT_DEST_MASK 0x00020000L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC3_READ_REQUEST_INTERRUPT_DEST_MASK 0x00040000L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC4_READ_REQUEST_INTERRUPT_DEST_MASK 0x00080000L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC5_READ_REQUEST_INTERRUPT_DEST_MASK 0x00100000L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDC6_READ_REQUEST_INTERRUPT_DEST_MASK 0x00200000L
+#define I2C_DDC_HPD_INTERRUPT_DEST__DC_I2C_DDCVGA_READ_REQUEST_INTERRPUT_DEST_MASK 0x00400000L
+//HDCP_INTERRUPT_DEST
+//DIO_INTERRUPT_DEST
+#define DIO_INTERRUPT_DEST__DIO_ALPM_INTERRUPT_DEST__SHIFT 0x4
+#define DIO_INTERRUPT_DEST__DIO_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xc
+#define DIO_INTERRUPT_DEST__DIO_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xd
+#define DIO_INTERRUPT_DEST__DIO_ALPM_INTERRUPT_DEST_MASK 0x00000010L
+#define DIO_INTERRUPT_DEST__DIO_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00001000L
+#define DIO_INTERRUPT_DEST__DIO_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00002000L
+//DCIO_INTERRUPT_DEST
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXA_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x0
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXB_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x1
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXC_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x2
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXD_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x3
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXE_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x4
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXF_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x5
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXG_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x6
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_RXA_IHC_ERROR_INTERRUPT_DEST__SHIFT 0x10
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXA_IHC_ERROR_INTERRUPT_DEST_MASK 0x00000001L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXB_IHC_ERROR_INTERRUPT_DEST_MASK 0x00000002L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXC_IHC_ERROR_INTERRUPT_DEST_MASK 0x00000004L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXD_IHC_ERROR_INTERRUPT_DEST_MASK 0x00000008L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXE_IHC_ERROR_INTERRUPT_DEST_MASK 0x00000010L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXF_IHC_ERROR_INTERRUPT_DEST_MASK 0x00000020L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_TXG_IHC_ERROR_INTERRUPT_DEST_MASK 0x00000040L
+#define DCIO_INTERRUPT_DEST__DCIO_DPCS_RXA_IHC_ERROR_INTERRUPT_DEST_MASK 0x00010000L
+//HPD_INTERRUPT_DEST
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD1_INTERRUPT_DEST__SHIFT 0x0
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD2_INTERRUPT_DEST__SHIFT 0x1
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD3_INTERRUPT_DEST__SHIFT 0x2
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD4_INTERRUPT_DEST__SHIFT 0x3
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD5_INTERRUPT_DEST__SHIFT 0x4
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD6_INTERRUPT_DEST__SHIFT 0x5
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD1_RX_INTERRUPT_DEST__SHIFT 0x8
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD2_RX_INTERRUPT_DEST__SHIFT 0x9
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD3_RX_INTERRUPT_DEST__SHIFT 0xa
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD4_RX_INTERRUPT_DEST__SHIFT 0xb
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD5_RX_INTERRUPT_DEST__SHIFT 0xc
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD6_RX_INTERRUPT_DEST__SHIFT 0xd
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD1_INTERRUPT_DEST_MASK 0x00000001L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD2_INTERRUPT_DEST_MASK 0x00000002L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD3_INTERRUPT_DEST_MASK 0x00000004L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD4_INTERRUPT_DEST_MASK 0x00000008L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD5_INTERRUPT_DEST_MASK 0x00000010L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD6_INTERRUPT_DEST_MASK 0x00000020L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD1_RX_INTERRUPT_DEST_MASK 0x00000100L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD2_RX_INTERRUPT_DEST_MASK 0x00000200L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD3_RX_INTERRUPT_DEST_MASK 0x00000400L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD4_RX_INTERRUPT_DEST_MASK 0x00000800L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD5_RX_INTERRUPT_DEST_MASK 0x00001000L
+#define HPD_INTERRUPT_DEST__DOUT_IHC_HPD6_RX_INTERRUPT_DEST_MASK 0x00002000L
+//AZ_INTERRUPT_DEST
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT0_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x0
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT1_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x1
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT2_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x2
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT3_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x3
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT4_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x4
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT5_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x5
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT6_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x6
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT7_AUDIO_FORMAT_CHANGED_INT_DEST__SHIFT 0x7
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT0_AUDIO_ENABLED_INT_DEST__SHIFT 0x8
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT1_AUDIO_ENABLED_INT_DEST__SHIFT 0x9
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT2_AUDIO_ENABLED_INT_DEST__SHIFT 0xa
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT3_AUDIO_ENABLED_INT_DEST__SHIFT 0xb
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT4_AUDIO_ENABLED_INT_DEST__SHIFT 0xc
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT5_AUDIO_ENABLED_INT_DEST__SHIFT 0xd
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT6_AUDIO_ENABLED_INT_DEST__SHIFT 0xe
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT7_AUDIO_ENABLED_INT_DEST__SHIFT 0xf
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT0_AUDIO_DISABLED_INT_DEST__SHIFT 0x10
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT1_AUDIO_DISABLED_INT_DEST__SHIFT 0x11
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT2_AUDIO_DISABLED_INT_DEST__SHIFT 0x12
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT3_AUDIO_DISABLED_INT_DEST__SHIFT 0x13
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT4_AUDIO_DISABLED_INT_DEST__SHIFT 0x14
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT5_AUDIO_DISABLED_INT_DEST__SHIFT 0x15
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT6_AUDIO_DISABLED_INT_DEST__SHIFT 0x16
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT7_AUDIO_DISABLED_INT_DEST__SHIFT 0x17
+#define AZ_INTERRUPT_DEST__AZ_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x1e
+#define AZ_INTERRUPT_DEST__AZ_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x1f
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT0_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000001L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT1_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000002L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT2_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000004L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT3_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000008L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT4_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000010L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT5_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000020L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT6_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000040L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT7_AUDIO_FORMAT_CHANGED_INT_DEST_MASK 0x00000080L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT0_AUDIO_ENABLED_INT_DEST_MASK 0x00000100L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT1_AUDIO_ENABLED_INT_DEST_MASK 0x00000200L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT2_AUDIO_ENABLED_INT_DEST_MASK 0x00000400L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT3_AUDIO_ENABLED_INT_DEST_MASK 0x00000800L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT4_AUDIO_ENABLED_INT_DEST_MASK 0x00001000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT5_AUDIO_ENABLED_INT_DEST_MASK 0x00002000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT6_AUDIO_ENABLED_INT_DEST_MASK 0x00004000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT7_AUDIO_ENABLED_INT_DEST_MASK 0x00008000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT0_AUDIO_DISABLED_INT_DEST_MASK 0x00010000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT1_AUDIO_DISABLED_INT_DEST_MASK 0x00020000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT2_AUDIO_DISABLED_INT_DEST_MASK 0x00040000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT3_AUDIO_DISABLED_INT_DEST_MASK 0x00080000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT4_AUDIO_DISABLED_INT_DEST_MASK 0x00100000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT5_AUDIO_DISABLED_INT_DEST_MASK 0x00200000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT6_AUDIO_DISABLED_INT_DEST_MASK 0x00400000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_ENDPOINT7_AUDIO_DISABLED_INT_DEST_MASK 0x00800000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x40000000L
+#define AZ_INTERRUPT_DEST__AZ_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x80000000L
+//AUX_INTERRUPT_DEST
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_SW_DONE_INTERRUPT_DEST__SHIFT 0x0
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_LS_DONE_INTERRUPT_DEST__SHIFT 0x1
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_SW_DONE_INTERRUPT_DEST__SHIFT 0x2
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_LS_DONE_INTERRUPT_DEST__SHIFT 0x3
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_SW_DONE_INTERRUPT_DEST__SHIFT 0x4
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_LS_DONE_INTERRUPT_DEST__SHIFT 0x5
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_SW_DONE_INTERRUPT_DEST__SHIFT 0x6
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_LS_DONE_INTERRUPT_DEST__SHIFT 0x7
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_SW_DONE_INTERRUPT_DEST__SHIFT 0x8
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_LS_DONE_INTERRUPT_DEST__SHIFT 0x9
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_SW_DONE_INTERRUPT_DEST__SHIFT 0xa
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_LS_DONE_INTERRUPT_DEST__SHIFT 0xb
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST__SHIFT 0x10
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_GTC_SYNC_ERROR_INTERRUPT_DEST__SHIFT 0x11
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST__SHIFT 0x12
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_GTC_SYNC_ERROR_INTERRUPT_DEST__SHIFT 0x13
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST__SHIFT 0x14
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_GTC_SYNC_ERROR_INTERRUPT_DEST__SHIFT 0x15
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST__SHIFT 0x16
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_GTC_SYNC_ERROR_INTERRUPT_DEST__SHIFT 0x17
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST__SHIFT 0x18
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_GTC_SYNC_ERROR_INTERRUPT_DEST__SHIFT 0x19
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST__SHIFT 0x1a
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_GTC_SYNC_ERROR_INTERRUPT_DEST__SHIFT 0x1b
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_SW_DONE_INTERRUPT_DEST_MASK 0x00000001L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_LS_DONE_INTERRUPT_DEST_MASK 0x00000002L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_SW_DONE_INTERRUPT_DEST_MASK 0x00000004L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_LS_DONE_INTERRUPT_DEST_MASK 0x00000008L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_SW_DONE_INTERRUPT_DEST_MASK 0x00000010L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_LS_DONE_INTERRUPT_DEST_MASK 0x00000020L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_SW_DONE_INTERRUPT_DEST_MASK 0x00000040L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_LS_DONE_INTERRUPT_DEST_MASK 0x00000080L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_SW_DONE_INTERRUPT_DEST_MASK 0x00000100L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_LS_DONE_INTERRUPT_DEST_MASK 0x00000200L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_SW_DONE_INTERRUPT_DEST_MASK 0x00000400L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_LS_DONE_INTERRUPT_DEST_MASK 0x00000800L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST_MASK 0x00010000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX1_GTC_SYNC_ERROR_INTERRUPT_DEST_MASK 0x00020000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST_MASK 0x00040000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX2_GTC_SYNC_ERROR_INTERRUPT_DEST_MASK 0x00080000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST_MASK 0x00100000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX3_GTC_SYNC_ERROR_INTERRUPT_DEST_MASK 0x00200000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST_MASK 0x00400000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX4_GTC_SYNC_ERROR_INTERRUPT_DEST_MASK 0x00800000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST_MASK 0x01000000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX5_GTC_SYNC_ERROR_INTERRUPT_DEST_MASK 0x02000000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_GTC_SYNC_LOCK_DONE_INTERRUPT_DEST_MASK 0x04000000L
+#define AUX_INTERRUPT_DEST__DOUT_IHC_AUX6_GTC_SYNC_ERROR_INTERRUPT_DEST_MASK 0x08000000L
+//DSC_INTERRUPT_DEST
+#define DSC_INTERRUPT_DEST__DSC0_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x0
+#define DSC_INTERRUPT_DEST__DSC0_IHC_CORE_ERROR_INTERRUPT_DEST__SHIFT 0x1
+#define DSC_INTERRUPT_DEST__DSC0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x2
+#define DSC_INTERRUPT_DEST__DSC0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x3
+#define DSC_INTERRUPT_DEST__DSC1_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x4
+#define DSC_INTERRUPT_DEST__DSC1_IHC_CORE_ERROR_INTERRUPT_DEST__SHIFT 0x5
+#define DSC_INTERRUPT_DEST__DSC1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x6
+#define DSC_INTERRUPT_DEST__DSC1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x7
+#define DSC_INTERRUPT_DEST__DSC2_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x8
+#define DSC_INTERRUPT_DEST__DSC2_IHC_CORE_ERROR_INTERRUPT_DEST__SHIFT 0x9
+#define DSC_INTERRUPT_DEST__DSC2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xa
+#define DSC_INTERRUPT_DEST__DSC2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xb
+#define DSC_INTERRUPT_DEST__DSC3_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST__SHIFT 0xc
+#define DSC_INTERRUPT_DEST__DSC3_IHC_CORE_ERROR_INTERRUPT_DEST__SHIFT 0xd
+#define DSC_INTERRUPT_DEST__DSC3_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0xe
+#define DSC_INTERRUPT_DEST__DSC3_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0xf
+#define DSC_INTERRUPT_DEST__DSC4_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x10
+#define DSC_INTERRUPT_DEST__DSC4_IHC_CORE_ERROR_INTERRUPT_DEST__SHIFT 0x11
+#define DSC_INTERRUPT_DEST__DSC4_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x12
+#define DSC_INTERRUPT_DEST__DSC4_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x13
+#define DSC_INTERRUPT_DEST__DSC5_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST__SHIFT 0x14
+#define DSC_INTERRUPT_DEST__DSC5_IHC_CORE_ERROR_INTERRUPT_DEST__SHIFT 0x15
+#define DSC_INTERRUPT_DEST__DSC5_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x16
+#define DSC_INTERRUPT_DEST__DSC5_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x17
+#define DSC_INTERRUPT_DEST__DSC0_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST_MASK 0x00000001L
+#define DSC_INTERRUPT_DEST__DSC0_IHC_CORE_ERROR_INTERRUPT_DEST_MASK 0x00000002L
+#define DSC_INTERRUPT_DEST__DSC0_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00000004L
+#define DSC_INTERRUPT_DEST__DSC0_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00000008L
+#define DSC_INTERRUPT_DEST__DSC1_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST_MASK 0x00000010L
+#define DSC_INTERRUPT_DEST__DSC1_IHC_CORE_ERROR_INTERRUPT_DEST_MASK 0x00000020L
+#define DSC_INTERRUPT_DEST__DSC1_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00000040L
+#define DSC_INTERRUPT_DEST__DSC1_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00000080L
+#define DSC_INTERRUPT_DEST__DSC2_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST_MASK 0x00000100L
+#define DSC_INTERRUPT_DEST__DSC2_IHC_CORE_ERROR_INTERRUPT_DEST_MASK 0x00000200L
+#define DSC_INTERRUPT_DEST__DSC2_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00000400L
+#define DSC_INTERRUPT_DEST__DSC2_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00000800L
+#define DSC_INTERRUPT_DEST__DSC3_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST_MASK 0x00001000L
+#define DSC_INTERRUPT_DEST__DSC3_IHC_CORE_ERROR_INTERRUPT_DEST_MASK 0x00002000L
+#define DSC_INTERRUPT_DEST__DSC3_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00004000L
+#define DSC_INTERRUPT_DEST__DSC3_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00008000L
+#define DSC_INTERRUPT_DEST__DSC4_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST_MASK 0x00010000L
+#define DSC_INTERRUPT_DEST__DSC4_IHC_CORE_ERROR_INTERRUPT_DEST_MASK 0x00020000L
+#define DSC_INTERRUPT_DEST__DSC4_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00040000L
+#define DSC_INTERRUPT_DEST__DSC4_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00080000L
+#define DSC_INTERRUPT_DEST__DSC5_IHC_INPUT_UNDERFLOW_INTERRUPT_DEST_MASK 0x00100000L
+#define DSC_INTERRUPT_DEST__DSC5_IHC_CORE_ERROR_INTERRUPT_DEST_MASK 0x00200000L
+#define DSC_INTERRUPT_DEST__DSC5_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00400000L
+#define DSC_INTERRUPT_DEST__DSC5_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00800000L
+//HPO_INTERRUPT_DEST
+#define HPO_INTERRUPT_DEST__HPO_IHC_PERFMON_COUNTER0_INTERRUPT_DEST__SHIFT 0x2
+#define HPO_INTERRUPT_DEST__HPO_IHC_PERFMON_COUNTER1_INTERRUPT_DEST__SHIFT 0x3
+#define HPO_INTERRUPT_DEST__HPO_IHC_PERFMON_COUNTER0_INTERRUPT_DEST_MASK 0x00000004L
+#define HPO_INTERRUPT_DEST__HPO_IHC_PERFMON_COUNTER1_INTERRUPT_DEST_MASK 0x00000008L
+
+
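Each field in these generated registers is described by a paired __SHIFT/_MASK macro. As a minimal sketch of how such a pair is consumed (plain C on an ordinary uint32_t value; the helper name and the absence of the driver's real MMIO accessors are assumptions made purely for illustration):

#include <stdint.h>

/* Hypothetical helper for illustration only: update the VSTARTUP
 * destination field of an already-read OTG1_INTERRUPT_DEST value
 * using the mask/shift macros defined above. */
static inline uint32_t otg1_set_vstartup_dest(uint32_t reg_val, uint32_t dest)
{
	/* Clear the field, then insert the new value at its bit position. */
	reg_val &= ~OTG1_INTERRUPT_DEST__OTG1_IHC_VSTARTUP_INTERRUPT_DEST_MASK;
	reg_val |= (dest << OTG1_INTERRUPT_DEST__OTG1_IHC_VSTARTUP_INTERRUPT_DEST__SHIFT) &
		   OTG1_INTERRUPT_DEST__OTG1_IHC_VSTARTUP_INTERRUPT_DEST_MASK;
	return reg_val;
}

The same clear-then-insert pattern applies to every __SHIFT/_MASK pair in this header; only the macro names change.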
+// addressBlock: dce_dc_dmu_dmu_misc_dispdec
+//CC_DC_PIPE_DIS
+#define CC_DC_PIPE_DIS__DC_PIPE_DIS__SHIFT 0x0
+#define CC_DC_PIPE_DIS__DC_FULL_DIS__SHIFT 0xc
+#define CC_DC_PIPE_DIS__DC_DMCUB_ENABLE__SHIFT 0x10
+#define CC_DC_PIPE_DIS__DC_PIPE_DIS_MASK 0x000000FFL
+#define CC_DC_PIPE_DIS__DC_FULL_DIS_MASK 0x00001000L
+#define CC_DC_PIPE_DIS__DC_DMCUB_ENABLE_MASK 0x00010000L
+//DMU_CLK_CNTL
+#define DMU_CLK_CNTL__DMU_TEST_CLK_SEL__SHIFT 0x0
+#define DMU_CLK_CNTL__DISPCLK_R_DMU_GATE_DIS__SHIFT 0x4
+#define DMU_CLK_CNTL__DISPCLK_G_RBBMIF_GATE_DIS__SHIFT 0x5
+#define DMU_CLK_CNTL__DISPCLK_R_CLOCK_ON__SHIFT 0x6
+#define DMU_CLK_CNTL__DISPCLK_G_RBBMIF_CLOCK_ON__SHIFT 0x7
+#define DMU_CLK_CNTL__RIOMMU_CLK_SEL__SHIFT 0x8
+#define DMU_CLK_CNTL__RBBMIF_FGCG_REP_DIS__SHIFT 0xc
+#define DMU_CLK_CNTL__DMCUB_DMCUBCLK_SRC_SEL__SHIFT 0xd
+#define DMU_CLK_CNTL__DPREFCLK_ALLOW_DS_CLKSTOP__SHIFT 0x10
+#define DMU_CLK_CNTL__DISPCLK_ALLOW_DS_CLKSTOP__SHIFT 0x12
+#define DMU_CLK_CNTL__DPPCLK_ALLOW_DS_CLKSTOP__SHIFT 0x14
+#define DMU_CLK_CNTL__DTBCLK_ALLOW_DS_CLKSTOP__SHIFT 0x16
+#define DMU_CLK_CNTL__DCFCLK_ALLOW_DS_CLKSTOP__SHIFT 0x18
+#define DMU_CLK_CNTL__DPIACLK_ALLOW_DS_CLKSTOP__SHIFT 0x1a
+#define DMU_CLK_CNTL__LONO_FGCG_REP_DIS__SHIFT 0x1c
+#define DMU_CLK_CNTL__LONO_DISPCLK_GATE_DISABLE__SHIFT 0x1d
+#define DMU_CLK_CNTL__LONO_SOCCLK_GATE_DISABLE__SHIFT 0x1e
+#define DMU_CLK_CNTL__LONO_DMCUBCLK_GATE_DISABLE__SHIFT 0x1f
+#define DMU_CLK_CNTL__DMU_TEST_CLK_SEL_MASK 0x0000000FL
+#define DMU_CLK_CNTL__DISPCLK_R_DMU_GATE_DIS_MASK 0x00000010L
+#define DMU_CLK_CNTL__DISPCLK_G_RBBMIF_GATE_DIS_MASK 0x00000020L
+#define DMU_CLK_CNTL__DISPCLK_R_CLOCK_ON_MASK 0x00000040L
+#define DMU_CLK_CNTL__DISPCLK_G_RBBMIF_CLOCK_ON_MASK 0x00000080L
+#define DMU_CLK_CNTL__RIOMMU_CLK_SEL_MASK 0x00000100L
+#define DMU_CLK_CNTL__RBBMIF_FGCG_REP_DIS_MASK 0x00001000L
+#define DMU_CLK_CNTL__DMCUB_DMCUBCLK_SRC_SEL_MASK 0x00006000L
+#define DMU_CLK_CNTL__DPREFCLK_ALLOW_DS_CLKSTOP_MASK 0x00030000L
+#define DMU_CLK_CNTL__DISPCLK_ALLOW_DS_CLKSTOP_MASK 0x000C0000L
+#define DMU_CLK_CNTL__DPPCLK_ALLOW_DS_CLKSTOP_MASK 0x00300000L
+#define DMU_CLK_CNTL__DTBCLK_ALLOW_DS_CLKSTOP_MASK 0x00C00000L
+#define DMU_CLK_CNTL__DCFCLK_ALLOW_DS_CLKSTOP_MASK 0x03000000L
+#define DMU_CLK_CNTL__DPIACLK_ALLOW_DS_CLKSTOP_MASK 0x0C000000L
+#define DMU_CLK_CNTL__LONO_FGCG_REP_DIS_MASK 0x10000000L
+#define DMU_CLK_CNTL__LONO_DISPCLK_GATE_DISABLE_MASK 0x20000000L
+#define DMU_CLK_CNTL__LONO_SOCCLK_GATE_DISABLE_MASK 0x40000000L
+#define DMU_CLK_CNTL__LONO_DMCUBCLK_GATE_DISABLE_MASK 0x80000000L
+//DMCUB_SMU_INTERRUPT_CNTL
+#define DMCUB_SMU_INTERRUPT_CNTL__DMCUB_SMU_MSG_INT__SHIFT 0x0
+#define DMCUB_SMU_INTERRUPT_CNTL__DMCUB_SMU_MSG__SHIFT 0x10
+#define DMCUB_SMU_INTERRUPT_CNTL__DMCUB_SMU_MSG_INT_MASK 0x00000001L
+#define DMCUB_SMU_INTERRUPT_CNTL__DMCUB_SMU_MSG_MASK 0xFFFF0000L
+//SMU_INTERRUPT_CONTROL
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_ENABLE__SHIFT 0x0
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_STATUS__SHIFT 0x4
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_EVENT__SHIFT 0x10
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_ENABLE_MASK 0x00000001L
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_STATUS_MASK 0x00000010L
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_EVENT_MASK 0xFFFF0000L
+//ZSC_CNTL
+#define ZSC_CNTL__FORCE_SOC_ACCESS__SHIFT 0x0
+#define ZSC_CNTL__LONO_PWR_DN__SHIFT 0x8
+#define ZSC_CNTL__FORCE_SOC_ACCESS_MASK 0x00000003L
+#define ZSC_CNTL__LONO_PWR_DN_MASK 0x00000100L
+//ZSC_CNTL2
+#define ZSC_CNTL2__ALLOW_Z10__SHIFT 0x0
+#define ZSC_CNTL2__ALLOW_Z10_MASK 0x00000001L
+//DMU_MISC_ALLOW_DS_FORCE
+#define DMU_MISC_ALLOW_DS_FORCE__DMU_MISC_ALLOW_DS_FORCE_EN__SHIFT 0x0
+#define DMU_MISC_ALLOW_DS_FORCE__DMU_MISC_ALLOW_DS_FORCE_VALUE__SHIFT 0x4
+#define DMU_MISC_ALLOW_DS_FORCE__DMU_MISC_ALLOW_DS_FORCE_EN_MASK 0x00000001L
+#define DMU_MISC_ALLOW_DS_FORCE__DMU_MISC_ALLOW_DS_FORCE_VALUE_MASK 0x00000010L
+//ZSC_STATUS
+#define ZSC_STATUS__SOC_ACCESS_TRIGGER_STATUS__SHIFT 0x0
+#define ZSC_STATUS__SOC_ACCESS_STICKY_TRIGGER_STATUS__SHIFT 0x4
+#define ZSC_STATUS__FENCE_REQ_STATUS__SHIFT 0x8
+#define ZSC_STATUS__FENCE_ACK_STATUS__SHIFT 0x9
+#define ZSC_STATUS__FENCE_STATUS__SHIFT 0xa
+#define ZSC_STATUS__SOC_ACCESS_TRIGGER_STATUS_MASK 0x00000007L
+#define ZSC_STATUS__SOC_ACCESS_STICKY_TRIGGER_STATUS_MASK 0x00000070L
+#define ZSC_STATUS__FENCE_REQ_STATUS_MASK 0x00000100L
+#define ZSC_STATUS__FENCE_ACK_STATUS_MASK 0x00000200L
+#define ZSC_STATUS__FENCE_STATUS_MASK 0x00000C00L
+//DMU_DISPCLK_CGTT_BLK_CTRL_REG
+#define DMU_DISPCLK_CGTT_BLK_CTRL_REG__LONO_DISPCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DMU_DISPCLK_CGTT_BLK_CTRL_REG__LONO_DISPCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DMU_DISPCLK_CGTT_BLK_CTRL_REG__LONO_DISPCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DMU_DISPCLK_CGTT_BLK_CTRL_REG__LONO_DISPCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//DMU_SOCCLK_CGTT_BLK_CTRL_REG
+#define DMU_SOCCLK_CGTT_BLK_CTRL_REG__LONO_SOCCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DMU_SOCCLK_CGTT_BLK_CTRL_REG__LONO_SOCCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DMU_SOCCLK_CGTT_BLK_CTRL_REG__LONO_SOCCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DMU_SOCCLK_CGTT_BLK_CTRL_REG__LONO_SOCCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//ZPR_CLK_UNGATE_DELAY
+#define ZPR_CLK_UNGATE_DELAY__ZPR_CLK_UNGATE_DELAY__SHIFT 0x0
+#define ZPR_CLK_UNGATE_DELAY__ZPR_CLK_UNGATE_DELAY_MASK 0x000000FFL
+
+
+// addressBlock: dce_dc_dmu_dc_pg_dispdec
+//DOMAIN0_PG_CONFIG
+#define DOMAIN0_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN0_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN0_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN0_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+//DOMAIN0_PG_STATUS
+#define DOMAIN0_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN0_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN0_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN0_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN1_PG_CONFIG
+#define DOMAIN1_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN1_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN1_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN1_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+//DOMAIN1_PG_STATUS
+#define DOMAIN1_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN1_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN1_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN1_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN2_PG_CONFIG
+#define DOMAIN2_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN2_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN2_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN2_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+//DOMAIN2_PG_STATUS
+#define DOMAIN2_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN2_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN2_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN2_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN3_PG_CONFIG
+#define DOMAIN3_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN3_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN3_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN3_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+//DOMAIN3_PG_STATUS
+#define DOMAIN3_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN3_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN3_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN3_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN16_PG_CONFIG
+#define DOMAIN16_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN16_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN16_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN16_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+//DOMAIN16_PG_STATUS
+#define DOMAIN16_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN16_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN16_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN16_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN17_PG_CONFIG
+#define DOMAIN17_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN17_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN17_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN17_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+//DOMAIN17_PG_STATUS
+#define DOMAIN17_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN17_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN17_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN17_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN18_PG_CONFIG
+#define DOMAIN18_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN18_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN18_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN18_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+//DOMAIN18_PG_STATUS
+#define DOMAIN18_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN18_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN18_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN18_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN19_PG_CONFIG
+#define DOMAIN19_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN19_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN19_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN19_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+//DOMAIN19_PG_STATUS
+#define DOMAIN19_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN19_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN19_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN19_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN22_PG_CONFIG
+#define DOMAIN22_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN22_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN22_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN22_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+//DOMAIN22_PG_STATUS
+#define DOMAIN22_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN22_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN22_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN22_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN23_PG_CONFIG
+#define DOMAIN23_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN23_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN23_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN23_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+//DOMAIN23_PG_STATUS
+#define DOMAIN23_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN23_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN23_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN23_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN24_PG_CONFIG
+#define DOMAIN24_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN24_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN24_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN24_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+//DOMAIN24_PG_STATUS
+#define DOMAIN24_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN24_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN24_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN24_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DOMAIN25_PG_CONFIG
+#define DOMAIN25_PG_CONFIG__DOMAIN_POWER_FORCEON__SHIFT 0x0
+#define DOMAIN25_PG_CONFIG__DOMAIN_POWER_GATE__SHIFT 0x8
+#define DOMAIN25_PG_CONFIG__DOMAIN_POWER_FORCEON_MASK 0x00000001L
+#define DOMAIN25_PG_CONFIG__DOMAIN_POWER_GATE_MASK 0x00000100L
+//DOMAIN25_PG_STATUS
+#define DOMAIN25_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT 0x1c
+#define DOMAIN25_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DOMAIN25_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK 0x10000000L
+#define DOMAIN25_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK 0xC0000000L
+//DCPG_INTERRUPT_STATUS
+#define DCPG_INTERRUPT_STATUS__DOMAIN0_POWER_UP_INT_OCCURRED__SHIFT 0x0
+#define DCPG_INTERRUPT_STATUS__DOMAIN0_POWER_DOWN_INT_OCCURRED__SHIFT 0x1
+#define DCPG_INTERRUPT_STATUS__DOMAIN1_POWER_UP_INT_OCCURRED__SHIFT 0x2
+#define DCPG_INTERRUPT_STATUS__DOMAIN1_POWER_DOWN_INT_OCCURRED__SHIFT 0x3
+#define DCPG_INTERRUPT_STATUS__DOMAIN2_POWER_UP_INT_OCCURRED__SHIFT 0x4
+#define DCPG_INTERRUPT_STATUS__DOMAIN2_POWER_DOWN_INT_OCCURRED__SHIFT 0x5
+#define DCPG_INTERRUPT_STATUS__DOMAIN3_POWER_UP_INT_OCCURRED__SHIFT 0x6
+#define DCPG_INTERRUPT_STATUS__DOMAIN3_POWER_DOWN_INT_OCCURRED__SHIFT 0x7
+#define DCPG_INTERRUPT_STATUS__DOMAIN0_POWER_UP_INT_OCCURRED_MASK 0x00000001L
+#define DCPG_INTERRUPT_STATUS__DOMAIN0_POWER_DOWN_INT_OCCURRED_MASK 0x00000002L
+#define DCPG_INTERRUPT_STATUS__DOMAIN1_POWER_UP_INT_OCCURRED_MASK 0x00000004L
+#define DCPG_INTERRUPT_STATUS__DOMAIN1_POWER_DOWN_INT_OCCURRED_MASK 0x00000008L
+#define DCPG_INTERRUPT_STATUS__DOMAIN2_POWER_UP_INT_OCCURRED_MASK 0x00000010L
+#define DCPG_INTERRUPT_STATUS__DOMAIN2_POWER_DOWN_INT_OCCURRED_MASK 0x00000020L
+#define DCPG_INTERRUPT_STATUS__DOMAIN3_POWER_UP_INT_OCCURRED_MASK 0x00000040L
+#define DCPG_INTERRUPT_STATUS__DOMAIN3_POWER_DOWN_INT_OCCURRED_MASK 0x00000080L
+//DCPG_INTERRUPT_STATUS_2
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN16_POWER_UP_INT_OCCURRED__SHIFT 0x0
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN16_POWER_DOWN_INT_OCCURRED__SHIFT 0x1
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN17_POWER_UP_INT_OCCURRED__SHIFT 0x2
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN17_POWER_DOWN_INT_OCCURRED__SHIFT 0x3
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN18_POWER_UP_INT_OCCURRED__SHIFT 0x4
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN18_POWER_DOWN_INT_OCCURRED__SHIFT 0x5
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN19_POWER_UP_INT_OCCURRED__SHIFT 0x6
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN19_POWER_DOWN_INT_OCCURRED__SHIFT 0x7
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN16_POWER_UP_INT_OCCURRED_MASK 0x00000001L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN16_POWER_DOWN_INT_OCCURRED_MASK 0x00000002L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN17_POWER_UP_INT_OCCURRED_MASK 0x00000004L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN17_POWER_DOWN_INT_OCCURRED_MASK 0x00000008L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN18_POWER_UP_INT_OCCURRED_MASK 0x00000010L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN18_POWER_DOWN_INT_OCCURRED_MASK 0x00000020L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN19_POWER_UP_INT_OCCURRED_MASK 0x00000040L
+#define DCPG_INTERRUPT_STATUS_2__DOMAIN19_POWER_DOWN_INT_OCCURRED_MASK 0x00000080L
+//DCPG_INTERRUPT_STATUS_3
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN22_POWER_UP_INT_OCCURRED__SHIFT 0x0
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN22_POWER_DOWN_INT_OCCURRED__SHIFT 0x1
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN23_POWER_UP_INT_OCCURRED__SHIFT 0x2
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN23_POWER_DOWN_INT_OCCURRED__SHIFT 0x3
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN24_POWER_UP_INT_OCCURRED__SHIFT 0x4
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN24_POWER_DOWN_INT_OCCURRED__SHIFT 0x5
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN25_POWER_UP_INT_OCCURRED__SHIFT 0x6
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN25_POWER_DOWN_INT_OCCURRED__SHIFT 0x7
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN22_POWER_UP_INT_OCCURRED_MASK 0x00000001L
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN22_POWER_DOWN_INT_OCCURRED_MASK 0x00000002L
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN23_POWER_UP_INT_OCCURRED_MASK 0x00000004L
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN23_POWER_DOWN_INT_OCCURRED_MASK 0x00000008L
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN24_POWER_UP_INT_OCCURRED_MASK 0x00000010L
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN24_POWER_DOWN_INT_OCCURRED_MASK 0x00000020L
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN25_POWER_UP_INT_OCCURRED_MASK 0x00000040L
+#define DCPG_INTERRUPT_STATUS_3__DOMAIN25_POWER_DOWN_INT_OCCURRED_MASK 0x00000080L
+//DCPG_INTERRUPT_CONTROL_1
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_UP_INT_MASK__SHIFT 0x0
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_UP_INT_CLEAR__SHIFT 0x1
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_DOWN_INT_MASK__SHIFT 0x2
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_DOWN_INT_CLEAR__SHIFT 0x3
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_UP_INT_MASK__SHIFT 0x4
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_UP_INT_CLEAR__SHIFT 0x5
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_DOWN_INT_MASK__SHIFT 0x6
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_DOWN_INT_CLEAR__SHIFT 0x7
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_UP_INT_MASK__SHIFT 0x8
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_UP_INT_CLEAR__SHIFT 0x9
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_DOWN_INT_MASK__SHIFT 0xa
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_DOWN_INT_CLEAR__SHIFT 0xb
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_UP_INT_MASK__SHIFT 0xc
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_UP_INT_CLEAR__SHIFT 0xd
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_DOWN_INT_MASK__SHIFT 0xe
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_DOWN_INT_CLEAR__SHIFT 0xf
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_UP_INT_MASK_MASK 0x00000001L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_UP_INT_CLEAR_MASK 0x00000002L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_DOWN_INT_MASK_MASK 0x00000004L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN0_POWER_DOWN_INT_CLEAR_MASK 0x00000008L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_UP_INT_MASK_MASK 0x00000010L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_UP_INT_CLEAR_MASK 0x00000020L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_DOWN_INT_MASK_MASK 0x00000040L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN1_POWER_DOWN_INT_CLEAR_MASK 0x00000080L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_UP_INT_MASK_MASK 0x00000100L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_UP_INT_CLEAR_MASK 0x00000200L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_DOWN_INT_MASK_MASK 0x00000400L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN2_POWER_DOWN_INT_CLEAR_MASK 0x00000800L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_UP_INT_MASK_MASK 0x00001000L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_UP_INT_CLEAR_MASK 0x00002000L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_DOWN_INT_MASK_MASK 0x00004000L
+#define DCPG_INTERRUPT_CONTROL_1__DOMAIN3_POWER_DOWN_INT_CLEAR_MASK 0x00008000L
+//DCPG_INTERRUPT_CONTROL_2
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN16_POWER_UP_INT_MASK__SHIFT 0x0
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN16_POWER_UP_INT_CLEAR__SHIFT 0x1
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN16_POWER_DOWN_INT_MASK__SHIFT 0x2
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN16_POWER_DOWN_INT_CLEAR__SHIFT 0x3
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN17_POWER_UP_INT_MASK__SHIFT 0x4
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN17_POWER_UP_INT_CLEAR__SHIFT 0x5
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN17_POWER_DOWN_INT_MASK__SHIFT 0x6
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN17_POWER_DOWN_INT_CLEAR__SHIFT 0x7
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN18_POWER_UP_INT_MASK__SHIFT 0x8
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN18_POWER_UP_INT_CLEAR__SHIFT 0x9
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN18_POWER_DOWN_INT_MASK__SHIFT 0xa
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN18_POWER_DOWN_INT_CLEAR__SHIFT 0xb
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN19_POWER_UP_INT_MASK__SHIFT 0xc
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN19_POWER_UP_INT_CLEAR__SHIFT 0xd
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN19_POWER_DOWN_INT_MASK__SHIFT 0xe
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN19_POWER_DOWN_INT_CLEAR__SHIFT 0xf
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN16_POWER_UP_INT_MASK_MASK 0x00000001L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN16_POWER_UP_INT_CLEAR_MASK 0x00000002L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN16_POWER_DOWN_INT_MASK_MASK 0x00000004L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN16_POWER_DOWN_INT_CLEAR_MASK 0x00000008L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN17_POWER_UP_INT_MASK_MASK 0x00000010L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN17_POWER_UP_INT_CLEAR_MASK 0x00000020L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN17_POWER_DOWN_INT_MASK_MASK 0x00000040L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN17_POWER_DOWN_INT_CLEAR_MASK 0x00000080L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN18_POWER_UP_INT_MASK_MASK 0x00000100L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN18_POWER_UP_INT_CLEAR_MASK 0x00000200L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN18_POWER_DOWN_INT_MASK_MASK 0x00000400L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN18_POWER_DOWN_INT_CLEAR_MASK 0x00000800L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN19_POWER_UP_INT_MASK_MASK 0x00001000L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN19_POWER_UP_INT_CLEAR_MASK 0x00002000L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN19_POWER_DOWN_INT_MASK_MASK 0x00004000L
+#define DCPG_INTERRUPT_CONTROL_2__DOMAIN19_POWER_DOWN_INT_CLEAR_MASK 0x00008000L
+//DCPG_INTERRUPT_CONTROL_3
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN22_POWER_UP_INT_MASK__SHIFT 0x0
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN22_POWER_UP_INT_CLEAR__SHIFT 0x1
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN22_POWER_DOWN_INT_MASK__SHIFT 0x2
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN22_POWER_DOWN_INT_CLEAR__SHIFT 0x3
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN23_POWER_UP_INT_MASK__SHIFT 0x4
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN23_POWER_UP_INT_CLEAR__SHIFT 0x5
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN23_POWER_DOWN_INT_MASK__SHIFT 0x6
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN23_POWER_DOWN_INT_CLEAR__SHIFT 0x7
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN24_POWER_UP_INT_MASK__SHIFT 0x8
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN24_POWER_UP_INT_CLEAR__SHIFT 0x9
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN24_POWER_DOWN_INT_MASK__SHIFT 0xa
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN24_POWER_DOWN_INT_CLEAR__SHIFT 0xb
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN25_POWER_UP_INT_MASK__SHIFT 0xc
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN25_POWER_UP_INT_CLEAR__SHIFT 0xd
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN25_POWER_DOWN_INT_MASK__SHIFT 0xe
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN25_POWER_DOWN_INT_CLEAR__SHIFT 0xf
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN22_POWER_UP_INT_MASK_MASK 0x00000001L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN22_POWER_UP_INT_CLEAR_MASK 0x00000002L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN22_POWER_DOWN_INT_MASK_MASK 0x00000004L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN22_POWER_DOWN_INT_CLEAR_MASK 0x00000008L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN23_POWER_UP_INT_MASK_MASK 0x00000010L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN23_POWER_UP_INT_CLEAR_MASK 0x00000020L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN23_POWER_DOWN_INT_MASK_MASK 0x00000040L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN23_POWER_DOWN_INT_CLEAR_MASK 0x00000080L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN24_POWER_UP_INT_MASK_MASK 0x00000100L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN24_POWER_UP_INT_CLEAR_MASK 0x00000200L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN24_POWER_DOWN_INT_MASK_MASK 0x00000400L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN24_POWER_DOWN_INT_CLEAR_MASK 0x00000800L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN25_POWER_UP_INT_MASK_MASK 0x00001000L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN25_POWER_UP_INT_CLEAR_MASK 0x00002000L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN25_POWER_DOWN_INT_MASK_MASK 0x00004000L
+#define DCPG_INTERRUPT_CONTROL_3__DOMAIN25_POWER_DOWN_INT_CLEAR_MASK 0x00008000L
+//DC_IP_REQUEST_CNTL
+#define DC_IP_REQUEST_CNTL__IP_REQUEST_EN__SHIFT 0x0
+#define DC_IP_REQUEST_CNTL__IP_REQUEST_EN_MASK 0x00000001L
+//LONO_MEM_PWR_REQ_CNTL
+#define LONO_MEM_PWR_REQ_CNTL__LONO_MEM_PWR_REQ_DIS__SHIFT 0x0
+#define LONO_MEM_PWR_REQ_CNTL__LONO_MEM_PWR_REQ_DIS_MASK 0x00000001L
+
+
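Reading a status register back follows the inverse pattern: mask the field out, then shift it down. A hedged sketch against the DOMAIN0_PG_STATUS macros above (the function name and the bare uint32_t argument are assumptions for the example, not driver code):

#include <stdint.h>

/* Illustration only: decode the desired power state (1-bit field) and the
 * PGFSM power status (2-bit field) from a DOMAIN0_PG_STATUS value. */
static inline void domain0_pg_status_decode(uint32_t reg_val,
					    uint32_t *desired_state,
					    uint32_t *pgfsm_status)
{
	*desired_state = (reg_val & DOMAIN0_PG_STATUS__DOMAIN_DESIRED_PWR_STATE_MASK) >>
			 DOMAIN0_PG_STATUS__DOMAIN_DESIRED_PWR_STATE__SHIFT;
	*pgfsm_status  = (reg_val & DOMAIN0_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS_MASK) >>
			 DOMAIN0_PG_STATUS__DOMAIN_PGFSM_PWR_STATUS__SHIFT;
}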
+// addressBlock: dce_dc_dmu_dmcub_dispdec
+//DMCUB_REGION0_OFFSET
+#define DMCUB_REGION0_OFFSET__DMCUB_REGION0_OFFSET__SHIFT 0x8
+#define DMCUB_REGION0_OFFSET__DMCUB_REGION0_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION0_OFFSET_HIGH
+#define DMCUB_REGION0_OFFSET_HIGH__DMCUB_REGION0_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION0_OFFSET_HIGH__DMCUB_REGION0_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION1_OFFSET
+#define DMCUB_REGION1_OFFSET__DMCUB_REGION1_OFFSET__SHIFT 0x8
+#define DMCUB_REGION1_OFFSET__DMCUB_REGION1_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION1_OFFSET_HIGH
+#define DMCUB_REGION1_OFFSET_HIGH__DMCUB_REGION1_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION1_OFFSET_HIGH__DMCUB_REGION1_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION2_OFFSET
+#define DMCUB_REGION2_OFFSET__DMCUB_REGION2_OFFSET__SHIFT 0x8
+#define DMCUB_REGION2_OFFSET__DMCUB_REGION2_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION2_OFFSET_HIGH
+#define DMCUB_REGION2_OFFSET_HIGH__DMCUB_REGION2_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION2_OFFSET_HIGH__DMCUB_REGION2_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION4_OFFSET
+#define DMCUB_REGION4_OFFSET__DMCUB_REGION4_OFFSET__SHIFT 0x8
+#define DMCUB_REGION4_OFFSET__DMCUB_REGION4_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION4_OFFSET_HIGH
+#define DMCUB_REGION4_OFFSET_HIGH__DMCUB_REGION4_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION4_OFFSET_HIGH__DMCUB_REGION4_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION5_OFFSET
+#define DMCUB_REGION5_OFFSET__DMCUB_REGION5_OFFSET__SHIFT 0x8
+#define DMCUB_REGION5_OFFSET__DMCUB_REGION5_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION5_OFFSET_HIGH
+#define DMCUB_REGION5_OFFSET_HIGH__DMCUB_REGION5_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION5_OFFSET_HIGH__DMCUB_REGION5_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION6_OFFSET
+#define DMCUB_REGION6_OFFSET__DMCUB_REGION6_OFFSET__SHIFT 0x8
+#define DMCUB_REGION6_OFFSET__DMCUB_REGION6_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION6_OFFSET_HIGH
+#define DMCUB_REGION6_OFFSET_HIGH__DMCUB_REGION6_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION6_OFFSET_HIGH__DMCUB_REGION6_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION7_OFFSET
+#define DMCUB_REGION7_OFFSET__DMCUB_REGION7_OFFSET__SHIFT 0x8
+#define DMCUB_REGION7_OFFSET__DMCUB_REGION7_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION7_OFFSET_HIGH
+#define DMCUB_REGION7_OFFSET_HIGH__DMCUB_REGION7_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION7_OFFSET_HIGH__DMCUB_REGION7_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION0_TOP_ADDRESS
+#define DMCUB_REGION0_TOP_ADDRESS__DMCUB_REGION0_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION0_TOP_ADDRESS__DMCUB_REGION0_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION0_TOP_ADDRESS__DMCUB_REGION0_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION0_TOP_ADDRESS__DMCUB_REGION0_ENABLE_MASK 0x80000000L
+//DMCUB_REGION1_TOP_ADDRESS
+#define DMCUB_REGION1_TOP_ADDRESS__DMCUB_REGION1_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION1_TOP_ADDRESS__DMCUB_REGION1_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION1_TOP_ADDRESS__DMCUB_REGION1_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION1_TOP_ADDRESS__DMCUB_REGION1_ENABLE_MASK 0x80000000L
+//DMCUB_REGION2_TOP_ADDRESS
+#define DMCUB_REGION2_TOP_ADDRESS__DMCUB_REGION2_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION2_TOP_ADDRESS__DMCUB_REGION2_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION2_TOP_ADDRESS__DMCUB_REGION2_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION2_TOP_ADDRESS__DMCUB_REGION2_ENABLE_MASK 0x80000000L
+//DMCUB_REGION4_TOP_ADDRESS
+#define DMCUB_REGION4_TOP_ADDRESS__DMCUB_REGION4_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION4_TOP_ADDRESS__DMCUB_REGION4_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION4_TOP_ADDRESS__DMCUB_REGION4_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION4_TOP_ADDRESS__DMCUB_REGION4_ENABLE_MASK 0x80000000L
+//DMCUB_REGION5_TOP_ADDRESS
+#define DMCUB_REGION5_TOP_ADDRESS__DMCUB_REGION5_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION5_TOP_ADDRESS__DMCUB_REGION5_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION5_TOP_ADDRESS__DMCUB_REGION5_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION5_TOP_ADDRESS__DMCUB_REGION5_ENABLE_MASK 0x80000000L
+//DMCUB_REGION6_TOP_ADDRESS
+#define DMCUB_REGION6_TOP_ADDRESS__DMCUB_REGION6_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION6_TOP_ADDRESS__DMCUB_REGION6_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION6_TOP_ADDRESS__DMCUB_REGION6_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION6_TOP_ADDRESS__DMCUB_REGION6_ENABLE_MASK 0x80000000L
+//DMCUB_REGION7_TOP_ADDRESS
+#define DMCUB_REGION7_TOP_ADDRESS__DMCUB_REGION7_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION7_TOP_ADDRESS__DMCUB_REGION7_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION7_TOP_ADDRESS__DMCUB_REGION7_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION7_TOP_ADDRESS__DMCUB_REGION7_ENABLE_MASK 0x80000000L
+//DMCUB_REGION3_CW0_BASE_ADDRESS
+#define DMCUB_REGION3_CW0_BASE_ADDRESS__DMCUB_REGION3_CW0_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW0_BASE_ADDRESS__DMCUB_REGION3_CW0_BASE_ADDRESS_MASK 0x1FFFFFFFL
+//DMCUB_REGION3_CW1_BASE_ADDRESS
+#define DMCUB_REGION3_CW1_BASE_ADDRESS__DMCUB_REGION3_CW1_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW1_BASE_ADDRESS__DMCUB_REGION3_CW1_BASE_ADDRESS_MASK 0x1FFFFFFFL
+//DMCUB_REGION3_CW2_BASE_ADDRESS
+#define DMCUB_REGION3_CW2_BASE_ADDRESS__DMCUB_REGION3_CW2_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW2_BASE_ADDRESS__DMCUB_REGION3_CW2_BASE_ADDRESS_MASK 0x1FFFFFFFL
+//DMCUB_REGION3_CW3_BASE_ADDRESS
+#define DMCUB_REGION3_CW3_BASE_ADDRESS__DMCUB_REGION3_CW3_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW3_BASE_ADDRESS__DMCUB_REGION3_CW3_BASE_ADDRESS_MASK 0x1FFFFFFFL
+//DMCUB_REGION3_CW4_BASE_ADDRESS
+#define DMCUB_REGION3_CW4_BASE_ADDRESS__DMCUB_REGION3_CW4_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW4_BASE_ADDRESS__DMCUB_REGION3_CW4_BASE_ADDRESS_MASK 0x1FFFFFFFL
+//DMCUB_REGION3_CW5_BASE_ADDRESS
+#define DMCUB_REGION3_CW5_BASE_ADDRESS__DMCUB_REGION3_CW5_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW5_BASE_ADDRESS__DMCUB_REGION3_CW5_BASE_ADDRESS_MASK 0x1FFFFFFFL
+//DMCUB_REGION3_CW6_BASE_ADDRESS
+#define DMCUB_REGION3_CW6_BASE_ADDRESS__DMCUB_REGION3_CW6_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW6_BASE_ADDRESS__DMCUB_REGION3_CW6_BASE_ADDRESS_MASK 0x1FFFFFFFL
+//DMCUB_REGION3_CW7_BASE_ADDRESS
+#define DMCUB_REGION3_CW7_BASE_ADDRESS__DMCUB_REGION3_CW7_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW7_BASE_ADDRESS__DMCUB_REGION3_CW7_BASE_ADDRESS_MASK 0x1FFFFFFFL
+//DMCUB_REGION3_CW0_TOP_ADDRESS
+#define DMCUB_REGION3_CW0_TOP_ADDRESS__DMCUB_REGION3_CW0_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW0_TOP_ADDRESS__DMCUB_REGION3_CW0_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW0_TOP_ADDRESS__DMCUB_REGION3_CW0_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW0_TOP_ADDRESS__DMCUB_REGION3_CW0_ENABLE_MASK 0x80000000L
+//DMCUB_REGION3_CW1_TOP_ADDRESS
+#define DMCUB_REGION3_CW1_TOP_ADDRESS__DMCUB_REGION3_CW1_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW1_TOP_ADDRESS__DMCUB_REGION3_CW1_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW1_TOP_ADDRESS__DMCUB_REGION3_CW1_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW1_TOP_ADDRESS__DMCUB_REGION3_CW1_ENABLE_MASK 0x80000000L
+//DMCUB_REGION3_CW2_TOP_ADDRESS
+#define DMCUB_REGION3_CW2_TOP_ADDRESS__DMCUB_REGION3_CW2_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW2_TOP_ADDRESS__DMCUB_REGION3_CW2_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW2_TOP_ADDRESS__DMCUB_REGION3_CW2_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW2_TOP_ADDRESS__DMCUB_REGION3_CW2_ENABLE_MASK 0x80000000L
+//DMCUB_REGION3_CW3_TOP_ADDRESS
+#define DMCUB_REGION3_CW3_TOP_ADDRESS__DMCUB_REGION3_CW3_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW3_TOP_ADDRESS__DMCUB_REGION3_CW3_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW3_TOP_ADDRESS__DMCUB_REGION3_CW3_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW3_TOP_ADDRESS__DMCUB_REGION3_CW3_ENABLE_MASK 0x80000000L
+//DMCUB_REGION3_CW4_TOP_ADDRESS
+#define DMCUB_REGION3_CW4_TOP_ADDRESS__DMCUB_REGION3_CW4_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW4_TOP_ADDRESS__DMCUB_REGION3_CW4_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW4_TOP_ADDRESS__DMCUB_REGION3_CW4_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW4_TOP_ADDRESS__DMCUB_REGION3_CW4_ENABLE_MASK 0x80000000L
+//DMCUB_REGION3_CW5_TOP_ADDRESS
+#define DMCUB_REGION3_CW5_TOP_ADDRESS__DMCUB_REGION3_CW5_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW5_TOP_ADDRESS__DMCUB_REGION3_CW5_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW5_TOP_ADDRESS__DMCUB_REGION3_CW5_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW5_TOP_ADDRESS__DMCUB_REGION3_CW5_ENABLE_MASK 0x80000000L
+//DMCUB_REGION3_CW6_TOP_ADDRESS
+#define DMCUB_REGION3_CW6_TOP_ADDRESS__DMCUB_REGION3_CW6_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW6_TOP_ADDRESS__DMCUB_REGION3_CW6_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW6_TOP_ADDRESS__DMCUB_REGION3_CW6_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW6_TOP_ADDRESS__DMCUB_REGION3_CW6_ENABLE_MASK 0x80000000L
+//DMCUB_REGION3_CW7_TOP_ADDRESS
+#define DMCUB_REGION3_CW7_TOP_ADDRESS__DMCUB_REGION3_CW7_TOP_ADDRESS__SHIFT 0x0
+#define DMCUB_REGION3_CW7_TOP_ADDRESS__DMCUB_REGION3_CW7_ENABLE__SHIFT 0x1f
+#define DMCUB_REGION3_CW7_TOP_ADDRESS__DMCUB_REGION3_CW7_TOP_ADDRESS_MASK 0x1FFFFFFFL
+#define DMCUB_REGION3_CW7_TOP_ADDRESS__DMCUB_REGION3_CW7_ENABLE_MASK 0x80000000L
+//DMCUB_REGION3_CW0_OFFSET
+#define DMCUB_REGION3_CW0_OFFSET__DMCUB_REGION3_CW0_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW0_OFFSET__DMCUB_REGION3_CW0_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION3_CW0_OFFSET_HIGH
+#define DMCUB_REGION3_CW0_OFFSET_HIGH__DMCUB_REGION3_CW0_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW0_OFFSET_HIGH__DMCUB_REGION3_CW0_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION3_CW1_OFFSET
+#define DMCUB_REGION3_CW1_OFFSET__DMCUB_REGION3_CW1_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW1_OFFSET__DMCUB_REGION3_CW1_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION3_CW1_OFFSET_HIGH
+#define DMCUB_REGION3_CW1_OFFSET_HIGH__DMCUB_REGION3_CW1_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW1_OFFSET_HIGH__DMCUB_REGION3_CW1_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION3_CW2_OFFSET
+#define DMCUB_REGION3_CW2_OFFSET__DMCUB_REGION3_CW2_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW2_OFFSET__DMCUB_REGION3_CW2_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION3_CW2_OFFSET_HIGH
+#define DMCUB_REGION3_CW2_OFFSET_HIGH__DMCUB_REGION3_CW2_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW2_OFFSET_HIGH__DMCUB_REGION3_CW2_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION3_CW3_OFFSET
+#define DMCUB_REGION3_CW3_OFFSET__DMCUB_REGION3_CW3_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW3_OFFSET__DMCUB_REGION3_CW3_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION3_CW3_OFFSET_HIGH
+#define DMCUB_REGION3_CW3_OFFSET_HIGH__DMCUB_REGION3_CW3_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW3_OFFSET_HIGH__DMCUB_REGION3_CW3_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION3_CW4_OFFSET
+#define DMCUB_REGION3_CW4_OFFSET__DMCUB_REGION3_CW4_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW4_OFFSET__DMCUB_REGION3_CW4_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION3_CW4_OFFSET_HIGH
+#define DMCUB_REGION3_CW4_OFFSET_HIGH__DMCUB_REGION3_CW4_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW4_OFFSET_HIGH__DMCUB_REGION3_CW4_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION3_CW5_OFFSET
+#define DMCUB_REGION3_CW5_OFFSET__DMCUB_REGION3_CW5_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW5_OFFSET__DMCUB_REGION3_CW5_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION3_CW5_OFFSET_HIGH
+#define DMCUB_REGION3_CW5_OFFSET_HIGH__DMCUB_REGION3_CW5_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW5_OFFSET_HIGH__DMCUB_REGION3_CW5_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION3_CW6_OFFSET
+#define DMCUB_REGION3_CW6_OFFSET__DMCUB_REGION3_CW6_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW6_OFFSET__DMCUB_REGION3_CW6_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION3_CW6_OFFSET_HIGH
+#define DMCUB_REGION3_CW6_OFFSET_HIGH__DMCUB_REGION3_CW6_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW6_OFFSET_HIGH__DMCUB_REGION3_CW6_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_REGION3_CW7_OFFSET
+#define DMCUB_REGION3_CW7_OFFSET__DMCUB_REGION3_CW7_OFFSET__SHIFT 0x8
+#define DMCUB_REGION3_CW7_OFFSET__DMCUB_REGION3_CW7_OFFSET_MASK 0xFFFFFF00L
+//DMCUB_REGION3_CW7_OFFSET_HIGH
+#define DMCUB_REGION3_CW7_OFFSET_HIGH__DMCUB_REGION3_CW7_OFFSET_HIGH__SHIFT 0x0
+#define DMCUB_REGION3_CW7_OFFSET_HIGH__DMCUB_REGION3_CW7_OFFSET_HIGH_MASK 0x0000FFFFL
+//DMCUB_INTERRUPT_ENABLE
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_TIMER0_INT_EN__SHIFT 0x0
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_TIMER1_INT_EN__SHIFT 0x1
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX0_READY_INT_EN__SHIFT 0x2
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX0_DONE_INT_EN__SHIFT 0x3
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX1_READY_INT_EN__SHIFT 0x4
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX1_DONE_INT_EN__SHIFT 0x5
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX0_READY_INT_EN__SHIFT 0x6
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX0_DONE_INT_EN__SHIFT 0x7
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX1_READY_INT_EN__SHIFT 0x8
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX1_DONE_INT_EN__SHIFT 0x9
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT0_INT_EN__SHIFT 0xa
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT1_INT_EN__SHIFT 0xb
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT2_INT_EN__SHIFT 0xc
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT3_INT_EN__SHIFT 0xd
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT4_INT_EN__SHIFT 0xe
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT5_INT_EN__SHIFT 0xf
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT6_INT_EN__SHIFT 0x10
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT_IH_INT_EN__SHIFT 0x11
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_UNDEFINED_ADDRESS_FAULT_INT_EN__SHIFT 0x12
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_TIMER0_INT_EN_MASK 0x00000001L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_TIMER1_INT_EN_MASK 0x00000002L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX0_READY_INT_EN_MASK 0x00000004L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX0_DONE_INT_EN_MASK 0x00000008L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX1_READY_INT_EN_MASK 0x00000010L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_INBOX1_DONE_INT_EN_MASK 0x00000020L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX0_READY_INT_EN_MASK 0x00000040L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX0_DONE_INT_EN_MASK 0x00000080L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX1_READY_INT_EN_MASK 0x00000100L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX1_DONE_INT_EN_MASK 0x00000200L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT0_INT_EN_MASK 0x00000400L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT1_INT_EN_MASK 0x00000800L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT2_INT_EN_MASK 0x00001000L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT3_INT_EN_MASK 0x00002000L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT4_INT_EN_MASK 0x00004000L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT5_INT_EN_MASK 0x00008000L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT6_INT_EN_MASK 0x00010000L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_GPINT_IH_INT_EN_MASK 0x00020000L
+#define DMCUB_INTERRUPT_ENABLE__DMCUB_UNDEFINED_ADDRESS_FAULT_INT_EN_MASK 0x00040000L
+//DMCUB_INTERRUPT_ACK
+#define DMCUB_INTERRUPT_ACK__DMCUB_TIMER0_INT_ACK__SHIFT 0x0
+#define DMCUB_INTERRUPT_ACK__DMCUB_TIMER1_INT_ACK__SHIFT 0x1
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX0_READY_INT_ACK__SHIFT 0x2
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX0_DONE_INT_ACK__SHIFT 0x3
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX1_READY_INT_ACK__SHIFT 0x4
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX1_DONE_INT_ACK__SHIFT 0x5
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX0_READY_INT_ACK__SHIFT 0x6
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX0_DONE_INT_ACK__SHIFT 0x7
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX1_READY_INT_ACK__SHIFT 0x8
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX1_DONE_INT_ACK__SHIFT 0x9
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT0_INT_ACK__SHIFT 0xa
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT1_INT_ACK__SHIFT 0xb
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT2_INT_ACK__SHIFT 0xc
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT3_INT_ACK__SHIFT 0xd
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT4_INT_ACK__SHIFT 0xe
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT5_INT_ACK__SHIFT 0xf
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT6_INT_ACK__SHIFT 0x10
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT_IH_INT_ACK__SHIFT 0x11
+#define DMCUB_INTERRUPT_ACK__DMCUB_UNDEFINED_ADDRESS_FAULT_ACK__SHIFT 0x12
+#define DMCUB_INTERRUPT_ACK__DMCUB_TIMER0_INT_ACK_MASK 0x00000001L
+#define DMCUB_INTERRUPT_ACK__DMCUB_TIMER1_INT_ACK_MASK 0x00000002L
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX0_READY_INT_ACK_MASK 0x00000004L
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX0_DONE_INT_ACK_MASK 0x00000008L
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX1_READY_INT_ACK_MASK 0x00000010L
+#define DMCUB_INTERRUPT_ACK__DMCUB_INBOX1_DONE_INT_ACK_MASK 0x00000020L
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX0_READY_INT_ACK_MASK 0x00000040L
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX0_DONE_INT_ACK_MASK 0x00000080L
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX1_READY_INT_ACK_MASK 0x00000100L
+#define DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX1_DONE_INT_ACK_MASK 0x00000200L
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT0_INT_ACK_MASK 0x00000400L
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT1_INT_ACK_MASK 0x00000800L
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT2_INT_ACK_MASK 0x00001000L
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT3_INT_ACK_MASK 0x00002000L
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT4_INT_ACK_MASK 0x00004000L
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT5_INT_ACK_MASK 0x00008000L
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT6_INT_ACK_MASK 0x00010000L
+#define DMCUB_INTERRUPT_ACK__DMCUB_GPINT_IH_INT_ACK_MASK 0x00020000L
+#define DMCUB_INTERRUPT_ACK__DMCUB_UNDEFINED_ADDRESS_FAULT_ACK_MASK 0x00040000L
+//DMCUB_INTERRUPT_STATUS
+#define DMCUB_INTERRUPT_STATUS__DMCUB_TIMER0_INT_STAT__SHIFT 0x0
+#define DMCUB_INTERRUPT_STATUS__DMCUB_TIMER1_INT_STAT__SHIFT 0x1
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX0_READY_INT_STAT__SHIFT 0x2
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX0_DONE_INT_STAT__SHIFT 0x3
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX1_READY_INT_STAT__SHIFT 0x4
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX1_DONE_INT_STAT__SHIFT 0x5
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX0_READY_INT_STAT__SHIFT 0x6
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX0_DONE_INT_STAT__SHIFT 0x7
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX1_READY_INT_STAT__SHIFT 0x8
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX1_DONE_INT_STAT__SHIFT 0x9
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT0_INT_STAT__SHIFT 0xa
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT1_INT_STAT__SHIFT 0xb
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT2_INT_STAT__SHIFT 0xc
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT3_INT_STAT__SHIFT 0xd
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT4_INT_STAT__SHIFT 0xe
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT5_INT_STAT__SHIFT 0xf
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT6_INT_STAT__SHIFT 0x10
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT_IH_INT_STAT__SHIFT 0x11
+#define DMCUB_INTERRUPT_STATUS__DMCUB_UNDEFINED_ADDRESS_FAULT__SHIFT 0x12
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INST_FETCH_FAULT__SHIFT 0x13
+#define DMCUB_INTERRUPT_STATUS__DMCUB_DATA_WRITE_FAULT__SHIFT 0x14
+#define DMCUB_INTERRUPT_STATUS__DMCUB_PWR_UP_TRIG_INT_STAT__SHIFT 0x15
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OTG_RESYNC_TRIG_INT_STAT__SHIFT 0x16
+#define DMCUB_INTERRUPT_STATUS__DMCUB_TIMER0_INT_STAT_MASK 0x00000001L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_TIMER1_INT_STAT_MASK 0x00000002L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX0_READY_INT_STAT_MASK 0x00000004L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX0_DONE_INT_STAT_MASK 0x00000008L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX1_READY_INT_STAT_MASK 0x00000010L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INBOX1_DONE_INT_STAT_MASK 0x00000020L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX0_READY_INT_STAT_MASK 0x00000040L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX0_DONE_INT_STAT_MASK 0x00000080L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX1_READY_INT_STAT_MASK 0x00000100L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OUTBOX1_DONE_INT_STAT_MASK 0x00000200L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT0_INT_STAT_MASK 0x00000400L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT1_INT_STAT_MASK 0x00000800L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT2_INT_STAT_MASK 0x00001000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT3_INT_STAT_MASK 0x00002000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT4_INT_STAT_MASK 0x00004000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT5_INT_STAT_MASK 0x00008000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT6_INT_STAT_MASK 0x00010000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_GPINT_IH_INT_STAT_MASK 0x00020000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_UNDEFINED_ADDRESS_FAULT_MASK 0x00040000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_INST_FETCH_FAULT_MASK 0x00080000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_DATA_WRITE_FAULT_MASK 0x00100000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_PWR_UP_TRIG_INT_STAT_MASK 0x00200000L
+#define DMCUB_INTERRUPT_STATUS__DMCUB_OTG_RESYNC_TRIG_INT_STAT_MASK 0x00400000L
+//DMCUB_INTERRUPT_TYPE
+#define DMCUB_INTERRUPT_TYPE__DMCUB_TIMER0_INT_TYPE__SHIFT 0x0
+#define DMCUB_INTERRUPT_TYPE__DMCUB_TIMER1_INT_TYPE__SHIFT 0x1
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX0_READY_INT_TYPE__SHIFT 0x2
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX0_DONE_INT_TYPE__SHIFT 0x3
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX1_READY_INT_TYPE__SHIFT 0x4
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX1_DONE_INT_TYPE__SHIFT 0x5
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX0_READY_INT_TYPE__SHIFT 0x6
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX0_DONE_INT_TYPE__SHIFT 0x7
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX1_READY_INT_TYPE__SHIFT 0x8
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX1_DONE_INT_TYPE__SHIFT 0x9
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT0_INT_TYPE__SHIFT 0xa
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT1_INT_TYPE__SHIFT 0xb
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT2_INT_TYPE__SHIFT 0xc
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT3_INT_TYPE__SHIFT 0xd
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT4_INT_TYPE__SHIFT 0xe
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT5_INT_TYPE__SHIFT 0xf
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT6_INT_TYPE__SHIFT 0x10
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT_IH_INT_TYPE__SHIFT 0x11
+#define DMCUB_INTERRUPT_TYPE__DMCUB_UNDEFINED_ADDRESS_FAULT_INT_TYPE__SHIFT 0x12
+#define DMCUB_INTERRUPT_TYPE__DMCUB_TIMER0_INT_TYPE_MASK 0x00000001L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_TIMER1_INT_TYPE_MASK 0x00000002L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX0_READY_INT_TYPE_MASK 0x00000004L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX0_DONE_INT_TYPE_MASK 0x00000008L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX1_READY_INT_TYPE_MASK 0x00000010L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_INBOX1_DONE_INT_TYPE_MASK 0x00000020L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX0_READY_INT_TYPE_MASK 0x00000040L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX0_DONE_INT_TYPE_MASK 0x00000080L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX1_READY_INT_TYPE_MASK 0x00000100L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_OUTBOX1_DONE_INT_TYPE_MASK 0x00000200L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT0_INT_TYPE_MASK 0x00000400L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT1_INT_TYPE_MASK 0x00000800L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT2_INT_TYPE_MASK 0x00001000L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT3_INT_TYPE_MASK 0x00002000L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT4_INT_TYPE_MASK 0x00004000L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT5_INT_TYPE_MASK 0x00008000L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT6_INT_TYPE_MASK 0x00010000L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_GPINT_IH_INT_TYPE_MASK 0x00020000L
+#define DMCUB_INTERRUPT_TYPE__DMCUB_UNDEFINED_ADDRESS_FAULT_INT_TYPE_MASK 0x00040000L
+//DMCUB_EXT_INTERRUPT_STATUS
+#define DMCUB_EXT_INTERRUPT_STATUS__DMCUB_EXT_INTERRUPT_COUNT__SHIFT 0x0
+#define DMCUB_EXT_INTERRUPT_STATUS__DMCUB_EXT_INTERRUPT_ID__SHIFT 0x8
+#define DMCUB_EXT_INTERRUPT_STATUS__DMCUB_EXT_INTERRUPT_COUNT_MASK 0x000000FFL
+#define DMCUB_EXT_INTERRUPT_STATUS__DMCUB_EXT_INTERRUPT_ID_MASK 0x0000FF00L
+//DMCUB_EXT_INTERRUPT_CTXID
+#define DMCUB_EXT_INTERRUPT_CTXID__DMCUB_EXT_INTERRUPT_CTXID__SHIFT 0x0
+#define DMCUB_EXT_INTERRUPT_CTXID__DMCUB_EXT_INTERRUPT_CTXID_MASK 0x0FFFFFFFL
+//DMCUB_EXT_INTERRUPT_ACK
+#define DMCUB_EXT_INTERRUPT_ACK__DMCUB_EXT_INTERRUPT_ACK__SHIFT 0x0
+#define DMCUB_EXT_INTERRUPT_ACK__DMCUB_EXT_INTERRUPT_ACK_MASK 0x00000001L
+//DMCUB_INST_FETCH_FAULT_ADDR
+#define DMCUB_INST_FETCH_FAULT_ADDR__DMCUB_INST_FETCH_FAULT_ADDR__SHIFT 0x0
+#define DMCUB_INST_FETCH_FAULT_ADDR__DMCUB_INST_FETCH_FAULT_ADDR_MASK 0xFFFFFFFFL
+//DMCUB_DATA_WRITE_FAULT_ADDR
+#define DMCUB_DATA_WRITE_FAULT_ADDR__DMCUB_DATA_WRITE_FAULT_ADDR__SHIFT 0x0
+#define DMCUB_DATA_WRITE_FAULT_ADDR__DMCUB_DATA_WRITE_FAULT_ADDR_MASK 0xFFFFFFFFL
+//DMCUB_SEC_CNTL
+#define DMCUB_SEC_CNTL__DMCUB_MEM_SEC_LVL__SHIFT 0x0
+#define DMCUB_SEC_CNTL__DMCUB_MEM_UNIT_ID__SHIFT 0x8
+#define DMCUB_SEC_CNTL__DMCUB_SEC_RESET__SHIFT 0x10
+#define DMCUB_SEC_CNTL__DMCUB_DATA_FAULT_INT_DISABLE__SHIFT 0x11
+#define DMCUB_SEC_CNTL__DMCUB_AUTO_RESET_STATUS__SHIFT 0x14
+#define DMCUB_SEC_CNTL__DMCUB_SEC_RESET_STATUS__SHIFT 0x15
+#define DMCUB_SEC_CNTL__DMCUB_INST_FETCH_FAULT_CLEAR__SHIFT 0x18
+#define DMCUB_SEC_CNTL__DMCUB_DATA_WRITE_FAULT_CLEAR__SHIFT 0x19
+#define DMCUB_SEC_CNTL__DMCUB_MEM_SEC_LVL_MASK 0x00000007L
+#define DMCUB_SEC_CNTL__DMCUB_MEM_UNIT_ID_MASK 0x00003F00L
+#define DMCUB_SEC_CNTL__DMCUB_SEC_RESET_MASK 0x00010000L
+#define DMCUB_SEC_CNTL__DMCUB_DATA_FAULT_INT_DISABLE_MASK 0x00020000L
+#define DMCUB_SEC_CNTL__DMCUB_AUTO_RESET_STATUS_MASK 0x00100000L
+#define DMCUB_SEC_CNTL__DMCUB_SEC_RESET_STATUS_MASK 0x00200000L
+#define DMCUB_SEC_CNTL__DMCUB_INST_FETCH_FAULT_CLEAR_MASK 0x01000000L
+#define DMCUB_SEC_CNTL__DMCUB_DATA_WRITE_FAULT_CLEAR_MASK 0x02000000L
+//DMCUB_MEM_CNTL
+#define DMCUB_MEM_CNTL__DMCUB_MEM_WRITE_QOS__SHIFT 0x0
+#define DMCUB_MEM_CNTL__DMCUB_MEM_READ_QOS__SHIFT 0x4
+#define DMCUB_MEM_CNTL__DMCUB_MEM_WRITE_QOS_MASK 0x0000000FL
+#define DMCUB_MEM_CNTL__DMCUB_MEM_READ_QOS_MASK 0x000000F0L
+//DMCUB_INBOX0_BASE_ADDRESS
+#define DMCUB_INBOX0_BASE_ADDRESS__DMCUB_INBOX0_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_INBOX0_BASE_ADDRESS__DMCUB_INBOX0_BASE_ADDRESS_MASK 0xFFFFFFFFL
+//DMCUB_INBOX0_SIZE
+#define DMCUB_INBOX0_SIZE__DMCUB_INBOX0_SIZE__SHIFT 0x0
+#define DMCUB_INBOX0_SIZE__DMCUB_INBOX0_SIZE_MASK 0xFFFFFFFFL
+//DMCUB_INBOX0_WPTR
+#define DMCUB_INBOX0_WPTR__DMCUB_INBOX0_WPTR__SHIFT 0x0
+#define DMCUB_INBOX0_WPTR__DMCUB_INBOX0_WPTR_MASK 0xFFFFFFFFL
+//DMCUB_INBOX0_RPTR
+#define DMCUB_INBOX0_RPTR__DMCUB_INBOX0_RPTR__SHIFT 0x0
+#define DMCUB_INBOX0_RPTR__DMCUB_INBOX0_RPTR_MASK 0xFFFFFFFFL
+//DMCUB_INBOX1_BASE_ADDRESS
+#define DMCUB_INBOX1_BASE_ADDRESS__DMCUB_INBOX1_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_INBOX1_BASE_ADDRESS__DMCUB_INBOX1_BASE_ADDRESS_MASK 0xFFFFFFFFL
+//DMCUB_INBOX1_SIZE
+#define DMCUB_INBOX1_SIZE__DMCUB_INBOX1_SIZE__SHIFT 0x0
+#define DMCUB_INBOX1_SIZE__DMCUB_INBOX1_SIZE_MASK 0xFFFFFFFFL
+//DMCUB_INBOX1_WPTR
+#define DMCUB_INBOX1_WPTR__DMCUB_INBOX1_WPTR__SHIFT 0x0
+#define DMCUB_INBOX1_WPTR__DMCUB_INBOX1_WPTR_MASK 0xFFFFFFFFL
+//DMCUB_INBOX1_RPTR
+#define DMCUB_INBOX1_RPTR__DMCUB_INBOX1_RPTR__SHIFT 0x0
+#define DMCUB_INBOX1_RPTR__DMCUB_INBOX1_RPTR_MASK 0xFFFFFFFFL
+//DMCUB_OUTBOX0_BASE_ADDRESS
+#define DMCUB_OUTBOX0_BASE_ADDRESS__DMCUB_OUTBOX0_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_OUTBOX0_BASE_ADDRESS__DMCUB_OUTBOX0_BASE_ADDRESS_MASK 0xFFFFFFFFL
+//DMCUB_OUTBOX0_SIZE
+#define DMCUB_OUTBOX0_SIZE__DMCUB_OUTBOX0_SIZE__SHIFT 0x0
+#define DMCUB_OUTBOX0_SIZE__DMCUB_OUTBOX0_SIZE_MASK 0xFFFFFFFFL
+//DMCUB_OUTBOX0_WPTR
+#define DMCUB_OUTBOX0_WPTR__DMCUB_OUTBOX0_WPTR__SHIFT 0x0
+#define DMCUB_OUTBOX0_WPTR__DMCUB_OUTBOX0_WPTR_MASK 0xFFFFFFFFL
+//DMCUB_OUTBOX0_RPTR
+#define DMCUB_OUTBOX0_RPTR__DMCUB_OUTBOX0_RPTR__SHIFT 0x0
+#define DMCUB_OUTBOX0_RPTR__DMCUB_OUTBOX0_RPTR_MASK 0xFFFFFFFFL
+//DMCUB_OUTBOX1_BASE_ADDRESS
+#define DMCUB_OUTBOX1_BASE_ADDRESS__DMCUB_OUTBOX1_BASE_ADDRESS__SHIFT 0x0
+#define DMCUB_OUTBOX1_BASE_ADDRESS__DMCUB_OUTBOX1_BASE_ADDRESS_MASK 0xFFFFFFFFL
+//DMCUB_OUTBOX1_SIZE
+#define DMCUB_OUTBOX1_SIZE__DMCUB_OUTBOX1_SIZE__SHIFT 0x0
+#define DMCUB_OUTBOX1_SIZE__DMCUB_OUTBOX1_SIZE_MASK 0xFFFFFFFFL
+//DMCUB_OUTBOX1_WPTR
+#define DMCUB_OUTBOX1_WPTR__DMCUB_OUTBOX1_WPTR__SHIFT 0x0
+#define DMCUB_OUTBOX1_WPTR__DMCUB_OUTBOX1_WPTR_MASK 0xFFFFFFFFL
+//DMCUB_OUTBOX1_RPTR
+#define DMCUB_OUTBOX1_RPTR__DMCUB_OUTBOX1_RPTR__SHIFT 0x0
+#define DMCUB_OUTBOX1_RPTR__DMCUB_OUTBOX1_RPTR_MASK 0xFFFFFFFFL
+//DMCUB_TIMER_TRIGGER0
+#define DMCUB_TIMER_TRIGGER0__DMCUB_TIMER_TRIGGER0__SHIFT 0x0
+#define DMCUB_TIMER_TRIGGER0__DMCUB_TIMER_TRIGGER0_MASK 0xFFFFFFFFL
+//DMCUB_TIMER_TRIGGER1
+#define DMCUB_TIMER_TRIGGER1__DMCUB_TIMER_TRIGGER1__SHIFT 0x0
+#define DMCUB_TIMER_TRIGGER1__DMCUB_TIMER_TRIGGER1_MASK 0xFFFFFFFFL
+//DMCUB_TIMER_WINDOW
+#define DMCUB_TIMER_WINDOW__DMCUB_TIMER_WINDOW__SHIFT 0x0
+#define DMCUB_TIMER_WINDOW__DMCUB_TIMER_WINDOW_MASK 0x00000007L
+//DMCUB_SCRATCH0
+#define DMCUB_SCRATCH0__DMCUB_SCRATCH0__SHIFT 0x0
+#define DMCUB_SCRATCH0__DMCUB_SCRATCH0_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH1
+#define DMCUB_SCRATCH1__DMCUB_SCRATCH1__SHIFT 0x0
+#define DMCUB_SCRATCH1__DMCUB_SCRATCH1_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH2
+#define DMCUB_SCRATCH2__DMCUB_SCRATCH2__SHIFT 0x0
+#define DMCUB_SCRATCH2__DMCUB_SCRATCH2_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH3
+#define DMCUB_SCRATCH3__DMCUB_SCRATCH3__SHIFT 0x0
+#define DMCUB_SCRATCH3__DMCUB_SCRATCH3_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH4
+#define DMCUB_SCRATCH4__DMCUB_SCRATCH4__SHIFT 0x0
+#define DMCUB_SCRATCH4__DMCUB_SCRATCH4_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH5
+#define DMCUB_SCRATCH5__DMCUB_SCRATCH5__SHIFT 0x0
+#define DMCUB_SCRATCH5__DMCUB_SCRATCH5_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH6
+#define DMCUB_SCRATCH6__DMCUB_SCRATCH6__SHIFT 0x0
+#define DMCUB_SCRATCH6__DMCUB_SCRATCH6_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH7
+#define DMCUB_SCRATCH7__DMCUB_SCRATCH7__SHIFT 0x0
+#define DMCUB_SCRATCH7__DMCUB_SCRATCH7_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH8
+#define DMCUB_SCRATCH8__DMCUB_SCRATCH8__SHIFT 0x0
+#define DMCUB_SCRATCH8__DMCUB_SCRATCH8_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH9
+#define DMCUB_SCRATCH9__DMCUB_SCRATCH9__SHIFT 0x0
+#define DMCUB_SCRATCH9__DMCUB_SCRATCH9_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH10
+#define DMCUB_SCRATCH10__DMCUB_SCRATCH10__SHIFT 0x0
+#define DMCUB_SCRATCH10__DMCUB_SCRATCH10_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH11
+#define DMCUB_SCRATCH11__DMCUB_SCRATCH11__SHIFT 0x0
+#define DMCUB_SCRATCH11__DMCUB_SCRATCH11_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH12
+#define DMCUB_SCRATCH12__DMCUB_SCRATCH12__SHIFT 0x0
+#define DMCUB_SCRATCH12__DMCUB_SCRATCH12_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH13
+#define DMCUB_SCRATCH13__DMCUB_SCRATCH13__SHIFT 0x0
+#define DMCUB_SCRATCH13__DMCUB_SCRATCH13_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH14
+#define DMCUB_SCRATCH14__DMCUB_SCRATCH14__SHIFT 0x0
+#define DMCUB_SCRATCH14__DMCUB_SCRATCH14_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH15
+#define DMCUB_SCRATCH15__DMCUB_SCRATCH15__SHIFT 0x0
+#define DMCUB_SCRATCH15__DMCUB_SCRATCH15_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH16
+#define DMCUB_SCRATCH16__DMCUB_SCRATCH16__SHIFT 0x0
+#define DMCUB_SCRATCH16__DMCUB_SCRATCH16_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH17
+#define DMCUB_SCRATCH17__DMCUB_SCRATCH17__SHIFT 0x0
+#define DMCUB_SCRATCH17__DMCUB_SCRATCH17_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH18
+#define DMCUB_SCRATCH18__DMCUB_SCRATCH18__SHIFT 0x0
+#define DMCUB_SCRATCH18__DMCUB_SCRATCH18_MASK 0xFFFFFFFFL
+//DMCUB_CNTL
+#define DMCUB_CNTL__DMCUB_LS_WAKE_DELAY__SHIFT 0x0
+#define DMCUB_CNTL__DMCUB_DMCUBCLK_R_GATE_DIS__SHIFT 0x8
+#define DMCUB_CNTL__DMCUB_ENABLE__SHIFT 0x10
+#define DMCUB_CNTL__DMCUB_MEM_LIGHT_SLEEP_DISABLE__SHIFT 0x12
+#define DMCUB_CNTL__DMCUB_TRACEPORT_EN__SHIFT 0x13
+#define DMCUB_CNTL__DMCUB_PWAIT_MODE_STATUS__SHIFT 0x14
+#define DMCUB_CNTL__DMCUB_LS_WAKE_DELAY_MASK 0x000000FFL
+#define DMCUB_CNTL__DMCUB_DMCUBCLK_R_GATE_DIS_MASK 0x00000100L
+#define DMCUB_CNTL__DMCUB_ENABLE_MASK 0x00010000L
+#define DMCUB_CNTL__DMCUB_MEM_LIGHT_SLEEP_DISABLE_MASK 0x00040000L
+#define DMCUB_CNTL__DMCUB_TRACEPORT_EN_MASK 0x00080000L
+#define DMCUB_CNTL__DMCUB_PWAIT_MODE_STATUS_MASK 0x00100000L
+//DMCUB_GPINT_DATAIN0
+#define DMCUB_GPINT_DATAIN0__DMCUB_GPINT_DATAIN0__SHIFT 0x0
+#define DMCUB_GPINT_DATAIN0__DMCUB_GPINT_DATAIN0_MASK 0xFFFFFFFFL
+//DMCUB_GPINT_DATAIN1
+#define DMCUB_GPINT_DATAIN1__DMCUB_GPINT_DATAIN1__SHIFT 0x0
+#define DMCUB_GPINT_DATAIN1__DMCUB_GPINT_DATAIN1_MASK 0xFFFFFFFFL
+//DMCUB_GPINT_DATAOUT
+#define DMCUB_GPINT_DATAOUT__DMCUB_GPINT_DATAOUT__SHIFT 0x0
+#define DMCUB_GPINT_DATAOUT__DMCUB_GPINT_DATAOUT_MASK 0xFFFFFFFFL
+//DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR
+#define DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR__DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR__SHIFT 0x0
+#define DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR__DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR_MASK 0xFFFFFFFFL
+//DMCUB_LS_WAKE_INT_ENABLE
+#define DMCUB_LS_WAKE_INT_ENABLE__DMCUB_LS_WAKE_INT_ENABLE__SHIFT 0x0
+#define DMCUB_LS_WAKE_INT_ENABLE__DMCUB_LS_WAKE_INT_ENABLE_MASK 0xFFFFFFFFL
+//DMCUB_MEM_PWR_CNTL
+#define DMCUB_MEM_PWR_CNTL__DMCUB_MEM_PWR_FORCE__SHIFT 0x1
+#define DMCUB_MEM_PWR_CNTL__DMCUB_MEM_PWR_DIS__SHIFT 0x3
+#define DMCUB_MEM_PWR_CNTL__DMCUB_MEM_PWR_STATE__SHIFT 0x4
+#define DMCUB_MEM_PWR_CNTL__DMCUB_MEM_PWR_FORCE_MASK 0x00000006L
+#define DMCUB_MEM_PWR_CNTL__DMCUB_MEM_PWR_DIS_MASK 0x00000008L
+#define DMCUB_MEM_PWR_CNTL__DMCUB_MEM_PWR_STATE_MASK 0x00000030L
+//DMCUB_TIMER_CURRENT
+#define DMCUB_TIMER_CURRENT__DMCUB_TIMER_CURRENT__SHIFT 0x0
+#define DMCUB_TIMER_CURRENT__DMCUB_TIMER_CURRENT_MASK 0xFFFFFFFFL
+//DMCUB_DBG_BUS_SELECT
+//DMCUB_PROC_ID
+#define DMCUB_PROC_ID__DMCUB_PROC_ID__SHIFT 0x0
+#define DMCUB_PROC_ID__DMCUB_PROC_ID_MASK 0x0000FFFFL
+//DMCUB_CNTL2
+#define DMCUB_CNTL2__DMCUB_SOFT_RESET__SHIFT 0x0
+#define DMCUB_CNTL2__DMCUB_SOFT_RESET_MASK 0x00000001L
+//DMCUB_GPINT_DATAIN2
+#define DMCUB_GPINT_DATAIN2__DMCUB_GPINT_DATAIN2__SHIFT 0x0
+#define DMCUB_GPINT_DATAIN2__DMCUB_GPINT_DATAIN2_MASK 0xFFFFFFFFL
+//DMCUB_GPINT_DATAIN3
+#define DMCUB_GPINT_DATAIN3__DMCUB_GPINT_DATAIN3__SHIFT 0x0
+#define DMCUB_GPINT_DATAIN3__DMCUB_GPINT_DATAIN3_MASK 0xFFFFFFFFL
+//DMCUB_GPINT_DATAIN4
+#define DMCUB_GPINT_DATAIN4__DMCUB_GPINT_DATAIN4__SHIFT 0x0
+#define DMCUB_GPINT_DATAIN4__DMCUB_GPINT_DATAIN4_MASK 0xFFFFFFFFL
+//DMCUB_GPINT_DATAIN5
+#define DMCUB_GPINT_DATAIN5__DMCUB_GPINT_DATAIN5__SHIFT 0x0
+#define DMCUB_GPINT_DATAIN5__DMCUB_GPINT_DATAIN5_MASK 0xFFFFFFFFL
+//DMCUB_GPINT_DATAIN6
+#define DMCUB_GPINT_DATAIN6__DMCUB_GPINT_DATAIN6__SHIFT 0x0
+#define DMCUB_GPINT_DATAIN6__DMCUB_GPINT_DATAIN6_MASK 0xFFFFFFFFL
+//DMCUB_REGION3_TMR_AXI_SPACE
+#define DMCUB_REGION3_TMR_AXI_SPACE__DMCUB_REGION3_TMR_AXI_SPACE__SHIFT 0x0
+#define DMCUB_REGION3_TMR_AXI_SPACE__DMCUB_REGION3_TMR_AXI_SPACE_MASK 0x07L
+//DMCUB_SCRATCH19
+#define DMCUB_SCRATCH19__DMCUB_SCRATCH19__SHIFT 0x0
+#define DMCUB_SCRATCH19__DMCUB_SCRATCH19_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH20
+#define DMCUB_SCRATCH20__DMCUB_SCRATCH20__SHIFT 0x0
+#define DMCUB_SCRATCH20__DMCUB_SCRATCH20_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH21
+#define DMCUB_SCRATCH21__DMCUB_SCRATCH21__SHIFT 0x0
+#define DMCUB_SCRATCH21__DMCUB_SCRATCH21_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH22
+#define DMCUB_SCRATCH22__DMCUB_SCRATCH22__SHIFT 0x0
+#define DMCUB_SCRATCH22__DMCUB_SCRATCH22_MASK 0xFFFFFFFFL
+//DMCUB_SCRATCH23
+#define DMCUB_SCRATCH23__DMCUB_SCRATCH23__SHIFT 0x0
+#define DMCUB_SCRATCH23__DMCUB_SCRATCH23_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_wb0_dispdec_dwb_top_dispdec
+//DWB_ENABLE_CLK_CTRL
+#define DWB_ENABLE_CLK_CTRL__DWB_ENABLE__SHIFT 0x0
+#define DWB_ENABLE_CLK_CTRL__DISPCLK_R_DWB_GATE_DIS__SHIFT 0x4
+#define DWB_ENABLE_CLK_CTRL__DISPCLK_G_DWB_GATE_DIS__SHIFT 0x8
+#define DWB_ENABLE_CLK_CTRL__DWB_TEST_CLK_SEL__SHIFT 0xc
+#define DWB_ENABLE_CLK_CTRL__DWB_FGCG_REP_DIS__SHIFT 0x18
+#define DWB_ENABLE_CLK_CTRL__DWB_ENABLE_MASK 0x00000001L
+#define DWB_ENABLE_CLK_CTRL__DISPCLK_R_DWB_GATE_DIS_MASK 0x00000010L
+#define DWB_ENABLE_CLK_CTRL__DISPCLK_G_DWB_GATE_DIS_MASK 0x00000100L
+#define DWB_ENABLE_CLK_CTRL__DWB_TEST_CLK_SEL_MASK 0x00003000L
+#define DWB_ENABLE_CLK_CTRL__DWB_FGCG_REP_DIS_MASK 0x01000000L
+//DWB_MEM_PWR_CTRL
+#define DWB_MEM_PWR_CTRL__DWB_OUT_FIFO_MEM_PWR_FORCE__SHIFT 0x8
+#define DWB_MEM_PWR_CTRL__DWB_OUT_FIFO_MEM_PWR_DIS__SHIFT 0xa
+#define DWB_MEM_PWR_CTRL__DWB_OUT_FIFO_MEM_PWR_STATE__SHIFT 0xc
+#define DWB_MEM_PWR_CTRL__DWB_OGAM_LUT_MEM_PWR_FORCE__SHIFT 0x10
+#define DWB_MEM_PWR_CTRL__DWB_OGAM_LUT_MEM_PWR_DIS__SHIFT 0x12
+#define DWB_MEM_PWR_CTRL__DWB_OGAM_LUT_MEM_PWR_STATE__SHIFT 0x14
+#define DWB_MEM_PWR_CTRL__DWB_OUT_FIFO_MEM_PWR_FORCE_MASK 0x00000300L
+#define DWB_MEM_PWR_CTRL__DWB_OUT_FIFO_MEM_PWR_DIS_MASK 0x00000400L
+#define DWB_MEM_PWR_CTRL__DWB_OUT_FIFO_MEM_PWR_STATE_MASK 0x00003000L
+#define DWB_MEM_PWR_CTRL__DWB_OGAM_LUT_MEM_PWR_FORCE_MASK 0x00030000L
+#define DWB_MEM_PWR_CTRL__DWB_OGAM_LUT_MEM_PWR_DIS_MASK 0x00040000L
+#define DWB_MEM_PWR_CTRL__DWB_OGAM_LUT_MEM_PWR_STATE_MASK 0x00300000L
+//FC_MODE_CTRL
+#define FC_MODE_CTRL__FC_FRAME_CAPTURE_EN__SHIFT 0x0
+#define FC_MODE_CTRL__FC_FRAME_CAPTURE_RATE__SHIFT 0x4
+#define FC_MODE_CTRL__FC_WINDOW_CROP_EN__SHIFT 0x8
+#define FC_MODE_CTRL__FC_EYE_SELECTION__SHIFT 0xc
+#define FC_MODE_CTRL__FC_STEREO_EYE_POLARITY__SHIFT 0x10
+#define FC_MODE_CTRL__FC_NEW_CONTENT__SHIFT 0x14
+#define FC_MODE_CTRL__FC_FRAME_CAPTURE_EN_CURRENT__SHIFT 0x1f
+#define FC_MODE_CTRL__FC_FRAME_CAPTURE_EN_MASK 0x00000001L
+#define FC_MODE_CTRL__FC_FRAME_CAPTURE_RATE_MASK 0x00000030L
+#define FC_MODE_CTRL__FC_WINDOW_CROP_EN_MASK 0x00000100L
+#define FC_MODE_CTRL__FC_EYE_SELECTION_MASK 0x00003000L
+#define FC_MODE_CTRL__FC_STEREO_EYE_POLARITY_MASK 0x00010000L
+#define FC_MODE_CTRL__FC_NEW_CONTENT_MASK 0x00100000L
+#define FC_MODE_CTRL__FC_FRAME_CAPTURE_EN_CURRENT_MASK 0x80000000L
+//FC_FLOW_CTRL
+#define FC_FLOW_CTRL__FC_FIRST_PIXEL_DELAY_COUNT__SHIFT 0x0
+#define FC_FLOW_CTRL__FC_FIRST_PIXEL_DELAY_COUNT_MASK 0x00000FFFL
+//FC_WINDOW_START
+#define FC_WINDOW_START__FC_WINDOW_START_X__SHIFT 0x0
+#define FC_WINDOW_START__FC_WINDOW_START_Y__SHIFT 0x10
+#define FC_WINDOW_START__FC_WINDOW_START_X_MASK 0x00001FFFL
+#define FC_WINDOW_START__FC_WINDOW_START_Y_MASK 0x1FFF0000L
+//FC_WINDOW_SIZE
+#define FC_WINDOW_SIZE__FC_WINDOW_WIDTH__SHIFT 0x0
+#define FC_WINDOW_SIZE__FC_WINDOW_HEIGHT__SHIFT 0x10
+#define FC_WINDOW_SIZE__FC_WINDOW_WIDTH_MASK 0x00000FFFL
+#define FC_WINDOW_SIZE__FC_WINDOW_HEIGHT_MASK 0x0FFF0000L
+//FC_SOURCE_SIZE
+#define FC_SOURCE_SIZE__FC_SOURCE_WIDTH__SHIFT 0x0
+#define FC_SOURCE_SIZE__FC_SOURCE_HEIGHT__SHIFT 0x10
+#define FC_SOURCE_SIZE__FC_SOURCE_WIDTH_MASK 0x00007FFFL
+#define FC_SOURCE_SIZE__FC_SOURCE_HEIGHT_MASK 0x7FFF0000L
+//DWB_UPDATE_CTRL
+#define DWB_UPDATE_CTRL__DWB_UPDATE_LOCK__SHIFT 0x0
+#define DWB_UPDATE_CTRL__DWB_UPDATE_PENDING__SHIFT 0x4
+#define DWB_UPDATE_CTRL__DWB_UPDATE_LOCK_MASK 0x00000001L
+#define DWB_UPDATE_CTRL__DWB_UPDATE_PENDING_MASK 0x00000010L
+//DWB_CRC_CTRL
+#define DWB_CRC_CTRL__DWB_CRC_EN__SHIFT 0x0
+#define DWB_CRC_CTRL__DWB_CRC_CONT_EN__SHIFT 0x4
+#define DWB_CRC_CTRL__DWB_CRC_SRC_SEL__SHIFT 0x8
+#define DWB_CRC_CTRL__DWB_CRC_EN_MASK 0x00000001L
+#define DWB_CRC_CTRL__DWB_CRC_CONT_EN_MASK 0x00000010L
+#define DWB_CRC_CTRL__DWB_CRC_SRC_SEL_MASK 0x00000300L
+//DWB_CRC_MASK_R_G
+#define DWB_CRC_MASK_R_G__DWB_CRC_RED_MASK__SHIFT 0x0
+#define DWB_CRC_MASK_R_G__DWB_CRC_GREEN_MASK__SHIFT 0x10
+#define DWB_CRC_MASK_R_G__DWB_CRC_RED_MASK_MASK 0x0000FFFFL
+#define DWB_CRC_MASK_R_G__DWB_CRC_GREEN_MASK_MASK 0xFFFF0000L
+//DWB_CRC_MASK_B_A
+#define DWB_CRC_MASK_B_A__DWB_CRC_BLUE_MASK__SHIFT 0x0
+#define DWB_CRC_MASK_B_A__DWB_CRC_A_MASK__SHIFT 0x10
+#define DWB_CRC_MASK_B_A__DWB_CRC_BLUE_MASK_MASK 0x0000FFFFL
+#define DWB_CRC_MASK_B_A__DWB_CRC_A_MASK_MASK 0xFFFF0000L
+//DWB_CRC_VAL_R_G
+#define DWB_CRC_VAL_R_G__DWB_CRC_SIG_RED__SHIFT 0x0
+#define DWB_CRC_VAL_R_G__DWB_CRC_SIG_GREEN__SHIFT 0x10
+#define DWB_CRC_VAL_R_G__DWB_CRC_SIG_RED_MASK 0x0000FFFFL
+#define DWB_CRC_VAL_R_G__DWB_CRC_SIG_GREEN_MASK 0xFFFF0000L
+//DWB_CRC_VAL_B_A
+#define DWB_CRC_VAL_B_A__DWB_CRC_SIG_BLUE__SHIFT 0x0
+#define DWB_CRC_VAL_B_A__DWB_CRC_SIG_A__SHIFT 0x10
+#define DWB_CRC_VAL_B_A__DWB_CRC_SIG_BLUE_MASK 0x0000FFFFL
+#define DWB_CRC_VAL_B_A__DWB_CRC_SIG_A_MASK 0xFFFF0000L
+//DWB_OUT_CTRL
+#define DWB_OUT_CTRL__OUT_FORMAT__SHIFT 0x0
+#define DWB_OUT_CTRL__OUT_DENORM__SHIFT 0x4
+#define DWB_OUT_CTRL__OUT_MAX__SHIFT 0x8
+#define DWB_OUT_CTRL__OUT_MIN__SHIFT 0x14
+#define DWB_OUT_CTRL__OUT_FORMAT_MASK 0x00000003L
+#define DWB_OUT_CTRL__OUT_DENORM_MASK 0x00000030L
+#define DWB_OUT_CTRL__OUT_MAX_MASK 0x0003FF00L
+#define DWB_OUT_CTRL__OUT_MIN_MASK 0x3FF00000L
+//DWB_MMHUBBUB_BACKPRESSURE_CNT_EN
+#define DWB_MMHUBBUB_BACKPRESSURE_CNT_EN__DWB_MMHUBBUB_BACKPRESSURE_CNT_EN__SHIFT 0x0
+#define DWB_MMHUBBUB_BACKPRESSURE_CNT_EN__DWB_MMHUBBUB_BACKPRESSURE_CNT_EN_MASK 0x00000001L
+//DWB_MMHUBBUB_BACKPRESSURE_CNT
+#define DWB_MMHUBBUB_BACKPRESSURE_CNT__DWB_MMHUBBUB_MAX_BACKPRESSURE__SHIFT 0x0
+#define DWB_MMHUBBUB_BACKPRESSURE_CNT__DWB_MMHUBBUB_MAX_BACKPRESSURE_MASK 0x0000FFFFL
+//DWB_HOST_READ_CONTROL
+#define DWB_HOST_READ_CONTROL__DWB_HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define DWB_HOST_READ_CONTROL__DWB_HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+//DWB_OVERFLOW_STATUS
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_FLAG__SHIFT 0x0
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_ACK__SHIFT 0x8
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_MASK__SHIFT 0xc
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_INT_STATUS__SHIFT 0x10
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_INT_TYPE__SHIFT 0x14
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_FLAG_MASK 0x00000001L
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_ACK_MASK 0x00000100L
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_MASK_MASK 0x00001000L
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_INT_STATUS_MASK 0x00010000L
+#define DWB_OVERFLOW_STATUS__DWB_DATA_OVERFLOW_INT_TYPE_MASK 0x00100000L
+//DWB_OVERFLOW_COUNTER
+#define DWB_OVERFLOW_COUNTER__DWB_DATA_OVERFLOW_TYPE__SHIFT 0x0
+#define DWB_OVERFLOW_COUNTER__DWB_DATA_OVERFLOW_OUT_X_CNT__SHIFT 0x4
+#define DWB_OVERFLOW_COUNTER__DWB_DATA_OVERFLOW_OUT_Y_CNT__SHIFT 0x10
+#define DWB_OVERFLOW_COUNTER__DWB_DATA_OVERFLOW_TYPE_MASK 0x00000003L
+#define DWB_OVERFLOW_COUNTER__DWB_DATA_OVERFLOW_OUT_X_CNT_MASK 0x0000FFF0L
+#define DWB_OVERFLOW_COUNTER__DWB_DATA_OVERFLOW_OUT_Y_CNT_MASK 0x0FFF0000L
+//DWB_SOFT_RESET
+#define DWB_SOFT_RESET__DWB_SOFT_RESET__SHIFT 0x0
+#define DWB_SOFT_RESET__DWB_SOFT_RESET_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_wb0_dispdec_dwbcp_dispdec
+//DWB_HDR_MULT_COEF
+#define DWB_HDR_MULT_COEF__DWB_HDR_MULT_COEF__SHIFT 0x0
+#define DWB_HDR_MULT_COEF__DWB_HDR_MULT_COEF_MASK 0x0007FFFFL
+//DWB_GAMUT_REMAP_MODE
+#define DWB_GAMUT_REMAP_MODE__DWB_GAMUT_REMAP_MODE__SHIFT 0x0
+#define DWB_GAMUT_REMAP_MODE__DWB_GAMUT_REMAP_MODE_CURRENT__SHIFT 0x18
+#define DWB_GAMUT_REMAP_MODE__DWB_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define DWB_GAMUT_REMAP_MODE__DWB_GAMUT_REMAP_MODE_CURRENT_MASK 0x03000000L
+//DWB_GAMUT_REMAP_COEF_FORMAT
+#define DWB_GAMUT_REMAP_COEF_FORMAT__DWB_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x0
+#define DWB_GAMUT_REMAP_COEF_FORMAT__DWB_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000001L
+//DWB_GAMUT_REMAPA_C11_C12
+#define DWB_GAMUT_REMAPA_C11_C12__DWB_GAMUT_REMAPA_C11__SHIFT 0x0
+#define DWB_GAMUT_REMAPA_C11_C12__DWB_GAMUT_REMAPA_C12__SHIFT 0x10
+#define DWB_GAMUT_REMAPA_C11_C12__DWB_GAMUT_REMAPA_C11_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPA_C11_C12__DWB_GAMUT_REMAPA_C12_MASK 0xFFFF0000L
+//DWB_GAMUT_REMAPA_C13_C14
+#define DWB_GAMUT_REMAPA_C13_C14__DWB_GAMUT_REMAPA_C13__SHIFT 0x0
+#define DWB_GAMUT_REMAPA_C13_C14__DWB_GAMUT_REMAPA_C14__SHIFT 0x10
+#define DWB_GAMUT_REMAPA_C13_C14__DWB_GAMUT_REMAPA_C13_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPA_C13_C14__DWB_GAMUT_REMAPA_C14_MASK 0xFFFF0000L
+//DWB_GAMUT_REMAPA_C21_C22
+#define DWB_GAMUT_REMAPA_C21_C22__DWB_GAMUT_REMAPA_C21__SHIFT 0x0
+#define DWB_GAMUT_REMAPA_C21_C22__DWB_GAMUT_REMAPA_C22__SHIFT 0x10
+#define DWB_GAMUT_REMAPA_C21_C22__DWB_GAMUT_REMAPA_C21_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPA_C21_C22__DWB_GAMUT_REMAPA_C22_MASK 0xFFFF0000L
+//DWB_GAMUT_REMAPA_C23_C24
+#define DWB_GAMUT_REMAPA_C23_C24__DWB_GAMUT_REMAPA_C23__SHIFT 0x0
+#define DWB_GAMUT_REMAPA_C23_C24__DWB_GAMUT_REMAPA_C24__SHIFT 0x10
+#define DWB_GAMUT_REMAPA_C23_C24__DWB_GAMUT_REMAPA_C23_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPA_C23_C24__DWB_GAMUT_REMAPA_C24_MASK 0xFFFF0000L
+//DWB_GAMUT_REMAPA_C31_C32
+#define DWB_GAMUT_REMAPA_C31_C32__DWB_GAMUT_REMAPA_C31__SHIFT 0x0
+#define DWB_GAMUT_REMAPA_C31_C32__DWB_GAMUT_REMAPA_C32__SHIFT 0x10
+#define DWB_GAMUT_REMAPA_C31_C32__DWB_GAMUT_REMAPA_C31_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPA_C31_C32__DWB_GAMUT_REMAPA_C32_MASK 0xFFFF0000L
+//DWB_GAMUT_REMAPA_C33_C34
+#define DWB_GAMUT_REMAPA_C33_C34__DWB_GAMUT_REMAPA_C33__SHIFT 0x0
+#define DWB_GAMUT_REMAPA_C33_C34__DWB_GAMUT_REMAPA_C34__SHIFT 0x10
+#define DWB_GAMUT_REMAPA_C33_C34__DWB_GAMUT_REMAPA_C33_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPA_C33_C34__DWB_GAMUT_REMAPA_C34_MASK 0xFFFF0000L
+//DWB_GAMUT_REMAPB_C11_C12
+#define DWB_GAMUT_REMAPB_C11_C12__DWB_GAMUT_REMAPB_C11__SHIFT 0x0
+#define DWB_GAMUT_REMAPB_C11_C12__DWB_GAMUT_REMAPB_C12__SHIFT 0x10
+#define DWB_GAMUT_REMAPB_C11_C12__DWB_GAMUT_REMAPB_C11_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPB_C11_C12__DWB_GAMUT_REMAPB_C12_MASK 0xFFFF0000L
+//DWB_GAMUT_REMAPB_C13_C14
+#define DWB_GAMUT_REMAPB_C13_C14__DWB_GAMUT_REMAPB_C13__SHIFT 0x0
+#define DWB_GAMUT_REMAPB_C13_C14__DWB_GAMUT_REMAPB_C14__SHIFT 0x10
+#define DWB_GAMUT_REMAPB_C13_C14__DWB_GAMUT_REMAPB_C13_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPB_C13_C14__DWB_GAMUT_REMAPB_C14_MASK 0xFFFF0000L
+//DWB_GAMUT_REMAPB_C21_C22
+#define DWB_GAMUT_REMAPB_C21_C22__DWB_GAMUT_REMAPB_C21__SHIFT 0x0
+#define DWB_GAMUT_REMAPB_C21_C22__DWB_GAMUT_REMAPB_C22__SHIFT 0x10
+#define DWB_GAMUT_REMAPB_C21_C22__DWB_GAMUT_REMAPB_C21_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPB_C21_C22__DWB_GAMUT_REMAPB_C22_MASK 0xFFFF0000L
+//DWB_GAMUT_REMAPB_C23_C24
+#define DWB_GAMUT_REMAPB_C23_C24__DWB_GAMUT_REMAPB_C23__SHIFT 0x0
+#define DWB_GAMUT_REMAPB_C23_C24__DWB_GAMUT_REMAPB_C24__SHIFT 0x10
+#define DWB_GAMUT_REMAPB_C23_C24__DWB_GAMUT_REMAPB_C23_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPB_C23_C24__DWB_GAMUT_REMAPB_C24_MASK 0xFFFF0000L
+//DWB_GAMUT_REMAPB_C31_C32
+#define DWB_GAMUT_REMAPB_C31_C32__DWB_GAMUT_REMAPB_C31__SHIFT 0x0
+#define DWB_GAMUT_REMAPB_C31_C32__DWB_GAMUT_REMAPB_C32__SHIFT 0x10
+#define DWB_GAMUT_REMAPB_C31_C32__DWB_GAMUT_REMAPB_C31_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPB_C31_C32__DWB_GAMUT_REMAPB_C32_MASK 0xFFFF0000L
+//DWB_GAMUT_REMAPB_C33_C34
+#define DWB_GAMUT_REMAPB_C33_C34__DWB_GAMUT_REMAPB_C33__SHIFT 0x0
+#define DWB_GAMUT_REMAPB_C33_C34__DWB_GAMUT_REMAPB_C34__SHIFT 0x10
+#define DWB_GAMUT_REMAPB_C33_C34__DWB_GAMUT_REMAPB_C33_MASK 0x0000FFFFL
+#define DWB_GAMUT_REMAPB_C33_C34__DWB_GAMUT_REMAPB_C34_MASK 0xFFFF0000L
+//DWB_OGAM_CONTROL
+#define DWB_OGAM_CONTROL__DWB_OGAM_MODE__SHIFT 0x0
+#define DWB_OGAM_CONTROL__DWB_OGAM_SELECT__SHIFT 0x4
+#define DWB_OGAM_CONTROL__DWB_OGAM_PWL_DISABLE__SHIFT 0x8
+#define DWB_OGAM_CONTROL__DWB_OGAM_MODE_CURRENT__SHIFT 0x18
+#define DWB_OGAM_CONTROL__DWB_OGAM_SELECT_CURRENT__SHIFT 0x1c
+#define DWB_OGAM_CONTROL__DWB_OGAM_MODE_MASK 0x00000003L
+#define DWB_OGAM_CONTROL__DWB_OGAM_SELECT_MASK 0x00000010L
+#define DWB_OGAM_CONTROL__DWB_OGAM_PWL_DISABLE_MASK 0x00000100L
+#define DWB_OGAM_CONTROL__DWB_OGAM_MODE_CURRENT_MASK 0x03000000L
+#define DWB_OGAM_CONTROL__DWB_OGAM_SELECT_CURRENT_MASK 0x10000000L
+//DWB_OGAM_LUT_INDEX
+#define DWB_OGAM_LUT_INDEX__DWB_OGAM_LUT_INDEX__SHIFT 0x0
+#define DWB_OGAM_LUT_INDEX__DWB_OGAM_LUT_INDEX_MASK 0x000001FFL
+//DWB_OGAM_LUT_DATA
+#define DWB_OGAM_LUT_DATA__DWB_OGAM_LUT_DATA__SHIFT 0x0
+#define DWB_OGAM_LUT_DATA__DWB_OGAM_LUT_DATA_MASK 0x0003FFFFL
+//DWB_OGAM_LUT_CONTROL
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_COLOR_SEL__SHIFT 0x4
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_DBG__SHIFT 0x8
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_HOST_SEL__SHIFT 0xc
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_CONFIG_MODE__SHIFT 0x10
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_COLOR_SEL_MASK 0x00000030L
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_DBG_MASK 0x00000100L
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_HOST_SEL_MASK 0x00001000L
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_CONFIG_MODE_MASK 0x00010000L
+//DWB_OGAM_RAMA_START_CNTL_B
+#define DWB_OGAM_RAMA_START_CNTL_B__DWB_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define DWB_OGAM_RAMA_START_CNTL_B__DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define DWB_OGAM_RAMA_START_CNTL_B__DWB_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMA_START_CNTL_B__DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//DWB_OGAM_RAMA_START_CNTL_G
+#define DWB_OGAM_RAMA_START_CNTL_G__DWB_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define DWB_OGAM_RAMA_START_CNTL_G__DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define DWB_OGAM_RAMA_START_CNTL_G__DWB_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMA_START_CNTL_G__DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//DWB_OGAM_RAMA_START_CNTL_R
+#define DWB_OGAM_RAMA_START_CNTL_R__DWB_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define DWB_OGAM_RAMA_START_CNTL_R__DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define DWB_OGAM_RAMA_START_CNTL_R__DWB_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMA_START_CNTL_R__DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//DWB_OGAM_RAMA_START_BASE_CNTL_B
+#define DWB_OGAM_RAMA_START_BASE_CNTL_B__DWB_OGAM_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define DWB_OGAM_RAMA_START_BASE_CNTL_B__DWB_OGAM_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//DWB_OGAM_RAMA_START_SLOPE_CNTL_B
+#define DWB_OGAM_RAMA_START_SLOPE_CNTL_B__DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define DWB_OGAM_RAMA_START_SLOPE_CNTL_B__DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//DWB_OGAM_RAMA_START_BASE_CNTL_G
+#define DWB_OGAM_RAMA_START_BASE_CNTL_G__DWB_OGAM_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define DWB_OGAM_RAMA_START_BASE_CNTL_G__DWB_OGAM_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//DWB_OGAM_RAMA_START_SLOPE_CNTL_G
+#define DWB_OGAM_RAMA_START_SLOPE_CNTL_G__DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define DWB_OGAM_RAMA_START_SLOPE_CNTL_G__DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//DWB_OGAM_RAMA_START_BASE_CNTL_R
+#define DWB_OGAM_RAMA_START_BASE_CNTL_R__DWB_OGAM_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define DWB_OGAM_RAMA_START_BASE_CNTL_R__DWB_OGAM_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//DWB_OGAM_RAMA_START_SLOPE_CNTL_R
+#define DWB_OGAM_RAMA_START_SLOPE_CNTL_R__DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define DWB_OGAM_RAMA_START_SLOPE_CNTL_R__DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//DWB_OGAM_RAMA_END_CNTL1_B
+#define DWB_OGAM_RAMA_END_CNTL1_B__DWB_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define DWB_OGAM_RAMA_END_CNTL1_B__DWB_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//DWB_OGAM_RAMA_END_CNTL2_B
+#define DWB_OGAM_RAMA_END_CNTL2_B__DWB_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define DWB_OGAM_RAMA_END_CNTL2_B__DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define DWB_OGAM_RAMA_END_CNTL2_B__DWB_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define DWB_OGAM_RAMA_END_CNTL2_B__DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//DWB_OGAM_RAMA_END_CNTL1_G
+#define DWB_OGAM_RAMA_END_CNTL1_G__DWB_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define DWB_OGAM_RAMA_END_CNTL1_G__DWB_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//DWB_OGAM_RAMA_END_CNTL2_G
+#define DWB_OGAM_RAMA_END_CNTL2_G__DWB_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define DWB_OGAM_RAMA_END_CNTL2_G__DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define DWB_OGAM_RAMA_END_CNTL2_G__DWB_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define DWB_OGAM_RAMA_END_CNTL2_G__DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//DWB_OGAM_RAMA_END_CNTL1_R
+#define DWB_OGAM_RAMA_END_CNTL1_R__DWB_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define DWB_OGAM_RAMA_END_CNTL1_R__DWB_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//DWB_OGAM_RAMA_END_CNTL2_R
+#define DWB_OGAM_RAMA_END_CNTL2_R__DWB_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define DWB_OGAM_RAMA_END_CNTL2_R__DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define DWB_OGAM_RAMA_END_CNTL2_R__DWB_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define DWB_OGAM_RAMA_END_CNTL2_R__DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//DWB_OGAM_RAMA_OFFSET_B
+#define DWB_OGAM_RAMA_OFFSET_B__DWB_OGAM_RAMA_OFFSET_B__SHIFT 0x0
+#define DWB_OGAM_RAMA_OFFSET_B__DWB_OGAM_RAMA_OFFSET_B_MASK 0x0007FFFFL
+//DWB_OGAM_RAMA_OFFSET_G
+#define DWB_OGAM_RAMA_OFFSET_G__DWB_OGAM_RAMA_OFFSET_G__SHIFT 0x0
+#define DWB_OGAM_RAMA_OFFSET_G__DWB_OGAM_RAMA_OFFSET_G_MASK 0x0007FFFFL
+//DWB_OGAM_RAMA_OFFSET_R
+#define DWB_OGAM_RAMA_OFFSET_R__DWB_OGAM_RAMA_OFFSET_R__SHIFT 0x0
+#define DWB_OGAM_RAMA_OFFSET_R__DWB_OGAM_RAMA_OFFSET_R_MASK 0x0007FFFFL
+//DWB_OGAM_RAMA_REGION_0_1
+#define DWB_OGAM_RAMA_REGION_0_1__DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_0_1__DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_0_1__DWB_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_0_1__DWB_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_0_1__DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_0_1__DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_0_1__DWB_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_0_1__DWB_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_2_3
+#define DWB_OGAM_RAMA_REGION_2_3__DWB_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_2_3__DWB_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_2_3__DWB_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_2_3__DWB_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_2_3__DWB_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_2_3__DWB_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_2_3__DWB_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_2_3__DWB_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_4_5
+#define DWB_OGAM_RAMA_REGION_4_5__DWB_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_4_5__DWB_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_4_5__DWB_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_4_5__DWB_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_4_5__DWB_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_4_5__DWB_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_4_5__DWB_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_4_5__DWB_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_6_7
+#define DWB_OGAM_RAMA_REGION_6_7__DWB_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_6_7__DWB_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_6_7__DWB_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_6_7__DWB_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_6_7__DWB_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_6_7__DWB_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_6_7__DWB_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_6_7__DWB_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_8_9
+#define DWB_OGAM_RAMA_REGION_8_9__DWB_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_8_9__DWB_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_8_9__DWB_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_8_9__DWB_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_8_9__DWB_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_8_9__DWB_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_8_9__DWB_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_8_9__DWB_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_10_11
+#define DWB_OGAM_RAMA_REGION_10_11__DWB_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_10_11__DWB_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_10_11__DWB_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_10_11__DWB_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_10_11__DWB_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_10_11__DWB_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_10_11__DWB_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_10_11__DWB_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_12_13
+#define DWB_OGAM_RAMA_REGION_12_13__DWB_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_12_13__DWB_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_12_13__DWB_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_12_13__DWB_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_12_13__DWB_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_12_13__DWB_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_12_13__DWB_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_12_13__DWB_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_14_15
+#define DWB_OGAM_RAMA_REGION_14_15__DWB_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_14_15__DWB_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_14_15__DWB_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_14_15__DWB_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_14_15__DWB_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_14_15__DWB_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_14_15__DWB_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_14_15__DWB_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_16_17
+#define DWB_OGAM_RAMA_REGION_16_17__DWB_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_16_17__DWB_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_16_17__DWB_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_16_17__DWB_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_16_17__DWB_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_16_17__DWB_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_16_17__DWB_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_16_17__DWB_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_18_19
+#define DWB_OGAM_RAMA_REGION_18_19__DWB_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_18_19__DWB_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_18_19__DWB_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_18_19__DWB_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_18_19__DWB_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_18_19__DWB_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_18_19__DWB_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_18_19__DWB_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_20_21
+#define DWB_OGAM_RAMA_REGION_20_21__DWB_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_20_21__DWB_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_20_21__DWB_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_20_21__DWB_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_20_21__DWB_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_20_21__DWB_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_20_21__DWB_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_20_21__DWB_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_22_23
+#define DWB_OGAM_RAMA_REGION_22_23__DWB_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_22_23__DWB_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_22_23__DWB_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_22_23__DWB_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_22_23__DWB_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_22_23__DWB_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_22_23__DWB_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_22_23__DWB_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_24_25
+#define DWB_OGAM_RAMA_REGION_24_25__DWB_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_24_25__DWB_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_24_25__DWB_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_24_25__DWB_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_24_25__DWB_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_24_25__DWB_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_24_25__DWB_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_24_25__DWB_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_26_27
+#define DWB_OGAM_RAMA_REGION_26_27__DWB_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_26_27__DWB_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_26_27__DWB_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_26_27__DWB_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_26_27__DWB_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_26_27__DWB_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_26_27__DWB_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_26_27__DWB_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_28_29
+#define DWB_OGAM_RAMA_REGION_28_29__DWB_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_28_29__DWB_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_28_29__DWB_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_28_29__DWB_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_28_29__DWB_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_28_29__DWB_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_28_29__DWB_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_28_29__DWB_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_30_31
+#define DWB_OGAM_RAMA_REGION_30_31__DWB_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_30_31__DWB_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_30_31__DWB_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_30_31__DWB_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_30_31__DWB_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_30_31__DWB_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_30_31__DWB_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_30_31__DWB_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMA_REGION_32_33
+#define DWB_OGAM_RAMA_REGION_32_33__DWB_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMA_REGION_32_33__DWB_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMA_REGION_32_33__DWB_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMA_REGION_32_33__DWB_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMA_REGION_32_33__DWB_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMA_REGION_32_33__DWB_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMA_REGION_32_33__DWB_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMA_REGION_32_33__DWB_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_START_CNTL_B
+#define DWB_OGAM_RAMB_START_CNTL_B__DWB_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define DWB_OGAM_RAMB_START_CNTL_B__DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define DWB_OGAM_RAMB_START_CNTL_B__DWB_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMB_START_CNTL_B__DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//DWB_OGAM_RAMB_START_CNTL_G
+#define DWB_OGAM_RAMB_START_CNTL_G__DWB_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define DWB_OGAM_RAMB_START_CNTL_G__DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define DWB_OGAM_RAMB_START_CNTL_G__DWB_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMB_START_CNTL_G__DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//DWB_OGAM_RAMB_START_CNTL_R
+#define DWB_OGAM_RAMB_START_CNTL_R__DWB_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define DWB_OGAM_RAMB_START_CNTL_R__DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define DWB_OGAM_RAMB_START_CNTL_R__DWB_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define DWB_OGAM_RAMB_START_CNTL_R__DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//DWB_OGAM_RAMB_START_BASE_CNTL_B
+#define DWB_OGAM_RAMB_START_BASE_CNTL_B__DWB_OGAM_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define DWB_OGAM_RAMB_START_BASE_CNTL_B__DWB_OGAM_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//DWB_OGAM_RAMB_START_SLOPE_CNTL_B
+#define DWB_OGAM_RAMB_START_SLOPE_CNTL_B__DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define DWB_OGAM_RAMB_START_SLOPE_CNTL_B__DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//DWB_OGAM_RAMB_START_BASE_CNTL_G
+#define DWB_OGAM_RAMB_START_BASE_CNTL_G__DWB_OGAM_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define DWB_OGAM_RAMB_START_BASE_CNTL_G__DWB_OGAM_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//DWB_OGAM_RAMB_START_SLOPE_CNTL_G
+#define DWB_OGAM_RAMB_START_SLOPE_CNTL_G__DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define DWB_OGAM_RAMB_START_SLOPE_CNTL_G__DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//DWB_OGAM_RAMB_START_BASE_CNTL_R
+#define DWB_OGAM_RAMB_START_BASE_CNTL_R__DWB_OGAM_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define DWB_OGAM_RAMB_START_BASE_CNTL_R__DWB_OGAM_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//DWB_OGAM_RAMB_START_SLOPE_CNTL_R
+#define DWB_OGAM_RAMB_START_SLOPE_CNTL_R__DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define DWB_OGAM_RAMB_START_SLOPE_CNTL_R__DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//DWB_OGAM_RAMB_END_CNTL1_B
+#define DWB_OGAM_RAMB_END_CNTL1_B__DWB_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define DWB_OGAM_RAMB_END_CNTL1_B__DWB_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//DWB_OGAM_RAMB_END_CNTL2_B
+#define DWB_OGAM_RAMB_END_CNTL2_B__DWB_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define DWB_OGAM_RAMB_END_CNTL2_B__DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define DWB_OGAM_RAMB_END_CNTL2_B__DWB_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define DWB_OGAM_RAMB_END_CNTL2_B__DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//DWB_OGAM_RAMB_END_CNTL1_G
+#define DWB_OGAM_RAMB_END_CNTL1_G__DWB_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define DWB_OGAM_RAMB_END_CNTL1_G__DWB_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//DWB_OGAM_RAMB_END_CNTL2_G
+#define DWB_OGAM_RAMB_END_CNTL2_G__DWB_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define DWB_OGAM_RAMB_END_CNTL2_G__DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define DWB_OGAM_RAMB_END_CNTL2_G__DWB_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define DWB_OGAM_RAMB_END_CNTL2_G__DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//DWB_OGAM_RAMB_END_CNTL1_R
+#define DWB_OGAM_RAMB_END_CNTL1_R__DWB_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define DWB_OGAM_RAMB_END_CNTL1_R__DWB_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//DWB_OGAM_RAMB_END_CNTL2_R
+#define DWB_OGAM_RAMB_END_CNTL2_R__DWB_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define DWB_OGAM_RAMB_END_CNTL2_R__DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define DWB_OGAM_RAMB_END_CNTL2_R__DWB_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define DWB_OGAM_RAMB_END_CNTL2_R__DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//DWB_OGAM_RAMB_OFFSET_B
+#define DWB_OGAM_RAMB_OFFSET_B__DWB_OGAM_RAMB_OFFSET_B__SHIFT 0x0
+#define DWB_OGAM_RAMB_OFFSET_B__DWB_OGAM_RAMB_OFFSET_B_MASK 0x0007FFFFL
+//DWB_OGAM_RAMB_OFFSET_G
+#define DWB_OGAM_RAMB_OFFSET_G__DWB_OGAM_RAMB_OFFSET_G__SHIFT 0x0
+#define DWB_OGAM_RAMB_OFFSET_G__DWB_OGAM_RAMB_OFFSET_G_MASK 0x0007FFFFL
+//DWB_OGAM_RAMB_OFFSET_R
+#define DWB_OGAM_RAMB_OFFSET_R__DWB_OGAM_RAMB_OFFSET_R__SHIFT 0x0
+#define DWB_OGAM_RAMB_OFFSET_R__DWB_OGAM_RAMB_OFFSET_R_MASK 0x0007FFFFL
+//DWB_OGAM_RAMB_REGION_0_1
+#define DWB_OGAM_RAMB_REGION_0_1__DWB_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_0_1__DWB_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_0_1__DWB_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_0_1__DWB_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_0_1__DWB_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_0_1__DWB_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_0_1__DWB_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_0_1__DWB_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_2_3
+#define DWB_OGAM_RAMB_REGION_2_3__DWB_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_2_3__DWB_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_2_3__DWB_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_2_3__DWB_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_2_3__DWB_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_2_3__DWB_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_2_3__DWB_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_2_3__DWB_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_4_5
+#define DWB_OGAM_RAMB_REGION_4_5__DWB_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_4_5__DWB_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_4_5__DWB_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_4_5__DWB_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_4_5__DWB_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_4_5__DWB_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_4_5__DWB_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_4_5__DWB_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_6_7
+#define DWB_OGAM_RAMB_REGION_6_7__DWB_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_6_7__DWB_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_6_7__DWB_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_6_7__DWB_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_6_7__DWB_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_6_7__DWB_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_6_7__DWB_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_6_7__DWB_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_8_9
+#define DWB_OGAM_RAMB_REGION_8_9__DWB_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_8_9__DWB_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_8_9__DWB_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_8_9__DWB_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_8_9__DWB_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_8_9__DWB_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_8_9__DWB_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_8_9__DWB_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_10_11
+#define DWB_OGAM_RAMB_REGION_10_11__DWB_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_10_11__DWB_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_10_11__DWB_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_10_11__DWB_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_10_11__DWB_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_10_11__DWB_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_10_11__DWB_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_10_11__DWB_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_12_13
+#define DWB_OGAM_RAMB_REGION_12_13__DWB_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_12_13__DWB_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_12_13__DWB_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_12_13__DWB_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_12_13__DWB_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_12_13__DWB_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_12_13__DWB_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_12_13__DWB_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_14_15
+#define DWB_OGAM_RAMB_REGION_14_15__DWB_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_14_15__DWB_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_14_15__DWB_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_14_15__DWB_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_14_15__DWB_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_14_15__DWB_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_14_15__DWB_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_14_15__DWB_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_16_17
+#define DWB_OGAM_RAMB_REGION_16_17__DWB_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_16_17__DWB_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_16_17__DWB_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_16_17__DWB_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_16_17__DWB_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_16_17__DWB_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_16_17__DWB_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_16_17__DWB_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_18_19
+#define DWB_OGAM_RAMB_REGION_18_19__DWB_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_18_19__DWB_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_18_19__DWB_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_18_19__DWB_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_18_19__DWB_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_18_19__DWB_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_18_19__DWB_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_18_19__DWB_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_20_21
+#define DWB_OGAM_RAMB_REGION_20_21__DWB_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_20_21__DWB_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_20_21__DWB_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_20_21__DWB_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_20_21__DWB_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_20_21__DWB_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_20_21__DWB_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_20_21__DWB_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_22_23
+#define DWB_OGAM_RAMB_REGION_22_23__DWB_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_22_23__DWB_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_22_23__DWB_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_22_23__DWB_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_22_23__DWB_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_22_23__DWB_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_22_23__DWB_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_22_23__DWB_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_24_25
+#define DWB_OGAM_RAMB_REGION_24_25__DWB_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_24_25__DWB_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_24_25__DWB_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_24_25__DWB_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_24_25__DWB_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_24_25__DWB_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_24_25__DWB_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_24_25__DWB_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_26_27
+#define DWB_OGAM_RAMB_REGION_26_27__DWB_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_26_27__DWB_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_26_27__DWB_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_26_27__DWB_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_26_27__DWB_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_26_27__DWB_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_26_27__DWB_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_26_27__DWB_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_28_29
+#define DWB_OGAM_RAMB_REGION_28_29__DWB_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_28_29__DWB_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_28_29__DWB_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_28_29__DWB_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_28_29__DWB_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_28_29__DWB_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_28_29__DWB_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_28_29__DWB_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_30_31
+#define DWB_OGAM_RAMB_REGION_30_31__DWB_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_30_31__DWB_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_30_31__DWB_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_30_31__DWB_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_30_31__DWB_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_30_31__DWB_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_30_31__DWB_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_30_31__DWB_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//DWB_OGAM_RAMB_REGION_32_33
+#define DWB_OGAM_RAMB_REGION_32_33__DWB_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define DWB_OGAM_RAMB_REGION_32_33__DWB_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define DWB_OGAM_RAMB_REGION_32_33__DWB_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define DWB_OGAM_RAMB_REGION_32_33__DWB_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define DWB_OGAM_RAMB_REGION_32_33__DWB_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define DWB_OGAM_RAMB_REGION_32_33__DWB_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define DWB_OGAM_RAMB_REGION_32_33__DWB_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define DWB_OGAM_RAMB_REGION_32_33__DWB_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+
+
+// addressBlock: dce_dc_wb0_dispdec_wb_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON3_PERFCOUNTER_CNTL
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON3_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON3_PERFCOUNTER_CNTL2
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON3_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON3_PERFCOUNTER_STATE
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON3_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON3_PERFMON_CNTL
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON3_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON3_PERFMON_CNTL2
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON3_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON3_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON3_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON3_PERFMON_CVALUE_LOW
+#define DC_PERFMON3_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON3_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON3_PERFMON_HI
+#define DC_PERFMON3_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON3_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON3_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON3_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON3_PERFMON_LOW
+#define DC_PERFMON3_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON3_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_mmhubbub_mcif_wb0_dispdec
+//MCIF_WB_BUFMGR_SW_CONTROL
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_ENABLE__SHIFT 0x0
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_INT_EN__SHIFT 0x4
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_INT_ACK__SHIFT 0x5
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_SLICE_INT_EN__SHIFT 0x6
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN__SHIFT 0x7
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_LOCK__SHIFT 0x8
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUF_ADDR_FENCE_EN__SHIFT 0x18
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_ENABLE_MASK 0x00000001L
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_INT_EN_MASK 0x00000010L
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_INT_ACK_MASK 0x00000020L
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_SLICE_INT_EN_MASK 0x00000040L
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN_MASK 0x00000080L
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUFMGR_SW_LOCK_MASK 0x00000F00L
+#define MCIF_WB_BUFMGR_SW_CONTROL__MCIF_WB_BUF_ADDR_FENCE_EN_MASK 0x01000000L
+//MCIF_WB_BUFMGR_STATUS
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_SW_INT_STATUS__SHIFT 0x0
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_SW_OVERRUN_INT_STATUS__SHIFT 0x1
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_CUR_BUF__SHIFT 0x4
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUF_DUALSIZE_STATUS__SHIFT 0x7
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_BUFTAG__SHIFT 0x8
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_CUR_LINE_L__SHIFT 0xc
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_NEXT_BUF__SHIFT 0x1c
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_SW_INT_STATUS_MASK 0x00000001L
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_SW_OVERRUN_INT_STATUS_MASK 0x00000002L
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_CUR_BUF_MASK 0x00000070L
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUF_DUALSIZE_STATUS_MASK 0x00000080L
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_CUR_LINE_L_MASK 0x01FFF000L
+#define MCIF_WB_BUFMGR_STATUS__MCIF_WB_BUFMGR_NEXT_BUF_MASK 0x70000000L
+//MCIF_WB_BUF_PITCH
+#define MCIF_WB_BUF_PITCH__MCIF_WB_BUF_LUMA_PITCH__SHIFT 0x8
+#define MCIF_WB_BUF_PITCH__MCIF_WB_BUF_CHROMA_PITCH__SHIFT 0x18
+#define MCIF_WB_BUF_PITCH__MCIF_WB_BUF_LUMA_PITCH_MASK 0x0000FF00L
+#define MCIF_WB_BUF_PITCH__MCIF_WB_BUF_CHROMA_PITCH_MASK 0xFF000000L
+//MCIF_WB_BUF_1_STATUS
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_ACTIVE__SHIFT 0x0
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_SW_LOCKED__SHIFT 0x1
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_VCE_LOCKED__SHIFT 0x2
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_OVERFLOW__SHIFT 0x3
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_DISABLE__SHIFT 0x4
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_MODE__SHIFT 0x5
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_BUFTAG__SHIFT 0x8
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_NXT_BUF__SHIFT 0xc
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_CUR_LINE_L__SHIFT 0x10
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_ACTIVE_MASK 0x00000001L
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_SW_LOCKED_MASK 0x00000002L
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_VCE_LOCKED_MASK 0x00000004L
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_OVERFLOW_MASK 0x00000008L
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_DISABLE_MASK 0x00000010L
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_MODE_MASK 0x000000E0L
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_NXT_BUF_MASK 0x00007000L
+#define MCIF_WB_BUF_1_STATUS__MCIF_WB_BUF_1_CUR_LINE_L_MASK 0x1FFF0000L
+//MCIF_WB_BUF_1_STATUS2
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_NEW_CONTENT__SHIFT 0xd
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_COLOR_DEPTH__SHIFT 0xe
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_TMZ_BLACK_PIXEL__SHIFT 0xf
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_TMZ__SHIFT 0x10
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_Y_OVERRUN__SHIFT 0x11
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_C_OVERRUN__SHIFT 0x12
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_EYE_FLAG__SHIFT 0x13
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_NEW_CONTENT_MASK 0x00002000L
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_COLOR_DEPTH_MASK 0x00004000L
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_TMZ_BLACK_PIXEL_MASK 0x00008000L
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_TMZ_MASK 0x00010000L
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_Y_OVERRUN_MASK 0x00020000L
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_C_OVERRUN_MASK 0x00040000L
+#define MCIF_WB_BUF_1_STATUS2__MCIF_WB_BUF_1_EYE_FLAG_MASK 0x00080000L
+//MCIF_WB_BUF_2_STATUS
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_ACTIVE__SHIFT 0x0
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_SW_LOCKED__SHIFT 0x1
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_VCE_LOCKED__SHIFT 0x2
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_OVERFLOW__SHIFT 0x3
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_DISABLE__SHIFT 0x4
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_MODE__SHIFT 0x5
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_BUFTAG__SHIFT 0x8
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_NXT_BUF__SHIFT 0xc
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_CUR_LINE_L__SHIFT 0x10
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_ACTIVE_MASK 0x00000001L
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_SW_LOCKED_MASK 0x00000002L
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_VCE_LOCKED_MASK 0x00000004L
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_OVERFLOW_MASK 0x00000008L
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_DISABLE_MASK 0x00000010L
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_MODE_MASK 0x000000E0L
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_NXT_BUF_MASK 0x00007000L
+#define MCIF_WB_BUF_2_STATUS__MCIF_WB_BUF_2_CUR_LINE_L_MASK 0x1FFF0000L
+//MCIF_WB_BUF_2_STATUS2
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_NEW_CONTENT__SHIFT 0xd
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_COLOR_DEPTH__SHIFT 0xe
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_TMZ_BLACK_PIXEL__SHIFT 0xf
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_TMZ__SHIFT 0x10
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_Y_OVERRUN__SHIFT 0x11
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_C_OVERRUN__SHIFT 0x12
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_EYE_FLAG__SHIFT 0x13
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_NEW_CONTENT_MASK 0x00002000L
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_COLOR_DEPTH_MASK 0x00004000L
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_TMZ_BLACK_PIXEL_MASK 0x00008000L
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_TMZ_MASK 0x00010000L
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_Y_OVERRUN_MASK 0x00020000L
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_C_OVERRUN_MASK 0x00040000L
+#define MCIF_WB_BUF_2_STATUS2__MCIF_WB_BUF_2_EYE_FLAG_MASK 0x00080000L
+//MCIF_WB_BUF_3_STATUS
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_ACTIVE__SHIFT 0x0
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_SW_LOCKED__SHIFT 0x1
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_VCE_LOCKED__SHIFT 0x2
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_OVERFLOW__SHIFT 0x3
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_DISABLE__SHIFT 0x4
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_MODE__SHIFT 0x5
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_BUFTAG__SHIFT 0x8
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_NXT_BUF__SHIFT 0xc
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_CUR_LINE_L__SHIFT 0x10
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_ACTIVE_MASK 0x00000001L
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_SW_LOCKED_MASK 0x00000002L
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_VCE_LOCKED_MASK 0x00000004L
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_OVERFLOW_MASK 0x00000008L
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_DISABLE_MASK 0x00000010L
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_MODE_MASK 0x000000E0L
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_NXT_BUF_MASK 0x00007000L
+#define MCIF_WB_BUF_3_STATUS__MCIF_WB_BUF_3_CUR_LINE_L_MASK 0x1FFF0000L
+//MCIF_WB_BUF_3_STATUS2
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_NEW_CONTENT__SHIFT 0xd
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_COLOR_DEPTH__SHIFT 0xe
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_TMZ_BLACK_PIXEL__SHIFT 0xf
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_TMZ__SHIFT 0x10
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_Y_OVERRUN__SHIFT 0x11
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_C_OVERRUN__SHIFT 0x12
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_EYE_FLAG__SHIFT 0x13
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_NEW_CONTENT_MASK 0x00002000L
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_COLOR_DEPTH_MASK 0x00004000L
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_TMZ_BLACK_PIXEL_MASK 0x00008000L
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_TMZ_MASK 0x00010000L
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_Y_OVERRUN_MASK 0x00020000L
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_C_OVERRUN_MASK 0x00040000L
+#define MCIF_WB_BUF_3_STATUS2__MCIF_WB_BUF_3_EYE_FLAG_MASK 0x00080000L
+//MCIF_WB_BUF_4_STATUS
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_ACTIVE__SHIFT 0x0
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_SW_LOCKED__SHIFT 0x1
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_VCE_LOCKED__SHIFT 0x2
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_OVERFLOW__SHIFT 0x3
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_DISABLE__SHIFT 0x4
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_MODE__SHIFT 0x5
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_BUFTAG__SHIFT 0x8
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_NXT_BUF__SHIFT 0xc
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_CUR_LINE_L__SHIFT 0x10
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_ACTIVE_MASK 0x00000001L
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_SW_LOCKED_MASK 0x00000002L
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_VCE_LOCKED_MASK 0x00000004L
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_OVERFLOW_MASK 0x00000008L
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_DISABLE_MASK 0x00000010L
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_MODE_MASK 0x000000E0L
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_BUFTAG_MASK 0x00000F00L
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_NXT_BUF_MASK 0x00007000L
+#define MCIF_WB_BUF_4_STATUS__MCIF_WB_BUF_4_CUR_LINE_L_MASK 0x1FFF0000L
+//MCIF_WB_BUF_4_STATUS2
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_NEW_CONTENT__SHIFT 0xd
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_COLOR_DEPTH__SHIFT 0xe
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_TMZ_BLACK_PIXEL__SHIFT 0xf
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_TMZ__SHIFT 0x10
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_Y_OVERRUN__SHIFT 0x11
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_C_OVERRUN__SHIFT 0x12
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_EYE_FLAG__SHIFT 0x13
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_NEW_CONTENT_MASK 0x00002000L
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_COLOR_DEPTH_MASK 0x00004000L
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_TMZ_BLACK_PIXEL_MASK 0x00008000L
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_TMZ_MASK 0x00010000L
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_Y_OVERRUN_MASK 0x00020000L
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_C_OVERRUN_MASK 0x00040000L
+#define MCIF_WB_BUF_4_STATUS2__MCIF_WB_BUF_4_EYE_FLAG_MASK 0x00080000L
+//MCIF_WB_ARBITRATION_CONTROL
+#define MCIF_WB_ARBITRATION_CONTROL__MCIF_WB_CLIENT_ARBITRATION_SLICE__SHIFT 0x0
+#define MCIF_WB_ARBITRATION_CONTROL__MCIF_WB_TIME_PER_PIXEL__SHIFT 0x14
+#define MCIF_WB_ARBITRATION_CONTROL__MCIF_WB_CLIENT_ARBITRATION_SLICE_MASK 0x00000003L
+#define MCIF_WB_ARBITRATION_CONTROL__MCIF_WB_TIME_PER_PIXEL_MASK 0xFFF00000L
+//MCIF_WB_SCLK_CHANGE
+#define MCIF_WB_SCLK_CHANGE__WM_CHANGE_ACK_FORCE_ON__SHIFT 0x0
+#define MCIF_WB_SCLK_CHANGE__WM_CHANGE_ACK_FORCE_ON_MASK 0x00000001L
+//MCIF_WB_BUF_1_ADDR_Y
+#define MCIF_WB_BUF_1_ADDR_Y__MCIF_WB_BUF_1_ADDR_Y__SHIFT 0x0
+#define MCIF_WB_BUF_1_ADDR_Y__MCIF_WB_BUF_1_ADDR_Y_MASK 0xFFFFFFFFL
+//MCIF_WB_BUF_1_ADDR_C
+#define MCIF_WB_BUF_1_ADDR_C__MCIF_WB_BUF_1_ADDR_C__SHIFT 0x0
+#define MCIF_WB_BUF_1_ADDR_C__MCIF_WB_BUF_1_ADDR_C_MASK 0xFFFFFFFFL
+//MCIF_WB_BUF_2_ADDR_Y
+#define MCIF_WB_BUF_2_ADDR_Y__MCIF_WB_BUF_2_ADDR_Y__SHIFT 0x0
+#define MCIF_WB_BUF_2_ADDR_Y__MCIF_WB_BUF_2_ADDR_Y_MASK 0xFFFFFFFFL
+//MCIF_WB_BUF_2_ADDR_C
+#define MCIF_WB_BUF_2_ADDR_C__MCIF_WB_BUF_2_ADDR_C__SHIFT 0x0
+#define MCIF_WB_BUF_2_ADDR_C__MCIF_WB_BUF_2_ADDR_C_MASK 0xFFFFFFFFL
+//MCIF_WB_BUF_3_ADDR_Y
+#define MCIF_WB_BUF_3_ADDR_Y__MCIF_WB_BUF_3_ADDR_Y__SHIFT 0x0
+#define MCIF_WB_BUF_3_ADDR_Y__MCIF_WB_BUF_3_ADDR_Y_MASK 0xFFFFFFFFL
+//MCIF_WB_BUF_3_ADDR_C
+#define MCIF_WB_BUF_3_ADDR_C__MCIF_WB_BUF_3_ADDR_C__SHIFT 0x0
+#define MCIF_WB_BUF_3_ADDR_C__MCIF_WB_BUF_3_ADDR_C_MASK 0xFFFFFFFFL
+//MCIF_WB_BUF_4_ADDR_Y
+#define MCIF_WB_BUF_4_ADDR_Y__MCIF_WB_BUF_4_ADDR_Y__SHIFT 0x0
+#define MCIF_WB_BUF_4_ADDR_Y__MCIF_WB_BUF_4_ADDR_Y_MASK 0xFFFFFFFFL
+//MCIF_WB_BUF_4_ADDR_C
+#define MCIF_WB_BUF_4_ADDR_C__MCIF_WB_BUF_4_ADDR_C__SHIFT 0x0
+#define MCIF_WB_BUF_4_ADDR_C__MCIF_WB_BUF_4_ADDR_C_MASK 0xFFFFFFFFL
+//MCIF_WB_BUFMGR_VCE_CONTROL
+#define MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_LOCK_IGNORE__SHIFT 0x0
+#define MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_LOCK__SHIFT 0x8
+#define MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_SLICE_SIZE__SHIFT 0x10
+#define MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_LOCK_IGNORE_MASK 0x00000001L
+#define MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_VCE_LOCK_MASK 0x00000F00L
+#define MCIF_WB_BUFMGR_VCE_CONTROL__MCIF_WB_BUFMGR_SLICE_SIZE_MASK 0x1FFF0000L
+//MCIF_WB_NB_PSTATE_CONTROL
+#define MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_CHANGE_FORCE_ON__SHIFT 0x1
+#define MCIF_WB_NB_PSTATE_CONTROL__NB_PSTATE_CHANGE_FORCE_ON_MASK 0x00000002L
+//MCIF_WB_CLOCK_GATER_CONTROL
+#define MCIF_WB_CLOCK_GATER_CONTROL__MCIF_WB_CLI_CLOCK_GATER_OVERRIDE__SHIFT 0x0
+#define MCIF_WB_CLOCK_GATER_CONTROL__MCIF_WB_CLI_CLOCK_GATER_OVERRIDE_MASK 0x00000001L
+//MCIF_WB_SELF_REFRESH_CONTROL
+#define MCIF_WB_SELF_REFRESH_CONTROL__PERFRAME_SELF_REFRESH__SHIFT 0x1
+#define MCIF_WB_SELF_REFRESH_CONTROL__PERFRAME_SELF_REFRESH_MASK 0x00000002L
+//MULTI_LEVEL_QOS_CTRL
+#define MULTI_LEVEL_QOS_CTRL__MAX_SCALED_TIME_TO_URGENT__SHIFT 0x0
+#define MULTI_LEVEL_QOS_CTRL__MAX_SCALED_TIME_TO_URGENT_MASK 0x003FFFFFL
+//MCIF_WB_SECURITY_LEVEL
+#define MCIF_WB_SECURITY_LEVEL__MCIF_WB_SECURITY_LEVEL__SHIFT 0x0
+#define MCIF_WB_SECURITY_LEVEL__MCIF_WB_SPACE__SHIFT 0x4
+#define MCIF_WB_SECURITY_LEVEL__MCIF_WB_SECURITY_LEVEL_MASK 0x00000007L
+#define MCIF_WB_SECURITY_LEVEL__MCIF_WB_SPACE_MASK 0x00000070L
+//MCIF_WB_BUF_LUMA_SIZE
+#define MCIF_WB_BUF_LUMA_SIZE__MCIF_WB_BUF_LUMA_SIZE__SHIFT 0x0
+#define MCIF_WB_BUF_LUMA_SIZE__MCIF_WB_BUF_LUMA_SIZE_MASK 0x000FFFFFL
+//MCIF_WB_BUF_CHROMA_SIZE
+#define MCIF_WB_BUF_CHROMA_SIZE__MCIF_WB_BUF_CHROMA_SIZE__SHIFT 0x0
+#define MCIF_WB_BUF_CHROMA_SIZE__MCIF_WB_BUF_CHROMA_SIZE_MASK 0x000FFFFFL
+//MCIF_WB_BUF_1_ADDR_Y_HIGH
+#define MCIF_WB_BUF_1_ADDR_Y_HIGH__MCIF_WB_BUF_1_ADDR_Y_HIGH__SHIFT 0x0
+#define MCIF_WB_BUF_1_ADDR_Y_HIGH__MCIF_WB_BUF_1_ADDR_Y_HIGH_MASK 0x000000FFL
+//MCIF_WB_BUF_1_ADDR_C_HIGH
+#define MCIF_WB_BUF_1_ADDR_C_HIGH__MCIF_WB_BUF_1_ADDR_C_HIGH__SHIFT 0x0
+#define MCIF_WB_BUF_1_ADDR_C_HIGH__MCIF_WB_BUF_1_ADDR_C_HIGH_MASK 0x000000FFL
+//MCIF_WB_BUF_2_ADDR_Y_HIGH
+#define MCIF_WB_BUF_2_ADDR_Y_HIGH__MCIF_WB_BUF_2_ADDR_Y_HIGH__SHIFT 0x0
+#define MCIF_WB_BUF_2_ADDR_Y_HIGH__MCIF_WB_BUF_2_ADDR_Y_HIGH_MASK 0x000000FFL
+//MCIF_WB_BUF_2_ADDR_C_HIGH
+#define MCIF_WB_BUF_2_ADDR_C_HIGH__MCIF_WB_BUF_2_ADDR_C_HIGH__SHIFT 0x0
+#define MCIF_WB_BUF_2_ADDR_C_HIGH__MCIF_WB_BUF_2_ADDR_C_HIGH_MASK 0x000000FFL
+//MCIF_WB_BUF_3_ADDR_Y_HIGH
+#define MCIF_WB_BUF_3_ADDR_Y_HIGH__MCIF_WB_BUF_3_ADDR_Y_HIGH__SHIFT 0x0
+#define MCIF_WB_BUF_3_ADDR_Y_HIGH__MCIF_WB_BUF_3_ADDR_Y_HIGH_MASK 0x000000FFL
+//MCIF_WB_BUF_3_ADDR_C_HIGH
+#define MCIF_WB_BUF_3_ADDR_C_HIGH__MCIF_WB_BUF_3_ADDR_C_HIGH__SHIFT 0x0
+#define MCIF_WB_BUF_3_ADDR_C_HIGH__MCIF_WB_BUF_3_ADDR_C_HIGH_MASK 0x000000FFL
+//MCIF_WB_BUF_4_ADDR_Y_HIGH
+#define MCIF_WB_BUF_4_ADDR_Y_HIGH__MCIF_WB_BUF_4_ADDR_Y_HIGH__SHIFT 0x0
+#define MCIF_WB_BUF_4_ADDR_Y_HIGH__MCIF_WB_BUF_4_ADDR_Y_HIGH_MASK 0x000000FFL
+//MCIF_WB_BUF_4_ADDR_C_HIGH
+#define MCIF_WB_BUF_4_ADDR_C_HIGH__MCIF_WB_BUF_4_ADDR_C_HIGH__SHIFT 0x0
+#define MCIF_WB_BUF_4_ADDR_C_HIGH__MCIF_WB_BUF_4_ADDR_C_HIGH_MASK 0x000000FFL
+//MCIF_WB_BUF_1_RESOLUTION
+#define MCIF_WB_BUF_1_RESOLUTION__MCIF_WB_BUF_1_RESOLUTION_WIDTH__SHIFT 0x0
+#define MCIF_WB_BUF_1_RESOLUTION__MCIF_WB_BUF_1_RESOLUTION_HEIGHT__SHIFT 0x10
+#define MCIF_WB_BUF_1_RESOLUTION__MCIF_WB_BUF_1_RESOLUTION_WIDTH_MASK 0x00001FFFL
+#define MCIF_WB_BUF_1_RESOLUTION__MCIF_WB_BUF_1_RESOLUTION_HEIGHT_MASK 0x1FFF0000L
+//MCIF_WB_BUF_2_RESOLUTION
+#define MCIF_WB_BUF_2_RESOLUTION__MCIF_WB_BUF_2_RESOLUTION_WIDTH__SHIFT 0x0
+#define MCIF_WB_BUF_2_RESOLUTION__MCIF_WB_BUF_2_RESOLUTION_HEIGHT__SHIFT 0x10
+#define MCIF_WB_BUF_2_RESOLUTION__MCIF_WB_BUF_2_RESOLUTION_WIDTH_MASK 0x00001FFFL
+#define MCIF_WB_BUF_2_RESOLUTION__MCIF_WB_BUF_2_RESOLUTION_HEIGHT_MASK 0x1FFF0000L
+//MCIF_WB_BUF_3_RESOLUTION
+#define MCIF_WB_BUF_3_RESOLUTION__MCIF_WB_BUF_3_RESOLUTION_WIDTH__SHIFT 0x0
+#define MCIF_WB_BUF_3_RESOLUTION__MCIF_WB_BUF_3_RESOLUTION_HEIGHT__SHIFT 0x10
+#define MCIF_WB_BUF_3_RESOLUTION__MCIF_WB_BUF_3_RESOLUTION_WIDTH_MASK 0x00001FFFL
+#define MCIF_WB_BUF_3_RESOLUTION__MCIF_WB_BUF_3_RESOLUTION_HEIGHT_MASK 0x1FFF0000L
+//MCIF_WB_BUF_4_RESOLUTION
+#define MCIF_WB_BUF_4_RESOLUTION__MCIF_WB_BUF_4_RESOLUTION_WIDTH__SHIFT 0x0
+#define MCIF_WB_BUF_4_RESOLUTION__MCIF_WB_BUF_4_RESOLUTION_HEIGHT__SHIFT 0x10
+#define MCIF_WB_BUF_4_RESOLUTION__MCIF_WB_BUF_4_RESOLUTION_WIDTH_MASK 0x00001FFFL
+#define MCIF_WB_BUF_4_RESOLUTION__MCIF_WB_BUF_4_RESOLUTION_HEIGHT_MASK 0x1FFF0000L
+//MCIF_WB_PSTATE_CHANGE_DURATION_VBI
+#define MCIF_WB_PSTATE_CHANGE_DURATION_VBI__MCIF_WB_UCLK_PSTATE_CHANGE_DURATION_VBI__SHIFT 0x0
+#define MCIF_WB_PSTATE_CHANGE_DURATION_VBI__MCIF_WB_FCLK_PSTATE_CHANGE_DURATION_VBI__SHIFT 0x10
+#define MCIF_WB_PSTATE_CHANGE_DURATION_VBI__MCIF_WB_UCLK_PSTATE_CHANGE_DURATION_VBI_MASK 0x0000FFFFL
+#define MCIF_WB_PSTATE_CHANGE_DURATION_VBI__MCIF_WB_FCLK_PSTATE_CHANGE_DURATION_VBI_MASK 0xFFFF0000L
+//MCIF_WB_VMID_CONTROL
+#define MCIF_WB_VMID_CONTROL__MCIF_WB_P_VMID__SHIFT 0x0
+#define MCIF_WB_VMID_CONTROL__MCIF_WB_P_VMID_MASK 0x0000000FL
+//MCIF_WB_MIN_TTO
+#define MCIF_WB_MIN_TTO__MCIF_WB_MIN_TTO__SHIFT 0x0
+#define MCIF_WB_MIN_TTO__MCIF_WB_MIN_TTO_MASK 0x0007FFFFL
+
+
+// addressBlock: dce_dc_mmhubbub_mmhubbub_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON4_PERFCOUNTER_CNTL
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON4_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON4_PERFCOUNTER_CNTL2
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON4_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON4_PERFCOUNTER_STATE
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON4_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON4_PERFMON_CNTL
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON4_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON4_PERFMON_CNTL2
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON4_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON4_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON4_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON4_PERFMON_CVALUE_LOW
+#define DC_PERFMON4_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON4_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON4_PERFMON_HI
+#define DC_PERFMON4_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON4_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON4_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON4_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON4_PERFMON_LOW
+#define DC_PERFMON4_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON4_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_mmhubbub_mmhubbub_dispdec
+//MCIF_WB_NB_PSTATE_LATENCY_WATERMARK
+#define MCIF_WB_NB_PSTATE_LATENCY_WATERMARK__NB_PSTATE_CHANGE_REFRESH_WATERMARK__SHIFT 0x0
+#define MCIF_WB_NB_PSTATE_LATENCY_WATERMARK__NB_PSTATE_CHANGE_WATERMARK_MASK__SHIFT 0x18
+#define MCIF_WB_NB_PSTATE_LATENCY_WATERMARK__NB_PSTATE_CHANGE_WATERMARK_TYPE__SHIFT 0x1f
+#define MCIF_WB_NB_PSTATE_LATENCY_WATERMARK__NB_PSTATE_CHANGE_REFRESH_WATERMARK_MASK 0x001FFFFFL
+#define MCIF_WB_NB_PSTATE_LATENCY_WATERMARK__NB_PSTATE_CHANGE_WATERMARK_MASK_MASK 0x07000000L
+#define MCIF_WB_NB_PSTATE_LATENCY_WATERMARK__NB_PSTATE_CHANGE_WATERMARK_TYPE_MASK 0x80000000L
+//MCIF_WB_WATERMARK
+#define MCIF_WB_WATERMARK__MCIF_WB_CLI_WATERMARK__SHIFT 0x0
+#define MCIF_WB_WATERMARK__MCIF_WB_CLI_WATERMARK_MASK__SHIFT 0x18
+#define MCIF_WB_WATERMARK__MCIF_WB_CLI_WATERMARK_MASK 0x001FFFFFL
+#define MCIF_WB_WATERMARK__MCIF_WB_CLI_WATERMARK_MASK_MASK 0x07000000L
+//MMHUBBUB_WARMUP_CONFIG
+#define MMHUBBUB_WARMUP_CONFIG__MMHUBBUB_WARMUP_QOS__SHIFT 0x10
+#define MMHUBBUB_WARMUP_CONFIG__MMHUBBUB_WARMUP_AWID__SHIFT 0x14
+#define MMHUBBUB_WARMUP_CONFIG__MMHUBBUB_WARMUP_QOS_MASK 0x000F0000L
+#define MMHUBBUB_WARMUP_CONFIG__MMHUBBUB_WARMUP_AWID_MASK 0x00F00000L
+//MMHUBBUB_WARMUP_CONTROL_STATUS
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_EN__SHIFT 0x0
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_SW_INT_EN__SHIFT 0x4
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_SW_INT_STATUS__SHIFT 0x5
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_SW_INT_ACK__SHIFT 0x6
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_INC_ADDR__SHIFT 0x8
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_EN_MASK 0x00000001L
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_SW_INT_EN_MASK 0x00000010L
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_SW_INT_STATUS_MASK 0x00000020L
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_SW_INT_ACK_MASK 0x00000040L
+#define MMHUBBUB_WARMUP_CONTROL_STATUS__MMHUBBUB_WARMUP_INC_ADDR_MASK 0x03FFFF00L
+//MMHUBBUB_WARMUP_BASE_ADDR_LOW
+#define MMHUBBUB_WARMUP_BASE_ADDR_LOW__MMHUBBUB_WARMUP_BASE_ADDR_LOW__SHIFT 0x0
+#define MMHUBBUB_WARMUP_BASE_ADDR_LOW__MMHUBBUB_WARMUP_BASE_ADDR_LOW_MASK 0xFFFFFFFFL
+//MMHUBBUB_WARMUP_BASE_ADDR_HIGH
+#define MMHUBBUB_WARMUP_BASE_ADDR_HIGH__MMHUBBUB_WARMUP_BASE_ADDR_HIGH__SHIFT 0x0
+#define MMHUBBUB_WARMUP_BASE_ADDR_HIGH__MMHUBBUB_WARMUP_BASE_ADDR_HIGH_MASK 0x000007FFL
+//MMHUBBUB_WARMUP_ADDR_REGION
+#define MMHUBBUB_WARMUP_ADDR_REGION__MMHUBBUB_WARMUP_ADDR_REGION__SHIFT 0x0
+#define MMHUBBUB_WARMUP_ADDR_REGION__MMHUBBUB_WARMUP_ADDR_REGION_MASK 0x07FFFFFFL
+//MMHUBBUB_MIN_TTO
+#define MMHUBBUB_MIN_TTO__MMHUBBUB_MIN_TTO__SHIFT 0x0
+#define MMHUBBUB_MIN_TTO__MMHUBBUB_MIN_TTO_MASK 0x0007FFFFL
+//MMHUBBUB_CTRL
+#define MMHUBBUB_CTRL__MMHUB_SOCCLK_DS_MODE__SHIFT 0x0
+#define MMHUBBUB_CTRL__MMHUB_SOCCLK_DS_MODE_MASK 0x00000003L
+//WBIF_SMU_WM_CONTROL
+#define WBIF_SMU_WM_CONTROL__MCIF_WB_WM_CHG_SEL__SHIFT 0x14
+#define WBIF_SMU_WM_CONTROL__MCIF_WB_WM_CHG_REQ__SHIFT 0x16
+#define WBIF_SMU_WM_CONTROL__MCIF_WB_WM_CHG_SEL_MASK 0x00300000L
+#define WBIF_SMU_WM_CONTROL__MCIF_WB_WM_CHG_REQ_MASK 0x00400000L
+//WBIF0_MISC_CTRL
+#define WBIF0_MISC_CTRL__MCIFWB0_WR_COMBINE_TIMEOUT_THRESH__SHIFT 0x0
+#define WBIF0_MISC_CTRL__MCIF_WB0_SOCCLK_DS_ENABLE__SHIFT 0x10
+#define WBIF0_MISC_CTRL__MCIF_WB0_WM_CHG_ACK_INT_DIS__SHIFT 0x18
+#define WBIF0_MISC_CTRL__MCIF_WB0_WM_CHG_ACK_INT_STATUS__SHIFT 0x19
+#define WBIF0_MISC_CTRL__MCIFWB0_WR_COMBINE_TIMEOUT_THRESH_MASK 0x000003FFL
+#define WBIF0_MISC_CTRL__MCIF_WB0_SOCCLK_DS_ENABLE_MASK 0x00010000L
+#define WBIF0_MISC_CTRL__MCIF_WB0_WM_CHG_ACK_INT_DIS_MASK 0x01000000L
+#define WBIF0_MISC_CTRL__MCIF_WB0_WM_CHG_ACK_INT_STATUS_MASK 0x02000000L
+//WBIF0_PHASE0_OUTSTANDING_COUNTER
+#define WBIF0_PHASE0_OUTSTANDING_COUNTER__MCIF_WB0_PHASE0_OUTSTANDING_COUNTER__SHIFT 0x0
+#define WBIF0_PHASE0_OUTSTANDING_COUNTER__MCIF_WB0_PHASE0_OUTSTANDING_COUNTER_MASK 0x07FFFFFFL
+//WBIF0_PHASE1_OUTSTANDING_COUNTER
+#define WBIF0_PHASE1_OUTSTANDING_COUNTER__MCIF_WB0_PHASE1_OUTSTANDING_COUNTER__SHIFT 0x0
+#define WBIF0_PHASE1_OUTSTANDING_COUNTER__MCIF_WB0_PHASE1_OUTSTANDING_COUNTER_MASK 0x07FFFFFFL
+//MMHUBBUB_MEM_PWR_STATUS
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_LUMA_MEM0_PWR_STATE__SHIFT 0x0
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_LUMA_MEM1_PWR_STATE__SHIFT 0x2
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_CHROMA_MEM0_PWR_STATE__SHIFT 0x4
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_CHROMA_MEM1_PWR_STATE__SHIFT 0x6
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_LUMA_MEM0_PWR_STATE_MASK 0x00000003L
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_LUMA_MEM1_PWR_STATE_MASK 0x0000000CL
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_CHROMA_MEM0_PWR_STATE_MASK 0x00000030L
+#define MMHUBBUB_MEM_PWR_STATUS__MCIF_DWB0_CHROMA_MEM1_PWR_STATE_MASK 0x000000C0L
+//MMHUBBUB_MEM_PWR_CNTL
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_MEM_PWR_FORCE__SHIFT 0x2
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_MEM_PWR_DIS__SHIFT 0x4
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_MEM_PWR_MODE_SEL__SHIFT 0x5
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_LUMA_MEM_EN_NUM__SHIFT 0x7
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_CHROMA_MEM_EN_NUM__SHIFT 0x8
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_MEM_PWR_FORCE_MASK 0x0000000CL
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_MEM_PWR_DIS_MASK 0x00000010L
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_MEM_PWR_MODE_SEL_MASK 0x00000060L
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_LUMA_MEM_EN_NUM_MASK 0x00000080L
+#define MMHUBBUB_MEM_PWR_CNTL__MCIF_DWB0_CHROMA_MEM_EN_NUM_MASK 0x00000100L
+//MMHUBBUB_CLOCK_CNTL
+#define MMHUBBUB_CLOCK_CNTL__MMHUBBUB_TEST_CLK_SEL__SHIFT 0x0
+#define MMHUBBUB_CLOCK_CNTL__DISPCLK_R_MMHUBBUB_GATE_DIS__SHIFT 0x5
+#define MMHUBBUB_CLOCK_CNTL__DISPCLK_G_WBIF0_GATE_DIS__SHIFT 0x9
+#define MMHUBBUB_CLOCK_CNTL__SOCCLK_G_WBIF0_GATE_DIS__SHIFT 0xa
+#define MMHUBBUB_CLOCK_CNTL__MMHUBBUB_FGCG_REP_DIS__SHIFT 0x11
+#define MMHUBBUB_CLOCK_CNTL__MMHUBBUB_TEST_CLK_SEL_MASK 0x0000001FL
+#define MMHUBBUB_CLOCK_CNTL__DISPCLK_R_MMHUBBUB_GATE_DIS_MASK 0x00000020L
+#define MMHUBBUB_CLOCK_CNTL__DISPCLK_G_WBIF0_GATE_DIS_MASK 0x00000200L
+#define MMHUBBUB_CLOCK_CNTL__SOCCLK_G_WBIF0_GATE_DIS_MASK 0x00000400L
+#define MMHUBBUB_CLOCK_CNTL__MMHUBBUB_FGCG_REP_DIS_MASK 0x00020000L
+//MMHUBBUB_SOFT_RESET
+#define MMHUBBUB_SOFT_RESET__WBIF0_SOFT_RESET__SHIFT 0x2
+#define MMHUBBUB_SOFT_RESET__DMUIF_SOFT_RESET__SHIFT 0x8
+#define MMHUBBUB_SOFT_RESET__WBIF0_SOFT_RESET_MASK 0x00000004L
+#define MMHUBBUB_SOFT_RESET__DMUIF_SOFT_RESET_MASK 0x00000100L
+//DMU_IF_ERR_STATUS
+#define DMU_IF_ERR_STATUS__DMU_RD_OUTSTANDING_ERR__SHIFT 0x0
+#define DMU_IF_ERR_STATUS__DMU_RD_OUTSTANDING_ERR_CLR__SHIFT 0x4
+#define DMU_IF_ERR_STATUS__DMU_RD_OUTSTANDING_ERR_MASK 0x00000001L
+#define DMU_IF_ERR_STATUS__DMU_RD_OUTSTANDING_ERR_CLR_MASK 0x00000010L
+//MMHUBBUB_CLIENT_UNIT_ID
+#define MMHUBBUB_CLIENT_UNIT_ID__WBIF0_UNIT_ID__SHIFT 0x8
+#define MMHUBBUB_CLIENT_UNIT_ID__WBIF0_UNIT_ID_MASK 0x00003F00L
+//MMHUBBUB_WARMUP_VMID_CONTROL
+#define MMHUBBUB_WARMUP_VMID_CONTROL__MMHUBBUB_WARMUP_P_VMID__SHIFT 0x0
+#define MMHUBBUB_WARMUP_VMID_CONTROL__MMHUBBUB_WARMUP_P_VMID_MASK 0x0000000FL
+
+
+// addressBlock: dce_dc_hda_azf0controller_dispdec
+//AZALIA_CONTROLLER_CLOCK_GATING
+#define AZALIA_CONTROLLER_CLOCK_GATING__ENABLE_CLOCK_GATING__SHIFT 0x0
+#define AZALIA_CONTROLLER_CLOCK_GATING__CLOCK_ON_STATE__SHIFT 0x4
+#define AZALIA_CONTROLLER_CLOCK_GATING__ENABLE_CLOCK_GATING_MASK 0x00000001L
+#define AZALIA_CONTROLLER_CLOCK_GATING__CLOCK_ON_STATE_MASK 0x00000010L
+//AZALIA_AUDIO_DTO
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_PHASE__SHIFT 0x0
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_MODULE__SHIFT 0x10
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_PHASE_MASK 0x0000FFFFL
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_MODULE_MASK 0xFFFF0000L
+//AZALIA_AUDIO_DTO_CONTROL
+#define AZALIA_AUDIO_DTO_CONTROL__AZALIA_AUDIO_FORCE_DTO__SHIFT 0x8
+#define AZALIA_AUDIO_DTO_CONTROL__AZALIA_AUDIO_FORCE_DTO_MASK 0x00000300L
+//AZALIA_SOCCLK_CONTROL
+#define AZALIA_SOCCLK_CONTROL__AUDIO_STREAM_SOCCLK_DEEP_SLEEP_EXIT_EN__SHIFT 0x1
+#define AZALIA_SOCCLK_CONTROL__AUDIO_STREAM_SOCCLK_DEEP_SLEEP_EXIT_EN_MASK 0x00000002L
+//AZALIA_UNDERFLOW_FILLER_SAMPLE
+#define AZALIA_UNDERFLOW_FILLER_SAMPLE__AZALIA_UNDERFLOW_FILLER_SAMPLE__SHIFT 0x0
+#define AZALIA_UNDERFLOW_FILLER_SAMPLE__AZALIA_UNDERFLOW_FILLER_SAMPLE_MASK 0xFFFFFFFFL
+//AZALIA_DATA_DMA_CONTROL
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_NON_SNOOP__SHIFT 0x0
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_NON_SNOOP__SHIFT 0x2
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_ISOCHRONOUS__SHIFT 0x4
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_ISOCHRONOUS__SHIFT 0x6
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_IOC_GENERATION_METHOD__SHIFT 0x10
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_UNDERFLOW_CONTROL__SHIFT 0x11
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_NON_SNOOP_MASK 0x00000003L
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_NON_SNOOP_MASK 0x0000000CL
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_ISOCHRONOUS_MASK 0x00000030L
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_ISOCHRONOUS_MASK 0x000000C0L
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_IOC_GENERATION_METHOD_MASK 0x00010000L
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_UNDERFLOW_CONTROL_MASK 0x00020000L
+//AZALIA_BDL_DMA_CONTROL
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_NON_SNOOP__SHIFT 0x0
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_NON_SNOOP__SHIFT 0x2
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_ISOCHRONOUS__SHIFT 0x4
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_ISOCHRONOUS__SHIFT 0x6
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_NON_SNOOP_MASK 0x00000003L
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_NON_SNOOP_MASK 0x0000000CL
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_ISOCHRONOUS_MASK 0x00000030L
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_ISOCHRONOUS_MASK 0x000000C0L
+//AZALIA_RIRB_AND_DP_CONTROL
+#define AZALIA_RIRB_AND_DP_CONTROL__RIRB_NON_SNOOP__SHIFT 0x0
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_DMA_NON_SNOOP__SHIFT 0x4
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_UPDATE_FREQ_DIVIDER__SHIFT 0x5
+#define AZALIA_RIRB_AND_DP_CONTROL__RIRB_NON_SNOOP_MASK 0x00000001L
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_DMA_NON_SNOOP_MASK 0x00000010L
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_UPDATE_FREQ_DIVIDER_MASK 0x000001E0L
+//AZALIA_CORB_DMA_CONTROL
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_NON_SNOOP__SHIFT 0x0
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_ISOCHRONOUS__SHIFT 0x4
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_NON_SNOOP_MASK 0x00000001L
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_ISOCHRONOUS_MASK 0x00000010L
+//AZALIA_GLOBAL_CAPABILITIES
+#define AZALIA_GLOBAL_CAPABILITIES__NUMBER_OF_SERIAL_DATA_OUTPUT_SIGNALS__SHIFT 0x1
+#define AZALIA_GLOBAL_CAPABILITIES__NUMBER_OF_SERIAL_DATA_OUTPUT_SIGNALS_MASK 0x00000006L
+//AZALIA_OUTPUT_PAYLOAD_CAPABILITY
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTPUT_PAYLOAD_CAPABILITY__SHIFT 0x0
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTSTRMPAY__SHIFT 0x10
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTPUT_PAYLOAD_CAPABILITY_MASK 0x0000FFFFL
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTSTRMPAY_MASK 0xFFFF0000L
+//AZALIA_OUTPUT_STREAM_ARBITER_CONTROL
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__LATENCY_HIDING_LEVEL__SHIFT 0x0
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__SYS_MEM_ACTIVE_ENABLE__SHIFT 0x8
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__INPUT_LATENCY_HIDING_LEVEL__SHIFT 0x10
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__LATENCY_HIDING_LEVEL_MASK 0x000000FFL
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__SYS_MEM_ACTIVE_ENABLE_MASK 0x00000100L
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__INPUT_LATENCY_HIDING_LEVEL_MASK 0x00FF0000L
+//AZALIA_INPUT_PAYLOAD_CAPABILITY
+#define AZALIA_INPUT_PAYLOAD_CAPABILITY__INPUT_PAYLOAD_CAPABILITY__SHIFT 0x0
+#define AZALIA_INPUT_PAYLOAD_CAPABILITY__INSTRMPAY__SHIFT 0x10
+#define AZALIA_INPUT_PAYLOAD_CAPABILITY__INPUT_PAYLOAD_CAPABILITY_MASK 0x0000FFFFL
+#define AZALIA_INPUT_PAYLOAD_CAPABILITY__INSTRMPAY_MASK 0xFFFF0000L
+//AZALIA_CONTROLLER_DEBUG
+//AZALIA_INPUT_CRC0_CONTROL0
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_EN__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_BLOCK_MODE__SHIFT 0x4
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_INSTANCE_SEL__SHIFT 0x8
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_EN_MASK 0x00000001L
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_BLOCK_MODE_MASK 0x00000010L
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_INSTANCE_SEL_MASK 0x00000700L
+//AZALIA_INPUT_CRC0_CONTROL1
+#define AZALIA_INPUT_CRC0_CONTROL1__INPUT_CRC_BLOCK_SIZE__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CONTROL1__INPUT_CRC_BLOCK_SIZE_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC0_CONTROL2
+#define AZALIA_INPUT_CRC0_CONTROL2__INPUT_CRC_BLOCK_ITERATION__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CONTROL2__INPUT_CRC_BLOCK_ITERATION_MASK 0x0000FFFFL
+//AZALIA_INPUT_CRC0_CONTROL3
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_COMPLETE__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_BLOCK_COMPLETE_PHASE__SHIFT 0x4
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_CHANNEL_RESULT_SEL__SHIFT 0x8
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_COMPLETE_MASK 0x00000001L
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_BLOCK_COMPLETE_PHASE_MASK 0x00000010L
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_CHANNEL_RESULT_SEL_MASK 0x00000700L
+//AZALIA_INPUT_CRC0_RESULT
+#define AZALIA_INPUT_CRC0_RESULT__INPUT_CRC_RESULT__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_RESULT__INPUT_CRC_RESULT_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC1_CONTROL0
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_EN__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_BLOCK_MODE__SHIFT 0x4
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_INSTANCE_SEL__SHIFT 0x8
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_EN_MASK 0x00000001L
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_BLOCK_MODE_MASK 0x00000010L
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_INSTANCE_SEL_MASK 0x00000700L
+//AZALIA_INPUT_CRC1_CONTROL1
+#define AZALIA_INPUT_CRC1_CONTROL1__INPUT_CRC_BLOCK_SIZE__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CONTROL1__INPUT_CRC_BLOCK_SIZE_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC1_CONTROL2
+#define AZALIA_INPUT_CRC1_CONTROL2__INPUT_CRC_BLOCK_ITERATION__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CONTROL2__INPUT_CRC_BLOCK_ITERATION_MASK 0x0000FFFFL
+//AZALIA_INPUT_CRC1_CONTROL3
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_COMPLETE__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_BLOCK_COMPLETE_PHASE__SHIFT 0x4
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_CHANNEL_RESULT_SEL__SHIFT 0x8
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_COMPLETE_MASK 0x00000001L
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_BLOCK_COMPLETE_PHASE_MASK 0x00000010L
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_CHANNEL_RESULT_SEL_MASK 0x00000700L
+//AZALIA_INPUT_CRC1_RESULT
+#define AZALIA_INPUT_CRC1_RESULT__INPUT_CRC_RESULT__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_RESULT__INPUT_CRC_RESULT_MASK 0xFFFFFFFFL
+//AZALIA_CRC0_CONTROL0
+#define AZALIA_CRC0_CONTROL0__CRC_EN__SHIFT 0x0
+#define AZALIA_CRC0_CONTROL0__CRC_BLOCK_MODE__SHIFT 0x4
+#define AZALIA_CRC0_CONTROL0__CRC_INSTANCE_SEL__SHIFT 0x8
+#define AZALIA_CRC0_CONTROL0__CRC_SOURCE_SEL__SHIFT 0xc
+#define AZALIA_CRC0_CONTROL0__CRC_EN_MASK 0x00000001L
+#define AZALIA_CRC0_CONTROL0__CRC_BLOCK_MODE_MASK 0x00000010L
+#define AZALIA_CRC0_CONTROL0__CRC_INSTANCE_SEL_MASK 0x00000700L
+#define AZALIA_CRC0_CONTROL0__CRC_SOURCE_SEL_MASK 0x00001000L
+//AZALIA_CRC0_CONTROL1
+#define AZALIA_CRC0_CONTROL1__CRC_BLOCK_SIZE__SHIFT 0x0
+#define AZALIA_CRC0_CONTROL1__CRC_BLOCK_SIZE_MASK 0xFFFFFFFFL
+//AZALIA_CRC0_CONTROL2
+#define AZALIA_CRC0_CONTROL2__CRC_BLOCK_ITERATION__SHIFT 0x0
+#define AZALIA_CRC0_CONTROL2__CRC_BLOCK_ITERATION_MASK 0x0000FFFFL
+//AZALIA_CRC0_CONTROL3
+#define AZALIA_CRC0_CONTROL3__CRC_COMPLETE__SHIFT 0x0
+#define AZALIA_CRC0_CONTROL3__CRC_BLOCK_COMPLETE_PHASE__SHIFT 0x4
+#define AZALIA_CRC0_CONTROL3__CRC_CHANNEL_RESULT_SEL__SHIFT 0x8
+#define AZALIA_CRC0_CONTROL3__CRC_COMPLETE_MASK 0x00000001L
+#define AZALIA_CRC0_CONTROL3__CRC_BLOCK_COMPLETE_PHASE_MASK 0x00000010L
+#define AZALIA_CRC0_CONTROL3__CRC_CHANNEL_RESULT_SEL_MASK 0x00000700L
+//AZALIA_CRC0_RESULT
+#define AZALIA_CRC0_RESULT__CRC_RESULT__SHIFT 0x0
+#define AZALIA_CRC0_RESULT__CRC_RESULT_MASK 0xFFFFFFFFL
+//AZALIA_CRC1_CONTROL0
+#define AZALIA_CRC1_CONTROL0__CRC_EN__SHIFT 0x0
+#define AZALIA_CRC1_CONTROL0__CRC_BLOCK_MODE__SHIFT 0x4
+#define AZALIA_CRC1_CONTROL0__CRC_INSTANCE_SEL__SHIFT 0x8
+#define AZALIA_CRC1_CONTROL0__CRC_SOURCE_SEL__SHIFT 0xc
+#define AZALIA_CRC1_CONTROL0__CRC_EN_MASK 0x00000001L
+#define AZALIA_CRC1_CONTROL0__CRC_BLOCK_MODE_MASK 0x00000010L
+#define AZALIA_CRC1_CONTROL0__CRC_INSTANCE_SEL_MASK 0x00000700L
+#define AZALIA_CRC1_CONTROL0__CRC_SOURCE_SEL_MASK 0x00001000L
+//AZALIA_CRC1_CONTROL1
+#define AZALIA_CRC1_CONTROL1__CRC_BLOCK_SIZE__SHIFT 0x0
+#define AZALIA_CRC1_CONTROL1__CRC_BLOCK_SIZE_MASK 0xFFFFFFFFL
+//AZALIA_CRC1_CONTROL2
+#define AZALIA_CRC1_CONTROL2__CRC_BLOCK_ITERATION__SHIFT 0x0
+#define AZALIA_CRC1_CONTROL2__CRC_BLOCK_ITERATION_MASK 0x0000FFFFL
+//AZALIA_CRC1_CONTROL3
+#define AZALIA_CRC1_CONTROL3__CRC_COMPLETE__SHIFT 0x0
+#define AZALIA_CRC1_CONTROL3__CRC_BLOCK_COMPLETE_PHASE__SHIFT 0x4
+#define AZALIA_CRC1_CONTROL3__CRC_CHANNEL_RESULT_SEL__SHIFT 0x8
+#define AZALIA_CRC1_CONTROL3__CRC_COMPLETE_MASK 0x00000001L
+#define AZALIA_CRC1_CONTROL3__CRC_BLOCK_COMPLETE_PHASE_MASK 0x00000010L
+#define AZALIA_CRC1_CONTROL3__CRC_CHANNEL_RESULT_SEL_MASK 0x00000700L
+//AZALIA_CRC1_RESULT
+#define AZALIA_CRC1_RESULT__CRC_RESULT__SHIFT 0x0
+#define AZALIA_CRC1_RESULT__CRC_RESULT_MASK 0xFFFFFFFFL
+//AZALIA_MEM_PWR_CTRL
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_FORCE__SHIFT 0x0
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_DIS__SHIFT 0x2
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM0_MEM_PWR_FORCE__SHIFT 0x3
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM0_MEM_PWR_DIS__SHIFT 0x5
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM1_MEM_PWR_FORCE__SHIFT 0x6
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM1_MEM_PWR_DIS__SHIFT 0x8
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM2_MEM_PWR_FORCE__SHIFT 0x9
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM2_MEM_PWR_DIS__SHIFT 0xb
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM3_MEM_PWR_FORCE__SHIFT 0xc
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM3_MEM_PWR_DIS__SHIFT 0xe
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM4_MEM_PWR_FORCE__SHIFT 0xf
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM4_MEM_PWR_DIS__SHIFT 0x11
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM5_MEM_PWR_FORCE__SHIFT 0x12
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM5_MEM_PWR_DIS__SHIFT 0x14
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_MODE_SEL__SHIFT 0x1c
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_FORCE_MASK 0x00000003L
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_DIS_MASK 0x00000004L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM0_MEM_PWR_FORCE_MASK 0x00000018L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM0_MEM_PWR_DIS_MASK 0x00000020L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM1_MEM_PWR_FORCE_MASK 0x000000C0L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM1_MEM_PWR_DIS_MASK 0x00000100L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM2_MEM_PWR_FORCE_MASK 0x00000600L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM2_MEM_PWR_DIS_MASK 0x00000800L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM3_MEM_PWR_FORCE_MASK 0x00003000L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM3_MEM_PWR_DIS_MASK 0x00004000L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM4_MEM_PWR_FORCE_MASK 0x00018000L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM4_MEM_PWR_DIS_MASK 0x00020000L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM5_MEM_PWR_FORCE_MASK 0x000C0000L
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM5_MEM_PWR_DIS_MASK 0x00100000L
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_MODE_SEL_MASK 0x30000000L
+//AZALIA_MEM_PWR_STATUS
+#define AZALIA_MEM_PWR_STATUS__AZ_MEM_PWR_STATE__SHIFT 0x0
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM0_MEM_PWR_STATE__SHIFT 0x2
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM1_MEM_PWR_STATE__SHIFT 0x4
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM2_MEM_PWR_STATE__SHIFT 0x6
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM3_MEM_PWR_STATE__SHIFT 0x8
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM4_MEM_PWR_STATE__SHIFT 0xa
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM5_MEM_PWR_STATE__SHIFT 0xc
+#define AZALIA_MEM_PWR_STATUS__AZ_MEM_PWR_STATE_MASK 0x00000003L
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM0_MEM_PWR_STATE_MASK 0x0000000CL
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM1_MEM_PWR_STATE_MASK 0x00000030L
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM2_MEM_PWR_STATE_MASK 0x000000C0L
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM3_MEM_PWR_STATE_MASK 0x00000300L
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM4_MEM_PWR_STATE_MASK 0x00000C00L
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM5_MEM_PWR_STATE_MASK 0x00003000L
+
+
+// addressBlock: dce_dc_hda_azf0root_dispdec
+//AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__SHIFT 0x0
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_MASK 0xFFFFFFFFL
+//AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID__SHIFT 0x0
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID_MASK 0xFFFFFFFFL
+//AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__HBR_CHANNEL_COUNT__SHIFT 0x0
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__COMPRESSED_CHANNEL_COUNT__SHIFT 0x4
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__HBR_CHANNEL_COUNT_MASK 0x00000007L
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__COMPRESSED_CHANNEL_COUNT_MASK 0x00000070L
+//AZALIA_F0_CODEC_RESYNC_FIFO_CONTROL
+#define AZALIA_F0_CODEC_RESYNC_FIFO_CONTROL__RESYNC_FIFO_STARTUP_KEEPOUT_WINDOW__SHIFT 0x0
+#define AZALIA_F0_CODEC_RESYNC_FIFO_CONTROL__RESYNC_FIFO_STARTUP_KEEPOUT_WINDOW_MASK 0x0000003FL
+//AZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_MASK 0xFFFFFFFFL
+//AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP__SHIFT 0x1e
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS__SHIFT 0x1f
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES_MASK 0x3FFFFFFFL
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP_MASK 0x40000000L
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS_MASK 0x80000000L
+//AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT__SHIFT 0x4
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK__SHIFT 0x9
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET__SHIFT 0xa
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET_MASK 0x0000000FL
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT_MASK 0x000000F0L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK_MASK 0x00000200L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET_MASK 0x00000400L
+//AZALIA_F0_CODEC_FUNCTION_CONTROL_RESET
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET_MASK 0x00000001L
+//AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1__SHIFT 0x8
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2__SHIFT 0x10
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3__SHIFT 0x18
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0_MASK 0x000000FFL
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1_MASK 0x0000FF00L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2_MASK 0x00FF0000L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3_MASK 0xFF000000L
+//AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION_MASK 0x000000FFL
+//CC_RCU_DC_AUDIO_PORT_CONNECTIVITY
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY__SHIFT 0x0
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_MASK 0x00000007L
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+//CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY__SHIFT 0x0
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY_MASK 0x00000007L
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+//AZALIA_F0_GTC_GROUP_OFFSET0
+#define AZALIA_F0_GTC_GROUP_OFFSET0__GTC_GROUP_OFFSET0__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET0__GTC_GROUP_OFFSET0_MASK 0xFFFFFFFFL
+//AZALIA_F0_GTC_GROUP_OFFSET1
+#define AZALIA_F0_GTC_GROUP_OFFSET1__GTC_GROUP_OFFSET1__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET1__GTC_GROUP_OFFSET1_MASK 0xFFFFFFFFL
+//AZALIA_F0_GTC_GROUP_OFFSET2
+#define AZALIA_F0_GTC_GROUP_OFFSET2__GTC_GROUP_OFFSET2__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET2__GTC_GROUP_OFFSET2_MASK 0xFFFFFFFFL
+//AZALIA_F0_GTC_GROUP_OFFSET3
+#define AZALIA_F0_GTC_GROUP_OFFSET3__GTC_GROUP_OFFSET3__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET3__GTC_GROUP_OFFSET3_MASK 0xFFFFFFFFL
+//AZALIA_F0_GTC_GROUP_OFFSET4
+#define AZALIA_F0_GTC_GROUP_OFFSET4__GTC_GROUP_OFFSET4__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET4__GTC_GROUP_OFFSET4_MASK 0xFFFFFFFFL
+//AZALIA_F0_GTC_GROUP_OFFSET5
+#define AZALIA_F0_GTC_GROUP_OFFSET5__GTC_GROUP_OFFSET5__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET5__GTC_GROUP_OFFSET5_MASK 0xFFFFFFFFL
+//AZALIA_F0_GTC_GROUP_OFFSET6
+#define AZALIA_F0_GTC_GROUP_OFFSET6__GTC_GROUP_OFFSET6__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET6__GTC_GROUP_OFFSET6_MASK 0xFFFFFFFFL
+//REG_DC_AUDIO_PORT_CONNECTIVITY
+#define REG_DC_AUDIO_PORT_CONNECTIVITY__REG_PORT_CONNECTIVITY__SHIFT 0x0
+#define REG_DC_AUDIO_PORT_CONNECTIVITY__REG_PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define REG_DC_AUDIO_PORT_CONNECTIVITY__REG_PORT_CONNECTIVITY_MASK 0x00000007L
+#define REG_DC_AUDIO_PORT_CONNECTIVITY__REG_PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+//REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY
+#define REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY__REG_INPUT_PORT_CONNECTIVITY__SHIFT 0x0
+#define REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY__REG_INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY__REG_INPUT_PORT_CONNECTIVITY_MASK 0x00000007L
+#define REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY__REG_INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_hda_az_misc_dispdec
+//AZ_CLOCK_CNTL
+#define AZ_CLOCK_CNTL__SCLK_G_STREAM_AZ_GATE_DIS__SHIFT 0x0
+#define AZ_CLOCK_CNTL__AZ_GLOBAL_FGCG_REP_DIS__SHIFT 0x1
+#define AZ_CLOCK_CNTL__SCLK_R_AZ_GATE_DIS__SHIFT 0x4
+#define AZ_CLOCK_CNTL__SCLK_G_CNTL_AZ_GATE_DIS__SHIFT 0x8
+#define AZ_CLOCK_CNTL__DCIPG_TEST_CLK_SEL__SHIFT 0xc
+#define AZ_CLOCK_CNTL__SCLK_GATE_DIS__SHIFT 0x10
+#define AZ_CLOCK_CNTL__SCLK_TURN_ON_DELAY__SHIFT 0x14
+#define AZ_CLOCK_CNTL__SCLK_TURN_OFF_DELAY__SHIFT 0x18
+#define AZ_CLOCK_CNTL__SCLK_G_STREAM_AZ_GATE_DIS_MASK 0x00000001L
+#define AZ_CLOCK_CNTL__AZ_GLOBAL_FGCG_REP_DIS_MASK 0x00000002L
+#define AZ_CLOCK_CNTL__SCLK_R_AZ_GATE_DIS_MASK 0x00000010L
+#define AZ_CLOCK_CNTL__SCLK_G_CNTL_AZ_GATE_DIS_MASK 0x00000100L
+#define AZ_CLOCK_CNTL__DCIPG_TEST_CLK_SEL_MASK 0x0000F000L
+#define AZ_CLOCK_CNTL__SCLK_GATE_DIS_MASK 0x00010000L
+#define AZ_CLOCK_CNTL__SCLK_TURN_ON_DELAY_MASK 0x00F00000L
+#define AZ_CLOCK_CNTL__SCLK_TURN_OFF_DELAY_MASK 0xFF000000L
+//AZ_MEM_GLOBAL_PWR_REQ_CNTL
+#define AZ_MEM_GLOBAL_PWR_REQ_CNTL__AZ_MEM_GLOBAL_PWR_REQ_DIS__SHIFT 0x0
+#define AZ_MEM_GLOBAL_PWR_REQ_CNTL__AZ_MEM_GLOBAL_PWR_REQ_DIS_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_hda_az_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON5_PERFCOUNTER_CNTL
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON5_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON5_PERFCOUNTER_CNTL2
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON5_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON5_PERFCOUNTER_STATE
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON5_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON5_PERFMON_CNTL
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON5_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON5_PERFMON_CNTL2
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON5_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON5_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON5_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON5_PERFMON_CVALUE_LOW
+#define DC_PERFMON5_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON5_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON5_PERFMON_HI
+#define DC_PERFMON5_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON5_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON5_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON5_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON5_PERFMON_LOW
+#define DC_PERFMON5_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON5_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream0_dispdec
+//AZF0STREAM0_AZALIA_STREAM_INDEX
+#define AZF0STREAM0_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM0_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM0_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM0_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM0_AZALIA_STREAM_DATA
+#define AZF0STREAM0_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM0_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream1_dispdec
+//AZF0STREAM1_AZALIA_STREAM_INDEX
+#define AZF0STREAM1_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM1_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM1_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM1_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM1_AZALIA_STREAM_DATA
+#define AZF0STREAM1_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM1_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream2_dispdec
+//AZF0STREAM2_AZALIA_STREAM_INDEX
+#define AZF0STREAM2_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM2_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM2_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM2_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM2_AZALIA_STREAM_DATA
+#define AZF0STREAM2_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM2_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream3_dispdec
+//AZF0STREAM3_AZALIA_STREAM_INDEX
+#define AZF0STREAM3_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM3_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM3_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM3_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM3_AZALIA_STREAM_DATA
+#define AZF0STREAM3_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM3_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream4_dispdec
+//AZF0STREAM4_AZALIA_STREAM_INDEX
+#define AZF0STREAM4_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM4_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM4_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM4_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM4_AZALIA_STREAM_DATA
+#define AZF0STREAM4_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM4_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream5_dispdec
+//AZF0STREAM5_AZALIA_STREAM_INDEX
+#define AZF0STREAM5_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM5_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM5_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM5_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM5_AZALIA_STREAM_DATA
+#define AZF0STREAM5_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM5_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream6_dispdec
+//AZF0STREAM6_AZALIA_STREAM_INDEX
+#define AZF0STREAM6_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM6_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM6_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM6_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM6_AZALIA_STREAM_DATA
+#define AZF0STREAM6_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM6_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream7_dispdec
+//AZF0STREAM7_AZALIA_STREAM_INDEX
+#define AZF0STREAM7_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM7_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM7_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM7_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM7_AZALIA_STREAM_DATA
+#define AZF0STREAM7_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM7_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream8_dispdec
+//AZF0STREAM8_AZALIA_STREAM_INDEX
+#define AZF0STREAM8_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM8_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM8_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM8_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM8_AZALIA_STREAM_DATA
+#define AZF0STREAM8_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM8_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream9_dispdec
+//AZF0STREAM9_AZALIA_STREAM_INDEX
+#define AZF0STREAM9_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM9_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM9_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM9_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM9_AZALIA_STREAM_DATA
+#define AZF0STREAM9_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM9_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream10_dispdec
+//AZF0STREAM10_AZALIA_STREAM_INDEX
+#define AZF0STREAM10_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM10_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM10_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM10_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM10_AZALIA_STREAM_DATA
+#define AZF0STREAM10_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM10_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream11_dispdec
+//AZF0STREAM11_AZALIA_STREAM_INDEX
+#define AZF0STREAM11_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM11_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM11_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM11_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM11_AZALIA_STREAM_DATA
+#define AZF0STREAM11_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM11_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream12_dispdec
+//AZF0STREAM12_AZALIA_STREAM_INDEX
+#define AZF0STREAM12_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM12_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM12_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM12_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM12_AZALIA_STREAM_DATA
+#define AZF0STREAM12_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM12_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream13_dispdec
+//AZF0STREAM13_AZALIA_STREAM_INDEX
+#define AZF0STREAM13_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM13_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM13_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM13_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM13_AZALIA_STREAM_DATA
+#define AZF0STREAM13_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM13_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream14_dispdec
+//AZF0STREAM14_AZALIA_STREAM_INDEX
+#define AZF0STREAM14_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM14_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM14_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM14_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM14_AZALIA_STREAM_DATA
+#define AZF0STREAM14_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM14_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0stream15_dispdec
+//AZF0STREAM15_AZALIA_STREAM_INDEX
+#define AZF0STREAM15_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZF0STREAM15_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZF0STREAM15_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0x000000FFL
+#define AZF0STREAM15_AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x00000100L
+//AZF0STREAM15_AZALIA_STREAM_DATA
+#define AZF0STREAM15_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZF0STREAM15_AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0endpoint0_dispdec
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0endpoint1_dispdec
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0endpoint2_dispdec
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0endpoint3_dispdec
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0endpoint4_dispdec
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0endpoint5_dispdec
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0endpoint6_dispdec
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0endpoint7_dispdec
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint0_dispdec
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint1_dispdec
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint2_dispdec
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint3_dispdec
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint4_dispdec
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint5_dispdec
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint6_dispdec
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint7_dispdec
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dchubbubl_hubbub_dispdec
+//DCHUBBUB_ARB_DF_REQ_OUTSTAND
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MAX_REQ_OUTSTAND__SHIFT 0x0
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MIN_REQ_OUTSTAND__SHIFT 0xa
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD__SHIFT 0x16
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MAX_REQ_OUTSTAND_MASK 0x000001FFL
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MIN_REQ_OUTSTAND_MASK 0x0007FC00L
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD_MASK 0x7FC00000L
+//DCHUBBUB_ARB_SAT_LEVEL
+#define DCHUBBUB_ARB_SAT_LEVEL__DCHUBBUB_ARB_SAT_LEVEL__SHIFT 0x0
+#define DCHUBBUB_ARB_SAT_LEVEL__DCHUBBUB_ARB_SAT_LEVEL_MASK 0xFFFFFFFFL
+//DCHUBBUB_ARB_QOS_FORCE
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_QOS_FORCE_VALUE__SHIFT 0x0
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_QOS_FORCE_ENABLE__SHIFT 0x8
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_DO_NOT_FORCE_URGENCY_DURING_PSTATE_CHANGE_REQUEST__SHIFT 0x9
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_HOSTVM_STALL_QOS__SHIFT 0xc
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_QOS_FORCE_VALUE_MASK 0x0000000FL
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_QOS_FORCE_ENABLE_MASK 0x00000100L
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_DO_NOT_FORCE_URGENCY_DURING_PSTATE_CHANGE_REQUEST_MASK 0x00000200L
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_HOSTVM_STALL_QOS_MASK 0x0000F000L
+//DCHUBBUB_ARB_DRAM_STATE_CNTL
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE__SHIFT 0x0
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE__SHIFT 0x1
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_DO_NOT_FORCE_ALLOW_CSTATE_DURING_PSTATE_CHANGE_REQUEST__SHIFT 0x2
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE__SHIFT 0x4
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE__SHIFT 0x5
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__ENABLE_QOS_FORCE_PSTATE__SHIFT 0x7
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE__SHIFT 0xc
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_CSTATE_DEEPSLEEP_LEGACY_MODE__SHIFT 0xd
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DISABLE_HOSTVM_FORCE_DCFCLK_DEEP_SLEEP__SHIFT 0xf
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_DCFCLK_DEEP_SLEEP_FORCE_VALUE__SHIFT 0x10
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_DCFCLK_DEEP_SLEEP_FORCE_ENABLE__SHIFT 0x11
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_DCFCLK_DEEP_SLEEP_FORCE_LEGACY__SHIFT 0x12
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_DCFCLK_DEEP_SLEEP_HYSTERESIS__SHIFT 0x18
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE_MASK 0x00000001L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE_MASK 0x00000002L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_DO_NOT_FORCE_ALLOW_CSTATE_DURING_PSTATE_CHANGE_REQUEST_MASK 0x00000004L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE_MASK 0x00000010L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE_MASK 0x00000020L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__ENABLE_QOS_FORCE_PSTATE_MASK 0x00000080L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE_MASK 0x00001000L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_CSTATE_DEEPSLEEP_LEGACY_MODE_MASK 0x00002000L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DISABLE_HOSTVM_FORCE_DCFCLK_DEEP_SLEEP_MASK 0x00008000L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_DCFCLK_DEEP_SLEEP_FORCE_VALUE_MASK 0x00010000L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_DCFCLK_DEEP_SLEEP_FORCE_ENABLE_MASK 0x00020000L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_DCFCLK_DEEP_SLEEP_FORCE_LEGACY_MASK 0x00040000L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_DCFCLK_DEEP_SLEEP_HYSTERESIS_MASK 0xFF000000L
+//DCHUBBUB_ARB_USR_RETRAINING_CNTL
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__USR_RETRAINING_REQUEST__SHIFT 0x0
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__ALLOW_USR_RETRAINING__SHIFT 0x1
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_VALUE__SHIFT 0x8
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_ENABLE__SHIFT 0x9
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__DCHUBBUB_ARB_DO_NOT_FORCE_ALLOW_USR_RETRAINING_DURING_PSTATE_CHANGE_REQUEST__SHIFT 0xa
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__DCHUBBUB_ARB_DO_NOT_FORCE_ALLOW_USR_RETRAINING_DURING_PRE_CSTATE__SHIFT 0xb
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__USR_RETRAINING_REQUEST_MASK 0x00000001L
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__ALLOW_USR_RETRAINING_MASK 0x00000002L
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_VALUE_MASK 0x00000100L
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_ENABLE_MASK 0x00000200L
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__DCHUBBUB_ARB_DO_NOT_FORCE_ALLOW_USR_RETRAINING_DURING_PSTATE_CHANGE_REQUEST_MASK 0x00000400L
+#define DCHUBBUB_ARB_USR_RETRAINING_CNTL__DCHUBBUB_ARB_DO_NOT_FORCE_ALLOW_USR_RETRAINING_DURING_PRE_CSTATE_MASK 0x00000800L
+//DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A_MASK 0x00003FFFL
+//DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A
+#define DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A__DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A__DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A_MASK 0x00003FFFL
+//DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A__SHIFT 0x0
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A_MASK 0x00003FFFL
+//DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A_MASK 0x000FFFFFL
+//DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A_MASK 0x000FFFFFL
+//DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A
+#define DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A__DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A__DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A
+#define DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A__DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A__DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_FRAC_URG_BW_NOM_A
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_A__DCHUBBUB_ARB_FRAC_URG_BW_NOM_A__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_A__DCHUBBUB_ARB_FRAC_URG_BW_NOM_A_MASK 0x000003FFL
+//DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A_MASK 0x000003FFL
+//DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B_MASK 0x00003FFFL
+//DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B
+#define DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B__DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B__DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B_MASK 0x00003FFFL
+//DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B__SHIFT 0x0
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B_MASK 0x00003FFFL
+//DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B_MASK 0x000FFFFFL
+//DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B_MASK 0x000FFFFFL
+//DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B
+#define DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B__DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B__DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B
+#define DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B__DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B__DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_FRAC_URG_BW_NOM_B
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_B__DCHUBBUB_ARB_FRAC_URG_BW_NOM_B__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_B__DCHUBBUB_ARB_FRAC_URG_BW_NOM_B_MASK 0x000003FFL
+//DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B_MASK 0x000003FFL
+//DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C_MASK 0x00003FFFL
+//DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C
+#define DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C__DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C__DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C_MASK 0x00003FFFL
+//DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C__SHIFT 0x0
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C_MASK 0x00003FFFL
+//DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C_MASK 0x000FFFFFL
+//DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C_MASK 0x000FFFFFL
+//DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C
+#define DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C__DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C__DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C
+#define DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C__DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C__DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_FRAC_URG_BW_NOM_C
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_C__DCHUBBUB_ARB_FRAC_URG_BW_NOM_C__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_C__DCHUBBUB_ARB_FRAC_URG_BW_NOM_C_MASK 0x000003FFL
+//DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C_MASK 0x000003FFL
+//DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D_MASK 0x00003FFFL
+//DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D
+#define DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D__DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D__DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D_MASK 0x00003FFFL
+//DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D__SHIFT 0x0
+#define DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D__DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D_MASK 0x00003FFFL
+//DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D_MASK 0x000FFFFFL
+//DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D__DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D_MASK 0x000FFFFFL
+//DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D
+#define DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D__DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D__DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D
+#define DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D__DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D__DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D_MASK 0x0000FFFFL
+//DCHUBBUB_ARB_FRAC_URG_BW_NOM_D
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_D__DCHUBBUB_ARB_FRAC_URG_BW_NOM_D__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_NOM_D__DCHUBBUB_ARB_FRAC_URG_BW_NOM_D_MASK 0x000003FFL
+//DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D__SHIFT 0x0
+#define DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D__DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D_MASK 0x000003FFL
+//DCHUBBUB_ARB_HOSTVM_CNTL
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DISABLE_HOSTVM_FORCE_CSTATE__SHIFT 0x0
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DISABLE_HOSTVM_FORCE_ALLOW_PSTATE__SHIFT 0x1
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DISABLE_HOSTVM_FORCE_ALLOW_USR_RETRAINING__SHIFT 0x2
+#define DCHUBBUB_ARB_HOSTVM_CNTL__PRQ_SLACK_MASK__SHIFT 0x3
+#define DCHUBBUB_ARB_HOSTVM_CNTL__PRQ_SPACE_OK_STATUS__SHIFT 0x4
+#define DCHUBBUB_ARB_HOSTVM_CNTL__PRQ_GID_FREE_STATUS__SHIFT 0x5
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DCHVM_RET_FIFO_FREE_STATUS__SHIFT 0x6
+#define DCHUBBUB_ARB_HOSTVM_CNTL__NON_PRQ_CLIENT_WINNER_STATUS__SHIFT 0x7
+#define DCHUBBUB_ARB_HOSTVM_CNTL__HOSTVM_MAX_ALLOCATED_GROUPS__SHIFT 0x8
+#define DCHUBBUB_ARB_HOSTVM_CNTL__HOSTVM_MAX_RD_FIFO_ENTRIES__SHIFT 0x10
+#define DCHUBBUB_ARB_HOSTVM_CNTL__HOSTVM_QOS__SHIFT 0x18
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD__SHIFT 0x1c
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DISABLE_HOSTVM_FORCE_CSTATE_MASK 0x00000001L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DISABLE_HOSTVM_FORCE_ALLOW_PSTATE_MASK 0x00000002L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DISABLE_HOSTVM_FORCE_ALLOW_USR_RETRAINING_MASK 0x00000004L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__PRQ_SLACK_MASK_MASK 0x00000008L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__PRQ_SPACE_OK_STATUS_MASK 0x00000010L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__PRQ_GID_FREE_STATUS_MASK 0x00000020L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DCHVM_RET_FIFO_FREE_STATUS_MASK 0x00000040L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__NON_PRQ_CLIENT_WINNER_STATUS_MASK 0x00000080L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__HOSTVM_MAX_ALLOCATED_GROUPS_MASK 0x00003F00L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__HOSTVM_MAX_RD_FIFO_ENTRIES_MASK 0x00FF0000L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__HOSTVM_QOS_MASK 0x0F000000L
+#define DCHUBBUB_ARB_HOSTVM_CNTL__DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD_MASK 0xF0000000L
+//DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_SELECT__SHIFT 0x0
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE__SHIFT 0x4
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_STATUS__SHIFT 0x5
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST__SHIFT 0x8
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_SELECT_Z8__SHIFT 0x10
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__PSTATE_CHANGE_TYPE__SHIFT 0x18
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_SELECT_MASK 0x00000003L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE_MASK 0x00000010L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_STATUS_MASK 0x00000020L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST_MASK 0x00000100L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_SELECT_Z8_MASK 0x00010000L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__PSTATE_CHANGE_TYPE_MASK 0x01000000L
+//DCHUBBUB_ARB_MALL_CNTL
+#define DCHUBBUB_ARB_MALL_CNTL__GLOBAL_USE_MALL_FOR_SS__SHIFT 0x0
+#define DCHUBBUB_ARB_MALL_CNTL__MALL_IN_USE__SHIFT 0x4
+#define DCHUBBUB_ARB_MALL_CNTL__MALL_PREFETCH_COMPLETE__SHIFT 0x5
+#define DCHUBBUB_ARB_MALL_CNTL__GLOBAL_USE_MALL_FOR_SS_MASK 0x00000001L
+#define DCHUBBUB_ARB_MALL_CNTL__MALL_IN_USE_MASK 0x00000010L
+#define DCHUBBUB_ARB_MALL_CNTL__MALL_PREFETCH_COMPLETE_MASK 0x00000020L
+//DCHUBBUB_ARB_TIMEOUT_ENABLE
+#define DCHUBBUB_ARB_TIMEOUT_ENABLE__DCHUBBUB_ARB_TIMEOUT_ENABLE__SHIFT 0x0
+#define DCHUBBUB_ARB_TIMEOUT_ENABLE__DCHUBBUB_ARB_TIMEOUT_ENABLE_MASK 0x00000001L
+//DCHUBBUB_GLOBAL_TIMER_CNTL
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_REFDIV__SHIFT 0x0
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_ENABLE__SHIFT 0xc
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_INIT__SHIFT 0x10
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_REFDIV_MASK 0x0000000FL
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_ENABLE_MASK 0x00001000L
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_INIT_MASK 0xFFFF0000L
+//SURFACE_CHECK0_ADDRESS_LSB
+#define SURFACE_CHECK0_ADDRESS_LSB__SURFACE_CHECK0_ADDRESS_LSB__SHIFT 0x0
+#define SURFACE_CHECK0_ADDRESS_LSB__SURFACE_CHECK0_ADDRESS_LSB_MASK 0xFFFFFFFFL
+//SURFACE_CHECK0_ADDRESS_MSB
+#define SURFACE_CHECK0_ADDRESS_MSB__SURFACE_CHECK0_ADDRESS_MSB__SHIFT 0x0
+#define SURFACE_CHECK0_ADDRESS_MSB__CHECKER0_SURFACE_INUSE__SHIFT 0x1f
+#define SURFACE_CHECK0_ADDRESS_MSB__SURFACE_CHECK0_ADDRESS_MSB_MASK 0x0000FFFFL
+#define SURFACE_CHECK0_ADDRESS_MSB__CHECKER0_SURFACE_INUSE_MASK 0x80000000L
+//SURFACE_CHECK1_ADDRESS_LSB
+#define SURFACE_CHECK1_ADDRESS_LSB__SURFACE_CHECK1_ADDRESS_LSB__SHIFT 0x0
+#define SURFACE_CHECK1_ADDRESS_LSB__SURFACE_CHECK1_ADDRESS_LSB_MASK 0xFFFFFFFFL
+//SURFACE_CHECK1_ADDRESS_MSB
+#define SURFACE_CHECK1_ADDRESS_MSB__SURFACE_CHECK1_ADDRESS_MSB__SHIFT 0x0
+#define SURFACE_CHECK1_ADDRESS_MSB__CHECKER1_SURFACE_INUSE__SHIFT 0x1f
+#define SURFACE_CHECK1_ADDRESS_MSB__SURFACE_CHECK1_ADDRESS_MSB_MASK 0x0000FFFFL
+#define SURFACE_CHECK1_ADDRESS_MSB__CHECKER1_SURFACE_INUSE_MASK 0x80000000L
+//SURFACE_CHECK2_ADDRESS_LSB
+#define SURFACE_CHECK2_ADDRESS_LSB__SURFACE_CHECK2_ADDRESS_LSB__SHIFT 0x0
+#define SURFACE_CHECK2_ADDRESS_LSB__SURFACE_CHECK2_ADDRESS_LSB_MASK 0xFFFFFFFFL
+//SURFACE_CHECK2_ADDRESS_MSB
+#define SURFACE_CHECK2_ADDRESS_MSB__SURFACE_CHECK2_ADDRESS_MSB__SHIFT 0x0
+#define SURFACE_CHECK2_ADDRESS_MSB__CHECKER2_SURFACE_INUSE__SHIFT 0x1f
+#define SURFACE_CHECK2_ADDRESS_MSB__SURFACE_CHECK2_ADDRESS_MSB_MASK 0x0000FFFFL
+#define SURFACE_CHECK2_ADDRESS_MSB__CHECKER2_SURFACE_INUSE_MASK 0x80000000L
+//SURFACE_CHECK3_ADDRESS_LSB
+#define SURFACE_CHECK3_ADDRESS_LSB__SURFACE_CHECK3_ADDRESS_LSB__SHIFT 0x0
+#define SURFACE_CHECK3_ADDRESS_LSB__SURFACE_CHECK3_ADDRESS_LSB_MASK 0xFFFFFFFFL
+//SURFACE_CHECK3_ADDRESS_MSB
+#define SURFACE_CHECK3_ADDRESS_MSB__SURFACE_CHECK3_ADDRESS_MSB__SHIFT 0x0
+#define SURFACE_CHECK3_ADDRESS_MSB__CHECKER3_SURFACE_INUSE__SHIFT 0x1f
+#define SURFACE_CHECK3_ADDRESS_MSB__SURFACE_CHECK3_ADDRESS_MSB_MASK 0x0000FFFFL
+#define SURFACE_CHECK3_ADDRESS_MSB__CHECKER3_SURFACE_INUSE_MASK 0x80000000L
+//VTG0_CONTROL
+#define VTG0_CONTROL__VTG0_FP2__SHIFT 0x0
+#define VTG0_CONTROL__VTG0_VCOUNT_INIT__SHIFT 0x10
+#define VTG0_CONTROL__VTG0_ENABLE__SHIFT 0x1f
+#define VTG0_CONTROL__VTG0_FP2_MASK 0x00007FFFL
+#define VTG0_CONTROL__VTG0_VCOUNT_INIT_MASK 0x7FFF0000L
+#define VTG0_CONTROL__VTG0_ENABLE_MASK 0x80000000L
+//VTG1_CONTROL
+#define VTG1_CONTROL__VTG1_FP2__SHIFT 0x0
+#define VTG1_CONTROL__VTG1_VCOUNT_INIT__SHIFT 0x10
+#define VTG1_CONTROL__VTG1_ENABLE__SHIFT 0x1f
+#define VTG1_CONTROL__VTG1_FP2_MASK 0x00007FFFL
+#define VTG1_CONTROL__VTG1_VCOUNT_INIT_MASK 0x7FFF0000L
+#define VTG1_CONTROL__VTG1_ENABLE_MASK 0x80000000L
+//VTG2_CONTROL
+#define VTG2_CONTROL__VTG2_FP2__SHIFT 0x0
+#define VTG2_CONTROL__VTG2_VCOUNT_INIT__SHIFT 0x10
+#define VTG2_CONTROL__VTG2_ENABLE__SHIFT 0x1f
+#define VTG2_CONTROL__VTG2_FP2_MASK 0x00007FFFL
+#define VTG2_CONTROL__VTG2_VCOUNT_INIT_MASK 0x7FFF0000L
+#define VTG2_CONTROL__VTG2_ENABLE_MASK 0x80000000L
+//VTG3_CONTROL
+#define VTG3_CONTROL__VTG3_FP2__SHIFT 0x0
+#define VTG3_CONTROL__VTG3_VCOUNT_INIT__SHIFT 0x10
+#define VTG3_CONTROL__VTG3_ENABLE__SHIFT 0x1f
+#define VTG3_CONTROL__VTG3_FP2_MASK 0x00007FFFL
+#define VTG3_CONTROL__VTG3_VCOUNT_INIT_MASK 0x7FFF0000L
+#define VTG3_CONTROL__VTG3_ENABLE_MASK 0x80000000L
+//DCHUBBUB_SOFT_RESET
+#define DCHUBBUB_SOFT_RESET__DCHUBBUB_GLOBAL_SOFT_RESET__SHIFT 0x0
+#define DCHUBBUB_SOFT_RESET__ALLOW_CSTATE_SOFT_RESET__SHIFT 0x1
+#define DCHUBBUB_SOFT_RESET__GLBFLIP_SOFT_RESET__SHIFT 0x4
+#define DCHUBBUB_SOFT_RESET__DCHUBBUB_GLOBAL_SOFT_RESET_MASK 0x00000001L
+#define DCHUBBUB_SOFT_RESET__ALLOW_CSTATE_SOFT_RESET_MASK 0x00000002L
+#define DCHUBBUB_SOFT_RESET__GLBFLIP_SOFT_RESET_MASK 0x00000010L
+//DCHUBBUB_CLOCK_CNTL
+#define DCHUBBUB_CLOCK_CNTL__DCHUBBUB_TEST_CLK_SEL__SHIFT 0x0
+#define DCHUBBUB_CLOCK_CNTL__DISPCLK_R_DCHUBBUB_GATE_DIS__SHIFT 0x5
+#define DCHUBBUB_CLOCK_CNTL__DCFCLK_R_DCHUBBUB_GATE_DIS__SHIFT 0x6
+#define DCHUBBUB_CLOCK_CNTL__DCHUBBUB_FGCG_REP_DIS__SHIFT 0x7
+#define DCHUBBUB_CLOCK_CNTL__DCHUBBUB_TEST_CLK_SEL_MASK 0x0000001FL
+#define DCHUBBUB_CLOCK_CNTL__DISPCLK_R_DCHUBBUB_GATE_DIS_MASK 0x00000020L
+#define DCHUBBUB_CLOCK_CNTL__DCFCLK_R_DCHUBBUB_GATE_DIS_MASK 0x00000040L
+#define DCHUBBUB_CLOCK_CNTL__DCHUBBUB_FGCG_REP_DIS_MASK 0x00000080L
+//DCFCLK_CNTL
+#define DCFCLK_CNTL__DCFCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DCFCLK_CNTL__DCFCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DCFCLK_CNTL__DCFCLK_GATE_DIS__SHIFT 0x1f
+#define DCFCLK_CNTL__DCFCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DCFCLK_CNTL__DCFCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+#define DCFCLK_CNTL__DCFCLK_GATE_DIS_MASK 0x80000000L
+//DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__DCHUBBUB_LATENCY_CNT_EN__SHIFT 0x0
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__DCHUBBUB_LATENCY_EVENT_SHORT_PULSE_FILTER_EN__SHIFT 0x1
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__DCHUBBUB_DF_REQ_CMD_LATENCY_SEL__SHIFT 0x2
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__ARB_LATENCY_PIPE_SEL__SHIFT 0x3
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__ARB_LATENCY_REQ_TYPE_SEL__SHIFT 0x7
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__DF_LATENCY_URGENT_ONLY__SHIFT 0xa
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__ROB_FIFO_LEVEL__SHIFT 0xb
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__DCHUBBUB_LATENCY_CNT_EN_MASK 0x00000001L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__DCHUBBUB_LATENCY_EVENT_SHORT_PULSE_FILTER_EN_MASK 0x00000002L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__DCHUBBUB_DF_REQ_CMD_LATENCY_SEL_MASK 0x00000004L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__ARB_LATENCY_PIPE_SEL_MASK 0x00000078L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__ARB_LATENCY_REQ_TYPE_SEL_MASK 0x00000380L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__DF_LATENCY_URGENT_ONLY_MASK 0x00000400L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL__ROB_FIFO_LEVEL_MASK 0x003FF800L
+//DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__DCHUBBUB_LATENCY_FRAME_WIN_EN__SHIFT 0x0
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__DCHUBBUB_LATENCY_FRAME_WIN_SRC_SEL__SHIFT 0x1
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__DCHUBBUB_LATENCY_FRAME_WIN_DUR__SHIFT 0x4
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__LATENCY_SOURCE_SEL__SHIFT 0xc
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__ROB_MAX_FIFO_LEVEL__SHIFT 0x14
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__ROB_MAX_FIFO_LEVEL_RESET__SHIFT 0x1f
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__DCHUBBUB_LATENCY_FRAME_WIN_EN_MASK 0x00000001L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__DCHUBBUB_LATENCY_FRAME_WIN_SRC_SEL_MASK 0x0000000EL
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__DCHUBBUB_LATENCY_FRAME_WIN_DUR_MASK 0x00000FF0L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__LATENCY_SOURCE_SEL_MASK 0x00007000L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__ROB_MAX_FIFO_LEVEL_MASK 0x7FF00000L
+#define DCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2__ROB_MAX_FIFO_LEVEL_RESET_MASK 0x80000000L
+//DCHUBBUB_VLINE_SNAPSHOT
+#define DCHUBBUB_VLINE_SNAPSHOT__DCHUBBUB_VLINE_SNAPSHOT__SHIFT 0x0
+#define DCHUBBUB_VLINE_SNAPSHOT__DCHUBBUB_VLINE_SNAPSHOT_MASK 0x00000001L
+//DCHUBBUB_CTRL_STATUS
+#define DCHUBBUB_CTRL_STATUS__URGENT_ZERO_SIZE_REQ_EN__SHIFT 0x0
+#define DCHUBBUB_CTRL_STATUS__ROB_OVERFLOW_STATUS__SHIFT 0x2
+#define DCHUBBUB_CTRL_STATUS__ROB_OVERFLOW_CLEAR__SHIFT 0x3
+#define DCHUBBUB_CTRL_STATUS__CSTATE_SWATH_CHK_GOOD_MODE__SHIFT 0x1f
+#define DCHUBBUB_CTRL_STATUS__URGENT_ZERO_SIZE_REQ_EN_MASK 0x00000001L
+#define DCHUBBUB_CTRL_STATUS__ROB_OVERFLOW_STATUS_MASK 0x00000004L
+#define DCHUBBUB_CTRL_STATUS__ROB_OVERFLOW_CLEAR_MASK 0x00000008L
+#define DCHUBBUB_CTRL_STATUS__CSTATE_SWATH_CHK_GOOD_MODE_MASK 0x80000000L
+//DCHUBBUB_TIMEOUT_DETECTION_CTRL1
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_ERROR_STATUS__SHIFT 0x0
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_REQ_STALL_THRESHOLD__SHIFT 0x6
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_ERROR_STATUS_MASK 0x0000003FL
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_REQ_STALL_THRESHOLD_MASK 0xFFFFFFC0L
+//DCHUBBUB_TIMEOUT_DETECTION_CTRL2
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_PSTATE_STALL_THRESHOLD__SHIFT 0x0
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_DETECTION_EN__SHIFT 0x1b
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_TIMER_RESET__SHIFT 0x1c
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_PSTATE_STALL_THRESHOLD_MASK 0x07FFFFFFL
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_DETECTION_EN_MASK 0x08000000L
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_TIMER_RESET_MASK 0x10000000L
+//DCHUBBUB_TIMEOUT_INTERRUPT_STATUS
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_ENABLE__SHIFT 0x0
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_STATUS__SHIFT 0x1
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_CLEAR__SHIFT 0x2
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_MASK__SHIFT 0x3
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_ENABLE_MASK 0x00000001L
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_STATUS_MASK 0x00000002L
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_CLEAR_MASK 0x00000004L
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_MASK_MASK 0x000000F8L
+//FMON_CTRL
+#define FMON_CTRL__FMON_START__SHIFT 0x0
+#define FMON_CTRL__FMON_MODE__SHIFT 0x1
+#define FMON_CTRL__FMON_PSTATE_IGNORE__SHIFT 0x4
+#define FMON_CTRL__FMON_STATUS_IGNORE__SHIFT 0x5
+#define FMON_CTRL__FMON_URG_MODE_GREATER__SHIFT 0x6
+#define FMON_CTRL__FMON_FILTER_UID_EN__SHIFT 0x7
+#define FMON_CTRL__FMON_STATE__SHIFT 0x9
+#define FMON_CTRL__FMON_URG_FILTER__SHIFT 0xc
+#define FMON_CTRL__FMON_URG_THRESHOLD__SHIFT 0xd
+#define FMON_CTRL__FMON_FILTER_UID_1__SHIFT 0x11
+#define FMON_CTRL__FMON_FILTER_UID_2__SHIFT 0x16
+#define FMON_CTRL__FMON_SOF_SEL__SHIFT 0x1b
+#define FMON_CTRL__FMON_START_MASK 0x00000001L
+#define FMON_CTRL__FMON_MODE_MASK 0x00000006L
+#define FMON_CTRL__FMON_PSTATE_IGNORE_MASK 0x00000010L
+#define FMON_CTRL__FMON_STATUS_IGNORE_MASK 0x00000020L
+#define FMON_CTRL__FMON_URG_MODE_GREATER_MASK 0x00000040L
+#define FMON_CTRL__FMON_FILTER_UID_EN_MASK 0x00000180L
+#define FMON_CTRL__FMON_STATE_MASK 0x00000600L
+#define FMON_CTRL__FMON_URG_FILTER_MASK 0x00001000L
+#define FMON_CTRL__FMON_URG_THRESHOLD_MASK 0x0001E000L
+#define FMON_CTRL__FMON_FILTER_UID_1_MASK 0x003E0000L
+#define FMON_CTRL__FMON_FILTER_UID_2_MASK 0x07C00000L
+#define FMON_CTRL__FMON_SOF_SEL_MASK 0x38000000L
+
+
+// addressBlock: dce_dc_dchubbubl_hubbub_sdpif_dispdec
+//DCHUBBUB_SDPIF_CFG0
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_NO_OUTSTANDING_REQ__SHIFT 0x0
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_PORT_STATUS__SHIFT 0x1
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_DATA_RESPONSE_STATUS__SHIFT 0x3
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_RESPONSE_STATUS__SHIFT 0x6
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_ERROR__SHIFT 0xa
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_RESPONSE_STATUS_CLEAR__SHIFT 0xb
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_ERROR_CLEAR__SHIFT 0xc
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_FLUSH_REQ_CREDIT_EN__SHIFT 0xd
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_EN__SHIFT 0xe
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_PORT_CONTROL__SHIFT 0xf
+#define DCHUBBUB_SDPIF_CFG0__DF_CSTATE_DISALLOW__SHIFT 0x10
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_CREDIT_DISCONNECT_DELAY__SHIFT 0x19
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_NO_OUTSTANDING_REQ_MASK 0x00000001L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_PORT_STATUS_MASK 0x00000006L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_DATA_RESPONSE_STATUS_MASK 0x00000038L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_RESPONSE_STATUS_MASK 0x000003C0L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_ERROR_MASK 0x00000400L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_RESPONSE_STATUS_CLEAR_MASK 0x00000800L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_ERROR_CLEAR_MASK 0x00001000L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_FLUSH_REQ_CREDIT_EN_MASK 0x00002000L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_EN_MASK 0x00004000L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_PORT_CONTROL_MASK 0x00008000L
+#define DCHUBBUB_SDPIF_CFG0__DF_CSTATE_DISALLOW_MASK 0x00010000L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_CREDIT_DISCONNECT_DELAY_MASK 0x7E000000L
+//DCHUBBUB_SDPIF_CFG1
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_DETECT_EN__SHIFT 0x0
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_STATUS__SHIFT 0x1
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_STATUS_CLEAR__SHIFT 0x2
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_FORCE_SNOOP__SHIFT 0x8
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_MAX_NUM_OUTSTANDING__SHIFT 0x9
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_DETECT_EN_MASK 0x00000001L
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_STATUS_MASK 0x00000002L
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_STATUS_CLEAR_MASK 0x00000004L
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_FORCE_SNOOP_MASK 0x00000100L
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_MAX_NUM_OUTSTANDING_MASK 0x00000200L
+//DCHUBBUB_SDPIF_CFG2
+#define DCHUBBUB_SDPIF_CFG2__dGPU_ADDR_PRESENT__SHIFT 0x0
+#define DCHUBBUB_SDPIF_CFG2__SDPIF_HOSTVM_SEC_LVL__SHIFT 0x8
+#define DCHUBBUB_SDPIF_CFG2__SDPIF_UNIT_ID_BITMASK__SHIFT 0x10
+#define DCHUBBUB_SDPIF_CFG2__dGPU_ADDR_PRESENT_MASK 0x00000001L
+#define DCHUBBUB_SDPIF_CFG2__SDPIF_HOSTVM_SEC_LVL_MASK 0x00000F00L
+#define DCHUBBUB_SDPIF_CFG2__SDPIF_UNIT_ID_BITMASK_MASK 0x01FF0000L
+//VM_REQUEST_PHYSICAL
+#define VM_REQUEST_PHYSICAL__PDE_REQUEST_PHYSICAL__SHIFT 0x0
+#define VM_REQUEST_PHYSICAL__PTE_REQUEST_PHYSICAL__SHIFT 0x3
+#define VM_REQUEST_PHYSICAL__PDE_REQUEST_PHYSICAL_MASK 0x00000001L
+#define VM_REQUEST_PHYSICAL__PTE_REQUEST_PHYSICAL_MASK 0x00000008L
+//DCHUBBUB_FORCE_IO_STATUS_0
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS__SHIFT 0x0
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_STICKY__SHIFT 0x1
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_CLEAR__SHIFT 0x2
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_PIPE_ID__SHIFT 0x3
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_REQUEST_TYPE__SHIFT 0x7
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_ADDR_LO__SHIFT 0xa
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_MASK 0x00000001L
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_STICKY_MASK 0x00000002L
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_CLEAR_MASK 0x00000004L
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_PIPE_ID_MASK 0x00000078L
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_REQUEST_TYPE_MASK 0x00000380L
+#define DCHUBBUB_FORCE_IO_STATUS_0__SDPIF_FORCE_IO_STATUS_ADDR_LO_MASK 0xFFFFFC00L
+//DCHUBBUB_FORCE_IO_STATUS_1
+#define DCHUBBUB_FORCE_IO_STATUS_1__SDPIF_FORCE_IO_STATUS_ADDR_HI__SHIFT 0x0
+#define DCHUBBUB_FORCE_IO_STATUS_1__SDPIF_FORCE_IO_STATUS_ADDR_HI_MASK 0x001FFFFFL
+//DCN_VM_FB_LOCATION_BASE
+#define DCN_VM_FB_LOCATION_BASE__FB_BASE__SHIFT 0x0
+#define DCN_VM_FB_LOCATION_BASE__FB_BASE_MASK 0x00FFFFFFL
+//DCN_VM_FB_LOCATION_TOP
+#define DCN_VM_FB_LOCATION_TOP__FB_TOP__SHIFT 0x0
+#define DCN_VM_FB_LOCATION_TOP__FB_TOP_MASK 0x00FFFFFFL
+//DCN_VM_FB_OFFSET
+#define DCN_VM_FB_OFFSET__FB_OFFSET__SHIFT 0x0
+#define DCN_VM_FB_OFFSET__FB_OFFSET_MASK 0x00FFFFFFL
+//DCN_VM_AGP_BOT
+#define DCN_VM_AGP_BOT__AGP_BOT__SHIFT 0x0
+#define DCN_VM_AGP_BOT__AGP_BOT_MASK 0x00FFFFFFL
+//DCN_VM_AGP_TOP
+#define DCN_VM_AGP_TOP__AGP_TOP__SHIFT 0x0
+#define DCN_VM_AGP_TOP__AGP_TOP_MASK 0x00FFFFFFL
+//DCN_VM_AGP_BASE
+#define DCN_VM_AGP_BASE__AGP_BASE__SHIFT 0x0
+#define DCN_VM_AGP_BASE__AGP_BASE_MASK 0x00FFFFFFL
+//DCN_VM_LOCAL_HBM_ADDRESS_START
+#define DCN_VM_LOCAL_HBM_ADDRESS_START__ADDRESS_START__SHIFT 0x0
+#define DCN_VM_LOCAL_HBM_ADDRESS_START__ADDRESS_START_MASK 0x000FFFFFL
+//DCN_VM_LOCAL_HBM_ADDRESS_END
+#define DCN_VM_LOCAL_HBM_ADDRESS_END__ADDRESS_END__SHIFT 0x0
+#define DCN_VM_LOCAL_HBM_ADDRESS_END__ADDRESS_END_MASK 0x000FFFFFL
+//DCN_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL
+#define DCN_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK__SHIFT 0x0
+#define DCN_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK_MASK 0x00000001L
+//DCHUBBUB_SDPIF_PIPE_SEC_LVL
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE0_SEC_LVL__SHIFT 0x0
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE1_SEC_LVL__SHIFT 0x4
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE2_SEC_LVL__SHIFT 0x8
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE3_SEC_LVL__SHIFT 0xc
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE0_SEC_LVL_MASK 0x0000000FL
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE1_SEC_LVL_MASK 0x000000F0L
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE2_SEC_LVL_MASK 0x00000F00L
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE3_SEC_LVL_MASK 0x0000F000L
+//DCHUBBUB_SDPIF_PIPE_NOALLOC
+#define DCHUBBUB_SDPIF_PIPE_NOALLOC__SDPIF_PIPE0_NOALLOC__SHIFT 0x0
+#define DCHUBBUB_SDPIF_PIPE_NOALLOC__SDPIF_PIPE1_NOALLOC__SHIFT 0x1
+#define DCHUBBUB_SDPIF_PIPE_NOALLOC__SDPIF_PIPE2_NOALLOC__SHIFT 0x2
+#define DCHUBBUB_SDPIF_PIPE_NOALLOC__SDPIF_PIPE3_NOALLOC__SHIFT 0x3
+#define DCHUBBUB_SDPIF_PIPE_NOALLOC__SDPIF_PIPE0_NOALLOC_MASK 0x00000001L
+#define DCHUBBUB_SDPIF_PIPE_NOALLOC__SDPIF_PIPE1_NOALLOC_MASK 0x00000002L
+#define DCHUBBUB_SDPIF_PIPE_NOALLOC__SDPIF_PIPE2_NOALLOC_MASK 0x00000004L
+#define DCHUBBUB_SDPIF_PIPE_NOALLOC__SDPIF_PIPE3_NOALLOC_MASK 0x00000008L
+//DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE0_DMDATA_SEC_LVL__SHIFT 0x0
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE1_DMDATA_SEC_LVL__SHIFT 0x4
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE2_DMDATA_SEC_LVL__SHIFT 0x8
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE3_DMDATA_SEC_LVL__SHIFT 0xc
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE0_DMDATA_SEC_LVL_MASK 0x0000000FL
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE1_DMDATA_SEC_LVL_MASK 0x000000F0L
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE2_DMDATA_SEC_LVL_MASK 0x00000F00L
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE3_DMDATA_SEC_LVL_MASK 0x0000F000L
+//DCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL
+#define DCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL__SDPIF_PIPE0_DCCMETA_SEC_LVL__SHIFT 0x0
+#define DCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL__SDPIF_PIPE1_DCCMETA_SEC_LVL__SHIFT 0x4
+#define DCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL__SDPIF_PIPE2_DCCMETA_SEC_LVL__SHIFT 0x8
+#define DCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL__SDPIF_PIPE3_DCCMETA_SEC_LVL__SHIFT 0xc
+#define DCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL__SDPIF_PIPE0_DCCMETA_SEC_LVL_MASK 0x0000000FL
+#define DCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL__SDPIF_PIPE1_DCCMETA_SEC_LVL_MASK 0x000000F0L
+#define DCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL__SDPIF_PIPE2_DCCMETA_SEC_LVL_MASK 0x00000F00L
+#define DCHUBBUB_SDPIF_PIPE_DCCMETA_SEC_LVL__SDPIF_PIPE3_DCCMETA_SEC_LVL_MASK 0x0000F000L
+//DCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL
+#define DCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL__SDPIF_PIPE0_CURSOR0_SEC_LVL__SHIFT 0x0
+#define DCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL__SDPIF_PIPE1_CURSOR0_SEC_LVL__SHIFT 0x4
+#define DCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL__SDPIF_PIPE2_CURSOR0_SEC_LVL__SHIFT 0x8
+#define DCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL__SDPIF_PIPE3_CURSOR0_SEC_LVL__SHIFT 0xc
+#define DCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL__SDPIF_PIPE0_CURSOR0_SEC_LVL_MASK 0x0000000FL
+#define DCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL__SDPIF_PIPE1_CURSOR0_SEC_LVL_MASK 0x000000F0L
+#define DCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL__SDPIF_PIPE2_CURSOR0_SEC_LVL_MASK 0x00000F00L
+#define DCHUBBUB_SDPIF_PIPE_CURSOR0_SEC_LVL__SDPIF_PIPE3_CURSOR0_SEC_LVL_MASK 0x0000F000L
+//DCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL
+#define DCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL__SDPIF_PIPE0_GPUVM_SEC_LVL__SHIFT 0x0
+#define DCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL__SDPIF_PIPE1_GPUVM_SEC_LVL__SHIFT 0x4
+#define DCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL__SDPIF_PIPE2_GPUVM_SEC_LVL__SHIFT 0x8
+#define DCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL__SDPIF_PIPE3_GPUVM_SEC_LVL__SHIFT 0xc
+#define DCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL__SDPIF_PIPE0_GPUVM_SEC_LVL_MASK 0x0000000FL
+#define DCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL__SDPIF_PIPE1_GPUVM_SEC_LVL_MASK 0x000000F0L
+#define DCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL__SDPIF_PIPE2_GPUVM_SEC_LVL_MASK 0x00000F00L
+#define DCHUBBUB_SDPIF_PIPE_GPUVM_SEC_LVL__SDPIF_PIPE3_GPUVM_SEC_LVL_MASK 0x0000F000L
+//SDPIF_REQUEST_RATE_LIMIT
+#define SDPIF_REQUEST_RATE_LIMIT__SDPIF_REQUEST_RATE_LIMIT__SHIFT 0x0
+#define SDPIF_REQUEST_RATE_LIMIT__SDPIF_REQUEST_RATE_LIMIT_MASK 0x00000FFFL
+//DCHUBBUB_SDPIF_MEM_PWR_CTRL
+#define DCHUBBUB_SDPIF_MEM_PWR_CTRL__DCHUBBUB_SDPIF_MEM_PWR_FORCE__SHIFT 0x0
+#define DCHUBBUB_SDPIF_MEM_PWR_CTRL__DCHUBBUB_SDPIF_MEM_PWR_DIS__SHIFT 0x2
+#define DCHUBBUB_SDPIF_MEM_PWR_CTRL__DCHUBBUB_SDPIF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DCHUBBUB_SDPIF_MEM_PWR_CTRL__DCHUBBUB_SDPIF_MEM_PWR_DIS_MASK 0x00000004L
+//DCHUBBUB_SDPIF_MEM_PWR_STATUS
+#define DCHUBBUB_SDPIF_MEM_PWR_STATUS__DCHUBBUB_SDPIF_MEM_PWR_STATE__SHIFT 0x0
+#define DCHUBBUB_SDPIF_MEM_PWR_STATUS__DCHUBBUB_SDPIF_MEM_PWR_STATE_MASK 0x00000003L
+
+
+// addressBlock: dce_dc_dchubbubl_hubbub_ret_path_dispdec
+//DCHUBBUB_RET_PATH_MEM_PWR_CTRL
+#define DCHUBBUB_RET_PATH_MEM_PWR_CTRL__DCHUBBUB_RET_PATH_MEM_PWR_FORCE__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_MEM_PWR_CTRL__DCHUBBUB_RET_PATH_MEM_PWR_DIS__SHIFT 0x2
+#define DCHUBBUB_RET_PATH_MEM_PWR_CTRL__DCHUBBUB_RET_PATH_MEM_PWR_FORCE_MASK 0x00000003L
+#define DCHUBBUB_RET_PATH_MEM_PWR_CTRL__DCHUBBUB_RET_PATH_MEM_PWR_DIS_MASK 0x00000004L
+//DCHUBBUB_RET_PATH_MEM_PWR_STATUS
+#define DCHUBBUB_RET_PATH_MEM_PWR_STATUS__DCHUBBUB_RET_PATH_MEM_PWR_STATE__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_MEM_PWR_STATUS__DCHUBBUB_RET_PATH_MEM_PWR_STATE_MASK 0x00000003L
+//DCHUBBUB_CRC_CTRL
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_EN__SHIFT 0x0
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_CONT_EN__SHIFT 0x1
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC0_ONE_SHOT_PENDING__SHIFT 0x2
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC1_ONE_SHOT_PENDING__SHIFT 0x3
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC0_SRC_SEL__SHIFT 0x4
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC1_SRC_SEL__SHIFT 0x6
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_PIPE_SEL__SHIFT 0x8
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_SURF_SEL__SHIFT 0xc
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_DATA_SRC_SEL__SHIFT 0x14
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_EN_MASK 0x00000001L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_CONT_EN_MASK 0x00000002L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC0_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC1_ONE_SHOT_PENDING_MASK 0x00000008L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC0_SRC_SEL_MASK 0x00000030L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC1_SRC_SEL_MASK 0x000000C0L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_PIPE_SEL_MASK 0x00000F00L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_SURF_SEL_MASK 0x00001000L
+#define DCHUBBUB_CRC_CTRL__DCHUBBUB_CRC_DATA_SRC_SEL_MASK 0x00100000L
+//DCHUBBUB_CRC0_VAL_R
+#define DCHUBBUB_CRC0_VAL_R__DCHUBBUB_CRC0_R_CR__SHIFT 0x0
+#define DCHUBBUB_CRC0_VAL_R__DCHUBBUB_CRC0_R_CR_MASK 0xFFFFFFFFL
+//DCHUBBUB_CRC0_VAL_G
+#define DCHUBBUB_CRC0_VAL_G__DCHUBBUB_CRC0_G_Y__SHIFT 0x0
+#define DCHUBBUB_CRC0_VAL_G__DCHUBBUB_CRC0_G_Y_MASK 0xFFFFFFFFL
+//DCHUBBUB_CRC0_VAL_B
+#define DCHUBBUB_CRC0_VAL_B__DCHUBBUB_CRC0_B_CB__SHIFT 0x0
+#define DCHUBBUB_CRC0_VAL_B__DCHUBBUB_CRC0_B_CB_MASK 0xFFFFFFFFL
+//DCHUBBUB_CRC0_VAL_A
+#define DCHUBBUB_CRC0_VAL_A__DCHUBBUB_CRC0_ALPHA__SHIFT 0x0
+#define DCHUBBUB_CRC0_VAL_A__DCHUBBUB_CRC0_ALPHA_MASK 0xFFFFFFFFL
+//DCHUBBUB_DCC_STAT_CNTL
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_MODE__SHIFT 0x0
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_EN__SHIFT 0x1
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_DONE__SHIFT 0x2
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_PIPE_SEL__SHIFT 0x4
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_FRAME_CNT__SHIFT 0x10
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_MODE_MASK 0x00000001L
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_EN_MASK 0x00000002L
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_DONE_MASK 0x00000004L
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_PIPE_SEL_MASK 0x000000F0L
+#define DCHUBBUB_DCC_STAT_CNTL__DCHUBBUB_DCC_STAT_FRAME_CNT_MASK 0xFFFF0000L
+//DCHUBBUB_DCC_STAT0
+#define DCHUBBUB_DCC_STAT0__DCHUBBUB_DCC_STAT_TOTAL_REQ__SHIFT 0x0
+#define DCHUBBUB_DCC_STAT0__DCHUBBUB_DCC_STAT_TOTAL_REQ_MASK 0xFFFFFFFFL
+//DCHUBBUB_DCC_STAT1
+#define DCHUBBUB_DCC_STAT1__DCHUBBUB_DCC_STAT_ZS_REQ__SHIFT 0x0
+#define DCHUBBUB_DCC_STAT1__DCHUBBUB_DCC_STAT_ZS_REQ_MASK 0xFFFFFFFFL
+//DCHUBBUB_DCC_STAT2
+#define DCHUBBUB_DCC_STAT2__DCHUBBUB_DCC_STAT_DCC_REQ__SHIFT 0x0
+#define DCHUBBUB_DCC_STAT2__DCHUBBUB_DCC_STAT_DCC_REQ_MASK 0xFFFFFFFFL
+//DCHUBBUB_COMPBUF_CTRL
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE__SHIFT 0x0
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_CURRENT__SHIFT 0x8
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_CHANGE_DONE_INT_ENABLE__SHIFT 0x10
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_CHANGE_DONE_INT_STATUS__SHIFT 0x12
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_CHANGE_DONE_INT_CLEAR__SHIFT 0x13
+#define DCHUBBUB_COMPBUF_CTRL__CONFIG_ERROR__SHIFT 0x1f
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_MASK 0x0000001FL
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_CURRENT_MASK 0x00001F00L
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_CHANGE_DONE_INT_ENABLE_MASK 0x00010000L
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_CHANGE_DONE_INT_STATUS_MASK 0x00040000L
+#define DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_CHANGE_DONE_INT_CLEAR_MASK 0x00080000L
+#define DCHUBBUB_COMPBUF_CTRL__CONFIG_ERROR_MASK 0x80000000L
+//DCHUBBUB_DET0_CTRL
+#define DCHUBBUB_DET0_CTRL__DET0_SIZE__SHIFT 0x0
+#define DCHUBBUB_DET0_CTRL__DET0_SIZE_CURRENT__SHIFT 0x8
+#define DCHUBBUB_DET0_CTRL__DET0_SIZE_MASK 0x0000001FL
+#define DCHUBBUB_DET0_CTRL__DET0_SIZE_CURRENT_MASK 0x00001F00L
+//DCHUBBUB_DET1_CTRL
+#define DCHUBBUB_DET1_CTRL__DET1_SIZE__SHIFT 0x0
+#define DCHUBBUB_DET1_CTRL__DET1_SIZE_CURRENT__SHIFT 0x8
+#define DCHUBBUB_DET1_CTRL__DET1_SIZE_MASK 0x0000001FL
+#define DCHUBBUB_DET1_CTRL__DET1_SIZE_CURRENT_MASK 0x00001F00L
+//DCHUBBUB_DET2_CTRL
+#define DCHUBBUB_DET2_CTRL__DET2_SIZE__SHIFT 0x0
+#define DCHUBBUB_DET2_CTRL__DET2_SIZE_CURRENT__SHIFT 0x8
+#define DCHUBBUB_DET2_CTRL__DET2_SIZE_MASK 0x0000001FL
+#define DCHUBBUB_DET2_CTRL__DET2_SIZE_CURRENT_MASK 0x00001F00L
+//DCHUBBUB_DET3_CTRL
+#define DCHUBBUB_DET3_CTRL__DET3_SIZE__SHIFT 0x0
+#define DCHUBBUB_DET3_CTRL__DET3_SIZE_CURRENT__SHIFT 0x8
+#define DCHUBBUB_DET3_CTRL__DET3_SIZE_MASK 0x0000001FL
+#define DCHUBBUB_DET3_CTRL__DET3_SIZE_CURRENT_MASK 0x00001F00L
+//DCHUBBUB_MEM_PWR_MODE_CTRL
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__COMPBUF_ACCESS_MEM_PWR_MODE__SHIFT 0x0
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__COMPBUF_ACTIVE_MEM_PWR_MODE__SHIFT 0x2
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__COMPBUF_IDLE_MEM_PWR_MODE__SHIFT 0x4
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__METAFIFO_MEM_PWR_FORCE__SHIFT 0x6
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DCC_SKID_MEM_PWR_FORCE__SHIFT 0x8
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__UNALLOCATED_MEM_PWR_MODE__SHIFT 0xa
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DET_MEM_PWR_FORCE__SHIFT 0x10
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DET_IDLE_MEM_PWR_MODE__SHIFT 0x12
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DET_MEM_PWR_LS_MODE__SHIFT 0x14
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__SEGMENT_MEM_PWR_DIS__SHIFT 0x18
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__METAFIFO_MEM_PWR_DIS__SHIFT 0x19
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DCC_SKID_MEM_PWR_DIS__SHIFT 0x1a
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__COMPBUF_ACCESS_MEM_PWR_MODE_MASK 0x00000003L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__COMPBUF_ACTIVE_MEM_PWR_MODE_MASK 0x0000000CL
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__COMPBUF_IDLE_MEM_PWR_MODE_MASK 0x00000030L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__METAFIFO_MEM_PWR_FORCE_MASK 0x000000C0L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DCC_SKID_MEM_PWR_FORCE_MASK 0x00000300L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__UNALLOCATED_MEM_PWR_MODE_MASK 0x00000C00L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DET_MEM_PWR_FORCE_MASK 0x00030000L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DET_IDLE_MEM_PWR_MODE_MASK 0x000C0000L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DET_MEM_PWR_LS_MODE_MASK 0x00300000L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__SEGMENT_MEM_PWR_DIS_MASK 0x01000000L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__METAFIFO_MEM_PWR_DIS_MASK 0x02000000L
+#define DCHUBBUB_MEM_PWR_MODE_CTRL__DCC_SKID_MEM_PWR_DIS_MASK 0x04000000L
+//COMPBUF_MEM_PWR_CTRL_1
+#define COMPBUF_MEM_PWR_CTRL_1__COMPBUF_ACTIVE_WAKE_LATENCY__SHIFT 0x0
+#define COMPBUF_MEM_PWR_CTRL_1__COMPBUF_ACTIVE_SLEEP_LATENCY__SHIFT 0x8
+#define COMPBUF_MEM_PWR_CTRL_1__COMPBUF_IDLE_WAKE_LATENCY__SHIFT 0x10
+#define COMPBUF_MEM_PWR_CTRL_1__COMPBUF_IDLE_SLEEP_LATENCY__SHIFT 0x18
+#define COMPBUF_MEM_PWR_CTRL_1__COMPBUF_ACTIVE_WAKE_LATENCY_MASK 0x000000FFL
+#define COMPBUF_MEM_PWR_CTRL_1__COMPBUF_ACTIVE_SLEEP_LATENCY_MASK 0x0000FF00L
+#define COMPBUF_MEM_PWR_CTRL_1__COMPBUF_IDLE_WAKE_LATENCY_MASK 0x00FF0000L
+#define COMPBUF_MEM_PWR_CTRL_1__COMPBUF_IDLE_SLEEP_LATENCY_MASK 0xFF000000L
+//COMPBUF_MEM_PWR_CTRL_2
+#define COMPBUF_MEM_PWR_CTRL_2__COMPBUF_UNALLOCATED_WAKE_LATENCY__SHIFT 0x0
+#define COMPBUF_MEM_PWR_CTRL_2__COMPBUF_ACTIVE_ENTER_LATENCY__SHIFT 0x8
+#define COMPBUF_MEM_PWR_CTRL_2__COMPBUF_IDLE_ENTER_LATENCY__SHIFT 0xc
+#define COMPBUF_MEM_PWR_CTRL_2__COMPBUF_UNALLOCATED_WAKE_LATENCY_MASK 0x000000FFL
+#define COMPBUF_MEM_PWR_CTRL_2__COMPBUF_ACTIVE_ENTER_LATENCY_MASK 0x00000F00L
+#define COMPBUF_MEM_PWR_CTRL_2__COMPBUF_IDLE_ENTER_LATENCY_MASK 0x0000F000L
+//DCHUBBUB_MEM_PWR_STATUS
+#define DCHUBBUB_MEM_PWR_STATUS__COMPBUF_MEM_PWR_STATE__SHIFT 0x0
+#define DCHUBBUB_MEM_PWR_STATUS__METAFIFO_MEM_PWR_STATE__SHIFT 0x2
+#define DCHUBBUB_MEM_PWR_STATUS__UNALLOCATED_MEM_PWR_STATE__SHIFT 0x4
+#define DCHUBBUB_MEM_PWR_STATUS__DCC_SKID_MEM_PWR_STATE__SHIFT 0x6
+#define DCHUBBUB_MEM_PWR_STATUS__DET0_MEM_PWR_STATE__SHIFT 0x8
+#define DCHUBBUB_MEM_PWR_STATUS__DET1_MEM_PWR_STATE__SHIFT 0xa
+#define DCHUBBUB_MEM_PWR_STATUS__DET2_MEM_PWR_STATE__SHIFT 0xc
+#define DCHUBBUB_MEM_PWR_STATUS__DET3_MEM_PWR_STATE__SHIFT 0xe
+#define DCHUBBUB_MEM_PWR_STATUS__COMPBUF_MEM_PWR_STATE_MASK 0x00000003L
+#define DCHUBBUB_MEM_PWR_STATUS__METAFIFO_MEM_PWR_STATE_MASK 0x0000000CL
+#define DCHUBBUB_MEM_PWR_STATUS__UNALLOCATED_MEM_PWR_STATE_MASK 0x00000030L
+#define DCHUBBUB_MEM_PWR_STATUS__DCC_SKID_MEM_PWR_STATE_MASK 0x000000C0L
+#define DCHUBBUB_MEM_PWR_STATUS__DET0_MEM_PWR_STATE_MASK 0x00000300L
+#define DCHUBBUB_MEM_PWR_STATUS__DET1_MEM_PWR_STATE_MASK 0x00000C00L
+#define DCHUBBUB_MEM_PWR_STATUS__DET2_MEM_PWR_STATE_MASK 0x00003000L
+#define DCHUBBUB_MEM_PWR_STATUS__DET3_MEM_PWR_STATE_MASK 0x0000C000L
+//COMPBUF_RESERVED_SPACE
+#define COMPBUF_RESERVED_SPACE__COMPBUF_RESERVED_SPACE_64B__SHIFT 0x0
+#define COMPBUF_RESERVED_SPACE__COMPBUF_RESERVED_SPACE_ZS__SHIFT 0x10
+#define COMPBUF_RESERVED_SPACE__COMPBUF_RESERVED_SPACE_64B_MASK 0x00000FFFL
+#define COMPBUF_RESERVED_SPACE__COMPBUF_RESERVED_SPACE_ZS_MASK 0x0FFF0000L
+//DCHUBBUB_DEBUG_CTRL_0
+#define DCHUBBUB_DEBUG_CTRL_0__METAFIFO_DEPTH__SHIFT 0x0
+#define DCHUBBUB_DEBUG_CTRL_0__COMPBUF_SEG_DEPTH__SHIFT 0x8
+#define DCHUBBUB_DEBUG_CTRL_0__DET_SEG_DEPTH__SHIFT 0xc
+#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH__SHIFT 0x10
+#define DCHUBBUB_DEBUG_CTRL_0__DELAY_COMPBUF_DEALLOC_ON_DRQ_STOP_DISABLE__SHIFT 0x1b
+#define DCHUBBUB_DEBUG_CTRL_0__SEG_ALLOC_ERR_PIPE_BLANK_ENABLE__SHIFT 0x1c
+#define DCHUBBUB_DEBUG_CTRL_0__DATAFIFO_RESET_OPTIMIZATION_DISABLE__SHIFT 0x1d
+#define DCHUBBUB_DEBUG_CTRL_0__DATAFIFO_STALL_FOR_ALLOC_ENABLE__SHIFT 0x1e
+#define DCHUBBUB_DEBUG_CTRL_0__DATAFIFO_STALL_FOR_DEALLOC_ENABLE__SHIFT 0x1f
+#define DCHUBBUB_DEBUG_CTRL_0__METAFIFO_DEPTH_MASK 0x000000FFL
+#define DCHUBBUB_DEBUG_CTRL_0__COMPBUF_SEG_DEPTH_MASK 0x00000F00L
+#define DCHUBBUB_DEBUG_CTRL_0__DET_SEG_DEPTH_MASK 0x0000F000L
+#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH_MASK 0x07FF0000L
+#define DCHUBBUB_DEBUG_CTRL_0__DELAY_COMPBUF_DEALLOC_ON_DRQ_STOP_DISABLE_MASK 0x08000000L
+#define DCHUBBUB_DEBUG_CTRL_0__SEG_ALLOC_ERR_PIPE_BLANK_ENABLE_MASK 0x10000000L
+#define DCHUBBUB_DEBUG_CTRL_0__DATAFIFO_RESET_OPTIMIZATION_DISABLE_MASK 0x20000000L
+#define DCHUBBUB_DEBUG_CTRL_0__DATAFIFO_STALL_FOR_ALLOC_ENABLE_MASK 0x40000000L
+#define DCHUBBUB_DEBUG_CTRL_0__DATAFIFO_STALL_FOR_DEALLOC_ENABLE_MASK 0x80000000L
+//DCHUBBUB_CRC1_VAL_R
+#define DCHUBBUB_CRC1_VAL_R__DCHUBBUB_CRC1_R_CR__SHIFT 0x0
+#define DCHUBBUB_CRC1_VAL_R__DCHUBBUB_CRC1_R_CR_MASK 0xFFFFFFFFL
+//DCHUBBUB_CRC1_VAL_G
+#define DCHUBBUB_CRC1_VAL_G__DCHUBBUB_CRC1_G_Y__SHIFT 0x0
+#define DCHUBBUB_CRC1_VAL_G__DCHUBBUB_CRC1_G_Y_MASK 0xFFFFFFFFL
+//DCHUBBUB_CRC1_VAL_B
+#define DCHUBBUB_CRC1_VAL_B__DCHUBBUB_CRC1_B_CB__SHIFT 0x0
+#define DCHUBBUB_CRC1_VAL_B__DCHUBBUB_CRC1_B_CB_MASK 0xFFFFFFFFL
+//DCHUBBUB_CRC1_VAL_A
+#define DCHUBBUB_CRC1_VAL_A__DCHUBBUB_CRC1_ALPHA__SHIFT 0x0
+#define DCHUBBUB_CRC1_VAL_A__DCHUBBUB_CRC1_ALPHA_MASK 0xFFFFFFFFL
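[Editor's note - a minimal sketch, not part of this generated header or patch, of how the __SHIFT/_MASK pairs above are typically consumed when composing a register value. The helpers set_field() and build_compbuf_ctrl() and the segment count are illustrative assumptions; only the DCHUBBUB_COMPBUF_CTRL macros come from the header itself.]

#include <stdint.h>

/* Clear the field's bits, then OR in the new value shifted into place. */
static inline uint32_t set_field(uint32_t reg_val, uint32_t mask,
				 uint32_t shift, uint32_t field_val)
{
	return (reg_val & ~mask) | ((field_val << shift) & mask);
}

/* Hypothetical example: program COMPBUF_SIZE (bits 4:0) and enable the
 * size-change-done interrupt (bit 16), leaving other fields untouched. */
static uint32_t build_compbuf_ctrl(uint32_t old_val, uint32_t size_segments)
{
	uint32_t v = set_field(old_val,
			       DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_MASK,
			       DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE__SHIFT,
			       size_segments);
	return set_field(v,
			 DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_CHANGE_DONE_INT_ENABLE_MASK,
			 DCHUBBUB_COMPBUF_CTRL__COMPBUF_SIZE_CHANGE_DONE_INT_ENABLE__SHIFT,
			 1);
}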
+
+
+// addressBlock: dce_dc_dchubbubl_hubbub_vmrq_if_dispdec
+//DCN_VM_CONTEXT0_CNTL
+#define DCN_VM_CONTEXT0_CNTL__VM_CONTEXT0_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT0_CNTL__VM_CONTEXT0_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT0_CNTL__VM_CONTEXT0_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT0_CNTL__VM_CONTEXT0_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT0_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT0_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT0_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT0_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT0_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT0_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT0_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT0_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT1_CNTL
+#define DCN_VM_CONTEXT1_CNTL__VM_CONTEXT1_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT1_CNTL__VM_CONTEXT1_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT1_CNTL__VM_CONTEXT1_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT1_CNTL__VM_CONTEXT1_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT1_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT1_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT1_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT1_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT1_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT1_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT1_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT1_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT1_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT1_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT1_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT1_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT2_CNTL
+#define DCN_VM_CONTEXT2_CNTL__VM_CONTEXT2_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT2_CNTL__VM_CONTEXT2_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT2_CNTL__VM_CONTEXT2_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT2_CNTL__VM_CONTEXT2_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT2_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT2_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT2_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT2_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT2_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT2_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT2_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT2_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT2_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT2_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT2_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT2_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT3_CNTL
+#define DCN_VM_CONTEXT3_CNTL__VM_CONTEXT3_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT3_CNTL__VM_CONTEXT3_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT3_CNTL__VM_CONTEXT3_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT3_CNTL__VM_CONTEXT3_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT3_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT3_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT3_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT3_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT3_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT3_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT3_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT3_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT3_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT3_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT3_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT3_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT4_CNTL
+#define DCN_VM_CONTEXT4_CNTL__VM_CONTEXT4_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT4_CNTL__VM_CONTEXT4_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT4_CNTL__VM_CONTEXT4_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT4_CNTL__VM_CONTEXT4_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT4_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT4_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT4_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT4_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT4_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT4_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT4_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT4_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT4_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT4_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT4_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT4_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT5_CNTL
+#define DCN_VM_CONTEXT5_CNTL__VM_CONTEXT5_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT5_CNTL__VM_CONTEXT5_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT5_CNTL__VM_CONTEXT5_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT5_CNTL__VM_CONTEXT5_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT5_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT5_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT5_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT5_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT5_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT5_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT5_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT5_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT5_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT5_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT5_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT5_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT6_CNTL
+#define DCN_VM_CONTEXT6_CNTL__VM_CONTEXT6_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT6_CNTL__VM_CONTEXT6_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT6_CNTL__VM_CONTEXT6_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT6_CNTL__VM_CONTEXT6_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT6_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT6_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT6_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT6_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT6_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT6_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT6_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT6_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT6_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT6_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT6_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT6_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT7_CNTL
+#define DCN_VM_CONTEXT7_CNTL__VM_CONTEXT7_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT7_CNTL__VM_CONTEXT7_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT7_CNTL__VM_CONTEXT7_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT7_CNTL__VM_CONTEXT7_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT7_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT7_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT7_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT7_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT7_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT7_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT7_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT7_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT7_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT7_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT7_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT7_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT8_CNTL
+#define DCN_VM_CONTEXT8_CNTL__VM_CONTEXT8_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT8_CNTL__VM_CONTEXT8_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT8_CNTL__VM_CONTEXT8_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT8_CNTL__VM_CONTEXT8_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT8_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT8_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT8_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT8_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT8_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT8_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT8_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT8_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT8_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT8_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT8_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT8_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT9_CNTL
+#define DCN_VM_CONTEXT9_CNTL__VM_CONTEXT9_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT9_CNTL__VM_CONTEXT9_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT9_CNTL__VM_CONTEXT9_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT9_CNTL__VM_CONTEXT9_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT9_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT9_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT9_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT9_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT9_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT9_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT9_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT9_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT9_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT9_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT9_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT9_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT10_CNTL
+#define DCN_VM_CONTEXT10_CNTL__VM_CONTEXT10_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT10_CNTL__VM_CONTEXT10_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT10_CNTL__VM_CONTEXT10_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT10_CNTL__VM_CONTEXT10_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT10_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT10_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT10_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT10_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT10_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT10_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT10_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT10_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT10_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT10_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT10_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT10_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT11_CNTL
+#define DCN_VM_CONTEXT11_CNTL__VM_CONTEXT11_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT11_CNTL__VM_CONTEXT11_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT11_CNTL__VM_CONTEXT11_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT11_CNTL__VM_CONTEXT11_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT11_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT11_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT11_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT11_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT11_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT11_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT11_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT11_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT11_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT11_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT11_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT11_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT12_CNTL
+#define DCN_VM_CONTEXT12_CNTL__VM_CONTEXT12_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT12_CNTL__VM_CONTEXT12_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT12_CNTL__VM_CONTEXT12_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT12_CNTL__VM_CONTEXT12_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT12_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT12_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT12_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT12_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT12_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT12_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT12_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT12_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT12_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT12_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT12_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT12_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT13_CNTL
+#define DCN_VM_CONTEXT13_CNTL__VM_CONTEXT13_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT13_CNTL__VM_CONTEXT13_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT13_CNTL__VM_CONTEXT13_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT13_CNTL__VM_CONTEXT13_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT13_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT13_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT13_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT13_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT13_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT13_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT13_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT13_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT13_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT13_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT13_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT13_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT14_CNTL
+#define DCN_VM_CONTEXT14_CNTL__VM_CONTEXT14_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT14_CNTL__VM_CONTEXT14_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT14_CNTL__VM_CONTEXT14_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT14_CNTL__VM_CONTEXT14_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT14_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT14_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT14_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT14_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT14_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT14_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT14_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT14_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT14_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT14_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT14_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT14_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT15_CNTL
+#define DCN_VM_CONTEXT15_CNTL__VM_CONTEXT15_PAGE_TABLE_DEPTH__SHIFT 0x1
+#define DCN_VM_CONTEXT15_CNTL__VM_CONTEXT15_PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define DCN_VM_CONTEXT15_CNTL__VM_CONTEXT15_PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define DCN_VM_CONTEXT15_CNTL__VM_CONTEXT15_PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+//DCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32
+#define DCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT15_PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define DCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__VM_CONTEXT15_PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32
+#define DCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT15_PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__VM_CONTEXT15_PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32
+#define DCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT15_START_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__VM_CONTEXT15_START_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32
+#define DCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT15_START_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__VM_CONTEXT15_START_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32
+#define DCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT15_END_LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define DCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__VM_CONTEXT15_END_LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//DCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32
+#define DCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT15_END_LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define DCN_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__VM_CONTEXT15_END_LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//DCN_VM_DEFAULT_ADDR_MSB
+#define DCN_VM_DEFAULT_ADDR_MSB__DCN_VM_DEFAULT_ADDR_MSB__SHIFT 0x0
+#define DCN_VM_DEFAULT_ADDR_MSB__DCN_VM_DEFAULT_SPA__SHIFT 0x1c
+#define DCN_VM_DEFAULT_ADDR_MSB__DCN_VM_DEFAULT_SNOOP__SHIFT 0x1d
+#define DCN_VM_DEFAULT_ADDR_MSB__DCN_VM_DEFAULT_ADDR_MSB_MASK 0x0000000FL
+#define DCN_VM_DEFAULT_ADDR_MSB__DCN_VM_DEFAULT_SPA_MASK 0x10000000L
+#define DCN_VM_DEFAULT_ADDR_MSB__DCN_VM_DEFAULT_SNOOP_MASK 0x20000000L
+//DCN_VM_DEFAULT_ADDR_LSB
+#define DCN_VM_DEFAULT_ADDR_LSB__DCN_VM_DEFAULT_ADDR_LSB__SHIFT 0x0
+#define DCN_VM_DEFAULT_ADDR_LSB__DCN_VM_DEFAULT_ADDR_LSB_MASK 0xFFFFFFFFL
+//DCN_VM_FAULT_CNTL
+#define DCN_VM_FAULT_CNTL__DCN_VM_ERROR_STATUS_CLEAR__SHIFT 0x0
+#define DCN_VM_FAULT_CNTL__DCN_VM_ERROR_STATUS_MODE__SHIFT 0x1
+#define DCN_VM_FAULT_CNTL__DCN_VM_ERROR_INTERRUPT_ENABLE__SHIFT 0x2
+#define DCN_VM_FAULT_CNTL__DCN_VM_RANGE_FAULT_DISABLE__SHIFT 0x8
+#define DCN_VM_FAULT_CNTL__DCN_VM_PRQ_FAULT_DISABLE__SHIFT 0x9
+#define DCN_VM_FAULT_CNTL__DCN_VM_ERROR_STATUS_CLEAR_MASK 0x00000001L
+#define DCN_VM_FAULT_CNTL__DCN_VM_ERROR_STATUS_MODE_MASK 0x00000002L
+#define DCN_VM_FAULT_CNTL__DCN_VM_ERROR_INTERRUPT_ENABLE_MASK 0x00000004L
+#define DCN_VM_FAULT_CNTL__DCN_VM_RANGE_FAULT_DISABLE_MASK 0x00000100L
+#define DCN_VM_FAULT_CNTL__DCN_VM_PRQ_FAULT_DISABLE_MASK 0x00000200L
+//DCN_VM_FAULT_STATUS
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_STATUS__SHIFT 0x0
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_VMID__SHIFT 0x10
+#define DCN_VM_FAULT_STATUS__DCN_VM_TR_RESP_ERROR_VMID__SHIFT 0x14
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_TABLE_LEVEL__SHIFT 0x18
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_PIPE__SHIFT 0x1a
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_INTERRUPT_STATUS__SHIFT 0x1f
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_STATUS_MASK 0x0000FFFFL
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_VMID_MASK 0x000F0000L
+#define DCN_VM_FAULT_STATUS__DCN_VM_TR_RESP_ERROR_VMID_MASK 0x00F00000L
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_TABLE_LEVEL_MASK 0x03000000L
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_PIPE_MASK 0x3C000000L
+#define DCN_VM_FAULT_STATUS__DCN_VM_ERROR_INTERRUPT_STATUS_MASK 0x80000000L
+//DCN_VM_FAULT_ADDR_MSB
+#define DCN_VM_FAULT_ADDR_MSB__DCN_VM_FAULT_ADDR_MSB__SHIFT 0x0
+#define DCN_VM_FAULT_ADDR_MSB__DCN_VM_FAULT_ADDR_MSB_MASK 0x0000000FL
+//DCN_VM_FAULT_ADDR_LSB
+#define DCN_VM_FAULT_ADDR_LSB__DCN_VM_FAULT_ADDR_LSB__SHIFT 0x0
+#define DCN_VM_FAULT_ADDR_LSB__DCN_VM_FAULT_ADDR_LSB_MASK 0xFFFFFFFFL
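[Editor's note - a minimal sketch, not part of this generated header or patch, of decoding a raw register value with the _MASK/__SHIFT pairs above. The helpers get_field() and decode_vm_fault() are illustrative assumptions; only the DCN_VM_FAULT_STATUS macros come from the header, and obtaining the raw register value is left to the caller.]

#include <stdint.h>

/* Isolate the field's bits with the mask, then shift them down to bit 0. */
static inline uint32_t get_field(uint32_t reg_val, uint32_t mask, uint32_t shift)
{
	return (reg_val & mask) >> shift;
}

/* Hypothetical example: pull the faulting VMID and page-table level out of
 * a previously read DCN_VM_FAULT_STATUS value. */
static void decode_vm_fault(uint32_t fault_status)
{
	uint32_t vmid  = get_field(fault_status,
				   DCN_VM_FAULT_STATUS__DCN_VM_ERROR_VMID_MASK,
				   DCN_VM_FAULT_STATUS__DCN_VM_ERROR_VMID__SHIFT);
	uint32_t level = get_field(fault_status,
				   DCN_VM_FAULT_STATUS__DCN_VM_ERROR_TABLE_LEVEL_MASK,
				   DCN_VM_FAULT_STATUS__DCN_VM_ERROR_TABLE_LEVEL__SHIFT);
	(void)vmid;
	(void)level;
}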
+
+
+// addressBlock: dce_dc_dchubbubl_dchubbub_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON6_PERFCOUNTER_CNTL
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON6_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON6_PERFCOUNTER_CNTL2
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON6_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON6_PERFCOUNTER_STATE
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON6_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON6_PERFMON_CNTL
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON6_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON6_PERFMON_CNTL2
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON6_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON6_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON6_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON6_PERFMON_CVALUE_LOW
+#define DC_PERFMON6_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON6_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON6_PERFMON_HI
+#define DC_PERFMON6_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON6_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON6_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON6_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON6_PERFMON_LOW
+#define DC_PERFMON6_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON6_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubp_dispdec
+//HUBP0_DCSURF_SURFACE_CONFIG
+#define HUBP0_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define HUBP0_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE__SHIFT 0x8
+#define HUBP0_DCSURF_SURFACE_CONFIG__H_MIRROR_EN__SHIFT 0xa
+#define HUBP0_DCSURF_SURFACE_CONFIG__ALPHA_PLANE_EN__SHIFT 0xb
+#define HUBP0_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define HUBP0_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE_MASK 0x00000300L
+#define HUBP0_DCSURF_SURFACE_CONFIG__H_MIRROR_EN_MASK 0x00000400L
+#define HUBP0_DCSURF_SURFACE_CONFIG__ALPHA_PLANE_EN_MASK 0x00000800L
+//HUBP0_DCSURF_ADDR_CONFIG
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define HUBP0_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE__SHIFT 0x6
+#define HUBP0_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0xc
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_PKRS__SHIFT 0x10
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define HUBP0_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE_MASK 0x000000C0L
+#define HUBP0_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x00003000L
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_PKRS_MASK 0x00070000L
+//HUBP0_DCSURF_TILING_CONFIG
+#define HUBP0_DCSURF_TILING_CONFIG__SW_MODE__SHIFT 0x0
+#define HUBP0_DCSURF_TILING_CONFIG__DIM_TYPE__SHIFT 0x7
+#define HUBP0_DCSURF_TILING_CONFIG__META_LINEAR__SHIFT 0x9
+#define HUBP0_DCSURF_TILING_CONFIG__PIPE_ALIGNED__SHIFT 0xb
+#define HUBP0_DCSURF_TILING_CONFIG__SW_MODE_MASK 0x0000001FL
+#define HUBP0_DCSURF_TILING_CONFIG__DIM_TYPE_MASK 0x00000180L
+#define HUBP0_DCSURF_TILING_CONFIG__META_LINEAR_MASK 0x00000200L
+#define HUBP0_DCSURF_TILING_CONFIG__PIPE_ALIGNED_MASK 0x00000800L
+//HUBP0_DCSURF_PRI_VIEWPORT_START
+#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP0_DCSURF_PRI_VIEWPORT_START_C
+#define HUBP0_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP0_DCSURF_SEC_VIEWPORT_START
+#define HUBP0_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP0_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP0_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP0_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP0_DCSURF_SEC_VIEWPORT_START_C
+#define HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP0_DCHUBP_REQ_SIZE_CONFIG
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT__SHIFT 0x0
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR__SHIFT 0x4
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE__SHIFT 0x8
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE__SHIFT 0xb
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE__SHIFT 0x10
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE__SHIFT 0x12
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE__SHIFT 0x14
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE__SHIFT 0x18
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT_MASK 0x00000007L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR_MASK 0x00000070L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE_MASK 0x00000700L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE_MASK 0x00001800L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE_MASK 0x00030000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE_MASK 0x000C0000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE_MASK 0x00700000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE_MASK 0x07000000L
+//HUBP0_DCHUBP_REQ_SIZE_CONFIG_C
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C__SHIFT 0x0
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C__SHIFT 0x4
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C__SHIFT 0x8
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C__SHIFT 0xb
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C__SHIFT 0x10
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C__SHIFT 0x12
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C__SHIFT 0x14
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C_MASK 0x00000007L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C_MASK 0x00000070L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C_MASK 0x00000700L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C_MASK 0x00001800L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C_MASK 0x00030000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C_MASK 0x000C0000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C_MASK 0x00700000L
+//HUBP0_DCHUBP_CNTL
+#define HUBP0_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT 0x0
+#define HUBP0_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ__SHIFT 0x1
+#define HUBP0_DCHUBP_CNTL__HUBP_SOFT_RESET__SHIFT 0x2
+#define HUBP0_DCHUBP_CNTL__HUBP_IN_BLANK__SHIFT 0x3
+#define HUBP0_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT 0x4
+#define HUBP0_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC__SHIFT 0x8
+#define HUBP0_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM__SHIFT 0x9
+#define HUBP0_DCHUBP_CNTL__HUBP_UNBOUNDED_REQ_MODE__SHIFT 0xa
+#define HUBP0_DCHUBP_CNTL__HUBP_SEG_ALLOC_ERR_STATUS__SHIFT 0xb
+#define HUBP0_DCHUBP_CNTL__HUBP_TTU_DISABLE__SHIFT 0xc
+#define HUBP0_DCHUBP_CNTL__HUBP_TTU_MODE__SHIFT 0xd
+#define HUBP0_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ__SHIFT 0x10
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS__SHIFT 0x14
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD__SHIFT 0x18
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR__SHIFT 0x1a
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN__SHIFT 0x1b
+#define HUBP0_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT 0x1c
+#define HUBP0_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR__SHIFT 0x1f
+#define HUBP0_DCHUBP_CNTL__HUBP_BLANK_EN_MASK 0x00000001L
+#define HUBP0_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ_MASK 0x00000002L
+#define HUBP0_DCHUBP_CNTL__HUBP_SOFT_RESET_MASK 0x00000004L
+#define HUBP0_DCHUBP_CNTL__HUBP_IN_BLANK_MASK 0x00000008L
+#define HUBP0_DCHUBP_CNTL__HUBP_VTG_SEL_MASK 0x000000F0L
+#define HUBP0_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC_MASK 0x00000100L
+#define HUBP0_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM_MASK 0x00000200L
+#define HUBP0_DCHUBP_CNTL__HUBP_UNBOUNDED_REQ_MODE_MASK 0x00000400L
+#define HUBP0_DCHUBP_CNTL__HUBP_SEG_ALLOC_ERR_STATUS_MASK 0x00000800L
+#define HUBP0_DCHUBP_CNTL__HUBP_TTU_DISABLE_MASK 0x00001000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TTU_MODE_MASK 0x0000E000L
+#define HUBP0_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ_MASK 0x000F0000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_MASK 0x00F00000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD_MASK 0x03000000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR_MASK 0x04000000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN_MASK 0x08000000L
+#define HUBP0_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK 0x70000000L
+#define HUBP0_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR_MASK 0x80000000L
+//HUBP0_HUBP_CLK_CNTL
+#define HUBP0_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0x0
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS__SHIFT 0x8
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS__SHIFT 0xc
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS__SHIFT 0x10
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON__SHIFT 0x14
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON__SHIFT 0x15
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON__SHIFT 0x16
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON__SHIFT 0x17
+#define HUBP0_HUBP_CLK_CNTL__HUBP_FGCG_REP_DIS__SHIFT 0x18
+#define HUBP0_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL__SHIFT 0x1c
+#define HUBP0_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE_MASK 0x00000001L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS_MASK 0x00000100L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS_MASK 0x00001000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS_MASK 0x00010000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON_MASK 0x00100000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON_MASK 0x00200000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON_MASK 0x00400000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON_MASK 0x00800000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_FGCG_REP_DIS_MASK 0x01000000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL_MASK 0xF0000000L
+//HUBP0_DCHUBP_VMPG_CONFIG
+#define HUBP0_DCHUBP_VMPG_CONFIG__VMPG_SIZE__SHIFT 0x0
+#define HUBP0_DCHUBP_VMPG_CONFIG__PTE_BUFFER_MODE__SHIFT 0x1
+#define HUBP0_DCHUBP_VMPG_CONFIG__BIGK_FRAGMENT_SIZE__SHIFT 0x2
+#define HUBP0_DCHUBP_VMPG_CONFIG__FORCE_ONE_ROW_FOR_FRAME__SHIFT 0x7
+#define HUBP0_DCHUBP_VMPG_CONFIG__VMPG_SIZE_MASK 0x00000001L
+#define HUBP0_DCHUBP_VMPG_CONFIG__PTE_BUFFER_MODE_MASK 0x00000002L
+#define HUBP0_DCHUBP_VMPG_CONFIG__BIGK_FRAGMENT_SIZE_MASK 0x0000007CL
+#define HUBP0_DCHUBP_VMPG_CONFIG__FORCE_ONE_ROW_FOR_FRAME_MASK 0x00000080L
+//HUBP0_DCHUBP_MALL_CONFIG
+#define HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_SEL__SHIFT 0x0
+#define HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR__SHIFT 0x2
+#define HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_SEL_MASK 0x00000003L
+#define HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR_MASK 0x00000004L
+//HUBP0_DCHUBP_MALL_SUB_VP
+#define HUBP0_DCHUBP_MALL_SUB_VP__USE_MALL_AT_START_LINE__SHIFT 0x0
+#define HUBP0_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S0__SHIFT 0x1
+#define HUBP0_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S1__SHIFT 0xf
+#define HUBP0_DCHUBP_MALL_SUB_VP__USE_MALL_AT_START_LINE_MASK 0x00000001L
+#define HUBP0_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S0_MASK 0x00007FFEL
+#define HUBP0_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S1_MASK 0x1FFF8000L
+//HUBP0_HUBPREQ_DEBUG_DB
+#define HUBP0_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP0_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG_MASK 0xFFFFFFFFL
+//HUBP0_HUBPREQ_DEBUG
+#define HUBP0_HUBPREQ_DEBUG__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP0_HUBPREQ_DEBUG__HUBPREQ_DEBUG_FLIP_REQ_DURING_MALL_STATUS__SHIFT 0x1f
+#define HUBP0_HUBPREQ_DEBUG__HUBPREQ_DEBUG_MASK 0x7FFFFFFFL
+#define HUBP0_HUBPREQ_DEBUG__HUBPREQ_DEBUG_FLIP_REQ_DURING_MALL_STATUS_MASK 0x80000000L
+//HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK__SHIFT 0xc
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK__SHIFT 0x14
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK__SHIFT 0x1c
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK_MASK 0x00000001L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK_MASK 0x00000FF0L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK_MASK 0x0001F000L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK_MASK 0x01F00000L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK_MASK 0x30000000L
+//HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK__SHIFT 0x0
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK__SHIFT 0x1
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK__SHIFT 0x4
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK__SHIFT 0xc
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK__SHIFT 0x14
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK_MASK 0x00000001L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK_MASK 0x00000002L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK_MASK 0x00000FF0L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK_MASK 0x0001F000L
+#define HUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK_MASK 0x01F00000L
+//HUBP0_HUBP_MALL_STATUS
+#define HUBP0_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_STATIC_SCREEN__SHIFT 0x0
+#define HUBP0_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_PSTATE_CHANGE__SHIFT 0x1
+#define HUBP0_HUBP_MALL_STATUS__MALL_USE_REQUEST__SHIFT 0x2
+#define HUBP0_HUBP_MALL_STATUS__MALL_USE_RESPONSE__SHIFT 0x3
+#define HUBP0_HUBP_MALL_STATUS__MALL_IN_USE__SHIFT 0x4
+#define HUBP0_HUBP_MALL_STATUS__MALL_PREFETCH_COMPLETE__SHIFT 0x5
+#define HUBP0_HUBP_MALL_STATUS__SUB_VP_MALL_RETRIEVE__SHIFT 0x6
+#define HUBP0_HUBP_MALL_STATUS__MCB_MALL_USE_RESPONSE__SHIFT 0x7
+#define HUBP0_HUBP_MALL_STATUS__CURSOR_LOCAL_RETRIEVE__SHIFT 0x8
+#define HUBP0_HUBP_MALL_STATUS__CURSOR_LOCAL_PREFETCH__SHIFT 0x9
+#define HUBP0_HUBP_MALL_STATUS__MALL_RETRIEVE_FRAME__SHIFT 0xa
+#define HUBP0_HUBP_MALL_STATUS__MALL_PREFETCH_FRAME__SHIFT 0xb
+#define HUBP0_HUBP_MALL_STATUS__CRQ_BUSY_WITH_MALL__SHIFT 0xc
+#define HUBP0_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL__SHIFT 0xd
+#define HUBP0_HUBP_MALL_STATUS__DRQ_BUSY_WITH_MALL__SHIFT 0xe
+#define HUBP0_HUBP_MALL_STATUS__USE_ONE_ROW_FOR_FRAME__SHIFT 0xf
+#define HUBP0_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_RETRIEVE_IN_PROGRESS__SHIFT 0x10
+#define HUBP0_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_OUTSTANDING__SHIFT 0x11
+#define HUBP0_HUBP_MALL_STATUS__DRQ_MALL_OUTSTANDING__SHIFT 0x12
+#define HUBP0_HUBP_MALL_STATUS__DRQ_MALL_CNT_ZERO__SHIFT 0x13
+#define HUBP0_HUBP_MALL_STATUS__MRQ_S1_MALL_RETRIEVE_SUB_VP__SHIFT 0x14
+#define HUBP0_HUBP_MALL_STATUS__MRQ_S0_MALL_RETRIEVE_SUB_VP__SHIFT 0x15
+#define HUBP0_HUBP_MALL_STATUS__MRQ_MALL_OUTSTANDING__SHIFT 0x16
+#define HUBP0_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_SUB_VP__SHIFT 0x17
+#define HUBP0_HUBP_MALL_STATUS__CRQ_MALL_OUTSTANDING__SHIFT 0x18
+#define HUBP0_HUBP_MALL_STATUS__CRQ_LOCAL_OUTSTANDING__SHIFT 0x19
+#define HUBP0_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_STATIC_SCREEN_MASK 0x00000001L
+#define HUBP0_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_PSTATE_CHANGE_MASK 0x00000002L
+#define HUBP0_HUBP_MALL_STATUS__MALL_USE_REQUEST_MASK 0x00000004L
+#define HUBP0_HUBP_MALL_STATUS__MALL_USE_RESPONSE_MASK 0x00000008L
+#define HUBP0_HUBP_MALL_STATUS__MALL_IN_USE_MASK 0x00000010L
+#define HUBP0_HUBP_MALL_STATUS__MALL_PREFETCH_COMPLETE_MASK 0x00000020L
+#define HUBP0_HUBP_MALL_STATUS__SUB_VP_MALL_RETRIEVE_MASK 0x00000040L
+#define HUBP0_HUBP_MALL_STATUS__MCB_MALL_USE_RESPONSE_MASK 0x00000080L
+#define HUBP0_HUBP_MALL_STATUS__CURSOR_LOCAL_RETRIEVE_MASK 0x00000100L
+#define HUBP0_HUBP_MALL_STATUS__CURSOR_LOCAL_PREFETCH_MASK 0x00000200L
+#define HUBP0_HUBP_MALL_STATUS__MALL_RETRIEVE_FRAME_MASK 0x00000400L
+#define HUBP0_HUBP_MALL_STATUS__MALL_PREFETCH_FRAME_MASK 0x00000800L
+#define HUBP0_HUBP_MALL_STATUS__CRQ_BUSY_WITH_MALL_MASK 0x00001000L
+#define HUBP0_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_MASK 0x00002000L
+#define HUBP0_HUBP_MALL_STATUS__DRQ_BUSY_WITH_MALL_MASK 0x00004000L
+#define HUBP0_HUBP_MALL_STATUS__USE_ONE_ROW_FOR_FRAME_MASK 0x00008000L
+#define HUBP0_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_RETRIEVE_IN_PROGRESS_MASK 0x00010000L
+#define HUBP0_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_OUTSTANDING_MASK 0x00020000L
+#define HUBP0_HUBP_MALL_STATUS__DRQ_MALL_OUTSTANDING_MASK 0x00040000L
+#define HUBP0_HUBP_MALL_STATUS__DRQ_MALL_CNT_ZERO_MASK 0x00080000L
+#define HUBP0_HUBP_MALL_STATUS__MRQ_S1_MALL_RETRIEVE_SUB_VP_MASK 0x00100000L
+#define HUBP0_HUBP_MALL_STATUS__MRQ_S0_MALL_RETRIEVE_SUB_VP_MASK 0x00200000L
+#define HUBP0_HUBP_MALL_STATUS__MRQ_MALL_OUTSTANDING_MASK 0x00400000L
+#define HUBP0_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_SUB_VP_MASK 0x00800000L
+#define HUBP0_HUBP_MALL_STATUS__CRQ_MALL_OUTSTANDING_MASK 0x01000000L
+#define HUBP0_HUBP_MALL_STATUS__CRQ_LOCAL_OUTSTANDING_MASK 0x02000000L
+
+
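The HUBP0 block above follows the header's uniform __SHIFT/_MASK convention: each field gets a bit offset and a contiguous mask within its 32-bit register. Below is a minimal sketch (not part of the diff) of how such a pair is combined for a read-modify-write update; read_reg(), write_reg() and mmHUBP0_DCHUBP_CNTL are hypothetical stand-ins, since the real register offsets typically live in the matching _offset.h header and the driver uses its own MMIO helpers.

#include <stdint.h>

/* hypothetical MMIO accessors and register offset, for illustration only */
extern uint32_t read_reg(uint32_t offset);
extern void write_reg(uint32_t offset, uint32_t value);
#define mmHUBP0_DCHUBP_CNTL 0x0 /* placeholder; the real offset is defined elsewhere */

/* update one field of a 32-bit register value using its _MASK/__SHIFT pair */
static inline uint32_t set_field(uint32_t reg_val, uint32_t mask,
                                 uint32_t shift, uint32_t field_val)
{
        return (reg_val & ~mask) | ((field_val << shift) & mask);
}

/* example: assert HUBP_BLANK_EN without disturbing the other DCHUBP_CNTL fields */
static void hubp0_blank(void)
{
        uint32_t val = read_reg(mmHUBP0_DCHUBP_CNTL);

        val = set_field(val, HUBP0_DCHUBP_CNTL__HUBP_BLANK_EN_MASK,
                        HUBP0_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT, 1);
        write_reg(mmHUBP0_DCHUBP_CNTL, val);
}
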
+// addressBlock: dce_dc_dcbubp0_dispdec_hubpreq_dispdec
+//HUBPREQ0_DCSURF_SURFACE_PITCH
+#define HUBPREQ0_DCSURF_SURFACE_PITCH__PITCH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_PITCH__META_PITCH__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_PITCH__PITCH_MASK 0x00003FFFL
+#define HUBPREQ0_DCSURF_SURFACE_PITCH__META_PITCH_MASK 0x3FFF0000L
+//HUBPREQ0_DCSURF_SURFACE_PITCH_C
+#define HUBPREQ0_DCSURF_SURFACE_PITCH_C__PITCH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_PITCH_C__META_PITCH_C__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_PITCH_C__PITCH_C_MASK 0x00003FFFL
+#define HUBPREQ0_DCSURF_SURFACE_PITCH_C__META_PITCH_C_MASK 0x3FFF0000L
+//HUBPREQ0_VMID_SETTINGS_0
+#define HUBPREQ0_VMID_SETTINGS_0__VMID__SHIFT 0x0
+#define HUBPREQ0_VMID_SETTINGS_0__VMID_MASK 0x0000000FL
+//HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SURFACE_CONTROL
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK__SHIFT 0x2
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_C__SHIFT 0x5
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK__SHIFT 0xa
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_C__SHIFT 0xd
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x11
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x12
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x13
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_MASK 0x0000000CL
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_C_MASK 0x00000060L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_MASK 0x00000C00L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_C_MASK 0x00006000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00020000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x00040000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x00080000L
+//HUBPREQ0_DCSURF_FLIP_CONTROL
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM__SHIFT 0x4
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS__SHIFT 0x9
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC__SHIFT 0xc
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC__SHIFT 0x10
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE__SHIFT 0x11
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY__SHIFT 0x12
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY__SHIFT 0x14
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK_MASK 0x00000001L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE_MASK 0x00000002L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM_MASK 0x000000F0L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK 0x00000100L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS_MASK 0x00000200L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC_MASK 0x00003000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC_MASK 0x00010000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE_MASK 0x00020000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY_MASK 0x00040000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY_MASK 0x3FF00000L
+//HUBPREQ0_DCSURF_FLIP_CONTROL2
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME__SHIFT 0x0
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE__SHIFT 0x8
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK__SHIFT 0x9
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE__SHIFT 0xa
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH__SHIFT 0xc
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME_MASK 0x000000FFL
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE_MASK 0x00000100L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK_MASK 0x00000200L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE_MASK 0x00000400L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH_MASK 0x00001000L
+//HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE__SHIFT 0x1
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK__SHIFT 0x2
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE__SHIFT 0x3
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR__SHIFT 0x8
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR__SHIFT 0x9
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS__SHIFT 0x11
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED__SHIFT 0x12
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS__SHIFT 0x13
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK_MASK 0x00000001L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE_MASK 0x00000002L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK_MASK 0x00000004L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE_MASK 0x00000008L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR_MASK 0x00000100L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR_MASK 0x00000200L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED_MASK 0x00010000L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS_MASK 0x00020000L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED_MASK 0x00040000L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS_MASK 0x00080000L
+//HUBPREQ0_DCSURF_SURFACE_INUSE
+#define HUBPREQ0_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_VMID__SHIFT 0x1c
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_VMID_MASK 0xF0000000L
+//HUBPREQ0_DCSURF_SURFACE_INUSE_C
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_VMID_C__SHIFT 0x1c
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_VMID_C_MASK 0xF0000000L
+//HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_VMID__SHIFT 0x1c
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_VMID_MASK 0xF0000000L
+//HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_VMID_C__SHIFT 0x1c
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_VMID_C_MASK 0xF0000000L
+//HUBPREQ0_DCN_EXPANSION_MODE
+#define HUBPREQ0_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE__SHIFT 0x0
+#define HUBPREQ0_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE__SHIFT 0x2
+#define HUBPREQ0_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE__SHIFT 0x4
+#define HUBPREQ0_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE__SHIFT 0x6
+#define HUBPREQ0_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE_MASK 0x00000003L
+#define HUBPREQ0_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE_MASK 0x0000000CL
+#define HUBPREQ0_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE_MASK 0x00000030L
+#define HUBPREQ0_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE_MASK 0x000000C0L
+//HUBPREQ0_DCN_TTU_QOS_WM
+#define HUBPREQ0_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM__SHIFT 0x0
+#define HUBPREQ0_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM__SHIFT 0x10
+#define HUBPREQ0_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM_MASK 0x00003FFFL
+#define HUBPREQ0_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM_MASK 0x3FFF0000L
+//HUBPREQ0_DCN_GLOBAL_TTU_CNTL
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK__SHIFT 0x0
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__PIPE_IN_FLUSH_URGENT__SHIFT 0x18
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__PRQ_MRQ_FLUSH_URGENT__SHIFT 0x19
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__ROW_TTU_MODE__SHIFT 0x1b
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP__SHIFT 0x1c
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK_MASK 0x00FFFFFFL
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__PIPE_IN_FLUSH_URGENT_MASK 0x01000000L
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__PRQ_MRQ_FLUSH_URGENT_MASK 0x02000000L
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__ROW_TTU_MODE_MASK 0x08000000L
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP_MASK 0xF0000000L
+//HUBPREQ0_DCN_SURF0_TTU_CNTL0
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ0_DCN_SURF0_TTU_CNTL1
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ0_DCN_SURF1_TTU_CNTL0
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ0_DCN_SURF1_TTU_CNTL1
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ0_DCN_CUR0_TTU_CNTL0
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ0_DCN_CUR0_TTU_CNTL1
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ0_DCN_CUR1_TTU_CNTL0
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ0_DCN_CUR1_TTU_CNTL1
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ0_DCN_DMDATA_VM_CNTL
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__REFCYC_PER_VM_DMDATA__SHIFT 0x0
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS__SHIFT 0x10
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_CLEAR__SHIFT 0x14
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS__SHIFT 0x18
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_LATE_STATUS__SHIFT 0x19
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_CLEAR__SHIFT 0x1a
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_DONE__SHIFT 0x1f
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__REFCYC_PER_VM_DMDATA_MASK 0x0000FFFFL
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_MASK 0x000F0000L
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_CLEAR_MASK 0x00100000L
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_MASK 0x01000000L
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_LATE_STATUS_MASK 0x02000000L
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_CLEAR_MASK 0x04000000L
+#define HUBPREQ0_DCN_DMDATA_VM_CNTL__DMDATA_VM_DONE_MASK 0x80000000L
+//HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR
+#define HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR__SHIFT 0x0
+#define HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR_MASK 0x3FFFFFFFL
+//HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR
+#define HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR__SHIFT 0x0
+#define HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MASK 0x3FFFFFFFL
+//HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
+#define HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
+//HUBPREQ0_BLANK_OFFSET_0
+#define HUBPREQ0_BLANK_OFFSET_0__REFCYC_H_BLANK_END__SHIFT 0x0
+#define HUBPREQ0_BLANK_OFFSET_0__DLG_V_BLANK_END__SHIFT 0x10
+#define HUBPREQ0_BLANK_OFFSET_0__REFCYC_H_BLANK_END_MASK 0x00001FFFL
+#define HUBPREQ0_BLANK_OFFSET_0__DLG_V_BLANK_END_MASK 0x7FFF0000L
+//HUBPREQ0_BLANK_OFFSET_1
+#define HUBPREQ0_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START__SHIFT 0x0
+#define HUBPREQ0_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START_MASK 0x0003FFFFL
+//HUBPREQ0_DST_DIMENSIONS
+#define HUBPREQ0_DST_DIMENSIONS__REFCYC_PER_HTOTAL__SHIFT 0x0
+#define HUBPREQ0_DST_DIMENSIONS__REFCYC_PER_HTOTAL_MASK 0x001FFFFFL
+//HUBPREQ0_DST_AFTER_SCALER
+#define HUBPREQ0_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER__SHIFT 0x0
+#define HUBPREQ0_DST_AFTER_SCALER__DST_Y_AFTER_SCALER__SHIFT 0x10
+#define HUBPREQ0_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER_MASK 0x00001FFFL
+#define HUBPREQ0_DST_AFTER_SCALER__DST_Y_AFTER_SCALER_MASK 0x00070000L
+//HUBPREQ0_PREFETCH_SETTINGS
+#define HUBPREQ0_PREFETCH_SETTINGS__VRATIO_PREFETCH__SHIFT 0x0
+#define HUBPREQ0_PREFETCH_SETTINGS__DST_Y_PREFETCH__SHIFT 0x18
+#define HUBPREQ0_PREFETCH_SETTINGS__VRATIO_PREFETCH_MASK 0x003FFFFFL
+#define HUBPREQ0_PREFETCH_SETTINGS__DST_Y_PREFETCH_MASK 0xFF000000L
+//HUBPREQ0_PREFETCH_SETTINGS_C
+#define HUBPREQ0_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C__SHIFT 0x0
+#define HUBPREQ0_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C_MASK 0x003FFFFFL
+//HUBPREQ0_VBLANK_PARAMETERS_0
+#define HUBPREQ0_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK__SHIFT 0x8
+#define HUBPREQ0_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK_MASK 0x0000007FL
+#define HUBPREQ0_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK_MASK 0x00003F00L
+//HUBPREQ0_VBLANK_PARAMETERS_1
+#define HUBPREQ0_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ0_VBLANK_PARAMETERS_2
+#define HUBPREQ0_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ0_VBLANK_PARAMETERS_3
+#define HUBPREQ0_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ0_VBLANK_PARAMETERS_4
+#define HUBPREQ0_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ0_FLIP_PARAMETERS_0
+#define HUBPREQ0_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP__SHIFT 0x8
+#define HUBPREQ0_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP_MASK 0x0000007FL
+#define HUBPREQ0_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP_MASK 0x00003F00L
+//HUBPREQ0_FLIP_PARAMETERS_1
+#define HUBPREQ0_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ0_FLIP_PARAMETERS_2
+#define HUBPREQ0_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ0_NOM_PARAMETERS_0
+#define HUBPREQ0_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ0_NOM_PARAMETERS_1
+#define HUBPREQ0_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ0_NOM_PARAMETERS_2
+#define HUBPREQ0_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ0_NOM_PARAMETERS_3
+#define HUBPREQ0_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ0_NOM_PARAMETERS_4
+#define HUBPREQ0_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ0_NOM_PARAMETERS_5
+#define HUBPREQ0_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ0_NOM_PARAMETERS_6
+#define HUBPREQ0_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ0_NOM_PARAMETERS_7
+#define HUBPREQ0_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ0_PER_LINE_DELIVERY_PRE
+#define HUBPREQ0_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L__SHIFT 0x0
+#define HUBPREQ0_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C__SHIFT 0x10
+#define HUBPREQ0_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L_MASK 0x00001FFFL
+#define HUBPREQ0_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C_MASK 0x1FFF0000L
+//HUBPREQ0_PER_LINE_DELIVERY
+#define HUBPREQ0_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L__SHIFT 0x0
+#define HUBPREQ0_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C__SHIFT 0x10
+#define HUBPREQ0_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L_MASK 0x00001FFFL
+#define HUBPREQ0_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C_MASK 0x1FFF0000L
+//HUBPREQ0_CURSOR_SETTINGS
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET__SHIFT 0x0
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST__SHIFT 0x8
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET__SHIFT 0x10
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST__SHIFT 0x18
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET_MASK 0x000000FFL
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST_MASK 0x00000300L
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET_MASK 0x00FF0000L
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST_MASK 0x03000000L
+//HUBPREQ0_REF_FREQ_TO_PIX_FREQ
+#define HUBPREQ0_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ__SHIFT 0x0
+#define HUBPREQ0_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ_MASK 0x001FFFFFL
+//HUBPREQ0_DST_Y_DELTA_DRQ_LIMIT
+#define HUBPREQ0_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT__SHIFT 0x0
+#define HUBPREQ0_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT_MASK 0x00007FFFL
+//HUBPREQ0_HUBPREQ_MEM_PWR_CTRL
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE__SHIFT 0x4
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS__SHIFT 0x6
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE__SHIFT 0xc
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS__SHIFT 0xe
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE_MASK 0x00000030L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS_MASK 0x00000040L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE_MASK 0x00003000L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS_MASK 0x00004000L
+//HUBPREQ0_HUBPREQ_MEM_PWR_STATUS
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE__SHIFT 0x6
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE_MASK 0x000000C0L
+//HUBPREQ0_VBLANK_PARAMETERS_5
+#define HUBPREQ0_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK_MASK 0x007FFFFFL
+//HUBPREQ0_VBLANK_PARAMETERS_6
+#define HUBPREQ0_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK_MASK 0x007FFFFFL
+//HUBPREQ0_FLIP_PARAMETERS_3
+#define HUBPREQ0_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP_MASK 0x007FFFFFL
+//HUBPREQ0_FLIP_PARAMETERS_4
+#define HUBPREQ0_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP_MASK 0x007FFFFFL
+//HUBPREQ0_FLIP_PARAMETERS_5
+#define HUBPREQ0_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C_MASK 0x007FFFFFL
+//HUBPREQ0_FLIP_PARAMETERS_6
+#define HUBPREQ0_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C_MASK 0x007FFFFFL
+//HUBPREQ0_UCLK_PSTATE_FORCE
+#define HUBPREQ0_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_EN__SHIFT 0x0
+#define HUBPREQ0_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_VALUE__SHIFT 0x1
+#define HUBPREQ0_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_EN__SHIFT 0x2
+#define HUBPREQ0_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_VALUE__SHIFT 0x3
+#define HUBPREQ0_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_EN_MASK 0x00000001L
+#define HUBPREQ0_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_VALUE_MASK 0x00000002L
+#define HUBPREQ0_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_EN_MASK 0x00000004L
+#define HUBPREQ0_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_VALUE_MASK 0x00000008L
+//HUBPREQ0_HUBPREQ_STATUS_REG0
+#define HUBPREQ0_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S0__SHIFT 0x0
+#define HUBPREQ0_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S1__SHIFT 0x8
+#define HUBPREQ0_HUBPREQ_STATUS_REG0__STATUS_VTG_COUNT__SHIFT 0x10
+#define HUBPREQ0_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S0_MASK 0x0000001FL
+#define HUBPREQ0_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S1_MASK 0x00001F00L
+#define HUBPREQ0_HUBPREQ_STATUS_REG0__STATUS_VTG_COUNT_MASK 0x7FFF0000L
+//HUBPREQ0_HUBPREQ_STATUS_REG1
+#define HUBPREQ0_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S0__SHIFT 0x0
+#define HUBPREQ0_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S1__SHIFT 0x10
+#define HUBPREQ0_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S0_MASK 0x00003FFFL
+#define HUBPREQ0_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S1_MASK 0x3FFF0000L
+//HUBPREQ0_HUBPREQ_STATUS_REG2
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S0__SHIFT 0x0
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S0__SHIFT 0x1
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S0__SHIFT 0x2
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S0__SHIFT 0x3
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S0__SHIFT 0x4
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S0__SHIFT 0x5
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S1__SHIFT 0x8
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S1__SHIFT 0x9
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S1__SHIFT 0xa
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S1__SHIFT 0xb
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S1__SHIFT 0xc
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S1__SHIFT 0xd
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_CUR__SHIFT 0x10
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_CUR__SHIFT 0x11
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_CUR__SHIFT 0x12
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_CUR__SHIFT 0x13
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_CUR__SHIFT 0x14
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_CUR__SHIFT 0x15
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_VBLANK__SHIFT 0x1a
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_HUBP_EN__SHIFT 0x1b
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_RECOVERY__SHIFT 0x1c
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_FLUSH__SHIFT 0x1d
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S0__SHIFT 0x1e
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S1__SHIFT 0x1f
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S0_MASK 0x00000001L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S0_MASK 0x00000002L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S0_MASK 0x00000004L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S0_MASK 0x00000008L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S0_MASK 0x00000010L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S0_MASK 0x00000020L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S1_MASK 0x00000100L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S1_MASK 0x00000200L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S1_MASK 0x00000400L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S1_MASK 0x00000800L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S1_MASK 0x00001000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S1_MASK 0x00002000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_CUR_MASK 0x00010000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_CUR_MASK 0x00020000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_CUR_MASK 0x00040000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_CUR_MASK 0x00080000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_CUR_MASK 0x00100000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_CUR_MASK 0x00200000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_VBLANK_MASK 0x04000000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_HUBP_EN_MASK 0x08000000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_RECOVERY_MASK 0x10000000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_FLUSH_MASK 0x20000000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S0_MASK 0x40000000L
+#define HUBPREQ0_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S1_MASK 0x80000000L
+
+
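In the HUBPREQ block above, PRIMARY_SURFACE_ADDRESS covers bits 31:0 of the surface address and PRIMARY_SURFACE_ADDRESS_HIGH the next 16 bits (mask 0x0000FFFF), so programming a surface means splitting a 64-bit GPU address across two register writes. A minimal sketch under those assumptions follows (not part of the diff); write_reg() and the mm* offsets are hypothetical placeholders, and the high-before-low ordering is only a conventional choice, not something this header specifies.

#include <stdint.h>

/* hypothetical MMIO write helper and register offsets, for illustration only */
extern void write_reg(uint32_t offset, uint32_t value);
#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS       0x0 /* placeholder */
#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH  0x0 /* placeholder */

/* split a GPU address into the 32-bit low and 16-bit high register fields */
static void program_primary_surface(uint64_t gpu_addr)
{
        uint32_t lo = (uint32_t)(gpu_addr & 0xFFFFFFFFULL);
        uint32_t hi = (uint32_t)(gpu_addr >> 32) &
                      HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK;

        /* write HIGH first, then LOW (conventional ordering; assumption) */
        write_reg(mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, hi);
        write_reg(mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS, lo);
}
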
+// addressBlock: dce_dc_dcbubp0_dispdec_hubpret_dispdec
+//HUBPRET0_HUBPRET_CONTROL
+#define HUBPRET0_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS__SHIFT 0x4
+#define HUBPRET0_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE__SHIFT 0xf
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT 0x10
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G__SHIFT 0x12
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B__SHIFT 0x14
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R__SHIFT 0x16
+#define HUBPRET0_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE__SHIFT 0x18
+#define HUBPRET0_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS_MASK 0x00007FF0L
+#define HUBPRET0_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE_MASK 0x00008000L
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK 0x00030000L
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G_MASK 0x000C0000L
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B_MASK 0x00300000L
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R_MASK 0x00C00000L
+#define HUBPRET0_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE_MASK 0xFF000000L
+//HUBPRET0_HUBPRET_MEM_PWR_CTRL
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE__SHIFT 0x10
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS__SHIFT 0x12
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE__SHIFT 0x14
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE_MASK 0x00030000L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS_MASK 0x00040000L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE_MASK 0x00300000L
+//HUBPRET0_HUBPRET_MEM_PWR_STATUS
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE_MASK 0x00000030L
+//HUBPRET0_HUBPRET_READ_LINE_CTRL0
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE_MASK 0x0000FFFFL
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM_MASK 0x3FFF0000L
+//HUBPRET0_HUBPRET_READ_LINE_CTRL1
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED_MASK 0x00003FFFL
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE_MASK 0xFFFF0000L
+//HUBPRET0_HUBPRET_READ_LINE0
+#define HUBPRET0_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START_MASK 0x00003FFFL
+#define HUBPRET0_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END_MASK 0x3FFF0000L
+//HUBPRET0_HUBPRET_READ_LINE1
+#define HUBPRET0_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START_MASK 0x00003FFFL
+#define HUBPRET0_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END_MASK 0x3FFF0000L
+//HUBPRET0_HUBPRET_INTERRUPT
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK__SHIFT 0x0
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK__SHIFT 0x1
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK__SHIFT 0x2
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE__SHIFT 0x4
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE__SHIFT 0x5
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE__SHIFT 0x6
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR__SHIFT 0x8
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR__SHIFT 0x9
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR__SHIFT 0xa
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS__SHIFT 0xc
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS__SHIFT 0xd
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS__SHIFT 0xe
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS__SHIFT 0x10
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS__SHIFT 0x11
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS__SHIFT 0x12
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK_MASK 0x00000001L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK_MASK 0x00000002L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK_MASK 0x00000004L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE_MASK 0x00000010L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE_MASK 0x00000020L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE_MASK 0x00000040L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK 0x00000100L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR_MASK 0x00000200L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR_MASK 0x00000400L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS_MASK 0x00001000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS_MASK 0x00002000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS_MASK 0x00004000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK 0x00010000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS_MASK 0x00020000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS_MASK 0x00040000L
+//HUBPRET0_HUBPRET_READ_LINE_VALUE
+#define HUBPRET0_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_MASK 0x00003FFFL
+#define HUBPRET0_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT_MASK 0x3FFF0000L
+//HUBPRET0_HUBPRET_READ_LINE_STATUS
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE__SHIFT 0x4
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE__SHIFT 0x5
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT 0x8
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE__SHIFT 0xa
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK_MASK 0x00000001L
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE_MASK 0x00000010L
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE_MASK 0x00000020L
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK 0x00000100L
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE_MASK 0x00000400L
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_cursor0_dispdec
+//CURSOR0_0_CURSOR_CONTROL
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE__SHIFT 0x0
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_REQ_MODE__SHIFT 0x2
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE__SHIFT 0x8
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_TMZ__SHIFT 0xc
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH__SHIFT 0x10
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS__SHIFT 0x14
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK__SHIFT 0x18
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN__SHIFT 0x1e
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL__SHIFT 0x1f
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE_MASK 0x00000001L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_REQ_MODE_MASK 0x00000004L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE_MASK 0x00000700L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_TMZ_MASK 0x00001000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH_MASK 0x00030000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS_MASK 0x00100000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK_MASK 0x1F000000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN_MASK 0x40000000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL_MASK 0x80000000L
+//CURSOR0_0_CURSOR_SURFACE_ADDRESS
+#define CURSOR0_0_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0
+#define CURSOR0_0_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH
+#define CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//CURSOR0_0_CURSOR_SIZE
+#define CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT 0x0
+#define CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH__SHIFT 0x10
+#define CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT_MASK 0x000001FFL
+#define CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH_MASK 0x01FF0000L
+//CURSOR0_0_CURSOR_POSITION
+#define CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0
+#define CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION__SHIFT 0x10
+#define CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION_MASK 0x00003FFFL
+#define CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION_MASK 0x3FFF0000L
+//CURSOR0_0_CURSOR_HOT_SPOT
+#define CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0
+#define CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10
+#define CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x000000FFL
+#define CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x00FF0000L
+//CURSOR0_0_CURSOR_STEREO_CONTROL
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x12
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x00000001L
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x0003FFF0L
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0xFFFC0000L
+//CURSOR0_0_CURSOR_DST_OFFSET
+#define CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET__SHIFT 0x0
+#define CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET_MASK 0x00001FFFL
+//CURSOR0_0_CURSOR_MEM_PWR_CTRL
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE__SHIFT 0x0
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS__SHIFT 0x2
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE__SHIFT 0x4
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE_MASK 0x00000003L
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS_MASK 0x00000004L
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE_MASK 0x00000030L
+//CURSOR0_0_CURSOR_MEM_PWR_STATUS
+#define CURSOR0_0_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE__SHIFT 0x0
+#define CURSOR0_0_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE_MASK 0x00000003L
+//CURSOR0_0_DMDATA_ADDRESS_HIGH
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_TMZ__SHIFT 0x1e
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_TMZ_MASK 0x40000000L
+//CURSOR0_0_DMDATA_ADDRESS_LOW
+#define CURSOR0_0_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW__SHIFT 0x0
+#define CURSOR0_0_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//CURSOR0_0_DMDATA_CNTL
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_UPDATED__SHIFT 0x0
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_REPEAT__SHIFT 0x1
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_MODE__SHIFT 0x2
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_SIZE__SHIFT 0x10
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_UPDATED_MASK 0x00000001L
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_REPEAT_MASK 0x00000002L
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_MODE_MASK 0x00000004L
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_SIZE_MASK 0x0FFF0000L
+//CURSOR0_0_DMDATA_QOS_CNTL
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_QOS_MODE__SHIFT 0x0
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL__SHIFT 0x4
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_DL_DELTA__SHIFT 0x10
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_QOS_MODE_MASK 0x00000001L
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL_MASK 0x000000F0L
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_DL_DELTA_MASK 0xFFFF0000L
+//CURSOR0_0_DMDATA_STATUS
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_DONE__SHIFT 0x0
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_UNDERFLOW__SHIFT 0x2
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR__SHIFT 0x4
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_DONE_MASK 0x00000001L
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_UNDERFLOW_MASK 0x00000004L
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR_MASK 0x00000010L
+//CURSOR0_0_DMDATA_SW_CNTL
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_UPDATED__SHIFT 0x0
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_REPEAT__SHIFT 0x1
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_SIZE__SHIFT 0x10
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_UPDATED_MASK 0x00000001L
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_REPEAT_MASK 0x00000002L
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_SIZE_MASK 0x0FFF0000L
+//CURSOR0_0_DMDATA_SW_DATA
+#define CURSOR0_0_DMDATA_SW_DATA__DMDATA_SW_DATA__SHIFT 0x0
+#define CURSOR0_0_DMDATA_SW_DATA__DMDATA_SW_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON7_PERFCOUNTER_CNTL
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON7_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON7_PERFCOUNTER_CNTL2
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON7_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON7_PERFCOUNTER_STATE
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON7_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON7_PERFMON_CNTL
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON7_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON7_PERFMON_CNTL2
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON7_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON7_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON7_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON7_PERFMON_CVALUE_LOW
+#define DC_PERFMON7_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON7_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON7_PERFMON_HI
+#define DC_PERFMON7_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON7_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON7_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON7_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON7_PERFMON_LOW
+#define DC_PERFMON7_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON7_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubp_dispdec
+//HUBP1_DCSURF_SURFACE_CONFIG
+#define HUBP1_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define HUBP1_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE__SHIFT 0x8
+#define HUBP1_DCSURF_SURFACE_CONFIG__H_MIRROR_EN__SHIFT 0xa
+#define HUBP1_DCSURF_SURFACE_CONFIG__ALPHA_PLANE_EN__SHIFT 0xb
+#define HUBP1_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define HUBP1_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE_MASK 0x00000300L
+#define HUBP1_DCSURF_SURFACE_CONFIG__H_MIRROR_EN_MASK 0x00000400L
+#define HUBP1_DCSURF_SURFACE_CONFIG__ALPHA_PLANE_EN_MASK 0x00000800L
+//HUBP1_DCSURF_ADDR_CONFIG
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define HUBP1_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE__SHIFT 0x6
+#define HUBP1_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0xc
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_PKRS__SHIFT 0x10
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define HUBP1_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE_MASK 0x000000C0L
+#define HUBP1_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x00003000L
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_PKRS_MASK 0x00070000L
+//HUBP1_DCSURF_TILING_CONFIG
+#define HUBP1_DCSURF_TILING_CONFIG__SW_MODE__SHIFT 0x0
+#define HUBP1_DCSURF_TILING_CONFIG__DIM_TYPE__SHIFT 0x7
+#define HUBP1_DCSURF_TILING_CONFIG__META_LINEAR__SHIFT 0x9
+#define HUBP1_DCSURF_TILING_CONFIG__PIPE_ALIGNED__SHIFT 0xb
+#define HUBP1_DCSURF_TILING_CONFIG__SW_MODE_MASK 0x0000001FL
+#define HUBP1_DCSURF_TILING_CONFIG__DIM_TYPE_MASK 0x00000180L
+#define HUBP1_DCSURF_TILING_CONFIG__META_LINEAR_MASK 0x00000200L
+#define HUBP1_DCSURF_TILING_CONFIG__PIPE_ALIGNED_MASK 0x00000800L
+//HUBP1_DCSURF_PRI_VIEWPORT_START
+#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP1_DCSURF_PRI_VIEWPORT_START_C
+#define HUBP1_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP1_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP1_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP1_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP1_DCSURF_SEC_VIEWPORT_START
+#define HUBP1_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP1_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP1_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP1_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP1_DCSURF_SEC_VIEWPORT_START_C
+#define HUBP1_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP1_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP1_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP1_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP1_DCHUBP_REQ_SIZE_CONFIG
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT__SHIFT 0x0
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR__SHIFT 0x4
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE__SHIFT 0x8
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE__SHIFT 0xb
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE__SHIFT 0x10
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE__SHIFT 0x12
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE__SHIFT 0x14
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE__SHIFT 0x18
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT_MASK 0x00000007L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR_MASK 0x00000070L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE_MASK 0x00000700L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE_MASK 0x00001800L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE_MASK 0x00030000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE_MASK 0x000C0000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE_MASK 0x00700000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE_MASK 0x07000000L
+//HUBP1_DCHUBP_REQ_SIZE_CONFIG_C
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C__SHIFT 0x0
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C__SHIFT 0x4
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C__SHIFT 0x8
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C__SHIFT 0xb
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C__SHIFT 0x10
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C__SHIFT 0x12
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C__SHIFT 0x14
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C_MASK 0x00000007L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C_MASK 0x00000070L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C_MASK 0x00000700L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C_MASK 0x00001800L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C_MASK 0x00030000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C_MASK 0x000C0000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C_MASK 0x00700000L
+//HUBP1_DCHUBP_CNTL
+#define HUBP1_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT 0x0
+#define HUBP1_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ__SHIFT 0x1
+#define HUBP1_DCHUBP_CNTL__HUBP_SOFT_RESET__SHIFT 0x2
+#define HUBP1_DCHUBP_CNTL__HUBP_IN_BLANK__SHIFT 0x3
+#define HUBP1_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT 0x4
+#define HUBP1_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC__SHIFT 0x8
+#define HUBP1_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM__SHIFT 0x9
+#define HUBP1_DCHUBP_CNTL__HUBP_UNBOUNDED_REQ_MODE__SHIFT 0xa
+#define HUBP1_DCHUBP_CNTL__HUBP_SEG_ALLOC_ERR_STATUS__SHIFT 0xb
+#define HUBP1_DCHUBP_CNTL__HUBP_TTU_DISABLE__SHIFT 0xc
+#define HUBP1_DCHUBP_CNTL__HUBP_TTU_MODE__SHIFT 0xd
+#define HUBP1_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ__SHIFT 0x10
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS__SHIFT 0x14
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD__SHIFT 0x18
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR__SHIFT 0x1a
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN__SHIFT 0x1b
+#define HUBP1_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT 0x1c
+#define HUBP1_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR__SHIFT 0x1f
+#define HUBP1_DCHUBP_CNTL__HUBP_BLANK_EN_MASK 0x00000001L
+#define HUBP1_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ_MASK 0x00000002L
+#define HUBP1_DCHUBP_CNTL__HUBP_SOFT_RESET_MASK 0x00000004L
+#define HUBP1_DCHUBP_CNTL__HUBP_IN_BLANK_MASK 0x00000008L
+#define HUBP1_DCHUBP_CNTL__HUBP_VTG_SEL_MASK 0x000000F0L
+#define HUBP1_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC_MASK 0x00000100L
+#define HUBP1_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM_MASK 0x00000200L
+#define HUBP1_DCHUBP_CNTL__HUBP_UNBOUNDED_REQ_MODE_MASK 0x00000400L
+#define HUBP1_DCHUBP_CNTL__HUBP_SEG_ALLOC_ERR_STATUS_MASK 0x00000800L
+#define HUBP1_DCHUBP_CNTL__HUBP_TTU_DISABLE_MASK 0x00001000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TTU_MODE_MASK 0x0000E000L
+#define HUBP1_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ_MASK 0x000F0000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_MASK 0x00F00000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD_MASK 0x03000000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR_MASK 0x04000000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN_MASK 0x08000000L
+#define HUBP1_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK 0x70000000L
+#define HUBP1_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR_MASK 0x80000000L
+//HUBP1_HUBP_CLK_CNTL
+#define HUBP1_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0x0
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS__SHIFT 0x8
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS__SHIFT 0xc
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS__SHIFT 0x10
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON__SHIFT 0x14
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON__SHIFT 0x15
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON__SHIFT 0x16
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON__SHIFT 0x17
+#define HUBP1_HUBP_CLK_CNTL__HUBP_FGCG_REP_DIS__SHIFT 0x18
+#define HUBP1_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL__SHIFT 0x1c
+#define HUBP1_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE_MASK 0x00000001L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS_MASK 0x00000100L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS_MASK 0x00001000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS_MASK 0x00010000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON_MASK 0x00100000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON_MASK 0x00200000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON_MASK 0x00400000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON_MASK 0x00800000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_FGCG_REP_DIS_MASK 0x01000000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL_MASK 0xF0000000L
+//HUBP1_DCHUBP_VMPG_CONFIG
+#define HUBP1_DCHUBP_VMPG_CONFIG__VMPG_SIZE__SHIFT 0x0
+#define HUBP1_DCHUBP_VMPG_CONFIG__PTE_BUFFER_MODE__SHIFT 0x1
+#define HUBP1_DCHUBP_VMPG_CONFIG__BIGK_FRAGMENT_SIZE__SHIFT 0x2
+#define HUBP1_DCHUBP_VMPG_CONFIG__FORCE_ONE_ROW_FOR_FRAME__SHIFT 0x7
+#define HUBP1_DCHUBP_VMPG_CONFIG__VMPG_SIZE_MASK 0x00000001L
+#define HUBP1_DCHUBP_VMPG_CONFIG__PTE_BUFFER_MODE_MASK 0x00000002L
+#define HUBP1_DCHUBP_VMPG_CONFIG__BIGK_FRAGMENT_SIZE_MASK 0x0000007CL
+#define HUBP1_DCHUBP_VMPG_CONFIG__FORCE_ONE_ROW_FOR_FRAME_MASK 0x00000080L
+//HUBP1_DCHUBP_MALL_CONFIG
+#define HUBP1_DCHUBP_MALL_CONFIG__USE_MALL_SEL__SHIFT 0x0
+#define HUBP1_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR__SHIFT 0x2
+#define HUBP1_DCHUBP_MALL_CONFIG__USE_MALL_SEL_MASK 0x00000003L
+#define HUBP1_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR_MASK 0x00000004L
+//HUBP1_DCHUBP_MALL_SUB_VP
+#define HUBP1_DCHUBP_MALL_SUB_VP__USE_MALL_AT_START_LINE__SHIFT 0x0
+#define HUBP1_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S0__SHIFT 0x1
+#define HUBP1_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S1__SHIFT 0xf
+#define HUBP1_DCHUBP_MALL_SUB_VP__USE_MALL_AT_START_LINE_MASK 0x00000001L
+#define HUBP1_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S0_MASK 0x00007FFEL
+#define HUBP1_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S1_MASK 0x1FFF8000L
+//HUBP1_HUBPREQ_DEBUG_DB
+#define HUBP1_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP1_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG_MASK 0xFFFFFFFFL
+//HUBP1_HUBPREQ_DEBUG
+#define HUBP1_HUBPREQ_DEBUG__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP1_HUBPREQ_DEBUG__HUBPREQ_DEBUG_FLIP_REQ_DURING_MALL_STATUS__SHIFT 0x1f
+#define HUBP1_HUBPREQ_DEBUG__HUBPREQ_DEBUG_MASK 0x7FFFFFFFL
+#define HUBP1_HUBPREQ_DEBUG__HUBPREQ_DEBUG_FLIP_REQ_DURING_MALL_STATUS_MASK 0x80000000L
+//HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK__SHIFT 0xc
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK__SHIFT 0x14
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK__SHIFT 0x1c
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK_MASK 0x00000001L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK_MASK 0x00000FF0L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK_MASK 0x0001F000L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK_MASK 0x01F00000L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK_MASK 0x30000000L
+//HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK__SHIFT 0x0
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK__SHIFT 0x1
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK__SHIFT 0x4
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK__SHIFT 0xc
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK__SHIFT 0x14
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK_MASK 0x00000001L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK_MASK 0x00000002L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK_MASK 0x00000FF0L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK_MASK 0x0001F000L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK_MASK 0x01F00000L
+//HUBP1_HUBP_MALL_STATUS
+#define HUBP1_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_STATIC_SCREEN__SHIFT 0x0
+#define HUBP1_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_PSTATE_CHANGE__SHIFT 0x1
+#define HUBP1_HUBP_MALL_STATUS__MALL_USE_REQUEST__SHIFT 0x2
+#define HUBP1_HUBP_MALL_STATUS__MALL_USE_RESPONSE__SHIFT 0x3
+#define HUBP1_HUBP_MALL_STATUS__MALL_IN_USE__SHIFT 0x4
+#define HUBP1_HUBP_MALL_STATUS__MALL_PREFETCH_COMPLETE__SHIFT 0x5
+#define HUBP1_HUBP_MALL_STATUS__SUB_VP_MALL_RETRIEVE__SHIFT 0x6
+#define HUBP1_HUBP_MALL_STATUS__MCB_MALL_USE_RESPONSE__SHIFT 0x7
+#define HUBP1_HUBP_MALL_STATUS__CURSOR_LOCAL_RETRIEVE__SHIFT 0x8
+#define HUBP1_HUBP_MALL_STATUS__CURSOR_LOCAL_PREFETCH__SHIFT 0x9
+#define HUBP1_HUBP_MALL_STATUS__MALL_RETRIEVE_FRAME__SHIFT 0xa
+#define HUBP1_HUBP_MALL_STATUS__MALL_PREFETCH_FRAME__SHIFT 0xb
+#define HUBP1_HUBP_MALL_STATUS__CRQ_BUSY_WITH_MALL__SHIFT 0xc
+#define HUBP1_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL__SHIFT 0xd
+#define HUBP1_HUBP_MALL_STATUS__DRQ_BUSY_WITH_MALL__SHIFT 0xe
+#define HUBP1_HUBP_MALL_STATUS__USE_ONE_ROW_FOR_FRAME__SHIFT 0xf
+#define HUBP1_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_RETRIEVE_IN_PROGRESS__SHIFT 0x10
+#define HUBP1_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_OUTSTANDING__SHIFT 0x11
+#define HUBP1_HUBP_MALL_STATUS__DRQ_MALL_OUTSTANDING__SHIFT 0x12
+#define HUBP1_HUBP_MALL_STATUS__DRQ_MALL_CNT_ZERO__SHIFT 0x13
+#define HUBP1_HUBP_MALL_STATUS__MRQ_S1_MALL_RETRIEVE_SUB_VP__SHIFT 0x14
+#define HUBP1_HUBP_MALL_STATUS__MRQ_S0_MALL_RETRIEVE_SUB_VP__SHIFT 0x15
+#define HUBP1_HUBP_MALL_STATUS__MRQ_MALL_OUTSTANDING__SHIFT 0x16
+#define HUBP1_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_SUB_VP__SHIFT 0x17
+#define HUBP1_HUBP_MALL_STATUS__CRQ_MALL_OUTSTANDING__SHIFT 0x18
+#define HUBP1_HUBP_MALL_STATUS__CRQ_LOCAL_OUTSTANDING__SHIFT 0x19
+#define HUBP1_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_STATIC_SCREEN_MASK 0x00000001L
+#define HUBP1_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_PSTATE_CHANGE_MASK 0x00000002L
+#define HUBP1_HUBP_MALL_STATUS__MALL_USE_REQUEST_MASK 0x00000004L
+#define HUBP1_HUBP_MALL_STATUS__MALL_USE_RESPONSE_MASK 0x00000008L
+#define HUBP1_HUBP_MALL_STATUS__MALL_IN_USE_MASK 0x00000010L
+#define HUBP1_HUBP_MALL_STATUS__MALL_PREFETCH_COMPLETE_MASK 0x00000020L
+#define HUBP1_HUBP_MALL_STATUS__SUB_VP_MALL_RETRIEVE_MASK 0x00000040L
+#define HUBP1_HUBP_MALL_STATUS__MCB_MALL_USE_RESPONSE_MASK 0x00000080L
+#define HUBP1_HUBP_MALL_STATUS__CURSOR_LOCAL_RETRIEVE_MASK 0x00000100L
+#define HUBP1_HUBP_MALL_STATUS__CURSOR_LOCAL_PREFETCH_MASK 0x00000200L
+#define HUBP1_HUBP_MALL_STATUS__MALL_RETRIEVE_FRAME_MASK 0x00000400L
+#define HUBP1_HUBP_MALL_STATUS__MALL_PREFETCH_FRAME_MASK 0x00000800L
+#define HUBP1_HUBP_MALL_STATUS__CRQ_BUSY_WITH_MALL_MASK 0x00001000L
+#define HUBP1_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_MASK 0x00002000L
+#define HUBP1_HUBP_MALL_STATUS__DRQ_BUSY_WITH_MALL_MASK 0x00004000L
+#define HUBP1_HUBP_MALL_STATUS__USE_ONE_ROW_FOR_FRAME_MASK 0x00008000L
+#define HUBP1_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_RETRIEVE_IN_PROGRESS_MASK 0x00010000L
+#define HUBP1_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_OUTSTANDING_MASK 0x00020000L
+#define HUBP1_HUBP_MALL_STATUS__DRQ_MALL_OUTSTANDING_MASK 0x00040000L
+#define HUBP1_HUBP_MALL_STATUS__DRQ_MALL_CNT_ZERO_MASK 0x00080000L
+#define HUBP1_HUBP_MALL_STATUS__MRQ_S1_MALL_RETRIEVE_SUB_VP_MASK 0x00100000L
+#define HUBP1_HUBP_MALL_STATUS__MRQ_S0_MALL_RETRIEVE_SUB_VP_MASK 0x00200000L
+#define HUBP1_HUBP_MALL_STATUS__MRQ_MALL_OUTSTANDING_MASK 0x00400000L
+#define HUBP1_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_SUB_VP_MASK 0x00800000L
+#define HUBP1_HUBP_MALL_STATUS__CRQ_MALL_OUTSTANDING_MASK 0x01000000L
+#define HUBP1_HUBP_MALL_STATUS__CRQ_LOCAL_OUTSTANDING_MASK 0x02000000L
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubpreq_dispdec
+//HUBPREQ1_DCSURF_SURFACE_PITCH
+#define HUBPREQ1_DCSURF_SURFACE_PITCH__PITCH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_PITCH__META_PITCH__SHIFT 0x10
+#define HUBPREQ1_DCSURF_SURFACE_PITCH__PITCH_MASK 0x00003FFFL
+#define HUBPREQ1_DCSURF_SURFACE_PITCH__META_PITCH_MASK 0x3FFF0000L
+//HUBPREQ1_DCSURF_SURFACE_PITCH_C
+#define HUBPREQ1_DCSURF_SURFACE_PITCH_C__PITCH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_PITCH_C__META_PITCH_C__SHIFT 0x10
+#define HUBPREQ1_DCSURF_SURFACE_PITCH_C__PITCH_C_MASK 0x00003FFFL
+#define HUBPREQ1_DCSURF_SURFACE_PITCH_C__META_PITCH_C_MASK 0x3FFF0000L
+//HUBPREQ1_VMID_SETTINGS_0
+#define HUBPREQ1_VMID_SETTINGS_0__VMID__SHIFT 0x0
+#define HUBPREQ1_VMID_SETTINGS_0__VMID_MASK 0x0000000FL
+//HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SURFACE_CONTROL
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK__SHIFT 0x2
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_C__SHIFT 0x5
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK__SHIFT 0xa
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_C__SHIFT 0xd
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x11
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x12
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x13
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_MASK 0x0000000CL
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_C_MASK 0x00000060L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_MASK 0x00000C00L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_C_MASK 0x00006000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00020000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x00040000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x00080000L
+//HUBPREQ1_DCSURF_FLIP_CONTROL
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM__SHIFT 0x4
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS__SHIFT 0x9
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC__SHIFT 0xc
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC__SHIFT 0x10
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE__SHIFT 0x11
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY__SHIFT 0x12
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY__SHIFT 0x14
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK_MASK 0x00000001L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE_MASK 0x00000002L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM_MASK 0x000000F0L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK 0x00000100L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS_MASK 0x00000200L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC_MASK 0x00003000L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC_MASK 0x00010000L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE_MASK 0x00020000L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY_MASK 0x00040000L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY_MASK 0x3FF00000L
+//HUBPREQ1_DCSURF_FLIP_CONTROL2
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME__SHIFT 0x0
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE__SHIFT 0x8
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK__SHIFT 0x9
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE__SHIFT 0xa
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH__SHIFT 0xc
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME_MASK 0x000000FFL
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE_MASK 0x00000100L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK_MASK 0x00000200L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE_MASK 0x00000400L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH_MASK 0x00001000L
+//HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE__SHIFT 0x1
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK__SHIFT 0x2
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE__SHIFT 0x3
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR__SHIFT 0x8
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR__SHIFT 0x9
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED__SHIFT 0x10
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS__SHIFT 0x11
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED__SHIFT 0x12
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS__SHIFT 0x13
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK_MASK 0x00000001L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE_MASK 0x00000002L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK_MASK 0x00000004L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE_MASK 0x00000008L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR_MASK 0x00000100L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR_MASK 0x00000200L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED_MASK 0x00010000L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS_MASK 0x00020000L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED_MASK 0x00040000L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS_MASK 0x00080000L
+//HUBPREQ1_DCSURF_SURFACE_INUSE
+#define HUBPREQ1_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_VMID__SHIFT 0x1c
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_VMID_MASK 0xF0000000L
+//HUBPREQ1_DCSURF_SURFACE_INUSE_C
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_VMID_C__SHIFT 0x1c
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_VMID_C_MASK 0xF0000000L
+//HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_VMID__SHIFT 0x1c
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_VMID_MASK 0xF0000000L
+//HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_VMID_C__SHIFT 0x1c
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_VMID_C_MASK 0xF0000000L
+//HUBPREQ1_DCN_EXPANSION_MODE
+#define HUBPREQ1_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE__SHIFT 0x0
+#define HUBPREQ1_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE__SHIFT 0x2
+#define HUBPREQ1_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE__SHIFT 0x4
+#define HUBPREQ1_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE__SHIFT 0x6
+#define HUBPREQ1_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE_MASK 0x00000003L
+#define HUBPREQ1_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE_MASK 0x0000000CL
+#define HUBPREQ1_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE_MASK 0x00000030L
+#define HUBPREQ1_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE_MASK 0x000000C0L
+//HUBPREQ1_DCN_TTU_QOS_WM
+#define HUBPREQ1_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM__SHIFT 0x0
+#define HUBPREQ1_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM__SHIFT 0x10
+#define HUBPREQ1_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM_MASK 0x00003FFFL
+#define HUBPREQ1_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM_MASK 0x3FFF0000L
+//HUBPREQ1_DCN_GLOBAL_TTU_CNTL
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK__SHIFT 0x0
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__PIPE_IN_FLUSH_URGENT__SHIFT 0x18
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__PRQ_MRQ_FLUSH_URGENT__SHIFT 0x19
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__ROW_TTU_MODE__SHIFT 0x1b
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP__SHIFT 0x1c
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK_MASK 0x00FFFFFFL
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__PIPE_IN_FLUSH_URGENT_MASK 0x01000000L
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__PRQ_MRQ_FLUSH_URGENT_MASK 0x02000000L
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__ROW_TTU_MODE_MASK 0x08000000L
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP_MASK 0xF0000000L
+//HUBPREQ1_DCN_SURF0_TTU_CNTL0
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ1_DCN_SURF0_TTU_CNTL1
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ1_DCN_SURF1_TTU_CNTL0
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ1_DCN_SURF1_TTU_CNTL1
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ1_DCN_CUR0_TTU_CNTL0
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ1_DCN_CUR0_TTU_CNTL1
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ1_DCN_CUR1_TTU_CNTL0
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ1_DCN_CUR1_TTU_CNTL1
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ1_DCN_DMDATA_VM_CNTL
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__REFCYC_PER_VM_DMDATA__SHIFT 0x0
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS__SHIFT 0x10
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_CLEAR__SHIFT 0x14
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS__SHIFT 0x18
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_LATE_STATUS__SHIFT 0x19
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_CLEAR__SHIFT 0x1a
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_DONE__SHIFT 0x1f
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__REFCYC_PER_VM_DMDATA_MASK 0x0000FFFFL
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_MASK 0x000F0000L
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_CLEAR_MASK 0x00100000L
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_MASK 0x01000000L
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_LATE_STATUS_MASK 0x02000000L
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_CLEAR_MASK 0x04000000L
+#define HUBPREQ1_DCN_DMDATA_VM_CNTL__DMDATA_VM_DONE_MASK 0x80000000L
+//HUBPREQ1_DCN_VM_SYSTEM_APERTURE_LOW_ADDR
+#define HUBPREQ1_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR__SHIFT 0x0
+#define HUBPREQ1_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR_MASK 0x3FFFFFFFL
+//HUBPREQ1_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR
+#define HUBPREQ1_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR__SHIFT 0x0
+#define HUBPREQ1_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MASK 0x3FFFFFFFL
+//HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
+#define HUBPREQ1_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
+//HUBPREQ1_BLANK_OFFSET_0
+#define HUBPREQ1_BLANK_OFFSET_0__REFCYC_H_BLANK_END__SHIFT 0x0
+#define HUBPREQ1_BLANK_OFFSET_0__DLG_V_BLANK_END__SHIFT 0x10
+#define HUBPREQ1_BLANK_OFFSET_0__REFCYC_H_BLANK_END_MASK 0x00001FFFL
+#define HUBPREQ1_BLANK_OFFSET_0__DLG_V_BLANK_END_MASK 0x7FFF0000L
+//HUBPREQ1_BLANK_OFFSET_1
+#define HUBPREQ1_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START__SHIFT 0x0
+#define HUBPREQ1_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START_MASK 0x0003FFFFL
+//HUBPREQ1_DST_DIMENSIONS
+#define HUBPREQ1_DST_DIMENSIONS__REFCYC_PER_HTOTAL__SHIFT 0x0
+#define HUBPREQ1_DST_DIMENSIONS__REFCYC_PER_HTOTAL_MASK 0x001FFFFFL
+//HUBPREQ1_DST_AFTER_SCALER
+#define HUBPREQ1_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER__SHIFT 0x0
+#define HUBPREQ1_DST_AFTER_SCALER__DST_Y_AFTER_SCALER__SHIFT 0x10
+#define HUBPREQ1_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER_MASK 0x00001FFFL
+#define HUBPREQ1_DST_AFTER_SCALER__DST_Y_AFTER_SCALER_MASK 0x00070000L
+//HUBPREQ1_PREFETCH_SETTINGS
+#define HUBPREQ1_PREFETCH_SETTINGS__VRATIO_PREFETCH__SHIFT 0x0
+#define HUBPREQ1_PREFETCH_SETTINGS__DST_Y_PREFETCH__SHIFT 0x18
+#define HUBPREQ1_PREFETCH_SETTINGS__VRATIO_PREFETCH_MASK 0x003FFFFFL
+#define HUBPREQ1_PREFETCH_SETTINGS__DST_Y_PREFETCH_MASK 0xFF000000L
+//HUBPREQ1_PREFETCH_SETTINGS_C
+#define HUBPREQ1_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C__SHIFT 0x0
+#define HUBPREQ1_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C_MASK 0x003FFFFFL
+//HUBPREQ1_VBLANK_PARAMETERS_0
+#define HUBPREQ1_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK__SHIFT 0x8
+#define HUBPREQ1_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK_MASK 0x0000007FL
+#define HUBPREQ1_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK_MASK 0x00003F00L
+//HUBPREQ1_VBLANK_PARAMETERS_1
+#define HUBPREQ1_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ1_VBLANK_PARAMETERS_2
+#define HUBPREQ1_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ1_VBLANK_PARAMETERS_3
+#define HUBPREQ1_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ1_VBLANK_PARAMETERS_4
+#define HUBPREQ1_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ1_FLIP_PARAMETERS_0
+#define HUBPREQ1_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP__SHIFT 0x8
+#define HUBPREQ1_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP_MASK 0x0000007FL
+#define HUBPREQ1_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP_MASK 0x00003F00L
+//HUBPREQ1_FLIP_PARAMETERS_1
+#define HUBPREQ1_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ1_FLIP_PARAMETERS_2
+#define HUBPREQ1_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ1_NOM_PARAMETERS_0
+#define HUBPREQ1_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ1_NOM_PARAMETERS_1
+#define HUBPREQ1_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ1_NOM_PARAMETERS_2
+#define HUBPREQ1_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ1_NOM_PARAMETERS_3
+#define HUBPREQ1_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ1_NOM_PARAMETERS_4
+#define HUBPREQ1_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ1_NOM_PARAMETERS_5
+#define HUBPREQ1_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ1_NOM_PARAMETERS_6
+#define HUBPREQ1_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ1_NOM_PARAMETERS_7
+#define HUBPREQ1_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ1_PER_LINE_DELIVERY_PRE
+#define HUBPREQ1_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L__SHIFT 0x0
+#define HUBPREQ1_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C__SHIFT 0x10
+#define HUBPREQ1_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L_MASK 0x00001FFFL
+#define HUBPREQ1_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C_MASK 0x1FFF0000L
+//HUBPREQ1_PER_LINE_DELIVERY
+#define HUBPREQ1_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L__SHIFT 0x0
+#define HUBPREQ1_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C__SHIFT 0x10
+#define HUBPREQ1_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L_MASK 0x00001FFFL
+#define HUBPREQ1_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C_MASK 0x1FFF0000L
+//HUBPREQ1_CURSOR_SETTINGS
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET__SHIFT 0x0
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST__SHIFT 0x8
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET__SHIFT 0x10
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST__SHIFT 0x18
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET_MASK 0x000000FFL
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST_MASK 0x00000300L
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET_MASK 0x00FF0000L
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST_MASK 0x03000000L
+//HUBPREQ1_REF_FREQ_TO_PIX_FREQ
+#define HUBPREQ1_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ__SHIFT 0x0
+#define HUBPREQ1_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ_MASK 0x001FFFFFL
+//HUBPREQ1_DST_Y_DELTA_DRQ_LIMIT
+#define HUBPREQ1_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT__SHIFT 0x0
+#define HUBPREQ1_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT_MASK 0x00007FFFL
+//HUBPREQ1_HUBPREQ_MEM_PWR_CTRL
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE__SHIFT 0x4
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS__SHIFT 0x6
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE__SHIFT 0xc
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS__SHIFT 0xe
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE_MASK 0x00000030L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS_MASK 0x00000040L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE_MASK 0x00003000L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS_MASK 0x00004000L
+//HUBPREQ1_HUBPREQ_MEM_PWR_STATUS
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE__SHIFT 0x6
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE_MASK 0x000000C0L
+//HUBPREQ1_VBLANK_PARAMETERS_5
+#define HUBPREQ1_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK_MASK 0x007FFFFFL
+//HUBPREQ1_VBLANK_PARAMETERS_6
+#define HUBPREQ1_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK_MASK 0x007FFFFFL
+//HUBPREQ1_FLIP_PARAMETERS_3
+#define HUBPREQ1_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP_MASK 0x007FFFFFL
+//HUBPREQ1_FLIP_PARAMETERS_4
+#define HUBPREQ1_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP_MASK 0x007FFFFFL
+//HUBPREQ1_FLIP_PARAMETERS_5
+#define HUBPREQ1_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C_MASK 0x007FFFFFL
+//HUBPREQ1_FLIP_PARAMETERS_6
+#define HUBPREQ1_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C_MASK 0x007FFFFFL
+//HUBPREQ1_UCLK_PSTATE_FORCE
+#define HUBPREQ1_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_EN__SHIFT 0x0
+#define HUBPREQ1_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_VALUE__SHIFT 0x1
+#define HUBPREQ1_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_EN__SHIFT 0x2
+#define HUBPREQ1_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_VALUE__SHIFT 0x3
+#define HUBPREQ1_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_EN_MASK 0x00000001L
+#define HUBPREQ1_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_VALUE_MASK 0x00000002L
+#define HUBPREQ1_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_EN_MASK 0x00000004L
+#define HUBPREQ1_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_VALUE_MASK 0x00000008L
+//HUBPREQ1_HUBPREQ_STATUS_REG0
+#define HUBPREQ1_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S0__SHIFT 0x0
+#define HUBPREQ1_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S1__SHIFT 0x8
+#define HUBPREQ1_HUBPREQ_STATUS_REG0__STATUS_VTG_COUNT__SHIFT 0x10
+#define HUBPREQ1_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S0_MASK 0x0000001FL
+#define HUBPREQ1_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S1_MASK 0x00001F00L
+#define HUBPREQ1_HUBPREQ_STATUS_REG0__STATUS_VTG_COUNT_MASK 0x7FFF0000L
+//HUBPREQ1_HUBPREQ_STATUS_REG1
+#define HUBPREQ1_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S0__SHIFT 0x0
+#define HUBPREQ1_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S1__SHIFT 0x10
+#define HUBPREQ1_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S0_MASK 0x00003FFFL
+#define HUBPREQ1_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S1_MASK 0x3FFF0000L
+//HUBPREQ1_HUBPREQ_STATUS_REG2
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S0__SHIFT 0x0
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S0__SHIFT 0x1
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S0__SHIFT 0x2
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S0__SHIFT 0x3
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S0__SHIFT 0x4
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S0__SHIFT 0x5
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S1__SHIFT 0x8
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S1__SHIFT 0x9
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S1__SHIFT 0xa
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S1__SHIFT 0xb
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S1__SHIFT 0xc
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S1__SHIFT 0xd
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_CUR__SHIFT 0x10
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_CUR__SHIFT 0x11
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_CUR__SHIFT 0x12
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_CUR__SHIFT 0x13
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_CUR__SHIFT 0x14
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_CUR__SHIFT 0x15
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_VBLANK__SHIFT 0x1a
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_HUBP_EN__SHIFT 0x1b
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_RECOVERY__SHIFT 0x1c
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_FLUSH__SHIFT 0x1d
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S0__SHIFT 0x1e
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S1__SHIFT 0x1f
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S0_MASK 0x00000001L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S0_MASK 0x00000002L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S0_MASK 0x00000004L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S0_MASK 0x00000008L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S0_MASK 0x00000010L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S0_MASK 0x00000020L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S1_MASK 0x00000100L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S1_MASK 0x00000200L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S1_MASK 0x00000400L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S1_MASK 0x00000800L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S1_MASK 0x00001000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S1_MASK 0x00002000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_CUR_MASK 0x00010000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_CUR_MASK 0x00020000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_CUR_MASK 0x00040000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_CUR_MASK 0x00080000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_CUR_MASK 0x00100000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_CUR_MASK 0x00200000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_VBLANK_MASK 0x04000000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_HUBP_EN_MASK 0x08000000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_RECOVERY_MASK 0x10000000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_FLUSH_MASK 0x20000000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S0_MASK 0x40000000L
+#define HUBPREQ1_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S1_MASK 0x80000000L
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubpret_dispdec
+//HUBPRET1_HUBPRET_CONTROL
+#define HUBPRET1_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS__SHIFT 0x4
+#define HUBPRET1_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE__SHIFT 0xf
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT 0x10
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G__SHIFT 0x12
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B__SHIFT 0x14
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R__SHIFT 0x16
+#define HUBPRET1_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE__SHIFT 0x18
+#define HUBPRET1_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS_MASK 0x00007FF0L
+#define HUBPRET1_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE_MASK 0x00008000L
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK 0x00030000L
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G_MASK 0x000C0000L
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B_MASK 0x00300000L
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R_MASK 0x00C00000L
+#define HUBPRET1_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE_MASK 0xFF000000L
+//HUBPRET1_HUBPRET_MEM_PWR_CTRL
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE__SHIFT 0x10
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS__SHIFT 0x12
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE__SHIFT 0x14
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE_MASK 0x00030000L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS_MASK 0x00040000L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE_MASK 0x00300000L
+//HUBPRET1_HUBPRET_MEM_PWR_STATUS
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE_MASK 0x00000030L
+//HUBPRET1_HUBPRET_READ_LINE_CTRL0
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE_MASK 0x0000FFFFL
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM_MASK 0x3FFF0000L
+//HUBPRET1_HUBPRET_READ_LINE_CTRL1
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED_MASK 0x00003FFFL
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE_MASK 0xFFFF0000L
+//HUBPRET1_HUBPRET_READ_LINE0
+#define HUBPRET1_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START_MASK 0x00003FFFL
+#define HUBPRET1_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END_MASK 0x3FFF0000L
+//HUBPRET1_HUBPRET_READ_LINE1
+#define HUBPRET1_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START_MASK 0x00003FFFL
+#define HUBPRET1_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END_MASK 0x3FFF0000L
+//HUBPRET1_HUBPRET_INTERRUPT
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK__SHIFT 0x0
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK__SHIFT 0x1
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK__SHIFT 0x2
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE__SHIFT 0x4
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE__SHIFT 0x5
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE__SHIFT 0x6
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR__SHIFT 0x8
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR__SHIFT 0x9
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR__SHIFT 0xa
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS__SHIFT 0xc
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS__SHIFT 0xd
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS__SHIFT 0xe
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS__SHIFT 0x10
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS__SHIFT 0x11
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS__SHIFT 0x12
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK_MASK 0x00000001L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK_MASK 0x00000002L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK_MASK 0x00000004L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE_MASK 0x00000010L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE_MASK 0x00000020L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE_MASK 0x00000040L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK 0x00000100L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR_MASK 0x00000200L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR_MASK 0x00000400L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS_MASK 0x00001000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS_MASK 0x00002000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS_MASK 0x00004000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK 0x00010000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS_MASK 0x00020000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS_MASK 0x00040000L
+//HUBPRET1_HUBPRET_READ_LINE_VALUE
+#define HUBPRET1_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_MASK 0x00003FFFL
+#define HUBPRET1_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT_MASK 0x3FFF0000L
+//HUBPRET1_HUBPRET_READ_LINE_STATUS
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE__SHIFT 0x4
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE__SHIFT 0x5
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT 0x8
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE__SHIFT 0xa
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK_MASK 0x00000001L
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE_MASK 0x00000010L
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE_MASK 0x00000020L
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK 0x00000100L
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE_MASK 0x00000400L
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_cursor0_dispdec
+//CURSOR0_1_CURSOR_CONTROL
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_ENABLE__SHIFT 0x0
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_REQ_MODE__SHIFT 0x2
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_MODE__SHIFT 0x8
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_TMZ__SHIFT 0xc
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PITCH__SHIFT 0x10
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS__SHIFT 0x14
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK__SHIFT 0x18
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN__SHIFT 0x1e
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL__SHIFT 0x1f
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_ENABLE_MASK 0x00000001L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_REQ_MODE_MASK 0x00000004L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_MODE_MASK 0x00000700L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_TMZ_MASK 0x00001000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PITCH_MASK 0x00030000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS_MASK 0x00100000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK_MASK 0x1F000000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN_MASK 0x40000000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL_MASK 0x80000000L
+//CURSOR0_1_CURSOR_SURFACE_ADDRESS
+#define CURSOR0_1_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0
+#define CURSOR0_1_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//CURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH
+#define CURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//CURSOR0_1_CURSOR_SIZE
+#define CURSOR0_1_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT 0x0
+#define CURSOR0_1_CURSOR_SIZE__CURSOR_WIDTH__SHIFT 0x10
+#define CURSOR0_1_CURSOR_SIZE__CURSOR_HEIGHT_MASK 0x000001FFL
+#define CURSOR0_1_CURSOR_SIZE__CURSOR_WIDTH_MASK 0x01FF0000L
+//CURSOR0_1_CURSOR_POSITION
+#define CURSOR0_1_CURSOR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0
+#define CURSOR0_1_CURSOR_POSITION__CURSOR_X_POSITION__SHIFT 0x10
+#define CURSOR0_1_CURSOR_POSITION__CURSOR_Y_POSITION_MASK 0x00003FFFL
+#define CURSOR0_1_CURSOR_POSITION__CURSOR_X_POSITION_MASK 0x3FFF0000L
+//CURSOR0_1_CURSOR_HOT_SPOT
+#define CURSOR0_1_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0
+#define CURSOR0_1_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10
+#define CURSOR0_1_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x000000FFL
+#define CURSOR0_1_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x00FF0000L
+//CURSOR0_1_CURSOR_STEREO_CONTROL
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x12
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x00000001L
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x0003FFF0L
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0xFFFC0000L
+//CURSOR0_1_CURSOR_DST_OFFSET
+#define CURSOR0_1_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET__SHIFT 0x0
+#define CURSOR0_1_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET_MASK 0x00001FFFL
+//CURSOR0_1_CURSOR_MEM_PWR_CTRL
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE__SHIFT 0x0
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS__SHIFT 0x2
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE__SHIFT 0x4
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE_MASK 0x00000003L
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS_MASK 0x00000004L
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE_MASK 0x00000030L
+//CURSOR0_1_CURSOR_MEM_PWR_STATUS
+#define CURSOR0_1_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE__SHIFT 0x0
+#define CURSOR0_1_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE_MASK 0x00000003L
+//CURSOR0_1_DMDATA_ADDRESS_HIGH
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_TMZ__SHIFT 0x1e
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_TMZ_MASK 0x40000000L
+//CURSOR0_1_DMDATA_ADDRESS_LOW
+#define CURSOR0_1_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW__SHIFT 0x0
+#define CURSOR0_1_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//CURSOR0_1_DMDATA_CNTL
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_UPDATED__SHIFT 0x0
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_REPEAT__SHIFT 0x1
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_MODE__SHIFT 0x2
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_SIZE__SHIFT 0x10
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_UPDATED_MASK 0x00000001L
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_REPEAT_MASK 0x00000002L
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_MODE_MASK 0x00000004L
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_SIZE_MASK 0x0FFF0000L
+//CURSOR0_1_DMDATA_QOS_CNTL
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_QOS_MODE__SHIFT 0x0
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL__SHIFT 0x4
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_DL_DELTA__SHIFT 0x10
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_QOS_MODE_MASK 0x00000001L
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL_MASK 0x000000F0L
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_DL_DELTA_MASK 0xFFFF0000L
+//CURSOR0_1_DMDATA_STATUS
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_DONE__SHIFT 0x0
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_UNDERFLOW__SHIFT 0x2
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR__SHIFT 0x4
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_DONE_MASK 0x00000001L
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_UNDERFLOW_MASK 0x00000004L
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR_MASK 0x00000010L
+//CURSOR0_1_DMDATA_SW_CNTL
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_UPDATED__SHIFT 0x0
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_REPEAT__SHIFT 0x1
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_SIZE__SHIFT 0x10
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_UPDATED_MASK 0x00000001L
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_REPEAT_MASK 0x00000002L
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_SIZE_MASK 0x0FFF0000L
+//CURSOR0_1_DMDATA_SW_DATA
+#define CURSOR0_1_DMDATA_SW_DATA__DMDATA_SW_DATA__SHIFT 0x0
+#define CURSOR0_1_DMDATA_SW_DATA__DMDATA_SW_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON8_PERFCOUNTER_CNTL
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON8_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON8_PERFCOUNTER_CNTL2
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON8_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON8_PERFCOUNTER_STATE
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON8_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON8_PERFMON_CNTL
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON8_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON8_PERFMON_CNTL2
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON8_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON8_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON8_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON8_PERFMON_CVALUE_LOW
+#define DC_PERFMON8_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON8_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON8_PERFMON_HI
+#define DC_PERFMON8_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON8_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON8_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON8_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON8_PERFMON_LOW
+#define DC_PERFMON8_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON8_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubp_dispdec
+//HUBP2_DCSURF_SURFACE_CONFIG
+#define HUBP2_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define HUBP2_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE__SHIFT 0x8
+#define HUBP2_DCSURF_SURFACE_CONFIG__H_MIRROR_EN__SHIFT 0xa
+#define HUBP2_DCSURF_SURFACE_CONFIG__ALPHA_PLANE_EN__SHIFT 0xb
+#define HUBP2_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define HUBP2_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE_MASK 0x00000300L
+#define HUBP2_DCSURF_SURFACE_CONFIG__H_MIRROR_EN_MASK 0x00000400L
+#define HUBP2_DCSURF_SURFACE_CONFIG__ALPHA_PLANE_EN_MASK 0x00000800L
+//HUBP2_DCSURF_ADDR_CONFIG
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define HUBP2_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE__SHIFT 0x6
+#define HUBP2_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0xc
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_PKRS__SHIFT 0x10
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define HUBP2_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE_MASK 0x000000C0L
+#define HUBP2_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x00003000L
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_PKRS_MASK 0x00070000L
+//HUBP2_DCSURF_TILING_CONFIG
+#define HUBP2_DCSURF_TILING_CONFIG__SW_MODE__SHIFT 0x0
+#define HUBP2_DCSURF_TILING_CONFIG__DIM_TYPE__SHIFT 0x7
+#define HUBP2_DCSURF_TILING_CONFIG__META_LINEAR__SHIFT 0x9
+#define HUBP2_DCSURF_TILING_CONFIG__PIPE_ALIGNED__SHIFT 0xb
+#define HUBP2_DCSURF_TILING_CONFIG__SW_MODE_MASK 0x0000001FL
+#define HUBP2_DCSURF_TILING_CONFIG__DIM_TYPE_MASK 0x00000180L
+#define HUBP2_DCSURF_TILING_CONFIG__META_LINEAR_MASK 0x00000200L
+#define HUBP2_DCSURF_TILING_CONFIG__PIPE_ALIGNED_MASK 0x00000800L
+//HUBP2_DCSURF_PRI_VIEWPORT_START
+#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP2_DCSURF_PRI_VIEWPORT_START_C
+#define HUBP2_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP2_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP2_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP2_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP2_DCSURF_SEC_VIEWPORT_START
+#define HUBP2_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP2_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP2_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP2_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP2_DCSURF_SEC_VIEWPORT_START_C
+#define HUBP2_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP2_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP2_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP2_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP2_DCHUBP_REQ_SIZE_CONFIG
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT__SHIFT 0x0
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR__SHIFT 0x4
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE__SHIFT 0x8
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE__SHIFT 0xb
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE__SHIFT 0x10
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE__SHIFT 0x12
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE__SHIFT 0x14
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE__SHIFT 0x18
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT_MASK 0x00000007L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR_MASK 0x00000070L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE_MASK 0x00000700L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE_MASK 0x00001800L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE_MASK 0x00030000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE_MASK 0x000C0000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE_MASK 0x00700000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE_MASK 0x07000000L
+//HUBP2_DCHUBP_REQ_SIZE_CONFIG_C
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C__SHIFT 0x0
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C__SHIFT 0x4
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C__SHIFT 0x8
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C__SHIFT 0xb
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C__SHIFT 0x10
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C__SHIFT 0x12
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C__SHIFT 0x14
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C_MASK 0x00000007L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C_MASK 0x00000070L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C_MASK 0x00000700L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C_MASK 0x00001800L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C_MASK 0x00030000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C_MASK 0x000C0000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C_MASK 0x00700000L
+//HUBP2_DCHUBP_CNTL
+#define HUBP2_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT 0x0
+#define HUBP2_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ__SHIFT 0x1
+#define HUBP2_DCHUBP_CNTL__HUBP_SOFT_RESET__SHIFT 0x2
+#define HUBP2_DCHUBP_CNTL__HUBP_IN_BLANK__SHIFT 0x3
+#define HUBP2_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT 0x4
+#define HUBP2_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC__SHIFT 0x8
+#define HUBP2_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM__SHIFT 0x9
+#define HUBP2_DCHUBP_CNTL__HUBP_UNBOUNDED_REQ_MODE__SHIFT 0xa
+#define HUBP2_DCHUBP_CNTL__HUBP_SEG_ALLOC_ERR_STATUS__SHIFT 0xb
+#define HUBP2_DCHUBP_CNTL__HUBP_TTU_DISABLE__SHIFT 0xc
+#define HUBP2_DCHUBP_CNTL__HUBP_TTU_MODE__SHIFT 0xd
+#define HUBP2_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ__SHIFT 0x10
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS__SHIFT 0x14
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD__SHIFT 0x18
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR__SHIFT 0x1a
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN__SHIFT 0x1b
+#define HUBP2_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT 0x1c
+#define HUBP2_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR__SHIFT 0x1f
+#define HUBP2_DCHUBP_CNTL__HUBP_BLANK_EN_MASK 0x00000001L
+#define HUBP2_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ_MASK 0x00000002L
+#define HUBP2_DCHUBP_CNTL__HUBP_SOFT_RESET_MASK 0x00000004L
+#define HUBP2_DCHUBP_CNTL__HUBP_IN_BLANK_MASK 0x00000008L
+#define HUBP2_DCHUBP_CNTL__HUBP_VTG_SEL_MASK 0x000000F0L
+#define HUBP2_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC_MASK 0x00000100L
+#define HUBP2_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM_MASK 0x00000200L
+#define HUBP2_DCHUBP_CNTL__HUBP_UNBOUNDED_REQ_MODE_MASK 0x00000400L
+#define HUBP2_DCHUBP_CNTL__HUBP_SEG_ALLOC_ERR_STATUS_MASK 0x00000800L
+#define HUBP2_DCHUBP_CNTL__HUBP_TTU_DISABLE_MASK 0x00001000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TTU_MODE_MASK 0x0000E000L
+#define HUBP2_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ_MASK 0x000F0000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_MASK 0x00F00000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD_MASK 0x03000000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR_MASK 0x04000000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN_MASK 0x08000000L
+#define HUBP2_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK 0x70000000L
+#define HUBP2_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR_MASK 0x80000000L
+//HUBP2_HUBP_CLK_CNTL
+#define HUBP2_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0x0
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS__SHIFT 0x8
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS__SHIFT 0xc
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS__SHIFT 0x10
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON__SHIFT 0x14
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON__SHIFT 0x15
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON__SHIFT 0x16
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON__SHIFT 0x17
+#define HUBP2_HUBP_CLK_CNTL__HUBP_FGCG_REP_DIS__SHIFT 0x18
+#define HUBP2_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL__SHIFT 0x1c
+#define HUBP2_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE_MASK 0x00000001L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS_MASK 0x00000100L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS_MASK 0x00001000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS_MASK 0x00010000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON_MASK 0x00100000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON_MASK 0x00200000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON_MASK 0x00400000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON_MASK 0x00800000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_FGCG_REP_DIS_MASK 0x01000000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL_MASK 0xF0000000L
+//HUBP2_DCHUBP_VMPG_CONFIG
+#define HUBP2_DCHUBP_VMPG_CONFIG__VMPG_SIZE__SHIFT 0x0
+#define HUBP2_DCHUBP_VMPG_CONFIG__PTE_BUFFER_MODE__SHIFT 0x1
+#define HUBP2_DCHUBP_VMPG_CONFIG__BIGK_FRAGMENT_SIZE__SHIFT 0x2
+#define HUBP2_DCHUBP_VMPG_CONFIG__FORCE_ONE_ROW_FOR_FRAME__SHIFT 0x7
+#define HUBP2_DCHUBP_VMPG_CONFIG__VMPG_SIZE_MASK 0x00000001L
+#define HUBP2_DCHUBP_VMPG_CONFIG__PTE_BUFFER_MODE_MASK 0x00000002L
+#define HUBP2_DCHUBP_VMPG_CONFIG__BIGK_FRAGMENT_SIZE_MASK 0x0000007CL
+#define HUBP2_DCHUBP_VMPG_CONFIG__FORCE_ONE_ROW_FOR_FRAME_MASK 0x00000080L
+//HUBP2_DCHUBP_MALL_CONFIG
+#define HUBP2_DCHUBP_MALL_CONFIG__USE_MALL_SEL__SHIFT 0x0
+#define HUBP2_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR__SHIFT 0x2
+#define HUBP2_DCHUBP_MALL_CONFIG__USE_MALL_SEL_MASK 0x00000003L
+#define HUBP2_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR_MASK 0x00000004L
+//HUBP2_DCHUBP_MALL_SUB_VP
+#define HUBP2_DCHUBP_MALL_SUB_VP__USE_MALL_AT_START_LINE__SHIFT 0x0
+#define HUBP2_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S0__SHIFT 0x1
+#define HUBP2_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S1__SHIFT 0xf
+#define HUBP2_DCHUBP_MALL_SUB_VP__USE_MALL_AT_START_LINE_MASK 0x00000001L
+#define HUBP2_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S0_MASK 0x00007FFEL
+#define HUBP2_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S1_MASK 0x1FFF8000L
+//HUBP2_HUBPREQ_DEBUG_DB
+#define HUBP2_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP2_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG_MASK 0xFFFFFFFFL
+//HUBP2_HUBPREQ_DEBUG
+#define HUBP2_HUBPREQ_DEBUG__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP2_HUBPREQ_DEBUG__HUBPREQ_DEBUG_FLIP_REQ_DURING_MALL_STATUS__SHIFT 0x1f
+#define HUBP2_HUBPREQ_DEBUG__HUBPREQ_DEBUG_MASK 0x7FFFFFFFL
+#define HUBP2_HUBPREQ_DEBUG__HUBPREQ_DEBUG_FLIP_REQ_DURING_MALL_STATUS_MASK 0x80000000L
+//HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK__SHIFT 0xc
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK__SHIFT 0x14
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK__SHIFT 0x1c
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK_MASK 0x00000001L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK_MASK 0x00000FF0L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK_MASK 0x0001F000L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK_MASK 0x01F00000L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK_MASK 0x30000000L
+//HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK__SHIFT 0x0
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK__SHIFT 0x1
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK__SHIFT 0x4
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK__SHIFT 0xc
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK__SHIFT 0x14
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK_MASK 0x00000001L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK_MASK 0x00000002L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK_MASK 0x00000FF0L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK_MASK 0x0001F000L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK_MASK 0x01F00000L
+//HUBP2_HUBP_MALL_STATUS
+#define HUBP2_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_STATIC_SCREEN__SHIFT 0x0
+#define HUBP2_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_PSTATE_CHANGE__SHIFT 0x1
+#define HUBP2_HUBP_MALL_STATUS__MALL_USE_REQUEST__SHIFT 0x2
+#define HUBP2_HUBP_MALL_STATUS__MALL_USE_RESPONSE__SHIFT 0x3
+#define HUBP2_HUBP_MALL_STATUS__MALL_IN_USE__SHIFT 0x4
+#define HUBP2_HUBP_MALL_STATUS__MALL_PREFETCH_COMPLETE__SHIFT 0x5
+#define HUBP2_HUBP_MALL_STATUS__SUB_VP_MALL_RETRIEVE__SHIFT 0x6
+#define HUBP2_HUBP_MALL_STATUS__MCB_MALL_USE_RESPONSE__SHIFT 0x7
+#define HUBP2_HUBP_MALL_STATUS__CURSOR_LOCAL_RETRIEVE__SHIFT 0x8
+#define HUBP2_HUBP_MALL_STATUS__CURSOR_LOCAL_PREFETCH__SHIFT 0x9
+#define HUBP2_HUBP_MALL_STATUS__MALL_RETRIEVE_FRAME__SHIFT 0xa
+#define HUBP2_HUBP_MALL_STATUS__MALL_PREFETCH_FRAME__SHIFT 0xb
+#define HUBP2_HUBP_MALL_STATUS__CRQ_BUSY_WITH_MALL__SHIFT 0xc
+#define HUBP2_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL__SHIFT 0xd
+#define HUBP2_HUBP_MALL_STATUS__DRQ_BUSY_WITH_MALL__SHIFT 0xe
+#define HUBP2_HUBP_MALL_STATUS__USE_ONE_ROW_FOR_FRAME__SHIFT 0xf
+#define HUBP2_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_RETRIEVE_IN_PROGRESS__SHIFT 0x10
+#define HUBP2_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_OUTSTANDING__SHIFT 0x11
+#define HUBP2_HUBP_MALL_STATUS__DRQ_MALL_OUTSTANDING__SHIFT 0x12
+#define HUBP2_HUBP_MALL_STATUS__DRQ_MALL_CNT_ZERO__SHIFT 0x13
+#define HUBP2_HUBP_MALL_STATUS__MRQ_S1_MALL_RETRIEVE_SUB_VP__SHIFT 0x14
+#define HUBP2_HUBP_MALL_STATUS__MRQ_S0_MALL_RETRIEVE_SUB_VP__SHIFT 0x15
+#define HUBP2_HUBP_MALL_STATUS__MRQ_MALL_OUTSTANDING__SHIFT 0x16
+#define HUBP2_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_SUB_VP__SHIFT 0x17
+#define HUBP2_HUBP_MALL_STATUS__CRQ_MALL_OUTSTANDING__SHIFT 0x18
+#define HUBP2_HUBP_MALL_STATUS__CRQ_LOCAL_OUTSTANDING__SHIFT 0x19
+#define HUBP2_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_STATIC_SCREEN_MASK 0x00000001L
+#define HUBP2_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_PSTATE_CHANGE_MASK 0x00000002L
+#define HUBP2_HUBP_MALL_STATUS__MALL_USE_REQUEST_MASK 0x00000004L
+#define HUBP2_HUBP_MALL_STATUS__MALL_USE_RESPONSE_MASK 0x00000008L
+#define HUBP2_HUBP_MALL_STATUS__MALL_IN_USE_MASK 0x00000010L
+#define HUBP2_HUBP_MALL_STATUS__MALL_PREFETCH_COMPLETE_MASK 0x00000020L
+#define HUBP2_HUBP_MALL_STATUS__SUB_VP_MALL_RETRIEVE_MASK 0x00000040L
+#define HUBP2_HUBP_MALL_STATUS__MCB_MALL_USE_RESPONSE_MASK 0x00000080L
+#define HUBP2_HUBP_MALL_STATUS__CURSOR_LOCAL_RETRIEVE_MASK 0x00000100L
+#define HUBP2_HUBP_MALL_STATUS__CURSOR_LOCAL_PREFETCH_MASK 0x00000200L
+#define HUBP2_HUBP_MALL_STATUS__MALL_RETRIEVE_FRAME_MASK 0x00000400L
+#define HUBP2_HUBP_MALL_STATUS__MALL_PREFETCH_FRAME_MASK 0x00000800L
+#define HUBP2_HUBP_MALL_STATUS__CRQ_BUSY_WITH_MALL_MASK 0x00001000L
+#define HUBP2_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_MASK 0x00002000L
+#define HUBP2_HUBP_MALL_STATUS__DRQ_BUSY_WITH_MALL_MASK 0x00004000L
+#define HUBP2_HUBP_MALL_STATUS__USE_ONE_ROW_FOR_FRAME_MASK 0x00008000L
+#define HUBP2_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_RETRIEVE_IN_PROGRESS_MASK 0x00010000L
+#define HUBP2_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_OUTSTANDING_MASK 0x00020000L
+#define HUBP2_HUBP_MALL_STATUS__DRQ_MALL_OUTSTANDING_MASK 0x00040000L
+#define HUBP2_HUBP_MALL_STATUS__DRQ_MALL_CNT_ZERO_MASK 0x00080000L
+#define HUBP2_HUBP_MALL_STATUS__MRQ_S1_MALL_RETRIEVE_SUB_VP_MASK 0x00100000L
+#define HUBP2_HUBP_MALL_STATUS__MRQ_S0_MALL_RETRIEVE_SUB_VP_MASK 0x00200000L
+#define HUBP2_HUBP_MALL_STATUS__MRQ_MALL_OUTSTANDING_MASK 0x00400000L
+#define HUBP2_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_SUB_VP_MASK 0x00800000L
+#define HUBP2_HUBP_MALL_STATUS__CRQ_MALL_OUTSTANDING_MASK 0x01000000L
+#define HUBP2_HUBP_MALL_STATUS__CRQ_LOCAL_OUTSTANDING_MASK 0x02000000L
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubpreq_dispdec
+//HUBPREQ2_DCSURF_SURFACE_PITCH
+#define HUBPREQ2_DCSURF_SURFACE_PITCH__PITCH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_PITCH__META_PITCH__SHIFT 0x10
+#define HUBPREQ2_DCSURF_SURFACE_PITCH__PITCH_MASK 0x00003FFFL
+#define HUBPREQ2_DCSURF_SURFACE_PITCH__META_PITCH_MASK 0x3FFF0000L
+//HUBPREQ2_DCSURF_SURFACE_PITCH_C
+#define HUBPREQ2_DCSURF_SURFACE_PITCH_C__PITCH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_PITCH_C__META_PITCH_C__SHIFT 0x10
+#define HUBPREQ2_DCSURF_SURFACE_PITCH_C__PITCH_C_MASK 0x00003FFFL
+#define HUBPREQ2_DCSURF_SURFACE_PITCH_C__META_PITCH_C_MASK 0x3FFF0000L
+//HUBPREQ2_VMID_SETTINGS_0
+#define HUBPREQ2_VMID_SETTINGS_0__VMID__SHIFT 0x0
+#define HUBPREQ2_VMID_SETTINGS_0__VMID_MASK 0x0000000FL
+//HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SURFACE_CONTROL
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK__SHIFT 0x2
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_C__SHIFT 0x5
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK__SHIFT 0xa
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_C__SHIFT 0xd
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x11
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x12
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x13
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_MASK 0x0000000CL
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_C_MASK 0x00000060L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_MASK 0x00000C00L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_C_MASK 0x00006000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00020000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x00040000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x00080000L
+//HUBPREQ2_DCSURF_FLIP_CONTROL
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM__SHIFT 0x4
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS__SHIFT 0x9
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC__SHIFT 0xc
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC__SHIFT 0x10
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE__SHIFT 0x11
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY__SHIFT 0x12
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY__SHIFT 0x14
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK_MASK 0x00000001L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE_MASK 0x00000002L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM_MASK 0x000000F0L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK 0x00000100L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS_MASK 0x00000200L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC_MASK 0x00003000L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC_MASK 0x00010000L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE_MASK 0x00020000L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY_MASK 0x00040000L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY_MASK 0x3FF00000L
+//HUBPREQ2_DCSURF_FLIP_CONTROL2
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME__SHIFT 0x0
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE__SHIFT 0x8
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK__SHIFT 0x9
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE__SHIFT 0xa
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH__SHIFT 0xc
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME_MASK 0x000000FFL
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE_MASK 0x00000100L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK_MASK 0x00000200L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE_MASK 0x00000400L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH_MASK 0x00001000L
+//HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE__SHIFT 0x1
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK__SHIFT 0x2
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE__SHIFT 0x3
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR__SHIFT 0x8
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR__SHIFT 0x9
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED__SHIFT 0x10
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS__SHIFT 0x11
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED__SHIFT 0x12
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS__SHIFT 0x13
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK_MASK 0x00000001L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE_MASK 0x00000002L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK_MASK 0x00000004L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE_MASK 0x00000008L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR_MASK 0x00000100L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR_MASK 0x00000200L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED_MASK 0x00010000L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS_MASK 0x00020000L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED_MASK 0x00040000L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS_MASK 0x00080000L
+//HUBPREQ2_DCSURF_SURFACE_INUSE
+#define HUBPREQ2_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_VMID__SHIFT 0x1c
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_VMID_MASK 0xF0000000L
+//HUBPREQ2_DCSURF_SURFACE_INUSE_C
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_VMID_C__SHIFT 0x1c
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_VMID_C_MASK 0xF0000000L
+//HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_VMID__SHIFT 0x1c
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_VMID_MASK 0xF0000000L
+//HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_VMID_C__SHIFT 0x1c
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_VMID_C_MASK 0xF0000000L
+//HUBPREQ2_DCN_EXPANSION_MODE
+#define HUBPREQ2_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE__SHIFT 0x0
+#define HUBPREQ2_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE__SHIFT 0x2
+#define HUBPREQ2_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE__SHIFT 0x4
+#define HUBPREQ2_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE__SHIFT 0x6
+#define HUBPREQ2_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE_MASK 0x00000003L
+#define HUBPREQ2_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE_MASK 0x0000000CL
+#define HUBPREQ2_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE_MASK 0x00000030L
+#define HUBPREQ2_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE_MASK 0x000000C0L
+//HUBPREQ2_DCN_TTU_QOS_WM
+#define HUBPREQ2_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM__SHIFT 0x0
+#define HUBPREQ2_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM__SHIFT 0x10
+#define HUBPREQ2_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM_MASK 0x00003FFFL
+#define HUBPREQ2_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM_MASK 0x3FFF0000L
+//HUBPREQ2_DCN_GLOBAL_TTU_CNTL
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK__SHIFT 0x0
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__PIPE_IN_FLUSH_URGENT__SHIFT 0x18
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__PRQ_MRQ_FLUSH_URGENT__SHIFT 0x19
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__ROW_TTU_MODE__SHIFT 0x1b
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP__SHIFT 0x1c
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK_MASK 0x00FFFFFFL
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__PIPE_IN_FLUSH_URGENT_MASK 0x01000000L
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__PRQ_MRQ_FLUSH_URGENT_MASK 0x02000000L
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__ROW_TTU_MODE_MASK 0x08000000L
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP_MASK 0xF0000000L
+//HUBPREQ2_DCN_SURF0_TTU_CNTL0
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ2_DCN_SURF0_TTU_CNTL1
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ2_DCN_SURF1_TTU_CNTL0
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ2_DCN_SURF1_TTU_CNTL1
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ2_DCN_CUR0_TTU_CNTL0
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ2_DCN_CUR0_TTU_CNTL1
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ2_DCN_CUR1_TTU_CNTL0
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ2_DCN_CUR1_TTU_CNTL1
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ2_DCN_DMDATA_VM_CNTL
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__REFCYC_PER_VM_DMDATA__SHIFT 0x0
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS__SHIFT 0x10
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_CLEAR__SHIFT 0x14
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS__SHIFT 0x18
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_LATE_STATUS__SHIFT 0x19
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_CLEAR__SHIFT 0x1a
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_DONE__SHIFT 0x1f
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__REFCYC_PER_VM_DMDATA_MASK 0x0000FFFFL
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_MASK 0x000F0000L
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_CLEAR_MASK 0x00100000L
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_MASK 0x01000000L
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_LATE_STATUS_MASK 0x02000000L
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_CLEAR_MASK 0x04000000L
+#define HUBPREQ2_DCN_DMDATA_VM_CNTL__DMDATA_VM_DONE_MASK 0x80000000L
+//HUBPREQ2_DCN_VM_SYSTEM_APERTURE_LOW_ADDR
+#define HUBPREQ2_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR__SHIFT 0x0
+#define HUBPREQ2_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR_MASK 0x3FFFFFFFL
+//HUBPREQ2_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR
+#define HUBPREQ2_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR__SHIFT 0x0
+#define HUBPREQ2_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MASK 0x3FFFFFFFL
+//HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
+#define HUBPREQ2_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
+//HUBPREQ2_BLANK_OFFSET_0
+#define HUBPREQ2_BLANK_OFFSET_0__REFCYC_H_BLANK_END__SHIFT 0x0
+#define HUBPREQ2_BLANK_OFFSET_0__DLG_V_BLANK_END__SHIFT 0x10
+#define HUBPREQ2_BLANK_OFFSET_0__REFCYC_H_BLANK_END_MASK 0x00001FFFL
+#define HUBPREQ2_BLANK_OFFSET_0__DLG_V_BLANK_END_MASK 0x7FFF0000L
+//HUBPREQ2_BLANK_OFFSET_1
+#define HUBPREQ2_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START__SHIFT 0x0
+#define HUBPREQ2_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START_MASK 0x0003FFFFL
+//HUBPREQ2_DST_DIMENSIONS
+#define HUBPREQ2_DST_DIMENSIONS__REFCYC_PER_HTOTAL__SHIFT 0x0
+#define HUBPREQ2_DST_DIMENSIONS__REFCYC_PER_HTOTAL_MASK 0x001FFFFFL
+//HUBPREQ2_DST_AFTER_SCALER
+#define HUBPREQ2_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER__SHIFT 0x0
+#define HUBPREQ2_DST_AFTER_SCALER__DST_Y_AFTER_SCALER__SHIFT 0x10
+#define HUBPREQ2_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER_MASK 0x00001FFFL
+#define HUBPREQ2_DST_AFTER_SCALER__DST_Y_AFTER_SCALER_MASK 0x00070000L
+//HUBPREQ2_PREFETCH_SETTINGS
+#define HUBPREQ2_PREFETCH_SETTINGS__VRATIO_PREFETCH__SHIFT 0x0
+#define HUBPREQ2_PREFETCH_SETTINGS__DST_Y_PREFETCH__SHIFT 0x18
+#define HUBPREQ2_PREFETCH_SETTINGS__VRATIO_PREFETCH_MASK 0x003FFFFFL
+#define HUBPREQ2_PREFETCH_SETTINGS__DST_Y_PREFETCH_MASK 0xFF000000L
+//HUBPREQ2_PREFETCH_SETTINGS_C
+#define HUBPREQ2_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C__SHIFT 0x0
+#define HUBPREQ2_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C_MASK 0x003FFFFFL
+//HUBPREQ2_VBLANK_PARAMETERS_0
+#define HUBPREQ2_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK__SHIFT 0x8
+#define HUBPREQ2_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK_MASK 0x0000007FL
+#define HUBPREQ2_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK_MASK 0x00003F00L
+//HUBPREQ2_VBLANK_PARAMETERS_1
+#define HUBPREQ2_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ2_VBLANK_PARAMETERS_2
+#define HUBPREQ2_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ2_VBLANK_PARAMETERS_3
+#define HUBPREQ2_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ2_VBLANK_PARAMETERS_4
+#define HUBPREQ2_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ2_FLIP_PARAMETERS_0
+#define HUBPREQ2_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP__SHIFT 0x8
+#define HUBPREQ2_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP_MASK 0x0000007FL
+#define HUBPREQ2_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP_MASK 0x00003F00L
+//HUBPREQ2_FLIP_PARAMETERS_1
+#define HUBPREQ2_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ2_FLIP_PARAMETERS_2
+#define HUBPREQ2_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ2_NOM_PARAMETERS_0
+#define HUBPREQ2_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ2_NOM_PARAMETERS_1
+#define HUBPREQ2_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ2_NOM_PARAMETERS_2
+#define HUBPREQ2_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ2_NOM_PARAMETERS_3
+#define HUBPREQ2_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ2_NOM_PARAMETERS_4
+#define HUBPREQ2_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ2_NOM_PARAMETERS_5
+#define HUBPREQ2_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ2_NOM_PARAMETERS_6
+#define HUBPREQ2_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ2_NOM_PARAMETERS_7
+#define HUBPREQ2_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ2_PER_LINE_DELIVERY_PRE
+#define HUBPREQ2_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L__SHIFT 0x0
+#define HUBPREQ2_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C__SHIFT 0x10
+#define HUBPREQ2_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L_MASK 0x00001FFFL
+#define HUBPREQ2_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C_MASK 0x1FFF0000L
+//HUBPREQ2_PER_LINE_DELIVERY
+#define HUBPREQ2_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L__SHIFT 0x0
+#define HUBPREQ2_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C__SHIFT 0x10
+#define HUBPREQ2_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L_MASK 0x00001FFFL
+#define HUBPREQ2_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C_MASK 0x1FFF0000L
+//HUBPREQ2_CURSOR_SETTINGS
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET__SHIFT 0x0
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST__SHIFT 0x8
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET__SHIFT 0x10
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST__SHIFT 0x18
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET_MASK 0x000000FFL
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST_MASK 0x00000300L
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET_MASK 0x00FF0000L
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST_MASK 0x03000000L
+//HUBPREQ2_REF_FREQ_TO_PIX_FREQ
+#define HUBPREQ2_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ__SHIFT 0x0
+#define HUBPREQ2_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ_MASK 0x001FFFFFL
+//HUBPREQ2_DST_Y_DELTA_DRQ_LIMIT
+#define HUBPREQ2_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT__SHIFT 0x0
+#define HUBPREQ2_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT_MASK 0x00007FFFL
+//HUBPREQ2_HUBPREQ_MEM_PWR_CTRL
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE__SHIFT 0x4
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS__SHIFT 0x6
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE__SHIFT 0xc
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS__SHIFT 0xe
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE_MASK 0x00000030L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS_MASK 0x00000040L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE_MASK 0x00003000L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS_MASK 0x00004000L
+//HUBPREQ2_HUBPREQ_MEM_PWR_STATUS
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE__SHIFT 0x6
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE_MASK 0x000000C0L
+//HUBPREQ2_VBLANK_PARAMETERS_5
+#define HUBPREQ2_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK_MASK 0x007FFFFFL
+//HUBPREQ2_VBLANK_PARAMETERS_6
+#define HUBPREQ2_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK_MASK 0x007FFFFFL
+//HUBPREQ2_FLIP_PARAMETERS_3
+#define HUBPREQ2_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP_MASK 0x007FFFFFL
+//HUBPREQ2_FLIP_PARAMETERS_4
+#define HUBPREQ2_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP_MASK 0x007FFFFFL
+//HUBPREQ2_FLIP_PARAMETERS_5
+#define HUBPREQ2_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C_MASK 0x007FFFFFL
+//HUBPREQ2_FLIP_PARAMETERS_6
+#define HUBPREQ2_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C_MASK 0x007FFFFFL
+//HUBPREQ2_UCLK_PSTATE_FORCE
+#define HUBPREQ2_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_EN__SHIFT 0x0
+#define HUBPREQ2_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_VALUE__SHIFT 0x1
+#define HUBPREQ2_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_EN__SHIFT 0x2
+#define HUBPREQ2_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_VALUE__SHIFT 0x3
+#define HUBPREQ2_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_EN_MASK 0x00000001L
+#define HUBPREQ2_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_VALUE_MASK 0x00000002L
+#define HUBPREQ2_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_EN_MASK 0x00000004L
+#define HUBPREQ2_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_VALUE_MASK 0x00000008L
+//HUBPREQ2_HUBPREQ_STATUS_REG0
+#define HUBPREQ2_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S0__SHIFT 0x0
+#define HUBPREQ2_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S1__SHIFT 0x8
+#define HUBPREQ2_HUBPREQ_STATUS_REG0__STATUS_VTG_COUNT__SHIFT 0x10
+#define HUBPREQ2_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S0_MASK 0x0000001FL
+#define HUBPREQ2_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S1_MASK 0x00001F00L
+#define HUBPREQ2_HUBPREQ_STATUS_REG0__STATUS_VTG_COUNT_MASK 0x7FFF0000L
+//HUBPREQ2_HUBPREQ_STATUS_REG1
+#define HUBPREQ2_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S0__SHIFT 0x0
+#define HUBPREQ2_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S1__SHIFT 0x10
+#define HUBPREQ2_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S0_MASK 0x00003FFFL
+#define HUBPREQ2_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S1_MASK 0x3FFF0000L
+//HUBPREQ2_HUBPREQ_STATUS_REG2
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S0__SHIFT 0x0
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S0__SHIFT 0x1
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S0__SHIFT 0x2
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S0__SHIFT 0x3
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S0__SHIFT 0x4
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S0__SHIFT 0x5
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S1__SHIFT 0x8
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S1__SHIFT 0x9
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S1__SHIFT 0xa
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S1__SHIFT 0xb
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S1__SHIFT 0xc
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S1__SHIFT 0xd
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_CUR__SHIFT 0x10
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_CUR__SHIFT 0x11
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_CUR__SHIFT 0x12
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_CUR__SHIFT 0x13
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_CUR__SHIFT 0x14
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_CUR__SHIFT 0x15
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_VBLANK__SHIFT 0x1a
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_HUBP_EN__SHIFT 0x1b
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_RECOVERY__SHIFT 0x1c
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_FLUSH__SHIFT 0x1d
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S0__SHIFT 0x1e
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S1__SHIFT 0x1f
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S0_MASK 0x00000001L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S0_MASK 0x00000002L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S0_MASK 0x00000004L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S0_MASK 0x00000008L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S0_MASK 0x00000010L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S0_MASK 0x00000020L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S1_MASK 0x00000100L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S1_MASK 0x00000200L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S1_MASK 0x00000400L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S1_MASK 0x00000800L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S1_MASK 0x00001000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S1_MASK 0x00002000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_CUR_MASK 0x00010000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_CUR_MASK 0x00020000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_CUR_MASK 0x00040000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_CUR_MASK 0x00080000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_CUR_MASK 0x00100000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_CUR_MASK 0x00200000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_VBLANK_MASK 0x04000000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_HUBP_EN_MASK 0x08000000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_RECOVERY_MASK 0x10000000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_FLUSH_MASK 0x20000000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S0_MASK 0x40000000L
+#define HUBPREQ2_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S1_MASK 0x80000000L
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubpret_dispdec
+//HUBPRET2_HUBPRET_CONTROL
+#define HUBPRET2_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS__SHIFT 0x4
+#define HUBPRET2_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE__SHIFT 0xf
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT 0x10
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G__SHIFT 0x12
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B__SHIFT 0x14
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R__SHIFT 0x16
+#define HUBPRET2_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE__SHIFT 0x18
+#define HUBPRET2_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS_MASK 0x00007FF0L
+#define HUBPRET2_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE_MASK 0x00008000L
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK 0x00030000L
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G_MASK 0x000C0000L
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B_MASK 0x00300000L
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R_MASK 0x00C00000L
+#define HUBPRET2_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE_MASK 0xFF000000L
+//HUBPRET2_HUBPRET_MEM_PWR_CTRL
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE__SHIFT 0x10
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS__SHIFT 0x12
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE__SHIFT 0x14
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE_MASK 0x00030000L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS_MASK 0x00040000L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE_MASK 0x00300000L
+//HUBPRET2_HUBPRET_MEM_PWR_STATUS
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE_MASK 0x00000030L
+//HUBPRET2_HUBPRET_READ_LINE_CTRL0
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE_MASK 0x0000FFFFL
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM_MASK 0x3FFF0000L
+//HUBPRET2_HUBPRET_READ_LINE_CTRL1
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED_MASK 0x00003FFFL
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE_MASK 0xFFFF0000L
+//HUBPRET2_HUBPRET_READ_LINE0
+#define HUBPRET2_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START_MASK 0x00003FFFL
+#define HUBPRET2_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END_MASK 0x3FFF0000L
+//HUBPRET2_HUBPRET_READ_LINE1
+#define HUBPRET2_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START_MASK 0x00003FFFL
+#define HUBPRET2_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END_MASK 0x3FFF0000L
+//HUBPRET2_HUBPRET_INTERRUPT
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK__SHIFT 0x0
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK__SHIFT 0x1
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK__SHIFT 0x2
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE__SHIFT 0x4
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE__SHIFT 0x5
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE__SHIFT 0x6
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR__SHIFT 0x8
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR__SHIFT 0x9
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR__SHIFT 0xa
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS__SHIFT 0xc
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS__SHIFT 0xd
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS__SHIFT 0xe
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS__SHIFT 0x10
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS__SHIFT 0x11
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS__SHIFT 0x12
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK_MASK 0x00000001L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK_MASK 0x00000002L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK_MASK 0x00000004L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE_MASK 0x00000010L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE_MASK 0x00000020L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE_MASK 0x00000040L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK 0x00000100L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR_MASK 0x00000200L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR_MASK 0x00000400L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS_MASK 0x00001000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS_MASK 0x00002000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS_MASK 0x00004000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK 0x00010000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS_MASK 0x00020000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS_MASK 0x00040000L
+//HUBPRET2_HUBPRET_READ_LINE_VALUE
+#define HUBPRET2_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_MASK 0x00003FFFL
+#define HUBPRET2_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT_MASK 0x3FFF0000L
+//HUBPRET2_HUBPRET_READ_LINE_STATUS
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE__SHIFT 0x4
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE__SHIFT 0x5
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT 0x8
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE__SHIFT 0xa
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK_MASK 0x00000001L
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE_MASK 0x00000010L
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE_MASK 0x00000020L
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK 0x00000100L
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE_MASK 0x00000400L
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_cursor0_dispdec
+//CURSOR0_2_CURSOR_CONTROL
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_ENABLE__SHIFT 0x0
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_REQ_MODE__SHIFT 0x2
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_MODE__SHIFT 0x8
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_TMZ__SHIFT 0xc
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PITCH__SHIFT 0x10
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS__SHIFT 0x14
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK__SHIFT 0x18
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN__SHIFT 0x1e
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL__SHIFT 0x1f
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_ENABLE_MASK 0x00000001L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_REQ_MODE_MASK 0x00000004L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_MODE_MASK 0x00000700L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_TMZ_MASK 0x00001000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PITCH_MASK 0x00030000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS_MASK 0x00100000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK_MASK 0x1F000000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN_MASK 0x40000000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL_MASK 0x80000000L
+//CURSOR0_2_CURSOR_SURFACE_ADDRESS
+#define CURSOR0_2_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0
+#define CURSOR0_2_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//CURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH
+#define CURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//CURSOR0_2_CURSOR_SIZE
+#define CURSOR0_2_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT 0x0
+#define CURSOR0_2_CURSOR_SIZE__CURSOR_WIDTH__SHIFT 0x10
+#define CURSOR0_2_CURSOR_SIZE__CURSOR_HEIGHT_MASK 0x000001FFL
+#define CURSOR0_2_CURSOR_SIZE__CURSOR_WIDTH_MASK 0x01FF0000L
+//CURSOR0_2_CURSOR_POSITION
+#define CURSOR0_2_CURSOR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0
+#define CURSOR0_2_CURSOR_POSITION__CURSOR_X_POSITION__SHIFT 0x10
+#define CURSOR0_2_CURSOR_POSITION__CURSOR_Y_POSITION_MASK 0x00003FFFL
+#define CURSOR0_2_CURSOR_POSITION__CURSOR_X_POSITION_MASK 0x3FFF0000L
+//CURSOR0_2_CURSOR_HOT_SPOT
+#define CURSOR0_2_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0
+#define CURSOR0_2_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10
+#define CURSOR0_2_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x000000FFL
+#define CURSOR0_2_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x00FF0000L
+//CURSOR0_2_CURSOR_STEREO_CONTROL
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x12
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x00000001L
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x0003FFF0L
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0xFFFC0000L
+//CURSOR0_2_CURSOR_DST_OFFSET
+#define CURSOR0_2_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET__SHIFT 0x0
+#define CURSOR0_2_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET_MASK 0x00001FFFL
+//CURSOR0_2_CURSOR_MEM_PWR_CTRL
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE__SHIFT 0x0
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS__SHIFT 0x2
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE__SHIFT 0x4
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE_MASK 0x00000003L
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS_MASK 0x00000004L
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE_MASK 0x00000030L
+//CURSOR0_2_CURSOR_MEM_PWR_STATUS
+#define CURSOR0_2_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE__SHIFT 0x0
+#define CURSOR0_2_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE_MASK 0x00000003L
+//CURSOR0_2_DMDATA_ADDRESS_HIGH
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_TMZ__SHIFT 0x1e
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_TMZ_MASK 0x40000000L
+//CURSOR0_2_DMDATA_ADDRESS_LOW
+#define CURSOR0_2_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW__SHIFT 0x0
+#define CURSOR0_2_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//CURSOR0_2_DMDATA_CNTL
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_UPDATED__SHIFT 0x0
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_REPEAT__SHIFT 0x1
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_MODE__SHIFT 0x2
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_SIZE__SHIFT 0x10
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_UPDATED_MASK 0x00000001L
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_REPEAT_MASK 0x00000002L
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_MODE_MASK 0x00000004L
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_SIZE_MASK 0x0FFF0000L
+//CURSOR0_2_DMDATA_QOS_CNTL
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_QOS_MODE__SHIFT 0x0
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL__SHIFT 0x4
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_DL_DELTA__SHIFT 0x10
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_QOS_MODE_MASK 0x00000001L
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL_MASK 0x000000F0L
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_DL_DELTA_MASK 0xFFFF0000L
+//CURSOR0_2_DMDATA_STATUS
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_DONE__SHIFT 0x0
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_UNDERFLOW__SHIFT 0x2
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR__SHIFT 0x4
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_DONE_MASK 0x00000001L
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_UNDERFLOW_MASK 0x00000004L
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR_MASK 0x00000010L
+//CURSOR0_2_DMDATA_SW_CNTL
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_UPDATED__SHIFT 0x0
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_REPEAT__SHIFT 0x1
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_SIZE__SHIFT 0x10
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_UPDATED_MASK 0x00000001L
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_REPEAT_MASK 0x00000002L
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_SIZE_MASK 0x0FFF0000L
+//CURSOR0_2_DMDATA_SW_DATA
+#define CURSOR0_2_DMDATA_SW_DATA__DMDATA_SW_DATA__SHIFT 0x0
+#define CURSOR0_2_DMDATA_SW_DATA__DMDATA_SW_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON9_PERFCOUNTER_CNTL
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON9_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON9_PERFCOUNTER_CNTL2
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON9_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON9_PERFCOUNTER_STATE
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON9_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON9_PERFMON_CNTL
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON9_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON9_PERFMON_CNTL2
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON9_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON9_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON9_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON9_PERFMON_CVALUE_LOW
+#define DC_PERFMON9_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON9_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON9_PERFMON_HI
+#define DC_PERFMON9_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON9_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON9_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON9_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON9_PERFMON_LOW
+#define DC_PERFMON9_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON9_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubp_dispdec
+//HUBP3_DCSURF_SURFACE_CONFIG
+#define HUBP3_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define HUBP3_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE__SHIFT 0x8
+#define HUBP3_DCSURF_SURFACE_CONFIG__H_MIRROR_EN__SHIFT 0xa
+#define HUBP3_DCSURF_SURFACE_CONFIG__ALPHA_PLANE_EN__SHIFT 0xb
+#define HUBP3_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define HUBP3_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE_MASK 0x00000300L
+#define HUBP3_DCSURF_SURFACE_CONFIG__H_MIRROR_EN_MASK 0x00000400L
+#define HUBP3_DCSURF_SURFACE_CONFIG__ALPHA_PLANE_EN_MASK 0x00000800L
+//HUBP3_DCSURF_ADDR_CONFIG
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define HUBP3_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE__SHIFT 0x6
+#define HUBP3_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0xc
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_PKRS__SHIFT 0x10
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define HUBP3_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE_MASK 0x000000C0L
+#define HUBP3_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x00003000L
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_PKRS_MASK 0x00070000L
+//HUBP3_DCSURF_TILING_CONFIG
+#define HUBP3_DCSURF_TILING_CONFIG__SW_MODE__SHIFT 0x0
+#define HUBP3_DCSURF_TILING_CONFIG__DIM_TYPE__SHIFT 0x7
+#define HUBP3_DCSURF_TILING_CONFIG__META_LINEAR__SHIFT 0x9
+#define HUBP3_DCSURF_TILING_CONFIG__PIPE_ALIGNED__SHIFT 0xb
+#define HUBP3_DCSURF_TILING_CONFIG__SW_MODE_MASK 0x0000001FL
+#define HUBP3_DCSURF_TILING_CONFIG__DIM_TYPE_MASK 0x00000180L
+#define HUBP3_DCSURF_TILING_CONFIG__META_LINEAR_MASK 0x00000200L
+#define HUBP3_DCSURF_TILING_CONFIG__PIPE_ALIGNED_MASK 0x00000800L
+//HUBP3_DCSURF_PRI_VIEWPORT_START
+#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP3_DCSURF_PRI_VIEWPORT_START_C
+#define HUBP3_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP3_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP3_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP3_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP3_DCSURF_SEC_VIEWPORT_START
+#define HUBP3_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP3_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP3_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP3_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP3_DCSURF_SEC_VIEWPORT_START_C
+#define HUBP3_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP3_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP3_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP3_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP3_DCHUBP_REQ_SIZE_CONFIG
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT__SHIFT 0x0
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR__SHIFT 0x4
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE__SHIFT 0x8
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE__SHIFT 0xb
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE__SHIFT 0x10
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE__SHIFT 0x12
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE__SHIFT 0x14
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE__SHIFT 0x18
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT_MASK 0x00000007L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR_MASK 0x00000070L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE_MASK 0x00000700L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE_MASK 0x00001800L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE_MASK 0x00030000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE_MASK 0x000C0000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE_MASK 0x00700000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__VM_GROUP_SIZE_MASK 0x07000000L
+//HUBP3_DCHUBP_REQ_SIZE_CONFIG_C
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C__SHIFT 0x0
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C__SHIFT 0x4
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C__SHIFT 0x8
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C__SHIFT 0xb
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C__SHIFT 0x10
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C__SHIFT 0x12
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C__SHIFT 0x14
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C_MASK 0x00000007L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C_MASK 0x00000070L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C_MASK 0x00000700L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C_MASK 0x00001800L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C_MASK 0x00030000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C_MASK 0x000C0000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C_MASK 0x00700000L
+//HUBP3_DCHUBP_CNTL
+#define HUBP3_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT 0x0
+#define HUBP3_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ__SHIFT 0x1
+#define HUBP3_DCHUBP_CNTL__HUBP_SOFT_RESET__SHIFT 0x2
+#define HUBP3_DCHUBP_CNTL__HUBP_IN_BLANK__SHIFT 0x3
+#define HUBP3_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT 0x4
+#define HUBP3_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC__SHIFT 0x8
+#define HUBP3_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM__SHIFT 0x9
+#define HUBP3_DCHUBP_CNTL__HUBP_UNBOUNDED_REQ_MODE__SHIFT 0xa
+#define HUBP3_DCHUBP_CNTL__HUBP_SEG_ALLOC_ERR_STATUS__SHIFT 0xb
+#define HUBP3_DCHUBP_CNTL__HUBP_TTU_DISABLE__SHIFT 0xc
+#define HUBP3_DCHUBP_CNTL__HUBP_TTU_MODE__SHIFT 0xd
+#define HUBP3_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ__SHIFT 0x10
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS__SHIFT 0x14
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD__SHIFT 0x18
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR__SHIFT 0x1a
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN__SHIFT 0x1b
+#define HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT 0x1c
+#define HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR__SHIFT 0x1f
+#define HUBP3_DCHUBP_CNTL__HUBP_BLANK_EN_MASK 0x00000001L
+#define HUBP3_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ_MASK 0x00000002L
+#define HUBP3_DCHUBP_CNTL__HUBP_SOFT_RESET_MASK 0x00000004L
+#define HUBP3_DCHUBP_CNTL__HUBP_IN_BLANK_MASK 0x00000008L
+#define HUBP3_DCHUBP_CNTL__HUBP_VTG_SEL_MASK 0x000000F0L
+#define HUBP3_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC_MASK 0x00000100L
+#define HUBP3_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM_MASK 0x00000200L
+#define HUBP3_DCHUBP_CNTL__HUBP_UNBOUNDED_REQ_MODE_MASK 0x00000400L
+#define HUBP3_DCHUBP_CNTL__HUBP_SEG_ALLOC_ERR_STATUS_MASK 0x00000800L
+#define HUBP3_DCHUBP_CNTL__HUBP_TTU_DISABLE_MASK 0x00001000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TTU_MODE_MASK 0x0000E000L
+#define HUBP3_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ_MASK 0x000F0000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_MASK 0x00F00000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD_MASK 0x03000000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR_MASK 0x04000000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN_MASK 0x08000000L
+#define HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK 0x70000000L
+#define HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR_MASK 0x80000000L
+//HUBP3_HUBP_CLK_CNTL
+#define HUBP3_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0x0
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS__SHIFT 0x8
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS__SHIFT 0xc
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS__SHIFT 0x10
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON__SHIFT 0x14
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON__SHIFT 0x15
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON__SHIFT 0x16
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON__SHIFT 0x17
+#define HUBP3_HUBP_CLK_CNTL__HUBP_FGCG_REP_DIS__SHIFT 0x18
+#define HUBP3_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL__SHIFT 0x1c
+#define HUBP3_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE_MASK 0x00000001L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS_MASK 0x00000100L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS_MASK 0x00001000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS_MASK 0x00010000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON_MASK 0x00100000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON_MASK 0x00200000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON_MASK 0x00400000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON_MASK 0x00800000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_FGCG_REP_DIS_MASK 0x01000000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_TEST_CLK_SEL_MASK 0xF0000000L
+//HUBP3_DCHUBP_VMPG_CONFIG
+#define HUBP3_DCHUBP_VMPG_CONFIG__VMPG_SIZE__SHIFT 0x0
+#define HUBP3_DCHUBP_VMPG_CONFIG__PTE_BUFFER_MODE__SHIFT 0x1
+#define HUBP3_DCHUBP_VMPG_CONFIG__BIGK_FRAGMENT_SIZE__SHIFT 0x2
+#define HUBP3_DCHUBP_VMPG_CONFIG__FORCE_ONE_ROW_FOR_FRAME__SHIFT 0x7
+#define HUBP3_DCHUBP_VMPG_CONFIG__VMPG_SIZE_MASK 0x00000001L
+#define HUBP3_DCHUBP_VMPG_CONFIG__PTE_BUFFER_MODE_MASK 0x00000002L
+#define HUBP3_DCHUBP_VMPG_CONFIG__BIGK_FRAGMENT_SIZE_MASK 0x0000007CL
+#define HUBP3_DCHUBP_VMPG_CONFIG__FORCE_ONE_ROW_FOR_FRAME_MASK 0x00000080L
+//HUBP3_DCHUBP_MALL_CONFIG
+#define HUBP3_DCHUBP_MALL_CONFIG__USE_MALL_SEL__SHIFT 0x0
+#define HUBP3_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR__SHIFT 0x2
+#define HUBP3_DCHUBP_MALL_CONFIG__USE_MALL_SEL_MASK 0x00000003L
+#define HUBP3_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR_MASK 0x00000004L
+//HUBP3_DCHUBP_MALL_SUB_VP
+#define HUBP3_DCHUBP_MALL_SUB_VP__USE_MALL_AT_START_LINE__SHIFT 0x0
+#define HUBP3_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S0__SHIFT 0x1
+#define HUBP3_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S1__SHIFT 0xf
+#define HUBP3_DCHUBP_MALL_SUB_VP__USE_MALL_AT_START_LINE_MASK 0x00000001L
+#define HUBP3_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S0_MASK 0x00007FFEL
+#define HUBP3_DCHUBP_MALL_SUB_VP__SUB_VP_START_LINE_S1_MASK 0x1FFF8000L
+//HUBP3_HUBPREQ_DEBUG_DB
+#define HUBP3_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP3_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG_MASK 0xFFFFFFFFL
+//HUBP3_HUBPREQ_DEBUG
+#define HUBP3_HUBPREQ_DEBUG__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP3_HUBPREQ_DEBUG__HUBPREQ_DEBUG_FLIP_REQ_DURING_MALL_STATUS__SHIFT 0x1f
+#define HUBP3_HUBPREQ_DEBUG__HUBPREQ_DEBUG_MASK 0x7FFFFFFFL
+#define HUBP3_HUBPREQ_DEBUG__HUBPREQ_DEBUG_FLIP_REQ_DURING_MALL_STATUS_MASK 0x80000000L
+//HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK__SHIFT 0xc
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK__SHIFT 0x14
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK__SHIFT 0x1c
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK_MASK 0x00000001L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK_MASK 0x00000FF0L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK_MASK 0x0001F000L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK_MASK 0x01F00000L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK_MASK 0x30000000L
+//HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK__SHIFT 0x0
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK__SHIFT 0x1
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK__SHIFT 0x4
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK__SHIFT 0xc
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK__SHIFT 0x14
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK_MASK 0x00000001L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK_MASK 0x00000002L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK_MASK 0x00000FF0L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK_MASK 0x0001F000L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK_MASK 0x01F00000L
+//HUBP3_HUBP_MALL_STATUS
+#define HUBP3_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_STATIC_SCREEN__SHIFT 0x0
+#define HUBP3_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_PSTATE_CHANGE__SHIFT 0x1
+#define HUBP3_HUBP_MALL_STATUS__MALL_USE_REQUEST__SHIFT 0x2
+#define HUBP3_HUBP_MALL_STATUS__MALL_USE_RESPONSE__SHIFT 0x3
+#define HUBP3_HUBP_MALL_STATUS__MALL_IN_USE__SHIFT 0x4
+#define HUBP3_HUBP_MALL_STATUS__MALL_PREFETCH_COMPLETE__SHIFT 0x5
+#define HUBP3_HUBP_MALL_STATUS__SUB_VP_MALL_RETRIEVE__SHIFT 0x6
+#define HUBP3_HUBP_MALL_STATUS__MCB_MALL_USE_RESPONSE__SHIFT 0x7
+#define HUBP3_HUBP_MALL_STATUS__CURSOR_LOCAL_RETRIEVE__SHIFT 0x8
+#define HUBP3_HUBP_MALL_STATUS__CURSOR_LOCAL_PREFETCH__SHIFT 0x9
+#define HUBP3_HUBP_MALL_STATUS__MALL_RETRIEVE_FRAME__SHIFT 0xa
+#define HUBP3_HUBP_MALL_STATUS__MALL_PREFETCH_FRAME__SHIFT 0xb
+#define HUBP3_HUBP_MALL_STATUS__CRQ_BUSY_WITH_MALL__SHIFT 0xc
+#define HUBP3_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL__SHIFT 0xd
+#define HUBP3_HUBP_MALL_STATUS__DRQ_BUSY_WITH_MALL__SHIFT 0xe
+#define HUBP3_HUBP_MALL_STATUS__USE_ONE_ROW_FOR_FRAME__SHIFT 0xf
+#define HUBP3_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_RETRIEVE_IN_PROGRESS__SHIFT 0x10
+#define HUBP3_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_OUTSTANDING__SHIFT 0x11
+#define HUBP3_HUBP_MALL_STATUS__DRQ_MALL_OUTSTANDING__SHIFT 0x12
+#define HUBP3_HUBP_MALL_STATUS__DRQ_MALL_CNT_ZERO__SHIFT 0x13
+#define HUBP3_HUBP_MALL_STATUS__MRQ_S1_MALL_RETRIEVE_SUB_VP__SHIFT 0x14
+#define HUBP3_HUBP_MALL_STATUS__MRQ_S0_MALL_RETRIEVE_SUB_VP__SHIFT 0x15
+#define HUBP3_HUBP_MALL_STATUS__MRQ_MALL_OUTSTANDING__SHIFT 0x16
+#define HUBP3_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_SUB_VP__SHIFT 0x17
+#define HUBP3_HUBP_MALL_STATUS__CRQ_MALL_OUTSTANDING__SHIFT 0x18
+#define HUBP3_HUBP_MALL_STATUS__CRQ_LOCAL_OUTSTANDING__SHIFT 0x19
+#define HUBP3_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_STATIC_SCREEN_MASK 0x00000001L
+#define HUBP3_HUBP_MALL_STATUS__MALL_USE_REQ_FOR_PSTATE_CHANGE_MASK 0x00000002L
+#define HUBP3_HUBP_MALL_STATUS__MALL_USE_REQUEST_MASK 0x00000004L
+#define HUBP3_HUBP_MALL_STATUS__MALL_USE_RESPONSE_MASK 0x00000008L
+#define HUBP3_HUBP_MALL_STATUS__MALL_IN_USE_MASK 0x00000010L
+#define HUBP3_HUBP_MALL_STATUS__MALL_PREFETCH_COMPLETE_MASK 0x00000020L
+#define HUBP3_HUBP_MALL_STATUS__SUB_VP_MALL_RETRIEVE_MASK 0x00000040L
+#define HUBP3_HUBP_MALL_STATUS__MCB_MALL_USE_RESPONSE_MASK 0x00000080L
+#define HUBP3_HUBP_MALL_STATUS__CURSOR_LOCAL_RETRIEVE_MASK 0x00000100L
+#define HUBP3_HUBP_MALL_STATUS__CURSOR_LOCAL_PREFETCH_MASK 0x00000200L
+#define HUBP3_HUBP_MALL_STATUS__MALL_RETRIEVE_FRAME_MASK 0x00000400L
+#define HUBP3_HUBP_MALL_STATUS__MALL_PREFETCH_FRAME_MASK 0x00000800L
+#define HUBP3_HUBP_MALL_STATUS__CRQ_BUSY_WITH_MALL_MASK 0x00001000L
+#define HUBP3_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_MASK 0x00002000L
+#define HUBP3_HUBP_MALL_STATUS__DRQ_BUSY_WITH_MALL_MASK 0x00004000L
+#define HUBP3_HUBP_MALL_STATUS__USE_ONE_ROW_FOR_FRAME_MASK 0x00008000L
+#define HUBP3_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_RETRIEVE_IN_PROGRESS_MASK 0x00010000L
+#define HUBP3_HUBP_MALL_STATUS__DRQ_SUB_VP_MALL_OUTSTANDING_MASK 0x00020000L
+#define HUBP3_HUBP_MALL_STATUS__DRQ_MALL_OUTSTANDING_MASK 0x00040000L
+#define HUBP3_HUBP_MALL_STATUS__DRQ_MALL_CNT_ZERO_MASK 0x00080000L
+#define HUBP3_HUBP_MALL_STATUS__MRQ_S1_MALL_RETRIEVE_SUB_VP_MASK 0x00100000L
+#define HUBP3_HUBP_MALL_STATUS__MRQ_S0_MALL_RETRIEVE_SUB_VP_MASK 0x00200000L
+#define HUBP3_HUBP_MALL_STATUS__MRQ_MALL_OUTSTANDING_MASK 0x00400000L
+#define HUBP3_HUBP_MALL_STATUS__MRQ_BUSY_WITH_MALL_SUB_VP_MASK 0x00800000L
+#define HUBP3_HUBP_MALL_STATUS__CRQ_MALL_OUTSTANDING_MASK 0x01000000L
+#define HUBP3_HUBP_MALL_STATUS__CRQ_LOCAL_OUTSTANDING_MASK 0x02000000L
+
+
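// Usage sketch (editorial illustration, not part of the generated register data
// in this patch): each register above comes as a paired __SHIFT/_MASK define,
// intended to be combined when decoding or composing a 32-bit register value.
// The helper names below (hubp_get_field/hubp_set_field/example_vtg_sel) and the
// use of <stdint.h> types are assumptions for a standalone sketch only; the real
// driver uses its own register-access helpers.

#include <stdint.h>

/* Right-align one field of a 32-bit register value using its MASK/SHIFT pair. */
static inline uint32_t hubp_get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Clear a field in a 32-bit register value, then insert a new field value. */
static inline uint32_t hubp_set_field(uint32_t reg, uint32_t mask, uint32_t shift,
				      uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Example: extract HUBP_VTG_SEL from a previously read DCHUBP_CNTL value. */
static inline uint32_t example_vtg_sel(uint32_t dchubp_cntl_val)
{
	return hubp_get_field(dchubp_cntl_val,
			      HUBP3_DCHUBP_CNTL__HUBP_VTG_SEL_MASK,
			      HUBP3_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT);
}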
+// addressBlock: dce_dc_dcbubp3_dispdec_hubpreq_dispdec
+//HUBPREQ3_DCSURF_SURFACE_PITCH
+#define HUBPREQ3_DCSURF_SURFACE_PITCH__PITCH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_PITCH__META_PITCH__SHIFT 0x10
+#define HUBPREQ3_DCSURF_SURFACE_PITCH__PITCH_MASK 0x00003FFFL
+#define HUBPREQ3_DCSURF_SURFACE_PITCH__META_PITCH_MASK 0x3FFF0000L
+//HUBPREQ3_DCSURF_SURFACE_PITCH_C
+#define HUBPREQ3_DCSURF_SURFACE_PITCH_C__PITCH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_PITCH_C__META_PITCH_C__SHIFT 0x10
+#define HUBPREQ3_DCSURF_SURFACE_PITCH_C__PITCH_C_MASK 0x00003FFFL
+#define HUBPREQ3_DCSURF_SURFACE_PITCH_C__META_PITCH_C_MASK 0x3FFF0000L
+//HUBPREQ3_VMID_SETTINGS_0
+#define HUBPREQ3_VMID_SETTINGS_0__VMID__SHIFT 0x0
+#define HUBPREQ3_VMID_SETTINGS_0__VMID_MASK 0x0000000FL
+//HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SURFACE_CONTROL
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK__SHIFT 0x2
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_C__SHIFT 0x5
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK__SHIFT 0xa
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_C__SHIFT 0xd
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x11
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x12
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x13
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_MASK 0x0000000CL
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_BLK_C_MASK 0x00000060L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_MASK 0x00000C00L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_BLK_C_MASK 0x00006000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00020000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x00040000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x00080000L
+//HUBPREQ3_DCSURF_FLIP_CONTROL
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM__SHIFT 0x4
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS__SHIFT 0x9
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC__SHIFT 0xc
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC__SHIFT 0x10
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE__SHIFT 0x11
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY__SHIFT 0x12
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY__SHIFT 0x14
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK_MASK 0x00000001L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE_MASK 0x00000002L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM_MASK 0x000000F0L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK 0x00000100L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS_MASK 0x00000200L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC_MASK 0x00003000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC_MASK 0x00010000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE_MASK 0x00020000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY_MASK 0x00040000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY_MASK 0x3FF00000L
+//HUBPREQ3_DCSURF_FLIP_CONTROL2
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME__SHIFT 0x0
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE__SHIFT 0x8
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK__SHIFT 0x9
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE__SHIFT 0xa
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH__SHIFT 0xc
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME_MASK 0x000000FFL
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE_MASK 0x00000100L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK_MASK 0x00000200L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE_MASK 0x00000400L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH_MASK 0x00001000L
+//HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE__SHIFT 0x1
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK__SHIFT 0x2
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE__SHIFT 0x3
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR__SHIFT 0x8
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR__SHIFT 0x9
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED__SHIFT 0x10
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS__SHIFT 0x11
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED__SHIFT 0x12
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS__SHIFT 0x13
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK_MASK 0x00000001L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE_MASK 0x00000002L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK_MASK 0x00000004L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE_MASK 0x00000008L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR_MASK 0x00000100L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR_MASK 0x00000200L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED_MASK 0x00010000L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS_MASK 0x00020000L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED_MASK 0x00040000L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS_MASK 0x00080000L
+//HUBPREQ3_DCSURF_SURFACE_INUSE
+#define HUBPREQ3_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_VMID__SHIFT 0x1c
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_VMID_MASK 0xF0000000L
+//HUBPREQ3_DCSURF_SURFACE_INUSE_C
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_VMID_C__SHIFT 0x1c
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_VMID_C_MASK 0xF0000000L
+//HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_VMID__SHIFT 0x1c
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_VMID_MASK 0xF0000000L
+//HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_VMID_C__SHIFT 0x1c
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_VMID_C_MASK 0xF0000000L
+//HUBPREQ3_DCN_EXPANSION_MODE
+#define HUBPREQ3_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE__SHIFT 0x0
+#define HUBPREQ3_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE__SHIFT 0x2
+#define HUBPREQ3_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE__SHIFT 0x4
+#define HUBPREQ3_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE__SHIFT 0x6
+#define HUBPREQ3_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE_MASK 0x00000003L
+#define HUBPREQ3_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE_MASK 0x0000000CL
+#define HUBPREQ3_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE_MASK 0x00000030L
+#define HUBPREQ3_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE_MASK 0x000000C0L
+//HUBPREQ3_DCN_TTU_QOS_WM
+#define HUBPREQ3_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM__SHIFT 0x0
+#define HUBPREQ3_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM__SHIFT 0x10
+#define HUBPREQ3_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM_MASK 0x00003FFFL
+#define HUBPREQ3_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM_MASK 0x3FFF0000L
+//HUBPREQ3_DCN_GLOBAL_TTU_CNTL
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK__SHIFT 0x0
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__PIPE_IN_FLUSH_URGENT__SHIFT 0x18
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__PRQ_MRQ_FLUSH_URGENT__SHIFT 0x19
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__ROW_TTU_MODE__SHIFT 0x1b
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP__SHIFT 0x1c
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK_MASK 0x00FFFFFFL
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__PIPE_IN_FLUSH_URGENT_MASK 0x01000000L
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__PRQ_MRQ_FLUSH_URGENT_MASK 0x02000000L
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__ROW_TTU_MODE_MASK 0x08000000L
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP_MASK 0xF0000000L
+//HUBPREQ3_DCN_SURF0_TTU_CNTL0
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ3_DCN_SURF0_TTU_CNTL1
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ3_DCN_SURF1_TTU_CNTL0
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ3_DCN_SURF1_TTU_CNTL1
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ3_DCN_CUR0_TTU_CNTL0
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ3_DCN_CUR0_TTU_CNTL1
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ3_DCN_CUR1_TTU_CNTL0
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ3_DCN_CUR1_TTU_CNTL1
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ3_DCN_DMDATA_VM_CNTL
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__REFCYC_PER_VM_DMDATA__SHIFT 0x0
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS__SHIFT 0x10
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_CLEAR__SHIFT 0x14
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS__SHIFT 0x18
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_LATE_STATUS__SHIFT 0x19
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_CLEAR__SHIFT 0x1a
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_DONE__SHIFT 0x1f
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__REFCYC_PER_VM_DMDATA_MASK 0x0000FFFFL
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_MASK 0x000F0000L
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_FAULT_STATUS_CLEAR_MASK 0x00100000L
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_MASK 0x01000000L
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_LATE_STATUS_MASK 0x02000000L
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_UNDERFLOW_STATUS_CLEAR_MASK 0x04000000L
+#define HUBPREQ3_DCN_DMDATA_VM_CNTL__DMDATA_VM_DONE_MASK 0x80000000L
+//HUBPREQ3_DCN_VM_SYSTEM_APERTURE_LOW_ADDR
+#define HUBPREQ3_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR__SHIFT 0x0
+#define HUBPREQ3_DCN_VM_SYSTEM_APERTURE_LOW_ADDR__MC_VM_SYSTEM_APERTURE_LOW_ADDR_MASK 0x3FFFFFFFL
+//HUBPREQ3_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR
+#define HUBPREQ3_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR__SHIFT 0x0
+#define HUBPREQ3_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR__MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MASK 0x3FFFFFFFL
+//HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
+#define HUBPREQ3_DCN_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
+//HUBPREQ3_BLANK_OFFSET_0
+#define HUBPREQ3_BLANK_OFFSET_0__REFCYC_H_BLANK_END__SHIFT 0x0
+#define HUBPREQ3_BLANK_OFFSET_0__DLG_V_BLANK_END__SHIFT 0x10
+#define HUBPREQ3_BLANK_OFFSET_0__REFCYC_H_BLANK_END_MASK 0x00001FFFL
+#define HUBPREQ3_BLANK_OFFSET_0__DLG_V_BLANK_END_MASK 0x7FFF0000L
+//HUBPREQ3_BLANK_OFFSET_1
+#define HUBPREQ3_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START__SHIFT 0x0
+#define HUBPREQ3_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START_MASK 0x0003FFFFL
+//HUBPREQ3_DST_DIMENSIONS
+#define HUBPREQ3_DST_DIMENSIONS__REFCYC_PER_HTOTAL__SHIFT 0x0
+#define HUBPREQ3_DST_DIMENSIONS__REFCYC_PER_HTOTAL_MASK 0x001FFFFFL
+//HUBPREQ3_DST_AFTER_SCALER
+#define HUBPREQ3_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER__SHIFT 0x0
+#define HUBPREQ3_DST_AFTER_SCALER__DST_Y_AFTER_SCALER__SHIFT 0x10
+#define HUBPREQ3_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER_MASK 0x00001FFFL
+#define HUBPREQ3_DST_AFTER_SCALER__DST_Y_AFTER_SCALER_MASK 0x00070000L
+//HUBPREQ3_PREFETCH_SETTINGS
+#define HUBPREQ3_PREFETCH_SETTINGS__VRATIO_PREFETCH__SHIFT 0x0
+#define HUBPREQ3_PREFETCH_SETTINGS__DST_Y_PREFETCH__SHIFT 0x18
+#define HUBPREQ3_PREFETCH_SETTINGS__VRATIO_PREFETCH_MASK 0x003FFFFFL
+#define HUBPREQ3_PREFETCH_SETTINGS__DST_Y_PREFETCH_MASK 0xFF000000L
+//HUBPREQ3_PREFETCH_SETTINGS_C
+#define HUBPREQ3_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C__SHIFT 0x0
+#define HUBPREQ3_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C_MASK 0x003FFFFFL
+//HUBPREQ3_VBLANK_PARAMETERS_0
+#define HUBPREQ3_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK__SHIFT 0x8
+#define HUBPREQ3_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK_MASK 0x0000007FL
+#define HUBPREQ3_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK_MASK 0x00003F00L
+//HUBPREQ3_VBLANK_PARAMETERS_1
+#define HUBPREQ3_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ3_VBLANK_PARAMETERS_2
+#define HUBPREQ3_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ3_VBLANK_PARAMETERS_3
+#define HUBPREQ3_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ3_VBLANK_PARAMETERS_4
+#define HUBPREQ3_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ3_FLIP_PARAMETERS_0
+#define HUBPREQ3_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP__SHIFT 0x8
+#define HUBPREQ3_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP_MASK 0x0000007FL
+#define HUBPREQ3_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP_MASK 0x00003F00L
+//HUBPREQ3_FLIP_PARAMETERS_1
+#define HUBPREQ3_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_1__REFCYC_PER_PTE_GROUP_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ3_FLIP_PARAMETERS_2
+#define HUBPREQ3_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ3_NOM_PARAMETERS_0
+#define HUBPREQ3_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_0__DST_Y_PER_PTE_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ3_NOM_PARAMETERS_1
+#define HUBPREQ3_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_1__REFCYC_PER_PTE_GROUP_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ3_NOM_PARAMETERS_2
+#define HUBPREQ3_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_2__DST_Y_PER_PTE_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ3_NOM_PARAMETERS_3
+#define HUBPREQ3_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_3__REFCYC_PER_PTE_GROUP_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ3_NOM_PARAMETERS_4
+#define HUBPREQ3_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ3_NOM_PARAMETERS_5
+#define HUBPREQ3_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ3_NOM_PARAMETERS_6
+#define HUBPREQ3_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ3_NOM_PARAMETERS_7
+#define HUBPREQ3_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ3_PER_LINE_DELIVERY_PRE
+#define HUBPREQ3_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L__SHIFT 0x0
+#define HUBPREQ3_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C__SHIFT 0x10
+#define HUBPREQ3_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L_MASK 0x00001FFFL
+#define HUBPREQ3_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C_MASK 0x1FFF0000L
+//HUBPREQ3_PER_LINE_DELIVERY
+#define HUBPREQ3_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L__SHIFT 0x0
+#define HUBPREQ3_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C__SHIFT 0x10
+#define HUBPREQ3_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L_MASK 0x00001FFFL
+#define HUBPREQ3_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C_MASK 0x1FFF0000L
+//HUBPREQ3_CURSOR_SETTINGS
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET__SHIFT 0x0
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST__SHIFT 0x8
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET__SHIFT 0x10
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST__SHIFT 0x18
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET_MASK 0x000000FFL
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST_MASK 0x00000300L
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET_MASK 0x00FF0000L
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST_MASK 0x03000000L
+//HUBPREQ3_REF_FREQ_TO_PIX_FREQ
+#define HUBPREQ3_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ__SHIFT 0x0
+#define HUBPREQ3_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ_MASK 0x001FFFFFL
+//HUBPREQ3_DST_Y_DELTA_DRQ_LIMIT
+#define HUBPREQ3_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT__SHIFT 0x0
+#define HUBPREQ3_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT_MASK 0x00007FFFL
+//HUBPREQ3_HUBPREQ_MEM_PWR_CTRL
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE__SHIFT 0x4
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS__SHIFT 0x6
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE__SHIFT 0xc
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS__SHIFT 0xe
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE_MASK 0x00000030L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS_MASK 0x00000040L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE_MASK 0x00003000L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS_MASK 0x00004000L
+//HUBPREQ3_HUBPREQ_MEM_PWR_STATUS
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE__SHIFT 0x6
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE_MASK 0x000000C0L
+//HUBPREQ3_VBLANK_PARAMETERS_5
+#define HUBPREQ3_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_5__REFCYC_PER_VM_GROUP_VBLANK_MASK 0x007FFFFFL
+//HUBPREQ3_VBLANK_PARAMETERS_6
+#define HUBPREQ3_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_6__REFCYC_PER_VM_REQ_VBLANK_MASK 0x007FFFFFL
+//HUBPREQ3_FLIP_PARAMETERS_3
+#define HUBPREQ3_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_3__REFCYC_PER_VM_GROUP_FLIP_MASK 0x007FFFFFL
+//HUBPREQ3_FLIP_PARAMETERS_4
+#define HUBPREQ3_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_4__REFCYC_PER_VM_REQ_FLIP_MASK 0x007FFFFFL
+//HUBPREQ3_FLIP_PARAMETERS_5
+#define HUBPREQ3_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_5__REFCYC_PER_PTE_GROUP_FLIP_C_MASK 0x007FFFFFL
+//HUBPREQ3_FLIP_PARAMETERS_6
+#define HUBPREQ3_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_6__REFCYC_PER_META_CHUNK_FLIP_C_MASK 0x007FFFFFL
+//HUBPREQ3_UCLK_PSTATE_FORCE
+#define HUBPREQ3_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_EN__SHIFT 0x0
+#define HUBPREQ3_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_VALUE__SHIFT 0x1
+#define HUBPREQ3_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_EN__SHIFT 0x2
+#define HUBPREQ3_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_VALUE__SHIFT 0x3
+#define HUBPREQ3_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_EN_MASK 0x00000001L
+#define HUBPREQ3_UCLK_PSTATE_FORCE__DATA_UCLK_PSTATE_FORCE_VALUE_MASK 0x00000002L
+#define HUBPREQ3_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_EN_MASK 0x00000004L
+#define HUBPREQ3_UCLK_PSTATE_FORCE__CURSOR_UCLK_PSTATE_FORCE_VALUE_MASK 0x00000008L
+//HUBPREQ3_HUBPREQ_STATUS_REG0
+#define HUBPREQ3_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S0__SHIFT 0x0
+#define HUBPREQ3_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S1__SHIFT 0x8
+#define HUBPREQ3_HUBPREQ_STATUS_REG0__STATUS_VTG_COUNT__SHIFT 0x10
+#define HUBPREQ3_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S0_MASK 0x0000001FL
+#define HUBPREQ3_HUBPREQ_STATUS_REG0__STATUS_MPTE_ROW_READY_S1_MASK 0x00001F00L
+#define HUBPREQ3_HUBPREQ_STATUS_REG0__STATUS_VTG_COUNT_MASK 0x7FFF0000L
+//HUBPREQ3_HUBPREQ_STATUS_REG1
+#define HUBPREQ3_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S0__SHIFT 0x0
+#define HUBPREQ3_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S1__SHIFT 0x10
+#define HUBPREQ3_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S0_MASK 0x00003FFFL
+#define HUBPREQ3_HUBPREQ_STATUS_REG1__STATUS_CHUNK_REQ_X_OR_Y_S1_MASK 0x3FFF0000L
+//HUBPREQ3_HUBPREQ_STATUS_REG2
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S0__SHIFT 0x0
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S0__SHIFT 0x1
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S0__SHIFT 0x2
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S0__SHIFT 0x3
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S0__SHIFT 0x4
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S0__SHIFT 0x5
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S1__SHIFT 0x8
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S1__SHIFT 0x9
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S1__SHIFT 0xa
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S1__SHIFT 0xb
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S1__SHIFT 0xc
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S1__SHIFT 0xd
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_CUR__SHIFT 0x10
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_CUR__SHIFT 0x11
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_CUR__SHIFT 0x12
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_CUR__SHIFT 0x13
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_CUR__SHIFT 0x14
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_CUR__SHIFT 0x15
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_VBLANK__SHIFT 0x1a
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_HUBP_EN__SHIFT 0x1b
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_RECOVERY__SHIFT 0x1c
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_FLUSH__SHIFT 0x1d
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S0__SHIFT 0x1e
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S1__SHIFT 0x1f
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S0_MASK 0x00000001L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S0_MASK 0x00000002L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S0_MASK 0x00000004L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S0_MASK 0x00000008L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S0_MASK 0x00000010L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S0_MASK 0x00000020L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_S1_MASK 0x00000100L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_S1_MASK 0x00000200L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_S1_MASK 0x00000400L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_S1_MASK 0x00000800L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_S1_MASK 0x00001000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_S1_MASK 0x00002000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_EXIT_SELF_REFRESH_CUR_MASK 0x00010000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ENTER_SELF_REFRESH_CUR_MASK 0x00020000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_UCLK_PSTATE_CHANGE_CUR_MASK 0x00040000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_FCLK_PSTATE_CHANGE_CUR_MASK 0x00080000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_ALLOW_USR_RETRAINING_CUR_MASK 0x00100000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_QOS_URGENT_CUR_MASK 0x00200000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_VBLANK_MASK 0x04000000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_HUBP_EN_MASK 0x08000000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_RECOVERY_MASK 0x10000000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_PIPE_IN_FLUSH_MASK 0x20000000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S0_MASK 0x40000000L
+#define HUBPREQ3_HUBPREQ_STATUS_REG2__STATUS_FLIP_ACTIVE_S1_MASK 0x80000000L
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubpret_dispdec
+//HUBPRET3_HUBPRET_CONTROL
+#define HUBPRET3_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS__SHIFT 0x4
+#define HUBPRET3_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE__SHIFT 0xf
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT 0x10
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G__SHIFT 0x12
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B__SHIFT 0x14
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R__SHIFT 0x16
+#define HUBPRET3_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE__SHIFT 0x18
+#define HUBPRET3_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS_MASK 0x00007FF0L
+#define HUBPRET3_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE_MASK 0x00008000L
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK 0x00030000L
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G_MASK 0x000C0000L
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B_MASK 0x00300000L
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R_MASK 0x00C00000L
+#define HUBPRET3_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE_MASK 0xFF000000L
+//HUBPRET3_HUBPRET_MEM_PWR_CTRL
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE__SHIFT 0x10
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS__SHIFT 0x12
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE__SHIFT 0x14
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE_MASK 0x00030000L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS_MASK 0x00040000L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE_MASK 0x00300000L
+//HUBPRET3_HUBPRET_MEM_PWR_STATUS
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE_MASK 0x00000030L
+//HUBPRET3_HUBPRET_READ_LINE_CTRL0
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE_MASK 0x0000FFFFL
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM_MASK 0x3FFF0000L
+//HUBPRET3_HUBPRET_READ_LINE_CTRL1
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED_MASK 0x00003FFFL
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE_MASK 0xFFFF0000L
+//HUBPRET3_HUBPRET_READ_LINE0
+#define HUBPRET3_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START_MASK 0x00003FFFL
+#define HUBPRET3_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END_MASK 0x3FFF0000L
+//HUBPRET3_HUBPRET_READ_LINE1
+#define HUBPRET3_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START_MASK 0x00003FFFL
+#define HUBPRET3_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END_MASK 0x3FFF0000L
+//HUBPRET3_HUBPRET_INTERRUPT
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK__SHIFT 0x0
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK__SHIFT 0x1
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK__SHIFT 0x2
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE__SHIFT 0x4
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE__SHIFT 0x5
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE__SHIFT 0x6
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR__SHIFT 0x8
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR__SHIFT 0x9
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR__SHIFT 0xa
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS__SHIFT 0xc
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS__SHIFT 0xd
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS__SHIFT 0xe
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS__SHIFT 0x10
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS__SHIFT 0x11
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS__SHIFT 0x12
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK_MASK 0x00000001L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK_MASK 0x00000002L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK_MASK 0x00000004L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE_MASK 0x00000010L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE_MASK 0x00000020L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE_MASK 0x00000040L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK 0x00000100L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR_MASK 0x00000200L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR_MASK 0x00000400L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS_MASK 0x00001000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS_MASK 0x00002000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS_MASK 0x00004000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK 0x00010000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS_MASK 0x00020000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS_MASK 0x00040000L
+//HUBPRET3_HUBPRET_READ_LINE_VALUE
+#define HUBPRET3_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_MASK 0x00003FFFL
+#define HUBPRET3_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT_MASK 0x3FFF0000L
+//HUBPRET3_HUBPRET_READ_LINE_STATUS
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE__SHIFT 0x4
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE__SHIFT 0x5
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT 0x8
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE__SHIFT 0xa
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK_MASK 0x00000001L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE_MASK 0x00000010L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE_MASK 0x00000020L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK 0x00000100L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE_MASK 0x00000400L
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_cursor0_dispdec
+//CURSOR0_3_CURSOR_CONTROL
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_ENABLE__SHIFT 0x0
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_REQ_MODE__SHIFT 0x2
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_MODE__SHIFT 0x8
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_TMZ__SHIFT 0xc
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PITCH__SHIFT 0x10
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS__SHIFT 0x14
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK__SHIFT 0x18
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN__SHIFT 0x1e
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL__SHIFT 0x1f
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_ENABLE_MASK 0x00000001L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_REQ_MODE_MASK 0x00000004L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_MODE_MASK 0x00000700L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_TMZ_MASK 0x00001000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PITCH_MASK 0x00030000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS_MASK 0x00100000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK_MASK 0x1F000000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN_MASK 0x40000000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL_MASK 0x80000000L
+//CURSOR0_3_CURSOR_SURFACE_ADDRESS
+#define CURSOR0_3_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0
+#define CURSOR0_3_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//CURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH
+#define CURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//CURSOR0_3_CURSOR_SIZE
+#define CURSOR0_3_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT 0x0
+#define CURSOR0_3_CURSOR_SIZE__CURSOR_WIDTH__SHIFT 0x10
+#define CURSOR0_3_CURSOR_SIZE__CURSOR_HEIGHT_MASK 0x000001FFL
+#define CURSOR0_3_CURSOR_SIZE__CURSOR_WIDTH_MASK 0x01FF0000L
+//CURSOR0_3_CURSOR_POSITION
+#define CURSOR0_3_CURSOR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0
+#define CURSOR0_3_CURSOR_POSITION__CURSOR_X_POSITION__SHIFT 0x10
+#define CURSOR0_3_CURSOR_POSITION__CURSOR_Y_POSITION_MASK 0x00003FFFL
+#define CURSOR0_3_CURSOR_POSITION__CURSOR_X_POSITION_MASK 0x3FFF0000L
+//CURSOR0_3_CURSOR_HOT_SPOT
+#define CURSOR0_3_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0
+#define CURSOR0_3_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10
+#define CURSOR0_3_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x000000FFL
+#define CURSOR0_3_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x00FF0000L
+//CURSOR0_3_CURSOR_STEREO_CONTROL
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x12
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x00000001L
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x0003FFF0L
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0xFFFC0000L
+//CURSOR0_3_CURSOR_DST_OFFSET
+#define CURSOR0_3_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET__SHIFT 0x0
+#define CURSOR0_3_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET_MASK 0x00001FFFL
+//CURSOR0_3_CURSOR_MEM_PWR_CTRL
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE__SHIFT 0x0
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS__SHIFT 0x2
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE__SHIFT 0x4
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE_MASK 0x00000003L
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS_MASK 0x00000004L
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE_MASK 0x00000030L
+//CURSOR0_3_CURSOR_MEM_PWR_STATUS
+#define CURSOR0_3_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE__SHIFT 0x0
+#define CURSOR0_3_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE_MASK 0x00000003L
+//CURSOR0_3_DMDATA_ADDRESS_HIGH
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_TMZ__SHIFT 0x1e
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_TMZ_MASK 0x40000000L
+//CURSOR0_3_DMDATA_ADDRESS_LOW
+#define CURSOR0_3_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW__SHIFT 0x0
+#define CURSOR0_3_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//CURSOR0_3_DMDATA_CNTL
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_UPDATED__SHIFT 0x0
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_REPEAT__SHIFT 0x1
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_MODE__SHIFT 0x2
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_SIZE__SHIFT 0x10
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_UPDATED_MASK 0x00000001L
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_REPEAT_MASK 0x00000002L
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_MODE_MASK 0x00000004L
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_SIZE_MASK 0x0FFF0000L
+//CURSOR0_3_DMDATA_QOS_CNTL
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_QOS_MODE__SHIFT 0x0
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL__SHIFT 0x4
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_DL_DELTA__SHIFT 0x10
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_QOS_MODE_MASK 0x00000001L
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL_MASK 0x000000F0L
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_DL_DELTA_MASK 0xFFFF0000L
+//CURSOR0_3_DMDATA_STATUS
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_DONE__SHIFT 0x0
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_UNDERFLOW__SHIFT 0x2
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR__SHIFT 0x4
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_DONE_MASK 0x00000001L
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_UNDERFLOW_MASK 0x00000004L
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR_MASK 0x00000010L
+//CURSOR0_3_DMDATA_SW_CNTL
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_UPDATED__SHIFT 0x0
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_REPEAT__SHIFT 0x1
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_SIZE__SHIFT 0x10
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_UPDATED_MASK 0x00000001L
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_REPEAT_MASK 0x00000002L
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_SIZE_MASK 0x0FFF0000L
+//CURSOR0_3_DMDATA_SW_DATA
+#define CURSOR0_3_DMDATA_SW_DATA__DMDATA_SW_DATA__SHIFT 0x0
+#define CURSOR0_3_DMDATA_SW_DATA__DMDATA_SW_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON10_PERFCOUNTER_CNTL
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON10_PERFCOUNTER_CNTL2
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON10_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON10_PERFCOUNTER_STATE
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON10_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON10_PERFMON_CNTL
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON10_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON10_PERFMON_CNTL2
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON10_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON10_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON10_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON10_PERFMON_CVALUE_LOW
+#define DC_PERFMON10_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON10_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON10_PERFMON_HI
+#define DC_PERFMON10_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON10_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON10_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON10_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON10_PERFMON_LOW
+#define DC_PERFMON10_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON10_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cnvc_cfg_dispdec
+//CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT
+#define CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT__CNVC_ALPHA_PLANE_ENABLE__SHIFT 0x8
+#define CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT__CNVC_ALPHA_PLANE_ENABLE_MASK 0x00000100L
+//CNVC_CFG0_FORMAT_CONTROL
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_EXPANSION_MODE__SHIFT 0x0
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CNV16__SHIFT 0x4
+#define CNVC_CFG0_FORMAT_CONTROL__ALPHA_EN__SHIFT 0x8
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_BYPASS__SHIFT 0xc
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN__SHIFT 0xd
+#define CNVC_CFG0_FORMAT_CONTROL__CLAMP_POSITIVE__SHIFT 0x10
+#define CNVC_CFG0_FORMAT_CONTROL__CLAMP_POSITIVE_C__SHIFT 0x11
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_UPDATE_PENDING__SHIFT 0x14
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CROSSBAR_R__SHIFT 0x18
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CROSSBAR_G__SHIFT 0x1a
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CROSSBAR_B__SHIFT 0x1c
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_EXPANSION_MODE_MASK 0x00000001L
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CNV16_MASK 0x00000010L
+#define CNVC_CFG0_FORMAT_CONTROL__ALPHA_EN_MASK 0x00000100L
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_BYPASS_MASK 0x00001000L
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN_MASK 0x00002000L
+#define CNVC_CFG0_FORMAT_CONTROL__CLAMP_POSITIVE_MASK 0x00010000L
+#define CNVC_CFG0_FORMAT_CONTROL__CLAMP_POSITIVE_C_MASK 0x00020000L
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_UPDATE_PENDING_MASK 0x00100000L
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CROSSBAR_R_MASK 0x03000000L
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CROSSBAR_G_MASK 0x0C000000L
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CROSSBAR_B_MASK 0x30000000L
+//CNVC_CFG0_FCNV_FP_BIAS_R
+#define CNVC_CFG0_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_BIAS_G
+#define CNVC_CFG0_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_BIAS_B
+#define CNVC_CFG0_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_SCALE_R
+#define CNVC_CFG0_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_SCALE_G
+#define CNVC_CFG0_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_SCALE_B
+#define CNVC_CFG0_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B_MASK 0x0007FFFFL
+//CNVC_CFG0_COLOR_KEYER_CONTROL
+#define CNVC_CFG0_COLOR_KEYER_CONTROL__COLOR_KEYER_EN__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE__SHIFT 0x4
+#define CNVC_CFG0_COLOR_KEYER_CONTROL__COLOR_KEYER_EN_MASK 0x00000001L
+#define CNVC_CFG0_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE_MASK 0x00000030L
+//CNVC_CFG0_COLOR_KEYER_ALPHA
+#define CNVC_CFG0_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH__SHIFT 0x10
+#define CNVC_CFG0_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG0_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG0_COLOR_KEYER_RED
+#define CNVC_CFG0_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT 0x10
+#define CNVC_CFG0_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG0_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG0_COLOR_KEYER_GREEN
+#define CNVC_CFG0_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH__SHIFT 0x10
+#define CNVC_CFG0_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG0_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG0_COLOR_KEYER_BLUE
+#define CNVC_CFG0_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH__SHIFT 0x10
+#define CNVC_CFG0_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG0_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG0_ALPHA_2BIT_LUT
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0__SHIFT 0x0
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1__SHIFT 0x8
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2__SHIFT 0x10
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3__SHIFT 0x18
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0_MASK 0x000000FFL
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1_MASK 0x0000FF00L
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2_MASK 0x00FF0000L
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3_MASK 0xFF000000L
+//CNVC_CFG0_PRE_DEALPHA
+#define CNVC_CFG0_PRE_DEALPHA__PRE_DEALPHA_EN__SHIFT 0x0
+#define CNVC_CFG0_PRE_DEALPHA__PRE_DEALPHA_ABLND_EN__SHIFT 0x4
+#define CNVC_CFG0_PRE_DEALPHA__PRE_DEALPHA_EN_MASK 0x00000001L
+#define CNVC_CFG0_PRE_DEALPHA__PRE_DEALPHA_ABLND_EN_MASK 0x00000010L
+//CNVC_CFG0_PRE_CSC_MODE
+#define CNVC_CFG0_PRE_CSC_MODE__PRE_CSC_MODE__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_MODE__PRE_CSC_MODE_CURRENT__SHIFT 0x2
+#define CNVC_CFG0_PRE_CSC_MODE__PRE_CSC_MODE_MASK 0x00000003L
+#define CNVC_CFG0_PRE_CSC_MODE__PRE_CSC_MODE_CURRENT_MASK 0x0000000CL
+//CNVC_CFG0_PRE_CSC_C11_C12
+#define CNVC_CFG0_PRE_CSC_C11_C12__PRE_CSC_C11__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_C11_C12__PRE_CSC_C12__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_C11_C12__PRE_CSC_C11_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_C11_C12__PRE_CSC_C12_MASK 0xFFFF0000L
+//CNVC_CFG0_PRE_CSC_C13_C14
+#define CNVC_CFG0_PRE_CSC_C13_C14__PRE_CSC_C13__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_C13_C14__PRE_CSC_C14__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_C13_C14__PRE_CSC_C13_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_C13_C14__PRE_CSC_C14_MASK 0xFFFF0000L
+//CNVC_CFG0_PRE_CSC_C21_C22
+#define CNVC_CFG0_PRE_CSC_C21_C22__PRE_CSC_C21__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_C21_C22__PRE_CSC_C22__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_C21_C22__PRE_CSC_C21_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_C21_C22__PRE_CSC_C22_MASK 0xFFFF0000L
+//CNVC_CFG0_PRE_CSC_C23_C24
+#define CNVC_CFG0_PRE_CSC_C23_C24__PRE_CSC_C23__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_C23_C24__PRE_CSC_C24__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_C23_C24__PRE_CSC_C23_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_C23_C24__PRE_CSC_C24_MASK 0xFFFF0000L
+//CNVC_CFG0_PRE_CSC_C31_C32
+#define CNVC_CFG0_PRE_CSC_C31_C32__PRE_CSC_C31__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_C31_C32__PRE_CSC_C32__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_C31_C32__PRE_CSC_C31_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_C31_C32__PRE_CSC_C32_MASK 0xFFFF0000L
+//CNVC_CFG0_PRE_CSC_C33_C34
+#define CNVC_CFG0_PRE_CSC_C33_C34__PRE_CSC_C33__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_C33_C34__PRE_CSC_C34__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_C33_C34__PRE_CSC_C33_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_C33_C34__PRE_CSC_C34_MASK 0xFFFF0000L
+//CNVC_CFG0_PRE_CSC_B_C11_C12
+#define CNVC_CFG0_PRE_CSC_B_C11_C12__PRE_CSC_B_C11__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_B_C11_C12__PRE_CSC_B_C12__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_B_C11_C12__PRE_CSC_B_C11_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_B_C11_C12__PRE_CSC_B_C12_MASK 0xFFFF0000L
+//CNVC_CFG0_PRE_CSC_B_C13_C14
+#define CNVC_CFG0_PRE_CSC_B_C13_C14__PRE_CSC_B_C13__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_B_C13_C14__PRE_CSC_B_C14__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_B_C13_C14__PRE_CSC_B_C13_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_B_C13_C14__PRE_CSC_B_C14_MASK 0xFFFF0000L
+//CNVC_CFG0_PRE_CSC_B_C21_C22
+#define CNVC_CFG0_PRE_CSC_B_C21_C22__PRE_CSC_B_C21__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_B_C21_C22__PRE_CSC_B_C22__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_B_C21_C22__PRE_CSC_B_C21_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_B_C21_C22__PRE_CSC_B_C22_MASK 0xFFFF0000L
+//CNVC_CFG0_PRE_CSC_B_C23_C24
+#define CNVC_CFG0_PRE_CSC_B_C23_C24__PRE_CSC_B_C23__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_B_C23_C24__PRE_CSC_B_C24__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_B_C23_C24__PRE_CSC_B_C23_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_B_C23_C24__PRE_CSC_B_C24_MASK 0xFFFF0000L
+//CNVC_CFG0_PRE_CSC_B_C31_C32
+#define CNVC_CFG0_PRE_CSC_B_C31_C32__PRE_CSC_B_C31__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_B_C31_C32__PRE_CSC_B_C32__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_B_C31_C32__PRE_CSC_B_C31_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_B_C31_C32__PRE_CSC_B_C32_MASK 0xFFFF0000L
+//CNVC_CFG0_PRE_CSC_B_C33_C34
+#define CNVC_CFG0_PRE_CSC_B_C33_C34__PRE_CSC_B_C33__SHIFT 0x0
+#define CNVC_CFG0_PRE_CSC_B_C33_C34__PRE_CSC_B_C34__SHIFT 0x10
+#define CNVC_CFG0_PRE_CSC_B_C33_C34__PRE_CSC_B_C33_MASK 0x0000FFFFL
+#define CNVC_CFG0_PRE_CSC_B_C33_C34__PRE_CSC_B_C34_MASK 0xFFFF0000L
+//CNVC_CFG0_CNVC_COEF_FORMAT
+#define CNVC_CFG0_CNVC_COEF_FORMAT__PRE_CSC_COEF_FORMAT__SHIFT 0x0
+#define CNVC_CFG0_CNVC_COEF_FORMAT__PRE_CSC_COEF_FORMAT_MASK 0x00000001L
+//CNVC_CFG0_PRE_DEGAM
+#define CNVC_CFG0_PRE_DEGAM__PRE_DEGAM_MODE__SHIFT 0x0
+#define CNVC_CFG0_PRE_DEGAM__PRE_DEGAM_SELECT__SHIFT 0x4
+#define CNVC_CFG0_PRE_DEGAM__PRE_DEGAM_MODE_MASK 0x00000003L
+#define CNVC_CFG0_PRE_DEGAM__PRE_DEGAM_SELECT_MASK 0x00000070L
+//CNVC_CFG0_PRE_REALPHA
+#define CNVC_CFG0_PRE_REALPHA__PRE_REALPHA_EN__SHIFT 0x0
+#define CNVC_CFG0_PRE_REALPHA__PRE_REALPHA_ABLND_EN__SHIFT 0x4
+#define CNVC_CFG0_PRE_REALPHA__PRE_REALPHA_EN_MASK 0x00000001L
+#define CNVC_CFG0_PRE_REALPHA__PRE_REALPHA_ABLND_EN_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cnvc_cur_dispdec
+//CNVC_CUR0_CURSOR0_CONTROL
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE__SHIFT 0x0
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE__SHIFT 0x1
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_PIX_INV_MODE__SHIFT 0x2
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN__SHIFT 0x3
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE__SHIFT 0x4
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN__SHIFT 0x7
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_UPDATE_PENDING__SHIFT 0x10
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE_MASK 0x00000001L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE_MASK 0x00000002L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_PIX_INV_MODE_MASK 0x00000004L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN_MASK 0x00000008L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE_MASK 0x00000070L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN_MASK 0x00000080L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_UPDATE_PENDING_MASK 0x00010000L
+//CNVC_CUR0_CURSOR0_COLOR0
+#define CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0__SHIFT 0x0
+#define CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0_MASK 0x00FFFFFFL
+//CNVC_CUR0_CURSOR0_COLOR1
+#define CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1__SHIFT 0x0
+#define CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1_MASK 0x00FFFFFFL
+//CNVC_CUR0_CURSOR0_FP_SCALE_BIAS
+#define CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE__SHIFT 0x0
+#define CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS__SHIFT 0x10
+#define CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE_MASK 0x0000FFFFL
+#define CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dpp0_dispdec_dscl_dispdec
+//DSCL0_SCL_COEF_RAM_TAP_SELECT
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE__SHIFT 0x8
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000003L
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE_MASK 0x00003F00L
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE_MASK 0x00030000L
+//DSCL0_SCL_COEF_RAM_TAP_DATA
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L
+//DSCL0_SCL_MODE
+#define DSCL0_SCL_MODE__DSCL_MODE__SHIFT 0x0
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT__SHIFT 0x8
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT__SHIFT 0xc
+#define DSCL0_SCL_MODE__SCL_CHROMA_COEF_MODE__SHIFT 0x10
+#define DSCL0_SCL_MODE__SCL_ALPHA_COEF_MODE__SHIFT 0x14
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_RD__SHIFT 0x18
+#define DSCL0_SCL_MODE__DSCL_MODE_MASK 0x00000007L
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_MASK 0x00000100L
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT_MASK 0x00001000L
+#define DSCL0_SCL_MODE__SCL_CHROMA_COEF_MODE_MASK 0x00010000L
+#define DSCL0_SCL_MODE__SCL_ALPHA_COEF_MODE_MASK 0x00100000L
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_RD_MASK 0x01000000L
+//DSCL0_SCL_TAP_CONTROL
+#define DSCL0_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT 0x0
+#define DSCL0_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT 0x4
+#define DSCL0_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C__SHIFT 0x8
+#define DSCL0_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C__SHIFT 0xc
+#define DSCL0_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK 0x00000007L
+#define DSCL0_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK 0x00000070L
+#define DSCL0_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C_MASK 0x00000700L
+#define DSCL0_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C_MASK 0x00007000L
+//DSCL0_DSCL_CONTROL
+#define DSCL0_DSCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0
+#define DSCL0_DSCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x00000001L
+//DSCL0_DSCL_2TAP_CONTROL
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x0
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN__SHIFT 0x4
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR__SHIFT 0x8
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x10
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN__SHIFT 0x14
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR__SHIFT 0x18
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x00000001L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN_MASK 0x00000010L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR_MASK 0x00000700L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x00010000L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN_MASK 0x00100000L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR_MASK 0x07000000L
+//DSCL0_SCL_MANUAL_REPLICATE_CONTROL
+#define DSCL0_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0
+#define DSCL0_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8
+#define DSCL0_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000FL
+#define DSCL0_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000F00L
+//DSCL0_SCL_HORZ_FILTER_SCALE_RATIO
+#define DSCL0_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0
+#define DSCL0_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL0_SCL_HORZ_FILTER_INIT
+#define DSCL0_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0
+#define DSCL0_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18
+#define DSCL0_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL0_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0x0F000000L
+//DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C
+#define DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL0_SCL_HORZ_FILTER_INIT_C
+#define DSCL0_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0
+#define DSCL0_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18
+#define DSCL0_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL0_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0x0F000000L
+//DSCL0_SCL_VERT_FILTER_SCALE_RATIO
+#define DSCL0_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL0_SCL_VERT_FILTER_INIT
+#define DSCL0_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18
+#define DSCL0_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL0_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x0F000000L
+//DSCL0_SCL_VERT_FILTER_INIT_BOT
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x00FFFFFFL
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x0F000000L
+//DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C
+#define DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL0_SCL_VERT_FILTER_INIT_C
+#define DSCL0_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18
+#define DSCL0_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL0_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x0F000000L
+//DSCL0_SCL_VERT_FILTER_INIT_BOT_C
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0x00FFFFFFL
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x0F000000L
+//DSCL0_SCL_BLACK_COLOR
+#define DSCL0_SCL_BLACK_COLOR__SCL_BLACK_COLOR_RGB_Y__SHIFT 0x0
+#define DSCL0_SCL_BLACK_COLOR__SCL_BLACK_COLOR_CBCR__SHIFT 0x10
+#define DSCL0_SCL_BLACK_COLOR__SCL_BLACK_COLOR_RGB_Y_MASK 0x0000FFFFL
+#define DSCL0_SCL_BLACK_COLOR__SCL_BLACK_COLOR_CBCR_MASK 0xFFFF0000L
+//DSCL0_DSCL_UPDATE
+#define DSCL0_DSCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0
+#define DSCL0_DSCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L
+//DSCL0_DSCL_AUTOCAL
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_MODE__SHIFT 0x0
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE__SHIFT 0x8
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_PIPE_ID__SHIFT 0xc
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_MODE_MASK 0x00000003L
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE_MASK 0x00000300L
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_PIPE_ID_MASK 0x00003000L
+//DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT
+#define DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0
+#define DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10
+#define DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00001FFFL
+#define DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1FFF0000L
+//DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM
+#define DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0
+#define DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10
+#define DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00001FFFL
+#define DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1FFF0000L
+//DSCL0_OTG_H_BLANK
+#define DSCL0_OTG_H_BLANK__OTG_H_BLANK_START__SHIFT 0x0
+#define DSCL0_OTG_H_BLANK__OTG_H_BLANK_END__SHIFT 0x10
+#define DSCL0_OTG_H_BLANK__OTG_H_BLANK_START_MASK 0x00003FFFL
+#define DSCL0_OTG_H_BLANK__OTG_H_BLANK_END_MASK 0x3FFF0000L
+//DSCL0_OTG_V_BLANK
+#define DSCL0_OTG_V_BLANK__OTG_V_BLANK_START__SHIFT 0x0
+#define DSCL0_OTG_V_BLANK__OTG_V_BLANK_END__SHIFT 0x10
+#define DSCL0_OTG_V_BLANK__OTG_V_BLANK_START_MASK 0x00003FFFL
+#define DSCL0_OTG_V_BLANK__OTG_V_BLANK_END_MASK 0x3FFF0000L
+//DSCL0_RECOUT_START
+#define DSCL0_RECOUT_START__RECOUT_START_X__SHIFT 0x0
+#define DSCL0_RECOUT_START__RECOUT_START_Y__SHIFT 0x10
+#define DSCL0_RECOUT_START__RECOUT_START_X_MASK 0x00001FFFL
+#define DSCL0_RECOUT_START__RECOUT_START_Y_MASK 0x1FFF0000L
+//DSCL0_RECOUT_SIZE
+#define DSCL0_RECOUT_SIZE__RECOUT_WIDTH__SHIFT 0x0
+#define DSCL0_RECOUT_SIZE__RECOUT_HEIGHT__SHIFT 0x10
+#define DSCL0_RECOUT_SIZE__RECOUT_WIDTH_MASK 0x00003FFFL
+#define DSCL0_RECOUT_SIZE__RECOUT_HEIGHT_MASK 0x3FFF0000L
+//DSCL0_MPC_SIZE
+#define DSCL0_MPC_SIZE__MPC_WIDTH__SHIFT 0x0
+#define DSCL0_MPC_SIZE__MPC_HEIGHT__SHIFT 0x10
+#define DSCL0_MPC_SIZE__MPC_WIDTH_MASK 0x00003FFFL
+#define DSCL0_MPC_SIZE__MPC_HEIGHT_MASK 0x3FFF0000L
+//DSCL0_LB_DATA_FORMAT
+#define DSCL0_LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x0
+#define DSCL0_LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x4
+#define DSCL0_LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L
+#define DSCL0_LB_DATA_FORMAT__ALPHA_EN_MASK 0x00000010L
+//DSCL0_LB_MEMORY_CTRL
+#define DSCL0_LB_MEMORY_CTRL__MEMORY_CONFIG__SHIFT 0x0
+#define DSCL0_LB_MEMORY_CTRL__LB_MAX_PARTITIONS__SHIFT 0x8
+#define DSCL0_LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10
+#define DSCL0_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C__SHIFT 0x18
+#define DSCL0_LB_MEMORY_CTRL__MEMORY_CONFIG_MASK 0x00000003L
+#define DSCL0_LB_MEMORY_CTRL__LB_MAX_PARTITIONS_MASK 0x00003F00L
+#define DSCL0_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0x007F0000L
+#define DSCL0_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C_MASK 0x7F000000L
+//DSCL0_LB_V_COUNTER
+#define DSCL0_LB_V_COUNTER__V_COUNTER__SHIFT 0x0
+#define DSCL0_LB_V_COUNTER__V_COUNTER_C__SHIFT 0x10
+#define DSCL0_LB_V_COUNTER__V_COUNTER_MASK 0x00001FFFL
+#define DSCL0_LB_V_COUNTER__V_COUNTER_C_MASK 0x1FFF0000L
+//DSCL0_DSCL_MEM_PWR_CTRL
+#define DSCL0_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL0_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS__SHIFT 0x6
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE__SHIFT 0x8
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS__SHIFT 0xa
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE__SHIFT 0xc
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS__SHIFT 0xe
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE__SHIFT 0x10
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS__SHIFT 0x12
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS__SHIFT 0x16
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE__SHIFT 0x18
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS__SHIFT 0x1a
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE__SHIFT 0x1c
+#define DSCL0_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS_MASK 0x00000040L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE_MASK 0x00000300L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS_MASK 0x00000400L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE_MASK 0x00003000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS_MASK 0x00004000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE_MASK 0x00030000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS_MASK 0x00040000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS_MASK 0x00400000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE_MASK 0x03000000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS_MASK 0x04000000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE_MASK 0x10000000L
+//DSCL0_DSCL_MEM_PWR_STATUS
+#define DSCL0_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE__SHIFT 0x0
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE__SHIFT 0x2
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE__SHIFT 0x4
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE__SHIFT 0x6
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE__SHIFT 0x8
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE__SHIFT 0xa
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE__SHIFT 0xc
+#define DSCL0_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE_MASK 0x00000003L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE_MASK 0x0000000CL
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE_MASK 0x00000030L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE_MASK 0x000000C0L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE_MASK 0x00000300L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE_MASK 0x00000C00L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE_MASK 0x00003000L
+//DSCL0_OBUF_CONTROL
+#define DSCL0_OBUF_CONTROL__OBUF_BYPASS__SHIFT 0x0
+#define DSCL0_OBUF_CONTROL__OBUF_USE_FULL_BUFFER__SHIFT 0x1
+#define DSCL0_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH__SHIFT 0x2
+#define DSCL0_OBUF_CONTROL__OBUF_OUT_HOLD_CNT__SHIFT 0x4
+#define DSCL0_OBUF_CONTROL__OBUF_BYPASS_MASK 0x00000001L
+#define DSCL0_OBUF_CONTROL__OBUF_USE_FULL_BUFFER_MASK 0x00000002L
+#define DSCL0_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH_MASK 0x00000004L
+#define DSCL0_OBUF_CONTROL__OBUF_OUT_HOLD_CNT_MASK 0x000000F0L
+//DSCL0_OBUF_MEM_PWR_CTRL
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE__SHIFT 0x8
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE__SHIFT 0x10
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE_MASK 0x00000100L
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE_MASK 0x00030000L
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cm_dispdec
+//CM0_CM_CONTROL
+#define CM0_CM_CONTROL__CM_BYPASS__SHIFT 0x0
+#define CM0_CM_CONTROL__CM_UPDATE_PENDING__SHIFT 0x8
+#define CM0_CM_CONTROL__CM_BYPASS_MASK 0x00000001L
+#define CM0_CM_CONTROL__CM_UPDATE_PENDING_MASK 0x00000100L
+//CM0_CM_POST_CSC_CONTROL
+#define CM0_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE__SHIFT 0x0
+#define CM0_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_CURRENT__SHIFT 0x2
+#define CM0_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_MASK 0x00000003L
+#define CM0_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_CURRENT_MASK 0x0000000CL
+//CM0_CM_POST_CSC_C11_C12
+#define CM0_CM_POST_CSC_C11_C12__CM_POST_CSC_C11__SHIFT 0x0
+#define CM0_CM_POST_CSC_C11_C12__CM_POST_CSC_C12__SHIFT 0x10
+#define CM0_CM_POST_CSC_C11_C12__CM_POST_CSC_C11_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_C11_C12__CM_POST_CSC_C12_MASK 0xFFFF0000L
+//CM0_CM_POST_CSC_C13_C14
+#define CM0_CM_POST_CSC_C13_C14__CM_POST_CSC_C13__SHIFT 0x0
+#define CM0_CM_POST_CSC_C13_C14__CM_POST_CSC_C14__SHIFT 0x10
+#define CM0_CM_POST_CSC_C13_C14__CM_POST_CSC_C13_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_C13_C14__CM_POST_CSC_C14_MASK 0xFFFF0000L
+//CM0_CM_POST_CSC_C21_C22
+#define CM0_CM_POST_CSC_C21_C22__CM_POST_CSC_C21__SHIFT 0x0
+#define CM0_CM_POST_CSC_C21_C22__CM_POST_CSC_C22__SHIFT 0x10
+#define CM0_CM_POST_CSC_C21_C22__CM_POST_CSC_C21_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_C21_C22__CM_POST_CSC_C22_MASK 0xFFFF0000L
+//CM0_CM_POST_CSC_C23_C24
+#define CM0_CM_POST_CSC_C23_C24__CM_POST_CSC_C23__SHIFT 0x0
+#define CM0_CM_POST_CSC_C23_C24__CM_POST_CSC_C24__SHIFT 0x10
+#define CM0_CM_POST_CSC_C23_C24__CM_POST_CSC_C23_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_C23_C24__CM_POST_CSC_C24_MASK 0xFFFF0000L
+//CM0_CM_POST_CSC_C31_C32
+#define CM0_CM_POST_CSC_C31_C32__CM_POST_CSC_C31__SHIFT 0x0
+#define CM0_CM_POST_CSC_C31_C32__CM_POST_CSC_C32__SHIFT 0x10
+#define CM0_CM_POST_CSC_C31_C32__CM_POST_CSC_C31_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_C31_C32__CM_POST_CSC_C32_MASK 0xFFFF0000L
+//CM0_CM_POST_CSC_C33_C34
+#define CM0_CM_POST_CSC_C33_C34__CM_POST_CSC_C33__SHIFT 0x0
+#define CM0_CM_POST_CSC_C33_C34__CM_POST_CSC_C34__SHIFT 0x10
+#define CM0_CM_POST_CSC_C33_C34__CM_POST_CSC_C33_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_C33_C34__CM_POST_CSC_C34_MASK 0xFFFF0000L
+//CM0_CM_POST_CSC_B_C11_C12
+#define CM0_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C11__SHIFT 0x0
+#define CM0_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C12__SHIFT 0x10
+#define CM0_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C11_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C12_MASK 0xFFFF0000L
+//CM0_CM_POST_CSC_B_C13_C14
+#define CM0_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C13__SHIFT 0x0
+#define CM0_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C14__SHIFT 0x10
+#define CM0_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C13_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C14_MASK 0xFFFF0000L
+//CM0_CM_POST_CSC_B_C21_C22
+#define CM0_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C21__SHIFT 0x0
+#define CM0_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C22__SHIFT 0x10
+#define CM0_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C21_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C22_MASK 0xFFFF0000L
+//CM0_CM_POST_CSC_B_C23_C24
+#define CM0_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C23__SHIFT 0x0
+#define CM0_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C24__SHIFT 0x10
+#define CM0_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C23_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C24_MASK 0xFFFF0000L
+//CM0_CM_POST_CSC_B_C31_C32
+#define CM0_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C31__SHIFT 0x0
+#define CM0_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C32__SHIFT 0x10
+#define CM0_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C31_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C32_MASK 0xFFFF0000L
+//CM0_CM_POST_CSC_B_C33_C34
+#define CM0_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C33__SHIFT 0x0
+#define CM0_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C34__SHIFT 0x10
+#define CM0_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C33_MASK 0x0000FFFFL
+#define CM0_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C34_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_CONTROL
+#define CM0_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_CURRENT__SHIFT 0x2
+#define CM0_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define CM0_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_CURRENT_MASK 0x0000000CL
+//CM0_CM_GAMUT_REMAP_C11_C12
+#define CM0_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_C13_C14
+#define CM0_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_C21_C22
+#define CM0_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_C23_C24
+#define CM0_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_C31_C32
+#define CM0_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_C33_C34
+#define CM0_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C11_C12
+#define CM0_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C13_C14
+#define CM0_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C21_C22
+#define CM0_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C23_C24
+#define CM0_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C31_C32
+#define CM0_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C33_C34
+#define CM0_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34_MASK 0xFFFF0000L
+//CM0_CM_BIAS_CR_R
+#define CM0_CM_BIAS_CR_R__CM_BIAS_CR_R__SHIFT 0x0
+#define CM0_CM_BIAS_CR_R__CM_BIAS_CR_R_MASK 0x0000FFFFL
+//CM0_CM_BIAS_Y_G_CB_B
+#define CM0_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G__SHIFT 0x0
+#define CM0_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B__SHIFT 0x10
+#define CM0_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G_MASK 0x0000FFFFL
+#define CM0_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B_MASK 0xFFFF0000L
+//CM0_CM_GAMCOR_CONTROL
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE__SHIFT 0x0
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT__SHIFT 0x2
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_PWL_DISABLE__SHIFT 0x3
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_CURRENT__SHIFT 0x4
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_CURRENT__SHIFT 0x6
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_MASK 0x00000003L
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_MASK 0x00000004L
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_PWL_DISABLE_MASK 0x00000008L
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_CURRENT_MASK 0x00000030L
+#define CM0_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_CURRENT_MASK 0x00000040L
+//CM0_CM_GAMCOR_LUT_INDEX
+#define CM0_CM_GAMCOR_LUT_INDEX__CM_GAMCOR_LUT_INDEX__SHIFT 0x0
+#define CM0_CM_GAMCOR_LUT_INDEX__CM_GAMCOR_LUT_INDEX_MASK 0x000001FFL
+//CM0_CM_GAMCOR_LUT_DATA
+#define CM0_CM_GAMCOR_LUT_DATA__CM_GAMCOR_LUT_DATA__SHIFT 0x0
+#define CM0_CM_GAMCOR_LUT_DATA__CM_GAMCOR_LUT_DATA_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_LUT_CONTROL
+#define CM0_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define CM0_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define CM0_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_HOST_SEL__SHIFT 0x6
+#define CM0_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_CONFIG_MODE__SHIFT 0x7
+#define CM0_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define CM0_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define CM0_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_HOST_SEL_MASK 0x00000040L
+#define CM0_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_CONFIG_MODE_MASK 0x00000080L
+//CM0_CM_GAMCOR_RAMA_START_CNTL_B
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM0_CM_GAMCOR_RAMA_START_CNTL_G
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM0_CM_GAMCOR_RAMA_START_CNTL_R
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B
+#define CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G
+#define CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R
+#define CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_B
+#define CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_G
+#define CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_R
+#define CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMA_END_CNTL1_B
+#define CM0_CM_GAMCOR_RAMA_END_CNTL1_B__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_END_CNTL1_B__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMA_END_CNTL2_B
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//CM0_CM_GAMCOR_RAMA_END_CNTL1_G
+#define CM0_CM_GAMCOR_RAMA_END_CNTL1_G__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_END_CNTL1_G__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMA_END_CNTL2_G
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//CM0_CM_GAMCOR_RAMA_END_CNTL1_R
+#define CM0_CM_GAMCOR_RAMA_END_CNTL1_R__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_END_CNTL1_R__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMA_END_CNTL2_R
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM0_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//CM0_CM_GAMCOR_RAMA_OFFSET_B
+#define CM0_CM_GAMCOR_RAMA_OFFSET_B__CM_GAMCOR_RAMA_OFFSET_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_OFFSET_B__CM_GAMCOR_RAMA_OFFSET_B_MASK 0x0007FFFFL
+//CM0_CM_GAMCOR_RAMA_OFFSET_G
+#define CM0_CM_GAMCOR_RAMA_OFFSET_G__CM_GAMCOR_RAMA_OFFSET_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_OFFSET_G__CM_GAMCOR_RAMA_OFFSET_G_MASK 0x0007FFFFL
+//CM0_CM_GAMCOR_RAMA_OFFSET_R
+#define CM0_CM_GAMCOR_RAMA_OFFSET_R__CM_GAMCOR_RAMA_OFFSET_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_OFFSET_R__CM_GAMCOR_RAMA_OFFSET_R_MASK 0x0007FFFFL
+//CM0_CM_GAMCOR_RAMA_REGION_0_1
+#define CM0_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_2_3
+#define CM0_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_4_5
+#define CM0_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_6_7
+#define CM0_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_8_9
+#define CM0_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_10_11
+#define CM0_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_12_13
+#define CM0_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_14_15
+#define CM0_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_16_17
+#define CM0_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_18_19
+#define CM0_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_20_21
+#define CM0_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_22_23
+#define CM0_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_24_25
+#define CM0_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_26_27
+#define CM0_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_28_29
+#define CM0_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_30_31
+#define CM0_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMA_REGION_32_33
+#define CM0_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_START_CNTL_B
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM0_CM_GAMCOR_RAMB_START_CNTL_G
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM0_CM_GAMCOR_RAMB_START_CNTL_R
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM0_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B
+#define CM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G
+#define CM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R
+#define CM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMB_START_BASE_CNTL_B
+#define CM0_CM_GAMCOR_RAMB_START_BASE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_START_BASE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMB_START_BASE_CNTL_G
+#define CM0_CM_GAMCOR_RAMB_START_BASE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_START_BASE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMB_START_BASE_CNTL_R
+#define CM0_CM_GAMCOR_RAMB_START_BASE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_START_BASE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMB_END_CNTL1_B
+#define CM0_CM_GAMCOR_RAMB_END_CNTL1_B__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_END_CNTL1_B__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMB_END_CNTL2_B
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//CM0_CM_GAMCOR_RAMB_END_CNTL1_G
+#define CM0_CM_GAMCOR_RAMB_END_CNTL1_G__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_END_CNTL1_G__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMB_END_CNTL2_G
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//CM0_CM_GAMCOR_RAMB_END_CNTL1_R
+#define CM0_CM_GAMCOR_RAMB_END_CNTL1_R__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_END_CNTL1_R__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//CM0_CM_GAMCOR_RAMB_END_CNTL2_R
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM0_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//CM0_CM_GAMCOR_RAMB_OFFSET_B
+#define CM0_CM_GAMCOR_RAMB_OFFSET_B__CM_GAMCOR_RAMB_OFFSET_B__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_OFFSET_B__CM_GAMCOR_RAMB_OFFSET_B_MASK 0x0007FFFFL
+//CM0_CM_GAMCOR_RAMB_OFFSET_G
+#define CM0_CM_GAMCOR_RAMB_OFFSET_G__CM_GAMCOR_RAMB_OFFSET_G__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_OFFSET_G__CM_GAMCOR_RAMB_OFFSET_G_MASK 0x0007FFFFL
+//CM0_CM_GAMCOR_RAMB_OFFSET_R
+#define CM0_CM_GAMCOR_RAMB_OFFSET_R__CM_GAMCOR_RAMB_OFFSET_R__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_OFFSET_R__CM_GAMCOR_RAMB_OFFSET_R_MASK 0x0007FFFFL
+//CM0_CM_GAMCOR_RAMB_REGION_0_1
+#define CM0_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_2_3
+#define CM0_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_4_5
+#define CM0_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_6_7
+#define CM0_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_8_9
+#define CM0_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_10_11
+#define CM0_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_12_13
+#define CM0_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_14_15
+#define CM0_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_16_17
+#define CM0_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_18_19
+#define CM0_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_20_21
+#define CM0_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_22_23
+#define CM0_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_24_25
+#define CM0_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_26_27
+#define CM0_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_28_29
+#define CM0_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_30_31
+#define CM0_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_GAMCOR_RAMB_REGION_32_33
+#define CM0_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_HDR_MULT_COEF
+#define CM0_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
+#define CM0_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK 0x0007FFFFL
+//CM0_CM_MEM_PWR_CTRL
+#define CM0_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_FORCE__SHIFT 0x0
+#define CM0_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_DIS__SHIFT 0x2
+#define CM0_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_FORCE_MASK 0x00000003L
+#define CM0_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_DIS_MASK 0x00000004L
+//CM0_CM_MEM_PWR_STATUS
+#define CM0_CM_MEM_PWR_STATUS__GAMCOR_MEM_PWR_STATE__SHIFT 0x0
+#define CM0_CM_MEM_PWR_STATUS__GAMCOR_MEM_PWR_STATE_MASK 0x00000003L
+//CM0_CM_DEALPHA
+#define CM0_CM_DEALPHA__CM_DEALPHA_EN__SHIFT 0x0
+#define CM0_CM_DEALPHA__CM_DEALPHA_ABLND__SHIFT 0x1
+#define CM0_CM_DEALPHA__CM_DEALPHA_EN_MASK 0x00000001L
+#define CM0_CM_DEALPHA__CM_DEALPHA_ABLND_MASK 0x00000002L
+//CM0_CM_COEF_FORMAT
+#define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT__SHIFT 0x0
+#define CM0_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT__SHIFT 0x4
+#define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x8
+#define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
+#define CM0_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT_MASK 0x00000010L
+#define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+//CM0_CM_TEST_DEBUG_INDEX
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//CM0_CM_TEST_DEBUG_DATA
+#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+//CM0_DPP_CRC_VAL_R
+#define CM0_DPP_CRC_VAL_R__DPP_CRC_R_CR__SHIFT 0x0
+#define CM0_DPP_CRC_VAL_R__DPP_CRC_R_CR_MASK 0xFFFFFFFFL
+//CM0_DPP_CRC_VAL_G
+#define CM0_DPP_CRC_VAL_G__DPP_CRC_G_Y__SHIFT 0x0
+#define CM0_DPP_CRC_VAL_G__DPP_CRC_G_Y_MASK 0xFFFFFFFFL
+//CM0_DPP_CRC_VAL_B
+#define CM0_DPP_CRC_VAL_B__DPP_CRC_B_CB__SHIFT 0x0
+#define CM0_DPP_CRC_VAL_B__DPP_CRC_B_CB_MASK 0xFFFFFFFFL
+//CM0_DPP_CRC_VAL_A
+#define CM0_DPP_CRC_VAL_A__DPP_CRC_ALPHA__SHIFT 0x0
+#define CM0_DPP_CRC_VAL_A__DPP_CRC_ALPHA_MASK 0xFFFFFFFFL
+
+
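As a general illustration only (not part of this patch), each generated <REG>__<FIELD>__SHIFT / <REG>__<FIELD>_MASK pair above is consumed by masking and shifting a 32-bit register value; the driver has its own register-access macros for this, and the helper names below (example_get_field, example_set_field, example_hdr_mult) are hypothetical, a minimal sketch of the pattern:

/* Illustrative sketch only; helper names are hypothetical. */
#include <stdint.h>

static inline uint32_t example_get_field(uint32_t reg_val,
					 uint32_t mask, uint32_t shift)
{
	/* Isolate the field bits, then right-align them. */
	return (reg_val & mask) >> shift;
}

static inline uint32_t example_set_field(uint32_t reg_val, uint32_t mask,
					 uint32_t shift, uint32_t field_val)
{
	/* Clear the field, then OR in the new value at its bit position. */
	return (reg_val & ~mask) | ((field_val << shift) & mask);
}

/* e.g. pulling the HDR multiplier coefficient out of a register dump: */
static inline uint32_t example_hdr_mult(uint32_t cm_hdr_mult_coef_reg)
{
	return example_get_field(cm_hdr_mult_coef_reg,
				 CM0_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK,
				 CM0_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT);
}
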
+// addressBlock: dce_dc_dpp0_dispdec_dpp_top_dispdec
+//DPP_TOP0_DPP_CONTROL
+#define DPP_TOP0_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT 0x4
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_GATE_DISABLE__SHIFT 0x8
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE__SHIFT 0xa
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE__SHIFT 0xc
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_R_GATE_DISABLE__SHIFT 0xe
+#define DPP_TOP0_DPP_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x10
+#define DPP_TOP0_DPP_CONTROL__DISPCLK_G_GATE_DISABLE__SHIFT 0x12
+#define DPP_TOP0_DPP_CONTROL__DPP_FGCG_REP_DIS__SHIFT 0x18
+#define DPP_TOP0_DPP_CONTROL__DPP_TEST_CLK_SEL__SHIFT 0x1c
+#define DPP_TOP0_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK 0x00000010L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE_MASK 0x00000400L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE_MASK 0x00001000L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_R_GATE_DISABLE_MASK 0x00004000L
+#define DPP_TOP0_DPP_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00010000L
+#define DPP_TOP0_DPP_CONTROL__DISPCLK_G_GATE_DISABLE_MASK 0x00040000L
+#define DPP_TOP0_DPP_CONTROL__DPP_FGCG_REP_DIS_MASK 0x01000000L
+#define DPP_TOP0_DPP_CONTROL__DPP_TEST_CLK_SEL_MASK 0x70000000L
+//DPP_TOP0_DPP_SOFT_RESET
+#define DPP_TOP0_DPP_SOFT_RESET__CNVC_SOFT_RESET__SHIFT 0x0
+#define DPP_TOP0_DPP_SOFT_RESET__DSCL_SOFT_RESET__SHIFT 0x4
+#define DPP_TOP0_DPP_SOFT_RESET__CM_SOFT_RESET__SHIFT 0x8
+#define DPP_TOP0_DPP_SOFT_RESET__OBUF_SOFT_RESET__SHIFT 0xc
+#define DPP_TOP0_DPP_SOFT_RESET__CNVC_SOFT_RESET_MASK 0x00000001L
+#define DPP_TOP0_DPP_SOFT_RESET__DSCL_SOFT_RESET_MASK 0x00000010L
+#define DPP_TOP0_DPP_SOFT_RESET__CM_SOFT_RESET_MASK 0x00000100L
+#define DPP_TOP0_DPP_SOFT_RESET__OBUF_SOFT_RESET_MASK 0x00001000L
+//DPP_TOP0_DPP_CRC_CTRL
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_EN__SHIFT 0x0
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CONT_EN__SHIFT 0x1
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING__SHIFT 0x2
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL__SHIFT 0x3
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_SRC_SEL__SHIFT 0x4
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_STEREO_EN__SHIFT 0x6
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE__SHIFT 0x7
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE__SHIFT 0x9
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL__SHIFT 0xb
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL__SHIFT 0xe
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_MASK__SHIFT 0x10
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_EN_MASK 0x00000001L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CONT_EN_MASK 0x00000002L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL_MASK 0x00000008L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_SRC_SEL_MASK 0x00000030L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_STEREO_EN_MASK 0x00000040L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE_MASK 0x00000180L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE_MASK 0x00000600L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL_MASK 0x00003800L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL_MASK 0x0000C000L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_MASK_MASK 0xFFFF0000L
+//DPP_TOP0_HOST_READ_CONTROL
+#define DPP_TOP0_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define DPP_TOP0_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+
+
+// addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON11_PERFCOUNTER_CNTL
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON11_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON11_PERFCOUNTER_CNTL2
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON11_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON11_PERFCOUNTER_STATE
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON11_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON11_PERFMON_CNTL
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON11_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON11_PERFMON_CNTL2
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON11_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON11_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON11_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON11_PERFMON_CVALUE_LOW
+#define DC_PERFMON11_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON11_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON11_PERFMON_HI
+#define DC_PERFMON11_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON11_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON11_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON11_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON11_PERFMON_LOW
+#define DC_PERFMON11_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON11_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cnvc_cfg_dispdec
+//CNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT
+#define CNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define CNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT__CNVC_ALPHA_PLANE_ENABLE__SHIFT 0x8
+#define CNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define CNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT__CNVC_ALPHA_PLANE_ENABLE_MASK 0x00000100L
+//CNVC_CFG1_FORMAT_CONTROL
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_EXPANSION_MODE__SHIFT 0x0
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CNV16__SHIFT 0x4
+#define CNVC_CFG1_FORMAT_CONTROL__ALPHA_EN__SHIFT 0x8
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_BYPASS__SHIFT 0xc
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN__SHIFT 0xd
+#define CNVC_CFG1_FORMAT_CONTROL__CLAMP_POSITIVE__SHIFT 0x10
+#define CNVC_CFG1_FORMAT_CONTROL__CLAMP_POSITIVE_C__SHIFT 0x11
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_UPDATE_PENDING__SHIFT 0x14
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CROSSBAR_R__SHIFT 0x18
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CROSSBAR_G__SHIFT 0x1a
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CROSSBAR_B__SHIFT 0x1c
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_EXPANSION_MODE_MASK 0x00000001L
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CNV16_MASK 0x00000010L
+#define CNVC_CFG1_FORMAT_CONTROL__ALPHA_EN_MASK 0x00000100L
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_BYPASS_MASK 0x00001000L
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN_MASK 0x00002000L
+#define CNVC_CFG1_FORMAT_CONTROL__CLAMP_POSITIVE_MASK 0x00010000L
+#define CNVC_CFG1_FORMAT_CONTROL__CLAMP_POSITIVE_C_MASK 0x00020000L
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_UPDATE_PENDING_MASK 0x00100000L
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CROSSBAR_R_MASK 0x03000000L
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CROSSBAR_G_MASK 0x0C000000L
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CROSSBAR_B_MASK 0x30000000L
+//CNVC_CFG1_FCNV_FP_BIAS_R
+#define CNVC_CFG1_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R_MASK 0x0007FFFFL
+//CNVC_CFG1_FCNV_FP_BIAS_G
+#define CNVC_CFG1_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G_MASK 0x0007FFFFL
+//CNVC_CFG1_FCNV_FP_BIAS_B
+#define CNVC_CFG1_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B_MASK 0x0007FFFFL
+//CNVC_CFG1_FCNV_FP_SCALE_R
+#define CNVC_CFG1_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R_MASK 0x0007FFFFL
+//CNVC_CFG1_FCNV_FP_SCALE_G
+#define CNVC_CFG1_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G_MASK 0x0007FFFFL
+//CNVC_CFG1_FCNV_FP_SCALE_B
+#define CNVC_CFG1_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B_MASK 0x0007FFFFL
+//CNVC_CFG1_COLOR_KEYER_CONTROL
+#define CNVC_CFG1_COLOR_KEYER_CONTROL__COLOR_KEYER_EN__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE__SHIFT 0x4
+#define CNVC_CFG1_COLOR_KEYER_CONTROL__COLOR_KEYER_EN_MASK 0x00000001L
+#define CNVC_CFG1_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE_MASK 0x00000030L
+//CNVC_CFG1_COLOR_KEYER_ALPHA
+#define CNVC_CFG1_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH__SHIFT 0x10
+#define CNVC_CFG1_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG1_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG1_COLOR_KEYER_RED
+#define CNVC_CFG1_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT 0x10
+#define CNVC_CFG1_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG1_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG1_COLOR_KEYER_GREEN
+#define CNVC_CFG1_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH__SHIFT 0x10
+#define CNVC_CFG1_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG1_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG1_COLOR_KEYER_BLUE
+#define CNVC_CFG1_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH__SHIFT 0x10
+#define CNVC_CFG1_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG1_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG1_ALPHA_2BIT_LUT
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0__SHIFT 0x0
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1__SHIFT 0x8
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2__SHIFT 0x10
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3__SHIFT 0x18
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0_MASK 0x000000FFL
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1_MASK 0x0000FF00L
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2_MASK 0x00FF0000L
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3_MASK 0xFF000000L
+//CNVC_CFG1_PRE_DEALPHA
+#define CNVC_CFG1_PRE_DEALPHA__PRE_DEALPHA_EN__SHIFT 0x0
+#define CNVC_CFG1_PRE_DEALPHA__PRE_DEALPHA_ABLND_EN__SHIFT 0x4
+#define CNVC_CFG1_PRE_DEALPHA__PRE_DEALPHA_EN_MASK 0x00000001L
+#define CNVC_CFG1_PRE_DEALPHA__PRE_DEALPHA_ABLND_EN_MASK 0x00000010L
+//CNVC_CFG1_PRE_CSC_MODE
+#define CNVC_CFG1_PRE_CSC_MODE__PRE_CSC_MODE__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_MODE__PRE_CSC_MODE_CURRENT__SHIFT 0x2
+#define CNVC_CFG1_PRE_CSC_MODE__PRE_CSC_MODE_MASK 0x00000003L
+#define CNVC_CFG1_PRE_CSC_MODE__PRE_CSC_MODE_CURRENT_MASK 0x0000000CL
+//CNVC_CFG1_PRE_CSC_C11_C12
+#define CNVC_CFG1_PRE_CSC_C11_C12__PRE_CSC_C11__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_C11_C12__PRE_CSC_C12__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_C11_C12__PRE_CSC_C11_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_C11_C12__PRE_CSC_C12_MASK 0xFFFF0000L
+//CNVC_CFG1_PRE_CSC_C13_C14
+#define CNVC_CFG1_PRE_CSC_C13_C14__PRE_CSC_C13__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_C13_C14__PRE_CSC_C14__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_C13_C14__PRE_CSC_C13_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_C13_C14__PRE_CSC_C14_MASK 0xFFFF0000L
+//CNVC_CFG1_PRE_CSC_C21_C22
+#define CNVC_CFG1_PRE_CSC_C21_C22__PRE_CSC_C21__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_C21_C22__PRE_CSC_C22__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_C21_C22__PRE_CSC_C21_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_C21_C22__PRE_CSC_C22_MASK 0xFFFF0000L
+//CNVC_CFG1_PRE_CSC_C23_C24
+#define CNVC_CFG1_PRE_CSC_C23_C24__PRE_CSC_C23__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_C23_C24__PRE_CSC_C24__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_C23_C24__PRE_CSC_C23_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_C23_C24__PRE_CSC_C24_MASK 0xFFFF0000L
+//CNVC_CFG1_PRE_CSC_C31_C32
+#define CNVC_CFG1_PRE_CSC_C31_C32__PRE_CSC_C31__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_C31_C32__PRE_CSC_C32__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_C31_C32__PRE_CSC_C31_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_C31_C32__PRE_CSC_C32_MASK 0xFFFF0000L
+//CNVC_CFG1_PRE_CSC_C33_C34
+#define CNVC_CFG1_PRE_CSC_C33_C34__PRE_CSC_C33__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_C33_C34__PRE_CSC_C34__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_C33_C34__PRE_CSC_C33_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_C33_C34__PRE_CSC_C34_MASK 0xFFFF0000L
+//CNVC_CFG1_PRE_CSC_B_C11_C12
+#define CNVC_CFG1_PRE_CSC_B_C11_C12__PRE_CSC_B_C11__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_B_C11_C12__PRE_CSC_B_C12__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_B_C11_C12__PRE_CSC_B_C11_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_B_C11_C12__PRE_CSC_B_C12_MASK 0xFFFF0000L
+//CNVC_CFG1_PRE_CSC_B_C13_C14
+#define CNVC_CFG1_PRE_CSC_B_C13_C14__PRE_CSC_B_C13__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_B_C13_C14__PRE_CSC_B_C14__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_B_C13_C14__PRE_CSC_B_C13_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_B_C13_C14__PRE_CSC_B_C14_MASK 0xFFFF0000L
+//CNVC_CFG1_PRE_CSC_B_C21_C22
+#define CNVC_CFG1_PRE_CSC_B_C21_C22__PRE_CSC_B_C21__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_B_C21_C22__PRE_CSC_B_C22__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_B_C21_C22__PRE_CSC_B_C21_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_B_C21_C22__PRE_CSC_B_C22_MASK 0xFFFF0000L
+//CNVC_CFG1_PRE_CSC_B_C23_C24
+#define CNVC_CFG1_PRE_CSC_B_C23_C24__PRE_CSC_B_C23__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_B_C23_C24__PRE_CSC_B_C24__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_B_C23_C24__PRE_CSC_B_C23_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_B_C23_C24__PRE_CSC_B_C24_MASK 0xFFFF0000L
+//CNVC_CFG1_PRE_CSC_B_C31_C32
+#define CNVC_CFG1_PRE_CSC_B_C31_C32__PRE_CSC_B_C31__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_B_C31_C32__PRE_CSC_B_C32__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_B_C31_C32__PRE_CSC_B_C31_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_B_C31_C32__PRE_CSC_B_C32_MASK 0xFFFF0000L
+//CNVC_CFG1_PRE_CSC_B_C33_C34
+#define CNVC_CFG1_PRE_CSC_B_C33_C34__PRE_CSC_B_C33__SHIFT 0x0
+#define CNVC_CFG1_PRE_CSC_B_C33_C34__PRE_CSC_B_C34__SHIFT 0x10
+#define CNVC_CFG1_PRE_CSC_B_C33_C34__PRE_CSC_B_C33_MASK 0x0000FFFFL
+#define CNVC_CFG1_PRE_CSC_B_C33_C34__PRE_CSC_B_C34_MASK 0xFFFF0000L
+//CNVC_CFG1_CNVC_COEF_FORMAT
+#define CNVC_CFG1_CNVC_COEF_FORMAT__PRE_CSC_COEF_FORMAT__SHIFT 0x0
+#define CNVC_CFG1_CNVC_COEF_FORMAT__PRE_CSC_COEF_FORMAT_MASK 0x00000001L
+//CNVC_CFG1_PRE_DEGAM
+#define CNVC_CFG1_PRE_DEGAM__PRE_DEGAM_MODE__SHIFT 0x0
+#define CNVC_CFG1_PRE_DEGAM__PRE_DEGAM_SELECT__SHIFT 0x4
+#define CNVC_CFG1_PRE_DEGAM__PRE_DEGAM_MODE_MASK 0x00000003L
+#define CNVC_CFG1_PRE_DEGAM__PRE_DEGAM_SELECT_MASK 0x00000070L
+//CNVC_CFG1_PRE_REALPHA
+#define CNVC_CFG1_PRE_REALPHA__PRE_REALPHA_EN__SHIFT 0x0
+#define CNVC_CFG1_PRE_REALPHA__PRE_REALPHA_ABLND_EN__SHIFT 0x4
+#define CNVC_CFG1_PRE_REALPHA__PRE_REALPHA_EN_MASK 0x00000001L
+#define CNVC_CFG1_PRE_REALPHA__PRE_REALPHA_ABLND_EN_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cnvc_cur_dispdec
+//CNVC_CUR1_CURSOR0_CONTROL
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_ENABLE__SHIFT 0x0
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_EXPANSION_MODE__SHIFT 0x1
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_PIX_INV_MODE__SHIFT 0x2
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_ROM_EN__SHIFT 0x3
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_MODE__SHIFT 0x4
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN__SHIFT 0x7
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_UPDATE_PENDING__SHIFT 0x10
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_ENABLE_MASK 0x00000001L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_EXPANSION_MODE_MASK 0x00000002L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_PIX_INV_MODE_MASK 0x00000004L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_ROM_EN_MASK 0x00000008L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_MODE_MASK 0x00000070L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN_MASK 0x00000080L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_UPDATE_PENDING_MASK 0x00010000L
+//CNVC_CUR1_CURSOR0_COLOR0
+#define CNVC_CUR1_CURSOR0_COLOR0__CUR0_COLOR0__SHIFT 0x0
+#define CNVC_CUR1_CURSOR0_COLOR0__CUR0_COLOR0_MASK 0x00FFFFFFL
+//CNVC_CUR1_CURSOR0_COLOR1
+#define CNVC_CUR1_CURSOR0_COLOR1__CUR0_COLOR1__SHIFT 0x0
+#define CNVC_CUR1_CURSOR0_COLOR1__CUR0_COLOR1_MASK 0x00FFFFFFL
+//CNVC_CUR1_CURSOR0_FP_SCALE_BIAS
+#define CNVC_CUR1_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE__SHIFT 0x0
+#define CNVC_CUR1_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS__SHIFT 0x10
+#define CNVC_CUR1_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE_MASK 0x0000FFFFL
+#define CNVC_CUR1_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dpp1_dispdec_dscl_dispdec
+//DSCL1_SCL_COEF_RAM_TAP_SELECT
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE__SHIFT 0x8
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000003L
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE_MASK 0x00003F00L
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE_MASK 0x00030000L
+//DSCL1_SCL_COEF_RAM_TAP_DATA
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L
+//DSCL1_SCL_MODE
+#define DSCL1_SCL_MODE__DSCL_MODE__SHIFT 0x0
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT__SHIFT 0x8
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT__SHIFT 0xc
+#define DSCL1_SCL_MODE__SCL_CHROMA_COEF_MODE__SHIFT 0x10
+#define DSCL1_SCL_MODE__SCL_ALPHA_COEF_MODE__SHIFT 0x14
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_RD__SHIFT 0x18
+#define DSCL1_SCL_MODE__DSCL_MODE_MASK 0x00000007L
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_MASK 0x00000100L
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT_MASK 0x00001000L
+#define DSCL1_SCL_MODE__SCL_CHROMA_COEF_MODE_MASK 0x00010000L
+#define DSCL1_SCL_MODE__SCL_ALPHA_COEF_MODE_MASK 0x00100000L
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_RD_MASK 0x01000000L
+//DSCL1_SCL_TAP_CONTROL
+#define DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT 0x0
+#define DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT 0x4
+#define DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C__SHIFT 0x8
+#define DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C__SHIFT 0xc
+#define DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK 0x00000007L
+#define DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK 0x00000070L
+#define DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C_MASK 0x00000700L
+#define DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C_MASK 0x00007000L
+//DSCL1_DSCL_CONTROL
+#define DSCL1_DSCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0
+#define DSCL1_DSCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x00000001L
+//DSCL1_DSCL_2TAP_CONTROL
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x0
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN__SHIFT 0x4
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR__SHIFT 0x8
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x10
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN__SHIFT 0x14
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR__SHIFT 0x18
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x00000001L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN_MASK 0x00000010L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR_MASK 0x00000700L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x00010000L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN_MASK 0x00100000L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR_MASK 0x07000000L
+//DSCL1_SCL_MANUAL_REPLICATE_CONTROL
+#define DSCL1_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0
+#define DSCL1_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8
+#define DSCL1_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000FL
+#define DSCL1_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000F00L
+//DSCL1_SCL_HORZ_FILTER_SCALE_RATIO
+#define DSCL1_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0
+#define DSCL1_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL1_SCL_HORZ_FILTER_INIT
+#define DSCL1_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0
+#define DSCL1_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18
+#define DSCL1_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL1_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0x0F000000L
+//DSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C
+#define DSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL1_SCL_HORZ_FILTER_INIT_C
+#define DSCL1_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0
+#define DSCL1_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18
+#define DSCL1_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL1_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0x0F000000L
+//DSCL1_SCL_VERT_FILTER_SCALE_RATIO
+#define DSCL1_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL1_SCL_VERT_FILTER_INIT
+#define DSCL1_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18
+#define DSCL1_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL1_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x0F000000L
+//DSCL1_SCL_VERT_FILTER_INIT_BOT
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x00FFFFFFL
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x0F000000L
+//DSCL1_SCL_VERT_FILTER_SCALE_RATIO_C
+#define DSCL1_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL1_SCL_VERT_FILTER_INIT_C
+#define DSCL1_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18
+#define DSCL1_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL1_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x0F000000L
+//DSCL1_SCL_VERT_FILTER_INIT_BOT_C
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0x00FFFFFFL
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x0F000000L
+//DSCL1_SCL_BLACK_COLOR
+#define DSCL1_SCL_BLACK_COLOR__SCL_BLACK_COLOR_RGB_Y__SHIFT 0x0
+#define DSCL1_SCL_BLACK_COLOR__SCL_BLACK_COLOR_CBCR__SHIFT 0x10
+#define DSCL1_SCL_BLACK_COLOR__SCL_BLACK_COLOR_RGB_Y_MASK 0x0000FFFFL
+#define DSCL1_SCL_BLACK_COLOR__SCL_BLACK_COLOR_CBCR_MASK 0xFFFF0000L
+//DSCL1_DSCL_UPDATE
+#define DSCL1_DSCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0
+#define DSCL1_DSCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L
+//DSCL1_DSCL_AUTOCAL
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_MODE__SHIFT 0x0
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE__SHIFT 0x8
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_PIPE_ID__SHIFT 0xc
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_MODE_MASK 0x00000003L
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE_MASK 0x00000300L
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_PIPE_ID_MASK 0x00003000L
+//DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT
+#define DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0
+#define DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10
+#define DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00001FFFL
+#define DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1FFF0000L
+//DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM
+#define DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0
+#define DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10
+#define DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00001FFFL
+#define DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1FFF0000L
+//DSCL1_OTG_H_BLANK
+#define DSCL1_OTG_H_BLANK__OTG_H_BLANK_START__SHIFT 0x0
+#define DSCL1_OTG_H_BLANK__OTG_H_BLANK_END__SHIFT 0x10
+#define DSCL1_OTG_H_BLANK__OTG_H_BLANK_START_MASK 0x00003FFFL
+#define DSCL1_OTG_H_BLANK__OTG_H_BLANK_END_MASK 0x3FFF0000L
+//DSCL1_OTG_V_BLANK
+#define DSCL1_OTG_V_BLANK__OTG_V_BLANK_START__SHIFT 0x0
+#define DSCL1_OTG_V_BLANK__OTG_V_BLANK_END__SHIFT 0x10
+#define DSCL1_OTG_V_BLANK__OTG_V_BLANK_START_MASK 0x00003FFFL
+#define DSCL1_OTG_V_BLANK__OTG_V_BLANK_END_MASK 0x3FFF0000L
+//DSCL1_RECOUT_START
+#define DSCL1_RECOUT_START__RECOUT_START_X__SHIFT 0x0
+#define DSCL1_RECOUT_START__RECOUT_START_Y__SHIFT 0x10
+#define DSCL1_RECOUT_START__RECOUT_START_X_MASK 0x00001FFFL
+#define DSCL1_RECOUT_START__RECOUT_START_Y_MASK 0x1FFF0000L
+//DSCL1_RECOUT_SIZE
+#define DSCL1_RECOUT_SIZE__RECOUT_WIDTH__SHIFT 0x0
+#define DSCL1_RECOUT_SIZE__RECOUT_HEIGHT__SHIFT 0x10
+#define DSCL1_RECOUT_SIZE__RECOUT_WIDTH_MASK 0x00003FFFL
+#define DSCL1_RECOUT_SIZE__RECOUT_HEIGHT_MASK 0x3FFF0000L
+//DSCL1_MPC_SIZE
+#define DSCL1_MPC_SIZE__MPC_WIDTH__SHIFT 0x0
+#define DSCL1_MPC_SIZE__MPC_HEIGHT__SHIFT 0x10
+#define DSCL1_MPC_SIZE__MPC_WIDTH_MASK 0x00003FFFL
+#define DSCL1_MPC_SIZE__MPC_HEIGHT_MASK 0x3FFF0000L
+//DSCL1_LB_DATA_FORMAT
+#define DSCL1_LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x0
+#define DSCL1_LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x4
+#define DSCL1_LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L
+#define DSCL1_LB_DATA_FORMAT__ALPHA_EN_MASK 0x00000010L
+//DSCL1_LB_MEMORY_CTRL
+#define DSCL1_LB_MEMORY_CTRL__MEMORY_CONFIG__SHIFT 0x0
+#define DSCL1_LB_MEMORY_CTRL__LB_MAX_PARTITIONS__SHIFT 0x8
+#define DSCL1_LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10
+#define DSCL1_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C__SHIFT 0x18
+#define DSCL1_LB_MEMORY_CTRL__MEMORY_CONFIG_MASK 0x00000003L
+#define DSCL1_LB_MEMORY_CTRL__LB_MAX_PARTITIONS_MASK 0x00003F00L
+#define DSCL1_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0x007F0000L
+#define DSCL1_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C_MASK 0x7F000000L
+//DSCL1_LB_V_COUNTER
+#define DSCL1_LB_V_COUNTER__V_COUNTER__SHIFT 0x0
+#define DSCL1_LB_V_COUNTER__V_COUNTER_C__SHIFT 0x10
+#define DSCL1_LB_V_COUNTER__V_COUNTER_MASK 0x00001FFFL
+#define DSCL1_LB_V_COUNTER__V_COUNTER_C_MASK 0x1FFF0000L
+//DSCL1_DSCL_MEM_PWR_CTRL
+#define DSCL1_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL1_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS__SHIFT 0x6
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE__SHIFT 0x8
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS__SHIFT 0xa
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE__SHIFT 0xc
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS__SHIFT 0xe
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE__SHIFT 0x10
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS__SHIFT 0x12
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS__SHIFT 0x16
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE__SHIFT 0x18
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS__SHIFT 0x1a
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE__SHIFT 0x1c
+#define DSCL1_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS_MASK 0x00000040L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE_MASK 0x00000300L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS_MASK 0x00000400L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE_MASK 0x00003000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS_MASK 0x00004000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE_MASK 0x00030000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS_MASK 0x00040000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS_MASK 0x00400000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE_MASK 0x03000000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS_MASK 0x04000000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE_MASK 0x10000000L
+//DSCL1_DSCL_MEM_PWR_STATUS
+#define DSCL1_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE__SHIFT 0x0
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE__SHIFT 0x2
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE__SHIFT 0x4
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE__SHIFT 0x6
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE__SHIFT 0x8
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE__SHIFT 0xa
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE__SHIFT 0xc
+#define DSCL1_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE_MASK 0x00000003L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE_MASK 0x0000000CL
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE_MASK 0x00000030L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE_MASK 0x000000C0L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE_MASK 0x00000300L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE_MASK 0x00000C00L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE_MASK 0x00003000L
+//DSCL1_OBUF_CONTROL
+#define DSCL1_OBUF_CONTROL__OBUF_BYPASS__SHIFT 0x0
+#define DSCL1_OBUF_CONTROL__OBUF_USE_FULL_BUFFER__SHIFT 0x1
+#define DSCL1_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH__SHIFT 0x2
+#define DSCL1_OBUF_CONTROL__OBUF_OUT_HOLD_CNT__SHIFT 0x4
+#define DSCL1_OBUF_CONTROL__OBUF_BYPASS_MASK 0x00000001L
+#define DSCL1_OBUF_CONTROL__OBUF_USE_FULL_BUFFER_MASK 0x00000002L
+#define DSCL1_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH_MASK 0x00000004L
+#define DSCL1_OBUF_CONTROL__OBUF_OUT_HOLD_CNT_MASK 0x000000F0L
+//DSCL1_OBUF_MEM_PWR_CTRL
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE__SHIFT 0x8
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE__SHIFT 0x10
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE_MASK 0x00000100L
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE_MASK 0x00030000L
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cm_dispdec
+//CM1_CM_CONTROL
+#define CM1_CM_CONTROL__CM_BYPASS__SHIFT 0x0
+#define CM1_CM_CONTROL__CM_UPDATE_PENDING__SHIFT 0x8
+#define CM1_CM_CONTROL__CM_BYPASS_MASK 0x00000001L
+#define CM1_CM_CONTROL__CM_UPDATE_PENDING_MASK 0x00000100L
+//CM1_CM_POST_CSC_CONTROL
+#define CM1_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE__SHIFT 0x0
+#define CM1_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_CURRENT__SHIFT 0x2
+#define CM1_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_MASK 0x00000003L
+#define CM1_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_CURRENT_MASK 0x0000000CL
+//CM1_CM_POST_CSC_C11_C12
+#define CM1_CM_POST_CSC_C11_C12__CM_POST_CSC_C11__SHIFT 0x0
+#define CM1_CM_POST_CSC_C11_C12__CM_POST_CSC_C12__SHIFT 0x10
+#define CM1_CM_POST_CSC_C11_C12__CM_POST_CSC_C11_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_C11_C12__CM_POST_CSC_C12_MASK 0xFFFF0000L
+//CM1_CM_POST_CSC_C13_C14
+#define CM1_CM_POST_CSC_C13_C14__CM_POST_CSC_C13__SHIFT 0x0
+#define CM1_CM_POST_CSC_C13_C14__CM_POST_CSC_C14__SHIFT 0x10
+#define CM1_CM_POST_CSC_C13_C14__CM_POST_CSC_C13_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_C13_C14__CM_POST_CSC_C14_MASK 0xFFFF0000L
+//CM1_CM_POST_CSC_C21_C22
+#define CM1_CM_POST_CSC_C21_C22__CM_POST_CSC_C21__SHIFT 0x0
+#define CM1_CM_POST_CSC_C21_C22__CM_POST_CSC_C22__SHIFT 0x10
+#define CM1_CM_POST_CSC_C21_C22__CM_POST_CSC_C21_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_C21_C22__CM_POST_CSC_C22_MASK 0xFFFF0000L
+//CM1_CM_POST_CSC_C23_C24
+#define CM1_CM_POST_CSC_C23_C24__CM_POST_CSC_C23__SHIFT 0x0
+#define CM1_CM_POST_CSC_C23_C24__CM_POST_CSC_C24__SHIFT 0x10
+#define CM1_CM_POST_CSC_C23_C24__CM_POST_CSC_C23_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_C23_C24__CM_POST_CSC_C24_MASK 0xFFFF0000L
+//CM1_CM_POST_CSC_C31_C32
+#define CM1_CM_POST_CSC_C31_C32__CM_POST_CSC_C31__SHIFT 0x0
+#define CM1_CM_POST_CSC_C31_C32__CM_POST_CSC_C32__SHIFT 0x10
+#define CM1_CM_POST_CSC_C31_C32__CM_POST_CSC_C31_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_C31_C32__CM_POST_CSC_C32_MASK 0xFFFF0000L
+//CM1_CM_POST_CSC_C33_C34
+#define CM1_CM_POST_CSC_C33_C34__CM_POST_CSC_C33__SHIFT 0x0
+#define CM1_CM_POST_CSC_C33_C34__CM_POST_CSC_C34__SHIFT 0x10
+#define CM1_CM_POST_CSC_C33_C34__CM_POST_CSC_C33_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_C33_C34__CM_POST_CSC_C34_MASK 0xFFFF0000L
+//CM1_CM_POST_CSC_B_C11_C12
+#define CM1_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C11__SHIFT 0x0
+#define CM1_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C12__SHIFT 0x10
+#define CM1_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C11_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C12_MASK 0xFFFF0000L
+//CM1_CM_POST_CSC_B_C13_C14
+#define CM1_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C13__SHIFT 0x0
+#define CM1_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C14__SHIFT 0x10
+#define CM1_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C13_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C14_MASK 0xFFFF0000L
+//CM1_CM_POST_CSC_B_C21_C22
+#define CM1_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C21__SHIFT 0x0
+#define CM1_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C22__SHIFT 0x10
+#define CM1_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C21_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C22_MASK 0xFFFF0000L
+//CM1_CM_POST_CSC_B_C23_C24
+#define CM1_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C23__SHIFT 0x0
+#define CM1_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C24__SHIFT 0x10
+#define CM1_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C23_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C24_MASK 0xFFFF0000L
+//CM1_CM_POST_CSC_B_C31_C32
+#define CM1_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C31__SHIFT 0x0
+#define CM1_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C32__SHIFT 0x10
+#define CM1_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C31_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C32_MASK 0xFFFF0000L
+//CM1_CM_POST_CSC_B_C33_C34
+#define CM1_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C33__SHIFT 0x0
+#define CM1_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C34__SHIFT 0x10
+#define CM1_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C33_MASK 0x0000FFFFL
+#define CM1_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C34_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_CONTROL
+#define CM1_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_CURRENT__SHIFT 0x2
+#define CM1_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define CM1_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_CURRENT_MASK 0x0000000CL
+//CM1_CM_GAMUT_REMAP_C11_C12
+#define CM1_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_C13_C14
+#define CM1_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_C21_C22
+#define CM1_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_C23_C24
+#define CM1_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_C31_C32
+#define CM1_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_C33_C34
+#define CM1_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C11_C12
+#define CM1_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C13_C14
+#define CM1_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C21_C22
+#define CM1_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C23_C24
+#define CM1_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C31_C32
+#define CM1_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C33_C34
+#define CM1_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34_MASK 0xFFFF0000L
+//CM1_CM_BIAS_CR_R
+#define CM1_CM_BIAS_CR_R__CM_BIAS_CR_R__SHIFT 0x0
+#define CM1_CM_BIAS_CR_R__CM_BIAS_CR_R_MASK 0x0000FFFFL
+//CM1_CM_BIAS_Y_G_CB_B
+#define CM1_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G__SHIFT 0x0
+#define CM1_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B__SHIFT 0x10
+#define CM1_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G_MASK 0x0000FFFFL
+#define CM1_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B_MASK 0xFFFF0000L
+//CM1_CM_GAMCOR_CONTROL
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE__SHIFT 0x0
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT__SHIFT 0x2
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_PWL_DISABLE__SHIFT 0x3
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_CURRENT__SHIFT 0x4
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_CURRENT__SHIFT 0x6
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_MASK 0x00000003L
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_MASK 0x00000004L
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_PWL_DISABLE_MASK 0x00000008L
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_CURRENT_MASK 0x00000030L
+#define CM1_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_CURRENT_MASK 0x00000040L
+//CM1_CM_GAMCOR_LUT_INDEX
+#define CM1_CM_GAMCOR_LUT_INDEX__CM_GAMCOR_LUT_INDEX__SHIFT 0x0
+#define CM1_CM_GAMCOR_LUT_INDEX__CM_GAMCOR_LUT_INDEX_MASK 0x000001FFL
+//CM1_CM_GAMCOR_LUT_DATA
+#define CM1_CM_GAMCOR_LUT_DATA__CM_GAMCOR_LUT_DATA__SHIFT 0x0
+#define CM1_CM_GAMCOR_LUT_DATA__CM_GAMCOR_LUT_DATA_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_LUT_CONTROL
+#define CM1_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define CM1_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define CM1_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_HOST_SEL__SHIFT 0x6
+#define CM1_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_CONFIG_MODE__SHIFT 0x7
+#define CM1_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define CM1_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define CM1_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_HOST_SEL_MASK 0x00000040L
+#define CM1_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_CONFIG_MODE_MASK 0x00000080L
+//CM1_CM_GAMCOR_RAMA_START_CNTL_B
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM1_CM_GAMCOR_RAMA_START_CNTL_G
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM1_CM_GAMCOR_RAMA_START_CNTL_R
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B
+#define CM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G
+#define CM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R
+#define CM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMA_START_BASE_CNTL_B
+#define CM1_CM_GAMCOR_RAMA_START_BASE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_START_BASE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMA_START_BASE_CNTL_G
+#define CM1_CM_GAMCOR_RAMA_START_BASE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_START_BASE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMA_START_BASE_CNTL_R
+#define CM1_CM_GAMCOR_RAMA_START_BASE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_START_BASE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMA_END_CNTL1_B
+#define CM1_CM_GAMCOR_RAMA_END_CNTL1_B__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_END_CNTL1_B__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMA_END_CNTL2_B
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//CM1_CM_GAMCOR_RAMA_END_CNTL1_G
+#define CM1_CM_GAMCOR_RAMA_END_CNTL1_G__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_END_CNTL1_G__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMA_END_CNTL2_G
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//CM1_CM_GAMCOR_RAMA_END_CNTL1_R
+#define CM1_CM_GAMCOR_RAMA_END_CNTL1_R__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_END_CNTL1_R__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMA_END_CNTL2_R
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM1_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//CM1_CM_GAMCOR_RAMA_OFFSET_B
+#define CM1_CM_GAMCOR_RAMA_OFFSET_B__CM_GAMCOR_RAMA_OFFSET_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_OFFSET_B__CM_GAMCOR_RAMA_OFFSET_B_MASK 0x0007FFFFL
+//CM1_CM_GAMCOR_RAMA_OFFSET_G
+#define CM1_CM_GAMCOR_RAMA_OFFSET_G__CM_GAMCOR_RAMA_OFFSET_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_OFFSET_G__CM_GAMCOR_RAMA_OFFSET_G_MASK 0x0007FFFFL
+//CM1_CM_GAMCOR_RAMA_OFFSET_R
+#define CM1_CM_GAMCOR_RAMA_OFFSET_R__CM_GAMCOR_RAMA_OFFSET_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_OFFSET_R__CM_GAMCOR_RAMA_OFFSET_R_MASK 0x0007FFFFL
+//CM1_CM_GAMCOR_RAMA_REGION_0_1
+#define CM1_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_2_3
+#define CM1_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_4_5
+#define CM1_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_6_7
+#define CM1_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_8_9
+#define CM1_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_10_11
+#define CM1_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_12_13
+#define CM1_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_14_15
+#define CM1_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_16_17
+#define CM1_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_18_19
+#define CM1_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_20_21
+#define CM1_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_22_23
+#define CM1_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_24_25
+#define CM1_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_26_27
+#define CM1_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_28_29
+#define CM1_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_30_31
+#define CM1_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMA_REGION_32_33
+#define CM1_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_START_CNTL_B
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM1_CM_GAMCOR_RAMB_START_CNTL_G
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM1_CM_GAMCOR_RAMB_START_CNTL_R
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM1_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B
+#define CM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G
+#define CM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R
+#define CM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMB_START_BASE_CNTL_B
+#define CM1_CM_GAMCOR_RAMB_START_BASE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_START_BASE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMB_START_BASE_CNTL_G
+#define CM1_CM_GAMCOR_RAMB_START_BASE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_START_BASE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMB_START_BASE_CNTL_R
+#define CM1_CM_GAMCOR_RAMB_START_BASE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_START_BASE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMB_END_CNTL1_B
+#define CM1_CM_GAMCOR_RAMB_END_CNTL1_B__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_END_CNTL1_B__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMB_END_CNTL2_B
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//CM1_CM_GAMCOR_RAMB_END_CNTL1_G
+#define CM1_CM_GAMCOR_RAMB_END_CNTL1_G__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_END_CNTL1_G__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMB_END_CNTL2_G
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//CM1_CM_GAMCOR_RAMB_END_CNTL1_R
+#define CM1_CM_GAMCOR_RAMB_END_CNTL1_R__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_END_CNTL1_R__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//CM1_CM_GAMCOR_RAMB_END_CNTL2_R
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM1_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//CM1_CM_GAMCOR_RAMB_OFFSET_B
+#define CM1_CM_GAMCOR_RAMB_OFFSET_B__CM_GAMCOR_RAMB_OFFSET_B__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_OFFSET_B__CM_GAMCOR_RAMB_OFFSET_B_MASK 0x0007FFFFL
+//CM1_CM_GAMCOR_RAMB_OFFSET_G
+#define CM1_CM_GAMCOR_RAMB_OFFSET_G__CM_GAMCOR_RAMB_OFFSET_G__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_OFFSET_G__CM_GAMCOR_RAMB_OFFSET_G_MASK 0x0007FFFFL
+//CM1_CM_GAMCOR_RAMB_OFFSET_R
+#define CM1_CM_GAMCOR_RAMB_OFFSET_R__CM_GAMCOR_RAMB_OFFSET_R__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_OFFSET_R__CM_GAMCOR_RAMB_OFFSET_R_MASK 0x0007FFFFL
+//CM1_CM_GAMCOR_RAMB_REGION_0_1
+#define CM1_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_2_3
+#define CM1_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_4_5
+#define CM1_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_6_7
+#define CM1_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_8_9
+#define CM1_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_10_11
+#define CM1_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_12_13
+#define CM1_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_14_15
+#define CM1_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_16_17
+#define CM1_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_18_19
+#define CM1_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_20_21
+#define CM1_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_22_23
+#define CM1_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_24_25
+#define CM1_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_26_27
+#define CM1_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_28_29
+#define CM1_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_30_31
+#define CM1_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_GAMCOR_RAMB_REGION_32_33
+#define CM1_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_HDR_MULT_COEF
+#define CM1_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
+#define CM1_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK 0x0007FFFFL
+//CM1_CM_MEM_PWR_CTRL
+#define CM1_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_FORCE__SHIFT 0x0
+#define CM1_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_DIS__SHIFT 0x2
+#define CM1_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_FORCE_MASK 0x00000003L
+#define CM1_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_DIS_MASK 0x00000004L
+//CM1_CM_MEM_PWR_STATUS
+#define CM1_CM_MEM_PWR_STATUS__GAMCOR_MEM_PWR_STATE__SHIFT 0x0
+#define CM1_CM_MEM_PWR_STATUS__GAMCOR_MEM_PWR_STATE_MASK 0x00000003L
+//CM1_CM_DEALPHA
+#define CM1_CM_DEALPHA__CM_DEALPHA_EN__SHIFT 0x0
+#define CM1_CM_DEALPHA__CM_DEALPHA_ABLND__SHIFT 0x1
+#define CM1_CM_DEALPHA__CM_DEALPHA_EN_MASK 0x00000001L
+#define CM1_CM_DEALPHA__CM_DEALPHA_ABLND_MASK 0x00000002L
+//CM1_CM_COEF_FORMAT
+#define CM1_CM_COEF_FORMAT__CM_BIAS_FORMAT__SHIFT 0x0
+#define CM1_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT__SHIFT 0x4
+#define CM1_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x8
+#define CM1_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
+#define CM1_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT_MASK 0x00000010L
+#define CM1_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+//CM1_CM_TEST_DEBUG_INDEX
+#define CM1_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM1_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM1_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM1_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//CM1_CM_TEST_DEBUG_DATA
+#define CM1_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0
+#define CM1_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+//CM1_DPP_CRC_VAL_R
+#define CM1_DPP_CRC_VAL_R__DPP_CRC_R_CR__SHIFT 0x0
+#define CM1_DPP_CRC_VAL_R__DPP_CRC_R_CR_MASK 0xFFFFFFFFL
+//CM1_DPP_CRC_VAL_G
+#define CM1_DPP_CRC_VAL_G__DPP_CRC_G_Y__SHIFT 0x0
+#define CM1_DPP_CRC_VAL_G__DPP_CRC_G_Y_MASK 0xFFFFFFFFL
+//CM1_DPP_CRC_VAL_B
+#define CM1_DPP_CRC_VAL_B__DPP_CRC_B_CB__SHIFT 0x0
+#define CM1_DPP_CRC_VAL_B__DPP_CRC_B_CB_MASK 0xFFFFFFFFL
+//CM1_DPP_CRC_VAL_A
+#define CM1_DPP_CRC_VAL_A__DPP_CRC_ALPHA__SHIFT 0x0
+#define CM1_DPP_CRC_VAL_A__DPP_CRC_ALPHA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dpp1_dispdec_dpp_top_dispdec
+//DPP_TOP1_DPP_CONTROL
+#define DPP_TOP1_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT 0x4
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_GATE_DISABLE__SHIFT 0x8
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE__SHIFT 0xa
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE__SHIFT 0xc
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_R_GATE_DISABLE__SHIFT 0xe
+#define DPP_TOP1_DPP_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x10
+#define DPP_TOP1_DPP_CONTROL__DISPCLK_G_GATE_DISABLE__SHIFT 0x12
+#define DPP_TOP1_DPP_CONTROL__DPP_FGCG_REP_DIS__SHIFT 0x18
+#define DPP_TOP1_DPP_CONTROL__DPP_TEST_CLK_SEL__SHIFT 0x1c
+#define DPP_TOP1_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK 0x00000010L
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE_MASK 0x00000400L
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE_MASK 0x00001000L
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_R_GATE_DISABLE_MASK 0x00004000L
+#define DPP_TOP1_DPP_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00010000L
+#define DPP_TOP1_DPP_CONTROL__DISPCLK_G_GATE_DISABLE_MASK 0x00040000L
+#define DPP_TOP1_DPP_CONTROL__DPP_FGCG_REP_DIS_MASK 0x01000000L
+#define DPP_TOP1_DPP_CONTROL__DPP_TEST_CLK_SEL_MASK 0x70000000L
+//DPP_TOP1_DPP_SOFT_RESET
+#define DPP_TOP1_DPP_SOFT_RESET__CNVC_SOFT_RESET__SHIFT 0x0
+#define DPP_TOP1_DPP_SOFT_RESET__DSCL_SOFT_RESET__SHIFT 0x4
+#define DPP_TOP1_DPP_SOFT_RESET__CM_SOFT_RESET__SHIFT 0x8
+#define DPP_TOP1_DPP_SOFT_RESET__OBUF_SOFT_RESET__SHIFT 0xc
+#define DPP_TOP1_DPP_SOFT_RESET__CNVC_SOFT_RESET_MASK 0x00000001L
+#define DPP_TOP1_DPP_SOFT_RESET__DSCL_SOFT_RESET_MASK 0x00000010L
+#define DPP_TOP1_DPP_SOFT_RESET__CM_SOFT_RESET_MASK 0x00000100L
+#define DPP_TOP1_DPP_SOFT_RESET__OBUF_SOFT_RESET_MASK 0x00001000L
+//DPP_TOP1_DPP_CRC_CTRL
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_EN__SHIFT 0x0
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CONT_EN__SHIFT 0x1
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING__SHIFT 0x2
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL__SHIFT 0x3
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_SRC_SEL__SHIFT 0x4
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_STEREO_EN__SHIFT 0x6
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE__SHIFT 0x7
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE__SHIFT 0x9
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL__SHIFT 0xb
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL__SHIFT 0xe
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_MASK__SHIFT 0x10
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_EN_MASK 0x00000001L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CONT_EN_MASK 0x00000002L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL_MASK 0x00000008L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_SRC_SEL_MASK 0x00000030L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_STEREO_EN_MASK 0x00000040L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE_MASK 0x00000180L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE_MASK 0x00000600L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL_MASK 0x00003800L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL_MASK 0x0000C000L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_MASK_MASK 0xFFFF0000L
+//DPP_TOP1_HOST_READ_CONTROL
+#define DPP_TOP1_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define DPP_TOP1_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+
+
+// addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON12_PERFCOUNTER_CNTL
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON12_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON12_PERFCOUNTER_CNTL2
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON12_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON12_PERFCOUNTER_STATE
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON12_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON12_PERFMON_CNTL
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON12_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON12_PERFMON_CNTL2
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON12_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON12_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON12_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON12_PERFMON_CVALUE_LOW
+#define DC_PERFMON12_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON12_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON12_PERFMON_HI
+#define DC_PERFMON12_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON12_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON12_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON12_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON12_PERFMON_LOW
+#define DC_PERFMON12_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON12_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cnvc_cfg_dispdec
+//CNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT
+#define CNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define CNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT__CNVC_ALPHA_PLANE_ENABLE__SHIFT 0x8
+#define CNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define CNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT__CNVC_ALPHA_PLANE_ENABLE_MASK 0x00000100L
+//CNVC_CFG2_FORMAT_CONTROL
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_EXPANSION_MODE__SHIFT 0x0
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CNV16__SHIFT 0x4
+#define CNVC_CFG2_FORMAT_CONTROL__ALPHA_EN__SHIFT 0x8
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_BYPASS__SHIFT 0xc
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN__SHIFT 0xd
+#define CNVC_CFG2_FORMAT_CONTROL__CLAMP_POSITIVE__SHIFT 0x10
+#define CNVC_CFG2_FORMAT_CONTROL__CLAMP_POSITIVE_C__SHIFT 0x11
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_UPDATE_PENDING__SHIFT 0x14
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CROSSBAR_R__SHIFT 0x18
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CROSSBAR_G__SHIFT 0x1a
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CROSSBAR_B__SHIFT 0x1c
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_EXPANSION_MODE_MASK 0x00000001L
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CNV16_MASK 0x00000010L
+#define CNVC_CFG2_FORMAT_CONTROL__ALPHA_EN_MASK 0x00000100L
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_BYPASS_MASK 0x00001000L
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN_MASK 0x00002000L
+#define CNVC_CFG2_FORMAT_CONTROL__CLAMP_POSITIVE_MASK 0x00010000L
+#define CNVC_CFG2_FORMAT_CONTROL__CLAMP_POSITIVE_C_MASK 0x00020000L
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_UPDATE_PENDING_MASK 0x00100000L
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CROSSBAR_R_MASK 0x03000000L
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CROSSBAR_G_MASK 0x0C000000L
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CROSSBAR_B_MASK 0x30000000L
+//CNVC_CFG2_FCNV_FP_BIAS_R
+#define CNVC_CFG2_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R_MASK 0x0007FFFFL
+//CNVC_CFG2_FCNV_FP_BIAS_G
+#define CNVC_CFG2_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G_MASK 0x0007FFFFL
+//CNVC_CFG2_FCNV_FP_BIAS_B
+#define CNVC_CFG2_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B_MASK 0x0007FFFFL
+//CNVC_CFG2_FCNV_FP_SCALE_R
+#define CNVC_CFG2_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R_MASK 0x0007FFFFL
+//CNVC_CFG2_FCNV_FP_SCALE_G
+#define CNVC_CFG2_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G_MASK 0x0007FFFFL
+//CNVC_CFG2_FCNV_FP_SCALE_B
+#define CNVC_CFG2_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B_MASK 0x0007FFFFL
+//CNVC_CFG2_COLOR_KEYER_CONTROL
+#define CNVC_CFG2_COLOR_KEYER_CONTROL__COLOR_KEYER_EN__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE__SHIFT 0x4
+#define CNVC_CFG2_COLOR_KEYER_CONTROL__COLOR_KEYER_EN_MASK 0x00000001L
+#define CNVC_CFG2_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE_MASK 0x00000030L
+//CNVC_CFG2_COLOR_KEYER_ALPHA
+#define CNVC_CFG2_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH__SHIFT 0x10
+#define CNVC_CFG2_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG2_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG2_COLOR_KEYER_RED
+#define CNVC_CFG2_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT 0x10
+#define CNVC_CFG2_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG2_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG2_COLOR_KEYER_GREEN
+#define CNVC_CFG2_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH__SHIFT 0x10
+#define CNVC_CFG2_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG2_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG2_COLOR_KEYER_BLUE
+#define CNVC_CFG2_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH__SHIFT 0x10
+#define CNVC_CFG2_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG2_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG2_ALPHA_2BIT_LUT
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0__SHIFT 0x0
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1__SHIFT 0x8
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2__SHIFT 0x10
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3__SHIFT 0x18
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0_MASK 0x000000FFL
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1_MASK 0x0000FF00L
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2_MASK 0x00FF0000L
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3_MASK 0xFF000000L
+//CNVC_CFG2_PRE_DEALPHA
+#define CNVC_CFG2_PRE_DEALPHA__PRE_DEALPHA_EN__SHIFT 0x0
+#define CNVC_CFG2_PRE_DEALPHA__PRE_DEALPHA_ABLND_EN__SHIFT 0x4
+#define CNVC_CFG2_PRE_DEALPHA__PRE_DEALPHA_EN_MASK 0x00000001L
+#define CNVC_CFG2_PRE_DEALPHA__PRE_DEALPHA_ABLND_EN_MASK 0x00000010L
+//CNVC_CFG2_PRE_CSC_MODE
+#define CNVC_CFG2_PRE_CSC_MODE__PRE_CSC_MODE__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_MODE__PRE_CSC_MODE_CURRENT__SHIFT 0x2
+#define CNVC_CFG2_PRE_CSC_MODE__PRE_CSC_MODE_MASK 0x00000003L
+#define CNVC_CFG2_PRE_CSC_MODE__PRE_CSC_MODE_CURRENT_MASK 0x0000000CL
+//CNVC_CFG2_PRE_CSC_C11_C12
+#define CNVC_CFG2_PRE_CSC_C11_C12__PRE_CSC_C11__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_C11_C12__PRE_CSC_C12__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_C11_C12__PRE_CSC_C11_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_C11_C12__PRE_CSC_C12_MASK 0xFFFF0000L
+//CNVC_CFG2_PRE_CSC_C13_C14
+#define CNVC_CFG2_PRE_CSC_C13_C14__PRE_CSC_C13__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_C13_C14__PRE_CSC_C14__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_C13_C14__PRE_CSC_C13_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_C13_C14__PRE_CSC_C14_MASK 0xFFFF0000L
+//CNVC_CFG2_PRE_CSC_C21_C22
+#define CNVC_CFG2_PRE_CSC_C21_C22__PRE_CSC_C21__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_C21_C22__PRE_CSC_C22__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_C21_C22__PRE_CSC_C21_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_C21_C22__PRE_CSC_C22_MASK 0xFFFF0000L
+//CNVC_CFG2_PRE_CSC_C23_C24
+#define CNVC_CFG2_PRE_CSC_C23_C24__PRE_CSC_C23__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_C23_C24__PRE_CSC_C24__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_C23_C24__PRE_CSC_C23_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_C23_C24__PRE_CSC_C24_MASK 0xFFFF0000L
+//CNVC_CFG2_PRE_CSC_C31_C32
+#define CNVC_CFG2_PRE_CSC_C31_C32__PRE_CSC_C31__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_C31_C32__PRE_CSC_C32__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_C31_C32__PRE_CSC_C31_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_C31_C32__PRE_CSC_C32_MASK 0xFFFF0000L
+//CNVC_CFG2_PRE_CSC_C33_C34
+#define CNVC_CFG2_PRE_CSC_C33_C34__PRE_CSC_C33__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_C33_C34__PRE_CSC_C34__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_C33_C34__PRE_CSC_C33_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_C33_C34__PRE_CSC_C34_MASK 0xFFFF0000L
+//CNVC_CFG2_PRE_CSC_B_C11_C12
+#define CNVC_CFG2_PRE_CSC_B_C11_C12__PRE_CSC_B_C11__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_B_C11_C12__PRE_CSC_B_C12__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_B_C11_C12__PRE_CSC_B_C11_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_B_C11_C12__PRE_CSC_B_C12_MASK 0xFFFF0000L
+//CNVC_CFG2_PRE_CSC_B_C13_C14
+#define CNVC_CFG2_PRE_CSC_B_C13_C14__PRE_CSC_B_C13__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_B_C13_C14__PRE_CSC_B_C14__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_B_C13_C14__PRE_CSC_B_C13_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_B_C13_C14__PRE_CSC_B_C14_MASK 0xFFFF0000L
+//CNVC_CFG2_PRE_CSC_B_C21_C22
+#define CNVC_CFG2_PRE_CSC_B_C21_C22__PRE_CSC_B_C21__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_B_C21_C22__PRE_CSC_B_C22__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_B_C21_C22__PRE_CSC_B_C21_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_B_C21_C22__PRE_CSC_B_C22_MASK 0xFFFF0000L
+//CNVC_CFG2_PRE_CSC_B_C23_C24
+#define CNVC_CFG2_PRE_CSC_B_C23_C24__PRE_CSC_B_C23__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_B_C23_C24__PRE_CSC_B_C24__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_B_C23_C24__PRE_CSC_B_C23_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_B_C23_C24__PRE_CSC_B_C24_MASK 0xFFFF0000L
+//CNVC_CFG2_PRE_CSC_B_C31_C32
+#define CNVC_CFG2_PRE_CSC_B_C31_C32__PRE_CSC_B_C31__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_B_C31_C32__PRE_CSC_B_C32__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_B_C31_C32__PRE_CSC_B_C31_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_B_C31_C32__PRE_CSC_B_C32_MASK 0xFFFF0000L
+//CNVC_CFG2_PRE_CSC_B_C33_C34
+#define CNVC_CFG2_PRE_CSC_B_C33_C34__PRE_CSC_B_C33__SHIFT 0x0
+#define CNVC_CFG2_PRE_CSC_B_C33_C34__PRE_CSC_B_C34__SHIFT 0x10
+#define CNVC_CFG2_PRE_CSC_B_C33_C34__PRE_CSC_B_C33_MASK 0x0000FFFFL
+#define CNVC_CFG2_PRE_CSC_B_C33_C34__PRE_CSC_B_C34_MASK 0xFFFF0000L
+//CNVC_CFG2_CNVC_COEF_FORMAT
+#define CNVC_CFG2_CNVC_COEF_FORMAT__PRE_CSC_COEF_FORMAT__SHIFT 0x0
+#define CNVC_CFG2_CNVC_COEF_FORMAT__PRE_CSC_COEF_FORMAT_MASK 0x00000001L
+//CNVC_CFG2_PRE_DEGAM
+#define CNVC_CFG2_PRE_DEGAM__PRE_DEGAM_MODE__SHIFT 0x0
+#define CNVC_CFG2_PRE_DEGAM__PRE_DEGAM_SELECT__SHIFT 0x4
+#define CNVC_CFG2_PRE_DEGAM__PRE_DEGAM_MODE_MASK 0x00000003L
+#define CNVC_CFG2_PRE_DEGAM__PRE_DEGAM_SELECT_MASK 0x00000070L
+//CNVC_CFG2_PRE_REALPHA
+#define CNVC_CFG2_PRE_REALPHA__PRE_REALPHA_EN__SHIFT 0x0
+#define CNVC_CFG2_PRE_REALPHA__PRE_REALPHA_ABLND_EN__SHIFT 0x4
+#define CNVC_CFG2_PRE_REALPHA__PRE_REALPHA_EN_MASK 0x00000001L
+#define CNVC_CFG2_PRE_REALPHA__PRE_REALPHA_ABLND_EN_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cnvc_cur_dispdec
+//CNVC_CUR2_CURSOR0_CONTROL
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_ENABLE__SHIFT 0x0
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_EXPANSION_MODE__SHIFT 0x1
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_PIX_INV_MODE__SHIFT 0x2
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_ROM_EN__SHIFT 0x3
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_MODE__SHIFT 0x4
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN__SHIFT 0x7
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_UPDATE_PENDING__SHIFT 0x10
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_ENABLE_MASK 0x00000001L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_EXPANSION_MODE_MASK 0x00000002L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_PIX_INV_MODE_MASK 0x00000004L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_ROM_EN_MASK 0x00000008L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_MODE_MASK 0x00000070L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN_MASK 0x00000080L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_UPDATE_PENDING_MASK 0x00010000L
+//CNVC_CUR2_CURSOR0_COLOR0
+#define CNVC_CUR2_CURSOR0_COLOR0__CUR0_COLOR0__SHIFT 0x0
+#define CNVC_CUR2_CURSOR0_COLOR0__CUR0_COLOR0_MASK 0x00FFFFFFL
+//CNVC_CUR2_CURSOR0_COLOR1
+#define CNVC_CUR2_CURSOR0_COLOR1__CUR0_COLOR1__SHIFT 0x0
+#define CNVC_CUR2_CURSOR0_COLOR1__CUR0_COLOR1_MASK 0x00FFFFFFL
+//CNVC_CUR2_CURSOR0_FP_SCALE_BIAS
+#define CNVC_CUR2_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE__SHIFT 0x0
+#define CNVC_CUR2_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS__SHIFT 0x10
+#define CNVC_CUR2_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE_MASK 0x0000FFFFL
+#define CNVC_CUR2_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dpp2_dispdec_dscl_dispdec
+//DSCL2_SCL_COEF_RAM_TAP_SELECT
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE__SHIFT 0x8
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000003L
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE_MASK 0x00003F00L
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE_MASK 0x00030000L
+//DSCL2_SCL_COEF_RAM_TAP_DATA
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L
+//DSCL2_SCL_MODE
+#define DSCL2_SCL_MODE__DSCL_MODE__SHIFT 0x0
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT__SHIFT 0x8
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT__SHIFT 0xc
+#define DSCL2_SCL_MODE__SCL_CHROMA_COEF_MODE__SHIFT 0x10
+#define DSCL2_SCL_MODE__SCL_ALPHA_COEF_MODE__SHIFT 0x14
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_RD__SHIFT 0x18
+#define DSCL2_SCL_MODE__DSCL_MODE_MASK 0x00000007L
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_MASK 0x00000100L
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT_MASK 0x00001000L
+#define DSCL2_SCL_MODE__SCL_CHROMA_COEF_MODE_MASK 0x00010000L
+#define DSCL2_SCL_MODE__SCL_ALPHA_COEF_MODE_MASK 0x00100000L
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_RD_MASK 0x01000000L
+//DSCL2_SCL_TAP_CONTROL
+#define DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT 0x0
+#define DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT 0x4
+#define DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C__SHIFT 0x8
+#define DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C__SHIFT 0xc
+#define DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK 0x00000007L
+#define DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK 0x00000070L
+#define DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C_MASK 0x00000700L
+#define DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C_MASK 0x00007000L
+//DSCL2_DSCL_CONTROL
+#define DSCL2_DSCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0
+#define DSCL2_DSCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x00000001L
+//DSCL2_DSCL_2TAP_CONTROL
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x0
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN__SHIFT 0x4
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR__SHIFT 0x8
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x10
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN__SHIFT 0x14
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR__SHIFT 0x18
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x00000001L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN_MASK 0x00000010L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR_MASK 0x00000700L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x00010000L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN_MASK 0x00100000L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR_MASK 0x07000000L
+//DSCL2_SCL_MANUAL_REPLICATE_CONTROL
+#define DSCL2_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0
+#define DSCL2_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8
+#define DSCL2_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000FL
+#define DSCL2_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000F00L
+//DSCL2_SCL_HORZ_FILTER_SCALE_RATIO
+#define DSCL2_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0
+#define DSCL2_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL2_SCL_HORZ_FILTER_INIT
+#define DSCL2_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0
+#define DSCL2_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18
+#define DSCL2_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL2_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0x0F000000L
+//DSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C
+#define DSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL2_SCL_HORZ_FILTER_INIT_C
+#define DSCL2_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0
+#define DSCL2_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18
+#define DSCL2_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL2_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0x0F000000L
+//DSCL2_SCL_VERT_FILTER_SCALE_RATIO
+#define DSCL2_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL2_SCL_VERT_FILTER_INIT
+#define DSCL2_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18
+#define DSCL2_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL2_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x0F000000L
+//DSCL2_SCL_VERT_FILTER_INIT_BOT
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x00FFFFFFL
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x0F000000L
+//DSCL2_SCL_VERT_FILTER_SCALE_RATIO_C
+#define DSCL2_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL2_SCL_VERT_FILTER_INIT_C
+#define DSCL2_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18
+#define DSCL2_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL2_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x0F000000L
+//DSCL2_SCL_VERT_FILTER_INIT_BOT_C
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0x00FFFFFFL
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x0F000000L
+//DSCL2_SCL_BLACK_COLOR
+#define DSCL2_SCL_BLACK_COLOR__SCL_BLACK_COLOR_RGB_Y__SHIFT 0x0
+#define DSCL2_SCL_BLACK_COLOR__SCL_BLACK_COLOR_CBCR__SHIFT 0x10
+#define DSCL2_SCL_BLACK_COLOR__SCL_BLACK_COLOR_RGB_Y_MASK 0x0000FFFFL
+#define DSCL2_SCL_BLACK_COLOR__SCL_BLACK_COLOR_CBCR_MASK 0xFFFF0000L
+//DSCL2_DSCL_UPDATE
+#define DSCL2_DSCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0
+#define DSCL2_DSCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L
+//DSCL2_DSCL_AUTOCAL
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_MODE__SHIFT 0x0
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE__SHIFT 0x8
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_PIPE_ID__SHIFT 0xc
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_MODE_MASK 0x00000003L
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE_MASK 0x00000300L
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_PIPE_ID_MASK 0x00003000L
+//DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT
+#define DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0
+#define DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10
+#define DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00001FFFL
+#define DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1FFF0000L
+//DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM
+#define DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0
+#define DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10
+#define DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00001FFFL
+#define DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1FFF0000L
+//DSCL2_OTG_H_BLANK
+#define DSCL2_OTG_H_BLANK__OTG_H_BLANK_START__SHIFT 0x0
+#define DSCL2_OTG_H_BLANK__OTG_H_BLANK_END__SHIFT 0x10
+#define DSCL2_OTG_H_BLANK__OTG_H_BLANK_START_MASK 0x00003FFFL
+#define DSCL2_OTG_H_BLANK__OTG_H_BLANK_END_MASK 0x3FFF0000L
+//DSCL2_OTG_V_BLANK
+#define DSCL2_OTG_V_BLANK__OTG_V_BLANK_START__SHIFT 0x0
+#define DSCL2_OTG_V_BLANK__OTG_V_BLANK_END__SHIFT 0x10
+#define DSCL2_OTG_V_BLANK__OTG_V_BLANK_START_MASK 0x00003FFFL
+#define DSCL2_OTG_V_BLANK__OTG_V_BLANK_END_MASK 0x3FFF0000L
+//DSCL2_RECOUT_START
+#define DSCL2_RECOUT_START__RECOUT_START_X__SHIFT 0x0
+#define DSCL2_RECOUT_START__RECOUT_START_Y__SHIFT 0x10
+#define DSCL2_RECOUT_START__RECOUT_START_X_MASK 0x00001FFFL
+#define DSCL2_RECOUT_START__RECOUT_START_Y_MASK 0x1FFF0000L
+//DSCL2_RECOUT_SIZE
+#define DSCL2_RECOUT_SIZE__RECOUT_WIDTH__SHIFT 0x0
+#define DSCL2_RECOUT_SIZE__RECOUT_HEIGHT__SHIFT 0x10
+#define DSCL2_RECOUT_SIZE__RECOUT_WIDTH_MASK 0x00003FFFL
+#define DSCL2_RECOUT_SIZE__RECOUT_HEIGHT_MASK 0x3FFF0000L
+//DSCL2_MPC_SIZE
+#define DSCL2_MPC_SIZE__MPC_WIDTH__SHIFT 0x0
+#define DSCL2_MPC_SIZE__MPC_HEIGHT__SHIFT 0x10
+#define DSCL2_MPC_SIZE__MPC_WIDTH_MASK 0x00003FFFL
+#define DSCL2_MPC_SIZE__MPC_HEIGHT_MASK 0x3FFF0000L
+//DSCL2_LB_DATA_FORMAT
+#define DSCL2_LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x0
+#define DSCL2_LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x4
+#define DSCL2_LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L
+#define DSCL2_LB_DATA_FORMAT__ALPHA_EN_MASK 0x00000010L
+//DSCL2_LB_MEMORY_CTRL
+#define DSCL2_LB_MEMORY_CTRL__MEMORY_CONFIG__SHIFT 0x0
+#define DSCL2_LB_MEMORY_CTRL__LB_MAX_PARTITIONS__SHIFT 0x8
+#define DSCL2_LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10
+#define DSCL2_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C__SHIFT 0x18
+#define DSCL2_LB_MEMORY_CTRL__MEMORY_CONFIG_MASK 0x00000003L
+#define DSCL2_LB_MEMORY_CTRL__LB_MAX_PARTITIONS_MASK 0x00003F00L
+#define DSCL2_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0x007F0000L
+#define DSCL2_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C_MASK 0x7F000000L
+//DSCL2_LB_V_COUNTER
+#define DSCL2_LB_V_COUNTER__V_COUNTER__SHIFT 0x0
+#define DSCL2_LB_V_COUNTER__V_COUNTER_C__SHIFT 0x10
+#define DSCL2_LB_V_COUNTER__V_COUNTER_MASK 0x00001FFFL
+#define DSCL2_LB_V_COUNTER__V_COUNTER_C_MASK 0x1FFF0000L
+//DSCL2_DSCL_MEM_PWR_CTRL
+#define DSCL2_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL2_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS__SHIFT 0x6
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE__SHIFT 0x8
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS__SHIFT 0xa
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE__SHIFT 0xc
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS__SHIFT 0xe
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE__SHIFT 0x10
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS__SHIFT 0x12
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS__SHIFT 0x16
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE__SHIFT 0x18
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS__SHIFT 0x1a
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE__SHIFT 0x1c
+#define DSCL2_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS_MASK 0x00000040L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE_MASK 0x00000300L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS_MASK 0x00000400L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE_MASK 0x00003000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS_MASK 0x00004000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE_MASK 0x00030000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS_MASK 0x00040000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS_MASK 0x00400000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE_MASK 0x03000000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS_MASK 0x04000000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE_MASK 0x10000000L
+//DSCL2_DSCL_MEM_PWR_STATUS
+#define DSCL2_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE__SHIFT 0x0
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE__SHIFT 0x2
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE__SHIFT 0x4
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE__SHIFT 0x6
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE__SHIFT 0x8
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE__SHIFT 0xa
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE__SHIFT 0xc
+#define DSCL2_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE_MASK 0x00000003L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE_MASK 0x0000000CL
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE_MASK 0x00000030L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE_MASK 0x000000C0L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE_MASK 0x00000300L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE_MASK 0x00000C00L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE_MASK 0x00003000L
+//DSCL2_OBUF_CONTROL
+#define DSCL2_OBUF_CONTROL__OBUF_BYPASS__SHIFT 0x0
+#define DSCL2_OBUF_CONTROL__OBUF_USE_FULL_BUFFER__SHIFT 0x1
+#define DSCL2_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH__SHIFT 0x2
+#define DSCL2_OBUF_CONTROL__OBUF_OUT_HOLD_CNT__SHIFT 0x4
+#define DSCL2_OBUF_CONTROL__OBUF_BYPASS_MASK 0x00000001L
+#define DSCL2_OBUF_CONTROL__OBUF_USE_FULL_BUFFER_MASK 0x00000002L
+#define DSCL2_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH_MASK 0x00000004L
+#define DSCL2_OBUF_CONTROL__OBUF_OUT_HOLD_CNT_MASK 0x000000F0L
+//DSCL2_OBUF_MEM_PWR_CTRL
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE__SHIFT 0x8
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE__SHIFT 0x10
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE_MASK 0x00000100L
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE_MASK 0x00030000L
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cm_dispdec
+//CM2_CM_CONTROL
+#define CM2_CM_CONTROL__CM_BYPASS__SHIFT 0x0
+#define CM2_CM_CONTROL__CM_UPDATE_PENDING__SHIFT 0x8
+#define CM2_CM_CONTROL__CM_BYPASS_MASK 0x00000001L
+#define CM2_CM_CONTROL__CM_UPDATE_PENDING_MASK 0x00000100L
+//CM2_CM_POST_CSC_CONTROL
+#define CM2_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE__SHIFT 0x0
+#define CM2_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_CURRENT__SHIFT 0x2
+#define CM2_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_MASK 0x00000003L
+#define CM2_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_CURRENT_MASK 0x0000000CL
+//CM2_CM_POST_CSC_C11_C12
+#define CM2_CM_POST_CSC_C11_C12__CM_POST_CSC_C11__SHIFT 0x0
+#define CM2_CM_POST_CSC_C11_C12__CM_POST_CSC_C12__SHIFT 0x10
+#define CM2_CM_POST_CSC_C11_C12__CM_POST_CSC_C11_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_C11_C12__CM_POST_CSC_C12_MASK 0xFFFF0000L
+//CM2_CM_POST_CSC_C13_C14
+#define CM2_CM_POST_CSC_C13_C14__CM_POST_CSC_C13__SHIFT 0x0
+#define CM2_CM_POST_CSC_C13_C14__CM_POST_CSC_C14__SHIFT 0x10
+#define CM2_CM_POST_CSC_C13_C14__CM_POST_CSC_C13_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_C13_C14__CM_POST_CSC_C14_MASK 0xFFFF0000L
+//CM2_CM_POST_CSC_C21_C22
+#define CM2_CM_POST_CSC_C21_C22__CM_POST_CSC_C21__SHIFT 0x0
+#define CM2_CM_POST_CSC_C21_C22__CM_POST_CSC_C22__SHIFT 0x10
+#define CM2_CM_POST_CSC_C21_C22__CM_POST_CSC_C21_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_C21_C22__CM_POST_CSC_C22_MASK 0xFFFF0000L
+//CM2_CM_POST_CSC_C23_C24
+#define CM2_CM_POST_CSC_C23_C24__CM_POST_CSC_C23__SHIFT 0x0
+#define CM2_CM_POST_CSC_C23_C24__CM_POST_CSC_C24__SHIFT 0x10
+#define CM2_CM_POST_CSC_C23_C24__CM_POST_CSC_C23_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_C23_C24__CM_POST_CSC_C24_MASK 0xFFFF0000L
+//CM2_CM_POST_CSC_C31_C32
+#define CM2_CM_POST_CSC_C31_C32__CM_POST_CSC_C31__SHIFT 0x0
+#define CM2_CM_POST_CSC_C31_C32__CM_POST_CSC_C32__SHIFT 0x10
+#define CM2_CM_POST_CSC_C31_C32__CM_POST_CSC_C31_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_C31_C32__CM_POST_CSC_C32_MASK 0xFFFF0000L
+//CM2_CM_POST_CSC_C33_C34
+#define CM2_CM_POST_CSC_C33_C34__CM_POST_CSC_C33__SHIFT 0x0
+#define CM2_CM_POST_CSC_C33_C34__CM_POST_CSC_C34__SHIFT 0x10
+#define CM2_CM_POST_CSC_C33_C34__CM_POST_CSC_C33_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_C33_C34__CM_POST_CSC_C34_MASK 0xFFFF0000L
+//CM2_CM_POST_CSC_B_C11_C12
+#define CM2_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C11__SHIFT 0x0
+#define CM2_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C12__SHIFT 0x10
+#define CM2_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C11_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C12_MASK 0xFFFF0000L
+//CM2_CM_POST_CSC_B_C13_C14
+#define CM2_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C13__SHIFT 0x0
+#define CM2_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C14__SHIFT 0x10
+#define CM2_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C13_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C14_MASK 0xFFFF0000L
+//CM2_CM_POST_CSC_B_C21_C22
+#define CM2_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C21__SHIFT 0x0
+#define CM2_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C22__SHIFT 0x10
+#define CM2_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C21_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C22_MASK 0xFFFF0000L
+//CM2_CM_POST_CSC_B_C23_C24
+#define CM2_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C23__SHIFT 0x0
+#define CM2_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C24__SHIFT 0x10
+#define CM2_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C23_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C24_MASK 0xFFFF0000L
+//CM2_CM_POST_CSC_B_C31_C32
+#define CM2_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C31__SHIFT 0x0
+#define CM2_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C32__SHIFT 0x10
+#define CM2_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C31_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C32_MASK 0xFFFF0000L
+//CM2_CM_POST_CSC_B_C33_C34
+#define CM2_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C33__SHIFT 0x0
+#define CM2_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C34__SHIFT 0x10
+#define CM2_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C33_MASK 0x0000FFFFL
+#define CM2_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C34_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_CONTROL
+#define CM2_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_CURRENT__SHIFT 0x2
+#define CM2_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define CM2_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_CURRENT_MASK 0x0000000CL
+//CM2_CM_GAMUT_REMAP_C11_C12
+#define CM2_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_C13_C14
+#define CM2_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_C21_C22
+#define CM2_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_C23_C24
+#define CM2_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_C31_C32
+#define CM2_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_C33_C34
+#define CM2_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C11_C12
+#define CM2_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C13_C14
+#define CM2_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C21_C22
+#define CM2_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C23_C24
+#define CM2_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C31_C32
+#define CM2_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C33_C34
+#define CM2_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34_MASK 0xFFFF0000L
+//CM2_CM_BIAS_CR_R
+#define CM2_CM_BIAS_CR_R__CM_BIAS_CR_R__SHIFT 0x0
+#define CM2_CM_BIAS_CR_R__CM_BIAS_CR_R_MASK 0x0000FFFFL
+//CM2_CM_BIAS_Y_G_CB_B
+#define CM2_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G__SHIFT 0x0
+#define CM2_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B__SHIFT 0x10
+#define CM2_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G_MASK 0x0000FFFFL
+#define CM2_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B_MASK 0xFFFF0000L
+//CM2_CM_GAMCOR_CONTROL
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE__SHIFT 0x0
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT__SHIFT 0x2
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_PWL_DISABLE__SHIFT 0x3
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_CURRENT__SHIFT 0x4
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_CURRENT__SHIFT 0x6
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_MASK 0x00000003L
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_MASK 0x00000004L
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_PWL_DISABLE_MASK 0x00000008L
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_CURRENT_MASK 0x00000030L
+#define CM2_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_CURRENT_MASK 0x00000040L
+//CM2_CM_GAMCOR_LUT_INDEX
+#define CM2_CM_GAMCOR_LUT_INDEX__CM_GAMCOR_LUT_INDEX__SHIFT 0x0
+#define CM2_CM_GAMCOR_LUT_INDEX__CM_GAMCOR_LUT_INDEX_MASK 0x000001FFL
+//CM2_CM_GAMCOR_LUT_DATA
+#define CM2_CM_GAMCOR_LUT_DATA__CM_GAMCOR_LUT_DATA__SHIFT 0x0
+#define CM2_CM_GAMCOR_LUT_DATA__CM_GAMCOR_LUT_DATA_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_LUT_CONTROL
+#define CM2_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define CM2_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define CM2_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_HOST_SEL__SHIFT 0x6
+#define CM2_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_CONFIG_MODE__SHIFT 0x7
+#define CM2_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define CM2_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define CM2_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_HOST_SEL_MASK 0x00000040L
+#define CM2_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_CONFIG_MODE_MASK 0x00000080L
+//CM2_CM_GAMCOR_RAMA_START_CNTL_B
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM2_CM_GAMCOR_RAMA_START_CNTL_G
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM2_CM_GAMCOR_RAMA_START_CNTL_R
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B
+#define CM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G
+#define CM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R
+#define CM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMA_START_BASE_CNTL_B
+#define CM2_CM_GAMCOR_RAMA_START_BASE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_START_BASE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMA_START_BASE_CNTL_G
+#define CM2_CM_GAMCOR_RAMA_START_BASE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_START_BASE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMA_START_BASE_CNTL_R
+#define CM2_CM_GAMCOR_RAMA_START_BASE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_START_BASE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMA_END_CNTL1_B
+#define CM2_CM_GAMCOR_RAMA_END_CNTL1_B__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_END_CNTL1_B__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMA_END_CNTL2_B
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//CM2_CM_GAMCOR_RAMA_END_CNTL1_G
+#define CM2_CM_GAMCOR_RAMA_END_CNTL1_G__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_END_CNTL1_G__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMA_END_CNTL2_G
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//CM2_CM_GAMCOR_RAMA_END_CNTL1_R
+#define CM2_CM_GAMCOR_RAMA_END_CNTL1_R__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_END_CNTL1_R__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMA_END_CNTL2_R
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM2_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//CM2_CM_GAMCOR_RAMA_OFFSET_B
+#define CM2_CM_GAMCOR_RAMA_OFFSET_B__CM_GAMCOR_RAMA_OFFSET_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_OFFSET_B__CM_GAMCOR_RAMA_OFFSET_B_MASK 0x0007FFFFL
+//CM2_CM_GAMCOR_RAMA_OFFSET_G
+#define CM2_CM_GAMCOR_RAMA_OFFSET_G__CM_GAMCOR_RAMA_OFFSET_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_OFFSET_G__CM_GAMCOR_RAMA_OFFSET_G_MASK 0x0007FFFFL
+//CM2_CM_GAMCOR_RAMA_OFFSET_R
+#define CM2_CM_GAMCOR_RAMA_OFFSET_R__CM_GAMCOR_RAMA_OFFSET_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_OFFSET_R__CM_GAMCOR_RAMA_OFFSET_R_MASK 0x0007FFFFL
+//CM2_CM_GAMCOR_RAMA_REGION_0_1
+#define CM2_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_2_3
+#define CM2_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_4_5
+#define CM2_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_6_7
+#define CM2_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_8_9
+#define CM2_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_10_11
+#define CM2_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_12_13
+#define CM2_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_14_15
+#define CM2_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_16_17
+#define CM2_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_18_19
+#define CM2_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_20_21
+#define CM2_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_22_23
+#define CM2_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_24_25
+#define CM2_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_26_27
+#define CM2_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_28_29
+#define CM2_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_30_31
+#define CM2_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMA_REGION_32_33
+#define CM2_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_START_CNTL_B
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM2_CM_GAMCOR_RAMB_START_CNTL_G
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM2_CM_GAMCOR_RAMB_START_CNTL_R
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B
+#define CM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G
+#define CM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R
+#define CM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMB_START_BASE_CNTL_B
+#define CM2_CM_GAMCOR_RAMB_START_BASE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_START_BASE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMB_START_BASE_CNTL_G
+#define CM2_CM_GAMCOR_RAMB_START_BASE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_START_BASE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMB_START_BASE_CNTL_R
+#define CM2_CM_GAMCOR_RAMB_START_BASE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_START_BASE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMB_END_CNTL1_B
+#define CM2_CM_GAMCOR_RAMB_END_CNTL1_B__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_END_CNTL1_B__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMB_END_CNTL2_B
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//CM2_CM_GAMCOR_RAMB_END_CNTL1_G
+#define CM2_CM_GAMCOR_RAMB_END_CNTL1_G__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_END_CNTL1_G__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMB_END_CNTL2_G
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//CM2_CM_GAMCOR_RAMB_END_CNTL1_R
+#define CM2_CM_GAMCOR_RAMB_END_CNTL1_R__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_END_CNTL1_R__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//CM2_CM_GAMCOR_RAMB_END_CNTL2_R
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM2_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//CM2_CM_GAMCOR_RAMB_OFFSET_B
+#define CM2_CM_GAMCOR_RAMB_OFFSET_B__CM_GAMCOR_RAMB_OFFSET_B__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_OFFSET_B__CM_GAMCOR_RAMB_OFFSET_B_MASK 0x0007FFFFL
+//CM2_CM_GAMCOR_RAMB_OFFSET_G
+#define CM2_CM_GAMCOR_RAMB_OFFSET_G__CM_GAMCOR_RAMB_OFFSET_G__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_OFFSET_G__CM_GAMCOR_RAMB_OFFSET_G_MASK 0x0007FFFFL
+//CM2_CM_GAMCOR_RAMB_OFFSET_R
+#define CM2_CM_GAMCOR_RAMB_OFFSET_R__CM_GAMCOR_RAMB_OFFSET_R__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_OFFSET_R__CM_GAMCOR_RAMB_OFFSET_R_MASK 0x0007FFFFL
+//CM2_CM_GAMCOR_RAMB_REGION_0_1
+#define CM2_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_2_3
+#define CM2_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_4_5
+#define CM2_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_6_7
+#define CM2_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_8_9
+#define CM2_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_10_11
+#define CM2_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_12_13
+#define CM2_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_14_15
+#define CM2_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_16_17
+#define CM2_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_18_19
+#define CM2_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_20_21
+#define CM2_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_22_23
+#define CM2_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_24_25
+#define CM2_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_26_27
+#define CM2_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_28_29
+#define CM2_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_30_31
+#define CM2_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_GAMCOR_RAMB_REGION_32_33
+#define CM2_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_HDR_MULT_COEF
+#define CM2_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
+#define CM2_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK 0x0007FFFFL
+//CM2_CM_MEM_PWR_CTRL
+#define CM2_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_FORCE__SHIFT 0x0
+#define CM2_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_DIS__SHIFT 0x2
+#define CM2_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_FORCE_MASK 0x00000003L
+#define CM2_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_DIS_MASK 0x00000004L
+//CM2_CM_MEM_PWR_STATUS
+#define CM2_CM_MEM_PWR_STATUS__GAMCOR_MEM_PWR_STATE__SHIFT 0x0
+#define CM2_CM_MEM_PWR_STATUS__GAMCOR_MEM_PWR_STATE_MASK 0x00000003L
+//CM2_CM_DEALPHA
+#define CM2_CM_DEALPHA__CM_DEALPHA_EN__SHIFT 0x0
+#define CM2_CM_DEALPHA__CM_DEALPHA_ABLND__SHIFT 0x1
+#define CM2_CM_DEALPHA__CM_DEALPHA_EN_MASK 0x00000001L
+#define CM2_CM_DEALPHA__CM_DEALPHA_ABLND_MASK 0x00000002L
+//CM2_CM_COEF_FORMAT
+#define CM2_CM_COEF_FORMAT__CM_BIAS_FORMAT__SHIFT 0x0
+#define CM2_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT__SHIFT 0x4
+#define CM2_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x8
+#define CM2_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
+#define CM2_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT_MASK 0x00000010L
+#define CM2_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+//CM2_CM_TEST_DEBUG_INDEX
+#define CM2_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM2_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM2_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM2_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//CM2_CM_TEST_DEBUG_DATA
+#define CM2_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0
+#define CM2_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+//CM2_DPP_CRC_VAL_R
+#define CM2_DPP_CRC_VAL_R__DPP_CRC_R_CR__SHIFT 0x0
+#define CM2_DPP_CRC_VAL_R__DPP_CRC_R_CR_MASK 0xFFFFFFFFL
+//CM2_DPP_CRC_VAL_G
+#define CM2_DPP_CRC_VAL_G__DPP_CRC_G_Y__SHIFT 0x0
+#define CM2_DPP_CRC_VAL_G__DPP_CRC_G_Y_MASK 0xFFFFFFFFL
+//CM2_DPP_CRC_VAL_B
+#define CM2_DPP_CRC_VAL_B__DPP_CRC_B_CB__SHIFT 0x0
+#define CM2_DPP_CRC_VAL_B__DPP_CRC_B_CB_MASK 0xFFFFFFFFL
+//CM2_DPP_CRC_VAL_A
+#define CM2_DPP_CRC_VAL_A__DPP_CRC_ALPHA__SHIFT 0x0
+#define CM2_DPP_CRC_VAL_A__DPP_CRC_ALPHA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dpp2_dispdec_dpp_top_dispdec
+//DPP_TOP2_DPP_CONTROL
+#define DPP_TOP2_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT 0x4
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_GATE_DISABLE__SHIFT 0x8
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE__SHIFT 0xa
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE__SHIFT 0xc
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_R_GATE_DISABLE__SHIFT 0xe
+#define DPP_TOP2_DPP_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x10
+#define DPP_TOP2_DPP_CONTROL__DISPCLK_G_GATE_DISABLE__SHIFT 0x12
+#define DPP_TOP2_DPP_CONTROL__DPP_FGCG_REP_DIS__SHIFT 0x18
+#define DPP_TOP2_DPP_CONTROL__DPP_TEST_CLK_SEL__SHIFT 0x1c
+#define DPP_TOP2_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK 0x00000010L
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE_MASK 0x00000400L
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE_MASK 0x00001000L
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_R_GATE_DISABLE_MASK 0x00004000L
+#define DPP_TOP2_DPP_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00010000L
+#define DPP_TOP2_DPP_CONTROL__DISPCLK_G_GATE_DISABLE_MASK 0x00040000L
+#define DPP_TOP2_DPP_CONTROL__DPP_FGCG_REP_DIS_MASK 0x01000000L
+#define DPP_TOP2_DPP_CONTROL__DPP_TEST_CLK_SEL_MASK 0x70000000L
+//DPP_TOP2_DPP_SOFT_RESET
+#define DPP_TOP2_DPP_SOFT_RESET__CNVC_SOFT_RESET__SHIFT 0x0
+#define DPP_TOP2_DPP_SOFT_RESET__DSCL_SOFT_RESET__SHIFT 0x4
+#define DPP_TOP2_DPP_SOFT_RESET__CM_SOFT_RESET__SHIFT 0x8
+#define DPP_TOP2_DPP_SOFT_RESET__OBUF_SOFT_RESET__SHIFT 0xc
+#define DPP_TOP2_DPP_SOFT_RESET__CNVC_SOFT_RESET_MASK 0x00000001L
+#define DPP_TOP2_DPP_SOFT_RESET__DSCL_SOFT_RESET_MASK 0x00000010L
+#define DPP_TOP2_DPP_SOFT_RESET__CM_SOFT_RESET_MASK 0x00000100L
+#define DPP_TOP2_DPP_SOFT_RESET__OBUF_SOFT_RESET_MASK 0x00001000L
+//DPP_TOP2_DPP_CRC_CTRL
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_EN__SHIFT 0x0
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CONT_EN__SHIFT 0x1
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING__SHIFT 0x2
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL__SHIFT 0x3
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_SRC_SEL__SHIFT 0x4
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_STEREO_EN__SHIFT 0x6
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE__SHIFT 0x7
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE__SHIFT 0x9
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL__SHIFT 0xb
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL__SHIFT 0xe
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_MASK__SHIFT 0x10
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_EN_MASK 0x00000001L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CONT_EN_MASK 0x00000002L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL_MASK 0x00000008L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_SRC_SEL_MASK 0x00000030L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_STEREO_EN_MASK 0x00000040L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE_MASK 0x00000180L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE_MASK 0x00000600L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL_MASK 0x00003800L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL_MASK 0x0000C000L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_MASK_MASK 0xFFFF0000L
+//DPP_TOP2_HOST_READ_CONTROL
+#define DPP_TOP2_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define DPP_TOP2_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+
+
+// addressBlock: dce_dc_dpp2_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON13_PERFCOUNTER_CNTL
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON13_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON13_PERFCOUNTER_CNTL2
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON13_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON13_PERFCOUNTER_STATE
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON13_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON13_PERFMON_CNTL
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON13_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON13_PERFMON_CNTL2
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON13_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON13_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON13_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON13_PERFMON_CVALUE_LOW
+#define DC_PERFMON13_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON13_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON13_PERFMON_HI
+#define DC_PERFMON13_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON13_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON13_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON13_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON13_PERFMON_LOW
+#define DC_PERFMON13_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON13_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cnvc_cfg_dispdec
+//CNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT
+#define CNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define CNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT__CNVC_ALPHA_PLANE_ENABLE__SHIFT 0x8
+#define CNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define CNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT__CNVC_ALPHA_PLANE_ENABLE_MASK 0x00000100L
+//CNVC_CFG3_FORMAT_CONTROL
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_EXPANSION_MODE__SHIFT 0x0
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CNV16__SHIFT 0x4
+#define CNVC_CFG3_FORMAT_CONTROL__ALPHA_EN__SHIFT 0x8
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_BYPASS__SHIFT 0xc
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN__SHIFT 0xd
+#define CNVC_CFG3_FORMAT_CONTROL__CLAMP_POSITIVE__SHIFT 0x10
+#define CNVC_CFG3_FORMAT_CONTROL__CLAMP_POSITIVE_C__SHIFT 0x11
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_UPDATE_PENDING__SHIFT 0x14
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CROSSBAR_R__SHIFT 0x18
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CROSSBAR_G__SHIFT 0x1a
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CROSSBAR_B__SHIFT 0x1c
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_EXPANSION_MODE_MASK 0x00000001L
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CNV16_MASK 0x00000010L
+#define CNVC_CFG3_FORMAT_CONTROL__ALPHA_EN_MASK 0x00000100L
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_BYPASS_MASK 0x00001000L
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN_MASK 0x00002000L
+#define CNVC_CFG3_FORMAT_CONTROL__CLAMP_POSITIVE_MASK 0x00010000L
+#define CNVC_CFG3_FORMAT_CONTROL__CLAMP_POSITIVE_C_MASK 0x00020000L
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_UPDATE_PENDING_MASK 0x00100000L
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CROSSBAR_R_MASK 0x03000000L
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CROSSBAR_G_MASK 0x0C000000L
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CROSSBAR_B_MASK 0x30000000L
+//CNVC_CFG3_FCNV_FP_BIAS_R
+#define CNVC_CFG3_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R_MASK 0x0007FFFFL
+//CNVC_CFG3_FCNV_FP_BIAS_G
+#define CNVC_CFG3_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G_MASK 0x0007FFFFL
+//CNVC_CFG3_FCNV_FP_BIAS_B
+#define CNVC_CFG3_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B_MASK 0x0007FFFFL
+//CNVC_CFG3_FCNV_FP_SCALE_R
+#define CNVC_CFG3_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R_MASK 0x0007FFFFL
+//CNVC_CFG3_FCNV_FP_SCALE_G
+#define CNVC_CFG3_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G_MASK 0x0007FFFFL
+//CNVC_CFG3_FCNV_FP_SCALE_B
+#define CNVC_CFG3_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B_MASK 0x0007FFFFL
+//CNVC_CFG3_COLOR_KEYER_CONTROL
+#define CNVC_CFG3_COLOR_KEYER_CONTROL__COLOR_KEYER_EN__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE__SHIFT 0x4
+#define CNVC_CFG3_COLOR_KEYER_CONTROL__COLOR_KEYER_EN_MASK 0x00000001L
+#define CNVC_CFG3_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE_MASK 0x00000030L
+//CNVC_CFG3_COLOR_KEYER_ALPHA
+#define CNVC_CFG3_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH__SHIFT 0x10
+#define CNVC_CFG3_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG3_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG3_COLOR_KEYER_RED
+#define CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT 0x10
+#define CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG3_COLOR_KEYER_GREEN
+#define CNVC_CFG3_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH__SHIFT 0x10
+#define CNVC_CFG3_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG3_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG3_COLOR_KEYER_BLUE
+#define CNVC_CFG3_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH__SHIFT 0x10
+#define CNVC_CFG3_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG3_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG3_ALPHA_2BIT_LUT
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0__SHIFT 0x0
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1__SHIFT 0x8
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2__SHIFT 0x10
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3__SHIFT 0x18
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0_MASK 0x000000FFL
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1_MASK 0x0000FF00L
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2_MASK 0x00FF0000L
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3_MASK 0xFF000000L
+//CNVC_CFG3_PRE_DEALPHA
+#define CNVC_CFG3_PRE_DEALPHA__PRE_DEALPHA_EN__SHIFT 0x0
+#define CNVC_CFG3_PRE_DEALPHA__PRE_DEALPHA_ABLND_EN__SHIFT 0x4
+#define CNVC_CFG3_PRE_DEALPHA__PRE_DEALPHA_EN_MASK 0x00000001L
+#define CNVC_CFG3_PRE_DEALPHA__PRE_DEALPHA_ABLND_EN_MASK 0x00000010L
+//CNVC_CFG3_PRE_CSC_MODE
+#define CNVC_CFG3_PRE_CSC_MODE__PRE_CSC_MODE__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_MODE__PRE_CSC_MODE_CURRENT__SHIFT 0x2
+#define CNVC_CFG3_PRE_CSC_MODE__PRE_CSC_MODE_MASK 0x00000003L
+#define CNVC_CFG3_PRE_CSC_MODE__PRE_CSC_MODE_CURRENT_MASK 0x0000000CL
+//CNVC_CFG3_PRE_CSC_C11_C12
+#define CNVC_CFG3_PRE_CSC_C11_C12__PRE_CSC_C11__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_C11_C12__PRE_CSC_C12__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_C11_C12__PRE_CSC_C11_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_C11_C12__PRE_CSC_C12_MASK 0xFFFF0000L
+//CNVC_CFG3_PRE_CSC_C13_C14
+#define CNVC_CFG3_PRE_CSC_C13_C14__PRE_CSC_C13__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_C13_C14__PRE_CSC_C14__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_C13_C14__PRE_CSC_C13_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_C13_C14__PRE_CSC_C14_MASK 0xFFFF0000L
+//CNVC_CFG3_PRE_CSC_C21_C22
+#define CNVC_CFG3_PRE_CSC_C21_C22__PRE_CSC_C21__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_C21_C22__PRE_CSC_C22__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_C21_C22__PRE_CSC_C21_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_C21_C22__PRE_CSC_C22_MASK 0xFFFF0000L
+//CNVC_CFG3_PRE_CSC_C23_C24
+#define CNVC_CFG3_PRE_CSC_C23_C24__PRE_CSC_C23__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_C23_C24__PRE_CSC_C24__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_C23_C24__PRE_CSC_C23_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_C23_C24__PRE_CSC_C24_MASK 0xFFFF0000L
+//CNVC_CFG3_PRE_CSC_C31_C32
+#define CNVC_CFG3_PRE_CSC_C31_C32__PRE_CSC_C31__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_C31_C32__PRE_CSC_C32__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_C31_C32__PRE_CSC_C31_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_C31_C32__PRE_CSC_C32_MASK 0xFFFF0000L
+//CNVC_CFG3_PRE_CSC_C33_C34
+#define CNVC_CFG3_PRE_CSC_C33_C34__PRE_CSC_C33__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_C33_C34__PRE_CSC_C34__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_C33_C34__PRE_CSC_C33_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_C33_C34__PRE_CSC_C34_MASK 0xFFFF0000L
+//CNVC_CFG3_PRE_CSC_B_C11_C12
+#define CNVC_CFG3_PRE_CSC_B_C11_C12__PRE_CSC_B_C11__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_B_C11_C12__PRE_CSC_B_C12__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_B_C11_C12__PRE_CSC_B_C11_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_B_C11_C12__PRE_CSC_B_C12_MASK 0xFFFF0000L
+//CNVC_CFG3_PRE_CSC_B_C13_C14
+#define CNVC_CFG3_PRE_CSC_B_C13_C14__PRE_CSC_B_C13__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_B_C13_C14__PRE_CSC_B_C14__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_B_C13_C14__PRE_CSC_B_C13_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_B_C13_C14__PRE_CSC_B_C14_MASK 0xFFFF0000L
+//CNVC_CFG3_PRE_CSC_B_C21_C22
+#define CNVC_CFG3_PRE_CSC_B_C21_C22__PRE_CSC_B_C21__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_B_C21_C22__PRE_CSC_B_C22__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_B_C21_C22__PRE_CSC_B_C21_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_B_C21_C22__PRE_CSC_B_C22_MASK 0xFFFF0000L
+//CNVC_CFG3_PRE_CSC_B_C23_C24
+#define CNVC_CFG3_PRE_CSC_B_C23_C24__PRE_CSC_B_C23__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_B_C23_C24__PRE_CSC_B_C24__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_B_C23_C24__PRE_CSC_B_C23_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_B_C23_C24__PRE_CSC_B_C24_MASK 0xFFFF0000L
+//CNVC_CFG3_PRE_CSC_B_C31_C32
+#define CNVC_CFG3_PRE_CSC_B_C31_C32__PRE_CSC_B_C31__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_B_C31_C32__PRE_CSC_B_C32__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_B_C31_C32__PRE_CSC_B_C31_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_B_C31_C32__PRE_CSC_B_C32_MASK 0xFFFF0000L
+//CNVC_CFG3_PRE_CSC_B_C33_C34
+#define CNVC_CFG3_PRE_CSC_B_C33_C34__PRE_CSC_B_C33__SHIFT 0x0
+#define CNVC_CFG3_PRE_CSC_B_C33_C34__PRE_CSC_B_C34__SHIFT 0x10
+#define CNVC_CFG3_PRE_CSC_B_C33_C34__PRE_CSC_B_C33_MASK 0x0000FFFFL
+#define CNVC_CFG3_PRE_CSC_B_C33_C34__PRE_CSC_B_C34_MASK 0xFFFF0000L
+//CNVC_CFG3_CNVC_COEF_FORMAT
+#define CNVC_CFG3_CNVC_COEF_FORMAT__PRE_CSC_COEF_FORMAT__SHIFT 0x0
+#define CNVC_CFG3_CNVC_COEF_FORMAT__PRE_CSC_COEF_FORMAT_MASK 0x00000001L
+//CNVC_CFG3_PRE_DEGAM
+#define CNVC_CFG3_PRE_DEGAM__PRE_DEGAM_MODE__SHIFT 0x0
+#define CNVC_CFG3_PRE_DEGAM__PRE_DEGAM_SELECT__SHIFT 0x4
+#define CNVC_CFG3_PRE_DEGAM__PRE_DEGAM_MODE_MASK 0x00000003L
+#define CNVC_CFG3_PRE_DEGAM__PRE_DEGAM_SELECT_MASK 0x00000070L
+//CNVC_CFG3_PRE_REALPHA
+#define CNVC_CFG3_PRE_REALPHA__PRE_REALPHA_EN__SHIFT 0x0
+#define CNVC_CFG3_PRE_REALPHA__PRE_REALPHA_ABLND_EN__SHIFT 0x4
+#define CNVC_CFG3_PRE_REALPHA__PRE_REALPHA_EN_MASK 0x00000001L
+#define CNVC_CFG3_PRE_REALPHA__PRE_REALPHA_ABLND_EN_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cnvc_cur_dispdec
+//CNVC_CUR3_CURSOR0_CONTROL
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_ENABLE__SHIFT 0x0
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_EXPANSION_MODE__SHIFT 0x1
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_PIX_INV_MODE__SHIFT 0x2
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_ROM_EN__SHIFT 0x3
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_MODE__SHIFT 0x4
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN__SHIFT 0x7
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_UPDATE_PENDING__SHIFT 0x10
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_ENABLE_MASK 0x00000001L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_EXPANSION_MODE_MASK 0x00000002L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_PIX_INV_MODE_MASK 0x00000004L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_ROM_EN_MASK 0x00000008L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_MODE_MASK 0x00000070L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN_MASK 0x00000080L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_UPDATE_PENDING_MASK 0x00010000L
+//CNVC_CUR3_CURSOR0_COLOR0
+#define CNVC_CUR3_CURSOR0_COLOR0__CUR0_COLOR0__SHIFT 0x0
+#define CNVC_CUR3_CURSOR0_COLOR0__CUR0_COLOR0_MASK 0x00FFFFFFL
+//CNVC_CUR3_CURSOR0_COLOR1
+#define CNVC_CUR3_CURSOR0_COLOR1__CUR0_COLOR1__SHIFT 0x0
+#define CNVC_CUR3_CURSOR0_COLOR1__CUR0_COLOR1_MASK 0x00FFFFFFL
+//CNVC_CUR3_CURSOR0_FP_SCALE_BIAS
+#define CNVC_CUR3_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE__SHIFT 0x0
+#define CNVC_CUR3_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS__SHIFT 0x10
+#define CNVC_CUR3_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE_MASK 0x0000FFFFL
+#define CNVC_CUR3_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dpp3_dispdec_dscl_dispdec
+//DSCL3_SCL_COEF_RAM_TAP_SELECT
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE__SHIFT 0x8
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000003L
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE_MASK 0x00003F00L
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE_MASK 0x00030000L
+//DSCL3_SCL_COEF_RAM_TAP_DATA
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L
+//DSCL3_SCL_MODE
+#define DSCL3_SCL_MODE__DSCL_MODE__SHIFT 0x0
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT__SHIFT 0x8
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT__SHIFT 0xc
+#define DSCL3_SCL_MODE__SCL_CHROMA_COEF_MODE__SHIFT 0x10
+#define DSCL3_SCL_MODE__SCL_ALPHA_COEF_MODE__SHIFT 0x14
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_RD__SHIFT 0x18
+#define DSCL3_SCL_MODE__DSCL_MODE_MASK 0x00000007L
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_MASK 0x00000100L
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT_MASK 0x00001000L
+#define DSCL3_SCL_MODE__SCL_CHROMA_COEF_MODE_MASK 0x00010000L
+#define DSCL3_SCL_MODE__SCL_ALPHA_COEF_MODE_MASK 0x00100000L
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_RD_MASK 0x01000000L
+//DSCL3_SCL_TAP_CONTROL
+#define DSCL3_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT 0x0
+#define DSCL3_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT 0x4
+#define DSCL3_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C__SHIFT 0x8
+#define DSCL3_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C__SHIFT 0xc
+#define DSCL3_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK 0x00000007L
+#define DSCL3_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK 0x00000070L
+#define DSCL3_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C_MASK 0x00000700L
+#define DSCL3_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C_MASK 0x00007000L
+//DSCL3_DSCL_CONTROL
+#define DSCL3_DSCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0
+#define DSCL3_DSCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x00000001L
+//DSCL3_DSCL_2TAP_CONTROL
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x0
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN__SHIFT 0x4
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR__SHIFT 0x8
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x10
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN__SHIFT 0x14
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR__SHIFT 0x18
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x00000001L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN_MASK 0x00000010L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR_MASK 0x00000700L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x00010000L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN_MASK 0x00100000L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR_MASK 0x07000000L
+//DSCL3_SCL_MANUAL_REPLICATE_CONTROL
+#define DSCL3_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0
+#define DSCL3_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8
+#define DSCL3_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000FL
+#define DSCL3_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000F00L
+//DSCL3_SCL_HORZ_FILTER_SCALE_RATIO
+#define DSCL3_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0
+#define DSCL3_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL3_SCL_HORZ_FILTER_INIT
+#define DSCL3_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0
+#define DSCL3_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18
+#define DSCL3_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL3_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0x0F000000L
+//DSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C
+#define DSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL3_SCL_HORZ_FILTER_INIT_C
+#define DSCL3_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0
+#define DSCL3_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18
+#define DSCL3_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL3_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0x0F000000L
+//DSCL3_SCL_VERT_FILTER_SCALE_RATIO
+#define DSCL3_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL3_SCL_VERT_FILTER_INIT
+#define DSCL3_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18
+#define DSCL3_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL3_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x0F000000L
+//DSCL3_SCL_VERT_FILTER_INIT_BOT
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x00FFFFFFL
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x0F000000L
+//DSCL3_SCL_VERT_FILTER_SCALE_RATIO_C
+#define DSCL3_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL3_SCL_VERT_FILTER_INIT_C
+#define DSCL3_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18
+#define DSCL3_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL3_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x0F000000L
+//DSCL3_SCL_VERT_FILTER_INIT_BOT_C
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0x00FFFFFFL
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x0F000000L
+//DSCL3_SCL_BLACK_COLOR
+#define DSCL3_SCL_BLACK_COLOR__SCL_BLACK_COLOR_RGB_Y__SHIFT 0x0
+#define DSCL3_SCL_BLACK_COLOR__SCL_BLACK_COLOR_CBCR__SHIFT 0x10
+#define DSCL3_SCL_BLACK_COLOR__SCL_BLACK_COLOR_RGB_Y_MASK 0x0000FFFFL
+#define DSCL3_SCL_BLACK_COLOR__SCL_BLACK_COLOR_CBCR_MASK 0xFFFF0000L
+//DSCL3_DSCL_UPDATE
+#define DSCL3_DSCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0
+#define DSCL3_DSCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L
+//DSCL3_DSCL_AUTOCAL
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_MODE__SHIFT 0x0
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE__SHIFT 0x8
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_PIPE_ID__SHIFT 0xc
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_MODE_MASK 0x00000003L
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE_MASK 0x00000300L
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_PIPE_ID_MASK 0x00003000L
+//DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT
+#define DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0
+#define DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10
+#define DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00001FFFL
+#define DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1FFF0000L
+//DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM
+#define DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0
+#define DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10
+#define DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00001FFFL
+#define DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1FFF0000L
+//DSCL3_OTG_H_BLANK
+#define DSCL3_OTG_H_BLANK__OTG_H_BLANK_START__SHIFT 0x0
+#define DSCL3_OTG_H_BLANK__OTG_H_BLANK_END__SHIFT 0x10
+#define DSCL3_OTG_H_BLANK__OTG_H_BLANK_START_MASK 0x00003FFFL
+#define DSCL3_OTG_H_BLANK__OTG_H_BLANK_END_MASK 0x3FFF0000L
+//DSCL3_OTG_V_BLANK
+#define DSCL3_OTG_V_BLANK__OTG_V_BLANK_START__SHIFT 0x0
+#define DSCL3_OTG_V_BLANK__OTG_V_BLANK_END__SHIFT 0x10
+#define DSCL3_OTG_V_BLANK__OTG_V_BLANK_START_MASK 0x00003FFFL
+#define DSCL3_OTG_V_BLANK__OTG_V_BLANK_END_MASK 0x3FFF0000L
+//DSCL3_RECOUT_START
+#define DSCL3_RECOUT_START__RECOUT_START_X__SHIFT 0x0
+#define DSCL3_RECOUT_START__RECOUT_START_Y__SHIFT 0x10
+#define DSCL3_RECOUT_START__RECOUT_START_X_MASK 0x00001FFFL
+#define DSCL3_RECOUT_START__RECOUT_START_Y_MASK 0x1FFF0000L
+//DSCL3_RECOUT_SIZE
+#define DSCL3_RECOUT_SIZE__RECOUT_WIDTH__SHIFT 0x0
+#define DSCL3_RECOUT_SIZE__RECOUT_HEIGHT__SHIFT 0x10
+#define DSCL3_RECOUT_SIZE__RECOUT_WIDTH_MASK 0x00003FFFL
+#define DSCL3_RECOUT_SIZE__RECOUT_HEIGHT_MASK 0x3FFF0000L
+//DSCL3_MPC_SIZE
+#define DSCL3_MPC_SIZE__MPC_WIDTH__SHIFT 0x0
+#define DSCL3_MPC_SIZE__MPC_HEIGHT__SHIFT 0x10
+#define DSCL3_MPC_SIZE__MPC_WIDTH_MASK 0x00003FFFL
+#define DSCL3_MPC_SIZE__MPC_HEIGHT_MASK 0x3FFF0000L
+//DSCL3_LB_DATA_FORMAT
+#define DSCL3_LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x0
+#define DSCL3_LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x4
+#define DSCL3_LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L
+#define DSCL3_LB_DATA_FORMAT__ALPHA_EN_MASK 0x00000010L
+//DSCL3_LB_MEMORY_CTRL
+#define DSCL3_LB_MEMORY_CTRL__MEMORY_CONFIG__SHIFT 0x0
+#define DSCL3_LB_MEMORY_CTRL__LB_MAX_PARTITIONS__SHIFT 0x8
+#define DSCL3_LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10
+#define DSCL3_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C__SHIFT 0x18
+#define DSCL3_LB_MEMORY_CTRL__MEMORY_CONFIG_MASK 0x00000003L
+#define DSCL3_LB_MEMORY_CTRL__LB_MAX_PARTITIONS_MASK 0x00003F00L
+#define DSCL3_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0x007F0000L
+#define DSCL3_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C_MASK 0x7F000000L
+//DSCL3_LB_V_COUNTER
+#define DSCL3_LB_V_COUNTER__V_COUNTER__SHIFT 0x0
+#define DSCL3_LB_V_COUNTER__V_COUNTER_C__SHIFT 0x10
+#define DSCL3_LB_V_COUNTER__V_COUNTER_MASK 0x00001FFFL
+#define DSCL3_LB_V_COUNTER__V_COUNTER_C_MASK 0x1FFF0000L
+//DSCL3_DSCL_MEM_PWR_CTRL
+#define DSCL3_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL3_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS__SHIFT 0x6
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE__SHIFT 0x8
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS__SHIFT 0xa
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE__SHIFT 0xc
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS__SHIFT 0xe
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE__SHIFT 0x10
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS__SHIFT 0x12
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS__SHIFT 0x16
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE__SHIFT 0x18
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS__SHIFT 0x1a
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE__SHIFT 0x1c
+#define DSCL3_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS_MASK 0x00000040L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE_MASK 0x00000300L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS_MASK 0x00000400L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE_MASK 0x00003000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS_MASK 0x00004000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE_MASK 0x00030000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS_MASK 0x00040000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS_MASK 0x00400000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE_MASK 0x03000000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS_MASK 0x04000000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE_MASK 0x10000000L
+//DSCL3_DSCL_MEM_PWR_STATUS
+#define DSCL3_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE__SHIFT 0x0
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE__SHIFT 0x2
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE__SHIFT 0x4
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE__SHIFT 0x6
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE__SHIFT 0x8
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE__SHIFT 0xa
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE__SHIFT 0xc
+#define DSCL3_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE_MASK 0x00000003L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE_MASK 0x0000000CL
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE_MASK 0x00000030L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE_MASK 0x000000C0L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE_MASK 0x00000300L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE_MASK 0x00000C00L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE_MASK 0x00003000L
+//DSCL3_OBUF_CONTROL
+#define DSCL3_OBUF_CONTROL__OBUF_BYPASS__SHIFT 0x0
+#define DSCL3_OBUF_CONTROL__OBUF_USE_FULL_BUFFER__SHIFT 0x1
+#define DSCL3_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH__SHIFT 0x2
+#define DSCL3_OBUF_CONTROL__OBUF_OUT_HOLD_CNT__SHIFT 0x4
+#define DSCL3_OBUF_CONTROL__OBUF_BYPASS_MASK 0x00000001L
+#define DSCL3_OBUF_CONTROL__OBUF_USE_FULL_BUFFER_MASK 0x00000002L
+#define DSCL3_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH_MASK 0x00000004L
+#define DSCL3_OBUF_CONTROL__OBUF_OUT_HOLD_CNT_MASK 0x000000F0L
+//DSCL3_OBUF_MEM_PWR_CTRL
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE__SHIFT 0x8
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE__SHIFT 0x10
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE_MASK 0x00000100L
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE_MASK 0x00030000L
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cm_dispdec
+//CM3_CM_CONTROL
+#define CM3_CM_CONTROL__CM_BYPASS__SHIFT 0x0
+#define CM3_CM_CONTROL__CM_UPDATE_PENDING__SHIFT 0x8
+#define CM3_CM_CONTROL__CM_BYPASS_MASK 0x00000001L
+#define CM3_CM_CONTROL__CM_UPDATE_PENDING_MASK 0x00000100L
+//CM3_CM_POST_CSC_CONTROL
+#define CM3_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE__SHIFT 0x0
+#define CM3_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_CURRENT__SHIFT 0x2
+#define CM3_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_MASK 0x00000003L
+#define CM3_CM_POST_CSC_CONTROL__CM_POST_CSC_MODE_CURRENT_MASK 0x0000000CL
+//CM3_CM_POST_CSC_C11_C12
+#define CM3_CM_POST_CSC_C11_C12__CM_POST_CSC_C11__SHIFT 0x0
+#define CM3_CM_POST_CSC_C11_C12__CM_POST_CSC_C12__SHIFT 0x10
+#define CM3_CM_POST_CSC_C11_C12__CM_POST_CSC_C11_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_C11_C12__CM_POST_CSC_C12_MASK 0xFFFF0000L
+//CM3_CM_POST_CSC_C13_C14
+#define CM3_CM_POST_CSC_C13_C14__CM_POST_CSC_C13__SHIFT 0x0
+#define CM3_CM_POST_CSC_C13_C14__CM_POST_CSC_C14__SHIFT 0x10
+#define CM3_CM_POST_CSC_C13_C14__CM_POST_CSC_C13_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_C13_C14__CM_POST_CSC_C14_MASK 0xFFFF0000L
+//CM3_CM_POST_CSC_C21_C22
+#define CM3_CM_POST_CSC_C21_C22__CM_POST_CSC_C21__SHIFT 0x0
+#define CM3_CM_POST_CSC_C21_C22__CM_POST_CSC_C22__SHIFT 0x10
+#define CM3_CM_POST_CSC_C21_C22__CM_POST_CSC_C21_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_C21_C22__CM_POST_CSC_C22_MASK 0xFFFF0000L
+//CM3_CM_POST_CSC_C23_C24
+#define CM3_CM_POST_CSC_C23_C24__CM_POST_CSC_C23__SHIFT 0x0
+#define CM3_CM_POST_CSC_C23_C24__CM_POST_CSC_C24__SHIFT 0x10
+#define CM3_CM_POST_CSC_C23_C24__CM_POST_CSC_C23_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_C23_C24__CM_POST_CSC_C24_MASK 0xFFFF0000L
+//CM3_CM_POST_CSC_C31_C32
+#define CM3_CM_POST_CSC_C31_C32__CM_POST_CSC_C31__SHIFT 0x0
+#define CM3_CM_POST_CSC_C31_C32__CM_POST_CSC_C32__SHIFT 0x10
+#define CM3_CM_POST_CSC_C31_C32__CM_POST_CSC_C31_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_C31_C32__CM_POST_CSC_C32_MASK 0xFFFF0000L
+//CM3_CM_POST_CSC_C33_C34
+#define CM3_CM_POST_CSC_C33_C34__CM_POST_CSC_C33__SHIFT 0x0
+#define CM3_CM_POST_CSC_C33_C34__CM_POST_CSC_C34__SHIFT 0x10
+#define CM3_CM_POST_CSC_C33_C34__CM_POST_CSC_C33_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_C33_C34__CM_POST_CSC_C34_MASK 0xFFFF0000L
+//CM3_CM_POST_CSC_B_C11_C12
+#define CM3_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C11__SHIFT 0x0
+#define CM3_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C12__SHIFT 0x10
+#define CM3_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C11_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_B_C11_C12__CM_POST_CSC_B_C12_MASK 0xFFFF0000L
+//CM3_CM_POST_CSC_B_C13_C14
+#define CM3_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C13__SHIFT 0x0
+#define CM3_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C14__SHIFT 0x10
+#define CM3_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C13_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_B_C13_C14__CM_POST_CSC_B_C14_MASK 0xFFFF0000L
+//CM3_CM_POST_CSC_B_C21_C22
+#define CM3_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C21__SHIFT 0x0
+#define CM3_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C22__SHIFT 0x10
+#define CM3_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C21_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_B_C21_C22__CM_POST_CSC_B_C22_MASK 0xFFFF0000L
+//CM3_CM_POST_CSC_B_C23_C24
+#define CM3_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C23__SHIFT 0x0
+#define CM3_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C24__SHIFT 0x10
+#define CM3_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C23_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_B_C23_C24__CM_POST_CSC_B_C24_MASK 0xFFFF0000L
+//CM3_CM_POST_CSC_B_C31_C32
+#define CM3_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C31__SHIFT 0x0
+#define CM3_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C32__SHIFT 0x10
+#define CM3_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C31_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_B_C31_C32__CM_POST_CSC_B_C32_MASK 0xFFFF0000L
+//CM3_CM_POST_CSC_B_C33_C34
+#define CM3_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C33__SHIFT 0x0
+#define CM3_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C34__SHIFT 0x10
+#define CM3_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C33_MASK 0x0000FFFFL
+#define CM3_CM_POST_CSC_B_C33_C34__CM_POST_CSC_B_C34_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_CONTROL
+#define CM3_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_CURRENT__SHIFT 0x2
+#define CM3_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define CM3_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_CURRENT_MASK 0x0000000CL
+//CM3_CM_GAMUT_REMAP_C11_C12
+#define CM3_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_C13_C14
+#define CM3_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_C21_C22
+#define CM3_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_C23_C24
+#define CM3_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_C31_C32
+#define CM3_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_C33_C34
+#define CM3_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C11_C12
+#define CM3_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C13_C14
+#define CM3_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C21_C22
+#define CM3_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C23_C24
+#define CM3_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C31_C32
+#define CM3_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C33_C34
+#define CM3_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34_MASK 0xFFFF0000L
+//CM3_CM_BIAS_CR_R
+#define CM3_CM_BIAS_CR_R__CM_BIAS_CR_R__SHIFT 0x0
+#define CM3_CM_BIAS_CR_R__CM_BIAS_CR_R_MASK 0x0000FFFFL
+//CM3_CM_BIAS_Y_G_CB_B
+#define CM3_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G__SHIFT 0x0
+#define CM3_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B__SHIFT 0x10
+#define CM3_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G_MASK 0x0000FFFFL
+#define CM3_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B_MASK 0xFFFF0000L
+//CM3_CM_GAMCOR_CONTROL
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE__SHIFT 0x0
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT__SHIFT 0x2
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_PWL_DISABLE__SHIFT 0x3
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_CURRENT__SHIFT 0x4
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_CURRENT__SHIFT 0x6
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_MASK 0x00000003L
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_MASK 0x00000004L
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_PWL_DISABLE_MASK 0x00000008L
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_MODE_CURRENT_MASK 0x00000030L
+#define CM3_CM_GAMCOR_CONTROL__CM_GAMCOR_SELECT_CURRENT_MASK 0x00000040L
+//CM3_CM_GAMCOR_LUT_INDEX
+#define CM3_CM_GAMCOR_LUT_INDEX__CM_GAMCOR_LUT_INDEX__SHIFT 0x0
+#define CM3_CM_GAMCOR_LUT_INDEX__CM_GAMCOR_LUT_INDEX_MASK 0x000001FFL
+//CM3_CM_GAMCOR_LUT_DATA
+#define CM3_CM_GAMCOR_LUT_DATA__CM_GAMCOR_LUT_DATA__SHIFT 0x0
+#define CM3_CM_GAMCOR_LUT_DATA__CM_GAMCOR_LUT_DATA_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_LUT_CONTROL
+#define CM3_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define CM3_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define CM3_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_HOST_SEL__SHIFT 0x6
+#define CM3_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_CONFIG_MODE__SHIFT 0x7
+#define CM3_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define CM3_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define CM3_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_HOST_SEL_MASK 0x00000040L
+#define CM3_CM_GAMCOR_LUT_CONTROL__CM_GAMCOR_LUT_CONFIG_MODE_MASK 0x00000080L
+//CM3_CM_GAMCOR_RAMA_START_CNTL_B
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM3_CM_GAMCOR_RAMA_START_CNTL_G
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM3_CM_GAMCOR_RAMA_START_CNTL_R
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMA_START_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B
+#define CM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G
+#define CM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R
+#define CM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_START_SLOPE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMA_START_BASE_CNTL_B
+#define CM3_CM_GAMCOR_RAMA_START_BASE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_START_BASE_CNTL_B__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMA_START_BASE_CNTL_G
+#define CM3_CM_GAMCOR_RAMA_START_BASE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_START_BASE_CNTL_G__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMA_START_BASE_CNTL_R
+#define CM3_CM_GAMCOR_RAMA_START_BASE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_START_BASE_CNTL_R__CM_GAMCOR_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMA_END_CNTL1_B
+#define CM3_CM_GAMCOR_RAMA_END_CNTL1_B__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_END_CNTL1_B__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMA_END_CNTL2_B
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_B__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//CM3_CM_GAMCOR_RAMA_END_CNTL1_G
+#define CM3_CM_GAMCOR_RAMA_END_CNTL1_G__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_END_CNTL1_G__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMA_END_CNTL2_G
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_G__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//CM3_CM_GAMCOR_RAMA_END_CNTL1_R
+#define CM3_CM_GAMCOR_RAMA_END_CNTL1_R__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_END_CNTL1_R__CM_GAMCOR_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMA_END_CNTL2_R
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM3_CM_GAMCOR_RAMA_END_CNTL2_R__CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//CM3_CM_GAMCOR_RAMA_OFFSET_B
+#define CM3_CM_GAMCOR_RAMA_OFFSET_B__CM_GAMCOR_RAMA_OFFSET_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_OFFSET_B__CM_GAMCOR_RAMA_OFFSET_B_MASK 0x0007FFFFL
+//CM3_CM_GAMCOR_RAMA_OFFSET_G
+#define CM3_CM_GAMCOR_RAMA_OFFSET_G__CM_GAMCOR_RAMA_OFFSET_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_OFFSET_G__CM_GAMCOR_RAMA_OFFSET_G_MASK 0x0007FFFFL
+//CM3_CM_GAMCOR_RAMA_OFFSET_R
+#define CM3_CM_GAMCOR_RAMA_OFFSET_R__CM_GAMCOR_RAMA_OFFSET_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_OFFSET_R__CM_GAMCOR_RAMA_OFFSET_R_MASK 0x0007FFFFL
+//CM3_CM_GAMCOR_RAMA_REGION_0_1
+#define CM3_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_0_1__CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_2_3
+#define CM3_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_2_3__CM_GAMCOR_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_4_5
+#define CM3_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_4_5__CM_GAMCOR_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_6_7
+#define CM3_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_6_7__CM_GAMCOR_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_8_9
+#define CM3_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_8_9__CM_GAMCOR_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_10_11
+#define CM3_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_10_11__CM_GAMCOR_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_12_13
+#define CM3_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_12_13__CM_GAMCOR_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_14_15
+#define CM3_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_14_15__CM_GAMCOR_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_16_17
+#define CM3_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_16_17__CM_GAMCOR_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_18_19
+#define CM3_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_18_19__CM_GAMCOR_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_20_21
+#define CM3_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_20_21__CM_GAMCOR_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_22_23
+#define CM3_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_22_23__CM_GAMCOR_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_24_25
+#define CM3_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_24_25__CM_GAMCOR_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_26_27
+#define CM3_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_26_27__CM_GAMCOR_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_28_29
+#define CM3_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_28_29__CM_GAMCOR_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_30_31
+#define CM3_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_30_31__CM_GAMCOR_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMA_REGION_32_33
+#define CM3_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMA_REGION_32_33__CM_GAMCOR_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_START_CNTL_B
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM3_CM_GAMCOR_RAMB_START_CNTL_G
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM3_CM_GAMCOR_RAMB_START_CNTL_R
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM3_CM_GAMCOR_RAMB_START_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B
+#define CM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G
+#define CM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R
+#define CM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_START_SLOPE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMB_START_BASE_CNTL_B
+#define CM3_CM_GAMCOR_RAMB_START_BASE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_START_BASE_CNTL_B__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMB_START_BASE_CNTL_G
+#define CM3_CM_GAMCOR_RAMB_START_BASE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_START_BASE_CNTL_G__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMB_START_BASE_CNTL_R
+#define CM3_CM_GAMCOR_RAMB_START_BASE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_START_BASE_CNTL_R__CM_GAMCOR_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMB_END_CNTL1_B
+#define CM3_CM_GAMCOR_RAMB_END_CNTL1_B__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_END_CNTL1_B__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMB_END_CNTL2_B
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_B__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//CM3_CM_GAMCOR_RAMB_END_CNTL1_G
+#define CM3_CM_GAMCOR_RAMB_END_CNTL1_G__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_END_CNTL1_G__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMB_END_CNTL2_G
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_G__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//CM3_CM_GAMCOR_RAMB_END_CNTL1_R
+#define CM3_CM_GAMCOR_RAMB_END_CNTL1_R__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_END_CNTL1_R__CM_GAMCOR_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//CM3_CM_GAMCOR_RAMB_END_CNTL2_R
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM3_CM_GAMCOR_RAMB_END_CNTL2_R__CM_GAMCOR_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//CM3_CM_GAMCOR_RAMB_OFFSET_B
+#define CM3_CM_GAMCOR_RAMB_OFFSET_B__CM_GAMCOR_RAMB_OFFSET_B__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_OFFSET_B__CM_GAMCOR_RAMB_OFFSET_B_MASK 0x0007FFFFL
+//CM3_CM_GAMCOR_RAMB_OFFSET_G
+#define CM3_CM_GAMCOR_RAMB_OFFSET_G__CM_GAMCOR_RAMB_OFFSET_G__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_OFFSET_G__CM_GAMCOR_RAMB_OFFSET_G_MASK 0x0007FFFFL
+//CM3_CM_GAMCOR_RAMB_OFFSET_R
+#define CM3_CM_GAMCOR_RAMB_OFFSET_R__CM_GAMCOR_RAMB_OFFSET_R__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_OFFSET_R__CM_GAMCOR_RAMB_OFFSET_R_MASK 0x0007FFFFL
+//CM3_CM_GAMCOR_RAMB_REGION_0_1
+#define CM3_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_2_3
+#define CM3_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_2_3__CM_GAMCOR_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_4_5
+#define CM3_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_4_5__CM_GAMCOR_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_6_7
+#define CM3_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_6_7__CM_GAMCOR_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_8_9
+#define CM3_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_8_9__CM_GAMCOR_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_10_11
+#define CM3_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_10_11__CM_GAMCOR_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_12_13
+#define CM3_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_12_13__CM_GAMCOR_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_14_15
+#define CM3_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_14_15__CM_GAMCOR_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_16_17
+#define CM3_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_16_17__CM_GAMCOR_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_18_19
+#define CM3_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_18_19__CM_GAMCOR_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_20_21
+#define CM3_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_20_21__CM_GAMCOR_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_22_23
+#define CM3_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_22_23__CM_GAMCOR_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_24_25
+#define CM3_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_24_25__CM_GAMCOR_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_26_27
+#define CM3_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_26_27__CM_GAMCOR_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_28_29
+#define CM3_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_28_29__CM_GAMCOR_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_30_31
+#define CM3_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_30_31__CM_GAMCOR_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_GAMCOR_RAMB_REGION_32_33
+#define CM3_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_GAMCOR_RAMB_REGION_32_33__CM_GAMCOR_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_HDR_MULT_COEF
+#define CM3_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
+#define CM3_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK 0x0007FFFFL
+//CM3_CM_MEM_PWR_CTRL
+#define CM3_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_FORCE__SHIFT 0x0
+#define CM3_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_DIS__SHIFT 0x2
+#define CM3_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_FORCE_MASK 0x00000003L
+#define CM3_CM_MEM_PWR_CTRL__GAMCOR_MEM_PWR_DIS_MASK 0x00000004L
+//CM3_CM_MEM_PWR_STATUS
+#define CM3_CM_MEM_PWR_STATUS__GAMCOR_MEM_PWR_STATE__SHIFT 0x0
+#define CM3_CM_MEM_PWR_STATUS__GAMCOR_MEM_PWR_STATE_MASK 0x00000003L
+//CM3_CM_DEALPHA
+#define CM3_CM_DEALPHA__CM_DEALPHA_EN__SHIFT 0x0
+#define CM3_CM_DEALPHA__CM_DEALPHA_ABLND__SHIFT 0x1
+#define CM3_CM_DEALPHA__CM_DEALPHA_EN_MASK 0x00000001L
+#define CM3_CM_DEALPHA__CM_DEALPHA_ABLND_MASK 0x00000002L
+//CM3_CM_COEF_FORMAT
+#define CM3_CM_COEF_FORMAT__CM_BIAS_FORMAT__SHIFT 0x0
+#define CM3_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT__SHIFT 0x4
+#define CM3_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x8
+#define CM3_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
+#define CM3_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT_MASK 0x00000010L
+#define CM3_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+//CM3_CM_TEST_DEBUG_INDEX
+#define CM3_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM3_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM3_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM3_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//CM3_CM_TEST_DEBUG_DATA
+#define CM3_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0
+#define CM3_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+//CM3_DPP_CRC_VAL_R
+#define CM3_DPP_CRC_VAL_R__DPP_CRC_R_CR__SHIFT 0x0
+#define CM3_DPP_CRC_VAL_R__DPP_CRC_R_CR_MASK 0xFFFFFFFFL
+//CM3_DPP_CRC_VAL_G
+#define CM3_DPP_CRC_VAL_G__DPP_CRC_G_Y__SHIFT 0x0
+#define CM3_DPP_CRC_VAL_G__DPP_CRC_G_Y_MASK 0xFFFFFFFFL
+//CM3_DPP_CRC_VAL_B
+#define CM3_DPP_CRC_VAL_B__DPP_CRC_B_CB__SHIFT 0x0
+#define CM3_DPP_CRC_VAL_B__DPP_CRC_B_CB_MASK 0xFFFFFFFFL
+//CM3_DPP_CRC_VAL_A
+#define CM3_DPP_CRC_VAL_A__DPP_CRC_ALPHA__SHIFT 0x0
+#define CM3_DPP_CRC_VAL_A__DPP_CRC_ALPHA_MASK 0xFFFFFFFFL
+
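/*
 * Editorial sketch, not part of the patch: the auto-generated *__SHIFT /
 * *_MASK pairs above follow the usual shift-mask convention, so a packed
 * field is decoded as (val & MASK) >> SHIFT. The helper name below is
 * hypothetical, and 'val' is assumed to hold the raw
 * CM3_CM_GAMCOR_RAMB_REGION_0_1 register contents.
 */
static inline unsigned int cm3_ramb_region0_num_segments(unsigned int val)
{
	/* bits 14:12 of the packed region descriptor */
	return (val & CM3_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK) >>
	       CM3_CM_GAMCOR_RAMB_REGION_0_1__CM_GAMCOR_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT;
}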
+
+// addressBlock: dce_dc_dpp3_dispdec_dpp_top_dispdec
+//DPP_TOP3_DPP_CONTROL
+#define DPP_TOP3_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT 0x4
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_GATE_DISABLE__SHIFT 0x8
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE__SHIFT 0xa
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE__SHIFT 0xc
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_R_GATE_DISABLE__SHIFT 0xe
+#define DPP_TOP3_DPP_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x10
+#define DPP_TOP3_DPP_CONTROL__DISPCLK_G_GATE_DISABLE__SHIFT 0x12
+#define DPP_TOP3_DPP_CONTROL__DPP_FGCG_REP_DIS__SHIFT 0x18
+#define DPP_TOP3_DPP_CONTROL__DPP_TEST_CLK_SEL__SHIFT 0x1c
+#define DPP_TOP3_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK 0x00000010L
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE_MASK 0x00000400L
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE_MASK 0x00001000L
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_R_GATE_DISABLE_MASK 0x00004000L
+#define DPP_TOP3_DPP_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00010000L
+#define DPP_TOP3_DPP_CONTROL__DISPCLK_G_GATE_DISABLE_MASK 0x00040000L
+#define DPP_TOP3_DPP_CONTROL__DPP_FGCG_REP_DIS_MASK 0x01000000L
+#define DPP_TOP3_DPP_CONTROL__DPP_TEST_CLK_SEL_MASK 0x70000000L
+//DPP_TOP3_DPP_SOFT_RESET
+#define DPP_TOP3_DPP_SOFT_RESET__CNVC_SOFT_RESET__SHIFT 0x0
+#define DPP_TOP3_DPP_SOFT_RESET__DSCL_SOFT_RESET__SHIFT 0x4
+#define DPP_TOP3_DPP_SOFT_RESET__CM_SOFT_RESET__SHIFT 0x8
+#define DPP_TOP3_DPP_SOFT_RESET__OBUF_SOFT_RESET__SHIFT 0xc
+#define DPP_TOP3_DPP_SOFT_RESET__CNVC_SOFT_RESET_MASK 0x00000001L
+#define DPP_TOP3_DPP_SOFT_RESET__DSCL_SOFT_RESET_MASK 0x00000010L
+#define DPP_TOP3_DPP_SOFT_RESET__CM_SOFT_RESET_MASK 0x00000100L
+#define DPP_TOP3_DPP_SOFT_RESET__OBUF_SOFT_RESET_MASK 0x00001000L
+//DPP_TOP3_DPP_CRC_CTRL
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_EN__SHIFT 0x0
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CONT_EN__SHIFT 0x1
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING__SHIFT 0x2
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL__SHIFT 0x3
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_SRC_SEL__SHIFT 0x4
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_STEREO_EN__SHIFT 0x6
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE__SHIFT 0x7
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE__SHIFT 0x9
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL__SHIFT 0xb
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL__SHIFT 0xe
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_MASK__SHIFT 0x10
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_EN_MASK 0x00000001L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CONT_EN_MASK 0x00000002L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL_MASK 0x00000008L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_SRC_SEL_MASK 0x00000030L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_STEREO_EN_MASK 0x00000040L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE_MASK 0x00000180L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE_MASK 0x00000600L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL_MASK 0x00003800L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL_MASK 0x0000C000L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_MASK_MASK 0xFFFF0000L
+//DPP_TOP3_HOST_READ_CONTROL
+#define DPP_TOP3_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define DPP_TOP3_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+
+
+// addressBlock: dce_dc_dpp3_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON14_PERFCOUNTER_CNTL
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON14_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON14_PERFCOUNTER_CNTL2
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON14_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON14_PERFCOUNTER_STATE
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON14_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON14_PERFMON_CNTL
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON14_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON14_PERFMON_CNTL2
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON14_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON14_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON14_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON14_PERFMON_CVALUE_LOW
+#define DC_PERFMON14_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON14_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON14_PERFMON_HI
+#define DC_PERFMON14_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON14_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON14_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON14_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON14_PERFMON_LOW
+#define DC_PERFMON14_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON14_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_mpc_mpcc0_dispdec
+//MPCC0_MPCC_TOP_SEL
+#define MPCC0_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC0_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+//MPCC0_MPCC_BOT_SEL
+#define MPCC0_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC0_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+//MPCC0_MPCC_OPP_ID
+#define MPCC0_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC0_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+//MPCC0_MPCC_CONTROL
+#define MPCC0_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC0_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC0_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC0_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC0_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC0_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC0_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC0_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC0_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC0_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC0_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC0_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC0_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC0_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC0_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC0_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+//MPCC0_MPCC_SM_CONTROL
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+//MPCC0_MPCC_UPDATE_LOCK_SEL
+#define MPCC0_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC0_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC0_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC0_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+//MPCC0_MPCC_TOP_GAIN
+#define MPCC0_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC0_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+//MPCC0_MPCC_BOT_GAIN_INSIDE
+#define MPCC0_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC0_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+//MPCC0_MPCC_BOT_GAIN_OUTSIDE
+#define MPCC0_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC0_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+//MPCC0_MPCC_MOVABLE_CM_LOCATION_CONTROL
+#define MPCC0_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL__SHIFT 0x0
+#define MPCC0_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_CURRENT__SHIFT 0x4
+#define MPCC0_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_MASK 0x00000001L
+#define MPCC0_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_CURRENT_MASK 0x00000010L
+//MPCC0_MPCC_BG_R_CR
+#define MPCC0_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC0_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+//MPCC0_MPCC_BG_G_Y
+#define MPCC0_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC0_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+//MPCC0_MPCC_BG_B_CB
+#define MPCC0_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC0_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+//MPCC0_MPCC_MEM_PWR_CTRL
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_LOW_PWR_MODE__SHIFT 0x4
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x8
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_LOW_PWR_MODE_MASK 0x00000030L
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000300L
+//MPCC0_MPCC_STATUS
+#define MPCC0_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC0_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC0_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC0_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC0_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC0_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+
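/*
 * Editorial sketch, not part of the patch: writes use the complementary
 * pattern of clearing a field with its mask and or-ing in the shifted value.
 * 'reg' and 'alpha' are placeholders rather than driver symbols; for
 * example, updating the 8-bit MPCC0 global alpha field in a cached register
 * value looks like this:
 */
static inline unsigned int mpcc0_set_global_alpha(unsigned int reg, unsigned int alpha)
{
	reg &= ~MPCC0_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK;
	reg |= (alpha << MPCC0_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT) &
	       MPCC0_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK;
	return reg;
}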
+
+// addressBlock: dce_dc_mpc_mpcc1_dispdec
+//MPCC1_MPCC_TOP_SEL
+#define MPCC1_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC1_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+//MPCC1_MPCC_BOT_SEL
+#define MPCC1_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC1_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+//MPCC1_MPCC_OPP_ID
+#define MPCC1_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC1_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+//MPCC1_MPCC_CONTROL
+#define MPCC1_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC1_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC1_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC1_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC1_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC1_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC1_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC1_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC1_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC1_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC1_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC1_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC1_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC1_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC1_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC1_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+//MPCC1_MPCC_SM_CONTROL
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+//MPCC1_MPCC_UPDATE_LOCK_SEL
+#define MPCC1_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC1_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC1_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC1_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+//MPCC1_MPCC_TOP_GAIN
+#define MPCC1_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC1_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+//MPCC1_MPCC_BOT_GAIN_INSIDE
+#define MPCC1_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC1_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+//MPCC1_MPCC_BOT_GAIN_OUTSIDE
+#define MPCC1_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC1_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+//MPCC1_MPCC_MOVABLE_CM_LOCATION_CONTROL
+#define MPCC1_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL__SHIFT 0x0
+#define MPCC1_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_CURRENT__SHIFT 0x4
+#define MPCC1_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_MASK 0x00000001L
+#define MPCC1_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_CURRENT_MASK 0x00000010L
+//MPCC1_MPCC_BG_R_CR
+#define MPCC1_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC1_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+//MPCC1_MPCC_BG_G_Y
+#define MPCC1_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC1_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+//MPCC1_MPCC_BG_B_CB
+#define MPCC1_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC1_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+//MPCC1_MPCC_MEM_PWR_CTRL
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_LOW_PWR_MODE__SHIFT 0x4
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x8
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_LOW_PWR_MODE_MASK 0x00000030L
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000300L
+//MPCC1_MPCC_STATUS
+#define MPCC1_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC1_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC1_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC1_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC1_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC1_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+
+
+// addressBlock: dce_dc_mpc_mpcc2_dispdec
+//MPCC2_MPCC_TOP_SEL
+#define MPCC2_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC2_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+//MPCC2_MPCC_BOT_SEL
+#define MPCC2_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC2_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+//MPCC2_MPCC_OPP_ID
+#define MPCC2_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC2_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+//MPCC2_MPCC_CONTROL
+#define MPCC2_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC2_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC2_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC2_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC2_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC2_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC2_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC2_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC2_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC2_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC2_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC2_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC2_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC2_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC2_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC2_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+//MPCC2_MPCC_SM_CONTROL
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+//MPCC2_MPCC_UPDATE_LOCK_SEL
+#define MPCC2_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC2_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC2_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC2_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+//MPCC2_MPCC_TOP_GAIN
+#define MPCC2_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC2_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+//MPCC2_MPCC_BOT_GAIN_INSIDE
+#define MPCC2_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC2_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+//MPCC2_MPCC_BOT_GAIN_OUTSIDE
+#define MPCC2_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC2_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+//MPCC2_MPCC_MOVABLE_CM_LOCATION_CONTROL
+#define MPCC2_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL__SHIFT 0x0
+#define MPCC2_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_CURRENT__SHIFT 0x4
+#define MPCC2_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_MASK 0x00000001L
+#define MPCC2_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_CURRENT_MASK 0x00000010L
+//MPCC2_MPCC_BG_R_CR
+#define MPCC2_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC2_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+//MPCC2_MPCC_BG_G_Y
+#define MPCC2_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC2_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+//MPCC2_MPCC_BG_B_CB
+#define MPCC2_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC2_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+//MPCC2_MPCC_MEM_PWR_CTRL
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_LOW_PWR_MODE__SHIFT 0x4
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x8
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_LOW_PWR_MODE_MASK 0x00000030L
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000300L
+//MPCC2_MPCC_STATUS
+#define MPCC2_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC2_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC2_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC2_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC2_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC2_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+
+
+// addressBlock: dce_dc_mpc_mpcc3_dispdec
+//MPCC3_MPCC_TOP_SEL
+#define MPCC3_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC3_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+//MPCC3_MPCC_BOT_SEL
+#define MPCC3_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC3_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+//MPCC3_MPCC_OPP_ID
+#define MPCC3_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC3_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+//MPCC3_MPCC_CONTROL
+#define MPCC3_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC3_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC3_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC3_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC3_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC3_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC3_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC3_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC3_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC3_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC3_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC3_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC3_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC3_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC3_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC3_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+//MPCC3_MPCC_SM_CONTROL
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+//MPCC3_MPCC_UPDATE_LOCK_SEL
+#define MPCC3_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC3_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC3_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC3_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+//MPCC3_MPCC_TOP_GAIN
+#define MPCC3_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC3_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+//MPCC3_MPCC_BOT_GAIN_INSIDE
+#define MPCC3_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC3_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+//MPCC3_MPCC_BOT_GAIN_OUTSIDE
+#define MPCC3_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC3_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+//MPCC3_MPCC_MOVABLE_CM_LOCATION_CONTROL
+#define MPCC3_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL__SHIFT 0x0
+#define MPCC3_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_CURRENT__SHIFT 0x4
+#define MPCC3_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_MASK 0x00000001L
+#define MPCC3_MPCC_MOVABLE_CM_LOCATION_CONTROL__MPCC_MOVABLE_CM_LOCATION_CNTL_CURRENT_MASK 0x00000010L
+//MPCC3_MPCC_BG_R_CR
+#define MPCC3_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC3_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+//MPCC3_MPCC_BG_G_Y
+#define MPCC3_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC3_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+//MPCC3_MPCC_BG_B_CB
+#define MPCC3_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC3_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+//MPCC3_MPCC_MEM_PWR_CTRL
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_LOW_PWR_MODE__SHIFT 0x4
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x8
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_LOW_PWR_MODE_MASK 0x00000030L
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000300L
+//MPCC3_MPCC_STATUS
+#define MPCC3_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC3_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC3_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC3_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC3_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC3_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+
+
+// addressBlock: dce_dc_mpc_mpc_cfg_dispdec
+//MPC_CLOCK_CONTROL
+#define MPC_CLOCK_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x1
+#define MPC_CLOCK_CONTROL__MPC_TEST_CLK_SEL__SHIFT 0x4
+#define MPC_CLOCK_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00000002L
+#define MPC_CLOCK_CONTROL__MPC_TEST_CLK_SEL_MASK 0x00000030L
+//MPC_SOFT_RESET
+#define MPC_SOFT_RESET__MPCC0_SOFT_RESET__SHIFT 0x0
+#define MPC_SOFT_RESET__MPCC1_SOFT_RESET__SHIFT 0x1
+#define MPC_SOFT_RESET__MPCC2_SOFT_RESET__SHIFT 0x2
+#define MPC_SOFT_RESET__MPCC3_SOFT_RESET__SHIFT 0x3
+#define MPC_SOFT_RESET__MPC_SFR0_SOFT_RESET__SHIFT 0xa
+#define MPC_SOFT_RESET__MPC_SFR1_SOFT_RESET__SHIFT 0xb
+#define MPC_SOFT_RESET__MPC_SFR2_SOFT_RESET__SHIFT 0xc
+#define MPC_SOFT_RESET__MPC_SFR3_SOFT_RESET__SHIFT 0xd
+#define MPC_SOFT_RESET__MPC_SFT0_SOFT_RESET__SHIFT 0x14
+#define MPC_SOFT_RESET__MPC_SFT1_SOFT_RESET__SHIFT 0x15
+#define MPC_SOFT_RESET__MPC_SFT2_SOFT_RESET__SHIFT 0x16
+#define MPC_SOFT_RESET__MPC_SFT3_SOFT_RESET__SHIFT 0x17
+#define MPC_SOFT_RESET__MPC_SOFT_RESET__SHIFT 0x1f
+#define MPC_SOFT_RESET__MPCC0_SOFT_RESET_MASK 0x00000001L
+#define MPC_SOFT_RESET__MPCC1_SOFT_RESET_MASK 0x00000002L
+#define MPC_SOFT_RESET__MPCC2_SOFT_RESET_MASK 0x00000004L
+#define MPC_SOFT_RESET__MPCC3_SOFT_RESET_MASK 0x00000008L
+#define MPC_SOFT_RESET__MPC_SFR0_SOFT_RESET_MASK 0x00000400L
+#define MPC_SOFT_RESET__MPC_SFR1_SOFT_RESET_MASK 0x00000800L
+#define MPC_SOFT_RESET__MPC_SFR2_SOFT_RESET_MASK 0x00001000L
+#define MPC_SOFT_RESET__MPC_SFR3_SOFT_RESET_MASK 0x00002000L
+#define MPC_SOFT_RESET__MPC_SFT0_SOFT_RESET_MASK 0x00100000L
+#define MPC_SOFT_RESET__MPC_SFT1_SOFT_RESET_MASK 0x00200000L
+#define MPC_SOFT_RESET__MPC_SFT2_SOFT_RESET_MASK 0x00400000L
+#define MPC_SOFT_RESET__MPC_SFT3_SOFT_RESET_MASK 0x00800000L
+#define MPC_SOFT_RESET__MPC_SOFT_RESET_MASK 0x80000000L
+//MPC_CRC_CTRL
+#define MPC_CRC_CTRL__MPC_CRC_EN__SHIFT 0x0
+#define MPC_CRC_CTRL__MPC_CRC_CONT_EN__SHIFT 0x4
+#define MPC_CRC_CTRL__MPC_CRC_STEREO_MODE__SHIFT 0x8
+#define MPC_CRC_CTRL__MPC_CRC_STEREO_EN__SHIFT 0xa
+#define MPC_CRC_CTRL__MPC_CRC_INTERLACE_MODE__SHIFT 0xc
+#define MPC_CRC_CTRL__MPC_CRC_SRC_SEL__SHIFT 0x18
+#define MPC_CRC_CTRL__MPC_CRC_ONE_SHOT_PENDING__SHIFT 0x1c
+#define MPC_CRC_CTRL__MPC_CRC_UPDATE_ENABLED__SHIFT 0x1e
+#define MPC_CRC_CTRL__MPC_CRC_UPDATE_LOCK__SHIFT 0x1f
+#define MPC_CRC_CTRL__MPC_CRC_EN_MASK 0x00000001L
+#define MPC_CRC_CTRL__MPC_CRC_CONT_EN_MASK 0x00000010L
+#define MPC_CRC_CTRL__MPC_CRC_STEREO_MODE_MASK 0x00000300L
+#define MPC_CRC_CTRL__MPC_CRC_STEREO_EN_MASK 0x00000400L
+#define MPC_CRC_CTRL__MPC_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define MPC_CRC_CTRL__MPC_CRC_SRC_SEL_MASK 0x03000000L
+#define MPC_CRC_CTRL__MPC_CRC_ONE_SHOT_PENDING_MASK 0x10000000L
+#define MPC_CRC_CTRL__MPC_CRC_UPDATE_ENABLED_MASK 0x40000000L
+#define MPC_CRC_CTRL__MPC_CRC_UPDATE_LOCK_MASK 0x80000000L
+//MPC_CRC_SEL_CONTROL
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_DPP_SEL__SHIFT 0x0
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_OPP_SEL__SHIFT 0x4
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_DWB_SEL__SHIFT 0x8
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_MASK__SHIFT 0x10
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_DPP_SEL_MASK 0x0000000FL
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_OPP_SEL_MASK 0x000000F0L
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_DWB_SEL_MASK 0x00000300L
+#define MPC_CRC_SEL_CONTROL__MPC_CRC_MASK_MASK 0xFFFF0000L
+//MPC_PERFMON_EVENT_CTRL
+#define MPC_PERFMON_EVENT_CTRL__MPC_PERFMON_EVENT_EN__SHIFT 0x0
+#define MPC_PERFMON_EVENT_CTRL__MPC_PERFMON_EVENT_EN_MASK 0x00000001L
+//MPC_BYPASS_BG_AR
+#define MPC_BYPASS_BG_AR__MPC_BYPASS_BG_ALPHA__SHIFT 0x0
+#define MPC_BYPASS_BG_AR__MPC_BYPASS_BG_R_CR__SHIFT 0x10
+#define MPC_BYPASS_BG_AR__MPC_BYPASS_BG_ALPHA_MASK 0x0000FFFFL
+#define MPC_BYPASS_BG_AR__MPC_BYPASS_BG_R_CR_MASK 0xFFFF0000L
+//MPC_BYPASS_BG_GB
+#define MPC_BYPASS_BG_GB__MPC_BYPASS_BG_G_Y__SHIFT 0x0
+#define MPC_BYPASS_BG_GB__MPC_BYPASS_BG_B_CB__SHIFT 0x10
+#define MPC_BYPASS_BG_GB__MPC_BYPASS_BG_G_Y_MASK 0x0000FFFFL
+#define MPC_BYPASS_BG_GB__MPC_BYPASS_BG_B_CB_MASK 0xFFFF0000L
+//MPC_HOST_READ_CONTROL
+#define MPC_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define MPC_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+//MPC_DPP_PENDING_STATUS
+#define MPC_DPP_PENDING_STATUS__IN_DPP0_SURFACE_UPDATE_PENDING__SHIFT 0x0
+#define MPC_DPP_PENDING_STATUS__IN_DPP0_CONFIG_UPDATE_PENDING__SHIFT 0x1
+#define MPC_DPP_PENDING_STATUS__IN_DPP0_CURSOR_UPDATE_PENDING__SHIFT 0x2
+#define MPC_DPP_PENDING_STATUS__IN_DPP1_SURFACE_UPDATE_PENDING__SHIFT 0x4
+#define MPC_DPP_PENDING_STATUS__IN_DPP1_CONFIG_UPDATE_PENDING__SHIFT 0x5
+#define MPC_DPP_PENDING_STATUS__IN_DPP1_CURSOR_UPDATE_PENDING__SHIFT 0x6
+#define MPC_DPP_PENDING_STATUS__IN_DPP2_SURFACE_UPDATE_PENDING__SHIFT 0x8
+#define MPC_DPP_PENDING_STATUS__IN_DPP2_CONFIG_UPDATE_PENDING__SHIFT 0x9
+#define MPC_DPP_PENDING_STATUS__IN_DPP2_CURSOR_UPDATE_PENDING__SHIFT 0xa
+#define MPC_DPP_PENDING_STATUS__IN_DPP3_SURFACE_UPDATE_PENDING__SHIFT 0xc
+#define MPC_DPP_PENDING_STATUS__IN_DPP3_CONFIG_UPDATE_PENDING__SHIFT 0xd
+#define MPC_DPP_PENDING_STATUS__IN_DPP3_CURSOR_UPDATE_PENDING__SHIFT 0xe
+#define MPC_DPP_PENDING_STATUS__IN_DPP0_SURFACE_UPDATE_PENDING_MASK 0x00000001L
+#define MPC_DPP_PENDING_STATUS__IN_DPP0_CONFIG_UPDATE_PENDING_MASK 0x00000002L
+#define MPC_DPP_PENDING_STATUS__IN_DPP0_CURSOR_UPDATE_PENDING_MASK 0x00000004L
+#define MPC_DPP_PENDING_STATUS__IN_DPP1_SURFACE_UPDATE_PENDING_MASK 0x00000010L
+#define MPC_DPP_PENDING_STATUS__IN_DPP1_CONFIG_UPDATE_PENDING_MASK 0x00000020L
+#define MPC_DPP_PENDING_STATUS__IN_DPP1_CURSOR_UPDATE_PENDING_MASK 0x00000040L
+#define MPC_DPP_PENDING_STATUS__IN_DPP2_SURFACE_UPDATE_PENDING_MASK 0x00000100L
+#define MPC_DPP_PENDING_STATUS__IN_DPP2_CONFIG_UPDATE_PENDING_MASK 0x00000200L
+#define MPC_DPP_PENDING_STATUS__IN_DPP2_CURSOR_UPDATE_PENDING_MASK 0x00000400L
+#define MPC_DPP_PENDING_STATUS__IN_DPP3_SURFACE_UPDATE_PENDING_MASK 0x00001000L
+#define MPC_DPP_PENDING_STATUS__IN_DPP3_CONFIG_UPDATE_PENDING_MASK 0x00002000L
+#define MPC_DPP_PENDING_STATUS__IN_DPP3_CURSOR_UPDATE_PENDING_MASK 0x00004000L
+//MPC_PENDING_STATUS_MISC
+#define MPC_PENDING_STATUS_MISC__OUT_OPP0_CONFIG_UPDATE_PENDING__SHIFT 0x0
+#define MPC_PENDING_STATUS_MISC__OUT_OPP1_CONFIG_UPDATE_PENDING__SHIFT 0x1
+#define MPC_PENDING_STATUS_MISC__OUT_OPP2_CONFIG_UPDATE_PENDING__SHIFT 0x2
+#define MPC_PENDING_STATUS_MISC__OUT_OPP3_CONFIG_UPDATE_PENDING__SHIFT 0x3
+#define MPC_PENDING_STATUS_MISC__MPCC0_CONFIG_UPDATE_PENDING__SHIFT 0x8
+#define MPC_PENDING_STATUS_MISC__MPCC1_CONFIG_UPDATE_PENDING__SHIFT 0x9
+#define MPC_PENDING_STATUS_MISC__MPCC2_CONFIG_UPDATE_PENDING__SHIFT 0xa
+#define MPC_PENDING_STATUS_MISC__MPCC3_CONFIG_UPDATE_PENDING__SHIFT 0xb
+#define MPC_PENDING_STATUS_MISC__IN_DWB0_CONFIG_UPDATE_PENDING__SHIFT 0x10
+#define MPC_PENDING_STATUS_MISC__OUT_OPP0_CONFIG_UPDATE_PENDING_MASK 0x00000001L
+#define MPC_PENDING_STATUS_MISC__OUT_OPP1_CONFIG_UPDATE_PENDING_MASK 0x00000002L
+#define MPC_PENDING_STATUS_MISC__OUT_OPP2_CONFIG_UPDATE_PENDING_MASK 0x00000004L
+#define MPC_PENDING_STATUS_MISC__OUT_OPP3_CONFIG_UPDATE_PENDING_MASK 0x00000008L
+#define MPC_PENDING_STATUS_MISC__MPCC0_CONFIG_UPDATE_PENDING_MASK 0x00000100L
+#define MPC_PENDING_STATUS_MISC__MPCC1_CONFIG_UPDATE_PENDING_MASK 0x00000200L
+#define MPC_PENDING_STATUS_MISC__MPCC2_CONFIG_UPDATE_PENDING_MASK 0x00000400L
+#define MPC_PENDING_STATUS_MISC__MPCC3_CONFIG_UPDATE_PENDING_MASK 0x00000800L
+#define MPC_PENDING_STATUS_MISC__IN_DWB0_CONFIG_UPDATE_PENDING_MASK 0x00010000L
+//ADR_CFG_CUR_VUPDATE_LOCK_SET0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET0__ADR_CFG_CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET0__ADR_CFG_CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_VUPDATE_LOCK_SET0
+#define ADR_CFG_VUPDATE_LOCK_SET0__ADR_CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_VUPDATE_LOCK_SET0__ADR_CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_VUPDATE_LOCK_SET0
+#define ADR_VUPDATE_LOCK_SET0__ADR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_VUPDATE_LOCK_SET0__ADR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CFG_VUPDATE_LOCK_SET0
+#define CFG_VUPDATE_LOCK_SET0__CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CFG_VUPDATE_LOCK_SET0__CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CUR_VUPDATE_LOCK_SET0
+#define CUR_VUPDATE_LOCK_SET0__CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CUR_VUPDATE_LOCK_SET0__CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_CUR_VUPDATE_LOCK_SET1
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET1__ADR_CFG_CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET1__ADR_CFG_CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_VUPDATE_LOCK_SET1
+#define ADR_CFG_VUPDATE_LOCK_SET1__ADR_CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_VUPDATE_LOCK_SET1__ADR_CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_VUPDATE_LOCK_SET1
+#define ADR_VUPDATE_LOCK_SET1__ADR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_VUPDATE_LOCK_SET1__ADR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CFG_VUPDATE_LOCK_SET1
+#define CFG_VUPDATE_LOCK_SET1__CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CFG_VUPDATE_LOCK_SET1__CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CUR_VUPDATE_LOCK_SET1
+#define CUR_VUPDATE_LOCK_SET1__CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CUR_VUPDATE_LOCK_SET1__CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_CUR_VUPDATE_LOCK_SET2
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET2__ADR_CFG_CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET2__ADR_CFG_CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_VUPDATE_LOCK_SET2
+#define ADR_CFG_VUPDATE_LOCK_SET2__ADR_CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_VUPDATE_LOCK_SET2__ADR_CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_VUPDATE_LOCK_SET2
+#define ADR_VUPDATE_LOCK_SET2__ADR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_VUPDATE_LOCK_SET2__ADR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CFG_VUPDATE_LOCK_SET2
+#define CFG_VUPDATE_LOCK_SET2__CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CFG_VUPDATE_LOCK_SET2__CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CUR_VUPDATE_LOCK_SET2
+#define CUR_VUPDATE_LOCK_SET2__CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CUR_VUPDATE_LOCK_SET2__CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_CUR_VUPDATE_LOCK_SET3
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET3__ADR_CFG_CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET3__ADR_CFG_CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_VUPDATE_LOCK_SET3
+#define ADR_CFG_VUPDATE_LOCK_SET3__ADR_CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_VUPDATE_LOCK_SET3__ADR_CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_VUPDATE_LOCK_SET3
+#define ADR_VUPDATE_LOCK_SET3__ADR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_VUPDATE_LOCK_SET3__ADR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CFG_VUPDATE_LOCK_SET3
+#define CFG_VUPDATE_LOCK_SET3__CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CFG_VUPDATE_LOCK_SET3__CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CUR_VUPDATE_LOCK_SET3
+#define CUR_VUPDATE_LOCK_SET3__CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CUR_VUPDATE_LOCK_SET3__CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//MPC_CRC_RESULT_A
+#define MPC_CRC_RESULT_A__MPC_CRC_RESULT_A__SHIFT 0x0
+#define MPC_CRC_RESULT_A__MPC_CRC_RESULT_A_MASK 0xFFFFFFFFL
+//MPC_CRC_RESULT_R
+#define MPC_CRC_RESULT_R__MPC_CRC_RESULT_R__SHIFT 0x0
+#define MPC_CRC_RESULT_R__MPC_CRC_RESULT_R_MASK 0xFFFFFFFFL
+//MPC_CRC_RESULT_G
+#define MPC_CRC_RESULT_G__MPC_CRC_RESULT_G__SHIFT 0x0
+#define MPC_CRC_RESULT_G__MPC_CRC_RESULT_G_MASK 0xFFFFFFFFL
+//MPC_CRC_RESULT_B
+#define MPC_CRC_RESULT_B__MPC_CRC_RESULT_B__SHIFT 0x0
+#define MPC_CRC_RESULT_B__MPC_CRC_RESULT_B_MASK 0xFFFFFFFFL
+//MPC_DWB0_MUX
+#define MPC_DWB0_MUX__MPC_DWB0_MUX__SHIFT 0x0
+#define MPC_DWB0_MUX__MPC_DWB0_MUX_STATUS__SHIFT 0x4
+#define MPC_DWB0_MUX__MPC_DWB0_MUX_MASK 0x0000000FL
+#define MPC_DWB0_MUX__MPC_DWB0_MUX_STATUS_MASK 0x000000F0L
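[Editorial aside, not part of the patch: each register field above is described by a paired __SHIFT/_MASK macro, following the usual amdgpu register-header convention of mask-then-shift access. A minimal, self-contained sketch of how such a pair is consumed, using the MPC_DWB0_MUX fields; the get_field()/set_field() helpers below are illustrative stand-ins, not definitions from this header or from amdgpu.]

#include <linux/types.h>

/* Illustrative helpers: extract or replace one field in a register value. */
static inline u32 get_field(u32 val, u32 mask, u32 shift)
{
	return (val & mask) >> shift;
}

static inline u32 set_field(u32 val, u32 mask, u32 shift, u32 field)
{
	return (val & ~mask) | ((field << shift) & mask);
}

/* Example: program the DWB0 input mux, then read back its status field. */
static u32 mpc_dwb0_select(u32 regval, u32 mux)
{
	regval = set_field(regval, MPC_DWB0_MUX__MPC_DWB0_MUX_MASK,
			   MPC_DWB0_MUX__MPC_DWB0_MUX__SHIFT, mux);
	return get_field(regval, MPC_DWB0_MUX__MPC_DWB0_MUX_STATUS_MASK,
			 MPC_DWB0_MUX__MPC_DWB0_MUX_STATUS__SHIFT);
}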
+
+
+// addressBlock: dce_dc_mpc_mpc_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON15_PERFCOUNTER_CNTL
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON15_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON15_PERFCOUNTER_CNTL2
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON15_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON15_PERFCOUNTER_STATE
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON15_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON15_PERFMON_CNTL
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON15_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON15_PERFMON_CNTL2
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON15_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON15_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON15_PERFMON_CVALUE_LOW
+#define DC_PERFMON15_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON15_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON15_PERFMON_HI
+#define DC_PERFMON15_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON15_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON15_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON15_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON15_PERFMON_LOW
+#define DC_PERFMON15_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON15_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
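[Editorial aside, not part of the patch: the perfmon counted value is split across two registers, a 32-bit PERFMON_CVALUE_LOW word and a 16-bit PERFMON_CVALUE_HI field inside PERFMON_CVALUE_INT_MISC. Assuming the two halves concatenate into one counted value, which the field names suggest but this header does not state, a sketch of reassembling it:]

/*
 * Assumption: PERFMON_CVALUE_HI holds the bits above the 32-bit
 * PERFMON_CVALUE_LOW word. Treat this as a sketch of the naming
 * convention, not a statement of the hardware contract.
 */
static inline u64 dc_perfmon15_cvalue(u32 cvalue_int_misc, u32 cvalue_low)
{
	u64 hi = (cvalue_int_misc &
		  DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK) >>
		 DC_PERFMON15_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT;

	return (hi << 32) | cvalue_low;
}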
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam0_dispdec
+//MPCC_OGAM0_MPCC_OGAM_CONTROL
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT__SHIFT 0x2
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_PWL_DISABLE__SHIFT 0x3
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_CURRENT__SHIFT 0x7
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_CURRENT__SHIFT 0x9
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_MASK 0x00000003L
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_MASK 0x00000004L
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_PWL_DISABLE_MASK 0x00000008L
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_CURRENT_MASK 0x00000180L
+#define MPCC_OGAM0_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_CURRENT_MASK 0x00000200L
+//MPCC_OGAM0_MPCC_OGAM_LUT_INDEX
+#define MPCC_OGAM0_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+//MPCC_OGAM0_MPCC_OGAM_LUT_DATA
+#define MPCC_OGAM0_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL
+#define MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_HOST_SEL__SHIFT 0x6
+#define MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_CONFIG_MODE__SHIFT 0x7
+#define MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_HOST_SEL_MASK 0x00000040L
+#define MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_CONFIG_MODE_MASK 0x00000080L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_B__MPCC_OGAM_RAMA_OFFSET_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_B__MPCC_OGAM_RAMA_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_G__MPCC_OGAM_RAMA_OFFSET_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_G__MPCC_OGAM_RAMA_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_R__MPCC_OGAM_RAMA_OFFSET_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_R__MPCC_OGAM_RAMA_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_BASE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_B__MPCC_OGAM_RAMB_OFFSET_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_B__MPCC_OGAM_RAMB_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_G__MPCC_OGAM_RAMB_OFFSET_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_G__MPCC_OGAM_RAMB_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_R__MPCC_OGAM_RAMB_OFFSET_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_OFFSET_R__MPCC_OGAM_RAMB_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_GAMUT_REMAP_COEF_FORMAT
+#define MPCC_OGAM0_MPCC_GAMUT_REMAP_COEF_FORMAT__MPCC_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_GAMUT_REMAP_COEF_FORMAT__MPCC_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000001L
+//MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE
+#define MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_CURRENT__SHIFT 0x7
+#define MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_CURRENT_MASK 0x00000180L
+//MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C11_A__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C12_A__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C11_A_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C12_A_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_A
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C13_A__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C14_A__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C13_A_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C14_A_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_A
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C21_A__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C22_A__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C21_A_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C22_A_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_A
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C23_A__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C24_A__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C23_A_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C24_A_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_A
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C31_A__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C32_A__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C31_A_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C32_A_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_A
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C33_A__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C34_A__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C33_A_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C34_A_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_B
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C11_B__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C12_B__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C11_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C12_B_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_B
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C13_B__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C14_B__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C13_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C14_B_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_B
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C21_B__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C22_B__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C21_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C22_B_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_B
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C23_B__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C24_B__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C23_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C24_B_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_B
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C31_B__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C32_B__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C31_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C32_B_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_B
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C33_B__SHIFT 0x0
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C34_B__SHIFT 0x10
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C33_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C34_B_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam1_dispdec
+//MPCC_OGAM1_MPCC_OGAM_CONTROL
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT__SHIFT 0x2
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_PWL_DISABLE__SHIFT 0x3
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_CURRENT__SHIFT 0x7
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_CURRENT__SHIFT 0x9
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_MASK 0x00000003L
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_MASK 0x00000004L
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_PWL_DISABLE_MASK 0x00000008L
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_CURRENT_MASK 0x00000180L
+#define MPCC_OGAM1_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_CURRENT_MASK 0x00000200L
+//MPCC_OGAM1_MPCC_OGAM_LUT_INDEX
+#define MPCC_OGAM1_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+//MPCC_OGAM1_MPCC_OGAM_LUT_DATA
+#define MPCC_OGAM1_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_LUT_CONTROL
+#define MPCC_OGAM1_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define MPCC_OGAM1_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_HOST_SEL__SHIFT 0x6
+#define MPCC_OGAM1_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_CONFIG_MODE__SHIFT 0x7
+#define MPCC_OGAM1_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define MPCC_OGAM1_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define MPCC_OGAM1_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_HOST_SEL_MASK 0x00000040L
+#define MPCC_OGAM1_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_CONFIG_MODE_MASK 0x00000080L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_BASE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_B__MPCC_OGAM_RAMA_OFFSET_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_B__MPCC_OGAM_RAMA_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_G__MPCC_OGAM_RAMA_OFFSET_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_G__MPCC_OGAM_RAMA_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_R__MPCC_OGAM_RAMA_OFFSET_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_OFFSET_R__MPCC_OGAM_RAMA_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_BASE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_B__MPCC_OGAM_RAMB_OFFSET_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_B__MPCC_OGAM_RAMB_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_G__MPCC_OGAM_RAMB_OFFSET_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_G__MPCC_OGAM_RAMB_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_R__MPCC_OGAM_RAMB_OFFSET_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_OFFSET_R__MPCC_OGAM_RAMB_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_GAMUT_REMAP_COEF_FORMAT
+#define MPCC_OGAM1_MPCC_GAMUT_REMAP_COEF_FORMAT__MPCC_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_GAMUT_REMAP_COEF_FORMAT__MPCC_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000001L
+//MPCC_OGAM1_MPCC_GAMUT_REMAP_MODE
+#define MPCC_OGAM1_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_CURRENT__SHIFT 0x7
+#define MPCC_OGAM1_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define MPCC_OGAM1_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_CURRENT_MASK 0x00000180L
+//MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_A
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C11_A__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C12_A__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C11_A_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C12_A_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_A
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C13_A__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C14_A__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C13_A_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C14_A_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_A
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C21_A__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C22_A__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C21_A_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C22_A_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_A
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C23_A__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C24_A__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C23_A_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C24_A_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_A
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C31_A__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C32_A__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C31_A_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C32_A_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_A
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C33_A__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C34_A__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C33_A_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C34_A_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_B
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C11_B__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C12_B__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C11_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C12_B_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_B
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C13_B__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C14_B__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C13_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C14_B_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_B
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C21_B__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C22_B__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C21_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C22_B_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_B
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C23_B__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C24_B__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C23_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C24_B_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_B
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C31_B__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C32_B__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C31_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C32_B_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_B
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C33_B__SHIFT 0x0
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C34_B__SHIFT 0x10
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C33_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C34_B_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam2_dispdec
+//MPCC_OGAM2_MPCC_OGAM_CONTROL
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT__SHIFT 0x2
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_PWL_DISABLE__SHIFT 0x3
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_CURRENT__SHIFT 0x7
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_CURRENT__SHIFT 0x9
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_MASK 0x00000003L
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_MASK 0x00000004L
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_PWL_DISABLE_MASK 0x00000008L
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_CURRENT_MASK 0x00000180L
+#define MPCC_OGAM2_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_CURRENT_MASK 0x00000200L
+//MPCC_OGAM2_MPCC_OGAM_LUT_INDEX
+#define MPCC_OGAM2_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+//MPCC_OGAM2_MPCC_OGAM_LUT_DATA
+#define MPCC_OGAM2_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_LUT_CONTROL
+#define MPCC_OGAM2_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define MPCC_OGAM2_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_HOST_SEL__SHIFT 0x6
+#define MPCC_OGAM2_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_CONFIG_MODE__SHIFT 0x7
+#define MPCC_OGAM2_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define MPCC_OGAM2_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define MPCC_OGAM2_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_HOST_SEL_MASK 0x00000040L
+#define MPCC_OGAM2_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_CONFIG_MODE_MASK 0x00000080L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_BASE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_B__MPCC_OGAM_RAMA_OFFSET_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_B__MPCC_OGAM_RAMA_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_G__MPCC_OGAM_RAMA_OFFSET_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_G__MPCC_OGAM_RAMA_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_R__MPCC_OGAM_RAMA_OFFSET_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_OFFSET_R__MPCC_OGAM_RAMA_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_BASE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_B__MPCC_OGAM_RAMB_OFFSET_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_B__MPCC_OGAM_RAMB_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_G__MPCC_OGAM_RAMB_OFFSET_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_G__MPCC_OGAM_RAMB_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_R__MPCC_OGAM_RAMB_OFFSET_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_OFFSET_R__MPCC_OGAM_RAMB_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_GAMUT_REMAP_COEF_FORMAT
+#define MPCC_OGAM2_MPCC_GAMUT_REMAP_COEF_FORMAT__MPCC_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_GAMUT_REMAP_COEF_FORMAT__MPCC_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000001L
+//MPCC_OGAM2_MPCC_GAMUT_REMAP_MODE
+#define MPCC_OGAM2_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_CURRENT__SHIFT 0x7
+#define MPCC_OGAM2_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define MPCC_OGAM2_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_CURRENT_MASK 0x00000180L
+//MPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_A
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C11_A__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C12_A__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C11_A_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C12_A_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_A
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C13_A__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C14_A__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C13_A_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C14_A_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_A
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C21_A__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C22_A__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C21_A_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C22_A_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_A
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C23_A__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C24_A__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C23_A_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C24_A_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_A
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C31_A__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C32_A__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C31_A_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C32_A_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_A
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C33_A__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C34_A__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C33_A_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C34_A_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_B
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C11_B__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C12_B__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C11_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C12_B_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_B
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C13_B__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C14_B__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C13_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C14_B_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_B
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C21_B__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C22_B__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C21_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C22_B_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_B
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C23_B__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C24_B__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C23_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C24_B_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_B
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C31_B__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C32_B__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C31_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C32_B_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_B
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C33_B__SHIFT 0x0
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C34_B__SHIFT 0x10
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C33_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C34_B_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam3_dispdec
+//MPCC_OGAM3_MPCC_OGAM_CONTROL
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT__SHIFT 0x2
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_PWL_DISABLE__SHIFT 0x3
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_CURRENT__SHIFT 0x7
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_CURRENT__SHIFT 0x9
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_MASK 0x00000003L
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_MASK 0x00000004L
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_PWL_DISABLE_MASK 0x00000008L
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_MODE_CURRENT_MASK 0x00000180L
+#define MPCC_OGAM3_MPCC_OGAM_CONTROL__MPCC_OGAM_SELECT_CURRENT_MASK 0x00000200L
+//MPCC_OGAM3_MPCC_OGAM_LUT_INDEX
+#define MPCC_OGAM3_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+//MPCC_OGAM3_MPCC_OGAM_LUT_DATA
+#define MPCC_OGAM3_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_LUT_CONTROL
+#define MPCC_OGAM3_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define MPCC_OGAM3_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_HOST_SEL__SHIFT 0x6
+#define MPCC_OGAM3_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_CONFIG_MODE__SHIFT 0x7
+#define MPCC_OGAM3_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define MPCC_OGAM3_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define MPCC_OGAM3_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_HOST_SEL_MASK 0x00000040L
+#define MPCC_OGAM3_MPCC_OGAM_LUT_CONTROL__MPCC_OGAM_LUT_CONFIG_MODE_MASK 0x00000080L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_BASE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_B__MPCC_OGAM_RAMA_OFFSET_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_B__MPCC_OGAM_RAMA_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_G__MPCC_OGAM_RAMA_OFFSET_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_G__MPCC_OGAM_RAMA_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_R__MPCC_OGAM_RAMA_OFFSET_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_OFFSET_R__MPCC_OGAM_RAMA_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_BASE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_B__MPCC_OGAM_RAMB_OFFSET_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_B__MPCC_OGAM_RAMB_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_G__MPCC_OGAM_RAMB_OFFSET_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_G__MPCC_OGAM_RAMB_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_R__MPCC_OGAM_RAMB_OFFSET_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_OFFSET_R__MPCC_OGAM_RAMB_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_GAMUT_REMAP_COEF_FORMAT
+#define MPCC_OGAM3_MPCC_GAMUT_REMAP_COEF_FORMAT__MPCC_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_GAMUT_REMAP_COEF_FORMAT__MPCC_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000001L
+//MPCC_OGAM3_MPCC_GAMUT_REMAP_MODE
+#define MPCC_OGAM3_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_CURRENT__SHIFT 0x7
+#define MPCC_OGAM3_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_MASK 0x00000003L
+#define MPCC_OGAM3_MPCC_GAMUT_REMAP_MODE__MPCC_GAMUT_REMAP_MODE_CURRENT_MASK 0x00000180L
+//MPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_A
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C11_A__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C12_A__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C11_A_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_A__MPCC_GAMUT_REMAP_C12_A_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_A
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C13_A__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C14_A__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C13_A_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_A__MPCC_GAMUT_REMAP_C14_A_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_A
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C21_A__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C22_A__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C21_A_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_A__MPCC_GAMUT_REMAP_C22_A_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_A
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C23_A__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C24_A__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C23_A_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_A__MPCC_GAMUT_REMAP_C24_A_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_A
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C31_A__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C32_A__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C31_A_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_A__MPCC_GAMUT_REMAP_C32_A_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_A
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C33_A__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C34_A__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C33_A_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_A__MPCC_GAMUT_REMAP_C34_A_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_B
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C11_B__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C12_B__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C11_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C11_C12_B__MPCC_GAMUT_REMAP_C12_B_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_B
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C13_B__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C14_B__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C13_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C13_C14_B__MPCC_GAMUT_REMAP_C14_B_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_B
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C21_B__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C22_B__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C21_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C21_C22_B__MPCC_GAMUT_REMAP_C22_B_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_B
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C23_B__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C24_B__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C23_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C23_C24_B__MPCC_GAMUT_REMAP_C24_B_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_B
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C31_B__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C32_B__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C31_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C31_C32_B__MPCC_GAMUT_REMAP_C32_B_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_B
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C33_B__SHIFT 0x0
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C34_B__SHIFT 0x10
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C33_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPC_GAMUT_REMAP_C33_C34_B__MPCC_GAMUT_REMAP_C34_B_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_mpc_mpcc_mcm0_dispdec
+//MPCC_MCM0_MPCC_MCM_SHAPER_CONTROL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_LUT_MODE__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_MODE_CURRENT__SHIFT 0x2
+#define MPCC_MCM0_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_LUT_MODE_MASK 0x00000003L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_MODE_CURRENT_MASK 0x0000000CL
+//MPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_R
+#define MPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_R__MPCC_MCM_SHAPER_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_R__MPCC_MCM_SHAPER_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_G
+#define MPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_G__MPCC_MCM_SHAPER_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_G__MPCC_MCM_SHAPER_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_B
+#define MPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_B__MPCC_MCM_SHAPER_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_OFFSET_B__MPCC_MCM_SHAPER_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_MCM0_MPCC_MCM_SHAPER_SCALE_R
+#define MPCC_MCM0_MPCC_MCM_SHAPER_SCALE_R__MPCC_MCM_SHAPER_SCALE_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_SCALE_R__MPCC_MCM_SHAPER_SCALE_R_MASK 0x0000FFFFL
+//MPCC_MCM0_MPCC_MCM_SHAPER_SCALE_G_B
+#define MPCC_MCM0_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_B__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_G_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_B_MASK 0xFFFF0000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_LUT_INDEX
+#define MPCC_MCM0_MPCC_MCM_SHAPER_LUT_INDEX__MPCC_MCM_SHAPER_LUT_INDEX__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_LUT_INDEX__MPCC_MCM_SHAPER_LUT_INDEX_MASK 0x000000FFL
+//MPCC_MCM0_MPCC_MCM_SHAPER_LUT_DATA
+#define MPCC_MCM0_MPCC_MCM_SHAPER_LUT_DATA__MPCC_MCM_SHAPER_LUT_DATA__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_LUT_DATA__MPCC_MCM_SHAPER_LUT_DATA_MASK 0x00FFFFFFL
+//MPCC_MCM0_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK
+#define MPCC_MCM0_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_SEL__SHIFT 0x4
+#define MPCC_MCM0_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_SEL_MASK 0x00000010L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_B
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_G
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_R
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_B
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_G
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_R
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_B
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_G
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_R
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_B
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_G
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_R
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_3DLUT_MODE
+#define MPCC_MCM0_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_SIZE__SHIFT 0x4
+#define MPCC_MCM0_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_CURRENT__SHIFT 0x8
+#define MPCC_MCM0_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_MASK 0x00000003L
+#define MPCC_MCM0_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_SIZE_MASK 0x00000010L
+#define MPCC_MCM0_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_CURRENT_MASK 0x00000300L
+//MPCC_MCM0_MPCC_MCM_3DLUT_INDEX
+#define MPCC_MCM0_MPCC_MCM_3DLUT_INDEX__MPCC_MCM_3DLUT_INDEX__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_3DLUT_INDEX__MPCC_MCM_3DLUT_INDEX_MASK 0x000007FFL
+//MPCC_MCM0_MPCC_MCM_3DLUT_DATA
+#define MPCC_MCM0_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA0__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA1__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA0_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA1_MASK 0xFFFF0000L
+//MPCC_MCM0_MPCC_MCM_3DLUT_DATA_30BIT
+#define MPCC_MCM0_MPCC_MCM_3DLUT_DATA_30BIT__MPCC_MCM_3DLUT_DATA_30BIT__SHIFT 0x2
+#define MPCC_MCM0_MPCC_MCM_3DLUT_DATA_30BIT__MPCC_MCM_3DLUT_DATA_30BIT_MASK 0xFFFFFFFCL
+//MPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL
+#define MPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_RAM_SEL__SHIFT 0x4
+#define MPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_30BIT_EN__SHIFT 0x8
+#define MPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_READ_SEL__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_WRITE_EN_MASK_MASK 0x0000000FL
+#define MPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_RAM_SEL_MASK 0x00000010L
+#define MPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_30BIT_EN_MASK 0x00000100L
+#define MPCC_MCM0_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_READ_SEL_MASK 0x00030000L
+//MPCC_MCM0_MPCC_MCM_3DLUT_OUT_NORM_FACTOR
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_NORM_FACTOR__MPCC_MCM_3DLUT_OUT_NORM_FACTOR__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_NORM_FACTOR__MPCC_MCM_3DLUT_OUT_NORM_FACTOR_MASK 0x0000FFFFL
+//MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_R
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_SCALE_R__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_OFFSET_R_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_SCALE_R_MASK 0xFFFF0000L
+//MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_G
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_SCALE_G__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_OFFSET_G_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_SCALE_G_MASK 0xFFFF0000L
+//MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_B
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_SCALE_B__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT__SHIFT 0x2
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_PWL_DISABLE__SHIFT 0x3
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_CURRENT__SHIFT 0x4
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_CURRENT__SHIFT 0x6
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_MASK 0x00000003L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_MASK 0x00000004L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_PWL_DISABLE_MASK 0x00000008L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_CURRENT_MASK 0x00000030L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_CURRENT_MASK 0x00000040L
+//MPCC_MCM0_MPCC_MCM_1DLUT_LUT_INDEX
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_INDEX__MPCC_MCM_1DLUT_LUT_INDEX__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_INDEX__MPCC_MCM_1DLUT_LUT_INDEX_MASK 0x000001FFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_LUT_DATA
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_DATA__MPCC_MCM_1DLUT_LUT_DATA__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_DATA__MPCC_MCM_1DLUT_LUT_DATA_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_HOST_SEL__SHIFT 0x6
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_CONFIG_MODE__SHIFT 0x7
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_HOST_SEL_MASK 0x00000040L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_CONFIG_MODE_MASK 0x00000080L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_B
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_G
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_R
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_B
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_B__MPCC_MCM_1DLUT_RAMA_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_B__MPCC_MCM_1DLUT_RAMA_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_G
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_G__MPCC_MCM_1DLUT_RAMA_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_G__MPCC_MCM_1DLUT_RAMA_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_R
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_R__MPCC_MCM_1DLUT_RAMA_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_OFFSET_R__MPCC_MCM_1DLUT_RAMA_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_B
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_G
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_R
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_B
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_B__MPCC_MCM_1DLUT_RAMB_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_B__MPCC_MCM_1DLUT_RAMB_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_G
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_G__MPCC_MCM_1DLUT_RAMB_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_G__MPCC_MCM_1DLUT_RAMB_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_R
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_R__MPCC_MCM_1DLUT_RAMB_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_OFFSET_R__MPCC_MCM_1DLUT_RAMB_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM0_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE__SHIFT 0x4
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_FORCE__SHIFT 0x8
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_DIS__SHIFT 0xa
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE__SHIFT 0xc
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_FORCE__SHIFT 0x10
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_DIS__SHIFT 0x12
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_LOW_PWR_MODE__SHIFT 0x14
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_STATE__SHIFT 0x18
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_STATE__SHIFT 0x1a
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_STATE__SHIFT 0x1c
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE_MASK 0x00000030L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_FORCE_MASK 0x00000300L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_DIS_MASK 0x00000400L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE_MASK 0x00003000L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_FORCE_MASK 0x00030000L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_DIS_MASK 0x00040000L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_LOW_PWR_MODE_MASK 0x00300000L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_STATE_MASK 0x03000000L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_STATE_MASK 0x0C000000L
+#define MPCC_MCM0_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_STATE_MASK 0x30000000L
+
+
+// addressBlock: dce_dc_mpc_mpcc_mcm1_dispdec
+//MPCC_MCM1_MPCC_MCM_SHAPER_CONTROL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_LUT_MODE__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_MODE_CURRENT__SHIFT 0x2
+#define MPCC_MCM1_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_LUT_MODE_MASK 0x00000003L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_MODE_CURRENT_MASK 0x0000000CL
+//MPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_R
+#define MPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_R__MPCC_MCM_SHAPER_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_R__MPCC_MCM_SHAPER_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_G
+#define MPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_G__MPCC_MCM_SHAPER_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_G__MPCC_MCM_SHAPER_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_B
+#define MPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_B__MPCC_MCM_SHAPER_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_OFFSET_B__MPCC_MCM_SHAPER_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_MCM1_MPCC_MCM_SHAPER_SCALE_R
+#define MPCC_MCM1_MPCC_MCM_SHAPER_SCALE_R__MPCC_MCM_SHAPER_SCALE_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_SCALE_R__MPCC_MCM_SHAPER_SCALE_R_MASK 0x0000FFFFL
+//MPCC_MCM1_MPCC_MCM_SHAPER_SCALE_G_B
+#define MPCC_MCM1_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_B__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_G_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_B_MASK 0xFFFF0000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_LUT_INDEX
+#define MPCC_MCM1_MPCC_MCM_SHAPER_LUT_INDEX__MPCC_MCM_SHAPER_LUT_INDEX__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_LUT_INDEX__MPCC_MCM_SHAPER_LUT_INDEX_MASK 0x000000FFL
+//MPCC_MCM1_MPCC_MCM_SHAPER_LUT_DATA
+#define MPCC_MCM1_MPCC_MCM_SHAPER_LUT_DATA__MPCC_MCM_SHAPER_LUT_DATA__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_LUT_DATA__MPCC_MCM_SHAPER_LUT_DATA_MASK 0x00FFFFFFL
+//MPCC_MCM1_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK
+#define MPCC_MCM1_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_SEL__SHIFT 0x4
+#define MPCC_MCM1_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_SEL_MASK 0x00000010L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_B
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_G
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_R
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_B
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_G
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_R
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_B
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_G
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_R
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_B
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_G
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_R
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_3DLUT_MODE
+#define MPCC_MCM1_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_SIZE__SHIFT 0x4
+#define MPCC_MCM1_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_CURRENT__SHIFT 0x8
+#define MPCC_MCM1_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_MASK 0x00000003L
+#define MPCC_MCM1_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_SIZE_MASK 0x00000010L
+#define MPCC_MCM1_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_CURRENT_MASK 0x00000300L
+//MPCC_MCM1_MPCC_MCM_3DLUT_INDEX
+#define MPCC_MCM1_MPCC_MCM_3DLUT_INDEX__MPCC_MCM_3DLUT_INDEX__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_3DLUT_INDEX__MPCC_MCM_3DLUT_INDEX_MASK 0x000007FFL
+//MPCC_MCM1_MPCC_MCM_3DLUT_DATA
+#define MPCC_MCM1_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA0__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA1__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA0_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA1_MASK 0xFFFF0000L
+//MPCC_MCM1_MPCC_MCM_3DLUT_DATA_30BIT
+#define MPCC_MCM1_MPCC_MCM_3DLUT_DATA_30BIT__MPCC_MCM_3DLUT_DATA_30BIT__SHIFT 0x2
+#define MPCC_MCM1_MPCC_MCM_3DLUT_DATA_30BIT__MPCC_MCM_3DLUT_DATA_30BIT_MASK 0xFFFFFFFCL
+//MPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL
+#define MPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_RAM_SEL__SHIFT 0x4
+#define MPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_30BIT_EN__SHIFT 0x8
+#define MPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_READ_SEL__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_WRITE_EN_MASK_MASK 0x0000000FL
+#define MPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_RAM_SEL_MASK 0x00000010L
+#define MPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_30BIT_EN_MASK 0x00000100L
+#define MPCC_MCM1_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_READ_SEL_MASK 0x00030000L
+//MPCC_MCM1_MPCC_MCM_3DLUT_OUT_NORM_FACTOR
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_NORM_FACTOR__MPCC_MCM_3DLUT_OUT_NORM_FACTOR__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_NORM_FACTOR__MPCC_MCM_3DLUT_OUT_NORM_FACTOR_MASK 0x0000FFFFL
+//MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_R
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_SCALE_R__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_OFFSET_R_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_SCALE_R_MASK 0xFFFF0000L
+//MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_G
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_SCALE_G__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_OFFSET_G_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_SCALE_G_MASK 0xFFFF0000L
+//MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_B
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_SCALE_B__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT__SHIFT 0x2
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_PWL_DISABLE__SHIFT 0x3
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_CURRENT__SHIFT 0x4
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_CURRENT__SHIFT 0x6
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_MASK 0x00000003L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_MASK 0x00000004L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_PWL_DISABLE_MASK 0x00000008L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_CURRENT_MASK 0x00000030L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_CURRENT_MASK 0x00000040L
+//MPCC_MCM1_MPCC_MCM_1DLUT_LUT_INDEX
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_INDEX__MPCC_MCM_1DLUT_LUT_INDEX__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_INDEX__MPCC_MCM_1DLUT_LUT_INDEX_MASK 0x000001FFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_LUT_DATA
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_DATA__MPCC_MCM_1DLUT_LUT_DATA__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_DATA__MPCC_MCM_1DLUT_LUT_DATA_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_HOST_SEL__SHIFT 0x6
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_CONFIG_MODE__SHIFT 0x7
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_HOST_SEL_MASK 0x00000040L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_CONFIG_MODE_MASK 0x00000080L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_B
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_G
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_R
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_B
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_B__MPCC_MCM_1DLUT_RAMA_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_B__MPCC_MCM_1DLUT_RAMA_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_G
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_G__MPCC_MCM_1DLUT_RAMA_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_G__MPCC_MCM_1DLUT_RAMA_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_R
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_R__MPCC_MCM_1DLUT_RAMA_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_OFFSET_R__MPCC_MCM_1DLUT_RAMA_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_B
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_G
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_R
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_B
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_B__MPCC_MCM_1DLUT_RAMB_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_B__MPCC_MCM_1DLUT_RAMB_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_G
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_G__MPCC_MCM_1DLUT_RAMB_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_G__MPCC_MCM_1DLUT_RAMB_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_R
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_R__MPCC_MCM_1DLUT_RAMB_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_OFFSET_R__MPCC_MCM_1DLUT_RAMB_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM1_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE__SHIFT 0x4
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_FORCE__SHIFT 0x8
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_DIS__SHIFT 0xa
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE__SHIFT 0xc
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_FORCE__SHIFT 0x10
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_DIS__SHIFT 0x12
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_LOW_PWR_MODE__SHIFT 0x14
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_STATE__SHIFT 0x18
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_STATE__SHIFT 0x1a
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_STATE__SHIFT 0x1c
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE_MASK 0x00000030L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_FORCE_MASK 0x00000300L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_DIS_MASK 0x00000400L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE_MASK 0x00003000L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_FORCE_MASK 0x00030000L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_DIS_MASK 0x00040000L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_LOW_PWR_MODE_MASK 0x00300000L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_STATE_MASK 0x03000000L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_STATE_MASK 0x0C000000L
+#define MPCC_MCM1_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_STATE_MASK 0x30000000L
+
+
+// addressBlock: dce_dc_mpc_mpcc_mcm2_dispdec
+//MPCC_MCM2_MPCC_MCM_SHAPER_CONTROL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_LUT_MODE__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_MODE_CURRENT__SHIFT 0x2
+#define MPCC_MCM2_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_LUT_MODE_MASK 0x00000003L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_MODE_CURRENT_MASK 0x0000000CL
+//MPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_R
+#define MPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_R__MPCC_MCM_SHAPER_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_R__MPCC_MCM_SHAPER_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_G
+#define MPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_G__MPCC_MCM_SHAPER_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_G__MPCC_MCM_SHAPER_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_B
+#define MPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_B__MPCC_MCM_SHAPER_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_OFFSET_B__MPCC_MCM_SHAPER_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_MCM2_MPCC_MCM_SHAPER_SCALE_R
+#define MPCC_MCM2_MPCC_MCM_SHAPER_SCALE_R__MPCC_MCM_SHAPER_SCALE_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_SCALE_R__MPCC_MCM_SHAPER_SCALE_R_MASK 0x0000FFFFL
+//MPCC_MCM2_MPCC_MCM_SHAPER_SCALE_G_B
+#define MPCC_MCM2_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_B__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_G_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_B_MASK 0xFFFF0000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_LUT_INDEX
+#define MPCC_MCM2_MPCC_MCM_SHAPER_LUT_INDEX__MPCC_MCM_SHAPER_LUT_INDEX__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_LUT_INDEX__MPCC_MCM_SHAPER_LUT_INDEX_MASK 0x000000FFL
+//MPCC_MCM2_MPCC_MCM_SHAPER_LUT_DATA
+#define MPCC_MCM2_MPCC_MCM_SHAPER_LUT_DATA__MPCC_MCM_SHAPER_LUT_DATA__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_LUT_DATA__MPCC_MCM_SHAPER_LUT_DATA_MASK 0x00FFFFFFL
+//MPCC_MCM2_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK
+#define MPCC_MCM2_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_SEL__SHIFT 0x4
+#define MPCC_MCM2_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_SEL_MASK 0x00000010L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_B
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_G
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_R
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_B
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_G
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_R
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_B
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_G
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_R
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_B
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_G
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_R
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_3DLUT_MODE
+#define MPCC_MCM2_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_SIZE__SHIFT 0x4
+#define MPCC_MCM2_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_CURRENT__SHIFT 0x8
+#define MPCC_MCM2_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_MASK 0x00000003L
+#define MPCC_MCM2_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_SIZE_MASK 0x00000010L
+#define MPCC_MCM2_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_CURRENT_MASK 0x00000300L
+//MPCC_MCM2_MPCC_MCM_3DLUT_INDEX
+#define MPCC_MCM2_MPCC_MCM_3DLUT_INDEX__MPCC_MCM_3DLUT_INDEX__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_3DLUT_INDEX__MPCC_MCM_3DLUT_INDEX_MASK 0x000007FFL
+//MPCC_MCM2_MPCC_MCM_3DLUT_DATA
+#define MPCC_MCM2_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA0__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA1__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA0_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA1_MASK 0xFFFF0000L
+//MPCC_MCM2_MPCC_MCM_3DLUT_DATA_30BIT
+#define MPCC_MCM2_MPCC_MCM_3DLUT_DATA_30BIT__MPCC_MCM_3DLUT_DATA_30BIT__SHIFT 0x2
+#define MPCC_MCM2_MPCC_MCM_3DLUT_DATA_30BIT__MPCC_MCM_3DLUT_DATA_30BIT_MASK 0xFFFFFFFCL
+//MPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL
+#define MPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_RAM_SEL__SHIFT 0x4
+#define MPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_30BIT_EN__SHIFT 0x8
+#define MPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_READ_SEL__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_WRITE_EN_MASK_MASK 0x0000000FL
+#define MPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_RAM_SEL_MASK 0x00000010L
+#define MPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_30BIT_EN_MASK 0x00000100L
+#define MPCC_MCM2_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_READ_SEL_MASK 0x00030000L
+//MPCC_MCM2_MPCC_MCM_3DLUT_OUT_NORM_FACTOR
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_NORM_FACTOR__MPCC_MCM_3DLUT_OUT_NORM_FACTOR__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_NORM_FACTOR__MPCC_MCM_3DLUT_OUT_NORM_FACTOR_MASK 0x0000FFFFL
+//MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_R
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_SCALE_R__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_OFFSET_R_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_SCALE_R_MASK 0xFFFF0000L
+//MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_G
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_SCALE_G__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_OFFSET_G_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_SCALE_G_MASK 0xFFFF0000L
+//MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_B
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_SCALE_B__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT__SHIFT 0x2
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_PWL_DISABLE__SHIFT 0x3
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_CURRENT__SHIFT 0x4
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_CURRENT__SHIFT 0x6
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_MASK 0x00000003L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_MASK 0x00000004L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_PWL_DISABLE_MASK 0x00000008L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_CURRENT_MASK 0x00000030L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_CURRENT_MASK 0x00000040L
+//MPCC_MCM2_MPCC_MCM_1DLUT_LUT_INDEX
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_INDEX__MPCC_MCM_1DLUT_LUT_INDEX__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_INDEX__MPCC_MCM_1DLUT_LUT_INDEX_MASK 0x000001FFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_LUT_DATA
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_DATA__MPCC_MCM_1DLUT_LUT_DATA__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_DATA__MPCC_MCM_1DLUT_LUT_DATA_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_HOST_SEL__SHIFT 0x6
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_CONFIG_MODE__SHIFT 0x7
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_HOST_SEL_MASK 0x00000040L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_CONFIG_MODE_MASK 0x00000080L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_B
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_G
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_R
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_B
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_B__MPCC_MCM_1DLUT_RAMA_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_B__MPCC_MCM_1DLUT_RAMA_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_G
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_G__MPCC_MCM_1DLUT_RAMA_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_G__MPCC_MCM_1DLUT_RAMA_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_R
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_R__MPCC_MCM_1DLUT_RAMA_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_OFFSET_R__MPCC_MCM_1DLUT_RAMA_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_B
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_G
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_R
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_B
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_B__MPCC_MCM_1DLUT_RAMB_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_B__MPCC_MCM_1DLUT_RAMB_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_G
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_G__MPCC_MCM_1DLUT_RAMB_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_G__MPCC_MCM_1DLUT_RAMB_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_R
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_R__MPCC_MCM_1DLUT_RAMB_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_OFFSET_R__MPCC_MCM_1DLUT_RAMB_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM2_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE__SHIFT 0x4
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_FORCE__SHIFT 0x8
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_DIS__SHIFT 0xa
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE__SHIFT 0xc
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_FORCE__SHIFT 0x10
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_DIS__SHIFT 0x12
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_LOW_PWR_MODE__SHIFT 0x14
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_STATE__SHIFT 0x18
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_STATE__SHIFT 0x1a
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_STATE__SHIFT 0x1c
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE_MASK 0x00000030L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_FORCE_MASK 0x00000300L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_DIS_MASK 0x00000400L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE_MASK 0x00003000L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_FORCE_MASK 0x00030000L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_DIS_MASK 0x00040000L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_LOW_PWR_MODE_MASK 0x00300000L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_STATE_MASK 0x03000000L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_STATE_MASK 0x0C000000L
+#define MPCC_MCM2_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_STATE_MASK 0x30000000L
+
+
+// addressBlock: dce_dc_mpc_mpcc_mcm3_dispdec
+//MPCC_MCM3_MPCC_MCM_SHAPER_CONTROL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_LUT_MODE__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_MODE_CURRENT__SHIFT 0x2
+#define MPCC_MCM3_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_LUT_MODE_MASK 0x00000003L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_CONTROL__MPCC_MCM_SHAPER_MODE_CURRENT_MASK 0x0000000CL
+//MPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_R
+#define MPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_R__MPCC_MCM_SHAPER_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_R__MPCC_MCM_SHAPER_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_G
+#define MPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_G__MPCC_MCM_SHAPER_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_G__MPCC_MCM_SHAPER_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_B
+#define MPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_B__MPCC_MCM_SHAPER_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_OFFSET_B__MPCC_MCM_SHAPER_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_MCM3_MPCC_MCM_SHAPER_SCALE_R
+#define MPCC_MCM3_MPCC_MCM_SHAPER_SCALE_R__MPCC_MCM_SHAPER_SCALE_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_SCALE_R__MPCC_MCM_SHAPER_SCALE_R_MASK 0x0000FFFFL
+//MPCC_MCM3_MPCC_MCM_SHAPER_SCALE_G_B
+#define MPCC_MCM3_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_B__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_G_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_SCALE_G_B__MPCC_MCM_SHAPER_SCALE_B_MASK 0xFFFF0000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_LUT_INDEX
+#define MPCC_MCM3_MPCC_MCM_SHAPER_LUT_INDEX__MPCC_MCM_SHAPER_LUT_INDEX__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_LUT_INDEX__MPCC_MCM_SHAPER_LUT_INDEX_MASK 0x000000FFL
+//MPCC_MCM3_MPCC_MCM_SHAPER_LUT_DATA
+#define MPCC_MCM3_MPCC_MCM_SHAPER_LUT_DATA__MPCC_MCM_SHAPER_LUT_DATA__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_LUT_DATA__MPCC_MCM_SHAPER_LUT_DATA_MASK 0x00FFFFFFL
+//MPCC_MCM3_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK
+#define MPCC_MCM3_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_SEL__SHIFT 0x4
+#define MPCC_MCM3_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK__MPCC_MCM_SHAPER_LUT_WRITE_SEL_MASK 0x00000010L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_B
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_G
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_R
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_START_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_B
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_B__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_G
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_G__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_R
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_END_CNTL_R__MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_0_1__MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_2_3__MPCC_MCM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_4_5__MPCC_MCM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_6_7__MPCC_MCM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_8_9__MPCC_MCM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_10_11__MPCC_MCM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_12_13__MPCC_MCM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_14_15__MPCC_MCM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_16_17__MPCC_MCM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_18_19__MPCC_MCM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_20_21__MPCC_MCM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_22_23__MPCC_MCM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_24_25__MPCC_MCM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_26_27__MPCC_MCM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_28_29__MPCC_MCM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_30_31__MPCC_MCM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMA_REGION_32_33__MPCC_MCM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_B
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_G
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_R
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_START_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_B
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_B__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_G
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_G__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_R
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_END_CNTL_R__MPCC_MCM_SHAPER_RAMB_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_0_1__MPCC_MCM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_2_3__MPCC_MCM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_4_5__MPCC_MCM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_6_7__MPCC_MCM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_8_9__MPCC_MCM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_10_11__MPCC_MCM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_12_13__MPCC_MCM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_14_15__MPCC_MCM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_16_17__MPCC_MCM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_18_19__MPCC_MCM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_20_21__MPCC_MCM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_22_23__MPCC_MCM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_24_25__MPCC_MCM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_26_27__MPCC_MCM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_28_29__MPCC_MCM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_30_31__MPCC_MCM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_SHAPER_RAMB_REGION_32_33__MPCC_MCM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_3DLUT_MODE
+#define MPCC_MCM3_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_SIZE__SHIFT 0x4
+#define MPCC_MCM3_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_CURRENT__SHIFT 0x8
+#define MPCC_MCM3_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_MASK 0x00000003L
+#define MPCC_MCM3_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_SIZE_MASK 0x00000010L
+#define MPCC_MCM3_MPCC_MCM_3DLUT_MODE__MPCC_MCM_3DLUT_MODE_CURRENT_MASK 0x00000300L
+//MPCC_MCM3_MPCC_MCM_3DLUT_INDEX
+#define MPCC_MCM3_MPCC_MCM_3DLUT_INDEX__MPCC_MCM_3DLUT_INDEX__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_3DLUT_INDEX__MPCC_MCM_3DLUT_INDEX_MASK 0x000007FFL
+//MPCC_MCM3_MPCC_MCM_3DLUT_DATA
+#define MPCC_MCM3_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA0__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA1__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA0_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_3DLUT_DATA__MPCC_MCM_3DLUT_DATA1_MASK 0xFFFF0000L
+//MPCC_MCM3_MPCC_MCM_3DLUT_DATA_30BIT
+#define MPCC_MCM3_MPCC_MCM_3DLUT_DATA_30BIT__MPCC_MCM_3DLUT_DATA_30BIT__SHIFT 0x2
+#define MPCC_MCM3_MPCC_MCM_3DLUT_DATA_30BIT__MPCC_MCM_3DLUT_DATA_30BIT_MASK 0xFFFFFFFCL
+//MPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL
+#define MPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_RAM_SEL__SHIFT 0x4
+#define MPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_30BIT_EN__SHIFT 0x8
+#define MPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_READ_SEL__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_WRITE_EN_MASK_MASK 0x0000000FL
+#define MPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_RAM_SEL_MASK 0x00000010L
+#define MPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_30BIT_EN_MASK 0x00000100L
+#define MPCC_MCM3_MPCC_MCM_3DLUT_READ_WRITE_CONTROL__MPCC_MCM_3DLUT_READ_SEL_MASK 0x00030000L
+//MPCC_MCM3_MPCC_MCM_3DLUT_OUT_NORM_FACTOR
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_NORM_FACTOR__MPCC_MCM_3DLUT_OUT_NORM_FACTOR__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_NORM_FACTOR__MPCC_MCM_3DLUT_OUT_NORM_FACTOR_MASK 0x0000FFFFL
+//MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_R
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_SCALE_R__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_OFFSET_R_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_R__MPCC_MCM_3DLUT_OUT_SCALE_R_MASK 0xFFFF0000L
+//MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_G
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_SCALE_G__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_OFFSET_G_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_G__MPCC_MCM_3DLUT_OUT_SCALE_G_MASK 0xFFFF0000L
+//MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_B
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_SCALE_B__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_3DLUT_OUT_OFFSET_B__MPCC_MCM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT__SHIFT 0x2
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_PWL_DISABLE__SHIFT 0x3
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_CURRENT__SHIFT 0x4
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_CURRENT__SHIFT 0x6
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_MASK 0x00000003L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_MASK 0x00000004L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_PWL_DISABLE_MASK 0x00000008L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_MODE_CURRENT_MASK 0x00000030L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_CONTROL__MPCC_MCM_1DLUT_SELECT_CURRENT_MASK 0x00000040L
+//MPCC_MCM3_MPCC_MCM_1DLUT_LUT_INDEX
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_INDEX__MPCC_MCM_1DLUT_LUT_INDEX__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_INDEX__MPCC_MCM_1DLUT_LUT_INDEX_MASK 0x000001FFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_LUT_DATA
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_DATA__MPCC_MCM_1DLUT_LUT_DATA__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_DATA__MPCC_MCM_1DLUT_LUT_DATA_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_READ_COLOR_SEL__SHIFT 0x3
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_HOST_SEL__SHIFT 0x6
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_CONFIG_MODE__SHIFT 0x7
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK_MASK 0x00000007L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_READ_COLOR_SEL_MASK 0x00000018L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_HOST_SEL_MASK 0x00000040L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_LUT_CONTROL__MPCC_MCM_1DLUT_LUT_CONFIG_MODE_MASK 0x00000080L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_B
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_G
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_R
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_B__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_G__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL1_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_END_CNTL2_R__MPCC_MCM_1DLUT_RAMA_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_B
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_B__MPCC_MCM_1DLUT_RAMA_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_B__MPCC_MCM_1DLUT_RAMA_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_G
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_G__MPCC_MCM_1DLUT_RAMA_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_G__MPCC_MCM_1DLUT_RAMA_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_R
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_R__MPCC_MCM_1DLUT_RAMA_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_OFFSET_R__MPCC_MCM_1DLUT_RAMA_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_0_1__MPCC_MCM_1DLUT_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_2_3__MPCC_MCM_1DLUT_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_4_5__MPCC_MCM_1DLUT_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_6_7__MPCC_MCM_1DLUT_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_8_9__MPCC_MCM_1DLUT_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_10_11__MPCC_MCM_1DLUT_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_12_13__MPCC_MCM_1DLUT_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_14_15__MPCC_MCM_1DLUT_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_16_17__MPCC_MCM_1DLUT_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_18_19__MPCC_MCM_1DLUT_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_20_21__MPCC_MCM_1DLUT_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_22_23__MPCC_MCM_1DLUT_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_24_25__MPCC_MCM_1DLUT_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_26_27__MPCC_MCM_1DLUT_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_28_29__MPCC_MCM_1DLUT_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_30_31__MPCC_MCM_1DLUT_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMA_REGION_32_33__MPCC_MCM_1DLUT_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_B
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_G
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_R
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_START_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_B_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_B__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_B_MASK 0xFFFF0000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_G_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_G__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_G_MASK 0xFFFF0000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL1_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_BASE_R_MASK 0x0003FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_END_CNTL2_R__MPCC_MCM_1DLUT_RAMB_EXP_REGION_END_SLOPE_R_MASK 0xFFFF0000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_B
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_B__MPCC_MCM_1DLUT_RAMB_OFFSET_B__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_B__MPCC_MCM_1DLUT_RAMB_OFFSET_B_MASK 0x0007FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_G
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_G__MPCC_MCM_1DLUT_RAMB_OFFSET_G__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_G__MPCC_MCM_1DLUT_RAMB_OFFSET_G_MASK 0x0007FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_R
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_R__MPCC_MCM_1DLUT_RAMB_OFFSET_R__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_OFFSET_R__MPCC_MCM_1DLUT_RAMB_OFFSET_R_MASK 0x0007FFFFL
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_0_1__MPCC_MCM_1DLUT_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_2_3__MPCC_MCM_1DLUT_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_4_5__MPCC_MCM_1DLUT_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_6_7__MPCC_MCM_1DLUT_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_8_9__MPCC_MCM_1DLUT_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_10_11__MPCC_MCM_1DLUT_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_12_13__MPCC_MCM_1DLUT_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_14_15__MPCC_MCM_1DLUT_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_16_17__MPCC_MCM_1DLUT_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_18_19__MPCC_MCM_1DLUT_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_20_21__MPCC_MCM_1DLUT_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_22_23__MPCC_MCM_1DLUT_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_24_25__MPCC_MCM_1DLUT_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_26_27__MPCC_MCM_1DLUT_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_28_29__MPCC_MCM_1DLUT_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_30_31__MPCC_MCM_1DLUT_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_MCM3_MPCC_MCM_1DLUT_RAMB_REGION_32_33__MPCC_MCM_1DLUT_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE__SHIFT 0x4
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_FORCE__SHIFT 0x8
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_DIS__SHIFT 0xa
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE__SHIFT 0xc
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_FORCE__SHIFT 0x10
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_DIS__SHIFT 0x12
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_LOW_PWR_MODE__SHIFT 0x14
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_STATE__SHIFT 0x18
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_STATE__SHIFT 0x1a
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_STATE__SHIFT 0x1c
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE_MASK 0x00000030L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_FORCE_MASK 0x00000300L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_DIS_MASK 0x00000400L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE_MASK 0x00003000L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_FORCE_MASK 0x00030000L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_DIS_MASK 0x00040000L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_LOW_PWR_MODE_MASK 0x00300000L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_SHAPER_MEM_PWR_STATE_MASK 0x03000000L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_3DLUT_MEM_PWR_STATE_MASK 0x0C000000L
+#define MPCC_MCM3_MPCC_MCM_MEM_PWR_CTRL__MPCC_MCM_1DLUT_MEM_PWR_STATE_MASK 0x30000000L
+
+
+// addressBlock: dce_dc_mpc_mpc_ocsc_dispdec
+//MPC_OUT0_MUX
+#define MPC_OUT0_MUX__MPC_OUT_MUX__SHIFT 0x0
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR__SHIFT 0x5
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK__SHIFT 0x7
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_DISABLE__SHIFT 0x8
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL__SHIFT 0x9
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_MODE__SHIFT 0xa
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_COUNT__SHIFT 0xb
+#define MPC_OUT0_MUX__MPC_OUT_MUX_MASK 0x0000000FL
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR_MASK 0x00000020L
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK_MASK 0x00000080L
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_DISABLE_MASK 0x00000100L
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_MASK 0x00000200L
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_MODE_MASK 0x00000400L
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_COUNT_MASK 0x007FF800L
+//MPC_OUT0_DENORM_CONTROL
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR__SHIFT 0x0
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR__SHIFT 0xc
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_MODE__SHIFT 0x18
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR_MASK 0x00000FFFL
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR_MASK 0x00FFF000L
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_MODE_MASK 0x07000000L
+//MPC_OUT0_DENORM_CLAMP_G_Y
+#define MPC_OUT0_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y__SHIFT 0x0
+#define MPC_OUT0_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y__SHIFT 0xc
+#define MPC_OUT0_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y_MASK 0x00000FFFL
+#define MPC_OUT0_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y_MASK 0x00FFF000L
+//MPC_OUT0_DENORM_CLAMP_B_CB
+#define MPC_OUT0_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB__SHIFT 0x0
+#define MPC_OUT0_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB__SHIFT 0xc
+#define MPC_OUT0_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB_MASK 0x00000FFFL
+#define MPC_OUT0_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB_MASK 0x00FFF000L
+//MPC_OUT1_MUX
+#define MPC_OUT1_MUX__MPC_OUT_MUX__SHIFT 0x0
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR__SHIFT 0x5
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK__SHIFT 0x7
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_DISABLE__SHIFT 0x8
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL__SHIFT 0x9
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_MODE__SHIFT 0xa
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_COUNT__SHIFT 0xb
+#define MPC_OUT1_MUX__MPC_OUT_MUX_MASK 0x0000000FL
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR_MASK 0x00000020L
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK_MASK 0x00000080L
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_DISABLE_MASK 0x00000100L
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_MASK 0x00000200L
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_MODE_MASK 0x00000400L
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_COUNT_MASK 0x007FF800L
+//MPC_OUT1_DENORM_CONTROL
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR__SHIFT 0x0
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR__SHIFT 0xc
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_MODE__SHIFT 0x18
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR_MASK 0x00000FFFL
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR_MASK 0x00FFF000L
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_MODE_MASK 0x07000000L
+//MPC_OUT1_DENORM_CLAMP_G_Y
+#define MPC_OUT1_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y__SHIFT 0x0
+#define MPC_OUT1_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y__SHIFT 0xc
+#define MPC_OUT1_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y_MASK 0x00000FFFL
+#define MPC_OUT1_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y_MASK 0x00FFF000L
+//MPC_OUT1_DENORM_CLAMP_B_CB
+#define MPC_OUT1_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB__SHIFT 0x0
+#define MPC_OUT1_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB__SHIFT 0xc
+#define MPC_OUT1_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB_MASK 0x00000FFFL
+#define MPC_OUT1_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB_MASK 0x00FFF000L
+//MPC_OUT2_MUX
+#define MPC_OUT2_MUX__MPC_OUT_MUX__SHIFT 0x0
+#define MPC_OUT2_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR__SHIFT 0x5
+#define MPC_OUT2_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK__SHIFT 0x7
+#define MPC_OUT2_MUX__MPC_OUT_RATE_CONTROL_DISABLE__SHIFT 0x8
+#define MPC_OUT2_MUX__MPC_OUT_RATE_CONTROL__SHIFT 0x9
+#define MPC_OUT2_MUX__MPC_OUT_FLOW_CONTROL_MODE__SHIFT 0xa
+#define MPC_OUT2_MUX__MPC_OUT_FLOW_CONTROL_COUNT__SHIFT 0xb
+#define MPC_OUT2_MUX__MPC_OUT_MUX_MASK 0x0000000FL
+#define MPC_OUT2_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR_MASK 0x00000020L
+#define MPC_OUT2_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK_MASK 0x00000080L
+#define MPC_OUT2_MUX__MPC_OUT_RATE_CONTROL_DISABLE_MASK 0x00000100L
+#define MPC_OUT2_MUX__MPC_OUT_RATE_CONTROL_MASK 0x00000200L
+#define MPC_OUT2_MUX__MPC_OUT_FLOW_CONTROL_MODE_MASK 0x00000400L
+#define MPC_OUT2_MUX__MPC_OUT_FLOW_CONTROL_COUNT_MASK 0x007FF800L
+//MPC_OUT2_DENORM_CONTROL
+#define MPC_OUT2_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR__SHIFT 0x0
+#define MPC_OUT2_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR__SHIFT 0xc
+#define MPC_OUT2_DENORM_CONTROL__MPC_OUT_DENORM_MODE__SHIFT 0x18
+#define MPC_OUT2_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR_MASK 0x00000FFFL
+#define MPC_OUT2_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR_MASK 0x00FFF000L
+#define MPC_OUT2_DENORM_CONTROL__MPC_OUT_DENORM_MODE_MASK 0x07000000L
+//MPC_OUT2_DENORM_CLAMP_G_Y
+#define MPC_OUT2_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y__SHIFT 0x0
+#define MPC_OUT2_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y__SHIFT 0xc
+#define MPC_OUT2_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y_MASK 0x00000FFFL
+#define MPC_OUT2_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y_MASK 0x00FFF000L
+//MPC_OUT2_DENORM_CLAMP_B_CB
+#define MPC_OUT2_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB__SHIFT 0x0
+#define MPC_OUT2_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB__SHIFT 0xc
+#define MPC_OUT2_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB_MASK 0x00000FFFL
+#define MPC_OUT2_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB_MASK 0x00FFF000L
+//MPC_OUT3_MUX
+#define MPC_OUT3_MUX__MPC_OUT_MUX__SHIFT 0x0
+#define MPC_OUT3_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR__SHIFT 0x5
+#define MPC_OUT3_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK__SHIFT 0x7
+#define MPC_OUT3_MUX__MPC_OUT_RATE_CONTROL_DISABLE__SHIFT 0x8
+#define MPC_OUT3_MUX__MPC_OUT_RATE_CONTROL__SHIFT 0x9
+#define MPC_OUT3_MUX__MPC_OUT_FLOW_CONTROL_MODE__SHIFT 0xa
+#define MPC_OUT3_MUX__MPC_OUT_FLOW_CONTROL_COUNT__SHIFT 0xb
+#define MPC_OUT3_MUX__MPC_OUT_MUX_MASK 0x0000000FL
+#define MPC_OUT3_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR_MASK 0x00000020L
+#define MPC_OUT3_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK_MASK 0x00000080L
+#define MPC_OUT3_MUX__MPC_OUT_RATE_CONTROL_DISABLE_MASK 0x00000100L
+#define MPC_OUT3_MUX__MPC_OUT_RATE_CONTROL_MASK 0x00000200L
+#define MPC_OUT3_MUX__MPC_OUT_FLOW_CONTROL_MODE_MASK 0x00000400L
+#define MPC_OUT3_MUX__MPC_OUT_FLOW_CONTROL_COUNT_MASK 0x007FF800L
+//MPC_OUT3_DENORM_CONTROL
+#define MPC_OUT3_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR__SHIFT 0x0
+#define MPC_OUT3_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR__SHIFT 0xc
+#define MPC_OUT3_DENORM_CONTROL__MPC_OUT_DENORM_MODE__SHIFT 0x18
+#define MPC_OUT3_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR_MASK 0x00000FFFL
+#define MPC_OUT3_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR_MASK 0x00FFF000L
+#define MPC_OUT3_DENORM_CONTROL__MPC_OUT_DENORM_MODE_MASK 0x07000000L
+//MPC_OUT3_DENORM_CLAMP_G_Y
+#define MPC_OUT3_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y__SHIFT 0x0
+#define MPC_OUT3_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y__SHIFT 0xc
+#define MPC_OUT3_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y_MASK 0x00000FFFL
+#define MPC_OUT3_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y_MASK 0x00FFF000L
+//MPC_OUT3_DENORM_CLAMP_B_CB
+#define MPC_OUT3_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB__SHIFT 0x0
+#define MPC_OUT3_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB__SHIFT 0xc
+#define MPC_OUT3_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB_MASK 0x00000FFFL
+#define MPC_OUT3_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB_MASK 0x00FFF000L
+//MPC_OUT_CSC_COEF_FORMAT
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC0_COEF_FORMAT__SHIFT 0x0
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC1_COEF_FORMAT__SHIFT 0x1
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC2_COEF_FORMAT__SHIFT 0x2
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC3_COEF_FORMAT__SHIFT 0x3
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC0_COEF_FORMAT_MASK 0x00000001L
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC1_COEF_FORMAT_MASK 0x00000002L
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC2_COEF_FORMAT_MASK 0x00000004L
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC3_COEF_FORMAT_MASK 0x00000008L
+//MPC_OUT0_CSC_MODE
+#define MPC_OUT0_CSC_MODE__MPC_OCSC_MODE__SHIFT 0x0
+#define MPC_OUT0_CSC_MODE__MPC_OCSC_MODE_CURRENT__SHIFT 0x7
+#define MPC_OUT0_CSC_MODE__MPC_OCSC_MODE_MASK 0x00000003L
+#define MPC_OUT0_CSC_MODE__MPC_OCSC_MODE_CURRENT_MASK 0x00000180L
+//MPC_OUT0_CSC_C11_C12_A
+#define MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C11_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C12_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C11_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C12_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C13_C14_A
+#define MPC_OUT0_CSC_C13_C14_A__MPC_OCSC_C13_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C13_C14_A__MPC_OCSC_C14_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C13_C14_A__MPC_OCSC_C13_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C13_C14_A__MPC_OCSC_C14_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C21_C22_A
+#define MPC_OUT0_CSC_C21_C22_A__MPC_OCSC_C21_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C21_C22_A__MPC_OCSC_C22_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C21_C22_A__MPC_OCSC_C21_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C21_C22_A__MPC_OCSC_C22_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C23_C24_A
+#define MPC_OUT0_CSC_C23_C24_A__MPC_OCSC_C23_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C23_C24_A__MPC_OCSC_C24_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C23_C24_A__MPC_OCSC_C23_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C23_C24_A__MPC_OCSC_C24_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C31_C32_A
+#define MPC_OUT0_CSC_C31_C32_A__MPC_OCSC_C31_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C31_C32_A__MPC_OCSC_C32_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C31_C32_A__MPC_OCSC_C31_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C31_C32_A__MPC_OCSC_C32_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C33_C34_A
+#define MPC_OUT0_CSC_C33_C34_A__MPC_OCSC_C33_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C33_C34_A__MPC_OCSC_C34_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C33_C34_A__MPC_OCSC_C33_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C33_C34_A__MPC_OCSC_C34_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C11_C12_B
+#define MPC_OUT0_CSC_C11_C12_B__MPC_OCSC_C11_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C11_C12_B__MPC_OCSC_C12_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C11_C12_B__MPC_OCSC_C11_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C11_C12_B__MPC_OCSC_C12_B_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C13_C14_B
+#define MPC_OUT0_CSC_C13_C14_B__MPC_OCSC_C13_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C13_C14_B__MPC_OCSC_C14_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C13_C14_B__MPC_OCSC_C13_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C13_C14_B__MPC_OCSC_C14_B_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C21_C22_B
+#define MPC_OUT0_CSC_C21_C22_B__MPC_OCSC_C21_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C21_C22_B__MPC_OCSC_C22_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C21_C22_B__MPC_OCSC_C21_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C21_C22_B__MPC_OCSC_C22_B_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C23_C24_B
+#define MPC_OUT0_CSC_C23_C24_B__MPC_OCSC_C23_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C23_C24_B__MPC_OCSC_C24_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C23_C24_B__MPC_OCSC_C23_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C23_C24_B__MPC_OCSC_C24_B_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C31_C32_B
+#define MPC_OUT0_CSC_C31_C32_B__MPC_OCSC_C31_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C31_C32_B__MPC_OCSC_C32_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C31_C32_B__MPC_OCSC_C31_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C31_C32_B__MPC_OCSC_C32_B_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C33_C34_B
+#define MPC_OUT0_CSC_C33_C34_B__MPC_OCSC_C33_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C33_C34_B__MPC_OCSC_C34_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C33_C34_B__MPC_OCSC_C33_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C33_C34_B__MPC_OCSC_C34_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_MODE
+#define MPC_OUT1_CSC_MODE__MPC_OCSC_MODE__SHIFT 0x0
+#define MPC_OUT1_CSC_MODE__MPC_OCSC_MODE_CURRENT__SHIFT 0x7
+#define MPC_OUT1_CSC_MODE__MPC_OCSC_MODE_MASK 0x00000003L
+#define MPC_OUT1_CSC_MODE__MPC_OCSC_MODE_CURRENT_MASK 0x00000180L
+//MPC_OUT1_CSC_C11_C12_A
+#define MPC_OUT1_CSC_C11_C12_A__MPC_OCSC_C11_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C11_C12_A__MPC_OCSC_C12_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C11_C12_A__MPC_OCSC_C11_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C11_C12_A__MPC_OCSC_C12_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C13_C14_A
+#define MPC_OUT1_CSC_C13_C14_A__MPC_OCSC_C13_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C13_C14_A__MPC_OCSC_C14_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C13_C14_A__MPC_OCSC_C13_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C13_C14_A__MPC_OCSC_C14_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C21_C22_A
+#define MPC_OUT1_CSC_C21_C22_A__MPC_OCSC_C21_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C21_C22_A__MPC_OCSC_C22_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C21_C22_A__MPC_OCSC_C21_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C21_C22_A__MPC_OCSC_C22_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C23_C24_A
+#define MPC_OUT1_CSC_C23_C24_A__MPC_OCSC_C23_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C23_C24_A__MPC_OCSC_C24_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C23_C24_A__MPC_OCSC_C23_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C23_C24_A__MPC_OCSC_C24_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C31_C32_A
+#define MPC_OUT1_CSC_C31_C32_A__MPC_OCSC_C31_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C31_C32_A__MPC_OCSC_C32_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C31_C32_A__MPC_OCSC_C31_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C31_C32_A__MPC_OCSC_C32_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C33_C34_A
+#define MPC_OUT1_CSC_C33_C34_A__MPC_OCSC_C33_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C33_C34_A__MPC_OCSC_C34_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C33_C34_A__MPC_OCSC_C33_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C33_C34_A__MPC_OCSC_C34_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C11_C12_B
+#define MPC_OUT1_CSC_C11_C12_B__MPC_OCSC_C11_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C11_C12_B__MPC_OCSC_C12_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C11_C12_B__MPC_OCSC_C11_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C11_C12_B__MPC_OCSC_C12_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C13_C14_B
+#define MPC_OUT1_CSC_C13_C14_B__MPC_OCSC_C13_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C13_C14_B__MPC_OCSC_C14_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C13_C14_B__MPC_OCSC_C13_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C13_C14_B__MPC_OCSC_C14_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C21_C22_B
+#define MPC_OUT1_CSC_C21_C22_B__MPC_OCSC_C21_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C21_C22_B__MPC_OCSC_C22_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C21_C22_B__MPC_OCSC_C21_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C21_C22_B__MPC_OCSC_C22_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C23_C24_B
+#define MPC_OUT1_CSC_C23_C24_B__MPC_OCSC_C23_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C23_C24_B__MPC_OCSC_C24_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C23_C24_B__MPC_OCSC_C23_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C23_C24_B__MPC_OCSC_C24_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C31_C32_B
+#define MPC_OUT1_CSC_C31_C32_B__MPC_OCSC_C31_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C31_C32_B__MPC_OCSC_C32_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C31_C32_B__MPC_OCSC_C31_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C31_C32_B__MPC_OCSC_C32_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C33_C34_B
+#define MPC_OUT1_CSC_C33_C34_B__MPC_OCSC_C33_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C33_C34_B__MPC_OCSC_C34_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C33_C34_B__MPC_OCSC_C33_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C33_C34_B__MPC_OCSC_C34_B_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_MODE
+#define MPC_OUT2_CSC_MODE__MPC_OCSC_MODE__SHIFT 0x0
+#define MPC_OUT2_CSC_MODE__MPC_OCSC_MODE_CURRENT__SHIFT 0x7
+#define MPC_OUT2_CSC_MODE__MPC_OCSC_MODE_MASK 0x00000003L
+#define MPC_OUT2_CSC_MODE__MPC_OCSC_MODE_CURRENT_MASK 0x00000180L
+//MPC_OUT2_CSC_C11_C12_A
+#define MPC_OUT2_CSC_C11_C12_A__MPC_OCSC_C11_A__SHIFT 0x0
+#define MPC_OUT2_CSC_C11_C12_A__MPC_OCSC_C12_A__SHIFT 0x10
+#define MPC_OUT2_CSC_C11_C12_A__MPC_OCSC_C11_A_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C11_C12_A__MPC_OCSC_C12_A_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C13_C14_A
+#define MPC_OUT2_CSC_C13_C14_A__MPC_OCSC_C13_A__SHIFT 0x0
+#define MPC_OUT2_CSC_C13_C14_A__MPC_OCSC_C14_A__SHIFT 0x10
+#define MPC_OUT2_CSC_C13_C14_A__MPC_OCSC_C13_A_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C13_C14_A__MPC_OCSC_C14_A_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C21_C22_A
+#define MPC_OUT2_CSC_C21_C22_A__MPC_OCSC_C21_A__SHIFT 0x0
+#define MPC_OUT2_CSC_C21_C22_A__MPC_OCSC_C22_A__SHIFT 0x10
+#define MPC_OUT2_CSC_C21_C22_A__MPC_OCSC_C21_A_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C21_C22_A__MPC_OCSC_C22_A_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C23_C24_A
+#define MPC_OUT2_CSC_C23_C24_A__MPC_OCSC_C23_A__SHIFT 0x0
+#define MPC_OUT2_CSC_C23_C24_A__MPC_OCSC_C24_A__SHIFT 0x10
+#define MPC_OUT2_CSC_C23_C24_A__MPC_OCSC_C23_A_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C23_C24_A__MPC_OCSC_C24_A_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C31_C32_A
+#define MPC_OUT2_CSC_C31_C32_A__MPC_OCSC_C31_A__SHIFT 0x0
+#define MPC_OUT2_CSC_C31_C32_A__MPC_OCSC_C32_A__SHIFT 0x10
+#define MPC_OUT2_CSC_C31_C32_A__MPC_OCSC_C31_A_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C31_C32_A__MPC_OCSC_C32_A_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C33_C34_A
+#define MPC_OUT2_CSC_C33_C34_A__MPC_OCSC_C33_A__SHIFT 0x0
+#define MPC_OUT2_CSC_C33_C34_A__MPC_OCSC_C34_A__SHIFT 0x10
+#define MPC_OUT2_CSC_C33_C34_A__MPC_OCSC_C33_A_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C33_C34_A__MPC_OCSC_C34_A_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C11_C12_B
+#define MPC_OUT2_CSC_C11_C12_B__MPC_OCSC_C11_B__SHIFT 0x0
+#define MPC_OUT2_CSC_C11_C12_B__MPC_OCSC_C12_B__SHIFT 0x10
+#define MPC_OUT2_CSC_C11_C12_B__MPC_OCSC_C11_B_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C11_C12_B__MPC_OCSC_C12_B_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C13_C14_B
+#define MPC_OUT2_CSC_C13_C14_B__MPC_OCSC_C13_B__SHIFT 0x0
+#define MPC_OUT2_CSC_C13_C14_B__MPC_OCSC_C14_B__SHIFT 0x10
+#define MPC_OUT2_CSC_C13_C14_B__MPC_OCSC_C13_B_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C13_C14_B__MPC_OCSC_C14_B_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C21_C22_B
+#define MPC_OUT2_CSC_C21_C22_B__MPC_OCSC_C21_B__SHIFT 0x0
+#define MPC_OUT2_CSC_C21_C22_B__MPC_OCSC_C22_B__SHIFT 0x10
+#define MPC_OUT2_CSC_C21_C22_B__MPC_OCSC_C21_B_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C21_C22_B__MPC_OCSC_C22_B_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C23_C24_B
+#define MPC_OUT2_CSC_C23_C24_B__MPC_OCSC_C23_B__SHIFT 0x0
+#define MPC_OUT2_CSC_C23_C24_B__MPC_OCSC_C24_B__SHIFT 0x10
+#define MPC_OUT2_CSC_C23_C24_B__MPC_OCSC_C23_B_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C23_C24_B__MPC_OCSC_C24_B_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C31_C32_B
+#define MPC_OUT2_CSC_C31_C32_B__MPC_OCSC_C31_B__SHIFT 0x0
+#define MPC_OUT2_CSC_C31_C32_B__MPC_OCSC_C32_B__SHIFT 0x10
+#define MPC_OUT2_CSC_C31_C32_B__MPC_OCSC_C31_B_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C31_C32_B__MPC_OCSC_C32_B_MASK 0xFFFF0000L
+//MPC_OUT2_CSC_C33_C34_B
+#define MPC_OUT2_CSC_C33_C34_B__MPC_OCSC_C33_B__SHIFT 0x0
+#define MPC_OUT2_CSC_C33_C34_B__MPC_OCSC_C34_B__SHIFT 0x10
+#define MPC_OUT2_CSC_C33_C34_B__MPC_OCSC_C33_B_MASK 0x0000FFFFL
+#define MPC_OUT2_CSC_C33_C34_B__MPC_OCSC_C34_B_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_MODE
+#define MPC_OUT3_CSC_MODE__MPC_OCSC_MODE__SHIFT 0x0
+#define MPC_OUT3_CSC_MODE__MPC_OCSC_MODE_CURRENT__SHIFT 0x7
+#define MPC_OUT3_CSC_MODE__MPC_OCSC_MODE_MASK 0x00000003L
+#define MPC_OUT3_CSC_MODE__MPC_OCSC_MODE_CURRENT_MASK 0x00000180L
+//MPC_OUT3_CSC_C11_C12_A
+#define MPC_OUT3_CSC_C11_C12_A__MPC_OCSC_C11_A__SHIFT 0x0
+#define MPC_OUT3_CSC_C11_C12_A__MPC_OCSC_C12_A__SHIFT 0x10
+#define MPC_OUT3_CSC_C11_C12_A__MPC_OCSC_C11_A_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C11_C12_A__MPC_OCSC_C12_A_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C13_C14_A
+#define MPC_OUT3_CSC_C13_C14_A__MPC_OCSC_C13_A__SHIFT 0x0
+#define MPC_OUT3_CSC_C13_C14_A__MPC_OCSC_C14_A__SHIFT 0x10
+#define MPC_OUT3_CSC_C13_C14_A__MPC_OCSC_C13_A_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C13_C14_A__MPC_OCSC_C14_A_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C21_C22_A
+#define MPC_OUT3_CSC_C21_C22_A__MPC_OCSC_C21_A__SHIFT 0x0
+#define MPC_OUT3_CSC_C21_C22_A__MPC_OCSC_C22_A__SHIFT 0x10
+#define MPC_OUT3_CSC_C21_C22_A__MPC_OCSC_C21_A_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C21_C22_A__MPC_OCSC_C22_A_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C23_C24_A
+#define MPC_OUT3_CSC_C23_C24_A__MPC_OCSC_C23_A__SHIFT 0x0
+#define MPC_OUT3_CSC_C23_C24_A__MPC_OCSC_C24_A__SHIFT 0x10
+#define MPC_OUT3_CSC_C23_C24_A__MPC_OCSC_C23_A_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C23_C24_A__MPC_OCSC_C24_A_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C31_C32_A
+#define MPC_OUT3_CSC_C31_C32_A__MPC_OCSC_C31_A__SHIFT 0x0
+#define MPC_OUT3_CSC_C31_C32_A__MPC_OCSC_C32_A__SHIFT 0x10
+#define MPC_OUT3_CSC_C31_C32_A__MPC_OCSC_C31_A_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C31_C32_A__MPC_OCSC_C32_A_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C33_C34_A
+#define MPC_OUT3_CSC_C33_C34_A__MPC_OCSC_C33_A__SHIFT 0x0
+#define MPC_OUT3_CSC_C33_C34_A__MPC_OCSC_C34_A__SHIFT 0x10
+#define MPC_OUT3_CSC_C33_C34_A__MPC_OCSC_C33_A_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C33_C34_A__MPC_OCSC_C34_A_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C11_C12_B
+#define MPC_OUT3_CSC_C11_C12_B__MPC_OCSC_C11_B__SHIFT 0x0
+#define MPC_OUT3_CSC_C11_C12_B__MPC_OCSC_C12_B__SHIFT 0x10
+#define MPC_OUT3_CSC_C11_C12_B__MPC_OCSC_C11_B_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C11_C12_B__MPC_OCSC_C12_B_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C13_C14_B
+#define MPC_OUT3_CSC_C13_C14_B__MPC_OCSC_C13_B__SHIFT 0x0
+#define MPC_OUT3_CSC_C13_C14_B__MPC_OCSC_C14_B__SHIFT 0x10
+#define MPC_OUT3_CSC_C13_C14_B__MPC_OCSC_C13_B_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C13_C14_B__MPC_OCSC_C14_B_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C21_C22_B
+#define MPC_OUT3_CSC_C21_C22_B__MPC_OCSC_C21_B__SHIFT 0x0
+#define MPC_OUT3_CSC_C21_C22_B__MPC_OCSC_C22_B__SHIFT 0x10
+#define MPC_OUT3_CSC_C21_C22_B__MPC_OCSC_C21_B_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C21_C22_B__MPC_OCSC_C22_B_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C23_C24_B
+#define MPC_OUT3_CSC_C23_C24_B__MPC_OCSC_C23_B__SHIFT 0x0
+#define MPC_OUT3_CSC_C23_C24_B__MPC_OCSC_C24_B__SHIFT 0x10
+#define MPC_OUT3_CSC_C23_C24_B__MPC_OCSC_C23_B_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C23_C24_B__MPC_OCSC_C24_B_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C31_C32_B
+#define MPC_OUT3_CSC_C31_C32_B__MPC_OCSC_C31_B__SHIFT 0x0
+#define MPC_OUT3_CSC_C31_C32_B__MPC_OCSC_C32_B__SHIFT 0x10
+#define MPC_OUT3_CSC_C31_C32_B__MPC_OCSC_C31_B_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C31_C32_B__MPC_OCSC_C32_B_MASK 0xFFFF0000L
+//MPC_OUT3_CSC_C33_C34_B
+#define MPC_OUT3_CSC_C33_C34_B__MPC_OCSC_C33_B__SHIFT 0x0
+#define MPC_OUT3_CSC_C33_C34_B__MPC_OCSC_C34_B__SHIFT 0x10
+#define MPC_OUT3_CSC_C33_C34_B__MPC_OCSC_C33_B_MASK 0x0000FFFFL
+#define MPC_OUT3_CSC_C33_C34_B__MPC_OCSC_C34_B_MASK 0xFFFF0000L
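+/*
+ * Illustrative sketch only, not part of the auto-generated register list
+ * above: every field in this header is described by a *__SHIFT/*_MASK pair,
+ * which is combined when packing or unpacking a 32-bit register value.  The
+ * helper names below are hypothetical examples of that pattern; amdgpu's own
+ * REG_GET_FIELD()/REG_SET_FIELD() macros work the same way.
+ */
+#include <linux/types.h>
+
+/* Extract a field: mask off its bits, then shift them down to bit 0. */
+static inline u32 example_get_field(u32 val, u32 mask, u32 shift)
+{
+	return (val & mask) >> shift;
+}
+
+/* Insert a field: clear its bits, then OR in the shifted, masked value. */
+static inline u32 example_set_field(u32 val, u32 mask, u32 shift, u32 field)
+{
+	return (val & ~mask) | ((field << shift) & mask);
+}
+
+/*
+ * Hypothetical usage: route MPCC 2 to MPC output 0 by updating the
+ * MPC_OUT_MUX field of MPC_OUT0_MUX defined above:
+ *
+ *	mux = example_set_field(mux, MPC_OUT0_MUX__MPC_OUT_MUX_MASK,
+ *				MPC_OUT0_MUX__MPC_OUT_MUX__SHIFT, 2);
+ */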
+
+
+// addressBlock: dce_dc_opp_abm0_dispdec
+//ABM0_BL1_PWM_AMBIENT_LIGHT_LEVEL
+#define ABM0_BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL__SHIFT 0x0
+#define ABM0_BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL_MASK 0x0001FFFFL
+//ABM0_BL1_PWM_USER_LEVEL
+#define ABM0_BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL__SHIFT 0x0
+#define ABM0_BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL_MASK 0x0001FFFFL
+//ABM0_BL1_PWM_TARGET_ABM_LEVEL
+#define ABM0_BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL__SHIFT 0x0
+#define ABM0_BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL_MASK 0x0001FFFFL
+//ABM0_BL1_PWM_CURRENT_ABM_LEVEL
+#define ABM0_BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL__SHIFT 0x0
+#define ABM0_BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL_MASK 0x0001FFFFL
+//ABM0_BL1_PWM_FINAL_DUTY_CYCLE
+#define ABM0_BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE__SHIFT 0x0
+#define ABM0_BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE_MASK 0x0001FFFFL
+//ABM0_BL1_PWM_MINIMUM_DUTY_CYCLE
+#define ABM0_BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE__SHIFT 0x0
+#define ABM0_BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE_MASK 0x0001FFFFL
+//ABM0_BL1_PWM_ABM_CNTL
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN__SHIFT 0x0
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN__SHIFT 0x1
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN__SHIFT 0x2
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN__SHIFT 0x3
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE__SHIFT 0x10
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN_MASK 0x00000001L
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN_MASK 0x00000002L
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN_MASK 0x00000004L
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN_MASK 0x00000008L
+#define ABM0_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE_MASK 0xFFFF0000L
+//ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM0_BL1_PWM_GRP2_REG_LOCK
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK__SHIFT 0x0
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING__SHIFT 0x8
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START__SHIFT 0x10
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL__SHIFT 0x11
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN__SHIFT 0x18
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN__SHIFT 0x1f
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK_MASK 0x00000001L
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING_MASK 0x00000100L
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START_MASK 0x00010000L
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL_MASK 0x000E0000L
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN_MASK 0x01000000L
+#define ABM0_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN_MASK 0x80000000L
+//ABM0_DC_ABM1_CNTL
+#define ABM0_DC_ABM1_CNTL__ABM1_EN__SHIFT 0x0
+#define ABM0_DC_ABM1_CNTL__ABM1_PROCESSING_BYPASS__SHIFT 0x4
+#define ABM0_DC_ABM1_CNTL__ABM1_EN_MASK 0x00000001L
+#define ABM0_DC_ABM1_CNTL__ABM1_PROCESSING_BYPASS_MASK 0x00000010L
+//ABM0_DC_ABM1_IPCSC_COEFF_SEL
+#define ABM0_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B__SHIFT 0x0
+#define ABM0_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G__SHIFT 0x8
+#define ABM0_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R__SHIFT 0x10
+#define ABM0_DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B_MASK 0x0000000FL
+#define ABM0_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G_MASK 0x00000F00L
+#define ABM0_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R_MASK 0x000F0000L
+#define ABM0_DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM0_DC_ABM1_ACE_OFFSET_SLOPE_0
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0__SHIFT 0x0
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0__SHIFT 0x10
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0_MASK 0x00007FFFL
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0_MASK 0x07FF0000L
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM0_DC_ABM1_ACE_OFFSET_SLOPE_1
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1__SHIFT 0x0
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1__SHIFT 0x10
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1_MASK 0x00007FFFL
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1_MASK 0x07FF0000L
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM0_DC_ABM1_ACE_OFFSET_SLOPE_2
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2__SHIFT 0x0
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2__SHIFT 0x10
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2_MASK 0x00007FFFL
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2_MASK 0x07FF0000L
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM0_DC_ABM1_ACE_OFFSET_SLOPE_3
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3__SHIFT 0x0
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3__SHIFT 0x10
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3_MASK 0x00007FFFL
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3_MASK 0x07FF0000L
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM0_DC_ABM1_ACE_OFFSET_SLOPE_4
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4__SHIFT 0x0
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4__SHIFT 0x10
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4_MASK 0x00007FFFL
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4_MASK 0x07FF0000L
+#define ABM0_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM0_DC_ABM1_ACE_THRES_12
+#define ABM0_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1__SHIFT 0x0
+#define ABM0_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2__SHIFT 0x10
+#define ABM0_DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1_MASK 0x000003FFL
+#define ABM0_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2_MASK 0x03FF0000L
+#define ABM0_DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM0_DC_ABM1_ACE_THRES_34
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3__SHIFT 0x0
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4__SHIFT 0x10
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN__SHIFT 0x1c
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN__SHIFT 0x1d
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING__SHIFT 0x1e
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3_MASK 0x000003FFL
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4_MASK 0x03FF0000L
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN_MASK 0x10000000L
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN_MASK 0x20000000L
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING_MASK 0x40000000L
+#define ABM0_DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM0_DC_ABM1_ACE_CNTL_MISC
+#define ABM0_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME__SHIFT 0x0
+#define ABM0_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR__SHIFT 0x8
+#define ABM0_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_MASK 0x00000001L
+#define ABM0_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR_MASK 0x00000100L
+//ABM0_DC_ABM1_DEBUG_MISC
+//ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS__SHIFT 0x0
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS__SHIFT 0x1
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS__SHIFT 0x2
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME__SHIFT 0x8
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME__SHIFT 0x9
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME__SHIFT 0xa
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x10
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x18
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x1f
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS_MASK 0x00000001L
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS_MASK 0x00000002L
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS_MASK 0x00000004L
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_MASK 0x00000100L
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_MASK 0x00000200L
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_MASK 0x00000400L
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR_MASK 0x00010000L
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR_MASK 0x01000000L
+#define ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR_MASK 0x80000000L
+//ABM0_DC_ABM1_HG_MISC_CTRL
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL__SHIFT 0x8
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL__SHIFT 0xc
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL__SHIFT 0x10
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN__SHIFT 0x14
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN__SHIFT 0x17
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL__SHIFT 0x18
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START__SHIFT 0x1c
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN__SHIFT 0x1d
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING__SHIFT 0x1e
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL_MASK 0x00000003L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL_MASK 0x00000100L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL_MASK 0x00001000L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL_MASK 0x00030000L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN_MASK 0x00100000L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN_MASK 0x00800000L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL_MASK 0x07000000L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START_MASK 0x10000000L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN_MASK 0x20000000L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING_MASK 0x40000000L
+#define ABM0_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM0_DC_ABM1_LS_SUM_OF_LUMA
+#define ABM0_DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA__SHIFT 0x0
+#define ABM0_DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_LS_MIN_MAX_LUMA
+#define ABM0_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA__SHIFT 0x0
+#define ABM0_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA__SHIFT 0x10
+#define ABM0_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA_MASK 0x000003FFL
+#define ABM0_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA_MASK 0x03FF0000L
+//ABM0_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA
+#define ABM0_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA__SHIFT 0x0
+#define ABM0_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA__SHIFT 0x10
+#define ABM0_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA_MASK 0x000003FFL
+#define ABM0_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA_MASK 0x03FF0000L
+//ABM0_DC_ABM1_LS_PIXEL_COUNT
+#define ABM0_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT__SHIFT 0x0
+#define ABM0_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_SUM_OF_LUMA_MSB__SHIFT 0x18
+#define ABM0_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT_MASK 0x00FFFFFFL
+#define ABM0_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_SUM_OF_LUMA_MSB_MASK 0xFF000000L
+//ABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES
+#define ABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES__SHIFT 0x0
+#define ABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES__SHIFT 0x10
+#define ABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES_MASK 0x000003FFL
+#define ABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES_MASK 0x03FF0000L
+#define ABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM0_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT
+#define ABM0_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define ABM0_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT_MASK 0x00FFFFFFL
+//ABM0_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT
+#define ABM0_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define ABM0_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT_MASK 0x00FFFFFFL
+//ABM0_DC_ABM1_HG_SAMPLE_RATE
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM0_DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM0_DC_ABM1_LS_SAMPLE_RATE
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM0_DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM0_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG
+#define ABM0_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX
+#define ABM0_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX
+#define ABM0_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX
+#define ABM0_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX
+#define ABM0_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_1
+#define ABM0_DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_2
+#define ABM0_DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_3
+#define ABM0_DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_4
+#define ABM0_DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_5
+#define ABM0_DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_6
+#define ABM0_DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_7
+#define ABM0_DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_8
+#define ABM0_DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_9
+#define ABM0_DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_10
+#define ABM0_DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_11
+#define ABM0_DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_12
+#define ABM0_DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_13
+#define ABM0_DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_14
+#define ABM0_DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_15
+#define ABM0_DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_16
+#define ABM0_DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_17
+#define ABM0_DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_18
+#define ABM0_DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_19
+#define ABM0_DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_20
+#define ABM0_DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_21
+#define ABM0_DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_22
+#define ABM0_DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_23
+#define ABM0_DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_HG_RESULT_24
+#define ABM0_DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24__SHIFT 0x0
+#define ABM0_DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24_MASK 0xFFFFFFFFL
+//ABM0_DC_ABM1_BL_MASTER_LOCK
+#define ABM0_DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK__SHIFT 0x1f
+#define ABM0_DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK_MASK 0x80000000L
+
+
+// addressBlock: dce_dc_opp_abm1_dispdec
+//ABM1_BL1_PWM_AMBIENT_LIGHT_LEVEL
+#define ABM1_BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL__SHIFT 0x0
+#define ABM1_BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL_MASK 0x0001FFFFL
+//ABM1_BL1_PWM_USER_LEVEL
+#define ABM1_BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL__SHIFT 0x0
+#define ABM1_BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL_MASK 0x0001FFFFL
+//ABM1_BL1_PWM_TARGET_ABM_LEVEL
+#define ABM1_BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL__SHIFT 0x0
+#define ABM1_BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL_MASK 0x0001FFFFL
+//ABM1_BL1_PWM_CURRENT_ABM_LEVEL
+#define ABM1_BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL__SHIFT 0x0
+#define ABM1_BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL_MASK 0x0001FFFFL
+//ABM1_BL1_PWM_FINAL_DUTY_CYCLE
+#define ABM1_BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE__SHIFT 0x0
+#define ABM1_BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE_MASK 0x0001FFFFL
+//ABM1_BL1_PWM_MINIMUM_DUTY_CYCLE
+#define ABM1_BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE__SHIFT 0x0
+#define ABM1_BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE_MASK 0x0001FFFFL
+//ABM1_BL1_PWM_ABM_CNTL
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN__SHIFT 0x0
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN__SHIFT 0x1
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN__SHIFT 0x2
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN__SHIFT 0x3
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE__SHIFT 0x10
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN_MASK 0x00000001L
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN_MASK 0x00000002L
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN_MASK 0x00000004L
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN_MASK 0x00000008L
+#define ABM1_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE_MASK 0xFFFF0000L
+//ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM1_BL1_PWM_GRP2_REG_LOCK
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK__SHIFT 0x0
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING__SHIFT 0x8
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START__SHIFT 0x10
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL__SHIFT 0x11
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN__SHIFT 0x18
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN__SHIFT 0x1f
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK_MASK 0x00000001L
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING_MASK 0x00000100L
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START_MASK 0x00010000L
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL_MASK 0x000E0000L
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN_MASK 0x01000000L
+#define ABM1_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN_MASK 0x80000000L
+//ABM1_DC_ABM1_CNTL
+#define ABM1_DC_ABM1_CNTL__ABM1_EN__SHIFT 0x0
+#define ABM1_DC_ABM1_CNTL__ABM1_PROCESSING_BYPASS__SHIFT 0x4
+#define ABM1_DC_ABM1_CNTL__ABM1_EN_MASK 0x00000001L
+#define ABM1_DC_ABM1_CNTL__ABM1_PROCESSING_BYPASS_MASK 0x00000010L
+//ABM1_DC_ABM1_IPCSC_COEFF_SEL
+#define ABM1_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B__SHIFT 0x0
+#define ABM1_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G__SHIFT 0x8
+#define ABM1_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R__SHIFT 0x10
+#define ABM1_DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B_MASK 0x0000000FL
+#define ABM1_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G_MASK 0x00000F00L
+#define ABM1_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R_MASK 0x000F0000L
+#define ABM1_DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM1_DC_ABM1_ACE_OFFSET_SLOPE_0
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0__SHIFT 0x0
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0__SHIFT 0x10
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0_MASK 0x00007FFFL
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0_MASK 0x07FF0000L
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM1_DC_ABM1_ACE_OFFSET_SLOPE_1
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1__SHIFT 0x0
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1__SHIFT 0x10
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1_MASK 0x00007FFFL
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1_MASK 0x07FF0000L
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM1_DC_ABM1_ACE_OFFSET_SLOPE_2
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2__SHIFT 0x0
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2__SHIFT 0x10
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2_MASK 0x00007FFFL
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2_MASK 0x07FF0000L
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM1_DC_ABM1_ACE_OFFSET_SLOPE_3
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3__SHIFT 0x0
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3__SHIFT 0x10
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3_MASK 0x00007FFFL
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3_MASK 0x07FF0000L
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM1_DC_ABM1_ACE_OFFSET_SLOPE_4
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4__SHIFT 0x0
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4__SHIFT 0x10
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4_MASK 0x00007FFFL
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4_MASK 0x07FF0000L
+#define ABM1_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM1_DC_ABM1_ACE_THRES_12
+#define ABM1_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1__SHIFT 0x0
+#define ABM1_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2__SHIFT 0x10
+#define ABM1_DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1_MASK 0x000003FFL
+#define ABM1_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2_MASK 0x03FF0000L
+#define ABM1_DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM1_DC_ABM1_ACE_THRES_34
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3__SHIFT 0x0
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4__SHIFT 0x10
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN__SHIFT 0x1c
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN__SHIFT 0x1d
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING__SHIFT 0x1e
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3_MASK 0x000003FFL
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4_MASK 0x03FF0000L
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN_MASK 0x10000000L
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN_MASK 0x20000000L
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING_MASK 0x40000000L
+#define ABM1_DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM1_DC_ABM1_ACE_CNTL_MISC
+#define ABM1_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME__SHIFT 0x0
+#define ABM1_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR__SHIFT 0x8
+#define ABM1_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_MASK 0x00000001L
+#define ABM1_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR_MASK 0x00000100L
+//ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS__SHIFT 0x0
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS__SHIFT 0x1
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS__SHIFT 0x2
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME__SHIFT 0x8
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME__SHIFT 0x9
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME__SHIFT 0xa
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x10
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x18
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x1f
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS_MASK 0x00000001L
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS_MASK 0x00000002L
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS_MASK 0x00000004L
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_MASK 0x00000100L
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_MASK 0x00000200L
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_MASK 0x00000400L
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR_MASK 0x00010000L
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR_MASK 0x01000000L
+#define ABM1_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR_MASK 0x80000000L
+//ABM1_DC_ABM1_HG_MISC_CTRL
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL__SHIFT 0x8
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL__SHIFT 0xc
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL__SHIFT 0x10
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN__SHIFT 0x14
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN__SHIFT 0x17
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL__SHIFT 0x18
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START__SHIFT 0x1c
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN__SHIFT 0x1d
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING__SHIFT 0x1e
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL_MASK 0x00000003L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL_MASK 0x00000100L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL_MASK 0x00001000L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL_MASK 0x00030000L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN_MASK 0x00100000L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN_MASK 0x00800000L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL_MASK 0x07000000L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START_MASK 0x10000000L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN_MASK 0x20000000L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING_MASK 0x40000000L
+#define ABM1_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM1_DC_ABM1_LS_SUM_OF_LUMA
+#define ABM1_DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA__SHIFT 0x0
+#define ABM1_DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_LS_MIN_MAX_LUMA
+#define ABM1_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA__SHIFT 0x0
+#define ABM1_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA__SHIFT 0x10
+#define ABM1_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA_MASK 0x000003FFL
+#define ABM1_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA_MASK 0x03FF0000L
+//ABM1_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA
+#define ABM1_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA__SHIFT 0x0
+#define ABM1_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA__SHIFT 0x10
+#define ABM1_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA_MASK 0x000003FFL
+#define ABM1_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA_MASK 0x03FF0000L
+//ABM1_DC_ABM1_LS_PIXEL_COUNT
+#define ABM1_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT__SHIFT 0x0
+#define ABM1_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_SUM_OF_LUMA_MSB__SHIFT 0x18
+#define ABM1_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT_MASK 0x00FFFFFFL
+#define ABM1_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_SUM_OF_LUMA_MSB_MASK 0xFF000000L
+//ABM1_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES
+#define ABM1_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES__SHIFT 0x0
+#define ABM1_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES__SHIFT 0x10
+#define ABM1_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES_MASK 0x000003FFL
+#define ABM1_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES_MASK 0x03FF0000L
+#define ABM1_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM1_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT
+#define ABM1_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define ABM1_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT_MASK 0x00FFFFFFL
+//ABM1_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT
+#define ABM1_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define ABM1_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT_MASK 0x00FFFFFFL
+//ABM1_DC_ABM1_HG_SAMPLE_RATE
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM1_DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM1_DC_ABM1_LS_SAMPLE_RATE
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM1_DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM1_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG
+#define ABM1_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX
+#define ABM1_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX
+#define ABM1_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX
+#define ABM1_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX
+#define ABM1_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_1
+#define ABM1_DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_2
+#define ABM1_DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_3
+#define ABM1_DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_4
+#define ABM1_DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_5
+#define ABM1_DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_6
+#define ABM1_DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_7
+#define ABM1_DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_8
+#define ABM1_DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_9
+#define ABM1_DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_10
+#define ABM1_DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_11
+#define ABM1_DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_12
+#define ABM1_DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_13
+#define ABM1_DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_14
+#define ABM1_DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_15
+#define ABM1_DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_16
+#define ABM1_DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_17
+#define ABM1_DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_18
+#define ABM1_DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_19
+#define ABM1_DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_20
+#define ABM1_DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_21
+#define ABM1_DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_22
+#define ABM1_DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_23
+#define ABM1_DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_HG_RESULT_24
+#define ABM1_DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24__SHIFT 0x0
+#define ABM1_DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24_MASK 0xFFFFFFFFL
+//ABM1_DC_ABM1_BL_MASTER_LOCK
+#define ABM1_DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK__SHIFT 0x1f
+#define ABM1_DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK_MASK 0x80000000L
+
+
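The SHIFT/MASK pairs above follow the usual amdgpu register-header convention: each field of a register gets a <REG>__<FIELD>__SHIFT and a matching <REG>__<FIELD>_MASK. As a minimal sketch, assuming only the macros defined in this header (the helper name and its placement are illustrative, not part of this patch), a field is read back by masking then shifting:

/* Illustrative only: extract the ACE_THRES_1 field from a raw
 * ABM1_DC_ABM1_ACE_THRES_12 register value using the generated pair. */
static inline u32 abm1_ace_thres_1(u32 reg_val)
{
	return (reg_val & ABM1_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1_MASK) >>
	       ABM1_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1__SHIFT;
}

The same pattern applies unchanged to the ABM2/ABM3 instances that follow; only the register prefix differs.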
+// addressBlock: dce_dc_opp_abm2_dispdec
+//ABM2_BL1_PWM_AMBIENT_LIGHT_LEVEL
+#define ABM2_BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL__SHIFT 0x0
+#define ABM2_BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL_MASK 0x0001FFFFL
+//ABM2_BL1_PWM_USER_LEVEL
+#define ABM2_BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL__SHIFT 0x0
+#define ABM2_BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL_MASK 0x0001FFFFL
+//ABM2_BL1_PWM_TARGET_ABM_LEVEL
+#define ABM2_BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL__SHIFT 0x0
+#define ABM2_BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL_MASK 0x0001FFFFL
+//ABM2_BL1_PWM_CURRENT_ABM_LEVEL
+#define ABM2_BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL__SHIFT 0x0
+#define ABM2_BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL_MASK 0x0001FFFFL
+//ABM2_BL1_PWM_FINAL_DUTY_CYCLE
+#define ABM2_BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE__SHIFT 0x0
+#define ABM2_BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE_MASK 0x0001FFFFL
+//ABM2_BL1_PWM_MINIMUM_DUTY_CYCLE
+#define ABM2_BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE__SHIFT 0x0
+#define ABM2_BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE_MASK 0x0001FFFFL
+//ABM2_BL1_PWM_ABM_CNTL
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN__SHIFT 0x0
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN__SHIFT 0x1
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN__SHIFT 0x2
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN__SHIFT 0x3
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE__SHIFT 0x10
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN_MASK 0x00000001L
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN_MASK 0x00000002L
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN_MASK 0x00000004L
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN_MASK 0x00000008L
+#define ABM2_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE_MASK 0xFFFF0000L
+//ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM2_BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM2_BL1_PWM_GRP2_REG_LOCK
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK__SHIFT 0x0
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING__SHIFT 0x8
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START__SHIFT 0x10
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL__SHIFT 0x11
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN__SHIFT 0x18
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN__SHIFT 0x1f
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK_MASK 0x00000001L
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING_MASK 0x00000100L
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START_MASK 0x00010000L
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL_MASK 0x000E0000L
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN_MASK 0x01000000L
+#define ABM2_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN_MASK 0x80000000L
+//ABM2_DC_ABM1_CNTL
+#define ABM2_DC_ABM1_CNTL__ABM1_EN__SHIFT 0x0
+#define ABM2_DC_ABM1_CNTL__ABM1_PROCESSING_BYPASS__SHIFT 0x4
+#define ABM2_DC_ABM1_CNTL__ABM1_EN_MASK 0x00000001L
+#define ABM2_DC_ABM1_CNTL__ABM1_PROCESSING_BYPASS_MASK 0x00000010L
+//ABM2_DC_ABM1_IPCSC_COEFF_SEL
+#define ABM2_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B__SHIFT 0x0
+#define ABM2_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G__SHIFT 0x8
+#define ABM2_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R__SHIFT 0x10
+#define ABM2_DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B_MASK 0x0000000FL
+#define ABM2_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G_MASK 0x00000F00L
+#define ABM2_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R_MASK 0x000F0000L
+#define ABM2_DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM2_DC_ABM1_ACE_OFFSET_SLOPE_0
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0__SHIFT 0x0
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0__SHIFT 0x10
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0_MASK 0x00007FFFL
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0_MASK 0x07FF0000L
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM2_DC_ABM1_ACE_OFFSET_SLOPE_1
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1__SHIFT 0x0
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1__SHIFT 0x10
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1_MASK 0x00007FFFL
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1_MASK 0x07FF0000L
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM2_DC_ABM1_ACE_OFFSET_SLOPE_2
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2__SHIFT 0x0
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2__SHIFT 0x10
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2_MASK 0x00007FFFL
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2_MASK 0x07FF0000L
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM2_DC_ABM1_ACE_OFFSET_SLOPE_3
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3__SHIFT 0x0
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3__SHIFT 0x10
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3_MASK 0x00007FFFL
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3_MASK 0x07FF0000L
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM2_DC_ABM1_ACE_OFFSET_SLOPE_4
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4__SHIFT 0x0
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4__SHIFT 0x10
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4_MASK 0x00007FFFL
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4_MASK 0x07FF0000L
+#define ABM2_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM2_DC_ABM1_ACE_THRES_12
+#define ABM2_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1__SHIFT 0x0
+#define ABM2_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2__SHIFT 0x10
+#define ABM2_DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1_MASK 0x000003FFL
+#define ABM2_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2_MASK 0x03FF0000L
+#define ABM2_DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM2_DC_ABM1_ACE_THRES_34
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3__SHIFT 0x0
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4__SHIFT 0x10
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN__SHIFT 0x1c
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN__SHIFT 0x1d
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING__SHIFT 0x1e
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3_MASK 0x000003FFL
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4_MASK 0x03FF0000L
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN_MASK 0x10000000L
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN_MASK 0x20000000L
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING_MASK 0x40000000L
+#define ABM2_DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM2_DC_ABM1_ACE_CNTL_MISC
+#define ABM2_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME__SHIFT 0x0
+#define ABM2_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR__SHIFT 0x8
+#define ABM2_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_MASK 0x00000001L
+#define ABM2_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR_MASK 0x00000100L
+//ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS__SHIFT 0x0
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS__SHIFT 0x1
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS__SHIFT 0x2
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME__SHIFT 0x8
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME__SHIFT 0x9
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME__SHIFT 0xa
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x10
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x18
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x1f
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS_MASK 0x00000001L
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS_MASK 0x00000002L
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS_MASK 0x00000004L
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_MASK 0x00000100L
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_MASK 0x00000200L
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_MASK 0x00000400L
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR_MASK 0x00010000L
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR_MASK 0x01000000L
+#define ABM2_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR_MASK 0x80000000L
+//ABM2_DC_ABM1_HG_MISC_CTRL
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL__SHIFT 0x8
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL__SHIFT 0xc
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL__SHIFT 0x10
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN__SHIFT 0x14
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN__SHIFT 0x17
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL__SHIFT 0x18
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START__SHIFT 0x1c
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN__SHIFT 0x1d
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING__SHIFT 0x1e
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL_MASK 0x00000003L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL_MASK 0x00000100L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL_MASK 0x00001000L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL_MASK 0x00030000L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN_MASK 0x00100000L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN_MASK 0x00800000L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL_MASK 0x07000000L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START_MASK 0x10000000L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN_MASK 0x20000000L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING_MASK 0x40000000L
+#define ABM2_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM2_DC_ABM1_LS_SUM_OF_LUMA
+#define ABM2_DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA__SHIFT 0x0
+#define ABM2_DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_LS_MIN_MAX_LUMA
+#define ABM2_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA__SHIFT 0x0
+#define ABM2_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA__SHIFT 0x10
+#define ABM2_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA_MASK 0x000003FFL
+#define ABM2_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA_MASK 0x03FF0000L
+//ABM2_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA
+#define ABM2_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA__SHIFT 0x0
+#define ABM2_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA__SHIFT 0x10
+#define ABM2_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA_MASK 0x000003FFL
+#define ABM2_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA_MASK 0x03FF0000L
+//ABM2_DC_ABM1_LS_PIXEL_COUNT
+#define ABM2_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT__SHIFT 0x0
+#define ABM2_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_SUM_OF_LUMA_MSB__SHIFT 0x18
+#define ABM2_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT_MASK 0x00FFFFFFL
+#define ABM2_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_SUM_OF_LUMA_MSB_MASK 0xFF000000L
+//ABM2_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES
+#define ABM2_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES__SHIFT 0x0
+#define ABM2_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES__SHIFT 0x10
+#define ABM2_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES_MASK 0x000003FFL
+#define ABM2_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES_MASK 0x03FF0000L
+#define ABM2_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM2_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT
+#define ABM2_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define ABM2_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT_MASK 0x00FFFFFFL
+//ABM2_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT
+#define ABM2_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define ABM2_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT_MASK 0x00FFFFFFL
+//ABM2_DC_ABM1_HG_SAMPLE_RATE
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM2_DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM2_DC_ABM1_LS_SAMPLE_RATE
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM2_DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM2_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG
+#define ABM2_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX
+#define ABM2_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX
+#define ABM2_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX
+#define ABM2_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX
+#define ABM2_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_1
+#define ABM2_DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_2
+#define ABM2_DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_3
+#define ABM2_DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_4
+#define ABM2_DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_5
+#define ABM2_DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_6
+#define ABM2_DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_7
+#define ABM2_DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_8
+#define ABM2_DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_9
+#define ABM2_DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_10
+#define ABM2_DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_11
+#define ABM2_DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_12
+#define ABM2_DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_13
+#define ABM2_DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_14
+#define ABM2_DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_15
+#define ABM2_DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_16
+#define ABM2_DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_17
+#define ABM2_DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_18
+#define ABM2_DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_19
+#define ABM2_DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_20
+#define ABM2_DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_21
+#define ABM2_DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_22
+#define ABM2_DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_23
+#define ABM2_DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_HG_RESULT_24
+#define ABM2_DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24__SHIFT 0x0
+#define ABM2_DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24_MASK 0xFFFFFFFFL
+//ABM2_DC_ABM1_BL_MASTER_LOCK
+#define ABM2_DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK__SHIFT 0x1f
+#define ABM2_DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK_MASK 0x80000000L
+
+
+// addressBlock: dce_dc_opp_abm3_dispdec
+//ABM3_BL1_PWM_AMBIENT_LIGHT_LEVEL
+#define ABM3_BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL__SHIFT 0x0
+#define ABM3_BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL_MASK 0x0001FFFFL
+//ABM3_BL1_PWM_USER_LEVEL
+#define ABM3_BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL__SHIFT 0x0
+#define ABM3_BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL_MASK 0x0001FFFFL
+//ABM3_BL1_PWM_TARGET_ABM_LEVEL
+#define ABM3_BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL__SHIFT 0x0
+#define ABM3_BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL_MASK 0x0001FFFFL
+//ABM3_BL1_PWM_CURRENT_ABM_LEVEL
+#define ABM3_BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL__SHIFT 0x0
+#define ABM3_BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL_MASK 0x0001FFFFL
+//ABM3_BL1_PWM_FINAL_DUTY_CYCLE
+#define ABM3_BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE__SHIFT 0x0
+#define ABM3_BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE_MASK 0x0001FFFFL
+//ABM3_BL1_PWM_MINIMUM_DUTY_CYCLE
+#define ABM3_BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE__SHIFT 0x0
+#define ABM3_BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE_MASK 0x0001FFFFL
+//ABM3_BL1_PWM_ABM_CNTL
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN__SHIFT 0x0
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN__SHIFT 0x1
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN__SHIFT 0x2
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN__SHIFT 0x3
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE__SHIFT 0x10
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN_MASK 0x00000001L
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN_MASK 0x00000002L
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN_MASK 0x00000004L
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN_MASK 0x00000008L
+#define ABM3_BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE_MASK 0xFFFF0000L
+//ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM3_BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM3_BL1_PWM_GRP2_REG_LOCK
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK__SHIFT 0x0
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING__SHIFT 0x8
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START__SHIFT 0x10
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL__SHIFT 0x11
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN__SHIFT 0x18
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN__SHIFT 0x1f
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK_MASK 0x00000001L
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING_MASK 0x00000100L
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START_MASK 0x00010000L
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL_MASK 0x000E0000L
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN_MASK 0x01000000L
+#define ABM3_BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN_MASK 0x80000000L
+//ABM3_DC_ABM1_CNTL
+#define ABM3_DC_ABM1_CNTL__ABM1_EN__SHIFT 0x0
+#define ABM3_DC_ABM1_CNTL__ABM1_PROCESSING_BYPASS__SHIFT 0x4
+#define ABM3_DC_ABM1_CNTL__ABM1_EN_MASK 0x00000001L
+#define ABM3_DC_ABM1_CNTL__ABM1_PROCESSING_BYPASS_MASK 0x00000010L
+//ABM3_DC_ABM1_IPCSC_COEFF_SEL
+#define ABM3_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B__SHIFT 0x0
+#define ABM3_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G__SHIFT 0x8
+#define ABM3_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R__SHIFT 0x10
+#define ABM3_DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B_MASK 0x0000000FL
+#define ABM3_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G_MASK 0x00000F00L
+#define ABM3_DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R_MASK 0x000F0000L
+#define ABM3_DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM3_DC_ABM1_ACE_OFFSET_SLOPE_0
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0__SHIFT 0x0
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0__SHIFT 0x10
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0_MASK 0x00007FFFL
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0_MASK 0x07FF0000L
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM3_DC_ABM1_ACE_OFFSET_SLOPE_1
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1__SHIFT 0x0
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1__SHIFT 0x10
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1_MASK 0x00007FFFL
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1_MASK 0x07FF0000L
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM3_DC_ABM1_ACE_OFFSET_SLOPE_2
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2__SHIFT 0x0
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2__SHIFT 0x10
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2_MASK 0x00007FFFL
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2_MASK 0x07FF0000L
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM3_DC_ABM1_ACE_OFFSET_SLOPE_3
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3__SHIFT 0x0
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3__SHIFT 0x10
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3_MASK 0x00007FFFL
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3_MASK 0x07FF0000L
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM3_DC_ABM1_ACE_OFFSET_SLOPE_4
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4__SHIFT 0x0
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4__SHIFT 0x10
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4_MASK 0x00007FFFL
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4_MASK 0x07FF0000L
+#define ABM3_DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM3_DC_ABM1_ACE_THRES_12
+#define ABM3_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1__SHIFT 0x0
+#define ABM3_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2__SHIFT 0x10
+#define ABM3_DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1_MASK 0x000003FFL
+#define ABM3_DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2_MASK 0x03FF0000L
+#define ABM3_DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM3_DC_ABM1_ACE_THRES_34
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3__SHIFT 0x0
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4__SHIFT 0x10
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN__SHIFT 0x1c
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN__SHIFT 0x1d
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING__SHIFT 0x1e
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3_MASK 0x000003FFL
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4_MASK 0x03FF0000L
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN_MASK 0x10000000L
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN_MASK 0x20000000L
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING_MASK 0x40000000L
+#define ABM3_DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK_MASK 0x80000000L
+//ABM3_DC_ABM1_ACE_CNTL_MISC
+#define ABM3_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME__SHIFT 0x0
+#define ABM3_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR__SHIFT 0x8
+#define ABM3_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_MASK 0x00000001L
+#define ABM3_DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR_MASK 0x00000100L
+//ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS__SHIFT 0x0
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS__SHIFT 0x1
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS__SHIFT 0x2
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME__SHIFT 0x8
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME__SHIFT 0x9
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME__SHIFT 0xa
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x10
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x18
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x1f
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS_MASK 0x00000001L
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS_MASK 0x00000002L
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS_MASK 0x00000004L
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_MASK 0x00000100L
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_MASK 0x00000200L
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_MASK 0x00000400L
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR_MASK 0x00010000L
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR_MASK 0x01000000L
+#define ABM3_DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR_MASK 0x80000000L
+//ABM3_DC_ABM1_HG_MISC_CTRL
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL__SHIFT 0x8
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL__SHIFT 0xc
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL__SHIFT 0x10
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN__SHIFT 0x14
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN__SHIFT 0x17
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL__SHIFT 0x18
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START__SHIFT 0x1c
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN__SHIFT 0x1d
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING__SHIFT 0x1e
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL_MASK 0x00000003L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL_MASK 0x00000100L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL_MASK 0x00001000L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL_MASK 0x00030000L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN_MASK 0x00100000L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN_MASK 0x00800000L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL_MASK 0x07000000L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START_MASK 0x10000000L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN_MASK 0x20000000L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING_MASK 0x40000000L
+#define ABM3_DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM3_DC_ABM1_LS_SUM_OF_LUMA
+#define ABM3_DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA__SHIFT 0x0
+#define ABM3_DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_LS_MIN_MAX_LUMA
+#define ABM3_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA__SHIFT 0x0
+#define ABM3_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA__SHIFT 0x10
+#define ABM3_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA_MASK 0x000003FFL
+#define ABM3_DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA_MASK 0x03FF0000L
+//ABM3_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA
+#define ABM3_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA__SHIFT 0x0
+#define ABM3_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA__SHIFT 0x10
+#define ABM3_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA_MASK 0x000003FFL
+#define ABM3_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA_MASK 0x03FF0000L
+//ABM3_DC_ABM1_LS_PIXEL_COUNT
+#define ABM3_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT__SHIFT 0x0
+#define ABM3_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_SUM_OF_LUMA_MSB__SHIFT 0x18
+#define ABM3_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT_MASK 0x00FFFFFFL
+#define ABM3_DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_SUM_OF_LUMA_MSB_MASK 0xFF000000L
+//ABM3_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES
+#define ABM3_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES__SHIFT 0x0
+#define ABM3_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES__SHIFT 0x10
+#define ABM3_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES_MASK 0x000003FFL
+#define ABM3_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES_MASK 0x03FF0000L
+#define ABM3_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM3_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT
+#define ABM3_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define ABM3_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT_MASK 0x00FFFFFFL
+//ABM3_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT
+#define ABM3_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define ABM3_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT_MASK 0x00FFFFFFL
+//ABM3_DC_ABM1_HG_SAMPLE_RATE
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM3_DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM3_DC_ABM1_LS_SAMPLE_RATE
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN_MASK 0x00000001L
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x00000002L
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT_MASK 0x0000FF00L
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0x00FF0000L
+#define ABM3_DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000L
+//ABM3_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG
+#define ABM3_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX
+#define ABM3_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX
+#define ABM3_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX
+#define ABM3_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX
+#define ABM3_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_1
+#define ABM3_DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_2
+#define ABM3_DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_3
+#define ABM3_DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_4
+#define ABM3_DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_5
+#define ABM3_DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_6
+#define ABM3_DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_7
+#define ABM3_DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_8
+#define ABM3_DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_9
+#define ABM3_DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_10
+#define ABM3_DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_11
+#define ABM3_DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_12
+#define ABM3_DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_13
+#define ABM3_DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_14
+#define ABM3_DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_15
+#define ABM3_DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_16
+#define ABM3_DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_17
+#define ABM3_DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_18
+#define ABM3_DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_19
+#define ABM3_DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_20
+#define ABM3_DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_21
+#define ABM3_DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_22
+#define ABM3_DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_23
+#define ABM3_DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_HG_RESULT_24
+#define ABM3_DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24__SHIFT 0x0
+#define ABM3_DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24_MASK 0xFFFFFFFFL
+//ABM3_DC_ABM1_BL_MASTER_LOCK
+#define ABM3_DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK__SHIFT 0x1f
+#define ABM3_DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK_MASK 0x80000000L
+
+
+// addressBlock: dce_dc_opp_dpg0_dispdec
+//DPG0_DPG_CONTROL
+#define DPG0_DPG_CONTROL__DPG_EN__SHIFT 0x0
+#define DPG0_DPG_CONTROL__DPG_MODE__SHIFT 0x4
+#define DPG0_DPG_CONTROL__DPG_DYNAMIC_RANGE__SHIFT 0x8
+#define DPG0_DPG_CONTROL__DPG_BIT_DEPTH__SHIFT 0xc
+#define DPG0_DPG_CONTROL__DPG_VRES__SHIFT 0x10
+#define DPG0_DPG_CONTROL__DPG_HRES__SHIFT 0x14
+#define DPG0_DPG_CONTROL__DPG_FIELD_POLARITY__SHIFT 0x18
+#define DPG0_DPG_CONTROL__DPG_EN_MASK 0x00000001L
+#define DPG0_DPG_CONTROL__DPG_MODE_MASK 0x00000070L
+#define DPG0_DPG_CONTROL__DPG_DYNAMIC_RANGE_MASK 0x00000100L
+#define DPG0_DPG_CONTROL__DPG_BIT_DEPTH_MASK 0x00003000L
+#define DPG0_DPG_CONTROL__DPG_VRES_MASK 0x000F0000L
+#define DPG0_DPG_CONTROL__DPG_HRES_MASK 0x00F00000L
+#define DPG0_DPG_CONTROL__DPG_FIELD_POLARITY_MASK 0x01000000L
+//DPG0_DPG_RAMP_CONTROL
+#define DPG0_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET__SHIFT 0x0
+#define DPG0_DPG_RAMP_CONTROL__DPG_INC0__SHIFT 0x18
+#define DPG0_DPG_RAMP_CONTROL__DPG_INC1__SHIFT 0x1c
+#define DPG0_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET_MASK 0x0000FFFFL
+#define DPG0_DPG_RAMP_CONTROL__DPG_INC0_MASK 0x0F000000L
+#define DPG0_DPG_RAMP_CONTROL__DPG_INC1_MASK 0xF0000000L
+//DPG0_DPG_DIMENSIONS
+#define DPG0_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT__SHIFT 0x0
+#define DPG0_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH__SHIFT 0x10
+#define DPG0_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT_MASK 0x00003FFFL
+#define DPG0_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH_MASK 0x3FFF0000L
+//DPG0_DPG_COLOUR_R_CR
+#define DPG0_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR__SHIFT 0x0
+#define DPG0_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR__SHIFT 0x10
+#define DPG0_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR_MASK 0x0000FFFFL
+#define DPG0_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR_MASK 0xFFFF0000L
+//DPG0_DPG_COLOUR_G_Y
+#define DPG0_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y__SHIFT 0x0
+#define DPG0_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y__SHIFT 0x10
+#define DPG0_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y_MASK 0x0000FFFFL
+#define DPG0_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y_MASK 0xFFFF0000L
+//DPG0_DPG_COLOUR_B_CB
+#define DPG0_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB__SHIFT 0x0
+#define DPG0_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB__SHIFT 0x10
+#define DPG0_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB_MASK 0x0000FFFFL
+#define DPG0_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB_MASK 0xFFFF0000L
+//DPG0_DPG_OFFSET_SEGMENT
+#define DPG0_DPG_OFFSET_SEGMENT__DPG_X_OFFSET__SHIFT 0x0
+#define DPG0_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH__SHIFT 0x10
+#define DPG0_DPG_OFFSET_SEGMENT__DPG_X_OFFSET_MASK 0x00003FFFL
+#define DPG0_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH_MASK 0x3FFF0000L
+//DPG0_DPG_STATUS
+#define DPG0_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING__SHIFT 0x0
+#define DPG0_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING_MASK 0x00000001L
+//DPG0_OPP_PIPE_CRC_RESULTA
+#define DPG0_OPP_PIPE_CRC_RESULTA__OPP_PIPE_CRC_RESULT_A__SHIFT 0x0
+#define DPG0_OPP_PIPE_CRC_RESULTA__OPP_PIPE_CRC_RESULT_A_MASK 0xFFFFFFFFL
+//DPG0_OPP_PIPE_CRC_RESULTR
+#define DPG0_OPP_PIPE_CRC_RESULTR__OPP_PIPE_CRC_RESULT_R__SHIFT 0x0
+#define DPG0_OPP_PIPE_CRC_RESULTR__OPP_PIPE_CRC_RESULT_R_MASK 0xFFFFFFFFL
+//DPG0_OPP_PIPE_CRC_RESULTG
+#define DPG0_OPP_PIPE_CRC_RESULTG__OPP_PIPE_CRC_RESULT_G__SHIFT 0x0
+#define DPG0_OPP_PIPE_CRC_RESULTG__OPP_PIPE_CRC_RESULT_G_MASK 0xFFFFFFFFL
+//DPG0_OPP_PIPE_CRC_RESULTB
+#define DPG0_OPP_PIPE_CRC_RESULTB__OPP_PIPE_CRC_RESULT_B__SHIFT 0x0
+#define DPG0_OPP_PIPE_CRC_RESULTB__OPP_PIPE_CRC_RESULT_B_MASK 0xFFFFFFFFL
+//DPG0_OPP_PIPE_CRC_RESULTC
+#define DPG0_OPP_PIPE_CRC_RESULTC__OPP_PIPE_CRC_RESULT_C__SHIFT 0x0
+#define DPG0_OPP_PIPE_CRC_RESULTC__OPP_PIPE_CRC_RESULT_C_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_opp_fmt0_dispdec
+//FMT0_FMT_CLAMP_COMPONENT_R
+#define FMT0_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R__SHIFT 0x0
+#define FMT0_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R__SHIFT 0x10
+#define FMT0_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R_MASK 0x0000FFFFL
+#define FMT0_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R_MASK 0xFFFF0000L
+//FMT0_FMT_CLAMP_COMPONENT_G
+#define FMT0_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G__SHIFT 0x0
+#define FMT0_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G__SHIFT 0x10
+#define FMT0_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G_MASK 0x0000FFFFL
+#define FMT0_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G_MASK 0xFFFF0000L
+//FMT0_FMT_CLAMP_COMPONENT_B
+#define FMT0_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B__SHIFT 0x0
+#define FMT0_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B__SHIFT 0x10
+#define FMT0_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B_MASK 0x0000FFFFL
+#define FMT0_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B_MASK 0xFFFF0000L
+//FMT0_FMT_DYNAMIC_EXP_CNTL
+#define FMT0_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN__SHIFT 0x0
+#define FMT0_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE__SHIFT 0x4
+#define FMT0_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN_MASK 0x00000001L
+#define FMT0_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE_MASK 0x00000010L
+//FMT0_FMT_CONTROL
+#define FMT0_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define FMT0_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX__SHIFT 0x8
+#define FMT0_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP__SHIFT 0xc
+#define FMT0_FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT 0x10
+#define FMT0_FMT_CONTROL__FMT_SUBSAMPLING_MODE__SHIFT 0x12
+#define FMT0_FMT_CONTROL__FMT_SUBSAMPLING_ORDER__SHIFT 0x14
+#define FMT0_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS__SHIFT 0x15
+#define FMT0_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define FMT0_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE_MASK 0x00000001L
+#define FMT0_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX_MASK 0x00000F00L
+#define FMT0_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP_MASK 0x00003000L
+#define FMT0_FMT_CONTROL__FMT_PIXEL_ENCODING_MASK 0x00030000L
+#define FMT0_FMT_CONTROL__FMT_SUBSAMPLING_MODE_MASK 0x000C0000L
+#define FMT0_FMT_CONTROL__FMT_SUBSAMPLING_ORDER_MASK 0x00100000L
+#define FMT0_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS_MASK 0x00200000L
+#define FMT0_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+//FMT0_FMT_BIT_DEPTH_CONTROL
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN__SHIFT 0x0
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE__SHIFT 0x1
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT 0x4
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN__SHIFT 0x8
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE__SHIFT 0x9
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT 0xb
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE__SHIFT 0xd
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE__SHIFT 0xe
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE__SHIFT 0xf
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN__SHIFT 0x10
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH__SHIFT 0x11
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET__SHIFT 0x15
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL__SHIFT 0x18
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET__SHIFT 0x19
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL__SHIFT 0x1a
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL__SHIFT 0x1c
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL__SHIFT 0x1e
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK 0x00000001L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE_MASK 0x00000002L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK 0x00000030L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK 0x00000100L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE_MASK 0x00000600L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK 0x00001800L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK 0x00002000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK 0x00004000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK 0x00008000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN_MASK 0x00010000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH_MASK 0x00060000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET_MASK 0x00600000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL_MASK 0x01000000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET_MASK 0x02000000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL_MASK 0x0C000000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL_MASK 0x30000000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL_MASK 0xC0000000L
+//FMT0_FMT_DITHER_RAND_R_SEED
+#define FMT0_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED__SHIFT 0x0
+#define FMT0_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR__SHIFT 0x10
+#define FMT0_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED_MASK 0x000000FFL
+#define FMT0_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR_MASK 0xFFFF0000L
+//FMT0_FMT_DITHER_RAND_G_SEED
+#define FMT0_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED__SHIFT 0x0
+#define FMT0_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y__SHIFT 0x10
+#define FMT0_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED_MASK 0x000000FFL
+#define FMT0_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y_MASK 0xFFFF0000L
+//FMT0_FMT_DITHER_RAND_B_SEED
+#define FMT0_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED__SHIFT 0x0
+#define FMT0_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB__SHIFT 0x10
+#define FMT0_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED_MASK 0x000000FFL
+#define FMT0_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB_MASK 0xFFFF0000L
+//FMT0_FMT_CLAMP_CNTL
+#define FMT0_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN__SHIFT 0x0
+#define FMT0_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT__SHIFT 0x10
+#define FMT0_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN_MASK 0x00000001L
+#define FMT0_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT_MASK 0x00070000L
+//FMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL
+#define FMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH__SHIFT 0x0
+#define FMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH_MASK 0x00001FFFL
+//FMT0_FMT_MAP420_MEMORY_CONTROL
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE__SHIFT 0x0
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS__SHIFT 0x4
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE__SHIFT 0x8
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE_MASK 0x00000003L
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS_MASK 0x00000010L
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE_MASK 0x00000300L
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+//FMT0_FMT_422_CONTROL
+#define FMT0_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT__SHIFT 0x0
+#define FMT0_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_opp_oppbuf0_dispdec
+//OPPBUF0_OPPBUF_CONTROL
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH__SHIFT 0x0
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION__SHIFT 0x10
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM__SHIFT 0x14
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION__SHIFT 0x18
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING__SHIFT 0x1c
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH_MASK 0x00003FFFL
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION_MASK 0x00070000L
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM_MASK 0x00F00000L
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION_MASK 0x0F000000L
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING_MASK 0x10000000L
+//OPPBUF0_OPPBUF_3D_PARAMETERS_0
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE__SHIFT 0x0
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE__SHIFT 0xa
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R__SHIFT 0x14
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE_MASK 0x000003FFL
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE_MASK 0x000FFC00L
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R_MASK 0xFFF00000L
+//OPPBUF0_OPPBUF_3D_PARAMETERS_1
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G__SHIFT 0x0
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B__SHIFT 0x10
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G_MASK 0x00000FFFL
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B_MASK 0x0FFF0000L
+//OPPBUF0_OPPBUF_CONTROL1
+#define OPPBUF0_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS__SHIFT 0x0
+#define OPPBUF0_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS_MASK 0x00000007L
+
+
+// addressBlock: dce_dc_opp_opp_pipe0_dispdec
+//OPP_PIPE0_OPP_PIPE_CONTROL
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN__SHIFT 0x0
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON__SHIFT 0x1
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN__SHIFT 0x4
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN_MASK 0x00000001L
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON_MASK 0x00000002L
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc0_dispdec
+//OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN__SHIFT 0x0
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN__SHIFT 0x4
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE__SHIFT 0x8
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN__SHIFT 0xa
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN__SHIFT 0xe
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT__SHIFT 0x14
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT__SHIFT 0x18
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING__SHIFT 0x1c
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN_MASK 0x00000001L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN_MASK 0x00000010L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE_MASK 0x00000300L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN_MASK 0x00000400L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN_MASK 0x00004000L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT_MASK 0x00300000L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT_MASK 0x01000000L
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING_MASK 0x10000000L
+//OPP_PIPE_CRC0_OPP_PIPE_CRC_MASK
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK__SHIFT 0x0
+#define OPP_PIPE_CRC0_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK_MASK 0x0000FFFFL
+
+
+// addressBlock: dce_dc_opp_dpg1_dispdec
+//DPG1_DPG_CONTROL
+#define DPG1_DPG_CONTROL__DPG_EN__SHIFT 0x0
+#define DPG1_DPG_CONTROL__DPG_MODE__SHIFT 0x4
+#define DPG1_DPG_CONTROL__DPG_DYNAMIC_RANGE__SHIFT 0x8
+#define DPG1_DPG_CONTROL__DPG_BIT_DEPTH__SHIFT 0xc
+#define DPG1_DPG_CONTROL__DPG_VRES__SHIFT 0x10
+#define DPG1_DPG_CONTROL__DPG_HRES__SHIFT 0x14
+#define DPG1_DPG_CONTROL__DPG_FIELD_POLARITY__SHIFT 0x18
+#define DPG1_DPG_CONTROL__DPG_EN_MASK 0x00000001L
+#define DPG1_DPG_CONTROL__DPG_MODE_MASK 0x00000070L
+#define DPG1_DPG_CONTROL__DPG_DYNAMIC_RANGE_MASK 0x00000100L
+#define DPG1_DPG_CONTROL__DPG_BIT_DEPTH_MASK 0x00003000L
+#define DPG1_DPG_CONTROL__DPG_VRES_MASK 0x000F0000L
+#define DPG1_DPG_CONTROL__DPG_HRES_MASK 0x00F00000L
+#define DPG1_DPG_CONTROL__DPG_FIELD_POLARITY_MASK 0x01000000L
+//DPG1_DPG_RAMP_CONTROL
+#define DPG1_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET__SHIFT 0x0
+#define DPG1_DPG_RAMP_CONTROL__DPG_INC0__SHIFT 0x18
+#define DPG1_DPG_RAMP_CONTROL__DPG_INC1__SHIFT 0x1c
+#define DPG1_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET_MASK 0x0000FFFFL
+#define DPG1_DPG_RAMP_CONTROL__DPG_INC0_MASK 0x0F000000L
+#define DPG1_DPG_RAMP_CONTROL__DPG_INC1_MASK 0xF0000000L
+//DPG1_DPG_DIMENSIONS
+#define DPG1_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT__SHIFT 0x0
+#define DPG1_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH__SHIFT 0x10
+#define DPG1_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT_MASK 0x00003FFFL
+#define DPG1_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH_MASK 0x3FFF0000L
+//DPG1_DPG_COLOUR_R_CR
+#define DPG1_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR__SHIFT 0x0
+#define DPG1_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR__SHIFT 0x10
+#define DPG1_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR_MASK 0x0000FFFFL
+#define DPG1_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR_MASK 0xFFFF0000L
+//DPG1_DPG_COLOUR_G_Y
+#define DPG1_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y__SHIFT 0x0
+#define DPG1_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y__SHIFT 0x10
+#define DPG1_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y_MASK 0x0000FFFFL
+#define DPG1_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y_MASK 0xFFFF0000L
+//DPG1_DPG_COLOUR_B_CB
+#define DPG1_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB__SHIFT 0x0
+#define DPG1_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB__SHIFT 0x10
+#define DPG1_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB_MASK 0x0000FFFFL
+#define DPG1_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB_MASK 0xFFFF0000L
+//DPG1_DPG_OFFSET_SEGMENT
+#define DPG1_DPG_OFFSET_SEGMENT__DPG_X_OFFSET__SHIFT 0x0
+#define DPG1_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH__SHIFT 0x10
+#define DPG1_DPG_OFFSET_SEGMENT__DPG_X_OFFSET_MASK 0x00003FFFL
+#define DPG1_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH_MASK 0x3FFF0000L
+//DPG1_DPG_STATUS
+#define DPG1_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING__SHIFT 0x0
+#define DPG1_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING_MASK 0x00000001L
+//DPG1_OPP_PIPE_CRC_RESULTA
+#define DPG1_OPP_PIPE_CRC_RESULTA__OPP_PIPE_CRC_RESULT_A__SHIFT 0x0
+#define DPG1_OPP_PIPE_CRC_RESULTA__OPP_PIPE_CRC_RESULT_A_MASK 0xFFFFFFFFL
+//DPG1_OPP_PIPE_CRC_RESULTR
+#define DPG1_OPP_PIPE_CRC_RESULTR__OPP_PIPE_CRC_RESULT_R__SHIFT 0x0
+#define DPG1_OPP_PIPE_CRC_RESULTR__OPP_PIPE_CRC_RESULT_R_MASK 0xFFFFFFFFL
+//DPG1_OPP_PIPE_CRC_RESULTG
+#define DPG1_OPP_PIPE_CRC_RESULTG__OPP_PIPE_CRC_RESULT_G__SHIFT 0x0
+#define DPG1_OPP_PIPE_CRC_RESULTG__OPP_PIPE_CRC_RESULT_G_MASK 0xFFFFFFFFL
+//DPG1_OPP_PIPE_CRC_RESULTB
+#define DPG1_OPP_PIPE_CRC_RESULTB__OPP_PIPE_CRC_RESULT_B__SHIFT 0x0
+#define DPG1_OPP_PIPE_CRC_RESULTB__OPP_PIPE_CRC_RESULT_B_MASK 0xFFFFFFFFL
+//DPG1_OPP_PIPE_CRC_RESULTC
+#define DPG1_OPP_PIPE_CRC_RESULTC__OPP_PIPE_CRC_RESULT_C__SHIFT 0x0
+#define DPG1_OPP_PIPE_CRC_RESULTC__OPP_PIPE_CRC_RESULT_C_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_opp_fmt1_dispdec
+//FMT1_FMT_CLAMP_COMPONENT_R
+#define FMT1_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R__SHIFT 0x0
+#define FMT1_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R__SHIFT 0x10
+#define FMT1_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R_MASK 0x0000FFFFL
+#define FMT1_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R_MASK 0xFFFF0000L
+//FMT1_FMT_CLAMP_COMPONENT_G
+#define FMT1_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G__SHIFT 0x0
+#define FMT1_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G__SHIFT 0x10
+#define FMT1_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G_MASK 0x0000FFFFL
+#define FMT1_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G_MASK 0xFFFF0000L
+//FMT1_FMT_CLAMP_COMPONENT_B
+#define FMT1_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B__SHIFT 0x0
+#define FMT1_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B__SHIFT 0x10
+#define FMT1_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B_MASK 0x0000FFFFL
+#define FMT1_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B_MASK 0xFFFF0000L
+//FMT1_FMT_DYNAMIC_EXP_CNTL
+#define FMT1_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN__SHIFT 0x0
+#define FMT1_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE__SHIFT 0x4
+#define FMT1_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN_MASK 0x00000001L
+#define FMT1_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE_MASK 0x00000010L
+//FMT1_FMT_CONTROL
+#define FMT1_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define FMT1_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX__SHIFT 0x8
+#define FMT1_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP__SHIFT 0xc
+#define FMT1_FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT 0x10
+#define FMT1_FMT_CONTROL__FMT_SUBSAMPLING_MODE__SHIFT 0x12
+#define FMT1_FMT_CONTROL__FMT_SUBSAMPLING_ORDER__SHIFT 0x14
+#define FMT1_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS__SHIFT 0x15
+#define FMT1_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define FMT1_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE_MASK 0x00000001L
+#define FMT1_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX_MASK 0x00000F00L
+#define FMT1_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP_MASK 0x00003000L
+#define FMT1_FMT_CONTROL__FMT_PIXEL_ENCODING_MASK 0x00030000L
+#define FMT1_FMT_CONTROL__FMT_SUBSAMPLING_MODE_MASK 0x000C0000L
+#define FMT1_FMT_CONTROL__FMT_SUBSAMPLING_ORDER_MASK 0x00100000L
+#define FMT1_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS_MASK 0x00200000L
+#define FMT1_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+//FMT1_FMT_BIT_DEPTH_CONTROL
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN__SHIFT 0x0
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE__SHIFT 0x1
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT 0x4
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN__SHIFT 0x8
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE__SHIFT 0x9
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT 0xb
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE__SHIFT 0xd
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE__SHIFT 0xe
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE__SHIFT 0xf
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN__SHIFT 0x10
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH__SHIFT 0x11
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET__SHIFT 0x15
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL__SHIFT 0x18
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET__SHIFT 0x19
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL__SHIFT 0x1a
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL__SHIFT 0x1c
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL__SHIFT 0x1e
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK 0x00000001L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE_MASK 0x00000002L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK 0x00000030L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK 0x00000100L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE_MASK 0x00000600L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK 0x00001800L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK 0x00002000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK 0x00004000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK 0x00008000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN_MASK 0x00010000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH_MASK 0x00060000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET_MASK 0x00600000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL_MASK 0x01000000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET_MASK 0x02000000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL_MASK 0x0C000000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL_MASK 0x30000000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL_MASK 0xC0000000L
+//FMT1_FMT_DITHER_RAND_R_SEED
+#define FMT1_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED__SHIFT 0x0
+#define FMT1_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR__SHIFT 0x10
+#define FMT1_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED_MASK 0x000000FFL
+#define FMT1_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR_MASK 0xFFFF0000L
+//FMT1_FMT_DITHER_RAND_G_SEED
+#define FMT1_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED__SHIFT 0x0
+#define FMT1_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y__SHIFT 0x10
+#define FMT1_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED_MASK 0x000000FFL
+#define FMT1_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y_MASK 0xFFFF0000L
+//FMT1_FMT_DITHER_RAND_B_SEED
+#define FMT1_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED__SHIFT 0x0
+#define FMT1_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB__SHIFT 0x10
+#define FMT1_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED_MASK 0x000000FFL
+#define FMT1_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB_MASK 0xFFFF0000L
+//FMT1_FMT_CLAMP_CNTL
+#define FMT1_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN__SHIFT 0x0
+#define FMT1_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT__SHIFT 0x10
+#define FMT1_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN_MASK 0x00000001L
+#define FMT1_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT_MASK 0x00070000L
+//FMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL
+#define FMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH__SHIFT 0x0
+#define FMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH_MASK 0x00001FFFL
+//FMT1_FMT_MAP420_MEMORY_CONTROL
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE__SHIFT 0x0
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS__SHIFT 0x4
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE__SHIFT 0x8
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE_MASK 0x00000003L
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS_MASK 0x00000010L
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE_MASK 0x00000300L
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+//FMT1_FMT_422_CONTROL
+#define FMT1_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT__SHIFT 0x0
+#define FMT1_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_opp_oppbuf1_dispdec
+//OPPBUF1_OPPBUF_CONTROL
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH__SHIFT 0x0
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION__SHIFT 0x10
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM__SHIFT 0x14
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION__SHIFT 0x18
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING__SHIFT 0x1c
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH_MASK 0x00003FFFL
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION_MASK 0x00070000L
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM_MASK 0x00F00000L
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION_MASK 0x0F000000L
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING_MASK 0x10000000L
+//OPPBUF1_OPPBUF_3D_PARAMETERS_0
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE__SHIFT 0x0
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE__SHIFT 0xa
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R__SHIFT 0x14
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE_MASK 0x000003FFL
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE_MASK 0x000FFC00L
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R_MASK 0xFFF00000L
+//OPPBUF1_OPPBUF_3D_PARAMETERS_1
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G__SHIFT 0x0
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B__SHIFT 0x10
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G_MASK 0x00000FFFL
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B_MASK 0x0FFF0000L
+//OPPBUF1_OPPBUF_CONTROL1
+#define OPPBUF1_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS__SHIFT 0x0
+#define OPPBUF1_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS_MASK 0x00000007L
+
+
+// addressBlock: dce_dc_opp_opp_pipe1_dispdec
+//OPP_PIPE1_OPP_PIPE_CONTROL
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN__SHIFT 0x0
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON__SHIFT 0x1
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN__SHIFT 0x4
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN_MASK 0x00000001L
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON_MASK 0x00000002L
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc1_dispdec
+//OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN__SHIFT 0x0
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN__SHIFT 0x4
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE__SHIFT 0x8
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN__SHIFT 0xa
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN__SHIFT 0xe
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT__SHIFT 0x14
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT__SHIFT 0x18
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING__SHIFT 0x1c
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN_MASK 0x00000001L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN_MASK 0x00000010L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE_MASK 0x00000300L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN_MASK 0x00000400L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN_MASK 0x00004000L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT_MASK 0x00300000L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT_MASK 0x01000000L
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING_MASK 0x10000000L
+//OPP_PIPE_CRC1_OPP_PIPE_CRC_MASK
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK__SHIFT 0x0
+#define OPP_PIPE_CRC1_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK_MASK 0x0000FFFFL
+
+
+// addressBlock: dce_dc_opp_dpg2_dispdec
+//DPG2_DPG_CONTROL
+#define DPG2_DPG_CONTROL__DPG_EN__SHIFT 0x0
+#define DPG2_DPG_CONTROL__DPG_MODE__SHIFT 0x4
+#define DPG2_DPG_CONTROL__DPG_DYNAMIC_RANGE__SHIFT 0x8
+#define DPG2_DPG_CONTROL__DPG_BIT_DEPTH__SHIFT 0xc
+#define DPG2_DPG_CONTROL__DPG_VRES__SHIFT 0x10
+#define DPG2_DPG_CONTROL__DPG_HRES__SHIFT 0x14
+#define DPG2_DPG_CONTROL__DPG_FIELD_POLARITY__SHIFT 0x18
+#define DPG2_DPG_CONTROL__DPG_EN_MASK 0x00000001L
+#define DPG2_DPG_CONTROL__DPG_MODE_MASK 0x00000070L
+#define DPG2_DPG_CONTROL__DPG_DYNAMIC_RANGE_MASK 0x00000100L
+#define DPG2_DPG_CONTROL__DPG_BIT_DEPTH_MASK 0x00003000L
+#define DPG2_DPG_CONTROL__DPG_VRES_MASK 0x000F0000L
+#define DPG2_DPG_CONTROL__DPG_HRES_MASK 0x00F00000L
+#define DPG2_DPG_CONTROL__DPG_FIELD_POLARITY_MASK 0x01000000L
+//DPG2_DPG_RAMP_CONTROL
+#define DPG2_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET__SHIFT 0x0
+#define DPG2_DPG_RAMP_CONTROL__DPG_INC0__SHIFT 0x18
+#define DPG2_DPG_RAMP_CONTROL__DPG_INC1__SHIFT 0x1c
+#define DPG2_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET_MASK 0x0000FFFFL
+#define DPG2_DPG_RAMP_CONTROL__DPG_INC0_MASK 0x0F000000L
+#define DPG2_DPG_RAMP_CONTROL__DPG_INC1_MASK 0xF0000000L
+//DPG2_DPG_DIMENSIONS
+#define DPG2_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT__SHIFT 0x0
+#define DPG2_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH__SHIFT 0x10
+#define DPG2_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT_MASK 0x00003FFFL
+#define DPG2_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH_MASK 0x3FFF0000L
+//DPG2_DPG_COLOUR_R_CR
+#define DPG2_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR__SHIFT 0x0
+#define DPG2_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR__SHIFT 0x10
+#define DPG2_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR_MASK 0x0000FFFFL
+#define DPG2_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR_MASK 0xFFFF0000L
+//DPG2_DPG_COLOUR_G_Y
+#define DPG2_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y__SHIFT 0x0
+#define DPG2_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y__SHIFT 0x10
+#define DPG2_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y_MASK 0x0000FFFFL
+#define DPG2_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y_MASK 0xFFFF0000L
+//DPG2_DPG_COLOUR_B_CB
+#define DPG2_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB__SHIFT 0x0
+#define DPG2_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB__SHIFT 0x10
+#define DPG2_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB_MASK 0x0000FFFFL
+#define DPG2_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB_MASK 0xFFFF0000L
+//DPG2_DPG_OFFSET_SEGMENT
+#define DPG2_DPG_OFFSET_SEGMENT__DPG_X_OFFSET__SHIFT 0x0
+#define DPG2_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH__SHIFT 0x10
+#define DPG2_DPG_OFFSET_SEGMENT__DPG_X_OFFSET_MASK 0x00003FFFL
+#define DPG2_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH_MASK 0x3FFF0000L
+//DPG2_DPG_STATUS
+#define DPG2_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING__SHIFT 0x0
+#define DPG2_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING_MASK 0x00000001L
+//DPG2_OPP_PIPE_CRC_RESULTA
+#define DPG2_OPP_PIPE_CRC_RESULTA__OPP_PIPE_CRC_RESULT_A__SHIFT 0x0
+#define DPG2_OPP_PIPE_CRC_RESULTA__OPP_PIPE_CRC_RESULT_A_MASK 0xFFFFFFFFL
+//DPG2_OPP_PIPE_CRC_RESULTR
+#define DPG2_OPP_PIPE_CRC_RESULTR__OPP_PIPE_CRC_RESULT_R__SHIFT 0x0
+#define DPG2_OPP_PIPE_CRC_RESULTR__OPP_PIPE_CRC_RESULT_R_MASK 0xFFFFFFFFL
+//DPG2_OPP_PIPE_CRC_RESULTG
+#define DPG2_OPP_PIPE_CRC_RESULTG__OPP_PIPE_CRC_RESULT_G__SHIFT 0x0
+#define DPG2_OPP_PIPE_CRC_RESULTG__OPP_PIPE_CRC_RESULT_G_MASK 0xFFFFFFFFL
+//DPG2_OPP_PIPE_CRC_RESULTB
+#define DPG2_OPP_PIPE_CRC_RESULTB__OPP_PIPE_CRC_RESULT_B__SHIFT 0x0
+#define DPG2_OPP_PIPE_CRC_RESULTB__OPP_PIPE_CRC_RESULT_B_MASK 0xFFFFFFFFL
+//DPG2_OPP_PIPE_CRC_RESULTC
+#define DPG2_OPP_PIPE_CRC_RESULTC__OPP_PIPE_CRC_RESULT_C__SHIFT 0x0
+#define DPG2_OPP_PIPE_CRC_RESULTC__OPP_PIPE_CRC_RESULT_C_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_opp_fmt2_dispdec
+//FMT2_FMT_CLAMP_COMPONENT_R
+#define FMT2_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R__SHIFT 0x0
+#define FMT2_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R__SHIFT 0x10
+#define FMT2_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R_MASK 0x0000FFFFL
+#define FMT2_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R_MASK 0xFFFF0000L
+//FMT2_FMT_CLAMP_COMPONENT_G
+#define FMT2_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G__SHIFT 0x0
+#define FMT2_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G__SHIFT 0x10
+#define FMT2_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G_MASK 0x0000FFFFL
+#define FMT2_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G_MASK 0xFFFF0000L
+//FMT2_FMT_CLAMP_COMPONENT_B
+#define FMT2_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B__SHIFT 0x0
+#define FMT2_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B__SHIFT 0x10
+#define FMT2_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B_MASK 0x0000FFFFL
+#define FMT2_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B_MASK 0xFFFF0000L
+//FMT2_FMT_DYNAMIC_EXP_CNTL
+#define FMT2_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN__SHIFT 0x0
+#define FMT2_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE__SHIFT 0x4
+#define FMT2_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN_MASK 0x00000001L
+#define FMT2_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE_MASK 0x00000010L
+//FMT2_FMT_CONTROL
+#define FMT2_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define FMT2_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX__SHIFT 0x8
+#define FMT2_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP__SHIFT 0xc
+#define FMT2_FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT 0x10
+#define FMT2_FMT_CONTROL__FMT_SUBSAMPLING_MODE__SHIFT 0x12
+#define FMT2_FMT_CONTROL__FMT_SUBSAMPLING_ORDER__SHIFT 0x14
+#define FMT2_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS__SHIFT 0x15
+#define FMT2_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define FMT2_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE_MASK 0x00000001L
+#define FMT2_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX_MASK 0x00000F00L
+#define FMT2_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP_MASK 0x00003000L
+#define FMT2_FMT_CONTROL__FMT_PIXEL_ENCODING_MASK 0x00030000L
+#define FMT2_FMT_CONTROL__FMT_SUBSAMPLING_MODE_MASK 0x000C0000L
+#define FMT2_FMT_CONTROL__FMT_SUBSAMPLING_ORDER_MASK 0x00100000L
+#define FMT2_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS_MASK 0x00200000L
+#define FMT2_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+//FMT2_FMT_BIT_DEPTH_CONTROL
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN__SHIFT 0x0
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE__SHIFT 0x1
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT 0x4
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN__SHIFT 0x8
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE__SHIFT 0x9
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT 0xb
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE__SHIFT 0xd
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE__SHIFT 0xe
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE__SHIFT 0xf
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN__SHIFT 0x10
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH__SHIFT 0x11
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET__SHIFT 0x15
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL__SHIFT 0x18
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET__SHIFT 0x19
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL__SHIFT 0x1a
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL__SHIFT 0x1c
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL__SHIFT 0x1e
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK 0x00000001L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE_MASK 0x00000002L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK 0x00000030L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK 0x00000100L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE_MASK 0x00000600L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK 0x00001800L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK 0x00002000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK 0x00004000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK 0x00008000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN_MASK 0x00010000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH_MASK 0x00060000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET_MASK 0x00600000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL_MASK 0x01000000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET_MASK 0x02000000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL_MASK 0x0C000000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL_MASK 0x30000000L
+#define FMT2_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL_MASK 0xC0000000L
+//FMT2_FMT_DITHER_RAND_R_SEED
+#define FMT2_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED__SHIFT 0x0
+#define FMT2_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR__SHIFT 0x10
+#define FMT2_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED_MASK 0x000000FFL
+#define FMT2_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR_MASK 0xFFFF0000L
+//FMT2_FMT_DITHER_RAND_G_SEED
+#define FMT2_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED__SHIFT 0x0
+#define FMT2_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y__SHIFT 0x10
+#define FMT2_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED_MASK 0x000000FFL
+#define FMT2_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y_MASK 0xFFFF0000L
+//FMT2_FMT_DITHER_RAND_B_SEED
+#define FMT2_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED__SHIFT 0x0
+#define FMT2_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB__SHIFT 0x10
+#define FMT2_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED_MASK 0x000000FFL
+#define FMT2_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB_MASK 0xFFFF0000L
+//FMT2_FMT_CLAMP_CNTL
+#define FMT2_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN__SHIFT 0x0
+#define FMT2_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT__SHIFT 0x10
+#define FMT2_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN_MASK 0x00000001L
+#define FMT2_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT_MASK 0x00070000L
+//FMT2_FMT_SIDE_BY_SIDE_STEREO_CONTROL
+#define FMT2_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH__SHIFT 0x0
+#define FMT2_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH_MASK 0x00001FFFL
+//FMT2_FMT_MAP420_MEMORY_CONTROL
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE__SHIFT 0x0
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS__SHIFT 0x4
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE__SHIFT 0x8
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE_MASK 0x00000003L
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS_MASK 0x00000010L
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE_MASK 0x00000300L
+#define FMT2_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+//FMT2_FMT_422_CONTROL
+#define FMT2_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT__SHIFT 0x0
+#define FMT2_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_opp_oppbuf2_dispdec
+//OPPBUF2_OPPBUF_CONTROL
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH__SHIFT 0x0
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION__SHIFT 0x10
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM__SHIFT 0x14
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION__SHIFT 0x18
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING__SHIFT 0x1c
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH_MASK 0x00003FFFL
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION_MASK 0x00070000L
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM_MASK 0x00F00000L
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION_MASK 0x0F000000L
+#define OPPBUF2_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING_MASK 0x10000000L
+//OPPBUF2_OPPBUF_3D_PARAMETERS_0
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE__SHIFT 0x0
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE__SHIFT 0xa
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R__SHIFT 0x14
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE_MASK 0x000003FFL
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE_MASK 0x000FFC00L
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R_MASK 0xFFF00000L
+//OPPBUF2_OPPBUF_3D_PARAMETERS_1
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G__SHIFT 0x0
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B__SHIFT 0x10
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G_MASK 0x00000FFFL
+#define OPPBUF2_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B_MASK 0x0FFF0000L
+//OPPBUF2_OPPBUF_CONTROL1
+#define OPPBUF2_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS__SHIFT 0x0
+#define OPPBUF2_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS_MASK 0x00000007L
+
+
+// addressBlock: dce_dc_opp_opp_pipe2_dispdec
+//OPP_PIPE2_OPP_PIPE_CONTROL
+#define OPP_PIPE2_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN__SHIFT 0x0
+#define OPP_PIPE2_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON__SHIFT 0x1
+#define OPP_PIPE2_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN__SHIFT 0x4
+#define OPP_PIPE2_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN_MASK 0x00000001L
+#define OPP_PIPE2_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON_MASK 0x00000002L
+#define OPP_PIPE2_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc2_dispdec
+//OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN__SHIFT 0x0
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN__SHIFT 0x4
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE__SHIFT 0x8
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN__SHIFT 0xa
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN__SHIFT 0xe
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT__SHIFT 0x14
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT__SHIFT 0x18
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING__SHIFT 0x1c
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN_MASK 0x00000001L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN_MASK 0x00000010L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE_MASK 0x00000300L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN_MASK 0x00000400L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN_MASK 0x00004000L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT_MASK 0x00300000L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT_MASK 0x01000000L
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING_MASK 0x10000000L
+//OPP_PIPE_CRC2_OPP_PIPE_CRC_MASK
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK__SHIFT 0x0
+#define OPP_PIPE_CRC2_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK_MASK 0x0000FFFFL
+
+
+// addressBlock: dce_dc_opp_dpg3_dispdec
+//DPG3_DPG_CONTROL
+#define DPG3_DPG_CONTROL__DPG_EN__SHIFT 0x0
+#define DPG3_DPG_CONTROL__DPG_MODE__SHIFT 0x4
+#define DPG3_DPG_CONTROL__DPG_DYNAMIC_RANGE__SHIFT 0x8
+#define DPG3_DPG_CONTROL__DPG_BIT_DEPTH__SHIFT 0xc
+#define DPG3_DPG_CONTROL__DPG_VRES__SHIFT 0x10
+#define DPG3_DPG_CONTROL__DPG_HRES__SHIFT 0x14
+#define DPG3_DPG_CONTROL__DPG_FIELD_POLARITY__SHIFT 0x18
+#define DPG3_DPG_CONTROL__DPG_EN_MASK 0x00000001L
+#define DPG3_DPG_CONTROL__DPG_MODE_MASK 0x00000070L
+#define DPG3_DPG_CONTROL__DPG_DYNAMIC_RANGE_MASK 0x00000100L
+#define DPG3_DPG_CONTROL__DPG_BIT_DEPTH_MASK 0x00003000L
+#define DPG3_DPG_CONTROL__DPG_VRES_MASK 0x000F0000L
+#define DPG3_DPG_CONTROL__DPG_HRES_MASK 0x00F00000L
+#define DPG3_DPG_CONTROL__DPG_FIELD_POLARITY_MASK 0x01000000L
+//DPG3_DPG_RAMP_CONTROL
+#define DPG3_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET__SHIFT 0x0
+#define DPG3_DPG_RAMP_CONTROL__DPG_INC0__SHIFT 0x18
+#define DPG3_DPG_RAMP_CONTROL__DPG_INC1__SHIFT 0x1c
+#define DPG3_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET_MASK 0x0000FFFFL
+#define DPG3_DPG_RAMP_CONTROL__DPG_INC0_MASK 0x0F000000L
+#define DPG3_DPG_RAMP_CONTROL__DPG_INC1_MASK 0xF0000000L
+//DPG3_DPG_DIMENSIONS
+#define DPG3_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT__SHIFT 0x0
+#define DPG3_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH__SHIFT 0x10
+#define DPG3_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT_MASK 0x00003FFFL
+#define DPG3_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH_MASK 0x3FFF0000L
+//DPG3_DPG_COLOUR_R_CR
+#define DPG3_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR__SHIFT 0x0
+#define DPG3_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR__SHIFT 0x10
+#define DPG3_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR_MASK 0x0000FFFFL
+#define DPG3_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR_MASK 0xFFFF0000L
+//DPG3_DPG_COLOUR_G_Y
+#define DPG3_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y__SHIFT 0x0
+#define DPG3_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y__SHIFT 0x10
+#define DPG3_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y_MASK 0x0000FFFFL
+#define DPG3_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y_MASK 0xFFFF0000L
+//DPG3_DPG_COLOUR_B_CB
+#define DPG3_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB__SHIFT 0x0
+#define DPG3_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB__SHIFT 0x10
+#define DPG3_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB_MASK 0x0000FFFFL
+#define DPG3_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB_MASK 0xFFFF0000L
+//DPG3_DPG_OFFSET_SEGMENT
+#define DPG3_DPG_OFFSET_SEGMENT__DPG_X_OFFSET__SHIFT 0x0
+#define DPG3_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH__SHIFT 0x10
+#define DPG3_DPG_OFFSET_SEGMENT__DPG_X_OFFSET_MASK 0x00003FFFL
+#define DPG3_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH_MASK 0x3FFF0000L
+//DPG3_DPG_STATUS
+#define DPG3_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING__SHIFT 0x0
+#define DPG3_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING_MASK 0x00000001L
+//DPG3_OPP_PIPE_CRC_RESULTA
+#define DPG3_OPP_PIPE_CRC_RESULTA__OPP_PIPE_CRC_RESULT_A__SHIFT 0x0
+#define DPG3_OPP_PIPE_CRC_RESULTA__OPP_PIPE_CRC_RESULT_A_MASK 0xFFFFFFFFL
+//DPG3_OPP_PIPE_CRC_RESULTR
+#define DPG3_OPP_PIPE_CRC_RESULTR__OPP_PIPE_CRC_RESULT_R__SHIFT 0x0
+#define DPG3_OPP_PIPE_CRC_RESULTR__OPP_PIPE_CRC_RESULT_R_MASK 0xFFFFFFFFL
+//DPG3_OPP_PIPE_CRC_RESULTG
+#define DPG3_OPP_PIPE_CRC_RESULTG__OPP_PIPE_CRC_RESULT_G__SHIFT 0x0
+#define DPG3_OPP_PIPE_CRC_RESULTG__OPP_PIPE_CRC_RESULT_G_MASK 0xFFFFFFFFL
+//DPG3_OPP_PIPE_CRC_RESULTB
+#define DPG3_OPP_PIPE_CRC_RESULTB__OPP_PIPE_CRC_RESULT_B__SHIFT 0x0
+#define DPG3_OPP_PIPE_CRC_RESULTB__OPP_PIPE_CRC_RESULT_B_MASK 0xFFFFFFFFL
+//DPG3_OPP_PIPE_CRC_RESULTC
+#define DPG3_OPP_PIPE_CRC_RESULTC__OPP_PIPE_CRC_RESULT_C__SHIFT 0x0
+#define DPG3_OPP_PIPE_CRC_RESULTC__OPP_PIPE_CRC_RESULT_C_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_opp_fmt3_dispdec
+//FMT3_FMT_CLAMP_COMPONENT_R
+#define FMT3_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R__SHIFT 0x0
+#define FMT3_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R__SHIFT 0x10
+#define FMT3_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R_MASK 0x0000FFFFL
+#define FMT3_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R_MASK 0xFFFF0000L
+//FMT3_FMT_CLAMP_COMPONENT_G
+#define FMT3_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G__SHIFT 0x0
+#define FMT3_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G__SHIFT 0x10
+#define FMT3_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G_MASK 0x0000FFFFL
+#define FMT3_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G_MASK 0xFFFF0000L
+//FMT3_FMT_CLAMP_COMPONENT_B
+#define FMT3_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B__SHIFT 0x0
+#define FMT3_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B__SHIFT 0x10
+#define FMT3_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B_MASK 0x0000FFFFL
+#define FMT3_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B_MASK 0xFFFF0000L
+//FMT3_FMT_DYNAMIC_EXP_CNTL
+#define FMT3_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN__SHIFT 0x0
+#define FMT3_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE__SHIFT 0x4
+#define FMT3_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN_MASK 0x00000001L
+#define FMT3_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE_MASK 0x00000010L
+//FMT3_FMT_CONTROL
+#define FMT3_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define FMT3_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX__SHIFT 0x8
+#define FMT3_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP__SHIFT 0xc
+#define FMT3_FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT 0x10
+#define FMT3_FMT_CONTROL__FMT_SUBSAMPLING_MODE__SHIFT 0x12
+#define FMT3_FMT_CONTROL__FMT_SUBSAMPLING_ORDER__SHIFT 0x14
+#define FMT3_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS__SHIFT 0x15
+#define FMT3_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define FMT3_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE_MASK 0x00000001L
+#define FMT3_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX_MASK 0x00000F00L
+#define FMT3_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP_MASK 0x00003000L
+#define FMT3_FMT_CONTROL__FMT_PIXEL_ENCODING_MASK 0x00030000L
+#define FMT3_FMT_CONTROL__FMT_SUBSAMPLING_MODE_MASK 0x000C0000L
+#define FMT3_FMT_CONTROL__FMT_SUBSAMPLING_ORDER_MASK 0x00100000L
+#define FMT3_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS_MASK 0x00200000L
+#define FMT3_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+//FMT3_FMT_BIT_DEPTH_CONTROL
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN__SHIFT 0x0
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE__SHIFT 0x1
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT 0x4
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN__SHIFT 0x8
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE__SHIFT 0x9
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT 0xb
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE__SHIFT 0xd
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE__SHIFT 0xe
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE__SHIFT 0xf
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN__SHIFT 0x10
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH__SHIFT 0x11
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET__SHIFT 0x15
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL__SHIFT 0x18
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET__SHIFT 0x19
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL__SHIFT 0x1a
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL__SHIFT 0x1c
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL__SHIFT 0x1e
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK 0x00000001L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE_MASK 0x00000002L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK 0x00000030L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK 0x00000100L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE_MASK 0x00000600L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK 0x00001800L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK 0x00002000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK 0x00004000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK 0x00008000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN_MASK 0x00010000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH_MASK 0x00060000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET_MASK 0x00600000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL_MASK 0x01000000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET_MASK 0x02000000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL_MASK 0x0C000000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL_MASK 0x30000000L
+#define FMT3_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL_MASK 0xC0000000L
+//FMT3_FMT_DITHER_RAND_R_SEED
+#define FMT3_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED__SHIFT 0x0
+#define FMT3_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR__SHIFT 0x10
+#define FMT3_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED_MASK 0x000000FFL
+#define FMT3_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR_MASK 0xFFFF0000L
+//FMT3_FMT_DITHER_RAND_G_SEED
+#define FMT3_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED__SHIFT 0x0
+#define FMT3_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y__SHIFT 0x10
+#define FMT3_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED_MASK 0x000000FFL
+#define FMT3_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y_MASK 0xFFFF0000L
+//FMT3_FMT_DITHER_RAND_B_SEED
+#define FMT3_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED__SHIFT 0x0
+#define FMT3_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB__SHIFT 0x10
+#define FMT3_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED_MASK 0x000000FFL
+#define FMT3_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB_MASK 0xFFFF0000L
+//FMT3_FMT_CLAMP_CNTL
+#define FMT3_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN__SHIFT 0x0
+#define FMT3_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT__SHIFT 0x10
+#define FMT3_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN_MASK 0x00000001L
+#define FMT3_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT_MASK 0x00070000L
+//FMT3_FMT_SIDE_BY_SIDE_STEREO_CONTROL
+#define FMT3_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH__SHIFT 0x0
+#define FMT3_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH_MASK 0x00001FFFL
+//FMT3_FMT_MAP420_MEMORY_CONTROL
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE__SHIFT 0x0
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS__SHIFT 0x4
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE__SHIFT 0x8
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE_MASK 0x00000003L
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS_MASK 0x00000010L
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE_MASK 0x00000300L
+#define FMT3_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+//FMT3_FMT_422_CONTROL
+#define FMT3_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT__SHIFT 0x0
+#define FMT3_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_opp_oppbuf3_dispdec
+//OPPBUF3_OPPBUF_CONTROL
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH__SHIFT 0x0
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION__SHIFT 0x10
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM__SHIFT 0x14
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION__SHIFT 0x18
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING__SHIFT 0x1c
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH_MASK 0x00003FFFL
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION_MASK 0x00070000L
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM_MASK 0x00F00000L
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION_MASK 0x0F000000L
+#define OPPBUF3_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING_MASK 0x10000000L
+//OPPBUF3_OPPBUF_3D_PARAMETERS_0
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE__SHIFT 0x0
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE__SHIFT 0xa
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R__SHIFT 0x14
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE_MASK 0x000003FFL
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE_MASK 0x000FFC00L
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R_MASK 0xFFF00000L
+//OPPBUF3_OPPBUF_3D_PARAMETERS_1
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G__SHIFT 0x0
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B__SHIFT 0x10
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G_MASK 0x00000FFFL
+#define OPPBUF3_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B_MASK 0x0FFF0000L
+//OPPBUF3_OPPBUF_CONTROL1
+#define OPPBUF3_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS__SHIFT 0x0
+#define OPPBUF3_OPPBUF_CONTROL1__OPPBUF_NUM_SEGMENT_PADDED_PIXELS_MASK 0x00000007L
+
+
+// addressBlock: dce_dc_opp_opp_pipe3_dispdec
+//OPP_PIPE3_OPP_PIPE_CONTROL
+#define OPP_PIPE3_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN__SHIFT 0x0
+#define OPP_PIPE3_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON__SHIFT 0x1
+#define OPP_PIPE3_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN__SHIFT 0x4
+#define OPP_PIPE3_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN_MASK 0x00000001L
+#define OPP_PIPE3_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON_MASK 0x00000002L
+#define OPP_PIPE3_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc3_dispdec
+//OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN__SHIFT 0x0
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN__SHIFT 0x4
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE__SHIFT 0x8
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN__SHIFT 0xa
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN__SHIFT 0xe
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT__SHIFT 0x14
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT__SHIFT 0x18
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING__SHIFT 0x1c
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_EN_MASK 0x00000001L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_CONT_EN_MASK 0x00000010L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_MODE_MASK 0x00000300L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_STEREO_EN_MASK 0x00000400L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_INTERLACE_EN_MASK 0x00004000L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_PIXEL_SELECT_MASK 0x00300000L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_SOURCE_SELECT_MASK 0x01000000L
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL__OPP_PIPE_CRC_ONE_SHOT_PENDING_MASK 0x10000000L
+//OPP_PIPE_CRC3_OPP_PIPE_CRC_MASK
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK__SHIFT 0x0
+#define OPP_PIPE_CRC3_OPP_PIPE_CRC_MASK__OPP_PIPE_CRC_MASK_MASK 0x0000FFFFL
+
+
+// addressBlock: dce_dc_opp_dscrm0_dispdec
+//DSCRM0_DSCRM_DSC_FORWARD_CONFIG
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN__SHIFT 0x0
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE__SHIFT 0x4
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x8
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS__SHIFT 0xc
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_MASK 0x00000001L
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE_MASK 0x00000070L
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000100L
+#define DSCRM0_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS_MASK 0x00001000L
+
+
+// addressBlock: dce_dc_opp_dscrm1_dispdec
+//DSCRM1_DSCRM_DSC_FORWARD_CONFIG
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN__SHIFT 0x0
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE__SHIFT 0x4
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x8
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS__SHIFT 0xc
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_MASK 0x00000001L
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE_MASK 0x00000070L
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000100L
+#define DSCRM1_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS_MASK 0x00001000L
+
+
+// addressBlock: dce_dc_opp_dscrm2_dispdec
+//DSCRM2_DSCRM_DSC_FORWARD_CONFIG
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN__SHIFT 0x0
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE__SHIFT 0x4
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x8
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS__SHIFT 0xc
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_MASK 0x00000001L
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE_MASK 0x00000070L
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000100L
+#define DSCRM2_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS_MASK 0x00001000L
+
+
+// addressBlock: dce_dc_opp_dscrm3_dispdec
+//DSCRM3_DSCRM_DSC_FORWARD_CONFIG
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN__SHIFT 0x0
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE__SHIFT 0x4
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x8
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS__SHIFT 0xc
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_MASK 0x00000001L
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_OPP_PIPE_SOURCE_MASK 0x00000070L
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000100L
+#define DSCRM3_DSCRM_DSC_FORWARD_CONFIG__DSCRM_DSC_FORWARD_EN_STATUS_MASK 0x00001000L
+
+
+// addressBlock: dce_dc_opp_opp_top_dispdec
+//OPP_TOP_CLK_CONTROL
+#define OPP_TOP_CLK_CONTROL__OPP_DISPCLK_R_GATE_DIS__SHIFT 0x0
+#define OPP_TOP_CLK_CONTROL__OPP_DISPCLK_G_ABM_GATE_DIS__SHIFT 0x4
+#define OPP_TOP_CLK_CONTROL__OPP_TEST_CLK_SEL__SHIFT 0x8
+#define OPP_TOP_CLK_CONTROL__OPP_ABM0_CLOCK_ON__SHIFT 0xc
+#define OPP_TOP_CLK_CONTROL__OPP_ABM1_CLOCK_ON__SHIFT 0xd
+#define OPP_TOP_CLK_CONTROL__OPP_ABM2_CLOCK_ON__SHIFT 0xe
+#define OPP_TOP_CLK_CONTROL__OPP_ABM3_CLOCK_ON__SHIFT 0xf
+#define OPP_TOP_CLK_CONTROL__OPP_FGCG_REP_DIS__SHIFT 0x18
+#define OPP_TOP_CLK_CONTROL__OPP_DISPCLK_R_GATE_DIS_MASK 0x00000001L
+#define OPP_TOP_CLK_CONTROL__OPP_DISPCLK_G_ABM_GATE_DIS_MASK 0x00000010L
+#define OPP_TOP_CLK_CONTROL__OPP_TEST_CLK_SEL_MASK 0x00000F00L
+#define OPP_TOP_CLK_CONTROL__OPP_ABM0_CLOCK_ON_MASK 0x00001000L
+#define OPP_TOP_CLK_CONTROL__OPP_ABM1_CLOCK_ON_MASK 0x00002000L
+#define OPP_TOP_CLK_CONTROL__OPP_ABM2_CLOCK_ON_MASK 0x00004000L
+#define OPP_TOP_CLK_CONTROL__OPP_ABM3_CLOCK_ON_MASK 0x00008000L
+#define OPP_TOP_CLK_CONTROL__OPP_FGCG_REP_DIS_MASK 0x01000000L
+//OPP_ABM_CONTROL
+#define OPP_ABM_CONTROL__OPP_ABM_BLPWM_SEL__SHIFT 0x0
+#define OPP_ABM_CONTROL__OPP_ABM_BLPWM_SEL_MASK 0x00000007L
+
+
+// addressBlock: dce_dc_opp_opp_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON16_PERFCOUNTER_CNTL
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON16_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON16_PERFCOUNTER_CNTL2
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON16_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON16_PERFCOUNTER_STATE
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON16_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON16_PERFMON_CNTL
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON16_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON16_PERFMON_CNTL2
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON16_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON16_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON16_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON16_PERFMON_CVALUE_LOW
+#define DC_PERFMON16_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON16_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON16_PERFMON_HI
+#define DC_PERFMON16_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON16_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON16_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON16_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON16_PERFMON_LOW
+#define DC_PERFMON16_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON16_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_odm0_dispdec
+//ODM0_OPTC_INPUT_GLOBAL_CONTROL
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET__SHIFT 0x0
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN__SHIFT 0x8
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS__SHIFT 0xa
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS__SHIFT 0xb
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR__SHIFT 0xc
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT__SHIFT 0xd
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING__SHIFT 0x1f
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET_MASK 0x00000001L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN_MASK 0x00000100L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000400L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS_MASK 0x00000800L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR_MASK 0x00001000L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT_MASK 0x00002000L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING_MASK 0x80000000L
+//ODM0_OPTC_DATA_SOURCE_SELECT
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT__SHIFT 0x0
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT__SHIFT 0x8
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL__SHIFT 0x10
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL__SHIFT 0x14
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG2_SRC_SEL__SHIFT 0x18
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG3_SRC_SEL__SHIFT 0x1c
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT_MASK 0x00000003L
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT_MASK 0x00000300L
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL_MASK 0x000F0000L
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL_MASK 0x00F00000L
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG2_SRC_SEL_MASK 0x0F000000L
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG3_SRC_SEL_MASK 0xF0000000L
+//ODM0_OPTC_DATA_FORMAT_CONTROL
+#define ODM0_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT__SHIFT 0x0
+#define ODM0_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE__SHIFT 0x4
+#define ODM0_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT_MASK 0x00000003L
+#define ODM0_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE_MASK 0x00000030L
+//ODM0_OPTC_BYTES_PER_PIXEL
+#define ODM0_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define ODM0_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+//ODM0_OPTC_WIDTH_CONTROL
+#define ODM0_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH__SHIFT 0x0
+#define ODM0_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH__SHIFT 0x10
+#define ODM0_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH_MASK 0x00001FFFL
+#define ODM0_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//ODM0_OPTC_INPUT_CLOCK_CONTROL
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS__SHIFT 0x0
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN__SHIFT 0x1
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON__SHIFT 0x2
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS_MASK 0x00000001L
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN_MASK 0x00000002L
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON_MASK 0x00000004L
+//ODM0_OPTC_MEMORY_CONFIG
+#define ODM0_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL__SHIFT 0x0
+#define ODM0_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_STATUS__SHIFT 0x10
+#define ODM0_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_MASK 0x0000FFFFL
+#define ODM0_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_STATUS_MASK 0xFFFF0000L
+//ODM0_OPTC_INPUT_SPARE_REGISTER
+#define ODM0_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG__SHIFT 0x0
+#define ODM0_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG_MASK 0xFFFFFFFFL
+//ODM0_OPTC_UNDERFLOW_THRESHOLD
+#define ODM0_OPTC_UNDERFLOW_THRESHOLD__OPTC_UNDERFLOW_THRESHOLD__SHIFT 0x0
+#define ODM0_OPTC_UNDERFLOW_THRESHOLD__OPTC_UNDERFLOW_THRESHOLD_MASK 0x01FFFFFFL
+
+
+// addressBlock: dce_dc_optc_odm1_dispdec
+//ODM1_OPTC_INPUT_GLOBAL_CONTROL
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET__SHIFT 0x0
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN__SHIFT 0x8
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS__SHIFT 0xa
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS__SHIFT 0xb
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR__SHIFT 0xc
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT__SHIFT 0xd
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING__SHIFT 0x1f
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET_MASK 0x00000001L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN_MASK 0x00000100L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000400L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS_MASK 0x00000800L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR_MASK 0x00001000L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT_MASK 0x00002000L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING_MASK 0x80000000L
+//ODM1_OPTC_DATA_SOURCE_SELECT
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT__SHIFT 0x0
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT__SHIFT 0x8
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL__SHIFT 0x10
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL__SHIFT 0x14
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG2_SRC_SEL__SHIFT 0x18
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG3_SRC_SEL__SHIFT 0x1c
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT_MASK 0x00000003L
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT_MASK 0x00000300L
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL_MASK 0x000F0000L
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL_MASK 0x00F00000L
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG2_SRC_SEL_MASK 0x0F000000L
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG3_SRC_SEL_MASK 0xF0000000L
+//ODM1_OPTC_DATA_FORMAT_CONTROL
+#define ODM1_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT__SHIFT 0x0
+#define ODM1_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE__SHIFT 0x4
+#define ODM1_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT_MASK 0x00000003L
+#define ODM1_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE_MASK 0x00000030L
+//ODM1_OPTC_BYTES_PER_PIXEL
+#define ODM1_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define ODM1_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+//ODM1_OPTC_WIDTH_CONTROL
+#define ODM1_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH__SHIFT 0x0
+#define ODM1_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH__SHIFT 0x10
+#define ODM1_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH_MASK 0x00001FFFL
+#define ODM1_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//ODM1_OPTC_INPUT_CLOCK_CONTROL
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS__SHIFT 0x0
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN__SHIFT 0x1
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON__SHIFT 0x2
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS_MASK 0x00000001L
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN_MASK 0x00000002L
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON_MASK 0x00000004L
+//ODM1_OPTC_MEMORY_CONFIG
+#define ODM1_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL__SHIFT 0x0
+#define ODM1_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_STATUS__SHIFT 0x10
+#define ODM1_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_MASK 0x0000FFFFL
+#define ODM1_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_STATUS_MASK 0xFFFF0000L
+//ODM1_OPTC_INPUT_SPARE_REGISTER
+#define ODM1_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG__SHIFT 0x0
+#define ODM1_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG_MASK 0xFFFFFFFFL
+//ODM1_OPTC_UNDERFLOW_THRESHOLD
+#define ODM1_OPTC_UNDERFLOW_THRESHOLD__OPTC_UNDERFLOW_THRESHOLD__SHIFT 0x0
+#define ODM1_OPTC_UNDERFLOW_THRESHOLD__OPTC_UNDERFLOW_THRESHOLD_MASK 0x01FFFFFFL
+
+
+// addressBlock: dce_dc_optc_odm2_dispdec
+//ODM2_OPTC_INPUT_GLOBAL_CONTROL
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET__SHIFT 0x0
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN__SHIFT 0x8
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS__SHIFT 0xa
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS__SHIFT 0xb
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR__SHIFT 0xc
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT__SHIFT 0xd
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING__SHIFT 0x1f
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET_MASK 0x00000001L
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN_MASK 0x00000100L
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000400L
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS_MASK 0x00000800L
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR_MASK 0x00001000L
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT_MASK 0x00002000L
+#define ODM2_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING_MASK 0x80000000L
+//ODM2_OPTC_DATA_SOURCE_SELECT
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT__SHIFT 0x0
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT__SHIFT 0x8
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL__SHIFT 0x10
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL__SHIFT 0x14
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_SEG2_SRC_SEL__SHIFT 0x18
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_SEG3_SRC_SEL__SHIFT 0x1c
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT_MASK 0x00000003L
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT_MASK 0x00000300L
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL_MASK 0x000F0000L
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL_MASK 0x00F00000L
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_SEG2_SRC_SEL_MASK 0x0F000000L
+#define ODM2_OPTC_DATA_SOURCE_SELECT__OPTC_SEG3_SRC_SEL_MASK 0xF0000000L
+//ODM2_OPTC_DATA_FORMAT_CONTROL
+#define ODM2_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT__SHIFT 0x0
+#define ODM2_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE__SHIFT 0x4
+#define ODM2_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT_MASK 0x00000003L
+#define ODM2_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE_MASK 0x00000030L
+//ODM2_OPTC_BYTES_PER_PIXEL
+#define ODM2_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define ODM2_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+//ODM2_OPTC_WIDTH_CONTROL
+#define ODM2_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH__SHIFT 0x0
+#define ODM2_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH__SHIFT 0x10
+#define ODM2_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH_MASK 0x00001FFFL
+#define ODM2_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//ODM2_OPTC_INPUT_CLOCK_CONTROL
+#define ODM2_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS__SHIFT 0x0
+#define ODM2_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN__SHIFT 0x1
+#define ODM2_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON__SHIFT 0x2
+#define ODM2_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS_MASK 0x00000001L
+#define ODM2_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN_MASK 0x00000002L
+#define ODM2_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON_MASK 0x00000004L
+//ODM2_OPTC_MEMORY_CONFIG
+#define ODM2_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL__SHIFT 0x0
+#define ODM2_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_STATUS__SHIFT 0x10
+#define ODM2_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_MASK 0x0000FFFFL
+#define ODM2_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_STATUS_MASK 0xFFFF0000L
+//ODM2_OPTC_INPUT_SPARE_REGISTER
+#define ODM2_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG__SHIFT 0x0
+#define ODM2_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG_MASK 0xFFFFFFFFL
+//ODM2_OPTC_UNDERFLOW_THRESHOLD
+#define ODM2_OPTC_UNDERFLOW_THRESHOLD__OPTC_UNDERFLOW_THRESHOLD__SHIFT 0x0
+#define ODM2_OPTC_UNDERFLOW_THRESHOLD__OPTC_UNDERFLOW_THRESHOLD_MASK 0x01FFFFFFL
+
+
+// addressBlock: dce_dc_optc_odm3_dispdec
+//ODM3_OPTC_INPUT_GLOBAL_CONTROL
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET__SHIFT 0x0
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN__SHIFT 0x8
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS__SHIFT 0xa
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS__SHIFT 0xb
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR__SHIFT 0xc
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT__SHIFT 0xd
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING__SHIFT 0x1f
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET_MASK 0x00000001L
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN_MASK 0x00000100L
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000400L
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS_MASK 0x00000800L
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR_MASK 0x00001000L
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT_MASK 0x00002000L
+#define ODM3_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING_MASK 0x80000000L
+//ODM3_OPTC_DATA_SOURCE_SELECT
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT__SHIFT 0x0
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT__SHIFT 0x8
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL__SHIFT 0x10
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL__SHIFT 0x14
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_SEG2_SRC_SEL__SHIFT 0x18
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_SEG3_SRC_SEL__SHIFT 0x1c
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_INPUT_SEGMENT_MASK 0x00000003L
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_NUM_OF_OUTPUT_SEGMENT_MASK 0x00000300L
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL_MASK 0x000F0000L
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_SEG1_SRC_SEL_MASK 0x00F00000L
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_SEG2_SRC_SEL_MASK 0x0F000000L
+#define ODM3_OPTC_DATA_SOURCE_SELECT__OPTC_SEG3_SRC_SEL_MASK 0xF0000000L
+//ODM3_OPTC_DATA_FORMAT_CONTROL
+#define ODM3_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT__SHIFT 0x0
+#define ODM3_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE__SHIFT 0x4
+#define ODM3_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT_MASK 0x00000003L
+#define ODM3_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE_MASK 0x00000030L
+//ODM3_OPTC_BYTES_PER_PIXEL
+#define ODM3_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define ODM3_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+//ODM3_OPTC_WIDTH_CONTROL
+#define ODM3_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH__SHIFT 0x0
+#define ODM3_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH__SHIFT 0x10
+#define ODM3_OPTC_WIDTH_CONTROL__OPTC_SEGMENT_WIDTH_MASK 0x00001FFFL
+#define ODM3_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//ODM3_OPTC_INPUT_CLOCK_CONTROL
+#define ODM3_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS__SHIFT 0x0
+#define ODM3_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN__SHIFT 0x1
+#define ODM3_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON__SHIFT 0x2
+#define ODM3_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS_MASK 0x00000001L
+#define ODM3_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN_MASK 0x00000002L
+#define ODM3_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON_MASK 0x00000004L
+//ODM3_OPTC_MEMORY_CONFIG
+#define ODM3_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL__SHIFT 0x0
+#define ODM3_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_STATUS__SHIFT 0x10
+#define ODM3_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_MASK 0x0000FFFFL
+#define ODM3_OPTC_MEMORY_CONFIG__OPTC_MEM_SEL_STATUS_MASK 0xFFFF0000L
+//ODM3_OPTC_INPUT_SPARE_REGISTER
+#define ODM3_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG__SHIFT 0x0
+#define ODM3_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG_MASK 0xFFFFFFFFL
+//ODM3_OPTC_UNDERFLOW_THRESHOLD
+#define ODM3_OPTC_UNDERFLOW_THRESHOLD__OPTC_UNDERFLOW_THRESHOLD__SHIFT 0x0
+#define ODM3_OPTC_UNDERFLOW_THRESHOLD__OPTC_UNDERFLOW_THRESHOLD_MASK 0x01FFFFFFL
+
+
+// addressBlock: dce_dc_optc_otg0_dispdec
+//OTG0_OTG_H_TOTAL
+#define OTG0_OTG_H_TOTAL__OTG_H_TOTAL__SHIFT 0x0
+#define OTG0_OTG_H_TOTAL__OTG_H_TOTAL_MASK 0x00007FFFL
+//OTG0_OTG_H_BLANK_START_END
+#define OTG0_OTG_H_BLANK_START_END__OTG_H_BLANK_START__SHIFT 0x0
+#define OTG0_OTG_H_BLANK_START_END__OTG_H_BLANK_END__SHIFT 0x10
+#define OTG0_OTG_H_BLANK_START_END__OTG_H_BLANK_START_MASK 0x00007FFFL
+#define OTG0_OTG_H_BLANK_START_END__OTG_H_BLANK_END_MASK 0x7FFF0000L
+//OTG0_OTG_H_SYNC_A
+#define OTG0_OTG_H_SYNC_A__OTG_H_SYNC_A_START__SHIFT 0x0
+#define OTG0_OTG_H_SYNC_A__OTG_H_SYNC_A_END__SHIFT 0x10
+#define OTG0_OTG_H_SYNC_A__OTG_H_SYNC_A_START_MASK 0x00007FFFL
+#define OTG0_OTG_H_SYNC_A__OTG_H_SYNC_A_END_MASK 0x7FFF0000L
+//OTG0_OTG_H_SYNC_A_CNTL
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL__SHIFT 0x0
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN__SHIFT 0x10
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF__SHIFT 0x11
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL_MASK 0x00000001L
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN_MASK 0x00010000L
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF_MASK 0x00020000L
+//OTG0_OTG_H_TIMING_CNTL
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE__SHIFT 0x0
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MANUAL__SHIFT 0x8
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_CURR__SHIFT 0x10
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MASK 0x00000003L
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MANUAL_MASK 0x00000100L
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_CURR_MASK 0x00030000L
+//OTG0_OTG_V_TOTAL
+#define OTG0_OTG_V_TOTAL__OTG_V_TOTAL__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL__OTG_V_TOTAL_MASK 0x00007FFFL
+//OTG0_OTG_V_TOTAL_MIN
+#define OTG0_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN_MASK 0x00007FFFL
+//OTG0_OTG_V_TOTAL_MAX
+#define OTG0_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX_MASK 0x00007FFFL
+//OTG0_OTG_V_TOTAL_MID
+#define OTG0_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID_MASK 0x00007FFFL
+//OTG0_OTG_V_TOTAL_CONTROL
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL__SHIFT 0x1
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN__SHIFT 0x2
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN__SHIFT 0x3
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT__SHIFT 0x4
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD__SHIFT 0x5
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM__SHIFT 0x8
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK__SHIFT 0x10
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL_MASK 0x00000001L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL_MASK 0x00000002L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN_MASK 0x00000004L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN_MASK 0x00000008L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT_MASK 0x00000010L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD_MASK 0x00000020L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM_MASK 0x0000FF00L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_MASK 0xFFFF0000L
+//OTG0_OTG_V_COUNT_STOP_CONTROL
+#define OTG0_OTG_V_COUNT_STOP_CONTROL__OTG_V_COUNT_STOP__SHIFT 0x0
+#define OTG0_OTG_V_COUNT_STOP_CONTROL__OTG_V_COUNT_STOP_MASK 0x00007FFFL
+//OTG0_OTG_V_COUNT_STOP_CONTROL2
+#define OTG0_OTG_V_COUNT_STOP_CONTROL2__OTG_V_COUNT_STOP_TIMER__SHIFT 0x0
+#define OTG0_OTG_V_COUNT_STOP_CONTROL2__OTG_V_COUNT_STOP_TIMER_MASK 0xFFFFFFFFL
+//OTG0_OTG_V_TOTAL_INT_STATUS
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT__SHIFT 0x4
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK__SHIFT 0x8
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK__SHIFT 0xc
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MASK 0x00000001L
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT_MASK 0x00000010L
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK_MASK 0x00000100L
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK_MASK 0x00001000L
+//OTG0_OTG_VSYNC_NOM_INT_STATUS
+#define OTG0_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM__SHIFT 0x0
+#define OTG0_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR__SHIFT 0x4
+#define OTG0_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_MASK 0x00000001L
+#define OTG0_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR_MASK 0x00000010L
+//OTG0_OTG_V_BLANK_START_END
+#define OTG0_OTG_V_BLANK_START_END__OTG_V_BLANK_START__SHIFT 0x0
+#define OTG0_OTG_V_BLANK_START_END__OTG_V_BLANK_END__SHIFT 0x10
+#define OTG0_OTG_V_BLANK_START_END__OTG_V_BLANK_START_MASK 0x00007FFFL
+#define OTG0_OTG_V_BLANK_START_END__OTG_V_BLANK_END_MASK 0x7FFF0000L
+//OTG0_OTG_V_SYNC_A
+#define OTG0_OTG_V_SYNC_A__OTG_V_SYNC_A_START__SHIFT 0x0
+#define OTG0_OTG_V_SYNC_A__OTG_V_SYNC_A_END__SHIFT 0x10
+#define OTG0_OTG_V_SYNC_A__OTG_V_SYNC_A_START_MASK 0x00007FFFL
+#define OTG0_OTG_V_SYNC_A__OTG_V_SYNC_A_END_MASK 0x7FFF0000L
+//OTG0_OTG_V_SYNC_A_CNTL
+#define OTG0_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL__SHIFT 0x0
+#define OTG0_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_MODE__SHIFT 0x8
+#define OTG0_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL_MASK 0x00000001L
+#define OTG0_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_MODE_MASK 0x00000100L
+//OTG0_OTG_TRIGA_CNTL
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT__SHIFT 0x0
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT__SHIFT 0x8
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS__SHIFT 0xc
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS__SHIFT 0xd
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED__SHIFT 0xe
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY__SHIFT 0x18
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR__SHIFT 0x1f
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT_MASK 0x00000700L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS_MASK 0x00001000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS_MASK 0x00002000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED_MASK 0x00004000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY_MASK 0x1F000000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR_MASK 0x80000000L
+//OTG0_OTG_TRIGA_MANUAL_TRIG
+#define OTG0_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG__SHIFT 0x0
+#define OTG0_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG_MASK 0x00000001L
+//OTG0_OTG_TRIGB_CNTL
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT__SHIFT 0x0
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT__SHIFT 0x8
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS__SHIFT 0xc
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS__SHIFT 0xd
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED__SHIFT 0xe
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY__SHIFT 0x18
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR__SHIFT 0x1f
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT_MASK 0x00000700L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS_MASK 0x00001000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS_MASK 0x00002000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED_MASK 0x00004000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY_MASK 0x1F000000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR_MASK 0x80000000L
+//OTG0_OTG_TRIGB_MANUAL_TRIG
+#define OTG0_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG__SHIFT 0x0
+#define OTG0_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG_MASK 0x00000001L
+//OTG0_OTG_FORCE_COUNT_NOW_CNTL
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE__SHIFT 0x0
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK__SHIFT 0x4
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL__SHIFT 0x8
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED__SHIFT 0x10
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR__SHIFT 0x18
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE_MASK 0x00000003L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK_MASK 0x00000010L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL_MASK 0x00000100L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED_MASK 0x00010000L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR_MASK 0x01000000L
+//OTG0_OTG_STEREO_FORCE_NEXT_EYE
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE__SHIFT 0x0
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE_MASK 0x00000003L
+//OTG0_OTG_CONTROL
+#define OTG0_OTG_CONTROL__OTG_MASTER_EN__SHIFT 0x0
+#define OTG0_OTG_CONTROL__OTG_DISABLE_POINT_CNTL__SHIFT 0x8
+#define OTG0_OTG_CONTROL__OTG_START_POINT_CNTL__SHIFT 0xc
+#define OTG0_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL__SHIFT 0xd
+#define OTG0_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY__SHIFT 0xe
+#define OTG0_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE__SHIFT 0x10
+#define OTG0_OTG_CONTROL__OTG_OUT_MUX__SHIFT 0x14
+#define OTG0_OTG_CONTROL__OTG_MASTER_EN_MASK 0x00000001L
+#define OTG0_OTG_CONTROL__OTG_DISABLE_POINT_CNTL_MASK 0x00000300L
+#define OTG0_OTG_CONTROL__OTG_START_POINT_CNTL_MASK 0x00001000L
+#define OTG0_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL_MASK 0x00002000L
+#define OTG0_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY_MASK 0x00004000L
+#define OTG0_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE_MASK 0x00010000L
+#define OTG0_OTG_CONTROL__OTG_OUT_MUX_MASK 0x00300000L
+//OTG0_OTG_DLPC_CONTROL
+#define OTG0_OTG_DLPC_CONTROL__OTG_RESYNC_MODE__SHIFT 0x0
+#define OTG0_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_LOCATION__SHIFT 0x10
+#define OTG0_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_CURRENT__SHIFT 0x1f
+#define OTG0_OTG_DLPC_CONTROL__OTG_RESYNC_MODE_MASK 0x00000001L
+#define OTG0_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_LOCATION_MASK 0x7FFF0000L
+#define OTG0_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_CURRENT_MASK 0x80000000L
+//OTG0_OTG_INTERLACE_CONTROL
+#define OTG0_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE__SHIFT 0x0
+#define OTG0_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD__SHIFT 0x10
+#define OTG0_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE_MASK 0x00000001L
+#define OTG0_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD_MASK 0x00030000L
+//OTG0_OTG_INTERLACE_STATUS
+#define OTG0_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD__SHIFT 0x0
+#define OTG0_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD__SHIFT 0x1
+#define OTG0_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD_MASK 0x00000001L
+#define OTG0_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD_MASK 0x00000002L
+//OTG0_OTG_PIXEL_DATA_READBACK0
+#define OTG0_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB__SHIFT 0x0
+#define OTG0_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y__SHIFT 0x10
+#define OTG0_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y_MASK 0xFFFF0000L
+//OTG0_OTG_PIXEL_DATA_READBACK1
+#define OTG0_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR__SHIFT 0x0
+#define OTG0_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR_MASK 0x0000FFFFL
+//OTG0_OTG_STATUS
+#define OTG0_OTG_STATUS__OTG_V_BLANK__SHIFT 0x0
+#define OTG0_OTG_STATUS__OTG_V_ACTIVE_DISP__SHIFT 0x1
+#define OTG0_OTG_STATUS__OTG_V_SYNC_A__SHIFT 0x2
+#define OTG0_OTG_STATUS__OTG_V_UPDATE__SHIFT 0x3
+#define OTG0_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE__SHIFT 0x5
+#define OTG0_OTG_STATUS__OTG_H_BLANK__SHIFT 0x10
+#define OTG0_OTG_STATUS__OTG_H_ACTIVE_DISP__SHIFT 0x11
+#define OTG0_OTG_STATUS__OTG_H_SYNC_A__SHIFT 0x12
+#define OTG0_OTG_STATUS__OTG_V_BLANK_MASK 0x00000001L
+#define OTG0_OTG_STATUS__OTG_V_ACTIVE_DISP_MASK 0x00000002L
+#define OTG0_OTG_STATUS__OTG_V_SYNC_A_MASK 0x00000004L
+#define OTG0_OTG_STATUS__OTG_V_UPDATE_MASK 0x00000008L
+#define OTG0_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE_MASK 0x00000020L
+#define OTG0_OTG_STATUS__OTG_H_BLANK_MASK 0x00010000L
+#define OTG0_OTG_STATUS__OTG_H_ACTIVE_DISP_MASK 0x00020000L
+#define OTG0_OTG_STATUS__OTG_H_SYNC_A_MASK 0x00040000L
+//OTG0_OTG_STATUS_POSITION
+#define OTG0_OTG_STATUS_POSITION__OTG_VERT_COUNT__SHIFT 0x0
+#define OTG0_OTG_STATUS_POSITION__OTG_VERT_LONG_VBLANK__SHIFT 0xf
+#define OTG0_OTG_STATUS_POSITION__OTG_HORZ_COUNT__SHIFT 0x10
+#define OTG0_OTG_STATUS_POSITION__OTG_VERT_COUNT_MASK 0x00007FFFL
+#define OTG0_OTG_STATUS_POSITION__OTG_VERT_LONG_VBLANK_MASK 0x00008000L
+#define OTG0_OTG_STATUS_POSITION__OTG_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG0_OTG_LONG_VBLANK_STATUS
+#define OTG0_OTG_LONG_VBLANK_STATUS__OTG_V_COUNT_STOP_COUNT__SHIFT 0x0
+#define OTG0_OTG_LONG_VBLANK_STATUS__OTG_V_COUNT_STOP_COUNT_MASK 0xFFFFFFFFL
+//OTG0_OTG_NOM_VERT_POSITION
+#define OTG0_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM__SHIFT 0x0
+#define OTG0_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM_MASK 0x00007FFFL
+//OTG0_OTG_STATUS_FRAME_COUNT
+#define OTG0_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT__SHIFT 0x0
+#define OTG0_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG0_OTG_STATUS_VF_COUNT
+#define OTG0_OTG_STATUS_VF_COUNT__OTG_VF_COUNT__SHIFT 0x0
+#define OTG0_OTG_STATUS_VF_COUNT__OTG_VF_COUNT_MASK 0x7FFFFFFFL
+//OTG0_OTG_STATUS_HV_COUNT
+#define OTG0_OTG_STATUS_HV_COUNT__OTG_HV_COUNT__SHIFT 0x0
+#define OTG0_OTG_STATUS_HV_COUNT__OTG_HV_COUNT_MASK 0x7FFFFFFFL
+//OTG0_OTG_COUNT_CONTROL
+#define OTG0_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN__SHIFT 0x0
+#define OTG0_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT__SHIFT 0x1
+#define OTG0_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN_MASK 0x00000001L
+#define OTG0_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT_MASK 0x0000001EL
+//OTG0_OTG_COUNT_RESET
+#define OTG0_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT__SHIFT 0x0
+#define OTG0_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT_MASK 0x00000001L
+//OTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE
+#define OTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__SHIFT 0x0
+#define OTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_MASK 0x00000001L
+//OTG0_OTG_VERT_SYNC_CONTROL
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED__SHIFT 0x0
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR__SHIFT 0x8
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE__SHIFT 0x10
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED_MASK 0x00000001L
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR_MASK 0x00000100L
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE_MASK 0x00030000L
+//OTG0_OTG_STEREO_STATUS
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE__SHIFT 0x0
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT__SHIFT 0x8
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT__SHIFT 0x10
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG__SHIFT 0x14
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING__SHIFT 0x18
+#define OTG0_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE__SHIFT 0x1e
+#define OTG0_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE__SHIFT 0x1f
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE_MASK 0x00000001L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT_MASK 0x00000100L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT_MASK 0x00010000L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG_MASK 0x00100000L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING_MASK 0x03000000L
+#define OTG0_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE_MASK 0x40000000L
+#define OTG0_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE_MASK 0x80000000L
+//OTG0_OTG_STEREO_CONTROL
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM__SHIFT 0x0
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY__SHIFT 0xf
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY__SHIFT 0x11
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP__SHIFT 0x12
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM__SHIFT 0x13
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX__SHIFT 0x14
+#define OTG0_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL__SHIFT 0x15
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_EN__SHIFT 0x18
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM_MASK 0x00007FFFL
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY_MASK 0x00008000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY_MASK 0x00020000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP_MASK 0x00040000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM_MASK 0x00080000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX_MASK 0x00100000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL_MASK 0x00200000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_EN_MASK 0x01000000L
+//OTG0_OTG_SNAPSHOT_STATUS
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED__SHIFT 0x0
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR__SHIFT 0x1
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER__SHIFT 0x2
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED_MASK 0x00000001L
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR_MASK 0x00000002L
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER_MASK 0x00000004L
+//OTG0_OTG_SNAPSHOT_CONTROL
+#define OTG0_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL__SHIFT 0x0
+#define OTG0_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL_MASK 0x00000003L
+//OTG0_OTG_SNAPSHOT_POSITION
+#define OTG0_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT__SHIFT 0x0
+#define OTG0_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT__SHIFT 0x10
+#define OTG0_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT_MASK 0x00007FFFL
+#define OTG0_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG0_OTG_SNAPSHOT_FRAME
+#define OTG0_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT__SHIFT 0x0
+#define OTG0_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG0_OTG_INTERRUPT_CONTROL
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK__SHIFT 0x0
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE__SHIFT 0x1
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK__SHIFT 0x8
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE__SHIFT 0x9
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK__SHIFT 0x10
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE__SHIFT 0x11
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK__SHIFT 0x18
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK__SHIFT 0x19
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE__SHIFT 0x1a
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE__SHIFT 0x1b
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK__SHIFT 0x1c
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE__SHIFT 0x1d
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK__SHIFT 0x1e
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE__SHIFT 0x1f
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK_MASK 0x00000001L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE_MASK 0x00000002L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK_MASK 0x00000100L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE_MASK 0x00000200L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK_MASK 0x00010000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE_MASK 0x00020000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK_MASK 0x01000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK_MASK 0x02000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE_MASK 0x04000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE_MASK 0x08000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK_MASK 0x10000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE_MASK 0x20000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK_MASK 0x40000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE_MASK 0x80000000L
+//OTG0_OTG_UPDATE_LOCK
+#define OTG0_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK__SHIFT 0x0
+#define OTG0_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK_MASK 0x00000001L
+//OTG0_OTG_DOUBLE_BUFFER_CONTROL
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING__SHIFT 0x0
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_PENDING__SHIFT 0x4
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING__SHIFT 0x5
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING__SHIFT 0x6
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING__SHIFT 0x7
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY__SHIFT 0x8
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING__SHIFT 0x9
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING__SHIFT 0xa
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_MODE__SHIFT 0x18
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING_MASK 0x00000001L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_PENDING_MASK 0x00000010L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING_MASK 0x00000020L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING_MASK 0x00000040L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING_MASK 0x00000080L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY_MASK 0x00000100L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING_MASK 0x00000200L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING_MASK 0x00000400L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_MODE_MASK 0x03000000L
+//OTG0_OTG_MASTER_EN
+#define OTG0_OTG_MASTER_EN__OTG_MASTER_EN__SHIFT 0x0
+#define OTG0_OTG_MASTER_EN__OTG_MASTER_EN_MASK 0x00000001L
+//OTG0_OTG_VERTICAL_INTERRUPT0_POSITION
+#define OTG0_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START__SHIFT 0x0
+#define OTG0_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END__SHIFT 0x10
+#define OTG0_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START_MASK 0x00007FFFL
+#define OTG0_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END_MASK 0x7FFF0000L
+//OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY__SHIFT 0x4
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE__SHIFT 0x8
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS__SHIFT 0xc
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS__SHIFT 0x10
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR__SHIFT 0x14
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE__SHIFT 0x18
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VINTE_STATUS__SHIFT 0x1c
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_MASK 0x00000010L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE_MASK 0x00000100L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS_MASK 0x00001000L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS_MASK 0x00010000L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR_MASK 0x00100000L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE_MASK 0x01000000L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VINTE_STATUS_MASK 0x10000000L
+//OTG0_OTG_VERTICAL_INTERRUPT1_POSITION
+#define OTG0_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START__SHIFT 0x0
+#define OTG0_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START_MASK 0x00007FFFL
+//OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE__SHIFT 0x8
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS__SHIFT 0xc
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS__SHIFT 0x10
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR__SHIFT 0x14
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE__SHIFT 0x18
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE_MASK 0x00000100L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS_MASK 0x00001000L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS_MASK 0x00010000L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR_MASK 0x00100000L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE_MASK 0x01000000L
+//OTG0_OTG_VERTICAL_INTERRUPT2_POSITION
+#define OTG0_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START__SHIFT 0x0
+#define OTG0_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START_MASK 0x00007FFFL
+//OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE__SHIFT 0x8
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS__SHIFT 0xc
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS__SHIFT 0x10
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR__SHIFT 0x14
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE__SHIFT 0x18
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE_MASK 0x00000100L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS_MASK 0x00001000L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS_MASK 0x00010000L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR_MASK 0x00100000L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE_MASK 0x01000000L
+//OTG0_OTG_CRC_CNTL
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_EN__SHIFT 0x0
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_WINDOW_DB_EN__SHIFT 0x1
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_POLY_SEL__SHIFT 0x2
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY__SHIFT 0x3
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CONT_EN__SHIFT 0x4
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL__SHIFT 0x5
+#define OTG0_OTG_CRC_CNTL__OTG_CRC1_EN__SHIFT 0x7
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE__SHIFT 0x8
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CONT_MODE__SHIFT 0xa
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x13
+#define OTG0_OTG_CRC_CNTL__OTG_CRC0_SELECT__SHIFT 0x14
+#define OTG0_OTG_CRC_CNTL__OTG_CRC1_SELECT__SHIFT 0x18
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING__SHIFT 0x1c
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING__SHIFT 0x1d
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING__SHIFT 0x1e
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING__SHIFT 0x1f
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_EN_MASK 0x00000001L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_WINDOW_DB_EN_MASK 0x00000002L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_POLY_SEL_MASK 0x00000004L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY_MASK 0x00000008L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CONT_EN_MASK 0x00000010L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL_MASK 0x00000060L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC1_EN_MASK 0x00000080L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE_MASK 0x00000300L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CONT_MODE_MASK 0x00000400L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x00080000L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC0_SELECT_MASK 0x00700000L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC1_SELECT_MASK 0x07000000L
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING_MASK 0x10000000L
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING_MASK 0x20000000L
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING_MASK 0x40000000L
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING_MASK 0x80000000L
+//OTG0_OTG_CRC0_WINDOWA_X_CONTROL
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_WINDOWA_Y_CONTROL
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_WINDOWB_X_CONTROL
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_WINDOWB_Y_CONTROL
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_DATA_RG
+#define OTG0_OTG_CRC0_DATA_RG__CRC0_R_CR__SHIFT 0x0
+#define OTG0_OTG_CRC0_DATA_RG__CRC0_G_Y__SHIFT 0x10
+#define OTG0_OTG_CRC0_DATA_RG__CRC0_R_CR_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC0_DATA_RG__CRC0_G_Y_MASK 0xFFFF0000L
+//OTG0_OTG_CRC0_DATA_B
+#define OTG0_OTG_CRC0_DATA_B__CRC0_B_CB__SHIFT 0x0
+#define OTG0_OTG_CRC0_DATA_B__CRC0_C__SHIFT 0x10
+#define OTG0_OTG_CRC0_DATA_B__CRC0_B_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC0_DATA_B__CRC0_C_MASK 0xFFFF0000L
+//OTG0_OTG_CRC1_WINDOWA_X_CONTROL
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_WINDOWA_Y_CONTROL
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_WINDOWB_X_CONTROL
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_WINDOWB_Y_CONTROL
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_DATA_RG
+#define OTG0_OTG_CRC1_DATA_RG__CRC1_R_CR__SHIFT 0x0
+#define OTG0_OTG_CRC1_DATA_RG__CRC1_G_Y__SHIFT 0x10
+#define OTG0_OTG_CRC1_DATA_RG__CRC1_R_CR_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC1_DATA_RG__CRC1_G_Y_MASK 0xFFFF0000L
+//OTG0_OTG_CRC1_DATA_B
+#define OTG0_OTG_CRC1_DATA_B__CRC1_B_CB__SHIFT 0x0
+#define OTG0_OTG_CRC1_DATA_B__CRC1_C__SHIFT 0x10
+#define OTG0_OTG_CRC1_DATA_B__CRC1_B_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC1_DATA_B__CRC1_C_MASK 0xFFFF0000L
+//OTG0_OTG_CRC2_DATA_RG
+#define OTG0_OTG_CRC2_DATA_RG__CRC2_R_CR__SHIFT 0x0
+#define OTG0_OTG_CRC2_DATA_RG__CRC2_G_Y__SHIFT 0x10
+#define OTG0_OTG_CRC2_DATA_RG__CRC2_R_CR_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC2_DATA_RG__CRC2_G_Y_MASK 0xFFFF0000L
+//OTG0_OTG_CRC2_DATA_B
+#define OTG0_OTG_CRC2_DATA_B__CRC2_B_CB__SHIFT 0x0
+#define OTG0_OTG_CRC2_DATA_B__CRC2_C__SHIFT 0x10
+#define OTG0_OTG_CRC2_DATA_B__CRC2_B_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC2_DATA_B__CRC2_C_MASK 0xFFFF0000L
+//OTG0_OTG_CRC3_DATA_RG
+#define OTG0_OTG_CRC3_DATA_RG__CRC3_R_CR__SHIFT 0x0
+#define OTG0_OTG_CRC3_DATA_RG__CRC3_G_Y__SHIFT 0x10
+#define OTG0_OTG_CRC3_DATA_RG__CRC3_R_CR_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC3_DATA_RG__CRC3_G_Y_MASK 0xFFFF0000L
+//OTG0_OTG_CRC3_DATA_B
+#define OTG0_OTG_CRC3_DATA_B__CRC3_B_CB__SHIFT 0x0
+#define OTG0_OTG_CRC3_DATA_B__CRC3_C__SHIFT 0x10
+#define OTG0_OTG_CRC3_DATA_B__CRC3_B_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC3_DATA_B__CRC3_C_MASK 0xFFFF0000L
+//OTG0_OTG_CRC_SIG_RED_GREEN_MASK
+#define OTG0_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK__SHIFT 0x0
+#define OTG0_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK__SHIFT 0x10
+#define OTG0_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK_MASK 0xFFFF0000L
+//OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK
+#define OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK__SHIFT 0x0
+#define OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK__SHIFT 0x10
+#define OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK_MASK 0xFFFF0000L
+//OTG0_OTG_CRC0_WINDOWA_X_CONTROL_READBACK
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_START_READBACK__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_END_READBACK__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_START_READBACK_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_END_READBACK_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_START_READBACK__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_END_READBACK__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_WINDOWB_X_CONTROL_READBACK
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_START_READBACK__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_END_READBACK__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_START_READBACK_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_END_READBACK_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_START_READBACK__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_END_READBACK__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_WINDOWA_X_CONTROL_READBACK
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_START_READBACK__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_END_READBACK__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_START_READBACK_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_END_READBACK_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_START_READBACK__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_END_READBACK__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_START_READBACK__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_END_READBACK__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_START_READBACK_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_END_READBACK_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_START_READBACK__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_END_READBACK__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG0_OTG_STATIC_SCREEN_CONTROL
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK__SHIFT 0x0
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT__SHIFT 0x10
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE__SHIFT 0x18
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS__SHIFT 0x19
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS__SHIFT 0x1a
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR__SHIFT 0x1b
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE__SHIFT 0x1c
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE__SHIFT 0x1e
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE__SHIFT 0x1f
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK_MASK 0x0000FFFFL
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT_MASK 0x00FF0000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE_MASK 0x01000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS_MASK 0x02000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS_MASK 0x04000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR_MASK 0x08000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE_MASK 0x10000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_MASK 0x40000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE_MASK 0x80000000L
+//OTG0_OTG_3D_STRUCTURE_CONTROL
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN__SHIFT 0x0
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE__SHIFT 0x8
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR__SHIFT 0xc
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET__SHIFT 0x10
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING__SHIFT 0x11
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT__SHIFT 0x12
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN_MASK 0x00000001L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE_MASK 0x00000300L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR_MASK 0x00001000L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_MASK 0x00010000L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING_MASK 0x00020000L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_MASK 0x000C0000L
+//OTG0_OTG_GSL_VSYNC_GAP
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT__SHIFT 0x0
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY__SHIFT 0x8
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL__SHIFT 0x10
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE__SHIFT 0x11
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR__SHIFT 0x13
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED__SHIFT 0x14
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER__SHIFT 0x17
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP__SHIFT 0x18
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT_MASK 0x000000FFL
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY_MASK 0x0000FF00L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL_MASK 0x00010000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE_MASK 0x00060000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR_MASK 0x00080000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED_MASK 0x00100000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER_MASK 0x00800000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASK 0xFF000000L
+//OTG0_OTG_MASTER_UPDATE_MODE
+#define OTG0_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE__SHIFT 0x0
+#define OTG0_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE_MASK 0x00000003L
+//OTG0_OTG_CLOCK_CONTROL
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_EN__SHIFT 0x0
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS__SHIFT 0x1
+#define OTG0_OTG_CLOCK_CONTROL__OTG_SOFT_RESET__SHIFT 0x4
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_ON__SHIFT 0x8
+#define OTG0_OTG_CLOCK_CONTROL__OTG_BUSY__SHIFT 0x10
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_EN_MASK 0x00000001L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS_MASK 0x00000002L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_SOFT_RESET_MASK 0x00000010L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_ON_MASK 0x00000100L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_BUSY_MASK 0x00010000L
+//OTG0_OTG_VSTARTUP_PARAM
+#define OTG0_OTG_VSTARTUP_PARAM__VSTARTUP_START__SHIFT 0x0
+#define OTG0_OTG_VSTARTUP_PARAM__VSTARTUP_START_MASK 0x000003FFL
+//OTG0_OTG_VUPDATE_PARAM
+#define OTG0_OTG_VUPDATE_PARAM__VUPDATE_OFFSET__SHIFT 0x0
+#define OTG0_OTG_VUPDATE_PARAM__VUPDATE_WIDTH__SHIFT 0x10
+#define OTG0_OTG_VUPDATE_PARAM__VUPDATE_OFFSET_MASK 0x0000FFFFL
+#define OTG0_OTG_VUPDATE_PARAM__VUPDATE_WIDTH_MASK 0x03FF0000L
+//OTG0_OTG_VREADY_PARAM
+#define OTG0_OTG_VREADY_PARAM__VREADY_OFFSET__SHIFT 0x0
+#define OTG0_OTG_VREADY_PARAM__VREADY_OFFSET_MASK 0x0000FFFFL
+//OTG0_OTG_GLOBAL_SYNC_STATUS
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE__SHIFT 0x1
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED__SHIFT 0x2
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS__SHIFT 0x3
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR__SHIFT 0x4
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN__SHIFT 0x5
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE__SHIFT 0x6
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL__SHIFT 0x7
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED__SHIFT 0x8
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS__SHIFT 0x9
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR__SHIFT 0xa
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS__SHIFT 0xb
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN__SHIFT 0xc
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE__SHIFT 0xd
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED__SHIFT 0xe
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS__SHIFT 0xf
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS__SHIFT 0x11
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN__SHIFT 0x12
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE__SHIFT 0x13
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED__SHIFT 0x14
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS__SHIFT 0x15
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR__SHIFT 0x16
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS__SHIFT 0x18
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS__SHIFT 0x19
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN_MASK 0x00000001L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE_MASK 0x00000002L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED_MASK 0x00000004L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS_MASK 0x00000008L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR_MASK 0x00000010L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN_MASK 0x00000020L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE_MASK 0x00000040L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL_MASK 0x00000080L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED_MASK 0x00000100L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS_MASK 0x00000200L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR_MASK 0x00000400L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS_MASK 0x00000800L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN_MASK 0x00001000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE_MASK 0x00002000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED_MASK 0x00004000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS_MASK 0x00008000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR_MASK 0x00010000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS_MASK 0x00020000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN_MASK 0x00040000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE_MASK 0x00080000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED_MASK 0x00100000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS_MASK 0x00200000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR_MASK 0x00400000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS_MASK 0x01000000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS_MASK 0x02000000L
+//OTG0_OTG_MASTER_UPDATE_LOCK
+#define OTG0_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK__SHIFT 0x0
+#define OTG0_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS__SHIFT 0x8
+#define OTG0_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK_MASK 0x00000001L
+#define OTG0_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS_MASK 0x00000100L
+//OTG0_OTG_GSL_CONTROL
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL0_EN__SHIFT 0x0
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL1_EN__SHIFT 0x1
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL2_EN__SHIFT 0x2
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN__SHIFT 0x3
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE__SHIFT 0x4
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY__SHIFT 0x8
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY__SHIFT 0x10
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS__SHIFT 0x1c
+#define OTG0_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN__SHIFT 0x1f
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL0_EN_MASK 0x00000001L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL1_EN_MASK 0x00000002L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL2_EN_MASK 0x00000004L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN_MASK 0x00000008L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE_MASK 0x00000030L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY_MASK 0x00000F00L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY_MASK 0x001F0000L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS_MASK 0x10000000L
+#define OTG0_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN_MASK 0x80000000L
+//OTG0_OTG_GSL_WINDOW_X
+#define OTG0_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X__SHIFT 0x0
+#define OTG0_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X__SHIFT 0x10
+#define OTG0_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG0_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X_MASK 0x7FFF0000L
+//OTG0_OTG_GSL_WINDOW_Y
+#define OTG0_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y__SHIFT 0x0
+#define OTG0_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y__SHIFT 0x10
+#define OTG0_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y_MASK 0x00007FFFL
+#define OTG0_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y_MASK 0x7FFF0000L
+//OTG0_OTG_VUPDATE_KEEPOUT
+#define OTG0_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET__SHIFT 0x0
+#define OTG0_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET__SHIFT 0x10
+#define OTG0_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN__SHIFT 0x1f
+#define OTG0_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET_MASK 0x0000FFFFL
+#define OTG0_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET_MASK 0x03FF0000L
+#define OTG0_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN_MASK 0x80000000L
+//OTG0_OTG_GLOBAL_CONTROL0
+#define OTG0_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_START_X__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_END_X__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_EN__SHIFT 0x1f
+#define OTG0_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_START_X_MASK 0x00007FFFL
+#define OTG0_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_END_X_MASK 0x7FFF0000L
+#define OTG0_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_EN_MASK 0x80000000L
+//OTG0_OTG_GLOBAL_CONTROL1
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_START_Y__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_END_Y__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_VCOUNT_MODE__SHIFT 0x1f
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_START_Y_MASK 0x00007FFFL
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_END_Y_MASK 0x7FFF0000L
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_VCOUNT_MODE_MASK 0x80000000L
+//OTG0_OTG_GLOBAL_CONTROL2
+#define OTG0_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN__SHIFT 0xa
+#define OTG0_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_CONTROL2__OTG_MASTER_UPDATE_LOCK_SEL__SHIFT 0x19
+#define OTG0_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE__SHIFT 0x1e
+#define OTG0_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE__SHIFT 0x1f
+#define OTG0_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN_MASK 0x00000400L
+#define OTG0_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL_MASK 0x00070000L
+#define OTG0_OTG_GLOBAL_CONTROL2__OTG_MASTER_UPDATE_LOCK_SEL_MASK 0x0E000000L
+#define OTG0_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE_MASK 0x40000000L
+#define OTG0_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE_MASK 0x80000000L
+//OTG0_OTG_GLOBAL_CONTROL3
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL__SHIFT 0x4
+#define OTG0_OTG_GLOBAL_CONTROL3__DIG_UPDATE_FIELD_SEL__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_CONTROL3__DIG_UPDATE_EYE_SEL__SHIFT 0x14
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_MASK 0x00000003L
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL_MASK 0x00000030L
+#define OTG0_OTG_GLOBAL_CONTROL3__DIG_UPDATE_FIELD_SEL_MASK 0x00030000L
+#define OTG0_OTG_GLOBAL_CONTROL3__DIG_UPDATE_EYE_SEL_MASK 0x00300000L
+//OTG0_OTG_GLOBAL_CONTROL4
+#define OTG0_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_X__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_Y__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_CONTROL4__DIG_UPDATE_VCOUNT_MODE__SHIFT 0x1f
+#define OTG0_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_X_MASK 0x00007FFFL
+#define OTG0_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_Y_MASK 0x7FFF0000L
+#define OTG0_OTG_GLOBAL_CONTROL4__DIG_UPDATE_VCOUNT_MODE_MASK 0x80000000L
+//OTG0_OTG_TRIG_MANUAL_CONTROL
+#define OTG0_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL__SHIFT 0x0
+#define OTG0_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL_MASK 0x00000001L
+//OTG0_OTG_DRR_TIMING_INT_STATUS
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED__SHIFT 0x0
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT__SHIFT 0x4
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_CLEAR__SHIFT 0x8
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MSK__SHIFT 0xc
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_TYPE__SHIFT 0xd
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED__SHIFT 0x10
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT__SHIFT 0x14
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_CLEAR__SHIFT 0x18
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MSK__SHIFT 0x1c
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_TYPE__SHIFT 0x1d
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_MASK 0x00000001L
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MASK 0x00000010L
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_CLEAR_MASK 0x00000100L
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MSK_MASK 0x00001000L
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_TYPE_MASK 0x00002000L
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_MASK 0x00010000L
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MASK 0x00100000L
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_CLEAR_MASK 0x01000000L
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MSK_MASK 0x10000000L
+#define OTG0_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_TYPE_MASK 0x20000000L
+//OTG0_OTG_DRR_V_TOTAL_REACH_RANGE
+#define OTG0_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_LOWER_RANGE__SHIFT 0x0
+#define OTG0_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_UPPER_RANGE__SHIFT 0x10
+#define OTG0_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_LOWER_RANGE_MASK 0x00007FFFL
+#define OTG0_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_UPPER_RANGE_MASK 0x7FFF0000L
+//OTG0_OTG_DRR_V_TOTAL_CHANGE
+#define OTG0_OTG_DRR_V_TOTAL_CHANGE__OTG_DRR_V_TOTAL_CHANGE_LIMIT__SHIFT 0x0
+#define OTG0_OTG_DRR_V_TOTAL_CHANGE__OTG_DRR_V_TOTAL_CHANGE_LIMIT_MASK 0x00007FFFL
+//OTG0_OTG_DRR_TRIGGER_WINDOW
+#define OTG0_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_START_X__SHIFT 0x0
+#define OTG0_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_END_X__SHIFT 0x10
+#define OTG0_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG0_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_END_X_MASK 0x7FFF0000L
+//OTG0_OTG_DRR_CONTROL
+#define OTG0_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME__SHIFT 0x0
+#define OTG0_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR__SHIFT 0x10
+#define OTG0_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME_MASK 0x00000003L
+#define OTG0_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR_MASK 0x7FFF0000L
+//OTG0_OTG_DRR_CONTOL2
+#define OTG0_OTG_DRR_CONTOL2__OTG_VCOUNT2_LAST_USED_BY_DRR__SHIFT 0x0
+#define OTG0_OTG_DRR_CONTOL2__OTG_VCOUNT2_LAST_USED_BY_DRR_MASK 0xFFFFFFFFL
+//OTG0_OTG_M_CONST_DTO0
+#define OTG0_OTG_M_CONST_DTO0__OTG_M_CONST_DTO_PHASE__SHIFT 0x0
+#define OTG0_OTG_M_CONST_DTO0__OTG_M_CONST_DTO_PHASE_MASK 0xFFFFFFFFL
+//OTG0_OTG_M_CONST_DTO1
+#define OTG0_OTG_M_CONST_DTO1__OTG_M_CONST_DTO_MODULO__SHIFT 0x0
+#define OTG0_OTG_M_CONST_DTO1__OTG_M_CONST_DTO_MODULO_MASK 0xFFFFFFFFL
+//OTG0_OTG_REQUEST_CONTROL
+#define OTG0_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE__SHIFT 0x0
+#define OTG0_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE_MASK 0x00000001L
+//OTG0_OTG_DSC_START_POSITION
+#define OTG0_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X__SHIFT 0x0
+#define OTG0_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM__SHIFT 0x10
+#define OTG0_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X_MASK 0x00007FFFL
+#define OTG0_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM_MASK 0x03FF0000L
+//OTG0_OTG_PIPE_UPDATE_STATUS
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING__SHIFT 0x0
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING__SHIFT 0x4
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING__SHIFT 0x8
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS__SHIFT 0x10
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING_MASK 0x00000001L
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING_MASK 0x00000010L
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING_MASK 0x00000100L
+#define OTG0_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS_MASK 0x00010000L
+//OTG0_OTG_SPARE_REGISTER
+#define OTG0_OTG_SPARE_REGISTER__OTG_SPARE_REG__SHIFT 0x0
+#define OTG0_OTG_SPARE_REGISTER__OTG_SPARE_REG_MASK 0xFFFFFFFFL
+
+
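The paired __SHIFT/_MASK macros in the block above follow the usual convention for this family of generated headers: a field is read by masking the register value and shifting it down, and written by clearing the masked bits and OR-ing in the shifted new value. The sketch below is illustrative only and is not part of the generated header or the amdgpu driver; it assumes the OTG0_OTG_CLOCK_CONTROL definitions from this block and plain C integer types, and the helper names are hypothetical.

#include <stdint.h>

/* Read a field from a 32-bit register value using its _MASK/__SHIFT pair. */
static inline uint32_t otg_field_get(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Replace a field in a 32-bit register value with a new field value. */
static inline uint32_t otg_field_set(uint32_t reg, uint32_t mask, uint32_t shift,
				     uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/*
 * Example: given a value read from OTG0_OTG_CLOCK_CONTROL, sample OTG_BUSY
 * and return the value with OTG_CLOCK_EN set.
 */
static uint32_t otg0_enable_clock(uint32_t clock_control)
{
	/* Callers would typically poll OTG_BUSY before reprogramming the block. */
	uint32_t busy = otg_field_get(clock_control,
				      OTG0_OTG_CLOCK_CONTROL__OTG_BUSY_MASK,
				      OTG0_OTG_CLOCK_CONTROL__OTG_BUSY__SHIFT);
	(void)busy;

	return otg_field_set(clock_control,
			     OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_EN_MASK,
			     OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_EN__SHIFT, 1);
}

In the driver itself, register access goes through its own helper macros built on these shift/mask pairs rather than open-coded expressions like the above; the sketch only shows what those pairs encode.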
+// addressBlock: dce_dc_optc_otg1_dispdec
+//OTG1_OTG_H_TOTAL
+#define OTG1_OTG_H_TOTAL__OTG_H_TOTAL__SHIFT 0x0
+#define OTG1_OTG_H_TOTAL__OTG_H_TOTAL_MASK 0x00007FFFL
+//OTG1_OTG_H_BLANK_START_END
+#define OTG1_OTG_H_BLANK_START_END__OTG_H_BLANK_START__SHIFT 0x0
+#define OTG1_OTG_H_BLANK_START_END__OTG_H_BLANK_END__SHIFT 0x10
+#define OTG1_OTG_H_BLANK_START_END__OTG_H_BLANK_START_MASK 0x00007FFFL
+#define OTG1_OTG_H_BLANK_START_END__OTG_H_BLANK_END_MASK 0x7FFF0000L
+//OTG1_OTG_H_SYNC_A
+#define OTG1_OTG_H_SYNC_A__OTG_H_SYNC_A_START__SHIFT 0x0
+#define OTG1_OTG_H_SYNC_A__OTG_H_SYNC_A_END__SHIFT 0x10
+#define OTG1_OTG_H_SYNC_A__OTG_H_SYNC_A_START_MASK 0x00007FFFL
+#define OTG1_OTG_H_SYNC_A__OTG_H_SYNC_A_END_MASK 0x7FFF0000L
+//OTG1_OTG_H_SYNC_A_CNTL
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL__SHIFT 0x0
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN__SHIFT 0x10
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF__SHIFT 0x11
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL_MASK 0x00000001L
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN_MASK 0x00010000L
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF_MASK 0x00020000L
+//OTG1_OTG_H_TIMING_CNTL
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE__SHIFT 0x0
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MANUAL__SHIFT 0x8
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_CURR__SHIFT 0x10
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MASK 0x00000003L
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MANUAL_MASK 0x00000100L
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_CURR_MASK 0x00030000L
+//OTG1_OTG_V_TOTAL
+#define OTG1_OTG_V_TOTAL__OTG_V_TOTAL__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL__OTG_V_TOTAL_MASK 0x00007FFFL
+//OTG1_OTG_V_TOTAL_MIN
+#define OTG1_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN_MASK 0x00007FFFL
+//OTG1_OTG_V_TOTAL_MAX
+#define OTG1_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX_MASK 0x00007FFFL
+//OTG1_OTG_V_TOTAL_MID
+#define OTG1_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID_MASK 0x00007FFFL
+//OTG1_OTG_V_TOTAL_CONTROL
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL__SHIFT 0x1
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN__SHIFT 0x2
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN__SHIFT 0x3
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT__SHIFT 0x4
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD__SHIFT 0x5
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM__SHIFT 0x8
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK__SHIFT 0x10
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL_MASK 0x00000001L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL_MASK 0x00000002L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN_MASK 0x00000004L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN_MASK 0x00000008L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT_MASK 0x00000010L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD_MASK 0x00000020L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM_MASK 0x0000FF00L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_MASK 0xFFFF0000L
+//OTG1_OTG_V_COUNT_STOP_CONTROL
+#define OTG1_OTG_V_COUNT_STOP_CONTROL__OTG_V_COUNT_STOP__SHIFT 0x0
+#define OTG1_OTG_V_COUNT_STOP_CONTROL__OTG_V_COUNT_STOP_MASK 0x00007FFFL
+//OTG1_OTG_V_COUNT_STOP_CONTROL2
+#define OTG1_OTG_V_COUNT_STOP_CONTROL2__OTG_V_COUNT_STOP_TIMER__SHIFT 0x0
+#define OTG1_OTG_V_COUNT_STOP_CONTROL2__OTG_V_COUNT_STOP_TIMER_MASK 0xFFFFFFFFL
+//OTG1_OTG_V_TOTAL_INT_STATUS
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT__SHIFT 0x4
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK__SHIFT 0x8
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK__SHIFT 0xc
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MASK 0x00000001L
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT_MASK 0x00000010L
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK_MASK 0x00000100L
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK_MASK 0x00001000L
+//OTG1_OTG_VSYNC_NOM_INT_STATUS
+#define OTG1_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM__SHIFT 0x0
+#define OTG1_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR__SHIFT 0x4
+#define OTG1_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_MASK 0x00000001L
+#define OTG1_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR_MASK 0x00000010L
+//OTG1_OTG_V_BLANK_START_END
+#define OTG1_OTG_V_BLANK_START_END__OTG_V_BLANK_START__SHIFT 0x0
+#define OTG1_OTG_V_BLANK_START_END__OTG_V_BLANK_END__SHIFT 0x10
+#define OTG1_OTG_V_BLANK_START_END__OTG_V_BLANK_START_MASK 0x00007FFFL
+#define OTG1_OTG_V_BLANK_START_END__OTG_V_BLANK_END_MASK 0x7FFF0000L
+//OTG1_OTG_V_SYNC_A
+#define OTG1_OTG_V_SYNC_A__OTG_V_SYNC_A_START__SHIFT 0x0
+#define OTG1_OTG_V_SYNC_A__OTG_V_SYNC_A_END__SHIFT 0x10
+#define OTG1_OTG_V_SYNC_A__OTG_V_SYNC_A_START_MASK 0x00007FFFL
+#define OTG1_OTG_V_SYNC_A__OTG_V_SYNC_A_END_MASK 0x7FFF0000L
+//OTG1_OTG_V_SYNC_A_CNTL
+#define OTG1_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL__SHIFT 0x0
+#define OTG1_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_MODE__SHIFT 0x8
+#define OTG1_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL_MASK 0x00000001L
+#define OTG1_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_MODE_MASK 0x00000100L
+//OTG1_OTG_TRIGA_CNTL
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT__SHIFT 0x0
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT__SHIFT 0x8
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS__SHIFT 0xc
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS__SHIFT 0xd
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED__SHIFT 0xe
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY__SHIFT 0x18
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR__SHIFT 0x1f
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT_MASK 0x00000700L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS_MASK 0x00001000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS_MASK 0x00002000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED_MASK 0x00004000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY_MASK 0x1F000000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR_MASK 0x80000000L
+//OTG1_OTG_TRIGA_MANUAL_TRIG
+#define OTG1_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG__SHIFT 0x0
+#define OTG1_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG_MASK 0x00000001L
+//OTG1_OTG_TRIGB_CNTL
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT__SHIFT 0x0
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT__SHIFT 0x8
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS__SHIFT 0xc
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS__SHIFT 0xd
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED__SHIFT 0xe
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY__SHIFT 0x18
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR__SHIFT 0x1f
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT_MASK 0x00000700L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS_MASK 0x00001000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS_MASK 0x00002000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED_MASK 0x00004000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY_MASK 0x1F000000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR_MASK 0x80000000L
+//OTG1_OTG_TRIGB_MANUAL_TRIG
+#define OTG1_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG__SHIFT 0x0
+#define OTG1_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG_MASK 0x00000001L
+//OTG1_OTG_FORCE_COUNT_NOW_CNTL
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE__SHIFT 0x0
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK__SHIFT 0x4
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL__SHIFT 0x8
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED__SHIFT 0x10
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR__SHIFT 0x18
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE_MASK 0x00000003L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK_MASK 0x00000010L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL_MASK 0x00000100L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED_MASK 0x00010000L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR_MASK 0x01000000L
+//OTG1_OTG_STEREO_FORCE_NEXT_EYE
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE__SHIFT 0x0
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE_MASK 0x00000003L
+//OTG1_OTG_CONTROL
+#define OTG1_OTG_CONTROL__OTG_MASTER_EN__SHIFT 0x0
+#define OTG1_OTG_CONTROL__OTG_DISABLE_POINT_CNTL__SHIFT 0x8
+#define OTG1_OTG_CONTROL__OTG_START_POINT_CNTL__SHIFT 0xc
+#define OTG1_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL__SHIFT 0xd
+#define OTG1_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY__SHIFT 0xe
+#define OTG1_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE__SHIFT 0x10
+#define OTG1_OTG_CONTROL__OTG_OUT_MUX__SHIFT 0x14
+#define OTG1_OTG_CONTROL__OTG_MASTER_EN_MASK 0x00000001L
+#define OTG1_OTG_CONTROL__OTG_DISABLE_POINT_CNTL_MASK 0x00000300L
+#define OTG1_OTG_CONTROL__OTG_START_POINT_CNTL_MASK 0x00001000L
+#define OTG1_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL_MASK 0x00002000L
+#define OTG1_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY_MASK 0x00004000L
+#define OTG1_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE_MASK 0x00010000L
+#define OTG1_OTG_CONTROL__OTG_OUT_MUX_MASK 0x00300000L
+//OTG1_OTG_DLPC_CONTROL
+#define OTG1_OTG_DLPC_CONTROL__OTG_RESYNC_MODE__SHIFT 0x0
+#define OTG1_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_LOCATION__SHIFT 0x10
+#define OTG1_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_CURRENT__SHIFT 0x1f
+#define OTG1_OTG_DLPC_CONTROL__OTG_RESYNC_MODE_MASK 0x00000001L
+#define OTG1_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_LOCATION_MASK 0x7FFF0000L
+#define OTG1_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_CURRENT_MASK 0x80000000L
+//OTG1_OTG_INTERLACE_CONTROL
+#define OTG1_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE__SHIFT 0x0
+#define OTG1_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD__SHIFT 0x10
+#define OTG1_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE_MASK 0x00000001L
+#define OTG1_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD_MASK 0x00030000L
+//OTG1_OTG_INTERLACE_STATUS
+#define OTG1_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD__SHIFT 0x0
+#define OTG1_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD__SHIFT 0x1
+#define OTG1_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD_MASK 0x00000001L
+#define OTG1_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD_MASK 0x00000002L
+//OTG1_OTG_PIXEL_DATA_READBACK0
+#define OTG1_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB__SHIFT 0x0
+#define OTG1_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y__SHIFT 0x10
+#define OTG1_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB_MASK 0x0000FFFFL
+#define OTG1_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y_MASK 0xFFFF0000L
+//OTG1_OTG_PIXEL_DATA_READBACK1
+#define OTG1_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR__SHIFT 0x0
+#define OTG1_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR_MASK 0x0000FFFFL
+//OTG1_OTG_STATUS
+#define OTG1_OTG_STATUS__OTG_V_BLANK__SHIFT 0x0
+#define OTG1_OTG_STATUS__OTG_V_ACTIVE_DISP__SHIFT 0x1
+#define OTG1_OTG_STATUS__OTG_V_SYNC_A__SHIFT 0x2
+#define OTG1_OTG_STATUS__OTG_V_UPDATE__SHIFT 0x3
+#define OTG1_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE__SHIFT 0x5
+#define OTG1_OTG_STATUS__OTG_H_BLANK__SHIFT 0x10
+#define OTG1_OTG_STATUS__OTG_H_ACTIVE_DISP__SHIFT 0x11
+#define OTG1_OTG_STATUS__OTG_H_SYNC_A__SHIFT 0x12
+#define OTG1_OTG_STATUS__OTG_V_BLANK_MASK 0x00000001L
+#define OTG1_OTG_STATUS__OTG_V_ACTIVE_DISP_MASK 0x00000002L
+#define OTG1_OTG_STATUS__OTG_V_SYNC_A_MASK 0x00000004L
+#define OTG1_OTG_STATUS__OTG_V_UPDATE_MASK 0x00000008L
+#define OTG1_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE_MASK 0x00000020L
+#define OTG1_OTG_STATUS__OTG_H_BLANK_MASK 0x00010000L
+#define OTG1_OTG_STATUS__OTG_H_ACTIVE_DISP_MASK 0x00020000L
+#define OTG1_OTG_STATUS__OTG_H_SYNC_A_MASK 0x00040000L
+//OTG1_OTG_STATUS_POSITION
+#define OTG1_OTG_STATUS_POSITION__OTG_VERT_COUNT__SHIFT 0x0
+#define OTG1_OTG_STATUS_POSITION__OTG_VERT_LONG_VBLANK__SHIFT 0xf
+#define OTG1_OTG_STATUS_POSITION__OTG_HORZ_COUNT__SHIFT 0x10
+#define OTG1_OTG_STATUS_POSITION__OTG_VERT_COUNT_MASK 0x00007FFFL
+#define OTG1_OTG_STATUS_POSITION__OTG_VERT_LONG_VBLANK_MASK 0x00008000L
+#define OTG1_OTG_STATUS_POSITION__OTG_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG1_OTG_LONG_VBLANK_STATUS
+#define OTG1_OTG_LONG_VBLANK_STATUS__OTG_V_COUNT_STOP_COUNT__SHIFT 0x0
+#define OTG1_OTG_LONG_VBLANK_STATUS__OTG_V_COUNT_STOP_COUNT_MASK 0xFFFFFFFFL
+//OTG1_OTG_NOM_VERT_POSITION
+#define OTG1_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM__SHIFT 0x0
+#define OTG1_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM_MASK 0x00007FFFL
+//OTG1_OTG_STATUS_FRAME_COUNT
+#define OTG1_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT__SHIFT 0x0
+#define OTG1_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG1_OTG_STATUS_VF_COUNT
+#define OTG1_OTG_STATUS_VF_COUNT__OTG_VF_COUNT__SHIFT 0x0
+#define OTG1_OTG_STATUS_VF_COUNT__OTG_VF_COUNT_MASK 0x7FFFFFFFL
+//OTG1_OTG_STATUS_HV_COUNT
+#define OTG1_OTG_STATUS_HV_COUNT__OTG_HV_COUNT__SHIFT 0x0
+#define OTG1_OTG_STATUS_HV_COUNT__OTG_HV_COUNT_MASK 0x7FFFFFFFL
+//OTG1_OTG_COUNT_CONTROL
+#define OTG1_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN__SHIFT 0x0
+#define OTG1_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT__SHIFT 0x1
+#define OTG1_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN_MASK 0x00000001L
+#define OTG1_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT_MASK 0x0000001EL
+//OTG1_OTG_COUNT_RESET
+#define OTG1_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT__SHIFT 0x0
+#define OTG1_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT_MASK 0x00000001L
+//OTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE
+#define OTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__SHIFT 0x0
+#define OTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_MASK 0x00000001L
+//OTG1_OTG_VERT_SYNC_CONTROL
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED__SHIFT 0x0
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR__SHIFT 0x8
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE__SHIFT 0x10
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED_MASK 0x00000001L
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR_MASK 0x00000100L
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE_MASK 0x00030000L
+//OTG1_OTG_STEREO_STATUS
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE__SHIFT 0x0
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT__SHIFT 0x8
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT__SHIFT 0x10
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG__SHIFT 0x14
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING__SHIFT 0x18
+#define OTG1_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE__SHIFT 0x1e
+#define OTG1_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE__SHIFT 0x1f
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE_MASK 0x00000001L
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT_MASK 0x00000100L
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT_MASK 0x00010000L
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG_MASK 0x00100000L
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING_MASK 0x03000000L
+#define OTG1_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE_MASK 0x40000000L
+#define OTG1_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE_MASK 0x80000000L
+//OTG1_OTG_STEREO_CONTROL
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM__SHIFT 0x0
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY__SHIFT 0xf
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY__SHIFT 0x11
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP__SHIFT 0x12
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM__SHIFT 0x13
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX__SHIFT 0x14
+#define OTG1_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL__SHIFT 0x15
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_EN__SHIFT 0x18
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM_MASK 0x00007FFFL
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY_MASK 0x00008000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY_MASK 0x00020000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP_MASK 0x00040000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM_MASK 0x00080000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX_MASK 0x00100000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL_MASK 0x00200000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_EN_MASK 0x01000000L
+//OTG1_OTG_SNAPSHOT_STATUS
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED__SHIFT 0x0
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR__SHIFT 0x1
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER__SHIFT 0x2
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED_MASK 0x00000001L
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR_MASK 0x00000002L
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER_MASK 0x00000004L
+//OTG1_OTG_SNAPSHOT_CONTROL
+#define OTG1_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL__SHIFT 0x0
+#define OTG1_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL_MASK 0x00000003L
+//OTG1_OTG_SNAPSHOT_POSITION
+#define OTG1_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT__SHIFT 0x0
+#define OTG1_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT__SHIFT 0x10
+#define OTG1_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT_MASK 0x00007FFFL
+#define OTG1_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG1_OTG_SNAPSHOT_FRAME
+#define OTG1_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT__SHIFT 0x0
+#define OTG1_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG1_OTG_INTERRUPT_CONTROL
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK__SHIFT 0x0
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE__SHIFT 0x1
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK__SHIFT 0x8
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE__SHIFT 0x9
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK__SHIFT 0x10
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE__SHIFT 0x11
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK__SHIFT 0x18
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK__SHIFT 0x19
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE__SHIFT 0x1a
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE__SHIFT 0x1b
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK__SHIFT 0x1c
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE__SHIFT 0x1d
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK__SHIFT 0x1e
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE__SHIFT 0x1f
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK_MASK 0x00000001L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE_MASK 0x00000002L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK_MASK 0x00000100L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE_MASK 0x00000200L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK_MASK 0x00010000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE_MASK 0x00020000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK_MASK 0x01000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK_MASK 0x02000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE_MASK 0x04000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE_MASK 0x08000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK_MASK 0x10000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE_MASK 0x20000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK_MASK 0x40000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE_MASK 0x80000000L
+//OTG1_OTG_UPDATE_LOCK
+#define OTG1_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK__SHIFT 0x0
+#define OTG1_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK_MASK 0x00000001L
+//OTG1_OTG_DOUBLE_BUFFER_CONTROL
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING__SHIFT 0x0
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_PENDING__SHIFT 0x4
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING__SHIFT 0x5
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING__SHIFT 0x6
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING__SHIFT 0x7
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY__SHIFT 0x8
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING__SHIFT 0x9
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING__SHIFT 0xa
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_MODE__SHIFT 0x18
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING_MASK 0x00000001L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_PENDING_MASK 0x00000010L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING_MASK 0x00000020L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING_MASK 0x00000040L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING_MASK 0x00000080L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY_MASK 0x00000100L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING_MASK 0x00000200L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING_MASK 0x00000400L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_MODE_MASK 0x03000000L
+//OTG1_OTG_MASTER_EN
+#define OTG1_OTG_MASTER_EN__OTG_MASTER_EN__SHIFT 0x0
+#define OTG1_OTG_MASTER_EN__OTG_MASTER_EN_MASK 0x00000001L
+//OTG1_OTG_VERTICAL_INTERRUPT0_POSITION
+#define OTG1_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START__SHIFT 0x0
+#define OTG1_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END__SHIFT 0x10
+#define OTG1_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START_MASK 0x00007FFFL
+#define OTG1_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END_MASK 0x7FFF0000L
+//OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY__SHIFT 0x4
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE__SHIFT 0x8
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS__SHIFT 0xc
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS__SHIFT 0x10
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR__SHIFT 0x14
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE__SHIFT 0x18
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VINTE_STATUS__SHIFT 0x1c
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_MASK 0x00000010L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE_MASK 0x00000100L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS_MASK 0x00001000L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS_MASK 0x00010000L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR_MASK 0x00100000L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE_MASK 0x01000000L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VINTE_STATUS_MASK 0x10000000L
+//OTG1_OTG_VERTICAL_INTERRUPT1_POSITION
+#define OTG1_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START__SHIFT 0x0
+#define OTG1_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START_MASK 0x00007FFFL
+//OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE__SHIFT 0x8
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS__SHIFT 0xc
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS__SHIFT 0x10
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR__SHIFT 0x14
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE__SHIFT 0x18
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE_MASK 0x00000100L
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS_MASK 0x00001000L
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS_MASK 0x00010000L
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR_MASK 0x00100000L
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE_MASK 0x01000000L
+//OTG1_OTG_VERTICAL_INTERRUPT2_POSITION
+#define OTG1_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START__SHIFT 0x0
+#define OTG1_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START_MASK 0x00007FFFL
+//OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE__SHIFT 0x8
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS__SHIFT 0xc
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS__SHIFT 0x10
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR__SHIFT 0x14
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE__SHIFT 0x18
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE_MASK 0x00000100L
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS_MASK 0x00001000L
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS_MASK 0x00010000L
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR_MASK 0x00100000L
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE_MASK 0x01000000L
+//OTG1_OTG_CRC_CNTL
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_EN__SHIFT 0x0
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_WINDOW_DB_EN__SHIFT 0x1
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_POLY_SEL__SHIFT 0x2
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY__SHIFT 0x3
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CONT_EN__SHIFT 0x4
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL__SHIFT 0x5
+#define OTG1_OTG_CRC_CNTL__OTG_CRC1_EN__SHIFT 0x7
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE__SHIFT 0x8
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CONT_MODE__SHIFT 0xa
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x13
+#define OTG1_OTG_CRC_CNTL__OTG_CRC0_SELECT__SHIFT 0x14
+#define OTG1_OTG_CRC_CNTL__OTG_CRC1_SELECT__SHIFT 0x18
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING__SHIFT 0x1c
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING__SHIFT 0x1d
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING__SHIFT 0x1e
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING__SHIFT 0x1f
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_EN_MASK 0x00000001L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_WINDOW_DB_EN_MASK 0x00000002L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_POLY_SEL_MASK 0x00000004L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY_MASK 0x00000008L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CONT_EN_MASK 0x00000010L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL_MASK 0x00000060L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC1_EN_MASK 0x00000080L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE_MASK 0x00000300L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CONT_MODE_MASK 0x00000400L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x00080000L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC0_SELECT_MASK 0x00700000L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC1_SELECT_MASK 0x07000000L
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING_MASK 0x10000000L
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING_MASK 0x20000000L
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING_MASK 0x40000000L
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING_MASK 0x80000000L
+//OTG1_OTG_CRC0_WINDOWA_X_CONTROL
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC0_WINDOWA_Y_CONTROL
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC0_WINDOWB_X_CONTROL
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC0_WINDOWB_Y_CONTROL
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC0_DATA_RG
+#define OTG1_OTG_CRC0_DATA_RG__CRC0_R_CR__SHIFT 0x0
+#define OTG1_OTG_CRC0_DATA_RG__CRC0_G_Y__SHIFT 0x10
+#define OTG1_OTG_CRC0_DATA_RG__CRC0_R_CR_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC0_DATA_RG__CRC0_G_Y_MASK 0xFFFF0000L
+//OTG1_OTG_CRC0_DATA_B
+#define OTG1_OTG_CRC0_DATA_B__CRC0_B_CB__SHIFT 0x0
+#define OTG1_OTG_CRC0_DATA_B__CRC0_C__SHIFT 0x10
+#define OTG1_OTG_CRC0_DATA_B__CRC0_B_CB_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC0_DATA_B__CRC0_C_MASK 0xFFFF0000L
+//OTG1_OTG_CRC1_WINDOWA_X_CONTROL
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC1_WINDOWA_Y_CONTROL
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC1_WINDOWB_X_CONTROL
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC1_WINDOWB_Y_CONTROL
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC1_DATA_RG
+#define OTG1_OTG_CRC1_DATA_RG__CRC1_R_CR__SHIFT 0x0
+#define OTG1_OTG_CRC1_DATA_RG__CRC1_G_Y__SHIFT 0x10
+#define OTG1_OTG_CRC1_DATA_RG__CRC1_R_CR_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC1_DATA_RG__CRC1_G_Y_MASK 0xFFFF0000L
+//OTG1_OTG_CRC1_DATA_B
+#define OTG1_OTG_CRC1_DATA_B__CRC1_B_CB__SHIFT 0x0
+#define OTG1_OTG_CRC1_DATA_B__CRC1_C__SHIFT 0x10
+#define OTG1_OTG_CRC1_DATA_B__CRC1_B_CB_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC1_DATA_B__CRC1_C_MASK 0xFFFF0000L
+//OTG1_OTG_CRC2_DATA_RG
+#define OTG1_OTG_CRC2_DATA_RG__CRC2_R_CR__SHIFT 0x0
+#define OTG1_OTG_CRC2_DATA_RG__CRC2_G_Y__SHIFT 0x10
+#define OTG1_OTG_CRC2_DATA_RG__CRC2_R_CR_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC2_DATA_RG__CRC2_G_Y_MASK 0xFFFF0000L
+//OTG1_OTG_CRC2_DATA_B
+#define OTG1_OTG_CRC2_DATA_B__CRC2_B_CB__SHIFT 0x0
+#define OTG1_OTG_CRC2_DATA_B__CRC2_C__SHIFT 0x10
+#define OTG1_OTG_CRC2_DATA_B__CRC2_B_CB_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC2_DATA_B__CRC2_C_MASK 0xFFFF0000L
+//OTG1_OTG_CRC3_DATA_RG
+#define OTG1_OTG_CRC3_DATA_RG__CRC3_R_CR__SHIFT 0x0
+#define OTG1_OTG_CRC3_DATA_RG__CRC3_G_Y__SHIFT 0x10
+#define OTG1_OTG_CRC3_DATA_RG__CRC3_R_CR_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC3_DATA_RG__CRC3_G_Y_MASK 0xFFFF0000L
+//OTG1_OTG_CRC3_DATA_B
+#define OTG1_OTG_CRC3_DATA_B__CRC3_B_CB__SHIFT 0x0
+#define OTG1_OTG_CRC3_DATA_B__CRC3_C__SHIFT 0x10
+#define OTG1_OTG_CRC3_DATA_B__CRC3_B_CB_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC3_DATA_B__CRC3_C_MASK 0xFFFF0000L
+//OTG1_OTG_CRC_SIG_RED_GREEN_MASK
+#define OTG1_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK__SHIFT 0x0
+#define OTG1_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK__SHIFT 0x10
+#define OTG1_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK_MASK 0xFFFF0000L
+//OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK
+#define OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK__SHIFT 0x0
+#define OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK__SHIFT 0x10
+#define OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK_MASK 0xFFFF0000L
+//OTG1_OTG_CRC0_WINDOWA_X_CONTROL_READBACK
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_START_READBACK__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_END_READBACK__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_START_READBACK_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_END_READBACK_MASK 0x7FFF0000L
+//OTG1_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_START_READBACK__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_END_READBACK__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG1_OTG_CRC0_WINDOWB_X_CONTROL_READBACK
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_START_READBACK__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_END_READBACK__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_START_READBACK_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_END_READBACK_MASK 0x7FFF0000L
+//OTG1_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_START_READBACK__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_END_READBACK__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG1_OTG_CRC1_WINDOWA_X_CONTROL_READBACK
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_START_READBACK__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_END_READBACK__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_START_READBACK_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_END_READBACK_MASK 0x7FFF0000L
+//OTG1_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_START_READBACK__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_END_READBACK__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG1_OTG_CRC1_WINDOWB_X_CONTROL_READBACK
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_START_READBACK__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_END_READBACK__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_START_READBACK_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_END_READBACK_MASK 0x7FFF0000L
+//OTG1_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_START_READBACK__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_END_READBACK__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG1_OTG_STATIC_SCREEN_CONTROL
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK__SHIFT 0x0
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT__SHIFT 0x10
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE__SHIFT 0x18
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS__SHIFT 0x19
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS__SHIFT 0x1a
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR__SHIFT 0x1b
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE__SHIFT 0x1c
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE__SHIFT 0x1e
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE__SHIFT 0x1f
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK_MASK 0x0000FFFFL
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT_MASK 0x00FF0000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE_MASK 0x01000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS_MASK 0x02000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS_MASK 0x04000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR_MASK 0x08000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE_MASK 0x10000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_MASK 0x40000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE_MASK 0x80000000L
+//OTG1_OTG_3D_STRUCTURE_CONTROL
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN__SHIFT 0x0
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE__SHIFT 0x8
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR__SHIFT 0xc
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET__SHIFT 0x10
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING__SHIFT 0x11
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT__SHIFT 0x12
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN_MASK 0x00000001L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE_MASK 0x00000300L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR_MASK 0x00001000L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_MASK 0x00010000L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING_MASK 0x00020000L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_MASK 0x000C0000L
+//OTG1_OTG_GSL_VSYNC_GAP
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT__SHIFT 0x0
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY__SHIFT 0x8
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL__SHIFT 0x10
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE__SHIFT 0x11
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR__SHIFT 0x13
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED__SHIFT 0x14
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER__SHIFT 0x17
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP__SHIFT 0x18
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT_MASK 0x000000FFL
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY_MASK 0x0000FF00L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL_MASK 0x00010000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE_MASK 0x00060000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR_MASK 0x00080000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED_MASK 0x00100000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER_MASK 0x00800000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASK 0xFF000000L
+//OTG1_OTG_MASTER_UPDATE_MODE
+#define OTG1_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE__SHIFT 0x0
+#define OTG1_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE_MASK 0x00000003L
+//OTG1_OTG_CLOCK_CONTROL
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_EN__SHIFT 0x0
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS__SHIFT 0x1
+#define OTG1_OTG_CLOCK_CONTROL__OTG_SOFT_RESET__SHIFT 0x4
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_ON__SHIFT 0x8
+#define OTG1_OTG_CLOCK_CONTROL__OTG_BUSY__SHIFT 0x10
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_EN_MASK 0x00000001L
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS_MASK 0x00000002L
+#define OTG1_OTG_CLOCK_CONTROL__OTG_SOFT_RESET_MASK 0x00000010L
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_ON_MASK 0x00000100L
+#define OTG1_OTG_CLOCK_CONTROL__OTG_BUSY_MASK 0x00010000L
+//OTG1_OTG_VSTARTUP_PARAM
+#define OTG1_OTG_VSTARTUP_PARAM__VSTARTUP_START__SHIFT 0x0
+#define OTG1_OTG_VSTARTUP_PARAM__VSTARTUP_START_MASK 0x000003FFL
+//OTG1_OTG_VUPDATE_PARAM
+#define OTG1_OTG_VUPDATE_PARAM__VUPDATE_OFFSET__SHIFT 0x0
+#define OTG1_OTG_VUPDATE_PARAM__VUPDATE_WIDTH__SHIFT 0x10
+#define OTG1_OTG_VUPDATE_PARAM__VUPDATE_OFFSET_MASK 0x0000FFFFL
+#define OTG1_OTG_VUPDATE_PARAM__VUPDATE_WIDTH_MASK 0x03FF0000L
+//OTG1_OTG_VREADY_PARAM
+#define OTG1_OTG_VREADY_PARAM__VREADY_OFFSET__SHIFT 0x0
+#define OTG1_OTG_VREADY_PARAM__VREADY_OFFSET_MASK 0x0000FFFFL
+//OTG1_OTG_GLOBAL_SYNC_STATUS
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE__SHIFT 0x1
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED__SHIFT 0x2
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS__SHIFT 0x3
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR__SHIFT 0x4
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN__SHIFT 0x5
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE__SHIFT 0x6
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL__SHIFT 0x7
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED__SHIFT 0x8
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS__SHIFT 0x9
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR__SHIFT 0xa
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS__SHIFT 0xb
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN__SHIFT 0xc
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE__SHIFT 0xd
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED__SHIFT 0xe
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS__SHIFT 0xf
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR__SHIFT 0x10
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS__SHIFT 0x11
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN__SHIFT 0x12
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE__SHIFT 0x13
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED__SHIFT 0x14
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS__SHIFT 0x15
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR__SHIFT 0x16
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS__SHIFT 0x18
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS__SHIFT 0x19
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN_MASK 0x00000001L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE_MASK 0x00000002L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED_MASK 0x00000004L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS_MASK 0x00000008L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR_MASK 0x00000010L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN_MASK 0x00000020L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE_MASK 0x00000040L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL_MASK 0x00000080L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED_MASK 0x00000100L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS_MASK 0x00000200L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR_MASK 0x00000400L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS_MASK 0x00000800L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN_MASK 0x00001000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE_MASK 0x00002000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED_MASK 0x00004000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS_MASK 0x00008000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR_MASK 0x00010000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS_MASK 0x00020000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN_MASK 0x00040000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE_MASK 0x00080000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED_MASK 0x00100000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS_MASK 0x00200000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR_MASK 0x00400000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS_MASK 0x01000000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS_MASK 0x02000000L
+//OTG1_OTG_MASTER_UPDATE_LOCK
+#define OTG1_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK__SHIFT 0x0
+#define OTG1_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS__SHIFT 0x8
+#define OTG1_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK_MASK 0x00000001L
+#define OTG1_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS_MASK 0x00000100L
+//OTG1_OTG_GSL_CONTROL
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL0_EN__SHIFT 0x0
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL1_EN__SHIFT 0x1
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL2_EN__SHIFT 0x2
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN__SHIFT 0x3
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE__SHIFT 0x4
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY__SHIFT 0x8
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY__SHIFT 0x10
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS__SHIFT 0x1c
+#define OTG1_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN__SHIFT 0x1f
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL0_EN_MASK 0x00000001L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL1_EN_MASK 0x00000002L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL2_EN_MASK 0x00000004L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN_MASK 0x00000008L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE_MASK 0x00000030L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY_MASK 0x00000F00L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY_MASK 0x001F0000L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS_MASK 0x10000000L
+#define OTG1_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN_MASK 0x80000000L
+//OTG1_OTG_GSL_WINDOW_X
+#define OTG1_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X__SHIFT 0x0
+#define OTG1_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X__SHIFT 0x10
+#define OTG1_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG1_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X_MASK 0x7FFF0000L
+//OTG1_OTG_GSL_WINDOW_Y
+#define OTG1_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y__SHIFT 0x0
+#define OTG1_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y__SHIFT 0x10
+#define OTG1_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y_MASK 0x00007FFFL
+#define OTG1_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y_MASK 0x7FFF0000L
+//OTG1_OTG_VUPDATE_KEEPOUT
+#define OTG1_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET__SHIFT 0x0
+#define OTG1_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET__SHIFT 0x10
+#define OTG1_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN__SHIFT 0x1f
+#define OTG1_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET_MASK 0x0000FFFFL
+#define OTG1_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET_MASK 0x03FF0000L
+#define OTG1_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN_MASK 0x80000000L
+//OTG1_OTG_GLOBAL_CONTROL0
+#define OTG1_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_START_X__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_END_X__SHIFT 0x10
+#define OTG1_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_EN__SHIFT 0x1f
+#define OTG1_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_START_X_MASK 0x00007FFFL
+#define OTG1_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_END_X_MASK 0x7FFF0000L
+#define OTG1_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_EN_MASK 0x80000000L
+//OTG1_OTG_GLOBAL_CONTROL1
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_START_Y__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_END_Y__SHIFT 0x10
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_VCOUNT_MODE__SHIFT 0x1f
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_START_Y_MASK 0x00007FFFL
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_END_Y_MASK 0x7FFF0000L
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_VCOUNT_MODE_MASK 0x80000000L
+//OTG1_OTG_GLOBAL_CONTROL2
+#define OTG1_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN__SHIFT 0xa
+#define OTG1_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL__SHIFT 0x10
+#define OTG1_OTG_GLOBAL_CONTROL2__OTG_MASTER_UPDATE_LOCK_SEL__SHIFT 0x19
+#define OTG1_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE__SHIFT 0x1e
+#define OTG1_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE__SHIFT 0x1f
+#define OTG1_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN_MASK 0x00000400L
+#define OTG1_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL_MASK 0x00070000L
+#define OTG1_OTG_GLOBAL_CONTROL2__OTG_MASTER_UPDATE_LOCK_SEL_MASK 0x0E000000L
+#define OTG1_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE_MASK 0x40000000L
+#define OTG1_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE_MASK 0x80000000L
+//OTG1_OTG_GLOBAL_CONTROL3
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL__SHIFT 0x4
+#define OTG1_OTG_GLOBAL_CONTROL3__DIG_UPDATE_FIELD_SEL__SHIFT 0x10
+#define OTG1_OTG_GLOBAL_CONTROL3__DIG_UPDATE_EYE_SEL__SHIFT 0x14
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_MASK 0x00000003L
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL_MASK 0x00000030L
+#define OTG1_OTG_GLOBAL_CONTROL3__DIG_UPDATE_FIELD_SEL_MASK 0x00030000L
+#define OTG1_OTG_GLOBAL_CONTROL3__DIG_UPDATE_EYE_SEL_MASK 0x00300000L
+//OTG1_OTG_GLOBAL_CONTROL4
+#define OTG1_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_X__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_Y__SHIFT 0x10
+#define OTG1_OTG_GLOBAL_CONTROL4__DIG_UPDATE_VCOUNT_MODE__SHIFT 0x1f
+#define OTG1_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_X_MASK 0x00007FFFL
+#define OTG1_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_Y_MASK 0x7FFF0000L
+#define OTG1_OTG_GLOBAL_CONTROL4__DIG_UPDATE_VCOUNT_MODE_MASK 0x80000000L
+//OTG1_OTG_TRIG_MANUAL_CONTROL
+#define OTG1_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL__SHIFT 0x0
+#define OTG1_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL_MASK 0x00000001L
+//OTG1_OTG_DRR_TIMING_INT_STATUS
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED__SHIFT 0x0
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT__SHIFT 0x4
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_CLEAR__SHIFT 0x8
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MSK__SHIFT 0xc
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_TYPE__SHIFT 0xd
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED__SHIFT 0x10
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT__SHIFT 0x14
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_CLEAR__SHIFT 0x18
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MSK__SHIFT 0x1c
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_TYPE__SHIFT 0x1d
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_MASK 0x00000001L
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MASK 0x00000010L
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_CLEAR_MASK 0x00000100L
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MSK_MASK 0x00001000L
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_TYPE_MASK 0x00002000L
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_MASK 0x00010000L
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MASK 0x00100000L
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_CLEAR_MASK 0x01000000L
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MSK_MASK 0x10000000L
+#define OTG1_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_TYPE_MASK 0x20000000L
+//OTG1_OTG_DRR_V_TOTAL_REACH_RANGE
+#define OTG1_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_LOWER_RANGE__SHIFT 0x0
+#define OTG1_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_UPPER_RANGE__SHIFT 0x10
+#define OTG1_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_LOWER_RANGE_MASK 0x00007FFFL
+#define OTG1_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_UPPER_RANGE_MASK 0x7FFF0000L
+//OTG1_OTG_DRR_V_TOTAL_CHANGE
+#define OTG1_OTG_DRR_V_TOTAL_CHANGE__OTG_DRR_V_TOTAL_CHANGE_LIMIT__SHIFT 0x0
+#define OTG1_OTG_DRR_V_TOTAL_CHANGE__OTG_DRR_V_TOTAL_CHANGE_LIMIT_MASK 0x00007FFFL
+//OTG1_OTG_DRR_TRIGGER_WINDOW
+#define OTG1_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_START_X__SHIFT 0x0
+#define OTG1_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_END_X__SHIFT 0x10
+#define OTG1_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG1_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_END_X_MASK 0x7FFF0000L
+//OTG1_OTG_DRR_CONTROL
+#define OTG1_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME__SHIFT 0x0
+#define OTG1_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR__SHIFT 0x10
+#define OTG1_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME_MASK 0x00000003L
+#define OTG1_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR_MASK 0x7FFF0000L
+//OTG1_OTG_DRR_CONTOL2
+#define OTG1_OTG_DRR_CONTOL2__OTG_VCOUNT2_LAST_USED_BY_DRR__SHIFT 0x0
+#define OTG1_OTG_DRR_CONTOL2__OTG_VCOUNT2_LAST_USED_BY_DRR_MASK 0xFFFFFFFFL
+//OTG1_OTG_M_CONST_DTO0
+#define OTG1_OTG_M_CONST_DTO0__OTG_M_CONST_DTO_PHASE__SHIFT 0x0
+#define OTG1_OTG_M_CONST_DTO0__OTG_M_CONST_DTO_PHASE_MASK 0xFFFFFFFFL
+//OTG1_OTG_M_CONST_DTO1
+#define OTG1_OTG_M_CONST_DTO1__OTG_M_CONST_DTO_MODULO__SHIFT 0x0
+#define OTG1_OTG_M_CONST_DTO1__OTG_M_CONST_DTO_MODULO_MASK 0xFFFFFFFFL
+//OTG1_OTG_REQUEST_CONTROL
+#define OTG1_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE__SHIFT 0x0
+#define OTG1_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE_MASK 0x00000001L
+//OTG1_OTG_DSC_START_POSITION
+#define OTG1_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X__SHIFT 0x0
+#define OTG1_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM__SHIFT 0x10
+#define OTG1_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X_MASK 0x00007FFFL
+#define OTG1_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM_MASK 0x03FF0000L
+//OTG1_OTG_PIPE_UPDATE_STATUS
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING__SHIFT 0x0
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING__SHIFT 0x4
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING__SHIFT 0x8
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS__SHIFT 0x10
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING_MASK 0x00000001L
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING_MASK 0x00000010L
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING_MASK 0x00000100L
+#define OTG1_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS_MASK 0x00010000L
+//OTG1_OTG_SPARE_REGISTER
+#define OTG1_OTG_SPARE_REGISTER__OTG_SPARE_REG__SHIFT 0x0
+#define OTG1_OTG_SPARE_REGISTER__OTG_SPARE_REG_MASK 0xFFFFFFFFL
+
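+// Editorial note (not part of the generated header): the shift/mask pairs above are
+// conventionally combined to pack and unpack individual register fields. The sketch
+// below is a minimal, self-contained illustration using the OTG1 CRC0 window-A X
+// macros defined earlier in this hunk; the set_field()/get_field() helpers, the plain
+// local register variable, and the example window coordinates are illustrative
+// assumptions, not amdgpu driver code (a real driver goes through its own MMIO
+// read/write helpers).
+//
+// #include <stdint.h>
+// #include <stdio.h>
+//
+// #define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START__SHIFT 0x0
+// #define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END__SHIFT   0x10
+// #define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START_MASK   0x00007FFFL
+// #define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END_MASK     0x7FFF0000L
+//
+// /* Set a field: clear its mask bits, then OR in the shifted value. */
+// static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift, uint32_t val)
+// {
+// 	return (reg & ~mask) | ((val << shift) & mask);
+// }
+//
+// /* Get a field: isolate the mask bits, then shift them down to bit 0. */
+// static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
+// {
+// 	return (reg & mask) >> shift;
+// }
+//
+// int main(void)
+// {
+// 	uint32_t reg = 0;
+//
+// 	/* Example window: X start = 64, X end = 1984 (arbitrary illustrative values). */
+// 	reg = set_field(reg,
+// 			OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START_MASK,
+// 			OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START__SHIFT, 64);
+// 	reg = set_field(reg,
+// 			OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END_MASK,
+// 			OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END__SHIFT, 1984);
+//
+// 	printf("reg=0x%08x start=%u end=%u\n", reg,
+// 	       get_field(reg,
+// 			 OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START_MASK,
+// 			 OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START__SHIFT),
+// 	       get_field(reg,
+// 			 OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END_MASK,
+// 			 OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END__SHIFT));
+// 	return 0;
+// }
+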
+
+// addressBlock: dce_dc_optc_otg2_dispdec
+//OTG2_OTG_H_TOTAL
+#define OTG2_OTG_H_TOTAL__OTG_H_TOTAL__SHIFT 0x0
+#define OTG2_OTG_H_TOTAL__OTG_H_TOTAL_MASK 0x00007FFFL
+//OTG2_OTG_H_BLANK_START_END
+#define OTG2_OTG_H_BLANK_START_END__OTG_H_BLANK_START__SHIFT 0x0
+#define OTG2_OTG_H_BLANK_START_END__OTG_H_BLANK_END__SHIFT 0x10
+#define OTG2_OTG_H_BLANK_START_END__OTG_H_BLANK_START_MASK 0x00007FFFL
+#define OTG2_OTG_H_BLANK_START_END__OTG_H_BLANK_END_MASK 0x7FFF0000L
+//OTG2_OTG_H_SYNC_A
+#define OTG2_OTG_H_SYNC_A__OTG_H_SYNC_A_START__SHIFT 0x0
+#define OTG2_OTG_H_SYNC_A__OTG_H_SYNC_A_END__SHIFT 0x10
+#define OTG2_OTG_H_SYNC_A__OTG_H_SYNC_A_START_MASK 0x00007FFFL
+#define OTG2_OTG_H_SYNC_A__OTG_H_SYNC_A_END_MASK 0x7FFF0000L
+//OTG2_OTG_H_SYNC_A_CNTL
+#define OTG2_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL__SHIFT 0x0
+#define OTG2_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN__SHIFT 0x10
+#define OTG2_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF__SHIFT 0x11
+#define OTG2_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL_MASK 0x00000001L
+#define OTG2_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN_MASK 0x00010000L
+#define OTG2_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF_MASK 0x00020000L
+//OTG2_OTG_H_TIMING_CNTL
+#define OTG2_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE__SHIFT 0x0
+#define OTG2_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MANUAL__SHIFT 0x8
+#define OTG2_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_CURR__SHIFT 0x10
+#define OTG2_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MASK 0x00000003L
+#define OTG2_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MANUAL_MASK 0x00000100L
+#define OTG2_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_CURR_MASK 0x00030000L
+//OTG2_OTG_V_TOTAL
+#define OTG2_OTG_V_TOTAL__OTG_V_TOTAL__SHIFT 0x0
+#define OTG2_OTG_V_TOTAL__OTG_V_TOTAL_MASK 0x00007FFFL
+//OTG2_OTG_V_TOTAL_MIN
+#define OTG2_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN__SHIFT 0x0
+#define OTG2_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN_MASK 0x00007FFFL
+//OTG2_OTG_V_TOTAL_MAX
+#define OTG2_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX__SHIFT 0x0
+#define OTG2_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX_MASK 0x00007FFFL
+//OTG2_OTG_V_TOTAL_MID
+#define OTG2_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID__SHIFT 0x0
+#define OTG2_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID_MASK 0x00007FFFL
+//OTG2_OTG_V_TOTAL_CONTROL
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL__SHIFT 0x0
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL__SHIFT 0x1
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN__SHIFT 0x2
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN__SHIFT 0x3
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT__SHIFT 0x4
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD__SHIFT 0x5
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM__SHIFT 0x8
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK__SHIFT 0x10
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL_MASK 0x00000001L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL_MASK 0x00000002L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN_MASK 0x00000004L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN_MASK 0x00000008L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT_MASK 0x00000010L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD_MASK 0x00000020L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM_MASK 0x0000FF00L
+#define OTG2_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_MASK 0xFFFF0000L
+//OTG2_OTG_V_COUNT_STOP_CONTROL
+#define OTG2_OTG_V_COUNT_STOP_CONTROL__OTG_V_COUNT_STOP__SHIFT 0x0
+#define OTG2_OTG_V_COUNT_STOP_CONTROL__OTG_V_COUNT_STOP_MASK 0x00007FFFL
+//OTG2_OTG_V_COUNT_STOP_CONTROL2
+#define OTG2_OTG_V_COUNT_STOP_CONTROL2__OTG_V_COUNT_STOP_TIMER__SHIFT 0x0
+#define OTG2_OTG_V_COUNT_STOP_CONTROL2__OTG_V_COUNT_STOP_TIMER_MASK 0xFFFFFFFFL
+//OTG2_OTG_V_TOTAL_INT_STATUS
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED__SHIFT 0x0
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT__SHIFT 0x4
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK__SHIFT 0x8
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK__SHIFT 0xc
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MASK 0x00000001L
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT_MASK 0x00000010L
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK_MASK 0x00000100L
+#define OTG2_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK_MASK 0x00001000L
+//OTG2_OTG_VSYNC_NOM_INT_STATUS
+#define OTG2_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM__SHIFT 0x0
+#define OTG2_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR__SHIFT 0x4
+#define OTG2_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_MASK 0x00000001L
+#define OTG2_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR_MASK 0x00000010L
+//OTG2_OTG_V_BLANK_START_END
+#define OTG2_OTG_V_BLANK_START_END__OTG_V_BLANK_START__SHIFT 0x0
+#define OTG2_OTG_V_BLANK_START_END__OTG_V_BLANK_END__SHIFT 0x10
+#define OTG2_OTG_V_BLANK_START_END__OTG_V_BLANK_START_MASK 0x00007FFFL
+#define OTG2_OTG_V_BLANK_START_END__OTG_V_BLANK_END_MASK 0x7FFF0000L
+//OTG2_OTG_V_SYNC_A
+#define OTG2_OTG_V_SYNC_A__OTG_V_SYNC_A_START__SHIFT 0x0
+#define OTG2_OTG_V_SYNC_A__OTG_V_SYNC_A_END__SHIFT 0x10
+#define OTG2_OTG_V_SYNC_A__OTG_V_SYNC_A_START_MASK 0x00007FFFL
+#define OTG2_OTG_V_SYNC_A__OTG_V_SYNC_A_END_MASK 0x7FFF0000L
+//OTG2_OTG_V_SYNC_A_CNTL
+#define OTG2_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL__SHIFT 0x0
+#define OTG2_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_MODE__SHIFT 0x8
+#define OTG2_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL_MASK 0x00000001L
+#define OTG2_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_MODE_MASK 0x00000100L
+//OTG2_OTG_TRIGA_CNTL
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT__SHIFT 0x0
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT__SHIFT 0x8
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS__SHIFT 0xc
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS__SHIFT 0xd
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED__SHIFT 0xe
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY__SHIFT 0x18
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR__SHIFT 0x1f
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT_MASK 0x00000700L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS_MASK 0x00001000L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS_MASK 0x00002000L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED_MASK 0x00004000L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY_MASK 0x1F000000L
+#define OTG2_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR_MASK 0x80000000L
+//OTG2_OTG_TRIGA_MANUAL_TRIG
+#define OTG2_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG__SHIFT 0x0
+#define OTG2_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG_MASK 0x00000001L
+//OTG2_OTG_TRIGB_CNTL
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT__SHIFT 0x0
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT__SHIFT 0x8
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS__SHIFT 0xc
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS__SHIFT 0xd
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED__SHIFT 0xe
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY__SHIFT 0x18
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR__SHIFT 0x1f
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT_MASK 0x00000700L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS_MASK 0x00001000L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS_MASK 0x00002000L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED_MASK 0x00004000L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY_MASK 0x1F000000L
+#define OTG2_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR_MASK 0x80000000L
+//OTG2_OTG_TRIGB_MANUAL_TRIG
+#define OTG2_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG__SHIFT 0x0
+#define OTG2_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG_MASK 0x00000001L
+//OTG2_OTG_FORCE_COUNT_NOW_CNTL
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE__SHIFT 0x0
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK__SHIFT 0x4
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL__SHIFT 0x8
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED__SHIFT 0x10
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR__SHIFT 0x18
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE_MASK 0x00000003L
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK_MASK 0x00000010L
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL_MASK 0x00000100L
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED_MASK 0x00010000L
+#define OTG2_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR_MASK 0x01000000L
+//OTG2_OTG_STEREO_FORCE_NEXT_EYE
+#define OTG2_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE__SHIFT 0x0
+#define OTG2_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE_MASK 0x00000003L
+//OTG2_OTG_CONTROL
+#define OTG2_OTG_CONTROL__OTG_MASTER_EN__SHIFT 0x0
+#define OTG2_OTG_CONTROL__OTG_DISABLE_POINT_CNTL__SHIFT 0x8
+#define OTG2_OTG_CONTROL__OTG_START_POINT_CNTL__SHIFT 0xc
+#define OTG2_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL__SHIFT 0xd
+#define OTG2_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY__SHIFT 0xe
+#define OTG2_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE__SHIFT 0x10
+#define OTG2_OTG_CONTROL__OTG_OUT_MUX__SHIFT 0x14
+#define OTG2_OTG_CONTROL__OTG_MASTER_EN_MASK 0x00000001L
+#define OTG2_OTG_CONTROL__OTG_DISABLE_POINT_CNTL_MASK 0x00000300L
+#define OTG2_OTG_CONTROL__OTG_START_POINT_CNTL_MASK 0x00001000L
+#define OTG2_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL_MASK 0x00002000L
+#define OTG2_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY_MASK 0x00004000L
+#define OTG2_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE_MASK 0x00010000L
+#define OTG2_OTG_CONTROL__OTG_OUT_MUX_MASK 0x00300000L
+//OTG2_OTG_DLPC_CONTROL
+#define OTG2_OTG_DLPC_CONTROL__OTG_RESYNC_MODE__SHIFT 0x0
+#define OTG2_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_LOCATION__SHIFT 0x10
+#define OTG2_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_CURRENT__SHIFT 0x1f
+#define OTG2_OTG_DLPC_CONTROL__OTG_RESYNC_MODE_MASK 0x00000001L
+#define OTG2_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_LOCATION_MASK 0x7FFF0000L
+#define OTG2_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_CURRENT_MASK 0x80000000L
+//OTG2_OTG_INTERLACE_CONTROL
+#define OTG2_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE__SHIFT 0x0
+#define OTG2_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD__SHIFT 0x10
+#define OTG2_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE_MASK 0x00000001L
+#define OTG2_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD_MASK 0x00030000L
+//OTG2_OTG_INTERLACE_STATUS
+#define OTG2_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD__SHIFT 0x0
+#define OTG2_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD__SHIFT 0x1
+#define OTG2_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD_MASK 0x00000001L
+#define OTG2_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD_MASK 0x00000002L
+//OTG2_OTG_PIXEL_DATA_READBACK0
+#define OTG2_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB__SHIFT 0x0
+#define OTG2_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y__SHIFT 0x10
+#define OTG2_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB_MASK 0x0000FFFFL
+#define OTG2_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y_MASK 0xFFFF0000L
+//OTG2_OTG_PIXEL_DATA_READBACK1
+#define OTG2_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR__SHIFT 0x0
+#define OTG2_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR_MASK 0x0000FFFFL
+//OTG2_OTG_STATUS
+#define OTG2_OTG_STATUS__OTG_V_BLANK__SHIFT 0x0
+#define OTG2_OTG_STATUS__OTG_V_ACTIVE_DISP__SHIFT 0x1
+#define OTG2_OTG_STATUS__OTG_V_SYNC_A__SHIFT 0x2
+#define OTG2_OTG_STATUS__OTG_V_UPDATE__SHIFT 0x3
+#define OTG2_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE__SHIFT 0x5
+#define OTG2_OTG_STATUS__OTG_H_BLANK__SHIFT 0x10
+#define OTG2_OTG_STATUS__OTG_H_ACTIVE_DISP__SHIFT 0x11
+#define OTG2_OTG_STATUS__OTG_H_SYNC_A__SHIFT 0x12
+#define OTG2_OTG_STATUS__OTG_V_BLANK_MASK 0x00000001L
+#define OTG2_OTG_STATUS__OTG_V_ACTIVE_DISP_MASK 0x00000002L
+#define OTG2_OTG_STATUS__OTG_V_SYNC_A_MASK 0x00000004L
+#define OTG2_OTG_STATUS__OTG_V_UPDATE_MASK 0x00000008L
+#define OTG2_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE_MASK 0x00000020L
+#define OTG2_OTG_STATUS__OTG_H_BLANK_MASK 0x00010000L
+#define OTG2_OTG_STATUS__OTG_H_ACTIVE_DISP_MASK 0x00020000L
+#define OTG2_OTG_STATUS__OTG_H_SYNC_A_MASK 0x00040000L
+//OTG2_OTG_STATUS_POSITION
+#define OTG2_OTG_STATUS_POSITION__OTG_VERT_COUNT__SHIFT 0x0
+#define OTG2_OTG_STATUS_POSITION__OTG_VERT_LONG_VBLANK__SHIFT 0xf
+#define OTG2_OTG_STATUS_POSITION__OTG_HORZ_COUNT__SHIFT 0x10
+#define OTG2_OTG_STATUS_POSITION__OTG_VERT_COUNT_MASK 0x00007FFFL
+#define OTG2_OTG_STATUS_POSITION__OTG_VERT_LONG_VBLANK_MASK 0x00008000L
+#define OTG2_OTG_STATUS_POSITION__OTG_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG2_OTG_LONG_VBLANK_STATUS
+#define OTG2_OTG_LONG_VBLANK_STATUS__OTG_V_COUNT_STOP_COUNT__SHIFT 0x0
+#define OTG2_OTG_LONG_VBLANK_STATUS__OTG_V_COUNT_STOP_COUNT_MASK 0xFFFFFFFFL
+//OTG2_OTG_NOM_VERT_POSITION
+#define OTG2_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM__SHIFT 0x0
+#define OTG2_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM_MASK 0x00007FFFL
+//OTG2_OTG_STATUS_FRAME_COUNT
+#define OTG2_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT__SHIFT 0x0
+#define OTG2_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG2_OTG_STATUS_VF_COUNT
+#define OTG2_OTG_STATUS_VF_COUNT__OTG_VF_COUNT__SHIFT 0x0
+#define OTG2_OTG_STATUS_VF_COUNT__OTG_VF_COUNT_MASK 0x7FFFFFFFL
+//OTG2_OTG_STATUS_HV_COUNT
+#define OTG2_OTG_STATUS_HV_COUNT__OTG_HV_COUNT__SHIFT 0x0
+#define OTG2_OTG_STATUS_HV_COUNT__OTG_HV_COUNT_MASK 0x7FFFFFFFL
+//OTG2_OTG_COUNT_CONTROL
+#define OTG2_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN__SHIFT 0x0
+#define OTG2_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT__SHIFT 0x1
+#define OTG2_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN_MASK 0x00000001L
+#define OTG2_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT_MASK 0x0000001EL
+//OTG2_OTG_COUNT_RESET
+#define OTG2_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT__SHIFT 0x0
+#define OTG2_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT_MASK 0x00000001L
+//OTG2_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE
+#define OTG2_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__SHIFT 0x0
+#define OTG2_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_MASK 0x00000001L
+//OTG2_OTG_VERT_SYNC_CONTROL
+#define OTG2_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED__SHIFT 0x0
+#define OTG2_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR__SHIFT 0x8
+#define OTG2_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE__SHIFT 0x10
+#define OTG2_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED_MASK 0x00000001L
+#define OTG2_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR_MASK 0x00000100L
+#define OTG2_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE_MASK 0x00030000L
+//OTG2_OTG_STEREO_STATUS
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE__SHIFT 0x0
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT__SHIFT 0x8
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT__SHIFT 0x10
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG__SHIFT 0x14
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING__SHIFT 0x18
+#define OTG2_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE__SHIFT 0x1e
+#define OTG2_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE__SHIFT 0x1f
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE_MASK 0x00000001L
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT_MASK 0x00000100L
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT_MASK 0x00010000L
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG_MASK 0x00100000L
+#define OTG2_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING_MASK 0x03000000L
+#define OTG2_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE_MASK 0x40000000L
+#define OTG2_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE_MASK 0x80000000L
+//OTG2_OTG_STEREO_CONTROL
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM__SHIFT 0x0
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY__SHIFT 0xf
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY__SHIFT 0x11
+#define OTG2_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP__SHIFT 0x12
+#define OTG2_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM__SHIFT 0x13
+#define OTG2_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX__SHIFT 0x14
+#define OTG2_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL__SHIFT 0x15
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_EN__SHIFT 0x18
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM_MASK 0x00007FFFL
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY_MASK 0x00008000L
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY_MASK 0x00020000L
+#define OTG2_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP_MASK 0x00040000L
+#define OTG2_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM_MASK 0x00080000L
+#define OTG2_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX_MASK 0x00100000L
+#define OTG2_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL_MASK 0x00200000L
+#define OTG2_OTG_STEREO_CONTROL__OTG_STEREO_EN_MASK 0x01000000L
+//OTG2_OTG_SNAPSHOT_STATUS
+#define OTG2_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED__SHIFT 0x0
+#define OTG2_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR__SHIFT 0x1
+#define OTG2_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER__SHIFT 0x2
+#define OTG2_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED_MASK 0x00000001L
+#define OTG2_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR_MASK 0x00000002L
+#define OTG2_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER_MASK 0x00000004L
+//OTG2_OTG_SNAPSHOT_CONTROL
+#define OTG2_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL__SHIFT 0x0
+#define OTG2_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL_MASK 0x00000003L
+//OTG2_OTG_SNAPSHOT_POSITION
+#define OTG2_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT__SHIFT 0x0
+#define OTG2_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT__SHIFT 0x10
+#define OTG2_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT_MASK 0x00007FFFL
+#define OTG2_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG2_OTG_SNAPSHOT_FRAME
+#define OTG2_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT__SHIFT 0x0
+#define OTG2_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG2_OTG_INTERRUPT_CONTROL
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK__SHIFT 0x0
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE__SHIFT 0x1
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK__SHIFT 0x8
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE__SHIFT 0x9
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK__SHIFT 0x10
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE__SHIFT 0x11
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK__SHIFT 0x18
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK__SHIFT 0x19
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE__SHIFT 0x1a
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE__SHIFT 0x1b
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK__SHIFT 0x1c
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE__SHIFT 0x1d
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK__SHIFT 0x1e
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE__SHIFT 0x1f
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK_MASK 0x00000001L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE_MASK 0x00000002L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK_MASK 0x00000100L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE_MASK 0x00000200L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK_MASK 0x00010000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE_MASK 0x00020000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK_MASK 0x01000000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK_MASK 0x02000000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE_MASK 0x04000000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE_MASK 0x08000000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK_MASK 0x10000000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE_MASK 0x20000000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK_MASK 0x40000000L
+#define OTG2_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE_MASK 0x80000000L
+//OTG2_OTG_UPDATE_LOCK
+#define OTG2_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK__SHIFT 0x0
+#define OTG2_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK_MASK 0x00000001L
+//OTG2_OTG_DOUBLE_BUFFER_CONTROL
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING__SHIFT 0x0
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_PENDING__SHIFT 0x4
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING__SHIFT 0x5
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING__SHIFT 0x6
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING__SHIFT 0x7
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY__SHIFT 0x8
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING__SHIFT 0x9
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING__SHIFT 0xa
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_MODE__SHIFT 0x18
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING_MASK 0x00000001L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_PENDING_MASK 0x00000010L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING_MASK 0x00000020L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING_MASK 0x00000040L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING_MASK 0x00000080L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY_MASK 0x00000100L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING_MASK 0x00000200L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING_MASK 0x00000400L
+#define OTG2_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_MODE_MASK 0x03000000L
+//OTG2_OTG_MASTER_EN
+#define OTG2_OTG_MASTER_EN__OTG_MASTER_EN__SHIFT 0x0
+#define OTG2_OTG_MASTER_EN__OTG_MASTER_EN_MASK 0x00000001L
+//OTG2_OTG_VERTICAL_INTERRUPT0_POSITION
+#define OTG2_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START__SHIFT 0x0
+#define OTG2_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END__SHIFT 0x10
+#define OTG2_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START_MASK 0x00007FFFL
+#define OTG2_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END_MASK 0x7FFF0000L
+//OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY__SHIFT 0x4
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE__SHIFT 0x8
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS__SHIFT 0xc
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS__SHIFT 0x10
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR__SHIFT 0x14
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE__SHIFT 0x18
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VINTE_STATUS__SHIFT 0x1c
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_MASK 0x00000010L
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE_MASK 0x00000100L
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS_MASK 0x00001000L
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS_MASK 0x00010000L
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR_MASK 0x00100000L
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE_MASK 0x01000000L
+#define OTG2_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VINTE_STATUS_MASK 0x10000000L
+//OTG2_OTG_VERTICAL_INTERRUPT1_POSITION
+#define OTG2_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START__SHIFT 0x0
+#define OTG2_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START_MASK 0x00007FFFL
+//OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE__SHIFT 0x8
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS__SHIFT 0xc
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS__SHIFT 0x10
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR__SHIFT 0x14
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE__SHIFT 0x18
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE_MASK 0x00000100L
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS_MASK 0x00001000L
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS_MASK 0x00010000L
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR_MASK 0x00100000L
+#define OTG2_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE_MASK 0x01000000L
+//OTG2_OTG_VERTICAL_INTERRUPT2_POSITION
+#define OTG2_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START__SHIFT 0x0
+#define OTG2_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START_MASK 0x00007FFFL
+//OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE__SHIFT 0x8
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS__SHIFT 0xc
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS__SHIFT 0x10
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR__SHIFT 0x14
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE__SHIFT 0x18
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE_MASK 0x00000100L
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS_MASK 0x00001000L
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS_MASK 0x00010000L
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR_MASK 0x00100000L
+#define OTG2_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE_MASK 0x01000000L
+//OTG2_OTG_CRC_CNTL
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_EN__SHIFT 0x0
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_WINDOW_DB_EN__SHIFT 0x1
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_POLY_SEL__SHIFT 0x2
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY__SHIFT 0x3
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_CONT_EN__SHIFT 0x4
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL__SHIFT 0x5
+#define OTG2_OTG_CRC_CNTL__OTG_CRC1_EN__SHIFT 0x7
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE__SHIFT 0x8
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_CONT_MODE__SHIFT 0xa
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x13
+#define OTG2_OTG_CRC_CNTL__OTG_CRC0_SELECT__SHIFT 0x14
+#define OTG2_OTG_CRC_CNTL__OTG_CRC1_SELECT__SHIFT 0x18
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING__SHIFT 0x1c
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING__SHIFT 0x1d
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING__SHIFT 0x1e
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING__SHIFT 0x1f
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_EN_MASK 0x00000001L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_WINDOW_DB_EN_MASK 0x00000002L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_POLY_SEL_MASK 0x00000004L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY_MASK 0x00000008L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_CONT_EN_MASK 0x00000010L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL_MASK 0x00000060L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC1_EN_MASK 0x00000080L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE_MASK 0x00000300L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_CONT_MODE_MASK 0x00000400L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x00080000L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC0_SELECT_MASK 0x00700000L
+#define OTG2_OTG_CRC_CNTL__OTG_CRC1_SELECT_MASK 0x07000000L
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING_MASK 0x10000000L
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING_MASK 0x20000000L
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING_MASK 0x40000000L
+#define OTG2_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING_MASK 0x80000000L
+//OTG2_OTG_CRC0_WINDOWA_X_CONTROL
+#define OTG2_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START__SHIFT 0x0
+#define OTG2_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END__SHIFT 0x10
+#define OTG2_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG2_OTG_CRC0_WINDOWA_Y_CONTROL
+#define OTG2_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START__SHIFT 0x0
+#define OTG2_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END__SHIFT 0x10
+#define OTG2_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG2_OTG_CRC0_WINDOWB_X_CONTROL
+#define OTG2_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START__SHIFT 0x0
+#define OTG2_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END__SHIFT 0x10
+#define OTG2_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG2_OTG_CRC0_WINDOWB_Y_CONTROL
+#define OTG2_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START__SHIFT 0x0
+#define OTG2_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END__SHIFT 0x10
+#define OTG2_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG2_OTG_CRC0_DATA_RG
+#define OTG2_OTG_CRC0_DATA_RG__CRC0_R_CR__SHIFT 0x0
+#define OTG2_OTG_CRC0_DATA_RG__CRC0_G_Y__SHIFT 0x10
+#define OTG2_OTG_CRC0_DATA_RG__CRC0_R_CR_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC0_DATA_RG__CRC0_G_Y_MASK 0xFFFF0000L
+//OTG2_OTG_CRC0_DATA_B
+#define OTG2_OTG_CRC0_DATA_B__CRC0_B_CB__SHIFT 0x0
+#define OTG2_OTG_CRC0_DATA_B__CRC0_C__SHIFT 0x10
+#define OTG2_OTG_CRC0_DATA_B__CRC0_B_CB_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC0_DATA_B__CRC0_C_MASK 0xFFFF0000L
+//OTG2_OTG_CRC1_WINDOWA_X_CONTROL
+#define OTG2_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START__SHIFT 0x0
+#define OTG2_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END__SHIFT 0x10
+#define OTG2_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG2_OTG_CRC1_WINDOWA_Y_CONTROL
+#define OTG2_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START__SHIFT 0x0
+#define OTG2_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END__SHIFT 0x10
+#define OTG2_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG2_OTG_CRC1_WINDOWB_X_CONTROL
+#define OTG2_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START__SHIFT 0x0
+#define OTG2_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END__SHIFT 0x10
+#define OTG2_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG2_OTG_CRC1_WINDOWB_Y_CONTROL
+#define OTG2_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START__SHIFT 0x0
+#define OTG2_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END__SHIFT 0x10
+#define OTG2_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG2_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG2_OTG_CRC1_DATA_RG
+#define OTG2_OTG_CRC1_DATA_RG__CRC1_R_CR__SHIFT 0x0
+#define OTG2_OTG_CRC1_DATA_RG__CRC1_G_Y__SHIFT 0x10
+#define OTG2_OTG_CRC1_DATA_RG__CRC1_R_CR_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC1_DATA_RG__CRC1_G_Y_MASK 0xFFFF0000L
+//OTG2_OTG_CRC1_DATA_B
+#define OTG2_OTG_CRC1_DATA_B__CRC1_B_CB__SHIFT 0x0
+#define OTG2_OTG_CRC1_DATA_B__CRC1_C__SHIFT 0x10
+#define OTG2_OTG_CRC1_DATA_B__CRC1_B_CB_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC1_DATA_B__CRC1_C_MASK 0xFFFF0000L
+//OTG2_OTG_CRC2_DATA_RG
+#define OTG2_OTG_CRC2_DATA_RG__CRC2_R_CR__SHIFT 0x0
+#define OTG2_OTG_CRC2_DATA_RG__CRC2_G_Y__SHIFT 0x10
+#define OTG2_OTG_CRC2_DATA_RG__CRC2_R_CR_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC2_DATA_RG__CRC2_G_Y_MASK 0xFFFF0000L
+//OTG2_OTG_CRC2_DATA_B
+#define OTG2_OTG_CRC2_DATA_B__CRC2_B_CB__SHIFT 0x0
+#define OTG2_OTG_CRC2_DATA_B__CRC2_C__SHIFT 0x10
+#define OTG2_OTG_CRC2_DATA_B__CRC2_B_CB_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC2_DATA_B__CRC2_C_MASK 0xFFFF0000L
+//OTG2_OTG_CRC3_DATA_RG
+#define OTG2_OTG_CRC3_DATA_RG__CRC3_R_CR__SHIFT 0x0
+#define OTG2_OTG_CRC3_DATA_RG__CRC3_G_Y__SHIFT 0x10
+#define OTG2_OTG_CRC3_DATA_RG__CRC3_R_CR_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC3_DATA_RG__CRC3_G_Y_MASK 0xFFFF0000L
+//OTG2_OTG_CRC3_DATA_B
+#define OTG2_OTG_CRC3_DATA_B__CRC3_B_CB__SHIFT 0x0
+#define OTG2_OTG_CRC3_DATA_B__CRC3_C__SHIFT 0x10
+#define OTG2_OTG_CRC3_DATA_B__CRC3_B_CB_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC3_DATA_B__CRC3_C_MASK 0xFFFF0000L
+//OTG2_OTG_CRC_SIG_RED_GREEN_MASK
+#define OTG2_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK__SHIFT 0x0
+#define OTG2_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK__SHIFT 0x10
+#define OTG2_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK_MASK 0xFFFF0000L
+//OTG2_OTG_CRC_SIG_BLUE_CONTROL_MASK
+#define OTG2_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK__SHIFT 0x0
+#define OTG2_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK__SHIFT 0x10
+#define OTG2_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK_MASK 0x0000FFFFL
+#define OTG2_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK_MASK 0xFFFF0000L
+//OTG2_OTG_CRC0_WINDOWA_X_CONTROL_READBACK
+#define OTG2_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_START_READBACK__SHIFT 0x0
+#define OTG2_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_END_READBACK__SHIFT 0x10
+#define OTG2_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_START_READBACK_MASK 0x00007FFFL
+#define OTG2_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_END_READBACK_MASK 0x7FFF0000L
+//OTG2_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK
+#define OTG2_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_START_READBACK__SHIFT 0x0
+#define OTG2_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_END_READBACK__SHIFT 0x10
+#define OTG2_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG2_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG2_OTG_CRC0_WINDOWB_X_CONTROL_READBACK
+#define OTG2_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_START_READBACK__SHIFT 0x0
+#define OTG2_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_END_READBACK__SHIFT 0x10
+#define OTG2_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_START_READBACK_MASK 0x00007FFFL
+#define OTG2_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_END_READBACK_MASK 0x7FFF0000L
+//OTG2_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK
+#define OTG2_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_START_READBACK__SHIFT 0x0
+#define OTG2_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_END_READBACK__SHIFT 0x10
+#define OTG2_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG2_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG2_OTG_CRC1_WINDOWA_X_CONTROL_READBACK
+#define OTG2_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_START_READBACK__SHIFT 0x0
+#define OTG2_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_END_READBACK__SHIFT 0x10
+#define OTG2_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_START_READBACK_MASK 0x00007FFFL
+#define OTG2_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_END_READBACK_MASK 0x7FFF0000L
+//OTG2_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK
+#define OTG2_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_START_READBACK__SHIFT 0x0
+#define OTG2_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_END_READBACK__SHIFT 0x10
+#define OTG2_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG2_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG2_OTG_CRC1_WINDOWB_X_CONTROL_READBACK
+#define OTG2_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_START_READBACK__SHIFT 0x0
+#define OTG2_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_END_READBACK__SHIFT 0x10
+#define OTG2_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_START_READBACK_MASK 0x00007FFFL
+#define OTG2_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_END_READBACK_MASK 0x7FFF0000L
+//OTG2_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK
+#define OTG2_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_START_READBACK__SHIFT 0x0
+#define OTG2_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_END_READBACK__SHIFT 0x10
+#define OTG2_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG2_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG2_OTG_STATIC_SCREEN_CONTROL
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK__SHIFT 0x0
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT__SHIFT 0x10
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE__SHIFT 0x18
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS__SHIFT 0x19
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS__SHIFT 0x1a
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR__SHIFT 0x1b
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE__SHIFT 0x1c
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE__SHIFT 0x1e
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE__SHIFT 0x1f
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK_MASK 0x0000FFFFL
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT_MASK 0x00FF0000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE_MASK 0x01000000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS_MASK 0x02000000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS_MASK 0x04000000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR_MASK 0x08000000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE_MASK 0x10000000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_MASK 0x40000000L
+#define OTG2_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE_MASK 0x80000000L
+//OTG2_OTG_3D_STRUCTURE_CONTROL
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN__SHIFT 0x0
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE__SHIFT 0x8
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR__SHIFT 0xc
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET__SHIFT 0x10
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING__SHIFT 0x11
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT__SHIFT 0x12
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN_MASK 0x00000001L
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE_MASK 0x00000300L
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR_MASK 0x00001000L
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_MASK 0x00010000L
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING_MASK 0x00020000L
+#define OTG2_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_MASK 0x000C0000L
+//OTG2_OTG_GSL_VSYNC_GAP
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT__SHIFT 0x0
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY__SHIFT 0x8
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL__SHIFT 0x10
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE__SHIFT 0x11
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR__SHIFT 0x13
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED__SHIFT 0x14
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER__SHIFT 0x17
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP__SHIFT 0x18
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT_MASK 0x000000FFL
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY_MASK 0x0000FF00L
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL_MASK 0x00010000L
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE_MASK 0x00060000L
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR_MASK 0x00080000L
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED_MASK 0x00100000L
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER_MASK 0x00800000L
+#define OTG2_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASK 0xFF000000L
+//OTG2_OTG_MASTER_UPDATE_MODE
+#define OTG2_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE__SHIFT 0x0
+#define OTG2_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE_MASK 0x00000003L
+//OTG2_OTG_CLOCK_CONTROL
+#define OTG2_OTG_CLOCK_CONTROL__OTG_CLOCK_EN__SHIFT 0x0
+#define OTG2_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS__SHIFT 0x1
+#define OTG2_OTG_CLOCK_CONTROL__OTG_SOFT_RESET__SHIFT 0x4
+#define OTG2_OTG_CLOCK_CONTROL__OTG_CLOCK_ON__SHIFT 0x8
+#define OTG2_OTG_CLOCK_CONTROL__OTG_BUSY__SHIFT 0x10
+#define OTG2_OTG_CLOCK_CONTROL__OTG_CLOCK_EN_MASK 0x00000001L
+#define OTG2_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS_MASK 0x00000002L
+#define OTG2_OTG_CLOCK_CONTROL__OTG_SOFT_RESET_MASK 0x00000010L
+#define OTG2_OTG_CLOCK_CONTROL__OTG_CLOCK_ON_MASK 0x00000100L
+#define OTG2_OTG_CLOCK_CONTROL__OTG_BUSY_MASK 0x00010000L
+//OTG2_OTG_VSTARTUP_PARAM
+#define OTG2_OTG_VSTARTUP_PARAM__VSTARTUP_START__SHIFT 0x0
+#define OTG2_OTG_VSTARTUP_PARAM__VSTARTUP_START_MASK 0x000003FFL
+//OTG2_OTG_VUPDATE_PARAM
+#define OTG2_OTG_VUPDATE_PARAM__VUPDATE_OFFSET__SHIFT 0x0
+#define OTG2_OTG_VUPDATE_PARAM__VUPDATE_WIDTH__SHIFT 0x10
+#define OTG2_OTG_VUPDATE_PARAM__VUPDATE_OFFSET_MASK 0x0000FFFFL
+#define OTG2_OTG_VUPDATE_PARAM__VUPDATE_WIDTH_MASK 0x03FF0000L
+//OTG2_OTG_VREADY_PARAM
+#define OTG2_OTG_VREADY_PARAM__VREADY_OFFSET__SHIFT 0x0
+#define OTG2_OTG_VREADY_PARAM__VREADY_OFFSET_MASK 0x0000FFFFL
+//OTG2_OTG_GLOBAL_SYNC_STATUS
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN__SHIFT 0x0
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE__SHIFT 0x1
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED__SHIFT 0x2
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS__SHIFT 0x3
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR__SHIFT 0x4
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN__SHIFT 0x5
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE__SHIFT 0x6
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL__SHIFT 0x7
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED__SHIFT 0x8
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS__SHIFT 0x9
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR__SHIFT 0xa
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS__SHIFT 0xb
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN__SHIFT 0xc
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE__SHIFT 0xd
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED__SHIFT 0xe
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS__SHIFT 0xf
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR__SHIFT 0x10
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS__SHIFT 0x11
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN__SHIFT 0x12
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE__SHIFT 0x13
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED__SHIFT 0x14
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS__SHIFT 0x15
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR__SHIFT 0x16
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS__SHIFT 0x18
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS__SHIFT 0x19
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN_MASK 0x00000001L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE_MASK 0x00000002L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED_MASK 0x00000004L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS_MASK 0x00000008L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR_MASK 0x00000010L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN_MASK 0x00000020L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE_MASK 0x00000040L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL_MASK 0x00000080L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED_MASK 0x00000100L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS_MASK 0x00000200L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR_MASK 0x00000400L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS_MASK 0x00000800L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN_MASK 0x00001000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE_MASK 0x00002000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED_MASK 0x00004000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS_MASK 0x00008000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR_MASK 0x00010000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS_MASK 0x00020000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN_MASK 0x00040000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE_MASK 0x00080000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED_MASK 0x00100000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS_MASK 0x00200000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR_MASK 0x00400000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS_MASK 0x01000000L
+#define OTG2_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS_MASK 0x02000000L
+//OTG2_OTG_MASTER_UPDATE_LOCK
+#define OTG2_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK__SHIFT 0x0
+#define OTG2_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS__SHIFT 0x8
+#define OTG2_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK_MASK 0x00000001L
+#define OTG2_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS_MASK 0x00000100L
+//OTG2_OTG_GSL_CONTROL
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL0_EN__SHIFT 0x0
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL1_EN__SHIFT 0x1
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL2_EN__SHIFT 0x2
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN__SHIFT 0x3
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE__SHIFT 0x4
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY__SHIFT 0x8
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY__SHIFT 0x10
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS__SHIFT 0x1c
+#define OTG2_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN__SHIFT 0x1f
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL0_EN_MASK 0x00000001L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL1_EN_MASK 0x00000002L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL2_EN_MASK 0x00000004L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN_MASK 0x00000008L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE_MASK 0x00000030L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY_MASK 0x00000F00L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY_MASK 0x001F0000L
+#define OTG2_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS_MASK 0x10000000L
+#define OTG2_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN_MASK 0x80000000L
+//OTG2_OTG_GSL_WINDOW_X
+#define OTG2_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X__SHIFT 0x0
+#define OTG2_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X__SHIFT 0x10
+#define OTG2_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG2_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X_MASK 0x7FFF0000L
+//OTG2_OTG_GSL_WINDOW_Y
+#define OTG2_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y__SHIFT 0x0
+#define OTG2_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y__SHIFT 0x10
+#define OTG2_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y_MASK 0x00007FFFL
+#define OTG2_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y_MASK 0x7FFF0000L
+//OTG2_OTG_VUPDATE_KEEPOUT
+#define OTG2_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET__SHIFT 0x0
+#define OTG2_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET__SHIFT 0x10
+#define OTG2_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN__SHIFT 0x1f
+#define OTG2_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET_MASK 0x0000FFFFL
+#define OTG2_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET_MASK 0x03FF0000L
+#define OTG2_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN_MASK 0x80000000L
+//OTG2_OTG_GLOBAL_CONTROL0
+#define OTG2_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_START_X__SHIFT 0x0
+#define OTG2_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_END_X__SHIFT 0x10
+#define OTG2_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_EN__SHIFT 0x1f
+#define OTG2_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_START_X_MASK 0x00007FFFL
+#define OTG2_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_END_X_MASK 0x7FFF0000L
+#define OTG2_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_EN_MASK 0x80000000L
+//OTG2_OTG_GLOBAL_CONTROL1
+#define OTG2_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_START_Y__SHIFT 0x0
+#define OTG2_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_END_Y__SHIFT 0x10
+#define OTG2_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_VCOUNT_MODE__SHIFT 0x1f
+#define OTG2_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_START_Y_MASK 0x00007FFFL
+#define OTG2_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_END_Y_MASK 0x7FFF0000L
+#define OTG2_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_VCOUNT_MODE_MASK 0x80000000L
+//OTG2_OTG_GLOBAL_CONTROL2
+#define OTG2_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN__SHIFT 0xa
+#define OTG2_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL__SHIFT 0x10
+#define OTG2_OTG_GLOBAL_CONTROL2__OTG_MASTER_UPDATE_LOCK_SEL__SHIFT 0x19
+#define OTG2_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE__SHIFT 0x1e
+#define OTG2_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE__SHIFT 0x1f
+#define OTG2_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN_MASK 0x00000400L
+#define OTG2_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL_MASK 0x00070000L
+#define OTG2_OTG_GLOBAL_CONTROL2__OTG_MASTER_UPDATE_LOCK_SEL_MASK 0x0E000000L
+#define OTG2_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE_MASK 0x40000000L
+#define OTG2_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE_MASK 0x80000000L
+//OTG2_OTG_GLOBAL_CONTROL3
+#define OTG2_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD__SHIFT 0x0
+#define OTG2_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL__SHIFT 0x4
+#define OTG2_OTG_GLOBAL_CONTROL3__DIG_UPDATE_FIELD_SEL__SHIFT 0x10
+#define OTG2_OTG_GLOBAL_CONTROL3__DIG_UPDATE_EYE_SEL__SHIFT 0x14
+#define OTG2_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_MASK 0x00000003L
+#define OTG2_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL_MASK 0x00000030L
+#define OTG2_OTG_GLOBAL_CONTROL3__DIG_UPDATE_FIELD_SEL_MASK 0x00030000L
+#define OTG2_OTG_GLOBAL_CONTROL3__DIG_UPDATE_EYE_SEL_MASK 0x00300000L
+//OTG2_OTG_GLOBAL_CONTROL4
+#define OTG2_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_X__SHIFT 0x0
+#define OTG2_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_Y__SHIFT 0x10
+#define OTG2_OTG_GLOBAL_CONTROL4__DIG_UPDATE_VCOUNT_MODE__SHIFT 0x1f
+#define OTG2_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_X_MASK 0x00007FFFL
+#define OTG2_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_Y_MASK 0x7FFF0000L
+#define OTG2_OTG_GLOBAL_CONTROL4__DIG_UPDATE_VCOUNT_MODE_MASK 0x80000000L
+//OTG2_OTG_TRIG_MANUAL_CONTROL
+#define OTG2_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL__SHIFT 0x0
+#define OTG2_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL_MASK 0x00000001L
+//OTG2_OTG_DRR_TIMING_INT_STATUS
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED__SHIFT 0x0
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT__SHIFT 0x4
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_CLEAR__SHIFT 0x8
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MSK__SHIFT 0xc
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_TYPE__SHIFT 0xd
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED__SHIFT 0x10
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT__SHIFT 0x14
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_CLEAR__SHIFT 0x18
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MSK__SHIFT 0x1c
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_TYPE__SHIFT 0x1d
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_MASK 0x00000001L
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MASK 0x00000010L
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_CLEAR_MASK 0x00000100L
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MSK_MASK 0x00001000L
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_TYPE_MASK 0x00002000L
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_MASK 0x00010000L
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MASK 0x00100000L
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_CLEAR_MASK 0x01000000L
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MSK_MASK 0x10000000L
+#define OTG2_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_TYPE_MASK 0x20000000L
+//OTG2_OTG_DRR_V_TOTAL_REACH_RANGE
+#define OTG2_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_LOWER_RANGE__SHIFT 0x0
+#define OTG2_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_UPPER_RANGE__SHIFT 0x10
+#define OTG2_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_LOWER_RANGE_MASK 0x00007FFFL
+#define OTG2_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_UPPER_RANGE_MASK 0x7FFF0000L
+//OTG2_OTG_DRR_V_TOTAL_CHANGE
+#define OTG2_OTG_DRR_V_TOTAL_CHANGE__OTG_DRR_V_TOTAL_CHANGE_LIMIT__SHIFT 0x0
+#define OTG2_OTG_DRR_V_TOTAL_CHANGE__OTG_DRR_V_TOTAL_CHANGE_LIMIT_MASK 0x00007FFFL
+//OTG2_OTG_DRR_TRIGGER_WINDOW
+#define OTG2_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_START_X__SHIFT 0x0
+#define OTG2_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_END_X__SHIFT 0x10
+#define OTG2_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG2_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_END_X_MASK 0x7FFF0000L
+//OTG2_OTG_DRR_CONTROL
+#define OTG2_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME__SHIFT 0x0
+#define OTG2_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR__SHIFT 0x10
+#define OTG2_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME_MASK 0x00000003L
+#define OTG2_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR_MASK 0x7FFF0000L
+//OTG2_OTG_DRR_CONTOL2
+#define OTG2_OTG_DRR_CONTOL2__OTG_VCOUNT2_LAST_USED_BY_DRR__SHIFT 0x0
+#define OTG2_OTG_DRR_CONTOL2__OTG_VCOUNT2_LAST_USED_BY_DRR_MASK 0xFFFFFFFFL
+//OTG2_OTG_M_CONST_DTO0
+#define OTG2_OTG_M_CONST_DTO0__OTG_M_CONST_DTO_PHASE__SHIFT 0x0
+#define OTG2_OTG_M_CONST_DTO0__OTG_M_CONST_DTO_PHASE_MASK 0xFFFFFFFFL
+//OTG2_OTG_M_CONST_DTO1
+#define OTG2_OTG_M_CONST_DTO1__OTG_M_CONST_DTO_MODULO__SHIFT 0x0
+#define OTG2_OTG_M_CONST_DTO1__OTG_M_CONST_DTO_MODULO_MASK 0xFFFFFFFFL
+//OTG2_OTG_REQUEST_CONTROL
+#define OTG2_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE__SHIFT 0x0
+#define OTG2_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE_MASK 0x00000001L
+//OTG2_OTG_DSC_START_POSITION
+#define OTG2_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X__SHIFT 0x0
+#define OTG2_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM__SHIFT 0x10
+#define OTG2_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X_MASK 0x00007FFFL
+#define OTG2_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM_MASK 0x03FF0000L
+//OTG2_OTG_PIPE_UPDATE_STATUS
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING__SHIFT 0x0
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING__SHIFT 0x4
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING__SHIFT 0x8
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS__SHIFT 0x10
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING_MASK 0x00000001L
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING_MASK 0x00000010L
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING_MASK 0x00000100L
+#define OTG2_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS_MASK 0x00010000L
+//OTG2_OTG_SPARE_REGISTER
+#define OTG2_OTG_SPARE_REGISTER__OTG_SPARE_REG__SHIFT 0x0
+#define OTG2_OTG_SPARE_REGISTER__OTG_SPARE_REG_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_otg3_dispdec
+//OTG3_OTG_H_TOTAL
+#define OTG3_OTG_H_TOTAL__OTG_H_TOTAL__SHIFT 0x0
+#define OTG3_OTG_H_TOTAL__OTG_H_TOTAL_MASK 0x00007FFFL
+//OTG3_OTG_H_BLANK_START_END
+#define OTG3_OTG_H_BLANK_START_END__OTG_H_BLANK_START__SHIFT 0x0
+#define OTG3_OTG_H_BLANK_START_END__OTG_H_BLANK_END__SHIFT 0x10
+#define OTG3_OTG_H_BLANK_START_END__OTG_H_BLANK_START_MASK 0x00007FFFL
+#define OTG3_OTG_H_BLANK_START_END__OTG_H_BLANK_END_MASK 0x7FFF0000L
+//OTG3_OTG_H_SYNC_A
+#define OTG3_OTG_H_SYNC_A__OTG_H_SYNC_A_START__SHIFT 0x0
+#define OTG3_OTG_H_SYNC_A__OTG_H_SYNC_A_END__SHIFT 0x10
+#define OTG3_OTG_H_SYNC_A__OTG_H_SYNC_A_START_MASK 0x00007FFFL
+#define OTG3_OTG_H_SYNC_A__OTG_H_SYNC_A_END_MASK 0x7FFF0000L
+//OTG3_OTG_H_SYNC_A_CNTL
+#define OTG3_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL__SHIFT 0x0
+#define OTG3_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN__SHIFT 0x10
+#define OTG3_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF__SHIFT 0x11
+#define OTG3_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL_MASK 0x00000001L
+#define OTG3_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN_MASK 0x00010000L
+#define OTG3_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF_MASK 0x00020000L
+//OTG3_OTG_H_TIMING_CNTL
+#define OTG3_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE__SHIFT 0x0
+#define OTG3_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MANUAL__SHIFT 0x8
+#define OTG3_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_CURR__SHIFT 0x10
+#define OTG3_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MASK 0x00000003L
+#define OTG3_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_MANUAL_MASK 0x00000100L
+#define OTG3_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_MODE_CURR_MASK 0x00030000L
+//OTG3_OTG_V_TOTAL
+#define OTG3_OTG_V_TOTAL__OTG_V_TOTAL__SHIFT 0x0
+#define OTG3_OTG_V_TOTAL__OTG_V_TOTAL_MASK 0x00007FFFL
+//OTG3_OTG_V_TOTAL_MIN
+#define OTG3_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN__SHIFT 0x0
+#define OTG3_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN_MASK 0x00007FFFL
+//OTG3_OTG_V_TOTAL_MAX
+#define OTG3_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX__SHIFT 0x0
+#define OTG3_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX_MASK 0x00007FFFL
+//OTG3_OTG_V_TOTAL_MID
+#define OTG3_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID__SHIFT 0x0
+#define OTG3_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID_MASK 0x00007FFFL
+//OTG3_OTG_V_TOTAL_CONTROL
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL__SHIFT 0x0
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL__SHIFT 0x1
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN__SHIFT 0x2
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN__SHIFT 0x3
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT__SHIFT 0x4
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD__SHIFT 0x5
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM__SHIFT 0x8
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK__SHIFT 0x10
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL_MASK 0x00000001L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL_MASK 0x00000002L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN_MASK 0x00000004L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN_MASK 0x00000008L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT_MASK 0x00000010L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD_MASK 0x00000020L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM_MASK 0x0000FF00L
+#define OTG3_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_MASK 0xFFFF0000L
+//OTG3_OTG_V_COUNT_STOP_CONTROL
+#define OTG3_OTG_V_COUNT_STOP_CONTROL__OTG_V_COUNT_STOP__SHIFT 0x0
+#define OTG3_OTG_V_COUNT_STOP_CONTROL__OTG_V_COUNT_STOP_MASK 0x00007FFFL
+//OTG3_OTG_V_COUNT_STOP_CONTROL2
+#define OTG3_OTG_V_COUNT_STOP_CONTROL2__OTG_V_COUNT_STOP_TIMER__SHIFT 0x0
+#define OTG3_OTG_V_COUNT_STOP_CONTROL2__OTG_V_COUNT_STOP_TIMER_MASK 0xFFFFFFFFL
+//OTG3_OTG_V_TOTAL_INT_STATUS
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED__SHIFT 0x0
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT__SHIFT 0x4
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK__SHIFT 0x8
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK__SHIFT 0xc
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MASK 0x00000001L
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_INT_MASK 0x00000010L
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_ACK_MASK 0x00000100L
+#define OTG3_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURRED_MSK_MASK 0x00001000L
+//OTG3_OTG_VSYNC_NOM_INT_STATUS
+#define OTG3_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM__SHIFT 0x0
+#define OTG3_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR__SHIFT 0x4
+#define OTG3_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_MASK 0x00000001L
+#define OTG3_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR_MASK 0x00000010L
+//OTG3_OTG_V_BLANK_START_END
+#define OTG3_OTG_V_BLANK_START_END__OTG_V_BLANK_START__SHIFT 0x0
+#define OTG3_OTG_V_BLANK_START_END__OTG_V_BLANK_END__SHIFT 0x10
+#define OTG3_OTG_V_BLANK_START_END__OTG_V_BLANK_START_MASK 0x00007FFFL
+#define OTG3_OTG_V_BLANK_START_END__OTG_V_BLANK_END_MASK 0x7FFF0000L
+//OTG3_OTG_V_SYNC_A
+#define OTG3_OTG_V_SYNC_A__OTG_V_SYNC_A_START__SHIFT 0x0
+#define OTG3_OTG_V_SYNC_A__OTG_V_SYNC_A_END__SHIFT 0x10
+#define OTG3_OTG_V_SYNC_A__OTG_V_SYNC_A_START_MASK 0x00007FFFL
+#define OTG3_OTG_V_SYNC_A__OTG_V_SYNC_A_END_MASK 0x7FFF0000L
+//OTG3_OTG_V_SYNC_A_CNTL
+#define OTG3_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL__SHIFT 0x0
+#define OTG3_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_MODE__SHIFT 0x8
+#define OTG3_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL_MASK 0x00000001L
+#define OTG3_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_MODE_MASK 0x00000100L
+//OTG3_OTG_TRIGA_CNTL
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT__SHIFT 0x0
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT__SHIFT 0x8
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS__SHIFT 0xc
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS__SHIFT 0xd
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED__SHIFT 0xe
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY__SHIFT 0x18
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR__SHIFT 0x1f
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT_MASK 0x00000700L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS_MASK 0x00001000L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS_MASK 0x00002000L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED_MASK 0x00004000L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY_MASK 0x1F000000L
+#define OTG3_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR_MASK 0x80000000L
+//OTG3_OTG_TRIGA_MANUAL_TRIG
+#define OTG3_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG__SHIFT 0x0
+#define OTG3_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG_MASK 0x00000001L
+//OTG3_OTG_TRIGB_CNTL
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT__SHIFT 0x0
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT__SHIFT 0x8
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS__SHIFT 0xc
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS__SHIFT 0xd
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED__SHIFT 0xe
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY__SHIFT 0x18
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR__SHIFT 0x1f
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT_MASK 0x00000700L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS_MASK 0x00001000L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS_MASK 0x00002000L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED_MASK 0x00004000L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY_MASK 0x1F000000L
+#define OTG3_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR_MASK 0x80000000L
+//OTG3_OTG_TRIGB_MANUAL_TRIG
+#define OTG3_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG__SHIFT 0x0
+#define OTG3_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG_MASK 0x00000001L
+//OTG3_OTG_FORCE_COUNT_NOW_CNTL
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE__SHIFT 0x0
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK__SHIFT 0x4
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL__SHIFT 0x8
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED__SHIFT 0x10
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR__SHIFT 0x18
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE_MASK 0x00000003L
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK_MASK 0x00000010L
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL_MASK 0x00000100L
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED_MASK 0x00010000L
+#define OTG3_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR_MASK 0x01000000L
+//OTG3_OTG_STEREO_FORCE_NEXT_EYE
+#define OTG3_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE__SHIFT 0x0
+#define OTG3_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE_MASK 0x00000003L
+//OTG3_OTG_CONTROL
+#define OTG3_OTG_CONTROL__OTG_MASTER_EN__SHIFT 0x0
+#define OTG3_OTG_CONTROL__OTG_DISABLE_POINT_CNTL__SHIFT 0x8
+#define OTG3_OTG_CONTROL__OTG_START_POINT_CNTL__SHIFT 0xc
+#define OTG3_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL__SHIFT 0xd
+#define OTG3_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY__SHIFT 0xe
+#define OTG3_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE__SHIFT 0x10
+#define OTG3_OTG_CONTROL__OTG_OUT_MUX__SHIFT 0x14
+#define OTG3_OTG_CONTROL__OTG_MASTER_EN_MASK 0x00000001L
+#define OTG3_OTG_CONTROL__OTG_DISABLE_POINT_CNTL_MASK 0x00000300L
+#define OTG3_OTG_CONTROL__OTG_START_POINT_CNTL_MASK 0x00001000L
+#define OTG3_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL_MASK 0x00002000L
+#define OTG3_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY_MASK 0x00004000L
+#define OTG3_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE_MASK 0x00010000L
+#define OTG3_OTG_CONTROL__OTG_OUT_MUX_MASK 0x00300000L
+//OTG3_OTG_DLPC_CONTROL
+#define OTG3_OTG_DLPC_CONTROL__OTG_RESYNC_MODE__SHIFT 0x0
+#define OTG3_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_LOCATION__SHIFT 0x10
+#define OTG3_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_CURRENT__SHIFT 0x1f
+#define OTG3_OTG_DLPC_CONTROL__OTG_RESYNC_MODE_MASK 0x00000001L
+#define OTG3_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_LOCATION_MASK 0x7FFF0000L
+#define OTG3_OTG_DLPC_CONTROL__OTG_DLPC_SNAPSHOT_CURRENT_MASK 0x80000000L
+//OTG3_OTG_INTERLACE_CONTROL
+#define OTG3_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE__SHIFT 0x0
+#define OTG3_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD__SHIFT 0x10
+#define OTG3_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE_MASK 0x00000001L
+#define OTG3_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD_MASK 0x00030000L
+//OTG3_OTG_INTERLACE_STATUS
+#define OTG3_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD__SHIFT 0x0
+#define OTG3_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD__SHIFT 0x1
+#define OTG3_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD_MASK 0x00000001L
+#define OTG3_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD_MASK 0x00000002L
+//OTG3_OTG_PIXEL_DATA_READBACK0
+#define OTG3_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB__SHIFT 0x0
+#define OTG3_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y__SHIFT 0x10
+#define OTG3_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB_MASK 0x0000FFFFL
+#define OTG3_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y_MASK 0xFFFF0000L
+//OTG3_OTG_PIXEL_DATA_READBACK1
+#define OTG3_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR__SHIFT 0x0
+#define OTG3_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR_MASK 0x0000FFFFL
+//OTG3_OTG_STATUS
+#define OTG3_OTG_STATUS__OTG_V_BLANK__SHIFT 0x0
+#define OTG3_OTG_STATUS__OTG_V_ACTIVE_DISP__SHIFT 0x1
+#define OTG3_OTG_STATUS__OTG_V_SYNC_A__SHIFT 0x2
+#define OTG3_OTG_STATUS__OTG_V_UPDATE__SHIFT 0x3
+#define OTG3_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE__SHIFT 0x5
+#define OTG3_OTG_STATUS__OTG_H_BLANK__SHIFT 0x10
+#define OTG3_OTG_STATUS__OTG_H_ACTIVE_DISP__SHIFT 0x11
+#define OTG3_OTG_STATUS__OTG_H_SYNC_A__SHIFT 0x12
+#define OTG3_OTG_STATUS__OTG_V_BLANK_MASK 0x00000001L
+#define OTG3_OTG_STATUS__OTG_V_ACTIVE_DISP_MASK 0x00000002L
+#define OTG3_OTG_STATUS__OTG_V_SYNC_A_MASK 0x00000004L
+#define OTG3_OTG_STATUS__OTG_V_UPDATE_MASK 0x00000008L
+#define OTG3_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE_MASK 0x00000020L
+#define OTG3_OTG_STATUS__OTG_H_BLANK_MASK 0x00010000L
+#define OTG3_OTG_STATUS__OTG_H_ACTIVE_DISP_MASK 0x00020000L
+#define OTG3_OTG_STATUS__OTG_H_SYNC_A_MASK 0x00040000L
+//OTG3_OTG_STATUS_POSITION
+#define OTG3_OTG_STATUS_POSITION__OTG_VERT_COUNT__SHIFT 0x0
+#define OTG3_OTG_STATUS_POSITION__OTG_VERT_LONG_VBLANK__SHIFT 0xf
+#define OTG3_OTG_STATUS_POSITION__OTG_HORZ_COUNT__SHIFT 0x10
+#define OTG3_OTG_STATUS_POSITION__OTG_VERT_COUNT_MASK 0x00007FFFL
+#define OTG3_OTG_STATUS_POSITION__OTG_VERT_LONG_VBLANK_MASK 0x00008000L
+#define OTG3_OTG_STATUS_POSITION__OTG_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG3_OTG_LONG_VBLANK_STATUS
+#define OTG3_OTG_LONG_VBLANK_STATUS__OTG_V_COUNT_STOP_COUNT__SHIFT 0x0
+#define OTG3_OTG_LONG_VBLANK_STATUS__OTG_V_COUNT_STOP_COUNT_MASK 0xFFFFFFFFL
+//OTG3_OTG_NOM_VERT_POSITION
+#define OTG3_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM__SHIFT 0x0
+#define OTG3_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM_MASK 0x00007FFFL
+//OTG3_OTG_STATUS_FRAME_COUNT
+#define OTG3_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT__SHIFT 0x0
+#define OTG3_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG3_OTG_STATUS_VF_COUNT
+#define OTG3_OTG_STATUS_VF_COUNT__OTG_VF_COUNT__SHIFT 0x0
+#define OTG3_OTG_STATUS_VF_COUNT__OTG_VF_COUNT_MASK 0x7FFFFFFFL
+//OTG3_OTG_STATUS_HV_COUNT
+#define OTG3_OTG_STATUS_HV_COUNT__OTG_HV_COUNT__SHIFT 0x0
+#define OTG3_OTG_STATUS_HV_COUNT__OTG_HV_COUNT_MASK 0x7FFFFFFFL
+//OTG3_OTG_COUNT_CONTROL
+#define OTG3_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN__SHIFT 0x0
+#define OTG3_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT__SHIFT 0x1
+#define OTG3_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN_MASK 0x00000001L
+#define OTG3_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT_MASK 0x0000001EL
+//OTG3_OTG_COUNT_RESET
+#define OTG3_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT__SHIFT 0x0
+#define OTG3_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT_MASK 0x00000001L
+//OTG3_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE
+#define OTG3_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__SHIFT 0x0
+#define OTG3_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_MASK 0x00000001L
+//OTG3_OTG_VERT_SYNC_CONTROL
+#define OTG3_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED__SHIFT 0x0
+#define OTG3_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR__SHIFT 0x8
+#define OTG3_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE__SHIFT 0x10
+#define OTG3_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED_MASK 0x00000001L
+#define OTG3_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR_MASK 0x00000100L
+#define OTG3_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE_MASK 0x00030000L
+//OTG3_OTG_STEREO_STATUS
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE__SHIFT 0x0
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT__SHIFT 0x8
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT__SHIFT 0x10
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG__SHIFT 0x14
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING__SHIFT 0x18
+#define OTG3_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE__SHIFT 0x1e
+#define OTG3_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE__SHIFT 0x1f
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE_MASK 0x00000001L
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT_MASK 0x00000100L
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT_MASK 0x00010000L
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG_MASK 0x00100000L
+#define OTG3_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING_MASK 0x03000000L
+#define OTG3_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE_MASK 0x40000000L
+#define OTG3_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE_MASK 0x80000000L
+//OTG3_OTG_STEREO_CONTROL
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM__SHIFT 0x0
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY__SHIFT 0xf
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY__SHIFT 0x11
+#define OTG3_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP__SHIFT 0x12
+#define OTG3_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM__SHIFT 0x13
+#define OTG3_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX__SHIFT 0x14
+#define OTG3_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL__SHIFT 0x15
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_EN__SHIFT 0x18
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM_MASK 0x00007FFFL
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY_MASK 0x00008000L
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY_MASK 0x00020000L
+#define OTG3_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP_MASK 0x00040000L
+#define OTG3_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM_MASK 0x00080000L
+#define OTG3_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX_MASK 0x00100000L
+#define OTG3_OTG_STEREO_CONTROL__OTG_FIELD_NUM_SEL_MASK 0x00200000L
+#define OTG3_OTG_STEREO_CONTROL__OTG_STEREO_EN_MASK 0x01000000L
+//OTG3_OTG_SNAPSHOT_STATUS
+#define OTG3_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED__SHIFT 0x0
+#define OTG3_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR__SHIFT 0x1
+#define OTG3_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER__SHIFT 0x2
+#define OTG3_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED_MASK 0x00000001L
+#define OTG3_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR_MASK 0x00000002L
+#define OTG3_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER_MASK 0x00000004L
+//OTG3_OTG_SNAPSHOT_CONTROL
+#define OTG3_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL__SHIFT 0x0
+#define OTG3_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL_MASK 0x00000003L
+//OTG3_OTG_SNAPSHOT_POSITION
+#define OTG3_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT__SHIFT 0x0
+#define OTG3_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT__SHIFT 0x10
+#define OTG3_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT_MASK 0x00007FFFL
+#define OTG3_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG3_OTG_SNAPSHOT_FRAME
+#define OTG3_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT__SHIFT 0x0
+#define OTG3_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG3_OTG_INTERRUPT_CONTROL
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK__SHIFT 0x0
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE__SHIFT 0x1
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK__SHIFT 0x8
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE__SHIFT 0x9
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK__SHIFT 0x10
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE__SHIFT 0x11
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK__SHIFT 0x18
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK__SHIFT 0x19
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE__SHIFT 0x1a
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE__SHIFT 0x1b
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK__SHIFT 0x1c
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE__SHIFT 0x1d
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK__SHIFT 0x1e
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE__SHIFT 0x1f
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK_MASK 0x00000001L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE_MASK 0x00000002L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK_MASK 0x00000100L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE_MASK 0x00000200L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK_MASK 0x00010000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE_MASK 0x00020000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK_MASK 0x01000000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK_MASK 0x02000000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE_MASK 0x04000000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE_MASK 0x08000000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK_MASK 0x10000000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE_MASK 0x20000000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK_MASK 0x40000000L
+#define OTG3_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE_MASK 0x80000000L
+//OTG3_OTG_UPDATE_LOCK
+#define OTG3_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK__SHIFT 0x0
+#define OTG3_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK_MASK 0x00000001L
+//OTG3_OTG_DOUBLE_BUFFER_CONTROL
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING__SHIFT 0x0
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_PENDING__SHIFT 0x4
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING__SHIFT 0x5
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING__SHIFT 0x6
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING__SHIFT 0x7
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY__SHIFT 0x8
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING__SHIFT 0x9
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING__SHIFT 0xa
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_MODE__SHIFT 0x18
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING_MASK 0x00000001L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_PENDING_MASK 0x00000010L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING_MASK 0x00000020L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING_MASK 0x00000040L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING_MASK 0x00000080L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY_MASK 0x00000100L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING_MASK 0x00000200L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING_MASK 0x00000400L
+#define OTG3_OTG_DOUBLE_BUFFER_CONTROL__OTG_DRR_TIMING_DBUF_UPDATE_MODE_MASK 0x03000000L
+//OTG3_OTG_MASTER_EN
+#define OTG3_OTG_MASTER_EN__OTG_MASTER_EN__SHIFT 0x0
+#define OTG3_OTG_MASTER_EN__OTG_MASTER_EN_MASK 0x00000001L
+//OTG3_OTG_VERTICAL_INTERRUPT0_POSITION
+#define OTG3_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START__SHIFT 0x0
+#define OTG3_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END__SHIFT 0x10
+#define OTG3_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START_MASK 0x00007FFFL
+#define OTG3_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END_MASK 0x7FFF0000L
+//OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY__SHIFT 0x4
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE__SHIFT 0x8
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS__SHIFT 0xc
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS__SHIFT 0x10
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR__SHIFT 0x14
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE__SHIFT 0x18
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VINTE_STATUS__SHIFT 0x1c
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_MASK 0x00000010L
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE_MASK 0x00000100L
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS_MASK 0x00001000L
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS_MASK 0x00010000L
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR_MASK 0x00100000L
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE_MASK 0x01000000L
+#define OTG3_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VINTE_STATUS_MASK 0x10000000L
+//OTG3_OTG_VERTICAL_INTERRUPT1_POSITION
+#define OTG3_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START__SHIFT 0x0
+#define OTG3_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START_MASK 0x00007FFFL
+//OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE__SHIFT 0x8
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS__SHIFT 0xc
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS__SHIFT 0x10
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR__SHIFT 0x14
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE__SHIFT 0x18
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE_MASK 0x00000100L
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS_MASK 0x00001000L
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS_MASK 0x00010000L
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR_MASK 0x00100000L
+#define OTG3_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE_MASK 0x01000000L
+//OTG3_OTG_VERTICAL_INTERRUPT2_POSITION
+#define OTG3_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START__SHIFT 0x0
+#define OTG3_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START_MASK 0x00007FFFL
+//OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE__SHIFT 0x8
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS__SHIFT 0xc
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS__SHIFT 0x10
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR__SHIFT 0x14
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE__SHIFT 0x18
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE_MASK 0x00000100L
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS_MASK 0x00001000L
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS_MASK 0x00010000L
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR_MASK 0x00100000L
+#define OTG3_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE_MASK 0x01000000L
+//OTG3_OTG_CRC_CNTL
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_EN__SHIFT 0x0
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_WINDOW_DB_EN__SHIFT 0x1
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_POLY_SEL__SHIFT 0x2
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY__SHIFT 0x3
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_CONT_EN__SHIFT 0x4
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL__SHIFT 0x5
+#define OTG3_OTG_CRC_CNTL__OTG_CRC1_EN__SHIFT 0x7
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE__SHIFT 0x8
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_CONT_MODE__SHIFT 0xa
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x13
+#define OTG3_OTG_CRC_CNTL__OTG_CRC0_SELECT__SHIFT 0x14
+#define OTG3_OTG_CRC_CNTL__OTG_CRC1_SELECT__SHIFT 0x18
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING__SHIFT 0x1c
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING__SHIFT 0x1d
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING__SHIFT 0x1e
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING__SHIFT 0x1f
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_EN_MASK 0x00000001L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_WINDOW_DB_EN_MASK 0x00000002L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_POLY_SEL_MASK 0x00000004L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY_MASK 0x00000008L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_CONT_EN_MASK 0x00000010L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL_MASK 0x00000060L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC1_EN_MASK 0x00000080L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE_MASK 0x00000300L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_CONT_MODE_MASK 0x00000400L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x00080000L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC0_SELECT_MASK 0x00700000L
+#define OTG3_OTG_CRC_CNTL__OTG_CRC1_SELECT_MASK 0x07000000L
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING_MASK 0x10000000L
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING_MASK 0x20000000L
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING_MASK 0x40000000L
+#define OTG3_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING_MASK 0x80000000L
+//OTG3_OTG_CRC0_WINDOWA_X_CONTROL
+#define OTG3_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START__SHIFT 0x0
+#define OTG3_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END__SHIFT 0x10
+#define OTG3_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG3_OTG_CRC0_WINDOWA_Y_CONTROL
+#define OTG3_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START__SHIFT 0x0
+#define OTG3_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END__SHIFT 0x10
+#define OTG3_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG3_OTG_CRC0_WINDOWB_X_CONTROL
+#define OTG3_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START__SHIFT 0x0
+#define OTG3_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END__SHIFT 0x10
+#define OTG3_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG3_OTG_CRC0_WINDOWB_Y_CONTROL
+#define OTG3_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START__SHIFT 0x0
+#define OTG3_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END__SHIFT 0x10
+#define OTG3_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG3_OTG_CRC0_DATA_RG
+#define OTG3_OTG_CRC0_DATA_RG__CRC0_R_CR__SHIFT 0x0
+#define OTG3_OTG_CRC0_DATA_RG__CRC0_G_Y__SHIFT 0x10
+#define OTG3_OTG_CRC0_DATA_RG__CRC0_R_CR_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC0_DATA_RG__CRC0_G_Y_MASK 0xFFFF0000L
+//OTG3_OTG_CRC0_DATA_B
+#define OTG3_OTG_CRC0_DATA_B__CRC0_B_CB__SHIFT 0x0
+#define OTG3_OTG_CRC0_DATA_B__CRC0_C__SHIFT 0x10
+#define OTG3_OTG_CRC0_DATA_B__CRC0_B_CB_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC0_DATA_B__CRC0_C_MASK 0xFFFF0000L
+//OTG3_OTG_CRC1_WINDOWA_X_CONTROL
+#define OTG3_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START__SHIFT 0x0
+#define OTG3_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END__SHIFT 0x10
+#define OTG3_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG3_OTG_CRC1_WINDOWA_Y_CONTROL
+#define OTG3_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START__SHIFT 0x0
+#define OTG3_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END__SHIFT 0x10
+#define OTG3_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG3_OTG_CRC1_WINDOWB_X_CONTROL
+#define OTG3_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START__SHIFT 0x0
+#define OTG3_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END__SHIFT 0x10
+#define OTG3_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG3_OTG_CRC1_WINDOWB_Y_CONTROL
+#define OTG3_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START__SHIFT 0x0
+#define OTG3_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END__SHIFT 0x10
+#define OTG3_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG3_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG3_OTG_CRC1_DATA_RG
+#define OTG3_OTG_CRC1_DATA_RG__CRC1_R_CR__SHIFT 0x0
+#define OTG3_OTG_CRC1_DATA_RG__CRC1_G_Y__SHIFT 0x10
+#define OTG3_OTG_CRC1_DATA_RG__CRC1_R_CR_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC1_DATA_RG__CRC1_G_Y_MASK 0xFFFF0000L
+//OTG3_OTG_CRC1_DATA_B
+#define OTG3_OTG_CRC1_DATA_B__CRC1_B_CB__SHIFT 0x0
+#define OTG3_OTG_CRC1_DATA_B__CRC1_C__SHIFT 0x10
+#define OTG3_OTG_CRC1_DATA_B__CRC1_B_CB_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC1_DATA_B__CRC1_C_MASK 0xFFFF0000L
+//OTG3_OTG_CRC2_DATA_RG
+#define OTG3_OTG_CRC2_DATA_RG__CRC2_R_CR__SHIFT 0x0
+#define OTG3_OTG_CRC2_DATA_RG__CRC2_G_Y__SHIFT 0x10
+#define OTG3_OTG_CRC2_DATA_RG__CRC2_R_CR_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC2_DATA_RG__CRC2_G_Y_MASK 0xFFFF0000L
+//OTG3_OTG_CRC2_DATA_B
+#define OTG3_OTG_CRC2_DATA_B__CRC2_B_CB__SHIFT 0x0
+#define OTG3_OTG_CRC2_DATA_B__CRC2_C__SHIFT 0x10
+#define OTG3_OTG_CRC2_DATA_B__CRC2_B_CB_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC2_DATA_B__CRC2_C_MASK 0xFFFF0000L
+//OTG3_OTG_CRC3_DATA_RG
+#define OTG3_OTG_CRC3_DATA_RG__CRC3_R_CR__SHIFT 0x0
+#define OTG3_OTG_CRC3_DATA_RG__CRC3_G_Y__SHIFT 0x10
+#define OTG3_OTG_CRC3_DATA_RG__CRC3_R_CR_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC3_DATA_RG__CRC3_G_Y_MASK 0xFFFF0000L
+//OTG3_OTG_CRC3_DATA_B
+#define OTG3_OTG_CRC3_DATA_B__CRC3_B_CB__SHIFT 0x0
+#define OTG3_OTG_CRC3_DATA_B__CRC3_C__SHIFT 0x10
+#define OTG3_OTG_CRC3_DATA_B__CRC3_B_CB_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC3_DATA_B__CRC3_C_MASK 0xFFFF0000L
+//OTG3_OTG_CRC_SIG_RED_GREEN_MASK
+#define OTG3_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK__SHIFT 0x0
+#define OTG3_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK__SHIFT 0x10
+#define OTG3_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK_MASK 0xFFFF0000L
+//OTG3_OTG_CRC_SIG_BLUE_CONTROL_MASK
+#define OTG3_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK__SHIFT 0x0
+#define OTG3_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK__SHIFT 0x10
+#define OTG3_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK_MASK 0x0000FFFFL
+#define OTG3_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK_MASK 0xFFFF0000L
+//OTG3_OTG_CRC0_WINDOWA_X_CONTROL_READBACK
+#define OTG3_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_START_READBACK__SHIFT 0x0
+#define OTG3_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_END_READBACK__SHIFT 0x10
+#define OTG3_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_START_READBACK_MASK 0x00007FFFL
+#define OTG3_OTG_CRC0_WINDOWA_X_CONTROL_READBACK__OTG_CRC0_WINDOWA_X_END_READBACK_MASK 0x7FFF0000L
+//OTG3_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK
+#define OTG3_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_START_READBACK__SHIFT 0x0
+#define OTG3_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_END_READBACK__SHIFT 0x10
+#define OTG3_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG3_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK__OTG_CRC0_WINDOWA_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG3_OTG_CRC0_WINDOWB_X_CONTROL_READBACK
+#define OTG3_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_START_READBACK__SHIFT 0x0
+#define OTG3_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_END_READBACK__SHIFT 0x10
+#define OTG3_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_START_READBACK_MASK 0x00007FFFL
+#define OTG3_OTG_CRC0_WINDOWB_X_CONTROL_READBACK__OTG_CRC0_WINDOWB_X_END_READBACK_MASK 0x7FFF0000L
+//OTG3_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK
+#define OTG3_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_START_READBACK__SHIFT 0x0
+#define OTG3_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_END_READBACK__SHIFT 0x10
+#define OTG3_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG3_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK__OTG_CRC0_WINDOWB_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG3_OTG_CRC1_WINDOWA_X_CONTROL_READBACK
+#define OTG3_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_START_READBACK__SHIFT 0x0
+#define OTG3_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_END_READBACK__SHIFT 0x10
+#define OTG3_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_START_READBACK_MASK 0x00007FFFL
+#define OTG3_OTG_CRC1_WINDOWA_X_CONTROL_READBACK__OTG_CRC1_WINDOWA_X_END_READBACK_MASK 0x7FFF0000L
+//OTG3_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK
+#define OTG3_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_START_READBACK__SHIFT 0x0
+#define OTG3_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_END_READBACK__SHIFT 0x10
+#define OTG3_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG3_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK__OTG_CRC1_WINDOWA_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG3_OTG_CRC1_WINDOWB_X_CONTROL_READBACK
+#define OTG3_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_START_READBACK__SHIFT 0x0
+#define OTG3_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_END_READBACK__SHIFT 0x10
+#define OTG3_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_START_READBACK_MASK 0x00007FFFL
+#define OTG3_OTG_CRC1_WINDOWB_X_CONTROL_READBACK__OTG_CRC1_WINDOWB_X_END_READBACK_MASK 0x7FFF0000L
+//OTG3_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK
+#define OTG3_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_START_READBACK__SHIFT 0x0
+#define OTG3_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_END_READBACK__SHIFT 0x10
+#define OTG3_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_START_READBACK_MASK 0x00007FFFL
+#define OTG3_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK__OTG_CRC1_WINDOWB_Y_END_READBACK_MASK 0x7FFF0000L
+//OTG3_OTG_STATIC_SCREEN_CONTROL
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK__SHIFT 0x0
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT__SHIFT 0x10
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE__SHIFT 0x18
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS__SHIFT 0x19
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS__SHIFT 0x1a
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR__SHIFT 0x1b
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE__SHIFT 0x1c
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE__SHIFT 0x1e
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE__SHIFT 0x1f
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK_MASK 0x0000FFFFL
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT_MASK 0x00FF0000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE_MASK 0x01000000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS_MASK 0x02000000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS_MASK 0x04000000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR_MASK 0x08000000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE_MASK 0x10000000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_MASK 0x40000000L
+#define OTG3_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE_MASK 0x80000000L
+//OTG3_OTG_3D_STRUCTURE_CONTROL
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN__SHIFT 0x0
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE__SHIFT 0x8
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR__SHIFT 0xc
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET__SHIFT 0x10
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING__SHIFT 0x11
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT__SHIFT 0x12
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN_MASK 0x00000001L
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE_MASK 0x00000300L
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR_MASK 0x00001000L
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_MASK 0x00010000L
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING_MASK 0x00020000L
+#define OTG3_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_MASK 0x000C0000L
+//OTG3_OTG_GSL_VSYNC_GAP
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT__SHIFT 0x0
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY__SHIFT 0x8
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL__SHIFT 0x10
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE__SHIFT 0x11
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR__SHIFT 0x13
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED__SHIFT 0x14
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER__SHIFT 0x17
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP__SHIFT 0x18
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT_MASK 0x000000FFL
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY_MASK 0x0000FF00L
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL_MASK 0x00010000L
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE_MASK 0x00060000L
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR_MASK 0x00080000L
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED_MASK 0x00100000L
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER_MASK 0x00800000L
+#define OTG3_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASK 0xFF000000L
+//OTG3_OTG_MASTER_UPDATE_MODE
+#define OTG3_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE__SHIFT 0x0
+#define OTG3_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE_MASK 0x00000003L
+//OTG3_OTG_CLOCK_CONTROL
+#define OTG3_OTG_CLOCK_CONTROL__OTG_CLOCK_EN__SHIFT 0x0
+#define OTG3_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS__SHIFT 0x1
+#define OTG3_OTG_CLOCK_CONTROL__OTG_SOFT_RESET__SHIFT 0x4
+#define OTG3_OTG_CLOCK_CONTROL__OTG_CLOCK_ON__SHIFT 0x8
+#define OTG3_OTG_CLOCK_CONTROL__OTG_BUSY__SHIFT 0x10
+#define OTG3_OTG_CLOCK_CONTROL__OTG_CLOCK_EN_MASK 0x00000001L
+#define OTG3_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS_MASK 0x00000002L
+#define OTG3_OTG_CLOCK_CONTROL__OTG_SOFT_RESET_MASK 0x00000010L
+#define OTG3_OTG_CLOCK_CONTROL__OTG_CLOCK_ON_MASK 0x00000100L
+#define OTG3_OTG_CLOCK_CONTROL__OTG_BUSY_MASK 0x00010000L
+//OTG3_OTG_VSTARTUP_PARAM
+#define OTG3_OTG_VSTARTUP_PARAM__VSTARTUP_START__SHIFT 0x0
+#define OTG3_OTG_VSTARTUP_PARAM__VSTARTUP_START_MASK 0x000003FFL
+//OTG3_OTG_VUPDATE_PARAM
+#define OTG3_OTG_VUPDATE_PARAM__VUPDATE_OFFSET__SHIFT 0x0
+#define OTG3_OTG_VUPDATE_PARAM__VUPDATE_WIDTH__SHIFT 0x10
+#define OTG3_OTG_VUPDATE_PARAM__VUPDATE_OFFSET_MASK 0x0000FFFFL
+#define OTG3_OTG_VUPDATE_PARAM__VUPDATE_WIDTH_MASK 0x03FF0000L
+//OTG3_OTG_VREADY_PARAM
+#define OTG3_OTG_VREADY_PARAM__VREADY_OFFSET__SHIFT 0x0
+#define OTG3_OTG_VREADY_PARAM__VREADY_OFFSET_MASK 0x0000FFFFL
+//OTG3_OTG_GLOBAL_SYNC_STATUS
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN__SHIFT 0x0
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE__SHIFT 0x1
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED__SHIFT 0x2
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS__SHIFT 0x3
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR__SHIFT 0x4
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN__SHIFT 0x5
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE__SHIFT 0x6
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL__SHIFT 0x7
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED__SHIFT 0x8
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS__SHIFT 0x9
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR__SHIFT 0xa
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS__SHIFT 0xb
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN__SHIFT 0xc
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE__SHIFT 0xd
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED__SHIFT 0xe
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS__SHIFT 0xf
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR__SHIFT 0x10
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS__SHIFT 0x11
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN__SHIFT 0x12
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE__SHIFT 0x13
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED__SHIFT 0x14
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS__SHIFT 0x15
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR__SHIFT 0x16
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS__SHIFT 0x18
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS__SHIFT 0x19
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN_MASK 0x00000001L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE_MASK 0x00000002L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED_MASK 0x00000004L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS_MASK 0x00000008L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR_MASK 0x00000010L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN_MASK 0x00000020L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE_MASK 0x00000040L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL_MASK 0x00000080L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED_MASK 0x00000100L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS_MASK 0x00000200L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR_MASK 0x00000400L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS_MASK 0x00000800L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN_MASK 0x00001000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE_MASK 0x00002000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED_MASK 0x00004000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS_MASK 0x00008000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR_MASK 0x00010000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS_MASK 0x00020000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN_MASK 0x00040000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE_MASK 0x00080000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED_MASK 0x00100000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS_MASK 0x00200000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR_MASK 0x00400000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS_MASK 0x01000000L
+#define OTG3_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS_MASK 0x02000000L
+//OTG3_OTG_MASTER_UPDATE_LOCK
+#define OTG3_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK__SHIFT 0x0
+#define OTG3_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS__SHIFT 0x8
+#define OTG3_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK_MASK 0x00000001L
+#define OTG3_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS_MASK 0x00000100L
+//OTG3_OTG_GSL_CONTROL
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL0_EN__SHIFT 0x0
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL1_EN__SHIFT 0x1
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL2_EN__SHIFT 0x2
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN__SHIFT 0x3
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE__SHIFT 0x4
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY__SHIFT 0x8
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY__SHIFT 0x10
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS__SHIFT 0x1c
+#define OTG3_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN__SHIFT 0x1f
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL0_EN_MASK 0x00000001L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL1_EN_MASK 0x00000002L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL2_EN_MASK 0x00000004L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN_MASK 0x00000008L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE_MASK 0x00000030L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY_MASK 0x00000F00L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY_MASK 0x001F0000L
+#define OTG3_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS_MASK 0x10000000L
+#define OTG3_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN_MASK 0x80000000L
+//OTG3_OTG_GSL_WINDOW_X
+#define OTG3_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X__SHIFT 0x0
+#define OTG3_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X__SHIFT 0x10
+#define OTG3_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG3_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X_MASK 0x7FFF0000L
+//OTG3_OTG_GSL_WINDOW_Y
+#define OTG3_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y__SHIFT 0x0
+#define OTG3_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y__SHIFT 0x10
+#define OTG3_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y_MASK 0x00007FFFL
+#define OTG3_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y_MASK 0x7FFF0000L
+//OTG3_OTG_VUPDATE_KEEPOUT
+#define OTG3_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET__SHIFT 0x0
+#define OTG3_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET__SHIFT 0x10
+#define OTG3_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN__SHIFT 0x1f
+#define OTG3_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET_MASK 0x0000FFFFL
+#define OTG3_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET_MASK 0x03FF0000L
+#define OTG3_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN_MASK 0x80000000L
+//OTG3_OTG_GLOBAL_CONTROL0
+#define OTG3_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_START_X__SHIFT 0x0
+#define OTG3_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_END_X__SHIFT 0x10
+#define OTG3_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_EN__SHIFT 0x1f
+#define OTG3_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_START_X_MASK 0x00007FFFL
+#define OTG3_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_END_X_MASK 0x7FFF0000L
+#define OTG3_OTG_GLOBAL_CONTROL0__MASTER_UPDATE_LOCK_DB_EN_MASK 0x80000000L
+//OTG3_OTG_GLOBAL_CONTROL1
+#define OTG3_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_START_Y__SHIFT 0x0
+#define OTG3_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_END_Y__SHIFT 0x10
+#define OTG3_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_VCOUNT_MODE__SHIFT 0x1f
+#define OTG3_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_START_Y_MASK 0x00007FFFL
+#define OTG3_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_END_Y_MASK 0x7FFF0000L
+#define OTG3_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_VCOUNT_MODE_MASK 0x80000000L
+//OTG3_OTG_GLOBAL_CONTROL2
+#define OTG3_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN__SHIFT 0xa
+#define OTG3_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL__SHIFT 0x10
+#define OTG3_OTG_GLOBAL_CONTROL2__OTG_MASTER_UPDATE_LOCK_SEL__SHIFT 0x19
+#define OTG3_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE__SHIFT 0x1e
+#define OTG3_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE__SHIFT 0x1f
+#define OTG3_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN_MASK 0x00000400L
+#define OTG3_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL_MASK 0x00070000L
+#define OTG3_OTG_GLOBAL_CONTROL2__OTG_MASTER_UPDATE_LOCK_SEL_MASK 0x0E000000L
+#define OTG3_OTG_GLOBAL_CONTROL2__OTG_VUPDATE_BLOCK_DISABLE_MASK 0x40000000L
+#define OTG3_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE_MASK 0x80000000L
+//OTG3_OTG_GLOBAL_CONTROL3
+#define OTG3_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD__SHIFT 0x0
+#define OTG3_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL__SHIFT 0x4
+#define OTG3_OTG_GLOBAL_CONTROL3__DIG_UPDATE_FIELD_SEL__SHIFT 0x10
+#define OTG3_OTG_GLOBAL_CONTROL3__DIG_UPDATE_EYE_SEL__SHIFT 0x14
+#define OTG3_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_MASK 0x00000003L
+#define OTG3_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL_MASK 0x00000030L
+#define OTG3_OTG_GLOBAL_CONTROL3__DIG_UPDATE_FIELD_SEL_MASK 0x00030000L
+#define OTG3_OTG_GLOBAL_CONTROL3__DIG_UPDATE_EYE_SEL_MASK 0x00300000L
+//OTG3_OTG_GLOBAL_CONTROL4
+#define OTG3_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_X__SHIFT 0x0
+#define OTG3_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_Y__SHIFT 0x10
+#define OTG3_OTG_GLOBAL_CONTROL4__DIG_UPDATE_VCOUNT_MODE__SHIFT 0x1f
+#define OTG3_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_X_MASK 0x00007FFFL
+#define OTG3_OTG_GLOBAL_CONTROL4__DIG_UPDATE_POSITION_Y_MASK 0x7FFF0000L
+#define OTG3_OTG_GLOBAL_CONTROL4__DIG_UPDATE_VCOUNT_MODE_MASK 0x80000000L
+//OTG3_OTG_TRIG_MANUAL_CONTROL
+#define OTG3_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL__SHIFT 0x0
+#define OTG3_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL_MASK 0x00000001L
+//OTG3_OTG_DRR_TIMING_INT_STATUS
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED__SHIFT 0x0
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT__SHIFT 0x4
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_CLEAR__SHIFT 0x8
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MSK__SHIFT 0xc
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_TYPE__SHIFT 0xd
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED__SHIFT 0x10
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT__SHIFT 0x14
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_CLEAR__SHIFT 0x18
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MSK__SHIFT 0x1c
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_TYPE__SHIFT 0x1d
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_MASK 0x00000001L
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MASK 0x00000010L
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_CLEAR_MASK 0x00000100L
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_MSK_MASK 0x00001000L
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_TIMING_UPDATE_OCCURRED_INT_TYPE_MASK 0x00002000L
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_MASK 0x00010000L
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MASK 0x00100000L
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_CLEAR_MASK 0x01000000L
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_MSK_MASK 0x10000000L
+#define OTG3_OTG_DRR_TIMING_INT_STATUS__OTG_DRR_V_TOTAL_REACH_OCCURRED_INT_TYPE_MASK 0x20000000L
+//OTG3_OTG_DRR_V_TOTAL_REACH_RANGE
+#define OTG3_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_LOWER_RANGE__SHIFT 0x0
+#define OTG3_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_UPPER_RANGE__SHIFT 0x10
+#define OTG3_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_LOWER_RANGE_MASK 0x00007FFFL
+#define OTG3_OTG_DRR_V_TOTAL_REACH_RANGE__OTG_DRR_V_TOTAL_REACH_UPPER_RANGE_MASK 0x7FFF0000L
+//OTG3_OTG_DRR_V_TOTAL_CHANGE
+#define OTG3_OTG_DRR_V_TOTAL_CHANGE__OTG_DRR_V_TOTAL_CHANGE_LIMIT__SHIFT 0x0
+#define OTG3_OTG_DRR_V_TOTAL_CHANGE__OTG_DRR_V_TOTAL_CHANGE_LIMIT_MASK 0x00007FFFL
+//OTG3_OTG_DRR_TRIGGER_WINDOW
+#define OTG3_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_START_X__SHIFT 0x0
+#define OTG3_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_END_X__SHIFT 0x10
+#define OTG3_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG3_OTG_DRR_TRIGGER_WINDOW__OTG_DRR_TRIGGER_WINDOW_END_X_MASK 0x7FFF0000L
+//OTG3_OTG_DRR_CONTROL
+#define OTG3_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME__SHIFT 0x0
+#define OTG3_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR__SHIFT 0x10
+#define OTG3_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME_MASK 0x00000003L
+#define OTG3_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR_MASK 0x7FFF0000L
+//OTG3_OTG_DRR_CONTOL2
+#define OTG3_OTG_DRR_CONTOL2__OTG_VCOUNT2_LAST_USED_BY_DRR__SHIFT 0x0
+#define OTG3_OTG_DRR_CONTOL2__OTG_VCOUNT2_LAST_USED_BY_DRR_MASK 0xFFFFFFFFL
+//OTG3_OTG_M_CONST_DTO0
+#define OTG3_OTG_M_CONST_DTO0__OTG_M_CONST_DTO_PHASE__SHIFT 0x0
+#define OTG3_OTG_M_CONST_DTO0__OTG_M_CONST_DTO_PHASE_MASK 0xFFFFFFFFL
+//OTG3_OTG_M_CONST_DTO1
+#define OTG3_OTG_M_CONST_DTO1__OTG_M_CONST_DTO_MODULO__SHIFT 0x0
+#define OTG3_OTG_M_CONST_DTO1__OTG_M_CONST_DTO_MODULO_MASK 0xFFFFFFFFL
+//OTG3_OTG_REQUEST_CONTROL
+#define OTG3_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE__SHIFT 0x0
+#define OTG3_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE_MASK 0x00000001L
+//OTG3_OTG_DSC_START_POSITION
+#define OTG3_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X__SHIFT 0x0
+#define OTG3_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM__SHIFT 0x10
+#define OTG3_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X_MASK 0x00007FFFL
+#define OTG3_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM_MASK 0x03FF0000L
+//OTG3_OTG_PIPE_UPDATE_STATUS
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING__SHIFT 0x0
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING__SHIFT 0x4
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING__SHIFT 0x8
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS__SHIFT 0x10
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_FLIP_PENDING_MASK 0x00000001L
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_DC_REG_UPDATE_PENDING_MASK 0x00000010L
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_CURSOR_UPDATE_PENDING_MASK 0x00000100L
+#define OTG3_OTG_PIPE_UPDATE_STATUS__OTG_VUPDATE_KEEPOUT_STATUS_MASK 0x00010000L
+//OTG3_OTG_SPARE_REGISTER
+#define OTG3_OTG_SPARE_REGISTER__OTG_SPARE_REG__SHIFT 0x0
+#define OTG3_OTG_SPARE_REGISTER__OTG_SPARE_REG_MASK 0xFFFFFFFFL
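+//
+// Usage note (editorial, not part of the generated header): every field above is
+// described by a <REG>__<FIELD>__SHIFT / <REG>__<FIELD>_MASK pair. Below is a
+// minimal sketch of how such pairs are conventionally combined to read or update
+// a field in a raw register value; `val`, `otg_get_field()` and `otg_set_field()`
+// are hypothetical names for illustration only (the driver itself uses helpers
+// along the lines of REG_GET_FIELD()/REG_SET_FIELD() built on the same pairs):
+//
+//   static inline u32 otg_get_field(u32 val, u32 mask, u32 shift)
+//   {
+//           /* isolate the field bits, then right-align them */
+//           return (val & mask) >> shift;
+//   }
+//
+//   static inline u32 otg_set_field(u32 val, u32 mask, u32 shift, u32 field)
+//   {
+//           /* clear the field, then insert the new value within the mask */
+//           return (val & ~mask) | ((field << shift) & mask);
+//   }
+//
+//   /* e.g. set OTG_CRC_EN in a previously read OTG3_OTG_CRC_CNTL value: */
+//   val = otg_set_field(val, OTG3_OTG_CRC_CNTL__OTG_CRC_EN_MASK,
+//                       OTG3_OTG_CRC_CNTL__OTG_CRC_EN__SHIFT, 1);
+//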
+
+
+// addressBlock: dce_dc_optc_otg_crc320_dispdec
+//OTG_CRC320_OTG_CRC0_DATA_R32
+#define OTG_CRC320_OTG_CRC0_DATA_R32__CRC0_R_CR32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC0_DATA_R32__CRC0_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC0_DATA_G32
+#define OTG_CRC320_OTG_CRC0_DATA_G32__CRC0_G_Y32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC0_DATA_G32__CRC0_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC0_DATA_B32
+#define OTG_CRC320_OTG_CRC0_DATA_B32__CRC0_B_CB32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC0_DATA_B32__CRC0_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC0_DATA_C32
+#define OTG_CRC320_OTG_CRC0_DATA_C32__CRC0_C32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC0_DATA_C32__CRC0_C32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC0_DATA_AES
+#define OTG_CRC320_OTG_CRC0_DATA_AES__CRC0_AES__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC0_DATA_AES__CRC0_AES_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC1_DATA_R32
+#define OTG_CRC320_OTG_CRC1_DATA_R32__CRC1_R_CR32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC1_DATA_R32__CRC1_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC1_DATA_G32
+#define OTG_CRC320_OTG_CRC1_DATA_G32__CRC1_G_Y32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC1_DATA_G32__CRC1_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC1_DATA_B32
+#define OTG_CRC320_OTG_CRC1_DATA_B32__CRC1_B_CB32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC1_DATA_B32__CRC1_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC1_DATA_C32
+#define OTG_CRC320_OTG_CRC1_DATA_C32__CRC1_C32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC1_DATA_C32__CRC1_C32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC2_DATA_R32
+#define OTG_CRC320_OTG_CRC2_DATA_R32__CRC2_R_CR32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC2_DATA_R32__CRC2_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC2_DATA_G32
+#define OTG_CRC320_OTG_CRC2_DATA_G32__CRC2_G_Y32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC2_DATA_G32__CRC2_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC2_DATA_B32
+#define OTG_CRC320_OTG_CRC2_DATA_B32__CRC2_B_CB32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC2_DATA_B32__CRC2_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC2_DATA_C32
+#define OTG_CRC320_OTG_CRC2_DATA_C32__CRC2_C32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC2_DATA_C32__CRC2_C32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC3_DATA_R32
+#define OTG_CRC320_OTG_CRC3_DATA_R32__CRC3_R_CR32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC3_DATA_R32__CRC3_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC3_DATA_G32
+#define OTG_CRC320_OTG_CRC3_DATA_G32__CRC3_G_Y32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC3_DATA_G32__CRC3_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC3_DATA_B32
+#define OTG_CRC320_OTG_CRC3_DATA_B32__CRC3_B_CB32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC3_DATA_B32__CRC3_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC320_OTG_CRC3_DATA_C32
+#define OTG_CRC320_OTG_CRC3_DATA_C32__CRC3_C32__SHIFT 0x0
+#define OTG_CRC320_OTG_CRC3_DATA_C32__CRC3_C32_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_otg_crc321_dispdec
+//OTG_CRC321_OTG_CRC0_DATA_R32
+#define OTG_CRC321_OTG_CRC0_DATA_R32__CRC0_R_CR32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC0_DATA_R32__CRC0_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC0_DATA_G32
+#define OTG_CRC321_OTG_CRC0_DATA_G32__CRC0_G_Y32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC0_DATA_G32__CRC0_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC0_DATA_B32
+#define OTG_CRC321_OTG_CRC0_DATA_B32__CRC0_B_CB32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC0_DATA_B32__CRC0_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC0_DATA_C32
+#define OTG_CRC321_OTG_CRC0_DATA_C32__CRC0_C32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC0_DATA_C32__CRC0_C32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC0_DATA_AES
+#define OTG_CRC321_OTG_CRC0_DATA_AES__CRC0_AES__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC0_DATA_AES__CRC0_AES_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC1_DATA_R32
+#define OTG_CRC321_OTG_CRC1_DATA_R32__CRC1_R_CR32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC1_DATA_R32__CRC1_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC1_DATA_G32
+#define OTG_CRC321_OTG_CRC1_DATA_G32__CRC1_G_Y32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC1_DATA_G32__CRC1_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC1_DATA_B32
+#define OTG_CRC321_OTG_CRC1_DATA_B32__CRC1_B_CB32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC1_DATA_B32__CRC1_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC1_DATA_C32
+#define OTG_CRC321_OTG_CRC1_DATA_C32__CRC1_C32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC1_DATA_C32__CRC1_C32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC2_DATA_R32
+#define OTG_CRC321_OTG_CRC2_DATA_R32__CRC2_R_CR32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC2_DATA_R32__CRC2_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC2_DATA_G32
+#define OTG_CRC321_OTG_CRC2_DATA_G32__CRC2_G_Y32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC2_DATA_G32__CRC2_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC2_DATA_B32
+#define OTG_CRC321_OTG_CRC2_DATA_B32__CRC2_B_CB32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC2_DATA_B32__CRC2_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC2_DATA_C32
+#define OTG_CRC321_OTG_CRC2_DATA_C32__CRC2_C32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC2_DATA_C32__CRC2_C32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC3_DATA_R32
+#define OTG_CRC321_OTG_CRC3_DATA_R32__CRC3_R_CR32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC3_DATA_R32__CRC3_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC3_DATA_G32
+#define OTG_CRC321_OTG_CRC3_DATA_G32__CRC3_G_Y32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC3_DATA_G32__CRC3_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC3_DATA_B32
+#define OTG_CRC321_OTG_CRC3_DATA_B32__CRC3_B_CB32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC3_DATA_B32__CRC3_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC321_OTG_CRC3_DATA_C32
+#define OTG_CRC321_OTG_CRC3_DATA_C32__CRC3_C32__SHIFT 0x0
+#define OTG_CRC321_OTG_CRC3_DATA_C32__CRC3_C32_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_otg_crc322_dispdec
+//OTG_CRC322_OTG_CRC0_DATA_R32
+#define OTG_CRC322_OTG_CRC0_DATA_R32__CRC0_R_CR32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC0_DATA_R32__CRC0_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC0_DATA_G32
+#define OTG_CRC322_OTG_CRC0_DATA_G32__CRC0_G_Y32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC0_DATA_G32__CRC0_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC0_DATA_B32
+#define OTG_CRC322_OTG_CRC0_DATA_B32__CRC0_B_CB32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC0_DATA_B32__CRC0_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC0_DATA_C32
+#define OTG_CRC322_OTG_CRC0_DATA_C32__CRC0_C32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC0_DATA_C32__CRC0_C32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC0_DATA_AES
+#define OTG_CRC322_OTG_CRC0_DATA_AES__CRC0_AES__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC0_DATA_AES__CRC0_AES_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC1_DATA_R32
+#define OTG_CRC322_OTG_CRC1_DATA_R32__CRC1_R_CR32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC1_DATA_R32__CRC1_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC1_DATA_G32
+#define OTG_CRC322_OTG_CRC1_DATA_G32__CRC1_G_Y32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC1_DATA_G32__CRC1_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC1_DATA_B32
+#define OTG_CRC322_OTG_CRC1_DATA_B32__CRC1_B_CB32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC1_DATA_B32__CRC1_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC1_DATA_C32
+#define OTG_CRC322_OTG_CRC1_DATA_C32__CRC1_C32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC1_DATA_C32__CRC1_C32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC2_DATA_R32
+#define OTG_CRC322_OTG_CRC2_DATA_R32__CRC2_R_CR32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC2_DATA_R32__CRC2_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC2_DATA_G32
+#define OTG_CRC322_OTG_CRC2_DATA_G32__CRC2_G_Y32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC2_DATA_G32__CRC2_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC2_DATA_B32
+#define OTG_CRC322_OTG_CRC2_DATA_B32__CRC2_B_CB32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC2_DATA_B32__CRC2_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC2_DATA_C32
+#define OTG_CRC322_OTG_CRC2_DATA_C32__CRC2_C32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC2_DATA_C32__CRC2_C32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC3_DATA_R32
+#define OTG_CRC322_OTG_CRC3_DATA_R32__CRC3_R_CR32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC3_DATA_R32__CRC3_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC3_DATA_G32
+#define OTG_CRC322_OTG_CRC3_DATA_G32__CRC3_G_Y32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC3_DATA_G32__CRC3_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC3_DATA_B32
+#define OTG_CRC322_OTG_CRC3_DATA_B32__CRC3_B_CB32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC3_DATA_B32__CRC3_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC322_OTG_CRC3_DATA_C32
+#define OTG_CRC322_OTG_CRC3_DATA_C32__CRC3_C32__SHIFT 0x0
+#define OTG_CRC322_OTG_CRC3_DATA_C32__CRC3_C32_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_otg_crc323_dispdec
+//OTG_CRC323_OTG_CRC0_DATA_R32
+#define OTG_CRC323_OTG_CRC0_DATA_R32__CRC0_R_CR32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC0_DATA_R32__CRC0_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC0_DATA_G32
+#define OTG_CRC323_OTG_CRC0_DATA_G32__CRC0_G_Y32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC0_DATA_G32__CRC0_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC0_DATA_B32
+#define OTG_CRC323_OTG_CRC0_DATA_B32__CRC0_B_CB32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC0_DATA_B32__CRC0_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC0_DATA_C32
+#define OTG_CRC323_OTG_CRC0_DATA_C32__CRC0_C32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC0_DATA_C32__CRC0_C32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC0_DATA_AES
+#define OTG_CRC323_OTG_CRC0_DATA_AES__CRC0_AES__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC0_DATA_AES__CRC0_AES_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC1_DATA_R32
+#define OTG_CRC323_OTG_CRC1_DATA_R32__CRC1_R_CR32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC1_DATA_R32__CRC1_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC1_DATA_G32
+#define OTG_CRC323_OTG_CRC1_DATA_G32__CRC1_G_Y32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC1_DATA_G32__CRC1_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC1_DATA_B32
+#define OTG_CRC323_OTG_CRC1_DATA_B32__CRC1_B_CB32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC1_DATA_B32__CRC1_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC1_DATA_C32
+#define OTG_CRC323_OTG_CRC1_DATA_C32__CRC1_C32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC1_DATA_C32__CRC1_C32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC2_DATA_R32
+#define OTG_CRC323_OTG_CRC2_DATA_R32__CRC2_R_CR32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC2_DATA_R32__CRC2_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC2_DATA_G32
+#define OTG_CRC323_OTG_CRC2_DATA_G32__CRC2_G_Y32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC2_DATA_G32__CRC2_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC2_DATA_B32
+#define OTG_CRC323_OTG_CRC2_DATA_B32__CRC2_B_CB32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC2_DATA_B32__CRC2_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC2_DATA_C32
+#define OTG_CRC323_OTG_CRC2_DATA_C32__CRC2_C32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC2_DATA_C32__CRC2_C32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC3_DATA_R32
+#define OTG_CRC323_OTG_CRC3_DATA_R32__CRC3_R_CR32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC3_DATA_R32__CRC3_R_CR32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC3_DATA_G32
+#define OTG_CRC323_OTG_CRC3_DATA_G32__CRC3_G_Y32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC3_DATA_G32__CRC3_G_Y32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC3_DATA_B32
+#define OTG_CRC323_OTG_CRC3_DATA_B32__CRC3_B_CB32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC3_DATA_B32__CRC3_B_CB32_MASK 0xFFFFFFFFL
+//OTG_CRC323_OTG_CRC3_DATA_C32
+#define OTG_CRC323_OTG_CRC3_DATA_C32__CRC3_C32__SHIFT 0x0
+#define OTG_CRC323_OTG_CRC3_DATA_C32__CRC3_C32_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_optc_misc_dispdec
+//GSL_SOURCE_SELECT
+#define GSL_SOURCE_SELECT__GSL0_READY_SOURCE_SEL__SHIFT 0x0
+#define GSL_SOURCE_SELECT__GSL1_READY_SOURCE_SEL__SHIFT 0x4
+#define GSL_SOURCE_SELECT__GSL2_READY_SOURCE_SEL__SHIFT 0x8
+#define GSL_SOURCE_SELECT__GSL_TIMING_SYNC_SEL__SHIFT 0x10
+#define GSL_SOURCE_SELECT__GSL0_READY_SOURCE_SEL_MASK 0x00000007L
+#define GSL_SOURCE_SELECT__GSL1_READY_SOURCE_SEL_MASK 0x00000070L
+#define GSL_SOURCE_SELECT__GSL2_READY_SOURCE_SEL_MASK 0x00000700L
+#define GSL_SOURCE_SELECT__GSL_TIMING_SYNC_SEL_MASK 0x00070000L
+//OPTC_DLPC_CONTROL
+#define OPTC_DLPC_CONTROL__OPTC_DLPC_SNAPSHOT_MUX__SHIFT 0x0
+#define OPTC_DLPC_CONTROL__OPTC_DLPC_SNAPSHOT_MUX_MASK 0x00000007L
+//OPTC_CLOCK_CONTROL
+#define OPTC_CLOCK_CONTROL__OPTC_DISPCLK_R_GATE_DIS__SHIFT 0x0
+#define OPTC_CLOCK_CONTROL__OPTC_DISPCLK_R_CLOCK_ON__SHIFT 0x1
+#define OPTC_CLOCK_CONTROL__OPTC_TEST_CLK_SEL__SHIFT 0x8
+#define OPTC_CLOCK_CONTROL__OPTC_FGCG_REP_DIS__SHIFT 0xf
+#define OPTC_CLOCK_CONTROL__OPTC_DISPCLK_R_GATE_DIS_MASK 0x00000001L
+#define OPTC_CLOCK_CONTROL__OPTC_DISPCLK_R_CLOCK_ON_MASK 0x00000002L
+#define OPTC_CLOCK_CONTROL__OPTC_TEST_CLK_SEL_MASK 0x00000F00L
+#define OPTC_CLOCK_CONTROL__OPTC_FGCG_REP_DIS_MASK 0x00008000L
+//ODM_MEM_PWR_CTRL
+#define ODM_MEM_PWR_CTRL__ODM_MEM0_PWR_FORCE__SHIFT 0x0
+#define ODM_MEM_PWR_CTRL__ODM_MEM0_PWR_DIS__SHIFT 0x2
+#define ODM_MEM_PWR_CTRL__ODM_MEM1_PWR_FORCE__SHIFT 0x4
+#define ODM_MEM_PWR_CTRL__ODM_MEM1_PWR_DIS__SHIFT 0x6
+#define ODM_MEM_PWR_CTRL__ODM_MEM2_PWR_FORCE__SHIFT 0x8
+#define ODM_MEM_PWR_CTRL__ODM_MEM2_PWR_DIS__SHIFT 0xa
+#define ODM_MEM_PWR_CTRL__ODM_MEM3_PWR_FORCE__SHIFT 0xc
+#define ODM_MEM_PWR_CTRL__ODM_MEM3_PWR_DIS__SHIFT 0xe
+#define ODM_MEM_PWR_CTRL__ODM_MEM4_PWR_FORCE__SHIFT 0x10
+#define ODM_MEM_PWR_CTRL__ODM_MEM4_PWR_DIS__SHIFT 0x12
+#define ODM_MEM_PWR_CTRL__ODM_MEM5_PWR_FORCE__SHIFT 0x14
+#define ODM_MEM_PWR_CTRL__ODM_MEM5_PWR_DIS__SHIFT 0x16
+#define ODM_MEM_PWR_CTRL__ODM_MEM6_PWR_FORCE__SHIFT 0x18
+#define ODM_MEM_PWR_CTRL__ODM_MEM6_PWR_DIS__SHIFT 0x1a
+#define ODM_MEM_PWR_CTRL__ODM_MEM7_PWR_FORCE__SHIFT 0x1c
+#define ODM_MEM_PWR_CTRL__ODM_MEM7_PWR_DIS__SHIFT 0x1e
+#define ODM_MEM_PWR_CTRL__ODM_MEM0_PWR_FORCE_MASK 0x00000003L
+#define ODM_MEM_PWR_CTRL__ODM_MEM0_PWR_DIS_MASK 0x00000004L
+#define ODM_MEM_PWR_CTRL__ODM_MEM1_PWR_FORCE_MASK 0x00000030L
+#define ODM_MEM_PWR_CTRL__ODM_MEM1_PWR_DIS_MASK 0x00000040L
+#define ODM_MEM_PWR_CTRL__ODM_MEM2_PWR_FORCE_MASK 0x00000300L
+#define ODM_MEM_PWR_CTRL__ODM_MEM2_PWR_DIS_MASK 0x00000400L
+#define ODM_MEM_PWR_CTRL__ODM_MEM3_PWR_FORCE_MASK 0x00003000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM3_PWR_DIS_MASK 0x00004000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM4_PWR_FORCE_MASK 0x00030000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM4_PWR_DIS_MASK 0x00040000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM5_PWR_FORCE_MASK 0x00300000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM5_PWR_DIS_MASK 0x00400000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM6_PWR_FORCE_MASK 0x03000000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM6_PWR_DIS_MASK 0x04000000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM7_PWR_FORCE_MASK 0x30000000L
+#define ODM_MEM_PWR_CTRL__ODM_MEM7_PWR_DIS_MASK 0x40000000L
+//ODM_MEM_PWR_CTRL3
+#define ODM_MEM_PWR_CTRL3__ODM_MEM_UNASSIGNED_PWR_MODE__SHIFT 0x0
+#define ODM_MEM_PWR_CTRL3__ODM_MEM_VBLANK_PWR_MODE__SHIFT 0x2
+#define ODM_MEM_PWR_CTRL3__ODM_MEM_UNASSIGNED_PWR_MODE_MASK 0x00000003L
+#define ODM_MEM_PWR_CTRL3__ODM_MEM_VBLANK_PWR_MODE_MASK 0x0000000CL
+//ODM_MEM_PWR_STATUS
+#define ODM_MEM_PWR_STATUS__ODM_MEM0_PWR_STATE__SHIFT 0x0
+#define ODM_MEM_PWR_STATUS__ODM_MEM1_PWR_STATE__SHIFT 0x2
+#define ODM_MEM_PWR_STATUS__ODM_MEM2_PWR_STATE__SHIFT 0x4
+#define ODM_MEM_PWR_STATUS__ODM_MEM3_PWR_STATE__SHIFT 0x6
+#define ODM_MEM_PWR_STATUS__ODM_MEM4_PWR_STATE__SHIFT 0x8
+#define ODM_MEM_PWR_STATUS__ODM_MEM5_PWR_STATE__SHIFT 0xa
+#define ODM_MEM_PWR_STATUS__ODM_MEM6_PWR_STATE__SHIFT 0xc
+#define ODM_MEM_PWR_STATUS__ODM_MEM7_PWR_STATE__SHIFT 0xe
+#define ODM_MEM_PWR_STATUS__ODM_MEM0_PWR_STATE_MASK 0x00000003L
+#define ODM_MEM_PWR_STATUS__ODM_MEM1_PWR_STATE_MASK 0x0000000CL
+#define ODM_MEM_PWR_STATUS__ODM_MEM2_PWR_STATE_MASK 0x00000030L
+#define ODM_MEM_PWR_STATUS__ODM_MEM3_PWR_STATE_MASK 0x000000C0L
+#define ODM_MEM_PWR_STATUS__ODM_MEM4_PWR_STATE_MASK 0x00000300L
+#define ODM_MEM_PWR_STATUS__ODM_MEM5_PWR_STATE_MASK 0x00000C00L
+#define ODM_MEM_PWR_STATUS__ODM_MEM6_PWR_STATE_MASK 0x00003000L
+#define ODM_MEM_PWR_STATUS__ODM_MEM7_PWR_STATE_MASK 0x0000C000L
+//OPTC_MISC_SPARE_REGISTER
+#define OPTC_MISC_SPARE_REGISTER__OPTC_MISC_SPARE_REG__SHIFT 0x0
+#define OPTC_MISC_SPARE_REGISTER__OPTC_MISC_SPARE_REG_MASK 0x000000FFL
+
+
+// addressBlock: dce_dc_optc_optc_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON17_PERFCOUNTER_CNTL
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON17_PERFCOUNTER_CNTL2
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON17_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON17_PERFCOUNTER_STATE
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON17_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON17_PERFMON_CNTL
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON17_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON17_PERFMON_CNTL2
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON17_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON17_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON17_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON17_PERFMON_CVALUE_LOW
+#define DC_PERFMON17_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON17_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON17_PERFMON_HI
+#define DC_PERFMON17_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON17_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON17_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON17_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON17_PERFMON_LOW
+#define DC_PERFMON17_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON17_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dio_hpd0_dispdec
+//HPD0_DC_HPD_INT_STATUS
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS__SHIFT 0x0
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT 0x1
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS__SHIFT 0x8
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0xc
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x18
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS_MASK 0x00000001L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK 0x00000002L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS_MASK 0x00000100L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000FF000L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xFF000000L
+//HPD0_DC_HPD_INT_CONTROL
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK__SHIFT 0x0
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_EN__SHIFT 0x10
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK__SHIFT 0x14
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN__SHIFT 0x18
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK 0x00000001L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK 0x00010000L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK 0x00100000L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK 0x01000000L
+//HPD0_DC_HPD_CONTROL
+#define HPD0_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER__SHIFT 0x0
+#define HPD0_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER__SHIFT 0x10
+#define HPD0_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#define HPD0_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER_MASK 0x00001FFFL
+#define HPD0_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER_MASK 0x03FF0000L
+#define HPD0_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L
+//HPD0_DC_HPD_FAST_TRAIN_CNTL
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY__SHIFT 0x0
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY__SHIFT 0xc
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN__SHIFT 0x18
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN__SHIFT 0x1c
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY_MASK 0x000000FFL
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY_MASK 0x000FF000L
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+//HPD0_DC_HPD_TOGGLE_FILT_CNTL
+#define HPD0_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY__SHIFT 0x0
+#define HPD0_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY__SHIFT 0x14
+#define HPD0_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY_MASK 0x000000FFL
+#define HPD0_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY_MASK 0x0FF00000L
+
+
+// addressBlock: dce_dc_dio_hpd1_dispdec
+//HPD1_DC_HPD_INT_STATUS
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS__SHIFT 0x0
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT 0x1
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS__SHIFT 0x8
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0xc
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x18
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS_MASK 0x00000001L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK 0x00000002L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS_MASK 0x00000100L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000FF000L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xFF000000L
+//HPD1_DC_HPD_INT_CONTROL
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK__SHIFT 0x0
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_EN__SHIFT 0x10
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK__SHIFT 0x14
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN__SHIFT 0x18
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK 0x00000001L
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK 0x00010000L
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK 0x00100000L
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK 0x01000000L
+//HPD1_DC_HPD_CONTROL
+#define HPD1_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER__SHIFT 0x0
+#define HPD1_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER__SHIFT 0x10
+#define HPD1_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#define HPD1_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER_MASK 0x00001FFFL
+#define HPD1_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER_MASK 0x03FF0000L
+#define HPD1_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L
+//HPD1_DC_HPD_FAST_TRAIN_CNTL
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY__SHIFT 0x0
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY__SHIFT 0xc
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN__SHIFT 0x18
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN__SHIFT 0x1c
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY_MASK 0x000000FFL
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY_MASK 0x000FF000L
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+//HPD1_DC_HPD_TOGGLE_FILT_CNTL
+#define HPD1_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY__SHIFT 0x0
+#define HPD1_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY__SHIFT 0x14
+#define HPD1_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY_MASK 0x000000FFL
+#define HPD1_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY_MASK 0x0FF00000L
+
+
+// addressBlock: dce_dc_dio_hpd2_dispdec
+//HPD2_DC_HPD_INT_STATUS
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS__SHIFT 0x0
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT 0x1
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS__SHIFT 0x8
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0xc
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x18
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS_MASK 0x00000001L
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK 0x00000002L
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS_MASK 0x00000100L
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000FF000L
+#define HPD2_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xFF000000L
+//HPD2_DC_HPD_INT_CONTROL
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK__SHIFT 0x0
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_INT_EN__SHIFT 0x10
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK__SHIFT 0x14
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN__SHIFT 0x18
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK 0x00000001L
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK 0x00010000L
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK 0x00100000L
+#define HPD2_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK 0x01000000L
+//HPD2_DC_HPD_CONTROL
+#define HPD2_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER__SHIFT 0x0
+#define HPD2_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER__SHIFT 0x10
+#define HPD2_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#define HPD2_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER_MASK 0x00001FFFL
+#define HPD2_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER_MASK 0x03FF0000L
+#define HPD2_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L
+//HPD2_DC_HPD_FAST_TRAIN_CNTL
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY__SHIFT 0x0
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY__SHIFT 0xc
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN__SHIFT 0x18
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN__SHIFT 0x1c
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY_MASK 0x000000FFL
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY_MASK 0x000FF000L
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define HPD2_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+//HPD2_DC_HPD_TOGGLE_FILT_CNTL
+#define HPD2_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY__SHIFT 0x0
+#define HPD2_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY__SHIFT 0x14
+#define HPD2_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY_MASK 0x000000FFL
+#define HPD2_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY_MASK 0x0FF00000L
+
+
+// addressBlock: dce_dc_dio_hpd3_dispdec
+//HPD3_DC_HPD_INT_STATUS
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS__SHIFT 0x0
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT 0x1
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS__SHIFT 0x8
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0xc
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x18
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS_MASK 0x00000001L
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK 0x00000002L
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS_MASK 0x00000100L
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000FF000L
+#define HPD3_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xFF000000L
+//HPD3_DC_HPD_INT_CONTROL
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK__SHIFT 0x0
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_INT_EN__SHIFT 0x10
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK__SHIFT 0x14
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN__SHIFT 0x18
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK 0x00000001L
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK 0x00010000L
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK 0x00100000L
+#define HPD3_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK 0x01000000L
+//HPD3_DC_HPD_CONTROL
+#define HPD3_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER__SHIFT 0x0
+#define HPD3_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER__SHIFT 0x10
+#define HPD3_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#define HPD3_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER_MASK 0x00001FFFL
+#define HPD3_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER_MASK 0x03FF0000L
+#define HPD3_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L
+//HPD3_DC_HPD_FAST_TRAIN_CNTL
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY__SHIFT 0x0
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY__SHIFT 0xc
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN__SHIFT 0x18
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN__SHIFT 0x1c
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY_MASK 0x000000FFL
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY_MASK 0x000FF000L
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define HPD3_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+//HPD3_DC_HPD_TOGGLE_FILT_CNTL
+#define HPD3_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY__SHIFT 0x0
+#define HPD3_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY__SHIFT 0x14
+#define HPD3_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY_MASK 0x000000FFL
+#define HPD3_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY_MASK 0x0FF00000L
+
+
+// addressBlock: dce_dc_dio_hpd4_dispdec
+//HPD4_DC_HPD_INT_STATUS
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS__SHIFT 0x0
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT 0x1
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS__SHIFT 0x8
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0xc
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x18
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS_MASK 0x00000001L
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK 0x00000002L
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS_MASK 0x00000100L
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000FF000L
+#define HPD4_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xFF000000L
+//HPD4_DC_HPD_INT_CONTROL
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK__SHIFT 0x0
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_INT_EN__SHIFT 0x10
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK__SHIFT 0x14
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN__SHIFT 0x18
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK 0x00000001L
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK 0x00010000L
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK 0x00100000L
+#define HPD4_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK 0x01000000L
+//HPD4_DC_HPD_CONTROL
+#define HPD4_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER__SHIFT 0x0
+#define HPD4_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER__SHIFT 0x10
+#define HPD4_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#define HPD4_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER_MASK 0x00001FFFL
+#define HPD4_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER_MASK 0x03FF0000L
+#define HPD4_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L
+//HPD4_DC_HPD_FAST_TRAIN_CNTL
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY__SHIFT 0x0
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY__SHIFT 0xc
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN__SHIFT 0x18
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN__SHIFT 0x1c
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY_MASK 0x000000FFL
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY_MASK 0x000FF000L
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define HPD4_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+//HPD4_DC_HPD_TOGGLE_FILT_CNTL
+#define HPD4_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY__SHIFT 0x0
+#define HPD4_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY__SHIFT 0x14
+#define HPD4_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY_MASK 0x000000FFL
+#define HPD4_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY_MASK 0x0FF00000L
+
+
+// addressBlock: dce_dc_dio_dp0_dispdec
+//DP0_DP_LINK_CNTL
+#define DP0_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x4
+#define DP0_DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x8
+#define DP0_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x00000010L
+#define DP0_DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x00000100L
+//DP0_DP_PIXEL_FORMAT
+#define DP0_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x0
+#define DP0_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x18
+#define DP0_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE__SHIFT 0x1e
+#define DP0_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x00000007L
+#define DP0_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x07000000L
+#define DP0_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE_MASK 0x40000000L
+//DP0_DP_MSA_COLORIMETRY
+#define DP0_DP_MSA_COLORIMETRY__DP_MSA_MISC0__SHIFT 0x18
+#define DP0_DP_MSA_COLORIMETRY__DP_MSA_MISC0_MASK 0xFF000000L
+//DP0_DP_CONFIG
+#define DP0_DP_CONFIG__DP_UDI_LANES__SHIFT 0x0
+#define DP0_DP_CONFIG__DP_UDI_LANES_MASK 0x00000003L
+//DP0_DP_VID_STREAM_CNTL
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x0
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x8
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x10
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x14
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x00000300L
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x00010000L
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x00100000L
+//DP0_DP_STEER_FIFO
+#define DP0_DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x0
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x4
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x5
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x6
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x7
+#define DP0_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x8
+#define DP0_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0xc
+#define DP0_DP_STEER_FIFO__DP_TU_SIZE__SHIFT 0x18
+#define DP0_DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x00000001L
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x00000010L
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x00000020L
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x00000040L
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x00000080L
+#define DP0_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x00000100L
+#define DP0_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x00001000L
+#define DP0_DP_STEER_FIFO__DP_TU_SIZE_MASK 0x3F000000L
+//DP0_DP_MSA_MISC
+#define DP0_DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x0
+#define DP0_DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x8
+#define DP0_DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x10
+#define DP0_DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x18
+#define DP0_DP_MSA_MISC__DP_MSA_MISC1_MASK 0x000000FFL
+#define DP0_DP_MSA_MISC__DP_MSA_MISC2_MASK 0x0000FF00L
+#define DP0_DP_MSA_MISC__DP_MSA_MISC3_MASK 0x00FF0000L
+#define DP0_DP_MSA_MISC__DP_MSA_MISC4_MASK 0xFF000000L
+//DP0_DP_DPHY_INTERNAL_CTRL
+#define DP0_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN__SHIFT 0x0
+#define DP0_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL__SHIFT 0x4
+#define DP0_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN_MASK 0x00000001L
+#define DP0_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL_MASK 0x00000010L
+//DP0_DP_VID_TIMING
+#define DP0_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE__SHIFT 0x4
+#define DP0_DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x8
+#define DP0_DP_VID_TIMING__DP_VID_N_MUL__SHIFT 0xa
+#define DP0_DP_VID_TIMING__DP_VID_M_DIV__SHIFT 0xc
+#define DP0_DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x18
+#define DP0_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE_MASK 0x00000010L
+#define DP0_DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x00000100L
+#define DP0_DP_VID_TIMING__DP_VID_N_MUL_MASK 0x00000C00L
+#define DP0_DP_VID_TIMING__DP_VID_M_DIV_MASK 0x00003000L
+#define DP0_DP_VID_TIMING__DP_VID_N_DIV_MASK 0xFF000000L
+//DP0_DP_VID_N
+#define DP0_DP_VID_N__DP_VID_N__SHIFT 0x0
+#define DP0_DP_VID_N__DP_VID_N_MASK 0x00FFFFFFL
+//DP0_DP_VID_M
+#define DP0_DP_VID_M__DP_VID_M__SHIFT 0x0
+#define DP0_DP_VID_M__DP_VID_M_MASK 0x00FFFFFFL
+//DP0_DP_LINK_FRAMING_CNTL
+#define DP0_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x0
+#define DP0_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE__SHIFT 0x14
+#define DP0_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x18
+#define DP0_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x1c
+#define DP0_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x0003FFFFL
+#define DP0_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE_MASK 0x00100000L
+#define DP0_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x01000000L
+#define DP0_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000L
+//DP0_DP_HBR2_EYE_PATTERN
+#define DP0_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x0
+#define DP0_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x00000001L
+//DP0_DP_VID_MSA_VBID
+#define DP0_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x0
+#define DP0_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE__SHIFT 0xc
+#define DP0_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x18
+#define DP0_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0x00000FFFL
+#define DP0_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE_MASK 0x00001000L
+#define DP0_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x01000000L
+//DP0_DP_VID_INTERRUPT_CNTL
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x0
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x1
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x2
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x00000001L
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x00000002L
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x00000004L
+//DP0_DP_DPHY_CNTL
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x0
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x1
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x2
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x3
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_EN__SHIFT 0x4
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW__SHIFT 0x5
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS__SHIFT 0x6
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM__SHIFT 0x7
+#define DP0_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL__SHIFT 0x8
+#define DP0_DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x10
+#define DP0_DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x18
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x00000001L
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x00000002L
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x00000004L
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x00000008L
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_EN_MASK 0x00000010L
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW_MASK 0x00000020L
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS_MASK 0x00000040L
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM_MASK 0x00000080L
+#define DP0_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL_MASK 0x00000100L
+#define DP0_DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x00010000L
+#define DP0_DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x01000000L
+//DP0_DP_DPHY_TRAINING_PATTERN_SEL
+#define DP0_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x0
+#define DP0_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x00000003L
+//DP0_DP_DPHY_SYM0
+#define DP0_DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x0
+#define DP0_DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0xa
+#define DP0_DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x14
+#define DP0_DP_DPHY_SYM0__DPHY_SYM1_MASK 0x000003FFL
+#define DP0_DP_DPHY_SYM0__DPHY_SYM2_MASK 0x000FFC00L
+#define DP0_DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3FF00000L
+//DP0_DP_DPHY_SYM1
+#define DP0_DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x0
+#define DP0_DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0xa
+#define DP0_DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x14
+#define DP0_DP_DPHY_SYM1__DPHY_SYM4_MASK 0x000003FFL
+#define DP0_DP_DPHY_SYM1__DPHY_SYM5_MASK 0x000FFC00L
+#define DP0_DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3FF00000L
+//DP0_DP_DPHY_SYM2
+#define DP0_DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x0
+#define DP0_DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0xa
+#define DP0_DP_DPHY_SYM2__DPHY_SYM7_MASK 0x000003FFL
+#define DP0_DP_DPHY_SYM2__DPHY_SYM8_MASK 0x000FFC00L
+//DP0_DP_DPHY_8B10B_CNTL
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x8
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x10
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x18
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x00000100L
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x00010000L
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x01000000L
+//DP0_DP_DPHY_PRBS_CNTL
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x0
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x00000001L
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x00000030L
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7FFFFF00L
+//DP0_DP_DPHY_SCRAM_CNTL
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS__SHIFT 0x0
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE__SHIFT 0x18
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS_MASK 0x00000001L
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x00000010L
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x0003FF00L
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE_MASK 0x01000000L
+//DP0_DP_DPHY_CRC_CONTROL0
+#define DP0_DP_DPHY_CRC_CONTROL0__DPHY_CRC_EN__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_CONTROL0__DPHY_CRC_CONT_EN__SHIFT 0x4
+#define DP0_DP_DPHY_CRC_CONTROL0__DPHY_CRC_FIELD__SHIFT 0x8
+#define DP0_DP_DPHY_CRC_CONTROL0__DPHY_CRC_EN_MASK 0x00000001L
+#define DP0_DP_DPHY_CRC_CONTROL0__DPHY_CRC_CONT_EN_MASK 0x00000010L
+#define DP0_DP_DPHY_CRC_CONTROL0__DPHY_CRC_FIELD_MASK 0x00000100L
+//DP0_DP_DPHY_CRC_CONTROL1
+#define DP0_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x8
+#define DP0_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_FIRST_SLOT_MASK 0x0000003FL
+#define DP0_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_LAST_SLOT_MASK 0x00003F00L
+//DP0_DP_DPHY_CRC_RESULT0
+#define DP0_DP_DPHY_CRC_RESULT0__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_RESULT0__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP0_DP_DPHY_CRC_RESULT1
+#define DP0_DP_DPHY_CRC_RESULT1__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_RESULT1__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP0_DP_DPHY_CRC_STATUS
+#define DP0_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_ACK__SHIFT 0x4
+#define DP0_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x8
+#define DP0_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0xc
+#define DP0_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x10
+#define DP0_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_MASK 0x00000001L
+#define DP0_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_ACK_MASK 0x00000010L
+#define DP0_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x00000100L
+#define DP0_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x00001000L
+#define DP0_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x00010000L
+//DP0_DP_DPHY_FAST_TRAINING
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x0
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x1
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x2
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING__SHIFT 0x4
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x8
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x14
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x00000001L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x00000002L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x00000004L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING_MASK 0x00000010L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0x000FFF00L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xFFF00000L
+//DP0_DP_DPHY_FAST_TRAINING_STATUS
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x0
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x4
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x8
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0xc
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x00000007L
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x00000010L
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x00000100L
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x00001000L
+//DP0_DP_DPHY_CRC_RESULT2
+#define DP0_DP_DPHY_CRC_RESULT2__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_RESULT2__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP0_DP_DPHY_CRC_RESULT3
+#define DP0_DP_DPHY_CRC_RESULT3__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_RESULT3__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP0_DP_SEC_CNTL
+#define DP0_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x0
+#define DP0_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
+#define DP0_DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x8
+#define DP0_DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0xc
+#define DP0_DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x10
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x14
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x15
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x16
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x17
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE__SHIFT 0x18
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE__SHIFT 0x19
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE__SHIFT 0x1a
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE__SHIFT 0x1b
+#define DP0_DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x1c
+#define DP0_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x00000001L
+#define DP0_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x00000010L
+#define DP0_DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x00000100L
+#define DP0_DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x00001000L
+#define DP0_DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x00010000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x00100000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x00200000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x00400000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x00800000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE_MASK 0x01000000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE_MASK 0x02000000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE_MASK 0x04000000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE_MASK 0x08000000L
+#define DP0_DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000L
+//DP0_DP_SEC_CNTL1
+#define DP0_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x0
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE__SHIFT 0x1
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY__SHIFT 0x4
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND__SHIFT 0x5
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING__SHIFT 0x6
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED__SHIFT 0x7
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE__SHIFT 0x8
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE__SHIFT 0x9
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE__SHIFT 0xa
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE__SHIFT 0xb
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE__SHIFT 0xc
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE__SHIFT 0xd
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE__SHIFT 0xe
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE__SHIFT 0xf
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM__SHIFT 0x10
+#define DP0_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x00000001L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE_MASK 0x00000002L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY_MASK 0x00000010L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_MASK 0x00000020L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING_MASK 0x00000040L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED_MASK 0x00000080L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE_MASK 0x00000100L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE_MASK 0x00000200L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE_MASK 0x00000400L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE_MASK 0x00000800L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE_MASK 0x00001000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE_MASK 0x00002000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE_MASK 0x00004000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE_MASK 0x00008000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_SEC_FRAMING1
+#define DP0_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x0
+#define DP0_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP0_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0x00000FFFL
+#define DP0_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP0_DP_SEC_FRAMING2
+#define DP0_DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x0
+#define DP0_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP0_DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0x0000FFFFL
+#define DP0_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP0_DP_SEC_FRAMING3
+#define DP0_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x0
+#define DP0_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP0_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x00003FFFL
+#define DP0_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP0_DP_SEC_FRAMING4
+#define DP0_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING__SHIFT 0x0
+#define DP0_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x14
+#define DP0_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x18
+#define DP0_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x1c
+#define DP0_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP0_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING_MASK 0x00000001L
+#define DP0_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x00100000L
+#define DP0_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x01000000L
+#define DP0_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000L
+#define DP0_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP0_DP_SEC_AUD_N
+#define DP0_DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x0
+#define DP0_DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0x00FFFFFFL
+//DP0_DP_SEC_AUD_N_READBACK
+#define DP0_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x0
+#define DP0_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0x00FFFFFFL
+//DP0_DP_SEC_AUD_M
+#define DP0_DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x0
+#define DP0_DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0x00FFFFFFL
+//DP0_DP_SEC_AUD_M_READBACK
+#define DP0_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x0
+#define DP0_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0x00FFFFFFL
+//DP0_DP_SEC_TIMESTAMP
+#define DP0_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x0
+#define DP0_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x00000001L
+//DP0_DP_SEC_PACKET_CNTL
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x1
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x4
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x8
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x10
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0x0000000EL
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x00000010L
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x00003F00L
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x00010000L
+//DP0_DP_MSE_RATE_CNTL
+#define DP0_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x0
+#define DP0_DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x1a
+#define DP0_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x03FFFFFFL
+#define DP0_DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xFC000000L
+//DP0_DP_MSE_RATE_UPDATE
+#define DP0_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x0
+#define DP0_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x00000001L
+//DP0_DP_MSE_SAT0
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x0
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0__SHIFT 0x4
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0__SHIFT 0x5
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x8
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x10
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1__SHIFT 0x14
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1__SHIFT 0x15
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x18
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x00000007L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0_MASK 0x00000010L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0_MASK 0x00000020L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x00070000L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1_MASK 0x00100000L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1_MASK 0x00200000L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3F000000L
+//DP0_DP_MSE_SAT1
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x0
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2__SHIFT 0x4
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2__SHIFT 0x5
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x8
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x10
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3__SHIFT 0x14
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3__SHIFT 0x15
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x18
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x00000007L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2_MASK 0x00000010L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2_MASK 0x00000020L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x00070000L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3_MASK 0x00100000L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3_MASK 0x00200000L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3F000000L
+//DP0_DP_MSE_SAT2
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x0
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4__SHIFT 0x4
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4__SHIFT 0x5
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x8
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x10
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5__SHIFT 0x14
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5__SHIFT 0x15
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x18
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x00000007L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4_MASK 0x00000010L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4_MASK 0x00000020L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x00070000L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5_MASK 0x00100000L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5_MASK 0x00200000L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3F000000L
+//DP0_DP_MSE_SAT_UPDATE
+#define DP0_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x0
+#define DP0_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x8
+#define DP0_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x00000003L
+#define DP0_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x00000100L
+//DP0_DP_MSE_LINK_TIMING
+#define DP0_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x0
+#define DP0_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x10
+#define DP0_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x000003FFL
+#define DP0_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x00030000L
+//DP0_DP_MSE_MISC_CNTL
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x0
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x4
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x8
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x00000001L
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x00000010L
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x00000100L
+//DP0_DP_DPHY_BS_SR_SWAP_CNTL
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE__SHIFT 0xf
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START__SHIFT 0x10
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x000003FFL
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x00008000L
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START_MASK 0x00010000L
+//DP0_DP_DPHY_HBR2_PATTERN_CONTROL
+#define DP0_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL__SHIFT 0x0
+#define DP0_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL_MASK 0x00000007L
+//DP0_DP_CP_ENCRYPTION_CONTROL
+//DP0_DP_MSE_SAT0_STATUS
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS__SHIFT 0x0
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS__SHIFT 0x4
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS__SHIFT 0x5
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS__SHIFT 0x8
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS__SHIFT 0x10
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS__SHIFT 0x14
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS__SHIFT 0x15
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS__SHIFT 0x18
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS_MASK 0x00000007L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS_MASK 0x00000010L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS_MASK 0x00000020L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS_MASK 0x00070000L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS_MASK 0x00100000L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS_MASK 0x00200000L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS_MASK 0x3F000000L
+//DP0_DP_MSE_SAT1_STATUS
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS__SHIFT 0x0
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS__SHIFT 0x4
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS__SHIFT 0x5
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS__SHIFT 0x8
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS__SHIFT 0x10
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS__SHIFT 0x14
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS__SHIFT 0x15
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS__SHIFT 0x18
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS_MASK 0x00000007L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS_MASK 0x00000010L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS_MASK 0x00000020L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS_MASK 0x00070000L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS_MASK 0x00100000L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS_MASK 0x00200000L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS_MASK 0x3F000000L
+//DP0_DP_MSE_SAT2_STATUS
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS__SHIFT 0x0
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS__SHIFT 0x4
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS__SHIFT 0x5
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS__SHIFT 0x8
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS__SHIFT 0x10
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS__SHIFT 0x14
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS__SHIFT 0x15
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS__SHIFT 0x18
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS_MASK 0x00000007L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS_MASK 0x00000010L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS_MASK 0x00000020L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS_MASK 0x00070000L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS_MASK 0x00100000L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS_MASK 0x00200000L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS_MASK 0x3F000000L
+//DP0_DP_DPIA_SPARE
+#define DP0_DP_DPIA_SPARE__DP_DPIA_SPARE__SHIFT 0x0
+#define DP0_DP_DPIA_SPARE__DP_DPIA_SPARE_MASK 0x00000003L
+//DP0_DP_MSA_TIMING_PARAM1
+#define DP0_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL__SHIFT 0x0
+#define DP0_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL__SHIFT 0x10
+#define DP0_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL_MASK 0x0000FFFFL
+#define DP0_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL_MASK 0xFFFF0000L
+//DP0_DP_MSA_TIMING_PARAM2
+#define DP0_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART__SHIFT 0x0
+#define DP0_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART__SHIFT 0x10
+#define DP0_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART_MASK 0x0000FFFFL
+#define DP0_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART_MASK 0xFFFF0000L
+//DP0_DP_MSA_TIMING_PARAM3
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH__SHIFT 0x0
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY__SHIFT 0xf
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH__SHIFT 0x10
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY__SHIFT 0x1f
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH_MASK 0x00007FFFL
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY_MASK 0x00008000L
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH_MASK 0x7FFF0000L
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY_MASK 0x80000000L
+//DP0_DP_MSA_TIMING_PARAM4
+#define DP0_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT__SHIFT 0x0
+#define DP0_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH__SHIFT 0x10
+#define DP0_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT_MASK 0x0000FFFFL
+#define DP0_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH_MASK 0xFFFF0000L
+//DP0_DP_MSO_CNTL
+#define DP0_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK__SHIFT 0x0
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE__SHIFT 0x4
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE__SHIFT 0x8
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE__SHIFT 0xc
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE__SHIFT 0x10
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE__SHIFT 0x14
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE__SHIFT 0x18
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE__SHIFT 0x1c
+#define DP0_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK_MASK 0x00000003L
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE_MASK 0x000000F0L
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE_MASK 0x00000F00L
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE_MASK 0x0000F000L
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE_MASK 0x000F0000L
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE_MASK 0x00F00000L
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE_MASK 0x0F000000L
+#define DP0_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE_MASK 0xF0000000L
+//DP0_DP_MSO_CNTL1
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE__SHIFT 0x0
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE__SHIFT 0x4
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE__SHIFT 0x8
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE__SHIFT 0xc
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE__SHIFT 0x10
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE__SHIFT 0x14
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE__SHIFT 0x18
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE__SHIFT 0x1c
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE_MASK 0x0000000FL
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE_MASK 0x000000F0L
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE_MASK 0x00000F00L
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE_MASK 0x0000F000L
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE_MASK 0x000F0000L
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE_MASK 0x00F00000L
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE_MASK 0x0F000000L
+#define DP0_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE_MASK 0xF0000000L
+//DP0_DP_DSC_CNTL
+#define DP0_DP_DSC_CNTL__DP_DSC_MODE__SHIFT 0x0
+#define DP0_DP_DSC_CNTL__DP_DSC_MODE_MASK 0x00000001L
+//DP0_DP_SEC_CNTL2
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND__SHIFT 0x0
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING__SHIFT 0x1
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED__SHIFT 0x2
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE__SHIFT 0x3
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND__SHIFT 0x4
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING__SHIFT 0x5
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED__SHIFT 0x6
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE__SHIFT 0x7
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND__SHIFT 0x8
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING__SHIFT 0x9
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED__SHIFT 0xa
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE__SHIFT 0xb
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND__SHIFT 0xc
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING__SHIFT 0xd
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE__SHIFT 0xf
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND__SHIFT 0x10
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING__SHIFT 0x11
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED__SHIFT 0x12
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE__SHIFT 0x13
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND__SHIFT 0x14
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING__SHIFT 0x15
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED__SHIFT 0x16
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE__SHIFT 0x17
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND__SHIFT 0x18
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING__SHIFT 0x19
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED__SHIFT 0x1a
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE__SHIFT 0x1b
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP11_PPS__SHIFT 0x1c
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_MASK 0x00000001L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING_MASK 0x00000002L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED_MASK 0x00000004L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE_MASK 0x00000008L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_MASK 0x00000010L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING_MASK 0x00000020L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED_MASK 0x00000040L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE_MASK 0x00000080L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_MASK 0x00000100L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING_MASK 0x00000200L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED_MASK 0x00000400L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE_MASK 0x00000800L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_MASK 0x00001000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING_MASK 0x00002000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE_MASK 0x00008000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_MASK 0x00010000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING_MASK 0x00020000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED_MASK 0x00040000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE_MASK 0x00080000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_MASK 0x00100000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING_MASK 0x00200000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED_MASK 0x00400000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE_MASK 0x00800000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_MASK 0x01000000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING_MASK 0x02000000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED_MASK 0x04000000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE_MASK 0x08000000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP11_PPS_MASK 0x10000000L
+//DP0_DP_SEC_CNTL3
+#define DP0_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT 0x0
+#define DP0_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM__SHIFT 0x10
+#define DP0_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK 0x0000FFFFL
+#define DP0_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_SEC_CNTL4
+#define DP0_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM__SHIFT 0x0
+#define DP0_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM__SHIFT 0x10
+#define DP0_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM_MASK 0x0000FFFFL
+#define DP0_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_SEC_CNTL5
+#define DP0_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM__SHIFT 0x0
+#define DP0_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM__SHIFT 0x10
+#define DP0_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM_MASK 0x0000FFFFL
+#define DP0_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_SEC_CNTL6
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM__SHIFT 0x0
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE__SHIFT 0x10
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE__SHIFT 0x11
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE__SHIFT 0x12
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE__SHIFT 0x13
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE__SHIFT 0x14
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE__SHIFT 0x15
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE__SHIFT 0x16
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE__SHIFT 0x17
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE__SHIFT 0x18
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE__SHIFT 0x19
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE__SHIFT 0x1a
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE__SHIFT 0x1b
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM_MASK 0x0000FFFFL
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE_MASK 0x00010000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE_MASK 0x00020000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE_MASK 0x00040000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE_MASK 0x00080000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE_MASK 0x00100000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE_MASK 0x00200000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE_MASK 0x00400000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE_MASK 0x00800000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE_MASK 0x01000000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE_MASK 0x02000000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE_MASK 0x04000000L
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE_MASK 0x08000000L
+//DP0_DP_SEC_CNTL7
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE__SHIFT 0x0
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE__SHIFT 0x1
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE__SHIFT 0x4
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE__SHIFT 0x5
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE__SHIFT 0x8
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE__SHIFT 0x9
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE__SHIFT 0xc
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE__SHIFT 0xd
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE__SHIFT 0x10
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE__SHIFT 0x11
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE__SHIFT 0x14
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE__SHIFT 0x15
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE__SHIFT 0x18
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE__SHIFT 0x19
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE__SHIFT 0x1c
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE__SHIFT 0x1d
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE_MASK 0x00000001L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE_MASK 0x00000002L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE_MASK 0x00000010L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE_MASK 0x00000020L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE_MASK 0x00000100L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE_MASK 0x00000200L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE_MASK 0x00001000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE_MASK 0x00002000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE_MASK 0x00010000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE_MASK 0x00020000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE_MASK 0x00100000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE_MASK 0x00200000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE_MASK 0x01000000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE_MASK 0x02000000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE_MASK 0x10000000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE_MASK 0x20000000L
+//DP0_DP_DB_CNTL
+#define DP0_DP_DB_CNTL__DP_DB_PENDING__SHIFT 0x0
+#define DP0_DP_DB_CNTL__DP_DB_TAKEN__SHIFT 0x4
+#define DP0_DP_DB_CNTL__DP_DB_TAKEN_CLR__SHIFT 0x5
+#define DP0_DP_DB_CNTL__DP_DB_LOCK__SHIFT 0x8
+#define DP0_DP_DB_CNTL__DP_DB_DISABLE__SHIFT 0xc
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_PENDING__SHIFT 0xf
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DP0_DP_DB_CNTL__DP_DB_PENDING_MASK 0x00000001L
+#define DP0_DP_DB_CNTL__DP_DB_TAKEN_MASK 0x00000010L
+#define DP0_DP_DB_CNTL__DP_DB_TAKEN_CLR_MASK 0x00000020L
+#define DP0_DP_DB_CNTL__DP_DB_LOCK_MASK 0x00000100L
+#define DP0_DP_DB_CNTL__DP_DB_DISABLE_MASK 0x00001000L
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DP0_DP_MSA_VBID_MISC
+#define DP0_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define DP0_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x4
+#define DP0_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE__SHIFT 0x8
+#define DP0_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE__SHIFT 0x9
+#define DP0_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN__SHIFT 0xc
+#define DP0_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN__SHIFT 0xd
+#define DP0_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE__SHIFT 0xf
+#define DP0_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM__SHIFT 0x10
+#define DP0_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_MASK 0x00000003L
+#define DP0_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000010L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_MASK 0x00000100L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_MASK 0x00000200L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN_MASK 0x00001000L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN_MASK 0x00002000L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE_MASK 0x00008000L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_SEC_METADATA_TRANSMISSION
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x1
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE__SHIFT 0x4
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE__SHIFT 0x10
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000002L
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE_MASK 0x000000F0L
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DP0_DP_ALPM_CNTL
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND__SHIFT 0x0
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING__SHIFT 0x1
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND__SHIFT 0x2
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING__SHIFT 0x3
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE__SHIFT 0x4
+#define DP0_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO__SHIFT 0x5
+#define DP0_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE__SHIFT 0x6
+#define DP0_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP__SHIFT 0x7
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM__SHIFT 0x8
+#define DP0_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS__SHIFT 0xb
+#define DP0_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN__SHIFT 0xc
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM__SHIFT 0x10
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND_MASK 0x00000001L
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING_MASK 0x00000002L
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND_MASK 0x00000004L
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING_MASK 0x00000008L
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE_MASK 0x00000010L
+#define DP0_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO_MASK 0x00000020L
+#define DP0_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE_MASK 0x00000040L
+#define DP0_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_MASK 0x00000080L
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM_MASK 0x00000300L
+#define DP0_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS_MASK 0x00000800L
+#define DP0_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN_MASK 0x00001000L
+#define DP0_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_GSP8_CNTL
+#define DP0_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE__SHIFT 0x0
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE__SHIFT 0x4
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE__SHIFT 0x5
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE__SHIFT 0x6
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND__SHIFT 0x7
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE__SHIFT 0x8
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING__SHIFT 0xc
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE__SHIFT 0xd
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM__SHIFT 0x10
+#define DP0_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE_MASK 0x0000000FL
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE_MASK 0x00000010L
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE_MASK 0x00000020L
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE_MASK 0x00000040L
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_MASK 0x00000080L
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE_MASK 0x00000100L
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING_MASK 0x00001000L
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE_MASK 0x00002000L
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP0_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_GSP9_CNTL
+#define DP0_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE__SHIFT 0x0
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE__SHIFT 0x4
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE__SHIFT 0x5
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE__SHIFT 0x6
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND__SHIFT 0x7
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE__SHIFT 0x8
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING__SHIFT 0xc
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE__SHIFT 0xd
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM__SHIFT 0x10
+#define DP0_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE_MASK 0x0000000FL
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE_MASK 0x00000010L
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE_MASK 0x00000020L
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE_MASK 0x00000040L
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_MASK 0x00000080L
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE_MASK 0x00000100L
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING_MASK 0x00001000L
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE_MASK 0x00002000L
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP0_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_GSP10_CNTL
+#define DP0_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE__SHIFT 0x0
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE__SHIFT 0x4
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE__SHIFT 0x5
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE__SHIFT 0x6
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND__SHIFT 0x7
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE__SHIFT 0x8
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING__SHIFT 0xc
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE__SHIFT 0xd
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM__SHIFT 0x10
+#define DP0_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE_MASK 0x0000000FL
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE_MASK 0x00000010L
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE_MASK 0x00000020L
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE_MASK 0x00000040L
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_MASK 0x00000080L
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE_MASK 0x00000100L
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING_MASK 0x00001000L
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE_MASK 0x00002000L
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP0_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_GSP11_CNTL
+#define DP0_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE__SHIFT 0x0
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE__SHIFT 0x4
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE__SHIFT 0x5
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE__SHIFT 0x6
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND__SHIFT 0x7
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE__SHIFT 0x8
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING__SHIFT 0xc
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE__SHIFT 0xd
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM__SHIFT 0x10
+#define DP0_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE_MASK 0x0000000FL
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE_MASK 0x00000010L
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE_MASK 0x00000020L
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE_MASK 0x00000040L
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_MASK 0x00000080L
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE_MASK 0x00000100L
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING_MASK 0x00001000L
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE_MASK 0x00002000L
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP0_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_GSP_EN_DB_STATUS
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING__SHIFT 0x0
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING__SHIFT 0x1
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING__SHIFT 0x2
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING__SHIFT 0x3
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING__SHIFT 0x4
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING__SHIFT 0x5
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING__SHIFT 0x6
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING__SHIFT 0x7
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING__SHIFT 0x8
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING__SHIFT 0x9
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING__SHIFT 0xa
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING__SHIFT 0xb
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING_MASK 0x00000001L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING_MASK 0x00000002L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING_MASK 0x00000004L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING_MASK 0x00000008L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING_MASK 0x00000010L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING_MASK 0x00000020L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING_MASK 0x00000040L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING_MASK 0x00000080L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING_MASK 0x00000100L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING_MASK 0x00000200L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING_MASK 0x00000400L
+#define DP0_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING_MASK 0x00000800L
+//DP0_DP_AUXLESS_ALPM_CNTL1
+#define DP0_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT__SHIFT 0x4
+#define DP0_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY__SHIFT 0x8
+#define DP0_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL__SHIFT 0x14
+#define DP0_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE__SHIFT 0x1f
+#define DP0_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT_MASK 0x000000F0L
+#define DP0_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY_MASK 0x0007FF00L
+#define DP0_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL_MASK 0x1FF00000L
+#define DP0_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE_MASK 0x80000000L
+//DP0_DP_AUXLESS_ALPM_CNTL2
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME__SHIFT 0x0
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND__SHIFT 0x7
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE__SHIFT 0x10
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING__SHIFT 0x11
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE__SHIFT 0x12
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING__SHIFT 0x13
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD__SHIFT 0x14
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME_MASK 0x0000007FL
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND_MASK 0x00000080L
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE_MASK 0x00010000L
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING_MASK 0x00020000L
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE_MASK 0x00040000L
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING_MASK 0x00080000L
+#define DP0_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD_MASK 0x3FF00000L
+//DP0_DP_AUXLESS_ALPM_CNTL3
+#define DP0_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM__SHIFT 0x0
+#define DP0_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM__SHIFT 0x10
+#define DP0_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM_MASK 0x0000FFFFL
+#define DP0_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_AUXLESS_ALPM_CNTL4
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN__SHIFT 0x1
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL__SHIFT 0x2
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME__SHIFT 0x3
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE__SHIFT 0x4
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS__SHIFT 0x5
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE__SHIFT 0x6
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM__SHIFT 0x18
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_MASK 0x00000002L
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL_MASK 0x00000004L
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME_MASK 0x00000008L
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE_MASK 0x00000010L
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS_MASK 0x00000020L
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE_MASK 0x00000040L
+#define DP0_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM_MASK 0xFF000000L
+//DP0_DP_AUXLESS_ALPM_CNTL5
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK__SHIFT 0x0
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED__SHIFT 0x1
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x2
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR__SHIFT 0x3
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM__SHIFT 0x8
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM__SHIFT 0x10
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK_MASK 0x00000001L
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED_MASK 0x00000002L
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000004L
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR_MASK 0x00000008L
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM_MASK 0x0000FF00L
+#define DP0_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_STREAM_SYMBOL_COUNT_STATUS
+#define DP0_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT__SHIFT 0x0
+#define DP0_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT_MASK 0x0000FFFFL
+//DP0_DP_STREAM_SYMBOL_COUNT_CONTROL
+#define DP0_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE__SHIFT 0x0
+#define DP0_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET__SHIFT 0x4
+#define DP0_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE_MASK 0x00000001L
+#define DP0_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET_MASK 0x00000010L
+//DP0_DP_LINK_SYMBOL_COUNT_STATUS0
+#define DP0_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT__SHIFT 0x0
+#define DP0_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT_MASK 0x0000FFFFL
+//DP0_DP_LINK_SYMBOL_COUNT_STATUS1
+#define DP0_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT__SHIFT 0x0
+#define DP0_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT_MASK 0xFFFFFFFFL
+//DP0_DP_LINK_SYMBOL_COUNT_CONTROL
+#define DP0_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE__SHIFT 0x0
+#define DP0_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET__SHIFT 0x4
+#define DP0_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE__SHIFT 0x8
+#define DP0_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET__SHIFT 0xc
+#define DP0_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE_MASK 0x00000001L
+#define DP0_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET_MASK 0x00000010L
+#define DP0_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE_MASK 0x00000100L
+#define DP0_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET_MASK 0x00001000L
+
+
+// addressBlock: dce_dc_dio_dig0_dispdec
+//DIG0_DIG_FE_CNTL
+#define DIG0_DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x0
+#define DIG0_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x4
+#define DIG0_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x8
+#define DIG0_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT__SHIFT 0xc
+#define DIG0_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING__SHIFT 0xf
+#define DIG0_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT__SHIFT 0x10
+#define DIG0_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN__SHIFT 0x14
+#define DIG0_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x00000007L
+#define DIG0_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x00000070L
+#define DIG0_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x00000100L
+#define DIG0_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT_MASK 0x00007000L
+#define DIG0_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING_MASK 0x00008000L
+#define DIG0_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT_MASK 0x00030000L
+#define DIG0_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN_MASK 0x00100000L
+//DIG0_DIG_FE_CLK_CNTL
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_MODE__SHIFT 0x0
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN__SHIFT 0x4
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET__SHIFT 0x5
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON__SHIFT 0xa
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON__SHIFT 0xb
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON__SHIFT 0xc
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON__SHIFT 0xe
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_MODE_MASK 0x00000007L
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN_MASK 0x00000010L
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET_MASK 0x00000020L
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON_MASK 0x00000400L
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON_MASK 0x00000800L
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON_MASK 0x00001000L
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON_MASK 0x00002000L
+#define DIG0_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON_MASK 0x00004000L
+//DIG0_DIG_FE_EN_CNTL
+#define DIG0_DIG_FE_EN_CNTL__DIG_FE_ENABLE__SHIFT 0x0
+#define DIG0_DIG_FE_EN_CNTL__DIG_FE_ENABLE_MASK 0x00000001L
+//DIG0_DIG_OUTPUT_CRC_CNTL
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x0
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x4
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x8
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x00000001L
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x00000010L
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x00000300L
+//DIG0_DIG_OUTPUT_CRC_RESULT
+#define DIG0_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x0
+#define DIG0_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3FFFFFFFL
+//DIG0_DIG_CLOCK_PATTERN
+#define DIG0_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x0
+#define DIG0_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x000003FFL
+//DIG0_DIG_TEST_PATTERN
+#define DIG0_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x0
+#define DIG0_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x1
+#define DIG0_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x4
+#define DIG0_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x5
+#define DIG0_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x6
+#define DIG0_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x10
+#define DIG0_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x00000001L
+#define DIG0_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x00000002L
+#define DIG0_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x00000010L
+#define DIG0_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x00000020L
+#define DIG0_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x00000040L
+#define DIG0_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x03FF0000L
+//DIG0_DIG_RANDOM_PATTERN_SEED
+#define DIG0_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x0
+#define DIG0_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x18
+#define DIG0_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0x00FFFFFFL
+#define DIG0_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x01000000L
+//DIG0_DIG_FIFO_CTRL0
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE__SHIFT 0x0
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_RESET__SHIFT 0x1
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL__SHIFT 0x2
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x7
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE__SHIFT 0x8
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE__SHIFT 0x14
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_ERROR__SHIFT 0x1c
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE_MASK 0x00000001L
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_RESET_MASK 0x00000002L
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL_MASK 0x0000007CL
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC_MASK 0x00000080L
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE_MASK 0x00000100L
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE_MASK 0x00100000L
+#define DIG0_DIG_FIFO_CTRL0__DIG_FIFO_ERROR_MASK 0x30000000L
+//DIG0_DIG_FIFO_CTRL1
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED__SHIFT 0x1d
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED_MASK 0x20000000L
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DIG0_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+//DIG0_HDMI_METADATA_PACKET_CONTROL
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_CONTROL
+#define DIG0_HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
+#define DIG0_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1
+#define DIG0_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
+#define DIG0_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED__SHIFT 0x3
+#define DIG0_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x4
+#define DIG0_HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x8
+#define DIG0_HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x9
+#define DIG0_HDMI_CONTROL__DOLBY_VISION_EN__SHIFT 0xa
+#define DIG0_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED__SHIFT 0xb
+#define DIG0_HDMI_CONTROL__TMDS_PIXEL_ENCODING__SHIFT 0xc
+#define DIG0_HDMI_CONTROL__TMDS_COLOR_FORMAT__SHIFT 0xd
+#define DIG0_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM__SHIFT 0x10
+#define DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x18
+#define DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x1c
+#define DIG0_HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x00000001L
+#define DIG0_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x00000002L
+#define DIG0_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x00000004L
+#define DIG0_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x00000008L
+#define DIG0_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x00000010L
+#define DIG0_HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x00000100L
+#define DIG0_HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x00000200L
+#define DIG0_HDMI_CONTROL__DOLBY_VISION_EN_MASK 0x00000400L
+#define DIG0_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED_MASK 0x00000800L
+#define DIG0_HDMI_CONTROL__TMDS_PIXEL_ENCODING_MASK 0x00001000L
+#define DIG0_HDMI_CONTROL__TMDS_COLOR_FORMAT_MASK 0x00006000L
+#define DIG0_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM_MASK 0x003F0000L
+#define DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x01000000L
+#define DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000L
+//DIG0_HDMI_STATUS
+#define DIG0_HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x0
+#define DIG0_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x10
+#define DIG0_HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x14
+#define DIG0_HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x1b
+#define DIG0_HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x00000001L
+#define DIG0_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x00010000L
+#define DIG0_HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x00100000L
+#define DIG0_HDMI_STATUS__HDMI_ERROR_INT_MASK 0x08000000L
+//DIG0_HDMI_AUDIO_PACKET_CONTROL
+#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x4
+#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x00000030L
+//DIG0_HDMI_ACR_PACKET_CONTROL
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+//DIG0_HDMI_VBI_PACKET_CONTROL
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x0
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x4
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE__SHIFT 0x18
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x00001000L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE_MASK 0x3F000000L
+//DIG0_HDMI_INFOFRAME_CONTROL0
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x5
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x8
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x9
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x00000010L
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x00000020L
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x00000100L
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x00000200L
+//DIG0_HDMI_INFOFRAME_CONTROL1
+#define DIG0_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x8
+#define DIG0_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x10
+#define DIG0_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x00003F00L
+#define DIG0_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x003F0000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x2
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x6
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xa
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xe
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x12
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x16
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1a
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1e
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE__SHIFT 0x1f
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000004L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 0x00000040L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000400L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00004000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00040000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00400000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x04000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x40000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE_MASK 0x80000000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL6
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT__SHIFT 0x1
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE__SHIFT 0x2
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND__SHIFT 0x4
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT__SHIFT 0x5
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE__SHIFT 0x6
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND__SHIFT 0x8
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT__SHIFT 0x9
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE__SHIFT 0xa
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND__SHIFT 0xc
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT__SHIFT 0xd
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE__SHIFT 0xe
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT__SHIFT 0x11
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE__SHIFT 0x12
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND__SHIFT 0x14
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT__SHIFT 0x15
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE__SHIFT 0x16
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND__SHIFT 0x18
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT__SHIFT 0x19
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE__SHIFT 0x1a
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND_MASK 0x00000001L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT_MASK 0x00000002L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE_MASK 0x00000004L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND_MASK 0x00000010L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT_MASK 0x00000020L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE_MASK 0x00000040L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND_MASK 0x00000100L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT_MASK 0x00000200L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE_MASK 0x00000400L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND_MASK 0x00001000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT_MASK 0x00002000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE_MASK 0x00004000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND_MASK 0x00010000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT_MASK 0x00020000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE_MASK 0x00040000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND_MASK 0x00100000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT_MASK 0x00200000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE_MASK 0x00400000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND_MASK 0x01000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT_MASK 0x02000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE_MASK 0x04000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL5
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING__SHIFT 0x11
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND__SHIFT 0x12
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING__SHIFT 0x13
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND__SHIFT 0x14
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING__SHIFT 0x15
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND__SHIFT 0x16
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING__SHIFT 0x17
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND__SHIFT 0x18
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING__SHIFT 0x19
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND__SHIFT 0x1a
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING__SHIFT 0x1b
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND__SHIFT 0x1c
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING__SHIFT 0x1d
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_MASK 0x00010000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING_MASK 0x00020000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_MASK 0x00040000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING_MASK 0x00080000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_MASK 0x00100000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING_MASK 0x00200000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_MASK 0x00400000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING_MASK 0x00800000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_MASK 0x01000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING_MASK 0x02000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_MASK 0x04000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING_MASK 0x08000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_MASK 0x10000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING_MASK 0x20000000L
+//DIG0_HDMI_GC
+#define DIG0_HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0
+#define DIG0_HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define DIG0_HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define DIG0_HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x8
+#define DIG0_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0xc
+#define DIG0_HDMI_GC__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define DIG0_HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define DIG0_HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+#define DIG0_HDMI_GC__HDMI_PACKING_PHASE_MASK 0x00000F00L
+#define DIG0_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x00001000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL1
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL2
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL3
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL4
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL7
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL8
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL9
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING__SHIFT 0x11
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING__SHIFT 0x12
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING__SHIFT 0x13
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING__SHIFT 0x14
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING__SHIFT 0x15
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING__SHIFT 0x16
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING__SHIFT 0x17
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING__SHIFT 0x18
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING__SHIFT 0x19
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING__SHIFT 0x1a
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING__SHIFT 0x1b
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING__SHIFT 0x1c
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING__SHIFT 0x1d
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING__SHIFT 0x1e
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING_MASK 0x00010000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING_MASK 0x00020000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING_MASK 0x00040000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING_MASK 0x00080000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING_MASK 0x00100000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING_MASK 0x00200000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING_MASK 0x00400000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING_MASK 0x00800000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING_MASK 0x01000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING_MASK 0x02000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING_MASK 0x04000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING_MASK 0x08000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING_MASK 0x10000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING_MASK 0x20000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING_MASK 0x40000000L
+//DIG0_HDMI_DB_CONTROL
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN__SHIFT 0x4
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR__SHIFT 0x5
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_LOCK__SHIFT 0x8
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN_MASK 0x00000010L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR_MASK 0x00000020L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_LOCK_MASK 0x00000100L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DIG0_HDMI_ACR_32_0
+#define DIG0_HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define DIG0_HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L
+//DIG0_HDMI_ACR_32_1
+#define DIG0_HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define DIG0_HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL
+//DIG0_HDMI_ACR_44_0
+#define DIG0_HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define DIG0_HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L
+//DIG0_HDMI_ACR_44_1
+#define DIG0_HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define DIG0_HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL
+//DIG0_HDMI_ACR_48_0
+#define DIG0_HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define DIG0_HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L
+//DIG0_HDMI_ACR_48_1
+#define DIG0_HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define DIG0_HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL
+//DIG0_HDMI_ACR_STATUS_0
+#define DIG0_HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define DIG0_HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L
+//DIG0_HDMI_ACR_STATUS_1
+#define DIG0_HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define DIG0_HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL
+//DIG0_AFMT_CNTL
+#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN__SHIFT 0x0
+#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
+#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L
+#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L
+//DIG0_DIG_BE_CLK_CNTL
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_MODE__SHIFT 0x0
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN__SHIFT 0x4
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET__SHIFT 0x5
+#define DIG0_DIG_BE_CLK_CNTL__HDCP_SOFT_RESET__SHIFT 0x6
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON__SHIFT 0xb
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_HDCP_CLOCK_ON__SHIFT 0xc
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_MODE_MASK 0x00000007L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN_MASK 0x00000010L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET_MASK 0x00000020L
+#define DIG0_DIG_BE_CLK_CNTL__HDCP_SOFT_RESET_MASK 0x00000040L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON_MASK 0x00000800L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_HDCP_CLOCK_ON_MASK 0x00001000L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON_MASK 0x00002000L
+//DIG0_DIG_BE_CNTL
+#define DIG0_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
+#define DIG0_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
+#define DIG0_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
+#define DIG0_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
+#define DIG0_DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x1c
+#define DIG0_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x00000001L
+#define DIG0_DIG_BE_CNTL__DIG_SWAP_MASK 0x00000002L
+#define DIG0_DIG_BE_CNTL__DIG_RB_SWITCH_EN_MASK 0x00000004L
+#define DIG0_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x00007F00L
+#define DIG0_DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000L
+//DIG0_DIG_BE_EN_CNTL
+#define DIG0_DIG_BE_EN_CNTL__DIG_BE_ENABLE__SHIFT 0x0
+#define DIG0_DIG_BE_EN_CNTL__DIG_BE_ENABLE_MASK 0x00000001L
+//DIG0_TMDS_CNTL
+#define DIG0_TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x0
+#define DIG0_TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x00000001L
+//DIG0_TMDS_CONTROL_CHAR
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x0
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x1
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x2
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x3
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x00000001L
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x00000002L
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x00000004L
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x00000008L
+//DIG0_TMDS_CONTROL0_FEEDBACK
+#define DIG0_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x0
+#define DIG0_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x8
+#define DIG0_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x00000003L
+#define DIG0_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x00000300L
+//DIG0_TMDS_STEREOSYNC_CTL_SEL
+#define DIG0_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x0
+#define DIG0_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x00000003L
+//DIG0_TMDS_SYNC_CHAR_PATTERN_0_1
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x0
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x10
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x000003FFL
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x03FF0000L
+//DIG0_TMDS_SYNC_CHAR_PATTERN_2_3
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x0
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x10
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x000003FFL
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x03FF0000L
+//DIG0_TMDS_CTL_BITS
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x0
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x8
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x10
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x18
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL0_MASK 0x00000001L
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL1_MASK 0x00000100L
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL2_MASK 0x00010000L
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL3_MASK 0x01000000L
+//DIG0_TMDS_DCBALANCER_CONTROL
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x0
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x4
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x8
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x10
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x18
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x00000001L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x00000070L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x00000100L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0x000F0000L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x01000000L
+//DIG0_TMDS_SYNC_DCBALANCE_CHAR
+#define DIG0_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01__SHIFT 0x0
+#define DIG0_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11__SHIFT 0x10
+#define DIG0_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01_MASK 0x000003FFL
+#define DIG0_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11_MASK 0x03FF0000L
+//DIG0_TMDS_CTL0_1_GEN_CNTL
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x0
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x7
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x8
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0xb
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x10
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x14
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x17
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x18
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x1f
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0x0000000FL
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x00000070L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x00000080L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x00000300L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0x000F0000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x00700000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x00800000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x03000000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000L
+//DIG0_TMDS_CTL2_3_GEN_CNTL
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x0
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x4
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x7
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x8
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0xb
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x10
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x14
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x17
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x18
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0x0000000FL
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x00000070L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x00000080L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x00000300L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0x000F0000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x00700000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x00800000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x03000000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000L
+//DIG0_DIG_VERSION
+#define DIG0_DIG_VERSION__DIG_TYPE__SHIFT 0x0
+#define DIG0_DIG_VERSION__DIG_TYPE_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_dio_dp1_dispdec
+//DP1_DP_LINK_CNTL
+#define DP1_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x4
+#define DP1_DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x8
+#define DP1_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x00000010L
+#define DP1_DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x00000100L
+//DP1_DP_PIXEL_FORMAT
+#define DP1_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x0
+#define DP1_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x18
+#define DP1_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE__SHIFT 0x1e
+#define DP1_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x00000007L
+#define DP1_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x07000000L
+#define DP1_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE_MASK 0x40000000L
+//DP1_DP_MSA_COLORIMETRY
+#define DP1_DP_MSA_COLORIMETRY__DP_MSA_MISC0__SHIFT 0x18
+#define DP1_DP_MSA_COLORIMETRY__DP_MSA_MISC0_MASK 0xFF000000L
+//DP1_DP_CONFIG
+#define DP1_DP_CONFIG__DP_UDI_LANES__SHIFT 0x0
+#define DP1_DP_CONFIG__DP_UDI_LANES_MASK 0x00000003L
+//DP1_DP_VID_STREAM_CNTL
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x0
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x8
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x10
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x14
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x00000300L
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x00010000L
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x00100000L
+//DP1_DP_STEER_FIFO
+#define DP1_DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x0
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x4
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x5
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x6
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x7
+#define DP1_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x8
+#define DP1_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0xc
+#define DP1_DP_STEER_FIFO__DP_TU_SIZE__SHIFT 0x18
+#define DP1_DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x00000001L
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x00000010L
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x00000020L
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x00000040L
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x00000080L
+#define DP1_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x00000100L
+#define DP1_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x00001000L
+#define DP1_DP_STEER_FIFO__DP_TU_SIZE_MASK 0x3F000000L
+//DP1_DP_MSA_MISC
+#define DP1_DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x0
+#define DP1_DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x8
+#define DP1_DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x10
+#define DP1_DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x18
+#define DP1_DP_MSA_MISC__DP_MSA_MISC1_MASK 0x000000FFL
+#define DP1_DP_MSA_MISC__DP_MSA_MISC2_MASK 0x0000FF00L
+#define DP1_DP_MSA_MISC__DP_MSA_MISC3_MASK 0x00FF0000L
+#define DP1_DP_MSA_MISC__DP_MSA_MISC4_MASK 0xFF000000L
+//DP1_DP_DPHY_INTERNAL_CTRL
+#define DP1_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN__SHIFT 0x0
+#define DP1_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL__SHIFT 0x4
+#define DP1_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN_MASK 0x00000001L
+#define DP1_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL_MASK 0x00000010L
+//DP1_DP_VID_TIMING
+#define DP1_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE__SHIFT 0x4
+#define DP1_DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x8
+#define DP1_DP_VID_TIMING__DP_VID_N_MUL__SHIFT 0xa
+#define DP1_DP_VID_TIMING__DP_VID_M_DIV__SHIFT 0xc
+#define DP1_DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x18
+#define DP1_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE_MASK 0x00000010L
+#define DP1_DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x00000100L
+#define DP1_DP_VID_TIMING__DP_VID_N_MUL_MASK 0x00000C00L
+#define DP1_DP_VID_TIMING__DP_VID_M_DIV_MASK 0x00003000L
+#define DP1_DP_VID_TIMING__DP_VID_N_DIV_MASK 0xFF000000L
+//DP1_DP_VID_N
+#define DP1_DP_VID_N__DP_VID_N__SHIFT 0x0
+#define DP1_DP_VID_N__DP_VID_N_MASK 0x00FFFFFFL
+//DP1_DP_VID_M
+#define DP1_DP_VID_M__DP_VID_M__SHIFT 0x0
+#define DP1_DP_VID_M__DP_VID_M_MASK 0x00FFFFFFL
+//DP1_DP_LINK_FRAMING_CNTL
+#define DP1_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x0
+#define DP1_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE__SHIFT 0x14
+#define DP1_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x18
+#define DP1_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x1c
+#define DP1_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x0003FFFFL
+#define DP1_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE_MASK 0x00100000L
+#define DP1_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x01000000L
+#define DP1_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000L
+//DP1_DP_HBR2_EYE_PATTERN
+#define DP1_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x0
+#define DP1_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x00000001L
+//DP1_DP_VID_MSA_VBID
+#define DP1_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x0
+#define DP1_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE__SHIFT 0xc
+#define DP1_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x18
+#define DP1_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0x00000FFFL
+#define DP1_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE_MASK 0x00001000L
+#define DP1_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x01000000L
+//DP1_DP_VID_INTERRUPT_CNTL
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x0
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x1
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x2
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x00000001L
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x00000002L
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x00000004L
+//DP1_DP_DPHY_CNTL
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x0
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x1
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x2
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x3
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_EN__SHIFT 0x4
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW__SHIFT 0x5
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS__SHIFT 0x6
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM__SHIFT 0x7
+#define DP1_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL__SHIFT 0x8
+#define DP1_DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x10
+#define DP1_DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x18
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x00000001L
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x00000002L
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x00000004L
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x00000008L
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_EN_MASK 0x00000010L
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW_MASK 0x00000020L
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS_MASK 0x00000040L
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM_MASK 0x00000080L
+#define DP1_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL_MASK 0x00000100L
+#define DP1_DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x00010000L
+#define DP1_DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x01000000L
+//DP1_DP_DPHY_TRAINING_PATTERN_SEL
+#define DP1_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x0
+#define DP1_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x00000003L
+//DP1_DP_DPHY_SYM0
+#define DP1_DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x0
+#define DP1_DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0xa
+#define DP1_DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x14
+#define DP1_DP_DPHY_SYM0__DPHY_SYM1_MASK 0x000003FFL
+#define DP1_DP_DPHY_SYM0__DPHY_SYM2_MASK 0x000FFC00L
+#define DP1_DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3FF00000L
+//DP1_DP_DPHY_SYM1
+#define DP1_DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x0
+#define DP1_DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0xa
+#define DP1_DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x14
+#define DP1_DP_DPHY_SYM1__DPHY_SYM4_MASK 0x000003FFL
+#define DP1_DP_DPHY_SYM1__DPHY_SYM5_MASK 0x000FFC00L
+#define DP1_DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3FF00000L
+//DP1_DP_DPHY_SYM2
+#define DP1_DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x0
+#define DP1_DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0xa
+#define DP1_DP_DPHY_SYM2__DPHY_SYM7_MASK 0x000003FFL
+#define DP1_DP_DPHY_SYM2__DPHY_SYM8_MASK 0x000FFC00L
+//DP1_DP_DPHY_8B10B_CNTL
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x8
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x10
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x18
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x00000100L
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x00010000L
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x01000000L
+//DP1_DP_DPHY_PRBS_CNTL
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x0
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x00000001L
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x00000030L
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7FFFFF00L
+//DP1_DP_DPHY_SCRAM_CNTL
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS__SHIFT 0x0
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE__SHIFT 0x18
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS_MASK 0x00000001L
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x00000010L
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x0003FF00L
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE_MASK 0x01000000L
+//DP1_DP_DPHY_CRC_CONTROL0
+#define DP1_DP_DPHY_CRC_CONTROL0__DPHY_CRC_EN__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_CONTROL0__DPHY_CRC_CONT_EN__SHIFT 0x4
+#define DP1_DP_DPHY_CRC_CONTROL0__DPHY_CRC_FIELD__SHIFT 0x8
+#define DP1_DP_DPHY_CRC_CONTROL0__DPHY_CRC_EN_MASK 0x00000001L
+#define DP1_DP_DPHY_CRC_CONTROL0__DPHY_CRC_CONT_EN_MASK 0x00000010L
+#define DP1_DP_DPHY_CRC_CONTROL0__DPHY_CRC_FIELD_MASK 0x00000100L
+//DP1_DP_DPHY_CRC_CONTROL1
+#define DP1_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x8
+#define DP1_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_FIRST_SLOT_MASK 0x0000003FL
+#define DP1_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_LAST_SLOT_MASK 0x00003F00L
+//DP1_DP_DPHY_CRC_RESULT0
+#define DP1_DP_DPHY_CRC_RESULT0__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_RESULT0__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP1_DP_DPHY_CRC_RESULT1
+#define DP1_DP_DPHY_CRC_RESULT1__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_RESULT1__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP1_DP_DPHY_CRC_STATUS
+#define DP1_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_ACK__SHIFT 0x4
+#define DP1_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x8
+#define DP1_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0xc
+#define DP1_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x10
+#define DP1_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_MASK 0x00000001L
+#define DP1_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_ACK_MASK 0x00000010L
+#define DP1_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x00000100L
+#define DP1_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x00001000L
+#define DP1_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x00010000L
+//DP1_DP_DPHY_FAST_TRAINING
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x0
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x1
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x2
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING__SHIFT 0x4
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x8
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x14
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x00000001L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x00000002L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x00000004L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING_MASK 0x00000010L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0x000FFF00L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xFFF00000L
+//DP1_DP_DPHY_FAST_TRAINING_STATUS
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x0
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x4
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x8
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0xc
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x00000007L
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x00000010L
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x00000100L
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x00001000L
+//DP1_DP_DPHY_CRC_RESULT2
+#define DP1_DP_DPHY_CRC_RESULT2__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_RESULT2__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP1_DP_DPHY_CRC_RESULT3
+#define DP1_DP_DPHY_CRC_RESULT3__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_RESULT3__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP1_DP_SEC_CNTL
+#define DP1_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x0
+#define DP1_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
+#define DP1_DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x8
+#define DP1_DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0xc
+#define DP1_DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x10
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x14
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x15
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x16
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x17
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE__SHIFT 0x18
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE__SHIFT 0x19
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE__SHIFT 0x1a
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE__SHIFT 0x1b
+#define DP1_DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x1c
+#define DP1_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x00000010L
+#define DP1_DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x00000100L
+#define DP1_DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x00010000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x00100000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x00200000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x00400000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x00800000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE_MASK 0x01000000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE_MASK 0x02000000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE_MASK 0x04000000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE_MASK 0x08000000L
+#define DP1_DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000L
+//DP1_DP_SEC_CNTL1
+#define DP1_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x0
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE__SHIFT 0x1
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY__SHIFT 0x4
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND__SHIFT 0x5
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING__SHIFT 0x6
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED__SHIFT 0x7
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE__SHIFT 0x8
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE__SHIFT 0x9
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE__SHIFT 0xa
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE__SHIFT 0xb
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE__SHIFT 0xc
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE__SHIFT 0xd
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE__SHIFT 0xe
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE__SHIFT 0xf
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM__SHIFT 0x10
+#define DP1_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE_MASK 0x00000002L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY_MASK 0x00000010L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_MASK 0x00000020L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING_MASK 0x00000040L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED_MASK 0x00000080L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE_MASK 0x00000100L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE_MASK 0x00000200L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE_MASK 0x00000400L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE_MASK 0x00000800L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE_MASK 0x00002000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE_MASK 0x00004000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE_MASK 0x00008000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_FRAMING1
+#define DP1_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x0
+#define DP1_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP1_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0x00000FFFL
+#define DP1_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP1_DP_SEC_FRAMING2
+#define DP1_DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x0
+#define DP1_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP1_DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0x0000FFFFL
+#define DP1_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP1_DP_SEC_FRAMING3
+#define DP1_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x0
+#define DP1_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP1_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x00003FFFL
+#define DP1_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP1_DP_SEC_FRAMING4
+#define DP1_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING__SHIFT 0x0
+#define DP1_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x14
+#define DP1_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x18
+#define DP1_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x1c
+#define DP1_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP1_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING_MASK 0x00000001L
+#define DP1_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x00100000L
+#define DP1_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x01000000L
+#define DP1_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000L
+#define DP1_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP1_DP_SEC_AUD_N
+#define DP1_DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x0
+#define DP1_DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0x00FFFFFFL
+//DP1_DP_SEC_AUD_N_READBACK
+#define DP1_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x0
+#define DP1_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0x00FFFFFFL
+//DP1_DP_SEC_AUD_M
+#define DP1_DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x0
+#define DP1_DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0x00FFFFFFL
+//DP1_DP_SEC_AUD_M_READBACK
+#define DP1_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x0
+#define DP1_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0x00FFFFFFL
+//DP1_DP_SEC_TIMESTAMP
+#define DP1_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x0
+#define DP1_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x00000001L
+//DP1_DP_SEC_PACKET_CNTL
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x1
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x4
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x8
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x10
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0x0000000EL
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x00000010L
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x00003F00L
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x00010000L
+//DP1_DP_MSE_RATE_CNTL
+#define DP1_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x0
+#define DP1_DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x1a
+#define DP1_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x03FFFFFFL
+#define DP1_DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xFC000000L
+//DP1_DP_CP_MSE_STATUS
+//DP1_DP_MSE_RATE_UPDATE
+#define DP1_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x0
+#define DP1_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x00000001L
+//DP1_DP_MSE_SAT0
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x0
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0__SHIFT 0x4
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0__SHIFT 0x5
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x8
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x10
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1__SHIFT 0x14
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1__SHIFT 0x15
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x18
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x00000007L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0_MASK 0x00000010L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0_MASK 0x00000020L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x00070000L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1_MASK 0x00100000L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1_MASK 0x00200000L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3F000000L
+//DP1_DP_MSE_SAT1
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x0
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2__SHIFT 0x4
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2__SHIFT 0x5
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x8
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x10
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3__SHIFT 0x14
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3__SHIFT 0x15
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x18
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x00000007L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2_MASK 0x00000010L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2_MASK 0x00000020L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x00070000L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3_MASK 0x00100000L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3_MASK 0x00200000L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3F000000L
+//DP1_DP_MSE_SAT2
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x0
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4__SHIFT 0x4
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4__SHIFT 0x5
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x8
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x10
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5__SHIFT 0x14
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5__SHIFT 0x15
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x18
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x00000007L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4_MASK 0x00000010L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4_MASK 0x00000020L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x00070000L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5_MASK 0x00100000L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5_MASK 0x00200000L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3F000000L
+//DP1_DP_MSE_SAT_UPDATE
+#define DP1_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x0
+#define DP1_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x8
+#define DP1_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x00000003L
+#define DP1_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x00000100L
+//DP1_DP_MSE_LINK_TIMING
+#define DP1_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x0
+#define DP1_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x10
+#define DP1_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x000003FFL
+#define DP1_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x00030000L
+//DP1_DP_MSE_MISC_CNTL
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x0
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x4
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x8
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x00000001L
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x00000010L
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x00000100L
+//DP1_DP_DPHY_BS_SR_SWAP_CNTL
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE__SHIFT 0xf
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START__SHIFT 0x10
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x000003FFL
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x00008000L
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START_MASK 0x00010000L
+//DP1_DP_DPHY_HBR2_PATTERN_CONTROL
+#define DP1_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL__SHIFT 0x0
+#define DP1_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL_MASK 0x00000007L
+//DP1_DP_MSE_SAT0_STATUS
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS__SHIFT 0x0
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS__SHIFT 0x4
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS__SHIFT 0x5
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS__SHIFT 0x8
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS__SHIFT 0x10
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS__SHIFT 0x14
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS__SHIFT 0x15
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS__SHIFT 0x18
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS_MASK 0x00000007L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS_MASK 0x00000010L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS_MASK 0x00000020L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS_MASK 0x00070000L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS_MASK 0x00100000L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS_MASK 0x00200000L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS_MASK 0x3F000000L
+//DP1_DP_MSE_SAT1_STATUS
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS__SHIFT 0x0
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS__SHIFT 0x4
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS__SHIFT 0x5
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS__SHIFT 0x8
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS__SHIFT 0x10
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS__SHIFT 0x14
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS__SHIFT 0x15
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS__SHIFT 0x18
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS_MASK 0x00000007L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS_MASK 0x00000010L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS_MASK 0x00000020L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS_MASK 0x00070000L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS_MASK 0x00100000L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS_MASK 0x00200000L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS_MASK 0x3F000000L
+//DP1_DP_MSE_SAT2_STATUS
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS__SHIFT 0x0
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS__SHIFT 0x4
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS__SHIFT 0x5
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS__SHIFT 0x8
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS__SHIFT 0x10
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS__SHIFT 0x14
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS__SHIFT 0x15
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS__SHIFT 0x18
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS_MASK 0x00000007L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS_MASK 0x00000010L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS_MASK 0x00000020L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS_MASK 0x00070000L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS_MASK 0x00100000L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS_MASK 0x00200000L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS_MASK 0x3F000000L
+//DP1_DP_DPIA_SPARE
+#define DP1_DP_DPIA_SPARE__DP_DPIA_SPARE__SHIFT 0x0
+#define DP1_DP_DPIA_SPARE__DP_DPIA_SPARE_MASK 0x00000003L
+//DP1_DP_MSA_TIMING_PARAM1
+#define DP1_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL__SHIFT 0x0
+#define DP1_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL__SHIFT 0x10
+#define DP1_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL_MASK 0x0000FFFFL
+#define DP1_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL_MASK 0xFFFF0000L
+//DP1_DP_MSA_TIMING_PARAM2
+#define DP1_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART__SHIFT 0x0
+#define DP1_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART__SHIFT 0x10
+#define DP1_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART_MASK 0x0000FFFFL
+#define DP1_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART_MASK 0xFFFF0000L
+//DP1_DP_MSA_TIMING_PARAM3
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH__SHIFT 0x0
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY__SHIFT 0xf
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH__SHIFT 0x10
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY__SHIFT 0x1f
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH_MASK 0x00007FFFL
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY_MASK 0x00008000L
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH_MASK 0x7FFF0000L
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY_MASK 0x80000000L
+//DP1_DP_MSA_TIMING_PARAM4
+#define DP1_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT__SHIFT 0x0
+#define DP1_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH__SHIFT 0x10
+#define DP1_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT_MASK 0x0000FFFFL
+#define DP1_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH_MASK 0xFFFF0000L
+//DP1_DP_MSO_CNTL
+#define DP1_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK__SHIFT 0x0
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE__SHIFT 0x4
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE__SHIFT 0x8
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE__SHIFT 0xc
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE__SHIFT 0x10
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE__SHIFT 0x14
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE__SHIFT 0x18
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE__SHIFT 0x1c
+#define DP1_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK_MASK 0x00000003L
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE_MASK 0x000000F0L
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE_MASK 0x00000F00L
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE_MASK 0x0000F000L
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE_MASK 0x000F0000L
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE_MASK 0x00F00000L
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE_MASK 0x0F000000L
+#define DP1_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE_MASK 0xF0000000L
+//DP1_DP_MSO_CNTL1
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE__SHIFT 0x0
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE__SHIFT 0x4
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE__SHIFT 0x8
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE__SHIFT 0xc
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE__SHIFT 0x10
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE__SHIFT 0x14
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE__SHIFT 0x18
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE__SHIFT 0x1c
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE_MASK 0x0000000FL
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE_MASK 0x000000F0L
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE_MASK 0x00000F00L
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE_MASK 0x0000F000L
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE_MASK 0x000F0000L
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE_MASK 0x00F00000L
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE_MASK 0x0F000000L
+#define DP1_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE_MASK 0xF0000000L
+//DP1_DP_DSC_CNTL
+#define DP1_DP_DSC_CNTL__DP_DSC_MODE__SHIFT 0x0
+#define DP1_DP_DSC_CNTL__DP_DSC_MODE_MASK 0x00000001L
+//DP1_DP_SEC_CNTL2
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND__SHIFT 0x0
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING__SHIFT 0x1
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED__SHIFT 0x2
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE__SHIFT 0x3
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND__SHIFT 0x4
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING__SHIFT 0x5
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED__SHIFT 0x6
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE__SHIFT 0x7
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND__SHIFT 0x8
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING__SHIFT 0x9
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED__SHIFT 0xa
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE__SHIFT 0xb
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND__SHIFT 0xc
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING__SHIFT 0xd
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE__SHIFT 0xf
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND__SHIFT 0x10
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING__SHIFT 0x11
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED__SHIFT 0x12
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE__SHIFT 0x13
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND__SHIFT 0x14
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING__SHIFT 0x15
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED__SHIFT 0x16
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE__SHIFT 0x17
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND__SHIFT 0x18
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING__SHIFT 0x19
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED__SHIFT 0x1a
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE__SHIFT 0x1b
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP11_PPS__SHIFT 0x1c
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING_MASK 0x00000002L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED_MASK 0x00000004L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE_MASK 0x00000008L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_MASK 0x00000010L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING_MASK 0x00000020L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED_MASK 0x00000040L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE_MASK 0x00000080L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_MASK 0x00000100L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING_MASK 0x00000200L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED_MASK 0x00000400L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE_MASK 0x00000800L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING_MASK 0x00002000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE_MASK 0x00008000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_MASK 0x00010000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING_MASK 0x00020000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED_MASK 0x00040000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE_MASK 0x00080000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_MASK 0x00100000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING_MASK 0x00200000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED_MASK 0x00400000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE_MASK 0x00800000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_MASK 0x01000000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING_MASK 0x02000000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED_MASK 0x04000000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE_MASK 0x08000000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP11_PPS_MASK 0x10000000L
+//DP1_DP_SEC_CNTL3
+#define DP1_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT 0x0
+#define DP1_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM__SHIFT 0x10
+#define DP1_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK 0x0000FFFFL
+#define DP1_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_CNTL4
+#define DP1_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM__SHIFT 0x0
+#define DP1_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM__SHIFT 0x10
+#define DP1_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM_MASK 0x0000FFFFL
+#define DP1_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_CNTL5
+#define DP1_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM__SHIFT 0x0
+#define DP1_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM__SHIFT 0x10
+#define DP1_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM_MASK 0x0000FFFFL
+#define DP1_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_CNTL6
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM__SHIFT 0x0
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE__SHIFT 0x10
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE__SHIFT 0x11
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE__SHIFT 0x12
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE__SHIFT 0x13
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE__SHIFT 0x14
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE__SHIFT 0x15
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE__SHIFT 0x16
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE__SHIFT 0x17
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE__SHIFT 0x18
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE__SHIFT 0x19
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE__SHIFT 0x1a
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE__SHIFT 0x1b
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM_MASK 0x0000FFFFL
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE_MASK 0x00010000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE_MASK 0x00020000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE_MASK 0x00040000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE_MASK 0x00080000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE_MASK 0x00100000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE_MASK 0x00200000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE_MASK 0x00400000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE_MASK 0x00800000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE_MASK 0x01000000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE_MASK 0x02000000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE_MASK 0x04000000L
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE_MASK 0x08000000L
+//DP1_DP_SEC_CNTL7
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE__SHIFT 0x0
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE__SHIFT 0x1
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE__SHIFT 0x4
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE__SHIFT 0x5
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE__SHIFT 0x8
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE__SHIFT 0x9
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE__SHIFT 0xc
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE__SHIFT 0xd
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE__SHIFT 0x10
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE__SHIFT 0x11
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE__SHIFT 0x14
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE__SHIFT 0x15
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE__SHIFT 0x18
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE__SHIFT 0x19
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE__SHIFT 0x1c
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE__SHIFT 0x1d
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE_MASK 0x00000002L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE_MASK 0x00000010L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE_MASK 0x00000020L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE_MASK 0x00000100L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE_MASK 0x00000200L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE_MASK 0x00002000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE_MASK 0x00010000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE_MASK 0x00020000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE_MASK 0x00100000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE_MASK 0x00200000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE_MASK 0x01000000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE_MASK 0x02000000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE_MASK 0x10000000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE_MASK 0x20000000L
+//DP1_DP_DB_CNTL
+#define DP1_DP_DB_CNTL__DP_DB_PENDING__SHIFT 0x0
+#define DP1_DP_DB_CNTL__DP_DB_TAKEN__SHIFT 0x4
+#define DP1_DP_DB_CNTL__DP_DB_TAKEN_CLR__SHIFT 0x5
+#define DP1_DP_DB_CNTL__DP_DB_LOCK__SHIFT 0x8
+#define DP1_DP_DB_CNTL__DP_DB_DISABLE__SHIFT 0xc
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_PENDING__SHIFT 0xf
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DP1_DP_DB_CNTL__DP_DB_PENDING_MASK 0x00000001L
+#define DP1_DP_DB_CNTL__DP_DB_TAKEN_MASK 0x00000010L
+#define DP1_DP_DB_CNTL__DP_DB_TAKEN_CLR_MASK 0x00000020L
+#define DP1_DP_DB_CNTL__DP_DB_LOCK_MASK 0x00000100L
+#define DP1_DP_DB_CNTL__DP_DB_DISABLE_MASK 0x00001000L
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DP1_DP_MSA_VBID_MISC
+#define DP1_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define DP1_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x4
+#define DP1_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE__SHIFT 0x8
+#define DP1_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE__SHIFT 0x9
+#define DP1_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN__SHIFT 0xc
+#define DP1_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN__SHIFT 0xd
+#define DP1_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE__SHIFT 0xf
+#define DP1_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM__SHIFT 0x10
+#define DP1_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_MASK 0x00000003L
+#define DP1_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000010L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_MASK 0x00000100L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_MASK 0x00000200L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN_MASK 0x00001000L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN_MASK 0x00002000L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE_MASK 0x00008000L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_METADATA_TRANSMISSION
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x1
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE__SHIFT 0x4
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE__SHIFT 0x10
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000002L
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE_MASK 0x000000F0L
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DP1_DP_ALPM_CNTL
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND__SHIFT 0x0
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING__SHIFT 0x1
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND__SHIFT 0x2
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING__SHIFT 0x3
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE__SHIFT 0x4
+#define DP1_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO__SHIFT 0x5
+#define DP1_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE__SHIFT 0x6
+#define DP1_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP__SHIFT 0x7
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM__SHIFT 0x8
+#define DP1_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS__SHIFT 0xb
+#define DP1_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN__SHIFT 0xc
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM__SHIFT 0x10
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND_MASK 0x00000001L
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING_MASK 0x00000002L
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND_MASK 0x00000004L
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING_MASK 0x00000008L
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE_MASK 0x00000010L
+#define DP1_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO_MASK 0x00000020L
+#define DP1_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE_MASK 0x00000040L
+#define DP1_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_MASK 0x00000080L
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM_MASK 0x00000300L
+#define DP1_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS_MASK 0x00000800L
+#define DP1_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN_MASK 0x00001000L
+#define DP1_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_GSP8_CNTL
+#define DP1_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE__SHIFT 0x0
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE__SHIFT 0x4
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE__SHIFT 0x5
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE__SHIFT 0x6
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND__SHIFT 0x7
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE__SHIFT 0x8
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING__SHIFT 0xc
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE__SHIFT 0xd
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM__SHIFT 0x10
+#define DP1_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE_MASK 0x0000000FL
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE_MASK 0x00000010L
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE_MASK 0x00000020L
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE_MASK 0x00000040L
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_MASK 0x00000080L
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE_MASK 0x00000100L
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING_MASK 0x00001000L
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE_MASK 0x00002000L
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP1_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_GSP9_CNTL
+#define DP1_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE__SHIFT 0x0
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE__SHIFT 0x4
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE__SHIFT 0x5
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE__SHIFT 0x6
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND__SHIFT 0x7
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE__SHIFT 0x8
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING__SHIFT 0xc
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE__SHIFT 0xd
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM__SHIFT 0x10
+#define DP1_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE_MASK 0x0000000FL
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE_MASK 0x00000010L
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE_MASK 0x00000020L
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE_MASK 0x00000040L
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_MASK 0x00000080L
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE_MASK 0x00000100L
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING_MASK 0x00001000L
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE_MASK 0x00002000L
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP1_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_GSP10_CNTL
+#define DP1_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE__SHIFT 0x0
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE__SHIFT 0x4
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE__SHIFT 0x5
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE__SHIFT 0x6
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND__SHIFT 0x7
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE__SHIFT 0x8
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING__SHIFT 0xc
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE__SHIFT 0xd
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM__SHIFT 0x10
+#define DP1_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE_MASK 0x0000000FL
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE_MASK 0x00000010L
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE_MASK 0x00000020L
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE_MASK 0x00000040L
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_MASK 0x00000080L
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE_MASK 0x00000100L
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING_MASK 0x00001000L
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE_MASK 0x00002000L
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP1_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_GSP11_CNTL
+#define DP1_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE__SHIFT 0x0
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE__SHIFT 0x4
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE__SHIFT 0x5
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE__SHIFT 0x6
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND__SHIFT 0x7
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE__SHIFT 0x8
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING__SHIFT 0xc
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE__SHIFT 0xd
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM__SHIFT 0x10
+#define DP1_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE_MASK 0x0000000FL
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE_MASK 0x00000010L
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE_MASK 0x00000020L
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE_MASK 0x00000040L
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_MASK 0x00000080L
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE_MASK 0x00000100L
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING_MASK 0x00001000L
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE_MASK 0x00002000L
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP1_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_GSP_EN_DB_STATUS
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING__SHIFT 0x0
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING__SHIFT 0x1
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING__SHIFT 0x2
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING__SHIFT 0x3
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING__SHIFT 0x4
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING__SHIFT 0x5
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING__SHIFT 0x6
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING__SHIFT 0x7
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING__SHIFT 0x8
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING__SHIFT 0x9
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING__SHIFT 0xa
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING__SHIFT 0xb
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING_MASK 0x00000001L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING_MASK 0x00000002L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING_MASK 0x00000004L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING_MASK 0x00000008L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING_MASK 0x00000010L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING_MASK 0x00000020L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING_MASK 0x00000040L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING_MASK 0x00000080L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING_MASK 0x00000100L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING_MASK 0x00000200L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING_MASK 0x00000400L
+#define DP1_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING_MASK 0x00000800L
+//DP1_DP_AUXLESS_ALPM_CNTL1
+#define DP1_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT__SHIFT 0x4
+#define DP1_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY__SHIFT 0x8
+#define DP1_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL__SHIFT 0x14
+#define DP1_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE__SHIFT 0x1f
+#define DP1_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT_MASK 0x000000F0L
+#define DP1_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY_MASK 0x0007FF00L
+#define DP1_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL_MASK 0x1FF00000L
+#define DP1_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE_MASK 0x80000000L
+//DP1_DP_AUXLESS_ALPM_CNTL2
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME__SHIFT 0x0
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND__SHIFT 0x7
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE__SHIFT 0x10
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING__SHIFT 0x11
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE__SHIFT 0x12
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING__SHIFT 0x13
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD__SHIFT 0x14
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME_MASK 0x0000007FL
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND_MASK 0x00000080L
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE_MASK 0x00010000L
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING_MASK 0x00020000L
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE_MASK 0x00040000L
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING_MASK 0x00080000L
+#define DP1_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD_MASK 0x3FF00000L
+//DP1_DP_AUXLESS_ALPM_CNTL3
+#define DP1_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM__SHIFT 0x0
+#define DP1_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM__SHIFT 0x10
+#define DP1_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM_MASK 0x0000FFFFL
+#define DP1_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_AUXLESS_ALPM_CNTL4
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN__SHIFT 0x1
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL__SHIFT 0x2
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME__SHIFT 0x3
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE__SHIFT 0x4
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS__SHIFT 0x5
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE__SHIFT 0x6
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM__SHIFT 0x18
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_MASK 0x00000002L
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL_MASK 0x00000004L
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME_MASK 0x00000008L
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE_MASK 0x00000010L
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS_MASK 0x00000020L
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE_MASK 0x00000040L
+#define DP1_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM_MASK 0xFF000000L
+//DP1_DP_AUXLESS_ALPM_CNTL5
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK__SHIFT 0x0
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED__SHIFT 0x1
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x2
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR__SHIFT 0x3
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM__SHIFT 0x8
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM__SHIFT 0x10
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK_MASK 0x00000001L
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED_MASK 0x00000002L
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000004L
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR_MASK 0x00000008L
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM_MASK 0x0000FF00L
+#define DP1_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_STREAM_SYMBOL_COUNT_STATUS
+#define DP1_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT__SHIFT 0x0
+#define DP1_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT_MASK 0x0000FFFFL
+//DP1_DP_STREAM_SYMBOL_COUNT_CONTROL
+#define DP1_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE__SHIFT 0x0
+#define DP1_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET__SHIFT 0x4
+#define DP1_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE_MASK 0x00000001L
+#define DP1_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET_MASK 0x00000010L
+//DP1_DP_LINK_SYMBOL_COUNT_STATUS0
+#define DP1_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT__SHIFT 0x0
+#define DP1_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT_MASK 0x0000FFFFL
+//DP1_DP_LINK_SYMBOL_COUNT_STATUS1
+#define DP1_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT__SHIFT 0x0
+#define DP1_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT_MASK 0xFFFFFFFFL
+//DP1_DP_LINK_SYMBOL_COUNT_CONTROL
+#define DP1_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE__SHIFT 0x0
+#define DP1_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET__SHIFT 0x4
+#define DP1_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE__SHIFT 0x8
+#define DP1_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET__SHIFT 0xc
+#define DP1_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE_MASK 0x00000001L
+#define DP1_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET_MASK 0x00000010L
+#define DP1_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE_MASK 0x00000100L
+#define DP1_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET_MASK 0x00001000L
+
+
+// addressBlock: dce_dc_dio_dig1_dispdec
+//DIG1_DIG_FE_CNTL
+#define DIG1_DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x0
+#define DIG1_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x4
+#define DIG1_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x8
+#define DIG1_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT__SHIFT 0xc
+#define DIG1_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING__SHIFT 0xf
+#define DIG1_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT__SHIFT 0x10
+#define DIG1_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN__SHIFT 0x14
+#define DIG1_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x00000007L
+#define DIG1_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x00000070L
+#define DIG1_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x00000100L
+#define DIG1_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT_MASK 0x00007000L
+#define DIG1_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING_MASK 0x00008000L
+#define DIG1_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT_MASK 0x00030000L
+#define DIG1_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN_MASK 0x00100000L
+//DIG1_DIG_FE_CLK_CNTL
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_MODE__SHIFT 0x0
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN__SHIFT 0x4
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET__SHIFT 0x5
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON__SHIFT 0xa
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON__SHIFT 0xb
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON__SHIFT 0xc
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON__SHIFT 0xe
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_MODE_MASK 0x00000007L
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN_MASK 0x00000010L
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET_MASK 0x00000020L
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON_MASK 0x00000400L
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON_MASK 0x00000800L
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON_MASK 0x00001000L
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON_MASK 0x00002000L
+#define DIG1_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON_MASK 0x00004000L
+//DIG1_DIG_FE_EN_CNTL
+#define DIG1_DIG_FE_EN_CNTL__DIG_FE_ENABLE__SHIFT 0x0
+#define DIG1_DIG_FE_EN_CNTL__DIG_FE_ENABLE_MASK 0x00000001L
+//DIG1_DIG_OUTPUT_CRC_CNTL
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x0
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x4
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x8
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x00000001L
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x00000010L
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x00000300L
+//DIG1_DIG_OUTPUT_CRC_RESULT
+#define DIG1_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x0
+#define DIG1_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3FFFFFFFL
+//DIG1_DIG_CLOCK_PATTERN
+#define DIG1_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x0
+#define DIG1_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x000003FFL
+//DIG1_DIG_TEST_PATTERN
+#define DIG1_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x0
+#define DIG1_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x1
+#define DIG1_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x4
+#define DIG1_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x5
+#define DIG1_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x6
+#define DIG1_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x10
+#define DIG1_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x00000001L
+#define DIG1_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x00000002L
+#define DIG1_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x00000010L
+#define DIG1_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x00000020L
+#define DIG1_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x00000040L
+#define DIG1_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x03FF0000L
+//DIG1_DIG_RANDOM_PATTERN_SEED
+#define DIG1_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x0
+#define DIG1_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x18
+#define DIG1_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0x00FFFFFFL
+#define DIG1_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x01000000L
+//DIG1_DIG_FIFO_CTRL0
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE__SHIFT 0x0
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_RESET__SHIFT 0x1
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL__SHIFT 0x2
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x7
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE__SHIFT 0x8
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE__SHIFT 0x14
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_ERROR__SHIFT 0x1c
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE_MASK 0x00000001L
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_RESET_MASK 0x00000002L
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL_MASK 0x0000007CL
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC_MASK 0x00000080L
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE_MASK 0x00000100L
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE_MASK 0x00100000L
+#define DIG1_DIG_FIFO_CTRL0__DIG_FIFO_ERROR_MASK 0x30000000L
+//DIG1_DIG_FIFO_CTRL1
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED__SHIFT 0x1d
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED_MASK 0x20000000L
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DIG1_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+//DIG1_HDMI_METADATA_PACKET_CONTROL
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_CONTROL
+#define DIG1_HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
+#define DIG1_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1
+#define DIG1_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
+#define DIG1_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED__SHIFT 0x3
+#define DIG1_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x4
+#define DIG1_HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x8
+#define DIG1_HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x9
+#define DIG1_HDMI_CONTROL__DOLBY_VISION_EN__SHIFT 0xa
+#define DIG1_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED__SHIFT 0xb
+#define DIG1_HDMI_CONTROL__TMDS_PIXEL_ENCODING__SHIFT 0xc
+#define DIG1_HDMI_CONTROL__TMDS_COLOR_FORMAT__SHIFT 0xd
+#define DIG1_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM__SHIFT 0x10
+#define DIG1_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x18
+#define DIG1_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x1c
+#define DIG1_HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x00000001L
+#define DIG1_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x00000002L
+#define DIG1_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x00000004L
+#define DIG1_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x00000008L
+#define DIG1_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x00000010L
+#define DIG1_HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x00000100L
+#define DIG1_HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x00000200L
+#define DIG1_HDMI_CONTROL__DOLBY_VISION_EN_MASK 0x00000400L
+#define DIG1_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED_MASK 0x00000800L
+#define DIG1_HDMI_CONTROL__TMDS_PIXEL_ENCODING_MASK 0x00001000L
+#define DIG1_HDMI_CONTROL__TMDS_COLOR_FORMAT_MASK 0x00006000L
+#define DIG1_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM_MASK 0x003F0000L
+#define DIG1_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x01000000L
+#define DIG1_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000L
+//DIG1_HDMI_STATUS
+#define DIG1_HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x0
+#define DIG1_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x10
+#define DIG1_HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x14
+#define DIG1_HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x1b
+#define DIG1_HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x00000001L
+#define DIG1_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x00010000L
+#define DIG1_HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x00100000L
+#define DIG1_HDMI_STATUS__HDMI_ERROR_INT_MASK 0x08000000L
+//DIG1_HDMI_AUDIO_PACKET_CONTROL
+#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x4
+#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x00000030L
+//DIG1_HDMI_ACR_PACKET_CONTROL
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+//DIG1_HDMI_VBI_PACKET_CONTROL
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x0
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x4
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE__SHIFT 0x18
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x00001000L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE_MASK 0x3F000000L
+//DIG1_HDMI_INFOFRAME_CONTROL0
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x5
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x8
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x9
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x00000010L
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x00000020L
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x00000100L
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x00000200L
+//DIG1_HDMI_INFOFRAME_CONTROL1
+#define DIG1_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x8
+#define DIG1_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x10
+#define DIG1_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x00003F00L
+#define DIG1_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x003F0000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x2
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x6
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xa
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xe
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x12
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x16
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1a
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1e
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE__SHIFT 0x1f
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000004L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 0x00000040L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000400L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00004000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00040000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00400000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x04000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x40000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE_MASK 0x80000000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL6
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT__SHIFT 0x1
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE__SHIFT 0x2
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND__SHIFT 0x4
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT__SHIFT 0x5
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE__SHIFT 0x6
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND__SHIFT 0x8
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT__SHIFT 0x9
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE__SHIFT 0xa
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND__SHIFT 0xc
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT__SHIFT 0xd
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE__SHIFT 0xe
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT__SHIFT 0x11
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE__SHIFT 0x12
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND__SHIFT 0x14
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT__SHIFT 0x15
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE__SHIFT 0x16
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND__SHIFT 0x18
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT__SHIFT 0x19
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE__SHIFT 0x1a
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND_MASK 0x00000001L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT_MASK 0x00000002L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE_MASK 0x00000004L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND_MASK 0x00000010L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT_MASK 0x00000020L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE_MASK 0x00000040L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND_MASK 0x00000100L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT_MASK 0x00000200L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE_MASK 0x00000400L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND_MASK 0x00001000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT_MASK 0x00002000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE_MASK 0x00004000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND_MASK 0x00010000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT_MASK 0x00020000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE_MASK 0x00040000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND_MASK 0x00100000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT_MASK 0x00200000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE_MASK 0x00400000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND_MASK 0x01000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT_MASK 0x02000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE_MASK 0x04000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL5
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING__SHIFT 0x11
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND__SHIFT 0x12
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING__SHIFT 0x13
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND__SHIFT 0x14
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING__SHIFT 0x15
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND__SHIFT 0x16
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING__SHIFT 0x17
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND__SHIFT 0x18
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING__SHIFT 0x19
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND__SHIFT 0x1a
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING__SHIFT 0x1b
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND__SHIFT 0x1c
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING__SHIFT 0x1d
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_MASK 0x00010000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING_MASK 0x00020000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_MASK 0x00040000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING_MASK 0x00080000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_MASK 0x00100000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING_MASK 0x00200000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_MASK 0x00400000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING_MASK 0x00800000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_MASK 0x01000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING_MASK 0x02000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_MASK 0x04000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING_MASK 0x08000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_MASK 0x10000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING_MASK 0x20000000L
+//DIG1_HDMI_GC
+#define DIG1_HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0
+#define DIG1_HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define DIG1_HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define DIG1_HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x8
+#define DIG1_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0xc
+#define DIG1_HDMI_GC__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define DIG1_HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define DIG1_HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+#define DIG1_HDMI_GC__HDMI_PACKING_PHASE_MASK 0x00000F00L
+#define DIG1_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x00001000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL1
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL2
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL3
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL4
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL7
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL8
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL9
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING__SHIFT 0x11
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING__SHIFT 0x12
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING__SHIFT 0x13
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING__SHIFT 0x14
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING__SHIFT 0x15
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING__SHIFT 0x16
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING__SHIFT 0x17
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING__SHIFT 0x18
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING__SHIFT 0x19
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING__SHIFT 0x1a
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING__SHIFT 0x1b
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING__SHIFT 0x1c
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING__SHIFT 0x1d
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING__SHIFT 0x1e
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING_MASK 0x00010000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING_MASK 0x00020000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING_MASK 0x00040000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING_MASK 0x00080000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING_MASK 0x00100000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING_MASK 0x00200000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING_MASK 0x00400000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING_MASK 0x00800000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING_MASK 0x01000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING_MASK 0x02000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING_MASK 0x04000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING_MASK 0x08000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING_MASK 0x10000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING_MASK 0x20000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING_MASK 0x40000000L
+//DIG1_HDMI_DB_CONTROL
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_TAKEN__SHIFT 0x4
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR__SHIFT 0x5
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_LOCK__SHIFT 0x8
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_TAKEN_MASK 0x00000010L
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR_MASK 0x00000020L
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_LOCK_MASK 0x00000100L
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DIG1_HDMI_ACR_32_0
+#define DIG1_HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define DIG1_HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L
+//DIG1_HDMI_ACR_32_1
+#define DIG1_HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define DIG1_HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL
+//DIG1_HDMI_ACR_44_0
+#define DIG1_HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define DIG1_HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L
+//DIG1_HDMI_ACR_44_1
+#define DIG1_HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define DIG1_HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL
+//DIG1_HDMI_ACR_48_0
+#define DIG1_HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define DIG1_HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L
+//DIG1_HDMI_ACR_48_1
+#define DIG1_HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define DIG1_HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL
+//DIG1_HDMI_ACR_STATUS_0
+#define DIG1_HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define DIG1_HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L
+//DIG1_HDMI_ACR_STATUS_1
+#define DIG1_HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define DIG1_HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL
+//DIG1_AFMT_CNTL
+#define DIG1_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN__SHIFT 0x0
+#define DIG1_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
+#define DIG1_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L
+#define DIG1_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L
+//DIG1_DIG_BE_CLK_CNTL
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_MODE__SHIFT 0x0
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN__SHIFT 0x4
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET__SHIFT 0x5
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON__SHIFT 0xb
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_MODE_MASK 0x00000007L
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN_MASK 0x00000010L
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET_MASK 0x00000020L
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON_MASK 0x00000800L
+#define DIG1_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON_MASK 0x00002000L
+//DIG1_DIG_BE_CNTL
+#define DIG1_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
+#define DIG1_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
+#define DIG1_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
+#define DIG1_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
+#define DIG1_DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x1c
+#define DIG1_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x00000001L
+#define DIG1_DIG_BE_CNTL__DIG_SWAP_MASK 0x00000002L
+#define DIG1_DIG_BE_CNTL__DIG_RB_SWITCH_EN_MASK 0x00000004L
+#define DIG1_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x00007F00L
+#define DIG1_DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000L
+//DIG1_DIG_BE_EN_CNTL
+#define DIG1_DIG_BE_EN_CNTL__DIG_BE_ENABLE__SHIFT 0x0
+#define DIG1_DIG_BE_EN_CNTL__DIG_BE_ENABLE_MASK 0x00000001L
+//DIG1_TMDS_CNTL
+#define DIG1_TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x0
+#define DIG1_TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x00000001L
+//DIG1_TMDS_CONTROL_CHAR
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x0
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x1
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x2
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x3
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x00000001L
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x00000002L
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x00000004L
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x00000008L
+//DIG1_TMDS_CONTROL0_FEEDBACK
+#define DIG1_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x0
+#define DIG1_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x8
+#define DIG1_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x00000003L
+#define DIG1_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x00000300L
+//DIG1_TMDS_STEREOSYNC_CTL_SEL
+#define DIG1_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x0
+#define DIG1_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x00000003L
+//DIG1_TMDS_SYNC_CHAR_PATTERN_0_1
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x0
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x10
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x000003FFL
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x03FF0000L
+//DIG1_TMDS_SYNC_CHAR_PATTERN_2_3
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x0
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x10
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x000003FFL
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x03FF0000L
+//DIG1_TMDS_CTL_BITS
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x0
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x8
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x10
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x18
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL0_MASK 0x00000001L
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL1_MASK 0x00000100L
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL2_MASK 0x00010000L
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL3_MASK 0x01000000L
+//DIG1_TMDS_DCBALANCER_CONTROL
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x0
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x4
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x8
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x10
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x18
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x00000001L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x00000070L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x00000100L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0x000F0000L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x01000000L
+//DIG1_TMDS_SYNC_DCBALANCE_CHAR
+#define DIG1_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01__SHIFT 0x0
+#define DIG1_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11__SHIFT 0x10
+#define DIG1_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01_MASK 0x000003FFL
+#define DIG1_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11_MASK 0x03FF0000L
+//DIG1_TMDS_CTL0_1_GEN_CNTL
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x0
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x7
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x8
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0xb
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x10
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x14
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x17
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x18
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x1f
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0x0000000FL
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x00000070L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x00000080L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x00000300L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0x000F0000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x00700000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x00800000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x03000000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000L
+//DIG1_TMDS_CTL2_3_GEN_CNTL
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x0
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x4
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x7
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x8
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0xb
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x10
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x14
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x17
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x18
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0x0000000FL
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x00000070L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x00000080L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x00000300L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0x000F0000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x00700000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x00800000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x03000000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000L
+//DIG1_DIG_DEBUG
+//DIG1_DIG_VERSION
+#define DIG1_DIG_VERSION__DIG_TYPE__SHIFT 0x0
+#define DIG1_DIG_VERSION__DIG_TYPE_MASK 0x00000001L
+
+
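+/*
+ * Editor's note (illustrative only, not part of the generated header):
+ * every register above is exposed as a __SHIFT/_MASK pair. A minimal
+ * sketch of how such a pair is typically used to extract a field from
+ * a raw register value, assuming u32 from <linux/types.h>; the helper
+ * name below is hypothetical and does not appear in this file:
+ *
+ *	static inline u32 dig1_dig_be_mode(u32 regval)
+ *	{
+ *		return (regval & DIG1_DIG_BE_CLK_CNTL__DIG_BE_MODE_MASK) >>
+ *		       DIG1_DIG_BE_CLK_CNTL__DIG_BE_MODE__SHIFT;
+ *	}
+ */
+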
+// addressBlock: dce_dc_dio_dp2_dispdec
+//DP2_DP_LINK_CNTL
+#define DP2_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x4
+#define DP2_DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x8
+#define DP2_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x00000010L
+#define DP2_DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x00000100L
+//DP2_DP_PIXEL_FORMAT
+#define DP2_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x0
+#define DP2_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x18
+#define DP2_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE__SHIFT 0x1e
+#define DP2_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x00000007L
+#define DP2_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x07000000L
+#define DP2_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE_MASK 0x40000000L
+//DP2_DP_MSA_COLORIMETRY
+#define DP2_DP_MSA_COLORIMETRY__DP_MSA_MISC0__SHIFT 0x18
+#define DP2_DP_MSA_COLORIMETRY__DP_MSA_MISC0_MASK 0xFF000000L
+//DP2_DP_CONFIG
+#define DP2_DP_CONFIG__DP_UDI_LANES__SHIFT 0x0
+#define DP2_DP_CONFIG__DP_UDI_LANES_MASK 0x00000003L
+//DP2_DP_VID_STREAM_CNTL
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x0
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x8
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x10
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x14
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x00000300L
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x00010000L
+#define DP2_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x00100000L
+//DP2_DP_STEER_FIFO
+#define DP2_DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x0
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x4
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x5
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x6
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x7
+#define DP2_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x8
+#define DP2_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0xc
+#define DP2_DP_STEER_FIFO__DP_TU_SIZE__SHIFT 0x18
+#define DP2_DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x00000001L
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x00000010L
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x00000020L
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x00000040L
+#define DP2_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x00000080L
+#define DP2_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x00000100L
+#define DP2_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x00001000L
+#define DP2_DP_STEER_FIFO__DP_TU_SIZE_MASK 0x3F000000L
+//DP2_DP_MSA_MISC
+#define DP2_DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x0
+#define DP2_DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x8
+#define DP2_DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x10
+#define DP2_DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x18
+#define DP2_DP_MSA_MISC__DP_MSA_MISC1_MASK 0x000000FFL
+#define DP2_DP_MSA_MISC__DP_MSA_MISC2_MASK 0x0000FF00L
+#define DP2_DP_MSA_MISC__DP_MSA_MISC3_MASK 0x00FF0000L
+#define DP2_DP_MSA_MISC__DP_MSA_MISC4_MASK 0xFF000000L
+//DP2_DP_DPHY_INTERNAL_CTRL
+#define DP2_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN__SHIFT 0x0
+#define DP2_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL__SHIFT 0x4
+#define DP2_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN_MASK 0x00000001L
+#define DP2_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL_MASK 0x00000010L
+//DP2_DP_VID_TIMING
+#define DP2_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE__SHIFT 0x4
+#define DP2_DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x8
+#define DP2_DP_VID_TIMING__DP_VID_N_MUL__SHIFT 0xa
+#define DP2_DP_VID_TIMING__DP_VID_M_DIV__SHIFT 0xc
+#define DP2_DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x18
+#define DP2_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE_MASK 0x00000010L
+#define DP2_DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x00000100L
+#define DP2_DP_VID_TIMING__DP_VID_N_MUL_MASK 0x00000C00L
+#define DP2_DP_VID_TIMING__DP_VID_M_DIV_MASK 0x00003000L
+#define DP2_DP_VID_TIMING__DP_VID_N_DIV_MASK 0xFF000000L
+//DP2_DP_VID_N
+#define DP2_DP_VID_N__DP_VID_N__SHIFT 0x0
+#define DP2_DP_VID_N__DP_VID_N_MASK 0x00FFFFFFL
+//DP2_DP_VID_M
+#define DP2_DP_VID_M__DP_VID_M__SHIFT 0x0
+#define DP2_DP_VID_M__DP_VID_M_MASK 0x00FFFFFFL
+//DP2_DP_LINK_FRAMING_CNTL
+#define DP2_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x0
+#define DP2_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE__SHIFT 0x14
+#define DP2_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x18
+#define DP2_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x1c
+#define DP2_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x0003FFFFL
+#define DP2_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE_MASK 0x00100000L
+#define DP2_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x01000000L
+#define DP2_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000L
+//DP2_DP_HBR2_EYE_PATTERN
+#define DP2_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x0
+#define DP2_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x00000001L
+//DP2_DP_VID_MSA_VBID
+#define DP2_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x0
+#define DP2_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE__SHIFT 0xc
+#define DP2_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x18
+#define DP2_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0x00000FFFL
+#define DP2_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE_MASK 0x00001000L
+#define DP2_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x01000000L
+//DP2_DP_VID_INTERRUPT_CNTL
+#define DP2_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x0
+#define DP2_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x1
+#define DP2_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x2
+#define DP2_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x00000001L
+#define DP2_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x00000002L
+#define DP2_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x00000004L
+//DP2_DP_DPHY_CNTL
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x0
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x1
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x2
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x3
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_EN__SHIFT 0x4
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW__SHIFT 0x5
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS__SHIFT 0x6
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM__SHIFT 0x7
+#define DP2_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL__SHIFT 0x8
+#define DP2_DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x10
+#define DP2_DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x18
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x00000001L
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x00000002L
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x00000004L
+#define DP2_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x00000008L
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_EN_MASK 0x00000010L
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW_MASK 0x00000020L
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS_MASK 0x00000040L
+#define DP2_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM_MASK 0x00000080L
+#define DP2_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL_MASK 0x00000100L
+#define DP2_DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x00010000L
+#define DP2_DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x01000000L
+//DP2_DP_DPHY_TRAINING_PATTERN_SEL
+#define DP2_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x0
+#define DP2_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x00000003L
+//DP2_DP_DPHY_SYM0
+#define DP2_DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x0
+#define DP2_DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0xa
+#define DP2_DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x14
+#define DP2_DP_DPHY_SYM0__DPHY_SYM1_MASK 0x000003FFL
+#define DP2_DP_DPHY_SYM0__DPHY_SYM2_MASK 0x000FFC00L
+#define DP2_DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3FF00000L
+//DP2_DP_DPHY_SYM1
+#define DP2_DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x0
+#define DP2_DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0xa
+#define DP2_DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x14
+#define DP2_DP_DPHY_SYM1__DPHY_SYM4_MASK 0x000003FFL
+#define DP2_DP_DPHY_SYM1__DPHY_SYM5_MASK 0x000FFC00L
+#define DP2_DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3FF00000L
+//DP2_DP_DPHY_SYM2
+#define DP2_DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x0
+#define DP2_DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0xa
+#define DP2_DP_DPHY_SYM2__DPHY_SYM7_MASK 0x000003FFL
+#define DP2_DP_DPHY_SYM2__DPHY_SYM8_MASK 0x000FFC00L
+//DP2_DP_DPHY_8B10B_CNTL
+#define DP2_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x8
+#define DP2_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x10
+#define DP2_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x18
+#define DP2_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x00000100L
+#define DP2_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x00010000L
+#define DP2_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x01000000L
+//DP2_DP_DPHY_PRBS_CNTL
+#define DP2_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x0
+#define DP2_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
+#define DP2_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP2_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x00000001L
+#define DP2_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x00000030L
+#define DP2_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7FFFFF00L
+//DP2_DP_DPHY_SCRAM_CNTL
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS__SHIFT 0x0
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE__SHIFT 0x18
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS_MASK 0x00000001L
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x00000010L
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x0003FF00L
+#define DP2_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE_MASK 0x01000000L
+//DP2_DP_DPHY_CRC_CONTROL0
+#define DP2_DP_DPHY_CRC_CONTROL0__DPHY_CRC_EN__SHIFT 0x0
+#define DP2_DP_DPHY_CRC_CONTROL0__DPHY_CRC_CONT_EN__SHIFT 0x4
+#define DP2_DP_DPHY_CRC_CONTROL0__DPHY_CRC_FIELD__SHIFT 0x8
+#define DP2_DP_DPHY_CRC_CONTROL0__DPHY_CRC_EN_MASK 0x00000001L
+#define DP2_DP_DPHY_CRC_CONTROL0__DPHY_CRC_CONT_EN_MASK 0x00000010L
+#define DP2_DP_DPHY_CRC_CONTROL0__DPHY_CRC_FIELD_MASK 0x00000100L
+//DP2_DP_DPHY_CRC_CONTROL1
+#define DP2_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x0
+#define DP2_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x8
+#define DP2_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_FIRST_SLOT_MASK 0x0000003FL
+#define DP2_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_LAST_SLOT_MASK 0x00003F00L
+//DP2_DP_DPHY_CRC_RESULT0
+#define DP2_DP_DPHY_CRC_RESULT0__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP2_DP_DPHY_CRC_RESULT0__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP2_DP_DPHY_CRC_RESULT1
+#define DP2_DP_DPHY_CRC_RESULT1__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP2_DP_DPHY_CRC_RESULT1__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP2_DP_DPHY_CRC_STATUS
+#define DP2_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID__SHIFT 0x0
+#define DP2_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_ACK__SHIFT 0x4
+#define DP2_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x8
+#define DP2_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0xc
+#define DP2_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x10
+#define DP2_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_MASK 0x00000001L
+#define DP2_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_ACK_MASK 0x00000010L
+#define DP2_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x00000100L
+#define DP2_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x00001000L
+#define DP2_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x00010000L
+//DP2_DP_DPHY_FAST_TRAINING
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x0
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x1
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x2
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING__SHIFT 0x4
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x8
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x14
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x00000001L
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x00000002L
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x00000004L
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING_MASK 0x00000010L
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0x000FFF00L
+#define DP2_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xFFF00000L
+//DP2_DP_DPHY_FAST_TRAINING_STATUS
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x0
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x4
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x8
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0xc
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x00000007L
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x00000010L
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x00000100L
+#define DP2_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x00001000L
+//DP2_DP_DPHY_CRC_RESULT2
+#define DP2_DP_DPHY_CRC_RESULT2__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP2_DP_DPHY_CRC_RESULT2__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP2_DP_DPHY_CRC_RESULT3
+#define DP2_DP_DPHY_CRC_RESULT3__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP2_DP_DPHY_CRC_RESULT3__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP2_DP_SEC_CNTL
+#define DP2_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x0
+#define DP2_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
+#define DP2_DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x8
+#define DP2_DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0xc
+#define DP2_DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x10
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x14
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x15
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x16
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x17
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE__SHIFT 0x18
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE__SHIFT 0x19
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE__SHIFT 0x1a
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE__SHIFT 0x1b
+#define DP2_DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x1c
+#define DP2_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x00000001L
+#define DP2_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x00000010L
+#define DP2_DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x00000100L
+#define DP2_DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x00001000L
+#define DP2_DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x00010000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x00100000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x00200000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x00400000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x00800000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE_MASK 0x01000000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE_MASK 0x02000000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE_MASK 0x04000000L
+#define DP2_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE_MASK 0x08000000L
+#define DP2_DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000L
+//DP2_DP_SEC_CNTL1
+#define DP2_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x0
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE__SHIFT 0x1
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY__SHIFT 0x4
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND__SHIFT 0x5
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING__SHIFT 0x6
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED__SHIFT 0x7
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE__SHIFT 0x8
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE__SHIFT 0x9
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE__SHIFT 0xa
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE__SHIFT 0xb
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE__SHIFT 0xc
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE__SHIFT 0xd
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE__SHIFT 0xe
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE__SHIFT 0xf
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM__SHIFT 0x10
+#define DP2_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x00000001L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE_MASK 0x00000002L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY_MASK 0x00000010L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_MASK 0x00000020L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING_MASK 0x00000040L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED_MASK 0x00000080L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE_MASK 0x00000100L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE_MASK 0x00000200L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE_MASK 0x00000400L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE_MASK 0x00000800L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE_MASK 0x00001000L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE_MASK 0x00002000L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE_MASK 0x00004000L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE_MASK 0x00008000L
+#define DP2_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_SEC_FRAMING1
+#define DP2_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x0
+#define DP2_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP2_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0x00000FFFL
+#define DP2_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP2_DP_SEC_FRAMING2
+#define DP2_DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x0
+#define DP2_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP2_DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0x0000FFFFL
+#define DP2_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP2_DP_SEC_FRAMING3
+#define DP2_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x0
+#define DP2_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP2_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x00003FFFL
+#define DP2_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP2_DP_SEC_FRAMING4
+#define DP2_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING__SHIFT 0x0
+#define DP2_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x14
+#define DP2_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x18
+#define DP2_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x1c
+#define DP2_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP2_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING_MASK 0x00000001L
+#define DP2_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x00100000L
+#define DP2_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x01000000L
+#define DP2_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000L
+#define DP2_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP2_DP_SEC_AUD_N
+#define DP2_DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x0
+#define DP2_DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0x00FFFFFFL
+//DP2_DP_SEC_AUD_N_READBACK
+#define DP2_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x0
+#define DP2_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0x00FFFFFFL
+//DP2_DP_SEC_AUD_M
+#define DP2_DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x0
+#define DP2_DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0x00FFFFFFL
+//DP2_DP_SEC_AUD_M_READBACK
+#define DP2_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x0
+#define DP2_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0x00FFFFFFL
+//DP2_DP_SEC_TIMESTAMP
+#define DP2_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x0
+#define DP2_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x00000001L
+//DP2_DP_SEC_PACKET_CNTL
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x1
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x4
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x8
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x10
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0x0000000EL
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x00000010L
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x00003F00L
+#define DP2_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x00010000L
+//DP2_DP_MSE_RATE_CNTL
+#define DP2_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x0
+#define DP2_DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x1a
+#define DP2_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x03FFFFFFL
+#define DP2_DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xFC000000L
+//DP2_DP_MSE_RATE_UPDATE
+#define DP2_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x0
+#define DP2_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x00000001L
+//DP2_DP_MSE_SAT0
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x0
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0__SHIFT 0x4
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0__SHIFT 0x5
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x8
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x10
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1__SHIFT 0x14
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1__SHIFT 0x15
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x18
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x00000007L
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0_MASK 0x00000010L
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0_MASK 0x00000020L
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x00003F00L
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x00070000L
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1_MASK 0x00100000L
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1_MASK 0x00200000L
+#define DP2_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3F000000L
+//DP2_DP_MSE_SAT1
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x0
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2__SHIFT 0x4
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2__SHIFT 0x5
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x8
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x10
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3__SHIFT 0x14
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3__SHIFT 0x15
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x18
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x00000007L
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2_MASK 0x00000010L
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2_MASK 0x00000020L
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x00003F00L
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x00070000L
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3_MASK 0x00100000L
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3_MASK 0x00200000L
+#define DP2_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3F000000L
+//DP2_DP_MSE_SAT2
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x0
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4__SHIFT 0x4
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4__SHIFT 0x5
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x8
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x10
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5__SHIFT 0x14
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5__SHIFT 0x15
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x18
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x00000007L
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4_MASK 0x00000010L
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4_MASK 0x00000020L
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x00003F00L
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x00070000L
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5_MASK 0x00100000L
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5_MASK 0x00200000L
+#define DP2_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3F000000L
+//DP2_DP_MSE_SAT_UPDATE
+#define DP2_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x0
+#define DP2_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x8
+#define DP2_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x00000003L
+#define DP2_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x00000100L
+//DP2_DP_MSE_LINK_TIMING
+#define DP2_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x0
+#define DP2_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x10
+#define DP2_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x000003FFL
+#define DP2_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x00030000L
+//DP2_DP_MSE_MISC_CNTL
+#define DP2_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x0
+#define DP2_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x4
+#define DP2_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x8
+#define DP2_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x00000001L
+#define DP2_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x00000010L
+#define DP2_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x00000100L
+//DP2_DP_DPHY_BS_SR_SWAP_CNTL
+#define DP2_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
+#define DP2_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE__SHIFT 0xf
+#define DP2_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START__SHIFT 0x10
+#define DP2_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x000003FFL
+#define DP2_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x00008000L
+#define DP2_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START_MASK 0x00010000L
+//DP2_DP_DPHY_HBR2_PATTERN_CONTROL
+#define DP2_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL__SHIFT 0x0
+#define DP2_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL_MASK 0x00000007L
+//DP2_DP_MSE_SAT0_STATUS
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS__SHIFT 0x0
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS__SHIFT 0x4
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS__SHIFT 0x5
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS__SHIFT 0x8
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS__SHIFT 0x10
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS__SHIFT 0x14
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS__SHIFT 0x15
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS__SHIFT 0x18
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS_MASK 0x00000007L
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS_MASK 0x00000010L
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS_MASK 0x00000020L
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS_MASK 0x00003F00L
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS_MASK 0x00070000L
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS_MASK 0x00100000L
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS_MASK 0x00200000L
+#define DP2_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS_MASK 0x3F000000L
+//DP2_DP_MSE_SAT1_STATUS
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS__SHIFT 0x0
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS__SHIFT 0x4
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS__SHIFT 0x5
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS__SHIFT 0x8
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS__SHIFT 0x10
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS__SHIFT 0x14
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS__SHIFT 0x15
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS__SHIFT 0x18
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS_MASK 0x00000007L
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS_MASK 0x00000010L
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS_MASK 0x00000020L
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS_MASK 0x00003F00L
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS_MASK 0x00070000L
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS_MASK 0x00100000L
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS_MASK 0x00200000L
+#define DP2_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS_MASK 0x3F000000L
+//DP2_DP_MSE_SAT2_STATUS
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS__SHIFT 0x0
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS__SHIFT 0x4
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS__SHIFT 0x5
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS__SHIFT 0x8
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS__SHIFT 0x10
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS__SHIFT 0x14
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS__SHIFT 0x15
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS__SHIFT 0x18
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS_MASK 0x00000007L
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS_MASK 0x00000010L
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS_MASK 0x00000020L
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS_MASK 0x00003F00L
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS_MASK 0x00070000L
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS_MASK 0x00100000L
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS_MASK 0x00200000L
+#define DP2_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS_MASK 0x3F000000L
+//DP2_DP_DPIA_SPARE
+#define DP2_DP_DPIA_SPARE__DP_DPIA_SPARE__SHIFT 0x0
+#define DP2_DP_DPIA_SPARE__DP_DPIA_SPARE_MASK 0x00000003L
+//DP2_DP_MSA_TIMING_PARAM1
+#define DP2_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL__SHIFT 0x0
+#define DP2_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL__SHIFT 0x10
+#define DP2_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL_MASK 0x0000FFFFL
+#define DP2_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL_MASK 0xFFFF0000L
+//DP2_DP_MSA_TIMING_PARAM2
+#define DP2_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART__SHIFT 0x0
+#define DP2_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART__SHIFT 0x10
+#define DP2_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART_MASK 0x0000FFFFL
+#define DP2_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART_MASK 0xFFFF0000L
+//DP2_DP_MSA_TIMING_PARAM3
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH__SHIFT 0x0
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY__SHIFT 0xf
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH__SHIFT 0x10
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY__SHIFT 0x1f
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH_MASK 0x00007FFFL
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY_MASK 0x00008000L
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH_MASK 0x7FFF0000L
+#define DP2_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY_MASK 0x80000000L
+//DP2_DP_MSA_TIMING_PARAM4
+#define DP2_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT__SHIFT 0x0
+#define DP2_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH__SHIFT 0x10
+#define DP2_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT_MASK 0x0000FFFFL
+#define DP2_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH_MASK 0xFFFF0000L
+//DP2_DP_MSO_CNTL
+#define DP2_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK__SHIFT 0x0
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE__SHIFT 0x4
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE__SHIFT 0x8
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE__SHIFT 0xc
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE__SHIFT 0x10
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE__SHIFT 0x14
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE__SHIFT 0x18
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE__SHIFT 0x1c
+#define DP2_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK_MASK 0x00000003L
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE_MASK 0x000000F0L
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE_MASK 0x00000F00L
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE_MASK 0x0000F000L
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE_MASK 0x000F0000L
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE_MASK 0x00F00000L
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE_MASK 0x0F000000L
+#define DP2_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE_MASK 0xF0000000L
+//DP2_DP_MSO_CNTL1
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE__SHIFT 0x0
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE__SHIFT 0x4
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE__SHIFT 0x8
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE__SHIFT 0xc
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE__SHIFT 0x10
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE__SHIFT 0x14
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE__SHIFT 0x18
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE__SHIFT 0x1c
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE_MASK 0x0000000FL
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE_MASK 0x000000F0L
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE_MASK 0x00000F00L
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE_MASK 0x0000F000L
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE_MASK 0x000F0000L
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE_MASK 0x00F00000L
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE_MASK 0x0F000000L
+#define DP2_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE_MASK 0xF0000000L
+//DP2_DP_DSC_CNTL
+#define DP2_DP_DSC_CNTL__DP_DSC_MODE__SHIFT 0x0
+#define DP2_DP_DSC_CNTL__DP_DSC_MODE_MASK 0x00000001L
+//DP2_DP_SEC_CNTL2
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND__SHIFT 0x0
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING__SHIFT 0x1
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED__SHIFT 0x2
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE__SHIFT 0x3
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND__SHIFT 0x4
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING__SHIFT 0x5
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED__SHIFT 0x6
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE__SHIFT 0x7
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND__SHIFT 0x8
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING__SHIFT 0x9
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED__SHIFT 0xa
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE__SHIFT 0xb
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND__SHIFT 0xc
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING__SHIFT 0xd
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE__SHIFT 0xf
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND__SHIFT 0x10
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING__SHIFT 0x11
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED__SHIFT 0x12
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE__SHIFT 0x13
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND__SHIFT 0x14
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING__SHIFT 0x15
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED__SHIFT 0x16
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE__SHIFT 0x17
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND__SHIFT 0x18
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING__SHIFT 0x19
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED__SHIFT 0x1a
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE__SHIFT 0x1b
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP11_PPS__SHIFT 0x1c
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_MASK 0x00000001L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING_MASK 0x00000002L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED_MASK 0x00000004L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE_MASK 0x00000008L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_MASK 0x00000010L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING_MASK 0x00000020L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED_MASK 0x00000040L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE_MASK 0x00000080L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_MASK 0x00000100L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING_MASK 0x00000200L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED_MASK 0x00000400L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE_MASK 0x00000800L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_MASK 0x00001000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING_MASK 0x00002000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE_MASK 0x00008000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_MASK 0x00010000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING_MASK 0x00020000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED_MASK 0x00040000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE_MASK 0x00080000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_MASK 0x00100000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING_MASK 0x00200000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED_MASK 0x00400000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE_MASK 0x00800000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_MASK 0x01000000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING_MASK 0x02000000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED_MASK 0x04000000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE_MASK 0x08000000L
+#define DP2_DP_SEC_CNTL2__DP_SEC_GSP11_PPS_MASK 0x10000000L
+//DP2_DP_SEC_CNTL3
+#define DP2_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT 0x0
+#define DP2_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM__SHIFT 0x10
+#define DP2_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK 0x0000FFFFL
+#define DP2_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_SEC_CNTL4
+#define DP2_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM__SHIFT 0x0
+#define DP2_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM__SHIFT 0x10
+#define DP2_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM_MASK 0x0000FFFFL
+#define DP2_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_SEC_CNTL5
+#define DP2_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM__SHIFT 0x0
+#define DP2_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM__SHIFT 0x10
+#define DP2_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM_MASK 0x0000FFFFL
+#define DP2_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_SEC_CNTL6
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM__SHIFT 0x0
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE__SHIFT 0x10
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE__SHIFT 0x11
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE__SHIFT 0x12
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE__SHIFT 0x13
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE__SHIFT 0x14
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE__SHIFT 0x15
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE__SHIFT 0x16
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE__SHIFT 0x17
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE__SHIFT 0x18
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE__SHIFT 0x19
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE__SHIFT 0x1a
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE__SHIFT 0x1b
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM_MASK 0x0000FFFFL
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE_MASK 0x00010000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE_MASK 0x00020000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE_MASK 0x00040000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE_MASK 0x00080000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE_MASK 0x00100000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE_MASK 0x00200000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE_MASK 0x00400000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE_MASK 0x00800000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE_MASK 0x01000000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE_MASK 0x02000000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE_MASK 0x04000000L
+#define DP2_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE_MASK 0x08000000L
+//DP2_DP_SEC_CNTL7
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE__SHIFT 0x0
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE__SHIFT 0x1
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE__SHIFT 0x4
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE__SHIFT 0x5
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE__SHIFT 0x8
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE__SHIFT 0x9
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE__SHIFT 0xc
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE__SHIFT 0xd
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE__SHIFT 0x10
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE__SHIFT 0x11
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE__SHIFT 0x14
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE__SHIFT 0x15
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE__SHIFT 0x18
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE__SHIFT 0x19
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE__SHIFT 0x1c
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE__SHIFT 0x1d
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE_MASK 0x00000001L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE_MASK 0x00000002L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE_MASK 0x00000010L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE_MASK 0x00000020L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE_MASK 0x00000100L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE_MASK 0x00000200L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE_MASK 0x00001000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE_MASK 0x00002000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE_MASK 0x00010000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE_MASK 0x00020000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE_MASK 0x00100000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE_MASK 0x00200000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE_MASK 0x01000000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE_MASK 0x02000000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE_MASK 0x10000000L
+#define DP2_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE_MASK 0x20000000L
+//DP2_DP_DB_CNTL
+#define DP2_DP_DB_CNTL__DP_DB_PENDING__SHIFT 0x0
+#define DP2_DP_DB_CNTL__DP_DB_TAKEN__SHIFT 0x4
+#define DP2_DP_DB_CNTL__DP_DB_TAKEN_CLR__SHIFT 0x5
+#define DP2_DP_DB_CNTL__DP_DB_LOCK__SHIFT 0x8
+#define DP2_DP_DB_CNTL__DP_DB_DISABLE__SHIFT 0xc
+#define DP2_DP_DB_CNTL__DP_VUPDATE_DB_PENDING__SHIFT 0xf
+#define DP2_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DP2_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DP2_DP_DB_CNTL__DP_DB_PENDING_MASK 0x00000001L
+#define DP2_DP_DB_CNTL__DP_DB_TAKEN_MASK 0x00000010L
+#define DP2_DP_DB_CNTL__DP_DB_TAKEN_CLR_MASK 0x00000020L
+#define DP2_DP_DB_CNTL__DP_DB_LOCK_MASK 0x00000100L
+#define DP2_DP_DB_CNTL__DP_DB_DISABLE_MASK 0x00001000L
+#define DP2_DP_DB_CNTL__DP_VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DP2_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DP2_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DP2_DP_MSA_VBID_MISC
+#define DP2_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define DP2_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x4
+#define DP2_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE__SHIFT 0x8
+#define DP2_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE__SHIFT 0x9
+#define DP2_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN__SHIFT 0xc
+#define DP2_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN__SHIFT 0xd
+#define DP2_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE__SHIFT 0xf
+#define DP2_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM__SHIFT 0x10
+#define DP2_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_MASK 0x00000003L
+#define DP2_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000010L
+#define DP2_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_MASK 0x00000100L
+#define DP2_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_MASK 0x00000200L
+#define DP2_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN_MASK 0x00001000L
+#define DP2_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN_MASK 0x00002000L
+#define DP2_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE_MASK 0x00008000L
+#define DP2_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_SEC_METADATA_TRANSMISSION
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x1
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE__SHIFT 0x4
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE__SHIFT 0x10
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000002L
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE_MASK 0x000000F0L
+#define DP2_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DP2_DP_ALPM_CNTL
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND__SHIFT 0x0
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING__SHIFT 0x1
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND__SHIFT 0x2
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING__SHIFT 0x3
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE__SHIFT 0x4
+#define DP2_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO__SHIFT 0x5
+#define DP2_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE__SHIFT 0x6
+#define DP2_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP__SHIFT 0x7
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM__SHIFT 0x8
+#define DP2_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS__SHIFT 0xb
+#define DP2_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN__SHIFT 0xc
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM__SHIFT 0x10
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND_MASK 0x00000001L
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING_MASK 0x00000002L
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND_MASK 0x00000004L
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING_MASK 0x00000008L
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE_MASK 0x00000010L
+#define DP2_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO_MASK 0x00000020L
+#define DP2_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE_MASK 0x00000040L
+#define DP2_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_MASK 0x00000080L
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM_MASK 0x00000300L
+#define DP2_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS_MASK 0x00000800L
+#define DP2_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN_MASK 0x00001000L
+#define DP2_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_GSP8_CNTL
+#define DP2_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE__SHIFT 0x0
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE__SHIFT 0x4
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE__SHIFT 0x5
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE__SHIFT 0x6
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND__SHIFT 0x7
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE__SHIFT 0x8
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING__SHIFT 0xc
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE__SHIFT 0xd
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM__SHIFT 0x10
+#define DP2_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE_MASK 0x0000000FL
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE_MASK 0x00000010L
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE_MASK 0x00000020L
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE_MASK 0x00000040L
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_MASK 0x00000080L
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE_MASK 0x00000100L
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING_MASK 0x00001000L
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE_MASK 0x00002000L
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP2_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_GSP9_CNTL
+#define DP2_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE__SHIFT 0x0
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE__SHIFT 0x4
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE__SHIFT 0x5
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE__SHIFT 0x6
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND__SHIFT 0x7
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE__SHIFT 0x8
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING__SHIFT 0xc
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE__SHIFT 0xd
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM__SHIFT 0x10
+#define DP2_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE_MASK 0x0000000FL
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE_MASK 0x00000010L
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE_MASK 0x00000020L
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE_MASK 0x00000040L
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_MASK 0x00000080L
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE_MASK 0x00000100L
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING_MASK 0x00001000L
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE_MASK 0x00002000L
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP2_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_GSP10_CNTL
+#define DP2_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE__SHIFT 0x0
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE__SHIFT 0x4
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE__SHIFT 0x5
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE__SHIFT 0x6
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND__SHIFT 0x7
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE__SHIFT 0x8
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING__SHIFT 0xc
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE__SHIFT 0xd
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM__SHIFT 0x10
+#define DP2_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE_MASK 0x0000000FL
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE_MASK 0x00000010L
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE_MASK 0x00000020L
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE_MASK 0x00000040L
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_MASK 0x00000080L
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE_MASK 0x00000100L
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING_MASK 0x00001000L
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE_MASK 0x00002000L
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP2_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_GSP11_CNTL
+#define DP2_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE__SHIFT 0x0
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE__SHIFT 0x4
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE__SHIFT 0x5
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE__SHIFT 0x6
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND__SHIFT 0x7
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE__SHIFT 0x8
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING__SHIFT 0xc
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE__SHIFT 0xd
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM__SHIFT 0x10
+#define DP2_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE_MASK 0x0000000FL
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE_MASK 0x00000010L
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE_MASK 0x00000020L
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE_MASK 0x00000040L
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_MASK 0x00000080L
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE_MASK 0x00000100L
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING_MASK 0x00001000L
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE_MASK 0x00002000L
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP2_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_GSP_EN_DB_STATUS
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING__SHIFT 0x0
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING__SHIFT 0x1
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING__SHIFT 0x2
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING__SHIFT 0x3
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING__SHIFT 0x4
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING__SHIFT 0x5
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING__SHIFT 0x6
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING__SHIFT 0x7
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING__SHIFT 0x8
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING__SHIFT 0x9
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING__SHIFT 0xa
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING__SHIFT 0xb
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING_MASK 0x00000001L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING_MASK 0x00000002L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING_MASK 0x00000004L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING_MASK 0x00000008L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING_MASK 0x00000010L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING_MASK 0x00000020L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING_MASK 0x00000040L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING_MASK 0x00000080L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING_MASK 0x00000100L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING_MASK 0x00000200L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING_MASK 0x00000400L
+#define DP2_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING_MASK 0x00000800L
+//DP2_DP_AUXLESS_ALPM_CNTL1
+#define DP2_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT__SHIFT 0x4
+#define DP2_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY__SHIFT 0x8
+#define DP2_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL__SHIFT 0x14
+#define DP2_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE__SHIFT 0x1f
+#define DP2_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT_MASK 0x000000F0L
+#define DP2_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY_MASK 0x0007FF00L
+#define DP2_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL_MASK 0x1FF00000L
+#define DP2_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE_MASK 0x80000000L
+//DP2_DP_AUXLESS_ALPM_CNTL2
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME__SHIFT 0x0
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND__SHIFT 0x7
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE__SHIFT 0x10
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING__SHIFT 0x11
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE__SHIFT 0x12
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING__SHIFT 0x13
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD__SHIFT 0x14
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME_MASK 0x0000007FL
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND_MASK 0x00000080L
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE_MASK 0x00010000L
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING_MASK 0x00020000L
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE_MASK 0x00040000L
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING_MASK 0x00080000L
+#define DP2_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD_MASK 0x3FF00000L
+//DP2_DP_AUXLESS_ALPM_CNTL3
+#define DP2_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM__SHIFT 0x0
+#define DP2_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM__SHIFT 0x10
+#define DP2_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM_MASK 0x0000FFFFL
+#define DP2_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_AUXLESS_ALPM_CNTL4
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN__SHIFT 0x1
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL__SHIFT 0x2
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME__SHIFT 0x3
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE__SHIFT 0x4
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS__SHIFT 0x5
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE__SHIFT 0x6
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM__SHIFT 0x18
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_MASK 0x00000002L
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL_MASK 0x00000004L
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME_MASK 0x00000008L
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE_MASK 0x00000010L
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS_MASK 0x00000020L
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE_MASK 0x00000040L
+#define DP2_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM_MASK 0xFF000000L
+//DP2_DP_AUXLESS_ALPM_CNTL5
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK__SHIFT 0x0
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED__SHIFT 0x1
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x2
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR__SHIFT 0x3
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM__SHIFT 0x8
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM__SHIFT 0x10
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK_MASK 0x00000001L
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED_MASK 0x00000002L
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000004L
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR_MASK 0x00000008L
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM_MASK 0x0000FF00L
+#define DP2_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM_MASK 0xFFFF0000L
+//DP2_DP_STREAM_SYMBOL_COUNT_STATUS
+#define DP2_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT__SHIFT 0x0
+#define DP2_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT_MASK 0x0000FFFFL
+//DP2_DP_STREAM_SYMBOL_COUNT_CONTROL
+#define DP2_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE__SHIFT 0x0
+#define DP2_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET__SHIFT 0x4
+#define DP2_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE_MASK 0x00000001L
+#define DP2_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET_MASK 0x00000010L
+//DP2_DP_LINK_SYMBOL_COUNT_STATUS0
+#define DP2_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT__SHIFT 0x0
+#define DP2_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT_MASK 0x0000FFFFL
+//DP2_DP_LINK_SYMBOL_COUNT_STATUS1
+#define DP2_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT__SHIFT 0x0
+#define DP2_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT_MASK 0xFFFFFFFFL
+//DP2_DP_LINK_SYMBOL_COUNT_CONTROL
+#define DP2_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE__SHIFT 0x0
+#define DP2_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET__SHIFT 0x4
+#define DP2_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE__SHIFT 0x8
+#define DP2_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET__SHIFT 0xc
+#define DP2_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE_MASK 0x00000001L
+#define DP2_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET_MASK 0x00000010L
+#define DP2_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE_MASK 0x00000100L
+#define DP2_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET_MASK 0x00001000L
+
+
+// addressBlock: dce_dc_dio_dig2_dispdec
+//DIG2_DIG_FE_CNTL
+#define DIG2_DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x0
+#define DIG2_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x4
+#define DIG2_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x8
+#define DIG2_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT__SHIFT 0xc
+#define DIG2_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING__SHIFT 0xf
+#define DIG2_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT__SHIFT 0x10
+#define DIG2_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN__SHIFT 0x14
+#define DIG2_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x00000007L
+#define DIG2_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x00000070L
+#define DIG2_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x00000100L
+#define DIG2_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT_MASK 0x00007000L
+#define DIG2_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING_MASK 0x00008000L
+#define DIG2_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT_MASK 0x00030000L
+#define DIG2_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN_MASK 0x00100000L
+//DIG2_DIG_FE_CLK_CNTL
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_MODE__SHIFT 0x0
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN__SHIFT 0x4
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET__SHIFT 0x5
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON__SHIFT 0xa
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON__SHIFT 0xb
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON__SHIFT 0xc
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON__SHIFT 0xe
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_MODE_MASK 0x00000007L
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN_MASK 0x00000010L
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET_MASK 0x00000020L
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON_MASK 0x00000400L
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON_MASK 0x00000800L
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON_MASK 0x00001000L
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON_MASK 0x00002000L
+#define DIG2_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON_MASK 0x00004000L
+//DIG2_DIG_FE_EN_CNTL
+#define DIG2_DIG_FE_EN_CNTL__DIG_FE_ENABLE__SHIFT 0x0
+#define DIG2_DIG_FE_EN_CNTL__DIG_FE_ENABLE_MASK 0x00000001L
+//DIG2_DIG_OUTPUT_CRC_CNTL
+#define DIG2_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x0
+#define DIG2_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x4
+#define DIG2_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x8
+#define DIG2_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x00000001L
+#define DIG2_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x00000010L
+#define DIG2_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x00000300L
+//DIG2_DIG_OUTPUT_CRC_RESULT
+#define DIG2_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x0
+#define DIG2_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3FFFFFFFL
+//DIG2_DIG_CLOCK_PATTERN
+#define DIG2_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x0
+#define DIG2_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x000003FFL
+//DIG2_DIG_TEST_PATTERN
+#define DIG2_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x0
+#define DIG2_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x1
+#define DIG2_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x4
+#define DIG2_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x5
+#define DIG2_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x6
+#define DIG2_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x10
+#define DIG2_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x00000001L
+#define DIG2_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x00000002L
+#define DIG2_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x00000010L
+#define DIG2_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x00000020L
+#define DIG2_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x00000040L
+#define DIG2_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x03FF0000L
+//DIG2_DIG_RANDOM_PATTERN_SEED
+#define DIG2_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x0
+#define DIG2_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x18
+#define DIG2_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0x00FFFFFFL
+#define DIG2_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x01000000L
+//DIG2_DIG_FIFO_CTRL0
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE__SHIFT 0x0
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_RESET__SHIFT 0x1
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL__SHIFT 0x2
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x7
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE__SHIFT 0x8
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE__SHIFT 0x14
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_ERROR__SHIFT 0x1c
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE_MASK 0x00000001L
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_RESET_MASK 0x00000002L
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL_MASK 0x0000007CL
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC_MASK 0x00000080L
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE_MASK 0x00000100L
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE_MASK 0x00100000L
+#define DIG2_DIG_FIFO_CTRL0__DIG_FIFO_ERROR_MASK 0x30000000L
+//DIG2_DIG_FIFO_CTRL1
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED__SHIFT 0x1d
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED_MASK 0x20000000L
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DIG2_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+//DIG2_HDMI_METADATA_PACKET_CONTROL
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L
+#define DIG2_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DIG2_HDMI_CONTROL
+#define DIG2_HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
+#define DIG2_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1
+#define DIG2_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
+#define DIG2_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED__SHIFT 0x3
+#define DIG2_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x4
+#define DIG2_HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x8
+#define DIG2_HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x9
+#define DIG2_HDMI_CONTROL__DOLBY_VISION_EN__SHIFT 0xa
+#define DIG2_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED__SHIFT 0xb
+#define DIG2_HDMI_CONTROL__TMDS_PIXEL_ENCODING__SHIFT 0xc
+#define DIG2_HDMI_CONTROL__TMDS_COLOR_FORMAT__SHIFT 0xd
+#define DIG2_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM__SHIFT 0x10
+#define DIG2_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x18
+#define DIG2_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x1c
+#define DIG2_HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x00000001L
+#define DIG2_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x00000002L
+#define DIG2_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x00000004L
+#define DIG2_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x00000008L
+#define DIG2_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x00000010L
+#define DIG2_HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x00000100L
+#define DIG2_HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x00000200L
+#define DIG2_HDMI_CONTROL__DOLBY_VISION_EN_MASK 0x00000400L
+#define DIG2_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED_MASK 0x00000800L
+#define DIG2_HDMI_CONTROL__TMDS_PIXEL_ENCODING_MASK 0x00001000L
+#define DIG2_HDMI_CONTROL__TMDS_COLOR_FORMAT_MASK 0x00006000L
+#define DIG2_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM_MASK 0x003F0000L
+#define DIG2_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x01000000L
+#define DIG2_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000L
+//DIG2_HDMI_STATUS
+#define DIG2_HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x0
+#define DIG2_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x10
+#define DIG2_HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x14
+#define DIG2_HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x1b
+#define DIG2_HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x00000001L
+#define DIG2_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x00010000L
+#define DIG2_HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x00100000L
+#define DIG2_HDMI_STATUS__HDMI_ERROR_INT_MASK 0x08000000L
+//DIG2_HDMI_AUDIO_PACKET_CONTROL
+#define DIG2_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x4
+#define DIG2_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x00000030L
+//DIG2_HDMI_ACR_PACKET_CONTROL
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define DIG2_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+//DIG2_HDMI_VBI_PACKET_CONTROL
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x0
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x4
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE__SHIFT 0x18
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x00001000L
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
+#define DIG2_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE_MASK 0x3F000000L
+//DIG2_HDMI_INFOFRAME_CONTROL0
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x5
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x8
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x9
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x00000010L
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x00000020L
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x00000100L
+#define DIG2_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x00000200L
+//DIG2_HDMI_INFOFRAME_CONTROL1
+#define DIG2_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x8
+#define DIG2_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x10
+#define DIG2_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x00003F00L
+#define DIG2_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x003F0000L
+//DIG2_HDMI_GENERIC_PACKET_CONTROL0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x2
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x6
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xa
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xe
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x12
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x16
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1a
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1e
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE__SHIFT 0x1f
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000004L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 0x00000040L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000400L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00004000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00040000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00400000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x04000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x40000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE_MASK 0x80000000L
+//DIG2_HDMI_GENERIC_PACKET_CONTROL6
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT__SHIFT 0x1
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE__SHIFT 0x2
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND__SHIFT 0x4
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT__SHIFT 0x5
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE__SHIFT 0x6
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND__SHIFT 0x8
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT__SHIFT 0x9
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE__SHIFT 0xa
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND__SHIFT 0xc
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT__SHIFT 0xd
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE__SHIFT 0xe
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT__SHIFT 0x11
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE__SHIFT 0x12
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND__SHIFT 0x14
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT__SHIFT 0x15
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE__SHIFT 0x16
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND__SHIFT 0x18
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT__SHIFT 0x19
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE__SHIFT 0x1a
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND_MASK 0x00000001L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT_MASK 0x00000002L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE_MASK 0x00000004L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND_MASK 0x00000010L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT_MASK 0x00000020L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE_MASK 0x00000040L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND_MASK 0x00000100L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT_MASK 0x00000200L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE_MASK 0x00000400L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND_MASK 0x00001000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT_MASK 0x00002000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE_MASK 0x00004000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND_MASK 0x00010000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT_MASK 0x00020000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE_MASK 0x00040000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND_MASK 0x00100000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT_MASK 0x00200000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE_MASK 0x00400000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND_MASK 0x01000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT_MASK 0x02000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE_MASK 0x04000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+//DIG2_HDMI_GENERIC_PACKET_CONTROL5
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING__SHIFT 0x11
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND__SHIFT 0x12
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING__SHIFT 0x13
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND__SHIFT 0x14
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING__SHIFT 0x15
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND__SHIFT 0x16
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING__SHIFT 0x17
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND__SHIFT 0x18
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING__SHIFT 0x19
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND__SHIFT 0x1a
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING__SHIFT 0x1b
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND__SHIFT 0x1c
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING__SHIFT 0x1d
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_MASK 0x00010000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING_MASK 0x00020000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_MASK 0x00040000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING_MASK 0x00080000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_MASK 0x00100000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING_MASK 0x00200000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_MASK 0x00400000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING_MASK 0x00800000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_MASK 0x01000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING_MASK 0x02000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_MASK 0x04000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING_MASK 0x08000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_MASK 0x10000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING_MASK 0x20000000L
+//DIG2_HDMI_GC
+#define DIG2_HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0
+#define DIG2_HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define DIG2_HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define DIG2_HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x8
+#define DIG2_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0xc
+#define DIG2_HDMI_GC__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define DIG2_HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define DIG2_HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+#define DIG2_HDMI_GC__HDMI_PACKING_PHASE_MASK 0x00000F00L
+#define DIG2_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x00001000L
+//DIG2_HDMI_GENERIC_PACKET_CONTROL1
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE_MASK 0x0000FFFFL
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE_MASK 0xFFFF0000L
+//DIG2_HDMI_GENERIC_PACKET_CONTROL2
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE_MASK 0x0000FFFFL
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE_MASK 0xFFFF0000L
+//DIG2_HDMI_GENERIC_PACKET_CONTROL3
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE_MASK 0x0000FFFFL
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE_MASK 0xFFFF0000L
+//DIG2_HDMI_GENERIC_PACKET_CONTROL4
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE_MASK 0x0000FFFFL
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE_MASK 0xFFFF0000L
+//DIG2_HDMI_GENERIC_PACKET_CONTROL7
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE_MASK 0x0000FFFFL
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE_MASK 0xFFFF0000L
+//DIG2_HDMI_GENERIC_PACKET_CONTROL8
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE_MASK 0x0000FFFFL
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE_MASK 0xFFFF0000L
+//DIG2_HDMI_GENERIC_PACKET_CONTROL9
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE_MASK 0x0000FFFFL
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE_MASK 0xFFFF0000L
+//DIG2_HDMI_GENERIC_PACKET_CONTROL10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE__SHIFT 0x0
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING__SHIFT 0x10
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING__SHIFT 0x11
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING__SHIFT 0x12
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING__SHIFT 0x13
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING__SHIFT 0x14
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING__SHIFT 0x15
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING__SHIFT 0x16
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING__SHIFT 0x17
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING__SHIFT 0x18
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING__SHIFT 0x19
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING__SHIFT 0x1a
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING__SHIFT 0x1b
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING__SHIFT 0x1c
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING__SHIFT 0x1d
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING__SHIFT 0x1e
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE_MASK 0x0000FFFFL
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING_MASK 0x00010000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING_MASK 0x00020000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING_MASK 0x00040000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING_MASK 0x00080000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING_MASK 0x00100000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING_MASK 0x00200000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING_MASK 0x00400000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING_MASK 0x00800000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING_MASK 0x01000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING_MASK 0x02000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING_MASK 0x04000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING_MASK 0x08000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING_MASK 0x10000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING_MASK 0x20000000L
+#define DIG2_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING_MASK 0x40000000L
+//DIG2_HDMI_DB_CONTROL
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_TAKEN__SHIFT 0x4
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR__SHIFT 0x5
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_LOCK__SHIFT 0x8
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc
+#define DIG2_HDMI_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf
+#define DIG2_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DIG2_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_TAKEN_MASK 0x00000010L
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR_MASK 0x00000020L
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_LOCK_MASK 0x00000100L
+#define DIG2_HDMI_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L
+#define DIG2_HDMI_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DIG2_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DIG2_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DIG2_HDMI_ACR_32_0
+#define DIG2_HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define DIG2_HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L
+//DIG2_HDMI_ACR_32_1
+#define DIG2_HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define DIG2_HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL
+//DIG2_HDMI_ACR_44_0
+#define DIG2_HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define DIG2_HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L
+//DIG2_HDMI_ACR_44_1
+#define DIG2_HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define DIG2_HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL
+//DIG2_HDMI_ACR_48_0
+#define DIG2_HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define DIG2_HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L
+//DIG2_HDMI_ACR_48_1
+#define DIG2_HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define DIG2_HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL
+//DIG2_HDMI_ACR_STATUS_0
+#define DIG2_HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define DIG2_HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L
+//DIG2_HDMI_ACR_STATUS_1
+#define DIG2_HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define DIG2_HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL
+//DIG2_AFMT_CNTL
+#define DIG2_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN__SHIFT 0x0
+#define DIG2_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
+#define DIG2_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L
+#define DIG2_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L
+//DIG2_DIG_BE_CLK_CNTL
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_MODE__SHIFT 0x0
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN__SHIFT 0x4
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET__SHIFT 0x5
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON__SHIFT 0xb
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_MODE_MASK 0x00000007L
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN_MASK 0x00000010L
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET_MASK 0x00000020L
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON_MASK 0x00000800L
+#define DIG2_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON_MASK 0x00002000L
+//DIG2_DIG_BE_CNTL
+#define DIG2_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
+#define DIG2_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
+#define DIG2_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
+#define DIG2_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
+#define DIG2_DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x1c
+#define DIG2_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x00000001L
+#define DIG2_DIG_BE_CNTL__DIG_SWAP_MASK 0x00000002L
+#define DIG2_DIG_BE_CNTL__DIG_RB_SWITCH_EN_MASK 0x00000004L
+#define DIG2_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x00007F00L
+#define DIG2_DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000L
+//DIG2_DIG_BE_EN_CNTL
+#define DIG2_DIG_BE_EN_CNTL__DIG_BE_ENABLE__SHIFT 0x0
+#define DIG2_DIG_BE_EN_CNTL__DIG_BE_ENABLE_MASK 0x00000001L
+//DIG2_TMDS_CNTL
+#define DIG2_TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x0
+#define DIG2_TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x00000001L
+//DIG2_TMDS_CONTROL_CHAR
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x0
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x1
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x2
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x3
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x00000001L
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x00000002L
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x00000004L
+#define DIG2_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x00000008L
+//DIG2_TMDS_CONTROL0_FEEDBACK
+#define DIG2_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x0
+#define DIG2_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x8
+#define DIG2_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x00000003L
+#define DIG2_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x00000300L
+//DIG2_TMDS_STEREOSYNC_CTL_SEL
+#define DIG2_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x0
+#define DIG2_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x00000003L
+//DIG2_TMDS_SYNC_CHAR_PATTERN_0_1
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x0
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x10
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x000003FFL
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x03FF0000L
+//DIG2_TMDS_SYNC_CHAR_PATTERN_2_3
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x0
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x10
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x000003FFL
+#define DIG2_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x03FF0000L
+//DIG2_TMDS_CTL_BITS
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x0
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x8
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x10
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x18
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL0_MASK 0x00000001L
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL1_MASK 0x00000100L
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL2_MASK 0x00010000L
+#define DIG2_TMDS_CTL_BITS__TMDS_CTL3_MASK 0x01000000L
+//DIG2_TMDS_DCBALANCER_CONTROL
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x0
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x4
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x8
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x10
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x18
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x00000001L
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x00000070L
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x00000100L
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0x000F0000L
+#define DIG2_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x01000000L
+//DIG2_TMDS_SYNC_DCBALANCE_CHAR
+#define DIG2_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01__SHIFT 0x0
+#define DIG2_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11__SHIFT 0x10
+#define DIG2_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01_MASK 0x000003FFL
+#define DIG2_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11_MASK 0x03FF0000L
+//DIG2_TMDS_CTL0_1_GEN_CNTL
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x0
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x7
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x8
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0xb
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x10
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x14
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x17
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x18
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x1f
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0x0000000FL
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x00000070L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x00000080L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x00000300L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0x000F0000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x00700000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x00800000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x03000000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG2_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000L
+//DIG2_TMDS_CTL2_3_GEN_CNTL
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x0
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x4
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x7
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x8
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0xb
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x10
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x14
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x17
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x18
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0x0000000FL
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x00000070L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x00000080L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x00000300L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0x000F0000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x00700000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x00800000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x03000000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG2_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000L
+//DIG2_DIG_VERSION
+#define DIG2_DIG_VERSION__DIG_TYPE__SHIFT 0x0
+#define DIG2_DIG_VERSION__DIG_TYPE_MASK 0x00000001L
+
+
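/*
 * Editorial note, not part of this patch: the __SHIFT/_MASK pairs generated in
 * these address blocks follow the usual register-header convention -- a field
 * is read as (reg & FIELD_MASK) >> FIELD__SHIFT and written by clearing the
 * mask before or-ing in the shifted value. Below is a minimal sketch of that
 * convention using the DIG2_DIG_FE_CNTL field defined above; the helper names
 * are hypothetical and the raw 32-bit register value is assumed to have been
 * read elsewhere by the driver.
 */
#include <stdint.h>

static inline uint32_t dig2_fe_get_stereosync_select(uint32_t dig_fe_cntl)
{
	/* Extract DIG_STEREOSYNC_SELECT (bits 6:4) from a raw register value. */
	return (dig_fe_cntl & DIG2_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK) >>
	       DIG2_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT;
}

static inline uint32_t dig2_fe_set_stereosync_select(uint32_t dig_fe_cntl, uint32_t val)
{
	/* Clear the field, then insert the new value at its bit position. */
	dig_fe_cntl &= ~DIG2_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK;
	dig_fe_cntl |= (val << DIG2_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT) &
		       DIG2_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK;
	return dig_fe_cntl;
}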
+// addressBlock: dce_dc_dio_dp3_dispdec
+//DP3_DP_LINK_CNTL
+#define DP3_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x4
+#define DP3_DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x8
+#define DP3_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x00000010L
+#define DP3_DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x00000100L
+//DP3_DP_PIXEL_FORMAT
+#define DP3_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x0
+#define DP3_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x18
+#define DP3_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE__SHIFT 0x1e
+#define DP3_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x00000007L
+#define DP3_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x07000000L
+#define DP3_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE_MASK 0x40000000L
+//DP3_DP_MSA_COLORIMETRY
+#define DP3_DP_MSA_COLORIMETRY__DP_MSA_MISC0__SHIFT 0x18
+#define DP3_DP_MSA_COLORIMETRY__DP_MSA_MISC0_MASK 0xFF000000L
+//DP3_DP_CONFIG
+#define DP3_DP_CONFIG__DP_UDI_LANES__SHIFT 0x0
+#define DP3_DP_CONFIG__DP_UDI_LANES_MASK 0x00000003L
+//DP3_DP_VID_STREAM_CNTL
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x0
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x8
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x10
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x14
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x00000300L
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x00010000L
+#define DP3_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x00100000L
+//DP3_DP_STEER_FIFO
+#define DP3_DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x0
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x4
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x5
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x6
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x7
+#define DP3_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x8
+#define DP3_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0xc
+#define DP3_DP_STEER_FIFO__DP_TU_SIZE__SHIFT 0x18
+#define DP3_DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x00000001L
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x00000010L
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x00000020L
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x00000040L
+#define DP3_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x00000080L
+#define DP3_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x00000100L
+#define DP3_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x00001000L
+#define DP3_DP_STEER_FIFO__DP_TU_SIZE_MASK 0x3F000000L
+//DP3_DP_MSA_MISC
+#define DP3_DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x0
+#define DP3_DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x8
+#define DP3_DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x10
+#define DP3_DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x18
+#define DP3_DP_MSA_MISC__DP_MSA_MISC1_MASK 0x000000FFL
+#define DP3_DP_MSA_MISC__DP_MSA_MISC2_MASK 0x0000FF00L
+#define DP3_DP_MSA_MISC__DP_MSA_MISC3_MASK 0x00FF0000L
+#define DP3_DP_MSA_MISC__DP_MSA_MISC4_MASK 0xFF000000L
+//DP3_DP_DPHY_INTERNAL_CTRL
+#define DP3_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN__SHIFT 0x0
+#define DP3_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL__SHIFT 0x4
+#define DP3_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN_MASK 0x00000001L
+#define DP3_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL_MASK 0x00000010L
+//DP3_DP_VID_TIMING
+#define DP3_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE__SHIFT 0x4
+#define DP3_DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x8
+#define DP3_DP_VID_TIMING__DP_VID_N_MUL__SHIFT 0xa
+#define DP3_DP_VID_TIMING__DP_VID_M_DIV__SHIFT 0xc
+#define DP3_DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x18
+#define DP3_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE_MASK 0x00000010L
+#define DP3_DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x00000100L
+#define DP3_DP_VID_TIMING__DP_VID_N_MUL_MASK 0x00000C00L
+#define DP3_DP_VID_TIMING__DP_VID_M_DIV_MASK 0x00003000L
+#define DP3_DP_VID_TIMING__DP_VID_N_DIV_MASK 0xFF000000L
+//DP3_DP_VID_N
+#define DP3_DP_VID_N__DP_VID_N__SHIFT 0x0
+#define DP3_DP_VID_N__DP_VID_N_MASK 0x00FFFFFFL
+//DP3_DP_VID_M
+#define DP3_DP_VID_M__DP_VID_M__SHIFT 0x0
+#define DP3_DP_VID_M__DP_VID_M_MASK 0x00FFFFFFL
+//DP3_DP_LINK_FRAMING_CNTL
+#define DP3_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x0
+#define DP3_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE__SHIFT 0x14
+#define DP3_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x18
+#define DP3_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x1c
+#define DP3_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x0003FFFFL
+#define DP3_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE_MASK 0x00100000L
+#define DP3_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x01000000L
+#define DP3_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000L
+//DP3_DP_HBR2_EYE_PATTERN
+#define DP3_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x0
+#define DP3_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x00000001L
+//DP3_DP_VID_MSA_VBID
+#define DP3_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x0
+#define DP3_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE__SHIFT 0xc
+#define DP3_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x18
+#define DP3_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0x00000FFFL
+#define DP3_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE_MASK 0x00001000L
+#define DP3_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x01000000L
+//DP3_DP_VID_INTERRUPT_CNTL
+#define DP3_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x0
+#define DP3_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x1
+#define DP3_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x2
+#define DP3_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x00000001L
+#define DP3_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x00000002L
+#define DP3_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x00000004L
+//DP3_DP_DPHY_CNTL
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x0
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x1
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x2
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x3
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_EN__SHIFT 0x4
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW__SHIFT 0x5
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS__SHIFT 0x6
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM__SHIFT 0x7
+#define DP3_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL__SHIFT 0x8
+#define DP3_DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x10
+#define DP3_DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x18
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x00000001L
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x00000002L
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x00000004L
+#define DP3_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x00000008L
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_EN_MASK 0x00000010L
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW_MASK 0x00000020L
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS_MASK 0x00000040L
+#define DP3_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM_MASK 0x00000080L
+#define DP3_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL_MASK 0x00000100L
+#define DP3_DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x00010000L
+#define DP3_DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x01000000L
+//DP3_DP_DPHY_TRAINING_PATTERN_SEL
+#define DP3_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x0
+#define DP3_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x00000003L
+//DP3_DP_DPHY_SYM0
+#define DP3_DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x0
+#define DP3_DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0xa
+#define DP3_DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x14
+#define DP3_DP_DPHY_SYM0__DPHY_SYM1_MASK 0x000003FFL
+#define DP3_DP_DPHY_SYM0__DPHY_SYM2_MASK 0x000FFC00L
+#define DP3_DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3FF00000L
+//DP3_DP_DPHY_SYM1
+#define DP3_DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x0
+#define DP3_DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0xa
+#define DP3_DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x14
+#define DP3_DP_DPHY_SYM1__DPHY_SYM4_MASK 0x000003FFL
+#define DP3_DP_DPHY_SYM1__DPHY_SYM5_MASK 0x000FFC00L
+#define DP3_DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3FF00000L
+//DP3_DP_DPHY_SYM2
+#define DP3_DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x0
+#define DP3_DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0xa
+#define DP3_DP_DPHY_SYM2__DPHY_SYM7_MASK 0x000003FFL
+#define DP3_DP_DPHY_SYM2__DPHY_SYM8_MASK 0x000FFC00L
+//DP3_DP_DPHY_8B10B_CNTL
+#define DP3_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x8
+#define DP3_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x10
+#define DP3_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x18
+#define DP3_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x00000100L
+#define DP3_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x00010000L
+#define DP3_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x01000000L
+//DP3_DP_DPHY_PRBS_CNTL
+#define DP3_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x0
+#define DP3_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
+#define DP3_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP3_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x00000001L
+#define DP3_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x00000030L
+#define DP3_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7FFFFF00L
+//DP3_DP_DPHY_SCRAM_CNTL
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS__SHIFT 0x0
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE__SHIFT 0x18
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS_MASK 0x00000001L
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x00000010L
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x0003FF00L
+#define DP3_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE_MASK 0x01000000L
+//DP3_DP_DPHY_CRC_CONTROL0
+#define DP3_DP_DPHY_CRC_CONTROL0__DPHY_CRC_EN__SHIFT 0x0
+#define DP3_DP_DPHY_CRC_CONTROL0__DPHY_CRC_CONT_EN__SHIFT 0x4
+#define DP3_DP_DPHY_CRC_CONTROL0__DPHY_CRC_FIELD__SHIFT 0x8
+#define DP3_DP_DPHY_CRC_CONTROL0__DPHY_CRC_EN_MASK 0x00000001L
+#define DP3_DP_DPHY_CRC_CONTROL0__DPHY_CRC_CONT_EN_MASK 0x00000010L
+#define DP3_DP_DPHY_CRC_CONTROL0__DPHY_CRC_FIELD_MASK 0x00000100L
+//DP3_DP_DPHY_CRC_CONTROL1
+#define DP3_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x0
+#define DP3_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x8
+#define DP3_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_FIRST_SLOT_MASK 0x0000003FL
+#define DP3_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_LAST_SLOT_MASK 0x00003F00L
+//DP3_DP_DPHY_CRC_RESULT0
+#define DP3_DP_DPHY_CRC_RESULT0__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP3_DP_DPHY_CRC_RESULT0__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP3_DP_DPHY_CRC_RESULT1
+#define DP3_DP_DPHY_CRC_RESULT1__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP3_DP_DPHY_CRC_RESULT1__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP3_DP_DPHY_CRC_STATUS
+#define DP3_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID__SHIFT 0x0
+#define DP3_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_ACK__SHIFT 0x4
+#define DP3_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x8
+#define DP3_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0xc
+#define DP3_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x10
+#define DP3_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_MASK 0x00000001L
+#define DP3_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_ACK_MASK 0x00000010L
+#define DP3_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x00000100L
+#define DP3_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x00001000L
+#define DP3_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x00010000L
+//DP3_DP_DPHY_FAST_TRAINING
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x0
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x1
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x2
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING__SHIFT 0x4
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x8
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x14
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x00000001L
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x00000002L
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x00000004L
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING_MASK 0x00000010L
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0x000FFF00L
+#define DP3_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xFFF00000L
+//DP3_DP_DPHY_FAST_TRAINING_STATUS
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x0
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x4
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x8
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0xc
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x00000007L
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x00000010L
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x00000100L
+#define DP3_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x00001000L
+//DP3_DP_DPHY_CRC_RESULT2
+#define DP3_DP_DPHY_CRC_RESULT2__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP3_DP_DPHY_CRC_RESULT2__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP3_DP_DPHY_CRC_RESULT3
+#define DP3_DP_DPHY_CRC_RESULT3__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP3_DP_DPHY_CRC_RESULT3__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP3_DP_SEC_CNTL
+#define DP3_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x0
+#define DP3_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
+#define DP3_DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x8
+#define DP3_DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0xc
+#define DP3_DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x10
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x14
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x15
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x16
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x17
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE__SHIFT 0x18
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE__SHIFT 0x19
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE__SHIFT 0x1a
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE__SHIFT 0x1b
+#define DP3_DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x1c
+#define DP3_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x00000001L
+#define DP3_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x00000010L
+#define DP3_DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x00000100L
+#define DP3_DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x00001000L
+#define DP3_DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x00010000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x00100000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x00200000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x00400000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x00800000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE_MASK 0x01000000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE_MASK 0x02000000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE_MASK 0x04000000L
+#define DP3_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE_MASK 0x08000000L
+#define DP3_DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000L
+//DP3_DP_SEC_CNTL1
+#define DP3_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x0
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE__SHIFT 0x1
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY__SHIFT 0x4
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND__SHIFT 0x5
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING__SHIFT 0x6
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED__SHIFT 0x7
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE__SHIFT 0x8
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE__SHIFT 0x9
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE__SHIFT 0xa
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE__SHIFT 0xb
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE__SHIFT 0xc
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE__SHIFT 0xd
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE__SHIFT 0xe
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE__SHIFT 0xf
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM__SHIFT 0x10
+#define DP3_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x00000001L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE_MASK 0x00000002L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY_MASK 0x00000010L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_MASK 0x00000020L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING_MASK 0x00000040L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED_MASK 0x00000080L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE_MASK 0x00000100L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE_MASK 0x00000200L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE_MASK 0x00000400L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE_MASK 0x00000800L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE_MASK 0x00001000L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE_MASK 0x00002000L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE_MASK 0x00004000L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE_MASK 0x00008000L
+#define DP3_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_SEC_FRAMING1
+#define DP3_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x0
+#define DP3_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP3_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0x00000FFFL
+#define DP3_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP3_DP_SEC_FRAMING2
+#define DP3_DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x0
+#define DP3_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP3_DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0x0000FFFFL
+#define DP3_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP3_DP_SEC_FRAMING3
+#define DP3_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x0
+#define DP3_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP3_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x00003FFFL
+#define DP3_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP3_DP_SEC_FRAMING4
+#define DP3_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING__SHIFT 0x0
+#define DP3_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x14
+#define DP3_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x18
+#define DP3_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x1c
+#define DP3_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP3_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING_MASK 0x00000001L
+#define DP3_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x00100000L
+#define DP3_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x01000000L
+#define DP3_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000L
+#define DP3_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP3_DP_SEC_AUD_N
+#define DP3_DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x0
+#define DP3_DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0x00FFFFFFL
+//DP3_DP_SEC_AUD_N_READBACK
+#define DP3_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x0
+#define DP3_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0x00FFFFFFL
+//DP3_DP_SEC_AUD_M
+#define DP3_DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x0
+#define DP3_DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0x00FFFFFFL
+//DP3_DP_SEC_AUD_M_READBACK
+#define DP3_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x0
+#define DP3_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0x00FFFFFFL
+//DP3_DP_SEC_TIMESTAMP
+#define DP3_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x0
+#define DP3_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x00000001L
+//DP3_DP_SEC_PACKET_CNTL
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x1
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x4
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x8
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x10
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0x0000000EL
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x00000010L
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x00003F00L
+#define DP3_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x00010000L
+//DP3_DP_MSE_RATE_CNTL
+#define DP3_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x0
+#define DP3_DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x1a
+#define DP3_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x03FFFFFFL
+#define DP3_DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xFC000000L
+//DP3_DP_MSE_RATE_UPDATE
+#define DP3_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x0
+#define DP3_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x00000001L
+//DP3_DP_MSE_SAT0
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x0
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0__SHIFT 0x4
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0__SHIFT 0x5
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x8
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x10
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1__SHIFT 0x14
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1__SHIFT 0x15
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x18
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x00000007L
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0_MASK 0x00000010L
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0_MASK 0x00000020L
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x00003F00L
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x00070000L
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1_MASK 0x00100000L
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1_MASK 0x00200000L
+#define DP3_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3F000000L
+//DP3_DP_MSE_SAT1
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x0
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2__SHIFT 0x4
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2__SHIFT 0x5
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x8
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x10
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3__SHIFT 0x14
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3__SHIFT 0x15
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x18
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x00000007L
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2_MASK 0x00000010L
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2_MASK 0x00000020L
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x00003F00L
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x00070000L
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3_MASK 0x00100000L
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3_MASK 0x00200000L
+#define DP3_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3F000000L
+//DP3_DP_MSE_SAT2
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x0
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4__SHIFT 0x4
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4__SHIFT 0x5
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x8
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x10
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5__SHIFT 0x14
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5__SHIFT 0x15
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x18
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x00000007L
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4_MASK 0x00000010L
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4_MASK 0x00000020L
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x00003F00L
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x00070000L
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5_MASK 0x00100000L
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5_MASK 0x00200000L
+#define DP3_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3F000000L
+//DP3_DP_MSE_SAT_UPDATE
+#define DP3_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x0
+#define DP3_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x8
+#define DP3_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x00000003L
+#define DP3_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x00000100L
+//DP3_DP_MSE_LINK_TIMING
+#define DP3_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x0
+#define DP3_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x10
+#define DP3_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x000003FFL
+#define DP3_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x00030000L
+//DP3_DP_MSE_MISC_CNTL
+#define DP3_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x0
+#define DP3_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x4
+#define DP3_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x8
+#define DP3_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x00000001L
+#define DP3_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x00000010L
+#define DP3_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x00000100L
+//DP3_DP_DPHY_BS_SR_SWAP_CNTL
+#define DP3_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
+#define DP3_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE__SHIFT 0xf
+#define DP3_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START__SHIFT 0x10
+#define DP3_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x000003FFL
+#define DP3_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x00008000L
+#define DP3_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START_MASK 0x00010000L
+//DP3_DP_DPHY_HBR2_PATTERN_CONTROL
+#define DP3_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL__SHIFT 0x0
+#define DP3_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL_MASK 0x00000007L
+//DP3_DP_MSE_SAT0_STATUS
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS__SHIFT 0x0
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS__SHIFT 0x4
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS__SHIFT 0x5
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS__SHIFT 0x8
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS__SHIFT 0x10
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS__SHIFT 0x14
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS__SHIFT 0x15
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS__SHIFT 0x18
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS_MASK 0x00000007L
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS_MASK 0x00000010L
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS_MASK 0x00000020L
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS_MASK 0x00003F00L
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS_MASK 0x00070000L
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS_MASK 0x00100000L
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS_MASK 0x00200000L
+#define DP3_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS_MASK 0x3F000000L
+//DP3_DP_MSE_SAT1_STATUS
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS__SHIFT 0x0
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS__SHIFT 0x4
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS__SHIFT 0x5
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS__SHIFT 0x8
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS__SHIFT 0x10
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS__SHIFT 0x14
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS__SHIFT 0x15
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS__SHIFT 0x18
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS_MASK 0x00000007L
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS_MASK 0x00000010L
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS_MASK 0x00000020L
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS_MASK 0x00003F00L
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS_MASK 0x00070000L
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS_MASK 0x00100000L
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS_MASK 0x00200000L
+#define DP3_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS_MASK 0x3F000000L
+//DP3_DP_MSE_SAT2_STATUS
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS__SHIFT 0x0
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS__SHIFT 0x4
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS__SHIFT 0x5
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS__SHIFT 0x8
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS__SHIFT 0x10
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS__SHIFT 0x14
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS__SHIFT 0x15
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS__SHIFT 0x18
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS_MASK 0x00000007L
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS_MASK 0x00000010L
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS_MASK 0x00000020L
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS_MASK 0x00003F00L
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS_MASK 0x00070000L
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS_MASK 0x00100000L
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS_MASK 0x00200000L
+#define DP3_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS_MASK 0x3F000000L
+//DP3_DP_DPIA_SPARE
+#define DP3_DP_DPIA_SPARE__DP_DPIA_SPARE__SHIFT 0x0
+#define DP3_DP_DPIA_SPARE__DP_DPIA_SPARE_MASK 0x00000003L
+//DP3_DP_MSA_TIMING_PARAM1
+#define DP3_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL__SHIFT 0x0
+#define DP3_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL__SHIFT 0x10
+#define DP3_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL_MASK 0x0000FFFFL
+#define DP3_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL_MASK 0xFFFF0000L
+//DP3_DP_MSA_TIMING_PARAM2
+#define DP3_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART__SHIFT 0x0
+#define DP3_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART__SHIFT 0x10
+#define DP3_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART_MASK 0x0000FFFFL
+#define DP3_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART_MASK 0xFFFF0000L
+//DP3_DP_MSA_TIMING_PARAM3
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH__SHIFT 0x0
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY__SHIFT 0xf
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH__SHIFT 0x10
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY__SHIFT 0x1f
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH_MASK 0x00007FFFL
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY_MASK 0x00008000L
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH_MASK 0x7FFF0000L
+#define DP3_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY_MASK 0x80000000L
+//DP3_DP_MSA_TIMING_PARAM4
+#define DP3_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT__SHIFT 0x0
+#define DP3_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH__SHIFT 0x10
+#define DP3_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT_MASK 0x0000FFFFL
+#define DP3_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH_MASK 0xFFFF0000L
+//DP3_DP_MSO_CNTL
+#define DP3_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK__SHIFT 0x0
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE__SHIFT 0x4
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE__SHIFT 0x8
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE__SHIFT 0xc
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE__SHIFT 0x10
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE__SHIFT 0x14
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE__SHIFT 0x18
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE__SHIFT 0x1c
+#define DP3_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK_MASK 0x00000003L
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE_MASK 0x000000F0L
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE_MASK 0x00000F00L
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE_MASK 0x0000F000L
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE_MASK 0x000F0000L
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE_MASK 0x00F00000L
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE_MASK 0x0F000000L
+#define DP3_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE_MASK 0xF0000000L
+//DP3_DP_MSO_CNTL1
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE__SHIFT 0x0
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE__SHIFT 0x4
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE__SHIFT 0x8
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE__SHIFT 0xc
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE__SHIFT 0x10
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE__SHIFT 0x14
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE__SHIFT 0x18
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE__SHIFT 0x1c
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE_MASK 0x0000000FL
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE_MASK 0x000000F0L
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE_MASK 0x00000F00L
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE_MASK 0x0000F000L
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE_MASK 0x000F0000L
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE_MASK 0x00F00000L
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE_MASK 0x0F000000L
+#define DP3_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE_MASK 0xF0000000L
+//DP3_DP_DSC_CNTL
+#define DP3_DP_DSC_CNTL__DP_DSC_MODE__SHIFT 0x0
+#define DP3_DP_DSC_CNTL__DP_DSC_MODE_MASK 0x00000001L
+//DP3_DP_SEC_CNTL2
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND__SHIFT 0x0
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING__SHIFT 0x1
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED__SHIFT 0x2
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE__SHIFT 0x3
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND__SHIFT 0x4
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING__SHIFT 0x5
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED__SHIFT 0x6
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE__SHIFT 0x7
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND__SHIFT 0x8
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING__SHIFT 0x9
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED__SHIFT 0xa
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE__SHIFT 0xb
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND__SHIFT 0xc
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING__SHIFT 0xd
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE__SHIFT 0xf
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND__SHIFT 0x10
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING__SHIFT 0x11
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED__SHIFT 0x12
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE__SHIFT 0x13
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND__SHIFT 0x14
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING__SHIFT 0x15
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED__SHIFT 0x16
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE__SHIFT 0x17
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND__SHIFT 0x18
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING__SHIFT 0x19
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED__SHIFT 0x1a
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE__SHIFT 0x1b
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP11_PPS__SHIFT 0x1c
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_MASK 0x00000001L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING_MASK 0x00000002L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED_MASK 0x00000004L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE_MASK 0x00000008L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_MASK 0x00000010L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING_MASK 0x00000020L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED_MASK 0x00000040L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE_MASK 0x00000080L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_MASK 0x00000100L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING_MASK 0x00000200L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED_MASK 0x00000400L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE_MASK 0x00000800L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_MASK 0x00001000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING_MASK 0x00002000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE_MASK 0x00008000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_MASK 0x00010000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING_MASK 0x00020000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED_MASK 0x00040000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE_MASK 0x00080000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_MASK 0x00100000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING_MASK 0x00200000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED_MASK 0x00400000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE_MASK 0x00800000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_MASK 0x01000000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING_MASK 0x02000000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED_MASK 0x04000000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE_MASK 0x08000000L
+#define DP3_DP_SEC_CNTL2__DP_SEC_GSP11_PPS_MASK 0x10000000L
+//DP3_DP_SEC_CNTL3
+#define DP3_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT 0x0
+#define DP3_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM__SHIFT 0x10
+#define DP3_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK 0x0000FFFFL
+#define DP3_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_SEC_CNTL4
+#define DP3_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM__SHIFT 0x0
+#define DP3_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM__SHIFT 0x10
+#define DP3_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM_MASK 0x0000FFFFL
+#define DP3_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_SEC_CNTL5
+#define DP3_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM__SHIFT 0x0
+#define DP3_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM__SHIFT 0x10
+#define DP3_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM_MASK 0x0000FFFFL
+#define DP3_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_SEC_CNTL6
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM__SHIFT 0x0
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE__SHIFT 0x10
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE__SHIFT 0x11
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE__SHIFT 0x12
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE__SHIFT 0x13
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE__SHIFT 0x14
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE__SHIFT 0x15
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE__SHIFT 0x16
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE__SHIFT 0x17
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE__SHIFT 0x18
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE__SHIFT 0x19
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE__SHIFT 0x1a
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE__SHIFT 0x1b
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM_MASK 0x0000FFFFL
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE_MASK 0x00010000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE_MASK 0x00020000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE_MASK 0x00040000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE_MASK 0x00080000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE_MASK 0x00100000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE_MASK 0x00200000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE_MASK 0x00400000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE_MASK 0x00800000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE_MASK 0x01000000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE_MASK 0x02000000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE_MASK 0x04000000L
+#define DP3_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE_MASK 0x08000000L
+//DP3_DP_SEC_CNTL7
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE__SHIFT 0x0
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE__SHIFT 0x1
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE__SHIFT 0x4
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE__SHIFT 0x5
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE__SHIFT 0x8
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE__SHIFT 0x9
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE__SHIFT 0xc
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE__SHIFT 0xd
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE__SHIFT 0x10
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE__SHIFT 0x11
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE__SHIFT 0x14
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE__SHIFT 0x15
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE__SHIFT 0x18
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE__SHIFT 0x19
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE__SHIFT 0x1c
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE__SHIFT 0x1d
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE_MASK 0x00000001L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE_MASK 0x00000002L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE_MASK 0x00000010L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE_MASK 0x00000020L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE_MASK 0x00000100L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE_MASK 0x00000200L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE_MASK 0x00001000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE_MASK 0x00002000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE_MASK 0x00010000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE_MASK 0x00020000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE_MASK 0x00100000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE_MASK 0x00200000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE_MASK 0x01000000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE_MASK 0x02000000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE_MASK 0x10000000L
+#define DP3_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE_MASK 0x20000000L
+//DP3_DP_DB_CNTL
+#define DP3_DP_DB_CNTL__DP_DB_PENDING__SHIFT 0x0
+#define DP3_DP_DB_CNTL__DP_DB_TAKEN__SHIFT 0x4
+#define DP3_DP_DB_CNTL__DP_DB_TAKEN_CLR__SHIFT 0x5
+#define DP3_DP_DB_CNTL__DP_DB_LOCK__SHIFT 0x8
+#define DP3_DP_DB_CNTL__DP_DB_DISABLE__SHIFT 0xc
+#define DP3_DP_DB_CNTL__DP_VUPDATE_DB_PENDING__SHIFT 0xf
+#define DP3_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DP3_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DP3_DP_DB_CNTL__DP_DB_PENDING_MASK 0x00000001L
+#define DP3_DP_DB_CNTL__DP_DB_TAKEN_MASK 0x00000010L
+#define DP3_DP_DB_CNTL__DP_DB_TAKEN_CLR_MASK 0x00000020L
+#define DP3_DP_DB_CNTL__DP_DB_LOCK_MASK 0x00000100L
+#define DP3_DP_DB_CNTL__DP_DB_DISABLE_MASK 0x00001000L
+#define DP3_DP_DB_CNTL__DP_VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DP3_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DP3_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DP3_DP_MSA_VBID_MISC
+#define DP3_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define DP3_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x4
+#define DP3_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE__SHIFT 0x8
+#define DP3_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE__SHIFT 0x9
+#define DP3_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN__SHIFT 0xc
+#define DP3_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN__SHIFT 0xd
+#define DP3_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE__SHIFT 0xf
+#define DP3_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM__SHIFT 0x10
+#define DP3_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_MASK 0x00000003L
+#define DP3_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000010L
+#define DP3_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_MASK 0x00000100L
+#define DP3_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_MASK 0x00000200L
+#define DP3_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN_MASK 0x00001000L
+#define DP3_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN_MASK 0x00002000L
+#define DP3_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE_MASK 0x00008000L
+#define DP3_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_SEC_METADATA_TRANSMISSION
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x1
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE__SHIFT 0x4
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE__SHIFT 0x10
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000002L
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE_MASK 0x000000F0L
+#define DP3_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DP3_DP_ALPM_CNTL
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND__SHIFT 0x0
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING__SHIFT 0x1
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND__SHIFT 0x2
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING__SHIFT 0x3
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE__SHIFT 0x4
+#define DP3_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO__SHIFT 0x5
+#define DP3_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE__SHIFT 0x6
+#define DP3_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP__SHIFT 0x7
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM__SHIFT 0x8
+#define DP3_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS__SHIFT 0xb
+#define DP3_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN__SHIFT 0xc
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM__SHIFT 0x10
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND_MASK 0x00000001L
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING_MASK 0x00000002L
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND_MASK 0x00000004L
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING_MASK 0x00000008L
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE_MASK 0x00000010L
+#define DP3_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO_MASK 0x00000020L
+#define DP3_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE_MASK 0x00000040L
+#define DP3_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_MASK 0x00000080L
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM_MASK 0x00000300L
+#define DP3_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS_MASK 0x00000800L
+#define DP3_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN_MASK 0x00001000L
+#define DP3_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_GSP8_CNTL
+#define DP3_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE__SHIFT 0x0
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE__SHIFT 0x4
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE__SHIFT 0x5
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE__SHIFT 0x6
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND__SHIFT 0x7
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE__SHIFT 0x8
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING__SHIFT 0xc
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE__SHIFT 0xd
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM__SHIFT 0x10
+#define DP3_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE_MASK 0x0000000FL
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE_MASK 0x00000010L
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE_MASK 0x00000020L
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE_MASK 0x00000040L
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_MASK 0x00000080L
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE_MASK 0x00000100L
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING_MASK 0x00001000L
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE_MASK 0x00002000L
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP3_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_GSP9_CNTL
+#define DP3_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE__SHIFT 0x0
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE__SHIFT 0x4
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE__SHIFT 0x5
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE__SHIFT 0x6
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND__SHIFT 0x7
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE__SHIFT 0x8
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING__SHIFT 0xc
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE__SHIFT 0xd
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM__SHIFT 0x10
+#define DP3_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE_MASK 0x0000000FL
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE_MASK 0x00000010L
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE_MASK 0x00000020L
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE_MASK 0x00000040L
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_MASK 0x00000080L
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE_MASK 0x00000100L
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING_MASK 0x00001000L
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE_MASK 0x00002000L
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP3_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_GSP10_CNTL
+#define DP3_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE__SHIFT 0x0
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE__SHIFT 0x4
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE__SHIFT 0x5
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE__SHIFT 0x6
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND__SHIFT 0x7
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE__SHIFT 0x8
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING__SHIFT 0xc
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE__SHIFT 0xd
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM__SHIFT 0x10
+#define DP3_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE_MASK 0x0000000FL
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE_MASK 0x00000010L
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE_MASK 0x00000020L
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE_MASK 0x00000040L
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_MASK 0x00000080L
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE_MASK 0x00000100L
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING_MASK 0x00001000L
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE_MASK 0x00002000L
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP3_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_GSP11_CNTL
+#define DP3_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE__SHIFT 0x0
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE__SHIFT 0x4
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE__SHIFT 0x5
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE__SHIFT 0x6
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND__SHIFT 0x7
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE__SHIFT 0x8
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING__SHIFT 0xc
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE__SHIFT 0xd
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM__SHIFT 0x10
+#define DP3_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE_MASK 0x0000000FL
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE_MASK 0x00000010L
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE_MASK 0x00000020L
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE_MASK 0x00000040L
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_MASK 0x00000080L
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE_MASK 0x00000100L
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING_MASK 0x00001000L
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE_MASK 0x00002000L
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP3_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_GSP_EN_DB_STATUS
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING__SHIFT 0x0
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING__SHIFT 0x1
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING__SHIFT 0x2
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING__SHIFT 0x3
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING__SHIFT 0x4
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING__SHIFT 0x5
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING__SHIFT 0x6
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING__SHIFT 0x7
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING__SHIFT 0x8
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING__SHIFT 0x9
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING__SHIFT 0xa
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING__SHIFT 0xb
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING_MASK 0x00000001L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING_MASK 0x00000002L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING_MASK 0x00000004L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING_MASK 0x00000008L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING_MASK 0x00000010L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING_MASK 0x00000020L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING_MASK 0x00000040L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING_MASK 0x00000080L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING_MASK 0x00000100L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING_MASK 0x00000200L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING_MASK 0x00000400L
+#define DP3_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING_MASK 0x00000800L
+//DP3_DP_AUXLESS_ALPM_CNTL1
+#define DP3_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT__SHIFT 0x4
+#define DP3_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY__SHIFT 0x8
+#define DP3_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL__SHIFT 0x14
+#define DP3_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE__SHIFT 0x1f
+#define DP3_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT_MASK 0x000000F0L
+#define DP3_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY_MASK 0x0007FF00L
+#define DP3_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL_MASK 0x1FF00000L
+#define DP3_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE_MASK 0x80000000L
+//DP3_DP_AUXLESS_ALPM_CNTL2
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME__SHIFT 0x0
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND__SHIFT 0x7
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE__SHIFT 0x10
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING__SHIFT 0x11
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE__SHIFT 0x12
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING__SHIFT 0x13
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD__SHIFT 0x14
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME_MASK 0x0000007FL
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND_MASK 0x00000080L
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE_MASK 0x00010000L
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING_MASK 0x00020000L
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE_MASK 0x00040000L
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING_MASK 0x00080000L
+#define DP3_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD_MASK 0x3FF00000L
+//DP3_DP_AUXLESS_ALPM_CNTL3
+#define DP3_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM__SHIFT 0x0
+#define DP3_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM__SHIFT 0x10
+#define DP3_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM_MASK 0x0000FFFFL
+#define DP3_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_AUXLESS_ALPM_CNTL4
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN__SHIFT 0x1
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL__SHIFT 0x2
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME__SHIFT 0x3
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE__SHIFT 0x4
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS__SHIFT 0x5
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE__SHIFT 0x6
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM__SHIFT 0x18
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_MASK 0x00000002L
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL_MASK 0x00000004L
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME_MASK 0x00000008L
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE_MASK 0x00000010L
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS_MASK 0x00000020L
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE_MASK 0x00000040L
+#define DP3_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM_MASK 0xFF000000L
+//DP3_DP_AUXLESS_ALPM_CNTL5
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK__SHIFT 0x0
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED__SHIFT 0x1
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x2
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR__SHIFT 0x3
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM__SHIFT 0x8
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM__SHIFT 0x10
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK_MASK 0x00000001L
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED_MASK 0x00000002L
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000004L
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR_MASK 0x00000008L
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM_MASK 0x0000FF00L
+#define DP3_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM_MASK 0xFFFF0000L
+//DP3_DP_STREAM_SYMBOL_COUNT_STATUS
+#define DP3_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT__SHIFT 0x0
+#define DP3_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT_MASK 0x0000FFFFL
+//DP3_DP_STREAM_SYMBOL_COUNT_CONTROL
+#define DP3_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE__SHIFT 0x0
+#define DP3_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET__SHIFT 0x4
+#define DP3_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE_MASK 0x00000001L
+#define DP3_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET_MASK 0x00000010L
+//DP3_DP_LINK_SYMBOL_COUNT_STATUS0
+#define DP3_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT__SHIFT 0x0
+#define DP3_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT_MASK 0x0000FFFFL
+//DP3_DP_LINK_SYMBOL_COUNT_STATUS1
+#define DP3_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT__SHIFT 0x0
+#define DP3_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT_MASK 0xFFFFFFFFL
+//DP3_DP_LINK_SYMBOL_COUNT_CONTROL
+#define DP3_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE__SHIFT 0x0
+#define DP3_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET__SHIFT 0x4
+#define DP3_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE__SHIFT 0x8
+#define DP3_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET__SHIFT 0xc
+#define DP3_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE_MASK 0x00000001L
+#define DP3_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET_MASK 0x00000010L
+#define DP3_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE_MASK 0x00000100L
+#define DP3_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET_MASK 0x00001000L
+
+
+// addressBlock: dce_dc_dio_dig3_dispdec
+//DIG3_DIG_FE_CNTL
+#define DIG3_DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x0
+#define DIG3_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x4
+#define DIG3_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x8
+#define DIG3_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT__SHIFT 0xc
+#define DIG3_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING__SHIFT 0xf
+#define DIG3_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT__SHIFT 0x10
+#define DIG3_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN__SHIFT 0x14
+#define DIG3_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x00000007L
+#define DIG3_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x00000070L
+#define DIG3_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x00000100L
+#define DIG3_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT_MASK 0x00007000L
+#define DIG3_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING_MASK 0x00008000L
+#define DIG3_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT_MASK 0x00030000L
+#define DIG3_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN_MASK 0x00100000L
+//DIG3_DIG_FE_CLK_CNTL
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_MODE__SHIFT 0x0
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN__SHIFT 0x4
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET__SHIFT 0x5
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON__SHIFT 0xa
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON__SHIFT 0xb
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON__SHIFT 0xc
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON__SHIFT 0xe
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_MODE_MASK 0x00000007L
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN_MASK 0x00000010L
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET_MASK 0x00000020L
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON_MASK 0x00000400L
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON_MASK 0x00000800L
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON_MASK 0x00001000L
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON_MASK 0x00002000L
+#define DIG3_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON_MASK 0x00004000L
+//DIG3_DIG_FE_EN_CNTL
+#define DIG3_DIG_FE_EN_CNTL__DIG_FE_ENABLE__SHIFT 0x0
+#define DIG3_DIG_FE_EN_CNTL__DIG_FE_ENABLE_MASK 0x00000001L
+//DIG3_DIG_OUTPUT_CRC_CNTL
+#define DIG3_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x0
+#define DIG3_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x4
+#define DIG3_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x8
+#define DIG3_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x00000001L
+#define DIG3_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x00000010L
+#define DIG3_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x00000300L
+//DIG3_DIG_OUTPUT_CRC_RESULT
+#define DIG3_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x0
+#define DIG3_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3FFFFFFFL
+//DIG3_DIG_CLOCK_PATTERN
+#define DIG3_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x0
+#define DIG3_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x000003FFL
+//DIG3_DIG_TEST_PATTERN
+#define DIG3_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x0
+#define DIG3_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x1
+#define DIG3_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x4
+#define DIG3_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x5
+#define DIG3_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x6
+#define DIG3_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x10
+#define DIG3_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x00000001L
+#define DIG3_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x00000002L
+#define DIG3_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x00000010L
+#define DIG3_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x00000020L
+#define DIG3_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x00000040L
+#define DIG3_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x03FF0000L
+//DIG3_DIG_RANDOM_PATTERN_SEED
+#define DIG3_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x0
+#define DIG3_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x18
+#define DIG3_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0x00FFFFFFL
+#define DIG3_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x01000000L
+//DIG3_DIG_FIFO_CTRL0
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE__SHIFT 0x0
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_RESET__SHIFT 0x1
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL__SHIFT 0x2
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x7
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE__SHIFT 0x8
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE__SHIFT 0x14
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_ERROR__SHIFT 0x1c
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE_MASK 0x00000001L
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_RESET_MASK 0x00000002L
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL_MASK 0x0000007CL
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC_MASK 0x00000080L
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE_MASK 0x00000100L
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE_MASK 0x00100000L
+#define DIG3_DIG_FIFO_CTRL0__DIG_FIFO_ERROR_MASK 0x30000000L
+//DIG3_DIG_FIFO_CTRL1
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED__SHIFT 0x1d
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED_MASK 0x20000000L
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DIG3_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+//DIG3_HDMI_METADATA_PACKET_CONTROL
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L
+#define DIG3_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DIG3_HDMI_CONTROL
+#define DIG3_HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
+#define DIG3_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1
+#define DIG3_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
+#define DIG3_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED__SHIFT 0x3
+#define DIG3_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x4
+#define DIG3_HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x8
+#define DIG3_HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x9
+#define DIG3_HDMI_CONTROL__DOLBY_VISION_EN__SHIFT 0xa
+#define DIG3_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED__SHIFT 0xb
+#define DIG3_HDMI_CONTROL__TMDS_PIXEL_ENCODING__SHIFT 0xc
+#define DIG3_HDMI_CONTROL__TMDS_COLOR_FORMAT__SHIFT 0xd
+#define DIG3_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM__SHIFT 0x10
+#define DIG3_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x18
+#define DIG3_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x1c
+#define DIG3_HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x00000001L
+#define DIG3_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x00000002L
+#define DIG3_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x00000004L
+#define DIG3_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x00000008L
+#define DIG3_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x00000010L
+#define DIG3_HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x00000100L
+#define DIG3_HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x00000200L
+#define DIG3_HDMI_CONTROL__DOLBY_VISION_EN_MASK 0x00000400L
+#define DIG3_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED_MASK 0x00000800L
+#define DIG3_HDMI_CONTROL__TMDS_PIXEL_ENCODING_MASK 0x00001000L
+#define DIG3_HDMI_CONTROL__TMDS_COLOR_FORMAT_MASK 0x00006000L
+#define DIG3_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM_MASK 0x003F0000L
+#define DIG3_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x01000000L
+#define DIG3_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000L
+//DIG3_HDMI_STATUS
+#define DIG3_HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x0
+#define DIG3_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x10
+#define DIG3_HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x14
+#define DIG3_HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x1b
+#define DIG3_HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x00000001L
+#define DIG3_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x00010000L
+#define DIG3_HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x00100000L
+#define DIG3_HDMI_STATUS__HDMI_ERROR_INT_MASK 0x08000000L
+//DIG3_HDMI_AUDIO_PACKET_CONTROL
+#define DIG3_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x4
+#define DIG3_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x00000030L
+//DIG3_HDMI_ACR_PACKET_CONTROL
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define DIG3_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+//DIG3_HDMI_VBI_PACKET_CONTROL
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x0
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x4
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE__SHIFT 0x18
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x00001000L
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
+#define DIG3_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE_MASK 0x3F000000L
+//DIG3_HDMI_INFOFRAME_CONTROL0
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x5
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x8
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x9
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x00000010L
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x00000020L
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x00000100L
+#define DIG3_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x00000200L
+//DIG3_HDMI_INFOFRAME_CONTROL1
+#define DIG3_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x8
+#define DIG3_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x10
+#define DIG3_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x00003F00L
+#define DIG3_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x003F0000L
+//DIG3_HDMI_GENERIC_PACKET_CONTROL0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x2
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x6
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xa
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xe
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x12
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x16
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1a
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1e
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE__SHIFT 0x1f
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000004L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 0x00000040L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000400L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00004000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00040000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00400000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x04000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x40000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE_MASK 0x80000000L
+//DIG3_HDMI_GENERIC_PACKET_CONTROL6
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT__SHIFT 0x1
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE__SHIFT 0x2
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND__SHIFT 0x4
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT__SHIFT 0x5
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE__SHIFT 0x6
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND__SHIFT 0x8
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT__SHIFT 0x9
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE__SHIFT 0xa
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND__SHIFT 0xc
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT__SHIFT 0xd
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE__SHIFT 0xe
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT__SHIFT 0x11
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE__SHIFT 0x12
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND__SHIFT 0x14
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT__SHIFT 0x15
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE__SHIFT 0x16
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND__SHIFT 0x18
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT__SHIFT 0x19
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE__SHIFT 0x1a
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND_MASK 0x00000001L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT_MASK 0x00000002L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE_MASK 0x00000004L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND_MASK 0x00000010L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT_MASK 0x00000020L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE_MASK 0x00000040L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND_MASK 0x00000100L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT_MASK 0x00000200L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE_MASK 0x00000400L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND_MASK 0x00001000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT_MASK 0x00002000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE_MASK 0x00004000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND_MASK 0x00010000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT_MASK 0x00020000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE_MASK 0x00040000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND_MASK 0x00100000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT_MASK 0x00200000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE_MASK 0x00400000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND_MASK 0x01000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT_MASK 0x02000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE_MASK 0x04000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+//DIG3_HDMI_GENERIC_PACKET_CONTROL5
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING__SHIFT 0x11
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND__SHIFT 0x12
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING__SHIFT 0x13
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND__SHIFT 0x14
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING__SHIFT 0x15
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND__SHIFT 0x16
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING__SHIFT 0x17
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND__SHIFT 0x18
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING__SHIFT 0x19
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND__SHIFT 0x1a
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING__SHIFT 0x1b
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND__SHIFT 0x1c
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING__SHIFT 0x1d
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_MASK 0x00010000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING_MASK 0x00020000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_MASK 0x00040000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING_MASK 0x00080000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_MASK 0x00100000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING_MASK 0x00200000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_MASK 0x00400000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING_MASK 0x00800000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_MASK 0x01000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING_MASK 0x02000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_MASK 0x04000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING_MASK 0x08000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_MASK 0x10000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING_MASK 0x20000000L
+//DIG3_HDMI_GC
+#define DIG3_HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0
+#define DIG3_HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define DIG3_HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define DIG3_HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x8
+#define DIG3_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0xc
+#define DIG3_HDMI_GC__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define DIG3_HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define DIG3_HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+#define DIG3_HDMI_GC__HDMI_PACKING_PHASE_MASK 0x00000F00L
+#define DIG3_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x00001000L
+//DIG3_HDMI_GENERIC_PACKET_CONTROL1
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE_MASK 0x0000FFFFL
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE_MASK 0xFFFF0000L
+//DIG3_HDMI_GENERIC_PACKET_CONTROL2
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE_MASK 0x0000FFFFL
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE_MASK 0xFFFF0000L
+//DIG3_HDMI_GENERIC_PACKET_CONTROL3
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE_MASK 0x0000FFFFL
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE_MASK 0xFFFF0000L
+//DIG3_HDMI_GENERIC_PACKET_CONTROL4
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE_MASK 0x0000FFFFL
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE_MASK 0xFFFF0000L
+//DIG3_HDMI_GENERIC_PACKET_CONTROL7
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE_MASK 0x0000FFFFL
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE_MASK 0xFFFF0000L
+//DIG3_HDMI_GENERIC_PACKET_CONTROL8
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE_MASK 0x0000FFFFL
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE_MASK 0xFFFF0000L
+//DIG3_HDMI_GENERIC_PACKET_CONTROL9
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE_MASK 0x0000FFFFL
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE_MASK 0xFFFF0000L
+//DIG3_HDMI_GENERIC_PACKET_CONTROL10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE__SHIFT 0x0
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING__SHIFT 0x10
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING__SHIFT 0x11
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING__SHIFT 0x12
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING__SHIFT 0x13
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING__SHIFT 0x14
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING__SHIFT 0x15
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING__SHIFT 0x16
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING__SHIFT 0x17
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING__SHIFT 0x18
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING__SHIFT 0x19
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING__SHIFT 0x1a
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING__SHIFT 0x1b
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING__SHIFT 0x1c
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING__SHIFT 0x1d
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING__SHIFT 0x1e
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE_MASK 0x0000FFFFL
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING_MASK 0x00010000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING_MASK 0x00020000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING_MASK 0x00040000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING_MASK 0x00080000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING_MASK 0x00100000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING_MASK 0x00200000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING_MASK 0x00400000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING_MASK 0x00800000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING_MASK 0x01000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING_MASK 0x02000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING_MASK 0x04000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING_MASK 0x08000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING_MASK 0x10000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING_MASK 0x20000000L
+#define DIG3_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING_MASK 0x40000000L
+//DIG3_HDMI_DB_CONTROL
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_TAKEN__SHIFT 0x4
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR__SHIFT 0x5
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_LOCK__SHIFT 0x8
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc
+#define DIG3_HDMI_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf
+#define DIG3_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DIG3_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_TAKEN_MASK 0x00000010L
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR_MASK 0x00000020L
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_LOCK_MASK 0x00000100L
+#define DIG3_HDMI_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L
+#define DIG3_HDMI_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DIG3_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DIG3_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DIG3_HDMI_ACR_32_0
+#define DIG3_HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define DIG3_HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L
+//DIG3_HDMI_ACR_32_1
+#define DIG3_HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define DIG3_HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL
+//DIG3_HDMI_ACR_44_0
+#define DIG3_HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define DIG3_HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L
+//DIG3_HDMI_ACR_44_1
+#define DIG3_HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define DIG3_HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL
+//DIG3_HDMI_ACR_48_0
+#define DIG3_HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define DIG3_HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L
+//DIG3_HDMI_ACR_48_1
+#define DIG3_HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define DIG3_HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL
+//DIG3_HDMI_ACR_STATUS_0
+#define DIG3_HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define DIG3_HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L
+//DIG3_HDMI_ACR_STATUS_1
+#define DIG3_HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define DIG3_HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL
+//DIG3_AFMT_CNTL
+#define DIG3_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN__SHIFT 0x0
+#define DIG3_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
+#define DIG3_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L
+#define DIG3_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L
+//DIG3_DIG_BE_CLK_CNTL
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_MODE__SHIFT 0x0
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN__SHIFT 0x4
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET__SHIFT 0x5
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON__SHIFT 0xb
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_MODE_MASK 0x00000007L
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN_MASK 0x00000010L
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET_MASK 0x00000020L
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON_MASK 0x00000800L
+#define DIG3_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON_MASK 0x00002000L
+//DIG3_DIG_BE_CNTL
+#define DIG3_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
+#define DIG3_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
+#define DIG3_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
+#define DIG3_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
+#define DIG3_DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x1c
+#define DIG3_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x00000001L
+#define DIG3_DIG_BE_CNTL__DIG_SWAP_MASK 0x00000002L
+#define DIG3_DIG_BE_CNTL__DIG_RB_SWITCH_EN_MASK 0x00000004L
+#define DIG3_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x00007F00L
+#define DIG3_DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000L
+//DIG3_DIG_BE_EN_CNTL
+#define DIG3_DIG_BE_EN_CNTL__DIG_BE_ENABLE__SHIFT 0x0
+#define DIG3_DIG_BE_EN_CNTL__DIG_BE_ENABLE_MASK 0x00000001L
+//DIG3_TMDS_CNTL
+#define DIG3_TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x0
+#define DIG3_TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x00000001L
+//DIG3_TMDS_CONTROL_CHAR
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x0
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x1
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x2
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x3
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x00000001L
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x00000002L
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x00000004L
+#define DIG3_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x00000008L
+//DIG3_TMDS_CONTROL0_FEEDBACK
+#define DIG3_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x0
+#define DIG3_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x8
+#define DIG3_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x00000003L
+#define DIG3_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x00000300L
+//DIG3_TMDS_STEREOSYNC_CTL_SEL
+#define DIG3_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x0
+#define DIG3_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x00000003L
+//DIG3_TMDS_SYNC_CHAR_PATTERN_0_1
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x0
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x10
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x000003FFL
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x03FF0000L
+//DIG3_TMDS_SYNC_CHAR_PATTERN_2_3
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x0
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x10
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x000003FFL
+#define DIG3_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x03FF0000L
+//DIG3_TMDS_CTL_BITS
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x0
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x8
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x10
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x18
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL0_MASK 0x00000001L
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL1_MASK 0x00000100L
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL2_MASK 0x00010000L
+#define DIG3_TMDS_CTL_BITS__TMDS_CTL3_MASK 0x01000000L
+//DIG3_TMDS_DCBALANCER_CONTROL
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x0
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x4
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x8
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x10
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x18
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x00000001L
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x00000070L
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x00000100L
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0x000F0000L
+#define DIG3_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x01000000L
+//DIG3_TMDS_SYNC_DCBALANCE_CHAR
+#define DIG3_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01__SHIFT 0x0
+#define DIG3_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11__SHIFT 0x10
+#define DIG3_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01_MASK 0x000003FFL
+#define DIG3_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11_MASK 0x03FF0000L
+//DIG3_TMDS_CTL0_1_GEN_CNTL
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x0
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x7
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x8
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0xb
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x10
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x14
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x17
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x18
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x1f
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0x0000000FL
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x00000070L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x00000080L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x00000300L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0x000F0000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x00700000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x00800000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x03000000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG3_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000L
+//DIG3_TMDS_CTL2_3_GEN_CNTL
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x0
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x4
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x7
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x8
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0xb
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x10
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x14
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x17
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x18
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0x0000000FL
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x00000070L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x00000080L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x00000300L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0x000F0000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x00700000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x00800000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x03000000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG3_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000L
+//DIG3_DIG_VERSION
+#define DIG3_DIG_VERSION__DIG_TYPE__SHIFT 0x0
+#define DIG3_DIG_VERSION__DIG_TYPE_MASK 0x00000001L
+
+
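Editorial sketch (not part of the generated header or the patch): every register field above is described by a matching __SHIFT/__MASK pair, and a driver reads or updates one field with the usual mask-and-shift pattern shown below. The shift/mask macros are the ones defined in this block; the register byte offset REG_DIG3_DIG_FE_CNTL_OFFSET and the ioremap()ed MMIO base are hypothetical placeholders, not names from this patch.

#include <linux/io.h>
#include <linux/types.h>

/* Extract DIG_SOURCE_SELECT from DIG3_DIG_FE_CNTL (offset is a placeholder). */
static u32 dig3_get_source_select(void __iomem *mmio)
{
	u32 v = readl(mmio + REG_DIG3_DIG_FE_CNTL_OFFSET);	/* hypothetical offset */

	return (v & DIG3_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK) >>
	       DIG3_DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT;
}

/* Read-modify-write the same field, leaving the other bits untouched. */
static void dig3_set_source_select(void __iomem *mmio, u32 src)
{
	u32 v = readl(mmio + REG_DIG3_DIG_FE_CNTL_OFFSET);

	v &= ~DIG3_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK;		/* clear the field */
	v |= (src << DIG3_DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT) &
	     DIG3_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK;		/* insert new value */
	writel(v, mmio + REG_DIG3_DIG_FE_CNTL_OFFSET);
}

The same pattern applies to every __SHIFT/__MASK pair in the DP3, DIG3 and DP4 blocks of this header.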
+// addressBlock: dce_dc_dio_dp4_dispdec
+//DP4_DP_LINK_CNTL
+#define DP4_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x4
+#define DP4_DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x8
+#define DP4_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x00000010L
+#define DP4_DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x00000100L
+//DP4_DP_PIXEL_FORMAT
+#define DP4_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x0
+#define DP4_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x18
+#define DP4_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE__SHIFT 0x1e
+#define DP4_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x00000007L
+#define DP4_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x07000000L
+#define DP4_DP_PIXEL_FORMAT__DP_PIXEL_PER_CYCLE_PROCESSING_MODE_MASK 0x40000000L
+//DP4_DP_MSA_COLORIMETRY
+#define DP4_DP_MSA_COLORIMETRY__DP_MSA_MISC0__SHIFT 0x18
+#define DP4_DP_MSA_COLORIMETRY__DP_MSA_MISC0_MASK 0xFF000000L
+//DP4_DP_CONFIG
+#define DP4_DP_CONFIG__DP_UDI_LANES__SHIFT 0x0
+#define DP4_DP_CONFIG__DP_UDI_LANES_MASK 0x00000003L
+//DP4_DP_VID_STREAM_CNTL
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x0
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x8
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x10
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x14
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x00000300L
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x00010000L
+#define DP4_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x00100000L
+//DP4_DP_STEER_FIFO
+#define DP4_DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x0
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x4
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x5
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x6
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x7
+#define DP4_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x8
+#define DP4_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0xc
+#define DP4_DP_STEER_FIFO__DP_TU_SIZE__SHIFT 0x18
+#define DP4_DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x00000001L
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x00000010L
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x00000020L
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x00000040L
+#define DP4_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x00000080L
+#define DP4_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x00000100L
+#define DP4_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x00001000L
+#define DP4_DP_STEER_FIFO__DP_TU_SIZE_MASK 0x3F000000L
+//DP4_DP_MSA_MISC
+#define DP4_DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x0
+#define DP4_DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x8
+#define DP4_DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x10
+#define DP4_DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x18
+#define DP4_DP_MSA_MISC__DP_MSA_MISC1_MASK 0x000000FFL
+#define DP4_DP_MSA_MISC__DP_MSA_MISC2_MASK 0x0000FF00L
+#define DP4_DP_MSA_MISC__DP_MSA_MISC3_MASK 0x00FF0000L
+#define DP4_DP_MSA_MISC__DP_MSA_MISC4_MASK 0xFF000000L
+//DP4_DP_DPHY_INTERNAL_CTRL
+#define DP4_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN__SHIFT 0x0
+#define DP4_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL__SHIFT 0x4
+#define DP4_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_EN_MASK 0x00000001L
+#define DP4_DP_DPHY_INTERNAL_CTRL__DPHY_ALT_SCRAMBLER_RESET_SEL_MASK 0x00000010L
+//DP4_DP_VID_TIMING
+#define DP4_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE__SHIFT 0x4
+#define DP4_DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x8
+#define DP4_DP_VID_TIMING__DP_VID_N_MUL__SHIFT 0xa
+#define DP4_DP_VID_TIMING__DP_VID_M_DIV__SHIFT 0xc
+#define DP4_DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x18
+#define DP4_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE_MASK 0x00000010L
+#define DP4_DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x00000100L
+#define DP4_DP_VID_TIMING__DP_VID_N_MUL_MASK 0x00000C00L
+#define DP4_DP_VID_TIMING__DP_VID_M_DIV_MASK 0x00003000L
+#define DP4_DP_VID_TIMING__DP_VID_N_DIV_MASK 0xFF000000L
+//DP4_DP_VID_N
+#define DP4_DP_VID_N__DP_VID_N__SHIFT 0x0
+#define DP4_DP_VID_N__DP_VID_N_MASK 0x00FFFFFFL
+//DP4_DP_VID_M
+#define DP4_DP_VID_M__DP_VID_M__SHIFT 0x0
+#define DP4_DP_VID_M__DP_VID_M_MASK 0x00FFFFFFL
+//DP4_DP_LINK_FRAMING_CNTL
+#define DP4_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x0
+#define DP4_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE__SHIFT 0x14
+#define DP4_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x18
+#define DP4_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x1c
+#define DP4_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x0003FFFFL
+#define DP4_DP_LINK_FRAMING_CNTL__DP_BACK_TO_BACK_BS_AVOIDANCE_ENABLE_MASK 0x00100000L
+#define DP4_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x01000000L
+#define DP4_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000L
+//DP4_DP_HBR2_EYE_PATTERN
+#define DP4_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x0
+#define DP4_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x00000001L
+//DP4_DP_VID_MSA_VBID
+#define DP4_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x0
+#define DP4_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE__SHIFT 0xc
+#define DP4_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x18
+#define DP4_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0x00000FFFL
+#define DP4_DP_VID_MSA_VBID__DP_VID_MSA_TRANSMISSION_ENABLE_MASK 0x00001000L
+#define DP4_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x01000000L
+//DP4_DP_VID_INTERRUPT_CNTL
+#define DP4_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x0
+#define DP4_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x1
+#define DP4_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x2
+#define DP4_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x00000001L
+#define DP4_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x00000002L
+#define DP4_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x00000004L
+//DP4_DP_DPHY_CNTL
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x0
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x1
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x2
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x3
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_EN__SHIFT 0x4
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW__SHIFT 0x5
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS__SHIFT 0x6
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM__SHIFT 0x7
+#define DP4_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL__SHIFT 0x8
+#define DP4_DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x10
+#define DP4_DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x18
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x00000001L
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x00000002L
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x00000004L
+#define DP4_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x00000008L
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_EN_MASK 0x00000010L
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW_MASK 0x00000020L
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS_MASK 0x00000040L
+#define DP4_DP_DPHY_CNTL__DPHY_FEC_DISABLE_MODE_FOR_ALPM_MASK 0x00000080L
+#define DP4_DP_DPHY_CNTL__DPHY_SCRAMBLER_SEL_MASK 0x00000100L
+#define DP4_DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x00010000L
+#define DP4_DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x01000000L
+//DP4_DP_DPHY_TRAINING_PATTERN_SEL
+#define DP4_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x0
+#define DP4_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x00000003L
+//DP4_DP_DPHY_SYM0
+#define DP4_DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x0
+#define DP4_DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0xa
+#define DP4_DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x14
+#define DP4_DP_DPHY_SYM0__DPHY_SYM1_MASK 0x000003FFL
+#define DP4_DP_DPHY_SYM0__DPHY_SYM2_MASK 0x000FFC00L
+#define DP4_DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3FF00000L
+//DP4_DP_DPHY_SYM1
+#define DP4_DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x0
+#define DP4_DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0xa
+#define DP4_DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x14
+#define DP4_DP_DPHY_SYM1__DPHY_SYM4_MASK 0x000003FFL
+#define DP4_DP_DPHY_SYM1__DPHY_SYM5_MASK 0x000FFC00L
+#define DP4_DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3FF00000L
+//DP4_DP_DPHY_SYM2
+#define DP4_DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x0
+#define DP4_DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0xa
+#define DP4_DP_DPHY_SYM2__DPHY_SYM7_MASK 0x000003FFL
+#define DP4_DP_DPHY_SYM2__DPHY_SYM8_MASK 0x000FFC00L
+//DP4_DP_DPHY_8B10B_CNTL
+#define DP4_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x8
+#define DP4_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x10
+#define DP4_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x18
+#define DP4_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x00000100L
+#define DP4_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x00010000L
+#define DP4_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x01000000L
+//DP4_DP_DPHY_PRBS_CNTL
+#define DP4_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x0
+#define DP4_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
+#define DP4_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP4_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x00000001L
+#define DP4_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x00000030L
+#define DP4_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7FFFFF00L
+//DP4_DP_DPHY_SCRAM_CNTL
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS__SHIFT 0x0
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE__SHIFT 0x18
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS_MASK 0x00000001L
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x00000010L
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x0003FF00L
+#define DP4_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE_MASK 0x01000000L
+//DP4_DP_DPHY_CRC_CONTROL0
+#define DP4_DP_DPHY_CRC_CONTROL0__DPHY_CRC_EN__SHIFT 0x0
+#define DP4_DP_DPHY_CRC_CONTROL0__DPHY_CRC_CONT_EN__SHIFT 0x4
+#define DP4_DP_DPHY_CRC_CONTROL0__DPHY_CRC_FIELD__SHIFT 0x8
+#define DP4_DP_DPHY_CRC_CONTROL0__DPHY_CRC_EN_MASK 0x00000001L
+#define DP4_DP_DPHY_CRC_CONTROL0__DPHY_CRC_CONT_EN_MASK 0x00000010L
+#define DP4_DP_DPHY_CRC_CONTROL0__DPHY_CRC_FIELD_MASK 0x00000100L
+//DP4_DP_DPHY_CRC_CONTROL1
+#define DP4_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x0
+#define DP4_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x8
+#define DP4_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_FIRST_SLOT_MASK 0x0000003FL
+#define DP4_DP_DPHY_CRC_CONTROL1__DPHY_CRC_MST_LAST_SLOT_MASK 0x00003F00L
+//DP4_DP_DPHY_CRC_RESULT0
+#define DP4_DP_DPHY_CRC_RESULT0__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP4_DP_DPHY_CRC_RESULT0__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP4_DP_DPHY_CRC_RESULT1
+#define DP4_DP_DPHY_CRC_RESULT1__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP4_DP_DPHY_CRC_RESULT1__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP4_DP_DPHY_CRC_STATUS
+#define DP4_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID__SHIFT 0x0
+#define DP4_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_ACK__SHIFT 0x4
+#define DP4_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x8
+#define DP4_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0xc
+#define DP4_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x10
+#define DP4_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_MASK 0x00000001L
+#define DP4_DP_DPHY_CRC_STATUS__DPHY_CRC_RESULT_VALID_ACK_MASK 0x00000010L
+#define DP4_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x00000100L
+#define DP4_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x00001000L
+#define DP4_DP_DPHY_CRC_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x00010000L
+//DP4_DP_DPHY_FAST_TRAINING
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x0
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x1
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x2
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING__SHIFT 0x4
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x8
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x14
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x00000001L
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x00000002L
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x00000004L
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_STREAM_RESET_DURING_FAST_TRAINING_MASK 0x00000010L
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0x000FFF00L
+#define DP4_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xFFF00000L
+//DP4_DP_DPHY_FAST_TRAINING_STATUS
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x0
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x4
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x8
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0xc
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x00000007L
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x00000010L
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x00000100L
+#define DP4_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x00001000L
+//DP4_DP_DPHY_CRC_RESULT2
+#define DP4_DP_DPHY_CRC_RESULT2__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP4_DP_DPHY_CRC_RESULT2__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP4_DP_DPHY_CRC_RESULT3
+#define DP4_DP_DPHY_CRC_RESULT3__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP4_DP_DPHY_CRC_RESULT3__DPHY_CRC_RESULT_MASK 0xFFFFFFFFL
+//DP4_DP_SEC_CNTL
+#define DP4_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x0
+#define DP4_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
+#define DP4_DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x8
+#define DP4_DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0xc
+#define DP4_DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x10
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x14
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x15
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x16
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x17
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE__SHIFT 0x18
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE__SHIFT 0x19
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE__SHIFT 0x1a
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE__SHIFT 0x1b
+#define DP4_DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x1c
+#define DP4_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x00000001L
+#define DP4_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x00000010L
+#define DP4_DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x00000100L
+#define DP4_DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x00001000L
+#define DP4_DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x00010000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x00100000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x00200000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x00400000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x00800000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE_MASK 0x01000000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE_MASK 0x02000000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE_MASK 0x04000000L
+#define DP4_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE_MASK 0x08000000L
+#define DP4_DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000L
+//DP4_DP_SEC_CNTL1
+#define DP4_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x0
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE__SHIFT 0x1
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY__SHIFT 0x4
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND__SHIFT 0x5
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING__SHIFT 0x6
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED__SHIFT 0x7
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE__SHIFT 0x8
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE__SHIFT 0x9
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE__SHIFT 0xa
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE__SHIFT 0xb
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE__SHIFT 0xc
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE__SHIFT 0xd
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE__SHIFT 0xe
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE__SHIFT 0xf
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM__SHIFT 0x10
+#define DP4_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x00000001L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE_MASK 0x00000002L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY_MASK 0x00000010L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_MASK 0x00000020L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING_MASK 0x00000040L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED_MASK 0x00000080L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE_MASK 0x00000100L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE_MASK 0x00000200L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE_MASK 0x00000400L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE_MASK 0x00000800L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE_MASK 0x00001000L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE_MASK 0x00002000L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE_MASK 0x00004000L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE_MASK 0x00008000L
+#define DP4_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_SEC_FRAMING1
+#define DP4_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x0
+#define DP4_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP4_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0x00000FFFL
+#define DP4_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP4_DP_SEC_FRAMING2
+#define DP4_DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x0
+#define DP4_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP4_DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0x0000FFFFL
+#define DP4_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP4_DP_SEC_FRAMING3
+#define DP4_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x0
+#define DP4_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP4_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x00003FFFL
+#define DP4_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP4_DP_SEC_FRAMING4
+#define DP4_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING__SHIFT 0x0
+#define DP4_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x14
+#define DP4_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x18
+#define DP4_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x1c
+#define DP4_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP4_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING_MASK 0x00000001L
+#define DP4_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x00100000L
+#define DP4_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x01000000L
+#define DP4_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000L
+#define DP4_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP4_DP_SEC_AUD_N
+#define DP4_DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x0
+#define DP4_DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0x00FFFFFFL
+//DP4_DP_SEC_AUD_N_READBACK
+#define DP4_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x0
+#define DP4_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0x00FFFFFFL
+//DP4_DP_SEC_AUD_M
+#define DP4_DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x0
+#define DP4_DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0x00FFFFFFL
+//DP4_DP_SEC_AUD_M_READBACK
+#define DP4_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x0
+#define DP4_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0x00FFFFFFL
+//DP4_DP_SEC_TIMESTAMP
+#define DP4_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x0
+#define DP4_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x00000001L
+//DP4_DP_SEC_PACKET_CNTL
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x1
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x4
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x8
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x10
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0x0000000EL
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x00000010L
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x00003F00L
+#define DP4_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x00010000L
+//DP4_DP_MSE_RATE_CNTL
+#define DP4_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x0
+#define DP4_DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x1a
+#define DP4_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x03FFFFFFL
+#define DP4_DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xFC000000L
+//DP4_DP_MSE_RATE_UPDATE
+#define DP4_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x0
+#define DP4_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x00000001L
+//DP4_DP_MSE_SAT0
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x0
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0__SHIFT 0x4
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0__SHIFT 0x5
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x8
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x10
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1__SHIFT 0x14
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1__SHIFT 0x15
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x18
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x00000007L
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT0_MASK 0x00000010L
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE0_MASK 0x00000020L
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x00003F00L
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x00070000L
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPT1_MASK 0x00100000L
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_ENCRYPTION_TYPE1_MASK 0x00200000L
+#define DP4_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3F000000L
+//DP4_DP_MSE_SAT1
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x0
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2__SHIFT 0x4
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2__SHIFT 0x5
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x8
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x10
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3__SHIFT 0x14
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3__SHIFT 0x15
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x18
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x00000007L
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT2_MASK 0x00000010L
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE2_MASK 0x00000020L
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x00003F00L
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x00070000L
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPT3_MASK 0x00100000L
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_ENCRYPTION_TYPE3_MASK 0x00200000L
+#define DP4_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3F000000L
+//DP4_DP_MSE_SAT2
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x0
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4__SHIFT 0x4
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4__SHIFT 0x5
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x8
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x10
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5__SHIFT 0x14
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5__SHIFT 0x15
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x18
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x00000007L
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT4_MASK 0x00000010L
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE4_MASK 0x00000020L
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x00003F00L
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x00070000L
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPT5_MASK 0x00100000L
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_ENCRYPTION_TYPE5_MASK 0x00200000L
+#define DP4_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3F000000L
+//DP4_DP_MSE_SAT_UPDATE
+#define DP4_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x0
+#define DP4_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x8
+#define DP4_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x00000003L
+#define DP4_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x00000100L
+//DP4_DP_MSE_LINK_TIMING
+#define DP4_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x0
+#define DP4_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x10
+#define DP4_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x000003FFL
+#define DP4_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x00030000L
+//DP4_DP_MSE_MISC_CNTL
+#define DP4_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x0
+#define DP4_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x4
+#define DP4_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x8
+#define DP4_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x00000001L
+#define DP4_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x00000010L
+#define DP4_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x00000100L
+//DP4_DP_DPHY_BS_SR_SWAP_CNTL
+#define DP4_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
+#define DP4_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE__SHIFT 0xf
+#define DP4_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START__SHIFT 0x10
+#define DP4_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x000003FFL
+#define DP4_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x00008000L
+#define DP4_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START_MASK 0x00010000L
+//DP4_DP_DPHY_HBR2_PATTERN_CONTROL
+#define DP4_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL__SHIFT 0x0
+#define DP4_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL_MASK 0x00000007L
+//DP4_DP_MSE_SAT0_STATUS
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS__SHIFT 0x0
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS__SHIFT 0x4
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS__SHIFT 0x5
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS__SHIFT 0x8
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS__SHIFT 0x10
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS__SHIFT 0x14
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS__SHIFT 0x15
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS__SHIFT 0x18
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS_MASK 0x00000007L
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT0_STATUS_MASK 0x00000010L
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE0_STATUS_MASK 0x00000020L
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS_MASK 0x00003F00L
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS_MASK 0x00070000L
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPT1_STATUS_MASK 0x00100000L
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE1_STATUS_MASK 0x00200000L
+#define DP4_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS_MASK 0x3F000000L
+//DP4_DP_MSE_SAT1_STATUS
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS__SHIFT 0x0
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS__SHIFT 0x4
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS__SHIFT 0x5
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS__SHIFT 0x8
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS__SHIFT 0x10
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS__SHIFT 0x14
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS__SHIFT 0x15
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS__SHIFT 0x18
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS_MASK 0x00000007L
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT2_STATUS_MASK 0x00000010L
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE2_STATUS_MASK 0x00000020L
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS_MASK 0x00003F00L
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS_MASK 0x00070000L
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPT3_STATUS_MASK 0x00100000L
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE3_STATUS_MASK 0x00200000L
+#define DP4_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS_MASK 0x3F000000L
+//DP4_DP_MSE_SAT2_STATUS
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS__SHIFT 0x0
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS__SHIFT 0x4
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS__SHIFT 0x5
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS__SHIFT 0x8
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS__SHIFT 0x10
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS__SHIFT 0x14
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS__SHIFT 0x15
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS__SHIFT 0x18
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS_MASK 0x00000007L
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT4_STATUS_MASK 0x00000010L
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE4_STATUS_MASK 0x00000020L
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS_MASK 0x00003F00L
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS_MASK 0x00070000L
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPT5_STATUS_MASK 0x00100000L
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_ENCRYPTION_TYPE5_STATUS_MASK 0x00200000L
+#define DP4_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS_MASK 0x3F000000L
+//DP4_DP_DPIA_SPARE
+#define DP4_DP_DPIA_SPARE__DP_DPIA_SPARE__SHIFT 0x0
+#define DP4_DP_DPIA_SPARE__DP_DPIA_SPARE_MASK 0x00000003L
+//DP4_DP_MSA_TIMING_PARAM1
+#define DP4_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL__SHIFT 0x0
+#define DP4_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL__SHIFT 0x10
+#define DP4_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL_MASK 0x0000FFFFL
+#define DP4_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL_MASK 0xFFFF0000L
+//DP4_DP_MSA_TIMING_PARAM2
+#define DP4_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART__SHIFT 0x0
+#define DP4_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART__SHIFT 0x10
+#define DP4_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART_MASK 0x0000FFFFL
+#define DP4_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART_MASK 0xFFFF0000L
+//DP4_DP_MSA_TIMING_PARAM3
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH__SHIFT 0x0
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY__SHIFT 0xf
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH__SHIFT 0x10
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY__SHIFT 0x1f
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH_MASK 0x00007FFFL
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY_MASK 0x00008000L
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH_MASK 0x7FFF0000L
+#define DP4_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY_MASK 0x80000000L
+//DP4_DP_MSA_TIMING_PARAM4
+#define DP4_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT__SHIFT 0x0
+#define DP4_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH__SHIFT 0x10
+#define DP4_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT_MASK 0x0000FFFFL
+#define DP4_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH_MASK 0xFFFF0000L
+//DP4_DP_MSO_CNTL
+#define DP4_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK__SHIFT 0x0
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE__SHIFT 0x4
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE__SHIFT 0x8
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE__SHIFT 0xc
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE__SHIFT 0x10
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE__SHIFT 0x14
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE__SHIFT 0x18
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE__SHIFT 0x1c
+#define DP4_DP_MSO_CNTL__DP_MSO_NUM_OF_SSTLINK_MASK 0x00000003L
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_STREAM_ENABLE_MASK 0x000000F0L
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_ASP_ENABLE_MASK 0x00000F00L
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_ATP_ENABLE_MASK 0x0000F000L
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_AIP_ENABLE_MASK 0x000F0000L
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_ACM_ENABLE_MASK 0x00F00000L
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_GSP0_ENABLE_MASK 0x0F000000L
+#define DP4_DP_MSO_CNTL__DP_MSO_SEC_GSP1_ENABLE_MASK 0xF0000000L
+//DP4_DP_MSO_CNTL1
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE__SHIFT 0x0
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE__SHIFT 0x4
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE__SHIFT 0x8
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE__SHIFT 0xc
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE__SHIFT 0x10
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE__SHIFT 0x14
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE__SHIFT 0x18
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE__SHIFT 0x1c
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP2_ENABLE_MASK 0x0000000FL
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP3_ENABLE_MASK 0x000000F0L
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP4_ENABLE_MASK 0x00000F00L
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP5_ENABLE_MASK 0x0000F000L
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP6_ENABLE_MASK 0x000F0000L
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_GSP7_ENABLE_MASK 0x00F00000L
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_MPG_ENABLE_MASK 0x0F000000L
+#define DP4_DP_MSO_CNTL1__DP_MSO_SEC_ISRC_ENABLE_MASK 0xF0000000L
+//DP4_DP_DSC_CNTL
+#define DP4_DP_DSC_CNTL__DP_DSC_MODE__SHIFT 0x0
+#define DP4_DP_DSC_CNTL__DP_DSC_MODE_MASK 0x00000001L
+//DP4_DP_SEC_CNTL2
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND__SHIFT 0x0
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING__SHIFT 0x1
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED__SHIFT 0x2
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE__SHIFT 0x3
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND__SHIFT 0x4
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING__SHIFT 0x5
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED__SHIFT 0x6
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE__SHIFT 0x7
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND__SHIFT 0x8
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING__SHIFT 0x9
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED__SHIFT 0xa
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE__SHIFT 0xb
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND__SHIFT 0xc
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING__SHIFT 0xd
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE__SHIFT 0xf
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND__SHIFT 0x10
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING__SHIFT 0x11
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED__SHIFT 0x12
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE__SHIFT 0x13
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND__SHIFT 0x14
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING__SHIFT 0x15
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED__SHIFT 0x16
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE__SHIFT 0x17
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND__SHIFT 0x18
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING__SHIFT 0x19
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED__SHIFT 0x1a
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE__SHIFT 0x1b
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP11_PPS__SHIFT 0x1c
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_MASK 0x00000001L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING_MASK 0x00000002L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED_MASK 0x00000004L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE_MASK 0x00000008L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_MASK 0x00000010L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING_MASK 0x00000020L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED_MASK 0x00000040L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE_MASK 0x00000080L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_MASK 0x00000100L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING_MASK 0x00000200L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED_MASK 0x00000400L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE_MASK 0x00000800L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_MASK 0x00001000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING_MASK 0x00002000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE_MASK 0x00008000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_MASK 0x00010000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING_MASK 0x00020000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED_MASK 0x00040000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE_MASK 0x00080000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_MASK 0x00100000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING_MASK 0x00200000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED_MASK 0x00400000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE_MASK 0x00800000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_MASK 0x01000000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING_MASK 0x02000000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED_MASK 0x04000000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE_MASK 0x08000000L
+#define DP4_DP_SEC_CNTL2__DP_SEC_GSP11_PPS_MASK 0x10000000L
+//DP4_DP_SEC_CNTL3
+#define DP4_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT 0x0
+#define DP4_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM__SHIFT 0x10
+#define DP4_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK 0x0000FFFFL
+#define DP4_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_SEC_CNTL4
+#define DP4_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM__SHIFT 0x0
+#define DP4_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM__SHIFT 0x10
+#define DP4_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM_MASK 0x0000FFFFL
+#define DP4_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_SEC_CNTL5
+#define DP4_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM__SHIFT 0x0
+#define DP4_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM__SHIFT 0x10
+#define DP4_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM_MASK 0x0000FFFFL
+#define DP4_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_SEC_CNTL6
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM__SHIFT 0x0
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE__SHIFT 0x10
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE__SHIFT 0x11
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE__SHIFT 0x12
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE__SHIFT 0x13
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE__SHIFT 0x14
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE__SHIFT 0x15
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE__SHIFT 0x16
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE__SHIFT 0x17
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE__SHIFT 0x18
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE__SHIFT 0x19
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE__SHIFT 0x1a
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE__SHIFT 0x1b
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM_MASK 0x0000FFFFL
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP0_EN_DB_DISABLE_MASK 0x00010000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP1_EN_DB_DISABLE_MASK 0x00020000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP2_EN_DB_DISABLE_MASK 0x00040000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP3_EN_DB_DISABLE_MASK 0x00080000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP4_EN_DB_DISABLE_MASK 0x00100000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP5_EN_DB_DISABLE_MASK 0x00200000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP6_EN_DB_DISABLE_MASK 0x00400000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP7_EN_DB_DISABLE_MASK 0x00800000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP8_EN_DB_DISABLE_MASK 0x01000000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP9_EN_DB_DISABLE_MASK 0x02000000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP10_EN_DB_DISABLE_MASK 0x04000000L
+#define DP4_DP_SEC_CNTL6__DP_SEC_GSP11_EN_DB_DISABLE_MASK 0x08000000L
+//DP4_DP_SEC_CNTL7
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE__SHIFT 0x0
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE__SHIFT 0x1
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE__SHIFT 0x4
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE__SHIFT 0x5
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE__SHIFT 0x8
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE__SHIFT 0x9
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE__SHIFT 0xc
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE__SHIFT 0xd
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE__SHIFT 0x10
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE__SHIFT 0x11
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE__SHIFT 0x14
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE__SHIFT 0x15
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE__SHIFT 0x18
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE__SHIFT 0x19
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE__SHIFT 0x1c
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE__SHIFT 0x1d
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE_MASK 0x00000001L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE_MASK 0x00000002L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE_MASK 0x00000010L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE_MASK 0x00000020L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE_MASK 0x00000100L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE_MASK 0x00000200L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE_MASK 0x00001000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE_MASK 0x00002000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE_MASK 0x00010000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE_MASK 0x00020000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE_MASK 0x00100000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE_MASK 0x00200000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE_MASK 0x01000000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE_MASK 0x02000000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE_MASK 0x10000000L
+#define DP4_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE_MASK 0x20000000L
+//DP4_DP_DB_CNTL
+#define DP4_DP_DB_CNTL__DP_DB_PENDING__SHIFT 0x0
+#define DP4_DP_DB_CNTL__DP_DB_TAKEN__SHIFT 0x4
+#define DP4_DP_DB_CNTL__DP_DB_TAKEN_CLR__SHIFT 0x5
+#define DP4_DP_DB_CNTL__DP_DB_LOCK__SHIFT 0x8
+#define DP4_DP_DB_CNTL__DP_DB_DISABLE__SHIFT 0xc
+#define DP4_DP_DB_CNTL__DP_VUPDATE_DB_PENDING__SHIFT 0xf
+#define DP4_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DP4_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DP4_DP_DB_CNTL__DP_DB_PENDING_MASK 0x00000001L
+#define DP4_DP_DB_CNTL__DP_DB_TAKEN_MASK 0x00000010L
+#define DP4_DP_DB_CNTL__DP_DB_TAKEN_CLR_MASK 0x00000020L
+#define DP4_DP_DB_CNTL__DP_DB_LOCK_MASK 0x00000100L
+#define DP4_DP_DB_CNTL__DP_DB_DISABLE_MASK 0x00001000L
+#define DP4_DP_DB_CNTL__DP_VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DP4_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DP4_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DP4_DP_MSA_VBID_MISC
+#define DP4_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define DP4_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x4
+#define DP4_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE__SHIFT 0x8
+#define DP4_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE__SHIFT 0x9
+#define DP4_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN__SHIFT 0xc
+#define DP4_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN__SHIFT 0xd
+#define DP4_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE__SHIFT 0xf
+#define DP4_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM__SHIFT 0x10
+#define DP4_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_MASK 0x00000003L
+#define DP4_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000010L
+#define DP4_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_MASK 0x00000100L
+#define DP4_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_MASK 0x00000200L
+#define DP4_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN_MASK 0x00001000L
+#define DP4_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN_MASK 0x00002000L
+#define DP4_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE_MASK 0x00008000L
+#define DP4_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_SEC_METADATA_TRANSMISSION
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x1
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE__SHIFT 0x4
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE__SHIFT 0x10
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000002L
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE_MASK 0x000000F0L
+#define DP4_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DP4_DP_ALPM_CNTL
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND__SHIFT 0x0
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING__SHIFT 0x1
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND__SHIFT 0x2
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING__SHIFT 0x3
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE__SHIFT 0x4
+#define DP4_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO__SHIFT 0x5
+#define DP4_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE__SHIFT 0x6
+#define DP4_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP__SHIFT 0x7
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM__SHIFT 0x8
+#define DP4_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS__SHIFT 0xb
+#define DP4_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN__SHIFT 0xc
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM__SHIFT 0x10
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_SEND_MASK 0x00000001L
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PENDING_MASK 0x00000002L
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_SEND_MASK 0x00000004L
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_STANDBY_PENDING_MASK 0x00000008L
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_IMMEDIATE_MASK 0x00000010L
+#define DP4_DP_ALPM_CNTL__DP_LINK_TRAINING_SWITCH_BETWEEN_VIDEO_MASK 0x00000020L
+#define DP4_DP_ALPM_CNTL__DP_ALPM_SLEEP_SEQUENCE_MODE_MASK 0x00000040L
+#define DP4_DP_ALPM_CNTL__DP_STOP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_MASK 0x00000080L
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_PATTERN_NUM_MASK 0x00000300L
+#define DP4_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_STATUS_MASK 0x00000800L
+#define DP4_DP_ALPM_CNTL__DP_FORCE_SCRAMBLED_ZERO_AFTER_SLEEP_EN_MASK 0x00001000L
+#define DP4_DP_ALPM_CNTL__DP_ML_PHY_SLEEP_STANDBY_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_GSP8_CNTL
+#define DP4_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE__SHIFT 0x0
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE__SHIFT 0x4
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE__SHIFT 0x5
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE__SHIFT 0x6
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND__SHIFT 0x7
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE__SHIFT 0x8
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING__SHIFT 0xc
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE__SHIFT 0xd
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM__SHIFT 0x10
+#define DP4_DP_GSP8_CNTL__DP_MSO_SEC_GSP8_ENABLE_MASK 0x0000000FL
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_ENABLE_MASK 0x00000010L
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_REFERENCE_MASK 0x00000020L
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_IN_IDLE_MASK 0x00000040L
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_MASK 0x00000080L
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ANY_LINE_MASK 0x00000100L
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_PENDING_MASK 0x00001000L
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_ACTIVE_MASK 0x00002000L
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP4_DP_GSP8_CNTL__DP_SEC_GSP8_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_GSP9_CNTL
+#define DP4_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE__SHIFT 0x0
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE__SHIFT 0x4
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE__SHIFT 0x5
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE__SHIFT 0x6
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND__SHIFT 0x7
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE__SHIFT 0x8
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING__SHIFT 0xc
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE__SHIFT 0xd
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM__SHIFT 0x10
+#define DP4_DP_GSP9_CNTL__DP_MSO_SEC_GSP9_ENABLE_MASK 0x0000000FL
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_ENABLE_MASK 0x00000010L
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_REFERENCE_MASK 0x00000020L
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_IN_IDLE_MASK 0x00000040L
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_MASK 0x00000080L
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ANY_LINE_MASK 0x00000100L
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_PENDING_MASK 0x00001000L
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_ACTIVE_MASK 0x00002000L
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP4_DP_GSP9_CNTL__DP_SEC_GSP9_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_GSP10_CNTL
+#define DP4_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE__SHIFT 0x0
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE__SHIFT 0x4
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE__SHIFT 0x5
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE__SHIFT 0x6
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND__SHIFT 0x7
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE__SHIFT 0x8
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING__SHIFT 0xc
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE__SHIFT 0xd
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM__SHIFT 0x10
+#define DP4_DP_GSP10_CNTL__DP_MSO_SEC_GSP10_ENABLE_MASK 0x0000000FL
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_ENABLE_MASK 0x00000010L
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_REFERENCE_MASK 0x00000020L
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_IN_IDLE_MASK 0x00000040L
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_MASK 0x00000080L
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ANY_LINE_MASK 0x00000100L
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_PENDING_MASK 0x00001000L
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_ACTIVE_MASK 0x00002000L
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP4_DP_GSP10_CNTL__DP_SEC_GSP10_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_GSP11_CNTL
+#define DP4_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE__SHIFT 0x0
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE__SHIFT 0x4
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE__SHIFT 0x5
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE__SHIFT 0x6
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND__SHIFT 0x7
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE__SHIFT 0x8
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING__SHIFT 0xc
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE__SHIFT 0xd
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM__SHIFT 0x10
+#define DP4_DP_GSP11_CNTL__DP_MSO_SEC_GSP11_ENABLE_MASK 0x0000000FL
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_ENABLE_MASK 0x00000010L
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_REFERENCE_MASK 0x00000020L
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_IN_IDLE_MASK 0x00000040L
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_MASK 0x00000080L
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ANY_LINE_MASK 0x00000100L
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_PENDING_MASK 0x00001000L
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_ACTIVE_MASK 0x00002000L
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP4_DP_GSP11_CNTL__DP_SEC_GSP11_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_GSP_EN_DB_STATUS
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING__SHIFT 0x0
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING__SHIFT 0x1
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING__SHIFT 0x2
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING__SHIFT 0x3
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING__SHIFT 0x4
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING__SHIFT 0x5
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING__SHIFT 0x6
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING__SHIFT 0x7
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING__SHIFT 0x8
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING__SHIFT 0x9
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING__SHIFT 0xa
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING__SHIFT 0xb
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP0_EN_DB_PENDING_MASK 0x00000001L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP1_EN_DB_PENDING_MASK 0x00000002L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP2_EN_DB_PENDING_MASK 0x00000004L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP3_EN_DB_PENDING_MASK 0x00000008L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP4_EN_DB_PENDING_MASK 0x00000010L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP5_EN_DB_PENDING_MASK 0x00000020L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP6_EN_DB_PENDING_MASK 0x00000040L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP7_EN_DB_PENDING_MASK 0x00000080L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP8_EN_DB_PENDING_MASK 0x00000100L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP9_EN_DB_PENDING_MASK 0x00000200L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP10_EN_DB_PENDING_MASK 0x00000400L
+#define DP4_DP_GSP_EN_DB_STATUS__DP_SEC_GSP11_EN_DB_PENDING_MASK 0x00000800L
+//DP4_DP_AUXLESS_ALPM_CNTL1
+#define DP4_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT__SHIFT 0x4
+#define DP4_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY__SHIFT 0x8
+#define DP4_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL__SHIFT 0x14
+#define DP4_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE__SHIFT 0x1f
+#define DP4_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_REPEAT_MASK 0x000000F0L
+#define DP4_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_DELAY_MASK 0x0007FF00L
+#define DP4_DP_AUXLESS_ALPM_CNTL1__DP_ML_PHY_SLEEP_INTERVAL_MASK 0x1FF00000L
+#define DP4_DP_AUXLESS_ALPM_CNTL1__DP_SET_AUXLESS_ALPM_SLEEP_STATE_MASK 0x80000000L
+//DP4_DP_AUXLESS_ALPM_CNTL2
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME__SHIFT 0x0
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND__SHIFT 0x7
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE__SHIFT 0x10
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING__SHIFT 0x11
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE__SHIFT 0x12
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING__SHIFT 0x13
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD__SHIFT 0x14
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ML_PHY_SLEEP_HOLD_TIME_MASK 0x0000007FL
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_SEND_MASK 0x00000080L
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_IMMEDIATE_MASK 0x00010000L
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_WAKEUP_PENDING_MASK 0x00020000L
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_IMMEDIATE_MASK 0x00040000L
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_FEC_EN_PENDING_MASK 0x00080000L
+#define DP4_DP_AUXLESS_ALPM_CNTL2__DP_ALPM_ML_PHY_LOCK_PERIOD_MASK 0x3FF00000L
+//DP4_DP_AUXLESS_ALPM_CNTL3
+#define DP4_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM__SHIFT 0x0
+#define DP4_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM__SHIFT 0x10
+#define DP4_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_WAKEUP_LINE_NUM_MASK 0x0000FFFFL
+#define DP4_DP_AUXLESS_ALPM_CNTL3__DP_ALPM_FEC_EN_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_AUXLESS_ALPM_CNTL4
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN__SHIFT 0x1
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL__SHIFT 0x2
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME__SHIFT 0x3
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE__SHIFT 0x4
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS__SHIFT 0x5
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE__SHIFT 0x6
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM__SHIFT 0x18
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_MASK 0x00000002L
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_SLEEP_PATTERN_SEL_MASK 0x00000004L
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FORCE_WAKEUP_NEXT_FRAME_MASK 0x00000008L
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_DIS_IMMEDIATE_MASK 0x00000010L
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_HW_MODE_EN_STATUS_MASK 0x00000020L
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_CURRENT_STATE_MASK 0x00000040L
+#define DP4_DP_AUXLESS_ALPM_CNTL4__DP_ALPM_FRAME_NUM_MASK 0xFF000000L
+//DP4_DP_AUXLESS_ALPM_CNTL5
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK__SHIFT 0x0
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED__SHIFT 0x1
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x2
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR__SHIFT 0x3
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM__SHIFT 0x8
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM__SHIFT 0x10
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_MASK_MASK 0x00000001L
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_OCCURRED_MASK 0x00000002L
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000004L
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_CLEAR_MASK 0x00000008L
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_FRAME_NUM_MASK 0x0000FF00L
+#define DP4_DP_AUXLESS_ALPM_CNTL5__DP_ALPM_WAKEUP_INTERRUPT_LINE_NUM_MASK 0xFFFF0000L
+//DP4_DP_STREAM_SYMBOL_COUNT_STATUS
+#define DP4_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT__SHIFT 0x0
+#define DP4_DP_STREAM_SYMBOL_COUNT_STATUS__DP_STREAM_BS_COUNT_MASK 0x0000FFFFL
+//DP4_DP_STREAM_SYMBOL_COUNT_CONTROL
+#define DP4_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE__SHIFT 0x0
+#define DP4_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET__SHIFT 0x4
+#define DP4_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_ENABLE_MASK 0x00000001L
+#define DP4_DP_STREAM_SYMBOL_COUNT_CONTROL__DP_STREAM_BS_COUNT_RESET_MASK 0x00000010L
+//DP4_DP_LINK_SYMBOL_COUNT_STATUS0
+#define DP4_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT__SHIFT 0x0
+#define DP4_DP_LINK_SYMBOL_COUNT_STATUS0__DP_LINK_SR_COUNT_MASK 0x0000FFFFL
+//DP4_DP_LINK_SYMBOL_COUNT_STATUS1
+#define DP4_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT__SHIFT 0x0
+#define DP4_DP_LINK_SYMBOL_COUNT_STATUS1__DP_LINK_CYCLE_COUNT_MASK 0xFFFFFFFFL
+//DP4_DP_LINK_SYMBOL_COUNT_CONTROL
+#define DP4_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE__SHIFT 0x0
+#define DP4_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET__SHIFT 0x4
+#define DP4_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE__SHIFT 0x8
+#define DP4_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET__SHIFT 0xc
+#define DP4_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_ENABLE_MASK 0x00000001L
+#define DP4_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_SR_COUNT_RESET_MASK 0x00000010L
+#define DP4_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_ENABLE_MASK 0x00000100L
+#define DP4_DP_LINK_SYMBOL_COUNT_CONTROL__DP_LINK_CYCLE_COUNT_RESET_MASK 0x00001000L
+
+
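The SHIFT/MASK pairs above follow the usual convention for these generated headers: for each register field, __SHIFT is the bit offset of the field's least-significant bit and _MASK selects the field's bits, so mask == ((2^width - 1) << shift). Below is a minimal standalone sketch of how such pairs are typically consumed, using DP4_DP_SEC_CNTL__DP_SEC_ASP_ENABLE from the block above; the get_field()/set_field() helpers are illustrative assumptions and are not part of this patch (in the driver itself this pattern is normally wrapped by the REG_GET_FIELD()/REG_SET_FIELD() macros, which rely on exactly these __SHIFT/_MASK suffixes).

#include <stdint.h>
#include <stdio.h>

/* Field definition copied verbatim from the DP4_DP_SEC_CNTL block above. */
#define DP4_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
#define DP4_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK   0x00000010L

/* Illustrative helpers (assumption, not from the patch): read and update one field. */
static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t dp_sec_cntl = 0;

	/* Set DP_SEC_ASP_ENABLE (bit 4) in a local copy of DP_SEC_CNTL. */
	dp_sec_cntl = set_field(dp_sec_cntl,
				DP4_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK,
				DP4_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT, 1);

	printf("DP_SEC_CNTL = 0x%08X, ASP enable = %u\n",
	       (unsigned int)dp_sec_cntl,
	       (unsigned int)get_field(dp_sec_cntl,
				       DP4_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK,
				       DP4_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT));
	return 0;
}
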
+// addressBlock: dce_dc_dio_dig4_dispdec
+//DIG4_DIG_FE_CNTL
+#define DIG4_DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x0
+#define DIG4_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x4
+#define DIG4_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x8
+#define DIG4_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT__SHIFT 0xc
+#define DIG4_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING__SHIFT 0xf
+#define DIG4_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT__SHIFT 0x10
+#define DIG4_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN__SHIFT 0x14
+#define DIG4_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x00000007L
+#define DIG4_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x00000070L
+#define DIG4_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x00000100L
+#define DIG4_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT_MASK 0x00007000L
+#define DIG4_DIG_FE_CNTL__DIG_SPLIT_LINK_PIXEL_GROUPING_MASK 0x00008000L
+#define DIG4_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT_MASK 0x00030000L
+#define DIG4_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_EN_MASK 0x00100000L
+//DIG4_DIG_FE_CLK_CNTL
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_MODE__SHIFT 0x0
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN__SHIFT 0x4
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET__SHIFT 0x5
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON__SHIFT 0xa
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON__SHIFT 0xb
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON__SHIFT 0xc
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON__SHIFT 0xe
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_MODE_MASK 0x00000007L
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_CLK_EN_MASK 0x00000010L
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SOFT_RESET_MASK 0x00000020L
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_DISPCLK_G_CLOCK_ON_MASK 0x00000400L
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_CLOCK_ON_MASK 0x00000800L
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_AFMT_CLOCK_ON_MASK 0x00001000L
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SYMCLK_FE_G_TMDS_CLOCK_ON_MASK 0x00002000L
+#define DIG4_DIG_FE_CLK_CNTL__DIG_FE_SOCCLK_G_AFMT_CLOCK_ON_MASK 0x00004000L
+//DIG4_DIG_FE_EN_CNTL
+#define DIG4_DIG_FE_EN_CNTL__DIG_FE_ENABLE__SHIFT 0x0
+#define DIG4_DIG_FE_EN_CNTL__DIG_FE_ENABLE_MASK 0x00000001L
+//DIG4_DIG_OUTPUT_CRC_CNTL
+#define DIG4_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x0
+#define DIG4_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x4
+#define DIG4_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x8
+#define DIG4_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x00000001L
+#define DIG4_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x00000010L
+#define DIG4_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x00000300L
+//DIG4_DIG_OUTPUT_CRC_RESULT
+#define DIG4_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x0
+#define DIG4_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3FFFFFFFL
+//DIG4_DIG_CLOCK_PATTERN
+#define DIG4_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x0
+#define DIG4_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x000003FFL
+//DIG4_DIG_TEST_PATTERN
+#define DIG4_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x0
+#define DIG4_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x1
+#define DIG4_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x4
+#define DIG4_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x5
+#define DIG4_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x6
+#define DIG4_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x10
+#define DIG4_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x00000001L
+#define DIG4_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x00000002L
+#define DIG4_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x00000010L
+#define DIG4_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x00000020L
+#define DIG4_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x00000040L
+#define DIG4_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x03FF0000L
+//DIG4_DIG_RANDOM_PATTERN_SEED
+#define DIG4_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x0
+#define DIG4_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x18
+#define DIG4_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0x00FFFFFFL
+#define DIG4_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x01000000L
+//DIG4_DIG_FIFO_CTRL0
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE__SHIFT 0x0
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_RESET__SHIFT 0x1
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL__SHIFT 0x2
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x7
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE__SHIFT 0x8
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE__SHIFT 0x14
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_ERROR__SHIFT 0x1c
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_ENABLE_MASK 0x00000001L
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_RESET_MASK 0x00000002L
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_READ_START_LEVEL_MASK 0x0000007CL
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_READ_CLOCK_SRC_MASK 0x00000080L
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_OUTPUT_PIXEL_MODE_MASK 0x00000100L
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_RESET_DONE_MASK 0x00100000L
+#define DIG4_DIG_FIFO_CTRL0__DIG_FIFO_ERROR_MASK 0x30000000L
+//DIG4_DIG_FIFO_CTRL1
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED__SHIFT 0x1d
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_CALIBRATED_MASK 0x20000000L
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DIG4_DIG_FIFO_CTRL1__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+//DIG4_HDMI_METADATA_PACKET_CONTROL
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L
+#define DIG4_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DIG4_HDMI_CONTROL
+#define DIG4_HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
+#define DIG4_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1
+#define DIG4_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
+#define DIG4_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED__SHIFT 0x3
+#define DIG4_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x4
+#define DIG4_HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x8
+#define DIG4_HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x9
+#define DIG4_HDMI_CONTROL__DOLBY_VISION_EN__SHIFT 0xa
+#define DIG4_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED__SHIFT 0xb
+#define DIG4_HDMI_CONTROL__TMDS_PIXEL_ENCODING__SHIFT 0xc
+#define DIG4_HDMI_CONTROL__TMDS_COLOR_FORMAT__SHIFT 0xd
+#define DIG4_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM__SHIFT 0x10
+#define DIG4_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x18
+#define DIG4_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x1c
+#define DIG4_HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x00000001L
+#define DIG4_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x00000002L
+#define DIG4_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x00000004L
+#define DIG4_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x00000008L
+#define DIG4_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x00000010L
+#define DIG4_HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x00000100L
+#define DIG4_HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x00000200L
+#define DIG4_HDMI_CONTROL__DOLBY_VISION_EN_MASK 0x00000400L
+#define DIG4_HDMI_CONTROL__DOLBY_VISION_METADATA_PACKET_MISSED_MASK 0x00000800L
+#define DIG4_HDMI_CONTROL__TMDS_PIXEL_ENCODING_MASK 0x00001000L
+#define DIG4_HDMI_CONTROL__TMDS_COLOR_FORMAT_MASK 0x00006000L
+#define DIG4_HDMI_CONTROL__HDMI_UNSCRAMBLED_CONTROL_LINE_NUM_MASK 0x003F0000L
+#define DIG4_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x01000000L
+#define DIG4_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000L
+//DIG4_HDMI_STATUS
+#define DIG4_HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x0
+#define DIG4_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x10
+#define DIG4_HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x14
+#define DIG4_HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x1b
+#define DIG4_HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x00000001L
+#define DIG4_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x00010000L
+#define DIG4_HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x00100000L
+#define DIG4_HDMI_STATUS__HDMI_ERROR_INT_MASK 0x08000000L
+//DIG4_HDMI_AUDIO_PACKET_CONTROL
+#define DIG4_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x4
+#define DIG4_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x00000030L
+//DIG4_HDMI_ACR_PACKET_CONTROL
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define DIG4_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+//DIG4_HDMI_VBI_PACKET_CONTROL
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x0
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x4
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND__SHIFT 0xc
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE__SHIFT 0x18
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_SEND_MASK 0x00001000L
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
+#define DIG4_HDMI_VBI_PACKET_CONTROL__HDMI_ACP_LINE_MASK 0x3F000000L
+//DIG4_HDMI_INFOFRAME_CONTROL0
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x5
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x8
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x9
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x00000010L
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x00000020L
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x00000100L
+#define DIG4_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x00000200L
+//DIG4_HDMI_INFOFRAME_CONTROL1
+#define DIG4_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x8
+#define DIG4_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x10
+#define DIG4_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x00003F00L
+#define DIG4_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x003F0000L
+//DIG4_HDMI_GENERIC_PACKET_CONTROL0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x2
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x6
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xa
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xe
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x12
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x16
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1a
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1e
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE__SHIFT 0x1f
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000004L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 0x00000040L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000400L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00004000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00040000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00400000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x04000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x40000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_UPDATE_LOCK_DISABLE_MASK 0x80000000L
+//DIG4_HDMI_GENERIC_PACKET_CONTROL6
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT__SHIFT 0x1
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE__SHIFT 0x2
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE__SHIFT 0x3
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND__SHIFT 0x4
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT__SHIFT 0x5
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE__SHIFT 0x6
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE__SHIFT 0x7
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND__SHIFT 0x8
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT__SHIFT 0x9
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE__SHIFT 0xa
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE__SHIFT 0xb
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND__SHIFT 0xc
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT__SHIFT 0xd
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE__SHIFT 0xe
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE__SHIFT 0xf
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT__SHIFT 0x11
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE__SHIFT 0x12
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE__SHIFT 0x13
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND__SHIFT 0x14
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT__SHIFT 0x15
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE__SHIFT 0x16
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE__SHIFT 0x17
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND__SHIFT 0x18
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT__SHIFT 0x19
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE__SHIFT 0x1a
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE__SHIFT 0x1b
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_SEND_MASK 0x00000001L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_CONT_MASK 0x00000002L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_LINE_REFERENCE_MASK 0x00000004L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC8_UPDATE_LOCK_DISABLE_MASK 0x00000008L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_SEND_MASK 0x00000010L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_CONT_MASK 0x00000020L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_LINE_REFERENCE_MASK 0x00000040L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC9_UPDATE_LOCK_DISABLE_MASK 0x00000080L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_SEND_MASK 0x00000100L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_CONT_MASK 0x00000200L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_LINE_REFERENCE_MASK 0x00000400L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC10_UPDATE_LOCK_DISABLE_MASK 0x00000800L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_SEND_MASK 0x00001000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_CONT_MASK 0x00002000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_LINE_REFERENCE_MASK 0x00004000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC11_UPDATE_LOCK_DISABLE_MASK 0x00008000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_SEND_MASK 0x00010000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_CONT_MASK 0x00020000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_LINE_REFERENCE_MASK 0x00040000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC12_UPDATE_LOCK_DISABLE_MASK 0x00080000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_SEND_MASK 0x00100000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_CONT_MASK 0x00200000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_LINE_REFERENCE_MASK 0x00400000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC13_UPDATE_LOCK_DISABLE_MASK 0x00800000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_SEND_MASK 0x01000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_CONT_MASK 0x02000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_LINE_REFERENCE_MASK 0x04000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL6__HDMI_GENERIC14_UPDATE_LOCK_DISABLE_MASK 0x08000000L
+//DIG4_HDMI_GENERIC_PACKET_CONTROL5
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING__SHIFT 0x11
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND__SHIFT 0x12
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING__SHIFT 0x13
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND__SHIFT 0x14
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING__SHIFT 0x15
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND__SHIFT 0x16
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING__SHIFT 0x17
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND__SHIFT 0x18
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING__SHIFT 0x19
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND__SHIFT 0x1a
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING__SHIFT 0x1b
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND__SHIFT 0x1c
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING__SHIFT 0x1d
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_MASK 0x00010000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING_MASK 0x00020000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_MASK 0x00040000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING_MASK 0x00080000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_MASK 0x00100000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING_MASK 0x00200000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_MASK 0x00400000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING_MASK 0x00800000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_MASK 0x01000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING_MASK 0x02000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_MASK 0x04000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING_MASK 0x08000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_MASK 0x10000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING_MASK 0x20000000L
+//DIG4_HDMI_GC
+#define DIG4_HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0
+#define DIG4_HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define DIG4_HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define DIG4_HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x8
+#define DIG4_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0xc
+#define DIG4_HDMI_GC__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define DIG4_HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define DIG4_HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+#define DIG4_HDMI_GC__HDMI_PACKING_PHASE_MASK 0x00000F00L
+#define DIG4_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x00001000L
+//DIG4_HDMI_GENERIC_PACKET_CONTROL1
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE_MASK 0x0000FFFFL
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE_MASK 0xFFFF0000L
+//DIG4_HDMI_GENERIC_PACKET_CONTROL2
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE_MASK 0x0000FFFFL
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE_MASK 0xFFFF0000L
+//DIG4_HDMI_GENERIC_PACKET_CONTROL3
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE_MASK 0x0000FFFFL
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE_MASK 0xFFFF0000L
+//DIG4_HDMI_GENERIC_PACKET_CONTROL4
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE_MASK 0x0000FFFFL
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE_MASK 0xFFFF0000L
+//DIG4_HDMI_GENERIC_PACKET_CONTROL7
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC8_LINE_MASK 0x0000FFFFL
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL7__HDMI_GENERIC9_LINE_MASK 0xFFFF0000L
+//DIG4_HDMI_GENERIC_PACKET_CONTROL8
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC10_LINE_MASK 0x0000FFFFL
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL8__HDMI_GENERIC11_LINE_MASK 0xFFFF0000L
+//DIG4_HDMI_GENERIC_PACKET_CONTROL9
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC12_LINE_MASK 0x0000FFFFL
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL9__HDMI_GENERIC13_LINE_MASK 0xFFFF0000L
+//DIG4_HDMI_GENERIC_PACKET_CONTROL10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE__SHIFT 0x0
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING__SHIFT 0x10
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING__SHIFT 0x11
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING__SHIFT 0x12
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING__SHIFT 0x13
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING__SHIFT 0x14
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING__SHIFT 0x15
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING__SHIFT 0x16
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING__SHIFT 0x17
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING__SHIFT 0x18
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING__SHIFT 0x19
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING__SHIFT 0x1a
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING__SHIFT 0x1b
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING__SHIFT 0x1c
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING__SHIFT 0x1d
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING__SHIFT 0x1e
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_LINE_MASK 0x0000FFFFL
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC0_EN_DB_PENDING_MASK 0x00010000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC1_EN_DB_PENDING_MASK 0x00020000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC2_EN_DB_PENDING_MASK 0x00040000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC3_EN_DB_PENDING_MASK 0x00080000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC4_EN_DB_PENDING_MASK 0x00100000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC5_EN_DB_PENDING_MASK 0x00200000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC6_EN_DB_PENDING_MASK 0x00400000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC7_EN_DB_PENDING_MASK 0x00800000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC8_EN_DB_PENDING_MASK 0x01000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC9_EN_DB_PENDING_MASK 0x02000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC10_EN_DB_PENDING_MASK 0x04000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC11_EN_DB_PENDING_MASK 0x08000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC12_EN_DB_PENDING_MASK 0x10000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC13_EN_DB_PENDING_MASK 0x20000000L
+#define DIG4_HDMI_GENERIC_PACKET_CONTROL10__HDMI_GENERIC14_EN_DB_PENDING_MASK 0x40000000L
+//DIG4_HDMI_DB_CONTROL
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_TAKEN__SHIFT 0x4
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR__SHIFT 0x5
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_LOCK__SHIFT 0x8
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc
+#define DIG4_HDMI_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf
+#define DIG4_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DIG4_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_TAKEN_MASK 0x00000010L
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR_MASK 0x00000020L
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_LOCK_MASK 0x00000100L
+#define DIG4_HDMI_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L
+#define DIG4_HDMI_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DIG4_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DIG4_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DIG4_HDMI_ACR_32_0
+#define DIG4_HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define DIG4_HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L
+//DIG4_HDMI_ACR_32_1
+#define DIG4_HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define DIG4_HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL
+//DIG4_HDMI_ACR_44_0
+#define DIG4_HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define DIG4_HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L
+//DIG4_HDMI_ACR_44_1
+#define DIG4_HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define DIG4_HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL
+//DIG4_HDMI_ACR_48_0
+#define DIG4_HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define DIG4_HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L
+//DIG4_HDMI_ACR_48_1
+#define DIG4_HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define DIG4_HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL
+//DIG4_HDMI_ACR_STATUS_0
+#define DIG4_HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define DIG4_HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L
+//DIG4_HDMI_ACR_STATUS_1
+#define DIG4_HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define DIG4_HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL
+//DIG4_AFMT_CNTL
+#define DIG4_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN__SHIFT 0x0
+#define DIG4_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
+#define DIG4_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L
+#define DIG4_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L
+//DIG4_DIG_BE_CLK_CNTL
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_MODE__SHIFT 0x0
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN__SHIFT 0x4
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET__SHIFT 0x5
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON__SHIFT 0xb
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON__SHIFT 0xd
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_MODE_MASK 0x00000007L
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN_MASK 0x00000010L
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET_MASK 0x00000020L
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON_MASK 0x00000800L
+#define DIG4_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON_MASK 0x00002000L
+//DIG4_DIG_BE_CNTL
+#define DIG4_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
+#define DIG4_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
+#define DIG4_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
+#define DIG4_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
+#define DIG4_DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x1c
+#define DIG4_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x00000001L
+#define DIG4_DIG_BE_CNTL__DIG_SWAP_MASK 0x00000002L
+#define DIG4_DIG_BE_CNTL__DIG_RB_SWITCH_EN_MASK 0x00000004L
+#define DIG4_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x00007F00L
+#define DIG4_DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000L
+//DIG4_DIG_BE_EN_CNTL
+#define DIG4_DIG_BE_EN_CNTL__DIG_BE_ENABLE__SHIFT 0x0
+#define DIG4_DIG_BE_EN_CNTL__DIG_BE_ENABLE_MASK 0x00000001L
+//DIG4_TMDS_CNTL
+#define DIG4_TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x0
+#define DIG4_TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x00000001L
+//DIG4_TMDS_CONTROL_CHAR
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x0
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x1
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x2
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x3
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x00000001L
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x00000002L
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x00000004L
+#define DIG4_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x00000008L
+//DIG4_TMDS_CONTROL0_FEEDBACK
+#define DIG4_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x0
+#define DIG4_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x8
+#define DIG4_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x00000003L
+#define DIG4_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x00000300L
+//DIG4_TMDS_STEREOSYNC_CTL_SEL
+#define DIG4_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x0
+#define DIG4_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x00000003L
+//DIG4_TMDS_SYNC_CHAR_PATTERN_0_1
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x0
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x10
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x000003FFL
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x03FF0000L
+//DIG4_TMDS_SYNC_CHAR_PATTERN_2_3
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x0
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x10
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x000003FFL
+#define DIG4_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x03FF0000L
+//DIG4_TMDS_CTL_BITS
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x0
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x8
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x10
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x18
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL0_MASK 0x00000001L
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL1_MASK 0x00000100L
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL2_MASK 0x00010000L
+#define DIG4_TMDS_CTL_BITS__TMDS_CTL3_MASK 0x01000000L
+//DIG4_TMDS_DCBALANCER_CONTROL
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x0
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x4
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x8
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x10
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x18
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x00000001L
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x00000070L
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x00000100L
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0x000F0000L
+#define DIG4_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x01000000L
+//DIG4_TMDS_SYNC_DCBALANCE_CHAR
+#define DIG4_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01__SHIFT 0x0
+#define DIG4_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11__SHIFT 0x10
+#define DIG4_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01_MASK 0x000003FFL
+#define DIG4_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11_MASK 0x03FF0000L
+//DIG4_TMDS_CTL0_1_GEN_CNTL
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x0
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x7
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x8
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0xb
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x10
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x14
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x17
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x18
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x1f
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0x0000000FL
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x00000070L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x00000080L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x00000300L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0x000F0000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x00700000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x00800000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x03000000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG4_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000L
+//DIG4_TMDS_CTL2_3_GEN_CNTL
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x0
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x4
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x7
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x8
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0xb
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x10
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x14
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x17
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x18
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0x0000000FL
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x00000070L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x00000080L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x00000300L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0x000F0000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x00700000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x00800000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x03000000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG4_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000L
+//DIG4_DIG_DEBUG
+//DIG4_DIG_VERSION
+#define DIG4_DIG_VERSION__DIG_TYPE__SHIFT 0x0
+#define DIG4_DIG_VERSION__DIG_TYPE_MASK 0x00000001L
+
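Every register in this header follows the <REG>__<FIELD>__SHIFT / <REG>__<FIELD>_MASK pairing: a field is read by masking then shifting down, and updated by clearing the mask before or-ing in the shifted value. A minimal illustrative sketch of that pattern, using the DIG4_HDMI_GC AVMUTE field defined above (the helper names below are hypothetical and not part of this header or the driver, which has its own register-access macros):

#include <stdint.h>

/* Illustrative only: generic accessors for the __SHIFT/_MASK pairs in this
 * header. This is just the bit-manipulation pattern the pairs are designed
 * for, not the driver's real helpers.
 */
static inline uint32_t field_get(uint32_t reg_val, uint32_t mask, uint32_t shift)
{
	return (reg_val & mask) >> shift;
}

static inline uint32_t field_set(uint32_t reg_val, uint32_t mask, uint32_t shift,
				 uint32_t field_val)
{
	return (reg_val & ~mask) | ((field_val << shift) & mask);
}

/* Example: assert HDMI AVMUTE in a DIG4_HDMI_GC register value while leaving
 * the remaining fields untouched.
 */
static inline uint32_t dig4_hdmi_gc_set_avmute(uint32_t gc, uint32_t avmute)
{
	return field_set(gc, DIG4_HDMI_GC__HDMI_GC_AVMUTE_MASK,
			 DIG4_HDMI_GC__HDMI_GC_AVMUTE__SHIFT, avmute);
}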
+
+// addressBlock: dce_dc_dio_dig0_afmt_afmt_dispdec
+//AFMT0_AFMT_ACP
+#define AFMT0_AFMT_ACP__AFMT_ACP_TYPE__SHIFT 0x0
+#define AFMT0_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x8
+#define AFMT0_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x10
+#define AFMT0_AFMT_ACP__AFMT_ACP_TYPE_MASK 0x00000003L
+#define AFMT0_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0_MASK 0x0000FF00L
+#define AFMT0_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1_MASK 0x00FF0000L
+//AFMT0_AFMT_VBI_PACKET_CONTROL
+#define AFMT0_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE__SHIFT 0xd
+#define AFMT0_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define AFMT0_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x18
+#define AFMT0_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE_MASK 0x00002000L
+#define AFMT0_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L
+#define AFMT0_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x01000000L
+//AFMT0_AFMT_AUDIO_PACKET_CONTROL2
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+//AFMT0_AFMT_AUDIO_INFO0
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L
+#define AFMT0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L
+//AFMT0_AFMT_AUDIO_INFO1
+#define AFMT0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define AFMT0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define AFMT0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define AFMT0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define AFMT0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL
+#define AFMT0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define AFMT0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define AFMT0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+//AFMT0_AFMT_60958_0
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L
+#define AFMT0_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+//AFMT0_AFMT_60958_1
+#define AFMT0_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define AFMT0_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define AFMT0_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define AFMT0_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define AFMT0_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define AFMT0_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL
+#define AFMT0_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L
+#define AFMT0_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define AFMT0_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define AFMT0_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L
+//AFMT0_AFMT_AUDIO_CRC_CONTROL
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define AFMT0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//AFMT0_AFMT_RAMP_CONTROL0
+#define AFMT0_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define AFMT0_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define AFMT0_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define AFMT0_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+//AFMT0_AFMT_RAMP_CONTROL1
+#define AFMT0_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define AFMT0_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define AFMT0_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define AFMT0_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//AFMT0_AFMT_RAMP_CONTROL2
+#define AFMT0_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define AFMT0_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+//AFMT0_AFMT_RAMP_CONTROL3
+#define AFMT0_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define AFMT0_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+//AFMT0_AFMT_60958_2
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define AFMT0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+//AFMT0_AFMT_AUDIO_CRC_RESULT
+#define AFMT0_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define AFMT0_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define AFMT0_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define AFMT0_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+//AFMT0_AFMT_STATUS
+#define AFMT0_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define AFMT0_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define AFMT0_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define AFMT0_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define AFMT0_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define AFMT0_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define AFMT0_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define AFMT0_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+//AFMT0_AFMT_AUDIO_PACKET_CONTROL
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+//AFMT0_AFMT_INFOFRAME_CONTROL0
+#define AFMT0_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define AFMT0_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define AFMT0_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define AFMT0_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+//AFMT0_AFMT_AUDIO_SRC_CONTROL
+#define AFMT0_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define AFMT0_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+//AFMT0_AFMT_MEM_PWR
+#define AFMT0_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS__SHIFT 0x0
+#define AFMT0_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE__SHIFT 0x4
+#define AFMT0_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE__SHIFT 0x8
+#define AFMT0_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS_MASK 0x00000001L
+#define AFMT0_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE_MASK 0x00000030L
+#define AFMT0_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE_MASK 0x00000300L
+
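The AFMT0 status and packet-control definitions above pair an overflow flag with an acknowledge bit. A hedged sketch of how those two fields relate, assuming the usual write-1-to-acknowledge behaviour (the helper is hypothetical; the actual overflow handshake lives in the driver, not in this header):

#include <stdint.h>

/* Illustrative only: if AFMT_AUDIO_FIFO_OVERFLOW is set in a value read from
 * AFMT0_AFMT_STATUS, request an acknowledge by setting
 * AFMT_AUDIO_FIFO_OVERFLOW_ACK in the AFMT0_AFMT_AUDIO_PACKET_CONTROL value
 * to be written back. The MMIO reads/writes themselves are out of scope here.
 */
static inline uint32_t afmt0_ack_fifo_overflow(uint32_t status, uint32_t pkt_ctl)
{
	if (status & AFMT0_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK)
		pkt_ctl |= AFMT0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK;
	return pkt_ctl;
}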
+
+// addressBlock: dce_dc_dio_dig1_afmt_afmt_dispdec
+//AFMT1_AFMT_ACP
+#define AFMT1_AFMT_ACP__AFMT_ACP_TYPE__SHIFT 0x0
+#define AFMT1_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x8
+#define AFMT1_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x10
+#define AFMT1_AFMT_ACP__AFMT_ACP_TYPE_MASK 0x00000003L
+#define AFMT1_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0_MASK 0x0000FF00L
+#define AFMT1_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1_MASK 0x00FF0000L
+//AFMT1_AFMT_VBI_PACKET_CONTROL
+#define AFMT1_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE__SHIFT 0xd
+#define AFMT1_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define AFMT1_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x18
+#define AFMT1_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE_MASK 0x00002000L
+#define AFMT1_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L
+#define AFMT1_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x01000000L
+//AFMT1_AFMT_AUDIO_PACKET_CONTROL2
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+//AFMT1_AFMT_AUDIO_INFO0
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L
+#define AFMT1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L
+//AFMT1_AFMT_AUDIO_INFO1
+#define AFMT1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define AFMT1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define AFMT1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define AFMT1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define AFMT1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL
+#define AFMT1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define AFMT1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define AFMT1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+//AFMT1_AFMT_60958_0
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L
+#define AFMT1_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+//AFMT1_AFMT_60958_1
+#define AFMT1_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define AFMT1_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define AFMT1_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define AFMT1_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define AFMT1_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define AFMT1_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL
+#define AFMT1_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L
+#define AFMT1_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define AFMT1_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define AFMT1_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L
+//AFMT1_AFMT_AUDIO_CRC_CONTROL
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define AFMT1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//AFMT1_AFMT_RAMP_CONTROL0
+#define AFMT1_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define AFMT1_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define AFMT1_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define AFMT1_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+//AFMT1_AFMT_RAMP_CONTROL1
+#define AFMT1_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define AFMT1_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define AFMT1_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define AFMT1_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//AFMT1_AFMT_RAMP_CONTROL2
+#define AFMT1_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define AFMT1_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+//AFMT1_AFMT_RAMP_CONTROL3
+#define AFMT1_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define AFMT1_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+//AFMT1_AFMT_60958_2
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define AFMT1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+//AFMT1_AFMT_AUDIO_CRC_RESULT
+#define AFMT1_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define AFMT1_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define AFMT1_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define AFMT1_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+//AFMT1_AFMT_STATUS
+#define AFMT1_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define AFMT1_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define AFMT1_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define AFMT1_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define AFMT1_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define AFMT1_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define AFMT1_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define AFMT1_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+//AFMT1_AFMT_AUDIO_PACKET_CONTROL
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define AFMT1_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+//AFMT1_AFMT_INFOFRAME_CONTROL0
+#define AFMT1_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define AFMT1_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define AFMT1_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define AFMT1_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+//AFMT1_AFMT_AUDIO_SRC_CONTROL
+#define AFMT1_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define AFMT1_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+//AFMT1_AFMT_MEM_PWR
+#define AFMT1_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS__SHIFT 0x0
+#define AFMT1_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE__SHIFT 0x4
+#define AFMT1_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE__SHIFT 0x8
+#define AFMT1_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS_MASK 0x00000001L
+#define AFMT1_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE_MASK 0x00000030L
+#define AFMT1_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE_MASK 0x00000300L
+
+
+// addressBlock: dce_dc_dio_dig2_afmt_afmt_dispdec
+//AFMT2_AFMT_ACP
+#define AFMT2_AFMT_ACP__AFMT_ACP_TYPE__SHIFT 0x0
+#define AFMT2_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x8
+#define AFMT2_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x10
+#define AFMT2_AFMT_ACP__AFMT_ACP_TYPE_MASK 0x00000003L
+#define AFMT2_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0_MASK 0x0000FF00L
+#define AFMT2_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1_MASK 0x00FF0000L
+//AFMT2_AFMT_VBI_PACKET_CONTROL
+#define AFMT2_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE__SHIFT 0xd
+#define AFMT2_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define AFMT2_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x18
+#define AFMT2_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE_MASK 0x00002000L
+#define AFMT2_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L
+#define AFMT2_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x01000000L
+//AFMT2_AFMT_AUDIO_PACKET_CONTROL2
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+//AFMT2_AFMT_AUDIO_INFO0
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L
+#define AFMT2_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L
+//AFMT2_AFMT_AUDIO_INFO1
+#define AFMT2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define AFMT2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define AFMT2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define AFMT2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define AFMT2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL
+#define AFMT2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define AFMT2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define AFMT2_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+//AFMT2_AFMT_60958_0
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L
+#define AFMT2_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+//AFMT2_AFMT_60958_1
+#define AFMT2_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define AFMT2_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define AFMT2_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define AFMT2_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define AFMT2_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define AFMT2_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL
+#define AFMT2_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L
+#define AFMT2_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define AFMT2_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define AFMT2_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L
+//AFMT2_AFMT_AUDIO_CRC_CONTROL
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define AFMT2_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//AFMT2_AFMT_RAMP_CONTROL0
+#define AFMT2_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define AFMT2_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define AFMT2_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define AFMT2_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+//AFMT2_AFMT_RAMP_CONTROL1
+#define AFMT2_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define AFMT2_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define AFMT2_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define AFMT2_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//AFMT2_AFMT_RAMP_CONTROL2
+#define AFMT2_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define AFMT2_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+//AFMT2_AFMT_RAMP_CONTROL3
+#define AFMT2_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define AFMT2_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+//AFMT2_AFMT_60958_2
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define AFMT2_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+//AFMT2_AFMT_AUDIO_CRC_RESULT
+#define AFMT2_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define AFMT2_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define AFMT2_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define AFMT2_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+//AFMT2_AFMT_STATUS
+#define AFMT2_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define AFMT2_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define AFMT2_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define AFMT2_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define AFMT2_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define AFMT2_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define AFMT2_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define AFMT2_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+//AFMT2_AFMT_AUDIO_PACKET_CONTROL
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define AFMT2_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+//AFMT2_AFMT_INFOFRAME_CONTROL0
+#define AFMT2_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define AFMT2_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define AFMT2_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define AFMT2_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+//AFMT2_AFMT_AUDIO_SRC_CONTROL
+#define AFMT2_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define AFMT2_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+//AFMT2_AFMT_MEM_PWR
+#define AFMT2_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS__SHIFT 0x0
+#define AFMT2_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE__SHIFT 0x4
+#define AFMT2_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE__SHIFT 0x8
+#define AFMT2_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS_MASK 0x00000001L
+#define AFMT2_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE_MASK 0x00000030L
+#define AFMT2_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE_MASK 0x00000300L
+
+
+// addressBlock: dce_dc_dio_dig3_afmt_afmt_dispdec
+//AFMT3_AFMT_ACP
+#define AFMT3_AFMT_ACP__AFMT_ACP_TYPE__SHIFT 0x0
+#define AFMT3_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x8
+#define AFMT3_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x10
+#define AFMT3_AFMT_ACP__AFMT_ACP_TYPE_MASK 0x00000003L
+#define AFMT3_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0_MASK 0x0000FF00L
+#define AFMT3_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1_MASK 0x00FF0000L
+//AFMT3_AFMT_VBI_PACKET_CONTROL
+#define AFMT3_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE__SHIFT 0xd
+#define AFMT3_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define AFMT3_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x18
+#define AFMT3_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE_MASK 0x00002000L
+#define AFMT3_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L
+#define AFMT3_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x01000000L
+//AFMT3_AFMT_AUDIO_PACKET_CONTROL2
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+//AFMT3_AFMT_AUDIO_INFO0
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L
+#define AFMT3_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L
+//AFMT3_AFMT_AUDIO_INFO1
+#define AFMT3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define AFMT3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define AFMT3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define AFMT3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define AFMT3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL
+#define AFMT3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define AFMT3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define AFMT3_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+//AFMT3_AFMT_60958_0
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L
+#define AFMT3_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+//AFMT3_AFMT_60958_1
+#define AFMT3_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define AFMT3_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define AFMT3_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define AFMT3_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define AFMT3_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define AFMT3_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL
+#define AFMT3_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L
+#define AFMT3_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define AFMT3_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define AFMT3_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L
+//AFMT3_AFMT_AUDIO_CRC_CONTROL
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define AFMT3_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//AFMT3_AFMT_RAMP_CONTROL0
+#define AFMT3_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define AFMT3_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define AFMT3_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define AFMT3_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+//AFMT3_AFMT_RAMP_CONTROL1
+#define AFMT3_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define AFMT3_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define AFMT3_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define AFMT3_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//AFMT3_AFMT_RAMP_CONTROL2
+#define AFMT3_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define AFMT3_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+//AFMT3_AFMT_RAMP_CONTROL3
+#define AFMT3_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define AFMT3_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+//AFMT3_AFMT_60958_2
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define AFMT3_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+//AFMT3_AFMT_AUDIO_CRC_RESULT
+#define AFMT3_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define AFMT3_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define AFMT3_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define AFMT3_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+//AFMT3_AFMT_STATUS
+#define AFMT3_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define AFMT3_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define AFMT3_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define AFMT3_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define AFMT3_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define AFMT3_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define AFMT3_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define AFMT3_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+//AFMT3_AFMT_AUDIO_PACKET_CONTROL
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define AFMT3_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+//AFMT3_AFMT_INFOFRAME_CONTROL0
+#define AFMT3_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define AFMT3_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define AFMT3_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define AFMT3_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+//AFMT3_AFMT_AUDIO_SRC_CONTROL
+#define AFMT3_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define AFMT3_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+//AFMT3_AFMT_MEM_PWR
+#define AFMT3_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS__SHIFT 0x0
+#define AFMT3_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE__SHIFT 0x4
+#define AFMT3_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE__SHIFT 0x8
+#define AFMT3_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS_MASK 0x00000001L
+#define AFMT3_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE_MASK 0x00000030L
+#define AFMT3_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE_MASK 0x00000300L
+
+
+// addressBlock: dce_dc_dio_dig4_afmt_afmt_dispdec
+//AFMT4_AFMT_ACP
+#define AFMT4_AFMT_ACP__AFMT_ACP_TYPE__SHIFT 0x0
+#define AFMT4_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x8
+#define AFMT4_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x10
+#define AFMT4_AFMT_ACP__AFMT_ACP_TYPE_MASK 0x00000003L
+#define AFMT4_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0_MASK 0x0000FF00L
+#define AFMT4_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1_MASK 0x00FF0000L
+//AFMT4_AFMT_VBI_PACKET_CONTROL
+#define AFMT4_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE__SHIFT 0xd
+#define AFMT4_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define AFMT4_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x18
+#define AFMT4_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE_MASK 0x00002000L
+#define AFMT4_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L
+#define AFMT4_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x01000000L
+//AFMT4_AFMT_AUDIO_PACKET_CONTROL2
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+//AFMT4_AFMT_AUDIO_INFO0
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L
+#define AFMT4_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L
+//AFMT4_AFMT_AUDIO_INFO1
+#define AFMT4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define AFMT4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define AFMT4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define AFMT4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define AFMT4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL
+#define AFMT4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define AFMT4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define AFMT4_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+//AFMT4_AFMT_60958_0
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L
+#define AFMT4_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+//AFMT4_AFMT_60958_1
+#define AFMT4_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define AFMT4_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define AFMT4_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define AFMT4_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define AFMT4_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define AFMT4_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL
+#define AFMT4_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L
+#define AFMT4_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define AFMT4_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define AFMT4_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L
+//AFMT4_AFMT_AUDIO_CRC_CONTROL
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define AFMT4_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//AFMT4_AFMT_RAMP_CONTROL0
+#define AFMT4_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define AFMT4_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define AFMT4_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define AFMT4_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+//AFMT4_AFMT_RAMP_CONTROL1
+#define AFMT4_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define AFMT4_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define AFMT4_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define AFMT4_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//AFMT4_AFMT_RAMP_CONTROL2
+#define AFMT4_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define AFMT4_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+//AFMT4_AFMT_RAMP_CONTROL3
+#define AFMT4_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define AFMT4_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+//AFMT4_AFMT_60958_2
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define AFMT4_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+//AFMT4_AFMT_AUDIO_CRC_RESULT
+#define AFMT4_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define AFMT4_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define AFMT4_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define AFMT4_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+//AFMT4_AFMT_STATUS
+#define AFMT4_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define AFMT4_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define AFMT4_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define AFMT4_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define AFMT4_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define AFMT4_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define AFMT4_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define AFMT4_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+//AFMT4_AFMT_AUDIO_PACKET_CONTROL
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define AFMT4_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+//AFMT4_AFMT_INFOFRAME_CONTROL0
+#define AFMT4_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define AFMT4_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define AFMT4_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define AFMT4_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+//AFMT4_AFMT_AUDIO_SRC_CONTROL
+#define AFMT4_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define AFMT4_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+//AFMT4_AFMT_MEM_PWR
+#define AFMT4_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS__SHIFT 0x0
+#define AFMT4_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE__SHIFT 0x4
+#define AFMT4_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE__SHIFT 0x8
+#define AFMT4_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS_MASK 0x00000001L
+#define AFMT4_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE_MASK 0x00000030L
+#define AFMT4_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE_MASK 0x00000300L
+
+
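[Editor's note] Every block in this hunk follows one fixed convention: each register field gets a __SHIFT define giving the bit position of its least-significant bit and a matching _MASK define covering the field's width, so shifting the mask right by the shift always yields a right-aligned all-ones value. Below is a minimal sketch of how such a pair is typically consumed, using the AFMT4_AFMT_MEM_PWR defines added just above; the helper names and the read/modify flow are illustrative only and are not part of this patch.

    #include <stdint.h>
    /* Assumes the register header from this patch is included so the
     * AFMT4_AFMT_MEM_PWR__* defines are visible. */

    /* Extract the 2-bit memory power state field from a register value. */
    static inline uint32_t afmt4_mem_pwr_state(uint32_t reg_val)
    {
            return (reg_val & AFMT4_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE_MASK) >>
                   AFMT4_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE__SHIFT;
    }

    /* Return reg_val with the MEM_PWR_FORCE field replaced by 'force',
     * leaving all other bits untouched. */
    static inline uint32_t afmt4_set_mem_pwr_force(uint32_t reg_val, uint32_t force)
    {
            reg_val &= ~AFMT4_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE_MASK;
            reg_val |= (force << AFMT4_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE__SHIFT) &
                       AFMT4_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE_MASK;
            return reg_val;
    }

In the amdgpu driver itself, fields defined this way are normally accessed through the REG_GET_FIELD()/REG_SET_FIELD() helpers, which paste the register and field names into exactly these __SHIFT/_MASK identifiers and expand to the same shift-and-mask construction shown above.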
+// addressBlock: dce_dc_dio_dig0_dme_dme_dispdec
+//DME0_DME_CONTROL
+#define DME0_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME0_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME0_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME0_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME0_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME0_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME0_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME0_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME0_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME0_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME0_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME0_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME0_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME0_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME0_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME0_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME0_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME0_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+//DME0_DME_MEMORY_CONTROL
+#define DME0_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME0_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME0_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME0_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME0_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME0_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME0_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME0_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+
+
+// addressBlock: dce_dc_dio_dig0_vpg_vpg_dispdec
+//VPG0_VPG_GENERIC_PACKET_ACCESS_CTRL
+#define VPG0_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG0_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+//VPG0_VPG_GENERIC_PACKET_DATA
+#define VPG0_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG0_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG0_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG0_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG0_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG0_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG0_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG0_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+//VPG0_VPG_GSP_FRAME_UPDATE_CTRL
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG0_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+//VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+//VPG0_VPG_GENERIC_STATUS
+#define VPG0_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG0_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG0_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG0_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG0_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG0_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+//VPG0_VPG_MEM_PWR
+#define VPG0_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG0_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG0_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG0_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG0_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG0_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+//VPG0_VPG_ISRC1_2_ACCESS_CTRL
+#define VPG0_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG0_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+//VPG0_VPG_ISRC1_2_DATA
+#define VPG0_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG0_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG0_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG0_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG0_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG0_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG0_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG0_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+//VPG0_VPG_MPEG_INFO0
+#define VPG0_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG0_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG0_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG0_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG0_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG0_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG0_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG0_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+//VPG0_VPG_MPEG_INFO1
+#define VPG0_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG0_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG0_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG0_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG0_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG0_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG0_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG0_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+
+
+// addressBlock: dce_dc_dio_dig1_dme_dme_dispdec
+//DME1_DME_CONTROL
+#define DME1_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME1_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME1_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME1_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME1_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME1_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME1_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME1_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME1_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME1_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME1_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME1_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME1_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME1_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME1_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME1_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME1_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME1_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+//DME1_DME_MEMORY_CONTROL
+#define DME1_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME1_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME1_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME1_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME1_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME1_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME1_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME1_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+
+
+// addressBlock: dce_dc_dio_dig1_vpg_vpg_dispdec
+//VPG1_VPG_GENERIC_PACKET_ACCESS_CTRL
+#define VPG1_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG1_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+//VPG1_VPG_GENERIC_PACKET_DATA
+#define VPG1_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG1_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG1_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG1_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG1_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG1_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG1_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG1_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+//VPG1_VPG_GSP_FRAME_UPDATE_CTRL
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG1_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+//VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG1_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+//VPG1_VPG_GENERIC_STATUS
+#define VPG1_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG1_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG1_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG1_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG1_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG1_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+//VPG1_VPG_MEM_PWR
+#define VPG1_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG1_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG1_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG1_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG1_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG1_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+//VPG1_VPG_ISRC1_2_ACCESS_CTRL
+#define VPG1_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG1_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+//VPG1_VPG_ISRC1_2_DATA
+#define VPG1_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG1_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG1_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG1_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG1_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG1_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG1_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG1_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+//VPG1_VPG_MPEG_INFO0
+#define VPG1_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG1_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG1_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG1_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG1_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG1_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG1_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG1_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+//VPG1_VPG_MPEG_INFO1
+#define VPG1_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG1_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG1_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG1_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG1_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG1_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG1_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG1_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+
+
+// addressBlock: dce_dc_dio_dig2_dme_dme_dispdec
+//DME2_DME_CONTROL
+#define DME2_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME2_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME2_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME2_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME2_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME2_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME2_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME2_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME2_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME2_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME2_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME2_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME2_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME2_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME2_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME2_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME2_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME2_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+//DME2_DME_MEMORY_CONTROL
+#define DME2_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME2_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME2_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME2_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME2_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME2_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME2_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME2_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+
+
+// addressBlock: dce_dc_dio_dig2_vpg_vpg_dispdec
+//VPG2_VPG_GENERIC_PACKET_ACCESS_CTRL
+#define VPG2_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG2_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+//VPG2_VPG_GENERIC_PACKET_DATA
+#define VPG2_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG2_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG2_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG2_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG2_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG2_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG2_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG2_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+//VPG2_VPG_GSP_FRAME_UPDATE_CTRL
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG2_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+//VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG2_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+//VPG2_VPG_GENERIC_STATUS
+#define VPG2_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG2_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG2_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG2_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG2_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG2_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+//VPG2_VPG_MEM_PWR
+#define VPG2_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG2_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG2_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG2_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG2_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG2_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+//VPG2_VPG_ISRC1_2_ACCESS_CTRL
+#define VPG2_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG2_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+//VPG2_VPG_ISRC1_2_DATA
+#define VPG2_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG2_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG2_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG2_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG2_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG2_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG2_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG2_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+//VPG2_VPG_MPEG_INFO0
+#define VPG2_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG2_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG2_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG2_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG2_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG2_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG2_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG2_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+//VPG2_VPG_MPEG_INFO1
+#define VPG2_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG2_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG2_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG2_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG2_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG2_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG2_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG2_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+
+
+// addressBlock: dce_dc_dio_dig3_dme_dme_dispdec
+//DME3_DME_CONTROL
+#define DME3_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME3_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME3_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME3_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME3_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME3_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME3_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME3_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME3_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME3_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME3_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME3_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME3_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME3_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME3_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME3_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME3_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME3_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+//DME3_DME_MEMORY_CONTROL
+#define DME3_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME3_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME3_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME3_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME3_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME3_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME3_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME3_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+
+
+// addressBlock: dce_dc_dio_dig3_vpg_vpg_dispdec
+//VPG3_VPG_GENERIC_PACKET_ACCESS_CTRL
+#define VPG3_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG3_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+//VPG3_VPG_GENERIC_PACKET_DATA
+#define VPG3_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG3_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG3_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG3_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG3_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG3_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG3_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG3_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+//VPG3_VPG_GSP_FRAME_UPDATE_CTRL
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG3_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+//VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG3_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+//VPG3_VPG_GENERIC_STATUS
+#define VPG3_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG3_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG3_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG3_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG3_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG3_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+//VPG3_VPG_MEM_PWR
+#define VPG3_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG3_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG3_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG3_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG3_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG3_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+//VPG3_VPG_ISRC1_2_ACCESS_CTRL
+#define VPG3_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG3_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+//VPG3_VPG_ISRC1_2_DATA
+#define VPG3_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG3_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG3_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG3_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG3_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG3_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG3_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG3_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+//VPG3_VPG_MPEG_INFO0
+#define VPG3_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG3_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG3_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG3_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG3_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG3_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG3_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG3_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+//VPG3_VPG_MPEG_INFO1
+#define VPG3_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG3_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG3_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG3_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG3_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG3_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG3_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG3_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+
+
+// addressBlock: dce_dc_dio_dig4_dme_dme_dispdec
+//DME4_DME_CONTROL
+#define DME4_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME4_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME4_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME4_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME4_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME4_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME4_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME4_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME4_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME4_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME4_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME4_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME4_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME4_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME4_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME4_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME4_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME4_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+//DME4_DME_MEMORY_CONTROL
+#define DME4_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME4_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME4_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME4_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME4_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME4_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME4_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME4_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+
+
+// addressBlock: dce_dc_dio_dig4_vpg_vpg_dispdec
+//VPG4_VPG_GENERIC_PACKET_ACCESS_CTRL
+#define VPG4_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG4_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+//VPG4_VPG_GENERIC_PACKET_DATA
+#define VPG4_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG4_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG4_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG4_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG4_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG4_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG4_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG4_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+//VPG4_VPG_GSP_FRAME_UPDATE_CTRL
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG4_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+//VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG4_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+//VPG4_VPG_GENERIC_STATUS
+#define VPG4_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG4_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG4_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG4_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG4_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG4_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+//VPG4_VPG_MEM_PWR
+#define VPG4_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG4_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG4_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG4_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG4_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG4_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+//VPG4_VPG_ISRC1_2_ACCESS_CTRL
+#define VPG4_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG4_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+//VPG4_VPG_ISRC1_2_DATA
+#define VPG4_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG4_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG4_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG4_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG4_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG4_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG4_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG4_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+//VPG4_VPG_MPEG_INFO0
+#define VPG4_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG4_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG4_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG4_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG4_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG4_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG4_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG4_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+//VPG4_VPG_MPEG_INFO1
+#define VPG4_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG4_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG4_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG4_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG4_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG4_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG4_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG4_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+
+
+// addressBlock: dce_dc_dio_dp_aux0_dispdec
+//DP_AUX0_AUX_CONTROL
+#define DP_AUX0_AUX_CONTROL__AUX_EN__SHIFT 0x0
+#define DP_AUX0_AUX_CONTROL__AUX_RESET__SHIFT 0x4
+#define DP_AUX0_AUX_CONTROL__AUX_RESET_DONE__SHIFT 0x5
+#define DP_AUX0_AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x8
+#define DP_AUX0_AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0xc
+#define DP_AUX0_AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x10
+#define DP_AUX0_AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x12
+#define DP_AUX0_AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14
+#define DP_AUX0_AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x18
+#define DP_AUX0_AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x1c
+#define DP_AUX0_AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x1d
+#define DP_AUX0_AUX_CONTROL__SPARE_0__SHIFT 0x1e
+#define DP_AUX0_AUX_CONTROL__SPARE_1__SHIFT 0x1f
+#define DP_AUX0_AUX_CONTROL__AUX_EN_MASK 0x00000001L
+#define DP_AUX0_AUX_CONTROL__AUX_RESET_MASK 0x00000010L
+#define DP_AUX0_AUX_CONTROL__AUX_RESET_DONE_MASK 0x00000020L
+#define DP_AUX0_AUX_CONTROL__AUX_LS_READ_EN_MASK 0x00000100L
+#define DP_AUX0_AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x00001000L
+#define DP_AUX0_AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x00010000L
+#define DP_AUX0_AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x00040000L
+#define DP_AUX0_AUX_CONTROL__AUX_HPD_SEL_MASK 0x00700000L
+#define DP_AUX0_AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x01000000L
+#define DP_AUX0_AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000L
+#define DP_AUX0_AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000L
+#define DP_AUX0_AUX_CONTROL__SPARE_0_MASK 0x40000000L
+#define DP_AUX0_AUX_CONTROL__SPARE_1_MASK 0x80000000L
+//DP_AUX0_AUX_SW_CONTROL
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x0
+#define DP_AUX0_AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x2
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x4
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x10
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_GO_MASK 0x00000001L
+#define DP_AUX0_AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x00000004L
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0x000000F0L
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x001F0000L
+//DP_AUX0_AUX_ARB_CONTROL
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x0
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x8
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0xa
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x11
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x19
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x00000003L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x00000100L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x00000400L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x00020000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x02000000L
+//DP_AUX0_AUX_INTERRUPT_CONTROL
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x0
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x1
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x2
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x4
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x5
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x6
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT__SHIFT 0x8
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK__SHIFT 0x9
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK__SHIFT 0xa
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT__SHIFT 0xc
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK__SHIFT 0xd
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK__SHIFT 0xe
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x00000001L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x00000002L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x00000004L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x00000010L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x00000020L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x00000040L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK 0x00000100L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK_MASK 0x00000200L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK_MASK 0x00000400L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK 0x00001000L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK_MASK 0x00002000L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK_MASK 0x00004000L
+//DP_AUX0_AUX_SW_STATUS
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x0
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x1
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x9
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX0_AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x1d
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_DONE_MASK 0x00000001L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_REQ_MASK 0x00000002L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xE0000000L
+//DP_AUX0_AUX_LS_STATUS
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x0
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x1
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x9
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x1d
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x1e
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x1f
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_DONE_MASK 0x00000001L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_REQ_MASK 0x00000002L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000L
+//DP_AUX0_AUX_SW_DATA
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x0
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x8
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x10
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x1f
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x00000001L
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_DATA_MASK 0x0000FF00L
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_INDEX_MASK 0x001F0000L
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000L
+//DP_AUX0_AUX_LS_DATA
+#define DP_AUX0_AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x8
+#define DP_AUX0_AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x10
+#define DP_AUX0_AUX_LS_DATA__AUX_LS_DATA_MASK 0x0000FF00L
+#define DP_AUX0_AUX_LS_DATA__AUX_LS_INDEX_MASK 0x001F0000L
+//DP_AUX0_AUX_DPHY_TX_REF_CONTROL
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x4
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x00000001L
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x00000030L
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x01FF0000L
+//DP_AUX0_AUX_DPHY_TX_CONTROL
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL__SHIFT 0x4
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME__SHIFT 0x6
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x8
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x0000000FL
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL_MASK 0x00000030L
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME_MASK 0x00000040L
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x00003F00L
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY_MASK 0x00070000L
+//DP_AUX0_AUX_DPHY_RX_CONTROL0
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x4
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x8
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0xc
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x11
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x12
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x13
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x14
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x1c
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x00000070L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x00000700L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x00003000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x00010000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x00020000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x00040000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x00080000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x00300000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000L
+//DP_AUX0_AUX_DPHY_RX_CONTROL1
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN__SHIFT 0x8
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL__SHIFT 0xf
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0x000000FFL
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MASK 0x00007F00L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL_MASK 0x00018000L
+//DP_AUX0_AUX_DPHY_TX_STATUS
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x4
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x00000001L
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x00000070L
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x01FF0000L
+//DP_AUX0_AUX_DPHY_RX_STATUS
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x8
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x15
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x00000007L
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x00001F00L
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x001F0000L
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3FE00000L
+//DP_AUX0_AUX_GTC_SYNC_CONTROL
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN__SHIFT 0x0
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN__SHIFT 0x4
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL__SHIFT 0x8
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD__SHIFT 0xc
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD__SHIFT 0x10
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ__SHIFT 0x14
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW__SHIFT 0x16
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT__SHIFT 0x18
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT__SHIFT 0x1c
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN_MASK 0x00000001L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN_MASK 0x00000010L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL_MASK 0x00000F00L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD_MASK 0x0000F000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD_MASK 0x00070000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ_MASK 0x00100000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW_MASK 0x00C00000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT_MASK 0x03000000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT_MASK 0xF0000000L
+//DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD__SHIFT 0x0
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD__SHIFT 0x8
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN__SHIFT 0x10
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT__SHIFT 0x14
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD_MASK 0x0000001FL
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD_MASK 0x00001F00L
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN_MASK 0x00030000L
+#define DP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT_MASK 0x00300000L
+//DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE__SHIFT 0x0
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST__SHIFT 0x4
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED__SHIFT 0x8
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE__SHIFT 0x9
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL__SHIFT 0x10
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED__SHIFT 0x14
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK__SHIFT 0x15
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED__SHIFT 0x16
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK__SHIFT 0x17
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED__SHIFT 0x18
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK__SHIFT 0x19
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE__SHIFT 0x1c
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE_MASK 0x00000001L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST_MASK 0x00000010L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED_MASK 0x00000100L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE_MASK 0x00001E00L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL_MASK 0x00010000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_MASK 0x00100000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK_MASK 0x00200000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_MASK 0x00400000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK_MASK 0x00800000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_MASK 0x01000000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK_MASK 0x02000000L
+#define DP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE_MASK 0xF0000000L
+//DP_AUX0_AUX_GTC_SYNC_STATUS
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE__SHIFT 0x0
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ__SHIFT 0x1
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT__SHIFT 0x7
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON__SHIFT 0x9
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED__SHIFT 0x1d
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX__SHIFT 0x1e
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE_MASK 0x00000001L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ_MASK 0x00000002L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT_MASK 0x00000080L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED_MASK 0x20000000L
+#define DP_AUX0_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX_MASK 0x40000000L
+//DP_AUX0_AUX_PHY_WAKE_CNTL
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO__SHIFT 0x0
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING__SHIFT 0x1
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY__SHIFT 0x2
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK__SHIFT 0x3
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO_MASK 0x00000001L
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING_MASK 0x00000002L
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY_MASK 0x00000004L
+#define DP_AUX0_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK_MASK 0x00000008L
+
+
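The dp_aux0 block above is the template that the dp_aux1/dp_aux2 blocks below repeat per AUX engine: each register field gets a __SHIFT define and a matching _MASK define. A minimal sketch of the usual mask-then-shift access pattern, using only defines from this block; the helper name and the linux/types.h include are illustrative assumptions, not part of the patch:

	#include <linux/types.h>

	/* Illustrative only: pull the reply byte count (bits 28:24) out of a
	 * previously read DP_AUX0_AUX_SW_STATUS register value. */
	static inline u32 dp_aux0_reply_byte_count(u32 aux_sw_status)
	{
		return (aux_sw_status & DP_AUX0_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK) >>
		       DP_AUX0_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT;
	}

Writing a field is the inverse: shift the new value up by the __SHIFT define and merge it under the corresponding _MASK before writing the register back.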
+// addressBlock: dce_dc_dio_dp_aux1_dispdec
+//DP_AUX1_AUX_CONTROL
+#define DP_AUX1_AUX_CONTROL__AUX_EN__SHIFT 0x0
+#define DP_AUX1_AUX_CONTROL__AUX_RESET__SHIFT 0x4
+#define DP_AUX1_AUX_CONTROL__AUX_RESET_DONE__SHIFT 0x5
+#define DP_AUX1_AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x8
+#define DP_AUX1_AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0xc
+#define DP_AUX1_AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x10
+#define DP_AUX1_AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x12
+#define DP_AUX1_AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14
+#define DP_AUX1_AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x18
+#define DP_AUX1_AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x1c
+#define DP_AUX1_AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x1d
+#define DP_AUX1_AUX_CONTROL__SPARE_0__SHIFT 0x1e
+#define DP_AUX1_AUX_CONTROL__SPARE_1__SHIFT 0x1f
+#define DP_AUX1_AUX_CONTROL__AUX_EN_MASK 0x00000001L
+#define DP_AUX1_AUX_CONTROL__AUX_RESET_MASK 0x00000010L
+#define DP_AUX1_AUX_CONTROL__AUX_RESET_DONE_MASK 0x00000020L
+#define DP_AUX1_AUX_CONTROL__AUX_LS_READ_EN_MASK 0x00000100L
+#define DP_AUX1_AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x00001000L
+#define DP_AUX1_AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x00010000L
+#define DP_AUX1_AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x00040000L
+#define DP_AUX1_AUX_CONTROL__AUX_HPD_SEL_MASK 0x00700000L
+#define DP_AUX1_AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x01000000L
+#define DP_AUX1_AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000L
+#define DP_AUX1_AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000L
+#define DP_AUX1_AUX_CONTROL__SPARE_0_MASK 0x40000000L
+#define DP_AUX1_AUX_CONTROL__SPARE_1_MASK 0x80000000L
+//DP_AUX1_AUX_SW_CONTROL
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x0
+#define DP_AUX1_AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x2
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x4
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x10
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_GO_MASK 0x00000001L
+#define DP_AUX1_AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x00000004L
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0x000000F0L
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x001F0000L
+//DP_AUX1_AUX_ARB_CONTROL
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x0
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x8
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0xa
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x11
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x19
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x00000003L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x00000100L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x00000400L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x00020000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x02000000L
+//DP_AUX1_AUX_INTERRUPT_CONTROL
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x0
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x1
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x2
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x4
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x5
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x6
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT__SHIFT 0x8
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK__SHIFT 0x9
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK__SHIFT 0xa
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT__SHIFT 0xc
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK__SHIFT 0xd
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK__SHIFT 0xe
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x00000001L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x00000002L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x00000004L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x00000010L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x00000020L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x00000040L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK 0x00000100L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK_MASK 0x00000200L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK_MASK 0x00000400L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK 0x00001000L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK_MASK 0x00002000L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK_MASK 0x00004000L
+//DP_AUX1_AUX_SW_STATUS
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x0
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x1
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x9
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX1_AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x1d
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_DONE_MASK 0x00000001L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_REQ_MASK 0x00000002L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xE0000000L
+//DP_AUX1_AUX_LS_STATUS
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x0
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x1
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x9
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x1d
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x1e
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x1f
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_DONE_MASK 0x00000001L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_REQ_MASK 0x00000002L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000L
+//DP_AUX1_AUX_SW_DATA
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x0
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x8
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x10
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x1f
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x00000001L
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_DATA_MASK 0x0000FF00L
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_INDEX_MASK 0x001F0000L
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000L
+//DP_AUX1_AUX_LS_DATA
+#define DP_AUX1_AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x8
+#define DP_AUX1_AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x10
+#define DP_AUX1_AUX_LS_DATA__AUX_LS_DATA_MASK 0x0000FF00L
+#define DP_AUX1_AUX_LS_DATA__AUX_LS_INDEX_MASK 0x001F0000L
+//DP_AUX1_AUX_DPHY_TX_REF_CONTROL
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x4
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x00000001L
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x00000030L
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x01FF0000L
+//DP_AUX1_AUX_DPHY_TX_CONTROL
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL__SHIFT 0x4
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME__SHIFT 0x6
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x8
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x0000000FL
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL_MASK 0x00000030L
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME_MASK 0x00000040L
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x00003F00L
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY_MASK 0x00070000L
+//DP_AUX1_AUX_DPHY_RX_CONTROL0
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x4
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x8
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0xc
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x11
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x12
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x13
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x14
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x1c
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x00000070L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x00000700L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x00003000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x00010000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x00020000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x00040000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x00080000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x00300000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000L
+//DP_AUX1_AUX_DPHY_RX_CONTROL1
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN__SHIFT 0x8
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL__SHIFT 0xf
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0x000000FFL
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MASK 0x00007F00L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL_MASK 0x00018000L
+//DP_AUX1_AUX_DPHY_TX_STATUS
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x4
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x00000001L
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x00000070L
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x01FF0000L
+//DP_AUX1_AUX_DPHY_RX_STATUS
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x8
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x15
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x00000007L
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x00001F00L
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x001F0000L
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3FE00000L
+//DP_AUX1_AUX_GTC_SYNC_CONTROL
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN__SHIFT 0x0
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN__SHIFT 0x4
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL__SHIFT 0x8
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD__SHIFT 0xc
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD__SHIFT 0x10
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ__SHIFT 0x14
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW__SHIFT 0x16
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT__SHIFT 0x18
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT__SHIFT 0x1c
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN_MASK 0x00000001L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN_MASK 0x00000010L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL_MASK 0x00000F00L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD_MASK 0x0000F000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD_MASK 0x00070000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ_MASK 0x00100000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW_MASK 0x00C00000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT_MASK 0x03000000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT_MASK 0xF0000000L
+//DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD__SHIFT 0x0
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD__SHIFT 0x8
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN__SHIFT 0x10
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT__SHIFT 0x14
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD_MASK 0x0000001FL
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD_MASK 0x00001F00L
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN_MASK 0x00030000L
+#define DP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT_MASK 0x00300000L
+//DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE__SHIFT 0x0
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST__SHIFT 0x4
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED__SHIFT 0x8
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE__SHIFT 0x9
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL__SHIFT 0x10
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED__SHIFT 0x14
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK__SHIFT 0x15
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED__SHIFT 0x16
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK__SHIFT 0x17
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED__SHIFT 0x18
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK__SHIFT 0x19
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE__SHIFT 0x1c
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE_MASK 0x00000001L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST_MASK 0x00000010L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED_MASK 0x00000100L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE_MASK 0x00001E00L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL_MASK 0x00010000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_MASK 0x00100000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK_MASK 0x00200000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_MASK 0x00400000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK_MASK 0x00800000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_MASK 0x01000000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK_MASK 0x02000000L
+#define DP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE_MASK 0xF0000000L
+//DP_AUX1_AUX_GTC_SYNC_STATUS
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE__SHIFT 0x0
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ__SHIFT 0x1
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT__SHIFT 0x7
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON__SHIFT 0x9
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED__SHIFT 0x1d
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX__SHIFT 0x1e
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE_MASK 0x00000001L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ_MASK 0x00000002L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT_MASK 0x00000080L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED_MASK 0x20000000L
+#define DP_AUX1_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX_MASK 0x40000000L
+//DP_AUX1_AUX_PHY_WAKE_CNTL
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO__SHIFT 0x0
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING__SHIFT 0x1
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY__SHIFT 0x2
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK__SHIFT 0x3
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO_MASK 0x00000001L
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING_MASK 0x00000002L
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY_MASK 0x00000004L
+#define DP_AUX1_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK_MASK 0x00000008L
+
+
+// addressBlock: dce_dc_dio_dp_aux2_dispdec
+//DP_AUX2_AUX_CONTROL
+#define DP_AUX2_AUX_CONTROL__AUX_EN__SHIFT 0x0
+#define DP_AUX2_AUX_CONTROL__AUX_RESET__SHIFT 0x4
+#define DP_AUX2_AUX_CONTROL__AUX_RESET_DONE__SHIFT 0x5
+#define DP_AUX2_AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x8
+#define DP_AUX2_AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0xc
+#define DP_AUX2_AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x10
+#define DP_AUX2_AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x12
+#define DP_AUX2_AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14
+#define DP_AUX2_AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x18
+#define DP_AUX2_AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x1c
+#define DP_AUX2_AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x1d
+#define DP_AUX2_AUX_CONTROL__SPARE_0__SHIFT 0x1e
+#define DP_AUX2_AUX_CONTROL__SPARE_1__SHIFT 0x1f
+#define DP_AUX2_AUX_CONTROL__AUX_EN_MASK 0x00000001L
+#define DP_AUX2_AUX_CONTROL__AUX_RESET_MASK 0x00000010L
+#define DP_AUX2_AUX_CONTROL__AUX_RESET_DONE_MASK 0x00000020L
+#define DP_AUX2_AUX_CONTROL__AUX_LS_READ_EN_MASK 0x00000100L
+#define DP_AUX2_AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x00001000L
+#define DP_AUX2_AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x00010000L
+#define DP_AUX2_AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x00040000L
+#define DP_AUX2_AUX_CONTROL__AUX_HPD_SEL_MASK 0x00700000L
+#define DP_AUX2_AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x01000000L
+#define DP_AUX2_AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000L
+#define DP_AUX2_AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000L
+#define DP_AUX2_AUX_CONTROL__SPARE_0_MASK 0x40000000L
+#define DP_AUX2_AUX_CONTROL__SPARE_1_MASK 0x80000000L
+//DP_AUX2_AUX_SW_CONTROL
+#define DP_AUX2_AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x0
+#define DP_AUX2_AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x2
+#define DP_AUX2_AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x4
+#define DP_AUX2_AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x10
+#define DP_AUX2_AUX_SW_CONTROL__AUX_SW_GO_MASK 0x00000001L
+#define DP_AUX2_AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x00000004L
+#define DP_AUX2_AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0x000000F0L
+#define DP_AUX2_AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x001F0000L
+//DP_AUX2_AUX_ARB_CONTROL
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x0
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x8
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0xa
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x11
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x19
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x00000003L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x00000100L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x00000400L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x00020000L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX2_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x02000000L
+//DP_AUX2_AUX_INTERRUPT_CONTROL
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x0
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x1
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x2
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x4
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x5
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x6
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT__SHIFT 0x8
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK__SHIFT 0x9
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK__SHIFT 0xa
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT__SHIFT 0xc
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK__SHIFT 0xd
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK__SHIFT 0xe
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x00000001L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x00000002L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x00000004L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x00000010L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x00000020L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x00000040L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK 0x00000100L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK_MASK 0x00000200L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK_MASK 0x00000400L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK 0x00001000L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK_MASK 0x00002000L
+#define DP_AUX2_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK_MASK 0x00004000L
+//DP_AUX2_AUX_SW_STATUS
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x0
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x1
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x9
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX2_AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x1d
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_DONE_MASK 0x00000001L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_REQ_MASK 0x00000002L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX2_AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xE0000000L
+//DP_AUX2_AUX_LS_STATUS
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x0
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x1
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x9
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x1d
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x1e
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x1f
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_DONE_MASK 0x00000001L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_REQ_MASK 0x00000002L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000L
+#define DP_AUX2_AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000L
+//DP_AUX2_AUX_SW_DATA
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x0
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x8
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x10
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x1f
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x00000001L
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_DATA_MASK 0x0000FF00L
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_INDEX_MASK 0x001F0000L
+#define DP_AUX2_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000L
+//DP_AUX2_AUX_LS_DATA
+#define DP_AUX2_AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x8
+#define DP_AUX2_AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x10
+#define DP_AUX2_AUX_LS_DATA__AUX_LS_DATA_MASK 0x0000FF00L
+#define DP_AUX2_AUX_LS_DATA__AUX_LS_INDEX_MASK 0x001F0000L
+//DP_AUX2_AUX_DPHY_TX_REF_CONTROL
+#define DP_AUX2_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x0
+#define DP_AUX2_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x4
+#define DP_AUX2_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x10
+#define DP_AUX2_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x00000001L
+#define DP_AUX2_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x00000030L
+#define DP_AUX2_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x01FF0000L
+//DP_AUX2_AUX_DPHY_TX_CONTROL
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x0
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL__SHIFT 0x4
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME__SHIFT 0x6
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x8
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY__SHIFT 0x10
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x0000000FL
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL_MASK 0x00000030L
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME_MASK 0x00000040L
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x00003F00L
+#define DP_AUX2_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY_MASK 0x00070000L
+//DP_AUX2_AUX_DPHY_RX_CONTROL0
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x4
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x8
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0xc
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x10
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x11
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x12
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x13
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x14
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x1c
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x00000070L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x00000700L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x00003000L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x00010000L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x00020000L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x00040000L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x00080000L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x00300000L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000L
+//DP_AUX2_AUX_DPHY_RX_CONTROL1
+#define DP_AUX2_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x0
+#define DP_AUX2_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN__SHIFT 0x8
+#define DP_AUX2_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL__SHIFT 0xf
+#define DP_AUX2_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0x000000FFL
+#define DP_AUX2_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MASK 0x00007F00L
+#define DP_AUX2_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL_MASK 0x00018000L
+//DP_AUX2_AUX_DPHY_TX_STATUS
+#define DP_AUX2_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x0
+#define DP_AUX2_AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x4
+#define DP_AUX2_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x10
+#define DP_AUX2_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x00000001L
+#define DP_AUX2_AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x00000070L
+#define DP_AUX2_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x01FF0000L
+//DP_AUX2_AUX_DPHY_RX_STATUS
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x0
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x8
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x10
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x15
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x00000007L
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x00001F00L
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x001F0000L
+#define DP_AUX2_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3FE00000L
+//DP_AUX2_AUX_GTC_SYNC_CONTROL
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN__SHIFT 0x0
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN__SHIFT 0x4
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL__SHIFT 0x8
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD__SHIFT 0xc
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD__SHIFT 0x10
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ__SHIFT 0x14
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW__SHIFT 0x16
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT__SHIFT 0x18
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT__SHIFT 0x1c
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN_MASK 0x00000001L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN_MASK 0x00000010L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL_MASK 0x00000F00L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD_MASK 0x0000F000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD_MASK 0x00070000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ_MASK 0x00100000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW_MASK 0x00C00000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT_MASK 0x03000000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT_MASK 0xF0000000L
+//DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD__SHIFT 0x0
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD__SHIFT 0x8
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN__SHIFT 0x10
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT__SHIFT 0x14
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD_MASK 0x0000001FL
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD_MASK 0x00001F00L
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN_MASK 0x00030000L
+#define DP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT_MASK 0x00300000L
+//DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE__SHIFT 0x0
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST__SHIFT 0x4
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED__SHIFT 0x8
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE__SHIFT 0x9
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL__SHIFT 0x10
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED__SHIFT 0x14
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK__SHIFT 0x15
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED__SHIFT 0x16
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK__SHIFT 0x17
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED__SHIFT 0x18
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK__SHIFT 0x19
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE__SHIFT 0x1c
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE_MASK 0x00000001L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST_MASK 0x00000010L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED_MASK 0x00000100L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE_MASK 0x00001E00L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL_MASK 0x00010000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_MASK 0x00100000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK_MASK 0x00200000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_MASK 0x00400000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK_MASK 0x00800000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_MASK 0x01000000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK_MASK 0x02000000L
+#define DP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE_MASK 0xF0000000L
+//DP_AUX2_AUX_GTC_SYNC_STATUS
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE__SHIFT 0x0
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ__SHIFT 0x1
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT__SHIFT 0x7
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON__SHIFT 0x9
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED__SHIFT 0x1d
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX__SHIFT 0x1e
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE_MASK 0x00000001L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ_MASK 0x00000002L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT_MASK 0x00000080L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED_MASK 0x20000000L
+#define DP_AUX2_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX_MASK 0x40000000L
+//DP_AUX2_AUX_PHY_WAKE_CNTL
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO__SHIFT 0x0
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING__SHIFT 0x1
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY__SHIFT 0x2
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK__SHIFT 0x3
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO_MASK 0x00000001L
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING_MASK 0x00000002L
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY_MASK 0x00000004L
+#define DP_AUX2_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK_MASK 0x00000008L
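
As an aside (editor's sketch, not part of the patch above): each register field in this generated header is described by a paired __SHIFT/_MASK macro, and drivers consume them by masking the register value and shifting the field down on reads, or shifting a value up and masking it in on writes. A minimal illustration follows, assuming this header is included; the helper names below are hypothetical and do not exist in the amdgpu driver.

#include <stdint.h>

/* Generic field accessors built on the __SHIFT/_MASK pairs defined above. */
static inline uint32_t aux_field_get(uint32_t reg, uint32_t mask, uint32_t shift)
{
	/* Isolate the field bits, then right-align them. */
	return (reg & mask) >> shift;
}

static inline uint32_t aux_field_set(uint32_t reg, uint32_t mask, uint32_t shift,
				     uint32_t val)
{
	/* Clear the field, then merge in the new value, clamped to the field width. */
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Example: extract the 5-bit reply byte count from a DP_AUX2_AUX_SW_STATUS value. */
static inline uint32_t dp_aux2_reply_bytes(uint32_t sw_status)
{
	return aux_field_get(sw_status,
			     DP_AUX2_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK,
			     DP_AUX2_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT);
}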
+
+
+// addressBlock: dce_dc_dio_dp_aux3_dispdec
+//DP_AUX3_AUX_CONTROL
+#define DP_AUX3_AUX_CONTROL__AUX_EN__SHIFT 0x0
+#define DP_AUX3_AUX_CONTROL__AUX_RESET__SHIFT 0x4
+#define DP_AUX3_AUX_CONTROL__AUX_RESET_DONE__SHIFT 0x5
+#define DP_AUX3_AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x8
+#define DP_AUX3_AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0xc
+#define DP_AUX3_AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x10
+#define DP_AUX3_AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x12
+#define DP_AUX3_AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14
+#define DP_AUX3_AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x18
+#define DP_AUX3_AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x1c
+#define DP_AUX3_AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x1d
+#define DP_AUX3_AUX_CONTROL__SPARE_0__SHIFT 0x1e
+#define DP_AUX3_AUX_CONTROL__SPARE_1__SHIFT 0x1f
+#define DP_AUX3_AUX_CONTROL__AUX_EN_MASK 0x00000001L
+#define DP_AUX3_AUX_CONTROL__AUX_RESET_MASK 0x00000010L
+#define DP_AUX3_AUX_CONTROL__AUX_RESET_DONE_MASK 0x00000020L
+#define DP_AUX3_AUX_CONTROL__AUX_LS_READ_EN_MASK 0x00000100L
+#define DP_AUX3_AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x00001000L
+#define DP_AUX3_AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x00010000L
+#define DP_AUX3_AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x00040000L
+#define DP_AUX3_AUX_CONTROL__AUX_HPD_SEL_MASK 0x00700000L
+#define DP_AUX3_AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x01000000L
+#define DP_AUX3_AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000L
+#define DP_AUX3_AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000L
+#define DP_AUX3_AUX_CONTROL__SPARE_0_MASK 0x40000000L
+#define DP_AUX3_AUX_CONTROL__SPARE_1_MASK 0x80000000L
+//DP_AUX3_AUX_SW_CONTROL
+#define DP_AUX3_AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x0
+#define DP_AUX3_AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x2
+#define DP_AUX3_AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x4
+#define DP_AUX3_AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x10
+#define DP_AUX3_AUX_SW_CONTROL__AUX_SW_GO_MASK 0x00000001L
+#define DP_AUX3_AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x00000004L
+#define DP_AUX3_AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0x000000F0L
+#define DP_AUX3_AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x001F0000L
+//DP_AUX3_AUX_ARB_CONTROL
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x0
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x8
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0xa
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x11
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x19
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x00000003L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x00000100L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x00000400L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x00020000L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX3_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x02000000L
+//DP_AUX3_AUX_INTERRUPT_CONTROL
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x0
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x1
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x2
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x4
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x5
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x6
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT__SHIFT 0x8
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK__SHIFT 0x9
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK__SHIFT 0xa
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT__SHIFT 0xc
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK__SHIFT 0xd
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK__SHIFT 0xe
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x00000001L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x00000002L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x00000004L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x00000010L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x00000020L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x00000040L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK 0x00000100L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK_MASK 0x00000200L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK_MASK 0x00000400L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK 0x00001000L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK_MASK 0x00002000L
+#define DP_AUX3_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK_MASK 0x00004000L
+//DP_AUX3_AUX_SW_STATUS
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x0
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x1
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x9
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX3_AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x1d
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_DONE_MASK 0x00000001L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_REQ_MASK 0x00000002L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX3_AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xE0000000L
+//DP_AUX3_AUX_LS_STATUS
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x0
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x1
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x9
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x1d
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x1e
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x1f
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_DONE_MASK 0x00000001L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_REQ_MASK 0x00000002L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000L
+#define DP_AUX3_AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000L
+//DP_AUX3_AUX_SW_DATA
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x0
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x8
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x10
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x1f
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x00000001L
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_DATA_MASK 0x0000FF00L
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_INDEX_MASK 0x001F0000L
+#define DP_AUX3_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000L
+//DP_AUX3_AUX_LS_DATA
+#define DP_AUX3_AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x8
+#define DP_AUX3_AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x10
+#define DP_AUX3_AUX_LS_DATA__AUX_LS_DATA_MASK 0x0000FF00L
+#define DP_AUX3_AUX_LS_DATA__AUX_LS_INDEX_MASK 0x001F0000L
+//DP_AUX3_AUX_DPHY_TX_REF_CONTROL
+#define DP_AUX3_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x0
+#define DP_AUX3_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x4
+#define DP_AUX3_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x10
+#define DP_AUX3_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x00000001L
+#define DP_AUX3_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x00000030L
+#define DP_AUX3_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x01FF0000L
+//DP_AUX3_AUX_DPHY_TX_CONTROL
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x0
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL__SHIFT 0x4
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME__SHIFT 0x6
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x8
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY__SHIFT 0x10
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x0000000FL
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL_MASK 0x00000030L
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME_MASK 0x00000040L
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x00003F00L
+#define DP_AUX3_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY_MASK 0x00070000L
+//DP_AUX3_AUX_DPHY_RX_CONTROL0
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x4
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x8
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0xc
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x10
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x11
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x12
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x13
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x14
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x1c
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x00000070L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x00000700L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x00003000L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x00010000L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x00020000L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x00040000L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x00080000L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x00300000L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000L
+//DP_AUX3_AUX_DPHY_RX_CONTROL1
+#define DP_AUX3_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x0
+#define DP_AUX3_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN__SHIFT 0x8
+#define DP_AUX3_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL__SHIFT 0xf
+#define DP_AUX3_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0x000000FFL
+#define DP_AUX3_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MASK 0x00007F00L
+#define DP_AUX3_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL_MASK 0x00018000L
+//DP_AUX3_AUX_DPHY_TX_STATUS
+#define DP_AUX3_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x0
+#define DP_AUX3_AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x4
+#define DP_AUX3_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x10
+#define DP_AUX3_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x00000001L
+#define DP_AUX3_AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x00000070L
+#define DP_AUX3_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x01FF0000L
+//DP_AUX3_AUX_DPHY_RX_STATUS
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x0
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x8
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x10
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x15
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x00000007L
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x00001F00L
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x001F0000L
+#define DP_AUX3_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3FE00000L
+//DP_AUX3_AUX_GTC_SYNC_CONTROL
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN__SHIFT 0x0
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN__SHIFT 0x4
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL__SHIFT 0x8
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD__SHIFT 0xc
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD__SHIFT 0x10
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ__SHIFT 0x14
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW__SHIFT 0x16
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT__SHIFT 0x18
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT__SHIFT 0x1c
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN_MASK 0x00000001L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN_MASK 0x00000010L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL_MASK 0x00000F00L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD_MASK 0x0000F000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD_MASK 0x00070000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ_MASK 0x00100000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW_MASK 0x00C00000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT_MASK 0x03000000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT_MASK 0xF0000000L
+//DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD__SHIFT 0x0
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD__SHIFT 0x8
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN__SHIFT 0x10
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT__SHIFT 0x14
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD_MASK 0x0000001FL
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD_MASK 0x00001F00L
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN_MASK 0x00030000L
+#define DP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT_MASK 0x00300000L
+//DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE__SHIFT 0x0
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST__SHIFT 0x4
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED__SHIFT 0x8
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE__SHIFT 0x9
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL__SHIFT 0x10
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED__SHIFT 0x14
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK__SHIFT 0x15
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED__SHIFT 0x16
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK__SHIFT 0x17
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED__SHIFT 0x18
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK__SHIFT 0x19
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE__SHIFT 0x1c
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE_MASK 0x00000001L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST_MASK 0x00000010L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED_MASK 0x00000100L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE_MASK 0x00001E00L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL_MASK 0x00010000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_MASK 0x00100000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK_MASK 0x00200000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_MASK 0x00400000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK_MASK 0x00800000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_MASK 0x01000000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK_MASK 0x02000000L
+#define DP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE_MASK 0xF0000000L
+//DP_AUX3_AUX_GTC_SYNC_STATUS
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE__SHIFT 0x0
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ__SHIFT 0x1
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT__SHIFT 0x7
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON__SHIFT 0x9
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED__SHIFT 0x1d
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX__SHIFT 0x1e
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE_MASK 0x00000001L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ_MASK 0x00000002L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT_MASK 0x00000080L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED_MASK 0x20000000L
+#define DP_AUX3_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX_MASK 0x40000000L
+//DP_AUX3_AUX_PHY_WAKE_CNTL
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO__SHIFT 0x0
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING__SHIFT 0x1
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY__SHIFT 0x2
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK__SHIFT 0x3
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO_MASK 0x00000001L
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING_MASK 0x00000002L
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY_MASK 0x00000004L
+#define DP_AUX3_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK_MASK 0x00000008L
+
+
+// addressBlock: dce_dc_dio_dp_aux4_dispdec
+//DP_AUX4_AUX_CONTROL
+#define DP_AUX4_AUX_CONTROL__AUX_EN__SHIFT 0x0
+#define DP_AUX4_AUX_CONTROL__AUX_RESET__SHIFT 0x4
+#define DP_AUX4_AUX_CONTROL__AUX_RESET_DONE__SHIFT 0x5
+#define DP_AUX4_AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x8
+#define DP_AUX4_AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0xc
+#define DP_AUX4_AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x10
+#define DP_AUX4_AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x12
+#define DP_AUX4_AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14
+#define DP_AUX4_AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x18
+#define DP_AUX4_AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x1c
+#define DP_AUX4_AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x1d
+#define DP_AUX4_AUX_CONTROL__SPARE_0__SHIFT 0x1e
+#define DP_AUX4_AUX_CONTROL__SPARE_1__SHIFT 0x1f
+#define DP_AUX4_AUX_CONTROL__AUX_EN_MASK 0x00000001L
+#define DP_AUX4_AUX_CONTROL__AUX_RESET_MASK 0x00000010L
+#define DP_AUX4_AUX_CONTROL__AUX_RESET_DONE_MASK 0x00000020L
+#define DP_AUX4_AUX_CONTROL__AUX_LS_READ_EN_MASK 0x00000100L
+#define DP_AUX4_AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x00001000L
+#define DP_AUX4_AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x00010000L
+#define DP_AUX4_AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x00040000L
+#define DP_AUX4_AUX_CONTROL__AUX_HPD_SEL_MASK 0x00700000L
+#define DP_AUX4_AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x01000000L
+#define DP_AUX4_AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000L
+#define DP_AUX4_AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000L
+#define DP_AUX4_AUX_CONTROL__SPARE_0_MASK 0x40000000L
+#define DP_AUX4_AUX_CONTROL__SPARE_1_MASK 0x80000000L
+//DP_AUX4_AUX_SW_CONTROL
+#define DP_AUX4_AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x0
+#define DP_AUX4_AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x2
+#define DP_AUX4_AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x4
+#define DP_AUX4_AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x10
+#define DP_AUX4_AUX_SW_CONTROL__AUX_SW_GO_MASK 0x00000001L
+#define DP_AUX4_AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x00000004L
+#define DP_AUX4_AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0x000000F0L
+#define DP_AUX4_AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x001F0000L
+//DP_AUX4_AUX_ARB_CONTROL
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x0
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x8
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0xa
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x11
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x19
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x00000003L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x00000100L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x00000400L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x00020000L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX4_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x02000000L
+//DP_AUX4_AUX_INTERRUPT_CONTROL
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x0
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x1
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x2
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x4
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x5
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x6
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT__SHIFT 0x8
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK__SHIFT 0x9
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK__SHIFT 0xa
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT__SHIFT 0xc
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK__SHIFT 0xd
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK__SHIFT 0xe
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x00000001L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x00000002L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x00000004L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x00000010L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x00000020L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x00000040L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK 0x00000100L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK_MASK 0x00000200L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK_MASK 0x00000400L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK 0x00001000L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK_MASK 0x00002000L
+#define DP_AUX4_AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK_MASK 0x00004000L
+//DP_AUX4_AUX_SW_STATUS
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x0
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x1
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x9
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX4_AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x1d
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_DONE_MASK 0x00000001L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_REQ_MASK 0x00000002L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX4_AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xE0000000L
+//DP_AUX4_AUX_LS_STATUS
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x0
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x1
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x9
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x1d
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x1e
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x1f
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_DONE_MASK 0x00000001L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_REQ_MASK 0x00000002L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000L
+#define DP_AUX4_AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000L
+//DP_AUX4_AUX_SW_DATA
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x0
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x8
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x10
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x1f
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x00000001L
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_DATA_MASK 0x0000FF00L
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_INDEX_MASK 0x001F0000L
+#define DP_AUX4_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000L
+//DP_AUX4_AUX_LS_DATA
+#define DP_AUX4_AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x8
+#define DP_AUX4_AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x10
+#define DP_AUX4_AUX_LS_DATA__AUX_LS_DATA_MASK 0x0000FF00L
+#define DP_AUX4_AUX_LS_DATA__AUX_LS_INDEX_MASK 0x001F0000L
+//DP_AUX4_AUX_DPHY_TX_REF_CONTROL
+#define DP_AUX4_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x0
+#define DP_AUX4_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x4
+#define DP_AUX4_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x10
+#define DP_AUX4_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x00000001L
+#define DP_AUX4_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x00000030L
+#define DP_AUX4_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x01FF0000L
+//DP_AUX4_AUX_DPHY_TX_CONTROL
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x0
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL__SHIFT 0x4
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME__SHIFT 0x6
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x8
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY__SHIFT 0x10
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x0000000FL
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL_MASK 0x00000030L
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME_MASK 0x00000040L
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x00003F00L
+#define DP_AUX4_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY_MASK 0x00070000L
+//DP_AUX4_AUX_DPHY_RX_CONTROL0
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x4
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x8
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0xc
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x10
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x11
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x12
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x13
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x14
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x1c
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x00000070L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x00000700L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x00003000L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x00010000L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x00020000L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x00040000L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x00080000L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x00300000L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000L
+//DP_AUX4_AUX_DPHY_RX_CONTROL1
+#define DP_AUX4_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x0
+#define DP_AUX4_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN__SHIFT 0x8
+#define DP_AUX4_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL__SHIFT 0xf
+#define DP_AUX4_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0x000000FFL
+#define DP_AUX4_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MASK 0x00007F00L
+#define DP_AUX4_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL_MASK 0x00018000L
+//DP_AUX4_AUX_DPHY_TX_STATUS
+#define DP_AUX4_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x0
+#define DP_AUX4_AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x4
+#define DP_AUX4_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x10
+#define DP_AUX4_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x00000001L
+#define DP_AUX4_AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x00000070L
+#define DP_AUX4_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x01FF0000L
+//DP_AUX4_AUX_DPHY_RX_STATUS
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x0
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x8
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x10
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x15
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x00000007L
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x00001F00L
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x001F0000L
+#define DP_AUX4_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3FE00000L
+//DP_AUX4_AUX_GTC_SYNC_CONTROL
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN__SHIFT 0x0
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN__SHIFT 0x4
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL__SHIFT 0x8
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD__SHIFT 0xc
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD__SHIFT 0x10
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ__SHIFT 0x14
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW__SHIFT 0x16
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT__SHIFT 0x18
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT__SHIFT 0x1c
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_EN_MASK 0x00000001L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_EN_MASK 0x00000010L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_IMPCAL_INTERVAL_MASK 0x00000F00L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_PERIOD_MASK 0x0000F000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_MAINT_PERIOD_MASK 0x00070000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_BLOCK_REQ_MASK 0x00100000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_INTERVAL_RESET_WINDOW_MASK 0x00C00000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_OFFSET_CALC_MAX_ATTEMPT_MASK 0x03000000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_MAX_ATTEMPT_MASK 0xF0000000L
+//DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD__SHIFT 0x0
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD__SHIFT 0x8
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN__SHIFT 0x10
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT__SHIFT 0x14
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD_MASK 0x0000001FL
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD_MASK 0x00001F00L
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN_MASK 0x00030000L
+#define DP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT_MASK 0x00300000L
+//DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE__SHIFT 0x0
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST__SHIFT 0x4
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED__SHIFT 0x8
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE__SHIFT 0x9
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL__SHIFT 0x10
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED__SHIFT 0x14
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK__SHIFT 0x15
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED__SHIFT 0x16
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK__SHIFT 0x17
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED__SHIFT 0x18
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK__SHIFT 0x19
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE__SHIFT 0x1c
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE_MASK 0x00000001L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST_MASK 0x00000010L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED_MASK 0x00000100L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE_MASK 0x00001E00L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL_MASK 0x00010000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_MASK 0x00100000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK_MASK 0x00200000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_MASK 0x00400000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK_MASK 0x00800000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_MASK 0x01000000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK_MASK 0x02000000L
+#define DP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE_MASK 0xF0000000L
+//DP_AUX4_AUX_GTC_SYNC_STATUS
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE__SHIFT 0x0
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ__SHIFT 0x1
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT__SHIFT 0x7
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON__SHIFT 0x9
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED__SHIFT 0x1d
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX__SHIFT 0x1e
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE_MASK 0x00000001L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ_MASK 0x00000002L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT_MASK 0x00000080L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED_MASK 0x20000000L
+#define DP_AUX4_AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX_MASK 0x40000000L
+//DP_AUX4_AUX_PHY_WAKE_CNTL
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO__SHIFT 0x0
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING__SHIFT 0x1
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY__SHIFT 0x2
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK__SHIFT 0x3
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_GO_MASK 0x00000001L
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PENDING_MASK 0x00000002L
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_PRIORITY_MASK 0x00000004L
+#define DP_AUX4_AUX_PHY_WAKE_CNTL__DP_AUX_PHY_WAKE_ACK_MASK 0x00000008L
+
+
+// addressBlock: dce_dc_dio_dio_dpia_mux0_dispdec
+//DIO_DPIA_MUX0_DIO_DPIA_MUX_CONTROL
+#define DIO_DPIA_MUX0_DIO_DPIA_MUX_CONTROL__ENABLE__SHIFT 0x0
+#define DIO_DPIA_MUX0_DIO_DPIA_MUX_CONTROL__RESET__SHIFT 0x4
+#define DIO_DPIA_MUX0_DIO_DPIA_MUX_CONTROL__DIG_DP_SOURCE_SELECT__SHIFT 0x8
+#define DIO_DPIA_MUX0_DIO_DPIA_MUX_CONTROL__ENABLE_MASK 0x00000001L
+#define DIO_DPIA_MUX0_DIO_DPIA_MUX_CONTROL__RESET_MASK 0x00000010L
+#define DIO_DPIA_MUX0_DIO_DPIA_MUX_CONTROL__DIG_DP_SOURCE_SELECT_MASK 0x00000F00L
+
+
+// addressBlock: dce_dc_dio_dio_dpia_mux1_dispdec
+//DIO_DPIA_MUX1_DIO_DPIA_MUX_CONTROL
+#define DIO_DPIA_MUX1_DIO_DPIA_MUX_CONTROL__ENABLE__SHIFT 0x0
+#define DIO_DPIA_MUX1_DIO_DPIA_MUX_CONTROL__RESET__SHIFT 0x4
+#define DIO_DPIA_MUX1_DIO_DPIA_MUX_CONTROL__DIG_DP_SOURCE_SELECT__SHIFT 0x8
+#define DIO_DPIA_MUX1_DIO_DPIA_MUX_CONTROL__ENABLE_MASK 0x00000001L
+#define DIO_DPIA_MUX1_DIO_DPIA_MUX_CONTROL__RESET_MASK 0x00000010L
+#define DIO_DPIA_MUX1_DIO_DPIA_MUX_CONTROL__DIG_DP_SOURCE_SELECT_MASK 0x00000F00L
+
+
+// addressBlock: dce_dc_dio_dio_dpia_mux2_dispdec
+//DIO_DPIA_MUX2_DIO_DPIA_MUX_CONTROL
+#define DIO_DPIA_MUX2_DIO_DPIA_MUX_CONTROL__ENABLE__SHIFT 0x0
+#define DIO_DPIA_MUX2_DIO_DPIA_MUX_CONTROL__RESET__SHIFT 0x4
+#define DIO_DPIA_MUX2_DIO_DPIA_MUX_CONTROL__DIG_DP_SOURCE_SELECT__SHIFT 0x8
+#define DIO_DPIA_MUX2_DIO_DPIA_MUX_CONTROL__ENABLE_MASK 0x00000001L
+#define DIO_DPIA_MUX2_DIO_DPIA_MUX_CONTROL__RESET_MASK 0x00000010L
+#define DIO_DPIA_MUX2_DIO_DPIA_MUX_CONTROL__DIG_DP_SOURCE_SELECT_MASK 0x00000F00L
+
+
+// addressBlock: dce_dc_dio_dio_dpia_mux3_dispdec
+//DIO_DPIA_MUX3_DIO_DPIA_MUX_CONTROL
+#define DIO_DPIA_MUX3_DIO_DPIA_MUX_CONTROL__ENABLE__SHIFT 0x0
+#define DIO_DPIA_MUX3_DIO_DPIA_MUX_CONTROL__RESET__SHIFT 0x4
+#define DIO_DPIA_MUX3_DIO_DPIA_MUX_CONTROL__DIG_DP_SOURCE_SELECT__SHIFT 0x8
+#define DIO_DPIA_MUX3_DIO_DPIA_MUX_CONTROL__ENABLE_MASK 0x00000001L
+#define DIO_DPIA_MUX3_DIO_DPIA_MUX_CONTROL__RESET_MASK 0x00000010L
+#define DIO_DPIA_MUX3_DIO_DPIA_MUX_CONTROL__DIG_DP_SOURCE_SELECT_MASK 0x00000F00L
+
+
+// addressBlock: dce_dc_dio_dout_i2c_dispdec
+//DC_I2C_CONTROL
+#define DC_I2C_CONTROL__DC_I2C_GO__SHIFT 0x0
+#define DC_I2C_CONTROL__DC_I2C_SOFT_RESET__SHIFT 0x1
+#define DC_I2C_CONTROL__DC_I2C_SEND_RESET__SHIFT 0x2
+#define DC_I2C_CONTROL__DC_I2C_SW_STATUS_RESET__SHIFT 0x3
+#define DC_I2C_CONTROL__DC_I2C_DDC_SELECT__SHIFT 0x8
+#define DC_I2C_CONTROL__DC_I2C_TRANSACTION_COUNT__SHIFT 0x14
+#define DC_I2C_CONTROL__DC_I2C_GO_MASK 0x00000001L
+#define DC_I2C_CONTROL__DC_I2C_SOFT_RESET_MASK 0x00000002L
+#define DC_I2C_CONTROL__DC_I2C_SEND_RESET_MASK 0x00000004L
+#define DC_I2C_CONTROL__DC_I2C_SW_STATUS_RESET_MASK 0x00000008L
+#define DC_I2C_CONTROL__DC_I2C_DDC_SELECT_MASK 0x00000700L
+#define DC_I2C_CONTROL__DC_I2C_TRANSACTION_COUNT_MASK 0x00300000L
+//DC_I2C_ARBITRATION
+#define DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY__SHIFT 0x0
+#define DC_I2C_ARBITRATION__DC_I2C_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DC_I2C_ARBITRATION__DC_I2C_NO_QUEUED_SW_GO__SHIFT 0x4
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_HW_XFER__SHIFT 0x8
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_SW_XFER__SHIFT 0xc
+#define DC_I2C_ARBITRATION__DC_I2C_SW_USE_I2C_REG_REQ__SHIFT 0x14
+#define DC_I2C_ARBITRATION__DC_I2C_SW_DONE_USING_I2C_REG__SHIFT 0x15
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_USE_I2C_REG_REQ__SHIFT 0x18
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_DONE_USING_I2C_REG__SHIFT 0x19
+#define DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_MASK 0x00000003L
+#define DC_I2C_ARBITRATION__DC_I2C_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DC_I2C_ARBITRATION__DC_I2C_NO_QUEUED_SW_GO_MASK 0x00000010L
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_HW_XFER_MASK 0x00000100L
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_SW_XFER_MASK 0x00001000L
+#define DC_I2C_ARBITRATION__DC_I2C_SW_USE_I2C_REG_REQ_MASK 0x00100000L
+#define DC_I2C_ARBITRATION__DC_I2C_SW_DONE_USING_I2C_REG_MASK 0x00200000L
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_USE_I2C_REG_REQ_MASK 0x01000000L
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_DONE_USING_I2C_REG_MASK 0x02000000L
+//DC_I2C_INTERRUPT_CONTROL
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_INT__SHIFT 0x0
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_ACK__SHIFT 0x1
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_MASK__SHIFT 0x2
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_INT__SHIFT 0x4
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_ACK__SHIFT 0x5
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_MASK__SHIFT 0x6
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_INT__SHIFT 0x8
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_ACK__SHIFT 0x9
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_MASK__SHIFT 0xa
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_INT__SHIFT 0xc
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_ACK__SHIFT 0xd
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_MASK__SHIFT 0xe
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_INT__SHIFT 0x10
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_ACK__SHIFT 0x11
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_MASK__SHIFT 0x12
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_INT__SHIFT 0x14
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_ACK__SHIFT 0x15
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_MASK__SHIFT 0x16
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_INT__SHIFT 0x18
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_ACK__SHIFT 0x19
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_MASK__SHIFT 0x1a
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_INT__SHIFT 0x1b
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_ACK__SHIFT 0x1c
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_MASK__SHIFT 0x1d
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_INT_MASK 0x00000001L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_ACK_MASK 0x00000002L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_MASK_MASK 0x00000004L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_INT_MASK 0x00000010L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_ACK_MASK 0x00000020L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_MASK_MASK 0x00000040L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_INT_MASK 0x00000100L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_ACK_MASK 0x00000200L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_MASK_MASK 0x00000400L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_INT_MASK 0x00001000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_ACK_MASK 0x00002000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_MASK_MASK 0x00004000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_INT_MASK 0x00010000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_ACK_MASK 0x00020000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_MASK_MASK 0x00040000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_INT_MASK 0x00100000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_ACK_MASK 0x00200000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_MASK_MASK 0x00400000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_INT_MASK 0x01000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_ACK_MASK 0x02000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_MASK_MASK 0x04000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_INT_MASK 0x08000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_ACK_MASK 0x10000000L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_MASK_MASK 0x20000000L
+//DC_I2C_SW_STATUS
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STATUS__SHIFT 0x0
+#define DC_I2C_SW_STATUS__DC_I2C_SW_DONE__SHIFT 0x2
+#define DC_I2C_SW_STATUS__DC_I2C_SW_ABORTED__SHIFT 0x4
+#define DC_I2C_SW_STATUS__DC_I2C_SW_TIMEOUT__SHIFT 0x5
+#define DC_I2C_SW_STATUS__DC_I2C_SW_INTERRUPTED__SHIFT 0x6
+#define DC_I2C_SW_STATUS__DC_I2C_SW_BUFFER_OVERFLOW__SHIFT 0x7
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STOPPED_ON_NACK__SHIFT 0x8
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK0__SHIFT 0xc
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK1__SHIFT 0xd
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK2__SHIFT 0xe
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK3__SHIFT 0xf
+#define DC_I2C_SW_STATUS__DC_I2C_SW_REQ__SHIFT 0x12
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STATUS_MASK 0x00000003L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_DONE_MASK 0x00000004L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_ABORTED_MASK 0x00000010L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_TIMEOUT_MASK 0x00000020L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_INTERRUPTED_MASK 0x00000040L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_BUFFER_OVERFLOW_MASK 0x00000080L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STOPPED_ON_NACK_MASK 0x00000100L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK0_MASK 0x00001000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK1_MASK 0x00002000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK2_MASK 0x00004000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK3_MASK 0x00008000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_REQ_MASK 0x00040000L
+//DC_I2C_DDC1_HW_STATUS
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0F000000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATE_MASK 0x70000000L
+//DC_I2C_DDC2_HW_STATUS
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0F000000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATE_MASK 0x70000000L
+//DC_I2C_DDC3_HW_STATUS
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0F000000L
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATE_MASK 0x70000000L
+//DC_I2C_DDC4_HW_STATUS
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0F000000L
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATE_MASK 0x70000000L
+//DC_I2C_DDC5_HW_STATUS
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0F000000L
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATE_MASK 0x70000000L
+//DC_I2C_DDC1_SPEED
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_START_STOP_TIMING_CNTL_MASK 0x00000300L
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_PRESCALE_MASK 0xFFFF0000L
+//DC_I2C_DDC1_SETUP
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_SEND_RESET_LENGTH__SHIFT 0x2
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_CLK_EN__SHIFT 0x3
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_SEND_RESET_LENGTH_MASK 0x00000004L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_CLK_EN_MASK 0x00000008L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_BYTE_DELAY_MASK 0x0000FF00L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_TRANSACTION_DELAY_MASK 0x00FF0000L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_TIME_LIMIT_MASK 0xFF000000L
+//DC_I2C_DDC2_SPEED
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_START_STOP_TIMING_CNTL_MASK 0x00000300L
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_PRESCALE_MASK 0xFFFF0000L
+//DC_I2C_DDC2_SETUP
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_SEND_RESET_LENGTH__SHIFT 0x2
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_CLK_EN__SHIFT 0x3
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_SEND_RESET_LENGTH_MASK 0x00000004L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_CLK_EN_MASK 0x00000008L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_BYTE_DELAY_MASK 0x0000FF00L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_TRANSACTION_DELAY_MASK 0x00FF0000L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_TIME_LIMIT_MASK 0xFF000000L
+//DC_I2C_DDC3_SPEED
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_START_STOP_TIMING_CNTL_MASK 0x00000300L
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_PRESCALE_MASK 0xFFFF0000L
+//DC_I2C_DDC3_SETUP
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_SEND_RESET_LENGTH__SHIFT 0x2
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_CLK_EN__SHIFT 0x3
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_SEND_RESET_LENGTH_MASK 0x00000004L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_CLK_EN_MASK 0x00000008L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_BYTE_DELAY_MASK 0x0000FF00L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_TRANSACTION_DELAY_MASK 0x00FF0000L
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_TIME_LIMIT_MASK 0xFF000000L
+//DC_I2C_DDC4_SPEED
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_START_STOP_TIMING_CNTL_MASK 0x00000300L
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_PRESCALE_MASK 0xFFFF0000L
+//DC_I2C_DDC4_SETUP
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_SEND_RESET_LENGTH__SHIFT 0x2
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_CLK_EN__SHIFT 0x3
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_SEND_RESET_LENGTH_MASK 0x00000004L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_CLK_EN_MASK 0x00000008L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_BYTE_DELAY_MASK 0x0000FF00L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_TRANSACTION_DELAY_MASK 0x00FF0000L
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_TIME_LIMIT_MASK 0xFF000000L
+//DC_I2C_DDC5_SPEED
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_START_STOP_TIMING_CNTL_MASK 0x00000300L
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_PRESCALE_MASK 0xFFFF0000L
+//DC_I2C_DDC5_SETUP
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_SEND_RESET_LENGTH__SHIFT 0x2
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_CLK_EN__SHIFT 0x3
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_SEND_RESET_LENGTH_MASK 0x00000004L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_CLK_EN_MASK 0x00000008L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_BYTE_DELAY_MASK 0x0000FF00L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_TRANSACTION_DELAY_MASK 0x00FF0000L
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_TIME_LIMIT_MASK 0xFF000000L
+//DC_I2C_TRANSACTION0
+#define DC_I2C_TRANSACTION0__DC_I2C_RW0__SHIFT 0x0
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP_ON_NACK0__SHIFT 0x8
+#define DC_I2C_TRANSACTION0__DC_I2C_START0__SHIFT 0xc
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP0__SHIFT 0xd
+#define DC_I2C_TRANSACTION0__DC_I2C_COUNT0__SHIFT 0x10
+#define DC_I2C_TRANSACTION0__DC_I2C_RW0_MASK 0x00000001L
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP_ON_NACK0_MASK 0x00000100L
+#define DC_I2C_TRANSACTION0__DC_I2C_START0_MASK 0x00001000L
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP0_MASK 0x00002000L
+#define DC_I2C_TRANSACTION0__DC_I2C_COUNT0_MASK 0x03FF0000L
+//DC_I2C_TRANSACTION1
+#define DC_I2C_TRANSACTION1__DC_I2C_RW1__SHIFT 0x0
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP_ON_NACK1__SHIFT 0x8
+#define DC_I2C_TRANSACTION1__DC_I2C_START1__SHIFT 0xc
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP1__SHIFT 0xd
+#define DC_I2C_TRANSACTION1__DC_I2C_COUNT1__SHIFT 0x10
+#define DC_I2C_TRANSACTION1__DC_I2C_RW1_MASK 0x00000001L
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP_ON_NACK1_MASK 0x00000100L
+#define DC_I2C_TRANSACTION1__DC_I2C_START1_MASK 0x00001000L
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP1_MASK 0x00002000L
+#define DC_I2C_TRANSACTION1__DC_I2C_COUNT1_MASK 0x03FF0000L
+//DC_I2C_TRANSACTION2
+#define DC_I2C_TRANSACTION2__DC_I2C_RW2__SHIFT 0x0
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP_ON_NACK2__SHIFT 0x8
+#define DC_I2C_TRANSACTION2__DC_I2C_START2__SHIFT 0xc
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP2__SHIFT 0xd
+#define DC_I2C_TRANSACTION2__DC_I2C_COUNT2__SHIFT 0x10
+#define DC_I2C_TRANSACTION2__DC_I2C_RW2_MASK 0x00000001L
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP_ON_NACK2_MASK 0x00000100L
+#define DC_I2C_TRANSACTION2__DC_I2C_START2_MASK 0x00001000L
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP2_MASK 0x00002000L
+#define DC_I2C_TRANSACTION2__DC_I2C_COUNT2_MASK 0x03FF0000L
+//DC_I2C_TRANSACTION3
+#define DC_I2C_TRANSACTION3__DC_I2C_RW3__SHIFT 0x0
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP_ON_NACK3__SHIFT 0x8
+#define DC_I2C_TRANSACTION3__DC_I2C_START3__SHIFT 0xc
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP3__SHIFT 0xd
+#define DC_I2C_TRANSACTION3__DC_I2C_COUNT3__SHIFT 0x10
+#define DC_I2C_TRANSACTION3__DC_I2C_RW3_MASK 0x00000001L
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP_ON_NACK3_MASK 0x00000100L
+#define DC_I2C_TRANSACTION3__DC_I2C_START3_MASK 0x00001000L
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP3_MASK 0x00002000L
+#define DC_I2C_TRANSACTION3__DC_I2C_COUNT3_MASK 0x03FF0000L
+//DC_I2C_DATA
+#define DC_I2C_DATA__DC_I2C_DATA_RW__SHIFT 0x0
+#define DC_I2C_DATA__DC_I2C_DATA__SHIFT 0x8
+#define DC_I2C_DATA__DC_I2C_INDEX__SHIFT 0x10
+#define DC_I2C_DATA__DC_I2C_INDEX_WRITE__SHIFT 0x1f
+#define DC_I2C_DATA__DC_I2C_DATA_RW_MASK 0x00000001L
+#define DC_I2C_DATA__DC_I2C_DATA_MASK 0x0000FF00L
+#define DC_I2C_DATA__DC_I2C_INDEX_MASK 0x03FF0000L
+#define DC_I2C_DATA__DC_I2C_INDEX_WRITE_MASK 0x80000000L
+//DC_I2C_EDID_DETECT_CTRL
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_WAIT_TIME__SHIFT 0x0
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_NUM_TRIES_UNTIL_VALID__SHIFT 0x14
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_SEND_RESET__SHIFT 0x1c
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_WAIT_TIME_MASK 0x0000FFFFL
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_NUM_TRIES_UNTIL_VALID_MASK 0x00F00000L
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_SEND_RESET_MASK 0x10000000L
+//DC_I2C_READ_REQUEST_INTERRUPT
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_OCCURRED__SHIFT 0x0
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_INT__SHIFT 0x1
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_ACK__SHIFT 0x2
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_MASK__SHIFT 0x3
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_OCCURRED__SHIFT 0x4
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_INT__SHIFT 0x5
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_ACK__SHIFT 0x6
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_MASK__SHIFT 0x7
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_OCCURRED__SHIFT 0x8
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_INT__SHIFT 0x9
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_ACK__SHIFT 0xa
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_MASK__SHIFT 0xb
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_OCCURRED__SHIFT 0xc
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_INT__SHIFT 0xd
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_ACK__SHIFT 0xe
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_MASK__SHIFT 0xf
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_OCCURRED__SHIFT 0x10
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_INT__SHIFT 0x11
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_ACK__SHIFT 0x12
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_MASK__SHIFT 0x13
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_OCCURRED__SHIFT 0x14
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_INT__SHIFT 0x15
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_ACK__SHIFT 0x16
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_MASK__SHIFT 0x17
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_OCCURRED__SHIFT 0x18
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_INT__SHIFT 0x19
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_ACK__SHIFT 0x1a
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_MASK__SHIFT 0x1b
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC_READ_REQUEST_ACK_ENABLE__SHIFT 0x1e
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC_READ_REQUEST_INT_TYPE__SHIFT 0x1f
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_OCCURRED_MASK 0x00000001L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_INT_MASK 0x00000002L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_ACK_MASK 0x00000004L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_MASK_MASK 0x00000008L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_OCCURRED_MASK 0x00000010L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_INT_MASK 0x00000020L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_ACK_MASK 0x00000040L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_MASK_MASK 0x00000080L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_OCCURRED_MASK 0x00000100L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_INT_MASK 0x00000200L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_ACK_MASK 0x00000400L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_MASK_MASK 0x00000800L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_OCCURRED_MASK 0x00001000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_INT_MASK 0x00002000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_ACK_MASK 0x00004000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_MASK_MASK 0x00008000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_OCCURRED_MASK 0x00010000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_INT_MASK 0x00020000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_ACK_MASK 0x00040000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_MASK_MASK 0x00080000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_OCCURRED_MASK 0x00100000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_INT_MASK 0x00200000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_ACK_MASK 0x00400000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_MASK_MASK 0x00800000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_OCCURRED_MASK 0x01000000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_INT_MASK 0x02000000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_ACK_MASK 0x04000000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_MASK_MASK 0x08000000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC_READ_REQUEST_ACK_ENABLE_MASK 0x40000000L
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC_READ_REQUEST_INT_TYPE_MASK 0x80000000L
+
+
+// addressBlock: dce_dc_dio_dio_misc_dispdec
+//DIO_DCN_STATUS
+#define DIO_DCN_STATUS__DCN_ACTIVE__SHIFT 0x0
+#define DIO_DCN_STATUS__DCN_ACTIVE_MASK 0x00000001L
+//DIO_SCRATCH0
+#define DIO_SCRATCH0__DIO_SCRATCH0__SHIFT 0x0
+#define DIO_SCRATCH0__DIO_SCRATCH0_MASK 0xFFFFFFFFL
+//DIO_SCRATCH1
+#define DIO_SCRATCH1__DIO_SCRATCH1__SHIFT 0x0
+#define DIO_SCRATCH1__DIO_SCRATCH1_MASK 0xFFFFFFFFL
+//DIO_SCRATCH2
+#define DIO_SCRATCH2__DIO_SCRATCH2__SHIFT 0x0
+#define DIO_SCRATCH2__DIO_SCRATCH2_MASK 0xFFFFFFFFL
+//DIO_SCRATCH3
+#define DIO_SCRATCH3__DIO_SCRATCH3__SHIFT 0x0
+#define DIO_SCRATCH3__DIO_SCRATCH3_MASK 0xFFFFFFFFL
+//DIO_SCRATCH4
+#define DIO_SCRATCH4__DIO_SCRATCH4__SHIFT 0x0
+#define DIO_SCRATCH4__DIO_SCRATCH4_MASK 0xFFFFFFFFL
+//DIO_SCRATCH5
+#define DIO_SCRATCH5__DIO_SCRATCH5__SHIFT 0x0
+#define DIO_SCRATCH5__DIO_SCRATCH5_MASK 0xFFFFFFFFL
+//DIO_SCRATCH6
+#define DIO_SCRATCH6__DIO_SCRATCH6__SHIFT 0x0
+#define DIO_SCRATCH6__DIO_SCRATCH6_MASK 0xFFFFFFFFL
+//DIO_SCRATCH7
+#define DIO_SCRATCH7__DIO_SCRATCH7__SHIFT 0x0
+#define DIO_SCRATCH7__DIO_SCRATCH7_MASK 0xFFFFFFFFL
+//DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGA_DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x0
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGB_DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x1
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGC_DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x2
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGD_DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x3
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGE_DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x4
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGF_DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x5
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGG_DP_ALPM_WAKEUP_INTERRUPT_STATUS__SHIFT 0x6
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGA_DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000001L
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGB_DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000002L
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGC_DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000004L
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGD_DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000008L
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGE_DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000010L
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGF_DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000020L
+#define DIO_DP_ALPM_WAKEUP_INTERRUPT_STATUS__DIGG_DP_ALPM_WAKEUP_INTERRUPT_STATUS_MASK 0x00000040L
+//DIO_MEM_PWR_STATUS
+#define DIO_MEM_PWR_STATUS__I2C_MEM_PWR_STATE__SHIFT 0x0
+#define DIO_MEM_PWR_STATUS__DPA_MEM_PWR_STATE__SHIFT 0x3
+#define DIO_MEM_PWR_STATUS__DPB_MEM_PWR_STATE__SHIFT 0x4
+#define DIO_MEM_PWR_STATUS__DPC_MEM_PWR_STATE__SHIFT 0x5
+#define DIO_MEM_PWR_STATUS__DPD_MEM_PWR_STATE__SHIFT 0x6
+#define DIO_MEM_PWR_STATUS__DPE_MEM_PWR_STATE__SHIFT 0x7
+#define DIO_MEM_PWR_STATUS__DPF_MEM_PWR_STATE__SHIFT 0x8
+#define DIO_MEM_PWR_STATUS__DPG_MEM_PWR_STATE__SHIFT 0x9
+#define DIO_MEM_PWR_STATUS__I2C_MEM_PWR_STATE_MASK 0x00000001L
+#define DIO_MEM_PWR_STATUS__DPA_MEM_PWR_STATE_MASK 0x00000008L
+#define DIO_MEM_PWR_STATUS__DPB_MEM_PWR_STATE_MASK 0x00000010L
+#define DIO_MEM_PWR_STATUS__DPC_MEM_PWR_STATE_MASK 0x00000020L
+#define DIO_MEM_PWR_STATUS__DPD_MEM_PWR_STATE_MASK 0x00000040L
+#define DIO_MEM_PWR_STATUS__DPE_MEM_PWR_STATE_MASK 0x00000080L
+#define DIO_MEM_PWR_STATUS__DPF_MEM_PWR_STATE_MASK 0x00000100L
+#define DIO_MEM_PWR_STATUS__DPG_MEM_PWR_STATE_MASK 0x00000200L
+//DIO_MEM_PWR_CTRL
+#define DIO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_FORCE__SHIFT 0x0
+#define DIO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_DIS__SHIFT 0x1
+#define DIO_MEM_PWR_CTRL__DPA_LIGHT_SLEEP_DIS__SHIFT 0x4
+#define DIO_MEM_PWR_CTRL__DPB_LIGHT_SLEEP_DIS__SHIFT 0x5
+#define DIO_MEM_PWR_CTRL__DPC_LIGHT_SLEEP_DIS__SHIFT 0x6
+#define DIO_MEM_PWR_CTRL__DPD_LIGHT_SLEEP_DIS__SHIFT 0x7
+#define DIO_MEM_PWR_CTRL__DPE_LIGHT_SLEEP_DIS__SHIFT 0x8
+#define DIO_MEM_PWR_CTRL__DPF_LIGHT_SLEEP_DIS__SHIFT 0x9
+#define DIO_MEM_PWR_CTRL__DPG_LIGHT_SLEEP_DIS__SHIFT 0xa
+#define DIO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_FORCE_MASK 0x00000001L
+#define DIO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_DIS_MASK 0x00000002L
+#define DIO_MEM_PWR_CTRL__DPA_LIGHT_SLEEP_DIS_MASK 0x00000010L
+#define DIO_MEM_PWR_CTRL__DPB_LIGHT_SLEEP_DIS_MASK 0x00000020L
+#define DIO_MEM_PWR_CTRL__DPC_LIGHT_SLEEP_DIS_MASK 0x00000040L
+#define DIO_MEM_PWR_CTRL__DPD_LIGHT_SLEEP_DIS_MASK 0x00000080L
+#define DIO_MEM_PWR_CTRL__DPE_LIGHT_SLEEP_DIS_MASK 0x00000100L
+#define DIO_MEM_PWR_CTRL__DPF_LIGHT_SLEEP_DIS_MASK 0x00000200L
+#define DIO_MEM_PWR_CTRL__DPG_LIGHT_SLEEP_DIS_MASK 0x00000400L
+//DIO_MEM_PWR_CTRL2
+#define DIO_MEM_PWR_CTRL2__DPA_LIGHT_SLEEP_FORCE__SHIFT 0x18
+#define DIO_MEM_PWR_CTRL2__DPB_LIGHT_SLEEP_FORCE__SHIFT 0x19
+#define DIO_MEM_PWR_CTRL2__DPC_LIGHT_SLEEP_FORCE__SHIFT 0x1a
+#define DIO_MEM_PWR_CTRL2__DPD_LIGHT_SLEEP_FORCE__SHIFT 0x1b
+#define DIO_MEM_PWR_CTRL2__DPE_LIGHT_SLEEP_FORCE__SHIFT 0x1c
+#define DIO_MEM_PWR_CTRL2__DPF_LIGHT_SLEEP_FORCE__SHIFT 0x1d
+#define DIO_MEM_PWR_CTRL2__DPG_LIGHT_SLEEP_FORCE__SHIFT 0x1e
+#define DIO_MEM_PWR_CTRL2__DPA_LIGHT_SLEEP_FORCE_MASK 0x01000000L
+#define DIO_MEM_PWR_CTRL2__DPB_LIGHT_SLEEP_FORCE_MASK 0x02000000L
+#define DIO_MEM_PWR_CTRL2__DPC_LIGHT_SLEEP_FORCE_MASK 0x04000000L
+#define DIO_MEM_PWR_CTRL2__DPD_LIGHT_SLEEP_FORCE_MASK 0x08000000L
+#define DIO_MEM_PWR_CTRL2__DPE_LIGHT_SLEEP_FORCE_MASK 0x10000000L
+#define DIO_MEM_PWR_CTRL2__DPF_LIGHT_SLEEP_FORCE_MASK 0x20000000L
+#define DIO_MEM_PWR_CTRL2__DPG_LIGHT_SLEEP_FORCE_MASK 0x40000000L
+//DIO_CLK_CNTL
+#define DIO_CLK_CNTL__DIO_TEST_CLK_SEL__SHIFT 0x0
+#define DIO_CLK_CNTL__DISPCLK_R_GATE_DIS__SHIFT 0x9
+#define DIO_CLK_CNTL__DISPCLK_G_GATE_DIS__SHIFT 0xa
+#define DIO_CLK_CNTL__REFCLK_R_GATE_DIS__SHIFT 0xb
+#define DIO_CLK_CNTL__REFCLK_G_GATE_DIS__SHIFT 0xc
+#define DIO_CLK_CNTL__SOCCLK_G_GATE_DIS__SHIFT 0xd
+#define DIO_CLK_CNTL__SYMCLK_FE_R_GATE_DIS__SHIFT 0xe
+#define DIO_CLK_CNTL__SYMCLK_FE_G_GATE_DIS__SHIFT 0xf
+#define DIO_CLK_CNTL__SYMCLK_R_GATE_DIS__SHIFT 0x10
+#define DIO_CLK_CNTL__SYMCLK_G_GATE_DIS__SHIFT 0x11
+#define DIO_CLK_CNTL__DIO_FGCG_REP_DIS__SHIFT 0x14
+#define DIO_CLK_CNTL__DISPCLK_G_HDCP_GATE_DIS__SHIFT 0x15
+#define DIO_CLK_CNTL__SYMCLKA_G_HDCP_GATE_DIS__SHIFT 0x16
+#define DIO_CLK_CNTL__SYMCLKB_G_HDCP_GATE_DIS__SHIFT 0x17
+#define DIO_CLK_CNTL__SYMCLKC_G_HDCP_GATE_DIS__SHIFT 0x18
+#define DIO_CLK_CNTL__SYMCLKD_G_HDCP_GATE_DIS__SHIFT 0x19
+#define DIO_CLK_CNTL__SYMCLKE_G_HDCP_GATE_DIS__SHIFT 0x1a
+#define DIO_CLK_CNTL__SYMCLKF_G_HDCP_GATE_DIS__SHIFT 0x1b
+#define DIO_CLK_CNTL__SYMCLKG_G_HDCP_GATE_DIS__SHIFT 0x1c
+#define DIO_CLK_CNTL__DIO_TEST_CLK_SEL_MASK 0x0000007FL
+#define DIO_CLK_CNTL__DISPCLK_R_GATE_DIS_MASK 0x00000200L
+#define DIO_CLK_CNTL__DISPCLK_G_GATE_DIS_MASK 0x00000400L
+#define DIO_CLK_CNTL__REFCLK_R_GATE_DIS_MASK 0x00000800L
+#define DIO_CLK_CNTL__REFCLK_G_GATE_DIS_MASK 0x00001000L
+#define DIO_CLK_CNTL__SOCCLK_G_GATE_DIS_MASK 0x00002000L
+#define DIO_CLK_CNTL__SYMCLK_FE_R_GATE_DIS_MASK 0x00004000L
+#define DIO_CLK_CNTL__SYMCLK_FE_G_GATE_DIS_MASK 0x00008000L
+#define DIO_CLK_CNTL__SYMCLK_R_GATE_DIS_MASK 0x00010000L
+#define DIO_CLK_CNTL__SYMCLK_G_GATE_DIS_MASK 0x00020000L
+#define DIO_CLK_CNTL__DIO_FGCG_REP_DIS_MASK 0x00100000L
+#define DIO_CLK_CNTL__DISPCLK_G_HDCP_GATE_DIS_MASK 0x00200000L
+#define DIO_CLK_CNTL__SYMCLKA_G_HDCP_GATE_DIS_MASK 0x00400000L
+#define DIO_CLK_CNTL__SYMCLKB_G_HDCP_GATE_DIS_MASK 0x00800000L
+#define DIO_CLK_CNTL__SYMCLKC_G_HDCP_GATE_DIS_MASK 0x01000000L
+#define DIO_CLK_CNTL__SYMCLKD_G_HDCP_GATE_DIS_MASK 0x02000000L
+#define DIO_CLK_CNTL__SYMCLKE_G_HDCP_GATE_DIS_MASK 0x04000000L
+#define DIO_CLK_CNTL__SYMCLKF_G_HDCP_GATE_DIS_MASK 0x08000000L
+#define DIO_CLK_CNTL__SYMCLKG_G_HDCP_GATE_DIS_MASK 0x10000000L
+//DIO_POWER_MANAGEMENT_CNTL
+#define DIO_POWER_MANAGEMENT_CNTL__PM_ASSERT_RESET__SHIFT 0x0
+#define DIO_POWER_MANAGEMENT_CNTL__PM_ALL_BUSY_OFF__SHIFT 0x8
+#define DIO_POWER_MANAGEMENT_CNTL__PM_ASSERT_RESET_MASK 0x00000001L
+#define DIO_POWER_MANAGEMENT_CNTL__PM_ALL_BUSY_OFF_MASK 0x00000100L
+//DIO_HDMI_RXSTATUS_TIMER_CONTROL
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_ENABLE__SHIFT 0x0
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_TYPE__SHIFT 0x4
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_STATUS__SHIFT 0x8
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_MASK__SHIFT 0xc
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_INTERVAL__SHIFT 0x10
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_ENABLE_MASK 0x00000001L
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_TYPE_MASK 0x00000010L
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_STATUS_MASK 0x00000100L
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_MASK_MASK 0x00001000L
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_INTERVAL_MASK 0x0FFF0000L
+//DIO_PSP_INTERRUPT_STATUS
+#define DIO_PSP_INTERRUPT_STATUS__DIO_PSP_INTERRUPT_STATUS__SHIFT 0x0
+#define DIO_PSP_INTERRUPT_STATUS__DIO_PSP_INTERRUPT_MESSAGE__SHIFT 0x1
+#define DIO_PSP_INTERRUPT_STATUS__DIO_PSP_INTERRUPT_STATUS_MASK 0x00000001L
+#define DIO_PSP_INTERRUPT_STATUS__DIO_PSP_INTERRUPT_MESSAGE_MASK 0xFFFFFFFEL
+//DIO_PSP_INTERRUPT_CLEAR
+#define DIO_PSP_INTERRUPT_CLEAR__DIO_PSP_INTERRUPT_CLEAR__SHIFT 0x0
+#define DIO_PSP_INTERRUPT_CLEAR__DIO_PSP_INTERRUPT_CLEAR_MASK 0x00000001L
+//DIO_STATUS
+#define DIO_STATUS__DIO_EN__SHIFT 0x0
+#define DIO_STATUS__DIO_EN_MASK 0x00000001L
+//DIO_LINKA_CNTL
+#define DIO_LINKA_CNTL__ENC_TYPE_SEL__SHIFT 0x0
+#define DIO_LINKA_CNTL__HPO_HDMI_ENC_SEL__SHIFT 0x4
+#define DIO_LINKA_CNTL__HPO_DP_ENC_SEL__SHIFT 0x8
+#define DIO_LINKA_CNTL__ENC_TYPE_SEL_MASK 0x00000003L
+#define DIO_LINKA_CNTL__HPO_HDMI_ENC_SEL_MASK 0x00000070L
+#define DIO_LINKA_CNTL__HPO_DP_ENC_SEL_MASK 0x00000700L
+//DIO_LINKB_CNTL
+#define DIO_LINKB_CNTL__ENC_TYPE_SEL__SHIFT 0x0
+#define DIO_LINKB_CNTL__HPO_HDMI_ENC_SEL__SHIFT 0x4
+#define DIO_LINKB_CNTL__HPO_DP_ENC_SEL__SHIFT 0x8
+#define DIO_LINKB_CNTL__ENC_TYPE_SEL_MASK 0x00000003L
+#define DIO_LINKB_CNTL__HPO_HDMI_ENC_SEL_MASK 0x00000070L
+#define DIO_LINKB_CNTL__HPO_DP_ENC_SEL_MASK 0x00000700L
+//DIO_LINKC_CNTL
+#define DIO_LINKC_CNTL__ENC_TYPE_SEL__SHIFT 0x0
+#define DIO_LINKC_CNTL__HPO_HDMI_ENC_SEL__SHIFT 0x4
+#define DIO_LINKC_CNTL__HPO_DP_ENC_SEL__SHIFT 0x8
+#define DIO_LINKC_CNTL__ENC_TYPE_SEL_MASK 0x00000003L
+#define DIO_LINKC_CNTL__HPO_HDMI_ENC_SEL_MASK 0x00000070L
+#define DIO_LINKC_CNTL__HPO_DP_ENC_SEL_MASK 0x00000700L
+//DIO_LINKD_CNTL
+#define DIO_LINKD_CNTL__ENC_TYPE_SEL__SHIFT 0x0
+#define DIO_LINKD_CNTL__HPO_HDMI_ENC_SEL__SHIFT 0x4
+#define DIO_LINKD_CNTL__HPO_DP_ENC_SEL__SHIFT 0x8
+#define DIO_LINKD_CNTL__ENC_TYPE_SEL_MASK 0x00000003L
+#define DIO_LINKD_CNTL__HPO_HDMI_ENC_SEL_MASK 0x00000070L
+#define DIO_LINKD_CNTL__HPO_DP_ENC_SEL_MASK 0x00000700L
+//DIO_LINKE_CNTL
+#define DIO_LINKE_CNTL__ENC_TYPE_SEL__SHIFT 0x0
+#define DIO_LINKE_CNTL__HPO_HDMI_ENC_SEL__SHIFT 0x4
+#define DIO_LINKE_CNTL__HPO_DP_ENC_SEL__SHIFT 0x8
+#define DIO_LINKE_CNTL__ENC_TYPE_SEL_MASK 0x00000003L
+#define DIO_LINKE_CNTL__HPO_HDMI_ENC_SEL_MASK 0x00000070L
+#define DIO_LINKE_CNTL__HPO_DP_ENC_SEL_MASK 0x00000700L
+//DIO_LINKF_CNTL
+#define DIO_LINKF_CNTL__ENC_TYPE_SEL__SHIFT 0x0
+#define DIO_LINKF_CNTL__HPO_HDMI_ENC_SEL__SHIFT 0x4
+#define DIO_LINKF_CNTL__HPO_DP_ENC_SEL__SHIFT 0x8
+#define DIO_LINKF_CNTL__ENC_TYPE_SEL_MASK 0x00000003L
+#define DIO_LINKF_CNTL__HPO_HDMI_ENC_SEL_MASK 0x00000070L
+#define DIO_LINKF_CNTL__HPO_DP_ENC_SEL_MASK 0x00000700L
+
+
+// addressBlock: dce_dc_dio_dig_stream_mapper_dispdec
+//DIG0_STREAM_MAPPER_CONTROL
+#define DIG0_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET__SHIFT 0x0
+#define DIG0_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET_MASK 0x00000007L
+//DIG1_STREAM_MAPPER_CONTROL
+#define DIG1_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET__SHIFT 0x0
+#define DIG1_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET_MASK 0x00000007L
+//DIG2_STREAM_MAPPER_CONTROL
+#define DIG2_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET__SHIFT 0x0
+#define DIG2_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET_MASK 0x00000007L
+//DIG3_STREAM_MAPPER_CONTROL
+#define DIG3_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET__SHIFT 0x0
+#define DIG3_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET_MASK 0x00000007L
+//DIG4_STREAM_MAPPER_CONTROL
+#define DIG4_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET__SHIFT 0x0
+#define DIG4_STREAM_MAPPER_CONTROL__DIG_STREAM_LINK_TARGET_MASK 0x00000007L
+
+
+// addressBlock: dce_dc_dio_dio_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON18_PERFCOUNTER_CNTL
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON18_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON18_PERFCOUNTER_CNTL2
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON18_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON18_PERFCOUNTER_STATE
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON18_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON18_PERFMON_CNTL
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON18_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON18_PERFMON_CNTL2
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON18_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON18_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON18_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON18_PERFMON_CVALUE_LOW
+#define DC_PERFMON18_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON18_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON18_PERFMON_HI
+#define DC_PERFMON18_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON18_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON18_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON18_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON18_PERFMON_LOW
+#define DC_PERFMON18_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON18_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcio_dcio_dispdec
+//DC_GENERICA
+#define DC_GENERICA__GENERICA_EN__SHIFT 0x0
+#define DC_GENERICA__GENERICA_SEL__SHIFT 0x7
+#define DC_GENERICA__GENERICA_EN_MASK 0x00000001L
+#define DC_GENERICA__GENERICA_SEL_MASK 0x00000F80L
+//DC_GENERICB
+#define DC_GENERICB__GENERICB_EN__SHIFT 0x0
+#define DC_GENERICB__GENERICB_SEL__SHIFT 0x8
+#define DC_GENERICB__GENERICB_EN_MASK 0x00000001L
+#define DC_GENERICB__GENERICB_SEL_MASK 0x00000F00L
+//DCIO_CLOCK_CNTL
+#define DCIO_CLOCK_CNTL__DCIO_TEST_CLK_SEL__SHIFT 0x0
+#define DCIO_CLOCK_CNTL__DISPCLK_R_DCIO_GATE_DIS__SHIFT 0x5
+#define DCIO_CLOCK_CNTL__DCIO_TEST_CLK_SEL_MASK 0x0000001FL
+#define DCIO_CLOCK_CNTL__DISPCLK_R_DCIO_GATE_DIS_MASK 0x00000020L
+//DC_REF_CLK_CNTL
+#define DC_REF_CLK_CNTL__GENLK_CLK_OUTPUT_SEL__SHIFT 0x8
+#define DC_REF_CLK_CNTL__GENLK_CLK_OUTPUT_SEL_MASK 0x00000300L
+//UNIPHYA_LINK_CNTL
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x00001000L
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x00002000L
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x00004000L
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x00008000L
+//UNIPHYA_CHANNEL_XBAR_CNTL
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYA_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN__SHIFT 0x1c
+#define UNIPHYA_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN__SHIFT 0x1d
+#define UNIPHYA_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN__SHIFT 0x1e
+#define UNIPHYA_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN__SHIFT 0x1f
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x00000003L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x00000300L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x00030000L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x03000000L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN_MASK 0x10000000L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN_MASK 0x20000000L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN_MASK 0x40000000L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN_MASK 0x80000000L
+//UNIPHYB_LINK_CNTL
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x00001000L
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x00002000L
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x00004000L
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x00008000L
+//UNIPHYB_CHANNEL_XBAR_CNTL
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYB_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN__SHIFT 0x1c
+#define UNIPHYB_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN__SHIFT 0x1d
+#define UNIPHYB_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN__SHIFT 0x1e
+#define UNIPHYB_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN__SHIFT 0x1f
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x00000003L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x00000300L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x00030000L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x03000000L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN_MASK 0x10000000L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN_MASK 0x20000000L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN_MASK 0x40000000L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN_MASK 0x80000000L
+//UNIPHYC_LINK_CNTL
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x00001000L
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x00002000L
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x00004000L
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x00008000L
+//UNIPHYC_CHANNEL_XBAR_CNTL
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYC_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN__SHIFT 0x1c
+#define UNIPHYC_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN__SHIFT 0x1d
+#define UNIPHYC_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN__SHIFT 0x1e
+#define UNIPHYC_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN__SHIFT 0x1f
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x00000003L
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x00000300L
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x00030000L
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x03000000L
+#define UNIPHYC_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN_MASK 0x10000000L
+#define UNIPHYC_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN_MASK 0x20000000L
+#define UNIPHYC_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN_MASK 0x40000000L
+#define UNIPHYC_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN_MASK 0x80000000L
+//UNIPHYD_CHANNEL_XBAR_CNTL
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYD_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN__SHIFT 0x1c
+#define UNIPHYD_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN__SHIFT 0x1d
+#define UNIPHYD_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN__SHIFT 0x1e
+#define UNIPHYD_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN__SHIFT 0x1f
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x00000003L
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x00000300L
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x00030000L
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x03000000L
+#define UNIPHYD_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN_MASK 0x10000000L
+#define UNIPHYD_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN_MASK 0x20000000L
+#define UNIPHYD_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN_MASK 0x40000000L
+#define UNIPHYD_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN_MASK 0x80000000L
+//UNIPHYE_CHANNEL_XBAR_CNTL
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYE_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN__SHIFT 0x1c
+#define UNIPHYE_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN__SHIFT 0x1d
+#define UNIPHYE_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN__SHIFT 0x1e
+#define UNIPHYE_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN__SHIFT 0x1f
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x00000003L
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x00000300L
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x00030000L
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x03000000L
+#define UNIPHYE_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL0_EN_MASK 0x10000000L
+#define UNIPHYE_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL1_EN_MASK 0x20000000L
+#define UNIPHYE_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL2_EN_MASK 0x40000000L
+#define UNIPHYE_CHANNEL_XBAR_CNTL__DOUT_PHY_CHANNEL3_EN_MASK 0x80000000L
+//DCIO_WRCMD_DELAY
+#define DCIO_WRCMD_DELAY__UNIPHY_DELAY__SHIFT 0x18
+#define DCIO_WRCMD_DELAY__UNIPHY_DELAY_MASK 0xFF000000L
+//DC_PINSTRAPS
+#define DC_PINSTRAPS__DC_PINSTRAPS_SMS_EN_HARD__SHIFT 0xd
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO__SHIFT 0xe
+#define DC_PINSTRAPS__DC_PINSTRAPS_CCBYPASS__SHIFT 0x10
+#define DC_PINSTRAPS__DC_PINSTRAPS_SMS_EN_HARD_MASK 0x00002000L
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO_MASK 0x0000C000L
+#define DC_PINSTRAPS__DC_PINSTRAPS_CCBYPASS_MASK 0x00010000L
+//DCIO_SPARE
+#define DCIO_SPARE__DCIO_SPARE__SHIFT 0x0
+#define DCIO_SPARE__DCIO_SPARE_MASK 0xFFFFFFFFL
+//INTERCEPT_STATE
+#define INTERCEPT_STATE__PWRSEQ0_INTERCEPTB_STATE__SHIFT 0x0
+#define INTERCEPT_STATE__PWRSEQ1_INTERCEPTB_STATE__SHIFT 0x1
+#define INTERCEPT_STATE__DLPC_INTERCEPTB_STATE__SHIFT 0x2
+#define INTERCEPT_STATE__DPCS0_INTERCEPTB_STATE__SHIFT 0x4
+#define INTERCEPT_STATE__DPCS1_INTERCEPTB_STATE__SHIFT 0x5
+#define INTERCEPT_STATE__DPCS2_INTERCEPTB_STATE__SHIFT 0x6
+#define INTERCEPT_STATE__DPCS3_INTERCEPTB_STATE__SHIFT 0x7
+#define INTERCEPT_STATE__DPCS4_INTERCEPTB_STATE__SHIFT 0x8
+#define INTERCEPT_STATE__DPCS5_INTERCEPTB_STATE__SHIFT 0x9
+#define INTERCEPT_STATE__DPCS6_INTERCEPTB_STATE__SHIFT 0xa
+#define INTERCEPT_STATE__PWRSEQ0_INTERCEPTB_STATE_MASK 0x00000001L
+#define INTERCEPT_STATE__PWRSEQ1_INTERCEPTB_STATE_MASK 0x00000002L
+#define INTERCEPT_STATE__DLPC_INTERCEPTB_STATE_MASK 0x00000004L
+#define INTERCEPT_STATE__DPCS0_INTERCEPTB_STATE_MASK 0x00000010L
+#define INTERCEPT_STATE__DPCS1_INTERCEPTB_STATE_MASK 0x00000020L
+#define INTERCEPT_STATE__DPCS2_INTERCEPTB_STATE_MASK 0x00000040L
+#define INTERCEPT_STATE__DPCS3_INTERCEPTB_STATE_MASK 0x00000080L
+#define INTERCEPT_STATE__DPCS4_INTERCEPTB_STATE_MASK 0x00000100L
+#define INTERCEPT_STATE__DPCS5_INTERCEPTB_STATE_MASK 0x00000200L
+#define INTERCEPT_STATE__DPCS6_INTERCEPTB_STATE_MASK 0x00000400L
+//DCIO_PATTERN_GEN_PAT
+#define DCIO_PATTERN_GEN_PAT__DCIO_PATTERN_GEN_PAT__SHIFT 0x0
+#define DCIO_PATTERN_GEN_PAT__DCIO_PATTERN_GEN_PAT_MASK 0xFFFFFFFFL
+//DCIO_PATTERN_GEN_EN
+#define DCIO_PATTERN_GEN_EN__DCIO_PATTERN_GEN_EN__SHIFT 0x0
+#define DCIO_PATTERN_GEN_EN__DCIO_PATTERN_GEN_EN_MASK 0x00000001L
+//DCIO_BL_PWM_FRAME_START_DISP_SEL
+#define DCIO_BL_PWM_FRAME_START_DISP_SEL__BL_PWM0_GRP1_FRAME_START_DISP_SEL__SHIFT 0x0
+#define DCIO_BL_PWM_FRAME_START_DISP_SEL__BL_PWM1_GRP1_FRAME_START_DISP_SEL__SHIFT 0x4
+#define DCIO_BL_PWM_FRAME_START_DISP_SEL__BL_PWM0_GRP1_FRAME_START_DISP_SEL_MASK 0x00000007L
+#define DCIO_BL_PWM_FRAME_START_DISP_SEL__BL_PWM1_GRP1_FRAME_START_DISP_SEL_MASK 0x00000070L
+//DCIO_GSL_GENLK_PAD_CNTL
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_FLIP_READY_SEL__SHIFT 0x4
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_MASK__SHIFT 0x8
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_FLIP_READY_SEL__SHIFT 0x14
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_MASK__SHIFT 0x18
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_FLIP_READY_SEL_MASK 0x00000030L
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_MASK_MASK 0x00000300L
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_FLIP_READY_SEL_MASK 0x00300000L
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_MASK_MASK 0x03000000L
+//DCIO_GSL_SWAPLOCK_PAD_CNTL
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_FLIP_READY_SEL__SHIFT 0x4
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_MASK__SHIFT 0x8
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_FLIP_READY_SEL__SHIFT 0x14
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_MASK__SHIFT 0x18
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_FLIP_READY_SEL_MASK 0x00000030L
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_MASK_MASK 0x00000300L
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_FLIP_READY_SEL_MASK 0x00300000L
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_MASK_MASK 0x03000000L
+//DCIO_SOFT_RESET
+#define DCIO_SOFT_RESET__UNIPHYA_SOFT_RESET__SHIFT 0x0
+#define DCIO_SOFT_RESET__UNIPHYB_SOFT_RESET__SHIFT 0x1
+#define DCIO_SOFT_RESET__UNIPHYC_SOFT_RESET__SHIFT 0x2
+#define DCIO_SOFT_RESET__UNIPHYD_SOFT_RESET__SHIFT 0x3
+#define DCIO_SOFT_RESET__UNIPHYE_SOFT_RESET__SHIFT 0x4
+#define DCIO_SOFT_RESET__UNIPHYF_SOFT_RESET__SHIFT 0x5
+#define DCIO_SOFT_RESET__UNIPHYG_SOFT_RESET__SHIFT 0x6
+#define DCIO_SOFT_RESET__DSYNCA_SOFT_RESET__SHIFT 0x8
+#define DCIO_SOFT_RESET__DSYNCB_SOFT_RESET__SHIFT 0x9
+#define DCIO_SOFT_RESET__DSYNCC_SOFT_RESET__SHIFT 0xa
+#define DCIO_SOFT_RESET__DSYNCD_SOFT_RESET__SHIFT 0xb
+#define DCIO_SOFT_RESET__DSYNCE_SOFT_RESET__SHIFT 0xc
+#define DCIO_SOFT_RESET__DSYNCF_SOFT_RESET__SHIFT 0xd
+#define DCIO_SOFT_RESET__DSYNCG_SOFT_RESET__SHIFT 0xe
+#define DCIO_SOFT_RESET__PWRSEQ0_SOFT_RESET__SHIFT 0x10
+#define DCIO_SOFT_RESET__PWRSEQ1_SOFT_RESET__SHIFT 0x11
+#define DCIO_SOFT_RESET__DLPC_SOFT_RESET__SHIFT 0x14
+#define DCIO_SOFT_RESET__UNIPHYA_SOFT_RESET_MASK 0x00000001L
+#define DCIO_SOFT_RESET__UNIPHYB_SOFT_RESET_MASK 0x00000002L
+#define DCIO_SOFT_RESET__UNIPHYC_SOFT_RESET_MASK 0x00000004L
+#define DCIO_SOFT_RESET__UNIPHYD_SOFT_RESET_MASK 0x00000008L
+#define DCIO_SOFT_RESET__UNIPHYE_SOFT_RESET_MASK 0x00000010L
+#define DCIO_SOFT_RESET__UNIPHYF_SOFT_RESET_MASK 0x00000020L
+#define DCIO_SOFT_RESET__UNIPHYG_SOFT_RESET_MASK 0x00000040L
+#define DCIO_SOFT_RESET__DSYNCA_SOFT_RESET_MASK 0x00000100L
+#define DCIO_SOFT_RESET__DSYNCB_SOFT_RESET_MASK 0x00000200L
+#define DCIO_SOFT_RESET__DSYNCC_SOFT_RESET_MASK 0x00000400L
+#define DCIO_SOFT_RESET__DSYNCD_SOFT_RESET_MASK 0x00000800L
+#define DCIO_SOFT_RESET__DSYNCE_SOFT_RESET_MASK 0x00001000L
+#define DCIO_SOFT_RESET__DSYNCF_SOFT_RESET_MASK 0x00002000L
+#define DCIO_SOFT_RESET__DSYNCG_SOFT_RESET_MASK 0x00004000L
+#define DCIO_SOFT_RESET__PWRSEQ0_SOFT_RESET_MASK 0x00010000L
+#define DCIO_SOFT_RESET__PWRSEQ1_SOFT_RESET_MASK 0x00020000L
+#define DCIO_SOFT_RESET__DLPC_SOFT_RESET_MASK 0x00100000L
+
+
+// addressBlock: dce_dc_dcio_dcio_chip_dispdec
+//DC_GPIO_GENERIC_MASK
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_MASK__SHIFT 0x0
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_PD_DIS__SHIFT 0x1
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_RECV__SHIFT 0x2
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_MASK__SHIFT 0x4
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_PD_DIS__SHIFT 0x5
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_RECV__SHIFT 0x6
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_MASK__SHIFT 0x8
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_PD_DIS__SHIFT 0x9
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_RECV__SHIFT 0xa
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_MASK__SHIFT 0xc
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_PD_DIS__SHIFT 0xd
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_RECV__SHIFT 0xe
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_MASK__SHIFT 0x10
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_PD_DIS__SHIFT 0x11
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_RECV__SHIFT 0x12
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_MASK__SHIFT 0x14
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_PD_DIS__SHIFT 0x15
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_RECV__SHIFT 0x16
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_MASK__SHIFT 0x18
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_PD_DIS__SHIFT 0x19
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_RECV__SHIFT 0x1a
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_STRENGTH_SN__SHIFT 0x1c
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_MASK_MASK 0x00000001L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_PD_DIS_MASK 0x00000002L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_RECV_MASK 0x0000000CL
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_MASK_MASK 0x00000010L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_PD_DIS_MASK 0x00000020L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_RECV_MASK 0x000000C0L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_MASK_MASK 0x00000100L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_PD_DIS_MASK 0x00000200L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_RECV_MASK 0x00000C00L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_MASK_MASK 0x00001000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_PD_DIS_MASK 0x00002000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_RECV_MASK 0x0000C000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_MASK_MASK 0x00010000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_PD_DIS_MASK 0x00020000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_RECV_MASK 0x000C0000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_MASK_MASK 0x00100000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_PD_DIS_MASK 0x00200000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_RECV_MASK 0x00C00000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_MASK_MASK 0x01000000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_PD_DIS_MASK 0x02000000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_RECV_MASK 0x0C000000L
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_STRENGTH_SN_MASK 0xF0000000L
+//DC_GPIO_GENERIC_A
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A__SHIFT 0x0
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A__SHIFT 0x8
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A__SHIFT 0x10
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A__SHIFT 0x14
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A__SHIFT 0x15
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A__SHIFT 0x16
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A__SHIFT 0x17
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK 0x00000001L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK 0x00000100L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK 0x00010000L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK 0x00100000L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK 0x00200000L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK 0x00400000L
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK 0x00800000L
+//DC_GPIO_GENERIC_EN
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICA_EN__SHIFT 0x0
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICB_EN__SHIFT 0x8
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICC_EN__SHIFT 0x10
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICD_EN__SHIFT 0x14
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICE_EN__SHIFT 0x15
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICF_EN__SHIFT 0x16
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICG_EN__SHIFT 0x17
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICA_EN_MASK 0x00000001L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICB_EN_MASK 0x00000100L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICC_EN_MASK 0x00010000L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICD_EN_MASK 0x00100000L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICE_EN_MASK 0x00200000L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICF_EN_MASK 0x00400000L
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICG_EN_MASK 0x00800000L
+//DC_GPIO_GENERIC_Y
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICA_Y__SHIFT 0x0
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICB_Y__SHIFT 0x8
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICC_Y__SHIFT 0x10
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICD_Y__SHIFT 0x14
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICE_Y__SHIFT 0x15
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICF_Y__SHIFT 0x16
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICG_Y__SHIFT 0x17
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICA_Y_MASK 0x00000001L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICB_Y_MASK 0x00000100L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICC_Y_MASK 0x00010000L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICD_Y_MASK 0x00100000L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICE_Y_MASK 0x00200000L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICF_Y_MASK 0x00400000L
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICG_Y_MASK 0x00800000L
+//DC_GPIO_DDC1_MASK
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC1_MASK__AUX_PAD1_MODE__SHIFT 0x10
+#define DC_GPIO_DDC1_MASK__AUX1_POL__SHIFT 0x14
+#define DC_GPIO_DDC1_MASK__ALLOW_HW_DDC1_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC1_MASK__AUX_PAD1_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC1_MASK__AUX1_POL_MASK 0x00100000L
+#define DC_GPIO_DDC1_MASK__ALLOW_HW_DDC1_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_STR_MASK 0x0F000000L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_STR_MASK 0xF0000000L
+//DC_GPIO_DDC1_A
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A_MASK 0x00000100L
+//DC_GPIO_DDC1_EN
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1DATA_EN_MASK 0x00000100L
+//DC_GPIO_DDC1_Y
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1DATA_Y_MASK 0x00000100L
+//DC_GPIO_DDC2_MASK
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC2_MASK__AUX_PAD2_MODE__SHIFT 0x10
+#define DC_GPIO_DDC2_MASK__AUX2_POL__SHIFT 0x14
+#define DC_GPIO_DDC2_MASK__ALLOW_HW_DDC2_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC2_MASK__AUX_PAD2_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC2_MASK__AUX2_POL_MASK 0x00100000L
+#define DC_GPIO_DDC2_MASK__ALLOW_HW_DDC2_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_STR_MASK 0x0F000000L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_STR_MASK 0xF0000000L
+//DC_GPIO_DDC2_A
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2DATA_A_MASK 0x00000100L
+//DC_GPIO_DDC2_EN
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2DATA_EN_MASK 0x00000100L
+//DC_GPIO_DDC2_Y
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2DATA_Y_MASK 0x00000100L
+//DC_GPIO_DDC3_MASK
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC3_MASK__AUX_PAD3_MODE__SHIFT 0x10
+#define DC_GPIO_DDC3_MASK__AUX3_POL__SHIFT 0x14
+#define DC_GPIO_DDC3_MASK__ALLOW_HW_DDC3_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC3_MASK__AUX_PAD3_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC3_MASK__AUX3_POL_MASK 0x00100000L
+#define DC_GPIO_DDC3_MASK__ALLOW_HW_DDC3_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_STR_MASK 0x0F000000L
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_STR_MASK 0xF0000000L
+//DC_GPIO_DDC3_A
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3DATA_A_MASK 0x00000100L
+//DC_GPIO_DDC3_EN
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3DATA_EN_MASK 0x00000100L
+//DC_GPIO_DDC3_Y
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3DATA_Y_MASK 0x00000100L
+//DC_GPIO_DDC4_MASK
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC4_MASK__AUX_PAD4_MODE__SHIFT 0x10
+#define DC_GPIO_DDC4_MASK__AUX4_POL__SHIFT 0x14
+#define DC_GPIO_DDC4_MASK__ALLOW_HW_DDC4_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC4_MASK__AUX_PAD4_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC4_MASK__AUX4_POL_MASK 0x00100000L
+#define DC_GPIO_DDC4_MASK__ALLOW_HW_DDC4_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_STR_MASK 0x0F000000L
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_STR_MASK 0xF0000000L
+//DC_GPIO_DDC4_A
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4DATA_A_MASK 0x00000100L
+//DC_GPIO_DDC4_EN
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4DATA_EN_MASK 0x00000100L
+//DC_GPIO_DDC4_Y
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4DATA_Y_MASK 0x00000100L
+//DC_GPIO_DDC5_MASK
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC5_MASK__AUX_PAD5_MODE__SHIFT 0x10
+#define DC_GPIO_DDC5_MASK__AUX5_POL__SHIFT 0x14
+#define DC_GPIO_DDC5_MASK__ALLOW_HW_DDC5_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC5_MASK__AUX_PAD5_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC5_MASK__AUX5_POL_MASK 0x00100000L
+#define DC_GPIO_DDC5_MASK__ALLOW_HW_DDC5_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_STR_MASK 0x0F000000L
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_STR_MASK 0xF0000000L
+//DC_GPIO_DDC5_A
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5DATA_A_MASK 0x00000100L
+//DC_GPIO_DDC5_EN
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5DATA_EN_MASK 0x00000100L
+//DC_GPIO_DDC5_Y
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5DATA_Y_MASK 0x00000100L
+//DC_GPIO_DDCVGA_MASK
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDCVGA_MASK__DDCVGA_INVERT_INPUT_POLARITY__SHIFT 0x4
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDCVGA_MASK__AUX_PADVGA_MODE__SHIFT 0x10
+#define DC_GPIO_DDCVGA_MASK__AUXVGA_POL__SHIFT 0x14
+#define DC_GPIO_DDCVGA_MASK__ALLOW_HW_DDCVGA_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_STR__SHIFT 0x18
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDCVGA_MASK__DDCVGA_INVERT_INPUT_POLARITY_MASK 0x00000010L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDCVGA_MASK__AUX_PADVGA_MODE_MASK 0x00010000L
+#define DC_GPIO_DDCVGA_MASK__AUXVGA_POL_MASK 0x00100000L
+#define DC_GPIO_DDCVGA_MASK__ALLOW_HW_DDCVGA_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_STR_MASK 0x0F000000L
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_STR_MASK 0xF0000000L
+//DC_GPIO_DDCVGA_A
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGACLK_A__SHIFT 0x0
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGADATA_A__SHIFT 0x8
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGACLK_A_MASK 0x00000001L
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGADATA_A_MASK 0x00000100L
+//DC_GPIO_DDCVGA_EN
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGACLK_EN__SHIFT 0x0
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_EN__SHIFT 0x8
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGACLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_EN_MASK 0x00000100L
+//DC_GPIO_DDCVGA_Y
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGACLK_Y__SHIFT 0x0
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGADATA_Y__SHIFT 0x8
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGACLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGADATA_Y_MASK 0x00000100L
+//DC_GPIO_GENLK_MASK
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_MASK__SHIFT 0x0
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PD_DIS__SHIFT 0x1
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PU_EN__SHIFT 0x3
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_RECV__SHIFT 0x4
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_MASK__SHIFT 0x8
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PD_DIS__SHIFT 0x9
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PU_EN__SHIFT 0xb
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_RECV__SHIFT 0xc
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_MASK__SHIFT 0x10
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PD_DIS__SHIFT 0x11
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PU_EN__SHIFT 0x13
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_RECV__SHIFT 0x14
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_MASK__SHIFT 0x18
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PD_DIS__SHIFT 0x19
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PU_EN__SHIFT 0x1b
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_RECV__SHIFT 0x1c
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PD_DIS_MASK 0x00000002L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PU_EN_MASK 0x00000008L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_RECV_MASK 0x00000030L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_MASK_MASK 0x00000100L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PD_DIS_MASK 0x00000200L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PU_EN_MASK 0x00000800L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_RECV_MASK 0x00003000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_MASK_MASK 0x00010000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PD_DIS_MASK 0x00020000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PU_EN_MASK 0x00080000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_RECV_MASK 0x00300000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_MASK_MASK 0x01000000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PD_DIS_MASK 0x02000000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PU_EN_MASK 0x08000000L
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_RECV_MASK 0x30000000L
+//DC_GPIO_GENLK_A
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A__SHIFT 0x0
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A__SHIFT 0x8
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A__SHIFT 0x10
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A__SHIFT 0x18
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK 0x00000001L
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK 0x00000100L
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK 0x00010000L
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK 0x01000000L
+//DC_GPIO_GENLK_EN
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_CLK_EN__SHIFT 0x0
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_VSYNC_EN__SHIFT 0x8
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_A_EN__SHIFT 0x10
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_B_EN__SHIFT 0x18
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_CLK_EN_MASK 0x00000001L
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_VSYNC_EN_MASK 0x00000100L
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_A_EN_MASK 0x00010000L
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_B_EN_MASK 0x01000000L
+//DC_GPIO_GENLK_Y
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_CLK_Y__SHIFT 0x0
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_VSYNC_Y__SHIFT 0x8
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_A_Y__SHIFT 0x10
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_B_Y__SHIFT 0x18
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_CLK_Y_MASK 0x00000001L
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_VSYNC_Y_MASK 0x00000100L
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_A_Y_MASK 0x00010000L
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_B_Y_MASK 0x01000000L
+//DC_GPIO_HPD_MASK
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_MASK__SHIFT 0x0
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_PD_DIS__SHIFT 0x4
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_RECV__SHIFT 0x6
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_MASK__SHIFT 0x8
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_PD_DIS__SHIFT 0x9
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_RECV__SHIFT 0xa
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_MASK__SHIFT 0x10
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_PD_DIS__SHIFT 0x11
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_RECV__SHIFT 0x12
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_MASK__SHIFT 0x14
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_PD_DIS__SHIFT 0x15
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_RECV__SHIFT 0x16
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_MASK__SHIFT 0x18
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_PD_DIS__SHIFT 0x19
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_RECV__SHIFT 0x1a
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_MASK__SHIFT 0x1c
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_PD_DIS__SHIFT 0x1d
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_RECV__SHIFT 0x1e
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_MASK_MASK 0x00000001L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_PD_DIS_MASK 0x00000010L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_RECV_MASK 0x000000C0L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_MASK_MASK 0x00000100L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_PD_DIS_MASK 0x00000200L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_RECV_MASK 0x00000C00L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_MASK_MASK 0x00010000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_PD_DIS_MASK 0x00020000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_RECV_MASK 0x000C0000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_MASK_MASK 0x00100000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_PD_DIS_MASK 0x00200000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_RECV_MASK 0x00C00000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_MASK_MASK 0x01000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_PD_DIS_MASK 0x02000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_RECV_MASK 0x0C000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_MASK_MASK 0x10000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_PD_DIS_MASK 0x20000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_RECV_MASK 0xC0000000L
+//DC_GPIO_HPD_A
+#define DC_GPIO_HPD_A__DC_GPIO_HPD1_A__SHIFT 0x0
+#define DC_GPIO_HPD_A__DC_GPIO_HPD2_A__SHIFT 0x8
+#define DC_GPIO_HPD_A__DC_GPIO_HPD3_A__SHIFT 0x10
+#define DC_GPIO_HPD_A__DC_GPIO_HPD4_A__SHIFT 0x18
+#define DC_GPIO_HPD_A__DC_GPIO_HPD5_A__SHIFT 0x1a
+#define DC_GPIO_HPD_A__DC_GPIO_HPD6_A__SHIFT 0x1c
+#define DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK 0x00000001L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK 0x00000100L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK 0x00010000L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK 0x01000000L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK 0x04000000L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK 0x10000000L
+//DC_GPIO_HPD_EN
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD1_EN__SHIFT 0x0
+#define DC_GPIO_HPD_EN__HPD1_SCHMEN_PI__SHIFT 0x1
+#define DC_GPIO_HPD_EN__HPD1_SLEWNCORE__SHIFT 0x2
+#define DC_GPIO_HPD_EN__HPD12_SPARE0__SHIFT 0x5
+#define DC_GPIO_HPD_EN__HPD1_SEL0__SHIFT 0x6
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD2_EN__SHIFT 0x8
+#define DC_GPIO_HPD_EN__HPD2_SCHMEN_PI__SHIFT 0x9
+#define DC_GPIO_HPD_EN__HPD12_SPARE1__SHIFT 0xa
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD3_EN__SHIFT 0x10
+#define DC_GPIO_HPD_EN__HPD3_SCHMEN_PI__SHIFT 0x11
+#define DC_GPIO_HPD_EN__HPD34_SPARE0__SHIFT 0x12
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD4_EN__SHIFT 0x14
+#define DC_GPIO_HPD_EN__HPD4_SCHMEN_PI__SHIFT 0x15
+#define DC_GPIO_HPD_EN__HPD34_SPARE1__SHIFT 0x16
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD5_EN__SHIFT 0x18
+#define DC_GPIO_HPD_EN__HPD5_SCHMEN_PI__SHIFT 0x19
+#define DC_GPIO_HPD_EN__HPD56_SPARE0__SHIFT 0x1a
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD6_EN__SHIFT 0x1c
+#define DC_GPIO_HPD_EN__HPD6_SCHMEN_PI__SHIFT 0x1d
+#define DC_GPIO_HPD_EN__HPD56_SPARE1__SHIFT 0x1e
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD1_EN_MASK 0x00000001L
+#define DC_GPIO_HPD_EN__HPD1_SCHMEN_PI_MASK 0x00000002L
+#define DC_GPIO_HPD_EN__HPD1_SLEWNCORE_MASK 0x00000004L
+#define DC_GPIO_HPD_EN__HPD12_SPARE0_MASK 0x00000020L
+#define DC_GPIO_HPD_EN__HPD1_SEL0_MASK 0x00000040L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD2_EN_MASK 0x00000100L
+#define DC_GPIO_HPD_EN__HPD2_SCHMEN_PI_MASK 0x00000200L
+#define DC_GPIO_HPD_EN__HPD12_SPARE1_MASK 0x00000400L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD3_EN_MASK 0x00010000L
+#define DC_GPIO_HPD_EN__HPD3_SCHMEN_PI_MASK 0x00020000L
+#define DC_GPIO_HPD_EN__HPD34_SPARE0_MASK 0x00040000L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD4_EN_MASK 0x00100000L
+#define DC_GPIO_HPD_EN__HPD4_SCHMEN_PI_MASK 0x00200000L
+#define DC_GPIO_HPD_EN__HPD34_SPARE1_MASK 0x00400000L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD5_EN_MASK 0x01000000L
+#define DC_GPIO_HPD_EN__HPD5_SCHMEN_PI_MASK 0x02000000L
+#define DC_GPIO_HPD_EN__HPD56_SPARE0_MASK 0x04000000L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD6_EN_MASK 0x10000000L
+#define DC_GPIO_HPD_EN__HPD6_SCHMEN_PI_MASK 0x20000000L
+#define DC_GPIO_HPD_EN__HPD56_SPARE1_MASK 0x40000000L
+//DC_GPIO_HPD_Y
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD1_Y__SHIFT 0x0
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD2_Y__SHIFT 0x8
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD3_Y__SHIFT 0x10
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD4_Y__SHIFT 0x18
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD5_Y__SHIFT 0x1a
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD6_Y__SHIFT 0x1c
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD1_Y_MASK 0x00000001L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD2_Y_MASK 0x00000100L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD3_Y_MASK 0x00010000L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD4_Y_MASK 0x01000000L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD5_Y_MASK 0x04000000L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD6_Y_MASK 0x10000000L
+//DC_GPIO_DRIVE_STRENGTH_S0
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICA_S0__SHIFT 0x0
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICB_S0__SHIFT 0x1
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICC_S0__SHIFT 0x2
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICD_S0__SHIFT 0x3
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICE_S0__SHIFT 0x4
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICF_S0__SHIFT 0x5
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICG_S0__SHIFT 0x6
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENLK_CLK_S0__SHIFT 0x8
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENLK_VSYNC_S0__SHIFT 0x9
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_SWAPLOCK_A_S0__SHIFT 0xa
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_SWAPLOCK_B_S0__SHIFT 0xb
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICA_S0_MASK 0x00000001L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICB_S0_MASK 0x00000002L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICC_S0_MASK 0x00000004L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICD_S0_MASK 0x00000008L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICE_S0_MASK 0x00000010L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICF_S0_MASK 0x00000020L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENERICG_S0_MASK 0x00000040L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENLK_CLK_S0_MASK 0x00000100L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_GENLK_VSYNC_S0_MASK 0x00000200L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_SWAPLOCK_A_S0_MASK 0x00000400L
+#define DC_GPIO_DRIVE_STRENGTH_S0__DC_GPIO_SWAPLOCK_B_S0_MASK 0x00000800L
+//DC_GPIO_DRIVE_STRENGTH_S1
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICA_S1__SHIFT 0x0
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICB_S1__SHIFT 0x1
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICC_S1__SHIFT 0x2
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICD_S1__SHIFT 0x3
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICE_S1__SHIFT 0x4
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICF_S1__SHIFT 0x5
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICG_S1__SHIFT 0x6
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENLK_CLK_S1__SHIFT 0x8
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENLK_VSYNC_S1__SHIFT 0x9
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_SWAPLOCK_A_S1__SHIFT 0xa
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_SWAPLOCK_B_S1__SHIFT 0xb
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICA_S1_MASK 0x00000001L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICB_S1_MASK 0x00000002L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICC_S1_MASK 0x00000004L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICD_S1_MASK 0x00000008L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICE_S1_MASK 0x00000010L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICF_S1_MASK 0x00000020L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENERICG_S1_MASK 0x00000040L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENLK_CLK_S1_MASK 0x00000100L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_GENLK_VSYNC_S1_MASK 0x00000200L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_SWAPLOCK_A_S1_MASK 0x00000400L
+#define DC_GPIO_DRIVE_STRENGTH_S1__DC_GPIO_SWAPLOCK_B_S1_MASK 0x00000800L
+//DC_GPIO_PWRSEQ0_EN
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_VARY_BL_OTG_VSYNC_EN__SHIFT 0x14
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_VARY_BL_OTG_VSYNC_SEL__SHIFT 0x15
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_BLON_OTG_VSYNC_EN__SHIFT 0x19
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_BLON_OTG_VSYNC_SEL__SHIFT 0x1a
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_VARY_BL_GENERICA_EN__SHIFT 0x1d
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_VARY_BL_OTG_VSYNC_EN_MASK 0x00100000L
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_VARY_BL_OTG_VSYNC_SEL_MASK 0x00E00000L
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_BLON_OTG_VSYNC_EN_MASK 0x02000000L
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_BLON_OTG_VSYNC_SEL_MASK 0x1C000000L
+#define DC_GPIO_PWRSEQ0_EN__DC_GPIO_VARY_BL_GENERICA_EN_MASK 0x20000000L
+//DC_GPIO_PAD_STRENGTH_1
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SN__SHIFT 0x0
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT 0x4
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SN__SHIFT 0x10
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SP__SHIFT 0x14
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SN__SHIFT 0x18
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SP__SHIFT 0x1c
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SN_MASK 0x0000000FL
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK 0x000000F0L
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SN_MASK 0x000F0000L
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SP_MASK 0x00F00000L
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SN_MASK 0x0F000000L
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SP_MASK 0xF0000000L
+//DC_GPIO_PAD_STRENGTH_2
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SN__SHIFT 0x0
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SP__SHIFT 0x4
+#define DC_GPIO_PAD_STRENGTH_2__EXT_RESET_DRVSTRENGTH__SHIFT 0x8
+#define DC_GPIO_PAD_STRENGTH_2__REF_27_DRVSTRENGTH__SHIFT 0xc
+#define DC_GPIO_PAD_STRENGTH_2__REF_27_SRC_SEL__SHIFT 0x1e
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SN_MASK 0x0000000FL
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SP_MASK 0x000000F0L
+#define DC_GPIO_PAD_STRENGTH_2__EXT_RESET_DRVSTRENGTH_MASK 0x00000700L
+#define DC_GPIO_PAD_STRENGTH_2__REF_27_DRVSTRENGTH_MASK 0x00007000L
+#define DC_GPIO_PAD_STRENGTH_2__REF_27_SRC_SEL_MASK 0xC0000000L
+//PHY_AUX_CNTL
+#define PHY_AUX_CNTL__AUX_PAD_WAKE__SHIFT 0x9
+#define PHY_AUX_CNTL__AUX1_PAD_RXSEL__SHIFT 0xa
+#define PHY_AUX_CNTL__AUX2_PAD_RXSEL__SHIFT 0xc
+#define PHY_AUX_CNTL__AUX3_PAD_RXSEL__SHIFT 0xe
+#define PHY_AUX_CNTL__AUX4_PAD_RXSEL__SHIFT 0x10
+#define PHY_AUX_CNTL__AUX5_PAD_RXSEL__SHIFT 0x12
+#define PHY_AUX_CNTL__AUX6_PAD_RXSEL__SHIFT 0x14
+#define PHY_AUX_CNTL__AUX_PAD_WAKE_MASK 0x00000200L
+#define PHY_AUX_CNTL__AUX1_PAD_RXSEL_MASK 0x00000C00L
+#define PHY_AUX_CNTL__AUX2_PAD_RXSEL_MASK 0x00003000L
+#define PHY_AUX_CNTL__AUX3_PAD_RXSEL_MASK 0x0000C000L
+#define PHY_AUX_CNTL__AUX4_PAD_RXSEL_MASK 0x00030000L
+#define PHY_AUX_CNTL__AUX5_PAD_RXSEL_MASK 0x000C0000L
+#define PHY_AUX_CNTL__AUX6_PAD_RXSEL_MASK 0x00300000L
+//DC_GPIO_DRIVE_TXIMPSEL
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICA_TXIMPSEL__SHIFT 0x0
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICB_TXIMPSEL__SHIFT 0x1
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICC_TXIMPSEL__SHIFT 0x2
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICD_TXIMPSEL__SHIFT 0x3
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICE_TXIMPSEL__SHIFT 0x4
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICF_TXIMPSEL__SHIFT 0x5
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICG_TXIMPSEL__SHIFT 0x6
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENLK_CLK_TXIMPSEL__SHIFT 0x8
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENLK_VSYNC_TXIMPSEL__SHIFT 0x9
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_SWAPLOCK_A_TXIMPSEL__SHIFT 0xa
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_SWAPLOCK_B_TXIMPSEL__SHIFT 0xb
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD1_TXIMPSEL__SHIFT 0xc
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD2_TXIMPSEL__SHIFT 0xd
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD3_TXIMPSEL__SHIFT 0xe
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD4_TXIMPSEL__SHIFT 0xf
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD5_TXIMPSEL__SHIFT 0x10
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD6_TXIMPSEL__SHIFT 0x11
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICA_TXIMPSEL_MASK 0x00000001L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICB_TXIMPSEL_MASK 0x00000002L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICC_TXIMPSEL_MASK 0x00000004L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICD_TXIMPSEL_MASK 0x00000008L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICE_TXIMPSEL_MASK 0x00000010L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICF_TXIMPSEL_MASK 0x00000020L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENERICG_TXIMPSEL_MASK 0x00000040L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENLK_CLK_TXIMPSEL_MASK 0x00000100L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_GENLK_VSYNC_TXIMPSEL_MASK 0x00000200L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_SWAPLOCK_A_TXIMPSEL_MASK 0x00000400L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_SWAPLOCK_B_TXIMPSEL_MASK 0x00000800L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD1_TXIMPSEL_MASK 0x00001000L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD2_TXIMPSEL_MASK 0x00002000L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD3_TXIMPSEL_MASK 0x00004000L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD4_TXIMPSEL_MASK 0x00008000L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD5_TXIMPSEL_MASK 0x00010000L
+#define DC_GPIO_DRIVE_TXIMPSEL__DC_GPIO_HPD6_TXIMPSEL_MASK 0x00020000L
+//DC_GPIO_PWRSEQ1_EN
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_VARY_BL_OTG_VSYNC_EN__SHIFT 0x14
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_VARY_BL_OTG_VSYNC_SEL__SHIFT 0x15
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_BLON_OTG_VSYNC_EN__SHIFT 0x19
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_BLON_OTG_VSYNC_SEL__SHIFT 0x1a
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_VARY_BL_GENERICA_EN__SHIFT 0x1d
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_VARY_BL_OTG_VSYNC_EN_MASK 0x00100000L
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_VARY_BL_OTG_VSYNC_SEL_MASK 0x00E00000L
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_BLON_OTG_VSYNC_EN_MASK 0x02000000L
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_BLON_OTG_VSYNC_SEL_MASK 0x1C000000L
+#define DC_GPIO_PWRSEQ1_EN__DC_GPIO_VARY_BL_GENERICA_EN_MASK 0x20000000L
+//DC_GPIO_TX12_EN
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICA_TX12_EN__SHIFT 0x3
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICB_TX12_EN__SHIFT 0x4
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICC_TX12_EN__SHIFT 0x5
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICD_TX12_EN__SHIFT 0x6
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICE_TX12_EN__SHIFT 0x7
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICF_TX12_EN__SHIFT 0x8
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICG_TX12_EN__SHIFT 0x9
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICA_TX12_EN_MASK 0x00000008L
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICB_TX12_EN_MASK 0x00000010L
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICC_TX12_EN_MASK 0x00000020L
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICD_TX12_EN_MASK 0x00000040L
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICE_TX12_EN_MASK 0x00000080L
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICF_TX12_EN_MASK 0x00000100L
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICG_TX12_EN_MASK 0x00000200L
+//DC_GPIO_AUX_CTRL_0
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_FALLSLEWSEL__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_FALLSLEWSEL__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_FALLSLEWSEL__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_FALLSLEWSEL__SHIFT 0x6
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_FALLSLEWSEL__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_FALLSLEWSEL__SHIFT 0xa
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_DDCVGA_FALLSLEWSEL__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_SPIKERCEN__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_SPIKERCEN__SHIFT 0x11
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_SPIKERCEN__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_SPIKERCEN__SHIFT 0x13
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_SPIKERCEN__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_SPIKERCEN__SHIFT 0x15
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_DDCVGA_SPIKERCEN__SHIFT 0x16
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_SPIKERCSEL__SHIFT 0x18
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_SPIKERCSEL__SHIFT 0x19
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_SPIKERCSEL__SHIFT 0x1a
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_SPIKERCSEL__SHIFT 0x1b
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_SPIKERCSEL__SHIFT 0x1c
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_SPIKERCSEL__SHIFT 0x1d
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_DDCVGA_SPIKERCSEL__SHIFT 0x1e
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_FALLSLEWSEL_MASK 0x00000003L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_FALLSLEWSEL_MASK 0x0000000CL
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_FALLSLEWSEL_MASK 0x00000030L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_FALLSLEWSEL_MASK 0x000000C0L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_FALLSLEWSEL_MASK 0x00000300L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_FALLSLEWSEL_MASK 0x00000C00L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_DDCVGA_FALLSLEWSEL_MASK 0x00003000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_SPIKERCEN_MASK 0x00010000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_SPIKERCEN_MASK 0x00020000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_SPIKERCEN_MASK 0x00040000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_SPIKERCEN_MASK 0x00080000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_SPIKERCEN_MASK 0x00100000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_SPIKERCEN_MASK 0x00200000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_DDCVGA_SPIKERCEN_MASK 0x00C00000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_SPIKERCSEL_MASK 0x01000000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_SPIKERCSEL_MASK 0x02000000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_SPIKERCSEL_MASK 0x04000000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_SPIKERCSEL_MASK 0x08000000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_SPIKERCSEL_MASK 0x10000000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_SPIKERCSEL_MASK 0x20000000L
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_DDCVGA_SPIKERCSEL_MASK 0xC0000000L
+//DC_GPIO_AUX_CTRL_1
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_CSEL_0P9__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_CSEL_1P1__SHIFT 0x1
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RSEL_0P9__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RSEL_1P1__SHIFT 0x3
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_CSEL_0P9__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_CSEL_1P1__SHIFT 0x5
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RSEL_0P9__SHIFT 0x6
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RSEL_1P1__SHIFT 0x7
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_BIASCRTEN__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RESBIASEN__SHIFT 0xa
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RESBIASEN__SHIFT 0xb
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_COMPSEL__SHIFT 0xd
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_SPARE__SHIFT 0xe
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_BIASCRTEN__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_SLEWN__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_RXSEL__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_COMPSEL__SHIFT 0x19
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_COMPSEL__SHIFT 0x1a
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_COMPSEL__SHIFT 0x1b
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_COMPSEL__SHIFT 0x1c
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_COMPSEL__SHIFT 0x1d
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_COMPSEL__SHIFT 0x1e
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_CSEL_0P9_MASK 0x00000001L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_CSEL_1P1_MASK 0x00000002L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RSEL_0P9_MASK 0x00000004L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RSEL_1P1_MASK 0x00000008L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_CSEL_0P9_MASK 0x00000010L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_CSEL_1P1_MASK 0x00000020L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RSEL_0P9_MASK 0x00000040L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RSEL_1P1_MASK 0x00000080L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_BIASCRTEN_MASK 0x00000100L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RESBIASEN_MASK 0x00000400L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RESBIASEN_MASK 0x00001800L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_COMPSEL_MASK 0x00002000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_SPARE_MASK 0x0000C000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_BIASCRTEN_MASK 0x00030000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_SLEWN_MASK 0x000C0000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_RXSEL_MASK 0x00300000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_COMPSEL_MASK 0x02000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_COMPSEL_MASK 0x04000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_COMPSEL_MASK 0x08000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_COMPSEL_MASK 0x10000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_COMPSEL_MASK 0x20000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_COMPSEL_MASK 0xC0000000L
+//DC_GPIO_AUX_CTRL_2
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_FALLSLEWSEL__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_FALLSLEWSEL__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_FALLSLEWSEL__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SPIKERCEN__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_SPIKERCEN__SHIFT 0x9
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_SPIKERCEN__SHIFT 0xa
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SPIKERCSEL__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_SPIKERCSEL__SHIFT 0xd
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_SPIKERCSEL__SHIFT 0xe
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_CSEL_0P9__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_CSEL_1P1__SHIFT 0x11
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RSEL_0P9__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RSEL_1P1__SHIFT 0x13
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_BIASCRTEN__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SLEWN__SHIFT 0x18
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_SLEWN__SHIFT 0x19
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_SLEWN__SHIFT 0x1a
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RESBIASEN__SHIFT 0x1b
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_COMPSEL__SHIFT 0x1c
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_COMPSEL__SHIFT 0x1d
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_COMPSEL__SHIFT 0x1e
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_FALLSLEWSEL_MASK 0x00000003L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_FALLSLEWSEL_MASK 0x0000000CL
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_FALLSLEWSEL_MASK 0x00000030L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SPIKERCEN_MASK 0x00000100L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_SPIKERCEN_MASK 0x00000200L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_SPIKERCEN_MASK 0x00000400L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SPIKERCSEL_MASK 0x00001000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_SPIKERCSEL_MASK 0x00002000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_SPIKERCSEL_MASK 0x00004000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_CSEL_0P9_MASK 0x00010000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_CSEL_1P1_MASK 0x00020000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RSEL_0P9_MASK 0x00040000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RSEL_1P1_MASK 0x00080000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_BIASCRTEN_MASK 0x00100000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SLEWN_MASK 0x01000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_SLEWN_MASK 0x02000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_SLEWN_MASK 0x04000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RESBIASEN_MASK 0x08000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_COMPSEL_MASK 0x10000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD34_COMPSEL_MASK 0x20000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD56_COMPSEL_MASK 0x40000000L
+//DC_GPIO_RXEN
+#define DC_GPIO_RXEN__DC_GPIO_GENERICA_RXEN__SHIFT 0x0
+#define DC_GPIO_RXEN__DC_GPIO_GENERICB_RXEN__SHIFT 0x1
+#define DC_GPIO_RXEN__DC_GPIO_GENERICC_RXEN__SHIFT 0x2
+#define DC_GPIO_RXEN__DC_GPIO_GENERICD_RXEN__SHIFT 0x3
+#define DC_GPIO_RXEN__DC_GPIO_GENERICE_RXEN__SHIFT 0x4
+#define DC_GPIO_RXEN__DC_GPIO_GENERICF_RXEN__SHIFT 0x5
+#define DC_GPIO_RXEN__DC_GPIO_GENERICG_RXEN__SHIFT 0x6
+#define DC_GPIO_RXEN__DC_GPIO_HSYNCA_RXEN__SHIFT 0x8
+#define DC_GPIO_RXEN__DC_GPIO_VSYNCA_RXEN__SHIFT 0x9
+#define DC_GPIO_RXEN__DC_GPIO_GENLK_CLK_RXEN__SHIFT 0xa
+#define DC_GPIO_RXEN__DC_GPIO_GENLK_VSYNC_RXEN__SHIFT 0xb
+#define DC_GPIO_RXEN__DC_GPIO_SWAPLOCK_A_RXEN__SHIFT 0xc
+#define DC_GPIO_RXEN__DC_GPIO_SWAPLOCK_B_RXEN__SHIFT 0xd
+#define DC_GPIO_RXEN__DC_GPIO_HPD1_RXEN__SHIFT 0xe
+#define DC_GPIO_RXEN__DC_GPIO_HPD2_RXEN__SHIFT 0xf
+#define DC_GPIO_RXEN__DC_GPIO_HPD3_RXEN__SHIFT 0x10
+#define DC_GPIO_RXEN__DC_GPIO_HPD4_RXEN__SHIFT 0x11
+#define DC_GPIO_RXEN__DC_GPIO_HPD5_RXEN__SHIFT 0x12
+#define DC_GPIO_RXEN__DC_GPIO_HPD6_RXEN__SHIFT 0x13
+#define DC_GPIO_RXEN__DC_GPIO_GENERICA_RXEN_MASK 0x00000001L
+#define DC_GPIO_RXEN__DC_GPIO_GENERICB_RXEN_MASK 0x00000002L
+#define DC_GPIO_RXEN__DC_GPIO_GENERICC_RXEN_MASK 0x00000004L
+#define DC_GPIO_RXEN__DC_GPIO_GENERICD_RXEN_MASK 0x00000008L
+#define DC_GPIO_RXEN__DC_GPIO_GENERICE_RXEN_MASK 0x00000010L
+#define DC_GPIO_RXEN__DC_GPIO_GENERICF_RXEN_MASK 0x00000020L
+#define DC_GPIO_RXEN__DC_GPIO_GENERICG_RXEN_MASK 0x00000040L
+#define DC_GPIO_RXEN__DC_GPIO_HSYNCA_RXEN_MASK 0x00000100L
+#define DC_GPIO_RXEN__DC_GPIO_VSYNCA_RXEN_MASK 0x00000200L
+#define DC_GPIO_RXEN__DC_GPIO_GENLK_CLK_RXEN_MASK 0x00000400L
+#define DC_GPIO_RXEN__DC_GPIO_GENLK_VSYNC_RXEN_MASK 0x00000800L
+#define DC_GPIO_RXEN__DC_GPIO_SWAPLOCK_A_RXEN_MASK 0x00001000L
+#define DC_GPIO_RXEN__DC_GPIO_SWAPLOCK_B_RXEN_MASK 0x00002000L
+#define DC_GPIO_RXEN__DC_GPIO_HPD1_RXEN_MASK 0x00004000L
+#define DC_GPIO_RXEN__DC_GPIO_HPD2_RXEN_MASK 0x00008000L
+#define DC_GPIO_RXEN__DC_GPIO_HPD3_RXEN_MASK 0x00010000L
+#define DC_GPIO_RXEN__DC_GPIO_HPD4_RXEN_MASK 0x00020000L
+#define DC_GPIO_RXEN__DC_GPIO_HPD5_RXEN_MASK 0x00040000L
+#define DC_GPIO_RXEN__DC_GPIO_HPD6_RXEN_MASK 0x00080000L
+//DC_GPIO_PULLUPEN
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICA_PU_EN__SHIFT 0x0
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICB_PU_EN__SHIFT 0x1
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICC_PU_EN__SHIFT 0x2
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICD_PU_EN__SHIFT 0x3
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICE_PU_EN__SHIFT 0x4
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICF_PU_EN__SHIFT 0x5
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICG_PU_EN__SHIFT 0x6
+#define DC_GPIO_PULLUPEN__DC_GPIO_HSYNCA_PU_EN__SHIFT 0x8
+#define DC_GPIO_PULLUPEN__DC_GPIO_VSYNCA_PU_EN__SHIFT 0x9
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD1_PU_EN__SHIFT 0xe
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD2_PU_EN__SHIFT 0xf
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD3_PU_EN__SHIFT 0x10
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD4_PU_EN__SHIFT 0x11
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD5_PU_EN__SHIFT 0x12
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD6_PU_EN__SHIFT 0x13
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICA_PU_EN_MASK 0x00000001L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICB_PU_EN_MASK 0x00000002L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICC_PU_EN_MASK 0x00000004L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICD_PU_EN_MASK 0x00000008L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICE_PU_EN_MASK 0x00000010L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICF_PU_EN_MASK 0x00000020L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICG_PU_EN_MASK 0x00000040L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HSYNCA_PU_EN_MASK 0x00000100L
+#define DC_GPIO_PULLUPEN__DC_GPIO_VSYNCA_PU_EN_MASK 0x00000200L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD1_PU_EN_MASK 0x00004000L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD2_PU_EN_MASK 0x00008000L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD3_PU_EN_MASK 0x00010000L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD4_PU_EN_MASK 0x00020000L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD5_PU_EN_MASK 0x00040000L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD6_PU_EN_MASK 0x00080000L
+//DC_GPIO_AUX_CTRL_3
+#define DC_GPIO_AUX_CTRL_3__AUX1_NEN_RTERM__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_3__AUX2_NEN_RTERM__SHIFT 0x1
+#define DC_GPIO_AUX_CTRL_3__AUX3_NEN_RTERM__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_3__AUX4_NEN_RTERM__SHIFT 0x3
+#define DC_GPIO_AUX_CTRL_3__AUX5_NEN_RTERM__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_3__AUX6_NEN_RTERM__SHIFT 0x5
+#define DC_GPIO_AUX_CTRL_3__AUX1_DP_DN_SWAP__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_3__AUX2_DP_DN_SWAP__SHIFT 0x9
+#define DC_GPIO_AUX_CTRL_3__AUX3_DP_DN_SWAP__SHIFT 0xa
+#define DC_GPIO_AUX_CTRL_3__AUX4_DP_DN_SWAP__SHIFT 0xb
+#define DC_GPIO_AUX_CTRL_3__AUX5_DP_DN_SWAP__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_3__AUX6_DP_DN_SWAP__SHIFT 0xd
+#define DC_GPIO_AUX_CTRL_3__AUX1_HYS_TUNE__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_3__AUX2_HYS_TUNE__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_3__AUX3_HYS_TUNE__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_3__AUX4_HYS_TUNE__SHIFT 0x16
+#define DC_GPIO_AUX_CTRL_3__AUX5_HYS_TUNE__SHIFT 0x18
+#define DC_GPIO_AUX_CTRL_3__AUX6_HYS_TUNE__SHIFT 0x1a
+#define DC_GPIO_AUX_CTRL_3__AUX1_NEN_RTERM_MASK 0x00000001L
+#define DC_GPIO_AUX_CTRL_3__AUX2_NEN_RTERM_MASK 0x00000002L
+#define DC_GPIO_AUX_CTRL_3__AUX3_NEN_RTERM_MASK 0x00000004L
+#define DC_GPIO_AUX_CTRL_3__AUX4_NEN_RTERM_MASK 0x00000008L
+#define DC_GPIO_AUX_CTRL_3__AUX5_NEN_RTERM_MASK 0x00000010L
+#define DC_GPIO_AUX_CTRL_3__AUX6_NEN_RTERM_MASK 0x00000020L
+#define DC_GPIO_AUX_CTRL_3__AUX1_DP_DN_SWAP_MASK 0x00000100L
+#define DC_GPIO_AUX_CTRL_3__AUX2_DP_DN_SWAP_MASK 0x00000200L
+#define DC_GPIO_AUX_CTRL_3__AUX3_DP_DN_SWAP_MASK 0x00000400L
+#define DC_GPIO_AUX_CTRL_3__AUX4_DP_DN_SWAP_MASK 0x00000800L
+#define DC_GPIO_AUX_CTRL_3__AUX5_DP_DN_SWAP_MASK 0x00001000L
+#define DC_GPIO_AUX_CTRL_3__AUX6_DP_DN_SWAP_MASK 0x00002000L
+#define DC_GPIO_AUX_CTRL_3__AUX1_HYS_TUNE_MASK 0x00030000L
+#define DC_GPIO_AUX_CTRL_3__AUX2_HYS_TUNE_MASK 0x000C0000L
+#define DC_GPIO_AUX_CTRL_3__AUX3_HYS_TUNE_MASK 0x00300000L
+#define DC_GPIO_AUX_CTRL_3__AUX4_HYS_TUNE_MASK 0x00C00000L
+#define DC_GPIO_AUX_CTRL_3__AUX5_HYS_TUNE_MASK 0x03000000L
+#define DC_GPIO_AUX_CTRL_3__AUX6_HYS_TUNE_MASK 0x0C000000L
+//DC_GPIO_AUX_CTRL_4
+#define DC_GPIO_AUX_CTRL_4__AUX1_AUX_CTRL__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_4__AUX2_AUX_CTRL__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_4__AUX3_AUX_CTRL__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_4__AUX4_AUX_CTRL__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_4__AUX5_AUX_CTRL__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_4__AUX6_AUX_CTRL__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_4__AUX1_AUX_CTRL_MASK 0x0000000FL
+#define DC_GPIO_AUX_CTRL_4__AUX2_AUX_CTRL_MASK 0x000000F0L
+#define DC_GPIO_AUX_CTRL_4__AUX3_AUX_CTRL_MASK 0x00000F00L
+#define DC_GPIO_AUX_CTRL_4__AUX4_AUX_CTRL_MASK 0x0000F000L
+#define DC_GPIO_AUX_CTRL_4__AUX5_AUX_CTRL_MASK 0x000F0000L
+#define DC_GPIO_AUX_CTRL_4__AUX6_AUX_CTRL_MASK 0x00F00000L
+//DC_GPIO_AUX_CTRL_5
+#define DC_GPIO_AUX_CTRL_5__AUX1_VOD_TUNE__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_5__AUX2_VOD_TUNE__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_5__AUX3_VOD_TUNE__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_5__AUX4_VOD_TUNE__SHIFT 0x6
+#define DC_GPIO_AUX_CTRL_5__AUX5_VOD_TUNE__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_5__AUX6_VOD_TUNE__SHIFT 0xa
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD1_I2CMODE__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD2_I2CMODE__SHIFT 0xd
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD3_I2CMODE__SHIFT 0xe
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD4_I2CMODE__SHIFT 0xf
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD5_I2CMODE__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD6_I2CMODE__SHIFT 0x11
+#define DC_GPIO_AUX_CTRL_5__DDC1_I2C_VPH_1V2_EN__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_5__DDC2_I2C_VPH_1V2_EN__SHIFT 0x13
+#define DC_GPIO_AUX_CTRL_5__DDC3_I2C_VPH_1V2_EN__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_5__DDC4_I2C_VPH_1V2_EN__SHIFT 0x15
+#define DC_GPIO_AUX_CTRL_5__DDC5_I2C_VPH_1V2_EN__SHIFT 0x16
+#define DC_GPIO_AUX_CTRL_5__DDC6_I2C_VPH_1V2_EN__SHIFT 0x17
+#define DC_GPIO_AUX_CTRL_5__DDC1_PAD_I2C_CTRL__SHIFT 0x18
+#define DC_GPIO_AUX_CTRL_5__DDC2_PAD_I2C_CTRL__SHIFT 0x19
+#define DC_GPIO_AUX_CTRL_5__DDC3_PAD_I2C_CTRL__SHIFT 0x1a
+#define DC_GPIO_AUX_CTRL_5__DDC4_PAD_I2C_CTRL__SHIFT 0x1b
+#define DC_GPIO_AUX_CTRL_5__DDC5_PAD_I2C_CTRL__SHIFT 0x1c
+#define DC_GPIO_AUX_CTRL_5__DDC6_PAD_I2C_CTRL__SHIFT 0x1d
+#define DC_GPIO_AUX_CTRL_5__AUX1_VOD_TUNE_MASK 0x00000003L
+#define DC_GPIO_AUX_CTRL_5__AUX2_VOD_TUNE_MASK 0x0000000CL
+#define DC_GPIO_AUX_CTRL_5__AUX3_VOD_TUNE_MASK 0x00000030L
+#define DC_GPIO_AUX_CTRL_5__AUX4_VOD_TUNE_MASK 0x000000C0L
+#define DC_GPIO_AUX_CTRL_5__AUX5_VOD_TUNE_MASK 0x00000300L
+#define DC_GPIO_AUX_CTRL_5__AUX6_VOD_TUNE_MASK 0x00000C00L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD1_I2CMODE_MASK 0x00001000L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD2_I2CMODE_MASK 0x00002000L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD3_I2CMODE_MASK 0x00004000L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD4_I2CMODE_MASK 0x00008000L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD5_I2CMODE_MASK 0x00010000L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD6_I2CMODE_MASK 0x00020000L
+#define DC_GPIO_AUX_CTRL_5__DDC1_I2C_VPH_1V2_EN_MASK 0x00040000L
+#define DC_GPIO_AUX_CTRL_5__DDC2_I2C_VPH_1V2_EN_MASK 0x00080000L
+#define DC_GPIO_AUX_CTRL_5__DDC3_I2C_VPH_1V2_EN_MASK 0x00100000L
+#define DC_GPIO_AUX_CTRL_5__DDC4_I2C_VPH_1V2_EN_MASK 0x00200000L
+#define DC_GPIO_AUX_CTRL_5__DDC5_I2C_VPH_1V2_EN_MASK 0x00400000L
+#define DC_GPIO_AUX_CTRL_5__DDC6_I2C_VPH_1V2_EN_MASK 0x00800000L
+#define DC_GPIO_AUX_CTRL_5__DDC1_PAD_I2C_CTRL_MASK 0x01000000L
+#define DC_GPIO_AUX_CTRL_5__DDC2_PAD_I2C_CTRL_MASK 0x02000000L
+#define DC_GPIO_AUX_CTRL_5__DDC3_PAD_I2C_CTRL_MASK 0x04000000L
+#define DC_GPIO_AUX_CTRL_5__DDC4_PAD_I2C_CTRL_MASK 0x08000000L
+#define DC_GPIO_AUX_CTRL_5__DDC5_PAD_I2C_CTRL_MASK 0x10000000L
+#define DC_GPIO_AUX_CTRL_5__DDC6_PAD_I2C_CTRL_MASK 0x20000000L
+//AUXI2C_PAD_ALL_PWR_OK
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY1_ALL_PWR_OK__SHIFT 0x0
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY2_ALL_PWR_OK__SHIFT 0x1
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY3_ALL_PWR_OK__SHIFT 0x2
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY4_ALL_PWR_OK__SHIFT 0x3
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY5_ALL_PWR_OK__SHIFT 0x4
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY6_ALL_PWR_OK__SHIFT 0x5
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY1_ALL_PWR_OK_MASK 0x00000001L
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY2_ALL_PWR_OK_MASK 0x00000002L
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY3_ALL_PWR_OK_MASK 0x00000004L
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY4_ALL_PWR_OK_MASK 0x00000008L
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY5_ALL_PWR_OK_MASK 0x00000010L
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY6_ALL_PWR_OK_MASK 0x00000020L
+
+
+// addressBlock: dce_dc_dcio_dcio_uniphy1_dispdec
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED0__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED0__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED1
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED1__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED1__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED2
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED2__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED2__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED3
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED3__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED3__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED4
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED4__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED4__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED5
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED5__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED5__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED6
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED6__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED6__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED7
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED7__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED7__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED8
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED8__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED8__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED9
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED9__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED9__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED10
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED10__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED10__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED11
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED11__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED11__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED12
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED12__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED12__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED13
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED13__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED13__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED14
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED14__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED14__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED15
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED15__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED15__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED16
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED16__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED16__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED17
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED17__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED17__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED18
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED18__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED18__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED19
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED19__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED19__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED20
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED20__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED20__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED21
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED21__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED21__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED22
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED22__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED22__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED23
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED23__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED23__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED24
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED24__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED24__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED25
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED25__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED25__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED26
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED26__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED26__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED27
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED27__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED27__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED28
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED28__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED28__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED29
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED29__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED29__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED30
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED30__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED30__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED31
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED31__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED31__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED32
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED32__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED32__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED33
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED33__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED33__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED34
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED34__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED34__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED35
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED35__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED35__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED36
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED36__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED36__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED37
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED37__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED37__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED38
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED38__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED38__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED39
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED39__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED39__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED40
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED40__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED40__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED41
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED41__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED41__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED42
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED42__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED42__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED43
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED43__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED43__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED44
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED44__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED44__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED45
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED45__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED45__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED46
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED46__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED46__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED47
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED47__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED47__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED48
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED48__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED48__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED49
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED49__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED49__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED50
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED50__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED50__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED51
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED51__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED51__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED52
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED52__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED52__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED53
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED53__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED53__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED54
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED54__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED54__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED55
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED55__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED55__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED56
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED56__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED56__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED57
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED57__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED57__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcio_dcio_uniphy2_dispdec
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED0__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED0__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED1
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED1__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED1__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED2
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED2__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED2__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED3
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED3__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED3__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED4
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED4__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED4__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED5
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED5__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED5__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED6
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED6__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED6__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED7
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED7__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED7__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED8
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED8__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED8__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED9
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED9__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED9__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED10
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED10__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED10__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED11
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED11__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED11__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED12
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED12__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED12__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED13
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED13__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED13__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED14
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED14__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED14__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED15
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED15__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED15__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED16
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED16__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED16__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED17
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED17__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED17__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED18
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED18__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED18__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED19
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED19__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED19__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED20
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED20__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED20__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED21
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED21__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED21__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED22
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED22__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED22__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED23
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED23__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED23__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED24
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED24__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED24__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED25
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED25__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED25__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED26
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED26__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED26__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED27
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED27__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED27__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED28
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED28__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED28__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED29
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED29__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED29__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED30
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED30__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED30__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED31
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED31__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED31__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED32
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED32__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED32__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED33
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED33__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED33__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED34
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED34__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED34__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED35
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED35__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED35__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED36
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED36__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED36__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED37
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED37__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED37__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED38
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED38__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED38__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED39
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED39__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED39__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED40
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED40__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED40__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED41
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED41__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED41__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED42
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED42__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED42__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED43
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED43__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED43__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED44
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED44__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED44__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED45
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED45__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED45__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED46
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED46__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED46__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED47
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED47__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED47__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED48
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED48__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED48__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED49
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED49__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED49__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED50
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED50__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED50__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED51
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED51__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED51__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED52
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED52__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED52__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED53
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED53__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED53__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED54
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED54__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED54__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED55
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED55__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED55__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED56
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED56__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED56__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED57
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED57__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED57__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcio_dcio_uniphy3_dispdec
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED0__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED0__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED1
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED1__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED1__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED2
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED2__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED2__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED3
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED3__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED3__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED4
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED4__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED4__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED5
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED5__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED5__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED6
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED6__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED6__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED7
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED7__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED7__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED8
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED8__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED8__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED9
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED9__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED9__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED10
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED10__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED10__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED11
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED11__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED11__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED12
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED12__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED12__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED13
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED13__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED13__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED14
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED14__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED14__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED15
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED15__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED15__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED16
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED16__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED16__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED17
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED17__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED17__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED18
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED18__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED18__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED19
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED19__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED19__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED20
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED20__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED20__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED21
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED21__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED21__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED22
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED22__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED22__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED23
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED23__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED23__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED24
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED24__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED24__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED25
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED25__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED25__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED26
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED26__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED26__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED27
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED27__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED27__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED28
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED28__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED28__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED29
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED29__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED29__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED30
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED30__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED30__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED31
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED31__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED31__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED32
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED32__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED32__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED33
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED33__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED33__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED34
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED34__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED34__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED35
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED35__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED35__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED36
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED36__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED36__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED37
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED37__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED37__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED38
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED38__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED38__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED39
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED39__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED39__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED40
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED40__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED40__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED41
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED41__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED41__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED42
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED42__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED42__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED43
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED43__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED43__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED44
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED44__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED44__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED45
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED45__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED45__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED46
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED46__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED46__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED47
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED47__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED47__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED48
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED48__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED48__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED49
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED49__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED49__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED50
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED50__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED50__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED51
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED51__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED51__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED52
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED52__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED52__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED53
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED53__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED53__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED54
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED54__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED54__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED55
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED55__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED55__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED56
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED56__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED56__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED57
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED57__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED57__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcio_dcio_uniphy4_dispdec
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED0__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED0__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED1
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED1__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED1__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED2
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED2__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED2__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED3
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED3__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED3__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED4
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED4__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED4__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED5
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED5__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED5__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED6
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED6__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED6__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED7
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED7__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED7__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED8
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED8__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED8__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED9
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED9__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED9__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED10
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED10__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED10__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED11
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED11__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED11__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED12
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED12__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED12__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED13
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED13__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED13__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED14
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED14__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED14__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED15
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED15__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED15__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED16
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED16__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED16__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED17
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED17__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED17__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED18
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED18__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED18__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED19
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED19__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED19__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED20
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED20__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED20__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED21
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED21__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED21__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED22
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED22__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED22__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED23
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED23__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED23__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED24
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED24__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED24__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED25
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED25__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED25__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED26
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED26__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED26__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED27
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED27__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED27__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED28
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED28__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED28__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED29
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED29__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED29__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED30
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED30__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED30__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED31
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED31__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED31__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED32
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED32__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED32__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED33
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED33__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED33__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED34
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED34__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED34__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED35
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED35__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED35__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED36
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED36__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED36__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED37
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED37__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED37__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED38
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED38__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED38__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED39
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED39__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED39__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED40
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED40__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED40__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED41
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED41__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED41__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED42
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED42__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED42__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED43
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED43__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED43__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED44
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED44__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED44__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED45
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED45__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED45__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED46
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED46__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED46__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED47
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED47__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED47__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED48
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED48__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED48__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED49
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED49__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED49__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED50
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED50__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED50__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED51
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED51__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED51__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED52
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED52__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED52__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED53
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED53__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED53__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED54
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED54__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED54__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED55
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED55__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED55__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED56
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED56__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED56__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+//DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED57
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED57__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED57__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_pwrseq0_dispdec_pwrseq_dispdec
+//PWRSEQ0_DC_GPIO_PWRSEQ_EN
+#define PWRSEQ0_DC_GPIO_PWRSEQ_EN__DC_GPIO_VARY_BL_EN__SHIFT 0x0
+#define PWRSEQ0_DC_GPIO_PWRSEQ_EN__DC_GPIO_DIGON_EN__SHIFT 0x8
+#define PWRSEQ0_DC_GPIO_PWRSEQ_EN__DC_GPIO_BLON_EN__SHIFT 0x10
+#define PWRSEQ0_DC_GPIO_PWRSEQ_EN__DC_GPIO_VARY_BL_EN_MASK 0x00000001L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_EN__DC_GPIO_DIGON_EN_MASK 0x00000100L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_EN__DC_GPIO_BLON_EN_MASK 0x00010000L
+//PWRSEQ0_DC_GPIO_PWRSEQ_CTRL
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_TXIMPSEL__SHIFT 0x0
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_TXIMPSEL__SHIFT 0x1
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_TXIMPSEL__SHIFT 0x2
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_RXEN__SHIFT 0x3
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_RXEN__SHIFT 0x4
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_RXEN__SHIFT 0x5
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_PU_EN__SHIFT 0x6
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_PU_EN__SHIFT 0x7
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_PU_EN__SHIFT 0x8
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__PWRSEQ_STRENGTH_S0__SHIFT 0x10
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_S1__SHIFT 0x14
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_S1__SHIFT 0x15
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_S1__SHIFT 0x16
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_TXIMPSEL_MASK 0x00000001L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_TXIMPSEL_MASK 0x00000002L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_TXIMPSEL_MASK 0x00000004L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_RXEN_MASK 0x00000008L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_RXEN_MASK 0x00000010L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_RXEN_MASK 0x00000020L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_PU_EN_MASK 0x00000040L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_PU_EN_MASK 0x00000080L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_PU_EN_MASK 0x00000100L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__PWRSEQ_STRENGTH_S0_MASK 0x00010000L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_S1_MASK 0x00100000L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_S1_MASK 0x00200000L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_S1_MASK 0x00400000L
+//PWRSEQ0_DC_GPIO_PWRSEQ_MASK
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_MASK__SHIFT 0x0
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_PD_DIS__SHIFT 0x4
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_RECV__SHIFT 0x6
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_MASK__SHIFT 0x8
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_PD_DIS__SHIFT 0xc
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_RECV__SHIFT 0xe
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_MASK__SHIFT 0x10
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_PD_DIS__SHIFT 0x14
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_RECV__SHIFT 0x16
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_MASK_MASK 0x00000001L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_PD_DIS_MASK 0x00000010L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_RECV_MASK 0x000000C0L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_MASK_MASK 0x00000100L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_PD_DIS_MASK 0x00001000L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_RECV_MASK 0x0000C000L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_MASK_MASK 0x00010000L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_PD_DIS_MASK 0x00100000L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_RECV_MASK 0x00C00000L
+//PWRSEQ0_DC_GPIO_PWRSEQ_A_Y
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_VARY_BL_A__SHIFT 0x0
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_VARY_BL_Y__SHIFT 0x1
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_DIGON_A__SHIFT 0x8
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_DIGON_Y__SHIFT 0x9
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_BLON_A__SHIFT 0x10
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_BLON_Y__SHIFT 0x11
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_VARY_BL_A_MASK 0x00000001L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_VARY_BL_Y_MASK 0x00000002L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_DIGON_A_MASK 0x00000100L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_DIGON_Y_MASK 0x00000200L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_BLON_A_MASK 0x00010000L
+#define PWRSEQ0_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_BLON_Y_MASK 0x00020000L
+//PWRSEQ0_PANEL_PWRSEQ_CNTL
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_PWRSEQ_EN__SHIFT 0x0
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_PWRSEQ_TARGET_STATE__SHIFT 0x4
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN__SHIFT 0x8
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_OVRD__SHIFT 0x9
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_POL__SHIFT 0xa
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_DIGON__SHIFT 0x10
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_DIGON_OVRD__SHIFT 0x11
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_DIGON_POL__SHIFT 0x12
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_BLON__SHIFT 0x18
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_BLON_OVRD__SHIFT 0x19
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_BLON_POL__SHIFT 0x1a
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_PWRSEQ_EN_MASK 0x00000001L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_PWRSEQ_TARGET_STATE_MASK 0x00000010L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_MASK 0x00000100L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_OVRD_MASK 0x00000200L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_POL_MASK 0x00000400L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_DIGON_MASK 0x00010000L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_DIGON_OVRD_MASK 0x00020000L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_DIGON_POL_MASK 0x00040000L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_BLON_MASK 0x01000000L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_BLON_OVRD_MASK 0x02000000L
+#define PWRSEQ0_PANEL_PWRSEQ_CNTL__PANEL_BLON_POL_MASK 0x04000000L
+//PWRSEQ0_PANEL_PWRSEQ_STATE
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_TARGET_STATE_R__SHIFT 0x0
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_DIGON__SHIFT 0x1
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_SYNCEN__SHIFT 0x2
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_BLON__SHIFT 0x3
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_DONE__SHIFT 0x4
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_STATE__SHIFT 0x8
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_TARGET_STATE_R_MASK 0x00000001L
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_DIGON_MASK 0x00000002L
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_SYNCEN_MASK 0x00000004L
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_BLON_MASK 0x00000008L
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_DONE_MASK 0x00000010L
+#define PWRSEQ0_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_STATE_MASK 0x00000F00L
+//PWRSEQ0_PANEL_PWRSEQ_DELAY1
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY1__PANEL_PWRUP_DELAY1__SHIFT 0x0
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY1__PANEL_PWRUP_DELAY2__SHIFT 0x8
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY1__PANEL_PWRDN_DELAY1__SHIFT 0x10
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY1__PANEL_PWRDN_DELAY2__SHIFT 0x18
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY1__PANEL_PWRUP_DELAY1_MASK 0x000000FFL
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY1__PANEL_PWRUP_DELAY2_MASK 0x0000FF00L
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY1__PANEL_PWRDN_DELAY1_MASK 0x00FF0000L
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY1__PANEL_PWRDN_DELAY2_MASK 0xFF000000L
+//PWRSEQ0_PANEL_PWRSEQ_DELAY2
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY2__PANEL_PWRDN_MIN_LENGTH__SHIFT 0x0
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY2__PANEL_PWRUP_DELAY3__SHIFT 0x8
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY2__PANEL_PWRDN_DELAY3__SHIFT 0x10
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY2__PANEL_VARY_BL_OVERRIDE_EN__SHIFT 0x18
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY2__PANEL_PWRDN_MIN_LENGTH_MASK 0x000000FFL
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY2__PANEL_PWRUP_DELAY3_MASK 0x0000FF00L
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY2__PANEL_PWRDN_DELAY3_MASK 0x00FF0000L
+#define PWRSEQ0_PANEL_PWRSEQ_DELAY2__PANEL_VARY_BL_OVERRIDE_EN_MASK 0x01000000L
+//PWRSEQ0_PANEL_PWRSEQ_REF_DIV1
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV1__PANEL_PWRSEQ_REF_DIV__SHIFT 0x0
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV1__BL_PWM_REF_DIV__SHIFT 0x10
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV1__PANEL_PWRSEQ_REF_DIV_MASK 0x00000FFFL
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV1__BL_PWM_REF_DIV_MASK 0xFFFF0000L
+//PWRSEQ0_BL_PWM_CNTL
+#define PWRSEQ0_BL_PWM_CNTL__BL_ACTIVE_INT_FRAC_CNT__SHIFT 0x0
+#define PWRSEQ0_BL_PWM_CNTL__BL_PWM_EN_EQ_ZERO__SHIFT 0x13
+#define PWRSEQ0_BL_PWM_CNTL__FRAME_START_EVENT_RECOGNIZED__SHIFT 0x14
+#define PWRSEQ0_BL_PWM_CNTL__RECOGNIZE_POST_FRAME_START_DELAY_BEFORE_UPDATE__SHIFT 0x15
+#define PWRSEQ0_BL_PWM_CNTL__BL_PWM_FRACTIONAL_EN__SHIFT 0x1e
+#define PWRSEQ0_BL_PWM_CNTL__BL_PWM_EN__SHIFT 0x1f
+#define PWRSEQ0_BL_PWM_CNTL__BL_ACTIVE_INT_FRAC_CNT_MASK 0x0000FFFFL
+#define PWRSEQ0_BL_PWM_CNTL__BL_PWM_EN_EQ_ZERO_MASK 0x00080000L
+#define PWRSEQ0_BL_PWM_CNTL__FRAME_START_EVENT_RECOGNIZED_MASK 0x00100000L
+#define PWRSEQ0_BL_PWM_CNTL__RECOGNIZE_POST_FRAME_START_DELAY_BEFORE_UPDATE_MASK 0x00200000L
+#define PWRSEQ0_BL_PWM_CNTL__BL_PWM_FRACTIONAL_EN_MASK 0x40000000L
+#define PWRSEQ0_BL_PWM_CNTL__BL_PWM_EN_MASK 0x80000000L
+//PWRSEQ0_BL_PWM_CNTL2
+#define PWRSEQ0_BL_PWM_CNTL2__BL_PWM_POST_FRAME_START_DELAY_BEFORE_UPDATE__SHIFT 0x0
+#define PWRSEQ0_BL_PWM_CNTL2__BL_PWM_OVERRIDE_BL_OUT_ENABLE__SHIFT 0x1e
+#define PWRSEQ0_BL_PWM_CNTL2__BL_PWM_OVERRIDE_PANEL_PWRSEQ_EN__SHIFT 0x1f
+#define PWRSEQ0_BL_PWM_CNTL2__BL_PWM_POST_FRAME_START_DELAY_BEFORE_UPDATE_MASK 0x0000FFFFL
+#define PWRSEQ0_BL_PWM_CNTL2__BL_PWM_OVERRIDE_BL_OUT_ENABLE_MASK 0x40000000L
+#define PWRSEQ0_BL_PWM_CNTL2__BL_PWM_OVERRIDE_PANEL_PWRSEQ_EN_MASK 0x80000000L
+//PWRSEQ0_BL_PWM_PERIOD_CNTL
+#define PWRSEQ0_BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD__SHIFT 0x0
+#define PWRSEQ0_BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_BITCNT__SHIFT 0x10
+#define PWRSEQ0_BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_MASK 0x0000FFFFL
+#define PWRSEQ0_BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_BITCNT_MASK 0x000F0000L
+//PWRSEQ0_BL_PWM_GRP1_REG_LOCK
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_LOCK__SHIFT 0x0
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_UPDATE_PENDING__SHIFT 0x8
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_UPDATE_AT_FRAME_START__SHIFT 0x10
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN__SHIFT 0x18
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN__SHIFT 0x1f
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_LOCK_MASK 0x00000001L
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_UPDATE_PENDING_MASK 0x00000100L
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_UPDATE_AT_FRAME_START_MASK 0x00010000L
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN_MASK 0x01000000L
+#define PWRSEQ0_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN_MASK 0x80000000L
+//PWRSEQ0_PANEL_PWRSEQ_REF_DIV2
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV2__XTAL_REF_DIV__SHIFT 0x0
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV2__MICROSECOND_TIME_BASE_DIV__SHIFT 0x8
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV2__XTAL_REF_START_ON_VARY_BL_ACTIVE__SHIFT 0x10
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV2__XTAL_REF_DIV_MASK 0x0000007FL
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV2__MICROSECOND_TIME_BASE_DIV_MASK 0x00007F00L
+#define PWRSEQ0_PANEL_PWRSEQ_REF_DIV2__XTAL_REF_START_ON_VARY_BL_ACTIVE_MASK 0x00010000L
+//PWRSEQ0_PWRSEQ_SPARE
+#define PWRSEQ0_PWRSEQ_SPARE__PWRSEQ_SPARE__SHIFT 0x0
+#define PWRSEQ0_PWRSEQ_SPARE__PWRSEQ_SPARE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_pwrseq1_dispdec_pwrseq_dispdec
+//PWRSEQ1_DC_GPIO_PWRSEQ_EN
+#define PWRSEQ1_DC_GPIO_PWRSEQ_EN__DC_GPIO_VARY_BL_EN__SHIFT 0x0
+#define PWRSEQ1_DC_GPIO_PWRSEQ_EN__DC_GPIO_DIGON_EN__SHIFT 0x8
+#define PWRSEQ1_DC_GPIO_PWRSEQ_EN__DC_GPIO_BLON_EN__SHIFT 0x10
+#define PWRSEQ1_DC_GPIO_PWRSEQ_EN__DC_GPIO_VARY_BL_EN_MASK 0x00000001L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_EN__DC_GPIO_DIGON_EN_MASK 0x00000100L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_EN__DC_GPIO_BLON_EN_MASK 0x00010000L
+//PWRSEQ1_DC_GPIO_PWRSEQ_CTRL
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_TXIMPSEL__SHIFT 0x0
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_TXIMPSEL__SHIFT 0x1
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_TXIMPSEL__SHIFT 0x2
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_RXEN__SHIFT 0x3
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_RXEN__SHIFT 0x4
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_RXEN__SHIFT 0x5
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_PU_EN__SHIFT 0x6
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_PU_EN__SHIFT 0x7
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_PU_EN__SHIFT 0x8
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__PWRSEQ_STRENGTH_S0__SHIFT 0x10
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_S1__SHIFT 0x14
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_S1__SHIFT 0x15
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_S1__SHIFT 0x16
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_TXIMPSEL_MASK 0x00000001L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_TXIMPSEL_MASK 0x00000002L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_TXIMPSEL_MASK 0x00000004L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_RXEN_MASK 0x00000008L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_RXEN_MASK 0x00000010L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_RXEN_MASK 0x00000020L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_PU_EN_MASK 0x00000040L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_PU_EN_MASK 0x00000080L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_PU_EN_MASK 0x00000100L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__PWRSEQ_STRENGTH_S0_MASK 0x00010000L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_VARY_BL_S1_MASK 0x00100000L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_DIGON_S1_MASK 0x00200000L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_CTRL__DC_GPIO_BLON_S1_MASK 0x00400000L
+//PWRSEQ1_DC_GPIO_PWRSEQ_MASK
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_MASK__SHIFT 0x0
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_PD_DIS__SHIFT 0x4
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_RECV__SHIFT 0x6
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_MASK__SHIFT 0x8
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_PD_DIS__SHIFT 0xc
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_RECV__SHIFT 0xe
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_MASK__SHIFT 0x10
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_PD_DIS__SHIFT 0x14
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_RECV__SHIFT 0x16
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_MASK_MASK 0x00000001L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_PD_DIS_MASK 0x00000010L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_VARY_BL_RECV_MASK 0x000000C0L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_MASK_MASK 0x00000100L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_PD_DIS_MASK 0x00001000L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_RECV_MASK 0x0000C000L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_MASK_MASK 0x00010000L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_PD_DIS_MASK 0x00100000L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_RECV_MASK 0x00C00000L
+//PWRSEQ1_DC_GPIO_PWRSEQ_A_Y
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_VARY_BL_A__SHIFT 0x0
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_VARY_BL_Y__SHIFT 0x1
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_DIGON_A__SHIFT 0x8
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_DIGON_Y__SHIFT 0x9
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_BLON_A__SHIFT 0x10
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_BLON_Y__SHIFT 0x11
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_VARY_BL_A_MASK 0x00000001L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_VARY_BL_Y_MASK 0x00000002L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_DIGON_A_MASK 0x00000100L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_DIGON_Y_MASK 0x00000200L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_BLON_A_MASK 0x00010000L
+#define PWRSEQ1_DC_GPIO_PWRSEQ_A_Y__DC_GPIO_BLON_Y_MASK 0x00020000L
+//PWRSEQ1_PANEL_PWRSEQ_CNTL
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_PWRSEQ_EN__SHIFT 0x0
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_PWRSEQ_TARGET_STATE__SHIFT 0x4
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN__SHIFT 0x8
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_OVRD__SHIFT 0x9
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_POL__SHIFT 0xa
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_DIGON__SHIFT 0x10
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_DIGON_OVRD__SHIFT 0x11
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_DIGON_POL__SHIFT 0x12
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_BLON__SHIFT 0x18
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_BLON_OVRD__SHIFT 0x19
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_BLON_POL__SHIFT 0x1a
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_PWRSEQ_EN_MASK 0x00000001L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_PWRSEQ_TARGET_STATE_MASK 0x00000010L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_MASK 0x00000100L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_OVRD_MASK 0x00000200L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_SYNCEN_POL_MASK 0x00000400L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_DIGON_MASK 0x00010000L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_DIGON_OVRD_MASK 0x00020000L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_DIGON_POL_MASK 0x00040000L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_BLON_MASK 0x01000000L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_BLON_OVRD_MASK 0x02000000L
+#define PWRSEQ1_PANEL_PWRSEQ_CNTL__PANEL_BLON_POL_MASK 0x04000000L
+//PWRSEQ1_PANEL_PWRSEQ_STATE
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_TARGET_STATE_R__SHIFT 0x0
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_DIGON__SHIFT 0x1
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_SYNCEN__SHIFT 0x2
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_BLON__SHIFT 0x3
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_DONE__SHIFT 0x4
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_STATE__SHIFT 0x8
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_TARGET_STATE_R_MASK 0x00000001L
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_DIGON_MASK 0x00000002L
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_SYNCEN_MASK 0x00000004L
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_BLON_MASK 0x00000008L
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_DONE_MASK 0x00000010L
+#define PWRSEQ1_PANEL_PWRSEQ_STATE__PANEL_PWRSEQ_STATE_MASK 0x00000F00L
+//PWRSEQ1_PANEL_PWRSEQ_DELAY1
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY1__PANEL_PWRUP_DELAY1__SHIFT 0x0
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY1__PANEL_PWRUP_DELAY2__SHIFT 0x8
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY1__PANEL_PWRDN_DELAY1__SHIFT 0x10
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY1__PANEL_PWRDN_DELAY2__SHIFT 0x18
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY1__PANEL_PWRUP_DELAY1_MASK 0x000000FFL
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY1__PANEL_PWRUP_DELAY2_MASK 0x0000FF00L
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY1__PANEL_PWRDN_DELAY1_MASK 0x00FF0000L
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY1__PANEL_PWRDN_DELAY2_MASK 0xFF000000L
+//PWRSEQ1_PANEL_PWRSEQ_DELAY2
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY2__PANEL_PWRDN_MIN_LENGTH__SHIFT 0x0
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY2__PANEL_PWRUP_DELAY3__SHIFT 0x8
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY2__PANEL_PWRDN_DELAY3__SHIFT 0x10
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY2__PANEL_VARY_BL_OVERRIDE_EN__SHIFT 0x18
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY2__PANEL_PWRDN_MIN_LENGTH_MASK 0x000000FFL
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY2__PANEL_PWRUP_DELAY3_MASK 0x0000FF00L
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY2__PANEL_PWRDN_DELAY3_MASK 0x00FF0000L
+#define PWRSEQ1_PANEL_PWRSEQ_DELAY2__PANEL_VARY_BL_OVERRIDE_EN_MASK 0x01000000L
+//PWRSEQ1_PANEL_PWRSEQ_REF_DIV1
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV1__PANEL_PWRSEQ_REF_DIV__SHIFT 0x0
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV1__BL_PWM_REF_DIV__SHIFT 0x10
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV1__PANEL_PWRSEQ_REF_DIV_MASK 0x00000FFFL
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV1__BL_PWM_REF_DIV_MASK 0xFFFF0000L
+//PWRSEQ1_BL_PWM_CNTL
+#define PWRSEQ1_BL_PWM_CNTL__BL_ACTIVE_INT_FRAC_CNT__SHIFT 0x0
+#define PWRSEQ1_BL_PWM_CNTL__BL_PWM_EN_EQ_ZERO__SHIFT 0x13
+#define PWRSEQ1_BL_PWM_CNTL__FRAME_START_EVENT_RECOGNIZED__SHIFT 0x14
+#define PWRSEQ1_BL_PWM_CNTL__RECOGNIZE_POST_FRAME_START_DELAY_BEFORE_UPDATE__SHIFT 0x15
+#define PWRSEQ1_BL_PWM_CNTL__BL_PWM_FRACTIONAL_EN__SHIFT 0x1e
+#define PWRSEQ1_BL_PWM_CNTL__BL_PWM_EN__SHIFT 0x1f
+#define PWRSEQ1_BL_PWM_CNTL__BL_ACTIVE_INT_FRAC_CNT_MASK 0x0000FFFFL
+#define PWRSEQ1_BL_PWM_CNTL__BL_PWM_EN_EQ_ZERO_MASK 0x00080000L
+#define PWRSEQ1_BL_PWM_CNTL__FRAME_START_EVENT_RECOGNIZED_MASK 0x00100000L
+#define PWRSEQ1_BL_PWM_CNTL__RECOGNIZE_POST_FRAME_START_DELAY_BEFORE_UPDATE_MASK 0x00200000L
+#define PWRSEQ1_BL_PWM_CNTL__BL_PWM_FRACTIONAL_EN_MASK 0x40000000L
+#define PWRSEQ1_BL_PWM_CNTL__BL_PWM_EN_MASK 0x80000000L
+//PWRSEQ1_BL_PWM_CNTL2
+#define PWRSEQ1_BL_PWM_CNTL2__BL_PWM_POST_FRAME_START_DELAY_BEFORE_UPDATE__SHIFT 0x0
+#define PWRSEQ1_BL_PWM_CNTL2__BL_PWM_OVERRIDE_BL_OUT_ENABLE__SHIFT 0x1e
+#define PWRSEQ1_BL_PWM_CNTL2__BL_PWM_OVERRIDE_PANEL_PWRSEQ_EN__SHIFT 0x1f
+#define PWRSEQ1_BL_PWM_CNTL2__BL_PWM_POST_FRAME_START_DELAY_BEFORE_UPDATE_MASK 0x0000FFFFL
+#define PWRSEQ1_BL_PWM_CNTL2__BL_PWM_OVERRIDE_BL_OUT_ENABLE_MASK 0x40000000L
+#define PWRSEQ1_BL_PWM_CNTL2__BL_PWM_OVERRIDE_PANEL_PWRSEQ_EN_MASK 0x80000000L
+//PWRSEQ1_BL_PWM_PERIOD_CNTL
+#define PWRSEQ1_BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD__SHIFT 0x0
+#define PWRSEQ1_BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_BITCNT__SHIFT 0x10
+#define PWRSEQ1_BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_MASK 0x0000FFFFL
+#define PWRSEQ1_BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_BITCNT_MASK 0x000F0000L
+//PWRSEQ1_BL_PWM_GRP1_REG_LOCK
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_LOCK__SHIFT 0x0
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_UPDATE_PENDING__SHIFT 0x8
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_UPDATE_AT_FRAME_START__SHIFT 0x10
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN__SHIFT 0x18
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN__SHIFT 0x1f
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_LOCK_MASK 0x00000001L
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_UPDATE_PENDING_MASK 0x00000100L
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_UPDATE_AT_FRAME_START_MASK 0x00010000L
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN_MASK 0x01000000L
+#define PWRSEQ1_BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN_MASK 0x80000000L
+//PWRSEQ1_PANEL_PWRSEQ_REF_DIV2
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV2__XTAL_REF_DIV__SHIFT 0x0
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV2__MICROSECOND_TIME_BASE_DIV__SHIFT 0x8
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV2__XTAL_REF_START_ON_VARY_BL_ACTIVE__SHIFT 0x10
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV2__XTAL_REF_DIV_MASK 0x0000007FL
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV2__MICROSECOND_TIME_BASE_DIV_MASK 0x00007F00L
+#define PWRSEQ1_PANEL_PWRSEQ_REF_DIV2__XTAL_REF_START_ON_VARY_BL_ACTIVE_MASK 0x00010000L
+//PWRSEQ1_PWRSEQ_SPARE
+#define PWRSEQ1_PWRSEQ_SPARE__PWRSEQ_SPARE__SHIFT 0x0
+#define PWRSEQ1_PWRSEQ_SPARE__PWRSEQ_SPARE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dsc0_dispdec_dscc_dispdec
+//DSCC0_DSCC_CONFIG0
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
+#define DSCC0_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE__SHIFT 0x4
+#define DSCC0_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN__SHIFT 0x8
+#define DSCC0_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION__SHIFT 0x10
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
+#define DSCC0_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE_MASK 0x00000030L
+#define DSCC0_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN_MASK 0x00000100L
+#define DSCC0_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION_MASK 0xFFFF0000L
+//DSCC0_DSCC_CONFIG1
+#define DSCC0_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE__SHIFT 0x0
+#define DSCC0_DSCC_CONFIG1__DSCC_DISABLE_ICH__SHIFT 0x18
+#define DSCC0_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE_MASK 0x0003FFFFL
+#define DSCC0_DSCC_CONFIG1__DSCC_DISABLE_ICH_MASK 0x01000000L
+//DSCC0_DSCC_STATUS
+#define DSCC0_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x0
+#define DSCC0_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000001L
+//DSCC0_DSCC_INTERRUPT_CONTROL_STATUS
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED__SHIFT 0x0
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED__SHIFT 0x1
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED__SHIFT 0x2
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED__SHIFT 0x3
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED__SHIFT 0x4
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED__SHIFT 0x5
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED__SHIFT 0x6
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED__SHIFT 0x7
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED__SHIFT 0x8
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED__SHIFT 0x9
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED__SHIFT 0xa
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED__SHIFT 0xb
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED__SHIFT 0xc
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x10
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x11
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x12
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x13
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x14
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x15
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x16
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x17
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x18
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x19
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1a
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1b
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_INT_EN__SHIFT 0x1c
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_MASK 0x00000001L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_MASK 0x00000002L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_MASK 0x00000004L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_MASK 0x00000008L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_MASK 0x00000010L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_MASK 0x00000020L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_MASK 0x00000040L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_MASK 0x00000080L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_MASK 0x00000100L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_MASK 0x00000200L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_MASK 0x00000400L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_MASK 0x00000800L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_MASK 0x00001000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN_MASK 0x00010000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN_MASK 0x00020000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN_MASK 0x00040000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN_MASK 0x00080000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00100000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00200000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00400000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00800000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN_MASK 0x01000000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN_MASK 0x02000000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN_MASK 0x04000000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN_MASK 0x08000000L
+#define DSCC0_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_INT_EN_MASK 0x10000000L
+//DSCC0_DSCC_PPS_CONFIG0
+#define DSCC0_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR__SHIFT 0x4
+#define DSCC0_DSCC_PPS_CONFIG0__PPS_IDENTIFIER__SHIFT 0x8
+#define DSCC0_DSCC_PPS_CONFIG0__LINEBUF_DEPTH__SHIFT 0x18
+#define DSCC0_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x1c
+#define DSCC0_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR_MASK 0x0000000FL
+#define DSCC0_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR_MASK 0x000000F0L
+#define DSCC0_DSCC_PPS_CONFIG0__PPS_IDENTIFIER_MASK 0x0000FF00L
+#define DSCC0_DSCC_PPS_CONFIG0__LINEBUF_DEPTH_MASK 0x0F000000L
+#define DSCC0_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT_MASK 0xF0000000L
+//DSCC0_DSCC_PPS_CONFIG1
+#define DSCC0_DSCC_PPS_CONFIG1__BITS_PER_PIXEL__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG1__VBR_ENABLE__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG1__SIMPLE_422__SHIFT 0xb
+#define DSCC0_DSCC_PPS_CONFIG1__CONVERT_RGB__SHIFT 0xc
+#define DSCC0_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE__SHIFT 0xd
+#define DSCC0_DSCC_PPS_CONFIG1__NATIVE_422__SHIFT 0xe
+#define DSCC0_DSCC_PPS_CONFIG1__NATIVE_420__SHIFT 0xf
+#define DSCC0_DSCC_PPS_CONFIG1__CHUNK_SIZE__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG1__BITS_PER_PIXEL_MASK 0x000003FFL
+#define DSCC0_DSCC_PPS_CONFIG1__VBR_ENABLE_MASK 0x00000400L
+#define DSCC0_DSCC_PPS_CONFIG1__SIMPLE_422_MASK 0x00000800L
+#define DSCC0_DSCC_PPS_CONFIG1__CONVERT_RGB_MASK 0x00001000L
+#define DSCC0_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE_MASK 0x00002000L
+#define DSCC0_DSCC_PPS_CONFIG1__NATIVE_422_MASK 0x00004000L
+#define DSCC0_DSCC_PPS_CONFIG1__NATIVE_420_MASK 0x00008000L
+#define DSCC0_DSCC_PPS_CONFIG1__CHUNK_SIZE_MASK 0xFFFF0000L
+//DSCC0_DSCC_PPS_CONFIG2
+#define DSCC0_DSCC_PPS_CONFIG2__PIC_WIDTH__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG2__PIC_HEIGHT__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG2__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCC0_DSCC_PPS_CONFIG2__PIC_HEIGHT_MASK 0xFFFF0000L
+//DSCC0_DSCC_PPS_CONFIG3
+#define DSCC0_DSCC_PPS_CONFIG3__SLICE_WIDTH__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG3__SLICE_HEIGHT__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG3__SLICE_WIDTH_MASK 0x0000FFFFL
+#define DSCC0_DSCC_PPS_CONFIG3__SLICE_HEIGHT_MASK 0xFFFF0000L
+//DSCC0_DSCC_PPS_CONFIG4
+#define DSCC0_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY_MASK 0x000003FFL
+#define DSCC0_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY_MASK 0xFFFF0000L
+//DSCC0_DSCC_PPS_CONFIG5
+#define DSCC0_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE_MASK 0x0000003FL
+#define DSCC0_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL_MASK 0xFFFF0000L
+//DSCC0_DSCC_PPS_CONFIG6
+#define DSCC0_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET__SHIFT 0x18
+#define DSCC0_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL_MASK 0x00000FFFL
+#define DSCC0_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET_MASK 0x1F000000L
+//DSCC0_DSCC_PPS_CONFIG7
+#define DSCC0_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC0_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET_MASK 0xFFFF0000L
+//DSCC0_DSCC_PPS_CONFIG8
+#define DSCC0_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC0_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ_MASK 0xFFFF0000L
+//DSCC0_DSCC_PPS_CONFIG9
+#define DSCC0_DSCC_PPS_CONFIG9__INITIAL_OFFSET__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG9__FINAL_OFFSET__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG9__INITIAL_OFFSET_MASK 0x0000FFFFL
+#define DSCC0_DSCC_PPS_CONFIG9__FINAL_OFFSET_MASK 0xFFFF0000L
+//DSCC0_DSCC_PPS_CONFIG10
+#define DSCC0_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP__SHIFT 0x8
+#define DSCC0_DSCC_PPS_CONFIG10__RC_MODEL_SIZE__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP_MASK 0x00001F00L
+#define DSCC0_DSCC_PPS_CONFIG10__RC_MODEL_SIZE_MASK 0xFFFF0000L
+//DSCC0_DSCC_PPS_CONFIG11
+#define DSCC0_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0__SHIFT 0x8
+#define DSCC0_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO__SHIFT 0x18
+#define DSCC0_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI__SHIFT 0x1c
+#define DSCC0_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR_MASK 0x0000000FL
+#define DSCC0_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0_MASK 0x00001F00L
+#define DSCC0_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO_MASK 0x0F000000L
+#define DSCC0_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI_MASK 0xF0000000L
+//DSCC0_DSCC_PPS_CONFIG12
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH0__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH1__SHIFT 0x8
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH2__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH3__SHIFT 0x18
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH0_MASK 0x000000FFL
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH1_MASK 0x0000FF00L
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH2_MASK 0x00FF0000L
+#define DSCC0_DSCC_PPS_CONFIG12__RC_BUF_THRESH3_MASK 0xFF000000L
+//DSCC0_DSCC_PPS_CONFIG13
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH4__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH5__SHIFT 0x8
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH6__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH7__SHIFT 0x18
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH4_MASK 0x000000FFL
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH5_MASK 0x0000FF00L
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH6_MASK 0x00FF0000L
+#define DSCC0_DSCC_PPS_CONFIG13__RC_BUF_THRESH7_MASK 0xFF000000L
+//DSCC0_DSCC_PPS_CONFIG14
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH8__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH9__SHIFT 0x8
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH10__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH11__SHIFT 0x18
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH8_MASK 0x000000FFL
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH9_MASK 0x0000FF00L
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH10_MASK 0x00FF0000L
+#define DSCC0_DSCC_PPS_CONFIG14__RC_BUF_THRESH11_MASK 0xFF000000L
+//DSCC0_DSCC_PPS_CONFIG15
+#define DSCC0_DSCC_PPS_CONFIG15__RC_BUF_THRESH12__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG15__RC_BUF_THRESH13__SHIFT 0x8
+#define DSCC0_DSCC_PPS_CONFIG15__RANGE_MIN_QP0__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG15__RANGE_MAX_QP0__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG15__RC_BUF_THRESH12_MASK 0x000000FFL
+#define DSCC0_DSCC_PPS_CONFIG15__RC_BUF_THRESH13_MASK 0x0000FF00L
+#define DSCC0_DSCC_PPS_CONFIG15__RANGE_MIN_QP0_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG15__RANGE_MAX_QP0_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0_MASK 0xFC000000L
+//DSCC0_DSCC_PPS_CONFIG16
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MIN_QP1__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MAX_QP1__SHIFT 0x5
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MIN_QP2__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MAX_QP2__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MIN_QP1_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MAX_QP1_MASK 0x000003E0L
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1_MASK 0x0000FC00L
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MIN_QP2_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_MAX_QP2_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2_MASK 0xFC000000L
+//DSCC0_DSCC_PPS_CONFIG17
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MIN_QP3__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MAX_QP3__SHIFT 0x5
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MIN_QP4__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MAX_QP4__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MIN_QP3_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MAX_QP3_MASK 0x000003E0L
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3_MASK 0x0000FC00L
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MIN_QP4_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_MAX_QP4_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4_MASK 0xFC000000L
+//DSCC0_DSCC_PPS_CONFIG18
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MIN_QP5__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MAX_QP5__SHIFT 0x5
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MIN_QP6__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MAX_QP6__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MIN_QP5_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MAX_QP5_MASK 0x000003E0L
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5_MASK 0x0000FC00L
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MIN_QP6_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_MAX_QP6_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6_MASK 0xFC000000L
+//DSCC0_DSCC_PPS_CONFIG19
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MIN_QP7__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MAX_QP7__SHIFT 0x5
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MIN_QP8__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MAX_QP8__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MIN_QP7_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MAX_QP7_MASK 0x000003E0L
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7_MASK 0x0000FC00L
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MIN_QP8_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_MAX_QP8_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8_MASK 0xFC000000L
+//DSCC0_DSCC_PPS_CONFIG20
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MIN_QP9__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MAX_QP9__SHIFT 0x5
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MIN_QP10__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MAX_QP10__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MIN_QP9_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MAX_QP9_MASK 0x000003E0L
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9_MASK 0x0000FC00L
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MIN_QP10_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_MAX_QP10_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10_MASK 0xFC000000L
+//DSCC0_DSCC_PPS_CONFIG21
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MIN_QP11__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MAX_QP11__SHIFT 0x5
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MIN_QP12__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MAX_QP12__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MIN_QP11_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MAX_QP11_MASK 0x000003E0L
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11_MASK 0x0000FC00L
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MIN_QP12_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_MAX_QP12_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12_MASK 0xFC000000L
+//DSCC0_DSCC_PPS_CONFIG22
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MIN_QP13__SHIFT 0x0
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MAX_QP13__SHIFT 0x5
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13__SHIFT 0xa
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MIN_QP14__SHIFT 0x10
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MAX_QP14__SHIFT 0x15
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14__SHIFT 0x1a
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MIN_QP13_MASK 0x0000001FL
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MAX_QP13_MASK 0x000003E0L
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13_MASK 0x0000FC00L
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MIN_QP14_MASK 0x001F0000L
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_MAX_QP14_MASK 0x03E00000L
+#define DSCC0_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14_MASK 0xFC000000L
+//DSCC0_DSCC_MEM_POWER_CONTROL
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0x0
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS__SHIFT 0x8
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE__SHIFT 0x10
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS__SHIFT 0x18
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE__SHIFT 0x1c
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00000003L
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS_MASK 0x00000100L
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE_MASK 0x00030000L
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS_MASK 0x01000000L
+#define DSCC0_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE_MASK 0x30000000L
+//DSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER
+#define DSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER
+#define DSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER
+#define DSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER
+#define DSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER
+#define DSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER
+#define DSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC0_DSCC_MAX_ABS_ERROR0
+#define DSCC0_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC0_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR__SHIFT 0x10
+#define DSCC0_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR_MASK 0x0000FFFFL
+#define DSCC0_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR_MASK 0xFFFF0000L
+//DSCC0_DSCC_MAX_ABS_ERROR1
+#define DSCC0_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC0_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR_MASK 0x0000FFFFL
+//DSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL
+#define DSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL
+#define DSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL
+#define DSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL
+#define DSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L
+
+// addressBlock: dce_dc_dsc0_dispdec_dsccif_dispdec
+//DSCCIF0_DSCCIF_CONFIG0
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN__SHIFT 0x0
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x4
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS__SHIFT 0x8
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT__SHIFT 0xc
+#define DSCCIF0_DSCCIF_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x10
+#define DSCCIF0_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN_MASK 0x00000001L
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00000010L
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000100L
+#define DSCCIF0_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT_MASK 0x00007000L
+#define DSCCIF0_DSCCIF_CONFIG0__BITS_PER_COMPONENT_MASK 0x000F0000L
+#define DSCCIF0_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+//DSCCIF0_DSCCIF_CONFIG1
+#define DSCCIF0_DSCCIF_CONFIG1__PIC_WIDTH__SHIFT 0x0
+#define DSCCIF0_DSCCIF_CONFIG1__PIC_HEIGHT__SHIFT 0x10
+#define DSCCIF0_DSCCIF_CONFIG1__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCCIF0_DSCCIF_CONFIG1__PIC_HEIGHT_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dsc0_dispdec_dsc_top_dispdec
+//DSC_TOP0_DSC_TOP_CONTROL
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_CLOCK_EN__SHIFT 0x0
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS__SHIFT 0x8
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_FGCG_REP_DIS__SHIFT 0xc
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_DYNAMIC_CLOCK_GATE_EN__SHIFT 0x10
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_FGCG_REP_DIS_MASK 0x00001000L
+#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_DYNAMIC_CLOCK_GATE_EN_MASK 0x00010000L
+//DSC_TOP0_DSC_DEBUG_CONTROL
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L
+
+
+// addressBlock: dce_dc_dsc0_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON19_PERFCOUNTER_CNTL
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON19_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON19_PERFCOUNTER_CNTL2
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON19_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON19_PERFCOUNTER_STATE
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON19_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON19_PERFMON_CNTL
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON19_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON19_PERFMON_CNTL2
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON19_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON19_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON19_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON19_PERFMON_CVALUE_LOW
+#define DC_PERFMON19_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON19_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON19_PERFMON_HI
+#define DC_PERFMON19_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON19_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON19_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON19_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON19_PERFMON_LOW
+#define DC_PERFMON19_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON19_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dsc1_dispdec_dscc_dispdec
+//DSCC1_DSCC_CONFIG0
+#define DSCC1_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
+#define DSCC1_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE__SHIFT 0x4
+#define DSCC1_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN__SHIFT 0x8
+#define DSCC1_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION__SHIFT 0x10
+#define DSCC1_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
+#define DSCC1_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE_MASK 0x00000030L
+#define DSCC1_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN_MASK 0x00000100L
+#define DSCC1_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION_MASK 0xFFFF0000L
+//DSCC1_DSCC_CONFIG1
+#define DSCC1_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE__SHIFT 0x0
+#define DSCC1_DSCC_CONFIG1__DSCC_DISABLE_ICH__SHIFT 0x18
+#define DSCC1_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE_MASK 0x0003FFFFL
+#define DSCC1_DSCC_CONFIG1__DSCC_DISABLE_ICH_MASK 0x01000000L
+//DSCC1_DSCC_STATUS
+#define DSCC1_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x0
+#define DSCC1_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000001L
+//DSCC1_DSCC_INTERRUPT_CONTROL_STATUS
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED__SHIFT 0x0
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED__SHIFT 0x1
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED__SHIFT 0x2
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED__SHIFT 0x3
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED__SHIFT 0x4
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED__SHIFT 0x5
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED__SHIFT 0x6
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED__SHIFT 0x7
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED__SHIFT 0x8
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED__SHIFT 0x9
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED__SHIFT 0xa
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED__SHIFT 0xb
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED__SHIFT 0xc
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x10
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x11
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x12
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x13
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x14
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x15
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x16
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x17
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x18
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x19
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1a
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1b
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_INT_EN__SHIFT 0x1c
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_MASK 0x00000001L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_MASK 0x00000002L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_MASK 0x00000004L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_MASK 0x00000008L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_MASK 0x00000010L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_MASK 0x00000020L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_MASK 0x00000040L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_MASK 0x00000080L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_MASK 0x00000100L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_MASK 0x00000200L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_MASK 0x00000400L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_MASK 0x00000800L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_MASK 0x00001000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN_MASK 0x00010000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN_MASK 0x00020000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN_MASK 0x00040000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN_MASK 0x00080000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00100000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00200000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00400000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00800000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN_MASK 0x01000000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN_MASK 0x02000000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN_MASK 0x04000000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN_MASK 0x08000000L
+#define DSCC1_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_INT_EN_MASK 0x10000000L
+//DSCC1_DSCC_PPS_CONFIG0
+#define DSCC1_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR__SHIFT 0x4
+#define DSCC1_DSCC_PPS_CONFIG0__PPS_IDENTIFIER__SHIFT 0x8
+#define DSCC1_DSCC_PPS_CONFIG0__LINEBUF_DEPTH__SHIFT 0x18
+#define DSCC1_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x1c
+#define DSCC1_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR_MASK 0x0000000FL
+#define DSCC1_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR_MASK 0x000000F0L
+#define DSCC1_DSCC_PPS_CONFIG0__PPS_IDENTIFIER_MASK 0x0000FF00L
+#define DSCC1_DSCC_PPS_CONFIG0__LINEBUF_DEPTH_MASK 0x0F000000L
+#define DSCC1_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT_MASK 0xF0000000L
+//DSCC1_DSCC_PPS_CONFIG1
+#define DSCC1_DSCC_PPS_CONFIG1__BITS_PER_PIXEL__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG1__VBR_ENABLE__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG1__SIMPLE_422__SHIFT 0xb
+#define DSCC1_DSCC_PPS_CONFIG1__CONVERT_RGB__SHIFT 0xc
+#define DSCC1_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE__SHIFT 0xd
+#define DSCC1_DSCC_PPS_CONFIG1__NATIVE_422__SHIFT 0xe
+#define DSCC1_DSCC_PPS_CONFIG1__NATIVE_420__SHIFT 0xf
+#define DSCC1_DSCC_PPS_CONFIG1__CHUNK_SIZE__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG1__BITS_PER_PIXEL_MASK 0x000003FFL
+#define DSCC1_DSCC_PPS_CONFIG1__VBR_ENABLE_MASK 0x00000400L
+#define DSCC1_DSCC_PPS_CONFIG1__SIMPLE_422_MASK 0x00000800L
+#define DSCC1_DSCC_PPS_CONFIG1__CONVERT_RGB_MASK 0x00001000L
+#define DSCC1_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE_MASK 0x00002000L
+#define DSCC1_DSCC_PPS_CONFIG1__NATIVE_422_MASK 0x00004000L
+#define DSCC1_DSCC_PPS_CONFIG1__NATIVE_420_MASK 0x00008000L
+#define DSCC1_DSCC_PPS_CONFIG1__CHUNK_SIZE_MASK 0xFFFF0000L
+//DSCC1_DSCC_PPS_CONFIG2
+#define DSCC1_DSCC_PPS_CONFIG2__PIC_WIDTH__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG2__PIC_HEIGHT__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG2__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCC1_DSCC_PPS_CONFIG2__PIC_HEIGHT_MASK 0xFFFF0000L
+//DSCC1_DSCC_PPS_CONFIG3
+#define DSCC1_DSCC_PPS_CONFIG3__SLICE_WIDTH__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG3__SLICE_HEIGHT__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG3__SLICE_WIDTH_MASK 0x0000FFFFL
+#define DSCC1_DSCC_PPS_CONFIG3__SLICE_HEIGHT_MASK 0xFFFF0000L
+//DSCC1_DSCC_PPS_CONFIG4
+#define DSCC1_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY_MASK 0x000003FFL
+#define DSCC1_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY_MASK 0xFFFF0000L
+//DSCC1_DSCC_PPS_CONFIG5
+#define DSCC1_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE_MASK 0x0000003FL
+#define DSCC1_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL_MASK 0xFFFF0000L
+//DSCC1_DSCC_PPS_CONFIG6
+#define DSCC1_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET__SHIFT 0x18
+#define DSCC1_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL_MASK 0x00000FFFL
+#define DSCC1_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET_MASK 0x1F000000L
+//DSCC1_DSCC_PPS_CONFIG7
+#define DSCC1_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC1_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET_MASK 0xFFFF0000L
+//DSCC1_DSCC_PPS_CONFIG8
+#define DSCC1_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC1_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ_MASK 0xFFFF0000L
+//DSCC1_DSCC_PPS_CONFIG9
+#define DSCC1_DSCC_PPS_CONFIG9__INITIAL_OFFSET__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG9__FINAL_OFFSET__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG9__INITIAL_OFFSET_MASK 0x0000FFFFL
+#define DSCC1_DSCC_PPS_CONFIG9__FINAL_OFFSET_MASK 0xFFFF0000L
+//DSCC1_DSCC_PPS_CONFIG10
+#define DSCC1_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP__SHIFT 0x8
+#define DSCC1_DSCC_PPS_CONFIG10__RC_MODEL_SIZE__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP_MASK 0x00001F00L
+#define DSCC1_DSCC_PPS_CONFIG10__RC_MODEL_SIZE_MASK 0xFFFF0000L
+//DSCC1_DSCC_PPS_CONFIG11
+#define DSCC1_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0__SHIFT 0x8
+#define DSCC1_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO__SHIFT 0x18
+#define DSCC1_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI__SHIFT 0x1c
+#define DSCC1_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR_MASK 0x0000000FL
+#define DSCC1_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0_MASK 0x00001F00L
+#define DSCC1_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO_MASK 0x0F000000L
+#define DSCC1_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI_MASK 0xF0000000L
+//DSCC1_DSCC_PPS_CONFIG12
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH0__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH1__SHIFT 0x8
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH2__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH3__SHIFT 0x18
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH0_MASK 0x000000FFL
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH1_MASK 0x0000FF00L
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH2_MASK 0x00FF0000L
+#define DSCC1_DSCC_PPS_CONFIG12__RC_BUF_THRESH3_MASK 0xFF000000L
+//DSCC1_DSCC_PPS_CONFIG13
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH4__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH5__SHIFT 0x8
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH6__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH7__SHIFT 0x18
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH4_MASK 0x000000FFL
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH5_MASK 0x0000FF00L
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH6_MASK 0x00FF0000L
+#define DSCC1_DSCC_PPS_CONFIG13__RC_BUF_THRESH7_MASK 0xFF000000L
+//DSCC1_DSCC_PPS_CONFIG14
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH8__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH9__SHIFT 0x8
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH10__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH11__SHIFT 0x18
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH8_MASK 0x000000FFL
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH9_MASK 0x0000FF00L
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH10_MASK 0x00FF0000L
+#define DSCC1_DSCC_PPS_CONFIG14__RC_BUF_THRESH11_MASK 0xFF000000L
+//DSCC1_DSCC_PPS_CONFIG15
+#define DSCC1_DSCC_PPS_CONFIG15__RC_BUF_THRESH12__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG15__RC_BUF_THRESH13__SHIFT 0x8
+#define DSCC1_DSCC_PPS_CONFIG15__RANGE_MIN_QP0__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG15__RANGE_MAX_QP0__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG15__RC_BUF_THRESH12_MASK 0x000000FFL
+#define DSCC1_DSCC_PPS_CONFIG15__RC_BUF_THRESH13_MASK 0x0000FF00L
+#define DSCC1_DSCC_PPS_CONFIG15__RANGE_MIN_QP0_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG15__RANGE_MAX_QP0_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0_MASK 0xFC000000L
+//DSCC1_DSCC_PPS_CONFIG16
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MIN_QP1__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MAX_QP1__SHIFT 0x5
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MIN_QP2__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MAX_QP2__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MIN_QP1_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MAX_QP1_MASK 0x000003E0L
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1_MASK 0x0000FC00L
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MIN_QP2_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_MAX_QP2_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2_MASK 0xFC000000L
+//DSCC1_DSCC_PPS_CONFIG17
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MIN_QP3__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MAX_QP3__SHIFT 0x5
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MIN_QP4__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MAX_QP4__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MIN_QP3_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MAX_QP3_MASK 0x000003E0L
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3_MASK 0x0000FC00L
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MIN_QP4_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_MAX_QP4_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4_MASK 0xFC000000L
+//DSCC1_DSCC_PPS_CONFIG18
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MIN_QP5__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MAX_QP5__SHIFT 0x5
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MIN_QP6__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MAX_QP6__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MIN_QP5_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MAX_QP5_MASK 0x000003E0L
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5_MASK 0x0000FC00L
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MIN_QP6_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_MAX_QP6_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6_MASK 0xFC000000L
+//DSCC1_DSCC_PPS_CONFIG19
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MIN_QP7__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MAX_QP7__SHIFT 0x5
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MIN_QP8__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MAX_QP8__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MIN_QP7_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MAX_QP7_MASK 0x000003E0L
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7_MASK 0x0000FC00L
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MIN_QP8_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_MAX_QP8_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8_MASK 0xFC000000L
+//DSCC1_DSCC_PPS_CONFIG20
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MIN_QP9__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MAX_QP9__SHIFT 0x5
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MIN_QP10__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MAX_QP10__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MIN_QP9_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MAX_QP9_MASK 0x000003E0L
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9_MASK 0x0000FC00L
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MIN_QP10_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_MAX_QP10_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10_MASK 0xFC000000L
+//DSCC1_DSCC_PPS_CONFIG21
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MIN_QP11__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MAX_QP11__SHIFT 0x5
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MIN_QP12__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MAX_QP12__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MIN_QP11_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MAX_QP11_MASK 0x000003E0L
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11_MASK 0x0000FC00L
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MIN_QP12_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_MAX_QP12_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12_MASK 0xFC000000L
+//DSCC1_DSCC_PPS_CONFIG22
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MIN_QP13__SHIFT 0x0
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MAX_QP13__SHIFT 0x5
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13__SHIFT 0xa
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MIN_QP14__SHIFT 0x10
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MAX_QP14__SHIFT 0x15
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14__SHIFT 0x1a
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MIN_QP13_MASK 0x0000001FL
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MAX_QP13_MASK 0x000003E0L
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13_MASK 0x0000FC00L
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MIN_QP14_MASK 0x001F0000L
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_MAX_QP14_MASK 0x03E00000L
+#define DSCC1_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14_MASK 0xFC000000L
+//DSCC1_DSCC_MEM_POWER_CONTROL
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0x0
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS__SHIFT 0x8
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE__SHIFT 0x10
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS__SHIFT 0x18
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE__SHIFT 0x1c
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00000003L
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS_MASK 0x00000100L
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE_MASK 0x00030000L
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS_MASK 0x01000000L
+#define DSCC1_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE_MASK 0x30000000L
+//DSCC1_DSCC_R_Y_SQUARED_ERROR_LOWER
+#define DSCC1_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC1_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC1_DSCC_R_Y_SQUARED_ERROR_UPPER
+#define DSCC1_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC1_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC1_DSCC_G_CB_SQUARED_ERROR_LOWER
+#define DSCC1_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC1_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC1_DSCC_G_CB_SQUARED_ERROR_UPPER
+#define DSCC1_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC1_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC1_DSCC_B_CR_SQUARED_ERROR_LOWER
+#define DSCC1_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC1_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC1_DSCC_B_CR_SQUARED_ERROR_UPPER
+#define DSCC1_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC1_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC1_DSCC_MAX_ABS_ERROR0
+#define DSCC1_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC1_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR__SHIFT 0x10
+#define DSCC1_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR_MASK 0x0000FFFFL
+#define DSCC1_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR_MASK 0xFFFF0000L
+//DSCC1_DSCC_MAX_ABS_ERROR1
+#define DSCC1_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC1_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR_MASK 0x0000FFFFL
+//DSCC1_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL
+#define DSCC1_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC1_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL
+#define DSCC1_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC1_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL
+#define DSCC1_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC1_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL
+#define DSCC1_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC1_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC1_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC1_DSCC_TEST_DEBUG_BUS_ROTATE
+#define DSCC1_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0
+#define DSCC1_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8
+#define DSCC1_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10
+#define DSCC1_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18
+#define DSCC1_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL
+#define DSCC1_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L
+#define DSCC1_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L
+#define DSCC1_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L
+
+// addressBlock: dce_dc_dsc1_dispdec_dsccif_dispdec
+//DSCCIF1_DSCCIF_CONFIG0
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN__SHIFT 0x0
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x4
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS__SHIFT 0x8
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT__SHIFT 0xc
+#define DSCCIF1_DSCCIF_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x10
+#define DSCCIF1_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN_MASK 0x00000001L
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00000010L
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000100L
+#define DSCCIF1_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT_MASK 0x00007000L
+#define DSCCIF1_DSCCIF_CONFIG0__BITS_PER_COMPONENT_MASK 0x000F0000L
+#define DSCCIF1_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+//DSCCIF1_DSCCIF_CONFIG1
+#define DSCCIF1_DSCCIF_CONFIG1__PIC_WIDTH__SHIFT 0x0
+#define DSCCIF1_DSCCIF_CONFIG1__PIC_HEIGHT__SHIFT 0x10
+#define DSCCIF1_DSCCIF_CONFIG1__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCCIF1_DSCCIF_CONFIG1__PIC_HEIGHT_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dsc1_dispdec_dsc_top_dispdec
+//DSC_TOP1_DSC_TOP_CONTROL
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_CLOCK_EN__SHIFT 0x0
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS__SHIFT 0x8
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_FGCG_REP_DIS__SHIFT 0xc
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_DSCCLK_DYNAMIC_CLOCK_GATE_EN__SHIFT 0x10
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_FGCG_REP_DIS_MASK 0x00001000L
+#define DSC_TOP1_DSC_TOP_CONTROL__DSC_DSCCLK_DYNAMIC_CLOCK_GATE_EN_MASK 0x00010000L
+//DSC_TOP1_DSC_DEBUG_CONTROL
+#define DSC_TOP1_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP1_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4
+#define DSC_TOP1_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+#define DSC_TOP1_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L
+
+
+// addressBlock: dce_dc_dsc1_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON20_PERFCOUNTER_CNTL
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON20_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON20_PERFCOUNTER_CNTL2
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON20_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON20_PERFCOUNTER_STATE
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON20_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON20_PERFMON_CNTL
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON20_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON20_PERFMON_CNTL2
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON20_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON20_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON20_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON20_PERFMON_CVALUE_LOW
+#define DC_PERFMON20_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON20_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON20_PERFMON_HI
+#define DC_PERFMON20_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON20_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON20_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON20_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON20_PERFMON_LOW
+#define DC_PERFMON20_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON20_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dsc2_dispdec_dscc_dispdec
+//DSCC2_DSCC_CONFIG0
+#define DSCC2_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
+#define DSCC2_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE__SHIFT 0x4
+#define DSCC2_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN__SHIFT 0x8
+#define DSCC2_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION__SHIFT 0x10
+#define DSCC2_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
+#define DSCC2_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE_MASK 0x00000030L
+#define DSCC2_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN_MASK 0x00000100L
+#define DSCC2_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION_MASK 0xFFFF0000L
+//DSCC2_DSCC_CONFIG1
+#define DSCC2_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE__SHIFT 0x0
+#define DSCC2_DSCC_CONFIG1__DSCC_DISABLE_ICH__SHIFT 0x18
+#define DSCC2_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE_MASK 0x0003FFFFL
+#define DSCC2_DSCC_CONFIG1__DSCC_DISABLE_ICH_MASK 0x01000000L
+//DSCC2_DSCC_STATUS
+#define DSCC2_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x0
+#define DSCC2_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000001L
+//DSCC2_DSCC_INTERRUPT_CONTROL_STATUS
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED__SHIFT 0x0
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED__SHIFT 0x1
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED__SHIFT 0x2
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED__SHIFT 0x3
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED__SHIFT 0x4
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED__SHIFT 0x5
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED__SHIFT 0x6
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED__SHIFT 0x7
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED__SHIFT 0x8
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED__SHIFT 0x9
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED__SHIFT 0xa
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED__SHIFT 0xb
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED__SHIFT 0xc
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x10
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x11
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x12
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x13
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x14
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x15
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x16
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x17
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x18
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x19
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1a
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1b
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_INT_EN__SHIFT 0x1c
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_MASK 0x00000001L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_MASK 0x00000002L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_MASK 0x00000004L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_MASK 0x00000008L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_MASK 0x00000010L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_MASK 0x00000020L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_MASK 0x00000040L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_MASK 0x00000080L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_MASK 0x00000100L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_MASK 0x00000200L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_MASK 0x00000400L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_MASK 0x00000800L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_MASK 0x00001000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN_MASK 0x00010000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN_MASK 0x00020000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN_MASK 0x00040000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN_MASK 0x00080000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00100000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00200000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00400000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00800000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN_MASK 0x01000000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN_MASK 0x02000000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN_MASK 0x04000000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN_MASK 0x08000000L
+#define DSCC2_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_INT_EN_MASK 0x10000000L
+//DSCC2_DSCC_PPS_CONFIG0
+#define DSCC2_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR__SHIFT 0x4
+#define DSCC2_DSCC_PPS_CONFIG0__PPS_IDENTIFIER__SHIFT 0x8
+#define DSCC2_DSCC_PPS_CONFIG0__LINEBUF_DEPTH__SHIFT 0x18
+#define DSCC2_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x1c
+#define DSCC2_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR_MASK 0x0000000FL
+#define DSCC2_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR_MASK 0x000000F0L
+#define DSCC2_DSCC_PPS_CONFIG0__PPS_IDENTIFIER_MASK 0x0000FF00L
+#define DSCC2_DSCC_PPS_CONFIG0__LINEBUF_DEPTH_MASK 0x0F000000L
+#define DSCC2_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT_MASK 0xF0000000L
+//DSCC2_DSCC_PPS_CONFIG1
+#define DSCC2_DSCC_PPS_CONFIG1__BITS_PER_PIXEL__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG1__VBR_ENABLE__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG1__SIMPLE_422__SHIFT 0xb
+#define DSCC2_DSCC_PPS_CONFIG1__CONVERT_RGB__SHIFT 0xc
+#define DSCC2_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE__SHIFT 0xd
+#define DSCC2_DSCC_PPS_CONFIG1__NATIVE_422__SHIFT 0xe
+#define DSCC2_DSCC_PPS_CONFIG1__NATIVE_420__SHIFT 0xf
+#define DSCC2_DSCC_PPS_CONFIG1__CHUNK_SIZE__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG1__BITS_PER_PIXEL_MASK 0x000003FFL
+#define DSCC2_DSCC_PPS_CONFIG1__VBR_ENABLE_MASK 0x00000400L
+#define DSCC2_DSCC_PPS_CONFIG1__SIMPLE_422_MASK 0x00000800L
+#define DSCC2_DSCC_PPS_CONFIG1__CONVERT_RGB_MASK 0x00001000L
+#define DSCC2_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE_MASK 0x00002000L
+#define DSCC2_DSCC_PPS_CONFIG1__NATIVE_422_MASK 0x00004000L
+#define DSCC2_DSCC_PPS_CONFIG1__NATIVE_420_MASK 0x00008000L
+#define DSCC2_DSCC_PPS_CONFIG1__CHUNK_SIZE_MASK 0xFFFF0000L
+//DSCC2_DSCC_PPS_CONFIG2
+#define DSCC2_DSCC_PPS_CONFIG2__PIC_WIDTH__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG2__PIC_HEIGHT__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG2__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCC2_DSCC_PPS_CONFIG2__PIC_HEIGHT_MASK 0xFFFF0000L
+//DSCC2_DSCC_PPS_CONFIG3
+#define DSCC2_DSCC_PPS_CONFIG3__SLICE_WIDTH__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG3__SLICE_HEIGHT__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG3__SLICE_WIDTH_MASK 0x0000FFFFL
+#define DSCC2_DSCC_PPS_CONFIG3__SLICE_HEIGHT_MASK 0xFFFF0000L
+//DSCC2_DSCC_PPS_CONFIG4
+#define DSCC2_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY_MASK 0x000003FFL
+#define DSCC2_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY_MASK 0xFFFF0000L
+//DSCC2_DSCC_PPS_CONFIG5
+#define DSCC2_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE_MASK 0x0000003FL
+#define DSCC2_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL_MASK 0xFFFF0000L
+//DSCC2_DSCC_PPS_CONFIG6
+#define DSCC2_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET__SHIFT 0x18
+#define DSCC2_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL_MASK 0x00000FFFL
+#define DSCC2_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET_MASK 0x1F000000L
+//DSCC2_DSCC_PPS_CONFIG7
+#define DSCC2_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC2_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET_MASK 0xFFFF0000L
+//DSCC2_DSCC_PPS_CONFIG8
+#define DSCC2_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC2_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ_MASK 0xFFFF0000L
+//DSCC2_DSCC_PPS_CONFIG9
+#define DSCC2_DSCC_PPS_CONFIG9__INITIAL_OFFSET__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG9__FINAL_OFFSET__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG9__INITIAL_OFFSET_MASK 0x0000FFFFL
+#define DSCC2_DSCC_PPS_CONFIG9__FINAL_OFFSET_MASK 0xFFFF0000L
+//DSCC2_DSCC_PPS_CONFIG10
+#define DSCC2_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP__SHIFT 0x8
+#define DSCC2_DSCC_PPS_CONFIG10__RC_MODEL_SIZE__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP_MASK 0x00001F00L
+#define DSCC2_DSCC_PPS_CONFIG10__RC_MODEL_SIZE_MASK 0xFFFF0000L
+//DSCC2_DSCC_PPS_CONFIG11
+#define DSCC2_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0__SHIFT 0x8
+#define DSCC2_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO__SHIFT 0x18
+#define DSCC2_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI__SHIFT 0x1c
+#define DSCC2_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR_MASK 0x0000000FL
+#define DSCC2_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0_MASK 0x00001F00L
+#define DSCC2_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO_MASK 0x0F000000L
+#define DSCC2_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI_MASK 0xF0000000L
+//DSCC2_DSCC_PPS_CONFIG12
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH0__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH1__SHIFT 0x8
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH2__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH3__SHIFT 0x18
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH0_MASK 0x000000FFL
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH1_MASK 0x0000FF00L
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH2_MASK 0x00FF0000L
+#define DSCC2_DSCC_PPS_CONFIG12__RC_BUF_THRESH3_MASK 0xFF000000L
+//DSCC2_DSCC_PPS_CONFIG13
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH4__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH5__SHIFT 0x8
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH6__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH7__SHIFT 0x18
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH4_MASK 0x000000FFL
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH5_MASK 0x0000FF00L
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH6_MASK 0x00FF0000L
+#define DSCC2_DSCC_PPS_CONFIG13__RC_BUF_THRESH7_MASK 0xFF000000L
+//DSCC2_DSCC_PPS_CONFIG14
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH8__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH9__SHIFT 0x8
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH10__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH11__SHIFT 0x18
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH8_MASK 0x000000FFL
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH9_MASK 0x0000FF00L
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH10_MASK 0x00FF0000L
+#define DSCC2_DSCC_PPS_CONFIG14__RC_BUF_THRESH11_MASK 0xFF000000L
+//DSCC2_DSCC_PPS_CONFIG15
+#define DSCC2_DSCC_PPS_CONFIG15__RC_BUF_THRESH12__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG15__RC_BUF_THRESH13__SHIFT 0x8
+#define DSCC2_DSCC_PPS_CONFIG15__RANGE_MIN_QP0__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG15__RANGE_MAX_QP0__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG15__RC_BUF_THRESH12_MASK 0x000000FFL
+#define DSCC2_DSCC_PPS_CONFIG15__RC_BUF_THRESH13_MASK 0x0000FF00L
+#define DSCC2_DSCC_PPS_CONFIG15__RANGE_MIN_QP0_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG15__RANGE_MAX_QP0_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0_MASK 0xFC000000L
+//DSCC2_DSCC_PPS_CONFIG16
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MIN_QP1__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MAX_QP1__SHIFT 0x5
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MIN_QP2__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MAX_QP2__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MIN_QP1_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MAX_QP1_MASK 0x000003E0L
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1_MASK 0x0000FC00L
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MIN_QP2_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_MAX_QP2_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2_MASK 0xFC000000L
+//DSCC2_DSCC_PPS_CONFIG17
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MIN_QP3__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MAX_QP3__SHIFT 0x5
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MIN_QP4__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MAX_QP4__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MIN_QP3_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MAX_QP3_MASK 0x000003E0L
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3_MASK 0x0000FC00L
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MIN_QP4_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_MAX_QP4_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4_MASK 0xFC000000L
+//DSCC2_DSCC_PPS_CONFIG18
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MIN_QP5__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MAX_QP5__SHIFT 0x5
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MIN_QP6__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MAX_QP6__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MIN_QP5_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MAX_QP5_MASK 0x000003E0L
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5_MASK 0x0000FC00L
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MIN_QP6_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_MAX_QP6_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6_MASK 0xFC000000L
+//DSCC2_DSCC_PPS_CONFIG19
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MIN_QP7__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MAX_QP7__SHIFT 0x5
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MIN_QP8__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MAX_QP8__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MIN_QP7_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MAX_QP7_MASK 0x000003E0L
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7_MASK 0x0000FC00L
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MIN_QP8_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_MAX_QP8_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8_MASK 0xFC000000L
+//DSCC2_DSCC_PPS_CONFIG20
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MIN_QP9__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MAX_QP9__SHIFT 0x5
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MIN_QP10__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MAX_QP10__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MIN_QP9_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MAX_QP9_MASK 0x000003E0L
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9_MASK 0x0000FC00L
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MIN_QP10_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_MAX_QP10_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10_MASK 0xFC000000L
+//DSCC2_DSCC_PPS_CONFIG21
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MIN_QP11__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MAX_QP11__SHIFT 0x5
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MIN_QP12__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MAX_QP12__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MIN_QP11_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MAX_QP11_MASK 0x000003E0L
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11_MASK 0x0000FC00L
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MIN_QP12_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_MAX_QP12_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12_MASK 0xFC000000L
+//DSCC2_DSCC_PPS_CONFIG22
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MIN_QP13__SHIFT 0x0
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MAX_QP13__SHIFT 0x5
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13__SHIFT 0xa
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MIN_QP14__SHIFT 0x10
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MAX_QP14__SHIFT 0x15
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14__SHIFT 0x1a
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MIN_QP13_MASK 0x0000001FL
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MAX_QP13_MASK 0x000003E0L
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13_MASK 0x0000FC00L
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MIN_QP14_MASK 0x001F0000L
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_MAX_QP14_MASK 0x03E00000L
+#define DSCC2_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14_MASK 0xFC000000L
+//DSCC2_DSCC_MEM_POWER_CONTROL
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0x0
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS__SHIFT 0x8
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE__SHIFT 0x10
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS__SHIFT 0x18
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE__SHIFT 0x1c
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00000003L
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS_MASK 0x00000100L
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE_MASK 0x00030000L
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS_MASK 0x01000000L
+#define DSCC2_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE_MASK 0x30000000L
+//DSCC2_DSCC_R_Y_SQUARED_ERROR_LOWER
+#define DSCC2_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC2_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC2_DSCC_R_Y_SQUARED_ERROR_UPPER
+#define DSCC2_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC2_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC2_DSCC_G_CB_SQUARED_ERROR_LOWER
+#define DSCC2_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC2_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC2_DSCC_G_CB_SQUARED_ERROR_UPPER
+#define DSCC2_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC2_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC2_DSCC_B_CR_SQUARED_ERROR_LOWER
+#define DSCC2_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC2_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC2_DSCC_B_CR_SQUARED_ERROR_UPPER
+#define DSCC2_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC2_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC2_DSCC_MAX_ABS_ERROR0
+#define DSCC2_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC2_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR__SHIFT 0x10
+#define DSCC2_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR_MASK 0x0000FFFFL
+#define DSCC2_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR_MASK 0xFFFF0000L
+//DSCC2_DSCC_MAX_ABS_ERROR1
+#define DSCC2_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC2_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR_MASK 0x0000FFFFL
+//DSCC2_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL
+#define DSCC2_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC2_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL
+#define DSCC2_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC2_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL
+#define DSCC2_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC2_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL
+#define DSCC2_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC2_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC2_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC2_DSCC_TEST_DEBUG_BUS_ROTATE
+#define DSCC2_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0
+#define DSCC2_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8
+#define DSCC2_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10
+#define DSCC2_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18
+#define DSCC2_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL
+#define DSCC2_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L
+#define DSCC2_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L
+#define DSCC2_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L
+
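(Editor's note, not part of the patch: each register above is described by a paired __SHIFT/_MASK macro; reading a packed field means AND-ing with the _MASK and shifting right by __SHIFT. A minimal C sketch under that assumption, using only macros defined in this block — the helper function name is my own, not from the driver:)

/* Illustrative sketch only: unpack picture width/height from a raw
 * DSCC2_DSCC_PPS_CONFIG2 register value that is assumed to have been
 * read elsewhere.  dsc_decode_pic_size() is a hypothetical helper.
 */
static void dsc_decode_pic_size(u32 pps2, u32 *pic_width, u32 *pic_height)
{
	*pic_width  = (pps2 & DSCC2_DSCC_PPS_CONFIG2__PIC_WIDTH_MASK) >>
		      DSCC2_DSCC_PPS_CONFIG2__PIC_WIDTH__SHIFT;
	*pic_height = (pps2 & DSCC2_DSCC_PPS_CONFIG2__PIC_HEIGHT_MASK) >>
		      DSCC2_DSCC_PPS_CONFIG2__PIC_HEIGHT__SHIFT;
}
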
+// addressBlock: dce_dc_dsc2_dispdec_dsccif_dispdec
+//DSCCIF2_DSCCIF_CONFIG0
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN__SHIFT 0x0
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x4
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS__SHIFT 0x8
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT__SHIFT 0xc
+#define DSCCIF2_DSCCIF_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x10
+#define DSCCIF2_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN_MASK 0x00000001L
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00000010L
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000100L
+#define DSCCIF2_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT_MASK 0x00007000L
+#define DSCCIF2_DSCCIF_CONFIG0__BITS_PER_COMPONENT_MASK 0x000F0000L
+#define DSCCIF2_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+//DSCCIF2_DSCCIF_CONFIG1
+#define DSCCIF2_DSCCIF_CONFIG1__PIC_WIDTH__SHIFT 0x0
+#define DSCCIF2_DSCCIF_CONFIG1__PIC_HEIGHT__SHIFT 0x10
+#define DSCCIF2_DSCCIF_CONFIG1__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCCIF2_DSCCIF_CONFIG1__PIC_HEIGHT_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dsc2_dispdec_dsc_top_dispdec
+//DSC_TOP2_DSC_TOP_CONTROL
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_CLOCK_EN__SHIFT 0x0
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS__SHIFT 0x8
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_FGCG_REP_DIS__SHIFT 0xc
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_DSCCLK_DYNAMIC_CLOCK_GATE_EN__SHIFT 0x10
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_FGCG_REP_DIS_MASK 0x00001000L
+#define DSC_TOP2_DSC_TOP_CONTROL__DSC_DSCCLK_DYNAMIC_CLOCK_GATE_EN_MASK 0x00010000L
+//DSC_TOP2_DSC_DEBUG_CONTROL
+#define DSC_TOP2_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP2_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4
+#define DSC_TOP2_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+#define DSC_TOP2_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L
+
+
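(Editor's note, not part of the patch: writing a field is the reverse of reading one — clear it with the _MASK, then OR in the new value shifted by __SHIFT. A minimal C sketch follows; read_reg(), write_reg() and mmDSC_TOP2_DSC_TOP_CONTROL are hypothetical placeholders for the driver's real MMIO accessors and register offset, while the shift/mask macros come from the block above:)

/* Illustrative sketch only: enable the DSC clock via DSC_TOP2_DSC_TOP_CONTROL
 * using a read-modify-write of the DSC_CLOCK_EN field.
 */
static void dsc_top2_clock_enable(void)
{
	u32 val = read_reg(mmDSC_TOP2_DSC_TOP_CONTROL);

	val &= ~DSC_TOP2_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK;          /* clear field */
	val |= (1u << DSC_TOP2_DSC_TOP_CONTROL__DSC_CLOCK_EN__SHIFT)  /* set new value */
	       & DSC_TOP2_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK;
	write_reg(mmDSC_TOP2_DSC_TOP_CONTROL, val);
}
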
+// addressBlock: dce_dc_dsc2_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON21_PERFCOUNTER_CNTL
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON21_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON21_PERFCOUNTER_CNTL2
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON21_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON21_PERFCOUNTER_STATE
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON21_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON21_PERFMON_CNTL
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON21_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON21_PERFMON_CNTL2
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON21_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON21_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON21_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON21_PERFMON_CVALUE_LOW
+#define DC_PERFMON21_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON21_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON21_PERFMON_HI
+#define DC_PERFMON21_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON21_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON21_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON21_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON21_PERFMON_LOW
+#define DC_PERFMON21_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON21_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dsc3_dispdec_dscc_dispdec
+//DSCC3_DSCC_CONFIG0
+#define DSCC3_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
+#define DSCC3_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE__SHIFT 0x4
+#define DSCC3_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN__SHIFT 0x8
+#define DSCC3_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION__SHIFT 0x10
+#define DSCC3_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
+#define DSCC3_DSCC_CONFIG0__NUMBER_OF_SLICES_PER_LINE_MASK 0x00000030L
+#define DSCC3_DSCC_CONFIG0__ALTERNATE_ICH_ENCODING_EN_MASK 0x00000100L
+#define DSCC3_DSCC_CONFIG0__NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION_MASK 0xFFFF0000L
+//DSCC3_DSCC_CONFIG1
+#define DSCC3_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE__SHIFT 0x0
+#define DSCC3_DSCC_CONFIG1__DSCC_DISABLE_ICH__SHIFT 0x18
+#define DSCC3_DSCC_CONFIG1__DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE_MASK 0x0003FFFFL
+#define DSCC3_DSCC_CONFIG1__DSCC_DISABLE_ICH_MASK 0x01000000L
+//DSCC3_DSCC_STATUS
+#define DSCC3_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x0
+#define DSCC3_DSCC_STATUS__DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x00000001L
+//DSCC3_DSCC_INTERRUPT_CONTROL_STATUS
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED__SHIFT 0x0
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED__SHIFT 0x1
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED__SHIFT 0x2
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED__SHIFT 0x3
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED__SHIFT 0x4
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED__SHIFT 0x5
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED__SHIFT 0x6
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED__SHIFT 0x7
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED__SHIFT 0x8
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED__SHIFT 0x9
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED__SHIFT 0xa
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED__SHIFT 0xb
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED__SHIFT 0xc
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x10
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x11
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x12
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x13
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x14
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x15
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x16
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x17
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x18
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x19
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1a
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN__SHIFT 0x1b
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_INT_EN__SHIFT 0x1c
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_MASK 0x00000001L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_MASK 0x00000002L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_MASK 0x00000004L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_MASK 0x00000008L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_MASK 0x00000010L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_MASK 0x00000020L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_MASK 0x00000040L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_MASK 0x00000080L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_MASK 0x00000100L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_MASK 0x00000200L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_MASK 0x00000400L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_MASK 0x00000800L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_MASK 0x00001000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN_MASK 0x00010000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN_MASK 0x00020000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN_MASK 0x00040000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN_MASK 0x00080000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00100000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00200000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00400000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00800000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN_MASK 0x01000000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN_MASK 0x02000000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN_MASK 0x04000000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN_MASK 0x08000000L
+#define DSCC3_DSCC_INTERRUPT_CONTROL_STATUS__DSCC_END_OF_FRAME_NOT_REACHED_OCCURRED_INT_EN_MASK 0x10000000L
+//DSCC3_DSCC_PPS_CONFIG0
+#define DSCC3_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR__SHIFT 0x4
+#define DSCC3_DSCC_PPS_CONFIG0__PPS_IDENTIFIER__SHIFT 0x8
+#define DSCC3_DSCC_PPS_CONFIG0__LINEBUF_DEPTH__SHIFT 0x18
+#define DSCC3_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x1c
+#define DSCC3_DSCC_PPS_CONFIG0__DSC_VERSION_MINOR_MASK 0x0000000FL
+#define DSCC3_DSCC_PPS_CONFIG0__DSC_VERSION_MAJOR_MASK 0x000000F0L
+#define DSCC3_DSCC_PPS_CONFIG0__PPS_IDENTIFIER_MASK 0x0000FF00L
+#define DSCC3_DSCC_PPS_CONFIG0__LINEBUF_DEPTH_MASK 0x0F000000L
+#define DSCC3_DSCC_PPS_CONFIG0__BITS_PER_COMPONENT_MASK 0xF0000000L
+//DSCC3_DSCC_PPS_CONFIG1
+#define DSCC3_DSCC_PPS_CONFIG1__BITS_PER_PIXEL__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG1__VBR_ENABLE__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG1__SIMPLE_422__SHIFT 0xb
+#define DSCC3_DSCC_PPS_CONFIG1__CONVERT_RGB__SHIFT 0xc
+#define DSCC3_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE__SHIFT 0xd
+#define DSCC3_DSCC_PPS_CONFIG1__NATIVE_422__SHIFT 0xe
+#define DSCC3_DSCC_PPS_CONFIG1__NATIVE_420__SHIFT 0xf
+#define DSCC3_DSCC_PPS_CONFIG1__CHUNK_SIZE__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG1__BITS_PER_PIXEL_MASK 0x000003FFL
+#define DSCC3_DSCC_PPS_CONFIG1__VBR_ENABLE_MASK 0x00000400L
+#define DSCC3_DSCC_PPS_CONFIG1__SIMPLE_422_MASK 0x00000800L
+#define DSCC3_DSCC_PPS_CONFIG1__CONVERT_RGB_MASK 0x00001000L
+#define DSCC3_DSCC_PPS_CONFIG1__BLOCK_PRED_ENABLE_MASK 0x00002000L
+#define DSCC3_DSCC_PPS_CONFIG1__NATIVE_422_MASK 0x00004000L
+#define DSCC3_DSCC_PPS_CONFIG1__NATIVE_420_MASK 0x00008000L
+#define DSCC3_DSCC_PPS_CONFIG1__CHUNK_SIZE_MASK 0xFFFF0000L
+//DSCC3_DSCC_PPS_CONFIG2
+#define DSCC3_DSCC_PPS_CONFIG2__PIC_WIDTH__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG2__PIC_HEIGHT__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG2__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCC3_DSCC_PPS_CONFIG2__PIC_HEIGHT_MASK 0xFFFF0000L
+//DSCC3_DSCC_PPS_CONFIG3
+#define DSCC3_DSCC_PPS_CONFIG3__SLICE_WIDTH__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG3__SLICE_HEIGHT__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG3__SLICE_WIDTH_MASK 0x0000FFFFL
+#define DSCC3_DSCC_PPS_CONFIG3__SLICE_HEIGHT_MASK 0xFFFF0000L
+//DSCC3_DSCC_PPS_CONFIG4
+#define DSCC3_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG4__INITIAL_XMIT_DELAY_MASK 0x000003FFL
+#define DSCC3_DSCC_PPS_CONFIG4__INITIAL_DEC_DELAY_MASK 0xFFFF0000L
+//DSCC3_DSCC_PPS_CONFIG5
+#define DSCC3_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG5__INITIAL_SCALE_VALUE_MASK 0x0000003FL
+#define DSCC3_DSCC_PPS_CONFIG5__SCALE_INCREMENT_INTERVAL_MASK 0xFFFF0000L
+//DSCC3_DSCC_PPS_CONFIG6
+#define DSCC3_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET__SHIFT 0x18
+#define DSCC3_DSCC_PPS_CONFIG6__SCALE_DECREMENT_INTERVAL_MASK 0x00000FFFL
+#define DSCC3_DSCC_PPS_CONFIG6__FIRST_LINE_BPG_OFFSET_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG6__SECOND_LINE_BPG_OFFSET_MASK 0x1F000000L
+//DSCC3_DSCC_PPS_CONFIG7
+#define DSCC3_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG7__NFL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC3_DSCC_PPS_CONFIG7__SLICE_BPG_OFFSET_MASK 0xFFFF0000L
+//DSCC3_DSCC_PPS_CONFIG8
+#define DSCC3_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG8__NSL_BPG_OFFSET_MASK 0x0000FFFFL
+#define DSCC3_DSCC_PPS_CONFIG8__SECOND_LINE_OFFSET_ADJ_MASK 0xFFFF0000L
+//DSCC3_DSCC_PPS_CONFIG9
+#define DSCC3_DSCC_PPS_CONFIG9__INITIAL_OFFSET__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG9__FINAL_OFFSET__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG9__INITIAL_OFFSET_MASK 0x0000FFFFL
+#define DSCC3_DSCC_PPS_CONFIG9__FINAL_OFFSET_MASK 0xFFFF0000L
+//DSCC3_DSCC_PPS_CONFIG10
+#define DSCC3_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP__SHIFT 0x8
+#define DSCC3_DSCC_PPS_CONFIG10__RC_MODEL_SIZE__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG10__FLATNESS_MIN_QP_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG10__FLATNESS_MAX_QP_MASK 0x00001F00L
+#define DSCC3_DSCC_PPS_CONFIG10__RC_MODEL_SIZE_MASK 0xFFFF0000L
+//DSCC3_DSCC_PPS_CONFIG11
+#define DSCC3_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0__SHIFT 0x8
+#define DSCC3_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO__SHIFT 0x18
+#define DSCC3_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI__SHIFT 0x1c
+#define DSCC3_DSCC_PPS_CONFIG11__RC_EDGE_FACTOR_MASK 0x0000000FL
+#define DSCC3_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT0_MASK 0x00001F00L
+#define DSCC3_DSCC_PPS_CONFIG11__RC_QUANT_INCR_LIMIT1_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_LO_MASK 0x0F000000L
+#define DSCC3_DSCC_PPS_CONFIG11__RC_TGT_OFFSET_HI_MASK 0xF0000000L
+//DSCC3_DSCC_PPS_CONFIG12
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH0__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH1__SHIFT 0x8
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH2__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH3__SHIFT 0x18
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH0_MASK 0x000000FFL
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH1_MASK 0x0000FF00L
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH2_MASK 0x00FF0000L
+#define DSCC3_DSCC_PPS_CONFIG12__RC_BUF_THRESH3_MASK 0xFF000000L
+//DSCC3_DSCC_PPS_CONFIG13
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH4__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH5__SHIFT 0x8
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH6__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH7__SHIFT 0x18
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH4_MASK 0x000000FFL
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH5_MASK 0x0000FF00L
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH6_MASK 0x00FF0000L
+#define DSCC3_DSCC_PPS_CONFIG13__RC_BUF_THRESH7_MASK 0xFF000000L
+//DSCC3_DSCC_PPS_CONFIG14
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH8__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH9__SHIFT 0x8
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH10__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH11__SHIFT 0x18
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH8_MASK 0x000000FFL
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH9_MASK 0x0000FF00L
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH10_MASK 0x00FF0000L
+#define DSCC3_DSCC_PPS_CONFIG14__RC_BUF_THRESH11_MASK 0xFF000000L
+//DSCC3_DSCC_PPS_CONFIG15
+#define DSCC3_DSCC_PPS_CONFIG15__RC_BUF_THRESH12__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG15__RC_BUF_THRESH13__SHIFT 0x8
+#define DSCC3_DSCC_PPS_CONFIG15__RANGE_MIN_QP0__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG15__RANGE_MAX_QP0__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG15__RC_BUF_THRESH12_MASK 0x000000FFL
+#define DSCC3_DSCC_PPS_CONFIG15__RC_BUF_THRESH13_MASK 0x0000FF00L
+#define DSCC3_DSCC_PPS_CONFIG15__RANGE_MIN_QP0_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG15__RANGE_MAX_QP0_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG15__RANGE_BPG_OFFSET0_MASK 0xFC000000L
+//DSCC3_DSCC_PPS_CONFIG16
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MIN_QP1__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MAX_QP1__SHIFT 0x5
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MIN_QP2__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MAX_QP2__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MIN_QP1_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MAX_QP1_MASK 0x000003E0L
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET1_MASK 0x0000FC00L
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MIN_QP2_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_MAX_QP2_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG16__RANGE_BPG_OFFSET2_MASK 0xFC000000L
+//DSCC3_DSCC_PPS_CONFIG17
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MIN_QP3__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MAX_QP3__SHIFT 0x5
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MIN_QP4__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MAX_QP4__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MIN_QP3_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MAX_QP3_MASK 0x000003E0L
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET3_MASK 0x0000FC00L
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MIN_QP4_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_MAX_QP4_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG17__RANGE_BPG_OFFSET4_MASK 0xFC000000L
+//DSCC3_DSCC_PPS_CONFIG18
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MIN_QP5__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MAX_QP5__SHIFT 0x5
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MIN_QP6__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MAX_QP6__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MIN_QP5_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MAX_QP5_MASK 0x000003E0L
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET5_MASK 0x0000FC00L
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MIN_QP6_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_MAX_QP6_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG18__RANGE_BPG_OFFSET6_MASK 0xFC000000L
+//DSCC3_DSCC_PPS_CONFIG19
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MIN_QP7__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MAX_QP7__SHIFT 0x5
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MIN_QP8__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MAX_QP8__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MIN_QP7_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MAX_QP7_MASK 0x000003E0L
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET7_MASK 0x0000FC00L
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MIN_QP8_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_MAX_QP8_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG19__RANGE_BPG_OFFSET8_MASK 0xFC000000L
+//DSCC3_DSCC_PPS_CONFIG20
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MIN_QP9__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MAX_QP9__SHIFT 0x5
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MIN_QP10__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MAX_QP10__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MIN_QP9_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MAX_QP9_MASK 0x000003E0L
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET9_MASK 0x0000FC00L
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MIN_QP10_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_MAX_QP10_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG20__RANGE_BPG_OFFSET10_MASK 0xFC000000L
+//DSCC3_DSCC_PPS_CONFIG21
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MIN_QP11__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MAX_QP11__SHIFT 0x5
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MIN_QP12__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MAX_QP12__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MIN_QP11_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MAX_QP11_MASK 0x000003E0L
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET11_MASK 0x0000FC00L
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MIN_QP12_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_MAX_QP12_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG21__RANGE_BPG_OFFSET12_MASK 0xFC000000L
+//DSCC3_DSCC_PPS_CONFIG22
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MIN_QP13__SHIFT 0x0
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MAX_QP13__SHIFT 0x5
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13__SHIFT 0xa
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MIN_QP14__SHIFT 0x10
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MAX_QP14__SHIFT 0x15
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14__SHIFT 0x1a
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MIN_QP13_MASK 0x0000001FL
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MAX_QP13_MASK 0x000003E0L
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET13_MASK 0x0000FC00L
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MIN_QP14_MASK 0x001F0000L
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_MAX_QP14_MASK 0x03E00000L
+#define DSCC3_DSCC_PPS_CONFIG22__RANGE_BPG_OFFSET14_MASK 0xFC000000L
+//DSCC3_DSCC_MEM_POWER_CONTROL
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0x0
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS__SHIFT 0x8
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE__SHIFT 0x10
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS__SHIFT 0x18
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE__SHIFT 0x1c
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00000003L
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_DIS_MASK 0x00000100L
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_MEM_PWR_STATE_MASK 0x00030000L
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_DIS_MASK 0x01000000L
+#define DSCC3_DSCC_MEM_POWER_CONTROL__DSCC_NATIVE_422_MEM_PWR_STATE_MASK 0x30000000L
+//DSCC3_DSCC_R_Y_SQUARED_ERROR_LOWER
+#define DSCC3_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC3_DSCC_R_Y_SQUARED_ERROR_LOWER__DSCC_R_Y_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC3_DSCC_R_Y_SQUARED_ERROR_UPPER
+#define DSCC3_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC3_DSCC_R_Y_SQUARED_ERROR_UPPER__DSCC_R_Y_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC3_DSCC_G_CB_SQUARED_ERROR_LOWER
+#define DSCC3_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC3_DSCC_G_CB_SQUARED_ERROR_LOWER__DSCC_G_CB_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC3_DSCC_G_CB_SQUARED_ERROR_UPPER
+#define DSCC3_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC3_DSCC_G_CB_SQUARED_ERROR_UPPER__DSCC_G_CB_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC3_DSCC_B_CR_SQUARED_ERROR_LOWER
+#define DSCC3_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER__SHIFT 0x0
+#define DSCC3_DSCC_B_CR_SQUARED_ERROR_LOWER__DSCC_B_CR_SQUARED_ERROR_LOWER_MASK 0xFFFFFFFFL
+//DSCC3_DSCC_B_CR_SQUARED_ERROR_UPPER
+#define DSCC3_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER__SHIFT 0x0
+#define DSCC3_DSCC_B_CR_SQUARED_ERROR_UPPER__DSCC_B_CR_SQUARED_ERROR_UPPER_MASK 0xFFFFFFFFL
+//DSCC3_DSCC_MAX_ABS_ERROR0
+#define DSCC3_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC3_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR__SHIFT 0x10
+#define DSCC3_DSCC_MAX_ABS_ERROR0__DSCC_R_Y_MAX_ABS_ERROR_MASK 0x0000FFFFL
+#define DSCC3_DSCC_MAX_ABS_ERROR0__DSCC_G_CB_MAX_ABS_ERROR_MASK 0xFFFF0000L
+//DSCC3_DSCC_MAX_ABS_ERROR1
+#define DSCC3_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR__SHIFT 0x0
+#define DSCC3_DSCC_MAX_ABS_ERROR1__DSCC_B_CR_MAX_ABS_ERROR_MASK 0x0000FFFFL
+//DSCC3_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL
+#define DSCC3_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC3_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL
+#define DSCC3_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC3_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL
+#define DSCC3_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC3_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL
+#define DSCC3_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC3_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC3_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0
+#define DSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL
+//DSCC3_DSCC_TEST_DEBUG_BUS_ROTATE
+#define DSCC3_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0
+#define DSCC3_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8
+#define DSCC3_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10
+#define DSCC3_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18
+#define DSCC3_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL
+#define DSCC3_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L
+#define DSCC3_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L
+#define DSCC3_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L
+
+// addressBlock: dce_dc_dsc3_dispdec_dsccif_dispdec
+//DSCCIF3_DSCCIF_CONFIG0
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN__SHIFT 0x0
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN__SHIFT 0x4
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS__SHIFT 0x8
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT__SHIFT 0xc
+#define DSCCIF3_DSCCIF_CONFIG0__BITS_PER_COMPONENT__SHIFT 0x10
+#define DSCCIF3_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN_MASK 0x00000001L
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN_MASK 0x00000010L
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000100L
+#define DSCCIF3_DSCCIF_CONFIG0__INPUT_PIXEL_FORMAT_MASK 0x00007000L
+#define DSCCIF3_DSCCIF_CONFIG0__BITS_PER_COMPONENT_MASK 0x000F0000L
+#define DSCCIF3_DSCCIF_CONFIG0__DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+//DSCCIF3_DSCCIF_CONFIG1
+#define DSCCIF3_DSCCIF_CONFIG1__PIC_WIDTH__SHIFT 0x0
+#define DSCCIF3_DSCCIF_CONFIG1__PIC_HEIGHT__SHIFT 0x10
+#define DSCCIF3_DSCCIF_CONFIG1__PIC_WIDTH_MASK 0x0000FFFFL
+#define DSCCIF3_DSCCIF_CONFIG1__PIC_HEIGHT_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dsc3_dispdec_dsc_top_dispdec
+//DSC_TOP3_DSC_TOP_CONTROL
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_CLOCK_EN__SHIFT 0x0
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS__SHIFT 0x8
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_FGCG_REP_DIS__SHIFT 0xc
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_DSCCLK_DYNAMIC_CLOCK_GATE_EN__SHIFT 0x10
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_FGCG_REP_DIS_MASK 0x00001000L
+#define DSC_TOP3_DSC_TOP_CONTROL__DSC_DSCCLK_DYNAMIC_CLOCK_GATE_EN_MASK 0x00010000L
+//DSC_TOP3_DSC_DEBUG_CONTROL
+#define DSC_TOP3_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP3_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4
+#define DSC_TOP3_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+#define DSC_TOP3_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L
+
+
+// addressBlock: dce_dc_dsc3_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON22_PERFCOUNTER_CNTL
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON22_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON22_PERFCOUNTER_CNTL2
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON22_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON22_PERFCOUNTER_STATE
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON22_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON22_PERFMON_CNTL
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON22_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON22_PERFMON_CNTL2
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON22_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON22_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON22_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON22_PERFMON_CVALUE_LOW
+#define DC_PERFMON22_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON22_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON22_PERFMON_HI
+#define DC_PERFMON22_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON22_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON22_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON22_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON22_PERFMON_LOW
+#define DC_PERFMON22_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON22_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_hpo_top_dispdec
+//HPO_TOP_CLOCK_CONTROL
+#define HPO_TOP_CLOCK_CONTROL__HPO_DISPCLK_R_GATE_DIS__SHIFT 0x0
+#define HPO_TOP_CLOCK_CONTROL__HPO_DISPCLK_GATE_DIS__SHIFT 0x1
+#define HPO_TOP_CLOCK_CONTROL__HPO_SOCCLK_R_GATE_DIS__SHIFT 0x4
+#define HPO_TOP_CLOCK_CONTROL__HPO_SOCCLK_GATE_DIS__SHIFT 0x5
+#define HPO_TOP_CLOCK_CONTROL__HPO_HDMISTREAMCLK_R_GATE_DIS__SHIFT 0x8
+#define HPO_TOP_CLOCK_CONTROL__HPO_HDMISTREAMCLK_G_GATE_DIS__SHIFT 0x9
+#define HPO_TOP_CLOCK_CONTROL__HPO_HDMICHARCLK_R_GATE_DIS__SHIFT 0xc
+#define HPO_TOP_CLOCK_CONTROL__HPO_HDMICHARCLK_G_GATE_DIS__SHIFT 0xd
+#define HPO_TOP_CLOCK_CONTROL__HPO_DPSTREAMCLK_R_GATE_DIS__SHIFT 0x10
+#define HPO_TOP_CLOCK_CONTROL__HPO_DPSTREAMCLK_G_GATE_DIS__SHIFT 0x11
+#define HPO_TOP_CLOCK_CONTROL__HPO_SYMCLK32_SE_R_GATE_DIS__SHIFT 0x12
+#define HPO_TOP_CLOCK_CONTROL__HPO_SYMCLK32_SE_G_GATE_DIS__SHIFT 0x13
+#define HPO_TOP_CLOCK_CONTROL__HPO_SYMCLK32_LE_R_GATE_DIS__SHIFT 0x14
+#define HPO_TOP_CLOCK_CONTROL__HPO_SYMCLK32_LE_G_GATE_DIS__SHIFT 0x15
+#define HPO_TOP_CLOCK_CONTROL__HPO_TEST_CLK_SEL__SHIFT 0x18
+#define HPO_TOP_CLOCK_CONTROL__HPO_DISPCLK_R_GATE_DIS_MASK 0x00000001L
+#define HPO_TOP_CLOCK_CONTROL__HPO_DISPCLK_GATE_DIS_MASK 0x00000002L
+#define HPO_TOP_CLOCK_CONTROL__HPO_SOCCLK_R_GATE_DIS_MASK 0x00000010L
+#define HPO_TOP_CLOCK_CONTROL__HPO_SOCCLK_GATE_DIS_MASK 0x00000020L
+#define HPO_TOP_CLOCK_CONTROL__HPO_HDMISTREAMCLK_R_GATE_DIS_MASK 0x00000100L
+#define HPO_TOP_CLOCK_CONTROL__HPO_HDMISTREAMCLK_G_GATE_DIS_MASK 0x00000200L
+#define HPO_TOP_CLOCK_CONTROL__HPO_HDMICHARCLK_R_GATE_DIS_MASK 0x00001000L
+#define HPO_TOP_CLOCK_CONTROL__HPO_HDMICHARCLK_G_GATE_DIS_MASK 0x00002000L
+#define HPO_TOP_CLOCK_CONTROL__HPO_DPSTREAMCLK_R_GATE_DIS_MASK 0x00010000L
+#define HPO_TOP_CLOCK_CONTROL__HPO_DPSTREAMCLK_G_GATE_DIS_MASK 0x00020000L
+#define HPO_TOP_CLOCK_CONTROL__HPO_SYMCLK32_SE_R_GATE_DIS_MASK 0x00040000L
+#define HPO_TOP_CLOCK_CONTROL__HPO_SYMCLK32_SE_G_GATE_DIS_MASK 0x00080000L
+#define HPO_TOP_CLOCK_CONTROL__HPO_SYMCLK32_LE_R_GATE_DIS_MASK 0x00100000L
+#define HPO_TOP_CLOCK_CONTROL__HPO_SYMCLK32_LE_G_GATE_DIS_MASK 0x00200000L
+#define HPO_TOP_CLOCK_CONTROL__HPO_TEST_CLK_SEL_MASK 0xFF000000L
+//HPO_TOP_HW_CONTROL
+#define HPO_TOP_HW_CONTROL__HPO_IO_EN__SHIFT 0x0
+#define HPO_TOP_HW_CONTROL__HPO_IO_EN_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_hpo_dp_stream_mapper_dispdec
+//DP_STREAM_MAPPER_CONTROL0
+#define DP_STREAM_MAPPER_CONTROL0__DP_STREAM_LINK_TARGET__SHIFT 0x0
+#define DP_STREAM_MAPPER_CONTROL0__DP_STREAM_LINK_TARGET_MASK 0x00000007L
+//DP_STREAM_MAPPER_CONTROL1
+#define DP_STREAM_MAPPER_CONTROL1__DP_STREAM_LINK_TARGET__SHIFT 0x0
+#define DP_STREAM_MAPPER_CONTROL1__DP_STREAM_LINK_TARGET_MASK 0x00000007L
+//DP_STREAM_MAPPER_CONTROL2
+#define DP_STREAM_MAPPER_CONTROL2__DP_STREAM_LINK_TARGET__SHIFT 0x0
+#define DP_STREAM_MAPPER_CONTROL2__DP_STREAM_LINK_TARGET_MASK 0x00000007L
+//DP_STREAM_MAPPER_CONTROL3
+#define DP_STREAM_MAPPER_CONTROL3__DP_STREAM_LINK_TARGET__SHIFT 0x0
+#define DP_STREAM_MAPPER_CONTROL3__DP_STREAM_LINK_TARGET_MASK 0x00000007L
+
+
+// addressBlock: dce_dc_hpo_hpo_dcperfmon_dc_perfmon_dispdec
+//DC_PERFMON23_PERFCOUNTER_CNTL
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xf
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0x10
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x16
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x17
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x18
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x19
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x1a
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x000001FFL
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0x00000E00L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x00007000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x00008000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x00010000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x00400000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x00800000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x01000000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x02000000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x04000000L
+#define DC_PERFMON23_PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xE0000000L
+//DC_PERFMON23_PERFCOUNTER_CNTL2
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x0
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL__SHIFT 0x2
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL__SHIFT 0x3
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x8
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL__SHIFT 0x1d
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x00000003L
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP1_SEL_MASK 0x00000004L
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_HW_STOP2_SEL_MASK 0x00000008L
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTOFF_SEL_MASK 0x00003F00L
+#define DC_PERFMON23_PERFCOUNTER_CNTL2__PERFCOUNTER_CNTL2_SEL_MASK 0xE0000000L
+//DC_PERFMON23_PERFCOUNTER_STATE
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x00000003L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x00000004L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x00000030L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x00000040L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x00000300L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x00000400L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x00003000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x00004000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x00030000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x00040000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x00300000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x00400000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x03000000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x04000000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000L
+#define DC_PERFMON23_PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000L
+//DC_PERFMON23_PERFMON_CNTL
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000003L
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0x0FFFFF00L
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000L
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000L
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000L
+#define DC_PERFMON23_PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000L
+//DC_PERFMON23_PERFMON_CNTL2
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL__SHIFT 0x2
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL__SHIFT 0xa
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x00000001L
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x00000002L
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_RUN_ENABLE_START_SEL_MASK 0x000003FCL
+#define DC_PERFMON23_PERFMON_CNTL2__PERFMON_RUN_ENABLE_STOP_SEL_MASK 0x0003FC00L
+//DC_PERFMON23_PERFMON_CVALUE_INT_MISC
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x00000001L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x00000002L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x00000004L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x00000008L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x00000010L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x00000020L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x00000040L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x00000080L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x00000100L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x00000200L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x00000400L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x00000800L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x00001000L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x00002000L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x00004000L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x00008000L
+#define DC_PERFMON23_PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xFFFF0000L
+//DC_PERFMON23_PERFMON_CVALUE_LOW
+#define DC_PERFMON23_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define DC_PERFMON23_PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xFFFFFFFFL
+//DC_PERFMON23_PERFMON_HI
+#define DC_PERFMON23_PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define DC_PERFMON23_PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define DC_PERFMON23_PERFMON_HI__PERFMON_HI_MASK 0x0000FFFFL
+#define DC_PERFMON23_PERFMON_HI__PERFMON_READ_SEL_MASK 0xE0000000L
+//DC_PERFMON23_PERFMON_LOW
+#define DC_PERFMON23_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define DC_PERFMON23_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_hdmi_link_enc0_dispdec
+//HDMI_LINK_ENC_CONTROL
+#define HDMI_LINK_ENC_CONTROL__HDMI_LINK_ENC_ENABLE__SHIFT 0x0
+#define HDMI_LINK_ENC_CONTROL__HDMI_LINK_ENC_SOFT_RESET__SHIFT 0x4
+#define HDMI_LINK_ENC_CONTROL__HDMI_LINK_ENC_ENABLE_MASK 0x00000001L
+#define HDMI_LINK_ENC_CONTROL__HDMI_LINK_ENC_SOFT_RESET_MASK 0x00000010L
+//HDMI_LINK_ENC_CLK_CTRL
+#define HDMI_LINK_ENC_CLK_CTRL__HDMI_LINK_ENC_CLOCK_EN__SHIFT 0x0
+#define HDMI_LINK_ENC_CLK_CTRL__HDMI_LINK_ENC_CLOCK_ON_HDMICHARCLK__SHIFT 0x1
+#define HDMI_LINK_ENC_CLK_CTRL__HDMI_LINK_ENC_CLOCK_EN_MASK 0x00000001L
+#define HDMI_LINK_ENC_CLK_CTRL__HDMI_LINK_ENC_CLOCK_ON_HDMICHARCLK_MASK 0x00000002L
+
+
+// addressBlock: dce_dc_hpo_hdmi_frl_enc0_dispdec
+//HDMI_FRL_ENC_CONFIG
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE_COUNT__SHIFT 0x0
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_TRAINING_ENABLE__SHIFT 0x1
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_SCRAMBLER_DISABLE__SHIFT 0x2
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE0_TRAINING_PATTERN__SHIFT 0x10
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE1_TRAINING_PATTERN__SHIFT 0x14
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE2_TRAINING_PATTERN__SHIFT 0x18
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE3_TRAINING_PATTERN__SHIFT 0x1c
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE_COUNT_MASK 0x00000001L
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_TRAINING_ENABLE_MASK 0x00000002L
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_SCRAMBLER_DISABLE_MASK 0x00000004L
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE0_TRAINING_PATTERN_MASK 0x000F0000L
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE1_TRAINING_PATTERN_MASK 0x00F00000L
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE2_TRAINING_PATTERN_MASK 0x0F000000L
+#define HDMI_FRL_ENC_CONFIG__HDMI_LINK_LANE3_TRAINING_PATTERN_MASK 0xF0000000L
+//HDMI_FRL_ENC_CONFIG2
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_MAX_JITTER_VALUE__SHIFT 0x0
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_JITTER_THRESHOLD__SHIFT 0xc
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_JITTER_CAL_EN__SHIFT 0x18
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_RC_COMPRESS_DISABLE__SHIFT 0x19
+#define HDMI_FRL_ENC_CONFIG2__HDMI_FRL_HDMISTREAMCLK_DB_SEL__SHIFT 0x1a
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_MAX_JITTER_VALUE_RESET__SHIFT 0x1c
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_JITTER_EXCEED_STATUS__SHIFT 0x1d
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_METER_BUFFER_OVERFLOW_STATUS__SHIFT 0x1e
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_MAX_JITTER_VALUE_MASK 0x000001FFL
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_JITTER_THRESHOLD_MASK 0x001FF000L
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_JITTER_CAL_EN_MASK 0x01000000L
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_RC_COMPRESS_DISABLE_MASK 0x02000000L
+#define HDMI_FRL_ENC_CONFIG2__HDMI_FRL_HDMISTREAMCLK_DB_SEL_MASK 0x0C000000L
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_MAX_JITTER_VALUE_RESET_MASK 0x10000000L
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_JITTER_EXCEED_STATUS_MASK 0x20000000L
+#define HDMI_FRL_ENC_CONFIG2__HDMI_LINK_METER_BUFFER_OVERFLOW_STATUS_MASK 0x40000000L
+//HDMI_FRL_ENC_METER_BUFFER_STATUS
+#define HDMI_FRL_ENC_METER_BUFFER_STATUS__HDMI_LINK_MAX_METER_BUFFER_LEVEL__SHIFT 0x0
+#define HDMI_FRL_ENC_METER_BUFFER_STATUS__HDMI_LINK_METER_BUFFER_MAX_LEVEL_RESET__SHIFT 0x1f
+#define HDMI_FRL_ENC_METER_BUFFER_STATUS__HDMI_LINK_MAX_METER_BUFFER_LEVEL_MASK 0x0000007FL
+#define HDMI_FRL_ENC_METER_BUFFER_STATUS__HDMI_LINK_METER_BUFFER_MAX_LEVEL_RESET_MASK 0x80000000L
+//HDMI_FRL_ENC_MEM_CTRL
+#define HDMI_FRL_ENC_MEM_CTRL__METERBUFFER_MEM_PWR_DIS__SHIFT 0x0
+#define HDMI_FRL_ENC_MEM_CTRL__METERBUFFER_MEM_PWR_FORCE__SHIFT 0x1
+#define HDMI_FRL_ENC_MEM_CTRL__METERBUFFER_MEM_PWR_STATE__SHIFT 0x4
+#define HDMI_FRL_ENC_MEM_CTRL__METERBUFFER_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0x8
+#define HDMI_FRL_ENC_MEM_CTRL__METERBUFFER_MEM_PWR_DIS_MASK 0x00000001L
+#define HDMI_FRL_ENC_MEM_CTRL__METERBUFFER_MEM_PWR_FORCE_MASK 0x00000006L
+#define HDMI_FRL_ENC_MEM_CTRL__METERBUFFER_MEM_PWR_STATE_MASK 0x00000030L
+#define HDMI_FRL_ENC_MEM_CTRL__METERBUFFER_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00000300L
+
+
+// addressBlock: dce_dc_hpo_hdmi_stream_enc0_dispdec
+//HDMI_STREAM_ENC_CLOCK_CONTROL
+#define HDMI_STREAM_ENC_CLOCK_CONTROL__HDMI_STREAM_ENC_CLOCK_EN__SHIFT 0x0
+#define HDMI_STREAM_ENC_CLOCK_CONTROL__HDMI_STREAM_ENC_CLOCK_ON_DISPCLK__SHIFT 0x4
+#define HDMI_STREAM_ENC_CLOCK_CONTROL__HDMI_STREAM_ENC_CLOCK_ON_SOCCLK__SHIFT 0x8
+#define HDMI_STREAM_ENC_CLOCK_CONTROL__HDMI_STREAM_ENC_CLOCK_ON_HDMISTREAMCLK__SHIFT 0xc
+#define HDMI_STREAM_ENC_CLOCK_CONTROL__HDMI_STREAM_ENC_CLOCK_EN_MASK 0x00000001L
+#define HDMI_STREAM_ENC_CLOCK_CONTROL__HDMI_STREAM_ENC_CLOCK_ON_DISPCLK_MASK 0x00000010L
+#define HDMI_STREAM_ENC_CLOCK_CONTROL__HDMI_STREAM_ENC_CLOCK_ON_SOCCLK_MASK 0x00000100L
+#define HDMI_STREAM_ENC_CLOCK_CONTROL__HDMI_STREAM_ENC_CLOCK_ON_HDMISTREAMCLK_MASK 0x00001000L
+//HDMI_STREAM_ENC_INPUT_MUX_CONTROL
+#define HDMI_STREAM_ENC_INPUT_MUX_CONTROL__HDMI_STREAM_ENC_INPUT_MUX_SOURCE_SEL__SHIFT 0x0
+#define HDMI_STREAM_ENC_INPUT_MUX_CONTROL__HDMI_STREAM_ENC_INPUT_MUX_SOURCE_SEL_MASK 0x00000007L
+//HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE__SHIFT 0x0
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET__SHIFT 0x4
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_PIXEL_ENCODING__SHIFT 0x8
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ODM_COMBINE_MODE__SHIFT 0xc
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_DSC_MODE__SHIFT 0x10
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE__SHIFT 0x14
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE__SHIFT 0x18
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR__SHIFT 0x1c
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE_MASK 0x00000001L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_MASK 0x00000010L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_PIXEL_ENCODING_MASK 0x00000300L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ODM_COMBINE_MODE_MASK 0x00003000L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_DSC_MODE_MASK 0x00030000L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE_MASK 0x00100000L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE_MASK 0x01000000L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR_MASK 0x30000000L
+//HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x0
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x2
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL__SHIFT 0x4
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL__SHIFT 0xc
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL__SHIFT 0x18
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED__SHIFT 0x1f
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000001L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE_MASK 0x00000002L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX_MASK 0x00000004L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL_MASK 0x000003F0L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL_MASK 0x0000F000L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL_MASK 0x3F000000L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED_MASK 0x80000000L
+//HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2__FIFO_READ_START_LEVEL__SHIFT 0x0
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2__FIFO_READ_CLOCK_SRC__SHIFT 0x5
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2__FIFO_DB_PENDING__SHIFT 0x8
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2__FIFO_DB_DISABLE__SHIFT 0xc
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2__FIFO_READ_START_LEVEL_MASK 0x0000001FL
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2__FIFO_READ_CLOCK_SRC_MASK 0x00000020L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2__FIFO_DB_PENDING_MASK 0x00000100L
+#define HDMI_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL2__FIFO_DB_DISABLE_MASK 0x00001000L
+
+
+// addressBlock: dce_dc_hpo_hdmi_stream_enc0_afmt_afmt_dispdec
+//AFMT5_AFMT_ACP
+#define AFMT5_AFMT_ACP__AFMT_ACP_TYPE__SHIFT 0x0
+#define AFMT5_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x8
+#define AFMT5_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x10
+#define AFMT5_AFMT_ACP__AFMT_ACP_TYPE_MASK 0x00000003L
+#define AFMT5_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE0_MASK 0x0000FF00L
+#define AFMT5_AFMT_ACP__AFMT_ACP_TYPE_DEPENDENT_BYTE1_MASK 0x00FF0000L
+//AFMT5_AFMT_VBI_PACKET_CONTROL
+#define AFMT5_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE__SHIFT 0xd
+#define AFMT5_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define AFMT5_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x18
+#define AFMT5_AFMT_VBI_PACKET_CONTROL__AFMT_ACP_SOURCE_MASK 0x00002000L
+#define AFMT5_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L
+#define AFMT5_AFMT_VBI_PACKET_CONTROL__AFMT_HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x01000000L
+//AFMT5_AFMT_AUDIO_PACKET_CONTROL2
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+//AFMT5_AFMT_AUDIO_INFO0
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L
+#define AFMT5_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L
+//AFMT5_AFMT_AUDIO_INFO1
+#define AFMT5_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define AFMT5_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define AFMT5_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define AFMT5_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define AFMT5_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL
+#define AFMT5_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define AFMT5_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define AFMT5_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+//AFMT5_AFMT_60958_0
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L
+#define AFMT5_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+//AFMT5_AFMT_60958_1
+#define AFMT5_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define AFMT5_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define AFMT5_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define AFMT5_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define AFMT5_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define AFMT5_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL
+#define AFMT5_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L
+#define AFMT5_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define AFMT5_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define AFMT5_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L
+//AFMT5_AFMT_AUDIO_CRC_CONTROL
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define AFMT5_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//AFMT5_AFMT_RAMP_CONTROL0
+#define AFMT5_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define AFMT5_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define AFMT5_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define AFMT5_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+//AFMT5_AFMT_RAMP_CONTROL1
+#define AFMT5_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define AFMT5_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define AFMT5_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define AFMT5_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//AFMT5_AFMT_RAMP_CONTROL2
+#define AFMT5_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define AFMT5_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+//AFMT5_AFMT_RAMP_CONTROL3
+#define AFMT5_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define AFMT5_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+//AFMT5_AFMT_60958_2
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define AFMT5_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+//AFMT5_AFMT_AUDIO_CRC_RESULT
+#define AFMT5_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define AFMT5_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define AFMT5_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define AFMT5_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+//AFMT5_AFMT_STATUS
+#define AFMT5_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define AFMT5_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define AFMT5_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define AFMT5_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define AFMT5_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define AFMT5_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define AFMT5_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define AFMT5_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+//AFMT5_AFMT_AUDIO_PACKET_CONTROL
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define AFMT5_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+//AFMT5_AFMT_INFOFRAME_CONTROL0
+#define AFMT5_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define AFMT5_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define AFMT5_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define AFMT5_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+//AFMT5_AFMT_AUDIO_SRC_CONTROL
+#define AFMT5_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define AFMT5_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+//AFMT5_AFMT_MEM_PWR
+#define AFMT5_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS__SHIFT 0x0
+#define AFMT5_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE__SHIFT 0x4
+#define AFMT5_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE__SHIFT 0x8
+#define AFMT5_AFMT_MEM_PWR__AFMT_MEM_PWR_DIS_MASK 0x00000001L
+#define AFMT5_AFMT_MEM_PWR__AFMT_MEM_PWR_FORCE_MASK 0x00000030L
+#define AFMT5_AFMT_MEM_PWR__AFMT_MEM_PWR_STATE_MASK 0x00000300L
+
+
+// addressBlock: dce_dc_hpo_hdmi_stream_enc0_dme_dme_dispdec
+//DME5_DME_CONTROL
+#define DME5_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME5_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME5_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME5_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME5_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME5_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME5_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME5_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME5_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME5_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME5_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME5_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME5_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME5_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME5_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME5_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME5_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME5_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+//DME5_DME_MEMORY_CONTROL
+#define DME5_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME5_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME5_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME5_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME5_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME5_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME5_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME5_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+
+
+// addressBlock: dce_dc_hpo_hdmi_stream_enc0_vpg_vpg_dispdec
+//VPG5_VPG_GENERIC_PACKET_ACCESS_CTRL
+#define VPG5_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG5_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+//VPG5_VPG_GENERIC_PACKET_DATA
+#define VPG5_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG5_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG5_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG5_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG5_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG5_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG5_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG5_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+//VPG5_VPG_GSP_FRAME_UPDATE_CTRL
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG5_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+//VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG5_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+//VPG5_VPG_GENERIC_STATUS
+#define VPG5_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG5_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG5_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG5_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG5_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG5_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+//VPG5_VPG_MEM_PWR
+#define VPG5_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG5_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG5_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG5_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG5_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG5_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+//VPG5_VPG_ISRC1_2_ACCESS_CTRL
+#define VPG5_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG5_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+//VPG5_VPG_ISRC1_2_DATA
+#define VPG5_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG5_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG5_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG5_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG5_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG5_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG5_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG5_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+//VPG5_VPG_MPEG_INFO0
+#define VPG5_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG5_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG5_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG5_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG5_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG5_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG5_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG5_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+//VPG5_VPG_MPEG_INFO1
+#define VPG5_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG5_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG5_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG5_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG5_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG5_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG5_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG5_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+
+
+// addressBlock: dce_dc_hpo_hdmi_tb_enc0_dispdec
+//HDMI_TB_ENC_CONTROL
+#define HDMI_TB_ENC_CONTROL__HDMI_TB_ENC_EN__SHIFT 0x0
+#define HDMI_TB_ENC_CONTROL__HDMI_RESET__SHIFT 0x4
+#define HDMI_TB_ENC_CONTROL__HDMI_RESET_DONE__SHIFT 0x8
+#define HDMI_TB_ENC_CONTROL__HDMI_TB_ENC_EN_MASK 0x00000001L
+#define HDMI_TB_ENC_CONTROL__HDMI_RESET_MASK 0x00000010L
+#define HDMI_TB_ENC_CONTROL__HDMI_RESET_DONE_MASK 0x00000100L
+//HDMI_TB_ENC_PIXEL_FORMAT
+#define HDMI_TB_ENC_PIXEL_FORMAT__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x0
+#define HDMI_TB_ENC_PIXEL_FORMAT__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x8
+#define HDMI_TB_ENC_PIXEL_FORMAT__HDMI_PIXEL_ENCODING__SHIFT 0x10
+#define HDMI_TB_ENC_PIXEL_FORMAT__HDMI_DSC_MODE__SHIFT 0x18
+#define HDMI_TB_ENC_PIXEL_FORMAT__HDMI_DEEP_COLOR_ENABLE_MASK 0x00000001L
+#define HDMI_TB_ENC_PIXEL_FORMAT__HDMI_DEEP_COLOR_DEPTH_MASK 0x00000300L
+#define HDMI_TB_ENC_PIXEL_FORMAT__HDMI_PIXEL_ENCODING_MASK 0x00030000L
+#define HDMI_TB_ENC_PIXEL_FORMAT__HDMI_DSC_MODE_MASK 0x03000000L
+//HDMI_TB_ENC_PACKET_CONTROL
+#define HDMI_TB_ENC_PACKET_CONTROL__HDMI_MAX_PACKETS_PER_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_PACKET_CONTROL__HDMI_MAX_ISLANDS_PER_LINE__SHIFT 0x8
+#define HDMI_TB_ENC_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_OVERFLOW__SHIFT 0xc
+#define HDMI_TB_ENC_PACKET_CONTROL__HDMI_TB_ENC_PACKET_ERROR_CLEAR__SHIFT 0x10
+#define HDMI_TB_ENC_PACKET_CONTROL__HDMI_MAX_PACKETS_PER_LINE_MASK 0x0000001FL
+#define HDMI_TB_ENC_PACKET_CONTROL__HDMI_MAX_ISLANDS_PER_LINE_MASK 0x00000300L
+#define HDMI_TB_ENC_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_OVERFLOW_MASK 0x00001000L
+#define HDMI_TB_ENC_PACKET_CONTROL__HDMI_TB_ENC_PACKET_ERROR_CLEAR_MASK 0x00010000L
+//HDMI_TB_ENC_ACR_PACKET_CONTROL
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define HDMI_TB_ENC_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+//HDMI_TB_ENC_VBI_PACKET_CONTROL1
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_GC_SEND__SHIFT 0x0
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_GC_CONT__SHIFT 0x1
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ISRC_SEND__SHIFT 0x4
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ISRC_CONT__SHIFT 0x5
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ISRC_LINE_REFERENCE__SHIFT 0x6
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ACP_SEND__SHIFT 0x8
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ACP_LINE_REFERENCE__SHIFT 0x9
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_AUDIO_INFO_SEND__SHIFT 0xc
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_AUDIO_INFO_CONT__SHIFT 0xd
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_AUDIO_INFO_LINE_REFERENCE__SHIFT 0xe
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_GC_SEND_MASK 0x00000001L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_GC_CONT_MASK 0x00000002L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ISRC_SEND_MASK 0x00000010L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ISRC_CONT_MASK 0x00000020L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ISRC_LINE_REFERENCE_MASK 0x00000040L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ACP_SEND_MASK 0x00000100L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_ACP_LINE_REFERENCE_MASK 0x00000200L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_AUDIO_INFO_SEND_MASK 0x00001000L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_AUDIO_INFO_CONT_MASK 0x00002000L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_AUDIO_INFO_LINE_REFERENCE_MASK 0x00004000L
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x7FFF0000L
+//HDMI_TB_ENC_VBI_PACKET_CONTROL2
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL2__HDMI_ISRC_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL2__HDMI_ACP_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL2__HDMI_ISRC_LINE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_VBI_PACKET_CONTROL2__HDMI_ACP_LINE_MASK 0x7FFF0000L
+//HDMI_TB_ENC_GC_CONTROL
+#define HDMI_TB_ENC_GC_CONTROL__HDMI_GC_AVMUTE__SHIFT 0x0
+#define HDMI_TB_ENC_GC_CONTROL__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define HDMI_TB_ENC_GC_CONTROL__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define HDMI_TB_ENC_GC_CONTROL__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define HDMI_TB_ENC_GC_CONTROL__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define HDMI_TB_ENC_GC_CONTROL__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+//HDMI_TB_ENC_GENERIC_PACKET_CONTROL0
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LOCK_EN__SHIFT 0x2
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x3
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LOCK_EN__SHIFT 0x6
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x7
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LOCK_EN__SHIFT 0xa
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xb
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LOCK_EN__SHIFT 0xe
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LOCK_EN__SHIFT 0x12
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x13
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LOCK_EN__SHIFT 0x16
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x17
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LOCK_EN__SHIFT 0x1a
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1b
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LOCK_EN__SHIFT 0x1e
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1f
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LOCK_EN_MASK 0x00000004L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000008L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LOCK_EN_MASK 0x00000040L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 0x00000080L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LOCK_EN_MASK 0x00000400L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000800L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LOCK_EN_MASK 0x00004000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LOCK_EN_MASK 0x00040000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00080000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LOCK_EN_MASK 0x00400000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00800000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LOCK_EN_MASK 0x04000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x08000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LOCK_EN_MASK 0x40000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x80000000L
+//HDMI_TB_ENC_GENERIC_PACKET_CONTROL1
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC8_SEND__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC8_CONT__SHIFT 0x1
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC8_LOCK_EN__SHIFT 0x2
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC8_LINE_REFERENCE__SHIFT 0x3
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC9_SEND__SHIFT 0x4
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC9_CONT__SHIFT 0x5
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC9_LOCK_EN__SHIFT 0x6
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC9_LINE_REFERENCE__SHIFT 0x7
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC10_SEND__SHIFT 0x8
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC10_CONT__SHIFT 0x9
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC10_LOCK_EN__SHIFT 0xa
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC10_LINE_REFERENCE__SHIFT 0xb
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC11_SEND__SHIFT 0xc
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC11_CONT__SHIFT 0xd
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC11_LOCK_EN__SHIFT 0xe
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC11_LINE_REFERENCE__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC12_SEND__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC12_CONT__SHIFT 0x11
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC12_LOCK_EN__SHIFT 0x12
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC12_LINE_REFERENCE__SHIFT 0x13
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC13_SEND__SHIFT 0x14
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC13_CONT__SHIFT 0x15
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC13_LOCK_EN__SHIFT 0x16
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC13_LINE_REFERENCE__SHIFT 0x17
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC14_SEND__SHIFT 0x18
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC14_CONT__SHIFT 0x19
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC14_LOCK_EN__SHIFT 0x1a
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC14_LINE_REFERENCE__SHIFT 0x1b
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC8_SEND_MASK 0x00000001L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC8_CONT_MASK 0x00000002L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC8_LOCK_EN_MASK 0x00000004L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC8_LINE_REFERENCE_MASK 0x00000008L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC9_SEND_MASK 0x00000010L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC9_CONT_MASK 0x00000020L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC9_LOCK_EN_MASK 0x00000040L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC9_LINE_REFERENCE_MASK 0x00000080L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC10_SEND_MASK 0x00000100L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC10_CONT_MASK 0x00000200L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC10_LOCK_EN_MASK 0x00000400L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC10_LINE_REFERENCE_MASK 0x00000800L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC11_SEND_MASK 0x00001000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC11_CONT_MASK 0x00002000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC11_LOCK_EN_MASK 0x00004000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC11_LINE_REFERENCE_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC12_SEND_MASK 0x00010000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC12_CONT_MASK 0x00020000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC12_LOCK_EN_MASK 0x00040000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC12_LINE_REFERENCE_MASK 0x00080000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC13_SEND_MASK 0x00100000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC13_CONT_MASK 0x00200000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC13_LOCK_EN_MASK 0x00400000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC13_LINE_REFERENCE_MASK 0x00800000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC14_SEND_MASK 0x01000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC14_CONT_MASK 0x02000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC14_LOCK_EN_MASK 0x04000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL1__HDMI_GENERIC14_LINE_REFERENCE_MASK 0x08000000L
+//HDMI_TB_ENC_GENERIC_PACKET_CONTROL2
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC8_IMMEDIATE_SEND__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING__SHIFT 0x11
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC9_IMMEDIATE_SEND__SHIFT 0x12
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING__SHIFT 0x13
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC10_IMMEDIATE_SEND__SHIFT 0x14
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING__SHIFT 0x15
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC11_IMMEDIATE_SEND__SHIFT 0x16
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING__SHIFT 0x17
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC12_IMMEDIATE_SEND__SHIFT 0x18
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING__SHIFT 0x19
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC13_IMMEDIATE_SEND__SHIFT 0x1a
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING__SHIFT 0x1b
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC14_IMMEDIATE_SEND__SHIFT 0x1c
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING__SHIFT 0x1d
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC8_IMMEDIATE_SEND_MASK 0x00010000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC8_IMMEDIATE_SEND_PENDING_MASK 0x00020000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC9_IMMEDIATE_SEND_MASK 0x00040000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC9_IMMEDIATE_SEND_PENDING_MASK 0x00080000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC10_IMMEDIATE_SEND_MASK 0x00100000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC10_IMMEDIATE_SEND_PENDING_MASK 0x00200000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC11_IMMEDIATE_SEND_MASK 0x00400000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC11_IMMEDIATE_SEND_PENDING_MASK 0x00800000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC12_IMMEDIATE_SEND_MASK 0x01000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC12_IMMEDIATE_SEND_PENDING_MASK 0x02000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC13_IMMEDIATE_SEND_MASK 0x04000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC13_IMMEDIATE_SEND_PENDING_MASK 0x08000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC14_IMMEDIATE_SEND_MASK 0x10000000L
+#define HDMI_TB_ENC_GENERIC_PACKET_CONTROL2__HDMI_GENERIC14_IMMEDIATE_SEND_PENDING_MASK 0x20000000L
+//HDMI_TB_ENC_GENERIC_PACKET0_1_LINE
+#define HDMI_TB_ENC_GENERIC_PACKET0_1_LINE__HDMI_GENERIC0_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET0_1_LINE__HDMI_GENERIC0_EMP__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET0_1_LINE__HDMI_GENERIC1_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET0_1_LINE__HDMI_GENERIC1_EMP__SHIFT 0x1f
+#define HDMI_TB_ENC_GENERIC_PACKET0_1_LINE__HDMI_GENERIC0_LINE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_GENERIC_PACKET0_1_LINE__HDMI_GENERIC0_EMP_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET0_1_LINE__HDMI_GENERIC1_LINE_MASK 0x7FFF0000L
+#define HDMI_TB_ENC_GENERIC_PACKET0_1_LINE__HDMI_GENERIC1_EMP_MASK 0x80000000L
+//HDMI_TB_ENC_GENERIC_PACKET2_3_LINE
+#define HDMI_TB_ENC_GENERIC_PACKET2_3_LINE__HDMI_GENERIC2_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET2_3_LINE__HDMI_GENERIC2_EMP__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET2_3_LINE__HDMI_GENERIC3_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET2_3_LINE__HDMI_GENERIC3_EMP__SHIFT 0x1f
+#define HDMI_TB_ENC_GENERIC_PACKET2_3_LINE__HDMI_GENERIC2_LINE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_GENERIC_PACKET2_3_LINE__HDMI_GENERIC2_EMP_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET2_3_LINE__HDMI_GENERIC3_LINE_MASK 0x7FFF0000L
+#define HDMI_TB_ENC_GENERIC_PACKET2_3_LINE__HDMI_GENERIC3_EMP_MASK 0x80000000L
+//HDMI_TB_ENC_GENERIC_PACKET4_5_LINE
+#define HDMI_TB_ENC_GENERIC_PACKET4_5_LINE__HDMI_GENERIC4_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET4_5_LINE__HDMI_GENERIC4_EMP__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET4_5_LINE__HDMI_GENERIC5_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET4_5_LINE__HDMI_GENERIC5_EMP__SHIFT 0x1f
+#define HDMI_TB_ENC_GENERIC_PACKET4_5_LINE__HDMI_GENERIC4_LINE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_GENERIC_PACKET4_5_LINE__HDMI_GENERIC4_EMP_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET4_5_LINE__HDMI_GENERIC5_LINE_MASK 0x7FFF0000L
+#define HDMI_TB_ENC_GENERIC_PACKET4_5_LINE__HDMI_GENERIC5_EMP_MASK 0x80000000L
+//HDMI_TB_ENC_GENERIC_PACKET6_7_LINE
+#define HDMI_TB_ENC_GENERIC_PACKET6_7_LINE__HDMI_GENERIC6_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET6_7_LINE__HDMI_GENERIC6_EMP__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET6_7_LINE__HDMI_GENERIC7_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET6_7_LINE__HDMI_GENERIC7_EMP__SHIFT 0x1f
+#define HDMI_TB_ENC_GENERIC_PACKET6_7_LINE__HDMI_GENERIC6_LINE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_GENERIC_PACKET6_7_LINE__HDMI_GENERIC6_EMP_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET6_7_LINE__HDMI_GENERIC7_LINE_MASK 0x7FFF0000L
+#define HDMI_TB_ENC_GENERIC_PACKET6_7_LINE__HDMI_GENERIC7_EMP_MASK 0x80000000L
+//HDMI_TB_ENC_GENERIC_PACKET8_9_LINE
+#define HDMI_TB_ENC_GENERIC_PACKET8_9_LINE__HDMI_GENERIC8_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET8_9_LINE__HDMI_GENERIC8_EMP__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET8_9_LINE__HDMI_GENERIC9_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET8_9_LINE__HDMI_GENERIC9_EMP__SHIFT 0x1f
+#define HDMI_TB_ENC_GENERIC_PACKET8_9_LINE__HDMI_GENERIC8_LINE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_GENERIC_PACKET8_9_LINE__HDMI_GENERIC8_EMP_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET8_9_LINE__HDMI_GENERIC9_LINE_MASK 0x7FFF0000L
+#define HDMI_TB_ENC_GENERIC_PACKET8_9_LINE__HDMI_GENERIC9_EMP_MASK 0x80000000L
+//HDMI_TB_ENC_GENERIC_PACKET10_11_LINE
+#define HDMI_TB_ENC_GENERIC_PACKET10_11_LINE__HDMI_GENERIC10_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET10_11_LINE__HDMI_GENERIC10_EMP__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET10_11_LINE__HDMI_GENERIC11_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET10_11_LINE__HDMI_GENERIC11_EMP__SHIFT 0x1f
+#define HDMI_TB_ENC_GENERIC_PACKET10_11_LINE__HDMI_GENERIC10_LINE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_GENERIC_PACKET10_11_LINE__HDMI_GENERIC10_EMP_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET10_11_LINE__HDMI_GENERIC11_LINE_MASK 0x7FFF0000L
+#define HDMI_TB_ENC_GENERIC_PACKET10_11_LINE__HDMI_GENERIC11_EMP_MASK 0x80000000L
+//HDMI_TB_ENC_GENERIC_PACKET12_13_LINE
+#define HDMI_TB_ENC_GENERIC_PACKET12_13_LINE__HDMI_GENERIC12_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET12_13_LINE__HDMI_GENERIC12_EMP__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET12_13_LINE__HDMI_GENERIC13_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_GENERIC_PACKET12_13_LINE__HDMI_GENERIC13_EMP__SHIFT 0x1f
+#define HDMI_TB_ENC_GENERIC_PACKET12_13_LINE__HDMI_GENERIC12_LINE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_GENERIC_PACKET12_13_LINE__HDMI_GENERIC12_EMP_MASK 0x00008000L
+#define HDMI_TB_ENC_GENERIC_PACKET12_13_LINE__HDMI_GENERIC13_LINE_MASK 0x7FFF0000L
+#define HDMI_TB_ENC_GENERIC_PACKET12_13_LINE__HDMI_GENERIC13_EMP_MASK 0x80000000L
+//HDMI_TB_ENC_GENERIC_PACKET14_LINE
+#define HDMI_TB_ENC_GENERIC_PACKET14_LINE__HDMI_GENERIC14_LINE__SHIFT 0x0
+#define HDMI_TB_ENC_GENERIC_PACKET14_LINE__HDMI_GENERIC14_EMP__SHIFT 0xf
+#define HDMI_TB_ENC_GENERIC_PACKET14_LINE__HDMI_GENERIC14_LINE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_GENERIC_PACKET14_LINE__HDMI_GENERIC14_EMP_MASK 0x00008000L
+//HDMI_TB_ENC_DB_CONTROL
+#define HDMI_TB_ENC_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0
+#define HDMI_TB_ENC_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc
+#define HDMI_TB_ENC_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf
+#define HDMI_TB_ENC_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L
+#define HDMI_TB_ENC_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L
+#define HDMI_TB_ENC_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L
+//HDMI_TB_ENC_ACR_32_0
+#define HDMI_TB_ENC_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define HDMI_TB_ENC_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L
+//HDMI_TB_ENC_ACR_32_1
+#define HDMI_TB_ENC_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define HDMI_TB_ENC_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL
+//HDMI_TB_ENC_ACR_44_0
+#define HDMI_TB_ENC_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define HDMI_TB_ENC_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L
+//HDMI_TB_ENC_ACR_44_1
+#define HDMI_TB_ENC_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define HDMI_TB_ENC_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL
+//HDMI_TB_ENC_ACR_48_0
+#define HDMI_TB_ENC_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define HDMI_TB_ENC_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L
+//HDMI_TB_ENC_ACR_48_1
+#define HDMI_TB_ENC_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define HDMI_TB_ENC_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL
+//HDMI_TB_ENC_ACR_STATUS_0
+#define HDMI_TB_ENC_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define HDMI_TB_ENC_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L
+//HDMI_TB_ENC_ACR_STATUS_1
+#define HDMI_TB_ENC_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define HDMI_TB_ENC_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL
+//HDMI_TB_ENC_BUFFER_CONTROL
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_BORROWBUFFER_PREFILL_OVERRIDE_EN__SHIFT 0x0
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_RATE_BUFFER_PREFILL_OVERRIDE_EN__SHIFT 0x1
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_BORROWBUFFER_MAX_MIN_LEVEL_RESET__SHIFT 0x4
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_BORROWBUFFER_PREFILL_OVERRIDE_LEVEL__SHIFT 0x8
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_RATE_BUFFER_PREFILL_OVERRIDE_LEVEL__SHIFT 0x18
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_BORROWBUFFER_PREFILL_OVERRIDE_EN_MASK 0x00000001L
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_RATE_BUFFER_PREFILL_OVERRIDE_EN_MASK 0x00000002L
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_BORROWBUFFER_MAX_MIN_LEVEL_RESET_MASK 0x00000010L
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_BORROWBUFFER_PREFILL_OVERRIDE_LEVEL_MASK 0x0000FF00L
+#define HDMI_TB_ENC_BUFFER_CONTROL__HDMI_RATE_BUFFER_PREFILL_OVERRIDE_LEVEL_MASK 0x1F000000L
+//HDMI_TB_ENC_MEM_CTRL
+#define HDMI_TB_ENC_MEM_CTRL__BORROWBUFFER_MEM_PWR_DIS__SHIFT 0x0
+#define HDMI_TB_ENC_MEM_CTRL__BORROWBUFFER_MEM_PWR_FORCE__SHIFT 0x1
+#define HDMI_TB_ENC_MEM_CTRL__BORROWBUFFER_MEM_PWR_STATE__SHIFT 0x4
+#define HDMI_TB_ENC_MEM_CTRL__BORROWBUFFER_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0x8
+#define HDMI_TB_ENC_MEM_CTRL__BORROWBUFFER_MEM_PWR_DIS_MASK 0x00000001L
+#define HDMI_TB_ENC_MEM_CTRL__BORROWBUFFER_MEM_PWR_FORCE_MASK 0x00000006L
+#define HDMI_TB_ENC_MEM_CTRL__BORROWBUFFER_MEM_PWR_STATE_MASK 0x00000030L
+#define HDMI_TB_ENC_MEM_CTRL__BORROWBUFFER_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00000300L
+//HDMI_TB_ENC_METADATA_PACKET_CONTROL
+#define HDMI_TB_ENC_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define HDMI_TB_ENC_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4
+#define HDMI_TB_ENC_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8
+#define HDMI_TB_ENC_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10
+#define HDMI_TB_ENC_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define HDMI_TB_ENC_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L
+#define HDMI_TB_ENC_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L
+#define HDMI_TB_ENC_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//HDMI_TB_ENC_H_ACTIVE_BLANK
+#define HDMI_TB_ENC_H_ACTIVE_BLANK__HDMI_H_ACTIVE__SHIFT 0x0
+#define HDMI_TB_ENC_H_ACTIVE_BLANK__HDMI_H_BLANK__SHIFT 0x10
+#define HDMI_TB_ENC_H_ACTIVE_BLANK__HDMI_H_ACTIVE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_H_ACTIVE_BLANK__HDMI_H_BLANK_MASK 0x7FFF0000L
+//HDMI_TB_ENC_HC_ACTIVE_BLANK
+#define HDMI_TB_ENC_HC_ACTIVE_BLANK__HDMI_HC_ACTIVE__SHIFT 0x0
+#define HDMI_TB_ENC_HC_ACTIVE_BLANK__HDMI_HC_BLANK__SHIFT 0x10
+#define HDMI_TB_ENC_HC_ACTIVE_BLANK__HDMI_HC_ACTIVE_MASK 0x00007FFFL
+#define HDMI_TB_ENC_HC_ACTIVE_BLANK__HDMI_HC_BLANK_MASK 0x7FFF0000L
+//HDMI_TB_ENC_CRC_CNTL
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_EN__SHIFT 0x0
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_CONT_EN__SHIFT 0x1
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_TYPE__SHIFT 0x8
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_SRC_SEL__SHIFT 0xa
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_INTERLACE_EN__SHIFT 0x10
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_INTERLACE_MODE__SHIFT 0x11
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_EN_MASK 0x00000001L
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_CONT_EN_MASK 0x00000002L
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_TYPE_MASK 0x00000300L
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_SRC_SEL_MASK 0x00000C00L
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_INTERLACE_EN_MASK 0x00010000L
+#define HDMI_TB_ENC_CRC_CNTL__HDMI_CRC_INTERLACE_MODE_MASK 0x00060000L
+//HDMI_TB_ENC_CRC_RESULT_0
+#define HDMI_TB_ENC_CRC_RESULT_0__CRC_TRIBYTE0__SHIFT 0x0
+#define HDMI_TB_ENC_CRC_RESULT_0__CRC_TRIBYTE1__SHIFT 0x10
+#define HDMI_TB_ENC_CRC_RESULT_0__CRC_TRIBYTE0_MASK 0x0000FFFFL
+#define HDMI_TB_ENC_CRC_RESULT_0__CRC_TRIBYTE1_MASK 0xFFFF0000L
+//HDMI_TB_ENC_ENCRYPTION_CONTROL
+#define HDMI_TB_ENC_ENCRYPTION_CONTROL__HDMI_EESS_ENABLE__SHIFT 0x0
+#define HDMI_TB_ENC_ENCRYPTION_CONTROL__HDMI_EESS_WHEN_AVMUTE__SHIFT 0x4
+#define HDMI_TB_ENC_ENCRYPTION_CONTROL__HDMI_EESS_ENABLE_MASK 0x00000001L
+#define HDMI_TB_ENC_ENCRYPTION_CONTROL__HDMI_EESS_WHEN_AVMUTE_MASK 0x00000010L
+//HDMI_TB_ENC_MODE
+#define HDMI_TB_ENC_MODE__HDMI_BORROW_MODE__SHIFT 0x0
+#define HDMI_TB_ENC_MODE__HDMI_SKIP_FIRST_HBLANK__SHIFT 0x8
+#define HDMI_TB_ENC_MODE__HDMI_BORROW_MODE_MASK 0x00000003L
+#define HDMI_TB_ENC_MODE__HDMI_SKIP_FIRST_HBLANK_MASK 0x00000100L
+//HDMI_TB_ENC_INPUT_FIFO_STATUS
+#define HDMI_TB_ENC_INPUT_FIFO_STATUS__INPUT_FIFO_ERROR__SHIFT 0x0
+#define HDMI_TB_ENC_INPUT_FIFO_STATUS__INPUT_FIFO_ERROR_MASK 0x00000001L
+//HDMI_TB_ENC_CRC_RESULT_1
+#define HDMI_TB_ENC_CRC_RESULT_1__CRC_TRIBYTE2__SHIFT 0x0
+#define HDMI_TB_ENC_CRC_RESULT_1__CRC_TRIBYTE2_MASK 0x0000FFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc0_dispdec
+//DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_EN__SHIFT 0x0
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DISPCLK__SHIFT 0x4
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SOCCLK__SHIFT 0x8
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DPSTREAMCLK__SHIFT 0xc
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SYMCLK32__SHIFT 0x10
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_EN_MASK 0x00000001L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DISPCLK_MASK 0x00000010L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SOCCLK_MASK 0x00000100L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DPSTREAMCLK_MASK 0x00001000L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SYMCLK32_MASK 0x00010000L
+//DP_STREAM_ENC0_DP_STREAM_ENC_INPUT_MUX_CONTROL
+#define DP_STREAM_ENC0_DP_STREAM_ENC_INPUT_MUX_CONTROL__DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL__SHIFT 0x0
+#define DP_STREAM_ENC0_DP_STREAM_ENC_INPUT_MUX_CONTROL__DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL_MASK 0x00000007L
+//DP_STREAM_ENC0_DP_STREAM_ENC_AUDIO_CONTROL
+#define DP_STREAM_ENC0_DP_STREAM_ENC_AUDIO_CONTROL__DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL__SHIFT 0x0
+#define DP_STREAM_ENC0_DP_STREAM_ENC_AUDIO_CONTROL__DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL_MASK 0x00000007L
+//DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE__SHIFT 0x0
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET__SHIFT 0x4
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_START_LEVEL__SHIFT 0x8
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_CLOCK_SRC__SHIFT 0x10
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE__SHIFT 0x14
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE__SHIFT 0x18
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR__SHIFT 0x1c
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE_MASK 0x00000001L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_MASK 0x00000010L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_START_LEVEL_MASK 0x00001F00L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_CLOCK_SRC_MASK 0x00010000L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE_MASK 0x00100000L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE_MASK 0x01000000L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR_MASK 0x30000000L
+//DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x0
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x2
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL__SHIFT 0x4
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL__SHIFT 0xc
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL__SHIFT 0x18
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED__SHIFT 0x1f
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000001L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE_MASK 0x00000002L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX_MASK 0x00000004L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL_MASK 0x000003F0L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL_MASK 0x0000F000L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL_MASK 0x3F000000L
+#define DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED_MASK 0x80000000L
+//DP_STREAM_ENC0_DP_STREAM_ENC_SPARE
+#define DP_STREAM_ENC0_DP_STREAM_ENC_SPARE__DP_STREAM_ENC_SPARE__SHIFT 0x0
+#define DP_STREAM_ENC0_DP_STREAM_ENC_SPARE__DP_STREAM_ENC_SPARE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc0_apg_apg_dispdec
+//APG0_APG_CONTROL
+#define APG0_APG_CONTROL__APG_RESET__SHIFT 0x1
+#define APG0_APG_CONTROL__APG_RESET_DONE__SHIFT 0x2
+#define APG0_APG_CONTROL__APG_RESET_MASK 0x00000002L
+#define APG0_APG_CONTROL__APG_RESET_DONE_MASK 0x00000004L
+//APG0_APG_CONTROL2
+#define APG0_APG_CONTROL2__APG_ENABLE__SHIFT 0x0
+#define APG0_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID__SHIFT 0x8
+#define APG0_APG_CONTROL2__APG_DP_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x18
+#define APG0_APG_CONTROL2__APG_ENABLE_MASK 0x00000001L
+#define APG0_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID_MASK 0x0000FF00L
+#define APG0_APG_CONTROL2__APG_DP_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x01000000L
+//APG0_APG_DBG_GEN_CONTROL
+#define APG0_APG_DBG_GEN_CONTROL__APG_DBG_GEN_ENABLE__SHIFT 0x0
+#define APG0_APG_DBG_GEN_CONTROL__APG_DBG_GEN_RESET__SHIFT 0x1
+#define APG0_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define APG0_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define APG0_APG_DBG_GEN_CONTROL__APG_DBG_GEN_ENABLE_MASK 0x00000001L
+#define APG0_APG_DBG_GEN_CONTROL__APG_DBG_GEN_RESET_MASK 0x00000002L
+#define APG0_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define APG0_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//APG0_APG_PACKET_CONTROL
+#define APG0_APG_PACKET_CONTROL__APG_ACP_SOURCE__SHIFT 0x1
+#define APG0_APG_PACKET_CONTROL__APG_AUDIO_INFO_SOURCE__SHIFT 0x2
+#define APG0_APG_PACKET_CONTROL__APG_ACP_SOURCE_MASK 0x00000002L
+#define APG0_APG_PACKET_CONTROL__APG_AUDIO_INFO_SOURCE_MASK 0x00000004L
+//APG0_APG_AUDIO_CRC_CONTROL
+#define APG0_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_EN__SHIFT 0x0
+#define APG0_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CONT__SHIFT 0x4
+#define APG0_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CH_SEL__SHIFT 0xd
+#define APG0_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_COUNT__SHIFT 0x10
+#define APG0_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_EN_MASK 0x00000001L
+#define APG0_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CONT_MASK 0x00000010L
+#define APG0_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CH_SEL_MASK 0x0000E000L
+#define APG0_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//APG0_APG_AUDIO_CRC_CONTROL2
+#define APG0_APG_AUDIO_CRC_CONTROL2__APG_AUDIO_CRC_COUNT_FORCE_DEFAULT__SHIFT 0x0
+#define APG0_APG_AUDIO_CRC_CONTROL2__APG_AUDIO_CRC_COUNT_FORCE_DEFAULT_MASK 0x0000FFFFL
+//APG0_APG_AUDIO_CRC_RESULT
+#define APG0_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE__SHIFT 0x0
+#define APG0_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_CLEAR__SHIFT 0x8
+#define APG0_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC__SHIFT 0x10
+#define APG0_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_MASK 0x00000001L
+#define APG0_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_CLEAR_MASK 0x00000100L
+#define APG0_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_MASK 0xFFFF0000L
+//APG0_APG_STATUS
+#define APG0_APG_STATUS__APG_AUDIO_ENABLE__SHIFT 0x4
+#define APG0_APG_STATUS__APG_HBR_ENABLE__SHIFT 0x8
+#define APG0_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS__SHIFT 0x18
+#define APG0_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_CLEAR__SHIFT 0x19
+#define APG0_APG_STATUS__APG_AUDIO_ENABLE_MASK 0x00000010L
+#define APG0_APG_STATUS__APG_HBR_ENABLE_MASK 0x00000100L
+#define APG0_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_MASK 0x01000000L
+#define APG0_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_CLEAR_MASK 0x02000000L
+//APG0_APG_STATUS2
+#define APG0_APG_STATUS2__APG_OUTPUT_ACTIVE__SHIFT 0x0
+#define APG0_APG_STATUS2__APG_OUTPUT_ACTIVE_MASK 0x00000001L
+//APG0_APG_MEM_PWR
+#define APG0_APG_MEM_PWR__APG_MEM_PWR_DIS__SHIFT 0x0
+#define APG0_APG_MEM_PWR__APG_MEM_PWR_FORCE__SHIFT 0x4
+#define APG0_APG_MEM_PWR__APG_MEM_PWR_STATE__SHIFT 0x8
+#define APG0_APG_MEM_PWR__APG_MEM_DEFAULT_LOW_POWER_STATE__SHIFT 0xc
+#define APG0_APG_MEM_PWR__APG_MEM_PWR_DIS_MASK 0x00000001L
+#define APG0_APG_MEM_PWR__APG_MEM_PWR_FORCE_MASK 0x00000030L
+#define APG0_APG_MEM_PWR__APG_MEM_PWR_STATE_MASK 0x00000300L
+#define APG0_APG_MEM_PWR__APG_MEM_DEFAULT_LOW_POWER_STATE_MASK 0x00003000L
+//APG0_APG_SPARE
+#define APG0_APG_SPARE__APG_SPARE__SHIFT 0x0
+#define APG0_APG_SPARE__APG_SPARE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc0_dme_dme_dispdec
+//DME6_DME_CONTROL
+#define DME6_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME6_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME6_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME6_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME6_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME6_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME6_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME6_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME6_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME6_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME6_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME6_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME6_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME6_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME6_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME6_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME6_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME6_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+//DME6_DME_MEMORY_CONTROL
+#define DME6_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME6_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME6_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME6_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME6_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME6_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME6_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME6_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc0_vpg_vpg_dispdec
+//VPG6_VPG_GENERIC_PACKET_ACCESS_CTRL
+#define VPG6_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG6_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+//VPG6_VPG_GENERIC_PACKET_DATA
+#define VPG6_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG6_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG6_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG6_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG6_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG6_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG6_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG6_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+//VPG6_VPG_GSP_FRAME_UPDATE_CTRL
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG6_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+//VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG6_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+//VPG6_VPG_GENERIC_STATUS
+#define VPG6_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG6_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG6_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG6_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG6_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG6_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+//VPG6_VPG_MEM_PWR
+#define VPG6_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG6_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG6_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG6_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG6_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG6_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+//VPG6_VPG_ISRC1_2_ACCESS_CTRL
+#define VPG6_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG6_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+//VPG6_VPG_ISRC1_2_DATA
+#define VPG6_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG6_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG6_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG6_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG6_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG6_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG6_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG6_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+//VPG6_VPG_MPEG_INFO0
+#define VPG6_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG6_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG6_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG6_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG6_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG6_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG6_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG6_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+//VPG6_VPG_MPEG_INFO1
+#define VPG6_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG6_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG6_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG6_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG6_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG6_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG6_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG6_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
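(Aside, not part of the generated header: the __SHIFT/_MASK pairs above follow the usual pattern — a field is read by masking and shifting right, and written by clearing the field and inserting the shifted value. The sketch below illustrates this with the APG0_APG_CONTROL2 field defined earlier; it operates on a raw register value only, and the helper names are illustrative, not the driver's actual accessors.)

#include <linux/types.h>

/* Extract the 8-bit APG_DP_AUDIO_STREAM_ID field from an APG0_APG_CONTROL2 value. */
static inline u32 apg0_get_audio_stream_id(u32 reg)
{
	return (reg & APG0_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID_MASK) >>
	       APG0_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID__SHIFT;
}

/* Return reg with the APG_DP_AUDIO_STREAM_ID field replaced by id, other bits preserved. */
static inline u32 apg0_set_audio_stream_id(u32 reg, u32 id)
{
	reg &= ~APG0_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID_MASK;
	reg |= (id << APG0_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID__SHIFT) &
	       APG0_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID_MASK;
	return reg;
}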
+
+
+// addressBlock: dce_dc_hpo_dp_sym32_enc0_dispdec
+//DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_DONE__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_DONE_MASK 0x00000100L
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_DONE__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_OVERFLOW_STATUS__SHIFT 0xc
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_DONE_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_OVERFLOW_STATUS_MASK 0x00001000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_PENDING__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_PENDING_MASK 0x00000010L
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_PENDING__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_PENDING_MASK 0x00000010L
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT__PIXEL_ENCODING_TYPE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_PIXEL_ENCODING__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_COMPONENT_DEPTH__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT__PIXEL_ENCODING_TYPE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_PIXEL_ENCODING_MASK 0x00000030L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_COMPONENT_DEPTH_MASK 0x00000300L
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA0__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA0__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA1__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA1__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA2__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA2__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA3__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA3__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA4__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA4__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA5__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA5__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA6
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA6__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA6__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA7__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA7__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA8__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA8__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_HBLANK_CONTROL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_HBLANK_CONTROL__HBLANK_MINIMUM_SYMBOL_WIDTH__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_HBLANK_CONTROL__HBLANK_MINIMUM_SYMBOL_WIDTH_MASK 0x0000FFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL__SDP_STREAM_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL__GSP0_PRIORITY__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL__SDP_CRC16_ENABLE__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL__SDP_STREAM_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL__GSP0_PRIORITY_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL__SDP_CRC16_ENABLE_MASK 0x00000100L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AIP_ENABLE__SHIFT 0x2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ACM_ENABLE__SHIFT 0x3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ISRC_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_PRIORITY__SHIFT 0x5
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_VERSION_NUMBER__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE__SHIFT 0x1c
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AIP_ENABLE_MASK 0x00000004L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ACM_ENABLE_MASK 0x00000008L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ISRC_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_PRIORITY_MASK 0x00000020L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_VERSION_NUMBER_MASK 0x00003F00L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_MASK 0x10000000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_2_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_8_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0xc
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_HBR_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0x14
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_2_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x000003F0L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_8_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x0003F000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_HBR_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x03F00000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_SOF_REFERENCE__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_DOUBLE_BUFFER_PENDING__SHIFT 0xc
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_SOF_REFERENCE_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_DOUBLE_BUFFER_PENDING_MASK 0x00001000L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_CONTROL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_ENABLE__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_ENABLE_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_DISABLE_DEFER__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_STATUS__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_DISABLE_DEFER_MASK 0x00000030L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_STATUS_MASK 0x00000100L
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_CONT_MODE_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_CONT_MODE_ENABLE_MASK 0x00000010L
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT1
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_STATUS
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID_ACK__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID_ACK_MASK 0x00000010L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_STATUS
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_STATUS__BS_COUNT__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_STATUS__BS_COUNT_MASK 0x0000FFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_RESET__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_RESET_MASK 0x00000010L
+//DP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL
+#define DP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_DEFAULT_LOW_POWER_STATE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_FORCE__SHIFT 0x4
+#define DP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_DIS__SHIFT 0x8
+#define DP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_STATE__SHIFT 0xc
+#define DP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_DEFAULT_LOW_POWER_STATE_MASK 0x00000003L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_FORCE_MASK 0x00000030L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_DIS_MASK 0x00000100L
+#define DP_SYM32_ENC0_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_STATE_MASK 0x00003000L
+//DP_SYM32_ENC0_DP_SYM32_ENC_SPARE
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SPARE__DP_SYM32_ENC_SPARE__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_SPARE__DP_SYM32_ENC_SPARE_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT2
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT2__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT2__CRC_RESULT_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT3
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT3__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_RESULT3__CRC_RESULT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc1_dispdec
+//DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_EN__SHIFT 0x0
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DISPCLK__SHIFT 0x4
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SOCCLK__SHIFT 0x8
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DPSTREAMCLK__SHIFT 0xc
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SYMCLK32__SHIFT 0x10
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_EN_MASK 0x00000001L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DISPCLK_MASK 0x00000010L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SOCCLK_MASK 0x00000100L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DPSTREAMCLK_MASK 0x00001000L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SYMCLK32_MASK 0x00010000L
+//DP_STREAM_ENC1_DP_STREAM_ENC_INPUT_MUX_CONTROL
+#define DP_STREAM_ENC1_DP_STREAM_ENC_INPUT_MUX_CONTROL__DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL__SHIFT 0x0
+#define DP_STREAM_ENC1_DP_STREAM_ENC_INPUT_MUX_CONTROL__DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL_MASK 0x00000007L
+//DP_STREAM_ENC1_DP_STREAM_ENC_AUDIO_CONTROL
+#define DP_STREAM_ENC1_DP_STREAM_ENC_AUDIO_CONTROL__DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL__SHIFT 0x0
+#define DP_STREAM_ENC1_DP_STREAM_ENC_AUDIO_CONTROL__DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL_MASK 0x00000007L
+//DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE__SHIFT 0x0
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET__SHIFT 0x4
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_START_LEVEL__SHIFT 0x8
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_CLOCK_SRC__SHIFT 0x10
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE__SHIFT 0x14
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE__SHIFT 0x18
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR__SHIFT 0x1c
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE_MASK 0x00000001L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_MASK 0x00000010L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_START_LEVEL_MASK 0x00001F00L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_CLOCK_SRC_MASK 0x00010000L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE_MASK 0x00100000L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE_MASK 0x01000000L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR_MASK 0x30000000L
+//DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x0
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x2
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL__SHIFT 0x4
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL__SHIFT 0xc
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL__SHIFT 0x18
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED__SHIFT 0x1f
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000001L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE_MASK 0x00000002L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX_MASK 0x00000004L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL_MASK 0x000003F0L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL_MASK 0x0000F000L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL_MASK 0x3F000000L
+#define DP_STREAM_ENC1_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED_MASK 0x80000000L
+//DP_STREAM_ENC1_DP_STREAM_ENC_SPARE
+#define DP_STREAM_ENC1_DP_STREAM_ENC_SPARE__DP_STREAM_ENC_SPARE__SHIFT 0x0
+#define DP_STREAM_ENC1_DP_STREAM_ENC_SPARE__DP_STREAM_ENC_SPARE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc1_apg_apg_dispdec
+//APG1_APG_CONTROL
+#define APG1_APG_CONTROL__APG_RESET__SHIFT 0x1
+#define APG1_APG_CONTROL__APG_RESET_DONE__SHIFT 0x2
+#define APG1_APG_CONTROL__APG_RESET_MASK 0x00000002L
+#define APG1_APG_CONTROL__APG_RESET_DONE_MASK 0x00000004L
+//APG1_APG_CONTROL2
+#define APG1_APG_CONTROL2__APG_ENABLE__SHIFT 0x0
+#define APG1_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID__SHIFT 0x8
+#define APG1_APG_CONTROL2__APG_DP_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x18
+#define APG1_APG_CONTROL2__APG_ENABLE_MASK 0x00000001L
+#define APG1_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID_MASK 0x0000FF00L
+#define APG1_APG_CONTROL2__APG_DP_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x01000000L
+//APG1_APG_DBG_GEN_CONTROL
+#define APG1_APG_DBG_GEN_CONTROL__APG_DBG_GEN_ENABLE__SHIFT 0x0
+#define APG1_APG_DBG_GEN_CONTROL__APG_DBG_GEN_RESET__SHIFT 0x1
+#define APG1_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define APG1_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define APG1_APG_DBG_GEN_CONTROL__APG_DBG_GEN_ENABLE_MASK 0x00000001L
+#define APG1_APG_DBG_GEN_CONTROL__APG_DBG_GEN_RESET_MASK 0x00000002L
+#define APG1_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define APG1_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//APG1_APG_PACKET_CONTROL
+#define APG1_APG_PACKET_CONTROL__APG_ACP_SOURCE__SHIFT 0x1
+#define APG1_APG_PACKET_CONTROL__APG_AUDIO_INFO_SOURCE__SHIFT 0x2
+#define APG1_APG_PACKET_CONTROL__APG_ACP_SOURCE_MASK 0x00000002L
+#define APG1_APG_PACKET_CONTROL__APG_AUDIO_INFO_SOURCE_MASK 0x00000004L
+//APG1_APG_AUDIO_CRC_CONTROL
+#define APG1_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_EN__SHIFT 0x0
+#define APG1_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CONT__SHIFT 0x4
+#define APG1_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CH_SEL__SHIFT 0xd
+#define APG1_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_COUNT__SHIFT 0x10
+#define APG1_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_EN_MASK 0x00000001L
+#define APG1_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CONT_MASK 0x00000010L
+#define APG1_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CH_SEL_MASK 0x0000E000L
+#define APG1_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//APG1_APG_AUDIO_CRC_CONTROL2
+#define APG1_APG_AUDIO_CRC_CONTROL2__APG_AUDIO_CRC_COUNT_FORCE_DEFAULT__SHIFT 0x0
+#define APG1_APG_AUDIO_CRC_CONTROL2__APG_AUDIO_CRC_COUNT_FORCE_DEFAULT_MASK 0x0000FFFFL
+//APG1_APG_AUDIO_CRC_RESULT
+#define APG1_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE__SHIFT 0x0
+#define APG1_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_CLEAR__SHIFT 0x8
+#define APG1_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC__SHIFT 0x10
+#define APG1_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_MASK 0x00000001L
+#define APG1_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_CLEAR_MASK 0x00000100L
+#define APG1_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_MASK 0xFFFF0000L
+//APG1_APG_STATUS
+#define APG1_APG_STATUS__APG_AUDIO_ENABLE__SHIFT 0x4
+#define APG1_APG_STATUS__APG_HBR_ENABLE__SHIFT 0x8
+#define APG1_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS__SHIFT 0x18
+#define APG1_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_CLEAR__SHIFT 0x19
+#define APG1_APG_STATUS__APG_AUDIO_ENABLE_MASK 0x00000010L
+#define APG1_APG_STATUS__APG_HBR_ENABLE_MASK 0x00000100L
+#define APG1_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_MASK 0x01000000L
+#define APG1_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_CLEAR_MASK 0x02000000L
+//APG1_APG_STATUS2
+#define APG1_APG_STATUS2__APG_OUTPUT_ACTIVE__SHIFT 0x0
+#define APG1_APG_STATUS2__APG_OUTPUT_ACTIVE_MASK 0x00000001L
+//APG1_APG_MEM_PWR
+#define APG1_APG_MEM_PWR__APG_MEM_PWR_DIS__SHIFT 0x0
+#define APG1_APG_MEM_PWR__APG_MEM_PWR_FORCE__SHIFT 0x4
+#define APG1_APG_MEM_PWR__APG_MEM_PWR_STATE__SHIFT 0x8
+#define APG1_APG_MEM_PWR__APG_MEM_DEFAULT_LOW_POWER_STATE__SHIFT 0xc
+#define APG1_APG_MEM_PWR__APG_MEM_PWR_DIS_MASK 0x00000001L
+#define APG1_APG_MEM_PWR__APG_MEM_PWR_FORCE_MASK 0x00000030L
+#define APG1_APG_MEM_PWR__APG_MEM_PWR_STATE_MASK 0x00000300L
+#define APG1_APG_MEM_PWR__APG_MEM_DEFAULT_LOW_POWER_STATE_MASK 0x00003000L
+//APG1_APG_SPARE
+#define APG1_APG_SPARE__APG_SPARE__SHIFT 0x0
+#define APG1_APG_SPARE__APG_SPARE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc1_dme_dme_dispdec
+//DME7_DME_CONTROL
+#define DME7_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME7_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME7_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME7_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME7_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME7_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME7_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME7_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME7_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME7_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME7_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME7_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME7_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME7_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME7_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME7_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME7_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME7_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+//DME7_DME_MEMORY_CONTROL
+#define DME7_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME7_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME7_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME7_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME7_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME7_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME7_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME7_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc1_vpg_vpg_dispdec
+//VPG7_VPG_GENERIC_PACKET_ACCESS_CTRL
+#define VPG7_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG7_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+//VPG7_VPG_GENERIC_PACKET_DATA
+#define VPG7_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG7_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG7_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG7_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG7_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG7_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG7_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG7_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+//VPG7_VPG_GSP_FRAME_UPDATE_CTRL
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG7_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+//VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG7_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+//VPG7_VPG_GENERIC_STATUS
+#define VPG7_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG7_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG7_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG7_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG7_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG7_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+//VPG7_VPG_MEM_PWR
+#define VPG7_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG7_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG7_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG7_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG7_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG7_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+//VPG7_VPG_ISRC1_2_ACCESS_CTRL
+#define VPG7_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG7_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+//VPG7_VPG_ISRC1_2_DATA
+#define VPG7_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG7_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG7_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG7_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG7_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG7_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG7_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG7_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+//VPG7_VPG_MPEG_INFO0
+#define VPG7_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG7_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG7_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG7_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG7_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG7_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG7_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG7_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+//VPG7_VPG_MPEG_INFO1
+#define VPG7_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG7_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG7_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG7_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG7_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG7_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG7_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG7_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+
+
+// addressBlock: dce_dc_hpo_dp_sym32_enc1_dispdec
+//DP_SYM32_ENC1_DP_SYM32_ENC_CONTROL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_DONE__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_DONE_MASK 0x00000100L
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_DONE__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_OVERFLOW_STATUS__SHIFT 0xc
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_DONE_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_OVERFLOW_STATUS_MASK 0x00001000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_PENDING__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_PENDING_MASK 0x00000010L
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_PENDING__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_PENDING_MASK 0x00000010L
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT__PIXEL_ENCODING_TYPE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_PIXEL_ENCODING__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_COMPONENT_DEPTH__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT__PIXEL_ENCODING_TYPE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_PIXEL_ENCODING_MASK 0x00000030L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_COMPONENT_DEPTH_MASK 0x00000300L
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA0__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA0__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA1__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA1__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA2__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA2__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA3__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA3__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA4__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA4__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA5__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA5__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA6
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA6__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA6__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA7__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA7__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA8__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA8__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_HBLANK_CONTROL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_HBLANK_CONTROL__HBLANK_MINIMUM_SYMBOL_WIDTH__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_HBLANK_CONTROL__HBLANK_MINIMUM_SYMBOL_WIDTH_MASK 0x0000FFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_CONTROL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_CONTROL__SDP_STREAM_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_CONTROL__GSP0_PRIORITY__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_CONTROL__SDP_CRC16_ENABLE__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_CONTROL__SDP_STREAM_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_CONTROL__GSP0_PRIORITY_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_CONTROL__SDP_CRC16_ENABLE_MASK 0x00000100L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AIP_ENABLE__SHIFT 0x2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ACM_ENABLE__SHIFT 0x3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ISRC_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_PRIORITY__SHIFT 0x5
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_VERSION_NUMBER__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE__SHIFT 0x1c
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AIP_ENABLE_MASK 0x00000004L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ACM_ENABLE_MASK 0x00000008L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ISRC_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_PRIORITY_MASK 0x00000020L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_VERSION_NUMBER_MASK 0x00003F00L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_MASK 0x10000000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_2_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_8_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0xc
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_HBR_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0x14
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_2_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x000003F0L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_8_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x0003F000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_HBR_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x03F00000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_SOF_REFERENCE__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_DOUBLE_BUFFER_PENDING__SHIFT 0xc
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_SOF_REFERENCE_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_DOUBLE_BUFFER_PENDING_MASK 0x00001000L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_CONTROL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_ENABLE__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_ENABLE_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_VBID_CONTROL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_STREAM_CONTROL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_DISABLE_DEFER__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_STATUS__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_DISABLE_DEFER_MASK 0x00000030L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_STATUS_MASK 0x00000100L
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_CONTROL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_CONT_MODE_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_CONT_MODE_ENABLE_MASK 0x00000010L
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT1
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_STATUS
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID_ACK__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID_ACK_MASK 0x00000010L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_STATUS
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_STATUS__BS_COUNT__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_STATUS__BS_COUNT_MASK 0x0000FFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_RESET__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_RESET_MASK 0x00000010L
+//DP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL
+#define DP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_DEFAULT_LOW_POWER_STATE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_FORCE__SHIFT 0x4
+#define DP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_DIS__SHIFT 0x8
+#define DP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_STATE__SHIFT 0xc
+#define DP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_DEFAULT_LOW_POWER_STATE_MASK 0x00000003L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_FORCE_MASK 0x00000030L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_DIS_MASK 0x00000100L
+#define DP_SYM32_ENC1_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_STATE_MASK 0x00003000L
+//DP_SYM32_ENC1_DP_SYM32_ENC_SPARE
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SPARE__DP_SYM32_ENC_SPARE__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_SPARE__DP_SYM32_ENC_SPARE_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT2
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT2__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT2__CRC_RESULT_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT3
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT3__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC1_DP_SYM32_ENC_VID_CRC_RESULT3__CRC_RESULT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc2_dispdec
+//DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_EN__SHIFT 0x0
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DISPCLK__SHIFT 0x4
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SOCCLK__SHIFT 0x8
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DPSTREAMCLK__SHIFT 0xc
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SYMCLK32__SHIFT 0x10
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_EN_MASK 0x00000001L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DISPCLK_MASK 0x00000010L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SOCCLK_MASK 0x00000100L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DPSTREAMCLK_MASK 0x00001000L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SYMCLK32_MASK 0x00010000L
+//DP_STREAM_ENC2_DP_STREAM_ENC_INPUT_MUX_CONTROL
+#define DP_STREAM_ENC2_DP_STREAM_ENC_INPUT_MUX_CONTROL__DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL__SHIFT 0x0
+#define DP_STREAM_ENC2_DP_STREAM_ENC_INPUT_MUX_CONTROL__DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL_MASK 0x00000007L
+//DP_STREAM_ENC2_DP_STREAM_ENC_AUDIO_CONTROL
+#define DP_STREAM_ENC2_DP_STREAM_ENC_AUDIO_CONTROL__DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL__SHIFT 0x0
+#define DP_STREAM_ENC2_DP_STREAM_ENC_AUDIO_CONTROL__DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL_MASK 0x00000007L
+//DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE__SHIFT 0x0
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET__SHIFT 0x4
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_START_LEVEL__SHIFT 0x8
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_CLOCK_SRC__SHIFT 0x10
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE__SHIFT 0x14
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE__SHIFT 0x18
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR__SHIFT 0x1c
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE_MASK 0x00000001L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_MASK 0x00000010L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_START_LEVEL_MASK 0x00001F00L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_CLOCK_SRC_MASK 0x00010000L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE_MASK 0x00100000L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE_MASK 0x01000000L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR_MASK 0x30000000L
+//DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x0
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x2
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL__SHIFT 0x4
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL__SHIFT 0xc
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL__SHIFT 0x18
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED__SHIFT 0x1f
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000001L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE_MASK 0x00000002L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX_MASK 0x00000004L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL_MASK 0x000003F0L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL_MASK 0x0000F000L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL_MASK 0x3F000000L
+#define DP_STREAM_ENC2_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED_MASK 0x80000000L
+//DP_STREAM_ENC2_DP_STREAM_ENC_SPARE
+#define DP_STREAM_ENC2_DP_STREAM_ENC_SPARE__DP_STREAM_ENC_SPARE__SHIFT 0x0
+#define DP_STREAM_ENC2_DP_STREAM_ENC_SPARE__DP_STREAM_ENC_SPARE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc2_apg_apg_dispdec
+//APG2_APG_CONTROL
+#define APG2_APG_CONTROL__APG_RESET__SHIFT 0x1
+#define APG2_APG_CONTROL__APG_RESET_DONE__SHIFT 0x2
+#define APG2_APG_CONTROL__APG_RESET_MASK 0x00000002L
+#define APG2_APG_CONTROL__APG_RESET_DONE_MASK 0x00000004L
+//APG2_APG_CONTROL2
+#define APG2_APG_CONTROL2__APG_ENABLE__SHIFT 0x0
+#define APG2_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID__SHIFT 0x8
+#define APG2_APG_CONTROL2__APG_DP_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x18
+#define APG2_APG_CONTROL2__APG_ENABLE_MASK 0x00000001L
+#define APG2_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID_MASK 0x0000FF00L
+#define APG2_APG_CONTROL2__APG_DP_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x01000000L
+//APG2_APG_DBG_GEN_CONTROL
+#define APG2_APG_DBG_GEN_CONTROL__APG_DBG_GEN_ENABLE__SHIFT 0x0
+#define APG2_APG_DBG_GEN_CONTROL__APG_DBG_GEN_RESET__SHIFT 0x1
+#define APG2_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define APG2_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define APG2_APG_DBG_GEN_CONTROL__APG_DBG_GEN_ENABLE_MASK 0x00000001L
+#define APG2_APG_DBG_GEN_CONTROL__APG_DBG_GEN_RESET_MASK 0x00000002L
+#define APG2_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define APG2_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//APG2_APG_PACKET_CONTROL
+#define APG2_APG_PACKET_CONTROL__APG_ACP_SOURCE__SHIFT 0x1
+#define APG2_APG_PACKET_CONTROL__APG_AUDIO_INFO_SOURCE__SHIFT 0x2
+#define APG2_APG_PACKET_CONTROL__APG_ACP_SOURCE_MASK 0x00000002L
+#define APG2_APG_PACKET_CONTROL__APG_AUDIO_INFO_SOURCE_MASK 0x00000004L
+//APG2_APG_AUDIO_CRC_CONTROL
+#define APG2_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_EN__SHIFT 0x0
+#define APG2_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CONT__SHIFT 0x4
+#define APG2_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CH_SEL__SHIFT 0xd
+#define APG2_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_COUNT__SHIFT 0x10
+#define APG2_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_EN_MASK 0x00000001L
+#define APG2_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CONT_MASK 0x00000010L
+#define APG2_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CH_SEL_MASK 0x0000E000L
+#define APG2_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//APG2_APG_AUDIO_CRC_CONTROL2
+#define APG2_APG_AUDIO_CRC_CONTROL2__APG_AUDIO_CRC_COUNT_FORCE_DEFAULT__SHIFT 0x0
+#define APG2_APG_AUDIO_CRC_CONTROL2__APG_AUDIO_CRC_COUNT_FORCE_DEFAULT_MASK 0x0000FFFFL
+//APG2_APG_AUDIO_CRC_RESULT
+#define APG2_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE__SHIFT 0x0
+#define APG2_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_CLEAR__SHIFT 0x8
+#define APG2_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC__SHIFT 0x10
+#define APG2_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_MASK 0x00000001L
+#define APG2_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_CLEAR_MASK 0x00000100L
+#define APG2_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_MASK 0xFFFF0000L
+//APG2_APG_STATUS
+#define APG2_APG_STATUS__APG_AUDIO_ENABLE__SHIFT 0x4
+#define APG2_APG_STATUS__APG_HBR_ENABLE__SHIFT 0x8
+#define APG2_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS__SHIFT 0x18
+#define APG2_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_CLEAR__SHIFT 0x19
+#define APG2_APG_STATUS__APG_AUDIO_ENABLE_MASK 0x00000010L
+#define APG2_APG_STATUS__APG_HBR_ENABLE_MASK 0x00000100L
+#define APG2_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_MASK 0x01000000L
+#define APG2_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_CLEAR_MASK 0x02000000L
+//APG2_APG_STATUS2
+#define APG2_APG_STATUS2__APG_OUTPUT_ACTIVE__SHIFT 0x0
+#define APG2_APG_STATUS2__APG_OUTPUT_ACTIVE_MASK 0x00000001L
+//APG2_APG_MEM_PWR
+#define APG2_APG_MEM_PWR__APG_MEM_PWR_DIS__SHIFT 0x0
+#define APG2_APG_MEM_PWR__APG_MEM_PWR_FORCE__SHIFT 0x4
+#define APG2_APG_MEM_PWR__APG_MEM_PWR_STATE__SHIFT 0x8
+#define APG2_APG_MEM_PWR__APG_MEM_DEFAULT_LOW_POWER_STATE__SHIFT 0xc
+#define APG2_APG_MEM_PWR__APG_MEM_PWR_DIS_MASK 0x00000001L
+#define APG2_APG_MEM_PWR__APG_MEM_PWR_FORCE_MASK 0x00000030L
+#define APG2_APG_MEM_PWR__APG_MEM_PWR_STATE_MASK 0x00000300L
+#define APG2_APG_MEM_PWR__APG_MEM_DEFAULT_LOW_POWER_STATE_MASK 0x00003000L
+//APG2_APG_SPARE
+#define APG2_APG_SPARE__APG_SPARE__SHIFT 0x0
+#define APG2_APG_SPARE__APG_SPARE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc2_dme_dme_dispdec
+//DME8_DME_CONTROL
+#define DME8_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME8_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME8_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME8_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME8_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME8_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME8_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME8_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME8_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME8_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME8_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME8_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME8_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME8_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME8_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME8_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME8_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME8_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+//DME8_DME_MEMORY_CONTROL
+#define DME8_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME8_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME8_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME8_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME8_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME8_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME8_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME8_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc2_vpg_vpg_dispdec
+//VPG8_VPG_GENERIC_PACKET_ACCESS_CTRL
+#define VPG8_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG8_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+//VPG8_VPG_GENERIC_PACKET_DATA
+#define VPG8_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG8_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG8_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG8_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG8_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG8_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG8_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG8_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+//VPG8_VPG_GSP_FRAME_UPDATE_CTRL
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG8_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+//VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG8_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+//VPG8_VPG_GENERIC_STATUS
+#define VPG8_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG8_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG8_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG8_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG8_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG8_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+//VPG8_VPG_MEM_PWR
+#define VPG8_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG8_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG8_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG8_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG8_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG8_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+//VPG8_VPG_ISRC1_2_ACCESS_CTRL
+#define VPG8_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG8_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+//VPG8_VPG_ISRC1_2_DATA
+#define VPG8_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG8_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG8_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG8_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG8_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG8_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG8_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG8_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+//VPG8_VPG_MPEG_INFO0
+#define VPG8_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG8_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG8_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG8_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG8_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG8_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG8_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG8_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+//VPG8_VPG_MPEG_INFO1
+#define VPG8_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG8_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG8_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG8_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG8_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG8_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG8_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG8_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+
+
+// addressBlock: dce_dc_hpo_dp_sym32_enc2_dispdec
+//DP_SYM32_ENC2_DP_SYM32_ENC_CONTROL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_DONE__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_DONE_MASK 0x00000100L
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_DONE__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_OVERFLOW_STATUS__SHIFT 0xc
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_DONE_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_OVERFLOW_STATUS_MASK 0x00001000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_PENDING__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_PENDING_MASK 0x00000010L
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_PENDING__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_PENDING_MASK 0x00000010L
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT__PIXEL_ENCODING_TYPE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_PIXEL_ENCODING__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_COMPONENT_DEPTH__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT__PIXEL_ENCODING_TYPE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_PIXEL_ENCODING_MASK 0x00000030L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_COMPONENT_DEPTH_MASK 0x00000300L
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA0__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA0__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA1__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA1__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA2__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA2__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA3__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA3__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA4__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA4__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA5__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA5__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA6
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA6__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA6__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA7__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA7__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA8__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA8__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_HBLANK_CONTROL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_HBLANK_CONTROL__HBLANK_MINIMUM_SYMBOL_WIDTH__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_HBLANK_CONTROL__HBLANK_MINIMUM_SYMBOL_WIDTH_MASK 0x0000FFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_CONTROL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_CONTROL__SDP_STREAM_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_CONTROL__GSP0_PRIORITY__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_CONTROL__SDP_CRC16_ENABLE__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_CONTROL__SDP_STREAM_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_CONTROL__GSP0_PRIORITY_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_CONTROL__SDP_CRC16_ENABLE_MASK 0x00000100L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AIP_ENABLE__SHIFT 0x2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ACM_ENABLE__SHIFT 0x3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ISRC_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_PRIORITY__SHIFT 0x5
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_VERSION_NUMBER__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE__SHIFT 0x1c
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AIP_ENABLE_MASK 0x00000004L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ACM_ENABLE_MASK 0x00000008L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ISRC_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_PRIORITY_MASK 0x00000020L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_VERSION_NUMBER_MASK 0x00003F00L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_MASK 0x10000000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_2_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_8_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0xc
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_HBR_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0x14
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_2_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x000003F0L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_8_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x0003F000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_HBR_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x03F00000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_SOF_REFERENCE__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_DOUBLE_BUFFER_PENDING__SHIFT 0xc
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_SOF_REFERENCE_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_DOUBLE_BUFFER_PENDING_MASK 0x00001000L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_CONTROL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_ENABLE__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_ENABLE_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_VBID_CONTROL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_STREAM_CONTROL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_DISABLE_DEFER__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_STATUS__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_DISABLE_DEFER_MASK 0x00000030L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_STATUS_MASK 0x00000100L
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_CONTROL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_CONT_MODE_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_CONT_MODE_ENABLE_MASK 0x00000010L
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT1
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_STATUS
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID_ACK__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID_ACK_MASK 0x00000010L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_STATUS
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_STATUS__BS_COUNT__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_STATUS__BS_COUNT_MASK 0x0000FFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_RESET__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_RESET_MASK 0x00000010L
+//DP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL
+#define DP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_DEFAULT_LOW_POWER_STATE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_FORCE__SHIFT 0x4
+#define DP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_DIS__SHIFT 0x8
+#define DP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_STATE__SHIFT 0xc
+#define DP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_DEFAULT_LOW_POWER_STATE_MASK 0x00000003L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_FORCE_MASK 0x00000030L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_DIS_MASK 0x00000100L
+#define DP_SYM32_ENC2_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_STATE_MASK 0x00003000L
+//DP_SYM32_ENC2_DP_SYM32_ENC_SPARE
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SPARE__DP_SYM32_ENC_SPARE__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_SPARE__DP_SYM32_ENC_SPARE_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT2
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT2__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT2__CRC_RESULT_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT3
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT3__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC2_DP_SYM32_ENC_VID_CRC_RESULT3__CRC_RESULT_MASK 0xFFFFFFFFL
+
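The SHIFT/MASK pairs in this block all follow the same pattern: each field gets a bit offset plus a contiguous mask within its 32-bit register. As a minimal illustrative sketch only (not part of this patch or the generated header), the snippet below shows the usual read-modify-write idiom for such a pair, using the UNCOMPRESSED_COMPONENT_DEPTH field defined above; the get/set helper names are hypothetical.

/* Illustrative sketch -- not part of the generated header or this patch.
 * Shows how a __SHIFT/_MASK pair is typically used to read and update a
 * single register field without disturbing the other bits.
 */
#include <stdint.h>

#define UNCOMPRESSED_COMPONENT_DEPTH__SHIFT 0x8          /* from the definitions above */
#define UNCOMPRESSED_COMPONENT_DEPTH_MASK   0x00000300L  /* 2-bit field at bits [9:8]  */

static inline uint32_t get_component_depth(uint32_t reg_val)
{
	/* Isolate the field and shift it down to a plain value. */
	return (reg_val & UNCOMPRESSED_COMPONENT_DEPTH_MASK)
		>> UNCOMPRESSED_COMPONENT_DEPTH__SHIFT;
}

static inline uint32_t set_component_depth(uint32_t reg_val, uint32_t depth)
{
	/* Clear the field, then OR in the new value, masked to the field width. */
	reg_val &= ~(uint32_t)UNCOMPRESSED_COMPONENT_DEPTH_MASK;
	reg_val |= (depth << UNCOMPRESSED_COMPONENT_DEPTH__SHIFT)
		& UNCOMPRESSED_COMPONENT_DEPTH_MASK;
	return reg_val;
}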
+
+// addressBlock: dce_dc_hpo_dp_stream_enc3_dispdec
+//DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_EN__SHIFT 0x0
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DISPCLK__SHIFT 0x4
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SOCCLK__SHIFT 0x8
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DPSTREAMCLK__SHIFT 0xc
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SYMCLK32__SHIFT 0x10
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_EN_MASK 0x00000001L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DISPCLK_MASK 0x00000010L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SOCCLK_MASK 0x00000100L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_DPSTREAMCLK_MASK 0x00001000L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_CONTROL__DP_STREAM_ENC_CLOCK_ON_SYMCLK32_MASK 0x00010000L
+//DP_STREAM_ENC3_DP_STREAM_ENC_INPUT_MUX_CONTROL
+#define DP_STREAM_ENC3_DP_STREAM_ENC_INPUT_MUX_CONTROL__DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL__SHIFT 0x0
+#define DP_STREAM_ENC3_DP_STREAM_ENC_INPUT_MUX_CONTROL__DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL_MASK 0x00000007L
+//DP_STREAM_ENC3_DP_STREAM_ENC_AUDIO_CONTROL
+#define DP_STREAM_ENC3_DP_STREAM_ENC_AUDIO_CONTROL__DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL__SHIFT 0x0
+#define DP_STREAM_ENC3_DP_STREAM_ENC_AUDIO_CONTROL__DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL_MASK 0x00000007L
+//DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE__SHIFT 0x0
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET__SHIFT 0x4
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_START_LEVEL__SHIFT 0x8
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_CLOCK_SRC__SHIFT 0x10
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE__SHIFT 0x14
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE__SHIFT 0x18
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR__SHIFT 0x1c
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ENABLE_MASK 0x00000001L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_MASK 0x00000010L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_START_LEVEL_MASK 0x00001F00L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_READ_CLOCK_SRC_MASK 0x00010000L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_RESET_DONE_MASK 0x00100000L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_VIDEO_STREAM_ACTIVE_MASK 0x01000000L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0__FIFO_ERROR_MASK 0x30000000L
+//DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x0
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x2
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL__SHIFT 0x4
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL__SHIFT 0xc
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL__SHIFT 0x18
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED__SHIFT 0x1f
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000001L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECAL_AVERAGE_MASK 0x00000002L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_FORCE_RECOMP_MINMAX_MASK 0x00000004L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_OVERWRITE_LEVEL_MASK 0x000003F0L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MINIMUM_LEVEL_MASK 0x0000F000L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CAL_AVERAGE_LEVEL_MASK 0x3F000000L
+#define DP_STREAM_ENC3_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL1__FIFO_CALIBRATED_MASK 0x80000000L
+//DP_STREAM_ENC3_DP_STREAM_ENC_SPARE
+#define DP_STREAM_ENC3_DP_STREAM_ENC_SPARE__DP_STREAM_ENC_SPARE__SHIFT 0x0
+#define DP_STREAM_ENC3_DP_STREAM_ENC_SPARE__DP_STREAM_ENC_SPARE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc3_apg_apg_dispdec
+//APG3_APG_CONTROL
+#define APG3_APG_CONTROL__APG_RESET__SHIFT 0x1
+#define APG3_APG_CONTROL__APG_RESET_DONE__SHIFT 0x2
+#define APG3_APG_CONTROL__APG_RESET_MASK 0x00000002L
+#define APG3_APG_CONTROL__APG_RESET_DONE_MASK 0x00000004L
+//APG3_APG_CONTROL2
+#define APG3_APG_CONTROL2__APG_ENABLE__SHIFT 0x0
+#define APG3_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID__SHIFT 0x8
+#define APG3_APG_CONTROL2__APG_DP_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x18
+#define APG3_APG_CONTROL2__APG_ENABLE_MASK 0x00000001L
+#define APG3_APG_CONTROL2__APG_DP_AUDIO_STREAM_ID_MASK 0x0000FF00L
+#define APG3_APG_CONTROL2__APG_DP_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x01000000L
+//APG3_APG_DBG_GEN_CONTROL
+#define APG3_APG_DBG_GEN_CONTROL__APG_DBG_GEN_ENABLE__SHIFT 0x0
+#define APG3_APG_DBG_GEN_CONTROL__APG_DBG_GEN_RESET__SHIFT 0x1
+#define APG3_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define APG3_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define APG3_APG_DBG_GEN_CONTROL__APG_DBG_GEN_ENABLE_MASK 0x00000001L
+#define APG3_APG_DBG_GEN_CONTROL__APG_DBG_GEN_RESET_MASK 0x00000002L
+#define APG3_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define APG3_APG_DBG_GEN_CONTROL__APG_DBG_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//APG3_APG_PACKET_CONTROL
+#define APG3_APG_PACKET_CONTROL__APG_ACP_SOURCE__SHIFT 0x1
+#define APG3_APG_PACKET_CONTROL__APG_AUDIO_INFO_SOURCE__SHIFT 0x2
+#define APG3_APG_PACKET_CONTROL__APG_ACP_SOURCE_MASK 0x00000002L
+#define APG3_APG_PACKET_CONTROL__APG_AUDIO_INFO_SOURCE_MASK 0x00000004L
+//APG3_APG_AUDIO_CRC_CONTROL
+#define APG3_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_EN__SHIFT 0x0
+#define APG3_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CONT__SHIFT 0x4
+#define APG3_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CH_SEL__SHIFT 0xd
+#define APG3_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_COUNT__SHIFT 0x10
+#define APG3_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_EN_MASK 0x00000001L
+#define APG3_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CONT_MASK 0x00000010L
+#define APG3_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_CH_SEL_MASK 0x0000E000L
+#define APG3_APG_AUDIO_CRC_CONTROL__APG_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//APG3_APG_AUDIO_CRC_CONTROL2
+#define APG3_APG_AUDIO_CRC_CONTROL2__APG_AUDIO_CRC_COUNT_FORCE_DEFAULT__SHIFT 0x0
+#define APG3_APG_AUDIO_CRC_CONTROL2__APG_AUDIO_CRC_COUNT_FORCE_DEFAULT_MASK 0x0000FFFFL
+//APG3_APG_AUDIO_CRC_RESULT
+#define APG3_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE__SHIFT 0x0
+#define APG3_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_CLEAR__SHIFT 0x8
+#define APG3_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC__SHIFT 0x10
+#define APG3_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_MASK 0x00000001L
+#define APG3_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_DONE_CLEAR_MASK 0x00000100L
+#define APG3_APG_AUDIO_CRC_RESULT__APG_AUDIO_CRC_MASK 0xFFFF0000L
+//APG3_APG_STATUS
+#define APG3_APG_STATUS__APG_AUDIO_ENABLE__SHIFT 0x4
+#define APG3_APG_STATUS__APG_HBR_ENABLE__SHIFT 0x8
+#define APG3_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS__SHIFT 0x18
+#define APG3_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_CLEAR__SHIFT 0x19
+#define APG3_APG_STATUS__APG_AUDIO_ENABLE_MASK 0x00000010L
+#define APG3_APG_STATUS__APG_HBR_ENABLE_MASK 0x00000100L
+#define APG3_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_MASK 0x01000000L
+#define APG3_APG_STATUS__APG_AUDIO_FIFO_OVERFLOW_STATUS_CLEAR_MASK 0x02000000L
+//APG3_APG_STATUS2
+#define APG3_APG_STATUS2__APG_OUTPUT_ACTIVE__SHIFT 0x0
+#define APG3_APG_STATUS2__APG_OUTPUT_ACTIVE_MASK 0x00000001L
+//APG3_APG_MEM_PWR
+#define APG3_APG_MEM_PWR__APG_MEM_PWR_DIS__SHIFT 0x0
+#define APG3_APG_MEM_PWR__APG_MEM_PWR_FORCE__SHIFT 0x4
+#define APG3_APG_MEM_PWR__APG_MEM_PWR_STATE__SHIFT 0x8
+#define APG3_APG_MEM_PWR__APG_MEM_DEFAULT_LOW_POWER_STATE__SHIFT 0xc
+#define APG3_APG_MEM_PWR__APG_MEM_PWR_DIS_MASK 0x00000001L
+#define APG3_APG_MEM_PWR__APG_MEM_PWR_FORCE_MASK 0x00000030L
+#define APG3_APG_MEM_PWR__APG_MEM_PWR_STATE_MASK 0x00000300L
+#define APG3_APG_MEM_PWR__APG_MEM_DEFAULT_LOW_POWER_STATE_MASK 0x00003000L
+//APG3_APG_SPARE
+#define APG3_APG_SPARE__APG_SPARE__SHIFT 0x0
+#define APG3_APG_SPARE__APG_SPARE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc3_dme_dme_dispdec
+//DME9_DME_CONTROL
+#define DME9_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DME9_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DME9_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DME9_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DME9_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DME9_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DME9_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DME9_DME_CONTROL__METADATA_TRANSMISSION_MISSED__SHIFT 0x18
+#define DME9_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR__SHIFT 0x19
+#define DME9_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DME9_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DME9_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DME9_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DME9_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DME9_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DME9_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+#define DME9_DME_CONTROL__METADATA_TRANSMISSION_MISSED_MASK 0x01000000L
+#define DME9_DME_CONTROL__METADATA_TRANSMISSION_MISSED_CLR_MASK 0x02000000L
+//DME9_DME_MEMORY_CONTROL
+#define DME9_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE__SHIFT 0x0
+#define DME9_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS__SHIFT 0x4
+#define DME9_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE__SHIFT 0x8
+#define DME9_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define DME9_DME_MEMORY_CONTROL__DME_MEM_PWR_FORCE_MASK 0x00000003L
+#define DME9_DME_MEMORY_CONTROL__DME_MEM_PWR_DIS_MASK 0x00000010L
+#define DME9_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
+#define DME9_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+
+
+// addressBlock: dce_dc_hpo_dp_stream_enc3_vpg_vpg_dispdec
+//VPG9_VPG_GENERIC_PACKET_ACCESS_CTRL
+#define VPG9_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX__SHIFT 0x0
+#define VPG9_VPG_GENERIC_PACKET_ACCESS_CTRL__VPG_GENERIC_DATA_INDEX_MASK 0x000000FFL
+//VPG9_VPG_GENERIC_PACKET_DATA
+#define VPG9_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0__SHIFT 0x0
+#define VPG9_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1__SHIFT 0x8
+#define VPG9_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2__SHIFT 0x10
+#define VPG9_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3__SHIFT 0x18
+#define VPG9_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG9_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG9_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG9_VPG_GENERIC_PACKET_DATA__VPG_GENERIC_DATA_BYTE3_MASK 0xFF000000L
+//VPG9_VPG_GSP_FRAME_UPDATE_CTRL
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE__SHIFT 0x1
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE__SHIFT 0x2
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE__SHIFT 0x3
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE__SHIFT 0x4
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE__SHIFT 0x5
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE__SHIFT 0x6
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE__SHIFT 0x7
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE__SHIFT 0x8
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE__SHIFT 0x9
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE__SHIFT 0xa
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE__SHIFT 0xb
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE__SHIFT 0xc
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE__SHIFT 0xd
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE__SHIFT 0xe
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x10
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x12
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0x13
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x14
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x16
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x17
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING__SHIFT 0x18
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING__SHIFT 0x1a
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING__SHIFT 0x1b
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING__SHIFT 0x1c
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING__SHIFT 0x1e
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_MASK 0x00000002L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_MASK 0x00000004L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_MASK 0x00000008L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_MASK 0x00000010L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_MASK 0x00000020L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_MASK 0x00000040L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_MASK 0x00000080L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_MASK 0x00000100L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_MASK 0x00000200L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_MASK 0x00000400L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_MASK 0x00000800L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_MASK 0x00001000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_MASK 0x00002000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_MASK 0x00004000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00010000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00040000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00080000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00100000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x00400000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x00800000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC8_FRAME_UPDATE_PENDING_MASK 0x01000000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC9_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC10_FRAME_UPDATE_PENDING_MASK 0x04000000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC11_FRAME_UPDATE_PENDING_MASK 0x08000000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC12_FRAME_UPDATE_PENDING_MASK 0x10000000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC13_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define VPG9_VPG_GSP_FRAME_UPDATE_CTRL__VPG_GENERIC14_FRAME_UPDATE_PENDING_MASK 0x40000000L
+//VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x0
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x1
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0x2
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0x3
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x4
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x5
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x6
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x7
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE__SHIFT 0x8
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE__SHIFT 0x9
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE__SHIFT 0xa
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE__SHIFT 0xb
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE__SHIFT 0xc
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE__SHIFT 0xd
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE__SHIFT 0xe
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x10
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x11
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0x12
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x14
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x15
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x16
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING__SHIFT 0x18
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING__SHIFT 0x19
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1a
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1c
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1d
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1e
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000001L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000002L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00000008L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00000010L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00000020L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_MASK 0x00000080L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_MASK 0x00000100L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_MASK 0x00000200L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_MASK 0x00000800L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_MASK 0x00001000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_MASK 0x00002000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00010000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00020000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00040000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00100000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00200000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x00400000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC8_IMMEDIATE_UPDATE_PENDING_MASK 0x01000000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC9_IMMEDIATE_UPDATE_PENDING_MASK 0x02000000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC10_IMMEDIATE_UPDATE_PENDING_MASK 0x04000000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC11_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC12_IMMEDIATE_UPDATE_PENDING_MASK 0x10000000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC13_IMMEDIATE_UPDATE_PENDING_MASK 0x20000000L
+#define VPG9_VPG_GSP_IMMEDIATE_UPDATE_CTRL__VPG_GENERIC14_IMMEDIATE_UPDATE_PENDING_MASK 0x40000000L
+//VPG9_VPG_GENERIC_STATUS
+#define VPG9_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS__SHIFT 0x0
+#define VPG9_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED__SHIFT 0x1
+#define VPG9_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR__SHIFT 0x4
+#define VPG9_VPG_GENERIC_STATUS__VPG_GENERIC_LOCK_STATUS_MASK 0x00000001L
+#define VPG9_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_OCCURED_MASK 0x00000002L
+#define VPG9_VPG_GENERIC_STATUS__VPG_GENERIC_CONFLICT_CLR_MASK 0x00000010L
+//VPG9_VPG_MEM_PWR
+#define VPG9_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS__SHIFT 0x0
+#define VPG9_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE__SHIFT 0x4
+#define VPG9_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE__SHIFT 0x8
+#define VPG9_VPG_MEM_PWR__VPG_GSP_MEM_LIGHT_SLEEP_DIS_MASK 0x00000001L
+#define VPG9_VPG_MEM_PWR__VPG_GSP_LIGHT_SLEEP_FORCE_MASK 0x00000010L
+#define VPG9_VPG_MEM_PWR__VPG_GSP_MEM_PWR_STATE_MASK 0x00000100L
+//VPG9_VPG_ISRC1_2_ACCESS_CTRL
+#define VPG9_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX__SHIFT 0x0
+#define VPG9_VPG_ISRC1_2_ACCESS_CTRL__VPG_ISRC1_2_DATA_INDEX_MASK 0x0000000FL
+//VPG9_VPG_ISRC1_2_DATA
+#define VPG9_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0__SHIFT 0x0
+#define VPG9_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1__SHIFT 0x8
+#define VPG9_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2__SHIFT 0x10
+#define VPG9_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3__SHIFT 0x18
+#define VPG9_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE0_MASK 0x000000FFL
+#define VPG9_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE1_MASK 0x0000FF00L
+#define VPG9_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE2_MASK 0x00FF0000L
+#define VPG9_VPG_ISRC1_2_DATA__VPG_ISRC_DATA_BYTE3_MASK 0xFF000000L
+//VPG9_VPG_MPEG_INFO0
+#define VPG9_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define VPG9_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0__SHIFT 0x8
+#define VPG9_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1__SHIFT 0x10
+#define VPG9_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2__SHIFT 0x18
+#define VPG9_VPG_MPEG_INFO0__VPG_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define VPG9_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define VPG9_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define VPG9_VPG_MPEG_INFO0__VPG_MPEG_INFO_MB2_MASK 0xFF000000L
+//VPG9_VPG_MPEG_INFO1
+#define VPG9_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3__SHIFT 0x0
+#define VPG9_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF__SHIFT 0x8
+#define VPG9_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR__SHIFT 0xc
+#define VPG9_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE__SHIFT 0x10
+#define VPG9_VPG_MPEG_INFO1__VPG_MPEG_INFO_MB3_MASK 0x000000FFL
+#define VPG9_VPG_MPEG_INFO1__VPG_MPEG_INFO_MF_MASK 0x00000300L
+#define VPG9_VPG_MPEG_INFO1__VPG_MPEG_INFO_FR_MASK 0x00001000L
+#define VPG9_VPG_MPEG_INFO1__VPG_MPEG_INFO_UPDATE_MASK 0x00010000L
+
+
+// addressBlock: dce_dc_hpo_dp_sym32_enc3_dispdec
+//DP_SYM32_ENC3_DP_SYM32_ENC_CONTROL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_DONE__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_CONTROL__DP_SYM32_ENC_RESET_DONE_MASK 0x00000100L
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_DONE__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_OVERFLOW_STATUS__SHIFT 0xc
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_RESET_DONE_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_FIFO_CONTROL__PIXEL_TO_SYMBOL_FIFO_OVERFLOW_STATUS_MASK 0x00001000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_PENDING__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL__MSA_DOUBLE_BUFFER_PENDING_MASK 0x00000010L
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_PENDING__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL__PIXEL_FORMAT_DOUBLE_BUFFER_PENDING_MASK 0x00000010L
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT__PIXEL_ENCODING_TYPE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_PIXEL_ENCODING__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_COMPONENT_DEPTH__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT__PIXEL_ENCODING_TYPE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_PIXEL_ENCODING_MASK 0x00000030L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PIXEL_FORMAT__UNCOMPRESSED_COMPONENT_DEPTH_MASK 0x00000300L
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA0__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA0__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA1__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA1__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA2__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA2__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA3__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA3__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA4__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA4__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA5__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA5__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA6
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA6__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA6__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA7__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA7__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA8__MSA_DATA__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA8__MSA_DATA_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_HBLANK_CONTROL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_HBLANK_CONTROL__HBLANK_MINIMUM_SYMBOL_WIDTH__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_HBLANK_CONTROL__HBLANK_MINIMUM_SYMBOL_WIDTH_MASK 0x0000FFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL0__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL1__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL2__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL3__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL4__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL5__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL6__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL7__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL8__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL9__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL10__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL11__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL12__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL13__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_SEND__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_POSITION__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_PAYLOAD_SIZE__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_SOF_REFERENCE__SHIFT 0x7
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_PENDING__SHIFT 0x9
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_PENDING__SHIFT 0xa
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_IDLE_CONTINUOUS_TRANSMISSION_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_SEND_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_ONE_SHOT_POSITION_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_PAYLOAD_SIZE_MASK 0x00000060L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_SOF_REFERENCE_MASK 0x00000080L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_DEADLINE_MISSED_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRIGGER_TRANSMISSION_PENDING_MASK 0x00000200L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_DOUBLE_BUFFER_PENDING_MASK 0x00000400L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_GSP_CONTROL14__GSP_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_CONTROL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_CONTROL__SDP_STREAM_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_CONTROL__GSP0_PRIORITY__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_CONTROL__SDP_CRC16_ENABLE__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_CONTROL__SDP_STREAM_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_CONTROL__GSP0_PRIORITY_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_CONTROL__SDP_CRC16_ENABLE_MASK 0x00000100L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_ENABLE__SHIFT 0x1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AIP_ENABLE__SHIFT 0x2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ACM_ENABLE__SHIFT 0x3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ISRC_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_PRIORITY__SHIFT 0x5
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_VERSION_NUMBER__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE__SHIFT 0x1c
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_ENABLE_MASK 0x00000002L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AIP_ENABLE_MASK 0x00000004L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ACM_ENABLE_MASK 0x00000008L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ISRC_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ASP_PRIORITY_MASK 0x00000020L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__ATP_VERSION_NUMBER_MASK 0x00003F00L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_MASK 0x10000000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL0__AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_2_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_8_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0xc
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_HBR_LAYOUT_MAX_SAMPLE_COUNT__SHIFT 0x14
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_2_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x000003F0L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_8_CHANNEL_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x0003F000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_AUDIO_CONTROL1__ASP_CONCATENATION_HBR_LAYOUT_MAX_SAMPLE_COUNT_MASK 0x03F00000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_SOF_REFERENCE__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_DOUBLE_BUFFER_PENDING__SHIFT 0xc
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_SOF_REFERENCE_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_DOUBLE_BUFFER_PENDING_MASK 0x00001000L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL__METADATA_PACKET_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_CONTROL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_ENABLE__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_ENABLE_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_MSA_CONTROL__MSA_TRANSMISSION_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_VBID_CONTROL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER__SHIFT 0x10
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_VBID_CONTROL__VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER_MASK 0xFFFF0000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_STREAM_CONTROL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_DISABLE_DEFER__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_STATUS__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_DISABLE_DEFER_MASK 0x00000030L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_STREAM_CONTROL__VID_STREAM_STATUS_MASK 0x00000100L
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_DOUBLE_BUFFER_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_PANEL_REPLAY_CONTROL__PANEL_REPLAY_TUNNELING_OPTIMIZATION_DOUBLE_BUFFER_ENABLE_MASK 0x00000010L
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_CONTROL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_CONT_MODE_ENABLE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_CONTROL__CRC_CONT_MODE_ENABLE_MASK 0x00000010L
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT0__CRC_RESULT_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT1
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT1__CRC_RESULT_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_STATUS
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID_ACK__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_STATUS__CRC_VALID_ACK_MASK 0x00000010L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_STATUS
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_STATUS__BS_COUNT__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_STATUS__BS_COUNT_MASK 0x0000FFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_ENABLE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_RESET__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_ENABLE_MASK 0x00000001L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SYMBOL_COUNT_CONTROL__BS_COUNT_RESET_MASK 0x00000010L
+//DP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL
+#define DP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_DEFAULT_LOW_POWER_STATE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_FORCE__SHIFT 0x4
+#define DP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_DIS__SHIFT 0x8
+#define DP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_STATE__SHIFT 0xc
+#define DP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_DEFAULT_LOW_POWER_STATE_MASK 0x00000003L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_FORCE_MASK 0x00000030L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_DIS_MASK 0x00000100L
+#define DP_SYM32_ENC3_DP_SYM32_ENC_MEM_POWER_CONTROL__MEM_PWR_STATE_MASK 0x00003000L
+//DP_SYM32_ENC3_DP_SYM32_ENC_SPARE
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SPARE__DP_SYM32_ENC_SPARE__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_SPARE__DP_SYM32_ENC_SPARE_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT2
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT2__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT2__CRC_RESULT_MASK 0xFFFFFFFFL
+//DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT3
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT3__CRC_RESULT__SHIFT 0x0
+#define DP_SYM32_ENC3_DP_SYM32_ENC_VID_CRC_RESULT3__CRC_RESULT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_link_enc0_dispdec
+//DP_LINK_ENC0_DP_LINK_ENC_CLOCK_CONTROL
+#define DP_LINK_ENC0_DP_LINK_ENC_CLOCK_CONTROL__DP_LINK_ENC_CLOCK_EN__SHIFT 0x0
+#define DP_LINK_ENC0_DP_LINK_ENC_CLOCK_CONTROL__DP_LINK_ENC_CLOCK_ON_SYMCLK32__SHIFT 0x4
+#define DP_LINK_ENC0_DP_LINK_ENC_CLOCK_CONTROL__DP_LINK_ENC_CLOCK_EN_MASK 0x00000001L
+#define DP_LINK_ENC0_DP_LINK_ENC_CLOCK_CONTROL__DP_LINK_ENC_CLOCK_ON_SYMCLK32_MASK 0x00000010L
+//DP_LINK_ENC0_DP_LINK_ENC_SPARE
+#define DP_LINK_ENC0_DP_LINK_ENC_SPARE__DP_LINK_ENC_SPARE__SHIFT 0x0
+#define DP_LINK_ENC0_DP_LINK_ENC_SPARE__DP_LINK_ENC_SPARE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_dphy_sym320_dispdec
+//DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__DPHY_ENABLE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__DPHY_RESET__SHIFT 0x1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__PRECODER_ENABLE__SHIFT 0x2
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__MODE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__NUM_LANES__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__SHORT_LAST_TPS2_PERIOD__SHIFT 0xc
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__DPHY_ENABLE_MASK 0x00000001L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__DPHY_RESET_MASK 0x00000002L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__PRECODER_ENABLE_MASK 0x00000004L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__MODE_MASK 0x00000030L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__NUM_LANES_MASK 0x00000300L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL__SHORT_LAST_TPS2_PERIOD_MASK 0x00001000L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__STATUS__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__RESET_STATUS__SHIFT 0x1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__CURRENT_MODE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__ENCRYPTION_ENABLED__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__RATE_UPDATE_PENDING__SHIFT 0xc
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__SAT_UPDATE_PENDING__SHIFT 0x10
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__STATUS_MASK 0x00000001L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__RESET_STATUS_MASK 0x00000002L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__CURRENT_MODE_MASK 0x00000030L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__ENCRYPTION_ENABLED_MASK 0x00000100L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__RATE_UPDATE_PENDING_MASK 0x00001000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS__SAT_UPDATE_PENDING_MASK 0x00030000L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_UPDATE
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_UPDATE__SAT_UPDATE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_UPDATE__SAT_UPDATE_MASK 0x00000003L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0__STREAM_VC_RATE_Y__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0__STREAM_VC_RATE_X__SHIFT 0x19
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0__STREAM_VC_RATE_Y_MASK 0x01FFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0__STREAM_VC_RATE_X_MASK 0xFE000000L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL1__STREAM_VC_RATE_Y__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL1__STREAM_VC_RATE_X__SHIFT 0x19
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL1__STREAM_VC_RATE_Y_MASK 0x01FFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL1__STREAM_VC_RATE_X_MASK 0xFE000000L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL2
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL2__STREAM_VC_RATE_Y__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL2__STREAM_VC_RATE_X__SHIFT 0x19
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL2__STREAM_VC_RATE_Y_MASK 0x01FFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL2__STREAM_VC_RATE_X_MASK 0xFE000000L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL3
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL3__STREAM_VC_RATE_Y__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL3__STREAM_VC_RATE_X__SHIFT 0x19
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL3__STREAM_VC_RATE_Y_MASK 0x01FFFFFFL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL3__STREAM_VC_RATE_X_MASK 0xFE000000L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC1__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC2__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC3__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT0__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL0__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT1__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL1__SHIFT 0xc
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT2__SHIFT 0x10
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL2__SHIFT 0x14
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT3__SHIFT 0x18
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL3__SHIFT 0x1c
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT0_MASK 0x00000007L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL0_MASK 0x00000070L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT1_MASK 0x00000700L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL1_MASK 0x00007000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT2_MASK 0x00070000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL2_MASK 0x00700000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT3_MASK 0x07000000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL3_MASK 0x70000000L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED0__TP_PRBS_SEED__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED0__TP_PRBS_SEED_MASK 0x7FFFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED1__TP_PRBS_SEED__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED1__TP_PRBS_SEED_MASK 0x7FFFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED2
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED2__TP_PRBS_SEED__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED2__TP_PRBS_SEED_MASK 0x7FFFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED3
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED3__TP_PRBS_SEED__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_PRBS_SEED3__TP_PRBS_SEED_MASK 0x7FFFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_SQ_PULSE
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_SQ_PULSE__TP_SQ_PULSE_WIDTH__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_SQ_PULSE__TP_SQ_PULSE_WIDTH_MASK 0x000000FFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM0__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM0__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM1__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM1__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM2
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM2__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM2__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM3
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM3__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM3__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM4__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM4__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM5__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM5__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM6
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM6__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM6__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM7
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM7__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM7__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM8__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM8__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM9
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM9__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM9__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM10
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM10__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM10__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__TOTAL_SLOT_COUNT_ERROR__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__RATE_ERROR__SHIFT 0x1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__VC_SAME_STREAM_SOURCE__SHIFT 0x2
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__NO_ACT_ERROR__SHIFT 0x3
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__UNEXPECT_MODE_TRANSITION__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__ILLEGAL_STREAM_SYMBOL__SHIFT 0x5
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__RATE_COUNTER_SATURATION__SHIFT 0x6
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__COUNTER_OVERFLOW__SHIFT 0x7
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__CIPHER_ERROR__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__TOTAL_SLOT_COUNT_ERROR_MASK 0x00000001L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__RATE_ERROR_MASK 0x00000002L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__VC_SAME_STREAM_SOURCE_MASK 0x00000004L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__NO_ACT_ERROR_MASK 0x00000008L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__UNEXPECT_MODE_TRANSITION_MASK 0x00000010L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__ILLEGAL_STREAM_SYMBOL_MASK 0x00000020L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__RATE_COUNTER_SATURATION_MASK 0x00000040L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__COUNTER_OVERFLOW_MASK 0x00000080L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_ERROR_STATUS__CIPHER_ERROR_MASK 0x00000100L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_ENABLE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_TYPE__SHIFT 0x2
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_SYMBOL__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_ENABLE__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_TYPE__SHIFT 0xa
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_SYMBOL__SHIFT 0xc
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_ENABLE__SHIFT 0x10
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_TYPE__SHIFT 0x12
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_SYMBOL__SHIFT 0x14
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_ENABLE__SHIFT 0x18
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_TYPE__SHIFT 0x1a
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_SYMBOL__SHIFT 0x1c
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_ENABLE_MASK 0x00000003L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_TYPE_MASK 0x00000004L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_SYMBOL_MASK 0x000000F0L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_ENABLE_MASK 0x00000300L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_TYPE_MASK 0x00000400L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_SYMBOL_MASK 0x0000F000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_ENABLE_MASK 0x00030000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_TYPE_MASK 0x00040000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_SYMBOL_MASK 0x00F00000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_ENABLE_MASK 0x03000000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_TYPE_MASK 0x04000000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_SYMBOL_MASK 0xF0000000L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS0__LLCP_COUNT__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS0__LLCP_COUNT_MASK 0x0000FFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS1__CYCLE_COUNT__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS1__CYCLE_COUNT_MASK 0xFFFFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__LLCP_COUNT_ENABLE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__LLCP_COUNT_RESET__SHIFT 0x1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__CYCLE_COUNT_ENABLE__SHIFT 0x2
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__CYCLE_COUNT_RESET__SHIFT 0x3
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__LLCP_COUNT_ENABLE_MASK 0x00000001L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__LLCP_COUNT_RESET_MASK 0x00000002L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__CYCLE_COUNT_ENABLE_MASK 0x00000004L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__CYCLE_COUNT_RESET_MASK 0x00000008L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_ENABLE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_RESET__SHIFT 0x1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_LANE_SOURCE__SHIFT 0x4
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_TAP_SOURCE__SHIFT 0x6
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_SCHEDULER_SOURCE__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_IGNORE_VCPF__SHIFT 0x10
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_START_EVENT__SHIFT 0x11
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_USE_NUM_SYMBOLS__SHIFT 0x14
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_END_EVENT__SHIFT 0x15
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_ENABLE_MASK 0x00000001L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_RESET_MASK 0x00000002L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_LANE_SOURCE_MASK 0x00000030L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_TAP_SOURCE_MASK 0x000000C0L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_SCHEDULER_SOURCE_MASK 0x00003F00L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_IGNORE_VCPF_MASK 0x00010000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_START_EVENT_MASK 0x000E0000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_USE_NUM_SYMBOLS_MASK 0x00100000L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG0__CRC_END_EVENT_MASK 0x00600000L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG1
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG1__CRC_NUM_SYMBOLS__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_CONFIG1__CRC_NUM_SYMBOLS_MASK 0xFFFFFFFFL
+//DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_STATUS
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_STATUS__CRC_DONE__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_STATUS__CRC_VALUE__SHIFT 0x8
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_STATUS__CRC_DONE_MASK 0x00000001L
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_STATUS__CRC_VALUE_MASK 0x00FFFF00L
+//DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_COUNT
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_COUNT__CRC_SYMBOL_COUNT__SHIFT 0x0
+#define DP_DPHY_SYM320_DP_DPHY_SYM32_CRC_COUNT__CRC_SYMBOL_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_link_enc1_dispdec
+//DP_LINK_ENC1_DP_LINK_ENC_CLOCK_CONTROL
+#define DP_LINK_ENC1_DP_LINK_ENC_CLOCK_CONTROL__DP_LINK_ENC_CLOCK_EN__SHIFT 0x0
+#define DP_LINK_ENC1_DP_LINK_ENC_CLOCK_CONTROL__DP_LINK_ENC_CLOCK_ON_SYMCLK32__SHIFT 0x4
+#define DP_LINK_ENC1_DP_LINK_ENC_CLOCK_CONTROL__DP_LINK_ENC_CLOCK_EN_MASK 0x00000001L
+#define DP_LINK_ENC1_DP_LINK_ENC_CLOCK_CONTROL__DP_LINK_ENC_CLOCK_ON_SYMCLK32_MASK 0x00000010L
+//DP_LINK_ENC1_DP_LINK_ENC_SPARE
+#define DP_LINK_ENC1_DP_LINK_ENC_SPARE__DP_LINK_ENC_SPARE__SHIFT 0x0
+#define DP_LINK_ENC1_DP_LINK_ENC_SPARE__DP_LINK_ENC_SPARE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hpo_dp_dphy_sym321_dispdec
+//DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__DPHY_ENABLE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__DPHY_RESET__SHIFT 0x1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__PRECODER_ENABLE__SHIFT 0x2
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__MODE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__NUM_LANES__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__SHORT_LAST_TPS2_PERIOD__SHIFT 0xc
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__DPHY_ENABLE_MASK 0x00000001L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__DPHY_RESET_MASK 0x00000002L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__PRECODER_ENABLE_MASK 0x00000004L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__MODE_MASK 0x00000030L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__NUM_LANES_MASK 0x00000300L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CONTROL__SHORT_LAST_TPS2_PERIOD_MASK 0x00001000L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__STATUS__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__RESET_STATUS__SHIFT 0x1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__CURRENT_MODE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__ENCRYPTION_ENABLED__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__RATE_UPDATE_PENDING__SHIFT 0xc
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__SAT_UPDATE_PENDING__SHIFT 0x10
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__STATUS_MASK 0x00000001L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__RESET_STATUS_MASK 0x00000002L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__CURRENT_MODE_MASK 0x00000030L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__ENCRYPTION_ENABLED_MASK 0x00000100L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__RATE_UPDATE_PENDING_MASK 0x00001000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_STATUS__SAT_UPDATE_PENDING_MASK 0x00030000L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_UPDATE
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_UPDATE__SAT_UPDATE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_UPDATE__SAT_UPDATE_MASK 0x00000003L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL0__STREAM_VC_RATE_Y__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL0__STREAM_VC_RATE_X__SHIFT 0x19
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL0__STREAM_VC_RATE_Y_MASK 0x01FFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL0__STREAM_VC_RATE_X_MASK 0xFE000000L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL1__STREAM_VC_RATE_Y__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL1__STREAM_VC_RATE_X__SHIFT 0x19
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL1__STREAM_VC_RATE_Y_MASK 0x01FFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL1__STREAM_VC_RATE_X_MASK 0xFE000000L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL2
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL2__STREAM_VC_RATE_Y__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL2__STREAM_VC_RATE_X__SHIFT 0x19
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL2__STREAM_VC_RATE_Y_MASK 0x01FFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL2__STREAM_VC_RATE_X_MASK 0xFE000000L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL3
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL3__STREAM_VC_RATE_Y__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL3__STREAM_VC_RATE_X__SHIFT 0x19
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL3__STREAM_VC_RATE_Y_MASK 0x01FFFFFFL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_VC_RATE_CNTL3__STREAM_VC_RATE_X_MASK 0xFE000000L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC0__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC1__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC2__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC3__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS0__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS1__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS2__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_STREAM_SOURCE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_ENCRYPTION_ENABLE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_ENCRYPTION_TYPE__SHIFT 0x5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_SLOT_COUNT__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_STREAM_SOURCE_MASK 0x00000007L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_ENCRYPTION_ENABLE_MASK 0x00000010L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_ENCRYPTION_TYPE_MASK 0x00000020L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SAT_VC_STATUS3__SAT_SLOT_COUNT_MASK 0x00007F00L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT0__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL0__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT1__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL1__SHIFT 0xc
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT2__SHIFT 0x10
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL2__SHIFT 0x14
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT3__SHIFT 0x18
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL3__SHIFT 0x1c
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT0_MASK 0x00000007L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL0_MASK 0x00000070L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT1_MASK 0x00000700L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL1_MASK 0x00007000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT2_MASK 0x00070000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL2_MASK 0x00700000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_SELECT3_MASK 0x07000000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CONFIG__TP_PRBS_SEL3_MASK 0x70000000L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED0__TP_PRBS_SEED__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED0__TP_PRBS_SEED_MASK 0x7FFFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED1__TP_PRBS_SEED__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED1__TP_PRBS_SEED_MASK 0x7FFFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED2
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED2__TP_PRBS_SEED__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED2__TP_PRBS_SEED_MASK 0x7FFFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED3
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED3__TP_PRBS_SEED__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_PRBS_SEED3__TP_PRBS_SEED_MASK 0x7FFFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_SQ_PULSE
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_SQ_PULSE__TP_SQ_PULSE_WIDTH__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_SQ_PULSE__TP_SQ_PULSE_WIDTH_MASK 0x000000FFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM0__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM0__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM1__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM1__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM2
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM2__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM2__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM3
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM3__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM3__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM4__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM4__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM5__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM5__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM6
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM6__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM6__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM7
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM7__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM7__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM8__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM8__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM9
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM9__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM9__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM10
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM10__TP_CUSTOM__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_TP_CUSTOM10__TP_CUSTOM_MASK 0x00FFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__TOTAL_SLOT_COUNT_ERROR__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__RATE_ERROR__SHIFT 0x1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__VC_SAME_STREAM_SOURCE__SHIFT 0x2
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__NO_ACT_ERROR__SHIFT 0x3
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__UNEXPECT_MODE_TRANSITION__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__ILLEGAL_STREAM_SYMBOL__SHIFT 0x5
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__RATE_COUNTER_SATURATION__SHIFT 0x6
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__COUNTER_OVERFLOW__SHIFT 0x7
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__CIPHER_ERROR__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__TOTAL_SLOT_COUNT_ERROR_MASK 0x00000001L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__RATE_ERROR_MASK 0x00000002L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__VC_SAME_STREAM_SOURCE_MASK 0x00000004L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__NO_ACT_ERROR_MASK 0x00000008L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__UNEXPECT_MODE_TRANSITION_MASK 0x00000010L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__ILLEGAL_STREAM_SYMBOL_MASK 0x00000020L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__RATE_COUNTER_SATURATION_MASK 0x00000040L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__COUNTER_OVERFLOW_MASK 0x00000080L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_ERROR_STATUS__CIPHER_ERROR_MASK 0x00000100L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_ENABLE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_TYPE__SHIFT 0x2
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_SYMBOL__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_ENABLE__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_TYPE__SHIFT 0xa
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_SYMBOL__SHIFT 0xc
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_ENABLE__SHIFT 0x10
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_TYPE__SHIFT 0x12
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_SYMBOL__SHIFT 0x14
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_ENABLE__SHIFT 0x18
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_TYPE__SHIFT 0x1a
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_SYMBOL__SHIFT 0x1c
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_ENABLE_MASK 0x00000003L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_TYPE_MASK 0x00000004L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM0_OVR_SYMBOL_MASK 0x000000F0L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_ENABLE_MASK 0x00000300L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_TYPE_MASK 0x00000400L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM1_OVR_SYMBOL_MASK 0x0000F000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_ENABLE_MASK 0x00030000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_TYPE_MASK 0x00040000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM2_OVR_SYMBOL_MASK 0x00F00000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_ENABLE_MASK 0x03000000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_TYPE_MASK 0x04000000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_OVERRIDE__STREAM3_OVR_SYMBOL_MASK 0xF0000000L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS0__LLCP_COUNT__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS0__LLCP_COUNT_MASK 0x0000FFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS1__CYCLE_COUNT__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_STATUS1__CYCLE_COUNT_MASK 0xFFFFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__LLCP_COUNT_ENABLE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__LLCP_COUNT_RESET__SHIFT 0x1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__CYCLE_COUNT_ENABLE__SHIFT 0x2
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__CYCLE_COUNT_RESET__SHIFT 0x3
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__LLCP_COUNT_ENABLE_MASK 0x00000001L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__LLCP_COUNT_RESET_MASK 0x00000002L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__CYCLE_COUNT_ENABLE_MASK 0x00000004L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_SYMBOL_COUNT_CONTROL__CYCLE_COUNT_RESET_MASK 0x00000008L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_ENABLE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_RESET__SHIFT 0x1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_LANE_SOURCE__SHIFT 0x4
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_TAP_SOURCE__SHIFT 0x6
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_SCHEDULER_SOURCE__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_IGNORE_VCPF__SHIFT 0x10
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_START_EVENT__SHIFT 0x11
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_USE_NUM_SYMBOLS__SHIFT 0x14
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_END_EVENT__SHIFT 0x15
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_ENABLE_MASK 0x00000001L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_RESET_MASK 0x00000002L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_LANE_SOURCE_MASK 0x00000030L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_TAP_SOURCE_MASK 0x000000C0L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_SCHEDULER_SOURCE_MASK 0x00003F00L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_IGNORE_VCPF_MASK 0x00010000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_START_EVENT_MASK 0x000E0000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_USE_NUM_SYMBOLS_MASK 0x00100000L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG0__CRC_END_EVENT_MASK 0x00600000L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG1
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG1__CRC_NUM_SYMBOLS__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_CONFIG1__CRC_NUM_SYMBOLS_MASK 0xFFFFFFFFL
+//DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_STATUS
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_STATUS__CRC_DONE__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_STATUS__CRC_VALUE__SHIFT 0x8
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_STATUS__CRC_DONE_MASK 0x00000001L
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_STATUS__CRC_VALUE_MASK 0x00FFFF00L
+//DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_COUNT
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_COUNT__CRC_SYMBOL_COUNT__SHIFT 0x0
+#define DP_DPHY_SYM321_DP_DPHY_SYM32_CRC_COUNT__CRC_SYMBOL_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dchvm_hvm_dispdec
+//DCHVM_CTRL0
+#define DCHVM_CTRL0__HOSTVM_INIT_REQ__SHIFT 0x0
+#define DCHVM_CTRL0__HOSTVM_INIT_REQ_MASK 0x00000001L
+//DCHVM_CTRL1
+#define DCHVM_CTRL1__DUMMY1__SHIFT 0x0
+#define DCHVM_CTRL1__DUMMY1_MASK 0xFFFFFFFFL
+//DCHVM_CLK_CTRL
+#define DCHVM_CLK_CTRL__HVM_DISPCLK_R_GATE_DIS__SHIFT 0x0
+#define DCHVM_CLK_CTRL__HVM_DISPCLK_G_GATE_DIS__SHIFT 0x1
+#define DCHVM_CLK_CTRL__HVM_DCFCLK_R_GATE_DIS__SHIFT 0x4
+#define DCHVM_CLK_CTRL__HVM_DCFCLK_G_GATE_DIS__SHIFT 0x5
+#define DCHVM_CLK_CTRL__TR_REQ_REQCLKREQ_MODE__SHIFT 0x8
+#define DCHVM_CLK_CTRL__TW_RSP_COMPCLKREQ_MODE__SHIFT 0xa
+#define DCHVM_CLK_CTRL__HVM_FGCG_REP_DIS__SHIFT 0xc
+#define DCHVM_CLK_CTRL__HVM_DISPCLK_R_GATE_DIS_MASK 0x00000001L
+#define DCHVM_CLK_CTRL__HVM_DISPCLK_G_GATE_DIS_MASK 0x00000002L
+#define DCHVM_CLK_CTRL__HVM_DCFCLK_R_GATE_DIS_MASK 0x00000010L
+#define DCHVM_CLK_CTRL__HVM_DCFCLK_G_GATE_DIS_MASK 0x00000020L
+#define DCHVM_CLK_CTRL__TR_REQ_REQCLKREQ_MODE_MASK 0x00000300L
+#define DCHVM_CLK_CTRL__TW_RSP_COMPCLKREQ_MODE_MASK 0x00000C00L
+#define DCHVM_CLK_CTRL__HVM_FGCG_REP_DIS_MASK 0x00001000L
+//DCHVM_MEM_CTRL
+#define DCHVM_MEM_CTRL__HVM_GPUVMRET_PWR_REQ_DIS__SHIFT 0x0
+#define DCHVM_MEM_CTRL__HVM_GPUVMRET_FORCE_REQ__SHIFT 0x2
+#define DCHVM_MEM_CTRL__HVM_GPUVMRET_POWER_STATUS__SHIFT 0x4
+#define DCHVM_MEM_CTRL__HVM_GPUVMRET_PWR_REQ_DIS_MASK 0x00000001L
+#define DCHVM_MEM_CTRL__HVM_GPUVMRET_FORCE_REQ_MASK 0x0000000CL
+#define DCHVM_MEM_CTRL__HVM_GPUVMRET_POWER_STATUS_MASK 0x00000030L
+//DCHVM_RIOMMU_CTRL0
+#define DCHVM_RIOMMU_CTRL0__HOSTVM_PREFETCH_REQ__SHIFT 0x0
+#define DCHVM_RIOMMU_CTRL0__HOSTVM_POWERSTATUS__SHIFT 0x1
+#define DCHVM_RIOMMU_CTRL0__HOSTVM_PREFETCH_REQ_MASK 0x00000001L
+#define DCHVM_RIOMMU_CTRL0__HOSTVM_POWERSTATUS_MASK 0x00000002L
+//DCHVM_RIOMMU_STAT0
+#define DCHVM_RIOMMU_STAT0__RIOMMU_ACTIVE__SHIFT 0x0
+#define DCHVM_RIOMMU_STAT0__HOSTVM_PREFETCH_DONE__SHIFT 0x1
+#define DCHVM_RIOMMU_STAT0__RIOMMU_ACTIVE_MASK 0x00000001L
+#define DCHVM_RIOMMU_STAT0__HOSTVM_PREFETCH_DONE_MASK 0x00000002L
+
+
+// addressBlock: dce_dc_dlpc_dlpc_dispdec
+//DLPC_ENABLE
+#define DLPC_ENABLE__DLPC_EN__SHIFT 0x0
+#define DLPC_ENABLE__PWRUP_TRIGGER_EN__SHIFT 0x4
+#define DLPC_ENABLE__PWRUP_TRIGGER_CLR__SHIFT 0x5
+#define DLPC_ENABLE__PWRUP_TRIGGER_STATUS__SHIFT 0x6
+#define DLPC_ENABLE__OTG_RESYNC_TRIGGER_EN__SHIFT 0x8
+#define DLPC_ENABLE__OTG_RESYNC_TRIGGER_CLR__SHIFT 0x9
+#define DLPC_ENABLE__OTG_RESYNC_TRIGGER_STATUS__SHIFT 0xa
+#define DLPC_ENABLE__DCN_ZSC_LONO_PWRUP_TRIGGER_EN__SHIFT 0xc
+#define DLPC_ENABLE__DCN_ZSC_LONO_PWRUP_TRIGGER_CLR__SHIFT 0xd
+#define DLPC_ENABLE__DCN_ZSC_LONO_PWRUP_TRIGGER_STATUS__SHIFT 0xe
+#define DLPC_ENABLE__DLPC_EN_MASK 0x00000001L
+#define DLPC_ENABLE__PWRUP_TRIGGER_EN_MASK 0x00000010L
+#define DLPC_ENABLE__PWRUP_TRIGGER_CLR_MASK 0x00000020L
+#define DLPC_ENABLE__PWRUP_TRIGGER_STATUS_MASK 0x00000040L
+#define DLPC_ENABLE__OTG_RESYNC_TRIGGER_EN_MASK 0x00000100L
+#define DLPC_ENABLE__OTG_RESYNC_TRIGGER_CLR_MASK 0x00000200L
+#define DLPC_ENABLE__OTG_RESYNC_TRIGGER_STATUS_MASK 0x00000400L
+#define DLPC_ENABLE__DCN_ZSC_LONO_PWRUP_TRIGGER_EN_MASK 0x00001000L
+#define DLPC_ENABLE__DCN_ZSC_LONO_PWRUP_TRIGGER_CLR_MASK 0x00002000L
+#define DLPC_ENABLE__DCN_ZSC_LONO_PWRUP_TRIGGER_STATUS_MASK 0x00004000L
+//DLPC_CURRENT_COUNT
+#define DLPC_CURRENT_COUNT__VALUE__SHIFT 0x0
+#define DLPC_CURRENT_COUNT__VALUE_MASK 0xFFFFFFFFL
+//DLPC_OPTC_SNAPSHOT
+#define DLPC_OPTC_SNAPSHOT__VALUE__SHIFT 0x0
+#define DLPC_OPTC_SNAPSHOT__VALUE_MASK 0xFFFFFFFFL
+//DLPC_PWRUP
+#define DLPC_PWRUP__VALUE__SHIFT 0x0
+#define DLPC_PWRUP__VALUE_MASK 0xFFFFFFFFL
+//DLPC_OTG_RESYNC
+#define DLPC_OTG_RESYNC__VALUE__SHIFT 0x0
+#define DLPC_OTG_RESYNC__VALUE_MASK 0xFFFFFFFFL
+//DLPC_DCN_ZSC_LONO_PWRUP
+#define DLPC_DCN_ZSC_LONO_PWRUP__VALUE__SHIFT 0x0
+#define DLPC_DCN_ZSC_LONO_PWRUP__VALUE_MASK 0xFFFFFFFFL
+//DLPC_SPARE
+#define DLPC_SPARE__SPARE__SHIFT 0x0
+#define DLPC_SPARE__SPARE_MASK 0xFFFFFFFFL
+//DLPC_COUNTER_INIT_VALUE
+#define DLPC_COUNTER_INIT_VALUE__DLPC_COUNTER_INIT_VALUE__SHIFT 0x0
+#define DLPC_COUNTER_INIT_VALUE__DLPC_COUNTER_INIT_VALUE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dpia_dpia_mu0_dpiadec
+//DPIA_MU_CLOCK_CTRL
+#define DPIA_MU_CLOCK_CTRL__DPIA_REFCLK_GATE_DIS__SHIFT 0x0
+#define DPIA_MU_CLOCK_CTRL__DPIA_CIO_CLKS_GATE_DIS__SHIFT 0x2
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_REFCLK_R_GATE_DIS__SHIFT 0x8
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_ML_SSCLK_G_GATE_DIS__SHIFT 0xb
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_AUXIN_SSCLK_G_GATE_DIS__SHIFT 0xc
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_AUXOUT_SSCLK_G_GATE_DIS__SHIFT 0xd
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_TMUCLK_G_GATE_DIS__SHIFT 0xe
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_TEST_CLK_SEL__SHIFT 0x18
+#define DPIA_MU_CLOCK_CTRL__DPIA_REFCLK_GATE_DIS_MASK 0x00000001L
+#define DPIA_MU_CLOCK_CTRL__DPIA_CIO_CLKS_GATE_DIS_MASK 0x00000004L
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_REFCLK_R_GATE_DIS_MASK 0x00000100L
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_ML_SSCLK_G_GATE_DIS_MASK 0x00000800L
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_AUXIN_SSCLK_G_GATE_DIS_MASK 0x00001000L
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_AUXOUT_SSCLK_G_GATE_DIS_MASK 0x00002000L
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_TMUCLK_G_GATE_DIS_MASK 0x00004000L
+#define DPIA_MU_CLOCK_CTRL__DPIA_MU_TEST_CLK_SEL_MASK 0xFF000000L
+//DPIA_MU_CLOCK_CTRL_DPIA_PORT0
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__CLK_SRC__SHIFT 0x0
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__ML_CLK_EN__SHIFT 0x1
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__AUX_CLK_EN__SHIFT 0x2
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__ML_SSCLK_CLOCK_ON__SHIFT 0x9
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__TMUCLK_CLOCK_ON__SHIFT 0xa
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__AUXIN_SSCLK_CLOCK_ON__SHIFT 0x11
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__AUXOUT_SSCLK_CLOCK_ON__SHIFT 0x12
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__CLK_SRC_MASK 0x00000001L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__ML_CLK_EN_MASK 0x00000002L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__AUX_CLK_EN_MASK 0x00000004L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__ML_SSCLK_CLOCK_ON_MASK 0x00000200L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__TMUCLK_CLOCK_ON_MASK 0x00000400L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__AUXIN_SSCLK_CLOCK_ON_MASK 0x00020000L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT0__AUXOUT_SSCLK_CLOCK_ON_MASK 0x00040000L
+//DPIA_MU_RESET_CTRL_DPIA_PORT0
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__CORE_SW_RST__SHIFT 0x0
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__CAPREG_SW_RST__SHIFT 0x1
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__AUX_TPI_SW_RST__SHIFT 0x2
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__ML_TPI_SW_RST__SHIFT 0x3
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__CAPREG_RESET_DONE__SHIFT 0x8
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__CORE_RESET_DONE__SHIFT 0x9
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__CORE_SW_RST_MASK 0x00000001L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__CAPREG_SW_RST_MASK 0x00000002L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__AUX_TPI_SW_RST_MASK 0x00000004L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__ML_TPI_SW_RST_MASK 0x00000008L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__CAPREG_RESET_DONE_MASK 0x00000100L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT0__CORE_RESET_DONE_MASK 0x00000200L
+//DPIA_MU_CLOCK_CTRL_DPIA_PORT1
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__CLK_SRC__SHIFT 0x0
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__ML_CLK_EN__SHIFT 0x1
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__AUX_CLK_EN__SHIFT 0x2
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__ML_SSCLK_CLOCK_ON__SHIFT 0x9
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__TMUCLK_CLOCK_ON__SHIFT 0xa
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__AUXIN_SSCLK_CLOCK_ON__SHIFT 0x11
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__AUXOUT_SSCLK_CLOCK_ON__SHIFT 0x12
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__CLK_SRC_MASK 0x00000001L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__ML_CLK_EN_MASK 0x00000002L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__AUX_CLK_EN_MASK 0x00000004L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__ML_SSCLK_CLOCK_ON_MASK 0x00000200L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__TMUCLK_CLOCK_ON_MASK 0x00000400L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__AUXIN_SSCLK_CLOCK_ON_MASK 0x00020000L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT1__AUXOUT_SSCLK_CLOCK_ON_MASK 0x00040000L
+//DPIA_MU_RESET_CTRL_DPIA_PORT1
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__CORE_SW_RST__SHIFT 0x0
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__CAPREG_SW_RST__SHIFT 0x1
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__AUX_TPI_SW_RST__SHIFT 0x2
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__ML_TPI_SW_RST__SHIFT 0x3
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__CAPREG_RESET_DONE__SHIFT 0x8
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__CORE_RESET_DONE__SHIFT 0x9
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__CORE_SW_RST_MASK 0x00000001L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__CAPREG_SW_RST_MASK 0x00000002L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__AUX_TPI_SW_RST_MASK 0x00000004L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__ML_TPI_SW_RST_MASK 0x00000008L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__CAPREG_RESET_DONE_MASK 0x00000100L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT1__CORE_RESET_DONE_MASK 0x00000200L
+//DPIA_MU_CLOCK_CTRL_DPIA_PORT2
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__CLK_SRC__SHIFT 0x0
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__ML_CLK_EN__SHIFT 0x1
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__AUX_CLK_EN__SHIFT 0x2
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__ML_SSCLK_CLOCK_ON__SHIFT 0x9
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__TMUCLK_CLOCK_ON__SHIFT 0xa
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__AUXIN_SSCLK_CLOCK_ON__SHIFT 0x11
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__AUXOUT_SSCLK_CLOCK_ON__SHIFT 0x12
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__CLK_SRC_MASK 0x00000001L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__ML_CLK_EN_MASK 0x00000002L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__AUX_CLK_EN_MASK 0x00000004L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__ML_SSCLK_CLOCK_ON_MASK 0x00000200L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__TMUCLK_CLOCK_ON_MASK 0x00000400L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__AUXIN_SSCLK_CLOCK_ON_MASK 0x00020000L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT2__AUXOUT_SSCLK_CLOCK_ON_MASK 0x00040000L
+//DPIA_MU_RESET_CTRL_DPIA_PORT2
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__CORE_SW_RST__SHIFT 0x0
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__CAPREG_SW_RST__SHIFT 0x1
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__AUX_TPI_SW_RST__SHIFT 0x2
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__ML_TPI_SW_RST__SHIFT 0x3
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__CAPREG_RESET_DONE__SHIFT 0x8
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__CORE_RESET_DONE__SHIFT 0x9
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__CORE_SW_RST_MASK 0x00000001L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__CAPREG_SW_RST_MASK 0x00000002L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__AUX_TPI_SW_RST_MASK 0x00000004L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__ML_TPI_SW_RST_MASK 0x00000008L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__CAPREG_RESET_DONE_MASK 0x00000100L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT2__CORE_RESET_DONE_MASK 0x00000200L
+//DPIA_MU_CLOCK_CTRL_DPIA_PORT3
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__CLK_SRC__SHIFT 0x0
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__ML_CLK_EN__SHIFT 0x1
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__AUX_CLK_EN__SHIFT 0x2
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__ML_SSCLK_CLOCK_ON__SHIFT 0x9
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__TMUCLK_CLOCK_ON__SHIFT 0xa
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__AUXIN_SSCLK_CLOCK_ON__SHIFT 0x11
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__AUXOUT_SSCLK_CLOCK_ON__SHIFT 0x12
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__CLK_SRC_MASK 0x00000001L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__ML_CLK_EN_MASK 0x00000002L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__AUX_CLK_EN_MASK 0x00000004L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__ML_SSCLK_CLOCK_ON_MASK 0x00000200L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__TMUCLK_CLOCK_ON_MASK 0x00000400L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__AUXIN_SSCLK_CLOCK_ON_MASK 0x00020000L
+#define DPIA_MU_CLOCK_CTRL_DPIA_PORT3__AUXOUT_SSCLK_CLOCK_ON_MASK 0x00040000L
+//DPIA_MU_RESET_CTRL_DPIA_PORT3
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__CORE_SW_RST__SHIFT 0x0
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__CAPREG_SW_RST__SHIFT 0x1
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__AUX_TPI_SW_RST__SHIFT 0x2
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__ML_TPI_SW_RST__SHIFT 0x3
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__CAPREG_RESET_DONE__SHIFT 0x8
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__CORE_RESET_DONE__SHIFT 0x9
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__CORE_SW_RST_MASK 0x00000001L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__CAPREG_SW_RST_MASK 0x00000002L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__AUX_TPI_SW_RST_MASK 0x00000004L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__ML_TPI_SW_RST_MASK 0x00000008L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__CAPREG_RESET_DONE_MASK 0x00000100L
+#define DPIA_MU_RESET_CTRL_DPIA_PORT3__CORE_RESET_DONE_MASK 0x00000200L
+//DPIA_MU_TPI_STATUS_DPIA_PORT0
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__MAIN_LINK_CREDIT_COUNT__SHIFT 0x0
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__AUXOUT_CREDIT_COUNT__SHIFT 0x8
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__AUXIN_CREDIT_COUNT__SHIFT 0x10
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__MAIN_LINK_EXTRA_CREDIT_RECEIVED__SHIFT 0x18
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__AUXOUT_EXTRA_CREDIT_RECEIVED__SHIFT 0x19
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__AUXIN_EXTRA_PAYLOAD_RECEIVED__SHIFT 0x1a
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__MAIN_LINK_CREDIT_COUNT_MASK 0x0000007FL
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__AUXOUT_CREDIT_COUNT_MASK 0x00000F00L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__AUXIN_CREDIT_COUNT_MASK 0x000F0000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__MAIN_LINK_EXTRA_CREDIT_RECEIVED_MASK 0x01000000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__AUXOUT_EXTRA_CREDIT_RECEIVED_MASK 0x02000000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT0__AUXIN_EXTRA_PAYLOAD_RECEIVED_MASK 0x04000000L
+//DPIA_MU_TPI_STATUS_DPIA_PORT1
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__MAIN_LINK_CREDIT_COUNT__SHIFT 0x0
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__AUXOUT_CREDIT_COUNT__SHIFT 0x8
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__AUXIN_CREDIT_COUNT__SHIFT 0x10
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__MAIN_LINK_EXTRA_CREDIT_RECEIVED__SHIFT 0x18
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__AUXOUT_EXTRA_CREDIT_RECEIVED__SHIFT 0x19
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__AUXIN_EXTRA_PAYLOAD_RECEIVED__SHIFT 0x1a
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__MAIN_LINK_CREDIT_COUNT_MASK 0x0000007FL
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__AUXOUT_CREDIT_COUNT_MASK 0x00000F00L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__AUXIN_CREDIT_COUNT_MASK 0x000F0000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__MAIN_LINK_EXTRA_CREDIT_RECEIVED_MASK 0x01000000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__AUXOUT_EXTRA_CREDIT_RECEIVED_MASK 0x02000000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT1__AUXIN_EXTRA_PAYLOAD_RECEIVED_MASK 0x04000000L
+//DPIA_MU_TPI_STATUS_DPIA_PORT2
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__MAIN_LINK_CREDIT_COUNT__SHIFT 0x0
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__AUXOUT_CREDIT_COUNT__SHIFT 0x8
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__AUXIN_CREDIT_COUNT__SHIFT 0x10
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__MAIN_LINK_EXTRA_CREDIT_RECEIVED__SHIFT 0x18
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__AUXOUT_EXTRA_CREDIT_RECEIVED__SHIFT 0x19
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__AUXIN_EXTRA_PAYLOAD_RECEIVED__SHIFT 0x1a
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__MAIN_LINK_CREDIT_COUNT_MASK 0x0000007FL
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__AUXOUT_CREDIT_COUNT_MASK 0x00000F00L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__AUXIN_CREDIT_COUNT_MASK 0x000F0000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__MAIN_LINK_EXTRA_CREDIT_RECEIVED_MASK 0x01000000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__AUXOUT_EXTRA_CREDIT_RECEIVED_MASK 0x02000000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT2__AUXIN_EXTRA_PAYLOAD_RECEIVED_MASK 0x04000000L
+//DPIA_MU_TPI_STATUS_DPIA_PORT3
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__MAIN_LINK_CREDIT_COUNT__SHIFT 0x0
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__AUXOUT_CREDIT_COUNT__SHIFT 0x8
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__AUXIN_CREDIT_COUNT__SHIFT 0x10
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__MAIN_LINK_EXTRA_CREDIT_RECEIVED__SHIFT 0x18
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__AUXOUT_EXTRA_CREDIT_RECEIVED__SHIFT 0x19
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__AUXIN_EXTRA_PAYLOAD_RECEIVED__SHIFT 0x1a
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__MAIN_LINK_CREDIT_COUNT_MASK 0x0000007FL
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__AUXOUT_CREDIT_COUNT_MASK 0x00000F00L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__AUXIN_CREDIT_COUNT_MASK 0x000F0000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__MAIN_LINK_EXTRA_CREDIT_RECEIVED_MASK 0x01000000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__AUXOUT_EXTRA_CREDIT_RECEIVED_MASK 0x02000000L
+#define DPIA_MU_TPI_STATUS_DPIA_PORT3__AUXIN_EXTRA_PAYLOAD_RECEIVED_MASK 0x04000000L
+//DPIA_MU_TPI_MAX_CREDIT_COUNT
+#define DPIA_MU_TPI_MAX_CREDIT_COUNT__DPIA_TPI_MAX_CREDIT_COUNT__SHIFT 0x0
+#define DPIA_MU_TPI_MAX_CREDIT_COUNT__DPIA_TPI_MAX_CREDIT_COUNT_MASK 0x0000003FL
+//DPIA_MU_INTERRUPT_STATUS
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_PORT0_INT_STATUS__SHIFT 0x0
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_PORT1_INT_STATUS__SHIFT 0x3
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_PORT2_INT_STATUS__SHIFT 0x6
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_PORT3_INT_STATUS__SHIFT 0x9
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_MU_LOCAL_INT_STATUS__SHIFT 0x1f
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_PORT0_INT_STATUS_MASK 0x00000007L
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_PORT1_INT_STATUS_MASK 0x00000038L
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_PORT2_INT_STATUS_MASK 0x000001C0L
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_PORT3_INT_STATUS_MASK 0x00000E00L
+#define DPIA_MU_INTERRUPT_STATUS__DPIA_MU_LOCAL_INT_STATUS_MASK 0x80000000L
+//DPIA_MU_INTERRUPT_CTRL
+#define DPIA_MU_INTERRUPT_CTRL__DPIA_DCN_INT_EN__SHIFT 0x0
+#define DPIA_MU_INTERRUPT_CTRL__RBBMIF_TIMEOUT_INT_MASK__SHIFT 0x18
+#define DPIA_MU_INTERRUPT_CTRL__DPIA_DCN_INT_EN_MASK 0x00000001L
+#define DPIA_MU_INTERRUPT_CTRL__RBBMIF_TIMEOUT_INT_MASK_MASK 0x01000000L
+//DPIA_MU_LOCAL_INTERRUPT_CTRL
+#define DPIA_MU_LOCAL_INTERRUPT_CTRL__RBBMIF_TIMEOUT_INT_STATUS__SHIFT 0x0
+#define DPIA_MU_LOCAL_INTERRUPT_CTRL__RBBMIF_TIMEOUT_ADDR__SHIFT 0x2
+#define DPIA_MU_LOCAL_INTERRUPT_CTRL__RBBMIF_TIMEOUT_OP__SHIFT 0x14
+#define DPIA_MU_LOCAL_INTERRUPT_CTRL__DPIA_RST_DONE_INT_STATUS__SHIFT 0x18
+#define DPIA_MU_LOCAL_INTERRUPT_CTRL__RBBMIF_TIMEOUT_INT_STATUS_MASK 0x00000001L
+#define DPIA_MU_LOCAL_INTERRUPT_CTRL__RBBMIF_TIMEOUT_ADDR_MASK 0x000FFFFCL
+#define DPIA_MU_LOCAL_INTERRUPT_CTRL__RBBMIF_TIMEOUT_OP_MASK 0x00100000L
+#define DPIA_MU_LOCAL_INTERRUPT_CTRL__DPIA_RST_DONE_INT_STATUS_MASK 0x01000000L
+//DPIA_MU_LOCAL_INTERRUPT_ACK
+#define DPIA_MU_LOCAL_INTERRUPT_ACK__RBBMIF_TIMEOUT_INT_ACK__SHIFT 0x0
+#define DPIA_MU_LOCAL_INTERRUPT_ACK__DPIA_RST_DONE_INT_ACK__SHIFT 0x1
+#define DPIA_MU_LOCAL_INTERRUPT_ACK__RBBMIF_TIMEOUT_INT_ACK_MASK 0x00000001L
+#define DPIA_MU_LOCAL_INTERRUPT_ACK__DPIA_RST_DONE_INT_ACK_MASK 0x00000002L
+//DPIA_MU_RBBMIF_TIMEOUT_CTRL
+#define DPIA_MU_RBBMIF_TIMEOUT_CTRL__RBBMIF_TIMEOUT_DELAY__SHIFT 0x0
+#define DPIA_MU_RBBMIF_TIMEOUT_CTRL__RBBMIF_TIMEOUT_HOLD__SHIFT 0x14
+#define DPIA_MU_RBBMIF_TIMEOUT_CTRL__RBBMIF_TIMEOUT_DELAY_MASK 0x000FFFFFL
+#define DPIA_MU_RBBMIF_TIMEOUT_CTRL__RBBMIF_TIMEOUT_HOLD_MASK 0xFFF00000L
+//DPIA_MU_RBBMIF_TIMEOUT_CTRL2
+#define DPIA_MU_RBBMIF_TIMEOUT_CTRL2__RBBMIF_TIMEOUT_DIS__SHIFT 0x0
+#define DPIA_MU_RBBMIF_TIMEOUT_CTRL2__RBBMIF_TIMEOUT_DIS_MASK 0x00000001L
+//DPIA_MU_RBBMIF_STATUS
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_INVALID_ACCESS_FLAG__SHIFT 0x0
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_INVALID_ACCESS_TYPE__SHIFT 0x1
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_INVALID_ACCESS_ADDR__SHIFT 0x5
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_TIMEOUT_STATUS_READBACK__SHIFT 0x18
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_INVALID_ACCESS_STATUS_CLEAR__SHIFT 0x1f
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_INVALID_ACCESS_FLAG_MASK 0x00000001L
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_INVALID_ACCESS_TYPE_MASK 0x00000006L
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_INVALID_ACCESS_ADDR_MASK 0x007FFFE0L
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_TIMEOUT_STATUS_READBACK_MASK 0x01000000L
+#define DPIA_MU_RBBMIF_STATUS__RBBMIF_INVALID_ACCESS_STATUS_CLEAR_MASK 0x80000000L
+//DPIA_MU_MICROSECOND_REF_CTRL
+#define DPIA_MU_MICROSECOND_REF_CTRL__MICROSECOND_TIME_BASE_DIV__SHIFT 0x0
+#define DPIA_MU_MICROSECOND_REF_CTRL__MICROSECOND_TIME_BASE_DIV_MASK 0x0000007FL
+//DPIA_MU_PORT_ADP_STATUS
+#define DPIA_MU_PORT_ADP_STATUS__DPIA_PORT0_HIDDEN_STATUS__SHIFT 0x0
+#define DPIA_MU_PORT_ADP_STATUS__DPIA_PORT1_HIDDEN_STATUS__SHIFT 0x1
+#define DPIA_MU_PORT_ADP_STATUS__DPIA_PORT2_HIDDEN_STATUS__SHIFT 0x2
+#define DPIA_MU_PORT_ADP_STATUS__DPIA_PORT3_HIDDEN_STATUS__SHIFT 0x3
+#define DPIA_MU_PORT_ADP_STATUS__DPIA_PORT0_HIDDEN_STATUS_MASK 0x00000001L
+#define DPIA_MU_PORT_ADP_STATUS__DPIA_PORT1_HIDDEN_STATUS_MASK 0x00000002L
+#define DPIA_MU_PORT_ADP_STATUS__DPIA_PORT2_HIDDEN_STATUS_MASK 0x00000004L
+#define DPIA_MU_PORT_ADP_STATUS__DPIA_PORT3_HIDDEN_STATUS_MASK 0x00000008L
+//DPIA_GLUE_CTRL
+#define DPIA_GLUE_CTRL__DPIA_IO_EN__SHIFT 0x0
+#define DPIA_GLUE_CTRL__DPIA_IO_EN_MASK 0x00000001L
+//DPIA_DEBUG_CONTROL
+//DPIA_PERF_COUNT_CONTROL0
+#define DPIA_PERF_COUNT_CONTROL0__ENABLE__SHIFT 0x0
+#define DPIA_PERF_COUNT_CONTROL0__MODE__SHIFT 0x1
+#define DPIA_PERF_COUNT_CONTROL0__STATUS__SHIFT 0x4
+#define DPIA_PERF_COUNT_CONTROL0__PORT_SELECT__SHIFT 0x5
+#define DPIA_PERF_COUNT_CONTROL0__STAT_SELECT__SHIFT 0x8
+#define DPIA_PERF_COUNT_CONTROL0__COUNT_LIMIT__SHIFT 0xc
+#define DPIA_PERF_COUNT_CONTROL0__ENABLE_MASK 0x00000001L
+#define DPIA_PERF_COUNT_CONTROL0__MODE_MASK 0x0000000EL
+#define DPIA_PERF_COUNT_CONTROL0__STATUS_MASK 0x00000010L
+#define DPIA_PERF_COUNT_CONTROL0__PORT_SELECT_MASK 0x000000E0L
+#define DPIA_PERF_COUNT_CONTROL0__STAT_SELECT_MASK 0x00000300L
+#define DPIA_PERF_COUNT_CONTROL0__COUNT_LIMIT_MASK 0xFFFFF000L
+//DPIA_PERF_COUNT_CONTROL1
+#define DPIA_PERF_COUNT_CONTROL1__ENABLE__SHIFT 0x0
+#define DPIA_PERF_COUNT_CONTROL1__MODE__SHIFT 0x1
+#define DPIA_PERF_COUNT_CONTROL1__STATUS__SHIFT 0x4
+#define DPIA_PERF_COUNT_CONTROL1__PORT_SELECT__SHIFT 0x5
+#define DPIA_PERF_COUNT_CONTROL1__STAT_SELECT__SHIFT 0x8
+#define DPIA_PERF_COUNT_CONTROL1__COUNT_LIMIT__SHIFT 0xc
+#define DPIA_PERF_COUNT_CONTROL1__ENABLE_MASK 0x00000001L
+#define DPIA_PERF_COUNT_CONTROL1__MODE_MASK 0x0000000EL
+#define DPIA_PERF_COUNT_CONTROL1__STATUS_MASK 0x00000010L
+#define DPIA_PERF_COUNT_CONTROL1__PORT_SELECT_MASK 0x000000E0L
+#define DPIA_PERF_COUNT_CONTROL1__STAT_SELECT_MASK 0x00000300L
+#define DPIA_PERF_COUNT_CONTROL1__COUNT_LIMIT_MASK 0xFFFFF000L
+//DPIA_PERF_COUNT_CONTROL2
+#define DPIA_PERF_COUNT_CONTROL2__ENABLE__SHIFT 0x0
+#define DPIA_PERF_COUNT_CONTROL2__MODE__SHIFT 0x1
+#define DPIA_PERF_COUNT_CONTROL2__STATUS__SHIFT 0x4
+#define DPIA_PERF_COUNT_CONTROL2__PORT_SELECT__SHIFT 0x5
+#define DPIA_PERF_COUNT_CONTROL2__STAT_SELECT__SHIFT 0x8
+#define DPIA_PERF_COUNT_CONTROL2__COUNT_LIMIT__SHIFT 0xc
+#define DPIA_PERF_COUNT_CONTROL2__ENABLE_MASK 0x00000001L
+#define DPIA_PERF_COUNT_CONTROL2__MODE_MASK 0x0000000EL
+#define DPIA_PERF_COUNT_CONTROL2__STATUS_MASK 0x00000010L
+#define DPIA_PERF_COUNT_CONTROL2__PORT_SELECT_MASK 0x000000E0L
+#define DPIA_PERF_COUNT_CONTROL2__STAT_SELECT_MASK 0x00000300L
+#define DPIA_PERF_COUNT_CONTROL2__COUNT_LIMIT_MASK 0xFFFFF000L
+//DPIA_PERF_COUNT_CONTROL3
+#define DPIA_PERF_COUNT_CONTROL3__ENABLE__SHIFT 0x0
+#define DPIA_PERF_COUNT_CONTROL3__MODE__SHIFT 0x1
+#define DPIA_PERF_COUNT_CONTROL3__STATUS__SHIFT 0x4
+#define DPIA_PERF_COUNT_CONTROL3__PORT_SELECT__SHIFT 0x5
+#define DPIA_PERF_COUNT_CONTROL3__STAT_SELECT__SHIFT 0x8
+#define DPIA_PERF_COUNT_CONTROL3__COUNT_LIMIT__SHIFT 0xc
+#define DPIA_PERF_COUNT_CONTROL3__ENABLE_MASK 0x00000001L
+#define DPIA_PERF_COUNT_CONTROL3__MODE_MASK 0x0000000EL
+#define DPIA_PERF_COUNT_CONTROL3__STATUS_MASK 0x00000010L
+#define DPIA_PERF_COUNT_CONTROL3__PORT_SELECT_MASK 0x000000E0L
+#define DPIA_PERF_COUNT_CONTROL3__STAT_SELECT_MASK 0x00000300L
+#define DPIA_PERF_COUNT_CONTROL3__COUNT_LIMIT_MASK 0xFFFFF000L
+//DPIA_PERF_COUNT_CONTROL4
+#define DPIA_PERF_COUNT_CONTROL4__ENABLE__SHIFT 0x0
+#define DPIA_PERF_COUNT_CONTROL4__MODE__SHIFT 0x1
+#define DPIA_PERF_COUNT_CONTROL4__STATUS__SHIFT 0x4
+#define DPIA_PERF_COUNT_CONTROL4__PORT_SELECT__SHIFT 0x5
+#define DPIA_PERF_COUNT_CONTROL4__STAT_SELECT__SHIFT 0x8
+#define DPIA_PERF_COUNT_CONTROL4__COUNT_LIMIT__SHIFT 0xc
+#define DPIA_PERF_COUNT_CONTROL4__ENABLE_MASK 0x00000001L
+#define DPIA_PERF_COUNT_CONTROL4__MODE_MASK 0x0000000EL
+#define DPIA_PERF_COUNT_CONTROL4__STATUS_MASK 0x00000010L
+#define DPIA_PERF_COUNT_CONTROL4__PORT_SELECT_MASK 0x000000E0L
+#define DPIA_PERF_COUNT_CONTROL4__STAT_SELECT_MASK 0x00000300L
+#define DPIA_PERF_COUNT_CONTROL4__COUNT_LIMIT_MASK 0xFFFFF000L
+//DPIA_PERF_COUNT_CONTROL5
+#define DPIA_PERF_COUNT_CONTROL5__ENABLE__SHIFT 0x0
+#define DPIA_PERF_COUNT_CONTROL5__MODE__SHIFT 0x1
+#define DPIA_PERF_COUNT_CONTROL5__STATUS__SHIFT 0x4
+#define DPIA_PERF_COUNT_CONTROL5__PORT_SELECT__SHIFT 0x5
+#define DPIA_PERF_COUNT_CONTROL5__STAT_SELECT__SHIFT 0x8
+#define DPIA_PERF_COUNT_CONTROL5__COUNT_LIMIT__SHIFT 0xc
+#define DPIA_PERF_COUNT_CONTROL5__ENABLE_MASK 0x00000001L
+#define DPIA_PERF_COUNT_CONTROL5__MODE_MASK 0x0000000EL
+#define DPIA_PERF_COUNT_CONTROL5__STATUS_MASK 0x00000010L
+#define DPIA_PERF_COUNT_CONTROL5__PORT_SELECT_MASK 0x000000E0L
+#define DPIA_PERF_COUNT_CONTROL5__STAT_SELECT_MASK 0x00000300L
+#define DPIA_PERF_COUNT_CONTROL5__COUNT_LIMIT_MASK 0xFFFFF000L
+//DPIA_PERF_COUNT_INDEX
+#define DPIA_PERF_COUNT_INDEX__COUNTER_SELECT__SHIFT 0x0
+#define DPIA_PERF_COUNT_INDEX__MEAS_SELECT__SHIFT 0x4
+#define DPIA_PERF_COUNT_INDEX__COUNTER_SELECT_MASK 0x00000007L
+#define DPIA_PERF_COUNT_INDEX__MEAS_SELECT_MASK 0x00000070L
+//DPIA_PERF_COUNT_DATA_LO
+#define DPIA_PERF_COUNT_DATA_LO__VALUE__SHIFT 0x0
+#define DPIA_PERF_COUNT_DATA_LO__VALUE_MASK 0xFFFFFFFFL
+//DPIA_MU_SPARE
+#define DPIA_MU_SPARE__DPIA_MU_SPARE__SHIFT 0x0
+#define DPIA_MU_SPARE__DPIA_MU_SPARE_MASK 0xFFFFFFFFL
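(Editor's illustration, not part of the patch.) Every register in this block pairs a ...__SHIFT define with a ..._MASK define, so a field is read back by masking the raw 32-bit value and shifting it down. A minimal sketch in plain C, assuming the header carrying the defines above is included; the helper names and the raw readback argument are hypothetical, not amdgpu driver API.

    #include <stdint.h>

    /* Decode the credit counters of DPIA_MU_TPI_STATUS_DPIA_PORT0 from a raw
     * 32-bit readback value; only the __SHIFT/_MASK pairs defined above are used. */
    static inline uint32_t dpia_port0_main_link_credits(uint32_t reg_val)
    {
            return (reg_val & DPIA_MU_TPI_STATUS_DPIA_PORT0__MAIN_LINK_CREDIT_COUNT_MASK) >>
                   DPIA_MU_TPI_STATUS_DPIA_PORT0__MAIN_LINK_CREDIT_COUNT__SHIFT;
    }

    static inline uint32_t dpia_port0_auxout_credits(uint32_t reg_val)
    {
            return (reg_val & DPIA_MU_TPI_STATUS_DPIA_PORT0__AUXOUT_CREDIT_COUNT_MASK) >>
                   DPIA_MU_TPI_STATUS_DPIA_PORT0__AUXOUT_CREDIT_COUNT__SHIFT;
    }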
+
+
+// addressBlock: azendpoint_f2codecind
+//AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_R__SHIFT 0xf
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_R_MASK 0x00008000L
+//AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2__CC__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2__CC_MASK 0x0000007FL
+//AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3__KEEPALIVE__SHIFT 0x7
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3__KEEPALIVE_MASK 0x00000080L
+//AZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY__CONNECTION_LIST_ENTRY__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY__CONNECTION_LIST_ENTRY_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__MISC__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__COLOR__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__MISC_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__COLOR_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__CONNECTION_TYPE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__DEFAULT_DEVICE__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__CONNECTION_TYPE_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__DEFAULT_DEVICE_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__LOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__PORT_CONNECTIVITY__SHIFT 0x6
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__LOCATION_MASK 0x0000003FL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__PORT_CONNECTIVITY_MASK 0x000000C0L
+//AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__HDMI_CONNECTION__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__DP_CONNECTION__SHIFT 0x9
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__EXTRA_CONNECTION_INFO__SHIFT 0xa
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__HDMI_CONNECTION_MASK 0x00000100L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__DP_CONNECTION_MASK 0x00000200L
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__EXTRA_CONNECTION_INFO_MASK 0x0000FC00L
+//AZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__LFE_PLAYBACK_LEVEL__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__LEVEL_SHIFT__SHIFT 0x3
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__DOWN_MIX_INHIBIT__SHIFT 0x7
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__LFE_PLAYBACK_LEVEL_MASK 0x00000003L
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__LEVEL_SHIFT_MASK 0x00000078L
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__DOWN_MIX_INHIBIT_MASK 0x00000080L
+//AZALIA_F2_CODEC_PIN_CONTROL_ACP_INDEX
+#define AZALIA_F2_CODEC_PIN_CONTROL_ACP_INDEX__ACP_INDEX__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_ACP_INDEX__SUPPORTS_AI__SHIFT 0x6
+#define AZALIA_F2_CODEC_PIN_CONTROL_ACP_INDEX__ACP_PACKET_ENABLE__SHIFT 0x7
+#define AZALIA_F2_CODEC_PIN_CONTROL_ACP_INDEX__ACP_INDEX_MASK 0x0000003FL
+#define AZALIA_F2_CODEC_PIN_CONTROL_ACP_INDEX__SUPPORTS_AI_MASK 0x00000040L
+#define AZALIA_F2_CODEC_PIN_CONTROL_ACP_INDEX__ACP_PACKET_ENABLE_MASK 0x00000080L
+//AZALIA_F2_CODEC_PIN_CONTROL_ACP_DATA
+#define AZALIA_F2_CODEC_PIN_CONTROL_ACP_DATA__ACP_DATA__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_ACP_DATA__ACP_DATA_MASK 0x000000FFL
+//AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__FORMAT_CODE__SHIFT 0x3
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__MAX_CHANNELS_MASK 0x00000007L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__FORMAT_CODE_MASK 0x00000078L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA__DESCRIPTOR__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA__DESCRIPTOR_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZALIA_F2_CODEC_PIN_CONTROL_HBR
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX__SINK_INFO_INDEX__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX__SINK_INFO_INDEX_MASK 0x000000FFL
+//AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA__SINK_DATA__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA__SINK_DATA_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZALIA_F2_CODEC_PIN_ASSOCIATION_INFO
+#define AZALIA_F2_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZALIA_F2_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZALIA_F2_CODEC_PIN_CONTROL_LPIB
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZALIA_F2_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZALIA_F2_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZALIA_F2_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH__CONNECTION_LIST_LENGTH__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH__CONNECTION_LIST_LENGTH_MASK 0xFFFFFFFFL
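(Editor's illustration, not part of the patch.) The same shift/mask convention works in the write direction: a register image is built by shifting each field value into place and clipping it with its mask. A minimal sketch assuming only the defines above; azalia_pack_converter_format() is an illustrative name, and the caller is responsible for supplying field encodings the codec actually accepts.

    #include <stdint.h>

    /* Compose AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT from two field
     * values; each field is shifted up by its __SHIFT and bounded by its _MASK. */
    static inline uint32_t azalia_pack_converter_format(uint32_t channels,
                                                        uint32_t bits_per_sample)
    {
            uint32_t val = 0;

            val |= (channels << AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT) &
                   AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK;
            val |= (bits_per_sample << AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT) &
                   AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK;
            return val;
    }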
+
+
+// addressBlock: azendpoint_descriptorind
+//AUDIO_DESCRIPTOR0
+#define AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR1
+#define AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR2
+#define AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR3
+#define AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR4
+#define AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR5
+#define AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR6
+#define AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR7
+#define AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR8
+#define AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR9
+#define AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR10
+#define AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR11
+#define AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR12
+#define AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AUDIO_DESCRIPTOR13
+#define AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+
+
+// addressBlock: azendpoint_sinkinfoind
+//AZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID
+#define AZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID__MANUFACTURER_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID__MANUFACTURER_ID_MASK 0x0000FFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID
+#define AZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID__PRODUCT_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID__PRODUCT_ID_MASK 0x0000FFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN
+#define AZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZALIA_F2_CODEC_PIN_CONTROL_PORTID0
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID0__PORTID__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID0__PORTID_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_PIN_CONTROL_PORTID1
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID1__PORTID__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID1__PORTID_MASK 0xFFFFFFFFL
+//SINK_DESCRIPTION0
+#define SINK_DESCRIPTION0__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION0__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION1
+#define SINK_DESCRIPTION1__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION1__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION2
+#define SINK_DESCRIPTION2__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION2__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION3
+#define SINK_DESCRIPTION3__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION3__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION4
+#define SINK_DESCRIPTION4__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION4__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION5
+#define SINK_DESCRIPTION5__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION5__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION6
+#define SINK_DESCRIPTION6__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION6__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION7
+#define SINK_DESCRIPTION7__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION7__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION8
+#define SINK_DESCRIPTION8__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION8__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION9
+#define SINK_DESCRIPTION9__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION9__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION10
+#define SINK_DESCRIPTION10__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION10__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION11
+#define SINK_DESCRIPTION11__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION11__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION12
+#define SINK_DESCRIPTION12__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION12__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION13
+#define SINK_DESCRIPTION13__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION13__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION14
+#define SINK_DESCRIPTION14__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION14__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION15
+#define SINK_DESCRIPTION15__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION15__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION16
+#define SINK_DESCRIPTION16__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION16__DESCRIPTION_MASK 0x000000FFL
+//SINK_DESCRIPTION17
+#define SINK_DESCRIPTION17__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION17__DESCRIPTION_MASK 0x000000FFL
+
+
+// addressBlock: azf0controller_azinputcrc0resultind
+//AZALIA_INPUT_CRC0_CHANNEL0
+#define AZALIA_INPUT_CRC0_CHANNEL0__INPUT_CRC_CHANNEL0__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL0__INPUT_CRC_CHANNEL0_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC0_CHANNEL1
+#define AZALIA_INPUT_CRC0_CHANNEL1__INPUT_CRC_CHANNEL1__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL1__INPUT_CRC_CHANNEL1_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC0_CHANNEL2
+#define AZALIA_INPUT_CRC0_CHANNEL2__INPUT_CRC_CHANNEL2__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL2__INPUT_CRC_CHANNEL2_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC0_CHANNEL3
+#define AZALIA_INPUT_CRC0_CHANNEL3__INPUT_CRC_CHANNEL3__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL3__INPUT_CRC_CHANNEL3_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC0_CHANNEL4
+#define AZALIA_INPUT_CRC0_CHANNEL4__INPUT_CRC_CHANNEL4__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL4__INPUT_CRC_CHANNEL4_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC0_CHANNEL5
+#define AZALIA_INPUT_CRC0_CHANNEL5__INPUT_CRC_CHANNEL5__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL5__INPUT_CRC_CHANNEL5_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC0_CHANNEL6
+#define AZALIA_INPUT_CRC0_CHANNEL6__INPUT_CRC_CHANNEL6__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL6__INPUT_CRC_CHANNEL6_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC0_CHANNEL7
+#define AZALIA_INPUT_CRC0_CHANNEL7__INPUT_CRC_CHANNEL7__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL7__INPUT_CRC_CHANNEL7_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0controller_azinputcrc1resultind
+//AZALIA_INPUT_CRC1_CHANNEL0
+#define AZALIA_INPUT_CRC1_CHANNEL0__INPUT_CRC_CHANNEL0__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL0__INPUT_CRC_CHANNEL0_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC1_CHANNEL1
+#define AZALIA_INPUT_CRC1_CHANNEL1__INPUT_CRC_CHANNEL1__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL1__INPUT_CRC_CHANNEL1_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC1_CHANNEL2
+#define AZALIA_INPUT_CRC1_CHANNEL2__INPUT_CRC_CHANNEL2__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL2__INPUT_CRC_CHANNEL2_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC1_CHANNEL3
+#define AZALIA_INPUT_CRC1_CHANNEL3__INPUT_CRC_CHANNEL3__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL3__INPUT_CRC_CHANNEL3_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC1_CHANNEL4
+#define AZALIA_INPUT_CRC1_CHANNEL4__INPUT_CRC_CHANNEL4__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL4__INPUT_CRC_CHANNEL4_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC1_CHANNEL5
+#define AZALIA_INPUT_CRC1_CHANNEL5__INPUT_CRC_CHANNEL5__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL5__INPUT_CRC_CHANNEL5_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC1_CHANNEL6
+#define AZALIA_INPUT_CRC1_CHANNEL6__INPUT_CRC_CHANNEL6__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL6__INPUT_CRC_CHANNEL6_MASK 0xFFFFFFFFL
+//AZALIA_INPUT_CRC1_CHANNEL7
+#define AZALIA_INPUT_CRC1_CHANNEL7__INPUT_CRC_CHANNEL7__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL7__INPUT_CRC_CHANNEL7_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0controller_azcrc0resultind
+//AZALIA_CRC0_CHANNEL0
+#define AZALIA_CRC0_CHANNEL0__CRC_CHANNEL0__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL0__CRC_CHANNEL0_MASK 0xFFFFFFFFL
+//AZALIA_CRC0_CHANNEL1
+#define AZALIA_CRC0_CHANNEL1__CRC_CHANNEL1__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL1__CRC_CHANNEL1_MASK 0xFFFFFFFFL
+//AZALIA_CRC0_CHANNEL2
+#define AZALIA_CRC0_CHANNEL2__CRC_CHANNEL2__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL2__CRC_CHANNEL2_MASK 0xFFFFFFFFL
+//AZALIA_CRC0_CHANNEL3
+#define AZALIA_CRC0_CHANNEL3__CRC_CHANNEL3__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL3__CRC_CHANNEL3_MASK 0xFFFFFFFFL
+//AZALIA_CRC0_CHANNEL4
+#define AZALIA_CRC0_CHANNEL4__CRC_CHANNEL4__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL4__CRC_CHANNEL4_MASK 0xFFFFFFFFL
+//AZALIA_CRC0_CHANNEL5
+#define AZALIA_CRC0_CHANNEL5__CRC_CHANNEL5__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL5__CRC_CHANNEL5_MASK 0xFFFFFFFFL
+//AZALIA_CRC0_CHANNEL6
+#define AZALIA_CRC0_CHANNEL6__CRC_CHANNEL6__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL6__CRC_CHANNEL6_MASK 0xFFFFFFFFL
+//AZALIA_CRC0_CHANNEL7
+#define AZALIA_CRC0_CHANNEL7__CRC_CHANNEL7__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL7__CRC_CHANNEL7_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0controller_azcrc1resultind
+//AZALIA_CRC1_CHANNEL0
+#define AZALIA_CRC1_CHANNEL0__CRC_CHANNEL0__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL0__CRC_CHANNEL0_MASK 0xFFFFFFFFL
+//AZALIA_CRC1_CHANNEL1
+#define AZALIA_CRC1_CHANNEL1__CRC_CHANNEL1__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL1__CRC_CHANNEL1_MASK 0xFFFFFFFFL
+//AZALIA_CRC1_CHANNEL2
+#define AZALIA_CRC1_CHANNEL2__CRC_CHANNEL2__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL2__CRC_CHANNEL2_MASK 0xFFFFFFFFL
+//AZALIA_CRC1_CHANNEL3
+#define AZALIA_CRC1_CHANNEL3__CRC_CHANNEL3__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL3__CRC_CHANNEL3_MASK 0xFFFFFFFFL
+//AZALIA_CRC1_CHANNEL4
+#define AZALIA_CRC1_CHANNEL4__CRC_CHANNEL4__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL4__CRC_CHANNEL4_MASK 0xFFFFFFFFL
+//AZALIA_CRC1_CHANNEL5
+#define AZALIA_CRC1_CHANNEL5__CRC_CHANNEL5__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL5__CRC_CHANNEL5_MASK 0xFFFFFFFFL
+//AZALIA_CRC1_CHANNEL6
+#define AZALIA_CRC1_CHANNEL6__CRC_CHANNEL6__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL6__CRC_CHANNEL6_MASK 0xFFFFFFFFL
+//AZALIA_CRC1_CHANNEL7
+#define AZALIA_CRC1_CHANNEL7__CRC_CHANNEL7__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL7__CRC_CHANNEL7_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azinputendpoint_f2codecind
+//AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__MISC__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__COLOR__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__MISC_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__COLOR_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__CONNECTION_TYPE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__DEFAULT_DEVICE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__CONNECTION_TYPE_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__DEFAULT_DEVICE_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__LOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__PORT_CONNECTIVITY__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__LOCATION_MASK 0x0000003FL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__PORT_CONNECTIVITY_MASK 0x000000C0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_ENABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_MUTE_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_CHANNEL_ID_MASK 0x000000F0L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_L
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_L__CHANNEL_STATUS_L__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_L__CHANNEL_STATUS_L_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_H
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_H__CHANNEL_STATUS_H__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_H__CHANNEL_STATUS_H_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+
+
+// addressBlock: azroot_f2codecind
+//AZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT__SHIFT 0x0
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT__SHIFT 0x4
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK__SHIFT 0x9
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET__SHIFT 0xa
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET_MASK 0x0000000FL
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT_MASK 0x000000F0L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK_MASK 0x00000200L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET_MASK 0x00000400L
+//AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1__SHIFT 0x8
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2__SHIFT 0x10
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3__SHIFT 0x18
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0_MASK 0x000000FFL
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1_MASK 0x0000FF00L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2_MASK 0x00FF0000L
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3_MASK 0xFF000000L
+//AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2__SUBSYSTEM_ID_BYTE1__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2__SUBSYSTEM_ID_BYTE1_MASK 0x000000FFL
+//AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3__SUBSYSTEM_ID_BYTE2__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3__SUBSYSTEM_ID_BYTE2_MASK 0x000000FFL
+//AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4__SUBSYSTEM_ID_BYTE3__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4__SUBSYSTEM_ID_BYTE3_MASK 0x000000FFL
+//AZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION_MASK 0x000000FFL
+//AZALIA_F2_CODEC_FUNCTION_CONTROL_RESET
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET_MASK 0x00000001L
+//AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP__SHIFT 0x1e
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS__SHIFT 0x1f
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES_MASK 0x3FFFFFFFL
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP_MASK 0x40000000L
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS_MASK 0x80000000L
+
+
+// addressBlock: azf0stream0_streamind
+//AZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM0_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM0_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM0_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM0_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM0_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM0_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM0_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM0_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM0_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM0_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM0_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM0_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream1_streamind
+//AZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM1_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM1_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM1_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM1_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM1_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM1_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM1_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM1_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM1_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM1_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM1_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM1_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream2_streamind
+//AZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM2_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM2_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM2_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM2_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM2_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM2_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM2_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM2_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM2_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM2_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM2_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM2_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream3_streamind
+//AZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM3_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM3_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM3_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM3_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM3_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM3_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM3_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM3_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM3_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM3_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM3_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM3_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream4_streamind
+//AZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM4_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM4_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM4_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM4_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM4_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM4_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM4_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM4_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM4_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM4_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM4_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM4_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream5_streamind
+//AZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM5_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM5_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM5_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM5_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM5_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM5_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM5_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM5_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM5_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM5_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM5_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM5_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream6_streamind
+//AZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM6_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM6_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM6_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM6_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM6_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM6_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM6_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM6_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM6_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM6_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM6_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM6_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream7_streamind
+//AZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM7_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM7_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM7_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM7_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM7_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM7_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM7_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM7_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM7_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM7_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM7_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM7_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream8_streamind
+//AZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM8_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM8_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM8_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM8_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM8_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM8_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM8_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM8_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM8_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM8_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM8_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM8_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream9_streamind
+//AZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM9_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM9_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM9_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM9_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM9_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM9_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM9_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM9_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM9_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM9_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM9_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM9_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream10_streamind
+//AZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM10_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM10_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM10_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM10_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM10_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM10_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM10_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM10_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM10_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM10_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM10_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM10_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream11_streamind
+//AZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM11_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM11_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM11_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM11_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM11_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM11_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM11_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM11_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM11_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM11_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM11_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM11_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream12_streamind
+//AZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM12_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM12_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM12_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM12_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM12_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM12_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM12_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM12_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM12_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM12_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM12_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM12_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream13_streamind
+//AZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM13_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM13_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM13_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM13_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM13_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM13_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM13_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM13_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM13_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM13_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM13_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM13_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream14_streamind
+//AZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM14_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM14_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM14_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM14_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM14_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM14_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM14_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM14_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM14_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM14_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM14_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM14_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0stream15_streamind
+//AZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL
+#define AZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x0000007FL
+#define AZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x00007F00L
+#define AZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0x00FF0000L
+//AZF0STREAM15_AZALIA_LATENCY_COUNTER_CONTROL
+#define AZF0STREAM15_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZF0STREAM15_AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x00000001L
+//AZF0STREAM15_AZALIA_WORSTCASE_LATENCY_COUNT
+#define AZF0STREAM15_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM15_AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM15_AZALIA_CUMULATIVE_LATENCY_COUNT
+#define AZF0STREAM15_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZF0STREAM15_AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xFFFFFFFFL
+//AZF0STREAM15_AZALIA_CUMULATIVE_REQUEST_COUNT
+#define AZF0STREAM15_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZF0STREAM15_AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: azf0endpoint0_endpointind
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX_MASK 0x0000003FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_MASK 0x00000300L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1_MASK 0xFF000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT0_AZALIA_F0_ENDPOINT_FGCG_REP_DIS
+#define AZF0ENDPOINT0_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS_MASK 0x00000001L
+
+
+// addressBlock: azf0endpoint1_endpointind
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX_MASK 0x0000003FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_MASK 0x00000300L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1_MASK 0xFF000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT1_AZALIA_F0_ENDPOINT_FGCG_REP_DIS
+#define AZF0ENDPOINT1_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS_MASK 0x00000001L
+
+
+// addressBlock: azf0endpoint2_endpointind
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI__SHIFT 0x6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX_MASK 0x0000003FL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI_MASK 0x00000040L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_MASK 0x00000300L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1_MASK 0xFF000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT2_AZALIA_F0_ENDPOINT_FGCG_REP_DIS
+#define AZF0ENDPOINT2_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS_MASK 0x00000001L
+
+
+// addressBlock: azf0endpoint3_endpointind
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI__SHIFT 0x6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX_MASK 0x0000003FL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI_MASK 0x00000040L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_MASK 0x00000300L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1_MASK 0xFF000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT3_AZALIA_F0_ENDPOINT_FGCG_REP_DIS
+#define AZF0ENDPOINT3_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS__SHIFT 0x0
+#define AZF0ENDPOINT3_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS_MASK 0x00000001L
+
+
+// addressBlock: azf0endpoint4_endpointind
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI__SHIFT 0x6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX_MASK 0x0000003FL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI_MASK 0x00000040L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_MASK 0x00000300L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1_MASK 0xFF000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT4_AZALIA_F0_ENDPOINT_FGCG_REP_DIS
+#define AZF0ENDPOINT4_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS__SHIFT 0x0
+#define AZF0ENDPOINT4_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS_MASK 0x00000001L
+
+
+// addressBlock: azf0endpoint5_endpointind
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_OFFSET_DEBUG
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI__SHIFT 0x6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX_MASK 0x0000003FL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI_MASK 0x00000040L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_MASK 0x00000300L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1_MASK 0xFF000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT5_AZALIA_F0_ENDPOINT_FGCG_REP_DIS
+#define AZF0ENDPOINT5_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS__SHIFT 0x0
+#define AZF0ENDPOINT5_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS_MASK 0x00000001L
+
+
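The SHIFT/MASK pairs in these address blocks follow the usual register-field idiom: clear the field with the MASK, then OR in the value shifted by the SHIFT. As a minimal illustrative sketch only (not part of this patch; set_field/get_field are hypothetical helpers, and real driver code goes through its own register accessors), a field such as STREAM_ID in the endpoint5 CHANNEL_STREAM_ID register can be packed and unpacked like this:

#include <stdint.h>
#include <stdio.h>

/* Field definitions copied from the header added in this patch. */
#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
#define AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L

/* Hypothetical helper: insert a field value using its MASK/SHIFT pair. */
static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Hypothetical helper: extract a field value using its MASK/SHIFT pair. */
static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

int main(void)
{
	uint32_t reg = 0;

	/* Program stream ID 3 into bits [7:4] of the register word. */
	reg = set_field(reg,
			AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK,
			AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT,
			0x3);

	/* Prints: reg = 0x00000030, stream id = 3 */
	printf("reg = 0x%08x, stream id = %u\n", reg,
	       get_field(reg,
			 AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK,
			 AZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT));
	return 0;
}

The same pattern applies to every _MASK/__SHIFT pair in this header; only the names and bit positions differ between endpoints.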
+// addressBlock: azf0endpoint6_endpointind
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI__SHIFT 0x6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX_MASK 0x0000003FL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI_MASK 0x00000040L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_MASK 0x00000300L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1_MASK 0xFF000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT6_AZALIA_F0_ENDPOINT_FGCG_REP_DIS
+#define AZF0ENDPOINT6_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS__SHIFT 0x0
+#define AZF0ENDPOINT6_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS_MASK 0x00000001L
+
+
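These generated headers pair each field's __SHIFT constant with its _MASK constant so that callers can pack and unpack 32-bit register words with plain bit operations. The short C sketch below illustrates that usage pattern only; it is not part of this patch. The GET_FIELD/SET_FIELD helpers, the main() harness, and the sample register value are hypothetical stand-ins, and the two field macros are simply copied from the CHANNEL_STREAM_ID definitions in the hunk above.

#include <stdint.h>
#include <stdio.h>

/* Shift/mask pair copied from the hunk above (CHANNEL_STREAM_ID register). */
#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
#define AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK  0x000000F0L

/* Extract a field: mask off its bits, then shift them down to bit 0. */
#define GET_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

/* Insert a field: clear its bits, then OR in the new value shifted into place. */
#define SET_FIELD(val, reg, field, fv) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((uint32_t)(fv) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

int main(void)
{
	uint32_t reg = 0x00000050;	/* pretend readout: STREAM_ID = 5, CHANNEL_ID = 0 */

	uint32_t stream = GET_FIELD(reg, AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID, STREAM_ID);
	reg = SET_FIELD(reg, AZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID, STREAM_ID, 7);

	printf("stream=%u reg=0x%08x\n", (unsigned)stream, (unsigned)reg);	/* stream=5 reg=0x00000070 */
	return 0;
}

The same mask-then-shift idiom applies to every __SHIFT/_MASK pair in this header; only the macro names change per register and field.
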
+// addressBlock: azf0endpoint7_endpointind
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI__SHIFT 0x6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_INDEX_MASK 0x0000003FL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__SUPPORTS_AI_MASK 0x00000040L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_PACKET_ENABLE_MASK 0x00000080L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_MASK 0x00000300L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE0_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_ACP_DATA__ACP_TYPE_DEPENDENT_BYTE1_MASK 0xFF000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT7_AZALIA_F0_ENDPOINT_FGCG_REP_DIS
+#define AZF0ENDPOINT7_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS__SHIFT 0x0
+#define AZF0ENDPOINT7_AZALIA_F0_ENDPOINT_FGCG_REP_DIS__ENDPOINT_FGCG_REP_DIS_MASK 0x00000001L
+
+
+// addressBlock: azf0inputendpoint0_inputendpointind
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+
+
+// addressBlock: azf0inputendpoint1_inputendpointind
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+
+
+// addressBlock: azf0inputendpoint2_inputendpointind
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+
+
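The address blocks above and below consist entirely of generated SHIFT/MASK pairs for per-endpoint Azalia register fields. As a minimal, hypothetical sketch (not part of the generated header or of this patch), the snippet below shows how one such pair is typically combined to read and update a single field of a 32-bit register value. Only the macro names are taken from the definitions above; the helper function names and the raw `regval` are illustrative assumptions, and the sketch presumes this register header is already included.

/*
 * Illustrative only: extract and update the STREAM_ID field of the
 * CHANNEL_STREAM_ID register for input endpoint 2, using the generated
 * __SHIFT/_MASK macros defined in this header.
 */
#include <stdint.h>

static inline uint32_t get_stream_id(uint32_t regval)
{
	/* Isolate the field bits, then shift them down to bit 0. */
	return (regval & AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK)
	       >> AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT;
}

static inline uint32_t set_stream_id(uint32_t regval, uint32_t id)
{
	/* Clear the old field value, then insert the new one in place. */
	regval &= ~AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK;
	regval |= (id << AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT)
		  & AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK;
	return regval;
}

The same read-modify-write pattern applies to every field in these blocks, since each register consistently gets one __SHIFT and one _MASK macro per field.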
+// addressBlock: azf0inputendpoint3_inputendpointind
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+
+
+// addressBlock: azf0inputendpoint4_inputendpointind
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+
+
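The __SHIFT/_MASK pairs in these address blocks follow the usual convention for generated register headers: a field is read by ANDing the 32-bit register value with the field's _MASK and shifting it down by the field's __SHIFT, and written by shifting the new value up into the mask while preserving neighbouring bits. A minimal sketch of that pattern in C follows; the helper names and the example field access are illustrative assumptions, not definitions taken from this header or from the driver.

#include <stdint.h>

/* Extract a field from a 32-bit register value using a generated
 * _MASK/__SHIFT pair: (val & FIELD_MASK) >> FIELD__SHIFT. */
static inline uint32_t az_field_get(uint32_t reg_val, uint32_t mask, uint32_t shift)
{
	return (reg_val & mask) >> shift;
}

/* Insert a field value into a 32-bit register value, clearing the old
 * bits first so the other fields in the register are left untouched. */
static inline uint32_t az_field_set(uint32_t reg_val, uint32_t mask, uint32_t shift, uint32_t field)
{
	return (reg_val & ~mask) | ((field << shift) & mask);
}

/* Hypothetical usage: read STREAM_ID out of a previously read
 * endpoint 4 CONVERTER_CONTROL_CHANNEL_STREAM_ID register value "val".
 *
 * uint32_t stream_id = az_field_get(val,
 *	AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK,
 *	AZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT);
 */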
+// addressBlock: azf0inputendpoint5_inputendpointind
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+
+
+// addressBlock: azf0inputendpoint6_inputendpointind
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+
+
+// addressBlock: azf0inputendpoint7_inputendpointind
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_5_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_5_0_offset.h
index abdb8728156e..d6c02cf815be 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_5_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_5_0_offset.h
@@ -9478,6 +9478,8 @@
#define regRLC_GFX_IMU_CMD_BASE_IDX 1
#define regGFX_IMU_RLC_STATUS 0x4054
#define regGFX_IMU_RLC_STATUS_BASE_IDX 1
+#define regGFX_IMU_STATUS 0x4055
+#define regGFX_IMU_STATUS_BASE_IDX 1
#define regGFX_IMU_SOC_DATA 0x4059
#define regGFX_IMU_SOC_DATA_BASE_IDX 1
#define regGFX_IMU_SOC_ADDR 0x405a
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h
index 2bd9f3f1026f..0122a21c50cf 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h
@@ -2261,11 +2261,13 @@
#define SH_MEM_CONFIG__ADDRESS_MODE__SHIFT 0x0
#define SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT 0x3
#define SH_MEM_CONFIG__F8_MODE__SHIFT 0x8
+#define SH_MEM_CONFIG__PRECISION_MODE__SHIFT 0x9
#define SH_MEM_CONFIG__RETRY_DISABLE__SHIFT 0xc
#define SH_MEM_CONFIG__PRIVATE_NV__SHIFT 0xd
#define SH_MEM_CONFIG__ADDRESS_MODE_MASK 0x00000001L
#define SH_MEM_CONFIG__ALIGNMENT_MODE_MASK 0x00000018L
#define SH_MEM_CONFIG__F8_MODE_MASK 0x00000100L
+#define SH_MEM_CONFIG__PRECISION_MODE_MASK 0x00000200L
#define SH_MEM_CONFIG__RETRY_DISABLE_MASK 0x00001000L
#define SH_MEM_CONFIG__PRIVATE_NV_MASK 0x00002000L
//SP_MFMA_PORTD_RD_CONFIG
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h
index c488d4a50cf4..b2252deabc17 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h
@@ -203,6 +203,10 @@
#define mmDAGB0_WR_DATA_CREDIT_BASE_IDX 1
#define mmDAGB0_WR_MISC_CREDIT 0x0058
#define mmDAGB0_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE 0x005b
+#define mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1
+#define mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x005c
+#define mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1
#define mmDAGB0_WRCLI_ASK_PENDING 0x005d
#define mmDAGB0_WRCLI_ASK_PENDING_BASE_IDX 1
#define mmDAGB0_WRCLI_GO_PENDING 0x005e
@@ -455,6 +459,10 @@
#define mmDAGB1_WR_DATA_CREDIT_BASE_IDX 1
#define mmDAGB1_WR_MISC_CREDIT 0x00d8
#define mmDAGB1_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE 0x00db
+#define mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1
+#define mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x00dc
+#define mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1
#define mmDAGB1_WRCLI_ASK_PENDING 0x00dd
#define mmDAGB1_WRCLI_ASK_PENDING_BASE_IDX 1
#define mmDAGB1_WRCLI_GO_PENDING 0x00de
@@ -707,6 +715,10 @@
#define mmDAGB2_WR_DATA_CREDIT_BASE_IDX 1
#define mmDAGB2_WR_MISC_CREDIT 0x0158
#define mmDAGB2_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB2_WRCLI_GPU_SNOOP_OVERRIDE 0x015b
+#define mmDAGB2_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1
+#define mmDAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x015c
+#define mmDAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1
#define mmDAGB2_WRCLI_ASK_PENDING 0x015d
#define mmDAGB2_WRCLI_ASK_PENDING_BASE_IDX 1
#define mmDAGB2_WRCLI_GO_PENDING 0x015e
@@ -959,6 +971,10 @@
#define mmDAGB3_WR_DATA_CREDIT_BASE_IDX 1
#define mmDAGB3_WR_MISC_CREDIT 0x01d8
#define mmDAGB3_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB3_WRCLI_GPU_SNOOP_OVERRIDE 0x01db
+#define mmDAGB3_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1
+#define mmDAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x01dc
+#define mmDAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1
#define mmDAGB3_WRCLI_ASK_PENDING 0x01dd
#define mmDAGB3_WRCLI_ASK_PENDING_BASE_IDX 1
#define mmDAGB3_WRCLI_GO_PENDING 0x01de
@@ -1211,6 +1227,10 @@
#define mmDAGB4_WR_DATA_CREDIT_BASE_IDX 1
#define mmDAGB4_WR_MISC_CREDIT 0x0258
#define mmDAGB4_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB4_WRCLI_GPU_SNOOP_OVERRIDE 0x025b
+#define mmDAGB4_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1
+#define mmDAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x025c
+#define mmDAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1
#define mmDAGB4_WRCLI_ASK_PENDING 0x025d
#define mmDAGB4_WRCLI_ASK_PENDING_BASE_IDX 1
#define mmDAGB4_WRCLI_GO_PENDING 0x025e
@@ -4793,6 +4813,10 @@
#define mmDAGB5_WR_DATA_CREDIT_BASE_IDX 1
#define mmDAGB5_WR_MISC_CREDIT 0x3058
#define mmDAGB5_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB5_WRCLI_GPU_SNOOP_OVERRIDE 0x305b
+#define mmDAGB5_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1
+#define mmDAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x305c
+#define mmDAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1
#define mmDAGB5_WRCLI_ASK_PENDING 0x305d
#define mmDAGB5_WRCLI_ASK_PENDING_BASE_IDX 1
#define mmDAGB5_WRCLI_GO_PENDING 0x305e
@@ -5045,6 +5069,10 @@
#define mmDAGB6_WR_DATA_CREDIT_BASE_IDX 1
#define mmDAGB6_WR_MISC_CREDIT 0x30d8
#define mmDAGB6_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB6_WRCLI_GPU_SNOOP_OVERRIDE 0x30db
+#define mmDAGB6_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1
+#define mmDAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x30dc
+#define mmDAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1
#define mmDAGB6_WRCLI_ASK_PENDING 0x30dd
#define mmDAGB6_WRCLI_ASK_PENDING_BASE_IDX 1
#define mmDAGB6_WRCLI_GO_PENDING 0x30de
@@ -5297,6 +5325,10 @@
#define mmDAGB7_WR_DATA_CREDIT_BASE_IDX 1
#define mmDAGB7_WR_MISC_CREDIT 0x3158
#define mmDAGB7_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB7_WRCLI_GPU_SNOOP_OVERRIDE 0x315b
+#define mmDAGB7_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1
+#define mmDAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x315c
+#define mmDAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1
#define mmDAGB7_WRCLI_ASK_PENDING 0x315d
#define mmDAGB7_WRCLI_ASK_PENDING_BASE_IDX 1
#define mmDAGB7_WRCLI_GO_PENDING 0x315e
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
index 2969fbf282b7..5069d2fd467f 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
@@ -1532,6 +1532,12 @@
//DAGB0_WRCLI_DBUS_GO_PENDING
#define DAGB0_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
#define DAGB0_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL
//DAGB0_DAGB_DLY
#define DAGB0_DAGB_DLY__DLY__SHIFT 0x0
#define DAGB0_DAGB_DLY__CLI__SHIFT 0x8
@@ -3207,6 +3213,12 @@
//DAGB1_WRCLI_DBUS_GO_PENDING
#define DAGB1_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
#define DAGB1_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB1_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB1_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL
//DAGB1_DAGB_DLY
#define DAGB1_DAGB_DLY__DLY__SHIFT 0x0
#define DAGB1_DAGB_DLY__CLI__SHIFT 0x8
@@ -4882,6 +4894,12 @@
//DAGB2_WRCLI_DBUS_GO_PENDING
#define DAGB2_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
#define DAGB2_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB2_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB2_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL
+//DAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL
//DAGB2_DAGB_DLY
#define DAGB2_DAGB_DLY__DLY__SHIFT 0x0
#define DAGB2_DAGB_DLY__CLI__SHIFT 0x8
@@ -6557,6 +6575,12 @@
//DAGB3_WRCLI_DBUS_GO_PENDING
#define DAGB3_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
#define DAGB3_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB3_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB3_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL
+//DAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL
//DAGB3_DAGB_DLY
#define DAGB3_DAGB_DLY__DLY__SHIFT 0x0
#define DAGB3_DAGB_DLY__CLI__SHIFT 0x8
@@ -8232,6 +8256,12 @@
//DAGB4_WRCLI_DBUS_GO_PENDING
#define DAGB4_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
#define DAGB4_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB4_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB4_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL
+//DAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL
//DAGB4_DAGB_DLY
#define DAGB4_DAGB_DLY__DLY__SHIFT 0x0
#define DAGB4_DAGB_DLY__CLI__SHIFT 0x8
@@ -28737,6 +28767,12 @@
//DAGB5_WRCLI_DBUS_GO_PENDING
#define DAGB5_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
#define DAGB5_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB5_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB5_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB5_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL
+//DAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL
//DAGB5_DAGB_DLY
#define DAGB5_DAGB_DLY__DLY__SHIFT 0x0
#define DAGB5_DAGB_DLY__CLI__SHIFT 0x8
@@ -30412,6 +30448,12 @@
//DAGB6_WRCLI_DBUS_GO_PENDING
#define DAGB6_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
#define DAGB6_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB6_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB6_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB6_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL
+//DAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL
//DAGB6_DAGB_DLY
#define DAGB6_DAGB_DLY__DLY__SHIFT 0x0
#define DAGB6_DAGB_DLY__CLI__SHIFT 0x8
@@ -32087,6 +32129,12 @@
//DAGB7_WRCLI_DBUS_GO_PENDING
#define DAGB7_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
#define DAGB7_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB7_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB7_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB7_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL
+//DAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL
//DAGB7_DAGB_DLY
#define DAGB7_DAGB_DLY__DLY__SHIFT 0x0
#define DAGB7_DAGB_DLY__CLI__SHIFT 0x8
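
The matching offsets and BASE_IDX values for these DAGBn_WRCLI_GPU_SNOOP_OVERRIDE registers were added in mmhub_9_4_1_offset.h above. A minimal sketch of how offset, BASE_IDX and mask typically combine (SOC15_REG_OFFSET/RREG32/WREG32 are the common amdgpu accessors; the actual mmhub call sites may use different wrappers):

/* Illustrative only: set snoop-override bits on DAGB0 of MMHUB instance 0.
 * SOC15_REG_OFFSET() resolves reg plus reg##_BASE_IDX via adev->reg_offset. */
static void dagb0_enable_wrcli_snoop_override(struct amdgpu_device *adev, u32 bits)
{
	u32 reg = SOC15_REG_OFFSET(MMHUB, 0, mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE);
	u32 val = RREG32(reg);

	val |= bits & DAGB0_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK;
	WREG32(reg, val);
}
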
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_offset.h
index e9742d10de1c..3ed10e60afbf 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_offset.h
@@ -779,7 +779,8 @@
#define regUVD_LMI_MIF_REF_LUMA_64BIT_BAR_HIGH_BASE_IDX 1
#define regVCN_RAS_CNTL 0x02df
#define regVCN_RAS_CNTL_BASE_IDX 1
-
+#define regVCN_RRMT_CNTL 0x0940
+#define regVCN_RRMT_CNTL_BASE_IDX 1
// addressBlock: aid_uvd0_uvd_jpeg0_jpegnpdec
// base address: 0x20f00
@@ -953,6 +954,10 @@
#define regUVD_JMI0_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 1
#define regUVD_JMI0_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH 0x0679
#define regUVD_JMI0_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_JMI0_UVD_JMI_CLIENT_STALL 0x067a
+#define regUVD_JMI0_UVD_JMI_CLIENT_STALL_BASE_IDX 1
+#define regUVD_JMI0_UVD_JMI_CLIENT_CLEAN_STATUS 0x067b
+#define regUVD_JMI0_UVD_JMI_CLIENT_CLEAN_STATUS_BASE_IDX 1
#define regUVD_JMI0_UVD_JMI_ATOMIC_CNTL2 0x067d
#define regUVD_JMI0_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 1
@@ -1055,6 +1060,8 @@
#define regJPEG_PERF_BANK_COUNT2_BASE_IDX 1
#define regJPEG_PERF_BANK_COUNT3 0x072c
#define regJPEG_PERF_BANK_COUNT3_BASE_IDX 1
+#define regJPEG_CORE_RST_CTRL 0x072e
+#define regJPEG_CORE_RST_CTRL_BASE_IDX 1
// addressBlock: aid_uvd0_uvd_pg_dec
@@ -1929,6 +1936,10 @@
#define regUVD_JMI1_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
#define regUVD_JMI1_UVD_JMI_ATOMIC_CNTL2 0x003d
#define regUVD_JMI1_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 0
+#define regUVD_JMI1_UVD_JMI_CLIENT_STALL 0x003a
+#define regUVD_JMI1_UVD_JMI_CLIENT_STALL_BASE_IDX 0
+#define regUVD_JMI1_UVD_JMI_CLIENT_CLEAN_STATUS 0x003b
+#define regUVD_JMI1_UVD_JMI_CLIENT_CLEAN_STATUS_BASE_IDX 0
// addressBlock: aid_uvd0_uvd_jmi2_uvd_jmi_dec
@@ -1987,6 +1998,10 @@
#define regUVD_JMI2_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
#define regUVD_JMI2_UVD_JMI_ATOMIC_CNTL2 0x007d
#define regUVD_JMI2_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 0
+#define regUVD_JMI2_UVD_JMI_CLIENT_STALL 0x007a
+#define regUVD_JMI2_UVD_JMI_CLIENT_STALL_BASE_IDX 0
+#define regUVD_JMI2_UVD_JMI_CLIENT_CLEAN_STATUS 0x007b
+#define regUVD_JMI2_UVD_JMI_CLIENT_CLEAN_STATUS_BASE_IDX 0
// addressBlock: aid_uvd0_uvd_jmi3_uvd_jmi_dec
@@ -2045,6 +2060,10 @@
#define regUVD_JMI3_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
#define regUVD_JMI3_UVD_JMI_ATOMIC_CNTL2 0x00bd
#define regUVD_JMI3_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 0
+#define regUVD_JMI3_UVD_JMI_CLIENT_STALL 0x00ba
+#define regUVD_JMI3_UVD_JMI_CLIENT_STALL_BASE_IDX 0
+#define regUVD_JMI3_UVD_JMI_CLIENT_CLEAN_STATUS 0x00bb
+#define regUVD_JMI3_UVD_JMI_CLIENT_CLEAN_STATUS_BASE_IDX 0
// addressBlock: aid_uvd0_uvd_jmi4_uvd_jmi_dec
@@ -2103,6 +2122,10 @@
#define regUVD_JMI4_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
#define regUVD_JMI4_UVD_JMI_ATOMIC_CNTL2 0x00fd
#define regUVD_JMI4_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 0
+#define regUVD_JMI4_UVD_JMI_CLIENT_STALL 0x00fa
+#define regUVD_JMI4_UVD_JMI_CLIENT_STALL_BASE_IDX 0
+#define regUVD_JMI4_UVD_JMI_CLIENT_CLEAN_STATUS 0x00fb
+#define regUVD_JMI4_UVD_JMI_CLIENT_CLEAN_STATUS_BASE_IDX 0
// addressBlock: aid_uvd0_uvd_jmi5_uvd_jmi_dec
@@ -2161,6 +2184,10 @@
#define regUVD_JMI5_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
#define regUVD_JMI5_UVD_JMI_ATOMIC_CNTL2 0x013d
#define regUVD_JMI5_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 0
+#define regUVD_JMI5_UVD_JMI_CLIENT_STALL 0x013a
+#define regUVD_JMI5_UVD_JMI_CLIENT_STALL_BASE_IDX 0
+#define regUVD_JMI5_UVD_JMI_CLIENT_CLEAN_STATUS 0x013b
+#define regUVD_JMI5_UVD_JMI_CLIENT_CLEAN_STATUS_BASE_IDX 0
// addressBlock: aid_uvd0_uvd_jmi6_uvd_jmi_dec
@@ -2219,6 +2246,10 @@
#define regUVD_JMI6_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
#define regUVD_JMI6_UVD_JMI_ATOMIC_CNTL2 0x017d
#define regUVD_JMI6_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 0
+#define regUVD_JMI6_UVD_JMI_CLIENT_STALL 0x017a
+#define regUVD_JMI6_UVD_JMI_CLIENT_STALL_BASE_IDX 0
+#define regUVD_JMI6_UVD_JMI_CLIENT_CLEAN_STATUS 0x017b
+#define regUVD_JMI6_UVD_JMI_CLIENT_CLEAN_STATUS_BASE_IDX 0
// addressBlock: aid_uvd0_uvd_jmi7_uvd_jmi_dec
@@ -2277,6 +2308,10 @@
#define regUVD_JMI7_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
#define regUVD_JMI7_UVD_JMI_ATOMIC_CNTL2 0x01bd
#define regUVD_JMI7_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 0
+#define regUVD_JMI7_UVD_JMI_CLIENT_STALL 0x01ba
+#define regUVD_JMI7_UVD_JMI_CLIENT_STALL_BASE_IDX 0
+#define regUVD_JMI7_UVD_JMI_CLIENT_CLEAN_STATUS 0x01bb
+#define regUVD_JMI7_UVD_JMI_CLIENT_CLEAN_STATUS_BASE_IDX 0
// addressBlock: uvdctxind
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index e3e635a31b8a..9aba8596faa7 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -313,9 +313,10 @@ struct kfd2kgd_calls {
void (*get_iq_wait_times)(struct amdgpu_device *adev,
uint32_t *wait_times,
uint32_t inst);
- void (*build_grace_period_packet_info)(struct amdgpu_device *adev,
+ void (*build_dequeue_wait_counts_packet_info)(struct amdgpu_device *adev,
uint32_t wait_times,
- uint32_t grace_period,
+ uint32_t sch_wave,
+ uint32_t que_sleep,
uint32_t *reg_offset,
uint32_t *reg_data);
void (*get_cu_occupancy)(struct amdgpu_device *adev,
@@ -330,6 +331,8 @@ struct kfd2kgd_calls {
uint64_t (*hqd_reset)(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id,
uint32_t inst, unsigned int utimeout);
+ uint32_t (*hqd_sdma_get_doorbell)(struct amdgpu_device *adev,
+ int engine, int queue);
};
#endif /* KGD_KFD_INTERFACE_H_INCLUDED */
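
The grace-period hook is renamed here and now takes separate sch_wave and que_sleep counts instead of a single grace_period value. A minimal caller sketch, with the wrapper name and count values invented for illustration:

/* Illustrative only: invoke the renamed hook through a kfd2kgd table. */
static void build_wait_counts_example(struct amdgpu_device *adev,
				      const struct kfd2kgd_calls *f2g,
				      uint32_t wait_times,
				      uint32_t *reg_offset, uint32_t *reg_data)
{
	/* sch_wave and que_sleep replace the former single grace_period. */
	f2g->build_dequeue_wait_counts_packet_info(adev, wait_times,
						   1 /* sch_wave */,
						   1 /* que_sleep */,
						   reg_offset, reg_data);
}
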
diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
index eb46cb10c24d..15680c3f4970 100644
--- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
@@ -266,7 +266,8 @@ union MESAPI_SET_HW_RESOURCES_1 {
};
uint64_t mes_info_ctx_mc_addr;
uint32_t mes_info_ctx_size;
- uint32_t mes_kiq_unmap_timeout; // unit is 100ms
+ uint64_t reserved1;
+ uint64_t cleaner_shader_fence_mc_addr;
};
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
diff --git a/drivers/gpu/drm/amd/include/mes_v12_api_def.h b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
index c9b2ca5cf75f..d85ffab2aff9 100644
--- a/drivers/gpu/drm/amd/include/mes_v12_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
@@ -105,6 +105,43 @@ struct MES_API_STATUS {
uint64_t api_completion_fence_value;
};
+/*
+ * MES will set api_completion_fence_value in api_completion_fence_addr
+ * when it can successfully process the API. MES will also trigger the
+ * following interrupt when it finishes processing the API, no matter
+ * whether it succeeded or failed:
+ * Interrupt source id 181 (EOP) with context ID (DW 6 in the int
+ * cookie) set to 0xb1 and context type set to 8. The driver side needs
+ * to enable TIME_STAMP_INT_ENABLE in CPC_INT_CNTL for the MES pipe to
+ * catch this interrupt.
+ * The driver side also needs to set enable_mes_fence_int = 1 in the
+ * set_HW_resource package to enable this fence interrupt.
+ * When the API processing fails:
+ * lower 32 bits are set to 0.
+ * higher 32 bits are set as follows (bit shifts within the high 32 bits):
+ * bit 0 - 7 API specific error code.
+ * bit 8 - 15 API OPCODE.
+ * bit 16 - 23 MISC OPCODE if any
+ * bit 24 - 30 ERROR category (API_ERROR_XXX)
+ * bit 31 Set to 1 to indicate error status
+ *
+ */
+enum { MES_SCH_ERROR_CODE_HEADER_SHIFT_12 = 8 };
+enum { MES_SCH_ERROR_CODE_MISC_OP_SHIFT_12 = 16 };
+enum { MES_ERROR_CATEGORY_SHIFT_12 = 24 };
+enum { MES_API_STATUS_ERROR_SHIFT_12 = 31 };
+
+enum MES_ERROR_CATEGORY_CODE_12 {
+ MES_ERROR_API = 1,
+ MES_ERROR_SCHEDULING = 2,
+ MES_ERROR_UNKNOWN = 3,
+};
+
+#define MES_ERR_CODE(api_err, opcode, misc_op, category) \
+ ((uint64) (api_err | opcode << MES_SCH_ERROR_CODE_HEADER_SHIFT_12 | \
+ misc_op << MES_SCH_ERROR_CODE_MISC_OP_SHIFT_12 | \
+ category << MES_ERROR_CATEGORY_SHIFT_12 | \
+ 1 << MES_API_STATUS_ERROR_SHIFT_12) << 32)
enum { MAX_COMPUTE_PIPES = 8 };
enum { MAX_GFX_PIPES = 2 };
@@ -248,7 +285,8 @@ union MESAPI_SET_HW_RESOURCES {
uint32_t enable_mes_sch_stb_log : 1;
uint32_t limit_single_process : 1;
uint32_t unmapped_doorbell_handling: 2;
- uint32_t reserved : 11;
+ uint32_t enable_mes_fence_int: 1;
+ uint32_t reserved : 10;
};
uint32_t uint32_all;
};
@@ -278,6 +316,8 @@ union MESAPI_SET_HW_RESOURCES_1 {
uint32_t mes_debug_ctx_size;
/* unit is 100ms */
uint32_t mes_kiq_unmap_timeout;
+ uint64_t reserved1;
+ uint64_t cleaner_shader_fence_mc_addr;
};
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
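
Given the encoding documented above and produced by MES_ERR_CODE(), a minimal decoding sketch (not part of this patch) for the value MES writes back on failure:

/* Illustrative only: split a failed-API fence value (high 32 bits) back into
 * the fields described in the comment above. */
static inline void mes_decode_status_example(uint64_t fence_value,
					     uint32_t *api_err, uint32_t *opcode,
					     uint32_t *misc_op, uint32_t *category,
					     bool *is_error)
{
	uint32_t hi = (uint32_t)(fence_value >> 32);

	*api_err  = hi & 0xff;					/* bits 0-7  */
	*opcode   = (hi >> MES_SCH_ERROR_CODE_HEADER_SHIFT_12) & 0xff;	/* 8-15  */
	*misc_op  = (hi >> MES_SCH_ERROR_CODE_MISC_OP_SHIFT_12) & 0xff;	/* 16-23 */
	*category = (hi >> MES_ERROR_CATEGORY_SHIFT_12) & 0x7f;		/* 24-30 */
	*is_error = !!(hi & (1u << MES_API_STATUS_ERROR_SHIFT_12));	/* 31    */
}
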
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 7a22aef6e59c..81e9b443ca0a 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -716,6 +716,33 @@ int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
ret = smu_send_rma_reason(smu);
mutex_unlock(&adev->pm.mutex);
+ if (adev->cper.enabled)
+ if (amdgpu_cper_generate_bp_threshold_record(adev))
+ dev_warn(adev->dev, "failed to generate bad page threshold cper records\n");
+
+ return ret;
+}
+
+/**
+ * amdgpu_dpm_reset_sdma_is_supported - Check if SDMA reset is supported
+ * @adev: amdgpu_device pointer
+ *
+ * This function checks if the SMU supports resetting the SDMA engine.
+ * It returns false if the hardware does not support software SMU or
+ * if the feature is not supported.
+ */
+bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ bool ret;
+
+ if (!is_support_sw_smu(adev))
+ return false;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_reset_sdma_is_supported(smu);
+ mutex_unlock(&adev->pm.mutex);
+
return ret;
}
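
A minimal sketch of how a caller would pair the new capability check with the existing amdgpu_dpm_reset_sdma(); the wrapper name and error code choice are illustrative, not taken from this patch:

/* Illustrative only: gate an SDMA engine reset on the new capability check. */
static int try_sdma_engine_reset(struct amdgpu_device *adev, uint32_t inst_mask)
{
	if (!amdgpu_dpm_reset_sdma_is_supported(adev))
		return -EOPNOTSUPP;

	return amdgpu_dpm_reset_sdma(adev, inst_mask);
}
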
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index e8ae7681bf0a..922def51685b 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -98,6 +98,85 @@ const char * const amdgpu_pp_profile_name[] = {
};
/**
+ * amdgpu_pm_dev_state_check - Check if device can be accessed.
+ * @adev: Target device.
+ * @runpm: Check runpm status for suspend state checks.
+ *
+ * Checks the state of the @adev for access. Return 0 if the device is
+ * accessible or a negative error code otherwise.
+ */
+static int amdgpu_pm_dev_state_check(struct amdgpu_device *adev, bool runpm)
+{
+ bool runpm_check = runpm ? adev->in_runpm : false;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+ if (adev->in_suspend && !runpm_check)
+ return -EPERM;
+
+ return 0;
+}
+
+/**
+ * amdgpu_pm_get_access - Check if device can be accessed, resume if needed.
+ * @adev: Target device.
+ *
+ * Checks the state of the @adev for access. Use runtime pm API to resume if
+ * needed. Return 0 if the device is accessible or a negative error code
+ * otherwise.
+ */
+static int amdgpu_pm_get_access(struct amdgpu_device *adev)
+{
+ int ret;
+
+ ret = amdgpu_pm_dev_state_check(adev, true);
+ if (ret)
+ return ret;
+
+ return pm_runtime_resume_and_get(adev->dev);
+}
+
+/**
+ * amdgpu_pm_get_access_if_active - Check if device is active for access.
+ * @adev: Target device.
+ *
+ * Checks the state of the @adev for access. Use runtime pm API to determine
+ * if the device is active. Allow access only if the device is active. Return 0 if the
+ * device is accessible or a negative error code otherwise.
+ */
+static int amdgpu_pm_get_access_if_active(struct amdgpu_device *adev)
+{
+ int ret;
+
+ /* Ignore runpm status. If device is in suspended state, deny access */
+ ret = amdgpu_pm_dev_state_check(adev, false);
+ if (ret)
+ return ret;
+
+ /*
+ * Allow only if the device is active. Also allow access if runpm is
+ * disabled, as in kernels without CONFIG_PM.
+ */
+ ret = pm_runtime_get_if_active(adev->dev);
+ if (!ret)
+ return -EPERM;
+
+ return 0;
+}
+
+/**
+ * amdgpu_pm_put_access - Put to auto suspend mode after a device access.
+ * @adev: Target device.
+ *
+ * Should be paired with amdgpu_pm_get_access* calls
+ */
+static inline void amdgpu_pm_put_access(struct amdgpu_device *adev)
+{
+ pm_runtime_mark_last_busy(adev->dev);
+ pm_runtime_put_autosuspend(adev->dev);
+}
+
+/**
* DOC: power_dpm_state
*
* The power_dpm_state file is a legacy interface and is only provided for
@@ -140,18 +219,13 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
enum amd_pm_state_type pm;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
amdgpu_dpm_get_current_power_state(adev, &pm);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%s\n",
(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
@@ -168,11 +242,6 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
enum amd_pm_state_type state;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (strncmp("battery", buf, strlen("battery")) == 0)
state = POWER_STATE_TYPE_BATTERY;
else if (strncmp("balanced", buf, strlen("balanced")) == 0)
@@ -182,14 +251,13 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
else
return -EINVAL;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
amdgpu_dpm_set_power_state(adev, state);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
@@ -263,18 +331,13 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
enum amd_dpm_forced_level level = 0xff;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
level = amdgpu_dpm_get_performance_level(adev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%s\n",
(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
@@ -299,11 +362,6 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
enum amd_dpm_forced_level level;
int ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (strncmp("low", buf, strlen("low")) == 0) {
level = AMD_DPM_FORCED_LEVEL_LOW;
} else if (strncmp("high", buf, strlen("high")) == 0) {
@@ -328,14 +386,13 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
return -EINVAL;
}
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
mutex_lock(&adev->pm.stable_pstate_ctx_lock);
if (amdgpu_dpm_force_performance_level(adev, level)) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
return -EINVAL;
}
@@ -343,8 +400,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
adev->pm.stable_pstate_ctx = NULL;
mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
@@ -359,19 +415,14 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
uint32_t i;
int buf_len, ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
if (amdgpu_dpm_get_pp_num_states(adev, &data))
memset(&data, 0, sizeof(data));
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
for (i = 0; i < data.nums; i++)
@@ -394,20 +445,15 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
enum amd_pm_state_type pm = 0;
int i = 0, ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
amdgpu_dpm_get_current_power_state(adev, &pm);
ret = amdgpu_dpm_get_pp_num_states(adev, &data);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return ret;
@@ -430,11 +476,6 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (adev->pm.pp_force_state_enabled)
return amdgpu_get_pp_cur_state(dev, attr, buf);
else
@@ -453,11 +494,6 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
unsigned long idx;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
adev->pm.pp_force_state_enabled = false;
if (strlen(buf) == 1)
@@ -469,7 +505,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
@@ -490,14 +526,13 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
adev->pm.pp_force_state_enabled = true;
}
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
err_out:
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
+
return ret;
}
@@ -521,18 +556,13 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
char *table = NULL;
int size, ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_get_pp_table(adev, &table);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (size <= 0)
return size;
@@ -554,19 +584,13 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
int ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_pp_table(adev, buf, count);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return ret;
@@ -735,11 +759,6 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
const char delimiter[3] = {' ', '\n', '\0'};
uint32_t type;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (count > 127 || count == 0)
return -EINVAL;
@@ -785,7 +804,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
tmp_str++;
}
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
@@ -806,14 +825,13 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
goto err_out;
}
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
err_out:
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
+
return -EINVAL;
}
@@ -835,14 +853,9 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
};
uint clk_index;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
for (clk_index = 0 ; clk_index < 6 ; clk_index++) {
ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
@@ -861,7 +874,7 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
if (size == 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -892,23 +905,17 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
uint64_t featuremask;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = kstrtou64(buf, 0, &featuremask);
if (ret)
return -EINVAL;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
@@ -925,20 +932,15 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_get_ppfeature_status(adev, buf);
if (size <= 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -991,14 +993,9 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
int size = 0;
int ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
if (ret == -ENOENT)
@@ -1007,7 +1004,7 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
if (size == 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -1057,23 +1054,17 @@ static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
int ret;
uint32_t mask = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_force_clock_level(adev, type, mask);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
@@ -1240,18 +1231,13 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
uint32_t value = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
value = amdgpu_dpm_get_sclk_od(adev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%d\n", value);
}
@@ -1266,24 +1252,18 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
int ret;
long int value;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = kstrtol(buf, 0, &value);
if (ret)
return -EINVAL;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
@@ -1297,18 +1277,13 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
uint32_t value = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
value = amdgpu_dpm_get_mclk_od(adev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%d\n", value);
}
@@ -1323,24 +1298,18 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
int ret;
long int value;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = kstrtol(buf, 0, &value);
if (ret)
return -EINVAL;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
@@ -1378,20 +1347,15 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_get_power_profile_mode(adev, buf);
if (size <= 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -1414,11 +1378,6 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
long int profile_mode = 0;
const char delimiter[3] = {' ', '\n', '\0'};
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
tmp[0] = *(buf);
tmp[1] = '\0';
ret = kstrtol(tmp, 0, &profile_mode);
@@ -1445,14 +1404,13 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
}
parameter[parameter_size] = profile_mode;
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (!ret)
return count;
@@ -1466,19 +1424,14 @@ static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev,
{
int r, size = sizeof(uint32_t);
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_get_if_active(adev->dev);
- if (r <= 0)
- return r ?: -EPERM;
+ r = amdgpu_pm_get_access_if_active(adev);
+ if (r)
+ return r;
/* get the sensor value */
r = amdgpu_dpm_read_sensor(adev, sensor, query, &size);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
return r;
}
@@ -1576,24 +1529,19 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
uint64_t count0 = 0, count1 = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (adev->flags & AMD_IS_APU)
return -ENODATA;
if (!adev->asic_funcs->get_pcie_usage)
return -ENODATA;
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return sysfs_emit(buf, "%llu %llu %i\n",
count0, count1, pcie_get_mps(adev->pdev));
@@ -1616,11 +1564,6 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (adev->unique_id)
return sysfs_emit(buf, "%016llx\n", adev->unique_id);
@@ -1715,9 +1658,9 @@ static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
if (!ret)
@@ -1725,7 +1668,7 @@ static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
else
size = sysfs_emit(buf, "failed to get thermal limit\n");
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -1749,20 +1692,18 @@ static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
return -EINVAL;
}
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
dev_err(dev, "failed to update thermal limit\n");
return ret;
}
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return count;
}
@@ -1786,18 +1727,13 @@ static ssize_t amdgpu_get_pm_metrics(struct device *dev,
ssize_t size = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -1824,14 +1760,9 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
ssize_t size = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(ddev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
if (size <= 0)
@@ -1843,7 +1774,7 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
memcpy(buf, gpu_metrics, size);
out:
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -1939,19 +1870,14 @@ static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
int r = 0;
int bias = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_resume_and_get(ddev->dev);
- if (r < 0)
- return r;
-
r = kstrtoint(buf, 10, &bias);
if (r)
goto out;
+ r = amdgpu_pm_get_access(adev);
+ if (r < 0)
+ return r;
+
if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
@@ -1963,8 +1889,8 @@ static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
/* TODO: update bias level with SMU message */
out:
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
+
return r;
}
@@ -2006,10 +1932,11 @@ static int pp_od_clk_voltage_attr_update(struct amdgpu_device *adev, struct amdg
return 0;
}
- /* Enable pp_od_clk_voltage node for gc 9.4.3 SRIOV/BM support */
+ /* Enable pp_od_clk_voltage node for gc 9.4.3, 9.4.4, 9.5.0 SRIOV/BM support */
if (gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4)) {
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0)) {
+ if (amdgpu_sriov_multi_vf_mode(adev))
*states = ATTR_STATE_UNSUPPORTED;
return 0;
}
@@ -2044,7 +1971,7 @@ static int pp_dpm_dcefclk_attr_update(struct amdgpu_device *adev, struct amdgpu_
* setting should not be allowed from VF if not in one VF mode.
*/
if (gc_ver >= IP_VERSION(10, 0, 0) ||
- (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))) {
+ (amdgpu_sriov_multi_vf_mode(adev))) {
dev_attr->attr.mode &= ~S_IWUGO;
dev_attr->store = NULL;
}
@@ -2087,7 +2014,8 @@ static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amd
gc_ver == IP_VERSION(11, 0, 2) ||
gc_ver == IP_VERSION(11, 0, 3) ||
gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4)))
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
if (!((gc_ver == IP_VERSION(10, 3, 1) ||
@@ -2109,7 +2037,8 @@ static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amd
gc_ver == IP_VERSION(11, 0, 2) ||
gc_ver == IP_VERSION(11, 0, 3) ||
gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4)))
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
if (!((gc_ver == IP_VERSION(10, 3, 1) ||
@@ -2120,7 +2049,8 @@ static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amd
} else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
if (gc_ver == IP_VERSION(9, 4, 2) ||
gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4))
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0))
*states = ATTR_STATE_UNSUPPORTED;
}
@@ -2218,11 +2148,6 @@ static ssize_t amdgpu_get_pm_policy_attr(struct device *dev,
policy_attr =
container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
return amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, buf);
}
@@ -2239,11 +2164,6 @@ static ssize_t amdgpu_set_pm_policy_attr(struct device *dev,
char *tmp, *param;
long val;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
count = min(count, sizeof(tmp_buf));
memcpy(tmp_buf, buf, count);
tmp_buf[count - 1] = '\0';
@@ -2269,14 +2189,13 @@ static ssize_t amdgpu_set_pm_policy_attr(struct device *dev,
policy_attr =
container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
- ret = pm_runtime_resume_and_get(ddev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_pm_policy(adev, policy_attr->id, val);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return ret;
@@ -2395,13 +2314,22 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
gc_ver == IP_VERSION(9, 0, 1))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(vcn_busy_percent)) {
- if (!(gc_ver == IP_VERSION(10, 3, 1) ||
- gc_ver == IP_VERSION(10, 3, 3) ||
- gc_ver == IP_VERSION(10, 3, 6) ||
- gc_ver == IP_VERSION(10, 3, 7) ||
- gc_ver == IP_VERSION(11, 0, 1) ||
- gc_ver == IP_VERSION(11, 0, 4) ||
- gc_ver == IP_VERSION(11, 5, 0)))
+ if (!(gc_ver == IP_VERSION(9, 3, 0) ||
+ gc_ver == IP_VERSION(10, 3, 1) ||
+ gc_ver == IP_VERSION(10, 3, 3) ||
+ gc_ver == IP_VERSION(10, 3, 6) ||
+ gc_ver == IP_VERSION(10, 3, 7) ||
+ gc_ver == IP_VERSION(11, 0, 0) ||
+ gc_ver == IP_VERSION(11, 0, 1) ||
+ gc_ver == IP_VERSION(11, 0, 2) ||
+ gc_ver == IP_VERSION(11, 0, 3) ||
+ gc_ver == IP_VERSION(11, 0, 4) ||
+ gc_ver == IP_VERSION(11, 5, 0) ||
+ gc_ver == IP_VERSION(11, 5, 1) ||
+ gc_ver == IP_VERSION(11, 5, 2) ||
+ gc_ver == IP_VERSION(11, 5, 3) ||
+ gc_ver == IP_VERSION(12, 0, 0) ||
+ gc_ver == IP_VERSION(12, 0, 1)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pcie_bw)) {
/* PCIe Perf counters won't work on APU nodes */
@@ -2416,11 +2344,14 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
case IP_VERSION(9, 4, 2):
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
case IP_VERSION(10, 3, 0):
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
*states = ATTR_STATE_SUPPORTED;
break;
default:
@@ -2699,18 +2630,13 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
u32 pwm_mode = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(adev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
@@ -2728,11 +2654,6 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
u32 pwm_mode;
int value;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
err = kstrtoint(buf, 10, &value);
if (err)
return err;
@@ -2746,14 +2667,13 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
else
return -EINVAL;
- ret = pm_runtime_resume_and_get(adev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
@@ -2784,16 +2704,11 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
u32 value;
u32 pwm_mode;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
err = kstrtou32(buf, 10, &value);
if (err)
return err;
- err = pm_runtime_resume_and_get(adev->dev);
+ err = amdgpu_pm_get_access(adev);
if (err < 0)
return err;
@@ -2810,8 +2725,7 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -2827,18 +2741,13 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
int err;
u32 speed = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- err = pm_runtime_get_if_active(adev->dev);
- if (err <= 0)
- return err ?: -EPERM;
+ err = amdgpu_pm_get_access_if_active(adev);
+ if (err)
+ return err;
err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -2854,18 +2763,13 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
int err;
u32 speed = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- err = pm_runtime_get_if_active(adev->dev);
- if (err <= 0)
- return err ?: -EPERM;
+ err = amdgpu_pm_get_access_if_active(adev);
+ if (err)
+ return err;
err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -2915,18 +2819,13 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
int err;
u32 rpm = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- err = pm_runtime_get_if_active(adev->dev);
- if (err <= 0)
- return err ?: -EPERM;
+ err = amdgpu_pm_get_access_if_active(adev);
+ if (err)
+ return err;
err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -2943,16 +2842,11 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
u32 value;
u32 pwm_mode;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
err = kstrtou32(buf, 10, &value);
if (err)
return err;
- err = pm_runtime_resume_and_get(adev->dev);
+ err = amdgpu_pm_get_access(adev);
if (err < 0)
return err;
@@ -2968,8 +2862,7 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -2985,18 +2878,13 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
u32 pwm_mode = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(adev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (ret)
return -EINVAL;
@@ -3014,11 +2902,6 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
int value;
u32 pwm_mode;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
err = kstrtoint(buf, 10, &value);
if (err)
return err;
@@ -3030,14 +2913,13 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
else
return -EINVAL;
- err = pm_runtime_resume_and_get(adev->dev);
+ err = amdgpu_pm_get_access(adev);
if (err < 0)
return err;
err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return -EINVAL;
@@ -3152,14 +3034,9 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
ssize_t size;
int r;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_get_if_active(adev->dev);
- if (r <= 0)
- return r ?: -EPERM;
+ r = amdgpu_pm_get_access_if_active(adev);
+ if (r)
+ return r;
r = amdgpu_dpm_get_power_limit(adev, &limit,
pp_limit_level, power_type);
@@ -3169,7 +3046,7 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
else
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -3230,11 +3107,6 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
int err;
u32 value;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
if (amdgpu_sriov_vf(adev))
return -EINVAL;
@@ -3245,14 +3117,13 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
value = value / 1000000; /* convert to Watt */
value |= limit_type << 24;
- err = pm_runtime_resume_and_get(adev->dev);
+ err = amdgpu_pm_get_access(adev);
if (err < 0)
return err;
err = amdgpu_dpm_set_power_limit(adev, value);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_pm_put_access(adev);
if (err)
return err;
@@ -3530,7 +3401,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
/* Skip crit temp on APU */
if ((((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ)) ||
- (gc_ver == IP_VERSION(9, 4, 3) || gc_ver == IP_VERSION(9, 4, 4))) &&
+ (gc_ver == IP_VERSION(9, 4, 3) || gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0))) &&
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
return 0;
@@ -3605,7 +3477,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
adev->family == AMDGPU_FAMILY_KV || /* not implemented yet */
(gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4))) &&
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0))) &&
(attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
attr == &sensor_dev_attr_in0_label.dev_attr.attr))
return 0;
@@ -3613,7 +3486,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
/* only APUs other than gc 9,4,3 have vddnb */
if ((!(adev->flags & AMD_IS_APU) ||
(gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4))) &&
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0))) &&
(attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
attr == &sensor_dev_attr_in1_label.dev_attr.attr))
return 0;
@@ -3636,7 +3510,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
/* hotspot temperature for gc 9,4,3*/
if (gc_ver == IP_VERSION(9, 4, 3) ||
- gc_ver == IP_VERSION(9, 4, 4)) {
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0)) {
if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_label.dev_attr.attr)
@@ -3686,20 +3561,15 @@ static int amdgpu_retrieve_od_settings(struct amdgpu_device *adev,
int size = 0;
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- ret = pm_runtime_get_if_active(adev->dev);
- if (ret <= 0)
- return ret ?: -EPERM;
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
size = amdgpu_dpm_print_clock_levels(adev, od_type, buf);
if (size == 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_put_autosuspend(adev->dev);
+ amdgpu_pm_put_access(adev);
return size;
}
@@ -3767,11 +3637,6 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
long parameter[64];
int ret;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
ret = parse_input_od_command_lines(in_buf,
count,
&cmd_type,
@@ -3780,7 +3645,7 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
if (ret)
return ret;
- ret = pm_runtime_resume_and_get(adev->dev);
+ ret = amdgpu_pm_get_access(adev);
if (ret < 0)
return ret;
@@ -3799,14 +3664,12 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
goto err_out;
}
- pm_runtime_mark_last_busy(adev->dev);
- pm_runtime_put_autosuspend(adev->dev);
+ amdgpu_pm_put_access(adev);
return count;
err_out:
- pm_runtime_mark_last_busy(adev->dev);
- pm_runtime_put_autosuspend(adev->dev);
+ amdgpu_pm_put_access(adev);
return ret;
}
@@ -4776,16 +4639,10 @@ static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags)
static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
{
struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
- struct drm_device *dev = adev_to_drm(adev);
u64 flags = 0;
int r;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_resume_and_get(dev->dev);
+ r = amdgpu_pm_get_access(adev);
if (r < 0)
return r;
@@ -4802,7 +4659,7 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
seq_printf(m, "\n");
out:
- pm_runtime_put_autosuspend(dev->dev);
+ amdgpu_pm_put_access(adev);
return r;
}
@@ -4822,10 +4679,9 @@ static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
void *smu_prv_buf;
int ret = 0;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
+ ret = amdgpu_pm_dev_state_check(adev, true);
+ if (ret)
+ return ret;
ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size);
if (ret)
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 1f5ac7e0230d..f93d287dbf13 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -295,7 +295,8 @@ enum ip_power_state {
};
/* Used to mask smu debug modes */
-#define SMU_DEBUG_HALT_ON_ERROR 0x1
+#define SMU_DEBUG_HALT_ON_ERROR BIT(0)
+#define SMU_DEBUG_POOL_USE_VRAM BIT(1)
#define MAX_SMU_I2C_BUSES 2
@@ -603,5 +604,6 @@ int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type,
ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
enum pp_pm_policy p_type, char *buf);
int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask);
+bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index e237ea1185a7..59fae668dc3f 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -3107,7 +3107,7 @@ static int kv_dpm_resume(struct amdgpu_ip_block *ip_block)
return ret;
}
-static bool kv_dpm_is_idle(void *handle)
+static bool kv_dpm_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index d6dfe2599ebe..1c25f3023e93 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -7852,7 +7852,7 @@ static int si_dpm_resume(struct amdgpu_ip_block *ip_block)
return ret;
}
-static bool si_dpm_is_idle(void *handle)
+static bool si_dpm_is_idle(struct amdgpu_ip_block *ip_block)
{
/* XXX */
return true;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 686345f75f26..b48a031cbba0 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -51,6 +51,11 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
hwmgr->adev = adev;
hwmgr->not_vf = !amdgpu_sriov_vf(adev);
hwmgr->device = amdgpu_cgs_create_device(adev);
+ if (!hwmgr->device) {
+ kfree(hwmgr);
+ return -ENOMEM;
+ }
+
mutex_init(&hwmgr->msg_lock);
hwmgr->chip_family = adev->family;
hwmgr->chip_id = adev->asic_type;
@@ -239,7 +244,7 @@ static void pp_late_fini(struct amdgpu_ip_block *ip_block)
}
-static bool pp_is_idle(void *handle)
+static bool pp_is_idle(struct amdgpu_ip_block *ip_block)
{
return false;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
index 90452b66e107..a59677cf8dfc 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
@@ -149,16 +149,6 @@ int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr)
return 0;
}
-int phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
-{
- PHM_FUNC_CHECK(hwmgr);
-
- if (hwmgr->hwmgr_func->powerdown_uvd != NULL)
- return hwmgr->hwmgr_func->powerdown_uvd(hwmgr);
- return 0;
-}
-
-
int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c
index 82d540334318..6120f14caab0 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c
@@ -158,84 +158,6 @@ int pp_atomfwctrl_get_voltage_table_v4(struct pp_hwmgr *hwmgr,
return result;
}
-
-static struct atom_gpio_pin_lut_v2_1 *pp_atomfwctrl_get_gpio_lookup_table(
- struct pp_hwmgr *hwmgr)
-{
- const void *table_address;
- uint16_t idx;
-
- idx = GetIndexIntoMasterDataTable(gpio_pin_lut);
- table_address = smu_atom_get_data_table(hwmgr->adev,
- idx, NULL, NULL, NULL);
- PP_ASSERT_WITH_CODE(table_address,
- "Error retrieving BIOS Table Address!",
- return NULL);
-
- return (struct atom_gpio_pin_lut_v2_1 *)table_address;
-}
-
-static bool pp_atomfwctrl_lookup_gpio_pin(
- struct atom_gpio_pin_lut_v2_1 *gpio_lookup_table,
- const uint32_t pin_id,
- struct pp_atomfwctrl_gpio_pin_assignment *gpio_pin_assignment)
-{
- unsigned int size = le16_to_cpu(
- gpio_lookup_table->table_header.structuresize);
- unsigned int offset =
- offsetof(struct atom_gpio_pin_lut_v2_1, gpio_pin[0]);
- unsigned long start = (unsigned long)gpio_lookup_table;
-
- while (offset < size) {
- const struct atom_gpio_pin_assignment *pin_assignment =
- (const struct atom_gpio_pin_assignment *)(start + offset);
-
- if (pin_id == pin_assignment->gpio_id) {
- gpio_pin_assignment->uc_gpio_pin_bit_shift =
- pin_assignment->gpio_bitshift;
- gpio_pin_assignment->us_gpio_pin_aindex =
- le16_to_cpu(pin_assignment->data_a_reg_index);
- return true;
- }
- offset += offsetof(struct atom_gpio_pin_assignment, gpio_id) + 1;
- }
- return false;
-}
-
-/*
- * Returns TRUE if the given pin id find in lookup table.
- */
-bool pp_atomfwctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr,
- const uint32_t pin_id,
- struct pp_atomfwctrl_gpio_pin_assignment *gpio_pin_assignment)
-{
- bool ret = false;
- struct atom_gpio_pin_lut_v2_1 *gpio_lookup_table =
- pp_atomfwctrl_get_gpio_lookup_table(hwmgr);
-
- /* If we cannot find the table do NOT try to control this voltage. */
- PP_ASSERT_WITH_CODE(gpio_lookup_table,
- "Could not find GPIO lookup Table in BIOS.",
- return false);
-
- ret = pp_atomfwctrl_lookup_gpio_pin(gpio_lookup_table,
- pin_id, gpio_pin_assignment);
-
- return ret;
-}
-
-/*
- * Enter to SelfRefresh mode.
- * @param hwmgr
- */
-int pp_atomfwctrl_enter_self_refresh(struct pp_hwmgr *hwmgr)
-{
- /* 0 - no action
- * 1 - leave power to video memory always on
- */
- return 0;
-}
-
/** pp_atomfwctrl_get_gpu_pll_dividers_vega10().
*
* @param hwmgr input parameter: pointer to HwMgr
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h
index e86e05c786d9..0d62903d5676 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h
@@ -217,9 +217,6 @@ struct pp_atomfwctrl_smc_dpm_parameters {
int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
uint32_t clock_type, uint32_t clock_value,
struct pp_atomfwctrl_clock_dividers_soc15 *dividers);
-int pp_atomfwctrl_enter_self_refresh(struct pp_hwmgr *hwmgr);
-bool pp_atomfwctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pin_id,
- struct pp_atomfwctrl_gpio_pin_assignment *gpio_pin_assignment);
int pp_atomfwctrl_get_voltage_table_v4(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
uint8_t voltage_mode, struct pp_atomfwctrl_voltage_table *voltage_table);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index a8c732e07006..9a821563bc8e 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -1642,7 +1642,6 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.apply_state_adjust_rules = smu10_apply_state_adjust_rules,
.force_dpm_level = smu10_dpm_force_dpm_level,
.get_power_state_size = smu10_get_power_state_size,
- .powerdown_uvd = NULL,
.powergate_uvd = smu10_powergate_vcn,
.powergate_vce = NULL,
.get_mclk = smu10_dpm_get_mclk,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c
index f2bda3bcbbde..5e4c80f7b20a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c
@@ -55,7 +55,7 @@ static int smu7_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate)
return smu7_enable_disable_vce_dpm(hwmgr, !bgate);
}
-int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
+static int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_uvd_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h
index fc8f8a6acc72..e56abbadc78b 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h
@@ -28,7 +28,6 @@
void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
-int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr);
int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 632a25957477..8da882c51856 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -5754,7 +5754,6 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.patch_boot_state = smu7_dpm_patch_boot_state,
.get_pp_table_entry = smu7_get_pp_table_entry,
.get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
- .powerdown_uvd = smu7_powerdown_uvd,
.powergate_uvd = smu7_powergate_uvd,
.powergate_vce = smu7_powergate_vce,
.disable_clock_power_gating = smu7_disable_clock_power_gating,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
index 7e1197420873..9d3b33446adc 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
@@ -2044,7 +2044,6 @@ static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
.apply_state_adjust_rules = smu8_apply_state_adjust_rules,
.force_dpm_level = smu8_dpm_force_dpm_level,
.get_power_state_size = smu8_get_power_state_size,
- .powerdown_uvd = smu8_dpm_powerdown_uvd,
.powergate_uvd = smu8_dpm_powergate_uvd,
.powergate_vce = smu8_dpm_powergate_vce,
.powergate_acp = smu8_dpm_powergate_acp,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h
index f4f9a104d170..915f1b8e4dba 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h
@@ -396,7 +396,6 @@ struct phm_odn_clock_levels {
};
extern int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr);
-extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
extern int phm_setup_asic(struct pp_hwmgr *hwmgr);
extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr);
extern int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
index 227bf0e84a13..c661185753b4 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
@@ -257,7 +257,6 @@ struct pp_hwmgr_func {
int (*get_pp_table_entry)(struct pp_hwmgr *hwmgr,
unsigned long, struct pp_power_state *);
int (*get_num_of_pp_table_entries)(struct pp_hwmgr *hwmgr);
- int (*powerdown_uvd)(struct pp_hwmgr *hwmgr);
void (*powergate_vce)(struct pp_hwmgr *hwmgr, bool bgate);
void (*powergate_uvd)(struct pp_hwmgr *hwmgr, bool bgate);
void (*powergate_acp)(struct pp_hwmgr *hwmgr, bool bgate);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index ed9dac00ebfb..033c3229b555 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -694,6 +694,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
renoir_set_ppt_funcs(smu);
break;
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
vangogh_set_ppt_funcs(smu);
break;
case IP_VERSION(13, 0, 1):
@@ -739,6 +740,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
case IP_VERSION(14, 0, 0):
case IP_VERSION(14, 0, 1):
case IP_VERSION(14, 0, 4):
+ case IP_VERSION(14, 0, 5):
smu_v14_0_0_set_ppt_funcs(smu);
break;
case IP_VERSION(14, 0, 2):
@@ -1025,7 +1027,10 @@ static int smu_alloc_memory_pool(struct smu_context *smu)
memory_pool->size = pool_size;
memory_pool->align = PAGE_SIZE;
- memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
+ memory_pool->domain =
+ (adev->pm.smu_debug_mask & SMU_DEBUG_POOL_USE_VRAM) ?
+ AMDGPU_GEM_DOMAIN_VRAM :
+ AMDGPU_GEM_DOMAIN_GTT;
switch (pool_size) {
case SMU_MEMORY_POOL_SIZE_256_MB:
@@ -1252,26 +1257,11 @@ static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
}
}
-static bool smu_is_workload_profile_available(struct smu_context *smu,
- u32 profile)
-{
- if (profile >= PP_SMC_POWER_PROFILE_COUNT)
- return false;
- return smu->workload_map && smu->workload_map[profile].valid_mapping;
-}
-
static void smu_init_power_profile(struct smu_context *smu)
{
- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_UNKNOWN) {
- if (smu->is_apu ||
- !smu_is_workload_profile_available(
- smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
- smu->power_profile_mode =
- PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
- else
- smu->power_profile_mode =
- PP_SMC_POWER_PROFILE_FULLSCREEN3D;
- }
+ if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_UNKNOWN)
+ smu->power_profile_mode =
+ PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu_power_profile_mode_get(smu, smu->power_profile_mode);
}
@@ -1580,6 +1570,7 @@ static int smu_smc_hw_setup(struct smu_context *smu)
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
case IP_VERSION(11, 0, 12):
if (adev->in_suspend && smu_is_dpm_running(smu)) {
dev_info(adev->dev, "dpm has been enabled\n");
@@ -1826,7 +1817,7 @@ static int smu_hw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
+ if (amdgpu_sriov_multi_vf_mode(adev)) {
smu->pm_enabled = false;
return 0;
}
@@ -1933,6 +1924,7 @@ static int smu_disable_dpms(struct smu_context *smu)
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
return 0;
@@ -2049,18 +2041,18 @@ static int smu_hw_fini(struct amdgpu_ip_block *ip_block)
struct smu_context *smu = adev->powerplay.pp_handle;
int i, ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ if (amdgpu_sriov_multi_vf_mode(adev))
return 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
smu_dpm_set_vcn_enable(smu, false, i);
+ adev->vcn.inst[i].cur_state = AMD_PG_STATE_GATE;
+ }
smu_dpm_set_jpeg_enable(smu, false);
+ adev->jpeg.cur_state = AMD_PG_STATE_GATE;
smu_dpm_set_vpe_enable(smu, false);
smu_dpm_set_umsch_mm_enable(smu, false);
- adev->vcn.cur_state = AMD_PG_STATE_GATE;
- adev->jpeg.cur_state = AMD_PG_STATE_GATE;
-
if (!smu->pm_enabled)
return 0;
@@ -2117,7 +2109,7 @@ static int smu_suspend(struct amdgpu_ip_block *ip_block)
int ret;
uint64_t count;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ if (amdgpu_sriov_multi_vf_mode(adev))
return 0;
if (!smu->pm_enabled)
@@ -2153,7 +2145,7 @@ static int smu_resume(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
- if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
+ if (amdgpu_sriov_multi_vf_mode(adev))
return 0;
if (!smu->pm_enabled)
@@ -2326,7 +2318,12 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
if (smu_dpm_ctx->dpm_level != level) {
ret = smu_asic_set_performance_level(smu, level);
if (ret) {
- dev_err(smu->adev->dev, "Failed to set performance level!");
+ if (ret == -EOPNOTSUPP)
+ dev_info(smu->adev->dev, "set performance level %d not supported",
+ level);
+ else
+ dev_err(smu->adev->dev, "Failed to set performance level %d",
+ level);
return ret;
}
@@ -2802,6 +2799,7 @@ int smu_get_power_limit(void *handle,
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 11):
@@ -3917,6 +3915,23 @@ int smu_send_rma_reason(struct smu_context *smu)
return ret;
}
+/**
+ * smu_reset_sdma_is_supported - Check if SDMA reset is supported by SMU
+ * @smu: smu_context pointer
+ *
+ * This function checks if the SMU supports resetting the SDMA engine.
+ * Return: true if supported, false otherwise.
+ */
+bool smu_reset_sdma_is_supported(struct smu_context *smu)
+{
+ bool ret = false;
+
+ if (smu->ppt_funcs && smu->ppt_funcs->reset_sdma_is_supported)
+ ret = smu->ppt_funcs->reset_sdma_is_supported(smu);
+
+ return ret;
+}
+
int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
{
int ret = 0;
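A hypothetical caller sketch for the new capability query: gate an SDMA reset request on smu_reset_sdma_is_supported() before sending the reset message (the function name below is made up for illustration):

	static int example_try_sdma_reset(struct smu_context *smu, uint32_t inst_mask)
	{
		if (!smu_reset_sdma_is_supported(smu))
			return -EOPNOTSUPP;	/* firmware cannot reset SDMA */

		return smu_reset_sdma(smu, inst_mask);
	}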
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 3630593bce61..3ba169639f54 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -1376,6 +1376,10 @@ struct pptable_funcs {
* @reset_sdma: message SMU to soft reset sdma instance.
*/
int (*reset_sdma)(struct smu_context *smu, uint32_t inst_mask);
+ /**
+	 * @reset_sdma_is_supported: Check whether the SMU supports resetting the SDMA engine.
+ */
+ bool (*reset_sdma_is_supported)(struct smu_context *smu);
/**
* @get_ecc_table: message SMU to get ECC INFO table.
@@ -1637,6 +1641,7 @@ int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size);
int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size);
int smu_send_rma_reason(struct smu_context *smu);
int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask);
+bool smu_reset_sdma_is_supported(struct smu_context *smu);
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
int level);
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
new file mode 100644
index 000000000000..d7505cfc433a
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
@@ -0,0 +1,281 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU_13_0_12_PMFW_H
+#define SMU_13_0_12_PMFW_H
+
+#define NUM_VCLK_DPM_LEVELS 4
+#define NUM_DCLK_DPM_LEVELS 4
+#define NUM_SOCCLK_DPM_LEVELS 4
+#define NUM_LCLK_DPM_LEVELS 4
+#define NUM_UCLK_DPM_LEVELS 4
+#define NUM_FCLK_DPM_LEVELS 4
+#define NUM_XGMI_DPM_LEVELS 2
+#define NUM_CXL_BITRATES 4
+#define NUM_PCIE_BITRATES 4
+#define NUM_XGMI_BITRATES 4
+#define NUM_XGMI_WIDTHS 3
+#define NUM_TDP_GROUPS 4
+#define NUM_SOC_P2S_TABLES 6
+#define NUM_GFX_P2S_TABLES 8
+#define NUM_PSM_DIDT_THRESHOLDS 3
+#define NUM_XVMIN_VMIN_THRESHOLDS 3
+
+#define PRODUCT_MODEL_NUMBER_LEN 20
+#define PRODUCT_NAME_LEN 64
+#define PRODUCT_SERIAL_LEN 20
+#define PRODUCT_MANUFACTURER_NAME_LEN 32
+#define PRODUCT_FRU_ID_LEN 32
+
+typedef enum {
+/*0*/ FEATURE_DATA_CALCULATION = 0,
+/*1*/ FEATURE_DPM_FCLK = 1,
+/*2*/ FEATURE_DPM_GFXCLK = 2,
+/*3*/ FEATURE_DPM_LCLK = 3,
+/*4*/ FEATURE_DPM_SOCCLK = 4,
+/*5*/ FEATURE_DPM_UCLK = 5,
+/*6*/ FEATURE_DPM_VCN = 6,
+/*7*/ FEATURE_DPM_XGMI = 7,
+/*8*/ FEATURE_DS_FCLK = 8,
+/*9*/ FEATURE_DS_GFXCLK = 9,
+/*10*/ FEATURE_DS_LCLK = 10,
+/*11*/ FEATURE_DS_MP0CLK = 11,
+/*12*/ FEATURE_DS_MP1CLK = 12,
+/*13*/ FEATURE_DS_MPIOCLK = 13,
+/*14*/ FEATURE_DS_SOCCLK = 14,
+/*15*/ FEATURE_DS_VCN = 15,
+/*16*/ FEATURE_APCC_DFLL = 16,
+/*17*/ FEATURE_APCC_PLUS = 17,
+/*18*/ FEATURE_PPT = 18,
+/*19*/ FEATURE_TDC = 19,
+/*20*/ FEATURE_THERMAL = 20,
+/*21*/ FEATURE_SOC_PCC = 21,
+/*22*/ FEATURE_PROCHOT = 22,
+/*23*/ FEATURE_FDD_AID_HBM = 23,
+/*24*/ FEATURE_FDD_AID_SOC = 24,
+/*25*/ FEATURE_FDD_XCD_EDC = 25,
+/*26*/ FEATURE_FDD_XCD_XVMIN = 26,
+/*27*/ FEATURE_FW_CTF = 27,
+/*28*/ FEATURE_SMU_CG = 28,
+/*29*/ FEATURE_PSI7 = 29,
+/*30*/ FEATURE_XGMI_PER_LINK_PWR_DOWN = 30,
+/*31*/ FEATURE_SOC_DC_RTC = 31,
+/*32*/ FEATURE_GFX_DC_RTC = 32,
+/*33*/ FEATURE_DVM_MIN_PSM = 33,
+/*34*/ FEATURE_PRC = 34,
+/*35*/ FEATURE_PSM_SQ_THROTTLER = 35,
+/*36*/ FEATURE_PIT = 36,
+/*37*/ FEATURE_DVO = 37,
+/*38*/ FEATURE_XVMINORPSM_CLKSTOP_DS = 38,
+
+/*39*/ NUM_FEATURES = 39
+} FEATURE_LIST_e;
+
+//enum for MPIO PCIe gen speed msgs
+typedef enum {
+ PCIE_LINK_SPEED_INDEX_TABLE_RESERVED,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN1,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN2,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN3,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN4,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN5,
+ PCIE_LINK_SPEED_INDEX_TABLE_COUNT
+} PCIE_LINK_SPEED_INDEX_TABLE_e;
+
+typedef enum {
+ GFX_GUARDBAND_OFFSET_0,
+ GFX_GUARDBAND_OFFSET_1,
+ GFX_GUARDBAND_OFFSET_2,
+ GFX_GUARDBAND_OFFSET_3,
+ GFX_GUARDBAND_OFFSET_4,
+ GFX_GUARDBAND_OFFSET_5,
+ GFX_GUARDBAND_OFFSET_6,
+ GFX_GUARDBAND_OFFSET_7,
+ GFX_GUARDBAND_OFFSET_COUNT
+} GFX_GUARDBAND_OFFSET_e;
+
+typedef enum {
+ GFX_DVM_MARGINHI_0,
+ GFX_DVM_MARGINHI_1,
+ GFX_DVM_MARGINHI_2,
+ GFX_DVM_MARGINHI_3,
+ GFX_DVM_MARGINHI_4,
+ GFX_DVM_MARGINHI_5,
+ GFX_DVM_MARGINHI_6,
+ GFX_DVM_MARGINHI_7,
+ GFX_DVM_MARGINLO_0,
+ GFX_DVM_MARGINLO_1,
+ GFX_DVM_MARGINLO_2,
+ GFX_DVM_MARGINLO_3,
+ GFX_DVM_MARGINLO_4,
+ GFX_DVM_MARGINLO_5,
+ GFX_DVM_MARGINLO_6,
+ GFX_DVM_MARGINLO_7,
+ GFX_DVM_MARGIN_COUNT
+} GFX_DVM_MARGIN_e;
+
+#define SMU_METRICS_TABLE_VERSION 0x12
+
+typedef struct __attribute__((packed, aligned(4))) {
+ uint64_t AccumulationCounter;
+
+ //TEMPERATURE
+ uint32_t MaxSocketTemperature;
+ uint32_t MaxVrTemperature;
+ uint32_t MaxHbmTemperature;
+ uint64_t MaxSocketTemperatureAcc;
+ uint64_t MaxVrTemperatureAcc;
+ uint64_t MaxHbmTemperatureAcc;
+
+ //POWER
+ uint32_t SocketPowerLimit;
+ uint32_t SocketPower;
+
+ //ENERGY
+ uint64_t Timestamp;
+ uint64_t SocketEnergyAcc;
+ uint64_t XcdEnergyAcc;
+ uint64_t AidEnergyAcc;
+ uint64_t HbmEnergyAcc;
+
+ //FREQUENCY
+ uint32_t GfxclkFrequencyLimit;
+ uint32_t FclkFrequency;
+ uint32_t UclkFrequency;
+ uint32_t SocclkFrequency[4];
+ uint32_t VclkFrequency[4];
+ uint32_t DclkFrequency[4];
+ uint32_t LclkFrequency[4];
+ uint64_t GfxclkFrequencyAcc[8];
+
+ //FREQUENCY RANGE
+ uint32_t MaxLclkDpmRange;
+ uint32_t MinLclkDpmRange;
+
+ //XGMI
+ uint32_t XgmiWidth;
+ uint32_t XgmiBitrate;
+ uint64_t XgmiReadBandwidthAcc[8];
+ uint64_t XgmiWriteBandwidthAcc[8];
+
+ //ACTIVITY
+ uint32_t SocketGfxBusy;
+ uint32_t DramBandwidthUtilization;
+ uint64_t SocketGfxBusyAcc;
+ uint64_t DramBandwidthAcc;
+ uint32_t MaxDramBandwidth;
+ uint64_t DramBandwidthUtilizationAcc;
+ uint64_t PcieBandwidthAcc[4];
+
+ //THROTTLERS
+ uint32_t ProchotResidencyAcc;
+ uint32_t PptResidencyAcc;
+ uint32_t SocketThmResidencyAcc;
+ uint32_t VrThmResidencyAcc;
+ uint32_t HbmThmResidencyAcc;
+ uint32_t GfxLockXCDMak;
+
+ // New Items at end to maintain driver compatibility
+ uint32_t GfxclkFrequency[8];
+
+	//XGMI Data transfer size
+ uint64_t XgmiReadDataSizeAcc[8];//in KByte
+ uint64_t XgmiWriteDataSizeAcc[8];//in KByte
+
+ //PCIE BW Data and error count
+ uint32_t PcieBandwidth[4];
+ uint32_t PCIeL0ToRecoveryCountAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIenReplayAAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIenReplayARolloverCountAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIeNAKSentCountAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIeNAKReceivedCountAcc; // The Pcie counter itself is accumulated
+
+ // VCN/JPEG ACTIVITY
+ uint32_t VcnBusy[4];
+ uint32_t JpegBusy[40];
+
+ // PCIE LINK Speed and width
+ uint32_t PCIeLinkSpeed;
+ uint32_t PCIeLinkWidth;
+
+ // PER XCD ACTIVITY
+ uint32_t GfxBusy[8];
+ uint64_t GfxBusyAcc[8];
+
+ //PCIE BW Data and error count
+ uint32_t PCIeOtherEndRecoveryAcc; // The Pcie counter itself is accumulated
+
+ //Total App Clock Counter
+ uint64_t GfxclkBelowHostLimitPptAcc[8];
+ uint64_t GfxclkBelowHostLimitThmAcc[8];
+ uint64_t GfxclkBelowHostLimitTotalAcc[8];
+ uint64_t GfxclkLowUtilizationAcc[8];
+} MetricsTable_t;
+
+#define SMU_VF_METRICS_TABLE_MASK (1 << 31)
+#define SMU_VF_METRICS_TABLE_VERSION (0x6 | SMU_VF_METRICS_TABLE_MASK)
+
+typedef struct __attribute__((packed, aligned(4))) {
+ uint32_t AccumulationCounter;
+ uint32_t InstGfxclk_TargFreq;
+ uint64_t AccGfxclk_TargFreq;
+ uint64_t AccGfxRsmuDpm_Busy;
+ uint64_t AccGfxclkBelowHostLimitPpt;
+ uint64_t AccGfxclkBelowHostLimitThm;
+ uint64_t AccGfxclkBelowHostLimitTotal;
+ uint64_t AccGfxclkLowUtilization;
+} VfMetricsTable_t;
+
+/* FRU product information */
+typedef struct __attribute__((packed, aligned(4))) {
+ uint8_t ModelNumber[PRODUCT_MODEL_NUMBER_LEN];
+ uint8_t Name[PRODUCT_NAME_LEN];
+ uint8_t Serial[PRODUCT_SERIAL_LEN];
+ uint8_t ManufacturerName[PRODUCT_MANUFACTURER_NAME_LEN];
+ uint8_t FruId[PRODUCT_FRU_ID_LEN];
+} FRUProductInfo_t;
+
+#pragma pack(push, 4)
+typedef struct {
+ //FRU PRODUCT INFO
+ FRUProductInfo_t ProductInfo;
+
+ //POWER
+ uint32_t MaxSocketPowerLimit;
+
+ //FREQUENCY RANGE
+ uint32_t MaxGfxclkFrequency;
+ uint32_t MinGfxclkFrequency;
+ uint32_t FclkFrequencyTable[4];
+ uint32_t UclkFrequencyTable[4];
+ uint32_t SocclkFrequencyTable[4];
+ uint32_t VclkFrequencyTable[4];
+ uint32_t DclkFrequencyTable[4];
+ uint32_t LclkFrequencyTable[4];
+
+ //PSNs
+ uint64_t PublicSerialNumber_AID[4];
+ uint64_t PublicSerialNumber_XCD[8];
+} StaticMetricsTable_t;
+#pragma pack(pop)
+
+#endif
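SMU_VF_METRICS_TABLE_VERSION above encodes the VF marker in bit 31, so the value reads back as 0x80000006 (version 0x6 with SMU_VF_METRICS_TABLE_MASK set). A small illustrative check, assuming the raw version word is available to the caller (the helper name is hypothetical):

	/* Sketch: bit 31 distinguishes a VF metrics table from the PF table. */
	static bool example_is_vf_metrics(uint32_t version)
	{
		return !!(version & SMU_VF_METRICS_TABLE_MASK);	/* 0x80000006 -> true */
	}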
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h
new file mode 100644
index 000000000000..e1f490b6ce64
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU_13_0_12_PPSMC_H
+#define SMU_13_0_12_PPSMC_H
+
+// SMU Response Codes:
+#define PPSMC_Result_OK 0x1
+#define PPSMC_Result_Failed 0xFF
+#define PPSMC_Result_UnknownCmd 0xFE
+#define PPSMC_Result_CmdRejectedPrereq 0xFD
+#define PPSMC_Result_CmdRejectedBusy 0xFC
+
+// Message Definitions:
+#define PPSMC_MSG_TestMessage 0x1
+#define PPSMC_MSG_GetSmuVersion 0x2
+#define PPSMC_MSG_GfxDriverReset 0x3
+#define PPSMC_MSG_GetDriverIfVersion 0x4
+#define PPSMC_MSG_EnableAllSmuFeatures 0x5
+#define PPSMC_MSG_DisableAllSmuFeatures 0x6
+#define PPSMC_MSG_RequestI2cTransaction 0x7
+#define PPSMC_MSG_GetMetricsVersion 0x8
+#define PPSMC_MSG_GetMetricsTable 0x9
+#define PPSMC_MSG_GetEccInfoTable 0xA
+#define PPSMC_MSG_GetEnabledSmuFeaturesLow 0xB
+#define PPSMC_MSG_GetEnabledSmuFeaturesHigh 0xC
+#define PPSMC_MSG_SetDriverDramAddrHigh 0xD
+#define PPSMC_MSG_SetDriverDramAddrLow 0xE
+#define PPSMC_MSG_SetToolsDramAddrHigh 0xF
+#define PPSMC_MSG_SetToolsDramAddrLow 0x10
+#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x11
+#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x12
+#define PPSMC_MSG_SetSoftMinByFreq 0x13
+#define PPSMC_MSG_SetSoftMaxByFreq 0x14
+#define PPSMC_MSG_GetMinDpmFreq 0x15
+#define PPSMC_MSG_GetMaxDpmFreq 0x16
+#define PPSMC_MSG_GetDpmFreqByIndex 0x17
+#define PPSMC_MSG_SetPptLimit 0x18
+#define PPSMC_MSG_GetPptLimit 0x19
+#define PPSMC_MSG_DramLogSetDramAddrHigh 0x1A
+#define PPSMC_MSG_DramLogSetDramAddrLow 0x1B
+#define PPSMC_MSG_DramLogSetDramSize 0x1C
+#define PPSMC_MSG_GetDebugData 0x1D
+#define PPSMC_MSG_HeavySBR 0x1E
+#define PPSMC_MSG_SetNumBadHbmPagesRetired 0x1F
+#define PPSMC_MSG_DFCstateControl 0x20
+#define PPSMC_MSG_GetGmiPwrDnHyst 0x21
+#define PPSMC_MSG_SetGmiPwrDnHyst 0x22
+#define PPSMC_MSG_GmiPwrDnControl 0x23
+#define PPSMC_MSG_EnterGfxoff 0x24
+#define PPSMC_MSG_ExitGfxoff 0x25
+#define PPSMC_MSG_EnableDeterminism 0x26
+#define PPSMC_MSG_DisableDeterminism 0x27
+#define PPSMC_MSG_DumpSTBtoDram 0x28
+#define PPSMC_MSG_STBtoDramLogSetDramAddrHigh 0x29
+#define PPSMC_MSG_STBtoDramLogSetDramAddrLow 0x2A
+#define PPSMC_MSG_STBtoDramLogSetDramSize 0x2B
+#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrHigh 0x2C
+#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrLow 0x2D
+#define PPSMC_MSG_GfxDriverResetRecovery 0x2E
+#define PPSMC_MSG_TriggerVFFLR 0x2F
+#define PPSMC_MSG_SetSoftMinGfxClk 0x30
+#define PPSMC_MSG_SetSoftMaxGfxClk 0x31
+#define PPSMC_MSG_GetMinGfxDpmFreq 0x32
+#define PPSMC_MSG_GetMaxGfxDpmFreq 0x33
+#define PPSMC_MSG_PrepareForDriverUnload 0x34
+#define PPSMC_MSG_ReadThrottlerLimit 0x35
+#define PPSMC_MSG_QueryValidMcaCount 0x36
+#define PPSMC_MSG_McaBankDumpDW 0x37
+#define PPSMC_MSG_GetCTFLimit 0x38
+#define PPSMC_MSG_ClearMcaOnRead 0x39
+#define PPSMC_MSG_QueryValidMcaCeCount 0x3A
+#define PPSMC_MSG_McaBankCeDumpDW 0x3B
+#define PPSMC_MSG_SelectPLPDMode 0x40
+#define PPSMC_MSG_PmLogReadSample 0x41
+#define PPSMC_MSG_PmLogGetTableVersion 0x42
+#define PPSMC_MSG_RmaDueToBadPageThreshold 0x43
+#define PPSMC_MSG_SetThrottlingPolicy 0x44
+#define PPSMC_MSG_SetPhaseDetectCSBWThreshold 0x45
+#define PPSMC_MSG_SetPhaseDetectFreqHigh 0x46
+#define PPSMC_MSG_SetPhaseDetectFreqLow 0x47
+#define PPSMC_MSG_SetPhaseDetectDownHysterisis 0x48
+#define PPSMC_MSG_SetPhaseDetectAlphaX1e6 0x49
+#define PPSMC_MSG_SetPhaseDetectOnOff 0x4A
+#define PPSMC_MSG_GetPhaseDetectResidency 0x4B
+#define PPSMC_MSG_UpdatePccWaitDecMaxStr 0x4C
+#define PPSMC_MSG_ResetSDMA 0x4D
+#define PPSMC_MSG_GetRasTableVersion 0x4E
+#define PPSMC_MSG_GetRmaStatus 0x4F
+#define PPSMC_MSG_GetErrorCount 0x50
+#define PPSMC_MSG_GetBadPageCount 0x51
+#define PPSMC_MSG_GetBadPageInfo 0x52
+#define PPSMC_MSG_GetBadPagePaAddrLoHi 0x53
+#define PPSMC_MSG_SetTimestampLoHi 0x54
+#define PPSMC_MSG_GetTimestampLoHi 0x55
+#define PPSMC_MSG_GetRasPolicy 0x56
+#define PPSMC_MSG_DumpErrorRecord 0x57
+#define PPSMC_MSG_EraseRasTable 0x58
+#define PPSMC_MSG_GetStaticMetricsTable 0x59
+#define PPSMC_Message_Count 0x5A
+
+//PPSMC Reset Types for driver msg argument
+#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
+#define PPSMC_RESET_TYPE_DRIVER_MODE_2_RESET 0x2
+#define PPSMC_RESET_TYPE_DRIVER_MODE_3_RESET 0x3
+
+//PPSMC Throttling Limit Types for driver msg argument
+#define PPSMC_THROTTLING_LIMIT_TYPE_SOCKET 0x1
+#define PPSMC_THROTTLING_LIMIT_TYPE_HBM 0x2
+
+//CTF/Throttle Limit types
+#define PPSMC_AID_THM_TYPE 0x1
+#define PPSMC_CCD_THM_TYPE 0x2
+#define PPSMC_XCD_THM_TYPE 0x3
+#define PPSMC_HBM_THM_TYPE 0x4
+
+//PLPD modes
+#define PPSMC_PLPD_MODE_DEFAULT 0x1
+#define PPSMC_PLPD_MODE_OPTIMIZED 0x2
+
+typedef uint32_t PPSMC_Result;
+typedef uint32_t PPSMC_MSG;
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
index 274b3e1cc4fb..f8ed45857878 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -129,6 +129,7 @@ typedef enum {
#define SMU_METRICS_TABLE_VERSION 0xF
+// Unified metrics table for smu_v13_0_6
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
@@ -241,8 +242,9 @@ typedef struct __attribute__((packed, aligned(4))) {
//Total App Clock Counter
uint64_t GfxclkBelowHostLimitAcc[8];
-} MetricsTableX_t;
+} MetricsTableV0_t;
+// Metrics table for smu_v13_0_6 APUs
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
@@ -333,7 +335,116 @@ typedef struct __attribute__((packed, aligned(4))) {
// VCN/JPEG ACTIVITY
uint32_t VcnBusy[4];
uint32_t JpegBusy[32];
-} MetricsTableA_t;
+} MetricsTableV1_t;
+
+// Metrics table for smu_v13_0_12
+typedef struct __attribute__((packed, aligned(4))) {
+ uint64_t AccumulationCounter;
+
+ //TEMPERATURE
+ uint32_t MaxSocketTemperature;
+ uint32_t MaxVrTemperature;
+ uint32_t MaxHbmTemperature;
+ uint64_t MaxSocketTemperatureAcc;
+ uint64_t MaxVrTemperatureAcc;
+ uint64_t MaxHbmTemperatureAcc;
+
+ //POWER
+ uint32_t SocketPowerLimit;
+ uint32_t MaxSocketPowerLimit;
+ uint32_t SocketPower;
+
+ //ENERGY
+ uint64_t Timestamp;
+ uint64_t SocketEnergyAcc;
+ uint64_t CcdEnergyAcc;
+ uint64_t XcdEnergyAcc;
+ uint64_t AidEnergyAcc;
+ uint64_t HbmEnergyAcc;
+
+ //FREQUENCY
+ uint32_t GfxclkFrequencyLimit;
+ uint32_t FclkFrequency;
+ uint32_t UclkFrequency;
+ uint32_t SocclkFrequency[4];
+ uint32_t VclkFrequency[4];
+ uint32_t DclkFrequency[4];
+ uint32_t LclkFrequency[4];
+ uint64_t GfxclkFrequencyAcc[8];
+
+ //FREQUENCY RANGE
+ uint32_t MaxGfxclkFrequency;
+ uint32_t MinGfxclkFrequency;
+ uint32_t FclkFrequencyTable[4];
+ uint32_t UclkFrequencyTable[4];
+ uint32_t SocclkFrequencyTable[4];
+ uint32_t VclkFrequencyTable[4];
+ uint32_t DclkFrequencyTable[4];
+ uint32_t LclkFrequencyTable[4];
+ uint32_t MaxLclkDpmRange;
+ uint32_t MinLclkDpmRange;
+
+ //XGMI
+ uint32_t XgmiWidth;
+ uint32_t XgmiBitrate;
+ uint64_t XgmiReadBandwidthAcc[8];
+ uint64_t XgmiWriteBandwidthAcc[8];
+
+ //ACTIVITY
+ uint32_t SocketGfxBusy;
+ uint32_t DramBandwidthUtilization;
+ uint64_t SocketC0ResidencyAcc;
+ uint64_t SocketGfxBusyAcc;
+ uint64_t DramBandwidthAcc;
+ uint32_t MaxDramBandwidth;
+ uint64_t DramBandwidthUtilizationAcc;
+ uint64_t PcieBandwidthAcc[4];
+
+ //THROTTLERS
+ uint32_t ProchotResidencyAcc;
+ uint32_t PptResidencyAcc;
+ uint32_t SocketThmResidencyAcc;
+ uint32_t VrThmResidencyAcc;
+ uint32_t HbmThmResidencyAcc;
+ uint32_t GfxLockXCDMak;
+
+ // New Items at end to maintain driver compatibility
+ uint32_t GfxclkFrequency[8];
+
+ //PSNs
+ uint64_t PublicSerialNumber_AID[4];
+ uint64_t PublicSerialNumber_XCD[8];
+
+	//XGMI Data transfer size
+ uint64_t XgmiReadDataSizeAcc[8];//in KByte
+ uint64_t XgmiWriteDataSizeAcc[8];//in KByte
+
+ //PCIE BW Data and error count
+ uint32_t PcieBandwidth[4];
+ uint32_t PCIeL0ToRecoveryCountAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIenReplayAAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIenReplayARolloverCountAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIeNAKSentCountAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIeNAKReceivedCountAcc; // The Pcie counter itself is accumulated
+
+ // VCN/JPEG ACTIVITY
+ uint32_t VcnBusy[4];
+ uint32_t JpegBusy[32];
+
+ // PCIE LINK Speed and width
+ uint32_t PCIeLinkSpeed;
+ uint32_t PCIeLinkWidth;
+
+ // PER XCD ACTIVITY
+ uint32_t GfxBusy[8];
+ uint64_t GfxBusyAcc[8];
+
+ //PCIE BW Data and error count
+ uint32_t PCIeOtherEndRecoveryAcc; // The Pcie counter itself is accumulated
+
+ //Total App Clock Counter
+ uint64_t GfxclkBelowHostLimitAcc[8];
+} MetricsTableV2_t;
#define SMU_VF_METRICS_TABLE_VERSION 0x5
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
index 147bfb12fd75..288b2576432b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
@@ -92,7 +92,7 @@
#define PPSMC_MSG_McaBankCeDumpDW 0x3B
#define PPSMC_MSG_SelectPLPDMode 0x40
#define PPSMC_MSG_RmaDueToBadPageThreshold 0x43
-#define PPSMC_MSG_SelectPstatePolicy 0x44
+#define PPSMC_MSG_SetThrottlingPolicy 0x44
#define PPSMC_MSG_ResetSDMA 0x4D
#define PPSMC_Message_Count 0x4E
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index e4cd6a0d13da..c9dee09395e3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -273,10 +273,11 @@
__SMU_DUMMY_MAP(GetMetricsVersion), \
__SMU_DUMMY_MAP(EnableUCLKShadow), \
__SMU_DUMMY_MAP(RmaDueToBadPageThreshold), \
- __SMU_DUMMY_MAP(SelectPstatePolicy), \
+ __SMU_DUMMY_MAP(SetThrottlingPolicy), \
__SMU_DUMMY_MAP(MALLPowerController), \
__SMU_DUMMY_MAP(MALLPowerState), \
- __SMU_DUMMY_MAP(ResetSDMA),
+ __SMU_DUMMY_MAP(ResetSDMA), \
+ __SMU_DUMMY_MAP(GetStaticMetricsTable),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
@@ -355,6 +356,7 @@ enum smu_clk_type {
__SMU_DUMMY_MAP(DS_FCLK), \
__SMU_DUMMY_MAP(DS_MP1CLK), \
__SMU_DUMMY_MAP(DS_MP0CLK), \
+ __SMU_DUMMY_MAP(DS_MPIOCLK), \
__SMU_DUMMY_MAP(XGMI_PER_LINK_PWR_DWN), \
__SMU_DUMMY_MAP(DPM_GFX_PACE), \
__SMU_DUMMY_MAP(MEM_VDDCI_SCALING), \
@@ -451,7 +453,8 @@ enum smu_clk_type {
__SMU_DUMMY_MAP(APT_PF_DCS), \
__SMU_DUMMY_MAP(GFX_EDC_XVMIN), \
__SMU_DUMMY_MAP(GFX_DIDT_XVMIN), \
- __SMU_DUMMY_MAP(FAN_ABNORMAL),
+ __SMU_DUMMY_MAP(FAN_ABNORMAL), \
+ __SMU_DUMMY_MAP(PIT),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(feature) SMU_FEATURE_##feature##_BIT
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index 8d4a96e23326..cd03caffe317 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -53,6 +53,10 @@
#define SMU_13_VCLK_SHIFT 16
+#define SMUQ10_TO_UINT(x) ((x) >> 10)
+#define SMUQ10_FRAC(x) ((x) & 0x3ff)
+#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))
+
extern const int pmfw_decoded_link_speed[5];
extern const int pmfw_decoded_link_width[7];
@@ -306,5 +310,14 @@ int smu_v13_0_get_boot_freq_by_index(struct smu_context *smu,
uint32_t *value);
void smu_v13_0_interrupt_work(struct smu_context *smu);
+bool smu_v13_0_12_is_dpm_running(struct smu_context *smu);
+int smu_v13_0_12_get_max_metrics_size(void);
+int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu);
+int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
+ MetricsMember_t member,
+ uint32_t *value);
+ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table);
+extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[];
+extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[];
#endif
#endif
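The SMUQ10_* helpers above treat metrics values as Q10.10 fixed point: the upper bits hold the integer part, the low 10 bits the fraction, and 0x200 is the rounding threshold. A worked, illustrative example (the function name is made up):

	static uint32_t example_round_q10(void)
	{
		uint32_t raw = 11008;	/* 10.75 encoded as 10 * 1024 + 768 */

		/* SMUQ10_TO_UINT(raw) == 10, SMUQ10_FRAC(raw) == 768 (>= 0x200),
		 * so SMUQ10_ROUND(raw) rounds up to 11.
		 */
		return SMUQ10_ROUND(raw);
	}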
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 189c6a32b6bd..78391d8f35a9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -227,6 +227,7 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Navy_Flounder;
break;
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VANGOGH;
break;
case IP_VERSION(11, 0, 12):
@@ -471,10 +472,11 @@ int smu_v11_0_init_power(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
struct smu_power_context *smu_power = &smu->smu_power;
- size_t size = amdgpu_ip_version(adev, MP1_HWIP, 0) ==
- IP_VERSION(11, 5, 0) ?
- sizeof(struct smu_11_5_power_context) :
- sizeof(struct smu_11_0_power_context);
+ u32 ip_version = amdgpu_ip_version(adev, MP1_HWIP, 0);
+ size_t size = ((ip_version == IP_VERSION(11, 5, 0)) ||
+ (ip_version == IP_VERSION(11, 5, 2))) ?
+ sizeof(struct smu_11_5_power_context) :
+ sizeof(struct smu_11_0_power_context);
smu_power->power_context = kzalloc(size, GFP_KERNEL);
if (!smu_power->power_context)
@@ -731,6 +733,7 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
*/
if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 11) ||
amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 2) ||
amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 12) ||
amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 13))
return 0;
@@ -1110,6 +1113,7 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 37d82a71a2d7..9481f897432d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -1285,6 +1285,12 @@ static int renoir_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_VCN_LOAD:
+ ret = renoir_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_VCNACTIVITY,
+ (uint32_t *)data);
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_EDGE_TEMP:
ret = renoir_get_smu_metrics_data(smu,
METRICS_TEMPERATURE_EDGE,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
index 7f3493b6c53c..51f1fa9789ab 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
@@ -24,7 +24,7 @@
# It provides the smu management services for the driver.
SMU13_MGR = smu_v13_0.o aldebaran_ppt.o yellow_carp_ppt.o smu_v13_0_0_ppt.o smu_v13_0_4_ppt.o \
- smu_v13_0_5_ppt.o smu_v13_0_7_ppt.o smu_v13_0_6_ppt.o
+ smu_v13_0_5_ppt.o smu_v13_0_7_ppt.o smu_v13_0_6_ppt.o smu_v13_0_12_ppt.o
AMD_SWSMU_SMU13MGR = $(addprefix $(AMD_SWSMU_PATH)/smu13/,$(SMU13_MGR))
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index fbbdfa54f6a2..0915d6377613 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -267,10 +267,7 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
smu_major = (smu_version >> 16) & 0xff;
smu_minor = (smu_version >> 8) & 0xff;
smu_debug = (smu_version >> 0) & 0xff;
- if (smu->is_apu ||
- amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6) ||
- amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 14))
- adev->pm.fw_version = smu_version;
+ adev->pm.fw_version = smu_version;
/* only for dGPU w/ SMU13*/
if (adev->pm.fw)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 0551a3311217..5a9711e8cf68 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -126,7 +126,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 1),
MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetRunningSmuFeaturesLow, 1),
MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh, 1),
- MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1),
+ MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 0),
MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
@@ -140,7 +140,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0),
MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1),
MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1),
- MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1),
+ MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 0),
MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0),
MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
@@ -149,7 +149,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0),
MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0),
MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0),
- MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1),
+ MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 0),
MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0),
MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
@@ -836,6 +836,10 @@ static int smu_v13_0_0_get_smu_metrics_data(struct smu_context *smu,
case METRICS_AVERAGE_MEMACTIVITY:
*value = metrics->AverageUclkActivity;
break;
+ case METRICS_AVERAGE_VCNACTIVITY:
+ *value = max(metrics->Vcn0ActivityPercentage,
+ metrics->Vcn1ActivityPercentage);
+ break;
case METRICS_AVERAGE_SOCKETPOWER:
*value = metrics->AverageSocketPower << 8;
break;
@@ -962,6 +966,12 @@ static int smu_v13_0_0_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_VCN_LOAD:
+ ret = smu_v13_0_0_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_VCNACTIVITY,
+ (uint32_t *)data);
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
ret = smu_v13_0_0_get_smu_metrics_data(smu,
METRICS_AVERAGE_SOCKETPOWER,
@@ -3234,4 +3244,9 @@ void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
smu->workload_map = smu_v13_0_0_workload_map;
smu->smc_driver_if_version = SMU13_0_0_DRIVER_IF_VERSION;
smu_v13_0_0_set_smu_mailbox_registers(smu);
+
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
+ IP_VERSION(13, 0, 10) &&
+ !amdgpu_device_has_display_hardware(smu->adev))
+ smu->adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
new file mode 100644
index 000000000000..238bd71baa6d
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
@@ -0,0 +1,490 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#define SWSMU_CODE_LAYER_L2
+
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "smu_v13_0_12_pmfw.h"
+#include "smu_v13_0_6_ppt.h"
+#include "smu_v13_0_12_ppsmc.h"
+#include "smu_v13_0.h"
+#include "amdgpu_xgmi.h"
+#include "amdgpu_fru_eeprom.h"
+#include <linux/pci.h>
+#include "smu_cmn.h"
+
+#undef MP1_Public
+#undef smnMP1_FIRMWARE_FLAGS
+
+/*
+ * DO NOT use these for err/warn/info/debug messages.
+ * Use dev_err, dev_warn, dev_info and dev_dbg instead.
+ * They are more MGPU friendly.
+ */
+#undef pr_err
+#undef pr_warn
+#undef pr_info
+#undef pr_debug
+
+#define SMU_13_0_12_FEA_MAP(smu_feature, smu_13_0_12_feature) \
+ [smu_feature] = { 1, (smu_13_0_12_feature) }
+
+#define FEATURE_MASK(feature) (1ULL << feature)
+#define SMC_DPM_FEATURE \
+ (FEATURE_MASK(FEATURE_DATA_CALCULATION) | \
+ FEATURE_MASK(FEATURE_DPM_GFXCLK) | FEATURE_MASK(FEATURE_DPM_FCLK))
+
+#define NUM_JPEG_RINGS_FW 10
+#define NUM_JPEG_RINGS_GPU_METRICS(gpu_metrics) \
+ (ARRAY_SIZE(gpu_metrics->xcp_stats[0].jpeg_busy) / 4)
+
+const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[SMU_FEATURE_COUNT] = {
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT, FEATURE_DATA_CALCULATION),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DPM_GFXCLK_BIT, FEATURE_DPM_GFXCLK),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DPM_FCLK_BIT, FEATURE_DPM_FCLK),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_GFXCLK_BIT, FEATURE_DS_GFXCLK),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_SOCCLK_BIT, FEATURE_DS_SOCCLK),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_LCLK_BIT, FEATURE_DS_LCLK),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_FCLK_BIT, FEATURE_DS_FCLK),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_PPT_BIT, FEATURE_PPT),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_TDC_BIT, FEATURE_TDC),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_APCC_DFLL_BIT, FEATURE_APCC_DFLL),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_MP1_CG_BIT, FEATURE_SMU_CG),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, FEATURE_FW_CTF),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_THERMAL_BIT, FEATURE_THERMAL),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_SOC_PCC_BIT, FEATURE_SOC_PCC),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT, FEATURE_XGMI_PER_LINK_PWR_DOWN),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_VCN_BIT, FEATURE_DS_VCN),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_MP1CLK_BIT, FEATURE_DS_MP1CLK),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_MPIOCLK_BIT, FEATURE_DS_MPIOCLK),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_MP0CLK_BIT, FEATURE_DS_MP0CLK),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_PIT_BIT, FEATURE_PIT),
+};
+
+// clang-format off
+const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] = {
+ MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
+ MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
+ MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
+ MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0),
+ MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0),
+ MSG_MAP(RequestI2cTransaction, PPSMC_MSG_RequestI2cTransaction, 0),
+ MSG_MAP(GetMetricsTable, PPSMC_MSG_GetMetricsTable, 1),
+ MSG_MAP(GetMetricsVersion, PPSMC_MSG_GetMetricsVersion, 1),
+ MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1),
+ MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1),
+ MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
+ MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
+ MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0),
+ MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0),
+ MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0),
+ MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1),
+ MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
+ MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
+ MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
+ MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
+ MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1),
+ MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI),
+ MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
+ MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
+ MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
+ MSG_MAP(GetDebugData, PPSMC_MSG_GetDebugData, 0),
+ MSG_MAP(SetNumBadHbmPagesRetired, PPSMC_MSG_SetNumBadHbmPagesRetired, 0),
+ MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl, 0),
+ MSG_MAP(GetGmiPwrDnHyst, PPSMC_MSG_GetGmiPwrDnHyst, 0),
+ MSG_MAP(SetGmiPwrDnHyst, PPSMC_MSG_SetGmiPwrDnHyst, 0),
+ MSG_MAP(GmiPwrDnControl, PPSMC_MSG_GmiPwrDnControl, 0),
+ MSG_MAP(EnterGfxoff, PPSMC_MSG_EnterGfxoff, 0),
+ MSG_MAP(ExitGfxoff, PPSMC_MSG_ExitGfxoff, 0),
+ MSG_MAP(EnableDeterminism, PPSMC_MSG_EnableDeterminism, 0),
+ MSG_MAP(DisableDeterminism, PPSMC_MSG_DisableDeterminism, 0),
+ MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0),
+ MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxDpmFreq, 1),
+ MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxDpmFreq, 1),
+ MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 1),
+ MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 1),
+ MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareForDriverUnload, 0),
+ MSG_MAP(GetCTFLimit, PPSMC_MSG_GetCTFLimit, 0),
+ MSG_MAP(GetThermalLimit, PPSMC_MSG_ReadThrottlerLimit, 0),
+ MSG_MAP(ClearMcaOnRead, PPSMC_MSG_ClearMcaOnRead, 0),
+ MSG_MAP(QueryValidMcaCount, PPSMC_MSG_QueryValidMcaCount, SMU_MSG_RAS_PRI),
+ MSG_MAP(QueryValidMcaCeCount, PPSMC_MSG_QueryValidMcaCeCount, SMU_MSG_RAS_PRI),
+ MSG_MAP(McaBankDumpDW, PPSMC_MSG_McaBankDumpDW, SMU_MSG_RAS_PRI),
+ MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, SMU_MSG_RAS_PRI),
+ MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0),
+ MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0),
+ MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0),
+ MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0),
+ MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 1),
+};
+
+static int smu_v13_0_12_get_enabled_mask(struct smu_context *smu,
+ uint64_t *feature_mask)
+{
+ int ret;
+
+ ret = smu_cmn_get_enabled_mask(smu, feature_mask);
+
+ if (ret == -EIO) {
+ *feature_mask = 0;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_12_fru_get_product_info(struct smu_context *smu,
+ StaticMetricsTable_t *static_metrics)
+{
+ struct amdgpu_fru_info *fru_info;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (!adev->fru_info) {
+ adev->fru_info = kzalloc(sizeof(*adev->fru_info), GFP_KERNEL);
+ if (!adev->fru_info)
+ return -ENOMEM;
+ }
+
+ fru_info = adev->fru_info;
+ strscpy(fru_info->product_number, static_metrics->ProductInfo.ModelNumber,
+ sizeof(fru_info->product_number));
+ strscpy(fru_info->product_name, static_metrics->ProductInfo.Name,
+ sizeof(fru_info->product_name));
+ strscpy(fru_info->serial, static_metrics->ProductInfo.Serial,
+ sizeof(fru_info->serial));
+ strscpy(fru_info->manufacturer_name, static_metrics->ProductInfo.ManufacturerName,
+ sizeof(fru_info->manufacturer_name));
+ strscpy(fru_info->fru_id, static_metrics->ProductInfo.FruId,
+ sizeof(fru_info->fru_id));
+
+ return 0;
+}
+
+int smu_v13_0_12_get_max_metrics_size(void)
+{
+ return max(sizeof(StaticMetricsTable_t), sizeof(MetricsTable_t));
+}
+
+static int smu_v13_0_12_get_static_metrics_table(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
+ struct smu_table *table = &smu_table->driver_table;
+ int ret;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetStaticMetricsTable, NULL);
+ if (ret) {
+ dev_info(smu->adev->dev,
+ "Failed to export static metrics table!\n");
+ return ret;
+ }
+
+ amdgpu_asic_invalidate_hdp(smu->adev, NULL);
+ memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
+
+ return 0;
+}
+
+int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ StaticMetricsTable_t *static_metrics = (StaticMetricsTable_t *)smu_table->metrics_table;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
+ uint32_t table_version;
+ int ret, i;
+
+ if (!pptable->Init) {
+ ret = smu_v13_0_12_get_static_metrics_table(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsVersion,
+ &table_version);
+ if (ret)
+ return ret;
+ smu_table->tables[SMU_TABLE_SMU_METRICS].version =
+ table_version;
+
+ pptable->MaxSocketPowerLimit =
+ SMUQ10_ROUND(static_metrics->MaxSocketPowerLimit);
+ pptable->MaxGfxclkFrequency =
+ SMUQ10_ROUND(static_metrics->MaxGfxclkFrequency);
+ pptable->MinGfxclkFrequency =
+ SMUQ10_ROUND(static_metrics->MinGfxclkFrequency);
+
+ for (i = 0; i < 4; ++i) {
+ pptable->FclkFrequencyTable[i] =
+ SMUQ10_ROUND(static_metrics->FclkFrequencyTable[i]);
+ pptable->UclkFrequencyTable[i] =
+ SMUQ10_ROUND(static_metrics->UclkFrequencyTable[i]);
+ pptable->SocclkFrequencyTable[i] =
+ SMUQ10_ROUND(static_metrics->SocclkFrequencyTable[i]);
+ pptable->VclkFrequencyTable[i] =
+ SMUQ10_ROUND(static_metrics->VclkFrequencyTable[i]);
+ pptable->DclkFrequencyTable[i] =
+ SMUQ10_ROUND(static_metrics->DclkFrequencyTable[i]);
+ pptable->LclkFrequencyTable[i] =
+ SMUQ10_ROUND(static_metrics->LclkFrequencyTable[i]);
+ }
+
+ /* use AID0 serial number by default */
+ pptable->PublicSerialNumber_AID =
+ static_metrics->PublicSerialNumber_AID[0];
+ ret = smu_v13_0_12_fru_get_product_info(smu, static_metrics);
+ if (ret)
+ return ret;
+
+ pptable->Init = true;
+ }
+
+ return 0;
+}
+
+bool smu_v13_0_12_is_dpm_running(struct smu_context *smu)
+{
+ int ret;
+ uint64_t feature_enabled;
+
+ ret = smu_v13_0_12_get_enabled_mask(smu, &feature_enabled);
+
+ if (ret)
+ return false;
+
+ return !!(feature_enabled & SMC_DPM_FEATURE);
+}
+
+int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
+ MetricsMember_t member,
+ uint32_t *value)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+ int xcc_id;
+
+ /* For clocks with multiple instances, only report the first one */
+ switch (member) {
+ case METRICS_CURR_GFXCLK:
+ case METRICS_AVERAGE_GFXCLK:
+ xcc_id = GET_INST(GC, 0);
+ *value = SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]);
+ break;
+ case METRICS_CURR_SOCCLK:
+ case METRICS_AVERAGE_SOCCLK:
+ *value = SMUQ10_ROUND(metrics->SocclkFrequency[0]);
+ break;
+ case METRICS_CURR_UCLK:
+ case METRICS_AVERAGE_UCLK:
+ *value = SMUQ10_ROUND(metrics->UclkFrequency);
+ break;
+ case METRICS_CURR_VCLK:
+ *value = SMUQ10_ROUND(metrics->VclkFrequency[0]);
+ break;
+ case METRICS_CURR_DCLK:
+ *value = SMUQ10_ROUND(metrics->DclkFrequency[0]);
+ break;
+ case METRICS_CURR_FCLK:
+ *value = SMUQ10_ROUND(metrics->FclkFrequency);
+ break;
+ case METRICS_AVERAGE_GFXACTIVITY:
+ *value = SMUQ10_ROUND(metrics->SocketGfxBusy);
+ break;
+ case METRICS_AVERAGE_MEMACTIVITY:
+ *value = SMUQ10_ROUND(metrics->DramBandwidthUtilization);
+ break;
+ case METRICS_CURR_SOCKETPOWER:
+ *value = SMUQ10_ROUND(metrics->SocketPower) << 8;
+ break;
+ case METRICS_TEMPERATURE_HOTSPOT:
+ *value = SMUQ10_ROUND(metrics->MaxSocketTemperature) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_TEMPERATURE_MEM:
+ *value = SMUQ10_ROUND(metrics->MaxHbmTemperature) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ /* This is the max of all VRs and not just SOC VR.
+ * No need to define another data type for the same.
+ */
+ case METRICS_TEMPERATURE_VRSOC:
+ *value = SMUQ10_ROUND(metrics->MaxVrTemperature) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ default:
+ *value = UINT_MAX;
+ break;
+ }
+
+ return ret;
+}
+
+ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct gpu_metrics_v1_7 *gpu_metrics =
+ (struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table;
+ int ret = 0, xcc_id, inst, i, j, k, idx;
+ struct amdgpu_device *adev = smu->adev;
+ u8 num_jpeg_rings_gpu_metrics;
+ MetricsTable_t *metrics;
+ struct amdgpu_xcp *xcp;
+ u32 inst_mask;
+
+ metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
+ if (!metrics)
+ return -ENOMEM;
+ memcpy(metrics, smu_table->metrics_table, sizeof(MetricsTable_t));
+
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 7);
+
+ gpu_metrics->temperature_hotspot =
+ SMUQ10_ROUND(metrics->MaxSocketTemperature);
+ /* Individual HBM stack temperature is not reported */
+ gpu_metrics->temperature_mem =
+ SMUQ10_ROUND(metrics->MaxHbmTemperature);
+ /* Reports max temperature of all voltage rails */
+ gpu_metrics->temperature_vrsoc =
+ SMUQ10_ROUND(metrics->MaxVrTemperature);
+
+ gpu_metrics->average_gfx_activity =
+ SMUQ10_ROUND(metrics->SocketGfxBusy);
+ gpu_metrics->average_umc_activity =
+ SMUQ10_ROUND(metrics->DramBandwidthUtilization);
+
+ gpu_metrics->mem_max_bandwidth =
+ SMUQ10_ROUND(metrics->MaxDramBandwidth);
+
+ gpu_metrics->curr_socket_power =
+ SMUQ10_ROUND(metrics->SocketPower);
+ /* Energy counter reported in 15.259uJ (2^-16) units */
+ gpu_metrics->energy_accumulator = metrics->SocketEnergyAcc;
+
+ for (i = 0; i < MAX_GFX_CLKS; i++) {
+ xcc_id = GET_INST(GC, i);
+ if (xcc_id >= 0)
+ gpu_metrics->current_gfxclk[i] =
+ SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]);
+
+ if (i < MAX_CLKS) {
+ gpu_metrics->current_socclk[i] =
+ SMUQ10_ROUND(metrics->SocclkFrequency[i]);
+ inst = GET_INST(VCN, i);
+ if (inst >= 0) {
+ gpu_metrics->current_vclk0[i] =
+ SMUQ10_ROUND(metrics->VclkFrequency[inst]);
+ gpu_metrics->current_dclk0[i] =
+ SMUQ10_ROUND(metrics->DclkFrequency[inst]);
+ }
+ }
+ }
+
+ gpu_metrics->current_uclk = SMUQ10_ROUND(metrics->UclkFrequency);
+
+ /* Total accumulated cycle counter */
+ gpu_metrics->accumulation_counter = metrics->AccumulationCounter;
+
+ /* Accumulated throttler residencies */
+ gpu_metrics->prochot_residency_acc = metrics->ProchotResidencyAcc;
+ gpu_metrics->ppt_residency_acc = metrics->PptResidencyAcc;
+ gpu_metrics->socket_thm_residency_acc = metrics->SocketThmResidencyAcc;
+ gpu_metrics->vr_thm_residency_acc = metrics->VrThmResidencyAcc;
+ gpu_metrics->hbm_thm_residency_acc = metrics->HbmThmResidencyAcc;
+
+ /* Clock Lock Status. Each bit corresponds to each GFXCLK instance */
+ gpu_metrics->gfxclk_lock_status = metrics->GfxLockXCDMak >> GET_INST(GC, 0);
+
+ gpu_metrics->pcie_link_width = metrics->PCIeLinkWidth;
+ gpu_metrics->pcie_link_speed =
+ pcie_gen_to_speed(metrics->PCIeLinkSpeed);
+ gpu_metrics->pcie_bandwidth_acc =
+ SMUQ10_ROUND(metrics->PcieBandwidthAcc[0]);
+ gpu_metrics->pcie_bandwidth_inst =
+ SMUQ10_ROUND(metrics->PcieBandwidth[0]);
+ gpu_metrics->pcie_l0_to_recov_count_acc = metrics->PCIeL0ToRecoveryCountAcc;
+ gpu_metrics->pcie_replay_count_acc = metrics->PCIenReplayAAcc;
+ gpu_metrics->pcie_replay_rover_count_acc =
+ metrics->PCIenReplayARolloverCountAcc;
+ gpu_metrics->pcie_nak_sent_count_acc = metrics->PCIeNAKSentCountAcc;
+ gpu_metrics->pcie_nak_rcvd_count_acc = metrics->PCIeNAKReceivedCountAcc;
+ gpu_metrics->pcie_lc_perf_other_end_recovery = metrics->PCIeOtherEndRecoveryAcc;
+
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+ gpu_metrics->gfx_activity_acc = SMUQ10_ROUND(metrics->SocketGfxBusyAcc);
+ gpu_metrics->mem_activity_acc = SMUQ10_ROUND(metrics->DramBandwidthUtilizationAcc);
+
+ for (i = 0; i < NUM_XGMI_LINKS; i++) {
+ gpu_metrics->xgmi_read_data_acc[i] =
+ SMUQ10_ROUND(metrics->XgmiReadDataSizeAcc[i]);
+ gpu_metrics->xgmi_write_data_acc[i] =
+ SMUQ10_ROUND(metrics->XgmiWriteDataSizeAcc[i]);
+ ret = amdgpu_get_xgmi_link_status(adev, i);
+ if (ret >= 0)
+ gpu_metrics->xgmi_link_status[i] = ret;
+ }
+
+ gpu_metrics->num_partition = adev->xcp_mgr->num_xcps;
+
+ num_jpeg_rings_gpu_metrics = NUM_JPEG_RINGS_GPU_METRICS(gpu_metrics);
+ for_each_xcp(adev->xcp_mgr, xcp, i) {
+ amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
+ idx = 0;
+ for_each_inst(k, inst_mask) {
+ /* Both JPEG and VCN have the same instances */
+ inst = GET_INST(VCN, k);
+
+ for (j = 0; j < num_jpeg_rings_gpu_metrics; ++j) {
+ gpu_metrics->xcp_stats[i].jpeg_busy
+ [(idx * num_jpeg_rings_gpu_metrics) + j] =
+ SMUQ10_ROUND(metrics->JpegBusy
+ [(inst * NUM_JPEG_RINGS_FW) + j]);
+ }
+ gpu_metrics->xcp_stats[i].vcn_busy[idx] =
+ SMUQ10_ROUND(metrics->VcnBusy[inst]);
+ idx++;
+ }
+
+ amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask);
+ idx = 0;
+ for_each_inst(k, inst_mask) {
+ inst = GET_INST(GC, k);
+ gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] =
+ SMUQ10_ROUND(metrics->GfxBusy[inst]);
+ gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
+ SMUQ10_ROUND(metrics->GfxBusyAcc[inst]);
+ idx++;
+ }
+ }
+
+ gpu_metrics->xgmi_link_width = metrics->XgmiWidth;
+ gpu_metrics->xgmi_link_speed = metrics->XgmiBitrate;
+
+ gpu_metrics->firmware_timestamp = metrics->Timestamp;
+
+ *table = (void *)gpu_metrics;
+ kfree(metrics);
+
+ return sizeof(*gpu_metrics);
+}
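Note on the Q10.10 conversions used throughout the new smu_v13_0_12 code above: SMUQ10_ROUND() treats firmware counters as unsigned fixed point with 10 fractional bits and rounds to the nearest integer (its definition appears among the lines removed from smu_v13_0_6_ppt.c further below). A minimal standalone sketch of that conversion, written as a userspace test program rather than driver code:

/* Standalone illustration of the SMUQ10_* rounding, mirroring the macros
 * removed from smu_v13_0_6_ppt.c below. Not driver code; the sample value
 * is arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

#define SMUQ10_TO_UINT(x)	((x) >> 10)
#define SMUQ10_FRAC(x)		((x) & 0x3ff)
#define SMUQ10_ROUND(x)		(SMUQ10_TO_UINT(x) + (SMUQ10_FRAC(x) >= 0x200))

int main(void)
{
	uint64_t raw = 0x2a66;	/* 10854 / 1024 ~= 10.6 in Q10.10 */

	/* Prints: 10 10.600 11 (truncated, fractional, rounded) */
	printf("%llu %.3f %llu\n",
	       (unsigned long long)SMUQ10_TO_UINT(raw), raw / 1024.0,
	       (unsigned long long)SMUQ10_ROUND(raw));
	return 0;
}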
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index da7bd9227afe..682646068000 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -105,7 +105,6 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_14.bin");
enum smu_v13_0_6_caps {
SMU_CAP(DPM),
- SMU_CAP(UNI_METRICS),
SMU_CAP(DPM_POLICY),
SMU_CAP(OTHER_END_METRICS),
SMU_CAP(SET_UCLK_MAX),
@@ -117,6 +116,7 @@ enum smu_v13_0_6_caps {
SMU_CAP(RMA_MSG),
SMU_CAP(ACA_SYND),
SMU_CAP(SDMA_RESET),
+ SMU_CAP(STATIC_METRICS),
SMU_CAP(ALL),
};
@@ -193,7 +193,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, SMU_MSG_RAS_PRI),
MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0),
MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0),
- MSG_MAP(SelectPstatePolicy, PPSMC_MSG_SelectPstatePolicy, 0),
+ MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0),
MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0),
};
@@ -231,7 +231,11 @@ static const struct cmn2asic_mapping smu_v13_0_6_feature_mask_map[SMU_FEATURE_CO
SMU_13_0_6_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, FEATURE_FW_CTF),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_THERMAL_BIT, FEATURE_THERMAL),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT, FEATURE_XGMI_PER_LINK_PWR_DOWN),
- SMU_13_0_6_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT, FEATURE_DF_CSTATE),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT, FEATURE_DF_CSTATE),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_VCN_BIT, FEATURE_DS_VCN),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_MP1CLK_BIT, FEATURE_DS_MP1CLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_MPIOCLK_BIT, FEATURE_DS_MPIOCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_MP0CLK_BIT, FEATURE_DS_MP0CLK),
};
#define TABLE_PMSTATUSLOG 0
@@ -253,27 +257,13 @@ static const uint8_t smu_v13_0_6_throttler_map[] = {
[THROTTLER_PROCHOT_BIT] = (SMU_THROTTLER_PROCHOT_GFX_BIT),
};
-struct PPTable_t {
- uint32_t MaxSocketPowerLimit;
- uint32_t MaxGfxclkFrequency;
- uint32_t MinGfxclkFrequency;
- uint32_t FclkFrequencyTable[4];
- uint32_t UclkFrequencyTable[4];
- uint32_t SocclkFrequencyTable[4];
- uint32_t VclkFrequencyTable[4];
- uint32_t DclkFrequencyTable[4];
- uint32_t LclkFrequencyTable[4];
- uint32_t MaxLclkDpmRange;
- uint32_t MinLclkDpmRange;
- uint64_t PublicSerialNumber_AID;
- bool Init;
-};
-
-#define SMUQ10_TO_UINT(x) ((x) >> 10)
-#define SMUQ10_FRAC(x) ((x) & 0x3ff)
-#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))
-#define GET_METRIC_FIELD(field, flag) ((flag) ?\
- (metrics_a->field) : (metrics_x->field))
+#define GET_GPU_METRIC_FIELD(field, version) ((version == METRICS_VERSION_V0) ?\
+ (metrics_v0->field) : (metrics_v2->field))
+#define GET_METRIC_FIELD(field, version) ((version == METRICS_VERSION_V1) ?\
+ (metrics_v1->field) : GET_GPU_METRIC_FIELD(field, version))
+#define METRICS_TABLE_SIZE (max3(sizeof(MetricsTableV0_t),\
+ sizeof(MetricsTableV1_t),\
+ sizeof(MetricsTableV2_t)))
struct smu_v13_0_6_dpm_map {
enum smu_clk_type clk_type;
@@ -282,6 +272,18 @@ struct smu_v13_0_6_dpm_map {
uint32_t *freq_table;
};
+static inline int smu_v13_0_6_get_metrics_version(struct smu_context *smu)
+{
+ if ((smu->adev->flags & AMD_IS_APU) &&
+ smu->smc_fw_version <= 0x4556900)
+ return METRICS_VERSION_V1;
+ else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
+ IP_VERSION(13, 0, 12))
+ return METRICS_VERSION_V2;
+
+ return METRICS_VERSION_V0;
+}
+
static inline void smu_v13_0_6_cap_set(struct smu_context *smu,
enum smu_v13_0_6_caps cap)
{
@@ -309,7 +311,6 @@ static inline bool smu_v13_0_6_cap_supported(struct smu_context *smu,
static void smu_v13_0_14_init_caps(struct smu_context *smu)
{
enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM),
- SMU_CAP(UNI_METRICS),
SMU_CAP(SET_UCLK_MAX),
SMU_CAP(DPM_POLICY),
SMU_CAP(PCIE_METRICS),
@@ -335,12 +336,14 @@ static void smu_v13_0_14_init_caps(struct smu_context *smu)
static void smu_v13_0_12_init_caps(struct smu_context *smu)
{
enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM),
- SMU_CAP(UNI_METRICS),
SMU_CAP(PCIE_METRICS),
SMU_CAP(CTF_LIMIT),
SMU_CAP(MCA_DEBUG_MODE),
SMU_CAP(RMA_MSG),
- SMU_CAP(ACA_SYND) };
+ SMU_CAP(ACA_SYND),
+ SMU_CAP(OTHER_END_METRICS),
+ SMU_CAP(HST_LIMIT_METRICS),
+ SMU_CAP(PER_INST_METRICS) };
uint32_t fw_ver = smu->smc_fw_version;
for (int i = 0; i < ARRAY_SIZE(default_cap_list); i++)
@@ -351,12 +354,14 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu)
if (fw_ver >= 0x00561700)
smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
+
+ if (fw_ver >= 0x00561E00)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
}
static void smu_v13_0_6_init_caps(struct smu_context *smu)
{
enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM),
- SMU_CAP(UNI_METRICS),
SMU_CAP(SET_UCLK_MAX),
SMU_CAP(DPM_POLICY),
SMU_CAP(PCIE_METRICS),
@@ -382,8 +387,6 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu)
smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG));
smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND));
- if (fw_ver <= 0x4556900)
- smu_v13_0_6_cap_clear(smu, SMU_CAP(UNI_METRICS));
if (fw_ver >= 0x04556F00)
smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
if (fw_ver >= 0x04556A00)
@@ -450,8 +453,9 @@ static int smu_v13_0_6_init_microcode(struct smu_context *smu)
int var = (adev->pdev->device & 0xF);
char ucode_prefix[15];
- /* No need to load P2S tables in IOV mode */
- if (amdgpu_sriov_vf(adev))
+ /* No need to load P2S tables in IOV mode or for smu v13.0.12 */
+ if (amdgpu_sriov_vf(adev) ||
+ (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)))
return 0;
if (!(adev->flags & AMD_IS_APU)) {
@@ -508,13 +512,15 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
struct amdgpu_device *adev = smu->adev;
+ int gpu_metrics_size = METRICS_TABLE_SIZE;
if (!(adev->flags & AMD_IS_APU))
SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS,
- max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)),
+ max(gpu_metrics_size,
+ smu_v13_0_12_get_max_metrics_size()),
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
@@ -522,8 +528,7 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
- smu_table->metrics_table = kzalloc(max(sizeof(MetricsTableX_t),
- sizeof(MetricsTableA_t)), GFP_KERNEL);
+ smu_table->metrics_table = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
if (!smu_table->metrics_table)
return -ENOMEM;
smu_table->metrics_time = 0;
@@ -570,7 +575,7 @@ static int smu_v13_0_6_select_policy_soc_pstate(struct smu_context *smu,
return -EINVAL;
}
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SelectPstatePolicy,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetThrottlingPolicy,
param, NULL);
if (ret)
@@ -740,10 +745,8 @@ static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu,
memset(&pm_metrics->common_header, 0,
sizeof(pm_metrics->common_header));
- if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6))
- pm_metrics->common_header.mp1_ip_discovery_version = IP_VERSION(13, 0, 6);
- if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 14))
- pm_metrics->common_header.mp1_ip_discovery_version = IP_VERSION(13, 0, 14);
+ pm_metrics->common_header.mp1_ip_discovery_version =
+ amdgpu_ip_version(smu->adev, MP1_HWIP, 0);
pm_metrics->common_header.pmfw_version = pmfw_version;
pm_metrics->common_header.pmmetrics_version = table_version;
pm_metrics->common_header.structure_size =
@@ -755,14 +758,18 @@ static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu,
static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table;
- MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
+ MetricsTableV0_t *metrics_v0 = (MetricsTableV0_t *)smu_table->metrics_table;
+ MetricsTableV1_t *metrics_v1 = (MetricsTableV1_t *)smu_table->metrics_table;
+ MetricsTableV2_t *metrics_v2 = (MetricsTableV2_t *)smu_table->metrics_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
- bool flag = !smu_v13_0_6_cap_supported(smu, SMU_CAP(UNI_METRICS));
+ int version = smu_v13_0_6_get_metrics_version(smu);
int ret, i, retry = 100;
uint32_t table_version;
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
+ return smu_v13_0_12_setup_driver_pptable(smu);
+
/* Store one-time values in driver PPTable */
if (!pptable->Init) {
while (--retry) {
@@ -771,7 +778,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
return ret;
/* Ensure that metrics have been updated */
- if (GET_METRIC_FIELD(AccumulationCounter, flag))
+ if (GET_METRIC_FIELD(AccumulationCounter, version))
break;
usleep_range(1000, 1100);
@@ -788,29 +795,30 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
table_version;
pptable->MaxSocketPowerLimit =
- SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit, version));
pptable->MaxGfxclkFrequency =
- SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency, version));
pptable->MinGfxclkFrequency =
- SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency, version));
for (i = 0; i < 4; ++i) {
pptable->FclkFrequencyTable[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable, flag)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable, version)[i]);
pptable->UclkFrequencyTable[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable, flag)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable, version)[i]);
pptable->SocclkFrequencyTable[i] = SMUQ10_ROUND(
- GET_METRIC_FIELD(SocclkFrequencyTable, flag)[i]);
+ GET_METRIC_FIELD(SocclkFrequencyTable, version)[i]);
pptable->VclkFrequencyTable[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable, flag)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable, version)[i]);
pptable->DclkFrequencyTable[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable, flag)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable, version)[i]);
pptable->LclkFrequencyTable[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable, flag)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable, version)[i]);
}
/* use AID0 serial number by default */
- pptable->PublicSerialNumber_AID = GET_METRIC_FIELD(PublicSerialNumber_AID, flag)[0];
+ pptable->PublicSerialNumber_AID =
+ GET_METRIC_FIELD(PublicSerialNumber_AID, version)[0];
pptable->Init = true;
}
@@ -1130,9 +1138,10 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
uint32_t *value)
{
struct smu_table_context *smu_table = &smu->smu_table;
- MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table;
- MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
- bool flag = !smu_v13_0_6_cap_supported(smu, SMU_CAP(UNI_METRICS));
+ MetricsTableV0_t *metrics_v0 = (MetricsTableV0_t *)smu_table->metrics_table;
+ MetricsTableV1_t *metrics_v1 = (MetricsTableV1_t *)smu_table->metrics_table;
+ MetricsTableV2_t *metrics_v2 = (MetricsTableV2_t *)smu_table->metrics_table;
+ int version = smu_v13_0_6_get_metrics_version(smu);
struct amdgpu_device *adev = smu->adev;
int ret = 0;
int xcc_id;
@@ -1141,56 +1150,59 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
if (ret)
return ret;
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
+ return smu_v13_0_12_get_smu_metrics_data(smu, member, value);
+
/* For clocks with multiple instances, only report the first one */
switch (member) {
case METRICS_CURR_GFXCLK:
case METRICS_AVERAGE_GFXCLK:
if (smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM))) {
xcc_id = GET_INST(GC, 0);
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, flag)[xcc_id]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, version)[xcc_id]);
} else {
*value = 0;
}
break;
case METRICS_CURR_SOCCLK:
case METRICS_AVERAGE_SOCCLK:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, flag)[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, version)[0]);
break;
case METRICS_CURR_UCLK:
case METRICS_AVERAGE_UCLK:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, flag));
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, version));
break;
case METRICS_CURR_VCLK:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, flag)[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, version)[0]);
break;
case METRICS_CURR_DCLK:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, flag)[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, version)[0]);
break;
case METRICS_CURR_FCLK:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency, flag));
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency, version));
break;
case METRICS_AVERAGE_GFXACTIVITY:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, flag));
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, version));
break;
case METRICS_AVERAGE_MEMACTIVITY:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, flag));
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, version));
break;
case METRICS_CURR_SOCKETPOWER:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, flag)) << 8;
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, version)) << 8;
break;
case METRICS_TEMPERATURE_HOTSPOT:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, flag)) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_TEMPERATURE_MEM:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, flag)) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, version)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
/* This is the max of all VRs and not just SOC VR.
* No need to define another data type for the same.
*/
case METRICS_TEMPERATURE_VRSOC:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, flag)) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, version)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
default:
@@ -1354,6 +1366,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
break;
case SMU_OD_MCLK:
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(SET_UCLK_MAX)))
+ return 0;
+
size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
pstate_table->uclk_pstate.curr.min,
@@ -1929,7 +1944,7 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
break;
}
- return -EINVAL;
+ return -EOPNOTSUPP;
}
static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
@@ -2195,6 +2210,9 @@ static bool smu_v13_0_6_is_dpm_running(struct smu_context *smu)
int ret;
uint64_t feature_enabled;
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12))
+ return smu_v13_0_12_is_dpm_running(smu);
+
ret = smu_v13_0_6_get_enabled_mask(smu, &feature_enabled);
if (ret)
@@ -2478,82 +2496,92 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
struct smu_table_context *smu_table = &smu->smu_table;
struct gpu_metrics_v1_7 *gpu_metrics =
(struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table;
- bool flag = !smu_v13_0_6_cap_supported(smu, SMU_CAP(UNI_METRICS));
+ int version = smu_v13_0_6_get_metrics_version(smu);
int ret = 0, xcc_id, inst, i, j, k, idx;
struct amdgpu_device *adev = smu->adev;
- MetricsTableX_t *metrics_x;
- MetricsTableA_t *metrics_a;
+ MetricsTableV0_t *metrics_v0;
+ MetricsTableV1_t *metrics_v1;
+ MetricsTableV2_t *metrics_v2;
struct amdgpu_xcp *xcp;
u16 link_width_level;
+ u8 num_jpeg_rings;
u32 inst_mask;
bool per_inst;
- metrics_x = kzalloc(max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), GFP_KERNEL);
- ret = smu_v13_0_6_get_metrics_table(smu, metrics_x, true);
+ metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
+ if (!metrics_v0)
+ return -ENOMEM;
+
+ ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, true);
if (ret) {
- kfree(metrics_x);
+ kfree(metrics_v0);
return ret;
}
- metrics_a = (MetricsTableA_t *)metrics_x;
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
+ kfree(metrics_v0);
+ return smu_v13_0_12_get_gpu_metrics(smu, table);
+ }
+
+ metrics_v1 = (MetricsTableV1_t *)metrics_v0;
+ metrics_v2 = (MetricsTableV2_t *)metrics_v0;
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 7);
gpu_metrics->temperature_hotspot =
- SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version));
/* Individual HBM stack temperature is not reported */
gpu_metrics->temperature_mem =
- SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, version));
/* Reports max temperature of all voltage rails */
gpu_metrics->temperature_vrsoc =
- SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, version));
gpu_metrics->average_gfx_activity =
- SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, version));
gpu_metrics->average_umc_activity =
- SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, version));
gpu_metrics->mem_max_bandwidth =
- SMUQ10_ROUND(GET_METRIC_FIELD(MaxDramBandwidth, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxDramBandwidth, version));
gpu_metrics->curr_socket_power =
- SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, version));
/* Energy counter reported in 15.259uJ (2^-16) units */
- gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc, flag);
+ gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc, version);
for (i = 0; i < MAX_GFX_CLKS; i++) {
xcc_id = GET_INST(GC, i);
if (xcc_id >= 0)
gpu_metrics->current_gfxclk[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, flag)[xcc_id]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, version)[xcc_id]);
if (i < MAX_CLKS) {
gpu_metrics->current_socclk[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, flag)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, version)[i]);
inst = GET_INST(VCN, i);
if (inst >= 0) {
gpu_metrics->current_vclk0[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, flag)[inst]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency,
+ version)[inst]);
gpu_metrics->current_dclk0[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, flag)[inst]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency,
+ version)[inst]);
}
}
}
- gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, flag));
+ gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, version));
/* Total accumulated cycle counter */
- gpu_metrics->accumulation_counter = GET_METRIC_FIELD(AccumulationCounter, flag);
+ gpu_metrics->accumulation_counter = GET_METRIC_FIELD(AccumulationCounter, version);
/* Accumulated throttler residencies */
- gpu_metrics->prochot_residency_acc = GET_METRIC_FIELD(ProchotResidencyAcc, flag);
- gpu_metrics->ppt_residency_acc = GET_METRIC_FIELD(PptResidencyAcc, flag);
- gpu_metrics->socket_thm_residency_acc = GET_METRIC_FIELD(SocketThmResidencyAcc, flag);
- gpu_metrics->vr_thm_residency_acc = GET_METRIC_FIELD(VrThmResidencyAcc, flag);
- gpu_metrics->hbm_thm_residency_acc = GET_METRIC_FIELD(HbmThmResidencyAcc, flag);
+ gpu_metrics->prochot_residency_acc = GET_METRIC_FIELD(ProchotResidencyAcc, version);
+ gpu_metrics->ppt_residency_acc = GET_METRIC_FIELD(PptResidencyAcc, version);
+ gpu_metrics->socket_thm_residency_acc = GET_METRIC_FIELD(SocketThmResidencyAcc, version);
+ gpu_metrics->vr_thm_residency_acc = GET_METRIC_FIELD(VrThmResidencyAcc, version);
+ gpu_metrics->hbm_thm_residency_acc =
+ GET_METRIC_FIELD(HbmThmResidencyAcc, version);
/* Clock Lock Status. Each bit corresponds to each GFXCLK instance */
- gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak, flag) >> GET_INST(GC, 0);
+ gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak,
+ version) >> GET_INST(GC, 0);
if (!(adev->flags & AMD_IS_APU)) {
/*Check smu version, PCIE link speed and width will be reported from pmfw metric
@@ -2561,9 +2589,9 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
* for pf from registers
*/
if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PCIE_METRICS))) {
- gpu_metrics->pcie_link_width = metrics_x->PCIeLinkWidth;
+ gpu_metrics->pcie_link_width = GET_GPU_METRIC_FIELD(PCIeLinkWidth, version);
gpu_metrics->pcie_link_speed =
- pcie_gen_to_speed(metrics_x->PCIeLinkSpeed);
+ pcie_gen_to_speed(GET_GPU_METRIC_FIELD(PCIeLinkSpeed, version));
} else if (!amdgpu_sriov_vf(adev)) {
link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu);
if (link_width_level > MAX_LINK_WIDTH)
@@ -2576,37 +2604,37 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
}
gpu_metrics->pcie_bandwidth_acc =
- SMUQ10_ROUND(metrics_x->PcieBandwidthAcc[0]);
+ SMUQ10_ROUND(GET_GPU_METRIC_FIELD(PcieBandwidthAcc, version)[0]);
gpu_metrics->pcie_bandwidth_inst =
- SMUQ10_ROUND(metrics_x->PcieBandwidth[0]);
+ SMUQ10_ROUND(GET_GPU_METRIC_FIELD(PcieBandwidth, version)[0]);
gpu_metrics->pcie_l0_to_recov_count_acc =
- metrics_x->PCIeL0ToRecoveryCountAcc;
+ GET_GPU_METRIC_FIELD(PCIeL0ToRecoveryCountAcc, version);
gpu_metrics->pcie_replay_count_acc =
- metrics_x->PCIenReplayAAcc;
+ GET_GPU_METRIC_FIELD(PCIenReplayAAcc, version);
gpu_metrics->pcie_replay_rover_count_acc =
- metrics_x->PCIenReplayARolloverCountAcc;
+ GET_GPU_METRIC_FIELD(PCIenReplayARolloverCountAcc, version);
gpu_metrics->pcie_nak_sent_count_acc =
- metrics_x->PCIeNAKSentCountAcc;
+ GET_GPU_METRIC_FIELD(PCIeNAKSentCountAcc, version);
gpu_metrics->pcie_nak_rcvd_count_acc =
- metrics_x->PCIeNAKReceivedCountAcc;
+ GET_GPU_METRIC_FIELD(PCIeNAKReceivedCountAcc, version);
if (smu_v13_0_6_cap_supported(smu, SMU_CAP(OTHER_END_METRICS)))
gpu_metrics->pcie_lc_perf_other_end_recovery =
- metrics_x->PCIeOtherEndRecoveryAcc;
+ GET_GPU_METRIC_FIELD(PCIeOtherEndRecoveryAcc, version);
}
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
gpu_metrics->gfx_activity_acc =
- SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc, version));
gpu_metrics->mem_activity_acc =
- SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc, flag));
+ SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc, version));
for (i = 0; i < NUM_XGMI_LINKS; i++) {
gpu_metrics->xgmi_read_data_acc[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc, flag)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc, version)[i]);
gpu_metrics->xgmi_write_data_acc[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc, flag)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc, version)[i]);
ret = amdgpu_get_xgmi_link_status(adev, i);
if (ret >= 0)
gpu_metrics->xgmi_link_status[i] = ret;
@@ -2616,6 +2644,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS));
+ num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3;
for_each_xcp(adev->xcp_mgr, xcp, i) {
amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
idx = 0;
@@ -2623,14 +2652,14 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
/* Both JPEG and VCN has same instances */
inst = GET_INST(VCN, k);
- for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ for (j = 0; j < num_jpeg_rings; ++j) {
gpu_metrics->xcp_stats[i].jpeg_busy
- [(idx * adev->jpeg.num_jpeg_rings) + j] =
- SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy, flag)
- [(inst * adev->jpeg.num_jpeg_rings) + j]);
+ [(idx * num_jpeg_rings) + j] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy, version)
+ [(inst * num_jpeg_rings) + j]);
}
gpu_metrics->xcp_stats[i].vcn_busy[idx] =
- SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, flag)[inst]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]);
idx++;
}
@@ -2641,27 +2670,29 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
for_each_inst(k, inst_mask) {
inst = GET_INST(GC, k);
gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] =
- SMUQ10_ROUND(metrics_x->GfxBusy[inst]);
+ SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]);
gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
- SMUQ10_ROUND(metrics_x->GfxBusyAcc[inst]);
+ SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusyAcc,
+ version)[inst]);
if (smu_v13_0_6_cap_supported(
smu, SMU_CAP(HST_LIMIT_METRICS)))
gpu_metrics->xcp_stats[i].gfx_below_host_limit_acc[idx] =
- SMUQ10_ROUND(metrics_x->GfxclkBelowHostLimitAcc
+ SMUQ10_ROUND(GET_GPU_METRIC_FIELD
+ (GfxclkBelowHostLimitAcc, version)
[inst]);
idx++;
}
}
}
- gpu_metrics->xgmi_link_width = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWidth, flag));
- gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiBitrate, flag));
+ gpu_metrics->xgmi_link_width = GET_METRIC_FIELD(XgmiWidth, version);
+ gpu_metrics->xgmi_link_speed = GET_METRIC_FIELD(XgmiBitrate, version);
- gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, flag);
+ gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, version);
*table = (void *)gpu_metrics;
- kfree(metrics_x);
+ kfree(metrics_v0);
return sizeof(*gpu_metrics);
}
@@ -2871,11 +2902,31 @@ static int smu_v13_0_6_send_rma_reason(struct smu_context *smu)
return ret;
}
+/**
+ * smu_v13_0_6_reset_sdma_is_supported - Check if SDMA reset is supported
+ * @smu: smu_context pointer
+ *
+ * This function checks whether the SMU firmware is capable of resetting the
+ * SDMA engine.
+ *
+ * Return: true if SDMA reset is supported, false otherwise.
+ */
+static bool smu_v13_0_6_reset_sdma_is_supported(struct smu_context *smu)
+{
+ bool ret = true;
+
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(SDMA_RESET))) {
+ dev_info(smu->adev->dev,
+ "SDMA reset capability is not supported\n");
+ ret = false;
+ }
+
+ return ret;
+}
+
static int smu_v13_0_6_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
{
int ret = 0;
- if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(SDMA_RESET)))
+ if (!smu_v13_0_6_reset_sdma_is_supported(smu))
return -EOPNOTSUPP;
ret = smu_cmn_send_smc_msg_with_param(smu,
@@ -3559,14 +3610,17 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num,
.send_rma_reason = smu_v13_0_6_send_rma_reason,
.reset_sdma = smu_v13_0_6_reset_sdma,
+ .reset_sdma_is_supported = smu_v13_0_6_reset_sdma_is_supported,
};
void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
{
smu->ppt_funcs = &smu_v13_0_6_ppt_funcs;
- smu->message_map = smu_v13_0_6_message_map;
+ smu->message_map = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) ?
+ smu_v13_0_12_message_map : smu_v13_0_6_message_map;
smu->clock_map = smu_v13_0_6_clk_map;
- smu->feature_map = smu_v13_0_6_feature_mask_map;
+ smu->feature_map = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) ?
+ smu_v13_0_12_feature_mask_map : smu_v13_0_6_feature_mask_map;
smu->table_map = smu_v13_0_6_table_map;
smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION;
smu->smc_fw_caps |= SMU_FW_CAP_RAS_PRI;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
index f0fa42a645c0..83745909e564 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
@@ -27,6 +27,30 @@
#define SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL 0x4
#define SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL 0x2
+typedef enum {
+/*0*/ METRICS_VERSION_V0 = 0,
+/*1*/ METRICS_VERSION_V1 = 1,
+/*2*/ METRICS_VERSION_V2 = 2,
+
+/*3*/ NUM_METRICS = 3
+} METRICS_LIST_e;
+
+struct PPTable_t {
+ uint32_t MaxSocketPowerLimit;
+ uint32_t MaxGfxclkFrequency;
+ uint32_t MinGfxclkFrequency;
+ uint32_t FclkFrequencyTable[4];
+ uint32_t UclkFrequencyTable[4];
+ uint32_t SocclkFrequencyTable[4];
+ uint32_t VclkFrequencyTable[4];
+ uint32_t DclkFrequencyTable[4];
+ uint32_t LclkFrequencyTable[4];
+ uint32_t MaxLclkDpmRange;
+ uint32_t MinLclkDpmRange;
+ uint64_t PublicSerialNumber_AID;
+ bool Init;
+};
+
extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu);
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 55ef18517b0f..c8f4f6fb4083 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -807,6 +807,10 @@ static int smu_v13_0_7_get_smu_metrics_data(struct smu_context *smu,
else
*value = metrics->AverageMemclkFrequencyPreDs;
break;
+ case METRICS_AVERAGE_VCNACTIVITY:
+ *value = max(metrics->Vcn0ActivityPercentage,
+ metrics->Vcn1ActivityPercentage);
+ break;
case METRICS_AVERAGE_VCLK:
*value = metrics->AverageVclk0Frequency;
break;
@@ -951,6 +955,12 @@ static int smu_v13_0_7_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_VCN_LOAD:
+ ret = smu_v13_0_7_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_VCNACTIVITY,
+ (uint32_t *)data);
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
ret = smu_v13_0_7_get_smu_metrics_data(smu,
METRICS_AVERAGE_SOCKETPOWER,
@@ -2811,5 +2821,4 @@ void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
smu->workload_map = smu_v13_0_7_workload_map;
smu->smc_driver_if_version = SMU13_0_7_DRIVER_IF_VERSION;
smu_v13_0_set_smu_mailbox_registers(smu);
- smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index ddb6444406d2..76c1adda83db 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -245,6 +245,7 @@ int smu_v14_0_check_fw_version(struct smu_context *smu)
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(14, 0, 0):
case IP_VERSION(14, 0, 4):
+ case IP_VERSION(14, 0, 5):
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
break;
case IP_VERSION(14, 0, 1):
@@ -769,6 +770,7 @@ int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable)
case IP_VERSION(14, 0, 2):
case IP_VERSION(14, 0, 3):
case IP_VERSION(14, 0, 4):
+ case IP_VERSION(14, 0, 5):
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)
@@ -948,6 +950,14 @@ static int smu_v14_0_irq_process(struct amdgpu_device *adev,
uint32_t client_id = entry->client_id;
uint32_t src_id = entry->src_id;
+ /*
+ * ctxid is used to distinguish different
+ * events for the SMCToHost interrupt.
+ */
+ uint32_t ctxid = entry->src_data[0];
+ uint32_t data;
+ uint32_t high;
+
if (client_id == SOC15_IH_CLIENTID_THM) {
switch (src_id) {
case THM_11_0__SRCID__THM_DIG_THERM_L2H:
@@ -962,6 +972,50 @@ static int smu_v14_0_irq_process(struct amdgpu_device *adev,
src_id);
break;
}
+ } else if (client_id == SOC15_IH_CLIENTID_MP1) {
+ if (src_id == SMU_IH_INTERRUPT_ID_TO_DRIVER) {
+ /* ACK SMUToHost interrupt */
+ data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
+ data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data);
+
+ switch (ctxid) {
+ case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL:
+ high = smu->thermal_range.software_shutdown_temp +
+ smu->thermal_range.software_shutdown_temp_offset;
+ high = min_t(typeof(high),
+ SMU_THERMAL_MAXIMUM_ALERT_TEMP,
+ high);
+ dev_emerg(adev->dev, "Reduce soft CTF limit to %d (by an offset %d)\n",
+ high,
+ smu->thermal_range.software_shutdown_temp_offset);
+
+ data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
+ data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL,
+ DIG_THERM_INTH,
+ (high & 0xff));
+ data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
+ WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
+ break;
+ case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY:
+ high = min_t(typeof(high),
+ SMU_THERMAL_MAXIMUM_ALERT_TEMP,
+ smu->thermal_range.software_shutdown_temp);
+ dev_emerg(adev->dev, "Recover soft CTF limit to %d\n", high);
+
+ data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
+ data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL,
+ DIG_THERM_INTH,
+ (high & 0xff));
+ data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
+ WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
+ break;
+ default:
+ dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n",
+ ctxid, client_id);
+ break;
+ }
+ }
}
return 0;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 5cad09c5f2ff..f7cfe1f35cae 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -756,6 +756,10 @@ static int smu_v14_0_2_get_smu_metrics_data(struct smu_context *smu,
case METRICS_AVERAGE_MEMACTIVITY:
*value = metrics->AverageUclkActivity;
break;
+ case METRICS_AVERAGE_VCNACTIVITY:
+ *value = max(metrics->AverageVcn0ActivityPercentage,
+ metrics->Vcn1ActivityPercentage);
+ break;
case METRICS_AVERAGE_SOCKETPOWER:
*value = metrics->AverageSocketPower << 8;
break;
@@ -882,6 +886,12 @@ static int smu_v14_0_2_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_VCN_LOAD:
+ ret = smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_VCNACTIVITY,
+ (uint32_t *)data);
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
ret = smu_v14_0_2_get_smu_metrics_data(smu,
METRICS_AVERAGE_SOCKETPOWER,
@@ -1193,16 +1203,9 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
PP_OD_FEATURE_GFXCLK_BIT))
break;
- PPTable_t *pptable = smu->smu_table.driver_pptable;
- const OverDriveLimits_t * const overdrive_upperlimits =
- &pptable->SkuTable.OverDriveLimitsBasicMax;
- const OverDriveLimits_t * const overdrive_lowerlimits =
- &pptable->SkuTable.OverDriveLimitsBasicMin;
-
size += sysfs_emit_at(buf, size, "OD_SCLK_OFFSET:\n");
- size += sysfs_emit_at(buf, size, "0: %dMhz\n1: %uMhz\n",
- overdrive_lowerlimits->GfxclkFoffset,
- overdrive_upperlimits->GfxclkFoffset);
+ size += sysfs_emit_at(buf, size, "%dMhz\n",
+ od_table->OverDriveTable.GfxclkFoffset);
break;
case SMU_OD_MCLK:
@@ -1337,12 +1340,8 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
smu_v14_0_2_get_od_setting_limits(smu,
- PP_OD_FEATURE_GFXCLK_FMIN,
- &min_value,
- NULL);
- smu_v14_0_2_get_od_setting_limits(smu,
PP_OD_FEATURE_GFXCLK_FMAX,
- NULL,
+ &min_value,
&max_value);
size += sysfs_emit_at(buf, size, "SCLK_OFFSET: %7dMhz %10uMhz\n",
min_value, max_value);
@@ -1627,6 +1626,39 @@ out:
adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
}
+static int smu_v14_0_2_get_fan_speed_pwm(struct smu_context *smu,
+ uint32_t *speed)
+{
+ int ret;
+
+ if (!speed)
+ return -EINVAL;
+
+ ret = smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_CURR_FANPWM,
+ speed);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!");
+ return ret;
+ }
+
+ /* Convert the PMFW output which is in percent to pwm(255) based */
+ *speed = min(*speed * 255 / 100, (uint32_t)255);
+
+ return 0;
+}
+
+static int smu_v14_0_2_get_fan_speed_rpm(struct smu_context *smu,
+ uint32_t *speed)
+{
+ if (!speed)
+ return -EINVAL;
+
+ return smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_CURR_FANSPEED,
+ speed);
+}
+
static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
uint32_t *current_power_limit,
uint32_t *default_power_limit,
@@ -2417,36 +2449,24 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
return -ENOTSUPP;
}
- for (i = 0; i < size; i += 2) {
- if (i + 2 > size) {
- dev_info(adev->dev, "invalid number of input parameters %d\n", size);
- return -EINVAL;
- }
-
- switch (input[i]) {
- case 1:
- smu_v14_0_2_get_od_setting_limits(smu,
- PP_OD_FEATURE_GFXCLK_FMAX,
- &minimum,
- &maximum);
- if (input[i + 1] < minimum ||
- input[i + 1] > maximum) {
- dev_info(adev->dev, "GfxclkFmax (%ld) must be within [%u, %u]!\n",
- input[i + 1], minimum, maximum);
- return -EINVAL;
- }
-
- od_table->OverDriveTable.GfxclkFoffset = input[i + 1];
- od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
- break;
+ if (size != 1) {
+ dev_info(adev->dev, "invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
- default:
- dev_info(adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
- dev_info(adev->dev, "Supported indices: [0:min,1:max]\n");
- return -EINVAL;
- }
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFXCLK_FMAX,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "GfxclkFoffset must be within [%d, %u]!\n",
+ minimum, maximum);
+ return -EINVAL;
}
+ od_table->OverDriveTable.GfxclkFoffset = input[0];
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
break;
case PP_OD_EDIT_MCLK_VDDC_TABLE:
@@ -2804,6 +2824,8 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
.set_performance_level = smu_v14_0_set_performance_level,
.gfx_off_control = smu_v14_0_gfx_off_control,
.get_unique_id = smu_v14_0_2_get_unique_id,
+ .get_fan_speed_pwm = smu_v14_0_2_get_fan_speed_pwm,
+ .get_fan_speed_rpm = smu_v14_0_2_get_fan_speed_rpm,
.get_power_limit = smu_v14_0_2_get_power_limit,
.set_power_limit = smu_v14_0_2_set_power_limit,
.get_power_profile_mode = smu_v14_0_2_get_power_profile_mode,
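For reference, the percent-to-PWM scaling added in smu_v14_0_2_get_fan_speed_pwm() above maps the firmware's 0-100 percent reading onto the 0-255 range used by the pwm fan-speed interface. A small sketch of the same arithmetic, illustrative only and not driver code:

/* Illustration of the conversion in smu_v14_0_2_get_fan_speed_pwm() above:
 * a PMFW percentage becomes a 0-255 PWM value, clamped like min(..., 255).
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t percent_to_pwm(uint32_t percent)
{
	uint32_t pwm = percent * 255 / 100;

	return pwm > 255 ? 255 : pwm;
}

int main(void)
{
	/* Prints: 0 127 255 */
	printf("%u %u %u\n", percent_to_pwm(0), percent_to_pwm(50),
	       percent_to_pwm(100));
	return 0;
}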
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 9f55207ea9bc..d834d134ad2b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -459,8 +459,7 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
}
if (read_arg) {
smu_cmn_read_arg(smu, read_arg);
- dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x,\
- readval: 0x%08x\n",
+ dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x, readval: 0x%08x\n",
smu_get_message_name(smu, msg), index, param, reg, *read_arg);
} else {
dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x\n",
diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig
index c901ac00c0c3..ed3ed617c688 100644
--- a/drivers/gpu/drm/arm/Kconfig
+++ b/drivers/gpu/drm/arm/Kconfig
@@ -9,6 +9,7 @@ config DRM_HDLCD
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
+ select DRM_BRIDGE # for TDA998x
help
Choose this option if you have an ARM High Definition Colour LCD
controller.
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
index f30b3d5eeca5..875cdbff18c9 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
@@ -88,7 +88,7 @@ komeda_wb_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
komeda_wb_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index 2577f0cef8fc..600af5ad81b1 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -43,7 +43,7 @@ static int malidp_mw_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
malidp_mw_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index 397e677a691c..46094cca2974 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -144,11 +144,9 @@ static int aspeed_gfx_load(struct drm_device *drm)
struct aspeed_gfx *priv = to_aspeed_gfx(drm);
struct device_node *np = pdev->dev.of_node;
const struct aspeed_gfx_config *config;
- struct resource *res;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(drm->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile
index 47da848fa3fc..8d09ba5d5889 100644
--- a/drivers/gpu/drm/ast/Makefile
+++ b/drivers/gpu/drm/ast/Makefile
@@ -4,6 +4,7 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ast-y := \
+ ast_cursor.o \
ast_ddc.o \
ast_dp501.o \
ast_dp.o \
@@ -13,6 +14,7 @@ ast-y := \
ast_mode.o \
ast_post.o \
ast_sil164.o \
+ ast_vbios.o \
ast_vga.o
obj-$(CONFIG_DRM_AST) := ast.o
diff --git a/drivers/gpu/drm/ast/ast_cursor.c b/drivers/gpu/drm/ast/ast_cursor.c
new file mode 100644
index 000000000000..139ab00dee8f
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_cursor.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+
+#include <linux/bits.h>
+#include <linux/sizes.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_format_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_print.h>
+
+#include "ast_drv.h"
+
+/*
+ * Hardware cursor
+ */
+
+/* defines for the signature structure */
+#define AST_HWC_SIGNATURE_CHECKSUM 0x00
+#define AST_HWC_SIGNATURE_SizeX 0x04
+#define AST_HWC_SIGNATURE_SizeY 0x08
+#define AST_HWC_SIGNATURE_X 0x0C
+#define AST_HWC_SIGNATURE_Y 0x10
+#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
+#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
+
+static u32 ast_cursor_calculate_checksum(const void *src, unsigned int width, unsigned int height)
+{
+ u32 csum = 0;
+ unsigned int one_pixel_copy = width & BIT(0);
+ unsigned int two_pixel_copy = width - one_pixel_copy;
+ unsigned int trailing_bytes = (AST_MAX_HWC_WIDTH - width) * sizeof(u16);
+ unsigned int x, y;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < two_pixel_copy; x += 2) {
+ const u32 *src32 = (const u32 *)src;
+
+ csum += *src32;
+ src += SZ_4;
+ }
+ if (one_pixel_copy) {
+ const u16 *src16 = (const u16 *)src;
+
+ csum += *src16;
+ src += SZ_2;
+ }
+ src += trailing_bytes;
+ }
+
+ return csum;
+}
+
+static void ast_set_cursor_image(struct ast_device *ast, const u8 *src,
+ unsigned int width, unsigned int height)
+{
+ u8 __iomem *dst = ast->cursor_plane.base.vaddr;
+ u32 csum;
+
+ csum = ast_cursor_calculate_checksum(src, width, height);
+
+ /* write pixel data */
+ memcpy_toio(dst, src, AST_HWC_SIZE);
+
+ /* write checksum + signature */
+ dst += AST_HWC_SIZE;
+ writel(csum, dst);
+ writel(width, dst + AST_HWC_SIGNATURE_SizeX);
+ writel(height, dst + AST_HWC_SIGNATURE_SizeY);
+ writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX);
+ writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
+}
+
+static void ast_set_cursor_base(struct ast_device *ast, u64 address)
+{
+ u8 addr0 = (address >> 3) & 0xff;
+ u8 addr1 = (address >> 11) & 0xff;
+ u8 addr2 = (address >> 19) & 0xff;
+
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xc8, addr0);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xc9, addr1);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xca, addr2);
+}
+
+static void ast_set_cursor_location(struct ast_device *ast, u16 x, u16 y,
+ u8 x_offset, u8 y_offset)
+{
+ u8 x0 = (x & 0x00ff);
+ u8 x1 = (x & 0x0f00) >> 8;
+ u8 y0 = (y & 0x00ff);
+ u8 y1 = (y & 0x0700) >> 8;
+
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xc2, x_offset);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xc3, y_offset);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xc4, x0);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xc5, x1);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xc6, y0);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xc7, y1);
+}
+
+static void ast_set_cursor_enabled(struct ast_device *ast, bool enabled)
+{
+ static const u8 mask = (u8)~(AST_IO_VGACRCB_HWC_16BPP |
+ AST_IO_VGACRCB_HWC_ENABLED);
+
+ u8 vgacrcb = AST_IO_VGACRCB_HWC_16BPP;
+
+ if (enabled)
+ vgacrcb |= AST_IO_VGACRCB_HWC_ENABLED;
+
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xcb, mask, vgacrcb);
+}
+
+/*
+ * Cursor plane
+ */
+
+static const uint32_t ast_cursor_plane_formats[] = {
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB8888,
+};
+
+static int ast_cursor_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_framebuffer *new_fb = new_plane_state->fb;
+ struct drm_crtc_state *new_crtc_state = NULL;
+ int ret;
+
+ if (new_plane_state->crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true, true);
+ if (ret || !new_plane_state->visible)
+ return ret;
+
+ if (new_fb->width > AST_MAX_HWC_WIDTH || new_fb->height > AST_MAX_HWC_HEIGHT)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct ast_cursor_plane *ast_cursor_plane = to_ast_cursor_plane(plane);
+ struct ast_plane *ast_plane = to_ast_plane(plane);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct ast_device *ast = to_ast_device(plane->dev);
+ struct drm_rect damage;
+ u64 dst_off = ast_plane->offset;
+ u8 __iomem *dst = ast_plane->vaddr; /* TODO: Use mapping abstraction properly */
+ u8 __iomem *sig = dst + AST_HWC_SIZE; /* TODO: Use mapping abstraction properly */
+ unsigned int offset_x, offset_y;
+ u16 x, y;
+ u8 x_offset, y_offset;
+
+ /*
+ * Do data transfer to hardware buffer and point the scanout
+ * engine to the offset.
+ */
+
+ if (drm_atomic_helper_damage_merged(old_plane_state, plane_state, &damage)) {
+ u8 *argb4444;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_ARGB4444:
+ argb4444 = shadow_plane_state->data[0].vaddr;
+ break;
+ default:
+ argb4444 = ast_cursor_plane->argb4444;
+ {
+ struct iosys_map argb4444_dst[DRM_FORMAT_MAX_PLANES] = {
+ IOSYS_MAP_INIT_VADDR(argb4444),
+ };
+ unsigned int argb4444_dst_pitch[DRM_FORMAT_MAX_PLANES] = {
+ AST_HWC_PITCH,
+ };
+
+ drm_fb_argb8888_to_argb4444(argb4444_dst, argb4444_dst_pitch,
+ shadow_plane_state->data, fb, &damage,
+ &shadow_plane_state->fmtcnv_state);
+ }
+ break;
+ }
+ ast_set_cursor_image(ast, argb4444, fb->width, fb->height);
+ ast_set_cursor_base(ast, dst_off);
+ }
+
+ /*
+ * Update location in HWC signature and registers.
+ */
+
+ writel(plane_state->crtc_x, sig + AST_HWC_SIGNATURE_X);
+ writel(plane_state->crtc_y, sig + AST_HWC_SIGNATURE_Y);
+
+ offset_x = AST_MAX_HWC_WIDTH - fb->width;
+ offset_y = AST_MAX_HWC_HEIGHT - fb->height;
+
+ if (plane_state->crtc_x < 0) {
+ x_offset = (-plane_state->crtc_x) + offset_x;
+ x = 0;
+ } else {
+ x_offset = offset_x;
+ x = plane_state->crtc_x;
+ }
+ if (plane_state->crtc_y < 0) {
+ y_offset = (-plane_state->crtc_y) + offset_y;
+ y = 0;
+ } else {
+ y_offset = offset_y;
+ y = plane_state->crtc_y;
+ }
+
+ ast_set_cursor_location(ast, x, y, x_offset, y_offset);
+
+ /* Dummy write to enable the HWC and make the HW pick up the changes. */
+ ast_set_cursor_enabled(ast, true);
+}
+
+static void ast_cursor_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct ast_device *ast = to_ast_device(plane->dev);
+
+ ast_set_cursor_enabled(ast, false);
+}
+
+static const struct drm_plane_helper_funcs ast_cursor_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = ast_cursor_plane_helper_atomic_check,
+ .atomic_update = ast_cursor_plane_helper_atomic_update,
+ .atomic_disable = ast_cursor_plane_helper_atomic_disable,
+};
+
+static const struct drm_plane_funcs ast_cursor_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
+};
+
+int ast_cursor_plane_init(struct ast_device *ast)
+{
+ struct drm_device *dev = &ast->base;
+ struct ast_cursor_plane *ast_cursor_plane = &ast->cursor_plane;
+ struct ast_plane *ast_plane = &ast_cursor_plane->base;
+ struct drm_plane *cursor_plane = &ast_plane->base;
+ size_t size;
+ void __iomem *vaddr;
+ u64 offset;
+ int ret;
+
+ /*
+ * Allocate backing storage for the cursor. The buffer is permanently
+ * reserved at the top end of the VRAM.
+ */
+
+ size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
+
+ if (ast->vram_fb_available < size)
+ return -ENOMEM;
+
+ vaddr = ast->vram + ast->vram_fb_available - size;
+ offset = ast->vram_fb_available - size;
+
+ ret = ast_plane_init(dev, ast_plane, vaddr, offset, size,
+ 0x01, &ast_cursor_plane_funcs,
+ ast_cursor_plane_formats, ARRAY_SIZE(ast_cursor_plane_formats),
+ NULL, DRM_PLANE_TYPE_CURSOR);
+ if (ret) {
+ drm_err(dev, "ast_plane_init() failed: %d\n", ret);
+ return ret;
+ }
+ drm_plane_helper_add(cursor_plane, &ast_cursor_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(cursor_plane);
+
+ ast->vram_fb_available -= size;
+
+ return 0;
+}
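As an aside on ast_set_cursor_base() above: the cursor's VRAM offset is programmed in 8-byte units, split across the three VGACR index registers 0xc8, 0xc9 and 0xca. A standalone sketch of that encoding and its inverse, using an arbitrary example offset (not driver code):

/* Illustration of the address split done by ast_set_cursor_base() above.
 * The offset must be 8-byte aligned for the encoding to be lossless;
 * addr0/addr1/addr2 correspond to index registers 0xc8, 0xc9 and 0xca.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t address = 0xff8000;	/* arbitrary example cursor offset */
	uint8_t addr0 = (address >> 3) & 0xff;
	uint8_t addr1 = (address >> 11) & 0xff;
	uint8_t addr2 = (address >> 19) & 0xff;
	uint64_t decoded = ((uint64_t)addr2 << 19) | ((uint64_t)addr1 << 11) |
			   ((uint64_t)addr0 << 3);

	/* Prints: 1f f0 00 -> 0xff8000 */
	printf("%02x %02x %02x -> %#llx\n", addr2, addr1, addr0,
	       (unsigned long long)decoded);
	return 0;
}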
diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
index b9eb67e3fa90..19c04687b0fe 100644
--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -5,6 +5,7 @@
#include <linux/firmware.h>
#include <linux/delay.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_modeset_helper_vtables.h>
@@ -12,11 +13,71 @@
#include <drm/drm_probe_helper.h>
#include "ast_drv.h"
+#include "ast_vbios.h"
+
+struct ast_astdp_mode_index_table_entry {
+ unsigned int hdisplay;
+ unsigned int vdisplay;
+ unsigned int mode_index;
+};
+
+/* FIXME: Do refresh rate and flags actually matter? */
+static const struct ast_astdp_mode_index_table_entry ast_astdp_mode_index_table[] = {
+ { 320, 240, ASTDP_320x240_60 },
+ { 400, 300, ASTDP_400x300_60 },
+ { 512, 384, ASTDP_512x384_60 },
+ { 640, 480, ASTDP_640x480_60 },
+ { 800, 600, ASTDP_800x600_56 },
+ { 1024, 768, ASTDP_1024x768_60 },
+ { 1152, 864, ASTDP_1152x864_75 },
+ { 1280, 800, ASTDP_1280x800_60_RB },
+ { 1280, 1024, ASTDP_1280x1024_60 },
+ { 1360, 768, ASTDP_1366x768_60 }, // same as 1366x768
+ { 1366, 768, ASTDP_1366x768_60 },
+ { 1440, 900, ASTDP_1440x900_60_RB },
+ { 1600, 900, ASTDP_1600x900_60_RB },
+ { 1600, 1200, ASTDP_1600x1200_60 },
+ { 1680, 1050, ASTDP_1680x1050_60_RB },
+ { 1920, 1080, ASTDP_1920x1080_60 },
+ { 1920, 1200, ASTDP_1920x1200_60 },
+ { 0 }
+};
+
+struct ast_astdp_connector_state {
+ struct drm_connector_state base;
+
+ int mode_index;
+};
+
+static struct ast_astdp_connector_state *
+to_ast_astdp_connector_state(const struct drm_connector_state *state)
+{
+ return container_of(state, struct ast_astdp_connector_state, base);
+}
+
+static int ast_astdp_get_mode_index(unsigned int hdisplay, unsigned int vdisplay)
+{
+ const struct ast_astdp_mode_index_table_entry *entry = ast_astdp_mode_index_table;
+
+ while (entry->hdisplay && entry->vdisplay) {
+ if (entry->hdisplay == hdisplay && entry->vdisplay == vdisplay)
+ return entry->mode_index;
+ ++entry;
+ }
+
+ return -EINVAL;
+}
static bool ast_astdp_is_connected(struct ast_device *ast)
{
if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, AST_IO_VGACRDF_HPD))
return false;
+ /*
+ * HPD might be set even if no monitor is connected, so also check that
+ * the link training was successful.
+ */
+ if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC, AST_IO_VGACRDC_LINK_SUCCESS))
+ return false;
return true;
}
@@ -221,80 +282,6 @@ static void ast_dp_set_enable(struct ast_device *ast, bool enabled)
drm_WARN_ON(dev, !__ast_dp_wait_enable(ast, enabled));
}
-static void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode)
-{
- struct ast_device *ast = to_ast_device(crtc->dev);
-
- u32 ulRefreshRateIndex;
- u8 ModeIdx;
-
- ulRefreshRateIndex = vbios_mode->enh_table->refresh_rate_index - 1;
-
- switch (crtc->mode.crtc_hdisplay) {
- case 320:
- ModeIdx = ASTDP_320x240_60;
- break;
- case 400:
- ModeIdx = ASTDP_400x300_60;
- break;
- case 512:
- ModeIdx = ASTDP_512x384_60;
- break;
- case 640:
- ModeIdx = (ASTDP_640x480_60 + (u8) ulRefreshRateIndex);
- break;
- case 800:
- ModeIdx = (ASTDP_800x600_56 + (u8) ulRefreshRateIndex);
- break;
- case 1024:
- ModeIdx = (ASTDP_1024x768_60 + (u8) ulRefreshRateIndex);
- break;
- case 1152:
- ModeIdx = ASTDP_1152x864_75;
- break;
- case 1280:
- if (crtc->mode.crtc_vdisplay == 800)
- ModeIdx = (ASTDP_1280x800_60_RB - (u8) ulRefreshRateIndex);
- else // 1024
- ModeIdx = (ASTDP_1280x1024_60 + (u8) ulRefreshRateIndex);
- break;
- case 1360:
- case 1366:
- ModeIdx = ASTDP_1366x768_60;
- break;
- case 1440:
- ModeIdx = (ASTDP_1440x900_60_RB - (u8) ulRefreshRateIndex);
- break;
- case 1600:
- if (crtc->mode.crtc_vdisplay == 900)
- ModeIdx = (ASTDP_1600x900_60_RB - (u8) ulRefreshRateIndex);
- else //1200
- ModeIdx = ASTDP_1600x1200_60;
- break;
- case 1680:
- ModeIdx = (ASTDP_1680x1050_60_RB - (u8) ulRefreshRateIndex);
- break;
- case 1920:
- if (crtc->mode.crtc_vdisplay == 1080)
- ModeIdx = ASTDP_1920x1080_60;
- else //1200
- ModeIdx = ASTDP_1920x1200_60;
- break;
- default:
- return;
- }
-
- /*
- * CRE0[7:0]: MISC0 ((0x00: 18-bpp) or (0x20: 24-bpp)
- * CRE1[7:0]: MISC1 (default: 0x00)
- * CRE2[7:0]: video format index (0x00 ~ 0x20 or 0x40 ~ 0x50)
- */
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE0, ASTDP_AND_CLEAR_MASK,
- ASTDP_MISC0_24bpp);
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE1, ASTDP_AND_CLEAR_MASK, ASTDP_MISC1);
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE2, ASTDP_AND_CLEAR_MASK, ModeIdx);
-}
-
static void ast_wait_for_vretrace(struct ast_device *ast)
{
unsigned long timeout = jiffies + HZ;
@@ -313,15 +300,62 @@ static const struct drm_encoder_funcs ast_astdp_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
+static enum drm_mode_status
+ast_astdp_encoder_helper_mode_valid(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+{
+ int res;
+
+ res = ast_astdp_get_mode_index(mode->hdisplay, mode->vdisplay);
+ if (res < 0)
+ return MODE_NOMODE;
+
+ return MODE_OK;
+}
+
static void ast_astdp_encoder_helper_atomic_mode_set(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_crtc *crtc = crtc_state->crtc;
+ struct drm_device *dev = encoder->dev;
+ struct ast_device *ast = to_ast_device(dev);
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
- struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info;
+ const struct ast_vbios_enhtable *vmode = ast_crtc_state->vmode;
+ struct ast_astdp_connector_state *astdp_conn_state =
+ to_ast_astdp_connector_state(conn_state);
+ int mode_index = astdp_conn_state->mode_index;
+ u8 refresh_rate_index;
+ u8 vgacre0, vgacre1, vgacre2;
+
+ if (drm_WARN_ON(dev, vmode->refresh_rate_index < 1 || vmode->refresh_rate_index > 255))
+ return;
+ refresh_rate_index = vmode->refresh_rate_index - 1;
+
+ /* FIXME: Why are we doing this? */
+ switch (mode_index) {
+ case ASTDP_1280x800_60_RB:
+ case ASTDP_1440x900_60_RB:
+ case ASTDP_1600x900_60_RB:
+ case ASTDP_1680x1050_60_RB:
+ mode_index = (u8)(mode_index - (u8)refresh_rate_index);
+ break;
+ default:
+ mode_index = (u8)(mode_index + (u8)refresh_rate_index);
+ break;
+ }
- ast_dp_set_mode(crtc, vbios_mode_info);
+ /*
+ * CRE0[7:0]: MISC0 (0x00: 18-bpp or 0x20: 24-bpp)
+ * CRE1[7:0]: MISC1 (default: 0x00)
+ * CRE2[7:0]: video format index (0x00 ~ 0x20 or 0x40 ~ 0x50)
+ */
+ vgacre0 = AST_IO_VGACRE0_24BPP;
+ vgacre1 = 0x00;
+ vgacre2 = mode_index & 0xff;
+
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xe0, vgacre0);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xe1, vgacre1);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xe2, vgacre2);
}
static void ast_astdp_encoder_helper_atomic_enable(struct drm_encoder *encoder,
@@ -348,10 +382,31 @@ static void ast_astdp_encoder_helper_atomic_disable(struct drm_encoder *encoder,
ast_dp_set_phy_sleep(ast, true);
}
+static int ast_astdp_encoder_helper_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ const struct drm_display_mode *mode = &crtc_state->mode;
+ struct ast_astdp_connector_state *astdp_conn_state =
+ to_ast_astdp_connector_state(conn_state);
+ int res;
+
+ if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ res = ast_astdp_get_mode_index(mode->hdisplay, mode->vdisplay);
+ if (res < 0)
+ return res;
+ astdp_conn_state->mode_index = res;
+ }
+
+ return 0;
+}
+
static const struct drm_encoder_helper_funcs ast_astdp_encoder_helper_funcs = {
+ .mode_valid = ast_astdp_encoder_helper_mode_valid,
.atomic_mode_set = ast_astdp_encoder_helper_atomic_mode_set,
.atomic_enable = ast_astdp_encoder_helper_atomic_enable,
.atomic_disable = ast_astdp_encoder_helper_atomic_disable,
+ .atomic_check = ast_astdp_encoder_helper_atomic_check,
};
/*
@@ -422,18 +477,62 @@ static const struct drm_connector_helper_funcs ast_astdp_connector_helper_funcs
.detect_ctx = ast_astdp_connector_helper_detect_ctx,
};
-/*
- * Output
- */
+static void ast_astdp_connector_reset(struct drm_connector *connector)
+{
+ struct ast_astdp_connector_state *astdp_state =
+ kzalloc(sizeof(*astdp_state), GFP_KERNEL);
+
+ if (connector->state)
+ connector->funcs->atomic_destroy_state(connector, connector->state);
+
+ if (astdp_state)
+ __drm_atomic_helper_connector_reset(connector, &astdp_state->base);
+ else
+ __drm_atomic_helper_connector_reset(connector, NULL);
+}
+
+static struct drm_connector_state *
+ast_astdp_connector_atomic_duplicate_state(struct drm_connector *connector)
+{
+ struct ast_astdp_connector_state *new_astdp_state, *astdp_state;
+ struct drm_device *dev = connector->dev;
+
+ if (drm_WARN_ON(dev, !connector->state))
+ return NULL;
+
+ new_astdp_state = kmalloc(sizeof(*new_astdp_state), GFP_KERNEL);
+ if (!new_astdp_state)
+ return NULL;
+ __drm_atomic_helper_connector_duplicate_state(connector, &new_astdp_state->base);
+
+ astdp_state = to_ast_astdp_connector_state(connector->state);
+
+ new_astdp_state->mode_index = astdp_state->mode_index;
+
+ return &new_astdp_state->base;
+}
+
+static void ast_astdp_connector_atomic_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ struct ast_astdp_connector_state *astdp_state = to_ast_astdp_connector_state(state);
+
+ __drm_atomic_helper_connector_destroy_state(&astdp_state->base);
+ kfree(astdp_state);
+}
static const struct drm_connector_funcs ast_astdp_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
+ .reset = ast_astdp_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = ast_astdp_connector_atomic_duplicate_state,
+ .atomic_destroy_state = ast_astdp_connector_atomic_destroy_state,
};
+/*
+ * Output
+ */
+
int ast_astdp_output_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
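The ast_dp.c changes replace the removed ast_dp_set_mode() switch statement with a zero-terminated lookup table and ast_astdp_get_mode_index(), whose -EINVAL result also backs the new .mode_valid callback. A self-contained sketch of the same pattern follows; the enum values and identifiers are placeholders rather than the driver's ASTDP_* codes.

#include <stdio.h>
#include <errno.h>

/* Placeholder mode codes; the driver uses its ASTDP_* indices instead. */
enum { MODE_640x480 = 0x11, MODE_1024x768 = 0x13, MODE_1920x1080 = 0x1e };

struct mode_entry {
	unsigned int hdisplay;
	unsigned int vdisplay;
	int mode_index;
};

/* Zero-terminated table, scanned linearly like ast_astdp_mode_index_table[]. */
static const struct mode_entry mode_table[] = {
	{  640,  480, MODE_640x480 },
	{ 1024,  768, MODE_1024x768 },
	{ 1920, 1080, MODE_1920x1080 },
	{ 0 }
};

static int get_mode_index(unsigned int hdisplay, unsigned int vdisplay)
{
	const struct mode_entry *entry = mode_table;

	while (entry->hdisplay && entry->vdisplay) {
		if (entry->hdisplay == hdisplay && entry->vdisplay == vdisplay)
			return entry->mode_index;
		++entry;
	}

	return -EINVAL; /* doubles as the mode_valid() rejection path */
}

int main(void)
{
	printf("1024x768 -> %d\n", get_mode_index(1024, 768));
	printf("1234x567 -> %d\n", get_mode_index(1234, 567));
	return 0;
}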
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index ff3bcdd1cff2..6fbf62a99c48 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -170,7 +170,7 @@ static int ast_detect_chip(struct pci_dev *pdev,
/* Patch AST2500/AST2510 */
if ((pdev->revision & 0xf0) == 0x40) {
- if (!(vgacrd0 & AST_VRAM_INIT_STATUS_MASK))
+ if (!(vgacrd0 & AST_IO_VGACRD0_VRAM_INIT_STATUS_MASK))
ast_patch_ahb_2500(regs);
}
@@ -393,11 +393,15 @@ static int ast_drm_freeze(struct drm_device *dev)
static int ast_drm_thaw(struct drm_device *dev)
{
struct ast_device *ast = to_ast_device(dev);
+ int ret;
ast_enable_vga(ast->ioregs);
ast_open_key(ast->ioregs);
ast_enable_mmio(dev->dev, ast->ioregs);
- ast_post_gpu(ast);
+
+ ret = ast_post_gpu(ast);
+ if (ret)
+ return ret;
return drm_mode_config_helper_resume(dev);
}
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 6b4305ac07d4..d2c2605d2728 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -39,6 +39,8 @@
#include "ast_reg.h"
+struct ast_vbios_enhtable;
+
#define DRIVER_AUTHOR "Dave Airlie"
#define DRIVER_NAME "ast"
@@ -111,17 +113,10 @@ enum ast_config_mode {
#define AST_MAX_HWC_WIDTH 64
#define AST_MAX_HWC_HEIGHT 64
-#define AST_HWC_SIZE (AST_MAX_HWC_WIDTH * AST_MAX_HWC_HEIGHT * 2)
-#define AST_HWC_SIGNATURE_SIZE 32
+#define AST_HWC_PITCH (AST_MAX_HWC_WIDTH * SZ_2)
+#define AST_HWC_SIZE (AST_MAX_HWC_HEIGHT * AST_HWC_PITCH)
-/* define for signature structure */
-#define AST_HWC_SIGNATURE_CHECKSUM 0x00
-#define AST_HWC_SIGNATURE_SizeX 0x04
-#define AST_HWC_SIGNATURE_SizeY 0x08
-#define AST_HWC_SIGNATURE_X 0x0C
-#define AST_HWC_SIGNATURE_Y 0x10
-#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
-#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
+#define AST_HWC_SIGNATURE_SIZE 32
/*
* Planes
@@ -140,6 +135,17 @@ static inline struct ast_plane *to_ast_plane(struct drm_plane *plane)
return container_of(plane, struct ast_plane, base);
}
+struct ast_cursor_plane {
+ struct ast_plane base;
+
+ u8 argb4444[AST_HWC_SIZE];
+};
+
+static inline struct ast_cursor_plane *to_ast_cursor_plane(struct drm_plane *plane)
+{
+ return container_of(to_ast_plane(plane), struct ast_cursor_plane, base);
+}
+
/*
* Connector
*/
@@ -184,7 +190,7 @@ struct ast_device {
enum ast_tx_chip tx_chip;
struct ast_plane primary_plane;
- struct ast_plane cursor_plane;
+ struct ast_cursor_plane cursor_plane;
struct drm_crtc crtc;
union {
struct {
@@ -205,7 +211,9 @@ struct ast_device {
} astdp;
} output;
- bool support_wide_screen;
+ bool support_wsxga_p; /* 1680x1050 */
+ bool support_fullhd; /* 1920x1080 */
+ bool support_wuxga; /* 1920x1200 */
u8 *dp501_fw_addr;
const struct firmware *dp501_fw; /* dp501 fw */
@@ -348,46 +356,24 @@ struct ast_vbios_stdtable {
u8 gr[9];
};
-struct ast_vbios_enhtable {
- u32 ht;
- u32 hde;
- u32 hfp;
- u32 hsync;
- u32 vt;
- u32 vde;
- u32 vfp;
- u32 vsync;
- u32 dclk_index;
- u32 flags;
- u32 refresh_rate;
- u32 refresh_rate_index;
- u32 mode_id;
-};
-
struct ast_vbios_dclk_info {
u8 param1;
u8 param2;
u8 param3;
};
-struct ast_vbios_mode_info {
- const struct ast_vbios_stdtable *std_table;
- const struct ast_vbios_enhtable *enh_table;
-};
-
struct ast_crtc_state {
struct drm_crtc_state base;
/* Last known format of primary plane */
const struct drm_format_info *format;
- struct ast_vbios_mode_info vbios_mode_info;
+ const struct ast_vbios_stdtable *std_table;
+ const struct ast_vbios_enhtable *vmode;
};
#define to_ast_crtc_state(state) container_of(state, struct ast_crtc_state, base)
-int ast_mode_config_init(struct ast_device *ast);
-
#define AST_MM_ALIGN_SHIFT 4
#define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1)
@@ -445,7 +431,7 @@ int ast_mode_config_init(struct ast_device *ast);
int ast_mm_init(struct ast_device *ast);
/* ast post */
-void ast_post_gpu(struct ast_device *ast);
+int ast_post_gpu(struct ast_device *ast);
u32 ast_mindwm(struct ast_device *ast, u32 r);
void ast_moutdwm(struct ast_device *ast, u32 r, u32 v);
void ast_patch_ahb_2500(void __iomem *regs);
@@ -453,6 +439,9 @@ void ast_patch_ahb_2500(void __iomem *regs);
int ast_vga_output_init(struct ast_device *ast);
int ast_sil164_output_init(struct ast_device *ast);
+/* ast_cursor.c */
+int ast_cursor_plane_init(struct ast_device *ast);
+
/* ast dp501 */
bool ast_backup_fw(struct ast_device *ast, u8 *addr, u32 size);
void ast_init_3rdtx(struct ast_device *ast);
@@ -462,4 +451,14 @@ int ast_dp501_output_init(struct ast_device *ast);
int ast_dp_launch(struct ast_device *ast);
int ast_astdp_output_init(struct ast_device *ast);
+/* ast_mode.c */
+int ast_mode_config_init(struct ast_device *ast);
+int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
+ void __iomem *vaddr, u64 offset, unsigned long size,
+ uint32_t possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, unsigned int format_count,
+ const uint64_t *format_modifiers,
+ enum drm_plane_type type);
+
#endif
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index bc37c65305d4..44b9b5f659fc 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -36,33 +36,89 @@
#include "ast_drv.h"
+/* Try to detect WSXGA+ on Gen2+ */
+static bool __ast_2100_detect_wsxga_p(struct ast_device *ast)
+{
+ u8 vgacrd0 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd0);
+
+ if (!(vgacrd0 & AST_IO_VGACRD0_VRAM_INIT_BY_BMC))
+ return true;
+ if (vgacrd0 & AST_IO_VGACRD0_IKVM_WIDESCREEN)
+ return true;
+
+ return false;
+}
+
+/* Try to detect WUXGA on Gen2+ */
+static bool __ast_2100_detect_wuxga(struct ast_device *ast)
+{
+ u8 vgacrd1;
+
+ if (ast->support_fullhd) {
+ vgacrd1 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd1);
+ if (!(vgacrd1 & AST_IO_VGACRD1_SUPPORTS_WUXGA))
+ return true;
+ }
+
+ return false;
+}
+
static void ast_detect_widescreen(struct ast_device *ast)
{
- u8 jreg;
+ ast->support_wsxga_p = false;
+ ast->support_fullhd = false;
+ ast->support_wuxga = false;
- /* Check if we support wide screen */
- switch (AST_GEN(ast)) {
- case 1:
- ast->support_wide_screen = false;
- break;
- default:
- jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
- if (!(jreg & 0x80))
- ast->support_wide_screen = true;
- else if (jreg & 0x01)
- ast->support_wide_screen = true;
- else {
- ast->support_wide_screen = false;
- if (ast->chip == AST1300)
- ast->support_wide_screen = true;
- if (ast->chip == AST1400)
- ast->support_wide_screen = true;
- if (ast->chip == AST2510)
- ast->support_wide_screen = true;
- if (IS_AST_GEN7(ast))
- ast->support_wide_screen = true;
+ if (AST_GEN(ast) >= 7) {
+ ast->support_wsxga_p = true;
+ ast->support_fullhd = true;
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
+ } else if (AST_GEN(ast) >= 6) {
+ if (__ast_2100_detect_wsxga_p(ast))
+ ast->support_wsxga_p = true;
+ else if (ast->chip == AST2510)
+ ast->support_wsxga_p = true;
+ if (ast->support_wsxga_p)
+ ast->support_fullhd = true;
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
+ } else if (AST_GEN(ast) >= 5) {
+ if (__ast_2100_detect_wsxga_p(ast))
+ ast->support_wsxga_p = true;
+ else if (ast->chip == AST1400)
+ ast->support_wsxga_p = true;
+ if (ast->support_wsxga_p)
+ ast->support_fullhd = true;
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
+ } else if (AST_GEN(ast) >= 4) {
+ if (__ast_2100_detect_wsxga_p(ast))
+ ast->support_wsxga_p = true;
+ else if (ast->chip == AST1300)
+ ast->support_wsxga_p = true;
+ if (ast->support_wsxga_p)
+ ast->support_fullhd = true;
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
+ } else if (AST_GEN(ast) >= 3) {
+ if (__ast_2100_detect_wsxga_p(ast))
+ ast->support_wsxga_p = true;
+ if (ast->support_wsxga_p) {
+ if (ast->chip == AST2200)
+ ast->support_fullhd = true;
}
- break;
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
+ } else if (AST_GEN(ast) >= 2) {
+ if (__ast_2100_detect_wsxga_p(ast))
+ ast->support_wsxga_p = true;
+ if (ast->support_wsxga_p) {
+ if (ast->chip == AST2100)
+ ast->support_fullhd = true;
+ }
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
}
}
@@ -76,49 +132,37 @@ static void ast_detect_tx_chip(struct ast_device *ast, bool need_post)
};
struct drm_device *dev = &ast->base;
- u8 jreg, vgacrd1;
-
- /*
- * Several of the listed TX chips are not explicitly supported
- * by the ast driver. If these exist in real-world devices, they
- * are most likely reported as VGA or SIL164 outputs. We warn here
- * to get bug reports for these devices. If none come in for some
- * time, we can begin to fail device probing on these values.
- */
- vgacrd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, AST_IO_VGACRD1_TX_TYPE_MASK);
- drm_WARN(dev, vgacrd1 == AST_IO_VGACRD1_TX_ITE66121_VBIOS,
- "ITE IT66121 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast));
- drm_WARN(dev, vgacrd1 == AST_IO_VGACRD1_TX_CH7003_VBIOS,
- "Chrontel CH7003 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast));
- drm_WARN(dev, vgacrd1 == AST_IO_VGACRD1_TX_ANX9807_VBIOS,
- "Analogix ANX9807 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast));
+ u8 vgacra3, vgacrd1;
/* Check 3rd Tx option (digital output afaik) */
ast->tx_chip = AST_TX_NONE;
- /*
- * VGACRA3 Enhanced Color Mode Register, check if DVO is already
- * enabled, in that case, assume we have a SIL164 TMDS transmitter
- *
- * Don't make that assumption if we the chip wasn't enabled and
- * is at power-on reset, otherwise we'll incorrectly "detect" a
- * SIL164 when there is none.
- */
- if (!need_post) {
- jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xff);
- if (jreg & 0x80)
- ast->tx_chip = AST_TX_SIL164;
- }
-
- if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast)) {
+ if (AST_GEN(ast) <= 3) {
/*
- * On AST GEN4+, look the configuration set by the SoC in
+ * VGACRA3 Enhanced Color Mode Register: check if DVO is already
+ * enabled; in that case, assume we have a SIL164 TMDS transmitter.
+ *
+ * Don't make that assumption if the chip wasn't enabled and
+ * is at power-on reset; otherwise we'll incorrectly "detect" a
+ * SIL164 when there is none.
+ */
+ if (!need_post) {
+ vgacra3 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xff);
+ if (vgacra3 & AST_IO_VGACRA3_DVO_ENABLED)
+ ast->tx_chip = AST_TX_SIL164;
+ }
+ } else {
+ /*
+ * On AST GEN4+, look at the configuration set by the SoC in
* the SOC scratch register #1 bits 11:8 (interestingly marked
* as "reserved" in the spec)
*/
- jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1,
- AST_IO_VGACRD1_TX_TYPE_MASK);
- switch (jreg) {
+ vgacrd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1,
+ AST_IO_VGACRD1_TX_TYPE_MASK);
+ switch (vgacrd1) {
+ /*
+ * GEN4 to GEN6
+ */
case AST_IO_VGACRD1_TX_SIL164_VBIOS:
ast->tx_chip = AST_TX_SIL164;
break;
@@ -134,14 +178,32 @@ static void ast_detect_tx_chip(struct ast_device *ast, bool need_post)
fallthrough;
case AST_IO_VGACRD1_TX_FW_EMBEDDED_FW:
ast->tx_chip = AST_TX_DP501;
- }
- } else if (IS_AST_GEN7(ast)) {
- if (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, AST_IO_VGACRD1_TX_TYPE_MASK) ==
- AST_IO_VGACRD1_TX_ASTDP) {
- int ret = ast_dp_launch(ast);
-
- if (!ret)
- ast->tx_chip = AST_TX_ASTDP;
+ break;
+ /*
+ * GEN7+
+ */
+ case AST_IO_VGACRD1_TX_ASTDP:
+ ast->tx_chip = AST_TX_ASTDP;
+ break;
+ /*
+ * Several of the listed TX chips are not explicitly supported
+ * by the ast driver. If these exist in real-world devices, they
+ * are most likely reported as VGA or SIL164 outputs. We warn here
+ * to get bug reports for these devices. If none come in for some
+ * time, we can begin to fail device probing on these values.
+ */
+ case AST_IO_VGACRD1_TX_ITE66121_VBIOS:
+ drm_warn(dev, "ITE IT66121 detected, 0x%x, Gen%lu\n",
+ vgacrd1, AST_GEN(ast));
+ break;
+ case AST_IO_VGACRD1_TX_CH7003_VBIOS:
+ drm_warn(dev, "Chrontel CH7003 detected, 0x%x, Gen%lu\n",
+ vgacrd1, AST_GEN(ast));
+ break;
+ case AST_IO_VGACRD1_TX_ANX9807_VBIOS:
+ drm_warn(dev, "Analogix ANX9807 detected, 0x%x, Gen%lu\n",
+ vgacrd1, AST_GEN(ast));
+ break;
}
}
@@ -290,18 +352,25 @@ struct drm_device *ast_device_create(struct pci_dev *pdev,
ast->regs = regs;
ast->ioregs = ioregs;
- ast_detect_widescreen(ast);
- ast_detect_tx_chip(ast, need_post);
-
ret = ast_get_dram_info(ast);
if (ret)
return ERR_PTR(ret);
-
drm_info(dev, "dram MCLK=%u Mhz type=%d bus_width=%d\n",
ast->mclk, ast->dram_type, ast->dram_bus_width);
- if (need_post)
- ast_post_gpu(ast);
+ ast_detect_tx_chip(ast, need_post);
+ switch (ast->tx_chip) {
+ case AST_TX_ASTDP:
+ ret = ast_post_gpu(ast);
+ break;
+ default:
+ ret = 0;
+ if (need_post)
+ ret = ast_post_gpu(ast);
+ break;
+ }
+ if (ret)
+ return ERR_PTR(ret);
ret = ast_mm_init(ast);
if (ret)
@@ -315,6 +384,8 @@ struct drm_device *ast_device_create(struct pci_dev *pdev,
drm_info(dev, "failed to map reserved buffer!\n");
}
+ ast_detect_widescreen(ast);
+
ret = ast_mode_config_init(ast);
if (ret)
return ERR_PTR(ret);
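ast_detect_widescreen() now walks down from the newest hardware generation, with each AST_GEN() tier granting a superset of the older tiers' display capabilities plus a few per-chip exceptions. The sketch below shows that tiering under simplified assumptions; the chip_info fields merely stand in for the VGACRD0/VGACRD1 scratch-register probes, and the per-chip fallbacks (AST1300, AST1400, AST2510, AST2100, AST2200) are elided.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the scratch-register probes in ast_main.c. */
struct chip_info {
	unsigned int gen;
	bool vram_init_by_bmc;  /* stands in for the VGACRD0 BMC-init bit */
	bool ikvm_widescreen;   /* stands in for the VGACRD0 iKVM widescreen bit */
	bool wuxga_blocked;     /* stands in for the VGACRD1 bit tested for WUXGA */
};

struct display_caps {
	bool wsxga_p;  /* 1680x1050 */
	bool fullhd;   /* 1920x1080 */
	bool wuxga;    /* 1920x1200 */
};

static bool detect_wsxga_p(const struct chip_info *chip)
{
	/* Widescreen is available unless the BMC initialized VRAM without it. */
	return !chip->vram_init_by_bmc || chip->ikvm_widescreen;
}

static struct display_caps detect_caps(const struct chip_info *chip)
{
	struct display_caps caps = { 0 };

	if (chip->gen >= 7) {
		caps.wsxga_p = true;  /* Gen7+ always has widescreen and Full HD */
		caps.fullhd = true;
	} else if (chip->gen >= 4) {
		caps.wsxga_p = detect_wsxga_p(chip);
		caps.fullhd = caps.wsxga_p;  /* per-chip fallbacks elided */
	} else if (chip->gen >= 2) {
		caps.wsxga_p = detect_wsxga_p(chip);
		/* Full HD only on selected Gen2/Gen3 parts; elided here. */
	}

	/* WUXGA is gated on Full HD plus a separate scratch bit. */
	if (caps.fullhd && !chip->wuxga_blocked)
		caps.wuxga = true;

	return caps;
}

int main(void)
{
	struct chip_info chip = { .gen = 6, .vram_init_by_bmc = true,
				  .ikvm_widescreen = true };
	struct display_caps caps = detect_caps(&chip);

	printf("wsxga_p=%d fullhd=%d wuxga=%d\n",
	       caps.wsxga_p, caps.fullhd, caps.wuxga);
	return 0;
}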
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 9d5321c81e68..c3b950675485 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -47,6 +47,7 @@
#include "ast_drv.h"
#include "ast_tables.h"
+#include "ast_vbios.h"
#define AST_LUT_SIZE 256
@@ -106,134 +107,9 @@ static void ast_crtc_set_gamma(struct ast_device *ast,
}
}
-static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- struct ast_vbios_mode_info *vbios_mode)
-{
- u32 refresh_rate_index = 0, refresh_rate;
- const struct ast_vbios_enhtable *best = NULL;
- u32 hborder, vborder;
- bool check_sync;
-
- switch (format->cpp[0] * 8) {
- case 8:
- vbios_mode->std_table = &vbios_stdtable[VGAModeIndex];
- break;
- case 16:
- vbios_mode->std_table = &vbios_stdtable[HiCModeIndex];
- break;
- case 24:
- case 32:
- vbios_mode->std_table = &vbios_stdtable[TrueCModeIndex];
- break;
- default:
- return false;
- }
-
- switch (mode->crtc_hdisplay) {
- case 640:
- vbios_mode->enh_table = &res_640x480[refresh_rate_index];
- break;
- case 800:
- vbios_mode->enh_table = &res_800x600[refresh_rate_index];
- break;
- case 1024:
- vbios_mode->enh_table = &res_1024x768[refresh_rate_index];
- break;
- case 1152:
- vbios_mode->enh_table = &res_1152x864[refresh_rate_index];
- break;
- case 1280:
- if (mode->crtc_vdisplay == 800)
- vbios_mode->enh_table = &res_1280x800[refresh_rate_index];
- else
- vbios_mode->enh_table = &res_1280x1024[refresh_rate_index];
- break;
- case 1360:
- vbios_mode->enh_table = &res_1360x768[refresh_rate_index];
- break;
- case 1440:
- vbios_mode->enh_table = &res_1440x900[refresh_rate_index];
- break;
- case 1600:
- if (mode->crtc_vdisplay == 900)
- vbios_mode->enh_table = &res_1600x900[refresh_rate_index];
- else
- vbios_mode->enh_table = &res_1600x1200[refresh_rate_index];
- break;
- case 1680:
- vbios_mode->enh_table = &res_1680x1050[refresh_rate_index];
- break;
- case 1920:
- if (mode->crtc_vdisplay == 1080)
- vbios_mode->enh_table = &res_1920x1080[refresh_rate_index];
- else
- vbios_mode->enh_table = &res_1920x1200[refresh_rate_index];
- break;
- default:
- return false;
- }
-
- refresh_rate = drm_mode_vrefresh(mode);
- check_sync = vbios_mode->enh_table->flags & WideScreenMode;
-
- while (1) {
- const struct ast_vbios_enhtable *loop = vbios_mode->enh_table;
-
- while (loop->refresh_rate != 0xff) {
- if ((check_sync) &&
- (((mode->flags & DRM_MODE_FLAG_NVSYNC) &&
- (loop->flags & PVSync)) ||
- ((mode->flags & DRM_MODE_FLAG_PVSYNC) &&
- (loop->flags & NVSync)) ||
- ((mode->flags & DRM_MODE_FLAG_NHSYNC) &&
- (loop->flags & PHSync)) ||
- ((mode->flags & DRM_MODE_FLAG_PHSYNC) &&
- (loop->flags & NHSync)))) {
- loop++;
- continue;
- }
- if (loop->refresh_rate <= refresh_rate
- && (!best || loop->refresh_rate > best->refresh_rate))
- best = loop;
- loop++;
- }
- if (best || !check_sync)
- break;
- check_sync = 0;
- }
-
- if (best)
- vbios_mode->enh_table = best;
-
- hborder = (vbios_mode->enh_table->flags & HBorder) ? 8 : 0;
- vborder = (vbios_mode->enh_table->flags & VBorder) ? 8 : 0;
-
- adjusted_mode->crtc_htotal = vbios_mode->enh_table->ht;
- adjusted_mode->crtc_hblank_start = vbios_mode->enh_table->hde + hborder;
- adjusted_mode->crtc_hblank_end = vbios_mode->enh_table->ht - hborder;
- adjusted_mode->crtc_hsync_start = vbios_mode->enh_table->hde + hborder +
- vbios_mode->enh_table->hfp;
- adjusted_mode->crtc_hsync_end = (vbios_mode->enh_table->hde + hborder +
- vbios_mode->enh_table->hfp +
- vbios_mode->enh_table->hsync);
-
- adjusted_mode->crtc_vtotal = vbios_mode->enh_table->vt;
- adjusted_mode->crtc_vblank_start = vbios_mode->enh_table->vde + vborder;
- adjusted_mode->crtc_vblank_end = vbios_mode->enh_table->vt - vborder;
- adjusted_mode->crtc_vsync_start = vbios_mode->enh_table->vde + vborder +
- vbios_mode->enh_table->vfp;
- adjusted_mode->crtc_vsync_end = (vbios_mode->enh_table->vde + vborder +
- vbios_mode->enh_table->vfp +
- vbios_mode->enh_table->vsync);
-
- return true;
-}
-
static void ast_set_vbios_color_reg(struct ast_device *ast,
const struct drm_format_info *format,
- const struct ast_vbios_mode_info *vbios_mode)
+ const struct ast_vbios_enhtable *vmode)
{
u32 color_index;
@@ -256,7 +132,7 @@ static void ast_set_vbios_color_reg(struct ast_device *ast,
ast_set_index_reg(ast, AST_IO_VGACRI, 0x91, 0x00);
- if (vbios_mode->enh_table->flags & NewModeInfo) {
+ if (vmode->flags & NewModeInfo) {
ast_set_index_reg(ast, AST_IO_VGACRI, 0x91, 0xa8);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x92, format->cpp[0] * 8);
}
@@ -264,19 +140,19 @@ static void ast_set_vbios_color_reg(struct ast_device *ast,
static void ast_set_vbios_mode_reg(struct ast_device *ast,
const struct drm_display_mode *adjusted_mode,
- const struct ast_vbios_mode_info *vbios_mode)
+ const struct ast_vbios_enhtable *vmode)
{
u32 refresh_rate_index, mode_id;
- refresh_rate_index = vbios_mode->enh_table->refresh_rate_index;
- mode_id = vbios_mode->enh_table->mode_id;
+ refresh_rate_index = vmode->refresh_rate_index;
+ mode_id = vmode->mode_id;
ast_set_index_reg(ast, AST_IO_VGACRI, 0x8d, refresh_rate_index & 0xff);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x8e, mode_id & 0xff);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x91, 0x00);
- if (vbios_mode->enh_table->flags & NewModeInfo) {
+ if (vmode->flags & NewModeInfo) {
ast_set_index_reg(ast, AST_IO_VGACRI, 0x91, 0xa8);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x93, adjusted_mode->clock / 1000);
ast_set_index_reg(ast, AST_IO_VGACRI, 0x94, adjusted_mode->crtc_hdisplay);
@@ -288,14 +164,11 @@ static void ast_set_vbios_mode_reg(struct ast_device *ast,
static void ast_set_std_reg(struct ast_device *ast,
struct drm_display_mode *mode,
- struct ast_vbios_mode_info *vbios_mode)
+ const struct ast_vbios_stdtable *stdtable)
{
- const struct ast_vbios_stdtable *stdtable;
u32 i;
u8 jreg;
- stdtable = vbios_mode->std_table;
-
jreg = stdtable->misc;
ast_io_write8(ast, AST_IO_VGAMR_W, jreg);
@@ -336,13 +209,13 @@ static void ast_set_std_reg(struct ast_device *ast,
static void ast_set_crtc_reg(struct ast_device *ast,
struct drm_display_mode *mode,
- struct ast_vbios_mode_info *vbios_mode)
+ const struct ast_vbios_enhtable *vmode)
{
u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0;
u16 temp, precache = 0;
if ((IS_AST_GEN6(ast) || IS_AST_GEN7(ast)) &&
- (vbios_mode->enh_table->flags & AST2500PreCatchCRT))
+ (vmode->flags & AST2500PreCatchCRT))
precache = 40;
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x11, 0x7f, 0x00);
@@ -461,14 +334,14 @@ static void ast_set_offset_reg(struct ast_device *ast,
static void ast_set_dclk_reg(struct ast_device *ast,
struct drm_display_mode *mode,
- struct ast_vbios_mode_info *vbios_mode)
+ const struct ast_vbios_enhtable *vmode)
{
const struct ast_vbios_dclk_info *clk_info;
if (IS_AST_GEN6(ast) || IS_AST_GEN7(ast))
- clk_info = &dclk_table_ast2500[vbios_mode->enh_table->dclk_index];
+ clk_info = &dclk_table_ast2500[vmode->dclk_index];
else
- clk_info = &dclk_table[vbios_mode->enh_table->dclk_index];
+ clk_info = &dclk_table[vmode->dclk_index];
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xc0, 0x00, clk_info->param1);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xc1, 0x00, clk_info->param2);
@@ -526,15 +399,15 @@ static void ast_set_crtthd_reg(struct ast_device *ast)
static void ast_set_sync_reg(struct ast_device *ast,
struct drm_display_mode *mode,
- struct ast_vbios_mode_info *vbios_mode)
+ const struct ast_vbios_enhtable *vmode)
{
u8 jreg;
jreg = ast_io_read8(ast, AST_IO_VGAMR_R);
jreg &= ~0xC0;
- if (vbios_mode->enh_table->flags & NVSync)
+ if (vmode->flags & NVSync)
jreg |= 0x80;
- if (vbios_mode->enh_table->flags & NHSync)
+ if (vmode->flags & NHSync)
jreg |= 0x40;
ast_io_write8(ast, AST_IO_VGAMR_W, jreg);
}
@@ -565,13 +438,13 @@ static void ast_wait_for_vretrace(struct ast_device *ast)
* Planes
*/
-static int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
- void __iomem *vaddr, u64 offset, unsigned long size,
- uint32_t possible_crtcs,
- const struct drm_plane_funcs *funcs,
- const uint32_t *formats, unsigned int format_count,
- const uint64_t *format_modifiers,
- enum drm_plane_type type)
+int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
+ void __iomem *vaddr, u64 offset, unsigned long size,
+ uint32_t possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, unsigned int format_count,
+ const uint64_t *format_modifiers,
+ enum drm_plane_type type)
{
struct drm_plane *plane = &ast_plane->base;
@@ -654,10 +527,9 @@ static void ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
if (!old_fb || (fb->format != old_fb->format) || crtc_state->mode_changed) {
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
- struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info;
ast_set_color_reg(ast, fb->format);
- ast_set_vbios_color_reg(ast, fb->format, vbios_mode_info);
+ ast_set_vbios_color_reg(ast, fb->format, ast_crtc_state->vmode);
}
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
@@ -757,263 +629,6 @@ static int ast_primary_plane_init(struct ast_device *ast)
}
/*
- * Cursor plane
- */
-
-static void ast_update_cursor_image(u8 __iomem *dst, const u8 *src, int width, int height)
-{
- union {
- u32 ul;
- u8 b[4];
- } srcdata32[2], data32;
- union {
- u16 us;
- u8 b[2];
- } data16;
- u32 csum = 0;
- s32 alpha_dst_delta, last_alpha_dst_delta;
- u8 __iomem *dstxor;
- const u8 *srcxor;
- int i, j;
- u32 per_pixel_copy, two_pixel_copy;
-
- alpha_dst_delta = AST_MAX_HWC_WIDTH << 1;
- last_alpha_dst_delta = alpha_dst_delta - (width << 1);
-
- srcxor = src;
- dstxor = (u8 *)dst + last_alpha_dst_delta + (AST_MAX_HWC_HEIGHT - height) * alpha_dst_delta;
- per_pixel_copy = width & 1;
- two_pixel_copy = width >> 1;
-
- for (j = 0; j < height; j++) {
- for (i = 0; i < two_pixel_copy; i++) {
- srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
- srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0;
- data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
- data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
- data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4);
- data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4);
-
- writel(data32.ul, dstxor);
- csum += data32.ul;
-
- dstxor += 4;
- srcxor += 8;
-
- }
-
- for (i = 0; i < per_pixel_copy; i++) {
- srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
- data16.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
- data16.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
- writew(data16.us, dstxor);
- csum += (u32)data16.us;
-
- dstxor += 2;
- srcxor += 4;
- }
- dstxor += last_alpha_dst_delta;
- }
-
- /* write checksum + signature */
- dst += AST_HWC_SIZE;
- writel(csum, dst);
- writel(width, dst + AST_HWC_SIGNATURE_SizeX);
- writel(height, dst + AST_HWC_SIGNATURE_SizeY);
- writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX);
- writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
-}
-
-static void ast_set_cursor_base(struct ast_device *ast, u64 address)
-{
- u8 addr0 = (address >> 3) & 0xff;
- u8 addr1 = (address >> 11) & 0xff;
- u8 addr2 = (address >> 19) & 0xff;
-
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xc8, addr0);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xc9, addr1);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xca, addr2);
-}
-
-static void ast_set_cursor_location(struct ast_device *ast, u16 x, u16 y,
- u8 x_offset, u8 y_offset)
-{
- u8 x0 = (x & 0x00ff);
- u8 x1 = (x & 0x0f00) >> 8;
- u8 y0 = (y & 0x00ff);
- u8 y1 = (y & 0x0700) >> 8;
-
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xc2, x_offset);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xc3, y_offset);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xc4, x0);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xc5, x1);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xc6, y0);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xc7, y1);
-}
-
-static void ast_set_cursor_enabled(struct ast_device *ast, bool enabled)
-{
- static const u8 mask = (u8)~(AST_IO_VGACRCB_HWC_16BPP |
- AST_IO_VGACRCB_HWC_ENABLED);
-
- u8 vgacrcb = AST_IO_VGACRCB_HWC_16BPP;
-
- if (enabled)
- vgacrcb |= AST_IO_VGACRCB_HWC_ENABLED;
-
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xcb, mask, vgacrcb);
-}
-
-static const uint32_t ast_cursor_plane_formats[] = {
- DRM_FORMAT_ARGB8888,
-};
-
-static int ast_cursor_plane_helper_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct drm_framebuffer *new_fb = new_plane_state->fb;
- struct drm_crtc_state *new_crtc_state = NULL;
- int ret;
-
- if (new_plane_state->crtc)
- new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
-
- ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- true, true);
- if (ret || !new_plane_state->visible)
- return ret;
-
- if (new_fb->width > AST_MAX_HWC_WIDTH || new_fb->height > AST_MAX_HWC_HEIGHT)
- return -EINVAL;
-
- return 0;
-}
-
-static void ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct ast_plane *ast_plane = to_ast_plane(plane);
- struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
- struct drm_framebuffer *fb = plane_state->fb;
- struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
- struct ast_device *ast = to_ast_device(plane->dev);
- struct iosys_map src_map = shadow_plane_state->data[0];
- struct drm_rect damage;
- const u8 *src = src_map.vaddr; /* TODO: Use mapping abstraction properly */
- u64 dst_off = ast_plane->offset;
- u8 __iomem *dst = ast_plane->vaddr; /* TODO: Use mapping abstraction properly */
- u8 __iomem *sig = dst + AST_HWC_SIZE; /* TODO: Use mapping abstraction properly */
- unsigned int offset_x, offset_y;
- u16 x, y;
- u8 x_offset, y_offset;
-
- /*
- * Do data transfer to hardware buffer and point the scanout
- * engine to the offset.
- */
-
- if (drm_atomic_helper_damage_merged(old_plane_state, plane_state, &damage)) {
- ast_update_cursor_image(dst, src, fb->width, fb->height);
- ast_set_cursor_base(ast, dst_off);
- }
-
- /*
- * Update location in HWC signature and registers.
- */
-
- writel(plane_state->crtc_x, sig + AST_HWC_SIGNATURE_X);
- writel(plane_state->crtc_y, sig + AST_HWC_SIGNATURE_Y);
-
- offset_x = AST_MAX_HWC_WIDTH - fb->width;
- offset_y = AST_MAX_HWC_HEIGHT - fb->height;
-
- if (plane_state->crtc_x < 0) {
- x_offset = (-plane_state->crtc_x) + offset_x;
- x = 0;
- } else {
- x_offset = offset_x;
- x = plane_state->crtc_x;
- }
- if (plane_state->crtc_y < 0) {
- y_offset = (-plane_state->crtc_y) + offset_y;
- y = 0;
- } else {
- y_offset = offset_y;
- y = plane_state->crtc_y;
- }
-
- ast_set_cursor_location(ast, x, y, x_offset, y_offset);
-
- /* Dummy write to enable HWC and make the HW pick-up the changes. */
- ast_set_cursor_enabled(ast, true);
-}
-
-static void ast_cursor_plane_helper_atomic_disable(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct ast_device *ast = to_ast_device(plane->dev);
-
- ast_set_cursor_enabled(ast, false);
-}
-
-static const struct drm_plane_helper_funcs ast_cursor_plane_helper_funcs = {
- DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
- .atomic_check = ast_cursor_plane_helper_atomic_check,
- .atomic_update = ast_cursor_plane_helper_atomic_update,
- .atomic_disable = ast_cursor_plane_helper_atomic_disable,
-};
-
-static const struct drm_plane_funcs ast_cursor_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_plane_cleanup,
- DRM_GEM_SHADOW_PLANE_FUNCS,
-};
-
-static int ast_cursor_plane_init(struct ast_device *ast)
-{
- struct drm_device *dev = &ast->base;
- struct ast_plane *ast_cursor_plane = &ast->cursor_plane;
- struct drm_plane *cursor_plane = &ast_cursor_plane->base;
- size_t size;
- void __iomem *vaddr;
- u64 offset;
- int ret;
-
- /*
- * Allocate backing storage for cursors. The BOs are permanently
- * pinned to the top end of the VRAM.
- */
-
- size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
-
- if (ast->vram_fb_available < size)
- return -ENOMEM;
-
- vaddr = ast->vram + ast->vram_fb_available - size;
- offset = ast->vram_fb_available - size;
-
- ret = ast_plane_init(dev, ast_cursor_plane, vaddr, offset, size,
- 0x01, &ast_cursor_plane_funcs,
- ast_cursor_plane_formats, ARRAY_SIZE(ast_cursor_plane_formats),
- NULL, DRM_PLANE_TYPE_CURSOR);
- if (ret) {
- drm_err(dev, "ast_plane_init() failed: %d\n", ret);
- return ret;
- }
- drm_plane_helper_add(cursor_plane, &ast_cursor_plane_helper_funcs);
- drm_plane_enable_fb_damage_clips(cursor_plane);
-
- ast->vram_fb_available -= size;
-
- return 0;
-}
-
-/*
* CRTC
*/
@@ -1021,72 +636,13 @@ static enum drm_mode_status
ast_crtc_helper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
{
struct ast_device *ast = to_ast_device(crtc->dev);
- enum drm_mode_status status;
- uint32_t jtemp;
-
- if (ast->support_wide_screen) {
- if ((mode->hdisplay == 1680) && (mode->vdisplay == 1050))
- return MODE_OK;
- if ((mode->hdisplay == 1280) && (mode->vdisplay == 800))
- return MODE_OK;
- if ((mode->hdisplay == 1440) && (mode->vdisplay == 900))
- return MODE_OK;
- if ((mode->hdisplay == 1360) && (mode->vdisplay == 768))
- return MODE_OK;
- if ((mode->hdisplay == 1600) && (mode->vdisplay == 900))
- return MODE_OK;
- if ((mode->hdisplay == 1152) && (mode->vdisplay == 864))
- return MODE_OK;
-
- if ((ast->chip == AST2100) || // GEN2, but not AST1100 (?)
- (ast->chip == AST2200) || // GEN3, but not AST2150 (?)
- IS_AST_GEN4(ast) || IS_AST_GEN5(ast) ||
- IS_AST_GEN6(ast) || IS_AST_GEN7(ast)) {
- if ((mode->hdisplay == 1920) && (mode->vdisplay == 1080))
- return MODE_OK;
-
- if ((mode->hdisplay == 1920) && (mode->vdisplay == 1200)) {
- jtemp = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, 0xff);
- if (jtemp & 0x01)
- return MODE_NOMODE;
- else
- return MODE_OK;
- }
- }
- }
+ const struct ast_vbios_enhtable *vmode;
- status = MODE_NOMODE;
+ vmode = ast_vbios_find_mode(ast, mode);
+ if (!vmode)
+ return MODE_NOMODE;
- switch (mode->hdisplay) {
- case 640:
- if (mode->vdisplay == 480)
- status = MODE_OK;
- break;
- case 800:
- if (mode->vdisplay == 600)
- status = MODE_OK;
- break;
- case 1024:
- if (mode->vdisplay == 768)
- status = MODE_OK;
- break;
- case 1152:
- if (mode->vdisplay == 864)
- status = MODE_OK;
- break;
- case 1280:
- if (mode->vdisplay == 1024)
- status = MODE_OK;
- break;
- case 1600:
- if (mode->vdisplay == 1200)
- status = MODE_OK;
- break;
- default:
- break;
- }
-
- return status;
+ return MODE_OK;
}
static void ast_crtc_helper_mode_set_nofb(struct drm_crtc *crtc)
@@ -1095,8 +651,8 @@ static void ast_crtc_helper_mode_set_nofb(struct drm_crtc *crtc)
struct ast_device *ast = to_ast_device(dev);
struct drm_crtc_state *crtc_state = crtc->state;
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
- struct ast_vbios_mode_info *vbios_mode_info =
- &ast_crtc_state->vbios_mode_info;
+ const struct ast_vbios_stdtable *std_table = ast_crtc_state->std_table;
+ const struct ast_vbios_enhtable *vmode = ast_crtc_state->vmode;
struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
/*
@@ -1107,25 +663,29 @@ static void ast_crtc_helper_mode_set_nofb(struct drm_crtc *crtc)
*/
ast_wait_for_vretrace(ast);
- ast_set_vbios_mode_reg(ast, adjusted_mode, vbios_mode_info);
+ ast_set_vbios_mode_reg(ast, adjusted_mode, vmode);
ast_set_index_reg(ast, AST_IO_VGACRI, 0xa1, 0x06);
- ast_set_std_reg(ast, adjusted_mode, vbios_mode_info);
- ast_set_crtc_reg(ast, adjusted_mode, vbios_mode_info);
- ast_set_dclk_reg(ast, adjusted_mode, vbios_mode_info);
+ ast_set_std_reg(ast, adjusted_mode, std_table);
+ ast_set_crtc_reg(ast, adjusted_mode, vmode);
+ ast_set_dclk_reg(ast, adjusted_mode, vmode);
ast_set_crtthd_reg(ast);
- ast_set_sync_reg(ast, adjusted_mode, vbios_mode_info);
+ ast_set_sync_reg(ast, adjusted_mode, vmode);
}
static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
struct ast_crtc_state *old_ast_crtc_state = to_ast_crtc_state(old_crtc_state);
struct drm_device *dev = crtc->dev;
+ struct ast_device *ast = to_ast_device(dev);
struct ast_crtc_state *ast_state;
const struct drm_format_info *format;
- bool succ;
+ const struct ast_vbios_enhtable *vmode;
+ unsigned int hborder = 0;
+ unsigned int vborder = 0;
int ret;
if (!crtc_state->enable)
@@ -1157,11 +717,56 @@ static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
}
}
- succ = ast_get_vbios_mode_info(format, &crtc_state->mode,
- &crtc_state->adjusted_mode,
- &ast_state->vbios_mode_info);
- if (!succ)
+ /*
+ * Set register tables.
+ *
+ * TODO: These tables mix all kinds of fields and should
+ * probably be resolved into various helper functions.
+ */
+ switch (format->format) {
+ case DRM_FORMAT_C8:
+ ast_state->std_table = &vbios_stdtable[VGAModeIndex];
+ break;
+ case DRM_FORMAT_RGB565:
+ ast_state->std_table = &vbios_stdtable[HiCModeIndex];
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ ast_state->std_table = &vbios_stdtable[TrueCModeIndex];
+ break;
+ default:
return -EINVAL;
+ }
+
+ /*
+ * Find the VBIOS mode and adjust the DRM display mode accordingly
+ * if a full modeset is required. Otherwise keep the existing values.
+ */
+ if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ vmode = ast_vbios_find_mode(ast, &crtc_state->mode);
+ if (!vmode)
+ return -EINVAL;
+ ast_state->vmode = vmode;
+
+ if (vmode->flags & HBorder)
+ hborder = 8;
+ if (vmode->flags & VBorder)
+ vborder = 8;
+
+ adjusted_mode->crtc_hdisplay = vmode->hde;
+ adjusted_mode->crtc_hblank_start = vmode->hde + hborder;
+ adjusted_mode->crtc_hblank_end = vmode->ht - hborder;
+ adjusted_mode->crtc_hsync_start = vmode->hde + hborder + vmode->hfp;
+ adjusted_mode->crtc_hsync_end = vmode->hde + hborder + vmode->hfp + vmode->hsync;
+ adjusted_mode->crtc_htotal = vmode->ht;
+
+ adjusted_mode->crtc_vdisplay = vmode->vde;
+ adjusted_mode->crtc_vblank_start = vmode->vde + vborder;
+ adjusted_mode->crtc_vblank_end = vmode->vt - vborder;
+ adjusted_mode->crtc_vsync_start = vmode->vde + vborder + vmode->vfp;
+ adjusted_mode->crtc_vsync_end = vmode->vde + vborder + vmode->vfp + vmode->vsync;
+ adjusted_mode->crtc_vtotal = vmode->vt;
+ }
return 0;
}
@@ -1263,8 +868,8 @@ ast_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
ast_state = to_ast_crtc_state(crtc->state);
new_ast_state->format = ast_state->format;
- memcpy(&new_ast_state->vbios_mode_info, &ast_state->vbios_mode_info,
- sizeof(new_ast_state->vbios_mode_info));
+ new_ast_state->std_table = ast_state->std_table;
+ new_ast_state->vmode = ast_state->vmode;
return &new_ast_state->base;
}
@@ -1294,7 +899,7 @@ static int ast_crtc_init(struct ast_device *ast)
int ret;
ret = drm_crtc_init_with_planes(dev, crtc, &ast->primary_plane.base,
- &ast->cursor_plane.base, &ast_crtc_funcs,
+ &ast->cursor_plane.base.base, &ast_crtc_funcs,
NULL);
if (ret)
return ret;
@@ -1373,12 +978,7 @@ int ast_mode_config_init(struct ast_device *ast)
dev->mode_config.min_height = 0;
dev->mode_config.preferred_depth = 24;
- if (ast->chip == AST2100 || // GEN2, but not AST1100 (?)
- ast->chip == AST2200 || // GEN3, but not AST2150 (?)
- IS_AST_GEN7(ast) ||
- IS_AST_GEN6(ast) ||
- IS_AST_GEN5(ast) ||
- IS_AST_GEN4(ast)) {
+ if (ast->support_fullhd) {
dev->mode_config.max_width = 1920;
dev->mode_config.max_height = 2048;
} else {
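In the new atomic_check path, the adjusted CRTC timings are computed directly from the ast_vbios_enhtable fields whenever a full modeset is needed, with optional 8-pixel borders. Here is a stand-alone sketch of that arithmetic; the structures are simplified stand-ins, and only the timing field names and the example 1024x768 entry come from the driver's tables.

#include <stdbool.h>
#include <stdio.h>

/* Simplified copy of the enh-table timing fields used by the driver. */
struct vbios_mode {
	unsigned int ht, hde, hfp, hsync;  /* horizontal total/display/front porch/sync */
	unsigned int vt, vde, vfp, vsync;  /* vertical counterparts */
	bool hborder, vborder;             /* 8-pixel borders, per the mode flags */
};

struct crtc_timings {
	unsigned int hdisplay, hblank_start, hblank_end, hsync_start, hsync_end, htotal;
	unsigned int vdisplay, vblank_start, vblank_end, vsync_start, vsync_end, vtotal;
};

static struct crtc_timings derive_timings(const struct vbios_mode *m)
{
	unsigned int hb = m->hborder ? 8 : 0;
	unsigned int vb = m->vborder ? 8 : 0;
	struct crtc_timings t;

	t.hdisplay     = m->hde;
	t.hblank_start = m->hde + hb;
	t.hblank_end   = m->ht - hb;
	t.hsync_start  = m->hde + hb + m->hfp;
	t.hsync_end    = m->hde + hb + m->hfp + m->hsync;
	t.htotal       = m->ht;

	t.vdisplay     = m->vde;
	t.vblank_start = m->vde + vb;
	t.vblank_end   = m->vt - vb;
	t.vsync_start  = m->vde + vb + m->vfp;
	t.vsync_end    = m->vde + vb + m->vfp + m->vsync;
	t.vtotal       = m->vt;

	return t;
}

int main(void)
{
	/* 1024x768@60 entry from the VBIOS tables: ht=1344 hde=1024 hfp=24 hsync=136 ... */
	struct vbios_mode m = { 1344, 1024, 24, 136, 806, 768, 3, 6, false, false };
	struct crtc_timings t = derive_timings(&m);

	printf("hsync %u-%u of %u, vsync %u-%u of %u\n",
	       t.hsync_start, t.hsync_end, t.htotal,
	       t.vsync_start, t.vsync_end, t.vtotal);
	return 0;
}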
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 364030f97571..91e85e457bdf 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -340,26 +340,49 @@ static void ast_init_dram_reg(struct ast_device *ast)
} while ((j & 0x40) == 0);
}
-void ast_post_gpu(struct ast_device *ast)
+int ast_post_gpu(struct ast_device *ast)
{
+ int ret;
+
ast_set_def_ext_reg(ast);
- if (IS_AST_GEN7(ast)) {
- if (ast->tx_chip == AST_TX_ASTDP)
- ast_dp_launch(ast);
- } else if (ast->config_mode == ast_use_p2a) {
- if (IS_AST_GEN6(ast))
+ if (AST_GEN(ast) >= 7) {
+ if (ast->tx_chip == AST_TX_ASTDP) {
+ ret = ast_dp_launch(ast);
+ if (ret)
+ return ret;
+ }
+ } else if (AST_GEN(ast) >= 6) {
+ if (ast->config_mode == ast_use_p2a) {
ast_post_chip_2500(ast);
- else if (IS_AST_GEN5(ast) || IS_AST_GEN4(ast))
+ } else {
+ if (ast->tx_chip == AST_TX_SIL164) {
+ /* Enable DVO */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
+ }
+ }
+ } else if (AST_GEN(ast) >= 4) {
+ if (ast->config_mode == ast_use_p2a) {
ast_post_chip_2300(ast);
- else
+ ast_init_3rdtx(ast);
+ } else {
+ if (ast->tx_chip == AST_TX_SIL164) {
+ /* Enable DVO */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
+ }
+ }
+ } else {
+ if (ast->config_mode == ast_use_p2a) {
ast_init_dram_reg(ast);
-
- ast_init_3rdtx(ast);
- } else {
- if (ast->tx_chip == AST_TX_SIL164)
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80); /* Enable DVO */
+ } else {
+ if (ast->tx_chip == AST_TX_SIL164) {
+ /* Enable DVO */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80);
+ }
+ }
}
+
+ return 0;
}
/* AST 2300 DRAM settings */
@@ -2039,7 +2062,7 @@ void ast_post_chip_2500(struct ast_device *ast)
u8 reg;
reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
- if ((reg & AST_VRAM_INIT_STATUS_MASK) == 0) {/* vga only */
+ if ((reg & AST_IO_VGACRD0_VRAM_INIT_STATUS_MASK) == 0) {/* vga only */
/* Clear bus lock condition */
ast_patch_ahb_2500(ast->regs);
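ast_post_gpu() is converted from void to int because the ASTDP firmware launch can fail, and both probe and resume now bail out on error instead of continuing with a half-initialized device. A tiny sketch of that void-to-int conversion pattern, with hypothetical helper names:

#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

/* Hypothetical stand-ins for the firmware-launch and DRAM-init steps. */
static int launch_dp_firmware(void) { return -ENODEV; /* pretend it failed */ }
static void init_dram(void)         { }

/*
 * Steps that can fail return an errno, and the caller propagates it
 * instead of pressing on with a half-initialized device.
 */
static int post_gpu(unsigned int gen, bool has_astdp)
{
	int ret;

	if (gen >= 7 && has_astdp) {
		ret = launch_dp_firmware();
		if (ret)
			return ret;  /* propagated up to probe/resume */
	} else {
		init_dram();
	}
	return 0;
}

int main(void)
{
	int ret = post_gpu(7, true);

	if (ret)
		fprintf(stderr, "POST failed: %d\n", ret);
	return ret ? 1 : 0;
}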
diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h
index 2aadf07d135a..bb2cc1d8b84e 100644
--- a/drivers/gpu/drm/ast/ast_reg.h
+++ b/drivers/gpu/drm/ast/ast_reg.h
@@ -32,11 +32,18 @@
#define AST_IO_VGACR80_PASSWORD (0xa8)
#define AST_IO_VGACRA1_VGAIO_DISABLED BIT(1)
#define AST_IO_VGACRA1_MMIO_ENABLED BIT(2)
+#define AST_IO_VGACRA3_DVO_ENABLED BIT(7)
#define AST_IO_VGACRB6_HSYNC_OFF BIT(0)
#define AST_IO_VGACRB6_VSYNC_OFF BIT(1)
#define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */
#define AST_IO_VGACRCB_HWC_ENABLED BIT(1)
+/* mirrors SCU100[7:0] */
+#define AST_IO_VGACRD0_VRAM_INIT_STATUS_MASK GENMASK(7, 6)
+#define AST_IO_VGACRD0_VRAM_INIT_BY_BMC BIT(7)
+#define AST_IO_VGACRD0_VRAM_INIT_READY BIT(6)
+#define AST_IO_VGACRD0_IKVM_WIDESCREEN BIT(0)
+
#define AST_IO_VGACRD1_MCU_FW_EXECUTING BIT(5)
/* Display Transmitter Type */
#define AST_IO_VGACRD1_TX_TYPE_MASK GENMASK(3, 1)
@@ -48,11 +55,16 @@
#define AST_IO_VGACRD1_TX_ANX9807_VBIOS 0x0a
#define AST_IO_VGACRD1_TX_FW_EMBEDDED_FW 0x0c /* special case of DP501 */
#define AST_IO_VGACRD1_TX_ASTDP 0x0e
+#define AST_IO_VGACRD1_SUPPORTS_WUXGA BIT(0)
+/*
+ * AST DisplayPort
+ */
#define AST_IO_VGACRD7_EDID_VALID_FLAG BIT(0)
#define AST_IO_VGACRDC_LINK_SUCCESS BIT(0)
#define AST_IO_VGACRDF_HPD BIT(0)
#define AST_IO_VGACRDF_DP_VIDEO_ENABLE BIT(4) /* mirrors AST_IO_VGACRE3_DP_VIDEO_ENABLE */
+#define AST_IO_VGACRE0_24BPP BIT(5) /* 18 bpp, if unset */
#define AST_IO_VGACRE3_DP_VIDEO_ENABLE BIT(0)
#define AST_IO_VGACRE3_DP_PHY_SLEEP BIT(4)
#define AST_IO_VGACRE5_EDID_READ_DONE BIT(0)
@@ -60,23 +72,4 @@
#define AST_IO_VGAIR1_R (0x5A)
#define AST_IO_VGAIR1_VREFRESH BIT(3)
-
-#define AST_VRAM_INIT_STATUS_MASK GENMASK(7, 6)
-//#define AST_VRAM_INIT_BY_BMC BIT(7)
-//#define AST_VRAM_INIT_READY BIT(6)
-
-/*
- * AST DisplayPort
- */
-
-/*
- * ASTDP setmode registers:
- * CRE0[7:0]: MISC0 ((0x00: 18-bpp) or (0x20: 24-bpp)
- * CRE1[7:0]: MISC1 (default: 0x00)
- * CRE2[7:0]: video format index (0x00 ~ 0x20 or 0x40 ~ 0x50)
- */
-#define ASTDP_MISC0_24bpp BIT(5)
-#define ASTDP_MISC1 0
-#define ASTDP_AND_CLEAR_MASK 0x00
-
#endif
diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h
index 0378c9bc079b..f1c9f7e1f1fc 100644
--- a/drivers/gpu/drm/ast/ast_tables.h
+++ b/drivers/gpu/drm/ast/ast_tables.h
@@ -24,6 +24,8 @@
#ifndef AST_TABLES_H
#define AST_TABLES_H
+#include "ast_drv.h"
+
/* Std. Table Index Definition */
#define TextModeIndex 0
#define EGAModeIndex 1
@@ -31,54 +33,6 @@
#define HiCModeIndex 3
#define TrueCModeIndex 4
-#define Charx8Dot 0x00000001
-#define HalfDCLK 0x00000002
-#define DoubleScanMode 0x00000004
-#define LineCompareOff 0x00000008
-#define HBorder 0x00000020
-#define VBorder 0x00000010
-#define WideScreenMode 0x00000100
-#define NewModeInfo 0x00000200
-#define NHSync 0x00000400
-#define PHSync 0x00000800
-#define NVSync 0x00001000
-#define PVSync 0x00002000
-#define SyncPP (PVSync | PHSync)
-#define SyncPN (PVSync | NHSync)
-#define SyncNP (NVSync | PHSync)
-#define SyncNN (NVSync | NHSync)
-#define AST2500PreCatchCRT 0x00004000
-
-/* DCLK Index */
-#define VCLK25_175 0x00
-#define VCLK28_322 0x01
-#define VCLK31_5 0x02
-#define VCLK36 0x03
-#define VCLK40 0x04
-#define VCLK49_5 0x05
-#define VCLK50 0x06
-#define VCLK56_25 0x07
-#define VCLK65 0x08
-#define VCLK75 0x09
-#define VCLK78_75 0x0A
-#define VCLK94_5 0x0B
-#define VCLK108 0x0C
-#define VCLK135 0x0D
-#define VCLK157_5 0x0E
-#define VCLK162 0x0F
-/* #define VCLK193_25 0x10 */
-#define VCLK154 0x10
-#define VCLK83_5 0x11
-#define VCLK106_5 0x12
-#define VCLK146_25 0x13
-#define VCLK148_5 0x14
-#define VCLK71 0x15
-#define VCLK88_75 0x16
-#define VCLK119 0x17
-#define VCLK85_5 0x18
-#define VCLK97_75 0x19
-#define VCLK118_25 0x1A
-
static const struct ast_vbios_dclk_info dclk_table[] = {
{0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */
{0x95, 0x62, 0x03}, /* 01: VCLK28_322 */
@@ -212,141 +166,4 @@ static const struct ast_vbios_stdtable vbios_stdtable[] = {
},
};
-static const struct ast_vbios_enhtable res_640x480[] = {
- { 800, 640, 8, 96, 525, 480, 2, 2, VCLK25_175, /* 60Hz */
- (SyncNN | HBorder | VBorder | Charx8Dot), 60, 1, 0x2E },
- { 832, 640, 16, 40, 520, 480, 1, 3, VCLK31_5, /* 72Hz */
- (SyncNN | HBorder | VBorder | Charx8Dot), 72, 2, 0x2E },
- { 840, 640, 16, 64, 500, 480, 1, 3, VCLK31_5, /* 75Hz */
- (SyncNN | Charx8Dot) , 75, 3, 0x2E },
- { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* 85Hz */
- (SyncNN | Charx8Dot) , 85, 4, 0x2E },
- { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* end */
- (SyncNN | Charx8Dot) , 0xFF, 4, 0x2E },
-};
-
-static const struct ast_vbios_enhtable res_800x600[] = {
- {1024, 800, 24, 72, 625, 600, 1, 2, VCLK36, /* 56Hz */
- (SyncPP | Charx8Dot), 56, 1, 0x30 },
- {1056, 800, 40, 128, 628, 600, 1, 4, VCLK40, /* 60Hz */
- (SyncPP | Charx8Dot), 60, 2, 0x30 },
- {1040, 800, 56, 120, 666, 600, 37, 6, VCLK50, /* 72Hz */
- (SyncPP | Charx8Dot), 72, 3, 0x30 },
- {1056, 800, 16, 80, 625, 600, 1, 3, VCLK49_5, /* 75Hz */
- (SyncPP | Charx8Dot), 75, 4, 0x30 },
- {1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* 85Hz */
- (SyncPP | Charx8Dot), 84, 5, 0x30 },
- {1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* end */
- (SyncPP | Charx8Dot), 0xFF, 5, 0x30 },
-};
-
-
-static const struct ast_vbios_enhtable res_1024x768[] = {
- {1344, 1024, 24, 136, 806, 768, 3, 6, VCLK65, /* 60Hz */
- (SyncNN | Charx8Dot), 60, 1, 0x31 },
- {1328, 1024, 24, 136, 806, 768, 3, 6, VCLK75, /* 70Hz */
- (SyncNN | Charx8Dot), 70, 2, 0x31 },
- {1312, 1024, 16, 96, 800, 768, 1, 3, VCLK78_75, /* 75Hz */
- (SyncPP | Charx8Dot), 75, 3, 0x31 },
- {1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5, /* 85Hz */
- (SyncPP | Charx8Dot), 84, 4, 0x31 },
- {1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5, /* end */
- (SyncPP | Charx8Dot), 0xFF, 4, 0x31 },
-};
-
-static const struct ast_vbios_enhtable res_1280x1024[] = {
- {1688, 1280, 48, 112, 1066, 1024, 1, 3, VCLK108, /* 60Hz */
- (SyncPP | Charx8Dot), 60, 1, 0x32 },
- {1688, 1280, 16, 144, 1066, 1024, 1, 3, VCLK135, /* 75Hz */
- (SyncPP | Charx8Dot), 75, 2, 0x32 },
- {1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* 85Hz */
- (SyncPP | Charx8Dot), 85, 3, 0x32 },
- {1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* end */
- (SyncPP | Charx8Dot), 0xFF, 3, 0x32 },
-};
-
-static const struct ast_vbios_enhtable res_1600x1200[] = {
- {2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* 60Hz */
- (SyncPP | Charx8Dot), 60, 1, 0x33 },
- {2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* end */
- (SyncPP | Charx8Dot), 0xFF, 1, 0x33 },
-};
-
-static const struct ast_vbios_enhtable res_1152x864[] = {
- {1600, 1152, 64, 128, 900, 864, 1, 3, VCLK108, /* 75Hz */
- (SyncPP | Charx8Dot | NewModeInfo), 75, 1, 0x3B },
- {1600, 1152, 64, 128, 900, 864, 1, 3, VCLK108, /* end */
- (SyncPP | Charx8Dot | NewModeInfo), 0xFF, 1, 0x3B },
-};
-
-/* 16:9 */
-static const struct ast_vbios_enhtable res_1360x768[] = {
- {1792, 1360, 64, 112, 795, 768, 3, 6, VCLK85_5, /* 60Hz */
- (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x39 },
- {1792, 1360, 64, 112, 795, 768, 3, 6, VCLK85_5, /* end */
- (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
- AST2500PreCatchCRT), 0xFF, 1, 0x39 },
-};
-
-static const struct ast_vbios_enhtable res_1600x900[] = {
- {1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60Hz CVT RB */
- (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
- AST2500PreCatchCRT), 60, 1, 0x3A },
- {2112, 1600, 88, 168, 934, 900, 3, 5, VCLK118_25, /* 60Hz CVT */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x3A },
- {2112, 1600, 88, 168, 934, 900, 3, 5, VCLK118_25, /* 60Hz CVT */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x3A },
-};
-
-static const struct ast_vbios_enhtable res_1920x1080[] = {
- {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */
- (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
- AST2500PreCatchCRT), 60, 1, 0x38 },
- {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */
- (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
- AST2500PreCatchCRT), 0xFF, 1, 0x38 },
-};
-
-
-/* 16:10 */
-static const struct ast_vbios_enhtable res_1280x800[] = {
- {1440, 1280, 48, 32, 823, 800, 3, 6, VCLK71, /* 60Hz RB */
- (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
- AST2500PreCatchCRT), 60, 1, 0x35 },
- {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x35 },
- {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x35 },
-
-};
-
-static const struct ast_vbios_enhtable res_1440x900[] = {
- {1600, 1440, 48, 32, 926, 900, 3, 6, VCLK88_75, /* 60Hz RB */
- (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
- AST2500PreCatchCRT), 60, 1, 0x36 },
- {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x36 },
- {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x36 },
-};
-
-static const struct ast_vbios_enhtable res_1680x1050[] = {
- {1840, 1680, 48, 32, 1080, 1050, 3, 6, VCLK119, /* 60Hz RB */
- (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
- AST2500PreCatchCRT), 60, 1, 0x37 },
- {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x37 },
- {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */
- (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 0xFF, 2, 0x37 },
-};
-
-static const struct ast_vbios_enhtable res_1920x1200[] = {
- {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz RB*/
- (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
- AST2500PreCatchCRT), 60, 1, 0x34 },
- {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz RB */
- (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
- AST2500PreCatchCRT), 0xFF, 1, 0x34 },
-};
-
#endif
diff --git a/drivers/gpu/drm/ast/ast_vbios.c b/drivers/gpu/drm/ast/ast_vbios.c
new file mode 100644
index 000000000000..0953e6dd3976
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_vbios.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (c) 2005 ASPEED Technology Inc.
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of the authors not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission. The authors makes no representations
+ * about the suitability of this software for any purpose. It is provided
+ * "as is" without express or implied warranty.
+ *
+ * THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ast_drv.h"
+#include "ast_vbios.h"
+
+/* 4:3 */
+
+static const struct ast_vbios_enhtable res_640x480[] = {
+ { 800, 640, 8, 96, 525, 480, 2, 2, VCLK25_175, /* 60 Hz */
+ (SyncNN | HBorder | VBorder | Charx8Dot), 60, 1, 0x2e },
+ { 832, 640, 16, 40, 520, 480, 1, 3, VCLK31_5, /* 72 Hz */
+ (SyncNN | HBorder | VBorder | Charx8Dot), 72, 2, 0x2e },
+ { 840, 640, 16, 64, 500, 480, 1, 3, VCLK31_5, /* 75 Hz */
+ (SyncNN | Charx8Dot), 75, 3, 0x2e },
+ { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* 85 Hz */
+ (SyncNN | Charx8Dot), 85, 4, 0x2e },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_800x600[] = {
+ { 1024, 800, 24, 72, 625, 600, 1, 2, VCLK36, /* 56 Hz */
+ (SyncPP | Charx8Dot), 56, 1, 0x30 },
+ { 1056, 800, 40, 128, 628, 600, 1, 4, VCLK40, /* 60 Hz */
+ (SyncPP | Charx8Dot), 60, 2, 0x30 },
+ { 1040, 800, 56, 120, 666, 600, 37, 6, VCLK50, /* 72 Hz */
+ (SyncPP | Charx8Dot), 72, 3, 0x30 },
+ { 1056, 800, 16, 80, 625, 600, 1, 3, VCLK49_5, /* 75 Hz */
+ (SyncPP | Charx8Dot), 75, 4, 0x30 },
+ { 1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* 85 Hz */
+ (SyncPP | Charx8Dot), 84, 5, 0x30 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_1024x768[] = {
+ { 1344, 1024, 24, 136, 806, 768, 3, 6, VCLK65, /* 60 Hz */
+ (SyncNN | Charx8Dot), 60, 1, 0x31 },
+ { 1328, 1024, 24, 136, 806, 768, 3, 6, VCLK75, /* 70 Hz */
+ (SyncNN | Charx8Dot), 70, 2, 0x31 },
+ { 1312, 1024, 16, 96, 800, 768, 1, 3, VCLK78_75, /* 75 Hz */
+ (SyncPP | Charx8Dot), 75, 3, 0x31 },
+ { 1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5, /* 85 Hz */
+ (SyncPP | Charx8Dot), 84, 4, 0x31 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_1152x864[] = {
+ { 1600, 1152, 64, 128, 900, 864, 1, 3, VCLK108, /* 75 Hz */
+ (SyncPP | Charx8Dot | NewModeInfo), 75, 1, 0x3b },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_1280x1024[] = {
+ { 1688, 1280, 48, 112, 1066, 1024, 1, 3, VCLK108, /* 60 Hz */
+ (SyncPP | Charx8Dot), 60, 1, 0x32 },
+ { 1688, 1280, 16, 144, 1066, 1024, 1, 3, VCLK135, /* 75 Hz */
+ (SyncPP | Charx8Dot), 75, 2, 0x32 },
+ { 1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* 85 Hz */
+ (SyncPP | Charx8Dot), 85, 3, 0x32 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_1600x1200[] = {
+ { 2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* 60 Hz */
+ (SyncPP | Charx8Dot), 60, 1, 0x33 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+/* 16:9 */
+
+static const struct ast_vbios_enhtable res_1360x768[] = {
+ { 1792, 1360, 64, 112, 795, 768, 3, 6, VCLK85_5, /* 60 Hz */
+ (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 1, 0x39 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_1600x900[] = {
+ { 1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60 Hz CVT RB */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
+ AST2500PreCatchCRT), 60, 1, 0x3a },
+ { 2112, 1600, 88, 168, 934, 900, 3, 5, VCLK118_25, /* 60 Hz CVT */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x3a },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_1920x1080[] = {
+ { 2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60 Hz */
+ (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
+ AST2500PreCatchCRT), 60, 1, 0x38 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+/* 16:10 */
+
+static const struct ast_vbios_enhtable res_1280x800[] = {
+ { 1440, 1280, 48, 32, 823, 800, 3, 6, VCLK71, /* 60 Hz RB */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
+ AST2500PreCatchCRT), 60, 1, 0x35 },
+ { 1680, 1280, 72, 128, 831, 800, 3, 6, VCLK83_5, /* 60 Hz */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x35 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_1440x900[] = {
+ { 1600, 1440, 48, 32, 926, 900, 3, 6, VCLK88_75, /* 60 Hz RB */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
+ AST2500PreCatchCRT), 60, 1, 0x36 },
+ { 1904, 1440, 80, 152, 934, 900, 3, 6, VCLK106_5, /* 60 Hz */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x36 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_1680x1050[] = {
+ { 1840, 1680, 48, 32, 1080, 1050, 3, 6, VCLK119, /* 60 Hz RB */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
+ AST2500PreCatchCRT), 60, 1, 0x37 },
+ { 2240, 1680, 104, 176, 1089, 1050, 3, 6, VCLK146_25, /* 60 Hz */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 2, 0x37 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+static const struct ast_vbios_enhtable res_1920x1200[] = {
+ { 2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60 Hz RB */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
+ AST2500PreCatchCRT), 60, 1, 0x34 },
+ AST_VBIOS_INVALID_MODE, /* end */
+};
+
+/*
+ * VBIOS mode tables
+ */
+
+static const struct ast_vbios_enhtable *res_table_wuxga[] = {
+ &res_1920x1200[0],
+ NULL,
+};
+
+static const struct ast_vbios_enhtable *res_table_fullhd[] = {
+ &res_1920x1080[0],
+ NULL,
+};
+
+static const struct ast_vbios_enhtable *res_table_wsxga_p[] = {
+ &res_1280x800[0],
+ &res_1360x768[0],
+ &res_1440x900[0],
+ &res_1600x900[0],
+ &res_1680x1050[0],
+ NULL,
+};
+
+static const struct ast_vbios_enhtable *res_table[] = {
+ &res_640x480[0],
+ &res_800x600[0],
+ &res_1024x768[0],
+ &res_1152x864[0],
+ &res_1280x1024[0],
+ &res_1600x1200[0],
+ NULL,
+};
+
+static const struct ast_vbios_enhtable *
+__ast_vbios_find_mode_table(const struct ast_vbios_enhtable **vmode_tables,
+ unsigned int hdisplay,
+ unsigned int vdisplay)
+{
+ while (*vmode_tables) {
+ if ((*vmode_tables)->hde == hdisplay && (*vmode_tables)->vde == vdisplay)
+ return *vmode_tables;
+ ++vmode_tables;
+ }
+
+ return NULL;
+}
+
+static const struct ast_vbios_enhtable *ast_vbios_find_mode_table(const struct ast_device *ast,
+ unsigned int hdisplay,
+ unsigned int vdisplay)
+{
+ const struct ast_vbios_enhtable *vmode_table = NULL;
+
+ if (ast->support_wuxga)
+ vmode_table = __ast_vbios_find_mode_table(res_table_wuxga, hdisplay, vdisplay);
+ if (!vmode_table && ast->support_fullhd)
+ vmode_table = __ast_vbios_find_mode_table(res_table_fullhd, hdisplay, vdisplay);
+ if (!vmode_table && ast->support_wsxga_p)
+ vmode_table = __ast_vbios_find_mode_table(res_table_wsxga_p, hdisplay, vdisplay);
+ if (!vmode_table)
+ vmode_table = __ast_vbios_find_mode_table(res_table, hdisplay, vdisplay);
+
+ return vmode_table;
+}
+
+const struct ast_vbios_enhtable *ast_vbios_find_mode(const struct ast_device *ast,
+ const struct drm_display_mode *mode)
+{
+ const struct ast_vbios_enhtable *best_vmode = NULL;
+ const struct ast_vbios_enhtable *vmode_table;
+ const struct ast_vbios_enhtable *vmode;
+ u32 refresh_rate;
+
+ vmode_table = ast_vbios_find_mode_table(ast, mode->hdisplay, mode->vdisplay);
+ if (!vmode_table)
+ return NULL;
+
+ refresh_rate = drm_mode_vrefresh(mode);
+
+ for (vmode = vmode_table; ast_vbios_mode_is_valid(vmode); ++vmode) {
+ if (((mode->flags & DRM_MODE_FLAG_NVSYNC) && (vmode->flags & PVSync)) ||
+ ((mode->flags & DRM_MODE_FLAG_PVSYNC) && (vmode->flags & NVSync)) ||
+ ((mode->flags & DRM_MODE_FLAG_NHSYNC) && (vmode->flags & PHSync)) ||
+ ((mode->flags & DRM_MODE_FLAG_PHSYNC) && (vmode->flags & NHSync))) {
+ continue;
+ }
+ if (vmode->refresh_rate <= refresh_rate &&
+ (!best_vmode || vmode->refresh_rate > best_vmode->refresh_rate))
+ best_vmode = vmode;
+ }
+
+ return best_vmode;
+}
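ast_vbios_find_mode() first narrows by display size, consulting the wide-screen tables only when the corresponding support_wuxga/support_fullhd/support_wsxga_p flag is set, then scans the chosen table for the highest refresh rate not above the requested one while skipping entries whose fixed sync polarity contradicts the mode flags. A minimal sketch of a caller, assuming a hypothetical validation helper around the function this file exports:

/* Hypothetical caller; only ast_vbios_find_mode() comes from this patch. */
#include <drm/drm_modes.h>

#include "ast_drv.h"
#include "ast_vbios.h"

static enum drm_mode_status
ast_sketch_mode_valid(const struct ast_device *ast,
		      const struct drm_display_mode *mode)
{
	const struct ast_vbios_enhtable *vmode;

	vmode = ast_vbios_find_mode(ast, mode);
	if (!vmode)
		return MODE_NOMODE;	/* no VBIOS entry for this size/rate */

	return MODE_OK;
}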
diff --git a/drivers/gpu/drm/ast/ast_vbios.h b/drivers/gpu/drm/ast/ast_vbios.h
new file mode 100644
index 000000000000..8cf025010594
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_vbios.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2005 ASPEED Technology Inc.
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of the authors not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission. The authors makes no representations
+ * about the suitability of this software for any purpose. It is provided
+ * "as is" without express or implied warranty.
+ *
+ * THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+/* Ported from xf86-video-ast driver */
+
+#ifndef AST_VBIOS_H
+#define AST_VBIOS_H
+
+#include <linux/types.h>
+
+struct ast_device;
+struct drm_display_mode;
+
+#define Charx8Dot 0x00000001
+#define HalfDCLK 0x00000002
+#define DoubleScanMode 0x00000004
+#define LineCompareOff 0x00000008
+#define HBorder 0x00000020
+#define VBorder 0x00000010
+#define WideScreenMode 0x00000100
+#define NewModeInfo 0x00000200
+#define NHSync 0x00000400
+#define PHSync 0x00000800
+#define NVSync 0x00001000
+#define PVSync 0x00002000
+#define SyncPP (PVSync | PHSync)
+#define SyncPN (PVSync | NHSync)
+#define SyncNP (NVSync | PHSync)
+#define SyncNN (NVSync | NHSync)
+#define AST2500PreCatchCRT 0x00004000
+
+/* DCLK Index */
+#define VCLK25_175 0x00
+#define VCLK28_322 0x01
+#define VCLK31_5 0x02
+#define VCLK36 0x03
+#define VCLK40 0x04
+#define VCLK49_5 0x05
+#define VCLK50 0x06
+#define VCLK56_25 0x07
+#define VCLK65 0x08
+#define VCLK75 0x09
+#define VCLK78_75 0x0a
+#define VCLK94_5 0x0b
+#define VCLK108 0x0c
+#define VCLK135 0x0d
+#define VCLK157_5 0x0e
+#define VCLK162 0x0f
+/* #define VCLK193_25 0x10 */
+#define VCLK154 0x10
+#define VCLK83_5 0x11
+#define VCLK106_5 0x12
+#define VCLK146_25 0x13
+#define VCLK148_5 0x14
+#define VCLK71 0x15
+#define VCLK88_75 0x16
+#define VCLK119 0x17
+#define VCLK85_5 0x18
+#define VCLK97_75 0x19
+#define VCLK118_25 0x1a
+
+struct ast_vbios_enhtable {
+ u32 ht;
+ u32 hde;
+ u32 hfp;
+ u32 hsync;
+ u32 vt;
+ u32 vde;
+ u32 vfp;
+ u32 vsync;
+ u32 dclk_index;
+ u32 flags;
+ u32 refresh_rate;
+ u32 refresh_rate_index;
+ u32 mode_id;
+};
+
+#define AST_VBIOS_INVALID_MODE \
+ {0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u}
+
+static inline bool ast_vbios_mode_is_valid(const struct ast_vbios_enhtable *vmode)
+{
+ return vmode->ht && vmode->vt && vmode->refresh_rate;
+}
+
+const struct ast_vbios_enhtable *ast_vbios_find_mode(const struct ast_device *ast,
+ const struct drm_display_mode *mode);
+
+#endif
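Every per-resolution table in ast_vbios.c ends with AST_VBIOS_INVALID_MODE, an all-zero sentinel, so callers can walk a table without carrying its length; ast_vbios_mode_is_valid() treats a zero total/refresh as the terminator. A small iteration sketch using only what this header declares (the printout is illustrative):

#include <linux/printk.h>

#include "ast_vbios.h"

/* Walk one sentinel-terminated table. */
static void ast_sketch_dump_table(const struct ast_vbios_enhtable *table)
{
	const struct ast_vbios_enhtable *vmode;

	for (vmode = table; ast_vbios_mode_is_valid(vmode); ++vmode)
		pr_info("mode 0x%02x: %ux%u@%uHz\n",
			vmode->mode_id, vmode->hde, vmode->vde,
			vmode->refresh_rate);
}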
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 6b4664d91faa..d20f1646dac2 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -90,6 +90,14 @@ config DRM_FSL_LDB
help
Support for i.MX8MP DPI-to-LVDS on-SoC encoder.
+config DRM_I2C_NXP_TDA998X
+ tristate "NXP Semiconductors TDA998X HDMI encoder"
+ default m if DRM_TILCDC
+ select CEC_CORE if CEC_NOTIFIER
+ select SND_SOC_HDMI_CODEC if SND_SOC
+ help
+ Support for NXP Semiconductors TDA998X HDMI encoders.
+
config DRM_ITE_IT6263
tristate "ITE IT6263 LVDS/HDMI bridge"
depends on OF
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 97304b429a53..245e8a27e3fc 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -6,6 +6,10 @@ obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o
obj-$(CONFIG_DRM_CROS_EC_ANX7688) += cros-ec-anx7688.o
obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o
obj-$(CONFIG_DRM_FSL_LDB) += fsl-ldb.o
+
+tda998x-y := tda998x_drv.o
+obj-$(CONFIG_DRM_I2C_NXP_TDA998X) += tda998x.o
+
obj-$(CONFIG_DRM_ITE_IT6263) += ite-it6263.o
obj-$(CONFIG_DRM_ITE_IT6505) += ite-it6505.o
obj-$(CONFIG_DRM_LONTIUM_LT8912B) += lontium-lt8912b.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
index 657bc3dd18df..1ff8c815ec79 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -243,9 +243,14 @@ static const struct hdmi_codec_ops adv7511_codec_ops = {
static const struct hdmi_codec_pdata codec_data = {
.ops = &adv7511_codec_ops,
+ .i2s_formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |
+ SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE),
.max_i2s_channels = 2,
.i2s = 1,
+ .no_i2s_capture = 1,
.spdif = 1,
+ .no_spdif_capture = 1,
};
int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)
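The added fields advertise the supported I2S sample formats and mark both the I2S and S/PDIF paths as playback-only to the common hdmi-audio-codec driver. A sketch of how such pdata is normally handed over; the registration call below reflects the usual pattern, not the body of adv7511_audio_init(), which this hunk does not show:

#include <linux/platform_device.h>
#include <sound/hdmi-codec.h>

/* Typical hand-off of hdmi_codec_pdata to the generic HDMI codec. */
static struct platform_device *
sketch_register_hdmi_codec(struct device *dev,
			   const struct hdmi_codec_pdata *pdata)
{
	return platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
					     PLATFORM_DEVID_AUTO,
					     pdata, sizeof(*pdata));
}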
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index a13b3d8ab6ac..050dae338ffe 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -847,7 +847,7 @@ static int adv7511_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
adv7511_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct adv7511 *adv = connector_to_adv7511(connector);
@@ -910,14 +910,16 @@ static struct adv7511 *bridge_to_adv7511(struct drm_bridge *bridge)
return container_of(bridge, struct adv7511, bridge);
}
-static void adv7511_bridge_enable(struct drm_bridge *bridge)
+static void adv7511_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
adv7511_power_on(adv);
}
-static void adv7511_bridge_disable(struct drm_bridge *bridge)
+static void adv7511_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
@@ -996,14 +998,18 @@ static void adv7511_bridge_hpd_notify(struct drm_bridge *bridge,
}
static const struct drm_bridge_funcs adv7511_bridge_funcs = {
- .enable = adv7511_bridge_enable,
- .disable = adv7511_bridge_disable,
.mode_set = adv7511_bridge_mode_set,
.mode_valid = adv7511_bridge_mode_valid,
.attach = adv7511_bridge_attach,
.detect = adv7511_bridge_detect,
.edid_read = adv7511_bridge_edid_read,
.hpd_notify = adv7511_bridge_hpd_notify,
+
+ .atomic_enable = adv7511_bridge_atomic_enable,
+ .atomic_disable = adv7511_bridge_atomic_disable,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
};
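This hunk is the template for the rest of the series: .enable/.disable, which received only the bridge, become .atomic_enable/.atomic_disable taking the full drm_atomic_state, and the three atomic state helpers become mandatory. A minimal sketch of the resulting shape, with placeholder bodies:

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>

static void sketch_atomic_enable(struct drm_bridge *bridge,
				 struct drm_atomic_state *state)
{
	/* power up the encoder here */
}

static void sketch_atomic_disable(struct drm_bridge *bridge,
				  struct drm_atomic_state *state)
{
	/* power down the encoder here */
}

static const struct drm_bridge_funcs sketch_bridge_funcs = {
	.atomic_enable = sketch_atomic_enable,
	.atomic_disable = sketch_atomic_disable,
	/* required whenever the atomic_* hooks are used */
	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
	.atomic_reset = drm_atomic_helper_bridge_reset,
};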
/* -----------------------------------------------------------------------------
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index bfa88409a7ff..071168aa0c3b 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1197,11 +1197,9 @@ struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp,
return conn_state->crtc;
}
-static void
-analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *old_state)
{
- struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
@@ -1257,11 +1255,9 @@ out_dp_init:
return ret;
}
-static void
-analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *old_state)
{
- struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
@@ -1327,11 +1323,9 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
dp->dpms_mode = DRM_MODE_DPMS_OFF;
}
-static void
-analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *old_state)
{
- struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_crtc *old_crtc, *new_crtc;
struct drm_crtc_state *old_crtc_state = NULL;
@@ -1367,11 +1361,9 @@ out:
analogix_dp_bridge_disable(bridge);
}
-static void
-analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *old_state)
{
- struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
@@ -1553,7 +1545,6 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
{
struct platform_device *pdev = to_platform_device(dev);
struct analogix_dp_device *dp;
- struct resource *res;
unsigned int irq_flags;
int ret;
@@ -1605,9 +1596,7 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
return ERR_CAST(dp->clock);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- dp->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ dp->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dp->reg_base)) {
ret = PTR_ERR(dp->reg_base);
goto err_disable_clk;
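devm_platform_ioremap_resource() collapses the platform_get_resource() plus devm_ioremap_resource() pair into a single call; microchip-lvds receives the same simplification later in this patch. A before/after sketch in a hypothetical probe helper:

#include <linux/io.h>
#include <linux/platform_device.h>

/* Before: the resource is fetched explicitly, then mapped. */
static void __iomem *sketch_map_old(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	return devm_ioremap_resource(&pdev->dev, res);
}

/* After: one helper does the lookup and the mapping. */
static void __iomem *sketch_map_new(struct platform_device *pdev)
{
	return devm_platform_ioremap_resource(pdev, 0);
}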
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 4be34d5c7a3b..0b97b66de577 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -2380,7 +2380,7 @@ static int anx7625_bridge_atomic_check(struct drm_bridge *bridge,
}
static void anx7625_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *state)
+ struct drm_atomic_state *state)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
struct device *dev = ctx->dev;
@@ -2389,7 +2389,7 @@ static void anx7625_bridge_atomic_enable(struct drm_bridge *bridge,
dev_dbg(dev, "drm atomic enable\n");
- connector = drm_atomic_get_new_connector_for_encoder(state->base.state,
+ connector = drm_atomic_get_new_connector_for_encoder(state,
bridge->encoder);
if (!connector)
return;
@@ -2401,7 +2401,7 @@ static void anx7625_bridge_atomic_enable(struct drm_bridge *bridge,
anx7625_dp_start(ctx);
- conn_state = drm_atomic_get_new_connector_state(state->base.state, connector);
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
if (WARN_ON(!conn_state))
return;
@@ -2419,7 +2419,7 @@ static void anx7625_bridge_atomic_enable(struct drm_bridge *bridge,
}
static void anx7625_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old)
+ struct drm_atomic_state *state)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
struct device *dev = ctx->dev;
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index d081850e3c03..81fad14c2cd5 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -1619,7 +1619,7 @@ bool cdns_mhdp_bandwidth_ok(struct cdns_mhdp_device *mhdp,
static
enum drm_mode_status cdns_mhdp_mode_valid(struct drm_connector *conn,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
@@ -1979,10 +1979,9 @@ static void cdns_mhdp_sst_enable(struct cdns_mhdp_device *mhdp,
}
static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
- struct drm_atomic_state *state = bridge_state->base.state;
struct cdns_mhdp_bridge_state *mhdp_state;
struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
@@ -2070,7 +2069,7 @@ out:
}
static void cdns_mhdp_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
u32 resp;
@@ -2463,9 +2462,9 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
if (!mhdp)
return -ENOMEM;
- clk = devm_clk_get(dev, NULL);
+ clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(clk)) {
- dev_err(dev, "couldn't get clk: %ld\n", PTR_ERR(clk));
+ dev_err(dev, "couldn't get and enable clk: %ld\n", PTR_ERR(clk));
return PTR_ERR(clk);
}
@@ -2504,14 +2503,12 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
mhdp->info = of_device_get_match_data(dev);
- clk_prepare_enable(clk);
-
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "pm_runtime_resume_and_get failed\n");
pm_runtime_disable(dev);
- goto clk_disable;
+ return ret;
}
if (mhdp->info && mhdp->info->ops && mhdp->info->ops->init) {
@@ -2590,8 +2587,6 @@ plat_fini:
runtime_put:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
-clk_disable:
- clk_disable_unprepare(mhdp->clk);
return ret;
}
@@ -2632,8 +2627,6 @@ static void cdns_mhdp_remove(struct platform_device *pdev)
cancel_work_sync(&mhdp->modeset_retry_work);
flush_work(&mhdp->hpd_work);
/* Ignoring mhdp->hdcp.check_work and mhdp->hdcp.prop_work here. */
-
- clk_disable_unprepare(mhdp->clk);
}
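devm_clk_get_enabled() acquires and enables the clock and registers the disable/unprepare as a devres action, which is why the explicit clk_prepare_enable() and both clk_disable_unprepare() call sites (error path and remove) can go away. A sketch of the resulting probe-side pattern, with an illustrative error message:

#include <linux/clk.h>
#include <linux/device.h>

/* Clock is enabled here and automatically disabled on driver detach. */
static int sketch_probe_clk(struct device *dev)
{
	struct clk *clk = devm_clk_get_enabled(dev, NULL);

	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "couldn't get and enable clk\n");

	return 0;
}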
static const struct of_device_id mhdp_ids[] = {
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index d47703559b0d..81f7c701961f 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -341,10 +341,9 @@ static void chipone_configure_pll(struct chipone *icn,
}
static void chipone_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct chipone *icn = bridge_to_chipone(bridge);
- struct drm_atomic_state *state = old_bridge_state->base.state;
struct drm_display_mode *mode = &icn->mode;
const struct drm_bridge_state *bridge_state;
u16 hfp, hbp, hsync;
@@ -445,7 +444,7 @@ static void chipone_atomic_enable(struct drm_bridge *bridge,
}
static void chipone_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct chipone *icn = bridge_to_chipone(bridge);
int ret;
@@ -482,7 +481,7 @@ static void chipone_atomic_pre_enable(struct drm_bridge *bridge,
}
static void chipone_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct chipone *icn = bridge_to_chipone(bridge);
diff --git a/drivers/gpu/drm/bridge/fsl-ldb.c b/drivers/gpu/drm/bridge/fsl-ldb.c
index 0fc8a14fd800..26ae1ab5237f 100644
--- a/drivers/gpu/drm/bridge/fsl-ldb.c
+++ b/drivers/gpu/drm/bridge/fsl-ldb.c
@@ -122,10 +122,9 @@ static int fsl_ldb_attach(struct drm_bridge *bridge,
}
static void fsl_ldb_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct fsl_ldb *fsl_ldb = to_fsl_ldb(bridge);
- struct drm_atomic_state *state = old_bridge_state->base.state;
const struct drm_bridge_state *bridge_state;
const struct drm_crtc_state *crtc_state;
const struct drm_display_mode *mode;
@@ -224,7 +223,7 @@ static void fsl_ldb_atomic_enable(struct drm_bridge *bridge,
}
static void fsl_ldb_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct fsl_ldb *fsl_ldb = to_fsl_ldb(bridge);
diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
index 0d1ac3edcab4..a17433a7c755 100644
--- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
+++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
@@ -49,16 +49,17 @@ static int imx8mp_hdmi_pvi_bridge_attach(struct drm_bridge *bridge,
}
static void imx8mp_hdmi_pvi_bridge_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
- struct drm_atomic_state *state = bridge_state->base.state;
struct imx8mp_hdmi_pvi *pvi = to_imx8mp_hdmi_pvi(bridge);
struct drm_connector_state *conn_state;
+ struct drm_bridge_state *bridge_state;
const struct drm_display_mode *mode;
struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
u32 bus_flags = 0, val;
+ bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
conn_state = drm_atomic_get_new_connector_state(state, connector);
crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
@@ -88,7 +89,7 @@ static void imx8mp_hdmi_pvi_bridge_enable(struct drm_bridge *bridge,
}
static void imx8mp_hdmi_pvi_bridge_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct imx8mp_hdmi_pvi *pvi = to_imx8mp_hdmi_pvi(bridge);
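Callbacks that still need per-bridge or connector state after the signature change look it up from the drm_atomic_state they are handed, as the enable hook above now does with drm_atomic_get_new_bridge_state(). A reduced sketch of that retrieval, with the actual register programming omitted:

#include <drm/drm_atomic.h>
#include <drm/drm_bridge.h>

static void sketch_enable_fetch_state(struct drm_bridge *bridge,
				      struct drm_atomic_state *state)
{
	struct drm_bridge_state *bridge_state =
		drm_atomic_get_new_bridge_state(state, bridge);
	struct drm_connector *connector =
		drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);

	/* bridge_state and the connector state are now reachable as before */
	(void)bridge_state;
	(void)connector;
}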
diff --git a/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
index dd5823f04c70..524aac751359 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
@@ -200,9 +200,8 @@ imx8qm_ldb_bridge_mode_set(struct drm_bridge *bridge,
CH_HSYNC_M(chno), CH_PHSYNC(chno));
}
-static void
-imx8qm_ldb_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void imx8qm_ldb_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct ldb_channel *ldb_ch = bridge->driver_private;
struct ldb *ldb = ldb_ch->ldb;
@@ -247,9 +246,8 @@ imx8qm_ldb_bridge_atomic_enable(struct drm_bridge *bridge,
ldb_bridge_enable_helper(bridge);
}
-static void
-imx8qm_ldb_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void imx8qm_ldb_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct ldb_channel *ldb_ch = bridge->driver_private;
struct ldb *ldb = ldb_ch->ldb;
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
index 7bce2305d676..3cb484773ddf 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
@@ -203,9 +203,8 @@ imx8qxp_ldb_bridge_mode_set(struct drm_bridge *bridge,
companion->funcs->mode_set(companion, mode, adjusted_mode);
}
-static void
-imx8qxp_ldb_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void imx8qxp_ldb_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct ldb_channel *ldb_ch = bridge->driver_private;
struct ldb *ldb = ldb_ch->ldb;
@@ -217,12 +216,11 @@ imx8qxp_ldb_bridge_atomic_pre_enable(struct drm_bridge *bridge,
clk_prepare_enable(imx8qxp_ldb->clk_bypass);
if (is_split && companion)
- companion->funcs->atomic_pre_enable(companion, old_bridge_state);
+ companion->funcs->atomic_pre_enable(companion, state);
}
-static void
-imx8qxp_ldb_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void imx8qxp_ldb_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct ldb_channel *ldb_ch = bridge->driver_private;
struct ldb *ldb = ldb_ch->ldb;
@@ -252,12 +250,11 @@ imx8qxp_ldb_bridge_atomic_enable(struct drm_bridge *bridge,
DRM_DEV_ERROR(dev, "failed to power on PHY: %d\n", ret);
if (is_split && companion)
- companion->funcs->atomic_enable(companion, old_bridge_state);
+ companion->funcs->atomic_enable(companion, state);
}
-static void
-imx8qxp_ldb_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void imx8qxp_ldb_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct ldb_channel *ldb_ch = bridge->driver_private;
struct ldb *ldb = ldb_ch->ldb;
@@ -283,7 +280,7 @@ imx8qxp_ldb_bridge_atomic_disable(struct drm_bridge *bridge,
clk_disable_unprepare(imx8qxp_ldb->clk_pixel);
if (is_split && companion)
- companion->funcs->atomic_disable(companion, old_bridge_state);
+ companion->funcs->atomic_disable(companion, state);
ret = pm_runtime_put(dev);
if (ret < 0)
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
index 1812bd106261..1d9529dc7f2a 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
@@ -176,9 +176,8 @@ imx8qxp_pc_bridge_mode_set(struct drm_bridge *bridge,
clk_disable_unprepare(pc->clk_apb);
}
-static void
-imx8qxp_pc_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void imx8qxp_pc_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct imx8qxp_pc_channel *ch = bridge->driver_private;
struct imx8qxp_pc *pc = ch->pc;
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
index 4b0715ed6f38..cd6818db0fd3 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
@@ -153,9 +153,8 @@ imx8qxp_pixel_link_bridge_mode_set(struct drm_bridge *bridge,
imx8qxp_pixel_link_set_mst_addr(pl);
}
-static void
-imx8qxp_pixel_link_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void imx8qxp_pixel_link_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct imx8qxp_pixel_link *pl = bridge->driver_private;
@@ -164,9 +163,8 @@ imx8qxp_pixel_link_bridge_atomic_enable(struct drm_bridge *bridge,
imx8qxp_pixel_link_enable_sync(pl);
}
-static void
-imx8qxp_pixel_link_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void imx8qxp_pixel_link_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct imx8qxp_pixel_link *pl = bridge->driver_private;
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
index 65cf3a6c8ec6..49dd4f96d52c 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
@@ -122,9 +122,8 @@ imx8qxp_pxl2dpi_bridge_mode_set(struct drm_bridge *bridge,
}
}
-static void
-imx8qxp_pxl2dpi_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void imx8qxp_pxl2dpi_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct imx8qxp_pxl2dpi *p2d = bridge->driver_private;
int ret;
@@ -134,8 +133,7 @@ imx8qxp_pxl2dpi_bridge_atomic_disable(struct drm_bridge *bridge,
DRM_DEV_ERROR(p2d->dev, "failed to put runtime PM: %d\n", ret);
if (p2d->companion)
- p2d->companion->funcs->atomic_disable(p2d->companion,
- old_bridge_state);
+ p2d->companion->funcs->atomic_disable(p2d->companion, state);
}
static const u32 imx8qxp_pxl2dpi_bus_output_fmts[] = {
diff --git a/drivers/gpu/drm/bridge/ite-it6263.c b/drivers/gpu/drm/bridge/ite-it6263.c
index 306b5e374b9e..21152a1c28f7 100644
--- a/drivers/gpu/drm/bridge/ite-it6263.c
+++ b/drivers/gpu/drm/bridge/ite-it6263.c
@@ -569,9 +569,8 @@ static int it6263_read_edid(void *data, u8 *buf, unsigned int block, size_t len)
return 0;
}
-static void
-it6263_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void it6263_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct it6263 *it = bridge_to_it6263(bridge);
@@ -581,11 +580,9 @@ it6263_bridge_atomic_disable(struct drm_bridge *bridge,
AFE_DRV_RST | AFE_DRV_PWD);
}
-static void
-it6263_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void it6263_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
- struct drm_atomic_state *state = old_bridge_state->base.state;
struct it6263 *it = bridge_to_it6263(bridge);
const struct drm_crtc_state *crtc_state;
struct regmap *regmap = it->hdmi_regmap;
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 88ef76a37fe6..8a607558ac89 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -2250,12 +2250,13 @@ static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505)
continue;
}
- for (i = 0; i < 5; i++) {
+ for (i = 0; i < 5; i++)
if (bv[i][3] != av[i][0] || bv[i][2] != av[i][1] ||
- av[i][1] != av[i][2] || bv[i][0] != av[i][3])
+ bv[i][1] != av[i][2] || bv[i][0] != av[i][3])
break;
- DRM_DEV_DEBUG_DRIVER(dev, "V' all match!! %d, %d", retry, i);
+ if (i == 5) {
+ DRM_DEV_DEBUG_DRIVER(dev, "V' all match!! %d", retry);
return true;
}
}
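Besides correcting the bv[i][1] comparison, the fix moves the success path out of the loop: the scan breaks at the first mismatch and only a pass that reaches i == 5 counts as a full match. The same idiom in isolation, with hypothetical arrays:

#include <linux/types.h>

/* Returns true only if every element matched (the loop ran to completion). */
static bool all_match(const int *a, const int *b, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (a[i] != b[i])
			break;		/* first mismatch ends the scan */

	return i == n;
}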
@@ -3182,11 +3183,10 @@ it6505_bridge_mode_valid(struct drm_bridge *bridge,
}
static void it6505_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
struct device *dev = it6505->dev;
- struct drm_atomic_state *state = old_state->base.state;
struct hdmi_avi_infoframe frame;
struct drm_crtc_state *crtc_state;
struct drm_connector_state *conn_state;
@@ -3238,7 +3238,7 @@ static void it6505_bridge_atomic_enable(struct drm_bridge *bridge,
}
static void it6505_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
struct device *dev = it6505->dev;
@@ -3253,7 +3253,7 @@ static void it6505_bridge_atomic_disable(struct drm_bridge *bridge,
}
static void it6505_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
struct device *dev = it6505->dev;
@@ -3264,7 +3264,7 @@ static void it6505_bridge_atomic_pre_enable(struct drm_bridge *bridge,
}
static void it6505_bridge_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
struct device *dev = it6505->dev;
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index 23edcde6b9a7..b9f90f32145d 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -721,10 +721,9 @@ static u32 *it66121_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
}
static void it66121_bridge_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
- struct drm_atomic_state *state = bridge_state->base.state;
ctx->connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
@@ -732,7 +731,7 @@ static void it66121_bridge_enable(struct drm_bridge *bridge,
}
static void it66121_bridge_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c
index 999ddebb832d..0fc5ea18fe6a 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9211.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9211.c
@@ -455,10 +455,9 @@ static int lt9211_configure_tx(struct lt9211 *ctx, bool jeida,
}
static void lt9211_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct lt9211 *ctx = bridge_to_lt9211(bridge);
- struct drm_atomic_state *state = old_bridge_state->base.state;
const struct drm_bridge_state *bridge_state;
const struct drm_crtc_state *crtc_state;
const struct drm_display_mode *mode;
@@ -553,7 +552,7 @@ static void lt9211_atomic_enable(struct drm_bridge *bridge,
}
static void lt9211_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct lt9211 *ctx = bridge_to_lt9211(bridge);
int ret;
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index e650cd83fc8d..026803034231 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -640,12 +640,10 @@ lt9611_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
}
/* bridge funcs */
-static void
-lt9611_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void lt9611_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
- struct drm_atomic_state *state = old_bridge_state->base.state;
struct drm_connector *connector;
struct drm_connector_state *conn_state;
struct drm_crtc_state *crtc_state;
@@ -689,9 +687,8 @@ lt9611_bridge_atomic_enable(struct drm_bridge *bridge,
regmap_write(lt9611->regmap, 0x8130, 0xea);
}
-static void
-lt9611_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void lt9611_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
int ret;
@@ -767,7 +764,7 @@ static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge,
}
static void lt9611_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
static const struct reg_sequence reg_cfg[] = {
@@ -786,9 +783,8 @@ static void lt9611_bridge_atomic_pre_enable(struct drm_bridge *bridge,
lt9611->sleep = false;
}
-static void
-lt9611_bridge_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void lt9611_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index a3dcee62e7a5..a47aabf134fd 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -115,16 +115,9 @@ static int ge_b850v3_lvds_get_modes(struct drm_connector *connector)
return num_modes;
}
-static enum drm_mode_status ge_b850v3_lvds_mode_valid(
- struct drm_connector *connector, struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
static const struct
drm_connector_helper_funcs ge_b850v3_lvds_connector_helper_funcs = {
.get_modes = ge_b850v3_lvds_get_modes,
- .mode_valid = ge_b850v3_lvds_mode_valid,
};
static enum drm_connector_status ge_b850v3_lvds_bridge_detect(struct drm_bridge *bridge)
diff --git a/drivers/gpu/drm/bridge/microchip-lvds.c b/drivers/gpu/drm/bridge/microchip-lvds.c
index b8313dad6072..53dd140a1b8d 100644
--- a/drivers/gpu/drm/bridge/microchip-lvds.c
+++ b/drivers/gpu/drm/bridge/microchip-lvds.c
@@ -162,8 +162,7 @@ static int mchp_lvds_probe(struct platform_device *pdev)
lvds->dev = dev;
- lvds->regs = devm_ioremap_resource(lvds->dev,
- platform_get_resource(pdev, IORESOURCE_MEM, 0));
+ lvds->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lvds->regs))
return PTR_ERR(lvds->regs);
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index 1e5b2a37cb8c..d04c62a0cb9f 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -736,9 +736,8 @@ static int nwl_dsi_disable(struct nwl_dsi *dsi)
return 0;
}
-static void
-nwl_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void nwl_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct nwl_dsi *dsi = bridge_to_dsi(bridge);
int ret;
@@ -898,9 +897,8 @@ runtime_put:
pm_runtime_put_sync(dev);
}
-static void
-nwl_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void nwl_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct nwl_dsi *dsi = bridge_to_dsi(bridge);
int ret;
@@ -1184,6 +1182,7 @@ static int nwl_dsi_probe(struct platform_device *pdev)
dsi->bridge.funcs = &nwl_dsi_bridge_funcs;
dsi->bridge.of_node = dev->of_node;
dsi->bridge.timings = &nwl_dsi_timings;
+ dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
dev_set_drvdata(dev, dsi);
pm_runtime_enable(dev);
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 44e36ae66db4..27261b2ac9c8 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -15,7 +15,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 6e88339dec0f..258c85c83a28 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -109,10 +109,9 @@ static void panel_bridge_detach(struct drm_bridge *bridge)
}
static void panel_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *atomic_state)
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
- struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
struct drm_encoder *encoder = bridge->encoder;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
@@ -129,10 +128,9 @@ static void panel_bridge_atomic_pre_enable(struct drm_bridge *bridge,
}
static void panel_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *atomic_state)
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
- struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
struct drm_encoder *encoder = bridge->encoder;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
@@ -149,10 +147,9 @@ static void panel_bridge_atomic_enable(struct drm_bridge *bridge,
}
static void panel_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *atomic_state)
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
- struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
struct drm_encoder *encoder = bridge->encoder;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
@@ -169,10 +166,9 @@ static void panel_bridge_atomic_disable(struct drm_bridge *bridge,
}
static void panel_bridge_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *atomic_state)
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
- struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
struct drm_encoder *encoder = bridge->encoder;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
@@ -322,8 +318,10 @@ void drm_panel_bridge_remove(struct drm_bridge *bridge)
if (!bridge)
return;
- if (bridge->funcs != &panel_bridge_bridge_funcs)
+ if (!drm_bridge_is_panel(bridge)) {
+ drm_warn(bridge->dev, "%s: called on non-panel bridge!\n", __func__);
return;
+ }
panel_bridge = drm_bridge_to_panel_bridge(bridge);
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index ae3ab9262ef1..13ada42a5514 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -19,7 +19,6 @@
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index 14d4dcf239da..a42138b33258 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -20,7 +20,6 @@
#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#define PAGE0_AUXCH_CFG3 0x76
@@ -438,7 +437,7 @@ static const struct dev_pm_ops ps8640_pm_ops = {
};
static void ps8640_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL];
@@ -473,7 +472,7 @@ static void ps8640_atomic_pre_enable(struct drm_bridge *bridge,
}
static void ps8640_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
index f8b4fb835765..54de6ed2fae8 100644
--- a/drivers/gpu/drm/bridge/samsung-dsim.c
+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
@@ -1457,7 +1457,7 @@ static int samsung_dsim_init(struct samsung_dsim *dsi)
}
static void samsung_dsim_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
int ret;
@@ -1485,7 +1485,7 @@ static void samsung_dsim_atomic_pre_enable(struct drm_bridge *bridge,
}
static void samsung_dsim_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
@@ -1496,7 +1496,7 @@ static void samsung_dsim_atomic_enable(struct drm_bridge *bridge,
}
static void samsung_dsim_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
@@ -1508,7 +1508,7 @@ static void samsung_dsim_atomic_disable(struct drm_bridge *bridge,
}
static void samsung_dsim_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index bf2d1632b020..914a2609a685 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -325,7 +325,7 @@ static const struct drm_connector_helper_funcs sii902x_connector_helper_funcs =
};
static void sii902x_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
@@ -339,7 +339,7 @@ static void sii902x_bridge_atomic_disable(struct drm_bridge *bridge,
}
static void sii902x_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
@@ -887,7 +887,7 @@ static int sii902x_audio_codec_init(struct sii902x *sii902x,
lanes[0] = 0;
} else if (num_lanes < 0) {
dev_err(dev,
- "%s: Error gettin \"sil,i2s-data-lanes\": %d\n",
+ "%s: Error getting \"sil,i2s-data-lanes\": %d\n",
__func__, num_lanes);
return num_lanes;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
index b281cabfe992..6166f197e37b 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
@@ -36,6 +36,88 @@
#define SCRAMB_POLL_DELAY_MS 3000
+/*
+ * Unless otherwise noted, the entries in this table are purely an optimization.
+ * Values can be obtained from dw_hdmi_qp_compute_n(), but that function is
+ * slow, so we pre-compute the values we expect to see.
+ *
+ * The values for TMDS 25175, 25200, 27000, 54000, 74250 and 148500 kHz are
+ * the recommended N values specified in the Audio chapter of the HDMI
+ * specification.
+ */
+static const struct dw_hdmi_audio_tmds_n {
+ unsigned long tmds;
+ unsigned int n_32k;
+ unsigned int n_44k1;
+ unsigned int n_48k;
+} common_tmds_n_table[] = {
+ { .tmds = 25175000, .n_32k = 4576, .n_44k1 = 7007, .n_48k = 6864, },
+ { .tmds = 25200000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, },
+ { .tmds = 27000000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, },
+ { .tmds = 28320000, .n_32k = 4096, .n_44k1 = 5586, .n_48k = 6144, },
+ { .tmds = 30240000, .n_32k = 4096, .n_44k1 = 5642, .n_48k = 6144, },
+ { .tmds = 31500000, .n_32k = 4096, .n_44k1 = 5600, .n_48k = 6144, },
+ { .tmds = 32000000, .n_32k = 4096, .n_44k1 = 5733, .n_48k = 6144, },
+ { .tmds = 33750000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, },
+ { .tmds = 36000000, .n_32k = 4096, .n_44k1 = 5684, .n_48k = 6144, },
+ { .tmds = 40000000, .n_32k = 4096, .n_44k1 = 5733, .n_48k = 6144, },
+ { .tmds = 49500000, .n_32k = 4096, .n_44k1 = 5488, .n_48k = 6144, },
+ { .tmds = 50000000, .n_32k = 4096, .n_44k1 = 5292, .n_48k = 6144, },
+ { .tmds = 54000000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, },
+ { .tmds = 65000000, .n_32k = 4096, .n_44k1 = 7056, .n_48k = 6144, },
+ { .tmds = 68250000, .n_32k = 4096, .n_44k1 = 5376, .n_48k = 6144, },
+ { .tmds = 71000000, .n_32k = 4096, .n_44k1 = 7056, .n_48k = 6144, },
+ { .tmds = 72000000, .n_32k = 4096, .n_44k1 = 5635, .n_48k = 6144, },
+ { .tmds = 73250000, .n_32k = 11648, .n_44k1 = 14112, .n_48k = 6144, },
+ { .tmds = 74250000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, },
+ { .tmds = 75000000, .n_32k = 4096, .n_44k1 = 5880, .n_48k = 6144, },
+ { .tmds = 78750000, .n_32k = 4096, .n_44k1 = 5600, .n_48k = 6144, },
+ { .tmds = 78800000, .n_32k = 4096, .n_44k1 = 5292, .n_48k = 6144, },
+ { .tmds = 79500000, .n_32k = 4096, .n_44k1 = 4704, .n_48k = 6144, },
+ { .tmds = 83500000, .n_32k = 4096, .n_44k1 = 7056, .n_48k = 6144, },
+ { .tmds = 85500000, .n_32k = 4096, .n_44k1 = 5488, .n_48k = 6144, },
+ { .tmds = 88750000, .n_32k = 4096, .n_44k1 = 14112, .n_48k = 6144, },
+ { .tmds = 97750000, .n_32k = 4096, .n_44k1 = 14112, .n_48k = 6144, },
+ { .tmds = 101000000, .n_32k = 4096, .n_44k1 = 7056, .n_48k = 6144, },
+ { .tmds = 106500000, .n_32k = 4096, .n_44k1 = 4704, .n_48k = 6144, },
+ { .tmds = 108000000, .n_32k = 4096, .n_44k1 = 5684, .n_48k = 6144, },
+ { .tmds = 115500000, .n_32k = 4096, .n_44k1 = 5712, .n_48k = 6144, },
+ { .tmds = 119000000, .n_32k = 4096, .n_44k1 = 5544, .n_48k = 6144, },
+ { .tmds = 135000000, .n_32k = 4096, .n_44k1 = 5488, .n_48k = 6144, },
+ { .tmds = 146250000, .n_32k = 11648, .n_44k1 = 6272, .n_48k = 6144, },
+ { .tmds = 148500000, .n_32k = 4096, .n_44k1 = 6272, .n_48k = 6144, },
+ { .tmds = 154000000, .n_32k = 4096, .n_44k1 = 5544, .n_48k = 6144, },
+ { .tmds = 162000000, .n_32k = 4096, .n_44k1 = 5684, .n_48k = 6144, },
+
+ /* For 297 MHz and above, the HDMI spec has a different rule for setting N */
+ { .tmds = 297000000, .n_32k = 3073, .n_44k1 = 4704, .n_48k = 5120, },
+ { .tmds = 594000000, .n_32k = 3073, .n_44k1 = 9408, .n_48k = 10240,},
+
+ /* End of table */
+ { .tmds = 0, .n_32k = 0, .n_44k1 = 0, .n_48k = 0, },
+};
+
+/*
+ * These are the CTS values as recommended in the Audio chapter of the HDMI
+ * specification.
+ */
+static const struct dw_hdmi_audio_tmds_cts {
+ unsigned long tmds;
+ unsigned int cts_32k;
+ unsigned int cts_44k1;
+ unsigned int cts_48k;
+} common_tmds_cts_table[] = {
+ { .tmds = 25175000, .cts_32k = 28125, .cts_44k1 = 31250, .cts_48k = 28125, },
+ { .tmds = 25200000, .cts_32k = 25200, .cts_44k1 = 28000, .cts_48k = 25200, },
+ { .tmds = 27000000, .cts_32k = 27000, .cts_44k1 = 30000, .cts_48k = 27000, },
+ { .tmds = 54000000, .cts_32k = 54000, .cts_44k1 = 60000, .cts_48k = 54000, },
+ { .tmds = 74250000, .cts_32k = 74250, .cts_44k1 = 82500, .cts_48k = 74250, },
+ { .tmds = 148500000, .cts_32k = 148500, .cts_44k1 = 165000, .cts_48k = 148500, },
+
+ /* End of table */
+ { .tmds = 0, .cts_32k = 0, .cts_44k1 = 0, .cts_48k = 0, },
+};
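Both tables feed the HDMI Audio Clock Regeneration math: the sink rebuilds the audio clock from 128 * fs = f_tmds * N / CTS, and dw_hdmi_qp_audio_math_diff() further down checks exactly the remainder of that division. As a worked check against the entries above, 148.5 MHz TMDS at 48 kHz with N = 6144 gives CTS = 148500000 * 6144 / (128 * 48000) = 148500, matching cts_48k. A minimal helper in the same spirit, assuming only the formula:

#include <linux/math64.h>

/* CTS implied by a chosen N: CTS = f_tmds * N / (128 * fs), exact when the remainder is 0. */
static u64 sketch_cts_from_n(unsigned long tmds, unsigned int n, unsigned int fs)
{
	return div_u64(mul_u32_u32(tmds, n), 128 * fs);
}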
+
struct dw_hdmi_qp_i2c {
struct i2c_adapter adap;
@@ -60,6 +142,8 @@ struct dw_hdmi_qp {
} phy;
struct regmap *regm;
+
+ unsigned long tmds_char_rate;
};
static void dw_hdmi_qp_write(struct dw_hdmi_qp *hdmi, unsigned int val,
@@ -83,6 +167,346 @@ static void dw_hdmi_qp_mod(struct dw_hdmi_qp *hdmi, unsigned int data,
regmap_update_bits(hdmi->regm, reg, mask, data);
}
+static struct dw_hdmi_qp *dw_hdmi_qp_from_bridge(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct dw_hdmi_qp, bridge);
+}
+
+static void dw_hdmi_qp_set_cts_n(struct dw_hdmi_qp *hdmi, unsigned int cts,
+ unsigned int n)
+{
+ /* Set N */
+ dw_hdmi_qp_mod(hdmi, n, AUDPKT_ACR_N_VALUE, AUDPKT_ACR_CONTROL0);
+
+ /* Set CTS */
+ if (cts)
+ dw_hdmi_qp_mod(hdmi, AUDPKT_ACR_CTS_OVR_EN, AUDPKT_ACR_CTS_OVR_EN_MSK,
+ AUDPKT_ACR_CONTROL1);
+ else
+ dw_hdmi_qp_mod(hdmi, 0, AUDPKT_ACR_CTS_OVR_EN_MSK,
+ AUDPKT_ACR_CONTROL1);
+
+ dw_hdmi_qp_mod(hdmi, AUDPKT_ACR_CTS_OVR_VAL(cts), AUDPKT_ACR_CTS_OVR_VAL_MSK,
+ AUDPKT_ACR_CONTROL1);
+}
+
+static int dw_hdmi_qp_match_tmds_n_table(struct dw_hdmi_qp *hdmi,
+ unsigned long pixel_clk,
+ unsigned long freq)
+{
+ const struct dw_hdmi_audio_tmds_n *tmds_n = NULL;
+ int i;
+
+ for (i = 0; common_tmds_n_table[i].tmds != 0; i++) {
+ if (pixel_clk == common_tmds_n_table[i].tmds) {
+ tmds_n = &common_tmds_n_table[i];
+ break;
+ }
+ }
+
+ if (!tmds_n)
+ return -ENOENT;
+
+ switch (freq) {
+ case 32000:
+ return tmds_n->n_32k;
+ case 44100:
+ case 88200:
+ case 176400:
+ return (freq / 44100) * tmds_n->n_44k1;
+ case 48000:
+ case 96000:
+ case 192000:
+ return (freq / 48000) * tmds_n->n_48k;
+ default:
+ return -ENOENT;
+ }
+}
+
+static u32 dw_hdmi_qp_audio_math_diff(unsigned int freq, unsigned int n,
+ unsigned int pixel_clk)
+{
+ u64 cts = mul_u32_u32(pixel_clk, n);
+
+ return do_div(cts, 128 * freq);
+}
+
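+/*
+ * No exact table entry: search for an N that makes CTS an integer.
+ * The bounds below keep 128 * f_audio / N between 300 Hz and 1.5 kHz
+ * (1 kHz ideal), per the HDMI spec recommendation.
+ */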
+static unsigned int dw_hdmi_qp_compute_n(struct dw_hdmi_qp *hdmi,
+ unsigned long pixel_clk,
+ unsigned long freq)
+{
+ unsigned int min_n = DIV_ROUND_UP((128 * freq), 1500);
+ unsigned int max_n = (128 * freq) / 300;
+ unsigned int ideal_n = (128 * freq) / 1000;
+ unsigned int best_n_distance = ideal_n;
+ unsigned int best_n = 0;
+ u64 best_diff = U64_MAX;
+ int n;
+
+ /* If the ideal N could satisfy the audio math, then just take it */
+ if (dw_hdmi_qp_audio_math_diff(freq, ideal_n, pixel_clk) == 0)
+ return ideal_n;
+
+ for (n = min_n; n <= max_n; n++) {
+ u64 diff = dw_hdmi_qp_audio_math_diff(freq, n, pixel_clk);
+
+ if (diff < best_diff ||
+ (diff == best_diff && abs(n - ideal_n) < best_n_distance)) {
+ best_n = n;
+ best_diff = diff;
+ best_n_distance = abs(best_n - ideal_n);
+ }
+
+ /*
+ * The best N already satisfies the audio math and is also the
+ * closest value to the ideal N, so just cut the loop.
+ */
+ if (best_diff == 0 && (abs(n - ideal_n) > best_n_distance))
+ break;
+ }
+
+ return best_n;
+}
+
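+/*
+ * Prefer the N recommended by the table above; when the TMDS rate is not
+ * listed, fall back to computing a suitable N.
+ */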
+static unsigned int dw_hdmi_qp_find_n(struct dw_hdmi_qp *hdmi, unsigned long pixel_clk,
+ unsigned long sample_rate)
+{
+ int n = dw_hdmi_qp_match_tmds_n_table(hdmi, pixel_clk, sample_rate);
+
+ if (n > 0)
+ return n;
+
+ dev_warn(hdmi->dev, "Rate %lu missing; compute N dynamically\n",
+ pixel_clk);
+
+ return dw_hdmi_qp_compute_n(hdmi, pixel_clk, sample_rate);
+}
+
+static unsigned int dw_hdmi_qp_find_cts(struct dw_hdmi_qp *hdmi, unsigned long pixel_clk,
+ unsigned long sample_rate)
+{
+ const struct dw_hdmi_audio_tmds_cts *tmds_cts = NULL;
+ int i;
+
+ for (i = 0; common_tmds_cts_table[i].tmds != 0; i++) {
+ if (pixel_clk == common_tmds_cts_table[i].tmds) {
+ tmds_cts = &common_tmds_cts_table[i];
+ break;
+ }
+ }
+
+ if (!tmds_cts)
+ return 0;
+
+ switch (sample_rate) {
+ case 32000:
+ return tmds_cts->cts_32k;
+ case 44100:
+ case 88200:
+ case 176400:
+ return tmds_cts->cts_44k1;
+ case 48000:
+ case 96000:
+ case 192000:
+ return tmds_cts->cts_48k;
+ default:
+ return -ENOENT;
+ }
+}
+
+static void dw_hdmi_qp_set_audio_interface(struct dw_hdmi_qp *hdmi,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+ u32 conf0 = 0;
+
+ /* Reset the audio data path of the AVP */
+ dw_hdmi_qp_write(hdmi, AVP_DATAPATH_PACKET_AUDIO_SWINIT_P, GLOBAL_SWRESET_REQUEST);
+
+ /* Disable AUDS, ACR, AUDI */
+ dw_hdmi_qp_mod(hdmi, 0,
+ PKTSCHED_ACR_TX_EN | PKTSCHED_AUDS_TX_EN | PKTSCHED_AUDI_TX_EN,
+ PKTSCHED_PKT_EN);
+
+ /* Clear the audio FIFO */
+ dw_hdmi_qp_write(hdmi, AUDIO_FIFO_CLR_P, AUDIO_INTERFACE_CONTROL0);
+
+ /* Select I2S interface as the audio source */
+ dw_hdmi_qp_mod(hdmi, AUD_IF_I2S, AUD_IF_SEL_MSK, AUDIO_INTERFACE_CONFIG0);
+
+ /* Enable the active i2s lanes */
+ switch (hparms->channels) {
+ case 7 ... 8:
+ conf0 |= I2S_LINES_EN(3);
+ fallthrough;
+ case 5 ... 6:
+ conf0 |= I2S_LINES_EN(2);
+ fallthrough;
+ case 3 ... 4:
+ conf0 |= I2S_LINES_EN(1);
+ fallthrough;
+ default:
+ conf0 |= I2S_LINES_EN(0);
+ break;
+ }
+
+ dw_hdmi_qp_mod(hdmi, conf0, I2S_LINES_EN_MSK, AUDIO_INTERFACE_CONFIG0);
+
+ /*
+ * Enable BPCUV bits generated internally for L-PCM, or received
+ * from the stream for NLPCM/HBR.
+ */
+ switch (fmt->bit_fmt) {
+ case SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE:
+ conf0 = (hparms->channels == 8) ? AUD_HBR : AUD_ASP;
+ conf0 |= I2S_BPCUV_RCV_EN;
+ break;
+ default:
+ conf0 = AUD_ASP | I2S_BPCUV_RCV_DIS;
+ break;
+ }
+
+ dw_hdmi_qp_mod(hdmi, conf0, I2S_BPCUV_RCV_MSK | AUD_FORMAT_MSK,
+ AUDIO_INTERFACE_CONFIG0);
+
+ /* Enable audio FIFO auto-clear on overflow */
+ dw_hdmi_qp_mod(hdmi, AUD_FIFO_INIT_ON_OVF_EN, AUD_FIFO_INIT_ON_OVF_MSK,
+ AUDIO_INTERFACE_CONFIG0);
+}
+
+/*
+ * When transmitting IEC60958 linear PCM audio, these registers allow
+ * configuring the channel status information for all the channel status
+ * bits in the IEC60958 frame. For the moment this configuration is only
+ * used when the I2S audio interface, General Purpose Audio (GPA),
+ * or AHB audio DMA (AHBAUDDMA) interface is active
+ * (for the S/PDIF interface this information comes from the stream).
+ */
+static void dw_hdmi_qp_set_channel_status(struct dw_hdmi_qp *hdmi,
+ u8 *channel_status, bool ref2stream)
+{
+ /*
+ * AUDPKT_CHSTATUS_OVR0: { RSV, RSV, CS1, CS0 }
+ * AUDPKT_CHSTATUS_OVR1: { CS6, CS5, CS4, CS3 }
+ *
+ * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
+ * CS0: | Mode | d | c | b | a |
+ * CS1: | Category Code |
+ * CS2: | Channel Number | Source Number |
+ * CS3: | Clock Accuracy | Sample Freq |
+ * CS4: | Ori Sample Freq | Word Length |
+ * CS5: | | CGMS-A |
+ * CS6~CS23: Reserved
+ *
+ * a: use of channel status block
+ * b: linear PCM identification: 0 for lpcm, 1 for nlpcm
+ * c: copyright information
+ * d: additional format information
+ */
+
+ if (ref2stream)
+ channel_status[0] |= IEC958_AES0_NONAUDIO;
+
+ if ((dw_hdmi_qp_read(hdmi, AUDIO_INTERFACE_CONFIG0) & GENMASK(25, 24)) == AUD_HBR) {
+ /* fixup cs for HBR */
+ channel_status[3] = (channel_status[3] & 0xf0) | IEC958_AES3_CON_FS_768000;
+ channel_status[4] = (channel_status[4] & 0x0f) | IEC958_AES4_CON_ORIGFS_NOTID;
+ }
+
+ dw_hdmi_qp_write(hdmi, channel_status[0] | (channel_status[1] << 8),
+ AUDPKT_CHSTATUS_OVR0);
+
+ regmap_bulk_write(hdmi->regm, AUDPKT_CHSTATUS_OVR1, &channel_status[3], 1);
+
+ if (ref2stream)
+ dw_hdmi_qp_mod(hdmi, 0,
+ AUDPKT_PBIT_FORCE_EN_MASK | AUDPKT_CHSTATUS_OVR_EN_MASK,
+ AUDPKT_CONTROL0);
+ else
+ dw_hdmi_qp_mod(hdmi, AUDPKT_PBIT_FORCE_EN | AUDPKT_CHSTATUS_OVR_EN,
+ AUDPKT_PBIT_FORCE_EN_MASK | AUDPKT_CHSTATUS_OVR_EN_MASK,
+ AUDPKT_CONTROL0);
+}
+
+static void dw_hdmi_qp_set_sample_rate(struct dw_hdmi_qp *hdmi, unsigned long long tmds_char_rate,
+ unsigned int sample_rate)
+{
+ unsigned int n, cts;
+
+ n = dw_hdmi_qp_find_n(hdmi, tmds_char_rate, sample_rate);
+ cts = dw_hdmi_qp_find_cts(hdmi, tmds_char_rate, sample_rate);
+
+ dw_hdmi_qp_set_cts_n(hdmi, cts, n);
+}
+
+static int dw_hdmi_qp_audio_enable(struct drm_connector *connector,
+ struct drm_bridge *bridge)
+{
+ struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge);
+
+ if (hdmi->tmds_char_rate)
+ dw_hdmi_qp_mod(hdmi, 0, AVP_DATAPATH_PACKET_AUDIO_SWDISABLE, GLOBAL_SWDISABLE);
+
+ return 0;
+}
+
+static int dw_hdmi_qp_audio_prepare(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+ struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge);
+ bool ref2stream = false;
+
+ if (!hdmi->tmds_char_rate)
+ return -ENODEV;
+
+ if (fmt->bit_clk_provider | fmt->frame_clk_provider) {
+ dev_err(hdmi->dev, "unsupported clock settings\n");
+ return -EINVAL;
+ }
+
+ if (fmt->bit_fmt == SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE)
+ ref2stream = true;
+
+ dw_hdmi_qp_set_audio_interface(hdmi, fmt, hparms);
+ dw_hdmi_qp_set_sample_rate(hdmi, hdmi->tmds_char_rate, hparms->sample_rate);
+ dw_hdmi_qp_set_channel_status(hdmi, hparms->iec.status, ref2stream);
+ drm_atomic_helper_connector_hdmi_update_audio_infoframe(connector, &hparms->cea);
+
+ return 0;
+}
+
+static void dw_hdmi_qp_audio_disable_regs(struct dw_hdmi_qp *hdmi)
+{
+ /*
+ * Keep the ACR, AUDI and AUDS packets always on to keep the sink
+ * device active, for better compatibility and user experience.
+ *
+ * This also fixes a pop sound on some sink devices when they wake
+ * up from suspend.
+ */
+ dw_hdmi_qp_mod(hdmi, I2S_BPCUV_RCV_DIS, I2S_BPCUV_RCV_MSK,
+ AUDIO_INTERFACE_CONFIG0);
+ dw_hdmi_qp_mod(hdmi, AUDPKT_PBIT_FORCE_EN | AUDPKT_CHSTATUS_OVR_EN,
+ AUDPKT_PBIT_FORCE_EN_MASK | AUDPKT_CHSTATUS_OVR_EN_MASK,
+ AUDPKT_CONTROL0);
+
+ dw_hdmi_qp_mod(hdmi, AVP_DATAPATH_PACKET_AUDIO_SWDISABLE,
+ AVP_DATAPATH_PACKET_AUDIO_SWDISABLE, GLOBAL_SWDISABLE);
+}
+
+static void dw_hdmi_qp_audio_disable(struct drm_connector *connector,
+ struct drm_bridge *bridge)
+{
+ struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge);
+
+ drm_atomic_helper_connector_hdmi_clear_audio_infoframe(connector);
+
+ if (hdmi->tmds_char_rate)
+ dw_hdmi_qp_audio_disable_regs(hdmi);
+}
+
static int dw_hdmi_qp_i2c_read(struct dw_hdmi_qp *hdmi,
unsigned char *buf, unsigned int length)
{
@@ -361,11 +785,55 @@ static int dw_hdmi_qp_config_drm_infoframe(struct dw_hdmi_qp *hdmi,
return 0;
}
+/*
+ * Static values documented in the TRM.
+ * Different values should only be used for debug purposes.
+ */
+#define DW_HDMI_QP_AUDIO_INFOFRAME_HB1 0x1
+#define DW_HDMI_QP_AUDIO_INFOFRAME_HB2 0xa
+
+static int dw_hdmi_qp_config_audio_infoframe(struct dw_hdmi_qp *hdmi,
+ const u8 *buffer, size_t len)
+{
+ /*
+ * AUDI_CONTENTS0: { RSV, HB2, HB1, RSV }
+ * AUDI_CONTENTS1: { PB3, PB2, PB1, PB0 }
+ * AUDI_CONTENTS2: { PB7, PB6, PB5, PB4 }
+ *
+ * PB0: CheckSum
+ * PB1: | CT3 | CT2 | CT1 | CT0 | F13 | CC2 | CC1 | CC0 |
+ * PB2: | F27 | F26 | F25 | SF2 | SF1 | SF0 | SS1 | SS0 |
+ * PB3: | F37 | F36 | F35 | F34 | F33 | F32 | F31 | F30 |
+ * PB4: | CA7 | CA6 | CA5 | CA4 | CA3 | CA2 | CA1 | CA0 |
+ * PB5: | DM_INH | LSV3 | LSV2 | LSV1 | LSV0 | F52 | F51 | F50 |
+ * PB6~PB10: Reserved
+ *
+ * The AUDI_CONTENTS0 default value is defined by the HDMI
+ * specification and shall only be changed for debug purposes.
+ */
+ u32 header_bytes = (DW_HDMI_QP_AUDIO_INFOFRAME_HB1 << 8) |
+ (DW_HDMI_QP_AUDIO_INFOFRAME_HB2 << 16);
+
+ regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS0, &header_bytes, 1);
+ regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS1, &buffer[3], 1);
+ regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS2, &buffer[4], 1);
+
+ /* Enable ACR, AUDI, AMD */
+ dw_hdmi_qp_mod(hdmi,
+ PKTSCHED_ACR_TX_EN | PKTSCHED_AUDI_TX_EN | PKTSCHED_AMD_TX_EN,
+ PKTSCHED_ACR_TX_EN | PKTSCHED_AUDI_TX_EN | PKTSCHED_AMD_TX_EN,
+ PKTSCHED_PKT_EN);
+
+ /* Enable AUDS */
+ dw_hdmi_qp_mod(hdmi, PKTSCHED_AUDS_TX_EN, PKTSCHED_AUDS_TX_EN, PKTSCHED_PKT_EN);
+
+ return 0;
+}
+
static void dw_hdmi_qp_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct dw_hdmi_qp *hdmi = bridge->driver_private;
- struct drm_atomic_state *state = old_state->base.state;
struct drm_connector_state *conn_state;
struct drm_connector *connector;
unsigned int op_mode;
@@ -382,6 +850,7 @@ static void dw_hdmi_qp_bridge_atomic_enable(struct drm_bridge *bridge,
dev_dbg(hdmi->dev, "%s mode=HDMI rate=%llu\n",
__func__, conn_state->hdmi.tmds_char_rate);
op_mode = 0;
+ hdmi->tmds_char_rate = conn_state->hdmi.tmds_char_rate;
} else {
dev_dbg(hdmi->dev, "%s mode=DVI\n", __func__);
op_mode = OPMODE_DVI;
@@ -396,10 +865,12 @@ static void dw_hdmi_qp_bridge_atomic_enable(struct drm_bridge *bridge,
}
static void dw_hdmi_qp_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct dw_hdmi_qp *hdmi = bridge->driver_private;
+ hdmi->tmds_char_rate = 0;
+
hdmi->phy.ops->disable(hdmi, hdmi->phy.data);
}
@@ -455,6 +926,13 @@ static int dw_hdmi_qp_bridge_clear_infoframe(struct drm_bridge *bridge,
dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_DRMI_TX_EN, PKTSCHED_PKT_EN);
break;
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ dw_hdmi_qp_mod(hdmi, 0,
+ PKTSCHED_ACR_TX_EN |
+ PKTSCHED_AUDS_TX_EN |
+ PKTSCHED_AUDI_TX_EN,
+ PKTSCHED_PKT_EN);
+ break;
default:
dev_dbg(hdmi->dev, "Unsupported infoframe type %x\n", type);
}
@@ -477,6 +955,9 @@ static int dw_hdmi_qp_bridge_write_infoframe(struct drm_bridge *bridge,
case HDMI_INFOFRAME_TYPE_DRM:
return dw_hdmi_qp_config_drm_infoframe(hdmi, buffer, len);
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ return dw_hdmi_qp_config_audio_infoframe(hdmi, buffer, len);
+
default:
dev_dbg(hdmi->dev, "Unsupported infoframe type %x\n", type);
return 0;
@@ -494,6 +975,9 @@ static const struct drm_bridge_funcs dw_hdmi_qp_bridge_funcs = {
.hdmi_tmds_char_rate_valid = dw_hdmi_qp_bridge_tmds_char_rate_valid,
.hdmi_clear_infoframe = dw_hdmi_qp_bridge_clear_infoframe,
.hdmi_write_infoframe = dw_hdmi_qp_bridge_write_infoframe,
+ .hdmi_audio_startup = dw_hdmi_qp_audio_enable,
+ .hdmi_audio_shutdown = dw_hdmi_qp_audio_disable,
+ .hdmi_audio_prepare = dw_hdmi_qp_audio_prepare,
};
static irqreturn_t dw_hdmi_qp_main_hardirq(int irq, void *dev_id)
@@ -603,6 +1087,10 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
if (IS_ERR(hdmi->bridge.ddc))
return ERR_CAST(hdmi->bridge.ddc);
+ hdmi->bridge.hdmi_audio_max_i2s_playback_channels = 8;
+ hdmi->bridge.hdmi_audio_dev = dev;
+ hdmi->bridge.hdmi_audio_dai_port = 1;
+
ret = devm_drm_bridge_add(dev, &hdmi->bridge);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 996733ed2c00..0890add5f707 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -2621,6 +2621,7 @@ static int dw_hdmi_connector_create(struct dw_hdmi *hdmi)
* - MEDIA_BUS_FMT_UYYVYY12_0_5X36,
* - MEDIA_BUS_FMT_UYYVYY10_0_5X30,
* - MEDIA_BUS_FMT_UYYVYY8_0_5X24,
+ * - MEDIA_BUS_FMT_RGB888_1X24,
* - MEDIA_BUS_FMT_YUV16_1X48,
* - MEDIA_BUS_FMT_RGB161616_1X48,
* - MEDIA_BUS_FMT_UYVY12_1X24,
@@ -2631,7 +2632,6 @@ static int dw_hdmi_connector_create(struct dw_hdmi *hdmi)
* - MEDIA_BUS_FMT_RGB101010_1X30,
* - MEDIA_BUS_FMT_UYVY8_1X16,
* - MEDIA_BUS_FMT_YUV8_1X24,
- * - MEDIA_BUS_FMT_RGB888_1X24,
*/
/* Can return a maximum of 11 possible output formats for a mode/connector */
@@ -2669,7 +2669,7 @@ static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
}
/*
- * If the current mode enforces 4:2:0, force the output but format
+ * If the current mode enforces 4:2:0, force the output bus format
* to 4:2:0 and do not add the YUV422/444/RGB formats
*/
if (conn->ycbcr_420_allowed &&
@@ -2945,7 +2945,7 @@ static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge,
}
static void dw_hdmi_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct dw_hdmi *hdmi = bridge->driver_private;
@@ -2959,10 +2959,9 @@ static void dw_hdmi_bridge_atomic_disable(struct drm_bridge *bridge,
}
static void dw_hdmi_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct dw_hdmi *hdmi = bridge->driver_private;
- struct drm_atomic_state *state = old_state->base.state;
struct drm_connector *connector;
connector = drm_atomic_get_new_connector_for_encoder(state,
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index 0fb02e4e7f4e..2b6e70a49f43 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -934,7 +934,7 @@ static void dw_mipi_dsi_clear_err(struct dw_mipi_dsi *dsi)
}
static void dw_mipi_dsi_bridge_post_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
@@ -1022,7 +1022,7 @@ static void dw_mipi_dsi_mode_set(struct dw_mipi_dsi *dsi,
}
static void dw_mipi_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
@@ -1043,7 +1043,7 @@ static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
}
static void dw_mipi_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
index d7569bf2d9c3..5fd7a459efdd 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
@@ -745,7 +745,7 @@ static int dw_mipi_dsi2_bridge_atomic_check(struct drm_bridge *bridge,
}
static void dw_mipi_dsi2_bridge_post_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
const struct dw_mipi_dsi2_phy_ops *phy_ops = dsi2->plat_data->phy_ops;
@@ -821,7 +821,7 @@ static void dw_mipi_dsi2_mode_set(struct dw_mipi_dsi2 *dsi2,
}
static void dw_mipi_dsi2_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
@@ -840,7 +840,7 @@ static void dw_mipi_dsi2_bridge_mode_set(struct drm_bridge *bridge,
}
static void dw_mipi_dsi2_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
diff --git a/drivers/gpu/drm/bridge/tc358762.c b/drivers/gpu/drm/bridge/tc358762.c
index 46198af9eebb..49c76027f831 100644
--- a/drivers/gpu/drm/bridge/tc358762.c
+++ b/drivers/gpu/drm/bridge/tc358762.c
@@ -20,10 +20,10 @@
#include <video/mipi_display.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -149,7 +149,8 @@ static int tc358762_init(struct tc358762 *ctx)
return tc358762_clear_error(ctx);
}
-static void tc358762_post_disable(struct drm_bridge *bridge, struct drm_bridge_state *state)
+static void tc358762_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358762 *ctx = bridge_to_tc358762(bridge);
int ret;
@@ -171,7 +172,8 @@ static void tc358762_post_disable(struct drm_bridge *bridge, struct drm_bridge_s
dev_err(ctx->dev, "error disabling regulators (%d)\n", ret);
}
-static void tc358762_pre_enable(struct drm_bridge *bridge, struct drm_bridge_state *state)
+static void tc358762_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358762 *ctx = bridge_to_tc358762(bridge);
int ret;
@@ -188,7 +190,8 @@ static void tc358762_pre_enable(struct drm_bridge *bridge, struct drm_bridge_sta
ctx->pre_enabled = true;
}
-static void tc358762_enable(struct drm_bridge *bridge, struct drm_bridge_state *state)
+static void tc358762_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358762 *ctx = bridge_to_tc358762(bridge);
int ret;
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 4637bf6ea7a3..39e2d3a7a27d 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1548,9 +1548,8 @@ static int tc_edp_stream_disable(struct tc_data *tc)
return 0;
}
-static void
-tc_dpi_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void tc_dpi_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
@@ -1564,9 +1563,8 @@ tc_dpi_bridge_atomic_enable(struct drm_bridge *bridge,
}
}
-static void
-tc_dpi_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void tc_dpi_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
int ret;
@@ -1576,9 +1574,8 @@ tc_dpi_bridge_atomic_disable(struct drm_bridge *bridge,
dev_err(tc->dev, "main link stream stop error: %d\n", ret);
}
-static void
-tc_edp_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void tc_edp_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
int ret;
@@ -1603,9 +1600,8 @@ tc_edp_bridge_atomic_enable(struct drm_bridge *bridge,
}
}
-static void
-tc_edp_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void tc_edp_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
int ret;
diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
index 0b4efaca6d68..c89757bec4e6 100644
--- a/drivers/gpu/drm/bridge/tc358775.c
+++ b/drivers/gpu/drm/bridge/tc358775.c
@@ -26,7 +26,6 @@
#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#define FLD_VAL(val, start, end) FIELD_PREP(GENMASK(start, end), val)
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/bridge/tda998x_drv.c
index 82d4a4e206a5..ebc758c72891 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/bridge/tda998x_drv.c
@@ -21,10 +21,11 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
-#include <drm/i2c/tda998x.h>
#include <media/cec-notifier.h>
+#include <dt-bindings/display/tda998x.h>
+
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
enum {
@@ -1717,10 +1718,10 @@ static int tda998x_get_audio_ports(struct tda998x_priv *priv,
u8 ena_ap = be32_to_cpup(&port_data[2*i+1]);
switch (afmt) {
- case AFMT_I2S:
+ case TDA998x_I2S:
route = AUDIO_ROUTE_I2S;
break;
- case AFMT_SPDIF:
+ case TDA998x_SPDIF:
route = AUDIO_ROUTE_SPDIF;
break;
default:
@@ -1746,44 +1747,6 @@ static int tda998x_get_audio_ports(struct tda998x_priv *priv,
return 0;
}
-static int tda998x_set_config(struct tda998x_priv *priv,
- const struct tda998x_encoder_params *p)
-{
- priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
- (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
- VIP_CNTRL_0_SWAP_B(p->swap_b) |
- (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
- priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
- (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
- VIP_CNTRL_1_SWAP_D(p->swap_d) |
- (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
- priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
- (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
- VIP_CNTRL_2_SWAP_F(p->swap_f) |
- (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
-
- if (p->audio_params.format != AFMT_UNUSED) {
- unsigned int ratio, route;
- bool spdif = p->audio_params.format == AFMT_SPDIF;
-
- route = AUDIO_ROUTE_I2S + spdif;
-
- priv->audio.route = &tda998x_audio_route[route];
- priv->audio.cea = p->audio_params.cea;
- priv->audio.sample_rate = p->audio_params.sample_rate;
- memcpy(priv->audio.status, p->audio_params.status,
- min(sizeof(priv->audio.status),
- sizeof(p->audio_params.status)));
- priv->audio.ena_ap = p->audio_params.config;
- priv->audio.i2s_format = I2S_FORMAT_PHILIPS;
-
- ratio = spdif ? 64 : p->audio_params.sample_width * 2;
- return tda998x_derive_cts_n(priv, &priv->audio, ratio);
- }
-
- return 0;
-}
-
static void tda998x_destroy(struct device *dev)
{
struct tda998x_priv *priv = dev_get_drvdata(dev);
@@ -1982,10 +1945,6 @@ static int tda998x_create(struct device *dev)
if (priv->audio_port_enable[AUDIO_ROUTE_I2S] ||
priv->audio_port_enable[AUDIO_ROUTE_SPDIF])
tda998x_audio_codec_init(priv, &client->dev);
- } else if (dev->platform_data) {
- ret = tda998x_set_config(priv, dev->platform_data);
- if (ret)
- goto fail;
}
priv->bridge.funcs = &tda998x_bridge_funcs;
diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c
index eaec70fa42b6..85f2a0e74a1c 100644
--- a/drivers/gpu/drm/bridge/ti-dlpc3433.c
+++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c
@@ -105,7 +105,7 @@ static const struct regmap_config dlpc_regmap_config = {
};
static void dlpc_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct dlpc *dlpc = bridge_to_dlpc(bridge);
struct device *dev = dlpc->dev;
@@ -170,7 +170,7 @@ static void dlpc_atomic_enable(struct drm_bridge *bridge,
}
static void dlpc_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct dlpc *dlpc = bridge_to_dlpc(bridge);
int ret;
@@ -193,7 +193,7 @@ static void dlpc_atomic_pre_enable(struct drm_bridge *bridge,
}
static void dlpc_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct dlpc *dlpc = bridge_to_dlpc(bridge);
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index 336380114eea..95563aa1b450 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -35,12 +35,14 @@
#include <linux/of_graph.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_drv.h> /* DRM_MODESET_LOCK_ALL_BEGIN() needs drm_drv_uses_atomic_modeset() */
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -159,6 +161,9 @@ struct sn65dsi83 {
bool lvds_dual_link_even_odd_swap;
int lvds_vod_swing_conf[2];
int lvds_term_conf[2];
+ int irq;
+ struct delayed_work monitor_work;
+ struct work_struct reset_work;
};
static const struct regmap_range sn65dsi83_readable_ranges[] = {
@@ -363,11 +368,110 @@ static u8 sn65dsi83_get_dsi_div(struct sn65dsi83 *ctx)
return dsi_div - 1;
}
+static int sn65dsi83_reset_pipe(struct sn65dsi83 *sn65dsi83)
+{
+ struct drm_device *dev = sn65dsi83->bridge.dev;
+ struct drm_modeset_acquire_ctx ctx;
+ int err;
+
+ /*
+ * Reset active outputs of the related CRTC.
+ *
+ * This way, the drm core will reconfigure each component in the CRTC
+ * output path. In our case, this forces the previous component to go
+ * back into LP11 mode and so allows the SN65DSI83 bridge to be
+ * reconfigured.
+ *
+ * Keep the lock during the whole operation to be atomic.
+ */
+
+ DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
+
+ if (!sn65dsi83->bridge.encoder->crtc) {
+ /*
+ * No CRTC attached -> no active CRTC outputs to reset.
+ * This can happen when the SN65DSI83 is reset. Simply do
+ * nothing without returning any errors.
+ */
+ err = 0;
+ goto end;
+ }
+
+ dev_warn(sn65dsi83->dev, "reset the pipe\n");
+
+ err = drm_atomic_helper_reset_crtc(sn65dsi83->bridge.encoder->crtc, &ctx);
+
+end:
+ DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
+
+ return err;
+}
+
+static void sn65dsi83_reset_work(struct work_struct *ws)
+{
+ struct sn65dsi83 *ctx = container_of(ws, struct sn65dsi83, reset_work);
+ int ret;
+
+ /* Reset the pipe */
+ ret = sn65dsi83_reset_pipe(ctx);
+ if (ret) {
+ dev_err(ctx->dev, "reset pipe failed %pe\n", ERR_PTR(ret));
+ return;
+ }
+ if (ctx->irq)
+ enable_irq(ctx->irq);
+}
+
+static void sn65dsi83_handle_errors(struct sn65dsi83 *ctx)
+{
+ unsigned int irq_stat;
+ int ret;
+
+ /*
+ * Schedule a reset in case of:
+ * - the bridge doesn't answer
+ * - the bridge signals an error
+ */
+
+ ret = regmap_read(ctx->regmap, REG_IRQ_STAT, &irq_stat);
+ if (ret || irq_stat) {
+ /*
+ * Acknowledging the IRQ is not always possible (the bridge can be
+ * in a state where it no longer answers). To prevent an interrupt
+ * storm, disable the interrupt; it will be re-enabled after the
+ * reset.
+ */
+ if (ctx->irq)
+ disable_irq_nosync(ctx->irq);
+
+ schedule_work(&ctx->reset_work);
+ }
+}
+
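+/*
+ * Polling fallback used when no interrupt line is wired: check the bridge
+ * for errors once per second.
+ */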
+static void sn65dsi83_monitor_work(struct work_struct *work)
+{
+ struct sn65dsi83 *ctx = container_of(to_delayed_work(work),
+ struct sn65dsi83, monitor_work);
+
+ sn65dsi83_handle_errors(ctx);
+
+ schedule_delayed_work(&ctx->monitor_work, msecs_to_jiffies(1000));
+}
+
+static void sn65dsi83_monitor_start(struct sn65dsi83 *ctx)
+{
+ schedule_delayed_work(&ctx->monitor_work, msecs_to_jiffies(1000));
+}
+
+static void sn65dsi83_monitor_stop(struct sn65dsi83 *ctx)
+{
+ cancel_delayed_work_sync(&ctx->monitor_work);
+}
+
static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
- struct drm_atomic_state *state = old_bridge_state->base.state;
const struct drm_bridge_state *bridge_state;
const struct drm_crtc_state *crtc_state;
const struct drm_display_mode *mode;
@@ -457,6 +561,8 @@ static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge,
REG_LVDS_FMT_HS_NEG_POLARITY : 0) |
(mode->flags & DRM_MODE_FLAG_NVSYNC ?
REG_LVDS_FMT_VS_NEG_POLARITY : 0);
+ val |= bridge_state->output_bus_cfg.flags & DRM_BUS_FLAG_DE_LOW ?
+ REG_LVDS_FMT_DE_NEG_POLARITY : 0;
/* Set up bits-per-pixel, 18bpp or 24bpp. */
if (lvds_format_24bpp) {
@@ -535,7 +641,7 @@ static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge,
}
static void sn65dsi83_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
unsigned int pval;
@@ -549,14 +655,32 @@ static void sn65dsi83_atomic_enable(struct drm_bridge *bridge,
regmap_read(ctx->regmap, REG_IRQ_STAT, &pval);
if (pval)
dev_err(ctx->dev, "Unexpected link status 0x%02x\n", pval);
+
+ if (ctx->irq) {
+ /* Enable irq to detect errors */
+ regmap_write(ctx->regmap, REG_IRQ_GLOBAL, REG_IRQ_GLOBAL_IRQ_EN);
+ regmap_write(ctx->regmap, REG_IRQ_EN, 0xff);
+ } else {
+ /* Use the polling task */
+ sn65dsi83_monitor_start(ctx);
+ }
}
static void sn65dsi83_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
int ret;
+ if (ctx->irq) {
+ /* Disable irq */
+ regmap_write(ctx->regmap, REG_IRQ_EN, 0x0);
+ regmap_write(ctx->regmap, REG_IRQ_GLOBAL, 0x0);
+ } else {
+ /* Stop the polling task */
+ sn65dsi83_monitor_stop(ctx);
+ }
+
/* Put the chip in reset, pull EN line low, and assure 10ms reset low timing. */
gpiod_set_value_cansleep(ctx->enable_gpio, 0);
usleep_range(10000, 11000);
@@ -806,6 +930,14 @@ static int sn65dsi83_host_attach(struct sn65dsi83 *ctx)
return 0;
}
+static irqreturn_t sn65dsi83_irq(int irq, void *data)
+{
+ struct sn65dsi83 *ctx = data;
+
+ sn65dsi83_handle_errors(ctx);
+ return IRQ_HANDLED;
+}
+
static int sn65dsi83_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
@@ -819,6 +951,8 @@ static int sn65dsi83_probe(struct i2c_client *client)
return -ENOMEM;
ctx->dev = dev;
+ INIT_WORK(&ctx->reset_work, sn65dsi83_reset_work);
+ INIT_DELAYED_WORK(&ctx->monitor_work, sn65dsi83_monitor_work);
if (dev->of_node) {
model = (enum sn65dsi83_model)(uintptr_t)
@@ -843,12 +977,21 @@ static int sn65dsi83_probe(struct i2c_client *client)
if (IS_ERR(ctx->regmap))
return dev_err_probe(dev, PTR_ERR(ctx->regmap), "failed to get regmap\n");
+ if (client->irq) {
+ ctx->irq = client->irq;
+ ret = devm_request_threaded_irq(ctx->dev, ctx->irq, NULL, sn65dsi83_irq,
+ IRQF_ONESHOT, dev_name(ctx->dev), ctx);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request irq\n");
+ }
+
dev_set_drvdata(dev, ctx);
i2c_set_clientdata(client, ctx);
ctx->bridge.funcs = &sn65dsi83_funcs;
ctx->bridge.of_node = dev->of_node;
ctx->bridge.pre_enable_prev_first = true;
+ ctx->bridge.type = DRM_MODE_CONNECTOR_LVDS;
drm_bridge_add(&ctx->bridge);
ret = sn65dsi83_host_attach(ctx);
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index e4d9006b59f1..01d456b955ab 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -32,7 +32,6 @@
#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -196,7 +195,7 @@ struct ti_sn65dsi86 {
struct gpio_chip gchip;
DECLARE_BITMAP(gchip_output, SN_NUM_GPIOS);
#endif
-#if defined(CONFIG_PWM)
+#if IS_REACHABLE(CONFIG_PWM)
struct pwm_chip *pchip;
bool pwm_enabled;
atomic_t pwm_pin_busy;
@@ -480,6 +479,7 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
const char *name)
{
struct device *dev = pdata->dev;
+ const struct i2c_client *client = to_i2c_client(dev);
struct auxiliary_device *aux;
int ret;
@@ -488,6 +488,7 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
return -ENOMEM;
aux->name = name;
+ aux->id = (client->adapter->nr << 10) | client->addr;
aux->dev.parent = dev;
aux->dev.release = ti_sn65dsi86_aux_device_release;
device_set_of_node_from_dev(&aux->dev, dev);
@@ -812,7 +813,7 @@ ti_sn_bridge_mode_valid(struct drm_bridge *bridge,
}
static void ti_sn_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
@@ -1072,7 +1073,7 @@ exit:
}
static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
struct drm_connector *connector;
@@ -1084,7 +1085,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
int max_dp_lanes;
unsigned int bpp;
- connector = drm_atomic_get_new_connector_for_encoder(old_bridge_state->base.state,
+ connector = drm_atomic_get_new_connector_for_encoder(state,
bridge->encoder);
if (!connector) {
dev_err_ratelimited(pdata->dev, "Could not get the connector\n");
@@ -1163,7 +1164,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
}
static void ti_sn_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
@@ -1177,7 +1178,7 @@ static void ti_sn_bridge_atomic_pre_enable(struct drm_bridge *bridge,
}
static void ti_sn_bridge_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
@@ -1361,7 +1362,7 @@ static struct auxiliary_driver ti_sn_bridge_driver = {
/* -----------------------------------------------------------------------------
* PWM Controller
*/
-#if defined(CONFIG_PWM)
+#if IS_REACHABLE(CONFIG_PWM)
static int ti_sn_pwm_pin_request(struct ti_sn65dsi86 *pdata)
{
return atomic_xchg(&pdata->pwm_pin_busy, 1) ? -EBUSY : 0;
@@ -1955,7 +1956,7 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
return ret;
}
- if (IS_ENABLED(CONFIG_PWM)) {
+ if (IS_REACHABLE(CONFIG_PWM)) {
ret = ti_sn65dsi86_add_aux_device(pdata, &pdata->pwm_aux, "pwm");
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bridge/ti-tdp158.c b/drivers/gpu/drm/bridge/ti-tdp158.c
index 3472ed5924e8..22316382451f 100644
--- a/drivers/gpu/drm/bridge/ti-tdp158.c
+++ b/drivers/gpu/drm/bridge/ti-tdp158.c
@@ -18,7 +18,8 @@ struct tdp158 {
struct device *dev;
};
-static void tdp158_enable(struct drm_bridge *bridge, struct drm_bridge_state *prev)
+static void tdp158_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
int err;
struct tdp158 *tdp158 = bridge->driver_private;
@@ -34,7 +35,8 @@ static void tdp158_enable(struct drm_bridge *bridge, struct drm_bridge_state *pr
gpiod_set_value_cansleep(tdp158->enable, 1);
}
-static void tdp158_disable(struct drm_bridge *bridge, struct drm_bridge_state *prev)
+static void tdp158_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tdp158 *tdp158 = bridge->driver_private;
diff --git a/drivers/gpu/drm/ci/build.sh b/drivers/gpu/drm/ci/build.sh
index 139b81db6312..19fe01257ab9 100644
--- a/drivers/gpu/drm/ci/build.sh
+++ b/drivers/gpu/drm/ci/build.sh
@@ -132,7 +132,7 @@ fi
# Pass needed files to the test stage
mkdir -p install
cp -rfv .gitlab-ci/* install/.
-cp -rfv ci/* install/.
+cp -rfv bin/ci/* install/.
cp -rfv install/common install/ci-common
cp -rfv drivers/gpu/drm/ci/* install/.
diff --git a/drivers/gpu/drm/ci/build.yml b/drivers/gpu/drm/ci/build.yml
index 9c198239033d..274f118533a7 100644
--- a/drivers/gpu/drm/ci/build.yml
+++ b/drivers/gpu/drm/ci/build.yml
@@ -1,8 +1,7 @@
.build:
extends:
- - .build-rules
- .container+build-rules
- stage: build
+ stage: build-only
artifacts:
paths:
- artifacts
@@ -110,3 +109,104 @@ build-nodebugfs:arm64:
build:x86_64:
extends: .build:x86_64
+
+# Disable build jobs that we won't use
+alpine-build-testing:
+ rules:
+ - when: never
+
+debian-android:
+ rules:
+ - when: never
+
+debian-arm32:
+ rules:
+ - when: never
+
+debian-arm32-asan:
+ rules:
+ - when: never
+
+debian-arm64:
+ rules:
+ - when: never
+
+debian-arm64-asan:
+ rules:
+ - when: never
+
+debian-arm64-build-test:
+ rules:
+ - when: never
+
+debian-arm64-release:
+ rules:
+ - when: never
+
+debian-build-testing:
+ rules:
+ - when: never
+
+debian-clang:
+ rules:
+ - when: never
+
+debian-clang-release:
+ rules:
+ - when: never
+
+debian-no-libdrm:
+ rules:
+ - when: never
+
+debian-ppc64el:
+ rules:
+ - when: never
+
+debian-release:
+ rules:
+ - when: never
+
+debian-s390x:
+ rules:
+ - when: never
+
+debian-testing:
+ rules:
+ - when: never
+
+debian-testing-asan:
+ rules:
+ - when: never
+
+debian-testing-msan:
+ rules:
+ - when: never
+
+debian-vulkan:
+ rules:
+ - when: never
+
+debian-x86_32:
+ rules:
+ - when: never
+
+fedora-release:
+ rules:
+ - when: never
+
+rustfmt:
+ rules:
+ - when: never
+
+shader-db:
+ rules:
+ - when: never
+
+windows-msvc:
+ rules:
+ - when: never
+
+yaml-toml-shell-py-test:
+ rules:
+ - when: never
diff --git a/drivers/gpu/drm/ci/container.yml b/drivers/gpu/drm/ci/container.yml
index 2a94f54ce4cf..07dc13ff865d 100644
--- a/drivers/gpu/drm/ci/container.yml
+++ b/drivers/gpu/drm/ci/container.yml
@@ -24,7 +24,7 @@ alpine/x86_64_build:
rules:
- when: never
-debian/x86_64_test-vk:
+debian/arm64_test-gl:
rules:
- when: never
@@ -32,7 +32,15 @@ debian/arm64_test-vk:
rules:
- when: never
-debian/arm64_test-gl:
+debian/ppc64el_build:
+ rules:
+ - when: never
+
+debian/s390x_build:
+ rules:
+ - when: never
+
+debian/x86_64_test-vk:
rules:
- when: never
@@ -56,14 +64,6 @@ windows_test_msvc:
rules:
- when: never
-.debian/x86_64_build-mingw:
- rules:
- - when: never
-
-rustfmt:
- rules:
- - when: never
-
windows_msvc:
rules:
- - when: never \ No newline at end of file
+ - when: never
diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml
index 90bde9f00cc3..f04aabe8327c 100644
--- a/drivers/gpu/drm/ci/gitlab-ci.yml
+++ b/drivers/gpu/drm/ci/gitlab-ci.yml
@@ -1,11 +1,11 @@
variables:
DRM_CI_PROJECT_PATH: &drm-ci-project-path mesa/mesa
- DRM_CI_COMMIT_SHA: &drm-ci-commit-sha c6a9a9c3bce90923f7700219354e0b6e5a3c9ba6
+ DRM_CI_COMMIT_SHA: &drm-ci-commit-sha 7d3062470f3ccc6cb40540e772e902c7e2248024
UPSTREAM_REPO: https://gitlab.freedesktop.org/drm/kernel.git
TARGET_BRANCH: drm-next
- IGT_VERSION: a73311079a5d8ac99eb25336a8369a2c3c6b519b
+ IGT_VERSION: 33adea9ebafd059ac88a5ccfec60536394f36c7c
DEQP_RUNNER_GIT_URL: https://gitlab.freedesktop.org/mesa/deqp-runner.git
DEQP_RUNNER_GIT_TAG: v0.20.0
@@ -20,6 +20,9 @@ variables:
rm download-git-cache.sh
set +o xtrace
S3_JWT_FILE: /s3_jwt
+ S3_JWT_FILE_SCRIPT: |-
+ echo -n '${S3_JWT}' > '${S3_JWT_FILE}' &&
+ unset CI_JOB_JWT S3_JWT # Unsetting vulnerable env variables
S3_HOST: s3.freedesktop.org
# This bucket is used to fetch the kernel image
S3_KERNEL_BUCKET: mesa-rootfs
@@ -31,17 +34,14 @@ variables:
PIPELINE_ARTIFACTS_BASE: ${S3_HOST}/${S3_ARTIFACTS_BUCKET}/${CI_PROJECT_PATH}/${CI_PIPELINE_ID}
# per-job artifact storage on MinIO
JOB_ARTIFACTS_BASE: ${PIPELINE_ARTIFACTS_BASE}/${CI_JOB_ID}
- # default kernel for rootfs before injecting the current kernel tree
- KERNEL_REPO: "gfx-ci/linux"
- KERNEL_TAG: "v6.6.21-mesa-f8ea"
KERNEL_IMAGE_BASE: https://${S3_HOST}/${S3_KERNEL_BUCKET}/${KERNEL_REPO}/${KERNEL_TAG}
- PKG_REPO_REV: "3cc12a2a"
LAVA_TAGS: subset-1-gfx
LAVA_JOB_PRIORITY: 30
ARTIFACTS_BASE_URL: https://${CI_PROJECT_ROOT_NAMESPACE}.${CI_PAGES_DOMAIN}/-/${CI_PROJECT_NAME}/-/jobs/${CI_JOB_ID}/artifacts
# Python scripts for structured logger
PYTHONPATH: "$PYTHONPATH:$CI_PROJECT_DIR/install"
+
default:
id_tokens:
S3_JWT:
@@ -50,16 +50,13 @@ default:
- export SCRIPTS_DIR=$(mktemp -d)
- curl -L -s --retry 4 -f --retry-all-errors --retry-delay 60 -O --output-dir "${SCRIPTS_DIR}" "${DRM_CI_PROJECT_URL}/-/raw/${DRM_CI_COMMIT_SHA}/.gitlab-ci/setup-test-env.sh"
- source ${SCRIPTS_DIR}/setup-test-env.sh
- - echo -e "\e[0Ksection_start:$(date +%s):unset_env_vars_section[collapsed=true]\r\e[0KUnsetting vulnerable environment variables"
- - echo -n "${S3_JWT}" > "${S3_JWT_FILE}"
- - unset CI_JOB_JWT S3_JWT
- - echo -e "\e[0Ksection_end:$(date +%s):unset_env_vars_section\r\e[0K"
+ - eval "$S3_JWT_FILE_SCRIPT"
- echo -e "\e[0Ksection_start:$(date +%s):drm_ci_download_section[collapsed=true]\r\e[0KDownloading mesa from $DRM_CI_PROJECT_URL/-/archive/$DRM_CI_COMMIT_SHA/mesa-$DRM_CI_COMMIT_SHA.tar.gz"
- cd $CI_PROJECT_DIR
- curl --output - $DRM_CI_PROJECT_URL/-/archive/$DRM_CI_COMMIT_SHA/mesa-$DRM_CI_COMMIT_SHA.tar.gz | tar -xz
- mv mesa-$DRM_CI_COMMIT_SHA/.gitlab-ci* .
- - mv mesa-$DRM_CI_COMMIT_SHA/bin/ci .
+ - mv mesa-$DRM_CI_COMMIT_SHA/bin .
- rm -rf mesa-$DRM_CI_COMMIT_SHA/
- echo -e "\e[0Ksection_end:$(date +%s):drm_ci_download_section\r\e[0K"
@@ -71,6 +68,7 @@ default:
export S3_JWT="$(<${S3_JWT_FILE})" &&
rm "${S3_JWT_FILE}"
+
include:
- project: 'freedesktop/ci-templates'
ref: 16bc29078de5e0a067ff84a1a199a3760d3b3811
@@ -85,6 +83,7 @@ include:
- project: *drm-ci-project-path
ref: *drm-ci-commit-sha
file:
+ - '/.gitlab-ci/build/gitlab-ci.yml'
- '/.gitlab-ci/container/gitlab-ci.yml'
- '/.gitlab-ci/farm-rules.yml'
- '/.gitlab-ci/lava/lava-gitlab-ci.yml'
@@ -115,9 +114,10 @@ include:
stages:
- sanity
- container
- - code-validation
- git-archive
- - build
+ - build-for-tests
+ - build-only
+ - code-validation
- amdgpu
- i915
- mediatek
@@ -128,33 +128,27 @@ stages:
- rockchip
- software-driver
+
# YAML anchors for rule conditions
# --------------------------------
.rules-anchors:
rules:
- # Pipeline for forked project branch
- - if: &is-forked-branch '$CI_COMMIT_BRANCH && $CI_PROJECT_NAMESPACE != "mesa"'
- when: manual
- # Forked project branch / pre-merge pipeline not for Marge bot
- - if: &is-forked-branch-or-pre-merge-not-for-marge '$CI_PROJECT_NAMESPACE != "mesa" || ($GITLAB_USER_LOGIN != "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event")'
- when: manual
- # Pipeline runs for the main branch of the upstream Mesa project
- - if: &is-mesa-main '$CI_PROJECT_NAMESPACE == "mesa" && $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH && $CI_COMMIT_BRANCH'
- when: always
- # Post-merge pipeline
- - if: &is-post-merge '$CI_PROJECT_NAMESPACE == "mesa" && $CI_COMMIT_BRANCH'
- when: on_success
- # Post-merge pipeline, not for Marge Bot
- - if: &is-post-merge-not-for-marge '$CI_PROJECT_NAMESPACE == "mesa" && $GITLAB_USER_LOGIN != "marge-bot" && $CI_COMMIT_BRANCH'
- when: on_success
+ # do not duplicate pipelines on merge pipelines
+ - if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS && $CI_PIPELINE_SOURCE == "push"
+ when: never
+ # merge pipeline
+ - if: &is-merge-attempt $GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"
+ # post-merge pipeline
+ - if: &is-post-merge $GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "push"
# Pre-merge pipeline
- - if: &is-pre-merge '$CI_PIPELINE_SOURCE == "merge_request_event"'
- when: on_success
- # Pre-merge pipeline for Marge Bot
- - if: &is-pre-merge-for-marge '$GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"'
- when: on_success
+ - if: &is-pre-merge $CI_PIPELINE_SOURCE == "merge_request_event"
# Push to a branch on a fork
- - &is-fork-push '$CI_PROJECT_NAMESPACE != "mesa" && $CI_PIPELINE_SOURCE == "push"'
+ - if: &is-fork-push $CI_PROJECT_NAMESPACE != "mesa" && $CI_PIPELINE_SOURCE == "push"
+ # nightly pipeline
+ - if: &is-scheduled-pipeline $CI_PIPELINE_SOURCE == "schedule"
+ # pipeline for direct pushes that bypassed the CI
+ - if: &is-direct-push $CI_PROJECT_NAMESPACE == "mesa" && $CI_PIPELINE_SOURCE == "push" && $GITLAB_USER_LOGIN != "marge-bot"
+
# Rules applied to every job in the pipeline
.common-rules:
@@ -162,42 +156,51 @@ stages:
- if: *is-fork-push
when: manual
+
.never-post-merge-rules:
rules:
- if: *is-post-merge
when: never
-# Rule to filter for only scheduled pipelines.
-.scheduled_pipeline-rules:
- rules:
- - if: &is-scheduled-pipeline '$CI_PIPELINE_SOURCE == "schedule"'
- when: on_success
-
-# Generic rule to not run the job during scheduled pipelines. Jobs that aren't
-# something like a nightly run should include this rule.
-.no_scheduled_pipelines-rules:
- rules:
- - if: *is-scheduled-pipeline
- when: never
-
-# When to automatically run the CI for build jobs
-.build-rules:
- rules:
- - !reference [.no_scheduled_pipelines-rules, rules]
- - !reference [.never-post-merge-rules, rules]
- # Run automatically once all dependency jobs have passed
- - when: on_success
-# When to automatically run the CI for container jobs
.container+build-rules:
rules:
- - !reference [.no_scheduled_pipelines-rules, rules]
+ - !reference [.common-rules, rules]
+ # Run when re-enabling a disabled farm, but not when disabling it
+ - !reference [.disable-farm-mr-rules, rules]
+ # Never run immediately after merging, as we just ran everything
- !reference [.never-post-merge-rules, rules]
+ # Build everything in merge pipelines, if any files affecting the pipeline
+ # were changed
+ - if: *is-merge-attempt
+ changes: &all_paths
+ - drivers/gpu/drm/ci/**/*
+ when: on_success
+ # Same as above, but for pre-merge pipelines
+ - if: *is-pre-merge
+ changes:
+ *all_paths
+ when: manual
+ # Skip everything for pre-merge and merge pipelines which don't change
+ # anything in the build
+ - if: *is-merge-attempt
+ when: never
+ - if: *is-pre-merge
+ when: never
+ # Build everything after someone bypassed the CI
+ - if: *is-direct-push
+ when: on_success
+ # Build everything in scheduled pipelines
+ - if: *is-scheduled-pipeline
+ when: on_success
+ # Allow building everything in fork pipelines, but build nothing unless
+ # manually triggered
- when: manual
+
.ci-deqp-artifacts:
artifacts:
- name: "mesa_${CI_JOB_NAME}"
+ name: "${CI_PROJECT_NAME}_${CI_JOB_NAME}"
when: always
untracked: false
paths:
@@ -208,31 +211,7 @@ stages:
- _build/meson-logs/strace
-.container-rules:
- rules:
- - !reference [.no_scheduled_pipelines-rules, rules]
- - !reference [.never-post-merge-rules, rules]
- # Run pipeline by default in the main project if any CI pipeline
- # configuration files were changed, to ensure docker images are up to date
- - if: *is-post-merge
- changes:
- - drivers/gpu/drm/ci/**/*
- when: on_success
- # Run pipeline by default if it was triggered by Marge Bot, is for a
- # merge request, and any files affecting the pipeline were changed
- - if: *is-pre-merge-for-marge
- when: on_success
- # Run pipeline by default in the main project if it was not triggered by
- # Marge Bot, and any files affecting the pipeline were changed
- - if: *is-post-merge-not-for-marge
- when: on_success
- # Allow triggering jobs manually in other cases
- - when: manual
-
-
-
# Git archive
-
make git archive:
extends:
- .fdo.ci-fairy
@@ -264,30 +243,64 @@ sanity:
rules:
- if: *is-pre-merge
when: on_success
- # Other cases default to never
+ - when: never
variables:
GIT_STRATEGY: none
script:
# ci-fairy check-commits --junit-xml=check-commits.xml
- ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml
+ - |
+ set -eu
+ image_tags=(
+ ALPINE_X86_64_LAVA_SSH_TAG
+ CONTAINER_TAG
+ DEBIAN_BASE_TAG
+ DEBIAN_BUILD_TAG
+ DEBIAN_PYUTILS_TAG
+ DEBIAN_TEST_GL_TAG
+ KERNEL_ROOTFS_TAG
+ KERNEL_TAG
+ PKG_REPO_REV
+ )
+ for var in "${image_tags[@]}"
+ do
+ if [ "$(echo -n "${!var}" | wc -c)" -gt 20 ]
+ then
+ echo "$var is too long; please make sure it is at most 20 chars."
+ exit 1
+ fi
+ done
artifacts:
when: on_failure
reports:
junit: check-*.xml
+ tags:
+ - placeholder-job
-# Rules for tests that should not block merging, but should be available to
-# optionally run with the "play" button in the UI in pre-merge non-marge
-# pipelines. This should appear in "extends:" after any includes of
-# test-source-dep.yml rules, so that these rules replace those.
-.test-manual-mr:
+
+mr-label-maker-test:
+ extends:
+ - .fdo.ci-fairy
+ stage: sanity
rules:
- - !reference [.no_scheduled_pipelines-rules, rules]
- - if: *is-forked-branch-or-pre-merge-not-for-marge
- when: manual
+ - !reference [.mr-label-maker-rules, rules]
variables:
- JOB_TIMEOUT: 80
+ GIT_STRATEGY: fetch
+ timeout: 10m
+ script:
+ - set -eu
+ - python3 -m venv .venv
+ - source .venv/bin/activate
+ - pip install git+https://gitlab.freedesktop.org/freedesktop/mr-label-maker
+ - mr-label-maker --dry-run --mr $CI_MERGE_REQUEST_IID
# Jobs that need to pass before spending hardware resources on further testing
.required-for-hardware-jobs:
- needs: []
+ needs:
+ - job: clang-format
+ optional: true
+ - job: rustfmt
+ optional: true
+ - job: toml-lint
+ optional: true
diff --git a/drivers/gpu/drm/ci/igt_runner.sh b/drivers/gpu/drm/ci/igt_runner.sh
index f38836ec837c..68b042e43b7f 100755
--- a/drivers/gpu/drm/ci/igt_runner.sh
+++ b/drivers/gpu/drm/ci/igt_runner.sh
@@ -47,7 +47,7 @@ else
ARCH="x86_64"
fi
-curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -s ${FDO_HTTP_CACHE_URI:-}$PIPELINE_ARTIFACTS_BASE/$ARCH/igt.tar.gz | tar --zstd -v -x -C /
+curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -s $PIPELINE_ARTIFACTS_BASE/$ARCH/igt.tar.gz | tar --zstd -v -x -C /
TESTLIST="/igt/libexec/igt-gpu-tools/ci-testlist.txt"
@@ -69,7 +69,7 @@ igt-runner \
run \
--igt-folder /igt/libexec/igt-gpu-tools \
--caselist $TESTLIST \
- --output /results \
+ --output $RESULTS_DIR \
-vvvv \
$IGT_SKIPS \
$IGT_FLAKES \
@@ -80,13 +80,10 @@ set -e
deqp-runner junit \
--testsuite IGT \
- --results /results/failures.csv \
- --output /results/junit.xml \
+ --results $RESULTS_DIR/failures.csv \
+ --output $RESULTS_DIR/junit.xml \
--limit 50 \
- --template "See https://$CI_PROJECT_ROOT_NAMESPACE.pages.freedesktop.org/-/$CI_PROJECT_NAME/-/jobs/$CI_JOB_ID/artifacts/results/{{testcase}}.xml"
-
-# Store the results also in the simpler format used by the runner in ChromeOS CI
-#sed -r 's/(dmesg-warn|pass)/success/g' /results/results.txt > /results/results_simple.txt
+ --template "See $ARTIFACTS_BASE_URL/results/{{testcase}}.xml"
cd $oldpath
exit $ret
diff --git a/drivers/gpu/drm/ci/image-tags.yml b/drivers/gpu/drm/ci/image-tags.yml
index 8d8b9e71852e..20049f3626b2 100644
--- a/drivers/gpu/drm/ci/image-tags.yml
+++ b/drivers/gpu/drm/ci/image-tags.yml
@@ -1,5 +1,5 @@
variables:
- CONTAINER_TAG: "2024-09-09-uprevs"
+ CONTAINER_TAG: "20250204-mesa-uprev"
DEBIAN_X86_64_BUILD_BASE_IMAGE: "debian/x86_64_build-base"
DEBIAN_BASE_TAG: "${CONTAINER_TAG}"
@@ -7,9 +7,16 @@ variables:
DEBIAN_BUILD_TAG: "${CONTAINER_TAG}"
KERNEL_ROOTFS_TAG: "${CONTAINER_TAG}"
+ # default kernel for rootfs before injecting the current kernel tree
+ KERNEL_TAG: "v6.13-rc4-mesa-5e77"
+ KERNEL_REPO: "gfx-ci/linux"
+ PKG_REPO_REV: "bca9635d"
DEBIAN_X86_64_TEST_BASE_IMAGE: "debian/x86_64_test-base"
DEBIAN_X86_64_TEST_IMAGE_GL_PATH: "debian/x86_64_test-gl"
DEBIAN_TEST_GL_TAG: "${CONTAINER_TAG}"
- ALPINE_X86_64_LAVA_SSH_TAG: "${CONTAINER_TAG}" \ No newline at end of file
+ DEBIAN_PYUTILS_IMAGE: "debian/x86_64_pyutils"
+ DEBIAN_PYUTILS_TAG: "${CONTAINER_TAG}"
+
+ ALPINE_X86_64_LAVA_SSH_TAG: "${CONTAINER_TAG}"
diff --git a/drivers/gpu/drm/ci/lava-submit.sh b/drivers/gpu/drm/ci/lava-submit.sh
index 6add15083c78..6e5ac51e8c0a 100755
--- a/drivers/gpu/drm/ci/lava-submit.sh
+++ b/drivers/gpu/drm/ci/lava-submit.sh
@@ -1,58 +1,102 @@
-#!/bin/bash
+#!/usr/bin/env bash
# SPDX-License-Identifier: MIT
+# shellcheck disable=SC2086 # we want word splitting
+# shellcheck disable=SC1091 # paths only become valid at runtime
-set -e
-set -x
+# If we run in the fork (not from mesa or Marge-bot), reuse the mainline kernel and rootfs if they exist.
+_check_artifact_path() {
+ _url="https://${1}/${2}"
+ if curl -s -o /dev/null -I -L -f --retry 4 --retry-delay 15 "${_url}"; then
+ echo -n "${_url}"
+ fi
+}
-# Try to use the kernel and rootfs built in mainline first, so we're more
-# likely to hit cache
-if curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -s "https://${BASE_SYSTEM_MAINLINE_HOST_PATH}/done"; then
- BASE_SYSTEM_HOST_PATH="${BASE_SYSTEM_MAINLINE_HOST_PATH}"
-else
- BASE_SYSTEM_HOST_PATH="${BASE_SYSTEM_FORK_HOST_PATH}"
-fi
+get_path_to_artifact() {
+ _mainline_artifact="$(_check_artifact_path ${BASE_SYSTEM_MAINLINE_HOST_PATH} ${1})"
+ if [ -n "${_mainline_artifact}" ]; then
+ echo -n "${_mainline_artifact}"
+ return
+ fi
+ _fork_artifact="$(_check_artifact_path ${BASE_SYSTEM_FORK_HOST_PATH} ${1})"
+ if [ -n "${_fork_artifact}" ]; then
+ echo -n "${_fork_artifact}"
+ return
+ fi
+ set +x
+ error "Sorry, I couldn't find a viable built path for ${1} in either mainline or a fork." >&2
+ echo "" >&2
+ echo "If you're working on CI, this probably means that you're missing a dependency:" >&2
+ echo "this job ran ahead of the job which was supposed to upload that artifact." >&2
+ echo "" >&2
+ echo "If you aren't working on CI, please ping @mesa/ci-helpers to see if we can help." >&2
+ echo "" >&2
+ echo "This job is going to fail, because I can't find the resources I need. Sorry." >&2
+ set -x
+ exit 1
+}
+
+. "${SCRIPTS_DIR}/setup-test-env.sh"
+
+section_start prepare_rootfs "Preparing root filesystem"
+
+set -ex
+
+section_switch rootfs "Assembling root filesystem"
+ROOTFS_URL="$(get_path_to_artifact lava-rootfs.tar.zst)"
+[ $? != 1 ] || exit 1
rm -rf results
mkdir -p results/job-rootfs-overlay/
-cp artifacts/ci-common/capture-devcoredump.sh results/job-rootfs-overlay/
+artifacts/ci-common/generate-env.sh > results/job-rootfs-overlay/set-job-env-vars.sh
cp artifacts/ci-common/init-*.sh results/job-rootfs-overlay/
-cp artifacts/ci-common/intel-gpu-freq.sh results/job-rootfs-overlay/
cp "$SCRIPTS_DIR"/setup-test-env.sh results/job-rootfs-overlay/
-# Prepare env vars for upload.
-section_start variables "Variables passed through:"
-KERNEL_IMAGE_BASE="https://${BASE_SYSTEM_HOST_PATH}" \
- artifacts/ci-common/generate-env.sh | tee results/job-rootfs-overlay/set-job-env-vars.sh
-section_end variables
-
tar zcf job-rootfs-overlay.tar.gz -C results/job-rootfs-overlay/ .
ci-fairy s3cp --token-file "${S3_JWT_FILE}" job-rootfs-overlay.tar.gz "https://${JOB_ROOTFS_OVERLAY_PATH}"
+# Prepare env vars for upload.
+section_switch variables "Environment variables passed through to device:"
+cat results/job-rootfs-overlay/set-job-env-vars.sh
+
+section_switch lava_submit "Submitting job for scheduling"
+
touch results/lava.log
tail -f results/lava.log &
-
PYTHONPATH=artifacts/ artifacts/lava/lava_job_submitter.py \
- submit \
+ --farm "${FARM}" \
+ --device-type "${DEVICE_TYPE}" \
+ --boot-method "${BOOT_METHOD}" \
+ --job-timeout-min $((CI_JOB_TIMEOUT/60 - 5)) \
--dump-yaml \
--pipeline-info "$CI_JOB_NAME: $CI_PIPELINE_URL on $CI_COMMIT_REF_NAME ${CI_NODE_INDEX}/${CI_NODE_TOTAL}" \
- --rootfs-url-prefix "https://${BASE_SYSTEM_HOST_PATH}" \
+ --rootfs-url "${ROOTFS_URL}" \
--kernel-url-prefix "https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}" \
- --build-url "${FDO_HTTP_CACHE_URI:-}https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/kernel-files.tar.zst" \
- --job-rootfs-overlay-url "${FDO_HTTP_CACHE_URI:-}https://${JOB_ROOTFS_OVERLAY_PATH}" \
- --job-timeout-min ${JOB_TIMEOUT:-80} \
+ --kernel-external "${EXTERNAL_KERNEL_TAG}" \
--first-stage-init artifacts/ci-common/init-stage1.sh \
- --ci-project-dir "${CI_PROJECT_DIR}" \
- --device-type "${DEVICE_TYPE}" \
- --farm "${FARM}" \
--dtb-filename "${DTB}" \
--jwt-file "${S3_JWT_FILE}" \
--kernel-image-name "${KERNEL_IMAGE_NAME}" \
--kernel-image-type "${KERNEL_IMAGE_TYPE}" \
- --boot-method "${BOOT_METHOD}" \
--visibility-group "${VISIBILITY_GROUP}" \
--lava-tags "${LAVA_TAGS}" \
--mesa-job-name "$CI_JOB_NAME" \
--structured-log-file "results/lava_job_detail.json" \
--ssh-client-image "${LAVA_SSH_CLIENT_IMAGE}" \
+ --project-name "${CI_PROJECT_NAME}" \
+ --starting-section "${CURRENT_SECTION}" \
+ --job-submitted-at "${CI_JOB_STARTED_AT}" \
+ - append-overlay \
+ --name=kernel-build \
+ --url="${FDO_HTTP_CACHE_URI:-}https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/kernel-files.tar.zst" \
+ --compression=zstd \
+ --path="${CI_PROJECT_DIR}" \
+ --format=tar \
+ - append-overlay \
+ --name=job-overlay \
+ --url="https://${JOB_ROOTFS_OVERLAY_PATH}" \
+ --compression=gz \
+ --path="/" \
+ --format=tar \
+ - submit \
>> results/lava.log
diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml
index f0ef60c8f56d..6a1e059858e5 100644
--- a/drivers/gpu/drm/ci/test.yml
+++ b/drivers/gpu/drm/ci/test.yml
@@ -1,21 +1,16 @@
-.test-rules:
- rules:
- - if: '$FD_FARM == "offline" && $RUNNER_TAG =~ /^google-freedreno-/'
- when: never
- - if: '$COLLABORA_FARM == "offline" && $RUNNER_TAG =~ /^mesa-ci-x86-64-lava-/'
- when: never
- - !reference [.no_scheduled_pipelines-rules, rules]
- - when: on_success
-
.lava-test:
extends:
- - .test-rules
+ - .container+build-rules
timeout: "1h30m"
+ rules:
+ - !reference [.scheduled_pipeline-rules, rules]
+ - !reference [.collabora-farm-rules, rules]
+ - when: on_success
script:
# Note: Build dir (and thus install) may be dirty due to GIT_STRATEGY
- rm -rf install
- tar -xf artifacts/install.tar
- - mv install/* artifacts/.
+ - mv -n install/* artifacts/.
# Override it with our lava-submit.sh script
- ./artifacts/lava-submit.sh
@@ -32,6 +27,7 @@
- alpine/x86_64_lava_ssh_client
- kernel+rootfs_arm32
- debian/x86_64_build
+ - python-artifacts
- testing:arm32
- igt:arm32
@@ -48,6 +44,7 @@
- alpine/x86_64_lava_ssh_client
- kernel+rootfs_arm64
- debian/x86_64_build
+ - python-artifacts
- testing:arm64
- igt:arm64
@@ -64,6 +61,7 @@
- alpine/x86_64_lava_ssh_client
- kernel+rootfs_x86_64
- debian/x86_64_build
+ - python-artifacts
- testing:x86_64
- igt:x86_64
@@ -71,8 +69,11 @@
extends:
- .baremetal-test-arm64
- .use-debian/baremetal_arm64_test
- - .test-rules
timeout: "1h30m"
+ rules:
+ - !reference [.scheduled_pipeline-rules, rules]
+ - !reference [.google-freedreno-farm-rules, rules]
+ - when: on_success
variables:
FDO_CI_CONCURRENT: 10
HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
@@ -441,20 +442,20 @@ panfrost:g12b:
virtio_gpu:none:
stage: software-driver
timeout: "1h30m"
+ rules:
+ - !reference [.scheduled_pipeline-rules, rules]
+ - when: on_success
variables:
CROSVM_GALLIUM_DRIVER: llvmpipe
DRIVER_NAME: virtio_gpu
GPU_VERSION: none
extends:
- .test-gl
- - .test-rules
tags:
- kvm
script:
- ln -sf $CI_PROJECT_DIR/install /install
- mv install/bzImage /lava-files/bzImage
- - mkdir -p $CI_PROJECT_DIR/results
- - ln -sf $CI_PROJECT_DIR/results /results
- install/crosvm-runner.sh install/igt_runner.sh
needs:
- debian/x86_64_test-gl
@@ -464,20 +465,20 @@ virtio_gpu:none:
vkms:none:
stage: software-driver
timeout: "1h30m"
+ rules:
+ - !reference [.scheduled_pipeline-rules, rules]
+ - when: on_success
variables:
DRIVER_NAME: vkms
GPU_VERSION: none
extends:
- .test-gl
- - .test-rules
tags:
- kvm
script:
- ln -sf $CI_PROJECT_DIR/install /install
- mv install/bzImage /lava-files/bzImage
- mkdir -p /lib/modules
- - mkdir -p $CI_PROJECT_DIR/results
- - ln -sf $CI_PROJECT_DIR/results /results
- ./install/crosvm-runner.sh ./install/igt_runner.sh
needs:
- debian/x86_64_test-gl
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
index f44dbce3151a..75374085f40f 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
@@ -14,10 +14,16 @@ amdgpu/amd_plane@mpo-scale-nv12,Fail
amdgpu/amd_plane@mpo-scale-p010,Fail
amdgpu/amd_plane@mpo-scale-rgb,Crash
amdgpu/amd_plane@mpo-swizzle-toggle,Fail
-amdgpu/amd_uvd_dec@amdgpu_uvd_decode,Fail
+amdgpu/amd_uvd_dec@amdgpu_uvd_decode,Crash
kms_addfb_basic@bad-pitch-65536,Fail
kms_addfb_basic@bo-too-small,Fail
kms_addfb_basic@too-high,Fail
+kms_async_flips@alternate-sync-async-flip,Fail
+kms_async_flips@alternate-sync-async-flip-atomic,Fail
+kms_async_flips@test-cursor,Fail
+kms_async_flips@test-cursor-atomic,Fail
+kms_async_flips@test-time-stamp,Fail
+kms_async_flips@test-time-stamp-atomic,Fail
kms_atomic_transition@plane-all-modeset-transition-internal-panels,Fail
kms_atomic_transition@plane-all-transition,Fail
kms_atomic_transition@plane-all-transition-nonblocking,Fail
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt
index e70bd9d447ca..adffb011298a 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt
@@ -3,13 +3,6 @@
# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-kms_async_flips@async-flip-with-page-flip-events
-
-# Board Name: hp-11A-G6-EE-grunt
-# Bug Report: https://lore.kernel.org/amd-gfx/3542730f-b8d7-404d-a947-b7a5e95d661c@collabora.com/T/#u
-# Failure Rate: 50
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
kms_async_flips@crc
# Board Name: hp-11A-G6-EE-grunt
@@ -25,3 +18,17 @@ kms_plane@pixel-format-source-clamping
# IGT Version: 1.28-ga73311079
# Linux Version: 6.11.0-rc2
kms_async_flips@async-flip-with-page-flip-events
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://gitlab.freedesktop.org/drm/amd/-/issues/3835
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_async_flips@async-flip-with-page-flip-events-atomic
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://gitlab.freedesktop.org/drm/amd/-/issues/3836
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_async_flips@crc-atomic
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
index f41b3e112976..3879c4812a22 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
@@ -32,3 +32,8 @@ amdgpu/amd_pci_unplug@amdgpu_hotunplug_with_exported_bo
amdgpu/amd_pci_unplug@amdgpu_hotunplug_with_exported_fence
amdgpu/amd_vrr_range@freesync-parsing
device_reset.*
+
+# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
+# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
+kms_display_modes@extended-mode-basic
+kms_display_modes@mst-extended-mode-negative
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
index 0907cb0f6d9e..a29cea4f234c 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
@@ -1,16 +1,14 @@
core_setmaster@master-drop-set-shared-fd,Fail
-core_setmaster@master-drop-set-user,Fail
-core_setmaster_vs_auth,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
-kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Timeout
-kms_fb_coherency@memset-crc,Crash
-kms_flip@busy-flip,Timeout
-kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail
+kms_async_flips@test-time-stamp,Timeout
+kms_ccs@crc-sprite-planes-basic-y-tiled-ccs,Timeout
+kms_flip@dpms-off-confusion-interruptible,Timeout
+kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
@@ -33,20 +31,12 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
-kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-cpu,Timeout
kms_lease@lease-uevent,Fail
+kms_lease@page-flip-implicit-plane,Timeout
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
-kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation,Timeout
-kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout
-kms_pm_rpm@modeset-stress-extra-wait,Timeout
-kms_pm_rpm@universal-planes,Timeout
-kms_pm_rpm@universal-planes-dpms,Timeout
-kms_prop_blob@invalid-set-prop,Fail
-kms_rotation_crc@primary-rotation-180,Timeout
-kms_vblank@query-forked-hang,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt
index 0207c9807bee..fee246c25b9a 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt
@@ -53,3 +53,17 @@ kms_pm_rpm@modeset-lpsp-stress
# IGT Version: 1.28-ga73311079
# Linux Version: 6.11.0-rc2
kms_pm_rpm@drm-resources-equal
+
+# Board Name: asus-C433TA-AJ0005-rammus
+# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13346
+# Failure Rate: 60
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+i915_hangman@gt-engine-error
+
+# Board Name: asus-C433TA-AJ0005-rammus
+# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13345
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+gen9_exec_parse@unaligned-access
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
index 5186ba3dbbc6..2ef1dc35a7fa 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
@@ -20,6 +20,7 @@ i915_pm_rc6_residency.*
i915_suspend.*
kms_scaling_modes.*
i915_pm_rpm.*
+i915_module_load@reload-with-fault-injection
# Kernel panic
drm_fdinfo.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
index 64772fedaed5..ee11999e3da1 100644
--- a/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
@@ -1,3 +1,4 @@
+core_setmaster@master-drop-set-user,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
@@ -8,13 +9,13 @@ kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
-kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail
@@ -29,13 +30,10 @@ kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
kms_pm_backlight@basic-brightness,Fail
+kms_pm_backlight@brightness-with-dpms,Crash
kms_pm_backlight@fade,Fail
kms_pm_backlight@fade-with-dpms,Fail
-kms_pm_rpm@legacy-planes,Timeout
-kms_pm_rpm@legacy-planes-dpms,Timeout
kms_pm_rpm@modeset-stress-extra-wait,Timeout
-kms_pm_rpm@universal-planes,Timeout
-kms_pm_rpm@universal-planes-dpms,Timeout
kms_sysfs_edid_timing,Fail
perf@i915-ref-count,Fail
perf@non-zero-reason,Timeout
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
index f352b719cf7d..47b3f1d42bb6 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
@@ -1,5 +1,4 @@
core_setmaster@master-drop-set-shared-fd,Fail
-core_setmaster@master-drop-set-user,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
@@ -9,9 +8,8 @@ i915_pipe_stress@stress-xrgb8888-ytiled,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
i915_pm_rpm@system-suspend-execbuf,Timeout
-i915_pm_rps@engine-order,Fail
-kms_big_fb@linear-16bpp-rotate-180,Timeout
-kms_fb_coherency@memset-crc,Crash
+kms_async_flips@test-time-stamp,Timeout
+kms_ccs@crc-sprite-planes-basic-y-tiled-ccs,Timeout
kms_flip@busy-flip,Timeout
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
@@ -36,25 +34,29 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
kms_lease@lease-uevent,Fail
+kms_lease@page-flip-implicit-plane,Timeout
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
-kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout
-kms_pm_rpm@modeset-stress-extra-wait,Timeout
-kms_pm_rpm@universal-planes,Timeout
-kms_pm_rpm@universal-planes-dpms,Timeout
kms_psr2_sf@cursor-plane-update-sf,Fail
kms_psr2_sf@overlay-plane-update-continuous-sf,Fail
kms_psr2_sf@overlay-plane-update-sf-dmg-area,Fail
kms_psr2_sf@overlay-primary-update-sf-dmg-area,Fail
kms_psr2_sf@plane-move-sf-dmg-area,Fail
+kms_psr2_sf@pr-cursor-plane-update-sf,Timeout
kms_psr2_sf@primary-plane-update-sf-dmg-area,Fail
kms_psr2_sf@primary-plane-update-sf-dmg-area-big-fb,Fail
+kms_psr2_sf@psr2-cursor-plane-update-sf,Fail
+kms_psr2_sf@psr2-overlay-plane-update-continuous-sf,Fail
+kms_psr2_sf@psr2-overlay-plane-update-sf-dmg-area,Fail
+kms_psr2_sf@psr2-overlay-primary-update-sf-dmg-area,Fail
+kms_psr2_sf@psr2-plane-move-sf-dmg-area,Fail
+kms_psr2_sf@psr2-primary-plane-update-sf-dmg-area,Fail
+kms_psr2_sf@psr2-primary-plane-update-sf-dmg-area-big-fb,Fail
kms_psr2_su@page_flip-NV12,Fail
-kms_rotation_crc@primary-rotation-180,Timeout
+kms_psr2_su@page_flip-P010,Fail
kms_setmode@basic,Fail
-kms_vblank@query-forked-hang,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt
index d8401251e5f4..5343cc1c8696 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt
@@ -25,3 +25,10 @@ kms_plane_alpha_blend@constant-alpha-min
# IGT Version: 1.28-ga73311079
# Linux Version: 6.11.0-rc2
kms_async_flips@crc
+
+# Board Name: asus-C436FA-Flip-hatch
+# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13347
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+gen9_exec_parse@unaligned-access
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
index 9d753d97c9ab..c87ff8b40e99 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
@@ -20,6 +20,7 @@ xe_module_load.*
api_intel_allocator.*
kms_cursor_legacy.*
i915_pm_rpm.*
+kms_flip.*
# Kernel panic
drm_fdinfo.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
index 6eb64c672f7d..843c363b42f5 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
@@ -1,61 +1,35 @@
+core_setmaster@master-drop-set-shared-fd,Fail
core_setmaster@master-drop-set-user,Fail
-core_setmaster_vs_auth,Fail
+gen9_exec_parse@unaligned-access,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
-kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Timeout
kms_dirtyfb@default-dirtyfb-ioctl,Fail
kms_dirtyfb@drrs-dirtyfb-ioctl,Fail
kms_dirtyfb@fbc-dirtyfb-ioctl,Fail
-kms_flip@blocking-wf_vblank,Fail
-kms_flip@busy-flip,Timeout
-kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail
-kms_flip@wf_vblank-ts-check,Fail
kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
-kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
-kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
-kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail
-kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
-kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
-kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-cpu,Timeout
-kms_frontbuffer_tracking@fbc-tiling-linear,Fail
kms_frontbuffer_tracking@fbcdrrs-tiling-linear,Fail
kms_lease@lease-uevent,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
-kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation,Timeout
-kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout
-kms_pm_rpm@legacy-planes,Timeout
-kms_pm_rpm@legacy-planes-dpms,Timeout
-kms_pm_rpm@modeset-stress-extra-wait,Timeout
-kms_pm_rpm@universal-planes,Timeout
-kms_pm_rpm@universal-planes-dpms,Timeout
-kms_prop_blob@invalid-set-prop,Fail
-kms_rotation_crc@multiplane-rotation,Fail
-kms_rotation_crc@multiplane-rotation-cropping-bottom,Fail
kms_rotation_crc@multiplane-rotation-cropping-top,Fail
-kms_rotation_crc@primary-rotation-180,Timeout
-kms_vblank@query-forked-hang,Timeout
perf@non-zero-reason,Timeout
sysfs_heartbeat_interval@long,Timeout
-sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
xe_module_load@force-load,Fail
@@ -63,4 +37,3 @@ xe_module_load@load,Fail
xe_module_load@many-reload,Fail
xe_module_load@reload,Fail
xe_module_load@reload-no-display,Fail
-core_setmaster@master-drop-set-shared-fd,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt
index 077886b76093..2a909c657343 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt
@@ -11,3 +11,10 @@ core_hotunplug@unplug-rescan
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
kms_fb_coherency@memset-crc
+
+# Board Name: hp-x360-12b-ca0010nr-n4020-octopus
+# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13348
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_flip@dpms-off-confusion-interruptible
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
index 9c64146aed90..219ae839323a 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
@@ -27,3 +27,301 @@ perf.*
# Kernel panic
drm_fdinfo.*
kms_plane_alpha_blend.*
+
+kms_async_flips.*
+# [IGT] kms_async_flips: executing
+# ------------[ cut here ]------------
+# i915 0000:00:02.0: [drm] drm_WARN_ON(intel_dp->pps.vdd_wakeref)
+# WARNING: CPU: 0 PID: 1253 at drivers/gpu/drm/i915/display/intel_pps.c:760 intel_pps_vdd_on_unlocked+0x351/0x360
+# Modules linked in:
+# CPU: 0 UID: 0 PID: 1253 Comm: kms_async_flips Tainted: G W 6.13.0-rc2-ge95c88d68ac3 #1
+# Tainted: [W]=WARN
+# Hardware name: HP Bloog/Bloog, BIOS 09/19/2019
+# RIP: 0010:intel_pps_vdd_on_unlocked+0x351/0x360
+# Code: 8b 77 50 4d 85 f6 75 03 4c 8b 37 e8 19 98 03 00 48 c7 c1 10 d3 61 84 4c 89 f2 48 c7 c7 67 56 69 84 48 89 c6 e8 a0 22 63 ff 90 <0f> 0b 90 90 e9 5f fd ff ff e8 81 94 69 00 90 90 90 90 90 90 90 90
+# RSP: 0018:ffff9573c0dcfad8 EFLAGS: 00010286
+# RAX: 0000000000000000 RBX: ffff93498148e238 RCX: 00000000ffffdfff
+# RDX: 0000000000000000 RSI: ffff9573c0dcf988 RDI: 0000000000000001
+# RBP: ffff934980d80000 R08: 0000000000009ffb R09: 00000000ffffdfff
+# R10: 00000000ffffdfff R11: ffffffff8488c8a0 R12: ffff934980d80d68
+# R13: 0000000000000000 R14: ffff93498093c9b0 R15: 0000000000000000
+# FS: 00007fbf1f4486c0(0000) GS:ffff9349fbc00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000055f260419028 CR3: 0000000106594000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# intel_pps_vdd_on+0x78/0x150
+# intel_dp_detect+0xb9/0x7c0
+# drm_helper_probe_detect+0x47/0xb0
+# drm_helper_probe_single_connector_modes+0x40b/0x660
+# drm_mode_getconnector+0x369/0x490
+# drm_ioctl_kernel+0xad/0x110
+# drm_ioctl+0x235/0x4b0
+# __x64_sys_ioctl+0x8f/0xc0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7fbf208e4cdb
+# Code: 00 48 89 44 24 18 31 c0 48 8d 44 24 60 c7 04 24 10 00 00 00 48 89 44 24 08 48 8d 44 24 20 48 89 44 24 10 b8 10 00 00 00 0f 05 <89> c2 3d 00 f0 ff ff 77 1c 48 8b 44 24 18 64 48 2b 04 25 28 00 00
+# RSP: 002b:00007ffe061f6c60 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+# RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007fbf208e4cdb
+# RDX: 00007ffe061f6d00 RSI: 00000000c05064a7 RDI: 0000000000000003
+# RBP: 00007ffe061f6d00 R08: 0000000000000007 R09: 000055f260410cb0
+# R10: 650094cf40322201 R11: 0000000000000246 R12: 00000000c05064a7
+# R13: 0000000000000003 R14: 00000000c05064a7 R15: 00007ffe061f6d00
+# </TASK>
+# irq event stamp: 0
+# hardirqs last enabled at (0): [<0000000000000000>] 0x0
+# hardirqs last disabled at (0): [<ffffffff82d0a283>] copy_process+0xaf3/0x2eb0
+# softirqs last enabled at (0): [<ffffffff82d0a283>] copy_process+0xaf3/0x2eb0
+# softirqs last disabled at (0): [<0000000000000000>] 0x0
+# ---[ end trace 0000000000000000 ]---
+# i915 0000:00:02.0: [drm] *ERROR* PPS state mismatch
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* Failed to read DPCD register 0x92
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# [IGT] kms_async_flips: starting subtest test-time-stamp
+# i915 0000:00:02.0: [drm] *ERROR* [CRTC:70:pipe A] flip_done timed out
+# i915 0000:00:02.0: [drm] *ERROR* flip_done timed out
+# i915 0000:00:02.0: [drm] *ERROR* [CRTC:70:pipe A] commit wait timed out
+# INFO: task kms_async_flips:1253 blocked for more than 122 seconds.
+# Tainted: G W 6.13.0-rc2-ge95c88d68ac3 #1
+# "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+# task:kms_async_flips state:D stack:12720 pid:1253 tgid:1253 ppid:164 flags:0x00004006
+# Call Trace:
+# <TASK>
+# __schedule+0x495/0xd20
+# schedule+0x35/0x120
+# drm_vblank_work_flush+0x91/0xf0
+# intel_wait_for_vblank_workers+0x58/0x90
+# intel_atomic_commit_tail+0xa86/0x15f0
+# intel_atomic_commit+0x257/0x2a0
+# drm_atomic_commit+0xac/0xe0
+# set_property_atomic+0x91/0x160
+# drm_mode_obj_set_property_ioctl+0xc1/0x110
+# drm_ioctl_kernel+0xad/0x110
+# drm_ioctl+0x235/0x4b0
+# __x64_sys_ioctl+0x8f/0xc0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7fbf208e4cdb
+# RSP: 002b:00007ffe061f6c50 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+# RAX: ffffffffffffffda RBX: 000055f24e272330 RCX: 00007fbf208e4cdb
+# RDX: 00007ffe061f6ce0 RSI: 00000000c01864ba RDI: 0000000000000003
+# RBP: 00007ffe061f6ce0 R08: 0000000000000000 R09: 000055f2604184f0
+# R10: 00007fbf209ca310 R11: 0000000000000246 R12: 00000000c01864ba
+# R13: 0000000000000003 R14: 0000000000000000 R15: 000055f26040ab90
+# </TASK>
+# INFO: lockdep is turned off.
+
+kms_lease.*
+# [IGT] kms_lease: executing
+# ------------[ cut here ]------------
+# i915 0000:00:02.0: [drm] drm_WARN_ON(intel_dp->pps.vdd_wakeref)
+# WARNING: CPU: 1 PID: 1271 at drivers/gpu/drm/i915/display/intel_pps.c:760 intel_pps_vdd_on_unlocked+0x351/0x360
+# Modules linked in:
+# CPU: 1 UID: 0 PID: 1271 Comm: kms_lease Tainted: G W 6.13.0-rc2-gfb6fd142a8eb #1
+# Tainted: [W]=WARN
+# Hardware name: HP Bloog/Bloog, BIOS 09/19/2019
+# RIP: 0010:intel_pps_vdd_on_unlocked+0x351/0x360
+# Code: 8b 77 50 4d 85 f6 75 03 4c 8b 37 e8 19 98 03 00 48 c7 c1 10 d3 61 a1 4c 89 f2 48 c7 c7 67 56 69 a1 48 89 c6 e8 a0 22 63 ff 90 <0f> 0b 90 90 e9 5f fd ff ff e8 81 94 69 00 90 90 90 90 90 90 90 90
+# RSP: 0018:ffffb26500ddfad8 EFLAGS: 00010286
+# RAX: 0000000000000000 RBX: ffff988580dc6238 RCX: 00000000ffffdfff
+# RDX: 0000000000000000 RSI: ffffb26500ddf988 RDI: 0000000000000001
+# RBP: ffff988580d90000 R08: 0000000000009ffb R09: 00000000ffffdfff
+# R10: 00000000ffffdfff R11: ffffffffa188c8a0 R12: ffff988580d90d68
+# R13: 0000000000000000 R14: ffff98858093c9b0 R15: 0000000000000000
+# FS: 00007f6b0c60d6c0(0000) GS:ffff9885fbd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000055eea27460a8 CR3: 0000000105ac6000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# intel_pps_vdd_on+0x78/0x150
+# intel_dp_detect+0xb9/0x7c0
+# drm_helper_probe_detect+0x47/0xb0
+# drm_helper_probe_single_connector_modes+0x40b/0x660
+# drm_mode_getconnector+0x369/0x490
+# drm_ioctl_kernel+0xad/0x110
+# drm_ioctl+0x235/0x4b0
+# __x64_sys_ioctl+0x8f/0xc0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7f6b0d637c5b
+# Code: 00 48 89 44 24 18 31 c0 48 8d 44 24 60 c7 04 24 10 00 00 00 48 89 44 24 08 48 8d 44 24 20 48 89 44 24 10 b8 10 00 00 00 0f 05 <89> c2 3d 00 f0 ff ff 77 1c 48 8b 44 24 18 64 48 2b 04 25 28 00 00
+# RSP: 002b:00007ffc42b13670 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+# RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f6b0d637c5b
+# RDX: 00007ffc42b13710 RSI: 00000000c05064a7 RDI: 0000000000000003
+# RBP: 00007ffc42b13710 R08: 0000000000000007 R09: 000055eea2741720
+# R10: 9fb9be4e118bfa43 R11: 0000000000000246 R12: 00000000c05064a7
+# R13: 0000000000000003 R14: 00000000c05064a7 R15: 00007ffc42b13710
+# </TASK>
+# irq event stamp: 0
+# hardirqs last enabled at (0): [<0000000000000000>] 0x0
+# hardirqs last disabled at (0): [<ffffffff9fd0a283>] copy_process+0xaf3/0x2eb0
+# softirqs last enabled at (0): [<ffffffff9fd0a283>] copy_process+0xaf3/0x2eb0
+# softirqs last disabled at (0): [<0000000000000000>] 0x0
+# ---[ end trace 0000000000000000 ]---
+# i915 0000:00:02.0: [drm] *ERROR* PPS state mismatch
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* Failed to read DPCD register 0x92
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# i915 0000:00:02.0: [drm] *ERROR* AUX A/DDI A/PHY A: not done (status 0x00000000)
+# [IGT] kms_lease: starting subtest page-flip-implicit-plane
+# [IGT] kms_lease: starting dynamic subtest pipe-A-eDP-1
+# i915 0000:00:02.0: [drm] *ERROR* [CRTC:70:pipe A] flip_done timed out
+# i915 0000:00:02.0: [drm] *ERROR* flip_done timed out
+# i915 0000:00:02.0: [drm] *ERROR* [CRTC:70:pipe A] commit wait timed out
+# INFO: task kms_lease:1271 blocked for more than 122 seconds.
+# Tainted: G W 6.13.0-rc2-gfb6fd142a8eb #1
+# "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+# task:kms_lease state:D stack:12720 pid:1271 tgid:1271 ppid:184 flags:0x00004006
+# Call Trace:
+# <TASK>
+# __schedule+0x495/0xd20
+# schedule+0x35/0x120
+# drm_vblank_work_flush+0x91/0xf0
+# intel_wait_for_vblank_workers+0x58/0x90
+# intel_atomic_commit_tail+0xa86/0x15f0
+# intel_atomic_commit+0x257/0x2a0
+# drm_atomic_commit+0xac/0xe0
+# set_property_atomic+0x91/0x160
+# drm_mode_obj_set_property_ioctl+0xc1/0x110
+# drm_ioctl_kernel+0xad/0x110
+# drm_ioctl+0x235/0x4b0
+# __x64_sys_ioctl+0x8f/0xc0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7f6b0d637c5b
+# RSP: 002b:00007ffc42b13650 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+# RAX: ffffffffffffffda RBX: 00007ffc42b14270 RCX: 00007f6b0d637c5b
+# RDX: 00007ffc42b136e0 RSI: 00000000c01864ba RDI: 0000000000000003
+# RBP: 00007ffc42b136e0 R08: 0000000000000000 R09: 000055eea2741750
+# R10: 00007f6b0d71d310 R11: 0000000000000246 R12: 00000000c01864ba
+# R13: 0000000000000003 R14: 0000000000000000 R15: 000055eea27378d0
+# </TASK>
+# INFO: lockdep is turned off.
+
+# The test alternates between failing and timing out on reruns, causing the pipeline to fail
+gen9_exec_parse@unaligned-access
diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt
index ed9f7b576843..0e08fff741aa 100644
--- a/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt
@@ -1,15 +1,16 @@
-core_setmaster@master-drop-set-user,Fail
+drm_fdinfo@busy-check-all,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
-i915_pm_rpm@gem-execbuf-stress,Timeout
-i915_pm_rpm@module-reload,Fail
-kms_flip@plain-flip-fb-recreate,Fail
+kms_flip@dpms-off-confusion,Fail
+kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset,Fail
+kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
@@ -17,6 +18,7 @@ kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
@@ -26,19 +28,15 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_frontbuffer_tracking@fbc-rgb565-draw-blt,Timeout
kms_lease@lease-uevent,Fail
-kms_pm_rpm@legacy-planes,Timeout
-kms_pm_rpm@legacy-planes-dpms,Timeout
kms_pm_rpm@modeset-stress-extra-wait,Timeout
-kms_pm_rpm@universal-planes,Timeout
-kms_pm_rpm@universal-planes-dpms,Timeout
+kms_rotation_crc@bad-pixel-format,Fail
kms_rotation_crc@multiplane-rotation,Fail
kms_rotation_crc@multiplane-rotation-cropping-bottom,Fail
kms_rotation_crc@multiplane-rotation-cropping-top,Fail
perf@i915-ref-count,Fail
-perf_pmu@busy-accuracy-50,Fail
perf_pmu@module-unload,Fail
-perf_pmu@most-busy-idle-check-all,Fail
perf_pmu@rc6,Crash
sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt
index 5c3ef4486b9d..8b2aefd1cc52 100644
--- a/drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt
@@ -1,13 +1,6 @@
# Board Name: acer-cb317-1h-c3z6-dedede
-# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12475
+# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13349
# Failure Rate: 100
-# IGT Version: 1.28-ga73311079
-# Linux Version: 6.12.0-rc1
-kms_flip@flip-vs-panning-interruptible
-
-# Board Name: acer-cb317-1h-c3z6-dedede
-# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12476
-# Failure Rate: 100
-# IGT Version: 1.28-ga73311079
-# Linux Version: 6.12.0-rc1
-kms_universal_plane@cursor-fb-leak
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_flip@flip-vs-fences-interruptible
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
index 6ec2f83ffe13..dc722d6a774e 100644
--- a/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
@@ -37,3 +37,115 @@ tools_test.*
# GPU hang
sysfs_timeslice_.*
sysfs_heartbeat_.*
+
+device_reset@unbind-reset-rebind
+# [IGT] device_reset: starting subtest unbind-reset-rebind
+# i915 0000:00:02.0: [drm] Found kabylake (device ID 5917) display version 9.00 stepping C0
+# i915 0000:00:02.0: vgaarb: deactivate vga console
+# i915 0000:00:02.0: [drm] *ERROR* DPLL0 not locked
+# ------------[ cut here ]------------
+# i915 0000:00:02.0: [drm] drm_WARN_ON((val & (1 << 30)) == 0)
+# WARNING: CPU: 4 PID: 472 at drivers/gpu/drm/i915/display/intel_cdclk.c:944 skl_get_cdclk+0x1ca/0x360
+# Modules linked in:
+# CPU: 4 UID: 0 PID: 472 Comm: device_reset Not tainted 6.13.0-rc2-ge95c88d68ac3 #1
+# Hardware name: HP Nami/Nami, BIOS 09/19/2019
+# RIP: 0010:skl_get_cdclk+0x1ca/0x360
+# Code: 67 50 4d 85 e4 0f 84 89 01 00 00 e8 e0 16 13 00 48 c7 c1 a0 71 5f 9a 4c 89 e2 48 c7 c7 67 56 69 9a 48 89 c6 e8 67 a1 72 ff 90 <0f> 0b 90 90 8b 43 08 8b 53 04 89 43 0c 89 03 85 d2 0f 85 26 ff ff
+# RSP: 0018:ffffacba40f5fbd0 EFLAGS: 00010282
+# RAX: 0000000000000000 RBX: ffff953182571058 RCX: 00000000ffffdfff
+# RDX: 0000000000000000 RSI: ffffacba40f5fa80 RDI: 0000000000000001
+# RBP: ffff953182570d68 R08: 0000000000009ffb R09: 00000000ffffdfff
+# R10: 00000000ffffdfff R11: ffffffff9a88c8a0 R12: ffff953180d3f4a0
+# R13: 0000000000000000 R14: ffff953182570d68 R15: 0000000000000000
+# FS: 00007f74989c46c0(0000) GS:ffff9534eed00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 00007f1df530eff8 CR3: 0000000102302002 CR4: 00000000003706f0
+# DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+# DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+# Call Trace:
+# <TASK>
+# intel_update_cdclk+0x1c/0x90
+# intel_cdclk_init_hw+0x46e/0x4c0
+# intel_power_domains_init_hw+0x3eb/0x770
+# intel_display_driver_probe_noirq+0x85/0x230
+# i915_driver_probe+0x66d/0xc60
+# local_pci_probe+0x3a/0x90
+# pci_device_probe+0xb5/0x180
+# really_probe+0xc9/0x2b0
+# __driver_probe_device+0x6e/0x110
+# device_driver_attach+0x42/0xa0
+# bind_store+0x71/0xc0
+# kernfs_fop_write_iter+0x121/0x1c0
+# vfs_write+0x2a8/0x530
+# ksys_write+0x6f/0xf0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7f7499e662c0
+# Code: 40 00 48 8b 15 41 9b 0d 00 f7 d8 64 89 02 48 c7 c0 ff ff ff ff eb b7 0f 1f 00 80 3d 21 23 0e 00 00 74 17 b8 01 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 58 c3 0f 1f 80 00 00 00 00 48 83 ec 28 48 89
+# RSP: 002b:00007ffffaf36a68 EFLAGS: 00000202 ORIG_RAX: 0000000000000001
+# RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f7499e662c0
+# RDX: 000000000000000c RSI: 00007ffffaf36b50 RDI: 0000000000000003
+# RBP: 000000000000000c R08: 0000000000000007 R09: 000055b37d0964a0
+# R10: 0000000000000000 R11: 0000000000000202 R12: 00007ffffaf36b50
+# R13: 0000000000000003 R14: 000055b3526a7220 R15: 00007f749a0dc020
+# </TASK>
+# irq event stamp: 28591
+# hardirqs last enabled at (28625): [<ffffffff98da3e4d>] __up_console_sem+0x4d/0x60
+# hardirqs last disabled at (28658): [<ffffffff98da3e32>] __up_console_sem+0x32/0x60
+# softirqs last enabled at (28688): [<ffffffff98d17980>] handle_softirqs+0x310/0x3f0
+# softirqs last disabled at (28701): [<ffffffff98d17bc1>] __irq_exit_rcu+0xa1/0xc0
+# ---[ end trace 0000000000000000 ]---
+# ------------[ cut here ]------------
+# i915 0000:00:02.0: [drm] drm_WARN_ON((val & (1 << 30)) == 0)
+# WARNING: CPU: 1 PID: 472 at drivers/gpu/drm/i915/display/intel_cdclk.c:944 skl_get_cdclk+0x1ca/0x360
+# Modules linked in:
+# CPU: 1 UID: 0 PID: 472 Comm: device_reset Tainted: G W 6.13.0-rc2-ge95c88d68ac3 #1
+# Tainted: [W]=WARN
+# Hardware name: HP Nami/Nami, BIOS 09/19/2019
+# RIP: 0010:skl_get_cdclk+0x1ca/0x360
+# Code: 67 50 4d 85 e4 0f 84 89 01 00 00 e8 e0 16 13 00 48 c7 c1 a0 71 5f 9a 4c 89 e2 48 c7 c7 67 56 69 9a 48 89 c6 e8 67 a1 72 ff 90 <0f> 0b 90 90 8b 43 08 8b 53 04 89 43 0c 89 03 85 d2 0f 85 26 ff ff
+# RSP: 0018:ffffacba40f5fb98 EFLAGS: 00010286
+# RAX: 0000000000000000 RBX: ffffacba40f5fbbc RCX: 00000000ffffdfff
+# RDX: 0000000000000000 RSI: ffffacba40f5fa48 RDI: 0000000000000001
+# RBP: ffff953182570d68 R08: 0000000000009ffb R09: 00000000ffffdfff
+# R10: 00000000ffffdfff R11: ffffffff9a88c8a0 R12: ffff953180d3f4a0
+# R13: ffff953182573208 R14: ffff953182570d68 R15: 0000000000000000
+# FS: 00007f74989c46c0(0000) GS:ffff9534eec40000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 00007f2b6598ee00 CR3: 0000000102302001 CR4: 00000000003706f0
+# DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+# DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+# Call Trace:
+# <TASK>
+# gen9_disable_dc_states+0x86/0x300
+# intel_power_well_enable+0x56/0x70
+# __intel_display_power_get_domain.part.0+0x4d/0x70
+# intel_power_domains_init_hw+0x95/0x770
+# intel_display_driver_probe_noirq+0x85/0x230
+# i915_driver_probe+0x66d/0xc60
+# local_pci_probe+0x3a/0x90
+# pci_device_probe+0xb5/0x180
+# really_probe+0xc9/0x2b0
+# __driver_probe_device+0x6e/0x110
+# device_driver_attach+0x42/0xa0
+# bind_store+0x71/0xc0
+# kernfs_fop_write_iter+0x121/0x1c0
+# vfs_write+0x2a8/0x530
+# ksys_write+0x6f/0xf0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7f7499e662c0
+# Code: 40 00 48 8b 15 41 9b 0d 00 f7 d8 64 89 02 48 c7 c0 ff ff ff ff eb b7 0f 1f 00 80 3d 21 23 0e 00 00 74 17 b8 01 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 58 c3 0f 1f 80 00 00 00 00 48 83 ec 28 48 89
+# RSP: 002b:00007ffffaf36a68 EFLAGS: 00000202 ORIG_RAX: 0000000000000001
+# RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f7499e662c0
+# RDX: 000000000000000c RSI: 00007ffffaf36b50 RDI: 0000000000000003
+# RBP: 000000000000000c R08: 0000000000000007 R09: 000055b37d0964a0
+# R10: 0000000000000000 R11: 0000000000000202 R12: 00007ffffaf36b50
+# R13: 0000000000000003 R14: 000055b3526a7220 R15: 00007f749a0dc020
+# </TASK>
+# irq event stamp: 29605
+# hardirqs last enabled at (29617): [<ffffffff98da3e4d>] __up_console_sem+0x4d/0x60
+# hardirqs last disabled at (29634): [<ffffffff98da3e32>] __up_console_sem+0x32/0x60
+# softirqs last enabled at (29630): [<ffffffff98d17980>] handle_softirqs+0x310/0x3f0
+# softirqs last disabled at (29625): [<ffffffff98d17bc1>] __irq_exit_rcu+0xa1/0xc0
+# ---[ end trace 0000000000000000 ]---
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
index 461ef69ef08a..93d42b146df9 100644
--- a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
@@ -1,64 +1,24 @@
-api_intel_allocator@fork-simple-stress-signal,Timeout
-api_intel_allocator@open-vm,Timeout
-api_intel_allocator@simple-allocator,Timeout
-api_intel_bb@lot-of-buffers,Timeout
-api_intel_bb@object-reloc-keep-cache,Timeout
-api_intel_bb@offset-control,Timeout
-api_intel_bb@render-ccs,Timeout
-api_intel_bb@reset-bb,Timeout
-core_auth@basic-auth,Timeout
-core_hotunplug@hotrebind,Timeout
-core_setmaster@master-drop-set-user,Fail
-debugfs_test@read_all_entries_display_on,Timeout
-drm_read@empty-block,Timeout
-dumb_buffer@create-clear,Timeout
-dumb_buffer@invalid-bpp,Timeout
-gen3_render_tiledx_blits,Timeout
-gen7_exec_parse@basic-allocation,Timeout
-gen9_exec_parse@batch-invalid-length,Timeout
-gen9_exec_parse@bb-secure,Timeout
-gen9_exec_parse@secure-batches,Timeout
-gen9_exec_parse@shadow-peek,Timeout
-gen9_exec_parse@unaligned-jump,Timeout
-i915_getparams_basic@basic-subslice-total,Timeout
-i915_hangman@gt-engine-hang,Timeout
+api_intel_allocator@reopen,Timeout
+api_intel_bb@destroy-bb,Timeout
+core_hotunplug@hotrebind-lateclose,Timeout
+drm_read@short-buffer-block,Timeout
+dumb_buffer@map-valid,Timeout
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
-i915_pciid,Timeout
-i915_pipe_stress@stress-xrgb8888-ytiled,Timeout
-i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rps@engine-order,Timeout
-i915_pm_rps@thresholds-idle-park,Timeout
-i915_query@engine-info,Timeout
-i915_query@query-topology-kernel-writes,Timeout
-i915_query@test-query-geometry-subslices,Timeout
kms_lease@lease-uevent,Fail
kms_rotation_crc@multiplane-rotation,Fail
perf@i915-ref-count,Fail
perf_pmu@busy,Timeout
perf_pmu@enable-race,Timeout
-perf_pmu@event-wait,Timeout
-perf_pmu@faulting-read,Timeout
-perf_pmu@gt-awake,Timeout
-perf_pmu@interrupts,Timeout
perf_pmu@module-unload,Fail
-perf_pmu@most-busy-idle-check-all,Timeout
perf_pmu@rc6,Crash
-perf_pmu@render-node-busy-idle,Fail
perf_pmu@semaphore-wait-idle,Timeout
-prime_busy@after,Timeout
-prime_mmap@test_aperture_limit,Timeout
-prime_mmap@test_map_unmap,Timeout
prime_mmap@test_refcounting,Timeout
-prime_self_import@basic-with_one_bo,Timeout
-sriov_basic@enable-vfs-autoprobe-off,Timeout
-syncobj_basic@bad-destroy,Timeout
-syncobj_basic@bad-flags-fd-to-handle,Timeout
-syncobj_basic@create-signaled,Timeout
-syncobj_eventfd@invalid-bad-pad,Timeout
-syncobj_eventfd@timeline-wait-before-signal,Timeout
+sriov_basic@enable-vfs-bind-unbind-each-numvfs-all,Timeout
+syncobj_basic@illegal-fd-to-handle,Timeout
syncobj_wait@invalid-multi-wait-unsubmitted-signaled,Timeout
syncobj_wait@invalid-signal-illegal-handle,Timeout
syncobj_wait@invalid-single-wait-all-unsubmitted,Timeout
@@ -66,7 +26,6 @@ syncobj_wait@multi-wait-all-submitted,Timeout
syncobj_wait@multi-wait-for-submit-submitted-signaled,Timeout
syncobj_wait@wait-any-complex,Timeout
syncobj_wait@wait-delayed-signal,Timeout
-template@A,Timeout
xe_module_load@force-load,Fail
xe_module_load@load,Fail
xe_module_load@many-reload,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
index b47df5855e8d..938377896841 100644
--- a/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
@@ -35,3 +35,16 @@ syncobj_wait.*
# Kernel panic and test hangs with multiple kms tests
kms_.*
+
+# The test alternates between failing and timing out on reruns, causing the pipeline to fail
+api_intel_allocator@reopen
+api_intel_bb@destroy-bb
+core_hotunplug@hotrebind-lateclose
+dumb_buffer@map-valid
+i915_pm_rps@engine-order
+perf_pmu@busy
+perf_pmu@enable-race
+perf_pmu@semaphore-wait-idle
+prime_mmap@test_refcounting
+sriov_basic@enable-vfs-bind-unbind-each-numvfs-all
+syncobj_basic@illegal-fd-to-handle
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
index 0ce240e3aa07..1cb6978c86dc 100644
--- a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
@@ -1,5 +1,4 @@
core_setmaster@master-drop-set-shared-fd,Fail
-core_setmaster@master-drop-set-user,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
@@ -7,12 +6,10 @@ i915_module_load@resize-bar,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
i915_pm_rpm@system-suspend-execbuf,Timeout
-i915_pm_rps@engine-order,Fail
-kms_big_fb@linear-16bpp-rotate-180,Timeout
-kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Timeout
+kms_async_flips@test-time-stamp,Timeout
+kms_ccs@crc-sprite-planes-basic-y-tiled-ccs,Timeout
kms_dirtyfb@default-dirtyfb-ioctl,Fail
kms_dirtyfb@fbc-dirtyfb-ioctl,Fail
-kms_fb_coherency@memset-crc,Crash
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
@@ -34,18 +31,12 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
kms_frontbuffer_tracking@fbc-tiling-linear,Fail
-kms_frontbuffer_tracking@fbc-1p-indfb-fliptrack-mmap-gtt,Timeout
kms_lease@lease-uevent,Fail
+kms_lease@page-flip-implicit-plane,Timeout
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
-kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout
-kms_pm_rpm@modeset-stress-extra-wait,Timeout
-kms_pm_rpm@universal-planes,Timeout
-kms_pm_rpm@universal-planes-dpms,Timeout
-kms_rotation_crc@primary-rotation-180,Timeout
-kms_vblank@query-forked-hang,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt
index 60b8d1c64e70..19a289617080 100644
--- a/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt
@@ -4,3 +4,10 @@
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
kms_pm_rpm@modeset-lpsp-stress
+
+# Board Name: dell-latitude-5400-8665U-sarien
+# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13351
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+gen9_exec_parse@unaligned-access
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
index 8e0efc80d510..4f176c04ec4e 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
@@ -1,8 +1,5 @@
-fbdev@eof,Fail
-fbdev@read,Fail
kms_3d,Fail
kms_bw@connected-linear-tiling-1-displays-1920x1080p,Fail
-kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail
kms_bw@connected-linear-tiling-1-displays-2560x1440p,Fail
kms_bw@connected-linear-tiling-1-displays-3840x2160p,Fail
kms_bw@connected-linear-tiling-2-displays-1920x1080p,Fail
@@ -22,6 +19,6 @@ kms_cursor_legacy@cursor-vs-flip-atomic,Fail
kms_cursor_legacy@cursor-vs-flip-legacy,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
-kms_lease@lease-uevent,Fail
-kms_rmfb@close-fd,Fail
+kms_flip@flip-vs-suspend,Fail
kms_flip@flip-vs-suspend-interruptible,Fail
+kms_lease@lease-uevent,Fail
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
index 2e5bf6ae25f2..2956567c3048 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
@@ -39,3 +39,10 @@ kms_cursor_legacy@cursor-vs-flip-atomic-transitions
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
kms_prop_blob@invalid-set-prop
+
+# Board Name: mt8173-elm-hana
+# Bug Report: https://lore.kernel.org/dri-devel/c0b4cded-0971-4abb-9965-2acf4602d7cd@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_bw@connected-linear-tiling-1-displays-2160x1440p
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
index 8198e06344a3..d0db51874aef 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
@@ -15,3 +15,8 @@ tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
core_hotunplug.*
+
+# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
+# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
+kms_display_modes@extended-mode-basic
+kms_display_modes@mst-extended-mode-negative
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
index 845f852bb4a0..5a063361d7f2 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
@@ -1,16 +1,12 @@
-core_setmaster@master-drop-set-shared-fd,Fail
+core_setmaster@master-drop-set-user,Fail
dumb_buffer@create-clear,Crash
-fbdev@eof,Fail
-fbdev@pan,Fail
-fbdev@read,Fail
-fbdev@unaligned-read,Fail
kms_bw@connected-linear-tiling-1-displays-1920x1080p,Fail
-kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail
kms_bw@connected-linear-tiling-1-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-1-displays-3840x2160p,Fail
kms_bw@linear-tiling-1-displays-1920x1080p,Fail
+kms_bw@linear-tiling-1-displays-2160x1440p,Fail
kms_bw@linear-tiling-1-displays-3840x2160p,Fail
-kms_color@invalid-gamma-lut-sizes,Fail
+kms_cursor_legacy@cursor-vs-flip-atomic,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
kms_flip@flip-vs-suspend,Fail
kms_lease@lease-uevent,Fail
-kms_rmfb@close-fd,Fail
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
index 8198e06344a3..d0db51874aef 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
@@ -15,3 +15,8 @@ tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
core_hotunplug.*
+
+# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
+# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
+kms_display_modes@extended-mode-basic
+kms_display_modes@mst-extended-mode-negative
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
index 066d24ee3e08..7752adff05c1 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
@@ -1,4 +1,7 @@
kms_3d,Fail
+kms_cursor_legacy@forked-bo,Fail
+kms_cursor_legacy@forked-move,Fail
+kms_cursor_legacy@single-bo,Fail
kms_cursor_legacy@torture-bo,Fail
kms_force_connector_basic@force-edid,Fail
kms_hdmi_inject@inject-4k,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt
index 6dbc2080347d..e4a8f8352cd6 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt
@@ -15,4 +15,3 @@ kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
kms_plane_alpha_blend@alpha-7efc,Fail
kms_plane_alpha_blend@coverage-7efc,Fail
kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
-kms_rmfb@close-fd,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
index d74e04405e65..8910afb6acf2 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
@@ -18,3 +18,17 @@ msm/msm_shrink@copy-gpu-oom-32
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
msm/msm_shrink@copy-gpu-oom-8
+
+# Board Name: sc7180-trogdor-kingoftown
+# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/68
+# Failure Rate: 40
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_lease@page-flip-implicit-plane
+
+# Board Name: sc7180-trogdor-kingoftown
+# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/69
+# Failure Rate: 20
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_plane@plane-position-hole-dpms
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
index c2833eee1c4b..478d7c161616 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
@@ -23,3 +23,8 @@ kms_flip@2x-wf_vblank-ts-check
# Hangs the machine
kms_cursor_crc@cursor-random-max-size
+
+# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
+# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
+kms_display_modes@extended-mode-basic
+kms_display_modes@mst-extended-mode-negative
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt
index 6dbc2080347d..e4a8f8352cd6 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt
@@ -15,4 +15,3 @@ kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
kms_plane_alpha_blend@alpha-7efc,Fail
kms_plane_alpha_blend@coverage-7efc,Fail
kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
-kms_rmfb@close-fd,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
index 7c69c1f1d55b..ef9318afcd89 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
@@ -17,3 +17,8 @@ tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
core_hotunplug.*
+
+# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
+# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
+kms_display_modes@extended-mode-basic
+kms_display_modes@mst-extended-mode-negative
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
index fa8c7e663858..7a2ab58b706f 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
@@ -1,4 +1,3 @@
-drm_read@invalid-buffer,Fail
kms_color@ctm-0-25,Fail
kms_color@ctm-0-50,Fail
kms_color@ctm-0-75,Fail
@@ -28,4 +27,3 @@ kms_plane_alpha_blend@coverage-7efc,Fail
kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
kms_plane_cursor@overlay,Fail
kms_plane_cursor@viewport,Fail
-kms_rmfb@close-fd,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
index 94783cafc21a..2ce7f7e23a01 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
@@ -30,3 +30,8 @@ kms_cursor_crc.*
# IGT Version: 1.28-ga73311079
# Linux Version: 6.11.0-rc2
kms_content_protection@uevent
+
+# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
+# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
+kms_display_modes@extended-mode-basic
+kms_display_modes@mst-extended-mode-negative
diff --git a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt
index 4892c0c70a6d..8d26b23133aa 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt
@@ -7,9 +7,4 @@ kms_cursor_legacy@torture-bo,Fail
kms_cursor_legacy@torture-move,Fail
kms_hdmi_inject@inject-4k,Fail
kms_lease@lease-uevent,Fail
-kms_plane_alpha_blend@alpha-7efc,Fail
-kms_plane_alpha_blend@alpha-basic,Fail
-kms_plane_alpha_blend@alpha-opaque-fb,Fail
-kms_plane_alpha_blend@alpha-transparent-fb,Fail
-kms_plane_alpha_blend@constant-alpha-max,Fail
msm/msm_recovery@gpu-fault-parallel,Fail
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
index 90282dfa19f4..ba9160d4d8eb 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
@@ -1,24 +1,10 @@
+core_setmaster@master-drop-set-root,Crash
+core_setmaster@master-drop-set-shared-fd,Crash
core_setmaster@master-drop-set-user,Crash
+core_setmaster_vs_auth,Crash
dumb_buffer@create-clear,Crash
fbdev@pan,Crash
-kms_bw@linear-tiling-2-displays-1920x1080p,Fail
-kms_cursor_crc@cursor-onscreen-32x10,Crash
-kms_cursor_crc@cursor-onscreen-32x32,Crash
-kms_cursor_crc@cursor-onscreen-64x64,Crash
-kms_cursor_crc@cursor-random-32x10,Crash
-kms_cursor_crc@cursor-sliding-32x10,Crash
-kms_cursor_crc@cursor-sliding-32x32,Crash
-kms_cursor_crc@cursor-sliding-64x21,Crash
-kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail
-kms_cursor_legacy@cursor-vs-flip-legacy,Fail
-kms_cursor_legacy@flip-vs-cursor-crc-atomic,Crash
-kms_flip@flip-vs-panning-vs-hang,Crash
-kms_invalid_mode@int-max-clock,Crash
-kms_lease@invalid-create-leases,Fail
-kms_pipe_crc_basic@read-crc-frame-sequence,Crash
-kms_plane@pixel-format,Crash
-kms_plane@pixel-format-source-clamping,Crash
+kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail
+kms_flip@flip-vs-modeset-vs-hang,Crash
kms_prop_blob@invalid-set-prop,Crash
-kms_properties@get_properties-sanity-atomic,Crash
-kms_properties@get_properties-sanity-non-atomic,Crash
-kms_rmfb@close-fd,Crash
+kms_prop_blob@invalid-set-prop-any,Crash
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
index 83a38853b4af..2803d0d80192 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
@@ -2,6 +2,7 @@ dumb_buffer@create-clear,Crash
kms_atomic_transition@modeset-transition,Fail
kms_atomic_transition@modeset-transition-fencing,Fail
kms_atomic_transition@plane-toggle-modeset-transition,Fail
+kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail
kms_color@gamma,Fail
kms_color@legacy-gamma,Fail
kms_cursor_crc@cursor-alpha-opaque,Fail
@@ -24,6 +25,7 @@ kms_cursor_crc@cursor-rapid-movement-32x32,Fail
kms_cursor_crc@cursor-rapid-movement-64x21,Fail
kms_cursor_crc@cursor-rapid-movement-64x64,Fail
kms_cursor_crc@cursor-size-change,Fail
+kms_cursor_crc@cursor-size-hints,Fail
kms_cursor_crc@cursor-sliding-32x10,Fail
kms_cursor_crc@cursor-sliding-32x32,Fail
kms_cursor_crc@cursor-sliding-64x21,Fail
@@ -32,6 +34,7 @@ kms_cursor_edge_walk@64x64-left-edge,Fail
kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail
kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail
kms_cursor_legacy@cursor-vs-flip-atomic,Fail
+kms_cursor_legacy@cursor-vs-flip-atomic-transitions,Fail
kms_cursor_legacy@cursor-vs-flip-legacy,Fail
kms_cursor_legacy@cursor-vs-flip-toggle,Fail
kms_cursor_legacy@flip-vs-cursor-atomic,Fail
@@ -41,14 +44,11 @@ kms_cursor_legacy@flip-vs-cursor-legacy,Fail
kms_cursor_legacy@long-nonblocking-modeset-vs-cursor-atomic,Fail
kms_flip@basic-flip-vs-wf_vblank,Fail
kms_flip@blocking-wf_vblank,Fail
-kms_flip@flip-vs-absolute-wf_vblank,Fail
-kms_flip@flip-vs-blocking-wf-vblank,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
kms_flip@flip-vs-panning,Fail
kms_flip@flip-vs-panning-interruptible,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
kms_flip@modeset-vs-vblank-race,Fail
-kms_flip@modeset-vs-vblank-race-interruptible,Fail
kms_flip@plain-flip-fb-recreate,Fail
kms_flip@plain-flip-fb-recreate-interruptible,Fail
kms_flip@plain-flip-ts-check,Fail
@@ -64,14 +64,11 @@ kms_pipe_crc_basic@nonblocking-crc,Fail
kms_pipe_crc_basic@nonblocking-crc-frame-sequence,Fail
kms_pipe_crc_basic@read-crc,Fail
kms_pipe_crc_basic@read-crc-frame-sequence,Fail
-kms_plane@pixel-format,Crash
-kms_plane@pixel-format-source-clamping,Crash
+kms_plane@pixel-format,Fail
+kms_plane@pixel-format-source-clamping,Fail
kms_plane@plane-panning-bottom-right,Fail
kms_plane@plane-panning-top-left,Fail
kms_plane@plane-position-covered,Fail
kms_plane@plane-position-hole,Fail
-kms_plane@plane-position-hole-dpms,Fail
kms_plane_cursor@primary,Fail
-kms_plane_multiple@tiling-none,Fail
-kms_rmfb@close-fd,Fail
kms_universal_plane@universal-plane-functional,Fail
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
index 56f7d4f1ed15..348b4ce7eb4b 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
@@ -74,3 +74,59 @@ kms_bw@linear-tiling-2-displays-2160x1440p
# IGT Version: 1.28-ga73311079
# Linux Version: 6.11.0-rc5
kms_flip@flip-vs-expired-vblank
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://lore.kernel.org/dri-devel/f944dd08-c88c-49ae-aff0-274374550a93@collabora.com/T/#u
+# Failure Rate: 40
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_bw@linear-tiling-1-displays-2160x1440p
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://lore.kernel.org/dri-devel/afa2d3bf-29f2-488d-8cc9-f30d461444b0@collabora.com/T/#u
+# Failure Rate: 80
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_plane_multiple@tiling-none
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://lore.kernel.org/dri-devel/6fdaa97f-c1a5-4216-831f-dbb7c5f90498@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_bw@linear-tiling-1-displays-1920x1080p
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://lore.kernel.org/dri-devel/616aa015-9574-4527-9d07-d8d698bbcc3c@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_plane@plane-position-hole-dpms
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://lore.kernel.org/dri-devel/7a1b888f-d7db-4ed7-96cd-3975ace837fb@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_flip@flip-vs-absolute-wf_vblank
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://lore.kernel.org/dri-devel/f17fffb6-abc4-464e-8465-395311b01f6a@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_flip@flip-vs-blocking-wf-vblank
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://lore.kernel.org/dri-devel/9b590b26-1bf9-4951-b6a3-ef6c67e6a1c6@collabora.com/T/#u
+# Failure Rate: 60
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_bw@linear-tiling-2-displays-1920x1080p
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://lore.kernel.org/dri-devel/059545fa-65b1-4f5c-a13e-4d2898679f51@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.29-g33adea9eb
+# Linux Version: 6.13.0-rc2
+kms_flip@modeset-vs-vblank-race-interruptible
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
index eb16b29dee48..e8e994d92557 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
@@ -18,3 +18,8 @@ tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
core_hotunplug.*
+
+# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
+# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
+kms_display_modes@extended-mode-basic
+kms_display_modes@mst-extended-mode-negative
diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
index 9c9e048725f8..adbcdd0f28d2 100644
--- a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
@@ -23,3 +23,8 @@ tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
core_hotunplug.*
+
+# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
+# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
+kms_display_modes@extended-mode-basic
+kms_display_modes@mst-extended-mode-negative
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt b/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt
index 71c02104a683..6ebcc7d89fbd 100644
--- a/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt
@@ -1,12 +1,5 @@
-kms_cursor_crc@cursor-rapid-movement-128x128,Fail
-kms_cursor_crc@cursor-rapid-movement-128x42,Fail
-kms_cursor_crc@cursor-rapid-movement-256x256,Fail
kms_cursor_crc@cursor-rapid-movement-256x85,Fail
kms_cursor_crc@cursor-rapid-movement-32x10,Fail
-kms_cursor_crc@cursor-rapid-movement-32x32,Fail
-kms_cursor_crc@cursor-rapid-movement-512x170,Fail
-kms_cursor_crc@cursor-rapid-movement-512x512,Fail
-kms_cursor_crc@cursor-rapid-movement-64x21,Fail
kms_cursor_crc@cursor-rapid-movement-64x64,Fail
kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail
kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail
@@ -20,8 +13,9 @@ kms_cursor_legacy@flip-vs-cursor-crc-legacy,Fail
kms_cursor_legacy@flip-vs-cursor-legacy,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
+kms_flip@flip-vs-suspend,Fail
+kms_flip@flip-vs-suspend-interruptible,Fail
kms_lease@lease-uevent,Fail
-kms_pipe_crc_basic@nonblocking-crc,Fail
kms_writeback@writeback-check-output,Fail
kms_writeback@writeback-check-output-XRGB2101010,Fail
kms_writeback@writeback-fb-id,Fail
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
index b3d16e82e9a2..319789806271 100644
--- a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
@@ -1,8 +1,9 @@
# keeps printing vkms_vblank_simulate: vblank timer overrun and never ends
kms_invalid_mode@int-max-clock
-# Kernel panic
-kms_cursor_crc@cursor-rapid-movement-32x10
+# kernel panic seen with kms_cursor_crc tests
+kms_cursor_crc.*
+# kms_cursor_crc@cursor-rapid-movement-32x10
# Oops: 0000 [#1] PREEMPT SMP NOPTI
# CPU: 0 PID: 2635 Comm: kworker/u8:13 Not tainted 6.9.0-rc7-g40935263a1fd #1
# Hardware name: ChromiumOS crosvm, BIOS 0
@@ -52,7 +53,7 @@ kms_cursor_crc@cursor-rapid-movement-32x10
# FS: 0000000000000000(0000) GS:ffffa2ad2bc00000(0000) knlGS:0000000000000000
# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
-kms_cursor_crc@cursor-rapid-movement-256x85
+#kms_cursor_crc@cursor-rapid-movement-256x85
# [drm:drm_crtc_add_crc_entry] *ERROR* Overflow of CRC buffer, userspace reads too slow.
# Oops: 0000 [#1] PREEMPT SMP NOPTI
# CPU: 1 PID: 10 Comm: kworker/u8:0 Not tainted 6.9.0-rc7-g646381cde463 #1
@@ -104,7 +105,7 @@ kms_cursor_crc@cursor-rapid-movement-256x85
# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
# CR2: 0000000000000078 CR3: 0000000109b38000 CR4: 0000000000350ef0
-kms_cursor_crc@cursor-onscreen-256x256
+#kms_cursor_crc@cursor-onscreen-256x256
# Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
# CPU: 1 PID: 1913 Comm: kworker/u8:6 Not tainted 6.10.0-rc5-g8a28e73ebead #1
# Hardware name: ChromiumOS crosvm, BIOS 0
@@ -258,6 +259,535 @@ kms_cursor_edge_walk@128x128-left-edge
# CR2: 0000000000000018 CR3: 0000000101710000 CR4: 0000000000350ef0
# vkms_vblank_simulate: vblank timer overrun
+# DEBUG - Begin test kms_cursor_crc@cursor-rapid-movement-64x64
+# ------------[ cut here ]------------
+# WARNING: CPU: 1 PID: 1250 at drivers/gpu/drm/vkms/vkms_crtc.c:139 vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# Modules linked in: vkms
+# CPU: 1 UID: 0 PID: 1250 Comm: kms_cursor_crc Not tainted 6.13.0-rc2-ge95c88d68ac3 #1
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# RIP: 0010:vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# Code: f7 48 89 f3 e8 d0 bf ee ec 48 8b 83 50 01 00 00 a8 01 75 15 48 8b bb a0 01 00 00 e8 59 05 95 ec 48 89 df 5b e9 50 05 95 ec 90 <0f> 0b 90 eb e5 66 2e 0f 1f 84 00 00 00 00 00 90 90 90 90 90 90 90
+# RSP: 0018:ffff9fb640aafb08 EFLAGS: 00010202
+# RAX: ffff8e7240859e05 RBX: ffff8e7241cd6400 RCX: ffffffffae496b65
+# RDX: ffffffffad2d1f80 RSI: 0000000000000000 RDI: 0000000000000000
+# RBP: 0000000000000000 R08: 0000000000000034 R09: 0000000000000002
+# R10: 0000000047dd15a5 R11: 00000000547dd15a R12: ffff8e72590cc000
+# R13: 0000000000000000 R14: 00000000ffffffff R15: 0000000000000000
+# FS: 00007f0942ad56c0(0000) GS:ffff8e726bd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 00007f0942ad0008 CR3: 0000000118d1e000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __warn+0x8c/0x190
+# ? vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# ? report_bug+0x164/0x190
+# ? handle_bug+0x54/0x90
+# ? exc_invalid_op+0x17/0x70
+# ? asm_exc_invalid_op+0x1a/0x20
+# ? __pfx_drm_property_free_blob+0x10/0x10
+# ? vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# ? vkms_atomic_crtc_destroy_state+0x10/0x40 [vkms]
+# drm_atomic_state_default_clear+0x137/0x2f0
+# __drm_atomic_state_free+0x6c/0xb0
+# drm_atomic_helper_update_plane+0x100/0x150
+# drm_mode_cursor_universal+0x10e/0x270
+# drm_mode_cursor_common+0x115/0x240
+# ? __pfx_drm_mode_cursor_ioctl+0x10/0x10
+# drm_mode_cursor_ioctl+0x4a/0x70
+# drm_ioctl_kernel+0xb0/0x110
+# drm_ioctl+0x235/0x4b0
+# ? __pfx_drm_mode_cursor_ioctl+0x10/0x10
+# __x64_sys_ioctl+0x92/0xc0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7f0943a84cdb
+# Code: 00 48 89 44 24 18 31 c0 48 8d 44 24 60 c7 04 24 10 00 00 00 48 89 44 24 08 48 8d 44 24 20 48 89 44 24 10 b8 10 00 00 00 0f 05 <89> c2 3d 00 f0 ff ff 77 1c 48 8b 44 24 18 64 48 2b 04 25 28 00 00
+# RSP: 002b:00007fff267d68d0 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+# RAX: ffffffffffffffda RBX: 000000000000005a RCX: 00007f0943a84cdb
+# RDX: 00007fff267d6960 RSI: 00000000c01c64a3 RDI: 0000000000000005
+# RBP: 00007fff267d6960 R08: 0000000000000007 R09: 00005619a60f3450
+# R10: fe95a83851609dee R11: 0000000000000246 R12: 00000000c01c64a3
+# R13: 0000000000000005 R14: 00005619a3cd2c68 R15: 00005619a608c830
+# </TASK>
+# irq event stamp: 57793
+# hardirqs last enabled at (57799): [<ffffffffacba3e4d>] __up_console_sem+0x4d/0x60
+# hardirqs last disabled at (57804): [<ffffffffacba3e32>] __up_console_sem+0x32/0x60
+# softirqs last enabled at (45586): [<ffffffffacb17980>] handle_softirqs+0x310/0x3f0
+# softirqs last disabled at (45569): [<ffffffffacb17bc1>] __irq_exit_rcu+0xa1/0xc0
+# ---[ end trace 0000000000000000 ]---
+# Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
+# CPU: 0 UID: 0 PID: 119 Comm: kworker/u8:6 Tainted: G W 6.13.0-rc2-ge95c88d68ac3 #1
+# Tainted: [W]=WARN
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# Workqueue: vkms_composer vkms_composer_worker [vkms]
+# RIP: 0010:compose_active_planes+0x1a3/0x760 [vkms]
+# Code: db 4d 89 fa 85 c9 0f 84 32 03 00 00 4d 8b 24 da 48 c7 44 24 60 00 00 00 00 48 c7 44 24 68 00 00 00 00 49 8b 84 24 48 01 00 00 <8b> 50 1c 44 39 ea 0f 8f f3 02 00 00 44 39 68 24 0f 8e e9 02 00 00
+# RSP: 0018:ffff9fb640efbd20 EFLAGS: 00010202
+# RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000002
+# RDX: ffff8e7241926000 RSI: ffff8e7241927ffc RDI: 000000000b7fb767
+# RBP: ffff9fb640efbde0 R08: 0000000000000000 R09: 0000000000000000
+# R10: ffff8e7241b09a80 R11: 0000000000000000 R12: ffff8e7241cd6200
+# R13: 0000000000000013 R14: 0000000000000000 R15: ffff8e7241b09a80
+# FS: 0000000000000000(0000) GS:ffff8e726bc00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000000000000001c CR3: 0000000118d1e000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __die+0x1e/0x60
+# ? page_fault_oops+0x17b/0x4a0
+# ? srso_return_thunk+0x5/0x5f
+# ? exc_page_fault+0x6d/0x230
+# ? asm_exc_page_fault+0x26/0x30
+# ? compose_active_planes+0x1a3/0x760 [vkms]
+# vkms_composer_worker+0x205/0x240 [vkms]
+# process_one_work+0x201/0x6c0
+# ? lock_is_held_type+0x9e/0x110
+# worker_thread+0x17e/0x320
+# ? __pfx_worker_thread+0x10/0x10
+# kthread+0xce/0x100
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork+0x2f/0x50
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork_asm+0x1a/0x30
+# </TASK>
+# Modules linked in: vkms
+# CR2: 000000000000001c
+# ---[ end trace 0000000000000000 ]---
+# RIP: 0010:compose_active_planes+0x1a3/0x760 [vkms]
+# Code: db 4d 89 fa 85 c9 0f 84 32 03 00 00 4d 8b 24 da 48 c7 44 24 60 00 00 00 00 48 c7 44 24 68 00 00 00 00 49 8b 84 24 48 01 00 00 <8b> 50 1c 44 39 ea 0f 8f f3 02 00 00 44 39 68 24 0f 8e e9 02 00 00
+# RSP: 0018:ffff9fb640efbd20 EFLAGS: 00010202
+# RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000002
+# RDX: ffff8e7241926000 RSI: ffff8e7241927ffc RDI: 000000000b7fb767
+# RBP: ffff9fb640efbde0 R08: 0000000000000000 R09: 0000000000000000
+# R10: ffff8e7241b09a80 R11: 0000000000000000 R12: ffff8e7241cd6200
+# R13: 0000000000000013 R14: 0000000000000000 R15: ffff8e7241b09a80
+# FS: 0000000000000000(0000) GS:ffff8e726bc00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000000000000001c CR3: 0000000118d1e000 CR4: 0000000000350ef0
+
+# DEBUG - Begin test kms_cursor_crc@cursor-rapid-movement-128x42
+# ------------[ cut here ]------------
+# WARNING: CPU: 0 PID: 2933 at drivers/gpu/drm/vkms/vkms_crtc.c:139 vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# Modules linked in: vkms
+# CPU: 0 UID: 0 PID: 2933 Comm: kms_cursor_crc Not tainted 6.13.0-rc2-g5219242748c8 #1
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# RIP: 0010:vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# Code: f7 48 89 f3 e8 d0 bf 6e d0 48 8b 83 50 01 00 00 a8 01 75 15 48 8b bb a0 01 00 00 e8 59 05 15 d0 48 89 df 5b e9 50 05 15 d0 90 <0f> 0b 90 eb e5 66 2e 0f 1f 84 00 00 00 00 00 90 90 90 90 90 90 90
+# RSP: 0018:ffffa14cc08b3b08 EFLAGS: 00010202
+# RAX: ffff9b864084b605 RBX: ffff9b8641ba4600 RCX: ffffffff91c96b65
+# RDX: ffffffff90ad1f80 RSI: 0000000000000000 RDI: 0000000000000000
+# RBP: 0000000000000000 R08: 0000000000000034 R09: 0000000000000002
+# R10: 0000000047dd15a5 R11: 00000000547dd15a R12: ffff9b864099c000
+# R13: 0000000000000000 R14: 00000000ffffffff R15: 0000000000000000
+# FS: 00007f4f437ab6c0(0000) GS:ffff9b866bc00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 00007f4f44c3cd40 CR3: 0000000108c14000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __warn+0x8c/0x190
+# ? vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# ? report_bug+0x164/0x190
+# ? handle_bug+0x54/0x90
+# ? exc_invalid_op+0x17/0x70
+# ? asm_exc_invalid_op+0x1a/0x20
+# ? __pfx_drm_property_free_blob+0x10/0x10
+# ? vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# ? vkms_atomic_crtc_destroy_state+0x10/0x40 [vkms]
+# drm_atomic_state_default_clear+0x137/0x2f0
+# __drm_atomic_state_free+0x6c/0xb0
+# drm_atomic_helper_update_plane+0x100/0x150
+# drm_mode_cursor_universal+0x10e/0x270
+# drm_mode_cursor_common+0x115/0x240
+# ? __pfx_drm_mode_cursor_ioctl+0x10/0x10
+# drm_mode_cursor_ioctl+0x4a/0x70
+# drm_ioctl_kernel+0xb0/0x110
+# drm_ioctl+0x235/0x4b0
+# ? __pfx_drm_mode_cursor_ioctl+0x10/0x10
+# __x64_sys_ioctl+0x92/0xc0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7f4f44960c5b
+# Code: 00 48 89 44 24 18 31 c0 48 8d 44 24 60 c7 04 24 10 00 00 00 48 89 44 24 08 48 8d 44 24 20 48 89 44 24 10 b8 10 00 00 00 0f 05 <89> c2 3d 00 f0 ff ff 77 1c 48 8b 44 24 18 64 48 2b 04 25 28 00 00
+# RSP: 002b:00007ffcdfb0b560 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+# RAX: ffffffffffffffda RBX: 0000000000000060 RCX: 00007f4f44960c5b
+# RDX: 00007ffcdfb0b5f0 RSI: 00000000c01c64a3 RDI: 0000000000000005
+# RBP: 00007ffcdfb0b5f0 R08: 0000000000000007 R09: 000055c3a5801a30
+# R10: 3107764f00e1f281 R11: 0000000000000246 R12: 00000000c01c64a3
+# R13: 0000000000000005 R14: 000055c38b7e42c8 R15: 000055c3a579aab0
+# </TASK>
+# irq event stamp: 58747
+# hardirqs last enabled at (58753): [<ffffffff903a3e4d>] __up_console_sem+0x4d/0x60
+# hardirqs last disabled at (58758): [<ffffffff903a3e32>] __up_console_sem+0x32/0x60
+# softirqs last enabled at (47324): [<ffffffff90317980>] handle_softirqs+0x310/0x3f0
+# softirqs last disabled at (47307): [<ffffffff90317bc1>] __irq_exit_rcu+0xa1/0xc0
+# ---[ end trace 0000000000000000 ]---
+# Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
+# CPU: 1 UID: 0 PID: 11 Comm: kworker/u8:0 Tainted: G W 6.13.0-rc2-g5219242748c8 #1
+# Tainted: [W]=WARN
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# Workqueue: vkms_composer vkms_composer_worker [vkms]
+# RIP: 0010:compose_active_planes+0x1a3/0x760 [vkms]
+# Code: db 4d 89 fa 85 c9 0f 84 32 03 00 00 4d 8b 24 da 48 c7 44 24 60 00 00 00 00 48 c7 44 24 68 00 00 00 00 49 8b 84 24 48 01 00 00 <8b> 50 1c 44 39 ea 0f 8f f3 02 00 00 44 39 68 24 0f 8e e9 02 00 00
+# RSP: 0018:ffffa14cc005fd20 EFLAGS: 00010202
+# RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000002
+# RDX: ffff9b8669626000 RSI: ffff9b8669627ffc RDI: 00000000348d6c39
+# RBP: ffffa14cc005fde0 R08: 0000000000000000 R09: 0000000000000000
+# R10: ffff9b8645650eb0 R11: 0000000000000000 R12: ffff9b8641ba5800
+# R13: 0000000000000028 R14: 0000000000000000 R15: ffff9b8645650eb0
+# FS: 0000000000000000(0000) GS:ffff9b866bd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000000000000001c CR3: 0000000108c14000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __die+0x1e/0x60
+# ? page_fault_oops+0x17b/0x4a0
+# ? __kvmalloc_node_noprof+0x3e/0xc0
+# ? exc_page_fault+0x6d/0x230
+# ? asm_exc_page_fault+0x26/0x30
+# ? compose_active_planes+0x1a3/0x760 [vkms]
+# vkms_composer_worker+0x205/0x240 [vkms]
+# process_one_work+0x201/0x6c0
+# ? lock_is_held_type+0x9e/0x110
+# worker_thread+0x17e/0x320
+# ? __pfx_worker_thread+0x10/0x10
+# kthread+0xce/0x100
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork+0x2f/0x50
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork_asm+0x1a/0x30
+# </TASK>
+# Modules linked in: vkms
+# CR2: 000000000000001c
+# ---[ end trace 0000000000000000 ]---
+# RIP: 0010:compose_active_planes+0x1a3/0x760 [vkms]
+# Code: db 4d 89 fa 85 c9 0f 84 32 03 00 00 4d 8b 24 da 48 c7 44 24 60 00 00 00 00 48 c7 44 24 68 00 00 00 00 49 8b 84 24 48 01 00 00 <8b> 50 1c 44 39 ea 0f 8f f3 02 00 00 44 39 68 24 0f 8e e9 02 00 00
+# RSP: 0018:ffffa14cc005fd20 EFLAGS: 00010202
+# RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000002
+# RDX: ffff9b8669626000 RSI: ffff9b8669627ffc RDI: 00000000348d6c39
+# RBP: ffffa14cc005fde0 R08: 0000000000000000 R09: 0000000000000000
+# R10: ffff9b8645650eb0 R11: 0000000000000000 R12: ffff9b8641ba5800
+# R13: 0000000000000028 R14: 0000000000000000 R15: ffff9b8645650eb0
+# FS: 0000000000000000(0000) GS:ffff9b866bd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000000000000001c CR3: 0000000108c14000 CR4: 0000000000350ef0
+
+# DEBUG - Begin test kms_cursor_crc@cursor-rapid-movement-32x32
+# ------------[ cut here ]------------
+# WARNING: CPU: 0 PID: 2933 at drivers/gpu/drm/vkms/vkms_crtc.c:139 vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# Modules linked in: vkms
+# CPU: 0 UID: 0 PID: 2933 Comm: kms_cursor_crc Not tainted 6.13.0-rc2-g5219242748c8 #1
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# RIP: 0010:vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# Code: f7 48 89 f3 e8 d0 bf 6e d0 48 8b 83 50 01 00 00 a8 01 75 15 48 8b bb a0 01 00 00 e8 59 05 15 d0 48 89 df 5b e9 50 05 15 d0 90 <0f> 0b 90 eb e5 66 2e 0f 1f 84 00 00 00 00 00 90 90 90 90 90 90 90
+# RSP: 0018:ffffa14cc08b3b08 EFLAGS: 00010202
+# RAX: ffff9b864084b605 RBX: ffff9b8641ba4600 RCX: ffffffff91c96b65
+# RDX: ffffffff90ad1f80 RSI: 0000000000000000 RDI: 0000000000000000
+# RBP: 0000000000000000 R08: 0000000000000034 R09: 0000000000000002
+# R10: 0000000047dd15a5 R11: 00000000547dd15a R12: ffff9b864099c000
+# R13: 0000000000000000 R14: 00000000ffffffff R15: 0000000000000000
+# FS: 00007f4f437ab6c0(0000) GS:ffff9b866bc00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 00007f4f44c3cd40 CR3: 0000000108c14000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __warn+0x8c/0x190
+# ? vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# ? report_bug+0x164/0x190
+# ? handle_bug+0x54/0x90
+# ? exc_invalid_op+0x17/0x70
+# ? asm_exc_invalid_op+0x1a/0x20
+# ? __pfx_drm_property_free_blob+0x10/0x10
+# ? vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# ? vkms_atomic_crtc_destroy_state+0x10/0x40 [vkms]
+# drm_atomic_state_default_clear+0x137/0x2f0
+# __drm_atomic_state_free+0x6c/0xb0
+# drm_atomic_helper_update_plane+0x100/0x150
+# drm_mode_cursor_universal+0x10e/0x270
+# drm_mode_cursor_common+0x115/0x240
+# ? __pfx_drm_mode_cursor_ioctl+0x10/0x10
+# drm_mode_cursor_ioctl+0x4a/0x70
+# drm_ioctl_kernel+0xb0/0x110
+# drm_ioctl+0x235/0x4b0
+# ? __pfx_drm_mode_cursor_ioctl+0x10/0x10
+# __x64_sys_ioctl+0x92/0xc0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7f4f44960c5b
+# Code: 00 48 89 44 24 18 31 c0 48 8d 44 24 60 c7 04 24 10 00 00 00 48 89 44 24 08 48 8d 44 24 20 48 89 44 24 10 b8 10 00 00 00 0f 05 <89> c2 3d 00 f0 ff ff 77 1c 48 8b 44 24 18 64 48 2b 04 25 28 00 00
+# RSP: 002b:00007ffcdfb0b560 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+# RAX: ffffffffffffffda RBX: 0000000000000060 RCX: 00007f4f44960c5b
+# RDX: 00007ffcdfb0b5f0 RSI: 00000000c01c64a3 RDI: 0000000000000005
+# RBP: 00007ffcdfb0b5f0 R08: 0000000000000007 R09: 000055c3a5801a30
+# R10: 3107764f00e1f281 R11: 0000000000000246 R12: 00000000c01c64a3
+# R13: 0000000000000005 R14: 000055c38b7e42c8 R15: 000055c3a579aab0
+# </TASK>
+# irq event stamp: 58747
+# hardirqs last enabled at (58753): [<ffffffff903a3e4d>] __up_console_sem+0x4d/0x60
+# hardirqs last disabled at (58758): [<ffffffff903a3e32>] __up_console_sem+0x32/0x60
+# softirqs last enabled at (47324): [<ffffffff90317980>] handle_softirqs+0x310/0x3f0
+# softirqs last disabled at (47307): [<ffffffff90317bc1>] __irq_exit_rcu+0xa1/0xc0
+# ---[ end trace 0000000000000000 ]---
+# Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
+# CPU: 1 UID: 0 PID: 11 Comm: kworker/u8:0 Tainted: G W 6.13.0-rc2-g5219242748c8 #1
+# Tainted: [W]=WARN
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# Workqueue: vkms_composer vkms_composer_worker [vkms]
+# RIP: 0010:compose_active_planes+0x1a3/0x760 [vkms]
+# Code: db 4d 89 fa 85 c9 0f 84 32 03 00 00 4d 8b 24 da 48 c7 44 24 60 00 00 00 00 48 c7 44 24 68 00 00 00 00 49 8b 84 24 48 01 00 00 <8b> 50 1c 44 39 ea 0f 8f f3 02 00 00 44 39 68 24 0f 8e e9 02 00 00
+# RSP: 0018:ffffa14cc005fd20 EFLAGS: 00010202
+# RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000002
+# RDX: ffff9b8669626000 RSI: ffff9b8669627ffc RDI: 00000000348d6c39
+# RBP: ffffa14cc005fde0 R08: 0000000000000000 R09: 0000000000000000
+# R10: ffff9b8645650eb0 R11: 0000000000000000 R12: ffff9b8641ba5800
+# R13: 0000000000000028 R14: 0000000000000000 R15: ffff9b8645650eb0
+# FS: 0000000000000000(0000) GS:ffff9b866bd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000000000000001c CR3: 0000000108c14000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __die+0x1e/0x60
+# ? page_fault_oops+0x17b/0x4a0
+# ? __kvmalloc_node_noprof+0x3e/0xc0
+# ? exc_page_fault+0x6d/0x230
+# ? asm_exc_page_fault+0x26/0x30
+# ? compose_active_planes+0x1a3/0x760 [vkms]
+# vkms_composer_worker+0x205/0x240 [vkms]
+# process_one_work+0x201/0x6c0
+# ? lock_is_held_type+0x9e/0x110
+# worker_thread+0x17e/0x320
+# ? __pfx_worker_thread+0x10/0x10
+# kthread+0xce/0x100
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork+0x2f/0x50
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork_asm+0x1a/0x30
+# </TASK>
+# Modules linked in: vkms
+# CR2: 000000000000001c
+# ---[ end trace 0000000000000000 ]---
+# RIP: 0010:compose_active_planes+0x1a3/0x760 [vkms]
+# Code: db 4d 89 fa 85 c9 0f 84 32 03 00 00 4d 8b 24 da 48 c7 44 24 60 00 00 00 00 48 c7 44 24 68 00 00 00 00 49 8b 84 24 48 01 00 00 <8b> 50 1c 44 39 ea 0f 8f f3 02 00 00 44 39 68 24 0f 8e e9 02 00 00
+# RSP: 0018:ffffa14cc005fd20 EFLAGS: 00010202
+# RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000002
+# RDX: ffff9b8669626000 RSI: ffff9b8669627ffc RDI: 00000000348d6c39
+# RBP: ffffa14cc005fde0 R08: 0000000000000000 R09: 0000000000000000
+# R10: ffff9b8645650eb0 R11: 0000000000000000 R12: ffff9b8641ba5800
+# R13: 0000000000000028 R14: 0000000000000000 R15: ffff9b8645650eb0
+# FS: 0000000000000000(0000) GS:ffff9b866bd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000000000000001c CR3: 0000000108c14000 CR4: 0000000000350ef0
+
+# DEBUG - Begin test kms_cursor_crc@cursor-rapid-movement-32x32
+# ------------[ cut here ]------------
+# WARNING: CPU: 0 PID: 2933 at drivers/gpu/drm/vkms/vkms_crtc.c:139 vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# Modules linked in: vkms
+# CPU: 0 UID: 0 PID: 2933 Comm: kms_cursor_crc Not tainted 6.13.0-rc2-g5219242748c8 #1
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# RIP: 0010:vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# Code: f7 48 89 f3 e8 d0 bf 6e d0 48 8b 83 50 01 00 00 a8 01 75 15 48 8b bb a0 01 00 00 e8 59 05 15 d0 48 89 df 5b e9 50 05 15 d0 90 <0f> 0b 90 eb e5 66 2e 0f 1f 84 00 00 00 00 00 90 90 90 90 90 90 90
+# RSP: 0018:ffffa14cc08b3b08 EFLAGS: 00010202
+# RAX: ffff9b864084b605 RBX: ffff9b8641ba4600 RCX: ffffffff91c96b65
+# RDX: ffffffff90ad1f80 RSI: 0000000000000000 RDI: 0000000000000000
+# RBP: 0000000000000000 R08: 0000000000000034 R09: 0000000000000002
+# R10: 0000000047dd15a5 R11: 00000000547dd15a R12: ffff9b864099c000
+# R13: 0000000000000000 R14: 00000000ffffffff R15: 0000000000000000
+# FS: 00007f4f437ab6c0(0000) GS:ffff9b866bc00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 00007f4f44c3cd40 CR3: 0000000108c14000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __warn+0x8c/0x190
+# ? vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# ? report_bug+0x164/0x190
+# ? handle_bug+0x54/0x90
+# ? exc_invalid_op+0x17/0x70
+# ? asm_exc_invalid_op+0x1a/0x20
+# ? __pfx_drm_property_free_blob+0x10/0x10
+# ? vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# ? vkms_atomic_crtc_destroy_state+0x10/0x40 [vkms]
+# drm_atomic_state_default_clear+0x137/0x2f0
+# __drm_atomic_state_free+0x6c/0xb0
+# drm_atomic_helper_update_plane+0x100/0x150
+# drm_mode_cursor_universal+0x10e/0x270
+# drm_mode_cursor_common+0x115/0x240
+# ? __pfx_drm_mode_cursor_ioctl+0x10/0x10
+# drm_mode_cursor_ioctl+0x4a/0x70
+# drm_ioctl_kernel+0xb0/0x110
+# drm_ioctl+0x235/0x4b0
+# ? __pfx_drm_mode_cursor_ioctl+0x10/0x10
+# __x64_sys_ioctl+0x92/0xc0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7f4f44960c5b
+# Code: 00 48 89 44 24 18 31 c0 48 8d 44 24 60 c7 04 24 10 00 00 00 48 89 44 24 08 48 8d 44 24 20 48 89 44 24 10 b8 10 00 00 00 0f 05 <89> c2 3d 00 f0 ff ff 77 1c 48 8b 44 24 18 64 48 2b 04 25 28 00 00
+# RSP: 002b:00007ffcdfb0b560 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+# RAX: ffffffffffffffda RBX: 0000000000000060 RCX: 00007f4f44960c5b
+# RDX: 00007ffcdfb0b5f0 RSI: 00000000c01c64a3 RDI: 0000000000000005
+# RBP: 00007ffcdfb0b5f0 R08: 0000000000000007 R09: 000055c3a5801a30
+# R10: 3107764f00e1f281 R11: 0000000000000246 R12: 00000000c01c64a3
+# R13: 0000000000000005 R14: 000055c38b7e42c8 R15: 000055c3a579aab0
+# </TASK>
+# irq event stamp: 58747
+# hardirqs last enabled at (58753): [<ffffffff903a3e4d>] __up_console_sem+0x4d/0x60
+# hardirqs last disabled at (58758): [<ffffffff903a3e32>] __up_console_sem+0x32/0x60
+# softirqs last enabled at (47324): [<ffffffff90317980>] handle_softirqs+0x310/0x3f0
+# softirqs last disabled at (47307): [<ffffffff90317bc1>] __irq_exit_rcu+0xa1/0xc0
+# ---[ end trace 0000000000000000 ]---
+# Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
+# CPU: 1 UID: 0 PID: 11 Comm: kworker/u8:0 Tainted: G W 6.13.0-rc2-g5219242748c8 #1
+# Tainted: [W]=WARN
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# Workqueue: vkms_composer vkms_composer_worker [vkms]
+# RIP: 0010:compose_active_planes+0x1a3/0x760 [vkms]
+# Code: db 4d 89 fa 85 c9 0f 84 32 03 00 00 4d 8b 24 da 48 c7 44 24 60 00 00 00 00 48 c7 44 24 68 00 00 00 00 49 8b 84 24 48 01 00 00 <8b> 50 1c 44 39 ea 0f 8f f3 02 00 00 44 39 68 24 0f 8e e9 02 00 00
+# RSP: 0018:ffffa14cc005fd20 EFLAGS: 00010202
+# RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000002
+# RDX: ffff9b8669626000 RSI: ffff9b8669627ffc RDI: 00000000348d6c39
+# RBP: ffffa14cc005fde0 R08: 0000000000000000 R09: 0000000000000000
+# R10: ffff9b8645650eb0 R11: 0000000000000000 R12: ffff9b8641ba5800
+# R13: 0000000000000028 R14: 0000000000000000 R15: ffff9b8645650eb0
+# FS: 0000000000000000(0000) GS:ffff9b866bd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000000000000001c CR3: 0000000108c14000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __die+0x1e/0x60
+# ? page_fault_oops+0x17b/0x4a0
+# ? __kvmalloc_node_noprof+0x3e/0xc0
+# ? exc_page_fault+0x6d/0x230
+# ? asm_exc_page_fault+0x26/0x30
+# ? compose_active_planes+0x1a3/0x760 [vkms]
+# vkms_composer_worker+0x205/0x240 [vkms]
+# process_one_work+0x201/0x6c0
+# ? lock_is_held_type+0x9e/0x110
+# worker_thread+0x17e/0x320
+# ? __pfx_worker_thread+0x10/0x10
+# kthread+0xce/0x100
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork+0x2f/0x50
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork_asm+0x1a/0x30
+# </TASK>
+# Modules linked in: vkms
+# CR2: 000000000000001c
+# ---[ end trace 0000000000000000 ]---
+# RIP: 0010:compose_active_planes+0x1a3/0x760 [vkms]
+# Code: db 4d 89 fa 85 c9 0f 84 32 03 00 00 4d 8b 24 da 48 c7 44 24 60 00 00 00 00 48 c7 44 24 68 00 00 00 00 49 8b 84 24 48 01 00 00 <8b> 50 1c 44 39 ea 0f 8f f3 02 00 00 44 39 68 24 0f 8e e9 02 00 00
+# RSP: 0018:ffffa14cc005fd20 EFLAGS: 00010202
+# RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000002
+# RDX: ffff9b8669626000 RSI: ffff9b8669627ffc RDI: 00000000348d6c39
+# RBP: ffffa14cc005fde0 R08: 0000000000000000 R09: 0000000000000000
+# R10: ffff9b8645650eb0 R11: 0000000000000000 R12: ffff9b8641ba5800
+# R13: 0000000000000028 R14: 0000000000000000 R15: ffff9b8645650eb0
+# FS: 0000000000000000(0000) GS:ffff9b866bd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000000000000001c CR3: 0000000108c14000 CR4: 0000000000350ef0
+
+# DEBUG - Begin test kms_cursor_crc@cursor-rapid-movement-128x128
+# ------------[ cut here ]------------
+# WARNING: CPU: 0 PID: 2898 at drivers/gpu/drm/vkms/vkms_crtc.c:139 vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# Modules linked in: vkms
+# CPU: 0 UID: 0 PID: 2898 Comm: kms_cursor_crc Not tainted 6.13.0-rc2-g1f006005ebf8 #1
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# RIP: 0010:vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# Code: f7 48 89 f3 e8 d0 bf 6e e1 48 8b 83 50 01 00 00 a8 01 75 15 48 8b bb a0 01 00 00 e8 59 05 15 e1 48 89 df 5b e9 50 05 15 e1 90 <0f> 0b 90 eb e5 66 2e 0f 1f 84 00 00 00 00 00 90 90 90 90 90 90 90
+# RSP: 0018:ffffa35c4082fb08 EFLAGS: 00010202
+# RAX: ffff8b02c1c0f005 RBX: ffff8b02d4509600 RCX: ffffffffa2c96b65
+# RDX: ffffffffa1ad1f80 RSI: 0000000000000000 RDI: 0000000000000000
+# RBP: 0000000000000000 R08: 0000000000000034 R09: 0000000000000002
+# R10: 0000000030f11ddf R11: 00000000f30f11dd R12: ffff8b02d2528000
+# R13: 0000000000000000 R14: 00000000ffffffff R15: 0000000000000000
+# FS: 00007f4b071ab6c0(0000) GS:ffff8b02efc00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000055f7f8384b48 CR3: 0000000104ef0000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __warn+0x8c/0x190
+# ? vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# ? report_bug+0x164/0x190
+# ? handle_bug+0x54/0x90
+# ? exc_invalid_op+0x17/0x70
+# ? asm_exc_invalid_op+0x1a/0x20
+# ? __pfx_drm_property_free_blob+0x10/0x10
+# ? vkms_atomic_crtc_destroy_state+0x31/0x40 [vkms]
+# ? vkms_atomic_crtc_destroy_state+0x10/0x40 [vkms]
+# drm_atomic_state_default_clear+0x137/0x2f0
+# __drm_atomic_state_free+0x6c/0xb0
+# drm_atomic_helper_update_plane+0x100/0x150
+# drm_mode_cursor_universal+0x10e/0x270
+# drm_mode_cursor_common+0x115/0x240
+# ? __pfx_drm_mode_cursor_ioctl+0x10/0x10
+# drm_mode_cursor_ioctl+0x4a/0x70
+# drm_ioctl_kernel+0xb0/0x110
+# drm_ioctl+0x235/0x4b0
+# ? __pfx_drm_mode_cursor_ioctl+0x10/0x10
+# __x64_sys_ioctl+0x92/0xc0
+# do_syscall_64+0xbb/0x1d0
+# entry_SYSCALL_64_after_hwframe+0x77/0x7f
+# RIP: 0033:0x7f4b0815ac5b
+# Code: 00 48 89 44 24 18 31 c0 48 8d 44 24 60 c7 04 24 10 00 00 00 48 89 44 24 08 48 8d 44 24 20 48 89 44 24 10 b8 10 00 00 00 0f 05 <89> c2 3d 00 f0 ff ff 77 1c 48 8b 44 24 18 64 48 2b 04 25 28 00 00
+# RSP: 002b:00007ffd726f75e0 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+# RAX: ffffffffffffffda RBX: 000000000000005d RCX: 00007f4b0815ac5b
+# RDX: 00007ffd726f7670 RSI: 00000000c01c64a3 RDI: 0000000000000005
+# RBP: 00007ffd726f7670 R08: 0000000000000007 R09: 000055f7f83e30c0
+# R10: 2b16893c03bd381a R11: 0000000000000246 R12: 00000000c01c64a3
+# R13: 0000000000000005 R14: 000055f7dbee72c8 R15: 000055f7f837bab0
+# </TASK>
+# irq event stamp: 58921
+# hardirqs last enabled at (58927): [<ffffffffa13a3e4d>] __up_console_sem+0x4d/0x60
+# hardirqs last disabled at (58932): [<ffffffffa13a3e32>] __up_console_sem+0x32/0x60
+# softirqs last enabled at (46140): [<ffffffffa1317980>] handle_softirqs+0x310/0x3f0
+# softirqs last disabled at (46135): [<ffffffffa1317bc1>] __irq_exit_rcu+0xa1/0xc0
+# ---[ end trace 0000000000000000 ]---
+# Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
+# CPU: 1 UID: 0 PID: 1657 Comm: kworker/u8:14 Tainted: G W 6.13.0-rc2-g1f006005ebf8 #1
+# Tainted: [W]=WARN
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# Workqueue: vkms_composer vkms_composer_worker [vkms]
+# RIP: 0010:compose_active_planes+0x1a3/0x760 [vkms]
+# Code: db 4d 89 fa 85 c9 0f 84 32 03 00 00 4d 8b 24 da 48 c7 44 24 60 00 00 00 00 48 c7 44 24 68 00 00 00 00 49 8b 84 24 48 01 00 00 <8b> 50 1c 44 39 ea 0f 8f f3 02 00 00 44 39 68 24 0f 8e e9 02 00 00
+# RSP: 0018:ffffa35c40c43d20 EFLAGS: 00010202
+# RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000002
+# RDX: ffff8b02c52c8000 RSI: ffff8b02c52c9ffc RDI: 00000000fa4761c9
+# RBP: ffffa35c40c43de0 R08: 0000000000000000 R09: 0000000000000000
+# R10: ffff8b02c1c87840 R11: 0000000000000000 R12: ffff8b02d4508800
+# R13: 0000000000000021 R14: 0000000000000000 R15: ffff8b02c1c87840
+# FS: 0000000000000000(0000) GS:ffff8b02efd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 000000000000001c CR3: 0000000104ef0000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __die+0x1e/0x60
+# ? page_fault_oops+0x17b/0x4a0
+# ? srso_return_thunk+0x5/0x5f
+# ? exc_page_fault+0x6d/0x230
+# ? asm_exc_page_fault+0x26/0x30
+# ? compose_active_planes+0x1a3/0x760 [vkms]
+# vkms_composer_worker+0x205/0x240 [vkms]
+# process_one_work+0x201/0x6c0
+# ? lock_is_held_type+0x9e/0x110
+# worker_thread+0x17e/0x320
+# ? __pfx_worker_thread+0x10/0x10
+# kthread+0xce/0x100
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork+0x2f/0x50
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork_asm+0x1a/0x30
+# </TASK>
+# Modules linked in: vkms
+# CR2: 000000000000001c
+# ---[ end trace 0000000000000000 ]---
+# RIP: 0010:compose_active_planes+0x1a3/0x760 [vkms]
+# Code: db 4d 89 fa 85 c9 0f 84 32 03 00 00 4d 8b 24 da 48 c7 44 24 60 00 00 00 00 48 c7 44 24 68 00 00 00 00 49 8b 84 24 48 01 00 00 <8b> 50 1c 44 39 ea 0f 8f f3 02 00 00 44 39 68 24 0f 8e e9 02 00 00
+# RSP: 0018:ffffa35c40c43d20 EFLAGS: 00010202
+# RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000002
+# RDX: ffff8b02c52c8000 RSI: ffff8b02c52c9ffc RDI: 00000000fa4761c9
+# RBP: ffffa35c40c43de0 R08: 0000000000000000 R09: 0000000000000000
+# R10: ffff8b02c1c87840 R11: 0000000000000000 R12: ffff8b02d4508800
+# R13: 0000000000000021 R14: 0000000000000000 R15: ffff8b02c1c87840
+# FS: 0000000000000000(0000) GS:ffff8b02efd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+
# Skip driver specific tests
^amdgpu.*
^msm.*
@@ -272,3 +802,8 @@ gem_.*
i915_.*
xe_.*
tools_test.*
+
+# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
+# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
+kms_display_modes@extended-mode-basic
+kms_display_modes@mst-extended-mode-negative
diff --git a/drivers/gpu/drm/clients/drm_log.c b/drivers/gpu/drm/clients/drm_log.c
index 379850c83e51..d239f1e3c456 100644
--- a/drivers/gpu/drm/clients/drm_log.c
+++ b/drivers/gpu/drm/clients/drm_log.c
@@ -323,7 +323,7 @@ static int drm_log_client_suspend(struct drm_client_dev *client, bool _console_l
{
struct drm_log *dlog = client_to_drm_log(client);
- console_stop(&dlog->con);
+ console_suspend(&dlog->con);
return 0;
}
@@ -332,7 +332,7 @@ static int drm_log_client_resume(struct drm_client_dev *client, bool _console_lo
{
struct drm_log *dlog = client_to_drm_log(client);
- console_start(&dlog->con);
+ console_resume(&dlog->con);
return 0;
}
diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c
index 56f977bbe62d..30c736fc0067 100644
--- a/drivers/gpu/drm/display/drm_bridge_connector.c
+++ b/drivers/gpu/drm/display/drm_bridge_connector.c
@@ -327,7 +327,7 @@ static int drm_bridge_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
drm_bridge_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_bridge_connector *bridge_connector =
to_drm_bridge_connector(connector);
diff --git a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
index c491e3203bf1..4c350c7f5144 100644
--- a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
@@ -486,16 +486,16 @@ EXPORT_SYMBOL(drm_lspcon_get_mode);
* @dev: &drm_device to use
* @adapter: I2C-over-aux adapter
* @mode: required mode of operation
+ * @time_out: LSPCON mode change settle timeout
*
* Returns:
* 0 on success, -error on failure/timeout
*/
int drm_lspcon_set_mode(const struct drm_device *dev, struct i2c_adapter *adapter,
- enum drm_lspcon_mode mode)
+ enum drm_lspcon_mode mode, int time_out)
{
u8 data = 0;
int ret;
- int time_out = 200;
enum drm_lspcon_mode current_mode;
if (mode == DRM_LSPCON_MODE_PCON)
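drm_lspcon_set_mode() now takes the settle timeout from the caller instead of hard-coding 200. A minimal, hedged usage sketch follows; the wrapper name and the 400 value are illustrative assumptions, not taken from this patch.

/*
 * Hedged usage sketch (not part of this patch): the settle timeout is now a
 * caller-supplied argument. 200 matches the old hard-coded default; 400 is an
 * assumed example for a slower LSPCON.
 */
static int example_switch_lspcon_to_pcon(const struct drm_device *dev,
					 struct i2c_adapter *adapter,
					 int settle_timeout)
{
	return drm_lspcon_set_mode(dev, adapter, DRM_LSPCON_MODE_PCON,
				   settle_timeout ? settle_timeout : 200);
}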
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index 61c7c2c588c6..dbce1c3f4969 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -459,6 +459,64 @@ void drm_dp_lttpr_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
}
EXPORT_SYMBOL(drm_dp_lttpr_link_train_channel_eq_delay);
+/**
+ * drm_dp_lttpr_wake_timeout_setup() - Grant extended time for sink to wake up
+ * @aux: The DP AUX channel to use
+ * @transparent_mode: This is true if lttpr is in transparent mode
+ *
+ * This function checks if the sink needs any extended wake time, if it does
+ * it grants this request. Post this setup the source device can keep trying
+ * the Aux transaction till the granted wake timeout.
+ * If this function is not called all Aux transactions are expected to take
+ * a default of 1ms before they throw an error.
+ */
+void drm_dp_lttpr_wake_timeout_setup(struct drm_dp_aux *aux, bool transparent_mode)
+{
+ u8 val = 1;
+ int ret;
+
+ if (transparent_mode) {
+ static const u8 timeout_mapping[] = {
+ [DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_1_MS] = 1,
+ [DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_20_MS] = 20,
+ [DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_40_MS] = 40,
+ [DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_60_MS] = 60,
+ [DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_80_MS] = 80,
+ [DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_100_MS] = 100,
+ };
+
+ ret = drm_dp_dpcd_readb(aux, DP_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST, &val);
+ if (ret != 1) {
+ drm_dbg_kms(aux->drm_dev,
+ "Failed to read Extended sleep wake timeout request\n");
+ return;
+ }
+
+ val = (val < sizeof(timeout_mapping) && timeout_mapping[val]) ?
+ timeout_mapping[val] : 1;
+
+ if (val > 1)
+ drm_dp_dpcd_writeb(aux,
+ DP_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_GRANT,
+ DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_GRANTED);
+ } else {
+ ret = drm_dp_dpcd_readb(aux, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &val);
+ if (ret != 1) {
+ drm_dbg_kms(aux->drm_dev,
+ "Failed to read Extended sleep wake timeout request\n");
+ return;
+ }
+
+ val = (val & DP_EXTENDED_WAKE_TIMEOUT_REQUEST_MASK) ?
+ (val & DP_EXTENDED_WAKE_TIMEOUT_REQUEST_MASK) * 10 : 1;
+
+ if (val > 1)
+ drm_dp_dpcd_writeb(aux, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT,
+ DP_EXTENDED_WAKE_TIMEOUT_GRANT);
+ }
+}
+EXPORT_SYMBOL(drm_dp_lttpr_wake_timeout_setup);
+
u8 drm_dp_link_rate_to_bw_code(int link_rate)
{
switch (link_rate) {
@@ -2818,6 +2876,67 @@ int drm_dp_lttpr_max_link_rate(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE])
EXPORT_SYMBOL(drm_dp_lttpr_max_link_rate);
/**
+ * drm_dp_lttpr_set_transparent_mode() - set the LTTPR in transparent mode
+ * @aux: DisplayPort AUX channel
+ * @enable: Enable or disable transparent mode
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int drm_dp_lttpr_set_transparent_mode(struct drm_dp_aux *aux, bool enable)
+{
+ u8 val = enable ? DP_PHY_REPEATER_MODE_TRANSPARENT :
+ DP_PHY_REPEATER_MODE_NON_TRANSPARENT;
+ int ret = drm_dp_dpcd_writeb(aux, DP_PHY_REPEATER_MODE, val);
+
+ if (ret < 0)
+ return ret;
+
+ return (ret == 1) ? 0 : -EIO;
+}
+EXPORT_SYMBOL(drm_dp_lttpr_set_transparent_mode);
+
+/**
+ * drm_dp_lttpr_init() - init LTTPR transparency mode according to DP standard
+ * @aux: DisplayPort AUX channel
+ * @lttpr_count: Number of LTTPRs, between 0 and 8 according to the DP
+ *               standard, or a negative error code for an invalid count.
+ *               See drm_dp_lttpr_count().
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int drm_dp_lttpr_init(struct drm_dp_aux *aux, int lttpr_count)
+{
+ int ret;
+
+ if (!lttpr_count)
+ return 0;
+
+ /*
+ * See DP Standard v2.0 3.6.6.1 about the explicit disabling of
+ * non-transparent mode and the disable->enable non-transparent mode
+ * sequence.
+ */
+ ret = drm_dp_lttpr_set_transparent_mode(aux, true);
+ if (ret)
+ return ret;
+
+ if (lttpr_count < 0)
+ return -ENODEV;
+
+ if (drm_dp_lttpr_set_transparent_mode(aux, false)) {
+ /*
+		 * Roll back to transparent mode if setting non-transparent
+		 * mode has failed.
+ */
+ drm_dp_lttpr_set_transparent_mode(aux, true);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_lttpr_init);
+
+/**
* drm_dp_lttpr_max_lane_count - get the maximum lane count supported by all LTTPRs
* @caps: LTTPR common capabilities
*
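The new drm_dp_lttpr_set_transparent_mode() and drm_dp_lttpr_init() helpers centralize the DP v2.0 transparent/non-transparent handshake described above. A minimal, hedged sketch of a driver-side caller follows; drm_dp_read_lttpr_common_caps() and drm_dp_lttpr_count() are assumed to keep their existing signatures, and example_lttpr_setup() is a hypothetical wrapper, not part of this series.

/*
 * Hedged driver-side sketch (not from this patch): read the LTTPR common
 * caps, derive the repeater count and let drm_dp_lttpr_init() pick the
 * transparency mode, rolling back to transparent mode on failure as the
 * helper above documents.
 */
static int example_lttpr_setup(struct drm_dp_aux *aux,
			       const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	u8 caps[DP_LTTPR_COMMON_CAP_SIZE];

	if (drm_dp_read_lttpr_common_caps(aux, dpcd, caps) < 0)
		return 0;	/* no readable LTTPR caps: nothing to set up */

	/* 0 means no LTTPRs; a negative count is handed through as-is */
	return drm_dp_lttpr_init(aux, drm_dp_lttpr_count(caps));
}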
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 06c91c5b7f7c..3a1f1ffc7b55 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -171,18 +171,30 @@ static const char *drm_dp_mst_sideband_tx_state_str(int state)
return sideband_reason_str[state];
}
+static inline u8
+drm_dp_mst_get_ufp_num_at_lct_from_rad(u8 lct, const u8 *rad)
+{
+ int idx = (lct / 2) - 1;
+ int shift = (lct % 2) ? 0 : 4;
+ u8 ufp_num;
+
+	/* mst_primary, its rad is unset */
+ if (lct == 1)
+ return 0;
+
+ ufp_num = (rad[idx] >> shift) & 0xf;
+
+ return ufp_num;
+}
+
static int
drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
{
int i;
- u8 unpacked_rad[16];
+ u8 unpacked_rad[16] = {};
- for (i = 0; i < lct; i++) {
- if (i % 2)
- unpacked_rad[i] = rad[i / 2] >> 4;
- else
- unpacked_rad[i] = rad[i / 2] & BIT_MASK(4);
- }
+ for (i = 0; i < lct; i++)
+ unpacked_rad[i] = drm_dp_mst_get_ufp_num_at_lct_from_rad(i + 1, rad);
/* TODO: Eventually add something to printk so we can format the rad
* like this: 1.2.3
@@ -2544,9 +2556,8 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
if (!mstb)
goto out;
- for (i = 0; i < lct - 1; i++) {
- int shift = (i % 2) ? 0 : 4;
- int port_num = (rad[i / 2] >> shift) & 0xf;
+ for (i = 1; i < lct; i++) {
+ int port_num = drm_dp_mst_get_ufp_num_at_lct_from_rad(i + 1, rad);
list_for_each_entry(port, &mstb->ports, next) {
if (port->port_num == port_num) {
@@ -4025,6 +4036,22 @@ out:
return 0;
}
+static bool primary_mstb_probing_is_done(struct drm_dp_mst_topology_mgr *mgr)
+{
+ bool probing_done = false;
+
+ mutex_lock(&mgr->lock);
+
+ if (mgr->mst_primary && drm_dp_mst_topology_try_get_mstb(mgr->mst_primary)) {
+ probing_done = mgr->mst_primary->link_address_sent;
+ drm_dp_mst_topology_put_mstb(mgr->mst_primary);
+ }
+
+ mutex_unlock(&mgr->lock);
+
+ return probing_done;
+}
+
static inline bool
drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_pending_up_req *up_req)
@@ -4055,8 +4082,12 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
/* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
- dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
- hotplug = true;
+ if (!primary_mstb_probing_is_done(mgr)) {
+			drm_dbg_kms(mgr->dev, "Got CSN before topology probing finished, skipping it\n");
+ } else {
+ dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
+ hotplug = true;
+ }
}
drm_dp_mst_topology_put_mstb(mstb);
@@ -4138,10 +4169,11 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
drm_dp_send_up_ack_reply(mgr, mst_primary, up_req->msg.req_type,
false);
+ drm_dp_mst_topology_put_mstb(mst_primary);
+
if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
const struct drm_dp_connection_status_notify *conn_stat =
&up_req->msg.u.conn_stat;
- bool handle_csn;
drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
conn_stat->port_number,
@@ -4150,16 +4182,6 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
conn_stat->message_capability_status,
conn_stat->input_port,
conn_stat->peer_device_type);
-
- mutex_lock(&mgr->probe_lock);
- handle_csn = mst_primary->link_address_sent;
- mutex_unlock(&mgr->probe_lock);
-
- if (!handle_csn) {
- drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.");
- kfree(up_req);
- goto out_put_primary;
- }
} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
const struct drm_dp_resource_status_notify *res_stat =
&up_req->msg.u.resource_stat;
@@ -4174,9 +4196,6 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
list_add_tail(&up_req->next, &mgr->up_req_list);
mutex_unlock(&mgr->up_req_lock);
queue_work(system_long_wq, &mgr->up_req_work);
-
-out_put_primary:
- drm_dp_mst_topology_put_mstb(mst_primary);
out_clear_reply:
reset_msg_rx_state(&mgr->up_req_recv);
return ret;
diff --git a/drivers/gpu/drm/display/drm_hdmi_state_helper.c b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
index 9b2ee2385634..c205f37da1e1 100644
--- a/drivers/gpu/drm/display/drm_hdmi_state_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
@@ -542,7 +542,7 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_hdmi_check);
*/
enum drm_mode_status
drm_hdmi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
unsigned long long clock;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 5186d2114a50..5302ab324898 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -574,6 +574,30 @@ mode_valid(struct drm_atomic_state *state)
return 0;
}
+static int drm_atomic_check_valid_clones(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ struct drm_encoder *drm_enc;
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
+
+ drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask) {
+ if (!drm_enc->possible_clones) {
+ DRM_DEBUG("enc%d possible_clones is 0\n", drm_enc->base.id);
+ continue;
+ }
+
+ if ((crtc_state->encoder_mask & drm_enc->possible_clones) !=
+ crtc_state->encoder_mask) {
+ DRM_DEBUG("crtc%d failed valid clone check for mask 0x%x\n",
+ crtc->base.id, crtc_state->encoder_mask);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
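A worked example of the mask test above, with purely illustrative encoder bits:

/*
 * Illustrative values only:
 *   crtc_state->encoder_mask   = 0x3   (encoders A and B cloned on one CRTC)
 *   encoder A possible_clones  = 0x3   -> (0x3 & 0x3) == 0x3, check passes
 *   encoder B possible_clones  = 0x2   -> (0x3 & 0x2) == 0x2 != 0x3, -EINVAL
 */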
/**
* drm_atomic_helper_check_modeset - validate state object for modeset changes
* @dev: DRM device
@@ -666,8 +690,9 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
}
if (new_crtc_state->enable != has_connectors) {
- drm_dbg_atomic(dev, "[CRTC:%d:%s] enabled/connectors mismatch\n",
- crtc->base.id, crtc->name);
+ drm_dbg_atomic(dev, "[CRTC:%d:%s] enabled/connectors mismatch (%d/%d)\n",
+ crtc->base.id, crtc->name,
+ new_crtc_state->enable, has_connectors);
return -EINVAL;
}
@@ -745,6 +770,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
ret = drm_atomic_add_affected_planes(state, crtc);
if (ret != 0)
return ret;
+
+ ret = drm_atomic_check_valid_clones(state, crtc);
+ if (ret != 0)
+ return ret;
}
/*
@@ -1059,6 +1088,15 @@ EXPORT_SYMBOL(drm_atomic_helper_check_planes);
* For example enable/disable of a cursor plane which have fixed zpos value
* would trigger all other enabled planes to be forced to the state change.
*
+ * IMPORTANT:
+ *
+ * As this function calls drm_atomic_helper_check_modeset() internally, its
+ * restrictions also apply:
+ * Drivers which set &drm_crtc_state.mode_changed (e.g. in their
+ * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
+ * without a full modeset) _must_ call drm_atomic_helper_check_modeset()
+ * function again after that change.
+ *
* RETURNS:
* Zero for success or -errno
*/
@@ -1122,7 +1160,7 @@ crtc_needs_disable(struct drm_crtc_state *old_state,
}
static void
-disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
+disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
@@ -1130,7 +1168,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
int i;
- for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
+ for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
struct drm_bridge *bridge;
@@ -1142,11 +1180,11 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
if (!old_conn_state->crtc)
continue;
- old_crtc_state = drm_atomic_get_old_crtc_state(old_state, old_conn_state->crtc);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, old_conn_state->crtc);
if (new_conn_state->crtc)
new_crtc_state = drm_atomic_get_new_crtc_state(
- old_state,
+ state,
new_conn_state->crtc);
else
new_crtc_state = NULL;
@@ -1173,12 +1211,12 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
* it away), so we won't call disable hooks twice.
*/
bridge = drm_bridge_chain_get_first_bridge(encoder);
- drm_atomic_bridge_chain_disable(bridge, old_state);
+ drm_atomic_bridge_chain_disable(bridge, state);
/* Right function depends upon target state. */
if (funcs) {
if (funcs->atomic_disable)
- funcs->atomic_disable(encoder, old_state);
+ funcs->atomic_disable(encoder, state);
else if (new_conn_state->crtc && funcs->prepare)
funcs->prepare(encoder);
else if (funcs->disable)
@@ -1187,10 +1225,10 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
- drm_atomic_bridge_chain_post_disable(bridge, old_state);
+ drm_atomic_bridge_chain_post_disable(bridge, state);
}
- for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
int ret;
@@ -1211,7 +1249,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
if (new_crtc_state->enable && funcs->prepare)
funcs->prepare(crtc);
else if (funcs->atomic_disable)
- funcs->atomic_disable(crtc, old_state);
+ funcs->atomic_disable(crtc, state);
else if (funcs->disable)
funcs->disable(crtc);
else if (funcs->dpms)
@@ -1239,7 +1277,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
/**
* drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state
* @dev: DRM device
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* This function updates all the various legacy modeset state pointers in
* connectors, encoders and CRTCs.
@@ -1255,7 +1293,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
*/
void
drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
@@ -1264,7 +1302,7 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
int i;
/* clear out existing links and update dpms */
- for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
+ for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
if (connector->encoder) {
WARN_ON(!connector->encoder->crtc);
@@ -1285,7 +1323,7 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
}
/* set new links */
- for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
+ for_each_new_connector_in_state(state, connector, new_conn_state, i) {
if (!new_conn_state->crtc)
continue;
@@ -1297,7 +1335,7 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
}
/* set legacy state in the crtc structure */
- for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
struct drm_plane *primary = crtc->primary;
struct drm_plane_state *new_plane_state;
@@ -1305,7 +1343,7 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
crtc->enabled = new_crtc_state->enable;
new_plane_state =
- drm_atomic_get_new_plane_state(old_state, primary);
+ drm_atomic_get_new_plane_state(state, primary);
if (new_plane_state && new_plane_state->crtc == crtc) {
crtc->x = new_plane_state->src_x >> 16;
@@ -1337,7 +1375,7 @@ void drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *stat
EXPORT_SYMBOL(drm_atomic_helper_calc_timestamping_constants);
static void
-crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
+crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
@@ -1345,7 +1383,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
struct drm_connector_state *new_conn_state;
int i;
- for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
if (!new_crtc_state->mode_changed)
@@ -1361,7 +1399,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
}
}
- for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
+ for_each_new_connector_in_state(state, connector, new_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
struct drm_display_mode *mode, *adjusted_mode;
@@ -1376,7 +1414,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
mode = &new_crtc_state->mode;
adjusted_mode = &new_crtc_state->adjusted_mode;
- if (!new_crtc_state->mode_changed)
+ if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed)
continue;
drm_dbg_atomic(dev, "modeset on [ENCODER:%d:%s]\n",
@@ -1401,7 +1439,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
/**
* drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
* @dev: DRM device
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* This function shuts down all the outputs that need to be shut down and
* prepares them (if required) with the new mode.
@@ -1413,25 +1451,25 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
* PM since planes updates then only happen when the CRTC is actually enabled.
*/
void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
- disable_outputs(dev, old_state);
+ disable_outputs(dev, state);
- drm_atomic_helper_update_legacy_modeset_state(dev, old_state);
- drm_atomic_helper_calc_timestamping_constants(old_state);
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
+ drm_atomic_helper_calc_timestamping_constants(state);
- crtc_set_mode(dev, old_state);
+ crtc_set_mode(dev, state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
int i;
- for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
+ for_each_new_connector_in_state(state, connector, new_conn_state, i) {
const struct drm_connector_helper_funcs *funcs;
funcs = connector->helper_private;
@@ -1440,7 +1478,7 @@ static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
- funcs->atomic_commit(connector, old_state);
+ funcs->atomic_commit(connector, state);
}
}
}
@@ -1448,7 +1486,7 @@ static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
/**
* drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
* @dev: DRM device
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* This function enables all the outputs with the new configuration which had to
* be turned off for the update.
@@ -1460,7 +1498,7 @@ static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
* PM since planes updates then only happen when the CRTC is actually enabled.
*/
void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
@@ -1469,7 +1507,7 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
struct drm_connector_state *new_conn_state;
int i;
- for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
/* Need to filter out CRTCs where only planes change. */
@@ -1485,13 +1523,13 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
drm_dbg_atomic(dev, "enabling [CRTC:%d:%s]\n",
crtc->base.id, crtc->name);
if (funcs->atomic_enable)
- funcs->atomic_enable(crtc, old_state);
+ funcs->atomic_enable(crtc, state);
else if (funcs->commit)
funcs->commit(crtc);
}
}
- for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
+ for_each_new_connector_in_state(state, connector, new_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
struct drm_bridge *bridge;
@@ -1514,21 +1552,21 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
* it away), so we won't call enable hooks twice.
*/
bridge = drm_bridge_chain_get_first_bridge(encoder);
- drm_atomic_bridge_chain_pre_enable(bridge, old_state);
+ drm_atomic_bridge_chain_pre_enable(bridge, state);
if (funcs) {
if (funcs->atomic_enable)
- funcs->atomic_enable(encoder, old_state);
+ funcs->atomic_enable(encoder, state);
else if (funcs->enable)
funcs->enable(encoder);
else if (funcs->commit)
funcs->commit(encoder);
}
- drm_atomic_bridge_chain_enable(bridge, old_state);
+ drm_atomic_bridge_chain_enable(bridge, state);
}
- drm_atomic_helper_commit_writebacks(dev, old_state);
+ drm_atomic_helper_commit_writebacks(dev, state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
@@ -1630,7 +1668,7 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
/**
* drm_atomic_helper_wait_for_vblanks - wait for vblank on CRTCs
* @dev: DRM device
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* Helper to, after atomic commit, wait for vblanks on all affected
* CRTCs (ie. before cleaning up old framebuffers using
@@ -1644,7 +1682,7 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
*/
void
drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
@@ -1655,10 +1693,10 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
* Legacy cursor ioctls are completely unsynced, and userspace
* relies on that (by doing tons of cursor updates).
*/
- if (old_state->legacy_cursor_update)
+ if (state->legacy_cursor_update)
return;
- for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (!new_crtc_state->active)
continue;
@@ -1667,17 +1705,17 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
continue;
crtc_mask |= drm_crtc_mask(crtc);
- old_state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc);
+ state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc);
}
- for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
if (!(crtc_mask & drm_crtc_mask(crtc)))
continue;
ret = wait_event_timeout(dev->vblank[i].queue,
- old_state->crtcs[i].last_vblank_count !=
- drm_crtc_vblank_count(crtc),
- msecs_to_jiffies(100));
+ state->crtcs[i].last_vblank_count !=
+ drm_crtc_vblank_count(crtc),
+ msecs_to_jiffies(100));
WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n",
crtc->base.id, crtc->name);
@@ -1690,7 +1728,7 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
/**
* drm_atomic_helper_wait_for_flip_done - wait for all page flips to be done
* @dev: DRM device
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* Helper to, after atomic commit, wait for page flips on all affected
* crtcs (ie. before cleaning up old framebuffers using
@@ -1703,16 +1741,16 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
* initialized using drm_atomic_helper_setup_commit().
*/
void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
int i;
for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct drm_crtc_commit *commit = old_state->crtcs[i].commit;
+ struct drm_crtc_commit *commit = state->crtcs[i].commit;
int ret;
- crtc = old_state->crtcs[i].ptr;
+ crtc = state->crtcs[i].ptr;
if (!crtc || !commit)
continue;
@@ -1723,14 +1761,14 @@ void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
crtc->base.id, crtc->name);
}
- if (old_state->fake_commit)
- complete_all(&old_state->fake_commit->flip_done);
+ if (state->fake_commit)
+ complete_all(&state->fake_commit->flip_done);
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
/**
* drm_atomic_helper_commit_tail - commit atomic update to hardware
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* This is the default implementation for the
* &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
@@ -1741,29 +1779,29 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
* Note that the default ordering of how the various stages are called is to
* match the legacy modeset helper library closest.
*/
-void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
+void drm_atomic_helper_commit_tail(struct drm_atomic_state *state)
{
- struct drm_device *dev = old_state->dev;
+ struct drm_device *dev = state->dev;
- drm_atomic_helper_commit_modeset_disables(dev, old_state);
+ drm_atomic_helper_commit_modeset_disables(dev, state);
- drm_atomic_helper_commit_planes(dev, old_state, 0);
+ drm_atomic_helper_commit_planes(dev, state, 0);
- drm_atomic_helper_commit_modeset_enables(dev, old_state);
+ drm_atomic_helper_commit_modeset_enables(dev, state);
- drm_atomic_helper_fake_vblank(old_state);
+ drm_atomic_helper_fake_vblank(state);
- drm_atomic_helper_commit_hw_done(old_state);
+ drm_atomic_helper_commit_hw_done(state);
- drm_atomic_helper_wait_for_vblanks(dev, old_state);
+ drm_atomic_helper_wait_for_vblanks(dev, state);
- drm_atomic_helper_cleanup_planes(dev, old_state);
+ drm_atomic_helper_cleanup_planes(dev, state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
/**
* drm_atomic_helper_commit_tail_rpm - commit atomic update to hardware
- * @old_state: new modeset state to be committed
+ * @state: new modeset state to be committed
*
* This is an alternative implementation for the
* &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
@@ -1771,30 +1809,30 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
* commit. Otherwise, one should use the default implementation
* drm_atomic_helper_commit_tail().
*/
-void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
+void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *state)
{
- struct drm_device *dev = old_state->dev;
+ struct drm_device *dev = state->dev;
- drm_atomic_helper_commit_modeset_disables(dev, old_state);
+ drm_atomic_helper_commit_modeset_disables(dev, state);
- drm_atomic_helper_commit_modeset_enables(dev, old_state);
+ drm_atomic_helper_commit_modeset_enables(dev, state);
- drm_atomic_helper_commit_planes(dev, old_state,
+ drm_atomic_helper_commit_planes(dev, state,
DRM_PLANE_COMMIT_ACTIVE_ONLY);
- drm_atomic_helper_fake_vblank(old_state);
+ drm_atomic_helper_fake_vblank(state);
- drm_atomic_helper_commit_hw_done(old_state);
+ drm_atomic_helper_commit_hw_done(state);
- drm_atomic_helper_wait_for_vblanks(dev, old_state);
+ drm_atomic_helper_wait_for_vblanks(dev, state);
- drm_atomic_helper_cleanup_planes(dev, old_state);
+ drm_atomic_helper_cleanup_planes(dev, state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_tail_rpm);
-static void commit_tail(struct drm_atomic_state *old_state)
+static void commit_tail(struct drm_atomic_state *state)
{
- struct drm_device *dev = old_state->dev;
+ struct drm_device *dev = state->dev;
const struct drm_mode_config_helper_funcs *funcs;
struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
@@ -1816,33 +1854,33 @@ static void commit_tail(struct drm_atomic_state *old_state)
*/
start = ktime_get();
- drm_atomic_helper_wait_for_fences(dev, old_state, false);
+ drm_atomic_helper_wait_for_fences(dev, state, false);
- drm_atomic_helper_wait_for_dependencies(old_state);
+ drm_atomic_helper_wait_for_dependencies(state);
/*
* We cannot safely access new_crtc_state after
* drm_atomic_helper_commit_hw_done() so figure out which crtc's have
* self-refresh active beforehand:
*/
- for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i)
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
if (new_crtc_state->self_refresh_active)
new_self_refresh_mask |= BIT(i);
if (funcs && funcs->atomic_commit_tail)
- funcs->atomic_commit_tail(old_state);
+ funcs->atomic_commit_tail(state);
else
- drm_atomic_helper_commit_tail(old_state);
+ drm_atomic_helper_commit_tail(state);
commit_time_ms = ktime_ms_delta(ktime_get(), start);
if (commit_time_ms > 0)
- drm_self_refresh_helper_update_avg_times(old_state,
+ drm_self_refresh_helper_update_avg_times(state,
(unsigned long)commit_time_ms,
new_self_refresh_mask);
- drm_atomic_helper_commit_cleanup_done(old_state);
+ drm_atomic_helper_commit_cleanup_done(state);
- drm_atomic_state_put(old_state);
+ drm_atomic_state_put(state);
}
static void commit_work(struct work_struct *work)
@@ -1928,7 +1966,7 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
return -EBUSY;
}
- ret = funcs->atomic_async_check(plane, state);
+ ret = funcs->atomic_async_check(plane, state, false);
if (ret != 0)
drm_dbg_atomic(dev,
"[PLANE:%d:%s] driver async check failed\n",
@@ -2385,17 +2423,17 @@ EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
/**
* drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* This function waits for all preceding commits that touch the same CRTC as
- * @old_state to both be committed to the hardware (as signalled by
+ * @state to both be committed to the hardware (as signalled by
* drm_atomic_helper_commit_hw_done()) and executed by the hardware (as signalled
* by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
*
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
*/
-void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
+void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
@@ -2406,7 +2444,7 @@ void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
int i;
long ret;
- for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
ret = drm_crtc_commit_wait(old_crtc_state->commit);
if (ret)
drm_err(crtc->dev,
@@ -2414,7 +2452,7 @@ void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
crtc->base.id, crtc->name);
}
- for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
+ for_each_old_connector_in_state(state, conn, old_conn_state, i) {
ret = drm_crtc_commit_wait(old_conn_state->commit);
if (ret)
drm_err(conn->dev,
@@ -2422,7 +2460,7 @@ void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
conn->base.id, conn->name);
}
- for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
+ for_each_old_plane_in_state(state, plane, old_plane_state, i) {
ret = drm_crtc_commit_wait(old_plane_state->commit);
if (ret)
drm_err(plane->dev,
@@ -2434,7 +2472,7 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
/**
* drm_atomic_helper_fake_vblank - fake VBLANK events if needed
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* This function walks all CRTCs and fakes VBLANK events on those with
* &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL.
@@ -2450,32 +2488,32 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
*/
-void drm_atomic_helper_fake_vblank(struct drm_atomic_state *old_state)
+void drm_atomic_helper_fake_vblank(struct drm_atomic_state *state)
{
struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
int i;
- for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
unsigned long flags;
if (!new_crtc_state->no_vblank)
continue;
- spin_lock_irqsave(&old_state->dev->event_lock, flags);
+ spin_lock_irqsave(&state->dev->event_lock, flags);
if (new_crtc_state->event) {
drm_crtc_send_vblank_event(crtc,
new_crtc_state->event);
new_crtc_state->event = NULL;
}
- spin_unlock_irqrestore(&old_state->dev->event_lock, flags);
+ spin_unlock_irqrestore(&state->dev->event_lock, flags);
}
}
EXPORT_SYMBOL(drm_atomic_helper_fake_vblank);
/**
* drm_atomic_helper_commit_hw_done - setup possible nonblocking commit
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* This function is used to signal completion of the hardware commit step. After
* this step the driver is not allowed to read or change any permanent software
@@ -2488,14 +2526,14 @@ EXPORT_SYMBOL(drm_atomic_helper_fake_vblank);
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
*/
-void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state)
+void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_crtc_commit *commit;
int i;
- for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
commit = new_crtc_state->commit;
if (!commit)
continue;
@@ -2515,32 +2553,32 @@ void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state)
complete_all(&commit->hw_done);
}
- if (old_state->fake_commit) {
- complete_all(&old_state->fake_commit->hw_done);
- complete_all(&old_state->fake_commit->flip_done);
+ if (state->fake_commit) {
+ complete_all(&state->fake_commit->hw_done);
+ complete_all(&state->fake_commit->flip_done);
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
/**
* drm_atomic_helper_commit_cleanup_done - signal completion of commit
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
- * This signals completion of the atomic update @old_state, including any
+ * This signals completion of the atomic update @state, including any
* cleanup work. If used, it must be called right before calling
* drm_atomic_state_put().
*
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
*/
-void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
+void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
struct drm_crtc_commit *commit;
int i;
- for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
commit = old_crtc_state->commit;
if (WARN_ON(!commit))
continue;
@@ -2553,9 +2591,9 @@ void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
spin_unlock(&crtc->commit_lock);
}
- if (old_state->fake_commit) {
- complete_all(&old_state->fake_commit->cleanup_done);
- WARN_ON(!try_wait_for_completion(&old_state->fake_commit->hw_done));
+ if (state->fake_commit) {
+ complete_all(&state->fake_commit->cleanup_done);
+ WARN_ON(!try_wait_for_completion(&state->fake_commit->hw_done));
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
@@ -2693,7 +2731,7 @@ static bool plane_crtc_active(const struct drm_plane_state *state)
/**
* drm_atomic_helper_commit_planes - commit plane state
* @dev: DRM device
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
* @flags: flags for committing plane state
*
* This function commits the new plane state using the plane and atomic helper
@@ -2701,7 +2739,7 @@ static bool plane_crtc_active(const struct drm_plane_state *state)
* been pushed into the relevant object state pointers, since this step can no
* longer fail.
*
- * It still requires the global state object @old_state to know which planes and
+ * It still requires the global state object @state to know which planes and
* crtcs need to be updated though.
*
* Note that this function does all plane updates across all CRTCs in one step.
@@ -2732,7 +2770,7 @@ static bool plane_crtc_active(const struct drm_plane_state *state)
* This should not be copied blindly by drivers.
*/
void drm_atomic_helper_commit_planes(struct drm_device *dev,
- struct drm_atomic_state *old_state,
+ struct drm_atomic_state *state,
uint32_t flags)
{
struct drm_crtc *crtc;
@@ -2743,7 +2781,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY;
bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET;
- for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
funcs = crtc->helper_private;
@@ -2754,10 +2792,10 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
if (active_only && !new_crtc_state->active)
continue;
- funcs->atomic_begin(crtc, old_state);
+ funcs->atomic_begin(crtc, state);
}
- for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
bool disabling;
@@ -2795,18 +2833,18 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
no_disable)
continue;
- funcs->atomic_disable(plane, old_state);
+ funcs->atomic_disable(plane, state);
} else if (new_plane_state->crtc || disabling) {
- funcs->atomic_update(plane, old_state);
+ funcs->atomic_update(plane, state);
if (!disabling && funcs->atomic_enable) {
if (drm_atomic_plane_enabling(old_plane_state, new_plane_state))
- funcs->atomic_enable(plane, old_state);
+ funcs->atomic_enable(plane, state);
}
}
}
- for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
funcs = crtc->helper_private;
@@ -2817,14 +2855,14 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
if (active_only && !new_crtc_state->active)
continue;
- funcs->atomic_flush(crtc, old_state);
+ funcs->atomic_flush(crtc, state);
}
/*
* Signal end of framebuffer access here before hw_done. After hw_done,
* a later commit might have already released the plane state.
*/
- for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
+ for_each_old_plane_in_state(state, plane, old_plane_state, i) {
const struct drm_plane_helper_funcs *funcs = plane->helper_private;
if (funcs->end_fb_access)
@@ -2951,10 +2989,10 @@ EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
/**
* drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
* @dev: DRM device
- * @old_state: atomic state object with old state structures
+ * @state: atomic state object being committed
*
* This function cleans up plane state, specifically framebuffers, from the old
- * configuration. Hence the old configuration must be perserved in @old_state to
+ * configuration. Hence the old configuration must be preserved in @state to
* be able to call this function.
*
* This function may not be called on the new state when the atomic update
@@ -2962,13 +3000,13 @@ EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
* drm_atomic_helper_unprepare_planes() in this case.
*/
void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_plane *plane;
struct drm_plane_state *old_plane_state;
int i;
- for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
+ for_each_old_plane_in_state(state, plane, old_plane_state, i) {
const struct drm_plane_helper_funcs *funcs = plane->helper_private;
if (funcs->cleanup_fb)
@@ -3363,6 +3401,51 @@ free:
EXPORT_SYMBOL(drm_atomic_helper_disable_all);
/**
+ * drm_atomic_helper_reset_crtc - reset the active outputs of a CRTC
+ * @crtc: DRM CRTC
+ * @ctx: lock acquisition context
+ *
+ * Reset the active outputs by indicating that connectors have changed.
+ * This implies a reset of all active components available between the CRTC and
+ * connectors.
+ *
+ * NOTE: This works by setting &drm_crtc_state.connectors_changed.
+ * For drivers which optimize out unnecessary modesets this will result in
+ * a no-op commit, achieving nothing.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_atomic_helper_reset_crtc(struct drm_crtc *crtc,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ state = drm_atomic_state_alloc(crtc->dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ctx;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto out;
+ }
+
+ crtc_state->connectors_changed = true;
+
+ ret = drm_atomic_commit(state);
+out:
+ drm_atomic_state_put(state);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_reset_crtc);
+
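A hedged sketch of a possible caller, e.g. a driver forcing a full modeset after link-training trouble; the surrounding function is hypothetical, only drm_atomic_helper_reset_crtc() and the DRM_MODESET_LOCK_ALL_* macros are existing interfaces:

static int example_force_modeset(struct drm_crtc *crtc)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	DRM_MODESET_LOCK_ALL_BEGIN(crtc->dev, ctx, 0, ret);

	/* Flag connectors_changed so the next commit resets the whole pipe. */
	ret = drm_atomic_helper_reset_crtc(crtc, &ctx);

	DRM_MODESET_LOCK_ALL_END(crtc->dev, ctx, ret);

	return ret;
}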
+/**
* drm_atomic_helper_shutdown - shutdown all CRTC
* @dev: DRM device
*
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 370dc676e3aa..c2726af6698e 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -27,8 +27,9 @@
* Daniel Vetter <daniel.vetter@ffwll.ch>
*/
-#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>
#include <drm/drm_drv.h>
@@ -956,6 +957,10 @@ int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
if (mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
+
+ if (connector->dpms == mode)
+ goto out;
+
connector->dpms = mode;
crtc = connector->state->crtc;
@@ -1063,6 +1068,7 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
struct drm_plane *plane = obj_to_plane(obj);
struct drm_plane_state *plane_state;
struct drm_mode_config *config = &plane->dev->mode_config;
+ const struct drm_plane_helper_funcs *plane_funcs = plane->helper_private;
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
@@ -1070,15 +1076,30 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
break;
}
- if (async_flip &&
- (plane_state->plane->type != DRM_PLANE_TYPE_PRIMARY ||
- (prop != config->prop_fb_id &&
- prop != config->prop_in_fence_fd &&
- prop != config->prop_fb_damage_clips))) {
- ret = drm_atomic_plane_get_property(plane, plane_state,
- prop, &old_val);
- ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
- break;
+ if (async_flip) {
+ /* check if the prop does a nop change */
+ if ((prop != config->prop_fb_id &&
+ prop != config->prop_in_fence_fd &&
+ prop != config->prop_fb_damage_clips)) {
+ ret = drm_atomic_plane_get_property(plane, plane_state,
+ prop, &old_val);
+ ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
+ }
+
+ /* ask the driver if this non-primary plane is supported */
+ if (plane->type != DRM_PLANE_TYPE_PRIMARY) {
+ ret = -EINVAL;
+
+ if (plane_funcs && plane_funcs->atomic_async_check)
+ ret = plane_funcs->atomic_async_check(plane, state, true);
+
+ if (ret) {
+ drm_dbg_atomic(prop->dev,
+ "[PLANE:%d:%s] does not support async flips\n",
+ obj->id, plane->name);
+ break;
+ }
+ }
}
ret = drm_atomic_plane_set_property(plane,
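
With this change, a non-primary plane only gets async flips if its &drm_plane_helper_funcs.atomic_async_check accepts the request; the hook's new third argument tells the driver the call comes from the async-flip path. The policy below is purely a sketch, only the signature follows this series:

static int example_plane_atomic_async_check(struct drm_plane *plane,
					    struct drm_atomic_state *state,
					    bool flip)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);

	if (!new_state || !new_state->fb)
		return -EINVAL;

	/* Hypothetical policy: only overlay planes may be flipped asynchronously. */
	if (flip && plane->type != DRM_PLANE_TYPE_OVERLAY)
		return -EINVAL;

	return 0;
}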
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 241a384ebce3..fa2794217a90 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -550,7 +550,7 @@ EXPORT_SYMBOL(drm_bridge_chain_mode_set);
/**
* drm_atomic_bridge_chain_disable - disables all bridges in the encoder chain
* @bridge: bridge control structure
- * @old_state: old atomic state
+ * @state: atomic state being committed
*
* Calls &drm_bridge_funcs.atomic_disable (falls back on
* &drm_bridge_funcs.disable) op for all the bridges in the encoder chain,
@@ -560,7 +560,7 @@ EXPORT_SYMBOL(drm_bridge_chain_mode_set);
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_encoder *encoder;
struct drm_bridge *iter;
@@ -571,15 +571,7 @@ void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
encoder = bridge->encoder;
list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
if (iter->funcs->atomic_disable) {
- struct drm_bridge_state *old_bridge_state;
-
- old_bridge_state =
- drm_atomic_get_old_bridge_state(old_state,
- iter);
- if (WARN_ON(!old_bridge_state))
- return;
-
- iter->funcs->atomic_disable(iter, old_bridge_state);
+ iter->funcs->atomic_disable(iter, state);
} else if (iter->funcs->disable) {
iter->funcs->disable(iter);
}
@@ -591,29 +583,19 @@ void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);
static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
- if (old_state && bridge->funcs->atomic_post_disable) {
- struct drm_bridge_state *old_bridge_state;
-
- old_bridge_state =
- drm_atomic_get_old_bridge_state(old_state,
- bridge);
- if (WARN_ON(!old_bridge_state))
- return;
-
- bridge->funcs->atomic_post_disable(bridge,
- old_bridge_state);
- } else if (bridge->funcs->post_disable) {
+ if (state && bridge->funcs->atomic_post_disable)
+ bridge->funcs->atomic_post_disable(bridge, state);
+ else if (bridge->funcs->post_disable)
bridge->funcs->post_disable(bridge);
- }
}
/**
* drm_atomic_bridge_chain_post_disable - cleans up after disabling all bridges
* in the encoder chain
* @bridge: bridge control structure
- * @old_state: old atomic state
+ * @state: atomic state being committed
*
* Calls &drm_bridge_funcs.atomic_post_disable (falls back on
* &drm_bridge_funcs.post_disable) op for all the bridges in the encoder chain,
@@ -634,7 +616,7 @@ static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge,
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_encoder *encoder;
struct drm_bridge *next, *limit;
@@ -681,12 +663,12 @@ void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
break;
drm_atomic_bridge_call_post_disable(next,
- old_state);
+ state);
}
}
}
- drm_atomic_bridge_call_post_disable(bridge, old_state);
+ drm_atomic_bridge_call_post_disable(bridge, state);
if (limit)
/* Jump all bridges that we have already post_disabled */
@@ -696,28 +678,19 @@ void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
- if (old_state && bridge->funcs->atomic_pre_enable) {
- struct drm_bridge_state *old_bridge_state;
-
- old_bridge_state =
- drm_atomic_get_old_bridge_state(old_state,
- bridge);
- if (WARN_ON(!old_bridge_state))
- return;
-
- bridge->funcs->atomic_pre_enable(bridge, old_bridge_state);
- } else if (bridge->funcs->pre_enable) {
+ if (state && bridge->funcs->atomic_pre_enable)
+ bridge->funcs->atomic_pre_enable(bridge, state);
+ else if (bridge->funcs->pre_enable)
bridge->funcs->pre_enable(bridge);
- }
}
/**
* drm_atomic_bridge_chain_pre_enable - prepares for enabling all bridges in
* the encoder chain
* @bridge: bridge control structure
- * @old_state: old atomic state
+ * @state: atomic state being committed
*
* Calls &drm_bridge_funcs.atomic_pre_enable (falls back on
* &drm_bridge_funcs.pre_enable) op for all the bridges in the encoder chain,
@@ -737,7 +710,7 @@ static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge,
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_encoder *encoder;
struct drm_bridge *iter, *next, *limit;
@@ -776,11 +749,11 @@ void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
*/
break;
- drm_atomic_bridge_call_pre_enable(next, old_state);
+ drm_atomic_bridge_call_pre_enable(next, state);
}
}
- drm_atomic_bridge_call_pre_enable(iter, old_state);
+ drm_atomic_bridge_call_pre_enable(iter, state);
if (iter->pre_enable_prev_first)
/* Jump all bridges that we have already pre_enabled */
@@ -795,7 +768,7 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_pre_enable);
/**
* drm_atomic_bridge_chain_enable - enables all bridges in the encoder chain
* @bridge: bridge control structure
- * @old_state: old atomic state
+ * @state: atomic state being committed
*
* Calls &drm_bridge_funcs.atomic_enable (falls back on
* &drm_bridge_funcs.enable) op for all the bridges in the encoder chain,
@@ -805,7 +778,7 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_pre_enable);
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_encoder *encoder;
@@ -815,15 +788,7 @@ void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
encoder = bridge->encoder;
list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
if (bridge->funcs->atomic_enable) {
- struct drm_bridge_state *old_bridge_state;
-
- old_bridge_state =
- drm_atomic_get_old_bridge_state(old_state,
- bridge);
- if (WARN_ON(!old_bridge_state))
- return;
-
- bridge->funcs->atomic_enable(bridge, old_bridge_state);
+ bridge->funcs->atomic_enable(bridge, state);
} else if (bridge->funcs->enable) {
bridge->funcs->enable(bridge);
}
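
Bridge drivers now receive the full atomic state in these hooks; one that still needs its old bridge state simply looks it up itself. A minimal sketch, where example_bridge_power_off() is a hypothetical driver helper:

static void example_bridge_atomic_disable(struct drm_bridge *bridge,
					  struct drm_atomic_state *state)
{
	struct drm_bridge_state *old_bridge_state =
		drm_atomic_get_old_bridge_state(state, bridge);

	if (WARN_ON(!old_bridge_state))
		return;

	/* Hypothetical hardware teardown based on the old configuration. */
	example_bridge_power_off(bridge, old_bridge_state);
}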
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index 103c185bb1c8..241c855f891f 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -3,6 +3,8 @@
* Copyright © 2021 Intel Corporation
*/
+#include <kunit/test-bug.h>
+
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/sizes.h>
@@ -324,7 +326,7 @@ EXPORT_SYMBOL(drm_buddy_init);
*/
void drm_buddy_fini(struct drm_buddy *mm)
{
- u64 root_size, size;
+ u64 root_size, size, start;
unsigned int order;
int i;
@@ -332,9 +334,12 @@ void drm_buddy_fini(struct drm_buddy *mm)
for (i = 0; i < mm->n_roots; ++i) {
order = ilog2(size) - ilog2(mm->chunk_size);
- __force_merge(mm, 0, size, order);
+ start = drm_buddy_block_offset(mm->roots[i]);
+ __force_merge(mm, start, start + size, order);
+
+ if (WARN_ON(!drm_buddy_block_is_free(mm->roots[i])))
+ kunit_fail_current_test("buddy_fini() root");
- WARN_ON(!drm_buddy_block_is_free(mm->roots[i]));
drm_block_free(mm, mm->roots[i]);
root_size = mm->chunk_size << order;
diff --git a/drivers/gpu/drm/drm_client_event.c b/drivers/gpu/drm/drm_client_event.c
index e303de564485..bd93cd93d519 100644
--- a/drivers/gpu/drm/drm_client_event.c
+++ b/drivers/gpu/drm/drm_client_event.c
@@ -49,6 +49,29 @@ void drm_client_dev_unregister(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_client_dev_unregister);
+static void drm_client_hotplug(struct drm_client_dev *client)
+{
+ struct drm_device *dev = client->dev;
+ int ret;
+
+ if (!client->funcs || !client->funcs->hotplug)
+ return;
+
+ if (client->hotplug_failed)
+ return;
+
+ if (client->suspended) {
+ client->hotplug_pending = true;
+ return;
+ }
+
+ client->hotplug_pending = false;
+ ret = client->funcs->hotplug(client);
+ drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
+ if (ret)
+ client->hotplug_failed = true;
+}
+
/**
* drm_client_dev_hotplug - Send hotplug event to clients
* @dev: DRM device
@@ -61,7 +84,6 @@ EXPORT_SYMBOL(drm_client_dev_unregister);
void drm_client_dev_hotplug(struct drm_device *dev)
{
struct drm_client_dev *client;
- int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
@@ -72,18 +94,8 @@ void drm_client_dev_hotplug(struct drm_device *dev)
}
mutex_lock(&dev->clientlist_mutex);
- list_for_each_entry(client, &dev->clientlist, list) {
- if (!client->funcs || !client->funcs->hotplug)
- continue;
-
- if (client->hotplug_failed)
- continue;
-
- ret = client->funcs->hotplug(client);
- drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
- if (ret)
- client->hotplug_failed = true;
- }
+ list_for_each_entry(client, &dev->clientlist, list)
+ drm_client_hotplug(client);
mutex_unlock(&dev->clientlist_mutex);
}
EXPORT_SYMBOL(drm_client_dev_hotplug);
@@ -153,6 +165,9 @@ static int drm_client_resume(struct drm_client_dev *client, bool holds_console_l
client->suspended = false;
+ if (client->hotplug_pending)
+ drm_client_hotplug(client);
+
return ret;
}
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 5f24d6b41cc6..48b08c9611a7 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -1427,6 +1427,10 @@ EXPORT_SYMBOL(drm_hdmi_connector_get_output_format_name);
* callback. For atomic drivers the remapping to the "ACTIVE" property is
* implemented in the DRM core.
*
+ * On atomic drivers any DPMS setproperty ioctl where the value does not
+ * change is completely skipped, otherwise a full atomic commit will occur.
+ * On legacy drivers the exact behavior is driver specific.
+ *
* Note that this property cannot be set through the MODE_ATOMIC ioctl,
* userspace must use "ACTIVE" on the CRTC instead.
*
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 3488ff067c69..46655339003d 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -939,3 +939,23 @@ int drm_crtc_create_scaling_filter_property(struct drm_crtc *crtc,
return 0;
}
EXPORT_SYMBOL(drm_crtc_create_scaling_filter_property);
+
+/**
+ * drm_crtc_in_clone_mode - check if the given CRTC state is in clone mode
+ *
+ * @crtc_state: CRTC state to check
+ *
+ * This function determines whether the CRTC described by @crtc_state drives
+ * multiple encoders, i.e. whether it is in clone mode.
+ *
+ * RETURNS:
+ * True if the CRTC state is in clone mode, false otherwise.
+ */
+bool drm_crtc_in_clone_mode(struct drm_crtc_state *crtc_state)
+{
+ if (!crtc_state)
+ return false;
+
+ return hweight32(crtc_state->encoder_mask) > 1;
+}
+EXPORT_SYMBOL(drm_crtc_in_clone_mode);
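A short usage sketch; the atomic_check body is hypothetical, only drm_crtc_in_clone_mode() comes from this patch:

static int example_crtc_atomic_check(struct drm_crtc *crtc,
				     struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state =
		drm_atomic_get_new_crtc_state(state, crtc);

	/* Hypothetical hardware that cannot drive cloned outputs. */
	if (drm_crtc_in_clone_mode(crtc_state)) {
		drm_dbg_kms(crtc->dev, "[CRTC:%d:%s] cloning not supported\n",
			    crtc->base.id, crtc->name);
		return -EINVAL;
	}

	return 0;
}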
diff --git a/drivers/gpu/drm/drm_crtc_helper_internal.h b/drivers/gpu/drm/drm_crtc_helper_internal.h
index 8059f65c5d6c..bae73936acf9 100644
--- a/drivers/gpu/drm/drm_crtc_helper_internal.h
+++ b/drivers/gpu/drm/drm_crtc_helper_internal.h
@@ -43,7 +43,7 @@ enum drm_mode_status drm_encoder_mode_valid(struct drm_encoder *encoder,
const struct drm_display_mode *mode);
int
drm_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_modeset_acquire_ctx *ctx,
enum drm_mode_status *status);
diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c
index afb02aae707b..44a5a36806e3 100644
--- a/drivers/gpu/drm/drm_damage_helper.c
+++ b/drivers/gpu/drm/drm_damage_helper.c
@@ -308,7 +308,7 @@ EXPORT_SYMBOL(drm_atomic_helper_damage_iter_next);
* True if there is valid plane damage otherwise false.
*/
bool drm_atomic_helper_damage_merged(const struct drm_plane_state *old_state,
- struct drm_plane_state *state,
+ const struct drm_plane_state *state,
struct drm_rect *rect)
{
struct drm_atomic_helper_damage_iter iter;
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 536409a35df4..6b2178864c7e 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -748,7 +748,7 @@ static int bridges_show(struct seq_file *m, void *data)
unsigned int idx = 0;
drm_for_each_bridge_in_chain(encoder, bridge) {
- drm_printf(&p, "bridge[%d]: %ps\n", idx++, bridge->funcs);
+ drm_printf(&p, "bridge[%u]: %ps\n", idx++, bridge->funcs);
drm_printf(&p, "\ttype: [%d] %s\n",
bridge->type,
drm_get_connector_type_name(bridge->type));
diff --git a/drivers/gpu/drm/drm_draw.c b/drivers/gpu/drm/drm_draw.c
index cb2ad12bce57..385eb5e10047 100644
--- a/drivers/gpu/drm/drm_draw.c
+++ b/drivers/gpu/drm/drm_draw.c
@@ -5,6 +5,8 @@
*/
#include <linux/bits.h>
+#include <linux/bug.h>
+#include <linux/export.h>
#include <linux/iosys-map.h>
#include <linux/types.h>
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 3cf440eee8a2..17fc5dc708f4 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -26,6 +26,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/bitops.h>
#include <linux/cgroup_dmem.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
@@ -34,6 +35,7 @@
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/slab.h>
+#include <linux/sprintf.h>
#include <linux/srcu.h>
#include <linux/xarray.h>
@@ -499,6 +501,72 @@ void drm_dev_unplug(struct drm_device *dev)
EXPORT_SYMBOL(drm_dev_unplug);
/*
+ * Available recovery methods for wedged device. To be sent along with device
+ * wedged uevent.
+ */
+static const char *drm_get_wedge_recovery(unsigned int opt)
+{
+ switch (BIT(opt)) {
+ case DRM_WEDGE_RECOVERY_NONE:
+ return "none";
+ case DRM_WEDGE_RECOVERY_REBIND:
+ return "rebind";
+ case DRM_WEDGE_RECOVERY_BUS_RESET:
+ return "bus-reset";
+ default:
+ return NULL;
+ }
+}
+
+/**
+ * drm_dev_wedged_event - generate a device wedged uevent
+ * @dev: DRM device
+ * @method: method(s) to be used for recovery
+ *
+ * This generates a device wedged uevent for the DRM device specified by @dev.
+ * Recovery @method\(s) of choice will be sent in the uevent environment as
+ * ``WEDGED=<method1>[,..,<methodN>]`` in order of less to more side-effects.
+ * If caller is unsure about recovery or @method is unknown (0),
+ * ``WEDGED=unknown`` will be sent instead.
+ *
+ * Refer to "Device Wedging" chapter in Documentation/gpu/drm-uapi.rst for more
+ * details.
+ *
+ * Returns: 0 on success, negative error code otherwise.
+ */
+int drm_dev_wedged_event(struct drm_device *dev, unsigned long method)
+{
+ const char *recovery = NULL;
+ unsigned int len, opt;
+	/* Event string length can reach about 28 characters with all available methods */
+ char event_string[32];
+ char *envp[] = { event_string, NULL };
+
+ len = scnprintf(event_string, sizeof(event_string), "%s", "WEDGED=");
+
+ for_each_set_bit(opt, &method, BITS_PER_TYPE(method)) {
+ recovery = drm_get_wedge_recovery(opt);
+ if (drm_WARN_ONCE(dev, !recovery, "invalid recovery method %u\n", opt))
+ break;
+
+		len += scnprintf(event_string + len, sizeof(event_string) - len, "%s,", recovery);
+ }
+
+ if (recovery)
+ /* Get rid of trailing comma */
+ event_string[len - 1] = '\0';
+ else
+ /* Caller is unsure about recovery, do the best we can at this point. */
+ snprintf(event_string, sizeof(event_string), "%s", "WEDGED=unknown");
+
+ drm_info(dev, "device wedged, %s\n", method == DRM_WEDGE_RECOVERY_NONE ?
+ "but recovered through reset" : "needs recovery");
+
+ return kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
+}
+EXPORT_SYMBOL(drm_dev_wedged_event);
+
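A sketch of the intended call site, for instance after a GPU reset has failed; the reset path itself is an assumption, the recovery flags are the ones translated above:

static void example_handle_unrecoverable_hang(struct drm_device *dev)
{
	/*
	 * Device is wedged: ask userspace for a driver rebind or, failing
	 * that, a bus reset. Sends WEDGED=rebind,bus-reset in the uevent.
	 */
	drm_dev_wedged_event(dev, DRM_WEDGE_RECOVERY_REBIND |
				  DRM_WEDGE_RECOVERY_BUS_RESET);
}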
+/*
* DRM internal mount
* We want to be able to allocate our own "struct address_space" to control
* memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
diff --git a/drivers/gpu/drm/drm_fb_dma_helper.c b/drivers/gpu/drm/drm_fb_dma_helper.c
index e1d61a65210b..2c4dc7ebc0c3 100644
--- a/drivers/gpu/drm/drm_fb_dma_helper.c
+++ b/drivers/gpu/drm/drm_fb_dma_helper.c
@@ -178,7 +178,7 @@ int drm_fb_dma_get_scanout_buffer(struct drm_plane *plane,
dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
/* Buffer should be accessible from the CPU */
- if (dma_obj->base.import_attach)
+ if (drm_gem_is_imported(&dma_obj->base))
return -ENODEV;
/* Buffer should be already mapped to CPU */
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index fb3614a7ba44..937c3939e502 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -245,6 +245,9 @@ __drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper,
if (do_delayed)
drm_fb_helper_hotplug_event(fb_helper);
+ if (fb_helper->funcs->fb_restore)
+ fb_helper->funcs->fb_restore(fb_helper);
+
return ret;
}
@@ -754,7 +757,12 @@ EXPORT_SYMBOL(drm_fb_helper_deferred_io);
*/
void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend)
{
- if (fb_helper && fb_helper->info)
+ if (!fb_helper || !fb_helper->info)
+ return;
+
+ if (fb_helper->funcs->fb_set_suspend)
+ fb_helper->funcs->fb_set_suspend(fb_helper, suspend);
+ else
fb_set_suspend(fb_helper->info, suspend);
}
EXPORT_SYMBOL(drm_fb_helper_set_suspend);
@@ -800,7 +808,7 @@ void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper,
}
}
- fb_set_suspend(fb_helper->info, suspend);
+ drm_fb_helper_set_suspend(fb_helper, suspend);
console_unlock();
}
EXPORT_SYMBOL(drm_fb_helper_set_suspend_unlocked);
@@ -1626,6 +1634,9 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper)
struct fb_info *info;
int ret;
+ if (drm_WARN_ON(dev, !dev->driver->fbdev_probe))
+ return -EINVAL;
+
ret = drm_fb_helper_find_sizes(fb_helper, &sizes);
if (ret) {
/* First time: disable all crtc's.. */
@@ -1635,10 +1646,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper)
}
/* push down into drivers */
- if (dev->driver->fbdev_probe)
- ret = dev->driver->fbdev_probe(fb_helper, &sizes);
- else if (fb_helper->funcs)
- ret = fb_helper->funcs->fb_probe(fb_helper, &sizes);
+ ret = dev->driver->fbdev_probe(fb_helper, &sizes);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 2289e71e2fa2..c299cd94d3f7 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -830,8 +830,11 @@ void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
}
EXPORT_SYMBOL(drm_send_event);
-static void print_size(struct drm_printer *p, const char *stat,
- const char *region, u64 sz)
+void drm_fdinfo_print_size(struct drm_printer *p,
+ const char *prefix,
+ const char *stat,
+ const char *region,
+ u64 sz)
{
const char *units[] = {"", " KiB", " MiB"};
unsigned u;
@@ -842,8 +845,10 @@ static void print_size(struct drm_printer *p, const char *stat,
sz = div_u64(sz, SZ_1K);
}
- drm_printf(p, "drm-%s-%s:\t%llu%s\n", stat, region, sz, units[u]);
+ drm_printf(p, "%s-%s-%s:\t%llu%s\n",
+ prefix, stat, region, sz, units[u]);
}
+EXPORT_SYMBOL(drm_fdinfo_print_size);
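+
+/*
+ * For reference, a hypothetical call such as
+ * drm_fdinfo_print_size(p, "drm", "total", "vram0", 8 * SZ_1M) emits the
+ * fdinfo line "drm-total-vram0:\t8 MiB" with the formatting above.
+ */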
int drm_memory_stats_is_zero(const struct drm_memory_stats *stats)
{
@@ -868,17 +873,22 @@ void drm_print_memory_stats(struct drm_printer *p,
enum drm_gem_object_status supported_status,
const char *region)
{
- print_size(p, "total", region, stats->private + stats->shared);
- print_size(p, "shared", region, stats->shared);
+ const char *prefix = "drm";
+
+ drm_fdinfo_print_size(p, prefix, "total", region,
+ stats->private + stats->shared);
+ drm_fdinfo_print_size(p, prefix, "shared", region, stats->shared);
if (supported_status & DRM_GEM_OBJECT_ACTIVE)
- print_size(p, "active", region, stats->active);
+ drm_fdinfo_print_size(p, prefix, "active", region, stats->active);
if (supported_status & DRM_GEM_OBJECT_RESIDENT)
- print_size(p, "resident", region, stats->resident);
+ drm_fdinfo_print_size(p, prefix, "resident", region,
+ stats->resident);
if (supported_status & DRM_GEM_OBJECT_PURGEABLE)
- print_size(p, "purgeable", region, stats->purgeable);
+ drm_fdinfo_print_size(p, prefix, "purgeable", region,
+ stats->purgeable);
}
EXPORT_SYMBOL(drm_print_memory_stats);
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index b1be458ed4dd..01d3ab307ac3 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -702,6 +702,57 @@ void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pi
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888);
+static void drm_fb_xrgb8888_to_bgr888_line(void *dbuf, const void *sbuf, unsigned int pixels)
+{
+ u8 *dbuf8 = dbuf;
+ const __le32 *sbuf32 = sbuf;
+ unsigned int x;
+ u32 pix;
+
+ for (x = 0; x < pixels; x++) {
+ pix = le32_to_cpu(sbuf32[x]);
+ /* write red-green-blue to output in little endianness */
+ *dbuf8++ = (pix & 0x00ff0000) >> 16;
+ *dbuf8++ = (pix & 0x0000ff00) >> 8;
+ *dbuf8++ = (pix & 0x000000ff) >> 0;
+ }
+}
+
+/**
+ * drm_fb_xrgb8888_to_bgr888 - Convert XRGB8888 to BGR888 clip buffer
+ * @dst: Array of BGR888 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. The parameters @dst, @dst_pitch and @src
+ * refer to arrays. Each array must have at
+ * least as many entries as there are planes in @fb's format. Each entry stores the
+ * value for the format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for BGR888 devices that don't natively
+ * support XRGB8888.
+ */
+void drm_fb_xrgb8888_to_bgr888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
+{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 3,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
+ drm_fb_xrgb8888_to_bgr888_line);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_bgr888);
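+
+/*
+ * Byte-level sketch of the conversion above: an XRGB8888 pixel 0x00rrggbb is
+ * written out as the three bytes rr, gg, bb, which is DRM_FORMAT_BGR888's
+ * little-endian [23:0] B:G:R memory layout.
+ */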
+
static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
__le32 *dbuf32 = dbuf;
@@ -978,6 +1029,75 @@ void drm_fb_xrgb8888_to_gray8(struct iosys_map *dst, const unsigned int *dst_pit
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8);
+static void drm_fb_argb8888_to_argb4444_line(void *dbuf, const void *sbuf, unsigned int pixels)
+{
+ unsigned int pixels2 = pixels & ~GENMASK_ULL(0, 0);
+ __le32 *dbuf32 = dbuf;
+ __le16 *dbuf16 = dbuf + pixels2 * sizeof(*dbuf16);
+ const __le32 *sbuf32 = sbuf;
+ unsigned int x;
+ u32 val32;
+ u16 val16;
+ u32 pix[2];
+
+ for (x = 0; x < pixels2; x += 2, ++dbuf32) {
+ pix[0] = le32_to_cpu(sbuf32[x]);
+ pix[1] = le32_to_cpu(sbuf32[x + 1]);
+ val32 = ((pix[0] & 0xf0000000) >> 16) |
+ ((pix[0] & 0x00f00000) >> 12) |
+ ((pix[0] & 0x0000f000) >> 8) |
+ ((pix[0] & 0x000000f0) >> 4) |
+ ((pix[1] & 0xf0000000) >> 0) |
+ ((pix[1] & 0x00f00000) << 4) |
+ ((pix[1] & 0x0000f000) << 8) |
+ ((pix[1] & 0x000000f0) << 12);
+ *dbuf32 = cpu_to_le32(val32);
+ }
+ for (; x < pixels; x++) {
+ pix[0] = le32_to_cpu(sbuf32[x]);
+ val16 = ((pix[0] & 0xf0000000) >> 16) |
+ ((pix[0] & 0x00f00000) >> 12) |
+ ((pix[0] & 0x0000f000) >> 8) |
+ ((pix[0] & 0x000000f0) >> 4);
+ dbuf16[x] = cpu_to_le16(val16);
+ }
+}
+
+/**
+ * drm_fb_argb8888_to_argb4444 - Convert ARGB8888 to ARGB4444 clip buffer
+ * @dst: Array of ARGB4444 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of ARGB8888 source buffers
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
+ *
+ * This function copies parts of a framebuffer to display memory and converts
+ * the color format during the process. The parameters @dst, @dst_pitch and
+ * @src refer to arrays. Each array must have at least as many entries as
+ * there are planes in @fb's format. Each entry stores the value for the
+ * format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for ARGB4444 devices that don't support
+ * ARGB8888 natively.
+ */
+void drm_fb_argb8888_to_argb4444(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
+{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 2,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
+ drm_fb_argb8888_to_argb4444_line);
+}
+EXPORT_SYMBOL(drm_fb_argb8888_to_argb4444);
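+
+/*
+ * Nibble-level sketch of the conversion above: an ARGB8888 pixel 0xaarrggbb is
+ * reduced to the 16-bit ARGB4444 value 0xargb, keeping only the high nibble of
+ * each channel.
+ */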
+
/**
* drm_fb_blit - Copy parts of a framebuffer to display memory
* @dst: Array of display-memory addresses to copy to
@@ -1035,6 +1155,9 @@ int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t d
} else if (dst_format == DRM_FORMAT_RGB888) {
drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip, state);
return 0;
+ } else if (dst_format == DRM_FORMAT_BGR888) {
+ drm_fb_xrgb8888_to_bgr888(dst, dst_pitch, src, fb, clip, state);
+ return 0;
} else if (dst_format == DRM_FORMAT_ARGB8888) {
drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip, state);
return 0;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ee811764c3df..c6240bab3fa5 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -348,7 +348,7 @@ int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
return -ENOENT;
/* Don't allow imported objects to be mapped */
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
ret = -EINVAL;
goto out;
}
@@ -1178,7 +1178,7 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
drm_vma_node_start(&obj->vma_node));
drm_printf_indent(p, indent, "size=%zu\n", obj->size);
drm_printf_indent(p, indent, "imported=%s\n",
- str_yes_no(obj->import_attach));
+ str_yes_no(drm_gem_is_imported(obj)));
if (obj->funcs->print_info)
obj->funcs->print_info(p, indent, obj);
diff --git a/drivers/gpu/drm/drm_gem_dma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c
index 16988d316a6d..b7f033d4352a 100644
--- a/drivers/gpu/drm/drm_gem_dma_helper.c
+++ b/drivers/gpu/drm/drm_gem_dma_helper.c
@@ -228,9 +228,9 @@ void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj)
struct drm_gem_object *gem_obj = &dma_obj->base;
struct iosys_map map = IOSYS_MAP_INIT_VADDR(dma_obj->vaddr);
- if (gem_obj->import_attach) {
+ if (drm_gem_is_imported(gem_obj)) {
if (dma_obj->vaddr)
- dma_buf_vunmap_unlocked(gem_obj->import_attach->dmabuf, &map);
+ dma_buf_vunmap_unlocked(gem_obj->dma_buf, &map);
drm_prime_gem_destroy(gem_obj, dma_obj->sgt);
} else if (dma_obj->vaddr) {
if (dma_obj->map_noncoherent)
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 185534f56bab..0fbeb686e561 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -419,7 +419,6 @@ EXPORT_SYMBOL(drm_gem_fb_vunmap);
static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir,
unsigned int num_planes)
{
- struct dma_buf_attachment *import_attach;
struct drm_gem_object *obj;
int ret;
@@ -428,10 +427,9 @@ static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_dat
obj = drm_gem_fb_get_obj(fb, num_planes);
if (!obj)
continue;
- import_attach = obj->import_attach;
- if (!import_attach)
+ if (!drm_gem_is_imported(obj))
continue;
- ret = dma_buf_end_cpu_access(import_attach->dmabuf, dir);
+ ret = dma_buf_end_cpu_access(obj->dma_buf, dir);
if (ret)
drm_err(fb->dev, "dma_buf_end_cpu_access(%u, %d) failed: %d\n",
ret, num_planes, dir);
@@ -454,7 +452,6 @@ static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_dat
*/
int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir)
{
- struct dma_buf_attachment *import_attach;
struct drm_gem_object *obj;
unsigned int i;
int ret;
@@ -465,10 +462,9 @@ int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direct
ret = -EINVAL;
goto err___drm_gem_fb_end_cpu_access;
}
- import_attach = obj->import_attach;
- if (!import_attach)
+ if (!drm_gem_is_imported(obj))
continue;
- ret = dma_buf_begin_cpu_access(import_attach->dmabuf, dir);
+ ret = dma_buf_begin_cpu_access(obj->dma_buf, dir);
if (ret)
goto err___drm_gem_fb_end_cpu_access;
}
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 5ab351409312..d99dee67353a 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -160,7 +160,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
drm_prime_gem_destroy(obj, shmem->sgt);
} else {
dma_resv_lock(shmem->base.resv, NULL);
@@ -255,7 +255,7 @@ int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
dma_resv_assert_held(shmem->base.resv);
- drm_WARN_ON(shmem->base.dev, shmem->base.import_attach);
+ drm_WARN_ON(shmem->base.dev, drm_gem_is_imported(&shmem->base));
ret = drm_gem_shmem_get_pages(shmem);
@@ -286,7 +286,7 @@ int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
struct drm_gem_object *obj = &shmem->base;
int ret;
- drm_WARN_ON(obj->dev, obj->import_attach);
+ drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
if (ret)
@@ -309,7 +309,7 @@ void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
- drm_WARN_ON(obj->dev, obj->import_attach);
+ drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
dma_resv_lock(shmem->base.resv, NULL);
drm_gem_shmem_unpin_locked(shmem);
@@ -338,11 +338,11 @@ int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
struct drm_gem_object *obj = &shmem->base;
int ret = 0;
- if (obj->import_attach) {
- ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
+ if (drm_gem_is_imported(obj)) {
+ ret = dma_buf_vmap(obj->dma_buf, map);
if (!ret) {
if (drm_WARN_ON(obj->dev, map->is_iomem)) {
- dma_buf_vunmap(obj->import_attach->dmabuf, map);
+ dma_buf_vunmap(obj->dma_buf, map);
return -EIO;
}
}
@@ -378,7 +378,7 @@ int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
return 0;
err_put_pages:
- if (!obj->import_attach)
+ if (!drm_gem_is_imported(obj))
drm_gem_shmem_put_pages(shmem);
err_zero_use:
shmem->vmap_use_count = 0;
@@ -404,8 +404,8 @@ void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
{
struct drm_gem_object *obj = &shmem->base;
- if (obj->import_attach) {
- dma_buf_vunmap(obj->import_attach->dmabuf, map);
+ if (drm_gem_is_imported(obj)) {
+ dma_buf_vunmap(obj->dma_buf, map);
} else {
dma_resv_assert_held(shmem->base.resv);
@@ -566,7 +566,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
- drm_WARN_ON(obj->dev, obj->import_attach);
+ drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
dma_resv_lock(shmem->base.resv, NULL);
@@ -618,7 +618,7 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
struct drm_gem_object *obj = &shmem->base;
int ret;
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
/* Reset both vm_ops and vm_private_data, so we don't end up with
* vm_ops pointing to our implementation if the dma-buf backend
* doesn't set those fields.
@@ -663,7 +663,7 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
struct drm_printer *p, unsigned int indent)
{
- if (shmem->base.import_attach)
+ if (drm_gem_is_imported(&shmem->base))
return;
drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
@@ -690,7 +690,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
- drm_WARN_ON(obj->dev, obj->import_attach);
+ drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
@@ -705,7 +705,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
if (shmem->sgt)
return shmem->sgt;
- drm_WARN_ON(obj->dev, obj->import_attach);
+ drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
ret = drm_gem_shmem_get_pages(shmem);
if (ret)
diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
new file mode 100644
index 000000000000..38431e8360e7
--- /dev/null
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -0,0 +1,2250 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ *
+ * Authors:
+ * Matthew Brost <matthew.brost@intel.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/hmm.h>
+#include <linux/memremap.h>
+#include <linux/migrate.h>
+#include <linux/mm_types.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_gpusvm.h>
+#include <drm/drm_pagemap.h>
+#include <drm/drm_print.h>
+
+/**
+ * DOC: Overview
+ *
+ * GPU Shared Virtual Memory (GPU SVM) layer for the Direct Rendering Manager (DRM)
+ * is a component of the DRM framework designed to manage shared virtual memory
+ * between the CPU and GPU. It enables efficient data exchange and processing
+ * for GPU-accelerated applications by allowing memory sharing and
+ * synchronization between the CPU's and GPU's virtual address spaces.
+ *
+ * Key GPU SVM Components:
+ *
+ * - Notifiers:
+ * Used for tracking memory intervals and notifying the GPU of changes,
+ * notifiers are sized based on a GPU SVM initialization parameter, with a
+ * recommendation of 512M or larger. They maintain a Red-Black tree and a
+ * list of ranges that fall within the notifier interval. Notifiers are
+ * tracked within a GPU SVM Red-Black tree and list and are dynamically
+ * inserted or removed as ranges within the interval are created or
+ * destroyed.
+ * - Ranges:
+ * Represent memory ranges mapped in a DRM device and managed by GPU SVM.
+ * They are sized based on an array of chunk sizes, which is a GPU SVM
+ * initialization parameter, and the CPU address space. Upon GPU fault,
+ * the largest aligned chunk that fits within the faulting CPU address
+ * space is chosen for the range size. Ranges are expected to be
+ * dynamically allocated on GPU fault and removed on an MMU notifier UNMAP
+ * event. As mentioned above, ranges are tracked in a notifier's Red-Black
+ * tree.
+ *
+ * - Operations:
+ * Define the interface for driver-specific GPU SVM operations such as
+ * range allocation, notifier allocation, and invalidations.
+ *
+ * - Device Memory Allocations:
+ * Embedded structure containing enough information for GPU SVM to migrate
+ * to / from device memory.
+ *
+ * - Device Memory Operations:
+ * Define the interface for driver-specific device memory operations,
+ * such as releasing memory, populating pfns, and copying to / from
+ * device memory.
+ *
+ * This layer provides interfaces for allocating, mapping, migrating, and
+ * releasing memory ranges between the CPU and GPU. It handles all core memory
+ * management interactions (DMA mapping, HMM, and migration) and provides
+ * driver-specific virtual functions (vfuncs). This infrastructure is sufficient
+ * to build the expected driver components for an SVM implementation as detailed
+ * below.
+ *
+ * Expected Driver Components:
+ *
+ * - GPU page fault handler:
+ * Used to create ranges and notifiers based on the fault address,
+ * optionally migrate the range to device memory, and create GPU bindings.
+ *
+ * - Garbage collector:
+ * Used to unmap and destroy GPU bindings for ranges. Ranges are expected
+ * to be added to the garbage collector upon an MMU_NOTIFY_UNMAP event in the
+ * notifier callback.
+ *
+ * - Notifier callback:
+ * Used to invalidate and DMA unmap GPU bindings for ranges.
+ */
+
+/**
+ * DOC: Locking
+ *
+ * GPU SVM handles locking for core MM interactions, i.e., it locks/unlocks the
+ * mmap lock as needed.
+ *
+ * GPU SVM introduces a global notifier lock, which safeguards the notifier's
+ * range RB tree and list, as well as the range's DMA mappings and sequence
+ * number. GPU SVM manages all necessary locking and unlocking operations,
+ * except for rechecking that a range's pages are still valid
+ * (drm_gpusvm_range_pages_valid()) while the driver is committing GPU bindings.
+ * This lock corresponds to the ``driver->update`` lock mentioned in
+ * Documentation/mm/hmm.rst. Future revisions may transition from a GPU SVM
+ * global lock to a per-notifier lock if finer-grained locking is deemed
+ * necessary.
+ *
+ * In addition to the locking mentioned above, the driver should implement a
+ * lock to safeguard core GPU SVM function calls that modify state, such as
+ * drm_gpusvm_range_find_or_insert and drm_gpusvm_range_remove. This lock is
+ * denoted as 'driver_svm_lock' in code examples. Finer-grained driver-side
+ * locking should also be possible for concurrent GPU fault processing within a
+ * single GPU SVM. The 'driver_svm_lock' can be passed to
+ * drm_gpusvm_driver_set_lock() to add lockdep annotations to GPU SVM.
+ */
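+
+/*
+ * A minimal sketch of the driver-side locking described above, assuming a
+ * driver-defined 'drv->svm_lock' mutex and 'drv->gpusvm' instance (names are
+ * illustrative only):
+ *
+ *	mutex_init(&drv->svm_lock);
+ *	drm_gpusvm_driver_set_lock(&drv->gpusvm, &drv->svm_lock);
+ *
+ *	mutex_lock(&drv->svm_lock);
+ *	range = drm_gpusvm_range_find_or_insert(&drv->gpusvm, fault_addr,
+ *						gpuva_start, gpuva_end, &ctx);
+ *	...
+ *	mutex_unlock(&drv->svm_lock);
+ */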
+
+/**
+ * DOC: Migration
+ *
+ * The migration support is quite simple, allowing migration between RAM and
+ * device memory at the range granularity. For example, GPU SVM currently does
+ * not support mixing RAM and device memory pages within a range. This means
+ * that upon GPU fault, the entire range can be migrated to device memory, and
+ * upon CPU fault, the entire range is migrated to RAM. Mixed RAM and device
+ * memory storage within a range could be added in the future if required.
+ *
+ * The reasoning for only supporting range granularity is as follows: it
+ * simplifies the implementation, and range sizes are driver-defined and should
+ * be relatively small.
+ */
+
+/**
+ * DOC: Partial Unmapping of Ranges
+ *
+ * Partial unmapping of ranges (e.g., 1M out of 2M is unmapped by CPU resulting
+ * in MMU_NOTIFY_UNMAP event) presents several challenges, with the main one
+ * being that a subset of the range still has CPU and GPU mappings. If the
+ * backing store for the range is in device memory, a subset of the backing
+ * store has references. One option would be to split the range and device
+ * memory backing store, but the implementation for this would be quite
+ * complicated. Given that partial unmappings are rare and driver-defined range
+ * sizes are relatively small, GPU SVM does not support splitting of ranges.
+ *
+ * With no support for range splitting, upon partial unmapping of a range, the
+ * driver is expected to invalidate and destroy the entire range. If the range
+ * has device memory as its backing, the driver is also expected to migrate any
+ * remaining pages back to RAM.
+ */
+
+/**
+ * DOC: Examples
+ *
+ * This section provides three examples of how to build the expected driver
+ * components: the GPU page fault handler, the garbage collector, and the
+ * notifier callback.
+ *
+ * The generic code provided does not include logic for complex migration
+ * policies, optimized invalidations, fine-grained driver locking, or other
+ * potentially required driver locking (e.g., DMA-resv locks).
+ *
+ * 1) GPU page fault handler
+ *
+ * .. code-block:: c
+ *
+ * int driver_bind_range(struct drm_gpusvm *gpusvm, struct drm_gpusvm_range *range)
+ * {
+ * int err = 0;
+ *
+ * driver_alloc_and_setup_memory_for_bind(gpusvm, range);
+ *
+ * drm_gpusvm_notifier_lock(gpusvm);
+ * if (drm_gpusvm_range_pages_valid(range))
+ * driver_commit_bind(gpusvm, range);
+ * else
+ * err = -EAGAIN;
+ * drm_gpusvm_notifier_unlock(gpusvm);
+ *
+ * return err;
+ * }
+ *
+ * int driver_gpu_fault(struct drm_gpusvm *gpusvm, unsigned long fault_addr,
+ * unsigned long gpuva_start, unsigned long gpuva_end)
+ * {
+ * struct drm_gpusvm_ctx ctx = {};
+ * int err;
+ *
+ * driver_svm_lock();
+ * retry:
+ * // Always process UNMAPs first so view of GPU SVM ranges is current
+ * driver_garbage_collector(gpusvm);
+ *
+ * range = drm_gpusvm_range_find_or_insert(gpusvm, fault_addr,
+ * gpuva_start, gpuva_end,
+ * &ctx);
+ * if (IS_ERR(range)) {
+ * err = PTR_ERR(range);
+ * goto unlock;
+ * }
+ *
+ * if (driver_migration_policy(range)) {
+ * mmap_read_lock(mm);
+ * devmem = driver_alloc_devmem();
+ * err = drm_gpusvm_migrate_to_devmem(gpusvm, range,
+ * devmem,
+ * &ctx);
+ * mmap_read_unlock(mm);
+ * if (err) // CPU mappings may have changed
+ * goto retry;
+ * }
+ *
+ * err = drm_gpusvm_range_get_pages(gpusvm, range, &ctx);
+ * if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) { // CPU mappings changed
+ * if (err == -EOPNOTSUPP)
+ * drm_gpusvm_range_evict(gpusvm, range);
+ * goto retry;
+ * } else if (err) {
+ * goto unlock;
+ * }
+ *
+ * err = driver_bind_range(gpusvm, range);
+ * if (err == -EAGAIN) // CPU mappings changed
+ * goto retry;
+ *
+ * unlock:
+ * driver_svm_unlock();
+ * return err;
+ * }
+ *
+ * 2) Garbage Collector
+ *
+ * .. code-block:: c
+ *
+ * void __driver_garbage_collector(struct drm_gpusvm *gpusvm,
+ * struct drm_gpusvm_range *range)
+ * {
+ * assert_driver_svm_locked(gpusvm);
+ *
+ * // Partial unmap, migrate any remaining device memory pages back to RAM
+ * if (range->flags.partial_unmap)
+ * drm_gpusvm_range_evict(gpusvm, range);
+ *
+ * driver_unbind_range(range);
+ * drm_gpusvm_range_remove(gpusvm, range);
+ * }
+ *
+ * void driver_garbage_collector(struct drm_gpusvm *gpusvm)
+ * {
+ * assert_driver_svm_locked(gpusvm);
+ *
+ * for_each_range_in_garbage_collector(gpusvm, range)
+ * __driver_garbage_collector(gpusvm, range);
+ * }
+ *
+ * 3) Notifier callback
+ *
+ * .. code-block:: c
+ *
+ * void driver_invalidation(struct drm_gpusvm *gpusvm,
+ * struct drm_gpusvm_notifier *notifier,
+ * const struct mmu_notifier_range *mmu_range)
+ * {
+ * struct drm_gpusvm_ctx ctx = { .in_notifier = true, };
+ * struct drm_gpusvm_range *range = NULL;
+ *
+ * driver_invalidate_device_pages(gpusvm, mmu_range->start, mmu_range->end);
+ *
+ * drm_gpusvm_for_each_range(range, notifier, mmu_range->start,
+ * mmu_range->end) {
+ * drm_gpusvm_range_unmap_pages(gpusvm, range, &ctx);
+ *
+ * if (mmu_range->event != MMU_NOTIFY_UNMAP)
+ * continue;
+ *
+ * drm_gpusvm_range_set_unmapped(range, mmu_range);
+ * driver_garbage_collector_add(gpusvm, range);
+ * }
+ * }
+ */
+
+/**
+ * npages_in_range() - Calculate the number of pages in a given range
+ * @start: The start address of the range
+ * @end: The end address of the range
+ *
+ * This function calculates the number of pages in a given memory range,
+ * specified by the start and end addresses. It divides the difference
+ * between the end and start addresses by the page size (PAGE_SIZE) to
+ * determine the number of pages in the range.
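+ * For example, assuming 4 KiB pages, a 2 MiB range contains 512 pages.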
+ *
+ * Return: The number of pages in the specified range.
+ */
+static unsigned long
+npages_in_range(unsigned long start, unsigned long end)
+{
+ return (end - start) >> PAGE_SHIFT;
+}
+
+/**
+ * struct drm_gpusvm_zdd - GPU SVM zone device data
+ *
+ * @refcount: Reference count for the zdd
+ * @devmem_allocation: device memory allocation
+ * @device_private_page_owner: Device private pages owner
+ *
+ * This structure serves as a generic wrapper installed in
+ * page->zone_device_data. It provides infrastructure for looking up a device
+ * memory allocation upon CPU page fault and asynchronously releasing device
+ * memory once the CPU has no page references. Asynchronous release is useful
+ * because CPU page references can be dropped in IRQ contexts, while releasing
+ * device memory likely requires sleeping locks.
+ */
+struct drm_gpusvm_zdd {
+ struct kref refcount;
+ struct drm_gpusvm_devmem *devmem_allocation;
+ void *device_private_page_owner;
+};
+
+/**
+ * drm_gpusvm_zdd_alloc() - Allocate a zdd structure.
+ * @device_private_page_owner: Device private pages owner
+ *
+ * This function allocates and initializes a new zdd structure. It sets up the
+ * reference count and records the device private pages owner.
+ *
+ * Return: Pointer to the allocated zdd on success, NULL on failure.
+ */
+static struct drm_gpusvm_zdd *
+drm_gpusvm_zdd_alloc(void *device_private_page_owner)
+{
+ struct drm_gpusvm_zdd *zdd;
+
+ zdd = kmalloc(sizeof(*zdd), GFP_KERNEL);
+ if (!zdd)
+ return NULL;
+
+ kref_init(&zdd->refcount);
+ zdd->devmem_allocation = NULL;
+ zdd->device_private_page_owner = device_private_page_owner;
+
+ return zdd;
+}
+
+/**
+ * drm_gpusvm_zdd_get() - Get a reference to a zdd structure.
+ * @zdd: Pointer to the zdd structure.
+ *
+ * This function increments the reference count of the provided zdd structure.
+ *
+ * Return: Pointer to the zdd structure.
+ */
+static struct drm_gpusvm_zdd *drm_gpusvm_zdd_get(struct drm_gpusvm_zdd *zdd)
+{
+ kref_get(&zdd->refcount);
+ return zdd;
+}
+
+/**
+ * drm_gpusvm_zdd_destroy() - Destroy a zdd structure.
+ * @ref: Pointer to the reference count structure.
+ *
+ * This function releases the zdd's device memory allocation, if any, and frees
+ * the zdd structure.
+ */
+static void drm_gpusvm_zdd_destroy(struct kref *ref)
+{
+ struct drm_gpusvm_zdd *zdd =
+ container_of(ref, struct drm_gpusvm_zdd, refcount);
+ struct drm_gpusvm_devmem *devmem = zdd->devmem_allocation;
+
+ if (devmem) {
+ complete_all(&devmem->detached);
+ if (devmem->ops->devmem_release)
+ devmem->ops->devmem_release(devmem);
+ }
+ kfree(zdd);
+}
+
+/**
+ * drm_gpusvm_zdd_put() - Put a zdd reference.
+ * @zdd: Pointer to the zdd structure.
+ *
+ * This function decrements the reference count of the provided zdd structure
+ * and schedules its destruction if the count drops to zero.
+ */
+static void drm_gpusvm_zdd_put(struct drm_gpusvm_zdd *zdd)
+{
+ kref_put(&zdd->refcount, drm_gpusvm_zdd_destroy);
+}
+
+/**
+ * drm_gpusvm_range_find() - Find GPU SVM range from GPU SVM notifier
+ * @notifier: Pointer to the GPU SVM notifier structure.
+ * @start: Start address of the range
+ * @end: End address of the range
+ *
+ * Return: A pointer to the drm_gpusvm_range if found or NULL
+ */
+struct drm_gpusvm_range *
+drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
+ unsigned long end)
+{
+ struct interval_tree_node *itree;
+
+ itree = interval_tree_iter_first(&notifier->root, start, end - 1);
+
+ if (itree)
+ return container_of(itree, struct drm_gpusvm_range, itree);
+ else
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_find);
+
+/**
+ * drm_gpusvm_for_each_range_safe() - Safely iterate over GPU SVM ranges in a notifier
+ * @range__: Iterator variable for the ranges
+ * @next__: Iterator variable for the ranges temporary storage
+ * @notifier__: Pointer to the GPU SVM notifier
+ * @start__: Start address of the range
+ * @end__: End address of the range
+ *
+ * This macro is used to iterate over GPU SVM ranges in a notifier while
+ * removing ranges from it.
+ */
+#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__) \
+ for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)), \
+ (next__) = __drm_gpusvm_range_next(range__); \
+ (range__) && (drm_gpusvm_range_start(range__) < (end__)); \
+ (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__))
+
+/**
+ * __drm_gpusvm_notifier_next() - get the next drm_gpusvm_notifier in the list
+ * @notifier: a pointer to the current drm_gpusvm_notifier
+ *
+ * Return: A pointer to the next drm_gpusvm_notifier if available, or NULL if
+ * the current notifier is the last one or if the input notifier is
+ * NULL.
+ */
+static struct drm_gpusvm_notifier *
+__drm_gpusvm_notifier_next(struct drm_gpusvm_notifier *notifier)
+{
+ if (notifier && !list_is_last(&notifier->entry,
+ &notifier->gpusvm->notifier_list))
+ return list_next_entry(notifier, entry);
+
+ return NULL;
+}
+
+static struct drm_gpusvm_notifier *
+notifier_iter_first(struct rb_root_cached *root, unsigned long start,
+ unsigned long last)
+{
+ struct interval_tree_node *itree;
+
+ itree = interval_tree_iter_first(root, start, last);
+
+ if (itree)
+ return container_of(itree, struct drm_gpusvm_notifier, itree);
+ else
+ return NULL;
+}
+
+/**
+ * drm_gpusvm_for_each_notifier() - Iterate over GPU SVM notifiers in a gpusvm
+ * @notifier__: Iterator variable for the notifiers
+ * @gpusvm__: Pointer to the GPU SVM structure
+ * @start__: Start address of the notifier
+ * @end__: End address of the notifier
+ *
+ * This macro is used to iterate over GPU SVM notifiers in a gpusvm.
+ */
+#define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__) \
+ for ((notifier__) = notifier_iter_first(&(gpusvm__)->root, (start__), (end__) - 1); \
+ (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
+ (notifier__) = __drm_gpusvm_notifier_next(notifier__))
+
+/**
+ * drm_gpusvm_for_each_notifier_safe() - Safely iterate over GPU SVM notifiers in a gpusvm
+ * @notifier__: Iterator variable for the notifiers
+ * @next__: Iterator variable for the notifiers temporary storage
+ * @gpusvm__: Pointer to the GPU SVM structure
+ * @start__: Start address of the notifier
+ * @end__: End address of the notifier
+ *
+ * This macro is used to iterate over GPU SVM notifiers in a gpusvm while
+ * removing notifiers from it.
+ */
+#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__) \
+ for ((notifier__) = notifier_iter_first(&(gpusvm__)->root, (start__), (end__) - 1), \
+ (next__) = __drm_gpusvm_notifier_next(notifier__); \
+ (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
+ (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__))
+
+/**
+ * drm_gpusvm_notifier_invalidate() - Invalidate a GPU SVM notifier.
+ * @mni: Pointer to the mmu_interval_notifier structure.
+ * @mmu_range: Pointer to the mmu_notifier_range structure.
+ * @cur_seq: Current sequence number.
+ *
+ * This function serves as a generic MMU notifier for GPU SVM. It sets the MMU
+ * notifier sequence number and calls the driver invalidate vfunc under
+ * gpusvm->notifier_lock.
+ *
+ * Return: true if the operation succeeds, false otherwise.
+ */
+static bool
+drm_gpusvm_notifier_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *mmu_range,
+ unsigned long cur_seq)
+{
+ struct drm_gpusvm_notifier *notifier =
+ container_of(mni, typeof(*notifier), notifier);
+ struct drm_gpusvm *gpusvm = notifier->gpusvm;
+
+ if (!mmu_notifier_range_blockable(mmu_range))
+ return false;
+
+ down_write(&gpusvm->notifier_lock);
+ mmu_interval_set_seq(mni, cur_seq);
+ gpusvm->ops->invalidate(gpusvm, notifier, mmu_range);
+ up_write(&gpusvm->notifier_lock);
+
+ return true;
+}
+
+/*
+ * drm_gpusvm_notifier_ops - MMU interval notifier operations for GPU SVM
+ */
+static const struct mmu_interval_notifier_ops drm_gpusvm_notifier_ops = {
+ .invalidate = drm_gpusvm_notifier_invalidate,
+};
+
+/**
+ * drm_gpusvm_init() - Initialize the GPU SVM.
+ * @gpusvm: Pointer to the GPU SVM structure.
+ * @name: Name of the GPU SVM.
+ * @drm: Pointer to the DRM device structure.
+ * @mm: Pointer to the mm_struct for the address space.
+ * @device_private_page_owner: Device private pages owner.
+ * @mm_start: Start address of GPU SVM.
+ * @mm_range: Range of the GPU SVM.
+ * @notifier_size: Size of individual notifiers.
+ * @ops: Pointer to the operations structure for GPU SVM.
+ * @chunk_sizes: Pointer to the array of chunk sizes used in range allocation.
+ * Entries should be powers of 2 in descending order with last
+ * entry being SZ_4K.
+ * @num_chunks: Number of chunks.
+ *
+ * This function initializes the GPU SVM.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
+ const char *name, struct drm_device *drm,
+ struct mm_struct *mm, void *device_private_page_owner,
+ unsigned long mm_start, unsigned long mm_range,
+ unsigned long notifier_size,
+ const struct drm_gpusvm_ops *ops,
+ const unsigned long *chunk_sizes, int num_chunks)
+{
+ if (!ops->invalidate || !num_chunks)
+ return -EINVAL;
+
+ gpusvm->name = name;
+ gpusvm->drm = drm;
+ gpusvm->mm = mm;
+ gpusvm->device_private_page_owner = device_private_page_owner;
+ gpusvm->mm_start = mm_start;
+ gpusvm->mm_range = mm_range;
+ gpusvm->notifier_size = notifier_size;
+ gpusvm->ops = ops;
+ gpusvm->chunk_sizes = chunk_sizes;
+ gpusvm->num_chunks = num_chunks;
+
+ mmgrab(mm);
+ gpusvm->root = RB_ROOT_CACHED;
+ INIT_LIST_HEAD(&gpusvm->notifier_list);
+
+ init_rwsem(&gpusvm->notifier_lock);
+
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&gpusvm->notifier_lock);
+ fs_reclaim_release(GFP_KERNEL);
+
+#ifdef CONFIG_LOCKDEP
+ gpusvm->lock_dep_map = NULL;
+#endif
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_init);
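+
+/*
+ * A minimal initialization sketch (driver names are illustrative only): set up
+ * GPU SVM over the whole user address space with 512M notifiers and 2M/64K/4K
+ * chunks, where 'drv_gpusvm_ops' provides at least an invalidate() callback:
+ *
+ *	static const unsigned long chunk_sizes[] = { SZ_2M, SZ_64K, SZ_4K };
+ *
+ *	err = drm_gpusvm_init(&drv->gpusvm, "drv-svm", &drv->drm, current->mm,
+ *			      drv_device_private_owner(drv), 0, TASK_SIZE,
+ *			      SZ_512M, &drv_gpusvm_ops, chunk_sizes,
+ *			      ARRAY_SIZE(chunk_sizes));
+ */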
+
+/**
+ * drm_gpusvm_notifier_find() - Find GPU SVM notifier
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @fault_addr: Fault address
+ *
+ * This function finds the GPU SVM notifier associated with the fault address.
+ *
+ * Return: Pointer to the GPU SVM notifier on success, NULL otherwise.
+ */
+static struct drm_gpusvm_notifier *
+drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm,
+ unsigned long fault_addr)
+{
+ return notifier_iter_first(&gpusvm->root, fault_addr, fault_addr + 1);
+}
+
+/**
+ * to_drm_gpusvm_notifier() - retrieve the container struct for a given rbtree node
+ * @node: a pointer to the rbtree node embedded within a drm_gpusvm_notifier struct
+ *
+ * Return: A pointer to the containing drm_gpusvm_notifier structure.
+ */
+static struct drm_gpusvm_notifier *to_drm_gpusvm_notifier(struct rb_node *node)
+{
+ return container_of(node, struct drm_gpusvm_notifier, itree.rb);
+}
+
+/**
+ * drm_gpusvm_notifier_insert() - Insert GPU SVM notifier
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @notifier: Pointer to the GPU SVM notifier structure
+ *
+ * This function inserts the GPU SVM notifier into the GPU SVM RB tree and list.
+ */
+static void drm_gpusvm_notifier_insert(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_notifier *notifier)
+{
+ struct rb_node *node;
+ struct list_head *head;
+
+ interval_tree_insert(&notifier->itree, &gpusvm->root);
+
+ node = rb_prev(&notifier->itree.rb);
+ if (node)
+ head = &(to_drm_gpusvm_notifier(node))->entry;
+ else
+ head = &gpusvm->notifier_list;
+
+ list_add(&notifier->entry, head);
+}
+
+/**
+ * drm_gpusvm_notifier_remove() - Remove GPU SVM notifier
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @notifier: Pointer to the GPU SVM notifier structure
+ *
+ * This function removes the GPU SVM notifier from the GPU SVM RB tree and list.
+ */
+static void drm_gpusvm_notifier_remove(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_notifier *notifier)
+{
+ interval_tree_remove(&notifier->itree, &gpusvm->root);
+ list_del(&notifier->entry);
+}
+
+/**
+ * drm_gpusvm_fini() - Finalize the GPU SVM.
+ * @gpusvm: Pointer to the GPU SVM structure.
+ *
+ * This function finalizes the GPU SVM by cleaning up any remaining ranges and
+ * notifiers, and dropping a reference to struct MM.
+ */
+void drm_gpusvm_fini(struct drm_gpusvm *gpusvm)
+{
+ struct drm_gpusvm_notifier *notifier, *next;
+
+ drm_gpusvm_for_each_notifier_safe(notifier, next, gpusvm, 0, LONG_MAX) {
+ struct drm_gpusvm_range *range, *__next;
+
+ /*
+ * Remove notifier first to avoid racing with any invalidation
+ */
+ mmu_interval_notifier_remove(&notifier->notifier);
+ notifier->flags.removed = true;
+
+ drm_gpusvm_for_each_range_safe(range, __next, notifier, 0,
+ LONG_MAX)
+ drm_gpusvm_range_remove(gpusvm, range);
+ }
+
+ mmdrop(gpusvm->mm);
+ WARN_ON(!RB_EMPTY_ROOT(&gpusvm->root.rb_root));
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_fini);
+
+/**
+ * drm_gpusvm_notifier_alloc() - Allocate GPU SVM notifier
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @fault_addr: Fault address
+ *
+ * This function allocates and initializes the GPU SVM notifier structure.
+ *
+ * Return: Pointer to the allocated GPU SVM notifier on success, ERR_PTR() on failure.
+ */
+static struct drm_gpusvm_notifier *
+drm_gpusvm_notifier_alloc(struct drm_gpusvm *gpusvm, unsigned long fault_addr)
+{
+ struct drm_gpusvm_notifier *notifier;
+
+ if (gpusvm->ops->notifier_alloc)
+ notifier = gpusvm->ops->notifier_alloc();
+ else
+ notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
+
+ if (!notifier)
+ return ERR_PTR(-ENOMEM);
+
+ notifier->gpusvm = gpusvm;
+ notifier->itree.start = ALIGN_DOWN(fault_addr, gpusvm->notifier_size);
+ notifier->itree.last = ALIGN(fault_addr + 1, gpusvm->notifier_size) - 1;
+ INIT_LIST_HEAD(&notifier->entry);
+ notifier->root = RB_ROOT_CACHED;
+ INIT_LIST_HEAD(&notifier->range_list);
+
+ return notifier;
+}
+
+/**
+ * drm_gpusvm_notifier_free() - Free GPU SVM notifier
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @notifier: Pointer to the GPU SVM notifier structure
+ *
+ * This function frees the GPU SVM notifier structure.
+ */
+static void drm_gpusvm_notifier_free(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_notifier *notifier)
+{
+ WARN_ON(!RB_EMPTY_ROOT(&notifier->root.rb_root));
+
+ if (gpusvm->ops->notifier_free)
+ gpusvm->ops->notifier_free(notifier);
+ else
+ kfree(notifier);
+}
+
+/**
+ * to_drm_gpusvm_range() - retrieve the container struct for a given rbtree node
+ * @node: a pointer to the rbtree node embedded within a drm_gpusvm_range struct
+ *
+ * Return: A pointer to the containing drm_gpusvm_range structure.
+ */
+static struct drm_gpusvm_range *to_drm_gpusvm_range(struct rb_node *node)
+{
+ return container_of(node, struct drm_gpusvm_range, itree.rb);
+}
+
+/**
+ * drm_gpusvm_range_insert() - Insert GPU SVM range
+ * @notifier: Pointer to the GPU SVM notifier structure
+ * @range: Pointer to the GPU SVM range structure
+ *
+ * This function inserts the GPU SVM range into the notifier RB tree and list.
+ */
+static void drm_gpusvm_range_insert(struct drm_gpusvm_notifier *notifier,
+ struct drm_gpusvm_range *range)
+{
+ struct rb_node *node;
+ struct list_head *head;
+
+ drm_gpusvm_notifier_lock(notifier->gpusvm);
+ interval_tree_insert(&range->itree, &notifier->root);
+
+ node = rb_prev(&range->itree.rb);
+ if (node)
+ head = &(to_drm_gpusvm_range(node))->entry;
+ else
+ head = &notifier->range_list;
+
+ list_add(&range->entry, head);
+ drm_gpusvm_notifier_unlock(notifier->gpusvm);
+}
+
+/**
+ * __drm_gpusvm_range_remove() - Remove GPU SVM range
+ * @notifier: Pointer to the GPU SVM notifier structure
+ * @range: Pointer to the GPU SVM range structure
+ *
+ * This function removes the GPU SVM range from the notifier RB tree and list.
+ */
+static void __drm_gpusvm_range_remove(struct drm_gpusvm_notifier *notifier,
+ struct drm_gpusvm_range *range)
+{
+ interval_tree_remove(&range->itree, &notifier->root);
+ list_del(&range->entry);
+}
+
+/**
+ * drm_gpusvm_range_alloc() - Allocate GPU SVM range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @notifier: Pointer to the GPU SVM notifier structure
+ * @fault_addr: Fault address
+ * @chunk_size: Chunk size
+ * @migrate_devmem: Flag indicating whether to migrate device memory
+ *
+ * This function allocates and initializes the GPU SVM range structure.
+ *
+ * Return: Pointer to the allocated GPU SVM range on success, ERR_PTR() on failure.
+ */
+static struct drm_gpusvm_range *
+drm_gpusvm_range_alloc(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_notifier *notifier,
+ unsigned long fault_addr, unsigned long chunk_size,
+ bool migrate_devmem)
+{
+ struct drm_gpusvm_range *range;
+
+ if (gpusvm->ops->range_alloc)
+ range = gpusvm->ops->range_alloc(gpusvm);
+ else
+ range = kzalloc(sizeof(*range), GFP_KERNEL);
+
+ if (!range)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&range->refcount);
+ range->gpusvm = gpusvm;
+ range->notifier = notifier;
+ range->itree.start = ALIGN_DOWN(fault_addr, chunk_size);
+ range->itree.last = ALIGN(fault_addr + 1, chunk_size) - 1;
+ INIT_LIST_HEAD(&range->entry);
+ range->notifier_seq = LONG_MAX;
+ range->flags.migrate_devmem = migrate_devmem ? 1 : 0;
+
+ return range;
+}
+
+/**
+ * drm_gpusvm_check_pages() - Check pages
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @notifier: Pointer to the GPU SVM notifier structure
+ * @start: Start address
+ * @end: End address
+ *
+ * Check if pages between start and end have been faulted in on the CPU. Used
+ * to prevent migration of pages without a CPU backing store.
+ *
+ * Return: True if pages have been faulted into CPU, False otherwise
+ */
+static bool drm_gpusvm_check_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_notifier *notifier,
+ unsigned long start, unsigned long end)
+{
+ struct hmm_range hmm_range = {
+ .default_flags = 0,
+ .notifier = &notifier->notifier,
+ .start = start,
+ .end = end,
+ .dev_private_owner = gpusvm->device_private_page_owner,
+ };
+ unsigned long timeout =
+ jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ unsigned long *pfns;
+ unsigned long npages = npages_in_range(start, end);
+ int err, i;
+
+ mmap_assert_locked(gpusvm->mm);
+
+ pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
+ if (!pfns)
+ return false;
+
+ hmm_range.notifier_seq = mmu_interval_read_begin(&notifier->notifier);
+ hmm_range.hmm_pfns = pfns;
+
+ while (true) {
+ err = hmm_range_fault(&hmm_range);
+ if (err == -EBUSY) {
+ if (time_after(jiffies, timeout))
+ break;
+
+ hmm_range.notifier_seq =
+ mmu_interval_read_begin(&notifier->notifier);
+ continue;
+ }
+ break;
+ }
+ if (err)
+ goto err_free;
+
+ for (i = 0; i < npages;) {
+ if (!(pfns[i] & HMM_PFN_VALID)) {
+ err = -EFAULT;
+ goto err_free;
+ }
+ i += 0x1 << hmm_pfn_to_map_order(pfns[i]);
+ }
+
+err_free:
+ kvfree(pfns);
+ return err ? false : true;
+}
+
+/**
+ * drm_gpusvm_range_chunk_size() - Determine chunk size for GPU SVM range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @notifier: Pointer to the GPU SVM notifier structure
+ * @vas: Pointer to the virtual memory area structure
+ * @fault_addr: Fault address
+ * @gpuva_start: Start address of GPUVA which mirrors CPU
+ * @gpuva_end: End address of GPUVA which mirrors CPU
+ * @check_pages_threshold: Check CPU pages for present threshold
+ *
+ * This function determines the chunk size for the GPU SVM range based on the
+ * fault address, GPU SVM chunk sizes, existing GPU SVM ranges, and the virtual
+ * memory area boundaries.
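+ *
+ * For example, assuming chunk sizes of {SZ_2M, SZ_64K, SZ_4K}, a fault address
+ * with only 64 KiB of suitably aligned room inside the VMA, notifier and GPUVA
+ * bounds results in a 64 KiB range.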
+ *
+ * Return: Chunk size on success, LONG_MAX on failure.
+ */
+static unsigned long
+drm_gpusvm_range_chunk_size(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_notifier *notifier,
+ struct vm_area_struct *vas,
+ unsigned long fault_addr,
+ unsigned long gpuva_start,
+ unsigned long gpuva_end,
+ unsigned long check_pages_threshold)
+{
+ unsigned long start, end;
+ int i = 0;
+
+retry:
+ for (; i < gpusvm->num_chunks; ++i) {
+ start = ALIGN_DOWN(fault_addr, gpusvm->chunk_sizes[i]);
+ end = ALIGN(fault_addr + 1, gpusvm->chunk_sizes[i]);
+
+ if (start >= vas->vm_start && end <= vas->vm_end &&
+ start >= drm_gpusvm_notifier_start(notifier) &&
+ end <= drm_gpusvm_notifier_end(notifier) &&
+ start >= gpuva_start && end <= gpuva_end)
+ break;
+ }
+
+ if (i == gpusvm->num_chunks)
+ return LONG_MAX;
+
+ /*
+ * If the allocation is more than one page, ensure it does not overlap
+ * with existing ranges.
+ */
+ if (end - start != SZ_4K) {
+ struct drm_gpusvm_range *range;
+
+ range = drm_gpusvm_range_find(notifier, start, end);
+ if (range) {
+ ++i;
+ goto retry;
+ }
+
+ /*
+ * XXX: Only create range on pages CPU has faulted in. Without
+ * this check, or prefault, on BMG 'xe_exec_system_allocator --r
+ * process-many-malloc' fails. In the failure case, each process
+ * mallocs 16k but the CPU VMA is ~128k which results in 64k SVM
+ * ranges. When migrating the SVM ranges, some processes fail in
+ * drm_gpusvm_migrate_to_devmem with 'migrate.cpages != npages'
+ * and then upon drm_gpusvm_range_get_pages device pages from
+ * other processes are collected + faulted in which creates all
+ * sorts of problems. Unsure exactly how this is happening; the
+ * problem goes away if 'xe_exec_system_allocator --r
+ * process-many-malloc' mallocs at least 64k at a time.
+ */
+ if (end - start <= check_pages_threshold &&
+ !drm_gpusvm_check_pages(gpusvm, notifier, start, end)) {
+ ++i;
+ goto retry;
+ }
+ }
+
+ return end - start;
+}
+
+#ifdef CONFIG_LOCKDEP
+/**
+ * drm_gpusvm_driver_lock_held() - Assert GPU SVM driver lock is held
+ * @gpusvm: Pointer to the GPU SVM structure.
+ *
+ * Ensure driver lock is held.
+ */
+static void drm_gpusvm_driver_lock_held(struct drm_gpusvm *gpusvm)
+{
+ if ((gpusvm)->lock_dep_map)
+ lockdep_assert(lock_is_held_type((gpusvm)->lock_dep_map, 0));
+}
+#else
+static void drm_gpusvm_driver_lock_held(struct drm_gpusvm *gpusvm)
+{
+}
+#endif
+
+/**
+ * drm_gpusvm_range_find_or_insert() - Find or insert GPU SVM range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @fault_addr: Fault address
+ * @gpuva_start: Start address of GPUVA which mirrors CPU
+ * @gpuva_end: End address of GPUVA which mirrors CPU
+ * @ctx: GPU SVM context
+ *
+ * This function finds or inserts a newly allocated GPU SVM range based on the
+ * fault address. Caller must hold a lock to protect range lookup and insertion.
+ *
+ * Return: Pointer to the GPU SVM range on success, ERR_PTR() on failure.
+ */
+struct drm_gpusvm_range *
+drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm,
+ unsigned long fault_addr,
+ unsigned long gpuva_start,
+ unsigned long gpuva_end,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ struct drm_gpusvm_notifier *notifier;
+ struct drm_gpusvm_range *range;
+ struct mm_struct *mm = gpusvm->mm;
+ struct vm_area_struct *vas;
+ bool notifier_alloc = false;
+ unsigned long chunk_size;
+ int err;
+ bool migrate_devmem;
+
+ drm_gpusvm_driver_lock_held(gpusvm);
+
+ if (fault_addr < gpusvm->mm_start ||
+ fault_addr > gpusvm->mm_start + gpusvm->mm_range)
+ return ERR_PTR(-EINVAL);
+
+ if (!mmget_not_zero(mm))
+ return ERR_PTR(-EFAULT);
+
+ notifier = drm_gpusvm_notifier_find(gpusvm, fault_addr);
+ if (!notifier) {
+ notifier = drm_gpusvm_notifier_alloc(gpusvm, fault_addr);
+ if (IS_ERR(notifier)) {
+ err = PTR_ERR(notifier);
+ goto err_mmunlock;
+ }
+ notifier_alloc = true;
+ err = mmu_interval_notifier_insert(&notifier->notifier,
+ mm,
+ drm_gpusvm_notifier_start(notifier),
+ drm_gpusvm_notifier_size(notifier),
+ &drm_gpusvm_notifier_ops);
+ if (err)
+ goto err_notifier;
+ }
+
+ mmap_read_lock(mm);
+
+ vas = vma_lookup(mm, fault_addr);
+ if (!vas) {
+ err = -ENOENT;
+ goto err_notifier_remove;
+ }
+
+ if (!ctx->read_only && !(vas->vm_flags & VM_WRITE)) {
+ err = -EPERM;
+ goto err_notifier_remove;
+ }
+
+ range = drm_gpusvm_range_find(notifier, fault_addr, fault_addr + 1);
+ if (range)
+ goto out_mmunlock;
+ /*
+ * XXX: Short-circuiting migration based on migrate_vma_* current
+ * limitations. If/when migrate_vma_* add more support, this logic will
+ * have to change.
+ */
+ migrate_devmem = ctx->devmem_possible &&
+ vma_is_anonymous(vas) && !is_vm_hugetlb_page(vas);
+
+ chunk_size = drm_gpusvm_range_chunk_size(gpusvm, notifier, vas,
+ fault_addr, gpuva_start,
+ gpuva_end,
+ ctx->check_pages_threshold);
+ if (chunk_size == LONG_MAX) {
+ err = -EINVAL;
+ goto err_notifier_remove;
+ }
+
+ range = drm_gpusvm_range_alloc(gpusvm, notifier, fault_addr, chunk_size,
+ migrate_devmem);
+ if (IS_ERR(range)) {
+ err = PTR_ERR(range);
+ goto err_notifier_remove;
+ }
+
+ drm_gpusvm_range_insert(notifier, range);
+ if (notifier_alloc)
+ drm_gpusvm_notifier_insert(gpusvm, notifier);
+
+out_mmunlock:
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ return range;
+
+err_notifier_remove:
+ mmap_read_unlock(mm);
+ if (notifier_alloc)
+ mmu_interval_notifier_remove(&notifier->notifier);
+err_notifier:
+ if (notifier_alloc)
+ drm_gpusvm_notifier_free(gpusvm, notifier);
+err_mmunlock:
+ mmput(mm);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_find_or_insert);
+
+/**
+ * __drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range (internal)
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @range: Pointer to the GPU SVM range structure
+ * @npages: Number of pages to unmap
+ *
+ * This function unmaps pages associated with a GPU SVM range. Assumes and
+ * asserts correct locking is in place when called.
+ */
+static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range,
+ unsigned long npages)
+{
+ unsigned long i, j;
+ struct drm_pagemap *dpagemap = range->dpagemap;
+ struct device *dev = gpusvm->drm->dev;
+
+ lockdep_assert_held(&gpusvm->notifier_lock);
+
+ if (range->flags.has_dma_mapping) {
+ for (i = 0, j = 0; i < npages; j++) {
+ struct drm_pagemap_device_addr *addr = &range->dma_addr[j];
+
+ if (addr->proto == DRM_INTERCONNECT_SYSTEM)
+ dma_unmap_page(dev,
+ addr->addr,
+ PAGE_SIZE << addr->order,
+ addr->dir);
+ else if (dpagemap && dpagemap->ops->device_unmap)
+ dpagemap->ops->device_unmap(dpagemap,
+ dev, *addr);
+ i += 1 << addr->order;
+ }
+ range->flags.has_devmem_pages = false;
+ range->flags.has_dma_mapping = false;
+ range->dpagemap = NULL;
+ }
+}
+
+/**
+ * drm_gpusvm_range_free_pages() - Free pages associated with a GPU SVM range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @range: Pointer to the GPU SVM range structure
+ *
+ * This function frees the dma address array associated with a GPU SVM range.
+ */
+static void drm_gpusvm_range_free_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range)
+{
+ lockdep_assert_held(&gpusvm->notifier_lock);
+
+ if (range->dma_addr) {
+ kvfree(range->dma_addr);
+ range->dma_addr = NULL;
+ }
+}
+
+/**
+ * drm_gpusvm_range_remove() - Remove GPU SVM range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @range: Pointer to the GPU SVM range to be removed
+ *
+ * This function removes the specified GPU SVM range and also removes the parent
+ * GPU SVM notifier if no more ranges remain in the notifier. The caller must
+ * hold a lock to protect range and notifier removal.
+ */
+void drm_gpusvm_range_remove(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range)
+{
+ unsigned long npages = npages_in_range(drm_gpusvm_range_start(range),
+ drm_gpusvm_range_end(range));
+ struct drm_gpusvm_notifier *notifier;
+
+ drm_gpusvm_driver_lock_held(gpusvm);
+
+ notifier = drm_gpusvm_notifier_find(gpusvm,
+ drm_gpusvm_range_start(range));
+ if (WARN_ON_ONCE(!notifier))
+ return;
+
+ drm_gpusvm_notifier_lock(gpusvm);
+ __drm_gpusvm_range_unmap_pages(gpusvm, range, npages);
+ drm_gpusvm_range_free_pages(gpusvm, range);
+ __drm_gpusvm_range_remove(notifier, range);
+ drm_gpusvm_notifier_unlock(gpusvm);
+
+ drm_gpusvm_range_put(range);
+
+ if (RB_EMPTY_ROOT(&notifier->root.rb_root)) {
+ if (!notifier->flags.removed)
+ mmu_interval_notifier_remove(&notifier->notifier);
+ drm_gpusvm_notifier_remove(gpusvm, notifier);
+ drm_gpusvm_notifier_free(gpusvm, notifier);
+ }
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_remove);
+
+/**
+ * drm_gpusvm_range_get() - Get a reference to GPU SVM range
+ * @range: Pointer to the GPU SVM range
+ *
+ * This function increments the reference count of the specified GPU SVM range.
+ *
+ * Return: Pointer to the GPU SVM range.
+ */
+struct drm_gpusvm_range *
+drm_gpusvm_range_get(struct drm_gpusvm_range *range)
+{
+ kref_get(&range->refcount);
+
+ return range;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_get);
+
+/**
+ * drm_gpusvm_range_destroy() - Destroy GPU SVM range
+ * @refcount: Pointer to the reference counter embedded in the GPU SVM range
+ *
+ * This function destroys the specified GPU SVM range when its reference count
+ * reaches zero. If a custom range-free function is provided, it is invoked to
+ * free the range; otherwise, the range is deallocated using kfree().
+ */
+static void drm_gpusvm_range_destroy(struct kref *refcount)
+{
+ struct drm_gpusvm_range *range =
+ container_of(refcount, struct drm_gpusvm_range, refcount);
+ struct drm_gpusvm *gpusvm = range->gpusvm;
+
+ if (gpusvm->ops->range_free)
+ gpusvm->ops->range_free(range);
+ else
+ kfree(range);
+}
+
+/**
+ * drm_gpusvm_range_put() - Put a reference to GPU SVM range
+ * @range: Pointer to the GPU SVM range
+ *
+ * This function decrements the reference count of the specified GPU SVM range
+ * and frees it when the count reaches zero.
+ */
+void drm_gpusvm_range_put(struct drm_gpusvm_range *range)
+{
+ kref_put(&range->refcount, drm_gpusvm_range_destroy);
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_put);
+
+/**
+ * drm_gpusvm_range_pages_valid() - GPU SVM range pages valid
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @range: Pointer to the GPU SVM range structure
+ *
+ * This function determines if a GPU SVM range's pages are valid. Expected to be
+ * called holding gpusvm->notifier_lock and as the last step before committing a
+ * GPU binding. This is akin to a notifier seqno check in the HMM documentation
+ * but due to wider notifiers (i.e., notifiers which span multiple ranges) this
+ * function is required for finer grained checking (i.e., per range) if pages
+ * are valid.
+ *
+ * Return: True if GPU SVM range has valid pages, False otherwise
+ */
+bool drm_gpusvm_range_pages_valid(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range)
+{
+ lockdep_assert_held(&gpusvm->notifier_lock);
+
+ return range->flags.has_devmem_pages || range->flags.has_dma_mapping;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_pages_valid);
+
+/**
+ * drm_gpusvm_range_pages_valid_unlocked() - GPU SVM range pages valid unlocked
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @range: Pointer to the GPU SVM range structure
+ *
+ * This function determines if a GPU SVM range's pages are valid. Expected to be
+ * called without holding gpusvm->notifier_lock.
+ *
+ * Return: True if GPU SVM range has valid pages, False otherwise
+ */
+static bool
+drm_gpusvm_range_pages_valid_unlocked(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range)
+{
+ bool pages_valid;
+
+ if (!range->dma_addr)
+ return false;
+
+ drm_gpusvm_notifier_lock(gpusvm);
+ pages_valid = drm_gpusvm_range_pages_valid(gpusvm, range);
+ if (!pages_valid)
+ drm_gpusvm_range_free_pages(gpusvm, range);
+ drm_gpusvm_notifier_unlock(gpusvm);
+
+ return pages_valid;
+}
+
+/**
+ * drm_gpusvm_range_get_pages() - Get pages for a GPU SVM range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @range: Pointer to the GPU SVM range structure
+ * @ctx: GPU SVM context
+ *
+ * This function gets pages for a GPU SVM range and ensures they are mapped for
+ * DMA access.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ struct mmu_interval_notifier *notifier = &range->notifier->notifier;
+ struct hmm_range hmm_range = {
+ .default_flags = HMM_PFN_REQ_FAULT | (ctx->read_only ? 0 :
+ HMM_PFN_REQ_WRITE),
+ .notifier = notifier,
+ .start = drm_gpusvm_range_start(range),
+ .end = drm_gpusvm_range_end(range),
+ .dev_private_owner = gpusvm->device_private_page_owner,
+ };
+ struct mm_struct *mm = gpusvm->mm;
+ struct drm_gpusvm_zdd *zdd;
+ unsigned long timeout =
+ jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ unsigned long i, j;
+ unsigned long npages = npages_in_range(drm_gpusvm_range_start(range),
+ drm_gpusvm_range_end(range));
+ unsigned long num_dma_mapped;
+ unsigned int order = 0;
+ unsigned long *pfns;
+ struct page **pages;
+ int err = 0;
+ struct dev_pagemap *pagemap;
+ struct drm_pagemap *dpagemap;
+
+retry:
+ hmm_range.notifier_seq = mmu_interval_read_begin(notifier);
+ if (drm_gpusvm_range_pages_valid_unlocked(gpusvm, range))
+ goto set_seqno;
+
+ pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
+ if (!pfns)
+ return -ENOMEM;
+
+ if (!mmget_not_zero(mm)) {
+ err = -EFAULT;
+ goto err_free;
+ }
+
+ hmm_range.hmm_pfns = pfns;
+ while (true) {
+ mmap_read_lock(mm);
+ err = hmm_range_fault(&hmm_range);
+ mmap_read_unlock(mm);
+
+ if (err == -EBUSY) {
+ if (time_after(jiffies, timeout))
+ break;
+
+ hmm_range.notifier_seq =
+ mmu_interval_read_begin(notifier);
+ continue;
+ }
+ break;
+ }
+ mmput(mm);
+ if (err)
+ goto err_free;
+
+ pages = (struct page **)pfns;
+map_pages:
+ /*
+ * Perform all DMA mappings under the notifier lock so that freed
+ * pages are never accessed. A concurrent notifier will either block
+ * on the notifier lock or unmap the DMA mappings.
+ */
+ drm_gpusvm_notifier_lock(gpusvm);
+
+ if (range->flags.unmapped) {
+ drm_gpusvm_notifier_unlock(gpusvm);
+ err = -EFAULT;
+ goto err_free;
+ }
+
+ if (mmu_interval_read_retry(notifier, hmm_range.notifier_seq)) {
+ drm_gpusvm_notifier_unlock(gpusvm);
+ kvfree(pfns);
+ goto retry;
+ }
+
+ if (!range->dma_addr) {
+ /* Unlock and restart mapping to allocate memory. */
+ drm_gpusvm_notifier_unlock(gpusvm);
+ range->dma_addr = kvmalloc_array(npages,
+ sizeof(*range->dma_addr),
+ GFP_KERNEL);
+ if (!range->dma_addr) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+ goto map_pages;
+ }
+
+ zdd = NULL;
+ num_dma_mapped = 0;
+ for (i = 0, j = 0; i < npages; ++j) {
+ struct page *page = hmm_pfn_to_page(pfns[i]);
+
+ order = hmm_pfn_to_map_order(pfns[i]);
+ if (is_device_private_page(page) ||
+ is_device_coherent_page(page)) {
+ if (zdd != page->zone_device_data && i > 0) {
+ err = -EOPNOTSUPP;
+ goto err_unmap;
+ }
+ zdd = page->zone_device_data;
+ if (pagemap != page_pgmap(page)) {
+ if (i > 0) {
+ err = -EOPNOTSUPP;
+ goto err_unmap;
+ }
+
+ pagemap = page_pgmap(page);
+ dpagemap = zdd->devmem_allocation->dpagemap;
+ if (drm_WARN_ON(gpusvm->drm, !dpagemap)) {
+ /*
+ * Raced. This is not supposed to happen
+ * since hmm_range_fault() should've migrated
+ * this page to system.
+ */
+ err = -EAGAIN;
+ goto err_unmap;
+ }
+ }
+ range->dma_addr[j] =
+ dpagemap->ops->device_map(dpagemap,
+ gpusvm->drm->dev,
+ page, order,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(gpusvm->drm->dev,
+ range->dma_addr[j].addr)) {
+ err = -EFAULT;
+ goto err_unmap;
+ }
+
+ pages[i] = page;
+ } else {
+ dma_addr_t addr;
+
+ if (is_zone_device_page(page) || zdd) {
+ err = -EOPNOTSUPP;
+ goto err_unmap;
+ }
+
+ addr = dma_map_page(gpusvm->drm->dev,
+ page, 0,
+ PAGE_SIZE << order,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(gpusvm->drm->dev, addr)) {
+ err = -EFAULT;
+ goto err_unmap;
+ }
+
+ range->dma_addr[j] = drm_pagemap_device_addr_encode
+ (addr, DRM_INTERCONNECT_SYSTEM, order,
+ DMA_BIDIRECTIONAL);
+ }
+ i += 1 << order;
+ num_dma_mapped = i;
+ }
+
+ range->flags.has_dma_mapping = true;
+ if (zdd) {
+ range->flags.has_devmem_pages = true;
+ range->dpagemap = dpagemap;
+ }
+
+ drm_gpusvm_notifier_unlock(gpusvm);
+ kvfree(pfns);
+set_seqno:
+ range->notifier_seq = hmm_range.notifier_seq;
+
+ return 0;
+
+err_unmap:
+ __drm_gpusvm_range_unmap_pages(gpusvm, range, num_dma_mapped);
+ drm_gpusvm_notifier_unlock(gpusvm);
+err_free:
+ kvfree(pfns);
+ if (err == -EAGAIN)
+ goto retry;
+ return err;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_get_pages);
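As a rough sketch of the locking contract described above (not part of this patch), a driver fault handler might pair drm_gpusvm_range_get_pages() with drm_gpusvm_range_pages_valid() as follows; my_bind_range() is a hypothetical GPU page-table commit, and the drm_gpusvm_notifier_lock()/unlock() helpers are assumed to be the ones provided by drm_gpusvm.h:

static int my_fault_bind(struct drm_gpusvm *gpusvm,
			 struct drm_gpusvm_range *range,
			 const struct drm_gpusvm_ctx *ctx)
{
	int err;

again:
	err = drm_gpusvm_range_get_pages(gpusvm, range, ctx);
	if (err)
		return err;

	drm_gpusvm_notifier_lock(gpusvm);
	if (!drm_gpusvm_range_pages_valid(gpusvm, range)) {
		/* Invalidated between the fault and the bind, try again. */
		drm_gpusvm_notifier_unlock(gpusvm);
		goto again;
	}
	err = my_bind_range(range);	/* hypothetical GPU page-table commit */
	drm_gpusvm_notifier_unlock(gpusvm);

	return err;
}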
+
+/**
+ * drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @range: Pointer to the GPU SVM range structure
+ * @ctx: GPU SVM context
+ *
+ * This function unmaps pages associated with a GPU SVM range. If
+ * @ctx->in_notifier is set, gpusvm->notifier_lock is assumed to be held in
+ * write mode; if it is clear, this function acquires gpusvm->notifier_lock in
+ * read mode. It must be called on each GPU SVM range attached to the notifier
+ * from gpusvm->ops->invalidate in order to satisfy the IOMMU security model.
+ */
+void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ unsigned long npages = npages_in_range(drm_gpusvm_range_start(range),
+ drm_gpusvm_range_end(range));
+
+ if (ctx->in_notifier)
+ lockdep_assert_held_write(&gpusvm->notifier_lock);
+ else
+ drm_gpusvm_notifier_lock(gpusvm);
+
+ __drm_gpusvm_range_unmap_pages(gpusvm, range, npages);
+
+ if (!ctx->in_notifier)
+ drm_gpusvm_notifier_unlock(gpusvm);
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_unmap_pages);
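On the notifier side, a minimal sketch of an invalidate hook (assuming gpusvm->ops->invalidate receives the GPU SVM, the notifier and the struct mmu_notifier_range as declared in drm_gpusvm.h); my_zap_gpu_mappings() stands in for the driver's GPU page-table/TLB invalidation:

static void my_invalidate(struct drm_gpusvm *gpusvm,
			  struct drm_gpusvm_notifier *notifier,
			  const struct mmu_notifier_range *mmu_range)
{
	struct drm_gpusvm_ctx ctx = { .in_notifier = true };
	struct drm_gpusvm_range *range = NULL;

	/* gpusvm->notifier_lock is held in write mode here. */
	my_zap_gpu_mappings(gpusvm, mmu_range->start, mmu_range->end);

	drm_gpusvm_for_each_range(range, notifier, mmu_range->start,
				  mmu_range->end)
		drm_gpusvm_range_unmap_pages(gpusvm, range, &ctx);
}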
+
+/**
+ * drm_gpusvm_migration_unlock_put_page() - Put a migration page
+ * @page: Pointer to the page to put
+ *
+ * This function unlocks and puts a page.
+ */
+static void drm_gpusvm_migration_unlock_put_page(struct page *page)
+{
+ unlock_page(page);
+ put_page(page);
+}
+
+/**
+ * drm_gpusvm_migration_unlock_put_pages() - Put migration pages
+ * @npages: Number of pages
+ * @migrate_pfn: Array of migrate page frame numbers
+ *
+ * This function unlocks and puts an array of pages.
+ */
+static void drm_gpusvm_migration_unlock_put_pages(unsigned long npages,
+ unsigned long *migrate_pfn)
+{
+ unsigned long i;
+
+ for (i = 0; i < npages; ++i) {
+ struct page *page;
+
+ if (!migrate_pfn[i])
+ continue;
+
+ page = migrate_pfn_to_page(migrate_pfn[i]);
+ drm_gpusvm_migration_unlock_put_page(page);
+ migrate_pfn[i] = 0;
+ }
+}
+
+/**
+ * drm_gpusvm_get_devmem_page() - Get a reference to a device memory page
+ * @page: Pointer to the page
+ * @zdd: Pointer to the GPU SVM zone device data
+ *
+ * This function associates the given page with the specified GPU SVM zone
+ * device data and initializes it for zone device usage.
+ */
+static void drm_gpusvm_get_devmem_page(struct page *page,
+ struct drm_gpusvm_zdd *zdd)
+{
+ page->zone_device_data = drm_gpusvm_zdd_get(zdd);
+ zone_device_page_init(page);
+}
+
+/**
+ * drm_gpusvm_migrate_map_pages() - Map migration pages for GPU SVM migration
+ * @dev: The device for which the pages are being mapped
+ * @dma_addr: Array to store DMA addresses corresponding to mapped pages
+ * @migrate_pfn: Array of migrate page frame numbers to map
+ * @npages: Number of pages to map
+ * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
+ *
+ * This function maps pages of memory for migration usage in GPU SVM. It
+ * iterates over each page frame number provided in @migrate_pfn, maps the
+ * corresponding page, and stores the DMA address in the provided @dma_addr
+ * array.
+ *
+ * Return: 0 on success, -EFAULT if an error occurs during mapping.
+ */
+static int drm_gpusvm_migrate_map_pages(struct device *dev,
+ dma_addr_t *dma_addr,
+ unsigned long *migrate_pfn,
+ unsigned long npages,
+ enum dma_data_direction dir)
+{
+ unsigned long i;
+
+ for (i = 0; i < npages; ++i) {
+ struct page *page = migrate_pfn_to_page(migrate_pfn[i]);
+
+ if (!page)
+ continue;
+
+ if (WARN_ON_ONCE(is_zone_device_page(page)))
+ return -EFAULT;
+
+ dma_addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
+ if (dma_mapping_error(dev, dma_addr[i]))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_gpusvm_migrate_unmap_pages() - Unmap pages previously mapped for GPU SVM migration
+ * @dev: The device for which the pages were mapped
+ * @dma_addr: Array of DMA addresses corresponding to mapped pages
+ * @npages: Number of pages to unmap
+ * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
+ *
+ * This function unmaps previously mapped pages of memory for GPU Shared Virtual
+ * Memory (SVM). It iterates over each DMA address provided in @dma_addr, checks
+ * if it's valid and not already unmapped, and unmaps the corresponding page.
+ */
+static void drm_gpusvm_migrate_unmap_pages(struct device *dev,
+ dma_addr_t *dma_addr,
+ unsigned long npages,
+ enum dma_data_direction dir)
+{
+ unsigned long i;
+
+ for (i = 0; i < npages; ++i) {
+ if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i]))
+ continue;
+
+ dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
+ }
+}
+
+/**
+ * drm_gpusvm_migrate_to_devmem() - Migrate GPU SVM range to device memory
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @range: Pointer to the GPU SVM range structure
+ * @devmem_allocation: Pointer to the device memory allocation. The caller
+ * should hold a reference to the device memory allocation,
+ * which should be dropped via ops->devmem_release or upon
+ * the failure of this function.
+ * @ctx: GPU SVM context
+ *
+ * This function migrates the specified GPU SVM range to device memory. It
+ * performs the necessary setup and invokes the driver-specific operations for
+ * migration to device memory. Upon successful return, @devmem_allocation can
+ * safely reference @range until ops->devmem_release is called; the reference
+ * is taken only on successful return. Expected to be called while holding the
+ * mmap lock in read mode.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int drm_gpusvm_migrate_to_devmem(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range,
+ struct drm_gpusvm_devmem *devmem_allocation,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ const struct drm_gpusvm_devmem_ops *ops = devmem_allocation->ops;
+ unsigned long start = drm_gpusvm_range_start(range),
+ end = drm_gpusvm_range_end(range);
+ struct migrate_vma migrate = {
+ .start = start,
+ .end = end,
+ .pgmap_owner = gpusvm->device_private_page_owner,
+ .flags = MIGRATE_VMA_SELECT_SYSTEM,
+ };
+ struct mm_struct *mm = gpusvm->mm;
+ unsigned long i, npages = npages_in_range(start, end);
+ struct vm_area_struct *vas;
+ struct drm_gpusvm_zdd *zdd = NULL;
+ struct page **pages;
+ dma_addr_t *dma_addr;
+ void *buf;
+ int err;
+
+ mmap_assert_locked(gpusvm->mm);
+
+ if (!range->flags.migrate_devmem)
+ return -EINVAL;
+
+ if (!ops->populate_devmem_pfn || !ops->copy_to_devmem ||
+ !ops->copy_to_ram)
+ return -EOPNOTSUPP;
+
+ vas = vma_lookup(mm, start);
+ if (!vas) {
+ err = -ENOENT;
+ goto err_out;
+ }
+
+ if (end > vas->vm_end || start < vas->vm_start) {
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ if (!vma_is_anonymous(vas)) {
+ err = -EBUSY;
+ goto err_out;
+ }
+
+ buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
+ sizeof(*pages), GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
+ pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
+
+ zdd = drm_gpusvm_zdd_alloc(gpusvm->device_private_page_owner);
+ if (!zdd) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ migrate.vma = vas;
+ migrate.src = buf;
+ migrate.dst = migrate.src + npages;
+
+ err = migrate_vma_setup(&migrate);
+ if (err)
+ goto err_free;
+
+ if (!migrate.cpages) {
+ err = -EFAULT;
+ goto err_free;
+ }
+
+ if (migrate.cpages != npages) {
+ err = -EBUSY;
+ goto err_finalize;
+ }
+
+ err = ops->populate_devmem_pfn(devmem_allocation, npages, migrate.dst);
+ if (err)
+ goto err_finalize;
+
+ err = drm_gpusvm_migrate_map_pages(devmem_allocation->dev, dma_addr,
+ migrate.src, npages, DMA_TO_DEVICE);
+ if (err)
+ goto err_finalize;
+
+ for (i = 0; i < npages; ++i) {
+ struct page *page = pfn_to_page(migrate.dst[i]);
+
+ pages[i] = page;
+ migrate.dst[i] = migrate_pfn(migrate.dst[i]);
+ drm_gpusvm_get_devmem_page(page, zdd);
+ }
+
+ err = ops->copy_to_devmem(pages, dma_addr, npages);
+ if (err)
+ goto err_finalize;
+
+ /* Upon success bind devmem allocation to range and zdd */
+ zdd->devmem_allocation = devmem_allocation; /* Owns ref */
+
+err_finalize:
+ if (err)
+ drm_gpusvm_migration_unlock_put_pages(npages, migrate.dst);
+ migrate_vma_pages(&migrate);
+ migrate_vma_finalize(&migrate);
+ drm_gpusvm_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
+ DMA_TO_DEVICE);
+err_free:
+ if (zdd)
+ drm_gpusvm_zdd_put(zdd);
+ kvfree(buf);
+err_out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_migrate_to_devmem);
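A hedged sketch of a caller, purely illustrative: struct my_devmem, my_devmem_ops, my_dpagemap and my_devmem_free() are hypothetical driver pieces, and drm_gpusvm_devmem_init() is the helper defined further down in this file:

struct my_devmem {
	struct drm_gpusvm_devmem base;
	/* driver-private VRAM allocation state would live here */
};

static int my_migrate_to_vram(struct drm_gpusvm *gpusvm,
			      struct drm_gpusvm_range *range,
			      const struct drm_gpusvm_ctx *ctx)
{
	unsigned long size = drm_gpusvm_range_end(range) -
			     drm_gpusvm_range_start(range);
	struct my_devmem *dm;
	int err;

	dm = kzalloc(sizeof(*dm), GFP_KERNEL);
	if (!dm)
		return -ENOMEM;

	drm_gpusvm_devmem_init(&dm->base, gpusvm->drm->dev, gpusvm->mm,
			       &my_devmem_ops, my_dpagemap, size);

	mmap_read_lock(gpusvm->mm);
	err = drm_gpusvm_migrate_to_devmem(gpusvm, range, &dm->base, ctx);
	mmap_read_unlock(gpusvm->mm);
	if (err)
		my_devmem_free(dm);	/* on success the zdd owns the reference */

	return err;
}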
+
+/**
+ * drm_gpusvm_migrate_populate_ram_pfn() - Populate RAM PFNs for a VM area
+ * @vas: Pointer to the VM area structure, can be NULL
+ * @fault_page: Fault page
+ * @npages: Number of pages to populate
+ * @mpages: Number of pages to migrate
+ * @src_mpfn: Source array of migrate PFNs
+ * @mpfn: Array of migrate PFNs to populate
+ * @addr: Start address for PFN allocation
+ *
+ * This function populates the RAM migrate page frame numbers (PFNs) for the
+ * specified VM area structure. It allocates and locks pages in the VM area for
+ * RAM usage. If @vas is non-NULL, alloc_page_vma() is used for allocation;
+ * otherwise alloc_page() is used.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int drm_gpusvm_migrate_populate_ram_pfn(struct vm_area_struct *vas,
+ struct page *fault_page,
+ unsigned long npages,
+ unsigned long *mpages,
+ unsigned long *src_mpfn,
+ unsigned long *mpfn,
+ unsigned long addr)
+{
+ unsigned long i;
+
+ for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
+ struct page *page, *src_page;
+
+ if (!(src_mpfn[i] & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ src_page = migrate_pfn_to_page(src_mpfn[i]);
+ if (!src_page)
+ continue;
+
+ if (fault_page) {
+ if (src_page->zone_device_data !=
+ fault_page->zone_device_data)
+ continue;
+ }
+
+ if (vas)
+ page = alloc_page_vma(GFP_HIGHUSER, vas, addr);
+ else
+ page = alloc_page(GFP_HIGHUSER);
+
+ if (!page)
+ goto free_pages;
+
+ mpfn[i] = migrate_pfn(page_to_pfn(page));
+ }
+
+ for (i = 0; i < npages; ++i) {
+ struct page *page = migrate_pfn_to_page(mpfn[i]);
+
+ if (!page)
+ continue;
+
+ WARN_ON_ONCE(!trylock_page(page));
+ ++*mpages;
+ }
+
+ return 0;
+
+free_pages:
+ for (i = 0; i < npages; ++i) {
+ struct page *page = migrate_pfn_to_page(mpfn[i]);
+
+ if (!page)
+ continue;
+
+ put_page(page);
+ mpfn[i] = 0;
+ }
+ return -ENOMEM;
+}
+
+/**
+ * drm_gpusvm_evict_to_ram() - Evict GPU SVM range to RAM
+ * @devmem_allocation: Pointer to the device memory allocation
+ *
+ * Similar to __drm_gpusvm_migrate_to_ram() but does not require the mmap lock;
+ * migration is done via the migrate_device_* functions.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int drm_gpusvm_evict_to_ram(struct drm_gpusvm_devmem *devmem_allocation)
+{
+ const struct drm_gpusvm_devmem_ops *ops = devmem_allocation->ops;
+ unsigned long npages, mpages = 0;
+ struct page **pages;
+ unsigned long *src, *dst;
+ dma_addr_t *dma_addr;
+ void *buf;
+ int i, err = 0;
+ unsigned int retry_count = 2;
+
+ npages = devmem_allocation->size >> PAGE_SHIFT;
+
+retry:
+ if (!mmget_not_zero(devmem_allocation->mm))
+ return -EFAULT;
+
+ buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*dma_addr) +
+ sizeof(*pages), GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ src = buf;
+ dst = buf + (sizeof(*src) * npages);
+ dma_addr = buf + (2 * sizeof(*src) * npages);
+ pages = buf + (2 * sizeof(*src) + sizeof(*dma_addr)) * npages;
+
+ err = ops->populate_devmem_pfn(devmem_allocation, npages, src);
+ if (err)
+ goto err_free;
+
+ err = migrate_device_pfns(src, npages);
+ if (err)
+ goto err_free;
+
+ err = drm_gpusvm_migrate_populate_ram_pfn(NULL, NULL, npages, &mpages,
+ src, dst, 0);
+ if (err || !mpages)
+ goto err_finalize;
+
+ err = drm_gpusvm_migrate_map_pages(devmem_allocation->dev, dma_addr,
+ dst, npages, DMA_FROM_DEVICE);
+ if (err)
+ goto err_finalize;
+
+ for (i = 0; i < npages; ++i)
+ pages[i] = migrate_pfn_to_page(src[i]);
+
+ err = ops->copy_to_ram(pages, dma_addr, npages);
+ if (err)
+ goto err_finalize;
+
+err_finalize:
+ if (err)
+ drm_gpusvm_migration_unlock_put_pages(npages, dst);
+ migrate_device_pages(src, dst, npages);
+ migrate_device_finalize(src, dst, npages);
+ drm_gpusvm_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
+ DMA_FROM_DEVICE);
+err_free:
+ kvfree(buf);
+err_out:
+ mmput_async(devmem_allocation->mm);
+
+ if (completion_done(&devmem_allocation->detached))
+ return 0;
+
+ if (retry_count--) {
+ cond_resched();
+ goto retry;
+ }
+
+ return err ?: -EBUSY;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_evict_to_ram);
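Eviction, by contrast, is driver-initiated and needs nothing but the allocation handle; reusing the hypothetical struct my_devmem wrapper from the sketch above:

static int my_evict_allocation(struct my_devmem *dm)
{
	/* Copies device pages back to RAM, retrying internally on races. */
	return drm_gpusvm_evict_to_ram(&dm->base);
}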
+
+/**
+ * __drm_gpusvm_migrate_to_ram() - Migrate GPU SVM range to RAM (internal)
+ * @vas: Pointer to the VM area structure
+ * @device_private_page_owner: Device private pages owner
+ * @page: Pointer to the page for fault handling (can be NULL)
+ * @fault_addr: Fault address
+ * @size: Size of migration
+ *
+ * This internal function performs the migration of the specified GPU SVM range
+ * to RAM. It sets up the migration, populates and DMA-maps the RAM PFNs, and
+ * invokes the driver-specific operations for migration to RAM.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int __drm_gpusvm_migrate_to_ram(struct vm_area_struct *vas,
+ void *device_private_page_owner,
+ struct page *page,
+ unsigned long fault_addr,
+ unsigned long size)
+{
+ struct migrate_vma migrate = {
+ .vma = vas,
+ .pgmap_owner = device_private_page_owner,
+ .flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE |
+ MIGRATE_VMA_SELECT_DEVICE_COHERENT,
+ .fault_page = page,
+ };
+ struct drm_gpusvm_zdd *zdd;
+ const struct drm_gpusvm_devmem_ops *ops;
+ struct device *dev = NULL;
+ unsigned long npages, mpages = 0;
+ struct page **pages;
+ dma_addr_t *dma_addr;
+ unsigned long start, end;
+ void *buf;
+ int i, err = 0;
+
+ start = ALIGN_DOWN(fault_addr, size);
+ end = ALIGN(fault_addr + 1, size);
+
+ /* Corner case where the VMA has been partially unmapped */
+ if (start < vas->vm_start)
+ start = vas->vm_start;
+ if (end > vas->vm_end)
+ end = vas->vm_end;
+
+ migrate.start = start;
+ migrate.end = end;
+ npages = npages_in_range(start, end);
+
+ buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
+ sizeof(*pages), GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
+ pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
+
+ migrate.vma = vas;
+ migrate.src = buf;
+ migrate.dst = migrate.src + npages;
+
+ err = migrate_vma_setup(&migrate);
+ if (err)
+ goto err_free;
+
+ /* Raced with another CPU fault, nothing to do */
+ if (!migrate.cpages)
+ goto err_free;
+
+ if (!page) {
+ for (i = 0; i < npages; ++i) {
+ if (!(migrate.src[i] & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ page = migrate_pfn_to_page(migrate.src[i]);
+ break;
+ }
+
+ if (!page)
+ goto err_finalize;
+ }
+ zdd = page->zone_device_data;
+ ops = zdd->devmem_allocation->ops;
+ dev = zdd->devmem_allocation->dev;
+
+ err = drm_gpusvm_migrate_populate_ram_pfn(vas, page, npages, &mpages,
+ migrate.src, migrate.dst,
+ start);
+ if (err)
+ goto err_finalize;
+
+ err = drm_gpusvm_migrate_map_pages(dev, dma_addr, migrate.dst, npages,
+ DMA_FROM_DEVICE);
+ if (err)
+ goto err_finalize;
+
+ for (i = 0; i < npages; ++i)
+ pages[i] = migrate_pfn_to_page(migrate.src[i]);
+
+ err = ops->copy_to_ram(pages, dma_addr, npages);
+ if (err)
+ goto err_finalize;
+
+err_finalize:
+ if (err)
+ drm_gpusvm_migration_unlock_put_pages(npages, migrate.dst);
+ migrate_vma_pages(&migrate);
+ migrate_vma_finalize(&migrate);
+ if (dev)
+ drm_gpusvm_migrate_unmap_pages(dev, dma_addr, npages,
+ DMA_FROM_DEVICE);
+err_free:
+ kvfree(buf);
+err_out:
+
+ return err;
+}
+
+/**
+ * drm_gpusvm_range_evict() - Evict GPU SVM range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @range: Pointer to the GPU SVM range to be evicted
+ *
+ * This function evicts the specified GPU SVM range; coherent pages will not
+ * be evicted.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+int drm_gpusvm_range_evict(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range)
+{
+ struct mmu_interval_notifier *notifier = &range->notifier->notifier;
+ struct hmm_range hmm_range = {
+ .default_flags = HMM_PFN_REQ_FAULT,
+ .notifier = notifier,
+ .start = drm_gpusvm_range_start(range),
+ .end = drm_gpusvm_range_end(range),
+ .dev_private_owner = NULL,
+ };
+ unsigned long timeout =
+ jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ unsigned long *pfns;
+ unsigned long npages = npages_in_range(drm_gpusvm_range_start(range),
+ drm_gpusvm_range_end(range));
+ int err = 0;
+ struct mm_struct *mm = gpusvm->mm;
+
+ if (!mmget_not_zero(mm))
+ return -EFAULT;
+
+ pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
+ if (!pfns)
+ return -ENOMEM;
+
+ hmm_range.hmm_pfns = pfns;
+ while (!time_after(jiffies, timeout)) {
+ hmm_range.notifier_seq = mmu_interval_read_begin(notifier);
+ if (time_after(jiffies, timeout)) {
+ err = -ETIME;
+ break;
+ }
+
+ mmap_read_lock(mm);
+ err = hmm_range_fault(&hmm_range);
+ mmap_read_unlock(mm);
+ if (err != -EBUSY)
+ break;
+ }
+
+ kvfree(pfns);
+ mmput(mm);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_evict);
+
+/**
+ * drm_gpusvm_page_free() - Put GPU SVM zone device data associated with a page
+ * @page: Pointer to the page
+ *
+ * This function is a callback used to put the GPU SVM zone device data
+ * associated with a page when it is being released.
+ */
+static void drm_gpusvm_page_free(struct page *page)
+{
+ drm_gpusvm_zdd_put(page->zone_device_data);
+}
+
+/**
+ * drm_gpusvm_migrate_to_ram() - Migrate GPU SVM range to RAM (page fault handler)
+ * @vmf: Pointer to the fault information structure
+ *
+ * This function is a page fault handler used to migrate a GPU SVM range to RAM.
+ * It retrieves the GPU SVM range information from the faulting page and invokes
+ * the internal migration function to migrate the range back to RAM.
+ *
+ * Return: VM_FAULT_SIGBUS on failure, 0 on success.
+ */
+static vm_fault_t drm_gpusvm_migrate_to_ram(struct vm_fault *vmf)
+{
+ struct drm_gpusvm_zdd *zdd = vmf->page->zone_device_data;
+ int err;
+
+ err = __drm_gpusvm_migrate_to_ram(vmf->vma,
+ zdd->device_private_page_owner,
+ vmf->page, vmf->address,
+ zdd->devmem_allocation->size);
+
+ return err ? VM_FAULT_SIGBUS : 0;
+}
+
+/*
+ * drm_gpusvm_pagemap_ops - Device page map operations for GPU SVM
+ */
+static const struct dev_pagemap_ops drm_gpusvm_pagemap_ops = {
+ .page_free = drm_gpusvm_page_free,
+ .migrate_to_ram = drm_gpusvm_migrate_to_ram,
+};
+
+/**
+ * drm_gpusvm_pagemap_ops_get() - Retrieve GPU SVM device page map operations
+ *
+ * Return: Pointer to the GPU SVM device page map operations structure.
+ */
+const struct dev_pagemap_ops *drm_gpusvm_pagemap_ops_get(void)
+{
+ return &drm_gpusvm_pagemap_ops;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_pagemap_ops_get);
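A sketch of how a driver might plug these shared ops into its device-private struct dev_pagemap; the resource describing the VRAM aperture and the owner cookie are assumptions, and error handling beyond the remap call is omitted:

static int my_init_pagemap(struct device *dev, struct dev_pagemap *pgmap,
			   struct resource *res, void *my_owner)
{
	pgmap->type = MEMORY_DEVICE_PRIVATE;
	pgmap->range.start = res->start;
	pgmap->range.end = res->end;
	pgmap->nr_range = 1;
	pgmap->ops = drm_gpusvm_pagemap_ops_get();
	/* Must match the gpusvm's device_private_page_owner. */
	pgmap->owner = my_owner;

	return PTR_ERR_OR_ZERO(devm_memremap_pages(dev, pgmap));
}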
+
+/**
+ * drm_gpusvm_has_mapping() - Check if GPU SVM has mapping for the given address range
+ * @gpusvm: Pointer to the GPU SVM structure.
+ * @start: Start address
+ * @end: End address
+ *
+ * Return: True if GPU SVM has mapping, False otherwise
+ */
+bool drm_gpusvm_has_mapping(struct drm_gpusvm *gpusvm, unsigned long start,
+ unsigned long end)
+{
+ struct drm_gpusvm_notifier *notifier;
+
+ drm_gpusvm_for_each_notifier(notifier, gpusvm, start, end) {
+ struct drm_gpusvm_range *range = NULL;
+
+ drm_gpusvm_for_each_range(range, notifier, start, end)
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_has_mapping);
+
+/**
+ * drm_gpusvm_range_set_unmapped() - Mark a GPU SVM range as unmapped
+ * @range: Pointer to the GPU SVM range structure.
+ * @mmu_range: Pointer to the MMU notifier range structure.
+ *
+ * This function marks a GPU SVM range as unmapped and sets the partial_unmap flag
+ * if the range partially falls within the provided MMU notifier range.
+ */
+void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range,
+ const struct mmu_notifier_range *mmu_range)
+{
+ lockdep_assert_held_write(&range->gpusvm->notifier_lock);
+
+ range->flags.unmapped = true;
+ if (drm_gpusvm_range_start(range) < mmu_range->start ||
+ drm_gpusvm_range_end(range) > mmu_range->end)
+ range->flags.partial_unmap = true;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_set_unmapped);
+
+/**
+ * drm_gpusvm_devmem_init() - Initialize a GPU SVM device memory allocation
+ * @devmem_allocation: Pointer to the device memory allocation to initialize
+ * @dev: Pointer to the device structure to which the device memory allocation belongs
+ * @mm: Pointer to the mm_struct for the address space
+ * @ops: Pointer to the operations structure for GPU SVM device memory
+ * @dpagemap: The struct drm_pagemap we're allocating from.
+ * @size: Size of device memory allocation
+ */
+void drm_gpusvm_devmem_init(struct drm_gpusvm_devmem *devmem_allocation,
+ struct device *dev, struct mm_struct *mm,
+ const struct drm_gpusvm_devmem_ops *ops,
+ struct drm_pagemap *dpagemap, size_t size)
+{
+ init_completion(&devmem_allocation->detached);
+ devmem_allocation->dev = dev;
+ devmem_allocation->mm = mm;
+ devmem_allocation->ops = ops;
+ devmem_allocation->dpagemap = dpagemap;
+ devmem_allocation->size = size;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_devmem_init);
+
+MODULE_DESCRIPTION("DRM GPUSVM");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/drm_managed.c b/drivers/gpu/drm/drm_managed.c
index 79ce86a5bd67..cc4c463daae7 100644
--- a/drivers/gpu/drm/drm_managed.c
+++ b/drivers/gpu/drm/drm_managed.c
@@ -310,3 +310,11 @@ void __drmm_mutex_release(struct drm_device *dev, void *res)
mutex_destroy(lock);
}
EXPORT_SYMBOL(__drmm_mutex_release);
+
+void __drmm_workqueue_release(struct drm_device *device, void *res)
+{
+ struct workqueue_struct *wq = res;
+
+ destroy_workqueue(wq);
+}
+EXPORT_SYMBOL(__drmm_workqueue_release);
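One plausible way to hook the exported release helper up as a drm-managed action (the header may also provide a convenience wrapper; struct my_driver and the workqueue name are placeholders):

static int my_driver_init_wq(struct drm_device *drm, struct my_driver *priv)
{
	priv->wq = alloc_ordered_workqueue("my-drm-wq", 0);
	if (!priv->wq)
		return -ENOMEM;

	/* destroy_workqueue() will run automatically on drm_device release. */
	return drmm_add_action_or_reset(drm, __drmm_workqueue_release,
					priv->wq);
}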
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index 34bca7567576..89e05a5bed1d 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -218,7 +218,7 @@ int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *
switch (fb->format->format) {
case DRM_FORMAT_RGB565:
if (swap)
- drm_fb_swab(&dst_map, NULL, src, fb, clip, !gem->import_attach,
+ drm_fb_swab(&dst_map, NULL, src, fb, clip, !drm_gem_is_imported(gem),
fmtcnv_state);
else
drm_fb_memcpy(&dst_map, NULL, src, fb, clip);
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 5e5c5f84daac..dfa595556320 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -162,13 +162,13 @@ of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
u32 reg;
if (of_alias_from_compatible(node, info.type, sizeof(info.type)) < 0) {
- drm_err(host, "modalias failure on %pOF\n", node);
+ dev_err(host->dev, "modalias failure on %pOF\n", node);
return ERR_PTR(-EINVAL);
}
ret = of_property_read_u32(node, "reg", &reg);
if (ret) {
- drm_err(host, "device node %pOF has no valid reg property: %d\n",
+ dev_err(host->dev, "device node %pOF has no valid reg property: %d\n",
node, ret);
return ERR_PTR(-EINVAL);
}
@@ -206,18 +206,18 @@ mipi_dsi_device_register_full(struct mipi_dsi_host *host,
int ret;
if (!info) {
- drm_err(host, "invalid mipi_dsi_device_info pointer\n");
+ dev_err(host->dev, "invalid mipi_dsi_device_info pointer\n");
return ERR_PTR(-EINVAL);
}
if (info->channel > 3) {
- drm_err(host, "invalid virtual channel: %u\n", info->channel);
+ dev_err(host->dev, "invalid virtual channel: %u\n", info->channel);
return ERR_PTR(-EINVAL);
}
dsi = mipi_dsi_device_alloc(host);
if (IS_ERR(dsi)) {
- drm_err(host, "failed to allocate DSI device %ld\n",
+ dev_err(host->dev, "failed to allocate DSI device %ld\n",
PTR_ERR(dsi));
return dsi;
}
@@ -228,7 +228,7 @@ mipi_dsi_device_register_full(struct mipi_dsi_host *host,
ret = mipi_dsi_device_add(dsi);
if (ret) {
- drm_err(host, "failed to add DSI device %d\n", ret);
+ dev_err(host->dev, "failed to add DSI device %d\n", ret);
kfree(dsi);
return ERR_PTR(ret);
}
@@ -1266,25 +1266,6 @@ int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
EXPORT_SYMBOL(mipi_dsi_dcs_set_page_address);
/**
- * mipi_dsi_dcs_set_tear_off() - turn off the display module's Tearing Effect
- * output signal on the TE signal line
- * @dsi: DSI peripheral device
- *
- * Return: 0 on success or a negative error code on failure
- */
-int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi)
-{
- ssize_t err;
-
- err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_OFF, NULL, 0);
- if (err < 0)
- return err;
-
- return 0;
-}
-EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_off);
-
-/**
* mipi_dsi_dcs_set_tear_on() - turn on the display module's Tearing Effect
* output signal on the TE signal line.
* @dsi: DSI peripheral device
@@ -1714,6 +1695,29 @@ void mipi_dsi_turn_on_peripheral_multi(struct mipi_dsi_multi_context *ctx)
EXPORT_SYMBOL(mipi_dsi_turn_on_peripheral_multi);
/**
+ * mipi_dsi_dcs_set_tear_off_multi() - turn off the display module's Tearing Effect
+ * output signal on the TE signal line
+ * @ctx: Context for multiple DSI transactions
+ */
+void mipi_dsi_dcs_set_tear_off_multi(struct mipi_dsi_multi_context *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ ssize_t err;
+
+ if (ctx->accum_err)
+ return;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_OFF, NULL, 0);
+ if (err < 0) {
+ ctx->accum_err = err;
+ dev_err(dev, "Failed to set tear off: %d\n",
+ ctx->accum_err);
+ }
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_off_multi);
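A brief illustration of the accumulating-error pattern this helper follows; the companion mipi_dsi_dcs_set_display_off_multi() call is assumed to exist elsewhere in the API and is not introduced by this patch:

static int my_panel_disable(struct mipi_dsi_device *dsi)
{
	struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };

	mipi_dsi_dcs_set_tear_off_multi(&dsi_ctx);
	mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);

	/* The first failure is latched; later calls become no-ops. */
	return dsi_ctx.accum_err;
}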
+
+/**
* mipi_dsi_dcs_soft_reset_multi() - perform a software reset of the display module
* @ctx: Context for multiple DSI transactions
*
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 5530919e0ba0..d0183dea7703 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -268,9 +268,9 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
*panel = NULL;
}
- /* No panel found yet, check for a bridge next. */
if (bridge) {
if (ret) {
+ /* No panel found yet, check for a bridge next. */
*bridge = of_drm_find_bridge(remote);
if (*bridge)
ret = 0;
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index 9940e96d35e3..c627e42a7ce7 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -50,7 +50,7 @@ static LIST_HEAD(panel_list);
* @dev: parent device of the panel
* @funcs: panel operations
* @connector_type: the connector type (DRM_MODE_CONNECTOR_*) corresponding to
- * the panel interface
+ * the panel interface (must NOT be DRM_MODE_CONNECTOR_Unknown)
*
* Initialize the panel structure for subsequent registration with
* drm_panel_add().
@@ -58,6 +58,9 @@ static LIST_HEAD(panel_list);
void drm_panel_init(struct drm_panel *panel, struct device *dev,
const struct drm_panel_funcs *funcs, int connector_type)
{
+ if (connector_type == DRM_MODE_CONNECTOR_Unknown)
+ DRM_WARN("%s: %s: a valid connector type is required!\n", __func__, dev_name(dev));
+
INIT_LIST_HEAD(&panel->list);
INIT_LIST_HEAD(&panel->followers);
mutex_init(&panel->follower_lock);
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 4a73821b81f6..c554ad8f246b 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -93,6 +93,12 @@ static const struct drm_dmi_panel_orientation_data onegx1_pro = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd640x960_leftside_up = {
+ .width = 640,
+ .height = 960,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
+};
+
static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
.width = 720,
.height = 1280,
@@ -123,6 +129,12 @@ static const struct drm_dmi_panel_orientation_data lcd1080x1920_rightside_up = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd1200x1920_leftside_up = {
+ .width = 1200,
+ .height = 1920,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
+};
+
static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = {
.width = 1200,
.height = 1920,
@@ -184,10 +196,10 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
- }, { /* AYA NEO AYANEO 2 */
+ }, { /* AYA NEO AYANEO 2/2S */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYANEO 2"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AYANEO 2"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* AYA NEO 2021 */
@@ -202,6 +214,18 @@ static const struct dmi_system_id orientation_data[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "AIR"),
},
.driver_data = (void *)&lcd1080x1920_leftside_up,
+ }, { /* AYA NEO Flip DS Bottom Screen */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FLIP DS"),
+ },
+ .driver_data = (void *)&lcd640x960_leftside_up,
+ }, { /* AYA NEO Flip KB/DS Top Screen */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "FLIP"),
+ },
+ .driver_data = (void *)&lcd1080x1920_leftside_up,
}, { /* AYA NEO Founder */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYA NEO"),
@@ -226,6 +250,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_MATCH(DMI_BOARD_NAME, "KUN"),
},
.driver_data = (void *)&lcd1600x2560_rightside_up,
+ }, { /* AYA NEO SLIDE */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SLIDE"),
+ },
+ .driver_data = (void *)&lcd1080x1920_leftside_up,
}, { /* AYN Loki Max */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ayn"),
@@ -315,6 +345,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
},
.driver_data = (void *)&gpd_win2,
+ }, { /* GPD Win 2 (correct DMI strings) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "WIN2")
+ },
+ .driver_data = (void *)&lcd720x1280_rightside_up,
}, { /* GPD Win 3 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
@@ -443,6 +479,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"),
},
.driver_data = (void *)&lcd1600x2560_leftside_up,
+ }, { /* OneXPlayer Mini (Intel) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ONE-NETBOOK TECHNOLOGY CO., LTD."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"),
+ },
+ .driver_data = (void *)&lcd1200x1920_leftside_up,
}, { /* OrangePi Neo */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "OrangePi"),
diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c
index f128d345b16d..b47ea25fdfaa 100644
--- a/drivers/gpu/drm/drm_panic.c
+++ b/drivers/gpu/drm/drm_panic.c
@@ -486,11 +486,6 @@ static void drm_panic_qr_exit(void)
stream.workspace = NULL;
}
-extern size_t drm_panic_qr_max_data_size(u8 version, size_t url_len);
-
-extern u8 drm_panic_qr_generate(const char *url, u8 *data, size_t data_len, size_t data_size,
- u8 *tmp, size_t tmp_size);
-
static int drm_panic_get_qr_code_url(u8 **qr_image)
{
struct kmsg_dump_iter iter;
@@ -499,7 +494,7 @@ static int drm_panic_get_qr_code_url(u8 **qr_image)
char *kmsg;
int max_qr_data_size, url_len;
- url_len = snprintf(url, sizeof(url), CONFIG_DRM_PANIC_SCREEN_QR_CODE_URL "?a=%s&v=%s&zl=",
+ url_len = snprintf(url, sizeof(url), CONFIG_DRM_PANIC_SCREEN_QR_CODE_URL "?a=%s&v=%s&z=",
utsname()->machine, utsname()->release);
max_qr_data_size = drm_panic_qr_max_data_size(panic_qr_version, url_len);
diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs
index bcf248f69252..f2a99681b998 100644
--- a/drivers/gpu/drm/drm_panic_qr.rs
+++ b/drivers/gpu/drm/drm_panic_qr.rs
@@ -13,12 +13,13 @@
//! The binary data must be a valid URL parameter, so the easiest way is
//! to use base64 encoding. But this wastes 25% of data space, so the
//! whole stack trace won't fit in the QR code. So instead it encodes
-//! every 13bits of input into 4 decimal digits, and then uses the
+//! every 7 bytes of input into 17 decimal digits, and then uses the
//! efficient numeric encoding, that encode 3 decimal digits into
-//! 10bits. This makes 39bits of compressed data into 12 decimal digits,
-//! into 40bits in the QR code, so wasting only 2.5%. And the numbers are
+//! 10bits. This makes 168bits of compressed data into 51 decimal digits,
+//! into 170bits in the QR code, so wasting only 1.17%. And the numbers are
//! valid URL parameter, so the website can do the reverse, to get the
-//! binary data.
+//! binary data. This is the same algorithm used by Fido v2.2 QR-initiated
+//! authentication specification.
//!
//! Inspired by these 3 projects, all under MIT license:
//!
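To spell out the arithmetic above: 7 bytes hold 2^56 ≈ 7.2 × 10^16 distinct values, which fits in 17 decimal digits (10^17); three such chunks turn 21 bytes (168 bits) of input into 51 digits, and numeric mode packs those as 17 groups of 3 digits × 10 bits = 170 bits in the QR code, i.e. only 2 bits of overhead per 21-byte block.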
@@ -26,8 +27,7 @@
//! * <https://github.com/erwanvivien/fast_qr>
//! * <https://github.com/bjguillot/qr>
-use core::cmp;
-use kernel::str::CStr;
+use kernel::{prelude::*, str::CStr};
#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd)]
struct Version(usize);
@@ -296,35 +296,11 @@ const MODE_BINARY: u16 = 4;
/// Padding bytes.
const PADDING: [u8; 2] = [236, 17];
-/// Get the next 13 bits of data, starting at specified offset (in bits).
-fn get_next_13b(data: &[u8], offset: usize) -> Option<(u16, usize)> {
- if offset < data.len() * 8 {
- let size = cmp::min(13, data.len() * 8 - offset);
- let byte_off = offset / 8;
- let bit_off = offset % 8;
- // `b` is 20 at max (`bit_off` <= 7 and `size` <= 13).
- let b = (bit_off + size) as u16;
-
- let first_byte = (data[byte_off] << bit_off >> bit_off) as u16;
-
- let number = match b {
- 0..=8 => first_byte >> (8 - b),
- 9..=16 => (first_byte << (b - 8)) + (data[byte_off + 1] >> (16 - b)) as u16,
- _ => {
- (first_byte << (b - 8))
- + ((data[byte_off + 1] as u16) << (b - 16))
- + (data[byte_off + 2] >> (24 - b)) as u16
- }
- };
- Some((number, size))
- } else {
- None
- }
-}
-
/// Number of bits to encode characters in numeric mode.
const NUM_CHARS_BITS: [usize; 4] = [0, 4, 7, 10];
-const POW10: [u16; 4] = [1, 10, 100, 1000];
+/// Number of decimal digits required to encode n bytes of binary data.
+/// e.g., you need 15 decimal digits to fit 6 bytes of binary data.
+const BYTES_TO_DIGITS: [usize; 8] = [0, 3, 5, 8, 10, 13, 15, 17];
enum Segment<'a> {
Numeric(&'a [u8]),
@@ -360,13 +336,9 @@ impl Segment<'_> {
match self {
Segment::Binary(data) => data.len(),
Segment::Numeric(data) => {
- let data_bits = data.len() * 8;
- let last_chars = match data_bits % 13 {
- 1 => 1,
- k => (k + 1) / 3,
- };
- // 4 decimal numbers per 13bits + remainder.
- 4 * (data_bits / 13) + last_chars
+ let last_chars = BYTES_TO_DIGITS[data.len() % 7];
+ // 17 decimal numbers per 7bytes + remainder.
+ 17 * (data.len() / 7) + last_chars
}
}
}
@@ -403,7 +375,7 @@ impl Segment<'_> {
struct SegmentIterator<'a> {
segment: &'a Segment<'a>,
offset: usize,
- carry: u16,
+ carry: u64,
carry_len: usize,
}
@@ -422,40 +394,30 @@ impl Iterator for SegmentIterator<'_> {
}
}
Segment::Numeric(data) => {
- if self.carry_len == 3 {
- let out = (self.carry, NUM_CHARS_BITS[self.carry_len]);
- self.carry_len = 0;
- self.carry = 0;
- Some(out)
- } else if let Some((bits, size)) = get_next_13b(data, self.offset) {
- self.offset += size;
- let new_chars = match size {
- 1 => 1,
- k => (k + 1) / 3,
- };
- if self.carry_len + new_chars > 3 {
- self.carry_len = new_chars + self.carry_len - 3;
- let out = (
- self.carry * POW10[new_chars - self.carry_len]
- + bits / POW10[self.carry_len],
- NUM_CHARS_BITS[3],
- );
- self.carry = bits % POW10[self.carry_len];
- Some(out)
- } else {
- let out = (
- self.carry * POW10[new_chars] + bits,
- NUM_CHARS_BITS[self.carry_len + new_chars],
- );
- self.carry_len = 0;
- Some(out)
+ if self.carry_len < 3 && self.offset < data.len() {
+ // If there are less than 3 decimal digits in the carry,
+ // take the next 7 bytes of input, and add them to the carry.
+ let mut buf = [0u8; 8];
+ let len = 7.min(data.len() - self.offset);
+ buf[..len].copy_from_slice(&data[self.offset..self.offset + len]);
+ let chunk = u64::from_le_bytes(buf);
+ let pow = u64::pow(10, BYTES_TO_DIGITS[len] as u32);
+ self.carry = chunk + self.carry * pow;
+ self.offset += len;
+ self.carry_len += BYTES_TO_DIGITS[len];
+ }
+ match self.carry_len {
+ 0 => None,
+ len => {
+ // take the next 3 decimal digits of the carry
+ // and return 10bits of numeric data.
+ let out_len = 3.min(len);
+ self.carry_len -= out_len;
+ let pow = u64::pow(10, self.carry_len as u32);
+ let out = (self.carry / pow) as u16;
+ self.carry = self.carry % pow;
+ Some((out, NUM_CHARS_BITS[out_len]))
}
- } else if self.carry_len > 0 {
- let out = (self.carry, NUM_CHARS_BITS[self.carry_len]);
- self.carry_len = 0;
- Some(out)
- } else {
- None
}
}
}
@@ -545,7 +507,7 @@ impl EncodedMsg<'_> {
}
self.push(&mut offset, (MODE_STOP, 4));
- let pad_offset = (offset + 7) / 8;
+ let pad_offset = offset.div_ceil(8);
for i in pad_offset..self.version.max_data() {
self.data[i] = PADDING[(i & 1) ^ (pad_offset & 1)];
}
@@ -659,7 +621,7 @@ struct QrImage<'a> {
impl QrImage<'_> {
fn new<'a, 'b>(em: &'b EncodedMsg<'b>, qrdata: &'a mut [u8]) -> QrImage<'a> {
let width = em.version.width();
- let stride = (width + 7) / 8;
+ let stride = width.div_ceil(8);
let data = qrdata;
let mut qr_image = QrImage {
@@ -911,16 +873,16 @@ impl QrImage<'_> {
///
/// * `url`: The base URL of the QR code. It will be encoded as Binary segment.
/// * `data`: A pointer to the binary data, to be encoded. if URL is NULL, it
-/// will be encoded as binary segment, otherwise it will be encoded
-/// efficiently as a numeric segment, and appended to the URL.
+/// will be encoded as binary segment, otherwise it will be encoded
+/// efficiently as a numeric segment, and appended to the URL.
/// * `data_len`: Length of the data, that needs to be encoded, must be less
-/// than data_size.
+/// than data_size.
/// * `data_size`: Size of data buffer, it should be at least 4071 bytes to hold
-/// a V40 QR code. It will then be overwritten with the QR code image.
+/// a V40 QR code. It will then be overwritten with the QR code image.
/// * `tmp`: A temporary buffer that the QR code encoder will use, to write the
-/// segments and ECC.
+/// segments and ECC.
/// * `tmp_size`: Size of the temporary buffer, it must be at least 3706 bytes
-/// long for V40.
+/// long for V40.
///
/// # Safety
///
@@ -929,7 +891,7 @@ impl QrImage<'_> {
/// * `tmp` must be valid for reading and writing for `tmp_size` bytes.
///
/// They must remain valid for the duration of the function call.
-#[no_mangle]
+#[export]
pub unsafe extern "C" fn drm_panic_qr_generate(
url: *const kernel::ffi::c_char,
data: *mut u8,
@@ -980,8 +942,13 @@ pub unsafe extern "C" fn drm_panic_qr_generate(
/// * If `url_len` > 0, remove the 2 segments header/length and also count the
/// conversion to numeric segments.
/// * If `url_len` = 0, only removes 3 bytes for 1 binary segment.
-#[no_mangle]
-pub extern "C" fn drm_panic_qr_max_data_size(version: u8, url_len: usize) -> usize {
+///
+/// # Safety
+///
+/// Always safe to call.
+// Required to be unsafe due to the `#[export]` annotation.
+#[export]
+pub unsafe extern "C" fn drm_panic_qr_max_data_size(version: u8, url_len: usize) -> usize {
#[expect(clippy::manual_range_contains)]
if version < 1 || version > 40 {
return 0;
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 32a8781cfd67..bdb51c8f262e 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -453,13 +453,7 @@ struct dma_buf *drm_gem_prime_handle_to_dmabuf(struct drm_device *dev,
}
mutex_lock(&dev->object_name_lock);
- /* re-export the original imported object */
- if (obj->import_attach) {
- dmabuf = obj->import_attach->dmabuf;
- get_dma_buf(dmabuf);
- goto out_have_obj;
- }
-
+ /* re-export the original imported/exported object */
if (obj->dma_buf) {
get_dma_buf(obj->dma_buf);
dmabuf = obj->dma_buf;
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 96b266b37ba4..7ba16323e7c2 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -202,7 +202,7 @@ enum drm_mode_status drm_encoder_mode_valid(struct drm_encoder *encoder,
int
drm_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_modeset_acquire_ctx *ctx,
enum drm_mode_status *status)
{
@@ -338,10 +338,23 @@ void drm_kms_helper_poll_reschedule(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_kms_helper_poll_reschedule);
+static int detect_connector_status(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ const struct drm_connector_helper_funcs *funcs = connector->helper_private;
+
+ if (funcs->detect_ctx)
+ return funcs->detect_ctx(connector, ctx, force);
+ else if (connector->funcs->detect)
+ return connector->funcs->detect(connector, force);
+
+ return connector_status_connected;
+}
+
static enum drm_connector_status
drm_helper_probe_detect_ctx(struct drm_connector *connector, bool force)
{
- const struct drm_connector_helper_funcs *funcs = connector->helper_private;
struct drm_modeset_acquire_ctx ctx;
int ret;
@@ -349,14 +362,8 @@ drm_helper_probe_detect_ctx(struct drm_connector *connector, bool force)
retry:
ret = drm_modeset_lock(&connector->dev->mode_config.connection_mutex, &ctx);
- if (!ret) {
- if (funcs->detect_ctx)
- ret = funcs->detect_ctx(connector, &ctx, force);
- else if (connector->funcs->detect)
- ret = connector->funcs->detect(connector, force);
- else
- ret = connector_status_connected;
- }
+ if (!ret)
+ ret = detect_connector_status(connector, &ctx, force);
if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
@@ -390,7 +397,6 @@ drm_helper_probe_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
bool force)
{
- const struct drm_connector_helper_funcs *funcs = connector->helper_private;
struct drm_device *dev = connector->dev;
int ret;
@@ -401,12 +407,7 @@ drm_helper_probe_detect(struct drm_connector *connector,
if (ret)
return ret;
- if (funcs->detect_ctx)
- ret = funcs->detect_ctx(connector, ctx, force);
- else if (connector->funcs->detect)
- ret = connector->funcs->detect(connector, force);
- else
- ret = connector_status_connected;
+ ret = detect_connector_status(connector, ctx, force);
if (ret != connector->status)
connector->epoch_counter += 1;
diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
index 33a3c98a962d..edbeab88ff2b 100644
--- a/drivers/gpu/drm/drm_writeback.c
+++ b/drivers/gpu/drm/drm_writeback.c
@@ -15,6 +15,7 @@
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_property.h>
#include <drm/drm_writeback.h>
@@ -195,14 +196,29 @@ int drm_writeback_connector_init(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_writeback_connector_init);
+static void delete_writeback_properties(struct drm_device *dev)
+{
+ if (dev->mode_config.writeback_pixel_formats_property) {
+ drm_property_destroy(dev, dev->mode_config.writeback_pixel_formats_property);
+ dev->mode_config.writeback_pixel_formats_property = NULL;
+ }
+ if (dev->mode_config.writeback_out_fence_ptr_property) {
+ drm_property_destroy(dev, dev->mode_config.writeback_out_fence_ptr_property);
+ dev->mode_config.writeback_out_fence_ptr_property = NULL;
+ }
+ if (dev->mode_config.writeback_fb_id_property) {
+ drm_property_destroy(dev, dev->mode_config.writeback_fb_id_property);
+ dev->mode_config.writeback_fb_id_property = NULL;
+ }
+}
+
/**
- * drm_writeback_connector_init_with_encoder - Initialize a writeback connector with
+ * __drm_writeback_connector_init - Initialize a writeback connector with
* a custom encoder
*
* @dev: DRM device
* @wb_connector: Writeback connector to initialize
* @enc: handle to the already initialized drm encoder
- * @con_funcs: Connector funcs vtable
* @formats: Array of supported pixel formats for the writeback engine
* @n_formats: Length of the formats array
*
@@ -218,41 +234,33 @@ EXPORT_SYMBOL(drm_writeback_connector_init);
* assigning the encoder helper functions, possible_crtcs and any other encoder
* specific operation.
*
- * Drivers should always use this function instead of drm_connector_init() to
- * set up writeback connectors if they want to manage themselves the lifetime of the
- * associated encoder.
- *
* Returns: 0 on success, or a negative error code
*/
-int drm_writeback_connector_init_with_encoder(struct drm_device *dev,
- struct drm_writeback_connector *wb_connector, struct drm_encoder *enc,
- const struct drm_connector_funcs *con_funcs, const u32 *formats,
- int n_formats)
+static int __drm_writeback_connector_init(struct drm_device *dev,
+ struct drm_writeback_connector *wb_connector,
+ struct drm_encoder *enc, const u32 *formats,
+ int n_formats)
{
- struct drm_property_blob *blob;
struct drm_connector *connector = &wb_connector->base;
struct drm_mode_config *config = &dev->mode_config;
+ struct drm_property_blob *blob;
int ret = create_writeback_properties(dev);
if (ret != 0)
- return ret;
-
- blob = drm_property_create_blob(dev, n_formats * sizeof(*formats),
- formats);
- if (IS_ERR(blob))
- return PTR_ERR(blob);
-
+ goto failed_properties;
connector->interlace_allowed = 0;
- ret = drm_connector_init(dev, connector, con_funcs,
- DRM_MODE_CONNECTOR_WRITEBACK);
- if (ret)
- goto connector_fail;
-
ret = drm_connector_attach_encoder(connector, enc);
if (ret)
- goto attach_fail;
+ goto failed_properties;
+
+ blob = drm_property_create_blob(dev, n_formats * sizeof(*formats),
+ formats);
+ if (IS_ERR(blob)) {
+ ret = PTR_ERR(blob);
+ goto failed_properties;
+ }
INIT_LIST_HEAD(&wb_connector->job_queue);
spin_lock_init(&wb_connector->job_lock);
@@ -275,15 +283,137 @@ int drm_writeback_connector_init_with_encoder(struct drm_device *dev,
wb_connector->pixel_formats_blob_ptr = blob;
return 0;
+failed_properties:
+ delete_writeback_properties(dev);
+ return ret;
+}
+
+/**
+ * drm_writeback_connector_init_with_encoder - Initialize a writeback connector with
+ * a custom encoder
+ *
+ * @dev: DRM device
+ * @wb_connector: Writeback connector to initialize
+ * @enc: handle to the already initialized drm encoder
+ * @con_funcs: Connector funcs vtable
+ * @formats: Array of supported pixel formats for the writeback engine
+ * @n_formats: Length of the formats array
+ *
+ * This function creates the writeback-connector-specific properties if they
+ * have not been already created, initializes the connector as
+ * type DRM_MODE_CONNECTOR_WRITEBACK, and correctly initializes the property
+ * values.
+ *
+ * This function assumes that the drm_writeback_connector's encoder has already been
+ * created and initialized before invoking this function.
+ *
+ * In addition, this function also assumes that callers of this API will manage
+ * assigning the encoder helper functions, possible_crtcs and any other encoder
+ * specific operation.
+ *
+ * Drivers should always use this function instead of drm_connector_init() to
+ * set up writeback connectors if they want to manage themselves the lifetime of the
+ * associated encoder.
+ *
+ * Returns: 0 on success, or a negative error code
+ */
+int drm_writeback_connector_init_with_encoder(struct drm_device *dev,
+ struct drm_writeback_connector *wb_connector,
+ struct drm_encoder *enc,
+ const struct drm_connector_funcs *con_funcs,
+ const u32 *formats, int n_formats)
+{
+ struct drm_connector *connector = &wb_connector->base;
+ int ret;
+
+ ret = drm_connector_init(dev, connector, con_funcs,
+ DRM_MODE_CONNECTOR_WRITEBACK);
+ if (ret)
+ return ret;
+
+ ret = __drm_writeback_connector_init(dev, wb_connector, enc, formats,
+ n_formats);
+ if (ret)
+ drm_connector_cleanup(connector);
-attach_fail:
- drm_connector_cleanup(connector);
-connector_fail:
- drm_property_blob_put(blob);
return ret;
}
EXPORT_SYMBOL(drm_writeback_connector_init_with_encoder);
+/**
+ * drm_writeback_connector_cleanup - Cleanup the writeback connector
+ * @dev: DRM device
+ * @wb_connector: Pointer to the writeback connector to clean up
+ *
+ * This will decrement the reference counter of the format blob and destroy the
+ * writeback properties. It will also clean up any remaining jobs queued on
+ * this writeback connector. Caution: this helper does not clean up the
+ * attached encoder or the drm_connector itself.
+ */
+static void drm_writeback_connector_cleanup(struct drm_device *dev,
+ struct drm_writeback_connector *wb_connector)
+{
+ unsigned long flags;
+ struct drm_writeback_job *pos, *n;
+
+ delete_writeback_properties(dev);
+ drm_property_blob_put(wb_connector->pixel_formats_blob_ptr);
+
+ spin_lock_irqsave(&wb_connector->job_lock, flags);
+ list_for_each_entry_safe(pos, n, &wb_connector->job_queue, list_entry) {
+ list_del(&pos->list_entry);
+ drm_writeback_cleanup_job(pos);
+ }
+ spin_unlock_irqrestore(&wb_connector->job_lock, flags);
+}
+
+/**
+ * drmm_writeback_connector_init - Initialize a writeback connector with
+ * a custom encoder
+ *
+ * @dev: DRM device
+ * @wb_connector: Writeback connector to initialize
+ * @con_funcs: Connector funcs vtable
+ * @enc: Encoder to connect this writeback connector
+ * @formats: Array of supported pixel formats for the writeback engine
+ * @n_formats: Length of the formats array
+ *
+ * This function initializes a writeback connector and registers its cleanup.
+ *
+ * This function creates the writeback-connector-specific properties if they
+ * have not been already created, initializes the connector as
+ * type DRM_MODE_CONNECTOR_WRITEBACK, and correctly initializes the property
+ * values.
+ *
+ * Returns: 0 on success, or a negative error code
+ */
+int drmm_writeback_connector_init(struct drm_device *dev,
+ struct drm_writeback_connector *wb_connector,
+ const struct drm_connector_funcs *con_funcs,
+ struct drm_encoder *enc,
+ const u32 *formats, int n_formats)
+{
+ struct drm_connector *connector = &wb_connector->base;
+ int ret;
+
+ ret = drmm_connector_init(dev, connector, con_funcs,
+ DRM_MODE_CONNECTOR_WRITEBACK, NULL);
+ if (ret)
+ return ret;
+
+ ret = __drm_writeback_connector_init(dev, wb_connector, enc, formats,
+ n_formats);
+ if (ret)
+ return ret;
+
+ ret = drmm_add_action_or_reset(dev, (void *)drm_writeback_connector_cleanup,
+ wb_connector);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drmm_writeback_connector_init);
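A minimal, hypothetical caller of the new managed variant; my_output, my_wb_connector_funcs and the single-entry format list are placeholders:

static int my_init_writeback(struct drm_device *drm, struct my_output *out)
{
	static const u32 wb_formats[] = { DRM_FORMAT_XRGB8888 };

	/* Cleanup is registered via drmm, no explicit teardown needed. */
	return drmm_writeback_connector_init(drm, &out->wb_connector,
					     &my_wb_connector_funcs,
					     &out->encoder, wb_formats,
					     ARRAY_SIZE(wb_formats));
}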
+
int drm_writeback_set_fb(struct drm_connector_state *conn_state,
struct drm_framebuffer *fb)
{
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 5b67eda122db..76a3a3e517d8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -144,17 +144,17 @@ out_unlock:
int etnaviv_sched_init(struct etnaviv_gpu *gpu)
{
- int ret;
-
- ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops, NULL,
- DRM_SCHED_PRIORITY_COUNT,
- etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
- msecs_to_jiffies(500), NULL, NULL,
- dev_name(gpu->dev), gpu->dev);
- if (ret)
- return ret;
-
- return 0;
+ const struct drm_sched_init_args args = {
+ .ops = &etnaviv_sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = etnaviv_hw_jobs_limit,
+ .hang_limit = etnaviv_job_hang_limit,
+ .timeout = msecs_to_jiffies(500),
+ .name = dev_name(gpu->dev),
+ .dev = gpu->dev,
+ };
+
+ return drm_sched_init(&gpu->sched, &args);
}
void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 176fd8871759..01813e11e6c6 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -931,7 +931,7 @@ static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
}
static enum drm_mode_status hdmi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct hdmi_context *hdata = connector_to_hdmi(connector);
int ret;
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index c418e8496bdf..84eff7519e32 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -63,7 +63,7 @@ static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
if (mode->hdisplay & 0xf)
return MODE_ERROR;
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 3e83299113e3..718d45891fc7 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -215,7 +215,7 @@ static void cdv_errata(struct drm_device *dev)
* Bonus Launch to work around the issue, by degrading
* performance.
*/
- CDV_MSG_WRITE32(pci_domain_nr(pdev->bus), 3, 0x30, 0x08027108);
+ CDV_MSG_WRITE32(pci_domain_nr(pdev->bus), 3, 0x30, 0x08027108);
}
/**
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 5a0acd914f76..06fe7480e7af 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -69,7 +69,7 @@ static void cdv_intel_crt_dpms(struct drm_encoder *encoder, int mode)
}
static enum drm_mode_status cdv_intel_crt_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index cc2ed9b3fd2d..c85143792019 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -505,7 +505,7 @@ static void cdv_intel_edp_backlight_off (struct gma_encoder *intel_encoder)
static enum drm_mode_status
cdv_intel_dp_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct gma_encoder *encoder = gma_attached_encoder(connector);
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
@@ -855,8 +855,7 @@ cdv_intel_dp_i2c_init(struct gma_connector *connector,
memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter));
intel_dp->adapter.owner = THIS_MODULE;
- strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
- intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
+ strscpy(intel_dp->adapter.name, name);
intel_dp->adapter.algo_data = &intel_dp->algo;
intel_dp->adapter.dev.parent = connector->base.kdev;
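The strncpy-to-strscpy change above works because strscpy() always NUL-terminates and, in its two-argument form, derives the bound from the destination array, so the manual termination line can go. A small sketch with hypothetical buf/src:

	char buf[32];	/* hypothetical destination */

	/* Old pattern: bounded copy plus explicit termination. */
	strncpy(buf, src, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';

	/* New pattern: bound inferred from buf[], termination guaranteed. */
	strscpy(buf, src);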
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 2d95e0471291..f2a3e37ef632 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -222,7 +222,7 @@ static int cdv_hdmi_get_modes(struct drm_connector *connector)
}
static enum drm_mode_status cdv_hdmi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index f3a4517bdf27..9276e3676ba0 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -153,7 +153,7 @@ static void cdv_intel_lvds_restore(struct drm_connector *connector)
}
static enum drm_mode_status cdv_intel_lvds_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index 7e76790c6a81..cba97d7db131 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -279,6 +279,11 @@ static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
0, PCI_DEVFN(2, 0));
int ret = -1;
+ if (pci_gfx_root == NULL) {
+ WARN_ON(1);
+ return;
+ }
+
/* Get the address of the platform config vbt */
pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
pci_dev_put(pci_gfx_root);
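The mid_bios.c hunk above guards against the PCI device lookup returning NULL before the config-space read. A minimal sketch of the guard pattern, assuming the pci_get_domain_bus_and_slot() lookup used in the surrounding code and domain 0 for illustration:

	struct pci_dev *pci_gfx_root =
		pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(2, 0));
	u32 addr;

	if (pci_gfx_root == NULL) {
		WARN_ON(1);		/* graphics root device is expected to exist */
		return;
	}

	pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
	pci_dev_put(pci_gfx_root);	/* drop the reference taken by the lookup */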
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index ed8626c73541..1cf394369127 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -514,7 +514,7 @@ static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
}
static enum drm_mode_status oaktrail_hdmi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 2499fd6a80c9..9dc9dcd1b09f 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -212,7 +212,7 @@ extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
extern int psb_intel_lvds_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value);
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 138f153d38ba..9ad611b5956e 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -331,7 +331,7 @@ static void psb_intel_lvds_restore(struct drm_connector *connector)
}
enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(connector->dev);
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 8dafff963ca8..afda40fc4494 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1159,7 +1159,7 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
}
static enum drm_mode_status psb_intel_sdvo_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index f59abfa7622a..0d49f168a919 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -154,6 +154,7 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
return 0;
err_free_mmio:
+ iounmap(hv->vram);
vmbus_free_mmio(hv->mem->start, hv->fb_size);
err_vmbus_close:
vmbus_close(hdev->channel);
@@ -172,6 +173,7 @@ static void hyperv_vmbus_remove(struct hv_device *hdev)
vmbus_close(hdev->channel);
hv_set_drvdata(hdev, NULL);
+ iounmap(hv->vram);
vmbus_free_mmio(hv->mem->start, hv->fb_size);
}
diff --git a/drivers/gpu/drm/i2c/Kconfig b/drivers/gpu/drm/i2c/Kconfig
deleted file mode 100644
index 6f19e1c35e30..000000000000
--- a/drivers/gpu/drm/i2c/Kconfig
+++ /dev/null
@@ -1,36 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-menu "I2C encoder or helper chips"
- depends on DRM && DRM_KMS_HELPER && I2C
-
-config DRM_I2C_CH7006
- tristate "Chrontel ch7006 TV encoder"
- default m if DRM_NOUVEAU
- help
- Support for Chrontel ch7006 and similar TV encoders, found
- on some nVidia video cards.
-
- This driver is currently only useful if you're also using
- the nouveau driver.
-
-config DRM_I2C_SIL164
- tristate "Silicon Image sil164 TMDS transmitter"
- default m if DRM_NOUVEAU
- help
- Support for sil164 and similar single-link (or dual-link
- when used in pairs) TMDS transmitters, used in some nVidia
- video cards.
-
-config DRM_I2C_NXP_TDA998X
- tristate "NXP Semiconductors TDA998X HDMI encoder"
- default m if DRM_TILCDC
- select CEC_CORE if CEC_NOTIFIER
- select SND_SOC_HDMI_CODEC if SND_SOC
- help
- Support for NXP Semiconductors TDA998X HDMI encoders.
-
-config DRM_I2C_NXP_TDA9950
- tristate "NXP Semiconductors TDA9950/TDA998X HDMI CEC"
- select CEC_NOTIFIER
- select CEC_CORE
-
-endmenu
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
deleted file mode 100644
index a962f6f08568..000000000000
--- a/drivers/gpu/drm/i2c/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-ch7006-y := ch7006_drv.o ch7006_mode.o
-obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
-
-sil164-y := sil164_drv.o
-obj-$(CONFIG_DRM_I2C_SIL164) += sil164.o
-
-tda998x-y := tda998x_drv.o
-obj-$(CONFIG_DRM_I2C_NXP_TDA998X) += tda998x.o
-obj-$(CONFIG_DRM_I2C_NXP_TDA9950) += tda9950.o
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 3dda9f0eda82..ed05b131ed3a 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -231,6 +231,7 @@ i915-y += \
display/intel_bo.o \
display/intel_bw.o \
display/intel_cdclk.o \
+ display/intel_cmtg.o \
display/intel_color.o \
display/intel_combo_phy.o \
display/intel_connector.o \
@@ -346,6 +347,7 @@ i915-y += \
display/intel_pps.o \
display/intel_qp_tables.o \
display/intel_sdvo.o \
+ display/intel_snps_hdmi_pll.o \
display/intel_snps_phy.o \
display/intel_tv.o \
display/intel_vdsc.o \
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7017.c b/drivers/gpu/drm/i915/display/dvo_ch7017.c
index 493e730c685b..206818f9ad49 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7017.c
@@ -247,7 +247,7 @@ static enum drm_connector_status ch7017_detect(struct intel_dvo_device *dvo)
}
static enum drm_mode_status ch7017_mode_valid(struct intel_dvo_device *dvo,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
if (mode->clock > 160000)
return MODE_CLOCK_HIGH;
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
index 534b8544e0a4..10ab3cc73e58 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
@@ -276,7 +276,7 @@ static enum drm_connector_status ch7xxx_detect(struct intel_dvo_device *dvo)
}
static enum drm_mode_status ch7xxx_mode_valid(struct intel_dvo_device *dvo,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
diff --git a/drivers/gpu/drm/i915/display/dvo_ivch.c b/drivers/gpu/drm/i915/display/dvo_ivch.c
index 0d5cce6051b1..d9c3152d4338 100644
--- a/drivers/gpu/drm/i915/display/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/display/dvo_ivch.c
@@ -314,7 +314,7 @@ static enum drm_connector_status ivch_detect(struct intel_dvo_device *dvo)
}
static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
if (mode->clock > 112000)
return MODE_CLOCK_HIGH;
diff --git a/drivers/gpu/drm/i915/display/dvo_ns2501.c b/drivers/gpu/drm/i915/display/dvo_ns2501.c
index 686393dfbbf5..92d32d6b5bce 100644
--- a/drivers/gpu/drm/i915/display/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/display/dvo_ns2501.c
@@ -517,13 +517,13 @@ static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo)
* Even if not, the detection bit of the 2501 is unreliable as
* it only works for some display types.
* It is even more unreliable as the PLL must be active for
- * allowing reading from the chiop.
+ * allowing reading from the chip.
*/
return connector_status_connected;
}
static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
DRM_DEBUG_KMS
("is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
diff --git a/drivers/gpu/drm/i915/display/dvo_sil164.c b/drivers/gpu/drm/i915/display/dvo_sil164.c
index a8dd40c00997..b42c717085f3 100644
--- a/drivers/gpu/drm/i915/display/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/display/dvo_sil164.c
@@ -189,7 +189,7 @@ static enum drm_connector_status sil164_detect(struct intel_dvo_device *dvo)
}
static enum drm_mode_status sil164_mode_valid(struct intel_dvo_device *dvo,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
return MODE_OK;
}
diff --git a/drivers/gpu/drm/i915/display/dvo_tfp410.c b/drivers/gpu/drm/i915/display/dvo_tfp410.c
index d9a0cd753a87..280699438526 100644
--- a/drivers/gpu/drm/i915/display/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/display/dvo_tfp410.c
@@ -217,7 +217,7 @@ static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo)
}
static enum drm_mode_status tfp410_mode_valid(struct intel_dvo_device *dvo,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
return MODE_OK;
}
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index 56353377466c..55b9e9bfcc4d 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -51,28 +51,29 @@ static const struct dpll chv_dpll[] = {
{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
};
-const struct dpll *vlv_get_dpll(struct drm_i915_private *i915)
+const struct dpll *vlv_get_dpll(struct intel_display *display)
{
- return IS_CHERRYVIEW(i915) ? &chv_dpll[0] : &vlv_dpll[0];
+ return display->platform.cherryview ? &chv_dpll[0] : &vlv_dpll[0];
}
static void g4x_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
const struct dpll *divisor = NULL;
int i, count = 0;
- if (IS_G4X(dev_priv)) {
+ if (display->platform.g4x) {
divisor = g4x_dpll;
count = ARRAY_SIZE(g4x_dpll);
} else if (HAS_PCH_SPLIT(dev_priv)) {
divisor = pch_dpll;
count = ARRAY_SIZE(pch_dpll);
- } else if (IS_CHERRYVIEW(dev_priv)) {
+ } else if (display->platform.cherryview) {
divisor = chv_dpll;
count = ARRAY_SIZE(chv_dpll);
- } else if (IS_VALLEYVIEW(dev_priv)) {
+ } else if (display->platform.valleyview) {
divisor = vlv_dpll;
count = ARRAY_SIZE(vlv_dpll);
}
@@ -129,7 +130,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
/* Split out the IBX/CPU vs CPT settings */
- if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
+ if (display->platform.ivybridge && port == PORT_A) {
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -148,7 +149,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
pipe_config->enhanced_framing ?
TRANS_DP_ENH_FRAMING : 0);
} else {
- if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
+ if (display->platform.g4x && pipe_config->limited_color_range)
intel_dp->DP |= DP_COLOR_RANGE_16_235;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -160,7 +161,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
if (pipe_config->enhanced_framing)
intel_dp->DP |= DP_ENHANCED_FRAMING;
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
else
intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
@@ -180,9 +181,8 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
-static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
+static void assert_edp_pll(struct intel_display *display, bool state)
{
- struct intel_display *display = &dev_priv->display;
bool cur_state = intel_de_read(display, DP_A) & DP_PLL_ENABLE;
INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
@@ -197,11 +197,10 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- assert_transcoder_disabled(dev_priv, pipe_config->cpu_transcoder);
+ assert_transcoder_disabled(display, pipe_config->cpu_transcoder);
assert_dp_port_disabled(intel_dp);
- assert_edp_pll_disabled(dev_priv);
+ assert_edp_pll_disabled(display);
drm_dbg_kms(display->drm, "enabling eDP PLL for clock %d\n",
pipe_config->port_clock);
@@ -223,8 +222,8 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
* 1. Wait for the start of vertical blank on the enabled pipe going to FDI
* 2. Program DP PLL enable
*/
- if (IS_IRONLAKE(dev_priv))
- intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
+ if (display->platform.ironlake)
+ intel_wait_for_vblank_if_active(display, !crtc->pipe);
intel_dp->DP |= DP_PLL_ENABLE;
@@ -237,12 +236,10 @@ static void ilk_edp_pll_off(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);
+ assert_transcoder_disabled(display, old_crtc_state->cpu_transcoder);
assert_dp_port_disabled(intel_dp);
- assert_edp_pll_enabled(dev_priv);
+ assert_edp_pll_enabled(display);
drm_dbg_kms(display->drm, "disabling eDP PLL\n");
@@ -253,10 +250,9 @@ static void ilk_edp_pll_off(struct intel_dp *intel_dp,
udelay(200);
}
-static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
+static bool cpt_dp_port_selected(struct intel_display *display,
enum port port, enum pipe *pipe)
{
- struct intel_display *display = &dev_priv->display;
enum pipe p;
for_each_pipe(display, p) {
@@ -277,11 +273,11 @@ static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
return false;
}
-bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv,
+bool g4x_dp_port_enabled(struct intel_display *display,
i915_reg_t dp_reg, enum port port,
enum pipe *pipe)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
bool ret;
u32 val;
@@ -290,11 +286,11 @@ bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv,
ret = val & DP_PORT_EN;
/* asserts want to know the pipe even if the port is disabled */
- if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
+ if (display->platform.ivybridge && port == PORT_A)
*pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
- ret &= cpt_dp_port_selected(dev_priv, port, pipe);
- else if (IS_CHERRYVIEW(dev_priv))
+ ret &= cpt_dp_port_selected(display, port, pipe);
+ else if (display->platform.cherryview)
*pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
else
*pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
@@ -305,20 +301,20 @@ bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv,
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_wakeref_t wakeref;
bool ret;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+ wakeref = intel_display_power_get_if_enabled(display,
encoder->power_domain);
if (!wakeref)
return false;
- ret = g4x_dp_port_enabled(dev_priv, intel_dp->output_reg,
+ ret = g4x_dp_port_enabled(display, intel_dp->output_reg,
encoder->port, pipe);
- intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+ intel_display_power_put(display, encoder->power_domain, wakeref);
return ret;
}
@@ -390,7 +386,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->hw.adjusted_mode.flags |= flags;
- if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
+ if (display->platform.g4x && tmp & DP_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
pipe_config->lane_count =
@@ -432,7 +428,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
drm_dbg_kms(display->drm, "\n");
- if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
+ if ((display->platform.ivybridge && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT;
intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
@@ -457,8 +453,8 @@ intel_dp_link_down(struct intel_encoder *encoder,
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
*/
- intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+ intel_set_cpu_fifo_underrun_reporting(display, PIPE_A, false);
+ intel_set_pch_fifo_underrun_reporting(display, PIPE_A, false);
/* always enable with pattern 1 (as per spec) */
intel_dp->DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
@@ -471,14 +467,14 @@ intel_dp_link_down(struct intel_encoder *encoder,
intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(display, intel_dp->output_reg);
- intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+ intel_wait_for_vblank_if_active(display, PIPE_A);
+ intel_set_cpu_fifo_underrun_reporting(display, PIPE_A, true);
+ intel_set_pch_fifo_underrun_reporting(display, PIPE_A, true);
}
msleep(intel_dp->pps.panel_power_down_delay);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (display->platform.valleyview || display->platform.cherryview)
vlv_pps_port_disable(encoder, old_crtc_state);
}
@@ -681,7 +677,6 @@ static void intel_enable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u32 dp_reg = intel_de_read(display, intel_dp->output_reg);
intel_wakeref_t wakeref;
@@ -690,7 +685,7 @@ static void intel_enable_dp(struct intel_atomic_state *state,
return;
with_intel_pps_lock(intel_dp, wakeref) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (display->platform.valleyview || display->platform.cherryview)
vlv_pps_port_enable_unlocked(encoder, pipe_config);
intel_dp_enable_port(intel_dp, pipe_config);
@@ -700,13 +695,13 @@ static void intel_enable_dp(struct intel_atomic_state *state,
intel_pps_vdd_off_unlocked(intel_dp, true);
}
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.valleyview || display->platform.cherryview) {
unsigned int lane_mask = 0x0;
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
- vlv_wait_port_ready(display, dp_to_dig_port(intel_dp), lane_mask);
+ vlv_wait_port_ready(encoder, lane_mask);
}
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
@@ -1263,7 +1258,6 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder->dev);
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
intel_dp->DP = intel_de_read(display, intel_dp->output_reg);
@@ -1271,7 +1265,7 @@ static void intel_dp_encoder_reset(struct drm_encoder *encoder)
intel_dp->reset_link_params = true;
intel_dp_invalidate_source_oui(intel_dp);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (display->platform.valleyview || display->platform.cherryview)
vlv_pps_pipe_reset(intel_dp);
intel_pps_encoder_reset(intel_dp);
@@ -1282,17 +1276,17 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
.destroy = intel_dp_encoder_destroy,
};
-bool g4x_dp_init(struct drm_i915_private *dev_priv,
+bool g4x_dp_init(struct intel_display *display,
i915_reg_t output_reg, enum port port)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
const struct intel_bios_encoder_data *devdata;
struct intel_digital_port *dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
- if (!assert_port_valid(dev_priv, port))
+ if (!assert_port_valid(display, port))
return false;
devdata = intel_bios_encoder_data_lookup(display, port);
@@ -1317,7 +1311,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->devdata = devdata;
- mutex_init(&dig_port->hdcp_mutex);
+ mutex_init(&dig_port->hdcp.mutex);
if (drm_encoder_init(display->drm, &intel_encoder->base,
&intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
@@ -1336,14 +1330,14 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->suspend = intel_dp_encoder_suspend;
intel_encoder->suspend_complete = g4x_dp_suspend_complete;
intel_encoder->shutdown = intel_dp_encoder_shutdown;
- if (IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.cherryview) {
intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
intel_encoder->pre_enable = chv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
intel_encoder->disable = vlv_disable_dp;
intel_encoder->post_disable = chv_post_disable_dp;
intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
- } else if (IS_VALLEYVIEW(dev_priv)) {
+ } else if (display->platform.valleyview) {
intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
intel_encoder->pre_enable = vlv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
@@ -1358,24 +1352,24 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->audio_enable = g4x_dp_audio_enable;
intel_encoder->audio_disable = g4x_dp_audio_disable;
- if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
+ if ((display->platform.ivybridge && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A))
dig_port->dp.set_link_train = cpt_set_link_train;
else
dig_port->dp.set_link_train = g4x_set_link_train;
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
intel_encoder->set_signal_levels = chv_set_signal_levels;
- else if (IS_VALLEYVIEW(dev_priv))
+ else if (display->platform.valleyview)
intel_encoder->set_signal_levels = vlv_set_signal_levels;
- else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
+ else if (display->platform.ivybridge && port == PORT_A)
intel_encoder->set_signal_levels = ivb_cpu_edp_set_signal_levels;
- else if (IS_SANDYBRIDGE(dev_priv) && port == PORT_A)
+ else if (display->platform.sandybridge && port == PORT_A)
intel_encoder->set_signal_levels = snb_cpu_edp_set_signal_levels;
else
intel_encoder->set_signal_levels = g4x_set_signal_levels;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
+ if (display->platform.valleyview || display->platform.cherryview ||
(HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
dig_port->dp.preemph_max = intel_dp_preemph_max_3;
dig_port->dp.voltage_max = intel_dp_voltage_max_3;
@@ -1388,8 +1382,8 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
dig_port->max_lanes = 4;
intel_encoder->type = INTEL_OUTPUT_DP;
- intel_encoder->power_domain = intel_display_power_ddi_lanes_domain(dev_priv, port);
- if (IS_CHERRYVIEW(dev_priv)) {
+ intel_encoder->power_domain = intel_display_power_ddi_lanes_domain(display, port);
+ if (display->platform.cherryview) {
if (port == PORT_D)
intel_encoder->pipe_mask = BIT(PIPE_C);
else
@@ -1399,7 +1393,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
}
intel_encoder->cloneable = 0;
intel_encoder->port = port;
- intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
+ intel_encoder->hpd_pin = intel_hpd_pin_default(port);
dig_port->hpd_pulse = intel_dp_hpd_pulse;
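The g4x_dp.c hunks above follow the wider struct intel_display conversion in i915: callers obtain the display pointer with to_intel_display() and test display->platform.* flags instead of the IS_*() macros on struct drm_i915_private. A hedged sketch of the pattern; foo_hook() is illustrative only:

	static void foo_hook(struct intel_encoder *encoder)
	{
		struct intel_display *display = to_intel_display(encoder);

		/*
		 * Previously:
		 *   struct drm_i915_private *i915 = to_i915(encoder->base.dev);
		 *   if (IS_CHERRYVIEW(i915)) ...
		 */
		if (display->platform.cherryview) {
			/* CHV-specific path */
		} else if (display->platform.valleyview) {
			/* VLV-specific path */
		}
	}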
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.h b/drivers/gpu/drm/i915/display/g4x_dp.h
index 839a251dc069..0b28951b8365 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.h
+++ b/drivers/gpu/drm/i915/display/g4x_dp.h
@@ -12,30 +12,30 @@
enum pipe;
enum port;
-struct drm_i915_private;
struct intel_crtc_state;
+struct intel_display;
struct intel_dp;
struct intel_encoder;
#ifdef I915
-const struct dpll *vlv_get_dpll(struct drm_i915_private *i915);
-bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv,
+const struct dpll *vlv_get_dpll(struct intel_display *display);
+bool g4x_dp_port_enabled(struct intel_display *display,
i915_reg_t dp_reg, enum port port,
enum pipe *pipe);
-bool g4x_dp_init(struct drm_i915_private *dev_priv,
+bool g4x_dp_init(struct intel_display *display,
i915_reg_t output_reg, enum port port);
#else
-static inline const struct dpll *vlv_get_dpll(struct drm_i915_private *i915)
+static inline const struct dpll *vlv_get_dpll(struct intel_display *display)
{
return NULL;
}
-static inline bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv,
+static inline bool g4x_dp_port_enabled(struct intel_display *display,
i915_reg_t dp_reg, int port,
enum pipe *pipe)
{
return false;
}
-static inline bool g4x_dp_init(struct drm_i915_private *dev_priv,
+static inline bool g4x_dp_init(struct intel_display *display,
i915_reg_t output_reg, int port)
{
return false;
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 98e6a931042f..3dc2c59a3df0 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -27,8 +27,8 @@
static void intel_hdmi_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(encoder);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
@@ -54,31 +54,31 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
if (HAS_PCH_CPT(dev_priv))
hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
- else if (IS_CHERRYVIEW(dev_priv))
+ else if (display->platform.cherryview)
hdmi_val |= SDVO_PIPE_SEL_CHV(crtc->pipe);
else
hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, hdmi_val);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(display, intel_hdmi->hdmi_reg, hdmi_val);
+ intel_de_posting_read(display, intel_hdmi->hdmi_reg);
}
static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
intel_wakeref_t wakeref;
bool ret;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+ wakeref = intel_display_power_get_if_enabled(display,
encoder->power_domain);
if (!wakeref)
return false;
- ret = intel_sdvo_port_enabled(dev_priv, intel_hdmi->hdmi_reg, pipe);
+ ret = intel_sdvo_port_enabled(display, intel_hdmi->hdmi_reg, pipe);
- intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+ intel_display_power_put(display, encoder->power_domain, wakeref);
return ret;
}
@@ -131,6 +131,7 @@ static int g4x_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
@@ -141,7 +142,7 @@ static int g4x_hdmi_compute_config(struct intel_encoder *encoder,
return -EINVAL;
}
- if (IS_G4X(i915))
+ if (display->platform.g4x)
crtc_state->has_hdmi_sink = g4x_compute_has_hdmi_sink(state, crtc);
else
crtc_state->has_hdmi_sink =
@@ -153,15 +154,15 @@ static int g4x_hdmi_compute_config(struct intel_encoder *encoder,
static void intel_hdmi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(encoder);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp, flags = 0;
int dotclock;
pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI);
- tmp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
+ tmp = intel_de_read(display, intel_hdmi->hdmi_reg);
if (tmp & SDVO_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
@@ -221,33 +222,32 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
static void g4x_hdmi_enable_port(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 temp;
- temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
+ temp = intel_de_read(display, intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(display, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(display, intel_hdmi->hdmi_reg);
}
static void g4x_hdmi_audio_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder);
if (!crtc_state->has_audio)
return;
- drm_WARN_ON(&i915->drm, !crtc_state->has_hdmi_sink);
+ drm_WARN_ON(display->drm, !crtc_state->has_hdmi_sink);
/* Enable audio presence detect */
- intel_de_rmw(i915, hdmi->hdmi_reg, 0, HDMI_AUDIO_ENABLE);
+ intel_de_rmw(display, hdmi->hdmi_reg, 0, HDMI_AUDIO_ENABLE);
intel_audio_codec_enable(encoder, crtc_state, conn_state);
}
@@ -256,7 +256,7 @@ static void g4x_hdmi_audio_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder);
if (!old_crtc_state->has_audio)
@@ -265,7 +265,7 @@ static void g4x_hdmi_audio_disable(struct intel_encoder *encoder,
intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
/* Disable audio presence detect */
- intel_de_rmw(i915, hdmi->hdmi_reg, HDMI_AUDIO_ENABLE, 0);
+ intel_de_rmw(display, hdmi->hdmi_reg, HDMI_AUDIO_ENABLE, 0);
}
static void g4x_enable_hdmi(struct intel_atomic_state *state,
@@ -281,12 +281,11 @@ static void ibx_enable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 temp;
- temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
+ temp = intel_de_read(display, intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
@@ -294,10 +293,10 @@ static void ibx_enable_hdmi(struct intel_atomic_state *state,
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
*/
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(display, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(display, intel_hdmi->hdmi_reg);
+ intel_de_write(display, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(display, intel_hdmi->hdmi_reg);
/*
* HW workaround, need to toggle enable bit off and on
@@ -308,18 +307,18 @@ static void ibx_enable_hdmi(struct intel_atomic_state *state,
*/
if (pipe_config->pipe_bpp > 24 &&
pipe_config->pixel_multiplier > 1) {
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg,
+ intel_de_write(display, intel_hdmi->hdmi_reg,
temp & ~SDVO_ENABLE);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_posting_read(display, intel_hdmi->hdmi_reg);
/*
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
*/
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(display, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(display, intel_hdmi->hdmi_reg);
+ intel_de_write(display, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(display, intel_hdmi->hdmi_reg);
}
}
@@ -328,14 +327,13 @@ static void cpt_enable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
enum pipe pipe = crtc->pipe;
u32 temp;
- temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
+ temp = intel_de_read(display, intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
@@ -350,24 +348,24 @@ static void cpt_enable_hdmi(struct intel_atomic_state *state,
*/
if (pipe_config->pipe_bpp > 24) {
- intel_de_rmw(dev_priv, TRANS_CHICKEN1(pipe),
+ intel_de_rmw(display, TRANS_CHICKEN1(pipe),
0, TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
temp &= ~SDVO_COLOR_FORMAT_MASK;
temp |= SDVO_COLOR_FORMAT_8bpc;
}
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(display, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(display, intel_hdmi->hdmi_reg);
if (pipe_config->pipe_bpp > 24) {
temp &= ~SDVO_COLOR_FORMAT_MASK;
temp |= HDMI_COLOR_FORMAT_12bpc;
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(display, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(display, intel_hdmi->hdmi_reg);
- intel_de_rmw(dev_priv, TRANS_CHICKEN1(pipe),
+ intel_de_rmw(display, TRANS_CHICKEN1(pipe),
TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE, 0);
}
}
@@ -384,19 +382,19 @@ static void intel_disable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(encoder);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct intel_digital_port *dig_port =
hdmi_to_dig_port(intel_hdmi);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
u32 temp;
- temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
+ temp = intel_de_read(display, intel_hdmi->hdmi_reg);
temp &= ~SDVO_ENABLE;
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(display, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(display, intel_hdmi->hdmi_reg);
/*
* HW workaround for IBX, we need to move the port
@@ -408,8 +406,8 @@ static void intel_disable_hdmi(struct intel_atomic_state *state,
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
*/
- intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+ intel_set_cpu_fifo_underrun_reporting(display, PIPE_A, false);
+ intel_set_pch_fifo_underrun_reporting(display, PIPE_A, false);
temp &= ~SDVO_PIPE_SEL_MASK;
temp |= SDVO_ENABLE | SDVO_PIPE_SEL(PIPE_A);
@@ -417,18 +415,18 @@ static void intel_disable_hdmi(struct intel_atomic_state *state,
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
*/
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(display, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(display, intel_hdmi->hdmi_reg);
+ intel_de_write(display, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(display, intel_hdmi->hdmi_reg);
temp &= ~SDVO_ENABLE;
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(display, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(display, intel_hdmi->hdmi_reg);
- intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+ intel_wait_for_vblank_if_active(display, PIPE_A);
+ intel_set_cpu_fifo_underrun_reporting(display, PIPE_A, true);
+ intel_set_pch_fifo_underrun_reporting(display, PIPE_A, true);
}
dig_port->set_infoframes(encoder,
@@ -481,7 +479,6 @@ static void vlv_hdmi_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
vlv_phy_pre_encoder_enable(encoder, pipe_config);
@@ -497,7 +494,7 @@ static void vlv_hdmi_pre_enable(struct intel_atomic_state *state,
g4x_hdmi_enable_port(encoder, pipe_config);
- vlv_wait_port_ready(display, dig_port, 0x0);
+ vlv_wait_port_ready(encoder, 0x0);
}
static void vlv_hdmi_pre_pll_enable(struct intel_atomic_state *state,
@@ -542,8 +539,8 @@ static void chv_hdmi_post_disable(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(encoder);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
vlv_dpio_get(dev_priv);
@@ -558,7 +555,6 @@ static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
chv_phy_pre_encoder_enable(encoder, pipe_config);
@@ -573,7 +569,7 @@ static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
g4x_hdmi_enable_port(encoder, pipe_config);
- vlv_wait_port_ready(display, dig_port, 0x0);
+ vlv_wait_port_ready(encoder, 0x0);
/* Second common lane will stay alive on its own now */
chv_phy_release_cl2_override(encoder);
@@ -612,7 +608,7 @@ intel_hdmi_hotplug(struct intel_encoder *encoder,
int g4x_hdmi_connector_atomic_check(struct drm_connector *connector,
struct drm_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_connector_list_iter conn_iter;
struct drm_connector *conn;
int ret;
@@ -621,7 +617,7 @@ int g4x_hdmi_connector_atomic_check(struct drm_connector *connector,
if (ret)
return ret;
- if (!IS_G4X(i915))
+ if (!display->platform.g4x)
return 0;
if (!intel_connector_needs_modeset(to_intel_atomic_state(state), connector))
@@ -635,7 +631,7 @@ int g4x_hdmi_connector_atomic_check(struct drm_connector *connector,
*
* See also g4x_compute_has_hdmi_sink().
*/
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
drm_for_each_connector_iter(conn, &conn_iter) {
struct drm_connector_state *conn_state;
struct drm_crtc_state *crtc_state;
@@ -644,7 +640,7 @@ int g4x_hdmi_connector_atomic_check(struct drm_connector *connector,
if (!connector_is_hdmi(conn))
continue;
- drm_dbg_kms(&i915->drm, "Adding [CONNECTOR:%d:%s]\n",
+ drm_dbg_kms(display->drm, "Adding [CONNECTOR:%d:%s]\n",
conn->base.id, conn->name);
conn_state = drm_atomic_get_connector_state(state, conn);
@@ -669,40 +665,40 @@ int g4x_hdmi_connector_atomic_check(struct drm_connector *connector,
return ret;
}
-static bool is_hdmi_port_valid(struct drm_i915_private *i915, enum port port)
+static bool is_hdmi_port_valid(struct intel_display *display, enum port port)
{
- if (IS_G4X(i915) || IS_VALLEYVIEW(i915))
+ if (display->platform.g4x || display->platform.valleyview)
return port == PORT_B || port == PORT_C;
else
return port == PORT_B || port == PORT_C || port == PORT_D;
}
-static bool assert_hdmi_port_valid(struct drm_i915_private *i915, enum port port)
+static bool assert_hdmi_port_valid(struct intel_display *display, enum port port)
{
- return !drm_WARN(&i915->drm, !is_hdmi_port_valid(i915, port),
+ return !drm_WARN(display->drm, !is_hdmi_port_valid(display, port),
"Platform does not support HDMI %c\n", port_name(port));
}
-bool g4x_hdmi_init(struct drm_i915_private *dev_priv,
+bool g4x_hdmi_init(struct intel_display *display,
i915_reg_t hdmi_reg, enum port port)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
const struct intel_bios_encoder_data *devdata;
struct intel_digital_port *dig_port;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
- if (!assert_port_valid(dev_priv, port))
+ if (!assert_port_valid(display, port))
return false;
- if (!assert_hdmi_port_valid(dev_priv, port))
+ if (!assert_hdmi_port_valid(display, port))
return false;
devdata = intel_bios_encoder_data_lookup(display, port);
/* FIXME bail? */
if (!devdata)
- drm_dbg_kms(&dev_priv->drm, "No VBT child device for HDMI-%c\n",
+ drm_dbg_kms(display->drm, "No VBT child device for HDMI-%c\n",
port_name(port));
dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
@@ -719,9 +715,9 @@ bool g4x_hdmi_init(struct drm_i915_private *dev_priv,
intel_encoder->devdata = devdata;
- mutex_init(&dig_port->hdcp_mutex);
+ mutex_init(&dig_port->hdcp.mutex);
- if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
+ if (drm_encoder_init(display->drm, &intel_encoder->base,
&intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
"HDMI %c", port_name(port)))
goto err_encoder_init;
@@ -736,13 +732,13 @@ bool g4x_hdmi_init(struct drm_i915_private *dev_priv,
}
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
intel_encoder->get_config = intel_hdmi_get_config;
- if (IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.cherryview) {
intel_encoder->pre_pll_enable = chv_hdmi_pre_pll_enable;
intel_encoder->pre_enable = chv_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = chv_hdmi_post_disable;
intel_encoder->post_pll_disable = chv_hdmi_post_pll_disable;
- } else if (IS_VALLEYVIEW(dev_priv)) {
+ } else if (display->platform.valleyview) {
intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
intel_encoder->pre_enable = vlv_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
@@ -761,9 +757,9 @@ bool g4x_hdmi_init(struct drm_i915_private *dev_priv,
intel_encoder->shutdown = intel_hdmi_encoder_shutdown;
intel_encoder->type = INTEL_OUTPUT_HDMI;
- intel_encoder->power_domain = intel_display_power_ddi_lanes_domain(dev_priv, port);
+ intel_encoder->power_domain = intel_display_power_ddi_lanes_domain(display, port);
intel_encoder->port = port;
- if (IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.cherryview) {
if (port == PORT_D)
intel_encoder->pipe_mask = BIT(PIPE_C);
else
@@ -772,13 +768,13 @@ bool g4x_hdmi_init(struct drm_i915_private *dev_priv,
intel_encoder->pipe_mask = ~0;
}
intel_encoder->cloneable = BIT(INTEL_OUTPUT_ANALOG);
- intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
+ intel_encoder->hpd_pin = intel_hpd_pin_default(port);
/*
* BSpec is unclear about HDMI+HDMI cloning on g4x, but it seems
* to work on real hardware. And since g4x can send infoframes to
* only one port anyway, nothing is lost by allowing it.
*/
- if (IS_G4X(dev_priv))
+ if (display->platform.g4x)
intel_encoder->cloneable |= BIT(INTEL_OUTPUT_HDMI);
dig_port->hdmi.hdmi_reg = hdmi_reg;
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.h b/drivers/gpu/drm/i915/display/g4x_hdmi.h
index a52e8986ec7a..039d2bdba06c 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.h
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.h
@@ -13,15 +13,15 @@
enum port;
struct drm_atomic_state;
struct drm_connector;
-struct drm_i915_private;
+struct intel_display;
#ifdef I915
-bool g4x_hdmi_init(struct drm_i915_private *dev_priv,
+bool g4x_hdmi_init(struct intel_display *display,
i915_reg_t hdmi_reg, enum port port);
int g4x_hdmi_connector_atomic_check(struct drm_connector *connector,
struct drm_atomic_state *state);
#else
-static inline bool g4x_hdmi_init(struct drm_i915_private *dev_priv,
+static inline bool g4x_hdmi_init(struct intel_display *display,
i915_reg_t hdmi_reg, int port)
{
return false;
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index d02c328bf902..674a0e5f0858 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -36,7 +36,7 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
if (display->ips.false_color)
val |= IPS_FALSE_COLOR;
- if (IS_BROADWELL(i915)) {
+ if (display->platform.broadwell) {
drm_WARN_ON(display->drm,
snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL,
val | IPS_PCODE_CONTROL));
@@ -71,7 +71,7 @@ bool hsw_ips_disable(const struct intel_crtc_state *crtc_state)
if (!crtc_state->ips_enabled)
return need_vblank_wait;
- if (IS_BROADWELL(i915)) {
+ if (display->platform.broadwell) {
drm_WARN_ON(display->drm,
snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL, 0));
/*
@@ -96,7 +96,7 @@ bool hsw_ips_disable(const struct intel_crtc_state *crtc_state)
static bool hsw_ips_need_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
@@ -114,7 +114,7 @@ static bool hsw_ips_need_disable(struct intel_atomic_state *state,
*
* Disable IPS before we program the LUT.
*/
- if (IS_HASWELL(i915) &&
+ if (display->platform.haswell &&
intel_crtc_needs_color_update(new_crtc_state) &&
new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
return true;
@@ -137,7 +137,7 @@ bool hsw_ips_pre_update(struct intel_atomic_state *state,
static bool hsw_ips_need_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
@@ -155,7 +155,7 @@ static bool hsw_ips_need_enable(struct intel_atomic_state *state,
*
* Re-enable IPS after the LUT has been programmed.
*/
- if (IS_HASWELL(i915) &&
+ if (display->platform.haswell &&
intel_crtc_needs_color_update(new_crtc_state) &&
new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
return true;
@@ -194,7 +194,6 @@ static bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
/* IPS only exists on ULT machines and is tied to pipe A. */
if (!hsw_crtc_supports_ips(crtc))
@@ -213,7 +212,7 @@ static bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state
*
* Should measure whether using a lower cdclk w/o IPS
*/
- if (IS_BROADWELL(i915) &&
+ if (display->platform.broadwell &&
crtc_state->pixel_rate > display->cdclk.max_cdclk_freq * 95 / 100)
return false;
@@ -222,9 +221,9 @@ static bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state
int hsw_ips_min_cdclk(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (!IS_BROADWELL(i915))
+ if (!display->platform.broadwell)
return 0;
if (!hsw_crtc_state_ips_capable(crtc_state))
@@ -237,7 +236,7 @@ int hsw_ips_min_cdclk(const struct intel_crtc_state *crtc_state)
int hsw_ips_compute_config(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -259,7 +258,7 @@ int hsw_ips_compute_config(struct intel_atomic_state *state,
if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
return 0;
- if (IS_BROADWELL(i915)) {
+ if (display->platform.broadwell) {
const struct intel_cdclk_state *cdclk_state;
cdclk_state = intel_atomic_get_cdclk_state(state);
@@ -280,12 +279,11 @@ void hsw_ips_get_config(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
if (!hsw_crtc_supports_ips(crtc))
return;
- if (IS_HASWELL(i915)) {
+ if (display->platform.haswell) {
crtc_state->ips_enabled = intel_de_read(display, IPS_CTL) & IPS_ENABLE;
} else {
/*
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index 48e657a80a16..013295f66d56 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -109,42 +109,42 @@ static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
}
}
-static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
+static bool i9xx_plane_has_fbc(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
- if (!HAS_FBC(dev_priv))
+ if (!HAS_FBC(display))
return false;
- if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ if (display->platform.broadwell || display->platform.haswell)
return i9xx_plane == PLANE_A; /* tied to pipe A */
- else if (IS_IVYBRIDGE(dev_priv))
+ else if (display->platform.ivybridge)
return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
i9xx_plane == PLANE_C;
- else if (DISPLAY_VER(dev_priv) >= 4)
+ else if (DISPLAY_VER(display) >= 4)
return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
else
return i9xx_plane == PLANE_A;
}
-static struct intel_fbc *i9xx_plane_fbc(struct drm_i915_private *dev_priv,
+static struct intel_fbc *i9xx_plane_fbc(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
- if (i9xx_plane_has_fbc(dev_priv, i9xx_plane))
- return dev_priv->display.fbc[INTEL_FBC_A];
+ if (i9xx_plane_has_fbc(display, i9xx_plane))
+ return display->fbc[INTEL_FBC_A];
else
return NULL;
}
static bool i9xx_plane_has_windowing(struct intel_plane *plane)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
return i9xx_plane == PLANE_B;
- else if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
+ else if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
return false;
- else if (DISPLAY_VER(dev_priv) == 4)
+ else if (DISPLAY_VER(display) == 4)
return i9xx_plane == PLANE_C;
else
return i9xx_plane == PLANE_B ||
@@ -154,16 +154,15 @@ static bool i9xx_plane_has_windowing(struct intel_plane *plane)
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int rotation = plane_state->hw.rotation;
u32 dspcntr;
dspcntr = DISP_ENABLE;
- if (IS_G4X(dev_priv) || IS_IRONLAKE(dev_priv) ||
- IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv))
+ if (display->platform.g4x || display->platform.ironlake ||
+ display->platform.sandybridge || display->platform.ivybridge)
dspcntr |= DISP_TRICKLE_FEED_DISABLE;
switch (fb->format->format) {
@@ -211,7 +210,7 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
return 0;
}
- if (DISPLAY_VER(dev_priv) >= 4 &&
+ if (DISPLAY_VER(display) >= 4 &&
fb->modifier == I915_FORMAT_MOD_X_TILED)
dspcntr |= DISP_TILED;
@@ -226,8 +225,8 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
int src_x, src_y, src_w;
u32 offset;
@@ -245,12 +244,16 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
src_y = plane_state->uapi.src.y1 >> 16;
/* Undocumented hardware limit on i965/g4x/vlv/chv */
- if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
+ if (HAS_GMCH(display) && fb->format->cpp[0] == 8 && src_w > 2048) {
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] plane too wide (%d) for 64bpp\n",
+ plane->base.base.id, plane->base.name, src_w);
return -EINVAL;
+ }
intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
- if (DISPLAY_VER(dev_priv) >= 4)
+ if (DISPLAY_VER(display) >= 4)
offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
plane_state, 0);
else
@@ -267,14 +270,15 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
* Linear surfaces seem to work just fine, even on hsw/bdw
* despite them not using the linear offset anymore.
*/
- if (DISPLAY_VER(dev_priv) >= 4 && fb->modifier == I915_FORMAT_MOD_X_TILED) {
+ if (DISPLAY_VER(display) >= 4 && fb->modifier == I915_FORMAT_MOD_X_TILED) {
unsigned int alignment = plane->min_alignment(plane, fb, 0);
int cpp = fb->format->cpp[0];
while ((src_x + src_w) * cpp > plane_state->view.color_plane[0].mapping_stride) {
if (offset == 0) {
- drm_dbg_kms(&dev_priv->drm,
- "Unable to find suitable display surface offset due to X-tiling\n");
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] unable to find suitable display surface offset due to X-tiling\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
}
@@ -291,7 +295,7 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
src_x << 16, src_y << 16);
/* HSW/BDW do this automagically in hardware */
- if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
+ if (!display->platform.haswell && !display->platform.broadwell) {
unsigned int rotation = plane_state->hw.rotation;
int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
@@ -304,11 +308,11 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
}
}
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- drm_WARN_ON(&dev_priv->drm, src_x > 8191 || src_y > 4095);
- } else if (DISPLAY_VER(dev_priv) >= 4 &&
+ if (display->platform.haswell || display->platform.broadwell) {
+ drm_WARN_ON(display->drm, src_x > 8191 || src_y > 4095);
+ } else if (DISPLAY_VER(display) >= 4 &&
fb->modifier == I915_FORMAT_MOD_X_TILED) {
- drm_WARN_ON(&dev_priv->drm, src_x > 4095 || src_y > 4095);
+ drm_WARN_ON(display->drm, src_x > 4095 || src_y > 4095);
}
plane_state->view.color_plane[0].offset = offset;
@@ -354,8 +358,8 @@ i9xx_plane_check(struct intel_crtc_state *crtc_state,
static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dspcntr = 0;
if (crtc_state->gamma_enable)
@@ -364,7 +368,7 @@ static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
if (crtc_state->csc_enable)
dspcntr |= DISP_PIPE_CSC_ENABLE;
- if (DISPLAY_VER(dev_priv) < 5)
+ if (DISPLAY_VER(display) < 5)
dspcntr |= DISP_PIPE_SEL(crtc->pipe);
return dspcntr;
@@ -422,13 +426,13 @@ static void i9xx_plane_update_noarm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- intel_de_write_fw(dev_priv, DSPSTRIDE(dev_priv, i9xx_plane),
+ intel_de_write_fw(display, DSPSTRIDE(display, i9xx_plane),
plane_state->view.color_plane[0].mapping_stride);
- if (DISPLAY_VER(dev_priv) < 4) {
+ if (DISPLAY_VER(display) < 4) {
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
int crtc_w = drm_rect_width(&plane_state->uapi.dst);
@@ -439,9 +443,9 @@ static void i9xx_plane_update_noarm(struct intel_dsb *dsb,
* generator but let's assume we still need to
* program whatever is there.
*/
- intel_de_write_fw(dev_priv, DSPPOS(dev_priv, i9xx_plane),
+ intel_de_write_fw(display, DSPPOS(display, i9xx_plane),
DISP_POS_Y(crtc_y) | DISP_POS_X(crtc_x));
- intel_de_write_fw(dev_priv, DSPSIZE(dev_priv, i9xx_plane),
+ intel_de_write_fw(display, DSPSIZE(display, i9xx_plane),
DISP_HEIGHT(crtc_h - 1) | DISP_WIDTH(crtc_w - 1));
}
}
@@ -451,7 +455,7 @@ static void i9xx_plane_update_arm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
int x = plane_state->view.color_plane[0].x;
int y = plane_state->view.color_plane[0].y;
@@ -466,32 +470,32 @@ static void i9xx_plane_update_arm(struct intel_dsb *dsb,
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
- if (DISPLAY_VER(dev_priv) >= 4)
+ if (DISPLAY_VER(display) >= 4)
dspaddr_offset = plane_state->view.color_plane[0].offset;
else
dspaddr_offset = linear_offset;
- if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
+ if (display->platform.cherryview && i9xx_plane == PLANE_B) {
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
int crtc_w = drm_rect_width(&plane_state->uapi.dst);
int crtc_h = drm_rect_height(&plane_state->uapi.dst);
- intel_de_write_fw(dev_priv, PRIMPOS(dev_priv, i9xx_plane),
+ intel_de_write_fw(display, PRIMPOS(display, i9xx_plane),
PRIM_POS_Y(crtc_y) | PRIM_POS_X(crtc_x));
- intel_de_write_fw(dev_priv, PRIMSIZE(dev_priv, i9xx_plane),
+ intel_de_write_fw(display, PRIMSIZE(display, i9xx_plane),
PRIM_HEIGHT(crtc_h - 1) | PRIM_WIDTH(crtc_w - 1));
- intel_de_write_fw(dev_priv,
- PRIMCNSTALPHA(dev_priv, i9xx_plane), 0);
+ intel_de_write_fw(display,
+ PRIMCNSTALPHA(display, i9xx_plane), 0);
}
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- intel_de_write_fw(dev_priv, DSPOFFSET(dev_priv, i9xx_plane),
+ if (display->platform.haswell || display->platform.broadwell) {
+ intel_de_write_fw(display, DSPOFFSET(display, i9xx_plane),
DISP_OFFSET_Y(y) | DISP_OFFSET_X(x));
- } else if (DISPLAY_VER(dev_priv) >= 4) {
- intel_de_write_fw(dev_priv, DSPLINOFF(dev_priv, i9xx_plane),
+ } else if (DISPLAY_VER(display) >= 4) {
+ intel_de_write_fw(display, DSPLINOFF(display, i9xx_plane),
linear_offset);
- intel_de_write_fw(dev_priv, DSPTILEOFF(dev_priv, i9xx_plane),
+ intel_de_write_fw(display, DSPTILEOFF(display, i9xx_plane),
DISP_OFFSET_Y(y) | DISP_OFFSET_X(x));
}
@@ -500,13 +504,13 @@ static void i9xx_plane_update_arm(struct intel_dsb *dsb,
* disabled. Try to make the plane enable atomic by writing
* the control register just before the surface register.
*/
- intel_de_write_fw(dev_priv, DSPCNTR(dev_priv, i9xx_plane), dspcntr);
+ intel_de_write_fw(display, DSPCNTR(display, i9xx_plane), dspcntr);
- if (DISPLAY_VER(dev_priv) >= 4)
- intel_de_write_fw(dev_priv, DSPSURF(dev_priv, i9xx_plane),
+ if (DISPLAY_VER(display) >= 4)
+ intel_de_write_fw(display, DSPSURF(display, i9xx_plane),
intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
else
- intel_de_write_fw(dev_priv, DSPADDR(dev_priv, i9xx_plane),
+ intel_de_write_fw(display, DSPADDR(display, i9xx_plane),
intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
}
@@ -529,7 +533,7 @@ static void i9xx_plane_disable_arm(struct intel_dsb *dsb,
struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
u32 dspcntr;
@@ -545,12 +549,46 @@ static void i9xx_plane_disable_arm(struct intel_dsb *dsb,
*/
dspcntr = i9xx_plane_ctl_crtc(crtc_state);
- intel_de_write_fw(dev_priv, DSPCNTR(dev_priv, i9xx_plane), dspcntr);
+ intel_de_write_fw(display, DSPCNTR(display, i9xx_plane), dspcntr);
- if (DISPLAY_VER(dev_priv) >= 4)
- intel_de_write_fw(dev_priv, DSPSURF(dev_priv, i9xx_plane), 0);
+ if (DISPLAY_VER(display) >= 4)
+ intel_de_write_fw(display, DSPSURF(display, i9xx_plane), 0);
else
- intel_de_write_fw(dev_priv, DSPADDR(dev_priv, i9xx_plane), 0);
+ intel_de_write_fw(display, DSPADDR(display, i9xx_plane), 0);
+}
+
+static void g4x_primary_capture_error(struct intel_crtc *crtc,
+ struct intel_plane *plane,
+ struct intel_plane_error *error)
+{
+ struct intel_display *display = to_intel_display(plane);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+
+ error->ctl = intel_de_read(display, DSPCNTR(display, i9xx_plane));
+ error->surf = intel_de_read(display, DSPSURF(display, i9xx_plane));
+ error->surflive = intel_de_read(display, DSPSURFLIVE(display, i9xx_plane));
+}
+
+static void i965_plane_capture_error(struct intel_crtc *crtc,
+ struct intel_plane *plane,
+ struct intel_plane_error *error)
+{
+ struct intel_display *display = to_intel_display(plane);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+
+ error->ctl = intel_de_read(display, DSPCNTR(display, i9xx_plane));
+ error->surf = intel_de_read(display, DSPSURF(display, i9xx_plane));
+}
+
+static void i8xx_plane_capture_error(struct intel_crtc *crtc,
+ struct intel_plane *plane,
+ struct intel_plane_error *error)
+{
+ struct intel_display *display = to_intel_display(plane);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+
+ error->ctl = intel_de_read(display, DSPCNTR(display, i9xx_plane));
+ error->surf = intel_de_read(display, DSPADDR(display, i9xx_plane));
}
static void
@@ -560,7 +598,7 @@ g4x_primary_async_flip(struct intel_dsb *dsb,
const struct intel_plane_state *plane_state,
bool async_flip)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
u32 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
u32 dspaddr_offset = plane_state->view.color_plane[0].offset;
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
@@ -568,9 +606,9 @@ g4x_primary_async_flip(struct intel_dsb *dsb,
if (async_flip)
dspcntr |= DISP_ASYNC_FLIP;
- intel_de_write_fw(dev_priv, DSPCNTR(dev_priv, i9xx_plane), dspcntr);
+ intel_de_write_fw(display, DSPCNTR(display, i9xx_plane), dspcntr);
- intel_de_write_fw(dev_priv, DSPSURF(dev_priv, i9xx_plane),
+ intel_de_write_fw(display, DSPSURF(display, i9xx_plane),
intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
}
@@ -581,11 +619,11 @@ vlv_primary_async_flip(struct intel_dsb *dsb,
const struct intel_plane_state *plane_state,
bool async_flip)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
u32 dspaddr_offset = plane_state->view.color_plane[0].offset;
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- intel_de_write_fw(dev_priv, DSPADDR_VLV(dev_priv, i9xx_plane),
+ intel_de_write_fw(display, DSPADDR_VLV(display, i9xx_plane),
intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
}
@@ -673,10 +711,15 @@ vlv_primary_disable_flip_done(struct intel_plane *plane)
spin_unlock_irq(&i915->irq_lock);
}
+static bool i9xx_plane_can_async_flip(u64 modifier)
+{
+ return modifier == I915_FORMAT_MOD_X_TILED;
+}
+
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum intel_display_power_domain power_domain;
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
intel_wakeref_t wakeref;
@@ -689,20 +732,20 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
* display power wells.
*/
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ wakeref = intel_display_power_get_if_enabled(display, power_domain);
if (!wakeref)
return false;
- val = intel_de_read(dev_priv, DSPCNTR(dev_priv, i9xx_plane));
+ val = intel_de_read(display, DSPCNTR(display, i9xx_plane));
ret = val & DISP_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 5)
+ if (DISPLAY_VER(display) >= 5)
*pipe = plane->pipe;
else
*pipe = REG_FIELD_GET(DISP_PIPE_SEL_MASK, val);
- intel_display_power_put(dev_priv, power_domain, wakeref);
+ intel_display_power_put(display, power_domain, wakeref);
return ret;
}
@@ -771,16 +814,21 @@ i8xx_plane_max_stride(struct intel_plane *plane,
return 8 * 1024;
}
-static unsigned int vlv_primary_min_alignment(struct intel_plane *plane,
- const struct drm_framebuffer *fb,
- int color_plane)
+unsigned int vlv_plane_min_alignment(struct intel_plane *plane,
+ const struct drm_framebuffer *fb,
+ int color_plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
+
+ if (intel_plane_can_async_flip(plane, fb->modifier))
+ return 256 * 1024;
+
+ /* FIXME undocumented so not sure what's actually needed */
+ if (intel_scanout_needs_vtd_wa(display))
+ return 256 * 1024;
switch (fb->modifier) {
case I915_FORMAT_MOD_X_TILED:
- if (HAS_ASYNC_FLIPS(i915))
- return 256 * 1024;
return 4 * 1024;
case DRM_FORMAT_MOD_LINEAR:
return 128 * 1024;
@@ -794,13 +842,16 @@ static unsigned int g4x_primary_min_alignment(struct intel_plane *plane,
const struct drm_framebuffer *fb,
int color_plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
+
+ if (intel_plane_can_async_flip(plane, fb->modifier))
+ return 256 * 1024;
+
+ if (intel_scanout_needs_vtd_wa(display))
+ return 256 * 1024;
switch (fb->modifier) {
case I915_FORMAT_MOD_X_TILED:
- if (HAS_ASYNC_FLIPS(i915))
- return 256 * 1024;
- return 4 * 1024;
case DRM_FORMAT_MOD_LINEAR:
return 4 * 1024;
default:
@@ -850,7 +901,7 @@ static const struct drm_plane_funcs i8xx_plane_funcs = {
};
struct intel_plane *
-intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
+intel_primary_plane_create(struct intel_display *display, enum pipe pipe)
{
struct intel_plane *plane;
const struct drm_plane_funcs *plane_funcs;
@@ -869,20 +920,20 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
* On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
* port is hooked to pipe B. Hence we want plane A feeding pipe B.
*/
- if (HAS_FBC(dev_priv) && DISPLAY_VER(dev_priv) < 4 &&
- INTEL_NUM_PIPES(dev_priv) == 2)
+ if (HAS_FBC(display) && DISPLAY_VER(display) < 4 &&
+ INTEL_NUM_PIPES(display) == 2)
plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
else
plane->i9xx_plane = (enum i9xx_plane_id) pipe;
plane->id = PLANE_PRIMARY;
plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
- intel_fbc_add_plane(i9xx_plane_fbc(dev_priv, plane->i9xx_plane), plane);
+ intel_fbc_add_plane(i9xx_plane_fbc(display, plane->i9xx_plane), plane);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.valleyview || display->platform.cherryview) {
formats = vlv_primary_formats;
num_formats = ARRAY_SIZE(vlv_primary_formats);
- } else if (DISPLAY_VER(dev_priv) >= 4) {
+ } else if (DISPLAY_VER(display) >= 4) {
/*
* WaFP16GammaEnabling:ivb
* "Workaround : When using the 64-bit format, the plane
@@ -896,7 +947,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
* planes, so we choose not to expose fp16 on IVB primary
* planes. HSW primary planes no longer have this problem.
*/
- if (IS_IVYBRIDGE(dev_priv)) {
+ if (display->platform.ivybridge) {
formats = ivb_primary_formats;
num_formats = ARRAY_SIZE(ivb_primary_formats);
} else {
@@ -908,44 +959,48 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
num_formats = ARRAY_SIZE(i8xx_primary_formats);
}
- if (DISPLAY_VER(dev_priv) >= 4)
+ if (DISPLAY_VER(display) >= 4)
plane_funcs = &i965_plane_funcs;
else
plane_funcs = &i8xx_plane_funcs;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (display->platform.valleyview || display->platform.cherryview)
plane->min_cdclk = vlv_plane_min_cdclk;
- else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ else if (display->platform.broadwell || display->platform.haswell)
plane->min_cdclk = hsw_plane_min_cdclk;
- else if (IS_IVYBRIDGE(dev_priv))
+ else if (display->platform.ivybridge)
plane->min_cdclk = ivb_plane_min_cdclk;
else
plane->min_cdclk = i9xx_plane_min_cdclk;
- if (HAS_GMCH(dev_priv)) {
- if (DISPLAY_VER(dev_priv) >= 4)
+ if (HAS_GMCH(display)) {
+ if (DISPLAY_VER(display) >= 4)
plane->max_stride = i965_plane_max_stride;
- else if (DISPLAY_VER(dev_priv) == 3)
+ else if (DISPLAY_VER(display) == 3)
plane->max_stride = i915_plane_max_stride;
else
plane->max_stride = i8xx_plane_max_stride;
} else {
- if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ if (display->platform.broadwell || display->platform.haswell)
plane->max_stride = hsw_primary_max_stride;
else
plane->max_stride = ilk_primary_max_stride;
}
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- plane->min_alignment = vlv_primary_min_alignment;
- else if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
+ if (display->platform.valleyview || display->platform.cherryview)
+ plane->min_alignment = vlv_plane_min_alignment;
+ else if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
plane->min_alignment = g4x_primary_min_alignment;
- else if (DISPLAY_VER(dev_priv) == 4)
+ else if (DISPLAY_VER(display) == 4)
plane->min_alignment = i965_plane_min_alignment;
else
plane->min_alignment = i9xx_plane_min_alignment;
- if (IS_I830(dev_priv) || IS_I845G(dev_priv)) {
+ /* FIXME undocumented for VLV/CHV so not sure what's actually needed */
+ if (intel_scanout_needs_vtd_wa(display))
+ plane->vtd_guard = 128;
+
+ if (display->platform.i830 || display->platform.i845g) {
plane->update_arm = i830_plane_update_arm;
} else {
plane->update_noarm = i9xx_plane_update_noarm;
@@ -955,36 +1010,49 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
plane->get_hw_state = i9xx_plane_get_hw_state;
plane->check_plane = i9xx_plane_check;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- plane->async_flip = vlv_primary_async_flip;
- plane->enable_flip_done = vlv_primary_enable_flip_done;
- plane->disable_flip_done = vlv_primary_disable_flip_done;
- } else if (IS_BROADWELL(dev_priv)) {
- plane->need_async_flip_toggle_wa = true;
- plane->async_flip = g4x_primary_async_flip;
- plane->enable_flip_done = bdw_primary_enable_flip_done;
- plane->disable_flip_done = bdw_primary_disable_flip_done;
- } else if (DISPLAY_VER(dev_priv) >= 7) {
- plane->async_flip = g4x_primary_async_flip;
- plane->enable_flip_done = ivb_primary_enable_flip_done;
- plane->disable_flip_done = ivb_primary_disable_flip_done;
- } else if (DISPLAY_VER(dev_priv) >= 5) {
- plane->async_flip = g4x_primary_async_flip;
- plane->enable_flip_done = ilk_primary_enable_flip_done;
- plane->disable_flip_done = ilk_primary_disable_flip_done;
+ if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
+ plane->capture_error = g4x_primary_capture_error;
+ else if (DISPLAY_VER(display) >= 4)
+ plane->capture_error = i965_plane_capture_error;
+ else
+ plane->capture_error = i8xx_plane_capture_error;
+
+ if (HAS_ASYNC_FLIPS(display)) {
+ if (display->platform.valleyview || display->platform.cherryview) {
+ plane->async_flip = vlv_primary_async_flip;
+ plane->enable_flip_done = vlv_primary_enable_flip_done;
+ plane->disable_flip_done = vlv_primary_disable_flip_done;
+ plane->can_async_flip = i9xx_plane_can_async_flip;
+ } else if (display->platform.broadwell) {
+ plane->need_async_flip_toggle_wa = true;
+ plane->async_flip = g4x_primary_async_flip;
+ plane->enable_flip_done = bdw_primary_enable_flip_done;
+ plane->disable_flip_done = bdw_primary_disable_flip_done;
+ plane->can_async_flip = i9xx_plane_can_async_flip;
+ } else if (DISPLAY_VER(display) >= 7) {
+ plane->async_flip = g4x_primary_async_flip;
+ plane->enable_flip_done = ivb_primary_enable_flip_done;
+ plane->disable_flip_done = ivb_primary_disable_flip_done;
+ plane->can_async_flip = i9xx_plane_can_async_flip;
+ } else if (DISPLAY_VER(display) >= 5) {
+ plane->async_flip = g4x_primary_async_flip;
+ plane->enable_flip_done = ilk_primary_enable_flip_done;
+ plane->disable_flip_done = ilk_primary_disable_flip_done;
+ plane->can_async_flip = i9xx_plane_can_async_flip;
+ }
}
- modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_TILING_X);
+ modifiers = intel_fb_plane_get_modifiers(display, INTEL_PLANE_CAP_TILING_X);
- if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
- ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
+ ret = drm_universal_plane_init(display->drm, &plane->base,
0, plane_funcs,
formats, num_formats,
modifiers,
DRM_PLANE_TYPE_PRIMARY,
"primary %c", pipe_name(pipe));
else
- ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ ret = drm_universal_plane_init(display->drm, &plane->base,
0, plane_funcs,
formats, num_formats,
modifiers,
@@ -997,18 +1065,18 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
if (ret)
goto fail;
- if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ if (display->platform.cherryview && pipe == PIPE_B) {
supported_rotations =
DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
DRM_MODE_REFLECT_X;
- } else if (DISPLAY_VER(dev_priv) >= 4) {
+ } else if (DISPLAY_VER(display) >= 4) {
supported_rotations =
DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
} else {
supported_rotations = DRM_MODE_ROTATE_0;
}
- if (DISPLAY_VER(dev_priv) >= 4)
+ if (DISPLAY_VER(display) >= 4)
drm_plane_create_rotation_property(&plane->base,
DRM_MODE_ROTATE_0,
supported_rotations);
@@ -1063,8 +1131,7 @@ void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
enum pipe pipe;
@@ -1077,21 +1144,21 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
if (!plane->get_hw_state(plane, &pipe))
return;
- drm_WARN_ON(dev, pipe != crtc->pipe);
+ drm_WARN_ON(display->drm, pipe != crtc->pipe);
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
- drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
+ drm_dbg_kms(display->drm, "failed to alloc fb\n");
return;
}
fb = &intel_fb->base;
- fb->dev = dev;
+ fb->dev = display->drm;
- val = intel_de_read(dev_priv, DSPCNTR(dev_priv, i9xx_plane));
+ val = intel_de_read(display, DSPCNTR(display, i9xx_plane));
- if (DISPLAY_VER(dev_priv) >= 4) {
+ if (DISPLAY_VER(display) >= 4) {
if (val & DISP_TILED) {
plane_config->tiling = I915_TILING_X;
fb->modifier = I915_FORMAT_MOD_X_TILED;
@@ -1101,50 +1168,51 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
plane_config->rotation = DRM_MODE_ROTATE_180;
}
- if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
- val & DISP_MIRROR)
+ if (display->platform.cherryview &&
+ pipe == PIPE_B && val & DISP_MIRROR)
plane_config->rotation |= DRM_MODE_REFLECT_X;
pixel_format = val & DISP_FORMAT_MASK;
fourcc = i9xx_format_to_fourcc(pixel_format);
fb->format = drm_format_info(fourcc);
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- offset = intel_de_read(dev_priv,
- DSPOFFSET(dev_priv, i9xx_plane));
- base = intel_de_read(dev_priv, DSPSURF(dev_priv, i9xx_plane)) & DISP_ADDR_MASK;
- } else if (DISPLAY_VER(dev_priv) >= 4) {
+ if (display->platform.haswell || display->platform.broadwell) {
+ offset = intel_de_read(display,
+ DSPOFFSET(display, i9xx_plane));
+ base = intel_de_read(display, DSPSURF(display, i9xx_plane)) & DISP_ADDR_MASK;
+ } else if (DISPLAY_VER(display) >= 4) {
if (plane_config->tiling)
- offset = intel_de_read(dev_priv,
- DSPTILEOFF(dev_priv, i9xx_plane));
+ offset = intel_de_read(display,
+ DSPTILEOFF(display, i9xx_plane));
else
- offset = intel_de_read(dev_priv,
- DSPLINOFF(dev_priv, i9xx_plane));
- base = intel_de_read(dev_priv, DSPSURF(dev_priv, i9xx_plane)) & DISP_ADDR_MASK;
+ offset = intel_de_read(display,
+ DSPLINOFF(display, i9xx_plane));
+ base = intel_de_read(display, DSPSURF(display, i9xx_plane)) & DISP_ADDR_MASK;
} else {
offset = 0;
- base = intel_de_read(dev_priv, DSPADDR(dev_priv, i9xx_plane));
+ base = intel_de_read(display, DSPADDR(display, i9xx_plane));
}
plane_config->base = base;
- drm_WARN_ON(&dev_priv->drm, offset != 0);
+ drm_WARN_ON(display->drm, offset != 0);
- val = intel_de_read(dev_priv, PIPESRC(dev_priv, pipe));
+ val = intel_de_read(display, PIPESRC(display, pipe));
fb->width = REG_FIELD_GET(PIPESRC_WIDTH_MASK, val) + 1;
fb->height = REG_FIELD_GET(PIPESRC_HEIGHT_MASK, val) + 1;
- val = intel_de_read(dev_priv, DSPSTRIDE(dev_priv, i9xx_plane));
+ val = intel_de_read(display, DSPSTRIDE(display, i9xx_plane));
fb->pitches[0] = val & 0xffffffc0;
aligned_height = intel_fb_align_height(fb, 0, fb->height);
plane_config->size = fb->pitches[0] * aligned_height;
- drm_dbg_kms(&dev_priv->drm,
- "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
- crtc->base.name, plane->base.name, fb->width, fb->height,
- fb->format->cpp[0] * 8, base, fb->pitches[0],
- plane_config->size);
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s][PLANE:%d:%s] with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ crtc->base.base.id, crtc->base.name,
+ plane->base.base.id, plane->base.name,
+ fb->width, fb->height, fb->format->cpp[0] * 8,
+ base, fb->pitches[0], plane_config->size);
plane_config->fb = intel_fb;
}
@@ -1152,7 +1220,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
bool i9xx_fixup_initial_plane_config(struct intel_crtc *crtc,
const struct intel_initial_plane_config *plane_config)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
@@ -1171,10 +1239,10 @@ bool i9xx_fixup_initial_plane_config(struct intel_crtc *crtc,
if (plane_config->base == base)
return false;
- if (DISPLAY_VER(dev_priv) >= 4)
- intel_de_write(dev_priv, DSPSURF(dev_priv, i9xx_plane), base);
+ if (DISPLAY_VER(display) >= 4)
+ intel_de_write(display, DSPSURF(display, i9xx_plane), base);
else
- intel_de_write(dev_priv, DSPADDR(dev_priv, i9xx_plane), base);
+ intel_de_write(display, DSPADDR(display, i9xx_plane), base);
return true;
}
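
The i9xx_plane.c changes above are largely one mechanical conversion: derive struct intel_display via to_intel_display() instead of struct drm_i915_private via to_i915(), pass it to the intel_de_*()/HAS_*()/DISPLAY_VER() helpers, and replace the IS_<PLATFORM>() macros with the display->platform.<name> flags. A minimal sketch of the before/after shape, assuming a hypothetical helper (example_plane_update_*(); the register pick is just taken from the hunks above for illustration):

/* Old style: everything keyed off drm_i915_private */
static void example_plane_update_old(struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		intel_de_write_fw(dev_priv, DSPOFFSET(dev_priv, plane->i9xx_plane), 0);
}

/* New style: display-only code carries struct intel_display */
static void example_plane_update_new(struct intel_plane *plane)
{
	struct intel_display *display = to_intel_display(plane);

	if (display->platform.haswell || display->platform.broadwell)
		intel_de_write_fw(display, DSPOFFSET(display, plane->i9xx_plane), 0);
}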
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.h b/drivers/gpu/drm/i915/display/i9xx_plane.h
index 0ca12d1e6839..d90546d60855 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.h
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.h
@@ -9,8 +9,9 @@
#include <linux/types.h>
enum pipe;
-struct drm_i915_private;
+struct drm_framebuffer;
struct intel_crtc;
+struct intel_display;
struct intel_initial_plane_config;
struct intel_plane;
struct intel_plane_state;
@@ -19,10 +20,13 @@ struct intel_plane_state;
unsigned int i965_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation);
+unsigned int vlv_plane_min_alignment(struct intel_plane *plane,
+ const struct drm_framebuffer *fb,
+ int color_plane);
int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
struct intel_plane *
-intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe);
+intel_primary_plane_create(struct intel_display *display, enum pipe pipe);
void i9xx_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config);
@@ -40,7 +44,7 @@ static inline int i9xx_check_plane_surface(struct intel_plane_state *plane_state
return 0;
}
static inline struct intel_plane *
-intel_primary_plane_create(struct drm_i915_private *dev_priv, int pipe)
+intel_primary_plane_create(struct intel_display *display, int pipe)
{
return NULL;
}
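
The header change exports vlv_plane_min_alignment() so other VLV/CHV plane code can reuse it as a ->min_alignment hook instead of keeping a private copy. A hedged sketch of how such a hook is consumed (example_fb_alignment() is a placeholder; the hook call mirrors the plane->min_alignment(plane, fb, 0) use visible in i9xx_check_plane_surface() above):

/* Sketch: query a plane's minimum surface alignment for color plane 0 */
static unsigned int example_fb_alignment(struct intel_plane *plane,
					 const struct drm_framebuffer *fb)
{
	/* packed RGB framebuffers only have color plane 0 */
	return plane->min_alignment(plane, fb, 0);
}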
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index db78c1e6b0a3..7c80e37c1c5f 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -446,7 +446,7 @@ static const struct intel_watermark_params i845_wm_info = {
* @latency: Memory wakeup latency in 0.1us units
*
* Compute the watermark using the method 1 or "small buffer"
- * formula. The caller may additonally add extra cachelines
+ * formula. The caller may additionally add extra cachelines
* to account for TLB misses and clock crossings.
*
* This method is concerned with the short term drain rate
@@ -493,7 +493,7 @@ static unsigned int intel_wm_method1(unsigned int pixel_rate,
* @latency: Memory wakeup latency in 0.1us units
*
* Compute the watermark using the method 2 or "large buffer"
- * formula. The caller may additonally add extra cachelines
+ * formula. The caller may additionally add extra cachelines
* to account for TLB misses and clock crossings.
*
* This method is concerned with the long term drain rate
@@ -1562,7 +1562,7 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
/*
* When enabling sprite0 after sprite1 has already been enabled
* we tend to get an underrun unless sprite0 already has some
- * FIFO space allcoated. Hence we always allocate at least one
+ * FIFO space allocated. Hence we always allocate at least one
* cacheline for sprite0 whenever sprite1 is enabled.
*
* All other plane enable sequences appear immune to this problem.
@@ -3902,12 +3902,6 @@ static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
-static void g4x_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
-{
- g4x_wm_get_hw_state(i915);
- g4x_wm_sanitize(i915);
-}
-
static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
struct vlv_wm_values *wm = &dev_priv->display.wm.vlv;
@@ -4055,12 +4049,6 @@ static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
-static void vlv_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
-{
- vlv_wm_get_hw_state(i915);
- vlv_wm_sanitize(i915);
-}
-
/*
* FIXME should probably kill this and improve
* the real watermark readout/sanitation instead
@@ -4122,14 +4110,16 @@ static const struct intel_wm_funcs vlv_wm_funcs = {
.initial_watermarks = vlv_initial_watermarks,
.optimize_watermarks = vlv_optimize_watermarks,
.atomic_update_watermarks = vlv_atomic_update_fifo,
- .get_hw_state = vlv_wm_get_hw_state_and_sanitize,
+ .get_hw_state = vlv_wm_get_hw_state,
+ .sanitize = vlv_wm_sanitize,
};
static const struct intel_wm_funcs g4x_wm_funcs = {
.compute_watermarks = g4x_compute_watermarks,
.initial_watermarks = g4x_initial_watermarks,
.optimize_watermarks = g4x_optimize_watermarks,
- .get_hw_state = g4x_wm_get_hw_state_and_sanitize,
+ .get_hw_state = g4x_wm_get_hw_state,
+ .sanitize = g4x_wm_sanitize,
};
static const struct intel_wm_funcs pnv_wm_funcs = {
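
The i9xx_wm.c change drops the combined *_get_hw_state_and_sanitize() wrappers and instead exposes readout and sanitation as two separate intel_wm_funcs hooks. A sketch of what the calling side then looks like, assuming the funcs pointer lives at i915->display.funcs.wm and that both hooks are optional (the helper name example_wm_readout() is illustrative, not part of the patch):

static void example_wm_readout(struct drm_i915_private *i915)
{
	const struct intel_wm_funcs *wm = i915->display.funcs.wm;

	/* first read back the watermark state the hardware/BIOS left behind */
	if (wm->get_hw_state)
		wm->get_hw_state(i915);

	/* then fix up anything inconsistent as an explicit second step */
	if (wm->sanitize)
		wm->sanitize(i915);
}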
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 82bf6c654de2..402b7b2e1829 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -31,8 +31,8 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "icl_dsi.h"
#include "icl_dsi_regs.h"
#include "intel_atomic.h"
@@ -243,7 +243,7 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
for_each_dsi_phy(phy, intel_dsi->phys) {
/*
* Program voltage swing and pre-emphasis level values as per
- * table in BSPEC under DDI buffer programing
+ * table in BSPEC under DDI buffer programming.
*/
mask = SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK;
val = SCALING_MODE_SEL(0x2) | TAP2_DISABLE | TAP3_DISABLE |
@@ -345,7 +345,6 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
int afe_clk_khz;
@@ -354,7 +353,7 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder,
afe_clk_khz = afe_clk(encoder, crtc_state);
- if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv)) {
+ if (display->platform.alderlake_s || display->platform.alderlake_p) {
theo_word_clk = DIV_ROUND_UP(afe_clk_khz, 8 * DSI_MAX_ESC_CLK);
act_word_clk = max(3, theo_word_clk + (theo_word_clk + 1) % 2);
esc_clk_div_m = act_word_clk * 8;
@@ -375,7 +374,7 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder,
intel_de_posting_read(display, ICL_DPHY_ESC_CLK_DIV(port));
}
- if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv)) {
+ if (display->platform.alderlake_s || display->platform.alderlake_p) {
for_each_dsi_port(port, intel_dsi->ports) {
intel_de_write(display, ADL_MIPIO_DW(port, 8),
esc_clk_div_m_phy & TX_ESC_CLK_DIV_PHY);
@@ -387,13 +386,12 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder,
static void get_dsi_io_power_domains(struct intel_dsi *intel_dsi)
{
struct intel_display *display = to_intel_display(&intel_dsi->base);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
drm_WARN_ON(display->drm, intel_dsi->io_wakeref[port]);
intel_dsi->io_wakeref[port] =
- intel_display_power_get(dev_priv,
+ intel_display_power_get(display,
port == PORT_A ?
POWER_DOMAIN_PORT_DDI_IO_A :
POWER_DOMAIN_PORT_DDI_IO_B);
@@ -415,19 +413,18 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum phy phy;
for_each_dsi_phy(phy, intel_dsi->phys)
- intel_combo_phy_power_up_lanes(dev_priv, phy, true,
+ intel_combo_phy_power_up_lanes(display, phy, true,
intel_dsi->lane_count, false);
}
static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum phy phy;
u32 tmp;
@@ -452,7 +449,7 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
intel_de_write(display, ICL_PORT_TX_DW2_GRP(phy), tmp);
/* For EHL, TGL, set latency optimization for PCS_DW1 lanes */
- if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv) ||
+ if (display->platform.jasperlake || display->platform.elkhartlake ||
(DISPLAY_VER(display) >= 12)) {
intel_de_rmw(display, ICL_PORT_PCS_DW1_AUX(phy),
LATENCY_OPTIM_MASK, LATENCY_OPTIM_VAL(0));
@@ -534,7 +531,6 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum phy phy;
@@ -564,7 +560,7 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
}
}
- if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
+ if (display->platform.jasperlake || display->platform.elkhartlake) {
for_each_dsi_phy(phy, intel_dsi->phys)
intel_de_rmw(display, ICL_DPHY_CHKN(phy),
0, ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP);
@@ -961,7 +957,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
/*
- * FIXME: Programing this by assuming progressive mode, since
+ * FIXME: Programming this by assuming progressive mode, since
* non-interlaced info from VBT is not saved inside
* struct drm_display_mode.
* For interlace mode: program required pixel minus 2
@@ -1385,7 +1381,6 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
@@ -1393,7 +1388,7 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
intel_wakeref_t wakeref;
wakeref = fetch_and_zero(&intel_dsi->io_wakeref[port]);
- intel_display_power_put(dev_priv,
+ intel_display_power_put(display,
port == PORT_A ?
POWER_DOMAIN_PORT_DDI_IO_A :
POWER_DOMAIN_PORT_DDI_IO_B,
@@ -1460,12 +1455,12 @@ static void gen11_dsi_post_disable(struct intel_atomic_state *state,
}
static enum drm_mode_status gen11_dsi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
enum drm_mode_status status;
- status = intel_cpu_transcoder_mode_valid(i915, mode);
+ status = intel_cpu_transcoder_mode_valid(display, mode);
if (status != MODE_OK)
return status;
@@ -1652,7 +1647,7 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
- ret = intel_panel_fitting(pipe_config, conn_state);
+ ret = intel_pfit_compute_config(pipe_config, conn_state);
if (ret)
return ret;
@@ -1697,7 +1692,6 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum transcoder dsi_trans;
intel_wakeref_t wakeref;
@@ -1705,7 +1699,7 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
bool ret = false;
u32 tmp;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+ wakeref = intel_display_power_get_if_enabled(display,
encoder->power_domain);
if (!wakeref)
return false;
@@ -1736,7 +1730,7 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
ret = tmp & TRANSCONF_ENABLE;
}
out:
- intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+ intel_display_power_put(display, encoder->power_domain, wakeref);
return ret;
}
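
In icl_dsi.c the display power helpers now take the struct intel_display pointer, but the wakeref bracket around register access is unchanged. A condensed sketch of the pattern, using only calls that appear in the hunks above (example_get_hw_state() is a placeholder name):

static bool example_get_hw_state(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	intel_wakeref_t wakeref;
	bool ret = false;

	/* skip the register read entirely if the power domain is off */
	wakeref = intel_display_power_get_if_enabled(display,
						     encoder->power_domain);
	if (!wakeref)
		return false;

	/* ... intel_de_read(display, ...) and decode the enable bit ... */

	intel_display_power_put(display, encoder->power_domain, wakeref);
	return ret;
}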
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
index bbf8c5a8fdbd..1addd6288241 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.c
+++ b/drivers/gpu/drm/i915/display/intel_acpi.c
@@ -9,6 +9,8 @@
#include <linux/acpi.h>
#include <acpi/video.h>
+#include <drm/drm_print.h>
+
#include "i915_utils.h"
#include "intel_acpi.h"
#include "intel_display_core.h"
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h
index e506f6a87344..a5a7e2906ba8 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic.h
@@ -14,7 +14,6 @@ struct drm_connector_state;
struct drm_crtc;
struct drm_crtc_state;
struct drm_device;
-struct drm_i915_private;
struct drm_property;
struct intel_atomic_state;
struct intel_connector;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 612e9b0ec14a..7276179df878 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -36,12 +36,15 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
+#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_atomic_helper.h>
-#include "i915_drv.h"
+#include "gem/i915_gem_object.h"
#include "i915_config.h"
+#include "i915_scheduler_types.h"
+#include "i915_vma.h"
#include "i9xx_plane_regs.h"
#include "intel_atomic_plane.h"
#include "intel_cdclk.h"
@@ -52,6 +55,7 @@
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "skl_scaler.h"
+#include "skl_universal_plane.h"
#include "skl_watermark.h"
static void intel_plane_state_reset(struct intel_plane_state *plane_state,
@@ -93,6 +97,19 @@ void intel_plane_free(struct intel_plane *plane)
}
/**
+ * intel_plane_destroy - destroy a plane
+ * @plane: plane to destroy
+ *
+ * Common destruction function for all types of planes (primary, cursor,
+ * sprite).
+ */
+void intel_plane_destroy(struct drm_plane *plane)
+{
+ drm_plane_cleanup(plane);
+ kfree(to_intel_plane(plane));
+}
+
+/**
* intel_plane_duplicate_state - duplicate plane state
* @plane: drm plane
*
@@ -117,6 +134,7 @@ intel_plane_duplicate_state(struct drm_plane *plane)
intel_state->ggtt_vma = NULL;
intel_state->dpt_vma = NULL;
intel_state->flags = 0;
+ intel_state->damage = DRM_RECT_INIT(0, 0, 0, 0);
/* add reference to fb */
if (intel_state->hw.fb)
@@ -150,10 +168,15 @@ intel_plane_destroy_state(struct drm_plane *plane,
bool intel_plane_needs_physical(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
return plane->id == PLANE_CURSOR &&
- DISPLAY_INFO(i915)->cursor_needs_physical;
+ DISPLAY_INFO(display)->cursor_needs_physical;
+}
+
+bool intel_plane_can_async_flip(struct intel_plane *plane, u64 modifier)
+{
+ return plane->can_async_flip && plane->can_async_flip(modifier);
}
unsigned int intel_adjusted_rate(const struct drm_rect *src,
@@ -253,7 +276,7 @@ int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
struct intel_plane *plane,
bool *need_cdclk_calc)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
const struct intel_plane_state *plane_state =
intel_atomic_get_new_plane_state(state, plane);
struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
@@ -298,7 +321,7 @@ int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
cdclk_state->min_cdclk[crtc->pipe])
return 0;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] min cdclk (%d kHz) > [CRTC:%d:%s] min cdclk (%d kHz)\n",
plane->base.base.id, plane->base.name,
new_crtc_state->min_cdclk[plane->id],
@@ -317,6 +340,25 @@ static void intel_plane_clear_hw_state(struct intel_plane_state *plane_state)
memset(&plane_state->hw, 0, sizeof(plane_state->hw));
}
+static void
+intel_plane_copy_uapi_plane_damage(struct intel_plane_state *new_plane_state,
+ const struct intel_plane_state *old_uapi_plane_state,
+ const struct intel_plane_state *new_uapi_plane_state)
+{
+ struct intel_display *display = to_intel_display(new_plane_state);
+ struct drm_rect *damage = &new_plane_state->damage;
+
+ /* damage property tracking enabled from display version 12 onwards */
+ if (DISPLAY_VER(display) < 12)
+ return;
+
+ if (!drm_atomic_helper_damage_merged(&old_uapi_plane_state->uapi,
+ &new_uapi_plane_state->uapi,
+ damage))
+ /* In case the helper fails, mark the whole plane region as damaged */
+ *damage = drm_plane_state_src(&new_uapi_plane_state->uapi);
+}
+
void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
const struct intel_plane_state *from_plane_state,
struct intel_crtc *crtc)
@@ -392,7 +434,7 @@ static bool intel_plane_do_async_flip(struct intel_plane *plane,
const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
if (!plane->async_flip)
return false;
@@ -413,7 +455,7 @@ static bool intel_plane_do_async_flip(struct intel_plane *plane,
* extend this so other scanout parameters (stride/etc) could
* be changed as well...
*/
- return DISPLAY_VER(i915) < 9 || old_crtc_state->uapi.async_flip;
+ return DISPLAY_VER(display) < 9 || old_crtc_state->uapi.async_flip;
}
static bool i9xx_must_disable_cxsr(const struct intel_crtc_state *new_crtc_state,
@@ -517,16 +559,16 @@ static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_cr
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *new_plane_state)
{
+ struct intel_display *display = to_intel_display(new_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
bool mode_changed = intel_crtc_needs_modeset(new_crtc_state);
bool was_crtc_enabled = old_crtc_state->hw.active;
bool is_crtc_enabled = new_crtc_state->hw.active;
bool turn_off, turn_on, visible, was_visible;
int ret;
- if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
+ if (DISPLAY_VER(display) >= 9 && plane->id != PLANE_CURSOR) {
ret = skl_update_scaler_plane(new_crtc_state, new_plane_state);
if (ret)
return ret;
@@ -535,7 +577,7 @@ static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_cr
was_visible = old_plane_state->uapi.visible;
visible = new_plane_state->uapi.visible;
- if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
+ if (!was_crtc_enabled && drm_WARN_ON(display->drm, was_visible))
was_visible = false;
/*
@@ -559,7 +601,7 @@ static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_cr
turn_off = was_visible && (!visible || mode_changed);
turn_on = visible && (!was_visible || mode_changed);
- drm_dbg_atomic(&dev_priv->drm,
+ drm_dbg_atomic(display->drm,
"[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
crtc->base.base.id, crtc->base.name,
plane->base.base.id, plane->base.name,
@@ -569,11 +611,11 @@ static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_cr
if (visible || was_visible)
new_crtc_state->fb_bits |= plane->frontbuffer_bit;
- if (HAS_GMCH(dev_priv) &&
+ if (HAS_GMCH(display) &&
i9xx_must_disable_cxsr(new_crtc_state, old_plane_state, new_plane_state))
new_crtc_state->disable_cxsr = true;
- if ((IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) &&
+ if ((display->platform.ironlake || display->platform.sandybridge || display->platform.ivybridge) &&
ilk_must_disable_cxsr(new_crtc_state, old_plane_state, new_plane_state))
new_crtc_state->disable_cxsr = true;
@@ -663,13 +705,13 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
old_plane_state, new_plane_state);
}
-static struct intel_plane *
+struct intel_plane *
intel_crtc_get_plane(struct intel_crtc *crtc, enum plane_id plane_id)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_plane *plane;
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
if (plane->id == plane_id)
return plane;
}
@@ -686,6 +728,7 @@ int intel_plane_atomic_check(struct intel_atomic_state *state,
const struct intel_plane_state *old_plane_state =
intel_atomic_get_old_plane_state(state, plane);
const struct intel_plane_state *new_primary_crtc_plane_state;
+ const struct intel_plane_state *old_primary_crtc_plane_state;
struct intel_crtc *crtc = intel_crtc_for_pipe(display, plane->pipe);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
@@ -700,10 +743,17 @@ int intel_plane_atomic_check(struct intel_atomic_state *state,
new_primary_crtc_plane_state =
intel_atomic_get_new_plane_state(state, primary_crtc_plane);
+ old_primary_crtc_plane_state =
+ intel_atomic_get_old_plane_state(state, primary_crtc_plane);
} else {
new_primary_crtc_plane_state = new_plane_state;
+ old_primary_crtc_plane_state = old_plane_state;
}
+ intel_plane_copy_uapi_plane_damage(new_plane_state,
+ old_primary_crtc_plane_state,
+ new_primary_crtc_plane_state);
+
intel_plane_copy_uapi_to_hw_state(new_plane_state,
new_primary_crtc_plane_state,
crtc);
@@ -767,7 +817,10 @@ void intel_plane_update_noarm(struct intel_dsb *dsb,
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- trace_intel_plane_update_noarm(plane, crtc);
+ trace_intel_plane_update_noarm(plane_state, crtc);
+
+ if (plane->fbc)
+ intel_fbc_dirty_rect_update_noarm(dsb, plane);
if (plane->update_noarm)
plane->update_noarm(dsb, plane, crtc_state, plane_state);
@@ -797,7 +850,7 @@ void intel_plane_update_arm(struct intel_dsb *dsb,
return;
}
- trace_intel_plane_update_arm(plane, crtc);
+ trace_intel_plane_update_arm(plane_state, crtc);
plane->update_arm(dsb, plane, crtc_state, plane_state);
}
@@ -836,7 +889,7 @@ void intel_crtc_planes_update_noarm(struct intel_dsb *dsb,
/* TODO: for mailbox updates this should be skipped */
if (new_plane_state->uapi.visible ||
- new_plane_state->planar_slave)
+ new_plane_state->is_y_plane)
intel_plane_update_noarm(dsb, plane,
new_crtc_state, new_plane_state);
}
@@ -869,7 +922,7 @@ static void skl_crtc_planes_update_arm(struct intel_dsb *dsb,
* would have to be called here as well.
*/
if (new_plane_state->uapi.visible ||
- new_plane_state->planar_slave)
+ new_plane_state->is_y_plane)
intel_plane_update_arm(dsb, plane, new_crtc_state, new_plane_state);
else
intel_plane_disable_arm(dsb, plane, new_crtc_state);
@@ -907,9 +960,9 @@ void intel_crtc_planes_update_arm(struct intel_dsb *dsb,
struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
skl_crtc_planes_update_arm(dsb, state, crtc);
else
i9xx_crtc_planes_update_arm(dsb, state, crtc);
@@ -920,7 +973,8 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
int min_scale, int max_scale,
bool can_position)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_framebuffer *fb = plane_state->hw.fb;
struct drm_rect *src = &plane_state->uapi.src;
struct drm_rect *dst = &plane_state->uapi.dst;
@@ -939,9 +993,10 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
if (hscale < 0 || vscale < 0) {
- drm_dbg_kms(&i915->drm, "Invalid scaling of plane\n");
- drm_rect_debug_print("src: ", src, true);
- drm_rect_debug_print("dst: ", dst, false);
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] invalid scaling "DRM_RECT_FP_FMT " -> " DRM_RECT_FMT "\n",
+ plane->base.base.id, plane->base.name,
+ DRM_RECT_FP_ARG(src), DRM_RECT_ARG(dst));
return -ERANGE;
}
@@ -955,9 +1010,10 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
if (!can_position && plane_state->uapi.visible &&
!drm_rect_equals(dst, clip)) {
- drm_dbg_kms(&i915->drm, "Plane must cover entire CRTC\n");
- drm_rect_debug_print("dst: ", dst, false);
- drm_rect_debug_print("clip: ", clip, false);
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] plane (" DRM_RECT_FMT ") must cover entire CRTC (" DRM_RECT_FMT ")\n",
+ plane->base.base.id, plane->base.name,
+ DRM_RECT_ARG(dst), DRM_RECT_ARG(clip));
return -EINVAL;
}
@@ -969,7 +1025,8 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
const struct drm_framebuffer *fb = plane_state->hw.fb;
struct drm_rect *src = &plane_state->uapi.src;
u32 src_x, src_y, src_w, src_h, hsub, vsub;
@@ -1002,18 +1059,18 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
if (fb->format->format == DRM_FORMAT_RGB565 && rotated) {
hsub = 2;
vsub = 2;
- } else if (DISPLAY_VER(i915) >= 20 &&
+ } else if (DISPLAY_VER(display) >= 20 &&
intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) {
/*
* This allows NV12 and P0xx formats to have odd size and/or odd
- * source coordinates on DISPLAY_VER(i915) >= 20
+ * source coordinates on DISPLAY_VER(display) >= 20
*/
hsub = 1;
vsub = 1;
/* Wa_16023981245 */
- if ((DISPLAY_VERx100(i915) == 2000 ||
- DISPLAY_VERx100(i915) == 3000) &&
+ if ((DISPLAY_VERx100(display) == 2000 ||
+ DISPLAY_VERx100(display) == 3000) &&
src_x % 2 != 0)
hsub = 2;
} else {
@@ -1025,13 +1082,17 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
hsub = vsub = max(hsub, vsub);
if (src_x % hsub || src_w % hsub) {
- drm_dbg_kms(&i915->drm, "src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n",
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n",
+ plane->base.base.id, plane->base.name,
src_x, src_w, hsub, str_yes_no(rotated));
return -EINVAL;
}
if (src_y % vsub || src_h % vsub) {
- drm_dbg_kms(&i915->drm, "src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n",
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n",
+ plane->base.base.id, plane->base.name,
src_y, src_h, vsub, str_yes_no(rotated));
return -EINVAL;
}
@@ -1092,11 +1153,11 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
{
struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
struct intel_plane *plane = to_intel_plane(_plane);
+ struct intel_display *display = to_intel_display(plane);
struct intel_plane_state *new_plane_state =
to_intel_plane_state(_new_plane_state);
struct intel_atomic_state *state =
to_intel_atomic_state(new_plane_state->uapi.state);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
struct intel_plane_state *old_plane_state =
intel_atomic_get_old_plane_state(state, plane);
struct drm_gem_object *obj = intel_fb_bo(new_plane_state->hw.fb);
@@ -1119,7 +1180,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
* This should only fail upon a hung GPU, in which case we
* can safely continue.
*/
- if (new_crtc_state && intel_crtc_needs_modeset(new_crtc_state)) {
+ if (intel_crtc_needs_modeset(new_crtc_state)) {
ret = add_dma_resv_fences(old_obj->resv,
&new_plane_state->uapi);
if (ret < 0)
@@ -1130,7 +1191,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
if (!obj)
return 0;
- ret = intel_plane_pin_fb(new_plane_state);
+ ret = intel_plane_pin_fb(new_plane_state, old_plane_state);
if (ret)
return ret;
@@ -1154,7 +1215,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
* that are not quite steady state without resorting to forcing
* maximum clocks following a vblank miss (see do_rps_boost()).
*/
- intel_display_rps_mark_interactive(dev_priv, state, true);
+ intel_display_rps_mark_interactive(display, state, true);
return 0;
@@ -1175,17 +1236,17 @@ static void
intel_cleanup_plane_fb(struct drm_plane *plane,
struct drm_plane_state *_old_plane_state)
{
+ struct intel_display *display = to_intel_display(plane->dev);
struct intel_plane_state *old_plane_state =
to_intel_plane_state(_old_plane_state);
struct intel_atomic_state *state =
to_intel_atomic_state(old_plane_state->uapi.state);
- struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_gem_object *obj = intel_fb_bo(old_plane_state->hw.fb);
if (!obj)
return;
- intel_display_rps_mark_interactive(dev_priv, state, false);
+ intel_display_rps_mark_interactive(display, state, false);
intel_plane_unpin_fb(old_plane_state);
}
@@ -1210,3 +1271,302 @@ void intel_plane_init_cursor_vblank_work(struct intel_plane_state *old_plane_sta
drm_vblank_work_init(&old_plane_state->unpin_work, old_plane_state->uapi.crtc,
intel_cursor_unpin_work);
}
+
+static void link_nv12_planes(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *uv_plane_state,
+ struct intel_plane_state *y_plane_state)
+{
+ struct intel_display *display = to_intel_display(uv_plane_state);
+ struct intel_plane *uv_plane = to_intel_plane(uv_plane_state->uapi.plane);
+ struct intel_plane *y_plane = to_intel_plane(y_plane_state->uapi.plane);
+
+ drm_dbg_kms(display->drm, "UV plane [PLANE:%d:%s] using Y plane [PLANE:%d:%s]\n",
+ uv_plane->base.base.id, uv_plane->base.name,
+ y_plane->base.base.id, y_plane->base.name);
+
+ uv_plane_state->planar_linked_plane = y_plane;
+
+ y_plane_state->is_y_plane = true;
+ y_plane_state->planar_linked_plane = uv_plane;
+
+ crtc_state->enabled_planes |= BIT(y_plane->id);
+ crtc_state->active_planes |= BIT(y_plane->id);
+ crtc_state->update_planes |= BIT(y_plane->id);
+
+ crtc_state->data_rate[y_plane->id] = crtc_state->data_rate_y[uv_plane->id];
+ crtc_state->rel_data_rate[y_plane->id] = crtc_state->rel_data_rate_y[uv_plane->id];
+
+ /* Copy parameters to Y plane */
+ intel_plane_copy_hw_state(y_plane_state, uv_plane_state);
+ y_plane_state->uapi.src = uv_plane_state->uapi.src;
+ y_plane_state->uapi.dst = uv_plane_state->uapi.dst;
+
+ y_plane_state->ctl = uv_plane_state->ctl;
+ y_plane_state->color_ctl = uv_plane_state->color_ctl;
+ y_plane_state->view = uv_plane_state->view;
+ y_plane_state->decrypt = uv_plane_state->decrypt;
+
+ icl_link_nv12_planes(uv_plane_state, y_plane_state);
+}
+
+static void unlink_nv12_plane(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+
+ plane_state->planar_linked_plane = NULL;
+
+ if (!plane_state->is_y_plane)
+ return;
+
+ drm_WARN_ON(display->drm, plane_state->uapi.visible);
+
+ plane_state->is_y_plane = false;
+
+ crtc_state->enabled_planes &= ~BIT(plane->id);
+ crtc_state->active_planes &= ~BIT(plane->id);
+ crtc_state->update_planes |= BIT(plane->id);
+ crtc_state->data_rate[plane->id] = 0;
+ crtc_state->rel_data_rate[plane->id] = 0;
+}
+
+static int icl_check_nv12_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ int i;
+
+ if (DISPLAY_VER(display) < 11)
+ return 0;
+
+ /*
+ * Destroy all old plane links and make the Y plane invisible
+ * in the crtc_state->active_planes mask.
+ */
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ if (plane->pipe != crtc->pipe)
+ continue;
+
+ if (plane_state->planar_linked_plane)
+ unlink_nv12_plane(crtc_state, plane_state);
+ }
+
+ if (!crtc_state->nv12_planes)
+ return 0;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ struct intel_plane_state *y_plane_state = NULL;
+ struct intel_plane *y_plane;
+
+ if (plane->pipe != crtc->pipe)
+ continue;
+
+ if ((crtc_state->nv12_planes & BIT(plane->id)) == 0)
+ continue;
+
+ for_each_intel_plane_on_crtc(display->drm, crtc, y_plane) {
+ if (!icl_is_nv12_y_plane(display, y_plane->id))
+ continue;
+
+ if (crtc_state->active_planes & BIT(y_plane->id))
+ continue;
+
+ y_plane_state = intel_atomic_get_plane_state(state, y_plane);
+ if (IS_ERR(y_plane_state))
+ return PTR_ERR(y_plane_state);
+
+ break;
+ }
+
+ if (!y_plane_state) {
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] need %d free Y planes for planar YUV\n",
+ crtc->base.base.id, crtc->base.name,
+ hweight8(crtc_state->nv12_planes));
+ return -EINVAL;
+ }
+
+ link_nv12_planes(crtc_state, plane_state, y_plane_state);
+ }
+
+ return 0;
+}
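
A minimal, self-contained C sketch of the two-pass idea in icl_check_nv12_planes() above: first drop any stale UV<->Y links, then give each plane that carries planar (NV12) content the first Y-capable plane that is not already active. Every name and the 8-bit masks below are hypothetical stand-ins for illustration, not i915 data structures.

/* Illustrative sketch only -- hypothetical types, not i915 code. */
#include <stdint.h>
#include <stdio.h>

#define NUM_PLANES 8

struct fake_crtc_state {
    uint8_t active_planes;  /* planes currently in use */
    uint8_t nv12_planes;    /* planes showing planar content, need a Y plane */
    uint8_t y_capable;      /* planes that may serve as a borrowed Y plane */
    int link[NUM_PLANES];   /* link[uv] = borrowed Y plane, or -1 */
};

/* Pass 2: give every NV12 plane the first Y-capable plane not yet active. */
static int link_free_y_planes(struct fake_crtc_state *s)
{
    for (int uv = 0; uv < NUM_PLANES; uv++) {
        int y = -1;

        if (!(s->nv12_planes & (1u << uv)))
            continue;

        for (int cand = 0; cand < NUM_PLANES; cand++) {
            if (!(s->y_capable & (1u << cand)))
                continue;
            if (s->active_planes & (1u << cand))
                continue;       /* already taken, keep scanning */
            y = cand;
            break;
        }
        if (y < 0)
            return -1;          /* not enough free Y planes */

        s->link[uv] = y;
        s->active_planes |= 1u << y;    /* the Y plane is now in use */
    }
    return 0;
}

int main(void)
{
    struct fake_crtc_state s = {
        .active_planes = 0x03, /* planes 0 and 1 carry the NV12 content */
        .nv12_planes   = 0x03,
        .y_capable     = 0x3c, /* planes 2..5 may act as Y planes */
    };

    /* Pass 1: drop any stale links before picking new ones. */
    for (int i = 0; i < NUM_PLANES; i++)
        s.link[i] = -1;

    if (link_free_y_planes(&s))
        return 1;

    for (int i = 0; i < NUM_PLANES; i++)
        if (s.link[i] >= 0)
            printf("UV plane %d -> Y plane %d\n", i, s.link[i]);
    return 0;
}

The failure path mirrors the -EINVAL case above: if the candidate scan finds no free Y-capable plane, the whole check fails rather than silently dropping a plane.
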
+
+static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ u8 plane_ids_mask)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+
+ if ((plane_ids_mask & BIT(plane->id)) == 0)
+ continue;
+
+ plane_state = intel_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+ }
+
+ return 0;
+}
+
+int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ return intel_crtc_add_planes_to_state(state, crtc,
+ old_crtc_state->enabled_planes |
+ new_crtc_state->enabled_planes);
+}
+
+static bool active_planes_affects_min_cdclk(struct intel_display *display)
+{
+ /* See {hsw,vlv,ivb}_plane_ratio() */
+ return display->platform.broadwell || display->platform.haswell ||
+ display->platform.cherryview || display->platform.valleyview ||
+ display->platform.ivybridge;
+}
+
+static u8 intel_joiner_affected_planes(struct intel_atomic_state *state,
+ u8 joined_pipes)
+{
+ const struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ u8 affected_planes = 0;
+ int i;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ struct intel_plane *linked = plane_state->planar_linked_plane;
+
+ if ((joined_pipes & BIT(plane->pipe)) == 0)
+ continue;
+
+ affected_planes |= BIT(plane->id);
+ if (linked)
+ affected_planes |= BIT(linked->id);
+ }
+
+ return affected_planes;
+}
+
+static int intel_joiner_add_affected_planes(struct intel_atomic_state *state,
+ u8 joined_pipes)
+{
+ u8 prev_affected_planes, affected_planes = 0;
+
+ /*
+ * We want all the joined pipes to have the same
+ * set of planes in the atomic state, to make sure
+ * state copying always works correctly, and the
+ * UV<->Y plane linkage is always up to date.
+ * Keep pulling planes in until we've determined
+ * the full set of affected planes. A bit complicated
+ * on account of each pipe being capable of selecting
+ * their own Y planes independently of the other pipes,
+ * and the selection being done from the set of
+ * inactive planes.
+ */
+ do {
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc_in_pipe_mask(state->base.dev, crtc, joined_pipes) {
+ int ret;
+
+ ret = intel_crtc_add_planes_to_state(state, crtc, affected_planes);
+ if (ret)
+ return ret;
+ }
+
+ prev_affected_planes = affected_planes;
+ affected_planes = intel_joiner_affected_planes(state, joined_pipes);
+ } while (affected_planes != prev_affected_planes);
+
+ return 0;
+}
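
The comment in intel_joiner_add_affected_planes() describes a small fixed-point iteration: keep pulling planes into the state until the affected set, including any linked Y planes, stops growing. A standalone sketch of that convergence loop, with a bitmask and a hypothetical link table standing in for the atomic state:

/* Illustrative convergence-loop sketch -- not i915 code. */
#include <stdint.h>
#include <stdio.h>

#define NUM_PLANES 8

/*
 * linked[i] is the plane linked to plane i (for example a borrowed
 * Y plane), or -1 when plane i has no link.
 */
static uint8_t expand_links(uint8_t planes, const int linked[NUM_PLANES])
{
    uint8_t out = planes;

    for (int i = 0; i < NUM_PLANES; i++)
        if ((planes & (1u << i)) && linked[i] >= 0)
            out |= 1u << linked[i];
    return out;
}

int main(void)
{
    /* Plane 1 pulls in plane 4, and plane 4 in turn pulls in plane 6. */
    int linked[NUM_PLANES] = { -1, 4, -1, -1, 6, -1, -1, -1 };
    uint8_t affected = 0x02;    /* start with plane 1 only */
    uint8_t prev;

    /* Keep pulling planes in until the affected set stops changing. */
    do {
        prev = affected;
        affected = expand_links(affected, linked);
    } while (affected != prev);

    printf("affected planes mask: 0x%02x\n", affected); /* prints 0x52 */
    return 0;
}

Because each pass can only add bits to a fixed-width mask, the loop is guaranteed to terminate after at most a handful of iterations.
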
+
+static int intel_add_affected_planes(struct intel_atomic_state *state)
+{
+ const struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ int ret;
+
+ ret = intel_joiner_add_affected_planes(state, intel_crtc_joined_pipe_mask(crtc_state));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int intel_atomic_check_planes(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_plane_state __maybe_unused *plane_state;
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
+ int i, ret;
+
+ ret = intel_add_affected_planes(state);
+ if (ret)
+ return ret;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ ret = intel_plane_atomic_check(state, plane);
+ if (ret) {
+ drm_dbg_atomic(display->drm,
+ "[PLANE:%d:%s] atomic driver check failed\n",
+ plane->base.base.id, plane->base.name);
+ return ret;
+ }
+ }
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ u8 old_active_planes, new_active_planes;
+
+ ret = icl_check_nv12_planes(state, crtc);
+ if (ret)
+ return ret;
+
+ /*
+ * On some platforms the number of active planes affects
+ * the planes' minimum cdclk calculation. Add such planes
+ * to the state before we compute the minimum cdclk.
+ */
+ if (!active_planes_affects_min_cdclk(display))
+ continue;
+
+ old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+
+ if (hweight8(old_active_planes) == hweight8(new_active_planes))
+ continue;
+
+ ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+u32 intel_plane_ggtt_offset(const struct intel_plane_state *plane_state)
+{
+ return i915_ggtt_offset(plane_state->ggtt_vma);
+}
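
intel_atomic_check_planes() above only pulls the full set of active planes into the state when the number of non-cursor planes changes, since that count is what feeds the minimum-cdclk ratios on the affected platforms. A small stand-alone sketch of that popcount comparison, with a hypothetical cursor bit:

/* Sketch of the "did the number of non-cursor planes change?" test. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CURSOR_BIT 0x80u    /* hypothetical cursor plane bit */

static int popcount8(uint8_t v)
{
    int n = 0;

    for (; v; v &= v - 1)
        n++;
    return n;
}

/*
 * True when the count of active non-cursor planes changed -- the condition
 * that forces the new active planes into the state before the minimum
 * cdclk is recomputed.
 */
static bool plane_count_changed(uint8_t old_active, uint8_t new_active)
{
    old_active &= (uint8_t)~CURSOR_BIT;
    new_active &= (uint8_t)~CURSOR_BIT;

    return popcount8(old_active) != popcount8(new_active);
}

int main(void)
{
    /* Toggling only the cursor does not change the count. */
    printf("%d\n", plane_count_changed(0x81, 0x01));    /* 0 */
    /* Enabling a second non-cursor plane does. */
    printf("%d\n", plane_count_changed(0x01, 0x03));    /* 1 */
    return 0;
}
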
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index 0f982f452ff3..6efac923dcbc 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -19,6 +19,9 @@ struct intel_plane;
struct intel_plane_state;
enum plane_id;
+struct intel_plane *
+intel_crtc_get_plane(struct intel_crtc *crtc, enum plane_id plane_id);
+bool intel_plane_can_async_flip(struct intel_plane *plane, u64 modifier);
unsigned int intel_adjusted_rate(const struct drm_rect *src,
const struct drm_rect *dst,
unsigned int rate);
@@ -51,6 +54,7 @@ void intel_plane_disable_arm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state);
struct intel_plane *intel_plane_alloc(void);
void intel_plane_free(struct intel_plane *plane);
+void intel_plane_destroy(struct drm_plane *plane);
struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
void intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
@@ -80,5 +84,10 @@ void intel_plane_helper_add(struct intel_plane *plane);
bool intel_plane_needs_physical(struct intel_plane *plane);
void intel_plane_init_cursor_vblank_work(struct intel_plane_state *old_plane_state,
struct intel_plane_state *new_plane_state);
+int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+int intel_atomic_check_planes(struct intel_atomic_state *state);
+
+u32 intel_plane_ggtt_offset(const struct intel_plane_state *plane_state);
#endif /* __INTEL_ATOMIC_PLANE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index ce8a4319a63c..ea935a5d94c8 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -188,15 +188,17 @@ static const struct hdmi_aud_ncts hdmi_aud_ncts_36bpp[] = {
* WA_14020863754: Implement Audio Workaround
* Corner case with Min Hblank Fix can cause audio hang
*/
-static bool needs_wa_14020863754(struct drm_i915_private *i915)
+static bool needs_wa_14020863754(struct intel_display *display)
{
- return (DISPLAY_VER(i915) == 20 || IS_BATTLEMAGE(i915));
+ return DISPLAY_VERx100(display) == 3000 ||
+ DISPLAY_VERx100(display) == 2000 ||
+ DISPLAY_VERx100(display) == 1401;
}
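
The reworked check above keys off DISPLAY_VERx100(), i.e. an IP version encoded as the major version times 100 plus the release number (hence 1401, 2000 and 3000). A tiny stand-alone sketch of that encoding and comparison, using hypothetical helpers rather than the driver's macros:

/* Hypothetical stand-in for a "version * 100 + release" IP check. */
#include <stdbool.h>
#include <stdio.h>

static int verx100(int ver, int rel)
{
    return ver * 100 + rel;     /* e.g. 14.01 -> 1401, 20.00 -> 2000 */
}

/* Mirrors the shape of the workaround test in the hunk above. */
static bool needs_audio_wa(int v)
{
    return v == 3000 || v == 2000 || v == 1401;
}

int main(void)
{
    printf("%d\n", needs_audio_wa(verx100(14, 1)));     /* 1 */
    printf("%d\n", needs_audio_wa(verx100(14, 0)));     /* 0 */
    return 0;
}
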
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int i;
@@ -206,17 +208,17 @@ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state)
break;
}
- if (DISPLAY_VER(i915) < 12 && adjusted_mode->crtc_clock > 148500)
+ if (DISPLAY_VER(display) < 12 && adjusted_mode->crtc_clock > 148500)
i = ARRAY_SIZE(hdmi_audio_clock);
if (i == ARRAY_SIZE(hdmi_audio_clock)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"HDMI audio pixel clock setting for %d not found, falling back to defaults\n",
adjusted_mode->crtc_clock);
i = 1;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Configuring HDMI audio for pixel clock %d (0x%08x)\n",
hdmi_audio_clock[i].clock,
hdmi_audio_clock[i].config);
@@ -251,11 +253,11 @@ static int audio_config_hdmi_get_n(const struct intel_crtc_state *crtc_state,
}
/* ELD buffer size in dwords */
-static int g4x_eld_buffer_size(struct drm_i915_private *i915)
+static int g4x_eld_buffer_size(struct intel_display *display)
{
u32 tmp;
- tmp = intel_de_read(i915, G4X_AUD_CNTL_ST);
+ tmp = intel_de_read(display, G4X_AUD_CNTL_ST);
return REG_FIELD_GET(G4X_ELD_BUFFER_SIZE_MASK, tmp);
}
@@ -263,33 +265,33 @@ static int g4x_eld_buffer_size(struct drm_i915_private *i915)
static void g4x_audio_codec_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 *eld = (u32 *)crtc_state->eld;
int eld_buffer_size, len, i;
u32 tmp;
- tmp = intel_de_read(i915, G4X_AUD_CNTL_ST);
+ tmp = intel_de_read(display, G4X_AUD_CNTL_ST);
if ((tmp & G4X_ELD_VALID) == 0)
return;
- intel_de_rmw(i915, G4X_AUD_CNTL_ST, G4X_ELD_ADDRESS_MASK, 0);
+ intel_de_rmw(display, G4X_AUD_CNTL_ST, G4X_ELD_ADDRESS_MASK, 0);
- eld_buffer_size = g4x_eld_buffer_size(i915);
+ eld_buffer_size = g4x_eld_buffer_size(display);
len = min_t(int, sizeof(crtc_state->eld) / 4, eld_buffer_size);
for (i = 0; i < len; i++)
- eld[i] = intel_de_read(i915, G4X_HDMIW_HDMIEDID);
+ eld[i] = intel_de_read(display, G4X_HDMIW_HDMIEDID);
}
static void g4x_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
/* Invalidate ELD */
- intel_de_rmw(i915, G4X_AUD_CNTL_ST,
+ intel_de_rmw(display, G4X_AUD_CNTL_ST,
G4X_ELD_VALID, 0);
intel_crtc_wait_for_next_vblank(crtc);
@@ -300,28 +302,28 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const u32 *eld = (const u32 *)crtc_state->eld;
int eld_buffer_size, len, i;
intel_crtc_wait_for_next_vblank(crtc);
- intel_de_rmw(i915, G4X_AUD_CNTL_ST,
+ intel_de_rmw(display, G4X_AUD_CNTL_ST,
G4X_ELD_VALID | G4X_ELD_ADDRESS_MASK, 0);
- eld_buffer_size = g4x_eld_buffer_size(i915);
+ eld_buffer_size = g4x_eld_buffer_size(display);
len = min(drm_eld_size(crtc_state->eld) / 4, eld_buffer_size);
for (i = 0; i < len; i++)
- intel_de_write(i915, G4X_HDMIW_HDMIEDID, eld[i]);
+ intel_de_write(display, G4X_HDMIW_HDMIEDID, eld[i]);
for (; i < eld_buffer_size; i++)
- intel_de_write(i915, G4X_HDMIW_HDMIEDID, 0);
+ intel_de_write(display, G4X_HDMIW_HDMIEDID, 0);
- drm_WARN_ON(&i915->drm,
- (intel_de_read(i915, G4X_AUD_CNTL_ST) & G4X_ELD_ADDRESS_MASK) != 0);
+ drm_WARN_ON(display->drm,
+ (intel_de_read(display, G4X_AUD_CNTL_ST) & G4X_ELD_ADDRESS_MASK) != 0);
- intel_de_rmw(i915, G4X_AUD_CNTL_ST,
+ intel_de_rmw(display, G4X_AUD_CNTL_ST,
0, G4X_ELD_VALID);
}
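
g4x_audio_codec_enable() above streams the ELD into the hardware buffer one dword at a time and then zero-fills the remaining dwords so no stale data survives. A standalone sketch of that packing and padding, with an in-memory array standing in for the register writes and a made-up buffer size:

/* Sketch of dword-at-a-time ELD programming with zero padding. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ELD_BUFFER_DWORDS 21    /* made-up hardware buffer size */

static void program_eld(const uint8_t *eld, int eld_bytes,
                        uint32_t *hw_buf, int buf_dwords)
{
    uint32_t words[ELD_BUFFER_DWORDS] = { 0 };
    int len = eld_bytes / 4;
    int i;

    if (len > buf_dwords)
        len = buf_dwords;       /* same clamp as the min() in the driver */

    memcpy(words, eld, (size_t)len * 4);

    for (i = 0; i < len; i++)
        hw_buf[i] = words[i];   /* stand-in for the register write */
    for (; i < buf_dwords; i++)
        hw_buf[i] = 0;          /* clear any stale tail */
}

int main(void)
{
    uint8_t eld[20] = { 0x10, 0x00, 0x08, 0x00 };   /* fake 20-byte ELD */
    uint32_t hw[ELD_BUFFER_DWORDS];

    program_eld(eld, (int)sizeof(eld), hw, ELD_BUFFER_DWORDS);
    printf("first dword 0x%08x, last dword 0x%08x\n",
           (unsigned int)hw[0], (unsigned int)hw[ELD_BUFFER_DWORDS - 1]);
    return 0;
}
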
@@ -329,11 +331,11 @@ static void
hsw_dp_audio_config_update(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
/* Enable time stamps. Let HW calculate Maud/Naud values */
- intel_de_rmw(i915, HSW_AUD_CFG(cpu_transcoder),
+ intel_de_rmw(display, HSW_AUD_CFG(cpu_transcoder),
AUD_CONFIG_N_VALUE_INDEX |
AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK |
AUD_CONFIG_UPPER_N_MASK |
@@ -347,8 +349,8 @@ static void
hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = i915->display.audio.component;
+ struct intel_display *display = to_intel_display(encoder);
+ struct i915_audio_component *acomp = display->audio.component;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
int n, rate;
@@ -356,7 +358,7 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
rate = acomp ? acomp->aud_sample_rate[port] : 0;
- tmp = intel_de_read(i915, HSW_AUD_CFG(cpu_transcoder));
+ tmp = intel_de_read(display, HSW_AUD_CFG(cpu_transcoder));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
@@ -364,25 +366,25 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
n = audio_config_hdmi_get_n(crtc_state, rate);
if (n != 0) {
- drm_dbg_kms(&i915->drm, "using N %d\n", n);
+ drm_dbg_kms(display->drm, "using N %d\n", n);
tmp &= ~AUD_CONFIG_N_MASK;
tmp |= AUD_CONFIG_N(n);
tmp |= AUD_CONFIG_N_PROG_ENABLE;
} else {
- drm_dbg_kms(&i915->drm, "using automatic N\n");
+ drm_dbg_kms(display->drm, "using automatic N\n");
}
- intel_de_write(i915, HSW_AUD_CFG(cpu_transcoder), tmp);
+ intel_de_write(display, HSW_AUD_CFG(cpu_transcoder), tmp);
/*
* Let's disable "Enable CTS or M Prog bit"
* and let HW calculate the value
*/
- tmp = intel_de_read(i915, HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
+ tmp = intel_de_read(display, HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
- intel_de_write(i915, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
+ intel_de_write(display, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
}
static void
@@ -399,14 +401,14 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
- mutex_lock(&i915->display.audio.mutex);
+ mutex_lock(&display->audio.mutex);
/* Disable timestamps */
- intel_de_rmw(i915, HSW_AUD_CFG(cpu_transcoder),
+ intel_de_rmw(display, HSW_AUD_CFG(cpu_transcoder),
AUD_CONFIG_N_VALUE_INDEX |
AUD_CONFIG_UPPER_N_MASK |
AUD_CONFIG_LOWER_N_MASK,
@@ -415,26 +417,26 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
AUD_CONFIG_N_VALUE_INDEX : 0));
/* Invalidate ELD */
- intel_de_rmw(i915, HSW_AUD_PIN_ELD_CP_VLD,
+ intel_de_rmw(display, HSW_AUD_PIN_ELD_CP_VLD,
AUDIO_ELD_VALID(cpu_transcoder), 0);
intel_crtc_wait_for_next_vblank(crtc);
intel_crtc_wait_for_next_vblank(crtc);
/* Disable audio presence detect */
- intel_de_rmw(i915, HSW_AUD_PIN_ELD_CP_VLD,
+ intel_de_rmw(display, HSW_AUD_PIN_ELD_CP_VLD,
AUDIO_OUTPUT_ENABLE(cpu_transcoder), 0);
- if (needs_wa_14020863754(i915))
- intel_de_rmw(i915, AUD_CHICKENBIT_REG3, DACBE_DISABLE_MIN_HBLANK_FIX, 0);
+ if (needs_wa_14020863754(display))
+ intel_de_rmw(display, AUD_CHICKENBIT_REG3, DACBE_DISABLE_MIN_HBLANK_FIX, 0);
- mutex_unlock(&i915->display.audio.mutex);
+ mutex_unlock(&display->audio.mutex);
}
static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
unsigned int link_clks_available, link_clks_required;
unsigned int tu_data, tu_line, link_clks_active;
unsigned int h_active, h_total, hblank_delta, pixel_clk;
@@ -446,13 +448,13 @@ static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
h_total = crtc_state->hw.adjusted_mode.crtc_htotal;
pixel_clk = crtc_state->hw.adjusted_mode.crtc_clock;
vdsc_bppx16 = crtc_state->dsc.compressed_bpp_x16;
- cdclk = i915->display.cdclk.hw.cdclk;
+ cdclk = display->cdclk.hw.cdclk;
/* fec= 0.972261, using rounding multiplier of 1000000 */
fec_coeff = 972261;
link_clk = crtc_state->port_clock;
lanes = crtc_state->lane_count;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"h_active = %u link_clk = %u : lanes = %u vdsc_bpp = " FXP_Q4_FMT " cdclk = %u\n",
h_active, link_clk, lanes, FXP_Q4_ARGS(vdsc_bppx16), cdclk);
@@ -497,19 +499,19 @@ static unsigned int calc_samples_room(const struct intel_crtc_state *crtc_state)
static void enable_audio_dsc_wa(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
unsigned int hblank_early_prog, samples_room;
unsigned int val;
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
return;
- val = intel_de_read(i915, AUD_CONFIG_BE);
+ val = intel_de_read(display, AUD_CONFIG_BE);
- if (DISPLAY_VER(i915) == 11)
+ if (DISPLAY_VER(display) == 11)
val |= HBLANK_EARLY_ENABLE_ICL(cpu_transcoder);
- else if (DISPLAY_VER(i915) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
val |= HBLANK_EARLY_ENABLE_TGL(cpu_transcoder);
if (crtc_state->dsc.compression_enable &&
@@ -536,56 +538,58 @@ static void enable_audio_dsc_wa(struct intel_encoder *encoder,
val |= NUMBER_SAMPLES_PER_LINE(cpu_transcoder, 0x0);
}
- intel_de_write(i915, AUD_CONFIG_BE, val);
+ intel_de_write(display, AUD_CONFIG_BE, val);
}
static void hsw_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- mutex_lock(&i915->display.audio.mutex);
+ mutex_lock(&display->audio.mutex);
/* Enable Audio WA for 4k DSC usecases */
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
enable_audio_dsc_wa(encoder, crtc_state);
- if (needs_wa_14020863754(i915))
- intel_de_rmw(i915, AUD_CHICKENBIT_REG3, 0, DACBE_DISABLE_MIN_HBLANK_FIX);
+ if (needs_wa_14020863754(display))
+ intel_de_rmw(display, AUD_CHICKENBIT_REG3, 0, DACBE_DISABLE_MIN_HBLANK_FIX);
/* Enable audio presence detect */
- intel_de_rmw(i915, HSW_AUD_PIN_ELD_CP_VLD,
+ intel_de_rmw(display, HSW_AUD_PIN_ELD_CP_VLD,
0, AUDIO_OUTPUT_ENABLE(cpu_transcoder));
intel_crtc_wait_for_next_vblank(crtc);
/* Invalidate ELD */
- intel_de_rmw(i915, HSW_AUD_PIN_ELD_CP_VLD,
+ intel_de_rmw(display, HSW_AUD_PIN_ELD_CP_VLD,
AUDIO_ELD_VALID(cpu_transcoder), 0);
/*
- * The audio componenent is used to convey the ELD
+ * The audio component is used to convey the ELD
* instead using of the hardware ELD buffer.
*/
/* Enable timestamps */
hsw_audio_config_update(encoder, crtc_state);
- mutex_unlock(&i915->display.audio.mutex);
+ mutex_unlock(&display->audio.mutex);
}
struct ibx_audio_regs {
i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
};
-static void ibx_audio_regs_init(struct drm_i915_private *i915,
+static void ibx_audio_regs_init(struct intel_display *display,
enum pipe pipe,
struct ibx_audio_regs *regs)
{
- if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (display->platform.valleyview || display->platform.cherryview) {
regs->hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
regs->aud_config = VLV_AUD_CFG(pipe);
regs->aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
@@ -607,21 +611,21 @@ static void ibx_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
struct ibx_audio_regs regs;
- if (drm_WARN_ON(&i915->drm, port == PORT_A))
+ if (drm_WARN_ON(display->drm, port == PORT_A))
return;
- ibx_audio_regs_init(i915, pipe, &regs);
+ ibx_audio_regs_init(display, pipe, &regs);
- mutex_lock(&i915->display.audio.mutex);
+ mutex_lock(&display->audio.mutex);
/* Disable timestamps */
- intel_de_rmw(i915, regs.aud_config,
+ intel_de_rmw(display, regs.aud_config,
AUD_CONFIG_N_VALUE_INDEX |
AUD_CONFIG_UPPER_N_MASK |
AUD_CONFIG_LOWER_N_MASK,
@@ -630,10 +634,10 @@ static void ibx_audio_codec_disable(struct intel_encoder *encoder,
AUD_CONFIG_N_VALUE_INDEX : 0));
/* Invalidate ELD */
- intel_de_rmw(i915, regs.aud_cntrl_st2,
+ intel_de_rmw(display, regs.aud_cntrl_st2,
IBX_ELD_VALID(port), 0);
- mutex_unlock(&i915->display.audio.mutex);
+ mutex_unlock(&display->audio.mutex);
intel_crtc_wait_for_next_vblank(crtc);
intel_crtc_wait_for_next_vblank(crtc);
@@ -643,32 +647,32 @@ static void ibx_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
struct ibx_audio_regs regs;
- if (drm_WARN_ON(&i915->drm, port == PORT_A))
+ if (drm_WARN_ON(display->drm, port == PORT_A))
return;
intel_crtc_wait_for_next_vblank(crtc);
- ibx_audio_regs_init(i915, pipe, &regs);
+ ibx_audio_regs_init(display, pipe, &regs);
- mutex_lock(&i915->display.audio.mutex);
+ mutex_lock(&display->audio.mutex);
/* Invalidate ELD */
- intel_de_rmw(i915, regs.aud_cntrl_st2,
+ intel_de_rmw(display, regs.aud_cntrl_st2,
IBX_ELD_VALID(port), 0);
/*
- * The audio componenent is used to convey the ELD
+ * The audio component is used to convey the ELD
* instead using of the hardware ELD buffer.
*/
/* Enable timestamps */
- intel_de_rmw(i915, regs.aud_config,
+ intel_de_rmw(display, regs.aud_config,
AUD_CONFIG_N_VALUE_INDEX |
AUD_CONFIG_N_PROG_ENABLE |
AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK,
@@ -676,7 +680,7 @@ static void ibx_audio_codec_enable(struct intel_encoder *encoder,
AUD_CONFIG_N_VALUE_INDEX :
audio_config_hdmi_pixel_clock(crtc_state)));
- mutex_unlock(&i915->display.audio.mutex);
+ mutex_unlock(&display->audio.mutex);
}
void intel_audio_sdp_split_update(const struct intel_crtc_state *crtc_state)
@@ -693,14 +697,14 @@ bool intel_audio_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_connector *connector = conn_state->connector;
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
mutex_lock(&connector->eld_mutex);
if (!connector->eld[0]) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Bogus ELD on [CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
mutex_unlock(&connector->eld_mutex);
@@ -729,8 +733,8 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = i915->display.audio.component;
+ struct intel_display *display = to_intel_display(encoder);
+ struct i915_audio_component *acomp = display->audio.component;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_connector *connector = to_intel_connector(conn_state->connector);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -740,26 +744,27 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
if (!crtc_state->has_audio)
return;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Enable audio codec on [CRTC:%d:%s], %u bytes ELD\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s][ENCODER:%d:%s] Enable audio codec on [CRTC:%d:%s], %u bytes ELD\n",
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name,
crtc->base.base.id, crtc->base.name,
drm_eld_size(crtc_state->eld));
- if (i915->display.funcs.audio)
- i915->display.funcs.audio->audio_codec_enable(encoder,
+ if (display->funcs.audio)
+ display->funcs.audio->audio_codec_enable(encoder,
crtc_state,
conn_state);
- mutex_lock(&i915->display.audio.mutex);
+ mutex_lock(&display->audio.mutex);
- audio_state = &i915->display.audio.state[cpu_transcoder];
+ audio_state = &display->audio.state[cpu_transcoder];
audio_state->encoder = encoder;
BUILD_BUG_ON(sizeof(audio_state->eld) != sizeof(crtc_state->eld));
memcpy(audio_state->eld, crtc_state->eld, sizeof(audio_state->eld));
- mutex_unlock(&i915->display.audio.mutex);
+ mutex_unlock(&display->audio.mutex);
if (acomp && acomp->base.audio_ops &&
acomp->base.audio_ops->pin_eld_notify) {
@@ -770,7 +775,7 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
(int)port, (int)cpu_transcoder);
}
- intel_lpe_audio_notify(i915, cpu_transcoder, port, crtc_state->eld,
+ intel_lpe_audio_notify(display, cpu_transcoder, port, crtc_state->eld,
crtc_state->port_clock,
intel_crtc_has_dp_encoder(crtc_state));
}
@@ -788,8 +793,8 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = i915->display.audio.component;
+ struct intel_display *display = to_intel_display(encoder);
+ struct i915_audio_component *acomp = display->audio.component;
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
@@ -799,24 +804,25 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
if (!old_crtc_state->has_audio)
return;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Disable audio codec on [CRTC:%d:%s]\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s][ENCODER:%d:%s] Disable audio codec on [CRTC:%d:%s]\n",
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name,
crtc->base.base.id, crtc->base.name);
- if (i915->display.funcs.audio)
- i915->display.funcs.audio->audio_codec_disable(encoder,
+ if (display->funcs.audio)
+ display->funcs.audio->audio_codec_disable(encoder,
old_crtc_state,
old_conn_state);
- mutex_lock(&i915->display.audio.mutex);
+ mutex_lock(&display->audio.mutex);
- audio_state = &i915->display.audio.state[cpu_transcoder];
+ audio_state = &display->audio.state[cpu_transcoder];
audio_state->encoder = NULL;
memset(audio_state->eld, 0, sizeof(audio_state->eld));
- mutex_unlock(&i915->display.audio.mutex);
+ mutex_unlock(&display->audio.mutex);
if (acomp && acomp->base.audio_ops &&
acomp->base.audio_ops->pin_eld_notify) {
@@ -827,36 +833,36 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
(int)port, (int)cpu_transcoder);
}
- intel_lpe_audio_notify(i915, cpu_transcoder, port, NULL, 0, false);
+ intel_lpe_audio_notify(display, cpu_transcoder, port, NULL, 0, false);
}
static void intel_acomp_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
struct intel_audio_state *audio_state;
- mutex_lock(&i915->display.audio.mutex);
+ mutex_lock(&display->audio.mutex);
- audio_state = &i915->display.audio.state[cpu_transcoder];
+ audio_state = &display->audio.state[cpu_transcoder];
if (audio_state->encoder)
memcpy(crtc_state->eld, audio_state->eld, sizeof(audio_state->eld));
- mutex_unlock(&i915->display.audio.mutex);
+ mutex_unlock(&display->audio.mutex);
}
void intel_audio_codec_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
if (!crtc_state->has_audio)
return;
- if (i915->display.funcs.audio)
- i915->display.funcs.audio->audio_codec_get_config(encoder, crtc_state);
+ if (display->funcs.audio)
+ display->funcs.audio->audio_codec_get_config(encoder, crtc_state);
}
static const struct intel_audio_funcs g4x_audio_funcs = {
@@ -879,17 +885,19 @@ static const struct intel_audio_funcs hsw_audio_funcs = {
/**
* intel_audio_hooks_init - Set up chip specific audio hooks
- * @i915: device private
+ * @display: display device
*/
-void intel_audio_hooks_init(struct drm_i915_private *i915)
+void intel_audio_hooks_init(struct intel_display *display)
{
- if (IS_G4X(i915))
- i915->display.funcs.audio = &g4x_audio_funcs;
- else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915) ||
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (display->platform.g4x)
+ display->funcs.audio = &g4x_audio_funcs;
+ else if (display->platform.valleyview || display->platform.cherryview ||
HAS_PCH_CPT(i915) || HAS_PCH_IBX(i915))
- i915->display.funcs.audio = &ibx_audio_funcs;
- else if (IS_HASWELL(i915) || DISPLAY_VER(i915) >= 8)
- i915->display.funcs.audio = &hsw_audio_funcs;
+ display->funcs.audio = &ibx_audio_funcs;
+ else if (display->platform.haswell || DISPLAY_VER(display) >= 8)
+ display->funcs.audio = &hsw_audio_funcs;
}
struct aud_ts_cdclk_m_n {
@@ -897,10 +905,10 @@ struct aud_ts_cdclk_m_n {
u16 n;
};
-void intel_audio_cdclk_change_pre(struct drm_i915_private *i915)
+void intel_audio_cdclk_change_pre(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 13)
- intel_de_rmw(i915, AUD_TS_CDCLK_M, AUD_TS_CDCLK_M_EN, 0);
+ if (DISPLAY_VER(display) >= 13)
+ intel_de_rmw(display, AUD_TS_CDCLK_M, AUD_TS_CDCLK_M_EN, 0);
}
static void get_aud_ts_cdclk_m_n(int refclk, int cdclk, struct aud_ts_cdclk_m_n *aud_ts)
@@ -909,16 +917,18 @@ static void get_aud_ts_cdclk_m_n(int refclk, int cdclk, struct aud_ts_cdclk_m_n
aud_ts->n = cdclk * aud_ts->m / 24000;
}
-void intel_audio_cdclk_change_post(struct drm_i915_private *i915)
+void intel_audio_cdclk_change_post(struct intel_display *display)
{
struct aud_ts_cdclk_m_n aud_ts;
- if (DISPLAY_VER(i915) >= 13) {
- get_aud_ts_cdclk_m_n(i915->display.cdclk.hw.ref, i915->display.cdclk.hw.cdclk, &aud_ts);
+ if (DISPLAY_VER(display) >= 13) {
+ get_aud_ts_cdclk_m_n(display->cdclk.hw.ref,
+ display->cdclk.hw.cdclk, &aud_ts);
- intel_de_write(i915, AUD_TS_CDCLK_N, aud_ts.n);
- intel_de_write(i915, AUD_TS_CDCLK_M, aud_ts.m | AUD_TS_CDCLK_M_EN);
- drm_dbg_kms(&i915->drm, "aud_ts_cdclk set to M=%u, N=%u\n", aud_ts.m, aud_ts.n);
+ intel_de_write(display, AUD_TS_CDCLK_N, aud_ts.n);
+ intel_de_write(display, AUD_TS_CDCLK_M, aud_ts.m | AUD_TS_CDCLK_M_EN);
+ drm_dbg_kms(display->drm, "aud_ts_cdclk set to M=%u, N=%u\n",
+ aud_ts.m, aud_ts.n);
}
}
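
On display version 13+ the audio timestamp M/N pair is reprogrammed after every cdclk change. Only the N relation is visible in this hunk (n = cdclk * m / 24000, clocks in kHz), so the sketch below treats M as a given input rather than deriving it from the reference clock:

/* Sketch of the visible N relation only; the M derivation is not in the hunk. */
#include <stdio.h>

struct aud_ts_m_n {
    unsigned int m;
    unsigned int n;
};

/* n = cdclk * m / 24000, all clocks in kHz, as in the hunk above. */
static void compute_n(unsigned int cdclk_khz, struct aud_ts_m_n *ts)
{
    ts->n = cdclk_khz * ts->m / 24000;
}

int main(void)
{
    /* Purely hypothetical M value, only to show the arithmetic. */
    struct aud_ts_m_n ts = { .m = 60 };

    compute_n(480000, &ts);     /* 480000 * 60 / 24000 = 1200 */
    printf("M=%u N=%u\n", ts.m, ts.n);
    return 0;
}
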
@@ -943,7 +953,7 @@ static int glk_force_audio_cdclk_commit(struct intel_atomic_state *state,
return drm_atomic_commit(&state->base);
}
-static void glk_force_audio_cdclk(struct drm_i915_private *i915,
+static void glk_force_audio_cdclk(struct intel_display *display,
bool enable)
{
struct drm_modeset_acquire_ctx ctx;
@@ -951,13 +961,13 @@ static void glk_force_audio_cdclk(struct drm_i915_private *i915,
struct intel_crtc *crtc;
int ret;
- crtc = intel_first_crtc(i915);
+ crtc = intel_first_crtc(display);
if (!crtc)
return;
drm_modeset_acquire_init(&ctx, 0);
- state = drm_atomic_state_alloc(&i915->drm);
- if (drm_WARN_ON(&i915->drm, !state))
+ state = drm_atomic_state_alloc(display->drm);
+ if (drm_WARN_ON(display->drm, !state))
return;
state->acquire_ctx = &ctx;
@@ -972,7 +982,7 @@ retry:
goto retry;
}
- drm_WARN_ON(&i915->drm, ret);
+ drm_WARN_ON(display->drm, ret);
drm_atomic_state_put(state);
@@ -983,7 +993,6 @@ retry:
int intel_audio_min_cdclk(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int min_cdclk = 0;
if (!crtc_state->has_audio)
@@ -1000,7 +1009,7 @@ int intel_audio_min_cdclk(const struct intel_crtc_state *crtc_state)
if (DISPLAY_VER(display) == 10) {
/* Display WA #1145: glk */
min_cdclk = max(min_cdclk, 316800);
- } else if (DISPLAY_VER(display) == 9 || IS_BROADWELL(dev_priv)) {
+ } else if (DISPLAY_VER(display) == 9 || display->platform.broadwell) {
/* Display WA #1144: skl,bxt */
min_cdclk = max(min_cdclk, 432000);
}
@@ -1020,99 +1029,95 @@ int intel_audio_min_cdclk(const struct intel_crtc_state *crtc_state)
* 270 | 320 or higher
* 162 | 200 or higher"
*/
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if ((display->platform.valleyview || display->platform.cherryview) &&
intel_crtc_has_dp_encoder(crtc_state))
min_cdclk = max(min_cdclk, crtc_state->port_clock);
return min_cdclk;
}
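
intel_audio_min_cdclk() stacks several floors on top of each other; the sketch below reproduces only the branches visible in this hunk (display workarounds #1145 and #1144, plus the VLV/CHV rule that cdclk may not drop below the DP port clock), with plain booleans standing in for the platform checks:

/* Sketch of the audio minimum-cdclk floors visible in the hunk above. */
#include <stdbool.h>
#include <stdio.h>

static int max_int(int a, int b) { return a > b ? a : b; }

static int audio_min_cdclk(bool has_audio, int display_ver, bool is_broadwell,
                           bool is_vlv_chv, bool has_dp, int port_clock_khz)
{
    int min_cdclk = 0;

    if (!has_audio)
        return 0;

    if (display_ver == 10)
        min_cdclk = max_int(min_cdclk, 316800);     /* Display WA #1145 */
    else if (display_ver == 9 || is_broadwell)
        min_cdclk = max_int(min_cdclk, 432000);     /* Display WA #1144 */

    /* VLV/CHV: cdclk must not drop below the DP port clock. */
    if (is_vlv_chv && has_dp)
        min_cdclk = max_int(min_cdclk, port_clock_khz);

    return min_cdclk;
}

int main(void)
{
    printf("%d\n", audio_min_cdclk(true, 10, false, false, false, 0));
    printf("%d\n", audio_min_cdclk(true, 8, false, true, true, 270000));
    return 0;
}
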
-static unsigned long i915_audio_component_get_power(struct device *kdev)
+static unsigned long intel_audio_component_get_power(struct device *kdev)
{
struct intel_display *display = to_intel_display(kdev);
- struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref;
/* Catch potential impedance mismatches before they occur! */
BUILD_BUG_ON(sizeof(intel_wakeref_t) > sizeof(unsigned long));
- wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUDIO_PLAYBACK);
+ wakeref = intel_display_power_get(display, POWER_DOMAIN_AUDIO_PLAYBACK);
- if (i915->display.audio.power_refcount++ == 0) {
- if (DISPLAY_VER(i915) >= 9) {
- intel_de_write(i915, AUD_FREQ_CNTRL,
- i915->display.audio.freq_cntrl);
- drm_dbg_kms(&i915->drm,
+ if (display->audio.power_refcount++ == 0) {
+ if (DISPLAY_VER(display) >= 9) {
+ intel_de_write(display, AUD_FREQ_CNTRL,
+ display->audio.freq_cntrl);
+ drm_dbg_kms(display->drm,
"restored AUD_FREQ_CNTRL to 0x%x\n",
- i915->display.audio.freq_cntrl);
+ display->audio.freq_cntrl);
}
/* Force CDCLK to 2*BCLK as long as we need audio powered. */
- if (IS_GEMINILAKE(i915))
- glk_force_audio_cdclk(i915, true);
+ if (display->platform.geminilake)
+ glk_force_audio_cdclk(display, true);
- if (DISPLAY_VER(i915) >= 10)
- intel_de_rmw(i915, AUD_PIN_BUF_CTL,
+ if (DISPLAY_VER(display) >= 10)
+ intel_de_rmw(display, AUD_PIN_BUF_CTL,
0, AUD_PIN_BUF_ENABLE);
}
return (unsigned long)wakeref;
}
-static void i915_audio_component_put_power(struct device *kdev,
- unsigned long cookie)
+static void intel_audio_component_put_power(struct device *kdev,
+ unsigned long cookie)
{
struct intel_display *display = to_intel_display(kdev);
- struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref = (intel_wakeref_t)cookie;
/* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
- if (--i915->display.audio.power_refcount == 0)
- if (IS_GEMINILAKE(i915))
- glk_force_audio_cdclk(i915, false);
+ if (--display->audio.power_refcount == 0)
+ if (display->platform.geminilake)
+ glk_force_audio_cdclk(display, false);
- intel_display_power_put(i915, POWER_DOMAIN_AUDIO_PLAYBACK, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_AUDIO_PLAYBACK, wakeref);
}
-static void i915_audio_component_codec_wake_override(struct device *kdev,
- bool enable)
+static void intel_audio_component_codec_wake_override(struct device *kdev,
+ bool enable)
{
struct intel_display *display = to_intel_display(kdev);
- struct drm_i915_private *i915 = to_i915(display->drm);
unsigned long cookie;
- if (DISPLAY_VER(i915) < 9)
+ if (DISPLAY_VER(display) < 9)
return;
- cookie = i915_audio_component_get_power(kdev);
+ cookie = intel_audio_component_get_power(kdev);
/*
* Enable/disable generating the codec wake signal, overriding the
* internal logic to generate the codec wake to controller.
*/
- intel_de_rmw(i915, HSW_AUD_CHICKENBIT,
+ intel_de_rmw(display, HSW_AUD_CHICKENBIT,
SKL_AUD_CODEC_WAKE_SIGNAL, 0);
usleep_range(1000, 1500);
if (enable) {
- intel_de_rmw(i915, HSW_AUD_CHICKENBIT,
+ intel_de_rmw(display, HSW_AUD_CHICKENBIT,
0, SKL_AUD_CODEC_WAKE_SIGNAL);
usleep_range(1000, 1500);
}
- i915_audio_component_put_power(kdev, cookie);
+ intel_audio_component_put_power(kdev, cookie);
}
/* Get CDCLK in kHz */
-static int i915_audio_component_get_cdclk_freq(struct device *kdev)
+static int intel_audio_component_get_cdclk_freq(struct device *kdev)
{
struct intel_display *display = to_intel_display(kdev);
- struct drm_i915_private *i915 = to_i915(display->drm);
- if (drm_WARN_ON_ONCE(&i915->drm, !HAS_DDI(i915)))
+ if (drm_WARN_ON_ONCE(display->drm, !HAS_DDI(display)))
return -ENODEV;
- return i915->display.cdclk.hw.cdclk;
+ return display->cdclk.hw.cdclk;
}
/*
@@ -1124,7 +1129,7 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
* will get the right intel_encoder with port matched
* Non-MST & (cpu_transcoder < 0): get the right intel_encoder with port matched
*/
-static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915,
+static struct intel_audio_state *find_audio_state(struct intel_display *display,
int port, int cpu_transcoder)
{
/* MST */
@@ -1132,11 +1137,11 @@ static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915,
struct intel_audio_state *audio_state;
struct intel_encoder *encoder;
- if (drm_WARN_ON(&i915->drm,
- cpu_transcoder >= ARRAY_SIZE(i915->display.audio.state)))
+ if (drm_WARN_ON(display->drm,
+ cpu_transcoder >= ARRAY_SIZE(display->audio.state)))
return NULL;
- audio_state = &i915->display.audio.state[cpu_transcoder];
+ audio_state = &display->audio.state[cpu_transcoder];
encoder = audio_state->encoder;
if (encoder && encoder->port == port &&
@@ -1148,11 +1153,11 @@ static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915,
if (cpu_transcoder > 0)
return NULL;
- for_each_cpu_transcoder(i915, cpu_transcoder) {
+ for_each_cpu_transcoder(display, cpu_transcoder) {
struct intel_audio_state *audio_state;
struct intel_encoder *encoder;
- audio_state = &i915->display.audio.state[cpu_transcoder];
+ audio_state = &display->audio.state[cpu_transcoder];
encoder = audio_state->encoder;
if (encoder && encoder->port == port &&
@@ -1163,27 +1168,27 @@ static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915,
return NULL;
}
-static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
- int cpu_transcoder, int rate)
+static int intel_audio_component_sync_audio_rate(struct device *kdev, int port,
+ int cpu_transcoder, int rate)
{
struct intel_display *display = to_intel_display(kdev);
- struct drm_i915_private *i915 = to_i915(display->drm);
- struct i915_audio_component *acomp = i915->display.audio.component;
+ struct i915_audio_component *acomp = display->audio.component;
const struct intel_audio_state *audio_state;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
unsigned long cookie;
int err = 0;
- if (!HAS_DDI(i915))
+ if (!HAS_DDI(display))
return 0;
- cookie = i915_audio_component_get_power(kdev);
- mutex_lock(&i915->display.audio.mutex);
+ cookie = intel_audio_component_get_power(kdev);
+ mutex_lock(&display->audio.mutex);
- audio_state = find_audio_state(i915, port, cpu_transcoder);
+ audio_state = find_audio_state(display, port, cpu_transcoder);
if (!audio_state) {
- drm_dbg_kms(&i915->drm, "Not valid for port %c\n", port_name(port));
+ drm_dbg_kms(display->drm, "Not valid for port %c\n",
+ port_name(port));
err = -ENODEV;
goto unlock;
}
@@ -1200,26 +1205,26 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
hsw_audio_config_update(encoder, crtc->config);
unlock:
- mutex_unlock(&i915->display.audio.mutex);
- i915_audio_component_put_power(kdev, cookie);
+ mutex_unlock(&display->audio.mutex);
+ intel_audio_component_put_power(kdev, cookie);
return err;
}
-static int i915_audio_component_get_eld(struct device *kdev, int port,
- int cpu_transcoder, bool *enabled,
- unsigned char *buf, int max_bytes)
+static int intel_audio_component_get_eld(struct device *kdev, int port,
+ int cpu_transcoder, bool *enabled,
+ unsigned char *buf, int max_bytes)
{
struct intel_display *display = to_intel_display(kdev);
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct intel_audio_state *audio_state;
int ret = 0;
- mutex_lock(&i915->display.audio.mutex);
+ mutex_lock(&display->audio.mutex);
- audio_state = find_audio_state(i915, port, cpu_transcoder);
+ audio_state = find_audio_state(display, port, cpu_transcoder);
if (!audio_state) {
- drm_dbg_kms(&i915->drm, "Not valid for port %c\n", port_name(port));
- mutex_unlock(&i915->display.audio.mutex);
+ drm_dbg_kms(display->drm, "Not valid for port %c\n",
+ port_name(port));
+ mutex_unlock(&display->audio.mutex);
return -EINVAL;
}
@@ -1231,71 +1236,70 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
memcpy(buf, eld, min(max_bytes, ret));
}
- mutex_unlock(&i915->display.audio.mutex);
+ mutex_unlock(&display->audio.mutex);
return ret;
}
-static const struct drm_audio_component_ops i915_audio_component_ops = {
- .owner = THIS_MODULE,
- .get_power = i915_audio_component_get_power,
- .put_power = i915_audio_component_put_power,
- .codec_wake_override = i915_audio_component_codec_wake_override,
- .get_cdclk_freq = i915_audio_component_get_cdclk_freq,
- .sync_audio_rate = i915_audio_component_sync_audio_rate,
- .get_eld = i915_audio_component_get_eld,
+static const struct drm_audio_component_ops intel_audio_component_ops = {
+ .owner = THIS_MODULE,
+ .get_power = intel_audio_component_get_power,
+ .put_power = intel_audio_component_put_power,
+ .codec_wake_override = intel_audio_component_codec_wake_override,
+ .get_cdclk_freq = intel_audio_component_get_cdclk_freq,
+ .sync_audio_rate = intel_audio_component_sync_audio_rate,
+ .get_eld = intel_audio_component_get_eld,
};
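
The renamed ops table keeps the power hooks paired: get_power() returns an opaque cookie (a wakeref cast to unsigned long) that the caller must hand back to put_power(), exactly as intel_audio_component_codec_wake_override() does above. A minimal sketch of that cookie discipline, with a plain refcount standing in for the display power domain:

/* Sketch of the get_power()/put_power() cookie pairing. */
#include <stdio.h>

static int power_refcount;

/* Returns an opaque cookie the caller must pass back to put_power(). */
static unsigned long get_power(void)
{
    power_refcount++;
    return 0xc0ffee;    /* stand-in for a wakeref cast to unsigned long */
}

static void put_power(unsigned long cookie)
{
    (void)cookie;       /* a real wakeref would be released here */
    power_refcount--;
}

static void codec_wake_override(void)
{
    unsigned long cookie = get_power();

    /* ... poke the codec wake signal while the power domain is held ... */

    put_power(cookie);
}

int main(void)
{
    codec_wake_override();
    printf("refcount after override: %d\n", power_refcount);   /* 0 */
    return 0;
}
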
-static int i915_audio_component_bind(struct device *drv_kdev,
- struct device *hda_kdev, void *data)
+static int intel_audio_component_bind(struct device *drv_kdev,
+ struct device *hda_kdev, void *data)
{
struct intel_display *display = to_intel_display(drv_kdev);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_audio_component *acomp = data;
int i;
- if (drm_WARN_ON(&i915->drm, acomp->base.ops || acomp->base.dev))
+ if (drm_WARN_ON(display->drm, acomp->base.ops || acomp->base.dev))
return -EEXIST;
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
!device_link_add(hda_kdev, drv_kdev,
DL_FLAG_STATELESS)))
return -ENOMEM;
- drm_modeset_lock_all(&i915->drm);
- acomp->base.ops = &i915_audio_component_ops;
+ drm_modeset_lock_all(display->drm);
+ acomp->base.ops = &intel_audio_component_ops;
acomp->base.dev = drv_kdev;
BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
acomp->aud_sample_rate[i] = 0;
- i915->display.audio.component = acomp;
- drm_modeset_unlock_all(&i915->drm);
+ display->audio.component = acomp;
+ drm_modeset_unlock_all(display->drm);
return 0;
}
-static void i915_audio_component_unbind(struct device *drv_kdev,
- struct device *hda_kdev, void *data)
+static void intel_audio_component_unbind(struct device *drv_kdev,
+ struct device *hda_kdev, void *data)
{
struct intel_display *display = to_intel_display(drv_kdev);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_audio_component *acomp = data;
- drm_modeset_lock_all(&i915->drm);
+ drm_modeset_lock_all(display->drm);
acomp->base.ops = NULL;
acomp->base.dev = NULL;
- i915->display.audio.component = NULL;
- drm_modeset_unlock_all(&i915->drm);
+ display->audio.component = NULL;
+ drm_modeset_unlock_all(display->drm);
device_link_remove(hda_kdev, drv_kdev);
- if (i915->display.audio.power_refcount)
- drm_err(&i915->drm, "audio power refcount %d after unbind\n",
- i915->display.audio.power_refcount);
+ if (display->audio.power_refcount)
+ drm_err(display->drm,
+ "audio power refcount %d after unbind\n",
+ display->audio.power_refcount);
}
-static const struct component_ops i915_audio_component_bind_ops = {
- .bind = i915_audio_component_bind,
- .unbind = i915_audio_component_unbind,
+static const struct component_ops intel_audio_component_bind_ops = {
+ .bind = intel_audio_component_bind,
+ .unbind = intel_audio_component_unbind,
};
#define AUD_FREQ_TMODE_SHIFT 14
@@ -1308,8 +1312,8 @@ static const struct component_ops i915_audio_component_bind_ops = {
#define AUD_FREQ_TGL_BROKEN (AUD_FREQ_8T | AUD_FREQ_PULLCLKS(2) | AUD_FREQ_BCLK_96M)
/**
- * i915_audio_component_init - initialize and register the audio component
- * @i915: i915 device instance
+ * intel_audio_component_init - initialize and register the audio component
+ * @display: display device
*
* This will register with the component framework a child component which
* will bind dynamically to the snd_hda_intel driver's corresponding master
@@ -1323,93 +1327,93 @@ static const struct component_ops i915_audio_component_bind_ops = {
* We ignore any error during registration and continue with reduced
* functionality (i.e. without HDMI audio).
*/
-static void i915_audio_component_init(struct drm_i915_private *i915)
+static void intel_audio_component_init(struct intel_display *display)
{
u32 aud_freq, aud_freq_init;
- if (DISPLAY_VER(i915) >= 9) {
- aud_freq_init = intel_de_read(i915, AUD_FREQ_CNTRL);
+ if (DISPLAY_VER(display) >= 9) {
+ aud_freq_init = intel_de_read(display, AUD_FREQ_CNTRL);
- if (DISPLAY_VER(i915) >= 12)
+ if (DISPLAY_VER(display) >= 12)
aud_freq = AUD_FREQ_GEN12;
else
aud_freq = aud_freq_init;
/* use BIOS provided value for TGL and RKL unless it is a known bad value */
- if ((IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915)) &&
+ if ((display->platform.tigerlake || display->platform.rocketlake) &&
aud_freq_init != AUD_FREQ_TGL_BROKEN)
aud_freq = aud_freq_init;
- drm_dbg_kms(&i915->drm, "use AUD_FREQ_CNTRL of 0x%x (init value 0x%x)\n",
+ drm_dbg_kms(display->drm,
+ "use AUD_FREQ_CNTRL of 0x%x (init value 0x%x)\n",
aud_freq, aud_freq_init);
- i915->display.audio.freq_cntrl = aud_freq;
+ display->audio.freq_cntrl = aud_freq;
}
/* init with current cdclk */
- intel_audio_cdclk_change_post(i915);
+ intel_audio_cdclk_change_post(display);
}
-static void i915_audio_component_register(struct drm_i915_private *i915)
+static void intel_audio_component_register(struct intel_display *display)
{
int ret;
- ret = component_add_typed(i915->drm.dev,
- &i915_audio_component_bind_ops,
+ ret = component_add_typed(display->drm->dev,
+ &intel_audio_component_bind_ops,
I915_COMPONENT_AUDIO);
if (ret < 0) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"failed to add audio component (%d)\n", ret);
/* continue with reduced functionality */
return;
}
- i915->display.audio.component_registered = true;
+ display->audio.component_registered = true;
}
/**
- * i915_audio_component_cleanup - deregister the audio component
- * @i915: i915 device instance
+ * intel_audio_component_cleanup - deregister the audio component
+ * @display: display device
*
* Deregisters the audio component, breaking any existing binding to the
* corresponding snd_hda_intel driver's master component.
*/
-static void i915_audio_component_cleanup(struct drm_i915_private *i915)
+static void intel_audio_component_cleanup(struct intel_display *display)
{
- if (!i915->display.audio.component_registered)
+ if (!display->audio.component_registered)
return;
- component_del(i915->drm.dev, &i915_audio_component_bind_ops);
- i915->display.audio.component_registered = false;
+ component_del(display->drm->dev, &intel_audio_component_bind_ops);
+ display->audio.component_registered = false;
}
/**
* intel_audio_init() - Initialize the audio driver either using
* component framework or using lpe audio bridge
- * @i915: the i915 drm device private data
+ * @display: display device
*
*/
-void intel_audio_init(struct drm_i915_private *i915)
+void intel_audio_init(struct intel_display *display)
{
- if (intel_lpe_audio_init(i915) < 0)
- i915_audio_component_init(i915);
+ if (intel_lpe_audio_init(display) < 0)
+ intel_audio_component_init(display);
}
-void intel_audio_register(struct drm_i915_private *i915)
+void intel_audio_register(struct intel_display *display)
{
- if (!i915->display.audio.lpe.platdev)
- i915_audio_component_register(i915);
+ if (!display->audio.lpe.platdev)
+ intel_audio_component_register(display);
}
/**
* intel_audio_deinit() - deinitialize the audio driver
- * @i915: the i915 drm device private data
- *
+ * @display: display device
*/
-void intel_audio_deinit(struct drm_i915_private *i915)
+void intel_audio_deinit(struct intel_display *display)
{
- if (i915->display.audio.lpe.platdev != NULL)
- intel_lpe_audio_teardown(i915);
+ if (display->audio.lpe.platdev)
+ intel_lpe_audio_teardown(display);
else
- i915_audio_component_cleanup(i915);
+ intel_audio_component_cleanup(display);
}
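
Taken together, the intel_audio.c hunks apply one mechanical pattern: derive a struct intel_display pointer with to_intel_display() instead of a drm_i915_private pointer with to_i915(), and route register access, locking and debug prints through the narrower structure. A generic, runnable sketch of why the narrower handle is preferable, using hypothetical types:

/* Generic sketch of narrowing a driver-wide private to a display struct. */
#include <stdio.h>

struct fake_display {
    const char *name;           /* only what display code actually needs */
};

struct fake_device_private {
    struct fake_display display;
    int unrelated_gt_state;     /* state display code should not touch */
};

/* After the conversion, helpers take only the display pointer. */
static void log_display(const struct fake_display *display, const char *msg)
{
    printf("[%s] %s\n", display->name, msg);
}

int main(void)
{
    struct fake_device_private dev = {
        .display = { .name = "display0" },
        .unrelated_gt_state = 42,
    };

    /* Callers pass &dev.display instead of the whole device private. */
    log_display(&dev.display, "audio codec enabled");
    return 0;
}

The design win is the same as in the patch itself: display-only code can no longer reach into unrelated driver state, and each helper advertises exactly what it needs.
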
diff --git a/drivers/gpu/drm/i915/display/intel_audio.h b/drivers/gpu/drm/i915/display/intel_audio.h
index 1bafc155434a..ad49eefa7182 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.h
+++ b/drivers/gpu/drm/i915/display/intel_audio.h
@@ -9,11 +9,11 @@
#include <linux/types.h>
struct drm_connector_state;
-struct drm_i915_private;
struct intel_crtc_state;
+struct intel_display;
struct intel_encoder;
-void intel_audio_hooks_init(struct drm_i915_private *dev_priv);
+void intel_audio_hooks_init(struct intel_display *display);
bool intel_audio_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state);
@@ -25,12 +25,12 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state);
void intel_audio_codec_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state);
-void intel_audio_cdclk_change_pre(struct drm_i915_private *dev_priv);
-void intel_audio_cdclk_change_post(struct drm_i915_private *dev_priv);
+void intel_audio_cdclk_change_pre(struct intel_display *display);
+void intel_audio_cdclk_change_post(struct intel_display *display);
int intel_audio_min_cdclk(const struct intel_crtc_state *crtc_state);
-void intel_audio_init(struct drm_i915_private *dev_priv);
-void intel_audio_register(struct drm_i915_private *i915);
-void intel_audio_deinit(struct drm_i915_private *dev_priv);
+void intel_audio_init(struct intel_display *display);
+void intel_audio_register(struct intel_display *display);
+void intel_audio_deinit(struct intel_display *display);
void intel_audio_sdp_split_update(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_AUDIO_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 7e6ce905bdaf..178dc6c8de80 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -104,20 +104,20 @@ u32 intel_backlight_invert_pwm_level(struct intel_connector *connector, u32 val)
void intel_backlight_set_pwm_level(const struct drm_connector_state *conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] set backlight PWM = %d\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] set backlight PWM = %d\n",
connector->base.base.id, connector->base.name, val);
panel->backlight.pwm_funcs->set(conn_state, val);
}
u32 intel_backlight_level_to_pwm(struct intel_connector *connector, u32 val)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
- drm_WARN_ON_ONCE(&i915->drm,
+ drm_WARN_ON_ONCE(display->drm,
panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
val = scale(val, panel->backlight.min, panel->backlight.max,
@@ -145,32 +145,33 @@ u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val)
static u32 lpt_get_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
- return intel_de_read(i915, BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
+ return intel_de_read(display, BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
}
static u32 pch_get_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
- return intel_de_read(i915, BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ return intel_de_read(display, BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
}
static u32 i9xx_get_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 val;
- val = intel_de_read(i915, BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
- if (DISPLAY_VER(i915) < 4)
+ val = intel_de_read(display, BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ if (DISPLAY_VER(display) < 4)
val >>= 1;
if (panel->backlight.combination_mode) {
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
u8 lbpc;
- pci_read_config_byte(to_pci_dev(i915->drm.dev), LBPC, &lbpc);
+ pci_read_config_byte(pdev, LBPC, &lbpc);
val *= lbpc;
}
@@ -179,20 +180,20 @@ static u32 i9xx_get_backlight(struct intel_connector *connector, enum pipe unused)
static u32 vlv_get_backlight(struct intel_connector *connector, enum pipe pipe)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
- if (drm_WARN_ON(&i915->drm, pipe != PIPE_A && pipe != PIPE_B))
+ if (drm_WARN_ON(display->drm, pipe != PIPE_A && pipe != PIPE_B))
return 0;
- return intel_de_read(i915, VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
+ return intel_de_read(display, VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
}
static u32 bxt_get_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
- return intel_de_read(i915, BXT_BLC_PWM_DUTY(panel->backlight.controller));
+ return intel_de_read(display, BXT_BLC_PWM_DUTY(panel->backlight.controller));
}
static u32 ext_pwm_get_backlight(struct intel_connector *connector, enum pipe unused)
@@ -207,69 +208,70 @@ static u32 ext_pwm_get_backlight(struct intel_connector *connector, enum pipe un
static void lpt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
u32 val;
- val = intel_de_read(i915, BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- intel_de_write(i915, BLC_PWM_PCH_CTL2, val | level);
+ val = intel_de_read(display, BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(display, BLC_PWM_PCH_CTL2, val | level);
}
static void pch_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
u32 tmp;
- tmp = intel_de_read(i915, BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- intel_de_write(i915, BLC_PWM_CPU_CTL, tmp | level);
+ tmp = intel_de_read(display, BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(display, BLC_PWM_CPU_CTL, tmp | level);
}
static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 tmp, mask;
- drm_WARN_ON(&i915->drm, panel->backlight.pwm_level_max == 0);
+ drm_WARN_ON(display->drm, panel->backlight.pwm_level_max == 0);
if (panel->backlight.combination_mode) {
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
u8 lbpc;
lbpc = level * 0xfe / panel->backlight.pwm_level_max + 1;
level /= lbpc;
- pci_write_config_byte(to_pci_dev(i915->drm.dev), LBPC, lbpc);
+ pci_write_config_byte(pdev, LBPC, lbpc);
}
- if (DISPLAY_VER(i915) == 4) {
+ if (DISPLAY_VER(display) == 4) {
mask = BACKLIGHT_DUTY_CYCLE_MASK;
} else {
level <<= 1;
mask = BACKLIGHT_DUTY_CYCLE_MASK_PNV;
}
- tmp = intel_de_read(i915, BLC_PWM_CTL) & ~mask;
- intel_de_write(i915, BLC_PWM_CTL, tmp | level);
+ tmp = intel_de_read(display, BLC_PWM_CTL) & ~mask;
+ intel_de_write(display, BLC_PWM_CTL, tmp | level);
}
static void vlv_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
u32 tmp;
- tmp = intel_de_read(i915, VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- intel_de_write(i915, VLV_BLC_PWM_CTL(pipe), tmp | level);
+ tmp = intel_de_read(display, VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(display, VLV_BLC_PWM_CTL(pipe), tmp | level);
}
static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
- intel_de_write(i915, BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
+ intel_de_write(display, BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
}
static void ext_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
@@ -284,10 +286,10 @@ static void
intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] set backlight level = %d\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] set backlight level = %d\n",
connector->base.base.id, connector->base.name, level);
panel->backlight.funcs->set(conn_state, level);
@@ -300,7 +302,7 @@ void intel_backlight_set_acpi(const struct drm_connector_state *conn_state,
u32 user_level, u32 user_max)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 hw_level;
@@ -313,9 +315,9 @@ void intel_backlight_set_acpi(const struct drm_connector_state *conn_state,
if (!panel->backlight.present || !conn_state->crtc)
return;
- mutex_lock(&i915->display.backlight.lock);
+ mutex_lock(&display->backlight.lock);
- drm_WARN_ON(&i915->drm, panel->backlight.max == 0);
+ drm_WARN_ON(display->drm, panel->backlight.max == 0);
hw_level = clamp_user_to_hw(connector, user_level, user_max);
panel->backlight.level = hw_level;
@@ -329,13 +331,13 @@ void intel_backlight_set_acpi(const struct drm_connector_state *conn_state,
if (panel->backlight.enabled)
intel_panel_actually_set_backlight(conn_state, hw_level);
- mutex_unlock(&i915->display.backlight.lock);
+ mutex_unlock(&display->backlight.lock);
}
static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
u32 tmp;
intel_backlight_set_pwm_level(old_conn_state, level);
@@ -348,26 +350,26 @@ static void lpt_disable_backlight(const struct drm_connector_state *old_conn_sta
* This needs rework if we need to add support for CPU PWM on PCH split
* platforms.
*/
- tmp = intel_de_read(i915, BLC_PWM_CPU_CTL2);
+ tmp = intel_de_read(display, BLC_PWM_CPU_CTL2);
if (tmp & BLM_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] CPU backlight was enabled, disabling\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] CPU backlight was enabled, disabling\n",
connector->base.base.id, connector->base.name);
- intel_de_write(i915, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
+ intel_de_write(display, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
}
- intel_de_rmw(i915, BLC_PWM_PCH_CTL1, BLM_PCH_PWM_ENABLE, 0);
+ intel_de_rmw(display, BLC_PWM_PCH_CTL1, BLM_PCH_PWM_ENABLE, 0);
}
static void pch_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
intel_backlight_set_pwm_level(old_conn_state, val);
- intel_de_rmw(i915, BLC_PWM_CPU_CTL2, BLM_PWM_ENABLE, 0);
+ intel_de_rmw(display, BLC_PWM_CPU_CTL2, BLM_PWM_ENABLE, 0);
- intel_de_rmw(i915, BLC_PWM_PCH_CTL1, BLM_PCH_PWM_ENABLE, 0);
+ intel_de_rmw(display, BLC_PWM_PCH_CTL1, BLM_PCH_PWM_ENABLE, 0);
}
static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
@@ -377,48 +379,49 @@ static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_st
static void i965_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
- struct drm_i915_private *i915 = to_i915(old_conn_state->connector->dev);
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct intel_display *display = to_intel_display(connector);
intel_backlight_set_pwm_level(old_conn_state, val);
- intel_de_rmw(i915, BLC_PWM_CTL2, BLM_PWM_ENABLE, 0);
+ intel_de_rmw(display, BLC_PWM_CTL2, BLM_PWM_ENABLE, 0);
}
static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
enum pipe pipe = to_intel_crtc(old_conn_state->crtc)->pipe;
intel_backlight_set_pwm_level(old_conn_state, val);
- intel_de_rmw(i915, VLV_BLC_PWM_CTL2(pipe), BLM_PWM_ENABLE, 0);
+ intel_de_rmw(display, VLV_BLC_PWM_CTL2(pipe), BLM_PWM_ENABLE, 0);
}
static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
intel_backlight_set_pwm_level(old_conn_state, val);
- intel_de_rmw(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ intel_de_rmw(display, BXT_BLC_PWM_CTL(panel->backlight.controller),
BXT_BLC_PWM_ENABLE, 0);
if (panel->backlight.controller == 1)
- intel_de_rmw(i915, UTIL_PIN_CTL, UTIL_PIN_ENABLE, 0);
+ intel_de_rmw(display, UTIL_PIN_CTL, UTIL_PIN_ENABLE, 0);
}
static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
intel_backlight_set_pwm_level(old_conn_state, val);
- intel_de_rmw(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ intel_de_rmw(display, BXT_BLC_PWM_CTL(panel->backlight.controller),
BXT_BLC_PWM_ENABLE, 0);
}
@@ -436,7 +439,7 @@ static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn
void intel_backlight_disable(const struct drm_connector_state *old_conn_state)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
if (!panel->backlight.present)
@@ -448,49 +451,51 @@ void intel_backlight_disable(const struct drm_connector_state *old_conn_state)
* backlight. This will leave the backlight on unnecessarily when
* another client is not activated.
*/
- if (i915->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Skipping backlight disable on vga switch\n",
+ if (display->drm->switch_power_state == DRM_SWITCH_POWER_CHANGING) {
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] Skipping backlight disable on vga switch\n",
connector->base.base.id, connector->base.name);
return;
}
- mutex_lock(&i915->display.backlight.lock);
+ mutex_lock(&display->backlight.lock);
if (panel->backlight.device)
panel->backlight.device->props.power = BACKLIGHT_POWER_OFF;
panel->backlight.enabled = false;
panel->backlight.funcs->disable(old_conn_state, 0);
- mutex_unlock(&i915->display.backlight.lock);
+ mutex_unlock(&display->backlight.lock);
}
static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_display *display = to_intel_display(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 pch_ctl1, pch_ctl2;
- pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1);
+ pch_ctl1 = intel_de_read(display, BLC_PWM_PCH_CTL1);
if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PCH backlight already enabled\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] PCH backlight already enabled\n",
connector->base.base.id, connector->base.name);
pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
- intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_write(display, BLC_PWM_PCH_CTL1, pch_ctl1);
}
if (HAS_PCH_LPT(i915))
- intel_de_rmw(i915, SOUTH_CHICKEN2, LPT_PWM_GRANULARITY,
+ intel_de_rmw(display, SOUTH_CHICKEN2, LPT_PWM_GRANULARITY,
panel->backlight.alternate_pwm_increment ?
LPT_PWM_GRANULARITY : 0);
else
- intel_de_rmw(i915, SOUTH_CHICKEN1, SPT_PWM_GRANULARITY,
+ intel_de_rmw(display, SOUTH_CHICKEN1, SPT_PWM_GRANULARITY,
panel->backlight.alternate_pwm_increment ?
SPT_PWM_GRANULARITY : 0);
pch_ctl2 = panel->backlight.pwm_level_max << 16;
- intel_de_write(i915, BLC_PWM_PCH_CTL2, pch_ctl2);
+ intel_de_write(display, BLC_PWM_PCH_CTL2, pch_ctl2);
pch_ctl1 = 0;
if (panel->backlight.active_low_pwm)
@@ -500,9 +505,9 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
if (HAS_PCH_LPT(i915))
pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
- intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1);
- intel_de_posting_read(i915, BLC_PWM_PCH_CTL1);
- intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
+ intel_de_write(display, BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_posting_read(display, BLC_PWM_PCH_CTL1);
+ intel_de_write(display, BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
/* This won't stick until the above enable. */
intel_backlight_set_pwm_level(conn_state, level);
@@ -512,63 +517,66 @@ static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 cpu_ctl2, pch_ctl1, pch_ctl2;
- cpu_ctl2 = intel_de_read(i915, BLC_PWM_CPU_CTL2);
+ cpu_ctl2 = intel_de_read(display, BLC_PWM_CPU_CTL2);
if (cpu_ctl2 & BLM_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] CPU backlight already enabled\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] CPU backlight already enabled\n",
connector->base.base.id, connector->base.name);
cpu_ctl2 &= ~BLM_PWM_ENABLE;
- intel_de_write(i915, BLC_PWM_CPU_CTL2, cpu_ctl2);
+ intel_de_write(display, BLC_PWM_CPU_CTL2, cpu_ctl2);
}
- pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1);
+ pch_ctl1 = intel_de_read(display, BLC_PWM_PCH_CTL1);
if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PCH backlight already enabled\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] PCH backlight already enabled\n",
connector->base.base.id, connector->base.name);
pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
- intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_write(display, BLC_PWM_PCH_CTL1, pch_ctl1);
}
if (cpu_transcoder == TRANSCODER_EDP)
cpu_ctl2 = BLM_TRANSCODER_EDP;
else
cpu_ctl2 = BLM_PIPE(cpu_transcoder);
- intel_de_write(i915, BLC_PWM_CPU_CTL2, cpu_ctl2);
- intel_de_posting_read(i915, BLC_PWM_CPU_CTL2);
- intel_de_write(i915, BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
+ intel_de_write(display, BLC_PWM_CPU_CTL2, cpu_ctl2);
+ intel_de_posting_read(display, BLC_PWM_CPU_CTL2);
+ intel_de_write(display, BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
/* This won't stick until the above enable. */
intel_backlight_set_pwm_level(conn_state, level);
pch_ctl2 = panel->backlight.pwm_level_max << 16;
- intel_de_write(i915, BLC_PWM_PCH_CTL2, pch_ctl2);
+ intel_de_write(display, BLC_PWM_PCH_CTL2, pch_ctl2);
pch_ctl1 = 0;
if (panel->backlight.active_low_pwm)
pch_ctl1 |= BLM_PCH_POLARITY;
- intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1);
- intel_de_posting_read(i915, BLC_PWM_PCH_CTL1);
- intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
+ intel_de_write(display, BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_posting_read(display, BLC_PWM_PCH_CTL1);
+ intel_de_write(display, BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
}
static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 ctl, freq;
- ctl = intel_de_read(i915, BLC_PWM_CTL);
+ ctl = intel_de_read(display, BLC_PWM_CTL);
if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] backlight already enabled\n",
connector->base.base.id, connector->base.name);
- intel_de_write(i915, BLC_PWM_CTL, 0);
+ intel_de_write(display, BLC_PWM_CTL, 0);
}
freq = panel->backlight.pwm_level_max;
@@ -578,11 +586,11 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
ctl = freq << 17;
if (panel->backlight.combination_mode)
ctl |= BLM_LEGACY_MODE;
- if (IS_PINEVIEW(i915) && panel->backlight.active_low_pwm)
+ if (display->platform.pineview && panel->backlight.active_low_pwm)
ctl |= BLM_POLARITY_PNV;
- intel_de_write(i915, BLC_PWM_CTL, ctl);
- intel_de_posting_read(i915, BLC_PWM_CTL);
+ intel_de_write(display, BLC_PWM_CTL, ctl);
+ intel_de_posting_read(display, BLC_PWM_CTL);
/* XXX: combine this into above write? */
intel_backlight_set_pwm_level(conn_state, level);
@@ -592,25 +600,26 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
* 855gm only, but checking for gen2 is safe, as 855gm is the only gen2
* that has backlight.
*/
- if (DISPLAY_VER(i915) == 2)
- intel_de_write(i915, BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
+ if (DISPLAY_VER(display) == 2)
+ intel_de_write(display, BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
}
static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
u32 ctl, ctl2, freq;
- ctl2 = intel_de_read(i915, BLC_PWM_CTL2);
+ ctl2 = intel_de_read(display, BLC_PWM_CTL2);
if (ctl2 & BLM_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] backlight already enabled\n",
connector->base.base.id, connector->base.name);
ctl2 &= ~BLM_PWM_ENABLE;
- intel_de_write(i915, BLC_PWM_CTL2, ctl2);
+ intel_de_write(display, BLC_PWM_CTL2, ctl2);
}
freq = panel->backlight.pwm_level_max;
@@ -618,16 +627,16 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
freq /= 0xff;
ctl = freq << 16;
- intel_de_write(i915, BLC_PWM_CTL, ctl);
+ intel_de_write(display, BLC_PWM_CTL, ctl);
ctl2 = BLM_PIPE(pipe);
if (panel->backlight.combination_mode)
ctl2 |= BLM_COMBINATION_MODE;
if (panel->backlight.active_low_pwm)
ctl2 |= BLM_POLARITY_I965;
- intel_de_write(i915, BLC_PWM_CTL2, ctl2);
- intel_de_posting_read(i915, BLC_PWM_CTL2);
- intel_de_write(i915, BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
+ intel_de_write(display, BLC_PWM_CTL2, ctl2);
+ intel_de_posting_read(display, BLC_PWM_CTL2);
+ intel_de_write(display, BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
intel_backlight_set_pwm_level(conn_state, level);
}
@@ -636,21 +645,22 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
u32 ctl, ctl2;
- ctl2 = intel_de_read(i915, VLV_BLC_PWM_CTL2(pipe));
+ ctl2 = intel_de_read(display, VLV_BLC_PWM_CTL2(pipe));
if (ctl2 & BLM_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] backlight already enabled\n",
connector->base.base.id, connector->base.name);
ctl2 &= ~BLM_PWM_ENABLE;
- intel_de_write(i915, VLV_BLC_PWM_CTL2(pipe), ctl2);
+ intel_de_write(display, VLV_BLC_PWM_CTL2(pipe), ctl2);
}
ctl = panel->backlight.pwm_level_max << 16;
- intel_de_write(i915, VLV_BLC_PWM_CTL(pipe), ctl);
+ intel_de_write(display, VLV_BLC_PWM_CTL(pipe), ctl);
/* XXX: combine this into above write? */
intel_backlight_set_pwm_level(conn_state, level);
@@ -658,47 +668,49 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
ctl2 = 0;
if (panel->backlight.active_low_pwm)
ctl2 |= BLM_POLARITY_I965;
- intel_de_write(i915, VLV_BLC_PWM_CTL2(pipe), ctl2);
- intel_de_posting_read(i915, VLV_BLC_PWM_CTL2(pipe));
- intel_de_write(i915, VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE);
+ intel_de_write(display, VLV_BLC_PWM_CTL2(pipe), ctl2);
+ intel_de_posting_read(display, VLV_BLC_PWM_CTL2(pipe));
+ intel_de_write(display, VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE);
}
static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
u32 pwm_ctl, val;
/* Controller 1 uses the utility pin. */
if (panel->backlight.controller == 1) {
- val = intel_de_read(i915, UTIL_PIN_CTL);
+ val = intel_de_read(display, UTIL_PIN_CTL);
if (val & UTIL_PIN_ENABLE) {
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] utility pin already enabled\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] utility pin already enabled\n",
connector->base.base.id, connector->base.name);
val &= ~UTIL_PIN_ENABLE;
- intel_de_write(i915, UTIL_PIN_CTL, val);
+ intel_de_write(display, UTIL_PIN_CTL, val);
}
val = 0;
if (panel->backlight.util_pin_active_low)
val |= UTIL_PIN_POLARITY;
- intel_de_write(i915, UTIL_PIN_CTL,
+ intel_de_write(display, UTIL_PIN_CTL,
val | UTIL_PIN_PIPE(pipe) | UTIL_PIN_MODE_PWM | UTIL_PIN_ENABLE);
}
- pwm_ctl = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller));
+ pwm_ctl = intel_de_read(display, BXT_BLC_PWM_CTL(panel->backlight.controller));
if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] backlight already enabled\n",
connector->base.base.id, connector->base.name);
pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
- intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ intel_de_write(display, BXT_BLC_PWM_CTL(panel->backlight.controller),
pwm_ctl);
}
- intel_de_write(i915, BXT_BLC_PWM_FREQ(panel->backlight.controller),
+ intel_de_write(display, BXT_BLC_PWM_FREQ(panel->backlight.controller),
panel->backlight.pwm_level_max);
intel_backlight_set_pwm_level(conn_state, level);
@@ -707,9 +719,9 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
if (panel->backlight.active_low_pwm)
pwm_ctl |= BXT_BLC_PWM_POLARITY;
- intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl);
- intel_de_posting_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller));
- intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ intel_de_write(display, BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl);
+ intel_de_posting_read(display, BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(display, BXT_BLC_PWM_CTL(panel->backlight.controller),
pwm_ctl | BXT_BLC_PWM_ENABLE);
}
@@ -717,19 +729,19 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 pwm_ctl;
- pwm_ctl = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller));
+ pwm_ctl = intel_de_read(display, BXT_BLC_PWM_CTL(panel->backlight.controller));
if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "backlight already enabled\n");
+ drm_dbg_kms(display->drm, "backlight already enabled\n");
pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
- intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ intel_de_write(display, BXT_BLC_PWM_CTL(panel->backlight.controller),
pwm_ctl);
}
- intel_de_write(i915, BXT_BLC_PWM_FREQ(panel->backlight.controller),
+ intel_de_write(display, BXT_BLC_PWM_FREQ(panel->backlight.controller),
panel->backlight.pwm_level_max);
intel_backlight_set_pwm_level(conn_state, level);
@@ -738,9 +750,9 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
if (panel->backlight.active_low_pwm)
pwm_ctl |= BXT_BLC_PWM_POLARITY;
- intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl);
- intel_de_posting_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller));
- intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ intel_de_write(display, BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl);
+ intel_de_posting_read(display, BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(display, BXT_BLC_PWM_CTL(panel->backlight.controller),
pwm_ctl | BXT_BLC_PWM_ENABLE);
}
@@ -782,37 +794,37 @@ void intel_backlight_enable(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
if (!panel->backlight.present)
return;
- drm_dbg_kms(&i915->drm, "pipe %c\n", pipe_name(pipe));
+ drm_dbg_kms(display->drm, "pipe %c\n", pipe_name(pipe));
- mutex_lock(&i915->display.backlight.lock);
+ mutex_lock(&display->backlight.lock);
__intel_backlight_enable(crtc_state, conn_state);
- mutex_unlock(&i915->display.backlight.lock);
+ mutex_unlock(&display->backlight.lock);
}
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
static u32 intel_panel_get_backlight(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 val = 0;
- mutex_lock(&i915->display.backlight.lock);
+ mutex_lock(&display->backlight.lock);
if (panel->backlight.enabled)
val = panel->backlight.funcs->get(connector, intel_connector_get_pipe(connector));
- mutex_unlock(&i915->display.backlight.lock);
+ mutex_unlock(&display->backlight.lock);
- drm_dbg_kms(&i915->drm, "get backlight PWM = %d\n", val);
+ drm_dbg_kms(display->drm, "get backlight PWM = %d\n", val);
return val;
}
@@ -831,16 +843,16 @@ static void intel_panel_set_backlight(const struct drm_connector_state *conn_sta
u32 user_level, u32 user_max)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 hw_level;
if (!panel->backlight.present)
return;
- mutex_lock(&i915->display.backlight.lock);
+ mutex_lock(&display->backlight.lock);
- drm_WARN_ON(&i915->drm, panel->backlight.max == 0);
+ drm_WARN_ON(display->drm, panel->backlight.max == 0);
hw_level = scale_user_to_hw(connector, user_level, user_max);
panel->backlight.level = hw_level;
@@ -848,18 +860,18 @@ static void intel_panel_set_backlight(const struct drm_connector_state *conn_sta
if (panel->backlight.enabled)
intel_panel_actually_set_backlight(conn_state, hw_level);
- mutex_unlock(&i915->display.backlight.lock);
+ mutex_unlock(&display->backlight.lock);
}
static int intel_backlight_device_update_status(struct backlight_device *bd)
{
struct intel_connector *connector = bl_get_data(bd);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
- drm_modeset_lock(&i915->drm.mode_config.connection_mutex, NULL);
+ drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL);
- drm_dbg_kms(&i915->drm, "updating intel_backlight, brightness=%d/%d\n",
+ drm_dbg_kms(display->drm, "updating intel_backlight, brightness=%d/%d\n",
bd->props.brightness, bd->props.max_brightness);
intel_panel_set_backlight(connector->base.state, bd->props.brightness,
bd->props.max_brightness);
@@ -880,7 +892,7 @@ static int intel_backlight_device_update_status(struct backlight_device *bd)
bd->props.power = BACKLIGHT_POWER_OFF;
}
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return 0;
}
@@ -888,6 +900,7 @@ static int intel_backlight_device_update_status(struct backlight_device *bd)
static int intel_backlight_device_get_brightness(struct backlight_device *bd)
{
struct intel_connector *connector = bl_get_data(bd);
+ struct intel_display *display = to_intel_display(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
intel_wakeref_t wakeref;
int ret = 0;
@@ -895,13 +908,13 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
u32 hw_level;
- drm_modeset_lock(&i915->drm.mode_config.connection_mutex, NULL);
+ drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL);
hw_level = intel_panel_get_backlight(connector);
ret = scale_hw_to_user(connector,
hw_level, bd->props.max_brightness);
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
}
return ret;
@@ -914,7 +927,7 @@ static const struct backlight_ops intel_backlight_device_ops = {
int intel_backlight_device_register(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
struct backlight_properties props;
struct backlight_device *bd;
@@ -930,7 +943,8 @@ int intel_backlight_device_register(struct intel_connector *connector)
WARN_ON(panel->backlight.max == 0);
if (!acpi_video_backlight_use_native()) {
- drm_info(&i915->drm, "Skipping intel_backlight registration\n");
+ drm_info(display->drm,
+ "Skipping intel_backlight registration\n");
return 0;
}
@@ -967,7 +981,8 @@ int intel_backlight_device_register(struct intel_connector *connector)
*/
kfree(name);
name = kasprintf(GFP_KERNEL, "card%d-%s-backlight",
- i915->drm.primary->index, connector->base.name);
+ display->drm->primary->index,
+ connector->base.name);
if (!name)
return -ENOMEM;
}
@@ -975,7 +990,7 @@ int intel_backlight_device_register(struct intel_connector *connector)
&intel_backlight_device_ops, &props);
if (IS_ERR(bd)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[CONNECTOR:%d:%s] backlight device %s register failed: %ld\n",
connector->base.base.id, connector->base.name, name, PTR_ERR(bd));
ret = PTR_ERR(bd);
@@ -984,7 +999,7 @@ int intel_backlight_device_register(struct intel_connector *connector)
panel->backlight.device = bd;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] backlight device %s registered\n",
connector->base.base.id, connector->base.name, name);
@@ -1011,9 +1026,9 @@ void intel_backlight_device_unregister(struct intel_connector *connector)
*/
static u32 cnp_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
- return DIV_ROUND_CLOSEST(KHz(DISPLAY_RUNTIME_INFO(i915)->rawclk_freq),
+ return DIV_ROUND_CLOSEST(KHz(DISPLAY_RUNTIME_INFO(display)->rawclk_freq),
pwm_freq_hz);
}
@@ -1073,9 +1088,9 @@ static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
- return DIV_ROUND_CLOSEST(KHz(DISPLAY_RUNTIME_INFO(i915)->rawclk_freq),
+ return DIV_ROUND_CLOSEST(KHz(DISPLAY_RUNTIME_INFO(display)->rawclk_freq),
pwm_freq_hz * 128);
}
@@ -1089,13 +1104,13 @@ static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
int clock;
- if (IS_PINEVIEW(i915))
- clock = KHz(DISPLAY_RUNTIME_INFO(i915)->rawclk_freq);
+ if (display->platform.pineview)
+ clock = KHz(DISPLAY_RUNTIME_INFO(display)->rawclk_freq);
else
- clock = KHz(i915->display.cdclk.hw.cdclk);
+ clock = KHz(display->cdclk.hw.cdclk);
return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 32);
}
@@ -1107,13 +1122,13 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
int clock;
- if (IS_G4X(i915))
- clock = KHz(DISPLAY_RUNTIME_INFO(i915)->rawclk_freq);
+ if (display->platform.g4x)
+ clock = KHz(DISPLAY_RUNTIME_INFO(display)->rawclk_freq);
else
- clock = KHz(i915->display.cdclk.hw.cdclk);
+ clock = KHz(display->cdclk.hw.cdclk);
return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 128);
}
@@ -1125,17 +1140,17 @@ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
int mul, clock;
- if ((intel_de_read(i915, CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
- if (IS_CHERRYVIEW(i915))
+ if ((intel_de_read(display, CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
+ if (display->platform.cherryview)
clock = KHz(19200);
else
clock = MHz(25);
mul = 16;
} else {
- clock = KHz(DISPLAY_RUNTIME_INFO(i915)->rawclk_freq);
+ clock = KHz(DISPLAY_RUNTIME_INFO(display)->rawclk_freq);
mul = 128;
}
@@ -1144,16 +1159,16 @@ static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
static u16 get_vbt_pwm_freq(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
u16 pwm_freq_hz = connector->panel.vbt.backlight.pwm_freq_hz;
if (pwm_freq_hz) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"VBT defined backlight frequency %u Hz\n",
pwm_freq_hz);
} else {
pwm_freq_hz = 200;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"default backlight frequency %u Hz\n",
pwm_freq_hz);
}
@@ -1163,20 +1178,20 @@ static u16 get_vbt_pwm_freq(struct intel_connector *connector)
static u32 get_backlight_max_vbt(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u16 pwm_freq_hz = get_vbt_pwm_freq(connector);
u32 pwm;
if (!panel->backlight.pwm_funcs->hz_to_pwm) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"backlight frequency conversion not supported\n");
return 0;
}
pwm = panel->backlight.pwm_funcs->hz_to_pwm(connector, pwm_freq_hz);
if (!pwm) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"backlight frequency conversion failed\n");
return 0;
}
@@ -1189,11 +1204,11 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
*/
static u32 get_backlight_min_vbt(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
int min;
- drm_WARN_ON(&i915->drm, panel->backlight.pwm_level_max == 0);
+ drm_WARN_ON(display->drm, panel->backlight.pwm_level_max == 0);
/*
* XXX: If the vbt value is 255, it makes min equal to max, which leads
@@ -1204,7 +1219,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
*/
min = clamp_t(int, connector->panel.vbt.backlight.min_brightness, 0, 64);
if (min != connector->panel.vbt.backlight.min_brightness) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"clamping VBT min backlight %d/255 to %d/255\n",
connector->panel.vbt.backlight.min_brightness, min);
}
@@ -1215,24 +1230,25 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
+ struct intel_display *display = to_intel_display(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
bool alt, cpu_mode;
if (HAS_PCH_LPT(i915))
- alt = intel_de_read(i915, SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
+ alt = intel_de_read(display, SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
else
- alt = intel_de_read(i915, SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
+ alt = intel_de_read(display, SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
panel->backlight.alternate_pwm_increment = alt;
- pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1);
+ pch_ctl1 = intel_de_read(display, BLC_PWM_PCH_CTL1);
panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
- pch_ctl2 = intel_de_read(i915, BLC_PWM_PCH_CTL2);
+ pch_ctl2 = intel_de_read(display, BLC_PWM_PCH_CTL2);
panel->backlight.pwm_level_max = pch_ctl2 >> 16;
- cpu_ctl2 = intel_de_read(i915, BLC_PWM_CPU_CTL2);
+ cpu_ctl2 = intel_de_read(display, BLC_PWM_CPU_CTL2);
if (!panel->backlight.pwm_level_max)
panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
@@ -1251,19 +1267,19 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
if (cpu_mode) {
val = pch_get_backlight(connector, unused);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"CPU backlight register was enabled, switching to PCH override\n");
/* Write converted CPU PWM value to PCH override register */
lpt_set_backlight(connector->base.state, val);
- intel_de_write(i915, BLC_PWM_PCH_CTL1,
+ intel_de_write(display, BLC_PWM_PCH_CTL1,
pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
- intel_de_write(i915, BLC_PWM_CPU_CTL2,
+ intel_de_write(display, BLC_PWM_CPU_CTL2,
cpu_ctl2 & ~BLM_PWM_ENABLE);
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Using native PCH PWM for backlight control\n",
connector->base.base.id, connector->base.name);
@@ -1272,14 +1288,14 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 cpu_ctl2, pch_ctl1, pch_ctl2;
- pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1);
+ pch_ctl1 = intel_de_read(display, BLC_PWM_PCH_CTL1);
panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
- pch_ctl2 = intel_de_read(i915, BLC_PWM_PCH_CTL2);
+ pch_ctl2 = intel_de_read(display, BLC_PWM_PCH_CTL2);
panel->backlight.pwm_level_max = pch_ctl2 >> 16;
if (!panel->backlight.pwm_level_max)
@@ -1290,11 +1306,11 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
- cpu_ctl2 = intel_de_read(i915, BLC_PWM_CPU_CTL2);
+ cpu_ctl2 = intel_de_read(display, BLC_PWM_CPU_CTL2);
panel->backlight.pwm_enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
(pch_ctl1 & BLM_PCH_PWM_ENABLE);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Using native PCH PWM for backlight control\n",
connector->base.base.id, connector->base.name);
@@ -1303,16 +1319,16 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 ctl, val;
- ctl = intel_de_read(i915, BLC_PWM_CTL);
+ ctl = intel_de_read(display, BLC_PWM_CTL);
- if (DISPLAY_VER(i915) == 2 || IS_I915GM(i915) || IS_I945GM(i915))
+ if (DISPLAY_VER(display) == 2 || display->platform.i915gm || display->platform.i945gm)
panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
- if (IS_PINEVIEW(i915))
+ if (display->platform.pineview)
panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
panel->backlight.pwm_level_max = ctl >> 17;
@@ -1336,7 +1352,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
panel->backlight.pwm_enabled = val != 0;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Using native PWM for backlight control\n",
connector->base.base.id, connector->base.name);
@@ -1345,15 +1361,15 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 ctl, ctl2;
- ctl2 = intel_de_read(i915, BLC_PWM_CTL2);
+ ctl2 = intel_de_read(display, BLC_PWM_CTL2);
panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE;
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
- ctl = intel_de_read(i915, BLC_PWM_CTL);
+ ctl = intel_de_read(display, BLC_PWM_CTL);
panel->backlight.pwm_level_max = ctl >> 16;
if (!panel->backlight.pwm_level_max)
@@ -1369,7 +1385,7 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu
panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Using native PWM for backlight control\n",
connector->base.base.id, connector->base.name);
@@ -1378,17 +1394,17 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu
static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 ctl, ctl2;
- if (drm_WARN_ON(&i915->drm, pipe != PIPE_A && pipe != PIPE_B))
+ if (drm_WARN_ON(display->drm, pipe != PIPE_A && pipe != PIPE_B))
return -ENODEV;
- ctl2 = intel_de_read(i915, VLV_BLC_PWM_CTL2(pipe));
+ ctl2 = intel_de_read(display, VLV_BLC_PWM_CTL2(pipe));
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
- ctl = intel_de_read(i915, VLV_BLC_PWM_CTL(pipe));
+ ctl = intel_de_read(display, VLV_BLC_PWM_CTL(pipe));
panel->backlight.pwm_level_max = ctl >> 16;
if (!panel->backlight.pwm_level_max)
@@ -1401,7 +1417,7 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Using native PWM for backlight control (on pipe %c)\n",
connector->base.base.id, connector->base.name, pipe_name(pipe));
@@ -1411,25 +1427,25 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
static int
bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 pwm_ctl, val;
panel->backlight.controller = connector->panel.vbt.backlight.controller;
- pwm_ctl = intel_de_read(i915,
+ pwm_ctl = intel_de_read(display,
BXT_BLC_PWM_CTL(panel->backlight.controller));
/* Controller 1 uses the utility pin. */
if (panel->backlight.controller == 1) {
- val = intel_de_read(i915, UTIL_PIN_CTL);
+ val = intel_de_read(display, UTIL_PIN_CTL);
panel->backlight.util_pin_active_low =
val & UTIL_PIN_POLARITY;
}
panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
panel->backlight.pwm_level_max =
- intel_de_read(i915, BXT_BLC_PWM_FREQ(panel->backlight.controller));
+ intel_de_read(display, BXT_BLC_PWM_FREQ(panel->backlight.controller));
if (!panel->backlight.pwm_level_max)
panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
@@ -1441,7 +1457,7 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Using native PWM for backlight control (controller=%d)\n",
connector->base.base.id, connector->base.name,
panel->backlight.controller);
@@ -1449,8 +1465,10 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
return 0;
}
-static int cnp_num_backlight_controllers(struct drm_i915_private *i915)
+static int cnp_num_backlight_controllers(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
return 2;
@@ -1463,15 +1481,17 @@ static int cnp_num_backlight_controllers(struct drm_i915_private *i915)
return 1;
}
-static bool cnp_backlight_controller_is_valid(struct drm_i915_private *i915, int controller)
+static bool cnp_backlight_controller_is_valid(struct intel_display *display, int controller)
{
- if (controller < 0 || controller >= cnp_num_backlight_controllers(i915))
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (controller < 0 || controller >= cnp_num_backlight_controllers(display))
return false;
if (controller == 1 &&
INTEL_PCH_TYPE(i915) >= PCH_ICP &&
INTEL_PCH_TYPE(i915) <= PCH_ADP)
- return intel_de_read(i915, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
+ return intel_de_read(display, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
return true;
}
@@ -1479,7 +1499,7 @@ static bool cnp_backlight_controller_is_valid(struct drm_i915_private *i915, int
static int
cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 pwm_ctl;
@@ -1488,19 +1508,20 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
* controller. ICP+ can have two controllers, depending on pin muxing.
*/
panel->backlight.controller = connector->panel.vbt.backlight.controller;
- if (!cnp_backlight_controller_is_valid(i915, panel->backlight.controller)) {
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Invalid backlight controller %d, assuming 0\n",
+ if (!cnp_backlight_controller_is_valid(display, panel->backlight.controller)) {
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] Invalid backlight controller %d, assuming 0\n",
connector->base.base.id, connector->base.name,
panel->backlight.controller);
panel->backlight.controller = 0;
}
- pwm_ctl = intel_de_read(i915,
+ pwm_ctl = intel_de_read(display,
BXT_BLC_PWM_CTL(panel->backlight.controller));
panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
panel->backlight.pwm_level_max =
- intel_de_read(i915, BXT_BLC_PWM_FREQ(panel->backlight.controller));
+ intel_de_read(display, BXT_BLC_PWM_FREQ(panel->backlight.controller));
if (!panel->backlight.pwm_level_max)
panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
@@ -1512,7 +1533,7 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Using native PCH PWM for backlight control (controller=%d)\n",
connector->base.base.id, connector->base.name,
panel->backlight.controller);
@@ -1523,22 +1544,25 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
static int ext_pwm_setup_backlight(struct intel_connector *connector,
enum pipe pipe)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
const char *desc;
u32 level;
/* Get the right PWM chip for DSI backlight according to VBT */
if (connector->panel.vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
- panel->backlight.pwm = pwm_get(i915->drm.dev, "pwm_pmic_backlight");
+ panel->backlight.pwm = pwm_get(display->drm->dev,
+ "pwm_pmic_backlight");
desc = "PMIC";
} else {
- panel->backlight.pwm = pwm_get(i915->drm.dev, "pwm_soc_backlight");
+ panel->backlight.pwm = pwm_get(display->drm->dev,
+ "pwm_soc_backlight");
desc = "SoC";
}
if (IS_ERR(panel->backlight.pwm)) {
- drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to get the %s PWM chip\n",
+ drm_err(display->drm,
+ "[CONNECTOR:%d:%s] Failed to get the %s PWM chip\n",
connector->base.base.id, connector->base.name, desc);
panel->backlight.pwm = NULL;
return -ENODEV;
@@ -1556,7 +1580,8 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector,
level = intel_backlight_invert_pwm_level(connector, level);
panel->backlight.pwm_enabled = true;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PWM already enabled at freq %ld, VBT freq %d, level %d\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] PWM already enabled at freq %ld, VBT freq %d, level %d\n",
connector->base.base.id, connector->base.name,
NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period,
get_vbt_pwm_freq(connector), level);
@@ -1566,7 +1591,7 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector,
NSEC_PER_SEC / get_vbt_pwm_freq(connector);
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Using %s PWM for backlight control\n",
connector->base.base.id, connector->base.name, desc);
@@ -1632,17 +1657,17 @@ void intel_backlight_update(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
if (!panel->backlight.present)
return;
- mutex_lock(&i915->display.backlight.lock);
+ mutex_lock(&display->backlight.lock);
if (!panel->backlight.enabled)
__intel_backlight_enable(crtc_state, conn_state);
- mutex_unlock(&i915->display.backlight.lock);
+ mutex_unlock(&display->backlight.lock);
}
int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
@@ -1793,30 +1818,31 @@ void intel_backlight_init_funcs(struct intel_panel *panel)
{
struct intel_connector *connector =
container_of(panel, struct intel_connector, panel);
+ struct intel_display *display = to_intel_display(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI &&
intel_dsi_dcs_init_backlight_funcs(connector) == 0)
return;
- if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
+ if (display->platform.geminilake || display->platform.broxton) {
panel->backlight.pwm_funcs = &bxt_pwm_funcs;
} else if (INTEL_PCH_TYPE(i915) >= PCH_CNP) {
panel->backlight.pwm_funcs = &cnp_pwm_funcs;
- } else if (INTEL_PCH_TYPE(i915) >= PCH_LPT) {
+ } else if (INTEL_PCH_TYPE(i915) >= PCH_LPT_H) {
if (HAS_PCH_LPT(i915))
panel->backlight.pwm_funcs = &lpt_pwm_funcs;
else
panel->backlight.pwm_funcs = &spt_pwm_funcs;
} else if (HAS_PCH_SPLIT(i915)) {
panel->backlight.pwm_funcs = &pch_pwm_funcs;
- } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ } else if (display->platform.valleyview || display->platform.cherryview) {
if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) {
panel->backlight.pwm_funcs = &ext_pwm_funcs;
} else {
panel->backlight.pwm_funcs = &vlv_pwm_funcs;
}
- } else if (DISPLAY_VER(i915) == 4) {
+ } else if (DISPLAY_VER(display) == 4) {
panel->backlight.pwm_funcs = &i965_pwm_funcs;
} else {
panel->backlight.pwm_funcs = &i9xx_pwm_funcs;
@@ -1826,7 +1852,7 @@ void intel_backlight_init_funcs(struct intel_panel *panel)
if (intel_dp_aux_init_backlight_funcs(connector) == 0)
return;
- if (!intel_has_quirk(&i915->display, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
+ if (!intel_has_quirk(display, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
connector->panel.backlight.power = intel_pps_backlight_power;
}
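The intel_backlight.c diff above follows one mechanical pattern: the drm_i915_private local is replaced by a struct intel_display derived from the connector, and register access (intel_de_read/intel_de_write/intel_de_rmw), logging (drm_dbg_kms/drm_err), locking (display->backlight.lock) and platform checks (DISPLAY_VER(), display->platform.*) are all routed through it. Below is a condensed sketch of that pattern assembled from the hunks above; example_get_backlight() itself is hypothetical and only mirrors i9xx_get_backlight().

	/* Illustrative only: the conversion pattern used throughout the file. */
	static u32 example_get_backlight(struct intel_connector *connector)
	{
		/* before: struct drm_i915_private *i915 = to_i915(connector->base.dev); */
		struct intel_display *display = to_intel_display(connector);
		u32 val;

		/* logging and register access now take the display pointer */
		drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] reading backlight PWM\n",
			    connector->base.base.id, connector->base.name);

		val = intel_de_read(display, BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;

		/* version/platform checks go through 'display' as well */
		if (DISPLAY_VER(display) < 4)
			val >>= 1;

		return val;
	}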
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index e0e4e9b62d8d..a8d08d7d82b3 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -2902,7 +2902,6 @@ init_vbt_panel_defaults(struct intel_panel *panel)
static void
init_vbt_missing_defaults(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
unsigned int ports = DISPLAY_RUNTIME_INFO(display)->port_mask;
enum port port;
@@ -2912,13 +2911,13 @@ init_vbt_missing_defaults(struct intel_display *display)
for_each_port_masked(port, ports) {
struct intel_bios_encoder_data *devdata;
struct child_device_config *child;
- enum phy phy = intel_port_to_phy(i915, port);
+ enum phy phy = intel_port_to_phy(display, port);
/*
* VBT has the TypeC mode (native,TBT/USB) and we don't want
* to detect it.
*/
- if (intel_phy_is_tc(i915, phy))
+ if (intel_phy_is_tc(display, phy))
continue;
/* Create fake child device config */
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 23edc81741de..048be2872247 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -806,24 +806,6 @@ static int intel_bw_crtc_min_cdclk(const struct intel_crtc_state *crtc_state)
return DIV_ROUND_UP_ULL(mul_u32_u32(intel_bw_crtc_data_rate(crtc_state), 10), 512);
}
-void intel_bw_crtc_update(struct intel_bw_state *bw_state,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
-
- bw_state->data_rate[crtc->pipe] =
- intel_bw_crtc_data_rate(crtc_state);
- bw_state->num_active_planes[crtc->pipe] =
- intel_bw_crtc_num_active_planes(crtc_state);
- bw_state->force_check_qgv = true;
-
- drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n",
- pipe_name(crtc->pipe),
- bw_state->data_rate[crtc->pipe],
- bw_state->num_active_planes[crtc->pipe]);
-}
-
static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv,
const struct intel_bw_state *bw_state)
{
@@ -1422,6 +1404,62 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
return 0;
}
+static void intel_bw_crtc_update(struct intel_bw_state *bw_state,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ bw_state->data_rate[crtc->pipe] =
+ intel_bw_crtc_data_rate(crtc_state);
+ bw_state->num_active_planes[crtc->pipe] =
+ intel_bw_crtc_num_active_planes(crtc_state);
+ bw_state->force_check_qgv = true;
+
+ drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n",
+ pipe_name(crtc->pipe),
+ bw_state->data_rate[crtc->pipe],
+ bw_state->num_active_planes[crtc->pipe]);
+}
+
+void intel_bw_update_hw_state(struct intel_display *display)
+{
+ struct intel_bw_state *bw_state =
+ to_intel_bw_state(display->bw.obj.state);
+ struct intel_crtc *crtc;
+
+ if (DISPLAY_VER(display) < 9)
+ return;
+
+ bw_state->active_pipes = 0;
+
+ for_each_intel_crtc(display->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ enum pipe pipe = crtc->pipe;
+
+ if (crtc_state->hw.active)
+ bw_state->active_pipes |= BIT(pipe);
+
+ if (DISPLAY_VER(display) >= 11)
+ intel_bw_crtc_update(bw_state, crtc_state);
+ }
+}
+
+void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_bw_state *bw_state =
+ to_intel_bw_state(display->bw.obj.state);
+ enum pipe pipe = crtc->pipe;
+
+ if (DISPLAY_VER(display) < 9)
+ return;
+
+ bw_state->data_rate[pipe] = 0;
+ bw_state->num_active_planes[pipe] = 0;
+}
+
static struct intel_global_state *
intel_bw_duplicate_state(struct intel_global_obj *obj)
{
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index 161813cca473..3313e4eac4f0 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -14,7 +14,9 @@
struct drm_i915_private;
struct intel_atomic_state;
+struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_dbuf_bw {
unsigned int max_bw[I915_MAX_DBUF_SLICES];
@@ -73,13 +75,13 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state);
void intel_bw_init_hw(struct drm_i915_private *dev_priv);
int intel_bw_init(struct drm_i915_private *dev_priv);
int intel_bw_atomic_check(struct intel_atomic_state *state);
-void intel_bw_crtc_update(struct intel_bw_state *bw_state,
- const struct intel_crtc_state *crtc_state);
int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
u32 points_mask);
int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
bool *need_cdclk_calc);
int intel_bw_min_cdclk(struct drm_i915_private *i915,
const struct intel_bw_state *bw_state);
+void intel_bw_update_hw_state(struct intel_display *display);
+void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc);
#endif /* __INTEL_BW_H__ */
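For context, a minimal usage sketch of the two intel_bw entry points added above; the caller names below are hypothetical (the real call sites are not part of this hunk) and the sketch only illustrates the intended split between a full readout-time state rebuild and the per-CRTC noatomic disable:

/* Hypothetical callers, for illustration only. */
static void example_readout_bw_hw_state(struct intel_display *display)
{
	/* Rebuilds active_pipes/data_rate bookkeeping from the CRTC states
	 * read out of hardware; a no-op on pre-gen9 platforms.
	 */
	intel_bw_update_hw_state(display);
}

static void example_crtc_disable_noatomic(struct intel_crtc *crtc)
{
	/* Clears just this CRTC's bandwidth bookkeeping when it is shut
	 * down outside of an atomic commit.
	 */
	intel_bw_crtc_disable_noatomic(crtc);
}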
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index c7a603589412..2a8749a0213e 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -314,27 +314,26 @@ static unsigned int intel_hpll_vco(struct intel_display *display)
[4] = 2666667,
[5] = 4266667,
};
- struct drm_i915_private *dev_priv = to_i915(display->drm);
const unsigned int *vco_table;
unsigned int vco;
u8 tmp = 0;
/* FIXME other chipsets? */
- if (IS_GM45(dev_priv))
+ if (display->platform.gm45)
vco_table = ctg_vco;
- else if (IS_G45(dev_priv))
+ else if (display->platform.g45)
vco_table = elk_vco;
- else if (IS_I965GM(dev_priv))
+ else if (display->platform.i965gm)
vco_table = cl_vco;
- else if (IS_PINEVIEW(dev_priv))
+ else if (display->platform.pineview)
vco_table = pnv_vco;
- else if (IS_G33(dev_priv))
+ else if (display->platform.g33)
vco_table = blb_vco;
else
return 0;
- tmp = intel_de_read(display,
- IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO);
+ tmp = intel_de_read(display, display->platform.pineview ||
+ display->platform.mobile ? HPLLVCO_MOBILE : HPLLVCO);
vco = vco_table[tmp & 0x7];
if (vco == 0)
@@ -508,7 +507,6 @@ static void gm45_get_cdclk(struct intel_display *display,
static void hsw_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 lcpll = intel_de_read(display, LCPLL_CTL);
u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;
@@ -518,7 +516,7 @@ static void hsw_get_cdclk(struct intel_display *display,
cdclk_config->cdclk = 450000;
else if (freq == LCPLL_CLK_FREQ_450)
cdclk_config->cdclk = 450000;
- else if (IS_HASWELL_ULT(dev_priv))
+ else if (display->platform.haswell_ult)
cdclk_config->cdclk = 337500;
else
cdclk_config->cdclk = 540000;
@@ -535,7 +533,7 @@ static int vlv_calc_cdclk(struct intel_display *display, int min_cdclk)
* Not sure what's wrong. For now use 200MHz only when all pipes
* are off.
*/
- if (IS_VALLEYVIEW(dev_priv) && min_cdclk > freq_320)
+ if (display->platform.valleyview && min_cdclk > freq_320)
return 400000;
else if (min_cdclk > 266667)
return freq_320;
@@ -549,7 +547,7 @@ static u8 vlv_calc_voltage_level(struct intel_display *display, int cdclk)
{
struct drm_i915_private *dev_priv = to_i915(display->drm);
- if (IS_VALLEYVIEW(dev_priv)) {
+ if (display->platform.valleyview) {
if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
return 2;
else if (cdclk >= 266667)
@@ -585,7 +583,7 @@ static void vlv_get_cdclk(struct intel_display *display,
vlv_iosf_sb_put(dev_priv,
BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
- if (IS_VALLEYVIEW(dev_priv))
+ if (display->platform.valleyview)
cdclk_config->voltage_level = (val & DSPFREQGUAR_MASK) >>
DSPFREQGUAR_SHIFT;
else
@@ -598,14 +596,14 @@ static void vlv_program_pfi_credits(struct intel_display *display)
struct drm_i915_private *dev_priv = to_i915(display->drm);
unsigned int credits, default_credits;
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
default_credits = PFI_CREDIT(12);
else
default_credits = PFI_CREDIT(8);
if (display->cdclk.hw.cdclk >= dev_priv->czclk_freq) {
/* CHV suggested value is 31 or 63 */
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
credits = PFI_CREDIT_63;
else
credits = PFI_CREDIT(15);
@@ -658,7 +656,7 @@ static void vlv_set_cdclk(struct intel_display *display,
* a system suspend. So grab the display core domain, which covers
* the HW blocks needed for the following programming.
*/
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
+ wakeref = intel_display_power_get(display, POWER_DOMAIN_DISPLAY_CORE);
vlv_iosf_sb_get(dev_priv,
BIT(VLV_IOSF_SB_CCK) |
@@ -718,7 +716,7 @@ static void vlv_set_cdclk(struct intel_display *display,
vlv_program_pfi_credits(display);
- intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
}
static void chv_set_cdclk(struct intel_display *display,
@@ -747,7 +745,7 @@ static void chv_set_cdclk(struct intel_display *display,
* a system suspend. So grab the display core domain, which covers
* the HW blocks needed for the following programming.
*/
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
+ wakeref = intel_display_power_get(display, POWER_DOMAIN_DISPLAY_CORE);
vlv_punit_get(dev_priv);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
@@ -767,7 +765,7 @@ static void chv_set_cdclk(struct intel_display *display,
vlv_program_pfi_credits(display);
- intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
}
static int bdw_calc_cdclk(int min_cdclk)
@@ -1142,7 +1140,7 @@ static void skl_set_cdclk(struct intel_display *display,
* minimum 308MHz CDCLK.
*/
drm_WARN_ON_ONCE(display->drm,
- IS_SKYLAKE(dev_priv) && vco == 8640000);
+ display->platform.skylake && vco == 8640000);
ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
@@ -1662,10 +1660,9 @@ static void icl_readout_refclk(struct intel_display *display,
static void bxt_de_pll_readout(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val, ratio;
- if (IS_DG2(dev_priv))
+ if (display->platform.dg2)
cdclk_config->ref = 38400;
else if (DISPLAY_VER(display) >= 11)
icl_readout_refclk(display, cdclk_config);
@@ -2057,11 +2054,9 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct intel_display *displa
static bool pll_enable_wa_needed(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
return (DISPLAY_VERx100(display) == 2000 ||
DISPLAY_VERx100(display) == 1400 ||
- IS_DG2(dev_priv)) &&
+ display->platform.dg2) &&
display->cdclk.hw.vco > 0;
}
@@ -2069,7 +2064,6 @@ static u32 bxt_cdclk_ctl(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
int cdclk = cdclk_config->cdclk;
int vco = cdclk_config->vco;
u16 waveform;
@@ -2084,7 +2078,7 @@ static u32 bxt_cdclk_ctl(struct intel_display *display,
* Disable SSA Precharge when CD clock frequency < 500 MHz,
* enable otherwise.
*/
- if ((IS_GEMINILAKE(i915) || IS_BROXTON(i915)) &&
+ if ((display->platform.geminilake || display->platform.broxton) &&
cdclk >= 500000)
val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
@@ -2144,8 +2138,8 @@ static void bxt_set_cdclk(struct intel_display *display,
* mailbox communication, skip
* this step.
*/
- if (DISPLAY_VER(display) >= 14 || IS_DG2(dev_priv))
- /* NOOP */;
+ if (DISPLAY_VER(display) >= 14 || display->platform.dg2)
+ ; /* NOOP */
else if (DISPLAY_VER(display) >= 11)
ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
@@ -2186,7 +2180,7 @@ static void bxt_set_cdclk(struct intel_display *display,
* NOOP - No Pcode communication needed for
* Display versions 14 and beyond
*/;
- else if (DISPLAY_VER(display) >= 11 && !IS_DG2(dev_priv))
+ else if (DISPLAY_VER(display) >= 11 && !display->platform.dg2)
ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
cdclk_config->voltage_level);
if (DISPLAY_VER(display) < 11) {
@@ -2250,7 +2244,7 @@ static void bxt_sanitize_cdclk(struct intel_display *display)
/*
* Let's ignore the pipe field, since BIOS could have configured the
- * dividers both synching to an active pipe, or asynchronously
+ * dividers both syncing to an active pipe, or asynchronously
* (PIPE_NONE).
*/
cdctl &= ~bxt_cdclk_cd2x_pipe(display, INVALID_PIPE);
@@ -2318,9 +2312,7 @@ static void bxt_cdclk_uninit_hw(struct intel_display *display)
*/
void intel_cdclk_init_hw(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (DISPLAY_VER(display) >= 10 || IS_BROXTON(i915))
+ if (DISPLAY_VER(display) >= 10 || display->platform.broxton)
bxt_cdclk_init_hw(display);
else if (DISPLAY_VER(display) == 9)
skl_cdclk_init_hw(display);
@@ -2335,9 +2327,7 @@ void intel_cdclk_init_hw(struct intel_display *display)
*/
void intel_cdclk_uninit_hw(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (DISPLAY_VER(display) >= 10 || IS_BROXTON(i915))
+ if (DISPLAY_VER(display) >= 10 || display->platform.broxton)
bxt_cdclk_uninit_hw(display);
else if (DISPLAY_VER(display) == 9)
skl_cdclk_uninit_hw(display);
@@ -2438,10 +2428,8 @@ static bool intel_cdclk_can_cd2x_update(struct intel_display *display,
const struct intel_cdclk_config *a,
const struct intel_cdclk_config *b)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
/* Older hw doesn't have the capability */
- if (DISPLAY_VER(display) < 10 && !IS_BROXTON(dev_priv))
+ if (DISPLAY_VER(display) < 10 && !display->platform.broxton)
return false;
/*
@@ -2495,7 +2483,7 @@ static void intel_pcode_notify(struct intel_display *display,
int ret;
u32 update_mask = 0;
- if (!IS_DG2(i915))
+ if (!display->platform.dg2)
return;
update_mask = DISPLAY_TO_PCODE_UPDATE_MASK(cdclk, active_pipe_count, voltage_level);
@@ -2521,7 +2509,6 @@ static void intel_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe, const char *context)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
if (!intel_cdclk_changed(&display->cdclk.hw, cdclk_config))
@@ -2538,7 +2525,7 @@ static void intel_set_cdclk(struct intel_display *display,
intel_psr_pause(intel_dp);
}
- intel_audio_cdclk_change_pre(dev_priv);
+ intel_audio_cdclk_change_pre(display);
/*
* Lock aux/gmbus while we change cdclk in case those
@@ -2568,7 +2555,7 @@ static void intel_set_cdclk(struct intel_display *display,
intel_psr_resume(intel_dp);
}
- intel_audio_cdclk_change_post(dev_priv);
+ intel_audio_cdclk_change_post(display);
if (drm_WARN(display->drm,
intel_cdclk_changed(&display->cdclk.hw, cdclk_config),
@@ -2682,7 +2669,6 @@ void
intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct intel_cdclk_state *old_cdclk_state =
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
@@ -2694,7 +2680,7 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
&new_cdclk_state->actual))
return;
- if (IS_DG2(i915))
+ if (display->platform.dg2)
intel_cdclk_pcode_pre_notify(state);
if (new_cdclk_state->disable_pipes) {
@@ -2736,7 +2722,6 @@ void
intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct intel_cdclk_state *old_cdclk_state =
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
@@ -2747,7 +2732,7 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
&new_cdclk_state->actual))
return;
- if (IS_DG2(i915))
+ if (display->platform.dg2)
intel_cdclk_pcode_post_notify(state);
if (!new_cdclk_state->disable_pipes &&
@@ -2771,12 +2756,10 @@ static int intel_cdclk_ppc(struct intel_display *display, bool double_wide)
/* max pixel rate as % of CDCLK (not accounting for PPC) */
static int intel_cdclk_guardband(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 9 ||
- IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ display->platform.broadwell || display->platform.haswell)
return 100;
- else if (IS_CHERRYVIEW(dev_priv))
+ else if (display->platform.cherryview)
return 95;
else
return 90;
@@ -2805,7 +2788,7 @@ static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state)
return min_cdclk;
}
-int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
+static int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
{
int min_cdclk;
@@ -2879,7 +2862,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
* by changing the cd2x divider (see glk_cdclk_table[]) and
* thus a full modeset won't be needed then.
*/
- if (IS_GEMINILAKE(dev_priv) && cdclk_state->active_pipes &&
+ if (display->platform.geminilake && cdclk_state->active_pipes &&
!is_power_of_2(cdclk_state->active_pipes))
min_cdclk = max(min_cdclk, 2 * 96000);
@@ -3233,7 +3216,6 @@ static bool intel_cdclk_need_serialize(struct intel_display *display,
const struct intel_cdclk_state *old_cdclk_state,
const struct intel_cdclk_state *new_cdclk_state)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
bool power_well_cnt_changed = hweight8(old_cdclk_state->active_pipes) !=
hweight8(new_cdclk_state->active_pipes);
bool cdclk_changed = intel_cdclk_changed(&old_cdclk_state->actual,
@@ -3242,7 +3224,7 @@ static bool intel_cdclk_need_serialize(struct intel_display *display,
* We need to poke hw for gen >= 12, because we notify PCode if
* pipe power well count changes.
*/
- return cdclk_changed || (IS_DG2(i915) && power_well_cnt_changed);
+ return cdclk_changed || (display->platform.dg2 && power_well_cnt_changed);
}
int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
@@ -3358,6 +3340,34 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
return 0;
}
+void intel_cdclk_update_hw_state(struct intel_display *display)
+{
+ struct intel_cdclk_state *cdclk_state =
+ to_intel_cdclk_state(display->cdclk.obj.state);
+ struct intel_crtc *crtc;
+
+ cdclk_state->active_pipes = 0;
+
+ for_each_intel_crtc(display->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ enum pipe pipe = crtc->pipe;
+
+ if (crtc_state->hw.active)
+ cdclk_state->active_pipes |= BIT(pipe);
+
+ cdclk_state->min_cdclk[pipe] = intel_crtc_compute_min_cdclk(crtc_state);
+ cdclk_state->min_voltage_level[pipe] = crtc_state->min_voltage_level;
+ }
+}
+
+void intel_cdclk_crtc_disable_noatomic(struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ intel_cdclk_update_hw_state(display);
+}
+
static int intel_compute_max_dotclk(struct intel_display *display)
{
int ppc = intel_cdclk_ppc(display, HAS_DOUBLE_WIDE(display));
@@ -3377,11 +3387,9 @@ static int intel_compute_max_dotclk(struct intel_display *display)
*/
void intel_update_max_cdclk(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 30) {
display->cdclk.max_cdclk_freq = 691200;
- } else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
+ } else if (display->platform.jasperlake || display->platform.elkhartlake) {
if (display->cdclk.hw.ref == 24000)
display->cdclk.max_cdclk_freq = 552000;
else
@@ -3391,9 +3399,9 @@ void intel_update_max_cdclk(struct intel_display *display)
display->cdclk.max_cdclk_freq = 648000;
else
display->cdclk.max_cdclk_freq = 652800;
- } else if (IS_GEMINILAKE(dev_priv)) {
+ } else if (display->platform.geminilake) {
display->cdclk.max_cdclk_freq = 316800;
- } else if (IS_BROXTON(dev_priv)) {
+ } else if (display->platform.broxton) {
display->cdclk.max_cdclk_freq = 624000;
} else if (DISPLAY_VER(display) == 9) {
u32 limit = intel_de_read(display, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
@@ -3417,7 +3425,7 @@ void intel_update_max_cdclk(struct intel_display *display)
max_cdclk = 308571;
display->cdclk.max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
- } else if (IS_BROADWELL(dev_priv)) {
+ } else if (display->platform.broadwell) {
/*
* FIXME with extra cooling we can allow
* 540 MHz for ULX and 675 Mhz for ULT.
@@ -3426,15 +3434,15 @@ void intel_update_max_cdclk(struct intel_display *display)
*/
if (intel_de_read(display, FUSE_STRAP) & HSW_CDCLK_LIMIT)
display->cdclk.max_cdclk_freq = 450000;
- else if (IS_BROADWELL_ULX(dev_priv))
+ else if (display->platform.broadwell_ulx)
display->cdclk.max_cdclk_freq = 450000;
- else if (IS_BROADWELL_ULT(dev_priv))
+ else if (display->platform.broadwell_ult)
display->cdclk.max_cdclk_freq = 540000;
else
display->cdclk.max_cdclk_freq = 675000;
- } else if (IS_CHERRYVIEW(dev_priv)) {
+ } else if (display->platform.cherryview) {
display->cdclk.max_cdclk_freq = 320000;
- } else if (IS_VALLEYVIEW(dev_priv)) {
+ } else if (display->platform.valleyview) {
display->cdclk.max_cdclk_freq = 400000;
} else {
/* otherwise assume cdclk is fixed */
@@ -3458,8 +3466,6 @@ void intel_update_max_cdclk(struct intel_display *display)
*/
void intel_update_cdclk(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
intel_cdclk_get_cdclk(display, &display->cdclk.hw);
/*
@@ -3468,7 +3474,7 @@ void intel_update_cdclk(struct intel_display *display)
* of cdclk that generates 4MHz reference clock freq which is used to
* generate GMBus clock. This will vary with the cdclk freq.
*/
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (display->platform.valleyview || display->platform.cherryview)
intel_de_write(display, GMBUSFREQ_VLV,
DIV_ROUND_UP(display->cdclk.hw.cdclk, 1000));
}
@@ -3562,7 +3568,7 @@ u32 intel_read_rawclk(struct intel_display *display)
freq = cnp_rawclk(display);
else if (HAS_PCH_SPLIT(dev_priv))
freq = pch_rawclk(display);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ else if (display->platform.valleyview || display->platform.cherryview)
freq = vlv_hrawclk(display);
else if (DISPLAY_VER(display) >= 3)
freq = i9xx_hrawclk(display);
@@ -3743,8 +3749,6 @@ static const struct intel_cdclk_funcs i830_cdclk_funcs = {
*/
void intel_init_cdclk_hooks(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 30) {
display->funcs.cdclk = &xe3lpd_cdclk_funcs;
display->cdclk.table = xe3lpd_cdclk_table;
@@ -3757,80 +3761,80 @@ void intel_init_cdclk_hooks(struct intel_display *display)
} else if (DISPLAY_VER(display) >= 14) {
display->funcs.cdclk = &rplu_cdclk_funcs;
display->cdclk.table = mtl_cdclk_table;
- } else if (IS_DG2(dev_priv)) {
+ } else if (display->platform.dg2) {
display->funcs.cdclk = &tgl_cdclk_funcs;
display->cdclk.table = dg2_cdclk_table;
- } else if (IS_ALDERLAKE_P(dev_priv)) {
+ } else if (display->platform.alderlake_p) {
/* Wa_22011320316:adl-p[a0] */
- if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
+ if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
display->cdclk.table = adlp_a_step_cdclk_table;
display->funcs.cdclk = &tgl_cdclk_funcs;
- } else if (IS_RAPTORLAKE_U(dev_priv)) {
+ } else if (display->platform.alderlake_p_raptorlake_u) {
display->cdclk.table = rplu_cdclk_table;
display->funcs.cdclk = &rplu_cdclk_funcs;
} else {
display->cdclk.table = adlp_cdclk_table;
display->funcs.cdclk = &tgl_cdclk_funcs;
}
- } else if (IS_ROCKETLAKE(dev_priv)) {
+ } else if (display->platform.rocketlake) {
display->funcs.cdclk = &tgl_cdclk_funcs;
display->cdclk.table = rkl_cdclk_table;
} else if (DISPLAY_VER(display) >= 12) {
display->funcs.cdclk = &tgl_cdclk_funcs;
display->cdclk.table = icl_cdclk_table;
- } else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
+ } else if (display->platform.jasperlake || display->platform.elkhartlake) {
display->funcs.cdclk = &ehl_cdclk_funcs;
display->cdclk.table = icl_cdclk_table;
} else if (DISPLAY_VER(display) >= 11) {
display->funcs.cdclk = &icl_cdclk_funcs;
display->cdclk.table = icl_cdclk_table;
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
display->funcs.cdclk = &bxt_cdclk_funcs;
- if (IS_GEMINILAKE(dev_priv))
+ if (display->platform.geminilake)
display->cdclk.table = glk_cdclk_table;
else
display->cdclk.table = bxt_cdclk_table;
} else if (DISPLAY_VER(display) == 9) {
display->funcs.cdclk = &skl_cdclk_funcs;
- } else if (IS_BROADWELL(dev_priv)) {
+ } else if (display->platform.broadwell) {
display->funcs.cdclk = &bdw_cdclk_funcs;
- } else if (IS_HASWELL(dev_priv)) {
+ } else if (display->platform.haswell) {
display->funcs.cdclk = &hsw_cdclk_funcs;
- } else if (IS_CHERRYVIEW(dev_priv)) {
+ } else if (display->platform.cherryview) {
display->funcs.cdclk = &chv_cdclk_funcs;
- } else if (IS_VALLEYVIEW(dev_priv)) {
+ } else if (display->platform.valleyview) {
display->funcs.cdclk = &vlv_cdclk_funcs;
- } else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
+ } else if (display->platform.sandybridge || display->platform.ivybridge) {
display->funcs.cdclk = &fixed_400mhz_cdclk_funcs;
- } else if (IS_IRONLAKE(dev_priv)) {
+ } else if (display->platform.ironlake) {
display->funcs.cdclk = &ilk_cdclk_funcs;
- } else if (IS_GM45(dev_priv)) {
+ } else if (display->platform.gm45) {
display->funcs.cdclk = &gm45_cdclk_funcs;
- } else if (IS_G45(dev_priv)) {
+ } else if (display->platform.g45) {
display->funcs.cdclk = &g33_cdclk_funcs;
- } else if (IS_I965GM(dev_priv)) {
+ } else if (display->platform.i965gm) {
display->funcs.cdclk = &i965gm_cdclk_funcs;
- } else if (IS_I965G(dev_priv)) {
+ } else if (display->platform.i965g) {
display->funcs.cdclk = &fixed_400mhz_cdclk_funcs;
- } else if (IS_PINEVIEW(dev_priv)) {
+ } else if (display->platform.pineview) {
display->funcs.cdclk = &pnv_cdclk_funcs;
- } else if (IS_G33(dev_priv)) {
+ } else if (display->platform.g33) {
display->funcs.cdclk = &g33_cdclk_funcs;
- } else if (IS_I945GM(dev_priv)) {
+ } else if (display->platform.i945gm) {
display->funcs.cdclk = &i945gm_cdclk_funcs;
- } else if (IS_I945G(dev_priv)) {
+ } else if (display->platform.i945g) {
display->funcs.cdclk = &fixed_400mhz_cdclk_funcs;
- } else if (IS_I915GM(dev_priv)) {
+ } else if (display->platform.i915gm) {
display->funcs.cdclk = &i915gm_cdclk_funcs;
- } else if (IS_I915G(dev_priv)) {
+ } else if (display->platform.i915g) {
display->funcs.cdclk = &i915g_cdclk_funcs;
- } else if (IS_I865G(dev_priv)) {
+ } else if (display->platform.i865g) {
display->funcs.cdclk = &i865g_cdclk_funcs;
- } else if (IS_I85X(dev_priv)) {
+ } else if (display->platform.i85x) {
display->funcs.cdclk = &i85x_cdclk_funcs;
- } else if (IS_I845G(dev_priv)) {
+ } else if (display->platform.i845g) {
display->funcs.cdclk = &i845g_cdclk_funcs;
- } else if (IS_I830(dev_priv)) {
+ } else if (display->platform.i830) {
display->funcs.cdclk = &i830_cdclk_funcs;
}
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index 6b0e7a41eba3..a1cefd455d92 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -12,6 +12,7 @@
#include "intel_global_state.h"
struct intel_atomic_state;
+struct intel_crtc;
struct intel_crtc_state;
struct intel_display;
@@ -58,7 +59,6 @@ struct intel_cdclk_state {
bool disable_pipes;
};
-int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
void intel_cdclk_init_hw(struct intel_display *display);
void intel_cdclk_uninit_hw(struct intel_display *display);
void intel_init_cdclk_hooks(struct intel_display *display);
@@ -83,6 +83,8 @@ int intel_cdclk_atomic_check(struct intel_atomic_state *state,
int intel_cdclk_state_set_joined_mbus(struct intel_atomic_state *state, bool joined_mbus);
struct intel_cdclk_state *
intel_atomic_get_cdclk_state(struct intel_atomic_state *state);
+void intel_cdclk_update_hw_state(struct intel_display *display);
+void intel_cdclk_crtc_disable_noatomic(struct intel_crtc *crtc);
#define to_intel_cdclk_state(global_state) \
container_of_const((global_state), struct intel_cdclk_state, base)
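The bulk of the intel_cdclk.c changes above follow one mechanical pattern: drop the local drm_i915_private pointer and replace the IS_<PLATFORM>() macro checks with the per-display platform flags. A minimal before/after sketch of that pattern, using a hypothetical helper name:

/* Hypothetical helper, illustrating the conversion pattern only. */
static bool example_is_vlv_or_chv(struct intel_display *display)
{
	/* Before the conversion this needed struct drm_i915_private:
	 *   struct drm_i915_private *i915 = to_i915(display->drm);
	 *   return IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915);
	 */
	return display->platform.valleyview || display->platform.cherryview;
}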
diff --git a/drivers/gpu/drm/i915/display/intel_cmtg.c b/drivers/gpu/drm/i915/display/intel_cmtg.c
new file mode 100644
index 000000000000..07d7f4e8f60f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cmtg.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+
+#include <linux/string_choices.h>
+#include <linux/types.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_crtc.h"
+#include "intel_cmtg.h"
+#include "intel_cmtg_regs.h"
+#include "intel_de.h"
+#include "intel_display_device.h"
+#include "intel_display_power.h"
+
+/**
+ * DOC: Common Primary Timing Generator (CMTG)
+ *
+ * The CMTG is a timing generator that runs in parallel to the transcoders'
+ * timing generators (TGs) to provide a synchronization mechanism in which the
+ * CMTG acts as primary and the transcoder TGs as secondaries. The CMTG outputs
+ * its TG start and frame sync signals to the transcoders that are configured
+ * as secondary, which use those signals to synchronize their own timing with
+ * the CMTG's.
+ *
+ * The CMTG can be used only with eDP or MIPI command mode and supports the
+ * following use cases:
+ *
+ * - Dual eDP: The CMTG can be used to keep two eDP TGs in sync when on a
+ * dual eDP configuration (with or without PSR/PSR2 enabled).
+ *
+ * - Single eDP as secondary: It is also possible to use a single eDP
+ * configuration with the transcoder TG as secondary to the CMTG. That allows
+ * a second eDP to be added later, for a dual eDP configuration with CMTG,
+ * without requiring a modeset on the existing eDP.
+ *
+ * - DC6v: In DC6v, the transcoder might be off but the CMTG keeps running to
+ * maintain frame timings. When exiting DC6v, the transcoder TG then is
+ * synced back to the CMTG.
+ *
+ * Currently, the driver does not use the CMTG, but we need to make sure that
+ * we disable it in case we inherit a display configuration with it enabled.
+ */
+
+/*
+ * We describe here only the minimum data required to allow us to properly
+ * disable the CMTG if necessary.
+ */
+struct intel_cmtg_config {
+ bool cmtg_a_enable;
+ /*
+ * Xe2_LPD adds a second CMTG that can be used for dual eDP async mode.
+ */
+ bool cmtg_b_enable;
+ bool trans_a_secondary;
+ bool trans_b_secondary;
+};
+
+static bool intel_cmtg_has_cmtg_b(struct intel_display *display)
+{
+ return DISPLAY_VER(display) >= 20;
+}
+
+static bool intel_cmtg_has_clock_sel(struct intel_display *display)
+{
+ return DISPLAY_VER(display) >= 14;
+}
+
+static void intel_cmtg_dump_config(struct intel_display *display,
+ struct intel_cmtg_config *cmtg_config)
+{
+ drm_dbg_kms(display->drm,
+ "CMTG readout: CMTG A: %s, CMTG B: %s, Transcoder A secondary: %s, Transcoder B secondary: %s\n",
+ str_enabled_disabled(cmtg_config->cmtg_a_enable),
+ intel_cmtg_has_cmtg_b(display) ? str_enabled_disabled(cmtg_config->cmtg_b_enable) : "n/a",
+ str_yes_no(cmtg_config->trans_a_secondary),
+ str_yes_no(cmtg_config->trans_b_secondary));
+}
+
+static bool intel_cmtg_transcoder_is_secondary(struct intel_display *display,
+ enum transcoder trans)
+{
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
+ u32 val = 0;
+
+ if (!HAS_TRANSCODER(display, trans))
+ return false;
+
+ power_domain = POWER_DOMAIN_TRANSCODER(trans);
+
+ with_intel_display_power_if_enabled(display, power_domain, wakeref)
+ val = intel_de_read(display, TRANS_DDI_FUNC_CTL2(display, trans));
+
+ return val & CMTG_SECONDARY_MODE;
+}
+
+static void intel_cmtg_get_config(struct intel_display *display,
+ struct intel_cmtg_config *cmtg_config)
+{
+ u32 val;
+
+ val = intel_de_read(display, TRANS_CMTG_CTL_A);
+ cmtg_config->cmtg_a_enable = val & CMTG_ENABLE;
+
+ if (intel_cmtg_has_cmtg_b(display)) {
+ val = intel_de_read(display, TRANS_CMTG_CTL_B);
+ cmtg_config->cmtg_b_enable = val & CMTG_ENABLE;
+ }
+
+ cmtg_config->trans_a_secondary = intel_cmtg_transcoder_is_secondary(display, TRANSCODER_A);
+ cmtg_config->trans_b_secondary = intel_cmtg_transcoder_is_secondary(display, TRANSCODER_B);
+}
+
+static bool intel_cmtg_disable_requires_modeset(struct intel_display *display,
+ struct intel_cmtg_config *cmtg_config)
+{
+ if (DISPLAY_VER(display) >= 20)
+ return false;
+
+ return cmtg_config->trans_a_secondary || cmtg_config->trans_b_secondary;
+}
+
+static void intel_cmtg_disable(struct intel_display *display,
+ struct intel_cmtg_config *cmtg_config)
+{
+ u32 clk_sel_clr = 0;
+ u32 clk_sel_set = 0;
+
+ if (cmtg_config->trans_a_secondary)
+ intel_de_rmw(display, TRANS_DDI_FUNC_CTL2(display, TRANSCODER_A),
+ CMTG_SECONDARY_MODE, 0);
+
+ if (cmtg_config->trans_b_secondary)
+ intel_de_rmw(display, TRANS_DDI_FUNC_CTL2(display, TRANSCODER_B),
+ CMTG_SECONDARY_MODE, 0);
+
+ if (cmtg_config->cmtg_a_enable) {
+ drm_dbg_kms(display->drm, "Disabling CMTG A\n");
+ intel_de_rmw(display, TRANS_CMTG_CTL_A, CMTG_ENABLE, 0);
+ clk_sel_clr |= CMTG_CLK_SEL_A_MASK;
+ clk_sel_set |= CMTG_CLK_SEL_A_DISABLED;
+ }
+
+ if (cmtg_config->cmtg_b_enable) {
+ drm_dbg_kms(display->drm, "Disabling CMTG B\n");
+ intel_de_rmw(display, TRANS_CMTG_CTL_B, CMTG_ENABLE, 0);
+ clk_sel_clr |= CMTG_CLK_SEL_B_MASK;
+ clk_sel_set |= CMTG_CLK_SEL_B_DISABLED;
+ }
+
+ if (intel_cmtg_has_clock_sel(display) && clk_sel_clr)
+ intel_de_rmw(display, CMTG_CLK_SEL, clk_sel_clr, clk_sel_set);
+}
+
+/*
+ * Read out CMTG configuration and, on platforms that allow disabling it without
+ * a modeset, do it.
+ *
+ * This function must be called before any port PLL is disabled in the general
+ * sanitization process, because we need whatever port PLL is providing the
+ * clock for CMTG to be on before accessing CMTG registers.
+ */
+void intel_cmtg_sanitize(struct intel_display *display)
+{
+ struct intel_cmtg_config cmtg_config = {};
+
+ if (!HAS_CMTG(display))
+ return;
+
+ intel_cmtg_get_config(display, &cmtg_config);
+ intel_cmtg_dump_config(display, &cmtg_config);
+
+ /*
+ * FIXME: The driver is not prepared to handle cases where a modeset is
+ * required for disabling the CMTG: we need a proper way of tracking
+ * CMTG state and to do the right synchronization with respect to triggering
+ * the modeset as part of the disable sequence.
+ */
+ if (intel_cmtg_disable_requires_modeset(display, &cmtg_config))
+ return;
+
+ intel_cmtg_disable(display, &cmtg_config);
+}
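The call site for intel_cmtg_sanitize() is not shown in this hunk; a minimal sketch of the intended ordering, with a hypothetical caller name, is below. The constraint documented in the function comment is that the CMTG registers must be accessed (and the CMTG disabled) while the port PLL feeding it is still running:

/* Hypothetical caller, for illustration only. */
static void example_sanitize_display_hw(struct intel_display *display)
{
	/* Must run before port PLL sanitization so the CMTG's clock is
	 * still available while its registers are read and cleared.
	 */
	intel_cmtg_sanitize(display);

	/* ...only afterwards disable unused port PLLs, transcoders, etc. */
}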
diff --git a/drivers/gpu/drm/i915/display/intel_cmtg.h b/drivers/gpu/drm/i915/display/intel_cmtg.h
new file mode 100644
index 000000000000..ba62199adaa2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cmtg.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_CMTG_H__
+#define __INTEL_CMTG_H__
+
+struct intel_display;
+
+void intel_cmtg_sanitize(struct intel_display *display);
+
+#endif /* __INTEL_CMTG_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cmtg_regs.h b/drivers/gpu/drm/i915/display/intel_cmtg_regs.h
new file mode 100644
index 000000000000..668e41d65e86
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cmtg_regs.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_CMTG_REGS_H__
+#define __INTEL_CMTG_REGS_H__
+
+#include "i915_reg_defs.h"
+
+#define CMTG_CLK_SEL _MMIO(0x46160)
+#define CMTG_CLK_SEL_A_MASK REG_GENMASK(31, 29)
+#define CMTG_CLK_SEL_A_DISABLED REG_FIELD_PREP(CMTG_CLK_SEL_A_MASK, 0)
+#define CMTG_CLK_SEL_B_MASK REG_GENMASK(15, 13)
+#define CMTG_CLK_SEL_B_DISABLED REG_FIELD_PREP(CMTG_CLK_SEL_B_MASK, 0)
+
+#define TRANS_CMTG_CTL_A _MMIO(0x6fa88)
+#define TRANS_CMTG_CTL_B _MMIO(0x6fb88)
+#define CMTG_ENABLE REG_BIT(31)
+
+#endif /* __INTEL_CMTG_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 2f51eccdb27a..cfe14162231d 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -29,6 +29,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsb.h"
+#include "intel_vrr.h"
struct intel_color_funcs {
int (*color_check)(struct intel_atomic_state *state,
@@ -998,7 +999,7 @@ static void skl_color_commit_noarm(struct intel_dsb *dsb,
* output all black (until CSC_MODE is rearmed and properly latched).
* Once PSR exit (and proper register latching) has occurred the
* danger is over. Thus when PSR is enabled the CSC coeff/offset
- * register programming will be peformed from skl_color_commit_arm()
+ * register programming will be performed from skl_color_commit_arm()
* which is called after PSR exit.
*/
if (!crtc_state->has_psr)
@@ -1987,8 +1988,12 @@ void intel_color_prepare_commit(struct intel_atomic_state *state,
display->funcs.color->load_luts(crtc_state);
- intel_dsb_wait_vblank_delay(state, crtc_state->dsb_color_vblank);
- intel_dsb_interrupt(crtc_state->dsb_color_vblank);
+ if (crtc_state->use_dsb) {
+ intel_vrr_send_push(crtc_state->dsb_color_vblank, crtc_state);
+ intel_dsb_wait_vblank_delay(state, crtc_state->dsb_color_vblank);
+ intel_vrr_check_push_sent(crtc_state->dsb_color_vblank, crtc_state);
+ intel_dsb_interrupt(crtc_state->dsb_color_vblank);
+ }
intel_dsb_finish(crtc_state->dsb_color_vblank);
}
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 4fbe2e3542ca..17eea244cc83 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -3,20 +3,20 @@
* Copyright © 2018 Intel Corporation
*/
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_combo_phy.h"
#include "intel_combo_phy_regs.h"
#include "intel_de.h"
#include "intel_display_types.h"
-#define for_each_combo_phy(__dev_priv, __phy) \
+#define for_each_combo_phy(__display, __phy) \
for ((__phy) = PHY_A; (__phy) < I915_MAX_PHYS; (__phy)++) \
- for_each_if(intel_phy_is_combo(__dev_priv, __phy))
+ for_each_if(intel_phy_is_combo(__display, __phy))
-#define for_each_combo_phy_reverse(__dev_priv, __phy) \
+#define for_each_combo_phy_reverse(__display, __phy) \
for ((__phy) = I915_MAX_PHYS; (__phy)-- > PHY_A;) \
- for_each_if(intel_phy_is_combo(__dev_priv, __phy))
+ for_each_if(intel_phy_is_combo(__display, __phy))
enum {
PROCMON_0_85V_DOT_0,
@@ -53,11 +53,11 @@ static const struct icl_procmon {
};
static const struct icl_procmon *
-icl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy)
+icl_get_procmon_ref_values(struct intel_display *display, enum phy phy)
{
u32 val;
- val = intel_de_read(dev_priv, ICL_PORT_COMP_DW3(phy));
+ val = intel_de_read(display, ICL_PORT_COMP_DW3(phy));
switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
default:
MISSING_CASE(val);
@@ -75,57 +75,57 @@ icl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy)
}
}
-static void icl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
+static void icl_set_procmon_ref_values(struct intel_display *display,
enum phy phy)
{
const struct icl_procmon *procmon;
- procmon = icl_get_procmon_ref_values(dev_priv, phy);
+ procmon = icl_get_procmon_ref_values(display, phy);
- intel_de_rmw(dev_priv, ICL_PORT_COMP_DW1(phy),
+ intel_de_rmw(display, ICL_PORT_COMP_DW1(phy),
(0xff << 16) | 0xff, procmon->dw1);
- intel_de_write(dev_priv, ICL_PORT_COMP_DW9(phy), procmon->dw9);
- intel_de_write(dev_priv, ICL_PORT_COMP_DW10(phy), procmon->dw10);
+ intel_de_write(display, ICL_PORT_COMP_DW9(phy), procmon->dw9);
+ intel_de_write(display, ICL_PORT_COMP_DW10(phy), procmon->dw10);
}
-static bool check_phy_reg(struct drm_i915_private *dev_priv,
+static bool check_phy_reg(struct intel_display *display,
enum phy phy, i915_reg_t reg, u32 mask,
u32 expected_val)
{
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
if ((val & mask) != expected_val) {
- drm_dbg(&dev_priv->drm,
- "Combo PHY %c reg %08x state mismatch: "
- "current %08x mask %08x expected %08x\n",
- phy_name(phy),
- reg.reg, val, mask, expected_val);
+ drm_dbg_kms(display->drm,
+ "Combo PHY %c reg %08x state mismatch: "
+ "current %08x mask %08x expected %08x\n",
+ phy_name(phy),
+ reg.reg, val, mask, expected_val);
return false;
}
return true;
}
-static bool icl_verify_procmon_ref_values(struct drm_i915_private *dev_priv,
+static bool icl_verify_procmon_ref_values(struct intel_display *display,
enum phy phy)
{
const struct icl_procmon *procmon;
bool ret;
- procmon = icl_get_procmon_ref_values(dev_priv, phy);
+ procmon = icl_get_procmon_ref_values(display, phy);
- ret = check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW1(phy),
+ ret = check_phy_reg(display, phy, ICL_PORT_COMP_DW1(phy),
(0xff << 16) | 0xff, procmon->dw1);
- ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW9(phy),
+ ret &= check_phy_reg(display, phy, ICL_PORT_COMP_DW9(phy),
-1U, procmon->dw9);
- ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW10(phy),
+ ret &= check_phy_reg(display, phy, ICL_PORT_COMP_DW10(phy),
-1U, procmon->dw10);
return ret;
}
-static bool has_phy_misc(struct drm_i915_private *i915, enum phy phy)
+static bool has_phy_misc(struct intel_display *display, enum phy phy)
{
/*
* Some platforms only expect PHY_MISC to be programmed for PHY-A and
@@ -136,32 +136,30 @@ static bool has_phy_misc(struct drm_i915_private *i915, enum phy phy)
* that we program it for PHY A.
*/
- if (IS_ALDERLAKE_S(i915))
+ if (display->platform.alderlake_s)
return phy == PHY_A;
- else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) ||
- IS_ROCKETLAKE(i915) ||
- IS_DG1(i915))
+ else if ((display->platform.jasperlake || display->platform.elkhartlake) ||
+ display->platform.rocketlake ||
+ display->platform.dg1)
return phy < PHY_C;
return true;
}
-static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
+static bool icl_combo_phy_enabled(struct intel_display *display,
enum phy phy)
{
/* The PHY C added by EHL has no PHY_MISC register */
- if (!has_phy_misc(dev_priv, phy))
- return intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)) & COMP_INIT;
+ if (!has_phy_misc(display, phy))
+ return intel_de_read(display, ICL_PORT_COMP_DW0(phy)) & COMP_INIT;
else
- return !(intel_de_read(dev_priv, ICL_PHY_MISC(phy)) &
+ return !(intel_de_read(display, ICL_PHY_MISC(phy)) &
ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) &&
- (intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)) & COMP_INIT);
+ (intel_de_read(display, ICL_PORT_COMP_DW0(phy)) & COMP_INIT);
}
-static bool ehl_vbt_ddi_d_present(struct drm_i915_private *i915)
+static bool ehl_vbt_ddi_d_present(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
bool ddi_a_present = intel_bios_is_port_present(display, PORT_A);
bool ddi_d_present = intel_bios_is_port_present(display, PORT_D);
bool dsi_present = intel_bios_is_dsi_present(display, NULL);
@@ -181,13 +179,13 @@ static bool ehl_vbt_ddi_d_present(struct drm_i915_private *i915)
* in the log and let the internal display win.
*/
if (ddi_d_present)
- drm_err(&i915->drm,
+ drm_err(display->drm,
"VBT claims to have both internal and external displays on PHY A. Configuring for internal.\n");
return false;
}
-static bool phy_is_master(struct drm_i915_private *dev_priv, enum phy phy)
+static bool phy_is_master(struct intel_display *display, enum phy phy)
{
/*
* Certain PHYs are connected to compensation resistors and act
@@ -207,64 +205,64 @@ static bool phy_is_master(struct drm_i915_private *dev_priv, enum phy phy)
*/
if (phy == PHY_A)
return true;
- else if (IS_ALDERLAKE_S(dev_priv))
+ else if (display->platform.alderlake_s)
return phy == PHY_D;
- else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
+ else if (display->platform.dg1 || display->platform.rocketlake)
return phy == PHY_C;
return false;
}
-static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
+static bool icl_combo_phy_verify_state(struct intel_display *display,
enum phy phy)
{
bool ret = true;
u32 expected_val = 0;
- if (!icl_combo_phy_enabled(dev_priv, phy))
+ if (!icl_combo_phy_enabled(display, phy))
return false;
- if (DISPLAY_VER(dev_priv) >= 12) {
- ret &= check_phy_reg(dev_priv, phy, ICL_PORT_TX_DW8_LN(0, phy),
+ if (DISPLAY_VER(display) >= 12) {
+ ret &= check_phy_reg(display, phy, ICL_PORT_TX_DW8_LN(0, phy),
ICL_PORT_TX_DW8_ODCC_CLK_SEL |
ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK,
ICL_PORT_TX_DW8_ODCC_CLK_SEL |
ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2);
- ret &= check_phy_reg(dev_priv, phy, ICL_PORT_PCS_DW1_LN(0, phy),
+ ret &= check_phy_reg(display, phy, ICL_PORT_PCS_DW1_LN(0, phy),
DCC_MODE_SELECT_MASK, RUN_DCC_ONCE);
}
- ret &= icl_verify_procmon_ref_values(dev_priv, phy);
+ ret &= icl_verify_procmon_ref_values(display, phy);
- if (phy_is_master(dev_priv, phy)) {
- ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy),
+ if (phy_is_master(display, phy)) {
+ ret &= check_phy_reg(display, phy, ICL_PORT_COMP_DW8(phy),
IREFGEN, IREFGEN);
- if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
- if (ehl_vbt_ddi_d_present(dev_priv))
+ if (display->platform.jasperlake || display->platform.elkhartlake) {
+ if (ehl_vbt_ddi_d_present(display))
expected_val = ICL_PHY_MISC_MUX_DDID;
- ret &= check_phy_reg(dev_priv, phy, ICL_PHY_MISC(phy),
+ ret &= check_phy_reg(display, phy, ICL_PHY_MISC(phy),
ICL_PHY_MISC_MUX_DDID,
expected_val);
}
}
- ret &= check_phy_reg(dev_priv, phy, ICL_PORT_CL_DW5(phy),
+ ret &= check_phy_reg(display, phy, ICL_PORT_CL_DW5(phy),
CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
return ret;
}
-void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
+void intel_combo_phy_power_up_lanes(struct intel_display *display,
enum phy phy, bool is_dsi,
int lane_count, bool lane_reversal)
{
u8 lane_mask;
if (is_dsi) {
- drm_WARN_ON(&dev_priv->drm, lane_reversal);
+ drm_WARN_ON(display->drm, lane_reversal);
switch (lane_count) {
case 1:
@@ -302,28 +300,28 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
}
}
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW10(phy),
+ intel_de_rmw(display, ICL_PORT_CL_DW10(phy),
PWR_DOWN_LN_MASK, lane_mask);
}
-static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
+static void icl_combo_phys_init(struct intel_display *display)
{
enum phy phy;
- for_each_combo_phy(dev_priv, phy) {
+ for_each_combo_phy(display, phy) {
const struct icl_procmon *procmon;
u32 val;
- if (icl_combo_phy_verify_state(dev_priv, phy))
+ if (icl_combo_phy_verify_state(display, phy))
continue;
- procmon = icl_get_procmon_ref_values(dev_priv, phy);
+ procmon = icl_get_procmon_ref_values(display, phy);
- drm_dbg(&dev_priv->drm,
- "Initializing combo PHY %c (Voltage/Process Info : %s)\n",
- phy_name(phy), procmon->name);
+ drm_dbg_kms(display->drm,
+ "Initializing combo PHY %c (Voltage/Process Info : %s)\n",
+ phy_name(phy), procmon->name);
- if (!has_phy_misc(dev_priv, phy))
+ if (!has_phy_misc(display, phy))
goto skip_phy_misc;
/*
@@ -334,84 +332,84 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
* based on whether our VBT indicates the presence of any
* "internal" child devices.
*/
- val = intel_de_read(dev_priv, ICL_PHY_MISC(phy));
- if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
+ val = intel_de_read(display, ICL_PHY_MISC(phy));
+ if ((display->platform.jasperlake || display->platform.elkhartlake) &&
phy == PHY_A) {
val &= ~ICL_PHY_MISC_MUX_DDID;
- if (ehl_vbt_ddi_d_present(dev_priv))
+ if (ehl_vbt_ddi_d_present(display))
val |= ICL_PHY_MISC_MUX_DDID;
}
val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
- intel_de_write(dev_priv, ICL_PHY_MISC(phy), val);
+ intel_de_write(display, ICL_PHY_MISC(phy), val);
skip_phy_misc:
- if (DISPLAY_VER(dev_priv) >= 12) {
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW8_LN(0, phy));
+ if (DISPLAY_VER(display) >= 12) {
+ val = intel_de_read(display, ICL_PORT_TX_DW8_LN(0, phy));
val &= ~ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK;
val |= ICL_PORT_TX_DW8_ODCC_CLK_SEL;
val |= ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2;
- intel_de_write(dev_priv, ICL_PORT_TX_DW8_GRP(phy), val);
+ intel_de_write(display, ICL_PORT_TX_DW8_GRP(phy), val);
- val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
+ val = intel_de_read(display, ICL_PORT_PCS_DW1_LN(0, phy));
val &= ~DCC_MODE_SELECT_MASK;
val |= RUN_DCC_ONCE;
- intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val);
+ intel_de_write(display, ICL_PORT_PCS_DW1_GRP(phy), val);
}
- icl_set_procmon_ref_values(dev_priv, phy);
+ icl_set_procmon_ref_values(display, phy);
- if (phy_is_master(dev_priv, phy))
- intel_de_rmw(dev_priv, ICL_PORT_COMP_DW8(phy),
+ if (phy_is_master(display, phy))
+ intel_de_rmw(display, ICL_PORT_COMP_DW8(phy),
0, IREFGEN);
- intel_de_rmw(dev_priv, ICL_PORT_COMP_DW0(phy), 0, COMP_INIT);
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy),
+ intel_de_rmw(display, ICL_PORT_COMP_DW0(phy), 0, COMP_INIT);
+ intel_de_rmw(display, ICL_PORT_CL_DW5(phy),
0, CL_POWER_DOWN_ENABLE);
}
}
-static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
+static void icl_combo_phys_uninit(struct intel_display *display)
{
enum phy phy;
- for_each_combo_phy_reverse(dev_priv, phy) {
+ for_each_combo_phy_reverse(display, phy) {
if (phy == PHY_A &&
- !icl_combo_phy_verify_state(dev_priv, phy)) {
- if (IS_TIGERLAKE(dev_priv) || IS_DG1(dev_priv)) {
+ !icl_combo_phy_verify_state(display, phy)) {
+ if (display->platform.tigerlake || display->platform.dg1) {
/*
* A known problem with old ifwi:
* https://gitlab.freedesktop.org/drm/intel/-/issues/2411
* Suppress the warning for CI. Remove ASAP!
*/
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Combo PHY %c HW state changed unexpectedly\n",
phy_name(phy));
} else {
- drm_warn(&dev_priv->drm,
+ drm_warn(display->drm,
"Combo PHY %c HW state changed unexpectedly\n",
phy_name(phy));
}
}
- if (!has_phy_misc(dev_priv, phy))
+ if (!has_phy_misc(display, phy))
goto skip_phy_misc;
- intel_de_rmw(dev_priv, ICL_PHY_MISC(phy), 0,
+ intel_de_rmw(display, ICL_PHY_MISC(phy), 0,
ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN);
skip_phy_misc:
- intel_de_rmw(dev_priv, ICL_PORT_COMP_DW0(phy), COMP_INIT, 0);
+ intel_de_rmw(display, ICL_PORT_COMP_DW0(phy), COMP_INIT, 0);
}
}
-void intel_combo_phy_init(struct drm_i915_private *i915)
+void intel_combo_phy_init(struct intel_display *display)
{
- icl_combo_phys_init(i915);
+ icl_combo_phys_init(display);
}
-void intel_combo_phy_uninit(struct drm_i915_private *i915)
+void intel_combo_phy_uninit(struct intel_display *display)
{
- icl_combo_phys_uninit(i915);
+ icl_combo_phys_uninit(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.h b/drivers/gpu/drm/i915/display/intel_combo_phy.h
index 660886f86c59..3f5dba78e533 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.h
@@ -8,12 +8,12 @@
#include <linux/types.h>
-struct drm_i915_private;
enum phy;
+struct intel_display;
-void intel_combo_phy_init(struct drm_i915_private *dev_priv);
-void intel_combo_phy_uninit(struct drm_i915_private *dev_priv);
-void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
+void intel_combo_phy_init(struct intel_display *display);
+void intel_combo_phy_uninit(struct intel_display *display);
+void intel_combo_phy_power_up_lanes(struct intel_display *display,
enum phy phy, bool is_dsi,
int lane_count, bool lane_reversal);
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
index 0964e392d02c..ee41acdccf4e 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
@@ -133,6 +133,8 @@
#define TX_TRAINING_EN REG_BIT(31)
#define TAP2_DISABLE REG_BIT(30)
#define TAP3_DISABLE REG_BIT(29)
+#define CURSOR_PROGRAM REG_BIT(26)
+#define COEFF_POLARITY REG_BIT(25)
#define SCALING_MODE_SEL_MASK REG_GENMASK(20, 18)
#define SCALING_MODE_SEL(x) REG_FIELD_PREP(SCALING_MODE_SEL_MASK, (x))
#define RTERM_SELECT_MASK REG_GENMASK(5, 3)
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index c65887870ddc..e42357bd9e80 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -28,6 +28,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
#include "i915_drv.h"
#include "intel_backlight.h"
@@ -37,6 +38,44 @@
#include "intel_hdcp.h"
#include "intel_panel.h"
+static void intel_connector_modeset_retry_work_fn(struct work_struct *work)
+{
+ struct intel_connector *connector = container_of(work, typeof(*connector),
+ modeset_retry_work);
+ struct intel_display *display = to_intel_display(connector);
+
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n", connector->base.base.id,
+ connector->base.name);
+
+ /* Grab the locks before changing connector property */
+ mutex_lock(&display->drm->mode_config.mutex);
+ /* Set connector link status to BAD and send a Uevent to notify
+ * userspace to do a modeset.
+ */
+ drm_connector_set_link_status_property(&connector->base,
+ DRM_MODE_LINK_STATUS_BAD);
+ mutex_unlock(&display->drm->mode_config.mutex);
+ /* Send Hotplug uevent so userspace can reprobe */
+ drm_kms_helper_connector_hotplug_event(&connector->base);
+
+ drm_connector_put(&connector->base);
+}
+
+void intel_connector_queue_modeset_retry_work(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+
+ drm_connector_get(&connector->base);
+ if (!queue_work(i915->unordered_wq, &connector->modeset_retry_work))
+ drm_connector_put(&connector->base);
+}
+
+void intel_connector_cancel_modeset_retry_work(struct intel_connector *connector)
+{
+ if (cancel_work_sync(&connector->modeset_retry_work))
+ drm_connector_put(&connector->base);
+}
+
int intel_connector_init(struct intel_connector *connector)
{
struct intel_digital_connector_state *conn_state;
@@ -56,6 +95,9 @@ int intel_connector_init(struct intel_connector *connector)
intel_panel_init_alloc(connector);
+ INIT_WORK(&connector->modeset_retry_work,
+ intel_connector_modeset_retry_work_fn);
+
return 0;
}
@@ -103,8 +145,8 @@ void intel_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
- if (intel_connector->port)
- drm_dp_mst_put_port_malloc(intel_connector->port);
+ if (intel_connector->mst.port)
+ drm_dp_mst_put_port_malloc(intel_connector->mst.port);
kfree(connector);
}
diff --git a/drivers/gpu/drm/i915/display/intel_connector.h b/drivers/gpu/drm/i915/display/intel_connector.h
index bafde3f11ff4..aafb25a814fa 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.h
+++ b/drivers/gpu/drm/i915/display/intel_connector.h
@@ -33,5 +33,7 @@ void intel_attach_aspect_ratio_property(struct drm_connector *connector);
void intel_attach_hdmi_colorspace_property(struct drm_connector *connector);
void intel_attach_dp_colorspace_property(struct drm_connector *connector);
void intel_attach_scaling_mode_property(struct drm_connector *connector);
+void intel_connector_queue_modeset_retry_work(struct intel_connector *connector);
+void intel_connector_cancel_modeset_retry_work(struct intel_connector *connector);
#endif /* __INTEL_CONNECTOR_H__ */
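A minimal usage sketch for the modeset-retry helpers added above. The failure and teardown paths shown here are hypothetical callers, not part of this hunk; what the sketch illustrates follows from the code in intel_connector.c: queueing takes an extra connector reference that is dropped either by the work item itself or by a successful cancel, so a teardown path that called cancel_work_sync() directly would leak that reference.

/* Hypothetical failure/teardown paths, for illustration only. */
static void example_link_training_failed(struct intel_connector *connector)
{
	/* The work item marks the link BAD and sends a hotplug uevent. */
	intel_connector_queue_modeset_retry_work(connector);
}

static void example_connector_teardown(struct intel_connector *connector)
{
	/* Drops the reference taken at queue time if the work was pending. */
	intel_connector_cancel_modeset_retry_work(connector);
}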
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 4634d3fd9f20..76ffb3f8467c 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -54,6 +54,7 @@
#include "intel_load_detect.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
+#include "intel_pfit.h"
/* Here's the desired hotplug mode */
#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_ENABLE | \
@@ -108,19 +109,18 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
intel_wakeref_t wakeref;
bool ret;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+ wakeref = intel_display_power_get_if_enabled(display,
encoder->power_domain);
if (!wakeref)
return false;
ret = intel_crt_port_enabled(display, crt->adpa_reg, pipe);
- intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+ intel_display_power_put(display, encoder->power_domain, wakeref);
return ret;
}
@@ -251,11 +251,10 @@ static void hsw_disable_crt(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
drm_WARN_ON(display->drm, !old_crtc_state->has_pch_encoder);
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+ intel_set_pch_fifo_underrun_reporting(display, PIPE_A, false);
}
static void hsw_post_disable_crt(struct intel_atomic_state *state,
@@ -265,7 +264,6 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state,
{
struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
intel_crtc_vblank_off(old_crtc_state);
@@ -285,7 +283,7 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state,
drm_WARN_ON(display->drm, !old_crtc_state->has_pch_encoder);
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+ intel_set_pch_fifo_underrun_reporting(display, PIPE_A, true);
}
static void hsw_pre_pll_enable_crt(struct intel_atomic_state *state,
@@ -294,11 +292,10 @@ static void hsw_pre_pll_enable_crt(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
drm_WARN_ON(display->drm, !crtc_state->has_pch_encoder);
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+ intel_set_pch_fifo_underrun_reporting(display, PIPE_A, false);
}
static void hsw_pre_enable_crt(struct intel_atomic_state *state,
@@ -307,13 +304,12 @@ static void hsw_pre_enable_crt(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
drm_WARN_ON(display->drm, !crtc_state->has_pch_encoder);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+ intel_set_cpu_fifo_underrun_reporting(display, pipe, false);
hsw_fdi_link_train(encoder, crtc_state);
@@ -326,7 +322,6 @@ static void hsw_enable_crt(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
@@ -344,8 +339,8 @@ static void hsw_enable_crt(struct intel_atomic_state *state,
intel_crtc_wait_for_next_vblank(crtc);
intel_crtc_wait_for_next_vblank(crtc);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+ intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
+ intel_set_pch_fifo_underrun_reporting(display, PIPE_A, true);
}
static void intel_enable_crt(struct intel_atomic_state *state,
@@ -358,7 +353,7 @@ static void intel_enable_crt(struct intel_atomic_state *state,
static enum drm_mode_status
intel_crt_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct intel_display *display = to_intel_display(connector->dev);
struct drm_i915_private *dev_priv = to_i915(connector->dev);
@@ -366,7 +361,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
enum drm_mode_status status;
int max_clock;
- status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
+ status = intel_cpu_transcoder_mode_valid(display, mode);
if (status != MODE_OK)
return status;
@@ -745,8 +740,10 @@ intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
transconf | TRANSCONF_FORCE_BORDER);
intel_de_posting_read(display,
TRANSCONF(display, cpu_transcoder));
- /* Wait for next Vblank to substitue
- * border color for Color info */
+ /*
+ * Wait for next Vblank to substitute
+ * border color for Color info.
+ */
intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(display, pipe));
st00 = intel_de_read8(display, _VGA_MSR_WRITE);
status = ((st00 & (1 << 4)) != 0) ?
@@ -856,7 +853,6 @@ intel_crt_detect(struct drm_connector *connector,
bool force)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
struct intel_encoder *encoder = &crt->base;
struct drm_atomic_state *state;
@@ -874,7 +870,7 @@ intel_crt_detect(struct drm_connector *connector,
return connector->status;
if (display->params.load_detect_test) {
- wakeref = intel_display_power_get(dev_priv, encoder->power_domain);
+ wakeref = intel_display_power_get(display, encoder->power_domain);
goto load_detect;
}
@@ -882,7 +878,7 @@ intel_crt_detect(struct drm_connector *connector,
if (dmi_check_system(intel_spurious_crt_detect))
return connector_status_disconnected;
- wakeref = intel_display_power_get(dev_priv, encoder->power_domain);
+ wakeref = intel_display_power_get(display, encoder->power_domain);
if (I915_HAS_HOTPLUG(display)) {
/* We can not rely on the HPD pin always being correctly wired
@@ -939,7 +935,7 @@ load_detect:
}
out:
- intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+ intel_display_power_put(display, encoder->power_domain, wakeref);
return status;
}
@@ -957,7 +953,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
if (!intel_display_driver_check_access(display))
return drm_edid_connector_add_modes(connector);
- wakeref = intel_display_power_get(dev_priv, encoder->power_domain);
+ wakeref = intel_display_power_get(display, encoder->power_domain);
ret = intel_crt_ddc_get_modes(connector, connector->ddc);
if (ret || !IS_G4X(dev_priv))
@@ -968,7 +964,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
ret = intel_crt_ddc_get_modes(connector, ddc);
out:
- intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+ intel_display_power_put(display, encoder->power_domain, wakeref);
return ret;
}
@@ -1099,7 +1095,7 @@ void intel_crt_init(struct intel_display *display)
connector->base.polled = connector->polled;
if (HAS_DDI(display)) {
- assert_port_valid(dev_priv, PORT_E);
+ assert_port_valid(display, PORT_E);
crt->base.port = PORT_E;
crt->base.get_config = hsw_crt_get_config;
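
Every intel_crt.c hunk above makes the same substitution: the display power helpers now take the struct intel_display pointer rather than drm_i915_private. A minimal sketch (not part of the patch) of the two resulting patterns, hardware readout with a conditional grab versus detection with an unconditional grab; probe_hw() and do_detect() are purely illustrative placeholders:

static bool example_readout(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	intel_wakeref_t wakeref;
	bool ret;

	/* Skip the register access entirely if the power well is already off. */
	wakeref = intel_display_power_get_if_enabled(display,
						     encoder->power_domain);
	if (!wakeref)
		return false;

	ret = probe_hw(display);		/* illustrative register probe */

	intel_display_power_put(display, encoder->power_domain, wakeref);
	return ret;
}

static enum drm_connector_status example_detect(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	enum drm_connector_status status;
	intel_wakeref_t wakeref;

	/* Force the power well on for the duration of the probe. */
	wakeref = intel_display_power_get(display, encoder->power_domain);
	status = do_detect(display);		/* illustrative detect helper */
	intel_display_power_put(display, encoder->power_domain, wakeref);

	return status;
}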
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index c910168602d2..5b2603ef2ff7 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -45,9 +45,9 @@ static void assert_vblank_disabled(struct drm_crtc *crtc)
drm_crtc_vblank_put(crtc);
}
-struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915)
+struct intel_crtc *intel_first_crtc(struct intel_display *display)
{
- return to_intel_crtc(drm_crtc_from_index(&i915->drm, 0));
+ return to_intel_crtc(drm_crtc_from_index(display->drm, 0));
}
struct intel_crtc *intel_crtc_for_pipe(struct intel_display *display,
@@ -68,10 +68,9 @@ void intel_crtc_wait_for_next_vblank(struct intel_crtc *crtc)
drm_crtc_wait_one_vblank(&crtc->base);
}
-void intel_wait_for_vblank_if_active(struct drm_i915_private *i915,
+void intel_wait_for_vblank_if_active(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &i915->display;
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
if (crtc->active)
@@ -93,10 +92,10 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
/*
- * From Gen 11, In case of dsi cmd mode, frame counter wouldnt
+ * From Gen 11, in case of dsi cmd mode, frame counter wouldn't
* have updated at the beginning of TE, if we want to use
* the hw counter, then we would find it updated in only
* the next TE, hence switching to sw counter.
@@ -109,13 +108,13 @@ u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
* On i965gm the hardware frame counter reads
* zero when the TV encoder is enabled :(
*/
- if (IS_I965GM(dev_priv) &&
+ if (display->platform.i965gm &&
(crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
return 0;
- if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
+ if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
return 0xffffffff; /* full 32 bit counter */
- else if (DISPLAY_VER(dev_priv) >= 3)
+ else if (DISPLAY_VER(display) >= 3)
return 0xffffff; /* only 24 bits of frame count */
else
return 0; /* Gen2 doesn't have a hardware frame counter */
@@ -142,8 +141,8 @@ void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_display *display = to_intel_display(crtc);
/*
* Should really happen exactly when we disable the pipe
@@ -304,8 +303,9 @@ static const struct drm_crtc_funcs i8xx_crtc_funcs = {
.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
-int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
+int intel_crtc_init(struct intel_display *display, enum pipe pipe)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_plane *primary, *cursor;
const struct drm_crtc_funcs *funcs;
struct intel_crtc *crtc;
@@ -316,27 +316,27 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
return PTR_ERR(crtc);
crtc->pipe = pipe;
- crtc->num_scalers = DISPLAY_RUNTIME_INFO(dev_priv)->num_scalers[pipe];
+ crtc->num_scalers = DISPLAY_RUNTIME_INFO(display)->num_scalers[pipe];
- if (DISPLAY_VER(dev_priv) >= 9)
- primary = skl_universal_plane_create(dev_priv, pipe, PLANE_1);
+ if (DISPLAY_VER(display) >= 9)
+ primary = skl_universal_plane_create(display, pipe, PLANE_1);
else
- primary = intel_primary_plane_create(dev_priv, pipe);
+ primary = intel_primary_plane_create(display, pipe);
if (IS_ERR(primary)) {
ret = PTR_ERR(primary);
goto fail;
}
crtc->plane_ids_mask |= BIT(primary->id);
- intel_init_fifo_underrun_reporting(dev_priv, crtc, false);
+ intel_init_fifo_underrun_reporting(display, crtc, false);
- for_each_sprite(dev_priv, pipe, sprite) {
+ for_each_sprite(display, pipe, sprite) {
struct intel_plane *plane;
if (DISPLAY_VER(dev_priv) >= 9)
- plane = skl_universal_plane_create(dev_priv, pipe, PLANE_2 + sprite);
+ plane = skl_universal_plane_create(display, pipe, PLANE_2 + sprite);
else
- plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
+ plane = intel_sprite_plane_create(display, pipe, sprite);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
goto fail;
@@ -344,39 +344,41 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
crtc->plane_ids_mask |= BIT(plane->id);
}
- cursor = intel_cursor_plane_create(dev_priv, pipe);
+ cursor = intel_cursor_plane_create(display, pipe);
if (IS_ERR(cursor)) {
ret = PTR_ERR(cursor);
goto fail;
}
crtc->plane_ids_mask |= BIT(cursor->id);
- if (HAS_GMCH(dev_priv)) {
- if (IS_CHERRYVIEW(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
+ if (HAS_GMCH(display)) {
+ if (display->platform.cherryview ||
+ display->platform.valleyview ||
+ display->platform.g4x)
funcs = &g4x_crtc_funcs;
- else if (DISPLAY_VER(dev_priv) == 4)
+ else if (DISPLAY_VER(display) == 4)
funcs = &i965_crtc_funcs;
- else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
+ else if (display->platform.i945gm ||
+ display->platform.i915gm)
funcs = &i915gm_crtc_funcs;
- else if (DISPLAY_VER(dev_priv) == 3)
+ else if (DISPLAY_VER(display) == 3)
funcs = &i915_crtc_funcs;
else
funcs = &i8xx_crtc_funcs;
} else {
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
funcs = &bdw_crtc_funcs;
else
funcs = &ilk_crtc_funcs;
}
- ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
+ ret = drm_crtc_init_with_planes(display->drm, &crtc->base,
&primary->base, &cursor->base,
funcs, "pipe %c", pipe_name(pipe));
if (ret)
goto fail;
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(display) >= 11)
drm_crtc_create_scaling_filter_property(&crtc->base,
BIT(DRM_SCALING_FILTER_DEFAULT) |
BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
@@ -387,7 +389,7 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
cpu_latency_qos_add_request(&crtc->vblank_pm_qos, PM_QOS_DEFAULT_VALUE);
- drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);
+ drm_WARN_ON(display->drm, drm_crtc_index(&crtc->base) != crtc->pipe);
return 0;
@@ -512,7 +514,7 @@ int intel_scanlines_to_usecs(const struct drm_display_mode *adjusted_mode,
void intel_pipe_update_start(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
@@ -520,6 +522,8 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
struct intel_vblank_evade_ctx evade;
int scanline;
+ drm_WARN_ON(display->drm, new_crtc_state->use_dsb);
+
intel_psr_lock(new_crtc_state);
if (new_crtc_state->do_async_flip) {
@@ -546,7 +550,7 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
intel_vblank_evade_init(old_crtc_state, new_crtc_state, &evade);
- if (drm_WARN_ON(&dev_priv->drm, drm_crtc_vblank_get(&crtc->base)))
+ if (drm_WARN_ON(display->drm, drm_crtc_vblank_get(&crtc->base)))
goto irq_disable;
/*
@@ -649,6 +653,7 @@ void intel_crtc_prepare_vblank_event(struct intel_crtc_state *crtc_state,
void intel_pipe_update_end(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
@@ -657,6 +662,8 @@ void intel_pipe_update_end(struct intel_atomic_state *state,
ktime_t end_vbl_time = ktime_get();
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ drm_WARN_ON(display->drm, new_crtc_state->use_dsb);
+
if (new_crtc_state->do_async_flip)
goto out;
@@ -666,7 +673,7 @@ void intel_pipe_update_end(struct intel_atomic_state *state,
* Incase of mipi dsi command mode, we need to set frame update
* request for every commit.
*/
- if (DISPLAY_VER(dev_priv) >= 11 &&
+ if (DISPLAY_VER(display) >= 11 &&
intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
icl_dsi_frame_update(new_crtc_state);
@@ -714,7 +721,8 @@ void intel_pipe_update_end(struct intel_atomic_state *state,
* which would cause the next frame to terminate already at vmin
* vblank start instead of vmax vblank start.
*/
- intel_vrr_send_push(new_crtc_state);
+ if (!state->base.legacy_cursor_update)
+ intel_vrr_send_push(NULL, new_crtc_state);
local_irq_enable();
@@ -723,7 +731,7 @@ void intel_pipe_update_end(struct intel_atomic_state *state,
if (crtc->debug.start_vbl_count &&
crtc->debug.start_vbl_count != end_vbl_count) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
pipe_name(pipe), crtc->debug.start_vbl_count,
end_vbl_count,
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.h b/drivers/gpu/drm/i915/display/intel_crtc.h
index de54ae1deedf..8c14ff8b391e 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.h
+++ b/drivers/gpu/drm/i915/display/intel_crtc.h
@@ -13,7 +13,6 @@ enum pipe;
struct drm_device;
struct drm_display_mode;
struct drm_file;
-struct drm_i915_private;
struct drm_pending_vblank_event;
struct intel_atomic_state;
struct intel_crtc;
@@ -38,7 +37,7 @@ void intel_crtc_arm_vblank_event(struct intel_crtc_state *crtc_state);
void intel_crtc_prepare_vblank_event(struct intel_crtc_state *crtc_state,
struct drm_pending_vblank_event **event);
u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state);
-int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe);
+int intel_crtc_init(struct intel_display *display, enum pipe pipe);
int intel_crtc_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
@@ -52,10 +51,10 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
void intel_pipe_update_end(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_wait_for_vblank_workers(struct intel_atomic_state *state);
-struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915);
+struct intel_crtc *intel_first_crtc(struct intel_display *display);
struct intel_crtc *intel_crtc_for_pipe(struct intel_display *display,
enum pipe pipe);
-void intel_wait_for_vblank_if_active(struct drm_i915_private *i915,
+void intel_wait_for_vblank_if_active(struct intel_display *display,
enum pipe pipe);
void intel_crtc_wait_for_next_vblank(struct intel_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 1faef60be472..599ddce96371 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -10,6 +10,7 @@
#include "intel_crtc_state_dump.h"
#include "intel_display_types.h"
#include "intel_hdmi.h"
+#include "intel_vblank.h"
#include "intel_vdsc.h"
#include "intel_vrr.h"
@@ -175,6 +176,7 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
struct intel_atomic_state *state,
const char *context)
{
+ struct intel_display *display = to_intel_display(pipe_config);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
const struct intel_plane_state *plane_state;
@@ -248,11 +250,9 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
str_enabled_disabled(pipe_config->has_sel_update),
str_enabled_disabled(pipe_config->has_panel_replay),
str_enabled_disabled(pipe_config->enable_psr2_sel_fetch));
+ drm_printf(&p, "minimum HBlank: %d\n", pipe_config->min_hblank);
}
- drm_printf(&p, "framestart delay: %d, MSA timing delay: %d\n",
- pipe_config->framestart_delay, pipe_config->msa_timing_delay);
-
drm_printf(&p, "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
pipe_config->has_audio, pipe_config->has_infoframe,
pipe_config->infoframes.enable);
@@ -286,13 +286,23 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_print_hex_dump(&p, "ELD: ", pipe_config->eld,
drm_eld_size(pipe_config->eld));
- drm_printf(&p, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
+ drm_printf(&p, "scanline offset: %d\n",
+ intel_crtc_scanline_offset(pipe_config));
+
+ drm_printf(&p, "vblank delay: %d, framestart delay: %d, MSA timing delay: %d\n",
+ pipe_config->hw.adjusted_mode.crtc_vblank_start -
+ pipe_config->hw.adjusted_mode.crtc_vdisplay,
+ pipe_config->framestart_delay, pipe_config->msa_timing_delay);
+
+ drm_printf(&p, "vrr: %s, vmin: %d, vmax: %d, flipline: %d, pipeline full: %d, guardband: %d vsync start: %d, vsync end: %d\n",
str_yes_no(pipe_config->vrr.enable),
- pipe_config->vrr.vmin, pipe_config->vrr.vmax,
+ pipe_config->vrr.vmin, pipe_config->vrr.vmax, pipe_config->vrr.flipline,
pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
- pipe_config->vrr.flipline,
- intel_vrr_vmin_vblank_start(pipe_config),
- intel_vrr_vmax_vblank_start(pipe_config));
+ pipe_config->vrr.vsync_start, pipe_config->vrr.vsync_end);
+
+ drm_printf(&p, "vrr: vmin vblank: %d, vmax vblank: %d, vmin vtotal: %d, vmax vtotal: %d\n",
+ intel_vrr_vmin_vblank_start(pipe_config), intel_vrr_vmax_vblank_start(pipe_config),
+ intel_vrr_vmin_vtotal(pipe_config), intel_vrr_vmax_vtotal(pipe_config));
drm_printf(&p, "requested mode: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(&pipe_config->hw.mode));
@@ -331,7 +341,7 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
pipe_config->ips_enabled, pipe_config->double_wide,
pipe_config->has_drrs);
- intel_dpll_dump_hw_state(i915, &p, &pipe_config->dpll_hw_state);
+ intel_dpll_dump_hw_state(display, &p, &pipe_config->dpll_hw_state);
if (IS_CHERRYVIEW(i915))
drm_printf(&p, "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
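
For reference, the new "vblank delay" value in the dump above is simply the difference between the adjusted mode's crtc_vblank_start and crtc_vdisplay. A tiny illustration with made-up numbers (the fields are real drm_display_mode members, the values are not from any particular platform):

	struct drm_display_mode mode = {
		.crtc_vdisplay = 1080,
		.crtc_vblank_start = 1084,
	};
	/* The dump would report "vblank delay: 4" for this mode. */
	int vblank_delay = mode.crtc_vblank_start - mode.crtc_vdisplay;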
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 57cf8f46a458..3276a5b4a9b0 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -35,11 +35,10 @@ static const u32 intel_cursor_formats[] = {
static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state);
u32 base;
- if (DISPLAY_INFO(dev_priv)->cursor_needs_physical)
+ if (DISPLAY_INFO(display)->cursor_needs_physical)
base = plane_state->phys_dma_addr;
else
base = intel_plane_ggtt_offset(plane_state);
@@ -92,8 +91,8 @@ static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
unsigned int rotation = plane_state->hw.rotation;
int src_x, src_y;
u32 offset;
@@ -114,8 +113,9 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
plane_state, 0);
if (src_x != 0 || src_y != 0) {
- drm_dbg_kms(&dev_priv->drm,
- "Arbitrary cursor panning not supported\n");
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] arbitrary cursor panning not supported\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
}
@@ -127,7 +127,7 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
src_x << 16, src_y << 16);
/* ILK+ do this automagically in hardware */
- if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
+ if (HAS_GMCH(display) && rotation & DRM_MODE_ROTATE_180) {
const struct drm_framebuffer *fb = plane_state->hw.fb;
int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
@@ -145,14 +145,16 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
static int intel_check_cursor(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
const struct drm_rect src = plane_state->uapi.src;
const struct drm_rect dst = plane_state->uapi.dst;
int ret;
if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
- drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n");
+ drm_dbg_kms(display->drm, "[PLANE:%d:%s] cursor cannot be tiled\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
}
@@ -233,8 +235,9 @@ static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
static int i845_check_cursor(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
int ret;
ret = intel_check_cursor(crtc_state, plane_state);
@@ -247,14 +250,15 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
/* Check for which cursor types we support */
if (!i845_cursor_size_ok(plane_state)) {
- drm_dbg_kms(&i915->drm,
- "Cursor dimension %dx%d not supported\n",
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] cursor dimension %dx%d not supported\n",
+ plane->base.base.id, plane->base.name,
drm_rect_width(&plane_state->uapi.dst),
drm_rect_height(&plane_state->uapi.dst));
return -EINVAL;
}
- drm_WARN_ON(&i915->drm, plane_state->uapi.visible &&
+ drm_WARN_ON(display->drm, plane_state->uapi.visible &&
plane_state->view.color_plane[0].mapping_stride != fb->pitches[0]);
switch (fb->pitches[0]) {
@@ -264,7 +268,8 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
case 2048:
break;
default:
- drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n",
+ drm_dbg_kms(display->drm, "[PLANE:%d:%s] invalid cursor stride (%u)\n",
+ plane->base.base.id, plane->base.name,
fb->pitches[0]);
return -EINVAL;
}
@@ -280,7 +285,7 @@ static void i845_cursor_update_arm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
u32 cntl = 0, base = 0, pos = 0, size = 0;
if (plane_state && plane_state->uapi.visible) {
@@ -302,17 +307,17 @@ static void i845_cursor_update_arm(struct intel_dsb *dsb,
if (plane->cursor.base != base ||
plane->cursor.size != size ||
plane->cursor.cntl != cntl) {
- intel_de_write_fw(dev_priv, CURCNTR(dev_priv, PIPE_A), 0);
- intel_de_write_fw(dev_priv, CURBASE(dev_priv, PIPE_A), base);
- intel_de_write_fw(dev_priv, CURSIZE(dev_priv, PIPE_A), size);
- intel_de_write_fw(dev_priv, CURPOS(dev_priv, PIPE_A), pos);
- intel_de_write_fw(dev_priv, CURCNTR(dev_priv, PIPE_A), cntl);
+ intel_de_write_fw(display, CURCNTR(display, PIPE_A), 0);
+ intel_de_write_fw(display, CURBASE(display, PIPE_A), base);
+ intel_de_write_fw(display, CURSIZE(display, PIPE_A), size);
+ intel_de_write_fw(display, CURPOS(display, PIPE_A), pos);
+ intel_de_write_fw(display, CURCNTR(display, PIPE_A), cntl);
plane->cursor.base = base;
plane->cursor.size = size;
plane->cursor.cntl = cntl;
} else {
- intel_de_write_fw(dev_priv, CURPOS(dev_priv, PIPE_A), pos);
+ intel_de_write_fw(display, CURPOS(display, PIPE_A), pos);
}
}
@@ -326,21 +331,21 @@ static void i845_cursor_disable_arm(struct intel_dsb *dsb,
static bool i845_cursor_get_hw_state(struct intel_plane *plane,
enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
bool ret;
power_domain = POWER_DOMAIN_PIPE(PIPE_A);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ wakeref = intel_display_power_get_if_enabled(display, power_domain);
if (!wakeref)
return false;
- ret = intel_de_read(dev_priv, CURCNTR(dev_priv, PIPE_A)) & CURSOR_ENABLE;
+ ret = intel_de_read(display, CURCNTR(display, PIPE_A)) & CURSOR_ENABLE;
*pipe = PIPE_A;
- intel_display_power_put(dev_priv, power_domain, wakeref);
+ intel_display_power_put(display, power_domain, wakeref);
return ret;
}
@@ -372,16 +377,21 @@ static unsigned int i9xx_cursor_min_alignment(struct intel_plane *plane,
const struct drm_framebuffer *fb,
int color_plane)
{
+ struct intel_display *display = to_intel_display(plane);
+
+ if (intel_scanout_needs_vtd_wa(display))
+ return 64 * 1024;
+
return 4 * 1024; /* physical for i915/i945 */
}
static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 cntl = 0;
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(display) >= 11)
return cntl;
if (crtc_state->gamma_enable)
@@ -390,7 +400,7 @@ static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
if (crtc_state->csc_enable)
cntl |= MCURSOR_PIPE_CSC_ENABLE;
- if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
+ if (DISPLAY_VER(display) < 5 && !display->platform.g4x)
cntl |= MCURSOR_PIPE_SEL(crtc->pipe);
return cntl;
@@ -399,11 +409,10 @@ static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state);
u32 cntl = 0;
- if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv))
+ if (display->platform.sandybridge || display->platform.ivybridge)
cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
switch (drm_rect_width(&plane_state->uapi.dst)) {
@@ -425,7 +434,7 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
cntl |= MCURSOR_ROTATE_180;
/* Wa_22012358565:adl-p */
- if (DISPLAY_VER(dev_priv) == 13)
+ if (DISPLAY_VER(display) == 13)
cntl |= MCURSOR_ARB_SLOTS(1);
return cntl;
@@ -433,8 +442,7 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state);
int width = drm_rect_width(&plane_state->uapi.dst);
int height = drm_rect_height(&plane_state->uapi.dst);
@@ -457,7 +465,7 @@ static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
* cursor is not rotated. Everything else requires square
* cursors.
*/
- if (HAS_CUR_FBC(dev_priv) &&
+ if (HAS_CUR_FBC(display) &&
plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
if (height < 8 || height > width)
return false;
@@ -472,8 +480,8 @@ static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
int ret;
@@ -488,22 +496,23 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
/* Check for which cursor types we support */
if (!i9xx_cursor_size_ok(plane_state)) {
- drm_dbg(&dev_priv->drm,
- "Cursor dimension %dx%d not supported\n",
- drm_rect_width(&plane_state->uapi.dst),
- drm_rect_height(&plane_state->uapi.dst));
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] cursor dimension %dx%d not supported\n",
+ plane->base.base.id, plane->base.name,
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst));
return -EINVAL;
}
- drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
+ drm_WARN_ON(display->drm, plane_state->uapi.visible &&
plane_state->view.color_plane[0].mapping_stride != fb->pitches[0]);
if (fb->pitches[0] !=
drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
- drm_dbg_kms(&dev_priv->drm,
- "Invalid cursor stride (%u) (cursor width %d)\n",
- fb->pitches[0],
- drm_rect_width(&plane_state->uapi.dst));
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] invalid cursor stride (%u) (cursor width %d)\n",
+ plane->base.base.id, plane->base.name,
+ fb->pitches[0], drm_rect_width(&plane_state->uapi.dst));
return -EINVAL;
}
@@ -517,10 +526,11 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
* display power well must be turned off and on again.
* Refuse the put the cursor into that compromised position.
*/
- if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
+ if (display->platform.cherryview && pipe == PIPE_C &&
plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
- drm_dbg_kms(&dev_priv->drm,
- "CHV cursor C not allowed to straddle the left screen edge\n");
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] cursor not allowed to straddle the left screen edge\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
}
@@ -533,7 +543,7 @@ static void i9xx_cursor_disable_sel_fetch_arm(struct intel_dsb *dsb,
struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
if (!crtc_state->enable_psr2_sel_fetch)
@@ -547,8 +557,7 @@ static void wa_16021440873(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
u32 ctl = plane_state->ctl;
int et_y_position = drm_rect_height(&crtc_state->pipe_src) + 1;
enum pipe pipe = plane->pipe;
@@ -558,7 +567,7 @@ static void wa_16021440873(struct intel_dsb *dsb,
intel_de_write_dsb(display, dsb, SEL_FETCH_CUR_CTL(pipe), ctl);
- intel_de_write_dsb(display, dsb, CURPOS_ERLY_TPT(dev_priv, pipe),
+ intel_de_write_dsb(display, dsb, CURPOS_ERLY_TPT(display, pipe),
CURSOR_POS_Y(et_y_position));
}
@@ -567,8 +576,7 @@ static void i9xx_cursor_update_sel_fetch_arm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
if (!crtc_state->enable_psr2_sel_fetch)
@@ -579,7 +587,7 @@ static void i9xx_cursor_update_sel_fetch_arm(struct intel_dsb *dsb,
u32 val = intel_cursor_position(crtc_state, plane_state,
true);
- intel_de_write_dsb(display, dsb, CURPOS_ERLY_TPT(dev_priv, pipe), val);
+ intel_de_write_dsb(display, dsb, CURPOS_ERLY_TPT(display, pipe), val);
}
intel_de_write_dsb(display, dsb, SEL_FETCH_CUR_CTL(pipe), plane_state->ctl);
@@ -653,8 +661,7 @@ static void i9xx_cursor_update_arm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
@@ -680,7 +687,7 @@ static void i9xx_cursor_update_arm(struct intel_dsb *dsb,
* CURPOS.
*
* On other platforms CURPOS always requires the
- * CURBASE write to arm the update. Additonally
+ * CURBASE write to arm the update. Additionally
* a write to any of the cursor register will cancel
* an already armed cursor update. Thus leaving out
* the CURBASE write after CURPOS could lead to a
@@ -692,7 +699,7 @@ static void i9xx_cursor_update_arm(struct intel_dsb *dsb,
* the CURCNTR write arms the update.
*/
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
skl_write_cursor_wm(dsb, plane, crtc_state);
if (plane_state)
@@ -703,18 +710,18 @@ static void i9xx_cursor_update_arm(struct intel_dsb *dsb,
if (plane->cursor.base != base ||
plane->cursor.size != fbc_ctl ||
plane->cursor.cntl != cntl) {
- if (HAS_CUR_FBC(dev_priv))
- intel_de_write_dsb(display, dsb, CUR_FBC_CTL(dev_priv, pipe), fbc_ctl);
- intel_de_write_dsb(display, dsb, CURCNTR(dev_priv, pipe), cntl);
- intel_de_write_dsb(display, dsb, CURPOS(dev_priv, pipe), pos);
- intel_de_write_dsb(display, dsb, CURBASE(dev_priv, pipe), base);
+ if (HAS_CUR_FBC(display))
+ intel_de_write_dsb(display, dsb, CUR_FBC_CTL(display, pipe), fbc_ctl);
+ intel_de_write_dsb(display, dsb, CURCNTR(display, pipe), cntl);
+ intel_de_write_dsb(display, dsb, CURPOS(display, pipe), pos);
+ intel_de_write_dsb(display, dsb, CURBASE(display, pipe), base);
plane->cursor.base = base;
plane->cursor.size = fbc_ctl;
plane->cursor.cntl = cntl;
} else {
- intel_de_write_dsb(display, dsb, CURPOS(dev_priv, pipe), pos);
- intel_de_write_dsb(display, dsb, CURBASE(dev_priv, pipe), base);
+ intel_de_write_dsb(display, dsb, CURPOS(display, pipe), pos);
+ intel_de_write_dsb(display, dsb, CURBASE(display, pipe), base);
}
}
@@ -728,7 +735,7 @@ static void i9xx_cursor_disable_arm(struct intel_dsb *dsb,
static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
bool ret;
@@ -740,24 +747,45 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
* display power wells.
*/
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ wakeref = intel_display_power_get_if_enabled(display, power_domain);
if (!wakeref)
return false;
- val = intel_de_read(dev_priv, CURCNTR(dev_priv, plane->pipe));
+ val = intel_de_read(display, CURCNTR(display, plane->pipe));
ret = val & MCURSOR_MODE_MASK;
- if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
+ if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
*pipe = plane->pipe;
else
*pipe = REG_FIELD_GET(MCURSOR_PIPE_SEL_MASK, val);
- intel_display_power_put(dev_priv, power_domain, wakeref);
+ intel_display_power_put(display, power_domain, wakeref);
return ret;
}
+static void g4x_cursor_capture_error(struct intel_crtc *crtc,
+ struct intel_plane *plane,
+ struct intel_plane_error *error)
+{
+ struct intel_display *display = to_intel_display(plane);
+
+ error->ctl = intel_de_read(display, CURCNTR(display, crtc->pipe));
+ error->surf = intel_de_read(display, CURBASE(display, crtc->pipe));
+ error->surflive = intel_de_read(display, CURSURFLIVE(display, crtc->pipe));
+}
+
+static void i9xx_cursor_capture_error(struct intel_crtc *crtc,
+ struct intel_plane *plane,
+ struct intel_plane_error *error)
+{
+ struct intel_display *display = to_intel_display(plane);
+
+ error->ctl = intel_de_read(display, CURCNTR(display, crtc->pipe));
+ error->surf = intel_de_read(display, CURBASE(display, crtc->pipe));
+}
+
static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
@@ -790,7 +818,7 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
{
struct intel_plane *plane = to_intel_plane(_plane);
struct intel_crtc *crtc = to_intel_crtc(_crtc);
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
struct intel_plane_state *old_plane_state =
to_intel_plane_state(plane->base.state);
struct intel_plane_state *new_plane_state;
@@ -865,7 +893,7 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
if (ret)
goto out_free;
- ret = intel_plane_pin_fb(new_plane_state);
+ ret = intel_plane_pin_fb(new_plane_state, old_plane_state);
if (ret)
goto out_free;
@@ -894,7 +922,7 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
intel_psr_lock(crtc_state);
- if (!drm_WARN_ON(&i915->drm, drm_crtc_vblank_get(&crtc->base))) {
+ if (!drm_WARN_ON(display->drm, drm_crtc_vblank_get(&crtc->base))) {
/*
* TODO: maybe check if we're still in PSR
* and skip the vblank evasion entirely?
@@ -960,8 +988,8 @@ static const struct drm_plane_funcs intel_cursor_plane_funcs = {
static void intel_cursor_add_size_hints_property(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
- const struct drm_mode_config *config = &i915->drm.mode_config;
+ struct intel_display *display = to_intel_display(plane);
+ const struct drm_mode_config *config = &display->drm->mode_config;
struct drm_plane_size_hint hints[4];
int size, max_size, num_hints = 0;
@@ -969,7 +997,7 @@ static void intel_cursor_add_size_hints_property(struct intel_plane *plane)
/* for simplicity only enumerate the supported square+POT sizes */
for (size = 64; size <= max_size; size *= 2) {
- if (drm_WARN_ON(&i915->drm, num_hints >= ARRAY_SIZE(hints)))
+ if (drm_WARN_ON(display->drm, num_hints >= ARRAY_SIZE(hints)))
break;
hints[num_hints].width = size;
@@ -981,7 +1009,7 @@ static void intel_cursor_add_size_hints_property(struct intel_plane *plane)
}
struct intel_plane *
-intel_cursor_plane_create(struct drm_i915_private *dev_priv,
+intel_cursor_plane_create(struct intel_display *display,
enum pipe pipe)
{
struct intel_plane *cursor;
@@ -997,7 +1025,7 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
cursor->id = PLANE_CURSOR;
cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
- if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
+ if (display->platform.i845g || display->platform.i865g) {
cursor->max_stride = i845_cursor_max_stride;
cursor->min_alignment = i845_cursor_min_alignment;
cursor->update_arm = i845_cursor_update_arm;
@@ -1007,28 +1035,36 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
} else {
cursor->max_stride = i9xx_cursor_max_stride;
- if (IS_I830(dev_priv))
+ if (display->platform.i830)
cursor->min_alignment = i830_cursor_min_alignment;
- else if (IS_I85X(dev_priv))
+ else if (display->platform.i85x)
cursor->min_alignment = i85x_cursor_min_alignment;
else
cursor->min_alignment = i9xx_cursor_min_alignment;
+ if (intel_scanout_needs_vtd_wa(display))
+ cursor->vtd_guard = 2;
+
cursor->update_arm = i9xx_cursor_update_arm;
cursor->disable_arm = i9xx_cursor_disable_arm;
cursor->get_hw_state = i9xx_cursor_get_hw_state;
cursor->check_plane = i9xx_check_cursor;
}
+ if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
+ cursor->capture_error = g4x_cursor_capture_error;
+ else
+ cursor->capture_error = i9xx_cursor_capture_error;
+
cursor->cursor.base = ~0;
cursor->cursor.cntl = ~0;
- if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
+ if (display->platform.i845g || display->platform.i865g || HAS_CUR_FBC(display))
cursor->cursor.size = ~0;
- modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_NONE);
+ modifiers = intel_fb_plane_get_modifiers(display, INTEL_PLANE_CAP_NONE);
- ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
+ ret = drm_universal_plane_init(display->drm, &cursor->base,
0, &intel_cursor_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
@@ -1041,7 +1077,7 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
if (ret)
goto fail;
- if (DISPLAY_VER(dev_priv) >= 4)
+ if (DISPLAY_VER(display) >= 4)
drm_plane_create_rotation_property(&cursor->base,
DRM_MODE_ROTATE_0,
DRM_MODE_ROTATE_0 |
@@ -1049,10 +1085,10 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
intel_cursor_add_size_hints_property(cursor);
- zpos = DISPLAY_RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
+ zpos = DISPLAY_RUNTIME_INFO(display)->num_sprites[pipe] + 1;
drm_plane_create_zpos_immutable_property(&cursor->base, zpos);
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
drm_plane_enable_fb_damage_clips(&cursor->base);
intel_plane_helper_add(cursor);
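
The two capture_error() hooks added above only snapshot registers; how the snapshot is consumed is outside this diff. A hedged sketch of a consumer, assuming the ctl/surf/surflive layout of struct intel_plane_error used by the hooks (the dump helper itself is hypothetical):

static void example_dump_cursor_error(struct intel_crtc *crtc,
				      struct intel_plane *plane,
				      struct drm_printer *p)
{
	struct intel_plane_error error = {};

	/* The g4x+ variant also latches CURSURFLIVE; older ones leave it zero. */
	plane->capture_error(crtc, plane, &error);

	drm_printf(p, "CURCNTR: 0x%08x\n", error.ctl);
	drm_printf(p, "CURBASE: 0x%08x\n", error.surf);
	drm_printf(p, "CURSURFLIVE: 0x%08x\n", error.surflive);
}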
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.h b/drivers/gpu/drm/i915/display/intel_cursor.h
index e2d9ec710a86..65a9e7eb88c2 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.h
+++ b/drivers/gpu/drm/i915/display/intel_cursor.h
@@ -7,12 +7,12 @@
#define _INTEL_CURSOR_H_
enum pipe;
-struct drm_i915_private;
+struct intel_display;
struct intel_plane;
struct kthread_work;
struct intel_plane *
-intel_cursor_plane_create(struct drm_i915_private *dev_priv,
+intel_cursor_plane_create(struct intel_display *display,
enum pipe pipe);
void intel_cursor_unpin_work(struct kthread_work *base);
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index e768dc6a15b3..22595766eac5 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -18,6 +18,7 @@
#include "intel_hdmi.h"
#include "intel_panel.h"
#include "intel_psr.h"
+#include "intel_snps_hdmi_pll.h"
#include "intel_tc.h"
#define MB_WRITE_COMMITTED true
@@ -33,13 +34,13 @@
bool intel_encoder_is_c10phy(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- if (IS_PANTHERLAKE(i915) && phy == PHY_A)
+ if (display->platform.pantherlake && phy == PHY_A)
return true;
- if ((IS_LUNARLAKE(i915) || IS_METEORLAKE(i915)) && phy < PHY_C)
+ if ((display->platform.lunarlake || display->platform.meteorlake) && phy < PHY_C)
return true;
return false;
@@ -72,10 +73,9 @@ static u8 intel_cx0_get_owned_lane_mask(struct intel_encoder *encoder)
static void
assert_dc_off(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
bool enabled;
- enabled = intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF);
+ enabled = intel_display_power_is_enabled(display, POWER_DOMAIN_DC_OFF);
drm_WARN_ON(display->drm, !enabled);
}
@@ -102,12 +102,12 @@ static void intel_cx0_program_msgbus_timer(struct intel_encoder *encoder)
*/
static intel_wakeref_t intel_cx0_phy_transaction_begin(struct intel_encoder *encoder)
{
- intel_wakeref_t wakeref;
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ intel_wakeref_t wakeref;
intel_psr_pause(intel_dp);
- wakeref = intel_display_power_get(i915, POWER_DOMAIN_DC_OFF);
+ wakeref = intel_display_power_get(display, POWER_DOMAIN_DC_OFF);
intel_cx0_program_msgbus_timer(encoder);
return wakeref;
@@ -115,11 +115,11 @@ static intel_wakeref_t intel_cx0_phy_transaction_begin(struct intel_encoder *enc
static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, intel_wakeref_t wakeref)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_psr_resume(intel_dp);
- intel_display_power_put(i915, POWER_DOMAIN_DC_OFF, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_DC_OFF, wakeref);
}
static void intel_clear_response_ready_flag(struct intel_encoder *encoder,
@@ -2003,19 +2003,6 @@ static const struct intel_c20pll_state * const mtl_c20_hdmi_tables[] = {
NULL,
};
-static int intel_c10_phy_check_hdmi_link_rate(int clock)
-{
- const struct intel_c10pll_state * const *tables = mtl_c10_hdmi_tables;
- int i;
-
- for (i = 0; tables[i]; i++) {
- if (clock == tables[i]->clock)
- return MODE_OK;
- }
-
- return MODE_CLOCK_RANGE;
-}
-
static const struct intel_c10pll_state * const *
intel_c10pll_tables_get(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
@@ -2033,21 +2020,25 @@ intel_c10pll_tables_get(struct intel_crtc_state *crtc_state,
return NULL;
}
-static void intel_c10pll_update_pll(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+static void intel_cx0pll_update_ssc(struct intel_encoder *encoder,
+ struct intel_cx0pll_state *pll_state, bool is_dp)
{
struct intel_display *display = to_intel_display(encoder);
- struct intel_cx0pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll;
- int i;
- if (intel_crtc_has_dp_encoder(crtc_state)) {
+ if (is_dp) {
if (intel_panel_use_ssc(display)) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
pll_state->ssc_enabled =
(intel_dp->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5);
}
}
+}
+
+static void intel_c10pll_update_pll(struct intel_encoder *encoder,
+ struct intel_cx0pll_state *pll_state)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ int i;
if (pll_state->ssc_enabled)
return;
@@ -2057,27 +2048,53 @@ static void intel_c10pll_update_pll(struct intel_crtc_state *crtc_state,
pll_state->c10.pll[i] = 0;
}
+static int intel_c10pll_calc_state_from_table(struct intel_encoder *encoder,
+ const struct intel_c10pll_state * const *tables,
+ bool is_dp, int port_clock,
+ struct intel_cx0pll_state *pll_state)
+{
+ int i;
+
+ for (i = 0; tables[i]; i++) {
+ if (port_clock == tables[i]->clock) {
+ pll_state->c10 = *tables[i];
+ intel_cx0pll_update_ssc(encoder, pll_state, is_dp);
+ intel_c10pll_update_pll(encoder, pll_state);
+ pll_state->use_c10 = true;
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
static int intel_c10pll_calc_state(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
const struct intel_c10pll_state * const *tables;
- int i;
+ int err;
tables = intel_c10pll_tables_get(crtc_state, encoder);
if (!tables)
return -EINVAL;
- for (i = 0; tables[i]; i++) {
- if (crtc_state->port_clock == tables[i]->clock) {
- crtc_state->dpll_hw_state.cx0pll.c10 = *tables[i];
- intel_c10pll_update_pll(crtc_state, encoder);
- crtc_state->dpll_hw_state.cx0pll.use_c10 = true;
+ err = intel_c10pll_calc_state_from_table(encoder, tables,
+ intel_crtc_has_dp_encoder(crtc_state),
+ crtc_state->port_clock,
+ &crtc_state->dpll_hw_state.cx0pll);
- return 0;
- }
- }
+ if (err == 0 || !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return err;
- return -EINVAL;
+ /* For HDMI PLLs, try the SNPS PHY algorithm if there are no precomputed tables */
+ intel_snps_hdmi_pll_compute_c10pll(&crtc_state->dpll_hw_state.cx0pll.c10,
+ crtc_state->port_clock);
+ intel_c10pll_update_pll(encoder,
+ &crtc_state->dpll_hw_state.cx0pll);
+ crtc_state->dpll_hw_state.cx0pll.use_c10 = true;
+
+ return 0;
}
static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
@@ -2107,10 +2124,9 @@ static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
}
static void intel_c10_pll_program(struct intel_display *display,
- const struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+ struct intel_encoder *encoder,
+ const struct intel_c10pll_state *pll_state)
{
- const struct intel_c10pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll.c10;
int i;
intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
@@ -2173,9 +2189,47 @@ static void intel_c10pll_dump_hw_state(struct intel_display *display,
i + 2, hw_state->pll[i + 2], i + 3, hw_state->pll[i + 3]);
}
-static int intel_c20_compute_hdmi_tmds_pll(struct intel_crtc_state *crtc_state)
+/*
+ * Some ARL SoCs share the same drm PCI IDs, so a helper is needed to differentiate
+ * based on the host bridge device ID and pick the correct tx_misc value.
+ */
+static bool is_arrowlake_s_by_host_bridge(void)
+{
+ struct pci_dev *pdev = NULL;
+ u16 host_bridge_pci_dev_id = 0;
+
+ while ((pdev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, pdev)))
+ host_bridge_pci_dev_id = pdev->device;
+
+ return IS_ARROWLAKE_S_BY_HOST_BRIDGE_ID(host_bridge_pci_dev_id);
+}
+
+static u16 intel_c20_hdmi_tmds_tx_cgf_1(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
+ u16 tx_misc;
+ u16 tx_dcc_cal_dac_ctrl_range = 8;
+ u16 tx_term_ctrl = 2;
+
+ if (DISPLAY_VER(display) >= 20) {
+ tx_misc = 5;
+ tx_term_ctrl = 4;
+ } else if (display->platform.battlemage) {
+ tx_misc = 0;
+ } else if (display->platform.meteorlake_u ||
+ is_arrowlake_s_by_host_bridge()) {
+ tx_misc = 3;
+ } else {
+ tx_misc = 7;
+ }
+
+ return (C20_PHY_TX_MISC(tx_misc) |
+ C20_PHY_TX_DCC_CAL_RANGE(tx_dcc_cal_dac_ctrl_range) |
+ C20_PHY_TX_DCC_BYPASS | C20_PHY_TX_TERM_CTL(tx_term_ctrl));
+}
+
+static int intel_c20_compute_hdmi_tmds_pll(struct intel_crtc_state *crtc_state)
+{
struct intel_c20pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll.c20;
u64 datarate;
u64 mpll_tx_clk_div;
@@ -2185,7 +2239,6 @@ static int intel_c20_compute_hdmi_tmds_pll(struct intel_crtc_state *crtc_state)
u64 mpll_multiplier;
u64 mpll_fracn_quot;
u64 mpll_fracn_rem;
- u16 tx_misc;
u8 mpllb_ana_freq_vco;
u8 mpll_div_multiplier;
@@ -2205,11 +2258,6 @@ static int intel_c20_compute_hdmi_tmds_pll(struct intel_crtc_state *crtc_state)
mpll_div_multiplier = min_t(u8, div64_u64((vco_freq * 16 + (datarate >> 1)),
datarate), 255);
- if (DISPLAY_VER(display) >= 20)
- tx_misc = 0x5;
- else
- tx_misc = 0x0;
-
if (vco_freq <= DATARATE_3000000000)
mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_3;
else if (vco_freq <= DATARATE_3500000000)
@@ -2221,7 +2269,7 @@ static int intel_c20_compute_hdmi_tmds_pll(struct intel_crtc_state *crtc_state)
pll_state->clock = crtc_state->port_clock;
pll_state->tx[0] = 0xbe88;
- pll_state->tx[1] = 0x9800 | C20_PHY_TX_MISC(tx_misc);
+ pll_state->tx[1] = intel_c20_hdmi_tmds_tx_cgf_1(crtc_state);
pll_state->tx[2] = 0x0000;
pll_state->cmn[0] = 0x0500;
pll_state->cmn[1] = 0x0005;
@@ -2249,31 +2297,6 @@ static int intel_c20_compute_hdmi_tmds_pll(struct intel_crtc_state *crtc_state)
return 0;
}
-static int intel_c20_phy_check_hdmi_link_rate(int clock)
-{
- const struct intel_c20pll_state * const *tables = mtl_c20_hdmi_tables;
- int i;
-
- for (i = 0; tables[i]; i++) {
- if (clock == tables[i]->clock)
- return MODE_OK;
- }
-
- if (clock >= 25175 && clock <= 594000)
- return MODE_OK;
-
- return MODE_CLOCK_RANGE;
-}
-
-int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock)
-{
- struct intel_digital_port *dig_port = hdmi_to_dig_port(hdmi);
-
- if (intel_encoder_is_c10phy(&dig_port->base))
- return intel_c10_phy_check_hdmi_link_rate(clock);
- return intel_c20_phy_check_hdmi_link_rate(clock);
-}
-
static const struct intel_c20pll_state * const *
intel_c20_pll_tables_get(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
@@ -2322,6 +2345,9 @@ static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state,
for (i = 0; tables[i]; i++) {
if (crtc_state->port_clock == tables[i]->clock) {
crtc_state->dpll_hw_state.cx0pll.c20 = *tables[i];
+ intel_cx0pll_update_ssc(encoder,
+ &crtc_state->dpll_hw_state.cx0pll,
+ intel_crtc_has_dp_encoder(crtc_state));
crtc_state->dpll_hw_state.cx0pll.use_c10 = false;
return 0;
}
@@ -2587,19 +2613,14 @@ static int intel_get_c20_custom_width(u32 clock, bool dp)
}
static void intel_c20_pll_program(struct intel_display *display,
- const struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+ struct intel_encoder *encoder,
+ const struct intel_c20pll_state *pll_state,
+ bool is_dp, int port_clock)
{
- const struct intel_c20pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll.c20;
- bool dp = false;
u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);
- u32 clock = crtc_state->port_clock;
bool cntx;
int i;
- if (intel_crtc_has_dp_encoder(crtc_state))
- dp = true;
-
/* 1. Read current context selection */
cntx = intel_cx0_read(encoder, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & BIT(0);
@@ -2667,23 +2688,23 @@ static void intel_c20_pll_program(struct intel_display *display,
/* 4. Program custom width to match the link protocol */
intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_WIDTH,
PHY_C20_CUSTOM_WIDTH_MASK,
- PHY_C20_CUSTOM_WIDTH(intel_get_c20_custom_width(clock, dp)),
+ PHY_C20_CUSTOM_WIDTH(intel_get_c20_custom_width(port_clock, is_dp)),
MB_WRITE_COMMITTED);
/* 5. For DP or 6. For HDMI */
- if (dp) {
+ if (is_dp) {
intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_SERDES_RATE,
BIT(6) | PHY_C20_CUSTOM_SERDES_MASK,
- BIT(6) | PHY_C20_CUSTOM_SERDES(intel_c20_get_dp_rate(clock)),
+ BIT(6) | PHY_C20_CUSTOM_SERDES(intel_c20_get_dp_rate(port_clock)),
MB_WRITE_COMMITTED);
} else {
intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_SERDES_RATE,
BIT(7) | PHY_C20_CUSTOM_SERDES_MASK,
- is_hdmi_frl(clock) ? BIT(7) : 0,
+ is_hdmi_frl(port_clock) ? BIT(7) : 0,
MB_WRITE_COMMITTED);
intel_cx0_write(encoder, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE,
- intel_c20_get_hdmi_rate(clock),
+ intel_c20_get_hdmi_rate(port_clock),
MB_WRITE_COMMITTED);
}
@@ -2723,7 +2744,8 @@ static int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
}
static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
+ const struct intel_cx0pll_state *pll_state,
+ bool is_dp, int port_clock,
bool lane_reversal)
{
struct intel_display *display = to_intel_display(encoder);
@@ -2738,18 +2760,17 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
val |= XELPDP_FORWARD_CLOCK_UNGATE;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
- is_hdmi_frl(crtc_state->port_clock))
+ if (!is_dp && is_hdmi_frl(port_clock))
val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
else
val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
/* TODO: HDMI FRL */
/* DP2.0 10G and 20G rates enable MPLLA*/
- if (crtc_state->port_clock == 1000000 || crtc_state->port_clock == 2000000)
- val |= crtc_state->dpll_hw_state.cx0pll.ssc_enabled ? XELPDP_SSC_ENABLE_PLLA : 0;
+ if (port_clock == 1000000 || port_clock == 2000000)
+ val |= pll_state->ssc_enabled ? XELPDP_SSC_ENABLE_PLLA : 0;
else
- val |= crtc_state->dpll_hw_state.cx0pll.ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
+ val |= pll_state->ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE |
@@ -2979,8 +3000,9 @@ static u32 intel_cx0_get_pclk_pll_ack(u8 lane_mask)
return val;
}
-static void intel_cx0pll_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+static void __intel_cx0pll_enable(struct intel_encoder *encoder,
+ const struct intel_cx0pll_state *pll_state,
+ bool is_dp, int port_clock, int lane_count)
{
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
@@ -2994,7 +3016,7 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
* 1. Program PORT_CLOCK_CTL REGISTER to configure
* clock muxes, gating and SSC
*/
- intel_program_port_clock_ctl(encoder, crtc_state, lane_reversal);
+ intel_program_port_clock_ctl(encoder, pll_state, is_dp, port_clock, lane_reversal);
/* 2. Bring PHY out of reset. */
intel_cx0_phy_lane_reset(encoder, lane_reversal);
@@ -3014,15 +3036,15 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
/* 5. Program PHY internal PLL internal registers. */
if (intel_encoder_is_c10phy(encoder))
- intel_c10_pll_program(display, crtc_state, encoder);
+ intel_c10_pll_program(display, encoder, &pll_state->c10);
else
- intel_c20_pll_program(display, crtc_state, encoder);
+ intel_c20_pll_program(display, encoder, &pll_state->c20, is_dp, port_clock);
/*
* 6. Program the enabled and disabled owned PHY lane
* transmitters over message bus
*/
- intel_cx0_program_phy_lane(encoder, crtc_state->lane_count, lane_reversal);
+ intel_cx0_program_phy_lane(encoder, lane_count, lane_reversal);
/*
* 7. Follow the Display Voltage Frequency Switching - Sequence
@@ -3033,8 +3055,7 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
* 8. Program DDI_CLK_VALFREQ to match intended DDI
* clock frequency.
*/
- intel_de_write(display, DDI_CLK_VALFREQ(encoder->port),
- crtc_state->port_clock);
+ intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), port_clock);
/*
* 9. Set PORT_CLOCK_CTL register PCLK PLL Request
@@ -3061,6 +3082,14 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
intel_cx0_phy_transaction_end(encoder, wakeref);
}
+static void intel_cx0pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ __intel_cx0pll_enable(encoder, &crtc_state->dpll_hw_state.cx0pll,
+ intel_crtc_has_dp_encoder(crtc_state),
+ crtc_state->port_clock, crtc_state->lane_count);
+}
+
int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
@@ -3203,12 +3232,11 @@ void intel_mtl_pll_enable(struct intel_encoder *encoder,
static u8 cx0_power_control_disable_val(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (intel_encoder_is_c10phy(encoder))
return CX0_P2PG_STATE_DISABLE;
- if ((IS_BATTLEMAGE(i915) && encoder->port == PORT_A) ||
+ if ((display->platform.battlemage && encoder->port == PORT_A) ||
(DISPLAY_VER(display) >= 30 && encoder->type == INTEL_OUTPUT_EDP))
return CX0_P2PG_STATE_DISABLE;
@@ -3266,6 +3294,16 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
intel_cx0_phy_transaction_end(encoder, wakeref);
}
+static bool intel_cx0_pll_is_enabled(struct intel_encoder *encoder)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ u8 lane = dig_port->lane_reversal ? INTEL_CX0_LANE1 : INTEL_CX0_LANE0;
+
+ return intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port)) &
+ intel_cx0_get_pclk_pll_request(lane);
+}
+
static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
@@ -3527,3 +3565,54 @@ void intel_cx0pll_state_verify(struct intel_atomic_state *state,
else
intel_c20pll_state_verify(new_crtc_state, crtc, encoder, &mpll_hw_state.c20);
}
+
+/*
+ * WA 14022081154
+ * The dedicated display PHYs reset to a power state that blocks S0ix, increasing idle
+ * system power. After a system reset (cold boot, S3/4/5, warm reset), if a dedicated
+ * PHY is not going to be brought up shortly, use these steps to move the PHY to its
+ * lowest power state to save power. For PTL the workaround is needed only for port A;
+ * port B is not connected.
+ *
+ * 1. Follow the PLL Enable Sequence, using any valid frequency such as DP 1.62 GHz.
+ * This brings lanes out of reset and enables the PLL to allow powerdown to be moved
+ * to the Disable state.
+ * 2. Follow PLL Disable Sequence. This moves powerdown to the Disable state and disables the PLL.
+ */
+void intel_cx0_pll_power_save_wa(struct intel_display *display)
+{
+ struct intel_encoder *encoder;
+
+ if (DISPLAY_VER(display) != 30)
+ return;
+
+ for_each_intel_encoder(display->drm, encoder) {
+ struct intel_cx0pll_state pll_state = {};
+ int port_clock = 162000;
+
+ if (!intel_encoder_is_dig_port(encoder))
+ continue;
+
+ if (!intel_encoder_is_c10phy(encoder))
+ continue;
+
+ if (intel_cx0_pll_is_enabled(encoder))
+ continue;
+
+ if (intel_c10pll_calc_state_from_table(encoder,
+ mtl_c10_edp_tables,
+ true, port_clock,
+ &pll_state) < 0) {
+ drm_warn(display->drm,
+ "Unable to calc C10 state from the tables\n");
+ continue;
+ }
+
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] Applying power saving workaround on disabled PLL\n",
+ encoder->base.base.id, encoder->base.name);
+
+ __intel_cx0pll_enable(encoder, &pll_state, true, port_clock, 4);
+ intel_cx0pll_disable(encoder);
+ }
+}
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
index 711168882684..a8f811ca5e7b 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
@@ -41,7 +41,7 @@ bool intel_cx0pll_compare_hw_state(const struct intel_cx0pll_state *a,
const struct intel_cx0pll_state *b);
void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
-int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock);
int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder);
+void intel_cx0_pll_power_save_wa(struct intel_display *display);
#endif /* __INTEL_CX0_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
index da154ff26b96..960f7f778fb8 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
@@ -110,7 +110,8 @@
#define XELPDP_TCSS_POWER_REQUEST REG_BIT(5)
#define XELPDP_TCSS_POWER_STATE REG_BIT(4)
#define XELPDP_PORT_WIDTH_MASK REG_GENMASK(3, 1)
-#define XELPDP_PORT_WIDTH(val) REG_FIELD_PREP(XELPDP_PORT_WIDTH_MASK, val)
+#define XELPDP_PORT_WIDTH(width) REG_FIELD_PREP(XELPDP_PORT_WIDTH_MASK, \
+ ((width) == 3 ? 4 : (width) - 1))
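The reworked XELPDP_PORT_WIDTH() macro folds the lane-count-to-register encoding (previously done by the mtl_get_port_width() helper removed from intel_ddi.c further below) into the field-prep step: 1, 2 and 4 lanes encode as width - 1, while 3 lanes encode as 4. A minimal stand-alone sketch of just that mapping, with REG_FIELD_PREP() and the surrounding register plumbing omitted:

#include <stdio.h>

/* Same encoding as XELPDP_PORT_WIDTH(), minus the REG_FIELD_PREP() wrapper. */
#define PORT_WIDTH_ENCODE(width) ((width) == 3 ? 4 : (width) - 1)

int main(void)
{
	/* Prints 1->0, 2->1, 3->4, 4->3, matching the removed lookup table. */
	for (int lanes = 1; lanes <= 4; lanes++)
		printf("lane_count=%d -> field value %d\n",
		       lanes, PORT_WIDTH_ENCODE(lanes));
	return 0;
}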
#define _XELPDP_PORT_BUF_CTL2(idx) _MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
_XELPDP_PORT_BUF_CTL1_LN0_A, \
@@ -218,10 +219,34 @@
/* C10 Vendor Registers */
#define PHY_C10_VDR_PLL(idx) (0xC00 + (idx))
+#define C10_PLL0_SSC_EN REG_BIT8(0)
+#define C10_PLL0_DIVCLK_EN REG_BIT8(1)
+#define C10_PLL0_DIV5CLK_EN REG_BIT8(2)
+#define C10_PLL0_WORDDIV2_EN REG_BIT8(3)
#define C10_PLL0_FRACEN REG_BIT8(4)
+#define C10_PLL0_PMIX_EN REG_BIT8(5)
+#define C10_PLL0_ANA_FREQ_VCO_MASK REG_GENMASK8(7, 6)
+#define C10_PLL1_DIV_MULTIPLIER_MASK REG_GENMASK8(7, 0)
+#define C10_PLL2_MULTIPLIERL_MASK REG_GENMASK8(7, 0)
#define C10_PLL3_MULTIPLIERH_MASK REG_GENMASK8(3, 0)
+#define C10_PLL8_SSC_UP_SPREAD REG_BIT8(5)
+#define C10_PLL9_FRACN_DENL_MASK REG_GENMASK8(7, 0)
+#define C10_PLL10_FRACN_DENH_MASK REG_GENMASK8(7, 0)
+#define C10_PLL11_FRACN_QUOT_L_MASK REG_GENMASK8(7, 0)
+#define C10_PLL12_FRACN_QUOT_H_MASK REG_GENMASK8(7, 0)
+#define C10_PLL13_FRACN_REM_L_MASK REG_GENMASK8(7, 0)
+#define C10_PLL14_FRACN_REM_H_MASK REG_GENMASK8(7, 0)
#define C10_PLL15_TXCLKDIV_MASK REG_GENMASK8(2, 0)
#define C10_PLL15_HDMIDIV_MASK REG_GENMASK8(5, 3)
+#define C10_PLL15_PIXELCLKDIV_MASK REG_GENMASK8(7, 6)
+#define C10_PLL16_ANA_CPINT REG_GENMASK8(6, 0)
+#define C10_PLL16_ANA_CPINTGS_L REG_BIT8(7)
+#define C10_PLL17_ANA_CPINTGS_H_MASK REG_GENMASK8(5, 0)
+#define C10_PLL17_ANA_CPPROP_L_MASK REG_GENMASK8(7, 6)
+#define C10_PLL18_ANA_CPPROP_H_MASK REG_GENMASK8(4, 0)
+#define C10_PLL18_ANA_CPPROPGS_L_MASK REG_GENMASK8(7, 5)
+#define C10_PLL19_ANA_CPPROPGS_H_MASK REG_GENMASK8(3, 0)
+#define C10_PLL19_ANA_V2I_MASK REG_GENMASK8(5, 4)
#define PHY_C10_VDR_CMN(idx) (0xC20 + (idx))
#define C10_CMN0_REF_RANGE REG_FIELD_PREP(REG_GENMASK(4, 0), 1)
@@ -298,6 +323,12 @@
#define C20_PHY_TX_RATE REG_GENMASK(2, 0)
#define C20_PHY_TX_MISC_MASK REG_GENMASK16(7, 0)
#define C20_PHY_TX_MISC(val) REG_FIELD_PREP16(C20_PHY_TX_MISC_MASK, (val))
+#define C20_PHY_TX_DCC_CAL_RANGE_MASK REG_GENMASK16(11, 8)
+#define C20_PHY_TX_DCC_CAL_RANGE(val) \
+ REG_FIELD_PREP16(C20_PHY_TX_DCC_CAL_RANGE_MASK, (val))
+#define C20_PHY_TX_DCC_BYPASS REG_BIT(12)
+#define C20_PHY_TX_TERM_CTL_MASK REG_GENMASK16(15, 13)
+#define C20_PHY_TX_TERM_CTL(val) REG_FIELD_PREP16(C20_PHY_TX_TERM_CTL_MASK, (val))
#define PHY_C20_A_CMN_CNTX_CFG(i915, idx) \
((_IS_XE2HPD_C20(i915) ? _XE2HPD_C20_A_CMN_CNTX_CFG : _MTL_C20_A_CMN_CNTX_CFG) - (idx))
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index ff2cf3daa7a2..f38c998935b9 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -70,6 +70,7 @@
#include "intel_lspcon.h"
#include "intel_mg_phy_regs.h"
#include "intel_modeset_lock.h"
+#include "intel_pfit.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
@@ -177,69 +178,60 @@ static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
trans->entries[level].hsw.trans2);
}
-static void mtl_wait_ddi_buf_idle(struct drm_i915_private *i915, enum port port)
+static i915_reg_t intel_ddi_buf_status_reg(struct intel_display *display, enum port port)
{
- int ret;
+ struct drm_i915_private *i915 = to_i915(display->drm);
- /* FIXME: find out why Bspec's 100us timeout is too short */
- ret = wait_for_us((intel_de_read(i915, XELPDP_PORT_BUF_CTL1(i915, port)) &
- XELPDP_PORT_BUF_PHY_IDLE), 10000);
- if (ret)
- drm_err(&i915->drm, "Timeout waiting for DDI BUF %c to get idle\n",
- port_name(port));
+ if (DISPLAY_VER(display) >= 14)
+ return XELPDP_PORT_BUF_CTL1(i915, port);
+ else
+ return DDI_BUF_CTL(port);
}
-void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
- enum port port)
+void intel_wait_ddi_buf_idle(struct intel_display *display, enum port port)
{
- if (IS_BROXTON(dev_priv)) {
+ /*
+ * Bspec's platform-specific timeouts:
+ * MTL+ : 100 us
+ * BXT : fixed 16 us
+ * HSW-ADL: 8 us
+ *
+ * FIXME: MTL requires 10 ms based on tests, find out why 100 us is too short
+ */
+ if (display->platform.broxton) {
udelay(16);
return;
}
- if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
- DDI_BUF_IS_IDLE), 8))
- drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c to get idle\n",
+ static_assert(DDI_BUF_IS_IDLE == XELPDP_PORT_BUF_PHY_IDLE);
+ if (intel_de_wait_for_set(display, intel_ddi_buf_status_reg(display, port),
+ DDI_BUF_IS_IDLE, 10))
+ drm_err(display->drm, "Timeout waiting for DDI BUF %c to get idle\n",
port_name(port));
}
static void intel_wait_ddi_buf_active(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
- int timeout_us;
- int ret;
- /* Wait > 518 usecs for DDI_BUF_CTL to be non idle */
- if (DISPLAY_VER(dev_priv) < 10) {
+ /*
+ * Bspec's platform-specific timeouts:
+ * MTL+ : 10000 us
+ * DG2 : 1200 us
+ * TGL-ADL combo PHY: 1000 us
+ * TGL-ADL TypeC PHY: 3000 us
+ * HSW-ICL : fixed 518 us
+ */
+ if (DISPLAY_VER(display) < 10) {
usleep_range(518, 1000);
return;
}
- if (DISPLAY_VER(dev_priv) >= 14) {
- timeout_us = 10000;
- } else if (IS_DG2(dev_priv)) {
- timeout_us = 1200;
- } else if (DISPLAY_VER(dev_priv) >= 12) {
- if (intel_encoder_is_tc(encoder))
- timeout_us = 3000;
- else
- timeout_us = 1000;
- } else {
- timeout_us = 500;
- }
-
- if (DISPLAY_VER(dev_priv) >= 14)
- ret = _wait_for(!(intel_de_read(dev_priv,
- XELPDP_PORT_BUF_CTL1(dev_priv, port)) &
- XELPDP_PORT_BUF_PHY_IDLE),
- timeout_us, 10, 10);
- else
- ret = _wait_for(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) & DDI_BUF_IS_IDLE),
- timeout_us, 10, 10);
-
- if (ret)
- drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c to get active\n",
+ static_assert(DDI_BUF_IS_IDLE == XELPDP_PORT_BUF_PHY_IDLE);
+ if (intel_de_wait_for_clear(display, intel_ddi_buf_status_reg(display, port),
+ DDI_BUF_IS_IDLE, 10))
+ drm_err(display->drm, "Timeout waiting for DDI BUF %c to get active\n",
port_name(port));
}
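The two wait helpers above now poll a single status bit through intel_ddi_buf_status_reg() for both the pre-MTL DDI_BUF_CTL and the MTL+ PORT_BUF_CTL1 layouts; the static_assert(DDI_BUF_IS_IDLE == XELPDP_PORT_BUF_PHY_IDLE) documents and enforces the assumption that the idle bit sits at the same position in both registers. A stand-alone sketch of the same pattern (the bit position used here is purely illustrative, not taken from Bspec):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit definitions for two register layouts that share an idle bit. */
#define OLD_REG_IDLE	(1u << 7)
#define NEW_REG_IDLE	(1u << 7)

/* Compile-time guard: a shared polling helper is only valid if both layouts agree. */
static_assert(OLD_REG_IDLE == NEW_REG_IDLE, "idle bit must match across layouts");

static int buf_is_idle(uint32_t reg_val)
{
	return !!(reg_val & OLD_REG_IDLE);
}

int main(void)
{
	printf("idle=%d\n", buf_is_idle(0x80));	/* bit 7 set -> idle */
	printf("idle=%d\n", buf_is_idle(0x00));	/* bit clear -> active */
	return 0;
}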
@@ -328,9 +320,32 @@ static u32 ddi_buf_phy_link_rate(int port_clock)
}
}
+static int dp_phy_lane_stagger_delay(int port_clock)
+{
+ /*
+ * Return the number of symbol clocks of delay used to stagger the
+ * assertion/deassertion of the port lane enables. The target delay
+ * time is 100 ns or greater; return the number of symbols, specific to
+ * the provided port_clock (aka link clock), corresponding to this delay
+ * time, i.e. so that
+ *
+ * number_of_symbols * duration_of_one_symbol >= 100 ns
+ *
+ * The delay must be applied only on TypeC DP outputs; for everything else
+ * the delay must be set to 0.
+ *
+ * Return the number of link symbols per 100 ns:
+ * port_clock (10 kHz) -> bits / 100 us
+ * / symbol_size -> symbols / 100 us
+ * / 1000 -> symbols / 100 ns
+ */
+ return DIV_ROUND_UP(port_clock, intel_dp_link_symbol_size(port_clock) * 1000);
+}
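The DIV_ROUND_UP() above works because port_clock is the per-lane link rate in 10 kHz units, which is numerically the bit count per 100 us; dividing by the symbol size and then by 1000 gives symbols per 100 ns, rounded up. A stand-alone sketch of that arithmetic, assuming the usual DP symbol sizes (10 bits for 8b/10b rates, 32 bits for 128b/132b UHBR rates, which is what intel_dp_link_symbol_size() is expected to return):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Assumed symbol sizes: 32-bit symbols for UHBR (128b/132b), 10-bit for 8b/10b. */
static int link_symbol_size(int port_clock)
{
	return port_clock >= 1000000 ? 32 : 10;
}

static int lane_stagger_delay(int port_clock)
{
	/* port_clock is in 10 kHz units == bits per 100 us per lane. */
	return DIV_ROUND_UP(port_clock, link_symbol_size(port_clock) * 1000);
}

int main(void)
{
	const int rates[] = { 270000, 540000, 810000, 1000000 };

	/* Prints 270000 -> 27, 540000 -> 54, 810000 -> 81, 1000000 -> 32. */
	for (unsigned int i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("port_clock %7d -> %d symbols of stagger delay\n",
		       rates[i], lane_stagger_delay(rates[i]));
	return 0;
}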
+
static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
@@ -356,12 +371,18 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
if (!intel_tc_port_in_tbt_alt_mode(dig_port))
intel_dp->DP |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}
+
+ if (IS_DISPLAY_VER(display, 11, 13) && intel_encoder_is_tc(encoder)) {
+ int delay = dp_phy_lane_stagger_delay(crtc_state->port_clock);
+
+ intel_dp->DP |= DDI_BUF_LANE_STAGGER_DELAY(delay);
+ }
}
-static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
+static int icl_calc_tbt_pll_link(struct intel_display *display,
enum port port)
{
- u32 val = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
+ u32 val = intel_de_read(display, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
switch (val) {
case DDI_CLK_SEL_NONE:
@@ -703,19 +724,20 @@ int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
enum transcoder cpu_transcoder,
bool enable, u32 hdcp_mask)
{
+ struct intel_display *display = to_intel_display(intel_encoder);
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
intel_wakeref_t wakeref;
int ret = 0;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+ wakeref = intel_display_power_get_if_enabled(display,
intel_encoder->power_domain);
if (drm_WARN_ON(dev, !wakeref))
return -ENXIO;
intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
hdcp_mask, enable ? hdcp_mask : 0);
- intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
+ intel_display_power_put(display, intel_encoder->power_domain, wakeref);
return ret;
}
@@ -732,7 +754,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
u32 ddi_mode;
bool ret;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+ wakeref = intel_display_power_get_if_enabled(display,
encoder->power_domain);
if (!wakeref)
return false;
@@ -773,7 +795,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
}
out:
- intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+ intel_display_power_put(display, encoder->power_domain, wakeref);
return ret;
}
@@ -792,7 +814,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
*pipe_mask = 0;
*is_dp_mst = false;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+ wakeref = intel_display_power_get_if_enabled(display,
encoder->power_domain);
if (!wakeref)
return;
@@ -829,7 +851,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
u32 port_mask, ddi_select, ddi_mode;
intel_wakeref_t trans_wakeref;
- trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
+ trans_wakeref = intel_display_power_get_if_enabled(display,
POWER_DOMAIN_TRANSCODER(cpu_transcoder));
if (!trans_wakeref)
continue;
@@ -844,7 +866,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
tmp = intel_de_read(dev_priv,
TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
- intel_display_power_put(dev_priv, POWER_DOMAIN_TRANSCODER(cpu_transcoder),
+ intel_display_power_put(display, POWER_DOMAIN_TRANSCODER(cpu_transcoder),
trans_wakeref);
if ((tmp & port_mask) != ddi_select)
@@ -910,7 +932,7 @@ out:
encoder->base.base.id, encoder->base.name, tmp);
}
- intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+ intel_display_power_put(display, encoder->power_domain, wakeref);
}
bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
@@ -933,7 +955,7 @@ static enum intel_display_power_domain
intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
/*
* ICL+ HW requires corresponding AUX IOs to be powered up for PSR with
@@ -949,8 +971,8 @@ intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port,
* extra wells.
*/
if (intel_psr_needs_aux_io_power(&dig_port->base, crtc_state))
- return intel_display_power_aux_io_domain(i915, dig_port->aux_ch);
- else if (DISPLAY_VER(i915) < 14 &&
+ return intel_display_power_aux_io_domain(display, dig_port->aux_ch);
+ else if (DISPLAY_VER(display) < 14 &&
(intel_crtc_has_dp_encoder(crtc_state) ||
intel_encoder_is_tc(&dig_port->base)))
return intel_aux_power_domain(dig_port);
@@ -962,23 +984,23 @@ static void
main_link_aux_power_domain_get(struct intel_digital_port *dig_port,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
enum intel_display_power_domain domain =
intel_ddi_main_link_aux_domain(dig_port, crtc_state);
- drm_WARN_ON(&i915->drm, dig_port->aux_wakeref);
+ drm_WARN_ON(display->drm, dig_port->aux_wakeref);
if (domain == POWER_DOMAIN_INVALID)
return;
- dig_port->aux_wakeref = intel_display_power_get(i915, domain);
+ dig_port->aux_wakeref = intel_display_power_get(display, domain);
}
static void
main_link_aux_power_domain_put(struct intel_digital_port *dig_port,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
enum intel_display_power_domain domain =
intel_ddi_main_link_aux_domain(dig_port, crtc_state);
intel_wakeref_t wf;
@@ -987,13 +1009,13 @@ main_link_aux_power_domain_put(struct intel_digital_port *dig_port,
if (!wf)
return;
- intel_display_power_put(i915, domain, wf);
+ intel_display_power_put(display, domain, wf);
}
static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port;
/*
@@ -1001,15 +1023,15 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
* happen since fake-MST encoders don't set their get_power_domains()
* hook.
*/
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(display->drm,
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
return;
dig_port = enc_to_dig_port(encoder);
if (!intel_tc_port_in_tbt_alt_mode(dig_port)) {
- drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
- dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+ drm_WARN_ON(display->drm, dig_port->ddi_io_wakeref);
+ dig_port->ddi_io_wakeref = intel_display_power_get(display,
dig_port->ddi_io_power_domain);
}
@@ -1171,7 +1193,8 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
/* Set PORT_TX_DW5 */
val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
- TAP2_DISABLE | TAP3_DISABLE);
+ COEFF_POLARITY | CURSOR_PROGRAM |
+ TAP2_DISABLE | TAP3_DISABLE);
val |= SCALING_MODE_SEL(0x2);
val |= RTERM_SELECT(0x6);
val |= TAP3_DISABLE;
@@ -1365,7 +1388,7 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum tc_port tc_port = intel_encoder_to_tc(encoder);
const struct intel_ddi_buf_trans *trans;
int n_entries, ln;
@@ -1374,17 +1397,17 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
return;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
for (ln = 0; ln < 2; ln++) {
int level;
- intel_dkl_phy_write(dev_priv, DKL_TX_PMD_LANE_SUS(tc_port, ln), 0);
+ intel_dkl_phy_write(display, DKL_TX_PMD_LANE_SUS(tc_port, ln), 0);
level = intel_ddi_level(encoder, crtc_state, 2*ln+0);
- intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL0(tc_port, ln),
+ intel_dkl_phy_rmw(display, DKL_TX_DPCNTL0(tc_port, ln),
DKL_TX_PRESHOOT_COEFF_MASK |
DKL_TX_DE_EMPAHSIS_COEFF_MASK |
DKL_TX_VSWING_CONTROL_MASK,
@@ -1394,7 +1417,7 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
level = intel_ddi_level(encoder, crtc_state, 2*ln+1);
- intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL1(tc_port, ln),
+ intel_dkl_phy_rmw(display, DKL_TX_DPCNTL1(tc_port, ln),
DKL_TX_PRESHOOT_COEFF_MASK |
DKL_TX_DE_EMPAHSIS_COEFF_MASK |
DKL_TX_VSWING_CONTROL_MASK,
@@ -1402,10 +1425,10 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) |
DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing));
- intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port, ln),
+ intel_dkl_phy_rmw(display, DKL_TX_DPCNTL2(tc_port, ln),
DKL_TX_DP20BITMODE, 0);
- if (IS_ALDERLAKE_P(dev_priv)) {
+ if (display->platform.alderlake_p) {
u32 val;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
@@ -1421,7 +1444,7 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(0);
}
- intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port, ln),
+ intel_dkl_phy_rmw(display, DKL_TX_DPCNTL2(tc_port, ln),
DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK |
DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK,
val);
@@ -1549,14 +1572,14 @@ static bool _icl_ddi_is_clock_enabled(struct drm_i915_private *i915, i915_reg_t
}
static struct intel_shared_dpll *
-_icl_ddi_get_pll(struct drm_i915_private *i915, i915_reg_t reg,
+_icl_ddi_get_pll(struct intel_display *display, i915_reg_t reg,
u32 clk_sel_mask, u32 clk_sel_shift)
{
enum intel_dpll_id id;
- id = (intel_de_read(i915, reg) & clk_sel_mask) >> clk_sel_shift;
+ id = (intel_de_read(display, reg) & clk_sel_mask) >> clk_sel_shift;
- return intel_get_shared_dpll_by_id(i915, id);
+ return intel_get_shared_dpll_by_id(display, id);
}
static void adls_ddi_enable_clock(struct intel_encoder *encoder,
@@ -1595,10 +1618,10 @@ static bool adls_ddi_is_clock_enabled(struct intel_encoder *encoder)
static struct intel_shared_dpll *adls_ddi_get_pll(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_get_pll(i915, ADLS_DPCLKA_CFGCR(phy),
+ return _icl_ddi_get_pll(display, ADLS_DPCLKA_CFGCR(phy),
ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy),
ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy));
}
@@ -1639,10 +1662,10 @@ static bool rkl_ddi_is_clock_enabled(struct intel_encoder *encoder)
static struct intel_shared_dpll *rkl_ddi_get_pll(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0,
+ return _icl_ddi_get_pll(display, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy));
}
@@ -1692,12 +1715,12 @@ static bool dg1_ddi_is_clock_enabled(struct intel_encoder *encoder)
static struct intel_shared_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
enum intel_dpll_id id;
u32 val;
- val = intel_de_read(i915, DG1_DPCLKA_CFGCR0(phy));
+ val = intel_de_read(display, DG1_DPCLKA_CFGCR0(phy));
val &= DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
val >>= DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
id = val;
@@ -1710,7 +1733,7 @@ static struct intel_shared_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder)
if (phy >= PHY_C)
id += DPLL_ID_DG1_DPLL2;
- return intel_get_shared_dpll_by_id(i915, id);
+ return intel_get_shared_dpll_by_id(display, id);
}
static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
@@ -1749,10 +1772,10 @@ static bool icl_ddi_combo_is_clock_enabled(struct intel_encoder *encoder)
struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0,
+ return _icl_ddi_get_pll(display, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy));
}
@@ -1857,13 +1880,13 @@ static bool icl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
static struct intel_shared_dpll *icl_ddi_tc_get_pll(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
enum intel_dpll_id id;
u32 tmp;
- tmp = intel_de_read(i915, DDI_CLK_SEL(port));
+ tmp = intel_de_read(display, DDI_CLK_SEL(port));
switch (tmp & DDI_CLK_SEL_MASK) {
case DDI_CLK_SEL_TBT_162:
@@ -1882,12 +1905,12 @@ static struct intel_shared_dpll *icl_ddi_tc_get_pll(struct intel_encoder *encode
return NULL;
}
- return intel_get_shared_dpll_by_id(i915, id);
+ return intel_get_shared_dpll_by_id(display, id);
}
static struct intel_shared_dpll *bxt_ddi_get_pll(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder->base.dev);
enum intel_dpll_id id;
switch (encoder->port) {
@@ -1905,7 +1928,7 @@ static struct intel_shared_dpll *bxt_ddi_get_pll(struct intel_encoder *encoder)
return NULL;
}
- return intel_get_shared_dpll_by_id(i915, id);
+ return intel_get_shared_dpll_by_id(display, id);
}
static void skl_ddi_enable_clock(struct intel_encoder *encoder,
@@ -1956,12 +1979,12 @@ static bool skl_ddi_is_clock_enabled(struct intel_encoder *encoder)
static struct intel_shared_dpll *skl_ddi_get_pll(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
enum intel_dpll_id id;
u32 tmp;
- tmp = intel_de_read(i915, DPLL_CTRL2);
+ tmp = intel_de_read(display, DPLL_CTRL2);
/*
* FIXME Not sure if the override affects both
@@ -1973,7 +1996,7 @@ static struct intel_shared_dpll *skl_ddi_get_pll(struct intel_encoder *encoder)
id = (tmp & DPLL_CTRL2_DDI_CLK_SEL_MASK(port)) >>
DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port);
- return intel_get_shared_dpll_by_id(i915, id);
+ return intel_get_shared_dpll_by_id(display, id);
}
void hsw_ddi_enable_clock(struct intel_encoder *encoder,
@@ -2007,12 +2030,12 @@ bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder)
static struct intel_shared_dpll *hsw_ddi_get_pll(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
enum intel_dpll_id id;
u32 tmp;
- tmp = intel_de_read(i915, PORT_CLK_SEL(port));
+ tmp = intel_de_read(display, PORT_CLK_SEL(port));
switch (tmp & PORT_CLK_SEL_MASK) {
case PORT_CLK_SEL_WRPLL1:
@@ -2040,7 +2063,7 @@ static struct intel_shared_dpll *hsw_ddi_get_pll(struct intel_encoder *encoder)
return NULL;
}
- return intel_get_shared_dpll_by_id(i915, id);
+ return intel_get_shared_dpll_by_id(display, id);
}
void intel_ddi_enable_clock(struct intel_encoder *encoder,
@@ -2120,27 +2143,37 @@ void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
}
static void
+tgl_dkl_phy_check_and_rewrite(struct intel_display *display,
+ enum tc_port tc_port, u32 ln0, u32 ln1)
+{
+ if (ln0 != intel_dkl_phy_read(display, DKL_DP_MODE(tc_port, 0)))
+ intel_dkl_phy_write(display, DKL_DP_MODE(tc_port, 0), ln0);
+ if (ln1 != intel_dkl_phy_read(display, DKL_DP_MODE(tc_port, 1)))
+ intel_dkl_phy_write(display, DKL_DP_MODE(tc_port, 1), ln1);
+}
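tgl_dkl_phy_check_and_rewrite() implements WA_14018221282 by reading DKL_DP_MODE back after the write and rewriting it if the value did not stick. A minimal stand-alone sketch of the same write / read-back / conditional-rewrite pattern against a fake register (the register, names and the simulated dropped write are illustrative only, not the DKL PHY interface):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;
static int drop_first_write = 1;

static void reg_write(uint32_t val)
{
	if (drop_first_write) {	/* simulate a write that does not land */
		drop_first_write = 0;
		return;
	}
	fake_reg = val;
}

static uint32_t reg_read(void)
{
	return fake_reg;
}

/* Write, read back, and rewrite once if the value did not stick. */
static void write_checked(uint32_t val)
{
	reg_write(val);
	if (reg_read() != val)
		reg_write(val);
}

int main(void)
{
	write_checked(0x2a);
	printf("register = 0x%02x\n", fake_reg);	/* 0x2a despite the dropped write */
	return 0;
}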
+
+static void
icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
u32 ln0, ln1, pin_assignment;
u8 width;
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
return;
if (!intel_encoder_is_tc(&dig_port->base) ||
intel_tc_port_in_tbt_alt_mode(dig_port))
return;
- if (DISPLAY_VER(dev_priv) >= 12) {
- ln0 = intel_dkl_phy_read(dev_priv, DKL_DP_MODE(tc_port, 0));
- ln1 = intel_dkl_phy_read(dev_priv, DKL_DP_MODE(tc_port, 1));
+ if (DISPLAY_VER(display) >= 12) {
+ ln0 = intel_dkl_phy_read(display, DKL_DP_MODE(tc_port, 0));
+ ln1 = intel_dkl_phy_read(display, DKL_DP_MODE(tc_port, 1));
} else {
- ln0 = intel_de_read(dev_priv, MG_DP_MODE(0, tc_port));
- ln1 = intel_de_read(dev_priv, MG_DP_MODE(1, tc_port));
+ ln0 = intel_de_read(display, MG_DP_MODE(0, tc_port));
+ ln1 = intel_de_read(display, MG_DP_MODE(1, tc_port));
}
ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
@@ -2152,7 +2185,7 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
switch (pin_assignment) {
case 0x0:
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
!intel_tc_port_in_legacy_mode(dig_port));
if (width == 1) {
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
@@ -2197,12 +2230,16 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
MISSING_CASE(pin_assignment);
}
- if (DISPLAY_VER(dev_priv) >= 12) {
- intel_dkl_phy_write(dev_priv, DKL_DP_MODE(tc_port, 0), ln0);
- intel_dkl_phy_write(dev_priv, DKL_DP_MODE(tc_port, 1), ln1);
+ if (DISPLAY_VER(display) >= 12) {
+ intel_dkl_phy_write(display, DKL_DP_MODE(tc_port, 0), ln0);
+ intel_dkl_phy_write(display, DKL_DP_MODE(tc_port, 1), ln1);
+ /* WA_14018221282 */
+ if (IS_DISPLAY_VER(display, 12, 13))
+ tgl_dkl_phy_check_and_rewrite(display, tc_port, ln0, ln1);
+
} else {
- intel_de_write(dev_priv, MG_DP_MODE(0, tc_port), ln0);
- intel_de_write(dev_priv, MG_DP_MODE(1, tc_port), ln1);
+ intel_de_write(display, MG_DP_MODE(0, tc_port), ln0);
+ intel_de_write(display, MG_DP_MODE(1, tc_port), ln1);
}
}
@@ -2421,13 +2458,13 @@ static void intel_ddi_disable_fec(struct intel_encoder *encoder,
static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
if (intel_encoder_is_combo(encoder)) {
enum phy phy = intel_encoder_to_phy(encoder);
- intel_combo_phy_power_up_lanes(i915, phy, false,
+ intel_combo_phy_power_up_lanes(display, phy, false,
crtc_state->lane_count,
dig_port->lane_reversal);
}
@@ -2509,23 +2546,6 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
OVERLAP_PIXELS_MASK, dss1);
}
-static u8 mtl_get_port_width(u8 lane_count)
-{
- switch (lane_count) {
- case 1:
- return 0;
- case 2:
- return 1;
- case 3:
- return 4;
- case 4:
- return 3;
- default:
- MISSING_CASE(lane_count);
- return 4;
- }
-}
-
static void
mtl_ddi_enable_d2d(struct intel_encoder *encoder)
{
@@ -2534,6 +2554,9 @@ mtl_ddi_enable_d2d(struct intel_encoder *encoder)
i915_reg_t reg;
u32 set_bits, wait_bits;
+ if (DISPLAY_VER(dev_priv) < 14)
+ return;
+
if (DISPLAY_VER(dev_priv) >= 20) {
reg = DDI_BUF_CTL(port);
set_bits = XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
@@ -2559,7 +2582,7 @@ static void mtl_port_buf_ctl_program(struct intel_encoder *encoder,
enum port port = encoder->port;
u32 val = 0;
- val |= XELPDP_PORT_WIDTH(mtl_get_port_width(crtc_state->lane_count));
+ val |= XELPDP_PORT_WIDTH(crtc_state->lane_count);
if (intel_dp_is_uhbr(crtc_state))
val |= XELPDP_PORT_BUF_PORT_DATA_40BIT;
@@ -2593,6 +2616,7 @@ static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state,
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+ bool transparent_mode;
int ret;
intel_dp_set_link_params(intel_dp,
@@ -2644,6 +2668,9 @@ static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state,
if (!is_mst)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
+ transparent_mode = intel_dp_lttpr_transparent_mode_enabled(intel_dp);
+ drm_dp_lttpr_wake_timeout_setup(&intel_dp->aux, transparent_mode);
+
intel_dp_configure_protocol_converter(intel_dp, crtc_state);
if (!is_mst)
intel_dp_sink_enable_decompression(state,
@@ -2705,6 +2732,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
@@ -2751,7 +2779,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
/* 5. If IO power is controlled through PWR_WELL_CTL, Enable IO Power */
if (!intel_tc_port_in_tbt_alt_mode(dig_port)) {
drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
- dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+ dig_port->ddi_io_wakeref = intel_display_power_get(display,
dig_port->ddi_io_power_domain);
}
@@ -2852,6 +2880,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
@@ -2880,7 +2909,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
if (!intel_tc_port_in_tbt_alt_mode(dig_port)) {
drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
- dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+ dig_port->ddi_io_wakeref = intel_display_power_get(display,
dig_port->ddi_io_power_domain);
}
@@ -2926,8 +2955,7 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
crtc_state);
/* Panel replay has to be enabled in sink dpcd before link training. */
- if (crtc_state->has_panel_replay)
- intel_psr_enable_sink(enc_to_intel_dp(encoder), crtc_state);
+ intel_psr_panel_replay_enable_sink(enc_to_intel_dp(encoder));
if (DISPLAY_VER(display) >= 14)
mtl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
@@ -2948,6 +2976,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -2956,7 +2985,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
intel_ddi_enable_clock(encoder, crtc_state);
drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
- dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+ dig_port->ddi_io_wakeref = intel_display_power_get(display,
dig_port->ddi_io_power_domain);
icl_program_mg_dp_mode(dig_port, crtc_state);
@@ -2984,20 +3013,21 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
* - crtc_state will be the state of the first stream to be activated on this
* port, and it may not be the same stream that will be deactivated last, but
* each stream should have a state that is identical when it comes to the DP
- * link parameteres
+ * link parameters.
*/
static void intel_ddi_pre_enable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
drm_WARN_ON(&dev_priv->drm, crtc_state->has_pch_encoder);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+ intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
intel_ddi_pre_enable_hdmi(state, encoder, crtc_state,
@@ -3010,7 +3040,7 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state,
/* FIXME precompute everything properly */
/* FIXME how do we turn infoframes off again? */
- if (dig_port->lspcon.active && intel_dp_has_hdmi_sink(&dig_port->dp))
+ if (intel_lspcon_active(dig_port) && intel_dp_has_hdmi_sink(&dig_port->dp))
dig_port->set_infoframes(encoder,
crtc_state->has_infoframe,
crtc_state, conn_state);
@@ -3018,13 +3048,16 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state,
}
static void
-mtl_ddi_disable_d2d_link(struct intel_encoder *encoder)
+mtl_ddi_disable_d2d(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
i915_reg_t reg;
u32 clr_bits, wait_bits;
+ if (DISPLAY_VER(dev_priv) < 14)
+ return;
+
if (DISPLAY_VER(dev_priv) >= 20) {
reg = DDI_BUF_CTL(port);
clr_bits = XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
@@ -3041,71 +3074,40 @@ mtl_ddi_disable_d2d_link(struct intel_encoder *encoder)
port_name(port));
}
-static void mtl_disable_ddi_buf(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+static void intel_ddi_buf_enable(struct intel_encoder *encoder, u32 buf_ctl)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
- u32 val;
- /* 3.b Clear DDI_CTL_DE Enable to 0. */
- val = intel_de_read(dev_priv, DDI_BUF_CTL(port));
- if (val & DDI_BUF_CTL_ENABLE) {
- val &= ~DDI_BUF_CTL_ENABLE;
- intel_de_write(dev_priv, DDI_BUF_CTL(port), val);
-
- /* 3.c Poll for PORT_BUF_CTL Idle Status == 1, timeout after 100us */
- mtl_wait_ddi_buf_idle(dev_priv, port);
- }
-
- /* 3.d Disable D2D Link */
- mtl_ddi_disable_d2d_link(encoder);
+ intel_de_write(display, DDI_BUF_CTL(port), buf_ctl | DDI_BUF_CTL_ENABLE);
+ intel_de_posting_read(display, DDI_BUF_CTL(port));
- /* 3.e Disable DP_TP_CTL */
- if (intel_crtc_has_dp_encoder(crtc_state)) {
- intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
- DP_TP_CTL_ENABLE, 0);
- }
+ intel_wait_ddi_buf_active(encoder);
}
-static void disable_ddi_buf(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+static void intel_ddi_buf_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
- bool wait = false;
- u32 val;
- val = intel_de_read(dev_priv, DDI_BUF_CTL(port));
- if (val & DDI_BUF_CTL_ENABLE) {
- val &= ~DDI_BUF_CTL_ENABLE;
- intel_de_write(dev_priv, DDI_BUF_CTL(port), val);
- wait = true;
- }
+ intel_de_rmw(dev_priv, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0);
+
+ if (DISPLAY_VER(display) >= 14)
+ intel_wait_ddi_buf_idle(display, port);
+
+ mtl_ddi_disable_d2d(encoder);
- if (intel_crtc_has_dp_encoder(crtc_state))
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
DP_TP_CTL_ENABLE, 0);
+ }
intel_ddi_disable_fec(encoder, crtc_state);
- if (wait)
- intel_wait_ddi_buf_idle(dev_priv, port);
-}
-
-static void intel_disable_ddi_buf(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (DISPLAY_VER(dev_priv) >= 14) {
- mtl_disable_ddi_buf(encoder, crtc_state);
-
- /* 3.f Disable DP_TP_CTL FEC Enable if it is needed */
- intel_ddi_disable_fec(encoder, crtc_state);
- } else {
- disable_ddi_buf(encoder, crtc_state);
- }
+ if (DISPLAY_VER(display) < 14)
+ intel_wait_ddi_buf_idle(display, port);
intel_ddi_wait_for_fec_status(encoder, crtc_state, false);
}
@@ -3115,6 +3117,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &dig_port->dp;
@@ -3146,7 +3149,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
intel_ddi_disable_transcoder_clock(old_crtc_state);
}
- intel_disable_ddi_buf(encoder, old_crtc_state);
+ intel_ddi_buf_disable(encoder, old_crtc_state);
intel_dp_sink_set_fec_ready(intel_dp, old_crtc_state, false);
@@ -3166,7 +3169,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
wakeref = fetch_and_zero(&dig_port->ddi_io_wakeref);
if (wakeref)
- intel_display_power_put(dev_priv,
+ intel_display_power_put(display,
dig_port->ddi_io_power_domain,
wakeref);
@@ -3183,6 +3186,7 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
@@ -3194,14 +3198,14 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
if (DISPLAY_VER(dev_priv) < 12)
intel_ddi_disable_transcoder_clock(old_crtc_state);
- intel_disable_ddi_buf(encoder, old_crtc_state);
+ intel_ddi_buf_disable(encoder, old_crtc_state);
if (DISPLAY_VER(dev_priv) >= 12)
intel_ddi_disable_transcoder_clock(old_crtc_state);
wakeref = fetch_and_zero(&dig_port->ddi_io_wakeref);
if (wakeref)
- intel_display_power_put(dev_priv,
+ intel_display_power_put(display,
dig_port->ddi_io_power_domain,
wakeref);
@@ -3284,7 +3288,7 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
* be deactivated on this port, and it may not be the same
* stream that was activated last, but each stream
* should have a state that is identical when it comes to
- * the DP link parameteres
+ * the DP link parameters
*/
if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
@@ -3366,7 +3370,7 @@ static void intel_ddi_enable_dp(struct intel_atomic_state *state,
drm_connector_update_privacy_screen(conn_state);
intel_edp_backlight_on(crtc_state, conn_state);
- if (!dig_port->lspcon.active || intel_dp_has_hdmi_sink(&dig_port->dp))
+ if (!intel_lspcon_active(dig_port) || intel_dp_has_hdmi_sink(&dig_port->dp))
intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
trans_port_sync_stop_link_train(state, encoder, crtc_state);
@@ -3401,7 +3405,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_connector *connector = conn_state->connector;
enum port port = encoder->port;
- u32 buf_ctl;
+ u32 buf_ctl = 0;
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
crtc_state->hdmi_high_tmds_clock_ratio,
@@ -3414,8 +3418,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
hsw_prepare_hdmi_ddi_buffers(encoder, crtc_state);
/* e. Enable D2D Link for C10/C20 Phy */
- if (DISPLAY_VER(dev_priv) >= 14)
- mtl_ddi_enable_d2d(encoder);
+ mtl_ddi_enable_d2d(encoder);
encoder->set_signal_levels(encoder, crtc_state);
@@ -3467,18 +3470,15 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
* is filled with lane count, already set in the crtc_state.
* The same is required to be filled in PORT_BUF_CTL for C10/20 Phy.
*/
- buf_ctl = DDI_BUF_CTL_ENABLE;
-
if (dig_port->lane_reversal)
buf_ctl |= DDI_BUF_PORT_REVERSAL;
if (dig_port->ddi_a_4_lanes)
buf_ctl |= DDI_A_4_LANES;
if (DISPLAY_VER(dev_priv) >= 14) {
- u8 lane_count = mtl_get_port_width(crtc_state->lane_count);
u32 port_buf = 0;
- port_buf |= XELPDP_PORT_WIDTH(lane_count);
+ port_buf |= XELPDP_PORT_WIDTH(crtc_state->lane_count);
if (dig_port->lane_reversal)
port_buf |= XELPDP_PORT_REVERSAL;
@@ -3495,9 +3495,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
buf_ctl |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}
- intel_de_write(dev_priv, DDI_BUF_CTL(port), buf_ctl);
-
- intel_wait_ddi_buf_active(encoder);
+ intel_ddi_buf_enable(encoder, buf_ctl);
}
static void intel_ddi_enable(struct intel_atomic_state *state,
@@ -3706,12 +3704,13 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
static void adlp_tbt_to_dp_alt_switch_wa(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum tc_port tc_port = intel_encoder_to_tc(encoder);
int ln;
for (ln = 0; ln < 2; ln++)
- intel_dkl_phy_rmw(i915, DKL_PCS_DW5(tc_port, ln), DKL_PCS_DW5_CORE_SOFTRESET, 0);
+ intel_dkl_phy_rmw(display, DKL_PCS_DW5(tc_port, ln),
+ DKL_PCS_DW5_CORE_SOFTRESET, 0);
}
static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
@@ -3720,7 +3719,6 @@ static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
struct intel_display *display = to_intel_display(crtc_state);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
- enum port port = encoder->port;
u32 dp_tp_ctl;
/*
@@ -3728,8 +3726,8 @@ static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
* necessary disable and enable port
*/
dp_tp_ctl = intel_de_read(display, dp_tp_ctl_reg(encoder, crtc_state));
- if (dp_tp_ctl & DP_TP_CTL_ENABLE)
- mtl_disable_ddi_buf(encoder, crtc_state);
+
+ drm_WARN_ON(display->drm, dp_tp_ctl & DP_TP_CTL_ENABLE);
/* 6.d Configure and enable DP_TP_CTL with link training pattern 1 selected */
dp_tp_ctl = DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1;
@@ -3754,44 +3752,25 @@ static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
mtl_port_buf_ctl_program(encoder, crtc_state);
/* 6.i Configure and enable DDI_CTL_DE to start sending valid data to port slice */
- intel_dp->DP |= DDI_BUF_CTL_ENABLE;
if (DISPLAY_VER(display) >= 20)
intel_dp->DP |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
- intel_de_write(display, DDI_BUF_CTL(port), intel_dp->DP);
- intel_de_posting_read(display, DDI_BUF_CTL(port));
-
- /* 6.j Poll for PORT_BUF_CTL Idle Status == 0, timeout after 100 us */
- intel_wait_ddi_buf_active(encoder);
+ intel_ddi_buf_enable(encoder, intel_dp->DP);
+ intel_dp->DP |= DDI_BUF_CTL_ENABLE;
}
static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
- u32 dp_tp_ctl, ddi_buf_ctl;
- bool wait = false;
+ u32 dp_tp_ctl;
dp_tp_ctl = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
- if (dp_tp_ctl & DP_TP_CTL_ENABLE) {
- ddi_buf_ctl = intel_de_read(dev_priv, DDI_BUF_CTL(port));
- if (ddi_buf_ctl & DDI_BUF_CTL_ENABLE) {
- intel_de_write(dev_priv, DDI_BUF_CTL(port),
- ddi_buf_ctl & ~DDI_BUF_CTL_ENABLE);
- wait = true;
- }
-
- dp_tp_ctl &= ~DP_TP_CTL_ENABLE;
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
- intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
-
- if (wait)
- intel_wait_ddi_buf_idle(dev_priv, port);
- }
+ drm_WARN_ON(display->drm, dp_tp_ctl & DP_TP_CTL_ENABLE);
dp_tp_ctl = DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) ||
@@ -3809,11 +3788,8 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
(intel_tc_port_in_dp_alt_mode(dig_port) || intel_tc_port_in_legacy_mode(dig_port)))
adlp_tbt_to_dp_alt_switch_wa(encoder);
+ intel_ddi_buf_enable(encoder, intel_dp->DP);
intel_dp->DP |= DDI_BUF_CTL_ENABLE;
- intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
- intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
-
- intel_wait_ddi_buf_active(encoder);
}
static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
@@ -3878,10 +3854,12 @@ static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp,
static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder)
{
+ struct intel_display *display = &dev_priv->display;
+
if (cpu_transcoder == TRANSCODER_EDP)
return false;
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO_MMIO))
+ if (!intel_display_power_is_enabled(display, POWER_DOMAIN_AUDIO_MMIO))
return false;
return intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD) &
@@ -3957,6 +3935,7 @@ static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *de
static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 transcoders = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
@@ -3970,7 +3949,7 @@ static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
intel_wakeref_t trans_wakeref;
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
+ trans_wakeref = intel_display_power_get_if_enabled(display,
power_domain);
if (!trans_wakeref)
@@ -3980,7 +3959,7 @@ static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
crtc_state->cpu_transcoder)
crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);
- intel_display_power_put(dev_priv, power_domain, trans_wakeref);
+ intel_display_power_put(display, power_domain, trans_wakeref);
}
drm_WARN_ON(&dev_priv->drm,
@@ -4067,7 +4046,7 @@ static void intel_ddi_read_func_ctl_dp_sst(struct intel_encoder *encoder,
intel_de_read(display,
dp_tp_ctl_reg(encoder, crtc_state)) & DP_TP_CTL_FEC_ENABLE;
- if (dig_port->lspcon.active && intel_dp_has_hdmi_sink(&dig_port->dp))
+ if (intel_lspcon_active(dig_port) && intel_dp_has_hdmi_sink(&dig_port->dp))
crtc_state->infoframes.enable |=
intel_lspcon_infoframes_enabled(encoder, crtc_state);
else
@@ -4229,21 +4208,21 @@ void intel_ddi_get_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct intel_shared_dpll *pll)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
struct icl_port_dpll *port_dpll = &crtc_state->icl_port_dplls[port_dpll_id];
bool pll_active;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
port_dpll->pll = pll;
- pll_active = intel_dpll_get_hw_state(i915, pll, &port_dpll->hw_state);
- drm_WARN_ON(&i915->drm, !pll_active);
+ pll_active = intel_dpll_get_hw_state(display, pll, &port_dpll->hw_state);
+ drm_WARN_ON(display->drm, !pll_active);
icl_set_active_port_dpll(crtc_state, port_dpll_id);
- crtc_state->port_clock = intel_dpll_get_freq(i915, crtc_state->shared_dpll,
+ crtc_state->port_clock = intel_dpll_get_freq(display, crtc_state->shared_dpll,
&crtc_state->dpll_hw_state);
}
@@ -4332,12 +4311,12 @@ static void icl_ddi_tc_get_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct intel_shared_dpll *pll)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum icl_port_dpll_id port_dpll_id;
struct icl_port_dpll *port_dpll;
bool pll_active;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
if (icl_ddi_tc_pll_is_tbt(pll))
@@ -4348,15 +4327,15 @@ static void icl_ddi_tc_get_clock(struct intel_encoder *encoder,
port_dpll = &crtc_state->icl_port_dplls[port_dpll_id];
port_dpll->pll = pll;
- pll_active = intel_dpll_get_hw_state(i915, pll, &port_dpll->hw_state);
- drm_WARN_ON(&i915->drm, !pll_active);
+ pll_active = intel_dpll_get_hw_state(display, pll, &port_dpll->hw_state);
+ drm_WARN_ON(display->drm, !pll_active);
icl_set_active_port_dpll(crtc_state, port_dpll_id);
if (icl_ddi_tc_pll_is_tbt(crtc_state->shared_dpll))
- crtc_state->port_clock = icl_calc_tbt_pll_link(i915, encoder->port);
+ crtc_state->port_clock = icl_calc_tbt_pll_link(display, encoder->port);
else
- crtc_state->port_clock = intel_dpll_get_freq(i915, crtc_state->shared_dpll,
+ crtc_state->port_clock = intel_dpll_get_freq(display, crtc_state->shared_dpll,
&crtc_state->dpll_hw_state);
}
@@ -4595,16 +4574,16 @@ static int intel_ddi_compute_config_late(struct intel_encoder *encoder,
static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->dev);
+ struct intel_display *display = to_intel_display(encoder->dev);
struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
intel_dp_encoder_flush_work(encoder);
if (intel_encoder_is_tc(&dig_port->base))
intel_tc_port_cleanup(dig_port);
- intel_display_power_flush_work(i915);
+ intel_display_power_flush_work(display);
drm_encoder_cleanup(encoder);
- kfree(dig_port->hdcp_port_data.streams);
+ kfree(dig_port->hdcp.port_data.streams);
kfree(dig_port);
}
@@ -4682,6 +4661,7 @@ static int intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
static int intel_hdmi_reset_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder);
struct intel_connector *connector = hdmi->attached_connector;
@@ -4748,7 +4728,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
* would be perfectly happy if were to just reconfigure
* the SCDC settings on the fly.
*/
- return intel_modeset_commit_pipes(dev_priv, BIT(crtc->pipe), ctx);
+ return intel_modeset_commit_pipes(display, BIT(crtc->pipe), ctx);
}
static void intel_ddi_link_check(struct intel_encoder *encoder)
@@ -5104,7 +5084,7 @@ void intel_ddi_init(struct intel_display *display,
return;
}
- if (!assert_port_valid(dev_priv, port))
+ if (!assert_port_valid(display, port))
return;
if (port_in_use(dev_priv, port)) {
@@ -5122,7 +5102,7 @@ void intel_ddi_init(struct intel_display *display,
return;
}
- phy = intel_port_to_phy(dev_priv, port);
+ phy = intel_port_to_phy(display, port);
/*
* On platforms with HTI (aka HDPORT), if it's enabled at boot it may
@@ -5159,7 +5139,7 @@ void intel_ddi_init(struct intel_display *display,
return;
}
- if (intel_phy_is_snps(dev_priv, phy) &&
+ if (intel_phy_is_snps(display, phy) &&
dev_priv->display.snps.phy_failed_calibration & BIT(phy)) {
drm_dbg_kms(&dev_priv->drm,
"SNPS PHY %c failed to calibrate, proceeding anyway\n",
@@ -5182,7 +5162,7 @@ void intel_ddi_init(struct intel_display *display,
port_name(port - PORT_D_XELPD + PORT_D),
phy_name(phy));
} else if (DISPLAY_VER(dev_priv) >= 12) {
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+ enum tc_port tc_port = intel_port_to_tc(display, port);
drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS,
@@ -5192,7 +5172,7 @@ void intel_ddi_init(struct intel_display *display,
tc_port != TC_PORT_NONE ? "TC" : "",
tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
} else if (DISPLAY_VER(dev_priv) >= 11) {
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+ enum tc_port tc_port = intel_port_to_tc(display, port);
drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS,
@@ -5209,8 +5189,8 @@ void intel_ddi_init(struct intel_display *display,
intel_encoder_link_check_init(encoder, intel_ddi_link_check);
- mutex_init(&dig_port->hdcp_mutex);
- dig_port->num_hdcp_streams = 0;
+ mutex_init(&dig_port->hdcp.mutex);
+ dig_port->hdcp.num_streams = 0;
encoder->hotplug = intel_ddi_hotplug;
encoder->compute_output_type = intel_ddi_compute_output_type;
@@ -5233,7 +5213,7 @@ void intel_ddi_init(struct intel_display *display,
encoder->get_power_domains = intel_ddi_get_power_domains;
encoder->type = INTEL_OUTPUT_DDI;
- encoder->power_domain = intel_display_power_ddi_lanes_domain(dev_priv, port);
+ encoder->power_domain = intel_display_power_ddi_lanes_domain(display, port);
encoder->port = port;
encoder->cloneable = 0;
encoder->pipe_mask = ~0;
@@ -5340,7 +5320,7 @@ void intel_ddi_init(struct intel_display *display,
else if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
encoder->hpd_pin = skl_hpd_pin(dev_priv, port);
else
- encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
+ encoder->hpd_pin = intel_hpd_pin_default(port);
ddi_buf_ctl = intel_de_read(dev_priv, DDI_BUF_CTL(port));
@@ -5384,7 +5364,7 @@ void intel_ddi_init(struct intel_display *display,
}
drm_WARN_ON(&dev_priv->drm, port > PORT_I);
- dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(dev_priv, port);
+ dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(display, port);
if (DISPLAY_VER(dev_priv) >= 11) {
if (intel_encoder_is_tc(encoder))
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 2faadd1441e2..353eb04079e9 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -9,7 +9,6 @@
#include "i915_reg_defs.h"
struct drm_connector_state;
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_bios_encoder_data;
struct intel_connector;
@@ -54,8 +53,7 @@ void hsw_ddi_get_config(struct intel_encoder *encoder,
struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder);
void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
-void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
- enum port port);
+void intel_wait_ddi_buf_idle(struct intel_display *display, enum port port);
void intel_ddi_init(struct intel_display *display,
const struct intel_bios_encoder_data *devdata);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
index 9389b295036e..a238be5bc455 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -3,13 +3,13 @@
* Copyright © 2020 Intel Corporation
*/
-#include "i915_drv.h"
+#include "i915_utils.h"
+#include "intel_cx0_phy.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
-#include "intel_cx0_phy.h"
/* HDMI/DVI modes ignore everything but the last 2 items. So we share
* them for both DP and FDI transports, allowing those ports to
@@ -1407,10 +1407,10 @@ tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
if (crtc_state->port_clock > 270000) {
- if (IS_TIGERLAKE_UY(dev_priv)) {
+ if (display->platform.tigerlake_uy) {
return intel_get_buf_trans(&tgl_uy_combo_phy_trans_dp_hbr2,
n_entries);
} else {
@@ -1709,59 +1709,67 @@ mtl_get_c20_buf_trans(struct intel_encoder *encoder,
void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (DISPLAY_VER(i915) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
if (intel_encoder_is_c10phy(encoder))
encoder->get_buf_trans = mtl_get_c10_buf_trans;
else
encoder->get_buf_trans = mtl_get_c20_buf_trans;
- } else if (IS_DG2(i915)) {
+ } else if (display->platform.dg2) {
encoder->get_buf_trans = dg2_get_snps_buf_trans;
- } else if (IS_ALDERLAKE_P(i915)) {
+ } else if (display->platform.alderlake_p) {
if (intel_encoder_is_combo(encoder))
encoder->get_buf_trans = adlp_get_combo_buf_trans;
else
encoder->get_buf_trans = adlp_get_dkl_buf_trans;
- } else if (IS_ALDERLAKE_S(i915)) {
+ } else if (display->platform.alderlake_s) {
encoder->get_buf_trans = adls_get_combo_buf_trans;
- } else if (IS_ROCKETLAKE(i915)) {
+ } else if (display->platform.rocketlake) {
encoder->get_buf_trans = rkl_get_combo_buf_trans;
- } else if (IS_DG1(i915)) {
+ } else if (display->platform.dg1) {
encoder->get_buf_trans = dg1_get_combo_buf_trans;
- } else if (DISPLAY_VER(i915) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
if (intel_encoder_is_combo(encoder))
encoder->get_buf_trans = tgl_get_combo_buf_trans;
else
encoder->get_buf_trans = tgl_get_dkl_buf_trans;
- } else if (DISPLAY_VER(i915) == 11) {
- if (IS_JASPERLAKE(i915))
+ } else if (DISPLAY_VER(display) == 11) {
+ if (display->platform.jasperlake)
encoder->get_buf_trans = jsl_get_combo_buf_trans;
- else if (IS_ELKHARTLAKE(i915))
+ else if (display->platform.elkhartlake)
encoder->get_buf_trans = ehl_get_combo_buf_trans;
else if (intel_encoder_is_combo(encoder))
encoder->get_buf_trans = icl_get_combo_buf_trans;
else
encoder->get_buf_trans = icl_get_mg_buf_trans;
- } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
encoder->get_buf_trans = bxt_get_buf_trans;
- } else if (IS_COMETLAKE_ULX(i915) || IS_COFFEELAKE_ULX(i915) || IS_KABYLAKE_ULX(i915)) {
+ } else if (display->platform.cometlake_ulx ||
+ display->platform.coffeelake_ulx ||
+ display->platform.kabylake_ulx) {
encoder->get_buf_trans = kbl_y_get_buf_trans;
- } else if (IS_COMETLAKE_ULT(i915) || IS_COFFEELAKE_ULT(i915) || IS_KABYLAKE_ULT(i915)) {
+ } else if (display->platform.cometlake_ult ||
+ display->platform.coffeelake_ult ||
+ display->platform.kabylake_ult) {
encoder->get_buf_trans = kbl_u_get_buf_trans;
- } else if (IS_COMETLAKE(i915) || IS_COFFEELAKE(i915) || IS_KABYLAKE(i915)) {
+ } else if (display->platform.cometlake ||
+ display->platform.coffeelake ||
+ display->platform.kabylake) {
encoder->get_buf_trans = kbl_get_buf_trans;
- } else if (IS_SKYLAKE_ULX(i915)) {
+ } else if (display->platform.skylake_ulx) {
encoder->get_buf_trans = skl_y_get_buf_trans;
- } else if (IS_SKYLAKE_ULT(i915)) {
+ } else if (display->platform.skylake_ult) {
encoder->get_buf_trans = skl_u_get_buf_trans;
- } else if (IS_SKYLAKE(i915)) {
+ } else if (display->platform.skylake) {
encoder->get_buf_trans = skl_get_buf_trans;
- } else if (IS_BROADWELL(i915)) {
+ } else if (display->platform.broadwell) {
encoder->get_buf_trans = bdw_get_buf_trans;
- } else if (IS_HASWELL(i915)) {
+ } else if (display->platform.haswell) {
encoder->get_buf_trans = hsw_get_buf_trans;
} else {
- MISSING_CASE(INTEL_INFO(i915)->platform);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
+
+ MISSING_CASE(pdev->device);
}
}
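intel_ddi_buf_trans_init() above only selects a per-platform hook; a hedged usage sketch of how a caller would then fetch a table through it (hypothetical_lookup_buf_trans() is invented, and the return type is assumed to match the get_buf_trans declaration in intel_ddi_buf_trans.h):

static const struct intel_ddi_buf_trans *
hypothetical_lookup_buf_trans(struct intel_encoder *encoder,
			      const struct intel_crtc_state *crtc_state)
{
	int n_entries;

	/* Hook assigned once at encoder init by intel_ddi_buf_trans_init() */
	return encoder->get_buf_trans(encoder, crtc_state, &n_entries);
}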
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
index 2133984a572b..29a190390192 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
@@ -8,7 +8,6 @@
#include <linux/types.h>
-struct drm_i915_private;
struct intel_encoder;
struct intel_crtc_state;
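The intel_display.c diff that follows applies one recurring shape: derive struct intel_display with to_intel_display() from whatever object is at hand (encoder, crtc, crtc_state, atomic state) and replace dev_priv-based checks with DISPLAY_VER(display), HAS_*(display) and display->platform.* flags. A minimal sketch of the pattern, assuming an illustrative helper name:

static void hypothetical_display_helper(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	/* Feature/platform checks now go through the display struct */
	if (DISPLAY_VER(display) >= 9 || display->platform.broadwell)
		drm_dbg_kms(display->drm, "display version %d\n",
			    DISPLAY_VER(display));
}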
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 41128469f12a..3afb85fe8536 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -104,6 +104,7 @@
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
+#include "intel_pfit.h"
#include "intel_pipe_crc.h"
#include "intel_plane_initial.h"
#include "intel_pmdemand.h"
@@ -123,7 +124,6 @@
#include "intel_wm.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
-#include "skl_universal_plane_regs.h"
#include "skl_watermark.h"
#include "vlv_dpio_phy_regs.h"
#include "vlv_dsi.h"
@@ -182,16 +182,17 @@ int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
return hpll;
}
-void intel_update_czclk(struct drm_i915_private *dev_priv)
+void intel_update_czclk(struct intel_display *display)
{
- if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ if (!display->platform.valleyview && !display->platform.cherryview)
return;
dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
CCK_CZ_CLOCK_CONTROL);
- drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
- dev_priv->czclk_freq);
+ drm_dbg_kms(display->drm, "CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}
static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
@@ -202,29 +203,29 @@ static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
/* WA Display #0827: Gen9:all */
static void
-skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
+skl_wa_827(struct intel_display *display, enum pipe pipe, bool enable)
{
- intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
+ intel_de_rmw(display, CLKGATE_DIS_PSL(pipe),
DUPS1_GATING_DIS | DUPS2_GATING_DIS,
enable ? DUPS1_GATING_DIS | DUPS2_GATING_DIS : 0);
}
/* Wa_2006604312:icl,ehl */
static void
-icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
+icl_wa_scalerclkgating(struct intel_display *display, enum pipe pipe,
bool enable)
{
- intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
+ intel_de_rmw(display, CLKGATE_DIS_PSL(pipe),
DPFR_GATING_DIS,
enable ? DPFR_GATING_DIS : 0);
}
/* Wa_1604331009:icl,jsl,ehl */
static void
-icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
+icl_wa_cursorclkgating(struct intel_display *display, enum pipe pipe,
bool enable)
{
- intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
+ intel_de_rmw(display, CLKGATE_DIS_PSL(pipe),
CURSOR_GATING_DIS,
enable ? CURSOR_GATING_DIS : 0);
}
@@ -404,41 +405,40 @@ struct intel_crtc *intel_primary_crtc(const struct intel_crtc_state *crtc_state)
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
+ struct intel_display *display = to_intel_display(old_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- if (DISPLAY_VER(dev_priv) >= 4) {
+ if (DISPLAY_VER(display) >= 4) {
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
/* Wait for the Pipe State to go off */
- if (intel_de_wait_for_clear(dev_priv, TRANSCONF(dev_priv, cpu_transcoder),
+ if (intel_de_wait_for_clear(display, TRANSCONF(display, cpu_transcoder),
TRANSCONF_STATE_ENABLE, 100))
- drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
+ drm_WARN(display->drm, 1, "pipe_off wait timed out\n");
} else {
intel_wait_for_pipe_scanline_stopped(crtc);
}
}
-void assert_transcoder(struct drm_i915_private *dev_priv,
+void assert_transcoder(struct intel_display *display,
enum transcoder cpu_transcoder, bool state)
{
- struct intel_display *display = &dev_priv->display;
bool cur_state;
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
/* we keep both pipes enabled on 830 */
- if (IS_I830(dev_priv))
+ if (display->platform.i830)
state = true;
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ wakeref = intel_display_power_get_if_enabled(display, power_domain);
if (wakeref) {
- u32 val = intel_de_read(dev_priv,
- TRANSCONF(dev_priv, cpu_transcoder));
+ u32 val = intel_de_read(display,
+ TRANSCONF(display, cpu_transcoder));
cur_state = !!(val & TRANSCONF_ENABLE);
- intel_display_power_put(dev_priv, power_domain, wakeref);
+ intel_display_power_put(display, power_domain, wakeref);
} else {
cur_state = false;
}
@@ -468,57 +468,22 @@ static void assert_plane(struct intel_plane *plane, bool state)
static void assert_planes_disabled(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_plane *plane;
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane)
assert_plane_disabled(plane);
}
-void vlv_wait_port_ready(struct intel_display *display,
- struct intel_digital_port *dig_port,
- unsigned int expected_mask)
-{
- u32 port_mask;
- i915_reg_t dpll_reg;
-
- switch (dig_port->base.port) {
- default:
- MISSING_CASE(dig_port->base.port);
- fallthrough;
- case PORT_B:
- port_mask = DPLL_PORTB_READY_MASK;
- dpll_reg = DPLL(display, 0);
- break;
- case PORT_C:
- port_mask = DPLL_PORTC_READY_MASK;
- dpll_reg = DPLL(display, 0);
- expected_mask <<= 4;
- break;
- case PORT_D:
- port_mask = DPLL_PORTD_READY_MASK;
- dpll_reg = DPIO_PHY_STATUS;
- break;
- }
-
- if (intel_de_wait(display, dpll_reg, port_mask, expected_mask, 1000))
- drm_WARN(display->drm, 1,
- "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
- dig_port->base.base.base.id, dig_port->base.base.name,
- intel_de_read(display, dpll_reg) & port_mask,
- expected_mask);
-}
-
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
struct intel_display *display = to_intel_display(new_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
u32 val;
- drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));
+ drm_dbg_kms(display->drm, "enabling pipe %c\n", pipe_name(pipe));
assert_planes_disabled(crtc);
@@ -527,56 +492,56 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
* a plane. On ILK+ the pipe PLLs are integrated, so we don't
* need the check.
*/
- if (HAS_GMCH(dev_priv)) {
+ if (HAS_GMCH(display)) {
if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
- assert_dsi_pll_enabled(dev_priv);
+ assert_dsi_pll_enabled(display);
else
- assert_pll_enabled(dev_priv, pipe);
+ assert_pll_enabled(display, pipe);
} else {
if (new_crtc_state->has_pch_encoder) {
/* if driving the PCH, we need FDI enabled */
- assert_fdi_rx_pll_enabled(dev_priv,
+ assert_fdi_rx_pll_enabled(display,
intel_crtc_pch_transcoder(crtc));
- assert_fdi_tx_pll_enabled(dev_priv,
+ assert_fdi_tx_pll_enabled(display,
(enum pipe) cpu_transcoder);
}
/* FIXME: assert CPU port conditions for SNB+ */
}
/* Wa_22012358565:adl-p */
- if (DISPLAY_VER(dev_priv) == 13)
- intel_de_rmw(dev_priv, PIPE_ARB_CTL(dev_priv, pipe),
+ if (DISPLAY_VER(display) == 13)
+ intel_de_rmw(display, PIPE_ARB_CTL(display, pipe),
0, PIPE_ARB_USE_PROG_SLOTS);
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
u32 clear = DP_DSC_INSERT_SF_AT_EOL_WA;
u32 set = 0;
- if (DISPLAY_VER(dev_priv) == 14)
+ if (DISPLAY_VER(display) == 14)
set |= DP_FEC_BS_JITTER_WA;
intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
clear, set);
}
- val = intel_de_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
+ val = intel_de_read(display, TRANSCONF(display, cpu_transcoder));
if (val & TRANSCONF_ENABLE) {
/* we keep both pipes enabled on 830 */
- drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
+ drm_WARN_ON(display->drm, !display->platform.i830);
return;
}
/* Wa_1409098942:adlp+ */
- if (DISPLAY_VER(dev_priv) >= 13 &&
+ if (DISPLAY_VER(display) >= 13 &&
new_crtc_state->dsc.compression_enable) {
val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;
val |= REG_FIELD_PREP(TRANSCONF_PIXEL_COUNT_SCALING_MASK,
TRANSCONF_PIXEL_COUNT_SCALING_X4);
}
- intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANSCONF(display, cpu_transcoder),
val | TRANSCONF_ENABLE);
- intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
+ intel_de_posting_read(display, TRANSCONF(display, cpu_transcoder));
/*
* Until the pipe starts PIPEDSL reads will return a stale value,
@@ -593,12 +558,11 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
struct intel_display *display = to_intel_display(old_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
u32 val;
- drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));
+ drm_dbg_kms(display->drm, "disabling pipe %c\n", pipe_name(pipe));
/*
* Make sure planes won't keep trying to pump pixels to us,
@@ -606,7 +570,7 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
*/
assert_planes_disabled(crtc);
- val = intel_de_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
+ val = intel_de_read(display, TRANSCONF(display, cpu_transcoder));
if ((val & TRANSCONF_ENABLE) == 0)
return;
@@ -618,17 +582,17 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
val &= ~TRANSCONF_DOUBLE_WIDE;
/* Don't disable pipe or pipe PLLs if needed */
- if (!IS_I830(dev_priv))
+ if (!display->platform.i830)
val &= ~TRANSCONF_ENABLE;
/* Wa_1409098942:adlp+ */
- if (DISPLAY_VER(dev_priv) >= 13 &&
+ if (DISPLAY_VER(display) >= 13 &&
old_crtc_state->dsc.compression_enable)
val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;
- intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val);
+ intel_de_write(display, TRANSCONF(display, cpu_transcoder), val);
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
@@ -636,90 +600,14 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
intel_wait_for_pipe_off(old_crtc_state);
}
-unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
-{
- unsigned int size = 0;
- int i;
-
- for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
- size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;
-
- return size;
-}
-
-unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
-{
- unsigned int size = 0;
- int i;
-
- for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
- unsigned int plane_size;
-
- if (rem_info->plane[i].linear)
- plane_size = rem_info->plane[i].size;
- else
- plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;
-
- if (plane_size == 0)
- continue;
-
- if (rem_info->plane_alignment)
- size = ALIGN(size, rem_info->plane_alignment);
-
- size += plane_size;
- }
-
- return size;
-}
-
-bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-
- return DISPLAY_VER(dev_priv) < 4 ||
- (plane->fbc && !plane_state->no_fbc_reason &&
- plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL);
-}
-
-/*
- * Convert the x/y offsets into a linear offset.
- * Only valid with 0/180 degree rotation, which is fine since linear
- * offset is only used with linear buffers on pre-hsw and tiled buffers
- * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
- */
-u32 intel_fb_xy_to_linear(int x, int y,
- const struct intel_plane_state *state,
- int color_plane)
-{
- const struct drm_framebuffer *fb = state->hw.fb;
- unsigned int cpp = fb->format->cpp[color_plane];
- unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;
-
- return y * pitch + x * cpp;
-}
-
-/*
- * Add the x/y offsets derived from fb->offsets[] to the user
- * specified plane src x/y offsets. The resulting x/y offsets
- * specify the start of scanout from the beginning of the gtt mapping.
- */
-void intel_add_fb_offsets(int *x, int *y,
- const struct intel_plane_state *state,
- int color_plane)
-
-{
- *x += state->view.color_plane[color_plane].x;
- *y += state->view.color_plane[color_plane].y;
-}
-
-u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
+u32 intel_plane_fb_max_stride(struct drm_device *drm,
u32 pixel_format, u64 modifier)
{
+ struct intel_display *display = to_intel_display(drm);
struct intel_crtc *crtc;
struct intel_plane *plane;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return 0;
/*
@@ -727,7 +615,7 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
* the highest stride limits of them all,
* if in case pipe A is disabled, use the first pipe from pipe_mask.
*/
- crtc = intel_first_crtc(dev_priv);
+ crtc = intel_first_crtc(display);
if (!crtc)
return 0;
@@ -753,7 +641,7 @@ void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_plane *plane;
/*
@@ -764,7 +652,7 @@ void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
crtc_state->enabled_planes = 0;
crtc_state->active_planes = 0;
- drm_for_each_plane_mask(plane, &dev_priv->drm,
+ drm_for_each_plane_mask(plane, display->drm,
crtc_state->uapi.plane_mask) {
crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
@@ -774,29 +662,28 @@ void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
void intel_plane_disable_noatomic(struct intel_crtc *crtc,
struct intel_plane *plane)
{
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
plane->base.base.id, plane->base.name,
crtc->base.base.id, crtc->base.name);
+ intel_plane_set_invisible(crtc_state, plane_state);
intel_set_plane_visible(crtc_state, plane_state, false);
intel_plane_fixup_bitmasks(crtc_state);
- crtc_state->data_rate[plane->id] = 0;
- crtc_state->data_rate_y[plane->id] = 0;
- crtc_state->rel_data_rate[plane->id] = 0;
- crtc_state->rel_data_rate_y[plane->id] = 0;
- crtc_state->min_cdclk[plane->id] = 0;
+
+ skl_wm_plane_disable_noatomic(crtc, plane);
if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
hsw_ips_disable(crtc_state)) {
crtc_state->ips_enabled = false;
- intel_crtc_wait_for_next_vblank(crtc);
+ intel_plane_initial_vblank_wait(crtc);
}
/*
@@ -808,19 +695,19 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc,
* event which is after the vblank start event, so we need to have a
* wait-for-vblank between disabling the plane and the pipe.
*/
- if (HAS_GMCH(dev_priv) &&
+ if (HAS_GMCH(display) &&
intel_set_memory_cxsr(dev_priv, false))
- intel_crtc_wait_for_next_vblank(crtc);
+ intel_plane_initial_vblank_wait(crtc);
/*
* Gen2 reports pipe underruns whenever all planes are disabled.
* So disable underrun reporting before all the planes get disabled.
*/
- if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
- intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
+ if (DISPLAY_VER(display) == 2 && !crtc_state->active_planes)
+ intel_set_cpu_fifo_underrun_reporting(display, crtc->pipe, false);
intel_plane_disable_arm(NULL, plane, crtc_state);
- intel_crtc_wait_for_next_vblank(crtc);
+ intel_plane_initial_vblank_wait(crtc);
}
unsigned int
@@ -836,12 +723,12 @@ intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 tmp;
- tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
+ tmp = intel_de_read(display, PIPE_CHICKEN(pipe));
/*
* Display WA #1153: icl
@@ -861,24 +748,24 @@ static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
* Underrun recovery must always be disabled on display 13+.
* DG2 chicken bit meaning is inverted compared to other platforms.
*/
- if (IS_DG2(dev_priv))
+ if (display->platform.dg2)
tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
- else if ((DISPLAY_VER(dev_priv) >= 13) && (DISPLAY_VER(dev_priv) < 30))
+ else if ((DISPLAY_VER(display) >= 13) && (DISPLAY_VER(display) < 30))
tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
/* Wa_14010547955:dg2 */
- if (IS_DG2(dev_priv))
+ if (display->platform.dg2)
tmp |= DG2_RENDER_CCSTAG_4_3_EN;
- intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
+ intel_de_write(display, PIPE_CHICKEN(pipe), tmp);
}
-bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
+bool intel_has_pending_fb_unpin(struct intel_display *display)
{
struct drm_crtc *crtc;
bool cleanup_done;
- drm_for_each_crtc(crtc, &dev_priv->drm) {
+ drm_for_each_crtc(crtc, display->drm) {
struct drm_crtc_commit *commit;
spin_lock(&crtc->commit_lock);
commit = list_first_entry_or_null(&crtc->commit_list,
@@ -930,36 +817,6 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
return encoder;
}
-static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
- enum pipe pipe = crtc->pipe;
- int width = drm_rect_width(dst);
- int height = drm_rect_height(dst);
- int x = dst->x1;
- int y = dst->y1;
-
- if (!crtc_state->pch_pfit.enabled)
- return;
-
- /* Force use of hard-coded filter coefficients
- * as some pre-programmed values are broken,
- * e.g. x201.
- */
- if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
- intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
- PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
- else
- intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
- PF_FILTER_MED_3x3);
- intel_de_write_fw(dev_priv, PF_WIN_POS(pipe),
- PF_WIN_XPOS(x) | PF_WIN_YPOS(y));
- intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe),
- PF_WIN_XSIZE(width) | PF_WIN_YSIZE(height));
-}
-
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
{
if (crtc->overlay)
@@ -972,13 +829,13 @@ static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (!crtc_state->nv12_planes)
return false;
/* WA Display #0827: Gen9:all */
- if (DISPLAY_VER(dev_priv) == 9)
+ if (DISPLAY_VER(display) == 9)
return true;
return false;
@@ -986,10 +843,10 @@ static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
/* Wa_2006604312:icl,ehl */
- if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
+ if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(display) == 11)
return true;
return false;
@@ -997,31 +854,31 @@ static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
/* Wa_1604331009:icl,jsl,ehl */
if (is_hdr_mode(crtc_state) &&
crtc_state->active_planes & BIT(PLANE_CURSOR) &&
- DISPLAY_VER(dev_priv) == 11)
+ DISPLAY_VER(display) == 11)
return true;
return false;
}
-static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
+static void intel_async_flip_vtd_wa(struct intel_display *display,
enum pipe pipe, bool enable)
{
- if (DISPLAY_VER(i915) == 9) {
+ if (DISPLAY_VER(display) == 9) {
/*
- * "Plane N strech max must be programmed to 11b (x1)
+ * "Plane N stretch max must be programmed to 11b (x1)
* when Async flips are enabled on that plane."
*/
- intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
+ intel_de_rmw(display, CHICKEN_PIPESL_1(pipe),
SKL_PLANE1_STRETCH_MAX_MASK,
enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
} else {
/* Also needed on HSW/BDW albeit undocumented */
- intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
+ intel_de_rmw(display, CHICKEN_PIPESL_1(pipe),
HSW_PRI_STRETCH_MAX_MASK,
enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
}
@@ -1029,10 +886,12 @@ static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
return crtc_state->uapi.async_flip && i915_vtd_active(i915) &&
- (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
+ (DISPLAY_VER(display) == 9 || display->platform.broadwell ||
+ display->platform.haswell);
}
static void intel_encoders_audio_enable(struct intel_atomic_state *state,
@@ -1181,6 +1040,7 @@ static bool audio_disabling(const struct intel_crtc_state *old_crtc_state,
static void intel_post_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
@@ -1199,19 +1059,19 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (needs_async_flip_vtd_wa(old_crtc_state) &&
!needs_async_flip_vtd_wa(new_crtc_state))
- intel_async_flip_vtd_wa(dev_priv, pipe, false);
+ intel_async_flip_vtd_wa(display, pipe, false);
if (needs_nv12_wa(old_crtc_state) &&
!needs_nv12_wa(new_crtc_state))
- skl_wa_827(dev_priv, pipe, false);
+ skl_wa_827(display, pipe, false);
if (needs_scalerclk_wa(old_crtc_state) &&
!needs_scalerclk_wa(new_crtc_state))
- icl_wa_scalerclkgating(dev_priv, pipe, false);
+ icl_wa_scalerclkgating(display, pipe, false);
if (needs_cursorclk_wa(old_crtc_state) &&
!needs_cursorclk_wa(new_crtc_state))
- icl_wa_cursorclkgating(dev_priv, pipe, false);
+ icl_wa_cursorclkgating(display, pipe, false);
if (intel_crtc_needs_color_update(new_crtc_state))
intel_color_post_update(new_crtc_state);
@@ -1305,6 +1165,7 @@ static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
static void intel_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
@@ -1332,22 +1193,22 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
if (!needs_async_flip_vtd_wa(old_crtc_state) &&
needs_async_flip_vtd_wa(new_crtc_state))
- intel_async_flip_vtd_wa(dev_priv, pipe, true);
+ intel_async_flip_vtd_wa(display, pipe, true);
/* Display WA 827 */
if (!needs_nv12_wa(old_crtc_state) &&
needs_nv12_wa(new_crtc_state))
- skl_wa_827(dev_priv, pipe, true);
+ skl_wa_827(display, pipe, true);
/* Wa_2006604312:icl,ehl */
if (!needs_scalerclk_wa(old_crtc_state) &&
needs_scalerclk_wa(new_crtc_state))
- icl_wa_scalerclkgating(dev_priv, pipe, true);
+ icl_wa_scalerclkgating(display, pipe, true);
/* Wa_1604331009:icl,jsl,ehl */
if (!needs_cursorclk_wa(old_crtc_state) &&
needs_cursorclk_wa(new_crtc_state))
- icl_wa_cursorclkgating(dev_priv, pipe, true);
+ icl_wa_cursorclkgating(display, pipe, true);
/*
* Vblank time updates from the shadow to live plane control register
@@ -1358,7 +1219,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* event which is after the vblank start event, so we need to have a
* wait-for-vblank between disabling the plane and the pipe.
*/
- if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
+ if (HAS_GMCH(display) && old_crtc_state->hw.active &&
new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
intel_crtc_wait_for_next_vblank(crtc);
@@ -1369,7 +1230,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
*
* WaCxSRDisabledForSpriteScaling:ivb
*/
- if (!HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
+ if (!HAS_GMCH(display) && old_crtc_state->hw.active &&
new_crtc_state->disable_cxsr && ilk_disable_cxsr(dev_priv))
intel_crtc_wait_for_next_vblank(crtc);
@@ -1405,8 +1266,8 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* chance of catching underruns with the intermediate watermarks
* vs. the old plane configuration.
*/
- if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+ if (DISPLAY_VER(display) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
+ intel_set_cpu_fifo_underrun_reporting(display, pipe, false);
/*
* WA for platforms where async address update enable bit
@@ -1446,7 +1307,7 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state,
static void intel_encoders_update_prepare(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
int i;
@@ -1455,7 +1316,7 @@ static void intel_encoders_update_prepare(struct intel_atomic_state *state)
* Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
* TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
*/
- if (i915->display.dpll.mgr) {
+ if (display->dpll.mgr) {
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (intel_crtc_needs_modeset(new_crtc_state))
continue;
@@ -1645,12 +1506,13 @@ static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_sta
static void ilk_crtc_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (drm_WARN_ON(&dev_priv->drm, crtc->active))
+ if (drm_WARN_ON(display->drm, crtc->active))
return;
/*
@@ -1663,8 +1525,8 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
*
* Spurious PCH underruns also occur during PCH enabling.
*/
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
- intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
+ intel_set_cpu_fifo_underrun_reporting(display, pipe, false);
+ intel_set_pch_fifo_underrun_reporting(display, pipe, false);
ilk_configure_cpu_transcoder(new_crtc_state);
@@ -1677,8 +1539,8 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
if (new_crtc_state->has_pch_encoder) {
ilk_pch_pre_enable(state, crtc);
} else {
- assert_fdi_tx_disabled(dev_priv, pipe);
- assert_fdi_rx_disabled(dev_priv, pipe);
+ assert_fdi_tx_disabled(display, pipe);
+ assert_fdi_rx_disabled(display, pipe);
}
ilk_pfit_enable(new_crtc_state);
@@ -1712,33 +1574,33 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
intel_crtc_wait_for_next_vblank(crtc);
intel_crtc_wait_for_next_vblank(crtc);
}
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
- intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
+ intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
+ intel_set_pch_fifo_underrun_reporting(display, pipe, true);
}
/* Display WA #1180: WaDisableScalarClockGating: glk */
static bool glk_need_scaler_clock_gating_wa(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- return DISPLAY_VER(i915) == 10 && crtc_state->pch_pfit.enabled;
+ return DISPLAY_VER(display) == 10 && crtc_state->pch_pfit.enabled;
}
static void glk_pipe_scaler_clock_gating_wa(struct intel_crtc *crtc, bool enable)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
- intel_de_rmw(i915, CLKGATE_DIS_PSL(crtc->pipe),
+ intel_de_rmw(display, CLKGATE_DIS_PSL(crtc->pipe),
mask, enable ? mask : 0);
}
static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
+ intel_de_write(display, WM_LINETIME(crtc->pipe),
HSW_LINETIME(crtc_state->linetime) |
HSW_IPS_LINETIME(crtc_state->ips_linetime));
}
@@ -1754,8 +1616,8 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (crtc_state->has_pch_encoder) {
@@ -1769,11 +1631,11 @@ static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_sta
}
intel_set_transcoder_timings(crtc_state);
- if (HAS_VRR(dev_priv))
+ if (HAS_VRR(display))
intel_vrr_set_transcoder_timings(crtc_state);
if (cpu_transcoder != TRANSCODER_EDP)
- intel_de_write(dev_priv, TRANS_MULT(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_MULT(display, cpu_transcoder),
crtc_state->pixel_multiplier - 1);
hsw_set_frame_start_delay(crtc_state);
@@ -1787,12 +1649,11 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
struct intel_crtc *pipe_crtc;
int i;
- if (drm_WARN_ON(&dev_priv->drm, crtc->active))
+ if (drm_WARN_ON(display->drm, crtc->active))
return;
for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i)
intel_dmc_enable_pipe(display, pipe_crtc->pipe);
@@ -1815,12 +1676,12 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
intel_dsc_enable(pipe_crtc_state);
- if (HAS_UNCOMPRESSED_JOINER(dev_priv))
+ if (HAS_UNCOMPRESSED_JOINER(display))
intel_uncompressed_joiner_enable(pipe_crtc_state);
intel_set_pipe_src_size(pipe_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+ if (DISPLAY_VER(display) >= 9 || display->platform.broadwell)
bdw_set_pipe_misc(NULL, pipe_crtc_state);
}
@@ -1836,7 +1697,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (glk_need_scaler_clock_gating_wa(pipe_crtc_state))
glk_pipe_scaler_clock_gating_wa(pipe_crtc, true);
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
skl_pfit_enable(pipe_crtc_state);
else
ilk_pfit_enable(pipe_crtc_state);
@@ -1849,7 +1710,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
hsw_set_linetime_wm(pipe_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(display) >= 11)
icl_set_pipe_chicken(pipe_crtc_state);
intel_initial_watermarks(state, pipe_crtc);
@@ -1872,7 +1733,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
* enabling, we need to change the workaround.
*/
hsw_workaround_pipe = pipe_crtc_state->hsw_workaround_pipe;
- if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
+ if (display->platform.haswell && hsw_workaround_pipe != INVALID_PIPE) {
struct intel_crtc *wa_crtc =
intel_crtc_for_pipe(display, hsw_workaround_pipe);
@@ -1882,28 +1743,12 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
}
}
-void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
-
- /* To avoid upsetting the power well on haswell only disable the pfit if
- * it's in use. The hw state code will make sure we get this right. */
- if (!old_crtc_state->pch_pfit.enabled)
- return;
-
- intel_de_write_fw(dev_priv, PF_CTL(pipe), 0);
- intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), 0);
- intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), 0);
-}
-
static void ilk_crtc_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
/*
@@ -1911,8 +1756,8 @@ static void ilk_crtc_disable(struct intel_atomic_state *state,
* pipe is already disabled, but FDI RX/TX is still enabled.
* Happens at least with VGA+HDMI cloning. Suppress them.
*/
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
- intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
+ intel_set_cpu_fifo_underrun_reporting(display, pipe, false);
+ intel_set_pch_fifo_underrun_reporting(display, pipe, false);
intel_encoders_disable(state, crtc);
@@ -1930,8 +1775,8 @@ static void ilk_crtc_disable(struct intel_atomic_state *state,
if (old_crtc_state->has_pch_encoder)
ilk_pch_post_disable(state, crtc);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
- intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
+ intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
+ intel_set_pch_fifo_underrun_reporting(display, pipe, true);
intel_disable_shared_dpll(old_crtc_state);
}
@@ -1965,44 +1810,18 @@ static void hsw_crtc_disable(struct intel_atomic_state *state,
intel_dmc_disable_pipe(display, pipe_crtc->pipe);
}
-static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (!crtc_state->gmch_pfit.control)
- return;
-
- /*
- * The panel fitter should only be adjusted whilst the pipe is disabled,
- * according to register description and PRM.
- */
- drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, PFIT_CONTROL(dev_priv)) & PFIT_ENABLE);
- assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
-
- intel_de_write(dev_priv, PFIT_PGM_RATIOS(dev_priv),
- crtc_state->gmch_pfit.pgm_ratios);
- intel_de_write(dev_priv, PFIT_CONTROL(dev_priv),
- crtc_state->gmch_pfit.control);
-
- /* Border color in case we don't scale up to the full screen. Black by
- * default, change to something else for debugging. */
- intel_de_write(dev_priv, BCLRPAT(dev_priv, crtc->pipe), 0);
-}
-
/* Prefer intel_encoder_is_combo() */
-bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
+bool intel_phy_is_combo(struct intel_display *display, enum phy phy)
{
if (phy == PHY_NONE)
return false;
- else if (IS_ALDERLAKE_S(dev_priv))
+ else if (display->platform.alderlake_s)
return phy <= PHY_E;
- else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
+ else if (display->platform.dg1 || display->platform.rocketlake)
return phy <= PHY_D;
- else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
+ else if (display->platform.jasperlake || display->platform.elkhartlake)
return phy <= PHY_C;
- else if (IS_ALDERLAKE_P(dev_priv) || IS_DISPLAY_VER(dev_priv, 11, 12))
+ else if (display->platform.alderlake_p || IS_DISPLAY_VER(display, 11, 12))
return phy <= PHY_B;
else
/*
@@ -2014,47 +1833,47 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
}
/* Prefer intel_encoder_is_tc() */
-bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
+bool intel_phy_is_tc(struct intel_display *display, enum phy phy)
{
/*
* Discrete GPU phy's are not attached to FIA's to support TC
* subsystem Legacy or non-legacy, and only support native DP/HDMI
*/
- if (IS_DGFX(dev_priv))
+ if (display->platform.dgfx)
return false;
- if (DISPLAY_VER(dev_priv) >= 13)
+ if (DISPLAY_VER(display) >= 13)
return phy >= PHY_F && phy <= PHY_I;
- else if (IS_TIGERLAKE(dev_priv))
+ else if (display->platform.tigerlake)
return phy >= PHY_D && phy <= PHY_I;
- else if (IS_ICELAKE(dev_priv))
+ else if (display->platform.icelake)
return phy >= PHY_C && phy <= PHY_F;
return false;
}
/* Prefer intel_encoder_is_snps() */
-bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
+bool intel_phy_is_snps(struct intel_display *display, enum phy phy)
{
/*
* For DG2, and for DG2 only, all four "combo" ports and the TC1 port
* (PHY E) use Synopsis PHYs. See intel_phy_is_tc().
*/
- return IS_DG2(dev_priv) && phy > PHY_NONE && phy <= PHY_E;
+ return display->platform.dg2 && phy > PHY_NONE && phy <= PHY_E;
}
/* Prefer intel_encoder_to_phy() */
-enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
+enum phy intel_port_to_phy(struct intel_display *display, enum port port)
{
- if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
+ if (DISPLAY_VER(display) >= 13 && port >= PORT_D_XELPD)
return PHY_D + port - PORT_D_XELPD;
- else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
+ else if (DISPLAY_VER(display) >= 13 && port >= PORT_TC1)
return PHY_F + port - PORT_TC1;
- else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
+ else if (display->platform.alderlake_s && port >= PORT_TC1)
return PHY_B + port - PORT_TC1;
- else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
+ else if ((display->platform.dg1 || display->platform.rocketlake) && port >= PORT_TC1)
return PHY_C + port - PORT_TC1;
- else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
+ else if ((display->platform.jasperlake || display->platform.elkhartlake) &&
port == PORT_D)
return PHY_A;
@@ -2062,12 +1881,12 @@ enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
}
/* Prefer intel_encoder_to_tc() */
-enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
+enum tc_port intel_port_to_tc(struct intel_display *display, enum port port)
{
- if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
+ if (!intel_phy_is_tc(display, intel_port_to_phy(display, port)))
return TC_PORT_NONE;
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
return TC_PORT_1 + port - PORT_TC1;
else
return TC_PORT_1 + port - PORT_C;
@@ -2075,55 +1894,55 @@ enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
enum phy intel_encoder_to_phy(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- return intel_port_to_phy(i915, encoder->port);
+ return intel_port_to_phy(display, encoder->port);
}
bool intel_encoder_is_combo(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- return intel_phy_is_combo(i915, intel_encoder_to_phy(encoder));
+ return intel_phy_is_combo(display, intel_encoder_to_phy(encoder));
}
bool intel_encoder_is_snps(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- return intel_phy_is_snps(i915, intel_encoder_to_phy(encoder));
+ return intel_phy_is_snps(display, intel_encoder_to_phy(encoder));
}
bool intel_encoder_is_tc(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- return intel_phy_is_tc(i915, intel_encoder_to_phy(encoder));
+ return intel_phy_is_tc(display, intel_encoder_to_phy(encoder));
}
enum tc_port intel_encoder_to_tc(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- return intel_port_to_tc(i915, encoder->port);
+ return intel_port_to_tc(display, encoder->port);
}
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
if (intel_tc_port_in_tbt_alt_mode(dig_port))
- return intel_display_power_tbt_aux_domain(i915, dig_port->aux_ch);
+ return intel_display_power_tbt_aux_domain(display, dig_port->aux_ch);
- return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
+ return intel_display_power_legacy_aux_domain(display, dig_port->aux_ch);
}
static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
struct intel_power_domain_mask *mask)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
struct drm_encoder *encoder;
enum pipe pipe = crtc->pipe;
@@ -2139,14 +1958,14 @@ static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
crtc_state->pch_pfit.force_thru)
set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits);
- drm_for_each_encoder_mask(encoder, &dev_priv->drm,
+ drm_for_each_encoder_mask(encoder, display->drm,
crtc_state->uapi.encoder_mask) {
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
set_bit(intel_encoder->power_domain, mask->bits);
}
- if (HAS_DDI(dev_priv) && crtc_state->has_audio)
+ if (HAS_DDI(display) && crtc_state->has_audio)
set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits);
if (crtc_state->shared_dpll)
@@ -2159,8 +1978,8 @@ static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
struct intel_power_domain_mask *old_domains)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain domain;
struct intel_power_domain_mask domains, new_domains;
@@ -2176,7 +1995,7 @@ void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
POWER_DOMAIN_NUM);
for_each_power_domain(domain, &new_domains)
- intel_display_power_get_in_set(dev_priv,
+ intel_display_power_get_in_set(display,
&crtc->enabled_power_domains,
domain);
}
@@ -2184,7 +2003,9 @@ void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc,
struct intel_power_domain_mask *domains)
{
- intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
+ struct intel_display *display = to_intel_display(crtc);
+
+ intel_display_power_put_mask_in_set(display,
&crtc->enabled_power_domains,
domains);
}
@@ -2209,33 +2030,33 @@ static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_st
static void valleyview_crtc_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (drm_WARN_ON(&dev_priv->drm, crtc->active))
+ if (drm_WARN_ON(display->drm, crtc->active))
return;
i9xx_configure_cpu_transcoder(new_crtc_state);
intel_set_pipe_src_size(new_crtc_state);
- intel_de_write(dev_priv, VLV_PIPE_MSA_MISC(pipe), 0);
+ intel_de_write(display, VLV_PIPE_MSA_MISC(display, pipe), 0);
- if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
- intel_de_write(dev_priv, CHV_BLEND(dev_priv, pipe),
+ if (display->platform.cherryview && pipe == PIPE_B) {
+ intel_de_write(display, CHV_BLEND(display, pipe),
CHV_BLEND_LEGACY);
- intel_de_write(dev_priv, CHV_CANVAS(dev_priv, pipe), 0);
+ intel_de_write(display, CHV_CANVAS(display, pipe), 0);
}
crtc->active = true;
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+ intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
intel_encoders_pre_pll_enable(state, crtc);
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
chv_enable_pll(new_crtc_state);
else
vlv_enable_pll(new_crtc_state);
@@ -2257,12 +2078,13 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state,
static void i9xx_crtc_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (drm_WARN_ON(&dev_priv->drm, crtc->active))
+ if (drm_WARN_ON(display->drm, crtc->active))
return;
i9xx_configure_cpu_transcoder(new_crtc_state);
@@ -2271,8 +2093,8 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
crtc->active = true;
- if (DISPLAY_VER(dev_priv) != 2)
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+ if (DISPLAY_VER(display) != 2)
+ intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
intel_encoders_pre_enable(state, crtc);
@@ -2291,25 +2113,10 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
intel_encoders_enable(state, crtc);
/* prevents spurious underruns */
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
intel_crtc_wait_for_next_vblank(crtc);
}
-static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (!old_crtc_state->gmch_pfit.control)
- return;
-
- assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);
-
- drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
- intel_de_read(dev_priv, PFIT_CONTROL(dev_priv)));
- intel_de_write(dev_priv, PFIT_CONTROL(dev_priv), 0);
-}
-
static void i9xx_crtc_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -2323,7 +2130,7 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
* On gen2 planes are double buffered but the pipe isn't, so we must
* wait for planes to fully turn off before disabling the pipe.
*/
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
intel_crtc_wait_for_next_vblank(crtc);
intel_encoders_disable(state, crtc);
@@ -2337,9 +2144,9 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
intel_encoders_post_disable(state, crtc);
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
chv_disable_pll(dev_priv, pipe);
- else if (IS_VALLEYVIEW(dev_priv))
+ else if (display->platform.valleyview)
vlv_disable_pll(dev_priv, pipe);
else
i9xx_disable_pll(old_crtc_state);
@@ -2347,14 +2154,14 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
intel_encoders_post_pll_disable(state, crtc);
- if (DISPLAY_VER(dev_priv) != 2)
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+ if (DISPLAY_VER(display) != 2)
+ intel_set_cpu_fifo_underrun_reporting(display, pipe, false);
- if (!dev_priv->display.funcs.wm->initial_watermarks)
+ if (!display->funcs.wm->initial_watermarks)
intel_update_watermarks(dev_priv);
/* clock the pipe down to 640x480@60 to potentially save power */
- if (IS_I830(dev_priv))
+ if (display->platform.i830)
i830_enable_pipe(display, pipe);
}
@@ -2368,11 +2175,11 @@ void intel_encoder_destroy(struct drm_encoder *encoder)
static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
{
- const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
/* GDG double wide on either pipe, otherwise pipe A only */
- return HAS_DOUBLE_WIDE(dev_priv) &&
- (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
+ return HAS_DOUBLE_WIDE(display) &&
+ (crtc->pipe == PIPE_A || display->platform.i915g);
}
static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
@@ -2419,9 +2226,9 @@ static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (HAS_GMCH(dev_priv))
+ if (HAS_GMCH(display))
/* FIXME calculate proper pipe pixel rate for GMCH pfit */
crtc_state->pixel_rate =
crtc_state->hw.pipe_mode.crtc_clock;
@@ -2532,6 +2339,7 @@ static void intel_joiner_compute_pipe_src(struct intel_crtc_state *crtc_state)
static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
@@ -2545,7 +2353,7 @@ static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
*/
if (drm_rect_width(&crtc_state->pipe_src) & 1) {
if (crtc_state->double_wide) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] Odd pipe source width not supported with double wide pipe\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
@@ -2553,7 +2361,7 @@ static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
intel_is_dual_link_lvds(i915)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
@@ -2565,11 +2373,11 @@ static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
- int clock_limit = i915->display.cdclk.max_dotclk_freq;
+ int clock_limit = display->cdclk.max_dotclk_freq;
/*
* Start with the adjusted_mode crtc timings, which
@@ -2584,8 +2392,8 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
intel_joiner_adjust_timings(crtc_state, pipe_mode);
intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
- if (DISPLAY_VER(i915) < 4) {
- clock_limit = i915->display.cdclk.max_cdclk_freq * 9 / 10;
+ if (DISPLAY_VER(display) < 4) {
+ clock_limit = display->cdclk.max_cdclk_freq * 9 / 10;
/*
* Enable double wide mode when the dot clock
@@ -2593,13 +2401,13 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
*/
if (intel_crtc_supports_double_wide(crtc) &&
pipe_mode->crtc_clock > clock_limit) {
- clock_limit = i915->display.cdclk.max_dotclk_freq;
+ clock_limit = display->cdclk.max_dotclk_freq;
crtc_state->double_wide = true;
}
}
if (pipe_mode->crtc_clock > clock_limit) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
crtc->base.base.id, crtc->base.name,
pipe_mode->crtc_clock, clock_limit,
@@ -2610,28 +2418,63 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
return 0;
}
-static bool intel_crtc_needs_wa_14015401596(struct intel_crtc_state *crtc_state)
+static bool intel_crtc_needs_wa_14015401596(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
return intel_vrr_possible(crtc_state) && crtc_state->has_psr &&
- adjusted_mode->crtc_vblank_start == adjusted_mode->crtc_vdisplay &&
IS_DISPLAY_VER(display, 13, 14);
}
-static int intel_crtc_compute_config(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static int intel_crtc_vblank_delay(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ int vblank_delay = 0;
+
+ if (!HAS_DSB(display))
+ return 0;
+
+ /* Wa_14015401596 */
+ if (intel_crtc_needs_wa_14015401596(crtc_state))
+ vblank_delay = max(vblank_delay, 1);
+
+ return vblank_delay;
+}
+
+static int intel_crtc_compute_vblank_delay(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
+ int vblank_delay, max_vblank_delay;
+
+ vblank_delay = intel_crtc_vblank_delay(crtc_state);
+ max_vblank_delay = adjusted_mode->crtc_vblank_end - adjusted_mode->crtc_vblank_start - 1;
+
+ if (vblank_delay > max_vblank_delay) {
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] vblank delay (%d) exceeds max (%d)\n",
+ crtc->base.base.id, crtc->base.name, vblank_delay, max_vblank_delay);
+ return -EINVAL;
+ }
+
+ adjusted_mode->crtc_vblank_start += vblank_delay;
+
+ return 0;
+}
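A minimal standalone sketch of the clamping arithmetic in intel_crtc_compute_vblank_delay() above, using invented 1080p-style vertical timings; it is an illustrative aside, not part of the patch.

	#include <stdio.h>

	/* Mirrors the clamp: the delay must fit between vblank start and end - 1. */
	static int compute_delayed_vblank_start(int vblank_delay,
						int crtc_vblank_start,
						int crtc_vblank_end)
	{
		int max_vblank_delay = crtc_vblank_end - crtc_vblank_start - 1;

		if (vblank_delay > max_vblank_delay)
			return -1; /* the driver returns -EINVAL here */

		return crtc_vblank_start + vblank_delay; /* delayed vblank start */
	}

	int main(void)
	{
		/* invented timings: vblank start 1084, vblank end 1125 */
		printf("%d\n", compute_delayed_vblank_start(1, 1084, 1125));  /* 1085 */
		printf("%d\n", compute_delayed_vblank_start(60, 1084, 1125)); /* -1, exceeds max of 40 */
		return 0;
	}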
+
+static int intel_crtc_compute_config(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
int ret;
- /* Wa_14015401596 */
- if (intel_crtc_needs_wa_14015401596(crtc_state))
- adjusted_mode->crtc_vblank_start += 1;
+ ret = intel_crtc_compute_vblank_delay(state, crtc);
+ if (ret)
+ return ret;
ret = intel_dpll_crtc_compute_clock(state, crtc);
if (ret)
@@ -2703,8 +2546,10 @@ intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes,
0x80000);
}
-void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
+void intel_panel_sanitize_ssc(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
/*
* There may be no VBT; and if the BIOS enabled SSC we can
* just keep using it to avoid unnecessary flicker. Whereas if the
@@ -2712,16 +2557,16 @@ void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
* indicates as much.
*/
if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
- bool bios_lvds_use_ssc = intel_de_read(dev_priv,
+ bool bios_lvds_use_ssc = intel_de_read(display,
PCH_DREF_CONTROL) &
DREF_SSC1_ENABLE;
- if (dev_priv->display.vbt.lvds_use_ssc != bios_lvds_use_ssc) {
- drm_dbg_kms(&dev_priv->drm,
+ if (display->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
+ drm_dbg_kms(display->drm,
"SSC %s by BIOS, overriding VBT which says %s\n",
str_enabled_disabled(bios_lvds_use_ssc),
- str_enabled_disabled(dev_priv->display.vbt.lvds_use_ssc));
- dev_priv->display.vbt.lvds_use_ssc = bios_lvds_use_ssc;
+ str_enabled_disabled(display->vbt.lvds_use_ssc));
+ display->vbt.lvds_use_ssc = bios_lvds_use_ssc;
}
}
}
@@ -2733,45 +2578,45 @@ void intel_zero_m_n(struct intel_link_m_n *m_n)
m_n->tu = 1;
}
-void intel_set_m_n(struct drm_i915_private *i915,
+void intel_set_m_n(struct intel_display *display,
const struct intel_link_m_n *m_n,
i915_reg_t data_m_reg, i915_reg_t data_n_reg,
i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
- intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m);
- intel_de_write(i915, data_n_reg, m_n->data_n);
- intel_de_write(i915, link_m_reg, m_n->link_m);
+ intel_de_write(display, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m);
+ intel_de_write(display, data_n_reg, m_n->data_n);
+ intel_de_write(display, link_m_reg, m_n->link_m);
/*
* On BDW+ writing LINK_N arms the double buffered update
* of all the M/N registers, so it must be written last.
*/
- intel_de_write(i915, link_n_reg, m_n->link_n);
+ intel_de_write(display, link_n_reg, m_n->link_n);
}
-bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
+bool intel_cpu_transcoder_has_m2_n2(struct intel_display *display,
enum transcoder transcoder)
{
- if (IS_HASWELL(dev_priv))
+ if (display->platform.haswell)
return transcoder == TRANSCODER_EDP;
- return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv);
+ return IS_DISPLAY_VER(display, 5, 7) || display->platform.cherryview;
}
void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc,
enum transcoder transcoder,
const struct intel_link_m_n *m_n)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
- if (DISPLAY_VER(dev_priv) >= 5)
- intel_set_m_n(dev_priv, m_n,
- PIPE_DATA_M1(dev_priv, transcoder),
- PIPE_DATA_N1(dev_priv, transcoder),
- PIPE_LINK_M1(dev_priv, transcoder),
- PIPE_LINK_N1(dev_priv, transcoder));
+ if (DISPLAY_VER(display) >= 5)
+ intel_set_m_n(display, m_n,
+ PIPE_DATA_M1(display, transcoder),
+ PIPE_DATA_N1(display, transcoder),
+ PIPE_LINK_M1(display, transcoder),
+ PIPE_LINK_N1(display, transcoder));
else
- intel_set_m_n(dev_priv, m_n,
+ intel_set_m_n(display, m_n,
PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
}
@@ -2780,28 +2625,30 @@ void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
enum transcoder transcoder,
const struct intel_link_m_n *m_n)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
+ if (!intel_cpu_transcoder_has_m2_n2(display, transcoder))
return;
- intel_set_m_n(dev_priv, m_n,
- PIPE_DATA_M2(dev_priv, transcoder),
- PIPE_DATA_N2(dev_priv, transcoder),
- PIPE_LINK_M2(dev_priv, transcoder),
- PIPE_LINK_N2(dev_priv, transcoder));
+ intel_set_m_n(display, m_n,
+ PIPE_DATA_M2(display, transcoder),
+ PIPE_DATA_N2(display, transcoder),
+ PIPE_LINK_M2(display, transcoder),
+ PIPE_LINK_N2(display, transcoder));
}
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;
int vsyncshift = 0;
+ drm_WARN_ON(display->drm, transcoder_is_dsi(cpu_transcoder));
+
/* We need to be careful not to change the adjusted mode, for otherwise
 * the hw state checker will get angry at the mismatch. */
crtc_vdisplay = adjusted_mode->crtc_vdisplay;
@@ -2827,9 +2674,9 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
* VBLANK_START no longer works on ADL+, instead we must use
* TRANS_SET_CONTEXT_LATENCY to configure the pipe vblank start.
*/
- if (DISPLAY_VER(dev_priv) >= 13) {
- intel_de_write(dev_priv,
- TRANS_SET_CONTEXT_LATENCY(dev_priv, cpu_transcoder),
+ if (DISPLAY_VER(display) >= 13) {
+ intel_de_write(display,
+ TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder),
crtc_vblank_start - crtc_vdisplay);
/*
@@ -2839,28 +2686,28 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
crtc_vblank_start = 1;
}
- if (DISPLAY_VER(dev_priv) >= 4)
- intel_de_write(dev_priv,
- TRANS_VSYNCSHIFT(dev_priv, cpu_transcoder),
+ if (DISPLAY_VER(display) >= 4)
+ intel_de_write(display,
+ TRANS_VSYNCSHIFT(display, cpu_transcoder),
vsyncshift);
- intel_de_write(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_HTOTAL(display, cpu_transcoder),
HACTIVE(adjusted_mode->crtc_hdisplay - 1) |
HTOTAL(adjusted_mode->crtc_htotal - 1));
- intel_de_write(dev_priv, TRANS_HBLANK(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_HBLANK(display, cpu_transcoder),
HBLANK_START(adjusted_mode->crtc_hblank_start - 1) |
HBLANK_END(adjusted_mode->crtc_hblank_end - 1));
- intel_de_write(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_HSYNC(display, cpu_transcoder),
HSYNC_START(adjusted_mode->crtc_hsync_start - 1) |
HSYNC_END(adjusted_mode->crtc_hsync_end - 1));
- intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder),
VACTIVE(crtc_vdisplay - 1) |
VTOTAL(crtc_vtotal - 1));
- intel_de_write(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VBLANK(display, cpu_transcoder),
VBLANK_START(crtc_vblank_start - 1) |
VBLANK_END(crtc_vblank_end - 1));
- intel_de_write(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VSYNC(display, cpu_transcoder),
VSYNC_START(adjusted_mode->crtc_vsync_start - 1) |
VSYNC_END(adjusted_mode->crtc_vsync_end - 1));
@@ -2868,48 +2715,65 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
* programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
* documented on the DDI_FUNC_CTL register description, EDP Input Select
* bits. */
- if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
+ if (display->platform.haswell && cpu_transcoder == TRANSCODER_EDP &&
(pipe == PIPE_B || pipe == PIPE_C))
- intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, pipe),
+ intel_de_write(display, TRANS_VTOTAL(display, pipe),
VACTIVE(crtc_vdisplay - 1) |
VTOTAL(crtc_vtotal - 1));
}
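A small self-contained sketch of the register encoding used above, with an invented 1920x1080 vertical timing set: the TRANS_* fields are written as value minus one, and on DISPLAY_VER >= 13 the delayed vblank start is expressed as a line offset from vactive via TRANS_SET_CONTEXT_LATENCY while VBLANK_START is cleared. Illustrative only, not part of the patch.

	#include <stdio.h>

	int main(void)
	{
		/* invented vertical timings for a 1080p-style mode */
		int vdisplay = 1080, vblank_start = 1084, vblank_end = 1125, vtotal = 1125;

		/* TRANS_VTOTAL: both fields are programmed as value - 1 */
		printf("VACTIVE=%d VTOTAL=%d\n", vdisplay - 1, vtotal - 1);

		/* DISPLAY_VER >= 13: delayed vblank start as an offset from vactive */
		printf("SET_CONTEXT_LATENCY=%d\n", vblank_start - vdisplay);

		/* VBLANK_START is then just cleared (1 - 1 = 0); VBLANK_END stays */
		printf("VBLANK_START=%d VBLANK_END=%d\n", 1 - 1, vblank_end - 1);
		return 0;
	}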
static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;
+ drm_WARN_ON(display->drm, transcoder_is_dsi(cpu_transcoder));
+
crtc_vdisplay = adjusted_mode->crtc_vdisplay;
crtc_vtotal = adjusted_mode->crtc_vtotal;
crtc_vblank_start = adjusted_mode->crtc_vblank_start;
crtc_vblank_end = adjusted_mode->crtc_vblank_end;
- drm_WARN_ON(&dev_priv->drm, adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE);
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ /* the chip adds 2 halflines automatically */
+ crtc_vtotal -= 1;
+ crtc_vblank_end -= 1;
+ }
+
+ if (DISPLAY_VER(display) >= 13) {
+ intel_de_write(display,
+ TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder),
+ crtc_vblank_start - crtc_vdisplay);
+
+ /*
+ * VBLANK_START not used by hw, just clear it
+ * to make it stand out in register dumps.
+ */
+ crtc_vblank_start = 1;
+ }
/*
* The hardware actually ignores TRANS_VBLANK.VBLANK_END in DP mode.
* But let's write it anyway to keep the state checker happy.
*/
- intel_de_write(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VBLANK(display, cpu_transcoder),
VBLANK_START(crtc_vblank_start - 1) |
VBLANK_END(crtc_vblank_end - 1));
/*
* The double buffer latch point for TRANS_VTOTAL
* is the transcoder's undelayed vblank.
*/
- intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder),
VACTIVE(crtc_vdisplay - 1) |
VTOTAL(crtc_vtotal - 1));
}
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int width = drm_rect_width(&crtc_state->pipe_src);
int height = drm_rect_height(&crtc_state->pipe_src);
enum pipe pipe = crtc->pipe;
@@ -2917,63 +2781,62 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
/* pipesrc controls the size that is scaled from, which should
* always be the user's requested size.
*/
- intel_de_write(dev_priv, PIPESRC(dev_priv, pipe),
+ intel_de_write(display, PIPESRC(display, pipe),
PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
}
static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
return false;
- if (DISPLAY_VER(dev_priv) >= 9 ||
- IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- return intel_de_read(dev_priv,
- TRANSCONF(dev_priv, cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW;
+ if (DISPLAY_VER(display) >= 9 ||
+ display->platform.broadwell || display->platform.haswell)
+ return intel_de_read(display,
+ TRANSCONF(display, cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW;
else
- return intel_de_read(dev_priv,
- TRANSCONF(dev_priv, cpu_transcoder)) & TRANSCONF_INTERLACE_MASK;
+ return intel_de_read(display,
+ TRANSCONF(display, cpu_transcoder)) & TRANSCONF_INTERLACE_MASK;
}
static void intel_get_transcoder_timings(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
u32 tmp;
- tmp = intel_de_read(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder));
+ tmp = intel_de_read(display, TRANS_HTOTAL(display, cpu_transcoder));
adjusted_mode->crtc_hdisplay = REG_FIELD_GET(HACTIVE_MASK, tmp) + 1;
adjusted_mode->crtc_htotal = REG_FIELD_GET(HTOTAL_MASK, tmp) + 1;
if (!transcoder_is_dsi(cpu_transcoder)) {
- tmp = intel_de_read(dev_priv,
- TRANS_HBLANK(dev_priv, cpu_transcoder));
+ tmp = intel_de_read(display,
+ TRANS_HBLANK(display, cpu_transcoder));
adjusted_mode->crtc_hblank_start = REG_FIELD_GET(HBLANK_START_MASK, tmp) + 1;
adjusted_mode->crtc_hblank_end = REG_FIELD_GET(HBLANK_END_MASK, tmp) + 1;
}
- tmp = intel_de_read(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder));
+ tmp = intel_de_read(display, TRANS_HSYNC(display, cpu_transcoder));
adjusted_mode->crtc_hsync_start = REG_FIELD_GET(HSYNC_START_MASK, tmp) + 1;
adjusted_mode->crtc_hsync_end = REG_FIELD_GET(HSYNC_END_MASK, tmp) + 1;
- tmp = intel_de_read(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder));
+ tmp = intel_de_read(display, TRANS_VTOTAL(display, cpu_transcoder));
adjusted_mode->crtc_vdisplay = REG_FIELD_GET(VACTIVE_MASK, tmp) + 1;
adjusted_mode->crtc_vtotal = REG_FIELD_GET(VTOTAL_MASK, tmp) + 1;
/* FIXME TGL+ DSI transcoders have this! */
if (!transcoder_is_dsi(cpu_transcoder)) {
- tmp = intel_de_read(dev_priv,
- TRANS_VBLANK(dev_priv, cpu_transcoder));
+ tmp = intel_de_read(display,
+ TRANS_VBLANK(display, cpu_transcoder));
adjusted_mode->crtc_vblank_start = REG_FIELD_GET(VBLANK_START_MASK, tmp) + 1;
adjusted_mode->crtc_vblank_end = REG_FIELD_GET(VBLANK_END_MASK, tmp) + 1;
}
- tmp = intel_de_read(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder));
+ tmp = intel_de_read(display, TRANS_VSYNC(display, cpu_transcoder));
adjusted_mode->crtc_vsync_start = REG_FIELD_GET(VSYNC_START_MASK, tmp) + 1;
adjusted_mode->crtc_vsync_end = REG_FIELD_GET(VSYNC_END_MASK, tmp) + 1;
@@ -2983,11 +2846,11 @@ static void intel_get_transcoder_timings(struct intel_crtc *crtc,
adjusted_mode->crtc_vblank_end += 1;
}
- if (DISPLAY_VER(dev_priv) >= 13 && !transcoder_is_dsi(cpu_transcoder))
+ if (DISPLAY_VER(display) >= 13 && !transcoder_is_dsi(cpu_transcoder))
adjusted_mode->crtc_vblank_start =
adjusted_mode->crtc_vdisplay +
- intel_de_read(dev_priv,
- TRANS_SET_CONTEXT_LATENCY(dev_priv, cpu_transcoder));
+ intel_de_read(display,
+ TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder));
}
static void intel_joiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
@@ -3010,11 +2873,10 @@ static void intel_joiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
static void intel_get_pipe_src_size(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
u32 tmp;
- tmp = intel_de_read(dev_priv, PIPESRC(dev_priv, crtc->pipe));
+ tmp = intel_de_read(display, PIPESRC(display, crtc->pipe));
drm_rect_init(&pipe_config->pipe_src, 0, 0,
REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1,
@@ -3025,8 +2887,7 @@ static void intel_get_pipe_src_size(struct intel_crtc *crtc,
void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val = 0;
@@ -3035,15 +2896,15 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
* - During modeset the pipe is still disabled and must remain so
* - During fastset the pipe is already enabled and must remain so
*/
- if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state))
+ if (display->platform.i830 || !intel_crtc_needs_modeset(crtc_state))
val |= TRANSCONF_ENABLE;
if (crtc_state->double_wide)
val |= TRANSCONF_DOUBLE_WIDE;
/* only g4x and later have fancy bpc/dither controls */
- if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.g4x || display->platform.valleyview ||
+ display->platform.cherryview) {
/* Bspec claims that we can't use dithering for 30bpp pipes. */
if (crtc_state->dither && crtc_state->pipe_bpp != 30)
val |= TRANSCONF_DITHER_EN |
@@ -3067,7 +2928,7 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
}
if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
- if (DISPLAY_VER(dev_priv) < 4 ||
+ if (DISPLAY_VER(display) < 4 ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
val |= TRANSCONF_INTERLACE_W_FIELD_INDICATION;
else
@@ -3076,8 +2937,8 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
val |= TRANSCONF_INTERLACE_PROGRESSIVE;
}
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- crtc_state->limited_color_range)
+ if ((display->platform.valleyview || display->platform.cherryview) &&
+ crtc_state->limited_color_range)
val |= TRANSCONF_COLOR_RANGE_SELECT;
val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);
@@ -3087,54 +2948,17 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
- intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val);
- intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
-}
-
-static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
-{
- if (IS_I830(dev_priv))
- return false;
-
- return DISPLAY_VER(dev_priv) >= 4 ||
- IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
-}
-
-static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe;
- u32 tmp;
-
- if (!i9xx_has_pfit(dev_priv))
- return;
-
- tmp = intel_de_read(dev_priv, PFIT_CONTROL(dev_priv));
- if (!(tmp & PFIT_ENABLE))
- return;
-
- /* Check whether the pfit is attached to our pipe. */
- if (DISPLAY_VER(dev_priv) >= 4)
- pipe = REG_FIELD_GET(PFIT_PIPE_MASK, tmp);
- else
- pipe = PIPE_B;
-
- if (pipe != crtc->pipe)
- return;
-
- crtc_state->gmch_pfit.control = tmp;
- crtc_state->gmch_pfit.pgm_ratios =
- intel_de_read(dev_priv, PFIT_PGM_RATIOS(dev_priv));
+ intel_de_write(display, TRANSCONF(display, cpu_transcoder), val);
+ intel_de_posting_read(display, TRANSCONF(display, cpu_transcoder));
}
static enum intel_output_format
bdw_get_pipe_misc_output_format(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
u32 tmp;
- tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));
+ tmp = intel_de_read(display, PIPE_MISC(crtc->pipe));
if (tmp & PIPE_MISC_YUV420_ENABLE) {
/*
@@ -3142,8 +2966,8 @@ bdw_get_pipe_misc_output_format(struct intel_crtc *crtc)
* For xe3_lpd+ this is implied in YUV420 Enable bit.
* Ensure the same for prior platforms in YUV420 Mode bit.
*/
- if (DISPLAY_VER(dev_priv) < 30)
- drm_WARN_ON(&dev_priv->drm,
+ if (DISPLAY_VER(display) < 30)
+ drm_WARN_ON(display->drm,
(tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0);
return INTEL_OUTPUT_FORMAT_YCBCR420;
@@ -3157,31 +2981,29 @@ bdw_get_pipe_misc_output_format(struct intel_crtc *crtc)
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum intel_display_power_domain power_domain;
+ enum transcoder cpu_transcoder = (enum transcoder)crtc->pipe;
intel_wakeref_t wakeref;
+ bool ret = false;
u32 tmp;
- bool ret;
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ wakeref = intel_display_power_get_if_enabled(display, power_domain);
if (!wakeref)
return false;
- pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
- pipe_config->sink_format = pipe_config->output_format;
- pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
- pipe_config->shared_dpll = NULL;
-
- ret = false;
-
- tmp = intel_de_read(dev_priv,
- TRANSCONF(dev_priv, pipe_config->cpu_transcoder));
+ tmp = intel_de_read(display, TRANSCONF(display, cpu_transcoder));
if (!(tmp & TRANSCONF_ENABLE))
goto out;
- if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv)) {
+ pipe_config->cpu_transcoder = cpu_transcoder;
+
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ pipe_config->sink_format = pipe_config->output_format;
+
+ if (display->platform.g4x || display->platform.valleyview ||
+ display->platform.cherryview) {
switch (tmp & TRANSCONF_BPC_MASK) {
case TRANSCONF_BPC_6:
pipe_config->pipe_bpp = 18;
@@ -3198,7 +3020,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
}
}
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if ((display->platform.valleyview || display->platform.cherryview) &&
(tmp & TRANSCONF_COLOR_RANGE_SELECT))
pipe_config->limited_color_range = true;
@@ -3206,29 +3028,29 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if ((display->platform.valleyview || display->platform.cherryview) &&
(tmp & TRANSCONF_WGC_ENABLE))
pipe_config->wgc_enable = true;
intel_color_get_config(pipe_config);
- if (HAS_DOUBLE_WIDE(dev_priv))
+ if (HAS_DOUBLE_WIDE(display))
pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE;
intel_get_transcoder_timings(crtc, pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
- i9xx_get_pfit_config(pipe_config);
+ i9xx_pfit_get_config(pipe_config);
i9xx_dpll_get_hw_state(crtc, &pipe_config->dpll_hw_state);
- if (DISPLAY_VER(dev_priv) >= 4) {
+ if (DISPLAY_VER(display) >= 4) {
tmp = pipe_config->dpll_hw_state.i9xx.dpll_md;
pipe_config->pixel_multiplier =
((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
>> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
- } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
- IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
+ } else if (display->platform.i945g || display->platform.i945gm ||
+ display->platform.g33 || display->platform.pineview) {
tmp = pipe_config->dpll_hw_state.i9xx.dpll;
pipe_config->pixel_multiplier =
((tmp & SDVO_MULTIPLIER_MASK)
@@ -3240,9 +3062,9 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
pipe_config->pixel_multiplier = 1;
}
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
chv_crtc_clock_get(pipe_config);
- else if (IS_VALLEYVIEW(dev_priv))
+ else if (display->platform.valleyview)
vlv_crtc_clock_get(pipe_config);
else
i9xx_crtc_clock_get(pipe_config);
@@ -3258,15 +3080,14 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
ret = true;
out:
- intel_display_power_put(dev_priv, power_domain, wakeref);
+ intel_display_power_put(display, power_domain, wakeref);
return ret;
}
void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val = 0;
@@ -3308,7 +3129,7 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
* This would end up with an odd purple hue over
* the entire display. Make sure we don't do it.
*/
- drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
+ drm_WARN_ON(display->drm, crtc_state->limited_color_range &&
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
if (crtc_state->limited_color_range &&
@@ -3323,14 +3144,13 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
val |= TRANSCONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay);
- intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val);
- intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
+ intel_de_write(display, TRANSCONF(display, cpu_transcoder), val);
+ intel_de_posting_read(display, TRANSCONF(display, cpu_transcoder));
}
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val = 0;
@@ -3341,7 +3161,7 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
if (!intel_crtc_needs_modeset(crtc_state))
val |= TRANSCONF_ENABLE;
- if (IS_HASWELL(dev_priv) && crtc_state->dither)
+ if (display->platform.haswell && crtc_state->dither)
val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;
if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
@@ -3349,20 +3169,19 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
else
val |= TRANSCONF_INTERLACE_PF_PD_ILK;
- if (IS_HASWELL(dev_priv) &&
+ if (display->platform.haswell &&
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
val |= TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW;
- intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val);
- intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
+ intel_de_write(display, TRANSCONF(display, cpu_transcoder), val);
+ intel_de_posting_read(display, TRANSCONF(display, cpu_transcoder));
}
static void bdw_set_pipe_misc(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_display *display = to_intel_display(crtc->base.dev);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 val = 0;
switch (crtc_state->pipe_bpp) {
@@ -3377,7 +3196,7 @@ static void bdw_set_pipe_misc(struct intel_dsb *dsb,
break;
case 36:
/* Port output 12BPC defined for ADLP+ */
- if (DISPLAY_VER(dev_priv) >= 13)
+ if (DISPLAY_VER(display) >= 13)
val |= PIPE_MISC_BPC_12_ADLP;
break;
default:
@@ -3396,14 +3215,14 @@ static void bdw_set_pipe_misc(struct intel_dsb *dsb,
val |= DISPLAY_VER(display) >= 30 ? PIPE_MISC_YUV420_ENABLE :
PIPE_MISC_YUV420_ENABLE | PIPE_MISC_YUV420_MODE_FULL_BLEND;
- if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
+ if (DISPLAY_VER(display) >= 11 && is_hdr_mode(crtc_state))
val |= PIPE_MISC_HDR_MODE_PRECISION;
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
val |= PIPE_MISC_PIXEL_ROUNDING_TRUNC;
/* allow PSR with sprite enabled */
- if (IS_BROADWELL(dev_priv))
+ if (display->platform.broadwell)
val |= PIPE_MISC_PSR_MASK_SPRITE_ENABLE;
intel_de_write_dsb(display, dsb, PIPE_MISC(crtc->pipe), val);
@@ -3411,10 +3230,10 @@ static void bdw_set_pipe_misc(struct intel_dsb *dsb,
int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
u32 tmp;
- tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));
+ tmp = intel_de_read(display, PIPE_MISC(crtc->pipe));
switch (tmp & PIPE_MISC_BPC_MASK) {
case PIPE_MISC_BPC_6:
@@ -3434,7 +3253,7 @@ int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc)
* MIPI DSI HW readout.
*/
case PIPE_MISC_BPC_12_ADLP:
- if (DISPLAY_VER(dev_priv) >= 13)
+ if (DISPLAY_VER(display) >= 13)
return 36;
fallthrough;
default:
@@ -3454,33 +3273,33 @@ int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
return DIV_ROUND_UP(bps, link_bw * 8);
}
-void intel_get_m_n(struct drm_i915_private *i915,
+void intel_get_m_n(struct intel_display *display,
struct intel_link_m_n *m_n,
i915_reg_t data_m_reg, i915_reg_t data_n_reg,
i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
- m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK;
- m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK;
- m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK;
- m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK;
- m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1;
+ m_n->link_m = intel_de_read(display, link_m_reg) & DATA_LINK_M_N_MASK;
+ m_n->link_n = intel_de_read(display, link_n_reg) & DATA_LINK_M_N_MASK;
+ m_n->data_m = intel_de_read(display, data_m_reg) & DATA_LINK_M_N_MASK;
+ m_n->data_n = intel_de_read(display, data_n_reg) & DATA_LINK_M_N_MASK;
+ m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(display, data_m_reg)) + 1;
}
void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc,
enum transcoder transcoder,
struct intel_link_m_n *m_n)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
- if (DISPLAY_VER(dev_priv) >= 5)
- intel_get_m_n(dev_priv, m_n,
- PIPE_DATA_M1(dev_priv, transcoder),
- PIPE_DATA_N1(dev_priv, transcoder),
- PIPE_LINK_M1(dev_priv, transcoder),
- PIPE_LINK_N1(dev_priv, transcoder));
+ if (DISPLAY_VER(display) >= 5)
+ intel_get_m_n(display, m_n,
+ PIPE_DATA_M1(display, transcoder),
+ PIPE_DATA_N1(display, transcoder),
+ PIPE_LINK_M1(display, transcoder),
+ PIPE_LINK_N1(display, transcoder));
else
- intel_get_m_n(dev_priv, m_n,
+ intel_get_m_n(display, m_n,
PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
}
@@ -3489,77 +3308,39 @@ void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
enum transcoder transcoder,
struct intel_link_m_n *m_n)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
+ if (!intel_cpu_transcoder_has_m2_n2(display, transcoder))
return;
- intel_get_m_n(dev_priv, m_n,
- PIPE_DATA_M2(dev_priv, transcoder),
- PIPE_DATA_N2(dev_priv, transcoder),
- PIPE_LINK_M2(dev_priv, transcoder),
- PIPE_LINK_N2(dev_priv, transcoder));
-}
-
-static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 ctl, pos, size;
- enum pipe pipe;
-
- ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
- if ((ctl & PF_ENABLE) == 0)
- return;
-
- if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
- pipe = REG_FIELD_GET(PF_PIPE_SEL_MASK_IVB, ctl);
- else
- pipe = crtc->pipe;
-
- crtc_state->pch_pfit.enabled = true;
-
- pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
- size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
-
- drm_rect_init(&crtc_state->pch_pfit.dst,
- REG_FIELD_GET(PF_WIN_XPOS_MASK, pos),
- REG_FIELD_GET(PF_WIN_YPOS_MASK, pos),
- REG_FIELD_GET(PF_WIN_XSIZE_MASK, size),
- REG_FIELD_GET(PF_WIN_YSIZE_MASK, size));
-
- /*
- * We currently do not free assignements of panel fitters on
- * ivb/hsw (since we don't use the higher upscaling modes which
- * differentiates them) so just WARN about this case for now.
- */
- drm_WARN_ON(&dev_priv->drm, pipe != crtc->pipe);
+ intel_get_m_n(display, m_n,
+ PIPE_DATA_M2(display, transcoder),
+ PIPE_DATA_N2(display, transcoder),
+ PIPE_LINK_M2(display, transcoder),
+ PIPE_LINK_N2(display, transcoder));
}
static bool ilk_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
enum intel_display_power_domain power_domain;
+ enum transcoder cpu_transcoder = (enum transcoder)crtc->pipe;
intel_wakeref_t wakeref;
+ bool ret = false;
u32 tmp;
- bool ret;
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ wakeref = intel_display_power_get_if_enabled(display, power_domain);
if (!wakeref)
return false;
- pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
- pipe_config->shared_dpll = NULL;
-
- ret = false;
- tmp = intel_de_read(dev_priv,
- TRANSCONF(dev_priv, pipe_config->cpu_transcoder));
+ tmp = intel_de_read(display, TRANSCONF(display, cpu_transcoder));
if (!(tmp & TRANSCONF_ENABLE))
goto out;
+ pipe_config->cpu_transcoder = cpu_transcoder;
+
switch (tmp & TRANSCONF_BPC_MASK) {
case TRANSCONF_BPC_6:
pipe_config->pipe_bpp = 18;
@@ -3607,31 +3388,31 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
intel_get_transcoder_timings(crtc, pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
- ilk_get_pfit_config(pipe_config);
+ ilk_pfit_get_config(pipe_config);
ret = true;
out:
- intel_display_power_put(dev_priv, power_domain, wakeref);
+ intel_display_power_put(display, power_domain, wakeref);
return ret;
}
-static u8 joiner_pipes(struct drm_i915_private *i915)
+static u8 joiner_pipes(struct intel_display *display)
{
u8 pipes;
- if (DISPLAY_VER(i915) >= 12)
+ if (DISPLAY_VER(display) >= 12)
pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
- else if (DISPLAY_VER(i915) >= 11)
+ else if (DISPLAY_VER(display) >= 11)
pipes = BIT(PIPE_B) | BIT(PIPE_C);
else
pipes = 0;
- return pipes & DISPLAY_RUNTIME_INFO(i915)->pipe_mask;
+ return pipes & DISPLAY_RUNTIME_INFO(display)->pipe_mask;
}
-static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
+static bool transcoder_ddi_func_is_enabled(struct intel_display *display,
enum transcoder cpu_transcoder)
{
enum intel_display_power_domain power_domain;
@@ -3640,9 +3421,9 @@ static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ with_intel_display_power_if_enabled(display, power_domain, wakeref)
+ tmp = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
return tmp & TRANS_DDI_FUNC_ENABLE;
}
@@ -3650,7 +3431,6 @@ static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
static void enabled_uncompressed_joiner_pipes(struct intel_display *display,
u8 *primary_pipes, u8 *secondary_pipes)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_crtc *crtc;
*primary_pipes = 0;
@@ -3659,14 +3439,14 @@ static void enabled_uncompressed_joiner_pipes(struct intel_display *display,
if (!HAS_UNCOMPRESSED_JOINER(display))
return;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc,
- joiner_pipes(i915)) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc,
+ joiner_pipes(display)) {
enum intel_display_power_domain power_domain;
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
power_domain = POWER_DOMAIN_PIPE(pipe);
- with_intel_display_power_if_enabled(i915, power_domain, wakeref) {
+ with_intel_display_power_if_enabled(display, power_domain, wakeref) {
u32 tmp = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe));
if (tmp & UNCOMPRESSED_JOINER_PRIMARY)
@@ -3680,7 +3460,6 @@ static void enabled_uncompressed_joiner_pipes(struct intel_display *display,
static void enabled_bigjoiner_pipes(struct intel_display *display,
u8 *primary_pipes, u8 *secondary_pipes)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_crtc *crtc;
*primary_pipes = 0;
@@ -3689,14 +3468,14 @@ static void enabled_bigjoiner_pipes(struct intel_display *display,
if (!HAS_BIGJOINER(display))
return;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc,
- joiner_pipes(i915)) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc,
+ joiner_pipes(display)) {
enum intel_display_power_domain power_domain;
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
power_domain = intel_dsc_power_domain(crtc, (enum transcoder)pipe);
- with_intel_display_power_if_enabled(i915, power_domain, wakeref) {
+ with_intel_display_power_if_enabled(display, power_domain, wakeref) {
u32 tmp = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe));
if (!(tmp & BIG_JOINER_ENABLE))
@@ -3748,10 +3527,9 @@ static u8 fixup_ultrajoiner_secondary_pipes(u8 ultrajoiner_primary_pipes,
return ultrajoiner_secondary_pipes | ultrajoiner_primary_pipes << 3;
}
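A one-line sketch of the fixup above with invented pipe masks: the hardware leaves the enable bit unset on the last pipe of an ultrajoiner set of four, so shifting the primary mask by three supplies it. Illustrative only, not part of the patch.

	#include <stdio.h>

	int main(void)
	{
		unsigned int primary = 1u << 0;                     /* PIPE_A leads the set */
		unsigned int secondary_hw = (1u << 1) | (1u << 2);  /* B and C report it, D does not */
		unsigned int secondary = secondary_hw | primary << 3; /* synthesize PIPE_D */

		printf("secondary pipes: 0x%x\n", secondary);       /* 0xe -> pipes B, C, D */
		return 0;
	}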
-static void enabled_ultrajoiner_pipes(struct drm_i915_private *i915,
+static void enabled_ultrajoiner_pipes(struct intel_display *display,
u8 *primary_pipes, u8 *secondary_pipes)
{
- struct intel_display *display = &i915->display;
struct intel_crtc *crtc;
*primary_pipes = 0;
@@ -3760,15 +3538,15 @@ static void enabled_ultrajoiner_pipes(struct drm_i915_private *i915,
if (!HAS_ULTRAJOINER(display))
return;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc,
- joiner_pipes(i915)) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc,
+ joiner_pipes(display)) {
enum intel_display_power_domain power_domain;
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
power_domain = intel_dsc_power_domain(crtc, (enum transcoder)pipe);
- with_intel_display_power_if_enabled(i915, power_domain, wakeref) {
- u32 tmp = intel_de_read(i915, ICL_PIPE_DSS_CTL1(pipe));
+ with_intel_display_power_if_enabled(display, power_domain, wakeref) {
+ u32 tmp = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe));
if (!(tmp & ULTRA_JOINER_ENABLE))
continue;
@@ -3781,11 +3559,10 @@ static void enabled_ultrajoiner_pipes(struct drm_i915_private *i915,
}
}
-static void enabled_joiner_pipes(struct drm_i915_private *dev_priv,
+static void enabled_joiner_pipes(struct intel_display *display,
enum pipe pipe,
u8 *primary_pipe, u8 *secondary_pipes)
{
- struct intel_display *display = to_intel_display(&dev_priv->drm);
u8 primary_ultrajoiner_pipes;
u8 primary_uncompressed_joiner_pipes, primary_bigjoiner_pipes;
u8 secondary_ultrajoiner_pipes;
@@ -3793,21 +3570,21 @@ static void enabled_joiner_pipes(struct drm_i915_private *dev_priv,
u8 ultrajoiner_pipes;
u8 uncompressed_joiner_pipes, bigjoiner_pipes;
- enabled_ultrajoiner_pipes(dev_priv, &primary_ultrajoiner_pipes,
+ enabled_ultrajoiner_pipes(display, &primary_ultrajoiner_pipes,
&secondary_ultrajoiner_pipes);
/*
* For some strange reason the last pipe in the set of four
* shouldn't have ultrajoiner enable bit set in hardware.
* Set the bit anyway to make life easier.
*/
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
expected_secondary_pipes(primary_ultrajoiner_pipes, 3) !=
secondary_ultrajoiner_pipes);
secondary_ultrajoiner_pipes =
fixup_ultrajoiner_secondary_pipes(primary_ultrajoiner_pipes,
secondary_ultrajoiner_pipes);
- drm_WARN_ON(&dev_priv->drm, (primary_ultrajoiner_pipes & secondary_ultrajoiner_pipes) != 0);
+ drm_WARN_ON(display->drm, (primary_ultrajoiner_pipes & secondary_ultrajoiner_pipes) != 0);
enabled_uncompressed_joiner_pipes(display, &primary_uncompressed_joiner_pipes,
&secondary_uncompressed_joiner_pipes);
@@ -3901,11 +3678,11 @@ static void enabled_joiner_pipes(struct drm_i915_private *dev_priv,
}
}
-static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
+static u8 hsw_panel_transcoders(struct intel_display *display)
{
u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
return panel_transcoder_mask;
@@ -3913,9 +3690,8 @@ static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
+ struct intel_display *display = to_intel_display(crtc);
+ u8 panel_transcoder_mask = hsw_panel_transcoders(display);
enum transcoder cpu_transcoder;
u8 primary_pipe, secondary_pipes;
u8 enabled_transcoders = 0;
@@ -3924,7 +3700,7 @@ static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
* XXX: Do intel_display_power_get_if_enabled before reading this (for
* consistency and less surprising code; it's in always on power).
*/
- for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder,
+ for_each_cpu_transcoder_masked(display, cpu_transcoder,
panel_transcoder_mask) {
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
@@ -3932,16 +3708,16 @@ static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
u32 tmp = 0;
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ with_intel_display_power_if_enabled(display, power_domain, wakeref)
+ tmp = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
if (!(tmp & TRANS_DDI_FUNC_ENABLE))
continue;
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
- drm_WARN(dev, 1,
+ drm_WARN(display->drm, 1,
"unknown pipe linked to transcoder %s\n",
transcoder_name(cpu_transcoder));
fallthrough;
@@ -3966,14 +3742,14 @@ static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
/* single pipe or joiner primary */
cpu_transcoder = (enum transcoder) crtc->pipe;
- if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
+ if (transcoder_ddi_func_is_enabled(display, cpu_transcoder))
enabled_transcoders |= BIT(cpu_transcoder);
/* joiner secondary -> consider the primary pipe's transcoder as well */
- enabled_joiner_pipes(dev_priv, crtc->pipe, &primary_pipe, &secondary_pipes);
+ enabled_joiner_pipes(display, crtc->pipe, &primary_pipe, &secondary_pipes);
if (secondary_pipes & BIT(crtc->pipe)) {
cpu_transcoder = (enum transcoder)ffs(primary_pipe) - 1;
- if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
+ if (transcoder_ddi_func_is_enabled(display, cpu_transcoder))
enabled_transcoders |= BIT(cpu_transcoder);
}
@@ -3998,17 +3774,17 @@ static bool has_pipe_transcoders(u8 enabled_transcoders)
BIT(TRANSCODER_DSI_1));
}
-static void assert_enabled_transcoders(struct drm_i915_private *i915,
+static void assert_enabled_transcoders(struct intel_display *display,
u8 enabled_transcoders)
{
/* Only one type of transcoder please */
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
has_edp_transcoders(enabled_transcoders) +
has_dsi_transcoders(enabled_transcoders) +
has_pipe_transcoders(enabled_transcoders) > 1);
/* Only DSI transcoders can be ganged */
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
!has_dsi_transcoders(enabled_transcoders) &&
!is_power_of_2(enabled_transcoders));
}
@@ -4017,8 +3793,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
struct intel_display_power_domain_set *power_domain_set)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
unsigned long enabled_transcoders;
u32 tmp;
@@ -4026,7 +3801,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
if (!enabled_transcoders)
return false;
- assert_enabled_transcoders(dev_priv, enabled_transcoders);
+ assert_enabled_transcoders(display, enabled_transcoders);
/*
* With the exception of DSI we should only ever have
@@ -4035,20 +3810,20 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
*/
pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1;
- if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
+ if (!intel_display_power_get_in_set_if_enabled(display, power_domain_set,
POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
return false;
- if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) {
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, pipe_config->cpu_transcoder));
+ if (hsw_panel_transcoders(display) & BIT(pipe_config->cpu_transcoder)) {
+ tmp = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, pipe_config->cpu_transcoder));
if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF)
pipe_config->pch_pfit.force_thru = true;
}
- tmp = intel_de_read(dev_priv,
- TRANSCONF(dev_priv, pipe_config->cpu_transcoder));
+ tmp = intel_de_read(display,
+ TRANSCONF(display, pipe_config->cpu_transcoder));
return tmp & TRANSCONF_ENABLE;
}
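A tiny sketch of how the lowest enabled transcoder is picked from the bitmask above: ffs() is one-based, hence the subtraction. The mask value is invented; this is an illustrative aside, not part of the patch.

	#include <stdio.h>
	#include <strings.h>

	int main(void)
	{
		unsigned int enabled_transcoders = 1u << 3; /* pretend only transcoder 3 is enabled */

		printf("cpu_transcoder = %d\n", ffs(enabled_transcoders) - 1); /* 3 */
		return 0;
	}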
@@ -4069,7 +3844,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
else
cpu_transcoder = TRANSCODER_DSI_C;
- if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
+ if (!intel_display_power_get_in_set_if_enabled(display, power_domain_set,
POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
continue;
@@ -4101,12 +3876,12 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
static void intel_joiner_get_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
u8 primary_pipe, secondary_pipes;
enum pipe pipe = crtc->pipe;
- enabled_joiner_pipes(i915, pipe, &primary_pipe, &secondary_pipes);
+ enabled_joiner_pipes(display, pipe, &primary_pipe, &secondary_pipes);
if (((primary_pipe | secondary_pipes) & BIT(pipe)) == 0)
return;
@@ -4118,21 +3893,18 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
bool active;
u32 tmp;
- if (!intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains,
+ if (!intel_display_power_get_in_set_if_enabled(display, &crtc->hw_readout_power_domains,
POWER_DOMAIN_PIPE(crtc->pipe)))
return false;
- pipe_config->shared_dpll = NULL;
-
active = hsw_get_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains);
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ if ((display->platform.geminilake || display->platform.broxton) &&
bxt_get_dsi_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains)) {
- drm_WARN_ON(&dev_priv->drm, active);
+ drm_WARN_ON(display->drm, active);
active = true;
}
@@ -4143,17 +3915,17 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
intel_dsc_get_config(pipe_config);
if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
- DISPLAY_VER(dev_priv) >= 11)
+ DISPLAY_VER(display) >= 11)
intel_get_transcoder_timings(crtc, pipe_config);
- if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
+ if (HAS_VRR(display) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
intel_vrr_get_config(pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
- if (IS_HASWELL(dev_priv)) {
- u32 tmp = intel_de_read(dev_priv,
- TRANSCONF(dev_priv, pipe_config->cpu_transcoder));
+ if (display->platform.haswell) {
+ u32 tmp = intel_de_read(display,
+ TRANSCONF(display, pipe_config->cpu_transcoder));
if (tmp & TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW)
pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
@@ -4168,18 +3940,18 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
intel_color_get_config(pipe_config);
- tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
+ tmp = intel_de_read(display, WM_LINETIME(crtc->pipe));
pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
- if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ if (display->platform.broadwell || display->platform.haswell)
pipe_config->ips_linetime =
REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
- if (intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains,
+ if (intel_display_power_get_in_set_if_enabled(display, &crtc->hw_readout_power_domains,
POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
skl_scaler_get_config(pipe_config);
else
- ilk_get_pfit_config(pipe_config);
+ ilk_pfit_get_config(pipe_config);
}
hsw_ips_get_config(pipe_config);
@@ -4187,8 +3959,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
pipe_config->pixel_multiplier =
- intel_de_read(dev_priv,
- TRANS_MULT(dev_priv, pipe_config->cpu_transcoder)) + 1;
+ intel_de_read(display,
+ TRANS_MULT(display, pipe_config->cpu_transcoder)) + 1;
} else {
pipe_config->pixel_multiplier = 1;
}
@@ -4203,17 +3975,17 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
}
out:
- intel_display_power_put_all_in_set(dev_priv, &crtc->hw_readout_power_domains);
+ intel_display_power_put_all_in_set(display, &crtc->hw_readout_power_domains);
return active;
}
bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- if (!i915->display.funcs.display->get_pipe_config(crtc, crtc_state))
+ if (!display->funcs.display->get_pipe_config(crtc, crtc_state))
return false;
crtc_state->hw.active = true;
@@ -4229,7 +4001,7 @@ int intel_dotclock_calculate(int link_freq,
/*
* The calculation for the data clock -> pixel clock is:
* pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
- * But we want to avoid losing precison if possible, so:
+ * But we want to avoid losing precision if possible, so:
* pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
*
* and for link freq (10kbs units) -> pixel clock it is:
@@ -4339,137 +4111,6 @@ static bool check_single_encoder_cloning(struct intel_atomic_state *state,
return true;
}
-static int icl_add_linked_planes(struct intel_atomic_state *state)
-{
- struct intel_plane *plane, *linked;
- struct intel_plane_state *plane_state, *linked_plane_state;
- int i;
-
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- linked = plane_state->planar_linked_plane;
-
- if (!linked)
- continue;
-
- linked_plane_state = intel_atomic_get_plane_state(state, linked);
- if (IS_ERR(linked_plane_state))
- return PTR_ERR(linked_plane_state);
-
- drm_WARN_ON(state->base.dev,
- linked_plane_state->planar_linked_plane != plane);
- drm_WARN_ON(state->base.dev,
- linked_plane_state->planar_slave == plane_state->planar_slave);
- }
-
- return 0;
-}
-
-static int icl_check_nv12_planes(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_plane *plane, *linked;
- struct intel_plane_state *plane_state;
- int i;
-
- if (DISPLAY_VER(dev_priv) < 11)
- return 0;
-
- /*
- * Destroy all old plane links and make the slave plane invisible
- * in the crtc_state->active_planes mask.
- */
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
- continue;
-
- plane_state->planar_linked_plane = NULL;
- if (plane_state->planar_slave && !plane_state->uapi.visible) {
- crtc_state->enabled_planes &= ~BIT(plane->id);
- crtc_state->active_planes &= ~BIT(plane->id);
- crtc_state->update_planes |= BIT(plane->id);
- crtc_state->data_rate[plane->id] = 0;
- crtc_state->rel_data_rate[plane->id] = 0;
- }
-
- plane_state->planar_slave = false;
- }
-
- if (!crtc_state->nv12_planes)
- return 0;
-
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- struct intel_plane_state *linked_state = NULL;
-
- if (plane->pipe != crtc->pipe ||
- !(crtc_state->nv12_planes & BIT(plane->id)))
- continue;
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
- if (!icl_is_nv12_y_plane(dev_priv, linked->id))
- continue;
-
- if (crtc_state->active_planes & BIT(linked->id))
- continue;
-
- linked_state = intel_atomic_get_plane_state(state, linked);
- if (IS_ERR(linked_state))
- return PTR_ERR(linked_state);
-
- break;
- }
-
- if (!linked_state) {
- drm_dbg_kms(&dev_priv->drm,
- "Need %d free Y planes for planar YUV\n",
- hweight8(crtc_state->nv12_planes));
-
- return -EINVAL;
- }
-
- plane_state->planar_linked_plane = linked;
-
- linked_state->planar_slave = true;
- linked_state->planar_linked_plane = plane;
- crtc_state->enabled_planes |= BIT(linked->id);
- crtc_state->active_planes |= BIT(linked->id);
- crtc_state->update_planes |= BIT(linked->id);
- crtc_state->data_rate[linked->id] =
- crtc_state->data_rate_y[plane->id];
- crtc_state->rel_data_rate[linked->id] =
- crtc_state->rel_data_rate_y[plane->id];
- drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
- linked->base.name, plane->base.name);
-
- /* Copy parameters to slave plane */
- linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
- linked_state->color_ctl = plane_state->color_ctl;
- linked_state->view = plane_state->view;
- linked_state->decrypt = plane_state->decrypt;
-
- intel_plane_copy_hw_state(linked_state, plane_state);
- linked_state->uapi.src = plane_state->uapi.src;
- linked_state->uapi.dst = plane_state->uapi.dst;
-
- if (icl_is_hdr_plane(dev_priv, plane->id)) {
- if (linked->id == PLANE_7)
- plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL;
- else if (linked->id == PLANE_6)
- plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL;
- else if (linked->id == PLANE_5)
- plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL;
- else if (linked->id == PLANE_4)
- plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL;
- else
- MISSING_CASE(linked->id);
- }
- }
-
- return 0;
-}
-
static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
{
const struct drm_display_mode *pipe_mode =
@@ -4503,6 +4144,7 @@ static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *pipe_mode =
@@ -4516,7 +4158,7 @@ static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
crtc_state->pixel_rate);
/* Display WA #1135: BXT:ALL GLK:ALL */
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ if ((display->platform.geminilake || display->platform.broxton) &&
skl_watermark_ipc_enabled(dev_priv))
linetime_wm /= 2;
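/*
 * Recurring pattern in the hunks that follow (noted here for orientation,
 * not part of the patch itself): per-device state is reached through
 * struct intel_display instead of struct drm_i915_private, i.e.
 *
 *	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 *	drm_dbg_kms(&dev_priv->drm, ...);
 *	if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
 *
 * becomes
 *
 *	struct intel_display *display = to_intel_display(crtc);
 *	drm_dbg_kms(display->drm, ...);
 *	if (DISPLAY_VER(display) >= 9 || display->platform.broadwell)
 */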
@@ -4526,12 +4168,12 @@ static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_cdclk_state *cdclk_state;
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
crtc_state->linetime = skl_linetime_wm(crtc_state);
else
crtc_state->linetime = hsw_linetime_wm(crtc_state);
@@ -4553,12 +4195,11 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
int ret;
- if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
+ if (DISPLAY_VER(display) < 5 && !display->platform.g4x &&
intel_crtc_needs_modeset(crtc_state) &&
!crtc_state->hw.active)
crtc_state->update_wm_post = true;
@@ -4575,13 +4216,13 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
ret = intel_wm_compute(state, crtc);
if (ret) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] watermarks are invalid\n",
crtc->base.base.id, crtc->base.name);
return ret;
}
- if (DISPLAY_VER(dev_priv) >= 9) {
+ if (DISPLAY_VER(display) >= 9) {
if (intel_crtc_needs_modeset(crtc_state) ||
intel_crtc_needs_fastset(crtc_state)) {
ret = skl_update_scaler_crtc(crtc_state);
@@ -4600,8 +4241,8 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
return ret;
}
- if (DISPLAY_VER(dev_priv) >= 9 ||
- IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ if (DISPLAY_VER(display) >= 9 ||
+ display->platform.broadwell || display->platform.haswell) {
ret = hsw_compute_linetime_wm(state, crtc);
if (ret)
return ret;
@@ -4619,8 +4260,8 @@ static int
compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_connector *connector = conn_state->connector;
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_display_info *info = &connector->display_info;
int bpp;
@@ -4643,7 +4284,7 @@ compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
}
if (bpp < crtc_state->pipe_bpp) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Limiting display bpp to %d "
"(EDID bpp %d, max requested bpp %d, max platform bpp %d)\n",
connector->base.id, connector->name,
@@ -4661,17 +4302,17 @@ static int
compute_baseline_pipe_bpp(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int bpp, i;
- if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv)))
+ if (display->platform.g4x || display->platform.valleyview ||
+ display->platform.cherryview)
bpp = 10*3;
- else if (DISPLAY_VER(dev_priv) >= 5)
+ else if (DISPLAY_VER(display) >= 5)
bpp = 12*3;
else
bpp = 8*3;
@@ -4695,7 +4336,7 @@ compute_baseline_pipe_bpp(struct intel_atomic_state *state,
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
- struct drm_device *dev = state->base.dev;
+ struct intel_display *display = to_intel_display(state);
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
unsigned int used_ports = 0;
@@ -4706,14 +4347,14 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state)
* We're going to peek into connector->state,
* hence connection_mutex must be held.
*/
- drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
+ drm_modeset_lock_assert_held(&display->drm->mode_config.connection_mutex);
/*
* Walk the connector list instead of the encoder
* list to detect the problem on ddi platforms
* where there's just one encoder per digital port.
*/
- drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
@@ -4729,11 +4370,11 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state)
encoder = to_intel_encoder(connector_state->best_encoder);
- drm_WARN_ON(dev, !connector_state->crtc);
+ drm_WARN_ON(display->drm, !connector_state->crtc);
switch (encoder->type) {
case INTEL_OUTPUT_DDI:
- if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
+ if (drm_WARN_ON(display->drm, !HAS_DDI(display)))
break;
fallthrough;
case INTEL_OUTPUT_DP:
@@ -4881,9 +4522,9 @@ static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *saved_state;
saved_state = intel_crtc_state_alloc(crtc);
@@ -4908,8 +4549,8 @@ intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
sizeof(saved_state->icl_port_dplls));
saved_state->crc_enabled = crtc_state->crc_enabled;
- if (IS_G4X(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (display->platform.g4x ||
+ display->platform.valleyview || display->platform.cherryview)
saved_state->wm = crtc_state->wm;
memcpy(crtc_state, saved_state, sizeof(*crtc_state));
@@ -4925,7 +4566,7 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
struct intel_crtc *crtc,
const struct intel_link_bw_limits *limits)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct drm_connector *connector;
@@ -4958,7 +4599,7 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
crtc_state->max_link_bpp_x16 = limits->max_bpp_x16[crtc->pipe];
if (crtc_state->pipe_bpp > fxp_q4_to_int(crtc_state->max_link_bpp_x16)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] Link bpp limited to " FXP_Q4_FMT "\n",
crtc->base.base.id, crtc->base.name,
FXP_Q4_ARGS(crtc_state->max_link_bpp_x16));
@@ -4988,7 +4629,7 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
continue;
if (!check_single_encoder_cloning(state, crtc, encoder)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] rejecting invalid cloning configuration\n",
encoder->base.base.id, encoder->base.name);
return -EINVAL;
@@ -5030,7 +4671,7 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
if (ret == -EDEADLK)
return ret;
if (ret < 0) {
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] config failure: %d\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] config failure: %d\n",
encoder->base.base.id, encoder->base.name, ret);
return ret;
}
@@ -5046,7 +4687,7 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
if (ret == -EDEADLK)
return ret;
if (ret < 0) {
- drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] config failure: %d\n",
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] config failure: %d\n",
crtc->base.base.id, crtc->base.name, ret);
return ret;
}
@@ -5057,7 +4698,7 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
*/
crtc_state->dither = (crtc_state->pipe_bpp == 6*3) &&
!crtc_state->dither_force_disable;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
crtc->base.base.id, crtc->base.name,
base_bpp, crtc_state->pipe_bpp, crtc_state->dither);
@@ -5189,7 +4830,7 @@ pipe_config_infoframe_mismatch(struct drm_printer *p, bool fastset,
const union hdmi_infoframe *a,
const union hdmi_infoframe *b)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const char *loglevel;
if (fastset) {
@@ -5204,9 +4845,9 @@ pipe_config_infoframe_mismatch(struct drm_printer *p, bool fastset,
pipe_config_mismatch(p, fastset, crtc, name, "infoframe");
drm_printf(p, "expected:\n");
- hdmi_infoframe_log(loglevel, i915->drm.dev, a);
+ hdmi_infoframe_log(loglevel, display->drm->dev, a);
drm_printf(p, "found:\n");
- hdmi_infoframe_log(loglevel, i915->drm.dev, b);
+ hdmi_infoframe_log(loglevel, display->drm->dev, b);
}
static void
@@ -5275,14 +4916,14 @@ pipe_config_pll_mismatch(struct drm_printer *p, bool fastset,
const struct intel_dpll_hw_state *a,
const struct intel_dpll_hw_state *b)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
pipe_config_mismatch(p, fastset, crtc, name, " "); /* stupid -Werror=format-zero-length */
drm_printf(p, "expected:\n");
- intel_dpll_dump_hw_state(i915, p, a);
+ intel_dpll_dump_hw_state(display, p, a);
drm_printf(p, "found:\n");
- intel_dpll_dump_hw_state(i915, p, b);
+ intel_dpll_dump_hw_state(display, p, b);
}
static void
@@ -5303,21 +4944,34 @@ pipe_config_cx0pll_mismatch(struct drm_printer *p, bool fastset,
intel_cx0pll_dump_hw_state(display, b);
}
+static bool allow_vblank_delay_fastset(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_display *display = to_intel_display(old_crtc_state);
+
+ /*
+ * Allow fastboot to fix up vblank delay (handled via LRR
+ * codepaths), a bit dodgy as the registers aren't
+ * double buffered but seems to be working more or less...
+ */
+ return HAS_LRR(display) && old_crtc_state->inherited &&
+ !intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI);
+}
+
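/*
 * For orientation only (condensed from two later hunks in this patch):
 * the new predicate is consumed by PIPE_CONF_CHECK_TIMINGS(), which skips
 * the crtc_vblank_start comparison when it returns true,
 *
 *	if (!fastset || !allow_vblank_delay_fastset(current_config))
 *		PIPE_CONF_CHECK_I(name.crtc_vblank_start);
 *
 * and by intel_crtc_check_fastset(), which re-enables LRR so the vblank
 * delay actually gets fixed up:
 *
 *	if (allow_vblank_delay_fastset(old_crtc_state))
 *		new_crtc_state->update_lrr = true;
 */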
bool
intel_pipe_config_compare(const struct intel_crtc_state *current_config,
const struct intel_crtc_state *pipe_config,
bool fastset)
{
struct intel_display *display = to_intel_display(current_config);
- struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_printer p;
+ u32 exclude_infoframes = 0;
bool ret = true;
if (fastset)
- p = drm_dbg_printer(&dev_priv->drm, DRM_UT_KMS, NULL);
+ p = drm_dbg_printer(display->drm, DRM_UT_KMS, NULL);
else
- p = drm_err_printer(&dev_priv->drm, NULL);
+ p = drm_err_printer(display->drm, NULL);
#define PIPE_CONF_CHECK_X(name) do { \
if (current_config->name != pipe_config->name) { \
@@ -5408,7 +5062,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} while (0)
#define PIPE_CONF_CHECK_PLL(name) do { \
- if (!intel_dpll_compare_hw_state(dev_priv, &current_config->name, \
+ if (!intel_dpll_compare_hw_state(display, &current_config->name, \
&pipe_config->name)) { \
pipe_config_pll_mismatch(&p, fastset, crtc, __stringify(name), \
&current_config->name, \
@@ -5435,7 +5089,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(name.crtc_hsync_start); \
PIPE_CONF_CHECK_I(name.crtc_hsync_end); \
PIPE_CONF_CHECK_I(name.crtc_vdisplay); \
- PIPE_CONF_CHECK_I(name.crtc_vblank_start); \
+ if (!fastset || !allow_vblank_delay_fastset(current_config)) \
+ PIPE_CONF_CHECK_I(name.crtc_vblank_start); \
PIPE_CONF_CHECK_I(name.crtc_vsync_start); \
PIPE_CONF_CHECK_I(name.crtc_vsync_end); \
if (!fastset || !pipe_config->update_lrr) { \
@@ -5583,8 +5238,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(output_format);
PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
- if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if ((DISPLAY_VER(display) < 8 && !display->platform.haswell) ||
+ display->platform.valleyview || display->platform.cherryview)
PIPE_CONF_CHECK_BOOL(limited_color_range);
PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
@@ -5600,7 +5255,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_X(gmch_pfit.control);
/* pfit ratios are autocomputed by the hw on gen4+ */
- if (DISPLAY_VER(dev_priv) < 4)
+ if (DISPLAY_VER(display) < 4)
PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
@@ -5620,7 +5275,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(pixel_rate);
PIPE_CONF_CHECK_X(gamma_mode);
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
PIPE_CONF_CHECK_X(cgm_mode);
else
PIPE_CONF_CHECK_X(csc_mode);
@@ -5638,37 +5293,23 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_CSC(output_csc);
}
- /*
- * Panel replay has to be enabled before link training. PSR doesn't have
- * this requirement -> check these only if using panel replay
- */
- if (current_config->active_planes &&
- (current_config->has_panel_replay ||
- pipe_config->has_panel_replay)) {
- PIPE_CONF_CHECK_BOOL(has_psr);
- PIPE_CONF_CHECK_BOOL(has_sel_update);
- PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
- PIPE_CONF_CHECK_BOOL(enable_psr2_su_region_et);
- PIPE_CONF_CHECK_BOOL(has_panel_replay);
- }
-
PIPE_CONF_CHECK_BOOL(double_wide);
- if (dev_priv->display.dpll.mgr)
+ if (display->dpll.mgr)
PIPE_CONF_CHECK_P(shared_dpll);
/* FIXME convert everything over the dpll_mgr */
- if (dev_priv->display.dpll.mgr || HAS_GMCH(dev_priv))
+ if (display->dpll.mgr || HAS_GMCH(display))
PIPE_CONF_CHECK_PLL(dpll_hw_state);
/* FIXME convert MTL+ platforms over to dpll_mgr */
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
PIPE_CONF_CHECK_PLL_CX0(dpll_hw_state.cx0pll);
PIPE_CONF_CHECK_X(dsi_pll.ctrl);
PIPE_CONF_CHECK_X(dsi_pll.div);
- if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
+ if (display->platform.g4x || DISPLAY_VER(display) >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
if (!fastset || !pipe_config->update_m_n) {
@@ -5680,19 +5321,21 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(min_voltage_level);
if (current_config->has_psr || pipe_config->has_psr)
- PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
- ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
- else
- PIPE_CONF_CHECK_X(infoframes.enable);
+ exclude_infoframes |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
+
+ if (current_config->vrr.enable || pipe_config->vrr.enable)
+ exclude_infoframes |= intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC);
+ PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable, ~exclude_infoframes);
PIPE_CONF_CHECK_X(infoframes.gcp);
PIPE_CONF_CHECK_INFOFRAME(avi);
PIPE_CONF_CHECK_INFOFRAME(spd);
PIPE_CONF_CHECK_INFOFRAME(hdmi);
- if (!fastset)
+ if (!fastset) {
PIPE_CONF_CHECK_INFOFRAME(drm);
+ PIPE_CONF_CHECK_DP_AS_SDP(as_sdp);
+ }
PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
- PIPE_CONF_CHECK_DP_AS_SDP(as_sdp);
PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
PIPE_CONF_CHECK_I(master_transcoder);
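/*
 * Sketch of what the infoframes.enable check a few lines up now amounts to,
 * assuming PIPE_CONF_CHECK_X_WITH_MASK(name, mask) compares the masked
 * values as its name suggests (the macro body is outside this diff):
 *
 *	u32 mask = ~exclude_infoframes; // PSR drops VSC, VRR drops ADAPTIVE_SYNC
 *
 *	if ((current_config->infoframes.enable & mask) !=
 *	    (pipe_config->infoframes.enable & mask))
 *		ret = false; // mismatch is dumped and the compare fails
 */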
@@ -5774,7 +5417,7 @@ intel_verify_planes(struct intel_atomic_state *state)
for_each_new_intel_plane_in_state(state, plane,
plane_state, i)
- assert_plane(plane, plane_state->planar_slave ||
+ assert_plane(plane, plane_state->is_y_plane ||
plane_state->uapi.visible);
}
@@ -5782,11 +5425,11 @@ static int intel_modeset_pipe(struct intel_atomic_state *state,
struct intel_crtc_state *crtc_state,
const char *reason)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
int ret;
- drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Full modeset due to %s\n",
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] Full modeset due to %s\n",
crtc->base.base.id, crtc->base.name, reason);
ret = drm_atomic_add_affected_connectors(&state->base,
@@ -5826,10 +5469,10 @@ static int intel_modeset_pipe(struct intel_atomic_state *state,
int intel_modeset_pipes_in_mask_early(struct intel_atomic_state *state,
const char *reason, u8 mask)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, mask) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, mask) {
struct intel_crtc_state *crtc_state;
int ret;
@@ -5873,10 +5516,10 @@ intel_crtc_flag_modeset(struct intel_crtc_state *crtc_state)
int intel_modeset_all_pipes_late(struct intel_atomic_state *state,
const char *reason)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state;
int ret;
@@ -5902,7 +5545,7 @@ int intel_modeset_all_pipes_late(struct intel_atomic_state *state,
return 0;
}
-int intel_modeset_commit_pipes(struct drm_i915_private *i915,
+int intel_modeset_commit_pipes(struct intel_display *display,
u8 pipe_mask,
struct drm_modeset_acquire_ctx *ctx)
{
@@ -5910,14 +5553,14 @@ int intel_modeset_commit_pipes(struct drm_i915_private *i915,
struct intel_crtc *crtc;
int ret;
- state = drm_atomic_state_alloc(&i915->drm);
+ state = drm_atomic_state_alloc(display->drm);
if (!state)
return -ENOMEM;
state->acquire_ctx = ctx;
to_intel_atomic_state(state)->internal = true;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) {
struct intel_crtc_state *crtc_state =
intel_atomic_get_crtc_state(state, crtc);
@@ -6016,38 +5659,49 @@ u8 intel_calc_active_pipes(struct intel_atomic_state *state,
static int intel_modeset_checks(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
state->modeset = true;
- if (IS_HASWELL(dev_priv))
+ if (display->platform.haswell)
return hsw_mode_set_planes_workaround(state);
return 0;
}
+static bool lrr_params_changed(const struct drm_display_mode *old_adjusted_mode,
+ const struct drm_display_mode *new_adjusted_mode)
+{
+ return old_adjusted_mode->crtc_vblank_start != new_adjusted_mode->crtc_vblank_start ||
+ old_adjusted_mode->crtc_vblank_end != new_adjusted_mode->crtc_vblank_end ||
+ old_adjusted_mode->crtc_vtotal != new_adjusted_mode->crtc_vtotal;
+}
+
static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
+ struct intel_display *display = to_intel_display(new_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
/* only allow LRR when the timings stay within the VRR range */
if (old_crtc_state->vrr.in_range != new_crtc_state->vrr.in_range)
new_crtc_state->update_lrr = false;
- if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
- drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] fastset requirement not met, forcing full modeset\n",
+ if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) {
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] fastset requirement not met, forcing full modeset\n",
crtc->base.base.id, crtc->base.name);
- else
+ } else {
+ if (allow_vblank_delay_fastset(old_crtc_state))
+ new_crtc_state->update_lrr = true;
new_crtc_state->uapi.mode_changed = false;
+ }
if (intel_compare_link_m_n(&old_crtc_state->dp_m_n,
&new_crtc_state->dp_m_n))
new_crtc_state->update_m_n = false;
- if ((old_crtc_state->hw.adjusted_mode.crtc_vtotal == new_crtc_state->hw.adjusted_mode.crtc_vtotal &&
- old_crtc_state->hw.adjusted_mode.crtc_vblank_end == new_crtc_state->hw.adjusted_mode.crtc_vblank_end))
+ if (!lrr_params_changed(&old_crtc_state->hw.adjusted_mode,
+ &new_crtc_state->hw.adjusted_mode))
new_crtc_state->update_lrr = false;
if (intel_crtc_needs_modeset(new_crtc_state))
@@ -6056,161 +5710,19 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta
new_crtc_state->update_pipe = true;
}
-static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- u8 plane_ids_mask)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_plane *plane;
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
- struct intel_plane_state *plane_state;
-
- if ((plane_ids_mask & BIT(plane->id)) == 0)
- continue;
-
- plane_state = intel_atomic_get_plane_state(state, plane);
- if (IS_ERR(plane_state))
- return PTR_ERR(plane_state);
- }
-
- return 0;
-}
-
-int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- const struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- return intel_crtc_add_planes_to_state(state, crtc,
- old_crtc_state->enabled_planes |
- new_crtc_state->enabled_planes);
-}
-
-static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
-{
- /* See {hsw,vlv,ivb}_plane_ratio() */
- return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
- IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
- IS_IVYBRIDGE(dev_priv);
-}
-
-static int intel_crtc_add_joiner_planes(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_crtc *other)
-{
- const struct intel_plane_state __maybe_unused *plane_state;
- struct intel_plane *plane;
- u8 plane_ids = 0;
- int i;
-
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- if (plane->pipe == crtc->pipe)
- plane_ids |= BIT(plane->id);
- }
-
- return intel_crtc_add_planes_to_state(state, other, plane_ids);
-}
-
-static int intel_joiner_add_affected_planes(struct intel_atomic_state *state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_crtc_state *crtc_state;
- struct intel_crtc *crtc;
- int i;
-
- for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- struct intel_crtc *other;
-
- for_each_intel_crtc_in_pipe_mask(&i915->drm, other,
- crtc_state->joiner_pipes) {
- int ret;
-
- if (crtc == other)
- continue;
-
- ret = intel_crtc_add_joiner_planes(state, crtc, other);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
-static int intel_atomic_check_planes(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc_state *old_crtc_state, *new_crtc_state;
- struct intel_plane_state __maybe_unused *plane_state;
- struct intel_plane *plane;
- struct intel_crtc *crtc;
- int i, ret;
-
- ret = icl_add_linked_planes(state);
- if (ret)
- return ret;
-
- ret = intel_joiner_add_affected_planes(state);
- if (ret)
- return ret;
-
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- ret = intel_plane_atomic_check(state, plane);
- if (ret) {
- drm_dbg_atomic(&dev_priv->drm,
- "[PLANE:%d:%s] atomic driver check failed\n",
- plane->base.base.id, plane->base.name);
- return ret;
- }
- }
-
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- u8 old_active_planes, new_active_planes;
-
- ret = icl_check_nv12_planes(state, crtc);
- if (ret)
- return ret;
-
- /*
- * On some platforms the number of active planes affects
- * the planes' minimum cdclk calculation. Add such planes
- * to the state before we compute the minimum cdclk.
- */
- if (!active_planes_affects_min_cdclk(dev_priv))
- continue;
-
- old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
- new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
-
- if (hweight8(old_active_planes) == hweight8(new_active_planes))
- continue;
-
- ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state __maybe_unused *crtc_state;
struct intel_crtc *crtc;
int i;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
int ret;
ret = intel_crtc_atomic_check(state, crtc);
if (ret) {
- drm_dbg_atomic(&i915->drm,
+ drm_dbg_atomic(display->drm,
"[CRTC:%d:%s] atomic driver check failed\n",
crtc->base.base.id, crtc->base.name);
return ret;
@@ -6257,7 +5769,7 @@ static bool intel_pipes_need_modeset(struct intel_atomic_state *state,
static int intel_atomic_check_joiner(struct intel_atomic_state *state,
struct intel_crtc *primary_crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *primary_crtc_state =
intel_atomic_get_new_crtc_state(state, primary_crtc);
struct intel_crtc *secondary_crtc;
@@ -6266,20 +5778,20 @@ static int intel_atomic_check_joiner(struct intel_atomic_state *state,
return 0;
/* sanity check */
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
primary_crtc->pipe != joiner_primary_pipe(primary_crtc_state)))
return -EINVAL;
- if (primary_crtc_state->joiner_pipes & ~joiner_pipes(i915)) {
- drm_dbg_kms(&i915->drm,
+ if (primary_crtc_state->joiner_pipes & ~joiner_pipes(display)) {
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] Cannot act as joiner primary "
"(need 0x%x as pipes, only 0x%x possible)\n",
primary_crtc->base.base.id, primary_crtc->base.name,
- primary_crtc_state->joiner_pipes, joiner_pipes(i915));
+ primary_crtc_state->joiner_pipes, joiner_pipes(display));
return -EINVAL;
}
- for_each_intel_crtc_in_pipe_mask(&i915->drm, secondary_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, secondary_crtc,
intel_crtc_joiner_secondary_pipes(primary_crtc_state)) {
struct intel_crtc_state *secondary_crtc_state;
int ret;
@@ -6290,7 +5802,7 @@ static int intel_atomic_check_joiner(struct intel_atomic_state *state,
/* primary being enabled, secondary was already configured? */
if (secondary_crtc_state->uapi.enable) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] secondary is enabled as normal CRTC, but "
"[CRTC:%d:%s] claiming this CRTC for joiner.\n",
secondary_crtc->base.base.id, secondary_crtc->base.name,
@@ -6309,7 +5821,7 @@ static int intel_atomic_check_joiner(struct intel_atomic_state *state,
drm_crtc_index(&secondary_crtc->base)))
return -EINVAL;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] Used as secondary for joiner primary [CRTC:%d:%s]\n",
secondary_crtc->base.base.id, secondary_crtc->base.name,
primary_crtc->base.base.id, primary_crtc->base.name);
@@ -6328,12 +5840,12 @@ static int intel_atomic_check_joiner(struct intel_atomic_state *state,
static void kill_joiner_secondaries(struct intel_atomic_state *state,
struct intel_crtc *primary_crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *primary_crtc_state =
intel_atomic_get_new_crtc_state(state, primary_crtc);
struct intel_crtc *secondary_crtc;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, secondary_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, secondary_crtc,
intel_crtc_joiner_secondary_pipes(primary_crtc_state)) {
struct intel_crtc_state *secondary_crtc_state =
intel_atomic_get_new_crtc_state(state, secondary_crtc);
@@ -6359,7 +5871,7 @@ static void kill_joiner_secondaries(struct intel_atomic_state *state,
* the intel_crtc_enable_flip_done() function.
*
* As soon as the surface address register is written, flip done interrupt is
- * generated and the requested events are sent to the usersapce in the interrupt
+ * generated and the requested events are sent to the userspace in the interrupt
* handler itself. The timestamp and sequence sent during the flip done event
* correspond to the last vblank and have no relation to the actual time when
* the flip done event was sent.
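/*
 * Illustrative only (generic DRM/libdrm userspace, not part of this patch):
 * a consumer of the flip done event described above, e.g. after an atomic
 * commit submitted with DRM_MODE_PAGE_FLIP_EVENT. As noted, sequence and
 * tv_sec/tv_usec carry the last vblank's values, not the flip time.
 */
#include <xf86drm.h>

static void page_flip_handler(int fd, unsigned int sequence,
			      unsigned int tv_sec, unsigned int tv_usec,
			      void *user_data)
{
	/* timestamp/sequence of the last vblank, per the comment above */
}

static void wait_flip_done(int drm_fd)
{
	drmEventContext evctx = {
		.version = DRM_EVENT_CONTEXT_VERSION,
		.page_flip_handler = page_flip_handler,
	};

	/* blocks until the DRM fd has an event, then calls the handler */
	drmHandleEvent(drm_fd, &evctx);
}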
@@ -6367,7 +5879,7 @@ static void kill_joiner_secondaries(struct intel_atomic_state *state,
static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_plane_state *old_plane_state;
@@ -6379,14 +5891,14 @@ static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
return 0;
if (!new_crtc_state->uapi.active) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] not active\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
if (intel_crtc_needs_modeset(new_crtc_state)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] modeset required\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
@@ -6397,7 +5909,7 @@ static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
* Remove this check once the issues are fixed.
*/
if (new_crtc_state->joiner_pipes) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] async flip disallowed with joiner\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
@@ -6416,14 +5928,14 @@ static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
* enabled in the atomic IOCTL path.
*/
if (!plane->async_flip) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] async flip not supported\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
}
if (!old_plane_state->uapi.fb || !new_plane_state->uapi.fb) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] no old or new framebuffer\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
@@ -6435,7 +5947,7 @@ static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
const struct intel_plane_state *new_plane_state, *old_plane_state;
struct intel_plane *plane;
@@ -6448,21 +5960,21 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
return 0;
if (!new_crtc_state->hw.active) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] not active\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
if (intel_crtc_needs_modeset(new_crtc_state)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] modeset required\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] Active planes cannot be in async flip\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
@@ -6478,7 +5990,7 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
* if we're really about to ask the hardware to perform
* an async flip. We should never get this far otherwise.
*/
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
new_crtc_state->do_async_flip && !plane->async_flip))
return -EINVAL;
@@ -6493,45 +6005,17 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
if (!plane->async_flip)
continue;
- /*
- * FIXME: This check is kept generic for all platforms.
- * Need to verify this for all gen9 platforms to enable
- * this selectively if required.
- */
- switch (new_plane_state->hw.fb->modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- /*
- * FIXME: Async on Linear buffer is supported on ICL as
- * but with additional alignment and fbc restrictions
- * need to be taken care of. These aren't applicable for
- * gen12+.
- */
- if (DISPLAY_VER(i915) < 12) {
- drm_dbg_kms(&i915->drm,
- "[PLANE:%d:%s] Modifier 0x%llx does not support async flip on display ver %d\n",
- plane->base.base.id, plane->base.name,
- new_plane_state->hw.fb->modifier, DISPLAY_VER(i915));
- return -EINVAL;
- }
- break;
-
- case I915_FORMAT_MOD_X_TILED:
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Yf_TILED:
- case I915_FORMAT_MOD_4_TILED:
- case I915_FORMAT_MOD_4_TILED_BMG_CCS:
- case I915_FORMAT_MOD_4_TILED_LNL_CCS:
- break;
- default:
- drm_dbg_kms(&i915->drm,
+ if (!intel_plane_can_async_flip(plane, new_plane_state->hw.fb->modifier)) {
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Modifier 0x%llx does not support async flip\n",
plane->base.base.id, plane->base.name,
new_plane_state->hw.fb->modifier);
return -EINVAL;
}
- if (new_plane_state->hw.fb->format->num_planes > 1) {
- drm_dbg_kms(&i915->drm,
+ if (intel_format_info_is_yuv_semiplanar(new_plane_state->hw.fb->format,
+ new_plane_state->hw.fb->modifier)) {
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Planar formats do not support async flips\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
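/*
 * The open-coded modifier switch removed above is replaced by the
 * intel_plane_can_async_flip(plane, modifier) call. Its implementation is
 * outside this diff; a hypothetical predicate reproducing the behaviour of
 * the removed switch (the name below is illustrative only) could look like:
 */
static bool example_can_async_flip(struct intel_display *display, u64 modifier)
{
	switch (modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		/* linear async flips only on display ver 12+, per the removed FIXME */
		return DISPLAY_VER(display) >= 12;
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
	case I915_FORMAT_MOD_4_TILED:
	case I915_FORMAT_MOD_4_TILED_BMG_CCS:
	case I915_FORMAT_MOD_4_TILED_LNL_CCS:
		return true;
	default:
		return false;
	}
}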
@@ -6546,7 +6030,7 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
if (old_plane_state->view.color_plane[0].mapping_stride !=
new_plane_state->view.color_plane[0].mapping_stride) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Stride cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
@@ -6554,7 +6038,7 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
if (old_plane_state->hw.fb->modifier !=
new_plane_state->hw.fb->modifier) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Modifier cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
@@ -6562,7 +6046,7 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
if (old_plane_state->hw.fb->format !=
new_plane_state->hw.fb->format) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Pixel format cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
@@ -6570,22 +6054,30 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
if (old_plane_state->hw.rotation !=
new_plane_state->hw.rotation) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Rotation cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
}
+ if (skl_plane_aux_dist(old_plane_state, 0) !=
+ skl_plane_aux_dist(new_plane_state, 0)) {
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] AUX_DIST cannot be changed in async flip\n",
+ plane->base.base.id, plane->base.name);
+ return -EINVAL;
+ }
+
if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
!drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Size/co-ordinates cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
}
if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANES:%d:%s] Alpha value cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
@@ -6593,21 +6085,21 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
if (old_plane_state->hw.pixel_blend_mode !=
new_plane_state->hw.pixel_blend_mode) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Pixel blend mode cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
}
if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Color encoding cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
}
if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Color range cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
@@ -6615,7 +6107,7 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
/* plane decryption is allow to change only in synchronous flips */
if (old_plane_state->decrypt != new_plane_state->decrypt) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Decryption cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
@@ -6627,7 +6119,7 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
static int intel_joiner_add_affected_crtcs(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_plane_state *plane_state;
struct intel_crtc_state *crtc_state;
struct intel_plane *plane;
@@ -6658,13 +6150,13 @@ static int intel_joiner_add_affected_crtcs(struct intel_atomic_state *state)
modeset_pipes |= crtc_state->joiner_pipes;
}
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, affected_pipes) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, affected_pipes) {
crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
}
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, modeset_pipes) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, modeset_pipes) {
int ret;
crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
@@ -6694,7 +6186,7 @@ static int intel_atomic_check_config(struct intel_atomic_state *state,
struct intel_link_bw_limits *limits,
enum pipe *failed_pipe)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int ret;
@@ -6719,7 +6211,7 @@ static int intel_atomic_check_config(struct intel_atomic_state *state,
continue;
}
- if (drm_WARN_ON(&i915->drm, intel_crtc_is_joiner_secondary(new_crtc_state)))
+ if (drm_WARN_ON(display->drm, intel_crtc_is_joiner_secondary(new_crtc_state)))
continue;
ret = intel_crtc_prepare_cleared_state(state, crtc);
@@ -6738,7 +6230,7 @@ static int intel_atomic_check_config(struct intel_atomic_state *state,
if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
- if (drm_WARN_ON(&i915->drm, intel_crtc_is_joiner_secondary(new_crtc_state)))
+ if (drm_WARN_ON(display->drm, intel_crtc_is_joiner_secondary(new_crtc_state)))
continue;
if (!new_crtc_state->hw.enable)
@@ -6803,7 +6295,6 @@ int intel_atomic_check(struct drm_device *dev,
struct drm_atomic_state *_state)
{
struct intel_display *display = to_intel_display(dev);
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
struct intel_crtc *crtc;
@@ -6851,7 +6342,7 @@ int intel_atomic_check(struct drm_device *dev,
continue;
if (intel_crtc_is_joiner_secondary(new_crtc_state)) {
- drm_WARN_ON(&dev_priv->drm, new_crtc_state->uapi.enable);
+ drm_WARN_ON(display->drm, new_crtc_state->uapi.enable);
continue;
}
@@ -6922,7 +6413,7 @@ int intel_atomic_check(struct drm_device *dev,
}
if (any_ms && !check_digital_port_conflicts(state)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"rejecting conflicting digital port configuration\n");
ret = -EINVAL;
goto fail;
@@ -6978,7 +6469,7 @@ int intel_atomic_check(struct drm_device *dev,
goto fail;
/* Either full modeset or fastset (or neither), never both */
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
intel_crtc_needs_modeset(new_crtc_state) &&
intel_crtc_needs_fastset(new_crtc_state));
@@ -7022,22 +6513,23 @@ static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
- intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
+ if (DISPLAY_VER(display) != 2 || crtc_state->active_planes)
+ intel_set_cpu_fifo_underrun_reporting(display, crtc->pipe, true);
if (crtc_state->has_pch_encoder) {
enum pipe pch_transcoder =
intel_crtc_pch_transcoder(crtc);
- intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
+ intel_set_pch_fifo_underrun_reporting(display, pch_transcoder, true);
}
}
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
+ struct intel_display *display = to_intel_display(new_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -7052,7 +6544,7 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
intel_set_pipe_src_size(new_crtc_state);
/* on skylake this is done by detaching scalers */
- if (DISPLAY_VER(dev_priv) >= 9) {
+ if (DISPLAY_VER(display) >= 9) {
if (new_crtc_state->pch_pfit.enabled)
skl_pfit_enable(new_crtc_state);
} else if (HAS_PCH_SPLIT(dev_priv)) {
@@ -7070,8 +6562,8 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
* HSW/BDW only really need this here for fastboot, after
* that the value should not change without a full modeset.
*/
- if (DISPLAY_VER(dev_priv) >= 9 ||
- IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ if (DISPLAY_VER(display) >= 9 ||
+ display->platform.broadwell || display->platform.haswell)
hsw_set_linetime_wm(new_crtc_state);
if (new_crtc_state->update_m_n)
@@ -7085,29 +6577,31 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
static void commit_pipe_pre_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
bool modeset = intel_crtc_needs_modeset(new_crtc_state);
+ drm_WARN_ON(display->drm, new_crtc_state->use_dsb);
+
/*
* During modesets pipe configuration was programmed as the
* CRTC was enabled.
*/
- if (!modeset && !new_crtc_state->use_dsb) {
+ if (!modeset) {
if (intel_crtc_needs_color_update(new_crtc_state))
intel_color_commit_arm(NULL, new_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+ if (DISPLAY_VER(display) >= 9 || display->platform.broadwell)
bdw_set_pipe_misc(NULL, new_crtc_state);
if (intel_crtc_needs_fastset(new_crtc_state))
intel_pipe_fastset(old_crtc_state, new_crtc_state);
}
- intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
+ intel_psr2_program_trans_man_trk_ctl(NULL, new_crtc_state);
intel_atomic_update_watermarks(state, crtc);
}
@@ -7115,18 +6609,20 @@ static void commit_pipe_pre_planes(struct intel_atomic_state *state,
static void commit_pipe_post_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ drm_WARN_ON(display->drm, new_crtc_state->use_dsb);
+
/*
* Disable the scaler(s) after the plane(s) so that we don't
* get a catastrophic underrun even if the two operations
* end up happening in two different frames.
*/
- if (DISPLAY_VER(dev_priv) >= 9 &&
+ if (DISPLAY_VER(display) >= 9 &&
!intel_crtc_needs_modeset(new_crtc_state))
- skl_detach_scalers(new_crtc_state);
+ skl_detach_scalers(NULL, new_crtc_state);
if (intel_crtc_vrr_enabling(state, crtc))
intel_vrr_enable(new_crtc_state);
@@ -7135,7 +6631,7 @@ static void commit_pipe_post_planes(struct intel_atomic_state *state,
static void intel_enable_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_crtc *pipe_crtc;
@@ -7143,7 +6639,7 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
if (!intel_crtc_needs_modeset(new_crtc_state))
return;
- for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
+ for_each_intel_crtc_in_pipe_mask_reverse(display->drm, pipe_crtc,
intel_crtc_joined_pipe_mask(new_crtc_state)) {
const struct intel_crtc_state *pipe_crtc_state =
intel_atomic_get_new_crtc_state(state, pipe_crtc);
@@ -7152,7 +6648,7 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
intel_crtc_update_active_timings(pipe_crtc_state, false);
}
- dev_priv->display.funcs.display->crtc_enable(state, crtc);
+ display->funcs.display->crtc_enable(state, crtc);
/* vblanks work again, re-enable pipe CRC. */
intel_crtc_enable_pipe_crc(crtc);
@@ -7161,7 +6657,7 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
static void intel_pre_update_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
@@ -7170,7 +6666,7 @@ static void intel_pre_update_crtc(struct intel_atomic_state *state,
if (old_crtc_state->inherited ||
intel_crtc_needs_modeset(new_crtc_state)) {
- if (HAS_DPT(i915))
+ if (HAS_DPT(display))
intel_dpt_configure(crtc);
}
@@ -7184,7 +6680,7 @@ static void intel_pre_update_crtc(struct intel_atomic_state *state,
if (intel_crtc_needs_fastset(new_crtc_state))
intel_encoders_update_pipe(state, crtc);
- if (DISPLAY_VER(i915) >= 11 &&
+ if (DISPLAY_VER(display) >= 11 &&
intel_crtc_needs_fastset(new_crtc_state))
icl_set_pipe_chicken(new_crtc_state);
@@ -7195,7 +6691,7 @@ static void intel_pre_update_crtc(struct intel_atomic_state *state,
intel_fbc_update(state, crtc);
- drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF));
+ drm_WARN_ON(display->drm, !intel_display_power_is_enabled(display, POWER_DOMAIN_DC_OFF));
if (!modeset &&
intel_crtc_needs_color_update(new_crtc_state) &&
@@ -7258,7 +6754,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc *pipe_crtc;
@@ -7267,13 +6763,13 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
* We need to disable pipe CRC before disabling the pipe,
* or we race against vblank off.
*/
- for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, pipe_crtc,
intel_crtc_joined_pipe_mask(old_crtc_state))
intel_crtc_disable_pipe_crc(pipe_crtc);
- dev_priv->display.funcs.display->crtc_disable(state, crtc);
+ display->funcs.display->crtc_disable(state, crtc);
- for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, pipe_crtc,
intel_crtc_joined_pipe_mask(old_crtc_state)) {
const struct intel_crtc_state *new_pipe_crtc_state =
intel_atomic_get_new_crtc_state(state, pipe_crtc);
@@ -7288,7 +6784,7 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
u8 disable_pipes = 0;
@@ -7355,7 +6851,7 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
disable_pipes &= ~intel_crtc_joined_pipe_mask(old_crtc_state);
}
- drm_WARN_ON(&i915->drm, disable_pipes);
+ drm_WARN_ON(display->drm, disable_pipes);
}
static void intel_commit_modeset_enables(struct intel_atomic_state *state)
@@ -7382,7 +6878,7 @@ static void intel_commit_modeset_enables(struct intel_atomic_state *state)
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
@@ -7524,8 +7020,9 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
if ((update_pipes & BIT(pipe)) == 0)
continue;
- drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
- entries, I915_MAX_PIPES, pipe));
+ drm_WARN_ON(display->drm,
+ skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
+ entries, I915_MAX_PIPES, pipe));
entries[pipe] = new_crtc_state->wm.skl.ddb;
update_pipes &= ~BIT(pipe);
@@ -7533,8 +7030,8 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
intel_update_crtc(state, crtc);
}
- drm_WARN_ON(&dev_priv->drm, modeset_pipes);
- drm_WARN_ON(&dev_priv->drm, update_pipes);
+ drm_WARN_ON(display->drm, modeset_pipes);
+ drm_WARN_ON(display->drm, update_pipes);
}
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
@@ -7579,7 +7076,7 @@ static void intel_atomic_cleanup_work(struct work_struct *work)
{
struct intel_atomic_state *state =
container_of(work, struct intel_atomic_state, cleanup_work);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *old_crtc_state;
struct intel_crtc *crtc;
int i;
@@ -7587,14 +7084,14 @@ static void intel_atomic_cleanup_work(struct work_struct *work)
for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i)
intel_atomic_dsb_cleanup(old_crtc_state);
- drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
+ drm_atomic_helper_cleanup_planes(display->drm, &state->base);
drm_atomic_helper_commit_cleanup_done(&state->base);
drm_atomic_state_put(&state->base);
}
static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_plane *plane;
struct intel_plane_state *plane_state;
int i;
@@ -7631,21 +7128,14 @@ static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *s
&plane_state->ccval,
sizeof(plane_state->ccval));
/* The above could only fail if the FB obj has an unexpected backing store type. */
- drm_WARN_ON(&i915->drm, ret);
+ drm_WARN_ON(display->drm, ret);
}
}
static void intel_atomic_dsb_prepare(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- intel_color_prepare_commit(state, crtc);
-}
-
-static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -7657,15 +7147,21 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
/* FIXME deal with everything */
new_crtc_state->use_dsb =
- new_crtc_state->update_planes &&
- !new_crtc_state->vrr.enable &&
!new_crtc_state->do_async_flip &&
- !new_crtc_state->has_psr &&
- !new_crtc_state->scaler_state.scaler_users &&
- !old_crtc_state->scaler_state.scaler_users &&
+ (DISPLAY_VER(display) >= 20 || !new_crtc_state->has_psr) &&
!intel_crtc_needs_modeset(new_crtc_state) &&
!intel_crtc_needs_fastset(new_crtc_state);
+ intel_color_prepare_commit(state, crtc);
+}
+
+static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
if (!new_crtc_state->use_dsb && !new_crtc_state->dsb_color_vblank)
return;
@@ -7689,6 +7185,14 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
intel_crtc_planes_update_noarm(new_crtc_state->dsb_commit,
state, crtc);
+ /*
+ * Ensure we have "Frame Change" event when PSR state is
+ * SRDENT(PSR1) or DEEP_SLEEP(PSR2). Otherwise DSB vblank
+ * evasion hangs as PIPEDSL is reading as 0.
+ */
+ intel_psr_trigger_frame_change_event(new_crtc_state->dsb_commit,
+ state, crtc);
+
intel_dsb_vblank_evade(state, new_crtc_state->dsb_commit);
if (intel_crtc_needs_color_update(new_crtc_state))
@@ -7696,12 +7200,21 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
new_crtc_state);
bdw_set_pipe_misc(new_crtc_state->dsb_commit,
new_crtc_state);
+ intel_psr2_program_trans_man_trk_ctl(new_crtc_state->dsb_commit,
+ new_crtc_state);
intel_crtc_planes_update_arm(new_crtc_state->dsb_commit,
state, crtc);
+ if (DISPLAY_VER(display) >= 9)
+ skl_detach_scalers(new_crtc_state->dsb_commit,
+ new_crtc_state);
+
if (!new_crtc_state->dsb_color_vblank) {
intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1);
+
+ intel_vrr_send_push(new_crtc_state->dsb_commit, new_crtc_state);
intel_dsb_wait_vblank_delay(state, new_crtc_state->dsb_commit);
+ intel_vrr_check_push_sent(new_crtc_state->dsb_commit, new_crtc_state);
intel_dsb_interrupt(new_crtc_state->dsb_commit);
}
}
@@ -7715,8 +7228,8 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
- struct drm_device *dev = state->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(state);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
@@ -7728,11 +7241,14 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_atomic_commit_fence_wait(state);
- intel_td_flush(dev_priv);
+ intel_td_flush(display);
intel_atomic_prepare_plane_clear_colors(state);
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
+ intel_fbc_prepare_dirty_rect(state, crtc);
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
intel_atomic_dsb_finish(state, crtc);
drm_atomic_helper_wait_for_dependencies(&state->base);
@@ -7766,7 +7282,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* the CSC latched register values with the readout (see
* skl_read_csc() and skl_color_commit_noarm()).
*/
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);
+ wakeref = intel_display_power_get(display, POWER_DOMAIN_DC_OFF);
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
@@ -7792,7 +7308,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_pmdemand_pre_plane_update(state);
if (state->modeset) {
- drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
+ drm_atomic_helper_update_legacy_modeset_state(display->drm, &state->base);
intel_set_cdclk_pre_plane_update(state);
@@ -7807,10 +7323,10 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
/* Complete events for now disable pipes here. */
if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
- spin_lock_irq(&dev->event_lock);
+ spin_lock_irq(&display->drm->event_lock);
drm_crtc_send_vblank_event(&crtc->base,
new_crtc_state->uapi.event);
- spin_unlock_irq(&dev->event_lock);
+ spin_unlock_irq(&display->drm->event_lock);
new_crtc_state->uapi.event = NULL;
}
@@ -7826,13 +7342,10 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
- dev_priv->display.funcs.display->commit_modeset_enables(state);
+ display->funcs.display->commit_modeset_enables(state);
intel_program_dpkgc_latency(state);
- if (state->modeset)
- intel_set_cdclk_post_plane_update(state);
-
intel_wait_for_vblank_workers(state);
/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
@@ -7844,13 +7357,16 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* - switch over to the vblank wait helper in the core after that since
* we don't need our special handling any more.
*/
- drm_atomic_helper_wait_for_flip_done(dev, &state->base);
+ drm_atomic_helper_wait_for_flip_done(display->drm, &state->base);
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->do_async_flip)
intel_crtc_disable_flip_done(state, crtc);
intel_atomic_dsb_wait_commit(new_crtc_state);
+
+ if (!state->base.legacy_cursor_update && !new_crtc_state->use_dsb)
+ intel_vrr_check_push_sent(NULL, new_crtc_state);
}
/*
@@ -7870,8 +7386,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* chance of catching underruns with the intermediate watermarks
* vs. the new plane configuration.
*/
- if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
- intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
+ if (DISPLAY_VER(display) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
+ intel_set_cpu_fifo_underrun_reporting(display, crtc->pipe, true);
intel_optimize_watermarks(state, crtc);
}
@@ -7899,13 +7415,15 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
}
/* Underruns don't always raise interrupts, so check manually */
- intel_check_cpu_fifo_underruns(dev_priv);
- intel_check_pch_fifo_underruns(dev_priv);
+ intel_check_cpu_fifo_underruns(display);
+ intel_check_pch_fifo_underruns(display);
if (state->modeset)
intel_verify_planes(state);
intel_sagv_post_plane_update(state);
+ if (state->modeset)
+ intel_set_cdclk_post_plane_update(state);
intel_pmdemand_post_plane_update(state);
drm_atomic_helper_commit_hw_done(&state->base);
@@ -7924,7 +7442,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* Delay re-enabling DC states by 17 ms to avoid the off->on->off
* toggling overhead at and above 60 FPS.
*/
- intel_display_power_put_async_delay(dev_priv, POWER_DOMAIN_DC_OFF, wakeref, 17);
+ intel_display_power_put_async_delay(display, POWER_DOMAIN_DC_OFF, wakeref, 17);
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
/*
@@ -7936,7 +7454,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* down.
*/
INIT_WORK(&state->cleanup_work, intel_atomic_cleanup_work);
- queue_work(dev_priv->display.wq.cleanup, &state->cleanup_work);
+ queue_work(display->wq.cleanup, &state->cleanup_work);
}
static void intel_atomic_commit_work(struct work_struct *work)
@@ -7995,6 +7513,7 @@ static int intel_atomic_swap_state(struct intel_atomic_state *state)
int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
bool nonblock)
{
+ struct intel_display *display = to_intel_display(dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
@@ -8018,7 +7537,7 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
* FIXME doing watermarks and fb cleanup from a vblank worker
* (assuming we had any) would solve these problems.
*/
- if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
+ if (DISPLAY_VER(display) < 9 && state->base.legacy_cursor_update) {
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int i;
@@ -8031,7 +7550,7 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
ret = intel_atomic_prepare_commit(state);
if (ret) {
- drm_dbg_atomic(&dev_priv->drm,
+ drm_dbg_atomic(display->drm,
"Preparing state failed with %i\n", ret);
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
@@ -8051,38 +7570,25 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
if (nonblock && state->modeset) {
- queue_work(dev_priv->display.wq.modeset, &state->base.commit_work);
+ queue_work(display->wq.modeset, &state->base.commit_work);
} else if (nonblock) {
- queue_work(dev_priv->display.wq.flip, &state->base.commit_work);
+ queue_work(display->wq.flip, &state->base.commit_work);
} else {
if (state->modeset)
- flush_workqueue(dev_priv->display.wq.modeset);
+ flush_workqueue(display->wq.modeset);
intel_atomic_commit_tail(state);
}
return 0;
}
-/**
- * intel_plane_destroy - destroy a plane
- * @plane: plane to destroy
- *
- * Common destruction function for all types of planes (primary, cursor,
- * sprite).
- */
-void intel_plane_destroy(struct drm_plane *plane)
-{
- drm_plane_cleanup(plane);
- kfree(to_intel_plane(plane));
-}
-
static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->base.dev;
+ struct intel_display *display = to_intel_display(encoder);
struct intel_encoder *source_encoder;
u32 possible_clones = 0;
- for_each_intel_encoder(dev, source_encoder) {
+ for_each_intel_encoder(display->drm, source_encoder) {
if (encoders_cloneable(encoder, source_encoder))
possible_clones |= drm_encoder_mask(&source_encoder->base);
}
@@ -8092,76 +7598,78 @@ static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->base.dev;
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc;
u32 possible_crtcs = 0;
- for_each_intel_crtc_in_pipe_mask(dev, crtc, encoder->pipe_mask)
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, encoder->pipe_mask)
possible_crtcs |= drm_crtc_mask(&crtc->base);
return possible_crtcs;
}
-static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
+static bool ilk_has_edp_a(struct intel_display *display)
{
- if (!IS_MOBILE(dev_priv))
+ if (!display->platform.mobile)
return false;
- if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
+ if ((intel_de_read(display, DP_A) & DP_DETECTED) == 0)
return false;
- if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
+ if (display->platform.ironlake && (intel_de_read(display, FUSE_STRAP) & ILK_eDP_A_DISABLE))
return false;
return true;
}
-static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
+static bool intel_ddi_crt_present(struct intel_display *display)
{
- if (DISPLAY_VER(dev_priv) >= 9)
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ if (DISPLAY_VER(display) >= 9)
return false;
- if (IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv))
+ if (display->platform.haswell_ult || display->platform.broadwell_ult)
return false;
if (HAS_PCH_LPT_H(dev_priv) &&
- intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
+ intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
return false;
/* DDI E can't be used if DDI A requires 4 lanes */
- if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+ if (intel_de_read(display, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
return false;
- if (!dev_priv->display.vbt.int_crt_support)
+ if (!display->vbt.int_crt_support)
return false;
return true;
}
-bool assert_port_valid(struct drm_i915_private *i915, enum port port)
+bool assert_port_valid(struct intel_display *display, enum port port)
{
- return !drm_WARN(&i915->drm, !(DISPLAY_RUNTIME_INFO(i915)->port_mask & BIT(port)),
+ return !drm_WARN(display->drm, !(DISPLAY_RUNTIME_INFO(display)->port_mask & BIT(port)),
"Platform does not support port %c\n", port_name(port));
}
-void intel_setup_outputs(struct drm_i915_private *dev_priv)
+void intel_setup_outputs(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
bool dpd_is_edp = false;
intel_pps_unlock_regs_wa(display);
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- if (HAS_DDI(dev_priv)) {
- if (intel_ddi_crt_present(dev_priv))
+ if (HAS_DDI(display)) {
+ if (intel_ddi_crt_present(display))
intel_crt_init(display);
intel_bios_for_each_encoder(display, intel_ddi_init);
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
vlv_dsi_init(dev_priv);
} else if (HAS_PCH_SPLIT(dev_priv)) {
int found;
@@ -8176,33 +7684,33 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
dpd_is_edp = intel_dp_is_port_edp(display, PORT_D);
- if (ilk_has_edp_a(dev_priv))
- g4x_dp_init(dev_priv, DP_A, PORT_A);
+ if (ilk_has_edp_a(display))
+ g4x_dp_init(display, DP_A, PORT_A);
- if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
+ if (intel_de_read(display, PCH_HDMIB) & SDVO_DETECTED) {
/* PCH SDVOB multiplex with HDMIB */
- found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
+ found = intel_sdvo_init(display, PCH_SDVOB, PORT_B);
if (!found)
- g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
- if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
- g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
+ g4x_hdmi_init(display, PCH_HDMIB, PORT_B);
+ if (!found && (intel_de_read(display, PCH_DP_B) & DP_DETECTED))
+ g4x_dp_init(display, PCH_DP_B, PORT_B);
}
- if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
- g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
+ if (intel_de_read(display, PCH_HDMIC) & SDVO_DETECTED)
+ g4x_hdmi_init(display, PCH_HDMIC, PORT_C);
- if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
- g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
+ if (!dpd_is_edp && intel_de_read(display, PCH_HDMID) & SDVO_DETECTED)
+ g4x_hdmi_init(display, PCH_HDMID, PORT_D);
- if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
- g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);
+ if (intel_de_read(display, PCH_DP_C) & DP_DETECTED)
+ g4x_dp_init(display, PCH_DP_C, PORT_C);
- if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
- g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (intel_de_read(display, PCH_DP_D) & DP_DETECTED)
+ g4x_dp_init(display, PCH_DP_D, PORT_D);
+ } else if (display->platform.valleyview || display->platform.cherryview) {
bool has_edp, has_port;
- if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support)
+ if (display->platform.valleyview && display->vbt.int_crt_support)
intel_crt_init(display);
/*
@@ -8222,87 +7730,87 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
*/
has_edp = intel_dp_is_port_edp(display, PORT_B);
has_port = intel_bios_is_port_present(display, PORT_B);
- if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
- has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
- if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
- g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
+ if (intel_de_read(display, VLV_DP_B) & DP_DETECTED || has_port)
+ has_edp &= g4x_dp_init(display, VLV_DP_B, PORT_B);
+ if ((intel_de_read(display, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
+ g4x_hdmi_init(display, VLV_HDMIB, PORT_B);
has_edp = intel_dp_is_port_edp(display, PORT_C);
has_port = intel_bios_is_port_present(display, PORT_C);
- if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
- has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
- if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
- g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
+ if (intel_de_read(display, VLV_DP_C) & DP_DETECTED || has_port)
+ has_edp &= g4x_dp_init(display, VLV_DP_C, PORT_C);
+ if ((intel_de_read(display, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
+ g4x_hdmi_init(display, VLV_HDMIC, PORT_C);
- if (IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.cherryview) {
/*
* eDP not supported on port D,
* so no need to worry about it
*/
has_port = intel_bios_is_port_present(display, PORT_D);
- if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
- g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
- if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
- g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
+ if (intel_de_read(display, CHV_DP_D) & DP_DETECTED || has_port)
+ g4x_dp_init(display, CHV_DP_D, PORT_D);
+ if (intel_de_read(display, CHV_HDMID) & SDVO_DETECTED || has_port)
+ g4x_hdmi_init(display, CHV_HDMID, PORT_D);
}
vlv_dsi_init(dev_priv);
- } else if (IS_PINEVIEW(dev_priv)) {
+ } else if (display->platform.pineview) {
intel_lvds_init(dev_priv);
intel_crt_init(display);
- } else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
+ } else if (IS_DISPLAY_VER(display, 3, 4)) {
bool found = false;
- if (IS_MOBILE(dev_priv))
+ if (display->platform.mobile)
intel_lvds_init(dev_priv);
intel_crt_init(display);
- if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
- drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
- found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
- if (!found && IS_G4X(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (intel_de_read(display, GEN3_SDVOB) & SDVO_DETECTED) {
+ drm_dbg_kms(display->drm, "probing SDVOB\n");
+ found = intel_sdvo_init(display, GEN3_SDVOB, PORT_B);
+ if (!found && display->platform.g4x) {
+ drm_dbg_kms(display->drm,
"probing HDMI on SDVOB\n");
- g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
+ g4x_hdmi_init(display, GEN4_HDMIB, PORT_B);
}
- if (!found && IS_G4X(dev_priv))
- g4x_dp_init(dev_priv, DP_B, PORT_B);
+ if (!found && display->platform.g4x)
+ g4x_dp_init(display, DP_B, PORT_B);
}
/* Before G4X, SDVOC doesn't have its own detect register */
- if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
- drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
- found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
+ if (intel_de_read(display, GEN3_SDVOB) & SDVO_DETECTED) {
+ drm_dbg_kms(display->drm, "probing SDVOC\n");
+ found = intel_sdvo_init(display, GEN3_SDVOC, PORT_C);
}
- if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
+ if (!found && (intel_de_read(display, GEN3_SDVOC) & SDVO_DETECTED)) {
- if (IS_G4X(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (display->platform.g4x) {
+ drm_dbg_kms(display->drm,
"probing HDMI on SDVOC\n");
- g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
+ g4x_hdmi_init(display, GEN4_HDMIC, PORT_C);
}
- if (IS_G4X(dev_priv))
- g4x_dp_init(dev_priv, DP_C, PORT_C);
+ if (display->platform.g4x)
+ g4x_dp_init(display, DP_C, PORT_C);
}
- if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
- g4x_dp_init(dev_priv, DP_D, PORT_D);
+ if (display->platform.g4x && (intel_de_read(display, DP_D) & DP_DETECTED))
+ g4x_dp_init(display, DP_D, PORT_D);
- if (SUPPORTS_TV(dev_priv))
+ if (SUPPORTS_TV(display))
intel_tv_init(display);
- } else if (DISPLAY_VER(dev_priv) == 2) {
- if (IS_I85X(dev_priv))
+ } else if (DISPLAY_VER(display) == 2) {
+ if (display->platform.i85x)
intel_lvds_init(dev_priv);
intel_crt_init(display);
intel_dvo_init(dev_priv);
}
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
encoder->base.possible_crtcs =
intel_encoder_possible_crtcs(encoder);
encoder->base.possible_clones =
@@ -8311,12 +7819,11 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_init_pch_refclk(dev_priv);
- drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
+ drm_helper_move_panel_connectors_to_head(display->drm);
}
-static int max_dotclock(struct drm_i915_private *i915)
+static int max_dotclock(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
int max_dotclock = display->cdclk.max_dotclk_freq;
if (HAS_ULTRAJOINER(display))
@@ -8330,7 +7837,7 @@ static int max_dotclock(struct drm_i915_private *i915)
enum drm_mode_status intel_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(dev);
int hdisplay_max, htotal_max;
int vdisplay_max, vtotal_max;
@@ -8367,22 +7874,22 @@ enum drm_mode_status intel_mode_valid(struct drm_device *dev,
* Reject clearly excessive dotclocks early to
* avoid having to worry about huge integers later.
*/
- if (mode->clock > max_dotclock(dev_priv))
+ if (mode->clock > max_dotclock(display))
return MODE_CLOCK_HIGH;
/* Transcoder timing limits */
- if (DISPLAY_VER(dev_priv) >= 11) {
+ if (DISPLAY_VER(display) >= 11) {
hdisplay_max = 16384;
vdisplay_max = 8192;
htotal_max = 16384;
vtotal_max = 8192;
- } else if (DISPLAY_VER(dev_priv) >= 9 ||
- IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ } else if (DISPLAY_VER(display) >= 9 ||
+ display->platform.broadwell || display->platform.haswell) {
hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
vdisplay_max = 4096;
htotal_max = 8192;
vtotal_max = 8192;
- } else if (DISPLAY_VER(dev_priv) >= 3) {
+ } else if (DISPLAY_VER(display) >= 3) {
hdisplay_max = 4096;
vdisplay_max = 4096;
htotal_max = 8192;
@@ -8409,14 +7916,14 @@ enum drm_mode_status intel_mode_valid(struct drm_device *dev,
return MODE_OK;
}
-enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *dev_priv,
+enum drm_mode_status intel_cpu_transcoder_mode_valid(struct intel_display *display,
const struct drm_display_mode *mode)
{
/*
* Additional transcoder timing limits,
* excluding BXT/GLK DSI transcoders.
*/
- if (DISPLAY_VER(dev_priv) >= 5) {
+ if (DISPLAY_VER(display) >= 5) {
if (mode->hdisplay < 64 ||
mode->htotal - mode->hdisplay < 32)
return MODE_H_ILLEGAL;
@@ -8435,7 +7942,7 @@ enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *de
* Cantiga+ cannot handle modes with a hsync front porch of 0.
* WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
*/
- if ((DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) &&
+ if ((DISPLAY_VER(display) >= 5 || display->platform.g4x) &&
mode->hsync_start == mode->hdisplay)
return MODE_H_ILLEGAL;
@@ -8443,7 +7950,7 @@ enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *de
}
enum drm_mode_status
-intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
+intel_mode_valid_max_plane_size(struct intel_display *display,
const struct drm_display_mode *mode,
int num_joined_pipes)
{
@@ -8453,7 +7960,7 @@ intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
* intel_mode_valid() should be
* sufficient on older platforms.
*/
- if (DISPLAY_VER(dev_priv) < 9)
+ if (DISPLAY_VER(display) < 9)
return MODE_OK;
/*
@@ -8461,10 +7968,10 @@ intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
* plane so let's not advertise modes that are
* too big for that.
*/
- if (DISPLAY_VER(dev_priv) >= 30) {
+ if (DISPLAY_VER(display) >= 30) {
plane_width_max = 6144 * num_joined_pipes;
plane_height_max = 4800;
- } else if (DISPLAY_VER(dev_priv) >= 11) {
+ } else if (DISPLAY_VER(display) >= 11) {
plane_width_max = 5120 * num_joined_pipes;
plane_height_max = 4320;
} else {
@@ -8528,32 +8035,34 @@ static const struct intel_display_funcs i9xx_display_funcs = {
/**
* intel_init_display_hooks - initialize the display modesetting hooks
- * @dev_priv: device private
+ * @display: display device private
*/
-void intel_init_display_hooks(struct drm_i915_private *dev_priv)
+void intel_init_display_hooks(struct intel_display *display)
{
- if (DISPLAY_VER(dev_priv) >= 9) {
- dev_priv->display.funcs.display = &skl_display_funcs;
- } else if (HAS_DDI(dev_priv)) {
- dev_priv->display.funcs.display = &ddi_display_funcs;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ if (DISPLAY_VER(display) >= 9) {
+ display->funcs.display = &skl_display_funcs;
+ } else if (HAS_DDI(display)) {
+ display->funcs.display = &ddi_display_funcs;
} else if (HAS_PCH_SPLIT(dev_priv)) {
- dev_priv->display.funcs.display = &pch_split_display_funcs;
- } else if (IS_CHERRYVIEW(dev_priv) ||
- IS_VALLEYVIEW(dev_priv)) {
- dev_priv->display.funcs.display = &vlv_display_funcs;
+ display->funcs.display = &pch_split_display_funcs;
+ } else if (display->platform.cherryview ||
+ display->platform.valleyview) {
+ display->funcs.display = &vlv_display_funcs;
} else {
- dev_priv->display.funcs.display = &i9xx_display_funcs;
+ display->funcs.display = &i9xx_display_funcs;
}
}
-int intel_initial_commit(struct drm_device *dev)
+int intel_initial_commit(struct intel_display *display)
{
struct drm_atomic_state *state = NULL;
struct drm_modeset_acquire_ctx ctx;
struct intel_crtc *crtc;
int ret = 0;
- state = drm_atomic_state_alloc(dev);
+ state = drm_atomic_state_alloc(display->drm);
if (!state)
return -ENOMEM;
@@ -8563,7 +8072,7 @@ int intel_initial_commit(struct drm_device *dev)
to_intel_atomic_state(state)->internal = true;
retry:
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
intel_atomic_get_crtc_state(state, crtc);
@@ -8587,7 +8096,7 @@ retry:
*/
crtc_state->uapi.color_mgmt_changed = true;
- for_each_intel_encoder_mask(dev, encoder,
+ for_each_intel_encoder_mask(display->drm, encoder,
crtc_state->uapi.encoder_mask) {
if (encoder->initial_fastset_check &&
!encoder->initial_fastset_check(encoder, crtc_state)) {
@@ -8725,26 +8234,9 @@ void i830_disable_pipe(struct intel_display *display, enum pipe pipe)
intel_de_posting_read(display, DPLL(display, pipe));
}
-void intel_hpd_poll_fini(struct drm_i915_private *i915)
+bool intel_scanout_needs_vtd_wa(struct intel_display *display)
{
- struct intel_connector *connector;
- struct drm_connector_list_iter conn_iter;
-
- /* Kill all the work that may have been queued by hpd. */
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
- for_each_intel_connector_iter(connector, &conn_iter) {
- if (connector->modeset_retry_work.func &&
- cancel_work_sync(&connector->modeset_retry_work))
- drm_connector_put(&connector->base);
- if (connector->hdcp.shim) {
- cancel_delayed_work_sync(&connector->hdcp.check_work);
- cancel_work_sync(&connector->hdcp.prop_work);
- }
- }
- drm_connector_list_iter_end(&conn_iter);
-}
+ struct drm_i915_private *i915 = to_i915(display->drm);
-bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915)
-{
- return DISPLAY_VER(i915) >= 6 && i915_vtd_active(i915);
+ return IS_DISPLAY_VER(display, 6, 11) && i915_vtd_active(i915);
}
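/*
 * Editor's illustrative sketch (not part of the patch): the intel_display.c
 * hunks above all follow one conversion pattern, replacing struct
 * drm_i915_private ("dev_priv"/"i915") with struct intel_display as the
 * display-facing handle. The helpers used here (to_intel_display(),
 * DISPLAY_VER(), display->platform.*, intel_de_read()) and the DP_A /
 * DP_DETECTED register bits all appear in the diff itself; the function
 * name example_port_a_detected() is hypothetical and only demonstrates the
 * shape of the converted code.
 */
static bool example_port_a_detected(struct intel_encoder *encoder)
{
	/* Derive the display struct from the object, instead of dev_priv. */
	struct intel_display *display = to_intel_display(encoder);

	/* Version and platform checks now take the display pointer ... */
	if (DISPLAY_VER(display) >= 9 || display->platform.haswell)
		return true;

	/* ... and so do the MMIO accessors. */
	return intel_de_read(display, DP_A) & DP_DETECTED;
}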
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 49a246feb1ae..3b54a62c290a 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -59,8 +59,6 @@ struct intel_link_m_n;
struct intel_plane;
struct intel_plane_state;
struct intel_power_domain_mask;
-struct intel_remapped_info;
-struct intel_rotation_info;
struct pci_dev;
struct work_struct;
@@ -413,24 +411,22 @@ enum phy_fia {
i)
int intel_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
-int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
u8 intel_calc_active_pipes(struct intel_atomic_state *state,
u8 active_pipes);
void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
int bw_overhead,
struct intel_link_m_n *m_n);
-u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
+u32 intel_plane_fb_max_stride(struct drm_device *drm,
u32 pixel_format, u64 modifier);
enum drm_mode_status
-intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
+intel_mode_valid_max_plane_size(struct intel_display *display,
const struct drm_display_mode *mode,
int num_joined_pipes);
enum drm_mode_status
-intel_cpu_transcoder_mode_valid(struct drm_i915_private *i915,
+intel_cpu_transcoder_mode_valid(struct intel_display *display,
const struct drm_display_mode *mode);
-enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
+enum phy intel_port_to_phy(struct intel_display *display, enum port port);
bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
bool is_trans_port_sync_master(const struct intel_crtc_state *state);
u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state);
@@ -450,7 +446,6 @@ bool intel_pipe_config_compare(const struct intel_crtc_state *current_config,
const struct intel_crtc_state *pipe_config,
bool fastset);
-void intel_plane_destroy(struct drm_plane *plane);
void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state);
@@ -462,25 +457,16 @@ int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
const char *name, u32 reg, int ref_freq);
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
const char *name, u32 reg);
-void intel_init_display_hooks(struct drm_i915_private *dev_priv);
-unsigned int intel_fb_xy_to_linear(int x, int y,
- const struct intel_plane_state *state,
- int plane);
-void intel_add_fb_offsets(int *x, int *y,
- const struct intel_plane_state *state, int plane);
-unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
-unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info);
-bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
+bool intel_has_pending_fb_unpin(struct intel_display *display);
void intel_encoder_destroy(struct drm_encoder *encoder);
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder);
void intel_encoder_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state);
-bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy);
-bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy);
-bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy);
-enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
- enum port port);
+bool intel_phy_is_combo(struct intel_display *display, enum phy phy);
+bool intel_phy_is_tc(struct intel_display *display, enum phy phy);
+bool intel_phy_is_snps(struct intel_display *display, enum phy phy);
+enum tc_port intel_port_to_tc(struct intel_display *display, enum port port);
enum phy intel_encoder_to_phy(struct intel_encoder *encoder);
bool intel_encoder_is_combo(struct intel_encoder *encoder);
@@ -489,22 +475,19 @@ bool intel_encoder_is_tc(struct intel_encoder *encoder);
enum tc_port intel_encoder_to_tc(struct intel_encoder *encoder);
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp);
-void vlv_wait_port_ready(struct intel_display *display,
- struct intel_digital_port *dig_port,
- unsigned int expected_mask);
bool intel_fuzzy_clock_check(int clock1, int clock2);
void intel_zero_m_n(struct intel_link_m_n *m_n);
-void intel_set_m_n(struct drm_i915_private *i915,
+void intel_set_m_n(struct intel_display *display,
const struct intel_link_m_n *m_n,
i915_reg_t data_m_reg, i915_reg_t data_n_reg,
i915_reg_t link_m_reg, i915_reg_t link_n_reg);
-void intel_get_m_n(struct drm_i915_private *i915,
+void intel_get_m_n(struct intel_display *display,
struct intel_link_m_n *m_n,
i915_reg_t data_m_reg, i915_reg_t data_n_reg,
i915_reg_t link_m_reg, i915_reg_t link_n_reg);
-bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
+bool intel_cpu_transcoder_has_m2_n2(struct intel_display *display,
enum transcoder transcoder);
void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc,
enum transcoder cpu_transcoder,
@@ -525,13 +508,9 @@ enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port);
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
-void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
-
int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc);
unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state);
-bool intel_plane_uses_fence(const struct intel_plane_state *plane_state);
-
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state);
@@ -542,8 +521,6 @@ void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
bool visible);
void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state);
-void intel_update_watermarks(struct drm_i915_private *i915);
-
bool intel_crtc_vrr_disabling(struct intel_atomic_state *state,
struct intel_crtc *crtc);
@@ -552,7 +529,7 @@ int intel_modeset_pipes_in_mask_early(struct intel_atomic_state *state,
const char *reason, u8 pipe_mask);
int intel_modeset_all_pipes_late(struct intel_atomic_state *state,
const char *reason);
-int intel_modeset_commit_pipes(struct drm_i915_private *i915,
+int intel_modeset_commit_pipes(struct intel_display *display,
u8 pipe_mask,
struct drm_modeset_acquire_ctx *ctx);
void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
@@ -561,25 +538,23 @@ void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc,
struct intel_power_domain_mask *domains);
/* interface for intel_display_driver.c */
-void intel_setup_outputs(struct drm_i915_private *i915);
-int intel_initial_commit(struct drm_device *dev);
-void intel_panel_sanitize_ssc(struct drm_i915_private *i915);
-void intel_update_czclk(struct drm_i915_private *i915);
-void intel_atomic_helper_free_state_worker(struct work_struct *work);
+void intel_init_display_hooks(struct intel_display *display);
+void intel_setup_outputs(struct intel_display *display);
+int intel_initial_commit(struct intel_display *display);
+void intel_panel_sanitize_ssc(struct intel_display *display);
+void intel_update_czclk(struct intel_display *display);
enum drm_mode_status intel_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode);
int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
bool nonblock);
-void intel_hpd_poll_fini(struct drm_i915_private *i915);
-
/* modesetting asserts */
-void assert_transcoder(struct drm_i915_private *dev_priv,
+void assert_transcoder(struct intel_display *display,
enum transcoder cpu_transcoder, bool state);
#define assert_transcoder_enabled(d, t) assert_transcoder(d, t, true)
#define assert_transcoder_disabled(d, t) assert_transcoder(d, t, false)
-bool assert_port_valid(struct drm_i915_private *i915, enum port port);
+bool assert_port_valid(struct intel_display *display, enum port port);
/*
* Use INTEL_DISPLAY_STATE_WARN(x) (rather than WARN() and WARN_ON()) for hw
@@ -596,7 +571,7 @@ bool assert_port_valid(struct drm_i915_private *i915, enum port port);
unlikely(__ret_warn_on); \
})
-bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915);
+bool intel_scanout_needs_vtd_wa(struct intel_display *display);
int intel_crtc_num_joined_pipes(const struct intel_crtc_state *crtc_state);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index 554870d2494b..eeb7ae3eaea8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -91,6 +91,7 @@ struct intel_wm_funcs {
struct intel_crtc *crtc);
int (*compute_global_watermarks)(struct intel_atomic_state *state);
void (*get_hw_state)(struct drm_i915_private *i915);
+ void (*sanitize)(struct drm_i915_private *i915);
};
struct intel_audio_state {
@@ -386,7 +387,6 @@ struct intel_display {
struct {
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
- struct work_struct suspend_work;
} fbdev;
struct {
@@ -512,6 +512,8 @@ struct intel_display {
/* restore state for suspend/resume and display reset */
struct drm_atomic_state *modeset_state;
struct drm_modeset_acquire_ctx reset_ctx;
+ /* modeset stuck tracking for reset */
+ atomic_t pending_fb_pin;
u32 saveDSPARB;
u32 saveSWF0[16];
u32 saveSWF1[16];
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index f1d76484025a..fdedf65bee53 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -49,11 +49,6 @@ static struct intel_display *node_to_intel_display(struct drm_info_node *node)
return to_intel_display(node->minor->dev);
}
-static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
-{
- return to_i915(node->minor->dev);
-}
-
static int intel_display_caps(struct seq_file *m, void *data)
{
struct intel_display *display = node_to_intel_display(m->private);
@@ -68,44 +63,45 @@ static int intel_display_caps(struct seq_file *m, void *data)
static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_display *display = node_to_intel_display(m->private);
- spin_lock(&dev_priv->display.fb_tracking.lock);
+ spin_lock(&display->fb_tracking.lock);
seq_printf(m, "FB tracking busy bits: 0x%08x\n",
- dev_priv->display.fb_tracking.busy_bits);
+ display->fb_tracking.busy_bits);
seq_printf(m, "FB tracking flip bits: 0x%08x\n",
- dev_priv->display.fb_tracking.flip_bits);
+ display->fb_tracking.flip_bits);
- spin_unlock(&dev_priv->display.fb_tracking.lock);
+ spin_unlock(&display->fb_tracking.lock);
return 0;
}
static int i915_sr_status(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_display *display = node_to_intel_display(m->private);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
intel_wakeref_t wakeref;
bool sr_enabled = false;
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ wakeref = intel_display_power_get(display, POWER_DOMAIN_INIT);
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
/* no global SR status; inspect per-plane WM */;
else if (HAS_PCH_SPLIT(dev_priv))
- sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM_LP_ENABLE;
- else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
- IS_I945G(dev_priv) || IS_I945GM(dev_priv))
- sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
- else if (IS_I915GM(dev_priv))
- sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
- else if (IS_PINEVIEW(dev_priv))
- sr_enabled = intel_de_read(dev_priv, DSPFW3(dev_priv)) & PINEVIEW_SELF_REFRESH_EN;
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
+ sr_enabled = intel_de_read(display, WM1_LP_ILK) & WM_LP_ENABLE;
+ else if (display->platform.i965gm || display->platform.g4x ||
+ display->platform.i945g || display->platform.i945gm)
+ sr_enabled = intel_de_read(display, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ else if (display->platform.i915gm)
+ sr_enabled = intel_de_read(display, INSTPM) & INSTPM_SELF_EN;
+ else if (display->platform.pineview)
+ sr_enabled = intel_de_read(display, DSPFW3(display)) & PINEVIEW_SELF_REFRESH_EN;
+ else if (display->platform.valleyview || display->platform.cherryview)
+ sr_enabled = intel_de_read(display, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+
+ intel_display_power_put(display, POWER_DOMAIN_INIT, wakeref);
seq_printf(m, "self-refresh: %s\n", str_enabled_disabled(sr_enabled));
@@ -114,12 +110,11 @@ static int i915_sr_status(struct seq_file *m, void *unused)
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_display *display = node_to_intel_display(m->private);
struct intel_framebuffer *fbdev_fb = NULL;
struct drm_framebuffer *drm_fb;
-#ifdef CONFIG_DRM_FBDEV_EMULATION
- fbdev_fb = intel_fbdev_framebuffer(dev_priv->display.fbdev.fbdev);
+ fbdev_fb = intel_fbdev_framebuffer(display->fbdev.fbdev);
if (fbdev_fb) {
seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
fbdev_fb->base.width,
@@ -131,10 +126,9 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
intel_bo_describe(m, intel_fb_bo(&fbdev_fb->base));
seq_putc(m, '\n');
}
-#endif
- mutex_lock(&dev_priv->drm.mode_config.fb_lock);
- drm_for_each_fb(drm_fb, &dev_priv->drm) {
+ mutex_lock(&display->drm->mode_config.fb_lock);
+ drm_for_each_fb(drm_fb, display->drm) {
struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
if (fb == fbdev_fb)
continue;
@@ -149,16 +143,16 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
intel_bo_describe(m, intel_fb_bo(&fb->base));
seq_putc(m, '\n');
}
- mutex_unlock(&dev_priv->drm.mode_config.fb_lock);
+ mutex_unlock(&display->drm->mode_config.fb_lock);
return 0;
}
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
- struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct intel_display *display = node_to_intel_display(m->private);
- intel_display_power_debug(i915, m);
+ intel_display_power_debug(display, m);
return 0;
}
@@ -178,14 +172,14 @@ static void intel_encoder_info(struct seq_file *m,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_display *display = node_to_intel_display(m->private);
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector;
seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
encoder->base.base.id, encoder->base.name);
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
const struct drm_connector_state *conn_state =
connector->state;
@@ -213,38 +207,6 @@ static void intel_panel_info(struct seq_file *m,
intel_seq_print_mode(m, 2, fixed_mode);
}
-static void intel_hdcp_info(struct seq_file *m,
- struct intel_connector *intel_connector,
- bool remote_req)
-{
- bool hdcp_cap = false, hdcp2_cap = false;
-
- if (!intel_connector->hdcp.shim) {
- seq_puts(m, "No Connector Support");
- goto out;
- }
-
- if (remote_req) {
- intel_hdcp_get_remote_capability(intel_connector,
- &hdcp_cap,
- &hdcp2_cap);
- } else {
- hdcp_cap = intel_hdcp_get_capability(intel_connector);
- hdcp2_cap = intel_hdcp2_get_capability(intel_connector);
- }
-
- if (hdcp_cap)
- seq_puts(m, "HDCP1.4 ");
- if (hdcp2_cap)
- seq_puts(m, "HDCP2.2 ");
-
- if (!hdcp_cap && !hdcp2_cap)
- seq_puts(m, "None");
-
-out:
- seq_puts(m, "\n");
-}
-
static void intel_dp_info(struct seq_file *m, struct intel_connector *connector)
{
struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
@@ -297,7 +259,7 @@ static void intel_connector_info(struct seq_file *m,
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
- if (intel_connector->mst_port)
+ if (intel_connector->mst.dp)
intel_dp_mst_info(m, intel_connector);
else
intel_dp_info(m, intel_connector);
@@ -309,12 +271,7 @@ static void intel_connector_info(struct seq_file *m,
break;
}
- seq_puts(m, "\tHDCP version: ");
- if (intel_connector->mst_port) {
- intel_hdcp_info(m, intel_connector, true);
- seq_puts(m, "\tMST Hub HDCP version: ");
- }
- intel_hdcp_info(m, intel_connector, false);
+ intel_hdcp_info(m, intel_connector);
seq_printf(m, "\tmax bpc: %u\n", connector->display_info.bpc);
@@ -365,8 +322,8 @@ static const char *plane_visibility(const struct intel_plane_state *plane_state)
if (plane_state->uapi.visible)
return "visible";
- if (plane_state->planar_slave)
- return "planar-slave";
+ if (plane_state->is_y_plane)
+ return "Y plane";
return "hidden";
}
@@ -399,7 +356,7 @@ static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
if (plane_state->planar_linked_plane)
seq_printf(m, "\t\tplanar: Linked to [PLANE:%d:%s] as a %s\n",
plane_state->planar_linked_plane->base.base.id, plane_state->planar_linked_plane->base.name,
- plane_state->planar_slave ? "slave" : "master");
+ plane_state->is_y_plane ? "Y plane" : "UV plane");
}
static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
@@ -427,10 +384,10 @@ static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_display *display = node_to_intel_display(m->private);
struct intel_plane *plane;
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
plane->base.base.id, plane->base.name,
plane_type(plane->base.type));
@@ -573,7 +530,7 @@ static void crtc_updates_add(struct intel_crtc *crtc)
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_display *display = node_to_intel_display(m->private);
struct drm_printer p = drm_seq_file_printer(m);
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -607,7 +564,7 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
intel_vdsc_state_dump(&p, 1, crtc_state);
- for_each_intel_encoder_mask(&dev_priv->drm, encoder,
+ for_each_intel_encoder_mask(display->drm, encoder,
crtc_state->uapi.encoder_mask)
intel_encoder_info(m, crtc, encoder);
@@ -622,7 +579,8 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
static int i915_display_info(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_display *display = node_to_intel_display(m->private);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
@@ -630,22 +588,22 @@ static int i915_display_info(struct seq_file *m, void *unused)
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- drm_modeset_lock_all(&dev_priv->drm);
+ drm_modeset_lock_all(display->drm);
seq_printf(m, "CRTC info\n");
seq_printf(m, "---------\n");
- for_each_intel_crtc(&dev_priv->drm, crtc)
+ for_each_intel_crtc(display->drm, crtc)
intel_crtc_info(m, crtc);
seq_printf(m, "\n");
seq_printf(m, "Connector info\n");
seq_printf(m, "--------------\n");
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter)
intel_connector_info(m, connector);
drm_connector_list_iter_end(&conn_iter);
- drm_modeset_unlock_all(&dev_priv->drm);
+ drm_modeset_unlock_all(display->drm);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
@@ -654,56 +612,56 @@ static int i915_display_info(struct seq_file *m, void *unused)
static int i915_display_capabilities(struct seq_file *m, void *unused)
{
- struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct intel_display *display = node_to_intel_display(m->private);
struct drm_printer p = drm_seq_file_printer(m);
- intel_display_device_info_print(DISPLAY_INFO(i915),
- DISPLAY_RUNTIME_INFO(i915), &p);
+ intel_display_device_info_print(DISPLAY_INFO(display),
+ DISPLAY_RUNTIME_INFO(display), &p);
return 0;
}
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_display *display = node_to_intel_display(m->private);
struct drm_printer p = drm_seq_file_printer(m);
struct intel_shared_dpll *pll;
int i;
- drm_modeset_lock_all(&dev_priv->drm);
+ drm_modeset_lock_all(display->drm);
drm_printf(&p, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
- dev_priv->display.dpll.ref_clks.nssc,
- dev_priv->display.dpll.ref_clks.ssc);
+ display->dpll.ref_clks.nssc,
+ display->dpll.ref_clks.ssc);
- for_each_shared_dpll(dev_priv, pll, i) {
+ for_each_shared_dpll(display, pll, i) {
drm_printf(&p, "DPLL%i: %s, id: %i\n", pll->index,
pll->info->name, pll->info->id);
drm_printf(&p, " pipe_mask: 0x%x, active: 0x%x, on: %s\n",
pll->state.pipe_mask, pll->active_mask,
str_yes_no(pll->on));
drm_printf(&p, " tracked hardware state:\n");
- intel_dpll_dump_hw_state(dev_priv, &p, &pll->state.hw_state);
+ intel_dpll_dump_hw_state(display, &p, &pll->state.hw_state);
}
- drm_modeset_unlock_all(&dev_priv->drm);
+ drm_modeset_unlock_all(display->drm);
return 0;
}
static int i915_ddb_info(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_display *display = node_to_intel_display(m->private);
struct skl_ddb_entry *entry;
struct intel_crtc *crtc;
- if (DISPLAY_VER(dev_priv) < 9)
+ if (DISPLAY_VER(display) < 9)
return -ENODEV;
- drm_modeset_lock_all(&dev_priv->drm);
+ drm_modeset_lock_all(display->drm);
seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
enum pipe pipe = crtc->pipe;
@@ -723,16 +681,16 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
entry->end, skl_ddb_entry_size(entry));
}
- drm_modeset_unlock_all(&dev_priv->drm);
+ drm_modeset_unlock_all(display->drm);
return 0;
}
static bool
-intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
+intel_lpsp_power_well_enabled(struct intel_display *display,
enum i915_power_well_id power_well_id)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref;
bool is_enabled;
@@ -746,15 +704,15 @@ intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
static int i915_lpsp_status(struct seq_file *m, void *unused)
{
- struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct intel_display *display = node_to_intel_display(m->private);
bool lpsp_enabled = false;
- if (DISPLAY_VER(i915) >= 13 || IS_DISPLAY_VER(i915, 9, 10)) {
- lpsp_enabled = !intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2);
- } else if (IS_DISPLAY_VER(i915, 11, 12)) {
- lpsp_enabled = !intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3);
- } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
- lpsp_enabled = !intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL);
+ if (DISPLAY_VER(display) >= 13 || IS_DISPLAY_VER(display, 9, 10)) {
+ lpsp_enabled = !intel_lpsp_power_well_enabled(display, SKL_DISP_PW_2);
+ } else if (IS_DISPLAY_VER(display, 11, 12)) {
+ lpsp_enabled = !intel_lpsp_power_well_enabled(display, ICL_DISP_PW_3);
+ } else if (display->platform.haswell || display->platform.broadwell) {
+ lpsp_enabled = !intel_lpsp_power_well_enabled(display, HSW_DISP_PW_GLOBAL);
} else {
seq_puts(m, "LPSP: not supported\n");
return 0;
@@ -767,13 +725,13 @@ static int i915_lpsp_status(struct seq_file *m, void *unused)
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_display *display = node_to_intel_display(m->private);
struct intel_encoder *intel_encoder;
struct intel_digital_port *dig_port;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
continue;
@@ -789,7 +747,7 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
dig_port->base.base.base.id,
dig_port->base.base.name);
- drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr);
+ drm_dp_mst_dump_topology(m, &dig_port->dp.mst.mgr);
}
drm_connector_list_iter_end(&conn_iter);
@@ -801,7 +759,7 @@ i915_fifo_underrun_reset_write(struct file *filp,
const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- struct drm_i915_private *dev_priv = filp->private_data;
+ struct intel_display *display = filp->private_data;
struct intel_crtc *crtc;
int ret;
bool reset;
@@ -813,7 +771,7 @@ i915_fifo_underrun_reset_write(struct file *filp,
if (!reset)
return cnt;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct drm_crtc_commit *commit;
struct intel_crtc_state *crtc_state;
@@ -830,7 +788,7 @@ i915_fifo_underrun_reset_write(struct file *filp,
}
if (!ret && crtc_state->hw.active) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Re-arming FIFO underruns on pipe %c\n",
pipe_name(crtc->pipe));
@@ -843,7 +801,7 @@ i915_fifo_underrun_reset_write(struct file *filp,
return ret;
}
- intel_fbc_reset_underrun(&dev_priv->display);
+ intel_fbc_reset_underrun(display);
return cnt;
}
@@ -869,13 +827,13 @@ static const struct drm_info_list intel_display_debugfs_list[] = {
{"i915_lpsp_status", i915_lpsp_status, 0},
};
-void intel_display_debugfs_register(struct drm_i915_private *i915)
+void intel_display_debugfs_register(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
- struct drm_minor *minor = i915->drm.primary;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct drm_minor *minor = display->drm->primary;
debugfs_create_file("i915_fifo_underrun_reset", 0644, minor->debugfs_root,
- to_i915(minor->dev), &i915_fifo_underrun_reset_ops);
+ display, &i915_fifo_underrun_reset_ops);
drm_debugfs_create_files(intel_display_debugfs_list,
ARRAY_SIZE(intel_display_debugfs_list),
@@ -893,37 +851,10 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
intel_display_debugfs_params(display);
}
-static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
-{
- struct intel_connector *connector = m->private;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- int ret;
-
- ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
- if (ret)
- return ret;
-
- if (!connector->base.encoder ||
- connector->base.status != connector_status_connected) {
- ret = -ENODEV;
- goto out;
- }
-
- seq_printf(m, "%s:%d HDCP version: ", connector->base.name,
- connector->base.base.id);
- intel_hdcp_info(m, connector, false);
-
-out:
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
-
- return ret;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
-
static int i915_lpsp_capability_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = m->private;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_encoder *encoder = intel_attached_encoder(connector);
int connector_type = connector->base.connector_type;
bool lpsp_capable = false;
@@ -934,24 +865,24 @@ static int i915_lpsp_capability_show(struct seq_file *m, void *data)
if (connector->base.status != connector_status_connected)
return -ENODEV;
- if (DISPLAY_VER(i915) >= 13)
+ if (DISPLAY_VER(display) >= 13)
lpsp_capable = encoder->port <= PORT_B;
- else if (DISPLAY_VER(i915) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
/*
* Actually TGL can drive LPSP on ports up to DDI_C,
* but there is no physically connected DDI_C on TGL SKUs,
- * even driver is not initilizing DDI_C port for gen12.
+ * even driver is not initializing DDI_C port for gen12.
*/
lpsp_capable = encoder->port <= PORT_B;
- else if (DISPLAY_VER(i915) == 11)
+ else if (DISPLAY_VER(display) == 11)
lpsp_capable = (connector_type == DRM_MODE_CONNECTOR_DSI ||
connector_type == DRM_MODE_CONNECTOR_eDP);
- else if (IS_DISPLAY_VER(i915, 9, 10))
+ else if (IS_DISPLAY_VER(display, 9, 10))
lpsp_capable = (encoder->port == PORT_A &&
(connector_type == DRM_MODE_CONNECTOR_DSI ||
connector_type == DRM_MODE_CONNECTOR_eDP ||
connector_type == DRM_MODE_CONNECTOR_DisplayPort));
- else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ else if (display->platform.haswell || display->platform.broadwell)
lpsp_capable = connector_type == DRM_MODE_CONNECTOR_eDP;
seq_printf(m, "LPSP: %s\n", lpsp_capable ? "capable" : "incapable");
@@ -963,7 +894,7 @@ DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = m->private;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct drm_crtc *crtc;
struct intel_dp *intel_dp;
struct drm_modeset_acquire_ctx ctx;
@@ -975,7 +906,7 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
do {
try_again = false;
- ret = drm_modeset_lock(&i915->drm.mode_config.connection_mutex,
+ ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex,
&ctx);
if (ret) {
if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
@@ -1036,7 +967,7 @@ static ssize_t i915_dsc_fec_support_write(struct file *file,
{
struct seq_file *m = file->private_data;
struct intel_connector *connector = m->private;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_encoder *encoder = intel_attached_encoder(connector);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
bool dsc_enable = false;
@@ -1045,14 +976,14 @@ static ssize_t i915_dsc_fec_support_write(struct file *file,
if (len == 0)
return 0;
- drm_dbg(&i915->drm,
+ drm_dbg(display->drm,
"Copied %zu bytes from user to force DSC\n", len);
ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
if (ret < 0)
return ret;
- drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
+ drm_dbg(display->drm, "Got %s for DSC Enable\n",
(dsc_enable) ? "true" : "false");
intel_dp->force_dsc_en = dsc_enable;
@@ -1079,7 +1010,7 @@ static const struct file_operations i915_dsc_fec_support_fops = {
static int i915_dsc_bpc_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = m->private;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_encoder *encoder = intel_attached_encoder(connector);
struct drm_crtc *crtc;
struct intel_crtc_state *crtc_state;
@@ -1088,7 +1019,7 @@ static int i915_dsc_bpc_show(struct seq_file *m, void *data)
if (!encoder)
return -ENODEV;
- ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ ret = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (ret)
return ret;
@@ -1101,7 +1032,7 @@ static int i915_dsc_bpc_show(struct seq_file *m, void *data)
crtc_state = to_intel_crtc_state(crtc->state);
seq_printf(m, "Input_BPC: %d\n", crtc_state->dsc.config.bits_per_component);
-out: drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+out: drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return ret;
}
@@ -1145,7 +1076,7 @@ static const struct file_operations i915_dsc_bpc_fops = {
static int i915_dsc_output_format_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = m->private;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_encoder *encoder = intel_attached_encoder(connector);
struct drm_crtc *crtc;
struct intel_crtc_state *crtc_state;
@@ -1154,7 +1085,7 @@ static int i915_dsc_output_format_show(struct seq_file *m, void *data)
if (!encoder)
return -ENODEV;
- ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ ret = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (ret)
return ret;
@@ -1168,7 +1099,7 @@ static int i915_dsc_output_format_show(struct seq_file *m, void *data)
seq_printf(m, "DSC_Output_Format: %s\n",
intel_output_format_name(crtc_state->output_format));
-out: drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+out: drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return ret;
}
@@ -1212,7 +1143,7 @@ static const struct file_operations i915_dsc_output_format_fops = {
static int i915_dsc_fractional_bpp_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = m->private;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_encoder *encoder = intel_attached_encoder(connector);
struct drm_crtc *crtc;
struct intel_dp *intel_dp;
@@ -1221,7 +1152,7 @@ static int i915_dsc_fractional_bpp_show(struct seq_file *m, void *data)
if (!encoder)
return -ENODEV;
- ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ ret = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (ret)
return ret;
@@ -1236,7 +1167,7 @@ static int i915_dsc_fractional_bpp_show(struct seq_file *m, void *data)
str_yes_no(intel_dp->force_dsc_fractional_bpp_en));
out:
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return ret;
}
@@ -1247,8 +1178,8 @@ static ssize_t i915_dsc_fractional_bpp_write(struct file *file,
{
struct seq_file *m = file->private_data;
struct intel_connector *connector = m->private;
+ struct intel_display *display = to_intel_display(connector);
struct intel_encoder *encoder = intel_attached_encoder(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
bool dsc_fractional_bpp_enable = false;
int ret;
@@ -1256,14 +1187,14 @@ static ssize_t i915_dsc_fractional_bpp_write(struct file *file,
if (len == 0)
return 0;
- drm_dbg(&i915->drm,
+ drm_dbg(display->drm,
"Copied %zu bytes from user to force fractional bpp for DSC\n", len);
ret = kstrtobool_from_user(ubuf, len, &dsc_fractional_bpp_enable);
if (ret < 0)
return ret;
- drm_dbg(&i915->drm, "Got %s for DSC Fractional BPP Enable\n",
+ drm_dbg(display->drm, "Got %s for DSC Fractional BPP Enable\n",
(dsc_fractional_bpp_enable) ? "true" : "false");
intel_dp->force_dsc_fractional_bpp_en = dsc_fractional_bpp_enable;
@@ -1392,7 +1323,7 @@ static const struct file_operations i915_joiner_fops = {
*/
void intel_connector_debugfs_add(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct dentry *root = connector->base.debugfs_entry;
int connector_type = connector->base.connector_type;
@@ -1401,20 +1332,14 @@ void intel_connector_debugfs_add(struct intel_connector *connector)
return;
intel_drrs_connector_debugfs_add(connector);
+ intel_hdcp_connector_debugfs_add(connector);
intel_pps_connector_debugfs_add(connector);
intel_psr_connector_debugfs_add(connector);
intel_alpm_lobf_debugfs_add(connector);
intel_dp_link_training_debugfs_add(connector);
- if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
- connector_type == DRM_MODE_CONNECTOR_HDMIA ||
- connector_type == DRM_MODE_CONNECTOR_HDMIB) {
- debugfs_create_file("i915_hdcp_sink_capability", 0444, root,
- connector, &i915_hdcp_sink_capability_fops);
- }
-
- if (DISPLAY_VER(i915) >= 11 &&
- ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !connector->mst_port) ||
+ if (DISPLAY_VER(display) >= 11 &&
+ ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !connector->mst.dp) ||
connector_type == DRM_MODE_CONNECTOR_eDP)) {
debugfs_create_file("i915_dsc_fec_support", 0644, root,
connector, &i915_dsc_fec_support_fops);
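
The debugfs hunks above all apply the same conversion: the connector-scoped callbacks drop the drm_i915_private pointer obtained via to_i915(connector->base.dev), derive a struct intel_display with to_intel_display(connector) instead, and reach the DRM device through display->drm for logging and modeset locking. A minimal sketch of that idiom, using a hypothetical show callback (example_connector_show is not part of the patch; the helpers it calls are the same ones used in the hunks above):

/* Illustrative only: the struct-conversion idiom used throughout the hunks above. */
static int example_connector_show(struct seq_file *m, void *data)
{
        struct intel_connector *connector = m->private;
        /* display-centric handle instead of the old drm_i915_private one */
        struct intel_display *display = to_intel_display(connector);
        int ret;

        /* the modeset lock now comes from display->drm rather than &i915->drm */
        ret = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
        if (ret)
                return ret;

        seq_printf(m, "connector: %s\n", connector->base.name);

        drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
        return 0;
}
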
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.h b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
index e1f479b7acd1..82af2f608111 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.h
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
@@ -6,16 +6,16 @@
#ifndef __INTEL_DISPLAY_DEBUGFS_H__
#define __INTEL_DISPLAY_DEBUGFS_H__
-struct drm_i915_private;
struct intel_connector;
struct intel_crtc;
+struct intel_display;
#ifdef CONFIG_DEBUG_FS
-void intel_display_debugfs_register(struct drm_i915_private *i915);
+void intel_display_debugfs_register(struct intel_display *display);
void intel_connector_debugfs_add(struct intel_connector *connector);
void intel_crtc_debugfs_add(struct intel_crtc *crtc);
#else
-static inline void intel_display_debugfs_register(struct drm_i915_private *i915) {}
+static inline void intel_display_debugfs_register(struct intel_display *display) {}
static inline void intel_connector_debugfs_add(struct intel_connector *connector) {}
static inline void intel_crtc_debugfs_add(struct intel_crtc *crtc) {}
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index 68cb7f9b9ef3..738ae522c8f4 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -1357,6 +1357,12 @@ static const struct intel_display_device_info xe2_hpd_display = {
BIT(PORT_TC1) | BIT(PORT_TC2) | BIT(PORT_TC3) | BIT(PORT_TC4),
};
+static const u16 mtl_u_ids[] = {
+ INTEL_MTL_U_IDS(ID),
+ INTEL_ARL_U_IDS(ID),
+ 0
+};
+
/*
* Do not initialize the .info member of the platform desc for GMD ID based
* platforms. Their display will be probed automatically based on the IP version
@@ -1364,6 +1370,13 @@ static const struct intel_display_device_info xe2_hpd_display = {
*/
static const struct platform_desc mtl_desc = {
PLATFORM(meteorlake),
+ .subplatforms = (const struct subplatform_desc[]) {
+ {
+ SUBPLATFORM(meteorlake, u),
+ .pciidlist = mtl_u_ids,
+ },
+ {},
+ }
};
static const struct platform_desc lnl_desc = {
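
mtl_u_ids is a zero-terminated array of PCI device IDs, and the new .subplatforms entry binds it to the meteorlake_u subplatform via .pciidlist. The probe code that walks such a list is outside this hunk; purely as an illustration, a zero-terminated u16 list of this shape would typically be scanned like so (device_id_in_list is a made-up helper, not the driver's lookup code):

/* Hypothetical helper: scan a zero-terminated PCI device ID list
 * like mtl_u_ids for a match. Not the driver's actual lookup code. */
static bool device_id_in_list(const u16 *ids, u16 device_id)
{
        /* the list is terminated by a 0 entry, as in mtl_u_ids above */
        for (; *ids; ids++) {
                if (*ids == device_id)
                        return true;
        }

        return false;
}

With that shape, checking whether a device belongs to the subplatform reduces to device_id_in_list(mtl_u_ids, pdev->device).
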
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 9a333d9e6601..717286981687 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -96,6 +96,7 @@ struct pci_dev;
func(dg2_g12) \
/* Display ver 14 (based on GMD ID) */ \
func(meteorlake) \
+ func(meteorlake_u) \
/* Display ver 20 (based on GMD ID) */ \
func(lunarlake) \
/* Display ver 14.1 (based on GMD ID) */ \
@@ -145,6 +146,7 @@ struct intel_display_platforms {
#define HAS_BIGJOINER(__display) (DISPLAY_VER(__display) >= 11 && HAS_DSC(__display))
#define HAS_CDCLK_CRAWL(__display) (DISPLAY_INFO(__display)->has_cdclk_crawl)
#define HAS_CDCLK_SQUASH(__display) (DISPLAY_INFO(__display)->has_cdclk_squash)
+#define HAS_CMTG(__display) (!(__display)->platform.dg2 && DISPLAY_VER(__display) >= 13)
#define HAS_CUR_FBC(__display) (!HAS_GMCH(__display) && IS_DISPLAY_VER(__display, 7, 13))
#define HAS_D12_PLANE_MINIMIZATION(__display) ((__display)->platform.rocketlake || (__display)->platform.alderlake_s)
#define HAS_DBUF_OVERLAP_DETECTION(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dbuf_overlap_detection)
@@ -161,6 +163,7 @@ struct intel_display_platforms {
#define HAS_DSC(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dsc)
#define HAS_DSC_MST(__display) (DISPLAY_VER(__display) >= 12 && HAS_DSC(__display))
#define HAS_FBC(__display) (DISPLAY_RUNTIME_INFO(__display)->fbc_mask != 0)
+#define HAS_FBC_DIRTY_RECT(__display) (DISPLAY_VER(__display) >= 30)
#define HAS_FPGA_DBG_UNCLAIMED(__display) (DISPLAY_INFO(__display)->has_fpga_dbg)
#define HAS_FW_BLC(__display) (DISPLAY_VER(__display) >= 3)
#define HAS_GMBUS_IRQ(__display) (DISPLAY_VER(__display) >= 4)
@@ -233,6 +236,17 @@ struct intel_display_platforms {
(drm_WARN_ON(__to_intel_display(__display)->drm, INTEL_DISPLAY_STEP(__display) == STEP_NONE), \
INTEL_DISPLAY_STEP(__display) >= (since) && INTEL_DISPLAY_STEP(__display) < (until))
+#define ARLS_HOST_BRIDGE_PCI_ID1 0x7D1C
+#define ARLS_HOST_BRIDGE_PCI_ID2 0x7D2D
+#define ARLS_HOST_BRIDGE_PCI_ID3 0x7D2E
+#define ARLS_HOST_BRIDGE_PCI_ID4 0x7D2F
+
+#define IS_ARROWLAKE_S_BY_HOST_BRIDGE_ID(id) \
+ (((id) == ARLS_HOST_BRIDGE_PCI_ID1) || \
+ ((id) == ARLS_HOST_BRIDGE_PCI_ID2) || \
+ ((id) == ARLS_HOST_BRIDGE_PCI_ID3) || \
+ ((id) == ARLS_HOST_BRIDGE_PCI_ID4))
+
struct intel_display_runtime_info {
struct intel_display_ip_ver {
u16 ver;
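
IS_ARROWLAKE_S_BY_HOST_BRIDGE_ID() identifies Arrow Lake-S by the PCI device ID of the host bridge (0000:00:00.0) rather than by the graphics device itself. The code that reads that ID is not part of this hunk; assuming the usual lookup of the bus 0, device 0, function 0 device, a sketch might look like this (probe_is_arrowlake_s is illustrative only):

#include <linux/pci.h>

/* Illustrative sketch: read the host bridge (0000:00:00.0) device ID and
 * run it through the new macro. Not necessarily how the driver wires it up. */
static bool probe_is_arrowlake_s(void)
{
        struct pci_dev *bridge;
        u16 id;

        bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
        if (!bridge)
                return false;

        id = bridge->device;
        pci_dev_put(bridge);

        return IS_ARROWLAKE_S_BY_HOST_BRIDGE_ID(id);
}
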
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 50ec0c3c7588..31740a677dd8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -194,13 +194,13 @@ void intel_display_driver_early_probe(struct intel_display *display)
mutex_init(&display->hdcp.hdcp_mutex);
intel_display_irq_init(i915);
- intel_dkl_phy_init(i915);
+ intel_dkl_phy_init(display);
intel_color_init_hooks(display);
intel_init_cdclk_hooks(display);
- intel_audio_hooks_init(i915);
+ intel_audio_hooks_init(display);
intel_dpll_init_clock_hook(i915);
- intel_init_display_hooks(i915);
- intel_fdi_init_hook(i915);
+ intel_init_display_hooks(display);
+ intel_fdi_init_hook(display);
intel_dmc_wl_init(display);
}
@@ -431,7 +431,7 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
intel_wm_init(i915);
- intel_panel_sanitize_ssc(i915);
+ intel_panel_sanitize_ssc(display);
intel_pps_setup(display);
@@ -442,18 +442,18 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
INTEL_NUM_PIPES(display) > 1 ? "s" : "");
for_each_pipe(display, pipe) {
- ret = intel_crtc_init(i915, pipe);
+ ret = intel_crtc_init(display, pipe);
if (ret)
goto err_mode_config;
}
intel_plane_possible_crtcs_init(display);
- intel_shared_dpll_init(i915);
- intel_fdi_pll_freq_update(i915);
+ intel_shared_dpll_init(display);
+ intel_fdi_pll_freq_update(display);
- intel_update_czclk(i915);
+ intel_update_czclk(display);
intel_display_driver_init_hw(display);
- intel_dpll_update_ref_clks(i915);
+ intel_dpll_update_ref_clks(display);
if (display->cdclk.max_cdclk_freq == 0)
intel_update_max_cdclk(display);
@@ -462,7 +462,7 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
/* Just disable it once at startup */
intel_vga_disable(display);
- intel_setup_outputs(i915);
+ intel_setup_outputs(display);
ret = intel_dp_tunnel_mgr_init(display);
if (ret)
@@ -517,7 +517,7 @@ int intel_display_driver_probe(struct intel_display *display)
* are already calculated and there is no assert_plane warnings
* during bootup.
*/
- ret = intel_initial_commit(display->drm);
+ ret = intel_initial_commit(display);
if (ret)
drm_dbg_kms(display->drm, "Initial modeset failed, %d\n", ret);
@@ -544,13 +544,13 @@ void intel_display_driver_register(struct intel_display *display)
intel_opregion_register(display);
intel_acpi_video_register(display);
- intel_audio_init(i915);
+ intel_audio_init(display);
intel_display_driver_enable_user_access(display);
- intel_audio_register(i915);
+ intel_audio_register(display);
- intel_display_debugfs_register(i915);
+ intel_display_debugfs_register(display);
/*
* We need to coordinate the hotplugs with the asynchronous
@@ -564,6 +564,8 @@ void intel_display_driver_register(struct intel_display *display)
intel_display_device_info_print(DISPLAY_INFO(display),
DISPLAY_RUNTIME_INFO(display), &p);
+
+ intel_register_dsm_handler();
}
/* part #1: call before irq uninstall */
@@ -636,11 +638,11 @@ void intel_display_driver_remove_nogem(struct intel_display *display)
void intel_display_driver_unregister(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (!HAS_DISPLAY(display))
return;
+ intel_unregister_dsm_handler();
+
drm_client_dev_unregister(display->drm);
/*
@@ -652,7 +654,7 @@ void intel_display_driver_unregister(struct intel_display *display)
intel_display_driver_disable_user_access(display);
- intel_audio_deinit(i915);
+ intel_audio_deinit(display);
drm_atomic_helper_shutdown(display->drm);
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index 069043f9d894..aa23bb817805 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -10,11 +10,13 @@
#include "i915_irq.h"
#include "i915_reg.h"
#include "icl_dsi_regs.h"
+#include "intel_atomic_plane.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
+#include "intel_dmc_wl.h"
#include "intel_dp_aux.h"
#include "intel_dsb.h"
#include "intel_fdi_regs.h"
@@ -25,6 +27,92 @@
#include "intel_pmdemand.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
+#include "intel_uncore.h"
+
+static void
+intel_display_irq_regs_init(struct intel_display *display, struct i915_irq_regs regs,
+ u32 imr_val, u32 ier_val)
+{
+ intel_dmc_wl_get(display, regs.imr);
+ intel_dmc_wl_get(display, regs.ier);
+ intel_dmc_wl_get(display, regs.iir);
+
+ gen2_irq_init(to_intel_uncore(display->drm), regs, imr_val, ier_val);
+
+ intel_dmc_wl_put(display, regs.iir);
+ intel_dmc_wl_put(display, regs.ier);
+ intel_dmc_wl_put(display, regs.imr);
+}
+
+static void
+intel_display_irq_regs_reset(struct intel_display *display, struct i915_irq_regs regs)
+{
+ intel_dmc_wl_get(display, regs.imr);
+ intel_dmc_wl_get(display, regs.ier);
+ intel_dmc_wl_get(display, regs.iir);
+
+ gen2_irq_reset(to_intel_uncore(display->drm), regs);
+
+ intel_dmc_wl_put(display, regs.iir);
+ intel_dmc_wl_put(display, regs.ier);
+ intel_dmc_wl_put(display, regs.imr);
+}
+
+static void
+intel_display_irq_regs_assert_irr_is_zero(struct intel_display *display, i915_reg_t reg)
+{
+ intel_dmc_wl_get(display, reg);
+
+ gen2_assert_iir_is_zero(to_intel_uncore(display->drm), reg);
+
+ intel_dmc_wl_put(display, reg);
+}
+
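
The three wrappers above bracket the gen2 IRQ register helpers with DMC wakelock get/put calls so the IMR/IER/IIR registers stay accessible for the duration of the access. The same get/put contract would presumably apply to any other register covered by the wakelock; a minimal sketch, assuming only the (display, reg) signatures already used above (example_protected_write is not part of the patch):

/* Sketch of the wakelock bracketing contract the wrappers above encode:
 * take the DMC wakelock for the register touched, do the access, then
 * release it. Purely illustrative. */
static void example_protected_write(struct intel_display *display,
                                    i915_reg_t reg, u32 val)
{
        intel_dmc_wl_get(display, reg);

        intel_de_write(display, reg, val);

        intel_dmc_wl_put(display, reg);
}
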
+struct pipe_fault_handler {
+ bool (*handle)(struct intel_crtc *crtc, enum plane_id plane_id);
+ u32 fault;
+ enum plane_id plane_id;
+};
+
+static bool handle_plane_fault(struct intel_crtc *crtc, enum plane_id plane_id)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_plane_error error = {};
+ struct intel_plane *plane;
+
+ plane = intel_crtc_get_plane(crtc, plane_id);
+ if (!plane || !plane->capture_error)
+ return false;
+
+ plane->capture_error(crtc, plane, &error);
+
+ drm_err_ratelimited(display->drm,
+ "[CRTC:%d:%s][PLANE:%d:%s] fault (CTL=0x%x, SURF=0x%x, SURFLIVE=0x%x)\n",
+ crtc->base.base.id, crtc->base.name,
+ plane->base.base.id, plane->base.name,
+ error.ctl, error.surf, error.surflive);
+
+ return true;
+}
+
+static void intel_pipe_fault_irq_handler(struct intel_display *display,
+ const struct pipe_fault_handler *handlers,
+ enum pipe pipe, u32 fault_errors)
+{
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
+ const struct pipe_fault_handler *handler;
+
+ for (handler = handlers; handler && handler->fault; handler++) {
+ if ((fault_errors & handler->fault) == 0)
+ continue;
+
+ if (handler->handle(crtc, handler->plane_id))
+ fault_errors &= ~handler->fault;
+ }
+
+ WARN_ONCE(fault_errors, "[CRTC:%d:%s] unreported faults 0x%x\n",
+ crtc->base.base.id, crtc->base.name, fault_errors);
+}
static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
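
intel_pipe_fault_irq_handler() above walks a zero-terminated table of { fault bit, handler, plane } entries, clears each claimed bit from fault_errors, and warns once about anything left unclaimed. Later hunks add the per-platform tables (ivb_pipe_fault_handlers, ilk_pipe_fault_handlers, and the gen8+ variants), so supporting a new fault bit amounts to one more table row. A sketch under that assumption (the fault bits, handle_example_fault and example_pipe_fault_handlers below are hypothetical):

/* Hypothetical example of extending the table-driven dispatch:
 * one new row claims one new fault bit, everything else is unchanged. */
static bool handle_example_fault(struct intel_crtc *crtc, enum plane_id plane_id)
{
        struct intel_display *display = to_intel_display(crtc);

        drm_err_ratelimited(display->drm, "[CRTC:%d:%s] example fault\n",
                            crtc->base.base.id, crtc->base.name);

        return true; /* claimed, so it won't trip the unreported-faults warning */
}

static const struct pipe_fault_handler example_pipe_fault_handlers[] = {
        { .fault = BIT(0) /* hypothetical fault bit */, .handle = handle_example_fault, },
        { .fault = BIT(1) /* hypothetical plane fault */, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
        {} /* zero .fault terminates the walk */
};

A caller then masks the relevant IIR bits and passes them in, e.g. intel_pipe_fault_irq_handler(display, example_pipe_fault_handlers, pipe, fault_errors), which is exactly what ivb_err_int_handler() does further down.
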
@@ -44,6 +132,7 @@ intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
u32 interrupt_mask, u32 enabled_irq_mask)
{
+ struct intel_display *display = &dev_priv->display;
u32 new_val;
lockdep_assert_held(&dev_priv->irq_lock);
@@ -56,8 +145,8 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
if (new_val != dev_priv->irq_mask &&
!drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
dev_priv->irq_mask = new_val;
- intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
- intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
+ intel_de_write(display, DEIMR, dev_priv->irq_mask);
+ intel_de_posting_read(display, DEIMR);
}
}
@@ -80,6 +169,7 @@ void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
void bdw_update_port_irq(struct drm_i915_private *dev_priv,
u32 interrupt_mask, u32 enabled_irq_mask)
{
+ struct intel_display *display = &dev_priv->display;
u32 new_val;
u32 old_val;
@@ -90,15 +180,15 @@ void bdw_update_port_irq(struct drm_i915_private *dev_priv,
if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
return;
- old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
+ old_val = intel_de_read(display, GEN8_DE_PORT_IMR);
new_val = old_val;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
if (new_val != old_val) {
- intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
- intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
+ intel_de_write(display, GEN8_DE_PORT_IMR, new_val);
+ intel_de_posting_read(display, GEN8_DE_PORT_IMR);
}
}
@@ -113,6 +203,7 @@ static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
enum pipe pipe, u32 interrupt_mask,
u32 enabled_irq_mask)
{
+ struct intel_display *display = &dev_priv->display;
u32 new_val;
lockdep_assert_held(&dev_priv->irq_lock);
@@ -128,9 +219,8 @@ static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
if (new_val != dev_priv->display.irq.de_irq_mask[pipe]) {
dev_priv->display.irq.de_irq_mask[pipe] = new_val;
- intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe),
- dev_priv->display.irq.de_irq_mask[pipe]);
- intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
+ intel_de_write(display, GEN8_DE_PIPE_IMR(pipe), display->irq.de_irq_mask[pipe]);
+ intel_de_posting_read(display, GEN8_DE_PIPE_IMR(pipe));
}
}
@@ -156,7 +246,8 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
u32 interrupt_mask,
u32 enabled_irq_mask)
{
- u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);
+ struct intel_display *display = &dev_priv->display;
+ u32 sdeimr = intel_de_read(display, SDEIMR);
sdeimr &= ~interrupt_mask;
sdeimr |= (~enabled_irq_mask & interrupt_mask);
@@ -168,8 +259,8 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
return;
- intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
- intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
+ intel_de_write(display, SDEIMR, sdeimr);
+ intel_de_posting_read(display, SDEIMR);
}
void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
@@ -182,29 +273,30 @@ void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
ibx_display_interrupt_update(i915, bits, 0);
}
-u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
+u32 i915_pipestat_enable_mask(struct intel_display *display,
enum pipe pipe)
{
- u32 status_mask = dev_priv->display.irq.pipestat_irq_mask[pipe];
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ u32 status_mask = display->irq.pipestat_irq_mask[pipe];
u32 enable_mask = status_mask << 16;
lockdep_assert_held(&dev_priv->irq_lock);
- if (DISPLAY_VER(dev_priv) < 5)
+ if (DISPLAY_VER(display) < 5)
goto out;
/*
* On pipe A we don't support the PSR interrupt yet,
* on pipe B and C the same bit MBZ.
*/
- if (drm_WARN_ON_ONCE(&dev_priv->drm,
+ if (drm_WARN_ON_ONCE(display->drm,
status_mask & PIPE_A_PSR_STATUS_VLV))
return 0;
/*
* On pipe B and C we don't support the PSR interrupt yet, on pipe
* A the same bit is for perf counters which we don't use either.
*/
- if (drm_WARN_ON_ONCE(&dev_priv->drm,
+ if (drm_WARN_ON_ONCE(display->drm,
status_mask & PIPE_B_PSR_STATUS_VLV))
return 0;
@@ -217,7 +309,7 @@ u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
out:
- drm_WARN_ONCE(&dev_priv->drm,
+ drm_WARN_ONCE(display->drm,
enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
status_mask & ~PIPESTAT_INT_STATUS_MASK,
"pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
@@ -229,6 +321,7 @@ out:
void i915_enable_pipestat(struct drm_i915_private *dev_priv,
enum pipe pipe, u32 status_mask)
{
+ struct intel_display *display = &dev_priv->display;
i915_reg_t reg = PIPESTAT(dev_priv, pipe);
u32 enable_mask;
@@ -243,15 +336,16 @@ void i915_enable_pipestat(struct drm_i915_private *dev_priv,
return;
dev_priv->display.irq.pipestat_irq_mask[pipe] |= status_mask;
- enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
+ enable_mask = i915_pipestat_enable_mask(display, pipe);
- intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
- intel_uncore_posting_read(&dev_priv->uncore, reg);
+ intel_de_write(display, reg, enable_mask | status_mask);
+ intel_de_posting_read(display, reg);
}
void i915_disable_pipestat(struct drm_i915_private *dev_priv,
enum pipe pipe, u32 status_mask)
{
+ struct intel_display *display = &dev_priv->display;
i915_reg_t reg = PIPESTAT(dev_priv, pipe);
u32 enable_mask;
@@ -266,10 +360,10 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
return;
dev_priv->display.irq.pipestat_irq_mask[pipe] &= ~status_mask;
- enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
+ enable_mask = i915_pipestat_enable_mask(display, pipe);
- intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
- intel_uncore_posting_read(&dev_priv->uncore, reg);
+ intel_de_write(display, reg, enable_mask | status_mask);
+ intel_de_posting_read(display, reg);
}
static bool i915_has_legacy_blc_interrupt(struct intel_display *display)
@@ -373,55 +467,58 @@ static void flip_done_handler(struct drm_i915_private *i915,
static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
+ struct intel_display *display = &dev_priv->display;
+
display_pipe_crc_irq_handler(dev_priv, pipe,
- intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_HSW(pipe)),
+ intel_de_read(display, PIPE_CRC_RES_HSW(pipe)),
0, 0, 0, 0);
}
static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
+ struct intel_display *display = &dev_priv->display;
+
display_pipe_crc_irq_handler(dev_priv, pipe,
- intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
- intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
- intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
- intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
- intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
+ intel_de_read(display, PIPE_CRC_RES_1_IVB(pipe)),
+ intel_de_read(display, PIPE_CRC_RES_2_IVB(pipe)),
+ intel_de_read(display, PIPE_CRC_RES_3_IVB(pipe)),
+ intel_de_read(display, PIPE_CRC_RES_4_IVB(pipe)),
+ intel_de_read(display, PIPE_CRC_RES_5_IVB(pipe)));
}
static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
+ struct intel_display *display = &dev_priv->display;
u32 res1, res2;
if (DISPLAY_VER(dev_priv) >= 3)
- res1 = intel_uncore_read(&dev_priv->uncore,
- PIPE_CRC_RES_RES1_I915(dev_priv, pipe));
+ res1 = intel_de_read(display, PIPE_CRC_RES_RES1_I915(dev_priv, pipe));
else
res1 = 0;
if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
- res2 = intel_uncore_read(&dev_priv->uncore,
- PIPE_CRC_RES_RES2_G4X(dev_priv, pipe));
+ res2 = intel_de_read(display, PIPE_CRC_RES_RES2_G4X(dev_priv, pipe));
else
res2 = 0;
display_pipe_crc_irq_handler(dev_priv, pipe,
- intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(dev_priv, pipe)),
- intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(dev_priv, pipe)),
- intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(dev_priv, pipe)),
+ intel_de_read(display, PIPE_CRC_RES_RED(dev_priv, pipe)),
+ intel_de_read(display, PIPE_CRC_RES_GREEN(dev_priv, pipe)),
+ intel_de_read(display, PIPE_CRC_RES_BLUE(dev_priv, pipe)),
res1, res2);
}
static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
- intel_uncore_write(&dev_priv->uncore,
- PIPESTAT(dev_priv, pipe),
- PIPESTAT_INT_STATUS_MASK |
- PIPE_FIFO_UNDERRUN_STATUS);
+ intel_de_write(display,
+ PIPESTAT(dev_priv, pipe),
+ PIPESTAT_INT_STATUS_MASK | PIPE_FIFO_UNDERRUN_STATUS);
dev_priv->display.irq.pipestat_irq_mask[pipe] = 0;
}
@@ -430,6 +527,7 @@ static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
+ struct intel_display *display = &dev_priv->display;
enum pipe pipe;
spin_lock(&dev_priv->irq_lock);
@@ -474,8 +572,8 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
continue;
reg = PIPESTAT(dev_priv, pipe);
- pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
- enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
+ pipe_stats[pipe] = intel_de_read(display, reg) & status_mask;
+ enable_mask = i915_pipestat_enable_mask(display, pipe);
/*
* Clear the PIPE*STAT regs before the IIR
@@ -487,8 +585,8 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
* an interrupt is still pending.
*/
if (pipe_stats[pipe]) {
- intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
- intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
+ intel_de_write(display, reg, pipe_stats[pipe]);
+ intel_de_write(display, reg, enable_mask);
}
}
spin_unlock(&dev_priv->irq_lock);
@@ -512,7 +610,7 @@ void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
i9xx_pipe_crc_irq_handler(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
- intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+ intel_cpu_fifo_underrun_irq_handler(display, pipe);
}
if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@ -537,7 +635,7 @@ void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
i9xx_pipe_crc_irq_handler(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
- intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+ intel_cpu_fifo_underrun_irq_handler(display, pipe);
}
if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@ -564,7 +662,7 @@ void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
i9xx_pipe_crc_irq_handler(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
- intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+ intel_cpu_fifo_underrun_irq_handler(display, pipe);
}
if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
@@ -605,7 +703,7 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
for_each_pipe(dev_priv, pipe)
drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
- intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
+ intel_de_read(display, FDI_RX_IIR(pipe)));
}
if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
@@ -616,23 +714,65 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
"PCH transcoder CRC error interrupt\n");
if (pch_iir & SDE_TRANSA_FIFO_UNDER)
- intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
+ intel_pch_fifo_underrun_irq_handler(display, PIPE_A);
if (pch_iir & SDE_TRANSB_FIFO_UNDER)
- intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
+ intel_pch_fifo_underrun_irq_handler(display, PIPE_B);
+}
+
+static u32 ivb_err_int_pipe_fault_mask(enum pipe pipe)
+{
+ switch (pipe) {
+ case PIPE_A:
+ return ERR_INT_SPRITE_A_FAULT |
+ ERR_INT_PRIMARY_A_FAULT |
+ ERR_INT_CURSOR_A_FAULT;
+ case PIPE_B:
+ return ERR_INT_SPRITE_B_FAULT |
+ ERR_INT_PRIMARY_B_FAULT |
+ ERR_INT_CURSOR_B_FAULT;
+ case PIPE_C:
+ return ERR_INT_SPRITE_C_FAULT |
+ ERR_INT_PRIMARY_C_FAULT |
+ ERR_INT_CURSOR_C_FAULT;
+ default:
+ return 0;
+ }
}
+static const struct pipe_fault_handler ivb_pipe_fault_handlers[] = {
+ { .fault = ERR_INT_SPRITE_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
+ { .fault = ERR_INT_PRIMARY_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
+ { .fault = ERR_INT_CURSOR_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ { .fault = ERR_INT_SPRITE_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
+ { .fault = ERR_INT_PRIMARY_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
+ { .fault = ERR_INT_CURSOR_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ { .fault = ERR_INT_SPRITE_C_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
+ { .fault = ERR_INT_PRIMARY_C_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
+ { .fault = ERR_INT_CURSOR_C_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ {}
+};
+
static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
- u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
+ struct intel_display *display = &dev_priv->display;
+ u32 err_int = intel_de_read(display, GEN7_ERR_INT);
enum pipe pipe;
if (err_int & ERR_INT_POISON)
drm_err(&dev_priv->drm, "Poison interrupt\n");
+ if (err_int & ERR_INT_INVALID_GTT_PTE)
+ drm_err_ratelimited(display->drm, "Invalid GTT PTE\n");
+
+ if (err_int & ERR_INT_INVALID_PTE_DATA)
+ drm_err_ratelimited(display->drm, "Invalid PTE data\n");
+
for_each_pipe(dev_priv, pipe) {
+ u32 fault_errors;
+
if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
- intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+ intel_cpu_fifo_underrun_irq_handler(display, pipe);
if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
if (IS_IVYBRIDGE(dev_priv))
@@ -640,14 +780,20 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
else
hsw_pipe_crc_irq_handler(dev_priv, pipe);
}
+
+ fault_errors = err_int & ivb_err_int_pipe_fault_mask(pipe);
+ if (fault_errors)
+ intel_pipe_fault_irq_handler(display, ivb_pipe_fault_handlers,
+ pipe, fault_errors);
}
- intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
+ intel_de_write(display, GEN7_ERR_INT, err_int);
}
static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
- u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
+ struct intel_display *display = &dev_priv->display;
+ u32 serr_int = intel_de_read(display, SERR_INT);
enum pipe pipe;
if (serr_int & SERR_INT_POISON)
@@ -655,9 +801,9 @@ static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
for_each_pipe(dev_priv, pipe)
if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
- intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
+ intel_pch_fifo_underrun_irq_handler(display, pipe);
- intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
+ intel_de_write(display, SERR_INT, serr_int);
}
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
@@ -691,13 +837,63 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
for_each_pipe(dev_priv, pipe)
drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
- intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
+ intel_de_read(display, FDI_RX_IIR(pipe)));
}
if (pch_iir & SDE_ERROR_CPT)
cpt_serr_int_handler(dev_priv);
}
+static u32 ilk_gtt_fault_pipe_fault_mask(enum pipe pipe)
+{
+ switch (pipe) {
+ case PIPE_A:
+ return GTT_FAULT_SPRITE_A_FAULT |
+ GTT_FAULT_PRIMARY_A_FAULT |
+ GTT_FAULT_CURSOR_A_FAULT;
+ case PIPE_B:
+ return GTT_FAULT_SPRITE_B_FAULT |
+ GTT_FAULT_PRIMARY_B_FAULT |
+ GTT_FAULT_CURSOR_B_FAULT;
+ default:
+ return 0;
+ }
+}
+
+static const struct pipe_fault_handler ilk_pipe_fault_handlers[] = {
+ { .fault = GTT_FAULT_SPRITE_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
+ { .fault = GTT_FAULT_SPRITE_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
+ { .fault = GTT_FAULT_PRIMARY_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
+ { .fault = GTT_FAULT_PRIMARY_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
+ { .fault = GTT_FAULT_CURSOR_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ { .fault = GTT_FAULT_CURSOR_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ {}
+};
+
+static void ilk_gtt_fault_irq_handler(struct intel_display *display)
+{
+ enum pipe pipe;
+ u32 gtt_fault;
+
+ gtt_fault = intel_de_read(display, ILK_GTT_FAULT);
+ intel_de_write(display, ILK_GTT_FAULT, gtt_fault);
+
+ if (gtt_fault & GTT_FAULT_INVALID_GTT_PTE)
+ drm_err_ratelimited(display->drm, "Invalid GTT PTE\n");
+
+ if (gtt_fault & GTT_FAULT_INVALID_PTE_DATA)
+ drm_err_ratelimited(display->drm, "Invalid PTE data\n");
+
+ for_each_pipe(display, pipe) {
+ u32 fault_errors;
+
+ fault_errors = gtt_fault & ilk_gtt_fault_pipe_fault_mask(pipe);
+ if (fault_errors)
+ intel_pipe_fault_irq_handler(display, ilk_pipe_fault_handlers,
+ pipe, fault_errors);
+ }
+}
+
void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
{
struct intel_display *display = &dev_priv->display;
@@ -716,6 +912,9 @@ void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
if (de_iir & DE_POISON)
drm_err(&dev_priv->drm, "Poison interrupt\n");
+ if (de_iir & DE_GTT_FAULT)
+ ilk_gtt_fault_irq_handler(display);
+
for_each_pipe(dev_priv, pipe) {
if (de_iir & DE_PIPE_VBLANK(pipe))
intel_handle_vblank(dev_priv, pipe);
@@ -724,7 +923,7 @@ void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
flip_done_handler(dev_priv, pipe);
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
- intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+ intel_cpu_fifo_underrun_irq_handler(display, pipe);
if (de_iir & DE_PIPE_CRC_DONE(pipe))
i9xx_pipe_crc_irq_handler(dev_priv, pipe);
@@ -732,7 +931,7 @@ void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
/* check event from PCH */
if (de_iir & DE_PCH_EVENT) {
- u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
+ u32 pch_iir = intel_de_read(display, SDEIIR);
if (HAS_PCH_CPT(dev_priv))
cpt_irq_handler(dev_priv, pch_iir);
@@ -740,7 +939,7 @@ void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
ibx_irq_handler(dev_priv, pch_iir);
/* should clear PCH hotplug event before clear CPU irq */
- intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
+ intel_de_write(display, SDEIIR, pch_iir);
}
if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
@@ -766,8 +965,7 @@ void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u32 psr_iir;
- psr_iir = intel_uncore_rmw(&dev_priv->uncore,
- EDP_PSR_IIR, 0, 0);
+ psr_iir = intel_de_rmw(display, EDP_PSR_IIR, 0, 0);
intel_psr_irq_handler(intel_dp, psr_iir);
break;
}
@@ -789,12 +987,12 @@ void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
/* check event from PCH */
if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
- u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
+ u32 pch_iir = intel_de_read(display, SDEIIR);
cpt_irq_handler(dev_priv, pch_iir);
/* clear PCH hotplug event before clear CPU irq */
- intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
+ intel_de_write(display, SDEIIR, pch_iir);
}
}
@@ -856,7 +1054,7 @@ static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
GEN9_PIPE_PLANE3_FAULT |
GEN9_PIPE_PLANE2_FAULT |
GEN9_PIPE_PLANE1_FAULT;
- if (DISPLAY_VER(display) >= 13 || HAS_D12_PLANE_MINIMIZATION(display))
+ else if (DISPLAY_VER(display) >= 13 || HAS_D12_PLANE_MINIMIZATION(display))
return GEN12_PIPEDMC_FAULT |
GEN9_PIPE_CURSOR_FAULT |
GEN11_PIPE_PLANE5_FAULT |
@@ -895,6 +1093,108 @@ static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
GEN8_PIPE_PRIMARY_FAULT;
}
+static bool handle_plane_ats_fault(struct intel_crtc *crtc, enum plane_id plane_id)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ drm_err_ratelimited(display->drm,
+ "[CRTC:%d:%s] PLANE ATS fault\n",
+ crtc->base.base.id, crtc->base.name);
+
+ return true;
+}
+
+static bool handle_pipedmc_ats_fault(struct intel_crtc *crtc, enum plane_id plane_id)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ drm_err_ratelimited(display->drm,
+ "[CRTC:%d:%s] PIPEDMC ATS fault\n",
+ crtc->base.base.id, crtc->base.name);
+
+ return true;
+}
+
+static bool handle_pipedmc_fault(struct intel_crtc *crtc, enum plane_id plane_id)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ drm_err_ratelimited(display->drm,
+ "[CRTC:%d:%s] PIPEDMC fault\n",
+ crtc->base.base.id, crtc->base.name);
+
+ return true;
+}
+
+static const struct pipe_fault_handler mtl_pipe_fault_handlers[] = {
+ { .fault = MTL_PLANE_ATS_FAULT, .handle = handle_plane_ats_fault, },
+ { .fault = MTL_PIPEDMC_ATS_FAULT, .handle = handle_pipedmc_ats_fault, },
+ { .fault = GEN12_PIPEDMC_FAULT, .handle = handle_pipedmc_fault, },
+ { .fault = GEN11_PIPE_PLANE5_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_5, },
+ { .fault = GEN9_PIPE_PLANE4_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_4, },
+ { .fault = GEN9_PIPE_PLANE3_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_3, },
+ { .fault = GEN9_PIPE_PLANE2_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_2, },
+ { .fault = GEN9_PIPE_PLANE1_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_1, },
+ { .fault = GEN9_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ {}
+};
+
+static const struct pipe_fault_handler tgl_pipe_fault_handlers[] = {
+ { .fault = GEN12_PIPEDMC_FAULT, .handle = handle_pipedmc_fault, },
+ { .fault = GEN11_PIPE_PLANE7_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_7, },
+ { .fault = GEN11_PIPE_PLANE6_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_6, },
+ { .fault = GEN11_PIPE_PLANE5_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_5, },
+ { .fault = GEN9_PIPE_PLANE4_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_4, },
+ { .fault = GEN9_PIPE_PLANE3_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_3, },
+ { .fault = GEN9_PIPE_PLANE2_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_2, },
+ { .fault = GEN9_PIPE_PLANE1_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_1, },
+ { .fault = GEN9_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ {}
+};
+
+static const struct pipe_fault_handler icl_pipe_fault_handlers[] = {
+ { .fault = GEN11_PIPE_PLANE7_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_7, },
+ { .fault = GEN11_PIPE_PLANE6_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_6, },
+ { .fault = GEN11_PIPE_PLANE5_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_5, },
+ { .fault = GEN9_PIPE_PLANE4_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_4, },
+ { .fault = GEN9_PIPE_PLANE3_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_3, },
+ { .fault = GEN9_PIPE_PLANE2_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_2, },
+ { .fault = GEN9_PIPE_PLANE1_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_1, },
+ { .fault = GEN9_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ {}
+};
+
+static const struct pipe_fault_handler skl_pipe_fault_handlers[] = {
+ { .fault = GEN9_PIPE_PLANE4_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_4, },
+ { .fault = GEN9_PIPE_PLANE3_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_3, },
+ { .fault = GEN9_PIPE_PLANE2_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_2, },
+ { .fault = GEN9_PIPE_PLANE1_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_1, },
+ { .fault = GEN9_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ {}
+};
+
+static const struct pipe_fault_handler bdw_pipe_fault_handlers[] = {
+ { .fault = GEN8_PIPE_SPRITE_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
+ { .fault = GEN8_PIPE_PRIMARY_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
+ { .fault = GEN8_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ {}
+};
+
+static const struct pipe_fault_handler *
+gen8_pipe_fault_handlers(struct intel_display *display)
+{
+ if (DISPLAY_VER(display) >= 14)
+ return mtl_pipe_fault_handlers;
+ else if (DISPLAY_VER(display) >= 12)
+ return tgl_pipe_fault_handlers;
+ else if (DISPLAY_VER(display) >= 11)
+ return icl_pipe_fault_handlers;
+ else if (DISPLAY_VER(display) >= 9)
+ return skl_pipe_fault_handlers;
+ else
+ return bdw_pipe_fault_handlers;
+}
+
static void intel_pmdemand_irq_handler(struct drm_i915_private *dev_priv)
{
wake_up_all(&dev_priv->display.pmdemand.waitqueue);
@@ -925,8 +1225,7 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
}
if (iir & XELPDP_RM_TIMEOUT) {
- u32 val = intel_uncore_read(&dev_priv->uncore,
- RM_TIMEOUT_REG_CAPTURE);
+ u32 val = intel_de_read(display, RM_TIMEOUT_REG_CAPTURE);
drm_warn(&dev_priv->drm, "Register Access Timeout = 0x%x\n", val);
found = true;
}
@@ -949,7 +1248,7 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
else
iir_reg = EDP_PSR_IIR;
- psr_iir = intel_uncore_rmw(&dev_priv->uncore, iir_reg, 0, 0);
+ psr_iir = intel_de_rmw(display, iir_reg, 0, 0);
if (psr_iir)
found = true;
@@ -969,6 +1268,7 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
u32 te_trigger)
{
+ struct intel_display *display = &dev_priv->display;
enum pipe pipe = INVALID_PIPE;
enum transcoder dsi_trans;
enum port port;
@@ -978,8 +1278,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
* Incase of dual link, TE comes from DSI_1
* this is to check if dual link is enabled
*/
- val = intel_uncore_read(&dev_priv->uncore,
- TRANS_DDI_FUNC_CTL2(dev_priv, TRANSCODER_DSI_0));
+ val = intel_de_read(display, TRANS_DDI_FUNC_CTL2(dev_priv, TRANSCODER_DSI_0));
val &= PORT_SYNC_MODE_ENABLE;
/*
@@ -991,7 +1290,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
/* Check if DSI configured in command mode */
- val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
+ val = intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans));
val = val & OP_MODE_MASK;
if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
@@ -1000,8 +1299,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
}
/* Get PIPE for handling VBLANK event */
- val = intel_uncore_read(&dev_priv->uncore,
- TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans));
+ val = intel_de_read(display, TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans));
switch (val & TRANS_DDI_EDP_INPUT_MASK) {
case TRANS_DDI_EDP_INPUT_A_ON:
pipe = PIPE_A;
@@ -1021,7 +1319,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
/* clear TE in dsi IIR */
port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
- intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
+ intel_de_rmw(display, DSI_INTR_IDENT_REG(port), 0, 0);
}
static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
@@ -1034,10 +1332,11 @@ static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_iir, u32 *pica_iir)
{
+ struct intel_display *display = &i915->display;
u32 pica_ier = 0;
*pica_iir = 0;
- *pch_iir = intel_de_read(i915, SDEIIR);
+ *pch_iir = intel_de_read(display, SDEIIR);
if (!*pch_iir)
return;
@@ -1049,15 +1348,15 @@ static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_i
if (*pch_iir & SDE_PICAINTERRUPT) {
drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTL);
- pica_ier = intel_de_rmw(i915, PICAINTERRUPT_IER, ~0, 0);
- *pica_iir = intel_de_read(i915, PICAINTERRUPT_IIR);
- intel_de_write(i915, PICAINTERRUPT_IIR, *pica_iir);
+ pica_ier = intel_de_rmw(display, PICAINTERRUPT_IER, ~0, 0);
+ *pica_iir = intel_de_read(display, PICAINTERRUPT_IIR);
+ intel_de_write(display, PICAINTERRUPT_IIR, *pica_iir);
}
- intel_de_write(i915, SDEIIR, *pch_iir);
+ intel_de_write(display, SDEIIR, *pch_iir);
if (pica_ier)
- intel_de_write(i915, PICAINTERRUPT_IER, pica_ier);
+ intel_de_write(display, PICAINTERRUPT_IER, pica_ier);
}
void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
@@ -1069,9 +1368,9 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));
if (master_ctl & GEN8_DE_MISC_IRQ) {
- iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
+ iir = intel_de_read(display, GEN8_DE_MISC_IIR);
if (iir) {
- intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
+ intel_de_write(display, GEN8_DE_MISC_IIR, iir);
gen8_de_misc_irq_handler(dev_priv, iir);
} else {
drm_err_ratelimited(&dev_priv->drm,
@@ -1080,9 +1379,9 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
}
if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
- iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
+ iir = intel_de_read(display, GEN11_DE_HPD_IIR);
if (iir) {
- intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
+ intel_de_write(display, GEN11_DE_HPD_IIR, iir);
gen11_hpd_irq_handler(dev_priv, iir);
} else {
drm_err_ratelimited(&dev_priv->drm,
@@ -1091,11 +1390,11 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
}
if (master_ctl & GEN8_DE_PORT_IRQ) {
- iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
+ iir = intel_de_read(display, GEN8_DE_PORT_IIR);
if (iir) {
bool found = false;
- intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
+ intel_de_write(display, GEN8_DE_PORT_IIR, iir);
if (iir & gen8_de_port_aux_mask(dev_priv)) {
intel_dp_aux_irq_handler(display);
@@ -1148,14 +1447,14 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
continue;
- iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
+ iir = intel_de_read(display, GEN8_DE_PIPE_IIR(pipe));
if (!iir) {
drm_err_ratelimited(&dev_priv->drm,
"The master control interrupt lied (DE PIPE)!\n");
continue;
}
- intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);
+ intel_de_write(display, GEN8_DE_PIPE_IIR(pipe), iir);
if (iir & GEN8_PIPE_VBLANK)
intel_handle_vblank(dev_priv, pipe);
@@ -1178,14 +1477,13 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
hsw_pipe_crc_irq_handler(dev_priv, pipe);
if (iir & GEN8_PIPE_FIFO_UNDERRUN)
- intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+ intel_cpu_fifo_underrun_irq_handler(display, pipe);
fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
if (fault_errors)
- drm_err_ratelimited(&dev_priv->drm,
- "Fault errors on pipe %c: 0x%08x\n",
- pipe_name(pipe),
- fault_errors);
+ intel_pipe_fault_irq_handler(display,
+ gen8_pipe_fault_handlers(display),
+ pipe, fault_errors);
}
if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
@@ -1221,14 +1519,15 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
{
+ struct intel_display *display = &i915->display;
u32 iir;
if (!(master_ctl & GEN11_GU_MISC_IRQ))
return 0;
- iir = intel_de_read(i915, GEN11_GU_MISC_IIR);
+ iir = intel_de_read(display, GEN11_GU_MISC_IIR);
if (likely(iir))
- intel_de_write(i915, GEN11_GU_MISC_IIR, iir);
+ intel_de_write(display, GEN11_GU_MISC_IIR, iir);
return iir;
}
@@ -1243,6 +1542,7 @@ void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
void gen11_display_irq_handler(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
u32 disp_ctl;
disable_rpm_wakeref_asserts(&i915->runtime_pm);
@@ -1250,17 +1550,18 @@ void gen11_display_irq_handler(struct drm_i915_private *i915)
* GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
* for the display related bits.
*/
- disp_ctl = intel_de_read(i915, GEN11_DISPLAY_INT_CTL);
+ disp_ctl = intel_de_read(display, GEN11_DISPLAY_INT_CTL);
- intel_de_write(i915, GEN11_DISPLAY_INT_CTL, 0);
+ intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);
gen8_de_irq_handler(i915, disp_ctl);
- intel_de_write(i915, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
+ intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
static void i915gm_irq_cstate_wa_enable(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
lockdep_assert_held(&i915->drm.vblank_time_lock);
/*
@@ -1270,15 +1571,18 @@ static void i915gm_irq_cstate_wa_enable(struct drm_i915_private *i915)
* only when vblank/CRC interrupts are actually enabled.
*/
if (i915->display.irq.vblank_enabled++ == 0)
- intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+ intel_de_write(display, SCPD0,
+ _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
static void i915gm_irq_cstate_wa_disable(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
lockdep_assert_held(&i915->drm.vblank_time_lock);
if (--i915->display.irq.vblank_enabled == 0)
- intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+ intel_de_write(display, SCPD0,
+ _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable)
@@ -1398,7 +1702,7 @@ void ilk_disable_vblank(struct drm_crtc *crtc)
static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ struct intel_display *display = to_intel_display(intel_crtc);
enum port port;
if (!(intel_crtc->mode_flags &
@@ -1411,10 +1715,9 @@ static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
else
port = PORT_A;
- intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_MASK_REG(port), DSI_TE_EVENT,
- enable ? 0 : DSI_TE_EVENT);
+ intel_de_rmw(display, DSI_INTR_MASK_REG(port), DSI_TE_EVENT, enable ? 0 : DSI_TE_EVENT);
- intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
+ intel_de_rmw(display, DSI_INTR_IDENT_REG(port), 0, 0);
return true;
}
@@ -1481,21 +1784,132 @@ void bdw_disable_vblank(struct drm_crtc *_crtc)
schedule_work(&display->irq.vblank_dc_work);
}
+static u32 vlv_dpinvgtt_pipe_fault_mask(enum pipe pipe)
+{
+ switch (pipe) {
+ case PIPE_A:
+ return SPRITEB_INVALID_GTT_STATUS |
+ SPRITEA_INVALID_GTT_STATUS |
+ PLANEA_INVALID_GTT_STATUS |
+ CURSORA_INVALID_GTT_STATUS;
+ case PIPE_B:
+ return SPRITED_INVALID_GTT_STATUS |
+ SPRITEC_INVALID_GTT_STATUS |
+ PLANEB_INVALID_GTT_STATUS |
+ CURSORB_INVALID_GTT_STATUS;
+ case PIPE_C:
+ return SPRITEF_INVALID_GTT_STATUS |
+ SPRITEE_INVALID_GTT_STATUS |
+ PLANEC_INVALID_GTT_STATUS |
+ CURSORC_INVALID_GTT_STATUS;
+ default:
+ return 0;
+ }
+}
+
+static const struct pipe_fault_handler vlv_pipe_fault_handlers[] = {
+ { .fault = SPRITEB_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
+ { .fault = SPRITEA_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
+ { .fault = PLANEA_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
+ { .fault = CURSORA_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ { .fault = SPRITED_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
+ { .fault = SPRITEC_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
+ { .fault = PLANEB_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
+ { .fault = CURSORB_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ { .fault = SPRITEF_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
+ { .fault = SPRITEE_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
+ { .fault = PLANEC_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
+ { .fault = CURSORC_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
+ {}
+};
+
+static void vlv_page_table_error_irq_ack(struct intel_display *display, u32 *dpinvgtt)
+{
+ u32 status, enable, tmp;
+
+ tmp = intel_de_read(display, DPINVGTT);
+
+ enable = tmp >> 16;
+ status = tmp & 0xffff;
+
+ /*
+ * Despite what the docs claim, the status bits seem to get
+ * stuck permanently (similar the old PGTBL_ER register), so
+ * we have to disable and ignore them once set. They do get
+ * reset if the display power well goes down, so no need to
+ * track the enable mask explicitly.
+ */
+ *dpinvgtt = status & enable;
+ enable &= ~status;
+
+ /* customary ack+disable then re-enable to guarantee an edge */
+ intel_de_write(display, DPINVGTT, status);
+ intel_de_write(display, DPINVGTT, enable << 16);
+}
+
+static void vlv_page_table_error_irq_handler(struct intel_display *display, u32 dpinvgtt)
+{
+ enum pipe pipe;
+
+ for_each_pipe(display, pipe) {
+ u32 fault_errors;
+
+ fault_errors = dpinvgtt & vlv_dpinvgtt_pipe_fault_mask(pipe);
+ if (fault_errors)
+ intel_pipe_fault_irq_handler(display, vlv_pipe_fault_handlers,
+ pipe, fault_errors);
+ }
+}
+
+void vlv_display_error_irq_ack(struct intel_display *display,
+ u32 *eir, u32 *dpinvgtt)
+{
+ u32 emr;
+
+ *eir = intel_de_read(display, VLV_EIR);
+
+ if (*eir & VLV_ERROR_PAGE_TABLE)
+ vlv_page_table_error_irq_ack(display, dpinvgtt);
+
+ intel_de_write(display, VLV_EIR, *eir);
+
+ /*
+ * Toggle all EMR bits to make sure we get an edge
+ * in the ISR master error bit if we don't clear
+ * all the EIR bits.
+ */
+ emr = intel_de_read(display, VLV_EMR);
+ intel_de_write(display, VLV_EMR, 0xffffffff);
+ intel_de_write(display, VLV_EMR, emr);
+}
+
+void vlv_display_error_irq_handler(struct intel_display *display,
+ u32 eir, u32 dpinvgtt)
+{
+ drm_dbg(display->drm, "Master Error, EIR 0x%08x\n", eir);
+
+ if (eir & VLV_ERROR_PAGE_TABLE)
+ vlv_page_table_error_irq_handler(display, dpinvgtt);
+}
+
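
vlv_display_error_irq_ack() and vlv_display_error_irq_handler() are split so the EIR (and, for page-table errors, DPINVGTT) state can be captured and cleared before the heavier reporting runs. The interrupt handler that calls them is not part of this section; assuming it reacts to I915_MASTER_ERROR_INTERRUPT in the display IIR, the pairing would look roughly like this (example_vlv_error_irq is a sketch, not the driver's actual handler):

/* Sketch of how the ack/handle pair above is meant to be used from an
 * interrupt handler (illustrative; the real caller is not in this diff). */
static void example_vlv_error_irq(struct intel_display *display, u32 iir)
{
        u32 eir = 0, dpinvgtt = 0;

        if (iir & I915_MASTER_ERROR_INTERRUPT) {
                /* capture and clear the error state first... */
                vlv_display_error_irq_ack(display, &eir, &dpinvgtt);
                /* ...then report it, including any per-pipe GTT faults */
                vlv_display_error_irq_handler(display, eir, dpinvgtt);
        }
}
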
static void _vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_display *display = &dev_priv->display;
if (IS_CHERRYVIEW(dev_priv))
- intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
+ intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
else
- intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
+ intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
+
+ gen2_error_reset(to_intel_uncore(display->drm),
+ VLV_ERROR_REGS);
i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
- intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT(dev_priv), 0, 0);
+ intel_de_rmw(display, PORT_HOTPLUG_STAT(dev_priv), 0, 0);
i9xx_pipestat_irq_reset(dev_priv);
- gen2_irq_reset(uncore, VLV_IRQ_REGS);
+ intel_display_irq_regs_reset(display, VLV_IRQ_REGS);
dev_priv->irq_mask = ~0u;
}
@@ -1507,19 +1921,25 @@ void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
void i9xx_display_irq_reset(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
+
if (I915_HAS_HOTPLUG(i915)) {
i915_hotplug_interrupt_update(i915, 0xffffffff, 0);
- intel_uncore_rmw(&i915->uncore,
- PORT_HOTPLUG_STAT(i915), 0, 0);
+ intel_de_rmw(display, PORT_HOTPLUG_STAT(i915), 0, 0);
}
i9xx_pipestat_irq_reset(i915);
}
-void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
+static u32 vlv_error_mask(void)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ /* TODO enable other errors too? */
+ return VLV_ERROR_PAGE_TABLE;
+}
+void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+ struct intel_display *display = &dev_priv->display;
u32 pipestat_mask;
u32 enable_mask;
enum pipe pipe;
@@ -1527,6 +1947,18 @@ void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
if (!dev_priv->display.irq.vlv_display_irqs_enabled)
return;
+ if (IS_CHERRYVIEW(dev_priv))
+ intel_de_write(display, DPINVGTT,
+ DPINVGTT_STATUS_MASK_CHV |
+ DPINVGTT_EN_MASK_CHV);
+ else
+ intel_de_write(display, DPINVGTT,
+ DPINVGTT_STATUS_MASK_VLV |
+ DPINVGTT_EN_MASK_VLV);
+
+ gen2_error_init(to_intel_uncore(display->drm),
+ VLV_ERROR_REGS, ~vlv_error_mask());
+
pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
@@ -1537,7 +1969,8 @@ void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_LPE_PIPE_A_INTERRUPT |
- I915_LPE_PIPE_B_INTERRUPT;
+ I915_LPE_PIPE_B_INTERRUPT |
+ I915_MASTER_ERROR_INTERRUPT;
if (IS_CHERRYVIEW(dev_priv))
enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
@@ -1547,32 +1980,32 @@ void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
dev_priv->irq_mask = ~enable_mask;
- gen2_irq_init(uncore, VLV_IRQ_REGS, dev_priv->irq_mask, enable_mask);
+ intel_display_irq_regs_init(display, VLV_IRQ_REGS, dev_priv->irq_mask, enable_mask);
}
void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_display *display = &dev_priv->display;
enum pipe pipe;
if (!HAS_DISPLAY(dev_priv))
return;
- intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
- intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
+ intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
+ intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
for_each_pipe(dev_priv, pipe)
- if (intel_display_power_is_enabled(dev_priv,
+ if (intel_display_power_is_enabled(display,
POWER_DOMAIN_PIPE(pipe)))
- gen2_irq_reset(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe));
+ intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
- gen2_irq_reset(uncore, GEN8_DE_PORT_IRQ_REGS);
- gen2_irq_reset(uncore, GEN8_DE_MISC_IRQ_REGS);
+ intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
+ intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);
}
void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
@@ -1580,7 +2013,7 @@ void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
+ intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);
if (DISPLAY_VER(dev_priv) >= 12) {
enum transcoder trans;
@@ -1589,42 +2022,42 @@ void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
enum intel_display_power_domain domain;
domain = POWER_DOMAIN_TRANSCODER(trans);
- if (!intel_display_power_is_enabled(dev_priv, domain))
+ if (!intel_display_power_is_enabled(display, domain))
continue;
- intel_uncore_write(uncore,
- TRANS_PSR_IMR(dev_priv, trans),
- 0xffffffff);
- intel_uncore_write(uncore,
- TRANS_PSR_IIR(dev_priv, trans),
- 0xffffffff);
+ intel_de_write(display,
+ TRANS_PSR_IMR(dev_priv, trans),
+ 0xffffffff);
+ intel_de_write(display,
+ TRANS_PSR_IIR(dev_priv, trans),
+ 0xffffffff);
}
} else {
- intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
- intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
+ intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
+ intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
}
for_each_pipe(dev_priv, pipe)
- if (intel_display_power_is_enabled(dev_priv,
+ if (intel_display_power_is_enabled(display,
POWER_DOMAIN_PIPE(pipe)))
- gen2_irq_reset(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe));
+ intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
- gen2_irq_reset(uncore, GEN8_DE_PORT_IRQ_REGS);
- gen2_irq_reset(uncore, GEN8_DE_MISC_IRQ_REGS);
+ intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
+ intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);
if (DISPLAY_VER(dev_priv) >= 14)
- gen2_irq_reset(uncore, PICAINTERRUPT_IRQ_REGS);
+ intel_display_irq_regs_reset(display, PICAINTERRUPT_IRQ_REGS);
else
- gen2_irq_reset(uncore, GEN11_DE_HPD_IRQ_REGS);
+ intel_display_irq_regs_reset(display, GEN11_DE_HPD_IRQ_REGS);
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- gen2_irq_reset(uncore, SDE_IRQ_REGS);
+ intel_display_irq_regs_reset(display, SDE_IRQ_REGS);
}
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_display *display = &dev_priv->display;
u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
gen8_de_pipe_flip_done_mask(dev_priv);
enum pipe pipe;
@@ -1637,9 +2070,9 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
}
for_each_pipe_masked(dev_priv, pipe, pipe_mask)
- gen2_irq_init(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe),
- dev_priv->display.irq.de_irq_mask[pipe],
- ~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier);
+ intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
+ dev_priv->display.irq.de_irq_mask[pipe],
+ ~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -1647,7 +2080,7 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
u8 pipe_mask)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_display *display = &dev_priv->display;
enum pipe pipe;
spin_lock_irq(&dev_priv->irq_lock);
@@ -1658,7 +2091,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
}
for_each_pipe_masked(dev_priv, pipe, pipe_mask)
- gen2_irq_reset(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe));
+ intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
spin_unlock_irq(&dev_priv->irq_lock);
@@ -1679,7 +2112,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
*/
static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_display *display = &dev_priv->display;
u32 mask;
if (HAS_PCH_NOP(dev_priv))
@@ -1692,7 +2125,7 @@ static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
else
mask = SDE_GMBUS_CPT;
- gen2_irq_init(uncore, SDE_IRQ_REGS, ~mask, 0xffffffff);
+ intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
}
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
@@ -1725,7 +2158,7 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
void ilk_de_irq_postinstall(struct drm_i915_private *i915)
{
- struct intel_uncore *uncore = &i915->uncore;
+ struct intel_display *display = &i915->display;
u32 display_mask, extra_mask;
if (DISPLAY_VER(i915) >= 7) {
@@ -1738,7 +2171,8 @@ void ilk_de_irq_postinstall(struct drm_i915_private *i915)
DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
DE_DP_A_HOTPLUG_IVB);
} else {
- display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+ display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE |
+ DE_PCH_EVENT | DE_GTT_FAULT |
DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
DE_PIPEA_CRC_DONE | DE_POISON);
extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
@@ -1749,7 +2183,7 @@ void ilk_de_irq_postinstall(struct drm_i915_private *i915)
}
if (IS_HASWELL(i915)) {
- gen2_assert_iir_is_zero(uncore, EDP_PSR_IIR);
+ intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
display_mask |= DE_EDP_PSR_INT_HSW;
}
@@ -1760,8 +2194,8 @@ void ilk_de_irq_postinstall(struct drm_i915_private *i915)
ibx_irq_postinstall(i915);
- gen2_irq_init(uncore, DE_IRQ_REGS, i915->irq_mask,
- display_mask | extra_mask);
+ intel_display_irq_regs_init(display, DE_IRQ_REGS, i915->irq_mask,
+ display_mask | extra_mask);
}
static void mtp_irq_postinstall(struct drm_i915_private *i915);
@@ -1770,7 +2204,6 @@ static void icp_irq_postinstall(struct drm_i915_private *i915);
void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
struct intel_display *display = &dev_priv->display;
- struct intel_uncore *uncore = &dev_priv->uncore;
u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
GEN8_PIPE_CDCLK_CRC_DONE;
@@ -1833,80 +2266,84 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
enum intel_display_power_domain domain;
domain = POWER_DOMAIN_TRANSCODER(trans);
- if (!intel_display_power_is_enabled(dev_priv, domain))
+ if (!intel_display_power_is_enabled(display, domain))
continue;
- gen2_assert_iir_is_zero(uncore,
- TRANS_PSR_IIR(dev_priv, trans));
+ intel_display_irq_regs_assert_irr_is_zero(display,
+ TRANS_PSR_IIR(dev_priv, trans));
}
} else {
- gen2_assert_iir_is_zero(uncore, EDP_PSR_IIR);
+ intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
}
for_each_pipe(dev_priv, pipe) {
dev_priv->display.irq.de_irq_mask[pipe] = ~de_pipe_masked;
- if (intel_display_power_is_enabled(dev_priv,
+ if (intel_display_power_is_enabled(display,
POWER_DOMAIN_PIPE(pipe)))
- gen2_irq_init(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe),
- dev_priv->display.irq.de_irq_mask[pipe],
- de_pipe_enables);
+ intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
+ dev_priv->display.irq.de_irq_mask[pipe],
+ de_pipe_enables);
}
- gen2_irq_init(uncore, GEN8_DE_PORT_IRQ_REGS, ~de_port_masked, de_port_enables);
- gen2_irq_init(uncore, GEN8_DE_MISC_IRQ_REGS, ~de_misc_masked, de_misc_masked);
+ intel_display_irq_regs_init(display, GEN8_DE_PORT_IRQ_REGS, ~de_port_masked,
+ de_port_enables);
+ intel_display_irq_regs_init(display, GEN8_DE_MISC_IRQ_REGS, ~de_misc_masked,
+ de_misc_masked);
if (IS_DISPLAY_VER(dev_priv, 11, 13)) {
u32 de_hpd_masked = 0;
u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
GEN11_DE_TBT_HOTPLUG_MASK;
- gen2_irq_init(uncore, GEN11_DE_HPD_IRQ_REGS, ~de_hpd_masked,
- de_hpd_enables);
+ intel_display_irq_regs_init(display, GEN11_DE_HPD_IRQ_REGS, ~de_hpd_masked,
+ de_hpd_enables);
}
}
static void mtp_irq_postinstall(struct drm_i915_private *i915)
{
- struct intel_uncore *uncore = &i915->uncore;
+ struct intel_display *display = &i915->display;
u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT;
u32 de_hpd_mask = XELPDP_AUX_TC_MASK;
u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK |
XELPDP_TBT_HOTPLUG_MASK;
- gen2_irq_init(uncore, PICAINTERRUPT_IRQ_REGS, ~de_hpd_mask,
- de_hpd_enables);
+ intel_display_irq_regs_init(display, PICAINTERRUPT_IRQ_REGS, ~de_hpd_mask,
+ de_hpd_enables);
- gen2_irq_init(uncore, SDE_IRQ_REGS, ~sde_mask, 0xffffffff);
+ intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~sde_mask, 0xffffffff);
}
static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_display *display = &dev_priv->display;
u32 mask = SDE_GMBUS_ICP;
- gen2_irq_init(uncore, SDE_IRQ_REGS, ~mask, 0xffffffff);
+ intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
}
void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
if (!HAS_DISPLAY(dev_priv))
return;
gen8_de_irq_postinstall(dev_priv);
- intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
- GEN11_DISPLAY_IRQ_ENABLE);
+ intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
}
void dg1_de_irq_postinstall(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
+
if (!HAS_DISPLAY(i915))
return;
gen8_de_irq_postinstall(i915);
- intel_uncore_write(&i915->uncore, GEN11_DISPLAY_INT_CTL,
- GEN11_DISPLAY_IRQ_ENABLE);
+ intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
}
void intel_display_irq_init(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.h b/drivers/gpu/drm/i915/display/intel_display_irq.h
index b077712b7be1..d9867cd0a220 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.h
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.h
@@ -11,8 +11,9 @@
#include "intel_display_limits.h"
enum pipe;
-struct drm_i915_private;
struct drm_crtc;
+struct drm_i915_private;
+struct intel_display;
void valleyview_enable_display_irqs(struct drm_i915_private *i915);
void valleyview_disable_display_irqs(struct drm_i915_private *i915);
@@ -64,7 +65,7 @@ void gen8_de_irq_postinstall(struct drm_i915_private *i915);
void gen11_de_irq_postinstall(struct drm_i915_private *i915);
void dg1_de_irq_postinstall(struct drm_i915_private *i915);
-u32 i915_pipestat_enable_mask(struct drm_i915_private *i915, enum pipe pipe);
+u32 i915_pipestat_enable_mask(struct intel_display *display, enum pipe pipe);
void i915_enable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask);
void i915_disable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask);
void i915_enable_asle_pipestat(struct drm_i915_private *i915);
@@ -75,6 +76,9 @@ void i915_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_
void i965_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
void valleyview_pipestat_irq_handler(struct drm_i915_private *i915, u32 pipe_stats[I915_MAX_PIPES]);
+void vlv_display_error_irq_ack(struct intel_display *display, u32 *eir, u32 *dpinvgtt);
+void vlv_display_error_irq_handler(struct intel_display *display, u32 eir, u32 dpinvgtt);
+
void intel_display_irq_init(struct drm_i915_private *i915);
void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable);
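Illustrative caller-side sketch (not part of this patch; the function name below is hypothetical): after this conversion, display IRQ code derives a struct intel_display pointer and uses the intel_de_*() / intel_display_irq_regs_*() helpers instead of reaching into i915->uncore directly.

/* Hypothetical example, mirroring the conversions in this patch. */
static void example_de_misc_irq_reset(struct drm_i915_private *i915)
{
	struct intel_display *display = &i915->display;

	/* previously: gen2_irq_reset(&i915->uncore, GEN8_DE_MISC_IRQ_REGS); */
	intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);

	/* previously: intel_uncore_write(&i915->uncore, GEN11_DISPLAY_INT_CTL, 0); */
	intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);
}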
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.c b/drivers/gpu/drm/i915/display/intel_display_params.c
index f92e4640a613..c4f1ab43fc0c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.c
+++ b/drivers/gpu/drm/i915/display/intel_display_params.c
@@ -130,7 +130,7 @@ intel_display_param_named_unsafe(enable_psr2_sel_fetch, bool, 0400,
intel_display_param_named_unsafe(enable_dmc_wl, int, 0400,
"Enable DMC wakelock "
- "(-1=use per-chip default, 0=disabled, 1=enabled) "
+ "(-1=use per-chip default, 0=disabled, 1=enabled, 2=match any register, 3=always locked) "
"Default: -1");
__maybe_unused
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index d3b8453a1705..f7171e6932dc 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -224,7 +224,7 @@ static bool __intel_display_power_is_enabled(struct intel_display *display,
/**
* intel_display_power_is_enabled - check for a power domain
- * @dev_priv: i915 device instance
+ * @display: display device instance
* @domain: power domain to check
*
* This function can be used to check the hw power domain state. It is mostly
@@ -239,10 +239,9 @@ static bool __intel_display_power_is_enabled(struct intel_display *display,
* Returns:
* True when the power domain is enabled, false otherwise.
*/
-bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+bool intel_display_power_is_enabled(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct intel_display *display = &dev_priv->display;
struct i915_power_domains *power_domains = &display->power.domains;
bool ret;
@@ -500,7 +499,7 @@ __intel_display_power_get_domain(struct intel_display *display,
/**
* intel_display_power_get - grab a power domain reference
- * @dev_priv: i915 device instance
+ * @display: display device instance
* @domain: power domain to reference
*
* This function grabs a power domain reference for @domain and ensures that the
@@ -510,10 +509,10 @@ __intel_display_power_get_domain(struct intel_display *display,
* Any power domain reference obtained by this function must have a symmetric
* call to intel_display_power_put() to release the reference again.
*/
-intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
+intel_wakeref_t intel_display_power_get(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
@@ -526,7 +525,7 @@ intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
/**
* intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
- * @dev_priv: i915 device instance
+ * @display: display device instance
* @domain: power domain to reference
*
* This function grabs a power domain reference for @domain and ensures that the
@@ -537,10 +536,10 @@ intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
* call to intel_display_power_put() to release the reference again.
*/
intel_wakeref_t
-intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+intel_display_power_get_if_enabled(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
intel_wakeref_t wakeref;
bool is_enabled;
@@ -696,7 +695,7 @@ out_verify:
/**
* __intel_display_power_put_async - release a power domain reference asynchronously
- * @i915: i915 device instance
+ * @display: display device instance
* @domain: power domain to reference
* @wakeref: wakeref acquired for the reference that is being released
* @delay_ms: delay of powering down the power domain
@@ -707,12 +706,12 @@ out_verify:
* The power down is delayed by @delay_ms if this is >= 0, or by a default
* 100 ms otherwise.
*/
-void __intel_display_power_put_async(struct drm_i915_private *i915,
+void __intel_display_power_put_async(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref,
int delay_ms)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct intel_runtime_pm *rpm = &i915->runtime_pm;
intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
@@ -754,7 +753,7 @@ out_verify:
/**
* intel_display_power_flush_work - flushes the async display power disabling work
- * @i915: i915 device instance
+ * @display: display device instance
*
* Flushes any pending work that was scheduled by a preceding
* intel_display_power_put_async() call, completing the disabling of the
@@ -764,9 +763,9 @@ out_verify:
* function returns; to ensure that the work handler isn't running use
* intel_display_power_flush_work_sync() instead.
*/
-void intel_display_power_flush_work(struct drm_i915_private *i915)
+void intel_display_power_flush_work(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct intel_power_domain_mask async_put_mask;
intel_wakeref_t work_wakeref;
@@ -800,10 +799,9 @@ out_verify:
static void
intel_display_power_flush_work_sync(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
- intel_display_power_flush_work(i915);
+ intel_display_power_flush_work(display);
cancel_async_put_work(power_domains, true);
verify_async_put_domains_state(power_domains);
@@ -814,7 +812,7 @@ intel_display_power_flush_work_sync(struct intel_display *display)
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
* intel_display_power_put - release a power domain reference
- * @dev_priv: i915 device instance
+ * @display: display device instance
* @domain: power domain to reference
* @wakeref: wakeref acquired for the reference that is being released
*
@@ -822,11 +820,11 @@ intel_display_power_flush_work_sync(struct intel_display *display)
* intel_display_power_get() and might power down the corresponding hardware
* block right away if this is the last reference.
*/
-void intel_display_power_put(struct drm_i915_private *dev_priv,
+void intel_display_power_put(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
__intel_display_power_put(display, domain);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
@@ -834,7 +832,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
#else
/**
* intel_display_power_put_unchecked - release an unchecked power domain reference
- * @dev_priv: i915 device instance
+ * @display: display device instance
* @domain: power domain to reference
*
* This function drops the power domain reference obtained by
@@ -842,13 +840,13 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
* block right away if this is the last reference.
*
* This function is only for the power domain code's internal use to suppress wakeref
- * tracking when the correspondig debug kconfig option is disabled, should not
+ * tracking when the corresponding debug kconfig option is disabled, should not
* be used otherwise.
*/
-void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+void intel_display_power_put_unchecked(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
__intel_display_power_put(display, domain);
intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
@@ -856,16 +854,15 @@ void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
#endif
void
-intel_display_power_get_in_set(struct drm_i915_private *i915,
+intel_display_power_get_in_set(struct intel_display *display,
struct intel_display_power_domain_set *power_domain_set,
enum intel_display_power_domain domain)
{
- struct intel_display *display = &i915->display;
intel_wakeref_t __maybe_unused wf;
drm_WARN_ON(display->drm, test_bit(domain, power_domain_set->mask.bits));
- wf = intel_display_power_get(i915, domain);
+ wf = intel_display_power_get(display, domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
power_domain_set->wakerefs[domain] = wf;
#endif
@@ -873,16 +870,15 @@ intel_display_power_get_in_set(struct drm_i915_private *i915,
}
bool
-intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
+intel_display_power_get_in_set_if_enabled(struct intel_display *display,
struct intel_display_power_domain_set *power_domain_set,
enum intel_display_power_domain domain)
{
- struct intel_display *display = &i915->display;
intel_wakeref_t wf;
drm_WARN_ON(display->drm, test_bit(domain, power_domain_set->mask.bits));
- wf = intel_display_power_get_if_enabled(i915, domain);
+ wf = intel_display_power_get_if_enabled(display, domain);
if (!wf)
return false;
@@ -895,11 +891,10 @@ intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
}
void
-intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
+intel_display_power_put_mask_in_set(struct intel_display *display,
struct intel_display_power_domain_set *power_domain_set,
struct intel_power_domain_mask *mask)
{
- struct intel_display *display = &i915->display;
enum intel_display_power_domain domain;
drm_WARN_ON(display->drm,
@@ -911,7 +906,7 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
#endif
- intel_display_power_put(i915, domain, wf);
+ intel_display_power_put(display, domain, wf);
clear_bit(domain, power_domain_set->mask.bits);
}
}
@@ -999,7 +994,7 @@ static u32 get_allowed_dc_mask(struct intel_display *display, int enable_dc)
* intel_power_domains_init - initializes the power domain structures
* @display: display device instance
*
- * Initializes the power domain structures for @dev_priv depending upon the
+ * Initializes the power domain structures for @display depending upon the
* supported platform.
*/
int intel_power_domains_init(struct intel_display *display)
@@ -1061,10 +1056,9 @@ static void gen9_dbuf_slice_set(struct intel_display *display,
slice, str_enable_disable(enable));
}
-void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
+void gen9_dbuf_slices_update(struct intel_display *display,
u8 req_slices)
{
- struct intel_display *display = &dev_priv->display;
struct i915_power_domains *power_domains = &display->power.domains;
u8 slice_mask = DISPLAY_INFO(display)->dbuf.slice_mask;
enum dbuf_slice slice;
@@ -1095,10 +1089,9 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
static void gen9_dbuf_enable(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u8 slices_mask;
- display->dbuf.enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
+ display->dbuf.enabled_slices = intel_enabled_dbuf_slices_mask(display);
slices_mask = BIT(DBUF_S1) | display->dbuf.enabled_slices;
@@ -1109,14 +1102,12 @@ static void gen9_dbuf_enable(struct intel_display *display)
* Just power up at least 1 slice, we will
* figure out later which slices we have and what we need.
*/
- gen9_dbuf_slices_update(dev_priv, slices_mask);
+ gen9_dbuf_slices_update(display, slices_mask);
}
static void gen9_dbuf_disable(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- gen9_dbuf_slices_update(dev_priv, 0);
+ gen9_dbuf_slices_update(display, 0);
if (DISPLAY_VER(display) >= 14)
intel_pmdemand_program_dbuf(display, 0);
@@ -1126,9 +1117,6 @@ static void gen12_dbuf_slices_config(struct intel_display *display)
{
enum dbuf_slice slice;
- if (display->platform.alderlake_p)
- return;
-
for_each_dbuf_slice(display, slice)
intel_de_rmw(display, DBUF_CTL_S(slice),
DBUF_TRACKER_STATE_SERVICE_MASK,
@@ -1663,7 +1651,7 @@ static void icl_display_core_init(struct intel_display *display,
return;
/* 2. Initialize all combo phys */
- intel_combo_phy_init(dev_priv);
+ intel_combo_phy_init(display);
/*
* 3. Enable Power Well 1 (PG1).
@@ -1681,7 +1669,7 @@ static void icl_display_core_init(struct intel_display *display,
/* 4. Enable CDCLK. */
intel_cdclk_init_hw(display);
- if (DISPLAY_VER(display) >= 12)
+ if (DISPLAY_VER(display) == 12 || display->platform.dg2)
gen12_dbuf_slices_config(display);
/* 5. Enable DBUF. */
@@ -1696,7 +1684,7 @@ static void icl_display_core_init(struct intel_display *display,
/* 8. Ensure PHYs have completed calibration and adaptation */
if (display->platform.dg2)
- intel_snps_phy_wait_for_calibration(dev_priv);
+ intel_snps_phy_wait_for_calibration(display);
/* 9. XE2_HPD: Program CHICKEN_MISC_2 before any cursor or planes are enabled */
if (DISPLAY_VERx100(display) == 1401)
@@ -1726,7 +1714,6 @@ static void icl_display_core_init(struct intel_display *display,
static void icl_display_core_uninit(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
@@ -1736,7 +1723,7 @@ static void icl_display_core_uninit(struct intel_display *display)
gen9_disable_dc_states(display);
intel_dmc_disable_program(display);
- /* 1. Disable all display engine functions -> aready done */
+ /* 1. Disable all display engine functions -> already done */
/* 2. Disable DBUF */
gen9_dbuf_disable(display);
@@ -1759,7 +1746,7 @@ static void icl_display_core_uninit(struct intel_display *display)
mutex_unlock(&power_domains->lock);
/* 5. */
- intel_combo_phy_uninit(dev_priv);
+ intel_combo_phy_uninit(display);
}
static void chv_phy_control_init(struct intel_display *display)
@@ -1966,12 +1953,12 @@ void intel_power_domains_init_hw(struct intel_display *display, bool resume)
*/
drm_WARN_ON(display->drm, power_domains->init_wakeref);
power_domains->init_wakeref =
- intel_display_power_get(i915, POWER_DOMAIN_INIT);
+ intel_display_power_get(display, POWER_DOMAIN_INIT);
/* Disable power support if the user asked so. */
if (!display->params.disable_power_well) {
drm_WARN_ON(display->drm, power_domains->disable_wakeref);
- display->power.domains.disable_wakeref = intel_display_power_get(i915,
+ display->power.domains.disable_wakeref = intel_display_power_get(display,
POWER_DOMAIN_INIT);
}
intel_power_domains_sync_hw(display);
@@ -1998,7 +1985,7 @@ void intel_power_domains_driver_remove(struct intel_display *display)
/* Remove the refcount we took to keep power well support disabled. */
if (!display->params.disable_power_well)
- intel_display_power_put(i915, POWER_DOMAIN_INIT,
+ intel_display_power_put(display, POWER_DOMAIN_INIT,
fetch_and_zero(&display->power.domains.disable_wakeref));
intel_display_power_flush_work_sync(display);
@@ -2054,11 +2041,10 @@ void intel_power_domains_sanitize_state(struct intel_display *display)
*/
void intel_power_domains_enable(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&display->power.domains.init_wakeref);
- intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_INIT, wakeref);
intel_power_domains_verify_state(display);
}
@@ -2071,12 +2057,11 @@ void intel_power_domains_enable(struct intel_display *display)
*/
void intel_power_domains_disable(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
drm_WARN_ON(display->drm, power_domains->init_wakeref);
power_domains->init_wakeref =
- intel_display_power_get(i915, POWER_DOMAIN_INIT);
+ intel_display_power_get(display, POWER_DOMAIN_INIT);
intel_power_domains_verify_state(display);
}
@@ -2094,12 +2079,11 @@ void intel_power_domains_disable(struct intel_display *display)
*/
void intel_power_domains_suspend(struct intel_display *display, bool s2idle)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&power_domains->init_wakeref);
- intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_INIT, wakeref);
/*
* In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
@@ -2110,7 +2094,7 @@ void intel_power_domains_suspend(struct intel_display *display, bool s2idle)
*/
if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) && s2idle &&
intel_dmc_has_payload(display)) {
- intel_display_power_flush_work(i915);
+ intel_display_power_flush_work(display);
intel_power_domains_verify_state(display);
return;
}
@@ -2120,10 +2104,10 @@ void intel_power_domains_suspend(struct intel_display *display, bool s2idle)
* power wells if power domains must be deinitialized for suspend.
*/
if (!display->params.disable_power_well)
- intel_display_power_put(i915, POWER_DOMAIN_INIT,
+ intel_display_power_put(display, POWER_DOMAIN_INIT,
fetch_and_zero(&display->power.domains.disable_wakeref));
- intel_display_power_flush_work(i915);
+ intel_display_power_flush_work(display);
intel_power_domains_verify_state(display);
if (DISPLAY_VER(display) >= 11)
@@ -2148,7 +2132,6 @@ void intel_power_domains_suspend(struct intel_display *display, bool s2idle)
*/
void intel_power_domains_resume(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
if (power_domains->display_core_suspended) {
@@ -2157,7 +2140,7 @@ void intel_power_domains_resume(struct intel_display *display)
} else {
drm_WARN_ON(display->drm, power_domains->init_wakeref);
power_domains->init_wakeref =
- intel_display_power_get(i915, POWER_DOMAIN_INIT);
+ intel_display_power_get(display, POWER_DOMAIN_INIT);
}
intel_power_domains_verify_state(display);
@@ -2327,14 +2310,16 @@ void intel_display_power_resume(struct intel_display *display)
}
}
-void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m)
+void intel_display_power_debug(struct intel_display *display, struct seq_file *m)
{
- struct intel_display *display = &i915->display;
struct i915_power_domains *power_domains = &display->power.domains;
int i;
mutex_lock(&power_domains->lock);
+ seq_printf(m, "Runtime power status: %s\n",
+ str_enabled_disabled(!power_domains->init_wakeref));
+
seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
for (i = 0; i < power_domains->power_well_count; i++) {
struct i915_power_well *power_well;
@@ -2510,9 +2495,8 @@ intel_port_domains_for_port(struct intel_display *display, enum port port)
}
enum intel_display_power_domain
-intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port)
+intel_display_power_ddi_io_domain(struct intel_display *display, enum port port)
{
- struct intel_display *display = &i915->display;
const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(display, port);
if (drm_WARN_ON(display->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID))
@@ -2522,9 +2506,8 @@ intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port)
}
enum intel_display_power_domain
-intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port)
+intel_display_power_ddi_lanes_domain(struct intel_display *display, enum port port)
{
- struct intel_display *display = &i915->display;
const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(display, port);
if (drm_WARN_ON(display->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID))
@@ -2549,9 +2532,8 @@ intel_port_domains_for_aux_ch(struct intel_display *display, enum aux_ch aux_ch)
}
enum intel_display_power_domain
-intel_display_power_aux_io_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
+intel_display_power_aux_io_domain(struct intel_display *display, enum aux_ch aux_ch)
{
- struct intel_display *display = &i915->display;
const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);
if (drm_WARN_ON(display->drm, !domains || domains->aux_io == POWER_DOMAIN_INVALID))
@@ -2561,9 +2543,8 @@ intel_display_power_aux_io_domain(struct drm_i915_private *i915, enum aux_ch aux
}
enum intel_display_power_domain
-intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
+intel_display_power_legacy_aux_domain(struct intel_display *display, enum aux_ch aux_ch)
{
- struct intel_display *display = &i915->display;
const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);
if (drm_WARN_ON(display->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID))
@@ -2573,9 +2554,8 @@ intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch
}
enum intel_display_power_domain
-intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
+intel_display_power_tbt_aux_domain(struct intel_display *display, enum aux_ch aux_ch)
{
- struct intel_display *display = &i915->display;
const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);
if (drm_WARN_ON(display->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID))
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 7b294eec4431..1b53d67f9b60 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -13,7 +13,6 @@
enum aux_ch;
enum port;
-struct drm_i915_private;
struct i915_power_well;
struct intel_display;
struct intel_encoder;
@@ -118,12 +117,13 @@ enum intel_display_power_domain {
POWER_DOMAIN_INVALID = POWER_DOMAIN_NUM,
};
-#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
+#define POWER_DOMAIN_PIPE(pipe) \
+ ((enum intel_display_power_domain)((pipe) - PIPE_A + POWER_DOMAIN_PIPE_A))
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
- ((pipe) + POWER_DOMAIN_PIPE_PANEL_FITTER_A)
+ ((enum intel_display_power_domain)((pipe) - PIPE_A + POWER_DOMAIN_PIPE_PANEL_FITTER_A))
#define POWER_DOMAIN_TRANSCODER(tran) \
((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
- (tran) + POWER_DOMAIN_TRANSCODER_A)
+ (enum intel_display_power_domain)((tran) - TRANSCODER_A + POWER_DOMAIN_TRANSCODER_A))
struct intel_power_domain_mask {
DECLARE_BITMAP(bits, POWER_DOMAIN_NUM);
@@ -184,102 +184,102 @@ void intel_display_power_resume(struct intel_display *display);
void intel_display_power_set_target_dc_state(struct intel_display *display,
u32 state);
-bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+bool intel_display_power_is_enabled(struct intel_display *display,
enum intel_display_power_domain domain);
-intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
+intel_wakeref_t intel_display_power_get(struct intel_display *display,
enum intel_display_power_domain domain);
intel_wakeref_t
-intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+intel_display_power_get_if_enabled(struct intel_display *display,
enum intel_display_power_domain domain);
-void __intel_display_power_put_async(struct drm_i915_private *i915,
+void __intel_display_power_put_async(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref,
int delay_ms);
-void intel_display_power_flush_work(struct drm_i915_private *i915);
+void intel_display_power_flush_work(struct intel_display *display);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-void intel_display_power_put(struct drm_i915_private *dev_priv,
+void intel_display_power_put(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref);
static inline void
-intel_display_power_put_async(struct drm_i915_private *i915,
+intel_display_power_put_async(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
{
- __intel_display_power_put_async(i915, domain, wakeref, -1);
+ __intel_display_power_put_async(display, domain, wakeref, -1);
}
static inline void
-intel_display_power_put_async_delay(struct drm_i915_private *i915,
+intel_display_power_put_async_delay(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref,
int delay_ms)
{
- __intel_display_power_put_async(i915, domain, wakeref, delay_ms);
+ __intel_display_power_put_async(display, domain, wakeref, delay_ms);
}
#else
-void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+void intel_display_power_put_unchecked(struct intel_display *display,
enum intel_display_power_domain domain);
static inline void
-intel_display_power_put(struct drm_i915_private *i915,
+intel_display_power_put(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
{
- intel_display_power_put_unchecked(i915, domain);
+ intel_display_power_put_unchecked(display, domain);
}
static inline void
-intel_display_power_put_async(struct drm_i915_private *i915,
+intel_display_power_put_async(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
{
- __intel_display_power_put_async(i915, domain, INTEL_WAKEREF_DEF, -1);
+ __intel_display_power_put_async(display, domain, INTEL_WAKEREF_DEF, -1);
}
static inline void
-intel_display_power_put_async_delay(struct drm_i915_private *i915,
+intel_display_power_put_async_delay(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref,
int delay_ms)
{
- __intel_display_power_put_async(i915, domain, INTEL_WAKEREF_DEF, delay_ms);
+ __intel_display_power_put_async(display, domain, INTEL_WAKEREF_DEF, delay_ms);
}
#endif
void
-intel_display_power_get_in_set(struct drm_i915_private *i915,
+intel_display_power_get_in_set(struct intel_display *display,
struct intel_display_power_domain_set *power_domain_set,
enum intel_display_power_domain domain);
bool
-intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
+intel_display_power_get_in_set_if_enabled(struct intel_display *display,
struct intel_display_power_domain_set *power_domain_set,
enum intel_display_power_domain domain);
void
-intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
+intel_display_power_put_mask_in_set(struct intel_display *display,
struct intel_display_power_domain_set *power_domain_set,
struct intel_power_domain_mask *mask);
static inline void
-intel_display_power_put_all_in_set(struct drm_i915_private *i915,
+intel_display_power_put_all_in_set(struct intel_display *display,
struct intel_display_power_domain_set *power_domain_set)
{
- intel_display_power_put_mask_in_set(i915, power_domain_set, &power_domain_set->mask);
+ intel_display_power_put_mask_in_set(display, power_domain_set, &power_domain_set->mask);
}
-void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m);
+void intel_display_power_debug(struct intel_display *display, struct seq_file *m);
enum intel_display_power_domain
-intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port);
+intel_display_power_ddi_lanes_domain(struct intel_display *display, enum port port);
enum intel_display_power_domain
-intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port);
+intel_display_power_ddi_io_domain(struct intel_display *display, enum port port);
enum intel_display_power_domain
-intel_display_power_aux_io_domain(struct drm_i915_private *i915, enum aux_ch aux_ch);
+intel_display_power_aux_io_domain(struct intel_display *display, enum aux_ch aux_ch);
enum intel_display_power_domain
-intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch);
+intel_display_power_legacy_aux_domain(struct intel_display *display, enum aux_ch aux_ch);
enum intel_display_power_domain
-intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch);
+intel_display_power_tbt_aux_domain(struct intel_display *display, enum aux_ch aux_ch);
/*
* FIXME: We should probably switch this to a 0-based scheme to be consistent
@@ -293,15 +293,15 @@ enum dbuf_slice {
I915_MAX_DBUF_SLICES
};
-void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
+void gen9_dbuf_slices_update(struct intel_display *display,
u8 req_slices);
-#define with_intel_display_power(i915, domain, wf) \
- for ((wf) = intel_display_power_get((i915), (domain)); (wf); \
- intel_display_power_put_async((i915), (domain), (wf)), (wf) = NULL)
+#define with_intel_display_power(display, domain, wf) \
+ for ((wf) = intel_display_power_get((display), (domain)); (wf); \
+ intel_display_power_put_async((display), (domain), (wf)), (wf) = NULL)
-#define with_intel_display_power_if_enabled(i915, domain, wf) \
- for ((wf) = intel_display_power_get_if_enabled((i915), (domain)); (wf); \
- intel_display_power_put_async((i915), (domain), (wf)), (wf) = NULL)
+#define with_intel_display_power_if_enabled(display, domain, wf) \
+ for ((wf) = intel_display_power_get_if_enabled((display), (domain)); (wf); \
+ intel_display_power_put_async((display), (domain), (wf)), (wf) = NULL)
#endif /* __INTEL_DISPLAY_POWER_H__ */
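Illustrative caller-side sketch (not part of this patch; the function name is hypothetical): power domain API users now pass struct intel_display instead of drm_i915_private, and existing i915 callers can derive it as below.

/* Hypothetical example of the converted power domain API. */
static void example_power_domain_user(struct drm_i915_private *i915)
{
	struct intel_display *display = &i915->display;
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get(display, POWER_DOMAIN_INIT);
	/* ... touch display hardware ... */
	intel_display_power_put(display, POWER_DOMAIN_INIT, wakeref);

	/* or scoped, via the updated helper macro */
	with_intel_display_power(display, POWER_DOMAIN_INIT, wakeref) {
		/* ... */
	}
}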
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index 0c8ac1af6db7..e80e1fd611ca 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -3,6 +3,8 @@
* Copyright © 2022 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
#include "intel_display_core.h"
#include "intel_display_power_map.h"
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index f45a4f9ba23c..8ec87ffd87d2 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -507,7 +507,6 @@ static void
icl_tc_phy_aux_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
struct intel_digital_port *dig_port = aux_ch_to_digital_port(display, aux_ch);
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
@@ -539,7 +538,7 @@ icl_tc_phy_aux_power_well_enable(struct intel_display *display,
tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx);
- if (wait_for(intel_dkl_phy_read(dev_priv, DKL_CMN_UC_DW_27(tc_port)) &
+ if (wait_for(intel_dkl_phy_read(display, DKL_CMN_UC_DW_27(tc_port)) &
DKL_CMN_UC_DW27_UC_HEALTH, 1))
drm_warn(display->drm,
"Timeout waiting TC uC health\n");
@@ -550,10 +549,9 @@ static void
icl_aux_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum phy phy = icl_aux_pw_to_phy(display, power_well);
- if (intel_phy_is_tc(dev_priv, phy))
+ if (intel_phy_is_tc(display, phy))
return icl_tc_phy_aux_power_well_enable(display, power_well);
else if (display->platform.icelake)
return icl_combo_phy_aux_power_well_enable(display,
@@ -566,10 +564,9 @@ static void
icl_aux_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum phy phy = icl_aux_pw_to_phy(display, power_well);
- if (intel_phy_is_tc(dev_priv, phy))
+ if (intel_phy_is_tc(display, phy))
return hsw_power_well_disable(display, power_well);
else if (display->platform.icelake)
return icl_combo_phy_aux_power_well_disable(display,
@@ -962,8 +959,7 @@ static bool gen9_dc_off_power_well_enabled(struct intel_display *display,
static void gen9_assert_dbuf_enabled(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
- u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
+ u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(display);
u8 enabled_dbuf_slices = display->dbuf.enabled_slices;
drm_WARN(display->drm,
@@ -975,7 +971,6 @@ static void gen9_assert_dbuf_enabled(struct intel_display *display)
void gen9_disable_dc_states(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct intel_cdclk_config cdclk_config = {};
u32 old_state = power_domains->dc_state;
@@ -1015,7 +1010,7 @@ void gen9_disable_dc_states(struct intel_display *display)
* PHY's HW context for port B is lost after DC transitions,
* so we need to restore it manually.
*/
- intel_combo_phy_init(dev_priv);
+ intel_combo_phy_init(display);
}
static void gen9_dc_off_power_well_enable(struct intel_display *display,
@@ -1314,11 +1309,10 @@ static void vlv_dpio_cmn_power_well_enable(struct intel_display *display,
static void vlv_dpio_cmn_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe;
for_each_pipe(display, pipe)
- assert_pll_disabled(dev_priv, pipe);
+ assert_pll_disabled(display, pipe);
/* Assert common reset */
intel_de_rmw(display, DPIO_CTL, DPIO_CMNRST, 0);
@@ -1500,7 +1494,6 @@ static void chv_dpio_cmn_power_well_enable(struct intel_display *display,
static void chv_dpio_cmn_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
enum dpio_phy phy;
@@ -1510,11 +1503,11 @@ static void chv_dpio_cmn_power_well_disable(struct intel_display *display,
if (id == VLV_DISP_PW_DPIO_CMN_BC) {
phy = DPIO_PHY0;
- assert_pll_disabled(dev_priv, PIPE_A);
- assert_pll_disabled(dev_priv, PIPE_B);
+ assert_pll_disabled(display, PIPE_A);
+ assert_pll_disabled(display, PIPE_B);
} else {
phy = DPIO_PHY1;
- assert_pll_disabled(dev_priv, PIPE_C);
+ assert_pll_disabled(display, PIPE_C);
}
display->power.chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
@@ -1834,11 +1827,10 @@ tgl_tc_cold_off_power_well_is_enabled(struct intel_display *display,
static void xelpdp_aux_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
enum phy phy = icl_aux_pw_to_phy(display, power_well);
- if (intel_phy_is_tc(dev_priv, phy))
+ if (intel_phy_is_tc(display, phy))
icl_tc_port_assert_ref_held(display, power_well,
aux_ch_to_digital_port(display, aux_ch));
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h
index 338379dae44c..ec8e508d0593 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h
@@ -60,7 +60,7 @@ struct i915_power_well_instance {
/* unique identifier for this power well */
enum i915_power_well_id id;
/*
- * Arbitraty data associated with this power well. Platform and power
+ * Arbitrary data associated with this power well. Platform and power
* well specific.
*/
union {
@@ -77,7 +77,7 @@ struct i915_power_well_instance {
struct {
/*
* request/status flag index in the power well
- * constrol/status registers.
+ * control/status registers.
*/
u8 idx;
} hsw;
diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.c b/drivers/gpu/drm/i915/display/intel_display_reset.c
index 093b386c95e8..1f2798404f2c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reset.c
+++ b/drivers/gpu/drm/i915/display/intel_display_reset.c
@@ -7,51 +7,43 @@
#include "i915_drv.h"
#include "intel_clock_gating.h"
+#include "intel_cx0_phy.h"
#include "intel_display_driver.h"
#include "intel_display_reset.h"
#include "intel_display_types.h"
#include "intel_hotplug.h"
#include "intel_pps.h"
-static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
+bool intel_display_reset_test(struct intel_display *display)
{
- return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
- intel_has_gpu_reset(to_gt(dev_priv)));
+ return display->params.force_reset_modeset_test;
}
-void intel_display_reset_prepare(struct drm_i915_private *dev_priv)
+/* returns true if intel_display_reset_finish() needs to be called */
+bool intel_display_reset_prepare(struct intel_display *display,
+ modeset_stuck_fn modeset_stuck, void *context)
{
- struct drm_modeset_acquire_ctx *ctx = &dev_priv->display.restore.reset_ctx;
+ struct drm_modeset_acquire_ctx *ctx = &display->restore.reset_ctx;
struct drm_atomic_state *state;
int ret;
- if (!HAS_DISPLAY(dev_priv))
- return;
+ if (!HAS_DISPLAY(display))
+ return false;
- /* reset doesn't touch the display */
- if (!dev_priv->display.params.force_reset_modeset_test &&
- !gpu_reset_clobbers_display(dev_priv))
- return;
-
- /* We have a modeset vs reset deadlock, defensively unbreak it. */
- set_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
- smp_mb__after_atomic();
- wake_up_bit(&to_gt(dev_priv)->reset.flags, I915_RESET_MODESET);
-
- if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (atomic_read(&display->restore.pending_fb_pin)) {
+ drm_dbg_kms(display->drm,
"Modeset potentially stuck, unbreaking through wedging\n");
- intel_gt_set_wedged(to_gt(dev_priv));
+ modeset_stuck(context);
}
/*
* Need mode_config.mutex so that we don't
* trample ongoing ->detect() and whatnot.
*/
- mutex_lock(&dev_priv->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
drm_modeset_acquire_init(ctx, 0);
while (1) {
- ret = drm_modeset_lock_all_ctx(&dev_priv->drm, ctx);
+ ret = drm_modeset_lock_all_ctx(display->drm, ctx);
if (ret != -EDEADLK)
break;
@@ -61,38 +53,36 @@ void intel_display_reset_prepare(struct drm_i915_private *dev_priv)
* Disabling the crtcs gracefully seems nicer. Also the
* g33 docs say we should at least disable all the planes.
*/
- state = drm_atomic_helper_duplicate_state(&dev_priv->drm, ctx);
+ state = drm_atomic_helper_duplicate_state(display->drm, ctx);
if (IS_ERR(state)) {
ret = PTR_ERR(state);
- drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
+ drm_err(display->drm, "Duplicating state failed with %i\n",
ret);
- return;
+ return true;
}
- ret = drm_atomic_helper_disable_all(&dev_priv->drm, ctx);
+ ret = drm_atomic_helper_disable_all(display->drm, ctx);
if (ret) {
- drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
+ drm_err(display->drm, "Suspending crtc's failed with %i\n",
ret);
drm_atomic_state_put(state);
- return;
+ return true;
}
- dev_priv->display.restore.modeset_state = state;
+ display->restore.modeset_state = state;
state->acquire_ctx = ctx;
+
+ return true;
}
-void intel_display_reset_finish(struct drm_i915_private *i915)
+void intel_display_reset_finish(struct intel_display *display, bool test_only)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_modeset_acquire_ctx *ctx = &display->restore.reset_ctx;
struct drm_atomic_state *state;
int ret;
- if (!HAS_DISPLAY(i915))
- return;
-
- /* reset doesn't touch the display */
- if (!test_bit(I915_RESET_MODESET, &to_gt(i915)->reset.flags))
+ if (!HAS_DISPLAY(display))
return;
state = fetch_and_zero(&display->restore.modeset_state);
@@ -100,12 +90,12 @@ void intel_display_reset_finish(struct drm_i915_private *i915)
goto unlock;
/* reset doesn't touch the display */
- if (!gpu_reset_clobbers_display(i915)) {
+ if (test_only) {
/* for testing only restore the display */
ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
if (ret) {
- drm_WARN_ON(&i915->drm, ret == -EDEADLK);
- drm_err(&i915->drm,
+ drm_WARN_ON(display->drm, ret == -EDEADLK);
+ drm_err(display->drm,
"Restoring old state failed with %i\n", ret);
}
} else {
@@ -116,11 +106,12 @@ void intel_display_reset_finish(struct drm_i915_private *i915)
intel_pps_unlock_regs_wa(display);
intel_display_driver_init_hw(display);
intel_clock_gating_init(i915);
+ intel_cx0_pll_power_save_wa(display);
intel_hpd_init(i915);
ret = __intel_display_driver_resume(display, state, ctx);
if (ret)
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Restoring old state failed with %i\n", ret);
intel_hpd_poll_disable(i915);
@@ -130,7 +121,5 @@ void intel_display_reset_finish(struct drm_i915_private *i915)
unlock:
drm_modeset_drop_locks(ctx);
drm_modeset_acquire_fini(ctx);
- mutex_unlock(&i915->drm.mode_config.mutex);
-
- clear_bit_unlock(I915_RESET_MODESET, &to_gt(i915)->reset.flags);
+ mutex_unlock(&display->drm->mode_config.mutex);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.h b/drivers/gpu/drm/i915/display/intel_display_reset.h
index f06d0d35b86b..8b3bda134454 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reset.h
+++ b/drivers/gpu/drm/i915/display/intel_display_reset.h
@@ -6,9 +6,15 @@
#ifndef __INTEL_RESET_H__
#define __INTEL_RESET_H__
-struct drm_i915_private;
+#include <linux/types.h>
-void intel_display_reset_prepare(struct drm_i915_private *i915);
-void intel_display_reset_finish(struct drm_i915_private *i915);
+struct intel_display;
+
+typedef void modeset_stuck_fn(void *context);
+
+bool intel_display_reset_test(struct intel_display *display);
+bool intel_display_reset_prepare(struct intel_display *display,
+ modeset_stuck_fn modeset_stuck, void *context);
+void intel_display_reset_finish(struct intel_display *display, bool test_only);
#endif /* __INTEL_RESET_H__ */
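Illustrative sketch of the new reset-prepare contract (the i915/xe caller side is outside this hunk; names and the test_only handling below are assumptions): the driver supplies a modeset_stuck_fn callback and only calls intel_display_reset_finish() when prepare returned true.

/* Hypothetical callback: e.g. wedge the GPU so a stuck modeset can make progress. */
static void example_modeset_stuck(void *context)
{
}

static void example_reset_display(struct intel_display *display,
				  bool reset_clobbers_display)
{
	bool need_finish;

	if (!reset_clobbers_display && !intel_display_reset_test(display))
		return;

	need_finish = intel_display_reset_prepare(display, example_modeset_stuck,
						  display);

	/* ... the actual GPU/display reset happens here ... */

	if (need_finish)
		intel_display_reset_finish(display,
					   /* test_only */ !reset_clobbers_display);
}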
diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.c b/drivers/gpu/drm/i915/display/intel_display_rps.c
index 918d0327169a..4074a1879828 100644
--- a/drivers/gpu/drm/i915/display/intel_display_rps.c
+++ b/drivers/gpu/drm/i915/display/intel_display_rps.c
@@ -69,10 +69,12 @@ void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
}
-void intel_display_rps_mark_interactive(struct drm_i915_private *i915,
+void intel_display_rps_mark_interactive(struct intel_display *display,
struct intel_atomic_state *state,
bool interactive)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
if (state->rps_interactive == interactive)
return;
diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.h b/drivers/gpu/drm/i915/display/intel_display_rps.h
index e19009c2371a..556891edb2dd 100644
--- a/drivers/gpu/drm/i915/display/intel_display_rps.h
+++ b/drivers/gpu/drm/i915/display/intel_display_rps.h
@@ -10,12 +10,12 @@
struct dma_fence;
struct drm_crtc;
-struct drm_i915_private;
struct intel_atomic_state;
+struct intel_display;
void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
struct dma_fence *fence);
-void intel_display_rps_mark_interactive(struct drm_i915_private *i915,
+void intel_display_rps_mark_interactive(struct intel_display *display,
struct intel_atomic_state *state,
bool interactive);
diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.h b/drivers/gpu/drm/i915/display/intel_display_trace.h
index 338b9f7b20b8..27ebc32cb61a 100644
--- a/drivers/gpu/drm/i915/display/intel_display_trace.h
+++ b/drivers/gpu/drm/i915/display/intel_display_trace.h
@@ -4,7 +4,11 @@
*/
#undef TRACE_SYSTEM
+#ifdef I915
#define TRACE_SYSTEM i915
+#else
+#define TRACE_SYSTEM xe
+#endif
#if !defined(__INTEL_DISPLAY_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
#define __INTEL_DISPLAY_TRACE_H__
@@ -21,6 +25,7 @@
#include "intel_vblank.h"
#define __dev_name_display(display) dev_name((display)->drm->dev)
+#define __dev_name_drm(obj) dev_name((obj)->dev->dev)
#define __dev_name_kms(obj) dev_name((obj)->base.dev->dev)
/*
@@ -397,23 +402,24 @@ TRACE_EVENT(intel_plane_async_flip,
__entry->async_flip = async_flip;
),
- TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u, async_flip=%s",
+ TP_printk("dev %s, pipe %c, %s, frame=%u, scanline=%u, async_flip=%s",
__get_str(dev), __entry->pipe_name, __get_str(name),
__entry->frame, __entry->scanline, str_yes_no(__entry->async_flip))
);
TRACE_EVENT(intel_plane_update_noarm,
- TP_PROTO(struct intel_plane *plane, struct intel_crtc *crtc),
- TP_ARGS(plane, crtc),
+ TP_PROTO(const struct intel_plane_state *plane_state, struct intel_crtc *crtc),
+ TP_ARGS(plane_state, crtc),
TP_STRUCT__entry(
- __string(dev, __dev_name_kms(plane))
+ __string(dev, __dev_name_drm(plane_state->uapi.plane))
__field(char, pipe_name)
__field(u32, frame)
__field(u32, scanline)
+ __field(u32, format)
__array(int, src, 4)
__array(int, dst, 4)
- __string(name, plane->base.name)
+ __string(name, plane_state->uapi.plane->name)
),
TP_fast_assign(
@@ -422,29 +428,31 @@ TRACE_EVENT(intel_plane_update_noarm,
__entry->pipe_name = pipe_name(crtc->pipe);
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
- memcpy(__entry->src, &plane->base.state->src, sizeof(__entry->src));
- memcpy(__entry->dst, &plane->base.state->dst, sizeof(__entry->dst));
+ __entry->format = plane_state->hw.fb->format->format;
+ memcpy(__entry->src, &plane_state->uapi.src, sizeof(__entry->src));
+ memcpy(__entry->dst, &plane_state->uapi.dst, sizeof(__entry->dst));
),
- TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT,
+ TP_printk("dev %s, pipe %c, %s, frame=%u, scanline=%u, format=%p4cc, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT,
__get_str(dev), __entry->pipe_name, __get_str(name),
- __entry->frame, __entry->scanline,
+ __entry->frame, __entry->scanline, &__entry->format,
DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src),
DRM_RECT_ARG((const struct drm_rect *)__entry->dst))
);
TRACE_EVENT(intel_plane_update_arm,
- TP_PROTO(struct intel_plane *plane, struct intel_crtc *crtc),
- TP_ARGS(plane, crtc),
+ TP_PROTO(const struct intel_plane_state *plane_state, struct intel_crtc *crtc),
+ TP_ARGS(plane_state, crtc),
TP_STRUCT__entry(
- __string(dev, __dev_name_kms(plane))
+ __string(dev, __dev_name_drm(plane_state->uapi.plane))
__field(char, pipe_name)
__field(u32, frame)
__field(u32, scanline)
+ __field(u32, format)
__array(int, src, 4)
__array(int, dst, 4)
- __string(name, plane->base.name)
+ __string(name, plane_state->uapi.plane->name)
),
TP_fast_assign(
@@ -453,13 +461,14 @@ TRACE_EVENT(intel_plane_update_arm,
__entry->pipe_name = pipe_name(crtc->pipe);
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
- memcpy(__entry->src, &plane->base.state->src, sizeof(__entry->src));
- memcpy(__entry->dst, &plane->base.state->dst, sizeof(__entry->dst));
+ __entry->format = plane_state->hw.fb->format->format;
+ memcpy(__entry->src, &plane_state->uapi.src, sizeof(__entry->src));
+ memcpy(__entry->dst, &plane_state->uapi.dst, sizeof(__entry->dst));
),
- TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT,
+ TP_printk("dev %s, pipe %c, %s, frame=%u, scanline=%u, format=%p4cc, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT,
__get_str(dev), __entry->pipe_name, __get_str(name),
- __entry->frame, __entry->scanline,
+ __entry->frame, __entry->scanline, &__entry->format,
DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src),
DRM_RECT_ARG((const struct drm_rect *)__entry->dst))
);
@@ -484,11 +493,110 @@ TRACE_EVENT(intel_plane_disable_arm,
__entry->scanline = intel_get_crtc_scanline(crtc);
),
- TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u",
+ TP_printk("dev %s, pipe %c, %s, frame=%u, scanline=%u",
__get_str(dev), __entry->pipe_name, __get_str(name),
__entry->frame, __entry->scanline)
);
+TRACE_EVENT(intel_plane_scaler_update_arm,
+ TP_PROTO(struct intel_plane *plane,
+ int scaler_id, int x, int y, int w, int h),
+ TP_ARGS(plane, scaler_id, x, y, w, h),
+
+ TP_STRUCT__entry(
+ __string(dev, __dev_name_kms(plane))
+ __field(char, pipe_name)
+ __field(int, scaler_id)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __field(int, x)
+ __field(int, y)
+ __field(int, w)
+ __field(int, h)
+ __string(name, plane->base.name)
+ ),
+
+ TP_fast_assign(
+ struct intel_display *display = to_intel_display(plane);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, plane->pipe);
+ __assign_str(dev);
+ __assign_str(name);
+ __entry->pipe_name = pipe_name(crtc->pipe);
+ __entry->scaler_id = scaler_id;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ __entry->x = x;
+ __entry->y = y;
+ __entry->w = w;
+ __entry->h = h;
+ ),
+
+ TP_printk("dev %s, pipe %c, scaler %d, plane %s, frame=%u, scanline=%u, " DRM_RECT_FMT,
+ __get_str(dev), __entry->pipe_name, __entry->scaler_id,
+ __get_str(name), __entry->frame, __entry->scanline,
+ __entry->w, __entry->h, __entry->x, __entry->y)
+);
+
+TRACE_EVENT(intel_pipe_scaler_update_arm,
+ TP_PROTO(struct intel_crtc *crtc, int scaler_id,
+ int x, int y, int w, int h),
+ TP_ARGS(crtc, scaler_id, x, y, w, h),
+
+ TP_STRUCT__entry(
+ __string(dev, __dev_name_kms(crtc))
+ __field(char, pipe_name)
+ __field(int, scaler_id)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __field(int, x)
+ __field(int, y)
+ __field(int, w)
+ __field(int, h)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev);
+ __entry->pipe_name = pipe_name(crtc->pipe);
+ __entry->scaler_id = scaler_id;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ __entry->x = x;
+ __entry->y = y;
+ __entry->w = w;
+ __entry->h = h;
+ ),
+
+ TP_printk("dev %s, pipe %c, scaler %d frame=%u, scanline=%u, " DRM_RECT_FMT,
+ __get_str(dev), __entry->pipe_name, __entry->scaler_id,
+ __entry->frame, __entry->scanline,
+ __entry->w, __entry->h, __entry->x, __entry->y)
+);
+
+TRACE_EVENT(intel_scaler_disable_arm,
+ TP_PROTO(struct intel_crtc *crtc, int scaler_id),
+ TP_ARGS(crtc, scaler_id),
+
+ TP_STRUCT__entry(
+ __string(dev, __dev_name_kms(crtc))
+ __field(char, pipe_name)
+ __field(int, scaler_id)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev);
+ __entry->pipe_name = pipe_name(crtc->pipe);
+ __entry->scaler_id = scaler_id;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("dev %s, pipe %c, scaler %d, frame=%u, scanline=%u",
+ __get_str(dev), __entry->pipe_name, __entry->scaler_id,
+ __entry->frame, __entry->scanline)
+);
+
TRACE_EVENT(intel_fbc_activate,
TP_PROTO(struct intel_plane *plane),
TP_ARGS(plane),
@@ -512,7 +620,7 @@ TRACE_EVENT(intel_fbc_activate,
__entry->scanline = intel_get_crtc_scanline(crtc);
),
- TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u",
+ TP_printk("dev %s, pipe %c, %s, frame=%u, scanline=%u",
__get_str(dev), __entry->pipe_name, __get_str(name),
__entry->frame, __entry->scanline)
);
@@ -540,7 +648,7 @@ TRACE_EVENT(intel_fbc_deactivate,
__entry->scanline = intel_get_crtc_scanline(crtc);
),
- TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u",
+ TP_printk("dev %s, pipe %c, %s, frame=%u, scanline=%u",
__get_str(dev), __entry->pipe_name, __get_str(name),
__entry->frame, __entry->scanline)
);
@@ -568,7 +676,7 @@ TRACE_EVENT(intel_fbc_nuke,
__entry->scanline = intel_get_crtc_scanline(crtc);
),
- TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u",
+ TP_printk("dev %s, pipe %c, %s, frame=%u, scanline=%u",
__get_str(dev), __entry->pipe_name, __get_str(name),
__entry->frame, __entry->scanline)
);
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 8271e50e3644..99a6fd2900b9 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -40,9 +40,9 @@
#include <drm/drm_rect.h>
#include <drm/drm_vblank_work.h>
#include <drm/intel/i915_hdcp_interface.h>
+#include <uapi/drm/i915_drm.h>
-#include "i915_vma.h"
-#include "i915_vma_types.h"
+#include "i915_gtt_view_types.h"
#include "intel_bios.h"
#include "intel_display.h"
#include "intel_display_conversion.h"
@@ -144,6 +144,7 @@ struct intel_framebuffer {
struct i915_address_space *dpt_vm;
unsigned int min_alignment;
+ unsigned int vtd_guard;
};
enum intel_hotplug_state {
@@ -412,6 +413,7 @@ struct intel_panel {
union {
struct {
struct drm_edp_backlight_info info;
+ bool luminance_control_support;
} vesa;
struct {
bool sdr_uses_aux;
@@ -494,6 +496,8 @@ struct intel_hdcp {
enum transcoder cpu_transcoder;
/* Only used for DP MST stream encryption */
enum transcoder stream_transcoder;
+ /* Used to force HDCP 1.4 bypassing HDCP 2.x */
+ bool force_hdcp14;
};
struct intel_connector {
@@ -530,10 +534,6 @@ struct intel_connector {
state of connector->polled in case hotplug storm detection changes it */
u8 polled;
- struct drm_dp_mst_port *port;
-
- struct intel_dp *mst_port;
-
int force_joined_pipes;
struct {
@@ -545,6 +545,11 @@ struct intel_connector {
u8 dsc_decompression_enabled:1;
} dp;
+ struct {
+ struct drm_dp_mst_port *port;
+ struct intel_dp *dp;
+ } mst;
+
/* Work struct to schedule a uevent on link train failure */
struct work_struct modeset_retry_work;
@@ -638,6 +643,9 @@ struct intel_plane_state {
/* Plane state to display black pixels when pxp is borked */
bool force_black;
+ /* Acting as Y plane for another UV plane? */
+ bool is_y_plane;
+
/* plane control register */
u32 ctl;
@@ -677,16 +685,6 @@ struct intel_plane_state {
*/
struct intel_plane *planar_linked_plane;
- /*
- * planar_slave:
- * If set don't update use the linked plane's state for updating
- * this plane during atomic commit with the update_slave() callback.
- *
- * It's also used by the watermark code to ignore wm calculations on
- * this plane. They're calculated by the linked plane's wm code.
- */
- u32 planar_slave;
-
struct drm_intel_sprite_colorkey ckey;
struct drm_rect psr2_sel_fetch_area;
@@ -695,6 +693,8 @@ struct intel_plane_state {
u64 ccval;
const char *no_fbc_reason;
+
+ struct drm_rect damage;
};
struct intel_initial_plane_config {
@@ -711,6 +711,8 @@ struct intel_initial_plane_config {
struct intel_scaler {
u32 mode;
bool in_use;
+ int hscale;
+ int vscale;
};
struct intel_crtc_scaler_state {
@@ -732,7 +734,7 @@ struct intel_crtc_scaler_state {
*
* intel_atomic_setup_scalers will setup available scalers to users
* requesting scalers. It will gracefully fail if request exceeds
- * avilability.
+ * availability.
*/
#define SKL_CRTC_INDEX 31
unsigned scaler_users;
@@ -1095,6 +1097,7 @@ struct intel_crtc_state {
int max_link_bpp_x16; /* in 1/16 bpp units */
int pipe_bpp; /* in 1 bpp units */
+ int min_hblank;
struct intel_link_m_n dp_m_n;
/* m2_n2 for eDP downclock */
@@ -1113,7 +1116,7 @@ struct intel_crtc_state {
u16 su_y_granularity;
/*
- * Frequence the dpll for the port should run at. Differs from the
+ * Frequency the dpll for the port should run at. Differs from the
* adjusted dotclock e.g. for DP or 10/12bpc hdmi mode. This is also
* already multiplied by pixel_multiplier.
*/
@@ -1439,12 +1442,17 @@ struct intel_crtc {
bool block_dc_for_vblank;
};
+struct intel_plane_error {
+ u32 ctl, surf, surflive;
+};
+
struct intel_plane {
struct drm_plane base;
enum i9xx_plane_id i9xx_plane;
enum plane_id id;
enum pipe pipe;
bool need_async_flip_toggle_wa;
+ u8 vtd_guard;
u32 frontbuffer_bit;
struct {
@@ -1474,6 +1482,7 @@ struct intel_plane {
unsigned int (*max_stride)(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation);
+ bool (*can_async_flip)(u64 modifier);
/* Write all non-self arming plane registers */
void (*update_noarm)(struct intel_dsb *dsb,
struct intel_plane *plane,
@@ -1488,6 +1497,9 @@ struct intel_plane {
void (*disable_arm)(struct intel_dsb *dsb,
struct intel_plane *plane,
const struct intel_crtc_state *crtc_state);
+ void (*capture_error)(struct intel_crtc *crtc,
+ struct intel_plane *plane,
+ struct intel_plane_error *error);
bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
int (*check_plane)(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
@@ -1715,7 +1727,6 @@ struct intel_dp {
struct intel_pps pps;
bool is_mst;
- int active_mst_links;
enum drm_dp_mst_mode mst_detect;
/* connector directly attached - won't be use for modeset in mst world */
@@ -1725,9 +1736,11 @@ struct intel_dp {
struct drm_dp_tunnel *tunnel;
bool tunnel_suspended:1;
- /* mst connector list */
- struct intel_dp_mst_encoder *mst_encoders[I915_MAX_PIPES];
- struct drm_dp_mst_topology_mgr mst_mgr;
+ struct {
+ struct intel_dp_mst_encoder *stream_encoders[I915_MAX_PIPES];
+ struct drm_dp_mst_topology_mgr mgr;
+ int active_links;
+ } mst;
u32 (*get_aux_clock_divider)(struct intel_dp *dp, int index);
/*
@@ -1838,16 +1851,18 @@ struct intel_digital_port {
struct intel_tc_port *tc;
- /* protects num_hdcp_streams reference count, hdcp_port_data and hdcp_auth_status */
- struct mutex hdcp_mutex;
- /* the number of pipes using HDCP signalling out of this port */
- unsigned int num_hdcp_streams;
- /* port HDCP auth status */
- bool hdcp_auth_status;
- /* HDCP port data need to pass to security f/w */
- struct hdcp_port_data hdcp_port_data;
- /* Whether the MST topology supports HDCP Type 1 Content */
- bool hdcp_mst_type1_capable;
+ struct {
+ /* protects num_streams reference count, port_data and auth_status */
+ struct mutex mutex;
+ /* the number of pipes using HDCP signalling out of this port */
+ unsigned int num_streams;
+ /* port HDCP auth status */
+ bool auth_status;
+ /* HDCP port data need to pass to security f/w */
+ struct hdcp_port_data port_data;
+ /* Whether the MST topology supports HDCP Type 1 Content */
+ bool mst_type1_capable;
+ } hdcp;
void (*write_infoframe)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -1946,8 +1961,8 @@ static inline struct intel_dp *enc_to_intel_dp(struct intel_encoder *encoder)
static inline struct intel_dp *intel_attached_dp(struct intel_connector *connector)
{
- if (connector->mst_port)
- return connector->mst_port;
+ if (connector->mst.dp)
+ return connector->mst.dp;
else
return enc_to_intel_dp(intel_attached_encoder(connector));
}
@@ -1979,24 +1994,12 @@ static inline bool intel_encoder_is_hdmi(struct intel_encoder *encoder)
}
}
-static inline struct intel_lspcon *
-enc_to_intel_lspcon(struct intel_encoder *encoder)
-{
- return &enc_to_dig_port(encoder)->lspcon;
-}
-
static inline struct intel_digital_port *
dp_to_dig_port(struct intel_dp *intel_dp)
{
return container_of(intel_dp, struct intel_digital_port, dp);
}
-static inline struct intel_lspcon *
-dp_to_lspcon(struct intel_dp *intel_dp)
-{
- return &dp_to_dig_port(intel_dp)->lspcon;
-}
-
static inline struct intel_digital_port *
hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
{
@@ -2103,11 +2106,6 @@ intel_crtc_needs_color_update(const struct intel_crtc_state *crtc_state)
intel_crtc_needs_modeset(crtc_state);
}
-static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *plane_state)
-{
- return i915_ggtt_offset(plane_state->ggtt_vma);
-}
-
static inline struct intel_frontbuffer *
to_intel_frontbuffer(struct drm_framebuffer *fb)
{
diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy.c b/drivers/gpu/drm/i915/display/intel_dkl_phy.c
index b146b4c46943..0813fb9b5823 100644
--- a/drivers/gpu/drm/i915/display/intel_dkl_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dkl_phy.c
@@ -3,7 +3,7 @@
* Copyright © 2022 Intel Corporation
*/
-#include "i915_drv.h"
+#include <drm/drm_device.h>
#include "intel_de.h"
#include "intel_display.h"
@@ -12,28 +12,28 @@
/**
* intel_dkl_phy_init - initialize Dekel PHY
- * @i915: i915 device instance
+ * @display: display device instance
*/
-void intel_dkl_phy_init(struct drm_i915_private *i915)
+void intel_dkl_phy_init(struct intel_display *display)
{
- spin_lock_init(&i915->display.dkl.phy_lock);
+ spin_lock_init(&display->dkl.phy_lock);
}
static void
-dkl_phy_set_hip_idx(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg)
+dkl_phy_set_hip_idx(struct intel_display *display, struct intel_dkl_phy_reg reg)
{
enum tc_port tc_port = DKL_REG_TC_PORT(reg);
- drm_WARN_ON(&i915->drm, tc_port < TC_PORT_1 || tc_port >= I915_MAX_TC_PORTS);
+ drm_WARN_ON(display->drm, tc_port < TC_PORT_1 || tc_port >= I915_MAX_TC_PORTS);
- intel_de_write(i915,
+ intel_de_write(display,
HIP_INDEX_REG(tc_port),
HIP_INDEX_VAL(tc_port, reg.bank_idx));
}
/**
* intel_dkl_phy_read - read a Dekel PHY register
- * @i915: i915 device instance
+ * @display: intel_display device instance
* @reg: Dekel PHY register
*
* Read the @reg Dekel PHY register.
@@ -41,42 +41,42 @@ dkl_phy_set_hip_idx(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg)
* Returns the read value.
*/
u32
-intel_dkl_phy_read(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg)
+intel_dkl_phy_read(struct intel_display *display, struct intel_dkl_phy_reg reg)
{
u32 val;
- spin_lock(&i915->display.dkl.phy_lock);
+ spin_lock(&display->dkl.phy_lock);
- dkl_phy_set_hip_idx(i915, reg);
- val = intel_de_read(i915, DKL_REG_MMIO(reg));
+ dkl_phy_set_hip_idx(display, reg);
+ val = intel_de_read(display, DKL_REG_MMIO(reg));
- spin_unlock(&i915->display.dkl.phy_lock);
+ spin_unlock(&display->dkl.phy_lock);
return val;
}
/**
* intel_dkl_phy_write - write a Dekel PHY register
- * @i915: i915 device instance
+ * @display: intel_display device instance
* @reg: Dekel PHY register
* @val: value to write
*
* Write @val to the @reg Dekel PHY register.
*/
void
-intel_dkl_phy_write(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg, u32 val)
+intel_dkl_phy_write(struct intel_display *display, struct intel_dkl_phy_reg reg, u32 val)
{
- spin_lock(&i915->display.dkl.phy_lock);
+ spin_lock(&display->dkl.phy_lock);
- dkl_phy_set_hip_idx(i915, reg);
- intel_de_write(i915, DKL_REG_MMIO(reg), val);
+ dkl_phy_set_hip_idx(display, reg);
+ intel_de_write(display, DKL_REG_MMIO(reg), val);
- spin_unlock(&i915->display.dkl.phy_lock);
+ spin_unlock(&display->dkl.phy_lock);
}
/**
* intel_dkl_phy_rmw - read-modify-write a Dekel PHY register
- * @i915: i915 device instance
+ * @display: display device instance
* @reg: Dekel PHY register
* @clear: mask to clear
* @set: mask to set
@@ -85,30 +85,30 @@ intel_dkl_phy_write(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg,
* this value back to the register if the value differs from the read one.
*/
void
-intel_dkl_phy_rmw(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg, u32 clear, u32 set)
+intel_dkl_phy_rmw(struct intel_display *display, struct intel_dkl_phy_reg reg, u32 clear, u32 set)
{
- spin_lock(&i915->display.dkl.phy_lock);
+ spin_lock(&display->dkl.phy_lock);
- dkl_phy_set_hip_idx(i915, reg);
- intel_de_rmw(i915, DKL_REG_MMIO(reg), clear, set);
+ dkl_phy_set_hip_idx(display, reg);
+ intel_de_rmw(display, DKL_REG_MMIO(reg), clear, set);
- spin_unlock(&i915->display.dkl.phy_lock);
+ spin_unlock(&display->dkl.phy_lock);
}
/**
* intel_dkl_phy_posting_read - do a posting read from a Dekel PHY register
- * @i915: i915 device instance
+ * @display: display device instance
* @reg: Dekel PHY register
*
* Read the @reg Dekel PHY register without returning the read value.
*/
void
-intel_dkl_phy_posting_read(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg)
+intel_dkl_phy_posting_read(struct intel_display *display, struct intel_dkl_phy_reg reg)
{
- spin_lock(&i915->display.dkl.phy_lock);
+ spin_lock(&display->dkl.phy_lock);
- dkl_phy_set_hip_idx(i915, reg);
- intel_de_posting_read(i915, DKL_REG_MMIO(reg));
+ dkl_phy_set_hip_idx(display, reg);
+ intel_de_posting_read(display, DKL_REG_MMIO(reg));
- spin_unlock(&i915->display.dkl.phy_lock);
+ spin_unlock(&display->dkl.phy_lock);
}
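
Note on the intel_dkl_phy.c hunks above: the conversion only changes which device pointer is passed around; the Dekel PHY access pattern is unchanged. Every read/write first programs HIP_INDEX_REG to select the per-TC-port register bank and then touches the banked register, all under display->dkl.phy_lock so the index/data pair cannot be interleaved. Below is a standalone sketch of that banked-access idiom in plain user-space C; the lock, index variable and register arrays are stand-ins, not the driver's MMIO helpers.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for HIP_INDEX_REG and the banked PHY registers. */
static pthread_mutex_t phy_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t hip_index;
static uint32_t phy_bank[4][64];

/* Select the bank, then access the register, under one lock so a
 * concurrent caller cannot change the bank between the two steps. */
static uint32_t dkl_style_read(unsigned int bank, unsigned int offset)
{
	uint32_t val;

	pthread_mutex_lock(&phy_lock);
	hip_index = bank;                  /* dkl_phy_set_hip_idx() analogue */
	val = phy_bank[hip_index][offset]; /* intel_de_read() analogue */
	pthread_mutex_unlock(&phy_lock);

	return val;
}

int main(void)
{
	phy_bank[1][0x2c] = 0x12345678;
	printf("0x%08x\n", (unsigned int)dkl_style_read(1, 0x2c));
	return 0;
}
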
diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy.h b/drivers/gpu/drm/i915/display/intel_dkl_phy.h
index 5956ec3e940b..ccb445c0022b 100644
--- a/drivers/gpu/drm/i915/display/intel_dkl_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_dkl_phy.h
@@ -10,16 +10,16 @@
#include "intel_dkl_phy_regs.h"
-struct drm_i915_private;
+struct intel_display;
-void intel_dkl_phy_init(struct drm_i915_private *i915);
+void intel_dkl_phy_init(struct intel_display *display);
u32
-intel_dkl_phy_read(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg);
+intel_dkl_phy_read(struct intel_display *display, struct intel_dkl_phy_reg reg);
void
-intel_dkl_phy_write(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg, u32 val);
+intel_dkl_phy_write(struct intel_display *display, struct intel_dkl_phy_reg reg, u32 val);
void
-intel_dkl_phy_rmw(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg, u32 clear, u32 set);
+intel_dkl_phy_rmw(struct intel_display *display, struct intel_dkl_phy_reg reg, u32 clear, u32 set);
void
-intel_dkl_phy_posting_read(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg);
+intel_dkl_phy_posting_read(struct intel_display *display, struct intel_dkl_phy_reg reg);
#endif /* __INTEL_DKL_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 221d3abda791..fa6944e55d95 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -992,19 +992,16 @@ static int parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
static void intel_dmc_runtime_pm_get(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
drm_WARN_ON(display->drm, display->dmc.wakeref);
- display->dmc.wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT);
+ display->dmc.wakeref = intel_display_power_get(display, POWER_DOMAIN_INIT);
}
static void intel_dmc_runtime_pm_put(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&display->dmc.wakeref);
- intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_INIT, wakeref);
}
static const char *dmc_fallback_path(struct intel_display *display)
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.c b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
index 02de3ae15074..7e2ce0c2f6c3 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_wl.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
@@ -10,7 +10,6 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
-#include "intel_dmc.h"
#include "intel_dmc_regs.h"
#include "intel_dmc_wl.h"
@@ -50,12 +49,24 @@
#define DMC_WAKELOCK_CTL_TIMEOUT_US 5000
#define DMC_WAKELOCK_HOLD_TIME 50
+/*
+ * Possible non-negative values for the enable_dmc_wl param.
+ */
+enum {
+ ENABLE_DMC_WL_DISABLED,
+ ENABLE_DMC_WL_ENABLED,
+ ENABLE_DMC_WL_ANY_REGISTER,
+ ENABLE_DMC_WL_ALWAYS_LOCKED,
+ ENABLE_DMC_WL_MAX,
+};
+
struct intel_dmc_wl_range {
u32 start;
u32 end;
};
static const struct intel_dmc_wl_range powered_off_ranges[] = {
+ { .start = 0x44400, .end = 0x4447f }, /* PIPE interrupt registers */
{ .start = 0x60000, .end = 0x7ffff },
{},
};
@@ -90,6 +101,7 @@ static const struct intel_dmc_wl_range xe3lpd_dc5_dc6_dmc_ranges[] = {
{ .start = 0x42088 }, /* CHICKEN_MISC_3 */
{ .start = 0x46160 }, /* CMTG_CLK_SEL */
{ .start = 0x8f000, .end = 0x8ffff }, /* Main DMC registers */
+ { .start = 0x45230 }, /* INITIATE_PM_DMD_REQ */
{},
};
@@ -230,10 +242,15 @@ static bool intel_dmc_wl_reg_in_range(i915_reg_t reg,
return false;
}
-static bool intel_dmc_wl_check_range(i915_reg_t reg, u32 dc_state)
+static bool intel_dmc_wl_check_range(struct intel_display *display,
+ i915_reg_t reg,
+ u32 dc_state)
{
const struct intel_dmc_wl_range *ranges;
+ if (display->params.enable_dmc_wl == ENABLE_DMC_WL_ANY_REGISTER)
+ return true;
+
/*
* Check that the offset is in one of the ranges for which
* registers are powered off during DC states.
@@ -265,20 +282,48 @@ static bool intel_dmc_wl_check_range(i915_reg_t reg, u32 dc_state)
static bool __intel_dmc_wl_supported(struct intel_display *display)
{
- return display->params.enable_dmc_wl && intel_dmc_has_payload(display);
+ return display->params.enable_dmc_wl;
}
static void intel_dmc_wl_sanitize_param(struct intel_display *display)
{
- if (!HAS_DMC_WAKELOCK(display))
- display->params.enable_dmc_wl = 0;
- else if (display->params.enable_dmc_wl >= 0)
- display->params.enable_dmc_wl = !!display->params.enable_dmc_wl;
- else
- display->params.enable_dmc_wl = DISPLAY_VER(display) >= 30;
-
- drm_dbg_kms(display->drm, "Sanitized enable_dmc_wl value: %d\n",
- display->params.enable_dmc_wl);
+ const char *desc;
+
+ if (!HAS_DMC_WAKELOCK(display)) {
+ display->params.enable_dmc_wl = ENABLE_DMC_WL_DISABLED;
+ } else if (display->params.enable_dmc_wl < 0) {
+ if (DISPLAY_VER(display) >= 30)
+ display->params.enable_dmc_wl = ENABLE_DMC_WL_ENABLED;
+ else
+ display->params.enable_dmc_wl = ENABLE_DMC_WL_DISABLED;
+ } else if (display->params.enable_dmc_wl >= ENABLE_DMC_WL_MAX) {
+ display->params.enable_dmc_wl = ENABLE_DMC_WL_ENABLED;
+ }
+
+ drm_WARN_ON(display->drm,
+ display->params.enable_dmc_wl < 0 ||
+ display->params.enable_dmc_wl >= ENABLE_DMC_WL_MAX);
+
+ switch (display->params.enable_dmc_wl) {
+ case ENABLE_DMC_WL_DISABLED:
+ desc = "disabled";
+ break;
+ case ENABLE_DMC_WL_ENABLED:
+ desc = "enabled";
+ break;
+ case ENABLE_DMC_WL_ANY_REGISTER:
+ desc = "match any register";
+ break;
+ case ENABLE_DMC_WL_ALWAYS_LOCKED:
+ desc = "always locked";
+ break;
+ default:
+ desc = "unknown";
+ break;
+ }
+
+ drm_dbg_kms(display->drm, "Sanitized enable_dmc_wl value: %d (%s)\n",
+ display->params.enable_dmc_wl, desc);
}
void intel_dmc_wl_init(struct intel_display *display)
@@ -292,7 +337,8 @@ void intel_dmc_wl_init(struct intel_display *display)
INIT_DELAYED_WORK(&wl->work, intel_dmc_wl_work);
spin_lock_init(&wl->lock);
- refcount_set(&wl->refcount, 0);
+ refcount_set(&wl->refcount,
+ display->params.enable_dmc_wl == ENABLE_DMC_WL_ALWAYS_LOCKED ? 1 : 0);
}
/* Must only be called as part of enabling dynamic DC states. */
@@ -398,7 +444,8 @@ void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
spin_lock_irqsave(&wl->lock, flags);
- if (i915_mmio_reg_valid(reg) && !intel_dmc_wl_check_range(reg, wl->dc_state))
+ if (i915_mmio_reg_valid(reg) &&
+ !intel_dmc_wl_check_range(display, reg, wl->dc_state))
goto out_unlock;
if (!wl->enabled) {
@@ -430,7 +477,8 @@ void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg)
spin_lock_irqsave(&wl->lock, flags);
- if (i915_mmio_reg_valid(reg) && !intel_dmc_wl_check_range(reg, wl->dc_state))
+ if (i915_mmio_reg_valid(reg) &&
+ !intel_dmc_wl_check_range(display, reg, wl->dc_state))
goto out_unlock;
if (WARN_RATELIMIT(!refcount_read(&wl->refcount),
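
Note on the intel_dmc_wl.c hunks above: enable_dmc_wl becomes a four-state mode selector instead of a bool, and the sanitize step maps "auto" (negative) and out-of-range values onto the enum before anything reads it; in the "always locked" mode the refcount starts at 1, so the wakelock is simply never dropped. The following is a standalone sketch of that mapping with the platform checks reduced to plain arguments; it is illustrative only, not the driver code.

#include <stdio.h>

enum {
	ENABLE_DMC_WL_DISABLED,
	ENABLE_DMC_WL_ENABLED,
	ENABLE_DMC_WL_ANY_REGISTER,
	ENABLE_DMC_WL_ALWAYS_LOCKED,
	ENABLE_DMC_WL_MAX,
};

static int sanitize_enable_dmc_wl(int param, int has_dmc_wakelock, int display_ver)
{
	if (!has_dmc_wakelock)
		return ENABLE_DMC_WL_DISABLED;
	if (param < 0)	/* "auto": default on only for display version 30+ */
		return display_ver >= 30 ? ENABLE_DMC_WL_ENABLED : ENABLE_DMC_WL_DISABLED;
	if (param >= ENABLE_DMC_WL_MAX)	/* clamp bogus values to plain "enabled" */
		return ENABLE_DMC_WL_ENABLED;
	return param;
}

int main(void)
{
	printf("%d\n", sanitize_enable_dmc_wl(-1, 1, 30)); /* -> 1 (enabled) */
	printf("%d\n", sanitize_enable_dmc_wl(7, 1, 20));  /* -> 1 (clamped) */
	printf("%d\n", sanitize_enable_dmc_wl(3, 1, 30));  /* -> 3 (always locked) */
	return 0;
}
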
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index aa77ddcee42c..a236b5fc7a3d 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -1075,7 +1075,7 @@ static bool source_can_output(struct intel_dp *intel_dp,
/*
* No YCbCr output support on gmch platforms.
* Also, ILK doesn't seem capable of DP YCbCr output.
- * The displayed image is severly corrupted. SNB+ is fine.
+ * The displayed image is severely corrupted. SNB+ is fine.
*/
return !HAS_GMCH(display) && !display->platform.ironlake;
@@ -1376,7 +1376,7 @@ bool intel_dp_has_dsc(const struct intel_connector *connector)
if (!HAS_DSC(display))
return false;
- if (connector->mst_port && !HAS_DSC_MST(display))
+ if (connector->mst.dp && !HAS_DSC_MST(display))
return false;
if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
@@ -1391,12 +1391,11 @@ bool intel_dp_has_dsc(const struct intel_connector *connector)
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *_connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct intel_display *display = to_intel_display(_connector->dev);
struct intel_connector *connector = to_intel_connector(_connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
const struct drm_display_mode *fixed_mode;
int target_clock = mode->clock;
int max_rate, mode_rate, max_lanes, max_link_clock;
@@ -1407,7 +1406,7 @@ intel_dp_mode_valid(struct drm_connector *_connector,
bool dsc = false;
int num_joined_pipes;
- status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
+ status = intel_cpu_transcoder_mode_valid(display, mode);
if (status != MODE_OK)
return status;
@@ -1496,7 +1495,7 @@ intel_dp_mode_valid(struct drm_connector *_connector,
if (status != MODE_OK)
return status;
- return intel_mode_valid_max_plane_size(dev_priv, mode, num_joined_pipes);
+ return intel_mode_valid_max_plane_size(display, mode, num_joined_pipes);
}
bool intel_dp_source_supports_tps3(struct intel_display *display)
@@ -1926,7 +1925,7 @@ static bool intel_dp_dsc_supports_format(const struct intel_connector *connector
return drm_dp_dsc_sink_supports_format(connector->dp.dsc_dpcd, sink_dsc_format);
}
-static bool is_bw_sufficient_for_dsc_config(u16 compressed_bppx16, u32 link_clock,
+static bool is_bw_sufficient_for_dsc_config(int dsc_bpp_x16, u32 link_clock,
u32 lane_count, u32 mode_clock,
enum intel_output_format output_format,
int timeslots)
@@ -1934,15 +1933,16 @@ static bool is_bw_sufficient_for_dsc_config(u16 compressed_bppx16, u32 link_cloc
u32 available_bw, required_bw;
available_bw = (link_clock * lane_count * timeslots * 16) / 8;
- required_bw = compressed_bppx16 * (intel_dp_mode_to_fec_clock(mode_clock));
+ required_bw = dsc_bpp_x16 * (intel_dp_mode_to_fec_clock(mode_clock));
return available_bw > required_bw;
}
static int dsc_compute_link_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
- struct link_config_limits *limits,
- u16 compressed_bppx16,
+ struct drm_connector_state *conn_state,
+ const struct link_config_limits *limits,
+ int dsc_bpp_x16,
int timeslots)
{
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
@@ -1957,15 +1957,37 @@ static int dsc_compute_link_config(struct intel_dp *intel_dp,
for (lane_count = limits->min_lane_count;
lane_count <= limits->max_lane_count;
lane_count <<= 1) {
- if (!is_bw_sufficient_for_dsc_config(compressed_bppx16, link_rate,
- lane_count, adjusted_mode->clock,
- pipe_config->output_format,
- timeslots))
- continue;
+ /*
+ * FIXME: intel_dp_mtp_tu_compute_config() requires
+ * ->lane_count and ->port_clock set before we know
+ * they'll work. If we end up failing altogether,
+ * they'll remain in crtc state. This shouldn't matter,
+ * as we'd then bail out from compute config, but it's
+ * just ugly.
+ */
pipe_config->lane_count = lane_count;
pipe_config->port_clock = link_rate;
+ if (drm_dp_is_uhbr_rate(link_rate)) {
+ int ret;
+
+ ret = intel_dp_mtp_tu_compute_config(intel_dp,
+ pipe_config,
+ conn_state,
+ dsc_bpp_x16,
+ dsc_bpp_x16,
+ 0, true);
+ if (ret)
+ continue;
+ } else {
+ if (!is_bw_sufficient_for_dsc_config(dsc_bpp_x16, link_rate,
+ lane_count, adjusted_mode->clock,
+ pipe_config->output_format,
+ timeslots))
+ continue;
+ }
+
return 0;
}
}
@@ -2055,111 +2077,66 @@ static int dsc_src_max_compressed_bpp(struct intel_dp *intel_dp)
}
/*
- * From a list of valid compressed bpps try different compressed bpp and find a
- * suitable link configuration that can support it.
+ * Note: for pre-13 display you still need to check the validity of each step.
*/
-static int
-icl_dsc_compute_link_config(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config,
- struct link_config_limits *limits,
- int dsc_max_bpp,
- int dsc_min_bpp,
- int pipe_bpp,
- int timeslots)
+static int intel_dp_dsc_bpp_step_x16(const struct intel_connector *connector)
{
- int i, ret;
-
- /* Compressed BPP should be less than the Input DSC bpp */
- dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
+ struct intel_display *display = to_intel_display(connector);
+ u8 incr = drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd);
- for (i = ARRAY_SIZE(valid_dsc_bpp) - 1; i >= 0; i--) {
- if (valid_dsc_bpp[i] < dsc_min_bpp ||
- valid_dsc_bpp[i] > dsc_max_bpp)
- continue;
+ if (DISPLAY_VER(display) < 14 || !incr)
+ return fxp_q4_from_int(1);
- ret = dsc_compute_link_config(intel_dp,
- pipe_config,
- limits,
- valid_dsc_bpp[i] << 4,
- timeslots);
- if (ret == 0) {
- pipe_config->dsc.compressed_bpp_x16 =
- fxp_q4_from_int(valid_dsc_bpp[i]);
- return 0;
- }
- }
-
- return -EINVAL;
+ /* fxp q4 */
+ return fxp_q4_from_int(1) / incr;
}
-/*
- * From XE_LPD onwards we supports compression bpps in steps of 1 up to
- * uncompressed bpp-1. So we start from max compressed bpp and see if any
- * link configuration is able to support that compressed bpp, if not we
- * step down and check for lower compressed bpp.
- */
-static int
-xelpd_dsc_compute_link_config(struct intel_dp *intel_dp,
- const struct intel_connector *connector,
- struct intel_crtc_state *pipe_config,
- struct link_config_limits *limits,
- int dsc_max_bpp,
- int dsc_min_bpp,
- int pipe_bpp,
- int timeslots)
+/* Note: This is not universally usable! */
+static bool intel_dp_dsc_valid_bpp(struct intel_dp *intel_dp, int bpp_x16)
{
struct intel_display *display = to_intel_display(intel_dp);
- u8 bppx16_incr = drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd);
- u16 compressed_bppx16;
- u8 bppx16_step;
- int ret;
+ int i;
- if (DISPLAY_VER(display) < 14 || bppx16_incr <= 1)
- bppx16_step = 16;
- else
- bppx16_step = 16 / bppx16_incr;
+ if (DISPLAY_VER(display) >= 13) {
+ if (intel_dp->force_dsc_fractional_bpp_en && !fxp_q4_to_frac(bpp_x16))
+ return false;
- /* Compressed BPP should be less than the Input DSC bpp */
- dsc_max_bpp = min(dsc_max_bpp << 4, (pipe_bpp << 4) - bppx16_step);
- dsc_min_bpp = dsc_min_bpp << 4;
-
- for (compressed_bppx16 = dsc_max_bpp;
- compressed_bppx16 >= dsc_min_bpp;
- compressed_bppx16 -= bppx16_step) {
- if (intel_dp->force_dsc_fractional_bpp_en &&
- !fxp_q4_to_frac(compressed_bppx16))
- continue;
- ret = dsc_compute_link_config(intel_dp,
- pipe_config,
- limits,
- compressed_bppx16,
- timeslots);
- if (ret == 0) {
- pipe_config->dsc.compressed_bpp_x16 = compressed_bppx16;
- if (intel_dp->force_dsc_fractional_bpp_en &&
- fxp_q4_to_frac(compressed_bppx16))
- drm_dbg_kms(display->drm,
- "Forcing DSC fractional bpp\n");
+ return true;
+ }
- return 0;
- }
+ if (fxp_q4_to_frac(bpp_x16))
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) {
+ if (fxp_q4_to_int(bpp_x16) == valid_dsc_bpp[i])
+ return true;
}
- return -EINVAL;
+
+ return false;
}
+/*
+ * Find the max compressed BPP we can find a link configuration for. The BPPs to
+ * try depend on the source (platform) and sink.
+ */
static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
- const struct intel_connector *connector,
struct intel_crtc_state *pipe_config,
- struct link_config_limits *limits,
+ struct drm_connector_state *conn_state,
+ const struct link_config_limits *limits,
int pipe_bpp,
int timeslots)
{
struct intel_display *display = to_intel_display(intel_dp);
+ const struct intel_connector *connector = to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ int output_bpp;
int dsc_min_bpp;
int dsc_max_bpp;
+ int min_bpp_x16, max_bpp_x16, bpp_step_x16;
int dsc_joiner_max_bpp;
int num_joined_pipes = intel_crtc_num_joined_pipes(pipe_config);
+ int bpp_x16;
+ int ret;
dsc_min_bpp = fxp_q4_to_int_roundup(limits->link.min_bpp_x16);
@@ -2168,11 +2145,38 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
num_joined_pipes);
dsc_max_bpp = min(dsc_joiner_max_bpp, fxp_q4_to_int(limits->link.max_bpp_x16));
- if (DISPLAY_VER(display) >= 13)
- return xelpd_dsc_compute_link_config(intel_dp, connector, pipe_config, limits,
- dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots);
- return icl_dsc_compute_link_config(intel_dp, pipe_config, limits,
- dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots);
+ /* FIXME: remove the round trip via integers */
+ min_bpp_x16 = fxp_q4_from_int(dsc_min_bpp);
+ max_bpp_x16 = fxp_q4_from_int(dsc_max_bpp);
+
+ bpp_step_x16 = intel_dp_dsc_bpp_step_x16(connector);
+
+ /* Compressed BPP should be less than the Input DSC bpp */
+ output_bpp = intel_dp_output_bpp(pipe_config->output_format, pipe_bpp);
+ max_bpp_x16 = min(max_bpp_x16, fxp_q4_from_int(output_bpp) - bpp_step_x16);
+
+ for (bpp_x16 = max_bpp_x16; bpp_x16 >= min_bpp_x16; bpp_x16 -= bpp_step_x16) {
+ if (!intel_dp_dsc_valid_bpp(intel_dp, bpp_x16))
+ continue;
+
+ ret = dsc_compute_link_config(intel_dp,
+ pipe_config,
+ conn_state,
+ limits,
+ bpp_x16,
+ timeslots);
+ if (ret == 0) {
+ pipe_config->dsc.compressed_bpp_x16 = bpp_x16;
+ if (intel_dp->force_dsc_fractional_bpp_en &&
+ fxp_q4_to_frac(bpp_x16))
+ drm_dbg_kms(display->drm,
+ "Forcing DSC fractional bpp\n");
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
}
int intel_dp_dsc_min_src_input_bpc(void)
@@ -2182,7 +2186,7 @@ int intel_dp_dsc_min_src_input_bpc(void)
}
static
-bool is_dsc_pipe_bpp_sufficient(struct link_config_limits *limits,
+bool is_dsc_pipe_bpp_sufficient(const struct link_config_limits *limits,
int pipe_bpp)
{
return pipe_bpp >= limits->pipe.min_bpp &&
@@ -2191,7 +2195,7 @@ bool is_dsc_pipe_bpp_sufficient(struct link_config_limits *limits,
static
int intel_dp_force_dsc_pipe_bpp(struct intel_dp *intel_dp,
- struct link_config_limits *limits)
+ const struct link_config_limits *limits)
{
struct intel_display *display = to_intel_display(intel_dp);
int forced_bpp;
@@ -2217,13 +2221,11 @@ int intel_dp_force_dsc_pipe_bpp(struct intel_dp *intel_dp,
static int intel_dp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state,
- struct link_config_limits *limits,
+ const struct link_config_limits *limits,
int timeslots)
{
const struct intel_connector *connector =
to_intel_connector(conn_state->connector);
- int dsc_max_bpp;
- int dsc_min_bpp;
u8 dsc_bpc[3] = {};
int forced_bpp, pipe_bpp;
int num_bpc, i, ret;
@@ -2231,7 +2233,7 @@ static int intel_dp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
forced_bpp = intel_dp_force_dsc_pipe_bpp(intel_dp, limits);
if (forced_bpp) {
- ret = dsc_compute_compressed_bpp(intel_dp, connector, pipe_config,
+ ret = dsc_compute_compressed_bpp(intel_dp, pipe_config, conn_state,
limits, forced_bpp, timeslots);
if (ret == 0) {
pipe_config->pipe_bpp = forced_bpp;
@@ -2239,9 +2241,6 @@ static int intel_dp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
}
}
- dsc_max_bpp = limits->pipe.max_bpp;
- dsc_min_bpp = limits->pipe.min_bpp;
-
/*
* Get the maximum DSC bpc that will be supported by any valid
* link configuration and compressed bpp.
@@ -2249,11 +2248,10 @@ static int intel_dp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
num_bpc = drm_dp_dsc_sink_supported_input_bpcs(connector->dp.dsc_dpcd, dsc_bpc);
for (i = 0; i < num_bpc; i++) {
pipe_bpp = dsc_bpc[i] * 3;
- if (pipe_bpp < dsc_min_bpp)
- break;
- if (pipe_bpp > dsc_max_bpp)
+ if (pipe_bpp < limits->pipe.min_bpp || pipe_bpp > limits->pipe.max_bpp)
continue;
- ret = dsc_compute_compressed_bpp(intel_dp, connector, pipe_config,
+
+ ret = dsc_compute_compressed_bpp(intel_dp, pipe_config, conn_state,
limits, pipe_bpp, timeslots);
if (ret == 0) {
pipe_config->pipe_bpp = pipe_bpp;
@@ -2267,7 +2265,7 @@ static int intel_dp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state,
- struct link_config_limits *limits)
+ const struct link_config_limits *limits)
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector =
@@ -2332,9 +2330,8 @@ static void intel_dp_fec_compute_config(struct intel_dp *intel_dp,
int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state,
- struct link_config_limits *limits,
- int timeslots,
- bool compute_pipe_bpp)
+ const struct link_config_limits *limits,
+ int timeslots)
{
struct intel_display *display = to_intel_display(intel_dp);
const struct intel_connector *connector =
@@ -2342,6 +2339,7 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
int num_joined_pipes = intel_crtc_num_joined_pipes(pipe_config);
+ bool is_mst = intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST);
int ret;
intel_dp_fec_compute_config(intel_dp, pipe_config);
@@ -2350,12 +2348,10 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
return -EINVAL;
/*
- * compute pipe bpp is set to false for DP MST DSC case
- * and compressed_bpp is calculated same time once
- * vpci timeslots are allocated, because overall bpp
- * calculation procedure is bit different for MST case.
+ * Link parameters, pipe bpp and compressed bpp have already been
+ * figured out for DP MST DSC.
*/
- if (compute_pipe_bpp) {
+ if (!is_mst) {
if (intel_dp_is_edp(intel_dp))
ret = intel_edp_dsc_compute_pipe_bpp(intel_dp, pipe_config,
conn_state, limits);
@@ -2518,9 +2514,6 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
limits->min_rate = intel_dp_min_link_rate(intel_dp);
limits->max_rate = intel_dp_max_link_rate(intel_dp);
- /* FIXME 128b/132b SST+DSC support missing */
- if (!is_mst && dsc)
- limits->max_rate = min(limits->max_rate, 810000);
limits->min_rate = min(limits->min_rate, limits->max_rate);
limits->min_lane_count = intel_dp_min_lane_count(intel_dp);
@@ -2640,9 +2633,9 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
if (!ret && intel_dp_is_uhbr(pipe_config))
ret = intel_dp_mtp_tu_compute_config(intel_dp,
pipe_config,
- pipe_config->pipe_bpp,
- pipe_config->pipe_bpp,
conn_state,
+ fxp_q4_from_int(pipe_config->pipe_bpp),
+ fxp_q4_from_int(pipe_config->pipe_bpp),
0, false);
if (ret)
dsc_needed = true;
@@ -2666,7 +2659,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
return -EINVAL;
ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
- conn_state, &limits, 64, true);
+ conn_state, &limits, 64);
if (ret < 0)
return ret;
}
@@ -2831,15 +2824,14 @@ static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp,
as_sdp->sdp_type = DP_SDP_ADAPTIVE_SYNC;
as_sdp->length = 0x9;
as_sdp->duration_incr_ms = 0;
+ as_sdp->vtotal = intel_vrr_vmin_vtotal(crtc_state);
if (crtc_state->cmrr.enable) {
as_sdp->mode = DP_AS_SDP_FAVT_TRR_REACHED;
- as_sdp->vtotal = adjusted_mode->vtotal;
as_sdp->target_rr = drm_mode_vrefresh(adjusted_mode);
as_sdp->target_rr_divider = true;
} else {
as_sdp->mode = DP_AS_SDP_AVT_DYNAMIC_VTOTAL;
- as_sdp->vtotal = adjusted_mode->vtotal;
as_sdp->target_rr = 0;
}
}
@@ -2920,7 +2912,7 @@ static bool can_enable_drrs(struct intel_connector *connector,
const struct intel_crtc_state *pipe_config,
const struct drm_display_mode *downclock_mode)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
if (pipe_config->vrr.enable)
return false;
@@ -2938,7 +2930,7 @@ static bool can_enable_drrs(struct intel_connector *connector,
if (pipe_config->has_pch_encoder)
return false;
- if (!intel_cpu_transcoder_has_drrs(i915, pipe_config->cpu_transcoder))
+ if (!intel_cpu_transcoder_has_drrs(display, pipe_config->cpu_transcoder))
return false;
return downclock_mode &&
@@ -2951,7 +2943,6 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
int link_bpp_x16)
{
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *downclock_mode =
intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
int pixel_clock;
@@ -2964,7 +2955,7 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
pipe_config->update_m_n = true;
if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
- if (intel_cpu_transcoder_has_m2_n2(i915, pipe_config->cpu_transcoder))
+ if (intel_cpu_transcoder_has_m2_n2(display, pipe_config->cpu_transcoder))
intel_zero_m_n(&pipe_config->dp_m2_n2);
return;
}
@@ -3064,15 +3055,6 @@ intel_dp_audio_compute_config(struct intel_encoder *encoder,
intel_dp_is_uhbr(pipe_config);
}
-static void intel_dp_queue_modeset_retry_work(struct intel_connector *connector)
-{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
-
- drm_connector_get(&connector->base);
- if (!queue_work(i915->unordered_wq, &connector->modeset_retry_work))
- drm_connector_put(&connector->base);
-}
-
void
intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state,
struct intel_encoder *encoder,
@@ -3089,7 +3071,7 @@ intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state,
intel_dp->needs_modeset_retry = true;
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
- intel_dp_queue_modeset_retry_work(intel_dp->attached_connector);
+ intel_connector_queue_modeset_retry_work(intel_dp->attached_connector);
return;
}
@@ -3098,8 +3080,8 @@ intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state,
if (!conn_state->base.crtc)
continue;
- if (connector->mst_port == intel_dp)
- intel_dp_queue_modeset_retry_work(connector);
+ if (connector->mst.dp == intel_dp)
+ intel_connector_queue_modeset_retry_work(connector);
}
}
@@ -3148,7 +3130,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if ((intel_dp_is_edp(intel_dp) && fixed_mode) ||
pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
- ret = intel_panel_fitting(pipe_config, conn_state);
+ ret = intel_pfit_compute_config(pipe_config, conn_state);
if (ret)
return ret;
}
@@ -3320,8 +3302,8 @@ intel_dp_sink_set_dsc_passthrough(const struct intel_connector *connector,
bool enable)
{
struct intel_display *display = to_intel_display(connector);
- struct drm_dp_aux *aux = connector->port ?
- connector->port->passthrough_aux : NULL;
+ struct drm_dp_aux *aux = connector->mst.port ?
+ connector->mst.port->passthrough_aux : NULL;
if (!aux)
return;
@@ -3348,7 +3330,7 @@ static int intel_dp_dsc_aux_ref_count(struct intel_atomic_state *state,
* On SST the decompression AUX device won't be shared, each connector
* uses for this its own AUX targeting the sink device.
*/
- if (!connector->mst_port)
+ if (!connector->mst.dp)
return connector->dp.dsc_decompression_enabled ? 1 : 0;
for_each_oldnew_connector_in_state(&state->base, _connector_iter,
@@ -3356,7 +3338,7 @@ static int intel_dp_dsc_aux_ref_count(struct intel_atomic_state *state,
const struct intel_connector *
connector_iter = to_intel_connector(_connector_iter);
- if (connector_iter->mst_port != connector->mst_port)
+ if (connector_iter->mst.dp != connector->mst.dp)
continue;
if (!connector_iter->dp.dsc_decompression_enabled)
@@ -3526,9 +3508,9 @@ void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
} else {
- struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- lspcon_resume(dp_to_dig_port(intel_dp));
+ intel_lspcon_resume(dig_port);
/* Write the source OUI as early as possible */
intel_dp_init_source_oui(intel_dp);
@@ -3544,8 +3526,8 @@ void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
msleep(1);
}
- if (ret == 1 && lspcon->active)
- lspcon_wait_pcon_mode(lspcon);
+ if (ret == 1 && intel_lspcon_active(dig_port))
+ intel_lspcon_wait_pcon_mode(dig_port);
}
if (ret != 1)
@@ -4414,7 +4396,7 @@ intel_dp_mst_configure(struct intel_dp *intel_dp)
if (intel_dp->is_mst)
intel_dp_mst_prepare_probe(intel_dp);
- drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst.mgr, intel_dp->is_mst);
/* Avoid stale info on the next detect cycle. */
intel_dp->mst_detect = DRM_DP_SST;
@@ -4430,9 +4412,9 @@ intel_dp_mst_disconnect(struct intel_dp *intel_dp)
drm_dbg_kms(display->drm,
"MST device may have disappeared %d vs %d\n",
- intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
+ intel_dp->is_mst, intel_dp->mst.mgr.mst_state);
intel_dp->is_mst = false;
- drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst.mgr, intel_dp->is_mst);
}
static bool
@@ -4938,7 +4920,7 @@ intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack)
{
bool handled = false;
- drm_dp_mst_hpd_irq_handle_event(&intel_dp->mst_mgr, esi, ack, &handled);
+ drm_dp_mst_hpd_irq_handle_event(&intel_dp->mst.mgr, esi, ack, &handled);
if (esi[1] & DP_CP_IRQ) {
intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
@@ -4987,7 +4969,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
bool link_ok = true;
bool reprobe_needed = false;
- drm_WARN_ON_ONCE(display->drm, intel_dp->active_mst_links < 0);
+ drm_WARN_ON_ONCE(display->drm, intel_dp->mst.active_links < 0);
for (;;) {
u8 esi[4] = {};
@@ -5003,7 +4985,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
drm_dbg_kms(display->drm, "DPRX ESI: %4ph\n", esi);
- if (intel_dp->active_mst_links > 0 && link_ok &&
+ if (intel_dp->mst.active_links > 0 && link_ok &&
esi[3] & LINK_STATUS_CHANGED) {
if (!intel_dp_mst_link_status(intel_dp))
link_ok = false;
@@ -5026,7 +5008,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
drm_dbg_kms(display->drm, "Failed to ack ESI\n");
if (ack[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY))
- drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst_mgr);
+ drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst.mgr);
}
if (!link_ok || intel_dp->link.force_retrain)
@@ -5125,7 +5107,7 @@ bool intel_dp_has_connector(struct intel_dp *intel_dp,
/* MST */
for_each_pipe(display, pipe) {
- encoder = &intel_dp->mst_encoders[pipe]->base;
+ encoder = &intel_dp->mst.stream_encoders[pipe]->base;
if (conn_state->best_encoder == &encoder->base)
return true;
}
@@ -5211,7 +5193,6 @@ static int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u8 pipe_mask;
int ret;
@@ -5242,7 +5223,7 @@ static int intel_dp_retrain_link(struct intel_encoder *encoder,
encoder->base.base.id, encoder->base.name,
str_yes_no(intel_dp->link.force_retrain));
- ret = intel_modeset_commit_pipes(dev_priv, pipe_mask, ctx);
+ ret = intel_modeset_commit_pipes(display, pipe_mask, ctx);
if (ret == -EDEADLK)
return ret;
@@ -5395,7 +5376,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
if (drm_WARN_ON(display->drm, intel_dp_is_edp(intel_dp)))
return connector_status_connected;
- lspcon_resume(dig_port);
+ intel_lspcon_resume(dig_port);
if (!intel_dp_get_dpcd(intel_dp))
return connector_status_disconnected;
@@ -5477,13 +5458,13 @@ void intel_digital_port_unlock(struct intel_encoder *encoder)
*/
bool intel_digital_port_connected_locked(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_glitch_free = intel_tc_port_handles_hpd_glitches(dig_port);
bool is_connected = false;
intel_wakeref_t wakeref;
- with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
+ with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
unsigned long wait_expires = jiffies + msecs_to_jiffies_timeout(4);
do {
@@ -5595,7 +5576,7 @@ intel_dp_update_420(struct intel_dp *intel_dp)
intel_dp->downstream_ports);
/* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */
intel_dp->dfp.ycbcr_444_to_420 =
- dp_to_dig_port(intel_dp)->lspcon.active ||
+ intel_lspcon_active(dp_to_dig_port(intel_dp)) ||
drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
intel_dp->downstream_ports);
intel_dp->dfp.rgb_to_ycbcr =
@@ -5869,7 +5850,6 @@ intel_dp_connector_register(struct drm_connector *connector)
struct intel_display *display = to_intel_display(connector->dev);
struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct intel_lspcon *lspcon = &dig_port->lspcon;
int ret;
ret = intel_connector_register(connector);
@@ -5891,9 +5871,8 @@ intel_dp_connector_register(struct drm_connector *connector)
* ToDo: Clean this up to handle lspcon init and resume more
* efficiently and streamlined.
*/
- if (lspcon_init(dig_port)) {
- lspcon_detect_hdr_capability(lspcon);
- if (lspcon->hdr_supported)
+ if (intel_lspcon_init(dig_port)) {
+ if (intel_lspcon_detect_hdr_capability(dig_port))
drm_connector_attach_hdr_output_metadata_property(connector);
}
@@ -6086,7 +6065,7 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
return ret;
if (intel_dp_mst_source_support(intel_dp)) {
- ret = drm_dp_mst_root_conn_atomic_check(conn_state, &intel_dp->mst_mgr);
+ ret = drm_dp_mst_root_conn_atomic_check(conn_state, &intel_dp->mst.mgr);
if (ret)
return ret;
}
@@ -6495,35 +6474,6 @@ out_vdd_off:
return false;
}
-static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
-{
- struct intel_connector *connector = container_of(work, typeof(*connector),
- modeset_retry_work);
- struct intel_display *display = to_intel_display(connector);
-
- drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n", connector->base.base.id,
- connector->base.name);
-
- /* Grab the locks before changing connector property*/
- mutex_lock(&display->drm->mode_config.mutex);
- /* Set connector link status to BAD and send a Uevent to notify
- * userspace to do a modeset.
- */
- drm_connector_set_link_status_property(&connector->base,
- DRM_MODE_LINK_STATUS_BAD);
- mutex_unlock(&display->drm->mode_config.mutex);
- /* Send Hotplug uevent so userspace can reprobe */
- drm_kms_helper_connector_hotplug_event(&connector->base);
-
- drm_connector_put(&connector->base);
-}
-
-void intel_dp_init_modeset_retry_work(struct intel_connector *connector)
-{
- INIT_WORK(&connector->modeset_retry_work,
- intel_dp_modeset_retry_work_fn);
-}
-
bool
intel_dp_init_connector(struct intel_digital_port *dig_port,
struct intel_connector *connector)
@@ -6532,13 +6482,9 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
struct intel_dp *intel_dp = &dig_port->dp;
struct intel_encoder *encoder = &dig_port->base;
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = encoder->port;
int type;
- /* Initialize the work for modeset in case of link train failure */
- intel_dp_init_modeset_retry_work(connector);
-
if (drm_WARN(dev, dig_port->max_lanes < 1,
"Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
dig_port->max_lanes, encoder->base.base.id,
@@ -6632,7 +6578,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
return true;
fail:
- intel_display_power_flush_work(dev_priv);
+ intel_display_power_flush_work(display);
drm_connector_cleanup(&connector->base);
return false;
@@ -6657,7 +6603,7 @@ void intel_dp_mst_suspend(struct intel_display *display)
continue;
if (intel_dp->is_mst)
- drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
+ drm_dp_mst_topology_mgr_suspend(&intel_dp->mst.mgr);
}
}
@@ -6680,12 +6626,10 @@ void intel_dp_mst_resume(struct intel_display *display)
if (!intel_dp_mst_source_support(intel_dp))
continue;
- ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
- true);
+ ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst.mgr, true);
if (ret) {
intel_dp->is_mst = false;
- drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
- false);
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst.mgr, false);
}
}
}
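
Note on the DSC changes in intel_dp.c above: dsc_compute_compressed_bpp() now walks candidate compressed bpps in "x16" fixed point. The step is 1/incr bpp, with incr taken from the sink's DSC bits-per-pixel increment DPCD field (fractional steps only on display version 14+), the walk starts one step below the uncompressed bpp, and intel_dp_dsc_valid_bpp() further restricts pre-display-13 platforms to a fixed table of integer bpps. The standalone demonstration below shows only the Q4.4 arithmetic of that walk, using local macros that mirror (but are not) the fxp_q4_*() helpers; the numeric values are arbitrary examples.

#include <stdio.h>

#define Q4_FROM_INT(x)  ((x) * 16)
#define Q4_TO_INT(x)    ((x) / 16)
#define Q4_TO_FRAC(x)   ((x) % 16)

int main(void)
{
	int incr = 8;                              /* sink advertises 1/8 bpp steps */
	int bpp_step_x16 = Q4_FROM_INT(1) / incr;  /* = 2, i.e. 0.125 bpp */
	int uncompressed_bpp = 24;                 /* example pipe output bpp */
	int min_x16 = Q4_FROM_INT(20);             /* example link minimum */
	/* Compressed bpp must stay below the uncompressed bpp, so the walk
	 * starts one step under it, as the new code does. */
	int max_x16 = Q4_FROM_INT(uncompressed_bpp) - bpp_step_x16;
	int bpp_x16;

	for (bpp_x16 = max_x16; bpp_x16 >= min_x16; bpp_x16 -= bpp_step_x16)
		printf("candidate: %d.%04d bpp\n", Q4_TO_INT(bpp_x16),
		       Q4_TO_FRAC(bpp_x16) * 10000 / 16);

	return 0;
}
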
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index ca49f0a05da5..9189db4c2594 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -75,9 +75,8 @@ int intel_dp_compute_config(struct intel_encoder *encoder,
int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state,
- struct link_config_limits *limits,
- int timeslots,
- bool recompute_pipe_bpp);
+ const struct link_config_limits *limits,
+ int timeslots);
void intel_dp_audio_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 40c697476b72..ec27bbd70bcf 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -243,7 +243,6 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
i915_reg_t ch_ctl, ch_data[5];
u32 aux_clock_divider;
enum intel_display_power_domain aux_domain;
@@ -272,7 +271,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
aux_domain = intel_aux_power_domain(dig_port);
- aux_wakeref = intel_display_power_get(i915, aux_domain);
+ aux_wakeref = intel_display_power_get(display, aux_domain);
pps_wakeref = intel_pps_lock(intel_dp);
/*
@@ -432,7 +431,7 @@ out:
intel_pps_vdd_off_unlocked(intel_dp, false);
intel_pps_unlock(intel_dp, pps_wakeref);
- intel_display_power_put_async(i915, aux_domain, aux_wakeref);
+ intel_display_power_put_async(display, aux_domain, aux_wakeref);
out_unlock:
intel_digital_port_unlock(encoder);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index c846ef4acf5b..8173de8aec63 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -34,6 +34,8 @@
* for some reason.
*/
+#include <drm/drm_print.h>
+
#include "i915_utils.h"
#include "intel_backlight.h"
#include "intel_display_core.h"
@@ -451,9 +453,54 @@ intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pi
/* VESA backlight callbacks */
static u32 intel_dp_aux_vesa_get_backlight(struct intel_connector *connector, enum pipe unused)
{
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ struct intel_panel *panel = &connector->panel;
+ u8 buf[3];
+ u32 val = 0;
+ int ret;
+
+ if (panel->backlight.edp.vesa.luminance_control_support) {
+ ret = drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE, buf,
+ sizeof(buf));
+ if (ret < 0) {
+ drm_err(intel_dp->aux.drm_dev,
+ "[CONNECTOR:%d:%s] Failed to read Luminance from DPCD\n",
+ connector->base.base.id, connector->base.name);
+ return 0;
+ }
+
+ val |= buf[0] | buf[1] << 8 | buf[2] << 16;
+ return val / 1000;
+ }
+
return connector->panel.backlight.level;
}
+static int
+intel_dp_aux_vesa_set_luminance(struct intel_connector *connector, u32 level)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ u8 buf[3];
+ int ret;
+
+ level = level * 1000;
+ level &= 0xffffff;
+ buf[0] = (level & 0x0000ff);
+ buf[1] = (level & 0x00ff00) >> 8;
+ buf[2] = (level & 0xff0000) >> 16;
+
+ ret = drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE,
+ buf, sizeof(buf));
+ if (ret != sizeof(buf)) {
+ drm_err(intel_dp->aux.drm_dev,
+ "%s: Failed to set VESA Aux Luminance: %d\n",
+ intel_dp->aux.name, ret);
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+}
+
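/*
 * Illustrative standalone sketch (not part of the diff above): the new VESA
 * luminance path programs brightness in millinits through the 3-byte
 * little-endian DP_EDP_PANEL_TARGET_LUMINANCE_VALUE field. This models the
 * nits <-> DPCD round trip that intel_dp_aux_vesa_set_luminance() and
 * intel_dp_aux_vesa_get_backlight() perform; it compiles on its own and uses
 * no kernel APIs.
 */
#include <assert.h>
#include <stdint.h>

static void luminance_nits_to_dpcd(uint32_t nits, uint8_t buf[3])
{
	uint32_t millinits = (nits * 1000) & 0xffffff;	/* 24-bit DPCD field */

	buf[0] = millinits & 0xff;			/* least significant byte first */
	buf[1] = (millinits >> 8) & 0xff;
	buf[2] = (millinits >> 16) & 0xff;
}

static uint32_t luminance_dpcd_to_nits(const uint8_t buf[3])
{
	uint32_t millinits = buf[0] | buf[1] << 8 | (uint32_t)buf[2] << 16;

	return millinits / 1000;
}

int main(void)
{
	uint8_t buf[3];

	luminance_nits_to_dpcd(400, buf);	/* 400 nits -> 400000 millinits */
	assert(luminance_dpcd_to_nits(buf) == 400);
	return 0;
}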
static void
intel_dp_aux_vesa_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
@@ -461,6 +508,11 @@ intel_dp_aux_vesa_set_backlight(const struct drm_connector_state *conn_state, u3
struct intel_panel *panel = &connector->panel;
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ if (panel->backlight.edp.vesa.luminance_control_support) {
+ if (!intel_dp_aux_vesa_set_luminance(connector, level))
+ return;
+ }
+
if (!panel->backlight.edp.vesa.info.aux_set) {
const u32 pwm_level = intel_backlight_level_to_pwm(connector, level);
@@ -477,6 +529,18 @@ intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_panel *panel = &connector->panel;
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ int ret;
+
+ if (panel->backlight.edp.vesa.luminance_control_support) {
+ ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
+ DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE);
+
+ if (ret == 1)
+ return;
+
+ if (!intel_dp_aux_vesa_set_luminance(connector, level))
+ return;
+ }
if (!panel->backlight.edp.vesa.info.aux_enable) {
u32 pwm_level;
@@ -500,6 +564,9 @@ static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state
struct intel_panel *panel = &connector->panel;
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ if (panel->backlight.edp.vesa.luminance_control_support)
+ return;
+
drm_edp_backlight_disable(&intel_dp->aux, &panel->backlight.edp.vesa.info);
if (!panel->backlight.edp.vesa.info.aux_enable)
@@ -510,56 +577,75 @@ static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state
static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, enum pipe pipe)
{
struct intel_display *display = to_intel_display(connector);
+ struct drm_luminance_range_info *luminance_range =
+ &connector->base.display_info.luminance_range;
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_panel *panel = &connector->panel;
u16 current_level;
u8 current_mode;
int ret;
- ret = drm_edp_backlight_init(&intel_dp->aux, &panel->backlight.edp.vesa.info,
- panel->vbt.backlight.pwm_freq_hz, intel_dp->edp_dpcd,
- &current_level, &current_mode);
- if (ret < 0)
- return ret;
-
- drm_dbg_kms(display->drm,
- "[CONNECTOR:%d:%s] AUX VESA backlight enable is controlled through %s\n",
- connector->base.base.id, connector->base.name,
- dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_enable));
- drm_dbg_kms(display->drm,
- "[CONNECTOR:%d:%s] AUX VESA backlight level is controlled through %s\n",
- connector->base.base.id, connector->base.name,
- dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_set));
-
- if (!panel->backlight.edp.vesa.info.aux_set || !panel->backlight.edp.vesa.info.aux_enable) {
- ret = panel->backlight.pwm_funcs->setup(connector, pipe);
- if (ret < 0) {
- drm_err(display->drm,
- "[CONNECTOR:%d:%s] Failed to setup PWM backlight controls for eDP backlight: %d\n",
- connector->base.base.id, connector->base.name, ret);
- return ret;
- }
- }
-
- if (panel->backlight.edp.vesa.info.aux_set) {
- panel->backlight.max = panel->backlight.edp.vesa.info.max;
- panel->backlight.min = 0;
- if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
- panel->backlight.level = current_level;
- panel->backlight.enabled = panel->backlight.level != 0;
+ if (panel->backlight.edp.vesa.luminance_control_support) {
+ if (luminance_range->max_luminance) {
+ panel->backlight.max = luminance_range->max_luminance;
+ panel->backlight.min = luminance_range->min_luminance;
} else {
- panel->backlight.level = panel->backlight.max;
- panel->backlight.enabled = false;
+ panel->backlight.max = 512;
+ panel->backlight.min = 0;
}
+ panel->backlight.level = intel_dp_aux_vesa_get_backlight(connector, 0);
+ panel->backlight.enabled = panel->backlight.level != 0;
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] AUX VESA Nits backlight level is controlled through DPCD\n",
+ connector->base.base.id, connector->base.name);
} else {
- panel->backlight.max = panel->backlight.pwm_level_max;
- panel->backlight.min = panel->backlight.pwm_level_min;
- if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_PWM) {
- panel->backlight.level = panel->backlight.pwm_funcs->get(connector, pipe);
- panel->backlight.enabled = panel->backlight.pwm_enabled;
+ ret = drm_edp_backlight_init(&intel_dp->aux, &panel->backlight.edp.vesa.info,
+ panel->vbt.backlight.pwm_freq_hz, intel_dp->edp_dpcd,
+ &current_level, &current_mode);
+ if (ret < 0)
+ return ret;
+
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] AUX VESA backlight enable is controlled through %s\n",
+ connector->base.base.id, connector->base.name,
+ dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_enable));
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] AUX VESA backlight level is controlled through %s\n",
+ connector->base.base.id, connector->base.name,
+ dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_set));
+
+ if (!panel->backlight.edp.vesa.info.aux_set ||
+ !panel->backlight.edp.vesa.info.aux_enable) {
+ ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+ if (ret < 0) {
+ drm_err(display->drm,
+ "[CONNECTOR:%d:%s] Failed to setup PWM backlight controls for eDP backlight: %d\n",
+ connector->base.base.id, connector->base.name, ret);
+ return ret;
+ }
+ }
+
+ if (panel->backlight.edp.vesa.info.aux_set) {
+ panel->backlight.max = panel->backlight.edp.vesa.info.max;
+ panel->backlight.min = 0;
+ if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
+ panel->backlight.level = current_level;
+ panel->backlight.enabled = panel->backlight.level != 0;
+ } else {
+ panel->backlight.level = panel->backlight.max;
+ panel->backlight.enabled = false;
+ }
} else {
- panel->backlight.level = panel->backlight.max;
- panel->backlight.enabled = false;
+ panel->backlight.max = panel->backlight.pwm_level_max;
+ panel->backlight.min = panel->backlight.pwm_level_min;
+ if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_PWM) {
+ panel->backlight.level =
+ panel->backlight.pwm_funcs->get(connector, pipe);
+ panel->backlight.enabled = panel->backlight.pwm_enabled;
+ } else {
+ panel->backlight.level = panel->backlight.max;
+ panel->backlight.enabled = false;
+ }
}
}
@@ -575,6 +661,15 @@ intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_panel *panel = &connector->panel;
+
+ if ((intel_dp->edp_dpcd[3] & DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE)) {
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] AUX Luminance Based Backlight Control Supported!\n",
+ connector->base.base.id, connector->base.name);
+ panel->backlight.edp.vesa.luminance_control_support = true;
+ return true;
+ }
if (drm_edp_backlight_supported(intel_dp->edp_dpcd)) {
drm_dbg_kms(display->drm,
@@ -604,6 +699,7 @@ static const struct intel_panel_bl_funcs intel_dp_vesa_bl_funcs = {
int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_device *dev = connector->base.dev;
struct intel_panel *panel = &connector->panel;
bool try_intel_interface = false, try_vesa_interface = false;
@@ -640,6 +736,10 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
break;
}
+ /* For eDP 1.5 and above we are supposed to use the VESA interface for brightness control */
+ if (intel_dp->edp_dpcd[0] >= DP_EDP_15)
+ try_vesa_interface = true;
+
/*
* Since Intel has their own backlight control interface, the majority of machines out there
* using DPCD backlight controls with Intel GPUs will be using this interface as opposed to
@@ -653,7 +753,8 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
* backlight interface at all. This means that the only sensible way for us to detect both
* interfaces is to probe for Intel's first, and VESA's second.
*/
- if (try_intel_interface && intel_dp_aux_supports_hdr_backlight(connector)) {
+ if (try_intel_interface && intel_dp_aux_supports_hdr_backlight(connector) &&
+ intel_dp->edp_dpcd[0] <= DP_EDP_14b) {
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Using Intel proprietary eDP backlight controls\n",
connector->base.base.id, connector->base.name);
panel->backlight.funcs = &intel_dp_hdr_bl_funcs;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index 00c493cc8a4b..cc312596fb77 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -705,10 +705,10 @@ int intel_dp_hdcp_get_remote_capability(struct intel_connector *connector,
*hdcp_capable = false;
*hdcp2_capable = false;
- if (!connector->mst_port)
+ if (!connector->mst.dp)
return -EINVAL;
- aux = &connector->port->aux;
+ aux = &connector->mst.port->aux;
ret = _intel_dp_hdcp2_get_capability(aux, hdcp2_capable);
if (ret)
drm_dbg_kms(display->drm,
@@ -799,7 +799,7 @@ intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
{
struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
enum transcoder cpu_transcoder = hdcp->stream_transcoder;
enum pipe pipe = (enum pipe)cpu_transcoder;
@@ -883,7 +883,7 @@ int intel_dp_hdcp_init(struct intel_digital_port *dig_port,
if (!is_hdcp_supported(display, port))
return 0;
- if (intel_connector->mst_port)
+ if (intel_connector->mst.dp)
return intel_hdcp_init(intel_connector, dig_port,
&intel_dp_mst_hdcp_shim);
else if (!intel_dp_is_edp(intel_dp))
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 6696a32cdd3e..2966f5b39392 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -24,6 +24,7 @@
#include <linux/debugfs.h>
#include <drm/display/drm_dp_helper.h>
+#include <drm/drm_print.h>
#include "i915_utils.h"
#include "intel_display_core.h"
@@ -119,16 +120,13 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
u8 val = enable ? DP_PHY_REPEATER_MODE_TRANSPARENT :
DP_PHY_REPEATER_MODE_NON_TRANSPARENT;
- if (drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) != 1)
- return false;
-
intel_dp->lttpr_common_caps[DP_PHY_REPEATER_MODE -
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = val;
return true;
}
-static bool intel_dp_lttpr_transparent_mode_enabled(struct intel_dp *intel_dp)
+bool intel_dp_lttpr_transparent_mode_enabled(struct intel_dp *intel_dp)
{
return intel_dp->lttpr_common_caps[DP_PHY_REPEATER_MODE -
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] ==
@@ -146,6 +144,7 @@ static bool intel_dp_lttpr_transparent_mode_enabled(struct intel_dp *intel_dp)
static int intel_dp_init_lttpr_phys(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
int lttpr_count;
+ int ret;
if (!intel_dp_read_lttpr_common_caps(intel_dp, dpcd))
return 0;
@@ -172,22 +171,8 @@ static int intel_dp_init_lttpr_phys(struct intel_dp *intel_dp, const u8 dpcd[DP_
return lttpr_count;
}
- /*
- * See DP Standard v2.0 3.6.6.1. about the explicit disabling of
- * non-transparent mode and the disable->enable non-transparent mode
- * sequence.
- */
- intel_dp_set_lttpr_transparent_mode(intel_dp, true);
-
- /*
- * In case of unsupported number of LTTPRs or failing to switch to
- * non-transparent mode fall-back to transparent link training mode,
- * still taking into account any LTTPR common lane- rate/count limits.
- */
- if (lttpr_count < 0)
- goto out_reset_lttpr_count;
-
- if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) {
+ ret = drm_dp_lttpr_init(&intel_dp->aux, lttpr_count);
+ if (ret) {
lt_dbg(intel_dp, DP_PHY_DPRX,
"Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n");
@@ -196,6 +181,8 @@ static int intel_dp_init_lttpr_phys(struct intel_dp *intel_dp, const u8 dpcd[DP_
goto out_reset_lttpr_count;
}
+ intel_dp_set_lttpr_transparent_mode(intel_dp, false);
+
return lttpr_count;
out_reset_lttpr_count:
@@ -783,7 +770,7 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp,
/*
* WaEdpLinkRateDataReload
*
- * Parade PS8461E MUX (used on varius TGL+ laptops) needs
+ * Parade PS8461E MUX (used on various TGL+ laptops) needs
* to snoop the link rates reported by the sink when we
* use LINK_RATE_SET in order to operate in jitter cleaning
* mode (as opposed to redriver mode). Unfortunately it
@@ -1642,7 +1629,7 @@ void intel_dp_start_link_train(struct intel_atomic_state *state,
/*
* Ignore the link failure in CI
*
- * In fixed enviroments like CI, sometimes unexpected long HPDs are
+ * In fixed environments like CI, sometimes unexpected long HPDs are
* generated by the displays. If ignore_long_hpd flag is set, such long
* HPDs are ignored. And probably as a consequence of these ignored
* long HPDs, subsequent link trainings are failed resulting into CI
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index 2066b9146762..46614124569f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -15,6 +15,7 @@ struct intel_dp;
int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_SIZE]);
int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp);
+bool intel_dp_lttpr_transparent_mode_enabled(struct intel_dp *intel_dp);
void intel_dp_link_training_set_mode(struct intel_dp *intel_dp,
int link_rate, bool is_vrr);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 86d6185fda50..02f95108c637 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -49,6 +49,7 @@
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_link_bw.h"
+#include "intel_pfit.h"
#include "intel_psr.h"
#include "intel_vdsc.h"
#include "skl_scaler.h"
@@ -111,7 +112,7 @@ static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state,
&crtc_state->hw.adjusted_mode;
if (!intel_dp_is_uhbr(crtc_state) || DISPLAY_VER(display) >= 20 || !dsc)
- return INT_MAX;
+ return 0;
/*
* DSC->DPT interface width:
@@ -209,22 +210,56 @@ static int intel_dp_mst_dsc_get_slice_count(const struct intel_connector *connec
num_joined_pipes);
}
+static void intel_dp_mst_compute_min_hblank(struct intel_crtc_state *crtc_state,
+ int bpp_x16)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int symbol_size = intel_dp_is_uhbr(crtc_state) ? 32 : 8;
+ int hblank;
+
+ if (DISPLAY_VER(display) < 20)
+ return;
+
+ /* Calculate min Hblank Link Layer Symbol Cycle Count for 8b/10b MST & 128b/132b */
+ hblank = DIV_ROUND_UP((DIV_ROUND_UP
+ (adjusted_mode->htotal - adjusted_mode->hdisplay, 4) * bpp_x16),
+ symbol_size);
+
+ crtc_state->min_hblank = hblank;
+}
+
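/*
 * Illustrative standalone sketch (not part of the diff above): the arithmetic
 * intel_dp_mst_compute_min_hblank() performs, with one worked example. The
 * DIV_ROUND_UP macro mirrors the kernel one, bpp_x16 is bpp in .4 fixed point
 * (bpp * 16), and the symbol size follows the 32-vs-8 choice made above for
 * 128b/132b (UHBR) vs 8b/10b links.
 */
#include <assert.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int hblank_pixels = 280;	/* htotal - hdisplay */
	int bpp_x16 = 24 * 16;		/* 24 bpp in .4 fixed point */
	int symbol_size = 8;		/* 8b/10b; would be 32 for 128b/132b */
	int hblank;

	hblank = DIV_ROUND_UP(DIV_ROUND_UP(hblank_pixels, 4) * bpp_x16, symbol_size);
	assert(hblank == 3360);		/* ceil(280 / 4) = 70; 70 * 384 / 8 = 3360 */
	return 0;
}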
int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
- int max_bpp, int min_bpp,
struct drm_connector_state *conn_state,
- int step, bool dsc)
+ int min_bpp_x16, int max_bpp_x16, int bpp_step_x16, bool dsc)
{
struct intel_display *display = to_intel_display(intel_dp);
struct drm_atomic_state *state = crtc_state->uapi.state;
+ struct drm_dp_mst_topology_state *mst_state = NULL;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- fixed20_12 pbn_div;
- int bpp, slots = -EINVAL;
+ bool is_mst = intel_dp->is_mst;
+ int bpp_x16, slots = -EINVAL;
int dsc_slice_count = 0;
- int max_dpt_bpp;
+ int max_dpt_bpp_x16;
+
+ /* shouldn't happen, sanity check */
+ drm_WARN_ON(display->drm, !dsc && (fxp_q4_to_frac(min_bpp_x16) ||
+ fxp_q4_to_frac(max_bpp_x16) ||
+ fxp_q4_to_frac(bpp_step_x16)));
+
+ if (is_mst) {
+ mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst.mgr);
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
+ mst_state->pbn_div = drm_dp_get_vc_payload_bw(crtc_state->port_clock,
+ crtc_state->lane_count);
+ }
if (dsc) {
if (!intel_dp_supports_fec(intel_dp, connector, crtc_state))
@@ -233,18 +268,15 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
crtc_state->fec_enable = !intel_dp_is_uhbr(crtc_state);
}
- pbn_div = drm_dp_get_vc_payload_bw(crtc_state->port_clock,
- crtc_state->lane_count);
-
- max_dpt_bpp = intel_dp_mst_max_dpt_bpp(crtc_state, dsc);
- if (max_bpp > max_dpt_bpp) {
- drm_dbg_kms(display->drm, "Limiting bpp to max DPT bpp (%d -> %d)\n",
- max_bpp, max_dpt_bpp);
- max_bpp = max_dpt_bpp;
+ max_dpt_bpp_x16 = fxp_q4_from_int(intel_dp_mst_max_dpt_bpp(crtc_state, dsc));
+ if (max_dpt_bpp_x16 && max_bpp_x16 > max_dpt_bpp_x16) {
+ drm_dbg_kms(display->drm, "Limiting bpp to max DPT bpp (" FXP_Q4_FMT " -> " FXP_Q4_FMT ")\n",
+ FXP_Q4_ARGS(max_bpp_x16), FXP_Q4_ARGS(max_dpt_bpp_x16));
+ max_bpp_x16 = max_dpt_bpp_x16;
}
- drm_dbg_kms(display->drm, "Looking for slots in range min bpp %d max bpp %d\n",
- min_bpp, max_bpp);
+ drm_dbg_kms(display->drm, "Looking for slots in range min bpp " FXP_Q4_FMT " max bpp " FXP_Q4_FMT "\n",
+ FXP_Q4_ARGS(min_bpp_x16), FXP_Q4_ARGS(max_bpp_x16));
if (dsc) {
dsc_slice_count = intel_dp_mst_dsc_get_slice_count(connector, crtc_state);
@@ -255,23 +287,27 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
}
}
- for (bpp = max_bpp; bpp >= min_bpp; bpp -= step) {
+ for (bpp_x16 = max_bpp_x16; bpp_x16 >= min_bpp_x16; bpp_x16 -= bpp_step_x16) {
int local_bw_overhead;
int link_bpp_x16;
- drm_dbg_kms(display->drm, "Trying bpp %d\n", bpp);
+ drm_dbg_kms(display->drm, "Trying bpp " FXP_Q4_FMT "\n", FXP_Q4_ARGS(bpp_x16));
- link_bpp_x16 = fxp_q4_from_int(dsc ? bpp :
- intel_dp_output_bpp(crtc_state->output_format, bpp));
+ link_bpp_x16 = dsc ? bpp_x16 :
+ fxp_q4_from_int(intel_dp_output_bpp(crtc_state->output_format,
+ fxp_q4_to_int(bpp_x16)));
local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state,
false, dsc_slice_count, link_bpp_x16);
+
+ intel_dp_mst_compute_min_hblank(crtc_state, link_bpp_x16);
+
intel_dp_mst_compute_m_n(crtc_state,
local_bw_overhead,
link_bpp_x16,
&crtc_state->dp_m_n);
- if (intel_dp->is_mst) {
+ if (is_mst) {
int remote_bw_overhead;
int remote_tu;
fixed20_12 pbn;
@@ -296,7 +332,7 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
pbn.full = dfixed_const(intel_dp_mst_calc_pbn(adjusted_mode->crtc_clock,
link_bpp_x16,
remote_bw_overhead));
- remote_tu = DIV_ROUND_UP(pbn.full, pbn_div.full);
+ remote_tu = DIV_ROUND_UP(pbn.full, mst_state->pbn_div.full);
/*
* Aligning the TUs ensures that symbols consisting of multiple
@@ -314,13 +350,13 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
* allocated for the whole path and the TUs allocated for the
* first branch device's link also applies here.
*/
- pbn.full = remote_tu * pbn_div.full;
+ pbn.full = remote_tu * mst_state->pbn_div.full;
drm_WARN_ON(display->drm, remote_tu < crtc_state->dp_m_n.tu);
crtc_state->dp_m_n.tu = remote_tu;
- slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr,
- connector->port,
+ slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst.mgr,
+ connector->mst.port,
dfixed_trunc(pbn));
} else {
/* Same as above for remote_tu */
@@ -343,7 +379,7 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
}
/* Allow using zero step to indicate one try */
- if (!step)
+ if (!bpp_step_x16)
break;
}
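/*
 * Illustrative standalone sketch (not part of the diff above): the shape of
 * the bpp search loop, which walks from max_bpp_x16 down to min_bpp_x16 in
 * bpp_step_x16 decrements and treats a zero step as "try only the maximum".
 * The fits() callback stands in for the slot/TU computation and is purely
 * hypothetical.
 */
#include <assert.h>

static int first_acceptable_bpp_x16(int min_x16, int max_x16, int step_x16,
				    int (*fits)(int bpp_x16))
{
	int bpp_x16;

	for (bpp_x16 = max_x16; bpp_x16 >= min_x16; bpp_x16 -= step_x16) {
		if (fits(bpp_x16))
			return bpp_x16;

		if (!step_x16)		/* zero step: a single attempt */
			break;
	}

	return -1;
}

static int fits_below_20bpp(int bpp_x16)
{
	return bpp_x16 <= 20 * 16;
}

int main(void)
{
	/* 18..24 bpp in whole-bpp (16/16ths) steps: first fit walking down is 20 bpp */
	assert(first_acceptable_bpp_x16(18 * 16, 24 * 16, 16, fits_below_20bpp) == 20 * 16);
	/* a zero step tries only 24 bpp, which does not fit here */
	assert(first_acceptable_bpp_x16(18 * 16, 24 * 16, 0, fits_below_20bpp) == -1);
	return 0;
}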
@@ -354,65 +390,42 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
}
if (!dsc)
- crtc_state->pipe_bpp = bpp;
+ crtc_state->pipe_bpp = fxp_q4_to_int(bpp_x16);
else
- crtc_state->dsc.compressed_bpp_x16 = fxp_q4_from_int(bpp);
+ crtc_state->dsc.compressed_bpp_x16 = bpp_x16;
- drm_dbg_kms(display->drm, "Got %d slots for pipe bpp %d dsc %d\n",
- slots, bpp, dsc);
+ drm_dbg_kms(display->drm, "Got %d slots for pipe bpp " FXP_Q4_FMT " dsc %d\n",
+ slots, FXP_Q4_ARGS(bpp_x16), dsc);
return 0;
}
-static int mst_stream_find_vcpi_slots_for_bpp(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state,
- int max_bpp, int min_bpp,
- struct link_config_limits *limits,
- struct drm_connector_state *conn_state,
- int step, bool dsc)
-{
- struct drm_atomic_state *state = crtc_state->uapi.state;
- struct drm_dp_mst_topology_state *mst_state;
-
- mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr);
- if (IS_ERR(mst_state))
- return PTR_ERR(mst_state);
-
- crtc_state->lane_count = limits->max_lane_count;
- crtc_state->port_clock = limits->max_rate;
-
- mst_state->pbn_div = drm_dp_get_vc_payload_bw(crtc_state->port_clock,
- crtc_state->lane_count);
-
- return intel_dp_mtp_tu_compute_config(intel_dp, crtc_state,
- max_bpp, min_bpp,
- conn_state, step, dsc);
-}
-
static int mst_stream_compute_link_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
- struct link_config_limits *limits)
+ const struct link_config_limits *limits)
{
+ crtc_state->lane_count = limits->max_lane_count;
+ crtc_state->port_clock = limits->max_rate;
+
/*
* FIXME: allocate the BW according to link_bpp, which in the case of
* YUV420 is only half of the pipe bpp value.
*/
- return mst_stream_find_vcpi_slots_for_bpp(intel_dp, crtc_state,
- fxp_q4_to_int(limits->link.max_bpp_x16),
- fxp_q4_to_int(limits->link.min_bpp_x16),
- limits,
- conn_state, 2 * 3, false);
+ return intel_dp_mtp_tu_compute_config(intel_dp, crtc_state, conn_state,
+ limits->link.min_bpp_x16,
+ limits->link.max_bpp_x16,
+ fxp_q4_from_int(2 * 3), false);
}
static int mst_stream_dsc_compute_link_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
- struct link_config_limits *limits)
+ const struct link_config_limits *limits)
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- int i, num_bpc;
+ int num_bpc;
u8 dsc_bpc[3] = {};
int min_bpp, max_bpp, sink_min_bpp, sink_max_bpp;
int min_compressed_bpp, max_compressed_bpp;
@@ -426,15 +439,8 @@ static int mst_stream_dsc_compute_link_config(struct intel_dp *intel_dp,
drm_dbg_kms(display->drm, "DSC Source supported min bpp %d max bpp %d\n",
min_bpp, max_bpp);
- sink_max_bpp = dsc_bpc[0] * 3;
- sink_min_bpp = sink_max_bpp;
-
- for (i = 1; i < num_bpc; i++) {
- if (sink_min_bpp > dsc_bpc[i] * 3)
- sink_min_bpp = dsc_bpc[i] * 3;
- if (sink_max_bpp < dsc_bpc[i] * 3)
- sink_max_bpp = dsc_bpc[i] * 3;
- }
+ sink_min_bpp = min_array(dsc_bpc, num_bpc) * 3;
+ sink_max_bpp = max_array(dsc_bpc, num_bpc) * 3;
drm_dbg_kms(display->drm, "DSC Sink supported min bpp %d max bpp %d\n",
sink_min_bpp, sink_max_bpp);
@@ -459,9 +465,13 @@ static int mst_stream_dsc_compute_link_config(struct intel_dp *intel_dp,
min_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(display, min_compressed_bpp,
crtc_state->pipe_bpp);
- return mst_stream_find_vcpi_slots_for_bpp(intel_dp, crtc_state, max_compressed_bpp,
- min_compressed_bpp, limits,
- conn_state, 1, true);
+ crtc_state->lane_count = limits->max_lane_count;
+ crtc_state->port_clock = limits->max_rate;
+
+ return intel_dp_mtp_tu_compute_config(intel_dp, crtc_state, conn_state,
+ fxp_q4_from_int(min_compressed_bpp),
+ fxp_q4_from_int(max_compressed_bpp),
+ fxp_q4_from_int(1), true);
}
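/*
 * Illustrative standalone sketch (not part of the diff above): the reworked
 * interface passes min/max/step bpp in ".4" fixed point (bpp scaled by 16),
 * so fractional DSC rates can be handled in one code path. The local Q4_*
 * macros model what the fxp_q4_from_int()/fxp_q4_to_int()/fxp_q4_to_frac()
 * helpers used above appear to do, assuming the usual 4-fractional-bit
 * convention.
 */
#include <assert.h>

#define Q4_FROM_INT(x)	((x) << 4)	/* bpp -> bpp_x16 */
#define Q4_TO_INT(x)	((x) >> 4)	/* bpp_x16 -> integer bpp */
#define Q4_TO_FRAC(x)	((x) & 0xf)	/* fractional part in 1/16ths */

int main(void)
{
	int bpp_x16 = Q4_FROM_INT(12) + 8;	/* 12.5 bpp, e.g. a DSC rate */

	assert(Q4_TO_INT(bpp_x16) == 12);
	assert(Q4_TO_FRAC(bpp_x16) == 8);	/* 8/16 = 0.5 */

	/* the non-DSC path above only steps in whole bpp: 2 * 3 -> 96 in x16 units */
	assert(Q4_FROM_INT(2 * 3) == 96);
	return 0;
}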
static int mst_stream_update_slots(struct intel_dp *intel_dp,
@@ -469,7 +479,7 @@ static int mst_stream_update_slots(struct intel_dp *intel_dp,
struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
+ struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst.mgr;
struct drm_dp_mst_topology_state *topology_state;
u8 link_coding_cap = intel_dp_is_uhbr(crtc_state) ?
DP_CAP_ANSI_128B132B : DP_CAP_ANSI_8B10B;
@@ -499,8 +509,8 @@ hblank_expansion_quirk_needs_dsc(const struct intel_connector *connector,
{
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- bool is_uhbr_sink = connector->mst_port &&
- drm_dp_128b132b_supported(connector->mst_port->dpcd);
+ bool is_uhbr_sink = connector->mst.dp &&
+ drm_dp_128b132b_supported(connector->mst.dp->dpcd);
int hblank_limit = is_uhbr_sink ? 500 : 300;
if (!connector->dp.dsc_hblank_expansion_quirk)
@@ -683,7 +693,7 @@ static int mst_stream_compute_config(struct intel_encoder *encoder,
ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
conn_state, &limits,
- pipe_config->dp_m_n.tu, false);
+ pipe_config->dp_m_n.tu);
}
if (ret)
@@ -731,7 +741,7 @@ intel_dp_mst_transcoder_mask(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
- if (connector->mst_port != mst_port || !conn_state->base.crtc)
+ if (connector->mst.dp != mst_port || !conn_state->base.crtc)
continue;
crtc = to_intel_crtc(conn_state->base.crtc);
@@ -759,12 +769,12 @@ static u8 get_pipes_downstream_of_mst_port(struct intel_atomic_state *state,
if (!conn_state->base.crtc)
continue;
- if (&connector->mst_port->mst_mgr != mst_mgr)
+ if (&connector->mst.dp->mst.mgr != mst_mgr)
continue;
- if (connector->port != parent_port &&
+ if (connector->mst.port != parent_port &&
!drm_dp_mst_port_downstream_of_parent(mst_mgr,
- connector->port,
+ connector->mst.port,
parent_port))
continue;
@@ -841,7 +851,7 @@ static int intel_dp_mst_check_bw(struct intel_atomic_state *state,
* @state must be recomputed with the updated @limits.
*
* Returns:
- * - 0 if the confugration is valid
+ * - 0 if the configuration is valid
* - %-EAGAIN, if the configuration is invalid and @limits got updated
* with fallback values with which the configuration of all CRTCs in
* @state must be recomputed
@@ -915,7 +925,7 @@ mst_connector_atomic_topology_check(struct intel_connector *connector,
struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
- if (connector_iter->mst_port != connector->mst_port ||
+ if (connector_iter->mst.dp != connector->mst.dp ||
connector_iter == connector)
continue;
@@ -947,33 +957,32 @@ mst_connector_atomic_topology_check(struct intel_connector *connector,
}
static int
-mst_connector_atomic_check(struct drm_connector *connector,
+mst_connector_atomic_check(struct drm_connector *_connector,
struct drm_atomic_state *_state)
{
struct intel_atomic_state *state = to_intel_atomic_state(_state);
- struct intel_connector *intel_connector =
- to_intel_connector(connector);
+ struct intel_connector *connector = to_intel_connector(_connector);
int ret;
- ret = intel_digital_connector_atomic_check(connector, &state->base);
+ ret = intel_digital_connector_atomic_check(&connector->base, &state->base);
if (ret)
return ret;
- ret = mst_connector_atomic_topology_check(intel_connector, state);
+ ret = mst_connector_atomic_topology_check(connector, state);
if (ret)
return ret;
- if (intel_connector_needs_modeset(state, connector)) {
+ if (intel_connector_needs_modeset(state, &connector->base)) {
ret = intel_dp_tunnel_atomic_check_state(state,
- intel_connector->mst_port,
- intel_connector);
+ connector->mst.dp,
+ connector);
if (ret)
return ret;
}
return drm_dp_atomic_release_time_slots(&state->base,
- &intel_connector->mst_port->mst_mgr,
- intel_connector->port);
+ &connector->mst.dp->mst.mgr,
+ connector->mst.port);
}
static void mst_stream_disable(struct intel_atomic_state *state,
@@ -986,16 +995,20 @@ static void mst_stream_disable(struct intel_atomic_state *state,
struct intel_dp *intel_dp = to_primary_dp(encoder);
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
+ enum transcoder trans = old_crtc_state->cpu_transcoder;
drm_dbg_kms(display->drm, "active links %d\n",
- intel_dp->active_mst_links);
+ intel_dp->mst.active_links);
- if (intel_dp->active_mst_links == 1)
+ if (intel_dp->mst.active_links == 1)
intel_dp->link_trained = false;
intel_hdcp_disable(intel_mst->connector);
intel_dp_sink_disable_decompression(state, connector, old_crtc_state);
+
+ if (DISPLAY_VER(display) >= 20)
+ intel_de_write(display, DP_MIN_HBLANK_CTL(trans), 0);
}
static void mst_stream_post_disable(struct intel_atomic_state *state,
@@ -1010,19 +1023,19 @@ static void mst_stream_post_disable(struct intel_atomic_state *state,
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
struct drm_dp_mst_topology_state *old_mst_state =
- drm_atomic_get_old_mst_topology_state(&state->base, &intel_dp->mst_mgr);
+ drm_atomic_get_old_mst_topology_state(&state->base, &intel_dp->mst.mgr);
struct drm_dp_mst_topology_state *new_mst_state =
- drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
+ drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst.mgr);
const struct drm_dp_mst_atomic_payload *old_payload =
- drm_atomic_get_mst_payload_state(old_mst_state, connector->port);
+ drm_atomic_get_mst_payload_state(old_mst_state, connector->mst.port);
struct drm_dp_mst_atomic_payload *new_payload =
- drm_atomic_get_mst_payload_state(new_mst_state, connector->port);
+ drm_atomic_get_mst_payload_state(new_mst_state, connector->mst.port);
struct intel_crtc *pipe_crtc;
bool last_mst_stream;
int i;
- intel_dp->active_mst_links--;
- last_mst_stream = intel_dp->active_mst_links == 0;
+ intel_dp->mst.active_links--;
+ last_mst_stream = intel_dp->mst.active_links == 0;
drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && last_mst_stream &&
!intel_dp_mst_is_master_trans(old_crtc_state));
@@ -1035,7 +1048,7 @@ static void mst_stream_post_disable(struct intel_atomic_state *state,
intel_disable_transcoder(old_crtc_state);
- drm_dp_remove_payload_part1(&intel_dp->mst_mgr, new_mst_state, new_payload);
+ drm_dp_remove_payload_part1(&intel_dp->mst.mgr, new_mst_state, new_payload);
intel_ddi_clear_act_sent(encoder, old_crtc_state);
@@ -1044,9 +1057,9 @@ static void mst_stream_post_disable(struct intel_atomic_state *state,
TRANS_DDI_DP_VC_PAYLOAD_ALLOC, 0);
intel_ddi_wait_for_act_sent(encoder, old_crtc_state);
- drm_dp_check_act_status(&intel_dp->mst_mgr);
+ drm_dp_check_act_status(&intel_dp->mst.mgr);
- drm_dp_remove_payload_part2(&intel_dp->mst_mgr, new_mst_state,
+ drm_dp_remove_payload_part2(&intel_dp->mst.mgr, new_mst_state,
old_payload, new_payload);
intel_ddi_disable_transcoder_func(old_crtc_state);
@@ -1067,7 +1080,7 @@ static void mst_stream_post_disable(struct intel_atomic_state *state,
* Power down mst path before disabling the port, otherwise we end
* up getting interrupts from the sink upon detecting link loss.
*/
- drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port,
+ drm_dp_send_power_updown_phy(&intel_dp->mst.mgr, connector->mst.port,
false);
/*
@@ -1092,7 +1105,7 @@ static void mst_stream_post_disable(struct intel_atomic_state *state,
old_crtc_state, NULL);
drm_dbg_kms(display->drm, "active links %d\n",
- intel_dp->active_mst_links);
+ intel_dp->mst.active_links);
}
static void mst_stream_post_pll_disable(struct intel_atomic_state *state,
@@ -1103,7 +1116,7 @@ static void mst_stream_post_pll_disable(struct intel_atomic_state *state,
struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
struct intel_dp *intel_dp = to_primary_dp(encoder);
- if (intel_dp->active_mst_links == 0 &&
+ if (intel_dp->mst.active_links == 0 &&
primary_encoder->post_pll_disable)
primary_encoder->post_pll_disable(state, primary_encoder, old_crtc_state, old_conn_state);
}
@@ -1116,7 +1129,7 @@ static void mst_stream_pre_pll_enable(struct intel_atomic_state *state,
struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
struct intel_dp *intel_dp = to_primary_dp(encoder);
- if (intel_dp->active_mst_links == 0)
+ if (intel_dp->mst.active_links == 0)
primary_encoder->pre_pll_enable(state, primary_encoder,
pipe_config, NULL);
else
@@ -1149,7 +1162,7 @@ static void intel_mst_reprobe_topology(struct intel_dp *intel_dp,
crtc_state->port_clock, crtc_state->lane_count))
return;
- drm_dp_mst_topology_queue_probe(&intel_dp->mst_mgr);
+ drm_dp_mst_topology_queue_probe(&intel_dp->mst.mgr);
intel_mst_set_probed_link_params(intel_dp,
crtc_state->port_clock, crtc_state->lane_count);
@@ -1167,7 +1180,7 @@ static void mst_stream_pre_enable(struct intel_atomic_state *state,
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct drm_dp_mst_topology_state *mst_state =
- drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
+ drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst.mgr);
int ret;
bool first_mst_stream;
@@ -1176,17 +1189,17 @@ static void mst_stream_pre_enable(struct intel_atomic_state *state,
*/
connector->encoder = encoder;
intel_mst->connector = connector;
- first_mst_stream = intel_dp->active_mst_links == 0;
+ first_mst_stream = intel_dp->mst.active_links == 0;
drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && first_mst_stream &&
!intel_dp_mst_is_master_trans(pipe_config));
drm_dbg_kms(display->drm, "active links %d\n",
- intel_dp->active_mst_links);
+ intel_dp->mst.active_links);
if (first_mst_stream)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
- drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
+ drm_dp_send_power_updown_phy(&intel_dp->mst.mgr, connector->mst.port, true);
intel_dp_sink_enable_decompression(state, connector, pipe_config);
@@ -1197,10 +1210,10 @@ static void mst_stream_pre_enable(struct intel_atomic_state *state,
intel_mst_reprobe_topology(intel_dp, pipe_config);
}
- intel_dp->active_mst_links++;
+ intel_dp->mst.active_links++;
- ret = drm_dp_add_payload_part1(&intel_dp->mst_mgr, mst_state,
- drm_atomic_get_mst_payload_state(mst_state, connector->port));
+ ret = drm_dp_add_payload_part1(&intel_dp->mst.mgr, mst_state,
+ drm_atomic_get_mst_payload_state(mst_state, connector->mst.port));
if (ret < 0)
intel_dp_queue_modeset_retry_for_link(state, primary_encoder, pipe_config);
@@ -1224,11 +1237,10 @@ static void mst_stream_pre_enable(struct intel_atomic_state *state,
static void enable_bs_jitter_was(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
u32 clear = 0;
u32 set = 0;
- if (!IS_ALDERLAKE_P(i915))
+ if (!display->platform.alderlake_p)
return;
if (!IS_DISPLAY_STEP(display, STEP_D0, STEP_FOREVER))
@@ -1265,11 +1277,11 @@ static void mst_stream_enable(struct intel_atomic_state *state,
struct intel_dp *intel_dp = to_primary_dp(encoder);
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_dp_mst_topology_state *mst_state =
- drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
+ drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst.mgr);
enum transcoder trans = pipe_config->cpu_transcoder;
- bool first_mst_stream = intel_dp->active_mst_links == 1;
+ bool first_mst_stream = intel_dp->mst.active_links == 1;
struct intel_crtc *pipe_crtc;
- int ret, i;
+ int ret, i, min_hblank;
drm_WARN_ON(display->drm, pipe_config->has_pch_encoder);
@@ -1284,6 +1296,29 @@ static void mst_stream_enable(struct intel_atomic_state *state,
TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff));
}
+ if (DISPLAY_VER(display) >= 20) {
+ /*
+ * Adjust the BlankingStart/BlankingEnd framing control based on
+ * the calculated min_hblank value
+ */
+ min_hblank = pipe_config->min_hblank - 2;
+
+ /* Maximum value to be programmed is limited to 0x10 */
+ min_hblank = min(0x10, min_hblank);
+
+ /*
+ * The minimum accepted hblank is 5 symbol cycles for 128b/132b and
+ * 3 symbol cycles for 8b/10b
+ */
+ if (intel_dp_is_uhbr(pipe_config))
+ min_hblank = max(min_hblank, 5);
+ else
+ min_hblank = max(min_hblank, 3);
+
+ intel_de_write(display, DP_MIN_HBLANK_CTL(trans),
+ min_hblank);
+ }
+
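/*
 * Illustrative standalone sketch (not part of the diff above): the
 * DP_MIN_HBLANK_CTL programming subtracts 2 from the computed symbol count,
 * caps the result at 0x10 and floors it at 5 (128b/132b) or 3 (8b/10b)
 * symbol cycles. A minimal model of that clamp, using no kernel APIs:
 */
#include <assert.h>
#include <stdbool.h>

static int min_hblank_ctl_value(int computed, bool is_uhbr)
{
	int val = computed - 2;		/* framing-control adjustment */

	if (val > 0x10)			/* hardware maximum */
		val = 0x10;

	if (is_uhbr && val < 5)		/* 128b/132b floor */
		val = 5;
	else if (!is_uhbr && val < 3)	/* 8b/10b floor */
		val = 3;

	return val;
}

int main(void)
{
	assert(min_hblank_ctl_value(4, false) == 3);	/* 2 is below the 8b/10b floor */
	assert(min_hblank_ctl_value(4, true) == 5);	/* floored to 5 on UHBR */
	assert(min_hblank_ctl_value(100, true) == 0x10);	/* capped at 0x10 */
	return 0;
}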
enable_bs_jitter_was(pipe_config);
intel_ddi_enable_transcoder_func(encoder, pipe_config);
@@ -1294,17 +1329,17 @@ static void mst_stream_enable(struct intel_atomic_state *state,
TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
drm_dbg_kms(display->drm, "active links %d\n",
- intel_dp->active_mst_links);
+ intel_dp->mst.active_links);
intel_ddi_wait_for_act_sent(encoder, pipe_config);
- drm_dp_check_act_status(&intel_dp->mst_mgr);
+ drm_dp_check_act_status(&intel_dp->mst.mgr);
if (first_mst_stream)
intel_ddi_wait_for_fec_status(encoder, pipe_config, true);
- ret = drm_dp_add_payload_part2(&intel_dp->mst_mgr,
+ ret = drm_dp_add_payload_part2(&intel_dp->mst.mgr,
drm_atomic_get_mst_payload_state(mst_state,
- connector->port));
+ connector->mst.port));
if (ret < 0)
intel_dp_queue_modeset_retry_for_link(state, primary_encoder, pipe_config);
@@ -1353,23 +1388,23 @@ static bool mst_stream_initial_fastset_check(struct intel_encoder *encoder,
return intel_dp_initial_fastset_check(primary_encoder, crtc_state);
}
-static int mst_connector_get_ddc_modes(struct drm_connector *connector)
+static int mst_connector_get_ddc_modes(struct drm_connector *_connector)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_dp *intel_dp = intel_connector->mst_port;
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_dp *intel_dp = connector->mst.dp;
const struct drm_edid *drm_edid;
int ret;
- if (drm_connector_is_unregistered(connector))
- return intel_connector_update_modes(connector, NULL);
+ if (drm_connector_is_unregistered(&connector->base))
+ return intel_connector_update_modes(&connector->base, NULL);
if (!intel_display_driver_check_access(display))
- return drm_edid_connector_add_modes(connector);
+ return drm_edid_connector_add_modes(&connector->base);
- drm_edid = drm_dp_mst_edid_read(connector, &intel_dp->mst_mgr, intel_connector->port);
+ drm_edid = drm_dp_mst_edid_read(&connector->base, &intel_dp->mst.mgr, connector->mst.port);
- ret = intel_connector_update_modes(connector, drm_edid);
+ ret = intel_connector_update_modes(&connector->base, drm_edid);
drm_edid_free(drm_edid);
@@ -1377,32 +1412,29 @@ static int mst_connector_get_ddc_modes(struct drm_connector *connector)
}
static int
-mst_connector_late_register(struct drm_connector *connector)
+mst_connector_late_register(struct drm_connector *_connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_connector *connector = to_intel_connector(_connector);
int ret;
- ret = drm_dp_mst_connector_late_register(connector,
- intel_connector->port);
+ ret = drm_dp_mst_connector_late_register(&connector->base, connector->mst.port);
if (ret < 0)
return ret;
- ret = intel_connector_register(connector);
+ ret = intel_connector_register(&connector->base);
if (ret < 0)
- drm_dp_mst_connector_early_unregister(connector,
- intel_connector->port);
+ drm_dp_mst_connector_early_unregister(&connector->base, connector->mst.port);
return ret;
}
static void
-mst_connector_early_unregister(struct drm_connector *connector)
+mst_connector_early_unregister(struct drm_connector *_connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_connector *connector = to_intel_connector(_connector);
- intel_connector_unregister(connector);
- drm_dp_mst_connector_early_unregister(connector,
- intel_connector->port);
+ intel_connector_unregister(&connector->base);
+ drm_dp_mst_connector_early_unregister(&connector->base, connector->mst.port);
}
static const struct drm_connector_funcs mst_connector_funcs = {
@@ -1416,23 +1448,24 @@ static const struct drm_connector_funcs mst_connector_funcs = {
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
-static int mst_connector_get_modes(struct drm_connector *connector)
+static int mst_connector_get_modes(struct drm_connector *_connector)
{
- return mst_connector_get_ddc_modes(connector);
+ struct intel_connector *connector = to_intel_connector(_connector);
+
+ return mst_connector_get_ddc_modes(&connector->base);
}
static int
-mst_connector_mode_valid_ctx(struct drm_connector *connector,
- struct drm_display_mode *mode,
+mst_connector_mode_valid_ctx(struct drm_connector *_connector,
+ const struct drm_display_mode *mode,
struct drm_modeset_acquire_ctx *ctx,
enum drm_mode_status *status)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_dp *intel_dp = intel_connector->mst_port;
- struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
- struct drm_dp_mst_port *port = intel_connector->port;
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_dp *intel_dp = connector->mst.dp;
+ struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst.mgr;
+ struct drm_dp_mst_port *port = connector->mst.port;
const int min_bpp = 18;
int max_dotclk = display->cdclk.max_dotclk_freq;
int max_rate, mode_rate, max_lanes, max_link_clock;
@@ -1443,12 +1476,12 @@ mst_connector_mode_valid_ctx(struct drm_connector *connector,
int target_clock = mode->clock;
int num_joined_pipes;
- if (drm_connector_is_unregistered(connector)) {
+ if (drm_connector_is_unregistered(&connector->base)) {
*status = MODE_ERROR;
return 0;
}
- *status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
+ *status = intel_cpu_transcoder_mode_valid(display, mode);
if (*status != MODE_OK)
return 0;
@@ -1481,7 +1514,7 @@ mst_connector_mode_valid_ctx(struct drm_connector *connector,
* corresponding link capabilities of the sink) in case the
* stream is uncompressed for it by the last branch device.
*/
- num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, intel_connector,
+ num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, connector,
mode->hdisplay, target_clock);
max_dotclk *= num_joined_pipes;
@@ -1495,14 +1528,14 @@ mst_connector_mode_valid_ctx(struct drm_connector *connector,
return 0;
}
- if (intel_dp_has_dsc(intel_connector)) {
+ if (intel_dp_has_dsc(connector)) {
/*
* TBD pass the connector BPC,
* for now U8_MAX so that max BPC on that platform would be picked
*/
- int pipe_bpp = intel_dp_dsc_compute_max_bpp(intel_connector, U8_MAX);
+ int pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, U8_MAX);
- if (drm_dp_sink_supports_fec(intel_connector->dp.fec_capability)) {
+ if (drm_dp_sink_supports_fec(connector->dp.fec_capability)) {
dsc_max_compressed_bpp =
intel_dp_dsc_get_max_compressed_bpp(display,
max_link_clock,
@@ -1513,7 +1546,7 @@ mst_connector_mode_valid_ctx(struct drm_connector *connector,
INTEL_OUTPUT_FORMAT_RGB,
pipe_bpp, 64);
dsc_slice_count =
- intel_dp_dsc_get_slice_count(intel_connector,
+ intel_dp_dsc_get_slice_count(connector,
target_clock,
mode->hdisplay,
num_joined_pipes);
@@ -1532,44 +1565,44 @@ mst_connector_mode_valid_ctx(struct drm_connector *connector,
return 0;
}
- *status = intel_mode_valid_max_plane_size(dev_priv, mode, num_joined_pipes);
+ *status = intel_mode_valid_max_plane_size(display, mode, num_joined_pipes);
return 0;
}
static struct drm_encoder *
-mst_connector_atomic_best_encoder(struct drm_connector *connector,
+mst_connector_atomic_best_encoder(struct drm_connector *_connector,
struct drm_atomic_state *state)
{
- struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
- connector);
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_dp *intel_dp = intel_connector->mst_port;
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct drm_connector_state *connector_state =
+ drm_atomic_get_new_connector_state(state, &connector->base);
+ struct intel_dp *intel_dp = connector->mst.dp;
struct intel_crtc *crtc = to_intel_crtc(connector_state->crtc);
- return &intel_dp->mst_encoders[crtc->pipe]->base.base;
+ return &intel_dp->mst.stream_encoders[crtc->pipe]->base.base;
}
static int
-mst_connector_detect_ctx(struct drm_connector *connector,
+mst_connector_detect_ctx(struct drm_connector *_connector,
struct drm_modeset_acquire_ctx *ctx, bool force)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_dp *intel_dp = intel_connector->mst_port;
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_dp *intel_dp = connector->mst.dp;
if (!intel_display_device_enabled(display))
return connector_status_disconnected;
- if (drm_connector_is_unregistered(connector))
+ if (drm_connector_is_unregistered(&connector->base))
return connector_status_disconnected;
if (!intel_display_driver_check_access(display))
- return connector->status;
+ return connector->base.status;
- intel_dp_flush_connector_commits(intel_connector);
+ intel_dp_flush_connector_commits(connector);
- return drm_dp_mst_detect_port(connector, ctx, &intel_dp->mst_mgr,
- intel_connector->port);
+ return drm_dp_mst_detect_port(&connector->base, ctx, &intel_dp->mst.mgr,
+ connector->mst.port);
}
static const struct drm_connector_helper_funcs mst_connector_helper_funcs = {
@@ -1605,29 +1638,30 @@ static bool mst_connector_get_hw_state(struct intel_connector *connector)
}
static int mst_topology_add_connector_properties(struct intel_dp *intel_dp,
- struct drm_connector *connector,
+ struct drm_connector *_connector,
const char *pathprop)
{
struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_connector *connector = to_intel_connector(_connector);
- drm_object_attach_property(&connector->base,
+ drm_object_attach_property(&connector->base.base,
display->drm->mode_config.path_property, 0);
- drm_object_attach_property(&connector->base,
+ drm_object_attach_property(&connector->base.base,
display->drm->mode_config.tile_property, 0);
- intel_attach_force_audio_property(connector);
- intel_attach_broadcast_rgb_property(connector);
+ intel_attach_force_audio_property(&connector->base);
+ intel_attach_broadcast_rgb_property(&connector->base);
/*
* Reuse the prop from the SST connector because we're
* not allowed to create new props after device registration.
*/
- connector->max_bpc_property =
+ connector->base.max_bpc_property =
intel_dp->attached_connector->base.max_bpc_property;
- if (connector->max_bpc_property)
- drm_connector_attach_max_bpc_property(connector, 6, 12);
+ if (connector->base.max_bpc_property)
+ drm_connector_attach_max_bpc_property(&connector->base, 6, 12);
- return drm_connector_set_path_property(connector, pathprop);
+ return drm_connector_set_path_property(&connector->base, pathprop);
}
static void
@@ -1659,10 +1693,10 @@ static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *conn
* A logical port's OUI (at least for affected sinks) is all 0, so
* instead of that the parent port's OUI is used for identification.
*/
- if (drm_dp_mst_port_is_logical(connector->port)) {
- aux = drm_dp_mst_aux_for_parent(connector->port);
+ if (drm_dp_mst_port_is_logical(connector->mst.port)) {
+ aux = drm_dp_mst_aux_for_parent(connector->mst.port);
if (!aux)
- aux = &connector->mst_port->aux;
+ aux = &connector->mst.dp->aux;
}
if (drm_dp_read_dpcd_caps(aux, dpcd) < 0)
@@ -1697,72 +1731,68 @@ mst_topology_add_connector(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
const char *pathprop)
{
- struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
+ struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst.mgr);
struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct intel_connector *intel_connector;
- struct drm_connector *connector;
+ struct intel_connector *connector;
enum pipe pipe;
int ret;
- intel_connector = intel_connector_alloc();
- if (!intel_connector)
+ connector = intel_connector_alloc();
+ if (!connector)
return NULL;
- connector = &intel_connector->base;
-
- intel_connector->get_hw_state = mst_connector_get_hw_state;
- intel_connector->sync_state = intel_dp_connector_sync_state;
- intel_connector->mst_port = intel_dp;
- intel_connector->port = port;
+ connector->get_hw_state = mst_connector_get_hw_state;
+ connector->sync_state = intel_dp_connector_sync_state;
+ connector->mst.dp = intel_dp;
+ connector->mst.port = port;
drm_dp_mst_get_port_malloc(port);
- intel_dp_init_modeset_retry_work(intel_connector);
-
- ret = drm_connector_dynamic_init(display->drm, connector, &mst_connector_funcs,
+ ret = drm_connector_dynamic_init(display->drm, &connector->base, &mst_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort, NULL);
- if (ret) {
- drm_dp_mst_put_port_malloc(port);
- intel_connector_free(intel_connector);
- return NULL;
- }
+ if (ret)
+ goto err_put_port;
- intel_connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port);
- intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector);
- intel_connector->dp.dsc_hblank_expansion_quirk =
- detect_dsc_hblank_expansion_quirk(intel_connector);
+ connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port);
+ intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, connector);
+ connector->dp.dsc_hblank_expansion_quirk =
+ detect_dsc_hblank_expansion_quirk(connector);
- drm_connector_helper_add(connector, &mst_connector_helper_funcs);
+ drm_connector_helper_add(&connector->base, &mst_connector_helper_funcs);
for_each_pipe(display, pipe) {
struct drm_encoder *enc =
- &intel_dp->mst_encoders[pipe]->base.base;
+ &intel_dp->mst.stream_encoders[pipe]->base.base;
- ret = drm_connector_attach_encoder(&intel_connector->base, enc);
+ ret = drm_connector_attach_encoder(&connector->base, enc);
if (ret)
- goto err;
+ goto err_cleanup_connector;
}
- ret = mst_topology_add_connector_properties(intel_dp, connector, pathprop);
+ ret = mst_topology_add_connector_properties(intel_dp, &connector->base, pathprop);
if (ret)
- goto err;
+ goto err_cleanup_connector;
- ret = intel_dp_hdcp_init(dig_port, intel_connector);
+ ret = intel_dp_hdcp_init(dig_port, connector);
if (ret)
drm_dbg_kms(display->drm, "[%s:%d] HDCP MST init failed, skipping.\n",
- connector->name, connector->base.id);
+ connector->base.name, connector->base.base.id);
+
+ return &connector->base;
- return connector;
+err_cleanup_connector:
+ drm_connector_cleanup(&connector->base);
+err_put_port:
+ drm_dp_mst_put_port_malloc(port);
+ intel_connector_free(connector);
-err:
- drm_connector_cleanup(connector);
return NULL;
}
static void
mst_topology_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
{
- struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
+ struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst.mgr);
intel_hpd_trigger_irq(dp_to_dig_port(intel_dp));
}
@@ -1835,14 +1865,14 @@ mst_stream_encoders_create(struct intel_digital_port *dig_port)
enum pipe pipe;
for_each_pipe(display, pipe)
- intel_dp->mst_encoders[pipe] = mst_stream_encoder_create(dig_port, pipe);
+ intel_dp->mst.stream_encoders[pipe] = mst_stream_encoder_create(dig_port, pipe);
return true;
}
int
intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port)
{
- return dig_port->dp.active_mst_links;
+ return dig_port->dp.mst.active_links;
}
int
@@ -1862,15 +1892,15 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
if (DISPLAY_VER(display) < 11 && port == PORT_E)
return 0;
- intel_dp->mst_mgr.cbs = &mst_topology_cbs;
+ intel_dp->mst.mgr.cbs = &mst_topology_cbs;
/* create encoders */
mst_stream_encoders_create(dig_port);
- ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, display->drm,
+ ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst.mgr, display->drm,
&intel_dp->aux, 16,
INTEL_NUM_PIPES(display), conn_base_id);
if (ret) {
- intel_dp->mst_mgr.cbs = NULL;
+ intel_dp->mst.mgr.cbs = NULL;
return ret;
}
@@ -1879,7 +1909,7 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
bool intel_dp_mst_source_support(struct intel_dp *intel_dp)
{
- return intel_dp->mst_mgr.cbs;
+ return intel_dp->mst.mgr.cbs;
}
void
@@ -1890,10 +1920,10 @@ intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port)
if (!intel_dp_mst_source_support(intel_dp))
return;
- drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr);
+ drm_dp_mst_topology_mgr_destroy(&intel_dp->mst.mgr);
/* encoders will get killed by normal cleanup */
- intel_dp->mst_mgr.cbs = NULL;
+ intel_dp->mst.mgr.cbs = NULL;
}
bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state)
@@ -1924,11 +1954,11 @@ intel_dp_mst_add_topology_state_for_connector(struct intel_atomic_state *state,
{
struct drm_dp_mst_topology_state *mst_state;
- if (!connector->mst_port)
+ if (!connector->mst.dp)
return 0;
mst_state = drm_atomic_get_mst_topology_state(&state->base,
- &connector->mst_port->mst_mgr);
+ &connector->mst.dp->mst.mgr);
if (IS_ERR(mst_state))
return PTR_ERR(mst_state);
@@ -2026,7 +2056,7 @@ bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state;
struct intel_crtc *crtc_iter;
- if (connector->mst_port != crtc_connector->mst_port ||
+ if (connector->mst.dp != crtc_connector->mst.dp ||
!conn_state->crtc)
continue;
@@ -2049,7 +2079,7 @@ bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
* case.
*/
if (connector->dp.dsc_decompression_aux ==
- &connector->mst_port->aux)
+ &connector->mst.dp->aux)
return true;
}
@@ -2061,7 +2091,7 @@ bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
* @intel_dp: DP port object
*
* Prepare an MST link for topology probing, programming the target
- * link parameters to DPCD. This step is a requirement of the enumaration
+ * link parameters to DPCD. This step is a requirement of the enumeration
* of path resources during probing.
*/
void intel_dp_mst_prepare_probe(struct intel_dp *intel_dp)
@@ -2110,7 +2140,7 @@ bool intel_dp_mst_verify_dpcd_state(struct intel_dp *intel_dp)
if (!intel_dp->is_mst)
return true;
- ret = drm_dp_dpcd_readb(intel_dp->mst_mgr.aux, DP_MSTM_CTRL, &val);
+ ret = drm_dp_dpcd_readb(intel_dp->mst.mgr.aux, DP_MSTM_CTRL, &val);
/* Adjust the expected register value for SST + SideBand. */
if (ret < 0 || val != (DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC)) {
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index c6bdc1d190a4..c1bbfeb02ca9 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -33,8 +33,7 @@ bool intel_dp_mst_verify_dpcd_state(struct intel_dp *intel_dp);
int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
- int max_bpp, int min_bpp,
struct drm_connector_state *conn_state,
- int step, bool dsc);
+ int min_bpp_x16, int max_bpp_x16, int bpp_step_x16, bool dsc);
#endif /* __INTEL_DP_MST_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_test.c b/drivers/gpu/drm/i915/display/intel_dp_test.c
index 380b359b0420..bd61f3c3ec91 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_test.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_test.c
@@ -6,6 +6,8 @@
#include <drm/display/drm_dp.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "i915_reg.h"
@@ -257,7 +259,7 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
/*
* FIXME: Ideally pattern should come from DPCD 0x250. As
* current firmware of DPR-100 could not set it, so hardcoding
- * now for complaince test.
+ * now for compliance test.
*/
drm_dbg_kms(display->drm,
"Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
@@ -275,7 +277,7 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
/*
* FIXME: Ideally pattern should come from DPCD 0x24A. As
* current firmware of DPR-100 could not set it, so hardcoding
- * now for complaince test.
+ * now for compliance test.
*/
drm_dbg_kms(display->drm,
"Set HBR2 compliance Phy Test Pattern\n");
diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
index 589872babdd7..faa2b7a46699 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
@@ -4,6 +4,7 @@
*/
#include <drm/display/drm_dp_tunnel.h>
+#include <drm/drm_print.h>
#include "intel_atomic.h"
#include "intel_display_core.h"
@@ -647,7 +648,7 @@ void intel_dp_tunnel_atomic_clear_stream_bw(struct intel_atomic_state *state,
* @state must be recomputed with the updated @limits.
*
* Returns:
- * - 0 if the confugration is valid
+ * - 0 if the configuration is valid
* - %-EAGAIN, if the configuration is invalid and @limits got updated
* with fallback values with which the configuration of all CRTCs in
* @state must be recomputed
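The -EAGAIN contract documented above (recompute the configuration with the fallback limits the helper wrote back) is easiest to see as a retry loop. A minimal sketch with stand-in types and a fake check function, not the actual atomic-check code path:

#include <errno.h>
#include <stdio.h>

struct limits { int max_bw; };

/* Stand-in for the tunnel BW check: shrink the limits once, then accept. */
static int check_config(const struct limits *lim, struct limits *out)
{
	if (lim->max_bw > 100) {
		out->max_bw = 100;	/* fallback limits written back */
		return -EAGAIN;
	}
	return 0;
}

int main(void)
{
	struct limits lim = { .max_bw = 250 };
	int ret;

	/* Recompute the configuration until the updated limits are accepted. */
	do {
		struct limits updated = lim;

		ret = check_config(&lim, &updated);
		if (ret == -EAGAIN)
			lim = updated;
	} while (ret == -EAGAIN);

	printf("final max_bw=%d ret=%d\n", lim.max_bw, ret);
	return 0;
}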
diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.h b/drivers/gpu/drm/i915/display/intel_dp_tunnel.h
index e9314cf25a19..7f0f720e8dca 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_tunnel.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.h
@@ -20,7 +20,8 @@ struct intel_dp;
struct intel_encoder;
struct intel_link_bw_limits;
-#if IS_ENABLED(CONFIG_DRM_I915_DP_TUNNEL) && defined(I915)
+#if (IS_ENABLED(CONFIG_DRM_I915_DP_TUNNEL) && defined(I915)) || \
+ (IS_ENABLED(CONFIG_DRM_XE_DP_TUNNEL) && !defined(I915))
int intel_dp_tunnel_detect(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx);
void intel_dp_tunnel_disconnect(struct intel_dp *intel_dp);
@@ -127,6 +128,6 @@ intel_dp_tunnel_mgr_init(struct intel_display *display)
static inline void intel_dp_tunnel_mgr_cleanup(struct intel_display *display) {}
-#endif /* CONFIG_DRM_I915_DP_TUNNEL */
+#endif /* CONFIG_DRM_I915_DP_TUNNEL || CONFIG_DRM_XE_DP_TUNNEL */
#endif /* __INTEL_DP_TUNNEL_H__ */
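The header keeps the usual pattern: compile the real declarations when either driver's tunnel support is enabled, and fall back to empty static inline stubs otherwise so callers need no #ifdefs of their own. A generic sketch of that pattern with an invented config symbol and function name:

#include <stdio.h>

/* Build with -DCONFIG_EXAMPLE_FEATURE to compile the real implementation. */
#ifdef CONFIG_EXAMPLE_FEATURE
int example_feature_detect(void)
{
	return 1;	/* pretend real probing happened */
}
#else
/* Trivial stub keeps call sites free of #ifdefs when the feature is compiled out. */
static inline int example_feature_detect(void)
{
	return 0;
}
#endif

int main(void)
{
	printf("feature present: %d\n", example_feature_detect());
	return 0;
}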
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 52a36a2281e6..429f89543789 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -40,7 +40,7 @@
* VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
* ports. DPIO is the name given to such a display PHY. These PHYs
* don't follow the standard programming model using direct MMIO
- * registers, and instead their registers must be accessed trough IOSF
+ * registers, and instead their registers must be accessed through IOSF
* sideband. VLV has one such PHY for driving ports B and C, and CHV
* adds another PHY for driving port D. Each PHY responds to specific
* IOSF-SB port.
@@ -1156,3 +1156,37 @@ void vlv_phy_reset_lanes(struct intel_encoder *encoder,
vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1_GRP(ch), 0x00e00060);
vlv_dpio_put(dev_priv);
}
+
+void vlv_wait_port_ready(struct intel_encoder *encoder,
+ unsigned int expected_mask)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ u32 port_mask;
+ i915_reg_t dpll_reg;
+
+ switch (encoder->port) {
+ default:
+ MISSING_CASE(encoder->port);
+ fallthrough;
+ case PORT_B:
+ port_mask = DPLL_PORTB_READY_MASK;
+ dpll_reg = DPLL(display, 0);
+ break;
+ case PORT_C:
+ port_mask = DPLL_PORTC_READY_MASK;
+ dpll_reg = DPLL(display, 0);
+ expected_mask <<= 4;
+ break;
+ case PORT_D:
+ port_mask = DPLL_PORTD_READY_MASK;
+ dpll_reg = DPIO_PHY_STATUS;
+ break;
+ }
+
+ if (intel_de_wait(display, dpll_reg, port_mask, expected_mask, 1000))
+ drm_WARN(display->drm, 1,
+ "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
+ encoder->base.base.id, encoder->base.name,
+ intel_de_read(display, dpll_reg) & port_mask,
+ expected_mask);
+}
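vlv_wait_port_ready() above is the familiar poll-a-masked-register-until-it-matches idiom, with a WARN if the expected value never shows up. A self-contained sketch of the same idiom against a fake register, with intel_de_wait()'s millisecond budget replaced by a simple retry count:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;	/* stands in for the MMIO status register */

/* Poll (reg & mask) until it equals expected, or give up after max_tries. */
static bool wait_masked(uint32_t mask, uint32_t expected, int max_tries)
{
	for (int i = 0; i < max_tries; i++) {
		if ((fake_reg & mask) == expected)
			return true;
		fake_reg |= 0x1;	/* pretend the hardware makes progress */
	}
	return false;
}

int main(void)
{
	if (!wait_masked(0xf, 0x1, 1000))
		fprintf(stderr, "timed out waiting for port ready: got 0x%x\n",
			fake_reg & 0xf);
	else
		printf("port ready (reg=0x%x)\n", fake_reg);
	return 0;
}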
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.h b/drivers/gpu/drm/i915/display/intel_dpio_phy.h
index a82939165546..35baede3d6ad 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.h
@@ -72,6 +72,8 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void vlv_phy_reset_lanes(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state);
+void vlv_wait_port_ready(struct intel_encoder *encoder,
+ unsigned int expected_mask);
#else
static inline void bxt_port_to_phy_channel(struct intel_display *display, enum port port,
enum dpio_phy *phy, enum dpio_channel *ch)
@@ -170,6 +172,10 @@ static inline void vlv_phy_reset_lanes(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
}
+static inline void vlv_wait_port_ready(struct intel_encoder *encoder,
+ unsigned int expected_mask)
+{
+}
#endif
#endif /* __INTEL_DPIO_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 3256b1293f7f..08a30e5aafce 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -1843,7 +1843,7 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
enum pipe pipe = crtc->pipe;
int i;
- assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
+ assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
/* PLL is protected by panel, make sure we can write it */
if (i9xx_has_pps(dev_priv))
@@ -2024,7 +2024,7 @@ void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
- assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
+ assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
/* PLL is protected by panel, make sure we can write it */
assert_pps_unlocked(display, pipe);
@@ -2171,7 +2171,7 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
- assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
+ assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
/* PLL is protected by panel, make sure we can write it */
assert_pps_unlocked(display, pipe);
@@ -2253,36 +2253,38 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
+ struct intel_display *display = &dev_priv->display;
u32 val;
/* Make sure the pipe isn't still relying on us */
- assert_transcoder_disabled(dev_priv, (enum transcoder)pipe);
+ assert_transcoder_disabled(display, (enum transcoder)pipe);
val = DPLL_INTEGRATED_REF_CLK_VLV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
if (pipe != PIPE_A)
val |= DPLL_INTEGRATED_CRI_CLK_VLV;
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), val);
- intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
+ intel_de_write(display, DPLL(display, pipe), val);
+ intel_de_posting_read(display, DPLL(display, pipe));
}
void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
+ struct intel_display *display = &dev_priv->display;
enum dpio_channel ch = vlv_pipe_to_channel(pipe);
enum dpio_phy phy = vlv_pipe_to_phy(pipe);
u32 val;
/* Make sure the pipe isn't still relying on us */
- assert_transcoder_disabled(dev_priv, (enum transcoder)pipe);
+ assert_transcoder_disabled(display, (enum transcoder)pipe);
val = DPLL_SSC_REF_CLK_CHV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
if (pipe != PIPE_A)
val |= DPLL_INTEGRATED_CRI_CLK_VLV;
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), val);
- intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
+ intel_de_write(display, DPLL(display, pipe), val);
+ intel_de_posting_read(display, DPLL(display, pipe));
vlv_dpio_get(dev_priv);
@@ -2296,19 +2298,19 @@ void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
/* Don't disable pipe or pipe PLLs if needed */
- if (IS_I830(dev_priv))
+ if (display->platform.i830)
return;
/* Make sure the pipe isn't still relying on us */
- assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
+ assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), DPLL_VGA_MODE_DIS);
- intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
+ intel_de_write(display, DPLL(display, pipe), DPLL_VGA_MODE_DIS);
+ intel_de_posting_read(display, DPLL(display, pipe));
}
@@ -2329,10 +2331,9 @@ void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
}
/* Only for pre-ILK configs */
-static void assert_pll(struct drm_i915_private *dev_priv,
+static void assert_pll(struct intel_display *display,
enum pipe pipe, bool state)
{
- struct intel_display *display = &dev_priv->display;
bool cur_state;
cur_state = intel_de_read(display, DPLL(display, pipe)) & DPLL_VCO_ENABLE;
@@ -2341,12 +2342,12 @@ static void assert_pll(struct drm_i915_private *dev_priv,
str_on_off(state), str_on_off(cur_state));
}
-void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
+void assert_pll_enabled(struct intel_display *display, enum pipe pipe)
{
- assert_pll(i915, pipe, true);
+ assert_pll(display, pipe, true);
}
-void assert_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
+void assert_pll_disabled(struct intel_display *display, enum pipe pipe)
{
- assert_pll(i915, pipe, false);
+ assert_pll(display, pipe, false);
}
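assert_pll() reads the enable bit back from hardware and warns when it disagrees with the caller's expectation; the enabled/disabled wrappers just pin the expected state. A toy version of that readback-and-compare assertion, with an invented register layout and the WARN reduced to a stderr message:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VCO_ENABLE (1u << 31)	/* illustrative bit, not the real DPLL layout */

static uint32_t dpll_reg[2];	/* one fake DPLL control register per pipe */

static void assert_pll_state(int pipe, bool expected)
{
	bool cur = dpll_reg[pipe] & VCO_ENABLE;

	if (cur != expected)
		fprintf(stderr, "PLL %d assertion failure (expected %s, current %s)\n",
			pipe, expected ? "on" : "off", cur ? "on" : "off");
}

int main(void)
{
	dpll_reg[0] = VCO_ENABLE;
	assert_pll_state(0, true);	/* silent: state matches */
	assert_pll_state(1, true);	/* warns: pipe 1 PLL is off */
	return 0;
}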
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
index a86a79408af0..21d06cbd2ce7 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -13,6 +13,7 @@ struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_dpll_hw_state;
enum pipe;
@@ -46,7 +47,7 @@ void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state);
void vlv_crtc_clock_get(struct intel_crtc_state *crtc_state);
void chv_crtc_clock_get(struct intel_crtc_state *crtc_state);
-void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe);
-void assert_pll_disabled(struct drm_i915_private *i915, enum pipe pipe);
+void assert_pll_enabled(struct intel_display *display, enum pipe pipe);
+void assert_pll_disabled(struct intel_display *display, enum pipe pipe);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index d86cc9ffd4ac..c825a507b905 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -27,6 +27,7 @@
#include "bxt_dpio_phy_regs.h"
#include "i915_drv.h"
#include "i915_reg.h"
+#include "intel_cx0_phy.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dkl_phy.h"
@@ -65,7 +66,7 @@ struct intel_shared_dpll_funcs {
* Hook for enabling the pll, called from intel_enable_shared_dpll() if
* the pll is not already enabled.
*/
- void (*enable)(struct drm_i915_private *i915,
+ void (*enable)(struct intel_display *display,
struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state);
@@ -74,7 +75,7 @@ struct intel_shared_dpll_funcs {
* only when it is safe to disable the pll, i.e., there are no more
* tracked users for it.
*/
- void (*disable)(struct drm_i915_private *i915,
+ void (*disable)(struct intel_display *display,
struct intel_shared_dpll *pll);
/*
@@ -82,7 +83,7 @@ struct intel_shared_dpll_funcs {
* registers. This is used for initial hw state readout and state
* verification after a mode set.
*/
- bool (*get_hw_state)(struct drm_i915_private *i915,
+ bool (*get_hw_state)(struct intel_display *display,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state);
@@ -90,7 +91,7 @@ struct intel_shared_dpll_funcs {
* Hook for calculating the pll's output frequency based on its passed
* in state.
*/
- int (*get_freq)(struct drm_i915_private *i915,
+ int (*get_freq)(struct intel_display *display,
const struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state);
};
@@ -109,7 +110,7 @@ struct intel_dpll_mgr {
void (*update_active_dpll)(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder);
- void (*update_ref_clks)(struct drm_i915_private *i915);
+ void (*update_ref_clks)(struct intel_display *display);
void (*dump_hw_state)(struct drm_printer *p,
const struct intel_dpll_hw_state *dpll_hw_state);
bool (*compare_hw_state)(const struct intel_dpll_hw_state *a,
@@ -117,14 +118,14 @@ struct intel_dpll_mgr {
};
static void
-intel_atomic_duplicate_dpll_state(struct drm_i915_private *i915,
+intel_atomic_duplicate_dpll_state(struct intel_display *display,
struct intel_shared_dpll_state *shared_dpll)
{
struct intel_shared_dpll *pll;
int i;
/* Copy shared dpll state */
- for_each_shared_dpll(i915, pll, i)
+ for_each_shared_dpll(display, pll, i)
shared_dpll[pll->index] = pll->state;
}
@@ -132,13 +133,14 @@ static struct intel_shared_dpll_state *
intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
{
struct intel_atomic_state *state = to_intel_atomic_state(s);
+ struct intel_display *display = to_intel_display(state);
drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
if (!state->dpll_set) {
state->dpll_set = true;
- intel_atomic_duplicate_dpll_state(to_i915(s->dev),
+ intel_atomic_duplicate_dpll_state(display,
state->shared_dpll);
}
@@ -147,20 +149,20 @@ intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
/**
* intel_get_shared_dpll_by_id - get a DPLL given its id
- * @i915: i915 device instance
+ * @display: intel_display device instance
* @id: pll id
*
* Returns:
* A pointer to the DPLL with @id
*/
struct intel_shared_dpll *
-intel_get_shared_dpll_by_id(struct drm_i915_private *i915,
+intel_get_shared_dpll_by_id(struct intel_display *display,
enum intel_dpll_id id)
{
struct intel_shared_dpll *pll;
int i;
- for_each_shared_dpll(i915, pll, i) {
+ for_each_shared_dpll(display, pll, i) {
if (pll->info->id == id)
return pll;
}
@@ -170,11 +172,10 @@ intel_get_shared_dpll_by_id(struct drm_i915_private *i915,
}
/* For ILK+ */
-void assert_shared_dpll(struct drm_i915_private *i915,
+void assert_shared_dpll(struct intel_display *display,
struct intel_shared_dpll *pll,
bool state)
{
- struct intel_display *display = &i915->display;
bool cur_state;
struct intel_dpll_hw_state hw_state;
@@ -182,7 +183,7 @@ void assert_shared_dpll(struct drm_i915_private *i915,
"asserting DPLL %s with no DPLL\n", str_on_off(state)))
return;
- cur_state = intel_dpll_get_hw_state(i915, pll, &hw_state);
+ cur_state = intel_dpll_get_hw_state(display, pll, &hw_state);
INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
"%s assertion failure (expected %s, current %s)\n",
pll->info->name, str_on_off(state),
@@ -200,12 +201,12 @@ enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
}
static i915_reg_t
-intel_combo_pll_enable_reg(struct drm_i915_private *i915,
+intel_combo_pll_enable_reg(struct intel_display *display,
struct intel_shared_dpll *pll)
{
- if (IS_DG1(i915))
+ if (display->platform.dg1)
return DG1_DPLL_ENABLE(pll->info->id);
- else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
+ else if ((display->platform.jasperlake || display->platform.elkhartlake) &&
(pll->info->id == DPLL_ID_EHL_DPLL4))
return MG_PLL_ENABLE(0);
@@ -213,36 +214,36 @@ intel_combo_pll_enable_reg(struct drm_i915_private *i915,
}
static i915_reg_t
-intel_tc_pll_enable_reg(struct drm_i915_private *i915,
+intel_tc_pll_enable_reg(struct intel_display *display,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
enum tc_port tc_port = icl_pll_id_to_tc_port(id);
- if (IS_ALDERLAKE_P(i915))
+ if (display->platform.alderlake_p)
return ADLP_PORTTC_PLL_ENABLE(tc_port);
return MG_PLL_ENABLE(tc_port);
}
-static void _intel_enable_shared_dpll(struct drm_i915_private *i915,
+static void _intel_enable_shared_dpll(struct intel_display *display,
struct intel_shared_dpll *pll)
{
if (pll->info->power_domain)
- pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
+ pll->wakeref = intel_display_power_get(display, pll->info->power_domain);
- pll->info->funcs->enable(i915, pll, &pll->state.hw_state);
+ pll->info->funcs->enable(display, pll, &pll->state.hw_state);
pll->on = true;
}
-static void _intel_disable_shared_dpll(struct drm_i915_private *i915,
+static void _intel_disable_shared_dpll(struct intel_display *display,
struct intel_shared_dpll *pll)
{
- pll->info->funcs->disable(i915, pll);
+ pll->info->funcs->disable(display, pll);
pll->on = false;
if (pll->info->power_domain)
- intel_display_power_put(i915, pll->info->power_domain, pll->wakeref);
+ intel_display_power_put(display, pll->info->power_domain, pll->wakeref);
}
/**
@@ -253,42 +254,42 @@ static void _intel_disable_shared_dpll(struct drm_i915_private *i915,
*/
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
unsigned int pipe_mask = BIT(crtc->pipe);
unsigned int old_mask;
- if (drm_WARN_ON(&i915->drm, pll == NULL))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
old_mask = pll->active_mask;
- if (drm_WARN_ON(&i915->drm, !(pll->state.pipe_mask & pipe_mask)) ||
- drm_WARN_ON(&i915->drm, pll->active_mask & pipe_mask))
+ if (drm_WARN_ON(display->drm, !(pll->state.pipe_mask & pipe_mask)) ||
+ drm_WARN_ON(display->drm, pll->active_mask & pipe_mask))
goto out;
pll->active_mask |= pipe_mask;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
pll->info->name, pll->active_mask, pll->on,
crtc->base.base.id, crtc->base.name);
if (old_mask) {
- drm_WARN_ON(&i915->drm, !pll->on);
- assert_shared_dpll_enabled(i915, pll);
+ drm_WARN_ON(display->drm, !pll->on);
+ assert_shared_dpll_enabled(display, pll);
goto out;
}
- drm_WARN_ON(&i915->drm, pll->on);
+ drm_WARN_ON(display->drm, pll->on);
- drm_dbg_kms(&i915->drm, "enabling %s\n", pll->info->name);
+ drm_dbg_kms(display->drm, "enabling %s\n", pll->info->name);
- _intel_enable_shared_dpll(i915, pll);
+ _intel_enable_shared_dpll(display, pll);
out:
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
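intel_enable_shared_dpll() tracks users with a per-pipe bitmask: the hardware is only touched when the first pipe takes a reference, and later pipes simply join active_mask; the disable path mirrors it. A reduced sketch of that first-user-enables bookkeeping, with locking and the real hardware hooks omitted:

#include <stdbool.h>
#include <stdio.h>

struct shared_pll {
	unsigned int active_mask;	/* one bit per pipe using this PLL */
	bool on;
};

static void pll_hw_enable(struct shared_pll *pll)  { pll->on = true; }
static void pll_hw_disable(struct shared_pll *pll) { pll->on = false; }

static void pll_get(struct shared_pll *pll, int pipe)
{
	unsigned int old_mask = pll->active_mask;

	pll->active_mask |= 1u << pipe;
	if (!old_mask)			/* first user powers the PLL up */
		pll_hw_enable(pll);
}

static void pll_put(struct shared_pll *pll, int pipe)
{
	pll->active_mask &= ~(1u << pipe);
	if (!pll->active_mask)		/* last user powers it back down */
		pll_hw_disable(pll);
}

int main(void)
{
	struct shared_pll pll = { 0 };

	pll_get(&pll, 0);
	pll_get(&pll, 1);
	pll_put(&pll, 0);
	printf("active_mask=0x%x on=%d\n", pll.active_mask, pll.on); /* 0x2 1 */
	pll_put(&pll, 1);
	printf("active_mask=0x%x on=%d\n", pll.active_mask, pll.on); /* 0x0 0 */
	return 0;
}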
/**
@@ -299,53 +300,53 @@ out:
*/
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
unsigned int pipe_mask = BIT(crtc->pipe);
/* PCH only available on ILK+ */
- if (DISPLAY_VER(i915) < 5)
+ if (DISPLAY_VER(display) < 5)
return;
if (pll == NULL)
return;
- mutex_lock(&i915->display.dpll.lock);
- if (drm_WARN(&i915->drm, !(pll->active_mask & pipe_mask),
+ mutex_lock(&display->dpll.lock);
+ if (drm_WARN(display->drm, !(pll->active_mask & pipe_mask),
"%s not used by [CRTC:%d:%s]\n", pll->info->name,
crtc->base.base.id, crtc->base.name))
goto out;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
pll->info->name, pll->active_mask, pll->on,
crtc->base.base.id, crtc->base.name);
- assert_shared_dpll_enabled(i915, pll);
- drm_WARN_ON(&i915->drm, !pll->on);
+ assert_shared_dpll_enabled(display, pll);
+ drm_WARN_ON(display->drm, !pll->on);
pll->active_mask &= ~pipe_mask;
if (pll->active_mask)
goto out;
- drm_dbg_kms(&i915->drm, "disabling %s\n", pll->info->name);
+ drm_dbg_kms(display->drm, "disabling %s\n", pll->info->name);
- _intel_disable_shared_dpll(i915, pll);
+ _intel_disable_shared_dpll(display, pll);
out:
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
static unsigned long
-intel_dpll_mask_all(struct drm_i915_private *i915)
+intel_dpll_mask_all(struct intel_display *display)
{
struct intel_shared_dpll *pll;
unsigned long dpll_mask = 0;
int i;
- for_each_shared_dpll(i915, pll, i) {
- drm_WARN_ON(&i915->drm, dpll_mask & BIT(pll->info->id));
+ for_each_shared_dpll(display, pll, i) {
+ drm_WARN_ON(display->drm, dpll_mask & BIT(pll->info->id));
dpll_mask |= BIT(pll->info->id);
}
@@ -359,20 +360,20 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
const struct intel_dpll_hw_state *dpll_hw_state,
unsigned long dpll_mask)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- unsigned long dpll_mask_all = intel_dpll_mask_all(i915);
+ struct intel_display *display = to_intel_display(crtc);
+ unsigned long dpll_mask_all = intel_dpll_mask_all(display);
struct intel_shared_dpll_state *shared_dpll;
struct intel_shared_dpll *unused_pll = NULL;
enum intel_dpll_id id;
shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
- drm_WARN_ON(&i915->drm, dpll_mask & ~dpll_mask_all);
+ drm_WARN_ON(display->drm, dpll_mask & ~dpll_mask_all);
for_each_set_bit(id, &dpll_mask, fls(dpll_mask_all)) {
struct intel_shared_dpll *pll;
- pll = intel_get_shared_dpll_by_id(i915, id);
+ pll = intel_get_shared_dpll_by_id(display, id);
if (!pll)
continue;
@@ -386,7 +387,7 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
if (memcmp(dpll_hw_state,
&shared_dpll[pll->index].hw_state,
sizeof(*dpll_hw_state)) == 0) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
crtc->base.base.id, crtc->base.name,
pll->info->name,
@@ -398,7 +399,7 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
/* Ok no matching timings, maybe there's a free one? */
if (unused_pll) {
- drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] allocated %s\n",
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] allocated %s\n",
crtc->base.base.id, crtc->base.name,
unused_pll->info->name);
return unused_pll;
@@ -420,13 +421,13 @@ intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc,
const struct intel_shared_dpll *pll,
struct intel_shared_dpll_state *shared_dpll_state)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
+ drm_WARN_ON(display->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
shared_dpll_state->pipe_mask |= BIT(crtc->pipe);
- drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] reserving %s\n",
crtc->base.base.id, crtc->base.name, pll->info->name);
}
@@ -459,13 +460,13 @@ intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
const struct intel_shared_dpll *pll,
struct intel_shared_dpll_state *shared_dpll_state)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
+ drm_WARN_ON(display->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe);
- drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] releasing %s\n",
crtc->base.base.id, crtc->base.name, pll->info->name);
}
@@ -509,7 +510,7 @@ static void intel_put_dpll(struct intel_atomic_state *state,
*/
void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
struct intel_shared_dpll *pll;
int i;
@@ -517,11 +518,11 @@ void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
if (!state->dpll_set)
return;
- for_each_shared_dpll(i915, pll, i)
+ for_each_shared_dpll(display, pll, i)
swap(pll->state, shared_dpll[pll->index]);
}
-static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *i915,
+static bool ibx_pch_dpll_get_hw_state(struct intel_display *display,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
@@ -530,24 +531,23 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *i915,
intel_wakeref_t wakeref;
u32 val;
- wakeref = intel_display_power_get_if_enabled(i915,
+ wakeref = intel_display_power_get_if_enabled(display,
POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
- val = intel_de_read(i915, PCH_DPLL(id));
+ val = intel_de_read(display, PCH_DPLL(id));
hw_state->dpll = val;
- hw_state->fp0 = intel_de_read(i915, PCH_FP0(id));
- hw_state->fp1 = intel_de_read(i915, PCH_FP1(id));
+ hw_state->fp0 = intel_de_read(display, PCH_FP0(id));
+ hw_state->fp1 = intel_de_read(display, PCH_FP1(id));
- intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return val & DPLL_VCO_ENABLE;
}
-static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *i915)
+static void ibx_assert_pch_refclk_enabled(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
u32 val;
bool enabled;
@@ -558,7 +558,7 @@ static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *i915)
"PCH refclk assertion failure, should be active but is disabled\n");
}
-static void ibx_pch_dpll_enable(struct drm_i915_private *i915,
+static void ibx_pch_dpll_enable(struct intel_display *display,
struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
@@ -566,15 +566,15 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *i915,
const enum intel_dpll_id id = pll->info->id;
/* PCH refclock must be enabled first */
- ibx_assert_pch_refclk_enabled(i915);
+ ibx_assert_pch_refclk_enabled(display);
- intel_de_write(i915, PCH_FP0(id), hw_state->fp0);
- intel_de_write(i915, PCH_FP1(id), hw_state->fp1);
+ intel_de_write(display, PCH_FP0(id), hw_state->fp0);
+ intel_de_write(display, PCH_FP1(id), hw_state->fp1);
- intel_de_write(i915, PCH_DPLL(id), hw_state->dpll);
+ intel_de_write(display, PCH_DPLL(id), hw_state->dpll);
/* Wait for the clocks to stabilize. */
- intel_de_posting_read(i915, PCH_DPLL(id));
+ intel_de_posting_read(display, PCH_DPLL(id));
udelay(150);
/* The pixel multiplier can only be updated once the
@@ -582,18 +582,18 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *i915,
*
* So write it again.
*/
- intel_de_write(i915, PCH_DPLL(id), hw_state->dpll);
- intel_de_posting_read(i915, PCH_DPLL(id));
+ intel_de_write(display, PCH_DPLL(id), hw_state->dpll);
+ intel_de_posting_read(display, PCH_DPLL(id));
udelay(200);
}
-static void ibx_pch_dpll_disable(struct drm_i915_private *i915,
+static void ibx_pch_dpll_disable(struct intel_display *display,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
- intel_de_write(i915, PCH_DPLL(id), 0);
- intel_de_posting_read(i915, PCH_DPLL(id));
+ intel_de_write(display, PCH_DPLL(id), 0);
+ intel_de_posting_read(display, PCH_DPLL(id));
udelay(200);
}
@@ -608,18 +608,19 @@ static int ibx_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(state);
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum intel_dpll_id id;
if (HAS_PCH_IBX(i915)) {
/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
id = (enum intel_dpll_id) crtc->pipe;
- pll = intel_get_shared_dpll_by_id(i915, id);
+ pll = intel_get_shared_dpll_by_id(display, id);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] using pre-allocated %s\n",
crtc->base.base.id, crtc->base.name,
pll->info->name);
@@ -688,62 +689,64 @@ static const struct intel_dpll_mgr pch_pll_mgr = {
.compare_hw_state = ibx_compare_hw_state,
};
-static void hsw_ddi_wrpll_enable(struct drm_i915_private *i915,
+static void hsw_ddi_wrpll_enable(struct intel_display *display,
struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
const enum intel_dpll_id id = pll->info->id;
- intel_de_write(i915, WRPLL_CTL(id), hw_state->wrpll);
- intel_de_posting_read(i915, WRPLL_CTL(id));
+ intel_de_write(display, WRPLL_CTL(id), hw_state->wrpll);
+ intel_de_posting_read(display, WRPLL_CTL(id));
udelay(20);
}
-static void hsw_ddi_spll_enable(struct drm_i915_private *i915,
+static void hsw_ddi_spll_enable(struct intel_display *display,
struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
- intel_de_write(i915, SPLL_CTL, hw_state->spll);
- intel_de_posting_read(i915, SPLL_CTL);
+ intel_de_write(display, SPLL_CTL, hw_state->spll);
+ intel_de_posting_read(display, SPLL_CTL);
udelay(20);
}
-static void hsw_ddi_wrpll_disable(struct drm_i915_private *i915,
+static void hsw_ddi_wrpll_disable(struct intel_display *display,
struct intel_shared_dpll *pll)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
const enum intel_dpll_id id = pll->info->id;
- intel_de_rmw(i915, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
- intel_de_posting_read(i915, WRPLL_CTL(id));
+ intel_de_rmw(display, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
+ intel_de_posting_read(display, WRPLL_CTL(id));
/*
* Try to set up the PCH reference clock once all DPLLs
* that depend on it have been shut down.
*/
- if (i915->display.dpll.pch_ssc_use & BIT(id))
+ if (display->dpll.pch_ssc_use & BIT(id))
intel_init_pch_refclk(i915);
}
-static void hsw_ddi_spll_disable(struct drm_i915_private *i915,
+static void hsw_ddi_spll_disable(struct intel_display *display,
struct intel_shared_dpll *pll)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
enum intel_dpll_id id = pll->info->id;
- intel_de_rmw(i915, SPLL_CTL, SPLL_PLL_ENABLE, 0);
- intel_de_posting_read(i915, SPLL_CTL);
+ intel_de_rmw(display, SPLL_CTL, SPLL_PLL_ENABLE, 0);
+ intel_de_posting_read(display, SPLL_CTL);
/*
* Try to set up the PCH reference clock once all DPLLs
* that depend on it have been shut down.
*/
- if (i915->display.dpll.pch_ssc_use & BIT(id))
+ if (display->dpll.pch_ssc_use & BIT(id))
intel_init_pch_refclk(i915);
}
-static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *i915,
+static bool hsw_ddi_wrpll_get_hw_state(struct intel_display *display,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
@@ -752,20 +755,20 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *i915,
intel_wakeref_t wakeref;
u32 val;
- wakeref = intel_display_power_get_if_enabled(i915,
+ wakeref = intel_display_power_get_if_enabled(display,
POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
- val = intel_de_read(i915, WRPLL_CTL(id));
+ val = intel_de_read(display, WRPLL_CTL(id));
hw_state->wrpll = val;
- intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return val & WRPLL_PLL_ENABLE;
}
-static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *i915,
+static bool hsw_ddi_spll_get_hw_state(struct intel_display *display,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
@@ -773,15 +776,15 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *i915,
intel_wakeref_t wakeref;
u32 val;
- wakeref = intel_display_power_get_if_enabled(i915,
+ wakeref = intel_display_power_get_if_enabled(display,
POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
- val = intel_de_read(i915, SPLL_CTL);
+ val = intel_de_read(display, SPLL_CTL);
hw_state->spll = val;
- intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return val & SPLL_PLL_ENABLE;
}
@@ -992,7 +995,7 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
*r2_out = best.r2;
}
-static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *i915,
+static int hsw_ddi_wrpll_get_freq(struct intel_display *display,
const struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
@@ -1004,8 +1007,8 @@ static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *i915,
switch (wrpll & WRPLL_REF_MASK) {
case WRPLL_REF_SPECIAL_HSW:
/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
- if (IS_HASWELL(i915) && !IS_HASWELL_ULT(i915)) {
- refclk = i915->display.dpll.ref_clks.nssc;
+ if (display->platform.haswell && !display->platform.haswell_ult) {
+ refclk = display->dpll.ref_clks.nssc;
break;
}
fallthrough;
@@ -1015,7 +1018,7 @@ static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *i915,
* code only cares about 5% accuracy, and spread is a max of
* 0.5% downspread.
*/
- refclk = i915->display.dpll.ref_clks.ssc;
+ refclk = display->dpll.ref_clks.ssc;
break;
case WRPLL_REF_LCPLL:
refclk = 2700000;
@@ -1037,7 +1040,7 @@ static int
hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
@@ -1050,7 +1053,7 @@ hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
WRPLL_DIVIDER_POST(p);
- crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
+ crtc_state->port_clock = hsw_ddi_wrpll_get_freq(display, NULL,
&crtc_state->dpll_hw_state);
return 0;
@@ -1072,7 +1075,7 @@ hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
static int
hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
int clock = crtc_state->port_clock;
switch (clock / 2) {
@@ -1081,7 +1084,7 @@ hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
case 270000:
return 0;
default:
- drm_dbg_kms(&i915->drm, "Invalid clock for DP: %d\n",
+ drm_dbg_kms(display->drm, "Invalid clock for DP: %d\n",
clock);
return -EINVAL;
}
@@ -1090,7 +1093,7 @@ hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
static struct intel_shared_dpll *
hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_shared_dpll *pll;
enum intel_dpll_id pll_id;
int clock = crtc_state->port_clock;
@@ -1110,7 +1113,7 @@ hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
return NULL;
}
- pll = intel_get_shared_dpll_by_id(i915, pll_id);
+ pll = intel_get_shared_dpll_by_id(display, pll_id);
if (!pll)
return NULL;
@@ -1118,7 +1121,7 @@ hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
return pll;
}
-static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
+static int hsw_ddi_lcpll_get_freq(struct intel_display *display,
const struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
@@ -1135,7 +1138,7 @@ static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
link_clock = 270000;
break;
default:
- drm_WARN(&i915->drm, 1, "bad port clock sel\n");
+ drm_WARN(display->drm, 1, "bad port clock sel\n");
break;
}
@@ -1170,7 +1173,7 @@ hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
BIT(DPLL_ID_SPLL));
}
-static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
+static int hsw_ddi_spll_get_freq(struct intel_display *display,
const struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
@@ -1188,7 +1191,7 @@ static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
link_clock = 270000;
break;
default:
- drm_WARN(&i915->drm, 1, "bad spll freq\n");
+ drm_WARN(display->drm, 1, "bad spll freq\n");
break;
}
@@ -1238,14 +1241,14 @@ static int hsw_get_dpll(struct intel_atomic_state *state,
return 0;
}
-static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
+static void hsw_update_dpll_ref_clks(struct intel_display *display)
{
- i915->display.dpll.ref_clks.ssc = 135000;
+ display->dpll.ref_clks.ssc = 135000;
/* Non-SSC is only used on non-ULT HSW. */
- if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
- i915->display.dpll.ref_clks.nssc = 24000;
+ if (intel_de_read(display, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
+ display->dpll.ref_clks.nssc = 24000;
else
- i915->display.dpll.ref_clks.nssc = 135000;
+ display->dpll.ref_clks.nssc = 135000;
}
static void hsw_dump_hw_state(struct drm_printer *p,
@@ -1281,18 +1284,18 @@ static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
.get_freq = hsw_ddi_spll_get_freq,
};
-static void hsw_ddi_lcpll_enable(struct drm_i915_private *i915,
+static void hsw_ddi_lcpll_enable(struct intel_display *display,
struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *hw_state)
{
}
-static void hsw_ddi_lcpll_disable(struct drm_i915_private *i915,
+static void hsw_ddi_lcpll_disable(struct intel_display *display,
struct intel_shared_dpll *pll)
{
}
-static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *i915,
+static bool hsw_ddi_lcpll_get_hw_state(struct intel_display *display,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
@@ -1360,21 +1363,21 @@ static const struct skl_dpll_regs skl_dpll_regs[4] = {
},
};
-static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *i915,
+static void skl_ddi_pll_write_ctrl1(struct intel_display *display,
struct intel_shared_dpll *pll,
const struct skl_dpll_hw_state *hw_state)
{
const enum intel_dpll_id id = pll->info->id;
- intel_de_rmw(i915, DPLL_CTRL1,
+ intel_de_rmw(display, DPLL_CTRL1,
DPLL_CTRL1_HDMI_MODE(id) |
DPLL_CTRL1_SSC(id) |
DPLL_CTRL1_LINK_RATE_MASK(id),
hw_state->ctrl1 << (id * 6));
- intel_de_posting_read(i915, DPLL_CTRL1);
+ intel_de_posting_read(display, DPLL_CTRL1);
}
-static void skl_ddi_pll_enable(struct drm_i915_private *i915,
+static void skl_ddi_pll_enable(struct intel_display *display,
struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
@@ -1382,46 +1385,46 @@ static void skl_ddi_pll_enable(struct drm_i915_private *i915,
const struct skl_dpll_regs *regs = skl_dpll_regs;
const enum intel_dpll_id id = pll->info->id;
- skl_ddi_pll_write_ctrl1(i915, pll, hw_state);
+ skl_ddi_pll_write_ctrl1(display, pll, hw_state);
- intel_de_write(i915, regs[id].cfgcr1, hw_state->cfgcr1);
- intel_de_write(i915, regs[id].cfgcr2, hw_state->cfgcr2);
- intel_de_posting_read(i915, regs[id].cfgcr1);
- intel_de_posting_read(i915, regs[id].cfgcr2);
+ intel_de_write(display, regs[id].cfgcr1, hw_state->cfgcr1);
+ intel_de_write(display, regs[id].cfgcr2, hw_state->cfgcr2);
+ intel_de_posting_read(display, regs[id].cfgcr1);
+ intel_de_posting_read(display, regs[id].cfgcr2);
/* the enable bit is always bit 31 */
- intel_de_rmw(i915, regs[id].ctl, 0, LCPLL_PLL_ENABLE);
+ intel_de_rmw(display, regs[id].ctl, 0, LCPLL_PLL_ENABLE);
- if (intel_de_wait_for_set(i915, DPLL_STATUS, DPLL_LOCK(id), 5))
- drm_err(&i915->drm, "DPLL %d not locked\n", id);
+ if (intel_de_wait_for_set(display, DPLL_STATUS, DPLL_LOCK(id), 5))
+ drm_err(display->drm, "DPLL %d not locked\n", id);
}
-static void skl_ddi_dpll0_enable(struct drm_i915_private *i915,
+static void skl_ddi_dpll0_enable(struct intel_display *display,
struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
- skl_ddi_pll_write_ctrl1(i915, pll, hw_state);
+ skl_ddi_pll_write_ctrl1(display, pll, hw_state);
}
-static void skl_ddi_pll_disable(struct drm_i915_private *i915,
+static void skl_ddi_pll_disable(struct intel_display *display,
struct intel_shared_dpll *pll)
{
const struct skl_dpll_regs *regs = skl_dpll_regs;
const enum intel_dpll_id id = pll->info->id;
/* the enable bit is always bit 31 */
- intel_de_rmw(i915, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
- intel_de_posting_read(i915, regs[id].ctl);
+ intel_de_rmw(display, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
+ intel_de_posting_read(display, regs[id].ctl);
}
-static void skl_ddi_dpll0_disable(struct drm_i915_private *i915,
+static void skl_ddi_dpll0_disable(struct intel_display *display,
struct intel_shared_dpll *pll)
{
}
-static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *i915,
+static bool skl_ddi_pll_get_hw_state(struct intel_display *display,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
@@ -1432,34 +1435,34 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *i915,
bool ret;
u32 val;
- wakeref = intel_display_power_get_if_enabled(i915,
+ wakeref = intel_display_power_get_if_enabled(display,
POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
ret = false;
- val = intel_de_read(i915, regs[id].ctl);
+ val = intel_de_read(display, regs[id].ctl);
if (!(val & LCPLL_PLL_ENABLE))
goto out;
- val = intel_de_read(i915, DPLL_CTRL1);
+ val = intel_de_read(display, DPLL_CTRL1);
hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
/* avoid reading back stale values if HDMI mode is not enabled */
if (val & DPLL_CTRL1_HDMI_MODE(id)) {
- hw_state->cfgcr1 = intel_de_read(i915, regs[id].cfgcr1);
- hw_state->cfgcr2 = intel_de_read(i915, regs[id].cfgcr2);
+ hw_state->cfgcr1 = intel_de_read(display, regs[id].cfgcr1);
+ hw_state->cfgcr2 = intel_de_read(display, regs[id].cfgcr2);
}
ret = true;
out:
- intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return ret;
}
-static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *i915,
+static bool skl_ddi_dpll0_get_hw_state(struct intel_display *display,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
@@ -1470,7 +1473,7 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *i915,
u32 val;
bool ret;
- wakeref = intel_display_power_get_if_enabled(i915,
+ wakeref = intel_display_power_get_if_enabled(display,
POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
@@ -1478,17 +1481,17 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *i915,
ret = false;
/* DPLL0 is always enabled since it drives CDCLK */
- val = intel_de_read(i915, regs[id].ctl);
- if (drm_WARN_ON(&i915->drm, !(val & LCPLL_PLL_ENABLE)))
+ val = intel_de_read(display, regs[id].ctl);
+ if (drm_WARN_ON(display->drm, !(val & LCPLL_PLL_ENABLE)))
goto out;
- val = intel_de_read(i915, DPLL_CTRL1);
+ val = intel_de_read(display, DPLL_CTRL1);
hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
ret = true;
out:
- intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return ret;
}
@@ -1732,12 +1735,12 @@ skip_remaining_dividers:
return 0;
}
-static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
+static int skl_ddi_wrpll_get_freq(struct intel_display *display,
const struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
- int ref_clock = i915->display.dpll.ref_clks.nssc;
+ int ref_clock = display->dpll.ref_clks.nssc;
u32 p0, p1, p2, dco_freq;
p0 = hw_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
@@ -1764,7 +1767,7 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
* Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
* handling it the same way as PDIV_7.
*/
- drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
+ drm_dbg_kms(display->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
fallthrough;
case DPLL_CFGCR2_PDIV_7:
p0 = 7;
@@ -1798,7 +1801,7 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
dco_freq += ((hw_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
ref_clock / 0x8000;
- if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
+ if (drm_WARN_ON(display->drm, p0 == 0 || p1 == 0 || p2 == 0))
return 0;
return dco_freq / (p0 * p1 * p2 * 5);
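The frequency readback above reconstructs the DCO frequency from an integer multiplier plus a 15-bit fraction of the reference clock, then divides by the post dividers and the symbol-clock scaling factor. A standalone arithmetic sketch of that calculation; the field values, names and the exact meaning of the trailing divide-by-5 are illustrative assumptions, not taken from the hardware spec:

#include <stdint.h>
#include <stdio.h>

/*
 * Reconstruct a PLL output frequency from an integer DCO multiplier, a
 * 15-bit DCO fraction and three post dividers. The *5 mirrors the driver's
 * symbol-clock convention; all numbers below are made up for illustration.
 */
static int pll_get_freq(int ref_khz, uint32_t dco_int, uint32_t dco_frac,
			int p0, int p1, int p2)
{
	uint64_t dco_freq;

	if (p0 == 0 || p1 == 0 || p2 == 0)
		return 0;

	dco_freq = (uint64_t)dco_int * ref_khz;
	dco_freq += (uint64_t)dco_frac * ref_khz / 0x8000;	/* fraction / 2^15 */

	return (int)(dco_freq / (p0 * p1 * p2 * 5));
}

int main(void)
{
	/* 24 MHz reference, multiplier 0x168, fraction 0x3000, dividers 2/1/3 */
	printf("%d kHz\n", pll_get_freq(24000, 0x168, 0x3000, 2, 1, 3));
	return 0;
}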
@@ -1806,13 +1809,13 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
struct skl_wrpll_params wrpll_params = {};
int ret;
ret = skl_ddi_calculate_wrpll(crtc_state->port_clock,
- i915->display.dpll.ref_clks.nssc, &wrpll_params);
+ display->dpll.ref_clks.nssc, &wrpll_params);
if (ret)
return ret;
@@ -1836,7 +1839,7 @@ static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
wrpll_params.central_freq;
- crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
+ crtc_state->port_clock = skl_ddi_wrpll_get_freq(display, NULL,
&crtc_state->dpll_hw_state);
return 0;
@@ -1880,7 +1883,7 @@ skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
return 0;
}
-static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
+static int skl_ddi_lcpll_get_freq(struct intel_display *display,
const struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
@@ -1908,7 +1911,7 @@ static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
link_clock = 270000;
break;
default:
- drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
+ drm_WARN(display->drm, 1, "Unsupported link rate\n");
break;
}
@@ -1959,7 +1962,7 @@ static int skl_get_dpll(struct intel_atomic_state *state,
return 0;
}
-static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
+static int skl_ddi_pll_get_freq(struct intel_display *display,
const struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
@@ -1970,15 +1973,15 @@ static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
* the internal shift for each field
*/
if (hw_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
- return skl_ddi_wrpll_get_freq(i915, pll, dpll_hw_state);
+ return skl_ddi_wrpll_get_freq(display, pll, dpll_hw_state);
else
- return skl_ddi_lcpll_get_freq(i915, pll, dpll_hw_state);
+ return skl_ddi_lcpll_get_freq(display, pll, dpll_hw_state);
}
-static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
+static void skl_update_dpll_ref_clks(struct intel_display *display)
{
/* No SSC ref */
- i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
+ display->dpll.ref_clks.nssc = display->cdclk.hw.ref;
}
static void skl_dump_hw_state(struct drm_printer *p,
@@ -2034,134 +2037,132 @@ static const struct intel_dpll_mgr skl_pll_mgr = {
.compare_hw_state = skl_compare_hw_state,
};
-static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
+static void bxt_ddi_pll_enable(struct intel_display *display,
struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
- struct intel_display *display = &i915->display;
const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
- enum dpio_phy phy;
- enum dpio_channel ch;
+ enum dpio_phy phy = DPIO_PHY0;
+ enum dpio_channel ch = DPIO_CH0;
u32 temp;
bxt_port_to_phy_channel(display, port, &phy, &ch);
/* Non-SSC reference */
- intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);
+ intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);
- if (IS_GEMINILAKE(i915)) {
- intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
+ if (display->platform.geminilake) {
+ intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port),
0, PORT_PLL_POWER_ENABLE);
- if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
+ if (wait_for_us((intel_de_read(display, BXT_PORT_PLL_ENABLE(port)) &
PORT_PLL_POWER_STATE), 200))
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Power state not set for PLL:%d\n", port);
}
/* Disable 10 bit clock */
- intel_de_rmw(i915, BXT_PORT_PLL_EBB_4(phy, ch),
+ intel_de_rmw(display, BXT_PORT_PLL_EBB_4(phy, ch),
PORT_PLL_10BIT_CLK_ENABLE, 0);
/* Write P1 & P2 */
- intel_de_rmw(i915, BXT_PORT_PLL_EBB_0(phy, ch),
+ intel_de_rmw(display, BXT_PORT_PLL_EBB_0(phy, ch),
PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, hw_state->ebb0);
/* Write M2 integer */
- intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 0),
+ intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 0),
PORT_PLL_M2_INT_MASK, hw_state->pll0);
/* Write N */
- intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 1),
+ intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 1),
PORT_PLL_N_MASK, hw_state->pll1);
/* Write M2 fraction */
- intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 2),
+ intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 2),
PORT_PLL_M2_FRAC_MASK, hw_state->pll2);
/* Write M2 fraction enable */
- intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 3),
+ intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 3),
PORT_PLL_M2_FRAC_ENABLE, hw_state->pll3);
/* Write coeff */
- temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
+ temp = intel_de_read(display, BXT_PORT_PLL(phy, ch, 6));
temp &= ~PORT_PLL_PROP_COEFF_MASK;
temp &= ~PORT_PLL_INT_COEFF_MASK;
temp &= ~PORT_PLL_GAIN_CTL_MASK;
temp |= hw_state->pll6;
- intel_de_write(i915, BXT_PORT_PLL(phy, ch, 6), temp);
+ intel_de_write(display, BXT_PORT_PLL(phy, ch, 6), temp);
/* Write calibration val */
- intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 8),
+ intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 8),
PORT_PLL_TARGET_CNT_MASK, hw_state->pll8);
- intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 9),
+ intel_de_rmw(display, BXT_PORT_PLL(phy, ch, 9),
PORT_PLL_LOCK_THRESHOLD_MASK, hw_state->pll9);
- temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
+ temp = intel_de_read(display, BXT_PORT_PLL(phy, ch, 10));
temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
temp &= ~PORT_PLL_DCO_AMP_MASK;
temp |= hw_state->pll10;
- intel_de_write(i915, BXT_PORT_PLL(phy, ch, 10), temp);
+ intel_de_write(display, BXT_PORT_PLL(phy, ch, 10), temp);
/* Recalibrate with new settings */
- temp = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
+ temp = intel_de_read(display, BXT_PORT_PLL_EBB_4(phy, ch));
temp |= PORT_PLL_RECALIBRATE;
- intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
+ intel_de_write(display, BXT_PORT_PLL_EBB_4(phy, ch), temp);
temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
temp |= hw_state->ebb4;
- intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
+ intel_de_write(display, BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Enable PLL */
- intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
- intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
+ intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
+ intel_de_posting_read(display, BXT_PORT_PLL_ENABLE(port));
- if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
+ if (wait_for_us((intel_de_read(display, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
200))
- drm_err(&i915->drm, "PLL %d not locked\n", port);
+ drm_err(display->drm, "PLL %d not locked\n", port);
- if (IS_GEMINILAKE(i915)) {
- temp = intel_de_read(i915, BXT_PORT_TX_DW5_LN(phy, ch, 0));
+ if (display->platform.geminilake) {
+ temp = intel_de_read(display, BXT_PORT_TX_DW5_LN(phy, ch, 0));
temp |= DCC_DELAY_RANGE_2;
- intel_de_write(i915, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
+ intel_de_write(display, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
}
/*
* While we write to the group register to program all lanes at once we
* can read only lane registers and we pick lanes 0/1 for that.
*/
- temp = intel_de_read(i915, BXT_PORT_PCS_DW12_LN01(phy, ch));
+ temp = intel_de_read(display, BXT_PORT_PCS_DW12_LN01(phy, ch));
temp &= ~LANE_STAGGER_MASK;
temp &= ~LANESTAGGER_STRAP_OVRD;
temp |= hw_state->pcsdw12;
- intel_de_write(i915, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
+ intel_de_write(display, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
-static void bxt_ddi_pll_disable(struct drm_i915_private *i915,
+static void bxt_ddi_pll_disable(struct intel_display *display,
struct intel_shared_dpll *pll)
{
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
- intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
- intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
+ intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
+ intel_de_posting_read(display, BXT_PORT_PLL_ENABLE(port));
- if (IS_GEMINILAKE(i915)) {
- intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
+ if (display->platform.geminilake) {
+ intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port),
PORT_PLL_POWER_ENABLE, 0);
- if (wait_for_us(!(intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
+ if (wait_for_us(!(intel_de_read(display, BXT_PORT_PLL_ENABLE(port)) &
PORT_PLL_POWER_STATE), 200))
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Power state not reset for PLL:%d\n", port);
}
}
-static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915,
+static bool bxt_ddi_pll_get_hw_state(struct intel_display *display,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
- struct intel_display *display = &i915->display;
struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
intel_wakeref_t wakeref;
@@ -2172,47 +2173,47 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915,
bxt_port_to_phy_channel(display, port, &phy, &ch);
- wakeref = intel_display_power_get_if_enabled(i915,
+ wakeref = intel_display_power_get_if_enabled(display,
POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
ret = false;
- val = intel_de_read(i915, BXT_PORT_PLL_ENABLE(port));
+ val = intel_de_read(display, BXT_PORT_PLL_ENABLE(port));
if (!(val & PORT_PLL_ENABLE))
goto out;
- hw_state->ebb0 = intel_de_read(i915, BXT_PORT_PLL_EBB_0(phy, ch));
+ hw_state->ebb0 = intel_de_read(display, BXT_PORT_PLL_EBB_0(phy, ch));
hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
- hw_state->ebb4 = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
+ hw_state->ebb4 = intel_de_read(display, BXT_PORT_PLL_EBB_4(phy, ch));
hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
- hw_state->pll0 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 0));
+ hw_state->pll0 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 0));
hw_state->pll0 &= PORT_PLL_M2_INT_MASK;
- hw_state->pll1 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 1));
+ hw_state->pll1 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 1));
hw_state->pll1 &= PORT_PLL_N_MASK;
- hw_state->pll2 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 2));
+ hw_state->pll2 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 2));
hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
- hw_state->pll3 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 3));
+ hw_state->pll3 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 3));
hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
- hw_state->pll6 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
+ hw_state->pll6 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 6));
hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
PORT_PLL_INT_COEFF_MASK |
PORT_PLL_GAIN_CTL_MASK;
- hw_state->pll8 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 8));
+ hw_state->pll8 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 8));
hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
- hw_state->pll9 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 9));
+ hw_state->pll9 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 9));
hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
- hw_state->pll10 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
+ hw_state->pll10 = intel_de_read(display, BXT_PORT_PLL(phy, ch, 10));
hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
PORT_PLL_DCO_AMP_MASK;
@@ -2221,20 +2222,20 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915,
* can read only lane registers. We configure all lanes the same way, so
* here just read out lanes 0/1 and output a note if lanes 2/3 differ.
*/
- hw_state->pcsdw12 = intel_de_read(i915,
+ hw_state->pcsdw12 = intel_de_read(display,
BXT_PORT_PCS_DW12_LN01(phy, ch));
- if (intel_de_read(i915, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
- drm_dbg(&i915->drm,
+ if (intel_de_read(display, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
+ drm_dbg(display->drm,
"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
hw_state->pcsdw12,
- intel_de_read(i915,
+ intel_de_read(display,
BXT_PORT_PCS_DW12_LN23(phy, ch)));
hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
ret = true;
out:
- intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return ret;
}
@@ -2255,7 +2256,7 @@ static int
bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
struct dpll *clk_div)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
/* Calculate HDMI div */
/*
@@ -2265,7 +2266,7 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
if (!bxt_find_best_dpll(crtc_state, clk_div))
return -EINVAL;
- drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
+ drm_WARN_ON(display->drm, clk_div->m1 != 2);
return 0;
}
@@ -2273,7 +2274,7 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
struct dpll *clk_div)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
int i;
*clk_div = bxt_dp_clk_val[0];
@@ -2284,16 +2285,16 @@ static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
}
}
- chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
+ chv_calc_dpll_params(display->dpll.ref_clks.nssc, clk_div);
- drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
+ drm_WARN_ON(display->drm, clk_div->vco == 0 ||
clk_div->dot != crtc_state->port_clock);
}
static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
const struct dpll *clk_div)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct bxt_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.bxt;
int clock = crtc_state->port_clock;
int vco = clk_div->vco;
@@ -2317,7 +2318,7 @@ static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
gain_ctl = 1;
targ_cnt = 9;
} else {
- drm_err(&i915->drm, "Invalid VCO\n");
+ drm_err(display->drm, "Invalid VCO\n");
return -EINVAL;
}
@@ -2358,7 +2359,7 @@ static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
return 0;
}
-static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
+static int bxt_ddi_pll_get_freq(struct intel_display *display,
const struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
@@ -2374,7 +2375,7 @@ static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, hw_state->ebb0);
clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, hw_state->ebb0);
- return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
+ return chv_calc_dpll_params(display->dpll.ref_clks.nssc, &clock);
}
static int
@@ -2390,7 +2391,7 @@ bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
static int
bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct dpll clk_div = {};
int ret;
@@ -2400,7 +2401,7 @@ bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
- crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
+ crtc_state->port_clock = bxt_ddi_pll_get_freq(display, NULL,
&crtc_state->dpll_hw_state);
return 0;
@@ -2425,17 +2426,17 @@ static int bxt_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum intel_dpll_id id;
/* 1:1 mapping between ports and PLLs */
id = (enum intel_dpll_id) encoder->port;
- pll = intel_get_shared_dpll_by_id(i915, id);
+ pll = intel_get_shared_dpll_by_id(display, id);
- drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
crtc->base.base.id, crtc->base.name, pll->info->name);
intel_reference_shared_dpll(state, crtc,
@@ -2446,10 +2447,10 @@ static int bxt_get_dpll(struct intel_atomic_state *state,
return 0;
}
-static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
+static void bxt_update_dpll_ref_clks(struct intel_display *display)
{
- i915->display.dpll.ref_clks.ssc = 100000;
- i915->display.dpll.ref_clks.nssc = 100000;
+ display->dpll.ref_clks.ssc = 100000;
+ display->dpll.ref_clks.nssc = 100000;
/* DSI non-SSC ref 19.2MHz */
}
@@ -2601,12 +2602,14 @@ static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
* Program half of the nominal DCO divider fraction value.
*/
static bool
-ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
+ehl_combo_pll_div_frac_wa_needed(struct intel_display *display)
{
- return ((IS_ELKHARTLAKE(i915) &&
- IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
- IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
- i915->display.dpll.ref_clks.nssc == 38400;
+ return ((display->platform.elkhartlake &&
+ IS_DISPLAY_STEP(display, STEP_B0, STEP_FOREVER)) ||
+ display->platform.tigerlake ||
+ display->platform.alderlake_s ||
+ display->platform.alderlake_p) &&
+ display->dpll.ref_clks.nssc == 38400;
}
struct icl_combo_pll_params {
@@ -2698,9 +2701,9 @@ static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *pll_params)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
const struct icl_combo_pll_params *params =
- i915->display.dpll.ref_clks.nssc == 24000 ?
+ display->dpll.ref_clks.nssc == 24000 ?
icl_dp_combo_pll_24MHz_values :
icl_dp_combo_pll_19_2MHz_values;
int clock = crtc_state->port_clock;
@@ -2720,12 +2723,12 @@ static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *pll_params)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (DISPLAY_VER(i915) >= 12) {
- switch (i915->display.dpll.ref_clks.nssc) {
+ if (DISPLAY_VER(display) >= 12) {
+ switch (display->dpll.ref_clks.nssc) {
default:
- MISSING_CASE(i915->display.dpll.ref_clks.nssc);
+ MISSING_CASE(display->dpll.ref_clks.nssc);
fallthrough;
case 19200:
case 38400:
@@ -2736,9 +2739,9 @@ static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
break;
}
} else {
- switch (i915->display.dpll.ref_clks.nssc) {
+ switch (display->dpll.ref_clks.nssc) {
default:
- MISSING_CASE(i915->display.dpll.ref_clks.nssc);
+ MISSING_CASE(display->dpll.ref_clks.nssc);
fallthrough;
case 19200:
case 38400:
@@ -2753,7 +2756,7 @@ static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
return 0;
}
-static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
+static int icl_ddi_tbt_pll_get_freq(struct intel_display *display,
const struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
@@ -2761,14 +2764,14 @@ static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
* The PLL outputs multiple frequencies at the same time, selection is
* made at DDI clock mux level.
*/
- drm_WARN_ON(&i915->drm, 1);
+ drm_WARN_ON(display->drm, 1);
return 0;
}
-static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
+static int icl_wrpll_ref_clock(struct intel_display *display)
{
- int ref_clock = i915->display.dpll.ref_clks.nssc;
+ int ref_clock = display->dpll.ref_clks.nssc;
/*
* For ICL+, the spec states: if reference frequency is 38.4,
@@ -2784,8 +2787,8 @@ static int
icl_calc_wrpll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *wrpll_params)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- int ref_clock = icl_wrpll_ref_clock(i915);
+ struct intel_display *display = to_intel_display(crtc_state);
+ int ref_clock = icl_wrpll_ref_clock(display);
u32 afe_clock = crtc_state->port_clock * 5;
u32 dco_min = 7998000;
u32 dco_max = 10000000;
@@ -2824,12 +2827,12 @@ icl_calc_wrpll(struct intel_crtc_state *crtc_state,
return 0;
}
-static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
+static int icl_ddi_combo_pll_get_freq(struct intel_display *display,
const struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
- int ref_clock = icl_wrpll_ref_clock(i915);
+ int ref_clock = icl_wrpll_ref_clock(display);
u32 dco_fraction;
u32 p0, p1, p2, dco_freq;
@@ -2875,25 +2878,25 @@ static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
dco_fraction = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
DPLL_CFGCR0_DCO_FRACTION_SHIFT;
- if (ehl_combo_pll_div_frac_wa_needed(i915))
+ if (ehl_combo_pll_div_frac_wa_needed(display))
dco_fraction *= 2;
dco_freq += (dco_fraction * ref_clock) / 0x8000;
- if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
+ if (drm_WARN_ON(display->drm, p0 == 0 || p1 == 0 || p2 == 0))
return 0;
return dco_freq / (p0 * p1 * p2 * 5);
}
-static void icl_calc_dpll_state(struct drm_i915_private *i915,
+static void icl_calc_dpll_state(struct intel_display *display,
const struct skl_wrpll_params *pll_params,
struct intel_dpll_hw_state *dpll_hw_state)
{
struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
u32 dco_fraction = pll_params->dco_fraction;
- if (ehl_combo_pll_div_frac_wa_needed(i915))
+ if (ehl_combo_pll_div_frac_wa_needed(display))
dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
hw_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
@@ -2904,13 +2907,13 @@ static void icl_calc_dpll_state(struct drm_i915_private *i915,
DPLL_CFGCR1_KDIV(pll_params->kdiv) |
DPLL_CFGCR1_PDIV(pll_params->pdiv);
- if (DISPLAY_VER(i915) >= 12)
+ if (DISPLAY_VER(display) >= 12)
hw_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
else
hw_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
- if (i915->display.vbt.override_afc_startup)
- hw_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
+ if (display->vbt.override_afc_startup)
+ hw_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(display->vbt.override_afc_startup_val);
}
static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
@@ -2996,9 +2999,9 @@ static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
struct intel_dpll_hw_state *dpll_hw_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
- int refclk_khz = i915->display.dpll.ref_clks.nssc;
+ int refclk_khz = display->dpll.ref_clks.nssc;
int clock = crtc_state->port_clock;
u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
u32 iref_ndiv, iref_trim, iref_pulse_w;
@@ -3008,7 +3011,7 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
u64 tmp;
bool use_ssc = false;
bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
- bool is_dkl = DISPLAY_VER(i915) >= 12;
+ bool is_dkl = DISPLAY_VER(display) >= 12;
int ret;
ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
@@ -3106,8 +3109,8 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
DKL_PLL_DIV0_FBPREDIV(m1div) |
DKL_PLL_DIV0_FBDIV_INT(m2div_int);
- if (i915->display.vbt.override_afc_startup) {
- u8 val = i915->display.vbt.override_afc_startup_val;
+ if (display->vbt.override_afc_startup) {
+ u8 val = display->vbt.override_afc_startup_val;
hw_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
}
@@ -3197,7 +3200,7 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
return 0;
}
-static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915,
+static int icl_ddi_mg_pll_get_freq(struct intel_display *display,
const struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
@@ -3205,9 +3208,9 @@ static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915,
u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
u64 tmp;
- ref_clock = i915->display.dpll.ref_clks.nssc;
+ ref_clock = display->dpll.ref_clks.nssc;
- if (DISPLAY_VER(i915) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
m1 = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
m2_int = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
@@ -3312,7 +3315,7 @@ static void icl_update_active_dpll(struct intel_atomic_state *state,
static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct icl_port_dpll *port_dpll =
@@ -3329,12 +3332,12 @@ static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
if (ret)
return ret;
- icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
+ icl_calc_dpll_state(display, &pll_params, &port_dpll->hw_state);
/* this is mainly for the fastset check */
icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
- crtc_state->port_clock = icl_ddi_combo_pll_get_freq(i915, NULL,
+ crtc_state->port_clock = icl_ddi_combo_pll_get_freq(display, NULL,
&port_dpll->hw_state);
return 0;
@@ -3345,7 +3348,6 @@ static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct icl_port_dpll *port_dpll =
@@ -3353,13 +3355,13 @@ static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
enum port port = encoder->port;
unsigned long dpll_mask;
- if (IS_ALDERLAKE_S(i915)) {
+ if (display->platform.alderlake_s) {
dpll_mask =
BIT(DPLL_ID_DG1_DPLL3) |
BIT(DPLL_ID_DG1_DPLL2) |
BIT(DPLL_ID_ICL_DPLL1) |
BIT(DPLL_ID_ICL_DPLL0);
- } else if (IS_DG1(i915)) {
+ } else if (display->platform.dg1) {
if (port == PORT_D || port == PORT_E) {
dpll_mask =
BIT(DPLL_ID_DG1_DPLL2) |
@@ -3369,12 +3371,13 @@ static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
BIT(DPLL_ID_DG1_DPLL0) |
BIT(DPLL_ID_DG1_DPLL1);
}
- } else if (IS_ROCKETLAKE(i915)) {
+ } else if (display->platform.rocketlake) {
dpll_mask =
BIT(DPLL_ID_EHL_DPLL4) |
BIT(DPLL_ID_ICL_DPLL1) |
BIT(DPLL_ID_ICL_DPLL0);
- } else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
+ } else if ((display->platform.jasperlake ||
+ display->platform.elkhartlake) &&
port != PORT_A) {
dpll_mask =
BIT(DPLL_ID_EHL_DPLL4) |
@@ -3404,7 +3407,7 @@ static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_crtc_state *old_crtc_state =
@@ -3419,7 +3422,7 @@ static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
if (ret)
return ret;
- icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
+ icl_calc_dpll_state(display, &pll_params, &port_dpll->hw_state);
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
@@ -3433,7 +3436,7 @@ static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
else
icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
- crtc_state->port_clock = icl_ddi_mg_pll_get_freq(i915, NULL,
+ crtc_state->port_clock = icl_ddi_mg_pll_get_freq(display, NULL,
&port_dpll->hw_state);
return 0;
@@ -3537,7 +3540,7 @@ static void icl_put_dplls(struct intel_atomic_state *state,
}
}
-static bool mg_pll_get_hw_state(struct drm_i915_private *i915,
+static bool mg_pll_get_hw_state(struct intel_display *display,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
@@ -3548,46 +3551,46 @@ static bool mg_pll_get_hw_state(struct drm_i915_private *i915,
bool ret = false;
u32 val;
- i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
+ i915_reg_t enable_reg = intel_tc_pll_enable_reg(display, pll);
- wakeref = intel_display_power_get_if_enabled(i915,
+ wakeref = intel_display_power_get_if_enabled(display,
POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
- val = intel_de_read(i915, enable_reg);
+ val = intel_de_read(display, enable_reg);
if (!(val & PLL_ENABLE))
goto out;
- hw_state->mg_refclkin_ctl = intel_de_read(i915,
+ hw_state->mg_refclkin_ctl = intel_de_read(display,
MG_REFCLKIN_CTL(tc_port));
hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
hw_state->mg_clktop2_coreclkctl1 =
- intel_de_read(i915, MG_CLKTOP2_CORECLKCTL1(tc_port));
+ intel_de_read(display, MG_CLKTOP2_CORECLKCTL1(tc_port));
hw_state->mg_clktop2_coreclkctl1 &=
MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
hw_state->mg_clktop2_hsclkctl =
- intel_de_read(i915, MG_CLKTOP2_HSCLKCTL(tc_port));
+ intel_de_read(display, MG_CLKTOP2_HSCLKCTL(tc_port));
hw_state->mg_clktop2_hsclkctl &=
MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
- hw_state->mg_pll_div0 = intel_de_read(i915, MG_PLL_DIV0(tc_port));
- hw_state->mg_pll_div1 = intel_de_read(i915, MG_PLL_DIV1(tc_port));
- hw_state->mg_pll_lf = intel_de_read(i915, MG_PLL_LF(tc_port));
- hw_state->mg_pll_frac_lock = intel_de_read(i915,
+ hw_state->mg_pll_div0 = intel_de_read(display, MG_PLL_DIV0(tc_port));
+ hw_state->mg_pll_div1 = intel_de_read(display, MG_PLL_DIV1(tc_port));
+ hw_state->mg_pll_lf = intel_de_read(display, MG_PLL_LF(tc_port));
+ hw_state->mg_pll_frac_lock = intel_de_read(display,
MG_PLL_FRAC_LOCK(tc_port));
- hw_state->mg_pll_ssc = intel_de_read(i915, MG_PLL_SSC(tc_port));
+ hw_state->mg_pll_ssc = intel_de_read(display, MG_PLL_SSC(tc_port));
- hw_state->mg_pll_bias = intel_de_read(i915, MG_PLL_BIAS(tc_port));
+ hw_state->mg_pll_bias = intel_de_read(display, MG_PLL_BIAS(tc_port));
hw_state->mg_pll_tdc_coldst_bias =
- intel_de_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
+ intel_de_read(display, MG_PLL_TDC_COLDST_BIAS(tc_port));
- if (i915->display.dpll.ref_clks.nssc == 38400) {
+ if (display->dpll.ref_clks.nssc == 38400) {
hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
hw_state->mg_pll_bias_mask = 0;
} else {
@@ -3600,11 +3603,11 @@ static bool mg_pll_get_hw_state(struct drm_i915_private *i915,
ret = true;
out:
- intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return ret;
}
-static bool dkl_pll_get_hw_state(struct drm_i915_private *i915,
+static bool dkl_pll_get_hw_state(struct intel_display *display,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
@@ -3615,12 +3618,12 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *i915,
bool ret = false;
u32 val;
- wakeref = intel_display_power_get_if_enabled(i915,
+ wakeref = intel_display_power_get_if_enabled(display,
POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
- val = intel_de_read(i915, intel_tc_pll_enable_reg(i915, pll));
+ val = intel_de_read(display, intel_tc_pll_enable_reg(display, pll));
if (!(val & PLL_ENABLE))
goto out;
@@ -3628,12 +3631,12 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *i915,
* All registers read here have the same HIP_INDEX_REG even though
* they are on different building blocks
*/
- hw_state->mg_refclkin_ctl = intel_dkl_phy_read(i915,
+ hw_state->mg_refclkin_ctl = intel_dkl_phy_read(display,
DKL_REFCLKIN_CTL(tc_port));
hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
hw_state->mg_clktop2_hsclkctl =
- intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
+ intel_dkl_phy_read(display, DKL_CLKTOP2_HSCLKCTL(tc_port));
hw_state->mg_clktop2_hsclkctl &=
MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
@@ -3641,42 +3644,42 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *i915,
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
hw_state->mg_clktop2_coreclkctl1 =
- intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
+ intel_dkl_phy_read(display, DKL_CLKTOP2_CORECLKCTL1(tc_port));
hw_state->mg_clktop2_coreclkctl1 &=
MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
- hw_state->mg_pll_div0 = intel_dkl_phy_read(i915, DKL_PLL_DIV0(tc_port));
+ hw_state->mg_pll_div0 = intel_dkl_phy_read(display, DKL_PLL_DIV0(tc_port));
val = DKL_PLL_DIV0_MASK;
- if (i915->display.vbt.override_afc_startup)
+ if (display->vbt.override_afc_startup)
val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
hw_state->mg_pll_div0 &= val;
- hw_state->mg_pll_div1 = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
+ hw_state->mg_pll_div1 = intel_dkl_phy_read(display, DKL_PLL_DIV1(tc_port));
hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
- hw_state->mg_pll_ssc = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
+ hw_state->mg_pll_ssc = intel_dkl_phy_read(display, DKL_PLL_SSC(tc_port));
hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
DKL_PLL_SSC_STEP_LEN_MASK |
DKL_PLL_SSC_STEP_NUM_MASK |
DKL_PLL_SSC_EN);
- hw_state->mg_pll_bias = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
+ hw_state->mg_pll_bias = intel_dkl_phy_read(display, DKL_PLL_BIAS(tc_port));
hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
DKL_PLL_BIAS_FBDIV_FRAC_MASK);
hw_state->mg_pll_tdc_coldst_bias =
- intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
+ intel_dkl_phy_read(display, DKL_PLL_TDC_COLDST_BIAS(tc_port));
hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
ret = true;
out:
- intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return ret;
}
-static bool icl_pll_get_hw_state(struct drm_i915_private *i915,
+static bool icl_pll_get_hw_state(struct intel_display *display,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state,
i915_reg_t enable_reg)
@@ -3687,94 +3690,94 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *i915,
bool ret = false;
u32 val;
- wakeref = intel_display_power_get_if_enabled(i915,
+ wakeref = intel_display_power_get_if_enabled(display,
POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
- val = intel_de_read(i915, enable_reg);
+ val = intel_de_read(display, enable_reg);
if (!(val & PLL_ENABLE))
goto out;
- if (IS_ALDERLAKE_S(i915)) {
- hw_state->cfgcr0 = intel_de_read(i915, ADLS_DPLL_CFGCR0(id));
- hw_state->cfgcr1 = intel_de_read(i915, ADLS_DPLL_CFGCR1(id));
- } else if (IS_DG1(i915)) {
- hw_state->cfgcr0 = intel_de_read(i915, DG1_DPLL_CFGCR0(id));
- hw_state->cfgcr1 = intel_de_read(i915, DG1_DPLL_CFGCR1(id));
- } else if (IS_ROCKETLAKE(i915)) {
- hw_state->cfgcr0 = intel_de_read(i915,
+ if (display->platform.alderlake_s) {
+ hw_state->cfgcr0 = intel_de_read(display, ADLS_DPLL_CFGCR0(id));
+ hw_state->cfgcr1 = intel_de_read(display, ADLS_DPLL_CFGCR1(id));
+ } else if (display->platform.dg1) {
+ hw_state->cfgcr0 = intel_de_read(display, DG1_DPLL_CFGCR0(id));
+ hw_state->cfgcr1 = intel_de_read(display, DG1_DPLL_CFGCR1(id));
+ } else if (display->platform.rocketlake) {
+ hw_state->cfgcr0 = intel_de_read(display,
RKL_DPLL_CFGCR0(id));
- hw_state->cfgcr1 = intel_de_read(i915,
+ hw_state->cfgcr1 = intel_de_read(display,
RKL_DPLL_CFGCR1(id));
- } else if (DISPLAY_VER(i915) >= 12) {
- hw_state->cfgcr0 = intel_de_read(i915,
+ } else if (DISPLAY_VER(display) >= 12) {
+ hw_state->cfgcr0 = intel_de_read(display,
TGL_DPLL_CFGCR0(id));
- hw_state->cfgcr1 = intel_de_read(i915,
+ hw_state->cfgcr1 = intel_de_read(display,
TGL_DPLL_CFGCR1(id));
- if (i915->display.vbt.override_afc_startup) {
- hw_state->div0 = intel_de_read(i915, TGL_DPLL0_DIV0(id));
+ if (display->vbt.override_afc_startup) {
+ hw_state->div0 = intel_de_read(display, TGL_DPLL0_DIV0(id));
hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
}
} else {
- if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
+ if ((display->platform.jasperlake || display->platform.elkhartlake) &&
id == DPLL_ID_EHL_DPLL4) {
- hw_state->cfgcr0 = intel_de_read(i915,
+ hw_state->cfgcr0 = intel_de_read(display,
ICL_DPLL_CFGCR0(4));
- hw_state->cfgcr1 = intel_de_read(i915,
+ hw_state->cfgcr1 = intel_de_read(display,
ICL_DPLL_CFGCR1(4));
} else {
- hw_state->cfgcr0 = intel_de_read(i915,
+ hw_state->cfgcr0 = intel_de_read(display,
ICL_DPLL_CFGCR0(id));
- hw_state->cfgcr1 = intel_de_read(i915,
+ hw_state->cfgcr1 = intel_de_read(display,
ICL_DPLL_CFGCR1(id));
}
}
ret = true;
out:
- intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return ret;
}
-static bool combo_pll_get_hw_state(struct drm_i915_private *i915,
+static bool combo_pll_get_hw_state(struct intel_display *display,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
- i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
+ i915_reg_t enable_reg = intel_combo_pll_enable_reg(display, pll);
- return icl_pll_get_hw_state(i915, pll, dpll_hw_state, enable_reg);
+ return icl_pll_get_hw_state(display, pll, dpll_hw_state, enable_reg);
}
-static bool tbt_pll_get_hw_state(struct drm_i915_private *i915,
+static bool tbt_pll_get_hw_state(struct intel_display *display,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
- return icl_pll_get_hw_state(i915, pll, dpll_hw_state, TBT_PLL_ENABLE);
+ return icl_pll_get_hw_state(display, pll, dpll_hw_state, TBT_PLL_ENABLE);
}
-static void icl_dpll_write(struct drm_i915_private *i915,
+static void icl_dpll_write(struct intel_display *display,
struct intel_shared_dpll *pll,
const struct icl_dpll_hw_state *hw_state)
{
const enum intel_dpll_id id = pll->info->id;
i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;
- if (IS_ALDERLAKE_S(i915)) {
+ if (display->platform.alderlake_s) {
cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
- } else if (IS_DG1(i915)) {
+ } else if (display->platform.dg1) {
cfgcr0_reg = DG1_DPLL_CFGCR0(id);
cfgcr1_reg = DG1_DPLL_CFGCR1(id);
- } else if (IS_ROCKETLAKE(i915)) {
+ } else if (display->platform.rocketlake) {
cfgcr0_reg = RKL_DPLL_CFGCR0(id);
cfgcr1_reg = RKL_DPLL_CFGCR1(id);
- } else if (DISPLAY_VER(i915) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
cfgcr0_reg = TGL_DPLL_CFGCR0(id);
cfgcr1_reg = TGL_DPLL_CFGCR1(id);
div0_reg = TGL_DPLL0_DIV0(id);
} else {
- if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
+ if ((display->platform.jasperlake || display->platform.elkhartlake) &&
id == DPLL_ID_EHL_DPLL4) {
cfgcr0_reg = ICL_DPLL_CFGCR0(4);
cfgcr1_reg = ICL_DPLL_CFGCR1(4);
@@ -3784,18 +3787,18 @@ static void icl_dpll_write(struct drm_i915_private *i915,
}
}
- intel_de_write(i915, cfgcr0_reg, hw_state->cfgcr0);
- intel_de_write(i915, cfgcr1_reg, hw_state->cfgcr1);
- drm_WARN_ON_ONCE(&i915->drm, i915->display.vbt.override_afc_startup &&
+ intel_de_write(display, cfgcr0_reg, hw_state->cfgcr0);
+ intel_de_write(display, cfgcr1_reg, hw_state->cfgcr1);
+ drm_WARN_ON_ONCE(display->drm, display->vbt.override_afc_startup &&
!i915_mmio_reg_valid(div0_reg));
- if (i915->display.vbt.override_afc_startup &&
+ if (display->vbt.override_afc_startup &&
i915_mmio_reg_valid(div0_reg))
- intel_de_rmw(i915, div0_reg,
+ intel_de_rmw(display, div0_reg,
TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
- intel_de_posting_read(i915, cfgcr1_reg);
+ intel_de_posting_read(display, cfgcr1_reg);
}
-static void icl_mg_pll_write(struct drm_i915_private *i915,
+static void icl_mg_pll_write(struct intel_display *display,
struct intel_shared_dpll *pll,
const struct icl_dpll_hw_state *hw_state)
{
@@ -3807,38 +3810,38 @@ static void icl_mg_pll_write(struct drm_i915_private *i915,
* during the calc/readout phase if the mask depends on some other HW
* state like refclk, see icl_calc_mg_pll_state().
*/
- intel_de_rmw(i915, MG_REFCLKIN_CTL(tc_port),
+ intel_de_rmw(display, MG_REFCLKIN_CTL(tc_port),
MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);
- intel_de_rmw(i915, MG_CLKTOP2_CORECLKCTL1(tc_port),
+ intel_de_rmw(display, MG_CLKTOP2_CORECLKCTL1(tc_port),
MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
hw_state->mg_clktop2_coreclkctl1);
- intel_de_rmw(i915, MG_CLKTOP2_HSCLKCTL(tc_port),
+ intel_de_rmw(display, MG_CLKTOP2_HSCLKCTL(tc_port),
MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
hw_state->mg_clktop2_hsclkctl);
- intel_de_write(i915, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
- intel_de_write(i915, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
- intel_de_write(i915, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
- intel_de_write(i915, MG_PLL_FRAC_LOCK(tc_port),
+ intel_de_write(display, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
+ intel_de_write(display, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
+ intel_de_write(display, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
+ intel_de_write(display, MG_PLL_FRAC_LOCK(tc_port),
hw_state->mg_pll_frac_lock);
- intel_de_write(i915, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
+ intel_de_write(display, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
- intel_de_rmw(i915, MG_PLL_BIAS(tc_port),
+ intel_de_rmw(display, MG_PLL_BIAS(tc_port),
hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);
- intel_de_rmw(i915, MG_PLL_TDC_COLDST_BIAS(tc_port),
+ intel_de_rmw(display, MG_PLL_TDC_COLDST_BIAS(tc_port),
hw_state->mg_pll_tdc_coldst_bias_mask,
hw_state->mg_pll_tdc_coldst_bias);
- intel_de_posting_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
+ intel_de_posting_read(display, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
-static void dkl_pll_write(struct drm_i915_private *i915,
+static void dkl_pll_write(struct intel_display *display,
struct intel_shared_dpll *pll,
const struct icl_dpll_hw_state *hw_state)
{
@@ -3850,90 +3853,90 @@ static void dkl_pll_write(struct drm_i915_private *i915,
* though on different building block
*/
/* All the registers are RMW */
- val = intel_dkl_phy_read(i915, DKL_REFCLKIN_CTL(tc_port));
+ val = intel_dkl_phy_read(display, DKL_REFCLKIN_CTL(tc_port));
val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
val |= hw_state->mg_refclkin_ctl;
- intel_dkl_phy_write(i915, DKL_REFCLKIN_CTL(tc_port), val);
+ intel_dkl_phy_write(display, DKL_REFCLKIN_CTL(tc_port), val);
- val = intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
+ val = intel_dkl_phy_read(display, DKL_CLKTOP2_CORECLKCTL1(tc_port));
val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
val |= hw_state->mg_clktop2_coreclkctl1;
- intel_dkl_phy_write(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
+ intel_dkl_phy_write(display, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
- val = intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
+ val = intel_dkl_phy_read(display, DKL_CLKTOP2_HSCLKCTL(tc_port));
val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
val |= hw_state->mg_clktop2_hsclkctl;
- intel_dkl_phy_write(i915, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
+ intel_dkl_phy_write(display, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
val = DKL_PLL_DIV0_MASK;
- if (i915->display.vbt.override_afc_startup)
+ if (display->vbt.override_afc_startup)
val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
- intel_dkl_phy_rmw(i915, DKL_PLL_DIV0(tc_port), val,
+ intel_dkl_phy_rmw(display, DKL_PLL_DIV0(tc_port), val,
hw_state->mg_pll_div0);
- val = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
+ val = intel_dkl_phy_read(display, DKL_PLL_DIV1(tc_port));
val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
val |= hw_state->mg_pll_div1;
- intel_dkl_phy_write(i915, DKL_PLL_DIV1(tc_port), val);
+ intel_dkl_phy_write(display, DKL_PLL_DIV1(tc_port), val);
- val = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
+ val = intel_dkl_phy_read(display, DKL_PLL_SSC(tc_port));
val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
DKL_PLL_SSC_STEP_LEN_MASK |
DKL_PLL_SSC_STEP_NUM_MASK |
DKL_PLL_SSC_EN);
val |= hw_state->mg_pll_ssc;
- intel_dkl_phy_write(i915, DKL_PLL_SSC(tc_port), val);
+ intel_dkl_phy_write(display, DKL_PLL_SSC(tc_port), val);
- val = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
+ val = intel_dkl_phy_read(display, DKL_PLL_BIAS(tc_port));
val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
DKL_PLL_BIAS_FBDIV_FRAC_MASK);
val |= hw_state->mg_pll_bias;
- intel_dkl_phy_write(i915, DKL_PLL_BIAS(tc_port), val);
+ intel_dkl_phy_write(display, DKL_PLL_BIAS(tc_port), val);
- val = intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
+ val = intel_dkl_phy_read(display, DKL_PLL_TDC_COLDST_BIAS(tc_port));
val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
val |= hw_state->mg_pll_tdc_coldst_bias;
- intel_dkl_phy_write(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
+ intel_dkl_phy_write(display, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
- intel_dkl_phy_posting_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
+ intel_dkl_phy_posting_read(display, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
-static void icl_pll_power_enable(struct drm_i915_private *i915,
+static void icl_pll_power_enable(struct intel_display *display,
struct intel_shared_dpll *pll,
i915_reg_t enable_reg)
{
- intel_de_rmw(i915, enable_reg, 0, PLL_POWER_ENABLE);
+ intel_de_rmw(display, enable_reg, 0, PLL_POWER_ENABLE);
/*
* The spec says we need to "wait" but it also says it should be
* immediate.
*/
- if (intel_de_wait_for_set(i915, enable_reg, PLL_POWER_STATE, 1))
- drm_err(&i915->drm, "PLL %d Power not enabled\n",
+ if (intel_de_wait_for_set(display, enable_reg, PLL_POWER_STATE, 1))
+ drm_err(display->drm, "PLL %d Power not enabled\n",
pll->info->id);
}
-static void icl_pll_enable(struct drm_i915_private *i915,
+static void icl_pll_enable(struct intel_display *display,
struct intel_shared_dpll *pll,
i915_reg_t enable_reg)
{
- intel_de_rmw(i915, enable_reg, 0, PLL_ENABLE);
+ intel_de_rmw(display, enable_reg, 0, PLL_ENABLE);
/* Timeout is actually 600us. */
- if (intel_de_wait_for_set(i915, enable_reg, PLL_LOCK, 1))
- drm_err(&i915->drm, "PLL %d not locked\n", pll->info->id);
+ if (intel_de_wait_for_set(display, enable_reg, PLL_LOCK, 1))
+ drm_err(display->drm, "PLL %d not locked\n", pll->info->id);
}
-static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
+static void adlp_cmtg_clock_gating_wa(struct intel_display *display, struct intel_shared_dpll *pll)
{
u32 val;
- if (!(IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) ||
+ if (!(display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) ||
pll->info->id != DPLL_ID_ICL_DPLL0)
return;
/*
@@ -3947,22 +3950,22 @@ static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct inte
* Instead of the usual place for workarounds we apply this one here,
* since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
*/
- val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
- val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
- if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
- drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
+ val = intel_de_read(display, TRANS_CMTG_CHICKEN);
+ val = intel_de_rmw(display, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
+ if (drm_WARN_ON(display->drm, val & ~DISABLE_DPT_CLK_GATING))
+ drm_dbg_kms(display->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
}
-static void combo_pll_enable(struct drm_i915_private *i915,
+static void combo_pll_enable(struct intel_display *display,
struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
- i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
+ i915_reg_t enable_reg = intel_combo_pll_enable_reg(display, pll);
- icl_pll_power_enable(i915, pll, enable_reg);
+ icl_pll_power_enable(display, pll, enable_reg);
- icl_dpll_write(i915, pll, hw_state);
+ icl_dpll_write(display, pll, hw_state);
/*
* DVFS pre sequence would be here, but in our driver the cdclk code
@@ -3970,22 +3973,22 @@ static void combo_pll_enable(struct drm_i915_private *i915,
* nothing here.
*/
- icl_pll_enable(i915, pll, enable_reg);
+ icl_pll_enable(display, pll, enable_reg);
- adlp_cmtg_clock_gating_wa(i915, pll);
+ adlp_cmtg_clock_gating_wa(display, pll);
/* DVFS post sequence would be here. See the comment above. */
}
-static void tbt_pll_enable(struct drm_i915_private *i915,
+static void tbt_pll_enable(struct intel_display *display,
struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
- icl_pll_power_enable(i915, pll, TBT_PLL_ENABLE);
+ icl_pll_power_enable(display, pll, TBT_PLL_ENABLE);
- icl_dpll_write(i915, pll, hw_state);
+ icl_dpll_write(display, pll, hw_state);
/*
* DVFS pre sequence would be here, but in our driver the cdclk code
@@ -3993,24 +3996,24 @@ static void tbt_pll_enable(struct drm_i915_private *i915,
* nothing here.
*/
- icl_pll_enable(i915, pll, TBT_PLL_ENABLE);
+ icl_pll_enable(display, pll, TBT_PLL_ENABLE);
/* DVFS post sequence would be here. See the comment above. */
}
-static void mg_pll_enable(struct drm_i915_private *i915,
+static void mg_pll_enable(struct intel_display *display,
struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
- i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
+ i915_reg_t enable_reg = intel_tc_pll_enable_reg(display, pll);
- icl_pll_power_enable(i915, pll, enable_reg);
+ icl_pll_power_enable(display, pll, enable_reg);
- if (DISPLAY_VER(i915) >= 12)
- dkl_pll_write(i915, pll, hw_state);
+ if (DISPLAY_VER(display) >= 12)
+ dkl_pll_write(display, pll, hw_state);
else
- icl_mg_pll_write(i915, pll, hw_state);
+ icl_mg_pll_write(display, pll, hw_state);
/*
* DVFS pre sequence would be here, but in our driver the cdclk code
@@ -4018,12 +4021,12 @@ static void mg_pll_enable(struct drm_i915_private *i915,
* nothing here.
*/
- icl_pll_enable(i915, pll, enable_reg);
+ icl_pll_enable(display, pll, enable_reg);
/* DVFS post sequence would be here. See the comment above. */
}
-static void icl_pll_disable(struct drm_i915_private *i915,
+static void icl_pll_disable(struct intel_display *display,
struct intel_shared_dpll *pll,
i915_reg_t enable_reg)
{
@@ -4035,51 +4038,51 @@ static void icl_pll_disable(struct drm_i915_private *i915,
* nothing here.
*/
- intel_de_rmw(i915, enable_reg, PLL_ENABLE, 0);
+ intel_de_rmw(display, enable_reg, PLL_ENABLE, 0);
/* Timeout is actually 1us. */
- if (intel_de_wait_for_clear(i915, enable_reg, PLL_LOCK, 1))
- drm_err(&i915->drm, "PLL %d locked\n", pll->info->id);
+ if (intel_de_wait_for_clear(display, enable_reg, PLL_LOCK, 1))
+ drm_err(display->drm, "PLL %d locked\n", pll->info->id);
/* DVFS post sequence would be here. See the comment above. */
- intel_de_rmw(i915, enable_reg, PLL_POWER_ENABLE, 0);
+ intel_de_rmw(display, enable_reg, PLL_POWER_ENABLE, 0);
/*
* The spec says we need to "wait" but it also says it should be
* immediate.
*/
- if (intel_de_wait_for_clear(i915, enable_reg, PLL_POWER_STATE, 1))
- drm_err(&i915->drm, "PLL %d Power not disabled\n",
+ if (intel_de_wait_for_clear(display, enable_reg, PLL_POWER_STATE, 1))
+ drm_err(display->drm, "PLL %d Power not disabled\n",
pll->info->id);
}
-static void combo_pll_disable(struct drm_i915_private *i915,
+static void combo_pll_disable(struct intel_display *display,
struct intel_shared_dpll *pll)
{
- i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
+ i915_reg_t enable_reg = intel_combo_pll_enable_reg(display, pll);
- icl_pll_disable(i915, pll, enable_reg);
+ icl_pll_disable(display, pll, enable_reg);
}
-static void tbt_pll_disable(struct drm_i915_private *i915,
+static void tbt_pll_disable(struct intel_display *display,
struct intel_shared_dpll *pll)
{
- icl_pll_disable(i915, pll, TBT_PLL_ENABLE);
+ icl_pll_disable(display, pll, TBT_PLL_ENABLE);
}
-static void mg_pll_disable(struct drm_i915_private *i915,
+static void mg_pll_disable(struct intel_display *display,
struct intel_shared_dpll *pll)
{
- i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
+ i915_reg_t enable_reg = intel_tc_pll_enable_reg(display, pll);
- icl_pll_disable(i915, pll, enable_reg);
+ icl_pll_disable(display, pll, enable_reg);
}
-static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
+static void icl_update_dpll_ref_clks(struct intel_display *display)
{
/* No SSC ref */
- i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
+ display->dpll.ref_clks.nssc = display->cdclk.hw.ref;
}
static void icl_dump_hw_state(struct drm_printer *p,
@@ -4300,40 +4303,41 @@ static const struct intel_dpll_mgr adlp_pll_mgr = {
/**
* intel_shared_dpll_init - Initialize shared DPLLs
- * @i915: i915 device
+ * @display: intel_display device
*
- * Initialize shared DPLLs for @i915.
+ * Initialize shared DPLLs for @display.
*/
-void intel_shared_dpll_init(struct drm_i915_private *i915)
+void intel_shared_dpll_init(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
const struct intel_dpll_mgr *dpll_mgr = NULL;
const struct dpll_info *dpll_info;
int i;
- mutex_init(&i915->display.dpll.lock);
+ mutex_init(&display->dpll.lock);
- if (DISPLAY_VER(i915) >= 14 || IS_DG2(i915))
+ if (DISPLAY_VER(display) >= 14 || display->platform.dg2)
/* No shared DPLLs on DG2; port PLLs are part of the PHY */
dpll_mgr = NULL;
- else if (IS_ALDERLAKE_P(i915))
+ else if (display->platform.alderlake_p)
dpll_mgr = &adlp_pll_mgr;
- else if (IS_ALDERLAKE_S(i915))
+ else if (display->platform.alderlake_s)
dpll_mgr = &adls_pll_mgr;
- else if (IS_DG1(i915))
+ else if (display->platform.dg1)
dpll_mgr = &dg1_pll_mgr;
- else if (IS_ROCKETLAKE(i915))
+ else if (display->platform.rocketlake)
dpll_mgr = &rkl_pll_mgr;
- else if (DISPLAY_VER(i915) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
dpll_mgr = &tgl_pll_mgr;
- else if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915))
+ else if (display->platform.jasperlake || display->platform.elkhartlake)
dpll_mgr = &ehl_pll_mgr;
- else if (DISPLAY_VER(i915) >= 11)
+ else if (DISPLAY_VER(display) >= 11)
dpll_mgr = &icl_pll_mgr;
- else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
+ else if (display->platform.geminilake || display->platform.broxton)
dpll_mgr = &bxt_pll_mgr;
- else if (DISPLAY_VER(i915) == 9)
+ else if (DISPLAY_VER(display) == 9)
dpll_mgr = &skl_pll_mgr;
- else if (HAS_DDI(i915))
+ else if (HAS_DDI(display))
dpll_mgr = &hsw_pll_mgr;
else if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915))
dpll_mgr = &pch_pll_mgr;
@@ -4344,20 +4348,20 @@ void intel_shared_dpll_init(struct drm_i915_private *i915)
dpll_info = dpll_mgr->dpll_info;
for (i = 0; dpll_info[i].name; i++) {
- if (drm_WARN_ON(&i915->drm,
- i >= ARRAY_SIZE(i915->display.dpll.shared_dplls)))
+ if (drm_WARN_ON(display->drm,
+ i >= ARRAY_SIZE(display->dpll.shared_dplls)))
break;
/* must fit into unsigned long bitmask on 32bit */
- if (drm_WARN_ON(&i915->drm, dpll_info[i].id >= 32))
+ if (drm_WARN_ON(display->drm, dpll_info[i].id >= 32))
break;
- i915->display.dpll.shared_dplls[i].info = &dpll_info[i];
- i915->display.dpll.shared_dplls[i].index = i;
+ display->dpll.shared_dplls[i].info = &dpll_info[i];
+ display->dpll.shared_dplls[i].index = i;
}
- i915->display.dpll.mgr = dpll_mgr;
- i915->display.dpll.num_shared_dpll = i;
+ display->dpll.mgr = dpll_mgr;
+ display->dpll.num_shared_dpll = i;
}
/**
@@ -4372,16 +4376,16 @@ void intel_shared_dpll_init(struct drm_i915_private *i915)
* calling intel_shared_dpll_swap_state().
*
* Returns:
- * 0 on success, negative error code on falure.
+ * 0 on success, negative error code on failure.
*/
int intel_compute_shared_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
- if (drm_WARN_ON(&i915->drm, !dpll_mgr))
+ if (drm_WARN_ON(display->drm, !dpll_mgr))
return -EINVAL;
return dpll_mgr->compute_dplls(state, crtc, encoder);
@@ -4411,10 +4415,10 @@ int intel_reserve_shared_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
- if (drm_WARN_ON(&i915->drm, !dpll_mgr))
+ if (drm_WARN_ON(display->drm, !dpll_mgr))
return -EINVAL;
return dpll_mgr->get_dplls(state, crtc, encoder);
@@ -4434,8 +4438,8 @@ int intel_reserve_shared_dplls(struct intel_atomic_state *state,
void intel_release_shared_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
/*
* FIXME: this function is called for every platform having a
@@ -4463,10 +4467,10 @@ void intel_update_active_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
+ struct intel_display *display = to_intel_display(encoder);
+ const struct intel_dpll_mgr *dpll_mgr = display->dpll.mgr;
- if (drm_WARN_ON(&i915->drm, !dpll_mgr))
+ if (drm_WARN_ON(display->drm, !dpll_mgr))
return;
dpll_mgr->update_active_dpll(state, crtc, encoder);
@@ -4474,49 +4478,49 @@ void intel_update_active_dpll(struct intel_atomic_state *state,
/**
* intel_dpll_get_freq - calculate the DPLL's output frequency
- * @i915: i915 device
+ * @display: intel_display device
* @pll: DPLL for which to calculate the output frequency
* @dpll_hw_state: DPLL state from which to calculate the output frequency
*
* Return the output frequency corresponding to @pll's passed in @dpll_hw_state.
*/
-int intel_dpll_get_freq(struct drm_i915_private *i915,
+int intel_dpll_get_freq(struct intel_display *display,
const struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
- if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
+ if (drm_WARN_ON(display->drm, !pll->info->funcs->get_freq))
return 0;
- return pll->info->funcs->get_freq(i915, pll, dpll_hw_state);
+ return pll->info->funcs->get_freq(display, pll, dpll_hw_state);
}
/**
* intel_dpll_get_hw_state - readout the DPLL's hardware state
- * @i915: i915 device
+ * @display: intel_display device instance
* @pll: DPLL for which to read out the hardware state
* @dpll_hw_state: DPLL's hardware state
*
* Read out @pll's hardware state into @dpll_hw_state.
*/
-bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
+bool intel_dpll_get_hw_state(struct intel_display *display,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
- return pll->info->funcs->get_hw_state(i915, pll, dpll_hw_state);
+ return pll->info->funcs->get_hw_state(display, pll, dpll_hw_state);
}
-static void readout_dpll_hw_state(struct drm_i915_private *i915,
+static void readout_dpll_hw_state(struct intel_display *display,
struct intel_shared_dpll *pll)
{
struct intel_crtc *crtc;
- pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
+ pll->on = intel_dpll_get_hw_state(display, pll, &pll->state.hw_state);
if (pll->on && pll->info->power_domain)
- pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
+ pll->wakeref = intel_display_power_get(display, pll->info->power_domain);
pll->state.pipe_mask = 0;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -4525,67 +4529,69 @@ static void readout_dpll_hw_state(struct drm_i915_private *i915,
}
pll->active_mask = pll->state.pipe_mask;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"%s hw state readout: pipe_mask 0x%x, on %i\n",
pll->info->name, pll->state.pipe_mask, pll->on);
}
-void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
+void intel_dpll_update_ref_clks(struct intel_display *display)
{
- if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
- i915->display.dpll.mgr->update_ref_clks(i915);
+ if (display->dpll.mgr && display->dpll.mgr->update_ref_clks)
+ display->dpll.mgr->update_ref_clks(display);
}
-void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
+void intel_dpll_readout_hw_state(struct intel_display *display)
{
struct intel_shared_dpll *pll;
int i;
- for_each_shared_dpll(i915, pll, i)
- readout_dpll_hw_state(i915, pll);
+ for_each_shared_dpll(display, pll, i)
+ readout_dpll_hw_state(display, pll);
}
-static void sanitize_dpll_state(struct drm_i915_private *i915,
+static void sanitize_dpll_state(struct intel_display *display,
struct intel_shared_dpll *pll)
{
if (!pll->on)
return;
- adlp_cmtg_clock_gating_wa(i915, pll);
+ adlp_cmtg_clock_gating_wa(display, pll);
if (pll->active_mask)
return;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"%s enabled but not in use, disabling\n",
pll->info->name);
- _intel_disable_shared_dpll(i915, pll);
+ _intel_disable_shared_dpll(display, pll);
}
-void intel_dpll_sanitize_state(struct drm_i915_private *i915)
+void intel_dpll_sanitize_state(struct intel_display *display)
{
struct intel_shared_dpll *pll;
int i;
- for_each_shared_dpll(i915, pll, i)
- sanitize_dpll_state(i915, pll);
+ intel_cx0_pll_power_save_wa(display);
+
+ for_each_shared_dpll(display, pll, i)
+ sanitize_dpll_state(display, pll);
}
/**
* intel_dpll_dump_hw_state - dump hw_state
- * @i915: i915 drm device
+ * @display: intel_display structure
* @p: where to print the state to
* @dpll_hw_state: hw state to be dumped
*
* Dump out the relevant values in @dpll_hw_state.
*/
-void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
+void intel_dpll_dump_hw_state(struct intel_display *display,
struct drm_printer *p,
const struct intel_dpll_hw_state *dpll_hw_state)
{
- if (i915->display.dpll.mgr) {
- i915->display.dpll.mgr->dump_hw_state(p, dpll_hw_state);
+ if (display->dpll.mgr) {
+ display->dpll.mgr->dump_hw_state(p, dpll_hw_state);
} else {
/* fallback for platforms that don't use the shared dpll
* infrastructure
@@ -4596,7 +4602,7 @@ void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
/**
* intel_dpll_compare_hw_state - compare the two states
- * @i915: i915 drm device
+ * @display: intel_display structure
* @a: first DPLL hw state
* @b: second DPLL hw state
*
@@ -4604,12 +4610,12 @@ void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
*
* Returns: true if the states are equal, false if they differ
*/
-bool intel_dpll_compare_hw_state(struct drm_i915_private *i915,
+bool intel_dpll_compare_hw_state(struct intel_display *display,
const struct intel_dpll_hw_state *a,
const struct intel_dpll_hw_state *b)
{
- if (i915->display.dpll.mgr) {
- return i915->display.dpll.mgr->compare_hw_state(a, b);
+ if (display->dpll.mgr) {
+ return display->dpll.mgr->compare_hw_state(a, b);
} else {
/* fallback for platforms that don't use the shared dpll
* infrastructure
@@ -4619,17 +4625,16 @@ bool intel_dpll_compare_hw_state(struct drm_i915_private *i915,
}
static void
-verify_single_dpll_state(struct drm_i915_private *i915,
+verify_single_dpll_state(struct intel_display *display,
struct intel_shared_dpll *pll,
struct intel_crtc *crtc,
const struct intel_crtc_state *new_crtc_state)
{
- struct intel_display *display = &i915->display;
struct intel_dpll_hw_state dpll_hw_state = {};
u8 pipe_mask;
bool active;
- active = intel_dpll_get_hw_state(i915, pll, &dpll_hw_state);
+ active = intel_dpll_get_hw_state(display, pll, &dpll_hw_state);
if (!pll->info->always_on) {
INTEL_DISPLAY_STATE_WARN(display, !pll->on && pll->active_mask,
@@ -4685,14 +4690,13 @@ void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
if (new_crtc_state->shared_dpll)
- verify_single_dpll_state(i915, new_crtc_state->shared_dpll,
+ verify_single_dpll_state(display, new_crtc_state->shared_dpll,
crtc, new_crtc_state);
if (old_crtc_state->shared_dpll &&
@@ -4715,10 +4719,10 @@ void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
void intel_shared_dpll_verify_disabled(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_shared_dpll *pll;
int i;
- for_each_shared_dpll(i915, pll, i)
- verify_single_dpll_state(i915, pll, NULL, NULL);
+ for_each_shared_dpll(display, pll, i)
+ verify_single_dpll_state(display, pll, NULL, NULL);
}
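For orientation, a minimal caller-side sketch (not part of the patch) of the display-based DPLL API after this conversion; the helper name dump_shared_dplls is hypothetical, while to_intel_display(), for_each_shared_dpll(), intel_dpll_get_hw_state() and intel_dpll_get_freq() are the interfaces converted above, and the sketch assumes the same includes as intel_dpll_mgr.c:

static void dump_shared_dplls(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_shared_dpll *pll;
	int i;

	for_each_shared_dpll(display, pll, i) {
		struct intel_dpll_hw_state hw_state = {};
		/* Read the current hardware state via the new display-based entry point. */
		bool active = intel_dpll_get_hw_state(display, pll, &hw_state);

		drm_dbg_kms(display->drm, "%s: active %d, freq %d\n",
			    pll->info->name, active,
			    active ? intel_dpll_get_freq(display, pll, &hw_state) : 0);
	}
}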
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index 6af325b8e27d..caffb084830c 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -30,12 +30,11 @@
#include "intel_display_power.h"
#include "intel_wakeref.h"
-#define for_each_shared_dpll(__i915, __pll, __i) \
- for ((__i) = 0; (__i) < (__i915)->display.dpll.num_shared_dpll && \
- ((__pll) = &(__i915)->display.dpll.shared_dplls[(__i)]) ; (__i)++)
+#define for_each_shared_dpll(__display, __pll, __i) \
+ for ((__i) = 0; (__i) < (__display)->dpll.num_shared_dpll && \
+ ((__pll) = &(__display)->dpll.shared_dplls[(__i)]) ; (__i)++)
enum tc_port;
-struct drm_i915_private;
struct drm_printer;
struct intel_atomic_state;
struct intel_crtc;
@@ -318,7 +317,7 @@ struct dpll_info {
const struct intel_shared_dpll_funcs *funcs;
/**
- * @id: unique indentifier for this DPLL
+ * @id: unique identifier for this DPLL
*/
enum intel_dpll_id id;
@@ -390,9 +389,9 @@ struct intel_shared_dpll {
/* shared dpll functions */
struct intel_shared_dpll *
-intel_get_shared_dpll_by_id(struct drm_i915_private *i915,
+intel_get_shared_dpll_by_id(struct intel_display *display,
enum intel_dpll_id id);
-void assert_shared_dpll(struct drm_i915_private *i915,
+void assert_shared_dpll(struct intel_display *display,
struct intel_shared_dpll *pll,
bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
@@ -413,24 +412,24 @@ void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
void intel_update_active_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder);
-int intel_dpll_get_freq(struct drm_i915_private *i915,
+int intel_dpll_get_freq(struct intel_display *display,
const struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state);
-bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
+bool intel_dpll_get_hw_state(struct intel_display *display,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state);
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_shared_dpll_swap_state(struct intel_atomic_state *state);
-void intel_shared_dpll_init(struct drm_i915_private *i915);
-void intel_dpll_update_ref_clks(struct drm_i915_private *i915);
-void intel_dpll_readout_hw_state(struct drm_i915_private *i915);
-void intel_dpll_sanitize_state(struct drm_i915_private *i915);
+void intel_shared_dpll_init(struct intel_display *display);
+void intel_dpll_update_ref_clks(struct intel_display *display);
+void intel_dpll_readout_hw_state(struct intel_display *display);
+void intel_dpll_sanitize_state(struct intel_display *display);
-void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
+void intel_dpll_dump_hw_state(struct intel_display *display,
struct drm_printer *p,
const struct intel_dpll_hw_state *dpll_hw_state);
-bool intel_dpll_compare_hw_state(struct drm_i915_private *i915,
+bool intel_dpll_compare_hw_state(struct intel_display *display,
const struct intel_dpll_hw_state *a,
const struct intel_dpll_hw_state *b);
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
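The converted for_each_shared_dpll() iterator above now takes a struct intel_display and walks display->dpll.shared_dplls directly. A minimal caller sketch, assuming a valid display pointer and the usual i915 display headers; the helper name and debug message are hypothetical:

static void example_count_enabled_dplls(struct intel_display *display)
{
	struct intel_shared_dpll *pll;
	int enabled = 0, i;

	for_each_shared_dpll(display, pll, i) {
		struct intel_dpll_hw_state hw_state;

		/* Read back whether the hardware reports this PLL as enabled. */
		if (intel_dpll_get_hw_state(display, pll, &hw_state))
			enabled++;
	}

	drm_dbg_kms(display->drm, "%d shared DPLLs enabled\n", enabled);
}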
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index 8b1f0e92a11c..0d8ebe38226e 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -125,6 +125,7 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
unsigned int alignment)
{
struct drm_i915_private *i915 = vm->i915;
+ struct intel_display *display = &i915->display;
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
intel_wakeref_t wakeref;
struct i915_vma *vma;
@@ -137,7 +138,7 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
pin_flags |= PIN_MAPPABLE;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- atomic_inc(&i915->gpu_error.pending_fb_pin);
+ atomic_inc(&display->restore.pending_fb_pin);
for_i915_gem_ww(&ww, err, true) {
err = i915_gem_object_lock(dpt->obj, &ww);
@@ -167,7 +168,7 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
dpt->obj->mm.dirty = true;
- atomic_dec(&i915->gpu_error.pending_fb_pin);
+ atomic_dec(&display->restore.pending_fb_pin);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return err ? ERR_PTR(err) : vma;
@@ -183,7 +184,7 @@ void intel_dpt_unpin_from_ggtt(struct i915_address_space *vm)
/**
* intel_dpt_resume - restore the memory mapping for all DPT FBs during system resume
- * @i915: device instance
+ * @display: display device instance
*
* Restore the memory mapping during system resume for all framebuffers which
* are mapped to HW via a GGTT->DPT page table. The content of these page
@@ -193,26 +194,26 @@ void intel_dpt_unpin_from_ggtt(struct i915_address_space *vm)
* This function must be called after the mappings in GGTT have been restored calling
* i915_ggtt_resume().
*/
-void intel_dpt_resume(struct drm_i915_private *i915)
+void intel_dpt_resume(struct intel_display *display)
{
struct drm_framebuffer *drm_fb;
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
- mutex_lock(&i915->drm.mode_config.fb_lock);
- drm_for_each_fb(drm_fb, &i915->drm) {
+ mutex_lock(&display->drm->mode_config.fb_lock);
+ drm_for_each_fb(drm_fb, display->drm) {
struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
if (fb->dpt_vm)
i915_ggtt_resume_vm(fb->dpt_vm, true);
}
- mutex_unlock(&i915->drm.mode_config.fb_lock);
+ mutex_unlock(&display->drm->mode_config.fb_lock);
}
/**
* intel_dpt_suspend - suspend the memory mapping for all DPT FBs during system suspend
- * @i915: device instance
+ * @display: display device instance
*
* Suspend the memory mapping during system suspend for all framebuffers which
* are mapped to HW via a GGTT->DPT page table.
@@ -220,23 +221,23 @@ void intel_dpt_resume(struct drm_i915_private *i915)
* This function must be called before the mappings in GGTT are suspended calling
* i915_ggtt_suspend().
*/
-void intel_dpt_suspend(struct drm_i915_private *i915)
+void intel_dpt_suspend(struct intel_display *display)
{
struct drm_framebuffer *drm_fb;
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
- mutex_lock(&i915->drm.mode_config.fb_lock);
+ mutex_lock(&display->drm->mode_config.fb_lock);
- drm_for_each_fb(drm_fb, &i915->drm) {
+ drm_for_each_fb(drm_fb, display->drm) {
struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
if (fb->dpt_vm)
i915_ggtt_suspend_vm(fb->dpt_vm, true);
}
- mutex_unlock(&i915->drm.mode_config.fb_lock);
+ mutex_unlock(&display->drm->mode_config.fb_lock);
}
struct i915_address_space *
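As the kernel-doc above spells out, intel_dpt_suspend() must run before the GGTT mappings are suspended and intel_dpt_resume() only after they have been restored. A rough ordering sketch with the new intel_display parameter; the wrapper functions are hypothetical and the GGTT calls are only indicated in comments:

static void example_display_mappings_suspend(struct drm_i915_private *i915)
{
	struct intel_display *display = &i915->display;

	/* DPT page tables first ... */
	intel_dpt_suspend(display);
	/* ... then i915_ggtt_suspend() of the GGTT they live in. */
}

static void example_display_mappings_resume(struct drm_i915_private *i915)
{
	struct intel_display *display = &i915->display;

	/* GGTT mappings are restored first (i915_ggtt_resume()) ... */
	/* ... then the DPT mappings pointing into them. */
	intel_dpt_resume(display);
}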
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.h b/drivers/gpu/drm/i915/display/intel_dpt.h
index 1f88b0ee17e7..db521401b828 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.h
+++ b/drivers/gpu/drm/i915/display/intel_dpt.h
@@ -8,18 +8,17 @@
#include <linux/types.h>
-struct drm_i915_private;
-
struct i915_address_space;
struct i915_vma;
+struct intel_display;
struct intel_framebuffer;
void intel_dpt_destroy(struct i915_address_space *vm);
struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
unsigned int alignment);
void intel_dpt_unpin_from_ggtt(struct i915_address_space *vm);
-void intel_dpt_suspend(struct drm_i915_private *i915);
-void intel_dpt_resume(struct drm_i915_private *i915);
+void intel_dpt_suspend(struct intel_display *display);
+void intel_dpt_resume(struct intel_display *display);
struct i915_address_space *
intel_dpt_create(struct intel_framebuffer *fb);
u64 intel_dpt_offset(struct i915_vma *dpt_vma);
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index 0fec01b79b23..05cd0f6e6d71 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -65,31 +65,29 @@ const char *intel_drrs_type_str(enum drrs_type drrs_type)
return str[drrs_type];
}
-bool intel_cpu_transcoder_has_drrs(struct drm_i915_private *i915,
+bool intel_cpu_transcoder_has_drrs(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- struct intel_display *display = &i915->display;
-
if (HAS_DOUBLE_BUFFERED_M_N(display))
return true;
- return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder);
+ return intel_cpu_transcoder_has_m2_n2(display, cpu_transcoder);
}
static void
intel_drrs_set_refresh_rate_pipeconf(struct intel_crtc *crtc,
enum drrs_refresh_rate refresh_rate)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum transcoder cpu_transcoder = crtc->drrs.cpu_transcoder;
u32 bit;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (display->platform.valleyview || display->platform.cherryview)
bit = TRANSCONF_REFRESH_RATE_ALT_VLV;
else
bit = TRANSCONF_REFRESH_RATE_ALT_ILK;
- intel_de_rmw(dev_priv, TRANSCONF(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, TRANSCONF(display, cpu_transcoder),
bit, refresh_rate == DRRS_REFRESH_RATE_LOW ? bit : 0);
}
@@ -110,12 +108,12 @@ bool intel_drrs_is_active(struct intel_crtc *crtc)
static void intel_drrs_set_state(struct intel_crtc *crtc,
enum drrs_refresh_rate refresh_rate)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
if (refresh_rate == crtc->drrs.refresh_rate)
return;
- if (intel_cpu_transcoder_has_m2_n2(dev_priv, crtc->drrs.cpu_transcoder))
+ if (intel_cpu_transcoder_has_m2_n2(display, crtc->drrs.cpu_transcoder))
intel_drrs_set_refresh_rate_pipeconf(crtc, refresh_rate);
else
intel_drrs_set_refresh_rate_m_n(crtc, refresh_rate);
@@ -132,13 +130,13 @@ static void intel_drrs_schedule_work(struct intel_crtc *crtc)
static unsigned int intel_drrs_frontbuffer_bits(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
unsigned int frontbuffer_bits;
frontbuffer_bits = INTEL_FRONTBUFFER_ALL_MASK(crtc->pipe);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc,
crtc_state->joiner_pipes)
frontbuffer_bits |= INTEL_FRONTBUFFER_ALL_MASK(crtc->pipe);
@@ -222,13 +220,13 @@ static void intel_drrs_downclock_work(struct work_struct *work)
mutex_unlock(&crtc->drrs.mutex);
}
-static void intel_drrs_frontbuffer_update(struct drm_i915_private *dev_priv,
+static void intel_drrs_frontbuffer_update(struct intel_display *display,
unsigned int all_frontbuffer_bits,
bool invalidate)
{
struct intel_crtc *crtc;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
unsigned int frontbuffer_bits;
mutex_lock(&crtc->drrs.mutex);
@@ -262,7 +260,7 @@ static void intel_drrs_frontbuffer_update(struct drm_i915_private *dev_priv,
/**
* intel_drrs_invalidate - Disable Idleness DRRS
- * @dev_priv: i915 device
+ * @display: display device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called everytime rendering on the given planes start.
@@ -270,15 +268,15 @@ static void intel_drrs_frontbuffer_update(struct drm_i915_private *dev_priv,
*
* Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
*/
-void intel_drrs_invalidate(struct drm_i915_private *dev_priv,
+void intel_drrs_invalidate(struct intel_display *display,
unsigned int frontbuffer_bits)
{
- intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, true);
+ intel_drrs_frontbuffer_update(display, frontbuffer_bits, true);
}
/**
* intel_drrs_flush - Restart Idleness DRRS
- * @dev_priv: i915 device
+ * @display: display device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called every time rendering on the given planes has
@@ -288,10 +286,10 @@ void intel_drrs_invalidate(struct drm_i915_private *dev_priv,
*
* Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
*/
-void intel_drrs_flush(struct drm_i915_private *dev_priv,
+void intel_drrs_flush(struct intel_display *display,
unsigned int frontbuffer_bits)
{
- intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, false);
+ intel_drrs_frontbuffer_update(display, frontbuffer_bits, false);
}
/**
@@ -312,7 +310,7 @@ void intel_drrs_crtc_init(struct intel_crtc *crtc)
static int intel_drrs_debugfs_status_show(struct seq_file *m, void *unused)
{
struct intel_crtc *crtc = m->private;
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state;
int ret;
@@ -325,7 +323,7 @@ static int intel_drrs_debugfs_status_show(struct seq_file *m, void *unused)
mutex_lock(&crtc->drrs.mutex);
seq_printf(m, "DRRS capable: %s\n",
- str_yes_no(intel_cpu_transcoder_has_drrs(i915,
+ str_yes_no(intel_cpu_transcoder_has_drrs(display,
crtc_state->cpu_transcoder)));
seq_printf(m, "DRRS enabled: %s\n",
@@ -353,7 +351,7 @@ DEFINE_SHOW_ATTRIBUTE(intel_drrs_debugfs_status);
static int intel_drrs_debugfs_ctl_set(void *data, u64 val)
{
struct intel_crtc *crtc = data;
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *crtc_state;
struct drm_crtc_commit *commit;
int ret;
@@ -375,8 +373,7 @@ static int intel_drrs_debugfs_ctl_set(void *data, u64 val)
goto out;
}
- drm_dbg(&i915->drm,
- "Manually %sactivating DRRS\n", val ? "" : "de");
+ drm_dbg_kms(display->drm, "Manually %sactivating DRRS\n", val ? "" : "de");
if (val)
intel_drrs_activate(crtc_state);
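With the conversion, the frontbuffer-tracking entry points intel_drrs_invalidate() and intel_drrs_flush() take struct intel_display. A minimal sketch of the call pattern described in the kernel-doc above; the function, pipe variable and surrounding flow are hypothetical:

static void example_frontbuffer_render_cycle(struct intel_display *display,
					     enum pipe pipe)
{
	unsigned int frontbuffer_bits = INTEL_FRONTBUFFER_ALL_MASK(pipe);

	/* Rendering to the frontbuffer starts: disable idleness DRRS. */
	intel_drrs_invalidate(display, frontbuffer_bits);

	/* ... rendering happens and the results reach the display ... */

	/* Rendering done: re-arm the idleness downclock work. */
	intel_drrs_flush(display, frontbuffer_bits);
}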
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.h b/drivers/gpu/drm/i915/display/intel_drrs.h
index 0982f95eab72..32b45a93a68f 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.h
+++ b/drivers/gpu/drm/i915/display/intel_drrs.h
@@ -10,21 +10,21 @@
enum drrs_type;
enum transcoder;
-struct drm_i915_private;
struct intel_atomic_state;
+struct intel_connector;
struct intel_crtc;
struct intel_crtc_state;
-struct intel_connector;
+struct intel_display;
-bool intel_cpu_transcoder_has_drrs(struct drm_i915_private *i915,
+bool intel_cpu_transcoder_has_drrs(struct intel_display *display,
enum transcoder cpu_transcoder);
const char *intel_drrs_type_str(enum drrs_type drrs_type);
bool intel_drrs_is_active(struct intel_crtc *crtc);
void intel_drrs_activate(const struct intel_crtc_state *crtc_state);
void intel_drrs_deactivate(const struct intel_crtc_state *crtc_state);
-void intel_drrs_invalidate(struct drm_i915_private *dev_priv,
+void intel_drrs_invalidate(struct intel_display *display,
unsigned int frontbuffer_bits);
-void intel_drrs_flush(struct drm_i915_private *dev_priv,
+void intel_drrs_flush(struct intel_display *display,
unsigned int frontbuffer_bits);
void intel_drrs_crtc_init(struct intel_crtc *crtc);
void intel_drrs_crtc_debugfs_add(struct intel_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index e6f8fc743fb4..9fc4003d1579 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -109,38 +109,32 @@ static bool pre_commit_is_vrr_active(struct intel_atomic_state *state,
return old_crtc_state->vrr.enable && !intel_crtc_vrr_disabling(state, crtc);
}
-static const struct intel_crtc_state *
-pre_commit_crtc_state(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static int dsb_vblank_delay(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- const struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_crtc_state *crtc_state =
+ intel_pre_commit_crtc_state(state, crtc);
- /*
- * During fastsets/etc. the transcoder is still
- * running with the old timings at this point.
- */
- if (intel_crtc_needs_modeset(new_crtc_state))
- return new_crtc_state;
+ if (pre_commit_is_vrr_active(state, crtc))
+ /*
+ * When the push is sent during vblank it will trigger
+ * on the next scanline, hence we have up to one extra
+ * scanline until the delayed vblank occurs after
+ * TRANS_PUSH has been written.
+ */
+ return intel_vrr_vblank_delay(crtc_state) + 1;
else
- return old_crtc_state;
-}
-
-static int dsb_vblank_delay(const struct intel_crtc_state *crtc_state)
-{
- return intel_mode_vblank_start(&crtc_state->hw.adjusted_mode) -
- intel_mode_vdisplay(&crtc_state->hw.adjusted_mode);
+ return intel_mode_vblank_delay(&crtc_state->hw.adjusted_mode);
}
static int dsb_vtotal(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
+ const struct intel_crtc_state *crtc_state =
+ intel_pre_commit_crtc_state(state, crtc);
if (pre_commit_is_vrr_active(state, crtc))
- return crtc_state->vrr.vmax;
+ return intel_vrr_vmax_vtotal(crtc_state);
else
return intel_mode_vtotal(&crtc_state->hw.adjusted_mode);
}
@@ -148,7 +142,8 @@ static int dsb_vtotal(struct intel_atomic_state *state,
static int dsb_dewake_scanline_start(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
+ const struct intel_crtc_state *crtc_state =
+ intel_pre_commit_crtc_state(state, crtc);
struct drm_i915_private *i915 = to_i915(state->base.dev);
unsigned int latency = skl_watermark_max_latency(i915, 0);
@@ -159,7 +154,8 @@ static int dsb_dewake_scanline_start(struct intel_atomic_state *state,
static int dsb_dewake_scanline_end(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
+ const struct intel_crtc_state *crtc_state =
+ intel_pre_commit_crtc_state(state, crtc);
return intel_mode_vdisplay(&crtc_state->hw.adjusted_mode);
}
@@ -167,23 +163,33 @@ static int dsb_dewake_scanline_end(struct intel_atomic_state *state,
static int dsb_scanline_to_hw(struct intel_atomic_state *state,
struct intel_crtc *crtc, int scanline)
{
- const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
+ const struct intel_crtc_state *crtc_state =
+ intel_pre_commit_crtc_state(state, crtc);
int vtotal = dsb_vtotal(state, crtc);
return (scanline + vtotal - intel_crtc_scanline_offset(crtc_state)) % vtotal;
}
+/*
+ * Bspec suggests that we should always set DSB_SKIP_WAITS_EN. However, our
+ * approach to deciding when a flip is complete differs from what Bspec
+ * describes: we wait for the vblank in the DSB and generate an interrupt
+ * when it happens, and that interrupt is taken as the indication of
+ * completion -> we definitely do not want to skip the vblank wait. Skipping
+ * vblank evasion is also a concern, i.e. the arming registers could get
+ * latched before we have finished writing them. For these reasons we do not
+ * set DSB_SKIP_WAITS_EN.
+ */
static u32 dsb_chicken(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
if (pre_commit_is_vrr_active(state, crtc))
- return DSB_SKIP_WAITS_EN |
- DSB_CTRL_WAIT_SAFE_WINDOW |
+ return DSB_CTRL_WAIT_SAFE_WINDOW |
DSB_CTRL_NO_WAIT_VBLANK |
DSB_INST_WAIT_SAFE_WINDOW |
DSB_INST_NO_WAIT_VBLANK;
else
- return DSB_SKIP_WAITS_EN;
+ return 0;
}
static bool assert_dsb_has_room(struct intel_dsb *dsb)
@@ -378,7 +384,8 @@ void intel_dsb_interrupt(struct intel_dsb *dsb)
void intel_dsb_wait_usec(struct intel_dsb *dsb, int count)
{
- intel_dsb_emit(dsb, count,
+ /* +1 to make sure we never wait less time than asked for */
+ intel_dsb_emit(dsb, count + 1,
DSB_OPCODE_WAIT_USEC << DSB_OPCODE_SHIFT);
}
@@ -461,6 +468,25 @@ void intel_dsb_wait_scanline_out(struct intel_atomic_state *state,
start, end);
}
+void intel_dsb_poll(struct intel_dsb *dsb,
+ i915_reg_t reg, u32 mask, u32 val,
+ int wait_us, int count)
+{
+ struct intel_crtc *crtc = dsb->crtc;
+ enum pipe pipe = crtc->pipe;
+
+ intel_dsb_reg_write(dsb, DSB_POLLMASK(pipe, dsb->id), mask);
+ intel_dsb_reg_write(dsb, DSB_POLLFUNC(pipe, dsb->id),
+ DSB_POLL_ENABLE |
+ DSB_POLL_WAIT(wait_us) | DSB_POLL_COUNT(count));
+
+ intel_dsb_noop(dsb, 5);
+
+ intel_dsb_emit(dsb, val,
+ (DSB_OPCODE_POLL << DSB_OPCODE_SHIFT) |
+ i915_mmio_reg_offset(reg));
+}
+
static void intel_dsb_align_tail(struct intel_dsb *dsb)
{
u32 aligned_tail, tail;
@@ -532,13 +558,27 @@ void intel_dsb_vblank_evade(struct intel_atomic_state *state,
struct intel_dsb *dsb)
{
struct intel_crtc *crtc = dsb->crtc;
- const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
+ const struct intel_crtc_state *crtc_state =
+ intel_pre_commit_crtc_state(state, crtc);
/* FIXME calibrate sensibly */
int latency = intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, 20);
- int vblank_delay = dsb_vblank_delay(crtc_state);
int start, end;
+ /*
+ * PIPEDSL reads as 0 while in SRDENT (PSR1) or DEEP_SLEEP (PSR2). On
+ * wake-up, scanline counting starts from vblank_start - 1. We don't know
+ * whether a wake-up is already in progress when the evasion starts; in
+ * the worst case PIPEDSL could start returning a valid value right after
+ * we check the scanline, leaving too little time to write all the
+ * registers. To tackle this, evade scanline 0 as well. The drawback is a
+ * one frame delay in the flip when waking up.
+ */
+ if (crtc_state->has_psr)
+ intel_dsb_emit_wait_dsl(dsb, DSB_OPCODE_WAIT_DSL_OUT, 0, 0);
+
if (pre_commit_is_vrr_active(state, crtc)) {
+ int vblank_delay = intel_vrr_vblank_delay(crtc_state);
+
end = intel_vrr_vmin_vblank_start(crtc_state);
start = end - vblank_delay - latency;
intel_dsb_wait_scanline_out(state, dsb, start, end);
@@ -547,6 +587,8 @@ void intel_dsb_vblank_evade(struct intel_atomic_state *state,
start = end - vblank_delay - latency;
intel_dsb_wait_scanline_out(state, dsb, start, end);
} else {
+ int vblank_delay = intel_mode_vblank_delay(&crtc_state->hw.adjusted_mode);
+
end = intel_mode_vblank_start(&crtc_state->hw.adjusted_mode);
start = end - vblank_delay - latency;
intel_dsb_wait_scanline_out(state, dsb, start, end);
@@ -624,9 +666,10 @@ void intel_dsb_wait_vblank_delay(struct intel_atomic_state *state,
struct intel_dsb *dsb)
{
struct intel_crtc *crtc = dsb->crtc;
- const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
+ const struct intel_crtc_state *crtc_state =
+ intel_pre_commit_crtc_state(state, crtc);
int usecs = intel_scanlines_to_usecs(&crtc_state->hw.adjusted_mode,
- dsb_vblank_delay(crtc_state)) + 1;
+ dsb_vblank_delay(state, crtc));
intel_dsb_wait_usec(dsb, usecs);
}
@@ -825,7 +868,7 @@ void intel_dsb_irq_handler(struct intel_display *display,
if (crtc->dsb_event) {
/*
- * Update vblank counter/timestmap in case it
+ * Update vblank counter/timestamp in case it
* hasn't been done yet for this frame.
*/
drm_crtc_accurate_vblank_count(&crtc->base);
@@ -838,7 +881,16 @@ void intel_dsb_irq_handler(struct intel_display *display,
}
errors = tmp & dsb_error_int_status(display);
- if (errors)
- drm_err(display->drm, "[CRTC:%d:%s] DSB %d error interrupt: 0x%x\n",
- crtc->base.base.id, crtc->base.name, dsb_id, errors);
+ if (errors & DSB_ATS_FAULT_INT_STATUS)
+ drm_err(display->drm, "[CRTC:%d:%s] DSB %d ATS fault\n",
+ crtc->base.base.id, crtc->base.name, dsb_id);
+ if (errors & DSB_GTT_FAULT_INT_STATUS)
+ drm_err(display->drm, "[CRTC:%d:%s] DSB %d GTT fault\n",
+ crtc->base.base.id, crtc->base.name, dsb_id);
+ if (errors & DSB_RSPTIMEOUT_INT_STATUS)
+ drm_err(display->drm, "[CRTC:%d:%s] DSB %d response timeout\n",
+ crtc->base.base.id, crtc->base.name, dsb_id);
+ if (errors & DSB_POLL_ERR_INT_STATUS)
+ drm_err(display->drm, "[CRTC:%d:%s] DSB %d poll error\n",
+ crtc->base.base.id, crtc->base.name, dsb_id);
}
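The new intel_dsb_poll() above programs the DSB poll mask/function registers and emits a DSB_OPCODE_POLL instruction, making the DSB re-read the given register -- waiting wait_us microseconds between attempts, for at most count attempts -- until the masked value matches val. A hypothetical caller sketch; the register and bit arguments are placeholders, not real i915 definitions:

static void example_dsb_wait_for_status_bit(struct intel_dsb *dsb,
					    i915_reg_t status_reg, u32 bit)
{
	/* Re-check every 10 us, up to 100 times, until the bit is set. */
	intel_dsb_poll(dsb, status_reg, bit, bit, 10, 100);
}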
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
index da6df07a3c83..e843c52bf97c 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.h
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -54,6 +54,9 @@ void intel_dsb_wait_scanline_out(struct intel_atomic_state *state,
int lower, int upper);
void intel_dsb_vblank_evade(struct intel_atomic_state *state,
struct intel_dsb *dsb);
+void intel_dsb_poll(struct intel_dsb *dsb,
+ i915_reg_t reg, u32 mask, u32 val,
+ int wait_us, int count);
void intel_dsb_chain(struct intel_atomic_state *state,
struct intel_dsb *dsb,
struct intel_dsb *chained_dsb,
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index 0be46c6c9611..403151175a87 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -58,16 +58,16 @@ int intel_dsi_get_modes(struct drm_connector *connector)
}
enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
const struct drm_display_mode *fixed_mode =
intel_panel_fixed_mode(intel_connector, mode);
- int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq;
+ int max_dotclk = display->cdclk.max_dotclk_freq;
enum drm_mode_status status;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
status = intel_panel_mode_valid(intel_connector, mode);
if (status != MODE_OK)
@@ -76,7 +76,7 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
if (fixed_mode->clock > max_dotclk)
return MODE_CLOCK_HIGH;
- return intel_mode_valid_max_plane_size(dev_priv, mode, 1);
+ return intel_mode_valid_max_plane_size(display, mode, 1);
}
struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h
index e8ba4ccd99d3..89c7166a3860 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi.h
@@ -165,7 +165,7 @@ enum drm_panel_orientation
intel_dsi_get_panel_orientation(struct intel_connector *connector);
int intel_dsi_get_modes(struct drm_connector *connector);
enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
const struct mipi_dsi_host_ops *funcs,
enum port port);
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index b2b78f39cfd3..7b2ffd14ae6e 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -582,7 +582,7 @@ static const fn_mipi_elem_exec exec_elem[] = {
/*
* MIPI Sequence from VBT #53 parsing logic
- * We have already separated each seqence during bios parsing
+ * We have already separated each sequence during bios parsing
* Following is generic execution function for any sequence
*/
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index abf19dfd6d9d..c16fb34b737d 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -217,18 +217,18 @@ static void intel_enable_dvo(struct intel_atomic_state *state,
static enum drm_mode_status
intel_dvo_mode_valid(struct drm_connector *_connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
+ struct intel_display *display = to_intel_display(_connector->dev);
struct intel_connector *connector = to_intel_connector(_connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
const struct drm_display_mode *fixed_mode =
intel_panel_fixed_mode(connector, mode);
- int max_dotclk = to_i915(connector->base.dev)->display.cdclk.max_dotclk_freq;
+ int max_dotclk = display->cdclk.max_dotclk_freq;
int target_clock = mode->clock;
enum drm_mode_status status;
- status = intel_cpu_transcoder_mode_valid(i915, mode);
+ status = intel_cpu_transcoder_mode_valid(display, mode);
if (status != MODE_OK)
return status;
@@ -524,7 +524,7 @@ void intel_dvo_init(struct drm_i915_private *i915)
return;
}
- assert_port_valid(i915, intel_dvo->dev.port);
+ assert_port_valid(display, intel_dvo->dev.port);
encoder->type = INTEL_OUTPUT_DVO;
encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
diff --git a/drivers/gpu/drm/i915/display/intel_dvo_dev.h b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
index 4bf476656b8c..f1e939aaf7fa 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo_dev.h
+++ b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
@@ -57,7 +57,7 @@ struct intel_dvo_dev_ops {
* Turn on/off output.
*
* Because none of our dvo drivers support an intermediate power levels,
- * we don't expose this in the interfac.
+ * we don't expose this in the interface.
*/
void (*dpms)(struct intel_dvo_device *dvo, bool enable);
@@ -71,7 +71,7 @@ struct intel_dvo_dev_ops {
* \return MODE_OK if the mode is valid, or another MODE_* otherwise.
*/
enum drm_mode_status (*mode_valid)(struct intel_dvo_device *dvo,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
/*
* Callback for setting up a video mode after fixups have been made.
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 223c4218c019..2b0e0f220442 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -20,7 +20,7 @@
#include "intel_fb_bo.h"
#include "intel_frontbuffer.h"
-#define check_array_bounds(i915, a, i) drm_WARN_ON(&(i915)->drm, (i) >= ARRAY_SIZE(a))
+#define check_array_bounds(display, a, i) drm_WARN_ON((display)->drm, (i) >= ARRAY_SIZE(a))
/*
* From the Sky Lake PRM:
@@ -539,11 +539,13 @@ static bool check_modifier_display_ver_range(const struct intel_modifier_desc *m
display_ver_from <= md->display_ver.until;
}
-static bool plane_has_modifier(struct drm_i915_private *i915,
+static bool plane_has_modifier(struct intel_display *display,
u8 plane_caps,
const struct intel_modifier_desc *md)
{
- if (!IS_DISPLAY_VER(i915, md->display_ver.from, md->display_ver.until))
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (!IS_DISPLAY_VER(display, md->display_ver.from, md->display_ver.until))
return false;
if (!plane_caps_contain_all(plane_caps, md->plane_caps))
@@ -570,14 +572,14 @@ static bool plane_has_modifier(struct drm_i915_private *i915,
/**
* intel_fb_plane_get_modifiers: Get the modifiers for the given platform and plane capabilities
- * @i915: i915 device instance
+ * @display: display instance
* @plane_caps: capabilities for the plane the modifiers are queried for
*
* Returns:
- * Returns the list of modifiers allowed by the @i915 platform and @plane_caps.
+ * Returns the list of modifiers allowed by the @display platform and @plane_caps.
* The caller must free the returned buffer.
*/
-u64 *intel_fb_plane_get_modifiers(struct drm_i915_private *i915,
+u64 *intel_fb_plane_get_modifiers(struct intel_display *display,
u8 plane_caps)
{
u64 *list, *p;
@@ -585,17 +587,17 @@ u64 *intel_fb_plane_get_modifiers(struct drm_i915_private *i915,
int i;
for (i = 0; i < ARRAY_SIZE(intel_modifiers); i++) {
- if (plane_has_modifier(i915, plane_caps, &intel_modifiers[i]))
+ if (plane_has_modifier(display, plane_caps, &intel_modifiers[i]))
count++;
}
list = kmalloc_array(count, sizeof(*list), GFP_KERNEL);
- if (drm_WARN_ON(&i915->drm, !list))
+ if (drm_WARN_ON(display->drm, !list))
return NULL;
p = list;
for (i = 0; i < ARRAY_SIZE(intel_modifiers); i++) {
- if (plane_has_modifier(i915, plane_caps, &intel_modifiers[i]))
+ if (plane_has_modifier(display, plane_caps, &intel_modifiers[i]))
*p++ = intel_modifiers[i].modifier;
}
*p++ = DRM_FORMAT_MOD_INVALID;
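intel_fb_plane_get_modifiers() now takes struct intel_display and, as the kernel-doc notes, hands ownership of the DRM_FORMAT_MOD_INVALID-terminated list to the caller. A hypothetical caller sketch showing the required kfree():

static int example_query_plane_modifiers(struct intel_display *display,
					 u8 plane_caps)
{
	u64 *modifiers;

	modifiers = intel_fb_plane_get_modifiers(display, plane_caps);
	if (!modifiers)
		return -ENOMEM;

	/* ... pass the list to plane init, e.g. drm_universal_plane_init() ... */

	kfree(modifiers);
	return 0;
}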
@@ -751,33 +753,34 @@ static unsigned int gen12_ccs_aux_stride(struct intel_framebuffer *fb, int ccs_p
int skl_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
{
const struct intel_modifier_desc *md = lookup_modifier(fb->modifier);
- struct drm_i915_private *i915 = to_i915(fb->dev);
+ struct intel_display *display = to_intel_display(fb->dev);
if (md->ccs.packed_aux_planes | md->ccs.planar_aux_planes)
return main_to_ccs_plane(fb, main_plane);
- else if (DISPLAY_VER(i915) < 11 &&
+ else if (DISPLAY_VER(display) < 11 &&
format_is_yuv_semiplanar(md, fb->format))
return 1;
else
return 0;
}
-unsigned int intel_tile_size(const struct drm_i915_private *i915)
+unsigned int intel_tile_size(struct intel_display *display)
{
- return DISPLAY_VER(i915) == 2 ? 2048 : 4096;
+ return DISPLAY_VER(display) == 2 ? 2048 : 4096;
}
unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
- struct drm_i915_private *dev_priv = to_i915(fb->dev);
+ struct intel_display *display = to_intel_display(fb->dev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
unsigned int cpp = fb->format->cpp[color_plane];
switch (fb->modifier) {
case DRM_FORMAT_MOD_LINEAR:
- return intel_tile_size(dev_priv);
+ return intel_tile_size(display);
case I915_FORMAT_MOD_X_TILED:
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
return 128;
else
return 512;
@@ -807,7 +810,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
return 64;
fallthrough;
case I915_FORMAT_MOD_Y_TILED:
- if (DISPLAY_VER(dev_priv) == 2 || HAS_128_BYTE_Y_TILING(dev_priv))
+ if (DISPLAY_VER(display) == 2 || HAS_128_BYTE_Y_TILING(i915))
return 128;
else
return 512;
@@ -838,7 +841,9 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
unsigned int intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
{
- return intel_tile_size(to_i915(fb->dev)) /
+ struct intel_display *display = to_intel_display(fb->dev);
+
+ return intel_tile_size(display) /
intel_tile_width_bytes(fb, color_plane);
}
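To make the tile geometry above concrete (hypothetical platform numbers): on anything newer than display version 2, intel_tile_size() is 4096 bytes, an X-tiled color plane has intel_tile_width_bytes() == 512, so intel_tile_height() is 4096 / 512 = 8 rows per tile; on display version 2 the same plane would use a 2048-byte tile that is 128 bytes wide and 16 rows tall.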
@@ -890,15 +895,17 @@ intel_fb_align_height(const struct drm_framebuffer *fb,
return ALIGN(height, tile_height);
}
-bool intel_fb_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier)
+bool intel_fb_modifier_uses_dpt(struct intel_display *display, u64 modifier)
{
- return HAS_DPT(i915) && modifier != DRM_FORMAT_MOD_LINEAR;
+ return HAS_DPT(display) && modifier != DRM_FORMAT_MOD_LINEAR;
}
bool intel_fb_uses_dpt(const struct drm_framebuffer *fb)
{
- return to_i915(fb->dev)->display.params.enable_dpt &&
- intel_fb_modifier_uses_dpt(to_i915(fb->dev), fb->modifier);
+ struct intel_display *display = to_intel_display(fb->dev);
+
+ return display->params.enable_dpt &&
+ intel_fb_modifier_uses_dpt(display, fb->modifier);
}
void intel_fb_plane_get_subsampling(int *hsub, int *vsub,
@@ -1007,16 +1014,16 @@ static u32 intel_adjust_aligned_offset(int *x, int *y,
unsigned int pitch,
u32 old_offset, u32 new_offset)
{
- struct drm_i915_private *i915 = to_i915(fb->dev);
+ struct intel_display *display = to_intel_display(fb->dev);
unsigned int cpp = fb->format->cpp[color_plane];
- drm_WARN_ON(&i915->drm, new_offset > old_offset);
+ drm_WARN_ON(display->drm, new_offset > old_offset);
if (!is_surface_linear(fb, color_plane)) {
unsigned int tile_size, tile_width, tile_height;
unsigned int pitch_tiles;
- tile_size = intel_tile_size(i915);
+ tile_size = intel_tile_size(display);
intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
if (drm_rotation_90_or_270(rotation)) {
@@ -1042,13 +1049,13 @@ static u32 intel_adjust_aligned_offset(int *x, int *y,
* the x/y offsets.
*/
u32 intel_plane_adjust_aligned_offset(int *x, int *y,
- const struct intel_plane_state *state,
+ const struct intel_plane_state *plane_state,
int color_plane,
u32 old_offset, u32 new_offset)
{
- return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
- state->hw.rotation,
- state->view.color_plane[color_plane].mapping_stride,
+ return intel_adjust_aligned_offset(x, y, plane_state->hw.fb, color_plane,
+ plane_state->hw.rotation,
+ plane_state->view.color_plane[color_plane].mapping_stride,
old_offset, new_offset);
}
@@ -1066,7 +1073,7 @@ u32 intel_plane_adjust_aligned_offset(int *x, int *y,
* used. This is why the user has to pass in the pitch since it
* is specified in the rotated orientation.
*/
-static u32 intel_compute_aligned_offset(struct drm_i915_private *i915,
+static u32 intel_compute_aligned_offset(struct intel_display *display,
int *x, int *y,
const struct drm_framebuffer *fb,
int color_plane,
@@ -1081,7 +1088,7 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *i915,
unsigned int tile_size, tile_width, tile_height;
unsigned int tile_rows, tiles, pitch_tiles;
- tile_size = intel_tile_size(i915);
+ tile_size = intel_tile_size(display);
intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
if (drm_rotation_90_or_270(rotation)) {
@@ -1122,17 +1129,17 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *i915,
}
u32 intel_plane_compute_aligned_offset(int *x, int *y,
- const struct intel_plane_state *state,
+ const struct intel_plane_state *plane_state,
int color_plane)
{
- struct intel_plane *plane = to_intel_plane(state->uapi.plane);
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = state->hw.fb;
- unsigned int rotation = state->hw.rotation;
- unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;
+ struct intel_display *display = to_intel_display(plane_state);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ unsigned int pitch = plane_state->view.color_plane[color_plane].mapping_stride;
unsigned int alignment = plane->min_alignment(plane, fb, color_plane);
- return intel_compute_aligned_offset(i915, x, y, fb, color_plane,
+ return intel_compute_aligned_offset(display, x, y, fb, color_plane,
pitch, rotation, alignment);
}
@@ -1141,16 +1148,16 @@ static int intel_fb_offset_to_xy(int *x, int *y,
const struct drm_framebuffer *fb,
int color_plane)
{
- struct drm_i915_private *i915 = to_i915(fb->dev);
+ struct intel_display *display = to_intel_display(fb->dev);
unsigned int height, alignment, unused;
if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
- alignment = intel_tile_size(i915);
+ alignment = intel_tile_size(display);
else
alignment = 0;
if (alignment != 0 && fb->offsets[color_plane] % alignment) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Misaligned offset 0x%08x for color plane %d\n",
fb->offsets[color_plane], color_plane);
return -EINVAL;
@@ -1162,7 +1169,7 @@ static int intel_fb_offset_to_xy(int *x, int *y,
/* Catch potential overflows early */
if (check_add_overflow(mul_u32_u32(height, fb->pitches[color_plane]),
fb->offsets[color_plane], &unused)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Bad offset 0x%08x or pitch %d for color plane %d\n",
fb->offsets[color_plane], fb->pitches[color_plane],
color_plane);
@@ -1182,7 +1189,7 @@ static int intel_fb_offset_to_xy(int *x, int *y,
static int intel_fb_check_ccs_xy(const struct drm_framebuffer *fb, int ccs_plane, int x, int y)
{
- struct drm_i915_private *i915 = to_i915(fb->dev);
+ struct intel_display *display = to_intel_display(fb->dev);
const struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
int main_plane;
int hsub, vsub;
@@ -1216,13 +1223,12 @@ static int intel_fb_check_ccs_xy(const struct drm_framebuffer *fb, int ccs_plane
* x/y offsets must match between CCS and the main surface.
*/
if (main_x != ccs_x || main_y != ccs_y) {
- drm_dbg_kms(&i915->drm,
- "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
- main_x, main_y,
- ccs_x, ccs_y,
- intel_fb->normal_view.color_plane[main_plane].x,
- intel_fb->normal_view.color_plane[main_plane].y,
- x, y);
+ drm_dbg_kms(display->drm,
+ "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
+ main_x, main_y, ccs_x, ccs_y,
+ intel_fb->normal_view.color_plane[main_plane].x,
+ intel_fb->normal_view.color_plane[main_plane].y,
+ x, y);
return -EINVAL;
}
@@ -1231,8 +1237,8 @@ static int intel_fb_check_ccs_xy(const struct drm_framebuffer *fb, int ccs_plane
static bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
int i;
@@ -1246,7 +1252,7 @@ static bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
* Would also need to deal with the fence POT alignment
* and gen2 2KiB GTT tile size.
*/
- if (DISPLAY_VER(i915) < 4)
+ if (DISPLAY_VER(display) < 4)
return false;
/*
@@ -1258,7 +1264,7 @@ static bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
/* Linear needs a page aligned stride for remapping */
if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
- unsigned int alignment = intel_tile_size(i915) - 1;
+ unsigned int alignment = intel_tile_size(display) - 1;
for (i = 0; i < fb->format->num_planes; i++) {
if (fb->pitches[i] & alignment)
@@ -1271,12 +1277,22 @@ static bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
bool intel_fb_needs_pot_stride_remap(const struct intel_framebuffer *fb)
{
- struct drm_i915_private *i915 = to_i915(fb->base.dev);
+ struct intel_display *display = to_intel_display(fb->base.dev);
- return (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14) &&
+ return (display->platform.alderlake_p || DISPLAY_VER(display) >= 14) &&
intel_fb_uses_dpt(&fb->base);
}
+bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+
+ return DISPLAY_VER(dev_priv) < 4 ||
+ (plane->fbc && !plane_state->no_fbc_reason &&
+ plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL);
+}
+
static int intel_fb_pitch(const struct intel_framebuffer *fb, int color_plane, unsigned int rotation)
{
if (drm_rotation_90_or_270(rotation))
@@ -1318,12 +1334,13 @@ static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
static int convert_plane_offset_to_xy(const struct intel_framebuffer *fb, int color_plane,
int plane_width, int *x, int *y)
{
+ struct intel_display *display = to_intel_display(fb->base.dev);
struct drm_gem_object *obj = intel_fb_bo(&fb->base);
int ret;
ret = intel_fb_offset_to_xy(x, y, &fb->base, color_plane);
if (ret) {
- drm_dbg_kms(fb->base.dev,
+ drm_dbg_kms(display->drm,
"bad fb plane %d offset: 0x%x\n",
color_plane, fb->base.offsets[color_plane]);
return ret;
@@ -1344,7 +1361,7 @@ static int convert_plane_offset_to_xy(const struct intel_framebuffer *fb, int co
*/
if (color_plane == 0 && intel_bo_is_tiled(obj) &&
(*x + plane_width) * fb->base.format->cpp[color_plane] > fb->base.pitches[color_plane]) {
- drm_dbg_kms(fb->base.dev,
+ drm_dbg_kms(display->drm,
"bad fb plane %d offset: 0x%x\n",
color_plane, fb->base.offsets[color_plane]);
return -EINVAL;
@@ -1355,11 +1372,11 @@ static int convert_plane_offset_to_xy(const struct intel_framebuffer *fb, int co
static u32 calc_plane_aligned_offset(const struct intel_framebuffer *fb, int color_plane, int *x, int *y)
{
- struct drm_i915_private *i915 = to_i915(fb->base.dev);
- unsigned int tile_size = intel_tile_size(i915);
+ struct intel_display *display = to_intel_display(fb->base.dev);
+ unsigned int tile_size = intel_tile_size(display);
u32 offset;
- offset = intel_compute_aligned_offset(i915, x, y, &fb->base, color_plane,
+ offset = intel_compute_aligned_offset(display, x, y, &fb->base, color_plane,
fb->base.pitches[color_plane],
DRM_MODE_ROTATE_0,
tile_size);
@@ -1410,10 +1427,10 @@ plane_view_scanout_stride(const struct intel_framebuffer *fb, int color_plane,
unsigned int tile_width,
unsigned int src_stride_tiles, unsigned int dst_stride_tiles)
{
- struct drm_i915_private *i915 = to_i915(fb->base.dev);
+ struct intel_display *display = to_intel_display(fb->base.dev);
unsigned int stride_tiles;
- if ((IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14) &&
+ if ((display->platform.alderlake_p || DISPLAY_VER(display) >= 14) &&
src_stride_tiles < dst_stride_tiles)
stride_tiles = src_stride_tiles;
else
@@ -1443,23 +1460,23 @@ plane_view_linear_tiles(const struct intel_framebuffer *fb, int color_plane,
const struct fb_plane_view_dims *dims,
int x, int y)
{
- struct drm_i915_private *i915 = to_i915(fb->base.dev);
+ struct intel_display *display = to_intel_display(fb->base.dev);
unsigned int size;
size = (y + dims->height) * fb->base.pitches[color_plane] +
x * fb->base.format->cpp[color_plane];
- return DIV_ROUND_UP(size, intel_tile_size(i915));
+ return DIV_ROUND_UP(size, intel_tile_size(display));
}
-#define assign_chk_ovf(i915, var, val) ({ \
- drm_WARN_ON(&(i915)->drm, overflows_type(val, var)); \
+#define assign_chk_ovf(display, var, val) ({ \
+ drm_WARN_ON((display)->drm, overflows_type(val, var)); \
(var) = (val); \
})
-#define assign_bfld_chk_ovf(i915, var, val) ({ \
+#define assign_bfld_chk_ovf(display, var, val) ({ \
(var) = (val); \
- drm_WARN_ON(&(i915)->drm, (var) != (val)); \
+ drm_WARN_ON((display)->drm, (var) != (val)); \
(var); \
})
@@ -1468,38 +1485,38 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
u32 obj_offset, u32 gtt_offset, int x, int y,
struct intel_fb_view *view)
{
- struct drm_i915_private *i915 = to_i915(fb->base.dev);
+ struct intel_display *display = to_intel_display(fb->base.dev);
struct intel_remapped_plane_info *remap_info = &view->gtt.remapped.plane[color_plane];
struct i915_color_plane_view *color_plane_info = &view->color_plane[color_plane];
unsigned int tile_width = dims->tile_width;
unsigned int tile_height = dims->tile_height;
- unsigned int tile_size = intel_tile_size(i915);
+ unsigned int tile_size = intel_tile_size(display);
struct drm_rect r;
u32 size = 0;
- assign_bfld_chk_ovf(i915, remap_info->offset, obj_offset);
+ assign_bfld_chk_ovf(display, remap_info->offset, obj_offset);
if (intel_fb_is_gen12_ccs_aux_plane(&fb->base, color_plane)) {
remap_info->linear = 1;
- assign_chk_ovf(i915, remap_info->size,
+ assign_chk_ovf(display, remap_info->size,
plane_view_linear_tiles(fb, color_plane, dims, x, y));
} else {
remap_info->linear = 0;
- assign_chk_ovf(i915, remap_info->src_stride,
+ assign_chk_ovf(display, remap_info->src_stride,
plane_view_src_stride_tiles(fb, color_plane, dims));
- assign_chk_ovf(i915, remap_info->width,
+ assign_chk_ovf(display, remap_info->width,
plane_view_width_tiles(fb, color_plane, dims, x));
- assign_chk_ovf(i915, remap_info->height,
+ assign_chk_ovf(display, remap_info->height,
plane_view_height_tiles(fb, color_plane, dims, y));
}
if (view->gtt.type == I915_GTT_VIEW_ROTATED) {
- drm_WARN_ON(&i915->drm, remap_info->linear);
- check_array_bounds(i915, view->gtt.rotated.plane, color_plane);
+ drm_WARN_ON(display->drm, remap_info->linear);
+ check_array_bounds(display, view->gtt.rotated.plane, color_plane);
- assign_chk_ovf(i915, remap_info->dst_stride,
+ assign_chk_ovf(display, remap_info->dst_stride,
plane_view_dst_stride_tiles(fb, color_plane, remap_info->height));
/* rotate the x/y offsets to match the GTT view */
@@ -1520,9 +1537,9 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
/* rotate the tile dimensions to match the GTT view */
swap(tile_width, tile_height);
} else {
- drm_WARN_ON(&i915->drm, view->gtt.type != I915_GTT_VIEW_REMAPPED);
+ drm_WARN_ON(display->drm, view->gtt.type != I915_GTT_VIEW_REMAPPED);
- check_array_bounds(i915, view->gtt.remapped.plane, color_plane);
+ check_array_bounds(display, view->gtt.remapped.plane, color_plane);
if (view->gtt.remapped.plane_alignment) {
u32 aligned_offset = ALIGN(gtt_offset,
@@ -1556,7 +1573,7 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
dst_stride = plane_view_dst_stride_tiles(fb, color_plane, dst_stride);
- assign_chk_ovf(i915, remap_info->dst_stride, dst_stride);
+ assign_chk_ovf(display, remap_info->dst_stride, dst_stride);
color_plane_info->mapping_stride = dst_stride *
tile_width *
fb->base.format->cpp[color_plane];
@@ -1614,20 +1631,23 @@ calc_plane_normal_size(const struct intel_framebuffer *fb, int color_plane,
return tiles;
}
-static void intel_fb_view_init(struct drm_i915_private *i915, struct intel_fb_view *view,
+static void intel_fb_view_init(struct intel_display *display,
+ struct intel_fb_view *view,
enum i915_gtt_view_type view_type)
{
memset(view, 0, sizeof(*view));
view->gtt.type = view_type;
if (view_type == I915_GTT_VIEW_REMAPPED &&
- (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14))
+ (display->platform.alderlake_p || DISPLAY_VER(display) >= 14))
view->gtt.remapped.plane_alignment = SZ_2M / PAGE_SIZE;
}
bool intel_fb_supports_90_270_rotation(const struct intel_framebuffer *fb)
{
- if (DISPLAY_VER(to_i915(fb->base.dev)) >= 13)
+ struct intel_display *display = to_intel_display(fb->base.dev);
+
+ if (DISPLAY_VER(display) >= 13)
return false;
return fb->base.modifier == I915_FORMAT_MOD_Y_TILED ||
@@ -1636,11 +1656,11 @@ bool intel_fb_supports_90_270_rotation(const struct intel_framebuffer *fb)
static unsigned int intel_fb_min_alignment(const struct drm_framebuffer *fb)
{
- struct drm_i915_private *i915 = to_i915(fb->dev);
+ struct intel_display *display = to_intel_display(fb->dev);
struct intel_plane *plane;
unsigned int min_alignment = 0;
- for_each_intel_plane(&i915->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
unsigned int plane_min_alignment;
if (!drm_plane_has_format(&plane->base, fb->format->format, fb->modifier))
@@ -1648,7 +1668,7 @@ static unsigned int intel_fb_min_alignment(const struct drm_framebuffer *fb)
plane_min_alignment = plane->min_alignment(plane, fb, 0);
- drm_WARN_ON(&i915->drm, plane_min_alignment &&
+ drm_WARN_ON(display->drm, plane_min_alignment &&
!is_power_of_2(plane_min_alignment));
if (intel_plane_needs_physical(plane))
@@ -1660,25 +1680,41 @@ static unsigned int intel_fb_min_alignment(const struct drm_framebuffer *fb)
return min_alignment;
}
-int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *fb)
+static unsigned int intel_fb_vtd_guard(const struct drm_framebuffer *fb)
+{
+ struct intel_display *display = to_intel_display(fb->dev);
+ struct intel_plane *plane;
+ unsigned int vtd_guard = 0;
+
+ for_each_intel_plane(display->drm, plane) {
+ if (!drm_plane_has_format(&plane->base, fb->format->format, fb->modifier))
+ continue;
+
+ vtd_guard = max_t(unsigned int, vtd_guard, plane->vtd_guard);
+ }
+
+ return vtd_guard;
+}
+
+int intel_fill_fb_info(struct intel_display *display, struct intel_framebuffer *fb)
{
struct drm_gem_object *obj = intel_fb_bo(&fb->base);
u32 gtt_offset_rotated = 0;
u32 gtt_offset_remapped = 0;
unsigned int max_size = 0;
int i, num_planes = fb->base.format->num_planes;
- unsigned int tile_size = intel_tile_size(i915);
+ unsigned int tile_size = intel_tile_size(display);
- intel_fb_view_init(i915, &fb->normal_view, I915_GTT_VIEW_NORMAL);
+ intel_fb_view_init(display, &fb->normal_view, I915_GTT_VIEW_NORMAL);
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
intel_fb_supports_90_270_rotation(fb) &&
intel_fb_needs_pot_stride_remap(fb));
if (intel_fb_supports_90_270_rotation(fb))
- intel_fb_view_init(i915, &fb->rotated_view, I915_GTT_VIEW_ROTATED);
+ intel_fb_view_init(display, &fb->rotated_view, I915_GTT_VIEW_ROTATED);
if (intel_fb_needs_pot_stride_remap(fb))
- intel_fb_view_init(i915, &fb->remapped_view, I915_GTT_VIEW_REMAPPED);
+ intel_fb_view_init(display, &fb->remapped_view, I915_GTT_VIEW_REMAPPED);
for (i = 0; i < num_planes; i++) {
struct fb_plane_view_dims view_dims;
@@ -1694,10 +1730,24 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *
* arithmetic related to alignment and offset calculation.
*/
if (is_gen12_ccs_cc_plane(&fb->base, i)) {
- if (IS_ALIGNED(fb->base.offsets[i], 64))
- continue;
- else
+ unsigned int end;
+
+ if (!IS_ALIGNED(fb->base.offsets[i], 64)) {
+ drm_dbg_kms(display->drm,
+ "fb misaligned clear color plane %d offset (0x%x)\n",
+ i, fb->base.offsets[i]);
+ return -EINVAL;
+ }
+
+ if (check_add_overflow(fb->base.offsets[i], 64, &end)) {
+ drm_dbg_kms(display->drm,
+ "fb bad clear color plane %d offset (0x%x)\n",
+ i, fb->base.offsets[i]);
return -EINVAL;
+ }
+
+ max_size = max(max_size, DIV_ROUND_UP(end, tile_size));
+ continue;
}
intel_fb_plane_dims(fb, i, &width, &height);
@@ -1736,21 +1786,52 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *
}
if (mul_u32_u32(max_size, tile_size) > obj->size) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"fb too big for bo (need %llu bytes, have %zu bytes)\n",
mul_u32_u32(max_size, tile_size), obj->size);
return -EINVAL;
}
fb->min_alignment = intel_fb_min_alignment(&fb->base);
+ fb->vtd_guard = intel_fb_vtd_guard(&fb->base);
return 0;
}
+unsigned int intel_fb_view_vtd_guard(const struct drm_framebuffer *fb,
+ const struct intel_fb_view *view,
+ unsigned int rotation)
+{
+ unsigned int vtd_guard;
+ int color_plane;
+
+ vtd_guard = to_intel_framebuffer(fb)->vtd_guard;
+ if (!vtd_guard)
+ return 0;
+
+ for (color_plane = 0; color_plane < fb->format->num_planes; color_plane++) {
+ unsigned int stride, tile;
+
+ if (intel_fb_is_ccs_aux_plane(fb, color_plane) ||
+ is_gen12_ccs_cc_plane(fb, color_plane))
+ continue;
+
+ stride = view->color_plane[color_plane].mapping_stride;
+
+ if (drm_rotation_90_or_270(rotation))
+ tile = intel_tile_height(fb, color_plane);
+ else
+ tile = intel_tile_width_bytes(fb, color_plane);
+
+ vtd_guard = max(vtd_guard, DIV_ROUND_UP(stride, tile));
+ }
+
+ return vtd_guard;
+}
+
static void intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 =
- to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state);
struct drm_framebuffer *fb = plane_state->hw.fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
unsigned int rotation = plane_state->hw.rotation;
@@ -1759,7 +1840,7 @@ static void intel_plane_remap_gtt(struct intel_plane_state *plane_state)
unsigned int src_w, src_h;
u32 gtt_offset = 0;
- intel_fb_view_init(i915, &plane_state->view,
+ intel_fb_view_init(display, &plane_state->view,
drm_rotation_90_or_270(rotation) ? I915_GTT_VIEW_ROTATED :
I915_GTT_VIEW_REMAPPED);
@@ -1768,7 +1849,7 @@ static void intel_plane_remap_gtt(struct intel_plane_state *plane_state)
src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
- drm_WARN_ON(&i915->drm, intel_fb_is_ccs_modifier(fb->modifier));
+ drm_WARN_ON(display->drm, intel_fb_is_ccs_modifier(fb->modifier));
/* Make src coordinates relative to the viewport */
drm_rect_translate(&plane_state->uapi.src,
@@ -1810,6 +1891,42 @@ static void intel_plane_remap_gtt(struct intel_plane_state *plane_state)
}
}
+unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
+{
+ unsigned int size = 0;
+ int i;
+
+ for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
+ size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;
+
+ return size;
+}
+
+unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
+{
+ unsigned int size = 0;
+ int i;
+
+ for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
+ unsigned int plane_size;
+
+ if (rem_info->plane[i].linear)
+ plane_size = rem_info->plane[i].size;
+ else
+ plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;
+
+ if (plane_size == 0)
+ continue;
+
+ if (rem_info->plane_alignment)
+ size = ALIGN(size, rem_info->plane_alignment);
+
+ size += plane_size;
+ }
+
+ return size;
+}
+
void intel_fb_fill_view(const struct intel_framebuffer *fb, unsigned int rotation,
struct intel_fb_view *view)
{
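As a worked example of the size helpers added above (hypothetical numbers): a remapped view with a plane_alignment of 512 pages and two non-linear planes, each with a dst_stride of 16 tiles and heights of 135 and 68 tiles, contributes 16 * 135 = 2160 tiles for plane 0, is aligned up to 2560 before plane 1 is added, and ends at 2560 + 16 * 68 = 3648 tiles; intel_rotation_info_size() sums dst_stride * width per plane the same way, but without the alignment step.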
@@ -1821,8 +1938,39 @@ void intel_fb_fill_view(const struct intel_framebuffer *fb, unsigned int rotatio
*view = fb->normal_view;
}
+/*
+ * Convert the x/y offsets into a linear offset.
+ * Only valid with 0/180 degree rotation, which is fine since linear
+ * offset is only used with linear buffers on pre-hsw and tiled buffers
+ * with gen2/3, and 90/270 degree rotations aren't supported on any of them.
+ */
+u32 intel_fb_xy_to_linear(int x, int y,
+ const struct intel_plane_state *plane_state,
+ int color_plane)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int cpp = fb->format->cpp[color_plane];
+ unsigned int pitch = plane_state->view.color_plane[color_plane].mapping_stride;
+
+ return y * pitch + x * cpp;
+}
+
+/*
+ * Add the x/y offsets derived from fb->offsets[] to the user
+ * specified plane src x/y offsets. The resulting x/y offsets
+ * specify the start of scanout from the beginning of the gtt mapping.
+ */
+void intel_add_fb_offsets(int *x, int *y,
+ const struct intel_plane_state *plane_state,
+ int color_plane)
+
+{
+ *x += plane_state->view.color_plane[color_plane].x;
+ *y += plane_state->view.color_plane[color_plane].y;
+}
+
static
-u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
+u32 intel_fb_max_stride(struct intel_display *display,
u32 pixel_format, u64 modifier)
{
/*
@@ -1831,10 +1979,10 @@ u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
*
* The new CCS hash mode makes remapping impossible
*/
- if (DISPLAY_VER(dev_priv) < 4 || intel_fb_is_ccs_modifier(modifier) ||
- intel_fb_modifier_uses_dpt(dev_priv, modifier))
- return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
- else if (DISPLAY_VER(dev_priv) >= 7)
+ if (DISPLAY_VER(display) < 4 || intel_fb_is_ccs_modifier(modifier) ||
+ intel_fb_modifier_uses_dpt(display, modifier))
+ return intel_plane_fb_max_stride(display->drm, pixel_format, modifier);
+ else if (DISPLAY_VER(display) >= 7)
return 256 * 1024;
else
return 128 * 1024;
@@ -1843,11 +1991,11 @@ u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
static unsigned int
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
{
- struct drm_i915_private *dev_priv = to_i915(fb->dev);
+ struct intel_display *display = to_intel_display(fb->dev);
unsigned int tile_width;
if (is_surface_linear(fb, color_plane)) {
- unsigned int max_stride = intel_plane_fb_max_stride(dev_priv,
+ unsigned int max_stride = intel_plane_fb_max_stride(display->drm,
fb->format->format,
fb->modifier);
@@ -1857,7 +2005,7 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
*/
if (fb->pitches[color_plane] > max_stride &&
!intel_fb_is_ccs_modifier(fb->modifier))
- return intel_tile_size(dev_priv);
+ return intel_tile_size(display);
else
return 64;
}
@@ -1868,7 +2016,7 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
* On TGL the surface stride must be 4 tile aligned, mapped by
* one 64 byte cacheline on the CCS AUX surface.
*/
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
tile_width *= 4;
/*
* Display WA #0531: skl,bxt,kbl,glk
@@ -1879,7 +2027,7 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
* require the entire fb to accommodate that to avoid
* potential runtime errors at plane configuration time.
*/
- else if ((DISPLAY_VER(dev_priv) == 9 || IS_GEMINILAKE(dev_priv)) &&
+ else if ((DISPLAY_VER(display) == 9 || display->platform.geminilake) &&
color_plane == 0 && fb->width > 3840)
tile_width *= 4;
}
@@ -1888,6 +2036,7 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
static int intel_plane_check_stride(const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int rotation = plane_state->hw.rotation;
@@ -1909,7 +2058,7 @@ static int intel_plane_check_stride(const struct intel_plane_state *plane_state)
fb->modifier, rotation);
if (stride > max_stride) {
- drm_dbg_kms(plane->base.dev,
+ drm_dbg_kms(display->drm,
"[FB:%d] stride (%d) exceeds [PLANE:%d:%s] max stride (%d)\n",
fb->base.id, stride,
plane->base.base.id, plane->base.name, max_stride);
@@ -2058,13 +2207,13 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
struct drm_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct drm_i915_private *dev_priv = to_i915(obj->dev);
+ struct intel_display *display = to_intel_display(obj->dev);
struct drm_framebuffer *fb = &intel_fb->base;
u32 max_stride;
int ret = -EINVAL;
int i;
- ret = intel_fb_bo_framebuffer_init(intel_fb, obj, mode_cmd);
+ ret = intel_fb_bo_framebuffer_init(fb, obj, mode_cmd);
if (ret)
return ret;
@@ -2075,19 +2224,19 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
}
ret = -EINVAL;
- if (!drm_any_plane_has_format(&dev_priv->drm,
+ if (!drm_any_plane_has_format(display->drm,
mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"unsupported pixel format %p4cc / modifier 0x%llx\n",
&mode_cmd->pixel_format, mode_cmd->modifier[0]);
goto err_frontbuffer_put;
}
- max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
+ max_stride = intel_fb_max_stride(display, mode_cmd->pixel_format,
mode_cmd->modifier[0]);
if (mode_cmd->pitches[0] > max_stride) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"%s pitch (%u) must be at most %d\n",
mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
"tiled" : "linear",
@@ -2097,26 +2246,25 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
if (mode_cmd->offsets[0] != 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"plane 0 offset (0x%08x) must be 0\n",
mode_cmd->offsets[0]);
goto err_frontbuffer_put;
}
- drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(display->drm, fb, mode_cmd);
for (i = 0; i < fb->format->num_planes; i++) {
unsigned int stride_alignment;
if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
- drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
- i);
+ drm_dbg_kms(display->drm, "bad plane %d handle\n", i);
goto err_frontbuffer_put;
}
stride_alignment = intel_fb_stride_alignment(fb, i);
if (fb->pitches[i] & (stride_alignment - 1)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"plane %d pitch (%d) must be at least %u byte aligned\n",
i, fb->pitches[i], stride_alignment);
goto err_frontbuffer_put;
@@ -2126,10 +2274,9 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
unsigned int ccs_aux_stride = gen12_ccs_aux_stride(intel_fb, i);
if (fb->pitches[i] != ccs_aux_stride) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"ccs aux plane %d pitch (%d) must be %d\n",
- i,
- fb->pitches[i], ccs_aux_stride);
+ i, fb->pitches[i], ccs_aux_stride);
goto err_frontbuffer_put;
}
}
@@ -2137,7 +2284,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
fb->obj[i] = obj;
}
- ret = intel_fill_fb_info(dev_priv, intel_fb);
+ ret = intel_fill_fb_info(display, intel_fb);
if (ret)
goto err_frontbuffer_put;
@@ -2146,7 +2293,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
vm = intel_dpt_create(intel_fb);
if (IS_ERR(vm)) {
- drm_dbg_kms(&dev_priv->drm, "failed to create DPT\n");
+ drm_dbg_kms(display->drm, "failed to create DPT\n");
ret = PTR_ERR(vm);
goto err_frontbuffer_put;
}
@@ -2154,9 +2301,9 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
intel_fb->dpt_vm = vm;
}
- ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
+ ret = drm_framebuffer_init(display->drm, fb, &intel_fb_funcs);
if (ret) {
- drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
+ drm_err(display->drm, "framebuffer init failed %d\n", ret);
goto err_free_dpt;
}
@@ -2180,9 +2327,8 @@ intel_user_framebuffer_create(struct drm_device *dev,
struct drm_framebuffer *fb;
struct drm_gem_object *obj;
struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
- struct drm_i915_private *i915 = to_i915(dev);
- obj = intel_fb_bo_lookup_valid_bo(i915, filp, &mode_cmd);
+ obj = intel_fb_bo_lookup_valid_bo(dev, filp, &mode_cmd);
if (IS_ERR(obj))
return ERR_CAST(obj);
diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h
index d78993e5eb62..bdd76b372957 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fb.h
@@ -13,13 +13,14 @@ struct drm_device;
struct drm_file;
struct drm_framebuffer;
struct drm_gem_object;
-struct drm_i915_gem_object;
-struct drm_i915_private;
struct drm_mode_fb_cmd2;
+struct intel_display;
struct intel_fb_view;
struct intel_framebuffer;
struct intel_plane;
struct intel_plane_state;
+struct intel_remapped_info;
+struct intel_rotation_info;
#define INTEL_PLANE_CAP_NONE 0
#define INTEL_PLANE_CAP_CCS_RC BIT(0)
@@ -41,7 +42,7 @@ bool intel_fb_is_tile4_modifier(u64 modifier);
bool intel_fb_is_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane);
int intel_fb_rc_ccs_cc_plane(const struct drm_framebuffer *fb);
-u64 *intel_fb_plane_get_modifiers(struct drm_i915_private *i915,
+u64 *intel_fb_plane_get_modifiers(struct intel_display *display,
u8 plane_caps);
bool intel_fb_plane_supports_modifier(struct intel_plane *plane, u64 modifier);
@@ -58,7 +59,7 @@ int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane);
int skl_ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane);
int skl_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane);
-unsigned int intel_tile_size(const struct drm_i915_private *i915);
+unsigned int intel_tile_size(struct intel_display *display);
unsigned int intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane);
unsigned int intel_tile_height(const struct drm_framebuffer *fb, int color_plane);
unsigned int intel_tile_row_size(const struct drm_framebuffer *fb, int color_plane);
@@ -70,21 +71,35 @@ void intel_fb_plane_get_subsampling(int *hsub, int *vsub,
int color_plane);
u32 intel_plane_adjust_aligned_offset(int *x, int *y,
- const struct intel_plane_state *state,
+ const struct intel_plane_state *plane_state,
int color_plane,
u32 old_offset, u32 new_offset);
u32 intel_plane_compute_aligned_offset(int *x, int *y,
- const struct intel_plane_state *state,
+ const struct intel_plane_state *plane_state,
int color_plane);
bool intel_fb_needs_pot_stride_remap(const struct intel_framebuffer *fb);
+bool intel_plane_uses_fence(const struct intel_plane_state *plane_state);
bool intel_fb_supports_90_270_rotation(const struct intel_framebuffer *fb);
-int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *fb);
+unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
+unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info);
+
+int intel_fill_fb_info(struct intel_display *display, struct intel_framebuffer *fb);
void intel_fb_fill_view(const struct intel_framebuffer *fb, unsigned int rotation,
struct intel_fb_view *view);
+unsigned int intel_fb_view_vtd_guard(const struct drm_framebuffer *fb,
+ const struct intel_fb_view *view,
+ unsigned int rotation);
int intel_plane_compute_gtt(struct intel_plane_state *plane_state);
+unsigned int intel_fb_xy_to_linear(int x, int y,
+ const struct intel_plane_state *plane_state,
+ int color_plane);
+void intel_add_fb_offsets(int *x, int *y,
+ const struct intel_plane_state *plane_state,
+ int color_plane);
+
int intel_framebuffer_init(struct intel_framebuffer *ifb,
struct drm_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd);
@@ -96,7 +111,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
const struct drm_mode_fb_cmd2 *user_mode_cmd);
-bool intel_fb_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier);
+bool intel_fb_modifier_uses_dpt(struct intel_display *display, u64 modifier);
bool intel_fb_uses_dpt(const struct drm_framebuffer *fb);
unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier);
diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.c b/drivers/gpu/drm/i915/display/intel_fb_bo.c
index 810ca6ff8640..3d338a728354 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_bo.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_bo.c
@@ -8,6 +8,7 @@
#include "gem/i915_gem_object.h"
#include "i915_drv.h"
+#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_bo.h"
@@ -16,12 +17,12 @@ void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj)
/* Nothing to do for i915 */
}
-int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
+int intel_fb_bo_framebuffer_init(struct drm_framebuffer *fb,
struct drm_gem_object *_obj,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_i915_gem_object *obj = to_intel_bo(_obj);
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_display *display = to_intel_display(obj->base.dev);
unsigned int tiling, stride;
i915_gem_object_lock(obj, NULL);
@@ -36,7 +37,7 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
*/
if (tiling != I915_TILING_NONE &&
tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"tiling_mode doesn't match fb modifier\n");
return -EINVAL;
}
@@ -44,7 +45,7 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
if (tiling == I915_TILING_X) {
mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
} else if (tiling == I915_TILING_Y) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"No Y tiling for legacy addfb\n");
return -EINVAL;
}
@@ -54,9 +55,9 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
* gen2/3 display engine uses the fence if present,
* so the tiling mode must match the fb modifier exactly.
*/
- if (DISPLAY_VER(i915) < 4 &&
+ if (DISPLAY_VER(display) < 4 &&
tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"tiling_mode must match fb modifier exactly on gen2/3\n");
return -EINVAL;
}
@@ -66,7 +67,7 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
* the fb pitch and fence stride match.
*/
if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"pitch (%d) must match tiling stride (%d)\n",
mode_cmd->pitches[0], stride);
return -EINVAL;
@@ -76,10 +77,11 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
}
struct drm_gem_object *
-intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
+intel_fb_bo_lookup_valid_bo(struct drm_device *drm,
struct drm_file *filp,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
+ struct drm_i915_private *i915 = to_i915(drm);
struct drm_i915_gem_object *obj;
obj = i915_gem_object_lookup(filp, mode_cmd->handles[0]);
diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.h b/drivers/gpu/drm/i915/display/intel_fb_bo.h
index e71acd1bcb24..eefcb05a99f0 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_bo.h
+++ b/drivers/gpu/drm/i915/display/intel_fb_bo.h
@@ -6,20 +6,20 @@
#ifndef __INTEL_FB_BO_H__
#define __INTEL_FB_BO_H__
+struct drm_device;
struct drm_file;
+struct drm_framebuffer;
struct drm_gem_object;
-struct drm_i915_private;
struct drm_mode_fb_cmd2;
-struct intel_framebuffer;
void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj);
-int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
+int intel_fb_bo_framebuffer_init(struct drm_framebuffer *fb,
struct drm_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_gem_object *
-intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
+intel_fb_bo_lookup_valid_bo(struct drm_device *drm,
struct drm_file *filp,
const struct drm_mode_fb_cmd2 *user_mode_cmd);
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index d3a86f9c6bc8..30ac9b089ad6 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -25,6 +25,7 @@ intel_fb_pin_to_dpt(const struct drm_framebuffer *fb,
struct i915_address_space *vm)
{
struct drm_device *dev = fb->dev;
+ struct intel_display *display = to_intel_display(dev);
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_gem_object *_obj = intel_fb_bo(fb);
struct drm_i915_gem_object *obj = to_intel_bo(_obj);
@@ -42,7 +43,7 @@ intel_fb_pin_to_dpt(const struct drm_framebuffer *fb,
if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
return ERR_PTR(-EINVAL);
- atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
+ atomic_inc(&display->restore.pending_fb_pin);
for_i915_gem_ww(&ww, ret, true) {
ret = i915_gem_object_lock(obj, &ww);
@@ -97,7 +98,7 @@ intel_fb_pin_to_dpt(const struct drm_framebuffer *fb,
i915_vma_get(vma);
err:
- atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
+ atomic_dec(&display->restore.pending_fb_pin);
return vma;
}
@@ -107,10 +108,12 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
const struct i915_gtt_view *view,
unsigned int alignment,
unsigned int phys_alignment,
+ unsigned int vtd_guard,
bool uses_fence,
unsigned long *out_flags)
{
struct drm_device *dev = fb->dev;
+ struct intel_display *display = to_intel_display(dev);
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_gem_object *_obj = intel_fb_bo(fb);
struct drm_i915_gem_object *obj = to_intel_bo(_obj);
@@ -126,14 +129,6 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
return ERR_PTR(-EINVAL);
- /* Note that the w/a also requires 64 PTE of padding following the
- * bo. We currently fill all unused PTE with the shadow page and so
- * we should always have valid PTE following the scanout preventing
- * the VT-d warning.
- */
- if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
- alignment = 256 * 1024;
-
/*
* Global gtt pte registers are special registers which actually forward
* writes to a chunk of system memory. Which means that there is no risk
@@ -143,7 +138,7 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
*/
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
+ atomic_inc(&display->restore.pending_fb_pin);
/*
* Valleyview is definitely limited to scanning out the first
@@ -170,7 +165,7 @@ retry:
goto err;
vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment,
- view, pinctl);
+ vtd_guard, view, pinctl);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_unpin;
@@ -219,7 +214,7 @@ err:
if (ret)
vma = ERR_PTR(ret);
- atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
+ atomic_dec(&display->restore.pending_fb_pin);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return vma;
}
@@ -252,7 +247,16 @@ intel_plane_fb_min_phys_alignment(const struct intel_plane_state *plane_state)
return plane->min_alignment(plane, fb, 0);
}
-int intel_plane_pin_fb(struct intel_plane_state *plane_state)
+static unsigned int
+intel_plane_fb_vtd_guard(const struct intel_plane_state *plane_state)
+{
+ return intel_fb_view_vtd_guard(plane_state->hw.fb,
+ &plane_state->view,
+ plane_state->hw.rotation);
+}
+
+int intel_plane_pin_fb(struct intel_plane_state *plane_state,
+ const struct intel_plane_state *old_plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
const struct intel_framebuffer *fb =
@@ -263,6 +267,7 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state)
vma = intel_fb_pin_to_ggtt(&fb->base, &plane_state->view.gtt,
intel_plane_fb_min_alignment(plane_state),
intel_plane_fb_min_phys_alignment(plane_state),
+ intel_plane_fb_vtd_guard(plane_state),
intel_plane_uses_fence(plane_state),
&plane_state->flags);
if (IS_ERR(vma))
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.h b/drivers/gpu/drm/i915/display/intel_fb_pin.h
index ac0319b53af0..01770dbba2e0 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.h
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.h
@@ -18,12 +18,14 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
const struct i915_gtt_view *view,
unsigned int alignment,
unsigned int phys_alignment,
+ unsigned int vtd_guard,
bool uses_fence,
unsigned long *out_flags);
void intel_fb_unpin_vma(struct i915_vma *vma, unsigned long flags);
-int intel_plane_pin_fb(struct intel_plane_state *plane_state);
+int intel_plane_pin_fb(struct intel_plane_state *new_plane_state,
+ const struct intel_plane_state *old_plane_state);
void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index df05904bac8a..b6978135e8ad 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -88,6 +88,7 @@ struct intel_fbc_state {
u16 override_cfb_stride;
u16 interval;
s8 fence_id;
+ struct drm_rect dirty_rect;
};
struct intel_fbc {
@@ -215,11 +216,9 @@ static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_s
*/
static unsigned int intel_fbc_max_cfb_height(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 8)
return 2560;
- else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915))
+ else if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
return 2048;
else
return 1536;
@@ -269,9 +268,8 @@ static bool intel_fbc_has_fences(struct intel_display *display)
static u32 i8xx_fbc_ctl(struct intel_fbc *fbc)
{
- const struct intel_fbc_state *fbc_state = &fbc->state;
struct intel_display *display = fbc->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
+ const struct intel_fbc_state *fbc_state = &fbc->state;
unsigned int cfb_stride;
u32 fbc_ctl;
@@ -287,7 +285,7 @@ static u32 i8xx_fbc_ctl(struct intel_fbc *fbc)
FBC_CTL_INTERVAL(fbc_state->interval) |
FBC_CTL_STRIDE(cfb_stride);
- if (IS_I945GM(i915))
+ if (display->platform.i945gm)
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
if (fbc_state->fence_id >= 0)
@@ -333,8 +331,8 @@ static void i8xx_fbc_deactivate(struct intel_fbc *fbc)
static void i8xx_fbc_activate(struct intel_fbc *fbc)
{
- const struct intel_fbc_state *fbc_state = &fbc->state;
struct intel_display *display = fbc->display;
+ const struct intel_fbc_state *fbc_state = &fbc->state;
int i;
/* Clear old tags */
@@ -365,12 +363,12 @@ static bool i8xx_fbc_is_compressing(struct intel_fbc *fbc)
static void i8xx_fbc_nuke(struct intel_fbc *fbc)
{
+ struct intel_display *display = fbc->display;
struct intel_fbc_state *fbc_state = &fbc->state;
enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
- struct drm_i915_private *dev_priv = to_i915(fbc->display->drm);
- intel_de_write_fw(dev_priv, DSPADDR(dev_priv, i9xx_plane),
- intel_de_read_fw(dev_priv, DSPADDR(dev_priv, i9xx_plane)));
+ intel_de_write_fw(display, DSPADDR(display, i9xx_plane),
+ intel_de_read_fw(display, DSPADDR(display, i9xx_plane)));
}
static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
@@ -386,9 +384,9 @@ static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
range_overflows_end_t(u64, i915_gem_stolen_area_address(i915),
i915_gem_stolen_node_offset(&fbc->compressed_llb),
U32_MAX));
- intel_de_write(i915, FBC_CFB_BASE,
+ intel_de_write(display, FBC_CFB_BASE,
i915_gem_stolen_node_address(i915, &fbc->compressed_fb));
- intel_de_write(i915, FBC_LL_BASE,
+ intel_de_write(display, FBC_LL_BASE,
i915_gem_stolen_node_address(i915, &fbc->compressed_llb));
}
@@ -403,12 +401,12 @@ static const struct intel_fbc_funcs i8xx_fbc_funcs = {
static void i965_fbc_nuke(struct intel_fbc *fbc)
{
+ struct intel_display *display = fbc->display;
struct intel_fbc_state *fbc_state = &fbc->state;
enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
- struct drm_i915_private *dev_priv = to_i915(fbc->display->drm);
- intel_de_write_fw(dev_priv, DSPSURF(dev_priv, i9xx_plane),
- intel_de_read_fw(dev_priv, DSPSURF(dev_priv, i9xx_plane)));
+ intel_de_write_fw(display, DSPSURF(display, i9xx_plane),
+ intel_de_read_fw(display, DSPSURF(display, i9xx_plane)));
}
static const struct intel_fbc_funcs i965_fbc_funcs = {
@@ -437,15 +435,14 @@ static u32 g4x_dpfc_ctl_limit(struct intel_fbc *fbc)
static u32 g4x_dpfc_ctl(struct intel_fbc *fbc)
{
- const struct intel_fbc_state *fbc_state = &fbc->state;
struct intel_display *display = fbc->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
+ const struct intel_fbc_state *fbc_state = &fbc->state;
u32 dpfc_ctl;
dpfc_ctl = g4x_dpfc_ctl_limit(fbc) |
DPFC_CTL_PLANE_G4X(fbc_state->plane->i9xx_plane);
- if (IS_G4X(i915))
+ if (display->platform.g4x)
dpfc_ctl |= DPFC_CTL_SR_EN;
if (fbc_state->fence_id >= 0) {
@@ -460,8 +457,8 @@ static u32 g4x_dpfc_ctl(struct intel_fbc *fbc)
static void g4x_fbc_activate(struct intel_fbc *fbc)
{
- const struct intel_fbc_state *fbc_state = &fbc->state;
struct intel_display *display = fbc->display;
+ const struct intel_fbc_state *fbc_state = &fbc->state;
intel_de_write(display, DPFC_FENCE_YOFF,
fbc_state->fence_y_offset);
@@ -512,8 +509,8 @@ static const struct intel_fbc_funcs g4x_fbc_funcs = {
static void ilk_fbc_activate(struct intel_fbc *fbc)
{
- struct intel_fbc_state *fbc_state = &fbc->state;
struct intel_display *display = fbc->display;
+ struct intel_fbc_state *fbc_state = &fbc->state;
intel_de_write(display, ILK_DPFC_FENCE_YOFF(fbc->id),
fbc_state->fence_y_offset);
@@ -527,6 +524,9 @@ static void ilk_fbc_deactivate(struct intel_fbc *fbc)
struct intel_display *display = fbc->display;
u32 dpfc_ctl;
+ if (HAS_FBC_DIRTY_RECT(display))
+ intel_de_write(display, XE3_FBC_DIRTY_CTL(fbc->id), 0);
+
/* Disable compression */
dpfc_ctl = intel_de_read(display, ILK_DPFC_CONTROL(fbc->id));
if (dpfc_ctl & DPFC_CTL_EN) {
@@ -564,8 +564,8 @@ static const struct intel_fbc_funcs ilk_fbc_funcs = {
static void snb_fbc_program_fence(struct intel_fbc *fbc)
{
- const struct intel_fbc_state *fbc_state = &fbc->state;
struct intel_display *display = fbc->display;
+ const struct intel_fbc_state *fbc_state = &fbc->state;
u32 ctl = 0;
if (fbc_state->fence_id >= 0)
@@ -601,8 +601,8 @@ static const struct intel_fbc_funcs snb_fbc_funcs = {
static void glk_fbc_program_cfb_stride(struct intel_fbc *fbc)
{
- const struct intel_fbc_state *fbc_state = &fbc->state;
struct intel_display *display = fbc->display;
+ const struct intel_fbc_state *fbc_state = &fbc->state;
u32 val = 0;
if (fbc_state->override_cfb_stride)
@@ -614,8 +614,8 @@ static void glk_fbc_program_cfb_stride(struct intel_fbc *fbc)
static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc)
{
- const struct intel_fbc_state *fbc_state = &fbc->state;
struct intel_display *display = fbc->display;
+ const struct intel_fbc_state *fbc_state = &fbc->state;
u32 val = 0;
/* Display WA #0529: skl, kbl, bxt. */
@@ -630,14 +630,13 @@ static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc)
static u32 ivb_dpfc_ctl(struct intel_fbc *fbc)
{
- const struct intel_fbc_state *fbc_state = &fbc->state;
struct intel_display *display = fbc->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
+ const struct intel_fbc_state *fbc_state = &fbc->state;
u32 dpfc_ctl;
dpfc_ctl = g4x_dpfc_ctl_limit(fbc);
- if (IS_IVYBRIDGE(i915))
+ if (display->platform.ivybridge)
dpfc_ctl |= DPFC_CTL_PLANE_IVB(fbc_state->plane->i9xx_plane);
if (DISPLAY_VER(display) >= 20)
@@ -670,6 +669,10 @@ static void ivb_fbc_activate(struct intel_fbc *fbc)
if (DISPLAY_VER(display) >= 20)
intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
+ if (HAS_FBC_DIRTY_RECT(display))
+ intel_de_write(display, XE3_FBC_DIRTY_CTL(fbc->id),
+ FBC_DIRTY_RECT_EN);
+
intel_de_write(display, ILK_DPFC_CONTROL(fbc->id),
DPFC_CTL_EN | dpfc_ctl);
}
@@ -739,8 +742,19 @@ static void intel_fbc_nuke(struct intel_fbc *fbc)
static void intel_fbc_activate(struct intel_fbc *fbc)
{
+ struct intel_display *display = fbc->display;
+
lockdep_assert_held(&fbc->lock);
+ /* only the fence can change for a flip nuke */
+ if (fbc->active && !intel_fbc_has_fences(display))
+ return;
+ /*
+ * In case of FBC dirty rect, any updates to the FBC registers will
+ * trigger the nuke.
+ */
+ drm_WARN_ON(display->drm, fbc->active && HAS_FBC_DIRTY_RECT(display));
+
intel_fbc_hw_activate(fbc);
intel_fbc_nuke(fbc);
@@ -759,9 +773,7 @@ static void intel_fbc_deactivate(struct intel_fbc *fbc, const char *reason)
static u64 intel_fbc_cfb_base_max(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (DISPLAY_VER(display) >= 5 || IS_G4X(i915))
+ if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
return BIT_ULL(28);
else
return BIT_ULL(32);
@@ -776,8 +788,8 @@ static u64 intel_fbc_stolen_end(struct intel_display *display)
* reserved range size, so it always assumes the maximum (8mb) is used.
* If we enable FBC using a CFB on that memory range we'll get FIFO
* underruns, even if that range is not reserved by the BIOS. */
- if (IS_BROADWELL(i915) ||
- (DISPLAY_VER(display) == 9 && !IS_BROXTON(i915)))
+ if (display->platform.broadwell ||
+ (DISPLAY_VER(display) == 9 && !display->platform.broxton))
end = i915_gem_stolen_area_size(i915) - 8 * 1024 * 1024;
else
end = U64_MAX;
@@ -792,10 +804,8 @@ static int intel_fbc_min_limit(const struct intel_plane_state *plane_state)
static int intel_fbc_max_limit(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
/* WaFbcOnly1to1Ratio:ctg */
- if (IS_G4X(i915))
+ if (display->platform.g4x)
return 1;
/*
@@ -843,7 +853,7 @@ static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
drm_WARN_ON(display->drm,
i915_gem_stolen_node_allocated(&fbc->compressed_llb));
- if (DISPLAY_VER(display) < 5 && !IS_G4X(i915)) {
+ if (DISPLAY_VER(display) < 5 && !display->platform.g4x) {
ret = i915_gem_stolen_insert_node(i915, &fbc->compressed_llb,
4096, 4096);
if (ret)
@@ -882,9 +892,8 @@ static void intel_fbc_program_cfb(struct intel_fbc *fbc)
static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
{
struct intel_display *display = fbc->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
- if (IS_SKYLAKE(i915) || IS_BROXTON(i915)) {
+ if (display->platform.skylake || display->platform.broxton) {
/*
* WaFbcHighMemBwCorruptionAvoidance:skl,bxt
* Display WA #0883: skl,bxt
@@ -893,8 +902,8 @@ static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
0, DPFC_DISABLE_DUMMY0);
}
- if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) ||
- IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) {
+ if (display->platform.skylake || display->platform.kabylake ||
+ display->platform.coffeelake || display->platform.cometlake) {
/*
* WaFbcNukeOnHostModify:skl,kbl,cfl
* Display WA #0873: skl,kbl,cfl
@@ -909,7 +918,7 @@ static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
0, DPFC_CHICKEN_COMP_DUMMY_PIXEL);
/* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,adlp,mtl */
- if (DISPLAY_VER(display) >= 11 && !IS_DG2(i915))
+ if (DISPLAY_VER(display) >= 11 && !display->platform.dg2)
intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
0, DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
}
@@ -986,13 +995,12 @@ static bool icl_fbc_stride_is_valid(const struct intel_plane_state *plane_state)
static bool stride_is_valid(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (DISPLAY_VER(display) >= 11)
return icl_fbc_stride_is_valid(plane_state);
else if (DISPLAY_VER(display) >= 9)
return skl_fbc_stride_is_valid(plane_state);
- else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915))
+ else if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
return g4x_fbc_stride_is_valid(plane_state);
else if (DISPLAY_VER(display) == 4)
return i965_fbc_stride_is_valid(plane_state);
@@ -1023,7 +1031,6 @@ static bool i8xx_fbc_pixel_format_is_valid(const struct intel_plane_state *plane
static bool g4x_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct drm_framebuffer *fb = plane_state->hw.fb;
switch (fb->format->format) {
@@ -1032,7 +1039,7 @@ static bool g4x_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_
return true;
case DRM_FORMAT_RGB565:
/* WaFbcOnly1to1Ratio:ctg */
- if (IS_G4X(i915))
+ if (display->platform.g4x)
return false;
return true;
default:
@@ -1059,11 +1066,10 @@ static bool lnl_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_
static bool pixel_format_is_valid(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (DISPLAY_VER(display) >= 20)
return lnl_fbc_pixel_format_is_valid(plane_state);
- else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915))
+ else if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
return g4x_fbc_pixel_format_is_valid(plane_state);
else
return i8xx_fbc_pixel_format_is_valid(plane_state);
@@ -1094,11 +1100,10 @@ static bool skl_fbc_rotation_is_valid(const struct intel_plane_state *plane_stat
static bool rotation_is_valid(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (DISPLAY_VER(display) >= 9)
return skl_fbc_rotation_is_valid(plane_state);
- else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915))
+ else if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
return g4x_fbc_rotation_is_valid(plane_state);
else
return i8xx_fbc_rotation_is_valid(plane_state);
@@ -1107,8 +1112,6 @@ static bool rotation_is_valid(const struct intel_plane_state *plane_state)
static void intel_fbc_max_surface_size(struct intel_display *display,
unsigned int *w, unsigned int *h)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 11) {
*w = 8192;
*h = 4096;
@@ -1118,7 +1121,7 @@ static void intel_fbc_max_surface_size(struct intel_display *display,
} else if (DISPLAY_VER(display) >= 7) {
*w = 4096;
*h = 4096;
- } else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915)) {
+ } else if (DISPLAY_VER(display) >= 5 || display->platform.g4x) {
*w = 4096;
*h = 2048;
} else {
@@ -1151,15 +1154,13 @@ static bool intel_fbc_surface_size_ok(const struct intel_plane_state *plane_stat
static void intel_fbc_max_plane_size(struct intel_display *display,
unsigned int *w, unsigned int *h)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 10) {
*w = 5120;
*h = 4096;
- } else if (DISPLAY_VER(display) >= 8 || IS_HASWELL(i915)) {
+ } else if (DISPLAY_VER(display) >= 8 || display->platform.haswell) {
*w = 4096;
*h = 4096;
- } else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915)) {
+ } else if (DISPLAY_VER(display) >= 5 || display->platform.g4x) {
*w = 4096;
*h = 2048;
} else {
@@ -1203,6 +1204,74 @@ static bool tiling_is_valid(const struct intel_plane_state *plane_state)
return i8xx_fbc_tiling_valid(plane_state);
}
+static void
+intel_fbc_invalidate_dirty_rect(struct intel_fbc *fbc)
+{
+ lockdep_assert_held(&fbc->lock);
+
+ fbc->state.dirty_rect = DRM_RECT_INIT(0, 0, 0, 0);
+}
+
+static void
+intel_fbc_program_dirty_rect(struct intel_dsb *dsb, struct intel_fbc *fbc,
+ const struct drm_rect *fbc_dirty_rect)
+{
+ struct intel_display *display = fbc->display;
+
+ drm_WARN_ON(display->drm, fbc_dirty_rect->y2 == 0);
+
+ intel_de_write_dsb(display, dsb, XE3_FBC_DIRTY_RECT(fbc->id),
+ FBC_DIRTY_RECT_START_LINE(fbc_dirty_rect->y1) |
+ FBC_DIRTY_RECT_END_LINE(fbc_dirty_rect->y2 - 1));
+}
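As a standalone sketch (assumed field layout, matching the XE3_FBC_DIRTY_RECT definitions the patch adds in intel_fbc_regs.h further below): the value programmed above packs the first dirty scanline into bits 15:0 and the inclusive last scanline into bits 31:16.

static unsigned int example_fbc_dirty_rect_val(int y1, int y2)
{
	unsigned int start = y1 & 0xffff;	/* FBC_DIRTY_RECT_START_LINE */
	unsigned int end = (y2 - 1) & 0xffff;	/* y2 is exclusive in struct drm_rect */

	return (end << 16) | start;	/* e.g. y1 = 100, y2 = 200 -> 0x00c70064 */
}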
+
+static void
+intel_fbc_dirty_rect_update(struct intel_dsb *dsb, struct intel_fbc *fbc)
+{
+ const struct drm_rect *fbc_dirty_rect = &fbc->state.dirty_rect;
+
+ lockdep_assert_held(&fbc->lock);
+
+ if (!drm_rect_visible(fbc_dirty_rect))
+ return;
+
+ intel_fbc_program_dirty_rect(dsb, fbc, fbc_dirty_rect);
+}
+
+void
+intel_fbc_dirty_rect_update_noarm(struct intel_dsb *dsb,
+ struct intel_plane *plane)
+{
+ struct intel_display *display = to_intel_display(plane);
+ struct intel_fbc *fbc = plane->fbc;
+
+ if (!HAS_FBC_DIRTY_RECT(display))
+ return;
+
+ mutex_lock(&fbc->lock);
+
+ if (fbc->state.plane == plane)
+ intel_fbc_dirty_rect_update(dsb, fbc);
+
+ mutex_unlock(&fbc->lock);
+}
+
+static void
+intel_fbc_hw_initialize_dirty_rect(struct intel_fbc *fbc,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_rect src;
+
+ /*
+ * Initialize the FBC HW with the whole plane area as the dirty rect.
+ * This ensures that valid coordinates are written to the HW as the
+ * dirty rect.
+ */
+ drm_rect_fp_to_int(&src, &plane_state->uapi.src);
+
+ intel_fbc_program_dirty_rect(NULL, fbc, &src);
+}
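A quick standalone example of the fixed-point conversion used above (plane_state->uapi.src is in 16.16 format); the 1920x1080 source size is hypothetical.

/* Requires <drm/drm_rect.h> for DRM_RECT_INIT() and drm_rect_fp_to_int(). */
struct drm_rect src_fp = DRM_RECT_INIT(0, 0, 1920 << 16, 1080 << 16);
struct drm_rect src;

drm_rect_fp_to_int(&src, &src_fp);	/* src == (0, 0)-(1920, 1080) */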
+
static void intel_fbc_update_state(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_plane *plane)
@@ -1276,6 +1345,62 @@ static bool intel_fbc_is_ok(const struct intel_plane_state *plane_state)
intel_fbc_is_cfb_ok(plane_state);
}
+static void
+__intel_fbc_prepare_dirty_rect(const struct intel_plane_state *plane_state,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct intel_fbc *fbc = plane->fbc;
+ struct drm_rect *fbc_dirty_rect = &fbc->state.dirty_rect;
+ int width = drm_rect_width(&plane_state->uapi.src) >> 16;
+ const struct drm_rect *damage = &plane_state->damage;
+ int y_offset = plane_state->view.color_plane[0].y;
+
+ lockdep_assert_held(&fbc->lock);
+
+ if (intel_crtc_needs_modeset(crtc_state) ||
+ !intel_fbc_is_ok(plane_state)) {
+ intel_fbc_invalidate_dirty_rect(fbc);
+ return;
+ }
+
+ if (drm_rect_visible(damage))
+ *fbc_dirty_rect = *damage;
+ else
+ /* dirty rect must cover at least one line */
+ *fbc_dirty_rect = DRM_RECT_INIT(0, y_offset, width, 1);
+}
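A hedged sketch of the rect selection above for a flip that carries no damage clips; the 1920-pixel width and zero y offset are hypothetical, and the single-line fallback keeps the HW coordinates valid.

struct drm_rect damage = DRM_RECT_INIT(0, 0, 0, 0);	/* no damage clips */
struct drm_rect dirty;

if (drm_rect_visible(&damage))
	dirty = damage;
else
	dirty = DRM_RECT_INIT(0, 0, 1920, 1);	/* one full-width line at y = 0 */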
+
+void
+intel_fbc_prepare_dirty_rect(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ int i;
+
+ if (!HAS_FBC_DIRTY_RECT(display))
+ return;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ struct intel_fbc *fbc = plane->fbc;
+
+ if (!fbc || plane->pipe != crtc->pipe)
+ continue;
+
+ mutex_lock(&fbc->lock);
+
+ if (fbc->state.plane == plane)
+ __intel_fbc_prepare_dirty_rect(plane_state,
+ crtc_state);
+
+ mutex_unlock(&fbc->lock);
+ }
+}
+
static int intel_fbc_check_plane(struct intel_atomic_state *state,
struct intel_plane *plane)
{
@@ -1317,7 +1442,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
}
/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
- if (i915_vtd_active(i915) && (IS_SKYLAKE(i915) || IS_BROXTON(i915))) {
+ if (i915_vtd_active(i915) && (display->platform.skylake || display->platform.broxton)) {
plane_state->no_fbc_reason = "VT-d enabled";
return 0;
}
@@ -1338,16 +1463,21 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
* Display 12+ is not supporting FBC with PSR2.
* Recommendation is to keep this combination disabled
* Bspec: 50422 HSD: 14010260002
+ *
+ * On Xe3, PSR2 selective fetch and the FBC dirty rect feature cannot
+ * coexist, so if PSR2 selective fetch is supported, mark FBC as not
+ * supported.
+ * TODO: Add logic to choose between PSR2 and FBC dirty rect
*/
- if (IS_DISPLAY_VER(display, 12, 14) && crtc_state->has_sel_update &&
- !crtc_state->has_panel_replay) {
+ if ((IS_DISPLAY_VER(display, 12, 14) || HAS_FBC_DIRTY_RECT(display)) &&
+ crtc_state->has_sel_update && !crtc_state->has_panel_replay) {
plane_state->no_fbc_reason = "PSR2 enabled";
return 0;
}
/* Wa_14016291713 */
if ((IS_DISPLAY_VER(display, 12, 13) ||
- IS_DISPLAY_VERx100_STEP(i915, 1400, STEP_A0, STEP_C0)) &&
+ IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_C0)) &&
crtc_state->has_psr && !crtc_state->has_panel_replay) {
plane_state->no_fbc_reason = "PSR1 enabled (Wa_14016291713)";
return 0;
@@ -1410,7 +1540,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
}
/* WaFbcExceedCdClockThreshold:hsw,bdw */
- if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+ if (display->platform.haswell || display->platform.broadwell) {
const struct intel_cdclk_state *cdclk_state;
cdclk_state = intel_atomic_get_cdclk_state(state);
@@ -1547,6 +1677,8 @@ static void __intel_fbc_disable(struct intel_fbc *fbc)
drm_dbg_kms(display->drm, "Disabling FBC on [PLANE:%d:%s]\n",
plane->base.base.id, plane->base.name);
+ intel_fbc_invalidate_dirty_rect(fbc);
+
__intel_fbc_cleanup_cfb(fbc);
fbc->state.plane = NULL;
@@ -1614,14 +1746,14 @@ out:
mutex_unlock(&fbc->lock);
}
-void intel_fbc_invalidate(struct drm_i915_private *i915,
+void intel_fbc_invalidate(struct intel_display *display,
unsigned int frontbuffer_bits,
enum fb_op_origin origin)
{
struct intel_fbc *fbc;
enum intel_fbc_id fbc_id;
- for_each_intel_fbc(&i915->display, fbc, fbc_id)
+ for_each_intel_fbc(display, fbc, fbc_id)
__intel_fbc_invalidate(fbc, frontbuffer_bits, origin);
}
@@ -1653,14 +1785,14 @@ out:
mutex_unlock(&fbc->lock);
}
-void intel_fbc_flush(struct drm_i915_private *i915,
+void intel_fbc_flush(struct intel_display *display,
unsigned int frontbuffer_bits,
enum fb_op_origin origin)
{
struct intel_fbc *fbc;
enum intel_fbc_id fbc_id;
- for_each_intel_fbc(&i915->display, fbc, fbc_id)
+ for_each_intel_fbc(display, fbc, fbc_id)
__intel_fbc_flush(fbc, frontbuffer_bits, origin);
}
@@ -1732,6 +1864,9 @@ static void __intel_fbc_enable(struct intel_atomic_state *state,
intel_fbc_update_state(state, crtc, plane);
+ if (HAS_FBC_DIRTY_RECT(display))
+ intel_fbc_hw_initialize_dirty_rect(fbc, plane_state);
+
intel_fbc_program_workarounds(fbc);
intel_fbc_program_cfb(fbc);
}
@@ -1897,15 +2032,13 @@ void intel_fbc_handle_fifo_underrun_irq(struct intel_display *display)
*/
static int intel_sanitize_fbc_option(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (display->params.enable_fbc >= 0)
return !!display->params.enable_fbc;
if (!HAS_FBC(display))
return 0;
- if (IS_BROADWELL(i915) || DISPLAY_VER(display) >= 9)
+ if (display->platform.broadwell || DISPLAY_VER(display) >= 9)
return 1;
return 0;
@@ -1919,7 +2052,6 @@ void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane)
static struct intel_fbc *intel_fbc_create(struct intel_display *display,
enum intel_fbc_id fbc_id)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_fbc *fbc;
fbc = kzalloc(sizeof(*fbc), GFP_KERNEL);
@@ -1937,7 +2069,7 @@ static struct intel_fbc *intel_fbc_create(struct intel_display *display,
fbc->funcs = &snb_fbc_funcs;
else if (DISPLAY_VER(display) == 5)
fbc->funcs = &ilk_fbc_funcs;
- else if (IS_G4X(i915))
+ else if (display->platform.g4x)
fbc->funcs = &g4x_fbc_funcs;
else if (DISPLAY_VER(display) == 4)
fbc->funcs = &i965_fbc_funcs;
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h
index ceae55458e14..0e715cb6b4e6 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.h
+++ b/drivers/gpu/drm/i915/display/intel_fbc.h
@@ -9,11 +9,11 @@
#include <linux/types.h>
enum fb_op_origin;
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_display;
+struct intel_dsb;
struct intel_fbc;
struct intel_plane;
struct intel_plane_state;
@@ -38,15 +38,19 @@ void intel_fbc_sanitize(struct intel_display *display);
void intel_fbc_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_fbc_disable(struct intel_crtc *crtc);
-void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
+void intel_fbc_invalidate(struct intel_display *display,
unsigned int frontbuffer_bits,
enum fb_op_origin origin);
-void intel_fbc_flush(struct drm_i915_private *dev_priv,
+void intel_fbc_flush(struct intel_display *display,
unsigned int frontbuffer_bits, enum fb_op_origin origin);
void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane);
void intel_fbc_handle_fifo_underrun_irq(struct intel_display *display);
void intel_fbc_reset_underrun(struct intel_display *display);
void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc);
void intel_fbc_debugfs_register(struct intel_display *display);
+void intel_fbc_prepare_dirty_rect(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_fbc_dirty_rect_update_noarm(struct intel_dsb *dsb,
+ struct intel_plane *plane);
#endif /* __INTEL_FBC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fbc_regs.h b/drivers/gpu/drm/i915/display/intel_fbc_regs.h
index ae0699c3c2fe..b1d0161a3196 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_fbc_regs.h
@@ -100,6 +100,15 @@
#define FBC_STRIDE_MASK REG_GENMASK(14, 0)
#define FBC_STRIDE(x) REG_FIELD_PREP(FBC_STRIDE_MASK, (x))
+#define XE3_FBC_DIRTY_RECT(fbc_id) _MMIO_PIPE((fbc_id), 0x43230, 0x43270)
+#define FBC_DIRTY_RECT_END_LINE_MASK REG_GENMASK(31, 16)
+#define FBC_DIRTY_RECT_END_LINE(val) REG_FIELD_PREP(FBC_DIRTY_RECT_END_LINE_MASK, (val))
+#define FBC_DIRTY_RECT_START_LINE_MASK REG_GENMASK(15, 0)
+#define FBC_DIRTY_RECT_START_LINE(val) REG_FIELD_PREP(FBC_DIRTY_RECT_START_LINE_MASK, (val))
+
+#define XE3_FBC_DIRTY_CTL(fbc_id) _MMIO_PIPE((fbc_id), 0x43234, 0x43274)
+#define FBC_DIRTY_RECT_EN REG_BIT(31)
+
#define ILK_FBC_RT_BASE _MMIO(0x2128)
#define ILK_FBC_RT_VALID REG_BIT(0)
#define SNB_FBC_FRONT_BUFFER REG_BIT(1)
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 00852ff5b247..adc19d5607de 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -37,14 +37,18 @@
#include <linux/tty.h>
#include <linux/vga_switcheroo.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include "i915_drv.h"
+#include "i915_vma.h"
#include "intel_bo.h"
#include "intel_display_types.h"
#include "intel_fb.h"
@@ -54,24 +58,16 @@
#include "intel_frontbuffer.h"
struct intel_fbdev {
- struct drm_fb_helper helper;
struct intel_framebuffer *fb;
struct i915_vma *vma;
unsigned long vma_flags;
- int preferred_bpp;
-
- /* Whether or not fbdev hpd processing is temporarily suspended */
- bool hpd_suspended: 1;
- /* Set when a hotplug was received while HPD processing was suspended */
- bool hpd_waiting: 1;
-
- /* Protects hpd_suspended */
- struct mutex hpd_lock;
};
static struct intel_fbdev *to_intel_fbdev(struct drm_fb_helper *fb_helper)
{
- return container_of(fb_helper, struct intel_fbdev, helper);
+ struct drm_i915_private *i915 = to_i915(fb_helper->client.dev);
+
+ return i915->display.fbdev.fbdev;
}
static struct intel_frontbuffer *to_frontbuffer(struct intel_fbdev *ifbdev)
@@ -127,8 +123,8 @@ static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
static int intel_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
- struct intel_fbdev *fbdev = to_intel_fbdev(info->par);
- struct drm_gem_object *obj = drm_gem_fb_get_obj(&fbdev->fb->base, 0);
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_gem_object *obj = drm_gem_fb_get_obj(fb_helper->fb, 0);
return intel_bo_fb_mmap(obj, vma);
}
@@ -136,9 +132,9 @@ static int intel_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
static void intel_fbdev_fb_destroy(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
- struct intel_fbdev *ifbdev = container_of(fb_helper, struct intel_fbdev, helper);
+ struct intel_fbdev *ifbdev = to_intel_fbdev(fb_helper);
- drm_fb_helper_fini(&ifbdev->helper);
+ drm_fb_helper_fini(fb_helper);
/*
* We rely on the object-free to release the VMA pinning for
@@ -146,11 +142,11 @@ static void intel_fbdev_fb_destroy(struct fb_info *info)
* trying to rectify all the possible error paths leading here.
*/
intel_fb_unpin_vma(ifbdev->vma, ifbdev->vma_flags);
- drm_framebuffer_remove(&ifbdev->fb->base);
+ drm_framebuffer_remove(fb_helper->fb);
drm_client_release(&fb_helper->client);
- drm_fb_helper_unprepare(&ifbdev->helper);
- kfree(ifbdev);
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
}
__diag_push();
@@ -170,16 +166,53 @@ static const struct fb_ops intelfb_ops = {
__diag_pop();
-static int intelfb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
+static int intelfb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
+{
+ if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
+ return 0;
+
+ if (helper->fb->funcs->dirty)
+ return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
+
+ return 0;
+}
+
+static void intelfb_restore(struct drm_fb_helper *fb_helper)
+{
+ struct intel_fbdev *ifbdev = to_intel_fbdev(fb_helper);
+
+ intel_fbdev_invalidate(ifbdev);
+}
+
+static void intelfb_set_suspend(struct drm_fb_helper *fb_helper, bool suspend)
+{
+ struct fb_info *info = fb_helper->info;
+
+ /*
+ * When resuming from hibernation, Linux restores the object's
+ * content from swap if the buffer is backed by shmemfs. If the
+ * object is stolen however, it will be full of whatever garbage
+ * was left in there. Clear it to zero in this case.
+ */
+ if (!suspend && !intel_bo_is_shmem(intel_fb_bo(fb_helper->fb)))
+ memset_io(info->screen_base, 0, info->screen_size);
+
+ fb_set_suspend(info, suspend);
+}
+
+static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
+ .fb_dirty = intelfb_dirty,
+ .fb_restore = intelfb_restore,
+ .fb_set_suspend = intelfb_set_suspend,
+};
+
+int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
{
struct intel_fbdev *ifbdev = to_intel_fbdev(helper);
struct intel_framebuffer *fb = ifbdev->fb;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- const struct i915_gtt_view view = {
- .type = I915_GTT_VIEW_NORMAL,
- };
intel_wakeref_t wakeref;
struct fb_info *info;
struct i915_vma *vma;
@@ -188,12 +221,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct drm_gem_object *obj;
int ret;
- mutex_lock(&ifbdev->hpd_lock);
- ret = ifbdev->hpd_suspended ? -EAGAIN : 0;
- mutex_unlock(&ifbdev->hpd_lock);
- if (ret)
- return ret;
-
ifbdev->fb = NULL;
if (fb &&
@@ -226,8 +253,10 @@ static int intelfb_create(struct drm_fb_helper *helper,
* This also validates that any existing fb inherited from the
* BIOS is suitable for own access.
*/
- vma = intel_fb_pin_to_ggtt(&fb->base, &view,
+ vma = intel_fb_pin_to_ggtt(&fb->base, &fb->normal_view.gtt,
fb->min_alignment, 0,
+ intel_fb_view_vtd_guard(&fb->base, &fb->normal_view,
+ DRM_MODE_ROTATE_0),
false, &flags);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
@@ -241,7 +270,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
goto out_unpin;
}
- ifbdev->helper.fb = &fb->base;
+ helper->funcs = &intel_fb_helper_funcs;
+ helper->fb = &fb->base;
info->fbops = &intelfb_ops;
@@ -251,7 +281,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
if (ret)
goto out_unpin;
- drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);
+ drm_fb_helper_fill_info(info, dev->fb_helper, sizes);
/* If the object is shmemfs backed, it will have given us zeroed pages.
* If the object is stolen however, it will be full of whatever
@@ -280,22 +310,6 @@ out_unlock:
return ret;
}
-static int intelfb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
-{
- if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
- return 0;
-
- if (helper->fb->funcs->dirty)
- return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
-
- return 0;
-}
-
-static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
- .fb_probe = intelfb_create,
- .fb_dirty = intelfb_dirty,
-};
-
/*
* Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible.
* The core display code will have read out the current plane configuration,
@@ -418,7 +432,6 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
goto out;
}
- ifbdev->preferred_bpp = fb->base.format->cpp[0] * 8;
ifbdev->fb = fb;
drm_framebuffer_get(&ifbdev->fb->base);
@@ -449,249 +462,54 @@ out:
return false;
}
-static void intel_fbdev_suspend_worker(struct work_struct *work)
+static unsigned int intel_fbdev_color_mode(const struct drm_format_info *info)
{
- intel_fbdev_set_suspend(&container_of(work,
- struct drm_i915_private,
- display.fbdev.suspend_work)->drm,
- FBINFO_STATE_RUNNING,
- true);
-}
+ unsigned int bpp;
-/* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD
- * processing, fbdev will perform a full connector reprobe if a hotplug event
- * was received while HPD was suspended.
- */
-static void intel_fbdev_hpd_set_suspend(struct drm_i915_private *i915, int state)
-{
- struct intel_fbdev *ifbdev = i915->display.fbdev.fbdev;
- bool send_hpd = false;
-
- mutex_lock(&ifbdev->hpd_lock);
- ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED;
- send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting;
- ifbdev->hpd_waiting = false;
- mutex_unlock(&ifbdev->hpd_lock);
-
- if (send_hpd) {
- drm_dbg_kms(&i915->drm, "Handling delayed fbcon HPD event\n");
- drm_fb_helper_hotplug_event(&ifbdev->helper);
- }
-}
-
-void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
- struct fb_info *info;
-
- if (!ifbdev)
- return;
-
- if (drm_WARN_ON(&dev_priv->drm, !HAS_DISPLAY(dev_priv)))
- return;
-
- if (!ifbdev->vma)
- goto set_suspend;
-
- info = ifbdev->helper.info;
-
- if (synchronous) {
- /* Flush any pending work to turn the console on, and then
- * wait to turn it off. It must be synchronous as we are
- * about to suspend or unload the driver.
- *
- * Note that from within the work-handler, we cannot flush
- * ourselves, so only flush outstanding work upon suspend!
- */
- if (state != FBINFO_STATE_RUNNING)
- flush_work(&dev_priv->display.fbdev.suspend_work);
-
- console_lock();
- } else {
- /*
- * The console lock can be pretty contented on resume due
- * to all the printk activity. Try to keep it out of the hot
- * path of resume if possible.
- */
- drm_WARN_ON(dev, state != FBINFO_STATE_RUNNING);
- if (!console_trylock()) {
- /* Don't block our own workqueue as this can
- * be run in parallel with other i915.ko tasks.
- */
- queue_work(dev_priv->unordered_wq,
- &dev_priv->display.fbdev.suspend_work);
- return;
- }
- }
-
- /* On resume from hibernation: If the object is shmemfs backed, it has
- * been restored from swap. If the object is stolen however, it will be
- * full of whatever garbage was left in there.
- */
- if (state == FBINFO_STATE_RUNNING &&
- !intel_bo_is_shmem(intel_fb_bo(&ifbdev->fb->base)))
- memset_io(info->screen_base, 0, info->screen_size);
-
- drm_fb_helper_set_suspend(&ifbdev->helper, state);
- console_unlock();
-
-set_suspend:
- intel_fbdev_hpd_set_suspend(dev_priv, state);
-}
-
-static int intel_fbdev_output_poll_changed(struct drm_device *dev)
-{
- struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;
- bool send_hpd;
-
- if (!ifbdev)
- return -EINVAL;
-
- mutex_lock(&ifbdev->hpd_lock);
- send_hpd = !ifbdev->hpd_suspended;
- ifbdev->hpd_waiting = true;
- mutex_unlock(&ifbdev->hpd_lock);
-
- if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup))
- drm_fb_helper_hotplug_event(&ifbdev->helper);
-
- return 0;
-}
-
-static int intel_fbdev_restore_mode(struct drm_i915_private *dev_priv)
-{
- struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
- int ret;
-
- if (!ifbdev)
- return -EINVAL;
-
- if (!ifbdev->vma)
- return -ENOMEM;
-
- ret = drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper);
- if (ret)
- return ret;
-
- intel_fbdev_invalidate(ifbdev);
-
- return 0;
-}
-
-/*
- * Fbdev client and struct drm_client_funcs
- */
+ if (!info->depth || info->num_planes != 1 || info->has_alpha || info->is_yuv)
+ return 0;
-static void intel_fbdev_client_unregister(struct drm_client_dev *client)
-{
- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
- struct drm_device *dev = fb_helper->dev;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
+ bpp = drm_format_info_bpp(info, 0);
- if (fb_helper->info) {
- vga_switcheroo_client_fb_set(pdev, NULL);
- drm_fb_helper_unregister_info(fb_helper);
- } else {
- drm_fb_helper_unprepare(fb_helper);
- drm_client_release(&fb_helper->client);
- kfree(fb_helper);
+ switch (bpp) {
+ case 16:
+ return info->depth; /* 15 or 16 */
+ default:
+ return bpp;
}
}
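For reference, a minimal standalone sketch (assuming the standard drm_fourcc helpers) of the color-mode selection that intel_fbdev_setup() now performs for an inherited BIOS framebuffer, including the fallback to 32:

#include <drm/drm_fourcc.h>

static unsigned int example_pick_color_mode(u32 fourcc)
{
	const struct drm_format_info *info = drm_format_info(fourcc);
	unsigned int bpp;

	if (!info || !info->depth || info->num_planes != 1 ||
	    info->has_alpha || info->is_yuv)
		return 32;			/* fall back, as intel_fbdev_setup() does */

	bpp = drm_format_info_bpp(info, 0);
	return bpp == 16 ? info->depth : bpp;	/* RGB565 -> 16, XRGB1555 -> 15 */
}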
-static int intel_fbdev_client_restore(struct drm_client_dev *client)
-{
- struct drm_i915_private *dev_priv = to_i915(client->dev);
- int ret;
-
- ret = intel_fbdev_restore_mode(dev_priv);
- if (ret)
- return ret;
-
- vga_switcheroo_process_delayed_switch();
-
- return 0;
-}
-
-static int intel_fbdev_client_hotplug(struct drm_client_dev *client)
-{
- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
- struct drm_device *dev = client->dev;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- int ret;
-
- if (dev->fb_helper)
- return intel_fbdev_output_poll_changed(dev);
-
- ret = drm_fb_helper_init(dev, fb_helper);
- if (ret)
- goto err_drm_err;
-
- ret = drm_fb_helper_initial_config(fb_helper);
- if (ret)
- goto err_drm_fb_helper_fini;
-
- vga_switcheroo_client_fb_set(pdev, fb_helper->info);
-
- return 0;
-
-err_drm_fb_helper_fini:
- drm_fb_helper_fini(fb_helper);
-err_drm_err:
- drm_err(dev, "Failed to setup i915 fbdev emulation (ret=%d)\n", ret);
- return ret;
-}
-
-static const struct drm_client_funcs intel_fbdev_client_funcs = {
- .owner = THIS_MODULE,
- .unregister = intel_fbdev_client_unregister,
- .restore = intel_fbdev_client_restore,
- .hotplug = intel_fbdev_client_hotplug,
-};
-
void intel_fbdev_setup(struct drm_i915_private *i915)
{
struct drm_device *dev = &i915->drm;
struct intel_fbdev *ifbdev;
- int ret;
+ unsigned int preferred_bpp = 0;
if (!HAS_DISPLAY(i915))
return;
- ifbdev = kzalloc(sizeof(*ifbdev), GFP_KERNEL);
+ ifbdev = drmm_kzalloc(dev, sizeof(*ifbdev), GFP_KERNEL);
if (!ifbdev)
return;
- drm_fb_helper_prepare(dev, &ifbdev->helper, 32, &intel_fb_helper_funcs);
i915->display.fbdev.fbdev = ifbdev;
- INIT_WORK(&i915->display.fbdev.suspend_work, intel_fbdev_suspend_worker);
- mutex_init(&ifbdev->hpd_lock);
if (intel_fbdev_init_bios(dev, ifbdev))
- ifbdev->helper.preferred_bpp = ifbdev->preferred_bpp;
- else
- ifbdev->preferred_bpp = ifbdev->helper.preferred_bpp;
-
- ret = drm_client_init(dev, &ifbdev->helper.client, "intel-fbdev",
- &intel_fbdev_client_funcs);
- if (ret) {
- drm_err(dev, "Failed to register client: %d\n", ret);
- goto err_drm_fb_helper_unprepare;
- }
-
- drm_client_register(&ifbdev->helper.client);
+ preferred_bpp = intel_fbdev_color_mode(ifbdev->fb->base.format);
+ if (!preferred_bpp)
+ preferred_bpp = 32;
- return;
-
-err_drm_fb_helper_unprepare:
- drm_fb_helper_unprepare(&ifbdev->helper);
- mutex_destroy(&ifbdev->hpd_lock);
- kfree(ifbdev);
+ drm_client_setup_with_color_mode(dev, preferred_bpp);
}
struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
{
- if (!fbdev || !fbdev->helper.fb)
+ if (!fbdev)
return NULL;
- return to_intel_framebuffer(fbdev->helper.fb);
+ return fbdev->fb;
+}
+
+struct i915_vma *intel_fbdev_vma_pointer(struct intel_fbdev *fbdev)
+{
+ return fbdev ? fbdev->vma : NULL;
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h
index 08de2d5b3433..ca2c8c438f02 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.h
@@ -6,30 +6,37 @@
#ifndef __INTEL_FBDEV_H__
#define __INTEL_FBDEV_H__
-#include <linux/types.h>
-
-struct drm_device;
+struct drm_fb_helper;
+struct drm_fb_helper_surface_size;
struct drm_i915_private;
struct intel_fbdev;
struct intel_framebuffer;
#ifdef CONFIG_DRM_FBDEV_EMULATION
+int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes);
+#define INTEL_FBDEV_DRIVER_OPS \
+ .fbdev_probe = intel_fbdev_driver_fbdev_probe
void intel_fbdev_setup(struct drm_i915_private *dev_priv);
-void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev);
+struct i915_vma *intel_fbdev_vma_pointer(struct intel_fbdev *fbdev);
+
#else
+#define INTEL_FBDEV_DRIVER_OPS \
+ .fbdev_probe = NULL
static inline void intel_fbdev_setup(struct drm_i915_private *dev_priv)
{
}
-
-static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
+static inline struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
{
+ return NULL;
}
-static inline struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
+static inline struct i915_vma *intel_fbdev_vma_pointer(struct intel_fbdev *fbdev)
{
return NULL;
}
+
#endif
#endif /* __INTEL_FBDEV_H__ */
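
A short illustration, assuming a generic i915-style driver definition, of how INTEL_FBDEV_DRIVER_OPS is meant to be consumed: the macro expands to the .fbdev_probe initializer (or to a NULL probe when fbdev emulation is disabled), so the use site needs no #ifdef. The struct name example_drm_driver is illustrative only.

static const struct drm_driver example_drm_driver = {
	/* ... feature flags, fops and the other driver callbacks ... */
	INTEL_FBDEV_DRIVER_OPS,
};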
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 37cdfa9c692a..40deee0769ae 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -24,10 +24,9 @@ struct intel_fdi_funcs {
const struct intel_crtc_state *crtc_state);
};
-static void assert_fdi_tx(struct drm_i915_private *dev_priv,
+static void assert_fdi_tx(struct intel_display *display,
enum pipe pipe, bool state)
{
- struct intel_display *display = &dev_priv->display;
bool cur_state;
if (HAS_DDI(display)) {
@@ -48,20 +47,19 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
str_on_off(state), str_on_off(cur_state));
}
-void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe)
+void assert_fdi_tx_enabled(struct intel_display *display, enum pipe pipe)
{
- assert_fdi_tx(i915, pipe, true);
+ assert_fdi_tx(display, pipe, true);
}
-void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe)
+void assert_fdi_tx_disabled(struct intel_display *display, enum pipe pipe)
{
- assert_fdi_tx(i915, pipe, false);
+ assert_fdi_tx(display, pipe, false);
}
-static void assert_fdi_rx(struct drm_i915_private *dev_priv,
+static void assert_fdi_rx(struct intel_display *display,
enum pipe pipe, bool state)
{
- struct intel_display *display = &dev_priv->display;
bool cur_state;
cur_state = intel_de_read(display, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;
@@ -70,24 +68,22 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
str_on_off(state), str_on_off(cur_state));
}
-void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe)
+void assert_fdi_rx_enabled(struct intel_display *display, enum pipe pipe)
{
- assert_fdi_rx(i915, pipe, true);
+ assert_fdi_rx(display, pipe, true);
}
-void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe)
+void assert_fdi_rx_disabled(struct intel_display *display, enum pipe pipe)
{
- assert_fdi_rx(i915, pipe, false);
+ assert_fdi_rx(display, pipe, false);
}
-void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915,
- enum pipe pipe)
+void assert_fdi_tx_pll_enabled(struct intel_display *display, enum pipe pipe)
{
- struct intel_display *display = &i915->display;
bool cur_state;
/* ILK FDI PLL is always enabled */
- if (IS_IRONLAKE(i915))
+ if (display->platform.ironlake)
return;
/* On Haswell, DDI ports are responsible for the FDI PLL setup */
@@ -99,10 +95,9 @@ void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915,
"FDI TX PLL assertion failure, should be active but is disabled\n");
}
-static void assert_fdi_rx_pll(struct drm_i915_private *i915,
+static void assert_fdi_rx_pll(struct intel_display *display,
enum pipe pipe, bool state)
{
- struct intel_display *display = &i915->display;
bool cur_state;
cur_state = intel_de_read(display, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE;
@@ -111,22 +106,22 @@ static void assert_fdi_rx_pll(struct drm_i915_private *i915,
str_on_off(state), str_on_off(cur_state));
}
-void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
+void assert_fdi_rx_pll_enabled(struct intel_display *display, enum pipe pipe)
{
- assert_fdi_rx_pll(i915, pipe, true);
+ assert_fdi_rx_pll(display, pipe, true);
}
-void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
+void assert_fdi_rx_pll_disabled(struct intel_display *display, enum pipe pipe)
{
- assert_fdi_rx_pll(i915, pipe, false);
+ assert_fdi_rx_pll(display, pipe, false);
}
void intel_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- dev_priv->display.funcs.fdi->fdi_link_train(crtc, crtc_state);
+ display->funcs.fdi->fdi_link_train(crtc, crtc_state);
}
/**
@@ -143,12 +138,11 @@ void intel_fdi_link_train(struct intel_crtc *crtc,
int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state;
const struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
- if (!IS_IVYBRIDGE(i915) || INTEL_NUM_PIPES(i915) != 3)
+ if (!display->platform.ivybridge || INTEL_NUM_PIPES(display) != 3)
return 0;
crtc = intel_crtc_for_pipe(display, PIPE_C);
@@ -186,31 +180,29 @@ static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
return 0;
}
-static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
+static int ilk_check_fdi_lanes(struct intel_display *display, enum pipe pipe,
struct intel_crtc_state *pipe_config,
enum pipe *pipe_to_reduce)
{
- struct intel_display *display = to_intel_display(dev);
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_atomic_state *state = pipe_config->uapi.state;
struct intel_crtc *other_crtc;
struct intel_crtc_state *other_crtc_state;
*pipe_to_reduce = pipe;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"checking fdi config on pipe %c, lanes %i\n",
pipe_name(pipe), pipe_config->fdi_lanes);
if (pipe_config->fdi_lanes > 4) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"invalid fdi lane config on pipe %c: %i lanes\n",
pipe_name(pipe), pipe_config->fdi_lanes);
return -EINVAL;
}
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ if (display->platform.haswell || display->platform.broadwell) {
if (pipe_config->fdi_lanes > 2) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"only 2 lanes on haswell, required: %i lanes\n",
pipe_config->fdi_lanes);
return -EINVAL;
@@ -219,7 +211,7 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
}
}
- if (INTEL_NUM_PIPES(dev_priv) == 2)
+ if (INTEL_NUM_PIPES(display) == 2)
return 0;
/* Ivybridge 3 pipe is really complicated */
@@ -237,7 +229,7 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
return PTR_ERR(other_crtc_state);
if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"invalid shared fdi lane config on pipe %c: %i lanes\n",
pipe_name(pipe), pipe_config->fdi_lanes);
return -EINVAL;
@@ -245,7 +237,7 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
return 0;
case PIPE_C:
if (pipe_config->fdi_lanes > 2) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"only 2 lanes on pipe %c: required %i lanes\n",
pipe_name(pipe), pipe_config->fdi_lanes);
return -EINVAL;
@@ -258,7 +250,7 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
return PTR_ERR(other_crtc_state);
if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"fdi link B uses too many lanes to enable link C\n");
*pipe_to_reduce = PIPE_B;
@@ -272,29 +264,30 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
}
}
-void intel_fdi_pll_freq_update(struct drm_i915_private *i915)
+void intel_fdi_pll_freq_update(struct intel_display *display)
{
- if (IS_IRONLAKE(i915)) {
- u32 fdi_pll_clk =
- intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
+ if (display->platform.ironlake) {
+ u32 fdi_pll_clk;
+
+ fdi_pll_clk = intel_de_read(display, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
- i915->display.fdi.pll_freq = (fdi_pll_clk + 2) * 10000;
- } else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) {
- i915->display.fdi.pll_freq = 270000;
+ display->fdi.pll_freq = (fdi_pll_clk + 2) * 10000;
+ } else if (display->platform.sandybridge || display->platform.ivybridge) {
+ display->fdi.pll_freq = 270000;
} else {
return;
}
- drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->display.fdi.pll_freq);
+ drm_dbg(display->drm, "FDI PLL freq=%d\n", display->fdi.pll_freq);
}
-int intel_fdi_link_freq(struct drm_i915_private *i915,
+int intel_fdi_link_freq(struct intel_display *display,
const struct intel_crtc_state *pipe_config)
{
- if (HAS_DDI(i915))
+ if (HAS_DDI(display))
return pipe_config->port_clock; /* SPLL */
else
- return i915->display.fdi.pll_freq;
+ return display->fdi.pll_freq;
}
/**
@@ -328,8 +321,7 @@ bool intel_fdi_compute_pipe_bpp(struct intel_crtc_state *crtc_state)
int ilk_fdi_compute_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *i915 = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int lane, link_bw, fdi_dotclock;
@@ -340,7 +332,7 @@ int ilk_fdi_compute_config(struct intel_crtc *crtc,
* Hence the bw of each lane in terms of the mode signal
* is:
*/
- link_bw = intel_fdi_link_freq(i915, pipe_config);
+ link_bw = intel_fdi_link_freq(display, pipe_config);
fdi_dotclock = adjusted_mode->crtc_clock;
@@ -363,11 +355,11 @@ static int intel_fdi_atomic_check_bw(struct intel_atomic_state *state,
struct intel_crtc_state *pipe_config,
struct intel_link_bw_limits *limits)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe_to_reduce;
int ret;
- ret = ilk_check_fdi_lanes(&i915->drm, crtc->pipe, pipe_config,
+ ret = ilk_check_fdi_lanes(display, crtc->pipe, pipe_config,
&pipe_to_reduce);
if (ret != -EINVAL)
return ret;
@@ -390,7 +382,7 @@ static int intel_fdi_atomic_check_bw(struct intel_atomic_state *state,
* @state must be recomputed with the updated @limits.
*
* Returns:
- * - 0 if the confugration is valid
+ * - 0 if the configuration is valid
* - %-EAGAIN, if the configuration is invalid and @limits got updated
* with fallback values with which the configuration of all CRTCs
* in @state must be recomputed
@@ -420,48 +412,48 @@ int intel_fdi_atomic_check_link(struct intel_atomic_state *state,
return 0;
}
-static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
+static void cpt_set_fdi_bc_bifurcation(struct intel_display *display, bool enable)
{
u32 temp;
- temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
+ temp = intel_de_read(display, SOUTH_CHICKEN1);
if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
return;
- drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
+ drm_WARN_ON(display->drm,
+ intel_de_read(display, FDI_RX_CTL(PIPE_B)) &
FDI_RX_ENABLE);
- drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
+ drm_WARN_ON(display->drm,
+ intel_de_read(display, FDI_RX_CTL(PIPE_C)) &
FDI_RX_ENABLE);
temp &= ~FDI_BC_BIFURCATION_SELECT;
if (enable)
temp |= FDI_BC_BIFURCATION_SELECT;
- drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
+ drm_dbg_kms(display->drm, "%sabling fdi C rx\n",
enable ? "en" : "dis");
- intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
- intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
+ intel_de_write(display, SOUTH_CHICKEN1, temp);
+ intel_de_posting_read(display, SOUTH_CHICKEN1);
}
static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
switch (crtc->pipe) {
case PIPE_A:
break;
case PIPE_B:
if (crtc_state->fdi_lanes > 2)
- cpt_set_fdi_bc_bifurcation(dev_priv, false);
+ cpt_set_fdi_bc_bifurcation(display, false);
else
- cpt_set_fdi_bc_bifurcation(dev_priv, true);
+ cpt_set_fdi_bc_bifurcation(display, true);
break;
case PIPE_C:
- cpt_set_fdi_bc_bifurcation(dev_priv, true);
+ cpt_set_fdi_bc_bifurcation(display, true);
break;
default:
@@ -471,26 +463,26 @@ static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_st
void intel_fdi_normal_train(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp;
/* enable normal train */
reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- if (IS_IVYBRIDGE(dev_priv)) {
+ temp = intel_de_read(display, reg);
+ if (display->platform.ivybridge) {
temp &= ~FDI_LINK_TRAIN_NONE_IVB;
temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
} else {
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
}
- intel_de_write(dev_priv, reg, temp);
+ intel_de_write(display, reg, temp);
reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_NORMAL_CPT;
@@ -498,23 +490,22 @@ void intel_fdi_normal_train(struct intel_crtc *crtc)
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_NONE;
}
- intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+ intel_de_write(display, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
/* wait one idle pattern time */
- intel_de_posting_read(dev_priv, reg);
+ intel_de_posting_read(display, reg);
udelay(1000);
/* IVB wants error correction enabled */
- if (IS_IVYBRIDGE(dev_priv))
- intel_de_rmw(dev_priv, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
+ if (display->platform.ivybridge)
+ intel_de_rmw(display, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}
/* The FDI link training functions for ILK/Ibexpeak. */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp, tries;
@@ -523,84 +514,84 @@ static void ilk_fdi_link_train(struct intel_crtc *crtc,
* Write the TU size bits before fdi link training, so that error
* detection works.
*/
- intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
- intel_de_read(dev_priv, PIPE_DATA_M1(dev_priv, pipe)) & TU_SIZE_MASK);
+ intel_de_write(display, FDI_RX_TUSIZE1(pipe),
+ intel_de_read(display, PIPE_DATA_M1(display, pipe)) & TU_SIZE_MASK);
/* FDI needs bits from pipe first */
- assert_transcoder_enabled(dev_priv, crtc_state->cpu_transcoder);
+ assert_transcoder_enabled(display, crtc_state->cpu_transcoder);
/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
reg = FDI_RX_IMR(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
temp &= ~FDI_RX_SYMBOL_LOCK;
temp &= ~FDI_RX_BIT_LOCK;
- intel_de_write(dev_priv, reg, temp);
- intel_de_read(dev_priv, reg);
+ intel_de_write(display, reg, temp);
+ intel_de_read(display, reg);
udelay(150);
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
- intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
+ intel_de_write(display, reg, temp | FDI_TX_ENABLE);
reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
- intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
+ intel_de_write(display, reg, temp | FDI_RX_ENABLE);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_posting_read(display, reg);
udelay(150);
/* Ironlake workaround, enable clock pointer after FDI enable*/
- intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ intel_de_write(display, FDI_RX_CHICKEN(pipe),
FDI_RX_PHASE_SYNC_POINTER_OVR);
- intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ intel_de_write(display, FDI_RX_CHICKEN(pipe),
FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);
reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(display, reg);
+ drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);
if ((temp & FDI_RX_BIT_LOCK)) {
- drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
- intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
+ drm_dbg_kms(display->drm, "FDI train 1 done.\n");
+ intel_de_write(display, reg, temp | FDI_RX_BIT_LOCK);
break;
}
}
if (tries == 5)
- drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
+ drm_err(display->drm, "FDI train 1 fail!\n");
/* Train 2 */
- intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ intel_de_rmw(display, FDI_TX_CTL(pipe),
FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
- intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
+ intel_de_rmw(display, FDI_RX_CTL(pipe),
FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
- intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
+ intel_de_posting_read(display, FDI_RX_CTL(pipe));
udelay(150);
reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(display, reg);
+ drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_SYMBOL_LOCK) {
- intel_de_write(dev_priv, reg,
+ intel_de_write(display, reg,
temp | FDI_RX_SYMBOL_LOCK);
- drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
+ drm_dbg_kms(display->drm, "FDI train 2 done.\n");
break;
}
}
if (tries == 5)
- drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
+ drm_err(display->drm, "FDI train 2 fail!\n");
- drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
+ drm_dbg_kms(display->drm, "FDI train done\n");
}
@@ -615,8 +606,8 @@ static const int snb_b_fdi_train_param[] = {
static void gen6_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp, i, retry;
@@ -625,23 +616,23 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
* Write the TU size bits before fdi link training, so that error
* detection works.
*/
- intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
- intel_de_read(dev_priv, PIPE_DATA_M1(dev_priv, pipe)) & TU_SIZE_MASK);
+ intel_de_write(display, FDI_RX_TUSIZE1(pipe),
+ intel_de_read(display, PIPE_DATA_M1(display, pipe)) & TU_SIZE_MASK);
/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
reg = FDI_RX_IMR(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
temp &= ~FDI_RX_SYMBOL_LOCK;
temp &= ~FDI_RX_BIT_LOCK;
- intel_de_write(dev_priv, reg, temp);
+ intel_de_write(display, reg, temp);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_posting_read(display, reg);
udelay(150);
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
temp &= ~FDI_LINK_TRAIN_NONE;
@@ -649,13 +640,13 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
/* SNB-B */
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
- intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
+ intel_de_write(display, reg, temp | FDI_TX_ENABLE);
- intel_de_write(dev_priv, FDI_RX_MISC(pipe),
+ intel_de_write(display, FDI_RX_MISC(pipe),
FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
@@ -663,25 +654,25 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
}
- intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
+ intel_de_write(display, reg, temp | FDI_RX_ENABLE);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_posting_read(display, reg);
udelay(150);
for (i = 0; i < 4; i++) {
- intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ intel_de_rmw(display, FDI_TX_CTL(pipe),
FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
- intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
+ intel_de_posting_read(display, FDI_TX_CTL(pipe));
udelay(500);
for (retry = 0; retry < 5; retry++) {
reg = FDI_RX_IIR(pipe);
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(display, reg);
+ drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_BIT_LOCK) {
- intel_de_write(dev_priv, reg,
+ intel_de_write(display, reg,
temp | FDI_RX_BIT_LOCK);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"FDI train 1 done.\n");
break;
}
@@ -691,22 +682,22 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
break;
}
if (i == 4)
- drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
+ drm_err(display->drm, "FDI train 1 fail!\n");
/* Train 2 */
reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
- if (IS_SANDYBRIDGE(dev_priv)) {
+ if (display->platform.sandybridge) {
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
/* SNB-B */
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
}
- intel_de_write(dev_priv, reg, temp);
+ intel_de_write(display, reg, temp);
reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
@@ -714,25 +705,25 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
}
- intel_de_write(dev_priv, reg, temp);
+ intel_de_write(display, reg, temp);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_posting_read(display, reg);
udelay(150);
for (i = 0; i < 4; i++) {
- intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ intel_de_rmw(display, FDI_TX_CTL(pipe),
FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
- intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
+ intel_de_posting_read(display, FDI_TX_CTL(pipe));
udelay(500);
for (retry = 0; retry < 5; retry++) {
reg = FDI_RX_IIR(pipe);
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(display, reg);
+ drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_SYMBOL_LOCK) {
- intel_de_write(dev_priv, reg,
+ intel_de_write(display, reg,
temp | FDI_RX_SYMBOL_LOCK);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"FDI train 2 done.\n");
break;
}
@@ -742,17 +733,16 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
break;
}
if (i == 4)
- drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
+ drm_err(display->drm, "FDI train 2 fail!\n");
- drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
+ drm_dbg_kms(display->drm, "FDI train done.\n");
}
/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp, i, j;
@@ -763,72 +753,72 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
* Write the TU size bits before fdi link training, so that error
* detection works.
*/
- intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
- intel_de_read(dev_priv, PIPE_DATA_M1(dev_priv, pipe)) & TU_SIZE_MASK);
+ intel_de_write(display, FDI_RX_TUSIZE1(pipe),
+ intel_de_read(display, PIPE_DATA_M1(display, pipe)) & TU_SIZE_MASK);
/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
reg = FDI_RX_IMR(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
temp &= ~FDI_RX_SYMBOL_LOCK;
temp &= ~FDI_RX_BIT_LOCK;
- intel_de_write(dev_priv, reg, temp);
+ intel_de_write(display, reg, temp);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_posting_read(display, reg);
udelay(150);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
- intel_de_read(dev_priv, FDI_RX_IIR(pipe)));
+ drm_dbg_kms(display->drm, "FDI_RX_IIR before link train 0x%x\n",
+ intel_de_read(display, FDI_RX_IIR(pipe)));
/* Try each vswing and preemphasis setting twice before moving on */
for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
/* disable first in case we need to retry */
reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
temp &= ~FDI_TX_ENABLE;
- intel_de_write(dev_priv, reg, temp);
+ intel_de_write(display, reg, temp);
reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
temp &= ~FDI_LINK_TRAIN_AUTO;
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp &= ~FDI_RX_ENABLE;
- intel_de_write(dev_priv, reg, temp);
+ intel_de_write(display, reg, temp);
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[j/2];
temp |= FDI_COMPOSITE_SYNC;
- intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
+ intel_de_write(display, reg, temp | FDI_TX_ENABLE);
- intel_de_write(dev_priv, FDI_RX_MISC(pipe),
+ intel_de_write(display, FDI_RX_MISC(pipe),
FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
temp |= FDI_COMPOSITE_SYNC;
- intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
+ intel_de_write(display, reg, temp | FDI_RX_ENABLE);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_posting_read(display, reg);
udelay(1); /* should be 0.5us */
for (i = 0; i < 4; i++) {
reg = FDI_RX_IIR(pipe);
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(display, reg);
+ drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_BIT_LOCK ||
- (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
- intel_de_write(dev_priv, reg,
+ (intel_de_read(display, reg) & FDI_RX_BIT_LOCK)) {
+ intel_de_write(display, reg,
temp | FDI_RX_BIT_LOCK);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"FDI train 1 done, level %i.\n",
i);
break;
@@ -836,31 +826,31 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
udelay(1); /* should be 0.5us */
}
if (i == 4) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"FDI train 1 fail on vswing %d\n", j / 2);
continue;
}
/* Train 2 */
- intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ intel_de_rmw(display, FDI_TX_CTL(pipe),
FDI_LINK_TRAIN_NONE_IVB,
FDI_LINK_TRAIN_PATTERN_2_IVB);
- intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
+ intel_de_rmw(display, FDI_RX_CTL(pipe),
FDI_LINK_TRAIN_PATTERN_MASK_CPT,
FDI_LINK_TRAIN_PATTERN_2_CPT);
- intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
+ intel_de_posting_read(display, FDI_RX_CTL(pipe));
udelay(2); /* should be 1.5us */
for (i = 0; i < 4; i++) {
reg = FDI_RX_IIR(pipe);
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(display, reg);
+ drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_SYMBOL_LOCK ||
- (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
- intel_de_write(dev_priv, reg,
+ (intel_de_read(display, reg) & FDI_RX_SYMBOL_LOCK)) {
+ intel_de_write(display, reg,
temp | FDI_RX_SYMBOL_LOCK);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"FDI train 2 done, level %i.\n",
i);
goto train_done;
@@ -868,12 +858,12 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
udelay(2); /* should be 1.5us */
}
if (i == 4)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"FDI train 2 fail on vswing %d\n", j / 2);
}
train_done:
- drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
+ drm_dbg_kms(display->drm, "FDI train done.\n");
}
/* Starting with Haswell, different DDI ports can work in FDI mode for
@@ -887,8 +877,7 @@ train_done:
void hsw_fdi_link_train(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
u32 temp, i, rx_ctl_val;
int n_entries;
@@ -903,33 +892,33 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
*
* WaFDIAutoLinkSetTimingOverrride:hsw
*/
- intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
+ intel_de_write(display, FDI_RX_MISC(PIPE_A),
FDI_RX_PWRDN_LANE1_VAL(2) |
FDI_RX_PWRDN_LANE0_VAL(2) |
FDI_RX_TP1_TO_TP2_48 |
FDI_RX_FDI_DELAY_90);
/* Enable the PCH Receiver FDI PLL */
- rx_ctl_val = dev_priv->display.fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
+ rx_ctl_val = display->fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
FDI_RX_PLL_ENABLE |
FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
- intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
+ intel_de_write(display, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_posting_read(display, FDI_RX_CTL(PIPE_A));
udelay(220);
/* Switch from Rawclk to PCDclk */
rx_ctl_val |= FDI_PCDCLK;
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_write(display, FDI_RX_CTL(PIPE_A), rx_ctl_val);
/* Configure Port Clock Select */
- drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
+ drm_WARN_ON(display->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
intel_ddi_enable_clock(encoder, crtc_state);
/* Start the training iterating through available voltages and emphasis,
* testing each value twice. */
for (i = 0; i < n_entries * 2; i++) {
/* Configure DP_TP_CTL with auto-training */
- intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
+ intel_de_write(display, DP_TP_CTL(PORT_E),
DP_TP_CTL_FDI_AUTOTRAIN |
DP_TP_CTL_ENHANCED_FRAME_ENABLE |
DP_TP_CTL_LINK_TRAIN_PAT1 |
@@ -939,36 +928,36 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
* DDI E does not support port reversal, the functionality is
* achieved on the PCH side in FDI_RX_CTL, so no need to set the
* port reversal bit */
- intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
+ intel_de_write(display, DDI_BUF_CTL(PORT_E),
DDI_BUF_CTL_ENABLE |
((crtc_state->fdi_lanes - 1) << 1) |
DDI_BUF_TRANS_SELECT(i / 2));
- intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
+ intel_de_posting_read(display, DDI_BUF_CTL(PORT_E));
udelay(600);
/* Program PCH FDI Receiver TU */
- intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));
+ intel_de_write(display, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));
/* Enable PCH FDI Receiver with auto-training */
rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
- intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
+ intel_de_write(display, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_posting_read(display, FDI_RX_CTL(PIPE_A));
/* Wait for FDI receiver lane calibration */
udelay(30);
/* Unset FDI_RX_MISC pwrdn lanes */
- intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
+ intel_de_rmw(display, FDI_RX_MISC(PIPE_A),
FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 0);
- intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
+ intel_de_posting_read(display, FDI_RX_MISC(PIPE_A));
/* Wait for FDI auto training time */
udelay(5);
- temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
+ temp = intel_de_read(display, DP_TP_STATUS(PORT_E));
if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"FDI link training done on step %d\n", i);
break;
}
@@ -978,32 +967,32 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
* Results in less fireworks from the state checker.
*/
if (i == n_entries * 2 - 1) {
- drm_err(&dev_priv->drm, "FDI link training failed!\n");
+ drm_err(display->drm, "FDI link training failed!\n");
break;
}
rx_ctl_val &= ~FDI_RX_ENABLE;
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
- intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
+ intel_de_write(display, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_posting_read(display, FDI_RX_CTL(PIPE_A));
- intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
- intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
+ intel_de_rmw(display, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
+ intel_de_posting_read(display, DDI_BUF_CTL(PORT_E));
/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
- intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0);
- intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));
+ intel_de_rmw(display, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0);
+ intel_de_posting_read(display, DP_TP_CTL(PORT_E));
- intel_wait_ddi_buf_idle(dev_priv, PORT_E);
+ intel_wait_ddi_buf_idle(display, PORT_E);
/* Reset FDI_RX_MISC pwrdn lanes */
- intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
+ intel_de_rmw(display, FDI_RX_MISC(PIPE_A),
FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
- intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
+ intel_de_posting_read(display, FDI_RX_MISC(PIPE_A));
}
/* Enable normal pixel sending for FDI */
- intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
+ intel_de_write(display, DP_TP_CTL(PORT_E),
DP_TP_CTL_FDI_AUTOTRAIN |
DP_TP_CTL_LINK_TRAIN_NORMAL |
DP_TP_CTL_ENHANCED_FRAME_ENABLE |
@@ -1012,7 +1001,7 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
void hsw_fdi_disable(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
/*
* Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
@@ -1020,103 +1009,103 @@ void hsw_fdi_disable(struct intel_encoder *encoder)
* step 13 is the correct place for it. Step 18 is where it was
* originally before the BUN.
*/
- intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0);
- intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
- intel_wait_ddi_buf_idle(dev_priv, PORT_E);
+ intel_de_rmw(display, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0);
+ intel_de_rmw(display, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
+ intel_wait_ddi_buf_idle(display, PORT_E);
intel_ddi_disable_clock(encoder);
- intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
+ intel_de_rmw(display, FDI_RX_MISC(PIPE_A),
FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
- intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0);
- intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0);
+ intel_de_rmw(display, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0);
+ intel_de_rmw(display, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0);
}
void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp;
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- temp |= (intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe)) & TRANSCONF_BPC_MASK) << 11;
- intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);
+ temp |= (intel_de_read(display, TRANSCONF(display, pipe)) & TRANSCONF_BPC_MASK) << 11;
+ intel_de_write(display, reg, temp | FDI_RX_PLL_ENABLE);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_posting_read(display, reg);
udelay(200);
/* Switch from Rawclk to PCDclk */
- intel_de_rmw(dev_priv, reg, 0, FDI_PCDCLK);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(display, reg, 0, FDI_PCDCLK);
+ intel_de_posting_read(display, reg);
udelay(200);
/* Enable CPU FDI TX PLL, always on for Ironlake */
reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
if ((temp & FDI_TX_PLL_ENABLE) == 0) {
- intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);
+ intel_de_write(display, reg, temp | FDI_TX_PLL_ENABLE);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_posting_read(display, reg);
udelay(100);
}
}
void ilk_fdi_pll_disable(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
/* Switch from PCDclk to Rawclk */
- intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_PCDCLK, 0);
+ intel_de_rmw(display, FDI_RX_CTL(pipe), FDI_PCDCLK, 0);
/* Disable CPU FDI TX PLL */
- intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0);
- intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
+ intel_de_rmw(display, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0);
+ intel_de_posting_read(display, FDI_TX_CTL(pipe));
udelay(100);
/* Wait for the clocks to turn off. */
- intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0);
- intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
+ intel_de_rmw(display, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0);
+ intel_de_posting_read(display, FDI_RX_CTL(pipe));
udelay(100);
}
void ilk_fdi_disable(struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp;
/* disable CPU FDI tx and PCH FDI rx */
- intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0);
- intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
+ intel_de_rmw(display, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0);
+ intel_de_posting_read(display, FDI_TX_CTL(pipe));
reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
temp &= ~(0x7 << 16);
- temp |= (intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe)) & TRANSCONF_BPC_MASK) << 11;
- intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);
+ temp |= (intel_de_read(display, TRANSCONF(display, pipe)) & TRANSCONF_BPC_MASK) << 11;
+ intel_de_write(display, reg, temp & ~FDI_RX_ENABLE);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_posting_read(display, reg);
udelay(100);
/* Ironlake workaround, disable clock pointer after downing FDI */
if (HAS_PCH_IBX(dev_priv))
- intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ intel_de_write(display, FDI_RX_CHICKEN(pipe),
FDI_RX_PHASE_SYNC_POINTER_OVR);
/* still set train pattern 1 */
- intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ intel_de_rmw(display, FDI_TX_CTL(pipe),
FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_1);
reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
@@ -1126,10 +1115,10 @@ void ilk_fdi_disable(struct intel_crtc *crtc)
}
/* BPC in FDI rx is consistent with that in TRANSCONF */
temp &= ~(0x07 << 16);
- temp |= (intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe)) & TRANSCONF_BPC_MASK) << 11;
- intel_de_write(dev_priv, reg, temp);
+ temp |= (intel_de_read(display, TRANSCONF(display, pipe)) & TRANSCONF_BPC_MASK) << 11;
+ intel_de_write(display, reg, temp);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_posting_read(display, reg);
udelay(100);
}
@@ -1146,14 +1135,14 @@ static const struct intel_fdi_funcs ivb_funcs = {
};
void
-intel_fdi_init_hook(struct drm_i915_private *dev_priv)
+intel_fdi_init_hook(struct intel_display *display)
{
- if (IS_IRONLAKE(dev_priv)) {
- dev_priv->display.funcs.fdi = &ilk_funcs;
- } else if (IS_SANDYBRIDGE(dev_priv)) {
- dev_priv->display.funcs.fdi = &gen6_funcs;
- } else if (IS_IVYBRIDGE(dev_priv)) {
+ if (display->platform.ironlake) {
+ display->funcs.fdi = &ilk_funcs;
+ } else if (display->platform.sandybridge) {
+ display->funcs.fdi = &gen6_funcs;
+ } else if (display->platform.ivybridge) {
/* FIXME: detect B0+ stepping and use auto training */
- dev_priv->display.funcs.fdi = &ivb_funcs;
+ display->funcs.fdi = &ivb_funcs;
}
}
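
To make the conversion pattern above explicit, here is a hedged sketch of the idiom this file now follows: derive struct intel_display from whatever display object is at hand via to_intel_display() and test display->platform.<name> or DISPLAY_VER(display) instead of IS_<PLATFORM>(i915). The helper example_crtc_uses_fdi() is hypothetical and not part of the patch.

static bool example_crtc_uses_fdi(struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);

	/* FDI link training hooks are only installed on ILK/SNB/IVB. */
	return display->platform.ironlake ||
	       display->platform.sandybridge ||
	       display->platform.ivybridge;
}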
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.h b/drivers/gpu/drm/i915/display/intel_fdi.h
index 477ff0136934..ad5e103c38a8 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.h
+++ b/drivers/gpu/drm/i915/display/intel_fdi.h
@@ -9,15 +9,16 @@
#include <linux/types.h>
enum pipe;
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_encoder;
struct intel_link_bw_limits;
int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state);
-int intel_fdi_link_freq(struct drm_i915_private *i915,
+int intel_fdi_link_freq(struct intel_display *display,
const struct intel_crtc_state *pipe_config);
bool intel_fdi_compute_pipe_bpp(struct intel_crtc_state *crtc_state);
int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
@@ -28,21 +29,21 @@ void intel_fdi_normal_train(struct intel_crtc *crtc);
void ilk_fdi_disable(struct intel_crtc *crtc);
void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc);
void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state);
-void intel_fdi_init_hook(struct drm_i915_private *dev_priv);
+void intel_fdi_init_hook(struct intel_display *display);
void hsw_fdi_link_train(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void hsw_fdi_disable(struct intel_encoder *encoder);
-void intel_fdi_pll_freq_update(struct drm_i915_private *i915);
+void intel_fdi_pll_freq_update(struct intel_display *display);
void intel_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state);
-void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe);
-void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe);
-void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe);
-void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe);
-void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe);
-void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe);
-void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe);
+void assert_fdi_tx_enabled(struct intel_display *display, enum pipe pipe);
+void assert_fdi_tx_disabled(struct intel_display *display, enum pipe pipe);
+void assert_fdi_rx_enabled(struct intel_display *display, enum pipe pipe);
+void assert_fdi_rx_disabled(struct intel_display *display, enum pipe pipe);
+void assert_fdi_tx_pll_enabled(struct intel_display *display, enum pipe pipe);
+void assert_fdi_rx_pll_enabled(struct intel_display *display, enum pipe pipe);
+void assert_fdi_rx_pll_disabled(struct intel_display *display, enum pipe pipe);
#endif
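
As a usage note, callers of the assert helpers now pass the display pointer directly; a minimal hypothetical example (example_assert_fdi_active() is not part of the patch):

static void example_assert_fdi_active(struct intel_display *display, enum pipe pipe)
{
	assert_fdi_tx_enabled(display, pipe);
	assert_fdi_rx_enabled(display, pipe);
	assert_fdi_rx_pll_enabled(display, pipe);
}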
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index cda1daf4cdea..7a8fbff39be0 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -55,16 +55,15 @@
* The code also supports underrun detection on the PCH transcoder.
*/
-static bool ivb_can_enable_err_int(struct drm_device *dev)
+static bool ivb_can_enable_err_int(struct intel_display *display)
{
- struct intel_display *display = to_intel_display(dev);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc;
enum pipe pipe;
lockdep_assert_held(&dev_priv->irq_lock);
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
crtc = intel_crtc_for_pipe(display, pipe);
if (crtc->cpu_fifo_underrun_disabled)
@@ -74,16 +73,15 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)
return true;
}
-static bool cpt_can_enable_serr_int(struct drm_device *dev)
+static bool cpt_can_enable_serr_int(struct intel_display *display)
{
- struct intel_display *display = to_intel_display(dev);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe;
struct intel_crtc *crtc;
lockdep_assert_held(&dev_priv->irq_lock);
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
crtc = intel_crtc_for_pipe(display, pipe);
if (crtc->pch_fifo_underrun_disabled)
@@ -97,48 +95,48 @@ static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- i915_reg_t reg = PIPESTAT(dev_priv, crtc->pipe);
+ i915_reg_t reg = PIPESTAT(display, crtc->pipe);
u32 enable_mask;
lockdep_assert_held(&dev_priv->irq_lock);
- if ((intel_de_read(dev_priv, reg) & PIPE_FIFO_UNDERRUN_STATUS) == 0)
+ if ((intel_de_read(display, reg) & PIPE_FIFO_UNDERRUN_STATUS) == 0)
return;
- enable_mask = i915_pipestat_enable_mask(dev_priv, crtc->pipe);
- intel_de_write(dev_priv, reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
- intel_de_posting_read(dev_priv, reg);
+ enable_mask = i915_pipestat_enable_mask(display, crtc->pipe);
+ intel_de_write(display, reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
+ intel_de_posting_read(display, reg);
trace_intel_cpu_fifo_underrun(display, crtc->pipe);
- drm_err(&dev_priv->drm, "pipe %c underrun\n", pipe_name(crtc->pipe));
+ drm_err(display->drm, "pipe %c underrun\n", pipe_name(crtc->pipe));
}
-static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
+static void i9xx_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe,
bool enable, bool old)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- i915_reg_t reg = PIPESTAT(dev_priv, pipe);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ i915_reg_t reg = PIPESTAT(display, pipe);
lockdep_assert_held(&dev_priv->irq_lock);
if (enable) {
- u32 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
+ u32 enable_mask = i915_pipestat_enable_mask(display, pipe);
- intel_de_write(dev_priv, reg,
+ intel_de_write(display, reg,
enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_posting_read(display, reg);
} else {
- if (old && intel_de_read(dev_priv, reg) & PIPE_FIFO_UNDERRUN_STATUS)
- drm_err(&dev_priv->drm, "pipe %c underrun\n",
+ if (old && intel_de_read(display, reg) & PIPE_FIFO_UNDERRUN_STATUS)
+ drm_err(display->drm, "pipe %c underrun\n",
pipe_name(pipe));
}
}
-static void ilk_set_fifo_underrun_reporting(struct drm_device *dev,
+static void ilk_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 bit = (pipe == PIPE_A) ?
DE_PIPEA_FIFO_UNDERRUN : DE_PIPEB_FIFO_UNDERRUN;
@@ -153,30 +151,30 @@ static void ivb_check_fifo_underruns(struct intel_crtc *crtc)
struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- u32 err_int = intel_de_read(dev_priv, GEN7_ERR_INT);
+ u32 err_int = intel_de_read(display, GEN7_ERR_INT);
lockdep_assert_held(&dev_priv->irq_lock);
if ((err_int & ERR_INT_FIFO_UNDERRUN(pipe)) == 0)
return;
- intel_de_write(dev_priv, GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
- intel_de_posting_read(dev_priv, GEN7_ERR_INT);
+ intel_de_write(display, GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
+ intel_de_posting_read(display, GEN7_ERR_INT);
trace_intel_cpu_fifo_underrun(display, pipe);
- drm_err(&dev_priv->drm, "fifo underrun on pipe %c\n", pipe_name(pipe));
+ drm_err(display->drm, "fifo underrun on pipe %c\n", pipe_name(pipe));
}
-static void ivb_set_fifo_underrun_reporting(struct drm_device *dev,
+static void ivb_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable,
bool old)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
if (enable) {
- intel_de_write(dev_priv, GEN7_ERR_INT,
+ intel_de_write(display, GEN7_ERR_INT,
ERR_INT_FIFO_UNDERRUN(pipe));
- if (!ivb_can_enable_err_int(dev))
+ if (!ivb_can_enable_err_int(display))
return;
ilk_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
@@ -184,18 +182,18 @@ static void ivb_set_fifo_underrun_reporting(struct drm_device *dev,
ilk_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
if (old &&
- intel_de_read(dev_priv, GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
- drm_err(&dev_priv->drm,
+ intel_de_read(display, GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
+ drm_err(display->drm,
"uncleared fifo underrun on pipe %c\n",
pipe_name(pipe));
}
}
}
-static void bdw_set_fifo_underrun_reporting(struct drm_device *dev,
+static void bdw_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
if (enable)
bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
@@ -203,11 +201,11 @@ static void bdw_set_fifo_underrun_reporting(struct drm_device *dev,
bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
}
-static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
+static void ibx_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pch_transcoder,
bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 bit = (pch_transcoder == PIPE_A) ?
SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
@@ -222,53 +220,52 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pch_transcoder = crtc->pipe;
- u32 serr_int = intel_de_read(dev_priv, SERR_INT);
+ u32 serr_int = intel_de_read(display, SERR_INT);
lockdep_assert_held(&dev_priv->irq_lock);
if ((serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) == 0)
return;
- intel_de_write(dev_priv, SERR_INT,
+ intel_de_write(display, SERR_INT,
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
- intel_de_posting_read(dev_priv, SERR_INT);
+ intel_de_posting_read(display, SERR_INT);
trace_intel_pch_fifo_underrun(display, pch_transcoder);
- drm_err(&dev_priv->drm, "pch fifo underrun on pch transcoder %c\n",
+ drm_err(display->drm, "pch fifo underrun on pch transcoder %c\n",
pipe_name(pch_transcoder));
}
-static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
+static void cpt_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pch_transcoder,
bool enable, bool old)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
if (enable) {
- intel_de_write(dev_priv, SERR_INT,
+ intel_de_write(display, SERR_INT,
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
- if (!cpt_can_enable_serr_int(dev))
+ if (!cpt_can_enable_serr_int(display))
return;
ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
} else {
ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
- if (old && intel_de_read(dev_priv, SERR_INT) &
+ if (old && intel_de_read(display, SERR_INT) &
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"uncleared pch fifo underrun on pch transcoder %c\n",
pipe_name(pch_transcoder));
}
}
}
-static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+static bool __intel_set_cpu_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable)
{
- struct intel_display *display = to_intel_display(dev);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
bool old;
@@ -277,21 +274,21 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
old = !crtc->cpu_fifo_underrun_disabled;
crtc->cpu_fifo_underrun_disabled = !enable;
- if (HAS_GMCH(dev_priv))
- i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
- else if (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv))
- ilk_set_fifo_underrun_reporting(dev, pipe, enable);
- else if (DISPLAY_VER(dev_priv) == 7)
- ivb_set_fifo_underrun_reporting(dev, pipe, enable, old);
- else if (DISPLAY_VER(dev_priv) >= 8)
- bdw_set_fifo_underrun_reporting(dev, pipe, enable);
+ if (HAS_GMCH(display))
+ i9xx_set_fifo_underrun_reporting(display, pipe, enable, old);
+ else if (display->platform.ironlake || display->platform.sandybridge)
+ ilk_set_fifo_underrun_reporting(display, pipe, enable);
+ else if (DISPLAY_VER(display) == 7)
+ ivb_set_fifo_underrun_reporting(display, pipe, enable, old);
+ else if (DISPLAY_VER(display) >= 8)
+ bdw_set_fifo_underrun_reporting(display, pipe, enable);
return old;
}
/**
- * intel_set_cpu_fifo_underrun_reporting - set cpu fifo underrrun reporting state
- * @dev_priv: i915 device instance
+ * intel_set_cpu_fifo_underrun_reporting - set cpu fifo underrun reporting state
+ * @display: display device instance
* @pipe: (CPU) pipe to set state for
* @enable: whether underruns should be reported or not
*
@@ -305,15 +302,15 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
*
* Returns the previous state of underrun reporting.
*/
-bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+bool intel_set_cpu_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
unsigned long flags;
bool ret;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- ret = __intel_set_cpu_fifo_underrun_reporting(&dev_priv->drm, pipe,
- enable);
+ ret = __intel_set_cpu_fifo_underrun_reporting(display, pipe, enable);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return ret;
@@ -321,7 +318,7 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
/**
* intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state
- * @dev_priv: i915 device instance
+ * @display: display device instance
* @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
* @enable: whether underruns should be reported or not
*
@@ -333,13 +330,12 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
*
* Returns the previous state of underrun reporting.
*/
-bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+bool intel_set_pch_fifo_underrun_reporting(struct intel_display *display,
enum pipe pch_transcoder,
bool enable)
{
- struct intel_display *display = &dev_priv->display;
- struct intel_crtc *crtc =
- intel_crtc_for_pipe(display, pch_transcoder);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, pch_transcoder);
unsigned long flags;
bool old;
@@ -358,11 +354,11 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
crtc->pch_fifo_underrun_disabled = !enable;
if (HAS_PCH_IBX(dev_priv))
- ibx_set_fifo_underrun_reporting(&dev_priv->drm,
+ ibx_set_fifo_underrun_reporting(display,
pch_transcoder,
enable);
else
- cpt_set_fifo_underrun_reporting(&dev_priv->drm,
+ cpt_set_fifo_underrun_reporting(display,
pch_transcoder,
enable, old);
@@ -372,17 +368,16 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
/**
* intel_cpu_fifo_underrun_irq_handler - handle CPU fifo underrun interrupt
- * @dev_priv: i915 device instance
+ * @display: display device instance
* @pipe: (CPU) pipe to set state for
*
* This handles a CPU fifo underrun interrupt, generating an underrun warning
* into dmesg if underrun reporting is enabled and then disables the underrun
* interrupt to avoid an irq storm.
*/
-void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+void intel_cpu_fifo_underrun_irq_handler(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
/* We may be called too early in init, thanks BIOS! */
@@ -390,63 +385,62 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
return;
/* GMCH can't disable fifo underruns, filter them. */
- if (HAS_GMCH(dev_priv) &&
+ if (HAS_GMCH(display) &&
crtc->cpu_fifo_underrun_disabled)
return;
- if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) {
+ if (intel_set_cpu_fifo_underrun_reporting(display, pipe, false)) {
trace_intel_cpu_fifo_underrun(display, pipe);
- drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun\n", pipe_name(pipe));
+ drm_err(display->drm, "CPU pipe %c FIFO underrun\n", pipe_name(pipe));
}
- intel_fbc_handle_fifo_underrun_irq(&dev_priv->display);
+ intel_fbc_handle_fifo_underrun_irq(display);
}
/**
* intel_pch_fifo_underrun_irq_handler - handle PCH fifo underrun interrupt
- * @dev_priv: i915 device instance
+ * @display: display device instance
* @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
*
* This handles a PCH fifo underrun interrupt, generating an underrun warning
* into dmesg if underrun reporting is enabled and then disables the underrun
* interrupt to avoid an irq storm.
*/
-void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+void intel_pch_fifo_underrun_irq_handler(struct intel_display *display,
enum pipe pch_transcoder)
{
- struct intel_display *display = &dev_priv->display;
-
- if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
+ if (intel_set_pch_fifo_underrun_reporting(display, pch_transcoder,
false)) {
trace_intel_pch_fifo_underrun(display, pch_transcoder);
- drm_err(&dev_priv->drm, "PCH transcoder %c FIFO underrun\n",
+ drm_err(display->drm, "PCH transcoder %c FIFO underrun\n",
pipe_name(pch_transcoder));
}
}
/**
* intel_check_cpu_fifo_underruns - check for CPU fifo underruns immediately
- * @dev_priv: i915 device instance
+ * @display: display device instance
*
* Check for CPU fifo underruns immediately. Useful on IVB/HSW where the shared
* error interrupt may have been disabled, and so CPU fifo underruns won't
* necessarily raise an interrupt, and on GMCH platforms where underruns never
* raise an interrupt.
*/
-void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
+void intel_check_cpu_fifo_underruns(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc;
spin_lock_irq(&dev_priv->irq_lock);
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
if (crtc->cpu_fifo_underrun_disabled)
continue;
- if (HAS_GMCH(dev_priv))
+ if (HAS_GMCH(display))
i9xx_check_fifo_underruns(crtc);
- else if (DISPLAY_VER(dev_priv) == 7)
+ else if (DISPLAY_VER(display) == 7)
ivb_check_fifo_underruns(crtc);
}
@@ -455,19 +449,20 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
/**
* intel_check_pch_fifo_underruns - check for PCH fifo underruns immediately
- * @dev_priv: i915 device instance
+ * @display: display device instance
*
* Check for PCH fifo underruns immediately. Useful on CPT/PPT where the shared
* error interrupt may have been disabled, and so PCH fifo underruns won't
* necessarily raise an interrupt.
*/
-void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv)
+void intel_check_pch_fifo_underruns(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc;
spin_lock_irq(&dev_priv->irq_lock);
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
if (crtc->pch_fifo_underrun_disabled)
continue;
@@ -478,10 +473,12 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
}
-void intel_init_fifo_underrun_reporting(struct drm_i915_private *i915,
+void intel_init_fifo_underrun_reporting(struct intel_display *display,
struct intel_crtc *crtc,
bool enable)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
crtc->cpu_fifo_underrun_disabled = !enable;
/*
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.h b/drivers/gpu/drm/i915/display/intel_fifo_underrun.h
index b00d8abebcf9..ebecc4347cfb 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.h
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.h
@@ -8,22 +8,22 @@
#include <linux/types.h>
-struct drm_i915_private;
-struct intel_crtc;
enum pipe;
+struct intel_crtc;
+struct intel_display;
-void intel_init_fifo_underrun_reporting(struct drm_i915_private *i915,
+void intel_init_fifo_underrun_reporting(struct intel_display *display,
struct intel_crtc *crtc, bool enable);
-bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+bool intel_set_cpu_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable);
-bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+bool intel_set_pch_fifo_underrun_reporting(struct intel_display *display,
enum pipe pch_transcoder,
bool enable);
-void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+void intel_cpu_fifo_underrun_irq_handler(struct intel_display *display,
enum pipe pipe);
-void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+void intel_pch_fifo_underrun_irq_handler(struct intel_display *display,
enum pipe pch_transcoder);
-void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv);
-void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
+void intel_check_cpu_fifo_underruns(struct intel_display *display);
+void intel_check_pch_fifo_underruns(struct intel_display *display);
#endif /* __INTEL_FIFO_UNDERRUN_H__ */
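The fifo underrun hunks above all follow one conversion: the exported entry points now take struct intel_display * and only derive the i915 pointer where legacy state (here the irq_lock) still lives on it. A minimal caller-side sketch against the i915 display headers, assuming the caller still starts from a drm_i915_private pointer; example_report_underrun() itself is hypothetical:

static void example_report_underrun(struct drm_i915_private *i915, enum pipe pipe)
{
        struct intel_display *display = &i915->display;

        /* New signature: pass the display device instead of dev_priv. */
        intel_cpu_fifo_underrun_irq_handler(display, pipe);
}

/* Inside the converted helpers, i915-only state is reached the other way: */
/* struct drm_i915_private *dev_priv = to_i915(display->drm); */
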
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 6ed5f726ee60..ba2f88ca6117 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -59,6 +59,7 @@
#include "i915_active.h"
#include "i915_drv.h"
+#include "i915_vma.h"
#include "intel_bo.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
@@ -98,10 +99,10 @@ static void frontbuffer_flush(struct drm_i915_private *i915,
trace_intel_frontbuffer_flush(display, frontbuffer_bits, origin);
might_sleep();
- intel_td_flush(i915);
- intel_drrs_flush(i915, frontbuffer_bits);
+ intel_td_flush(display);
+ intel_drrs_flush(display, frontbuffer_bits);
intel_psr_flush(display, frontbuffer_bits, origin);
- intel_fbc_flush(i915, frontbuffer_bits, origin);
+ intel_fbc_flush(display, frontbuffer_bits, origin);
}
/**
@@ -176,7 +177,6 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front,
unsigned int frontbuffer_bits)
{
struct intel_display *display = to_intel_display(front->obj->dev);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (origin == ORIGIN_CS) {
spin_lock(&display->fb_tracking.lock);
@@ -189,8 +189,8 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front,
might_sleep();
intel_psr_invalidate(display, frontbuffer_bits, origin);
- intel_drrs_invalidate(i915, frontbuffer_bits);
- intel_fbc_invalidate(i915, frontbuffer_bits, origin);
+ intel_drrs_invalidate(display, frontbuffer_bits);
+ intel_fbc_invalidate(display, frontbuffer_bits, origin);
}
void __intel_fb_flush(struct intel_frontbuffer *front,
@@ -227,7 +227,7 @@ static void intel_frontbuffer_flush_work(struct work_struct *work)
* @front: GEM object to flush
*
* This function is targeted for our dirty callback for queueing flush when
- * dma fence is signales
+ * the dma fence is signaled
*/
void intel_frontbuffer_queue_flush(struct intel_frontbuffer *front)
{
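The intel_frontbuffer.c hunks apply the same parameter swap to the flush helpers. A minimal sketch of the resulting flush sequence, assuming the i915 enum name fb_op_origin for the origin argument; the wrapper function itself is hypothetical:

static void example_frontbuffer_flush(struct intel_display *display,
                                      unsigned int frontbuffer_bits,
                                      enum fb_op_origin origin)
{
        /* All per-feature flush helpers now take the display pointer. */
        intel_td_flush(display);
        intel_drrs_flush(display, frontbuffer_bits);
        intel_psr_flush(display, frontbuffer_bits, origin);
        intel_fbc_flush(display, frontbuffer_bits, origin);
}
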
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 807cf606e7a8..abf457e68ee9 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -761,11 +761,10 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
struct intel_display *display = bus->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref;
int ret;
- wakeref = intel_display_power_get(i915, POWER_DOMAIN_GMBUS);
+ wakeref = intel_display_power_get(display, POWER_DOMAIN_GMBUS);
if (bus->force_bit) {
ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
@@ -777,7 +776,7 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
bus->force_bit |= GMBUS_FORCE_BIT_RETRY;
}
- intel_display_power_put(i915, POWER_DOMAIN_GMBUS, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_GMBUS, wakeref);
return ret;
}
@@ -786,7 +785,6 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
struct intel_display *display = bus->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
u8 cmd = DRM_HDCP_DDC_AKSV;
u8 buf[DRM_HDCP_KSV_LEN] = {};
struct i2c_msg msgs[] = {
@@ -806,7 +804,7 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
intel_wakeref_t wakeref;
int ret;
- wakeref = intel_display_power_get(i915, POWER_DOMAIN_GMBUS);
+ wakeref = intel_display_power_get(display, POWER_DOMAIN_GMBUS);
mutex_lock(&display->gmbus.mutex);
/*
@@ -817,7 +815,7 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
ret = do_gmbus_xfer(adapter, msgs, ARRAY_SIZE(msgs), GMBUS_AKSV_SELECT);
mutex_unlock(&display->gmbus.mutex);
- intel_display_power_put(i915, POWER_DOMAIN_GMBUS, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_GMBUS, wakeref);
return ret;
}
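The gmbus hunks keep the established wakeref bracket and only switch the power-domain calls to the display pointer. A minimal sketch of that bracket, not a standalone program; do_transfer() is a placeholder, not a real i915 function:

static int example_with_gmbus_power(struct intel_display *display)
{
        intel_wakeref_t wakeref;
        int ret;

        /* Hold the GMBUS power domain for the duration of the transfer. */
        wakeref = intel_display_power_get(display, POWER_DOMAIN_GMBUS);
        ret = do_transfer(display);     /* placeholder for the actual work */
        intel_display_power_put(display, POWER_DOMAIN_GMBUS, wakeref);

        return ret;
}
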
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 1bab7c34a794..1bf424a822f3 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -9,6 +9,7 @@
*/
#include <linux/component.h>
+#include <linux/debugfs.h>
#include <linux/i2c.h>
#include <linux/random.h>
@@ -69,13 +70,13 @@ static int intel_conn_to_vcpi(struct intel_atomic_state *state,
int vcpi = 0;
/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
- if (!connector->port)
+ if (!connector->mst.port)
return 0;
- mgr = connector->port->mgr;
+ mgr = connector->mst.port->mgr;
drm_modeset_lock(&mgr->base.lock, state->base.acquire_ctx);
mst_state = to_drm_dp_mst_topology_state(mgr->base.state);
- payload = drm_atomic_get_mst_payload_state(mst_state, connector->port);
+ payload = drm_atomic_get_mst_payload_state(mst_state, connector->mst.port);
if (drm_WARN_ON(mgr->dev, !payload))
goto out;
@@ -106,16 +107,16 @@ intel_hdcp_required_content_stream(struct intel_atomic_state *state,
struct drm_connector_list_iter conn_iter;
struct intel_digital_port *conn_dig_port;
struct intel_connector *connector;
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
bool enforce_type0 = false;
int k;
- if (dig_port->hdcp_auth_status)
+ if (dig_port->hdcp.auth_status)
return 0;
data->k = 0;
- if (!dig_port->hdcp_mst_type1_capable)
+ if (!dig_port->hdcp.mst_type1_capable)
enforce_type0 = true;
drm_connector_list_iter_begin(display->drm, &conn_iter);
@@ -135,7 +136,7 @@ intel_hdcp_required_content_stream(struct intel_atomic_state *state,
data->k++;
/* if there is only one active stream */
- if (dig_port->dp.active_mst_links <= 1)
+ if (dig_port->dp.mst.active_links <= 1)
break;
}
drm_connector_list_iter_end(&conn_iter);
@@ -158,7 +159,7 @@ static int intel_hdcp_prepare_streams(struct intel_atomic_state *state,
struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
if (intel_encoder_is_mst(intel_attached_encoder(connector)))
@@ -208,7 +209,7 @@ int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
}
/* Is HDCP1.4 capable on Platform and Sink */
-bool intel_hdcp_get_capability(struct intel_connector *connector)
+static bool intel_hdcp_get_capability(struct intel_connector *connector)
{
struct intel_digital_port *dig_port;
const struct intel_hdcp_shim *shim = connector->hdcp.shim;
@@ -264,7 +265,7 @@ static bool intel_hdcp2_prerequisite(struct intel_connector *connector)
}
/* Is HDCP2.2 capable on Platform and Sink */
-bool intel_hdcp2_get_capability(struct intel_connector *connector)
+static bool intel_hdcp2_get_capability(struct intel_connector *connector)
{
struct intel_hdcp *hdcp = &connector->hdcp;
bool capable = false;
@@ -278,9 +279,9 @@ bool intel_hdcp2_get_capability(struct intel_connector *connector)
return capable;
}
-void intel_hdcp_get_remote_capability(struct intel_connector *connector,
- bool *hdcp_capable,
- bool *hdcp2_capable)
+static void intel_hdcp_get_remote_capability(struct intel_connector *connector,
+ bool *hdcp_capable,
+ bool *hdcp2_capable)
{
struct intel_hdcp *hdcp = &connector->hdcp;
@@ -342,7 +343,7 @@ static bool hdcp_key_loadable(struct intel_display *display)
* On HSW and BDW, Display HW loads the Key as soon as Display resumes.
* On all BXT+, SW can load the keys only when the PW#1 is turned on.
*/
- if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ if (display->platform.haswell || display->platform.broadwell)
id = HSW_DISP_PW_GLOBAL;
else
id = SKL_DISP_PW_1;
@@ -353,7 +354,7 @@ static bool hdcp_key_loadable(struct intel_display *display)
/*
* Another req for hdcp key loadability is enabled state of pll for
- * cdclk. Without active crtc we wont land here. So we are assuming that
+ * cdclk. Without active crtc we won't land here. So we are assuming that
* cdclk is already on.
*/
@@ -381,7 +382,7 @@ static int intel_hdcp_load_keys(struct intel_display *display)
* On HSW and BDW HW loads the HDCP1.4 Key when Display comes
* out of reset. So if Key is not already loaded, it's an error state.
*/
- if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ if (display->platform.haswell || display->platform.broadwell)
if (!(intel_de_read(display, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
return -ENXIO;
@@ -393,7 +394,7 @@ static int intel_hdcp_load_keys(struct intel_display *display)
* process from other platforms. These platforms use the GT Driver
* Mailbox interface.
*/
- if (DISPLAY_VER(display) == 9 && !IS_BROXTON(i915)) {
+ if (DISPLAY_VER(display) == 9 && !display->platform.broxton) {
ret = snb_pcode_write(&i915->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1);
if (ret) {
drm_err(display->drm,
@@ -1000,7 +1001,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
* don't disable it until it disabled HDCP encryption for
* all connectors in MST topology.
*/
- if (dig_port->num_hdcp_streams > 0)
+ if (dig_port->hdcp.num_streams > 0)
return 0;
}
@@ -1093,13 +1094,13 @@ static void intel_hdcp_update_value(struct intel_connector *connector,
if (hdcp->value == value)
return;
- drm_WARN_ON(display->drm, !mutex_is_locked(&dig_port->hdcp_mutex));
+ drm_WARN_ON(display->drm, !mutex_is_locked(&dig_port->hdcp.mutex));
if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
- if (!drm_WARN_ON(display->drm, dig_port->num_hdcp_streams == 0))
- dig_port->num_hdcp_streams--;
+ if (!drm_WARN_ON(display->drm, dig_port->hdcp.num_streams == 0))
+ dig_port->hdcp.num_streams--;
} else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
- dig_port->num_hdcp_streams++;
+ dig_port->hdcp.num_streams++;
}
hdcp->value = value;
@@ -1121,7 +1122,7 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
int ret = 0;
mutex_lock(&hdcp->mutex);
- mutex_lock(&dig_port->hdcp_mutex);
+ mutex_lock(&dig_port->hdcp.mutex);
cpu_transcoder = hdcp->cpu_transcoder;
@@ -1176,7 +1177,7 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
}
out:
- mutex_unlock(&dig_port->hdcp_mutex);
+ mutex_unlock(&dig_port->hdcp.mutex);
mutex_unlock(&hdcp->mutex);
return ret;
}
@@ -1218,7 +1219,7 @@ hdcp2_prepare_ake_init(struct intel_connector *connector,
{
struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct i915_hdcp_arbiter *arbiter;
int ret;
@@ -1248,7 +1249,7 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
{
struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct i915_hdcp_arbiter *arbiter;
int ret;
@@ -1276,7 +1277,7 @@ static int hdcp2_verify_hprime(struct intel_connector *connector,
{
struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct i915_hdcp_arbiter *arbiter;
int ret;
@@ -1302,7 +1303,7 @@ hdcp2_store_pairing_info(struct intel_connector *connector,
{
struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct i915_hdcp_arbiter *arbiter;
int ret;
@@ -1329,7 +1330,7 @@ hdcp2_prepare_lc_init(struct intel_connector *connector,
{
struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct i915_hdcp_arbiter *arbiter;
int ret;
@@ -1356,7 +1357,7 @@ hdcp2_verify_lprime(struct intel_connector *connector,
{
struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct i915_hdcp_arbiter *arbiter;
int ret;
@@ -1382,7 +1383,7 @@ static int hdcp2_prepare_skey(struct intel_connector *connector,
{
struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct i915_hdcp_arbiter *arbiter;
int ret;
@@ -1411,7 +1412,7 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
{
struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct i915_hdcp_arbiter *arbiter;
int ret;
@@ -1441,7 +1442,7 @@ hdcp2_verify_mprime(struct intel_connector *connector,
{
struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct i915_hdcp_arbiter *arbiter;
int ret;
@@ -1465,7 +1466,7 @@ static int hdcp2_authenticate_port(struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct i915_hdcp_arbiter *arbiter;
int ret;
@@ -1502,7 +1503,7 @@ static int hdcp2_close_session(struct intel_connector *connector)
}
ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev,
- &dig_port->hdcp_port_data);
+ &dig_port->hdcp.port_data);
mutex_unlock(&display->hdcp.hdcp_mutex);
return ret;
@@ -1550,9 +1551,9 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
* with a 50ms delay if not hdcp2 capable for DP/DPMST encoders
* (dock decides to stop advertising hdcp2 capability for some reason).
* The reason being that during suspend resume dock usually keeps the
- * HDCP2 registers inaccesible causing AUX error. This wouldn't be a
+ * HDCP2 registers inaccessible causing AUX error. This wouldn't be a
* big problem if the userspace just kept retrying with some delay while
- * it continues to play low value content but most userpace applications
+ * it continues to play low value content but most userspace applications
* end up throwing an error when it receives one from KMD. This makes
* sure we give the dock and the sink devices time to complete their power cycle
* and then try HDCP authentication. The values of 10 and delay of 50ms
@@ -1690,7 +1691,7 @@ static
int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
union {
struct hdcp2_rep_stream_manage stream_manage;
@@ -1768,11 +1769,11 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
* MST topology is not Type 1 capable if it contains a downstream
* device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant.
*/
- dig_port->hdcp_mst_type1_capable =
+ dig_port->hdcp.mst_type1_capable =
!HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) &&
!HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);
- if (!dig_port->hdcp_mst_type1_capable && hdcp->content_type) {
+ if (!dig_port->hdcp.mst_type1_capable && hdcp->content_type) {
drm_dbg_kms(display->drm,
"HDCP1.x or 2.0 Legacy Device Downstream\n");
return -EINVAL;
@@ -1868,7 +1869,7 @@ static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
enum port port = dig_port->base.port;
@@ -1899,7 +1900,7 @@ link_recover:
if (hdcp2_deauthenticate_port(connector) < 0)
drm_dbg_kms(display->drm, "Port deauth failed.\n");
- dig_port->hdcp_auth_status = false;
+ dig_port->hdcp.auth_status = false;
data->k = 0;
return ret;
@@ -1939,7 +1940,7 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
port),
LINK_ENCRYPTION_STATUS,
HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
- dig_port->hdcp_auth_status = true;
+ dig_port->hdcp.auth_status = true;
return ret;
}
@@ -2018,7 +2019,7 @@ static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
int ret = 0, i, tries = 3;
- for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
+ for (i = 0; i < tries && !dig_port->hdcp.auth_status; i++) {
ret = hdcp2_authenticate_sink(connector);
if (!ret) {
ret = intel_hdcp_prepare_streams(state, connector);
@@ -2051,7 +2052,7 @@ static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state,
drm_dbg_kms(display->drm, "Port deauth failed.\n");
}
- if (!ret && !dig_port->hdcp_auth_status) {
+ if (!ret && !dig_port->hdcp.auth_status) {
/*
* Ensuring the required 200mSec min time interval between
* Session Key Exchange and encryption.
@@ -2105,7 +2106,7 @@ _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery
{
struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
@@ -2122,7 +2123,7 @@ _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery
drm_dbg_kms(display->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
transcoder_name(hdcp->stream_transcoder));
- if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery)
+ if (dig_port->hdcp.num_streams > 0 && !hdcp2_link_recovery)
return 0;
}
@@ -2132,7 +2133,7 @@ _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery
drm_dbg_kms(display->drm, "Port deauth failed.\n");
connector->hdcp.hdcp2_encrypted = false;
- dig_port->hdcp_auth_status = false;
+ dig_port->hdcp.auth_status = false;
data->k = 0;
return ret;
@@ -2149,7 +2150,7 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
int ret = 0;
mutex_lock(&hdcp->mutex);
- mutex_lock(&dig_port->hdcp_mutex);
+ mutex_lock(&dig_port->hdcp.mutex);
cpu_transcoder = hdcp->cpu_transcoder;
/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
@@ -2220,7 +2221,7 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
intel_hdcp_update_value(connector,
DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
out:
- mutex_unlock(&dig_port->hdcp_mutex);
+ mutex_unlock(&dig_port->hdcp.mutex);
mutex_unlock(&hdcp->mutex);
return ret;
}
@@ -2302,7 +2303,7 @@ static int initialize_hdcp_port_data(struct intel_connector *connector,
const struct intel_hdcp_shim *shim)
{
struct intel_display *display = to_intel_display(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
enum port port = dig_port->base.port;
if (DISPLAY_VER(display) < 12)
@@ -2338,18 +2339,16 @@ static int initialize_hdcp_port_data(struct intel_connector *connector,
static bool is_hdcp2_supported(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (intel_hdcp_gsc_cs_required(display))
return true;
if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
return false;
- return (DISPLAY_VER(display) >= 10 ||
- IS_KABYLAKE(i915) ||
- IS_COFFEELAKE(i915) ||
- IS_COMETLAKE(i915));
+ return DISPLAY_VER(display) >= 10 ||
+ display->platform.kabylake ||
+ display->platform.coffeelake ||
+ display->platform.cometlake;
}
void intel_hdcp_component_init(struct intel_display *display)
@@ -2415,7 +2414,7 @@ int intel_hdcp_init(struct intel_connector *connector,
hdcp->hdcp2_supported);
if (ret) {
hdcp->hdcp2_supported = false;
- kfree(dig_port->hdcp_port_data.streams);
+ kfree(dig_port->hdcp.port_data.streams);
return ret;
}
@@ -2452,7 +2451,7 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state,
}
mutex_lock(&hdcp->mutex);
- mutex_lock(&dig_port->hdcp_mutex);
+ mutex_lock(&dig_port->hdcp.mutex);
drm_WARN_ON(display->drm,
hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
hdcp->content_type = (u8)conn_state->hdcp_content_type;
@@ -2466,20 +2465,23 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state,
}
if (DISPLAY_VER(display) >= 12)
- dig_port->hdcp_port_data.hdcp_transcoder =
+ dig_port->hdcp.port_data.hdcp_transcoder =
intel_get_hdcp_transcoder(hdcp->cpu_transcoder);
/*
* Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
* is capable of HDCP2.2, it is preferred to use HDCP2.2.
*/
- if (intel_hdcp2_get_capability(connector)) {
+ if (!hdcp->force_hdcp14 && intel_hdcp2_get_capability(connector)) {
ret = _intel_hdcp2_enable(state, connector);
if (!ret)
check_link_interval =
DRM_HDCP2_CHECK_PERIOD_MS;
}
+ if (hdcp->force_hdcp14)
+ drm_dbg_kms(display->drm, "Forcing HDCP 1.4\n");
+
/*
* When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
* be attempted.
@@ -2497,7 +2499,7 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state,
true);
}
- mutex_unlock(&dig_port->hdcp_mutex);
+ mutex_unlock(&dig_port->hdcp.mutex);
mutex_unlock(&hdcp->mutex);
return ret;
}
@@ -2533,7 +2535,7 @@ int intel_hdcp_disable(struct intel_connector *connector)
return -ENOENT;
mutex_lock(&hdcp->mutex);
- mutex_lock(&dig_port->hdcp_mutex);
+ mutex_lock(&dig_port->hdcp.mutex);
if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
goto out;
@@ -2546,7 +2548,7 @@ int intel_hdcp_disable(struct intel_connector *connector)
ret = _intel_hdcp_disable(connector);
out:
- mutex_unlock(&dig_port->hdcp_mutex);
+ mutex_unlock(&dig_port->hdcp.mutex);
mutex_unlock(&hdcp->mutex);
cancel_delayed_work_sync(&hdcp->check_work);
return ret;
@@ -2573,7 +2575,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
/*
* During the HDCP encryption session if Type change is requested,
- * disable the HDCP and reenable it with new TYPE value.
+ * disable the HDCP and re-enable it with new TYPE value.
*/
if (conn_state->content_protection ==
DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
@@ -2616,6 +2618,15 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
_intel_hdcp_enable(state, encoder, crtc_state, conn_state);
}
+void intel_hdcp_cancel_works(struct intel_connector *connector)
+{
+ if (!connector->hdcp.shim)
+ return;
+
+ cancel_delayed_work_sync(&connector->hdcp.check_work);
+ cancel_work_sync(&connector->hdcp.prop_work);
+}
+
void intel_hdcp_component_fini(struct intel_display *display)
{
mutex_lock(&display->hdcp.hdcp_mutex);
@@ -2731,3 +2742,153 @@ void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 0);
}
+
+static void __intel_hdcp_info(struct seq_file *m, struct intel_connector *connector,
+ bool remote_req)
+{
+ bool hdcp_cap = false, hdcp2_cap = false;
+
+ if (!connector->hdcp.shim) {
+ seq_puts(m, "No Connector Support");
+ goto out;
+ }
+
+ if (remote_req) {
+ intel_hdcp_get_remote_capability(connector, &hdcp_cap, &hdcp2_cap);
+ } else {
+ hdcp_cap = intel_hdcp_get_capability(connector);
+ hdcp2_cap = intel_hdcp2_get_capability(connector);
+ }
+
+ if (hdcp_cap)
+ seq_puts(m, "HDCP1.4 ");
+ if (hdcp2_cap)
+ seq_puts(m, "HDCP2.2 ");
+
+ if (!hdcp_cap && !hdcp2_cap)
+ seq_puts(m, "None");
+
+out:
+ seq_puts(m, "\n");
+}
+
+void intel_hdcp_info(struct seq_file *m, struct intel_connector *connector)
+{
+ seq_puts(m, "\tHDCP version: ");
+ if (connector->mst.dp) {
+ __intel_hdcp_info(m, connector, true);
+ seq_puts(m, "\tMST Hub HDCP version: ");
+ }
+ __intel_hdcp_info(m, connector, false);
+}
+
+static int intel_hdcp_sink_capability_show(struct seq_file *m, void *data)
+{
+ struct intel_connector *connector = m->private;
+ struct intel_display *display = to_intel_display(connector);
+ int ret;
+
+ ret = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
+ if (ret)
+ return ret;
+
+ if (!connector->base.encoder ||
+ connector->base.status != connector_status_connected) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ seq_printf(m, "%s:%d HDCP version: ", connector->base.name,
+ connector->base.base.id);
+ __intel_hdcp_info(m, connector, false);
+
+out:
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
+
+ return ret;
+}
+DEFINE_SHOW_ATTRIBUTE(intel_hdcp_sink_capability);
+
+static ssize_t intel_hdcp_force_14_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct intel_connector *connector = m->private;
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ bool force_hdcp14 = false;
+ int ret;
+
+ if (len == 0)
+ return 0;
+
+ ret = kstrtobool_from_user(ubuf, len, &force_hdcp14);
+ if (ret < 0)
+ return ret;
+
+ hdcp->force_hdcp14 = force_hdcp14;
+ *offp += len;
+
+ return len;
+}
+
+static int intel_hdcp_force_14_show(struct seq_file *m, void *data)
+{
+ struct intel_connector *connector = m->private;
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ struct drm_crtc *crtc;
+ int ret;
+
+ if (!encoder)
+ return -ENODEV;
+
+ ret = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
+ if (ret)
+ return ret;
+
+ crtc = connector->base.state->crtc;
+ if (connector->base.status != connector_status_connected || !crtc) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ seq_printf(m, "%s\n",
+ str_yes_no(hdcp->force_hdcp14));
+out:
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
+
+ return ret;
+}
+
+static int intel_hdcp_force_14_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, intel_hdcp_force_14_show,
+ inode->i_private);
+}
+
+static const struct file_operations intel_hdcp_force_14_fops = {
+ .owner = THIS_MODULE,
+ .open = intel_hdcp_force_14_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = intel_hdcp_force_14_write
+};
+
+void intel_hdcp_connector_debugfs_add(struct intel_connector *connector)
+{
+ struct dentry *root = connector->base.debugfs_entry;
+ int connector_type = connector->base.connector_type;
+
+ if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+ debugfs_create_file("i915_hdcp_sink_capability", 0444, root,
+ connector, &intel_hdcp_sink_capability_fops);
+ debugfs_create_file("i915_force_hdcp14", 0644, root,
+ connector, &intel_hdcp_force_14_fops);
+ }
+}
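The new i915_force_hdcp14 debugfs file only flips hdcp->force_hdcp14; the enable path above then skips the HDCP 2.2 attempt and logs "Forcing HDCP 1.4". A hypothetical registration-site sketch, assuming the connector's debugfs directory already exists when it runs:

static void example_connector_debugfs_register(struct intel_connector *connector)
{
        /*
         * connector->base.debugfs_entry must be populated by the DRM core
         * before this point; intel_hdcp_connector_debugfs_add() then creates
         * i915_hdcp_sink_capability and i915_force_hdcp14 for DP/HDMI.
         */
        intel_hdcp_connector_debugfs_add(connector);
}
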
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index d99830cfb798..efe86808e17e 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -19,8 +19,8 @@ struct intel_digital_port;
struct intel_display;
struct intel_encoder;
struct intel_hdcp_shim;
+struct seq_file;
enum port;
-enum transcoder;
void intel_hdcp_atomic_check(struct drm_connector *connector,
struct drm_connector_state *old_state,
@@ -33,19 +33,18 @@ void intel_hdcp_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state);
int intel_hdcp_disable(struct intel_connector *connector);
+void intel_hdcp_cancel_works(struct intel_connector *connector);
void intel_hdcp_update_pipe(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool is_hdcp_supported(struct intel_display *display, enum port port);
-bool intel_hdcp_get_capability(struct intel_connector *connector);
-bool intel_hdcp2_get_capability(struct intel_connector *connector);
-void intel_hdcp_get_remote_capability(struct intel_connector *connector,
- bool *hdcp_capable,
- bool *hdcp2_capable);
void intel_hdcp_component_init(struct intel_display *display);
void intel_hdcp_component_fini(struct intel_display *display);
void intel_hdcp_cleanup(struct intel_connector *connector);
void intel_hdcp_handle_cp_irq(struct intel_connector *connector);
+void intel_hdcp_info(struct seq_file *m, struct intel_connector *connector);
+void intel_hdcp_connector_debugfs_add(struct intel_connector *connector);
+
#endif /* __INTEL_HDCP_H__ */
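With the per-connector capability queries made static, intel_hdcp_info() becomes the exported way to dump HDCP capability into a seq_file. A hypothetical consumer sketch; the show function name is an assumption:

static int example_connector_info_show(struct seq_file *m, void *unused)
{
        struct intel_connector *connector = m->private;

        /* Prints the "HDCP version:" line(s), including the MST remote case. */
        intel_hdcp_info(m, connector);

        return 0;
}
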
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index ed29dd0ccef0..33b8d5229db0 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -1909,18 +1909,6 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
if (intel_encoder_is_tc(encoder) && clock > 500000 && clock < 532800)
return MODE_CLOCK_RANGE;
- /*
- * SNPS PHYs' MPLLB table-based programming can only handle a fixed
- * set of link rates.
- *
- * FIXME: We will hopefully get an algorithmic way of programming
- * the MPLLB for HDMI in the future.
- */
- if (DISPLAY_VER(display) >= 14)
- return intel_cx0_phy_check_hdmi_link_rate(hdmi, clock);
- else if (IS_DG2(dev_priv))
- return intel_snps_phy_check_hdmi_link_rate(clock);
-
return MODE_OK;
}
@@ -2023,11 +2011,10 @@ intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
static enum drm_mode_status
intel_hdmi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct intel_display *display = to_intel_display(connector->dev);
struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum drm_mode_status status;
int clock = mode->clock;
int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq;
@@ -2035,7 +2022,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
bool ycbcr_420_only;
enum intel_output_format sink_format;
- status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
+ status = intel_cpu_transcoder_mode_valid(display, mode);
if (status != MODE_OK)
return status;
@@ -2080,7 +2067,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
return status;
}
- return intel_mode_valid_max_plane_size(dev_priv, mode, 1);
+ return intel_mode_valid_max_plane_size(display, mode, 1);
}
bool intel_hdmi_bpc_possible(const struct intel_crtc_state *crtc_state,
@@ -2373,7 +2360,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
}
if (intel_hdmi_is_ycbcr420(pipe_config)) {
- ret = intel_panel_fitting(pipe_config, conn_state);
+ ret = intel_pfit_compute_config(pipe_config, conn_state);
if (ret)
return ret;
}
@@ -2503,14 +2490,13 @@ static bool
intel_hdmi_set_edid(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
struct i2c_adapter *ddc = connector->ddc;
intel_wakeref_t wakeref;
const struct drm_edid *drm_edid;
bool connected = false;
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+ wakeref = intel_display_power_get(display, POWER_DOMAIN_GMBUS);
drm_edid = drm_edid_read_ddc(connector, ddc);
@@ -2533,7 +2519,7 @@ intel_hdmi_set_edid(struct drm_connector *connector)
connected = true;
}
- intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_GMBUS, wakeref);
cec_notifier_set_phys_addr(intel_hdmi->cec_notifier,
connector->display_info.source_physical_address);
@@ -2546,7 +2532,6 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
{
struct intel_display *display = to_intel_display(connector->dev);
enum drm_connector_status status = connector_status_disconnected;
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base;
intel_wakeref_t wakeref;
@@ -2560,7 +2545,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
if (!intel_display_driver_check_access(display))
return connector->status;
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+ wakeref = intel_display_power_get(display, POWER_DOMAIN_GMBUS);
if (DISPLAY_VER(display) >= 11 &&
!intel_digital_port_connected(encoder))
@@ -2572,7 +2557,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
status = connector_status_connected;
out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_GMBUS, wakeref);
if (status != connector_status_connected)
cec_notifier_phys_addr_invalidate(intel_hdmi->cec_notifier);
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index 38deaeb302a2..dec2ad7dd8a2 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -14,7 +14,6 @@ enum port;
struct drm_connector;
struct drm_connector_state;
struct drm_encoder;
-struct drm_i915_private;
struct intel_connector;
struct intel_crtc_state;
struct intel_digital_port;
@@ -62,4 +61,13 @@ int intel_hdmi_dsc_get_num_slices(const struct intel_crtc_state *crtc_state,
int hdmi_max_slices, int hdmi_throughput);
int intel_hdmi_dsc_get_slice_height(int vactive);
+void hsw_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *frame, ssize_t len);
+void hsw_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len);
+
#endif /* __INTEL_HDMI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 3adc791d3776..00d7b1ccf190 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -28,8 +28,10 @@
#include "i915_drv.h"
#include "i915_irq.h"
+#include "intel_connector.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
+#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_hotplug_irq.h"
@@ -82,15 +84,13 @@
/**
* intel_hpd_pin_default - return default pin associated with certain port.
- * @dev_priv: private driver data pointer
* @port: the hpd port to get associated pin
*
* It is only valid and used by digital port encoder.
*
* Return pin that is associated with @port.
*/
-enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
- enum port port)
+enum hpd_pin intel_hpd_pin_default(enum port port)
{
return HPD_PORT_A + port - PORT_A;
}
@@ -732,6 +732,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private,
display.hotplug.poll_init_work);
+ struct intel_display *display = &dev_priv->display;
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
intel_wakeref_t wakeref;
@@ -747,7 +748,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
* and so risk an endless loop of this same sequence.
*/
if (!enabled) {
- wakeref = intel_display_power_get(dev_priv,
+ wakeref = intel_display_power_get(display,
POWER_DOMAIN_DISPLAY_CORE);
drm_WARN_ON(&dev_priv->drm,
READ_ONCE(dev_priv->display.hotplug.poll_enabled));
@@ -789,7 +790,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
if (!enabled) {
i915_hpd_poll_detect_connectors(dev_priv);
- intel_display_power_put(dev_priv,
+ intel_display_power_put(display,
POWER_DOMAIN_DISPLAY_CORE,
wakeref);
}
@@ -806,7 +807,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
* of the powerwells.
*
* Since this function can get called in contexts where we're already holding
- * dev->mode_config.mutex, we do the actual hotplug enabling in a seperate
+ * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
* worker.
*
* Also see: intel_hpd_init() and intel_hpd_poll_disable().
@@ -823,7 +824,7 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
/*
* We might already be holding dev->mode_config.mutex, so do this in a
- * seperate worker
+ * separate worker
* As well, there's no issue if we race here since we always reschedule
* this worker anyway
*/
@@ -844,7 +845,7 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
* of the powerwells.
*
* Since this function can get called in contexts where we're already holding
- * dev->mode_config.mutex, we do the actual hotplug enabling in a seperate
+ * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
* worker.
*
* Also used during driver init to initialize connector->polled
@@ -865,6 +866,20 @@ void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
}
+void intel_hpd_poll_fini(struct drm_i915_private *i915)
+{
+ struct intel_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ /* Kill all the work that may have been queued by hpd. */
+ drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ intel_connector_cancel_modeset_retry_work(connector);
+ intel_hdcp_cancel_works(connector);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
+
void intel_hpd_init_early(struct drm_i915_private *i915)
{
INIT_DELAYED_WORK(&i915->display.hotplug.hotplug_work,
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
index a17253ddec83..d6986902b054 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
@@ -16,6 +16,7 @@ enum port;
void intel_hpd_poll_enable(struct drm_i915_private *dev_priv);
void intel_hpd_poll_disable(struct drm_i915_private *dev_priv);
+void intel_hpd_poll_fini(struct drm_i915_private *i915);
enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector);
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
@@ -24,8 +25,7 @@ void intel_hpd_trigger_irq(struct intel_digital_port *dig_port);
void intel_hpd_init(struct drm_i915_private *dev_priv);
void intel_hpd_init_early(struct drm_i915_private *i915);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
-enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
- enum port port);
+enum hpd_pin intel_hpd_pin_default(enum port port);
bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
void intel_hpd_debugfs_register(struct drm_i915_private *i915);
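The new intel_hpd_poll_fini() complements intel_hpd_cancel_work(): the latter stops the device-level hotplug workers, the former cancels the per-connector modeset-retry and HDCP works they may have queued. A hypothetical teardown-order sketch; the surrounding unregister function is an assumption:

static void example_display_unregister(struct drm_i915_private *i915)
{
        /* Stop device-level hotplug processing first ... */
        intel_hpd_cancel_work(i915);

        /* ... then cancel any work the connectors queued from it. */
        intel_hpd_poll_fini(i915);
}
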
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index 476ac88087e0..2137ac7b882a 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -197,7 +197,7 @@ void i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
* @bits: bits to enable
* NOTE: the HPD enable bits are modified both inside and outside
* of an interrupt context. To avoid that read-modify-write cycles
- * interfer, these bits are protected by a spinlock. Since this
+ * interfere, these bits are protected by a spinlock. Since this
* function is usually not called from a context where the lock is
* held already, this function acquires the lock itself. A non-locking
* version is also available.
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.c b/drivers/gpu/drm/i915/display/intel_link_bw.c
index 29705c159119..a10cd3992607 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
#include "intel_atomic.h"
#include "intel_crtc.h"
@@ -221,7 +222,7 @@ assert_link_limit_change_valid(struct intel_display *display,
* limits in @new_limits if there is a BW limitation.
*
* Returns:
- * - 0 if the confugration is valid
+ * - 0 if the configuration is valid
* - %-EAGAIN, if the configuration is invalid and @new_limits got updated
* with fallback values with which the configuration of all CRTCs
* in @state must be recomputed
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index f11626176fe2..59551c8414c2 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -77,12 +77,12 @@
#include "intel_lpe_audio.h"
#include "intel_pci_config.h"
-#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->display.audio.lpe.platdev != NULL)
+#define HAS_LPE_AUDIO(display) ((display)->audio.lpe.platdev)
static struct platform_device *
-lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
+lpe_audio_platdev_create(struct intel_display *display)
{
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
struct platform_device_info pinfo = {};
struct resource *rsc;
struct platform_device *platdev;
@@ -98,7 +98,8 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
return ERR_PTR(-ENOMEM);
}
- rsc[0].start = rsc[0].end = dev_priv->display.audio.lpe.irq;
+ rsc[0].start = display->audio.lpe.irq;
+ rsc[0].end = display->audio.lpe.irq;
rsc[0].flags = IORESOURCE_IRQ;
rsc[0].name = "hdmi-lpe-audio-irq";
@@ -109,7 +110,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
rsc[1].flags = IORESOURCE_MEM;
rsc[1].name = "hdmi-lpe-audio-mmio";
- pinfo.parent = dev_priv->drm.dev;
+ pinfo.parent = display->drm->dev;
pinfo.name = "hdmi-lpe-audio";
pinfo.id = -1;
pinfo.res = rsc;
@@ -118,8 +119,8 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
pinfo.size_data = sizeof(*pdata);
pinfo.dma_mask = DMA_BIT_MASK(32);
- pdata->num_pipes = INTEL_NUM_PIPES(dev_priv);
- pdata->num_ports = IS_CHERRYVIEW(dev_priv) ? 3 : 2; /* B,C,D or B,C */
+ pdata->num_pipes = INTEL_NUM_PIPES(display);
+ pdata->num_ports = display->platform.cherryview ? 3 : 2; /* B,C,D or B,C */
pdata->port[0].pipe = -1;
pdata->port[1].pipe = -1;
pdata->port[2].pipe = -1;
@@ -130,7 +131,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
kfree(pdata);
if (IS_ERR(platdev)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to allocate LPE audio platform device\n");
return platdev;
}
@@ -140,7 +141,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
return platdev;
}
-static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
+static void lpe_audio_platdev_destroy(struct intel_display *display)
{
/* XXX Note that platform_device_register_full() allocates a dma_mask
* and never frees it. We can't free it here as we cannot guarantee
@@ -150,7 +151,7 @@ static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
* than us fiddle with its internals.
*/
- platform_device_unregister(dev_priv->display.audio.lpe.platdev);
+ platform_device_unregister(display->audio.lpe.platdev);
}
static void lpe_audio_irq_unmask(struct irq_data *d)
@@ -167,11 +168,12 @@ static struct irq_chip lpe_audio_irqchip = {
.irq_unmask = lpe_audio_irq_unmask,
};
-static int lpe_audio_irq_init(struct drm_i915_private *dev_priv)
+static int lpe_audio_irq_init(struct intel_display *display)
{
- int irq = dev_priv->display.audio.lpe.irq;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ int irq = display->audio.lpe.irq;
- drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
+ drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv));
irq_set_chip_and_handler_name(irq,
&lpe_audio_irqchip,
handle_simple_irq,
@@ -180,11 +182,11 @@ static int lpe_audio_irq_init(struct drm_i915_private *dev_priv)
return irq_set_chip_data(irq, dev_priv);
}
-static bool lpe_audio_detect(struct drm_i915_private *dev_priv)
+static bool lpe_audio_detect(struct intel_display *display)
{
int lpe_present = false;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.valleyview || display->platform.cherryview) {
static const struct pci_device_id atom_hdaudio_ids[] = {
/* Baytrail */
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f04)},
@@ -194,7 +196,7 @@ static bool lpe_audio_detect(struct drm_i915_private *dev_priv)
};
if (!pci_dev_present(atom_hdaudio_ids)) {
- drm_info(&dev_priv->drm,
+ drm_info(display->drm,
"HDaudio controller not detected, using LPE audio instead\n");
lpe_present = true;
}
@@ -202,34 +204,34 @@ static bool lpe_audio_detect(struct drm_i915_private *dev_priv)
return lpe_present;
}
-static int lpe_audio_setup(struct drm_i915_private *dev_priv)
+static int lpe_audio_setup(struct intel_display *display)
{
int ret;
- dev_priv->display.audio.lpe.irq = irq_alloc_desc(0);
- if (dev_priv->display.audio.lpe.irq < 0) {
- drm_err(&dev_priv->drm, "Failed to allocate IRQ desc: %d\n",
- dev_priv->display.audio.lpe.irq);
- ret = dev_priv->display.audio.lpe.irq;
+ display->audio.lpe.irq = irq_alloc_desc(0);
+ if (display->audio.lpe.irq < 0) {
+ drm_err(display->drm, "Failed to allocate IRQ desc: %d\n",
+ display->audio.lpe.irq);
+ ret = display->audio.lpe.irq;
goto err;
}
- drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->display.audio.lpe.irq);
+ drm_dbg(display->drm, "irq = %d\n", display->audio.lpe.irq);
- ret = lpe_audio_irq_init(dev_priv);
+ ret = lpe_audio_irq_init(display);
if (ret) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to initialize irqchip for lpe audio: %d\n",
ret);
goto err_free_irq;
}
- dev_priv->display.audio.lpe.platdev = lpe_audio_platdev_create(dev_priv);
+ display->audio.lpe.platdev = lpe_audio_platdev_create(display);
- if (IS_ERR(dev_priv->display.audio.lpe.platdev)) {
- ret = PTR_ERR(dev_priv->display.audio.lpe.platdev);
- drm_err(&dev_priv->drm,
+ if (IS_ERR(display->audio.lpe.platdev)) {
+ ret = PTR_ERR(display->audio.lpe.platdev);
+ drm_err(display->drm,
"Failed to create lpe audio platform device: %d\n",
ret);
goto err_free_irq;
@@ -238,54 +240,54 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
/* enable chicken bit; at least this is required for Dell Wyse 3040
* with DP outputs (but only sometimes, for some reason!)
*/
- intel_de_write(dev_priv, VLV_AUD_CHICKEN_BIT_REG,
+ intel_de_write(display, VLV_AUD_CHICKEN_BIT_REG,
VLV_CHICKEN_BIT_DBG_ENABLE);
return 0;
err_free_irq:
- irq_free_desc(dev_priv->display.audio.lpe.irq);
+ irq_free_desc(display->audio.lpe.irq);
err:
- dev_priv->display.audio.lpe.irq = -1;
- dev_priv->display.audio.lpe.platdev = NULL;
+ display->audio.lpe.irq = -1;
+ display->audio.lpe.platdev = NULL;
return ret;
}
/**
* intel_lpe_audio_irq_handler() - forwards the LPE audio irq
- * @dev_priv: the i915 drm device private data
+ * @display: display device
*
* the LPE Audio irq is forwarded to the irq handler registered by LPE audio
* driver.
*/
-void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv)
+void intel_lpe_audio_irq_handler(struct intel_display *display)
{
int ret;
- if (!HAS_LPE_AUDIO(dev_priv))
+ if (!HAS_LPE_AUDIO(display))
return;
- ret = generic_handle_irq(dev_priv->display.audio.lpe.irq);
+ ret = generic_handle_irq(display->audio.lpe.irq);
if (ret)
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"error handling LPE audio irq: %d\n", ret);
}
/**
* intel_lpe_audio_init() - detect and setup the bridge between HDMI LPE Audio
* driver and i915
- * @dev_priv: the i915 drm device private data
+ * @display: display device
*
* Return: 0 if successful, non-zero if detection or
* allocation/initialization fails
*/
-int intel_lpe_audio_init(struct drm_i915_private *dev_priv)
+int intel_lpe_audio_init(struct intel_display *display)
{
int ret = -ENODEV;
- if (lpe_audio_detect(dev_priv)) {
- ret = lpe_audio_setup(dev_priv);
+ if (lpe_audio_detect(display)) {
+ ret = lpe_audio_setup(display);
if (ret < 0)
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"failed to setup LPE Audio bridge\n");
}
return ret;
@@ -294,27 +296,27 @@ int intel_lpe_audio_init(struct drm_i915_private *dev_priv)
/**
* intel_lpe_audio_teardown() - destroy the bridge between HDMI LPE
* audio driver and i915
- * @dev_priv: the i915 drm device private data
+ * @display: display device
*
* release all the resources for LPE audio <-> i915 bridge.
*/
-void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
+void intel_lpe_audio_teardown(struct intel_display *display)
{
- if (!HAS_LPE_AUDIO(dev_priv))
+ if (!HAS_LPE_AUDIO(display))
return;
- lpe_audio_platdev_destroy(dev_priv);
+ lpe_audio_platdev_destroy(display);
- irq_free_desc(dev_priv->display.audio.lpe.irq);
+ irq_free_desc(display->audio.lpe.irq);
- dev_priv->display.audio.lpe.irq = -1;
- dev_priv->display.audio.lpe.platdev = NULL;
+ display->audio.lpe.irq = -1;
+ display->audio.lpe.platdev = NULL;
}
/**
* intel_lpe_audio_notify() - notify lpe audio event
* audio driver and i915
- * @dev_priv: the i915 drm device private data
+ * @display: display device
* @cpu_transcoder: CPU transcoder
* @port: port
* @eld : ELD data
@@ -323,7 +325,7 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
*
* Notify lpe audio driver of eld change.
*/
-void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
+void intel_lpe_audio_notify(struct intel_display *display,
enum transcoder cpu_transcoder, enum port port,
const void *eld, int ls_clock, bool dp_output)
{
@@ -332,15 +334,15 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
struct intel_hdmi_lpe_audio_port_pdata *ppdata;
u32 audio_enable;
- if (!HAS_LPE_AUDIO(dev_priv))
+ if (!HAS_LPE_AUDIO(display))
return;
- pdata = dev_get_platdata(&dev_priv->display.audio.lpe.platdev->dev);
+ pdata = dev_get_platdata(&display->audio.lpe.platdev->dev);
ppdata = &pdata->port[port - PORT_B];
spin_lock_irqsave(&pdata->lpe_audio_slock, irqflags);
- audio_enable = intel_de_read(dev_priv, VLV_AUD_PORT_EN_DBG(port));
+ audio_enable = intel_de_read(display, VLV_AUD_PORT_EN_DBG(port));
if (eld != NULL) {
memcpy(ppdata->eld, eld, HDMI_MAX_ELD_BYTES);
@@ -349,7 +351,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
ppdata->dp_output = dp_output;
/* Unmute the amp for both DP and HDMI */
- intel_de_write(dev_priv, VLV_AUD_PORT_EN_DBG(port),
+ intel_de_write(display, VLV_AUD_PORT_EN_DBG(port),
audio_enable & ~VLV_AMP_MUTE);
} else {
memset(ppdata->eld, 0, HDMI_MAX_ELD_BYTES);
@@ -358,12 +360,12 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
ppdata->dp_output = false;
/* Mute the amp for both DP and HDMI */
- intel_de_write(dev_priv, VLV_AUD_PORT_EN_DBG(port),
+ intel_de_write(display, VLV_AUD_PORT_EN_DBG(port),
audio_enable | VLV_AMP_MUTE);
}
if (pdata->notify_audio_lpe)
- pdata->notify_audio_lpe(dev_priv->display.audio.lpe.platdev, port - PORT_B);
+ pdata->notify_audio_lpe(display->audio.lpe.platdev, port - PORT_B);
spin_unlock_irqrestore(&pdata->lpe_audio_slock, irqflags);
}
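A minimal sketch of how a caller sits on top of the LPE audio entry points converted above, assuming the i915 build environment and the signatures shown in the hunks; the wrapper name and its arguments are illustrative only, not part of this patch. The point of the conversion is that the init/teardown/irq/notify paths now only need struct intel_display, not struct drm_i915_private.

	/*
	 * Illustrative wrapper only: forwards an ELD update to the LPE audio
	 * bridge.  Passing eld == NULL instead mutes the port and clears the
	 * cached ELD, as the function body above shows.
	 */
	static void example_lpe_eld_update(struct intel_display *display,
					   enum transcoder cpu_transcoder,
					   enum port port,
					   const u8 *eld, int port_clock,
					   bool is_dp)
	{
		intel_lpe_audio_notify(display, cpu_transcoder, port,
				       eld, port_clock, is_dp);
	}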
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.h b/drivers/gpu/drm/i915/display/intel_lpe_audio.h
index 2c5fcb6e1fd0..5234e11fd662 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.h
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.h
@@ -10,27 +10,27 @@
enum port;
enum transcoder;
-struct drm_i915_private;
+struct intel_display;
#ifdef I915
-int intel_lpe_audio_init(struct drm_i915_private *dev_priv);
-void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv);
-void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv);
-void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
+int intel_lpe_audio_init(struct intel_display *display);
+void intel_lpe_audio_teardown(struct intel_display *display);
+void intel_lpe_audio_irq_handler(struct intel_display *display);
+void intel_lpe_audio_notify(struct intel_display *display,
enum transcoder cpu_transcoder, enum port port,
const void *eld, int ls_clock, bool dp_output);
#else
-static inline int intel_lpe_audio_init(struct drm_i915_private *dev_priv)
+static inline int intel_lpe_audio_init(struct intel_display *display)
{
return -ENODEV;
}
-static inline void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
+static inline void intel_lpe_audio_teardown(struct intel_display *display)
{
}
-static inline void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv)
+static inline void intel_lpe_audio_irq_handler(struct intel_display *display)
{
}
-static inline void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
+static inline void intel_lpe_audio_notify(struct intel_display *display,
enum transcoder cpu_transcoder, enum port port,
const void *eld, int ls_clock, bool dp_output)
{
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index d75dd17fad32..63c1afa30b05 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -55,6 +55,11 @@
#define LSPCON_PARADE_AVI_IF_KICKOFF (1 << 7)
#define LSPCON_PARADE_AVI_IF_DATA_SIZE 32
+static struct intel_lspcon *enc_to_intel_lspcon(struct intel_encoder *encoder)
+{
+ return &enc_to_dig_port(encoder)->lspcon;
+}
+
static struct intel_dp *lspcon_to_intel_dp(struct intel_lspcon *lspcon)
{
struct intel_digital_port *dig_port =
@@ -121,8 +126,9 @@ static u32 get_hdr_status_reg(struct intel_lspcon *lspcon)
return DPCD_PARADE_LSPCON_HDR_STATUS;
}
-void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon)
+bool intel_lspcon_detect_hdr_capability(struct intel_digital_port *dig_port)
{
+ struct intel_lspcon *lspcon = &dig_port->lspcon;
struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
struct intel_display *display = to_intel_display(intel_dp);
u8 hdr_caps;
@@ -138,6 +144,8 @@ void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon)
drm_dbg_kms(display->drm, "LSPCON capable of HDR\n");
lspcon->hdr_supported = true;
}
+
+ return lspcon->hdr_supported;
}
static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
@@ -212,7 +220,8 @@ static int lspcon_change_mode(struct intel_lspcon *lspcon,
return 0;
}
- err = drm_lspcon_set_mode(intel_dp->aux.drm_dev, ddc, mode);
+ err = drm_lspcon_set_mode(intel_dp->aux.drm_dev, ddc, mode,
+ lspcon_get_mode_settle_timeout(lspcon));
if (err < 0) {
drm_err(display->drm, "LSPCON mode change failed\n");
return err;
@@ -652,12 +661,14 @@ u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
return val;
}
-void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon)
+void intel_lspcon_wait_pcon_mode(struct intel_digital_port *dig_port)
{
+ struct intel_lspcon *lspcon = &dig_port->lspcon;
+
lspcon_wait_mode(lspcon, DRM_LSPCON_MODE_PCON);
}
-bool lspcon_init(struct intel_digital_port *dig_port)
+bool intel_lspcon_init(struct intel_digital_port *dig_port)
{
struct intel_display *display = to_intel_display(dig_port);
struct intel_dp *intel_dp = &dig_port->dp;
@@ -688,6 +699,13 @@ bool lspcon_init(struct intel_digital_port *dig_port)
return true;
}
+bool intel_lspcon_active(struct intel_digital_port *dig_port)
+{
+ struct intel_lspcon *lspcon = &dig_port->lspcon;
+
+ return lspcon->active;
+}
+
u32 intel_lspcon_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
@@ -696,7 +714,7 @@ u32 intel_lspcon_infoframes_enabled(struct intel_encoder *encoder,
return dig_port->infoframes_enabled(encoder, pipe_config);
}
-void lspcon_resume(struct intel_digital_port *dig_port)
+void intel_lspcon_resume(struct intel_digital_port *dig_port)
{
struct intel_display *display = to_intel_display(dig_port);
struct intel_lspcon *lspcon = &dig_port->lspcon;
@@ -706,7 +724,7 @@ void lspcon_resume(struct intel_digital_port *dig_port)
return;
if (!lspcon->active) {
- if (!lspcon_init(dig_port)) {
+ if (!intel_lspcon_init(dig_port)) {
drm_err(display->drm, "LSPCON init failed on port %c\n",
port_name(dig_port->base.port));
return;
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.h b/drivers/gpu/drm/i915/display/intel_lspcon.h
index e19e10492b05..41d142a5c440 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.h
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.h
@@ -8,17 +8,20 @@
#include <linux/types.h>
-struct drm_connector;
struct drm_connector_state;
struct intel_crtc_state;
struct intel_digital_port;
struct intel_encoder;
-struct intel_lspcon;
-bool lspcon_init(struct intel_digital_port *dig_port);
-void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon);
-void lspcon_resume(struct intel_digital_port *dig_port);
-void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
+bool intel_lspcon_init(struct intel_digital_port *dig_port);
+bool intel_lspcon_active(struct intel_digital_port *dig_port);
+bool intel_lspcon_detect_hdr_capability(struct intel_digital_port *dig_port);
+void intel_lspcon_resume(struct intel_digital_port *dig_port);
+void intel_lspcon_wait_pcon_mode(struct intel_digital_port *dig_port);
+u32 intel_lspcon_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config);
+
+/* digital port infoframes hooks */
void lspcon_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
@@ -33,15 +36,5 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state);
u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
-u32 intel_lspcon_infoframes_enabled(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config);
-void hsw_write_infoframe(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- unsigned int type,
- const void *frame, ssize_t len);
-void hsw_read_infoframe(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- unsigned int type,
- void *frame, ssize_t len);
#endif /* __INTEL_LSPCON_H__ */
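A short sketch of the renamed, dig_port-based LSPCON API declared above, assuming the i915 build environment; the function name and the detect-time policy are illustrative. The design point is that callers no longer reach into struct intel_lspcon at all, so the type can stay private to intel_lspcon.c.

	/* Illustrative detect-path usage of the renamed helpers. */
	static bool example_lspcon_hdr_capable(struct intel_digital_port *dig_port)
	{
		if (!intel_lspcon_active(dig_port) &&
		    !intel_lspcon_init(dig_port))
			return false;	/* no LSPCON on this port */

		/* Wait for the adaptor to settle into PCON mode ... */
		intel_lspcon_wait_pcon_mode(dig_port);

		/* ... then report whether it can pass HDR metadata through. */
		return intel_lspcon_detect_hdr_capability(dig_port);
	}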
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 6ffd55c17445..19f52d1659fa 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -53,6 +53,7 @@
#include "intel_lvds_regs.h"
#include "intel_panel.h"
#include "intel_pfit.h"
+#include "intel_pfit_regs.h"
#include "intel_pps_regs.h"
/* Private structure for the integrated LVDS support */
@@ -102,18 +103,19 @@ bool intel_lvds_port_enabled(struct drm_i915_private *i915,
static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
intel_wakeref_t wakeref;
bool ret;
- wakeref = intel_display_power_get_if_enabled(i915, encoder->power_domain);
+ wakeref = intel_display_power_get_if_enabled(display, encoder->power_domain);
if (!wakeref)
return false;
ret = intel_lvds_port_enabled(i915, lvds_encoder->reg, pipe);
- intel_display_power_put(i915, encoder->power_domain, wakeref);
+ intel_display_power_put(display, encoder->power_domain, wakeref);
return ret;
}
@@ -239,6 +241,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(state);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -247,10 +250,10 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
u32 temp;
if (HAS_PCH_SPLIT(i915)) {
- assert_fdi_rx_pll_disabled(i915, pipe);
- assert_shared_dpll_disabled(i915, crtc_state->shared_dpll);
+ assert_fdi_rx_pll_disabled(display, pipe);
+ assert_shared_dpll_disabled(display, crtc_state->shared_dpll);
} else {
- assert_pll_disabled(i915, pipe);
+ assert_pll_disabled(display, pipe);
}
intel_lvds_pps_init_hw(i915, &lvds_encoder->init_pps);
@@ -390,16 +393,16 @@ static void intel_lvds_shutdown(struct intel_encoder *encoder)
static enum drm_mode_status
intel_lvds_mode_valid(struct drm_connector *_connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
+ struct intel_display *display = to_intel_display(_connector->dev);
struct intel_connector *connector = to_intel_connector(_connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *fixed_mode =
intel_panel_fixed_mode(connector, mode);
- int max_pixclk = to_i915(connector->base.dev)->display.cdclk.max_dotclk_freq;
+ int max_pixclk = display->cdclk.max_dotclk_freq;
enum drm_mode_status status;
- status = intel_cpu_transcoder_mode_valid(i915, mode);
+ status = intel_cpu_transcoder_mode_valid(display, mode);
if (status != MODE_OK)
return status;
@@ -466,7 +469,7 @@ static int intel_lvds_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
- ret = intel_panel_fitting(crtc_state, conn_state);
+ ret = intel_pfit_compute_config(crtc_state, conn_state);
if (ret)
return ret;
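The mode_valid conversion above follows a shape shared by the other connector types; a sketch, assuming the i915 headers, with the function name hypothetical and the dotclock clamp shown only as an illustration of the existing checks.

	static enum drm_mode_status
	example_mode_valid(struct drm_connector *_connector,
			   const struct drm_display_mode *mode)
	{
		struct intel_display *display = to_intel_display(_connector->dev);
		int max_dotclk = display->cdclk.max_dotclk_freq;
		enum drm_mode_status status;

		/* Transcoder-level checks now take struct intel_display. */
		status = intel_cpu_transcoder_mode_valid(display, mode);
		if (status != MODE_OK)
			return status;

		if (mode->clock > max_dotclk)
			return MODE_CLOCK_HIGH;

		return MODE_OK;
	}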
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index 9a2bea19f17b..312b21b1ab59 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -15,6 +15,7 @@
#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_bw.h"
+#include "intel_cmtg.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
@@ -155,12 +156,6 @@ static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- struct intel_bw_state *bw_state =
- to_intel_bw_state(i915->display.bw.obj.state);
- struct intel_cdclk_state *cdclk_state =
- to_intel_cdclk_state(i915->display.cdclk.obj.state);
- struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->display.dbuf.obj.state);
struct intel_pmdemand_state *pmdemand_state =
to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
struct intel_crtc_state *crtc_state =
@@ -176,16 +171,11 @@ static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
intel_fbc_disable(crtc);
intel_update_watermarks(i915);
- intel_display_power_put_all_in_set(i915, &crtc->enabled_power_domains);
+ intel_display_power_put_all_in_set(display, &crtc->enabled_power_domains);
- cdclk_state->min_cdclk[pipe] = 0;
- cdclk_state->min_voltage_level[pipe] = 0;
- cdclk_state->active_pipes &= ~BIT(pipe);
-
- dbuf_state->active_pipes &= ~BIT(pipe);
-
- bw_state->data_rate[pipe] = 0;
- bw_state->num_active_planes[pipe] = 0;
+ intel_cdclk_crtc_disable_noatomic(crtc);
+ skl_wm_crtc_disable_noatomic(crtc);
+ intel_bw_crtc_disable_noatomic(crtc);
intel_pmdemand_update_port_clock(display, pmdemand_state, pipe, 0);
}
@@ -453,8 +443,8 @@ static struct intel_connector *intel_encoder_find_connector(struct intel_encoder
static void intel_sanitize_fifo_underrun_reporting(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
/*
* We start out with underrun reporting disabled on active
@@ -469,9 +459,9 @@ static void intel_sanitize_fifo_underrun_reporting(const struct intel_crtc_state
* No protection against concurrent access is required - at
* worst a fifo underrun happens which also sets this to false.
*/
- intel_init_fifo_underrun_reporting(i915, crtc,
+ intel_init_fifo_underrun_reporting(display, crtc,
!crtc_state->hw.active &&
- !HAS_GMCH(i915));
+ !HAS_GMCH(display));
}
static bool intel_sanitize_crtc(struct intel_crtc *crtc,
@@ -703,10 +693,6 @@ static void readout_plane_state(struct drm_i915_private *i915)
static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
{
struct intel_display *display = &i915->display;
- struct intel_cdclk_state *cdclk_state =
- to_intel_cdclk_state(i915->display.cdclk.obj.state);
- struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->display.dbuf.obj.state);
struct intel_pmdemand_state *pmdemand_state =
to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
enum pipe pipe;
@@ -714,7 +700,6 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
struct intel_encoder *encoder;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
- u8 active_pipes = 0;
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_crtc_state *crtc_state =
@@ -731,18 +716,12 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
crtc->base.enabled = crtc_state->hw.enable;
crtc->active = crtc_state->hw.active;
- if (crtc_state->hw.active)
- active_pipes |= BIT(crtc->pipe);
-
drm_dbg_kms(&i915->drm,
"[CRTC:%d:%s] hw state readout: %s\n",
crtc->base.base.id, crtc->base.name,
str_enabled_disabled(crtc_state->hw.active));
}
- cdclk_state->active_pipes = active_pipes;
- dbuf_state->active_pipes = active_pipes;
-
readout_plane_state(i915);
for_each_intel_encoder(&i915->drm, encoder) {
@@ -794,7 +773,7 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
pipe_name(pipe));
}
- intel_dpll_readout_hw_state(i915);
+ intel_dpll_readout_hw_state(display);
drm_connector_list_iter_begin(&i915->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
@@ -838,12 +817,9 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
drm_connector_list_iter_end(&conn_iter);
for_each_intel_crtc(&i915->drm, crtc) {
- struct intel_bw_state *bw_state =
- to_intel_bw_state(i915->display.bw.obj.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane;
- int min_cdclk = 0;
if (crtc_state->hw.active) {
/*
@@ -892,22 +868,17 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
crtc_state->min_cdclk[plane->id]);
}
- if (crtc_state->hw.active) {
- min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
- if (drm_WARN_ON(&i915->drm, min_cdclk < 0))
- min_cdclk = 0;
- }
-
- cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
- cdclk_state->min_voltage_level[crtc->pipe] =
- crtc_state->min_voltage_level;
-
intel_pmdemand_update_port_clock(display, pmdemand_state, pipe,
crtc_state->port_clock);
-
- intel_bw_crtc_update(bw_state, crtc_state);
}
+ /* TODO move here (or even earlier?) on all platforms */
+ if (DISPLAY_VER(display) >= 9)
+ intel_wm_get_hw_state(i915);
+
+ intel_bw_update_hw_state(display);
+ intel_cdclk_update_hw_state(display);
+
intel_pmdemand_init_pmdemand_params(display, pmdemand_state);
}
@@ -968,7 +939,7 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
struct intel_crtc *crtc;
intel_wakeref_t wakeref;
- wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT);
+ wakeref = intel_display_power_get(display, POWER_DOMAIN_INIT);
intel_early_display_was(i915);
intel_modeset_readout_hw_state(i915);
@@ -978,6 +949,8 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
intel_pch_sanitize(i915);
+ intel_cmtg_sanitize(display);
+
/*
* intel_sanitize_plane_mapping() may need to do vblank
* waits, so we need vblank interrupts restored beforehand.
@@ -1011,9 +984,12 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
intel_sanitize_all_crtcs(i915, ctx);
- intel_dpll_sanitize_state(i915);
+ intel_dpll_sanitize_state(display);
- intel_wm_get_hw_state(i915);
+ /* TODO move earlier on all platforms */
+ if (DISPLAY_VER(display) < 9)
+ intel_wm_get_hw_state(i915);
+ intel_wm_sanitize(i915);
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_crtc_state *crtc_state =
@@ -1025,7 +1001,7 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
intel_modeset_put_crtc_power_domains(crtc, &put_domains);
}
- intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_INIT, wakeref);
intel_power_domains_sanitize_state(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
index bc70e72ccc2e..a008412fdd04 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
@@ -90,10 +90,11 @@ verify_connector_state(struct intel_atomic_state *state,
static void intel_pipe_config_sanity_check(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
if (crtc_state->has_pch_encoder) {
- int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(i915, crtc_state),
+ int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(display, crtc_state),
&crtc_state->fdi_m_n);
int dotclock = crtc_state->hw.adjusted_mode.crtc_clock;
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index ca30fff61876..aff9a3455c1b 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -42,11 +42,13 @@
#include "intel_frontbuffer.h"
#include "intel_overlay.h"
#include "intel_pci_config.h"
+#include "intel_pfit_regs.h"
/* Limits for overlay size. According to intel doc, the real limits are:
* Y width: 4095, UV width (planar): 2047, Y height: 2047,
* UV width (planar): * 1023. But the xorg thinks 2048 for height and width. Use
- * the mininum of both. */
+ * the minimum of both.
+ */
#define IMAGE_MAX_WIDTH 2048
#define IMAGE_MAX_HEIGHT 2046 /* 2 * 1023 */
/* on 830 and 845 these large limits result in the card hanging */
@@ -408,10 +410,12 @@ static int intel_overlay_off(struct intel_overlay *overlay)
drm_WARN_ON(display->drm, !overlay->active);
- /* According to intel docs the overlay hw may hang (when switching
+ /*
+ * According to intel docs the overlay hw may hang (when switching
* off) without loading the filter coeffs. It is however unclear whether
* this applies to the disabling of the overlay or to the switching off
- * of the hw. Do it in both cases */
+ * of the hw. Do it in both cases.
+ */
flip_addr |= OFC_UPDATE;
rq = alloc_request(overlay, intel_overlay_off_tail);
@@ -442,16 +446,19 @@ static int intel_overlay_off(struct intel_overlay *overlay)
return i915_active_wait(&overlay->last_flip);
}
-/* recover from an interruption due to a signal
- * We have to be careful not to repeat work forever an make forward progess. */
+/*
+ * Recover from an interruption due to a signal.
+ * We have to be careful not to repeat work forever and make forward progress.
+ */
static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
return i915_active_wait(&overlay->last_flip);
}
-/* Wait for pending overlay flip and release old frame.
+/*
+ * Wait for pending overlay flip and release old frame.
 * Needs to be called before the overlay registers are changed
- * via intel_overlay_(un)map_regs
+ * via intel_overlay_(un)map_regs.
*/
static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
@@ -772,7 +779,7 @@ static struct i915_vma *intel_overlay_pin_fb(struct drm_i915_gem_object *new_bo)
retry:
ret = i915_gem_object_lock(new_bo, &ww);
if (!ret) {
- vma = i915_gem_object_pin_to_display_plane(new_bo, &ww, 0,
+ vma = i915_gem_object_pin_to_display_plane(new_bo, &ww, 0, 0,
NULL, PIN_MAPPABLE);
ret = PTR_ERR_OR_ZERO(vma);
}
@@ -793,7 +800,6 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct drm_intel_overlay_put_image *params)
{
struct intel_display *display = overlay->display;
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct overlay_registers __iomem *regs = overlay->regs;
u32 swidth, swidthsw, sheight, ostride;
enum pipe pipe = overlay->crtc->pipe;
@@ -808,7 +814,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret != 0)
return ret;
- atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
+ atomic_inc(&display->restore.pending_fb_pin);
vma = intel_overlay_pin_fb(new_bo);
if (IS_ERR(vma)) {
@@ -896,7 +902,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
out_unpin:
i915_vma_unpin(vma);
out_pin_section:
- atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
+ atomic_dec(&display->restore.pending_fb_pin);
return ret;
}
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.h b/drivers/gpu/drm/i915/display/intel_overlay.h
index 45a42fce754e..d259e4c74b03 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.h
+++ b/drivers/gpu/drm/i915/display/intel_overlay.h
@@ -10,7 +10,6 @@
struct drm_device;
struct drm_file;
-struct drm_i915_private;
struct drm_printer;
struct intel_display;
struct intel_overlay;
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 4e6c5592c7ae..f5c972880391 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -32,6 +32,7 @@
#include <linux/pwm.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include "intel_backlight.h"
#include "intel_connector.h"
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index 8fa5a6334d10..99f6d6f53fa7 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -45,7 +45,7 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
enum pipe port_pipe;
bool state;
- state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
+ state = g4x_dp_port_enabled(display, dp_reg, port, &port_pipe);
INTEL_DISPLAY_STATE_WARN(display, state && port_pipe == pipe,
"PCH DP %c enabled on transcoder %c, should be disabled\n",
@@ -65,7 +65,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
enum pipe port_pipe;
bool state;
- state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
+ state = intel_sdvo_port_enabled(display, hdmi_reg, &port_pipe);
INTEL_DISPLAY_STATE_WARN(display, state && port_pipe == pipe,
"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
@@ -181,10 +181,10 @@ static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
static void intel_pch_transcoder_set_m1_n1(struct intel_crtc *crtc,
const struct intel_link_m_n *m_n)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
- intel_set_m_n(dev_priv, m_n,
+ intel_set_m_n(display, m_n,
PCH_TRANS_DATA_M1(pipe), PCH_TRANS_DATA_N1(pipe),
PCH_TRANS_LINK_M1(pipe), PCH_TRANS_LINK_N1(pipe));
}
@@ -192,10 +192,10 @@ static void intel_pch_transcoder_set_m1_n1(struct intel_crtc *crtc,
static void intel_pch_transcoder_set_m2_n2(struct intel_crtc *crtc,
const struct intel_link_m_n *m_n)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
- intel_set_m_n(dev_priv, m_n,
+ intel_set_m_n(display, m_n,
PCH_TRANS_DATA_M2(pipe), PCH_TRANS_DATA_N2(pipe),
PCH_TRANS_LINK_M2(pipe), PCH_TRANS_LINK_N2(pipe));
}
@@ -203,10 +203,10 @@ static void intel_pch_transcoder_set_m2_n2(struct intel_crtc *crtc,
void intel_pch_transcoder_get_m1_n1(struct intel_crtc *crtc,
struct intel_link_m_n *m_n)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
- intel_get_m_n(dev_priv, m_n,
+ intel_get_m_n(display, m_n,
PCH_TRANS_DATA_M1(pipe), PCH_TRANS_DATA_N1(pipe),
PCH_TRANS_LINK_M1(pipe), PCH_TRANS_LINK_N1(pipe));
}
@@ -214,10 +214,10 @@ void intel_pch_transcoder_get_m1_n1(struct intel_crtc *crtc,
void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc,
struct intel_link_m_n *m_n)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
- intel_get_m_n(dev_priv, m_n,
+ intel_get_m_n(display, m_n,
PCH_TRANS_DATA_M2(pipe), PCH_TRANS_DATA_N2(pipe),
PCH_TRANS_LINK_M2(pipe), PCH_TRANS_LINK_N2(pipe));
}
@@ -249,21 +249,22 @@ static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_s
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 val, pipeconf_val;
/* Make sure PCH DPLL is enabled */
- assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
+ assert_shared_dpll_enabled(display, crtc_state->shared_dpll);
/* FDI must be feeding us bits for PCH ports */
- assert_fdi_tx_enabled(dev_priv, pipe);
- assert_fdi_rx_enabled(dev_priv, pipe);
+ assert_fdi_tx_enabled(display, pipe);
+ assert_fdi_rx_enabled(display, pipe);
if (HAS_PCH_CPT(dev_priv)) {
reg = TRANS_CHICKEN2(pipe);
- val = intel_de_read(dev_priv, reg);
+ val = intel_de_read(display, reg);
/*
* Workaround: Set the timing override bit
* before enabling the pch transcoder.
@@ -272,12 +273,12 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
/* Configure frame start delay to match the CPU */
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
val |= TRANS_CHICKEN2_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
- intel_de_write(dev_priv, reg, val);
+ intel_de_write(display, reg, val);
}
reg = PCH_TRANSCONF(pipe);
- val = intel_de_read(dev_priv, reg);
- pipeconf_val = intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe));
+ val = intel_de_read(display, reg);
+ pipeconf_val = intel_de_read(display, TRANSCONF(display, pipe));
if (HAS_PCH_IBX(dev_priv)) {
/* Configure frame start delay to match the CPU */
@@ -307,21 +308,22 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
val |= TRANS_INTERLACE_PROGRESSIVE;
}
- intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
- if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
- drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
+ intel_de_write(display, reg, val | TRANS_ENABLE);
+ if (intel_de_wait_for_set(display, reg, TRANS_STATE_ENABLE, 100))
+ drm_err(display->drm, "failed to enable transcoder %c\n",
pipe_name(pipe));
}
static void ilk_disable_pch_transcoder(struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
/* FDI relies on the transcoder */
- assert_fdi_tx_disabled(dev_priv, pipe);
- assert_fdi_rx_disabled(dev_priv, pipe);
+ assert_fdi_tx_disabled(display, pipe);
+ assert_fdi_rx_disabled(display, pipe);
/* Ports must be off as well */
assert_pch_ports_disabled(dev_priv, pipe);
@@ -383,15 +385,15 @@ void ilk_pch_enable(struct intel_atomic_state *state,
if (HAS_PCH_CPT(dev_priv)) {
u32 sel;
- temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
+ temp = intel_de_read(display, PCH_DPLL_SEL);
temp |= TRANS_DPLL_ENABLE(pipe);
sel = TRANS_DPLLB_SEL(pipe);
if (crtc_state->shared_dpll ==
- intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
+ intel_get_shared_dpll_by_id(display, DPLL_ID_PCH_PLL_B))
temp |= sel;
else
temp &= ~sel;
- intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
+ intel_de_write(display, PCH_DPLL_SEL, temp);
}
/*
@@ -420,11 +422,12 @@ void ilk_pch_enable(struct intel_atomic_state *state,
intel_crtc_has_dp_encoder(crtc_state)) {
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- u32 bpc = (intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe)) & TRANSCONF_BPC_MASK) >> 5;
+ u32 bpc = (intel_de_read(display, TRANSCONF(display, pipe))
+ & TRANSCONF_BPC_MASK) >> 5;
i915_reg_t reg = TRANS_DP_CTL(pipe);
enum port port;
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
temp &= ~(TRANS_DP_PORT_SEL_MASK |
TRANS_DP_VSYNC_ACTIVE_HIGH |
TRANS_DP_HSYNC_ACTIVE_HIGH |
@@ -438,10 +441,10 @@ void ilk_pch_enable(struct intel_atomic_state *state,
temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
port = intel_get_crtc_new_encoder(state, crtc_state)->port;
- drm_WARN_ON(&dev_priv->drm, port < PORT_B || port > PORT_D);
+ drm_WARN_ON(display->drm, port < PORT_B || port > PORT_D);
temp |= TRANS_DP_PORT_SEL(port);
- intel_de_write(dev_priv, reg, temp);
+ intel_de_write(display, reg, temp);
}
ilk_enable_pch_transcoder(crtc_state);
@@ -477,8 +480,7 @@ void ilk_pch_post_disable(struct intel_atomic_state *state,
static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
/* read out port_clock from the DPLL */
i9xx_crtc_clock_get(crtc_state);
@@ -489,13 +491,14 @@ static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state)
* Calculate one based on the FDI configuration.
*/
crtc_state->hw.adjusted_mode.crtc_clock =
- intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, crtc_state),
+ intel_dotclock_calculate(intel_fdi_link_freq(display, crtc_state),
&crtc_state->fdi_m_n);
}
void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum pipe pipe = crtc->pipe;
@@ -503,12 +506,12 @@ void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
bool pll_active;
u32 tmp;
- if ((intel_de_read(dev_priv, PCH_TRANSCONF(pipe)) & TRANS_ENABLE) == 0)
+ if ((intel_de_read(display, PCH_TRANSCONF(pipe)) & TRANS_ENABLE) == 0)
return;
crtc_state->has_pch_encoder = true;
- tmp = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
+ tmp = intel_de_read(display, FDI_RX_CTL(pipe));
crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
FDI_DP_PORT_WIDTH_SHIFT) + 1;
@@ -522,19 +525,19 @@ void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
*/
pll_id = (enum intel_dpll_id) pipe;
} else {
- tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
+ tmp = intel_de_read(display, PCH_DPLL_SEL);
if (tmp & TRANS_DPLLB_SEL(pipe))
pll_id = DPLL_ID_PCH_PLL_B;
else
pll_id = DPLL_ID_PCH_PLL_A;
}
- crtc_state->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
+ crtc_state->shared_dpll = intel_get_shared_dpll_by_id(display, pll_id);
pll = crtc_state->shared_dpll;
- pll_active = intel_dpll_get_hw_state(dev_priv, pll,
+ pll_active = intel_dpll_get_hw_state(display, pll,
&crtc_state->dpll_hw_state);
- drm_WARN_ON(&dev_priv->drm, !pll_active);
+ drm_WARN_ON(display->drm, !pll_active);
tmp = crtc_state->dpll_hw_state.i9xx.dpll;
crtc_state->pixel_multiplier =
@@ -546,14 +549,15 @@ void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val, pipeconf_val;
/* FDI must be feeding us bits for PCH ports */
- assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
- assert_fdi_rx_enabled(dev_priv, PIPE_A);
+ assert_fdi_tx_enabled(display, (enum pipe)cpu_transcoder);
+ assert_fdi_rx_enabled(display, PIPE_A);
val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
/* Workaround: set timing override bit. */
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index 71471c1d7dc9..33467de3d115 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -505,7 +505,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
bool using_ssc_source = false;
/* We need to take the global config into account */
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
has_panel = true;
@@ -522,7 +522,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
}
if (HAS_PCH_IBX(dev_priv)) {
- has_ck505 = dev_priv->display.vbt.display_clock_mode;
+ has_ck505 = display->vbt.display_clock_mode;
can_ssc = has_ck505;
} else {
has_ck505 = false;
@@ -530,10 +530,10 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
}
/* Check if any DPLLs are using the SSC source */
- for_each_shared_dpll(dev_priv, pll, i) {
+ for_each_shared_dpll(display, pll, i) {
u32 temp;
- temp = intel_de_read(dev_priv, PCH_DPLL(pll->info->id));
+ temp = intel_de_read(display, PCH_DPLL(pll->info->id));
if (!(temp & DPLL_VCO_ENABLE))
continue;
@@ -545,7 +545,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
}
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
has_panel, has_lvds, has_ck505, using_ssc_source);
@@ -554,7 +554,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
* PCH B stepping, previous chipset stepping should be
* ignoring this setting.
*/
- val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
+ val = intel_de_read(display, PCH_DREF_CONTROL);
/* As we must carefully and slowly disable/enable each source in turn,
* compute the final state we want first and check if we need to
@@ -614,8 +614,8 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
}
/* Get SSC going before enabling the outputs */
- intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
- intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
+ intel_de_write(display, PCH_DREF_CONTROL, val);
+ intel_de_posting_read(display, PCH_DREF_CONTROL);
udelay(200);
val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
@@ -633,23 +633,23 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
}
- intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
- intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
+ intel_de_write(display, PCH_DREF_CONTROL, val);
+ intel_de_posting_read(display, PCH_DREF_CONTROL);
udelay(200);
} else {
- drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");
+ drm_dbg_kms(display->drm, "Disabling CPU source output\n");
val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
/* Turn off CPU output */
val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
- intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
- intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
+ intel_de_write(display, PCH_DREF_CONTROL, val);
+ intel_de_posting_read(display, PCH_DREF_CONTROL);
udelay(200);
if (!using_ssc_source) {
- drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");
+ drm_dbg_kms(display->drm, "Disabling SSC source\n");
/* Turn off the SSC source */
val &= ~DREF_SSC_SOURCE_MASK;
@@ -658,13 +658,13 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
/* Turn off SSC1 */
val &= ~DREF_SSC1_ENABLE;
- intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
- intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
+ intel_de_write(display, PCH_DREF_CONTROL, val);
+ intel_de_posting_read(display, PCH_DREF_CONTROL);
udelay(200);
}
}
- drm_WARN_ON(&dev_priv->drm, val != final);
+ drm_WARN_ON(display->drm, val != final);
}
/*
diff --git a/drivers/gpu/drm/i915/display/intel_pfit.c b/drivers/gpu/drm/i915/display/intel_pfit.c
index 4ee03d9d14ad..3c3ecf288570 100644
--- a/drivers/gpu/drm/i915/display/intel_pfit.c
+++ b/drivers/gpu/drm/i915/display/intel_pfit.c
@@ -3,13 +3,17 @@
* Copyright © 2024 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
#include "i915_utils.h"
+#include "intel_de.h"
#include "intel_display_core.h"
#include "intel_display_driver.h"
#include "intel_display_types.h"
#include "intel_lvds_regs.h"
#include "intel_pfit.h"
+#include "intel_pfit_regs.h"
static int intel_pch_pfit_check_dst_window(const struct intel_crtc_state *crtc_state)
{
@@ -542,8 +546,8 @@ out:
return intel_gmch_pfit_check_timings(crtc_state);
}
-int intel_panel_fitting(struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+int intel_pfit_compute_config(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(crtc_state);
@@ -552,3 +556,165 @@ int intel_panel_fitting(struct intel_crtc_state *crtc_state,
else
return pch_panel_fitting(crtc_state, conn_state);
}
+
+void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
+ enum pipe pipe = crtc->pipe;
+ int width = drm_rect_width(dst);
+ int height = drm_rect_height(dst);
+ int x = dst->x1;
+ int y = dst->y1;
+
+ if (!crtc_state->pch_pfit.enabled)
+ return;
+
+ /*
+ * Force use of hard-coded filter coefficients as some pre-programmed
+ * values are broken, e.g. x201.
+ */
+ if (display->platform.ivybridge || display->platform.haswell)
+ intel_de_write_fw(display, PF_CTL(pipe), PF_ENABLE |
+ PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
+ else
+ intel_de_write_fw(display, PF_CTL(pipe), PF_ENABLE |
+ PF_FILTER_MED_3x3);
+ intel_de_write_fw(display, PF_WIN_POS(pipe),
+ PF_WIN_XPOS(x) | PF_WIN_YPOS(y));
+ intel_de_write_fw(display, PF_WIN_SZ(pipe),
+ PF_WIN_XSIZE(width) | PF_WIN_YSIZE(height));
+}
+
+void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_display *display = to_intel_display(old_crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ enum pipe pipe = crtc->pipe;
+
+ /*
+ * To avoid upsetting the power well on haswell only disable the pfit if
+ * it's in use. The hw state code will make sure we get this right.
+ */
+ if (!old_crtc_state->pch_pfit.enabled)
+ return;
+
+ intel_de_write_fw(display, PF_CTL(pipe), 0);
+ intel_de_write_fw(display, PF_WIN_POS(pipe), 0);
+ intel_de_write_fw(display, PF_WIN_SZ(pipe), 0);
+}
+
+void ilk_pfit_get_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ u32 ctl, pos, size;
+ enum pipe pipe;
+
+ ctl = intel_de_read(display, PF_CTL(crtc->pipe));
+ if ((ctl & PF_ENABLE) == 0)
+ return;
+
+ if (display->platform.ivybridge || display->platform.haswell)
+ pipe = REG_FIELD_GET(PF_PIPE_SEL_MASK_IVB, ctl);
+ else
+ pipe = crtc->pipe;
+
+ crtc_state->pch_pfit.enabled = true;
+
+ pos = intel_de_read(display, PF_WIN_POS(crtc->pipe));
+ size = intel_de_read(display, PF_WIN_SZ(crtc->pipe));
+
+ drm_rect_init(&crtc_state->pch_pfit.dst,
+ REG_FIELD_GET(PF_WIN_XPOS_MASK, pos),
+ REG_FIELD_GET(PF_WIN_YPOS_MASK, pos),
+ REG_FIELD_GET(PF_WIN_XSIZE_MASK, size),
+ REG_FIELD_GET(PF_WIN_YSIZE_MASK, size));
+
+ /*
+ * We currently do not free assignments of panel fitters on
+ * ivb/hsw (since we don't use the higher upscaling modes which
+ * differentiate them) so just WARN about this case for now.
+ */
+ drm_WARN_ON(display->drm, pipe != crtc->pipe);
+}
+
+void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (!crtc_state->gmch_pfit.control)
+ return;
+
+ /*
+ * The panel fitter should only be adjusted whilst the pipe is disabled,
+ * according to register description and PRM.
+ */
+ drm_WARN_ON(display->drm,
+ intel_de_read(display, PFIT_CONTROL(display)) & PFIT_ENABLE);
+ assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
+
+ intel_de_write(display, PFIT_PGM_RATIOS(display),
+ crtc_state->gmch_pfit.pgm_ratios);
+ intel_de_write(display, PFIT_CONTROL(display),
+ crtc_state->gmch_pfit.control);
+
+ /*
+ * Border color in case we don't scale up to the full screen. Black by
+ * default, change to something else for debugging.
+ */
+ intel_de_write(display, BCLRPAT(display, crtc->pipe), 0);
+}
+
+void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_display *display = to_intel_display(old_crtc_state);
+
+ if (!old_crtc_state->gmch_pfit.control)
+ return;
+
+ assert_transcoder_disabled(display, old_crtc_state->cpu_transcoder);
+
+ drm_dbg_kms(display->drm, "disabling pfit, current: 0x%08x\n",
+ intel_de_read(display, PFIT_CONTROL(display)));
+ intel_de_write(display, PFIT_CONTROL(display), 0);
+}
+
+static bool i9xx_has_pfit(struct intel_display *display)
+{
+ if (display->platform.i830)
+ return false;
+
+ return DISPLAY_VER(display) >= 4 ||
+ display->platform.pineview || display->platform.mobile;
+}
+
+void i9xx_pfit_get_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum pipe pipe;
+ u32 tmp;
+
+ if (!i9xx_has_pfit(display))
+ return;
+
+ tmp = intel_de_read(display, PFIT_CONTROL(display));
+ if (!(tmp & PFIT_ENABLE))
+ return;
+
+ /* Check whether the pfit is attached to our pipe. */
+ if (DISPLAY_VER(display) >= 4)
+ pipe = REG_FIELD_GET(PFIT_PIPE_MASK, tmp);
+ else
+ pipe = PIPE_B;
+
+ if (pipe != crtc->pipe)
+ return;
+
+ crtc_state->gmch_pfit.control = tmp;
+ crtc_state->gmch_pfit.pgm_ratios =
+ intel_de_read(display, PFIT_PGM_RATIOS(display));
+}
diff --git a/drivers/gpu/drm/i915/display/intel_pfit.h b/drivers/gpu/drm/i915/display/intel_pfit.h
index add8d78de2c9..ef34f9b49d09 100644
--- a/drivers/gpu/drm/i915/display/intel_pfit.h
+++ b/drivers/gpu/drm/i915/display/intel_pfit.h
@@ -9,7 +9,13 @@
struct drm_connector_state;
struct intel_crtc_state;
-int intel_panel_fitting(struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
+int intel_pfit_compute_config(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
+void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
+void ilk_pfit_get_config(struct intel_crtc_state *crtc_state);
+void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state);
+void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state);
+void i9xx_pfit_get_config(struct intel_crtc_state *crtc_state);
#endif /* __INTEL_PFIT_H__ */
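A sketch of the intended split between the hooks declared above, assuming the i915 headers; the wrapper name is hypothetical and the dispatch condition simply mirrors intel_pfit_compute_config() in the .c hunk (GMCH-style fitter vs. the ILK+ PCH/CPU fitter). The real callers are the platform CRTC enable/disable and state readout paths.

	static void example_crtc_enable_pfit(const struct intel_crtc_state *crtc_state)
	{
		struct intel_display *display = to_intel_display(crtc_state);

		if (HAS_GMCH(display))
			i9xx_pfit_enable(crtc_state);	/* GMCH-style panel fitter */
		else
			ilk_pfit_enable(crtc_state);	/* ILK+ PCH/CPU panel fitter */
	}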
diff --git a/drivers/gpu/drm/i915/display/intel_pfit_regs.h b/drivers/gpu/drm/i915/display/intel_pfit_regs.h
new file mode 100644
index 000000000000..add8ce28004e
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pfit_regs.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __INTEL_PFIT_REGS_H__
+#define __INTEL_PFIT_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+/* Panel fitting */
+#define PFIT_CONTROL(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61230)
+#define PFIT_ENABLE REG_BIT(31)
+#define PFIT_PIPE_MASK REG_GENMASK(30, 29) /* 965+ */
+#define PFIT_PIPE(pipe) REG_FIELD_PREP(PFIT_PIPE_MASK, (pipe))
+#define PFIT_SCALING_MASK REG_GENMASK(28, 26) /* 965+ */
+#define PFIT_SCALING_AUTO REG_FIELD_PREP(PFIT_SCALING_MASK, 0)
+#define PFIT_SCALING_PROGRAMMED REG_FIELD_PREP(PFIT_SCALING_MASK, 1)
+#define PFIT_SCALING_PILLAR REG_FIELD_PREP(PFIT_SCALING_MASK, 2)
+#define PFIT_SCALING_LETTER REG_FIELD_PREP(PFIT_SCALING_MASK, 3)
+#define PFIT_FILTER_MASK REG_GENMASK(25, 24) /* 965+ */
+#define PFIT_FILTER_FUZZY REG_FIELD_PREP(PFIT_FILTER_MASK, 0)
+#define PFIT_FILTER_CRISP REG_FIELD_PREP(PFIT_FILTER_MASK, 1)
+#define PFIT_FILTER_MEDIAN REG_FIELD_PREP(PFIT_FILTER_MASK, 2)
+#define PFIT_VERT_INTERP_MASK REG_GENMASK(11, 10) /* pre-965 */
+#define PFIT_VERT_INTERP_BILINEAR REG_FIELD_PREP(PFIT_VERT_INTERP_MASK, 1)
+#define PFIT_VERT_AUTO_SCALE REG_BIT(9) /* pre-965 */
+#define PFIT_HORIZ_INTERP_MASK REG_GENMASK(7, 6) /* pre-965 */
+#define PFIT_HORIZ_INTERP_BILINEAR REG_FIELD_PREP(PFIT_HORIZ_INTERP_MASK, 1)
+#define PFIT_HORIZ_AUTO_SCALE REG_BIT(5) /* pre-965 */
+#define PFIT_PANEL_8TO6_DITHER_ENABLE REG_BIT(3) /* pre-965 */
+
+#define PFIT_PGM_RATIOS(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61234)
+#define PFIT_VERT_SCALE_MASK REG_GENMASK(31, 20) /* pre-965 */
+#define PFIT_VERT_SCALE(x) REG_FIELD_PREP(PFIT_VERT_SCALE_MASK, (x))
+#define PFIT_HORIZ_SCALE_MASK REG_GENMASK(15, 4) /* pre-965 */
+#define PFIT_HORIZ_SCALE(x) REG_FIELD_PREP(PFIT_HORIZ_SCALE_MASK, (x))
+#define PFIT_VERT_SCALE_MASK_965 REG_GENMASK(28, 16) /* 965+ */
+#define PFIT_HORIZ_SCALE_MASK_965 REG_GENMASK(12, 0) /* 965+ */
+
+#define PFIT_AUTO_RATIOS(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61238)
+
+/* CPU panel fitter */
+/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
+#define _PFA_CTL_1 0x68080
+#define _PFB_CTL_1 0x68880
+#define PF_CTL(pipe) _MMIO_PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1)
+#define PF_ENABLE REG_BIT(31)
+#define PF_PIPE_SEL_MASK_IVB REG_GENMASK(30, 29) /* ivb/hsw */
+#define PF_PIPE_SEL_IVB(pipe) REG_FIELD_PREP(PF_PIPE_SEL_MASK_IVB, (pipe))
+#define PF_FILTER_MASK REG_GENMASK(24, 23)
+#define PF_FILTER_PROGRAMMED REG_FIELD_PREP(PF_FILTER_MASK, 0)
+#define PF_FILTER_MED_3x3 REG_FIELD_PREP(PF_FILTER_MASK, 1)
+#define PF_FILTER_EDGE_ENHANCE REG_FIELD_PREP(PF_FILTER_EDGE_MASK, 2)
+#define PF_FILTER_EDGE_SOFTEN REG_FIELD_PREP(PF_FILTER_EDGE_MASK, 3)
+
+#define _PFA_WIN_SZ 0x68074
+#define _PFB_WIN_SZ 0x68874
+#define PF_WIN_SZ(pipe) _MMIO_PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ)
+#define PF_WIN_XSIZE_MASK REG_GENMASK(31, 16)
+#define PF_WIN_XSIZE(w) REG_FIELD_PREP(PF_WIN_XSIZE_MASK, (w))
+#define PF_WIN_YSIZE_MASK REG_GENMASK(15, 0)
+#define PF_WIN_YSIZE(h) REG_FIELD_PREP(PF_WIN_YSIZE_MASK, (h))
+
+#define _PFA_WIN_POS 0x68070
+#define _PFB_WIN_POS 0x68870
+#define PF_WIN_POS(pipe) _MMIO_PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS)
+#define PF_WIN_XPOS_MASK REG_GENMASK(31, 16)
+#define PF_WIN_XPOS(x) REG_FIELD_PREP(PF_WIN_XPOS_MASK, (x))
+#define PF_WIN_YPOS_MASK REG_GENMASK(15, 0)
+#define PF_WIN_YPOS(y) REG_FIELD_PREP(PF_WIN_YPOS_MASK, (y))
+
+#define _PFA_VSCALE 0x68084
+#define _PFB_VSCALE 0x68884
+#define PF_VSCALE(pipe) _MMIO_PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
+
+#define _PFA_HSCALE 0x68090
+#define _PFB_HSCALE 0x68890
+#define PF_HSCALE(pipe) _MMIO_PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
+
+#endif /* __INTEL_PFIT_REGS_H__ */
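For readers less used to the REG_GENMASK()/REG_FIELD_PREP()/REG_FIELD_GET() idiom used throughout the new header, here is a standalone, plain-C restatement of the arithmetic; the helper names are local to this example and deliberately simplified (no compile-time checks), not the kernel macros themselves.

	#include <stdint.h>
	#include <stdio.h>

	#define GENMASK32(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
	#define FIELD_PREP32(mask, v)	(((uint32_t)(v) << __builtin_ctz(mask)) & (mask))
	#define FIELD_GET32(mask, r)	(((r) & (mask)) >> __builtin_ctz(mask))

	#define XPOS_MASK	GENMASK32(31, 16)	/* mirrors PF_WIN_XPOS_MASK */
	#define YPOS_MASK	GENMASK32(15, 0)	/* mirrors PF_WIN_YPOS_MASK */

	int main(void)
	{
		uint32_t win_pos = FIELD_PREP32(XPOS_MASK, 128) |
				   FIELD_PREP32(YPOS_MASK, 64);

		printf("PF_WIN_POS value: 0x%08x (x=%u, y=%u)\n", win_pos,
		       FIELD_GET32(XPOS_MASK, win_pos),
		       FIELD_GET32(YPOS_MASK, win_pos));
		return 0;
	}

Compiled with gcc this prints 0x00800040 (x=128, y=64), which is the same packing ilk_pfit_enable() above performs when it writes PF_WIN_POS(pipe) from the destination rectangle.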
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 90efc6f64e52..10e26c3db946 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -582,6 +582,7 @@ int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
int intel_crtc_set_crc_source(struct drm_crtc *_crtc, const char *source_name)
{
struct intel_crtc *crtc = to_intel_crtc(_crtc);
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
enum intel_display_power_domain power_domain;
@@ -598,7 +599,7 @@ int intel_crtc_set_crc_source(struct drm_crtc *_crtc, const char *source_name)
}
power_domain = POWER_DOMAIN_PIPE(pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ wakeref = intel_display_power_get_if_enabled(display, power_domain);
if (!wakeref) {
drm_dbg_kms(&dev_priv->drm,
"Trying to capture CRC while pipe is off\n");
@@ -628,7 +629,7 @@ out:
if (!enable)
intel_crtc_crc_setup_workarounds(crtc, false);
- intel_display_power_put(dev_priv, power_domain, wakeref);
+ intel_display_power_put(display, power_domain, wakeref);
return ret;
}
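The display power conversion in this hunk (and in many of the hunks above) always follows the same wakeref pattern; a sketch, assuming the i915 headers, with the function name and the elided register work purely illustrative.

	static void example_with_pipe_power(struct intel_display *display,
					    enum pipe pipe)
	{
		enum intel_display_power_domain power_domain = POWER_DOMAIN_PIPE(pipe);
		intel_wakeref_t wakeref;

		wakeref = intel_display_power_get_if_enabled(display, power_domain);
		if (!wakeref)
			return;		/* power well is off, nothing to program */

		/* ... read or program registers backed by this power well ... */

		intel_display_power_put(display, power_domain, wakeref);
	}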
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.h b/drivers/gpu/drm/i915/display/intel_pipe_crc.h
index 43012b189415..6ddcea38488b 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.h
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.h
@@ -9,7 +9,6 @@
#include <linux/types.h>
struct drm_crtc;
-struct drm_i915_private;
struct intel_crtc;
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index 6789b7f14095..b1675b46e06c 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -14,6 +14,11 @@
#include "intel_frontbuffer.h"
#include "intel_plane_initial.h"
+void intel_plane_initial_vblank_wait(struct intel_crtc *crtc)
+{
+ intel_crtc_wait_for_next_vblank(crtc);
+}
+
static bool
intel_reuse_initial_plane_obj(struct intel_crtc *this,
const struct intel_initial_plane_config plane_configs[],
@@ -442,7 +447,7 @@ void intel_initial_plane_config(struct intel_display *display)
intel_find_initial_plane_obj(crtc, plane_configs);
if (display->funcs.display->fixup_initial_plane_config(crtc, plane_config))
- intel_crtc_wait_for_next_vblank(crtc);
+ intel_plane_initial_vblank_wait(crtc);
plane_config_fini(plane_config);
}
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.h b/drivers/gpu/drm/i915/display/intel_plane_initial.h
index 6c6aa717ed21..5c315acda210 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.h
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.h
@@ -6,8 +6,10 @@
#ifndef __INTEL_PLANE_INITIAL_H__
#define __INTEL_PLANE_INITIAL_H__
+struct intel_crtc;
struct intel_display;
void intel_initial_plane_config(struct intel_display *display);
+void intel_plane_initial_vblank_wait(struct intel_crtc *crtc);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c
index 975520322136..63301a01906c 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.c
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c
@@ -609,7 +609,7 @@ intel_pmdemand_program_params(struct intel_display *display,
goto unlock;
drm_dbg_kms(display->drm,
- "initate pmdemand request values: (0x%x 0x%x)\n",
+ "initiate pmdemand request values: (0x%x 0x%x)\n",
mod_reg1, mod_reg2);
intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index eb35f0249f2b..617ce4993172 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -65,13 +65,12 @@ static const char *pps_name(struct intel_dp *intel_dp)
intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
intel_wakeref_t wakeref;
/*
* See vlv_pps_reset_all() why we need a power domain reference here.
*/
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
+ wakeref = intel_display_power_get(display, POWER_DOMAIN_DISPLAY_CORE);
mutex_lock(&display->pps.mutex);
return wakeref;
@@ -81,10 +80,9 @@ intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
intel_wakeref_t wakeref)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
mutex_unlock(&display->pps.mutex);
- intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return NULL;
}
@@ -136,7 +134,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
release_cl_override = display->platform.cherryview &&
!chv_phy_powergate_ch(display, phy, ch, true);
- if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) {
+ if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(display))) {
drm_err(display->drm,
"Failed to force on PLL for pipe %c!\n",
pipe_name(pipe));
@@ -741,7 +739,6 @@ static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
u32 pp;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
@@ -759,7 +756,7 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
return need_to_disable;
drm_WARN_ON(display->drm, intel_dp->pps.vdd_wakeref);
- intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
+ intel_dp->pps.vdd_wakeref = intel_display_power_get(display,
intel_aux_power_domain(dig_port));
pp_stat_reg = _pp_stat_reg(intel_dp);
@@ -825,7 +822,6 @@ void intel_pps_vdd_on(struct intel_dp *intel_dp)
static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
u32 pp;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
@@ -863,7 +859,7 @@ static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
intel_dp_invalidate_source_oui(intel_dp);
}
- intel_display_power_put(dev_priv,
+ intel_display_power_put(display,
intel_aux_power_domain(dig_port),
fetch_and_zero(&intel_dp->pps.vdd_wakeref));
}
@@ -1036,7 +1032,6 @@ void intel_pps_on(struct intel_dp *intel_dp)
void intel_pps_off_unlocked(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
u32 pp;
i915_reg_t pp_ctrl_reg;
@@ -1074,7 +1069,7 @@ void intel_pps_off_unlocked(struct intel_dp *intel_dp)
intel_dp_invalidate_source_oui(intel_dp);
/* We got a reference when we enabled the VDD. */
- intel_display_power_put(dev_priv,
+ intel_display_power_put(display,
intel_aux_power_domain(dig_port),
fetch_and_zero(&intel_dp->pps.vdd_wakeref));
}
@@ -1230,11 +1225,10 @@ static void vlv_steal_power_sequencer(struct intel_display *display,
static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
enum pipe pipe;
- if (g4x_dp_port_enabled(dev_priv, intel_dp->output_reg,
+ if (g4x_dp_port_enabled(display, intel_dp->output_reg,
encoder->port, &pipe))
return pipe;
@@ -1338,7 +1332,6 @@ void vlv_pps_port_disable(struct intel_encoder *encoder,
static void pps_vdd_init(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
lockdep_assert_held(&display->pps.mutex);
@@ -1357,7 +1350,7 @@ static void pps_vdd_init(struct intel_dp *intel_dp)
dig_port->base.base.base.id, dig_port->base.base.name,
pps_name(intel_dp));
drm_WARN_ON(display->drm, intel_dp->pps.vdd_wakeref);
- intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
+ intel_dp->pps.vdd_wakeref = intel_display_power_get(display,
intel_aux_power_domain(dig_port));
}
@@ -1501,8 +1494,9 @@ static void pps_init_delays_vbt(struct intel_dp *intel_dp,
if (!pps_delays_valid(vbt))
return;
- /* On Toshiba Satellite P50-C-18C system the VBT T12 delay
- * of 500ms appears to be too short. Ocassionally the panel
+ /*
+ * On Toshiba Satellite P50-C-18C system the VBT T12 delay
+ * of 500ms appears to be too short. Occasionally the panel
* just fails to power back on. Increasing the delay to 800ms
* seems sufficient to avoid this problem.
*/
@@ -1864,13 +1858,13 @@ void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
break;
case PANEL_PORT_SELECT_DPA:
- g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
+ g4x_dp_port_enabled(display, DP_A, PORT_A, &panel_pipe);
break;
case PANEL_PORT_SELECT_DPC:
- g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
+ g4x_dp_port_enabled(display, PCH_DP_C, PORT_C, &panel_pipe);
break;
case PANEL_PORT_SELECT_DPD:
- g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
+ g4x_dp_port_enabled(display, PCH_DP_D, PORT_D, &panel_pipe);
break;
default:
MISSING_CASE(port_sel);
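The intel_pps.c hunks above all make the same conversion: display power references are taken and released against struct intel_display instead of struct drm_i915_private, so each call site can drop its local dev_priv. A minimal sketch of the resulting call shape, condensed from the hunks above (the function names here are illustrative placeholders, and locking, logging and error handling are omitted):

static void pps_vdd_get_sketch(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

        /* The AUX power reference is now taken against intel_display... */
        intel_dp->pps.vdd_wakeref =
                intel_display_power_get(display,
                                        intel_aux_power_domain(dig_port));
}

static void pps_vdd_put_sketch(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

        /* ...and released the same way, consuming the stored wakeref. */
        intel_display_power_put(display,
                                intel_aux_power_domain(dig_port),
                                fetch_and_zero(&intel_dp->pps.vdd_wakeref));
}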
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 0b021acb330f..4e938bad808c 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -44,6 +44,7 @@
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_snps_phy.h"
+#include "intel_vblank.h"
#include "skl_universal_plane.h"
/**
@@ -154,7 +155,7 @@
*
* Unfortunately CHICKEN_TRANS itself seems to be double buffered
* and thus won't latch until the first vblank. So with DC states
- * enabled the register effctively uses the reset value during DC5
+ * enabled the register effectively uses the reset value during DC5
* exit+PSR exit sequence, and thus the bit does nothing until
* latched by the vblank that it was trying to prevent from being
* generated in the first place. So we should probably call this
@@ -171,7 +172,7 @@
* CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
*
* On BDW without this bit is no vblanks whatsoever are
- * generated after PSR exit. On HSW this has no apparant effect.
+ * generated after PSR exit. On HSW this has no apparent effect.
* WaPsrDPRSUnmaskVBlankInSRD says to set this.
*
* The rest of the bits are more self-explanatory and/or
@@ -185,7 +186,7 @@
* has_psr + has_panel_replay: Panel Replay
* has_psr + has_panel_replay + has_sel_update: Panel Replay Selective Update
*
- * Description of some intel_psr varibles. enabled, panel_replay_enabled,
+ * Description of some intel_psr variables. enabled, panel_replay_enabled,
* sel_update_enabled
*
* enabled (alone): PSR1
@@ -814,8 +815,8 @@ static void intel_psr_enable_sink_alpm(struct intel_dp *intel_dp,
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, val);
}
-void intel_psr_enable_sink(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+static void intel_psr_enable_sink(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
intel_psr_enable_sink_alpm(intel_dp, crtc_state);
@@ -827,6 +828,13 @@ void intel_psr_enable_sink(struct intel_dp *intel_dp,
drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
+void intel_psr_panel_replay_enable_sink(struct intel_dp *intel_dp)
+{
+ if (CAN_PANEL_REPLAY(intel_dp))
+ drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG,
+ DP_PANEL_REPLAY_ENABLE);
+}
+
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
@@ -1043,7 +1051,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
};
/*
* Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
- * comments bellow for more information
+ * comments below for more information
*/
int tmp;
@@ -1991,18 +1999,25 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
if (!psr_interrupt_error_check(intel_dp))
return;
- if (intel_dp->psr.panel_replay_enabled) {
+ if (intel_dp->psr.panel_replay_enabled)
drm_dbg_kms(display->drm, "Enabling Panel Replay\n");
- } else {
+ else
drm_dbg_kms(display->drm, "Enabling PSR%s\n",
intel_dp->psr.sel_update_enabled ? "2" : "1");
- /*
- * Panel replay has to be enabled before link training: doing it
- * only for PSR here.
- */
- intel_psr_enable_sink(intel_dp, crtc_state);
- }
+ /*
+ * Enabling here only for PSR. Panel Replay enable bit is already
+ * written at this point. See
+ * intel_psr_panel_replay_enable_sink. Modifiers/options:
+ * - Selective Update
+ * - Region Early Transport
+ * - Selective Update Region Scanline Capture
+ * - VSC_SDP_CRC
+ * - HPD on different Errors
+ * - CRC verification
+ * are written for PSR and Panel Replay here.
+ */
+ intel_psr_enable_sink(intel_dp, crtc_state);
if (intel_dp_is_edp(intel_dp))
intel_snps_phy_update_psr_power_state(&dig_port->base, true);
@@ -2172,7 +2187,8 @@ void intel_psr_disable(struct intel_dp *intel_dp,
if (!old_crtc_state->has_psr)
return;
- if (drm_WARN_ON(display->drm, !CAN_PSR(intel_dp)))
+ if (drm_WARN_ON(display->drm, !CAN_PSR(intel_dp) &&
+ !CAN_PANEL_REPLAY(intel_dp)))
return;
mutex_lock(&intel_dp->psr.lock);
@@ -2275,6 +2291,27 @@ bool intel_psr_needs_block_dc_vblank(const struct intel_crtc_state *crtc_state)
return false;
}
+/**
+ * intel_psr_trigger_frame_change_event - Trigger "Frame Change" event
+ * @dsb: DSB context
+ * @state: the atomic state
+ * @crtc: the CRTC
+ *
+ * Generate PSR "Frame Change" event.
+ */
+void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
+ struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state =
+ intel_pre_commit_crtc_state(state, crtc);
+ struct intel_display *display = to_intel_display(crtc);
+
+ if (crtc_state->has_psr)
+ intel_de_write_dsb(display, dsb,
+ CURSURFLIVE(display, crtc->pipe), 0);
+}
+
static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
{
struct drm_i915_private *dev_priv = to_i915(display->drm);
@@ -2310,18 +2347,9 @@ static u32 man_trk_ctl_continuos_full_frame(struct intel_display *display)
PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
}
-static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
+static void intel_psr_force_update(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
-
- if (intel_dp->psr.psr2_sel_fetch_enabled)
- intel_de_write(display,
- PSR2_MAN_TRK_CTL(display, cpu_transcoder),
- man_trk_ctl_enable_bit_get(display) |
- man_trk_ctl_partial_frame_bit_get(display) |
- man_trk_ctl_single_full_frame_bit_get(display) |
- man_trk_ctl_continuos_full_frame(display));
/*
* Display WA #0884: skl+
@@ -2339,7 +2367,8 @@ static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
intel_de_write(display, CURSURFLIVE(display, intel_dp->psr.pipe), 0);
}
-void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
+void intel_psr2_program_trans_man_trk_ctl(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -2353,20 +2382,23 @@ void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_st
crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- lockdep_assert_held(&intel_dp->psr.lock);
- if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
+ if (!dsb)
+ lockdep_assert_held(&intel_dp->psr.lock);
+
+ if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_cff_enabled)
return;
break;
}
- intel_de_write(display, PSR2_MAN_TRK_CTL(display, cpu_transcoder),
- crtc_state->psr2_man_track_ctl);
+ intel_de_write_dsb(display, dsb,
+ PSR2_MAN_TRK_CTL(display, cpu_transcoder),
+ crtc_state->psr2_man_track_ctl);
if (!crtc_state->enable_psr2_su_region_et)
return;
- intel_de_write(display, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
- crtc_state->pipe_srcsz_early_tpt);
+ intel_de_write_dsb(display, dsb, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
+ crtc_state->pipe_srcsz_early_tpt);
}
static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
@@ -2381,7 +2413,6 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
val |= man_trk_ctl_partial_frame_bit_get(display);
if (full_update) {
- val |= man_trk_ctl_single_full_frame_bit_get(display);
val |= man_trk_ctl_continuos_full_frame(display);
goto exit;
}
@@ -2790,32 +2821,30 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
old_crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_psr *psr = &intel_dp->psr;
- bool needs_to_disable = false;
mutex_lock(&psr->lock);
- /*
- * Reasons to disable:
- * - PSR disabled in new state
- * - All planes will go inactive
- * - Changing between PSR versions
- * - Region Early Transport changing
- * - Display WA #1136: skl, bxt
- */
- needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
- needs_to_disable |= !new_crtc_state->has_psr;
- needs_to_disable |= !new_crtc_state->active_planes;
- needs_to_disable |= new_crtc_state->has_sel_update != psr->sel_update_enabled;
- needs_to_disable |= new_crtc_state->enable_psr2_su_region_et !=
- psr->su_region_et_enabled;
- needs_to_disable |= DISPLAY_VER(i915) < 11 &&
- new_crtc_state->wm_level_disabled;
-
- if (psr->enabled && needs_to_disable)
- intel_psr_disable_locked(intel_dp);
- else if (psr->enabled && new_crtc_state->wm_level_disabled)
- /* Wa_14015648006 */
- wm_optimization_wa(intel_dp, new_crtc_state);
+ if (psr->enabled) {
+ /*
+ * Reasons to disable:
+ * - PSR disabled in new state
+ * - All planes will go inactive
+ * - Changing between PSR versions
+ * - Region Early Transport changing
+ * - Display WA #1136: skl, bxt
+ */
+ if (intel_crtc_needs_modeset(new_crtc_state) ||
+ !new_crtc_state->has_psr ||
+ !new_crtc_state->active_planes ||
+ new_crtc_state->has_sel_update != psr->sel_update_enabled ||
+ new_crtc_state->enable_psr2_su_region_et != psr->su_region_et_enabled ||
+ new_crtc_state->has_panel_replay != psr->panel_replay_enabled ||
+ (DISPLAY_VER(i915) < 11 && new_crtc_state->wm_level_disabled))
+ intel_psr_disable_locked(intel_dp);
+ else if (new_crtc_state->wm_level_disabled)
+ /* Wa_14015648006 */
+ wm_optimization_wa(intel_dp, new_crtc_state);
+ }
mutex_unlock(&psr->lock);
}
@@ -2858,7 +2887,7 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state,
/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
if (crtc_state->crc_enabled && psr->enabled)
- psr_force_hw_tracking_exit(intel_dp);
+ intel_psr_force_update(intel_dp);
/*
* Clear possible busy bits in case we have
@@ -3120,31 +3149,35 @@ unlock:
mutex_unlock(&intel_dp->psr.lock);
}
-static void _psr_invalidate_handle(struct intel_dp *intel_dp)
+static void intel_psr_configure_full_frame_update(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
- if (intel_dp->psr.psr2_sel_fetch_enabled) {
- u32 val;
-
- if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
- /* Send one update otherwise lag is observed in screen */
- intel_de_write(display,
- CURSURFLIVE(display, intel_dp->psr.pipe),
- 0);
- return;
- }
+ if (!intel_dp->psr.psr2_sel_fetch_enabled)
+ return;
- val = man_trk_ctl_enable_bit_get(display) |
- man_trk_ctl_partial_frame_bit_get(display) |
- man_trk_ctl_continuos_full_frame(display);
+ if (DISPLAY_VER(display) >= 20)
+ intel_de_write(display, LNL_SFF_CTL(cpu_transcoder),
+ LNL_SFF_CTL_SF_SINGLE_FULL_FRAME);
+ else
intel_de_write(display,
PSR2_MAN_TRK_CTL(display, cpu_transcoder),
- val);
- intel_de_write(display,
- CURSURFLIVE(display, intel_dp->psr.pipe), 0);
- intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
+ man_trk_ctl_enable_bit_get(display) |
+ man_trk_ctl_partial_frame_bit_get(display) |
+ man_trk_ctl_single_full_frame_bit_get(display) |
+ man_trk_ctl_continuos_full_frame(display));
+}
+
+static void _psr_invalidate_handle(struct intel_dp *intel_dp)
+{
+ if (intel_dp->psr.psr2_sel_fetch_enabled) {
+ if (!intel_dp->psr.psr2_sel_fetch_cff_enabled) {
+ intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
+ intel_psr_configure_full_frame_update(intel_dp);
+ }
+
+ intel_psr_force_update(intel_dp);
} else {
intel_psr_exit(intel_dp);
}
@@ -3225,44 +3258,31 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
struct drm_i915_private *dev_priv = to_i915(display->drm);
- enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
if (intel_dp->psr.psr2_sel_fetch_enabled) {
if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
/* can we turn CFF off? */
- if (intel_dp->psr.busy_frontbuffer_bits == 0) {
- u32 val = man_trk_ctl_enable_bit_get(display) |
- man_trk_ctl_partial_frame_bit_get(display) |
- man_trk_ctl_single_full_frame_bit_get(display) |
- man_trk_ctl_continuos_full_frame(display);
-
- /*
- * Set psr2_sel_fetch_cff_enabled as false to allow selective
- * updates. Still keep cff bit enabled as we don't have proper
- * SU configuration in case update is sent for any reason after
- * sff bit gets cleared by the HW on next vblank.
- */
- intel_de_write(display,
- PSR2_MAN_TRK_CTL(display, cpu_transcoder),
- val);
- intel_de_write(display,
- CURSURFLIVE(display, intel_dp->psr.pipe),
- 0);
+ if (intel_dp->psr.busy_frontbuffer_bits == 0)
intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
- }
- } else {
- /*
- * continuous full frame is disabled, only a single full
- * frame is required
- */
- psr_force_hw_tracking_exit(intel_dp);
}
- } else {
- psr_force_hw_tracking_exit(intel_dp);
- if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
- queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
+ /*
+ * Still keep cff bit enabled as we don't have proper SU
+ * configuration in case update is sent for any reason after
+ * sff bit gets cleared by the HW on next vblank.
+ *
+ * NOTE: Setting cff bit is not needed for LunarLake onwards as
+ * we have our own register for the SFF bit and we are not
+ * overwriting the existing SU configuration.
+ */
+ intel_psr_configure_full_frame_update(intel_dp);
}
+
+ intel_psr_force_update(intel_dp);
+
+ if (!intel_dp->psr.psr2_sel_fetch_enabled && !intel_dp->psr.active &&
+ !intel_dp->psr.busy_frontbuffer_bits)
+ queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
}
/**
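The invalidate/flush rework above splits the old psr_force_hw_tracking_exit() into two helpers: intel_psr_configure_full_frame_update() arms a full-frame update (through the dedicated LNL_SFF_CTL register on display version 20+, through the PSR2_MAN_TRK_CTL SFF/CFF bits before that), and intel_psr_force_update() pokes CURSURFLIVE to push a frame out. A minimal sketch of the resulting flush flow, condensed from _psr_flush_handle() above with the nested checks folded together; the workqueue kick for the non-selective-fetch case is left out:

if (intel_dp->psr.psr2_sel_fetch_enabled) {
        /* Continuous-full-frame tracking can be dropped once no
         * frontbuffer bits are busy any more... */
        if (intel_dp->psr.psr2_sel_fetch_cff_enabled &&
            intel_dp->psr.busy_frontbuffer_bits == 0)
                intel_dp->psr.psr2_sel_fetch_cff_enabled = false;

        /* ...but the hardware full-frame request is re-armed either way. */
        intel_psr_configure_full_frame_update(intel_dp);
}

/* Finally push one frame out by writing CURSURFLIVE. */
intel_psr_force_update(intel_dp);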
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 956be263c09e..a43a374cff55 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -17,6 +17,7 @@ struct intel_crtc;
struct intel_crtc_state;
struct intel_display;
struct intel_dp;
+struct intel_dsb;
struct intel_encoder;
struct intel_plane;
struct intel_plane_state;
@@ -28,8 +29,7 @@ bool intel_encoder_can_psr(struct intel_encoder *encoder);
bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
-void intel_psr_enable_sink(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
+void intel_psr_panel_replay_enable_sink(struct intel_dp *intel_dp);
void intel_psr_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_psr_post_plane_update(struct intel_atomic_state *state,
@@ -55,7 +55,8 @@ void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_stat
bool intel_psr_enabled(struct intel_dp *intel_dp);
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
-void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state);
+void intel_psr2_program_trans_man_trk_ctl(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state);
void intel_psr_pause(struct intel_dp *intel_dp);
void intel_psr_resume(struct intel_dp *intel_dp);
bool intel_psr_needs_block_dc_vblank(const struct intel_crtc_state *crtc_state);
@@ -63,6 +64,9 @@ bool intel_psr_link_ok(struct intel_dp *intel_dp);
void intel_psr_lock(const struct intel_crtc_state *crtc_state);
void intel_psr_unlock(const struct intel_crtc_state *crtc_state);
+void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
+ struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
void intel_psr_connector_debugfs_add(struct intel_connector *connector);
void intel_psr_debugfs_register(struct intel_display *display);
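With the header changes above, sink-side Panel Replay enabling is requested through intel_psr_panel_replay_enable_sink() (per the comment added in intel_psr_enable_locked(), the enable bit has to be written before link training), while plain PSR sink enabling stays internal via the now-static intel_psr_enable_sink(). The manual-tracking and frame-change writes additionally take a struct intel_dsb * so they can be queued into a DSB; the !dsb lockdep path above suggests that passing NULL keeps the immediate-write behaviour. A hypothetical caller sketch, not taken from the tree — example_commit_step() and its argument list are placeholders, only the intel_psr_* calls and their signatures come from the header above:

static void example_commit_step(struct intel_dsb *dsb,
                                struct intel_atomic_state *state,
                                struct intel_crtc *crtc,
                                const struct intel_crtc_state *crtc_state)
{
        /* Program selective-fetch manual tracking, emitted via the DSB
         * when one is being built. */
        intel_psr2_program_trans_man_trk_ctl(dsb, crtc_state);

        /* Trigger the PSR "Frame Change" event from the same DSB. */
        intel_psr_trigger_frame_change_event(dsb, state, crtc);
}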
diff --git a/drivers/gpu/drm/i915/display/intel_psr_regs.h b/drivers/gpu/drm/i915/display/intel_psr_regs.h
index 9ad7611506e8..795e6b9cc575 100644
--- a/drivers/gpu/drm/i915/display/intel_psr_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_psr_regs.h
@@ -251,6 +251,16 @@
#define ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME REG_BIT(14)
#define ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME REG_BIT(13)
+#define _LNL_SFF_CTL_A 0x60918
+#define _LNL_SFF_CTL_B 0x61918
+#define LNL_SFF_CTL(tran) _MMIO_TRANS(tran, _LNL_SFF_CTL_A, _LNL_SFF_CTL_B)
+#define LNL_SFF_CTL_SF_SINGLE_FULL_FRAME REG_BIT(1)
+
+#define _LNL_CFF_CTL_A 0x6091c
+#define _LNL_CFF_CTL_B 0x6191c
+#define LNL_CFF_CTL(tran) _MMIO_TRANS(tran, _LNL_CFF_CTL_A, _LNL_CFF_CTL_B)
+#define LNL_CFF_CTL_SF_CONTINUOUS_FULL_FRAME REG_BIT(1)
+
/* PSR2 Early transport */
#define _PIPE_SRCSZ_ERLY_TPT_A 0x70074
#define _PIPE_SRCSZ_ERLY_TPT_B 0x71074
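The new LNL_SFF_CTL/LNL_CFF_CTL definitions above follow the same per-transcoder layout as the other PSR registers in this file: one instance per transcoder, 0x1000 apart, selected through _MMIO_TRANS(). A small standalone illustration of that spacing, assuming _MMIO_TRANS() resolves instances as a + tran * (b - a) (the usual even-spacing rule; pick_even() below is not part of the patch):

#include <stdio.h>

/* Illustrative only: prints the assumed per-transcoder offsets for the
 * SFF/CFF registers defined above. */
static unsigned int pick_even(unsigned int idx, unsigned int a, unsigned int b)
{
        return a + idx * (b - a);
}

int main(void)
{
        for (unsigned int tran = 0; tran < 2; tran++)
                printf("transcoder %c: SFF_CTL=0x%05x CFF_CTL=0x%05x\n",
                       'A' + (int)tran,
                       pick_even(tran, 0x60918, 0x61918),
                       pick_even(tran, 0x6091c, 0x6191c));
        return 0;
}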
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index 8b30e9fd936e..a32fae510ed2 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -5,6 +5,8 @@
#include <linux/dmi.h>
+#include <drm/drm_print.h>
+
#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_quirks.h"
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 498b35ec4e0f..6e2d9929b4d7 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -213,29 +213,29 @@ intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
*/
static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
{
- struct drm_device *dev = intel_sdvo->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(&intel_sdvo->base);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 bval = val, cval = val;
int i;
if (HAS_PCH_SPLIT(dev_priv)) {
- intel_de_write(dev_priv, intel_sdvo->sdvo_reg, val);
- intel_de_posting_read(dev_priv, intel_sdvo->sdvo_reg);
+ intel_de_write(display, intel_sdvo->sdvo_reg, val);
+ intel_de_posting_read(display, intel_sdvo->sdvo_reg);
/*
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
*/
if (HAS_PCH_IBX(dev_priv)) {
- intel_de_write(dev_priv, intel_sdvo->sdvo_reg, val);
- intel_de_posting_read(dev_priv, intel_sdvo->sdvo_reg);
+ intel_de_write(display, intel_sdvo->sdvo_reg, val);
+ intel_de_posting_read(display, intel_sdvo->sdvo_reg);
}
return;
}
if (intel_sdvo->base.port == PORT_B)
- cval = intel_de_read(dev_priv, GEN3_SDVOC);
+ cval = intel_de_read(display, GEN3_SDVOC);
else
- bval = intel_de_read(dev_priv, GEN3_SDVOB);
+ bval = intel_de_read(display, GEN3_SDVOB);
/*
* Write the registers twice for luck. Sometimes,
@@ -243,17 +243,17 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
* The BIOS does this too. Yay, magic
*/
for (i = 0; i < 2; i++) {
- intel_de_write(dev_priv, GEN3_SDVOB, bval);
- intel_de_posting_read(dev_priv, GEN3_SDVOB);
+ intel_de_write(display, GEN3_SDVOB, bval);
+ intel_de_posting_read(display, GEN3_SDVOB);
- intel_de_write(dev_priv, GEN3_SDVOC, cval);
- intel_de_posting_read(dev_priv, GEN3_SDVOC);
+ intel_de_write(display, GEN3_SDVOC, cval);
+ intel_de_posting_read(display, GEN3_SDVOC);
}
}
static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
{
- struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_sdvo->base);
struct i2c_msg msgs[] = {
{
.addr = intel_sdvo->target_addr,
@@ -273,7 +273,7 @@ static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
if ((ret = i2c_transfer(intel_sdvo->i2c, msgs, 2)) == 2)
return true;
- drm_dbg_kms(&i915->drm, "i2c transfer returned %d\n", ret);
+ drm_dbg_kms(display->drm, "i2c transfer returned %d\n", ret);
return false;
}
@@ -415,7 +415,7 @@ static const char *sdvo_cmd_name(u8 cmd)
static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len)
{
- struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_sdvo->base);
const char *cmd_name;
int i, pos = 0;
char buffer[64];
@@ -436,10 +436,10 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
else
BUF_PRINT("(%02X)", cmd);
- drm_WARN_ON(&dev_priv->drm, pos >= sizeof(buffer) - 1);
+ drm_WARN_ON(display->drm, pos >= sizeof(buffer) - 1);
#undef BUF_PRINT
- drm_dbg_kms(&dev_priv->drm, "%s: W: %02X %s\n", SDVO_NAME(intel_sdvo),
+ drm_dbg_kms(display->drm, "%s: W: %02X %s\n", SDVO_NAME(intel_sdvo),
cmd, buffer);
}
@@ -465,7 +465,7 @@ static bool __intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len,
bool unlocked)
{
- struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_sdvo->base);
u8 *buf, status;
struct i2c_msg *msgs;
int i, ret = true;
@@ -515,13 +515,13 @@ static bool __intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
else
ret = __i2c_transfer(intel_sdvo->i2c, msgs, i+3);
if (ret < 0) {
- drm_dbg_kms(&i915->drm, "I2c transfer returned %d\n", ret);
+ drm_dbg_kms(display->drm, "I2c transfer returned %d\n", ret);
ret = false;
goto out;
}
if (ret != i+3) {
/* failure in I2C transfer */
- drm_dbg_kms(&i915->drm, "I2c transfer returned %d/%d\n", ret, i+3);
+ drm_dbg_kms(display->drm, "I2c transfer returned %d/%d\n", ret, i + 3);
ret = false;
}
@@ -540,7 +540,7 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
void *response, int response_len)
{
- struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_sdvo->base);
const char *cmd_status;
u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
u8 status;
@@ -605,15 +605,15 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
BUF_PRINT(" %02X", ((u8 *)response)[i]);
}
- drm_WARN_ON(&dev_priv->drm, pos >= sizeof(buffer) - 1);
+ drm_WARN_ON(display->drm, pos >= sizeof(buffer) - 1);
#undef BUF_PRINT
- drm_dbg_kms(&dev_priv->drm, "%s: R: %s\n",
+ drm_dbg_kms(display->drm, "%s: R: %s\n",
SDVO_NAME(intel_sdvo), buffer);
return true;
log_fail:
- drm_dbg_kms(&dev_priv->drm, "%s: R: ... failed %s\n",
+ drm_dbg_kms(display->drm, "%s: R: ... failed %s\n",
SDVO_NAME(intel_sdvo), buffer);
return false;
}
@@ -1009,7 +1009,7 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
unsigned int if_index, u8 tx_rate,
const u8 *data, unsigned int length)
{
- struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_sdvo->base);
u8 set_buf_index[2] = { if_index, 0 };
u8 hbuf_size, tmp[8];
int i;
@@ -1022,7 +1022,7 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size))
return false;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"writing sdvo hbuf: %i, length %u, hbuf_size: %i\n",
if_index, length, hbuf_size);
@@ -1049,7 +1049,7 @@ static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo,
unsigned int if_index,
u8 *data, unsigned int length)
{
- struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_sdvo->base);
u8 set_buf_index[2] = { if_index, 0 };
u8 hbuf_size, tx_rate, av_split;
int i;
@@ -1079,7 +1079,7 @@ static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size))
return false;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"reading sdvo hbuf: %i, length %u, hbuf_size: %i\n",
if_index, length, hbuf_size);
@@ -1100,7 +1100,7 @@ static bool intel_sdvo_compute_avi_infoframe(struct intel_sdvo *intel_sdvo,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_sdvo->base);
struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi;
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
@@ -1126,7 +1126,7 @@ static bool intel_sdvo_compute_avi_infoframe(struct intel_sdvo *intel_sdvo,
HDMI_QUANTIZATION_RANGE_FULL);
ret = hdmi_avi_infoframe_check(frame);
- if (drm_WARN_ON(&dev_priv->drm, ret))
+ if (drm_WARN_ON(display->drm, ret))
return false;
return true;
@@ -1135,7 +1135,7 @@ static bool intel_sdvo_compute_avi_infoframe(struct intel_sdvo *intel_sdvo,
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_sdvo->base);
u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
const union hdmi_infoframe *frame = &crtc_state->infoframes.avi;
ssize_t len;
@@ -1144,12 +1144,12 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI)) == 0)
return true;
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(display->drm,
frame->any.type != HDMI_INFOFRAME_TYPE_AVI))
return false;
len = hdmi_infoframe_pack_only(frame, sdvo_data, sizeof(sdvo_data));
- if (drm_WARN_ON(&dev_priv->drm, len < 0))
+ if (drm_WARN_ON(display->drm, len < 0))
return false;
return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
@@ -1160,7 +1160,7 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_sdvo->base);
u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
union hdmi_infoframe *frame = &crtc_state->infoframes.avi;
ssize_t len;
@@ -1172,7 +1172,7 @@ static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo,
len = intel_sdvo_read_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
sdvo_data, sizeof(sdvo_data));
if (len < 0) {
- drm_dbg_kms(&i915->drm, "failed to read AVI infoframe\n");
+ drm_dbg_kms(display->drm, "failed to read AVI infoframe\n");
return;
} else if (len == 0) {
return;
@@ -1183,12 +1183,12 @@ static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo,
ret = hdmi_infoframe_unpack(frame, sdvo_data, len);
if (ret) {
- drm_dbg_kms(&i915->drm, "Failed to unpack AVI infoframe\n");
+ drm_dbg_kms(display->drm, "Failed to unpack AVI infoframe\n");
return;
}
if (frame->any.type != HDMI_INFOFRAME_TYPE_AVI)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Found the wrong infoframe type 0x%x (expected 0x%02x)\n",
frame->any.type, HDMI_INFOFRAME_TYPE_AVI);
}
@@ -1196,7 +1196,7 @@ static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo,
static void intel_sdvo_get_eld(struct intel_sdvo *intel_sdvo,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_sdvo->base);
ssize_t len;
u8 val;
@@ -1212,7 +1212,7 @@ static void intel_sdvo_get_eld(struct intel_sdvo *intel_sdvo,
len = intel_sdvo_read_infoframe(intel_sdvo, SDVO_HBUF_INDEX_ELD,
crtc_state->eld, sizeof(crtc_state->eld));
if (len < 0)
- drm_dbg_kms(&i915->drm, "failed to read ELD\n");
+ drm_dbg_kms(display->drm, "failed to read ELD\n");
}
static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo,
@@ -1282,7 +1282,7 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
static int i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(pipe_config->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(pipe_config);
unsigned int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
struct dpll *clock = &pipe_config->dpll;
@@ -1303,7 +1303,7 @@ static int i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
clock->m1 = 12;
clock->m2 = 8;
} else {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"SDVO TV clock out of range: %i\n", dotclock);
return -EINVAL;
}
@@ -1359,6 +1359,7 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_sdvo_connector *intel_sdvo_connector =
@@ -1366,13 +1367,13 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct drm_display_mode *mode = &pipe_config->hw.mode;
- if (HAS_PCH_SPLIT(to_i915(encoder->base.dev))) {
+ if (HAS_PCH_SPLIT(i915)) {
pipe_config->has_pch_encoder = true;
if (!intel_fdi_compute_pipe_bpp(pipe_config))
return -EINVAL;
}
- drm_dbg_kms(&i915->drm, "forcing bpc to 8 for SDVO\n");
+ drm_dbg_kms(display->drm, "forcing bpc to 8 for SDVO\n");
/* FIXME: Don't increase pipe_bpp */
pipe_config->pipe_bpp = 8*3;
pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
@@ -1451,7 +1452,7 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
if (!intel_sdvo_compute_avi_infoframe(intel_sdvo,
pipe_config, conn_state)) {
- drm_dbg_kms(&i915->drm, "bad AVI infoframe\n");
+ drm_dbg_kms(display->drm, "bad AVI infoframe\n");
return -EINVAL;
}
@@ -1525,6 +1526,7 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(intel_encoder);
struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
@@ -1570,7 +1572,7 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
}
if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
- drm_info(&dev_priv->drm,
+ drm_info(display->drm,
"Setting output timings on %s failed\n",
SDVO_NAME(intel_sdvo));
@@ -1600,13 +1602,13 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
if (IS_TV(intel_sdvo_connector) || IS_LVDS(intel_sdvo_connector))
input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
- drm_info(&dev_priv->drm,
+ drm_info(display->drm,
"Setting input timings on %s failed\n",
SDVO_NAME(intel_sdvo));
switch (crtc_state->pixel_multiplier) {
default:
- drm_WARN(&dev_priv->drm, 1,
+ drm_WARN(display->drm, 1,
"unknown pixel multiplier specified\n");
fallthrough;
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
@@ -1617,14 +1619,14 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
return;
/* Set the SDVO control regs. */
- if (DISPLAY_VER(dev_priv) >= 4) {
+ if (DISPLAY_VER(display) >= 4) {
/* The real mode polarity is set by the SDVO commands, using
* struct intel_sdvo_dtd. */
sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
- if (DISPLAY_VER(dev_priv) < 5)
+ if (DISPLAY_VER(display) < 5)
sdvox |= SDVO_BORDER_ENABLE;
} else {
- sdvox = intel_de_read(dev_priv, intel_sdvo->sdvo_reg);
+ sdvox = intel_de_read(display, intel_sdvo->sdvo_reg);
if (intel_sdvo->base.port == PORT_B)
sdvox &= SDVOB_PRESERVE_MASK;
else
@@ -1637,10 +1639,10 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
else
sdvox |= SDVO_PIPE_SEL(crtc->pipe);
- if (DISPLAY_VER(dev_priv) >= 4) {
+ if (DISPLAY_VER(display) >= 4) {
/* done in crtc_mode_set as the dpll_md reg must be written early */
- } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
- IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
+ } else if (display->platform.i945g || display->platform.i945gm ||
+ display->platform.g33 || display->platform.pineview) {
/* done in crtc_mode_set as it lives inside the dpll register */
} else {
sdvox |= (crtc_state->pixel_multiplier - 1)
@@ -1648,7 +1650,7 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
}
if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
- DISPLAY_VER(dev_priv) < 5)
+ DISPLAY_VER(display) < 5)
sdvox |= SDVO_STALL_SELECT;
intel_sdvo_write_sdvox(intel_sdvo, sdvox);
}
@@ -1665,17 +1667,18 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
return active_outputs & intel_sdvo_connector->output_flag;
}
-bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
+bool intel_sdvo_port_enabled(struct intel_display *display,
i915_reg_t sdvo_reg, enum pipe *pipe)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val;
- val = intel_de_read(dev_priv, sdvo_reg);
+ val = intel_de_read(display, sdvo_reg);
/* asserts want to know the pipe even if the port is disabled */
if (HAS_PCH_CPT(dev_priv))
*pipe = (val & SDVO_PIPE_SEL_MASK_CPT) >> SDVO_PIPE_SEL_SHIFT_CPT;
- else if (IS_CHERRYVIEW(dev_priv))
+ else if (display->platform.cherryview)
*pipe = (val & SDVO_PIPE_SEL_MASK_CHV) >> SDVO_PIPE_SEL_SHIFT_CHV;
else
*pipe = (val & SDVO_PIPE_SEL_MASK) >> SDVO_PIPE_SEL_SHIFT;
@@ -1686,14 +1689,14 @@ bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
u16 active_outputs = 0;
bool ret;
intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
- ret = intel_sdvo_port_enabled(dev_priv, intel_sdvo->sdvo_reg, pipe);
+ ret = intel_sdvo_port_enabled(display, intel_sdvo->sdvo_reg, pipe);
return ret || active_outputs;
}
@@ -1701,8 +1704,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
static void intel_sdvo_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_sdvo_dtd dtd;
int encoder_pixel_multiplier = 0;
@@ -1713,7 +1715,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_SDVO);
- sdvox = intel_de_read(dev_priv, intel_sdvo->sdvo_reg);
+ sdvox = intel_de_read(display, intel_sdvo->sdvo_reg);
ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd);
if (!ret) {
@@ -1721,7 +1723,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
* Some sdvo encoders are not spec compliant and don't
* implement the mandatory get_timings function.
*/
- drm_dbg(&dev_priv->drm, "failed to retrieve SDVO DTD\n");
+ drm_dbg_kms(display->drm, "failed to retrieve SDVO DTD\n");
pipe_config->quirks |= PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS;
} else {
if (dtd.part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
@@ -1741,10 +1743,10 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
* pixel multiplier readout is tricky: Only on i915g/gm it is stored in
* the sdvo port register, on all other platforms it is part of the dpll
* state. Since the general pipe state readout happens before the
- * encoder->get_config we so already have a valid pixel multplier on all
- * other platfroms.
+ * encoder->get_config we so already have a valid pixel multiplier on all
+ * other platforms.
*/
- if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
+ if (display->platform.i915g || display->platform.i915gm) {
pipe_config->pixel_multiplier =
((sdvox & SDVO_PORT_MULTIPLY_MASK)
>> SDVO_PORT_MULTIPLY_SHIFT) + 1;
@@ -1773,7 +1775,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
}
}
- drm_WARN(dev,
+ drm_WARN(display->drm,
encoder_pixel_multiplier != pipe_config->pixel_multiplier,
"SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
pipe_config->pixel_multiplier, encoder_pixel_multiplier);
@@ -1838,6 +1840,7 @@ static void intel_disable_sdvo(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
@@ -1848,7 +1851,7 @@ static void intel_disable_sdvo(struct intel_atomic_state *state,
intel_sdvo_set_encoder_power_state(intel_sdvo,
DRM_MODE_DPMS_OFF);
- temp = intel_de_read(dev_priv, intel_sdvo->sdvo_reg);
+ temp = intel_de_read(display, intel_sdvo->sdvo_reg);
temp &= ~SDVO_ENABLE;
intel_sdvo_write_sdvox(intel_sdvo, temp);
@@ -1863,8 +1866,8 @@ static void intel_disable_sdvo(struct intel_atomic_state *state,
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
*/
- intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+ intel_set_cpu_fifo_underrun_reporting(display, PIPE_A, false);
+ intel_set_pch_fifo_underrun_reporting(display, PIPE_A, false);
temp &= ~SDVO_PIPE_SEL_MASK;
temp |= SDVO_ENABLE | SDVO_PIPE_SEL(PIPE_A);
@@ -1873,9 +1876,9 @@ static void intel_disable_sdvo(struct intel_atomic_state *state,
temp &= ~SDVO_ENABLE;
intel_sdvo_write_sdvox(intel_sdvo, temp);
- intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+ intel_wait_for_vblank_if_active(display, PIPE_A);
+ intel_set_cpu_fifo_underrun_reporting(display, PIPE_A, true);
+ intel_set_pch_fifo_underrun_reporting(display, PIPE_A, true);
}
}
@@ -1899,8 +1902,7 @@ static void intel_enable_sdvo(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(conn_state->connector);
@@ -1910,7 +1912,7 @@ static void intel_enable_sdvo(struct intel_atomic_state *state,
int i;
bool success;
- temp = intel_de_read(dev_priv, intel_sdvo->sdvo_reg);
+ temp = intel_de_read(display, intel_sdvo->sdvo_reg);
temp |= SDVO_ENABLE;
intel_sdvo_write_sdvox(intel_sdvo, temp);
@@ -1925,7 +1927,7 @@ static void intel_enable_sdvo(struct intel_atomic_state *state,
* a given it the status is a success, we succeeded.
*/
if (success && !input1) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"First %s output reported failure to sync\n",
SDVO_NAME(intel_sdvo));
}
@@ -1938,18 +1940,18 @@ static void intel_enable_sdvo(struct intel_atomic_state *state,
static enum drm_mode_status
intel_sdvo_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(connector);
bool has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo_connector, connector->state);
- int max_dotclk = i915->display.cdclk.max_dotclk_freq;
+ int max_dotclk = display->cdclk.max_dotclk_freq;
enum drm_mode_status status;
int clock = mode->clock;
- status = intel_cpu_transcoder_mode_valid(i915, mode);
+ status = intel_cpu_transcoder_mode_valid(display, mode);
if (status != MODE_OK)
return status;
@@ -1981,14 +1983,15 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
{
- struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_sdvo->base);
+
BUILD_BUG_ON(sizeof(*caps) != 8);
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_DEVICE_CAPS,
caps, sizeof(*caps)))
return false;
- drm_dbg_kms(&i915->drm, "SDVO capabilities:\n"
+ drm_dbg_kms(display->drm, "SDVO capabilities:\n"
" vendor_id: %d\n"
" device_id: %d\n"
" device_rev_id: %d\n"
@@ -2030,17 +2033,17 @@ static u8 intel_sdvo_get_colorimetry_cap(struct intel_sdvo *intel_sdvo)
static u16 intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
{
- struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_sdvo->base);
u16 hotplug;
- if (!I915_HAS_HOTPLUG(dev_priv))
+ if (!I915_HAS_HOTPLUG(display))
return 0;
/*
* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
* on the line.
*/
- if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
+ if (display->platform.i945g || display->platform.i945gm)
return 0;
if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
@@ -2137,13 +2140,12 @@ static enum drm_connector_status
intel_sdvo_detect(struct drm_connector *connector, bool force)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
enum drm_connector_status ret;
u16 response;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
if (!intel_display_device_enabled(display))
@@ -2161,7 +2163,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
&response, 2))
return connector_status_unknown;
- drm_dbg_kms(&i915->drm, "SDVO response %d %d [%x]\n",
+ drm_dbg_kms(display->drm, "SDVO response %d %d [%x]\n",
response & 0xff, response >> 8,
intel_sdvo_connector->output_flag);
@@ -2300,7 +2302,6 @@ static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
- struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(connector);
const struct drm_connector_state *conn_state = connector->state;
@@ -2309,7 +2310,7 @@ static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
int num_modes = 0;
int i;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
if (!intel_display_driver_check_access(display))
@@ -2351,9 +2352,9 @@ static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
static int intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
- drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
return intel_panel_get_modes(to_intel_connector(connector));
@@ -2617,14 +2618,14 @@ static struct intel_sdvo_ddc *
intel_sdvo_select_ddc_bus(struct intel_sdvo *sdvo,
struct intel_sdvo_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(sdvo->base.base.dev);
+ struct intel_display *display = to_intel_display(&sdvo->base);
const struct sdvo_device_mapping *mapping;
int ddc_bus;
if (sdvo->base.port == PORT_B)
- mapping = &dev_priv->display.vbt.sdvo_mappings[0];
+ mapping = &display->vbt.sdvo_mappings[0];
else
- mapping = &dev_priv->display.vbt.sdvo_mappings[1];
+ mapping = &display->vbt.sdvo_mappings[1];
if (mapping->initialized)
ddc_bus = (mapping->ddc_pin & 0xf0) >> 4;
@@ -2641,14 +2642,13 @@ static void
intel_sdvo_select_i2c_bus(struct intel_sdvo *sdvo)
{
struct intel_display *display = to_intel_display(&sdvo->base);
- struct drm_i915_private *dev_priv = to_i915(sdvo->base.base.dev);
const struct sdvo_device_mapping *mapping;
u8 pin;
if (sdvo->base.port == PORT_B)
- mapping = &dev_priv->display.vbt.sdvo_mappings[0];
+ mapping = &display->vbt.sdvo_mappings[0];
else
- mapping = &dev_priv->display.vbt.sdvo_mappings[1];
+ mapping = &display->vbt.sdvo_mappings[1];
if (mapping->initialized &&
intel_gmbus_is_valid_pin(display, mapping->i2c_pin))
@@ -2656,7 +2656,7 @@ intel_sdvo_select_i2c_bus(struct intel_sdvo *sdvo)
else
pin = GMBUS_PIN_DPB;
- drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] I2C pin %d, target addr 0x%x\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] I2C pin %d, target addr 0x%x\n",
sdvo->base.base.base.id, sdvo->base.base.name,
pin, sdvo->target_addr);
@@ -2686,15 +2686,15 @@ intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo)
static u8
intel_sdvo_get_target_addr(struct intel_sdvo *sdvo)
{
- struct drm_i915_private *dev_priv = to_i915(sdvo->base.base.dev);
+ struct intel_display *display = to_intel_display(&sdvo->base);
const struct sdvo_device_mapping *my_mapping, *other_mapping;
if (sdvo->base.port == PORT_B) {
- my_mapping = &dev_priv->display.vbt.sdvo_mappings[0];
- other_mapping = &dev_priv->display.vbt.sdvo_mappings[1];
+ my_mapping = &display->vbt.sdvo_mappings[0];
+ other_mapping = &display->vbt.sdvo_mappings[1];
} else {
- my_mapping = &dev_priv->display.vbt.sdvo_mappings[1];
- other_mapping = &dev_priv->display.vbt.sdvo_mappings[0];
+ my_mapping = &display->vbt.sdvo_mappings[1];
+ other_mapping = &display->vbt.sdvo_mappings[0];
}
/* If the BIOS described our SDVO device, take advantage of it. */
@@ -2730,7 +2730,7 @@ static int
intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
struct intel_sdvo *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.base.dev);
+ struct intel_display *display = to_intel_display(&encoder->base);
struct intel_sdvo_ddc *ddc = NULL;
int ret;
@@ -2755,7 +2755,7 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
intel_connector_attach_encoder(&connector->base, &encoder->base);
if (ddc)
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] using %s\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] using %s\n",
connector->base.base.base.id, connector->base.base.name,
ddc->ddc.name);
@@ -2798,14 +2798,14 @@ static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void)
static bool
intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, u16 type)
{
+ struct intel_display *display = to_intel_display(&intel_sdvo->base);
struct drm_encoder *encoder = &intel_sdvo->base.base;
struct drm_connector *connector;
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
- struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
- drm_dbg_kms(&i915->drm, "initialising DVI type 0x%x\n", type);
+ drm_dbg_kms(display->drm, "initialising DVI type 0x%x\n", type);
intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
@@ -2851,13 +2851,13 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, u16 type)
static bool
intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, u16 type)
{
- struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_sdvo->base);
struct drm_encoder *encoder = &intel_sdvo->base.base;
struct drm_connector *connector;
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
- drm_dbg_kms(&i915->drm, "initialising TV type 0x%x\n", type);
+ drm_dbg_kms(display->drm, "initialising TV type 0x%x\n", type);
intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
@@ -2891,13 +2891,13 @@ err:
static bool
intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, u16 type)
{
- struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_sdvo->base);
struct drm_encoder *encoder = &intel_sdvo->base.base;
struct drm_connector *connector;
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
- drm_dbg_kms(&i915->drm, "initialising analog type 0x%x\n", type);
+ drm_dbg_kms(display->drm, "initialising analog type 0x%x\n", type);
intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
@@ -2925,12 +2925,11 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, u16 type)
{
struct intel_display *display = to_intel_display(&intel_sdvo->base);
struct drm_encoder *encoder = &intel_sdvo->base.base;
- struct drm_i915_private *i915 = to_i915(encoder->dev);
struct drm_connector *connector;
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
- drm_dbg_kms(&i915->drm, "initialising LVDS type 0x%x\n", type);
+ drm_dbg_kms(display->drm, "initialising LVDS type 0x%x\n", type);
intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
@@ -2960,12 +2959,12 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, u16 type)
intel_panel_add_vbt_sdvo_fixed_mode(intel_connector);
if (!intel_panel_preferred_fixed_mode(intel_connector)) {
- mutex_lock(&i915->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
intel_ddc_get_modes(connector, connector->ddc);
intel_panel_add_edid_fixed_modes(intel_connector, false);
- mutex_unlock(&i915->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
}
intel_panel_init(intel_connector, NULL);
@@ -3014,7 +3013,7 @@ static bool intel_sdvo_output_init(struct intel_sdvo *sdvo, u16 type)
static bool
intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo)
{
- struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_sdvo->base);
static const u16 probe_order[] = {
SDVO_OUTPUT_TMDS0,
SDVO_OUTPUT_TMDS1,
@@ -3033,7 +3032,7 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo)
flags = intel_sdvo_filter_output_flags(intel_sdvo->caps.output_flags);
if (flags == 0) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"%s: Unknown SDVO output type (0x%04x)\n",
SDVO_NAME(intel_sdvo), intel_sdvo->caps.output_flags);
return false;
@@ -3056,11 +3055,11 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo)
static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
{
- struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct intel_display *display = to_intel_display(&intel_sdvo->base);
struct drm_connector *connector, *tmp;
list_for_each_entry_safe(connector, tmp,
- &dev->mode_config.connector_list, head) {
+ &display->drm->mode_config.connector_list, head) {
if (intel_attached_encoder(to_intel_connector(connector)) == &intel_sdvo->base) {
drm_connector_unregister(connector);
intel_connector_destroy(connector);
@@ -3072,7 +3071,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
int type)
{
- struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct intel_display *display = to_intel_display(&intel_sdvo->base);
struct intel_sdvo_tv_format format;
u32 format_map, i;
@@ -3097,7 +3096,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->tv_format =
- drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ drm_property_create(display->drm, DRM_MODE_PROP_ENUM,
"mode", intel_sdvo_connector->format_supported_num);
if (!intel_sdvo_connector->tv_format)
return false;
@@ -3119,12 +3118,12 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
return false; \
intel_sdvo_connector->name = \
- drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
+ drm_property_create_range(display->drm, 0, #name, 0, data_value[0]); \
if (!intel_sdvo_connector->name) return false; \
state_assignment = response; \
drm_object_attach_property(&connector->base, \
intel_sdvo_connector->name, 0); \
- drm_dbg_kms(dev, #name ": max %d, default %d, current %d\n", \
+ drm_dbg_kms(display->drm, #name ": max %d, default %d, current %d\n", \
data_value[0], data_value[1], response); \
} \
} while (0)
@@ -3136,8 +3135,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
struct intel_sdvo_enhancements_reply enhancements)
{
- struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
- struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct intel_display *display = to_intel_display(&intel_sdvo->base);
struct drm_connector *connector = &intel_sdvo_connector->base.base;
struct drm_connector_state *conn_state = connector->state;
struct intel_sdvo_connector_state *sdvo_state =
@@ -3160,7 +3158,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->max_hscan = data_value[0];
intel_sdvo_connector->left =
- drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]);
+ drm_property_create_range(display->drm, 0, "left_margin", 0, data_value[0]);
if (!intel_sdvo_connector->left)
return false;
@@ -3168,13 +3166,13 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->left, 0);
intel_sdvo_connector->right =
- drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]);
+ drm_property_create_range(display->drm, 0, "right_margin", 0, data_value[0]);
if (!intel_sdvo_connector->right)
return false;
drm_object_attach_property(&connector->base,
intel_sdvo_connector->right, 0);
- drm_dbg_kms(&i915->drm, "h_overscan: max %d, default %d, current %d\n",
+ drm_dbg_kms(display->drm, "h_overscan: max %d, default %d, current %d\n",
data_value[0], data_value[1], response);
}
@@ -3193,7 +3191,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->max_vscan = data_value[0];
intel_sdvo_connector->top =
- drm_property_create_range(dev, 0,
+ drm_property_create_range(display->drm, 0,
"top_margin", 0, data_value[0]);
if (!intel_sdvo_connector->top)
return false;
@@ -3202,14 +3200,14 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->top, 0);
intel_sdvo_connector->bottom =
- drm_property_create_range(dev, 0,
+ drm_property_create_range(display->drm, 0,
"bottom_margin", 0, data_value[0]);
if (!intel_sdvo_connector->bottom)
return false;
drm_object_attach_property(&connector->base,
intel_sdvo_connector->bottom, 0);
- drm_dbg_kms(&i915->drm, "v_overscan: max %d, default %d, current %d\n",
+ drm_dbg_kms(display->drm, "v_overscan: max %d, default %d, current %d\n",
data_value[0], data_value[1], response);
}
@@ -3232,13 +3230,13 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
sdvo_state->tv.dot_crawl = response & 0x1;
intel_sdvo_connector->dot_crawl =
- drm_property_create_range(dev, 0, "dot_crawl", 0, 1);
+ drm_property_create_range(display->drm, 0, "dot_crawl", 0, 1);
if (!intel_sdvo_connector->dot_crawl)
return false;
drm_object_attach_property(&connector->base,
intel_sdvo_connector->dot_crawl, 0);
- drm_dbg_kms(&i915->drm, "dot crawl: current %d\n", response);
+ drm_dbg_kms(display->drm, "dot crawl: current %d\n", response);
}
return true;
@@ -3249,7 +3247,7 @@ intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
struct intel_sdvo_enhancements_reply enhancements)
{
- struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct intel_display *display = to_intel_display(&intel_sdvo->base);
struct drm_connector *connector = &intel_sdvo_connector->base.base;
u16 response, data_value[2];
@@ -3263,7 +3261,7 @@ intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector)
{
- struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_sdvo->base);
union {
struct intel_sdvo_enhancements_reply reply;
u16 response;
@@ -3275,7 +3273,7 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
&enhancements, sizeof(enhancements)) ||
enhancements.response == 0) {
- drm_dbg_kms(&i915->drm, "No enhancement is supported\n");
+ drm_dbg_kms(display->drm, "No enhancement is supported\n");
return true;
}
@@ -3350,8 +3348,8 @@ static int
intel_sdvo_init_ddc_proxy(struct intel_sdvo_ddc *ddc,
struct intel_sdvo *sdvo, int ddc_bus)
{
- struct drm_i915_private *dev_priv = to_i915(sdvo->base.base.dev);
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct intel_display *display = to_intel_display(&sdvo->base);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
ddc->sdvo = sdvo;
ddc->ddc_bus = ddc_bus;
@@ -3367,32 +3365,34 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo_ddc *ddc,
return i2c_add_adapter(&ddc->ddc);
}
-static bool is_sdvo_port_valid(struct drm_i915_private *dev_priv, enum port port)
+static bool is_sdvo_port_valid(struct intel_display *display, enum port port)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
if (HAS_PCH_SPLIT(dev_priv))
return port == PORT_B;
else
return port == PORT_B || port == PORT_C;
}
-static bool assert_sdvo_port_valid(struct drm_i915_private *dev_priv,
- enum port port)
+static bool assert_sdvo_port_valid(struct intel_display *display, enum port port)
{
- return !drm_WARN(&dev_priv->drm, !is_sdvo_port_valid(dev_priv, port),
+ return !drm_WARN(display->drm, !is_sdvo_port_valid(display, port),
"Platform does not support SDVO %c\n", port_name(port));
}
-bool intel_sdvo_init(struct drm_i915_private *dev_priv,
+bool intel_sdvo_init(struct intel_display *display,
i915_reg_t sdvo_reg, enum port port)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *intel_encoder;
struct intel_sdvo *intel_sdvo;
int i;
- if (!assert_port_valid(dev_priv, port))
+ if (!assert_port_valid(display, port))
return false;
- if (!assert_sdvo_port_valid(dev_priv, port))
+ if (!assert_sdvo_port_valid(display, port))
return false;
intel_sdvo = kzalloc(sizeof(*intel_sdvo), GFP_KERNEL);
@@ -3405,7 +3405,7 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
intel_encoder->port = port;
- drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
+ drm_encoder_init(display->drm, &intel_encoder->base,
&intel_sdvo_enc_funcs, 0,
"SDVO %c", port_name(port));
@@ -3419,7 +3419,7 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
u8 byte;
if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"No SDVO device found on %s\n",
SDVO_NAME(intel_sdvo));
goto err;
@@ -3457,7 +3457,7 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
}
if (!intel_sdvo_output_setup(intel_sdvo)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"SDVO output failed to setup on %s\n",
SDVO_NAME(intel_sdvo));
/* Output_setup can leave behind connectors! */
@@ -3494,7 +3494,7 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
&intel_sdvo->pixel_clock_max))
goto err_output;
- drm_dbg_kms(&dev_priv->drm, "%s device VID/DID: %02X:%02X.%02X, "
+ drm_dbg_kms(display->drm, "%s device VID/DID: %02X:%02X.%02X, "
"clock range %dMHz - %dMHz, "
"num inputs: %d, "
"output 1: %c, output 2: %c\n",
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.h b/drivers/gpu/drm/i915/display/intel_sdvo.h
index d1815b4103d4..1a9e40fdd8a8 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.h
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.h
@@ -10,22 +10,22 @@
#include "i915_reg_defs.h"
-struct drm_i915_private;
enum pipe;
enum port;
+struct intel_display;
#ifdef I915
-bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
+bool intel_sdvo_port_enabled(struct intel_display *display,
i915_reg_t sdvo_reg, enum pipe *pipe);
-bool intel_sdvo_init(struct drm_i915_private *dev_priv,
+bool intel_sdvo_init(struct intel_display *display,
i915_reg_t reg, enum port port);
#else
-static inline bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
+static inline bool intel_sdvo_port_enabled(struct intel_display *display,
i915_reg_t sdvo_reg, enum pipe *pipe)
{
return false;
}
-static inline bool intel_sdvo_init(struct drm_i915_private *dev_priv,
+static inline bool intel_sdvo_init(struct intel_display *display,
i915_reg_t reg, enum port port)
{
return false;
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo_regs.h b/drivers/gpu/drm/i915/display/intel_sdvo_regs.h
index 54f099abefeb..56c4551abefd 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_sdvo_regs.h
@@ -244,7 +244,7 @@ struct intel_sdvo_set_target_input_args {
* Takes a struct intel_sdvo_output_flags of which outputs are targeted by
* future output commands.
*
- * Affected commands inclue SET_OUTPUT_TIMINGS_PART[12],
+ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
* GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
*/
#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
diff --git a/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
new file mode 100644
index 000000000000..c6321dafef4f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Synopsys, Inc., Intel Corporation
+ */
+
+#include <linux/math.h>
+
+#include "intel_cx0_phy_regs.h"
+#include "intel_display_types.h"
+#include "intel_snps_phy.h"
+#include "intel_snps_phy_regs.h"
+#include "intel_snps_hdmi_pll.h"
+
+#define INTEL_SNPS_PHY_HDMI_4999MHZ 4999999900ULL
+#define INTEL_SNPS_PHY_HDMI_16GHZ 16000000000ULL
+#define INTEL_SNPS_PHY_HDMI_9999MHZ (2 * INTEL_SNPS_PHY_HDMI_4999MHZ)
+
+#define CURVE0_MULTIPLIER 1000000000
+#define CURVE1_MULTIPLIER 100
+#define CURVE2_MULTIPLIER 1000000000000ULL
+
+struct pll_output_params {
+ u32 ssc_up_spread;
+ u32 mpll_div5_en;
+ u32 hdmi_div;
+ u32 ana_cp_int;
+ u32 ana_cp_prop;
+ u32 refclk_postscalar;
+ u32 tx_clk_div;
+ u32 fracn_quot;
+ u32 fracn_rem;
+ u32 fracn_den;
+ u32 fracn_en;
+ u32 pmix_en;
+ u32 multiplier;
+ int mpll_ana_v2i;
+ int ana_freq_vco;
+};
+
+static s64 interp(s64 x, s64 x1, s64 x2, s64 y1, s64 y2)
+{
+ s64 dydx;
+
+ dydx = DIV_ROUND_UP_ULL((y2 - y1) * 100000, (x2 - x1));
+
+ return (y1 + DIV_ROUND_UP_ULL(dydx * (x - x1), 100000));
+}
+
+static void get_ana_cp_int_prop(u32 vco_clk,
+ u32 refclk_postscalar,
+ int mpll_ana_v2i,
+ int c, int a,
+ const u64 curve_freq_hz[2][8],
+ const u64 curve_0[2][8],
+ const u64 curve_1[2][8],
+ const u64 curve_2[2][8],
+ u32 *ana_cp_int,
+ u32 *ana_cp_prop)
+{
+ u64 vco_div_refclk_float;
+ u64 curve_0_interpolated;
+ u64 curve_2_interpolated;
+ u64 curve_1_interpolated;
+ u64 curve_2_scaled1;
+ u64 curve_2_scaled2;
+ u64 adjusted_vco_clk1;
+ u64 adjusted_vco_clk2;
+ u64 curve_2_scaled_int;
+ u64 interpolated_product;
+ u64 scaled_interpolated_sqrt;
+ u64 scaled_vco_div_refclk1;
+ u64 scaled_vco_div_refclk2;
+ u64 ana_cp_int_temp;
+ u64 temp;
+
+ vco_div_refclk_float = vco_clk * DIV_ROUND_DOWN_ULL(1000000000000ULL, refclk_postscalar);
+
+ /* Interpolate curve values at the target vco_clk frequency */
+ curve_0_interpolated = interp(vco_clk, curve_freq_hz[c][a], curve_freq_hz[c][a + 1],
+ curve_0[c][a], curve_0[c][a + 1]);
+
+ curve_2_interpolated = interp(vco_clk, curve_freq_hz[c][a], curve_freq_hz[c][a + 1],
+ curve_2[c][a], curve_2[c][a + 1]);
+
+ curve_1_interpolated = interp(vco_clk, curve_freq_hz[c][a], curve_freq_hz[c][a + 1],
+ curve_1[c][a], curve_1[c][a + 1]);
+
+ curve_1_interpolated = DIV_ROUND_DOWN_ULL(curve_1_interpolated, CURVE1_MULTIPLIER);
+
+ /*
+ * Scale curve_2_interpolated based on mpll_ana_v2i, for integer part
+ * ana_cp_int and for the proportional part ana_cp_prop
+ */
+ temp = curve_2_interpolated * (4 - mpll_ana_v2i);
+ curve_2_scaled1 = DIV_ROUND_DOWN_ULL(temp, 16000);
+ curve_2_scaled2 = DIV_ROUND_DOWN_ULL(temp, 160);
+
+ /* Scale vco_div_refclk for ana_cp_int */
+ scaled_vco_div_refclk1 = 112008301 * DIV_ROUND_DOWN_ULL(vco_div_refclk_float, 100000);
+
+ adjusted_vco_clk1 = CURVE2_MULTIPLIER *
+ DIV_ROUND_DOWN_ULL(scaled_vco_div_refclk1, (curve_0_interpolated *
+ DIV_ROUND_DOWN_ULL(curve_1_interpolated, CURVE0_MULTIPLIER)));
+
+ ana_cp_int_temp =
+ DIV_ROUND_CLOSEST_ULL(DIV_ROUND_DOWN_ULL(adjusted_vco_clk1, curve_2_scaled1),
+ CURVE2_MULTIPLIER);
+
+ *ana_cp_int = max(1, min(ana_cp_int_temp, 127));
+
+ curve_2_scaled_int = curve_2_scaled1 * (*ana_cp_int);
+
+ interpolated_product = curve_1_interpolated *
+ (curve_2_scaled_int * DIV_ROUND_DOWN_ULL(curve_0_interpolated,
+ CURVE0_MULTIPLIER));
+
+ scaled_interpolated_sqrt =
+ int_sqrt(DIV_ROUND_UP_ULL(interpolated_product, vco_div_refclk_float) *
+ DIV_ROUND_DOWN_ULL(1000000000000ULL, 55));
+
+ /* Scale vco_div_refclk for ana_cp_prop */
+ scaled_vco_div_refclk2 = DIV_ROUND_UP_ULL(vco_div_refclk_float, 1000000);
+ adjusted_vco_clk2 = 1460281 * DIV_ROUND_UP_ULL(scaled_interpolated_sqrt *
+ scaled_vco_div_refclk2,
+ curve_1_interpolated);
+
+ *ana_cp_prop = DIV_ROUND_UP_ULL(adjusted_vco_clk2, curve_2_scaled2);
+ *ana_cp_prop = max(1, min(*ana_cp_prop, 127));
+}
+
+static void compute_hdmi_tmds_pll(u64 pixel_clock, u32 refclk,
+ u32 ref_range,
+ u32 ana_cp_int_gs,
+ u32 ana_cp_prop_gs,
+ const u64 curve_freq_hz[2][8],
+ const u64 curve_0[2][8],
+ const u64 curve_1[2][8],
+ const u64 curve_2[2][8],
+ u32 prescaler_divider,
+ struct pll_output_params *pll_params)
+{
+ u64 datarate = pixel_clock * 10000;
+ u32 ssc_up_spread = 1;
+ u32 mpll_div5_en = 1;
+ u32 hdmi_div = 1;
+ u32 ana_cp_int;
+ u32 ana_cp_prop;
+ u32 refclk_postscalar = refclk >> prescaler_divider;
+ u32 tx_clk_div;
+ u64 vco_clk;
+ u64 vco_clk_do_div;
+ u32 vco_div_refclk_integer;
+ u32 vco_div_refclk_fracn;
+ u32 fracn_quot;
+ u32 fracn_rem;
+ u32 fracn_den;
+ u32 fracn_en;
+ u32 pmix_en;
+ u32 multiplier;
+ int mpll_ana_v2i;
+ int ana_freq_vco = 0;
+ int c, a = 0;
+ int i;
+
+ /* Select appropriate v2i point */
+ if (datarate <= INTEL_SNPS_PHY_HDMI_9999MHZ) {
+ mpll_ana_v2i = 2;
+ tx_clk_div = ilog2(DIV_ROUND_DOWN_ULL(INTEL_SNPS_PHY_HDMI_9999MHZ, datarate));
+ } else {
+ mpll_ana_v2i = 3;
+ tx_clk_div = ilog2(DIV_ROUND_DOWN_ULL(INTEL_SNPS_PHY_HDMI_16GHZ, datarate));
+ }
+ vco_clk = (datarate << tx_clk_div) >> 1;
+
+ vco_div_refclk_integer = DIV_ROUND_DOWN_ULL(vco_clk, refclk_postscalar);
+ vco_clk_do_div = do_div(vco_clk, refclk_postscalar);
+ vco_div_refclk_fracn = DIV_ROUND_DOWN_ULL(vco_clk_do_div << 32, refclk_postscalar);
+
+ fracn_quot = vco_div_refclk_fracn >> 16;
+ fracn_rem = vco_div_refclk_fracn & 0xffff;
+ fracn_rem = fracn_rem - (fracn_rem >> 15);
+ fracn_den = 0xffff;
+ fracn_en = (fracn_quot != 0 || fracn_rem != 0) ? 1 : 0;
+ pmix_en = fracn_en;
+ multiplier = (vco_div_refclk_integer - 16) * 2;
+ /* Curve selection for ana_cp_* calculations. One curve hardcoded per v2i range */
+ c = mpll_ana_v2i - 2;
+
+ /* Find the right segment of the table */
+ for (i = 0; i < 8; i += 2) {
+ if (vco_clk <= curve_freq_hz[c][i + 1]) {
+ a = i;
+ ana_freq_vco = 3 - (a >> 1);
+ break;
+ }
+ }
+
+ get_ana_cp_int_prop(vco_clk, refclk_postscalar, mpll_ana_v2i, c, a,
+ curve_freq_hz, curve_0, curve_1, curve_2,
+ &ana_cp_int, &ana_cp_prop);
+
+ pll_params->ssc_up_spread = ssc_up_spread;
+ pll_params->mpll_div5_en = mpll_div5_en;
+ pll_params->hdmi_div = hdmi_div;
+ pll_params->ana_cp_int = ana_cp_int;
+ pll_params->refclk_postscalar = refclk_postscalar;
+ pll_params->tx_clk_div = tx_clk_div;
+ pll_params->fracn_quot = fracn_quot;
+ pll_params->fracn_rem = fracn_rem;
+ pll_params->fracn_den = fracn_den;
+ pll_params->fracn_en = fracn_en;
+ pll_params->pmix_en = pmix_en;
+ pll_params->multiplier = multiplier;
+ pll_params->ana_cp_prop = ana_cp_prop;
+ pll_params->mpll_ana_v2i = mpll_ana_v2i;
+ pll_params->ana_freq_vco = ana_freq_vco;
+}
+
+void intel_snps_hdmi_pll_compute_mpllb(struct intel_mpllb_state *pll_state, u64 pixel_clock)
+{
+ /* x axis frequencies. One curve in each array per v2i point */
+ static const u64 dg2_curve_freq_hz[2][8] = {
+ { 2500000000ULL, 3000000000ULL, 3000000000ULL, 3500000000ULL, 3500000000ULL,
+ 4000000000ULL, 4000000000ULL, 5000000000ULL },
+ { 4000000000ULL, 4600000000ULL, 4601000000ULL, 5400000000ULL, 5401000000ULL,
+ 6600000000ULL, 6601000000ULL, 8001000000ULL }
+ };
+
+ /* y axis heights multiplied by 1000000000 */
+ static const u64 dg2_curve_0[2][8] = {
+ { 34149871, 39803269, 36034544, 40601014, 35646940, 40016109, 35127987, 41889522 },
+ { 70000000, 78770454, 70451838, 80427119, 70991400, 84230173, 72945921, 87064218 }
+ };
+
+ /* Multiplied by 100 */
+ static const u64 dg2_curve_1[2][8] = {
+ { 85177000000000ULL, 79385227160000ULL, 95672603580000ULL, 88857207160000ULL,
+ 109379790900000ULL, 103528193900000ULL, 131941242400000ULL, 117279000000000ULL },
+ { 60255000000000ULL, 55569000000000ULL, 72036000000000ULL, 69509000000000ULL,
+ 81785000000000ULL, 731030000000000ULL, 96591000000000ULL, 69077000000000ULL }
+ };
+
+ /* Multiplied by 1000000000000 */
+ static const u64 dg2_curve_2[2][8] = {
+ { 2186930000ULL, 2835287134ULL, 2395395343ULL, 2932270687ULL, 2351887545ULL,
+ 2861031697ULL, 2294149152ULL, 3091730000ULL },
+ { 4560000000ULL, 5570000000ULL, 4610000000ULL, 5770000000ULL, 4670000000ULL,
+ 6240000000ULL, 4890000000ULL, 6600000000ULL }
+ };
+
+ struct pll_output_params pll_params;
+ u32 refclk = 100000000;
+ u32 prescaler_divider = 1;
+ u32 ref_range = 3;
+ u32 ana_cp_int_gs = 64;
+ u32 ana_cp_prop_gs = 124;
+
+ compute_hdmi_tmds_pll(pixel_clock, refclk, ref_range, ana_cp_int_gs, ana_cp_prop_gs,
+ dg2_curve_freq_hz, dg2_curve_0, dg2_curve_1, dg2_curve_2,
+ prescaler_divider, &pll_params);
+
+ pll_state->clock = pixel_clock;
+ pll_state->ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, ref_range);
+ pll_state->mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, pll_params.ana_cp_int) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, pll_params.ana_cp_prop) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, ana_cp_int_gs) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, ana_cp_prop_gs);
+ pll_state->mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, pll_params.mpll_div5_en) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, pll_params.tx_clk_div) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, pll_params.pmix_en) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, pll_params.mpll_ana_v2i) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, pll_params.ana_freq_vco);
+ pll_state->mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, prescaler_divider) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, pll_params.multiplier) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, pll_params.hdmi_div);
+ pll_state->mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, pll_params.fracn_en) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, pll_params.fracn_den);
+ pll_state->mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, pll_params.fracn_quot) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, pll_params.fracn_rem);
+ pll_state->mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, pll_params.ssc_up_spread);
+}
+
+void intel_snps_hdmi_pll_compute_c10pll(struct intel_c10pll_state *pll_state, u64 pixel_clock)
+{
+ /* x axis frequencies. One curve in each array per v2i point */
+ static const u64 c10_curve_freq_hz[2][8] = {
+ { 2500000000ULL, 3000000000ULL, 3000000000ULL, 3500000000ULL, 3500000000ULL,
+ 4000000000ULL, 4000000000ULL, 5000000000ULL },
+ { 4000000000ULL, 4600000000ULL, 4601000000ULL, 5400000000ULL, 5401000000ULL,
+ 6600000000ULL, 6601000000ULL, 8001000000ULL }
+ };
+
+ /* y axis heights multiplied by 1000000000 */
+ static const u64 c10_curve_0[2][8] = {
+ { 41174500, 48605500, 42973700, 49433100, 42408600, 47681900, 40297400, 49131400 },
+ { 82056800, 94420700, 82323400, 96370600, 81273300, 98630100, 81728700, 99105700}
+ };
+
+ static const u64 c10_curve_1[2][8] = {
+ { 73300000000000ULL, 66000000000000ULL, 83100000000000ULL, 75300000000000ULL,
+ 99700000000000ULL, 92300000000000ULL, 125000000000000ULL, 110000000000000ULL },
+ { 53700000000000ULL, 47700000000000ULL, 62200000000000ULL, 54400000000000ULL,
+ 75100000000000ULL, 63400000000000ULL, 90600000000000ULL, 76300000000000ULL }
+ };
+
+ /* Multiplied by 1000000000000 */
+ static const u64 c10_curve_2[2][8] = {
+ { 2415790000ULL, 3136460000ULL, 2581990000ULL, 3222670000ULL, 2529330000ULL,
+ 3042020000ULL, 2336970000ULL, 3191460000ULL},
+ { 4808390000ULL, 5994250000ULL, 4832730000ULL, 6193730000ULL, 4737700000ULL,
+ 6428750000ULL, 4779200000ULL, 6479340000ULL }
+ };
+
+ struct pll_output_params pll_params;
+ u32 refclk = 38400000;
+ u32 prescaler_divider = 0;
+ u32 ref_range = 1;
+ u32 ana_cp_int_gs = 30;
+ u32 ana_cp_prop_gs = 28;
+
+ compute_hdmi_tmds_pll(pixel_clock, refclk, ref_range,
+ ana_cp_int_gs, ana_cp_prop_gs,
+ c10_curve_freq_hz, c10_curve_0,
+ c10_curve_1, c10_curve_2, prescaler_divider,
+ &pll_params);
+
+ pll_state->tx = 0x10;
+ pll_state->cmn = 0x1;
+ pll_state->pll[0] = REG_FIELD_PREP(C10_PLL0_DIV5CLK_EN, pll_params.mpll_div5_en) |
+ REG_FIELD_PREP(C10_PLL0_FRACEN, pll_params.fracn_en) |
+ REG_FIELD_PREP(C10_PLL0_PMIX_EN, pll_params.pmix_en) |
+ REG_FIELD_PREP(C10_PLL0_ANA_FREQ_VCO_MASK, pll_params.ana_freq_vco);
+ pll_state->pll[2] = REG_FIELD_PREP(C10_PLL2_MULTIPLIERL_MASK, pll_params.multiplier);
+ pll_state->pll[3] = REG_FIELD_PREP(C10_PLL3_MULTIPLIERH_MASK, pll_params.multiplier >> 8);
+ pll_state->pll[8] = REG_FIELD_PREP(C10_PLL8_SSC_UP_SPREAD, pll_params.ssc_up_spread);
+ pll_state->pll[9] = REG_FIELD_PREP(C10_PLL9_FRACN_DENL_MASK, pll_params.fracn_den);
+ pll_state->pll[10] = REG_FIELD_PREP(C10_PLL10_FRACN_DENH_MASK, pll_params.fracn_den >> 8);
+ pll_state->pll[11] = REG_FIELD_PREP(C10_PLL11_FRACN_QUOT_L_MASK, pll_params.fracn_quot);
+ pll_state->pll[12] = REG_FIELD_PREP(C10_PLL12_FRACN_QUOT_H_MASK,
+ pll_params.fracn_quot >> 8);
+
+ pll_state->pll[13] = REG_FIELD_PREP(C10_PLL13_FRACN_REM_L_MASK, pll_params.fracn_rem);
+ pll_state->pll[14] = REG_FIELD_PREP(C10_PLL14_FRACN_REM_H_MASK, pll_params.fracn_rem >> 8);
+ pll_state->pll[15] = REG_FIELD_PREP(C10_PLL15_TXCLKDIV_MASK, pll_params.tx_clk_div) |
+ REG_FIELD_PREP(C10_PLL15_HDMIDIV_MASK, pll_params.hdmi_div);
+ pll_state->pll[16] = REG_FIELD_PREP(C10_PLL16_ANA_CPINT, pll_params.ana_cp_int) |
+ REG_FIELD_PREP(C10_PLL16_ANA_CPINTGS_L, ana_cp_int_gs);
+ pll_state->pll[17] = REG_FIELD_PREP(C10_PLL17_ANA_CPINTGS_H_MASK, ana_cp_int_gs >> 1) |
+ REG_FIELD_PREP(C10_PLL17_ANA_CPPROP_L_MASK, pll_params.ana_cp_prop);
+ pll_state->pll[18] =
+ REG_FIELD_PREP(C10_PLL18_ANA_CPPROP_H_MASK, pll_params.ana_cp_prop >> 2) |
+ REG_FIELD_PREP(C10_PLL18_ANA_CPPROPGS_L_MASK, ana_cp_prop_gs);
+
+ pll_state->pll[19] = REG_FIELD_PREP(C10_PLL19_ANA_CPPROPGS_H_MASK, ana_cp_prop_gs >> 3) |
+ REG_FIELD_PREP(C10_PLL19_ANA_V2I_MASK, pll_params.mpll_ana_v2i);
+}
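/*
 * [Editor's sketch, not part of the patch] The divider math in
 * compute_hdmi_tmds_pll() above is easiest to follow with numbers. The
 * standalone program below redoes the tx_clk_div / multiplier /
 * fractional-N steps in plain C, assuming an illustrative 148.5 MHz HDMI
 * pixel clock and the DG2 parameters used by
 * intel_snps_hdmi_pll_compute_mpllb() (100 MHz refclk, prescaler_divider
 * of 1); __builtin_clzll() stands in for the kernel's ilog2().
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pixel_clock = 148500;			/* kHz, as passed by the caller */
	uint64_t datarate = pixel_clock * 10000;	/* 10x the pixel clock in Hz: 1.485 GHz */
	uint64_t refclk_postscalar = 100000000 >> 1;	/* 100 MHz refclk, prescaler_divider 1 */
	uint64_t vco_clk, rem, frac;
	uint32_t tx_clk_div, multiplier, fracn_quot, fracn_rem;

	/* datarate fits the v2i=2 bucket; ilog2() picks the largest shift staying under ~10 GHz */
	tx_clk_div = 63 - __builtin_clzll(9999999800ULL / datarate);	/* ilog2(6) = 2 */
	vco_clk = (datarate << tx_clk_div) >> 1;			/* 2.97 GHz VCO */

	/* integer feedback divider, then a 32-bit fixed-point fraction split into quot/rem */
	multiplier = (uint32_t)(vco_clk / refclk_postscalar - 16) * 2;
	rem = vco_clk % refclk_postscalar;
	frac = (rem << 32) / refclk_postscalar;
	fracn_quot = frac >> 16;
	fracn_rem = (frac & 0xffff) - ((frac & 0xffff) >> 15);

	/* prints: tx_clk_div=2 multiplier=86 fracn_quot=26214 fracn_rem=26214 */
	printf("tx_clk_div=%u multiplier=%u fracn_quot=%u fracn_rem=%u\n",
	       tx_clk_div, multiplier, fracn_quot, fracn_rem);
	return 0;
}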
diff --git a/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.h b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.h
new file mode 100644
index 000000000000..aac70c4bb0f8
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Synopsys, Inc., Intel Corporation
+ */
+
+#ifndef __INTEL_SNPS_HDMI_PLL_H__
+#define __INTEL_SNPS_HDMI_PLL_H__
+
+#include <linux/types.h>
+
+struct intel_c10pll_state;
+struct intel_mpllb_state;
+
+void intel_snps_hdmi_pll_compute_mpllb(struct intel_mpllb_state *pll_state, u64 pixel_clock);
+void intel_snps_hdmi_pll_compute_c10pll(struct intel_c10pll_state *pll_state, u64 pixel_clock);
+
+#endif /* __INTEL_SNPS_HDMI_PLL_H__ */
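/*
 * [Editor's sketch, not part of the patch] get_ana_cp_int_prop() in
 * intel_snps_hdmi_pll.c leans on interp(), a fixed-point linear
 * interpolation that keeps five extra decimal digits on the slope. The
 * standalone program below mirrors that helper for positive inputs and
 * evaluates the first DG2 v2i=2 segment of dg2_curve_0
 * (2.5 GHz -> 34149871, 3.0 GHz -> 39803269) at the 2.97 GHz VCO from
 * the 148.5 MHz example above; the helper names are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

/* DIV_ROUND_UP_ULL stand-in, valid for the positive values used here */
static int64_t div_round_up(int64_t a, int64_t b)
{
	return (a + b - 1) / b;
}

static int64_t interp_sketch(int64_t x, int64_t x1, int64_t x2, int64_t y1, int64_t y2)
{
	/* slope scaled by 100000 so integer division keeps precision */
	int64_t dydx = div_round_up((y2 - y1) * 100000, x2 - x1);

	return y1 + div_round_up(dydx * (x - x1), 100000);
}

int main(void)
{
	int64_t y = interp_sketch(2970000000LL, 2500000000LL, 3000000000LL,
				  34149871LL, 39803269LL);

	/* prints a value just below 39803269: 2.97 GHz sits ~94% of the way along the segment */
	printf("curve_0 at 2.97 GHz ~= %lld\n", (long long)y);
	return 0;
}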
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index 41fe26dc200b..b9acd9fe160c 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -5,12 +5,13 @@
#include <linux/math.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_snps_hdmi_pll.h"
#include "intel_snps_phy.h"
#include "intel_snps_phy_regs.h"
@@ -26,12 +27,12 @@
* since it is not handled by the shared DPLL framework as on other platforms.
*/
-void intel_snps_phy_wait_for_calibration(struct drm_i915_private *i915)
+void intel_snps_phy_wait_for_calibration(struct intel_display *display)
{
enum phy phy;
for_each_phy_masked(phy, ~0) {
- if (!intel_phy_is_snps(i915, phy))
+ if (!intel_phy_is_snps(display, phy))
continue;
/*
@@ -39,16 +40,16 @@ void intel_snps_phy_wait_for_calibration(struct drm_i915_private *i915)
* which phy was affected and skip setup of the corresponding
* output later.
*/
- if (intel_de_wait_for_clear(i915, DG2_PHY_MISC(phy),
+ if (intel_de_wait_for_clear(display, DG2_PHY_MISC(phy),
DG2_PHY_DP_TX_ACK_MASK, 25))
- i915->display.snps.phy_failed_calibration |= BIT(phy);
+ display->snps.phy_failed_calibration |= BIT(phy);
}
}
void intel_snps_phy_update_psr_power_state(struct intel_encoder *encoder,
bool enable)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
u32 val;
@@ -57,20 +58,20 @@ void intel_snps_phy_update_psr_power_state(struct intel_encoder *encoder,
val = REG_FIELD_PREP(SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR,
enable ? 2 : 3);
- intel_de_rmw(i915, SNPS_PHY_TX_REQ(phy),
+ intel_de_rmw(display, SNPS_PHY_TX_REQ(phy),
SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR, val);
}
void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_ddi_buf_trans *trans;
enum phy phy = intel_encoder_to_phy(encoder);
int n_entries, ln;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
for (ln = 0; ln < 4; ln++) {
@@ -81,7 +82,7 @@ void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder,
val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_PRE, trans->entries[level].snps.pre_cursor);
val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, trans->entries[level].snps.post_cursor);
- intel_de_write(dev_priv, SNPS_PHY_TX_EQ(ln, phy), val);
+ intel_de_write(display, SNPS_PHY_TX_EQ(ln, phy), val);
}
}
@@ -521,7 +522,7 @@ static const struct intel_mpllb_state dg2_hdmi_148_5 = {
REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
};
-/* values in the below table are calculted using the algo */
+/* values in the below table are calculated using the algo */
static const struct intel_mpllb_state dg2_hdmi_25200 = {
.clock = 25200,
.ref_control =
@@ -1788,24 +1789,9 @@ intel_mpllb_tables_get(struct intel_crtc_state *crtc_state,
int intel_mpllb_calc_state(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_mpllb_state * const *tables;
int i;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- if (intel_snps_phy_check_hdmi_link_rate(crtc_state->port_clock)
- != MODE_OK) {
- /*
- * FIXME: Can only support fixed HDMI frequencies
- * until we have a proper algorithm under a valid
- * license.
- */
- drm_dbg_kms(&i915->drm, "Can't support HDMI link rate %d\n",
- crtc_state->port_clock);
- return -EINVAL;
- }
- }
-
tables = intel_mpllb_tables_get(crtc_state, encoder);
if (!tables)
return -EINVAL;
@@ -1817,13 +1803,21 @@ int intel_mpllb_calc_state(struct intel_crtc_state *crtc_state,
}
}
+ /* For HDMI PLLs, try the SNPS PHY algorithm if there are no precomputed tables */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+ intel_snps_hdmi_pll_compute_mpllb(&crtc_state->dpll_hw_state.mpllb,
+ crtc_state->port_clock);
+
+ return 0;
+ }
+
return -EINVAL;
}
void intel_mpllb_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_mpllb_state *pll_state = &crtc_state->dpll_hw_state.mpllb;
enum phy phy = intel_encoder_to_phy(encoder);
i915_reg_t enable_reg = (phy <= PHY_D ?
@@ -1833,13 +1827,13 @@ void intel_mpllb_enable(struct intel_encoder *encoder,
* 3. Software programs the following PLL registers for the desired
* frequency.
*/
- intel_de_write(dev_priv, SNPS_PHY_MPLLB_CP(phy), pll_state->mpllb_cp);
- intel_de_write(dev_priv, SNPS_PHY_MPLLB_DIV(phy), pll_state->mpllb_div);
- intel_de_write(dev_priv, SNPS_PHY_MPLLB_DIV2(phy), pll_state->mpllb_div2);
- intel_de_write(dev_priv, SNPS_PHY_MPLLB_SSCEN(phy), pll_state->mpllb_sscen);
- intel_de_write(dev_priv, SNPS_PHY_MPLLB_SSCSTEP(phy), pll_state->mpllb_sscstep);
- intel_de_write(dev_priv, SNPS_PHY_MPLLB_FRACN1(phy), pll_state->mpllb_fracn1);
- intel_de_write(dev_priv, SNPS_PHY_MPLLB_FRACN2(phy), pll_state->mpllb_fracn2);
+ intel_de_write(display, SNPS_PHY_MPLLB_CP(phy), pll_state->mpllb_cp);
+ intel_de_write(display, SNPS_PHY_MPLLB_DIV(phy), pll_state->mpllb_div);
+ intel_de_write(display, SNPS_PHY_MPLLB_DIV2(phy), pll_state->mpllb_div2);
+ intel_de_write(display, SNPS_PHY_MPLLB_SSCEN(phy), pll_state->mpllb_sscen);
+ intel_de_write(display, SNPS_PHY_MPLLB_SSCSTEP(phy), pll_state->mpllb_sscstep);
+ intel_de_write(display, SNPS_PHY_MPLLB_FRACN1(phy), pll_state->mpllb_fracn1);
+ intel_de_write(display, SNPS_PHY_MPLLB_FRACN2(phy), pll_state->mpllb_fracn2);
/*
* 4. If the frequency will result in a change to the voltage
@@ -1850,7 +1844,7 @@ void intel_mpllb_enable(struct intel_encoder *encoder,
*/
/* 5. Software sets DPLL_ENABLE [PLL Enable] to "1". */
- intel_de_rmw(dev_priv, enable_reg, 0, PLL_ENABLE);
+ intel_de_rmw(display, enable_reg, 0, PLL_ENABLE);
/*
* 9. Software sets SNPS_PHY_MPLLB_DIV dp_mpllb_force_en to "1". This
@@ -1859,7 +1853,7 @@ void intel_mpllb_enable(struct intel_encoder *encoder,
* PLL because that will start the PLL before it has sampled the
* divider values.
*/
- intel_de_write(dev_priv, SNPS_PHY_MPLLB_DIV(phy),
+ intel_de_write(display, SNPS_PHY_MPLLB_DIV(phy),
pll_state->mpllb_div | SNPS_PHY_MPLLB_FORCE_EN);
/*
@@ -1867,8 +1861,8 @@ void intel_mpllb_enable(struct intel_encoder *encoder,
* is locked at new settings. This register bit is sampling PHY
* dp_mpllb_state interface signal.
*/
- if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 5))
- drm_dbg_kms(&dev_priv->drm, "Port %c PLL not locked\n", phy_name(phy));
+ if (intel_de_wait_for_set(display, enable_reg, PLL_LOCK, 5))
+ drm_dbg_kms(display->drm, "Port %c PLL not locked\n", phy_name(phy));
/*
* 11. If the frequency will result in a change to the voltage
@@ -1881,7 +1875,7 @@ void intel_mpllb_enable(struct intel_encoder *encoder,
void intel_mpllb_disable(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
i915_reg_t enable_reg = (phy <= PHY_D ?
DG2_PLL_ENABLE(phy) : MG_PLL_ENABLE(0));
@@ -1895,20 +1889,20 @@ void intel_mpllb_disable(struct intel_encoder *encoder)
*/
/* 2. Software programs DPLL_ENABLE [PLL Enable] to "0" */
- intel_de_rmw(i915, enable_reg, PLL_ENABLE, 0);
+ intel_de_rmw(display, enable_reg, PLL_ENABLE, 0);
/*
* 4. Software programs SNPS_PHY_MPLLB_DIV dp_mpllb_force_en to "0".
* This will allow the PLL to stop running.
*/
- intel_de_rmw(i915, SNPS_PHY_MPLLB_DIV(phy), SNPS_PHY_MPLLB_FORCE_EN, 0);
+ intel_de_rmw(display, SNPS_PHY_MPLLB_DIV(phy), SNPS_PHY_MPLLB_FORCE_EN, 0);
/*
* 5. Software polls DPLL_ENABLE [PLL Lock] for PHY acknowledgment
* (dp_txX_ack) that the new transmitter setting request is completed.
*/
- if (intel_de_wait_for_clear(i915, enable_reg, PLL_LOCK, 5))
- drm_err(&i915->drm, "Port %c PLL not locked\n", phy_name(phy));
+ if (intel_de_wait_for_clear(display, enable_reg, PLL_LOCK, 5))
+ drm_err(display->drm, "Port %c PLL not locked\n", phy_name(phy));
/*
* 6. If the frequency will result in a change to the voltage
@@ -1953,16 +1947,16 @@ int intel_mpllb_calc_port_clock(struct intel_encoder *encoder,
void intel_mpllb_readout_hw_state(struct intel_encoder *encoder,
struct intel_mpllb_state *pll_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- pll_state->mpllb_cp = intel_de_read(dev_priv, SNPS_PHY_MPLLB_CP(phy));
- pll_state->mpllb_div = intel_de_read(dev_priv, SNPS_PHY_MPLLB_DIV(phy));
- pll_state->mpllb_div2 = intel_de_read(dev_priv, SNPS_PHY_MPLLB_DIV2(phy));
- pll_state->mpllb_sscen = intel_de_read(dev_priv, SNPS_PHY_MPLLB_SSCEN(phy));
- pll_state->mpllb_sscstep = intel_de_read(dev_priv, SNPS_PHY_MPLLB_SSCSTEP(phy));
- pll_state->mpllb_fracn1 = intel_de_read(dev_priv, SNPS_PHY_MPLLB_FRACN1(phy));
- pll_state->mpllb_fracn2 = intel_de_read(dev_priv, SNPS_PHY_MPLLB_FRACN2(phy));
+ pll_state->mpllb_cp = intel_de_read(display, SNPS_PHY_MPLLB_CP(phy));
+ pll_state->mpllb_div = intel_de_read(display, SNPS_PHY_MPLLB_DIV(phy));
+ pll_state->mpllb_div2 = intel_de_read(display, SNPS_PHY_MPLLB_DIV2(phy));
+ pll_state->mpllb_sscen = intel_de_read(display, SNPS_PHY_MPLLB_SSCEN(phy));
+ pll_state->mpllb_sscstep = intel_de_read(display, SNPS_PHY_MPLLB_SSCSTEP(phy));
+ pll_state->mpllb_fracn1 = intel_de_read(display, SNPS_PHY_MPLLB_FRACN1(phy));
+ pll_state->mpllb_fracn2 = intel_de_read(display, SNPS_PHY_MPLLB_FRACN2(phy));
/*
* REF_CONTROL is under firmware control and never programmed by the
@@ -1970,7 +1964,7 @@ void intel_mpllb_readout_hw_state(struct intel_encoder *encoder,
* only tells us the expected value for one field in this register,
* so we'll only read out those specific bits here.
*/
- pll_state->ref_control = intel_de_read(dev_priv, SNPS_PHY_REF_CONTROL(phy)) &
+ pll_state->ref_control = intel_de_read(display, SNPS_PHY_REF_CONTROL(phy)) &
SNPS_PHY_REF_CONTROL_REF_RANGE;
/*
@@ -1982,31 +1976,17 @@ void intel_mpllb_readout_hw_state(struct intel_encoder *encoder,
pll_state->mpllb_div &= ~SNPS_PHY_MPLLB_FORCE_EN;
}
-int intel_snps_phy_check_hdmi_link_rate(int clock)
-{
- const struct intel_mpllb_state * const *tables = dg2_hdmi_tables;
- int i;
-
- for (i = 0; tables[i]; i++) {
- if (clock == tables[i]->clock)
- return MODE_OK;
- }
-
- return MODE_CLOCK_RANGE;
-}
-
void intel_mpllb_state_verify(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_mpllb_state mpllb_hw_state = {};
const struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->dpll_hw_state.mpllb;
struct intel_encoder *encoder;
- if (!IS_DG2(i915))
+ if (!display->platform.dg2)
return;
if (!new_crtc_state->hw.active)
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.h b/drivers/gpu/drm/i915/display/intel_snps_phy.h
index bc08b92a7cd9..7f96da22d028 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.h
@@ -8,15 +8,15 @@
#include <linux/types.h>
-struct drm_i915_private;
+enum phy;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_encoder;
struct intel_mpllb_state;
-enum phy;
-void intel_snps_phy_wait_for_calibration(struct drm_i915_private *dev_priv);
+void intel_snps_phy_wait_for_calibration(struct intel_display *display);
void intel_snps_phy_update_psr_power_state(struct intel_encoder *encoder,
bool enable);
@@ -30,7 +30,6 @@ void intel_mpllb_readout_hw_state(struct intel_encoder *encoder,
int intel_mpllb_calc_port_clock(struct intel_encoder *encoder,
const struct intel_mpllb_state *pll_state);
-int intel_snps_phy_check_hdmi_link_rate(int clock);
void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_mpllb_state_verify(struct intel_atomic_state *state,
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index e6fadcef58e0..1ad6c8a94b3d 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -66,8 +66,8 @@ static void i9xx_plane_linear_gamma(u16 gamma[8])
static void
chv_sprite_update_csc(const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct intel_display *display = to_intel_display(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
enum plane_id plane_id = plane->id;
/*
@@ -138,8 +138,8 @@ chv_sprite_update_csc(const struct intel_plane_state *plane_state)
static void
vlv_sprite_update_clrc(const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct intel_display *display = to_intel_display(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
@@ -253,21 +253,6 @@ int vlv_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
return DIV_ROUND_UP(pixel_rate * num, den);
}
-static unsigned int vlv_sprite_min_alignment(struct intel_plane *plane,
- const struct drm_framebuffer *fb,
- int color_plane)
-{
- switch (fb->modifier) {
- case I915_FORMAT_MOD_X_TILED:
- return 4 * 1024;
- case DRM_FORMAT_MOD_LINEAR:
- return 128 * 1024;
- default:
- MISSING_CASE(fb->modifier);
- return 0;
- }
-}
-
static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
u32 sprctl = 0;
@@ -356,8 +341,8 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
static void vlv_sprite_update_gamma(const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct intel_display *display = to_intel_display(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
@@ -383,7 +368,7 @@ vlv_sprite_update_noarm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
int crtc_x = plane_state->uapi.dst.x1;
@@ -405,8 +390,7 @@ vlv_sprite_update_arm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
@@ -419,7 +403,7 @@ vlv_sprite_update_arm(struct intel_dsb *dsb,
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
- if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
+ if (display->platform.cherryview && pipe == PIPE_B)
chv_sprite_update_csc(plane_state);
if (key->flags) {
@@ -455,7 +439,7 @@ vlv_sprite_disable_arm(struct intel_dsb *dsb,
struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
@@ -463,19 +447,29 @@ vlv_sprite_disable_arm(struct intel_dsb *dsb,
intel_de_write_fw(display, SPSURF(pipe, plane_id), 0);
}
+static void vlv_sprite_capture_error(struct intel_crtc *crtc,
+ struct intel_plane *plane,
+ struct intel_plane_error *error)
+{
+ struct intel_display *display = to_intel_display(plane);
+
+ error->ctl = intel_de_read(display, SPCNTR(crtc->pipe, plane->id));
+ error->surf = intel_de_read(display, SPSURF(crtc->pipe, plane->id));
+ error->surflive = intel_de_read(display, SPSURFLIVE(crtc->pipe, plane->id));
+}
+
static bool
vlv_sprite_get_hw_state(struct intel_plane *plane,
enum pipe *pipe)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum intel_display_power_domain power_domain;
enum plane_id plane_id = plane->id;
intel_wakeref_t wakeref;
bool ret;
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ wakeref = intel_display_power_get_if_enabled(display, power_domain);
if (!wakeref)
return false;
@@ -483,7 +477,7 @@ vlv_sprite_get_hw_state(struct intel_plane *plane,
*pipe = plane->pipe;
- intel_display_power_put(dev_priv, power_domain, wakeref);
+ intel_display_power_put(display, power_domain, wakeref);
return ret;
}
@@ -661,19 +655,17 @@ static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
static bool ivb_need_sprite_gamma(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
return fb->format->cpp[0] == 8 &&
- (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv));
+ (display->platform.ivybridge || display->platform.haswell);
}
static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int rotation = plane_state->hw.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
@@ -681,7 +673,7 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
sprctl = SPRITE_ENABLE;
- if (IS_IVYBRIDGE(dev_priv))
+ if (display->platform.ivybridge)
sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
switch (fb->format->format) {
@@ -770,8 +762,8 @@ static void ivb_sprite_linear_gamma(const struct intel_plane_state *plane_state,
static void ivb_sprite_update_gamma(const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct intel_display *display = to_intel_display(plane->base.dev);
enum pipe pipe = plane->pipe;
u16 gamma[18];
int i;
@@ -803,8 +795,7 @@ ivb_sprite_update_noarm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
@@ -825,7 +816,7 @@ ivb_sprite_update_noarm(struct intel_dsb *dsb,
SPRITE_POS_Y(crtc_y) | SPRITE_POS_X(crtc_x));
intel_de_write_fw(display, SPRSIZE(pipe),
SPRITE_HEIGHT(crtc_h - 1) | SPRITE_WIDTH(crtc_w - 1));
- if (IS_IVYBRIDGE(dev_priv))
+ if (display->platform.ivybridge)
intel_de_write_fw(display, SPRSCALE(pipe), sprscale);
}
@@ -835,8 +826,7 @@ ivb_sprite_update_arm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 sprsurf_offset = plane_state->view.color_plane[0].offset;
@@ -857,7 +847,7 @@ ivb_sprite_update_arm(struct intel_dsb *dsb,
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
* register */
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ if (display->platform.haswell || display->platform.broadwell) {
intel_de_write_fw(display, SPROFFSET(pipe),
SPRITE_OFFSET_Y(y) | SPRITE_OFFSET_X(x));
} else {
@@ -883,29 +873,38 @@ ivb_sprite_disable_arm(struct intel_dsb *dsb,
struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
intel_de_write_fw(display, SPRCTL(pipe), 0);
/* Disable the scaler */
- if (IS_IVYBRIDGE(dev_priv))
+ if (display->platform.ivybridge)
intel_de_write_fw(display, SPRSCALE(pipe), 0);
intel_de_write_fw(display, SPRSURF(pipe), 0);
}
+static void ivb_sprite_capture_error(struct intel_crtc *crtc,
+ struct intel_plane *plane,
+ struct intel_plane_error *error)
+{
+ struct intel_display *display = to_intel_display(plane);
+
+ error->ctl = intel_de_read(display, SPRCTL(crtc->pipe));
+ error->surf = intel_de_read(display, SPRSURF(crtc->pipe));
+ error->surflive = intel_de_read(display, SPRSURFLIVE(crtc->pipe));
+}
+
static bool
ivb_sprite_get_hw_state(struct intel_plane *plane,
enum pipe *pipe)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
bool ret;
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ wakeref = intel_display_power_get_if_enabled(display, power_domain);
if (!wakeref)
return false;
@@ -913,7 +912,7 @@ ivb_sprite_get_hw_state(struct intel_plane *plane,
*pipe = plane->pipe;
- intel_display_power_put(dev_priv, power_domain, wakeref);
+ intel_display_power_put(display, power_domain, wakeref);
return ret;
}
@@ -995,6 +994,11 @@ static unsigned int g4x_sprite_min_alignment(struct intel_plane *plane,
const struct drm_framebuffer *fb,
int color_plane)
{
+ struct intel_display *display = to_intel_display(plane);
+
+ if (intel_scanout_needs_vtd_wa(display))
+ return 128 * 1024;
+
return 4 * 1024;
}
@@ -1014,8 +1018,7 @@ static u32 g4x_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int rotation = plane_state->hw.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
@@ -1023,7 +1026,7 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
dvscntr = DVS_ENABLE;
- if (IS_SANDYBRIDGE(dev_priv))
+ if (display->platform.sandybridge)
dvscntr |= DVS_TRICKLE_FEED_DISABLE;
switch (fb->format->format) {
@@ -1084,8 +1087,8 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
static void g4x_sprite_update_gamma(const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct intel_display *display = to_intel_display(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
u16 gamma[8];
@@ -1114,8 +1117,8 @@ static void ilk_sprite_linear_gamma(u16 gamma[17])
static void ilk_sprite_update_gamma(const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct intel_display *display = to_intel_display(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
u16 gamma[17];
@@ -1144,7 +1147,7 @@ g4x_sprite_update_noarm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
@@ -1174,8 +1177,7 @@ g4x_sprite_update_arm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 dvssurf_offset = plane_state->view.color_plane[0].offset;
@@ -1207,7 +1209,7 @@ g4x_sprite_update_arm(struct intel_dsb *dsb,
intel_de_write_fw(display, DVSSURF(pipe),
intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
- if (IS_G4X(dev_priv))
+ if (display->platform.g4x)
g4x_sprite_update_gamma(plane_state);
else
ilk_sprite_update_gamma(plane_state);
@@ -1218,7 +1220,7 @@ g4x_sprite_disable_arm(struct intel_dsb *dsb,
struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
intel_de_write_fw(display, DVSCNTR(pipe), 0);
@@ -1227,18 +1229,28 @@ g4x_sprite_disable_arm(struct intel_dsb *dsb,
intel_de_write_fw(display, DVSSURF(pipe), 0);
}
+static void g4x_sprite_capture_error(struct intel_crtc *crtc,
+ struct intel_plane *plane,
+ struct intel_plane_error *error)
+{
+ struct intel_display *display = to_intel_display(plane);
+
+ error->ctl = intel_de_read(display, DVSCNTR(crtc->pipe));
+ error->surf = intel_de_read(display, DVSSURF(crtc->pipe));
+ error->surflive = intel_de_read(display, DVSSURFLIVE(crtc->pipe));
+}
+
static bool
g4x_sprite_get_hw_state(struct intel_plane *plane,
enum pipe *pipe)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
bool ret;
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ wakeref = intel_display_power_get_if_enabled(display, power_domain);
if (!wakeref)
return false;
@@ -1246,7 +1258,7 @@ g4x_sprite_get_hw_state(struct intel_plane *plane,
*pipe = plane->pipe;
- intel_display_power_put(dev_priv, power_domain, wakeref);
+ intel_display_power_put(display, power_domain, wakeref);
return ret;
}
@@ -1272,7 +1284,7 @@ static int
g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_display *display = to_intel_display(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
const struct drm_rect *src = &plane_state->uapi.src;
const struct drm_rect *dst = &plane_state->uapi.dst;
@@ -1338,9 +1350,7 @@ static int
g4x_sprite_check(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- struct intel_display *display = to_intel_display(crtc_state);
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane_state);
int min_scale = DRM_PLANE_NO_SCALING;
int max_scale = DRM_PLANE_NO_SCALING;
int ret;
@@ -1349,7 +1359,7 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
if (DISPLAY_VER(display) < 7) {
min_scale = 1;
max_scale = 16 << 16;
- } else if (IS_IVYBRIDGE(dev_priv)) {
+ } else if (display->platform.ivybridge) {
min_scale = 1;
max_scale = 2 << 16;
}
@@ -1385,13 +1395,11 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
int chv_plane_check_rotation(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct intel_display *display = to_intel_display(plane->base.dev);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane_state);
unsigned int rotation = plane_state->hw.rotation;
/* CHV ignores the mirror bit when the rotate bit is set :( */
- if (IS_CHERRYVIEW(dev_priv) &&
+ if (display->platform.cherryview &&
rotation & DRM_MODE_ROTATE_180 &&
rotation & DRM_MODE_REFLECT_X) {
drm_dbg_kms(display->drm,
@@ -1593,10 +1601,9 @@ static const struct drm_plane_funcs vlv_sprite_funcs = {
};
struct intel_plane *
-intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+intel_sprite_plane_create(struct intel_display *display,
enum pipe pipe, int sprite)
{
- struct intel_display *display = &dev_priv->display;
struct intel_plane *plane;
const struct drm_plane_funcs *plane_funcs;
unsigned int supported_rotations;
@@ -1609,17 +1616,22 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
if (IS_ERR(plane))
return plane;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.valleyview || display->platform.cherryview) {
plane->update_noarm = vlv_sprite_update_noarm;
plane->update_arm = vlv_sprite_update_arm;
plane->disable_arm = vlv_sprite_disable_arm;
+ plane->capture_error = vlv_sprite_capture_error;
plane->get_hw_state = vlv_sprite_get_hw_state;
plane->check_plane = vlv_sprite_check;
plane->max_stride = i965_plane_max_stride;
- plane->min_alignment = vlv_sprite_min_alignment;
+ plane->min_alignment = vlv_plane_min_alignment;
plane->min_cdclk = vlv_plane_min_cdclk;
- if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ /* FIXME undocumented for VLV/CHV so not sure what's actually needed */
+ if (intel_scanout_needs_vtd_wa(display))
+ plane->vtd_guard = 128;
+
+ if (display->platform.cherryview && pipe == PIPE_B) {
formats = chv_pipe_b_sprite_formats;
num_formats = ARRAY_SIZE(chv_pipe_b_sprite_formats);
} else {
@@ -1632,10 +1644,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->update_noarm = ivb_sprite_update_noarm;
plane->update_arm = ivb_sprite_update_arm;
plane->disable_arm = ivb_sprite_disable_arm;
+ plane->capture_error = ivb_sprite_capture_error;
plane->get_hw_state = ivb_sprite_get_hw_state;
plane->check_plane = g4x_sprite_check;
- if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ if (display->platform.broadwell || display->platform.haswell) {
plane->max_stride = hsw_sprite_max_stride;
plane->min_cdclk = hsw_plane_min_cdclk;
} else {
@@ -1645,6 +1658,9 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->min_alignment = g4x_sprite_min_alignment;
+ if (intel_scanout_needs_vtd_wa(display))
+ plane->vtd_guard = 64;
+
formats = snb_sprite_formats;
num_formats = ARRAY_SIZE(snb_sprite_formats);
@@ -1653,13 +1669,17 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->update_noarm = g4x_sprite_update_noarm;
plane->update_arm = g4x_sprite_update_arm;
plane->disable_arm = g4x_sprite_disable_arm;
+ plane->capture_error = g4x_sprite_capture_error;
plane->get_hw_state = g4x_sprite_get_hw_state;
plane->check_plane = g4x_sprite_check;
plane->max_stride = g4x_sprite_max_stride;
plane->min_alignment = g4x_sprite_min_alignment;
plane->min_cdclk = g4x_sprite_min_cdclk;
- if (IS_SANDYBRIDGE(dev_priv)) {
+ if (intel_scanout_needs_vtd_wa(display))
+ plane->vtd_guard = 64;
+
+ if (display->platform.sandybridge) {
formats = snb_sprite_formats;
num_formats = ARRAY_SIZE(snb_sprite_formats);
@@ -1672,7 +1692,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
}
}
- if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ if (display->platform.cherryview && pipe == PIPE_B) {
supported_rotations =
DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
DRM_MODE_REFLECT_X;
@@ -1685,7 +1705,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->id = PLANE_SPRITE0 + sprite;
plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
- modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_TILING_X);
+ modifiers = intel_fb_plane_get_modifiers(display, INTEL_PLANE_CAP_TILING_X);
ret = drm_universal_plane_init(display->drm, &plane->base,
0, plane_funcs,
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h
index 531079979c05..c33a2808da8c 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.h
+++ b/drivers/gpu/drm/i915/display/intel_sprite.h
@@ -8,13 +8,13 @@
#include <linux/types.h>
-struct drm_i915_private;
struct intel_crtc_state;
+struct intel_display;
struct intel_plane_state;
enum pipe;
#ifdef I915
-struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+struct intel_plane *intel_sprite_plane_create(struct intel_display *display,
enum pipe pipe, int plane);
int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
int chv_plane_check_rotation(const struct intel_plane_state *plane_state);
@@ -26,7 +26,7 @@ int hsw_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
int vlv_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
#else
-static inline struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+static inline struct intel_plane *intel_sprite_plane_create(struct intel_display *display,
int pipe, int plane)
{
return NULL;
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 13811244c82b..b8d14ed8a56e 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -177,21 +177,21 @@ bool intel_tc_port_handles_hpd_glitches(struct intel_digital_port *dig_port)
*/
bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
return tc_phy_cold_off_domain(tc) ==
- intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
+ intel_display_power_legacy_aux_domain(display, dig_port->aux_ch);
}
static intel_wakeref_t
__tc_cold_block(struct intel_tc_port *tc, enum intel_display_power_domain *domain)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
*domain = tc_phy_cold_off_domain(tc);
- return intel_display_power_get(i915, *domain);
+ return intel_display_power_get(display, *domain);
}
static intel_wakeref_t
@@ -211,9 +211,9 @@ static void
__tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
- intel_display_power_put(i915, domain, wakeref);
+ intel_display_power_put(display, domain, wakeref);
}
static void
@@ -230,21 +230,21 @@ tc_cold_unblock(struct intel_tc_port *tc, intel_wakeref_t wakeref)
static void
assert_display_core_power_enabled(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
- drm_WARN_ON(&i915->drm,
- !intel_display_power_is_enabled(i915, POWER_DOMAIN_DISPLAY_CORE));
+ drm_WARN_ON(display->drm,
+ !intel_display_power_is_enabled(display, POWER_DOMAIN_DISPLAY_CORE));
}
static void
assert_tc_cold_blocked(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
bool enabled;
- enabled = intel_display_power_is_enabled(i915,
+ enabled = intel_display_power_is_enabled(display,
tc_phy_cold_off_domain(tc));
- drm_WARN_ON(&i915->drm, !enabled);
+ drm_WARN_ON(display->drm, !enabled);
}
static enum intel_display_power_domain
@@ -258,10 +258,10 @@ tc_port_power_domain(struct intel_tc_port *tc)
static void
assert_tc_port_power_enabled(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
- drm_WARN_ON(&i915->drm,
- !intel_display_power_is_enabled(i915, tc_port_power_domain(tc)));
+ drm_WARN_ON(display->drm,
+ !intel_display_power_is_enabled(display, tc_port_power_domain(tc)));
}
static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
@@ -296,12 +296,13 @@ u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
+ struct intel_display *display = to_intel_display(dig_port);
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
intel_wakeref_t wakeref;
u32 val, pin_assignment;
- with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
+ with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));
pin_assignment =
@@ -321,11 +322,11 @@ static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
static int mtl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
intel_wakeref_t wakeref;
u32 pin_mask;
- with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
+ with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
pin_mask = intel_tc_port_get_pin_assignment_mask(dig_port);
switch (pin_mask) {
@@ -342,11 +343,11 @@ static int mtl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
intel_wakeref_t wakeref;
u32 lane_mask = 0;
- with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
+ with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
lane_mask = intel_tc_port_get_lane_mask(dig_port);
switch (lane_mask) {
@@ -477,17 +478,18 @@ static void tc_phy_load_fia_params(struct intel_tc_port *tc, bool modular_fia)
static enum intel_display_power_domain
icl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct intel_digital_port *dig_port = tc->dig_port;
if (tc->legacy_port)
- return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
+ return intel_display_power_legacy_aux_domain(display, dig_port->aux_ch);
return POWER_DOMAIN_TC_COLD_OFF;
}
static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct drm_i915_private *i915 = tc_to_i915(tc);
struct intel_digital_port *dig_port = tc->dig_port;
u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
@@ -496,7 +498,7 @@ static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc)
u32 pch_isr;
u32 mask = 0;
- with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref) {
+ with_intel_display_power(display, tc_phy_cold_off_domain(tc), wakeref) {
fia_isr = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));
pch_isr = intel_de_read(i915, SDEISR);
}
@@ -730,11 +732,12 @@ tgl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
static void tgl_tc_phy_init(struct intel_tc_port *tc)
{
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct drm_i915_private *i915 = tc_to_i915(tc);
intel_wakeref_t wakeref;
u32 val;
- with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref)
+ with_intel_display_power(display, tc_phy_cold_off_domain(tc), wakeref)
val = intel_de_read(i915, PORT_TX_DFLEXDPSP(FIA1));
drm_WARN_ON(&i915->drm, val == 0xffffffff);
@@ -760,17 +763,18 @@ static const struct intel_tc_phy_ops tgl_tc_phy_ops = {
static enum intel_display_power_domain
adlp_tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct intel_digital_port *dig_port = tc->dig_port;
if (tc->mode != TC_PORT_TBT_ALT)
- return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
+ return intel_display_power_legacy_aux_domain(display, dig_port->aux_ch);
return POWER_DOMAIN_TC_COLD_OFF;
}
static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct drm_i915_private *i915 = tc_to_i915(tc);
struct intel_digital_port *dig_port = tc->dig_port;
enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
@@ -781,7 +785,7 @@ static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
u32 pch_isr;
u32 mask = 0;
- with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
+ with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
cpu_isr = intel_de_read(i915, GEN11_DE_HPD_ISR);
pch_isr = intel_de_read(i915, SDEISR);
}
@@ -851,22 +855,23 @@ static bool adlp_tc_phy_is_owned(struct intel_tc_port *tc)
static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum intel_display_power_domain port_power_domain =
tc_port_power_domain(tc);
intel_wakeref_t port_wakeref;
- port_wakeref = intel_display_power_get(i915, port_power_domain);
+ port_wakeref = intel_display_power_get(display, port_power_domain);
tc->mode = tc_phy_get_current_mode(tc);
if (tc->mode != TC_PORT_DISCONNECTED)
tc->lock_wakeref = tc_cold_block(tc);
- intel_display_power_put(i915, port_power_domain, port_wakeref);
+ intel_display_power_put(display, port_power_domain, port_wakeref);
}
static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct drm_i915_private *i915 = tc_to_i915(tc);
enum intel_display_power_domain port_power_domain =
tc_port_power_domain(tc);
@@ -877,7 +882,7 @@ static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
return true;
}
- port_wakeref = intel_display_power_get(i915, port_power_domain);
+ port_wakeref = intel_display_power_get(display, port_power_domain);
if (!adlp_tc_phy_take_ownership(tc, true) &&
!drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
@@ -898,7 +903,7 @@ static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
goto out_unblock_tc_cold;
- intel_display_power_put(i915, port_power_domain, port_wakeref);
+ intel_display_power_put(display, port_power_domain, port_wakeref);
return true;
@@ -907,19 +912,19 @@ out_unblock_tc_cold:
out_release_phy:
adlp_tc_phy_take_ownership(tc, false);
out_put_port_power:
- intel_display_power_put(i915, port_power_domain, port_wakeref);
+ intel_display_power_put(display, port_power_domain, port_wakeref);
return false;
}
static void adlp_tc_phy_disconnect(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum intel_display_power_domain port_power_domain =
tc_port_power_domain(tc);
intel_wakeref_t port_wakeref;
- port_wakeref = intel_display_power_get(i915, port_power_domain);
+ port_wakeref = intel_display_power_get(display, port_power_domain);
tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
@@ -934,7 +939,7 @@ static void adlp_tc_phy_disconnect(struct intel_tc_port *tc)
MISSING_CASE(tc->mode);
}
- intel_display_power_put(i915, port_power_domain, port_wakeref);
+ intel_display_power_put(display, port_power_domain, port_wakeref);
}
static void adlp_tc_phy_init(struct intel_tc_port *tc)
@@ -959,6 +964,7 @@ static const struct intel_tc_phy_ops adlp_tc_phy_ops = {
*/
static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct drm_i915_private *i915 = tc_to_i915(tc);
struct intel_digital_port *dig_port = tc->dig_port;
enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
@@ -969,7 +975,7 @@ static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
u32 pch_isr;
u32 mask = 0;
- with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
+ with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
pica_isr = intel_de_read(i915, PICAINTERRUPT_ISR);
pch_isr = intel_de_read(i915, SDEISR);
}
@@ -1436,25 +1442,25 @@ static void tc_phy_init(struct intel_tc_port *tc)
static void intel_tc_port_reset_mode(struct intel_tc_port *tc,
int required_lanes, bool force_disconnect)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct intel_digital_port *dig_port = tc->dig_port;
enum tc_port_mode old_tc_mode = tc->mode;
- intel_display_power_flush_work(i915);
+ intel_display_power_flush_work(display);
if (!intel_tc_cold_requires_aux_pw(dig_port)) {
enum intel_display_power_domain aux_domain;
bool aux_powered;
aux_domain = intel_aux_power_domain(dig_port);
- aux_powered = intel_display_power_is_enabled(i915, aux_domain);
- drm_WARN_ON(&i915->drm, aux_powered);
+ aux_powered = intel_display_power_is_enabled(display, aux_domain);
+ drm_WARN_ON(display->drm, aux_powered);
}
tc_phy_disconnect(tc);
if (!force_disconnect)
tc_phy_connect(tc, required_lanes);
- drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",
+ drm_dbg_kms(display->drm, "Port %s: TC port mode reset (%s -> %s)\n",
tc->port_name,
tc_port_mode_name(old_tc_mode),
tc_port_mode_name(tc->mode));
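For reference, a minimal sketch (not part of the patch; hypothetical function name, assuming the usual i915 display headers) of the conversion pattern the intel_tc.c hunks apply throughout: the polymorphic to_intel_display() cast replaces to_i915()/tc_to_i915(), and the display power helpers now take struct intel_display:

/*
 * Illustrative sketch only: the i915 -> display conversion pattern,
 * using only calls that already appear in the hunks above.
 */
static void example_tc_power_check(struct intel_digital_port *dig_port)
{
	struct intel_display *display = to_intel_display(dig_port);
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get(display, POWER_DOMAIN_DISPLAY_CORE);
	drm_WARN_ON(display->drm,
		    !intel_display_power_is_enabled(display, POWER_DOMAIN_DISPLAY_CORE));
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
}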
diff --git a/drivers/gpu/drm/i915/display/intel_tdf.h b/drivers/gpu/drm/i915/display/intel_tdf.h
index 353cde21f6c2..0862c2bfd9cd 100644
--- a/drivers/gpu/drm/i915/display/intel_tdf.h
+++ b/drivers/gpu/drm/i915/display/intel_tdf.h
@@ -14,12 +14,12 @@
* the display flip, since display engine is never coherent with CPU/GPU caches.
*/
-struct drm_i915_private;
+struct intel_display;
#ifdef I915
-static inline void intel_td_flush(struct drm_i915_private *i915) {}
+static inline void intel_td_flush(struct intel_display *display) {}
#else
-void intel_td_flush(struct drm_i915_private *i915);
+void intel_td_flush(struct intel_display *display);
#endif
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 6e311dcc1a61..5dbe857ea85b 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -957,15 +957,14 @@ static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state
static enum drm_mode_status
intel_tv_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *i915 = to_i915(connector->dev);
const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
int max_dotclk = display->cdclk.max_dotclk_freq;
enum drm_mode_status status;
- status = intel_cpu_transcoder_mode_valid(i915, mode);
+ status = intel_cpu_transcoder_mode_valid(display, mode);
if (status != MODE_OK)
return status;
@@ -1436,7 +1435,6 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_tv *intel_tv = enc_to_tv(encoder);
const struct intel_tv_connector_state *tv_conn_state =
@@ -1543,7 +1541,7 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state,
intel_de_write(display, TV_CLR_LEVEL,
((video_levels->black << TV_BLACK_LEVEL_SHIFT) | (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
- assert_transcoder_disabled(dev_priv, pipe_config->cpu_transcoder);
+ assert_transcoder_disabled(display, pipe_config->cpu_transcoder);
/* Filter ctl must be set before TV_WIN_SIZE */
tv_filter_ctl = TV_AUTO_SCALE;
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
index a95fb3349eba..4efd4f7d497a 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -369,7 +369,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
/*
* Already exiting vblank? If so, shift our position
- * so it looks like we're already apporaching the full
+ * so it looks like we're already approaching the full
* vblank end. This should make the generated timestamp
* more or less match when the active portion will start.
*/
@@ -507,6 +507,23 @@ void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
wait_for_pipe_scanline_moving(crtc, true);
}
+static void intel_crtc_active_timings(struct drm_display_mode *mode,
+ int *vmax_vblank_start,
+ const struct intel_crtc_state *crtc_state,
+ bool vrr_enable)
+{
+ drm_mode_init(mode, &crtc_state->hw.adjusted_mode);
+ *vmax_vblank_start = 0;
+
+ if (!vrr_enable)
+ return;
+
+ mode->crtc_vtotal = intel_vrr_vmax_vtotal(crtc_state);
+ mode->crtc_vblank_end = intel_vrr_vmax_vtotal(crtc_state);
+ mode->crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
+ *vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
+}
+
void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
bool vrr_enable)
{
@@ -517,19 +534,13 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
int vmax_vblank_start = 0;
unsigned long irqflags;
- drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode);
-
- if (vrr_enable) {
- drm_WARN_ON(display->drm,
- (mode_flags & I915_MODE_FLAG_VRR) == 0);
+ intel_crtc_active_timings(&adjusted_mode, &vmax_vblank_start,
+ crtc_state, vrr_enable);
- adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
- adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
- adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
- vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
- } else {
+ if (vrr_enable)
+ drm_WARN_ON(display->drm, (mode_flags & I915_MODE_FLAG_VRR) == 0);
+ else
mode_flags &= ~I915_MODE_FLAG_VRR;
- }
/*
* Belts and suspenders locking to guarantee everyone sees 100%
@@ -597,6 +608,37 @@ int intel_mode_vtotal(const struct drm_display_mode *mode)
return vtotal;
}
+int intel_mode_vblank_delay(const struct drm_display_mode *mode)
+{
+ return intel_mode_vblank_start(mode) - intel_mode_vdisplay(mode);
+}
+
+static const struct intel_crtc_state *
+pre_commit_crtc_state(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ /*
+ * During fastsets/etc. the transcoder is still
+ * running with the old timings at this point.
+ */
+ if (intel_crtc_needs_modeset(new_crtc_state))
+ return new_crtc_state;
+ else
+ return old_crtc_state;
+}
+
+const struct intel_crtc_state *
+intel_pre_commit_crtc_state(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ return pre_commit_crtc_state(old_crtc_state, new_crtc_state);
+}
+
void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state,
struct intel_vblank_evade_ctx *evade)
@@ -605,6 +647,7 @@ void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
const struct intel_crtc_state *crtc_state;
const struct drm_display_mode *adjusted_mode;
+ int vblank_delay;
evade->crtc = crtc;
@@ -612,16 +655,8 @@ void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state,
display->platform.cherryview) &&
intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
- /*
- * During fastsets/etc. the transcoder is still
- * running with the old timings at this point.
- *
- * TODO: maybe just use the active timings here?
- */
- if (intel_crtc_needs_modeset(new_crtc_state))
- crtc_state = new_crtc_state;
- else
- crtc_state = old_crtc_state;
+ /* TODO: maybe just use the active timings here? */
+ crtc_state = pre_commit_crtc_state(old_crtc_state, new_crtc_state);
adjusted_mode = &crtc_state->hw.adjusted_mode;
@@ -634,8 +669,12 @@ void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state,
evade->vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
else
evade->vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
+
+ vblank_delay = intel_vrr_vblank_delay(crtc_state);
} else {
evade->vblank_start = intel_mode_vblank_start(adjusted_mode);
+
+ vblank_delay = intel_mode_vblank_delay(adjusted_mode);
}
/* FIXME needs to be calibrated sensibly */
@@ -653,8 +692,7 @@ void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state,
*/
if (intel_color_uses_dsb(new_crtc_state) ||
new_crtc_state->update_m_n || new_crtc_state->update_lrr)
- evade->min -= intel_mode_vblank_start(adjusted_mode) -
- intel_mode_vdisplay(adjusted_mode);
+ evade->min -= vblank_delay;
}
/* must be called with vblank interrupt already enabled! */
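For reference, a minimal worked sketch (hypothetical timings; a progressive mode is assumed so the intel_mode_*() helpers return the raw crtc_ fields) of what the new intel_mode_vblank_delay() helper measures:

/*
 * Illustrative sketch only: 4 scanlines of vblank delay between the end
 * of the active area and the start of the delayed vblank.
 */
static int example_vblank_delay(void)
{
	struct drm_display_mode mode = {
		.crtc_vdisplay     = 1080,
		.crtc_vblank_start = 1084,
		.crtc_vblank_end   = 1125,
		.crtc_vtotal       = 1125,
	};

	return intel_mode_vblank_delay(&mode);	/* 1084 - 1080 == 4 */
}

This is the value intel_vblank_evade_init() now subtracts from evade->min when a DSB, M/N or LRR update has to land before the delayed vblank, with the VRR path adding the extra ICL/TGL guard scanline via intel_vrr_vblank_delay().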
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.h b/drivers/gpu/drm/i915/display/intel_vblank.h
index 6d7336256982..21fbb08d61d5 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.h
+++ b/drivers/gpu/drm/i915/display/intel_vblank.h
@@ -11,6 +11,7 @@
struct drm_crtc;
struct drm_display_mode;
+struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
@@ -24,6 +25,7 @@ int intel_mode_vdisplay(const struct drm_display_mode *mode);
int intel_mode_vblank_start(const struct drm_display_mode *mode);
int intel_mode_vblank_end(const struct drm_display_mode *mode);
int intel_mode_vtotal(const struct drm_display_mode *mode);
+int intel_mode_vblank_delay(const struct drm_display_mode *mode);
void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state,
@@ -42,4 +44,8 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
bool vrr_enable);
int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state);
+const struct intel_crtc_state *
+intel_pre_commit_crtc_state(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+
#endif /* __INTEL_VBLANK_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index b355c479eda3..3ed64c17bdff 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -10,7 +10,7 @@
#include <drm/display/drm_dsc_helper.h>
#include <drm/drm_fixed.h>
-#include "i915_drv.h"
+#include "i915_utils.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -22,14 +22,13 @@
bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state)
{
- const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (!HAS_DSC(i915))
+ if (!HAS_DSC(display))
return false;
- if (DISPLAY_VER(i915) == 11 && cpu_transcoder == TRANSCODER_A)
+ if (DISPLAY_VER(display) == 11 && cpu_transcoder == TRANSCODER_A)
return false;
return true;
@@ -37,9 +36,9 @@ bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state)
static bool is_pipe_dsc(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- if (DISPLAY_VER(i915) >= 12)
+ if (DISPLAY_VER(display) >= 12)
return true;
if (cpu_transcoder == TRANSCODER_EDP ||
@@ -48,7 +47,7 @@ static bool is_pipe_dsc(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
return false;
/* There's no pipe A DSC engine on ICL */
- drm_WARN_ON(&i915->drm, crtc->pipe == PIPE_A);
+ drm_WARN_ON(display->drm, crtc->pipe == PIPE_A);
return true;
}
@@ -66,6 +65,13 @@ intel_vdsc_set_min_max_qp(struct drm_dsc_config *vdsc_cfg, int buf,
intel_lookup_range_max_qp(bpc, buf, bpp, vdsc_cfg->native_420);
}
+static int
+get_range_bpg_offset(int bpp_low, int offset_low, int bpp_high, int offset_high, int bpp)
+{
+ return offset_low + DIV_ROUND_UP((offset_high - offset_low) * (bpp - bpp_low),
+ (bpp_high - bpp_low));
+}
+
/*
* We are using the method provided in DSC 1.2a C-Model in codec_main.c
* Above method use a common formula to derive values for any combination of DSC
@@ -83,7 +89,7 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
int qp_bpc_modifier = (bpc - 8) * 2;
int uncompressed_bpg_rate;
int first_line_bpg_offset;
- u32 res, buf_i, bpp_i;
+ u32 buf_i, bpp_i;
if (vdsc_cfg->slice_height >= 8)
first_line_bpg_offset =
@@ -99,7 +105,7 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
* According to DSC 1.2 spec in Section 4.1 if native_420 is set:
* -second_line_bpg_offset is 12 in general and equal to 2*(slice_height-1) if slice
* height < 8.
- * -second_line_offset_adj is 512 as shown by emperical values to yield best chroma
+ * -second_line_offset_adj is 512 as shown by empirical values to yield best chroma
* preservation in second line.
* -nsl_bpg_offset is calculated as second_line_offset/slice_height -1 then rounded
* up to 16 fractional bits, we left shift second line offset by 11 to preserve 11
@@ -117,7 +123,6 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
vdsc_cfg->slice_height - 1);
}
- /* Our hw supports only 444 modes as of today */
if (bpp >= 12)
vdsc_cfg->initial_offset = 2048;
else if (bpp >= 10)
@@ -163,23 +168,19 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
intel_vdsc_set_min_max_qp(vdsc_cfg, buf_i, bpp_i);
/* Calculate range_bpg_offset */
- if (bpp <= 8) {
+ if (bpp <= 8)
range_bpg_offset = ofs_und4[buf_i];
- } else if (bpp <= 10) {
- res = DIV_ROUND_UP(((bpp - 8) *
- (ofs_und5[buf_i] - ofs_und4[buf_i])), 2);
- range_bpg_offset = ofs_und4[buf_i] + res;
- } else if (bpp <= 12) {
- res = DIV_ROUND_UP(((bpp - 10) *
- (ofs_und6[buf_i] - ofs_und5[buf_i])), 2);
- range_bpg_offset = ofs_und5[buf_i] + res;
- } else if (bpp <= 16) {
- res = DIV_ROUND_UP(((bpp - 12) *
- (ofs_und8[buf_i] - ofs_und6[buf_i])), 4);
- range_bpg_offset = ofs_und6[buf_i] + res;
- } else {
+ else if (bpp <= 10)
+ range_bpg_offset = get_range_bpg_offset(8, ofs_und4[buf_i],
+ 10, ofs_und5[buf_i], bpp);
+ else if (bpp <= 12)
+ range_bpg_offset = get_range_bpg_offset(10, ofs_und5[buf_i],
+ 12, ofs_und6[buf_i], bpp);
+ else if (bpp <= 16)
+ range_bpg_offset = get_range_bpg_offset(12, ofs_und6[buf_i],
+ 16, ofs_und8[buf_i], bpp);
+ else
range_bpg_offset = ofs_und8[buf_i];
- }
vdsc_cfg->rc_range_params[buf_i].range_bpg_offset =
range_bpg_offset & DSC_RANGE_BPG_OFFSET_MASK;
@@ -215,21 +216,19 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
intel_vdsc_set_min_max_qp(vdsc_cfg, buf_i, bpp_i);
/* Calculate range_bpg_offset */
- if (bpp <= 6) {
+ if (bpp <= 6)
range_bpg_offset = ofs_und6[buf_i];
- } else if (bpp <= 8) {
- res = DIV_ROUND_UP(((bpp - 6) *
- (ofs_und8[buf_i] - ofs_und6[buf_i])), 2);
- range_bpg_offset = ofs_und6[buf_i] + res;
- } else if (bpp <= 12) {
- range_bpg_offset = ofs_und8[buf_i];
- } else if (bpp <= 15) {
- res = DIV_ROUND_UP(((bpp - 12) *
- (ofs_und15[buf_i] - ofs_und12[buf_i])), 3);
- range_bpg_offset = ofs_und12[buf_i] + res;
- } else {
+ else if (bpp <= 8)
+ range_bpg_offset = get_range_bpg_offset(6, ofs_und6[buf_i],
+ 8, ofs_und8[buf_i], bpp);
+ else if (bpp <= 12)
+ range_bpg_offset = get_range_bpg_offset(8, ofs_und8[buf_i],
+ 12, ofs_und12[buf_i], bpp);
+ else if (bpp <= 15)
+ range_bpg_offset = get_range_bpg_offset(12, ofs_und12[buf_i],
+ 15, ofs_und15[buf_i], bpp);
+ else
range_bpg_offset = ofs_und15[buf_i];
- }
vdsc_cfg->rc_range_params[buf_i].range_bpg_offset =
range_bpg_offset & DSC_RANGE_BPG_OFFSET_MASK;
@@ -262,8 +261,7 @@ static int intel_dsc_slice_dimensions_valid(struct intel_crtc_state *pipe_config
int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
{
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(pipe_config);
struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config;
u16 compressed_bpp = fxp_q4_to_int(pipe_config->dsc.compressed_bpp_x16);
int err;
@@ -276,7 +274,7 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
err = intel_dsc_slice_dimensions_valid(pipe_config, vdsc_cfg);
if (err) {
- drm_dbg_kms(&dev_priv->drm, "Slice dimension requirements not met\n");
+ drm_dbg_kms(display->drm, "Slice dimension requirements not met\n");
return err;
}
@@ -287,7 +285,7 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
vdsc_cfg->convert_rgb = pipe_config->output_format != INTEL_OUTPUT_FORMAT_YCBCR420 &&
pipe_config->output_format != INTEL_OUTPUT_FORMAT_YCBCR444;
- if (DISPLAY_VER(dev_priv) >= 14 &&
+ if (DISPLAY_VER(display) >= 14 &&
pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
vdsc_cfg->native_420 = true;
/* We do not support YcBCr422 as of now */
@@ -308,7 +306,7 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
vdsc_cfg->bits_per_component = pipe_config->pipe_bpp / 3;
if (vdsc_cfg->bits_per_component < 8) {
- drm_dbg_kms(&dev_priv->drm, "DSC bpc requirements not met bpc: %d\n",
+ drm_dbg_kms(display->drm, "DSC bpc requirements not met bpc: %d\n",
vdsc_cfg->bits_per_component);
return -EINVAL;
}
@@ -320,7 +318,7 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
* upto uncompressed bpp-1, hence add calculations for all the rc
* parameters
*/
- if (DISPLAY_VER(dev_priv) >= 13) {
+ if (DISPLAY_VER(display) >= 13) {
calculate_rc_params(vdsc_cfg);
} else {
if ((compressed_bpp == 8 ||
@@ -356,7 +354,7 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
enum intel_display_power_domain
intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
/*
@@ -370,7 +368,8 @@ intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
* the pipe in use. Hence another reference on the pipe power domain
* will suffice. (Except no VDSC/joining on ICL pipe A.)
*/
- if (DISPLAY_VER(i915) == 12 && !IS_ROCKETLAKE(i915) && pipe == PIPE_A)
+ if (DISPLAY_VER(display) == 12 && !display->platform.rocketlake &&
+ pipe == PIPE_A)
return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
else if (is_pipe_dsc(crtc, cpu_transcoder))
return POWER_DOMAIN_PIPE(pipe);
@@ -416,26 +415,25 @@ static void intel_dsc_get_pps_reg(const struct intel_crtc_state *crtc_state, int
static void intel_dsc_pps_write(const struct intel_crtc_state *crtc_state,
int pps, u32 pps_val)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
i915_reg_t dsc_reg[3];
int i, vdsc_per_pipe, dsc_reg_num;
vdsc_per_pipe = intel_dsc_get_vdsc_per_pipe(crtc_state);
dsc_reg_num = min_t(int, ARRAY_SIZE(dsc_reg), vdsc_per_pipe);
- drm_WARN_ON_ONCE(&i915->drm, dsc_reg_num < vdsc_per_pipe);
+ drm_WARN_ON_ONCE(display->drm, dsc_reg_num < vdsc_per_pipe);
intel_dsc_get_pps_reg(crtc_state, pps, dsc_reg, dsc_reg_num);
for (i = 0; i < dsc_reg_num; i++)
- intel_de_write(i915, dsc_reg[i], pps_val);
+ intel_de_write(display, dsc_reg[i], pps_val);
}
static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
@@ -529,7 +527,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
vdsc_cfg->slice_height);
intel_dsc_pps_write(crtc_state, 16, pps_val);
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
/* PPS 17 */
pps_val = DSC_PPS17_SL_BPG_OFFSET(vdsc_cfg->second_line_bpg_offset);
intel_dsc_pps_write(crtc_state, 17, pps_val);
@@ -547,44 +545,44 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
(u32)(vdsc_cfg->rc_buf_thresh[i] <<
BITS_PER_BYTE * (i % 4));
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
- intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0,
+ intel_de_write(display, DSCA_RC_BUF_THRESH_0,
rc_buf_thresh_dword[0]);
- intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0_UDW,
+ intel_de_write(display, DSCA_RC_BUF_THRESH_0_UDW,
rc_buf_thresh_dword[1]);
- intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_1,
+ intel_de_write(display, DSCA_RC_BUF_THRESH_1,
rc_buf_thresh_dword[2]);
- intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_1_UDW,
+ intel_de_write(display, DSCA_RC_BUF_THRESH_1_UDW,
rc_buf_thresh_dword[3]);
if (vdsc_instances_per_pipe > 1) {
- intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_0,
+ intel_de_write(display, DSCC_RC_BUF_THRESH_0,
rc_buf_thresh_dword[0]);
- intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_0_UDW,
+ intel_de_write(display, DSCC_RC_BUF_THRESH_0_UDW,
rc_buf_thresh_dword[1]);
- intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_1,
+ intel_de_write(display, DSCC_RC_BUF_THRESH_1,
rc_buf_thresh_dword[2]);
- intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_1_UDW,
+ intel_de_write(display, DSCC_RC_BUF_THRESH_1_UDW,
rc_buf_thresh_dword[3]);
}
} else {
- intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_0(pipe),
+ intel_de_write(display, ICL_DSC0_RC_BUF_THRESH_0(pipe),
rc_buf_thresh_dword[0]);
- intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe),
+ intel_de_write(display, ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe),
rc_buf_thresh_dword[1]);
- intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_1(pipe),
+ intel_de_write(display, ICL_DSC0_RC_BUF_THRESH_1(pipe),
rc_buf_thresh_dword[2]);
- intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe),
+ intel_de_write(display, ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe),
rc_buf_thresh_dword[3]);
if (vdsc_instances_per_pipe > 1) {
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC1_RC_BUF_THRESH_0(pipe),
rc_buf_thresh_dword[0]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe),
rc_buf_thresh_dword[1]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC1_RC_BUF_THRESH_1(pipe),
rc_buf_thresh_dword[2]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe),
rc_buf_thresh_dword[3]);
}
@@ -601,88 +599,88 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
(vdsc_cfg->rc_range_params[i].range_min_qp <<
RC_MIN_QP_SHIFT)) << 16 * (i % 2));
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
- intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0,
+ intel_de_write(display, DSCA_RC_RANGE_PARAMETERS_0,
rc_range_params_dword[0]);
- intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0_UDW,
+ intel_de_write(display, DSCA_RC_RANGE_PARAMETERS_0_UDW,
rc_range_params_dword[1]);
- intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_1,
+ intel_de_write(display, DSCA_RC_RANGE_PARAMETERS_1,
rc_range_params_dword[2]);
- intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_1_UDW,
+ intel_de_write(display, DSCA_RC_RANGE_PARAMETERS_1_UDW,
rc_range_params_dword[3]);
- intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_2,
+ intel_de_write(display, DSCA_RC_RANGE_PARAMETERS_2,
rc_range_params_dword[4]);
- intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_2_UDW,
+ intel_de_write(display, DSCA_RC_RANGE_PARAMETERS_2_UDW,
rc_range_params_dword[5]);
- intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_3,
+ intel_de_write(display, DSCA_RC_RANGE_PARAMETERS_3,
rc_range_params_dword[6]);
- intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_3_UDW,
+ intel_de_write(display, DSCA_RC_RANGE_PARAMETERS_3_UDW,
rc_range_params_dword[7]);
if (vdsc_instances_per_pipe > 1) {
- intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_0,
+ intel_de_write(display, DSCC_RC_RANGE_PARAMETERS_0,
rc_range_params_dword[0]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
DSCC_RC_RANGE_PARAMETERS_0_UDW,
rc_range_params_dword[1]);
- intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_1,
+ intel_de_write(display, DSCC_RC_RANGE_PARAMETERS_1,
rc_range_params_dword[2]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
DSCC_RC_RANGE_PARAMETERS_1_UDW,
rc_range_params_dword[3]);
- intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_2,
+ intel_de_write(display, DSCC_RC_RANGE_PARAMETERS_2,
rc_range_params_dword[4]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
DSCC_RC_RANGE_PARAMETERS_2_UDW,
rc_range_params_dword[5]);
- intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_3,
+ intel_de_write(display, DSCC_RC_RANGE_PARAMETERS_3,
rc_range_params_dword[6]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
DSCC_RC_RANGE_PARAMETERS_3_UDW,
rc_range_params_dword[7]);
}
} else {
- intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe),
+ intel_de_write(display, ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe),
rc_range_params_dword[0]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe),
rc_range_params_dword[1]);
- intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe),
+ intel_de_write(display, ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe),
rc_range_params_dword[2]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe),
rc_range_params_dword[3]);
- intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe),
+ intel_de_write(display, ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe),
rc_range_params_dword[4]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe),
rc_range_params_dword[5]);
- intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe),
+ intel_de_write(display, ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe),
rc_range_params_dword[6]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe),
rc_range_params_dword[7]);
if (vdsc_instances_per_pipe > 1) {
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe),
rc_range_params_dword[0]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe),
rc_range_params_dword[1]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe),
rc_range_params_dword[2]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe),
rc_range_params_dword[3]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe),
rc_range_params_dword[4]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe),
rc_range_params_dword[5]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe),
rc_range_params_dword[6]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe),
rc_range_params_dword[7]);
}
@@ -746,8 +744,8 @@ static i915_reg_t dss_ctl2_reg(struct intel_crtc *crtc, enum transcoder cpu_tran
void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dss_ctl1_val = 0;
if (crtc_state->joiner_pipes && !crtc_state->dsc.compression_enable) {
@@ -756,14 +754,15 @@ void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state)
else
dss_ctl1_val |= UNCOMPRESSED_JOINER_PRIMARY;
- intel_de_write(dev_priv, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder), dss_ctl1_val);
+ intel_de_write(display, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder),
+ dss_ctl1_val);
}
}
void intel_dsc_enable(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dss_ctl1_val = 0;
u32 dss_ctl2_val = 0;
int vdsc_instances_per_pipe = intel_dsc_get_vdsc_per_pipe(crtc_state);
@@ -796,28 +795,27 @@ void intel_dsc_enable(const struct intel_crtc_state *crtc_state)
if (intel_crtc_is_bigjoiner_primary(crtc_state))
dss_ctl1_val |= PRIMARY_BIG_JOINER_ENABLE;
}
- intel_de_write(dev_priv, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder), dss_ctl1_val);
- intel_de_write(dev_priv, dss_ctl2_reg(crtc, crtc_state->cpu_transcoder), dss_ctl2_val);
+ intel_de_write(display, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder), dss_ctl1_val);
+ intel_de_write(display, dss_ctl2_reg(crtc, crtc_state->cpu_transcoder), dss_ctl2_val);
}
void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
{
+ struct intel_display *display = to_intel_display(old_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* Disable only if either of them is enabled */
if (old_crtc_state->dsc.compression_enable ||
old_crtc_state->joiner_pipes) {
- intel_de_write(dev_priv, dss_ctl1_reg(crtc, old_crtc_state->cpu_transcoder), 0);
- intel_de_write(dev_priv, dss_ctl2_reg(crtc, old_crtc_state->cpu_transcoder), 0);
+ intel_de_write(display, dss_ctl1_reg(crtc, old_crtc_state->cpu_transcoder), 0);
+ intel_de_write(display, dss_ctl2_reg(crtc, old_crtc_state->cpu_transcoder), 0);
}
}
static u32 intel_dsc_pps_read(struct intel_crtc_state *crtc_state, int pps,
bool *all_equal)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
i915_reg_t dsc_reg[3];
int i, vdsc_per_pipe, dsc_reg_num;
u32 val;
@@ -825,16 +823,16 @@ static u32 intel_dsc_pps_read(struct intel_crtc_state *crtc_state, int pps,
vdsc_per_pipe = intel_dsc_get_vdsc_per_pipe(crtc_state);
dsc_reg_num = min_t(int, ARRAY_SIZE(dsc_reg), vdsc_per_pipe);
- drm_WARN_ON_ONCE(&i915->drm, dsc_reg_num < vdsc_per_pipe);
+ drm_WARN_ON_ONCE(display->drm, dsc_reg_num < vdsc_per_pipe);
intel_dsc_get_pps_reg(crtc_state, pps, dsc_reg, dsc_reg_num);
*all_equal = true;
- val = intel_de_read(i915, dsc_reg[0]);
+ val = intel_de_read(display, dsc_reg[0]);
for (i = 1; i < dsc_reg_num; i++) {
- if (intel_de_read(i915, dsc_reg[i]) != val) {
+ if (intel_de_read(display, dsc_reg[i]) != val) {
*all_equal = false;
break;
}
@@ -845,22 +843,20 @@ static u32 intel_dsc_pps_read(struct intel_crtc_state *crtc_state, int pps,
static u32 intel_dsc_pps_read_and_verify(struct intel_crtc_state *crtc_state, int pps)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
u32 val;
bool all_equal;
val = intel_dsc_pps_read(crtc_state, pps, &all_equal);
- drm_WARN_ON(&i915->drm, !all_equal);
+ drm_WARN_ON(display->drm, !all_equal);
return val;
}
static void intel_dsc_get_pps_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
int num_vdsc_instances = intel_dsc_get_num_vdsc_instances(crtc_state);
u32 pps_temp;
@@ -946,7 +942,7 @@ static void intel_dsc_get_pps_config(struct intel_crtc_state *crtc_state)
vdsc_cfg->slice_chunk_size = REG_FIELD_GET(DSC_PPS16_SLICE_CHUNK_SIZE_MASK, pps_temp);
- if (DISPLAY_VER(i915) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
/* PPS 17 */
pps_temp = intel_dsc_pps_read_and_verify(crtc_state, 17);
@@ -962,8 +958,8 @@ static void intel_dsc_get_pps_config(struct intel_crtc_state *crtc_state)
void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
@@ -974,12 +970,12 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
power_domain = intel_dsc_power_domain(crtc, cpu_transcoder);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ wakeref = intel_display_power_get_if_enabled(display, power_domain);
if (!wakeref)
return;
- dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg(crtc, cpu_transcoder));
- dss_ctl2 = intel_de_read(dev_priv, dss_ctl2_reg(crtc, cpu_transcoder));
+ dss_ctl1 = intel_de_read(display, dss_ctl1_reg(crtc, cpu_transcoder));
+ dss_ctl2 = intel_de_read(display, dss_ctl2_reg(crtc, cpu_transcoder));
crtc_state->dsc.compression_enable = dss_ctl2 & VDSC0_ENABLE;
if (!crtc_state->dsc.compression_enable)
@@ -994,7 +990,7 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
intel_dsc_get_pps_config(crtc_state);
out:
- intel_display_power_put(dev_priv, power_domain, wakeref);
+ intel_display_power_put(display, power_domain, wakeref);
}
static void intel_vdsc_dump_state(struct drm_printer *p, int indent,
@@ -1019,8 +1015,7 @@ void intel_vdsc_state_dump(struct drm_printer *p, int indent,
int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_display *display = to_intel_display(crtc);
+ struct intel_display *display = to_intel_display(crtc_state);
int num_vdsc_instances = intel_dsc_get_num_vdsc_instances(crtc_state);
int min_cdclk;
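For reference, a standalone sketch (made-up anchor offsets; the kernel's DIV_ROUND_UP lives in <linux/math.h>) of the linear interpolation that get_range_bpg_offset() performs in place of the removed open-coded branches:

/* Illustrative sketch only: interpolate between two (bpp, offset) anchors. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static int
interp_range_bpg_offset(int bpp_low, int offset_low, int bpp_high, int offset_high, int bpp)
{
	return offset_low + DIV_ROUND_UP((offset_high - offset_low) * (bpp - bpp_low),
					 (bpp_high - bpp_low));
}

/* e.g. anchors (8 bpp, 2) and (10 bpp, 6): interp(8, 2, 10, 6, 9) == 2 + DIV_ROUND_UP(4, 2) == 4 */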
diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c
index fd18dd07ae49..684b5d1bc87c 100644
--- a/drivers/gpu/drm/i915/display/intel_vga.c
+++ b/drivers/gpu/drm/i915/display/intel_vga.c
@@ -59,7 +59,6 @@ void intel_vga_redisable_power_on(struct intel_display *display)
void intel_vga_redisable(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref;
/*
@@ -71,13 +70,13 @@ void intel_vga_redisable(struct intel_display *display)
* follow the "don't touch the power well if we don't need it" policy
* the rest of the driver uses.
*/
- wakeref = intel_display_power_get_if_enabled(i915, POWER_DOMAIN_VGA);
+ wakeref = intel_display_power_get_if_enabled(display, POWER_DOMAIN_VGA);
if (!wakeref)
return;
intel_vga_redisable_power_on(display);
- intel_display_power_put(i915, POWER_DOMAIN_VGA, wakeref);
+ intel_display_power_put(display, POWER_DOMAIN_VGA, wakeref);
}
void intel_vga_reset_io_mem(struct intel_display *display)
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 70088e355055..cac49319026d 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -7,9 +7,9 @@
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dp.h"
#include "intel_vrr.h"
#include "intel_vrr_regs.h"
-#include "intel_dp.h"
#define FIXED_POINT_PRECISION 100
#define CMRR_PRECISION_TOLERANCE 10
@@ -75,6 +75,46 @@ intel_vrr_check_modeset(struct intel_atomic_state *state)
}
}
+static int intel_vrr_real_vblank_delay(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->hw.adjusted_mode.crtc_vblank_start -
+ crtc_state->hw.adjusted_mode.crtc_vdisplay;
+}
+
+static int intel_vrr_extra_vblank_delay(struct intel_display *display)
+{
+ /*
+ * On ICL/TGL VRR hardware inserts one extra scanline
+ * just after vactive, which pushes the vmin decision
+ * boundary ahead accordingly. We'll include the extra
+ * scanline in our vblank delay estimates to make sure
+ * that we never underestimate how long we have until
+ * the delayed vblank has passed.
+ */
+ return DISPLAY_VER(display) < 13 ? 1 : 0;
+}
+
+int intel_vrr_vblank_delay(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ return intel_vrr_real_vblank_delay(crtc_state) +
+ intel_vrr_extra_vblank_delay(display);
+}
+
+static int intel_vrr_flipline_offset(struct intel_display *display)
+{
+ /* ICL/TGL hardware imposes flipline>=vmin+1 */
+ return DISPLAY_VER(display) < 13 ? 1 : 0;
+}
+
+static int intel_vrr_vmin_flipline(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ return crtc_state->vrr.vmin + intel_vrr_flipline_offset(display);
+}
+
/*
* Without VRR registers get latched at:
* vblank_start
@@ -98,19 +138,41 @@ static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_stat
if (DISPLAY_VER(display) >= 13)
return crtc_state->vrr.guardband;
else
- /* The hw imposes the extra scanline before frame start */
+ /* hardware imposes one extra scanline somewhere */
return crtc_state->vrr.pipeline_full + crtc_state->framestart_delay + 1;
}
+int intel_vrr_vmin_vtotal(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ /* Min vblank actually determined by flipline */
+ if (DISPLAY_VER(display) >= 13)
+ return intel_vrr_vmin_flipline(crtc_state);
+ else
+ return intel_vrr_vmin_flipline(crtc_state) +
+ intel_vrr_real_vblank_delay(crtc_state);
+}
+
+int intel_vrr_vmax_vtotal(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (DISPLAY_VER(display) >= 13)
+ return crtc_state->vrr.vmax;
+ else
+ return crtc_state->vrr.vmax +
+ intel_vrr_real_vblank_delay(crtc_state);
+}
+
int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state)
{
- /* Min vblank actually determined by flipline that is always >=vmin+1 */
- return crtc_state->vrr.vmin + 1 - intel_vrr_vblank_exit_length(crtc_state);
+ return intel_vrr_vmin_vtotal(crtc_state) - intel_vrr_vblank_exit_length(crtc_state);
}
int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state)
{
- return crtc_state->vrr.vmax - intel_vrr_vblank_exit_length(crtc_state);
+ return intel_vrr_vmax_vtotal(crtc_state) - intel_vrr_vblank_exit_length(crtc_state);
}
static bool
@@ -202,15 +264,17 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
if (vmin >= vmax)
return;
+ crtc_state->vrr.vmin = vmin;
+ crtc_state->vrr.vmax = vmax;
+
+ crtc_state->vrr.flipline = crtc_state->vrr.vmin;
+
/*
* flipline determines the min vblank length the hardware will
- * generate, and flipline>=vmin+1, hence we reduce vmin by one
- * to make sure we can get the actual min vblank length.
+ * generate, and on ICL/TGL flipline>=vmin+1, hence we reduce
+ * vmin by one to make sure we can get the actual min vblank length.
*/
- crtc_state->vrr.vmin = vmin - 1;
- crtc_state->vrr.vmax = vmax;
-
- crtc_state->vrr.flipline = crtc_state->vrr.vmin + 1;
+ crtc_state->vrr.vmin -= intel_vrr_flipline_offset(display);
/*
* When panel is VRR capable and userspace has
@@ -235,7 +299,7 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
}
- if (intel_dp->as_sdp_supported && crtc_state->vrr.enable) {
+ if (HAS_AS_SDP(display)) {
crtc_state->vrr.vsync_start =
(crtc_state->hw.adjusted_mode.crtc_vtotal -
crtc_state->hw.adjusted_mode.vsync_start);
@@ -255,11 +319,20 @@ void intel_vrr_compute_config_late(struct intel_crtc_state *crtc_state)
if (DISPLAY_VER(display) >= 13) {
crtc_state->vrr.guardband =
- crtc_state->vrr.vmin + 1 - adjusted_mode->crtc_vblank_start;
+ crtc_state->vrr.vmin - adjusted_mode->crtc_vblank_start;
} else {
+ /* hardware imposes one extra scanline somewhere */
crtc_state->vrr.pipeline_full =
min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vblank_start -
crtc_state->framestart_delay - 1);
+
+ /*
+ * vmin/vmax/flipline also need to be adjusted by
+ * the vblank delay to maintain correct vtotals.
+ */
+ crtc_state->vrr.vmin -= intel_vrr_real_vblank_delay(crtc_state);
+ crtc_state->vrr.vmax -= intel_vrr_real_vblank_delay(crtc_state);
+ crtc_state->vrr.flipline -= intel_vrr_real_vblank_delay(crtc_state);
}
}
@@ -315,9 +388,16 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
trans_vrr_ctl(crtc_state));
intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder),
crtc_state->vrr.flipline - 1);
+
+ if (HAS_AS_SDP(display))
+ intel_de_write(display,
+ TRANS_VRR_VSYNC(display, cpu_transcoder),
+ VRR_VSYNC_END(crtc_state->vrr.vsync_end) |
+ VRR_VSYNC_START(crtc_state->vrr.vsync_start));
}
-void intel_vrr_send_push(const struct intel_crtc_state *crtc_state)
+void intel_vrr_send_push(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -325,8 +405,49 @@ void intel_vrr_send_push(const struct intel_crtc_state *crtc_state)
if (!crtc_state->vrr.enable)
return;
- intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
- TRANS_PUSH_EN | TRANS_PUSH_SEND);
+ if (dsb)
+ intel_dsb_nonpost_start(dsb);
+
+ intel_de_write_dsb(display, dsb,
+ TRANS_PUSH(display, cpu_transcoder),
+ TRANS_PUSH_EN | TRANS_PUSH_SEND);
+
+ if (dsb)
+ intel_dsb_nonpost_end(dsb);
+}
+
+void intel_vrr_check_push_sent(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (!crtc_state->vrr.enable)
+ return;
+
+ /*
+ * Make sure the push send bit has cleared. This should
+ * already be the case as long as the caller makes sure
+ * this is called after the delayed vblank has occurred.
+ */
+ if (dsb) {
+ int wait_us, count;
+
+ wait_us = 2;
+ count = 1;
+
+ /*
+ * If the bit hasn't cleared the DSB will
+ * raise the poll error interrupt.
+ */
+ intel_dsb_poll(dsb, TRANS_PUSH(display, cpu_transcoder),
+ TRANS_PUSH_SEND, 0, wait_us, count);
+ } else {
+ if (intel_vrr_is_push_sent(crtc_state))
+ drm_err(display->drm, "[CRTC:%d:%s] VRR push send still pending\n",
+ crtc->base.base.id, crtc->base.name);
+ }
}
bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state)
@@ -351,12 +472,6 @@ void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
TRANS_PUSH_EN);
- if (HAS_AS_SDP(display))
- intel_de_write(display,
- TRANS_VRR_VSYNC(display, cpu_transcoder),
- VRR_VSYNC_END(crtc_state->vrr.vsync_end) |
- VRR_VSYNC_START(crtc_state->vrr.vsync_start));
-
if (crtc_state->cmrr.enable) {
intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
VRR_CTL_VRR_ENABLE | VRR_CTL_CMRR_ENABLE |
@@ -381,10 +496,6 @@ void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
TRANS_VRR_STATUS(display, cpu_transcoder),
VRR_STATUS_VRR_EN_LIVE, 1000);
intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0);
-
- if (HAS_AS_SDP(display))
- intel_de_write(display,
- TRANS_VRR_VSYNC(display, cpu_transcoder), 0);
}
void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
@@ -424,10 +535,6 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
TRANS_VRR_VMAX(display, cpu_transcoder)) + 1;
crtc_state->vrr.vmin = intel_de_read(display,
TRANS_VRR_VMIN(display, cpu_transcoder)) + 1;
- }
-
- if (crtc_state->vrr.enable) {
- crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
if (HAS_AS_SDP(display)) {
trans_vrr_vsync =
@@ -439,4 +546,7 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
REG_FIELD_GET(VRR_VSYNC_END_MASK, trans_vrr_vsync);
}
}
+
+ if (crtc_state->vrr.enable)
+ crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
}
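For reference, a minimal sketch (hypothetical vmin/vmax values, plain arithmetic only) of how the new vtotal helpers relate on pre-Xe LPD hardware (ICL/TGL, DISPLAY_VER < 13), where the hardware imposes flipline >= vmin + 1 and one extra guard scanline:

/* Illustrative sketch only: no hardware access, made-up numbers. */
static void example_vrr_vtotals(void)
{
	int vmin = 1120, vmax = 1440;	/* made-up crtc_state->vrr.vmin/vmax */
	int real_vblank_delay = 4;	/* crtc_vblank_start - crtc_vdisplay */

	int vmin_flipline = vmin + 1;				/* intel_vrr_flipline_offset() */
	int vmin_vtotal = vmin_flipline + real_vblank_delay;	/* intel_vrr_vmin_vtotal() */
	int vmax_vtotal = vmax + real_vblank_delay;		/* intel_vrr_vmax_vtotal() */
	int vblank_delay = real_vblank_delay + 1;		/* intel_vrr_vblank_delay() */

	(void)vmin_vtotal; (void)vmax_vtotal; (void)vblank_delay;
}

On Xe LPD+ (DISPLAY_VER >= 13) the flipline offset and the extra guard scanline are both zero, so the vtotals reduce to vmin and vmax and the vblank delay to its real value.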
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h
index b3b45c675020..514822577e8a 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.h
+++ b/drivers/gpu/drm/i915/display/intel_vrr.h
@@ -12,6 +12,7 @@ struct drm_connector_state;
struct intel_atomic_state;
struct intel_connector;
struct intel_crtc_state;
+struct intel_dsb;
bool intel_vrr_is_capable(struct intel_connector *connector);
bool intel_vrr_is_in_range(struct intel_connector *connector, int vrefresh);
@@ -22,11 +23,17 @@ void intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
void intel_vrr_compute_config_late(struct intel_crtc_state *crtc_state);
void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
void intel_vrr_enable(const struct intel_crtc_state *crtc_state);
-void intel_vrr_send_push(const struct intel_crtc_state *crtc_state);
+void intel_vrr_send_push(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state);
+void intel_vrr_check_push_sent(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state);
bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state);
void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state);
void intel_vrr_get_config(struct intel_crtc_state *crtc_state);
+int intel_vrr_vmax_vtotal(const struct intel_crtc_state *crtc_state);
+int intel_vrr_vmin_vtotal(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state);
+int intel_vrr_vblank_delay(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_VRR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_wm.c b/drivers/gpu/drm/i915/display/intel_wm.c
index d7dc49aecd27..f00f4cfc58e5 100644
--- a/drivers/gpu/drm/i915/display/intel_wm.c
+++ b/drivers/gpu/drm/i915/display/intel_wm.c
@@ -108,6 +108,12 @@ void intel_wm_get_hw_state(struct drm_i915_private *i915)
return i915->display.funcs.wm->get_hw_state(i915);
}
+void intel_wm_sanitize(struct drm_i915_private *i915)
+{
+ if (i915->display.funcs.wm->sanitize)
+ return i915->display.funcs.wm->sanitize(i915);
+}
+
bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
diff --git a/drivers/gpu/drm/i915/display/intel_wm.h b/drivers/gpu/drm/i915/display/intel_wm.h
index e97cdca89a5c..7d3a447054b3 100644
--- a/drivers/gpu/drm/i915/display/intel_wm.h
+++ b/drivers/gpu/drm/i915/display/intel_wm.h
@@ -25,6 +25,7 @@ void intel_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int intel_compute_global_watermarks(struct intel_atomic_state *state);
void intel_wm_get_hw_state(struct drm_i915_private *i915);
+void intel_wm_sanitize(struct drm_i915_private *i915);
bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void intel_print_wm_latency(struct drm_i915_private *i915,
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index ae21fce534dc..ee81220a7c88 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
+#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "skl_scaler.h"
@@ -64,7 +65,7 @@ static u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
/*
* Hardware initial phase limited to [-0.5:1.5].
* Since the max hardware scale factor is 3.0, we
- * should never actually excdeed 1.0 here.
+ * should never actually exceed 1.0 here.
*/
WARN_ON(phase < -0x8000 || phase > 0x18000);
@@ -76,28 +77,60 @@ static u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
return ((phase >> 2) & PS_PHASE_MASK) | trip;
}
-#define SKL_MIN_SRC_W 8
-#define SKL_MAX_SRC_W 4096
-#define SKL_MIN_SRC_H 8
-#define SKL_MAX_SRC_H 4096
-#define SKL_MIN_DST_W 8
-#define SKL_MAX_DST_W 4096
-#define SKL_MIN_DST_H 8
-#define SKL_MAX_DST_H 4096
-#define ICL_MAX_SRC_W 5120
-#define ICL_MAX_SRC_H 4096
-#define ICL_MAX_DST_W 5120
-#define ICL_MAX_DST_H 4096
-#define TGL_MAX_SRC_W 5120
-#define TGL_MAX_SRC_H 8192
-#define TGL_MAX_DST_W 8192
-#define TGL_MAX_DST_H 8192
-#define MTL_MAX_SRC_W 4096
-#define MTL_MAX_SRC_H 8192
-#define MTL_MAX_DST_W 8192
-#define MTL_MAX_DST_H 8192
-#define SKL_MIN_YUV_420_SRC_W 16
-#define SKL_MIN_YUV_420_SRC_H 16
+static void skl_scaler_min_src_size(const struct drm_format_info *format,
+ u64 modifier, int *min_w, int *min_h)
+{
+ if (format && intel_format_info_is_yuv_semiplanar(format, modifier)) {
+ *min_w = 16;
+ *min_h = 16;
+ } else {
+ *min_w = 8;
+ *min_h = 8;
+ }
+}
+
+static void skl_scaler_max_src_size(struct intel_crtc *crtc,
+ int *max_w, int *max_h)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ if (DISPLAY_VER(display) >= 14) {
+ *max_w = 4096;
+ *max_h = 8192;
+ } else if (DISPLAY_VER(display) >= 12) {
+ *max_w = 5120;
+ *max_h = 8192;
+ } else if (DISPLAY_VER(display) == 11) {
+ *max_w = 5120;
+ *max_h = 4096;
+ } else {
+ *max_w = 4096;
+ *max_h = 4096;
+ }
+}
+
+static void skl_scaler_min_dst_size(int *min_w, int *min_h)
+{
+ *min_w = 8;
+ *min_h = 8;
+}
+
+static void skl_scaler_max_dst_size(struct intel_crtc *crtc,
+ int *max_w, int *max_h)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ if (DISPLAY_VER(display) >= 12) {
+ *max_w = 8192;
+ *max_h = 8192;
+ } else if (DISPLAY_VER(display) == 11) {
+ *max_w = 5120;
+ *max_h = 4096;
+ } else {
+ *max_w = 4096;
+ *max_h = 4096;
+ }
+}
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
@@ -134,7 +167,8 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
if (DISPLAY_VER(display) >= 9 && crtc_state->hw.enable &&
need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
drm_dbg_kms(display->drm,
- "Pipe/Plane scaling not supported with IF-ID mode\n");
+ "[CRTC:%d:%s] scaling not supported with IF-ID mode\n",
+ crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
@@ -154,8 +188,9 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
scaler_state->scalers[*scaler_id].in_use = false;
drm_dbg_kms(display->drm,
- "scaler_user index %u.%u: "
+ "[CRTC:%d:%s] scaler_user index %u.%u: "
"Staged freeing scaler id %d scaler_users = 0x%x\n",
+ crtc->base.base.id, crtc->base.name,
crtc->pipe, scaler_user, *scaler_id,
scaler_state->scaler_users);
*scaler_id = -1;
@@ -163,39 +198,11 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
return 0;
}
- if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
- (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
- drm_dbg_kms(display->drm,
- "Planar YUV: src dimensions not met\n");
- return -EINVAL;
- }
+ skl_scaler_min_src_size(format, modifier, &min_src_w, &min_src_h);
+ skl_scaler_max_src_size(crtc, &max_src_w, &max_src_h);
- min_src_w = SKL_MIN_SRC_W;
- min_src_h = SKL_MIN_SRC_H;
- min_dst_w = SKL_MIN_DST_W;
- min_dst_h = SKL_MIN_DST_H;
-
- if (DISPLAY_VER(display) < 11) {
- max_src_w = SKL_MAX_SRC_W;
- max_src_h = SKL_MAX_SRC_H;
- max_dst_w = SKL_MAX_DST_W;
- max_dst_h = SKL_MAX_DST_H;
- } else if (DISPLAY_VER(display) < 12) {
- max_src_w = ICL_MAX_SRC_W;
- max_src_h = ICL_MAX_SRC_H;
- max_dst_w = ICL_MAX_DST_W;
- max_dst_h = ICL_MAX_DST_H;
- } else if (DISPLAY_VER(display) < 14) {
- max_src_w = TGL_MAX_SRC_W;
- max_src_h = TGL_MAX_SRC_H;
- max_dst_w = TGL_MAX_DST_W;
- max_dst_h = TGL_MAX_DST_H;
- } else {
- max_src_w = MTL_MAX_SRC_W;
- max_src_h = MTL_MAX_SRC_H;
- max_dst_w = MTL_MAX_DST_W;
- max_dst_h = MTL_MAX_DST_H;
- }
+ skl_scaler_min_dst_size(&min_dst_w, &min_dst_h);
+ skl_scaler_max_dst_size(crtc, &max_dst_w, &max_dst_h);
/* range checks */
if (src_w < min_src_w || src_h < min_src_h ||
@@ -203,8 +210,9 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
src_w > max_src_w || src_h > max_src_h ||
dst_w > max_dst_w || dst_h > max_dst_h) {
drm_dbg_kms(display->drm,
- "scaler_user index %u.%u: src %ux%u dst %ux%u "
+ "[CRTC:%d:%s] scaler_user index %u.%u: src %ux%u dst %ux%u "
"size is out of scaler range\n",
+ crtc->base.base.id, crtc->base.name,
crtc->pipe, scaler_user, src_w, src_h,
dst_w, dst_h);
return -EINVAL;
@@ -220,16 +228,18 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
*/
if (pipe_src_w > max_dst_w || pipe_src_h > max_dst_h) {
drm_dbg_kms(display->drm,
- "scaler_user index %u.%u: pipe src size %ux%u "
+ "[CRTC:%d:%s] scaler_user index %u.%u: pipe src size %ux%u "
"is out of scaler range\n",
+ crtc->base.base.id, crtc->base.name,
crtc->pipe, scaler_user, pipe_src_w, pipe_src_h);
return -EINVAL;
}
/* mark this plane as a scaler user in crtc_state */
scaler_state->scaler_users |= (1 << scaler_user);
- drm_dbg_kms(display->drm, "scaler_user index %u.%u: "
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] scaler_user index %u.%u: "
"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
+ crtc->base.base.id, crtc->base.name,
crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
scaler_state->scaler_users);
@@ -269,14 +279,14 @@ int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
struct drm_framebuffer *fb = plane_state->hw.fb;
bool force_detach = !fb || !plane_state->uapi.visible;
bool need_scaler = false;
/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
- if (!icl_is_hdr_plane(dev_priv, plane->id) &&
+ if (!icl_is_hdr_plane(display, plane->id) &&
fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
need_scaler = true;
@@ -309,15 +319,55 @@ static int intel_allocate_scaler(struct intel_crtc_scaler_state *scaler_state,
return -1;
}
-static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
+static void
+calculate_max_scale(struct intel_crtc *crtc,
+ bool is_yuv_semiplanar,
+ int scaler_id,
+ int *max_hscale, int *max_vscale)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ /*
+ * FIXME: When two scalers are needed, but only one of
+ * them needs to downscale, we should make sure that
+ * the one that needs downscaling support is assigned
+ * as the first scaler, so we don't reject downscaling
+ * unnecessarily.
+ */
+
+ if (DISPLAY_VER(display) >= 14) {
+ /*
+ * On versions 14 and up, only the first
+ * scaler supports a vertical scaling factor
+ * of more than 1.0, while a horizontal
+ * scaling factor of 3.0 is supported.
+ */
+ *max_hscale = 0x30000 - 1;
+
+ if (scaler_id == 0)
+ *max_vscale = 0x30000 - 1;
+ else
+ *max_vscale = 0x10000;
+ } else if (DISPLAY_VER(display) >= 10 || !is_yuv_semiplanar) {
+ *max_hscale = 0x30000 - 1;
+ *max_vscale = 0x30000 - 1;
+ } else {
+ *max_hscale = 0x20000 - 1;
+ *max_vscale = 0x20000 - 1;
+ }
+}
+
+static int intel_atomic_setup_scaler(struct intel_crtc_state *crtc_state,
int num_scalers_need, struct intel_crtc *crtc,
const char *name, int idx,
struct intel_plane_state *plane_state,
int *scaler_id)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
u32 mode;
+ int hscale = 0;
+ int vscale = 0;
if (*scaler_id < 0)
*scaler_id = intel_allocate_scaler(scaler_state, crtc);
@@ -334,7 +384,7 @@ static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_stat
if (DISPLAY_VER(display) == 9) {
mode = SKL_PS_SCALER_MODE_NV12;
- } else if (icl_is_hdr_plane(dev_priv, plane->id)) {
+ } else if (icl_is_hdr_plane(display, plane->id)) {
/*
* On gen11+'s HDR planes we only use the scaler for
* scaling. They have a dedicated chroma upsampler, so
@@ -366,45 +416,15 @@ static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_stat
mode = SKL_PS_SCALER_MODE_DYN;
}
- /*
- * FIXME: we should also check the scaler factors for pfit, so
- * this shouldn't be tied directly to planes.
- */
if (plane_state && plane_state->hw.fb) {
const struct drm_framebuffer *fb = plane_state->hw.fb;
const struct drm_rect *src = &plane_state->uapi.src;
const struct drm_rect *dst = &plane_state->uapi.dst;
- int hscale, vscale, max_vscale, max_hscale;
-
- /*
- * FIXME: When two scalers are needed, but only one of
- * them needs to downscale, we should make sure that
- * the one that needs downscaling support is assigned
- * as the first scaler, so we don't reject downscaling
- * unnecessarily.
- */
+ int max_hscale, max_vscale;
- if (DISPLAY_VER(display) >= 14) {
- /*
- * On versions 14 and up, only the first
- * scaler supports a vertical scaling factor
- * of more than 1.0, while a horizontal
- * scaling factor of 3.0 is supported.
- */
- max_hscale = 0x30000 - 1;
- if (*scaler_id == 0)
- max_vscale = 0x30000 - 1;
- else
- max_vscale = 0x10000;
-
- } else if (DISPLAY_VER(display) >= 10 ||
- !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) {
- max_hscale = 0x30000 - 1;
- max_vscale = 0x30000 - 1;
- } else {
- max_hscale = 0x20000 - 1;
- max_vscale = 0x20000 - 1;
- }
+ calculate_max_scale(crtc,
+ intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier),
+ *scaler_id, &max_hscale, &max_vscale);
/*
* FIXME: We should change the if-else block above to
@@ -417,8 +437,8 @@ static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_stat
if (hscale < 0 || vscale < 0) {
drm_dbg_kms(display->drm,
- "Scaler %d doesn't support required plane scaling\n",
- *scaler_id);
+ "[CRTC:%d:%s] scaler %d doesn't support required plane scaling\n",
+ crtc->base.base.id, crtc->base.name, *scaler_id);
drm_rect_debug_print("src: ", src, true);
drm_rect_debug_print("dst: ", dst, false);
@@ -426,7 +446,48 @@ static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_stat
}
}
- drm_dbg_kms(display->drm, "Attached scaler id %u.%u to %s:%d\n",
+ if (crtc_state->pch_pfit.enabled) {
+ struct drm_rect src;
+ int max_hscale, max_vscale;
+
+ drm_rect_init(&src, 0, 0,
+ drm_rect_width(&crtc_state->pipe_src) << 16,
+ drm_rect_height(&crtc_state->pipe_src) << 16);
+
+ calculate_max_scale(crtc, 0, *scaler_id,
+ &max_hscale, &max_vscale);
+
+ /*
+ * When configured for Pipe YUV 420 encoding for port output,
+ * limit downscaling to less than 1.5 (source/destination) in
+ * the horizontal direction and 1.0 in the vertical direction.
+ */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
+ max_hscale = 0x18000 - 1;
+ max_vscale = 0x10000;
+ }
+
+ hscale = drm_rect_calc_hscale(&src, &crtc_state->pch_pfit.dst,
+ 0, max_hscale);
+ vscale = drm_rect_calc_vscale(&src, &crtc_state->pch_pfit.dst,
+ 0, max_vscale);
+
+ if (hscale < 0 || vscale < 0) {
+ drm_dbg_kms(display->drm,
+ "Scaler %d doesn't support required pipe scaling\n",
+ *scaler_id);
+ drm_rect_debug_print("src: ", &src, true);
+ drm_rect_debug_print("dst: ", &crtc_state->pch_pfit.dst, false);
+
+ return -EINVAL;
+ }
+ }
+
+ scaler_state->scalers[*scaler_id].hscale = hscale;
+ scaler_state->scalers[*scaler_id].vscale = vscale;
+
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] attached scaler id %u.%u to %s:%d\n",
+ crtc->base.base.id, crtc->base.name,
crtc->pipe, *scaler_id, name, idx);
scaler_state->scalers[*scaler_id].mode = mode;
@@ -441,7 +502,7 @@ static int setup_crtc_scaler(struct intel_atomic_state *state,
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
- return intel_atomic_setup_scaler(scaler_state,
+ return intel_atomic_setup_scaler(crtc_state,
hweight32(scaler_state->scaler_users),
crtc, "CRTC", crtc->base.base.id,
NULL, &scaler_state->scaler_id);
@@ -476,7 +537,7 @@ static int setup_plane_scaler(struct intel_atomic_state *state,
if (IS_ERR(plane_state))
return PTR_ERR(plane_state);
- return intel_atomic_setup_scaler(scaler_state,
+ return intel_atomic_setup_scaler(crtc_state,
hweight32(scaler_state->scaler_users),
crtc, "PLANE", plane->base.base.id,
plane_state, &plane_state->scaler_id);
@@ -526,7 +587,8 @@ int intel_atomic_setup_scalers(struct intel_atomic_state *state,
/* fail if required scalers > available scalers */
if (num_scalers_need > crtc->num_scalers) {
drm_dbg_kms(display->drm,
- "Too many scaling requests %d > %d\n",
+ "[CRTC:%d:%s] too many scaling requests %d > %d\n",
+ crtc->base.base.id, crtc->base.name,
num_scalers_need, crtc->num_scalers);
return -EINVAL;
}
@@ -573,43 +635,45 @@ static u16 glk_nearest_filter_coef(int t)
* The letter represents the filter tap (D is the center tap) and the number
* represents the coefficient set for a phase (0-16).
*
- * +------------+------------------------+------------------------+
- * |Index value | Data value coeffient 1 | Data value coeffient 2 |
- * +------------+------------------------+------------------------+
- * | 00h | B0 | A0 |
- * +------------+------------------------+------------------------+
- * | 01h | D0 | C0 |
- * +------------+------------------------+------------------------+
- * | 02h | F0 | E0 |
- * +------------+------------------------+------------------------+
- * | 03h | A1 | G0 |
- * +------------+------------------------+------------------------+
- * | 04h | C1 | B1 |
- * +------------+------------------------+------------------------+
- * | ... | ... | ... |
- * +------------+------------------------+------------------------+
- * | 38h | B16 | A16 |
- * +------------+------------------------+------------------------+
- * | 39h | D16 | C16 |
- * +------------+------------------------+------------------------+
- * | 3Ah | F16 | C16 |
- * +------------+------------------------+------------------------+
- * | 3Bh | Reserved | G16 |
- * +------------+------------------------+------------------------+
+ * +------------+--------------------------+--------------------------+
+ * |Index value | Data value coefficient 1 | Data value coefficient 2 |
+ * +------------+--------------------------+--------------------------+
+ * | 00h | B0 | A0 |
+ * +------------+--------------------------+--------------------------+
+ * | 01h | D0 | C0 |
+ * +------------+--------------------------+--------------------------+
+ * | 02h | F0 | E0 |
+ * +------------+--------------------------+--------------------------+
+ * | 03h | A1 | G0 |
+ * +------------+--------------------------+--------------------------+
+ * | 04h | C1 | B1 |
+ * +------------+--------------------------+--------------------------+
+ * | ... | ... | ... |
+ * +------------+--------------------------+--------------------------+
+ * | 38h | B16 | A16 |
+ * +------------+--------------------------+--------------------------+
+ * | 39h | D16 | C16 |
+ * +------------+--------------------------+--------------------------+
+ * | 3Ah | F16 | C16 |
+ * +------------+--------------------------+--------------------------+
+ * | 3Bh | Reserved | G16 |
+ * +------------+--------------------------+--------------------------+
*
- * To enable nearest-neighbor scaling: program scaler coefficents with
+ * To enable nearest-neighbor scaling: program scaler coefficients with
* the center tap (Dxx) values set to 1 and all other values set to 0 as per
* SCALER_COEFFICIENT_FORMAT
*
*/
static void glk_program_nearest_filter_coefs(struct intel_display *display,
+ struct intel_dsb *dsb,
enum pipe pipe, int id, int set)
{
int i;
- intel_de_write_fw(display, GLK_PS_COEF_INDEX_SET(pipe, id, set),
- PS_COEF_INDEX_AUTO_INC);
+ intel_de_write_dsb(display, dsb,
+ GLK_PS_COEF_INDEX_SET(pipe, id, set),
+ PS_COEF_INDEX_AUTO_INC);
for (i = 0; i < 17 * 7; i += 2) {
u32 tmp;
@@ -621,11 +685,12 @@ static void glk_program_nearest_filter_coefs(struct intel_display *display,
t = glk_coef_tap(i + 1);
tmp |= glk_nearest_filter_coef(t) << 16;
- intel_de_write_fw(display, GLK_PS_COEF_DATA_SET(pipe, id, set),
- tmp);
+ intel_de_write_dsb(display, dsb,
+ GLK_PS_COEF_DATA_SET(pipe, id, set), tmp);
}
- intel_de_write_fw(display, GLK_PS_COEF_INDEX_SET(pipe, id, set), 0);
+ intel_de_write_dsb(display, dsb,
+ GLK_PS_COEF_INDEX_SET(pipe, id, set), 0);
}
static u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
@@ -641,14 +706,15 @@ static u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
return PS_FILTER_MEDIUM;
}
-static void skl_scaler_setup_filter(struct intel_display *display, enum pipe pipe,
+static void skl_scaler_setup_filter(struct intel_display *display,
+ struct intel_dsb *dsb, enum pipe pipe,
int id, int set, enum drm_scaling_filter filter)
{
switch (filter) {
case DRM_SCALING_FILTER_DEFAULT:
break;
case DRM_SCALING_FILTER_NEAREST_NEIGHBOR:
- glk_program_nearest_filter_coefs(display, pipe, id, set);
+ glk_program_nearest_filter_coefs(display, dsb, pipe, id, set);
break;
default:
MISSING_CASE(filter);
@@ -695,7 +761,9 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
ps_ctrl = PS_SCALER_EN | PS_BINDING_PIPE | scaler_state->scalers[id].mode |
skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0);
- skl_scaler_setup_filter(display, pipe, id, 0,
+ trace_intel_pipe_scaler_update_arm(crtc, id, x, y, width, height);
+
+ skl_scaler_setup_filter(display, NULL, pipe, id, 0,
crtc_state->hw.scaling_filter);
intel_de_write_fw(display, SKL_PS_CTRL(pipe, id), ps_ctrl);
@@ -711,12 +779,12 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
}
void
-skl_program_plane_scaler(struct intel_plane *plane,
+skl_program_plane_scaler(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
int scaler_id = plane_state->scaler_id;
@@ -740,7 +808,7 @@ skl_program_plane_scaler(struct intel_plane *plane,
/* TODO: handle sub-pixel coordinates */
if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
- !icl_is_hdr_plane(dev_priv, plane->id)) {
+ !icl_is_hdr_plane(display, plane->id)) {
y_hphase = skl_scaler_calc_phase(1, hscale, false);
y_vphase = skl_scaler_calc_phase(1, vscale, false);
@@ -759,33 +827,41 @@ skl_program_plane_scaler(struct intel_plane *plane,
ps_ctrl = PS_SCALER_EN | PS_BINDING_PLANE(plane->id) | scaler->mode |
skl_scaler_get_filter_select(plane_state->hw.scaling_filter, 0);
- skl_scaler_setup_filter(display, pipe, scaler_id, 0,
+ trace_intel_plane_scaler_update_arm(plane, scaler_id,
+ crtc_x, crtc_y, crtc_w, crtc_h);
+
+ skl_scaler_setup_filter(display, dsb, pipe, scaler_id, 0,
plane_state->hw.scaling_filter);
- intel_de_write_fw(display, SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
- intel_de_write_fw(display, SKL_PS_VPHASE(pipe, scaler_id),
- PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
- intel_de_write_fw(display, SKL_PS_HPHASE(pipe, scaler_id),
- PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
- intel_de_write_fw(display, SKL_PS_WIN_POS(pipe, scaler_id),
- PS_WIN_XPOS(crtc_x) | PS_WIN_YPOS(crtc_y));
- intel_de_write_fw(display, SKL_PS_WIN_SZ(pipe, scaler_id),
- PS_WIN_XSIZE(crtc_w) | PS_WIN_YSIZE(crtc_h));
+ intel_de_write_dsb(display, dsb, SKL_PS_CTRL(pipe, scaler_id),
+ ps_ctrl);
+ intel_de_write_dsb(display, dsb, SKL_PS_VPHASE(pipe, scaler_id),
+ PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+ intel_de_write_dsb(display, dsb, SKL_PS_HPHASE(pipe, scaler_id),
+ PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
+ intel_de_write_dsb(display, dsb, SKL_PS_WIN_POS(pipe, scaler_id),
+ PS_WIN_XPOS(crtc_x) | PS_WIN_YPOS(crtc_y));
+ intel_de_write_dsb(display, dsb, SKL_PS_WIN_SZ(pipe, scaler_id),
+ PS_WIN_XSIZE(crtc_w) | PS_WIN_YSIZE(crtc_h));
}
-static void skl_detach_scaler(struct intel_crtc *crtc, int id)
+static void skl_detach_scaler(struct intel_dsb *dsb,
+ struct intel_crtc *crtc, int id)
{
struct intel_display *display = to_intel_display(crtc);
- intel_de_write_fw(display, SKL_PS_CTRL(crtc->pipe, id), 0);
- intel_de_write_fw(display, SKL_PS_WIN_POS(crtc->pipe, id), 0);
- intel_de_write_fw(display, SKL_PS_WIN_SZ(crtc->pipe, id), 0);
+ trace_intel_scaler_disable_arm(crtc, id);
+
+ intel_de_write_dsb(display, dsb, SKL_PS_CTRL(crtc->pipe, id), 0);
+ intel_de_write_dsb(display, dsb, SKL_PS_WIN_POS(crtc->pipe, id), 0);
+ intel_de_write_dsb(display, dsb, SKL_PS_WIN_SZ(crtc->pipe, id), 0);
}
/*
* This function detaches (aka. unbinds) unused scalers in hardware
*/
-void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
+void skl_detach_scalers(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct intel_crtc_scaler_state *scaler_state =
@@ -795,7 +871,7 @@ void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
/* loop through and disable scalers that aren't in use */
for (i = 0; i < crtc->num_scalers; i++) {
if (!scaler_state->scalers[i].in_use)
- skl_detach_scaler(crtc, i);
+ skl_detach_scaler(dsb, crtc, i);
}
}
@@ -805,7 +881,7 @@ void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
int i;
for (i = 0; i < crtc->num_scalers; i++)
- skl_detach_scaler(crtc, i);
+ skl_detach_scaler(NULL, crtc, i);
}
void skl_scaler_get_config(struct intel_crtc_state *crtc_state)
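The scale limits used by calculate_max_scale() and the new pch_pfit path above are in the DRM 16.16 fixed-point format, where 0x10000 represents a scale factor of 1.0: "0x30000 - 1" means strictly less than 3.0, "0x18000 - 1" strictly less than 1.5, and 0x10000 at most 1.0. A standalone illustration of that arithmetic follows; fp_scale() is a made-up helper standing in for the drm_rect_calc_hscale()/drm_rect_calc_vscale() math.

/* Standalone illustration of the 16.16 fixed-point scale limits quoted above. */
#include <stdio.h>

static int fp_scale(int src, int dst)		/* scale factor src/dst in 16.16 */
{
	return (int)(((long long)src << 16) / dst);
}

int main(void)
{
	printf("0x30000 - 1 = %.5f (just under 3.0)\n", (0x30000 - 1) / 65536.0);
	printf("0x18000 - 1 = %.5f (just under 1.5)\n", (0x18000 - 1) / 65536.0);
	printf("0x10000     = %.5f (exactly 1.0)\n",     0x10000      / 65536.0);

	/* e.g. downscaling a 5120-wide source to a 1920-wide destination */
	int hscale = fp_scale(5120, 1920);
	printf("5120 -> 1920: hscale = %#x (%.3f), within the 3.0 limit\n",
	       hscale, hscale / 65536.0);
	return 0;
}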
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.h b/drivers/gpu/drm/i915/display/skl_scaler.h
index 4d2e2dbb1666..355ea15260ca 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.h
+++ b/drivers/gpu/drm/i915/display/skl_scaler.h
@@ -8,6 +8,7 @@
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_dsb;
struct intel_plane;
struct intel_plane_state;
@@ -21,10 +22,12 @@ int intel_atomic_setup_scalers(struct intel_atomic_state *state,
void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
-void skl_program_plane_scaler(struct intel_plane *plane,
+void skl_program_plane_scaler(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
-void skl_detach_scalers(const struct intel_crtc_state *crtc_state);
+void skl_detach_scalers(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state);
void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
void skl_scaler_get_config(struct intel_crtc_state *crtc_state);
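The skl_scaler changes above thread a struct intel_dsb pointer through the scaler programming helpers, while callers that still program the hardware immediately (skl_pfit_enable(), skl_scaler_disable()) pass NULL. The call sites appear to rely on a write helper that queues the register write into the DSB when one is supplied and falls back to a direct MMIO write otherwise; the sketch below shows that assumed dispatch shape with stand-in types, not the real intel_de_write_dsb()/intel_dsb API.

/* Stand-in types and helpers only; not the i915 intel_de/intel_dsb interfaces. */
#include <stdio.h>

struct fake_dsb { int n_cmds; };	/* batch of register writes executed later by HW */

static void mmio_write(unsigned int reg, unsigned int val)
{
	printf("immediate MMIO write reg=%#x val=%#x\n", reg, val);
}

static void dsb_queue_write(struct fake_dsb *dsb, unsigned int reg, unsigned int val)
{
	dsb->n_cmds++;
	printf("queued into DSB (#%d) reg=%#x val=%#x\n", dsb->n_cmds, reg, val);
}

/* Assumed shape of the dsb-aware write helper: a NULL dsb means "write now". */
static void write_reg(struct fake_dsb *dsb, unsigned int reg, unsigned int val)
{
	if (dsb)
		dsb_queue_write(dsb, reg, val);
	else
		mmio_write(reg, val);
}

int main(void)
{
	struct fake_dsb dsb = { 0 };

	write_reg(&dsb, 0x68180, 0x80000000);	/* batched, e.g. a scaler enable */
	write_reg(NULL, 0x68180, 0);		/* immediate, e.g. skl_scaler_disable() */
	return 0;
}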
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 80e558042d97..70e550539bb2 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -233,21 +233,19 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
}
}
-static u8 icl_nv12_y_plane_mask(struct drm_i915_private *i915)
+static u8 icl_nv12_y_plane_mask(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
if (DISPLAY_VER(display) >= 13 || HAS_D12_PLANE_MINIMIZATION(display))
return BIT(PLANE_4) | BIT(PLANE_5);
else
return BIT(PLANE_6) | BIT(PLANE_7);
}
-bool icl_is_nv12_y_plane(struct drm_i915_private *dev_priv,
+bool icl_is_nv12_y_plane(struct intel_display *display,
enum plane_id plane_id)
{
- return DISPLAY_VER(dev_priv) >= 11 &&
- icl_nv12_y_plane_mask(dev_priv) & BIT(plane_id);
+ return DISPLAY_VER(display) >= 11 &&
+ icl_nv12_y_plane_mask(display) & BIT(plane_id);
}
u8 icl_hdr_plane_mask(void)
@@ -255,9 +253,9 @@ u8 icl_hdr_plane_mask(void)
return BIT(PLANE_1) | BIT(PLANE_2) | BIT(PLANE_3);
}
-bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id)
+bool icl_is_hdr_plane(struct intel_display *display, enum plane_id plane_id)
{
- return DISPLAY_VER(dev_priv) >= 11 &&
+ return DISPLAY_VER(display) >= 11 &&
icl_hdr_plane_mask() & BIT(plane_id);
}
@@ -512,11 +510,84 @@ skl_plane_max_stride(struct intel_plane *plane,
max_pixels, max_bytes);
}
+static bool tgl_plane_can_async_flip(u64 modifier)
+{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_4_TILED:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS:
+ case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS:
+ case I915_FORMAT_MOD_4_TILED_BMG_CCS:
+ case I915_FORMAT_MOD_4_TILED_LNL_CCS:
+ return true;
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ case I915_FORMAT_MOD_4_TILED_MTL_MC_CCS:
+ case I915_FORMAT_MOD_4_TILED_DG2_MC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
+ case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC:
+ case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC:
+ return false;
+ default:
+ return false;
+ }
+}
+
+static bool icl_plane_can_async_flip(u64 modifier)
+{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ /*
+ * FIXME: Async on Linear buffer is supported on ICL
+ * but with additional alignment and fbc restrictions
+ * need to be taken care of.
+ */
+ return false;
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool skl_plane_can_async_flip(u64 modifier)
+{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ return false;
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ return true;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ /*
+ * Display WA #0731: skl
+ * WaDisableRCWithAsyncFlip: skl
+ * "When render decompression is enabled, hardware
+ * internally converts the Async flips to Sync flips."
+ *
+ * Display WA #1159: glk
+ * "Async flip with render compression may result in
+ * intermittent underrun corruption."
+ */
+ return false;
+ default:
+ return false;
+ }
+}
+
static u32 tgl_plane_min_alignment(struct intel_plane *plane,
const struct drm_framebuffer *fb,
int color_plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
/* PLANE_SURF GGTT -> DPT alignment */
int mult = intel_fb_uses_dpt(fb) ? 512 : 1;
@@ -524,28 +595,30 @@ static u32 tgl_plane_min_alignment(struct intel_plane *plane,
if (intel_fb_is_ccs_aux_plane(fb, color_plane))
return mult * 4 * 1024;
+ /*
+ * FIXME ADL sees GGTT/DMAR faults with async
+ * flips unless we align to 16k at least.
+ * Figure out what's going on here...
+ */
+ if (display->platform.alderlake_p &&
+ intel_plane_can_async_flip(plane, fb->modifier))
+ return mult * 16 * 1024;
+
switch (fb->modifier) {
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_4_TILED:
- /*
- * FIXME ADL sees GGTT/DMAR faults with async
- * flips unless we align to 16k at least.
- * Figure out what's going on here...
- */
- if (IS_ALDERLAKE_P(i915) && HAS_ASYNC_FLIPS(i915))
- return mult * 16 * 1024;
return mult * 4 * 1024;
- case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
- case I915_FORMAT_MOD_4_TILED_MTL_MC_CCS:
case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS:
- case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC:
case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS:
- case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ case I915_FORMAT_MOD_4_TILED_MTL_MC_CCS:
case I915_FORMAT_MOD_4_TILED_DG2_MC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
+ case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC:
+ case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC:
case I915_FORMAT_MOD_4_TILED_BMG_CCS:
case I915_FORMAT_MOD_4_TILED_LNL_CCS:
/*
@@ -570,6 +643,10 @@ static u32 skl_plane_min_alignment(struct intel_plane *plane,
if (color_plane != 0)
return 4 * 1024;
+ /*
+ * VT-d needs at least 256k alignment,
+ * but that's already covered below.
+ */
switch (fb->modifier) {
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
@@ -605,7 +682,7 @@ icl_program_input_csc(struct intel_dsb *dsb,
struct intel_plane *plane,
const struct intel_plane_state *plane_state)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
@@ -750,7 +827,7 @@ static void skl_write_plane_wm(struct intel_dsb *dsb,
struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
@@ -796,7 +873,7 @@ skl_plane_disable_arm(struct intel_dsb *dsb,
struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
@@ -810,7 +887,7 @@ static void icl_plane_disable_sel_fetch_arm(struct intel_dsb *dsb,
struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
if (!crtc_state->enable_psr2_sel_fetch)
@@ -824,12 +901,11 @@ icl_plane_disable_arm(struct intel_dsb *dsb,
struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
- if (icl_is_hdr_plane(dev_priv, plane_id))
+ if (icl_is_hdr_plane(display, plane_id))
intel_de_write_dsb(display, dsb, PLANE_CUS_CTL(pipe, plane_id), 0);
skl_write_plane_wm(dsb, plane, crtc_state);
@@ -843,22 +919,22 @@ static bool
skl_plane_get_hw_state(struct intel_plane *plane,
enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum intel_display_power_domain power_domain;
enum plane_id plane_id = plane->id;
intel_wakeref_t wakeref;
bool ret;
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ wakeref = intel_display_power_get_if_enabled(display, power_domain);
if (!wakeref)
return false;
- ret = intel_de_read(dev_priv, PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE;
+ ret = intel_de_read(display, PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE;
*pipe = plane->pipe;
- intel_display_power_put(dev_priv, power_domain, wakeref);
+ intel_display_power_put(display, power_domain, wakeref);
return ret;
}
@@ -1020,7 +1096,7 @@ static u32 skl_plane_ctl_rotate(unsigned int rotate)
break;
/*
* DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
- * while i915 HW rotation is clockwise, thats why this swapping.
+ * while i915 HW rotation is clockwise, that's why this swapping.
*/
case DRM_MODE_ROTATE_90:
return PLANE_CTL_ROTATE_270;
@@ -1075,10 +1151,10 @@ static u32 adlp_plane_ctl_arb_slots(const struct intel_plane_state *plane_state)
static u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
u32 plane_ctl = 0;
- if (DISPLAY_VER(dev_priv) >= 10)
+ if (DISPLAY_VER(display) >= 10)
return plane_ctl;
if (crtc_state->gamma_enable)
@@ -1093,8 +1169,7 @@ static u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
static u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int rotation = plane_state->hw.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
@@ -1102,7 +1177,7 @@ static u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
plane_ctl = PLANE_CTL_ENABLE;
- if (DISPLAY_VER(dev_priv) < 10) {
+ if (DISPLAY_VER(display) < 10) {
plane_ctl |= skl_plane_ctl_alpha(plane_state);
plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
@@ -1117,7 +1192,7 @@ static u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(display) >= 11)
plane_ctl |= icl_plane_ctl_flip(rotation &
DRM_MODE_REFLECT_MASK);
@@ -1127,7 +1202,7 @@ static u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
/* Wa_22012358565:adl-p */
- if (DISPLAY_VER(dev_priv) == 13)
+ if (DISPLAY_VER(display) == 13)
plane_ctl |= adlp_plane_ctl_arb_slots(plane_state);
return plane_ctl;
@@ -1135,10 +1210,10 @@ static u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
static u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
u32 plane_color_ctl = 0;
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(display) >= 11)
return plane_color_ctl;
if (crtc_state->gamma_enable)
@@ -1153,8 +1228,7 @@ static u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
static u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
u32 plane_color_ctl = 0;
@@ -1162,7 +1236,7 @@ static u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
- if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
+ if (fb->format->is_yuv && !icl_is_hdr_plane(display, plane->id)) {
switch (plane_state->hw.color_encoding) {
case DRM_COLOR_YCBCR_BT709:
plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
@@ -1192,7 +1266,7 @@ static u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
static u32 skl_surf_address(const struct intel_plane_state *plane_state,
int color_plane)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
u32 offset = plane_state->view.color_plane[color_plane].offset;
@@ -1201,12 +1275,12 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state,
* The DPT object contains only one vma, so the VMA's offset
* within the DPT is always 0.
*/
- drm_WARN_ON(&i915->drm, plane_state->dpt_vma &&
+ drm_WARN_ON(display->drm, plane_state->dpt_vma &&
intel_dpt_offset(plane_state->dpt_vma));
- drm_WARN_ON(&i915->drm, offset & 0x1fffff);
+ drm_WARN_ON(display->drm, offset & 0x1fffff);
return offset >> 9;
} else {
- drm_WARN_ON(&i915->drm, offset & 0xfff);
+ drm_WARN_ON(display->drm, offset & 0xfff);
return offset;
}
}
@@ -1225,10 +1299,10 @@ static u32 skl_plane_surf(const struct intel_plane_state *plane_state,
return plane_surf;
}
-static u32 skl_plane_aux_dist(const struct intel_plane_state *plane_state,
- int color_plane)
+u32 skl_plane_aux_dist(const struct intel_plane_state *plane_state,
+ int color_plane)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
int aux_plane = skl_main_to_aux_plane(fb, color_plane);
u32 aux_dist;
@@ -1239,7 +1313,7 @@ static u32 skl_plane_aux_dist(const struct intel_plane_state *plane_state,
aux_dist = skl_surf_address(plane_state, aux_plane) -
skl_surf_address(plane_state, color_plane);
- if (DISPLAY_VER(i915) < 12)
+ if (DISPLAY_VER(display) < 12)
aux_dist |= PLANE_AUX_STRIDE(skl_plane_stride(plane_state, aux_plane));
return aux_dist;
@@ -1277,7 +1351,7 @@ static void icl_plane_csc_load_black(struct intel_dsb *dsb,
struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
@@ -1301,8 +1375,7 @@ static void icl_plane_csc_load_black(struct intel_dsb *dsb,
static int icl_plane_color_plane(const struct intel_plane_state *plane_state)
{
- /* Program the UV plane on planar master */
- if (plane_state->planar_linked_plane && !plane_state->planar_slave)
+ if (plane_state->planar_linked_plane && !plane_state->is_y_plane)
return 1;
else
return 0;
@@ -1314,7 +1387,7 @@ skl_plane_update_noarm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
u32 stride = skl_plane_stride(plane_state, 0);
@@ -1345,8 +1418,7 @@ skl_plane_update_arm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
u32 x = plane_state->view.color_plane[0].x;
@@ -1361,7 +1433,7 @@ skl_plane_update_arm(struct intel_dsb *dsb,
crtc_state->async_flip_planes & BIT(plane->id))
plane_ctl |= PLANE_CTL_ASYNC_FLIP;
- if (DISPLAY_VER(dev_priv) >= 10)
+ if (DISPLAY_VER(display) >= 10)
plane_color_ctl = plane_state->color_ctl |
glk_plane_color_ctl_crtc(crtc_state);
@@ -1382,7 +1454,7 @@ skl_plane_update_arm(struct intel_dsb *dsb,
PLANE_OFFSET_Y(plane_state->view.color_plane[1].y) |
PLANE_OFFSET_X(plane_state->view.color_plane[1].x));
- if (DISPLAY_VER(dev_priv) >= 10)
+ if (DISPLAY_VER(display) >= 10)
intel_de_write_dsb(display, dsb, PLANE_COLOR_CTL(pipe, plane_id),
plane_color_ctl);
@@ -1394,7 +1466,7 @@ skl_plane_update_arm(struct intel_dsb *dsb,
* TODO: split into noarm+arm pair
*/
if (plane_state->scaler_id >= 0)
- skl_program_plane_scaler(plane, crtc_state, plane_state);
+ skl_program_plane_scaler(dsb, plane, crtc_state, plane_state);
/*
* The control register self-arms if the plane was previously
@@ -1413,7 +1485,7 @@ static void icl_plane_update_sel_fetch_noarm(struct intel_dsb *dsb,
const struct intel_plane_state *plane_state,
int color_plane)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
const struct drm_rect *clip;
u32 val;
@@ -1459,8 +1531,7 @@ icl_plane_update_noarm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
int color_plane = icl_plane_color_plane(plane_state);
@@ -1508,18 +1579,18 @@ icl_plane_update_noarm(struct intel_dsb *dsb,
}
/* FLAT CCS doesn't need to program AUX_DIST */
- if (!HAS_FLAT_CCS(dev_priv) && DISPLAY_VER(dev_priv) < 20)
+ if (!HAS_FLAT_CCS(to_i915(display->drm)) && DISPLAY_VER(display) < 20)
intel_de_write_dsb(display, dsb, PLANE_AUX_DIST(pipe, plane_id),
skl_plane_aux_dist(plane_state, color_plane));
- if (icl_is_hdr_plane(dev_priv, plane_id))
+ if (icl_is_hdr_plane(display, plane_id))
intel_de_write_dsb(display, dsb, PLANE_CUS_CTL(pipe, plane_id),
plane_state->cus_ctl);
intel_de_write_dsb(display, dsb, PLANE_COLOR_CTL(pipe, plane_id),
plane_color_ctl);
- if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id))
+ if (fb->format->is_yuv && icl_is_hdr_plane(display, plane_id))
icl_program_input_csc(dsb, plane, plane_state);
skl_write_plane_wm(dsb, plane, crtc_state);
@@ -1539,7 +1610,7 @@ static void icl_plane_update_sel_fetch_arm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
if (!crtc_state->enable_psr2_sel_fetch)
@@ -1558,7 +1629,7 @@ icl_plane_update_arm(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
int color_plane = icl_plane_color_plane(plane_state);
@@ -1575,7 +1646,7 @@ icl_plane_update_arm(struct intel_dsb *dsb,
* TODO: split into noarm+arm pair
*/
if (plane_state->scaler_id >= 0)
- skl_program_plane_scaler(plane, crtc_state, plane_state);
+ skl_program_plane_scaler(dsb, plane, crtc_state, plane_state);
icl_plane_update_sel_fetch_arm(dsb, plane, crtc_state, plane_state);
@@ -1590,6 +1661,17 @@ icl_plane_update_arm(struct intel_dsb *dsb,
skl_plane_surf(plane_state, color_plane));
}
+static void skl_plane_capture_error(struct intel_crtc *crtc,
+ struct intel_plane *plane,
+ struct intel_plane_error *error)
+{
+ struct intel_display *display = to_intel_display(plane);
+
+ error->ctl = intel_de_read(display, PLANE_CTL(crtc->pipe, plane->id));
+ error->surf = intel_de_read(display, PLANE_SURF(crtc->pipe, plane->id));
+ error->surflive = intel_de_read(display, PLANE_SURFLIVE(crtc->pipe, plane->id));
+}
+
static void
skl_plane_async_flip(struct intel_dsb *dsb,
struct intel_plane *plane,
@@ -1597,7 +1679,7 @@ skl_plane_async_flip(struct intel_dsb *dsb,
const struct intel_plane_state *plane_state,
bool async_flip)
{
- struct intel_display *display = to_intel_display(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
u32 plane_ctl = plane_state->ctl, plane_surf;
@@ -1633,8 +1715,8 @@ static bool intel_format_is_p01x(u32 format)
static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int rotation = plane_state->hw.rotation;
@@ -1643,16 +1725,17 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
if (rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180) &&
intel_fb_is_ccs_modifier(fb->modifier)) {
- drm_dbg_kms(&dev_priv->drm,
- "RC support only with 0/180 degree rotation (%x)\n",
- rotation);
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] RC support only with 0/180 degree rotation (%x)\n",
+ plane->base.base.id, plane->base.name, rotation);
return -EINVAL;
}
if (rotation & DRM_MODE_REFLECT_X &&
fb->modifier == DRM_FORMAT_MOD_LINEAR) {
- drm_dbg_kms(&dev_priv->drm,
- "horizontal flip is not supported with linear surface formats\n");
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] horizontal flip is not supported with linear surface formats\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
}
@@ -1661,16 +1744,18 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
*/
if (rotation & DRM_MODE_REFLECT_X &&
intel_fb_is_tile4_modifier(fb->modifier) &&
- DISPLAY_VER(dev_priv) >= 20) {
- drm_dbg_kms(&dev_priv->drm,
- "horizontal flip is not supported with tile4 surface formats\n");
+ DISPLAY_VER(display) >= 20) {
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] horizontal flip is not supported with tile4 surface formats\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
}
if (drm_rotation_90_or_270(rotation)) {
if (!intel_fb_supports_90_270_rotation(to_intel_framebuffer(fb))) {
- drm_dbg_kms(&dev_priv->drm,
- "Y/Yf tiling required for 90/270!\n");
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] Y/Yf tiling required for 90/270!\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
}
@@ -1680,7 +1765,7 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
*/
switch (fb->format->format) {
case DRM_FORMAT_RGB565:
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(display) >= 11)
break;
fallthrough;
case DRM_FORMAT_C8:
@@ -1693,9 +1778,9 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_Y216:
case DRM_FORMAT_XVYU12_16161616:
case DRM_FORMAT_XVYU16161616:
- drm_dbg_kms(&dev_priv->drm,
- "Unsupported pixel format %p4cc for 90/270!\n",
- &fb->format->format);
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] unsupported pixel format %p4cc for 90/270!\n",
+ plane->base.base.id, plane->base.name, &fb->format->format);
return -EINVAL;
default:
break;
@@ -1707,17 +1792,19 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE &&
fb->modifier != DRM_FORMAT_MOD_LINEAR &&
fb->modifier != I915_FORMAT_MOD_X_TILED) {
- drm_dbg_kms(&dev_priv->drm,
- "Y/Yf tiling not supported in IF-ID mode\n");
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] Y/Yf tiling not supported in IF-ID mode\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
}
/* Wa_1606054188:tgl,adl-s */
- if ((IS_ALDERLAKE_S(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
+ if ((display->platform.alderlake_s || display->platform.tigerlake) &&
plane_state->ckey.flags & I915_SET_COLORKEY_SOURCE &&
intel_format_is_p01x(fb->format->format)) {
- drm_dbg_kms(&dev_priv->drm,
- "Source color keying not supported with P01x formats\n");
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] source color keying not supported with P01x formats\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
}
@@ -1727,8 +1814,8 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
int crtc_x = plane_state->uapi.dst.x1;
int crtc_w = drm_rect_width(&plane_state->uapi.dst);
int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
@@ -1742,10 +1829,11 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s
* than the cursor ending less than 4 pixels from the left edge of the
* screen may cause FIFO underflow and display corruption.
*/
- if (DISPLAY_VER(dev_priv) == 10 &&
+ if (DISPLAY_VER(display) == 10 &&
(crtc_x + crtc_w < 4 || crtc_x > pipe_src_w - 4)) {
- drm_dbg_kms(&dev_priv->drm,
- "requested plane X %s position %d invalid (valid range %d-%d)\n",
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] requested plane X %s position %d invalid (valid range %d-%d)\n",
+ plane->base.base.id, plane->base.name,
crtc_x + crtc_w < 4 ? "end" : "start",
crtc_x + crtc_w < 4 ? crtc_x + crtc_w : crtc_x,
4, pipe_src_w - 4);
@@ -1757,7 +1845,8 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s
static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int rotation = plane_state->hw.rotation;
int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
@@ -1767,14 +1856,16 @@ static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_s
src_w & 3 &&
(rotation == DRM_MODE_ROTATE_270 ||
rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) {
- drm_dbg_kms(&i915->drm, "src width must be multiple of 4 for rotated planar YUV\n");
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] src width must be multiple of 4 for rotated planar YUV\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
}
return 0;
}
-static int skl_plane_max_scale(struct drm_i915_private *dev_priv,
+static int skl_plane_max_scale(struct intel_display *display,
const struct drm_framebuffer *fb)
{
/*
@@ -1783,7 +1874,7 @@ static int skl_plane_max_scale(struct drm_i915_private *dev_priv,
* the best case.
* FIXME need to properly check this later.
*/
- if (DISPLAY_VER(dev_priv) >= 10 ||
+ if (DISPLAY_VER(display) >= 10 ||
!intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
return 0x30000 - 1;
else
@@ -1872,8 +1963,8 @@ skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
int *x, int *y, u32 *offset)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
int aux_plane = skl_main_to_aux_plane(fb, 0);
u32 aux_offset = plane_state->view.color_plane[aux_plane].offset;
@@ -1882,7 +1973,7 @@ int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
intel_add_fb_offsets(x, y, plane_state, 0);
*offset = intel_plane_compute_aligned_offset(x, y, plane_state, 0);
- if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment)))
+ if (drm_WARN_ON(display->drm, alignment && !is_power_of_2(alignment)))
return -EINVAL;
/*
@@ -1906,8 +1997,9 @@ int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
while ((*x + w) * cpp > plane_state->view.color_plane[0].mapping_stride) {
if (*offset == 0) {
- drm_dbg_kms(&dev_priv->drm,
- "Unable to find suitable display surface offset due to X-tiling\n");
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] unable to find suitable display surface offset due to X-tiling\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
}
@@ -1922,8 +2014,8 @@ int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int rotation = plane_state->hw.rotation;
int x = plane_state->uapi.src.x1 >> 16;
@@ -1939,8 +2031,9 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
int ret;
if (w > max_width || w < min_width || h > max_height || h < 1) {
- drm_dbg_kms(&dev_priv->drm,
- "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
+ plane->base.base.id, plane->base.name,
w, h, min_width, max_width, max_height);
return -EINVAL;
}
@@ -1966,16 +2059,17 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
if (x != plane_state->view.color_plane[aux_plane].x ||
y != plane_state->view.color_plane[aux_plane].y) {
- drm_dbg_kms(&dev_priv->drm,
- "Unable to find suitable display surface offset due to CCS\n");
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] unable to find suitable display surface offset due to CCS\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
}
}
- if (DISPLAY_VER(dev_priv) >= 13)
- drm_WARN_ON(&dev_priv->drm, x > 65535 || y > 65535);
+ if (DISPLAY_VER(display) >= 13)
+ drm_WARN_ON(display->drm, x > 65535 || y > 65535);
else
- drm_WARN_ON(&dev_priv->drm, x > 8191 || y > 8191);
+ drm_WARN_ON(display->drm, x > 8191 || y > 8191);
plane_state->view.color_plane[0].offset = offset;
plane_state->view.color_plane[0].x = x;
@@ -1993,8 +2087,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int rotation = plane_state->hw.rotation;
int uv_plane = 1;
@@ -2010,8 +2104,9 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
/* FIXME not quite sure how/if these apply to the chroma plane */
if (w > max_width || h > max_height) {
- drm_dbg_kms(&i915->drm,
- "CbCr source size %dx%d too big (limit %dx%d)\n",
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] CbCr source size %dx%d too big (limit %dx%d)\n",
+ plane->base.base.id, plane->base.name,
w, h, max_width, max_height);
return -EINVAL;
}
@@ -2044,16 +2139,17 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
if (x != plane_state->view.color_plane[ccs_plane].x ||
y != plane_state->view.color_plane[ccs_plane].y) {
- drm_dbg_kms(&i915->drm,
- "Unable to find suitable display surface offset due to CCS\n");
+ drm_dbg_kms(display->drm,
+ "[PLANE:%d:%s] unable to find suitable display surface offset due to CCS\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
}
}
- if (DISPLAY_VER(i915) >= 13)
- drm_WARN_ON(&i915->drm, x > 65535 || y > 65535);
+ if (DISPLAY_VER(display) >= 13)
+ drm_WARN_ON(display->drm, x > 65535 || y > 65535);
else
- drm_WARN_ON(&i915->drm, x > 8191 || y > 8191);
+ drm_WARN_ON(display->drm, x > 8191 || y > 8191);
plane_state->view.color_plane[uv_plane].offset = offset;
plane_state->view.color_plane[uv_plane].x = x;
@@ -2139,9 +2235,13 @@ static int skl_check_plane_surface(struct intel_plane_state *plane_state)
static bool skl_fb_scalable(const struct drm_framebuffer *fb)
{
+ struct intel_display *display;
+
if (!fb)
return false;
+ display = to_intel_display(fb->dev);
+
switch (fb->format->format) {
case DRM_FORMAT_C8:
return false;
@@ -2149,7 +2249,7 @@ static bool skl_fb_scalable(const struct drm_framebuffer *fb)
case DRM_FORMAT_ARGB16161616F:
case DRM_FORMAT_XBGR16161616F:
case DRM_FORMAT_ABGR16161616F:
- return DISPLAY_VER(to_i915(fb->dev)) >= 11;
+ return DISPLAY_VER(display) >= 11;
default:
return true;
}
@@ -2157,24 +2257,61 @@ static bool skl_fb_scalable(const struct drm_framebuffer *fb)
static void check_protection(struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
struct drm_gem_object *obj = intel_fb_bo(fb);
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
return;
- plane_state->decrypt = intel_pxp_key_check(i915->pxp, obj, false) == 0;
+ plane_state->decrypt = intel_pxp_key_check(obj, false) == 0;
plane_state->force_black = intel_bo_is_protected(obj) &&
!plane_state->decrypt;
}
+static void
+make_damage_viewport_relative(struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ const struct drm_rect *src = &plane_state->uapi.src;
+ unsigned int rotation = plane_state->hw.rotation;
+ struct drm_rect *damage = &plane_state->damage;
+
+ if (!drm_rect_visible(damage))
+ return;
+
+ if (!fb || !plane_state->uapi.visible) {
+ plane_state->damage = DRM_RECT_INIT(0, 0, 0, 0);
+ return;
+ }
+
+ if (drm_rotation_90_or_270(rotation)) {
+ drm_rect_rotate(damage, fb->width, fb->height,
+ DRM_MODE_ROTATE_270);
+ drm_rect_translate(damage, -(src->y1 >> 16), -(src->x1 >> 16));
+ } else {
+ drm_rect_translate(damage, -(src->x1 >> 16), -(src->y1 >> 16));
+ }
+}
+
+static void clip_damage(struct intel_plane_state *plane_state)
+{
+ struct drm_rect *damage = &plane_state->damage;
+ struct drm_rect src;
+
+ if (!drm_rect_visible(damage))
+ return;
+
+ drm_rect_fp_to_int(&src, &plane_state->uapi.src);
+ drm_rect_translate(damage, src.x1, src.y1);
+ drm_rect_intersect(damage, &src);
+}
+
static int skl_plane_check(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
int min_scale = DRM_PLANE_NO_SCALING;
int max_scale = DRM_PLANE_NO_SCALING;
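make_damage_viewport_relative() and clip_damage() above first rebase the userspace damage rectangle from framebuffer coordinates onto the plane's source-window origin, and later translate it back and clamp it to the integer source rectangle, apparently so the rectangle stays meaningful across the source-offset adjustments made in between by skl_check_plane_surface(). A worked example with made-up numbers, using open-coded math instead of the drm_rect helpers:

/* Worked example with made-up numbers; open-coded instead of drm_rect_*(). */
#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

static void translate(struct rect *r, int dx, int dy)
{
	r->x1 += dx; r->x2 += dx;
	r->y1 += dy; r->y2 += dy;
}

static void intersect(struct rect *r, const struct rect *clip)
{
	if (r->x1 < clip->x1) r->x1 = clip->x1;
	if (r->y1 < clip->y1) r->y1 = clip->y1;
	if (r->x2 > clip->x2) r->x2 = clip->x2;
	if (r->y2 > clip->y2) r->y2 = clip->y2;
}

int main(void)
{
	/* Plane samples a 1024x768 window at (256,128) of the framebuffer. */
	struct rect src    = { 256, 128, 256 + 1024, 128 + 768 };
	/* Damage reported by userspace, in framebuffer coordinates. */
	struct rect damage = { 200, 100, 800, 700 };

	/* make_damage_viewport_relative(): rebase onto the source-window origin. */
	translate(&damage, -src.x1, -src.y1);
	printf("viewport-relative: (%d,%d)-(%d,%d)\n",
	       damage.x1, damage.y1, damage.x2, damage.y2);

	/* clip_damage(): back into fb coordinates, clamped to the visible source. */
	translate(&damage, src.x1, src.y1);
	intersect(&damage, &src);
	printf("clipped:           (%d,%d)-(%d,%d)\n",
	       damage.x1, damage.y1, damage.x2, damage.y2);
	return 0;
}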
@@ -2187,7 +2324,7 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
/* use scaler when colorkey is not required */
if (!plane_state->ckey.flags && skl_fb_scalable(fb)) {
min_scale = 1;
- max_scale = skl_plane_max_scale(dev_priv, fb);
+ max_scale = skl_plane_max_scale(display, fb);
}
ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
@@ -2195,6 +2332,8 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
+ make_damage_viewport_relative(plane_state);
+
ret = skl_check_plane_surface(plane_state);
if (ret)
return ret;
@@ -2210,6 +2349,8 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
+ clip_damage(plane_state);
+
ret = skl_plane_check_nv12_rotation(plane_state);
if (ret)
return ret;
@@ -2217,17 +2358,19 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
check_protection(plane_state);
/* HW only has 8 bits pixel precision, disable plane if invisible */
- if (!(plane_state->hw.alpha >> 8))
+ if (!(plane_state->hw.alpha >> 8)) {
plane_state->uapi.visible = false;
+ plane_state->damage = DRM_RECT_INIT(0, 0, 0, 0);
+ }
plane_state->ctl = skl_plane_ctl(crtc_state, plane_state);
- if (DISPLAY_VER(dev_priv) >= 10)
+ if (DISPLAY_VER(display) >= 10)
plane_state->color_ctl = glk_plane_color_ctl(crtc_state,
plane_state);
if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
- icl_is_hdr_plane(dev_priv, plane->id))
+ icl_is_hdr_plane(display, plane->id))
/* Enable and use MPEG-2 chroma siting */
plane_state->cus_ctl = PLANE_CUS_ENABLE |
PLANE_CUS_HPHASE_0 |
@@ -2238,42 +2381,74 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
return 0;
}
+void icl_link_nv12_planes(struct intel_plane_state *uv_plane_state,
+ struct intel_plane_state *y_plane_state)
+{
+ struct intel_display *display = to_intel_display(uv_plane_state);
+ struct intel_plane *uv_plane = to_intel_plane(uv_plane_state->uapi.plane);
+ struct intel_plane *y_plane = to_intel_plane(y_plane_state->uapi.plane);
+
+ drm_WARN_ON(display->drm, icl_is_nv12_y_plane(display, uv_plane->id));
+ drm_WARN_ON(display->drm, !icl_is_nv12_y_plane(display, y_plane->id));
+
+ y_plane_state->ctl |= PLANE_CTL_YUV420_Y_PLANE;
+
+ if (icl_is_hdr_plane(display, uv_plane->id)) {
+ switch (y_plane->id) {
+ case PLANE_7:
+ uv_plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL;
+ break;
+ case PLANE_6:
+ uv_plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL;
+ break;
+ case PLANE_5:
+ uv_plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL;
+ break;
+ case PLANE_4:
+ uv_plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL;
+ break;
+ default:
+ MISSING_CASE(y_plane->id);
+ }
+ }
+}
+
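For context on icl_link_nv12_planes(): NV12 scanout on these platforms uses two hardware planes, a full-resolution Y plane and a half-height interleaved CbCr (UV) plane, and the UV plane's CUS control must name which Y plane it is paired with. A tiny standalone sketch of the NV12 size arithmetic, with an assumed 1920x1080 buffer purely for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int width = 1920, height = 1080;

	/* NV12: one byte of Y per pixel, plus one interleaved Cb/Cr byte
	 * pair per 2x2 pixel block, i.e. same stride but half the rows. */
	unsigned long y_size = (unsigned long)width * height;
	unsigned long uv_size = (unsigned long)width * (height / 2);

	printf("Y plane:  %lu bytes\n", y_size);	/* 2073600 */
	printf("UV plane: %lu bytes\n", uv_size);	/* 1036800 */
	return 0;
}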
static enum intel_fbc_id skl_fbc_id_for_pipe(enum pipe pipe)
{
return pipe - PIPE_A + INTEL_FBC_A;
}
-static bool skl_plane_has_fbc(struct drm_i915_private *i915,
+static bool skl_plane_has_fbc(struct intel_display *display,
enum intel_fbc_id fbc_id, enum plane_id plane_id)
{
- if ((DISPLAY_RUNTIME_INFO(i915)->fbc_mask & BIT(fbc_id)) == 0)
+ if ((DISPLAY_RUNTIME_INFO(display)->fbc_mask & BIT(fbc_id)) == 0)
return false;
- if (DISPLAY_VER(i915) >= 20)
- return icl_is_hdr_plane(i915, plane_id);
+ if (DISPLAY_VER(display) >= 20)
+ return icl_is_hdr_plane(display, plane_id);
else
return plane_id == PLANE_1;
}
-static struct intel_fbc *skl_plane_fbc(struct drm_i915_private *dev_priv,
+static struct intel_fbc *skl_plane_fbc(struct intel_display *display,
enum pipe pipe, enum plane_id plane_id)
{
enum intel_fbc_id fbc_id = skl_fbc_id_for_pipe(pipe);
- if (skl_plane_has_fbc(dev_priv, fbc_id, plane_id))
- return dev_priv->display.fbc[fbc_id];
+ if (skl_plane_has_fbc(display, fbc_id, plane_id))
+ return display->fbc[fbc_id];
else
return NULL;
}
-static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
+static bool skl_plane_has_planar(struct intel_display *display,
enum pipe pipe, enum plane_id plane_id)
{
/* Display WA #0870: skl, bxt */
- if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.skylake || display->platform.broxton)
return false;
- if (DISPLAY_VER(dev_priv) == 9 && pipe == PIPE_C)
+ if (DISPLAY_VER(display) == 9 && pipe == PIPE_C)
return false;
if (plane_id != PLANE_1 && plane_id != PLANE_2)
@@ -2282,11 +2457,11 @@ static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
return true;
}
-static const u32 *skl_get_plane_formats(struct drm_i915_private *dev_priv,
+static const u32 *skl_get_plane_formats(struct intel_display *display,
enum pipe pipe, enum plane_id plane_id,
int *num_formats)
{
- if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
+ if (skl_plane_has_planar(display, pipe, plane_id)) {
*num_formats = ARRAY_SIZE(skl_planar_formats);
return skl_planar_formats;
} else {
@@ -2295,11 +2470,11 @@ static const u32 *skl_get_plane_formats(struct drm_i915_private *dev_priv,
}
}
-static const u32 *glk_get_plane_formats(struct drm_i915_private *dev_priv,
+static const u32 *glk_get_plane_formats(struct intel_display *display,
enum pipe pipe, enum plane_id plane_id,
int *num_formats)
{
- if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
+ if (skl_plane_has_planar(display, pipe, plane_id)) {
*num_formats = ARRAY_SIZE(glk_planar_formats);
return glk_planar_formats;
} else {
@@ -2308,14 +2483,14 @@ static const u32 *glk_get_plane_formats(struct drm_i915_private *dev_priv,
}
}
-static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv,
+static const u32 *icl_get_plane_formats(struct intel_display *display,
enum pipe pipe, enum plane_id plane_id,
int *num_formats)
{
- if (icl_is_hdr_plane(dev_priv, plane_id)) {
+ if (icl_is_hdr_plane(display, plane_id)) {
*num_formats = ARRAY_SIZE(icl_hdr_plane_formats);
return icl_hdr_plane_formats;
- } else if (icl_is_nv12_y_plane(dev_priv, plane_id)) {
+ } else if (icl_is_nv12_y_plane(display, plane_id)) {
*num_formats = ARRAY_SIZE(icl_sdr_y_plane_formats);
return icl_sdr_y_plane_formats;
} else {
@@ -2533,64 +2708,78 @@ skl_plane_disable_flip_done(struct intel_plane *plane)
spin_unlock_irq(&i915->irq_lock);
}
-static bool skl_plane_has_rc_ccs(struct drm_i915_private *i915,
+static bool skl_plane_has_rc_ccs(struct intel_display *display,
enum pipe pipe, enum plane_id plane_id)
{
- /* Wa_22011186057 */
- if (IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0))
- return false;
+ return pipe != PIPE_C &&
+ (plane_id == PLANE_1 || plane_id == PLANE_2);
+}
- if (DISPLAY_VER(i915) >= 11)
- return true;
+static u8 skl_plane_caps(struct intel_display *display,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ u8 caps = INTEL_PLANE_CAP_TILING_X |
+ INTEL_PLANE_CAP_TILING_Y |
+ INTEL_PLANE_CAP_TILING_Yf;
- if (IS_GEMINILAKE(i915))
- return pipe != PIPE_C;
+ if (skl_plane_has_rc_ccs(display, pipe, plane_id))
+ caps |= INTEL_PLANE_CAP_CCS_RC;
- return pipe != PIPE_C &&
- (plane_id == PLANE_1 || plane_id == PLANE_2);
+ return caps;
}
-static bool tgl_plane_has_mc_ccs(struct drm_i915_private *i915,
- enum plane_id plane_id)
+static bool glk_plane_has_rc_ccs(struct intel_display *display,
+ enum pipe pipe)
{
- if (DISPLAY_VER(i915) < 12)
- return false;
+ return pipe != PIPE_C;
+}
- /* Wa_14010477008 */
- if (IS_DG1(i915) || IS_ROCKETLAKE(i915) ||
- (IS_TIGERLAKE(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_D0)))
- return false;
+static u8 glk_plane_caps(struct intel_display *display,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ u8 caps = INTEL_PLANE_CAP_TILING_X |
+ INTEL_PLANE_CAP_TILING_Y |
+ INTEL_PLANE_CAP_TILING_Yf;
+
+ if (glk_plane_has_rc_ccs(display, pipe))
+ caps |= INTEL_PLANE_CAP_CCS_RC;
+
+ return caps;
+}
+
+static u8 icl_plane_caps(struct intel_display *display,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ return INTEL_PLANE_CAP_TILING_X |
+ INTEL_PLANE_CAP_TILING_Y |
+ INTEL_PLANE_CAP_TILING_Yf |
+ INTEL_PLANE_CAP_CCS_RC;
+}
- /* Wa_22011186057 */
- if (IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0))
+static bool tgl_plane_has_mc_ccs(struct intel_display *display,
+ enum plane_id plane_id)
+{
+ /* Wa_14010477008 */
+ if (display->platform.dg1 || display->platform.rocketlake ||
+ (display->platform.tigerlake && IS_DISPLAY_STEP(display, STEP_A0, STEP_D0)))
return false;
return plane_id < PLANE_6;
}
-static u8 skl_get_plane_caps(struct drm_i915_private *i915,
- enum pipe pipe, enum plane_id plane_id)
+static u8 tgl_plane_caps(struct intel_display *display,
+ enum pipe pipe, enum plane_id plane_id)
{
- struct intel_display *display = &i915->display;
- u8 caps = INTEL_PLANE_CAP_TILING_X;
+ u8 caps = INTEL_PLANE_CAP_TILING_X |
+ INTEL_PLANE_CAP_CCS_RC |
+ INTEL_PLANE_CAP_CCS_RC_CC;
- if (DISPLAY_VER(display) < 13 || display->platform.alderlake_p)
- caps |= INTEL_PLANE_CAP_TILING_Y;
- if (DISPLAY_VER(display) < 12)
- caps |= INTEL_PLANE_CAP_TILING_Yf;
if (HAS_4TILE(display))
caps |= INTEL_PLANE_CAP_TILING_4;
+ else
+ caps |= INTEL_PLANE_CAP_TILING_Y;
- if (!IS_ENABLED(I915) && !HAS_FLAT_CCS(i915))
- return caps;
-
- if (skl_plane_has_rc_ccs(i915, pipe, plane_id)) {
- caps |= INTEL_PLANE_CAP_CCS_RC;
- if (DISPLAY_VER(display) >= 12)
- caps |= INTEL_PLANE_CAP_CCS_RC_CC;
- }
-
- if (tgl_plane_has_mc_ccs(i915, plane_id))
+ if (tgl_plane_has_mc_ccs(display, plane_id))
caps |= INTEL_PLANE_CAP_CCS_MC;
if (DISPLAY_VER(display) >= 14 && display->platform.dgfx)
@@ -2600,7 +2789,7 @@ static u8 skl_get_plane_caps(struct drm_i915_private *i915,
}
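The per-generation *_plane_caps() helpers each return a small bitmask that is later turned into the plane's supported modifier list, and (as seen further down in skl_universal_plane_create()) some CCS bits may be masked back out on builds that cannot handle them. A trivial sketch of that build-then-mask pattern, using placeholder bit values rather than the real INTEL_PLANE_CAP_* definitions:

#include <stdio.h>
#include <stdint.h>

/* Placeholder bits for illustration; the real INTEL_PLANE_CAP_* values
 * live in the driver headers. */
#define CAP_TILING_X	(1u << 0)
#define CAP_TILING_Y	(1u << 1)
#define CAP_TILING_4	(1u << 2)
#define CAP_CCS_RC	(1u << 3)
#define CAP_CCS_RC_CC	(1u << 4)
#define CAP_CCS_MC	(1u << 5)

int main(void)
{
	uint8_t caps = CAP_TILING_X | CAP_CCS_RC | CAP_CCS_RC_CC | CAP_CCS_MC;
	int has_4tile = 1;	/* pretend: a platform with tile-4 support */

	caps |= has_4tile ? CAP_TILING_4 : CAP_TILING_Y;

	/* Equivalent of the later "no flat CCS -> drop all CCS caps" step. */
	caps &= (uint8_t)~(CAP_CCS_RC | CAP_CCS_RC_CC | CAP_CCS_MC);

	printf("caps = 0x%02x\n", caps);	/* 0x05: X tiling + tile-4 */
	return 0;
}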
struct intel_plane *
-skl_universal_plane_create(struct drm_i915_private *dev_priv,
+skl_universal_plane_create(struct intel_display *display,
enum pipe pipe, enum plane_id plane_id)
{
const struct drm_plane_funcs *plane_funcs;
@@ -2612,6 +2801,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
const u32 *formats;
int num_formats;
int ret;
+ u8 caps;
plane = intel_plane_alloc();
if (IS_ERR(plane))
@@ -2621,21 +2811,21 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
plane->id = plane_id;
plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id);
- intel_fbc_add_plane(skl_plane_fbc(dev_priv, pipe, plane_id), plane);
+ intel_fbc_add_plane(skl_plane_fbc(display, pipe, plane_id), plane);
- if (DISPLAY_VER(dev_priv) >= 30) {
+ if (DISPLAY_VER(display) >= 30) {
plane->max_width = xe3_plane_max_width;
plane->max_height = icl_plane_max_height;
plane->min_cdclk = icl_plane_min_cdclk;
- } else if (DISPLAY_VER(dev_priv) >= 11) {
+ } else if (DISPLAY_VER(display) >= 11) {
plane->min_width = icl_plane_min_width;
- if (icl_is_hdr_plane(dev_priv, plane_id))
+ if (icl_is_hdr_plane(display, plane_id))
plane->max_width = icl_hdr_plane_max_width;
else
plane->max_width = icl_sdr_plane_max_width;
plane->max_height = icl_plane_max_height;
plane->min_cdclk = icl_plane_min_cdclk;
- } else if (DISPLAY_VER(dev_priv) >= 10) {
+ } else if (DISPLAY_VER(display) >= 10) {
plane->max_width = glk_plane_max_width;
plane->max_height = skl_plane_max_height;
plane->min_cdclk = glk_plane_min_cdclk;
@@ -2645,17 +2835,20 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
plane->min_cdclk = skl_plane_min_cdclk;
}
- if (DISPLAY_VER(dev_priv) >= 13)
+ if (DISPLAY_VER(display) >= 13)
plane->max_stride = adl_plane_max_stride;
else
plane->max_stride = skl_plane_max_stride;
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
plane->min_alignment = tgl_plane_min_alignment;
else
plane->min_alignment = skl_plane_min_alignment;
- if (DISPLAY_VER(dev_priv) >= 11) {
+ if (intel_scanout_needs_vtd_wa(display))
+ plane->vtd_guard = DISPLAY_VER(display) >= 10 ? 168 : 136;
+
+ if (DISPLAY_VER(display) >= 11) {
plane->update_noarm = icl_plane_update_noarm;
plane->update_arm = icl_plane_update_arm;
plane->disable_arm = icl_plane_disable_arm;
@@ -2664,29 +2857,37 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
plane->update_arm = skl_plane_update_arm;
plane->disable_arm = skl_plane_disable_arm;
}
+ plane->capture_error = skl_plane_capture_error;
plane->get_hw_state = skl_plane_get_hw_state;
plane->check_plane = skl_plane_check;
- if (plane_id == PLANE_1) {
- plane->need_async_flip_toggle_wa = IS_DISPLAY_VER(dev_priv, 9, 10);
+ if (HAS_ASYNC_FLIPS(display) && plane_id == PLANE_1) {
+ plane->need_async_flip_toggle_wa = IS_DISPLAY_VER(display, 9, 10);
plane->async_flip = skl_plane_async_flip;
plane->enable_flip_done = skl_plane_enable_flip_done;
plane->disable_flip_done = skl_plane_disable_flip_done;
+
+ if (DISPLAY_VER(display) >= 12)
+ plane->can_async_flip = tgl_plane_can_async_flip;
+ else if (DISPLAY_VER(display) == 11)
+ plane->can_async_flip = icl_plane_can_async_flip;
+ else
+ plane->can_async_flip = skl_plane_can_async_flip;
}
- if (DISPLAY_VER(dev_priv) >= 11)
- formats = icl_get_plane_formats(dev_priv, pipe,
+ if (DISPLAY_VER(display) >= 11)
+ formats = icl_get_plane_formats(display, pipe,
plane_id, &num_formats);
- else if (DISPLAY_VER(dev_priv) >= 10)
- formats = glk_get_plane_formats(dev_priv, pipe,
+ else if (DISPLAY_VER(display) >= 10)
+ formats = glk_get_plane_formats(display, pipe,
plane_id, &num_formats);
else
- formats = skl_get_plane_formats(dev_priv, pipe,
+ formats = skl_get_plane_formats(display, pipe,
plane_id, &num_formats);
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
plane_funcs = &tgl_plane_funcs;
- else if (DISPLAY_VER(dev_priv) == 11)
+ else if (DISPLAY_VER(display) == 11)
plane_funcs = &icl_plane_funcs;
else
plane_funcs = &skl_plane_funcs;
@@ -2696,10 +2897,24 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
else
plane_type = DRM_PLANE_TYPE_OVERLAY;
- modifiers = intel_fb_plane_get_modifiers(dev_priv,
- skl_get_plane_caps(dev_priv, pipe, plane_id));
+ if (DISPLAY_VER(display) >= 12)
+ caps = tgl_plane_caps(display, pipe, plane_id);
+ else if (DISPLAY_VER(display) == 11)
+ caps = icl_plane_caps(display, pipe, plane_id);
+ else if (DISPLAY_VER(display) == 10)
+ caps = glk_plane_caps(display, pipe, plane_id);
+ else
+ caps = skl_plane_caps(display, pipe, plane_id);
+
+ /* FIXME: xe has problems with AUX */
+ if (!IS_ENABLED(I915) && !HAS_FLAT_CCS(to_i915(display->drm)))
+ caps &= ~(INTEL_PLANE_CAP_CCS_RC |
+ INTEL_PLANE_CAP_CCS_RC_CC |
+ INTEL_PLANE_CAP_CCS_MC);
- ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ modifiers = intel_fb_plane_get_modifiers(display, caps);
+
+ ret = drm_universal_plane_init(display->drm, &plane->base,
0, plane_funcs,
formats, num_formats, modifiers,
plane_type,
@@ -2711,14 +2926,14 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
if (ret)
goto fail;
- if (DISPLAY_VER(dev_priv) >= 13)
+ if (DISPLAY_VER(display) >= 13)
supported_rotations = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
else
supported_rotations =
DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(display) >= 11)
supported_rotations |= DRM_MODE_REFLECT_X;
drm_plane_create_rotation_property(&plane->base,
@@ -2727,7 +2942,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
supported_csc = BIT(DRM_COLOR_YCBCR_BT601) | BIT(DRM_COLOR_YCBCR_BT709);
- if (DISPLAY_VER(dev_priv) >= 10)
+ if (DISPLAY_VER(display) >= 10)
supported_csc |= BIT(DRM_COLOR_YCBCR_BT2020);
drm_plane_create_color_properties(&plane->base,
@@ -2745,10 +2960,10 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
drm_plane_create_zpos_immutable_property(&plane->base, plane_id);
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
drm_plane_enable_fb_damage_clips(&plane->base);
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(display) >= 11)
drm_plane_create_scaling_filter_property(&plane->base,
BIT(DRM_SCALING_FILTER_DEFAULT) |
BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
@@ -2769,8 +2984,6 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
{
struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
enum plane_id plane_id = plane->id;
enum pipe pipe;
@@ -2784,35 +2997,36 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
if (!plane->get_hw_state(plane, &pipe))
return;
- drm_WARN_ON(dev, pipe != crtc->pipe);
+ drm_WARN_ON(display->drm, pipe != crtc->pipe);
if (crtc_state->joiner_pipes) {
- drm_dbg_kms(&dev_priv->drm,
- "Unsupported joiner configuration for initial FB\n");
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] Unsupported joiner configuration for initial FB\n",
+ crtc->base.base.id, crtc->base.name);
return;
}
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
- drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
+ drm_dbg_kms(display->drm, "failed to alloc fb\n");
return;
}
fb = &intel_fb->base;
- fb->dev = dev;
+ fb->dev = display->drm;
- val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id));
+ val = intel_de_read(display, PLANE_CTL(pipe, plane_id));
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(display) >= 11)
pixel_format = val & PLANE_CTL_FORMAT_MASK_ICL;
else
pixel_format = val & PLANE_CTL_FORMAT_MASK_SKL;
- if (DISPLAY_VER(dev_priv) >= 10) {
+ if (DISPLAY_VER(display) >= 10) {
u32 color_ctl;
- color_ctl = intel_de_read(dev_priv, PLANE_COLOR_CTL(pipe, plane_id));
+ color_ctl = intel_de_read(display, PLANE_COLOR_CTL(pipe, plane_id));
alpha = REG_FIELD_GET(PLANE_COLOR_ALPHA_MASK, color_ctl);
} else {
alpha = REG_FIELD_GET(PLANE_CTL_ALPHA_MASK, val);
@@ -2834,14 +3048,14 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
case PLANE_CTL_TILED_Y:
plane_config->tiling = I915_TILING_Y;
if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
fb->modifier = I915_FORMAT_MOD_4_TILED_MTL_RC_CCS;
- else if (DISPLAY_VER(dev_priv) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS;
else
fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE)
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
fb->modifier = I915_FORMAT_MOD_4_TILED_MTL_MC_CCS;
else
fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
@@ -2873,15 +3087,15 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
goto error;
}
- if (!dev_priv->display.params.enable_dpt &&
- intel_fb_modifier_uses_dpt(dev_priv, fb->modifier)) {
- drm_dbg_kms(&dev_priv->drm, "DPT disabled, skipping initial FB\n");
+ if (!display->params.enable_dpt &&
+ intel_fb_modifier_uses_dpt(display, fb->modifier)) {
+ drm_dbg_kms(display->drm, "DPT disabled, skipping initial FB\n");
goto error;
}
/*
* DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
- * while i915 HW rotation is clockwise, thats why this swapping.
+ * while i915 HW rotation is clockwise, that's why this swapping.
*/
switch (val & PLANE_CTL_ROTATE_MASK) {
case PLANE_CTL_ROTATE_0:
@@ -2898,24 +3112,24 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
break;
}
- if (DISPLAY_VER(dev_priv) >= 11 && val & PLANE_CTL_FLIP_HORIZONTAL)
+ if (DISPLAY_VER(display) >= 11 && val & PLANE_CTL_FLIP_HORIZONTAL)
plane_config->rotation |= DRM_MODE_REFLECT_X;
/* 90/270 degree rotation would require extra work */
if (drm_rotation_90_or_270(plane_config->rotation))
goto error;
- base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & PLANE_SURF_ADDR_MASK;
+ base = intel_de_read(display, PLANE_SURF(pipe, plane_id)) & PLANE_SURF_ADDR_MASK;
plane_config->base = base;
- offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id));
- drm_WARN_ON(&dev_priv->drm, offset != 0);
+ offset = intel_de_read(display, PLANE_OFFSET(pipe, plane_id));
+ drm_WARN_ON(display->drm, offset != 0);
- val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id));
+ val = intel_de_read(display, PLANE_SIZE(pipe, plane_id));
fb->height = REG_FIELD_GET(PLANE_HEIGHT_MASK, val) + 1;
fb->width = REG_FIELD_GET(PLANE_WIDTH_MASK, val) + 1;
- val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id));
+ val = intel_de_read(display, PLANE_STRIDE(pipe, plane_id));
stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
fb->pitches[0] = REG_FIELD_GET(PLANE_STRIDE__MASK, val) * stride_mult;
@@ -2924,11 +3138,12 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
plane_config->size = fb->pitches[0] * aligned_height;
- drm_dbg_kms(&dev_priv->drm,
- "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
- crtc->base.name, plane->base.name, fb->width, fb->height,
- fb->format->cpp[0] * 8, base, fb->pitches[0],
- plane_config->size);
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s][PLANE:%d:%s] with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ crtc->base.base.id, crtc->base.name,
+ plane->base.base.id, plane->base.name,
+ fb->width, fb->height, fb->format->cpp[0] * 8,
+ base, fb->pitches[0], plane_config->size);
plane_config->fb = intel_fb;
return;
@@ -2940,7 +3155,7 @@ error:
bool skl_fixup_initial_plane_config(struct intel_crtc *crtc,
const struct intel_initial_plane_config *plane_config)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
@@ -2960,7 +3175,7 @@ bool skl_fixup_initial_plane_config(struct intel_crtc *crtc,
if (plane_config->base == base)
return false;
- intel_de_write(i915, PLANE_SURF(pipe, plane_id), base);
+ intel_de_write(display, PLANE_SURF(pipe, plane_id), base);
return true;
}
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.h b/drivers/gpu/drm/i915/display/skl_universal_plane.h
index 541489479135..5e2451c21eeb 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.h
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.h
@@ -8,8 +8,8 @@
#include <linux/types.h>
-struct drm_i915_private;
struct intel_crtc;
+struct intel_display;
struct intel_initial_plane_config;
struct intel_plane_state;
struct skl_ddb_entry;
@@ -19,7 +19,7 @@ enum pipe;
enum plane_id;
struct intel_plane *
-skl_universal_plane_create(struct drm_i915_private *dev_priv,
+skl_universal_plane_create(struct intel_display *display,
enum pipe pipe, enum plane_id plane_id);
void skl_get_initial_plane_config(struct intel_crtc *crtc,
@@ -32,9 +32,15 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
int *x, int *y, u32 *offset);
-bool icl_is_nv12_y_plane(struct drm_i915_private *dev_priv,
+void icl_link_nv12_planes(struct intel_plane_state *uv_plane_state,
+ struct intel_plane_state *y_plane_state);
+
+bool icl_is_nv12_y_plane(struct intel_display *display,
enum plane_id plane_id);
u8 icl_hdr_plane_mask(void);
-bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id);
+bool icl_is_hdr_plane(struct intel_display *display, enum plane_id plane_id);
+
+u32 skl_plane_aux_dist(const struct intel_plane_state *plane_state,
+ int color_plane);
#endif
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index f4458d1185b3..2d0de1c63308 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -52,13 +52,13 @@ struct skl_wm_params {
u32 dbuf_block_size;
};
-u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *i915)
+u8 intel_enabled_dbuf_slices_mask(struct intel_display *display)
{
u8 enabled_slices = 0;
enum dbuf_slice slice;
- for_each_dbuf_slice(i915, slice) {
- if (intel_de_read(i915, DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
+ for_each_dbuf_slice(display, slice) {
+ if (intel_de_read(display, DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
enabled_slices |= BIT(slice);
}
@@ -584,7 +584,7 @@ u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
/*
* Per plane DDB entry can in a really worst case be on multiple slices
- * but single entry is anyway contigious.
+ * but single entry is anyway contiguous.
*/
while (start_slice <= end_slice) {
slice_mask |= BIT(start_slice);
@@ -836,6 +836,7 @@ static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
struct skl_ddb_entry *ddb_y,
u16 *min_ddb, u16 *interim_ddb)
{
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
enum pipe pipe = crtc->pipe;
@@ -843,7 +844,7 @@ static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
enum plane_id plane_id;
power_domain = POWER_DOMAIN_PIPE(pipe);
- wakeref = intel_display_power_get_if_enabled(i915, power_domain);
+ wakeref = intel_display_power_get_if_enabled(display, power_domain);
if (!wakeref)
return;
@@ -855,7 +856,7 @@ static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
&min_ddb[plane_id],
&interim_ddb[plane_id]);
- intel_display_power_put(i915, power_domain, wakeref);
+ intel_display_power_put(display, power_domain, wakeref);
}
struct dbuf_slice_conf_entry {
@@ -2259,8 +2260,8 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
int ret;
- /* Watermarks calculated in master */
- if (plane_state->planar_slave)
+ /* Watermarks calculated on UV plane */
+ if (plane_state->is_y_plane)
return 0;
memset(wm, 0, sizeof(*wm));
@@ -2292,6 +2293,87 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
return 0;
}
+static int
+cdclk_prefill_adjustment(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(crtc_state->uapi.state);
+ const struct intel_cdclk_state *cdclk_state;
+
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state)) {
+ drm_WARN_ON(display->drm, PTR_ERR(cdclk_state));
+ return 1;
+ }
+
+ return min(1, DIV_ROUND_UP(crtc_state->pixel_rate,
+ 2 * cdclk_state->logical.cdclk));
+}
+
+static int
+dsc_prefill_latency(const struct intel_crtc_state *crtc_state)
+{
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ int linetime = DIV_ROUND_UP(1000 * crtc_state->hw.adjusted_mode.htotal,
+ crtc_state->hw.adjusted_mode.clock);
+ int num_scaler_users = hweight32(scaler_state->scaler_users);
+ int chroma_downscaling_factor =
+ crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ? 2 : 1;
+ u32 dsc_prefill_latency = 0;
+
+ if (!crtc_state->dsc.compression_enable || !num_scaler_users)
+ return dsc_prefill_latency;
+
+ dsc_prefill_latency = DIV_ROUND_UP(15 * linetime * chroma_downscaling_factor, 10);
+
+ for (int i = 0; i < num_scaler_users; i++) {
+ u64 hscale_k, vscale_k;
+
+ hscale_k = max(1000, mul_u32_u32(scaler_state->scalers[i].hscale, 1000) >> 16);
+ vscale_k = max(1000, mul_u32_u32(scaler_state->scalers[i].vscale, 1000) >> 16);
+ dsc_prefill_latency = DIV_ROUND_UP_ULL(dsc_prefill_latency * hscale_k * vscale_k,
+ 1000000);
+ }
+
+ dsc_prefill_latency *= cdclk_prefill_adjustment(crtc_state);
+
+ return intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, dsc_prefill_latency);
+}
+
+static int
+scaler_prefill_latency(const struct intel_crtc_state *crtc_state)
+{
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ int num_scaler_users = hweight32(scaler_state->scaler_users);
+ int scaler_prefill_latency = 0;
+ int linetime = DIV_ROUND_UP(1000 * crtc_state->hw.adjusted_mode.htotal,
+ crtc_state->hw.adjusted_mode.clock);
+
+ if (!num_scaler_users)
+ return scaler_prefill_latency;
+
+ scaler_prefill_latency = 4 * linetime;
+
+ if (num_scaler_users > 1) {
+ u64 hscale_k = max(1000, mul_u32_u32(scaler_state->scalers[0].hscale, 1000) >> 16);
+ u64 vscale_k = max(1000, mul_u32_u32(scaler_state->scalers[0].vscale, 1000) >> 16);
+ int chroma_downscaling_factor =
+ crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ? 2 : 1;
+ int latency;
+
+ latency = DIV_ROUND_UP_ULL((4 * linetime * hscale_k * vscale_k *
+ chroma_downscaling_factor), 1000000);
+ scaler_prefill_latency += latency;
+ }
+
+ scaler_prefill_latency *= cdclk_prefill_adjustment(crtc_state);
+
+ return intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, scaler_prefill_latency);
+}
+
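The three helpers above mix a few unit conversions: linetime is derived from htotal and the pixel clock in kHz, and the scaler ratios arrive as 16.16 fixed-point values that are converted into a x1000 (per-mille) factor before scaling the latency. A small runnable sketch of just that arithmetic, using assumed sample timings and a 1.5x downscale; DIV_ROUND_UP is re-expanded locally:

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Assumed timings: htotal in pixels, pixel clock in kHz. */
	unsigned int htotal = 4400, clock_khz = 594000;
	unsigned int linetime_us = DIV_ROUND_UP(1000u * htotal, clock_khz);

	/* 1.5x scale in 16.16 fixed point -> x1000 factor, as for hscale_k. */
	uint32_t hscale = 3u << 15;				/* 1.5 * 65536 */
	uint64_t hscale_k = ((uint64_t)hscale * 1000) >> 16;

	/* A single scaler costs four linetimes before any further scaling. */
	unsigned int scaler_prefill_us = 4 * linetime_us;

	printf("linetime       = %u us\n", linetime_us);		/* 8 */
	printf("hscale_k       = %llu\n", (unsigned long long)hscale_k);	/* 1500 */
	printf("scaler prefill = %u us\n", scaler_prefill_us);		/* 32 */
	return 0;
}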
static bool
skl_is_vblank_too_short(const struct intel_crtc_state *crtc_state,
int wm0_lines, int latency)
@@ -2299,9 +2381,10 @@ skl_is_vblank_too_short(const struct intel_crtc_state *crtc_state,
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- /* FIXME missing scaler and DSC pre-fill time */
return crtc_state->framestart_delay +
intel_usecs_to_scanlines(adjusted_mode, latency) +
+ scaler_prefill_latency(crtc_state) +
+ dsc_prefill_latency(crtc_state) +
wm0_lines >
adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vblank_start;
}
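With the two prefill terms added, the check in skl_is_vblank_too_short() above amounts to a scanline budget: framestart delay, memory-latency lines, scaler prefill, DSC prefill and the WM0 lines must all fit into the vblank (crtc_vtotal - crtc_vblank_start). A minimal sketch of that comparison, with assumed numbers:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* All values in scanlines; purely illustrative numbers. */
	int framestart_delay = 1, latency_lines = 3;
	int scaler_prefill = 5, dsc_prefill = 0, wm0_lines = 4;
	int vblank_lines = 50;		/* crtc_vtotal - crtc_vblank_start */

	int needed = framestart_delay + latency_lines +
		     scaler_prefill + dsc_prefill + wm0_lines;
	bool too_short = needed > vblank_lines;

	printf("need %d of %d vblank lines -> too short: %s\n",
	       needed, vblank_lines, too_short ? "yes" : "no");
	return 0;
}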
@@ -3074,6 +3157,7 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
dbuf_state->joined_mbus = intel_de_read(display, MBUS_CTL) & MBUS_JOIN;
dbuf_state->mdclk_cdclk_ratio = intel_mdclk_cdclk_ratio(display, &display->cdclk.hw);
+ dbuf_state->active_pipes = 0;
for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
@@ -3085,8 +3169,10 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
memset(&crtc_state->wm.skl.optimal, 0,
sizeof(crtc_state->wm.skl.optimal));
- if (crtc_state->hw.active)
+ if (crtc_state->hw.active) {
skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
+ dbuf_state->active_pipes |= BIT(pipe);
+ }
crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
@@ -3204,7 +3290,7 @@ adjust_wm_latency(struct drm_i915_private *i915,
* WaWmMemoryReadLatency
*
* punit doesn't take into account the read latency so we need
- * to add proper adjustement to each valid level we retrieve
+ * to add proper adjustment to each valid level we retrieve
* from the punit when level 0 response data is 0us.
*/
if (wm[0] == 0) {
@@ -3618,7 +3704,7 @@ void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state)
void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_dbuf_state *new_dbuf_state =
intel_atomic_get_new_dbuf_state(state);
const struct intel_dbuf_state *old_dbuf_state =
@@ -3636,12 +3722,12 @@ void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
WARN_ON(!new_dbuf_state->base.changed);
- gen9_dbuf_slices_update(i915, new_slices);
+ gen9_dbuf_slices_update(display, new_slices);
}
void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_dbuf_state *new_dbuf_state =
intel_atomic_get_new_dbuf_state(state);
const struct intel_dbuf_state *old_dbuf_state =
@@ -3659,7 +3745,7 @@ void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
WARN_ON(!new_dbuf_state->base.changed);
- gen9_dbuf_slices_update(i915, new_slices);
+ gen9_dbuf_slices_update(display, new_slices);
}
static void skl_mbus_sanitize(struct drm_i915_private *i915)
@@ -3754,14 +3840,56 @@ static void skl_dbuf_sanitize(struct drm_i915_private *i915)
}
}
-static void skl_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
+static void skl_wm_sanitize(struct drm_i915_private *i915)
{
- skl_wm_get_hw_state(i915);
-
skl_mbus_sanitize(i915);
skl_dbuf_sanitize(i915);
}
+void skl_wm_crtc_disable_noatomic(struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(display->dbuf.obj.state);
+ enum pipe pipe = crtc->pipe;
+
+ if (DISPLAY_VER(display) < 9)
+ return;
+
+ dbuf_state->active_pipes &= ~BIT(pipe);
+
+ dbuf_state->weight[pipe] = 0;
+ dbuf_state->slices[pipe] = 0;
+
+ memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
+
+ memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
+}
+
+void skl_wm_plane_disable_noatomic(struct intel_crtc *crtc,
+ struct intel_plane *plane)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ if (DISPLAY_VER(display) < 9)
+ return;
+
+ skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[plane->id], 0, 0);
+ skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb_y[plane->id], 0, 0);
+
+ crtc_state->wm.skl.plane_min_ddb[plane->id] = 0;
+ crtc_state->wm.skl.plane_interim_ddb[plane->id] = 0;
+
+ memset(&crtc_state->wm.skl.raw.planes[plane->id], 0,
+ sizeof(crtc_state->wm.skl.raw.planes[plane->id]));
+ memset(&crtc_state->wm.skl.optimal.planes[plane->id], 0,
+ sizeof(crtc_state->wm.skl.optimal.planes[plane->id]));
+}
+
void intel_wm_state_verify(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -3792,7 +3920,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y, hw->min_ddb, hw->interim_ddb);
- hw_enabled_slices = intel_enabled_dbuf_slices_mask(i915);
+ hw_enabled_slices = intel_enabled_dbuf_slices_mask(display);
if (DISPLAY_VER(i915) >= 11 &&
hw_enabled_slices != i915->display.dbuf.enabled_slices)
@@ -3889,7 +4017,8 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
static const struct intel_wm_funcs skl_wm_funcs = {
.compute_global_watermarks = skl_compute_wm,
- .get_hw_state = skl_wm_get_hw_state_and_sanitize,
+ .get_hw_state = skl_wm_get_hw_state,
+ .sanitize = skl_wm_sanitize,
};
void skl_wm_init(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
index 8659f89427f2..d9cff6c54310 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark.h
@@ -17,12 +17,13 @@ struct intel_atomic_state;
struct intel_bw_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_plane;
struct intel_plane_state;
struct skl_pipe_wm;
struct skl_wm_level;
-u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *i915);
+u8 intel_enabled_dbuf_slices_mask(struct intel_display *display);
void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
void intel_sagv_post_plane_update(struct intel_atomic_state *state);
@@ -40,6 +41,10 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
void intel_wm_state_verify(struct intel_atomic_state *state,
struct intel_crtc *crtc);
+void skl_wm_crtc_disable_noatomic(struct intel_crtc *crtc);
+void skl_wm_plane_disable_noatomic(struct intel_crtc *crtc,
+ struct intel_plane *plane);
+
void skl_watermark_ipc_init(struct drm_i915_private *i915);
void skl_watermark_ipc_update(struct drm_i915_private *i915);
bool skl_watermark_ipc_enabled(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index d49e9b3c7627..af717df83197 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -59,7 +59,7 @@ static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
8 * 100), lane_count);
}
-/* return pixels equvalent to txbyteclkhs */
+/* return pixels equivalent to txbyteclkhs */
static u16 pixels_from_txbyteclkhs(u16 clk_hs, int bpp, int lane_count,
u16 burst_mode_ratio)
{
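txbyteclkhs() and pixels_from_txbyteclkhs() convert between pixel counts and HS TX byte clocks on the DSI link. Below is a standalone sketch of that arithmetic; the sample values (1920 pixels, 24 bpp, 4 lanes, burst ratio 100) are assumptions for illustration, and the authoritative formula is the one in txbyteclkhs() above:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int pixels = 1920, bpp = 24, lanes = 4, burst_ratio = 100;

	/* pixels -> HS TX byte clocks: bits for the span scaled by the burst
	 * ratio (percent), spread over 8 bits per byte and the lane count. */
	unsigned int clk_hs = DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_ratio,
							8 * 100), lanes);

	/* ...and back to pixels. */
	unsigned int back = DIV_ROUND_UP(clk_hs * lanes * 8 * 100,
					 bpp * burst_ratio);

	printf("txbyteclkhs = %u, pixels = %u\n", clk_hs, back);	/* 1440, 1920 */
	return 0;
}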
@@ -283,7 +283,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
- ret = intel_panel_fitting(pipe_config, conn_state);
+ ret = intel_pfit_compute_config(pipe_config, conn_state);
if (ret)
return ret;
@@ -739,7 +739,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
intel_dsi_wait_panel_power_cycle(intel_dsi);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+ intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
/*
* The BIOS may leave the PLL in a wonky state where it doesn't
@@ -947,7 +947,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
drm_dbg_kms(display->drm, "\n");
- wakeref = intel_display_power_get_if_enabled(dev_priv,
+ wakeref = intel_display_power_get_if_enabled(display,
encoder->power_domain);
if (!wakeref)
return false;
@@ -1007,7 +1007,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
}
out_put_power:
- intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+ intel_display_power_put(display, encoder->power_domain, wakeref);
return active;
}
@@ -1541,14 +1541,14 @@ static const struct drm_encoder_funcs intel_dsi_funcs = {
};
static enum drm_mode_status vlv_dsi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
- if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ if (display->platform.valleyview || display->platform.cherryview) {
enum drm_mode_status status;
- status = intel_cpu_transcoder_mode_valid(i915, mode);
+ status = intel_cpu_transcoder_mode_valid(display, mode);
if (status != MODE_OK)
return status;
}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 59a50647f2c3..2ed47e7d1051 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -459,7 +459,7 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
/*
* rx divider value needs to be updated in the
- * two differnt bit fields in the register hence splitting the
+ * two different bit fields in the register hence splitting the
* rx divider value accordingly
*/
rx_div_lower = rx_div & RX_DIVIDER_BIT_1_2;
@@ -590,9 +590,9 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
intel_de_write(display, MIPI_EOT_DISABLE(display, port), CLOCKSTOP);
}
-static void assert_dsi_pll(struct drm_i915_private *i915, bool state)
+static void assert_dsi_pll(struct intel_display *display, bool state)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
bool cur_state;
vlv_cck_get(i915);
@@ -604,12 +604,12 @@ static void assert_dsi_pll(struct drm_i915_private *i915, bool state)
str_on_off(state), str_on_off(cur_state));
}
-void assert_dsi_pll_enabled(struct drm_i915_private *i915)
+void assert_dsi_pll_enabled(struct intel_display *display)
{
- assert_dsi_pll(i915, true);
+ assert_dsi_pll(display, true);
}
-void assert_dsi_pll_disabled(struct drm_i915_private *i915)
+void assert_dsi_pll_disabled(struct intel_display *display)
{
- assert_dsi_pll(i915, false);
+ assert_dsi_pll(display, false);
}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.h b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h
index fbe5113dbeb9..f975660fa609 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.h
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h
@@ -11,6 +11,7 @@
enum port;
struct drm_i915_private;
struct intel_crtc_state;
+struct intel_display;
struct intel_encoder;
int vlv_dsi_pll_compute(struct intel_encoder *encoder,
@@ -22,7 +23,6 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config);
void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
-bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
int bxt_dsi_pll_compute(struct intel_encoder *encoder,
struct intel_crtc_state *config);
void bxt_dsi_pll_enable(struct intel_encoder *encoder,
@@ -33,13 +33,19 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
#ifdef I915
-void assert_dsi_pll_enabled(struct drm_i915_private *i915);
-void assert_dsi_pll_disabled(struct drm_i915_private *i915);
+bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
+void assert_dsi_pll_enabled(struct intel_display *display);
+void assert_dsi_pll_disabled(struct intel_display *display);
#else
-static inline void assert_dsi_pll_enabled(struct drm_i915_private *i915)
+static inline bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
{
+ return false;
}
-static inline void assert_dsi_pll_disabled(struct drm_i915_private *i915)
+static inline void assert_dsi_pll_enabled(struct intel_display *display)
+{
+}
+
+static inline void assert_dsi_pll_disabled(struct intel_display *display)
{
}
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index c0543c35cd6a..ab1af978911b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -238,7 +238,7 @@ static int proto_context_set_persistence(struct drm_i915_private *i915,
*
* However, if we cannot reset an engine by itself, we cannot
* cleanup a hanging persistent context without causing
- * colateral damage, and we should not pretend we can by
+ * collateral damage, and we should not pretend we can by
* exposing the interface.
*/
if (!intel_has_reset_engine(to_gt(i915)))
@@ -1589,7 +1589,7 @@ static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
*
* However, if we cannot reset an engine by itself, we cannot
* cleanup a hanging persistent context without causing
- * colateral damage, and we should not pretend we can by
+ * collateral damage, and we should not pretend we can by
* exposing the interface.
*/
if (!intel_has_reset_engine(to_gt(ctx->i915)))
@@ -2328,7 +2328,7 @@ finalize_create_context_locked(struct drm_i915_file_private *file_priv,
/*
* One for the xarray and one for the caller. We need to grab
- * the reference *prior* to making the ctx visble to userspace
+ * the reference *prior* to making the ctx visible to userspace
* in gem_context_register(), as at any point after that
* userspace can try to race us with another thread destroying
* the context under our feet.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index b6d97da63d1f..67ac2586a0f3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -245,9 +245,9 @@ struct i915_gem_context {
* Execbuf uses the I915_EXEC_RING_MASK as an index into this
* array to select which HW context + engine to execute on. For
* the default array, the user_ring_map[] is used to translate
- * the legacy uABI onto the approprate index (e.g. both
+ * the legacy uABI onto the appropriate index (e.g. both
* I915_EXEC_DEFAULT and I915_EXEC_RENDER select the same
- * context, and I915_EXEC_BSD is weird). For a use defined
+ * context, and I915_EXEC_BSD is weird). For a user defined
* array, execbuf uses I915_EXEC_RING_MASK as a plain index.
*
* User defined by I915_CONTEXT_PARAM_ENGINE (when the
@@ -276,7 +276,7 @@ struct i915_gem_context {
* @vm: unique address space (GTT)
*
* In full-ppgtt mode, each context has its own address space ensuring
- * complete seperation of one client from all others.
+ * complete separation of one client from all others.
*
* In other modes, this is a NULL pointer with the expectation that
* the caller uses the shared global GTT.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
index 19156ba4b9ef..c3e6a325872d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_create.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -193,7 +193,7 @@ i915_gem_dumb_create(struct drm_file *file,
args->pitch = ALIGN(args->width * cpp, 64);
/* align stride to page size so that we can remap */
- if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
+ if (args->pitch > intel_plane_fb_max_stride(dev, format,
DRM_FORMAT_MOD_LINEAR))
args->pitch = ALIGN(args->pitch, 4096);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 3770828f2eaf..75a143d996e0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -18,8 +18,6 @@
#include "i915_gem_object_frontbuffer.h"
#include "i915_vma.h"
-#define VTD_GUARD (168u * I915_GTT_PAGE_SIZE) /* 168 or tile-row PTE padding */
-
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
@@ -276,7 +274,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
* For objects created by userspace through GEM_CREATE with pat_index
* set by set_pat extension, simply return 0 here without touching
* the cache setting, because such objects should have an immutable
- * cache setting by desgin and always managed by userspace.
+ * cache setting by design and always managed by userspace.
*/
if (i915_gem_object_has_cache_level(obj, cache_level))
return 0;
@@ -424,7 +422,7 @@ out:
struct i915_vma *
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
struct i915_gem_ww_ctx *ww,
- u32 alignment,
+ u32 alignment, unsigned int guard,
const struct i915_gtt_view *view,
unsigned int flags)
{
@@ -453,15 +451,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
return ERR_PTR(ret);
/* VT-d may overfetch before/after the vma, so pad with scratch */
- if (intel_scanout_needs_vtd_wa(i915)) {
- unsigned int guard = VTD_GUARD;
-
- if (i915_gem_object_is_tiled(obj))
- guard = max(guard,
- i915_gem_object_get_tile_row_size(obj));
-
- flags |= PIN_OFFSET_GUARD | guard;
- }
+ if (guard)
+ flags |= PIN_OFFSET_GUARD | (guard * I915_GTT_PAGE_SIZE);
/*
* As the user may map the buffer once pinned in the display plane
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index f151640c1d13..7796c4119ef5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -303,7 +303,7 @@ struct i915_execbuffer {
struct intel_gt_buffer_pool_node *batch_pool; /** pool node for batch buffer */
/**
- * Indicate either the size of the hastable used to resolve
+ * Indicate either the size of the hashtable used to resolve
* relocation handles, or if negative that we are using a direct
* index into the execobj[].
*/
@@ -915,7 +915,7 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
*/
if (i915_gem_context_uses_protected_content(eb->gem_context) &&
i915_gem_object_is_protected(obj)) {
- err = intel_pxp_key_check(eb->i915->pxp, intel_bo_to_drm_bo(obj), true);
+ err = intel_pxp_key_check(intel_bo_to_drm_bo(obj), true);
if (err) {
i915_gem_object_put(obj);
return ERR_PTR(err);
@@ -2543,7 +2543,7 @@ static int eb_pin_timeline(struct i915_execbuffer *eb, struct intel_context *ce,
/*
* Error path, cannot use intel_context_timeline_lock as
- * that is user interruptable and this clean up step
+ * that is user interruptible and this clean up step
* must be done.
*/
mutex_lock(&ce->timeline->mutex);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 21274aa9bddd..c3dabb857960 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -164,6 +164,9 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
* 4 - Support multiple fault handlers per object depending on object's
* backing storage (a.k.a. MMAP_OFFSET).
*
+ * 5 - Support multiple partial mmaps (mmap part of BO + unmap an offset, multiple
+ * times with different size and offset).
+ *
* Restrictions:
*
* * snoopable objects cannot be accessed via the GTT. It can cause machine
@@ -191,7 +194,7 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
*/
int i915_gem_mmap_gtt_version(void)
{
- return 4;
+ return 5;
}
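The version bump is what userspace observes through the I915_PARAM_MMAP_GTT_VERSION getparam. A hedged userspace sketch of querying it; it assumes libdrm headers and a render node at the usual path, and trims error handling:

#include <stdio.h>
#include <fcntl.h>
#include <xf86drm.h>
#include <i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_MMAP_GTT_VERSION,
		.value = &value,
	};

	if (fd >= 0 && drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("GTT mmap version: %d\n", value);
	return 0;
}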
static inline struct i915_gtt_view
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 58e6c680fe0d..356530b599ce 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -873,6 +873,30 @@ bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
return lmem_placement;
}
+static int i915_gem_vmap_object(struct drm_gem_object *gem_obj,
+ struct iosys_map *map)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+ void *vaddr;
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ iosys_map_set_vaddr(map, vaddr);
+
+ return 0;
+}
+
+static void i915_gem_vunmap_object(struct drm_gem_object *gem_obj,
+ struct iosys_map *map)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+}
+
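Hooking .vmap/.vunmap into i915_gem_object_funcs lets generic DRM code obtain a kernel mapping of an i915 BO through the common iosys_map interface. A hedged sketch of what a generic caller could look like; drm_gem_vmap_unlocked()/drm_gem_vunmap_unlocked() and iosys_map_memcpy_to() are existing generic DRM helpers rather than something this patch adds, and locking/lifetime rules are trimmed:

#include <linux/iosys-map.h>
#include <drm/drm_gem.h>

/* Sketch: copy a blob into a GEM object via the generic vmap path that the
 * new i915 .vmap/.vunmap hooks service. */
static int copy_into_bo(struct drm_gem_object *obj, const void *src, size_t len)
{
	struct iosys_map map;
	int ret;

	ret = drm_gem_vmap_unlocked(obj, &map);
	if (ret)
		return ret;

	iosys_map_memcpy_to(&map, 0, src, len);

	drm_gem_vunmap_unlocked(obj, &map);
	return 0;
}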
void i915_gem_init__objects(struct drm_i915_private *i915)
{
INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
@@ -896,6 +920,8 @@ static const struct drm_gem_object_funcs i915_gem_object_funcs = {
.free = i915_gem_free_object,
.close = i915_gem_close_object,
.export = i915_gem_prime_export,
+ .vmap = i915_gem_vmap_object,
+ .vunmap = i915_gem_vunmap_object,
};
/**
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index bb713e096db2..a5f34542135c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -776,7 +776,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
struct i915_gem_ww_ctx *ww,
- u32 alignment,
+ u32 alignment, unsigned int guard,
const struct i915_gtt_view *view,
unsigned int flags);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index b09b74a2448b..636768d0f57e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -82,7 +82,7 @@ __i915_gem_object_create_region(struct intel_memory_region *mem,
/*
* Anything smaller than the min_page_size can't be freely inserted into
- * the GTT, due to alignemnt restrictions. For such special objects,
+ * the GTT, due to alignment restrictions. For such special objects,
* make sure we force memcpy based suspend-resume. In the future we can
* revisit this, either by allowing special mis-aligned objects in the
* migration path, or by mapping all of LMEM upfront using cheap 1G
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 9117e9422844..aec41f0f098f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -25,7 +25,7 @@ static bool swap_available(void)
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
- /* Consider only shrinkable ojects. */
+ /* Consider only shrinkable objects. */
if (!i915_gem_object_is_shrinkable(obj))
return false;
@@ -261,7 +261,7 @@ skip:
* i915_gem_shrink_all - Shrink buffer object caches completely
* @i915: i915 device
*
- * This is a simple wraper around i915_gem_shrink() to aggressively shrink all
+ * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
* caches completely. It also first waits for and retires all outstanding
* requests to also be able to release backing storage for active objects.
*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index d9eb84c1d2f1..5ac23ff3feff 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -39,7 +39,7 @@
* Since neither of this applies for new tiling layouts on modern platforms like
* W, Ys and Yf tiling GEM only allows object tiling to be set to X or Y tiled.
* Anything else can be handled in userspace entirely without the kernel's
- * invovlement.
+ * involvement.
*/
/**
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 10d8673641f7..1f4814968868 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -994,7 +994,7 @@ void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
* If we need to place an LMEM resource which doesn't need CPU
* access then we should try not to victimize mappable objects
* first, since we likely end up stealing more of the mappable
- * portion. And likewise when we try to find space for a mappble
+ * portion. And likewise when we try to find space for a mappable
* object, we know not to ever victimize objects that don't
* occupy any mappable pages.
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index 041dab543b78..2f6b33edb9c9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -603,7 +603,7 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
* sequence, where at the end we can do the move for real.
*
* The special case here is when the dst_mem is TTM_PL_SYSTEM,
- * which doens't require any kind of move, so it should be safe
+ * which doesn't require any kind of move, so it should be safe
* to skip all the below and call ttm_bo_move_null() here, where
* the caller in __i915_ttm_get_pages() will take care of the
* rest, since we should have a valid ttm_tt.
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 84d41e6ccf05..bd08605a1611 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -1781,7 +1781,7 @@ static int igt_tmpfs_fallback(void *arg)
/*
* Make sure that we don't burst into a ball of flames upon falling back
- * to tmpfs, which we rely on if on the off-chance we encouter a failure
+ * to tmpfs, which we rely on if on the off-chance we encounter a failure
* when setting up gemfs.
*/
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 99a9ade73956..804f74084bd4 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -1342,7 +1342,7 @@ static int igt_mmap_migrate(void *arg)
}
/*
- * Allocate in the mappable portion, should be no suprises here.
+ * Allocate in the mappable portion, should be no surprises here.
*/
err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), mr, 0);
if (err)
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index 2b0327cc47c2..fd8babb513e5 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -108,44 +108,6 @@ err_ctx:
}
struct i915_gem_context *
-live_context_for_engine(struct intel_engine_cs *engine, struct file *file)
-{
- struct i915_gem_engines *engines;
- struct i915_gem_context *ctx;
- struct intel_sseu null_sseu = {};
- struct intel_context *ce;
-
- engines = alloc_engines(1);
- if (!engines)
- return ERR_PTR(-ENOMEM);
-
- ctx = live_context(engine->i915, file);
- if (IS_ERR(ctx)) {
- __free_engines(engines, 0);
- return ctx;
- }
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- __free_engines(engines, 0);
- return ERR_CAST(ce);
- }
-
- intel_context_set_gem(ce, ctx, null_sseu);
- engines->engines[0] = ce;
- engines->num_engines = 1;
-
- mutex_lock(&ctx->engines_mutex);
- i915_gem_context_set_user_engines(ctx);
- engines = rcu_replace_pointer(ctx->engines, engines, 1);
- mutex_unlock(&ctx->engines_mutex);
-
- engines_idle_release(ctx, engines);
-
- return ctx;
-}
-
-struct i915_gem_context *
kernel_context(struct drm_i915_private *i915,
struct i915_address_space *vm)
{
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.h b/drivers/gpu/drm/i915/gem/selftests/mock_context.h
index 7a02fd9b5866..bc8fb37d2d24 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.h
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.h
@@ -23,9 +23,6 @@ void mock_context_close(struct i915_gem_context *ctx);
struct i915_gem_context *
live_context(struct drm_i915_private *i915, struct file *file);
-struct i915_gem_context *
-live_context_for_engine(struct intel_engine_cs *engine, struct file *file);
-
struct i915_gem_context *kernel_context(struct drm_i915_private *i915,
struct i915_address_space *vm);
void kernel_context_close(struct i915_gem_context *ctx);
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
index 075657018739..5cd58e0f0dcf 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
@@ -103,8 +103,7 @@ static struct dma_buf *mock_dmabuf(int npages)
struct dma_buf *dmabuf;
int i;
- mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *),
- GFP_KERNEL);
+ mock = kmalloc(struct_size(mock, pages, npages), GFP_KERNEL);
if (!mock)
return ERR_PTR(-ENOMEM);
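struct_size(mock, pages, npages) replaces the open-coded sizeof arithmetic and adds overflow checking for the trailing flexible array. A small userspace illustration of the size it computes (minus the overflow checks), using a stand-in struct:

#include <stdio.h>
#include <stdlib.h>

struct mock_buf {
	int npages;
	void *pages[];		/* flexible array member, like the mock dmabuf */
};

int main(void)
{
	size_t npages = 4;
	/* Roughly what struct_size(mock, pages, npages) evaluates to. */
	size_t sz = sizeof(struct mock_buf) + npages * sizeof(void *);
	struct mock_buf *mock = malloc(sz);

	if (!mock)
		return 1;
	mock->npages = (int)npages;
	printf("allocation size: %zu bytes\n", sz);
	free(mock);
	return 0;
}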
diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
index 4904d0f4162c..8116fd5987e2 100644
--- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
@@ -179,7 +179,7 @@ u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
return __gen2_emit_breadcrumb(rq, cs, 8, 8);
}
-/* Just userspace ABI convention to limit the wa batch bo to a resonable size */
+/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT SZ_256K
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES * SZ_4K, I830_BATCH_LIMIT)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 4d30a86016f2..b721bbd23356 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -308,7 +308,7 @@ u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
/*
* There is a discrepancy here between the size reported
* by the register and the size of the context layout
- * in the docs. Both are described as authorative!
+ * in the docs. Both are described as authoritative!
*
* The discrepancy is on the order of a few cachelines,
* but the total is under one page (4k), which is our
@@ -677,7 +677,7 @@ void intel_engines_release(struct intel_gt *gt)
* in case we aborted before completely initialising the engines.
*/
GEM_BUG_ON(intel_gt_pm_is_awake(gt));
- if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ if (!intel_gt_gpu_reset_clobbers_display(gt))
intel_gt_reset_all_engines(gt);
/* Decouple the backend; but keep the layout for late GPU resets */
@@ -769,9 +769,8 @@ static void engine_mask_apply_media_fuses(struct intel_gt *gt)
if (MEDIA_VER_FULL(i915) < IP_VER(12, 55))
media_fuse = ~media_fuse;
- vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
- vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
- GEN11_GT_VEBOX_DISABLE_SHIFT;
+ vdbox_mask = REG_FIELD_GET(GEN11_GT_VDBOX_DISABLE_MASK, media_fuse);
+ vebox_mask = REG_FIELD_GET(GEN11_GT_VEBOX_DISABLE_MASK, media_fuse);
if (MEDIA_VER_FULL(i915) >= IP_VER(12, 55)) {
fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1);
@@ -845,7 +844,7 @@ static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
* Note that we have a catch-22 situation where we need to be able to access
* the blitter forcewake domain to read the engine fuses, but at the same time
* we need to know which engines are available on the system to know which
- * forcewake domains are present. We solve this by intializing the forcewake
+ * forcewake domains are present. We solve this by initializing the forcewake
* domains based on the full engine mask in the platform capabilities before
* calling this function and pruning the domains for fused-off engines
* afterwards.
@@ -1411,7 +1410,7 @@ create_ggtt_bind_context(struct intel_engine_cs *engine)
/*
* MI_UPDATE_GTT can insert up to 511 PTE entries and there could be multiple
- * bind requets at a time so get a bigger ring.
+ * bind requests at a time so get a bigger ring.
*/
return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_512K,
I915_GEM_HWS_GGTT_BIND_ADDR,
@@ -1533,7 +1532,7 @@ int intel_engines_init(struct intel_gt *gt)
/**
* intel_engine_cleanup_common - cleans up the engine state created by
- * the common initiailizers.
+ * the common initializers.
* @engine: Engine to cleanup.
*
* This cleans up everything created by the common helpers.
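
Several hunks in this file (and in later files of this series) replace open-coded "(reg & MASK) >> SHIFT" with REG_FIELD_GET(MASK, reg), which derives the shift from the mask itself. A standalone sketch of the semantics, using local stand-in macros rather than the i915 definitions:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_GENMASK(hi, lo) \
        ((~UINT32_C(0) << (lo)) & (~UINT32_C(0) >> (31 - (hi))))

/* Mask and shift in one step; the shift is the mask's trailing-zero count. */
static uint32_t sketch_field_get(uint32_t mask, uint32_t reg)
{
        return (reg & mask) >> __builtin_ctz(mask);
}

int main(void)
{
        uint32_t vebox_disable_mask = SKETCH_GENMASK(19, 16);   /* bits 19:16 */
        uint32_t media_fuse = 0x000a00ffu;                      /* made-up register value */

        /* Same result as the old "(fuse & MASK) >> SHIFT" open coding: 0xa. */
        printf("vebox disable = 0x%x\n",
               sketch_field_get(vebox_disable_mask, media_fuse));
        return 0;
}
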
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index fe1f85e5dda3..155b6255a63e 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -237,7 +237,7 @@ struct intel_engine_execlists {
*/
struct i915_request * const *active;
/**
- * @inflight: the set of contexts submitted and acknowleged by HW
+ * @inflight: the set of contexts submitted and acknowledged by HW
*
* The set of inflight contexts is managed by reading CS events
* from the HW. On a context-switch event (not preemption), we
@@ -260,7 +260,7 @@ struct intel_engine_execlists {
unsigned int port_mask;
/**
- * @virtual: Queue of requets on a virtual engine, sorted by priority.
+ * @virtual: Queue of requests on a virtual engine, sorted by priority.
* Each RB entry is a struct i915_priolist containing a list of requests
* of the same priority.
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 0ffba50981e3..0c723e7c71a2 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -328,6 +328,7 @@ static bool fence_is_active(const struct i915_fence_reg *fence)
static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
{
+ struct intel_display *display = &ggtt->vm.i915->display;
struct i915_fence_reg *active = NULL;
struct i915_fence_reg *fence, *fn;
@@ -353,7 +354,7 @@ static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
}
/* Wait for completion of pending flips which consume fences */
- if (intel_has_pending_fb_unpin(ggtt->vm.i915))
+ if (intel_has_pending_fb_unpin(display))
return ERR_PTR(-EAGAIN);
return ERR_PTR(-ENOBUFS);
@@ -749,7 +750,7 @@ static void swizzle_page(struct page *page)
char *vaddr;
int i;
- vaddr = kmap(page);
+ vaddr = kmap_local_page(page);
for (i = 0; i < PAGE_SIZE; i += 128) {
memcpy(temp, &vaddr[i], 64);
@@ -757,7 +758,7 @@ static void swizzle_page(struct page *page)
memcpy(&vaddr[i + 64], temp, 64);
}
- kunmap(page);
+ kunmap_local(vaddr);
}
/**
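
The swizzle_page() change above is the standard kmap()/kunmap() to kmap_local_page()/kunmap_local() conversion (the same pattern appears later in shmem_utils.c): the local mapping is cheaper, strictly scoped to the caller, and torn down with the returned address rather than the page. A minimal sketch of the pattern, with a placeholder function name:

#include <linux/highmem.h>
#include <linux/string.h>

/* Caller guarantees offset + len <= PAGE_SIZE. */
static void copy_from_page_local(struct page *page, void *dst,
                                 size_t offset, size_t len)
{
        void *vaddr;

        vaddr = kmap_local_page(page);  /* short-lived mapping, valid in this context only */
        memcpy(dst, vaddr + offset, len);
        kunmap_local(vaddr);            /* pass the vaddr, not the page */
}
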
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index c4a351ebf395..3d3b1ba76e2b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -302,25 +302,48 @@ static void gen6_check_faults(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- unsigned long fault;
for_each_engine(engine, gt, id) {
+ u32 fault;
+
fault = GEN6_RING_FAULT_REG_READ(engine);
+
if (fault & RING_FAULT_VALID) {
gt_dbg(gt, "Unexpected fault\n"
- "\tAddr: 0x%08lx\n"
+ "\tAddr: 0x%08x\n"
"\tAddress space: %s\n"
- "\tSource ID: %ld\n"
- "\tType: %ld\n",
- fault & PAGE_MASK,
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ fault & RING_FAULT_VADDR_MASK,
fault & RING_FAULT_GTTSEL_MASK ?
"GGTT" : "PPGTT",
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
+ REG_FIELD_GET(RING_FAULT_SRCID_MASK, fault),
+ REG_FIELD_GET(RING_FAULT_FAULT_TYPE_MASK, fault));
}
}
}
+static void gen8_report_fault(struct intel_gt *gt, u32 fault,
+ u32 fault_data0, u32 fault_data1)
+{
+ u64 fault_addr;
+
+ fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
+ ((u64)fault_data0 << 12);
+
+ gt_dbg(gt, "Unexpected fault\n"
+ "\tAddr: 0x%08x_%08x\n"
+ "\tAddress space: %s\n"
+ "\tEngine ID: %d\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ upper_32_bits(fault_addr), lower_32_bits(fault_addr),
+ fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
+ REG_FIELD_GET(RING_FAULT_ENGINE_ID_MASK, fault),
+ REG_FIELD_GET(RING_FAULT_SRCID_MASK, fault),
+ REG_FIELD_GET(RING_FAULT_FAULT_TYPE_MASK, fault));
+}
+
static void xehp_check_faults(struct intel_gt *gt)
{
u32 fault;
@@ -333,28 +356,10 @@ static void xehp_check_faults(struct intel_gt *gt)
* toward the primary instance.
*/
fault = intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG);
- if (fault & RING_FAULT_VALID) {
- u32 fault_data0, fault_data1;
- u64 fault_addr;
-
- fault_data0 = intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA0);
- fault_data1 = intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA1);
-
- fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
- ((u64)fault_data0 << 12);
-
- gt_dbg(gt, "Unexpected fault\n"
- "\tAddr: 0x%08x_%08x\n"
- "\tAddress space: %s\n"
- "\tEngine ID: %d\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- upper_32_bits(fault_addr), lower_32_bits(fault_addr),
- fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
- GEN8_RING_FAULT_ENGINE_ID(fault),
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
- }
+ if (fault & RING_FAULT_VALID)
+ gen8_report_fault(gt, fault,
+ intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA0),
+ intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA1));
}
static void gen8_check_faults(struct intel_gt *gt)
@@ -374,28 +379,10 @@ static void gen8_check_faults(struct intel_gt *gt)
}
fault = intel_uncore_read(uncore, fault_reg);
- if (fault & RING_FAULT_VALID) {
- u32 fault_data0, fault_data1;
- u64 fault_addr;
-
- fault_data0 = intel_uncore_read(uncore, fault_data0_reg);
- fault_data1 = intel_uncore_read(uncore, fault_data1_reg);
-
- fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
- ((u64)fault_data0 << 12);
-
- gt_dbg(gt, "Unexpected fault\n"
- "\tAddr: 0x%08x_%08x\n"
- "\tAddress space: %s\n"
- "\tEngine ID: %d\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- upper_32_bits(fault_addr), lower_32_bits(fault_addr),
- fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
- GEN8_RING_FAULT_ENGINE_ID(fault),
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
- }
+ if (fault & RING_FAULT_VALID)
+ gen8_report_fault(gt, fault,
+ intel_uncore_read(uncore, fault_data0_reg),
+ intel_uncore_read(uncore, fault_data1_reg));
}
void intel_gt_check_and_clear_faults(struct intel_gt *gt)
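
The refactor above pulls the duplicated reporting out of xehp_check_faults() and gen8_check_faults() into gen8_report_fault(); the interesting part is the address reconstruction, where FAULT_TLB_DATA0 carries the page-aligned address bits and the low nibble of FAULT_TLB_DATA1 supplies the bits above 43. A userspace sketch with made-up register values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define FAULT_VA_HIGH_BITS      0xfu            /* bits 3:0 of FAULT_TLB_DATA1 */

static uint64_t fault_addr(uint32_t data0, uint32_t data1)
{
        return ((uint64_t)(data1 & FAULT_VA_HIGH_BITS) << 44) |
               ((uint64_t)data0 << 12);
}

int main(void)
{
        /* data0 = 0x12345, data1 low nibble = 0x3 -> 0x3000_1234_5000 */
        printf("fault addr = 0x%" PRIx64 "\n", fault_addr(0x12345, 0x3));
        return 0;
}
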
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
index 6e63505fe478..6c499692d61e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
@@ -35,9 +35,7 @@ static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore,
u32 f24_mhz = 24000000;
u32 f25_mhz = 25000000;
u32 f38_4_mhz = 38400000;
- u32 crystal_clock =
- (rpm_config_reg & GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
- GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
+ u32 crystal_clock = rpm_config_reg & GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK;
switch (crystal_clock) {
case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
@@ -80,8 +78,7 @@ static u32 gen11_read_clock_frequency(struct intel_uncore *uncore)
* register increments from this frequency (it might
* increment only every few clock cycle).
*/
- freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
- GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
+ freq >>= 3 - REG_FIELD_GET(GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, c0);
}
return freq;
@@ -102,8 +99,7 @@ static u32 gen9_read_clock_frequency(struct intel_uncore *uncore)
* register increments from this frequency (it might
* increment only every few clock cycle).
*/
- freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
- CTC_SHIFT_PARAMETER_SHIFT);
+ freq >>= 3 - REG_FIELD_GET(CTC_SHIFT_PARAMETER_MASK, ctc_reg);
}
return freq;
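
The REG_FIELD_GET() conversions above keep the same arithmetic: the shift parameter from the config register says how far the crystal frequency is divided down to the timestamp frequency. A worked example with illustrative numbers, not values read from hardware:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t crystal_hz = 38400000;         /* assume a 38.4 MHz reference */
        uint32_t ctc_shift_parameter = 1;       /* field extracted from the config register */

        /* Same formula as the driver: freq >>= 3 - shift parameter, i.e. 9.6 MHz here. */
        printf("timestamp freq = %u Hz\n", crystal_hz >> (3 - ctc_shift_parameter));
        return 0;
}
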
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 1240d44eeb85..75e802e10be2 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -480,7 +480,7 @@ void gen8_gt_irq_postinstall(struct intel_gt *gt)
gen2_irq_init(uncore, GEN8_GT_IRQ_REGS(1), ~gt_interrupts[1], gt_interrupts[1]);
/*
* RPS interrupts will get enabled/disabled on demand when RPS itself
- * is enabled/disabled. Same wil be the case for GuC interrupts.
+ * is enabled/disabled. Same will be the case for GuC interrupts.
*/
gen2_irq_init(uncore, GEN8_GT_IRQ_REGS(2), gt->pm_imr, gt->pm_ier);
gen2_irq_init(uncore, GEN8_GT_IRQ_REGS(3), ~gt_interrupts[3], gt_interrupts[3]);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
index b8912bd6c08e..a60822e2b5d4 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
@@ -121,9 +121,8 @@ void intel_gt_mcr_init(struct intel_gt *gt)
gt->info.mslice_mask =
intel_slicemask_from_xehp_dssmask(gt->info.sseu.subslice_mask,
GEN_DSS_PER_MSLICE);
- gt->info.mslice_mask |=
- (intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
- GEN12_MEML3_EN_MASK);
+ gt->info.mslice_mask |= REG_FIELD_GET(GEN12_MEML3_EN_MASK,
+ intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3));
if (!gt->info.mslice_mask) /* should be impossible! */
gt_warn(gt, "mslice mask all zero!\n");
@@ -239,7 +238,7 @@ static u32 rw_with_mcr_steering_fw(struct intel_gt *gt,
* to remain in multicast mode for reads. There's no real
* downside to this, so we'll just go ahead and do so on all
* platforms; we'll only clear the multicast bit from the mask
- * when exlicitly doing a write operation.
+ * when explicitly doing a write operation.
*/
if (rw_flag == FW_REG_WRITE)
mcr_mask |= GEN11_MCR_MULTICAST;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index c08fdb65cc69..3182f19b9837 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -70,6 +70,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
{
struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
struct drm_i915_private *i915 = gt->i915;
+ struct intel_display *display = &i915->display;
GT_TRACE(gt, "\n");
@@ -84,7 +85,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
* Work around it by grabbing a GT IRQ power domain whilst there is any
* GT activity, preventing any DC state transitions.
*/
- gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
+ gt->awake = intel_display_power_get(display, POWER_DOMAIN_GT_IRQ);
GEM_BUG_ON(!gt->awake);
intel_rc6_unpark(&gt->rc6);
@@ -103,6 +104,7 @@ static int __gt_park(struct intel_wakeref *wf)
struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
struct drm_i915_private *i915 = gt->i915;
+ struct intel_display *display = &i915->display;
GT_TRACE(gt, "\n");
@@ -120,7 +122,7 @@ static int __gt_park(struct intel_wakeref *wf)
/* Defer dropping the display power well for 100ms, it's slow! */
GEM_BUG_ON(!wakeref);
- intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
+ intel_display_power_put_async(display, POWER_DOMAIN_GT_IRQ, wakeref);
return 0;
}
@@ -156,7 +158,7 @@ void intel_gt_pm_init(struct intel_gt *gt)
static bool reset_engines(struct intel_gt *gt)
{
- if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ if (intel_gt_gpu_reset_clobbers_display(gt))
return false;
return intel_gt_reset_all_engines(gt) == 0;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index 6dba65e54cdb..7421ed18d8d1 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -30,18 +30,15 @@
/* RPM unit config (Gen8+) */
#define RPM_CONFIG0 _MMIO(0xd00)
-#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT 3
-#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK (1 << GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT)
-#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ 0
-#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ 1
-#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT 3
-#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK (0x7 << GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT)
-#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ 0
-#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ 1
-#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ 2
-#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ 3
-#define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT 1
-#define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK (0x3 << GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT)
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK REG_GENMASK(5, 3)
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ REG_FIELD_PREP(GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK, 0)
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ REG_FIELD_PREP(GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK, 1)
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ REG_FIELD_PREP(GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK, 2)
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ REG_FIELD_PREP(GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK, 3)
+#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK REG_BIT(3)
+#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ REG_FIELD_PREP(GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK, 0)
+#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ REG_FIELD_PREP(GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK, 1)
+#define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK REG_GENMASK(2, 1)
#define RPM_CONFIG1 _MMIO(0xd04)
#define GEN10_GT_NOA_ENABLE (1 << 9)
@@ -326,6 +323,12 @@
_RING_FAULT_REG_VCS, \
_RING_FAULT_REG_VECS, \
_RING_FAULT_REG_BCS))
+#define RING_FAULT_VADDR_MASK REG_GENMASK(31, 12) /* pre-bdw */
+#define RING_FAULT_ENGINE_ID_MASK REG_GENMASK(16, 12) /* bdw+ */
+#define RING_FAULT_GTTSEL_MASK REG_BIT(11) /* pre-bdw */
+#define RING_FAULT_SRCID_MASK REG_GENMASK(10, 3)
+#define RING_FAULT_FAULT_TYPE_MASK REG_GENMASK(2, 1) /* ivb+ */
+#define RING_FAULT_VALID REG_BIT(0)
#define ERROR_GEN6 _MMIO(0x40a0)
@@ -385,6 +388,8 @@
#define GEN8_FAULT_TLB_DATA0 _MMIO(0x4b10)
#define GEN8_FAULT_TLB_DATA1 _MMIO(0x4b14)
+#define FAULT_GTT_SEL REG_BIT(4)
+#define FAULT_VA_HIGH_BITS REG_GENMASK(3, 0)
#define GEN11_GACB_PERF_CTRL _MMIO(0x4b80)
#define GEN11_HASH_CTRL_MASK (0x3 << 12 | 0xf << 0)
@@ -409,6 +414,9 @@
#define GEN7_SO_PRIM_STORAGE_NEEDED(n) _MMIO(0x5240 + (n) * 8)
#define GEN7_SO_PRIM_STORAGE_NEEDED_UDW(n) _MMIO(0x5240 + (n) * 8 + 4)
+#define GEN8_WM_CHICKEN2 MCR_REG(0x5584)
+#define WAIT_ON_DEPTH_STALL_DONE_DISABLE REG_BIT(5)
+
#define GEN9_WM_CHICKEN3 _MMIO(0x5588)
#define GEN9_FACTOR_IN_CLR_VAL_HIZ (1 << 9)
@@ -504,11 +512,12 @@
#define GEN11_STATE_CACHE_REDIRECT_TO_CS (1 << 11)
#define GEN9_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + (slice) * 0x4)
+#define GEN9_PGCTL_SS_ACK(subslice) REG_BIT(2 + (subslice) * 2)
+#define GEN9_PGCTL_SLICE_ACK REG_BIT(0)
+
#define GEN10_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + ((slice) / 3) * 0x34 + \
((slice) % 3) * 0x4)
-#define GEN9_PGCTL_SLICE_ACK (1 << 0)
-#define GEN9_PGCTL_SS_ACK(subslice) (1 << (2 + (subslice) * 2))
-#define GEN10_PGCTL_VALID_SS_MASK(slice) ((slice) == 0 ? 0x7F : 0x1F)
+#define GEN10_PGCTL_VALID_SS_MASK(slice) ((slice) == 0 ? REG_GENMASK(6, 0) : REG_GENMASK(4, 0))
#define GEN9_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + (slice) * 0x8)
#define GEN10_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + ((slice) / 3) * 0x30 + \
@@ -516,14 +525,14 @@
#define GEN9_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + (slice) * 0x8)
#define GEN10_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + ((slice) / 3) * 0x30 + \
((slice) % 3) * 0x8)
-#define GEN9_PGCTL_SSA_EU08_ACK (1 << 0)
-#define GEN9_PGCTL_SSA_EU19_ACK (1 << 2)
-#define GEN9_PGCTL_SSA_EU210_ACK (1 << 4)
-#define GEN9_PGCTL_SSA_EU311_ACK (1 << 6)
-#define GEN9_PGCTL_SSB_EU08_ACK (1 << 8)
-#define GEN9_PGCTL_SSB_EU19_ACK (1 << 10)
-#define GEN9_PGCTL_SSB_EU210_ACK (1 << 12)
-#define GEN9_PGCTL_SSB_EU311_ACK (1 << 14)
+#define GEN9_PGCTL_SSB_EU311_ACK REG_BIT(14)
+#define GEN9_PGCTL_SSB_EU210_ACK REG_BIT(12)
+#define GEN9_PGCTL_SSB_EU19_ACK REG_BIT(10)
+#define GEN9_PGCTL_SSB_EU08_ACK REG_BIT(8)
+#define GEN9_PGCTL_SSA_EU311_ACK REG_BIT(6)
+#define GEN9_PGCTL_SSA_EU210_ACK REG_BIT(4)
+#define GEN9_PGCTL_SSA_EU19_ACK REG_BIT(2)
+#define GEN9_PGCTL_SSA_EU08_ACK REG_BIT(0)
#define VF_PREEMPTION _MMIO(0x83a4)
#define PREEMPTION_VERTEX_COUNT REG_GENMASK(15, 0)
@@ -580,7 +589,7 @@
#define GEN10_L3BANK_MASK 0x0F
/* on Xe_HP the same fuses indicates mslices instead of L3 banks */
#define GEN12_MAX_MSLICES 4
-#define GEN12_MEML3_EN_MASK 0x0F
+#define GEN12_MEML3_EN_MASK REG_GENMASK(3, 0)
#define HSW_PAVP_FUSE1 _MMIO(0x911c)
#define XEHP_SFC_ENABLE_MASK REG_GENMASK(27, 24)
@@ -590,37 +599,30 @@
#define HSW_F1_EU_DIS_6EUS 2
#define GEN8_FUSE2 _MMIO(0x9120)
-#define GEN8_F2_SS_DIS_SHIFT 21
-#define GEN8_F2_SS_DIS_MASK (0x7 << GEN8_F2_SS_DIS_SHIFT)
-#define GEN8_F2_S_ENA_SHIFT 25
-#define GEN8_F2_S_ENA_MASK (0x7 << GEN8_F2_S_ENA_SHIFT)
-#define GEN9_F2_SS_DIS_SHIFT 20
-#define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT)
-#define GEN10_F2_S_ENA_SHIFT 22
-#define GEN10_F2_S_ENA_MASK (0x3f << GEN10_F2_S_ENA_SHIFT)
-#define GEN10_F2_SS_DIS_SHIFT 18
-#define GEN10_F2_SS_DIS_MASK (0xf << GEN10_F2_SS_DIS_SHIFT)
+#define GEN10_F2_S_ENA_MASK REG_GENMASK(27, 22)
+#define GEN10_F2_SS_DIS_MASK REG_GENMASK(21, 18)
+#define GEN8_F2_S_ENA_MASK REG_GENMASK(27, 25)
+#define GEN9_F2_SS_DIS_MASK REG_GENMASK(23, 20)
+#define GEN8_F2_SS_DIS_MASK REG_GENMASK(23, 21)
#define GEN8_EU_DISABLE0 _MMIO(0x9134)
#define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice) * 0x4)
#define GEN11_EU_DISABLE _MMIO(0x9134)
-#define GEN8_EU_DIS0_S0_MASK 0xffffff
-#define GEN8_EU_DIS0_S1_SHIFT 24
-#define GEN8_EU_DIS0_S1_MASK (0xff << GEN8_EU_DIS0_S1_SHIFT)
-#define GEN11_EU_DIS_MASK 0xFF
+#define GEN8_EU_DIS0_S1_MASK REG_GENMASK(31, 24)
+#define GEN8_EU_DIS0_S0_MASK REG_GENMASK(23, 0)
+#define GEN11_EU_DIS_MASK REG_GENMASK(7, 0)
#define XEHP_EU_ENABLE _MMIO(0x9134)
-#define XEHP_EU_ENA_MASK 0xFF
+#define XEHP_EU_ENA_MASK REG_GENMASK(7, 0)
#define GEN8_EU_DISABLE1 _MMIO(0x9138)
-#define GEN8_EU_DIS1_S1_MASK 0xffff
-#define GEN8_EU_DIS1_S2_SHIFT 16
-#define GEN8_EU_DIS1_S2_MASK (0xffff << GEN8_EU_DIS1_S2_SHIFT)
+#define GEN8_EU_DIS1_S2_MASK REG_GENMASK(31, 16)
+#define GEN8_EU_DIS1_S1_MASK REG_GENMASK(15, 0)
#define GEN11_GT_SLICE_ENABLE _MMIO(0x9138)
-#define GEN11_GT_S_ENA_MASK 0xFF
+#define GEN11_GT_S_ENA_MASK REG_GENMASK(7, 0)
#define GEN8_EU_DISABLE2 _MMIO(0x913c)
-#define GEN8_EU_DIS2_S2_MASK 0xff
+#define GEN8_EU_DIS2_S2_MASK REG_GENMASK(7, 0)
#define GEN11_GT_SUBSLICE_DISABLE _MMIO(0x913c)
#define GEN12_GT_GEOMETRY_DSS_ENABLE _MMIO(0x913c)
@@ -628,9 +630,8 @@
#define GEN10_EU_DISABLE3 _MMIO(0x9140)
#define GEN10_EU_DIS_SS_MASK 0xff
#define GEN11_GT_VEBOX_VDBOX_DISABLE _MMIO(0x9140)
-#define GEN11_GT_VDBOX_DISABLE_MASK 0xff
-#define GEN11_GT_VEBOX_DISABLE_SHIFT 16
-#define GEN11_GT_VEBOX_DISABLE_MASK (0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT)
+#define GEN11_GT_VEBOX_DISABLE_MASK REG_GENMASK(19, 16)
+#define GEN11_GT_VDBOX_DISABLE_MASK REG_GENMASK(7, 0)
#define GEN12_GT_COMPUTE_DSS_ENABLE _MMIO(0x9144)
#define XEHPC_GT_COMPUTE_DSS_ENABLE_EXT _MMIO(0x9148)
@@ -878,11 +879,10 @@
/* GPM unit config (Gen9+) */
#define CTC_MODE _MMIO(0xa26c)
-#define CTC_SOURCE_PARAMETER_MASK 1
-#define CTC_SOURCE_CRYSTAL_CLOCK 0
-#define CTC_SOURCE_DIVIDE_LOGIC 1
-#define CTC_SHIFT_PARAMETER_SHIFT 1
-#define CTC_SHIFT_PARAMETER_MASK (0x3 << CTC_SHIFT_PARAMETER_SHIFT)
+#define CTC_SHIFT_PARAMETER_MASK REG_GENMASK(2, 1)
+#define CTC_SOURCE_PARAMETER_MASK REG_BIT(0)
+#define CTC_SOURCE_CRYSTAL_CLOCK REG_FIELD_PREP(CTC_SOURCE_PARAMETER_MASK, 0)
+#define CTC_SOURCE_DIVIDE_LOGIC REG_FIELD_PREP(CTC_SOURCE_PARAMETER_MASK, 1)
/* GPM MSG_IDLE */
#define MSG_IDLE_CS _MMIO(0x8000)
@@ -926,12 +926,12 @@
#define CHV_POWER_SS0_SIG1 _MMIO(0xa720)
#define CHV_POWER_SS0_SIG2 _MMIO(0xa724)
#define CHV_POWER_SS1_SIG1 _MMIO(0xa728)
-#define CHV_SS_PG_ENABLE (1 << 1)
-#define CHV_EU08_PG_ENABLE (1 << 9)
-#define CHV_EU19_PG_ENABLE (1 << 17)
-#define CHV_EU210_PG_ENABLE (1 << 25)
+#define CHV_EU210_PG_ENABLE REG_BIT(25)
+#define CHV_EU19_PG_ENABLE REG_BIT(17)
+#define CHV_EU08_PG_ENABLE REG_BIT(9)
+#define CHV_SS_PG_ENABLE REG_BIT(1)
#define CHV_POWER_SS1_SIG2 _MMIO(0xa72c)
-#define CHV_EU311_PG_ENABLE (1 << 1)
+#define CHV_EU311_PG_ENABLE REG_BIT(1)
#define GEN7_SARCHKMD _MMIO(0xb000)
#define GEN7_DISABLE_DEMAND_PREFETCH (1 << 31)
@@ -1035,17 +1035,12 @@
#define XEHP_FAULT_TLB_DATA0 MCR_REG(0xceb8)
#define GEN12_FAULT_TLB_DATA1 _MMIO(0xcebc)
#define XEHP_FAULT_TLB_DATA1 MCR_REG(0xcebc)
-#define FAULT_VA_HIGH_BITS (0xf << 0)
-#define FAULT_GTT_SEL (1 << 4)
+/* see GEN8_FAULT_TLB_DATA0/1 */
#define GEN12_RING_FAULT_REG _MMIO(0xcec4)
#define XEHP_RING_FAULT_REG MCR_REG(0xcec4)
#define XELPMP_RING_FAULT_REG _MMIO(0xcec4)
-#define GEN8_RING_FAULT_ENGINE_ID(x) (((x) >> 12) & 0x7)
-#define RING_FAULT_GTTSEL_MASK (1 << 11)
-#define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff)
-#define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
-#define RING_FAULT_VALID (1 << 0)
+/* see GEN8_RING_FAULT_REG */
#define GEN12_GFX_TLB_INV_CR _MMIO(0xced8)
#define XEHP_GFX_TLB_INV_CR MCR_REG(0xced8)
@@ -1434,16 +1429,12 @@
#define XEHP_CCS_MODE_CSLICE(cslice, ccs) (ccs << (cslice * XEHP_CCS_MODE_CSLICE_WIDTH))
#define CHV_FUSE_GT _MMIO(VLV_GUNIT_BASE + 0x2168)
-#define CHV_FGT_DISABLE_SS0 (1 << 10)
-#define CHV_FGT_DISABLE_SS1 (1 << 11)
-#define CHV_FGT_EU_DIS_SS0_R0_SHIFT 16
-#define CHV_FGT_EU_DIS_SS0_R0_MASK (0xf << CHV_FGT_EU_DIS_SS0_R0_SHIFT)
-#define CHV_FGT_EU_DIS_SS0_R1_SHIFT 20
-#define CHV_FGT_EU_DIS_SS0_R1_MASK (0xf << CHV_FGT_EU_DIS_SS0_R1_SHIFT)
-#define CHV_FGT_EU_DIS_SS1_R0_SHIFT 24
-#define CHV_FGT_EU_DIS_SS1_R0_MASK (0xf << CHV_FGT_EU_DIS_SS1_R0_SHIFT)
-#define CHV_FGT_EU_DIS_SS1_R1_SHIFT 28
-#define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT)
+#define CHV_FGT_EU_DIS_SS1_R1_MASK REG_GENMASK(31, 28)
+#define CHV_FGT_EU_DIS_SS1_R0_MASK REG_GENMASK(27, 24)
+#define CHV_FGT_EU_DIS_SS0_R1_MASK REG_GENMASK(23, 20)
+#define CHV_FGT_EU_DIS_SS0_R0_MASK REG_GENMASK(19, 16)
+#define CHV_FGT_DISABLE_SS1 REG_BIT(11)
+#define CHV_FGT_DISABLE_SS0 REG_BIT(10)
#define BCS_SWCTRL _MMIO(0x22200)
#define BCS_SRC_Y REG_BIT(0)
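
A note on the REG_FIELD_PREP() definitions introduced above (crystal clock selects, CTC source): the value is pre-shifted into its field, so a masked register read compares directly against the define and the old *_SHIFT constants become unnecessary. A small sketch with local stand-in macros:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_GENMASK(hi, lo) \
        ((~UINT32_C(0) << (lo)) & (~UINT32_C(0) >> (31 - (hi))))
#define SKETCH_FIELD_PREP(mask, val) \
        (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define CRYSTAL_CLOCK_FREQ_MASK         SKETCH_GENMASK(5, 3)
#define CRYSTAL_CLOCK_FREQ_38_4_MHZ     SKETCH_FIELD_PREP(CRYSTAL_CLOCK_FREQ_MASK, 2)

int main(void)
{
        uint32_t rpm_config = 0x10;     /* made-up read: field value 2 at bits 5:3 */

        if ((rpm_config & CRYSTAL_CLOCK_FREQ_MASK) == CRYSTAL_CLOCK_FREQ_38_4_MHZ)
                printf("38.4 MHz reference selected\n");
        return 0;
}
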
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
index d7784650e4d9..1154cd2b7c34 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
@@ -464,6 +464,45 @@ static ssize_t slpc_ignore_eff_freq_store(struct kobject *kobj,
return err ?: count;
}
+static ssize_t slpc_power_profile_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+ struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+
+ switch (slpc->power_profile) {
+ case SLPC_POWER_PROFILES_BASE:
+ return sysfs_emit(buff, "[%s] %s\n", "base", "power_saving");
+ case SLPC_POWER_PROFILES_POWER_SAVING:
+ return sysfs_emit(buff, "%s [%s]\n", "base", "power_saving");
+ }
+
+ return sysfs_emit(buff, "%u\n", slpc->power_profile);
+}
+
+static ssize_t slpc_power_profile_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+ struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ char power_saving[] = "power_saving";
+ char base[] = "base";
+ int err;
+ u32 val;
+
+ if (!strncmp(buff, power_saving, sizeof(power_saving) - 1))
+ val = SLPC_POWER_PROFILES_POWER_SAVING;
+ else if (!strncmp(buff, base, sizeof(base) - 1))
+ val = SLPC_POWER_PROFILES_BASE;
+ else
+ return -EINVAL;
+
+ err = intel_guc_slpc_set_power_profile(slpc, val);
+ return err ?: count;
+}
+
struct intel_gt_bool_throttle_attr {
struct attribute attr;
ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
@@ -668,6 +707,7 @@ INTEL_GT_ATTR_RO(media_RP0_freq_mhz);
INTEL_GT_ATTR_RO(media_RPn_freq_mhz);
INTEL_GT_ATTR_RW(slpc_ignore_eff_freq);
+INTEL_GT_ATTR_RW(slpc_power_profile);
static const struct attribute *media_perf_power_attrs[] = {
&attr_media_freq_factor.attr,
@@ -864,6 +904,13 @@ void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj)
gt_warn(gt, "failed to create ignore_eff_freq sysfs (%pe)", ERR_PTR(ret));
}
+ if (intel_uc_uses_guc_slpc(&gt->uc)) {
+ ret = sysfs_create_file(kobj, &attr_slpc_power_profile.attr);
+ if (ret)
+ gt_warn(gt, "failed to create slpc_power_profile sysfs (%pe)",
+ ERR_PTR(ret));
+ }
+
if (i915_mmio_reg_valid(intel_gt_perf_limit_reasons_reg(gt))) {
ret = sysfs_create_files(kobj, throttle_reason_attrs);
if (ret)
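
The new slpc_power_profile attribute above accepts the strings "base" and "power_saving" and shows the active profile in brackets on read. A userspace usage sketch; the sysfs path is a guess at the usual per-gt layout (card and gt indices vary) and is not stated in this patch:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* Illustrative path; adjust card/gt index for the actual system. */
        const char *path = "/sys/class/drm/card0/gt/gt0/slpc_power_profile";
        const char *profile = "power_saving";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, profile, strlen(profile)) < 0)
                perror("write");
        close(fd);
        return 0;
}
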
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
index 6f7af4077135..aff5aca591e6 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -304,7 +304,7 @@ struct intel_context *intel_migrate_create_context(struct intel_migrate *m)
struct intel_context *ce;
/*
- * We randomly distribute contexts across the engines upon constrction,
+ * We randomly distribute contexts across the engines upon construction,
* as they all share the same pinned vm, and so in order to allow
* multiple blits to run in parallel, we must construct each blit
* to use a different range of the vm for its GTT. This has to be
@@ -646,7 +646,7 @@ calculate_chunk_sz(struct drm_i915_private *i915, bool src_is_lmem,
* When CHUNK_SZ is passed all the pages upto CHUNK_SZ
* will be taken for the blt. in Flat-ccs supported
* platform Smem obj will have more pages than required
- * for main meory hence limit it to the required size
+ * for main memory hence limit it to the required size
* for main memory
*/
return min_t(u64, bytes_to_cpy, CHUNK_SZ);
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index d791d63d49b4..cf41d325712e 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -675,7 +675,7 @@ void intel_mocs_init(struct intel_gt *gt)
__init_mocs_table(gt->uncore, &table, global_mocs_offset());
/*
- * Initialize the L3CC table as part of mocs initalization to make
+ * Initialize the L3CC table as part of mocs initialization to make
* sure the LNCFCMOCSx registers are programmed for the subsequent
* memory transactions including guc transactions
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index aae5a081cb53..dbdcfe130ad4 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -986,7 +986,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
awake = reset_prepare(gt);
/* Even if the GPU reset fails, it should still stop the engines */
- if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ if (!intel_gt_gpu_reset_clobbers_display(gt))
intel_gt_reset_all_engines(gt);
for_each_engine(engine, gt, id)
@@ -1098,7 +1098,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
dma_fence_put(fence);
- /* Restart iteration after droping lock */
+ /* Restart iteration after dropping lock */
spin_lock(&timelines->lock);
tl = list_entry(&timelines->active_list, typeof(*tl), link);
}
@@ -1106,14 +1106,13 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
/* We must reset pending GPU events before restoring our submission */
ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
- if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ if (!intel_gt_gpu_reset_clobbers_display(gt))
ok = intel_gt_reset_all_engines(gt) == 0;
if (!ok) {
/*
* Warn CI about the unrecoverable wedged condition.
* Time for a reboot.
*/
- gt_err(gt, "Unrecoverable wedged condition\n");
add_taint_for_CI(gt->i915, TAINT_WARN);
return false;
}
@@ -1178,6 +1177,13 @@ static int resume(struct intel_gt *gt)
return 0;
}
+bool intel_gt_gpu_reset_clobbers_display(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ return INTEL_INFO(i915)->gpu_reset_clobbers_display;
+}
+
/**
* intel_gt_reset - reset chip after a hang
* @gt: #intel_gt to reset
@@ -1234,7 +1240,7 @@ void intel_gt_reset(struct intel_gt *gt,
goto error;
}
- if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ if (intel_gt_gpu_reset_clobbers_display(gt))
intel_irq_suspend(gt->i915);
if (do_reset(gt, stalled_mask)) {
@@ -1242,7 +1248,7 @@ void intel_gt_reset(struct intel_gt *gt,
goto taint;
}
- if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ if (intel_gt_gpu_reset_clobbers_display(gt))
intel_irq_resume(gt->i915);
intel_overlay_reset(display);
@@ -1265,10 +1271,8 @@ void intel_gt_reset(struct intel_gt *gt,
}
ret = resume(gt);
- if (ret) {
- gt_err(gt, "Failed to resume (%d)\n", ret);
+ if (ret)
goto taint;
- }
finish:
reset_finish(gt, awake);
@@ -1396,6 +1400,11 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
return err;
}
+static void display_reset_modeset_stuck(void *gt)
+{
+ intel_gt_set_wedged(gt);
+}
+
static void intel_gt_reset_global(struct intel_gt *gt,
u32 engine_mask,
const char *reason)
@@ -1413,15 +1422,33 @@ static void intel_gt_reset_global(struct intel_gt *gt,
/* Use a watchdog to ensure that our reset completes */
intel_wedge_on_timeout(&w, gt, 60 * HZ) {
- intel_display_reset_prepare(gt->i915);
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_display *display = &i915->display;
+ bool need_display_reset;
+ bool reset_display;
+
+ need_display_reset = intel_gt_gpu_reset_clobbers_display(gt) &&
+ intel_has_gpu_reset(gt);
+
+ reset_display = intel_display_reset_test(display) ||
+ need_display_reset;
+
+ if (reset_display)
+ reset_display = intel_display_reset_prepare(display,
+ display_reset_modeset_stuck,
+ gt);
intel_gt_reset(gt, engine_mask, reason);
- intel_display_reset_finish(gt->i915);
+ if (reset_display)
+ intel_display_reset_finish(display, !need_display_reset);
}
if (!test_bit(I915_WEDGED, &gt->reset.flags))
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
+ else
+ drm_dev_wedged_event(&gt->i915->drm,
+ DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET);
}
/**
@@ -1482,7 +1509,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
local_bh_disable();
for_each_engine_masked(engine, gt, engine_mask, tmp) {
- BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
+ BUILD_BUG_ON(I915_RESET_BACKOFF >= I915_RESET_ENGINE);
if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
&gt->reset.flags))
continue;
@@ -1611,7 +1638,6 @@ void intel_gt_set_wedged_on_init(struct intel_gt *gt)
set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
/* Wedged on init is non-recoverable */
- gt_err(gt, "Non-recoverable wedged on init\n");
add_taint_for_CI(gt->i915, TAINT_WARN);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h
index c00de353075c..724ea6d64f33 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset.h
@@ -28,6 +28,8 @@ void intel_gt_handle_error(struct intel_gt *gt,
const char *fmt, ...);
#define I915_ERROR_CAPTURE BIT(0)
+bool intel_gt_gpu_reset_clobbers_display(struct intel_gt *gt);
+
void intel_gt_reset(struct intel_gt *gt,
intel_engine_mask_t stalled_mask,
const char *reason);
diff --git a/drivers/gpu/drm/i915/gt/intel_reset_types.h b/drivers/gpu/drm/i915/gt/intel_reset_types.h
index 80351f0a856c..4f5fd393af6f 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset_types.h
@@ -41,8 +41,7 @@ struct intel_reset {
*/
unsigned long flags;
#define I915_RESET_BACKOFF 0
-#define I915_RESET_MODESET 1
-#define I915_RESET_ENGINE 2
+#define I915_RESET_ENGINE 1
#define I915_WEDGED_ON_INIT (BITS_PER_LONG - 3)
#define I915_WEDGED_ON_FINI (BITS_PER_LONG - 2)
#define I915_WEDGED (BITS_PER_LONG - 1)
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 458e29d89978..6e9977b2d180 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -242,7 +242,7 @@ static int xcs_resume(struct intel_engine_cs *engine)
/*
* In case of resets fails because engine resumes from
* incorrect RING_HEAD and then GPU may be then fed
- * to invalid instrcutions, which may lead to unrecoverable
+ * to invalid instructions, which may lead to unrecoverable
* hang. So at first write doesn't succeed then try again.
*/
ENGINE_WRITE_FW(engine, RING_HEAD, ring->head);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index fa304ea088e4..2cfaedb04876 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -1025,6 +1025,10 @@ void intel_rps_boost(struct i915_request *rq)
if (rps_uses_slpc(rps)) {
slpc = rps_to_slpc(rps);
+ /* Waitboost should not be done with power saving profile */
+ if (slpc->power_profile == SLPC_POWER_PROFILES_POWER_SAVING)
+ return;
+
if (slpc->min_freq_softlimit >= slpc->boost_freq)
return;
diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h
index 6507fa3f6d1e..5135b90a2a40 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h
@@ -40,7 +40,7 @@ enum {
/**
* struct intel_rps_freq_caps - rps freq capabilities
* @rp0_freq: non-overclocked max frequency
- * @rp1_freq: "less than" RP0 power/freqency
+ * @rp1_freq: "less than" RP0 power/frequency
* @min_freq: aka RPn, minimum frequency
*
* Freq caps exposed by HW, values are in "hw units" and intel_gpu_freq()
@@ -90,7 +90,7 @@ struct intel_rps {
u8 boost_freq; /* Frequency to request when wait boosting */
u8 idle_freq; /* Frequency to request when we are idle */
u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
- u8 rp1_freq; /* "less than" RP0 power/freqency */
+ u8 rp1_freq; /* "less than" RP0 power/frequency */
u8 rp0_freq; /* Non-overclocked max frequency. */
u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */
diff --git a/drivers/gpu/drm/i915/gt/intel_sa_media.c b/drivers/gpu/drm/i915/gt/intel_sa_media.c
index 8c1dbcbcbc4f..2945526d52d1 100644
--- a/drivers/gpu/drm/i915/gt/intel_sa_media.c
+++ b/drivers/gpu/drm/i915/gt/intel_sa_media.c
@@ -27,7 +27,7 @@ int intel_sa_mediagt_setup(struct intel_gt *gt, phys_addr_t phys_addr,
/*
* Standalone media shares the general MMIO space with the primary
- * GT. We'll re-use the primary GT's mapping.
+ * GT. We'll reuse the primary GT's mapping.
*/
uncore->regs = intel_uncore_regs(&i915->uncore);
if (drm_WARN_ON(&i915->drm, uncore->regs == NULL))
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index c8fadf58d836..9501d323d0d3 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -236,7 +236,8 @@ static void xehp_sseu_info_init(struct intel_gt *gt)
GEN12_GT_COMPUTE_DSS_ENABLE,
XEHPC_GT_COMPUTE_DSS_ENABLE_EXT);
- eu_en_fuse = intel_uncore_read(uncore, XEHP_EU_ENABLE) & XEHP_EU_ENA_MASK;
+ eu_en_fuse = REG_FIELD_GET(XEHP_EU_ENA_MASK,
+ intel_uncore_read(uncore, XEHP_EU_ENABLE));
if (HAS_ONE_EU_PER_FUSE_BIT(gt->i915))
eu_en = eu_en_fuse;
@@ -269,15 +270,15 @@ static void gen12_sseu_info_init(struct intel_gt *gt)
* Although gen12 architecture supported multiple slices, TGL, RKL,
* DG1, and ADL only had a single slice.
*/
- s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
- GEN11_GT_S_ENA_MASK;
+ s_en = REG_FIELD_GET(GEN11_GT_S_ENA_MASK,
+ intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE));
drm_WARN_ON(&gt->i915->drm, s_en != 0x1);
g_dss_en = intel_uncore_read(uncore, GEN12_GT_GEOMETRY_DSS_ENABLE);
/* one bit per pair of EUs */
- eu_en_fuse = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
- GEN11_EU_DIS_MASK);
+ eu_en_fuse = ~REG_FIELD_GET(GEN11_EU_DIS_MASK,
+ intel_uncore_read(uncore, GEN11_EU_DISABLE));
for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++)
if (eu_en_fuse & BIT(eu))
@@ -306,14 +307,14 @@ static void gen11_sseu_info_init(struct intel_gt *gt)
* Although gen11 architecture supported multiple slices, ICL and
* EHL/JSL only had a single slice in practice.
*/
- s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
- GEN11_GT_S_ENA_MASK;
+ s_en = REG_FIELD_GET(GEN11_GT_S_ENA_MASK,
+ intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE));
drm_WARN_ON(&gt->i915->drm, s_en != 0x1);
ss_en = ~intel_uncore_read(uncore, GEN11_GT_SUBSLICE_DISABLE);
- eu_en = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
- GEN11_EU_DIS_MASK);
+ eu_en = ~REG_FIELD_GET(GEN11_EU_DIS_MASK,
+ intel_uncore_read(uncore, GEN11_EU_DISABLE));
gen11_compute_sseu_info(sseu, ss_en, eu_en);
@@ -335,10 +336,8 @@ static void cherryview_sseu_info_init(struct intel_gt *gt)
if (!(fuse & CHV_FGT_DISABLE_SS0)) {
u8 disabled_mask =
- ((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >>
- CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
- (((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
- CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);
+ REG_FIELD_GET(CHV_FGT_EU_DIS_SS0_R0_MASK, fuse) |
+ REG_FIELD_GET(CHV_FGT_EU_DIS_SS0_R1_MASK, fuse) << hweight32(CHV_FGT_EU_DIS_SS0_R0_MASK);
sseu->subslice_mask.hsw[0] |= BIT(0);
sseu_set_eus(sseu, 0, 0, ~disabled_mask & 0xFF);
@@ -346,10 +345,8 @@ static void cherryview_sseu_info_init(struct intel_gt *gt)
if (!(fuse & CHV_FGT_DISABLE_SS1)) {
u8 disabled_mask =
- ((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >>
- CHV_FGT_EU_DIS_SS1_R0_SHIFT) |
- (((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
- CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);
+ REG_FIELD_GET(CHV_FGT_EU_DIS_SS1_R0_MASK, fuse) |
+ REG_FIELD_GET(CHV_FGT_EU_DIS_SS1_R1_MASK, fuse) << hweight32(CHV_FGT_EU_DIS_SS1_R0_MASK);
sseu->subslice_mask.hsw[0] |= BIT(1);
sseu_set_eus(sseu, 0, 1, ~disabled_mask & 0xFF);
@@ -385,7 +382,7 @@ static void gen9_sseu_info_init(struct intel_gt *gt)
int s, ss;
fuse2 = intel_uncore_read(uncore, GEN8_FUSE2);
- sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+ sseu->slice_mask = REG_FIELD_GET(GEN8_F2_S_ENA_MASK, fuse2);
/* BXT has a single slice and at most 3 subslices. */
intel_sseu_set_info(sseu, IS_GEN9_LP(i915) ? 1 : 3,
@@ -396,8 +393,7 @@ static void gen9_sseu_info_init(struct intel_gt *gt)
* to each of the enabled slices.
*/
subslice_mask = (1 << sseu->max_subslices) - 1;
- subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
- GEN9_F2_SS_DIS_SHIFT);
+ subslice_mask &= ~REG_FIELD_GET(GEN9_F2_SS_DIS_MASK, fuse2);
/*
* Iterate through enabled slices and subslices to
@@ -490,7 +486,7 @@ static void bdw_sseu_info_init(struct intel_gt *gt)
u32 eu_disable0, eu_disable1, eu_disable2;
fuse2 = intel_uncore_read(uncore, GEN8_FUSE2);
- sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+ sseu->slice_mask = REG_FIELD_GET(GEN8_F2_S_ENA_MASK, fuse2);
intel_sseu_set_info(sseu, 3, 3, 8);
/*
@@ -498,18 +494,18 @@ static void bdw_sseu_info_init(struct intel_gt *gt)
* to each of the enabled slices.
*/
subslice_mask = GENMASK(sseu->max_subslices - 1, 0);
- subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
- GEN8_F2_SS_DIS_SHIFT);
+ subslice_mask &= ~REG_FIELD_GET(GEN8_F2_SS_DIS_MASK, fuse2);
eu_disable0 = intel_uncore_read(uncore, GEN8_EU_DISABLE0);
eu_disable1 = intel_uncore_read(uncore, GEN8_EU_DISABLE1);
eu_disable2 = intel_uncore_read(uncore, GEN8_EU_DISABLE2);
- eu_disable[0] = eu_disable0 & GEN8_EU_DIS0_S0_MASK;
- eu_disable[1] = (eu_disable0 >> GEN8_EU_DIS0_S1_SHIFT) |
- ((eu_disable1 & GEN8_EU_DIS1_S1_MASK) <<
- (32 - GEN8_EU_DIS0_S1_SHIFT));
- eu_disable[2] = (eu_disable1 >> GEN8_EU_DIS1_S2_SHIFT) |
- ((eu_disable2 & GEN8_EU_DIS2_S2_MASK) <<
- (32 - GEN8_EU_DIS1_S2_SHIFT));
+ eu_disable[0] =
+ REG_FIELD_GET(GEN8_EU_DIS0_S0_MASK, eu_disable0);
+ eu_disable[1] =
+ REG_FIELD_GET(GEN8_EU_DIS0_S1_MASK, eu_disable0) |
+ REG_FIELD_GET(GEN8_EU_DIS1_S1_MASK, eu_disable1) << hweight32(GEN8_EU_DIS0_S1_MASK);
+ eu_disable[2] =
+ REG_FIELD_GET(GEN8_EU_DIS1_S2_MASK, eu_disable1) |
+ REG_FIELD_GET(GEN8_EU_DIS2_S2_MASK, eu_disable2) << hweight32(GEN8_EU_DIS1_S2_MASK);
/*
* Iterate through enabled slices and subslices to
@@ -687,7 +683,7 @@ u32 intel_sseu_make_rpcs(struct intel_gt *gt,
* According to documentation software must consider the configuration
* as 2x4x8 and hardware will translate this to 1x8x8.
*
- * Furthemore, even though SScount is three bits, maximum documented
+ * Furthermore, even though SScount is three bits, maximum documented
* value for it is four. From this some rules/restrictions follow:
*
* 1.
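
The cherryview and bdw hunks above concatenate two register fields into one software mask and use hweight32() of the low field's mask as the shift, so the removed *_SHIFT defines are no longer needed. A userspace sketch of the idea with illustrative masks:

#include <stdint.h>
#include <stdio.h>

#define EU_DIS_R0_MASK  0x000f0000u     /* low field, bits 19:16 */
#define EU_DIS_R1_MASK  0x00f00000u     /* high field, bits 23:20 */

static uint32_t field_get(uint32_t mask, uint32_t reg)
{
        return (reg & mask) >> __builtin_ctz(mask);
}

int main(void)
{
        uint32_t fuse = 0x00250000;     /* R1 = 0x2, R0 = 0x5 (made up) */
        uint32_t disabled;

        /* Shift the high field by the width (popcount) of the low field's mask. */
        disabled = field_get(EU_DIS_R0_MASK, fuse) |
                   field_get(EU_DIS_R1_MASK, fuse) << __builtin_popcount(EU_DIS_R0_MASK);

        printf("disabled mask = 0x%02x\n", disabled);   /* prints 0x25 */
        return 0;
}
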
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 570c91878189..116683ebe074 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -691,16 +691,17 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
struct drm_i915_private *i915 = engine->i915;
/*
- * Wa_1409142259:tgl,dg1,adl-p
+ * Wa_1409142259:tgl,dg1,adl-p,adl-n
* Wa_1409347922:tgl,dg1,adl-p
* Wa_1409252684:tgl,dg1,adl-p
* Wa_1409217633:tgl,dg1,adl-p
* Wa_1409207793:tgl,dg1,adl-p
- * Wa_1409178076:tgl,dg1,adl-p
- * Wa_1408979724:tgl,dg1,adl-p
- * Wa_14010443199:tgl,rkl,dg1,adl-p
- * Wa_14010698770:tgl,rkl,dg1,adl-s,adl-p
- * Wa_1409342910:tgl,rkl,dg1,adl-s,adl-p
+ * Wa_1409178076:tgl,dg1,adl-p,adl-n
+ * Wa_1408979724:tgl,dg1,adl-p,adl-n
+ * Wa_14010443199:tgl,rkl,dg1,adl-p,adl-n
+ * Wa_14010698770:tgl,rkl,dg1,adl-s,adl-p,adl-n
+ * Wa_1409342910:tgl,rkl,dg1,adl-s,adl-p,adl-n
+ * Wa_22010465259:tgl,rkl,dg1,adl-s,adl-p,adl-n
*/
wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
@@ -741,6 +742,12 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
/* Wa_1606376872 */
wa_masked_en(wal, COMMON_SLICE_CHICKEN4, DISABLE_TDC_LOAD_BALANCING_CALC);
}
+
+ /*
+ * This bit must be set to enable performance optimization for fast
+ * clears.
+ */
+ wa_mcr_write_or(wal, GEN8_WM_CHICKEN2, WAIT_ON_DEPTH_STALL_DONE_DISABLE);
}
static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -1318,7 +1325,7 @@ xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
* We'll do our default/implicit steering based on GSLICE (in the
* sliceid field) and DSS (in the subsliceid field). If we can
* find overlap between the valid MSLICE and/or LNCF values with
- * a suitable GSLICE, then we can just re-use the default value and
+ * a suitable GSLICE, then we can just reuse the default value and
* skip and explicit steering at runtime.
*
* We only need to look for overlap between GSLICE/MSLICE/LNCF to find
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index 81c31396eceb..d7717de17ecc 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -53,7 +53,7 @@ static int wait_for_submit(struct intel_engine_cs *engine,
if (i915_request_completed(rq)) /* that was quick! */
return 0;
- /* Wait until the HW has acknowleged the submission (or err) */
+ /* Wait until the HW has acknowledged the submission (or err) */
intel_engine_flush_submission(engine);
if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
return 0;
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 9d3aeb237295..f057c16410e7 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -548,7 +548,7 @@ static int igt_reset_fail_engine(void *arg)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- /* Check that we can recover from engine-reset failues */
+ /* Check that we can recover from engine-reset failures */
if (!intel_has_reset_engine(gt))
return 0;
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index e17b8777d21d..22e750108c5f 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -63,7 +63,7 @@ static int wait_for_submit(struct intel_engine_cs *engine,
if (i915_request_completed(rq)) /* that was quick! */
return 0;
- /* Wait until the HW has acknowleged the submission (or err) */
+ /* Wait until the HW has acknowledged the submission (or err) */
intel_engine_flush_submission(engine);
if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
return 0;
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 27b6d51ef145..908483ab0bc8 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -222,7 +222,7 @@ int live_rc6_ctx_wa(void *arg)
i915_reset_engine_count(error, engine);
const u32 *res;
- /* Use a sacrifical context */
+ /* Use a sacrificial context */
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index c207a4fb03bf..73bc91c6ea07 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -22,7 +22,7 @@
#include "selftests/igt_spinner.h"
#include "selftests/librapl.h"
-/* Try to isolate the impact of cstates from determing frequency response */
+/* Try to isolate the impact of cstates from determining frequency response */
#define CPU_LATENCY 0 /* -1 to disable pm_qos, 0 to disable cstates */
static void dummy_rps_work(struct work_struct *wrk)
@@ -477,12 +477,13 @@ int live_rps_control(void *arg)
limit, intel_gpu_freq(rps, limit),
min, max, ktime_to_ns(min_dt), ktime_to_ns(max_dt));
- if (limit == rps->min_freq) {
- pr_err("%s: GPU throttled to minimum!\n",
- engine->name);
+ if (limit != rps->max_freq) {
+ u32 throttle = intel_uncore_read(gt->uncore,
+ intel_gt_perf_limit_reasons_reg(gt));
+
+ pr_warn("%s: GPU throttled with reasons 0x%08x\n",
+ engine->name, throttle & GT0_PERF_LIMIT_REASONS_MASK);
show_pstate_limits(rps);
- err = -ENODEV;
- break;
}
if (igt_flush_test(gt->i915)) {
@@ -1115,7 +1116,7 @@ static u64 measure_power(struct intel_rps *rps, int *freq)
for (i = 0; i < 5; i++)
x[i] = __measure_power(5);
- *freq = (*freq + intel_rps_read_actual_frequency(rps)) / 2;
+ *freq = (*freq + read_cagf(rps)) / 2;
/* A simple triangle filter for better result stability */
sort(x, 5, sizeof(*x), cmp_u64, NULL);
diff --git a/drivers/gpu/drm/i915/gt/selftest_slpc.c b/drivers/gpu/drm/i915/gt/selftest_slpc.c
index e218b229681f..e61bb0bad12c 100644
--- a/drivers/gpu/drm/i915/gt/selftest_slpc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_slpc.c
@@ -95,6 +95,21 @@ static int slpc_restore_freq(struct intel_guc_slpc *slpc, u32 min, u32 max)
return 0;
}
+static u64 slpc_measure_power(struct intel_rps *rps, int *freq)
+{
+ u64 x[5];
+ int i;
+
+ for (i = 0; i < 5; i++)
+ x[i] = __measure_power(5);
+
+ *freq = (*freq + intel_rps_read_actual_frequency(rps)) / 2;
+
+ /* A simple triangle filter for better result stability */
+ sort(x, 5, sizeof(*x), cmp_u64, NULL);
+ return div_u64(x[1] + 2 * x[2] + x[3], 4);
+}
+
static u64 measure_power_at_freq(struct intel_gt *gt, int *freq, u64 *power)
{
int err = 0;
@@ -103,7 +118,7 @@ static u64 measure_power_at_freq(struct intel_gt *gt, int *freq, u64 *power)
if (err)
return err;
*freq = intel_rps_read_actual_frequency(&gt->rps);
- *power = measure_power(&gt->rps, freq);
+ *power = slpc_measure_power(&gt->rps, freq);
return err;
}
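
slpc_measure_power() above keeps the selftests' "triangle filter": take five samples, sort them, and average the middle three with 1-2-1 weights so a single outlier cannot skew the result. A plain userspace sketch with made-up samples and qsort standing in for the kernel's sort():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int cmp_u64(const void *a, const void *b)
{
        uint64_t x = *(const uint64_t *)a;
        uint64_t y = *(const uint64_t *)b;

        return (x > y) - (x < y);
}

int main(void)
{
        uint64_t x[5] = { 990, 1010, 5000, 1000, 400 }; /* made-up power samples */

        qsort(x, 5, sizeof(*x), cmp_u64);
        /* 1-2-1 weighted average of the middle three: (990 + 2*1000 + 1010) / 4 */
        printf("filtered = %llu\n",
               (unsigned long long)((x[1] + 2 * x[2] + x[3]) / 4));
        return 0;
}
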
diff --git a/drivers/gpu/drm/i915/gt/shaders/README b/drivers/gpu/drm/i915/gt/shaders/README
index e7e96d7073c7..22f8dabed434 100644
--- a/drivers/gpu/drm/i915/gt/shaders/README
+++ b/drivers/gpu/drm/i915/gt/shaders/README
@@ -10,7 +10,7 @@ i915/gt/shaders/clear_kernel directory.
The generated .c files should never be modified directly. Instead, any modification
needs to be done on the on their respective ASM files and build instructions below
-needes to be followed.
+needs to be followed.
Building
========
@@ -24,7 +24,7 @@ on building.
Please make sure your Mesa tool is compiled with "-Dtools=intel" and
"-Ddri-drivers=i965", and run this script from IGT source root directory"
-The instructions bellow assume:
+The instructions below assume:
* IGT gpu tools source code is located on your home directory (~) as ~/igt
* Mesa source code is located on your home directory (~) as ~/mesa
and built under the ~/mesa/build directory
@@ -43,4 +43,4 @@ igt $ ./scripts/generate_clear_kernel.sh -g ivb \
~/igt/lib/i915/shaders/clear_kernel/hsw.asm
~ $ cd ~/igt
igt $ ./scripts/generate_clear_kernel.sh -g hsw \
- -m ~/mesa/build/src/intel/tools/i965_asm
\ No newline at end of file
+ -m ~/mesa/build/src/intel/tools/i965_asm
diff --git a/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm
index 5fdf384bb621..6c0c89daf96c 100644
--- a/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm
+++ b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm
@@ -24,7 +24,7 @@ mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
* DW 1.4 - Rsvd (intended for context ID)
* DW 1.5 - [31:16]:SliceCount, [15:0]:SubSlicePerSliceCount
* DW 1.6 - Rsvd MBZ (intended for Enable Wait on Total Thread Count)
- * DW 1.7 - Rsvd MBZ (inteded for Total Thread Count)
+ * DW 1.7 - Rsvd MBZ (intended for Total Thread Count)
*
* Binding Table
*
diff --git a/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm
index 97c7ac9e3854..27c28e63d6cc 100644
--- a/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm
+++ b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm
@@ -24,7 +24,7 @@ mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
* DW 1.4 - Rsvd (intended for context ID)
* DW 1.5 - [31:16]:SliceCount, [15:0]:SubSlicePerSliceCount
* DW 1.6 - Rsvd MBZ (intended for Enable Wait on Total Thread Count)
- * DW 1.7 - Rsvd MBZ (inteded for Total Thread Count)
+ * DW 1.7 - Rsvd MBZ (intended for Total Thread Count)
*
* Binding Table
*
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index bb696b29ee2c..365c4b8b04f4 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -108,7 +108,7 @@ static int __shmem_rw(struct file *file, loff_t off,
if (IS_ERR(page))
return PTR_ERR(page);
- vaddr = kmap(page);
+ vaddr = kmap_local_page(page);
if (write) {
memcpy(vaddr + offset_in_page(off), ptr, this);
set_page_dirty(page);
@@ -116,7 +116,7 @@ static int __shmem_rw(struct file *file, loff_t off,
memcpy(ptr, vaddr + offset_in_page(off), this);
}
mark_page_accessed(page);
- kunmap(page);
+ kunmap_local(vaddr);
put_page(page);
len -= this;
@@ -143,11 +143,11 @@ int shmem_read_to_iosys_map(struct file *file, loff_t off,
if (IS_ERR(page))
return PTR_ERR(page);
- vaddr = kmap(page);
+ vaddr = kmap_local_page(page);
iosys_map_memcpy_to(map, map_off, vaddr + offset_in_page(off),
this);
mark_page_accessed(page);
- kunmap(page);
+ kunmap_local(vaddr);
put_page(page);
len -= this;
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
index c34674e797c6..6de87ae5669e 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
@@ -228,6 +228,11 @@ struct slpc_optimized_strategies {
#define SLPC_OPTIMIZED_STRATEGY_COMPUTE REG_BIT(0)
+enum slpc_power_profiles {
+ SLPC_POWER_PROFILES_BASE = 0x0,
+ SLPC_POWER_PROFILES_POWER_SAVING = 0x1
+};
+
/**
* DOC: SLPC H2G MESSAGE FORMAT
*
diff --git a/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h b/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h
index 1fc0c17b1230..803c0379d97d 100644
--- a/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h
@@ -81,7 +81,7 @@ struct guc_debug_capture_list {
*
* intel_guc_capture module uses these structures to maintain static
* tables (per unique platform) that consists of lists of registers
- * (offsets, names, flags,...) that are used at the ADS regisration
+ * (offsets, names, flags,...) that are used at the ADS registration
* time as well as during runtime processing and reporting of error-
* capture states generated by GuC just prior to engine reset events.
*/
@@ -200,7 +200,7 @@ struct intel_guc_state_capture {
* dynamically allocate new nodes when receiving the G2H notification
* because the event handlers for all G2H event-processing is called
* by the ct processing worker queue and when that queue is being
- * processed, there is no absoluate guarantee that we are not in the
+ * processed, there is no absolute guarantee that we are not in the
* midst of a GT reset operation (which doesn't allow allocations).
*/
struct list_head cachelist;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 5949ff0b0161..9df80c325fc1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -690,7 +690,7 @@ int intel_guc_suspend(struct intel_guc *guc)
* H2G MMIO command completes.
*
* Don't abort on a failure code from the GuC. Keep going and do the
- * clean up in santize() and re-initialisation on resume and hopefully
+ * clean up in sanitize() and re-initialisation on resume and hopefully
* the error here won't be problematic.
*/
ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 57b903132776..053780f562c1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -295,7 +295,7 @@ struct intel_guc {
*/
struct work_struct dead_guc_worker;
/**
- * @last_dead_guc_jiffies: timestamp of previous 'dead guc' occurrance
+ * @last_dead_guc_jiffies: timestamp of previous 'dead guc' occurrence
* used to prevent a fundamentally broken system from continuously
* reloading the GuC.
*/
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index fe53e8eccf4b..e7ccfa520df3 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -259,13 +259,14 @@ static int guc_wait_ucode(struct intel_guc *guc)
} else if (delta_ms > 200) {
guc_warn(guc, "excessive init time: %lldms! [status = 0x%08X, count = %d, ret = %d]\n",
delta_ms, status, count, ret);
- guc_warn(guc, "excessive init time: [freq = %dMHz, before = %dMHz, perf_limit_reasons = 0x%08X]\n",
- intel_rps_read_actual_frequency(&gt->rps), before_freq,
+ guc_warn(guc, "excessive init time: [freq = %dMHz -> %dMHz vs %dMHz, perf_limit_reasons = 0x%08X]\n",
+ before_freq, intel_rps_read_actual_frequency(&gt->rps),
+ intel_rps_get_requested_frequency(&gt->rps),
intel_uncore_read(uncore, intel_gt_perf_limit_reasons_reg(gt)));
} else {
- guc_dbg(guc, "init took %lldms, freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d\n",
- delta_ms, intel_rps_read_actual_frequency(&gt->rps),
- before_freq, status, count, ret);
+ guc_dbg(guc, "init took %lldms, freq = %dMHz -> %dMHz vs %dMHz, status = 0x%08X, count = %d, ret = %d\n",
+ delta_ms, before_freq, intel_rps_read_actual_frequency(&gt->rps),
+ intel_rps_get_requested_frequency(&gt->rps), status, count, ret);
}
return ret;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 4ce6e2332a63..eded00f0c7e1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -408,7 +408,7 @@ enum guc_capture_type {
GUC_CAPTURE_LIST_TYPE_MAX,
};
-/* Class indecies for capture_class and capture_instance arrays */
+/* Class indices for capture_class and capture_instance arrays */
enum {
GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE = 0,
GUC_CAPTURE_LIST_CLASS_VIDEO = 1,
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
index b67a15f74276..868195c33f5b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
@@ -7,6 +7,7 @@
#include "gt/intel_hwconfig.h"
#include "i915_drv.h"
#include "i915_memcpy.h"
+#include "intel_guc_print.h"
/*
* GuC has a blob containing hardware configuration information (HWConfig).
@@ -42,6 +43,8 @@ static int __guc_action_get_hwconfig(struct intel_guc *guc,
};
int ret;
+ guc_dbg(guc, "Querying HW config table: size = %d, offset = 0x%08X\n",
+ ggtt_size, ggtt_offset);
ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
if (ret == -ENXIO)
return -ENOENT;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index 706fffca698b..d5ee6e5e1443 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -15,6 +15,34 @@
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"
+/**
+ * DOC: SLPC - Dynamic Frequency management
+ *
+ * Single Loop Power Control (SLPC) is a GuC algorithm that manages
+ * GT frequency based on busyness and how KMD initializes it. SLPC is
+ * almost completely in control after initialization except for a few
+ * scenarios mentioned below.
+ *
+ * KMD uses the concept of waitboost to ramp frequency to RP0 when there
+ * are pending submissions for a context. It achieves this by sending GuC a
+ * request to update the min frequency to RP0. Waitboost is disabled
+ * when the request retires.
+ *
+ * Another form of frequency control happens through per-context hints.
+ * A context can be marked as low latency during creation. That will ensure
+ * that SLPC uses an aggressive frequency ramp when that context is active.
+ *
+ * Power profiles add another level of control to these mechanisms.
+ * When the power saving profile is chosen, SLPC will use conservative
+ * thresholds to ramp frequency, thus saving power. KMD will disable
+ * waitboosts as well, which achieves further power savings. The base
+ * profile is the default and ensures balanced performance for any workload.
+ *
+ * Lastly, users have some level of control through sysfs, where min/max
+ * frequency values can be altered and the use of efficient freq
+ * can be toggled.
+ */
+
static inline struct intel_guc *slpc_to_guc(struct intel_guc_slpc *slpc)
{
return container_of(slpc, struct intel_guc, slpc);
@@ -265,6 +293,8 @@ int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
slpc->num_boosts = 0;
slpc->media_ratio_mode = SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL;
+ slpc->power_profile = SLPC_POWER_PROFILES_BASE;
+
mutex_init(&slpc->lock);
INIT_WORK(&slpc->boost_work, slpc_boost_work);
@@ -357,21 +387,29 @@ static u32 slpc_decode_max_freq(struct intel_guc_slpc *slpc)
GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
}
-static void slpc_shared_data_reset(struct slpc_shared_data *data)
+static void slpc_shared_data_reset(struct intel_guc_slpc *slpc)
{
- memset(data, 0, sizeof(struct slpc_shared_data));
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ struct slpc_shared_data *data = slpc->vaddr;
+ memset(data, 0, sizeof(struct slpc_shared_data));
data->header.size = sizeof(struct slpc_shared_data);
/* Enable only GTPERF task, disable others */
slpc_mem_set_enabled(data, SLPC_PARAM_TASK_ENABLE_GTPERF,
SLPC_PARAM_TASK_DISABLE_GTPERF);
- slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_BALANCER,
- SLPC_PARAM_TASK_DISABLE_BALANCER);
+ /*
+ * Don't allow balancer related algorithms on platforms before
+ * Xe_LPG, where GuC started to restrict it to TDP limited scenarios.
+ */
+ if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 70)) {
+ slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_BALANCER,
+ SLPC_PARAM_TASK_DISABLE_BALANCER);
- slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_DCC,
- SLPC_PARAM_TASK_DISABLE_DCC);
+ slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_DCC,
+ SLPC_PARAM_TASK_DISABLE_DCC);
+ }
}
/**
@@ -567,6 +605,34 @@ int intel_guc_slpc_set_media_ratio_mode(struct intel_guc_slpc *slpc, u32 val)
return ret;
}
+int intel_guc_slpc_set_power_profile(struct intel_guc_slpc *slpc, u32 val)
+{
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ intel_wakeref_t wakeref;
+ int ret = 0;
+
+ if (val > SLPC_POWER_PROFILES_POWER_SAVING)
+ return -EINVAL;
+
+ mutex_lock(&slpc->lock);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ ret = slpc_set_param(slpc,
+ SLPC_PARAM_POWER_PROFILE,
+ val);
+ if (ret)
+ guc_err(slpc_to_guc(slpc),
+ "Failed to set power profile to %d: %pe\n",
+ val, ERR_PTR(ret));
+ else
+ slpc->power_profile = val;
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mutex_unlock(&slpc->lock);
+
+ return ret;
+}
+
void intel_guc_pm_intrmsk_enable(struct intel_gt *gt)
{
u32 pm_intrmsk_mbz = 0;
@@ -686,7 +752,7 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
GEM_BUG_ON(!slpc->vma);
- slpc_shared_data_reset(slpc->vaddr);
+ slpc_shared_data_reset(slpc);
ret = slpc_reset(slpc);
if (unlikely(ret < 0)) {
@@ -728,6 +794,13 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
/* Enable SLPC Optimized Strategy for compute */
intel_guc_slpc_set_strategy(slpc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);
+ /* Set cached value of power_profile */
+ ret = intel_guc_slpc_set_power_profile(slpc, slpc->power_profile);
+ if (unlikely(ret)) {
+ guc_probe_error(guc, "Failed to set SLPC power profile: %pe\n", ERR_PTR(ret));
+ return ret;
+ }
+
return 0;
}
@@ -791,6 +864,23 @@ int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p
drm_printf(p, "\tSLPC state: %s\n", slpc_get_state_string(slpc));
drm_printf(p, "\tGTPERF task active: %s\n",
str_yes_no(slpc_tasks->status & SLPC_GTPERF_TASK_ENABLED));
+ drm_printf(p, "\tDCC enabled: %s\n",
+ str_yes_no(slpc_tasks->status &
+ SLPC_DCC_TASK_ENABLED));
+ drm_printf(p, "\tDCC in: %s\n",
+ str_yes_no(slpc_tasks->status & SLPC_IN_DCC));
+ drm_printf(p, "\tBalancer enabled: %s\n",
+ str_yes_no(slpc_tasks->status &
+ SLPC_BALANCER_ENABLED));
+ drm_printf(p, "\tIBC enabled: %s\n",
+ str_yes_no(slpc_tasks->status &
+ SLPC_IBC_TASK_ENABLED));
+ drm_printf(p, "\tBalancer IA LMT enabled: %s\n",
+ str_yes_no(slpc_tasks->status &
+ SLPC_BALANCER_IA_LMT_ENABLED));
+ drm_printf(p, "\tBalancer IA LMT active: %s\n",
+ str_yes_no(slpc_tasks->status &
+ SLPC_BALANCER_IA_LMT_ACTIVE));
drm_printf(p, "\tMax freq: %u MHz\n",
slpc_decode_max_freq(slpc));
drm_printf(p, "\tMin freq: %u MHz\n",
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
index 1cb5fd44f05c..fc9f761b4372 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
@@ -46,5 +46,6 @@ void intel_guc_slpc_boost(struct intel_guc_slpc *slpc);
void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc);
int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val);
int intel_guc_slpc_set_strategy(struct intel_guc_slpc *slpc, u32 val);
+int intel_guc_slpc_set_power_profile(struct intel_guc_slpc *slpc, u32 val);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
index a88651331497..83673b10ac4e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
@@ -33,6 +33,9 @@ struct intel_guc_slpc {
u32 max_freq_softlimit;
bool ignore_eff_freq;
+ /* Base or power saving */
+ u32 power_profile;
+
/* cached media ratio mode */
u32 media_ratio_mode;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 3fce5c000144..f8cb7c630d5b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1223,7 +1223,7 @@ __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
* determine validity of these values. Instead we read the values multiple times
* until they are consistent. In test runs, 3 attempts results in consistent
* values. The upper bound is set to 6 attempts and may need to be tuned as per
- * any new occurences.
+ * any new occurrences.
*/
static void __get_engine_usage_record(struct intel_engine_cs *engine,
u32 *last_in, u32 *id, u32 *total)
@@ -1285,15 +1285,12 @@ static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
static u32 gpm_timestamp_shift(struct intel_gt *gt)
{
intel_wakeref_t wakeref;
- u32 reg, shift;
+ u32 reg;
with_intel_runtime_pm(gt->uncore->rpm, wakeref)
reg = intel_uncore_read(gt->uncore, RPM_CONFIG0);
- shift = (reg & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
- GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT;
-
- return 3 - shift;
+ return 3 - REG_FIELD_GET(GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, reg);
}
static void guc_update_pm_timestamp(struct intel_guc *guc, ktime_t *now)
@@ -3011,7 +3008,7 @@ static int __guc_context_pin(struct intel_context *ce,
/*
* GuC context gets pinned in guc_request_alloc. See that function for
- * explaination of why.
+ * explanation of why.
*/
return lrc_pin(ce, engine, vaddr);
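
The gpm_timestamp_shift() hunk above replaces an open-coded mask-and-shift with REG_FIELD_GET(), which derives the shift from a contiguous mask at compile time. A small sketch of the equivalence; the *_SHIFT macro appears only to mirror the removed form.

/* Illustrative only: both expressions extract the same CTC shift field. */
static u32 ctc_shift_example(u32 rpm_config0)
{
	u32 shift_old = (rpm_config0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
			GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT;
	u32 shift_new = REG_FIELD_GET(GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK,
				      rpm_config0);

	WARN_ON(shift_old != shift_new);

	return 3 - shift_new;	/* GPM timestamp shift, as in the driver */
}
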
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index b3cbf85c00cb..d791f9baa11d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -231,8 +231,8 @@ static void delayed_huc_load_init(struct intel_huc *huc)
sw_fence_dummy_notify);
i915_sw_fence_commit(&huc->delayed_load.fence);
- hrtimer_init(&huc->delayed_load.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- huc->delayed_load.timer.function = huc_delayed_load_timer_callback;
+ hrtimer_setup(&huc->delayed_load.timer, huc_delayed_load_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
static void delayed_huc_load_fini(struct intel_huc *huc)
@@ -489,13 +489,15 @@ int intel_huc_wait_for_auth_complete(struct intel_huc *huc,
if (delta_ms > 50) {
huc_warn(huc, "excessive auth time: %lldms! [status = 0x%08X, count = %d, ret = %d]\n",
delta_ms, huc->status[type].reg.reg, count, ret);
- huc_warn(huc, "excessive auth time: [freq = %dMHz, before = %dMHz, perf_limit_reasons = 0x%08X]\n",
- intel_rps_read_actual_frequency(&gt->rps), before_freq,
+ huc_warn(huc, "excessive auth time: [freq = %dMHz -> %dMHz vs %dMHz, perf_limit_reasons = 0x%08X]\n",
+ before_freq, intel_rps_read_actual_frequency(&gt->rps),
+ intel_rps_get_requested_frequency(&gt->rps),
intel_uncore_read(uncore, intel_gt_perf_limit_reasons_reg(gt)));
} else {
- huc_dbg(huc, "auth took %lldms, freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d\n",
- delta_ms, intel_rps_read_actual_frequency(&gt->rps),
- before_freq, huc->status[type].reg.reg, count, ret);
+ huc_dbg(huc, "auth took %lldms, freq = %dMHz -> %dMHz vs %dMHz, status = 0x%08X, count = %d, ret = %d\n",
+ delta_ms, before_freq, intel_rps_read_actual_frequency(&gt->rps),
+ intel_rps_get_requested_frequency(&gt->rps),
+ huc->status[type].reg.reg, count, ret);
}
/* mark the load process as complete even if the wait failed */
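
This file, like gvt/display.c, gvt/sched_policy.c, i915_perf.c and i915_pmu.c later in the series, folds hrtimer_init() plus a separate callback assignment into a single hrtimer_setup() call. A minimal sketch of the pattern, with placeholder names that are not i915 symbols:

static enum hrtimer_restart example_timer_fn(struct hrtimer *timer)
{
	return HRTIMER_NORESTART;
}

static void example_timer_init(struct hrtimer *timer)
{
	/*
	 * Old style:
	 *	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	 *	timer->function = example_timer_fn;
	 * New style, one call that also installs the callback:
	 */
	hrtimer_setup(timer, example_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
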
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 5b8080ec5315..90ba1b0b4c9d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -512,7 +512,7 @@ static int __uc_init_hw(struct intel_uc *uc)
ERR_PTR(ret), attempts);
}
- /* Did we succeded or run out of retries? */
+ /* Did we succeed or run out of retries? */
if (ret)
goto err_log_capture;
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
index 26fdc392fce6..83801c992488 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
@@ -64,7 +64,7 @@ static int intel_hang_guc(void *arg)
old_beat = engine->props.heartbeat_interval_ms;
ret = intel_engine_set_heartbeat(engine, BEAT_INTERVAL);
if (ret) {
- gt_err(gt, "Failed to boost heatbeat interval: %pe\n", ERR_PTR(ret));
+ gt_err(gt, "Failed to boost heartbeat interval: %pe\n", ERR_PTR(ret));
goto err;
}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 6439c8e91a8d..f25ee2953baf 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1906,7 +1906,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
unsigned long start_offset = 0;
- /* get the start gm address of the batch buffer */
+ /* Get the start gm address of the batch buffer */
gma = get_gma_bb_from_cmd(s, 1);
if (gma == INTEL_GVT_INVALID_ADDR)
return -EFAULT;
@@ -1921,15 +1921,16 @@ static int perform_bb_shadow(struct parser_exec_state *s)
bb->ppgtt = (s->buf_addr_type == GTT_BUFFER) ? false : true;
- /* the start_offset stores the batch buffer's start gma's
- * offset relative to page boundary. so for non-privileged batch
+ /*
+ * The start_offset stores the batch buffer's start gma's
+ * offset relative to page boundary. So for non-privileged batch
* buffer, the shadowed gem object holds exactly the same page
- * layout as original gem object. This is for the convience of
+ * layout as original gem object. This is for the convenience of
* replacing the whole non-privilged batch buffer page to this
- * shadowed one in PPGTT at the same gma address. (this replacing
+ * shadowed one in PPGTT at the same gma address. (This replacing
* action is not implemented yet now, but may be necessary in
* future).
- * for prileged batch buffer, we just change start gma address to
+ * For privileged batch buffers, we just change the start gma address to
* that of shadowed page.
*/
if (bb->ppgtt)
@@ -1976,7 +1977,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
/*
* ip_va saves the virtual address of the shadow batch buffer, while
* ip_gma saves the graphics address of the original batch buffer.
- * As the shadow batch buffer is just a copy from the originial one,
+ * As the shadow batch buffer is just a copy from the original one,
* it should be right to use shadow batch buffer'va and original batch
* buffer's gma in pair. After all, we don't want to pin the shadow
* buffer here (too early).
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 95570cabdf27..1e1af5e545a4 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -97,7 +97,7 @@ int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
return 0;
}
-static unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = {
+static const unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = {
{
/* EDID with 1024x768 as its resolution */
/*Header*/
@@ -581,8 +581,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
vgpu->display.port_num = port_num;
/* Init hrtimer based on default refresh rate */
- hrtimer_init(&vblank_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- vblank_timer->timer.function = vblank_timer_fn;
+ hrtimer_setup(&vblank_timer->timer, vblank_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
vblank_timer->vrefresh_k = port->vrefresh_k;
vblank_timer->period = DIV64_U64_ROUND_CLOSEST(NSEC_PER_SEC * MSEC_PER_SEC, vblank_timer->vrefresh_k);
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 9efc3ca0ce82..4f599af766b0 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -436,7 +436,7 @@ int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
dmabuf_obj_get(dmabuf_obj);
}
ret = 0;
- gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n",
+ gvt_dbg_dpy("vgpu%d: reuse dmabuf_obj ref %d, id %d\n",
vgpu->id, kref_read(&dmabuf_obj->kref),
gfx_plane_info->dmabuf_id);
mutex_unlock(&vgpu->dmabuf_lock);
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index 0a357ca42db1..89147d33168c 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -298,7 +298,7 @@ static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
int byte_count = byte_left;
u32 reg_data = 0;
- /* Data can only be recevied if previous settings correct */
+ /* Data can only be received if previous settings correct */
if (vgpu_vreg_t(vgpu, PCH_GMBUS1) & GMBUS_SLAVE_READ) {
if (byte_left <= 0) {
memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 15cce973e1ae..f9f7ef131371 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -398,120 +398,3 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
plane->y_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot));
return 0;
}
-
-#define SPRITE_FORMAT_NUM (1 << 3)
-
-static const struct pixel_format sprite_pixel_formats[SPRITE_FORMAT_NUM] = {
- [0x0] = {DRM_FORMAT_YUV422, 16, "YUV 16-bit 4:2:2 packed"},
- [0x1] = {DRM_FORMAT_XRGB2101010, 32, "RGB 32-bit 2:10:10:10"},
- [0x2] = {DRM_FORMAT_XRGB8888, 32, "RGB 32-bit 8:8:8:8"},
- [0x4] = {DRM_FORMAT_AYUV, 32,
- "YUV 32-bit 4:4:4 packed (8:8:8:8 MSB-X:Y:U:V)"},
-};
-
-/**
- * intel_vgpu_decode_sprite_plane - Decode sprite plane
- * @vgpu: input vgpu
- * @plane: sprite plane to save decoded info
- * This function is called for decoding plane
- *
- * Returns:
- * 0 on success, non-zero if failed.
- */
-int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
- struct intel_vgpu_sprite_plane_format *plane)
-{
- u32 val, fmt;
- u32 color_order, yuv_order;
- int drm_format;
- int pipe;
-
- pipe = get_active_pipe(vgpu);
- if (pipe >= I915_MAX_PIPES)
- return -ENODEV;
-
- val = vgpu_vreg_t(vgpu, SPRCTL(pipe));
- plane->enabled = !!(val & SPRITE_ENABLE);
- if (!plane->enabled)
- return -ENODEV;
-
- plane->tiled = !!(val & SPRITE_TILED);
- color_order = !!(val & SPRITE_RGB_ORDER_RGBX);
- yuv_order = (val & SPRITE_YUV_ORDER_MASK) >>
- _SPRITE_YUV_ORDER_SHIFT;
-
- fmt = (val & SPRITE_FORMAT_MASK) >> _SPRITE_FMT_SHIFT;
- if (!sprite_pixel_formats[fmt].bpp) {
- gvt_vgpu_err("Non-supported pixel format (0x%x)\n", fmt);
- return -EINVAL;
- }
- plane->hw_format = fmt;
- plane->bpp = sprite_pixel_formats[fmt].bpp;
- drm_format = sprite_pixel_formats[fmt].drm_format;
-
- /* Order of RGB values in an RGBxxx buffer may be ordered RGB or
- * BGR depending on the state of the color_order field
- */
- if (!color_order) {
- if (drm_format == DRM_FORMAT_XRGB2101010)
- drm_format = DRM_FORMAT_XBGR2101010;
- else if (drm_format == DRM_FORMAT_XRGB8888)
- drm_format = DRM_FORMAT_XBGR8888;
- }
-
- if (drm_format == DRM_FORMAT_YUV422) {
- switch (yuv_order) {
- case 0:
- drm_format = DRM_FORMAT_YUYV;
- break;
- case 1:
- drm_format = DRM_FORMAT_UYVY;
- break;
- case 2:
- drm_format = DRM_FORMAT_YVYU;
- break;
- case 3:
- drm_format = DRM_FORMAT_VYUY;
- break;
- default:
- /* yuv_order has only 2 bits */
- break;
- }
- }
-
- plane->drm_format = drm_format;
-
- plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK;
- if (!vgpu_gmadr_is_valid(vgpu, plane->base))
- return -EINVAL;
-
- plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
- if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
- gvt_vgpu_err("Translate sprite plane gma 0x%x to gpa fail\n",
- plane->base);
- return -EINVAL;
- }
-
- plane->stride = vgpu_vreg_t(vgpu, SPRSTRIDE(pipe)) &
- _SPRITE_STRIDE_MASK;
-
- val = vgpu_vreg_t(vgpu, SPRSIZE(pipe));
- plane->height = (val & _SPRITE_SIZE_HEIGHT_MASK) >>
- _SPRITE_SIZE_HEIGHT_SHIFT;
- plane->width = (val & _SPRITE_SIZE_WIDTH_MASK) >>
- _SPRITE_SIZE_WIDTH_SHIFT;
- plane->height += 1; /* raw height is one minus the real value */
- plane->width += 1; /* raw width is one minus the real value */
-
- val = vgpu_vreg_t(vgpu, SPRPOS(pipe));
- plane->x_pos = (val & _SPRITE_POS_X_MASK) >> _SPRITE_POS_X_SHIFT;
- plane->y_pos = (val & _SPRITE_POS_Y_MASK) >> _SPRITE_POS_Y_SHIFT;
-
- val = vgpu_vreg_t(vgpu, SPROFFSET(pipe));
- plane->x_offset = (val & _SPRITE_OFFSET_START_X_MASK) >>
- _SPRITE_OFFSET_START_X_SHIFT;
- plane->y_offset = (val & _SPRITE_OFFSET_START_Y_MASK) >>
- _SPRITE_OFFSET_START_Y_SHIFT;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h
index fa6503900c84..436d43c0087b 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.h
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h
@@ -156,7 +156,5 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_primary_plane_format *plane);
int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_cursor_plane_format *plane);
-int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
- struct intel_vgpu_sprite_plane_format *plane);
#endif
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 1bce1493b86f..2fa7ca19ba5d 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -71,72 +71,6 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
return false;
}
-/* translate a guest gmadr to host gmadr */
-int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
-{
- struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
-
- if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr),
- "invalid guest gmadr %llx\n", g_addr))
- return -EACCES;
-
- if (vgpu_gmadr_is_aperture(vgpu, g_addr))
- *h_addr = vgpu_aperture_gmadr_base(vgpu)
- + (g_addr - vgpu_aperture_offset(vgpu));
- else
- *h_addr = vgpu_hidden_gmadr_base(vgpu)
- + (g_addr - vgpu_hidden_offset(vgpu));
- return 0;
-}
-
-/* translate a host gmadr to guest gmadr */
-int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
-{
- struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
-
- if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr),
- "invalid host gmadr %llx\n", h_addr))
- return -EACCES;
-
- if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
- *g_addr = vgpu_aperture_gmadr_base(vgpu)
- + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
- else
- *g_addr = vgpu_hidden_gmadr_base(vgpu)
- + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
- return 0;
-}
-
-int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
- unsigned long *h_index)
-{
- u64 h_addr;
- int ret;
-
- ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
- &h_addr);
- if (ret)
- return ret;
-
- *h_index = h_addr >> I915_GTT_PAGE_SHIFT;
- return 0;
-}
-
-int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
- unsigned long *g_index)
-{
- u64 g_addr;
- int ret;
-
- ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
- &g_addr);
- if (ret)
- return ret;
-
- *g_index = g_addr >> I915_GTT_PAGE_SHIFT;
- return 0;
-}
-
#define gtt_type_is_entry(type) \
(type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
&& type != GTT_TYPE_PPGTT_PTE_ENTRY \
@@ -1259,7 +1193,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
gvt_vdbg_mm("shadow 64K gtt entry\n");
/*
* The layout of 64K page is special, the page size is
- * controlled by uper PDE. To be simple, we always split
+ * controlled by upper PDE. To be simple, we always split
* 64K page to smaller 4K pages in shadow PT.
*/
return split_64KB_gtt_entry(vgpu, spt, index, &se);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 2c95aeef4e41..01d890999f25 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -452,8 +452,10 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
u32 fence, u64 value);
-/* Macros for easily accessing vGPU virtual/shadow register.
- Explicitly seperate use for typed MMIO reg or real offset.*/
+/*
+ * Macros for easily accessing vGPU virtual/shadow register.
+ * Explicitly separate use for typed MMIO reg or real offset.
+ */
#define vgpu_vreg_t(vgpu, reg) \
(*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg(vgpu, offset) \
@@ -531,12 +533,6 @@ int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num);
gvt_gmadr_is_hidden(gvt, gmadr))
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
-int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
-int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
-int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
- unsigned long *h_index);
-int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
- unsigned long *g_index);
void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
bool primary);
@@ -702,7 +698,7 @@ static inline void intel_gvt_mmio_set_cmd_write_patch(
* @offset: register offset
*
* Returns:
- * True if GPU commmand write to an MMIO should be patched
+ * True if GPU command write to an MMIO should be patched.
*/
static inline bool intel_gvt_mmio_is_cmd_write_patch(
struct intel_gvt *gvt, unsigned int offset)
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 241cff0fc683..4efee6797873 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -689,11 +689,11 @@ static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu)
u32 new_rate = 0;
u32 *old_rate = &(intel_vgpu_port(vgpu, vgpu->display.port_num)->vrefresh_k);
- /* Calcuate pixel clock by (ls_clk * M / N) */
+ /* Calculate pixel clock by (ls_clk * M / N) */
pixel_clk = div_u64(mul_u32_u32(link_m, dp_br), link_n);
pixel_clk *= MSEC_PER_SEC;
- /* Calcuate refresh rate by (pixel_clk / (h_total * v_total)) */
+ /* Calculate refresh rate by (pixel_clk / (h_total * v_total)) */
new_rate = DIV64_U64_ROUND_CLOSEST(mul_u64_u32_shr(pixel_clk, MSEC_PER_SEC, 0), mul_u32_u32(htotal + 1, vtotal + 1));
if (*old_rate != new_rate)
@@ -2001,7 +2001,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
* vGPU reset, it's set on D0->D3 on PCI config write, and cleared after
* vGPU reset if in resuming.
* In S0ix exit, the device power state also transite from D3 to D0 as
- * S3 resume, but no vGPU reset (triggered by QEMU devic model). After
+ * S3 resume, but no vGPU reset (triggered by QEMU device model). After
* S0ix exit, all engines continue to work. However the d3_entered
* remains set which will break next vGPU reset logic (miss the expected
* PPGTT invalidation).
@@ -3119,23 +3119,6 @@ int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
}
/**
- * intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be
- * force-nopriv register
- *
- * @gvt: a GVT device
- * @offset: register offset
- *
- * Returns:
- * True if the register is in force-nonpriv whitelist;
- * False if outside;
- */
-bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
- unsigned int offset)
-{
- return in_whitelist(offset);
-}
-
-/**
* intel_vgpu_mmio_reg_rw - emulate tracked mmio registers
* @vgpu: a vGPU
* @offset: register offset
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index b27ff77bfb50..69830a5c49d3 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -142,7 +142,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
int ret;
/*
- * We pin the pages one-by-one to avoid allocating a big arrary
+ * We pin the pages one-by-one to avoid allocating a big array
* on stack to hold pfns.
*/
for (npage = 0; npage < total_pages; npage++) {
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 32ebacb078e8..3dc912aba80b 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -96,9 +96,6 @@ int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
-bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
- unsigned int offset);
-
int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
void *pdata, unsigned int bytes, bool is_read);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 273db14fd5fc..2f7208843367 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -53,7 +53,7 @@ struct engine_mmio {
u32 value;
};
-/* Raw offset is appened to each line for convenience. */
+/* Raw offset is appended to each line for convenience. */
static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
{RCS0, RING_MODE_GEN7(RENDER_RING_BASE), 0xffff, false}, /* 0x229c */
{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
@@ -576,8 +576,8 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
/**
* We are using raw mmio access wrapper to improve the
- * performace for batch mmio read/write, so we need
- * handle forcewake mannually.
+ * performance for batch mmio read/write, so we need
+ * to handle forcewake manually.
*/
intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
switch_mmio(pre, next, engine);
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index c077fb4674f0..9f97f743aa71 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -286,8 +286,7 @@ static int tbs_sched_init(struct intel_gvt *gvt)
return -ENOMEM;
INIT_LIST_HEAD(&data->lru_runq_head);
- hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- data->timer.function = tbs_timer_fn;
+ hrtimer_setup(&data->timer, tbs_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
data->period = GVT_DEFAULT_TIME_SLICE;
data->gvt = gvt;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 23f2cc397ec9..6e87c10bc454 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -77,7 +77,7 @@ static void update_shadow_pdps(struct intel_vgpu_workload *workload)
}
/*
- * when populating shadow ctx from guest, we should not overrride oa related
+ * When populating shadow ctx from guest, we should not override oa related
* registers, so that they will not be overlapped by guest oa configs. Thus
* made it possible to capture oa data from host for both host and guests.
*/
@@ -528,9 +528,10 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
int ret;
list_for_each_entry(bb, &workload->shadow_bb, list) {
- /* For privilge batch buffer and not wa_ctx, the bb_start_cmd_va
+ /*
+ * For privileged batch buffers that are not wa_ctx, the bb_start_cmd_va
* is only updated into ring_scan_buffer, not real ring address
- * allocated in later copy_workload_to_ring_buffer. pls be noted
+ * allocated in later copy_workload_to_ring_buffer. Please note that
* shadow_ring_buffer_va is now pointed to real ring buffer va
* in copy_workload_to_ring_buffer.
*/
@@ -546,7 +547,7 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
* here, rather than switch to shadow bb's gma
* address, we directly use original batch buffer's
* gma address, and send original bb to hardware
- * directly
+ * directly.
*/
if (!bb->ppgtt) {
i915_gem_ww_ctx_init(&ww, false);
@@ -1774,7 +1775,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu,
}
/**
- * intel_vgpu_queue_workload - Qeue a vGPU workload
+ * intel_vgpu_queue_workload - Queue a vGPU workload
* @workload: the workload to queue in
*/
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 63c751ca4119..11260392234a 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -78,7 +78,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
* vGPU type name is defined as GVTg_Vx_y which contains the physical GPU
* generation type (e.g V4 as BDW server, V5 as SKL server).
*
- * Depening on the physical SKU resource, we might see vGPU types like
+ * Depending on the physical SKU resource, we might see vGPU types like
* GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create different types of
* vGPU on same physical GPU depending on available resource. Each vGPU
* type will have a different number of avail_instance to indicate how
@@ -417,7 +417,7 @@ out_unlock:
* the whole vGPU to default state as when it is created. This vGPU function
* is required both for functionary and security concerns.The ultimate goal
* of vGPU FLR is that reuse a vGPU instance by virtual machines. When we
- * assign a vGPU to a virtual machine we must isse such reset first.
+ * assign a vGPU to a virtual machine we must issue such reset first.
*
* Full GT Reset and Per-Engine GT Reset are soft reset flow for GPU engines
* (Render, Blitter, Video, Video Enhancement). It is defined by GPU Spec.
@@ -428,7 +428,7 @@ out_unlock:
*
* The parameter dev_level is to identify if we will do DMLR or GT reset.
* The parameter engine_mask is to specific the engines that need to be
- * resetted. If value ALL_ENGINES is given for engine_mask, it means
+ * reset. If value ALL_ENGINES is given for engine_mask, it means
* the caller requests a full GT reset that we will reset all virtual
* GPU engines. For FLR, engine_mask is ignored.
*/
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 1c2a97f593c7..0d9e263913ff 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -411,9 +411,6 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
if (!HAS_RUNTIME_PM(dev_priv))
seq_puts(m, "Runtime power management not supported\n");
- seq_printf(m, "Runtime power status: %s\n",
- str_enabled_disabled(!dev_priv->display.power.domains.init_wakeref));
-
seq_printf(m, "GPU idle: %s\n", str_yes_no(!to_gt(dev_priv)->awake));
seq_printf(m, "IRQs disabled: %s\n",
str_yes_no(!intel_irqs_enabled(dev_priv)));
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index c2ae37d6b94d..ce3cc93ea211 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -41,12 +41,13 @@
#include <linux/vt.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_client.h>
+#include <drm/drm_client_event.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include "display/i9xx_display_sr.h"
-#include "display/intel_acpi.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
#include "display/intel_crtc.h"
@@ -201,7 +202,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
static void sanitize_gpu(struct drm_i915_private *i915)
{
- if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) {
+ if (!intel_gt_gpu_reset_clobbers_display(to_gt(i915))) {
struct intel_gt *gt;
unsigned int i;
@@ -657,8 +658,6 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
intel_power_domains_enable(display);
intel_runtime_pm_enable(&dev_priv->runtime_pm);
- intel_register_dsm_handler();
-
if (i915_switcheroo_register(dev_priv))
drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n");
}
@@ -675,8 +674,6 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
i915_switcheroo_unregister(dev_priv);
- intel_unregister_dsm_handler();
-
intel_runtime_pm_disable(&dev_priv->runtime_pm);
intel_power_domains_disable(display);
@@ -973,7 +970,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_runtime_pm_disable(&i915->runtime_pm);
intel_power_domains_disable(display);
- intel_fbdev_set_suspend(&i915->drm, FBINFO_STATE_SUSPENDED, true);
+ drm_client_dev_suspend(&i915->drm, false);
if (HAS_DISPLAY(i915)) {
drm_kms_helper_poll_disable(&i915->drm);
intel_display_driver_disable_user_access(display);
@@ -1056,7 +1053,7 @@ static int i915_drm_suspend(struct drm_device *dev)
/* We do a lot of poking in a lot of registers, make sure they work
* properly. */
intel_power_domains_disable(display);
- intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
+ drm_client_dev_suspend(dev, false);
if (HAS_DISPLAY(dev_priv)) {
drm_kms_helper_poll_disable(dev);
intel_display_driver_disable_user_access(display);
@@ -1075,7 +1072,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_encoder_suspend_all(&dev_priv->display);
/* Must be called before GGTT is suspended. */
- intel_dpt_suspend(dev_priv);
+ intel_dpt_suspend(display);
i915_ggtt_suspend(to_gt(dev_priv)->ggtt);
i9xx_display_sr_save(display);
@@ -1130,7 +1127,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
* leave the device in D0 on those platforms and hope the BIOS will
* power down the device properly. The issue was seen on multiple old
* GENs with different BIOS vendors, so having an explicit blacklist
- * is inpractical; apply the workaround on everything pre GEN6. The
+ * is impractical; apply the workaround on everything pre GEN6. The
* platforms where the issue was seen:
* Lenovo Thinkpad X301, X61s, X60, T60, X41
* Fujitsu FSC S7110
@@ -1192,7 +1189,7 @@ static int i915_drm_resume(struct drm_device *dev)
setup_private_pat(gt);
/* Must be called after GGTT is resumed. */
- intel_dpt_resume(dev_priv);
+ intel_dpt_resume(display);
intel_dmc_resume(display);
@@ -1242,7 +1239,7 @@ static int i915_drm_resume(struct drm_device *dev)
intel_opregion_resume(display);
- intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
+ drm_client_dev_resume(dev, false);
intel_power_domains_enable(display);
@@ -1812,6 +1809,8 @@ static const struct drm_driver i915_drm_driver = {
.dumb_create = i915_gem_dumb_create,
.dumb_map_offset = i915_gem_dumb_mmap_offset,
+ INTEL_FBDEV_DRIVER_OPS,
+
.ioctls = i915_ioctls,
.num_ioctls = ARRAY_SIZE(i915_ioctls),
.fops = &i915_driver_fops,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b96b8de12756..ffc346379cc2 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -274,7 +274,6 @@ struct drm_i915_private {
/* PCH chipset type */
enum intel_pch pch_type;
- unsigned short pch_id;
unsigned long gem_quirks;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 070ab6546987..8c8d43451f35 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1146,11 +1146,11 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
int ret;
/*
- * In the proccess of replacing cache_level with pat_index a tricky
+ * In the process of replacing cache_level with pat_index a tricky
* dependency is created on the definition of the enum i915_cache_level.
- * in case this enum is changed, PTE encode would be broken.
+ * In case this enum is changed, PTE encode would be broken.
* Add a WARNING here. And remove when we completely quit using this
- * enum
+ * enum.
*/
BUILD_BUG_ON(I915_CACHE_NONE != 0 ||
I915_CACHE_LLC != 1 ||
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 78a8928562a9..749e1c55613e 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -224,8 +224,6 @@ struct i915_gpu_error {
/* Protected by the above dev->gpu_error.lock. */
struct i915_gpu_coredump *first_error;
- atomic_t pending_fb_pin;
-
/** Number of times the device has been reset (global) */
atomic_t reset_count;
diff --git a/drivers/gpu/drm/i915/i915_gtt_view_types.h b/drivers/gpu/drm/i915/i915_gtt_view_types.h
new file mode 100644
index 000000000000..c084f67bc880
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gtt_view_types.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __I915_GTT_VIEW_TYPES_H__
+#define __I915_GTT_VIEW_TYPES_H__
+
+#include <linux/types.h>
+
+struct intel_remapped_plane_info {
+ /* in gtt pages */
+ u32 offset:31;
+ u32 linear:1;
+ union {
+ /* in gtt pages for !linear */
+ struct {
+ u16 width;
+ u16 height;
+ u16 src_stride;
+ u16 dst_stride;
+ };
+
+ /* in gtt pages for linear */
+ u32 size;
+ };
+} __packed;
+
+struct intel_rotation_info {
+ struct intel_remapped_plane_info plane[2];
+} __packed;
+
+struct intel_partial_info {
+ u64 offset;
+ unsigned int size;
+} __packed;
+
+struct intel_remapped_info {
+ struct intel_remapped_plane_info plane[4];
+ /* in gtt pages */
+ u32 plane_alignment;
+} __packed;
+
+enum i915_gtt_view_type {
+ I915_GTT_VIEW_NORMAL = 0,
+ I915_GTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
+ I915_GTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
+ I915_GTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
+};
+
+struct i915_gtt_view {
+ enum i915_gtt_view_type type;
+ union {
+ /* Members need to contain no holes/padding */
+ struct intel_partial_info partial;
+ struct intel_rotation_info rotated;
+ struct intel_remapped_info remapped;
+ };
+};
+
+#endif /* __I915_GTT_VIEW_TYPES_H__ */
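
In the new header, each non-normal view type is defined as the sizeof() of its payload, so the type value can double as the number of bytes of view data. Below is a sketch of a comparison helper built on that assumption (illustrative, not existing i915 code); it relies on the union members being hole-free, as the comment above states.

static bool gtt_view_equal_example(const struct i915_gtt_view *a,
				   const struct i915_gtt_view *b)
{
	if (a->type != b->type)
		return false;

	if (a->type == I915_GTT_VIEW_NORMAL)
		return true;

	/* The type value is also the payload size in bytes. */
	return memcmp(&a->partial, &b->partial, a->type) == 0;
}
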
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 7920ad9585ae..37ca4a35daf2 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -120,6 +120,29 @@ void gen2_irq_init(struct intel_uncore *uncore, struct i915_irq_regs regs,
intel_uncore_posting_read(uncore, regs.imr);
}
+void gen2_error_reset(struct intel_uncore *uncore, struct i915_error_regs regs)
+{
+ intel_uncore_write(uncore, regs.emr, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.emr);
+
+ intel_uncore_write(uncore, regs.eir, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.eir);
+ intel_uncore_write(uncore, regs.eir, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.eir);
+}
+
+void gen2_error_init(struct intel_uncore *uncore, struct i915_error_regs regs,
+ u32 emr_val)
+{
+ intel_uncore_write(uncore, regs.eir, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.eir);
+ intel_uncore_write(uncore, regs.eir, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.eir);
+
+ intel_uncore_write(uncore, regs.emr, emr_val);
+ intel_uncore_posting_read(uncore, regs.emr);
+}
+
/**
* ivb_parity_work - Workqueue called when a parity error interrupt
* occurred.
@@ -207,6 +230,7 @@ out:
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
+ struct intel_display *display = &dev_priv->display;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -217,6 +241,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
do {
u32 iir, gt_iir, pm_iir;
+ u32 eir = 0, dpinvgtt = 0;
u32 pipe_stats[I915_MAX_PIPES] = {};
u32 hotplug_status = 0;
u32 ier = 0;
@@ -254,13 +279,16 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
if (iir & I915_DISPLAY_PORT_INTERRUPT)
hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ vlv_display_error_irq_ack(display, &eir, &dpinvgtt);
+
/* Call regardless, as some status bits might not be
* signalled in IIR */
i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
if (iir & (I915_LPE_PIPE_A_INTERRUPT |
I915_LPE_PIPE_B_INTERRUPT))
- intel_lpe_audio_irq_handler(dev_priv);
+ intel_lpe_audio_irq_handler(display);
/*
* VLV_IIR is single buffered, and reflects the level
@@ -280,6 +308,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ vlv_display_error_irq_handler(display, eir, dpinvgtt);
+
valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
} while (0);
@@ -293,6 +324,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
+ struct intel_display *display = &dev_priv->display;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -303,6 +335,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
do {
u32 master_ctl, iir;
+ u32 eir = 0, dpinvgtt = 0;
u32 pipe_stats[I915_MAX_PIPES] = {};
u32 hotplug_status = 0;
u32 ier = 0;
@@ -336,6 +369,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
if (iir & I915_DISPLAY_PORT_INTERRUPT)
hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ vlv_display_error_irq_ack(display, &eir, &dpinvgtt);
+
/* Call regardless, as some status bits might not be
* signalled in IIR */
i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
@@ -343,7 +379,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
if (iir & (I915_LPE_PIPE_A_INTERRUPT |
I915_LPE_PIPE_B_INTERRUPT |
I915_LPE_PIPE_C_INTERRUPT))
- intel_lpe_audio_irq_handler(dev_priv);
+ intel_lpe_audio_irq_handler(display);
/*
* VLV_IIR is single buffered, and reflects the level
@@ -358,6 +394,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ vlv_display_error_irq_handler(display, eir, dpinvgtt);
+
valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
} while (0);
@@ -813,10 +852,10 @@ static u32 i9xx_error_mask(struct drm_i915_private *i915)
* so we just have to mask off all page table errors via EMR.
*/
if (HAS_FBC(i915))
- return ~I915_ERROR_MEMORY_REFRESH;
+ return I915_ERROR_MEMORY_REFRESH;
else
- return ~(I915_ERROR_PAGE_TABLE |
- I915_ERROR_MEMORY_REFRESH);
+ return I915_ERROR_PAGE_TABLE |
+ I915_ERROR_MEMORY_REFRESH;
}
static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
@@ -865,6 +904,7 @@ static void i915_irq_reset(struct drm_i915_private *dev_priv)
i9xx_display_irq_reset(dev_priv);
+ gen2_error_reset(uncore, GEN2_ERROR_REGS);
gen2_irq_reset(uncore, GEN2_IRQ_REGS);
dev_priv->irq_mask = ~0u;
}
@@ -874,7 +914,7 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
- intel_uncore_write(uncore, EMR, i9xx_error_mask(dev_priv));
+ gen2_error_init(uncore, GEN2_ERROR_REGS, ~i9xx_error_mask(dev_priv));
dev_priv->irq_mask =
~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
@@ -970,6 +1010,7 @@ static void i965_irq_reset(struct drm_i915_private *dev_priv)
i9xx_display_irq_reset(dev_priv);
+ gen2_error_reset(uncore, GEN2_ERROR_REGS);
gen2_irq_reset(uncore, GEN2_IRQ_REGS);
dev_priv->irq_mask = ~0u;
}
@@ -984,13 +1025,13 @@ static u32 i965_error_mask(struct drm_i915_private *i915)
* so we can always enable the page table errors.
*/
if (IS_G4X(i915))
- return ~(GM45_ERROR_PAGE_TABLE |
- GM45_ERROR_MEM_PRIV |
- GM45_ERROR_CP_PRIV |
- I915_ERROR_MEMORY_REFRESH);
+ return GM45_ERROR_PAGE_TABLE |
+ GM45_ERROR_MEM_PRIV |
+ GM45_ERROR_CP_PRIV |
+ I915_ERROR_MEMORY_REFRESH;
else
- return ~(I915_ERROR_PAGE_TABLE |
- I915_ERROR_MEMORY_REFRESH);
+ return I915_ERROR_PAGE_TABLE |
+ I915_ERROR_MEMORY_REFRESH;
}
static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
@@ -998,7 +1039,7 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
- intel_uncore_write(uncore, EMR, i965_error_mask(dev_priv));
+ gen2_error_init(uncore, GEN2_ERROR_REGS, ~i965_error_mask(dev_priv));
dev_priv->irq_mask =
~(I915_ASLE_INTERRUPT |
@@ -1231,7 +1272,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
}
/**
- * intel_irq_uninstall - finilizes all irq handling
+ * intel_irq_uninstall - finalizes all irq handling
* @dev_priv: i915 device instance
*
* This stops interrupt and hotplug handling and unregisters and frees all
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index 0457f6402e05..58789b264575 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -47,4 +47,8 @@ void gen2_irq_reset(struct intel_uncore *uncore, struct i915_irq_regs regs);
void gen2_irq_init(struct intel_uncore *uncore, struct i915_irq_regs regs,
u32 imr_val, u32 ier_val);
+void gen2_error_reset(struct intel_uncore *uncore, struct i915_error_regs regs);
+void gen2_error_init(struct intel_uncore *uncore, struct i915_error_regs regs,
+ u32 emr_val);
+
#endif /* __I915_IRQ_H__ */
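
With the polarity change in i915_irq.c, i9xx_error_mask() and i965_error_mask() now return the error bits that should be reported, and the single caller inverts them when programming EMR through the new gen2_error_init() helper (a bit set in EMR masks that error source). A small sketch of the contract, assuming the call shape used in the patch:

static void error_postinstall_example(struct drm_i915_private *i915,
				      struct intel_uncore *uncore)
{
	/* e.g. I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH */
	u32 report = i9xx_error_mask(i915);

	/* Invert once: every bit not in "report" stays masked in EMR. */
	gen2_error_init(uncore, GEN2_ERROR_REGS, ~report);
}
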
diff --git a/drivers/gpu/drm/i915/i915_module.c b/drivers/gpu/drm/i915/i915_module.c
index 7ed6d70389af..5862754c662c 100644
--- a/drivers/gpu/drm/i915/i915_module.c
+++ b/drivers/gpu/drm/i915/i915_module.c
@@ -24,7 +24,7 @@ static int i915_check_nomodeset(void)
bool use_kms = true;
/*
- * Enable KMS by default, unless explicitly overriden by
+ * Enable KMS by default, unless explicitly overridden by
* either the i915.modeset parameter or by the
* nomodeset boot option.
*/
@@ -71,8 +71,6 @@ static const struct {
{ .init = i915_vma_resource_module_init,
.exit = i915_vma_resource_module_exit },
{ .init = i915_mock_selftests },
- { .init = i915_pmu_init,
- .exit = i915_pmu_exit },
{ .init = i915_pci_register_driver,
.exit = i915_pci_unregister_driver },
{ .init = i915_perf_sysctl_register,
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 5384d1bb4923..de0b413600a1 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -548,7 +548,8 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
bool pollin;
u32 partial_report_size;
- /* We have to consider the (unlikely) possibility that read() errors
+ /*
+ * We have to consider the (unlikely) possibility that read() errors
* could result in an OA buffer reset which might reset the head and
* tail state.
*/
@@ -557,7 +558,8 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
hw_tail = stream->perf->ops.oa_hw_tail_read(stream);
hw_tail -= gtt_offset;
- /* The tail pointer increases in 64 byte increments, not in report_size
+ /*
+ * The tail pointer increases in 64 byte increments, not in report_size
* steps. Also the report size may not be a power of 2. Compute
* potentially partially landed report in the OA buffer
*/
@@ -569,8 +571,9 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
tail = hw_tail;
- /* Walk the stream backward until we find a report with report
- * id and timestmap not at 0. Since the circular buffer pointers
+ /*
+ * Walk the stream backward until we find a report with report
+ * id and timestamp not at 0. Since the circular buffer pointers
* progress by increments of 64 bytes and that reports can be up
* to 256 bytes long, we can't tell whether a report has fully
* landed in memory before the report id and timestamp of the
@@ -3359,9 +3362,8 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
"opening stream oa config uuid=%s\n",
stream->oa_config->uuid);
- hrtimer_init(&stream->poll_check_timer,
- CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- stream->poll_check_timer.function = oa_poll_check_timer_cb;
+ hrtimer_setup(&stream->poll_check_timer, oa_poll_check_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
init_waitqueue_head(&stream->poll_wq);
spin_lock_init(&stream->oa_buffer.ptr_lock);
mutex_init(&stream->lock);
@@ -3849,7 +3851,7 @@ i915_perf_open_ioctl_locked(struct i915_perf *perf,
}
/*
- * Asking for SSEU configuration is a priviliged operation.
+ * Asking for SSEU configuration is a privileged operation.
*/
if (props->has_sseu)
privileged_op = true;
@@ -4478,14 +4480,16 @@ static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
static u32 mask_reg_value(u32 reg, u32 val)
{
- /* HALF_SLICE_CHICKEN2 is programmed with a the
+ /*
+ * HALF_SLICE_CHICKEN2 is programmed with the
* WaDisableSTUnitPowerOptimization workaround. Make sure the value
* programmed by userspace doesn't change this.
*/
if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
- /* WAIT_FOR_RC6_EXIT has only one bit fullfilling the function
+ /*
+ * WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
* indicated by its name and a bunch of selection fields used by OA
* configs.
*/
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index e55db036be1b..e5a188ce3185 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -28,9 +28,6 @@
BIT(I915_SAMPLE_WAIT) | \
BIT(I915_SAMPLE_SEMA))
-static cpumask_t i915_pmu_cpumask;
-static unsigned int i915_pmu_target_cpu = -1;
-
static struct i915_pmu *event_to_pmu(struct perf_event *event)
{
return container_of(event->pmu, struct i915_pmu, base);
@@ -642,10 +639,6 @@ static int i915_pmu_event_init(struct perf_event *event)
if (event->cpu < 0)
return -EINVAL;
- /* only allow running on one cpu at a time */
- if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
- return -EINVAL;
-
if (is_engine_event(event))
ret = engine_event_init(event);
else
@@ -891,11 +884,6 @@ static void i915_pmu_event_del(struct perf_event *event, int flags)
i915_pmu_event_stop(event, PERF_EF_UPDATE);
}
-static int i915_pmu_event_event_idx(struct perf_event *event)
-{
- return 0;
-}
-
struct i915_str_attribute {
struct device_attribute attr;
const char *str;
@@ -940,23 +928,6 @@ static ssize_t i915_pmu_event_show(struct device *dev,
return sprintf(buf, "config=0x%lx\n", eattr->val);
}
-static ssize_t cpumask_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
-}
-
-static DEVICE_ATTR_RO(cpumask);
-
-static struct attribute *i915_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL,
-};
-
-static const struct attribute_group i915_pmu_cpumask_attr_group = {
- .attrs = i915_cpumask_attrs,
-};
-
#define __event(__counter, __name, __unit) \
{ \
.counter = (__counter), \
@@ -1173,100 +1144,18 @@ static void free_event_attributes(struct i915_pmu *pmu)
pmu->pmu_attr = NULL;
}
-static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
-{
- struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
-
- /* Select the first online CPU as a designated reader. */
- if (cpumask_empty(&i915_pmu_cpumask))
- cpumask_set_cpu(cpu, &i915_pmu_cpumask);
-
- return 0;
-}
-
-static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
-{
- struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
- unsigned int target = i915_pmu_target_cpu;
-
- /*
- * Unregistering an instance generates a CPU offline event which we must
- * ignore to avoid incorrectly modifying the shared i915_pmu_cpumask.
- */
- if (!pmu->registered)
- return 0;
-
- if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
- target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
-
- /* Migrate events if there is a valid target */
- if (target < nr_cpu_ids) {
- cpumask_set_cpu(target, &i915_pmu_cpumask);
- i915_pmu_target_cpu = target;
- }
- }
-
- if (target < nr_cpu_ids && target != pmu->cpuhp.cpu) {
- perf_pmu_migrate_context(&pmu->base, cpu, target);
- pmu->cpuhp.cpu = target;
- }
-
- return 0;
-}
-
-static enum cpuhp_state cpuhp_state = CPUHP_INVALID;
-
-int i915_pmu_init(void)
-{
- int ret;
-
- ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
- "perf/x86/intel/i915:online",
- i915_pmu_cpu_online,
- i915_pmu_cpu_offline);
- if (ret < 0)
- pr_notice("Failed to setup cpuhp state for i915 PMU! (%d)\n",
- ret);
- else
- cpuhp_state = ret;
-
- return 0;
-}
-
-void i915_pmu_exit(void)
-{
- if (cpuhp_state != CPUHP_INVALID)
- cpuhp_remove_multi_state(cpuhp_state);
-}
-
-static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
-{
- if (cpuhp_state == CPUHP_INVALID)
- return -EINVAL;
-
- return cpuhp_state_add_instance(cpuhp_state, &pmu->cpuhp.node);
-}
-
-static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
-{
- cpuhp_state_remove_instance(cpuhp_state, &pmu->cpuhp.node);
-}
-
void i915_pmu_register(struct drm_i915_private *i915)
{
struct i915_pmu *pmu = &i915->pmu;
const struct attribute_group *attr_groups[] = {
&i915_pmu_format_attr_group,
&pmu->events_attr_group,
- &i915_pmu_cpumask_attr_group,
NULL
};
int ret = -ENOMEM;
spin_lock_init(&pmu->lock);
- hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- pmu->timer.function = i915_sample;
- pmu->cpuhp.cpu = -1;
+ hrtimer_setup(&pmu->timer, i915_sample, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
init_rc6(pmu);
if (IS_DGFX(i915)) {
@@ -1295,28 +1184,22 @@ void i915_pmu_register(struct drm_i915_private *i915)
pmu->base.module = THIS_MODULE;
pmu->base.task_ctx_nr = perf_invalid_context;
+ pmu->base.scope = PERF_PMU_SCOPE_SYS_WIDE;
pmu->base.event_init = i915_pmu_event_init;
pmu->base.add = i915_pmu_event_add;
pmu->base.del = i915_pmu_event_del;
pmu->base.start = i915_pmu_event_start;
pmu->base.stop = i915_pmu_event_stop;
pmu->base.read = i915_pmu_event_read;
- pmu->base.event_idx = i915_pmu_event_event_idx;
ret = perf_pmu_register(&pmu->base, pmu->name, -1);
if (ret)
goto err_groups;
- ret = i915_pmu_register_cpuhp_state(pmu);
- if (ret)
- goto err_unreg;
-
pmu->registered = true;
return;
-err_unreg:
- perf_pmu_unregister(&pmu->base);
err_groups:
kfree(pmu->base.attr_groups);
err_attr:
@@ -1340,8 +1223,6 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
hrtimer_cancel(&pmu->timer);
- i915_pmu_unregister_cpuhp_state(pmu);
-
perf_pmu_unregister(&pmu->base);
kfree(pmu->base.attr_groups);
if (IS_DGFX(i915))
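[Annotation: with a system-wide PMU scope the perf core itself keeps events on a single CPU per scope and migrates contexts on hotplug, which is why the driver's private cpumask, the cpumask sysfs attribute and the CPUHP online/offline callbacks can all go away above. A minimal sketch of the resulting registration pattern; the names below are illustrative, not taken from this patch.]

#include <linux/perf_event.h>

/* Illustrative uncore-style PMU relying on the core for CPU scoping. */
static int demo_event_init(struct perf_event *event)
{
        if (event->attr.type != event->pmu->type)
                return -ENOENT;
        /* Sampling is typically unsupported for device PMUs like this. */
        if (is_sampling_event(event))
                return -EINVAL;
        /* The core rejects CPUs outside the scope; only reject "any CPU". */
        if (event->cpu < 0)
                return -EINVAL;
        return 0;
}

static struct pmu demo_pmu = {
        .module         = THIS_MODULE,
        .task_ctx_nr    = perf_invalid_context,
        .scope          = PERF_PMU_SCOPE_SYS_WIDE,
        .event_init     = demo_event_init,
        /* .add/.del/.start/.stop/.read as usual */
};

/* perf_pmu_register(&demo_pmu, "demo", -1); */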
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 8e66d63d0c9f..5826cc81858c 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -57,13 +57,6 @@ struct i915_pmu_sample {
struct i915_pmu {
/**
- * @cpuhp: Struct used for CPU hotplug handling.
- */
- struct {
- struct hlist_node node;
- unsigned int cpu;
- } cpuhp;
- /**
* @base: PMU base.
*/
struct pmu base;
@@ -103,7 +96,7 @@ struct i915_pmu {
/**
* @timer_last:
*
- * Timestmap of the previous timer invocation.
+ * Timestamp of the previous timer invocation.
*/
ktime_t timer_last;
@@ -155,15 +148,11 @@ struct i915_pmu {
};
#ifdef CONFIG_PERF_EVENTS
-int i915_pmu_init(void);
-void i915_pmu_exit(void);
void i915_pmu_register(struct drm_i915_private *i915);
void i915_pmu_unregister(struct drm_i915_private *i915);
void i915_pmu_gt_parked(struct intel_gt *gt);
void i915_pmu_gt_unparked(struct intel_gt *gt);
#else
-static inline int i915_pmu_init(void) { return 0; }
-static inline void i915_pmu_exit(void) {}
static inline void i915_pmu_register(struct drm_i915_private *i915) {}
static inline void i915_pmu_unregister(struct drm_i915_private *i915) {}
static inline void i915_pmu_gt_parked(struct intel_gt *gt) {}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 786c727aea45..c5064eebe063 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -84,7 +84,7 @@
* Try to name registers according to the specs. If the register name changes in
* the specs from platform to another, stick to the original name.
*
- * Try to re-use existing register macro definitions. Only add new macros for
+ * Try to reuse existing register macro definitions. Only add new macros for
* new register offsets, or when the register contents have changed enough to
* warrant a full redefinition.
*
@@ -372,8 +372,29 @@
#define GEN7_MEDIA_MAX_REQ_COUNT _MMIO(0x4070)
#define GEN7_GFX_MAX_REQ_COUNT _MMIO(0x4074)
+#define ILK_GTT_FAULT _MMIO(0x44040) /* ilk/snb */
+#define GTT_FAULT_INVALID_GTT_PTE (1 << 7)
+#define GTT_FAULT_INVALID_PTE_DATA (1 << 6)
+#define GTT_FAULT_CURSOR_B_FAULT (1 << 5)
+#define GTT_FAULT_CURSOR_A_FAULT (1 << 4)
+#define GTT_FAULT_SPRITE_B_FAULT (1 << 3)
+#define GTT_FAULT_SPRITE_A_FAULT (1 << 2)
+#define GTT_FAULT_PRIMARY_B_FAULT (1 << 1)
+#define GTT_FAULT_PRIMARY_A_FAULT (1 << 0)
+
#define GEN7_ERR_INT _MMIO(0x44040)
#define ERR_INT_POISON (1 << 31)
+#define ERR_INT_INVALID_GTT_PTE (1 << 29)
+#define ERR_INT_INVALID_PTE_DATA (1 << 28)
+#define ERR_INT_SPRITE_C_FAULT (1 << 23)
+#define ERR_INT_PRIMARY_C_FAULT (1 << 22)
+#define ERR_INT_CURSOR_C_FAULT (1 << 21)
+#define ERR_INT_SPRITE_B_FAULT (1 << 20)
+#define ERR_INT_PRIMARY_B_FAULT (1 << 19)
+#define ERR_INT_CURSOR_B_FAULT (1 << 18)
+#define ERR_INT_SPRITE_A_FAULT (1 << 17)
+#define ERR_INT_PRIMARY_A_FAULT (1 << 16)
+#define ERR_INT_CURSOR_A_FAULT (1 << 15)
#define ERR_INT_MMIO_UNCLAIMED (1 << 13)
#define ERR_INT_PIPE_CRC_DONE_C (1 << 8)
#define ERR_INT_FIFO_UNDERRUN_C (1 << 6)
@@ -451,6 +472,19 @@
#define GM45_ERROR_CP_PRIV (1 << 3)
#define I915_ERROR_MEMORY_REFRESH (1 << 1)
#define I915_ERROR_INSTRUCTION (1 << 0)
+
+#define GEN2_ERROR_REGS I915_ERROR_REGS(EMR, EIR)
+
+#define VLV_EIR _MMIO(VLV_DISPLAY_BASE + 0x20b0)
+#define VLV_EMR _MMIO(VLV_DISPLAY_BASE + 0x20b4)
+#define VLV_ESR _MMIO(VLV_DISPLAY_BASE + 0x20b8)
+#define VLV_ERROR_GUNIT_TLB_DATA (1 << 6)
+#define VLV_ERROR_GUNIT_TLB_PTE (1 << 5)
+#define VLV_ERROR_PAGE_TABLE (1 << 4)
+#define VLV_ERROR_CLAIM (1 << 0)
+
+#define VLV_ERROR_REGS I915_ERROR_REGS(VLV_EMR, VLV_EIR)
+
#define INSTPM _MMIO(0x20c0)
#define INSTPM_SELF_EN (1 << 12) /* 915GM only */
#define INSTPM_AGPBUSY_INT_EN (1 << 11) /* gen3: when disabled, pending interrupts
@@ -492,8 +526,9 @@
#define MBUS_ABOX_BT_CREDIT_POOL1_MASK (0x1F << 0)
#define MBUS_ABOX_BT_CREDIT_POOL1(x) ((x) << 0)
-/* Make render/texture TLB fetches lower priorty than associated data
- * fetches. This is not turned on by default
+/*
+ * Make render/texture TLB fetches lower priority than associated data
+ * fetches. This is not turned on by default.
*/
#define MI_ARB_RENDER_TLB_LOW_PRIORITY (1 << 15)
@@ -1350,38 +1385,6 @@
/* ADL and later: */
#define VIDEO_DIP_ENABLE_AS_ADL REG_BIT(23)
-/* Panel fitting */
-#define PFIT_CONTROL(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61230)
-#define PFIT_ENABLE REG_BIT(31)
-#define PFIT_PIPE_MASK REG_GENMASK(30, 29) /* 965+ */
-#define PFIT_PIPE(pipe) REG_FIELD_PREP(PFIT_PIPE_MASK, (pipe))
-#define PFIT_SCALING_MASK REG_GENMASK(28, 26) /* 965+ */
-#define PFIT_SCALING_AUTO REG_FIELD_PREP(PFIT_SCALING_MASK, 0)
-#define PFIT_SCALING_PROGRAMMED REG_FIELD_PREP(PFIT_SCALING_MASK, 1)
-#define PFIT_SCALING_PILLAR REG_FIELD_PREP(PFIT_SCALING_MASK, 2)
-#define PFIT_SCALING_LETTER REG_FIELD_PREP(PFIT_SCALING_MASK, 3)
-#define PFIT_FILTER_MASK REG_GENMASK(25, 24) /* 965+ */
-#define PFIT_FILTER_FUZZY REG_FIELD_PREP(PFIT_FILTER_MASK, 0)
-#define PFIT_FILTER_CRISP REG_FIELD_PREP(PFIT_FILTER_MASK, 1)
-#define PFIT_FILTER_MEDIAN REG_FIELD_PREP(PFIT_FILTER_MASK, 2)
-#define PFIT_VERT_INTERP_MASK REG_GENMASK(11, 10) /* pre-965 */
-#define PFIT_VERT_INTERP_BILINEAR REG_FIELD_PREP(PFIT_VERT_INTERP_MASK, 1)
-#define PFIT_VERT_AUTO_SCALE REG_BIT(9) /* pre-965 */
-#define PFIT_HORIZ_INTERP_MASK REG_GENMASK(7, 6) /* pre-965 */
-#define PFIT_HORIZ_INTERP_BILINEAR REG_FIELD_PREP(PFIT_HORIZ_INTERP_MASK, 1)
-#define PFIT_HORIZ_AUTO_SCALE REG_BIT(5) /* pre-965 */
-#define PFIT_PANEL_8TO6_DITHER_ENABLE REG_BIT(3) /* pre-965 */
-
-#define PFIT_PGM_RATIOS(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61234)
-#define PFIT_VERT_SCALE_MASK REG_GENMASK(31, 20) /* pre-965 */
-#define PFIT_VERT_SCALE(x) REG_FIELD_PREP(PFIT_VERT_SCALE_MASK, (x))
-#define PFIT_HORIZ_SCALE_MASK REG_GENMASK(15, 4) /* pre-965 */
-#define PFIT_HORIZ_SCALE(x) REG_FIELD_PREP(PFIT_HORIZ_SCALE_MASK, (x))
-#define PFIT_VERT_SCALE_MASK_965 REG_GENMASK(28, 16) /* 965+ */
-#define PFIT_HORIZ_SCALE_MASK_965 REG_GENMASK(12, 0) /* 965+ */
-
-#define PFIT_AUTO_RATIOS(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61238)
-
#define PCH_GTC_CTL _MMIO(0xe7000)
#define PCH_GTC_ENABLE (1 << 31)
@@ -1876,44 +1879,6 @@
#define _PIPEB_LINK_N2 0x6104c
#define PIPE_LINK_N2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N2)
-/* CPU panel fitter */
-/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
-#define _PFA_CTL_1 0x68080
-#define _PFB_CTL_1 0x68880
-#define PF_CTL(pipe) _MMIO_PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1)
-#define PF_ENABLE REG_BIT(31)
-#define PF_PIPE_SEL_MASK_IVB REG_GENMASK(30, 29) /* ivb/hsw */
-#define PF_PIPE_SEL_IVB(pipe) REG_FIELD_PREP(PF_PIPE_SEL_MASK_IVB, (pipe))
-#define PF_FILTER_MASK REG_GENMASK(24, 23)
-#define PF_FILTER_PROGRAMMED REG_FIELD_PREP(PF_FILTER_MASK, 0)
-#define PF_FILTER_MED_3x3 REG_FIELD_PREP(PF_FILTER_MASK, 1)
-#define PF_FILTER_EDGE_ENHANCE REG_FIELD_PREP(PF_FILTER_EDGE_MASK, 2)
-#define PF_FILTER_EDGE_SOFTEN REG_FIELD_PREP(PF_FILTER_EDGE_MASK, 3)
-
-#define _PFA_WIN_SZ 0x68074
-#define _PFB_WIN_SZ 0x68874
-#define PF_WIN_SZ(pipe) _MMIO_PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ)
-#define PF_WIN_XSIZE_MASK REG_GENMASK(31, 16)
-#define PF_WIN_XSIZE(w) REG_FIELD_PREP(PF_WIN_XSIZE_MASK, (w))
-#define PF_WIN_YSIZE_MASK REG_GENMASK(15, 0)
-#define PF_WIN_YSIZE(h) REG_FIELD_PREP(PF_WIN_YSIZE_MASK, (h))
-
-#define _PFA_WIN_POS 0x68070
-#define _PFB_WIN_POS 0x68870
-#define PF_WIN_POS(pipe) _MMIO_PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS)
-#define PF_WIN_XPOS_MASK REG_GENMASK(31, 16)
-#define PF_WIN_XPOS(x) REG_FIELD_PREP(PF_WIN_XPOS_MASK, (x))
-#define PF_WIN_YPOS_MASK REG_GENMASK(15, 0)
-#define PF_WIN_YPOS(y) REG_FIELD_PREP(PF_WIN_YPOS_MASK, (y))
-
-#define _PFA_VSCALE 0x68084
-#define _PFB_VSCALE 0x68884
-#define PF_VSCALE(pipe) _MMIO_PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
-
-#define _PFA_HSCALE 0x68090
-#define _PFB_HSCALE 0x68890
-#define PF_HSCALE(pipe) _MMIO_PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
-
/*
* Skylake scalers
*/
@@ -3197,6 +3162,10 @@
#define _TRANS_DP2_VFREQLOW_D 0x630a8
#define TRANS_DP2_VFREQLOW(trans) _MMIO_TRANS(trans, _TRANS_DP2_VFREQLOW_A, _TRANS_DP2_VFREQLOW_B)
+#define _DP_MIN_HBLANK_CTL_A 0x600ac
+#define _DP_MIN_HBLANK_CTL_B 0x610ac
+#define DP_MIN_HBLANK_CTL(trans) _MMIO_TRANS(trans, _DP_MIN_HBLANK_CTL_A, _DP_MIN_HBLANK_CTL_B)
+
/* SNB eDP training params */
/* SNB A-stepping */
#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38 << 22)
@@ -3565,6 +3534,7 @@ enum skl_power_gate {
#define _TRANS_DDI_FUNC_CTL2_DSI1 0x6bc04
#define TRANS_DDI_FUNC_CTL2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _TRANS_DDI_FUNC_CTL2_A)
#define PORT_SYNC_MODE_ENABLE REG_BIT(4)
+#define CMTG_SECONDARY_MODE REG_BIT(3)
#define PORT_SYNC_MODE_MASTER_SELECT_MASK REG_GENMASK(2, 0)
#define PORT_SYNC_MODE_MASTER_SELECT(x) REG_FIELD_PREP(PORT_SYNC_MODE_MASTER_SELECT_MASK, (x))
@@ -3619,24 +3589,29 @@ enum skl_power_gate {
#define _DDI_BUF_CTL_B 0x64100
/* Known as DDI_CTL_DE in MTL+ */
#define DDI_BUF_CTL(port) _MMIO_PORT(port, _DDI_BUF_CTL_A, _DDI_BUF_CTL_B)
-#define DDI_BUF_CTL_ENABLE (1 << 31)
+#define DDI_BUF_CTL_ENABLE REG_BIT(31)
#define XE2LPD_DDI_BUF_D2D_LINK_ENABLE REG_BIT(29)
#define XE2LPD_DDI_BUF_D2D_LINK_STATE REG_BIT(28)
-#define DDI_BUF_TRANS_SELECT(n) ((n) << 24)
-#define DDI_BUF_EMP_MASK (0xf << 24)
-#define DDI_BUF_PHY_LINK_RATE(r) ((r) << 20)
+#define DDI_BUF_EMP_MASK REG_GENMASK(27, 24)
+#define DDI_BUF_TRANS_SELECT(n) REG_FIELD_PREP(DDI_BUF_EMP_MASK, (n))
+#define DDI_BUF_PHY_LINK_RATE_MASK REG_GENMASK(23, 20)
+#define DDI_BUF_PHY_LINK_RATE(r) REG_FIELD_PREP(DDI_BUF_PHY_LINK_RATE_MASK, (r))
#define DDI_BUF_PORT_DATA_MASK REG_GENMASK(19, 18)
#define DDI_BUF_PORT_DATA_10BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 0)
#define DDI_BUF_PORT_DATA_20BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 1)
#define DDI_BUF_PORT_DATA_40BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 2)
-#define DDI_BUF_PORT_REVERSAL (1 << 16)
-#define DDI_BUF_IS_IDLE (1 << 7)
+#define DDI_BUF_PORT_REVERSAL REG_BIT(16)
+#define DDI_BUF_LANE_STAGGER_DELAY_MASK REG_GENMASK(15, 8)
+#define DDI_BUF_LANE_STAGGER_DELAY(symbols) REG_FIELD_PREP(DDI_BUF_LANE_STAGGER_DELAY_MASK, \
+ (symbols))
+#define DDI_BUF_IS_IDLE REG_BIT(7)
#define DDI_BUF_CTL_TC_PHY_OWNERSHIP REG_BIT(6)
-#define DDI_A_4_LANES (1 << 4)
-#define DDI_PORT_WIDTH(width) (((width) == 3 ? 4 : ((width) - 1)) << 1)
-#define DDI_PORT_WIDTH_MASK (7 << 1)
+#define DDI_A_4_LANES REG_BIT(4)
+#define DDI_PORT_WIDTH_MASK REG_GENMASK(3, 1)
+#define DDI_PORT_WIDTH(width) REG_FIELD_PREP(DDI_PORT_WIDTH_MASK, \
+ ((width) == 3 ? 4 : (width) - 1))
#define DDI_PORT_WIDTH_SHIFT 1
-#define DDI_INIT_DISPLAY_DETECTED (1 << 0)
+#define DDI_INIT_DISPLAY_DETECTED REG_BIT(0)
/* DDI Buffer Translations */
#define _DDI_BUF_TRANS_A 0x64E00
@@ -4190,8 +4165,8 @@ enum skl_power_gate {
_MMIO_PIPE(pipe, _PIPE_FLIPDONETMSTMP_A, _PIPE_FLIPDONETMSTMP_B)
#define _VLV_PIPE_MSA_MISC_A 0x70048
-#define VLV_PIPE_MSA_MISC(pipe) \
- _MMIO_PIPE2(dev_priv, pipe, _VLV_PIPE_MSA_MISC_A)
+#define VLV_PIPE_MSA_MISC(__display, pipe) \
+ _MMIO_PIPE2(__display, pipe, _VLV_PIPE_MSA_MISC_A)
#define VLV_MSA_MISC1_HW_ENABLE REG_BIT(31)
#define VLV_MSA_MISC1_SW_S3D_MASK REG_GENMASK(2, 0) /* MSA MISC1 3:1 */
diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h
index e251bcc0c89f..94a8f902689e 100644
--- a/drivers/gpu/drm/i915/i915_reg_defs.h
+++ b/drivers/gpu/drm/i915/i915_reg_defs.h
@@ -294,4 +294,12 @@ struct i915_irq_regs {
#define I915_IRQ_REGS(_imr, _ier, _iir) \
((const struct i915_irq_regs){ .imr = (_imr), .ier = (_ier), .iir = (_iir) })
+struct i915_error_regs {
+ i915_reg_t emr;
+ i915_reg_t eir;
+};
+
+#define I915_ERROR_REGS(_emr, _eir) \
+ ((const struct i915_error_regs){ .emr = (_emr), .eir = (_eir) })
+
#endif /* __I915_REG_DEFS__ */
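[Annotation: the new i915_error_regs bundle mirrors the existing i915_irq_regs pattern, letting a platform's EMR/EIR pair be selected once and passed around as a single value. A hedged sketch of a possible consumer; the helper and the clear-by-writeback behaviour are assumptions for illustration, not part of this patch.]

/* Hypothetical consumer of the error-register bundle. */
static void demo_clear_errors(struct intel_uncore *uncore,
                              struct i915_error_regs regs)
{
        u32 eir = intel_uncore_read(uncore, regs.eir);

        if (!eir)
                return;

        /* Assumed: write back asserted bits to ack, then mask them off. */
        intel_uncore_write(uncore, regs.eir, eir);
        intel_uncore_write(uncore, regs.emr,
                           intel_uncore_read(uncore, regs.emr) | eir);
}

/* e.g. demo_clear_errors(uncore, IS_VALLEYVIEW(i915) ? VLV_ERROR_REGS
 *                                                    : GEN2_ERROR_REGS);
 */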
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 8f62cfa23fb7..c3d27eadc0a7 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -293,8 +293,7 @@ static void __rq_init_watchdog(struct i915_request *rq)
{
struct i915_request_watchdog *wdg = &rq->watchdog;
- hrtimer_init(&wdg->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- wdg->timer.function = __rq_watchdog_expired;
+ hrtimer_setup(&wdg->timer, __rq_watchdog_expired, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
static void __rq_arm_watchdog(struct i915_request *rq)
@@ -473,7 +472,7 @@ static bool __request_in_flight(const struct i915_request *signal)
* to avoid tearing.]
*
* Note that the read of *execlists->active may race with the promotion
- * of execlists->pending[] to execlists->inflight[], overwritting
+ * of execlists->pending[] to execlists->inflight[], overwriting
* the value at *execlists->active. This is fine. The promotion implies
* that we received an ACK from the HW, and so the context is not
* stuck -- if we do not see ourselves in *active, the inflight status
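[Annotation: the hrtimer conversions here and in i915_pmu.c / intel_uncore.c above all follow one pattern: hrtimer_setup() initialises the timer and installs the callback in a single call, so the callback can no longer be forgotten or assigned after the timer is already armed. A minimal sketch with an illustrative callback.]

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
{
        /* periodic work would go here */
        return HRTIMER_NORESTART;
}

static void demo_timer_init(struct hrtimer *t)
{
        /* Old style: hrtimer_init(t, ...); t->function = demo_timer_fn; */
        hrtimer_setup(t, demo_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer_start(t, ms_to_ktime(10), HRTIMER_MODE_REL);
}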
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 0ac55b2e4223..5f7e8138ec14 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -161,7 +161,7 @@ enum {
* parent-child relationship (parallel submission, multi-lrc) that
* hit an error while generating requests in the execbuf IOCTL.
* Indicates this request should be skipped as another request in
- * submission / relationship encoutered an error.
+ * submission / relationship encountered an error.
*/
I915_FENCE_FLAG_SKIP_PARALLEL,
@@ -187,7 +187,7 @@ enum {
* RCU lookup of it that may race against reallocation of the struct
* from the slab freelist. We intentionally do not zero the structure on
* allocation so that the lookup can use the dangling pointers (and is
- * cogniscent that those pointers may be wrong). Instead, everything that
+ * cognisant that those pointers may be wrong). Instead, everything that
* needs to be initialised must be done so explicitly.
*
* The requests are reference counted.
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 776f8cc51b2f..632e316f8b05 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -26,7 +26,7 @@
#include <linux/dma-fence-array.h>
#include <drm/drm_gem.h>
-#include "display/intel_display.h"
+#include "display/intel_fb.h"
#include "display/intel_frontbuffer.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object_frontbuffer.h"
@@ -778,8 +778,8 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
* @flags: mask of PIN_* flags to use
*
* First we try to allocate some free space that meets the requirements for
- * the VMA. Failiing that, if the flags permit, it will evict an old VMA,
- * preferrably the oldest idle entry to make room for the new VMA.
+ * the VMA. Failing that, if the flags permit, it will evict an old VMA,
+ * preferably the oldest idle entry to make room for the new VMA.
*
* Returns:
* 0 on success, negative error code otherwise.
@@ -877,7 +877,7 @@ i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
* objects which need to be tightly packed into the low 32bits.
*
* Note that we assume that GGTT are limited to 4GiB for the
- * forseeable future. See also i915_ggtt_offset().
+ * foreseeable future. See also i915_ggtt_offset().
*/
if (upper_32_bits(end - 1) &&
vma->page_sizes.sg > I915_GTT_PAGE_SIZE &&
@@ -1001,7 +1001,7 @@ rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
/*
* The DE ignores the PTEs for the padding tiles, the sg entry
- * here is just a conenience to indicate how many padding PTEs
+ * here is just a convenience to indicate how many padding PTEs
* to insert at this spot.
*/
sg_set_page(sg, NULL, left, 0);
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index 559de74d0b11..a499a3bea874 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -32,6 +32,8 @@
#include "gem/i915_gem_object_types.h"
+#include "i915_gtt_view_types.h"
+
/**
* DOC: Global GTT views
*
@@ -95,46 +97,6 @@
struct i915_vma_resource;
-struct intel_remapped_plane_info {
- /* in gtt pages */
- u32 offset:31;
- u32 linear:1;
- union {
- /* in gtt pages for !linear */
- struct {
- u16 width;
- u16 height;
- u16 src_stride;
- u16 dst_stride;
- };
-
- /* in gtt pages for linear */
- u32 size;
- };
-} __packed;
-
-struct intel_remapped_info {
- struct intel_remapped_plane_info plane[4];
- /* in gtt pages */
- u32 plane_alignment;
-} __packed;
-
-struct intel_rotation_info {
- struct intel_remapped_plane_info plane[2];
-} __packed;
-
-struct intel_partial_info {
- u64 offset;
- unsigned int size;
-} __packed;
-
-enum i915_gtt_view_type {
- I915_GTT_VIEW_NORMAL = 0,
- I915_GTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
- I915_GTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
- I915_GTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
-};
-
static inline void assert_i915_gem_gtt_types(void)
{
BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 2 * sizeof(u32) + 8 * sizeof(u16));
@@ -160,16 +122,6 @@ static inline void assert_i915_gem_gtt_types(void)
}
}
-struct i915_gtt_view {
- enum i915_gtt_view_type type;
- union {
- /* Members need to contain no holes/padding */
- struct intel_partial_info partial;
- struct intel_rotation_info rotated;
- struct intel_remapped_info remapped;
- };
-};
-
/**
* DOC: Virtual Memory Address
*
diff --git a/drivers/gpu/drm/i915/intel_clock_gating.c b/drivers/gpu/drm/i915/intel_clock_gating.c
index f76642886569..387b26400169 100644
--- a/drivers/gpu/drm/i915/intel_clock_gating.c
+++ b/drivers/gpu/drm/i915/intel_clock_gating.c
@@ -682,7 +682,7 @@ static void i85x_init_clock_gating(struct drm_i915_private *i915)
* Have FBC ignore 3D activity since we use software
* render tracking, and otherwise a pure 3D workload
* (even if it just renders a single frame and then does
- * abosultely nothing) would not allow FBC to recompress
+ * absolutely nothing) would not allow FBC to recompress
* until a 2D blit occurs.
*/
intel_uncore_write(&i915->uncore, SCPD0,
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index a5383a2bc64b..dae9dce7d1b3 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -265,7 +265,7 @@ void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
}
/**
- * intel_gvt_resume - GVT resume routine wapper
+ * intel_gvt_resume - GVT resume routine wrapper
*
* @dev_priv: drm i915 private *
*
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index ee1cd2126f97..76d84cbb8361 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -18,6 +18,7 @@
#include "display/intel_fbc_regs.h"
#include "display/intel_fdi_regs.h"
#include "display/intel_lvds_regs.h"
+#include "display/intel_pfit_regs.h"
#include "display/intel_psr_regs.h"
#include "display/intel_sprite_regs.h"
#include "display/skl_universal_plane_regs.h"
@@ -1260,7 +1261,7 @@ static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter)
/**
* intel_gvt_iterate_mmio_table - Iterate the GVT MMIO table
- * @iter: the interator
+ * @iter: the iterator
*
* This function is called for iterating the GVT MMIO table when i915 is
* taking the snapshot of the HW and GVT is building MMIO tracking table.
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 1a47ecfd3fd8..8d9f4c410546 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -375,7 +375,7 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
* leave the device suspended skipping the driver's suspend handlers
* if the device was already runtime suspended. This is needed due to
* the difference in our runtime and system suspend sequence and
- * becaue the HDA driver may require us to enable the audio power
+ * because the HDA driver may require us to enable the audio power
* domain during system suspend.
*/
dev_pm_set_driver_flags(kdev, DPM_FLAG_NO_DIRECT_COMPLETE);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
index e22669d61e95..7428bd8fa67f 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -31,7 +31,7 @@ struct drm_printer;
* it can be changed with the standard runtime PM files from sysfs.
*
* The irqs_disabled variable becomes true exactly after we disable the IRQs and
- * goes back to false exactly before we reenable the IRQs. We use this variable
+ * goes back to false exactly before we re-enable the IRQs. We use this variable
* to check if someone is trying to enable/disable IRQs while they're supposed
* to be disabled. This shouldn't happen and we'll print some error messages in
* case it happens.
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index eed4937c3ff3..48a10ff80148 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -2103,8 +2103,7 @@ static int __fw_domain_init(struct intel_uncore *uncore,
d->mask = BIT(domain_id);
- hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- d->timer.function = intel_uncore_fw_release_timer;
+ hrtimer_setup(&d->timer, intel_uncore_fw_release_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
uncore->fw_domains |= BIT(domain_id);
@@ -2477,7 +2476,7 @@ static int sanity_check_mmio_access(struct intel_uncore *uncore)
/*
* Sanitycheck that MMIO access to the device is working properly. If
- * the CPU is unable to communcate with a PCI device, BAR reads will
+ * the CPU is unable to communicate with a PCI device, BAR reads will
* return 0xFFFFFFFF. Let's make sure the device isn't in this state
* before we start trying to access registers.
*
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
index 9cf169665d7c..f8da693ad3ce 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -460,11 +460,11 @@ void intel_pxp_fini_hw(struct intel_pxp *pxp)
intel_pxp_irq_disable(pxp);
}
-int intel_pxp_key_check(struct intel_pxp *pxp,
- struct drm_gem_object *_obj,
- bool assign)
+int intel_pxp_key_check(struct drm_gem_object *_obj, bool assign)
{
struct drm_i915_gem_object *obj = to_intel_bo(_obj);
+ struct drm_i915_private *i915 = to_i915(_obj->dev);
+ struct intel_pxp *pxp = i915->pxp;
if (!intel_pxp_is_active(pxp))
return -ENODEV;
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.h b/drivers/gpu/drm/i915/pxp/intel_pxp.h
index 4ed97db5e7c6..7b19109845a3 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.h
@@ -31,9 +31,7 @@ int intel_pxp_get_backend_timeout_ms(struct intel_pxp *pxp);
int intel_pxp_start(struct intel_pxp *pxp);
void intel_pxp_end(struct intel_pxp *pxp);
-int intel_pxp_key_check(struct intel_pxp *pxp,
- struct drm_gem_object *obj,
- bool assign);
+int intel_pxp_key_check(struct drm_gem_object *obj, bool assign);
void intel_pxp_invalidate(struct intel_pxp *pxp);
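[Annotation: with the new signature intel_pxp_key_check() derives the PXP instance from the GEM object's owning device, so callers only need the object. A hedged sketch of a hypothetical call site; the wrapper name is made up, but the -ENODEV meaning ("PXP inactive") comes from the function body shown above.]

/* Hypothetical caller: accept objects that either pass the key check
 * or are on a device where PXP is inactive/absent (-ENODEV).
 */
static bool demo_object_uses_valid_key(struct drm_gem_object *obj)
{
        int err = intel_pxp_key_check(obj, false /* assign */);

        return err == 0 || err == -ENODEV;
}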
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h
index 329b4fcdc040..929c20e98300 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h
@@ -41,7 +41,7 @@ struct pxp43_huc_auth_out {
/* PXP-Input-Packet: Init PXP session */
struct pxp43_create_arb_in {
struct pxp_cmd_header header;
- /* header.stream_id fields for vesion 4.3 of Init PXP session: */
+ /* header.stream_id fields for version 4.3 of Init PXP session: */
#define PXP43_INIT_SESSION_VALID BIT(0)
#define PXP43_INIT_SESSION_APPTYPE BIT(1)
#define PXP43_INIT_SESSION_APPID GENMASK(17, 2)
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
index 07864b584cf4..febdbcd8d61e 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
@@ -21,7 +21,7 @@ struct drm_i915_private;
*/
struct intel_pxp {
/**
- * @ctrl_gt: poiner to the tile that owns the controls for PXP subsystem assets that
+ * @ctrl_gt: pointer to the tile that owns the controls for PXP subsystem assets that
* the VDBOX, the KCR engine (and GSC CS depending on the platform)
*/
struct intel_gt *ctrl_gt;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 0727492576be..ad650f67114a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -45,13 +45,15 @@ static void trash_stolen(struct drm_i915_private *i915)
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
const u64 slot = ggtt->error_capture.start;
const resource_size_t size = resource_size(&i915->dsm.stolen);
+ struct rnd_state prng;
unsigned long page;
- u32 prng = 0x12345678;
/* XXX: fsck. needs some more thought... */
if (!i915_ggtt_has_aperture(ggtt))
return;
+ prandom_seed_state(&prng, 0x12345678);
+
for (page = 0; page < size; page += PAGE_SIZE) {
const dma_addr_t dma = i915->dsm.stolen.start + page;
u32 __iomem *s;
@@ -64,8 +66,7 @@ static void trash_stolen(struct drm_i915_private *i915)
s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
for (x = 0; x < PAGE_SIZE / sizeof(u32); x++) {
- prng = next_pseudo_random32(prng);
- iowrite32(prng, &s[x]);
+ iowrite32(prandom_u32_state(&prng), &s[x]);
}
io_mapping_unmap_atomic(s);
}
@@ -80,7 +81,7 @@ static void simulate_hibernate(struct drm_i915_private *i915)
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
/*
- * As a final sting in the tail, invalidate stolen. Under a real S4,
+ * As a final string in the tail, invalidate stolen. Under a real S4,
* stolen is lost and needs to be refilled on resume. However, under
* CI we merely do S4-device testing (as full S4 is too unreliable
* for automated testing across a cluster), so to simulate the effect
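[Annotation: the selftest now uses the library PRNG state instead of hand-rolling next_pseudo_random32(); the seed stays 0x12345678, so the written pattern remains deterministic across runs. A standalone sketch of the same idiom.]

#include <linux/prandom.h>

static void demo_fill_pattern(u32 *buf, unsigned int count)
{
        struct rnd_state prng;
        unsigned int i;

        /* Fixed seed => reproducible pseudo-random pattern. */
        prandom_seed_state(&prng, 0x12345678);

        for (i = 0; i < count; i++)
                buf[i] = prandom_u32_state(&prng);
}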
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 5d27e1c733c5..7ab4c4e60264 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -287,7 +287,8 @@ static int lowlevel_hole(struct i915_address_space *vm,
GEM_BUG_ON(count * BIT_ULL(aligned_size) > vm->total);
GEM_BUG_ON(hole_start + count * BIT_ULL(aligned_size) > hole_end);
- /* Ignore allocation failures (i.e. don't report them as
+ /*
+ * Ignore allocation failures (i.e. don't report them as
* a test failure) as we are purposefully allocating very
* large objects without checking that we have sufficient
* memory. We expect to hit -ENOMEM.
@@ -446,7 +447,8 @@ static int fill_hole(struct i915_address_space *vm,
list_add(&obj->st_link, &objects);
- /* Align differing sized objects against the edges, and
+ /*
+ * Align differing sized objects against the edges, and
* check we don't walk off into the void when binding
* them into the GTT.
*/
@@ -831,7 +833,8 @@ static int drunk_hole(struct i915_address_space *vm,
return -ENOMEM;
GEM_BUG_ON(!order);
- /* Ignore allocation failures (i.e. don't report them as
+ /*
+ * Ignore allocation failures (i.e. don't report them as
* a test failure) as we are purposefully allocating very
* large objects without checking that we have sufficient
* memory. We expect to hit -ENOMEM.
@@ -964,7 +967,7 @@ static int __shrink_hole(struct i915_address_space *vm,
break;
if (igt_timeout(end_time,
- "%s timed out at ofset %llx [%llx - %llx]\n",
+ "%s timed out at offset %llx [%llx - %llx]\n",
__func__, addr, hole_start, hole_end)) {
err = -EINTR;
break;
@@ -1011,7 +1014,7 @@ static int shrink_boom(struct i915_address_space *vm,
/*
* Catch the case which shrink_hole seems to miss. The setup here
* requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
- * ensuring that all vma assiocated with the respective pd/pdp are
+ * ensuring that all vma associated with the respective pd/pdp are
* unpinned at the time.
*/
@@ -1537,9 +1540,10 @@ static int igt_gtt_reserve(void *arg)
u64 total;
int err = -ENODEV;
- /* i915_gem_gtt_reserve() tries to reserve the precise range
+ /*
+ * i915_gem_gtt_reserve() tries to reserve the precise range
* for the node, and evicts if it has to. So our test checks that
- * it can give us the requsted space and prevent overlaps.
+ * it can give us the requested space and prevent overlaps.
*/
/* Start by filling the GGTT */
@@ -1743,7 +1747,8 @@ static int igt_gtt_insert(void *arg)
u64 total;
int err = -ENODEV;
- /* i915_gem_gtt_insert() tries to allocate some free space in the GTT
+ /*
+ * i915_gem_gtt_insert() tries to allocate some free space in the GTT
* to the node, evicting if required.
*/
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index 71b52d5efef4..7c4111e60f2e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -159,7 +159,8 @@ static int igt_vma_create(void *arg)
LIST_HEAD(objects);
int err = -ENOMEM;
- /* Exercise creating many vma amonst many objections, checking the
+ /*
+ * Exercise creating many vma amongst many objections, checking the
* vma creation and lookup routines.
*/
@@ -292,7 +293,8 @@ static int igt_vma_pin1(void *arg)
VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)),
#if !IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
- /* Misusing BIAS is a programming error (it is not controllable
+ /*
+ * Misusing BIAS is a programming error (it is not controllable
* from userspace) so when debugging is enabled, it explodes.
* However, the tests are still quite interesting for checking
* variable start, end and size.
@@ -312,7 +314,8 @@ static int igt_vma_pin1(void *arg)
struct i915_vma *vma;
int err = -EINVAL;
- /* Exercise all the weird and wonderful i915_vma_pin requests,
+ /*
+ * Exercise all the weird and wonderful i915_vma_pin requests,
* focusing on error handling of boundary conditions.
*/
@@ -577,7 +580,8 @@ static int igt_vma_rotate_remap(void *arg)
const unsigned int max_pages = 64;
int err = -ENOMEM;
- /* Create VMA for many different combinations of planes and check
+ /*
+ * Create VMA for many different combinations of planes and check
* that the page layout within the rotated VMA match our expectations.
*/
@@ -804,7 +808,8 @@ static int igt_vma_partial(void *arg)
struct i915_vma *vma;
int err = -ENOMEM;
- /* Create lots of different VMA for the object and check that
+ /*
+ * Create lots of different VMA for the object and check that
* we are returned the same VMA when we later request the same range.
*/
diff --git a/drivers/gpu/drm/i915/soc/intel_pch.c b/drivers/gpu/drm/i915/soc/intel_pch.c
index 842db43e46c0..82dc7fbd1a3e 100644
--- a/drivers/gpu/drm/i915/soc/intel_pch.c
+++ b/drivers/gpu/drm/i915/soc/intel_pch.c
@@ -7,6 +7,36 @@
#include "i915_utils.h"
#include "intel_pch.h"
+#define INTEL_PCH_DEVICE_ID_MASK 0xff80
+#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
+#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
+#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
+#define INTEL_PCH_WPT_DEVICE_ID_TYPE 0x8c80
+#define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE 0x9c80
+#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
+#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
+#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280
+#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
+#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
+#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280
+#define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680
+#define INTEL_PCH_CMP_V_DEVICE_ID_TYPE 0xA380
+#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
+#define INTEL_PCH_ICP2_DEVICE_ID_TYPE 0x3880
+#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
+#define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080
+#define INTEL_PCH_TGP2_DEVICE_ID_TYPE 0x4380
+#define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80
+#define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80
+#define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180
+#define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00
+#define INTEL_PCH_ADP4_DEVICE_ID_TYPE 0x5480
+#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
+#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
+#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
+
/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
static enum intel_pch
intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
@@ -33,14 +63,14 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
drm_WARN_ON(&dev_priv->drm,
IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv));
- return PCH_LPT;
+ return PCH_LPT_H;
case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found LynxPoint LP PCH\n");
drm_WARN_ON(&dev_priv->drm,
!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
drm_WARN_ON(&dev_priv->drm,
!IS_HASWELL_ULT(dev_priv) && !IS_BROADWELL_ULT(dev_priv));
- return PCH_LPT;
+ return PCH_LPT_LP;
case INTEL_PCH_WPT_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint PCH\n");
drm_WARN_ON(&dev_priv->drm,
@@ -48,7 +78,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
drm_WARN_ON(&dev_priv->drm,
IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv));
/* WPT is LPT compatible */
- return PCH_LPT;
+ return PCH_LPT_H;
case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint LP PCH\n");
drm_WARN_ON(&dev_priv->drm,
@@ -56,7 +86,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
drm_WARN_ON(&dev_priv->drm,
!IS_HASWELL_ULT(dev_priv) && !IS_BROADWELL_ULT(dev_priv));
/* WPT is LPT compatible */
- return PCH_LPT;
+ return PCH_LPT_LP;
case INTEL_PCH_SPT_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint PCH\n");
drm_WARN_ON(&dev_priv->drm,
@@ -243,7 +273,7 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
* underneath. This is a requirement from virtualization team.
*
* In some virtualized environments (e.g. XEN), there is irrelevant
- * ISA bridge in the system. To work reliably, we should scan trhough
+ * ISA bridge in the system. To work reliably, we should scan through
* all the ISA bridge devices and check for the first match, instead
* of only checking the first one.
*/
@@ -256,13 +286,11 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
pch_type = intel_pch_type(dev_priv, id);
if (pch_type != PCH_NONE) {
dev_priv->pch_type = pch_type;
- dev_priv->pch_id = id;
break;
} else if (intel_is_virt_pch(id, pch->subsystem_vendor,
pch->subsystem_device)) {
intel_virt_detect_pch(dev_priv, &id, &pch_type);
dev_priv->pch_type = pch_type;
- dev_priv->pch_id = id;
break;
}
}
@@ -275,12 +303,10 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
drm_dbg_kms(&dev_priv->drm,
"Display disabled, reverting to NOP PCH\n");
dev_priv->pch_type = PCH_NOP;
- dev_priv->pch_id = 0;
} else if (!pch) {
if (i915_run_as_guest() && HAS_DISPLAY(dev_priv)) {
intel_virt_detect_pch(dev_priv, &id, &pch_type);
dev_priv->pch_type = pch_type;
- dev_priv->pch_id = id;
} else {
drm_dbg_kms(&dev_priv->drm, "No PCH found.\n");
}
diff --git a/drivers/gpu/drm/i915/soc/intel_pch.h b/drivers/gpu/drm/i915/soc/intel_pch.h
index 89e89ede265d..635aea7a5539 100644
--- a/drivers/gpu/drm/i915/soc/intel_pch.h
+++ b/drivers/gpu/drm/i915/soc/intel_pch.h
@@ -19,7 +19,8 @@ enum intel_pch {
PCH_NONE = 0, /* No PCH present */
PCH_IBX, /* Ibexpeak PCH */
PCH_CPT, /* Cougarpoint/Pantherpoint PCH */
- PCH_LPT, /* Lynxpoint/Wildcatpoint PCH */
+ PCH_LPT_H, /* Lynxpoint/Wildcatpoint H PCH */
+ PCH_LPT_LP, /* Lynxpoint/Wildcatpoint LP PCH */
PCH_SPT, /* Sunrisepoint/Kaby Lake PCH */
PCH_CNP, /* Cannon/Comet Lake PCH */
PCH_ICP, /* Ice Lake/Jasper Lake PCH */
@@ -33,38 +34,7 @@ enum intel_pch {
PCH_LNL,
};
-#define INTEL_PCH_DEVICE_ID_MASK 0xff80
-#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
-#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
-#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
-#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
-#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
-#define INTEL_PCH_WPT_DEVICE_ID_TYPE 0x8c80
-#define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE 0x9c80
-#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
-#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
-#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280
-#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
-#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
-#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280
-#define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680
-#define INTEL_PCH_CMP_V_DEVICE_ID_TYPE 0xA380
-#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
-#define INTEL_PCH_ICP2_DEVICE_ID_TYPE 0x3880
-#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
-#define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080
-#define INTEL_PCH_TGP2_DEVICE_ID_TYPE 0x4380
-#define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80
-#define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80
-#define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180
-#define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00
-#define INTEL_PCH_ADP4_DEVICE_ID_TYPE 0x5480
-#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
-#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
-#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
-
#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
-#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
#define HAS_PCH_DG2(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG2)
#define HAS_PCH_ADP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ADP)
#define HAS_PCH_DG1(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG1)
@@ -72,13 +42,10 @@ enum intel_pch {
#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
-#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
-#define HAS_PCH_LPT_LP(dev_priv) \
- (INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \
- INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE)
-#define HAS_PCH_LPT_H(dev_priv) \
- (INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_DEVICE_ID_TYPE || \
- INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_DEVICE_ID_TYPE)
+#define HAS_PCH_LPT_H(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT_H)
+#define HAS_PCH_LPT_LP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT_LP)
+#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT_H || \
+ INTEL_PCH_TYPE(dev_priv) == PCH_LPT_LP)
#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
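[Annotation: folding the LP/H distinction into enum intel_pch is what allows pch_id and INTEL_PCH_ID() to be dropped: code that previously compared raw PCI device IDs can now key off the type alone. Illustrative only; the workaround helpers below are made-up names.]

        /* LP vs H behaviour keyed off pch_type alone. */
        if (HAS_PCH_LPT_LP(dev_priv))
                demo_apply_lp_only_quirk(dev_priv);
        else if (HAS_PCH_LPT_H(dev_priv))
                demo_apply_h_only_quirk(dev_priv);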
diff --git a/drivers/gpu/drm/imagination/pvr_job.c b/drivers/gpu/drm/imagination/pvr_job.c
index 618503a212a7..1cdb3cfd058d 100644
--- a/drivers/gpu/drm/imagination/pvr_job.c
+++ b/drivers/gpu/drm/imagination/pvr_job.c
@@ -597,8 +597,6 @@ update_job_resvs_for_each(struct pvr_job_data *job_data, u32 job_count)
static bool can_combine_jobs(struct pvr_job *a, struct pvr_job *b)
{
struct pvr_job *geom_job = a, *frag_job = b;
- struct dma_fence *fence;
- unsigned long index;
/* Geometry and fragment jobs can be combined if they are queued to the
* same context and targeting the same HWRT.
@@ -609,13 +607,9 @@ static bool can_combine_jobs(struct pvr_job *a, struct pvr_job *b)
a->hwrt != b->hwrt)
return false;
- xa_for_each(&frag_job->base.dependencies, index, fence) {
- /* We combine when we see an explicit geom -> frag dep. */
- if (&geom_job->base.s_fence->scheduled == fence)
- return true;
- }
-
- return false;
+ /* We combine when we see an explicit geom -> frag dep. */
+ return drm_sched_job_has_dependency(&frag_job->base,
+ &geom_job->base.s_fence->scheduled);
}
static struct dma_fence *
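[Annotation: drm_sched_job_has_dependency() walks the job's dependency xarray internally, replacing the open-coded xa_for_each loop above. A minimal sketch of the helper with illustrative names.]

#include <drm/gpu_scheduler.h>

/* True if 'job' explicitly depends on 'prereq' having been scheduled. */
static bool demo_depends_on(struct drm_sched_job *job,
                            struct drm_sched_job *prereq)
{
        return drm_sched_job_has_dependency(job, &prereq->s_fence->scheduled);
}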
diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c
index 43411be930a2..eba69309bb6c 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.c
+++ b/drivers/gpu/drm/imagination/pvr_queue.c
@@ -1220,6 +1220,17 @@ struct pvr_queue *pvr_queue_create(struct pvr_context *ctx,
},
};
struct pvr_device *pvr_dev = ctx->pvr_dev;
+ const struct drm_sched_init_args sched_args = {
+ .ops = &pvr_queue_sched_ops,
+ .submit_wq = pvr_dev->sched_wq,
+ .num_rqs = 1,
+ .credit_limit = 64 * 1024,
+ .hang_limit = 1,
+ .timeout = msecs_to_jiffies(500),
+ .timeout_wq = pvr_dev->sched_wq,
+ .name = "pvr-queue",
+ .dev = pvr_dev->base.dev,
+ };
struct drm_gpu_scheduler *sched;
struct pvr_queue *queue;
int ctx_state_size, err;
@@ -1292,12 +1303,7 @@ struct pvr_queue *pvr_queue_create(struct pvr_context *ctx,
queue->timeline_ufo.value = cpu_map;
- err = drm_sched_init(&queue->scheduler,
- &pvr_queue_sched_ops,
- pvr_dev->sched_wq, 1, 64 * 1024, 1,
- msecs_to_jiffies(500),
- pvr_dev->sched_wq, NULL, "pvr-queue",
- pvr_dev->base.dev);
+ err = drm_sched_init(&queue->scheduler, &sched_args);
if (err)
goto err_release_ufo;
diff --git a/drivers/gpu/drm/imx/ipuv3/imx-tve.c b/drivers/gpu/drm/imx/ipuv3/imx-tve.c
index 3a3c8a195119..c5629e155d25 100644
--- a/drivers/gpu/drm/imx/ipuv3/imx-tve.c
+++ b/drivers/gpu/drm/imx/ipuv3/imx-tve.c
@@ -217,7 +217,7 @@ static int imx_tve_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
imx_tve_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct imx_tve *tve = con_to_tve(connector);
unsigned long rate;
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index c23ee2d214de..20b93fff0239 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -229,7 +229,7 @@ static int ingenic_drm_update_pixclk(struct notifier_block *nb,
}
static void ingenic_drm_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct ingenic_drm *priv = drm_device_get_priv(bridge->dev);
@@ -260,7 +260,7 @@ static void ingenic_drm_crtc_atomic_enable(struct drm_crtc *crtc,
}
static void ingenic_drm_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct ingenic_drm *priv = drm_device_get_priv(bridge->dev);
unsigned int var;
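[Annotation: the ingenic and mediatek bridge changes track the drm_bridge core passing the full drm_atomic_state to the atomic enable/disable hooks instead of the bridge's old state; bridges that need their connector now look it up from that state. A hedged sketch of the new callback shape with an illustrative name.]

#include <drm/drm_atomic.h>
#include <drm/drm_bridge.h>

static void demo_bridge_atomic_enable(struct drm_bridge *bridge,
                                      struct drm_atomic_state *state)
{
        struct drm_connector *connector;

        /* Old callbacks received a drm_bridge_state and used
         * old_state->base.state to reach the atomic state.
         */
        connector = drm_atomic_get_new_connector_for_encoder(state,
                                                             bridge->encoder);
        if (!connector)
                return;

        /* ... program the bridge for 'connector' ... */
}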
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index b40c90e97d7e..825135a26aa4 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -515,18 +515,22 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
{
unsigned int timeout = lima_sched_timeout_ms > 0 ?
lima_sched_timeout_ms : 10000;
+ const struct drm_sched_init_args args = {
+ .ops = &lima_sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = 1,
+ .hang_limit = lima_job_hang_limit,
+ .timeout = msecs_to_jiffies(timeout),
+ .name = name,
+ .dev = pipe->ldev->dev,
+ };
pipe->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&pipe->fence_lock);
INIT_WORK(&pipe->recover_work, lima_sched_recover_work);
- return drm_sched_init(&pipe->base, &lima_sched_ops, NULL,
- DRM_SCHED_PRIORITY_COUNT,
- 1,
- lima_job_hang_limit,
- msecs_to_jiffies(timeout), NULL,
- NULL, name, pipe->ldev->dev);
+ return drm_sched_init(&pipe->base, &args);
}
void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
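[Annotation: the imagination and lima conversions above follow the same recipe: the long positional drm_sched_init() argument list becomes a designated-initialiser struct, so optional members (submit_wq, timeout_wq, score) can simply be omitted. A condensed sketch assuming a driver-provided ops table and device.]

#include <linux/jiffies.h>
#include <drm/gpu_scheduler.h>

static int demo_sched_init(struct drm_gpu_scheduler *sched,
                           const struct drm_sched_backend_ops *ops,
                           struct device *dev)
{
        const struct drm_sched_init_args args = {
                .ops            = ops,
                .num_rqs        = DRM_SCHED_PRIORITY_COUNT,
                .credit_limit   = 1,
                .timeout        = msecs_to_jiffies(500),
                .name           = "demo-sched",
                .dev            = dev,
        };

        return drm_sched_init(sched, &args);
}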
diff --git a/drivers/gpu/drm/loongson/lsdc_plane.c b/drivers/gpu/drm/loongson/lsdc_plane.c
index d227a2c1dcf1..aa9a97f9c4dc 100644
--- a/drivers/gpu/drm/loongson/lsdc_plane.c
+++ b/drivers/gpu/drm/loongson/lsdc_plane.c
@@ -171,7 +171,8 @@ static const struct drm_plane_helper_funcs lsdc_primary_helper_funcs = {
};
static int lsdc_cursor_plane_atomic_async_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
+ struct drm_atomic_state *state,
+ bool flip)
{
struct drm_plane_state *new_state;
struct drm_crtc_state *crtc_state;
diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c
index 5674f5707cca..8f6fba4217ec 100644
--- a/drivers/gpu/drm/mediatek/mtk_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_crtc.c
@@ -620,13 +620,16 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
+ goto update_config_out;
}
-#else
+#endif
spin_lock_irqsave(&mtk_crtc->config_lock, flags);
mtk_crtc->config_updating = false;
spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
-#endif
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+update_config_out:
+#endif
mutex_unlock(&mtk_crtc->hw_lock);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index dd8433a38282..39c7de4cdcc1 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -96,7 +96,6 @@ static int mtk_disp_color_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_disp_color *priv;
- struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -108,8 +107,7 @@ static int mtk_disp_color_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(priv->clk),
"failed to get color clk\n");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(dev, res);
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs))
return dev_err_probe(dev, PTR_ERR(priv->regs),
"failed to ioremap color\n");
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
index b17b11d93846..8afd15006df2 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
@@ -256,7 +256,6 @@ static int mtk_disp_gamma_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_disp_gamma *priv;
- struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -268,8 +267,7 @@ static int mtk_disp_gamma_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(priv->clk),
"failed to get gamma clk\n");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(dev, res);
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs))
return dev_err_probe(dev, PTR_ERR(priv->regs),
"failed to ioremap gamma\n");
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_merge.c b/drivers/gpu/drm/mediatek/mtk_disp_merge.c
index 563b1b248fbb..b174dda091d3 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_merge.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_merge.c
@@ -306,7 +306,6 @@ static const struct component_ops mtk_disp_merge_component_ops = {
static int mtk_disp_merge_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct mtk_disp_merge *priv;
int ret;
@@ -314,8 +313,7 @@ static int mtk_disp_merge_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(dev, res);
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs))
return dev_err_probe(dev, PTR_ERR(priv->regs),
"failed to ioremap merge\n");
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index df82cea4bb79..d0581c4e3c99 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -604,7 +604,6 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_disp_ovl *priv;
- struct resource *res;
int irq;
int ret;
@@ -621,8 +620,7 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(priv->clk),
"failed to get ovl clk\n");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(dev, res);
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs))
return dev_err_probe(dev, PTR_ERR(priv->regs),
"failed to ioremap ovl\n");
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index bf47790e4d6b..c9d41d75e7f2 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -313,7 +313,6 @@ static int mtk_disp_rdma_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_disp_rdma *priv;
- struct resource *res;
int irq;
int ret;
@@ -330,8 +329,7 @@ static int mtk_disp_rdma_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(priv->clk),
"failed to get rdma clk\n");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(dev, res);
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs))
return dev_err_probe(dev, PTR_ERR(priv->regs),
"failed to ioremap rdma\n");
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index cd385ba4c66a..ccdc57cef3ea 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -1766,7 +1766,7 @@ static int mtk_dp_parse_capabilities(struct mtk_dp *mtk_dp)
ret = drm_dp_dpcd_readb(&mtk_dp->aux, DP_MSTM_CAP, &val);
if (ret < 1) {
- drm_err(mtk_dp->drm_dev, "Read mstm cap failed\n");
+ dev_err(mtk_dp->dev, "Read mstm cap failed: %zd\n", ret);
return ret == 0 ? -EIO : ret;
}
@@ -1776,7 +1776,7 @@ static int mtk_dp_parse_capabilities(struct mtk_dp *mtk_dp)
DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0,
&val);
if (ret < 1) {
- drm_err(mtk_dp->drm_dev, "Read irq vector failed\n");
+ dev_err(mtk_dp->dev, "Read irq vector failed: %zd\n", ret);
return ret == 0 ? -EIO : ret;
}
@@ -2059,7 +2059,7 @@ static int mtk_dp_wait_hpd_asserted(struct drm_dp_aux *mtk_aux, unsigned long wa
ret = mtk_dp_parse_capabilities(mtk_dp);
if (ret) {
- drm_err(mtk_dp->drm_dev, "Can't parse capabilities\n");
+ dev_err(mtk_dp->dev, "Can't parse capabilities: %d\n", ret);
return ret;
}
@@ -2350,12 +2350,12 @@ static void mtk_dp_bridge_detach(struct drm_bridge *bridge)
}
static void mtk_dp_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
int ret;
- mtk_dp->conn = drm_atomic_get_new_connector_for_encoder(old_state->base.state,
+ mtk_dp->conn = drm_atomic_get_new_connector_for_encoder(state,
bridge->encoder);
if (!mtk_dp->conn) {
drm_err(mtk_dp->drm_dev,
@@ -2400,7 +2400,7 @@ power_off_aux:
}
static void mtk_dp_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 1864eb02dbf5..0fd13e6dd3f1 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -4,8 +4,10 @@
* Author: Jie Qiu <jie.qiu@mediatek.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/media-bus-format.h>
@@ -116,9 +118,15 @@ struct mtk_dpi_yc_limit {
u16 c_bottom;
};
+struct mtk_dpi_factor {
+ u32 clock;
+ u8 factor;
+};
+
/**
* struct mtk_dpi_conf - Configuration of mediatek dpi.
- * @cal_factor: Callback function to calculate factor value.
+ * @dpi_factor: SoC-specific pixel clock PLL factor values.
+ * @num_dpi_factor: Number of pixel clock PLL factor values.
* @reg_h_fre_con: Register address of frequency control.
* @max_clock_khz: Max clock frequency supported for this SoCs in khz units.
* @edge_sel_en: Enable of edge selection.
@@ -127,19 +135,24 @@ struct mtk_dpi_yc_limit {
* @is_ck_de_pol: Support CK/DE polarity.
* @swap_input_support: Support input swap function.
* @support_direct_pin: IP supports direct connection to dpi panels.
- * @input_2pixel: Input pixel of dp_intf is 2 pixel per round, so enable this
- * config to enable this feature.
* @dimension_mask: Mask used for HWIDTH, HPORCH, VSYNC_WIDTH and VSYNC_PORCH
* (no shift).
* @hvsize_mask: Mask of HSIZE and VSIZE mask (no shift).
* @channel_swap_shift: Shift value of channel swap.
* @yuv422_en_bit: Enable bit of yuv422.
* @csc_enable_bit: Enable bit of CSC.
+ * @input_2p_en_bit: Enable bit for input two pixel per round feature.
+ * If present, implies that the feature must be enabled.
* @pixels_per_iter: Quantity of transferred pixels per iteration.
* @edge_cfg_in_mmsys: If the edge configuration for DPI's output needs to be set in MMSYS.
+ * @clocked_by_hdmi: HDMI IP outputs clock to dpi_pixel_clk input clock, needed
+ * for DPI registers access.
+ * @output_1pixel: Enable outputting one pixel per round; if the input is two pixel per
+ * round, the DPI hardware will internally transform it to 1T1P.
*/
struct mtk_dpi_conf {
- unsigned int (*cal_factor)(int clock);
+ const struct mtk_dpi_factor *dpi_factor;
+ const u8 num_dpi_factor;
u32 reg_h_fre_con;
u32 max_clock_khz;
bool edge_sel_en;
@@ -148,14 +161,16 @@ struct mtk_dpi_conf {
bool is_ck_de_pol;
bool swap_input_support;
bool support_direct_pin;
- bool input_2pixel;
u32 dimension_mask;
u32 hvsize_mask;
u32 channel_swap_shift;
u32 yuv422_en_bit;
u32 csc_enable_bit;
+ u32 input_2p_en_bit;
u32 pixels_per_iter;
bool edge_cfg_in_mmsys;
+ bool clocked_by_hdmi;
+ bool output_1pixel;
};
static void mtk_dpi_mask(struct mtk_dpi *dpi, u32 offset, u32 val, u32 mask)
@@ -166,6 +181,18 @@ static void mtk_dpi_mask(struct mtk_dpi *dpi, u32 offset, u32 val, u32 mask)
writel(tmp, dpi->regs + offset);
}
+static void mtk_dpi_test_pattern_en(struct mtk_dpi *dpi, u8 type, bool enable)
+{
+ u32 val;
+
+ if (enable)
+ val = FIELD_PREP(DPI_PAT_SEL, type) | DPI_PAT_EN;
+ else
+ val = 0;
+
+ mtk_dpi_mask(dpi, DPI_PATTERN0, val, DPI_PAT_SEL | DPI_PAT_EN);
+}
+
static void mtk_dpi_sw_reset(struct mtk_dpi *dpi, bool reset)
{
mtk_dpi_mask(dpi, DPI_RET, reset ? RST : 0, RST);
@@ -410,12 +437,13 @@ static void mtk_dpi_config_swap_input(struct mtk_dpi *dpi, bool enable)
static void mtk_dpi_config_2n_h_fre(struct mtk_dpi *dpi)
{
- mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, H_FRE_2N, H_FRE_2N);
+ if (dpi->conf->reg_h_fre_con)
+ mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, H_FRE_2N, H_FRE_2N);
}
static void mtk_dpi_config_disable_edge(struct mtk_dpi *dpi)
{
- if (dpi->conf->edge_sel_en)
+ if (dpi->conf->edge_sel_en && dpi->conf->reg_h_fre_con)
mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, 0, EDGE_SEL_EN);
}
@@ -471,6 +499,7 @@ static void mtk_dpi_power_off(struct mtk_dpi *dpi)
mtk_dpi_disable(dpi);
clk_disable_unprepare(dpi->pixel_clk);
+ clk_disable_unprepare(dpi->tvd_clk);
clk_disable_unprepare(dpi->engine_clk);
}
@@ -487,6 +516,12 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi)
goto err_refcount;
}
+ ret = clk_prepare_enable(dpi->tvd_clk);
+ if (ret) {
+ dev_err(dpi->dev, "Failed to enable tvd pll: %d\n", ret);
+ goto err_engine;
+ }
+
ret = clk_prepare_enable(dpi->pixel_clk);
if (ret) {
dev_err(dpi->dev, "Failed to enable pixel clock: %d\n", ret);
@@ -496,32 +531,39 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi)
return 0;
err_pixel:
+ clk_disable_unprepare(dpi->tvd_clk);
+err_engine:
clk_disable_unprepare(dpi->engine_clk);
err_refcount:
dpi->refcount--;
return ret;
}
-static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
- struct drm_display_mode *mode)
+static unsigned int mtk_dpi_calculate_factor(struct mtk_dpi *dpi, int mode_clk)
+{
+ const struct mtk_dpi_factor *dpi_factor = dpi->conf->dpi_factor;
+ int i;
+
+ for (i = 0; i < dpi->conf->num_dpi_factor; i++) {
+ if (mode_clk <= dpi_factor[i].clock)
+ return dpi_factor[i].factor;
+ }
+
+ /* If no entry matches, fall back to the lowest possible factor */
+ return dpi_factor[dpi->conf->num_dpi_factor - 1].factor;
+}
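A minimal standalone mirror, not part of the patch, of the table-driven lookup that replaces the per-SoC cal_factor() callbacks removed later in this diff. The table values are copied from dpi_factor_mt8183; the mode clock is in kHz, as in the driver.

/* Illustrative sketch only, not part of the patch. */
#include <stdint.h>
#include <stdio.h>

struct mtk_dpi_factor {
	uint32_t clock;   /* kHz, inclusive upper bound for this factor */
	uint32_t factor;
};

static const struct mtk_dpi_factor dpi_factor_mt8183[] = {
	{ 27000, 8 }, { 167000, 4 }, { UINT32_MAX, 2 }
};

static unsigned int calculate_factor(const struct mtk_dpi_factor *tbl,
				     unsigned int num, unsigned int mode_clk)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (mode_clk <= tbl[i].clock)
			return tbl[i].factor;

	/* No entry matched: fall back to the last (lowest) factor. */
	return tbl[num - 1].factor;
}

int main(void)
{
	unsigned int clocks[] = { 25175, 148500, 297000 };

	for (unsigned int i = 0; i < 3; i++)
		printf("%u kHz -> factor %u\n", clocks[i],
		       calculate_factor(dpi_factor_mt8183, 3, clocks[i]));
	return 0;
}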
+
+static void mtk_dpi_set_pixel_clk(struct mtk_dpi *dpi, struct videomode *vm, int mode_clk)
{
- struct mtk_dpi_polarities dpi_pol;
- struct mtk_dpi_sync_param hsync;
- struct mtk_dpi_sync_param vsync_lodd = { 0 };
- struct mtk_dpi_sync_param vsync_leven = { 0 };
- struct mtk_dpi_sync_param vsync_rodd = { 0 };
- struct mtk_dpi_sync_param vsync_reven = { 0 };
- struct videomode vm = { 0 };
unsigned long pll_rate;
unsigned int factor;
/* Keep pll_rate within the valid range of the tvdpll (1 GHz ~ 2 GHz) */
- factor = dpi->conf->cal_factor(mode->clock);
- drm_display_mode_to_videomode(mode, &vm);
- pll_rate = vm.pixelclock * factor;
+ factor = mtk_dpi_calculate_factor(dpi, mode_clk);
+ pll_rate = vm->pixelclock * factor;
dev_dbg(dpi->dev, "Want PLL %lu Hz, pixel clock %lu Hz\n",
- pll_rate, vm.pixelclock);
+ pll_rate, vm->pixelclock);
clk_set_rate(dpi->tvd_clk, pll_rate);
pll_rate = clk_get_rate(dpi->tvd_clk);
@@ -531,20 +573,36 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
* pixels for each iteration: divide the clock by this number and
* adjust the display porches accordingly.
*/
- vm.pixelclock = pll_rate / factor;
- vm.pixelclock /= dpi->conf->pixels_per_iter;
+ vm->pixelclock = pll_rate / factor;
+ vm->pixelclock /= dpi->conf->pixels_per_iter;
if ((dpi->output_fmt == MEDIA_BUS_FMT_RGB888_2X12_LE) ||
(dpi->output_fmt == MEDIA_BUS_FMT_RGB888_2X12_BE))
- clk_set_rate(dpi->pixel_clk, vm.pixelclock * 2);
+ clk_set_rate(dpi->pixel_clk, vm->pixelclock * 2);
else
- clk_set_rate(dpi->pixel_clk, vm.pixelclock);
-
+ clk_set_rate(dpi->pixel_clk, vm->pixelclock);
- vm.pixelclock = clk_get_rate(dpi->pixel_clk);
+ vm->pixelclock = clk_get_rate(dpi->pixel_clk);
dev_dbg(dpi->dev, "Got PLL %lu Hz, pixel clock %lu Hz\n",
- pll_rate, vm.pixelclock);
+ pll_rate, vm->pixelclock);
+}
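A short worked example, not part of the patch, of the clock arithmetic mtk_dpi_set_pixel_clk() performs. The real driver rounds through clk_set_rate()/clk_get_rate(); here the requested rates are assumed to be granted exactly, and the example numbers are illustrative only.

/* Illustrative sketch only, not part of the patch. */
#include <stdio.h>

int main(void)
{
	unsigned long pixelclock = 148500000UL; /* mode pixel clock, Hz     */
	unsigned int factor = 8;                /* from the factor table    */
	unsigned int pixels_per_iter = 4;       /* e.g. 4 on MT8195 dp_intf */

	unsigned long pll_rate = pixelclock * factor;
	unsigned long dpi_clk = pll_rate / factor / pixels_per_iter;

	printf("tvdpll target = %lu Hz, dpi pixel clock = %lu Hz\n",
	       pll_rate, dpi_clk);
	/*
	 * For the RGB888_2X12 formats the driver requests twice this rate,
	 * since each pixel is clocked out as two 12-bit halves.
	 */
	printf("dual-edge rate = %lu Hz\n", dpi_clk * 2);
	return 0;
}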
+
+static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
+ struct drm_display_mode *mode)
+{
+ struct mtk_dpi_polarities dpi_pol;
+ struct mtk_dpi_sync_param hsync;
+ struct mtk_dpi_sync_param vsync_lodd = { 0 };
+ struct mtk_dpi_sync_param vsync_leven = { 0 };
+ struct mtk_dpi_sync_param vsync_rodd = { 0 };
+ struct mtk_dpi_sync_param vsync_reven = { 0 };
+ struct videomode vm = { 0 };
+
+ drm_display_mode_to_videomode(mode, &vm);
+
+ if (!dpi->conf->clocked_by_hdmi)
+ mtk_dpi_set_pixel_clk(dpi, &vm, mode->clock);
dpi_pol.ck_pol = MTK_DPI_POLARITY_FALLING;
dpi_pol.de_pol = MTK_DPI_POLARITY_RISING;
@@ -607,12 +665,18 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
if (dpi->conf->support_direct_pin) {
mtk_dpi_config_yc_map(dpi, dpi->yc_map);
mtk_dpi_config_2n_h_fre(dpi);
- mtk_dpi_dual_edge(dpi);
+
+ /* DPI can connect to either an external bridge or the internal HDMI encoder */
+ if (dpi->conf->output_1pixel)
+ mtk_dpi_mask(dpi, DPI_CON, DPI_OUTPUT_1T1P_EN, DPI_OUTPUT_1T1P_EN);
+ else
+ mtk_dpi_dual_edge(dpi);
+
mtk_dpi_config_disable_edge(dpi);
}
- if (dpi->conf->input_2pixel) {
- mtk_dpi_mask(dpi, DPI_CON, DPINTF_INPUT_2P_EN,
- DPINTF_INPUT_2P_EN);
+ if (dpi->conf->input_2p_en_bit) {
+ mtk_dpi_mask(dpi, DPI_CON, dpi->conf->input_2p_en_bit,
+ dpi->conf->input_2p_en_bit);
}
mtk_dpi_sw_reset(dpi, false);
@@ -767,6 +831,99 @@ mtk_dpi_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
+static int mtk_dpi_debug_tp_show(struct seq_file *m, void *arg)
+{
+ struct mtk_dpi *dpi = m->private;
+ bool en;
+ u32 val;
+
+ if (!dpi)
+ return -EINVAL;
+
+ val = readl(dpi->regs + DPI_PATTERN0);
+ en = val & DPI_PAT_EN;
+ val = FIELD_GET(DPI_PAT_SEL, val);
+
+ seq_printf(m, "DPI Test Pattern: %s\n", en ? "Enabled" : "Disabled");
+
+ if (en) {
+ seq_printf(m, "Internal pattern %d: ", val);
+ switch (val) {
+ case 0:
+ seq_puts(m, "256 Vertical Gray\n");
+ break;
+ case 1:
+ seq_puts(m, "1024 Vertical Gray\n");
+ break;
+ case 2:
+ seq_puts(m, "256 Horizontal Gray\n");
+ break;
+ case 3:
+ seq_puts(m, "1024 Horizontal Gray\n");
+ break;
+ case 4:
+ seq_puts(m, "Vertical Color bars\n");
+ break;
+ case 6:
+ seq_puts(m, "Frame border\n");
+ break;
+ case 7:
+ seq_puts(m, "Dot moire\n");
+ break;
+ default:
+ seq_puts(m, "Invalid selection\n");
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static ssize_t mtk_dpi_debug_tp_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ u32 en, type;
+ char buf[6];
+
+ if (!m || !m->private || *offp || len > sizeof(buf) - 1)
+ return -EINVAL;
+
+ memset(buf, 0, sizeof(buf));
+ if (copy_from_user(buf, ubuf, len))
+ return -EFAULT;
+
+ if (sscanf(buf, "%u %u", &en, &type) != 2)
+ return -EINVAL;
+
+ if (en < 0 || en > 1 || type < 0 || type > 7)
+ return -EINVAL;
+
+ mtk_dpi_test_pattern_en((struct mtk_dpi *)m->private, type, en);
+ return len;
+}
+
+static int mtk_dpi_debug_tp_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mtk_dpi_debug_tp_show, inode->i_private);
+}
+
+static const struct file_operations mtk_dpi_debug_tp_fops = {
+ .owner = THIS_MODULE,
+ .open = mtk_dpi_debug_tp_open,
+ .read = seq_read,
+ .write = mtk_dpi_debug_tp_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void mtk_dpi_debugfs_init(struct drm_bridge *bridge, struct dentry *root)
+{
+ struct mtk_dpi *dpi = bridge_to_dpi(bridge);
+
+ debugfs_create_file("dpi_test_pattern", 0640, root, dpi, &mtk_dpi_debug_tp_fops);
+}
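For reference, a hypothetical userspace sketch, not part of the patch, of driving the new debugfs file. The write handler above parses "enable type" with sscanf("%u %u"); the path shown is an assumption and depends on where the DRM core places the bridge's debugfs directory on a given board.

/* Illustrative sketch only, not part of the patch. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical path; adjust to the actual DRM debugfs location. */
	const char *path = "/sys/kernel/debug/dri/0/dpi_test_pattern";
	const char *cmd = "1 4\n"; /* enable=1, type=4: vertical color bars */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;
}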
+
static const struct drm_bridge_funcs mtk_dpi_bridge_funcs = {
.attach = mtk_dpi_bridge_attach,
.mode_set = mtk_dpi_bridge_mode_set,
@@ -779,20 +936,23 @@ static const struct drm_bridge_funcs mtk_dpi_bridge_funcs = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
+ .debugfs_init = mtk_dpi_debugfs_init,
};
void mtk_dpi_start(struct device *dev)
{
struct mtk_dpi *dpi = dev_get_drvdata(dev);
- mtk_dpi_power_on(dpi);
+ if (!dpi->conf->clocked_by_hdmi)
+ mtk_dpi_power_on(dpi);
}
void mtk_dpi_stop(struct device *dev)
{
struct mtk_dpi *dpi = dev_get_drvdata(dev);
- mtk_dpi_power_off(dpi);
+ if (!dpi->conf->clocked_by_hdmi)
+ mtk_dpi_power_off(dpi);
}
unsigned int mtk_dpi_encoder_index(struct device *dev)
@@ -857,48 +1017,6 @@ static const struct component_ops mtk_dpi_component_ops = {
.unbind = mtk_dpi_unbind,
};
-static unsigned int mt8173_calculate_factor(int clock)
-{
- if (clock <= 27000)
- return 3 << 4;
- else if (clock <= 84000)
- return 3 << 3;
- else if (clock <= 167000)
- return 3 << 2;
- else
- return 3 << 1;
-}
-
-static unsigned int mt2701_calculate_factor(int clock)
-{
- if (clock <= 64000)
- return 4;
- else if (clock <= 128000)
- return 2;
- else
- return 1;
-}
-
-static unsigned int mt8183_calculate_factor(int clock)
-{
- if (clock <= 27000)
- return 8;
- else if (clock <= 167000)
- return 4;
- else
- return 2;
-}
-
-static unsigned int mt8195_dpintf_calculate_factor(int clock)
-{
- if (clock < 70000)
- return 4;
- else if (clock < 200000)
- return 2;
- else
- return 1;
-}
-
static const u32 mt8173_output_fmts[] = {
MEDIA_BUS_FMT_RGB888_1X24,
};
@@ -913,8 +1031,25 @@ static const u32 mt8195_output_fmts[] = {
MEDIA_BUS_FMT_YUYV8_1X16,
};
+static const struct mtk_dpi_factor dpi_factor_mt2701[] = {
+ { 64000, 4 }, { 128000, 2 }, { U32_MAX, 1 }
+};
+
+static const struct mtk_dpi_factor dpi_factor_mt8173[] = {
+ { 27000, 48 }, { 84000, 24 }, { 167000, 12 }, { U32_MAX, 6 }
+};
+
+static const struct mtk_dpi_factor dpi_factor_mt8183[] = {
+ { 27000, 8 }, { 167000, 4 }, { U32_MAX, 2 }
+};
+
+static const struct mtk_dpi_factor dpi_factor_mt8195_dp_intf[] = {
+ { 70000 - 1, 4 }, { 200000 - 1, 2 }, { U32_MAX, 1 }
+};
+
static const struct mtk_dpi_conf mt8173_conf = {
- .cal_factor = mt8173_calculate_factor,
+ .dpi_factor = dpi_factor_mt8173,
+ .num_dpi_factor = ARRAY_SIZE(dpi_factor_mt8173),
.reg_h_fre_con = 0xe0,
.max_clock_khz = 300000,
.output_fmts = mt8173_output_fmts,
@@ -931,7 +1066,8 @@ static const struct mtk_dpi_conf mt8173_conf = {
};
static const struct mtk_dpi_conf mt2701_conf = {
- .cal_factor = mt2701_calculate_factor,
+ .dpi_factor = dpi_factor_mt2701,
+ .num_dpi_factor = ARRAY_SIZE(dpi_factor_mt2701),
.reg_h_fre_con = 0xb0,
.edge_sel_en = true,
.max_clock_khz = 150000,
@@ -949,7 +1085,8 @@ static const struct mtk_dpi_conf mt2701_conf = {
};
static const struct mtk_dpi_conf mt8183_conf = {
- .cal_factor = mt8183_calculate_factor,
+ .dpi_factor = dpi_factor_mt8183,
+ .num_dpi_factor = ARRAY_SIZE(dpi_factor_mt8183),
.reg_h_fre_con = 0xe0,
.max_clock_khz = 100000,
.output_fmts = mt8183_output_fmts,
@@ -966,7 +1103,8 @@ static const struct mtk_dpi_conf mt8183_conf = {
};
static const struct mtk_dpi_conf mt8186_conf = {
- .cal_factor = mt8183_calculate_factor,
+ .dpi_factor = dpi_factor_mt8183,
+ .num_dpi_factor = ARRAY_SIZE(dpi_factor_mt8183),
.reg_h_fre_con = 0xe0,
.max_clock_khz = 150000,
.output_fmts = mt8183_output_fmts,
@@ -984,7 +1122,8 @@ static const struct mtk_dpi_conf mt8186_conf = {
};
static const struct mtk_dpi_conf mt8192_conf = {
- .cal_factor = mt8183_calculate_factor,
+ .dpi_factor = dpi_factor_mt8183,
+ .num_dpi_factor = ARRAY_SIZE(dpi_factor_mt8183),
.reg_h_fre_con = 0xe0,
.max_clock_khz = 150000,
.output_fmts = mt8183_output_fmts,
@@ -1000,18 +1139,37 @@ static const struct mtk_dpi_conf mt8192_conf = {
.csc_enable_bit = CSC_ENABLE,
};
+static const struct mtk_dpi_conf mt8195_conf = {
+ .max_clock_khz = 594000,
+ .output_fmts = mt8183_output_fmts,
+ .num_output_fmts = ARRAY_SIZE(mt8183_output_fmts),
+ .pixels_per_iter = 1,
+ .is_ck_de_pol = true,
+ .swap_input_support = true,
+ .support_direct_pin = true,
+ .dimension_mask = HPW_MASK,
+ .hvsize_mask = HSIZE_MASK,
+ .channel_swap_shift = CH_SWAP,
+ .yuv422_en_bit = YUV422_EN,
+ .csc_enable_bit = CSC_ENABLE,
+ .input_2p_en_bit = DPI_INPUT_2P_EN,
+ .clocked_by_hdmi = true,
+ .output_1pixel = true,
+};
+
static const struct mtk_dpi_conf mt8195_dpintf_conf = {
- .cal_factor = mt8195_dpintf_calculate_factor,
+ .dpi_factor = dpi_factor_mt8195_dp_intf,
+ .num_dpi_factor = ARRAY_SIZE(dpi_factor_mt8195_dp_intf),
.max_clock_khz = 600000,
.output_fmts = mt8195_output_fmts,
.num_output_fmts = ARRAY_SIZE(mt8195_output_fmts),
.pixels_per_iter = 4,
- .input_2pixel = true,
.dimension_mask = DPINTF_HPW_MASK,
.hvsize_mask = DPINTF_HSIZE_MASK,
.channel_swap_shift = DPINTF_CH_SWAP,
.yuv422_en_bit = DPINTF_YUV422_EN,
.csc_enable_bit = DPINTF_CSC_ENABLE,
+ .input_2p_en_bit = DPINTF_INPUT_2P_EN,
};
static int mtk_dpi_probe(struct platform_device *pdev)
@@ -1102,6 +1260,7 @@ static const struct of_device_id mtk_dpi_of_ids[] = {
{ .compatible = "mediatek,mt8188-dp-intf", .data = &mt8195_dpintf_conf },
{ .compatible = "mediatek,mt8192-dpi", .data = &mt8192_conf },
{ .compatible = "mediatek,mt8195-dp-intf", .data = &mt8195_dpintf_conf },
+ { .compatible = "mediatek,mt8195-dpi", .data = &mt8195_conf },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mtk_dpi_of_ids);
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi_regs.h b/drivers/gpu/drm/mediatek/mtk_dpi_regs.h
index 62bd4931b344..23eeefce8fd2 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi_regs.h
+++ b/drivers/gpu/drm/mediatek/mtk_dpi_regs.h
@@ -40,6 +40,11 @@
#define FAKE_DE_LEVEN BIT(21)
#define FAKE_DE_RODD BIT(22)
#define FAKE_DE_REVEN BIT(23)
+
+/* DPI_CON: DPI instances */
+#define DPI_OUTPUT_1T1P_EN BIT(24)
+#define DPI_INPUT_2P_EN BIT(25)
+/* DPI_CON: DPINTF instances */
#define DPINTF_YUV422_EN BIT(24)
#define DPINTF_CSC_ENABLE BIT(26)
#define DPINTF_INPUT_2P_EN BIT(29)
@@ -235,4 +240,8 @@
#define MATRIX_SEL_RGB_TO_JPEG 0
#define MATRIX_SEL_RGB_TO_BT601 2
+#define DPI_PATTERN0 0xf00
+#define DPI_PAT_EN BIT(0)
+#define DPI_PAT_SEL GENMASK(6, 4)
+
#endif /* __MTK_DPI_REGS_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index f22ad2882697..74158b9d6503 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -327,6 +327,10 @@ static const struct mtk_mmsys_driver_data mt8195_vdosys1_driver_data = {
.min_height = 1,
};
+static const struct mtk_mmsys_driver_data mt8365_mmsys_driver_data = {
+ .mmsys_dev_num = 1,
+};
+
static const struct of_device_id mtk_drm_of_ids[] = {
{ .compatible = "mediatek,mt2701-mmsys",
.data = &mt2701_mmsys_driver_data},
@@ -354,6 +358,8 @@ static const struct of_device_id mtk_drm_of_ids[] = {
.data = &mt8195_vdosys0_driver_data},
{ .compatible = "mediatek,mt8195-vdosys1",
.data = &mt8195_vdosys1_driver_data},
+ { .compatible = "mediatek,mt8365-mmsys",
+ .data = &mt8365_mmsys_driver_data},
{ }
};
MODULE_DEVICE_TABLE(of, mtk_drm_of_ids);
@@ -754,6 +760,8 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
.data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt8195-disp-mutex",
.data = (void *)MTK_DISP_MUTEX },
+ { .compatible = "mediatek,mt8365-disp-mutex",
+ .data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt8173-disp-od",
.data = (void *)MTK_DISP_OD },
{ .compatible = "mediatek,mt2701-disp-ovl",
@@ -810,6 +818,8 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
.data = (void *)MTK_DPI },
{ .compatible = "mediatek,mt8195-dp-intf",
.data = (void *)MTK_DP_INTF },
+ { .compatible = "mediatek,mt8195-dpi",
+ .data = (void *)MTK_DPI },
{ .compatible = "mediatek,mt2701-dsi",
.data = (void *)MTK_DSI },
{ .compatible = "mediatek,mt8173-dsi",
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 40752f232054..d1f407fb7eb1 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -826,7 +826,7 @@ static void mtk_dsi_bridge_mode_set(struct drm_bridge *bridge,
}
static void mtk_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct mtk_dsi *dsi = bridge_to_dsi(bridge);
@@ -834,7 +834,7 @@ static void mtk_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
}
static void mtk_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct mtk_dsi *dsi = bridge_to_dsi(bridge);
@@ -845,7 +845,7 @@ static void mtk_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
}
static void mtk_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct mtk_dsi *dsi = bridge_to_dsi(bridge);
int ret;
@@ -856,7 +856,7 @@ static void mtk_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
}
static void mtk_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct mtk_dsi *dsi = bridge_to_dsi(bridge);
@@ -1116,12 +1116,12 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct mtk_dsi *dsi = host_to_dsi(host);
- u32 recv_cnt, i;
+ ssize_t recv_cnt;
u8 read_data[16];
void *src_addr;
u8 irq_flag = CMD_DONE_INT_FLAG;
u32 dsi_mode;
- int ret;
+ int ret, i;
dsi_mode = readl(dsi->regs + DSI_MODE_CTRL);
if (dsi_mode & MODE) {
@@ -1170,7 +1170,7 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
if (recv_cnt)
memcpy(msg->rx_buf, src_addr, recv_cnt);
- DRM_INFO("dsi get %d byte data from the panel address(0x%x)\n",
+ DRM_INFO("dsi get %zd byte data from the panel address(0x%x)\n",
recv_cnt, *((u8 *)(msg->tx_buf)));
restore_dsi_mode:
@@ -1192,7 +1192,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
{
struct mtk_dsi *dsi;
struct device *dev = &pdev->dev;
- struct resource *regs;
int irq_num;
int ret;
@@ -1217,8 +1216,7 @@ static int mtk_dsi_probe(struct platform_device *pdev)
if (IS_ERR(dsi->hs_clk))
return dev_err_probe(dev, PTR_ERR(dsi->hs_clk), "Failed to get hs clock\n");
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dsi->regs = devm_ioremap_resource(dev, regs);
+ dsi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dsi->regs))
return dev_err_probe(dev, PTR_ERR(dsi->regs), "Failed to ioremap memory\n");
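Several hunks in this series make the same conversion, so here is a minimal sketch, not part of the patch, of the pattern they switch to: devm_platform_ioremap_resource() looks up the memory resource and ioremaps it in one managed call, removing the struct resource local. Names are generic placeholders.

/* Illustrative sketch only, not part of the patch. */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *regs;

	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return dev_err_probe(&pdev->dev, PTR_ERR(regs),
				     "Failed to ioremap registers\n");

	/* regs is unmapped automatically when the device is unbound. */
	return 0;
}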
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index ca82bc829cb9..06e4fac152b7 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -137,7 +137,7 @@ enum hdmi_aud_channel_swap_type {
struct hdmi_audio_param {
enum hdmi_audio_coding_type aud_codec;
- enum hdmi_audio_sample_size aud_sampe_size;
+ enum hdmi_audio_sample_size aud_sample_size;
enum hdmi_aud_input_type aud_input_type;
enum hdmi_aud_i2s_fmt aud_i2s_fmt;
enum hdmi_aud_mclk aud_mclk;
@@ -163,16 +163,10 @@ struct mtk_hdmi {
struct clk *clk[MTK_HDMI_CLK_COUNT];
struct drm_display_mode mode;
bool dvi_mode;
- u32 min_clock;
- u32 max_clock;
- u32 max_hdisplay;
- u32 max_vdisplay;
- u32 ibias;
- u32 ibias_up;
struct regmap *sys_regmap;
unsigned int sys_offset;
void __iomem *regs;
- enum hdmi_colorspace csp;
+ struct platform_device *audio_pdev;
struct hdmi_audio_param aud_param;
bool audio_enable;
bool powered;
@@ -987,15 +981,14 @@ static int mtk_hdmi_setup_avi_infoframe(struct mtk_hdmi *hdmi,
return 0;
}
-static int mtk_hdmi_setup_spd_infoframe(struct mtk_hdmi *hdmi,
- const char *vendor,
- const char *product)
+static int mtk_hdmi_setup_spd_infoframe(struct mtk_hdmi *hdmi)
{
+ struct drm_bridge *bridge = &hdmi->bridge;
struct hdmi_spd_infoframe frame;
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_SPD_INFOFRAME_SIZE];
ssize_t err;
- err = hdmi_spd_infoframe_init(&frame, vendor, product);
+ err = hdmi_spd_infoframe_init(&frame, bridge->vendor, bridge->product);
if (err < 0) {
dev_err(hdmi->dev, "Failed to initialize SPD infoframe: %zd\n",
err);
@@ -1072,9 +1065,8 @@ static int mtk_hdmi_output_init(struct mtk_hdmi *hdmi)
{
struct hdmi_audio_param *aud_param = &hdmi->aud_param;
- hdmi->csp = HDMI_COLORSPACE_RGB;
aud_param->aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
- aud_param->aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ aud_param->aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
aud_param->aud_input_type = HDMI_AUD_INPUT_I2S;
aud_param->aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
aud_param->aud_mclk = HDMI_AUD_MCLK_128FS;
@@ -1167,13 +1159,12 @@ static int mtk_hdmi_clk_enable_audio(struct mtk_hdmi *hdmi)
return ret;
ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_AUD_SPDIF]);
- if (ret)
- goto err;
+ if (ret) {
+ clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_BCLK]);
+ return ret;
+ }
return 0;
-err:
- clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_BCLK]);
- return ret;
}
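A minimal sketch, not part of the patch, of the shape of this change: the single-use error label is dropped and the first clock is unwound inline when the second fails. Names are generic placeholders.

/* Illustrative sketch only, not part of the patch. */
#include <linux/clk.h>

static int enable_two_clocks(struct clk *first, struct clk *second)
{
	int ret;

	ret = clk_prepare_enable(first);
	if (ret)
		return ret;

	ret = clk_prepare_enable(second);
	if (ret) {
		clk_disable_unprepare(first);
		return ret;
	}

	return 0;
}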
static void mtk_hdmi_clk_disable_audio(struct mtk_hdmi *hdmi)
@@ -1309,7 +1300,7 @@ static bool mtk_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
}
static void mtk_hdmi_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
@@ -1326,7 +1317,7 @@ static void mtk_hdmi_bridge_atomic_disable(struct drm_bridge *bridge,
}
static void mtk_hdmi_bridge_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
@@ -1362,7 +1353,7 @@ static void mtk_hdmi_bridge_mode_set(struct drm_bridge *bridge,
}
static void mtk_hdmi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
@@ -1377,15 +1368,14 @@ static void mtk_hdmi_send_infoframe(struct mtk_hdmi *hdmi,
{
mtk_hdmi_setup_audio_infoframe(hdmi);
mtk_hdmi_setup_avi_infoframe(hdmi, mode);
- mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI");
+ mtk_hdmi_setup_spd_infoframe(hdmi);
if (mode->flags & DRM_MODE_FLAG_3D_MASK)
mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode);
}
static void mtk_hdmi_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
- struct drm_atomic_state *state = old_state->base.state;
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
/* Retrieve the connector through the atomic state. */
@@ -1425,7 +1415,6 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
struct device_node *cec_np, *remote, *i2c_np;
struct platform_device *cec_pdev;
struct regmap *regmap;
- struct resource *mem;
int ret;
ret = mtk_hdmi_get_all_clk(hdmi, np);
@@ -1471,8 +1460,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
}
hdmi->sys_regmap = regmap;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hdmi->regs = devm_ioremap_resource(dev, mem);
+ hdmi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdmi->regs)) {
ret = PTR_ERR(hdmi->regs);
goto put_device;
@@ -1572,14 +1560,14 @@ static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
switch (daifmt->fmt) {
case HDMI_I2S:
hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
- hdmi_params.aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ hdmi_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
hdmi_params.aud_input_type = HDMI_AUD_INPUT_I2S;
hdmi_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
hdmi_params.aud_mclk = HDMI_AUD_MCLK_128FS;
break;
case HDMI_SPDIF:
hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
- hdmi_params.aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ hdmi_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
hdmi_params.aud_input_type = HDMI_AUD_INPUT_SPDIF;
break;
default:
@@ -1662,6 +1650,11 @@ static const struct hdmi_codec_ops mtk_hdmi_audio_codec_ops = {
.hook_plugged_cb = mtk_hdmi_audio_hook_plugged_cb,
};
+static void mtk_hdmi_unregister_audio_driver(void *data)
+{
+ platform_device_unregister(data);
+}
+
static int mtk_hdmi_register_audio_driver(struct device *dev)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
@@ -1672,15 +1665,21 @@ static int mtk_hdmi_register_audio_driver(struct device *dev)
.data = hdmi,
.no_capture_mute = 1,
};
- struct platform_device *pdev;
+ int ret;
- pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
- PLATFORM_DEVID_AUTO, &codec_data,
- sizeof(codec_data));
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
+ hdmi->audio_pdev = platform_device_register_data(dev,
+ HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO,
+ &codec_data,
+ sizeof(codec_data));
+ if (IS_ERR(hdmi->audio_pdev))
+ return PTR_ERR(hdmi->audio_pdev);
+
+ ret = devm_add_action_or_reset(dev, mtk_hdmi_unregister_audio_driver,
+ hdmi->audio_pdev);
+ if (ret)
+ return ret;
- DRM_INFO("%s driver bound to HDMI\n", HDMI_CODEC_DRV_NAME);
return 0;
}
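A minimal sketch, not part of the patch, of the managed-teardown pattern this hunk adopts: devm_add_action_or_reset() registers a cleanup callback that runs when the parent driver detaches, and calls it immediately if the registration itself fails. Names are generic placeholders.

/* Illustrative sketch only, not part of the patch. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static void example_unregister(void *data)
{
	platform_device_unregister(data);
}

static int example_register_child(struct device *parent, const void *pdata,
				  size_t len)
{
	struct platform_device *child;

	child = platform_device_register_data(parent, "example-codec",
					      PLATFORM_DEVID_AUTO, pdata, len);
	if (IS_ERR(child))
		return PTR_ERR(child);

	/* Unregister the child automatically when the parent unbinds. */
	return devm_add_action_or_reset(parent, example_unregister, child);
}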
@@ -1724,14 +1723,17 @@ static int mtk_hdmi_probe(struct platform_device *pdev)
hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
| DRM_BRIDGE_OP_HPD;
hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
- drm_bridge_add(&hdmi->bridge);
+ hdmi->bridge.vendor = "MediaTek";
+ hdmi->bridge.product = "On-Chip HDMI";
+
+ ret = devm_drm_bridge_add(dev, &hdmi->bridge);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add bridge\n");
ret = mtk_hdmi_clk_enable_audio(hdmi);
- if (ret) {
- drm_bridge_remove(&hdmi->bridge);
+ if (ret)
return dev_err_probe(dev, ret,
"Failed to enable audio clocks\n");
- }
return 0;
}
@@ -1740,12 +1742,10 @@ static void mtk_hdmi_remove(struct platform_device *pdev)
{
struct mtk_hdmi *hdmi = platform_get_drvdata(pdev);
- drm_bridge_remove(&hdmi->bridge);
mtk_hdmi_clk_disable_audio(hdmi);
}
-#ifdef CONFIG_PM_SLEEP
-static int mtk_hdmi_suspend(struct device *dev)
+static __maybe_unused int mtk_hdmi_suspend(struct device *dev)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
@@ -1754,22 +1754,14 @@ static int mtk_hdmi_suspend(struct device *dev)
return 0;
}
-static int mtk_hdmi_resume(struct device *dev)
+static __maybe_unused int mtk_hdmi_resume(struct device *dev)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
- int ret = 0;
-
- ret = mtk_hdmi_clk_enable_audio(hdmi);
- if (ret) {
- dev_err(dev, "hdmi resume failed!\n");
- return ret;
- }
- return 0;
+ return mtk_hdmi_clk_enable_audio(hdmi);
}
-#endif
-static SIMPLE_DEV_PM_OPS(mtk_hdmi_pm_ops,
- mtk_hdmi_suspend, mtk_hdmi_resume);
+
+static SIMPLE_DEV_PM_OPS(mtk_hdmi_pm_ops, mtk_hdmi_suspend, mtk_hdmi_resume);
static const struct mtk_hdmi_conf mtk_hdmi_conf_mt2701 = {
.tz_disabled = true,
@@ -1781,15 +1773,10 @@ static const struct mtk_hdmi_conf mtk_hdmi_conf_mt8167 = {
};
static const struct of_device_id mtk_hdmi_of_ids[] = {
- { .compatible = "mediatek,mt2701-hdmi",
- .data = &mtk_hdmi_conf_mt2701,
- },
- { .compatible = "mediatek,mt8167-hdmi",
- .data = &mtk_hdmi_conf_mt8167,
- },
- { .compatible = "mediatek,mt8173-hdmi",
- },
- {}
+ { .compatible = "mediatek,mt2701-hdmi", .data = &mtk_hdmi_conf_mt2701 },
+ { .compatible = "mediatek,mt8167-hdmi", .data = &mtk_hdmi_conf_mt8167 },
+ { .compatible = "mediatek,mt8173-hdmi" },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_hdmi_of_ids);
diff --git a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
index fc69ee38ce7d..7982788ae9df 100644
--- a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
@@ -291,7 +291,6 @@ static const struct component_ops mtk_mdp_rdma_component_ops = {
static int mtk_mdp_rdma_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct mtk_mdp_rdma *priv;
int ret = 0;
@@ -299,8 +298,7 @@ static int mtk_mdp_rdma_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(dev, res);
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs))
return dev_err_probe(dev, PTR_ERR(priv->regs),
"failed to ioremap rdma\n");
diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c
index 8a48b3b0a956..655106bbb76d 100644
--- a/drivers/gpu/drm/mediatek/mtk_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_plane.c
@@ -101,7 +101,7 @@ static void mtk_plane_destroy_state(struct drm_plane *plane,
}
static int mtk_plane_atomic_async_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
+ struct drm_atomic_state *state, bool flip)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
index d1191de855d9..e79f7c3ce32e 100644
--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
@@ -139,10 +139,9 @@ static int meson_encoder_cvbs_atomic_check(struct drm_bridge *bridge,
}
static void meson_encoder_cvbs_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct meson_encoder_cvbs *encoder_cvbs = bridge_to_meson_encoder_cvbs(bridge);
- struct drm_atomic_state *state = bridge_state->base.state;
struct meson_drm *priv = encoder_cvbs->priv;
const struct meson_cvbs_mode *meson_mode;
struct drm_connector_state *conn_state;
@@ -191,7 +190,7 @@ static void meson_encoder_cvbs_atomic_enable(struct drm_bridge *bridge,
}
static void meson_encoder_cvbs_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct meson_encoder_cvbs *meson_encoder_cvbs =
bridge_to_meson_encoder_cvbs(bridge);
diff --git a/drivers/gpu/drm/meson/meson_encoder_dsi.c b/drivers/gpu/drm/meson/meson_encoder_dsi.c
index 7816902f5907..fe204437bd65 100644
--- a/drivers/gpu/drm/meson/meson_encoder_dsi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_dsi.c
@@ -42,10 +42,9 @@ static int meson_encoder_dsi_attach(struct drm_bridge *bridge,
}
static void meson_encoder_dsi_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct meson_encoder_dsi *encoder_dsi = bridge_to_meson_encoder_dsi(bridge);
- struct drm_atomic_state *state = bridge_state->base.state;
struct meson_drm *priv = encoder_dsi->priv;
struct drm_connector_state *conn_state;
struct drm_crtc_state *crtc_state;
@@ -80,7 +79,7 @@ static void meson_encoder_dsi_atomic_enable(struct drm_bridge *bridge,
}
static void meson_encoder_dsi_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct meson_encoder_dsi *meson_encoder_dsi =
bridge_to_meson_encoder_dsi(bridge);
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
index 0593a1cde906..6d1c9262a2cf 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
@@ -186,10 +186,9 @@ static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bri
}
static void meson_encoder_hdmi_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
- struct drm_atomic_state *state = bridge_state->base.state;
unsigned int ycrcb_map = VPU_HDMI_OUTPUT_CBYCR;
struct meson_drm *priv = encoder_hdmi->priv;
struct drm_connector_state *conn_state;
@@ -250,7 +249,7 @@ static void meson_encoder_hdmi_atomic_enable(struct drm_bridge *bridge,
}
static void meson_encoder_hdmi_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
struct meson_drm *priv = encoder_hdmi->priv;
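The bridge-callback hunks above and in the MediaTek files all follow the same signature change, so here is a minimal sketch, not part of the patch, of the new form: the hook now receives the full atomic commit state directly, and a bridge that still needs its own old bridge state can look it up from there (drm_atomic_get_old_bridge_state() shown as one option). Names are generic placeholders.

/* Illustrative sketch only, not part of the patch. */
#include <drm/drm_atomic.h>
#include <drm/drm_bridge.h>

static void example_bridge_atomic_enable(struct drm_bridge *bridge,
					 struct drm_atomic_state *state)
{
	struct drm_bridge_state *old_bridge_state =
		drm_atomic_get_old_bridge_state(state, bridge);

	if (!old_bridge_state)
		return;

	/* ... program the encoder using data from the atomic state ... */
}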
diff --git a/drivers/gpu/drm/mgag200/Makefile b/drivers/gpu/drm/mgag200/Makefile
index 5a02203fad12..94f063c8722a 100644
--- a/drivers/gpu/drm/mgag200/Makefile
+++ b/drivers/gpu/drm/mgag200/Makefile
@@ -6,6 +6,7 @@ mgag200-y := \
mgag200_g200.o \
mgag200_g200eh.o \
mgag200_g200eh3.o \
+ mgag200_g200eh5.o \
mgag200_g200er.o \
mgag200_g200ev.o \
mgag200_g200ew3.o \
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 069fdd2dc8f6..32cd8ac018c0 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -214,6 +214,7 @@ static const struct pci_device_id mgag200_pciidlist[] = {
{ PCI_VENDOR_ID_MATROX, 0x534, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_ER },
{ PCI_VENDOR_ID_MATROX, 0x536, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EW3 },
{ PCI_VENDOR_ID_MATROX, 0x538, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EH3 },
+ { PCI_VENDOR_ID_MATROX, 0x53a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EH5 },
{0,}
};
@@ -256,6 +257,9 @@ mgag200_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case G200_EH3:
mdev = mgag200_g200eh3_device_create(pdev, &mgag200_driver);
break;
+ case G200_EH5:
+ mdev = mgag200_g200eh5_device_create(pdev, &mgag200_driver);
+ break;
case G200_ER:
mdev = mgag200_g200er_device_create(pdev, &mgag200_driver);
break;
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 0608fc63e588..819a7e9381e3 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -196,6 +196,7 @@ enum mga_type {
G200_EV,
G200_EH,
G200_EH3,
+ G200_EH5,
G200_ER,
G200_EW3,
};
@@ -334,6 +335,8 @@ struct mga_device *mgag200_g200eh_device_create(struct pci_dev *pdev,
const struct drm_driver *drv);
struct mga_device *mgag200_g200eh3_device_create(struct pci_dev *pdev,
const struct drm_driver *drv);
+struct mga_device *mgag200_g200eh5_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv);
struct mga_device *mgag200_g200er_device_create(struct pci_dev *pdev,
const struct drm_driver *drv);
struct mga_device *mgag200_g200ew3_device_create(struct pci_dev *pdev,
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh5.c b/drivers/gpu/drm/mgag200/mgag200_g200eh5.c
new file mode 100644
index 000000000000..e2a2942a80a0
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_g200eh5.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/limits.h>
+#include <linux/pci.h>
+#include <linux/units.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
+
+#include "mgag200_drv.h"
+
+/*
+ * PIXPLLC
+ */
+
+static int mgag200_g200eh5_pixpllc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *new_state)
+{
+ const unsigned long long VCO_MAX = 10 * GIGA; // Hz
+ const unsigned long long VCO_MIN = 2500 * MEGA; // Hz
+ const unsigned long long PLL_FREQ_REF = 25 * MEGA; // Hz
+
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
+ long clock = new_crtc_state->mode.clock;
+ struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc;
+
+ unsigned long long fdelta = ULLONG_MAX;
+
+ u16 mult_max = (u16)(VCO_MAX / PLL_FREQ_REF); // 400 (0x190)
+ u16 mult_min = (u16)(VCO_MIN / PLL_FREQ_REF); // 100 (0x64)
+
+ u64 ftmp_delta;
+ u64 computed_fo;
+
+ u16 test_m;
+ u8 test_div_a;
+ u8 test_div_b;
+ u64 fo_hz;
+
+ u8 uc_m = 0;
+ u8 uc_n = 0;
+ u8 uc_p = 0;
+
+ fo_hz = (u64)clock * HZ_PER_KHZ;
+
+ for (test_m = mult_min; test_m <= mult_max; test_m++) { // This gives 100 <= M <= 400
+ for (test_div_a = 8; test_div_a > 0; test_div_a--) { // This gives 1 <= A <= 8
+ for (test_div_b = 1; test_div_b <= test_div_a; test_div_b++) {
+ // This gives 1 <= B <= A
+ computed_fo = (PLL_FREQ_REF * test_m) /
+ (4 * test_div_a * test_div_b);
+
+ if (computed_fo > fo_hz)
+ ftmp_delta = computed_fo - fo_hz;
+ else
+ ftmp_delta = fo_hz - computed_fo;
+
+ if (ftmp_delta < fdelta) {
+ fdelta = ftmp_delta;
+ uc_m = (u8)(0xFF & test_m);
+ uc_n = (u8)((0x7 & (test_div_a - 1))
+ | (0x70 & (0x7 & (test_div_b - 1)) << 4));
+ uc_p = (u8)(1 & (test_m >> 8));
+ }
+ if (fdelta == 0)
+ break;
+ }
+ if (fdelta == 0)
+ break;
+ }
+ if (fdelta == 0)
+ break;
+ }
+
+ pixpllc->m = uc_m + 1;
+ pixpllc->n = uc_n + 1;
+ pixpllc->p = uc_p + 1;
+ pixpllc->s = 0;
+
+ return 0;
+ }
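A standalone mirror, not part of the patch, of the exhaustive search above: it scans the same ranges (100 <= M <= 400, 1 <= B <= A <= 8), computes fo = fref * M / (4 * A * B) and keeps the combination closest to the requested pixel clock. The target frequency is an arbitrary example.

/* Illustrative sketch only, not part of the patch. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t fref = 25000000ULL;     /* 25 MHz reference          */
	const uint64_t target = 148500000ULL;  /* requested pixel clock, Hz */
	uint64_t best_delta = UINT64_MAX;
	unsigned int best_m = 0, best_a = 0, best_b = 0;

	for (unsigned int m = 100; m <= 400; m++) {
		for (unsigned int a = 8; a >= 1; a--) {
			for (unsigned int b = 1; b <= a; b++) {
				uint64_t fo = fref * m / (4 * a * b);
				uint64_t delta = fo > target ? fo - target
							     : target - fo;

				if (delta < best_delta) {
					best_delta = delta;
					best_m = m;
					best_a = a;
					best_b = b;
				}
			}
		}
	}

	printf("M=%u A=%u B=%u -> fo=%llu Hz (delta %llu Hz)\n",
	       best_m, best_a, best_b,
	       (unsigned long long)(fref * best_m / (4 * best_a * best_b)),
	       (unsigned long long)best_delta);
	return 0;
}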
+
+/*
+ * Mode-setting pipeline
+ */
+
+static const struct drm_plane_helper_funcs mgag200_g200eh5_primary_plane_helper_funcs = {
+ MGAG200_PRIMARY_PLANE_HELPER_FUNCS,
+};
+
+static const struct drm_plane_funcs mgag200_g200eh5_primary_plane_funcs = {
+ MGAG200_PRIMARY_PLANE_FUNCS,
+};
+
+static const struct drm_crtc_helper_funcs mgag200_g200eh5_crtc_helper_funcs = {
+ MGAG200_CRTC_HELPER_FUNCS,
+};
+
+static const struct drm_crtc_funcs mgag200_g200eh5_crtc_funcs = {
+ MGAG200_CRTC_FUNCS,
+};
+
+static int mgag200_g200eh5_pipeline_init(struct mga_device *mdev)
+{
+ struct drm_device *dev = &mdev->base;
+ struct drm_plane *primary_plane = &mdev->primary_plane;
+ struct drm_crtc *crtc = &mdev->crtc;
+ int ret;
+
+ ret = drm_universal_plane_init(dev, primary_plane, 0,
+ &mgag200_g200eh5_primary_plane_funcs,
+ mgag200_primary_plane_formats,
+ mgag200_primary_plane_formats_size,
+ mgag200_primary_plane_fmtmods,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
+ return ret;
+ }
+ drm_plane_helper_add(primary_plane, &mgag200_g200eh5_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &mgag200_g200eh5_crtc_funcs, NULL);
+ if (ret) {
+ drm_err(dev, "drm_crtc_init_with_planes() failed: %d\n", ret);
+ return ret;
+ }
+
+ drm_crtc_helper_add(crtc, &mgag200_g200eh5_crtc_helper_funcs);
+
+ /* FIXME: legacy gamma tables, but atomic gamma doesn't work without */
+ drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
+ ret = mgag200_vga_bmc_output_init(mdev);
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * DRM device
+ */
+
+static const struct mgag200_device_info mgag200_g200eh5_device_info =
+ MGAG200_DEVICE_INFO_INIT(2048, 2048, 0, false, 1, 0, false);
+
+static const struct mgag200_device_funcs mgag200_g200eh5_device_funcs = {
+ .pixpllc_atomic_check = mgag200_g200eh5_pixpllc_atomic_check,
+ .pixpllc_atomic_update = mgag200_g200eh_pixpllc_atomic_update, // same as G200EH
+};
+
+struct mga_device *mgag200_g200eh5_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv)
+{
+ struct mga_device *mdev;
+ struct drm_device *dev;
+ resource_size_t vram_available;
+ int ret;
+
+ mdev = devm_drm_dev_alloc(&pdev->dev, drv, struct mga_device, base);
+
+ if (IS_ERR(mdev))
+ return mdev;
+ dev = &mdev->base;
+
+ pci_set_drvdata(pdev, dev);
+
+ ret = mgag200_init_pci_options(pdev, 0x00000120, 0x0000b000);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = mgag200_device_preinit(mdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = mgag200_device_init(mdev, &mgag200_g200eh5_device_info,
+ &mgag200_g200eh5_device_funcs);
+
+ if (ret)
+ return ERR_PTR(ret);
+
+ mgag200_g200eh_init_registers(mdev); // same as G200EH
+ vram_available = mgag200_device_probe_vram(mdev);
+
+ ret = mgag200_mode_config_init(mdev, vram_available);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = mgag200_g200eh5_pipeline_init(mdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+ drm_kms_helper_poll_init(dev);
+
+ return mdev;
+}
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 7ec833b6d829..974bc7c0ea76 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -170,6 +170,8 @@ config DRM_MSM_HDMI
bool "Enable HDMI support in MSM DRM driver"
depends on DRM_MSM
default y
+ select DRM_DISPLAY_HDMI_HELPER
+ select DRM_DISPLAY_HDMI_STATE_HELPER
help
Compile in support for the HDMI output MSM DRM driver. It can
be a primary or a secondary display on device. Note that this is used
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
index edffb7737a97..53e2ff4406d8 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
@@ -880,6 +880,35 @@ static const struct adreno_info a6xx_gpus[] = {
{ 137, 1 },
),
}, {
+ .chip_ids = ADRENO_CHIP_IDS(0x06020300),
+ .family = ADRENO_6XX_GEN3,
+ .fw = {
+ [ADRENO_FW_SQE] = "a650_sqe.fw",
+ [ADRENO_FW_GMU] = "a623_gmu.bin",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV,
+ .init = a6xx_gpu_init,
+ .a6xx = &(const struct a6xx_info) {
+ .hwcg = a690_hwcg,
+ .protect = &a650_protect,
+ .gmu_cgc_mode = 0x00020200,
+ .prim_fifo_threshold = 0x00010000,
+ .bcms = (const struct a6xx_bcm[]) {
+ { .name = "SH0", .buswidth = 16 },
+ { .name = "MC0", .buswidth = 4 },
+ {
+ .name = "ACV",
+ .fixed = true,
+ .perfmode = BIT(3),
+ },
+ { /* sentinel */ },
+ },
+ },
+ .address_space_size = SZ_16G,
+ }, {
.chip_ids = ADRENO_CHIP_IDS(
0x06030001,
0x06030002
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 699b0dd34b18..38c94915d4c9 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1169,49 +1169,50 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
u32 val;
+ int ret;
/*
- * The GMU may still be in slumber unless the GPU started so check and
- * skip putting it back into slumber if so
+ * The GMU firmware's internal power state can become inconsistent if we send the
+ * "prepare_slumber" HFI when the oob_gpu handshake wasn't done after the last wake-up,
+ * so do a dummy handshake here when required
*/
- val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
+ if (adreno_gpu->base.needs_hw_init) {
+ if (a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET))
+ goto force_off;
- if (val != 0xf) {
- int ret = a6xx_gmu_wait_for_idle(gmu);
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+ }
- /* If the GMU isn't responding assume it is hung */
- if (ret) {
- a6xx_gmu_force_off(gmu);
- return;
- }
+ ret = a6xx_gmu_wait_for_idle(gmu);
- a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
+ /* If the GMU isn't responding assume it is hung */
+ if (ret)
+ goto force_off;
- /* tell the GMU we want to slumber */
- ret = a6xx_gmu_notify_slumber(gmu);
- if (ret) {
- a6xx_gmu_force_off(gmu);
- return;
- }
+ a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
- ret = gmu_poll_timeout(gmu,
- REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
- !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
- 100, 10000);
+ /* tell the GMU we want to slumber */
+ ret = a6xx_gmu_notify_slumber(gmu);
+ if (ret)
+ goto force_off;
- /*
- * Let the user know we failed to slumber but don't worry too
- * much because we are powering down anyway
- */
+ ret = gmu_poll_timeout(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
+ !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
+ 100, 10000);
- if (ret)
- DRM_DEV_ERROR(gmu->dev,
- "Unable to slumber GMU: status = 0%x/0%x\n",
- gmu_read(gmu,
- REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
- gmu_read(gmu,
- REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
- }
+ /*
+ * Let the user know we failed to slumber but don't worry too
+ * much because we are powering down anyway
+ */
+
+ if (ret)
+ DRM_DEV_ERROR(gmu->dev,
+ "Unable to slumber GMU: status = 0%x/0%x\n",
+ gmu_read(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
+ gmu_read(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
/* Turn off HFI */
a6xx_hfi_stop(gmu);
@@ -1221,6 +1222,11 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
/* Tell RPMh to power off the GPU */
a6xx_rpmh_stop(gmu);
+
+ return;
+
+force_off:
+ a6xx_gmu_force_off(gmu);
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 0ae29a7c8a4d..1820c167fcee 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -616,6 +616,14 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
gpu->ubwc_config.uavflagprd_inv = 2;
}
+ if (adreno_is_a623(gpu)) {
+ gpu->ubwc_config.highest_bank_bit = 16;
+ gpu->ubwc_config.amsbc = 1;
+ gpu->ubwc_config.rgb565_predicator = 1;
+ gpu->ubwc_config.uavflagprd_inv = 2;
+ gpu->ubwc_config.macrotile_mode = 1;
+ }
+
if (adreno_is_a640_family(gpu))
gpu->ubwc_config.amsbc = 1;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index 0fcae53c0b14..341a72a67401 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -1214,12 +1214,12 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
a6xx_state->gmu_registers = state_kcalloc(a6xx_state,
- 3, sizeof(*a6xx_state->gmu_registers));
+ 4, sizeof(*a6xx_state->gmu_registers));
if (!a6xx_state->gmu_registers)
return;
- a6xx_state->nr_gmu_registers = 3;
+ a6xx_state->nr_gmu_registers = 4;
/* Get the CX GMU registers from AHB */
_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0],
@@ -1227,6 +1227,13 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[1],
&a6xx_state->gmu_registers[1], true);
+ if (adreno_is_a621(adreno_gpu) || adreno_is_a623(adreno_gpu))
+ _a6xx_get_gmu_registers(gpu, a6xx_state, &a621_gpucc_reg,
+ &a6xx_state->gmu_registers[2], false);
+ else
+ _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gpucc_reg,
+ &a6xx_state->gmu_registers[2], false);
+
if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
return;
@@ -1234,7 +1241,7 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
gpu_write(gpu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[2],
- &a6xx_state->gmu_registers[2], false);
+ &a6xx_state->gmu_registers[3], false);
}
static struct msm_gpu_state_bo *a6xx_snapshot_gmu_bo(
@@ -1507,6 +1514,8 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
/* Restore the size in the hardware */
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, mempool_size);
+
+ a6xx_state->nr_indexed_regs = count;
}
static void a7xx_get_indexed_registers(struct msm_gpu *gpu,
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index dd4c28a8d923..e545106c70be 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -363,6 +363,9 @@ static const u32 a6xx_gmu_cx_registers[] = {
0x51e0, 0x51e2, 0x51f0, 0x51f0, 0x5200, 0x5201,
/* GMU AO */
0x9300, 0x9316, 0x9400, 0x9400,
+};
+
+static const u32 a6xx_gmu_gpucc_registers[] = {
/* GPU CC */
0x9800, 0x9812, 0x9840, 0x9852, 0x9c00, 0x9c04, 0x9c07, 0x9c0b,
0x9c15, 0x9c1c, 0x9c1e, 0x9c2d, 0x9c3c, 0x9c3d, 0x9c3f, 0x9c40,
@@ -373,6 +376,17 @@ static const u32 a6xx_gmu_cx_registers[] = {
0xbc00, 0xbc16, 0xbc20, 0xbc27,
};
+static const u32 a621_gmu_gpucc_registers[] = {
+ /* GPU CC */
+ 0x9800, 0x980e, 0x9c00, 0x9c0e, 0xb000, 0xb004, 0xb400, 0xb404,
+ 0xb800, 0xb804, 0xbc00, 0xbc05, 0xbc14, 0xbc1d, 0xbc2a, 0xbc30,
+ 0xbc32, 0xbc32, 0xbc41, 0xbc55, 0xbc66, 0xbc68, 0xbc78, 0xbc7a,
+ 0xbc89, 0xbc8a, 0xbc9c, 0xbc9e, 0xbca0, 0xbca3, 0xbcb3, 0xbcb5,
+ 0xbcc5, 0xbcc7, 0xbcd6, 0xbcd8, 0xbce8, 0xbce9, 0xbcf9, 0xbcfc,
+ 0xbd0b, 0xbd0c, 0xbd1c, 0xbd1e, 0xbd40, 0xbd70, 0xbe00, 0xbe16,
+ 0xbe20, 0xbe2d,
+};
+
static const u32 a6xx_gmu_cx_rscc_registers[] = {
/* GPU RSCC */
0x008c, 0x008c, 0x0101, 0x0102, 0x0340, 0x0342, 0x0344, 0x0347,
@@ -386,6 +400,9 @@ static const struct a6xx_registers a6xx_gmu_reglist[] = {
REGS(a6xx_gmu_gx_registers, 0, 0),
};
+static const struct a6xx_registers a6xx_gpucc_reg = REGS(a6xx_gmu_gpucc_registers, 0, 0);
+static const struct a6xx_registers a621_gpucc_reg = REGS(a621_gmu_gpucc_registers, 0, 0);
+
static u32 a6xx_get_cp_roq_size(struct msm_gpu *gpu);
static u32 a7xx_get_cp_roq_size(struct msm_gpu *gpu);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 1238f3265978..7156cda07b03 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -883,6 +883,16 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
drm_printf(p, " - dir=%s\n", info->flags & IOMMU_FAULT_WRITE ? "WRITE" : "READ");
drm_printf(p, " - type=%s\n", info->type);
drm_printf(p, " - source=%s\n", info->block);
+
+ /* Information extracted from what we think are the current
+ * pgtables. Hopefully the TTBR0 matches what we've extracted
+ * from the SMMU registers in smmu_info!
+ */
+ drm_puts(p, "pgtable-fault-info:\n");
+ drm_printf(p, " - ttbr0: %.16llx\n", (u64)info->pgtbl_ttbr0);
+ drm_printf(p, " - asid: %d\n", info->asid);
+ drm_printf(p, " - ptes: %.16llx %.16llx %.16llx %.16llx\n",
+ info->ptes[0], info->ptes[1], info->ptes[2], info->ptes[3]);
}
drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index dcf454629ce0..92caba3584da 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -442,6 +442,11 @@ static inline int adreno_is_a621(const struct adreno_gpu *gpu)
return gpu->info->chip_ids[0] == 0x06020100;
}
+static inline int adreno_is_a623(const struct adreno_gpu *gpu)
+{
+ return gpu->info->chip_ids[0] == 0x06020300;
+}
+
static inline int adreno_is_a630(const struct adreno_gpu *gpu)
{
return adreno_is_revn(gpu, 630);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
index bcb39807fe61..6ac97c378056 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
@@ -343,8 +343,8 @@ static const struct dpu_wb_cfg sm8650_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.xin_id = 6,
.vbif_idx = VBIF_RT,
.maxlinewidth = 4096,
@@ -452,6 +452,7 @@ const struct dpu_mdss_cfg dpu_sm8650_cfg = {
.mdss_ver = &sm8650_mdss_ver,
.caps = &sm8650_dpu_caps,
.mdp = &sm8650_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm8650_ctl),
.ctl = sm8650_ctl,
.sspp_count = ARRAY_SIZE(sm8650_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
index ab3dfb0b374e..1f32807bb5e5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
@@ -190,6 +190,7 @@ const struct dpu_mdss_cfg dpu_msm8937_cfg = {
.mdss_ver = &msm8937_mdss_ver,
.caps = &msm8937_dpu_caps,
.mdp = msm8937_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(msm8937_ctl),
.ctl = msm8937_ctl,
.sspp_count = ARRAY_SIZE(msm8937_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
index 6bdaecca6761..42131959ff22 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
@@ -167,6 +167,7 @@ const struct dpu_mdss_cfg dpu_msm8917_cfg = {
.mdss_ver = &msm8917_mdss_ver,
.caps = &msm8917_dpu_caps,
.mdp = msm8917_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(msm8917_ctl),
.ctl = msm8917_ctl,
.sspp_count = ARRAY_SIZE(msm8917_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
index 14f36ea6ad0e..2b4723a5c676 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
@@ -198,6 +198,7 @@ const struct dpu_mdss_cfg dpu_msm8953_cfg = {
.mdss_ver = &msm8953_mdss_ver,
.caps = &msm8953_dpu_caps,
.mdp = msm8953_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(msm8953_ctl),
.ctl = msm8953_ctl,
.sspp_count = ARRAY_SIZE(msm8953_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
index 491f6f5827d1..5cf19de71f06 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
@@ -316,6 +316,7 @@ const struct dpu_mdss_cfg dpu_msm8996_cfg = {
.mdss_ver = &msm8996_mdss_ver,
.caps = &msm8996_dpu_caps,
.mdp = msm8996_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(msm8996_ctl),
.ctl = msm8996_ctl,
.sspp_count = ARRAY_SIZE(msm8996_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
index 64c94e919a69..746474679ef5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
@@ -302,6 +302,7 @@ const struct dpu_mdss_cfg dpu_msm8998_cfg = {
.mdss_ver = &msm8998_mdss_ver,
.caps = &msm8998_dpu_caps,
.mdp = &msm8998_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(msm8998_ctl),
.ctl = msm8998_ctl,
.sspp_count = ARRAY_SIZE(msm8998_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
index 424815e7fb7d..4f2f68b07f20 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
@@ -269,6 +269,7 @@ const struct dpu_mdss_cfg dpu_sdm660_cfg = {
.mdss_ver = &sdm660_mdss_ver,
.caps = &sdm660_dpu_caps,
.mdp = &sdm660_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(sdm660_ctl),
.ctl = sdm660_ctl,
.sspp_count = ARRAY_SIZE(sdm660_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
index df01227fc364..c70bef025ac4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
@@ -205,6 +205,7 @@ const struct dpu_mdss_cfg dpu_sdm630_cfg = {
.mdss_ver = &sdm630_mdss_ver,
.caps = &sdm630_dpu_caps,
.mdp = &sdm630_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(sdm630_ctl),
.ctl = sdm630_ctl,
.sspp_count = ARRAY_SIZE(sdm630_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
index 72bd4f7e9e50..ab7b4822ca63 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
@@ -319,6 +319,7 @@ const struct dpu_mdss_cfg dpu_sdm845_cfg = {
.mdss_ver = &sdm845_mdss_ver,
.caps = &sdm845_dpu_caps,
.mdp = &sdm845_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(sdm845_ctl),
.ctl = sdm845_ctl,
.sspp_count = ARRAY_SIZE(sdm845_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
index daef07924886..c2fde980fb52 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
@@ -132,6 +132,7 @@ const struct dpu_mdss_cfg dpu_sdm670_cfg = {
.mdss_ver = &sdm670_mdss_ver,
.caps = &sdm845_dpu_caps,
.mdp = &sdm670_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(sdm845_ctl),
.ctl = sdm845_ctl,
.sspp_count = ARRAY_SIZE(sdm670_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
index 36cc9dbc00b5..979527d98fbc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
@@ -298,8 +298,8 @@ static const struct dpu_wb_cfg sm8150_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -388,6 +388,7 @@ const struct dpu_mdss_cfg dpu_sm8150_cfg = {
.mdss_ver = &sm8150_mdss_ver,
.caps = &sm8150_dpu_caps,
.mdp = &sm8150_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm8150_ctl),
.ctl = sm8150_ctl,
.sspp_count = ARRAY_SIZE(sm8150_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
index e8eacdb47967..d76b8992a6c1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
@@ -305,8 +305,8 @@ static const struct dpu_wb_cfg sc8180x_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -414,6 +414,7 @@ const struct dpu_mdss_cfg dpu_sc8180x_cfg = {
.mdss_ver = &sc8180x_mdss_ver,
.caps = &sc8180x_dpu_caps,
.mdp = &sc8180x_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sc8180x_ctl),
.ctl = sc8180x_ctl,
.sspp_count = ARRAY_SIZE(sc8180x_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
index 2fe674d1e059..83db11339b29 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
@@ -261,8 +261,8 @@ static const struct dpu_wb_cfg sm7150_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -309,6 +309,7 @@ const struct dpu_mdss_cfg dpu_sm7150_cfg = {
.mdss_ver = &sm7150_mdss_ver,
.caps = &sm7150_dpu_caps,
.mdp = &sm7150_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm7150_ctl),
.ctl = sm7150_ctl,
.sspp_count = ARRAY_SIZE(sm7150_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
index d761ed705bac..da11830d4407 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
@@ -27,6 +27,7 @@ static const struct dpu_mdp_cfg sm6150_mdp = {
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
},
};
@@ -162,6 +163,21 @@ static const struct dpu_pingpong_cfg sm6150_pp[] = {
},
};
+static const struct dpu_wb_cfg sm6150_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 2160,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
static const struct dpu_intf_cfg sm6150_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@@ -232,6 +248,7 @@ const struct dpu_mdss_cfg dpu_sm6150_cfg = {
.mdss_ver = &sm6150_mdss_ver,
.caps = &sm6150_dpu_caps,
.mdp = &sm6150_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm6150_ctl),
.ctl = sm6150_ctl,
.sspp_count = ARRAY_SIZE(sm6150_sspp),
@@ -242,6 +259,8 @@ const struct dpu_mdss_cfg dpu_sm6150_cfg = {
.dspp = sm6150_dspp,
.pingpong_count = ARRAY_SIZE(sm6150_pp),
.pingpong = sm6150_pp,
+ .wb_count = ARRAY_SIZE(sm6150_wb),
+ .wb = sm6150_wb,
.intf_count = ARRAY_SIZE(sm6150_intf),
.intf = sm6150_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
index 76f60a2df7a8..d3d3a34d0b45 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
@@ -145,8 +145,8 @@ static const struct dpu_wb_cfg sm6125_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -216,6 +216,7 @@ const struct dpu_mdss_cfg dpu_sm6125_cfg = {
.mdss_ver = &sm6125_mdss_ver,
.caps = &sm6125_dpu_caps,
.mdp = &sm6125_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm6125_ctl),
.ctl = sm6125_ctl,
.sspp_count = ARRAY_SIZE(sm6125_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
index e8916ae826a6..47e01c3c242f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
@@ -386,7 +386,7 @@ const struct dpu_mdss_cfg dpu_sm8250_cfg = {
.mdss_ver = &sm8250_mdss_ver,
.caps = &sm8250_dpu_caps,
.mdp = &sm8250_mdp,
- .cdm = &sc7280_cdm,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm8250_ctl),
.ctl = sm8250_ctl,
.sspp_count = ARRAY_SIZE(sm8250_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
index 7382ebb6e5b2..040c94c0bb66 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
@@ -157,8 +157,8 @@ static const struct dpu_wb_cfg sc7180_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -204,6 +204,7 @@ const struct dpu_mdss_cfg dpu_sc7180_cfg = {
.mdss_ver = &sc7180_mdss_ver,
.caps = &sc7180_dpu_caps,
.mdp = &sc7180_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sc7180_ctl),
.ctl = sc7180_ctl,
.sspp_count = ARRAY_SIZE(sc7180_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
index 0502cee2f116..397278ba999b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
@@ -151,8 +151,8 @@ static const struct dpu_wb_cfg sm6350_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -222,6 +222,7 @@ const struct dpu_mdss_cfg dpu_sm6350_cfg = {
.mdss_ver = &sm6350_mdss_ver,
.caps = &sm6350_dpu_caps,
.mdp = &sm6350_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm6350_ctl),
.ctl = sm6350_ctl,
.sspp_count = ARRAY_SIZE(sm6350_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
index f7c08e89c882..0c860e804cab 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
@@ -305,8 +305,8 @@ static const struct dpu_wb_cfg sm8350_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -396,6 +396,7 @@ const struct dpu_mdss_cfg dpu_sm8350_cfg = {
.mdss_ver = &sm8350_mdss_ver,
.caps = &sm8350_dpu_caps,
.mdp = &sm8350_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm8350_ctl),
.ctl = sm8350_ctl,
.sspp_count = ARRAY_SIZE(sm8350_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
index 2f153e0b5c6a..e9625c48c567 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
@@ -248,7 +248,7 @@ const struct dpu_mdss_cfg dpu_sc7280_cfg = {
.mdss_ver = &sc7280_mdss_ver,
.caps = &sc7280_dpu_caps,
.mdp = &sc7280_mdp,
- .cdm = &sc7280_cdm,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sc7280_ctl),
.ctl = sc7280_ctl,
.sspp_count = ARRAY_SIZE(sc7280_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
index 0d143e390eca..fcee1c3665f8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
@@ -435,6 +435,7 @@ const struct dpu_mdss_cfg dpu_sc8280xp_cfg = {
.mdss_ver = &sc8280xp_mdss_ver,
.caps = &sc8280xp_dpu_caps,
.mdp = &sc8280xp_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sc8280xp_ctl),
.ctl = sc8280xp_ctl,
.sspp_count = ARRAY_SIZE(sc8280xp_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
index 08742472f9cc..19b2ee8bbd5f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
@@ -321,8 +321,8 @@ static const struct dpu_wb_cfg sm8450_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -412,6 +412,7 @@ const struct dpu_mdss_cfg dpu_sm8450_cfg = {
.mdss_ver = &sm8450_mdss_ver,
.caps = &sm8450_dpu_caps,
.mdp = &sm8450_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm8450_ctl),
.ctl = sm8450_ctl,
.sspp_count = ARRAY_SIZE(sm8450_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
index 76ec72a32378..4d96ce71746f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
@@ -458,7 +458,7 @@ const struct dpu_mdss_cfg dpu_sa8775p_cfg = {
.mdss_ver = &sa8775p_mdss_ver,
.caps = &sa8775p_dpu_caps,
.mdp = &sa8775p_mdp,
- .cdm = &sc7280_cdm,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sa8775p_ctl),
.ctl = sa8775p_ctl,
.sspp_count = ARRAY_SIZE(sa8775p_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
index 4d3787fceb72..24f988465bf6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
@@ -317,8 +317,8 @@ static const struct dpu_wb_cfg sm8550_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.xin_id = 6,
.vbif_idx = VBIF_RT,
.maxlinewidth = 4096,
@@ -407,6 +407,7 @@ const struct dpu_mdss_cfg dpu_sm8550_cfg = {
.mdss_ver = &sm8550_mdss_ver,
.caps = &sm8550_dpu_caps,
.mdp = &sm8550_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm8550_ctl),
.ctl = sm8550_ctl,
.sspp_count = ARRAY_SIZE(sm8550_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
index 6b112e3d17da..6417baa84f82 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
@@ -317,8 +317,8 @@ static const struct dpu_wb_cfg x1e80100_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.xin_id = 6,
.vbif_idx = VBIF_RT,
.maxlinewidth = 4096,
@@ -453,6 +453,7 @@ const struct dpu_mdss_cfg dpu_x1e80100_cfg = {
.mdss_ver = &x1e80100_mdss_ver,
.caps = &x1e80100_dpu_caps,
.mdp = &x1e80100_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(x1e80100_ctl),
.ctl = x1e80100_ctl,
.sspp_count = ARRAY_SIZE(x1e80100_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index 6f0a37f954fe..0fb5789c60d0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -118,26 +118,38 @@ static void _dpu_core_perf_calc_crtc(const struct dpu_core_perf *core_perf,
return;
}
- memset(perf, 0, sizeof(struct dpu_core_perf_params));
-
- if (core_perf->perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
- perf->bw_ctl = 0;
- perf->max_per_pipe_ib = 0;
- perf->core_clk_rate = 0;
- } else if (core_perf->perf_tune.mode == DPU_PERF_MODE_FIXED) {
- perf->bw_ctl = core_perf->fix_core_ab_vote;
- perf->max_per_pipe_ib = core_perf->fix_core_ib_vote;
- perf->core_clk_rate = core_perf->fix_core_clk_rate;
- } else {
- perf->bw_ctl = _dpu_core_perf_calc_bw(perf_cfg, crtc);
- perf->max_per_pipe_ib = perf_cfg->min_dram_ib;
- perf->core_clk_rate = _dpu_core_perf_calc_clk(perf_cfg, crtc, state);
- }
-
+ perf->bw_ctl = _dpu_core_perf_calc_bw(perf_cfg, crtc);
+ perf->max_per_pipe_ib = perf_cfg->min_dram_ib;
+ perf->core_clk_rate = _dpu_core_perf_calc_clk(perf_cfg, crtc, state);
DRM_DEBUG_ATOMIC(
- "crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu\n",
+ "crtc=%d clk_rate=%llu core_ib=%u core_ab=%u\n",
crtc->base.id, perf->core_clk_rate,
- perf->max_per_pipe_ib, perf->bw_ctl);
+ perf->max_per_pipe_ib,
+ (u32)DIV_ROUND_UP_ULL(perf->bw_ctl, 1000));
+}
+
+static void dpu_core_perf_aggregate(struct drm_device *ddev,
+ enum dpu_crtc_client_type curr_client_type,
+ struct dpu_core_perf_params *perf)
+{
+ struct dpu_crtc_state *dpu_cstate;
+ struct drm_crtc *tmp_crtc;
+
+ drm_for_each_crtc(tmp_crtc, ddev) {
+ if (tmp_crtc->enabled &&
+ curr_client_type == dpu_crtc_get_client_type(tmp_crtc)) {
+ dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
+
+ perf->max_per_pipe_ib = max(perf->max_per_pipe_ib,
+ dpu_cstate->new_perf.max_per_pipe_ib);
+
+ perf->bw_ctl += dpu_cstate->new_perf.bw_ctl;
+
+ DRM_DEBUG_ATOMIC("crtc=%d bw=%llu\n",
+ tmp_crtc->base.id,
+ dpu_cstate->new_perf.bw_ctl);
+ }
+ }
}
/**
@@ -150,11 +162,9 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
u32 bw, threshold;
- u64 bw_sum_of_intfs = 0;
- enum dpu_crtc_client_type curr_client_type;
struct dpu_crtc_state *dpu_cstate;
- struct drm_crtc *tmp_crtc;
struct dpu_kms *kms;
+ struct dpu_core_perf_params perf = { 0 };
if (!crtc || !state) {
DPU_ERROR("invalid crtc\n");
@@ -172,80 +182,56 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
/* obtain new values */
_dpu_core_perf_calc_crtc(&kms->perf, crtc, state, &dpu_cstate->new_perf);
- bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl;
- curr_client_type = dpu_crtc_get_client_type(crtc);
-
- drm_for_each_crtc(tmp_crtc, crtc->dev) {
- if (tmp_crtc->enabled &&
- dpu_crtc_get_client_type(tmp_crtc) == curr_client_type &&
- tmp_crtc != crtc) {
- struct dpu_crtc_state *tmp_cstate =
- to_dpu_crtc_state(tmp_crtc->state);
-
- DRM_DEBUG_ATOMIC("crtc:%d bw:%llu ctrl:%d\n",
- tmp_crtc->base.id, tmp_cstate->new_perf.bw_ctl,
- tmp_cstate->bw_control);
+ dpu_core_perf_aggregate(crtc->dev, dpu_crtc_get_client_type(crtc), &perf);
- bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl;
- }
-
- /* convert bandwidth to kb */
- bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
- DRM_DEBUG_ATOMIC("calculated bandwidth=%uk\n", bw);
+ /* convert bandwidth to kb */
+ bw = DIV_ROUND_UP_ULL(perf.bw_ctl, 1000);
+ DRM_DEBUG_ATOMIC("calculated bandwidth=%uk\n", bw);
- threshold = kms->perf.perf_cfg->max_bw_high;
+ threshold = kms->perf.perf_cfg->max_bw_high;
- DRM_DEBUG_ATOMIC("final threshold bw limit = %d\n", threshold);
+ DRM_DEBUG_ATOMIC("final threshold bw limit = %d\n", threshold);
- if (!threshold) {
- DPU_ERROR("no bandwidth limits specified\n");
- return -E2BIG;
- } else if (bw > threshold) {
- DPU_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw,
- threshold);
- return -E2BIG;
- }
+ if (!threshold) {
+ DPU_ERROR("no bandwidth limits specified\n");
+ return -E2BIG;
+ } else if (bw > threshold) {
+ DPU_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw,
+ threshold);
+ return -E2BIG;
}
return 0;
}
static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
- struct drm_crtc *crtc)
+ struct drm_crtc *crtc)
{
struct dpu_core_perf_params perf = { 0 };
- enum dpu_crtc_client_type curr_client_type
- = dpu_crtc_get_client_type(crtc);
- struct drm_crtc *tmp_crtc;
- struct dpu_crtc_state *dpu_cstate;
int i, ret = 0;
- u64 avg_bw;
+ u32 avg_bw;
+ u32 peak_bw;
if (!kms->num_paths)
return 0;
- drm_for_each_crtc(tmp_crtc, crtc->dev) {
- if (tmp_crtc->enabled &&
- curr_client_type ==
- dpu_crtc_get_client_type(tmp_crtc)) {
- dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
-
- perf.max_per_pipe_ib = max(perf.max_per_pipe_ib,
- dpu_cstate->new_perf.max_per_pipe_ib);
-
- perf.bw_ctl += dpu_cstate->new_perf.bw_ctl;
+ if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
+ avg_bw = 0;
+ peak_bw = 0;
+ } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
+ avg_bw = kms->perf.fix_core_ab_vote;
+ peak_bw = kms->perf.fix_core_ib_vote;
+ } else {
+ dpu_core_perf_aggregate(crtc->dev, dpu_crtc_get_client_type(crtc), &perf);
- DRM_DEBUG_ATOMIC("crtc=%d bw=%llu paths:%d\n",
- tmp_crtc->base.id,
- dpu_cstate->new_perf.bw_ctl, kms->num_paths);
- }
+ avg_bw = div_u64(perf.bw_ctl, 1000); /*Bps_to_icc*/
+ peak_bw = perf.max_per_pipe_ib;
}
- avg_bw = perf.bw_ctl;
- do_div(avg_bw, (kms->num_paths * 1000)); /*Bps_to_icc*/
+ avg_bw /= kms->num_paths;
for (i = 0; i < kms->num_paths; i++)
- icc_set_bw(kms->path[i], avg_bw, perf.max_per_pipe_ib);
+ icc_set_bw(kms->path[i], avg_bw, peak_bw);
return ret;
}
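
The rework above keeps bw_ctl aggregated in bytes per second but hands the interconnect framework u32 votes in kBps: icc_set_bw() expects kBps, so the calculated vote is divided by 1000 and then split evenly across the configured paths, while the same peak (ib) vote is applied to every path. A minimal, stand-alone sketch of that arithmetic follows; the function name, variable names and the two-path example are illustrative, not taken from the driver.

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch only: bw_ctl is aggregated in bytes per second, while the
 * icc_set_bw() votes are in kBps, hence the /1000 before the per-path
 * split.
 */
static void compute_icc_votes(uint64_t bw_ctl_bps, uint32_t max_per_pipe_ib_kbps,
			      unsigned int num_paths,
			      uint32_t *avg_bw_kbps, uint32_t *peak_bw_kbps)
{
	*avg_bw_kbps = (uint32_t)(bw_ctl_bps / 1000) / num_paths; /* Bps -> kBps, then split */
	*peak_bw_kbps = max_per_pipe_ib_kbps; /* same peak vote on every path */
}

int main(void)
{
	uint32_t avg, peak;

	/* e.g. 2 GB/s aggregate bandwidth over two interconnect paths */
	compute_icc_votes(2000000000ULL, 6000000, 2, &avg, &peak);
	printf("avg=%u kBps per path, peak=%u kBps\n", avg, peak); /* avg=1000000 */
	return 0;
}
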
@@ -476,9 +462,9 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
&perf->core_clk_rate);
debugfs_create_u32("enable_bw_release", 0600, entry,
(u32 *)&perf->enable_bw_release);
- debugfs_create_u32("threshold_low", 0400, entry,
+ debugfs_create_u32("low_core_ab", 0400, entry,
(u32 *)&perf->perf_cfg->max_bw_low);
- debugfs_create_u32("threshold_high", 0400, entry,
+ debugfs_create_u32("max_core_ab", 0400, entry,
(u32 *)&perf->perf_cfg->max_bw_high);
debugfs_create_u32("min_core_ib", 0400, entry,
(u32 *)&perf->perf_cfg->min_core_ib);
@@ -490,9 +476,9 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
(u32 *)perf, &dpu_core_perf_mode_fops);
debugfs_create_u64("fix_core_clk_rate", 0600, entry,
&perf->fix_core_clk_rate);
- debugfs_create_u64("fix_core_ib_vote", 0600, entry,
+ debugfs_create_u32("fix_core_ib_vote", 0600, entry,
&perf->fix_core_ib_vote);
- debugfs_create_u64("fix_core_ab_vote", 0600, entry,
+ debugfs_create_u32("fix_core_ab_vote", 0600, entry,
&perf->fix_core_ab_vote);
return 0;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
index 451bf8021114..d2f21d34e501 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
@@ -19,7 +19,7 @@
* @core_clk_rate: core clock rate request
*/
struct dpu_core_perf_params {
- u64 max_per_pipe_ib;
+ u32 max_per_pipe_ib;
u64 bw_ctl;
u64 core_clk_rate;
};
@@ -40,8 +40,8 @@ struct dpu_core_perf_tune {
* @perf_tune: debug control for performance tuning
* @enable_bw_release: debug control for bandwidth release
* @fix_core_clk_rate: fixed core clock request in Hz used in mode 2
- * @fix_core_ib_vote: fixed core ib vote in bps used in mode 2
- * @fix_core_ab_vote: fixed core ab vote in bps used in mode 2
+ * @fix_core_ib_vote: fixed core ib vote in KBps used in mode 2
+ * @fix_core_ab_vote: fixed core ab vote in KBps used in mode 2
*/
struct dpu_core_perf {
const struct dpu_perf_cfg *perf_cfg;
@@ -50,8 +50,8 @@ struct dpu_core_perf {
struct dpu_core_perf_tune perf_tune;
u32 enable_bw_release;
u64 fix_core_clk_rate;
- u64 fix_core_ib_vote;
- u64 fix_core_ab_vote;
+ u32 fix_core_ib_vote;
+ u32 fix_core_ab_vote;
};
int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index e5dcd41a361f..0714936d8835 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -953,6 +953,45 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
return rc;
}
+static int dpu_crtc_kickoff_clone_mode(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct drm_encoder *rt_encoder = NULL, *wb_encoder = NULL;
+ struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+
+ /* Find encoder for real time display */
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc->state->encoder_mask) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
+ wb_encoder = encoder;
+ else
+ rt_encoder = encoder;
+ }
+
+ if (!rt_encoder || !wb_encoder) {
+ DRM_DEBUG_ATOMIC("real time or wb encoder not found\n");
+ return -EINVAL;
+ }
+
+ dpu_encoder_prepare_for_kickoff(wb_encoder);
+ dpu_encoder_prepare_for_kickoff(rt_encoder);
+
+ dpu_vbif_clear_errors(dpu_kms);
+
+ /*
+ * Kickoff real time encoder last as it's the encoder that
+ * will do the flush
+ */
+ dpu_encoder_kickoff(wb_encoder);
+ dpu_encoder_kickoff(rt_encoder);
+
+ /* Don't start frame done timers until the kickoffs have finished */
+ dpu_encoder_start_frame_done_timer(wb_encoder);
+ dpu_encoder_start_frame_done_timer(rt_encoder);
+
+ return 0;
+}
+
/**
* dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
* @crtc: Pointer to drm crtc object
@@ -981,13 +1020,27 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
goto end;
}
}
- /*
- * Encoder will flush/start now, unless it has a tx pending. If so, it
- * may delay and flush at an irq event (e.g. ppdone)
- */
- drm_for_each_encoder_mask(encoder, crtc->dev,
- crtc->state->encoder_mask)
- dpu_encoder_prepare_for_kickoff(encoder);
+
+ if (drm_crtc_in_clone_mode(crtc->state)) {
+ if (dpu_crtc_kickoff_clone_mode(crtc))
+ goto end;
+ } else {
+ /*
+ * Encoder will flush/start now, unless it has a tx pending.
+ * If so, it may delay and flush at an irq event (e.g. ppdone)
+ */
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc->state->encoder_mask)
+ dpu_encoder_prepare_for_kickoff(encoder);
+
+ dpu_vbif_clear_errors(dpu_kms);
+
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc->state->encoder_mask) {
+ dpu_encoder_kickoff(encoder);
+ dpu_encoder_start_frame_done_timer(encoder);
+ }
+ }
if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
/* acquire bandwidth and other resources */
@@ -997,11 +1050,6 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
dpu_crtc->play_count++;
- dpu_vbif_clear_errors(dpu_kms);
-
- drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
- dpu_encoder_kickoff(encoder);
-
reinit_completion(&dpu_crtc->frame_done_comp);
end:
@@ -1230,6 +1278,151 @@ done:
return ret;
}
+#define MAX_CHANNELS_PER_CRTC 2
+#define MAX_HDISPLAY_SPLIT 1080
+
+static struct msm_display_topology dpu_crtc_get_topology(
+ struct drm_crtc *crtc,
+ struct dpu_kms *dpu_kms,
+ struct drm_crtc_state *crtc_state)
+{
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ struct msm_display_topology topology = {0};
+ struct drm_encoder *drm_enc;
+
+ drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask)
+ dpu_encoder_update_topology(drm_enc, &topology, crtc_state->state,
+ &crtc_state->adjusted_mode);
+
+ topology.cwb_enabled = drm_crtc_in_clone_mode(crtc_state);
+
+ /*
+ * Datapath topology selection
+ *
+ * Dual display
+ * 2 LM, 2 INTF ( Split display using 2 interfaces)
+ *
+ * Single display
+ * 1 LM, 1 INTF
+ * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
+ *
+ * If DSC is enabled, use 2 LMs for 2:2:1 topology
+ *
+ * Add dspps to the reservation requirements if ctm is requested
+ *
+ * Only hardcode num_lm to 2 for cases where num_intf == 2 and CWB is not
+ * enabled. This is because in cases where CWB is enabled, num_intf will
+ * count both the WB and real-time phys encoders.
+ *
+ * For non-DSC CWB usecases, have the num_lm be decided by the
+ * (mode->hdisplay > MAX_HDISPLAY_SPLIT) check.
+ */
+
+ if (topology.num_intf == 2 && !topology.cwb_enabled)
+ topology.num_lm = 2;
+ else if (topology.num_dsc == 2)
+ topology.num_lm = 2;
+ else if (dpu_kms->catalog->caps->has_3d_merge)
+ topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
+ else
+ topology.num_lm = 1;
+
+ if (crtc_state->ctm)
+ topology.num_dspp = topology.num_lm;
+
+ return topology;
+}
+
+static int dpu_crtc_assign_resources(struct drm_crtc *crtc,
+ struct drm_crtc_state *crtc_state)
+{
+ struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_CRTC];
+ struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_CRTC];
+ struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_CRTC];
+ int i, num_lm, num_ctl, num_dspp;
+ struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+ struct dpu_global_state *global_state;
+ struct dpu_crtc_state *cstate;
+ struct msm_display_topology topology;
+ int ret;
+
+ /*
+ * Release and Allocate resources on every modeset
+ */
+ global_state = dpu_kms_get_global_state(crtc_state->state);
+ if (IS_ERR(global_state))
+ return PTR_ERR(global_state);
+
+ dpu_rm_release(global_state, crtc);
+
+ if (!crtc_state->enable)
+ return 0;
+
+ topology = dpu_crtc_get_topology(crtc, dpu_kms, crtc_state);
+ ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
+ crtc_state->crtc, &topology);
+ if (ret)
+ return ret;
+
+ cstate = to_dpu_crtc_state(crtc_state);
+
+ num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ crtc_state->crtc,
+ DPU_HW_BLK_CTL, hw_ctl,
+ ARRAY_SIZE(hw_ctl));
+ num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ crtc_state->crtc,
+ DPU_HW_BLK_LM, hw_lm,
+ ARRAY_SIZE(hw_lm));
+ num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ crtc_state->crtc,
+ DPU_HW_BLK_DSPP, hw_dspp,
+ ARRAY_SIZE(hw_dspp));
+
+ for (i = 0; i < num_lm; i++) {
+ int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
+
+ cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
+ cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
+ if (i < num_dspp)
+ cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
+ }
+
+ cstate->num_mixers = num_lm;
+
+ return 0;
+}
+
+/**
+ * dpu_crtc_check_mode_changed: check if full modeset is required
+ * @old_crtc_state: Previous CRTC state
+ * @new_crtc_state: Corresponding CRTC state to be checked
+ *
+ * Check if the changes in the object properties demand full mode set.
+ */
+int dpu_crtc_check_mode_changed(struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state)
+{
+ struct drm_encoder *drm_enc;
+ struct drm_crtc *crtc = new_crtc_state->crtc;
+ bool clone_mode_enabled = drm_crtc_in_clone_mode(old_crtc_state);
+ bool clone_mode_requested = drm_crtc_in_clone_mode(new_crtc_state);
+
+ DRM_DEBUG_ATOMIC("%d\n", crtc->base.id);
+
+ /* there might be cases where encoder needs a modeset too */
+ drm_for_each_encoder_mask(drm_enc, crtc->dev, new_crtc_state->encoder_mask) {
+ if (dpu_encoder_needs_modeset(drm_enc, new_crtc_state->state))
+ new_crtc_state->mode_changed = true;
+ }
+
+ if ((clone_mode_requested && !clone_mode_enabled) ||
+ (!clone_mode_requested && clone_mode_enabled))
+ new_crtc_state->mode_changed = true;
+
+ return 0;
+}
+
static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@@ -1245,6 +1438,13 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state);
+ /* don't reallocate resources if only ACTIVE has been changed */
+ if (crtc_state->mode_changed || crtc_state->connectors_changed) {
+ rc = dpu_crtc_assign_resources(crtc, crtc_state);
+ if (rc < 0)
+ return rc;
+ }
+
if (dpu_use_virtual_planes &&
(crtc_state->planes_changed || crtc_state->zpos_changed)) {
rc = dpu_crtc_reassign_planes(crtc, crtc_state);
@@ -1262,10 +1462,6 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name);
- /* force a full mode set if active state changed */
- if (crtc_state->active_changed)
- crtc_state->mode_changed = true;
-
if (cstate->num_mixers) {
rc = _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc_state);
if (rc)
@@ -1484,8 +1680,9 @@ static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
seq_printf(s, "core_clk_rate: %llu\n",
dpu_crtc->cur_perf.core_clk_rate);
- seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
- seq_printf(s, "max_per_pipe_ib: %llu\n",
+ seq_printf(s, "bw_ctl: %uk\n",
+ (u32)DIV_ROUND_UP_ULL(dpu_crtc->cur_perf.bw_ctl, 1000));
+ seq_printf(s, "max_per_pipe_ib: %u\n",
dpu_crtc->cur_perf.max_per_pipe_ib);
return 0;
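
The topology comment in dpu_crtc_get_topology() above spells out when two layer mixers are reserved. As a quick reference, the same decision tree can be restated as a stand-alone function; this is only a sketch mirroring the hunk, with made-up names and example inputs.

#include <stdbool.h>
#include <stdio.h>

#define MAX_HDISPLAY_SPLIT 1080	/* same split threshold as the hunk */

/* Restatement of the num_lm selection in dpu_crtc_get_topology(). */
static int pick_num_lm(int num_intf, bool cwb_enabled, int num_dsc,
		       bool has_3d_merge, int hdisplay)
{
	if (num_intf == 2 && !cwb_enabled)
		return 2;	/* true dual-interface split display */
	if (num_dsc == 2)
		return 2;	/* 2:2:1 DSC topology */
	if (has_3d_merge)
		return hdisplay > MAX_HDISPLAY_SPLIT ? 2 : 1; /* stream merge */
	return 1;
}

int main(void)
{
	/* 4k panel with 3D merge available: 2 LMs */
	printf("%d\n", pick_num_lm(1, false, 0, true, 3840));
	/* 1080p CWB clone (num_intf counts both WB and DSI): 1 LM */
	printf("%d\n", pick_num_lm(2, true, 0, true, 1080));
	return 0;
}
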
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index 0b148f3ce0d7..94392b9b9245 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -239,6 +239,9 @@ static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
return crtc ? atomic_read(&to_dpu_crtc(crtc)->frame_pending) : -EINVAL;
}
+int dpu_crtc_check_mode_changed(struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state);
+
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en);
void dpu_crtc_vblank_callback(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 48e6e8d74c85..284e69bb47c1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2013 Red Hat
* Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Author: Rob Clark <robdclark@gmail.com>
*/
@@ -24,6 +24,7 @@
#include "dpu_hw_catalog.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_ctl.h"
+#include "dpu_hw_cwb.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_dsc.h"
#include "dpu_hw_merge3d.h"
@@ -58,8 +59,6 @@
#define IDLE_SHORT_TIMEOUT 1
-#define MAX_HDISPLAY_SPLIT 1080
-
/* timeout in frames waiting for frame done */
#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
@@ -135,8 +134,12 @@ enum dpu_enc_rc_states {
* @cur_slave: As above but for the slave encoder.
* @hw_pp: Handle to the pingpong blocks used for the display. No.
* pingpong blocks can be different than num_phys_encs.
+ * @hw_cwb: Handle to the CWB muxes used for concurrent writeback
+ * display. Number of CWB muxes can be different than
+ * num_phys_encs.
* @hw_dsc: Handle to the DSC blocks used for the display.
* @dsc_mask: Bitmask of used DSC blocks.
+ * @cwb_mask: Bitmask of used CWB muxes
* @intfs_swapped: Whether or not the phys_enc interfaces have been swapped
* for partial update right-only cases, such as pingpong
* split where virtual pingpong does not generate IRQs
@@ -179,9 +182,11 @@ struct dpu_encoder_virt {
struct dpu_encoder_phys *cur_master;
struct dpu_encoder_phys *cur_slave;
struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_cwb *hw_cwb[MAX_CHANNELS_PER_ENC];
struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
unsigned int dsc_mask;
+ unsigned int cwb_mask;
bool intfs_swapped;
@@ -622,9 +627,9 @@ bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
if (dpu_enc->phys_encs[i])
intf_count++;
- /* See dpu_encoder_get_topology, we only support 2:2:1 topology */
- if (dpu_enc->dsc)
- num_dsc = 2;
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
+ if (dpu_enc->hw_dsc[i])
+ num_dsc++;
return (num_dsc > 0) && (num_dsc > intf_count);
}
@@ -647,130 +652,51 @@ struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
return NULL;
}
-static struct msm_display_topology dpu_encoder_get_topology(
- struct dpu_encoder_virt *dpu_enc,
- struct dpu_kms *dpu_kms,
- struct drm_display_mode *mode,
- struct drm_crtc_state *crtc_state,
- struct drm_dsc_config *dsc)
+void dpu_encoder_update_topology(struct drm_encoder *drm_enc,
+ struct msm_display_topology *topology,
+ struct drm_atomic_state *state,
+ const struct drm_display_mode *adj_mode)
{
- struct msm_display_topology topology = {0};
- int i, intf_count = 0;
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ struct msm_drm_private *priv = dpu_enc->base.dev->dev_private;
+ struct msm_display_info *disp_info = &dpu_enc->disp_info;
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_framebuffer *fb;
+ struct drm_dsc_config *dsc;
+
+ int i;
for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
if (dpu_enc->phys_encs[i])
- intf_count++;
+ topology->num_intf++;
- /* Datapath topology selection
- *
- * Dual display
- * 2 LM, 2 INTF ( Split display using 2 interfaces)
- *
- * Single display
- * 1 LM, 1 INTF
- * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
- *
- * Add dspps to the reservation requirements if ctm is requested
- */
- if (intf_count == 2)
- topology.num_lm = 2;
- else if (!dpu_kms->catalog->caps->has_3d_merge)
- topology.num_lm = 1;
- else
- topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
-
- if (crtc_state->ctm)
- topology.num_dspp = topology.num_lm;
-
- topology.num_intf = intf_count;
+ dsc = dpu_encoder_get_dsc_config(drm_enc);
+ /* We only support 2 DSC mode (with 2 LM and 1 INTF) */
if (dsc) {
/*
- * In case of Display Stream Compression (DSC), we would use
- * 2 DSC encoders, 2 layer mixers and 1 interface
- * this is power optimal and can drive up to (including) 4k
- * screens
+ * Use 2 DSC encoders, 2 layer mixers and 1 or 2 interfaces
+ * when Display Stream Compression (DSC) is enabled,
+ * and when enough DSC blocks are available.
+ * This is power-optimal and can drive up to (including) 4k
+ * screens.
*/
- topology.num_dsc = 2;
- topology.num_lm = 2;
- topology.num_intf = 1;
- }
-
- return topology;
-}
-
-static void dpu_encoder_assign_crtc_resources(struct dpu_kms *dpu_kms,
- struct drm_encoder *drm_enc,
- struct dpu_global_state *global_state,
- struct drm_crtc_state *crtc_state)
-{
- struct dpu_crtc_state *cstate;
- struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
- struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
- struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC];
- int num_lm, num_ctl, num_dspp, i;
-
- cstate = to_dpu_crtc_state(crtc_state);
-
- memset(cstate->mixers, 0, sizeof(cstate->mixers));
-
- num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
- num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
- num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
- ARRAY_SIZE(hw_dspp));
-
- for (i = 0; i < num_lm; i++) {
- int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
-
- cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
- cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
- cstate->mixers[i].hw_dspp = i < num_dspp ? to_dpu_hw_dspp(hw_dspp[i]) : NULL;
- }
-
- cstate->num_mixers = num_lm;
-}
-
-static int dpu_encoder_virt_atomic_check(
- struct drm_encoder *drm_enc,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- struct dpu_encoder_virt *dpu_enc;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
- struct drm_display_mode *adj_mode;
- struct msm_display_topology topology;
- struct msm_display_info *disp_info;
- struct dpu_global_state *global_state;
- struct drm_framebuffer *fb;
- struct drm_dsc_config *dsc;
- int ret = 0;
-
- if (!drm_enc || !crtc_state || !conn_state) {
- DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
- drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
- return -EINVAL;
+ WARN(topology->num_intf > 2,
+ "DSC topology cannot support more than 2 interfaces\n");
+ if (topology->num_intf >= 2 || dpu_kms->catalog->dsc_count >= 2)
+ topology->num_dsc = 2;
+ else
+ topology->num_dsc = 1;
}
- dpu_enc = to_dpu_encoder_virt(drm_enc);
- DPU_DEBUG_ENC(dpu_enc, "\n");
-
- priv = drm_enc->dev->dev_private;
- disp_info = &dpu_enc->disp_info;
- dpu_kms = to_dpu_kms(priv->kms);
- adj_mode = &crtc_state->adjusted_mode;
- global_state = dpu_kms_get_global_state(crtc_state->state);
- if (IS_ERR(global_state))
- return PTR_ERR(global_state);
-
- trace_dpu_enc_atomic_check(DRMID(drm_enc));
-
- dsc = dpu_encoder_get_dsc_config(drm_enc);
-
- topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state, dsc);
+ connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);
+ if (!connector)
+ return;
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (!conn_state)
+ return;
/*
* Use CDM only for writeback or DP at the moment as other interfaces cannot handle it.
@@ -781,34 +707,45 @@ static int dpu_encoder_virt_atomic_check(
fb = conn_state->writeback_job->fb;
if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb)))
- topology.needs_cdm = true;
+ topology->num_cdm++;
} else if (disp_info->intf_type == INTF_DP) {
if (msm_dp_is_yuv_420_enabled(priv->dp[disp_info->h_tile_instance[0]], adj_mode))
- topology.needs_cdm = true;
+ topology->num_cdm++;
}
+}
- if (topology.needs_cdm && !dpu_enc->cur_master->hw_cdm)
- crtc_state->mode_changed = true;
- else if (!topology.needs_cdm && dpu_enc->cur_master->hw_cdm)
- crtc_state->mode_changed = true;
- /*
- * Release and Allocate resources on every modeset
- * Dont allocate when active is false.
- */
- if (drm_atomic_crtc_needs_modeset(crtc_state)) {
- dpu_rm_release(global_state, drm_enc);
+bool dpu_encoder_needs_modeset(struct drm_encoder *drm_enc, struct drm_atomic_state *state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_framebuffer *fb;
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
- if (!crtc_state->active_changed || crtc_state->enable)
- ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
- drm_enc, crtc_state, &topology);
- if (!ret)
- dpu_encoder_assign_crtc_resources(dpu_kms, drm_enc,
- global_state, crtc_state);
- }
+ if (!drm_enc || !state)
+ return false;
- trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);
+ connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);
+ if (!connector)
+ return false;
- return ret;
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+
+ /*
+ * These checks are duplicated from dpu_encoder_update_topology() since
+ * CRTC and encoder don't hold topology information
+ */
+ if (dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) {
+ fb = conn_state->writeback_job->fb;
+ if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb))) {
+ if (!dpu_enc->cur_master->hw_cdm)
+ return true;
+ } else {
+ if (dpu_enc->cur_master->hw_cdm)
+ return true;
+ }
+ }
+
+ return false;
}
static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
@@ -1219,8 +1156,12 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_blk *hw_cwb[MAX_CHANNELS_PER_ENC];
int num_ctl, num_pp, num_dsc;
+ int num_cwb = 0;
+ bool is_cwb_encoder;
unsigned int dsc_mask = 0;
+ unsigned int cwb_mask = 0;
int i;
if (!drm_enc) {
@@ -1233,6 +1174,8 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
+ is_cwb_encoder = drm_crtc_in_clone_mode(crtc_state) &&
+ dpu_enc->disp_info.intf_type == INTF_WB;
global_state = dpu_kms_get_existing_global_state(dpu_kms);
if (IS_ERR_OR_NULL(global_state)) {
@@ -1243,18 +1186,38 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
trace_dpu_enc_mode_set(DRMID(drm_enc));
/* Query resource that have been reserved in atomic check step. */
- num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
- ARRAY_SIZE(hw_pp));
+ if (is_cwb_encoder) {
+ num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->crtc,
+ DPU_HW_BLK_DCWB_PINGPONG,
+ hw_pp, ARRAY_SIZE(hw_pp));
+ num_cwb = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->crtc,
+ DPU_HW_BLK_CWB,
+ hw_cwb, ARRAY_SIZE(hw_cwb));
+ } else {
+ num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->crtc,
+ DPU_HW_BLK_PINGPONG, hw_pp,
+ ARRAY_SIZE(hw_pp));
+ }
+
+ for (i = 0; i < num_cwb; i++) {
+ dpu_enc->hw_cwb[i] = to_dpu_hw_cwb(hw_cwb[i]);
+ cwb_mask |= BIT(dpu_enc->hw_cwb[i]->idx - CWB_0);
+ }
+
+ dpu_enc->cwb_mask = cwb_mask;
+
num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
+ drm_enc->crtc, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
: NULL;
num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_DSC,
+ drm_enc->crtc, DPU_HW_BLK_DSC,
hw_dsc, ARRAY_SIZE(hw_dsc));
for (i = 0; i < num_dsc; i++) {
dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
@@ -1268,7 +1231,7 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
struct dpu_hw_blk *hw_cdm = NULL;
dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_CDM,
+ drm_enc->crtc, DPU_HW_BLK_CDM,
&hw_cdm, 1);
dpu_enc->cur_master->hw_cdm = hw_cdm ? to_dpu_hw_cdm(hw_cdm) : NULL;
}
@@ -1654,6 +1617,7 @@ static void dpu_encoder_off_work(struct work_struct *work)
static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
struct dpu_hw_ctl *ctl;
int pending_kickoff_cnt;
u32 ret = UINT_MAX;
@@ -1671,6 +1635,15 @@ static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
+ /* Return early if encoder is writeback and in clone mode */
+ if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL &&
+ dpu_enc->cwb_mask) {
+ DPU_DEBUG("encoder %d skip flush for concurrent writeback encoder\n",
+ DRMID(drm_enc));
+ return;
+ }
+
if (extra_flush_bits && ctl->ops.update_pending_flush)
ctl->ops.update_pending_flush(ctl, extra_flush_bits);
@@ -1693,6 +1666,8 @@ static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
*/
static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(phys->parent);
+
if (!phys) {
DPU_ERROR("invalid argument(s)\n");
return;
@@ -1703,6 +1678,12 @@ static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
return;
}
+ if (phys->parent->encoder_type == DRM_MODE_ENCODER_VIRTUAL &&
+ dpu_enc->cwb_mask) {
+ DPU_DEBUG("encoder %d CWB enabled, skipping\n", DRMID(phys->parent));
+ return;
+ }
+
if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
phys->ops.trigger_start(phys);
}
@@ -2020,7 +2001,6 @@ static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_ctl *ctl,
static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
struct drm_dsc_config *dsc)
{
- /* coding only for 2LM, 2enc, 1 dsc config */
struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
struct dpu_hw_ctl *ctl = enc_master->hw_ctl;
struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
@@ -2030,22 +2010,24 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
int dsc_common_mode;
int pic_width;
u32 initial_lines;
+ int num_dsc = 0;
int i;
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
hw_pp[i] = dpu_enc->hw_pp[i];
hw_dsc[i] = dpu_enc->hw_dsc[i];
- if (!hw_pp[i] || !hw_dsc[i]) {
- DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n");
- return;
- }
+ if (!hw_pp[i] || !hw_dsc[i])
+ break;
+
+ num_dsc++;
}
- dsc_common_mode = 0;
pic_width = dsc->pic_width;
- dsc_common_mode = DSC_MODE_SPLIT_PANEL;
+ dsc_common_mode = 0;
+ if (num_dsc > 1)
+ dsc_common_mode |= DSC_MODE_SPLIT_PANEL;
if (dpu_encoder_use_dsc_merge(enc_master->parent))
dsc_common_mode |= DSC_MODE_MULTIPLEX;
if (enc_master->intf_mode == INTF_MODE_VIDEO)
@@ -2054,14 +2036,10 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
this_frame_slices = pic_width / dsc->slice_width;
intf_ip_w = this_frame_slices * dsc->slice_width;
- /*
- * dsc merge case: when using 2 encoders for the same stream,
- * no. of slices need to be same on both the encoders.
- */
- enc_ip_w = intf_ip_w / 2;
+ enc_ip_w = intf_ip_w / num_dsc;
initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
- for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
+ for (i = 0; i < num_dsc; i++)
dpu_encoder_dsc_pipe_cfg(ctl, hw_dsc[i], hw_pp[i],
dsc, dsc_common_mode, initial_lines);
}
@@ -2135,6 +2113,25 @@ bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc)
}
/**
+ * dpu_encoder_start_frame_done_timer - Start the encoder frame done timer
+ * @drm_enc: Pointer to drm encoder structure
+ */
+void dpu_encoder_start_frame_done_timer(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ unsigned long timeout_ms;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
+ drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);
+
+ atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
+ mod_timer(&dpu_enc->frame_done_timer,
+ jiffies + msecs_to_jiffies(timeout_ms));
+}
+
+/**
* dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
* (i.e. ctl flush and start) immediately.
* @drm_enc: encoder pointer
@@ -2143,7 +2140,6 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_encoder_phys *phys;
- unsigned long timeout_ms;
unsigned int i;
DPU_ATRACE_BEGIN("encoder_kickoff");
@@ -2151,13 +2147,6 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
trace_dpu_enc_kickoff(DRMID(drm_enc));
- timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
- drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);
-
- atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
- mod_timer(&dpu_enc->frame_done_timer,
- jiffies + msecs_to_jiffies(timeout_ms));
-
/* All phys encs are ready to go, trigger the kickoff */
_dpu_encoder_kickoff_phys(dpu_enc);
@@ -2183,22 +2172,22 @@ static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
memset(&mixer, 0, sizeof(mixer));
/* reset all mixers for this encoder */
- if (phys_enc->hw_ctl->ops.clear_all_blendstages)
- phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl);
+ if (ctl->ops.clear_all_blendstages)
+ ctl->ops.clear_all_blendstages(ctl);
global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms);
num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state,
- phys_enc->parent->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
+ phys_enc->parent->crtc, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
for (i = 0; i < num_lm; i++) {
hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]);
- if (phys_enc->hw_ctl->ops.update_pending_flush_mixer)
- phys_enc->hw_ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx);
+ if (ctl->ops.update_pending_flush_mixer)
+ ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx);
/* clear all blendstages */
- if (phys_enc->hw_ctl->ops.setup_blendstage)
- phys_enc->hw_ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
+ if (ctl->ops.setup_blendstage)
+ ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
}
}
@@ -2250,7 +2239,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
- phys_enc->hw_ctl->ops.reset(ctl);
+ ctl->ops.reset(ctl);
dpu_encoder_helper_reset_mixers(phys_enc);
@@ -2265,8 +2254,8 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, PINGPONG_NONE);
/* mark WB flush as pending */
- if (phys_enc->hw_ctl->ops.update_pending_flush_wb)
- phys_enc->hw_ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx);
+ if (ctl->ops.update_pending_flush_wb)
+ ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx);
} else {
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk)
@@ -2275,8 +2264,8 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
PINGPONG_NONE);
/* mark INTF flush as pending */
- if (phys_enc->hw_ctl->ops.update_pending_flush_intf)
- phys_enc->hw_ctl->ops.update_pending_flush_intf(phys_enc->hw_ctl,
+ if (ctl->ops.update_pending_flush_intf)
+ ctl->ops.update_pending_flush_intf(ctl,
dpu_enc->phys_encs[i]->hw_intf->idx);
}
}
@@ -2284,12 +2273,15 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
if (phys_enc->hw_pp && phys_enc->hw_pp->ops.setup_dither)
phys_enc->hw_pp->ops.setup_dither(phys_enc->hw_pp, NULL);
+ if (dpu_enc->cwb_mask)
+ dpu_encoder_helper_phys_setup_cwb(phys_enc, false);
+
/* reset the merge 3D HW block */
if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) {
phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
BLEND_3D_NONE);
- if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d)
- phys_enc->hw_ctl->ops.update_pending_flush_merge_3d(ctl,
+ if (ctl->ops.update_pending_flush_merge_3d)
+ ctl->ops.update_pending_flush_merge_3d(ctl,
phys_enc->hw_pp->merge_3d->idx);
}
@@ -2297,9 +2289,9 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
if (phys_enc->hw_cdm->ops.bind_pingpong_blk && phys_enc->hw_pp)
phys_enc->hw_cdm->ops.bind_pingpong_blk(phys_enc->hw_cdm,
PINGPONG_NONE);
- if (phys_enc->hw_ctl->ops.update_pending_flush_cdm)
- phys_enc->hw_ctl->ops.update_pending_flush_cdm(phys_enc->hw_ctl,
- phys_enc->hw_cdm->idx);
+ if (ctl->ops.update_pending_flush_cdm)
+ ctl->ops.update_pending_flush_cdm(ctl,
+ phys_enc->hw_cdm->idx);
}
if (dpu_enc->dsc) {
@@ -2310,6 +2302,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
+ intf_cfg.cwb = dpu_enc->cwb_mask;
if (phys_enc->hw_intf)
intf_cfg.intf = phys_enc->hw_intf->idx;
@@ -2327,6 +2320,68 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
ctl->ops.clear_pending_flush(ctl);
}
+void dpu_encoder_helper_phys_setup_cwb(struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
+ struct dpu_hw_cwb *hw_cwb;
+ struct dpu_hw_ctl *hw_ctl;
+ struct dpu_hw_cwb_setup_cfg cwb_cfg;
+
+ struct dpu_kms *dpu_kms;
+ struct dpu_global_state *global_state;
+ struct dpu_hw_blk *rt_pp_list[MAX_CHANNELS_PER_ENC];
+ int num_pp;
+
+ if (!phys_enc->hw_wb)
+ return;
+
+ hw_ctl = phys_enc->hw_ctl;
+
+ if (!phys_enc->hw_ctl) {
+ DPU_DEBUG("[wb:%d] no ctl assigned\n",
+ phys_enc->hw_wb->idx - WB_0);
+ return;
+ }
+
+ dpu_kms = phys_enc->dpu_kms;
+ global_state = dpu_kms_get_existing_global_state(dpu_kms);
+ num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ phys_enc->parent->crtc,
+ DPU_HW_BLK_PINGPONG, rt_pp_list,
+ ARRAY_SIZE(rt_pp_list));
+
+ if (num_pp == 0 || num_pp > MAX_CHANNELS_PER_ENC) {
+ DPU_DEBUG_ENC(dpu_enc, "invalid num_pp %d\n", num_pp);
+ return;
+ }
+
+ /*
+ * The CWB mux supports using LM or DSPP as tap points. For now,
+ * always use LM tap point
+ */
+ cwb_cfg.input = INPUT_MODE_LM_OUT;
+
+ for (int i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ hw_cwb = dpu_enc->hw_cwb[i];
+ if (!hw_cwb)
+ continue;
+
+ if (enable) {
+ struct dpu_hw_pingpong *hw_pp =
+ to_dpu_hw_pingpong(rt_pp_list[i]);
+ cwb_cfg.pp_idx = hw_pp->idx;
+ } else {
+ cwb_cfg.pp_idx = PINGPONG_NONE;
+ }
+
+ hw_cwb->ops.config_cwb(hw_cwb, &cwb_cfg);
+
+ if (hw_ctl->ops.update_pending_flush_cwb)
+ hw_ctl->ops.update_pending_flush_cwb(hw_ctl, hw_cwb->idx);
+ }
+}
+
/**
* dpu_encoder_helper_phys_setup_cdm - setup chroma down sampling block
* @phys_enc: Pointer to physical encoder
@@ -2513,6 +2568,38 @@ static int dpu_encoder_virt_add_phys_encs(
return 0;
}
+/**
+ * dpu_encoder_get_clones - Calculate the possible_clones for DPU encoder
+ * @drm_enc: DRM encoder pointer
+ * Returns: possible_clones mask
+ */
+uint32_t dpu_encoder_get_clones(struct drm_encoder *drm_enc)
+{
+ struct drm_encoder *curr;
+ int type = drm_enc->encoder_type;
+ uint32_t clone_mask = drm_encoder_mask(drm_enc);
+
+ /*
+ * Set writeback as possible clones of real-time DSI encoders and vice
+ * versa
+ *
+ * Writeback encoders can't be clones of each other and DSI
+ * encoders can't be clones of each other.
+ *
+ * TODO: Add DP encoders as valid possible clones for writeback encoders
+ * (and vice versa) once concurrent writeback has been validated for DP
+ */
+ drm_for_each_encoder(curr, drm_enc->dev) {
+ if ((type == DRM_MODE_ENCODER_VIRTUAL &&
+ curr->encoder_type == DRM_MODE_ENCODER_DSI) ||
+ (type == DRM_MODE_ENCODER_DSI &&
+ curr->encoder_type == DRM_MODE_ENCODER_VIRTUAL))
+ clone_mask |= drm_encoder_mask(curr);
+ }
+
+ return clone_mask;
+}
+
static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
struct dpu_kms *dpu_kms,
struct msm_display_info *disp_info)
@@ -2630,7 +2717,6 @@ static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
.atomic_mode_set = dpu_encoder_virt_atomic_mode_set,
.atomic_disable = dpu_encoder_virt_atomic_disable,
.atomic_enable = dpu_encoder_virt_atomic_enable,
- .atomic_check = dpu_encoder_virt_atomic_check,
};
static const struct drm_encoder_funcs dpu_encoder_funcs = {
@@ -2789,6 +2875,18 @@ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
}
/**
+ * dpu_encoder_helper_get_cwb_mask - get CWB blocks mask for the DPU encoder
+ * @phys_enc: Pointer to physical encoder structure
+ */
+unsigned int dpu_encoder_helper_get_cwb_mask(struct dpu_encoder_phys *phys_enc)
+{
+ struct drm_encoder *encoder = phys_enc->parent;
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
+
+ return dpu_enc->cwb_mask;
+}
+
+/**
* dpu_encoder_helper_get_dsc - get DSC blocks mask for the DPU encoder
* This helper function is used by physical encoder to get DSC blocks mask
* used for this encoder.
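
dpu_encoder_get_clones() composes the possible_clones bitmask so that a DSI encoder and a writeback encoder can be enabled together on one CRTC for concurrent writeback. Below is a simplified, self-contained model of that composition; the encoder indices are invented, and the assumption that the caller stores the result in encoder->possible_clones is not visible in this hunk. In the DRM core, drm_encoder_mask() is BIT(encoder index) and the mask conventionally includes the encoder's own bit, which is why the result starts from drm_encoder_mask(drm_enc).

#include <stdio.h>

enum enc_type { ENC_DSI, ENC_VIRTUAL /* writeback */, ENC_DP };

struct enc { int index; enum enc_type type; };

/* Simplified model of the clone-mask loop in dpu_encoder_get_clones(). */
static unsigned int get_clones(const struct enc *self, const struct enc *all, int n)
{
	unsigned int mask = 1u << self->index;	/* always a clone of itself */
	int i;

	for (i = 0; i < n; i++) {
		/* pair DSI with writeback (and vice versa), nothing else yet */
		if ((self->type == ENC_VIRTUAL && all[i].type == ENC_DSI) ||
		    (self->type == ENC_DSI && all[i].type == ENC_VIRTUAL))
			mask |= 1u << all[i].index;
	}
	return mask;
}

int main(void)
{
	struct enc encs[] = { {0, ENC_DSI}, {1, ENC_VIRTUAL}, {2, ENC_DP} };

	printf("dsi: 0x%x wb: 0x%x dp: 0x%x\n",
	       get_clones(&encs[0], encs, 3),	/* 0x3: itself plus the WB encoder */
	       get_clones(&encs[1], encs, 3),	/* 0x3: itself plus the DSI encoder */
	       get_clones(&encs[2], encs, 3));	/* 0x4: DP not paired yet (see TODO) */
	return 0;
}
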
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index 92b5ee390788..ca1ca2e51d7e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
@@ -60,6 +60,8 @@ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder);
void dpu_encoder_virt_runtime_resume(struct drm_encoder *encoder);
+uint32_t dpu_encoder_get_clones(struct drm_encoder *drm_enc);
+
struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
int drm_enc_mode,
struct msm_display_info *disp_info);
@@ -80,6 +82,13 @@ int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos);
bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc);
+void dpu_encoder_update_topology(struct drm_encoder *drm_enc,
+ struct msm_display_topology *topology,
+ struct drm_atomic_state *state,
+ const struct drm_display_mode *adj_mode);
+
+bool dpu_encoder_needs_modeset(struct drm_encoder *drm_enc, struct drm_atomic_state *state);
+
void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
struct drm_writeback_job *job);
@@ -88,4 +97,5 @@ void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc);
+void dpu_encoder_start_frame_done_timer(struct drm_encoder *drm_enc);
#endif /* __DPU_ENCODER_H__ */
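
dpu_encoder_start_frame_done_timer() now arms the frame-done timeout that dpu_encoder_kickoff() used to set up itself, which lets the clone-mode path kick off both encoders before any timer starts ticking. With DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES = 5, as defined earlier in dpu_encoder.c, the timeout is five frame periods of the adjusted mode. A quick worked example of the integer math:

#include <stdio.h>

#define FRAME_DONE_TIMEOUT_FRAMES 5	/* mirrors DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES */

int main(void)
{
	int vrefresh[] = { 30, 60, 90, 120 };
	int i;

	/* five frame periods in ms, integer division as in the driver */
	for (i = 0; i < 4; i++)
		printf("%3d Hz -> %lu ms\n", vrefresh[i],
		       (unsigned long)FRAME_DONE_TIMEOUT_FRAMES * 1000 / vrefresh[i]);
	return 0;	/* prints 166, 83, 55 and 41 ms */
}
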
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index 63f09857025c..61b22d949454 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
*/
@@ -309,6 +309,8 @@ static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
return BLEND_3D_NONE;
}
+unsigned int dpu_encoder_helper_get_cwb_mask(struct dpu_encoder_phys *phys_enc);
+
unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc);
struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc);
@@ -331,6 +333,9 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc);
+void dpu_encoder_helper_phys_setup_cwb(struct dpu_encoder_phys *phys_enc,
+ bool enable);
+
void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc,
const struct msm_format *dpu_fmt,
u32 output_type);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index e9bbccc44dad..da9994a79ca2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -5,6 +5,7 @@
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/delay.h>
+#include <linux/string_choices.h>
#include "dpu_encoder_phys.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_pingpong.h"
@@ -261,7 +262,7 @@ static int dpu_encoder_phys_cmd_control_vblank_irq(
DRM_DEBUG_KMS("id:%u pp:%d enable=%s/%d\n", DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0,
- enable ? "true" : "false", refcount);
+ str_true_false(enable), refcount);
if (enable) {
if (phys_enc->vblank_refcount == 0)
@@ -285,7 +286,7 @@ end:
DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n",
DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0, ret,
- enable ? "true" : "false", refcount);
+ str_true_false(enable), refcount);
}
return ret;
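
The phys_cmd hunk swaps the open-coded enable ? "true" : "false" for str_true_false() from <linux/string_choices.h>. For reference, the in-tree helper is a static inline equivalent to the sketch below.

#include <stdbool.h>
#include <stdio.h>

/* Reference equivalent of the helper used above. */
static const char *str_true_false(bool v)
{
	return v ? "true" : "false";
}

int main(void)
{
	printf("%s %s\n", str_true_false(true), str_true_false(false)); /* true false */
	return 0;
}
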
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index 4c006ec74575..849fea580a4c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
@@ -68,7 +68,7 @@ static void dpu_encoder_phys_wb_set_ot_limit(
ot_params.num = hw_wb->idx - WB_0;
ot_params.width = phys_enc->cached_mode.hdisplay;
ot_params.height = phys_enc->cached_mode.vdisplay;
- ot_params.is_wfd = true;
+ ot_params.is_wfd = !dpu_encoder_helper_get_cwb_mask(phys_enc);
ot_params.frame_rate = drm_mode_vrefresh(&phys_enc->cached_mode);
ot_params.vbif_idx = hw_wb->caps->vbif_idx;
ot_params.rd = false;
@@ -111,7 +111,7 @@ static void dpu_encoder_phys_wb_set_qos_remap(
qos_params.vbif_idx = hw_wb->caps->vbif_idx;
qos_params.xin_id = hw_wb->caps->xin_id;
qos_params.num = hw_wb->idx - WB_0;
- qos_params.is_rt = false;
+ qos_params.is_rt = dpu_encoder_helper_get_cwb_mask(phys_enc);
DPU_DEBUG("[qos_remap] wb:%d vbif:%d xin:%d is_rt:%d\n",
qos_params.num,
@@ -174,6 +174,7 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
struct dpu_hw_wb *hw_wb;
struct dpu_hw_wb_cfg *wb_cfg;
+ u32 cdp_usage;
if (!phys_enc || !phys_enc->dpu_kms || !phys_enc->dpu_kms->catalog) {
DPU_ERROR("invalid encoder\n");
@@ -182,6 +183,10 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
hw_wb = phys_enc->hw_wb;
wb_cfg = &wb_enc->wb_cfg;
+ if (dpu_encoder_helper_get_cwb_mask(phys_enc))
+ cdp_usage = DPU_PERF_CDP_USAGE_RT;
+ else
+ cdp_usage = DPU_PERF_CDP_USAGE_NRT;
wb_cfg->intf_mode = phys_enc->intf_mode;
wb_cfg->roi.x1 = 0;
@@ -199,7 +204,7 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
const struct dpu_perf_cfg *perf = phys_enc->dpu_kms->catalog->perf;
hw_wb->ops.setup_cdp(hw_wb, format,
- perf->cdp_cfg[DPU_PERF_CDP_USAGE_NRT].wr_enable);
+ perf->cdp_cfg[cdp_usage].wr_enable);
}
if (hw_wb->ops.setup_outaddress)
@@ -236,6 +241,7 @@ static void dpu_encoder_phys_wb_setup_ctl(struct dpu_encoder_phys *phys_enc)
intf_cfg.intf = DPU_NONE;
intf_cfg.wb = hw_wb->idx;
+ intf_cfg.cwb = dpu_encoder_helper_get_cwb_mask(phys_enc);
if (mode_3d && hw_pp && hw_pp->merge_3d)
intf_cfg.merge_3d = hw_pp->merge_3d->idx;
@@ -340,6 +346,8 @@ static void dpu_encoder_phys_wb_setup(
dpu_encoder_helper_phys_setup_cdm(phys_enc, format, CDM_CDWN_OUTPUT_WB);
+ dpu_encoder_helper_phys_setup_cwb(phys_enc, true);
+
dpu_encoder_phys_wb_setup_ctl(phys_enc);
}
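
The three writeback changes above follow one rule: when a CWB mux is active, the writeback path tracks a live (real-time) display, so WFD-style non-real-time OT/QoS/CDP settings no longer apply. A hedged sketch of that decision, with the mux mask treated as an opaque u32 and the usage values reduced to a plain enum:

#include <stdbool.h>
#include <stdint.h>

enum wb_cdp_usage { WB_CDP_USAGE_RT, WB_CDP_USAGE_NRT };

/* sketch: a non-zero CWB mux mask means the WB follows a live display */
static enum wb_cdp_usage wb_pick_cdp_usage(uint32_t cwb_mask)
{
	return cwb_mask ? WB_CDP_USAGE_RT : WB_CDP_USAGE_NRT;
}

static bool wb_is_rt(uint32_t cwb_mask)
{
	return cwb_mask != 0;
}

static bool wb_is_wfd(uint32_t cwb_mask)
{
	return cwb_mask == 0;
}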
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 0b342c043875..64265ca4656a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -232,37 +232,6 @@ static const u32 rotation_v2_formats[] = {
/* TODO add formats after validation */
};
-static const u32 wb2_formats_rgb[] = {
- DRM_FORMAT_RGB565,
- DRM_FORMAT_BGR565,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_RGBA8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGBX8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB1555,
- DRM_FORMAT_RGBA5551,
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_RGBX5551,
- DRM_FORMAT_ARGB4444,
- DRM_FORMAT_RGBA4444,
- DRM_FORMAT_RGBX4444,
- DRM_FORMAT_XRGB4444,
- DRM_FORMAT_BGR888,
- DRM_FORMAT_BGRA8888,
- DRM_FORMAT_BGRX8888,
- DRM_FORMAT_ABGR1555,
- DRM_FORMAT_BGRA5551,
- DRM_FORMAT_XBGR1555,
- DRM_FORMAT_BGRX5551,
- DRM_FORMAT_ABGR4444,
- DRM_FORMAT_BGRA4444,
- DRM_FORMAT_BGRX4444,
- DRM_FORMAT_XBGR4444,
-};
-
static const u32 wb2_formats_rgb_yuv[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_BGR565,
@@ -507,7 +476,14 @@ static const struct dpu_dsc_sub_blks dsc_sblk_1 = {
/*************************************************************
* CDM block config
*************************************************************/
-static const struct dpu_cdm_cfg sc7280_cdm = {
+static const struct dpu_cdm_cfg dpu_cdm_1_x_4_x = {
+ .name = "cdm_0",
+ .id = CDM_0,
+ .len = 0x224,
+ .base = 0x79200,
+};
+
+static const struct dpu_cdm_cfg dpu_cdm_5_x = {
.name = "cdm_0",
.id = CDM_0,
.len = 0x228,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
index ae1534c49ae0..3f88c3641d4a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
@@ -214,7 +214,9 @@ static void dpu_hw_cdm_bind_pingpong_blk(struct dpu_hw_cdm *ctx, const enum dpu_
mux_cfg = DPU_REG_READ(c, CDM_MUX);
mux_cfg &= ~0xf;
- if (pp)
+ if (pp >= PINGPONG_CWB_0)
+ mux_cfg |= 0xd;
+ else if (pp)
mux_cfg |= (pp - PINGPONG_0) & 0x7;
else
mux_cfg |= 0xf;
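
For clarity, the new CDM_MUX selection can be read as a small pure function. The PINGPONG_* values below are placeholders for illustration only; the driver uses the dpu_pingpong enum, in which the dedicated CWB pingpongs sort after the regular ones:

#include <stdint.h>

/* placeholder indices, for illustration only */
enum { PINGPONG_NONE = 0, PINGPONG_0 = 1, PINGPONG_CWB_0 = 9 };

/* 0xd selects the fixed CWB tap point, a regular pingpong selects its own
 * index, and 0xf parks the mux when no pingpong is bound */
static uint32_t cdm_mux_field(uint32_t mux_cfg, int pp)
{
	mux_cfg &= ~0xfu;

	if (pp >= PINGPONG_CWB_0)
		mux_cfg |= 0xd;
	else if (pp)
		mux_cfg |= (uint32_t)(pp - PINGPONG_0) & 0x7;
	else
		mux_cfg |= 0xf;

	return mux_cfg;
}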
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 4893f10d6a58..411a7cf088eb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/delay.h>
@@ -31,12 +31,14 @@
#define CTL_MERGE_3D_ACTIVE 0x0E4
#define CTL_DSC_ACTIVE 0x0E8
#define CTL_WB_ACTIVE 0x0EC
+#define CTL_CWB_ACTIVE 0x0F0
#define CTL_INTF_ACTIVE 0x0F4
#define CTL_CDM_ACTIVE 0x0F8
#define CTL_FETCH_PIPE_ACTIVE 0x0FC
#define CTL_MERGE_3D_FLUSH 0x100
#define CTL_DSC_FLUSH 0x104
#define CTL_WB_FLUSH 0x108
+#define CTL_CWB_FLUSH 0x10C
#define CTL_INTF_FLUSH 0x110
#define CTL_CDM_FLUSH 0x114
#define CTL_PERIPH_FLUSH 0x128
@@ -53,6 +55,7 @@
#define PERIPH_IDX 30
#define INTF_IDX 31
#define WB_IDX 16
+#define CWB_IDX 28
#define DSPP_IDX 29 /* From DPU hw rev 7.x.x */
#define CTL_INVALID_BIT 0xffff
#define CTL_DEFAULT_GROUP_ID 0xf
@@ -110,6 +113,7 @@ static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
ctx->pending_flush_mask = 0x0;
ctx->pending_intf_flush_mask = 0;
ctx->pending_wb_flush_mask = 0;
+ ctx->pending_cwb_flush_mask = 0;
ctx->pending_merge_3d_flush_mask = 0;
ctx->pending_dsc_flush_mask = 0;
ctx->pending_cdm_flush_mask = 0;
@@ -144,6 +148,9 @@ static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
if (ctx->pending_flush_mask & BIT(WB_IDX))
DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
ctx->pending_wb_flush_mask);
+ if (ctx->pending_flush_mask & BIT(CWB_IDX))
+ DPU_REG_WRITE(&ctx->hw, CTL_CWB_FLUSH,
+ ctx->pending_cwb_flush_mask);
if (ctx->pending_flush_mask & BIT(DSPP_IDX))
for (dspp = DSPP_0; dspp < DSPP_MAX; dspp++) {
@@ -310,6 +317,13 @@ static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
ctx->pending_flush_mask |= BIT(WB_IDX);
}
+static void dpu_hw_ctl_update_pending_flush_cwb_v1(struct dpu_hw_ctl *ctx,
+ enum dpu_cwb cwb)
+{
+ ctx->pending_cwb_flush_mask |= BIT(cwb - CWB_0);
+ ctx->pending_flush_mask |= BIT(CWB_IDX);
+}
+
static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
enum dpu_intf intf)
{
@@ -547,6 +561,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
u32 intf_active = 0;
u32 dsc_active = 0;
u32 wb_active = 0;
+ u32 cwb_active = 0;
u32 mode_sel = 0;
/* CTL_TOP[31:28] carries group_id to collate CTL paths
@@ -561,6 +576,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
+ cwb_active = DPU_REG_READ(c, CTL_CWB_ACTIVE);
dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);
if (cfg->intf)
@@ -569,12 +585,16 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
if (cfg->wb)
wb_active |= BIT(cfg->wb - WB_0);
+ if (cfg->cwb)
+ cwb_active |= cfg->cwb;
+
if (cfg->dsc)
dsc_active |= cfg->dsc;
DPU_REG_WRITE(c, CTL_TOP, mode_sel);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
+ DPU_REG_WRITE(c, CTL_CWB_ACTIVE, cwb_active);
DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
if (cfg->merge_3d)
@@ -624,6 +644,7 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 intf_active = 0;
u32 wb_active = 0;
+ u32 cwb_active = 0;
u32 merge3d_active = 0;
u32 dsc_active;
u32 cdm_active;
@@ -651,6 +672,12 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
}
+ if (cfg->cwb) {
+ cwb_active = DPU_REG_READ(c, CTL_CWB_ACTIVE);
+ cwb_active &= ~cfg->cwb;
+ DPU_REG_WRITE(c, CTL_CWB_ACTIVE, cwb_active);
+ }
+
if (cfg->wb) {
wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
wb_active &= ~BIT(cfg->wb - WB_0);
@@ -703,6 +730,7 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->update_pending_flush_merge_3d =
dpu_hw_ctl_update_pending_flush_merge_3d_v1;
ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
+ ops->update_pending_flush_cwb = dpu_hw_ctl_update_pending_flush_cwb_v1;
ops->update_pending_flush_dsc =
dpu_hw_ctl_update_pending_flush_dsc_v1;
ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm_v1;
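
Putting the new register, index and helper together: a CWB flush is staged in two cached masks, a per-mux mask and a top-level bit that makes trigger_flush_v1 write CTL_CWB_FLUSH. A minimal model, with the block ids as placeholders for enum dpu_cwb:

#include <stdint.h>

#define CWB_IDX 28

/* placeholder ids; the driver uses enum dpu_cwb from the catalog */
enum { CWB_0 = 1, CWB_1, CWB_2 };

struct ctl_flush_state {
	uint32_t pending_flush_mask;     /* bits for CTL_FLUSH */
	uint32_t pending_cwb_flush_mask; /* bits for CTL_CWB_FLUSH */
};

/* models dpu_hw_ctl_update_pending_flush_cwb_v1(): no hardware access,
 * only the cached masks change until the next trigger_flush */
static void update_pending_flush_cwb(struct ctl_flush_state *ctx, int cwb)
{
	ctx->pending_cwb_flush_mask |= 1u << (cwb - CWB_0);
	ctx->pending_flush_mask |= 1u << CWB_IDX;
}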
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index 85c6c835cc87..080a9550a0cc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _DPU_HW_CTL_H
@@ -42,6 +42,7 @@ struct dpu_hw_stage_cfg {
* @cdm: CDM block used
* @stream_sel: Stream selection for multi-stream interfaces
* @dsc: DSC BIT masks used
+ * @cwb: CWB BIT masks used
*/
struct dpu_hw_intf_cfg {
enum dpu_intf intf;
@@ -51,6 +52,7 @@ struct dpu_hw_intf_cfg {
enum dpu_ctl_mode_sel intf_mode_sel;
enum dpu_cdm cdm;
int stream_sel;
+ unsigned int cwb;
unsigned int dsc;
};
@@ -115,6 +117,15 @@ struct dpu_hw_ctl_ops {
enum dpu_wb blk);
/**
+ * OR in the given flushbits to the cached pending_(cwb_)flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : concurrent writeback block index
+ */
+ void (*update_pending_flush_cwb)(struct dpu_hw_ctl *ctx,
+ enum dpu_cwb blk);
+
+ /**
* OR in the given flushbits to the cached pending_(intf_)flush_mask
* No effect on hardware
* @ctx : ctl path ctx pointer
@@ -258,6 +269,7 @@ struct dpu_hw_ctl_ops {
* @pending_flush_mask: storage for pending ctl_flush managed via ops
* @pending_intf_flush_mask: pending INTF flush
* @pending_wb_flush_mask: pending WB flush
+ * @pending_cwb_flush_mask: pending CWB flush
* @pending_dsc_flush_mask: pending DSC flush
* @pending_cdm_flush_mask: pending CDM flush
* @ops: operation list
@@ -274,6 +286,7 @@ struct dpu_hw_ctl {
u32 pending_flush_mask;
u32 pending_intf_flush_mask;
u32 pending_wb_flush_mask;
+ u32 pending_cwb_flush_mask;
u32 pending_periph_flush_mask;
u32 pending_merge_3d_flush_mask;
u32 pending_dspp_flush_mask[DSPP_MAX - DSPP_0];
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index ba7bb05efe9b..8d820cd1b554 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -77,12 +77,14 @@ enum dpu_hw_blk_type {
DPU_HW_BLK_LM,
DPU_HW_BLK_CTL,
DPU_HW_BLK_PINGPONG,
+ DPU_HW_BLK_DCWB_PINGPONG,
DPU_HW_BLK_INTF,
DPU_HW_BLK_WB,
DPU_HW_BLK_DSPP,
DPU_HW_BLK_MERGE_3D,
DPU_HW_BLK_DSC,
DPU_HW_BLK_CDM,
+ DPU_HW_BLK_CWB,
DPU_HW_BLK_MAX,
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 97e9cb8c2b09..3305ad0623ca 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2013 Red Hat
* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Author: Rob Clark <robdclark@gmail.com>
*/
@@ -446,6 +446,19 @@ static void dpu_kms_disable_commit(struct msm_kms *kms)
pm_runtime_put_sync(&dpu_kms->pdev->dev);
}
+static int dpu_kms_check_mode_changed(struct msm_kms *kms, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc_state *old_crtc_state;
+ struct drm_crtc *crtc;
+ int i;
+
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
+ dpu_crtc_check_mode_changed(old_crtc_state, new_crtc_state);
+
+ return 0;
+}
+
static void dpu_kms_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
@@ -811,8 +824,11 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
return ret;
num_encoders = 0;
- drm_for_each_encoder(encoder, dev)
+ drm_for_each_encoder(encoder, dev) {
num_encoders++;
+ if (catalog->cwb_count > 0)
+ encoder->possible_clones = dpu_encoder_get_clones(encoder);
+ }
max_crtc_count = min(catalog->mixer_count, num_encoders);
@@ -1062,6 +1078,7 @@ static const struct msm_kms_funcs kms_funcs = {
.irq = dpu_core_irq,
.enable_commit = dpu_kms_enable_commit,
.disable_commit = dpu_kms_disable_commit,
+ .check_mode_changed = dpu_kms_check_mode_changed,
.flush_commit = dpu_kms_flush_commit,
.wait_flush = dpu_kms_wait_flush,
.complete_commit = dpu_kms_complete_commit,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 547cdb2c0c78..a57ec2ec1060 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -124,14 +124,15 @@ struct dpu_global_state {
struct dpu_rm *rm;
- uint32_t pingpong_to_enc_id[PINGPONG_MAX - PINGPONG_0];
- uint32_t mixer_to_enc_id[LM_MAX - LM_0];
- uint32_t ctl_to_enc_id[CTL_MAX - CTL_0];
- uint32_t dspp_to_enc_id[DSPP_MAX - DSPP_0];
- uint32_t dsc_to_enc_id[DSC_MAX - DSC_0];
- uint32_t cdm_to_enc_id;
+ uint32_t pingpong_to_crtc_id[PINGPONG_MAX - PINGPONG_0];
+ uint32_t mixer_to_crtc_id[LM_MAX - LM_0];
+ uint32_t ctl_to_crtc_id[CTL_MAX - CTL_0];
+ uint32_t dspp_to_crtc_id[DSPP_MAX - DSPP_0];
+ uint32_t dsc_to_crtc_id[DSC_MAX - DSC_0];
+ uint32_t cdm_to_crtc_id;
uint32_t sspp_to_crtc_id[SSPP_MAX - SSPP_NONE];
+ uint32_t cwb_to_crtc_id[CWB_MAX - CWB_0];
};
struct dpu_global_state
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 5baf9df702b8..3efbba425ca6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -22,9 +22,9 @@
static inline bool reserved_by_other(uint32_t *res_map, int idx,
- uint32_t enc_id)
+ uint32_t crtc_id)
{
- return res_map[idx] && res_map[idx] != enc_id;
+ return res_map[idx] && res_map[idx] != crtc_id;
}
/**
@@ -233,13 +233,66 @@ static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx)
return -EINVAL;
}
+static int _dpu_rm_reserve_cwb_mux_and_pingpongs(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ uint32_t crtc_id,
+ struct msm_display_topology *topology)
+{
+ int num_cwb_mux = topology->num_lm, cwb_mux_count = 0;
+ int cwb_pp_start_idx = PINGPONG_CWB_0 - PINGPONG_0;
+ int cwb_pp_idx[MAX_BLOCKS];
+ int cwb_mux_idx[MAX_BLOCKS];
+
+ /*
+ * Reserve additional dedicated CWB PINGPONG blocks and muxes for each
+ * mixer
+ *
+ * TODO: add support for reserving resources on platforms with no
+ * PINGPONG_CWB
+ */
+ for (int i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
+ cwb_mux_count < num_cwb_mux; i++) {
+ for (int j = 0; j < ARRAY_SIZE(rm->cwb_blks); j++) {
+ /*
+ * Odd LMs must be assigned to odd CWB muxes and even
+ * LMs to even CWB muxes.
+ *
+ * Since the RM HW block array index is based on the HW
+ * block ids, we can also use the array index to enforce
+ * the odd/even rule. See dpu_rm_init() for more
+ * information
+ */
+ if (reserved_by_other(global_state->cwb_to_crtc_id, j, crtc_id) ||
+ i % 2 != j % 2)
+ continue;
+
+ cwb_mux_idx[cwb_mux_count] = j;
+ cwb_pp_idx[cwb_mux_count] = j + cwb_pp_start_idx;
+ cwb_mux_count++;
+ break;
+ }
+ }
+
+ if (cwb_mux_count != num_cwb_mux) {
+ DPU_ERROR("Unable to reserve all CWB PINGPONGs\n");
+ return -ENAVAIL;
+ }
+
+ for (int i = 0; i < cwb_mux_count; i++) {
+ global_state->pingpong_to_crtc_id[cwb_pp_idx[i]] = crtc_id;
+ global_state->cwb_to_crtc_id[cwb_mux_idx[i]] = crtc_id;
+ }
+
+ return 0;
+}
+
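
The parity rule in the loop above can be exercised in isolation. This is a standalone model with made-up array sizes and CRTC id, not driver code; it only demonstrates that LM array index i takes the first free CWB mux index j with i % 2 == j % 2:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_LM  6  /* made-up sizes for the model */
#define NUM_CWB 2

static int pick_cwb_muxes(const uint32_t *cwb_owner, uint32_t crtc_id,
			  int num_lm, int *mux_out)
{
	int count = 0;

	for (int i = 0; i < NUM_LM && count < num_lm; i++) {
		for (int j = 0; j < NUM_CWB; j++) {
			bool busy = cwb_owner[j] && cwb_owner[j] != crtc_id;

			if (busy || (i % 2) != (j % 2))
				continue;

			mux_out[count++] = j;
			break;
		}
	}

	return count == num_lm ? 0 : -1;
}

int main(void)
{
	uint32_t cwb_owner[NUM_CWB] = { 0, 0 };
	int muxes[NUM_CWB];

	if (!pick_cwb_muxes(cwb_owner, 42, 2, muxes))
		printf("LM0 -> CWB%d, LM1 -> CWB%d\n", muxes[0], muxes[1]);

	return 0;
}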
/**
* _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
* proposed use case requirements, incl. hardwired dependent blocks like
* pingpong
* @rm: dpu resource manager handle
* @global_state: resources shared across multiple kms objects
- * @enc_id: encoder id requesting for allocation
+ * @crtc_id: crtc id requesting the allocation
* @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks
* if lm, and all other hardwired blocks connected to the lm (pp) is
* available and appropriate
@@ -252,14 +305,14 @@ static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx)
*/
static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx,
+ uint32_t crtc_id, int lm_idx, int *pp_idx, int *dspp_idx,
struct msm_display_topology *topology)
{
const struct dpu_lm_cfg *lm_cfg;
int idx;
/* Already reserved? */
- if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) {
+ if (reserved_by_other(global_state->mixer_to_crtc_id, lm_idx, crtc_id)) {
DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0);
return false;
}
@@ -271,7 +324,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
return false;
}
- if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) {
+ if (reserved_by_other(global_state->pingpong_to_crtc_id, idx, crtc_id)) {
DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id,
lm_cfg->pingpong);
return false;
@@ -287,7 +340,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
return false;
}
- if (reserved_by_other(global_state->dspp_to_enc_id, idx, enc_id)) {
+ if (reserved_by_other(global_state->dspp_to_crtc_id, idx, crtc_id)) {
DPU_DEBUG("lm %d dspp %d already reserved\n", lm_cfg->id,
lm_cfg->dspp);
return false;
@@ -299,7 +352,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- uint32_t enc_id,
+ uint32_t crtc_id,
struct msm_display_topology *topology)
{
@@ -323,7 +376,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
lm_idx[lm_count] = i;
if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
- enc_id, i, &pp_idx[lm_count],
+ crtc_id, i, &pp_idx[lm_count],
&dspp_idx[lm_count], topology)) {
continue;
}
@@ -342,7 +395,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
continue;
if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
- global_state, enc_id, j,
+ global_state, crtc_id, j,
&pp_idx[lm_count], &dspp_idx[lm_count],
topology)) {
continue;
@@ -359,12 +412,12 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
}
for (i = 0; i < lm_count; i++) {
- global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
- global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
- global_state->dspp_to_enc_id[dspp_idx[i]] =
- topology->num_dspp ? enc_id : 0;
+ global_state->mixer_to_crtc_id[lm_idx[i]] = crtc_id;
+ global_state->pingpong_to_crtc_id[pp_idx[i]] = crtc_id;
+ global_state->dspp_to_crtc_id[dspp_idx[i]] =
+ topology->num_dspp ? crtc_id : 0;
- trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
+ trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, crtc_id,
pp_idx[i] + PINGPONG_0);
}
@@ -374,15 +427,25 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
static int _dpu_rm_reserve_ctls(
struct dpu_rm *rm,
struct dpu_global_state *global_state,
- uint32_t enc_id,
+ uint32_t crtc_id,
const struct msm_display_topology *top)
{
int ctl_idx[MAX_BLOCKS];
int i = 0, j, num_ctls;
bool needs_split_display;
- /* each hw_intf needs its own hw_ctrl to program its control path */
- num_ctls = top->num_intf;
+ /*
+ * For non-CWB mode, each hw_intf needs its own hw_ctl to program its
+ * control path.
+ *
+ * Hardcode num_ctls to 1 if CWB is enabled because in CWB, both the
+ * writeback and real-time encoders must be driven by the same control
+ * path
+ */
+ if (top->cwb_enabled)
+ num_ctls = 1;
+ else
+ num_ctls = top->num_intf;
needs_split_display = _dpu_rm_needs_split_display(top);
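
The CTL-count decision above reduces to a one-liner; a sketch, assuming only the two inputs shown:

#include <stdbool.h>

/* CWB forces a single CTL: the WB and real-time encoders share one control
 * path; otherwise each interface still gets its own CTL */
static int num_ctls_for_topology(bool cwb_enabled, int num_intf)
{
	return cwb_enabled ? 1 : num_intf;
}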
@@ -393,7 +456,7 @@ static int _dpu_rm_reserve_ctls(
if (!rm->ctl_blks[j])
continue;
- if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id))
+ if (reserved_by_other(global_state->ctl_to_crtc_id, j, crtc_id))
continue;
ctl = to_dpu_hw_ctl(rm->ctl_blks[j]);
@@ -417,8 +480,8 @@ static int _dpu_rm_reserve_ctls(
return -ENAVAIL;
for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) {
- global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id;
- trace_dpu_rm_reserve_ctls(i + CTL_0, enc_id);
+ global_state->ctl_to_crtc_id[ctl_idx[i]] = crtc_id;
+ trace_dpu_rm_reserve_ctls(i + CTL_0, crtc_id);
}
return 0;
@@ -426,12 +489,12 @@ static int _dpu_rm_reserve_ctls(
static int _dpu_rm_pingpong_next_index(struct dpu_global_state *global_state,
int start,
- uint32_t enc_id)
+ uint32_t crtc_id)
{
int i;
for (i = start; i < (PINGPONG_MAX - PINGPONG_0); i++) {
- if (global_state->pingpong_to_enc_id[i] == enc_id)
+ if (global_state->pingpong_to_crtc_id[i] == crtc_id)
return i;
}
@@ -452,7 +515,7 @@ static int _dpu_rm_pingpong_dsc_check(int dsc_idx, int pp_idx)
static int _dpu_rm_dsc_alloc(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- uint32_t enc_id,
+ uint32_t crtc_id,
const struct msm_display_topology *top)
{
int num_dsc = 0;
@@ -465,10 +528,10 @@ static int _dpu_rm_dsc_alloc(struct dpu_rm *rm,
if (!rm->dsc_blks[dsc_idx])
continue;
- if (reserved_by_other(global_state->dsc_to_enc_id, dsc_idx, enc_id))
+ if (reserved_by_other(global_state->dsc_to_crtc_id, dsc_idx, crtc_id))
continue;
- pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, enc_id);
+ pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, crtc_id);
if (pp_idx < 0)
return -ENAVAIL;
@@ -476,7 +539,7 @@ static int _dpu_rm_dsc_alloc(struct dpu_rm *rm,
if (ret)
return -ENAVAIL;
- global_state->dsc_to_enc_id[dsc_idx] = enc_id;
+ global_state->dsc_to_crtc_id[dsc_idx] = crtc_id;
num_dsc++;
pp_idx++;
}
@@ -492,7 +555,7 @@ static int _dpu_rm_dsc_alloc(struct dpu_rm *rm,
static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- uint32_t enc_id,
+ uint32_t crtc_id,
const struct msm_display_topology *top)
{
int num_dsc = 0;
@@ -507,11 +570,11 @@ static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm,
continue;
/* consecutive dsc index to be paired */
- if (reserved_by_other(global_state->dsc_to_enc_id, dsc_idx, enc_id) ||
- reserved_by_other(global_state->dsc_to_enc_id, dsc_idx + 1, enc_id))
+ if (reserved_by_other(global_state->dsc_to_crtc_id, dsc_idx, crtc_id) ||
+ reserved_by_other(global_state->dsc_to_crtc_id, dsc_idx + 1, crtc_id))
continue;
- pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, enc_id);
+ pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, crtc_id);
if (pp_idx < 0)
return -ENAVAIL;
@@ -521,7 +584,7 @@ static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm,
continue;
}
- pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx + 1, enc_id);
+ pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx + 1, crtc_id);
if (pp_idx < 0)
return -ENAVAIL;
@@ -531,8 +594,8 @@ static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm,
continue;
}
- global_state->dsc_to_enc_id[dsc_idx] = enc_id;
- global_state->dsc_to_enc_id[dsc_idx + 1] = enc_id;
+ global_state->dsc_to_crtc_id[dsc_idx] = crtc_id;
+ global_state->dsc_to_crtc_id[dsc_idx + 1] = crtc_id;
num_dsc += 2;
pp_idx++; /* start for next pair */
}
@@ -548,11 +611,9 @@ static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm,
static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- struct drm_encoder *enc,
+ uint32_t crtc_id,
const struct msm_display_topology *top)
{
- uint32_t enc_id = enc->base.id;
-
if (!top->num_dsc || !top->num_intf)
return 0;
@@ -568,16 +629,17 @@ static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
/* num_dsc should be either 1, 2 or 4 */
if (top->num_dsc > top->num_intf) /* merge mode */
- return _dpu_rm_dsc_alloc_pair(rm, global_state, enc_id, top);
+ return _dpu_rm_dsc_alloc_pair(rm, global_state, crtc_id, top);
else
- return _dpu_rm_dsc_alloc(rm, global_state, enc_id, top);
+ return _dpu_rm_dsc_alloc(rm, global_state, crtc_id, top);
return 0;
}
static int _dpu_rm_reserve_cdm(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- struct drm_encoder *enc)
+ uint32_t crtc_id,
+ int num_cdm)
{
/* try allocating only one CDM block */
if (!rm->cdm_blk) {
@@ -585,12 +647,17 @@ static int _dpu_rm_reserve_cdm(struct dpu_rm *rm,
return -EIO;
}
- if (global_state->cdm_to_enc_id) {
+ if (num_cdm > 1) {
+ DPU_ERROR("More than 1 INTF requesting CDM\n");
+ return -EINVAL;
+ }
+
+ if (global_state->cdm_to_crtc_id) {
DPU_ERROR("CDM_0 is already allocated\n");
return -EIO;
}
- global_state->cdm_to_enc_id = enc->base.id;
+ global_state->cdm_to_crtc_id = crtc_id;
return 0;
}
@@ -598,30 +665,37 @@ static int _dpu_rm_reserve_cdm(struct dpu_rm *rm,
static int _dpu_rm_make_reservation(
struct dpu_rm *rm,
struct dpu_global_state *global_state,
- struct drm_encoder *enc,
+ uint32_t crtc_id,
struct msm_display_topology *topology)
{
int ret;
- ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, topology);
+ ret = _dpu_rm_reserve_lms(rm, global_state, crtc_id, topology);
if (ret) {
DPU_ERROR("unable to find appropriate mixers\n");
return ret;
}
- ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id,
+ if (topology->cwb_enabled) {
+ ret = _dpu_rm_reserve_cwb_mux_and_pingpongs(rm, global_state,
+ crtc_id, topology);
+ if (ret)
+ return ret;
+ }
+
+ ret = _dpu_rm_reserve_ctls(rm, global_state, crtc_id,
topology);
if (ret) {
DPU_ERROR("unable to find appropriate CTL\n");
return ret;
}
- ret = _dpu_rm_reserve_dsc(rm, global_state, enc, topology);
+ ret = _dpu_rm_reserve_dsc(rm, global_state, crtc_id, topology);
if (ret)
return ret;
- if (topology->needs_cdm) {
- ret = _dpu_rm_reserve_cdm(rm, global_state, enc);
+ if (topology->num_cdm > 0) {
+ ret = _dpu_rm_reserve_cdm(rm, global_state, crtc_id, topology->num_cdm);
if (ret) {
DPU_ERROR("unable to find CDM blk\n");
return ret;
@@ -632,12 +706,12 @@ static int _dpu_rm_make_reservation(
}
static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
- uint32_t enc_id)
+ uint32_t crtc_id)
{
int i;
for (i = 0; i < cnt; i++) {
- if (res_mapping[i] == enc_id)
+ if (res_mapping[i] == crtc_id)
res_mapping[i] = 0;
}
}
@@ -646,23 +720,27 @@ static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
* dpu_rm_release - Given the encoder for the display chain, release any
* HW blocks previously reserved for that use case.
* @global_state: resources shared across multiple kms objects
- * @enc: DRM Encoder handle
+ * @crtc: DRM CRTC handle
* @return: 0 on Success otherwise -ERROR
*/
void dpu_rm_release(struct dpu_global_state *global_state,
- struct drm_encoder *enc)
+ struct drm_crtc *crtc)
{
- _dpu_rm_clear_mapping(global_state->pingpong_to_enc_id,
- ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id);
- _dpu_rm_clear_mapping(global_state->mixer_to_enc_id,
- ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id);
- _dpu_rm_clear_mapping(global_state->ctl_to_enc_id,
- ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
- _dpu_rm_clear_mapping(global_state->dsc_to_enc_id,
- ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id);
- _dpu_rm_clear_mapping(global_state->dspp_to_enc_id,
- ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id);
- _dpu_rm_clear_mapping(&global_state->cdm_to_enc_id, 1, enc->base.id);
+ uint32_t crtc_id = crtc->base.id;
+
+ _dpu_rm_clear_mapping(global_state->pingpong_to_crtc_id,
+ ARRAY_SIZE(global_state->pingpong_to_crtc_id), crtc_id);
+ _dpu_rm_clear_mapping(global_state->mixer_to_crtc_id,
+ ARRAY_SIZE(global_state->mixer_to_crtc_id), crtc_id);
+ _dpu_rm_clear_mapping(global_state->ctl_to_crtc_id,
+ ARRAY_SIZE(global_state->ctl_to_crtc_id), crtc_id);
+ _dpu_rm_clear_mapping(global_state->dsc_to_crtc_id,
+ ARRAY_SIZE(global_state->dsc_to_crtc_id), crtc_id);
+ _dpu_rm_clear_mapping(global_state->dspp_to_crtc_id,
+ ARRAY_SIZE(global_state->dspp_to_crtc_id), crtc_id);
+ _dpu_rm_clear_mapping(&global_state->cdm_to_crtc_id, 1, crtc_id);
+ _dpu_rm_clear_mapping(global_state->cwb_to_crtc_id,
+ ARRAY_SIZE(global_state->cwb_to_crtc_id), crtc_id);
}
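
After the enc_id to crtc_id switch, every resource map keeps the same ownership convention: an entry holds the DRM object id of the owning CRTC or 0 when free, reservation skips entries owned by another CRTC, and release zeroes every entry owned by the given CRTC. A compact model of that lifecycle:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool reserved_by_other(const uint32_t *res_map, int idx, uint32_t crtc_id)
{
	return res_map[idx] && res_map[idx] != crtc_id;
}

static void clear_mapping(uint32_t *res_map, size_t cnt, uint32_t crtc_id)
{
	for (size_t i = 0; i < cnt; i++)
		if (res_map[i] == crtc_id)
			res_map[i] = 0;
}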
/**
@@ -674,42 +752,33 @@ void dpu_rm_release(struct dpu_global_state *global_state,
* HW Reservations should be released via dpu_rm_release_hw.
* @rm: DPU Resource Manager handle
* @global_state: resources shared across multiple kms objects
- * @enc: DRM Encoder handle
- * @crtc_state: Proposed Atomic DRM CRTC State handle
+ * @crtc: DRM CRTC handle
* @topology: Pointer to topology info for the display
* @return: 0 on Success otherwise -ERROR
*/
int dpu_rm_reserve(
struct dpu_rm *rm,
struct dpu_global_state *global_state,
- struct drm_encoder *enc,
- struct drm_crtc_state *crtc_state,
+ struct drm_crtc *crtc,
struct msm_display_topology *topology)
{
int ret;
- /* Check if this is just a page-flip */
- if (!drm_atomic_crtc_needs_modeset(crtc_state))
- return 0;
-
if (IS_ERR(global_state)) {
DPU_ERROR("failed to global state\n");
return PTR_ERR(global_state);
}
- DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
- enc->base.id, crtc_state->crtc->base.id);
+ DRM_DEBUG_KMS("reserving hw for crtc %d\n", crtc->base.id);
DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d\n",
topology->num_lm, topology->num_dsc,
topology->num_intf);
- ret = _dpu_rm_make_reservation(rm, global_state, enc, topology);
+ ret = _dpu_rm_make_reservation(rm, global_state, crtc->base.id, topology);
if (ret)
DPU_ERROR("failed to reserve hw resources: %d\n", ret);
-
-
return ret;
}
@@ -800,50 +869,57 @@ void dpu_rm_release_all_sspp(struct dpu_global_state *global_state,
* assigned to this encoder
* @rm: DPU Resource Manager handle
* @global_state: resources shared across multiple kms objects
- * @enc_id: encoder id requesting for allocation
+ * @crtc: DRM CRTC handle
* @type: resource type to return data for
* @blks: pointer to the array to be filled by HW resources
* @blks_size: size of the @blks array
*/
int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
- struct dpu_global_state *global_state, uint32_t enc_id,
+ struct dpu_global_state *global_state, struct drm_crtc *crtc,
enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
{
+ uint32_t crtc_id = crtc->base.id;
struct dpu_hw_blk **hw_blks;
- uint32_t *hw_to_enc_id;
+ uint32_t *hw_to_crtc_id;
int i, num_blks, max_blks;
switch (type) {
case DPU_HW_BLK_PINGPONG:
+ case DPU_HW_BLK_DCWB_PINGPONG:
hw_blks = rm->pingpong_blks;
- hw_to_enc_id = global_state->pingpong_to_enc_id;
+ hw_to_crtc_id = global_state->pingpong_to_crtc_id;
max_blks = ARRAY_SIZE(rm->pingpong_blks);
break;
case DPU_HW_BLK_LM:
hw_blks = rm->mixer_blks;
- hw_to_enc_id = global_state->mixer_to_enc_id;
+ hw_to_crtc_id = global_state->mixer_to_crtc_id;
max_blks = ARRAY_SIZE(rm->mixer_blks);
break;
case DPU_HW_BLK_CTL:
hw_blks = rm->ctl_blks;
- hw_to_enc_id = global_state->ctl_to_enc_id;
+ hw_to_crtc_id = global_state->ctl_to_crtc_id;
max_blks = ARRAY_SIZE(rm->ctl_blks);
break;
case DPU_HW_BLK_DSPP:
hw_blks = rm->dspp_blks;
- hw_to_enc_id = global_state->dspp_to_enc_id;
+ hw_to_crtc_id = global_state->dspp_to_crtc_id;
max_blks = ARRAY_SIZE(rm->dspp_blks);
break;
case DPU_HW_BLK_DSC:
hw_blks = rm->dsc_blks;
- hw_to_enc_id = global_state->dsc_to_enc_id;
+ hw_to_crtc_id = global_state->dsc_to_crtc_id;
max_blks = ARRAY_SIZE(rm->dsc_blks);
break;
case DPU_HW_BLK_CDM:
hw_blks = &rm->cdm_blk;
- hw_to_enc_id = &global_state->cdm_to_enc_id;
+ hw_to_crtc_id = &global_state->cdm_to_crtc_id;
max_blks = 1;
break;
+ case DPU_HW_BLK_CWB:
+ hw_blks = rm->cwb_blks;
+ hw_to_crtc_id = global_state->cwb_to_crtc_id;
+ max_blks = ARRAY_SIZE(rm->cwb_blks);
+ break;
default:
DPU_ERROR("blk type %d not managed by rm\n", type);
return 0;
@@ -851,17 +927,31 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
num_blks = 0;
for (i = 0; i < max_blks; i++) {
- if (hw_to_enc_id[i] != enc_id)
+ if (hw_to_crtc_id[i] != crtc_id)
continue;
+ if (type == DPU_HW_BLK_PINGPONG) {
+ struct dpu_hw_pingpong *pp = to_dpu_hw_pingpong(hw_blks[i]);
+
+ if (pp->idx >= PINGPONG_CWB_0)
+ continue;
+ }
+
+ if (type == DPU_HW_BLK_DCWB_PINGPONG) {
+ struct dpu_hw_pingpong *pp = to_dpu_hw_pingpong(hw_blks[i]);
+
+ if (pp->idx < PINGPONG_CWB_0)
+ continue;
+ }
+
if (num_blks == blks_size) {
- DPU_ERROR("More than %d resources assigned to enc %d\n",
- blks_size, enc_id);
+ DPU_ERROR("More than %d resources assigned to crtc %d\n",
+ blks_size, crtc_id);
break;
}
if (!hw_blks[i]) {
- DPU_ERROR("Allocated resource %d unavailable to assign to enc %d\n",
- type, enc_id);
+ DPU_ERROR("Allocated resource %d unavailable to assign to crtc %d\n",
+ type, crtc_id);
break;
}
blks[num_blks++] = hw_blks[i];
@@ -896,38 +986,38 @@ void dpu_rm_print_state(struct drm_printer *p,
drm_puts(p, "resource mapping:\n");
drm_puts(p, "\tpingpong=");
- for (i = 0; i < ARRAY_SIZE(global_state->pingpong_to_enc_id); i++)
+ for (i = 0; i < ARRAY_SIZE(global_state->pingpong_to_crtc_id); i++)
dpu_rm_print_state_helper(p, rm->pingpong_blks[i],
- global_state->pingpong_to_enc_id[i]);
+ global_state->pingpong_to_crtc_id[i]);
drm_puts(p, "\n");
drm_puts(p, "\tmixer=");
- for (i = 0; i < ARRAY_SIZE(global_state->mixer_to_enc_id); i++)
+ for (i = 0; i < ARRAY_SIZE(global_state->mixer_to_crtc_id); i++)
dpu_rm_print_state_helper(p, rm->mixer_blks[i],
- global_state->mixer_to_enc_id[i]);
+ global_state->mixer_to_crtc_id[i]);
drm_puts(p, "\n");
drm_puts(p, "\tctl=");
- for (i = 0; i < ARRAY_SIZE(global_state->ctl_to_enc_id); i++)
+ for (i = 0; i < ARRAY_SIZE(global_state->ctl_to_crtc_id); i++)
dpu_rm_print_state_helper(p, rm->ctl_blks[i],
- global_state->ctl_to_enc_id[i]);
+ global_state->ctl_to_crtc_id[i]);
drm_puts(p, "\n");
drm_puts(p, "\tdspp=");
- for (i = 0; i < ARRAY_SIZE(global_state->dspp_to_enc_id); i++)
+ for (i = 0; i < ARRAY_SIZE(global_state->dspp_to_crtc_id); i++)
dpu_rm_print_state_helper(p, rm->dspp_blks[i],
- global_state->dspp_to_enc_id[i]);
+ global_state->dspp_to_crtc_id[i]);
drm_puts(p, "\n");
drm_puts(p, "\tdsc=");
- for (i = 0; i < ARRAY_SIZE(global_state->dsc_to_enc_id); i++)
+ for (i = 0; i < ARRAY_SIZE(global_state->dsc_to_crtc_id); i++)
dpu_rm_print_state_helper(p, rm->dsc_blks[i],
- global_state->dsc_to_enc_id[i]);
+ global_state->dsc_to_crtc_id[i]);
drm_puts(p, "\n");
drm_puts(p, "\tcdm=");
dpu_rm_print_state_helper(p, rm->cdm_blk,
- global_state->cdm_to_enc_id);
+ global_state->cdm_to_crtc_id);
drm_puts(p, "\n");
drm_puts(p, "\tsspp=");
@@ -936,4 +1026,10 @@ void dpu_rm_print_state(struct drm_printer *p,
dpu_rm_print_state_helper(p, rm->hw_sspp[i] ? &rm->hw_sspp[i]->base : NULL,
global_state->sspp_to_crtc_id[i]);
drm_puts(p, "\n");
+
+ drm_puts(p, "\tcwb=");
+ for (i = 0; i < ARRAY_SIZE(global_state->cwb_to_crtc_id); i++)
+ dpu_rm_print_state_helper(p, rm->cwb_blks[i],
+ global_state->cwb_to_crtc_id[i]);
+ drm_puts(p, "\n");
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index 99bd594ee0d1..a19dbdb1b6f4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -51,14 +51,17 @@ struct dpu_rm_sspp_requirements {
* @num_intf: number of interfaces the panel is mounted on
* @num_dspp: number of dspp blocks used
* @num_dsc: number of Display Stream Compression (DSC) blocks used
- * @needs_cdm: indicates whether cdm block is needed for this display topology
+ * @num_cdm: number of outputs requesting the CDM block for this
+ * display topology
+ * @cwb_enabled: indicates whether CWB is enabled for this display topology
*/
struct msm_display_topology {
u32 num_lm;
u32 num_intf;
u32 num_dspp;
u32 num_dsc;
- bool needs_cdm;
+ int num_cdm;
+ bool cwb_enabled;
};
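
With needs_cdm replaced by a count and the new cwb_enabled flag, a caller describes a concurrent-writeback use case roughly as below. The values are hypothetical and the snippet only uses the struct as declared above:

/* hypothetical dual-LM, single-interface topology with CWB enabled */
static const struct msm_display_topology example_cwb_topology = {
	.num_lm = 2,
	.num_intf = 1,
	.num_dspp = 0,
	.num_dsc = 0,
	.num_cdm = 0,
	.cwb_enabled = true,
};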
int dpu_rm_init(struct drm_device *dev,
@@ -69,12 +72,11 @@ int dpu_rm_init(struct drm_device *dev,
int dpu_rm_reserve(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- struct drm_encoder *drm_enc,
- struct drm_crtc_state *crtc_state,
+ struct drm_crtc *crtc,
struct msm_display_topology *topology);
void dpu_rm_release(struct dpu_global_state *global_state,
- struct drm_encoder *enc);
+ struct drm_crtc *crtc);
struct dpu_hw_sspp *dpu_rm_reserve_sspp(struct dpu_rm *rm,
struct dpu_global_state *global_state,
@@ -85,7 +87,7 @@ void dpu_rm_release_all_sspp(struct dpu_global_state *global_state,
struct drm_crtc *crtc);
int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
- struct dpu_global_state *global_state, uint32_t enc_id,
+ struct dpu_global_state *global_state, struct drm_crtc *crtc,
enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size);
void dpu_rm_print_state(struct drm_printer *p,
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
index 7444b75c4215..52e728181b52 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
@@ -58,7 +58,7 @@ static int mdp4_lvds_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct mdp4_lvds_connector *mdp4_lvds_connector =
to_mdp4_lvds_connector(connector);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
index 666de99a46a5..fc183fe37f56 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
@@ -3,6 +3,7 @@
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*/
+#include <linux/string_choices.h>
#include "mdp5_kms.h"
#include "mdp5_ctl.h"
@@ -233,7 +234,7 @@ int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
return -EINVAL;
ctl->encoder_enabled = enabled;
- DBG("intf_%d: %s", intf->num, enabled ? "on" : "off");
+ DBG("intf_%d: %s", intf->num, str_on_off(enabled));
if (start_signal_needed(ctl, pipeline)) {
send_start_signal(ctl);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index 62de248ed1b0..bb1601921938 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -368,7 +368,7 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
}
static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
+ struct drm_atomic_state *state, bool flip)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 9c463ae2f8fa..d8633a596f8d 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -11,6 +11,7 @@
#include <linux/phy/phy.h>
#include <linux/phy/phy-dp.h>
#include <linux/pm_opp.h>
+#include <linux/string_choices.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_fixed.h>
@@ -1366,9 +1367,9 @@ int msm_dp_ctrl_core_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl)
drm_dbg_dp(ctrl->drm_dev, "enable core clocks \n");
drm_dbg_dp(ctrl->drm_dev, "stream_clks:%s link_clks:%s core_clks:%s\n",
- ctrl->stream_clks_on ? "on" : "off",
- ctrl->link_clks_on ? "on" : "off",
- ctrl->core_clks_on ? "on" : "off");
+ str_on_off(ctrl->stream_clks_on),
+ str_on_off(ctrl->link_clks_on),
+ str_on_off(ctrl->core_clks_on));
return 0;
}
@@ -1385,9 +1386,9 @@ void msm_dp_ctrl_core_clk_disable(struct msm_dp_ctrl *msm_dp_ctrl)
drm_dbg_dp(ctrl->drm_dev, "disable core clocks \n");
drm_dbg_dp(ctrl->drm_dev, "stream_clks:%s link_clks:%s core_clks:%s\n",
- ctrl->stream_clks_on ? "on" : "off",
- ctrl->link_clks_on ? "on" : "off",
- ctrl->core_clks_on ? "on" : "off");
+ str_on_off(ctrl->stream_clks_on),
+ str_on_off(ctrl->link_clks_on),
+ str_on_off(ctrl->core_clks_on));
}
static int msm_dp_ctrl_link_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl)
@@ -1416,9 +1417,9 @@ static int msm_dp_ctrl_link_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl)
drm_dbg_dp(ctrl->drm_dev, "enable link clocks\n");
drm_dbg_dp(ctrl->drm_dev, "stream_clks:%s link_clks:%s core_clks:%s\n",
- ctrl->stream_clks_on ? "on" : "off",
- ctrl->link_clks_on ? "on" : "off",
- ctrl->core_clks_on ? "on" : "off");
+ str_on_off(ctrl->stream_clks_on),
+ str_on_off(ctrl->link_clks_on),
+ str_on_off(ctrl->core_clks_on));
return 0;
}
@@ -1435,9 +1436,9 @@ static void msm_dp_ctrl_link_clk_disable(struct msm_dp_ctrl *msm_dp_ctrl)
drm_dbg_dp(ctrl->drm_dev, "disabled link clocks\n");
drm_dbg_dp(ctrl->drm_dev, "stream_clks:%s link_clks:%s core_clks:%s\n",
- ctrl->stream_clks_on ? "on" : "off",
- ctrl->link_clks_on ? "on" : "off",
- ctrl->core_clks_on ? "on" : "off");
+ str_on_off(ctrl->stream_clks_on),
+ str_on_off(ctrl->link_clks_on),
+ str_on_off(ctrl->core_clks_on));
}
static int msm_dp_ctrl_enable_mainlink_clocks(struct msm_dp_ctrl_private *ctrl)
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 3898850739ab..bbc47d86ae9e 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -11,6 +11,7 @@
#include <linux/of_irq.h>
#include <linux/phy/phy.h>
#include <linux/delay.h>
+#include <linux/string_choices.h>
#include <drm/display/drm_dp_aux_bus.h>
#include <drm/drm_edid.h>
@@ -343,8 +344,7 @@ static int msm_dp_display_send_hpd_notification(struct msm_dp_display_private *d
{
if ((hpd && dp->msm_dp_display.link_ready) ||
(!hpd && !dp->msm_dp_display.link_ready)) {
- drm_dbg_dp(dp->drm_dev, "HPD already %s\n",
- (hpd ? "on" : "off"));
+ drm_dbg_dp(dp->drm_dev, "HPD already %s\n", str_on_off(hpd));
return 0;
}
@@ -367,6 +367,19 @@ static int msm_dp_display_send_hpd_notification(struct msm_dp_display_private *d
return 0;
}
+static void msm_dp_display_lttpr_init(struct msm_dp_display_private *dp)
+{
+ u8 lttpr_caps[DP_LTTPR_COMMON_CAP_SIZE];
+ int rc;
+
+ if (drm_dp_read_lttpr_common_caps(dp->aux, dp->panel->dpcd, lttpr_caps))
+ return;
+
+ rc = drm_dp_lttpr_init(dp->aux, drm_dp_lttpr_count(lttpr_caps));
+ if (rc)
+ DRM_ERROR("failed to set LTTPRs transparency mode, rc=%d\n", rc);
+}
+
static int msm_dp_display_process_hpd_high(struct msm_dp_display_private *dp)
{
struct drm_connector *connector = dp->msm_dp_display.connector;
@@ -377,6 +390,8 @@ static int msm_dp_display_process_hpd_high(struct msm_dp_display_private *dp)
if (rc)
goto end;
+ msm_dp_display_lttpr_init(dp);
+
msm_dp_link_process_request(dp->link);
if (!dp->msm_dp_display.is_edp)
@@ -1492,13 +1507,13 @@ int msm_dp_modeset_init(struct msm_dp *msm_dp_display, struct drm_device *dev,
}
void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge);
struct msm_dp *dp = msm_dp_bridge->msm_dp_display;
int rc = 0;
struct msm_dp_display_private *msm_dp_display;
- u32 state;
+ u32 hpd_state;
bool force_link_train = false;
msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
@@ -1517,8 +1532,8 @@ void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
return;
}
- state = msm_dp_display->hpd_state;
- if (state != ST_DISPLAY_OFF && state != ST_MAINLINK_READY) {
+ hpd_state = msm_dp_display->hpd_state;
+ if (hpd_state != ST_DISPLAY_OFF && hpd_state != ST_MAINLINK_READY) {
mutex_unlock(&msm_dp_display->event_mutex);
return;
}
@@ -1530,9 +1545,9 @@ void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
return;
}
- state = msm_dp_display->hpd_state;
+ hpd_state = msm_dp_display->hpd_state;
- if (state == ST_DISPLAY_OFF) {
+ if (hpd_state == ST_DISPLAY_OFF) {
msm_dp_display_host_phy_init(msm_dp_display);
force_link_train = true;
}
@@ -1553,7 +1568,7 @@ void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
}
void msm_dp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge);
struct msm_dp *dp = msm_dp_bridge->msm_dp_display;
@@ -1565,11 +1580,11 @@ void msm_dp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
}
void msm_dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge);
struct msm_dp *dp = msm_dp_bridge->msm_dp_display;
- u32 state;
+ u32 hpd_state;
struct msm_dp_display_private *msm_dp_display;
msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
@@ -1579,15 +1594,15 @@ void msm_dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
mutex_lock(&msm_dp_display->event_mutex);
- state = msm_dp_display->hpd_state;
- if (state != ST_DISCONNECT_PENDING && state != ST_CONNECTED)
+ hpd_state = msm_dp_display->hpd_state;
+ if (hpd_state != ST_DISCONNECT_PENDING && hpd_state != ST_CONNECTED)
drm_dbg_dp(dp->drm_dev, "type=%d wrong hpd_state=%d\n",
- dp->connector_type, state);
+ dp->connector_type, hpd_state);
msm_dp_display_disable(msm_dp_display);
- state = msm_dp_display->hpd_state;
- if (state == ST_DISCONNECT_PENDING) {
+ hpd_state = msm_dp_display->hpd_state;
+ if (hpd_state == ST_DISCONNECT_PENDING) {
/* completed disconnection */
msm_dp_display->hpd_state = ST_DISCONNECTED;
} else {
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 16b7913d1eef..cca57e56c906 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -3,6 +3,7 @@
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/
+#include <linux/string_choices.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_bridge.h>
@@ -25,7 +26,7 @@ static enum drm_connector_status msm_dp_bridge_detect(struct drm_bridge *bridge)
dp = to_dp_bridge(bridge)->msm_dp_display;
drm_dbg_dp(dp->drm_dev, "link_ready = %s\n",
- (dp->link_ready) ? "true" : "false");
+ str_true_false(dp->link_ready));
return (dp->link_ready) ? connector_status_connected :
connector_status_disconnected;
@@ -41,7 +42,7 @@ static int msm_dp_bridge_atomic_check(struct drm_bridge *bridge,
dp = to_dp_bridge(bridge)->msm_dp_display;
drm_dbg_dp(dp->drm_dev, "link_ready = %s\n",
- (dp->link_ready) ? "true" : "false");
+ str_true_false(dp->link_ready));
/*
* There is no protection in the DRM framework to check if the display
@@ -137,9 +138,8 @@ static int msm_edp_bridge_atomic_check(struct drm_bridge *drm_bridge,
}
static void msm_edp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
- struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge);
@@ -151,25 +151,24 @@ static void msm_edp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
* If the panel is in psr, just exit psr state and skip the full
* bridge enable sequence.
*/
- crtc = drm_atomic_get_new_crtc_for_encoder(atomic_state,
+ crtc = drm_atomic_get_new_crtc_for_encoder(state,
drm_bridge->encoder);
if (!crtc)
return;
- old_crtc_state = drm_atomic_get_old_crtc_state(atomic_state, crtc);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
if (old_crtc_state && old_crtc_state->self_refresh_active) {
msm_dp_display_set_psr(dp, false);
return;
}
- msm_dp_bridge_atomic_enable(drm_bridge, old_bridge_state);
+ msm_dp_bridge_atomic_enable(drm_bridge, state);
}
static void msm_edp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *atomic_state)
{
- struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state = NULL, *old_crtc_state = NULL;
struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge);
@@ -208,13 +207,12 @@ static void msm_edp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
}
out:
- msm_dp_bridge_atomic_disable(drm_bridge, old_bridge_state);
+ msm_dp_bridge_atomic_disable(drm_bridge, atomic_state);
}
static void msm_edp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *atomic_state)
{
- struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state = NULL;
@@ -233,7 +231,7 @@ static void msm_edp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
if (new_crtc_state->self_refresh_active)
return;
- msm_dp_bridge_atomic_post_disable(drm_bridge, old_bridge_state);
+ msm_dp_bridge_atomic_post_disable(drm_bridge, atomic_state);
}
/**
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h
index 8eae2f74839f..d8c9b905f8bf 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.h
+++ b/drivers/gpu/drm/msm/dp/dp_drm.h
@@ -26,11 +26,11 @@ int msm_dp_bridge_init(struct msm_dp *msm_dp_display, struct drm_device *dev,
bool yuv_supported);
void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
- struct drm_bridge_state *old_bridge_state);
+ struct drm_atomic_state *state);
void msm_dp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
- struct drm_bridge_state *old_bridge_state);
+ struct drm_atomic_state *state);
void msm_dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
- struct drm_bridge_state *old_bridge_state);
+ struct drm_atomic_state *state);
enum drm_mode_status msm_dp_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 007311c21fda..4d75529c0e85 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -179,18 +179,18 @@ struct msm_dsi_host {
int irq;
};
-
static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
{
return readl(msm_host->ctrl_base + reg);
}
+
static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
{
writel(data, msm_host->ctrl_base + reg);
}
-static const struct msm_dsi_cfg_handler *dsi_get_config(
- struct msm_dsi_host *msm_host)
+static const struct msm_dsi_cfg_handler *
+dsi_get_config(struct msm_dsi_host *msm_host)
{
const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
struct device *dev = &msm_host->pdev->dev;
@@ -200,7 +200,8 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
ahb_clk = msm_clk_get(msm_host->pdev, "iface");
if (IS_ERR(ahb_clk)) {
- pr_err("%s: cannot get interface clock\n", __func__);
+ dev_err_probe(dev, PTR_ERR(ahb_clk), "%s: cannot get interface clock\n",
+ __func__);
goto exit;
}
@@ -208,13 +209,13 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
ret = clk_prepare_enable(ahb_clk);
if (ret) {
- pr_err("%s: unable to enable ahb_clk\n", __func__);
+ dev_err_probe(dev, ret, "%s: unable to enable ahb_clk\n", __func__);
goto runtime_put;
}
ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
if (ret) {
- pr_err("%s: Invalid version\n", __func__);
+ dev_err_probe(dev, ret, "%s: Invalid version\n", __func__);
goto disable_clks;
}
@@ -281,42 +282,31 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
msm_host->num_bus_clks = cfg->num_bus_clks;
ret = devm_clk_bulk_get(&pdev->dev, msm_host->num_bus_clks, msm_host->bus_clks);
- if (ret < 0) {
- dev_err(&pdev->dev, "Unable to get clocks, ret = %d\n", ret);
- goto exit;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Unable to get clocks\n");
/* get link and source clocks */
msm_host->byte_clk = msm_clk_get(pdev, "byte");
- if (IS_ERR(msm_host->byte_clk)) {
- ret = PTR_ERR(msm_host->byte_clk);
- pr_err("%s: can't find dsi_byte clock. ret=%d\n",
- __func__, ret);
- msm_host->byte_clk = NULL;
- goto exit;
- }
+ if (IS_ERR(msm_host->byte_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(msm_host->byte_clk),
+ "%s: can't find dsi_byte clock\n",
+ __func__);
msm_host->pixel_clk = msm_clk_get(pdev, "pixel");
- if (IS_ERR(msm_host->pixel_clk)) {
- ret = PTR_ERR(msm_host->pixel_clk);
- pr_err("%s: can't find dsi_pixel clock. ret=%d\n",
- __func__, ret);
- msm_host->pixel_clk = NULL;
- goto exit;
- }
+ if (IS_ERR(msm_host->pixel_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(msm_host->pixel_clk),
+ "%s: can't find dsi_pixel clock\n",
+ __func__);
msm_host->esc_clk = msm_clk_get(pdev, "core");
- if (IS_ERR(msm_host->esc_clk)) {
- ret = PTR_ERR(msm_host->esc_clk);
- pr_err("%s: can't find dsi_esc clock. ret=%d\n",
- __func__, ret);
- msm_host->esc_clk = NULL;
- goto exit;
- }
+ if (IS_ERR(msm_host->esc_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(msm_host->esc_clk),
+ "%s: can't find dsi_esc clock\n",
+ __func__);
if (cfg_hnd->ops->clk_init_ver)
ret = cfg_hnd->ops->clk_init_ver(msm_host);
-exit:
+
return ret;
}
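
The clock-lookup rewrites above all use the same dev_err_probe() shape: log the failure (or, for -EPROBE_DEFER, quietly record the deferral reason) and return the error in one statement. A hedged sketch of the pattern, assuming the usual kernel headers and the existing msm_clk_get() helper:

static int example_get_byte_clk(struct platform_device *pdev, struct clk **out)
{
	struct clk *clk = msm_clk_get(pdev, "byte");

	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "can't find dsi_byte clock\n");

	*out = clk;
	return 0;
}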
@@ -380,7 +370,6 @@ int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
return 0;
}
-
int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
{
int ret;
@@ -598,7 +587,6 @@ static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
DBG("pclk=%lu, bclk=%lu", msm_host->pixel_clk_rate,
msm_host->byte_clk_rate);
-
}
int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
@@ -687,8 +675,8 @@ static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
return NON_BURST_SYNCH_EVENT;
}
-static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
- const enum mipi_dsi_pixel_format mipi_fmt)
+static inline enum dsi_vid_dst_format
+dsi_get_vid_fmt(const enum mipi_dsi_pixel_format mipi_fmt)
{
switch (mipi_fmt) {
case MIPI_DSI_FMT_RGB888: return VID_DST_FORMAT_RGB888;
@@ -699,8 +687,8 @@ static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
}
}
-static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
- const enum mipi_dsi_pixel_format mipi_fmt)
+static inline enum dsi_cmd_dst_format
+dsi_get_cmd_fmt(const enum mipi_dsi_pixel_format mipi_fmt)
{
switch (mipi_fmt) {
case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888;
@@ -846,7 +834,7 @@ static void dsi_ctrl_enable(struct msm_dsi_host *msm_host,
dsi_write(msm_host, REG_DSI_CPHY_MODE_CTRL, BIT(0));
}
-static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode, u32 hdisplay)
+static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode)
{
struct drm_dsc_config *dsc = msm_host->dsc;
u32 reg, reg_ctrl, reg_ctrl2;
@@ -858,7 +846,7 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod
/* first calculate dsc parameters and then program
* compress mode registers
*/
- slice_per_intf = msm_dsc_get_slices_per_intf(dsc, hdisplay);
+ slice_per_intf = dsc->slice_count;
total_bytes_per_intf = dsc->slice_chunk_size * slice_per_intf;
bytes_per_pkt = dsc->slice_chunk_size; /* * slice_per_pkt; */
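
With slice_per_intf now taken directly from the DSC config, the byte counts fall out of two multiplications. A worked example with hypothetical DSC parameters:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t slice_count = 2;        /* hypothetical */
	uint32_t slice_chunk_size = 540; /* hypothetical, bytes per slice per line */

	uint32_t slice_per_intf = slice_count;
	uint32_t total_bytes_per_intf = slice_chunk_size * slice_per_intf; /* 1080 */
	uint32_t bytes_per_pkt = slice_chunk_size;                         /* 540 */

	printf("%u bytes per line, %u bytes per packet\n",
	       total_bytes_per_intf, bytes_per_pkt);
	return 0;
}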
@@ -991,7 +979,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
if (msm_host->dsc)
- dsi_update_dsc_timing(msm_host, false, mode->hdisplay);
+ dsi_update_dsc_timing(msm_host, false);
dsi_write(msm_host, REG_DSI_ACTIVE_H,
DSI_ACTIVE_H_START(ha_start) |
@@ -1012,7 +1000,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
} else { /* command mode */
if (msm_host->dsc)
- dsi_update_dsc_timing(msm_host, true, mode->hdisplay);
+ dsi_update_dsc_timing(msm_host, true);
/* image data and 1 byte write_memory_start cmd */
if (!msm_host->dsc)
@@ -1292,14 +1280,15 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
{
u8 *data = msg->rx_buf;
+
if (data && (msg->rx_len >= 1)) {
*data = buf[1]; /* strip out dcs type */
return 1;
- } else {
- pr_err("%s: read data does not match with rx_buf len %zu\n",
- __func__, msg->rx_len);
- return -EINVAL;
}
+
+ pr_err("%s: read data does not match with rx_buf len %zu\n",
+ __func__, msg->rx_len);
+ return -EINVAL;
}
/*
@@ -1308,15 +1297,16 @@ static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
{
u8 *data = msg->rx_buf;
+
if (data && (msg->rx_len >= 2)) {
data[0] = buf[1]; /* strip out dcs type */
data[1] = buf[2];
return 2;
- } else {
- pr_err("%s: read data does not match with rx_buf len %zu\n",
- __func__, msg->rx_len);
- return -EINVAL;
}
+
+ pr_err("%s: read data does not match with rx_buf len %zu\n",
+ __func__, msg->rx_len);
+ return -EINVAL;
}
static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
@@ -1376,8 +1366,9 @@ static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
ret = -ETIMEDOUT;
else
ret = len;
- } else
+ } else {
ret = len;
+ }
return ret;
}
@@ -1445,11 +1436,12 @@ static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
return len;
}
- /* for video mode, do not send cmds more than
- * one pixel line, since it only transmit it
- * during BLLP.
- */
- /* TODO: if the command is sent in LP mode, the bit rate is only
+ /*
+ * for video mode, do not send cmds more than
+ * one pixel line, since it only transmits them
+ * during BLLP.
+ *
+ * TODO: if the command is sent in LP mode, the bit rate is only
* half of esc clk rate. In this case, if the video is already
* actively streaming, we need to check more carefully if the
* command can be fit into one BLLP.
@@ -1767,8 +1759,20 @@ static int dsi_populate_dsc_params(struct msm_dsi_host *msm_host, struct drm_dsc
return -EINVAL;
}
- if (dsc->bits_per_component != 8) {
- DRM_DEV_ERROR(&msm_host->pdev->dev, "DSI does not support bits_per_component != 8 yet\n");
+ switch (dsc->bits_per_component) {
+ case 8:
+ case 10:
+ case 12:
+ /*
+ * Only 8, 10, and 12 bpc are supported for DSC 1.1 block.
+ * If additional bpc values need to be supported, update
+ * this guard with the appropriate DSC version verification.
+ */
+ break;
+ default:
+ DRM_DEV_ERROR(&msm_host->pdev->dev,
+ "Unsupported bits_per_component value: %d\n",
+ dsc->bits_per_component);
return -EOPNOTSUPP;
}
@@ -1779,7 +1783,7 @@ static int dsi_populate_dsc_params(struct msm_dsi_host *msm_host, struct drm_dsc
drm_dsc_set_const_params(dsc);
drm_dsc_set_rc_buf_thresh(dsc);
- /* handle only bpp = bpc = 8, pre-SCR panels */
+ /* DPU supports only pre-SCR panels */
ret = drm_dsc_setup_rc_params(dsc, DRM_DSC_1_1_PRE_SCR);
if (ret) {
DRM_DEV_ERROR(&msm_host->pdev->dev, "could not find DSC RC parameters\n");
@@ -1827,8 +1831,15 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
__func__, ret);
goto err;
}
- if (!ret)
+ if (!ret) {
msm_dsi->te_source = devm_kstrdup(dev, te_source, GFP_KERNEL);
+ if (!msm_dsi->te_source) {
+ DRM_DEV_ERROR(dev, "%s: failed to allocate te_source\n",
+ __func__);
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
ret = 0;
if (of_property_present(np, "syscon-sfpb")) {
@@ -1874,39 +1885,35 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
int ret;
msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
- if (!msm_host) {
+ if (!msm_host)
return -ENOMEM;
- }
msm_host->pdev = pdev;
msm_dsi->host = &msm_host->base;
ret = dsi_host_parse_dt(msm_host);
- if (ret) {
- pr_err("%s: failed to parse dt\n", __func__);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "%s: failed to parse dt\n",
+ __func__);
msm_host->ctrl_base = msm_ioremap_size(pdev, "dsi_ctrl", &msm_host->ctrl_size);
- if (IS_ERR(msm_host->ctrl_base)) {
- pr_err("%s: unable to map Dsi ctrl base\n", __func__);
- return PTR_ERR(msm_host->ctrl_base);
- }
+ if (IS_ERR(msm_host->ctrl_base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(msm_host->ctrl_base),
+ "%s: unable to map Dsi ctrl base\n", __func__);
pm_runtime_enable(&pdev->dev);
msm_host->cfg_hnd = dsi_get_config(msm_host);
- if (!msm_host->cfg_hnd) {
- pr_err("%s: get config failed\n", __func__);
- return -EINVAL;
- }
+ if (!msm_host->cfg_hnd)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "%s: get config failed\n", __func__);
cfg = msm_host->cfg_hnd->cfg;
msm_host->id = dsi_host_get_id(msm_host);
- if (msm_host->id < 0) {
- pr_err("%s: unable to identify DSI host index\n", __func__);
- return msm_host->id;
- }
+ if (msm_host->id < 0)
+ return dev_err_probe(&pdev->dev, msm_host->id,
+ "%s: unable to identify DSI host index\n",
+ __func__);
/* fixup base address by io offset */
msm_host->ctrl_base += cfg->io_offset;
@@ -1918,42 +1925,32 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
return ret;
ret = dsi_clk_init(msm_host);
- if (ret) {
- pr_err("%s: unable to initialize dsi clks\n", __func__);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "%s: unable to initialize dsi clks\n", __func__);
msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
- if (!msm_host->rx_buf) {
- pr_err("%s: alloc rx temp buf failed\n", __func__);
+ if (!msm_host->rx_buf)
return -ENOMEM;
- }
ret = devm_pm_opp_set_clkname(&pdev->dev, "byte");
if (ret)
return ret;
/* OPP table is optional */
ret = devm_pm_opp_of_add_table(&pdev->dev);
- if (ret && ret != -ENODEV) {
- dev_err(&pdev->dev, "invalid OPP table in device tree\n");
- return ret;
- }
+ if (ret && ret != -ENODEV)
+ return dev_err_probe(&pdev->dev, ret, "invalid OPP table in device tree\n");
msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
- if (!msm_host->irq) {
- dev_err(&pdev->dev, "failed to get irq\n");
- return -EINVAL;
- }
+ if (!msm_host->irq)
+ return dev_err_probe(&pdev->dev, -EINVAL, "failed to get irq\n");
/* do not autoenable, will be enabled later */
ret = devm_request_irq(&pdev->dev, msm_host->irq, dsi_host_irq,
IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN,
"dsi_isr", msm_host);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
- msm_host->irq, ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "failed to request IRQ%u\n",
+ msm_host->irq);
init_completion(&msm_host->dma_comp);
init_completion(&msm_host->video_comp);
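The msm_dsi_host_init() conversion above folds each "log then return" pair into dev_err_probe(), which prints the message (or records it as the deferral reason for -EPROBE_DEFER) and passes the error code straight through. A minimal sketch of that pattern, using a hypothetical example_map_ctrl() helper rather than driver code:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>

static int example_map_ctrl(struct device *dev, void __iomem *base)
{
	/* dev_err_probe() logs (or stores the defer reason) and returns the error */
	if (IS_ERR(base))
		return dev_err_probe(dev, PTR_ERR(base),
				     "unable to map ctrl base\n");

	return 0;
}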
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index a210b7c9e5ca..4fabb01345aa 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -74,17 +74,35 @@ static int dsi_mgr_setup_components(int id)
int ret;
if (!IS_BONDED_DSI()) {
+ /*
+ * Set the usecase before calling msm_dsi_host_register(), which would
+ * otherwise program the PLL source mux based on a default usecase.
+ */
+ msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE);
+ msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
+
ret = msm_dsi_host_register(msm_dsi->host);
if (ret)
return ret;
-
- msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE);
- msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
} else if (other_dsi) {
struct msm_dsi *master_link_dsi = IS_MASTER_DSI_LINK(id) ?
msm_dsi : other_dsi;
struct msm_dsi *slave_link_dsi = IS_MASTER_DSI_LINK(id) ?
other_dsi : msm_dsi;
+
+ /*
+ * PLL0 is to drive both DSI link clocks in bonded DSI mode.
+ *
+ * Set the usecase before calling msm_dsi_host_register(), which would
+ * otherwise program the PLL source mux based on a default usecase.
+ */
+ msm_dsi_phy_set_usecase(clk_master_dsi->phy,
+ MSM_DSI_PHY_MASTER);
+ msm_dsi_phy_set_usecase(clk_slave_dsi->phy,
+ MSM_DSI_PHY_SLAVE);
+ msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
+ msm_dsi_host_set_phy_mode(other_dsi->host, other_dsi->phy);
+
/* Register slave host first, so that slave DSI device
* has a chance to probe, and do not block the master
* DSI device's probe.
@@ -98,14 +116,6 @@ static int dsi_mgr_setup_components(int id)
ret = msm_dsi_host_register(master_link_dsi->host);
if (ret)
return ret;
-
- /* PLL0 is to drive both 2 DSI link clocks in bonded DSI mode. */
- msm_dsi_phy_set_usecase(clk_master_dsi->phy,
- MSM_DSI_PHY_MASTER);
- msm_dsi_phy_set_usecase(clk_slave_dsi->phy,
- MSM_DSI_PHY_SLAVE);
- msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
- msm_dsi_host_set_phy_mode(other_dsi->host, other_dsi->phy);
}
return 0;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 8985818bb2e0..1925418d9999 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -6,6 +6,7 @@
#ifndef __DSI_PHY_H__
#define __DSI_PHY_H__
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/regulator/consumer.h>
@@ -84,9 +85,7 @@ struct msm_dsi_dphy_timing {
u8 hs_halfbyte_en_ckln;
};
-#define DSI_BYTE_PLL_CLK 0
-#define DSI_PIXEL_PLL_CLK 1
-#define NUM_PROVIDED_CLKS 2
+#define NUM_PROVIDED_CLKS (DSI_PIXEL_PLL_CLK + 1)
#define DSI_LANE_MAX 5
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index 677c62571811..9812b4d69197 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -3,6 +3,7 @@
* Copyright (c) 2018, The Linux Foundation
*/
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/iopoll.h>
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index 2c3cbe0f2870..3a1c8ece6657 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -3,6 +3,7 @@
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*/
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
index 1383e3a4e050..90348a2af3e9 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -3,6 +3,7 @@
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
*/
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index 5311ab7f3c70..f3643320ff2f 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -3,6 +3,7 @@
* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
*/
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index 798168180c1a..a92decbee5b5 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -3,6 +3,8 @@
* Copyright (c) 2018, The Linux Foundation
*/
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/iopoll.h>
@@ -305,7 +307,7 @@ static void dsi_pll_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *confi
writel(pll->phy->cphy_mode ? 0x00 : 0x10,
base + REG_DSI_7nm_PHY_PLL_CMODE_1);
writel(config->pll_clock_inverters,
- base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS);
+ base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_1);
}
static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -572,11 +574,11 @@ static void dsi_7nm_pll_save_state(struct msm_dsi_phy *phy)
cached->pll_out_div &= 0x3;
cmn_clk_cfg0 = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
- cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
- cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
+ cached->bit_clk_div = FIELD_GET(DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK, cmn_clk_cfg0);
+ cached->pix_clk_div = FIELD_GET(DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK, cmn_clk_cfg0);
cmn_clk_cfg1 = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
- cached->pll_mux = cmn_clk_cfg1 & 0x3;
+ cached->pll_mux = FIELD_GET(DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL__MASK, cmn_clk_cfg1);
DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
pll_7nm->phy->id, cached->pll_out_div, cached->bit_clk_div,
@@ -598,7 +600,8 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
dsi_pll_cmn_clk_cfg0_write(pll_7nm,
DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(cached->bit_clk_div) |
DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(cached->pix_clk_div));
- dsi_pll_cmn_clk_cfg1_update(pll_7nm, 0x3, cached->pll_mux);
+ dsi_pll_cmn_clk_cfg1_update(pll_7nm, DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL__MASK,
+ cached->pll_mux);
ret = dsi_pll_7nm_vco_set_rate(phy->vco_hw,
pll_7nm->vco_current_rate,
@@ -736,11 +739,9 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide
* don't register a pclk_mux clock and just use post_out_div instead
*/
if (pll_7nm->phy->cphy_mode) {
- u32 data;
-
- data = readl(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
- writel(data | 3, pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
-
+ dsi_pll_cmn_clk_cfg1_update(pll_7nm,
+ DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL__MASK,
+ DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL(3));
phy_pll_out_dsi_parent = pll_post_out_div;
} else {
snprintf(clk_name, sizeof(clk_name), "dsi%d_pclk_mux", pll_7nm->phy->id);
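The 7nm PHY save/restore hunk above replaces open-coded mask/shift arithmetic with FIELD_GET() on the generated *__MASK definitions. A standalone illustration of the <linux/bitfield.h> helpers, with made-up EXAMPLE_* masks standing in for the real register fields:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_DIV_CTRL_3_0	GENMASK(3, 0)
#define EXAMPLE_DIV_CTRL_7_4	GENMASK(7, 4)

static u32 example_pack_div(u8 bit_div, u8 pix_div)
{
	/* FIELD_PREP() shifts the value into the field described by the mask */
	return FIELD_PREP(EXAMPLE_DIV_CTRL_3_0, bit_div) |
	       FIELD_PREP(EXAMPLE_DIV_CTRL_7_4, pix_div);
}

static u8 example_unpack_pix_div(u32 reg)
{
	/* FIELD_GET() masks and shifts the field back out */
	return FIELD_GET(EXAMPLE_DIV_CTRL_7_4, reg);
}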
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 37b3809c6bdd..248541ff4492 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -12,8 +12,8 @@
#include <drm/drm_bridge_connector.h>
#include <drm/drm_of.h>
+#include <drm/display/drm_hdmi_state_helper.h>
-#include <sound/hdmi-codec.h>
#include "hdmi.h"
void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
@@ -24,7 +24,7 @@ void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
spin_lock_irqsave(&hdmi->reg_lock, flags);
if (power_on) {
ctrl |= HDMI_CTRL_ENABLE;
- if (!hdmi->hdmi_mode) {
+ if (!hdmi->connector->display_info.is_hdmi) {
ctrl |= HDMI_CTRL_HDMI;
hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
ctrl &= ~HDMI_CTRL_HDMI;
@@ -165,8 +165,6 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
hdmi->dev = dev;
hdmi->encoder = encoder;
- hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
-
ret = msm_hdmi_bridge_init(hdmi);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to create HDMI bridge: %d\n", ret);
@@ -246,111 +244,6 @@ static const struct hdmi_platform_config hdmi_tx_8974_config = {
.hpd_freq = hpd_clk_freq_8x74,
};
-/*
- * HDMI audio codec callbacks
- */
-static int msm_hdmi_audio_hw_params(struct device *dev, void *data,
- struct hdmi_codec_daifmt *daifmt,
- struct hdmi_codec_params *params)
-{
- struct hdmi *hdmi = dev_get_drvdata(dev);
- unsigned int chan;
- unsigned int channel_allocation = 0;
- unsigned int rate;
- unsigned int level_shift = 0; /* 0dB */
- bool down_mix = false;
-
- DRM_DEV_DEBUG(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate,
- params->sample_width, params->cea.channels);
-
- switch (params->cea.channels) {
- case 2:
- /* FR and FL speakers */
- channel_allocation = 0;
- chan = MSM_HDMI_AUDIO_CHANNEL_2;
- break;
- case 4:
- /* FC, LFE, FR and FL speakers */
- channel_allocation = 0x3;
- chan = MSM_HDMI_AUDIO_CHANNEL_4;
- break;
- case 6:
- /* RR, RL, FC, LFE, FR and FL speakers */
- channel_allocation = 0x0B;
- chan = MSM_HDMI_AUDIO_CHANNEL_6;
- break;
- case 8:
- /* FRC, FLC, RR, RL, FC, LFE, FR and FL speakers */
- channel_allocation = 0x1F;
- chan = MSM_HDMI_AUDIO_CHANNEL_8;
- break;
- default:
- return -EINVAL;
- }
-
- switch (params->sample_rate) {
- case 32000:
- rate = HDMI_SAMPLE_RATE_32KHZ;
- break;
- case 44100:
- rate = HDMI_SAMPLE_RATE_44_1KHZ;
- break;
- case 48000:
- rate = HDMI_SAMPLE_RATE_48KHZ;
- break;
- case 88200:
- rate = HDMI_SAMPLE_RATE_88_2KHZ;
- break;
- case 96000:
- rate = HDMI_SAMPLE_RATE_96KHZ;
- break;
- case 176400:
- rate = HDMI_SAMPLE_RATE_176_4KHZ;
- break;
- case 192000:
- rate = HDMI_SAMPLE_RATE_192KHZ;
- break;
- default:
- DRM_DEV_ERROR(dev, "rate[%d] not supported!\n",
- params->sample_rate);
- return -EINVAL;
- }
-
- msm_hdmi_audio_set_sample_rate(hdmi, rate);
- msm_hdmi_audio_info_setup(hdmi, 1, chan, channel_allocation,
- level_shift, down_mix);
-
- return 0;
-}
-
-static void msm_hdmi_audio_shutdown(struct device *dev, void *data)
-{
- struct hdmi *hdmi = dev_get_drvdata(dev);
-
- msm_hdmi_audio_info_setup(hdmi, 0, 0, 0, 0, 0);
-}
-
-static const struct hdmi_codec_ops msm_hdmi_audio_codec_ops = {
- .hw_params = msm_hdmi_audio_hw_params,
- .audio_shutdown = msm_hdmi_audio_shutdown,
-};
-
-static struct hdmi_codec_pdata codec_data = {
- .ops = &msm_hdmi_audio_codec_ops,
- .max_i2s_channels = 8,
- .i2s = 1,
-};
-
-static int msm_hdmi_register_audio_driver(struct hdmi *hdmi, struct device *dev)
-{
- hdmi->audio_pdev = platform_device_register_data(dev,
- HDMI_CODEC_DRV_NAME,
- PLATFORM_DEVID_AUTO,
- &codec_data,
- sizeof(codec_data));
- return PTR_ERR_OR_ZERO(hdmi->audio_pdev);
-}
-
static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
{
struct msm_drm_private *priv = dev_get_drvdata(master);
@@ -362,12 +255,6 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
return err;
priv->hdmi = hdmi;
- err = msm_hdmi_register_audio_driver(hdmi, dev);
- if (err) {
- DRM_ERROR("Failed to attach an audio codec %d\n", err);
- hdmi->audio_pdev = NULL;
- }
-
return 0;
}
@@ -377,9 +264,6 @@ static void msm_hdmi_unbind(struct device *dev, struct device *master,
struct msm_drm_private *priv = dev_get_drvdata(master);
if (priv->hdmi) {
- if (priv->hdmi->audio_pdev)
- platform_device_unregister(priv->hdmi->audio_pdev);
-
if (priv->hdmi->bridge)
msm_hdmi_hpd_disable(priv->hdmi);
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index a62d2aedfbb7..a5f481c39277 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -24,8 +24,8 @@ struct hdmi_platform_config;
struct hdmi_audio {
bool enabled;
- struct hdmi_audio_infoframe infoframe;
int rate;
+ int channels;
};
struct hdmi_hdcp_ctrl;
@@ -33,7 +33,6 @@ struct hdmi_hdcp_ctrl;
struct hdmi {
struct drm_device *dev;
struct platform_device *pdev;
- struct platform_device *audio_pdev;
const struct hdmi_platform_config *config;
@@ -67,8 +66,6 @@ struct hdmi {
/* the encoder we are hooked to (outside of hdmi block) */
struct drm_encoder *encoder;
- bool hdmi_mode; /* are we in hdmi mode? */
-
int irq;
struct workqueue_struct *workq;
@@ -207,26 +204,16 @@ static inline int msm_hdmi_pll_8998_init(struct platform_device *pdev)
/*
* audio:
*/
-/* Supported HDMI Audio channels and rates */
-#define MSM_HDMI_AUDIO_CHANNEL_2 0
-#define MSM_HDMI_AUDIO_CHANNEL_4 1
-#define MSM_HDMI_AUDIO_CHANNEL_6 2
-#define MSM_HDMI_AUDIO_CHANNEL_8 3
-
-#define HDMI_SAMPLE_RATE_32KHZ 0
-#define HDMI_SAMPLE_RATE_44_1KHZ 1
-#define HDMI_SAMPLE_RATE_48KHZ 2
-#define HDMI_SAMPLE_RATE_88_2KHZ 3
-#define HDMI_SAMPLE_RATE_96KHZ 4
-#define HDMI_SAMPLE_RATE_176_4KHZ 5
-#define HDMI_SAMPLE_RATE_192KHZ 6
+struct hdmi_codec_daifmt;
+struct hdmi_codec_params;
int msm_hdmi_audio_update(struct hdmi *hdmi);
-int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
- uint32_t num_of_channels, uint32_t channel_allocation,
- uint32_t level_shift, bool down_mix);
-void msm_hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate);
-
+int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params);
+void msm_hdmi_bridge_audio_shutdown(struct drm_connector *connector,
+ struct drm_bridge *bridge);
/*
* hdmi bridge:
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
index 4c2058c4adc1..8bb975e82c17 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -4,11 +4,13 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <drm/display/drm_hdmi_state_helper.h>
+
#include <linux/hdmi.h>
-#include "hdmi.h"
-/* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */
-static int nchannels[] = { 2, 4, 6, 8 };
+#include <sound/hdmi-codec.h>
+
+#include "hdmi.h"
/* Supported HDMI Audio sample rates */
#define MSM_HDMI_SAMPLE_RATE_32KHZ 0
@@ -74,16 +76,17 @@ static const struct hdmi_msm_audio_arcs *get_arcs(unsigned long int pixclock)
int msm_hdmi_audio_update(struct hdmi *hdmi)
{
struct hdmi_audio *audio = &hdmi->audio;
- struct hdmi_audio_infoframe *info = &audio->infoframe;
const struct hdmi_msm_audio_arcs *arcs = NULL;
bool enabled = audio->enabled;
uint32_t acr_pkt_ctrl, vbi_pkt_ctrl, aud_pkt_ctrl;
- uint32_t infofrm_ctrl, audio_config;
+ uint32_t audio_config;
+
+ if (!hdmi->connector->display_info.is_hdmi)
+ return -EINVAL;
+
+ DBG("audio: enabled=%d, channels=%d, rate=%d",
+ audio->enabled, audio->channels, audio->rate);
- DBG("audio: enabled=%d, channels=%d, channel_allocation=0x%x, "
- "level_shift_value=%d, downmix_inhibit=%d, rate=%d",
- audio->enabled, info->channels, info->channel_allocation,
- info->level_shift_value, info->downmix_inhibit, audio->rate);
DBG("video: power_on=%d, pixclock=%lu", hdmi->power_on, hdmi->pixclock);
if (enabled && !(hdmi->power_on && hdmi->pixclock)) {
@@ -104,7 +107,6 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
acr_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_ACR_PKT_CTRL);
vbi_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL);
aud_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_AUDIO_PKT_CTRL1);
- infofrm_ctrl = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0);
audio_config = hdmi_read(hdmi, REG_HDMI_AUDIO_CFG);
/* Clear N/CTS selection bits */
@@ -113,7 +115,6 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
if (enabled) {
uint32_t n, cts, multiplier;
enum hdmi_acr_cts select;
- uint8_t buf[14];
n = arcs->lut[audio->rate].n;
cts = arcs->lut[audio->rate].cts;
@@ -155,20 +156,12 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
HDMI_ACR_1_N(n));
hdmi_write(hdmi, REG_HDMI_AUDIO_PKT_CTRL2,
- COND(info->channels != 2, HDMI_AUDIO_PKT_CTRL2_LAYOUT) |
+ COND(audio->channels != 2, HDMI_AUDIO_PKT_CTRL2_LAYOUT) |
HDMI_AUDIO_PKT_CTRL2_OVERRIDE);
acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_CONT;
acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_SEND;
- /* configure infoframe: */
- hdmi_audio_infoframe_pack(info, buf, sizeof(buf));
- hdmi_write(hdmi, REG_HDMI_AUDIO_INFO0,
- (buf[3] << 0) | (buf[4] << 8) |
- (buf[5] << 16) | (buf[6] << 24));
- hdmi_write(hdmi, REG_HDMI_AUDIO_INFO1,
- (buf[7] << 0) | (buf[8] << 8));
-
hdmi_write(hdmi, REG_HDMI_GC, 0);
vbi_pkt_ctrl |= HDMI_VBI_PKT_CTRL_GC_ENABLE;
@@ -176,11 +169,6 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
aud_pkt_ctrl |= HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND;
- infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND;
- infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT;
- infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE;
- infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE;
-
audio_config &= ~HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK;
audio_config |= HDMI_AUDIO_CFG_FIFO_WATERMARK(4);
audio_config |= HDMI_AUDIO_CFG_ENGINE_ENABLE;
@@ -190,17 +178,12 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_ENABLE;
vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_EVERY_FRAME;
aud_pkt_ctrl &= ~HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND;
- infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND;
- infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT;
- infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE;
- infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE;
audio_config &= ~HDMI_AUDIO_CFG_ENGINE_ENABLE;
}
hdmi_write(hdmi, REG_HDMI_ACR_PKT_CTRL, acr_pkt_ctrl);
hdmi_write(hdmi, REG_HDMI_VBI_PKT_CTRL, vbi_pkt_ctrl);
hdmi_write(hdmi, REG_HDMI_AUDIO_PKT_CTRL1, aud_pkt_ctrl);
- hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, infofrm_ctrl);
hdmi_write(hdmi, REG_HDMI_AUD_INT,
COND(enabled, HDMI_AUD_INT_AUD_FIFO_URUN_INT) |
@@ -214,41 +197,72 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
return 0;
}
-int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
- uint32_t num_of_channels, uint32_t channel_allocation,
- uint32_t level_shift, bool down_mix)
+int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
{
- struct hdmi_audio *audio;
-
- if (!hdmi)
- return -ENXIO;
-
- audio = &hdmi->audio;
-
- if (num_of_channels >= ARRAY_SIZE(nchannels))
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ unsigned int rate;
+ int ret;
+
+ drm_dbg_driver(bridge->dev, "%u Hz, %d bit, %d channels\n",
+ params->sample_rate,
+ params->sample_width,
+ params->cea.channels);
+
+ switch (params->sample_rate) {
+ case 32000:
+ rate = MSM_HDMI_SAMPLE_RATE_32KHZ;
+ break;
+ case 44100:
+ rate = MSM_HDMI_SAMPLE_RATE_44_1KHZ;
+ break;
+ case 48000:
+ rate = MSM_HDMI_SAMPLE_RATE_48KHZ;
+ break;
+ case 88200:
+ rate = MSM_HDMI_SAMPLE_RATE_88_2KHZ;
+ break;
+ case 96000:
+ rate = MSM_HDMI_SAMPLE_RATE_96KHZ;
+ break;
+ case 176400:
+ rate = MSM_HDMI_SAMPLE_RATE_176_4KHZ;
+ break;
+ case 192000:
+ rate = MSM_HDMI_SAMPLE_RATE_192KHZ;
+ break;
+ default:
+ drm_err(bridge->dev, "rate[%d] not supported!\n",
+ params->sample_rate);
return -EINVAL;
+ }
+
+ ret = drm_atomic_helper_connector_hdmi_update_audio_infoframe(connector,
+ &params->cea);
+ if (ret)
+ return ret;
- audio->enabled = enabled;
- audio->infoframe.channels = nchannels[num_of_channels];
- audio->infoframe.channel_allocation = channel_allocation;
- audio->infoframe.level_shift_value = level_shift;
- audio->infoframe.downmix_inhibit = down_mix;
+ hdmi->audio.rate = rate;
+ hdmi->audio.channels = params->cea.channels;
+ hdmi->audio.enabled = true;
return msm_hdmi_audio_update(hdmi);
}
-void msm_hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate)
+void msm_hdmi_bridge_audio_shutdown(struct drm_connector *connector,
+ struct drm_bridge *bridge)
{
- struct hdmi_audio *audio;
-
- if (!hdmi)
- return;
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
- audio = &hdmi->audio;
+ drm_atomic_helper_connector_hdmi_clear_audio_infoframe(connector);
- if ((rate < 0) || (rate >= MSM_HDMI_SAMPLE_RATE_MAX))
- return;
+ hdmi->audio.rate = 0;
+ hdmi->audio.channels = 2;
+ hdmi->audio.enabled = false;
- audio->rate = rate;
msm_hdmi_audio_update(hdmi);
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 4a5b5112227f..1456354c8af4 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -7,6 +7,8 @@
#include <linux/delay.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_edid.h>
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/display/drm_hdmi_state_helper.h>
#include "msm_kms.h"
#include "hdmi.h"
@@ -67,24 +69,20 @@ static void power_off(struct drm_bridge *bridge)
}
#define AVI_IFRAME_LINE_NUMBER 1
+#define SPD_IFRAME_LINE_NUMBER 1
+#define VENSPEC_IFRAME_LINE_NUMBER 3
-static void msm_hdmi_config_avi_infoframe(struct hdmi *hdmi)
+static int msm_hdmi_config_avi_infoframe(struct hdmi *hdmi,
+ const u8 *buffer, size_t len)
{
- struct drm_crtc *crtc = hdmi->encoder->crtc;
- const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
- union hdmi_infoframe frame;
- u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
+ u32 buf[4] = {};
u32 val;
- int len;
+ int i;
- drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
- hdmi->connector, mode);
-
- len = hdmi_infoframe_pack(&frame, buffer, sizeof(buffer));
- if (len < 0) {
+ if (len != HDMI_INFOFRAME_SIZE(AVI) || len - 3 > sizeof(buf)) {
DRM_DEV_ERROR(&hdmi->pdev->dev,
"failed to configure avi infoframe\n");
- return;
+ return -EINVAL;
}
/*
@@ -93,57 +91,245 @@ static void msm_hdmi_config_avi_infoframe(struct hdmi *hdmi)
* written to the LSB byte of AVI_INFO0 and the version is written to
* the third byte from the LSB of AVI_INFO3
*/
- hdmi_write(hdmi, REG_HDMI_AVI_INFO(0),
+ memcpy(buf, &buffer[3], len - 3);
+
+ buf[3] |= buffer[1] << 24;
+
+ for (i = 0; i < ARRAY_SIZE(buf); i++)
+ hdmi_write(hdmi, REG_HDMI_AVI_INFO(i), buf[i]);
+
+ val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
+ val |= HDMI_INFOFRAME_CTRL0_AVI_SEND |
+ HDMI_INFOFRAME_CTRL0_AVI_CONT;
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, val);
+
+ val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
+ val &= ~HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__MASK;
+ val |= HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE(AVI_IFRAME_LINE_NUMBER);
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, val);
+
+ return 0;
+}
+
+static int msm_hdmi_config_audio_infoframe(struct hdmi *hdmi,
+ const u8 *buffer, size_t len)
+{
+ u32 val;
+
+ if (len != HDMI_INFOFRAME_SIZE(AUDIO)) {
+ DRM_DEV_ERROR(&hdmi->pdev->dev,
+ "failed to configure audio infoframe\n");
+ return -EINVAL;
+ }
+
+ hdmi_write(hdmi, REG_HDMI_AUDIO_INFO0,
buffer[3] |
buffer[4] << 8 |
buffer[5] << 16 |
buffer[6] << 24);
- hdmi_write(hdmi, REG_HDMI_AVI_INFO(1),
+ hdmi_write(hdmi, REG_HDMI_AUDIO_INFO1,
buffer[7] |
buffer[8] << 8 |
buffer[9] << 16 |
buffer[10] << 24);
- hdmi_write(hdmi, REG_HDMI_AVI_INFO(2),
- buffer[11] |
- buffer[12] << 8 |
- buffer[13] << 16 |
- buffer[14] << 24);
+ val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
+ val |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND |
+ HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT |
+ HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE |
+ HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE;
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, val);
- hdmi_write(hdmi, REG_HDMI_AVI_INFO(3),
- buffer[15] |
- buffer[16] << 8 |
- buffer[1] << 24);
+ return 0;
+}
- hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0,
- HDMI_INFOFRAME_CTRL0_AVI_SEND |
- HDMI_INFOFRAME_CTRL0_AVI_CONT);
+static int msm_hdmi_config_spd_infoframe(struct hdmi *hdmi,
+ const u8 *buffer, size_t len)
+{
+ u32 buf[7] = {};
+ u32 val;
+ int i;
- val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
- val &= ~HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__MASK;
- val |= HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE(AVI_IFRAME_LINE_NUMBER);
- hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, val);
+ if (len != HDMI_INFOFRAME_SIZE(SPD) || len - 3 > sizeof(buf)) {
+ DRM_DEV_ERROR(&hdmi->pdev->dev,
+ "failed to configure SPD infoframe\n");
+ return -EINVAL;
+ }
+
+ /* checksum gets written together with the body of the frame */
+ hdmi_write(hdmi, REG_HDMI_GENERIC1_HDR,
+ buffer[0] |
+ buffer[1] << 8 |
+ buffer[2] << 16);
+
+ memcpy(buf, &buffer[3], len - 3);
+
+ for (i = 0; i < ARRAY_SIZE(buf); i++)
+ hdmi_write(hdmi, REG_HDMI_GENERIC1(i), buf[i]);
+
+ val = hdmi_read(hdmi, REG_HDMI_GEN_PKT_CTRL);
+ val |= HDMI_GEN_PKT_CTRL_GENERIC1_SEND |
+ HDMI_GEN_PKT_CTRL_GENERIC1_CONT |
+ HDMI_GEN_PKT_CTRL_GENERIC1_LINE(SPD_IFRAME_LINE_NUMBER);
+ hdmi_write(hdmi, REG_HDMI_GEN_PKT_CTRL, val);
+
+ return 0;
}
-static void msm_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
+static int msm_hdmi_config_hdmi_infoframe(struct hdmi *hdmi,
+ const u8 *buffer, size_t len)
+{
+ u32 buf[7] = {};
+ u32 val;
+ int i;
+
+ if (len < HDMI_INFOFRAME_HEADER_SIZE + HDMI_VENDOR_INFOFRAME_SIZE ||
+ len - 3 > sizeof(buf)) {
+ DRM_DEV_ERROR(&hdmi->pdev->dev,
+ "failed to configure HDMI infoframe\n");
+ return -EINVAL;
+ }
+
+ /* checksum gets written together with the body of the frame */
+ hdmi_write(hdmi, REG_HDMI_GENERIC0_HDR,
+ buffer[0] |
+ buffer[1] << 8 |
+ buffer[2] << 16);
+
+ memcpy(buf, &buffer[3], len - 3);
+
+ for (i = 0; i < ARRAY_SIZE(buf); i++)
+ hdmi_write(hdmi, REG_HDMI_GENERIC0(i), buf[i]);
+
+ val = hdmi_read(hdmi, REG_HDMI_GEN_PKT_CTRL);
+ val |= HDMI_GEN_PKT_CTRL_GENERIC0_SEND |
+ HDMI_GEN_PKT_CTRL_GENERIC0_CONT |
+ HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE |
+ HDMI_GEN_PKT_CTRL_GENERIC0_LINE(VENSPEC_IFRAME_LINE_NUMBER);
+ hdmi_write(hdmi, REG_HDMI_GEN_PKT_CTRL, val);
+
+ return 0;
+}
+
+static int msm_hdmi_bridge_clear_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ u32 val;
+
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0);
+ val &= ~(HDMI_INFOFRAME_CTRL0_AVI_SEND |
+ HDMI_INFOFRAME_CTRL0_AVI_CONT);
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, val);
+
+ val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
+ val &= ~HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__MASK;
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, val);
+
+ break;
+
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0);
+ val &= ~(HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND |
+ HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT |
+ HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE |
+ HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE);
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, val);
+
+ val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
+ val &= ~HDMI_INFOFRAME_CTRL1_AUDIO_INFO_LINE__MASK;
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, val);
+
+ break;
+
+ case HDMI_INFOFRAME_TYPE_SPD:
+ val = hdmi_read(hdmi, REG_HDMI_GEN_PKT_CTRL);
+ val &= ~(HDMI_GEN_PKT_CTRL_GENERIC1_SEND |
+ HDMI_GEN_PKT_CTRL_GENERIC1_CONT |
+ HDMI_GEN_PKT_CTRL_GENERIC1_LINE__MASK);
+ hdmi_write(hdmi, REG_HDMI_GEN_PKT_CTRL, val);
+
+ break;
+
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ val = hdmi_read(hdmi, REG_HDMI_GEN_PKT_CTRL);
+ val &= ~(HDMI_GEN_PKT_CTRL_GENERIC0_SEND |
+ HDMI_GEN_PKT_CTRL_GENERIC0_CONT |
+ HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE |
+ HDMI_GEN_PKT_CTRL_GENERIC0_LINE__MASK);
+ hdmi_write(hdmi, REG_HDMI_GEN_PKT_CTRL, val);
+
+ break;
+
+ default:
+ drm_dbg_driver(hdmi_bridge->base.dev, "Unsupported infoframe type %x\n", type);
+ }
+
+ return 0;
+}
+
+static int msm_hdmi_bridge_write_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type,
+ const u8 *buffer, size_t len)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+
+ msm_hdmi_bridge_clear_infoframe(bridge, type);
+
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ return msm_hdmi_config_avi_infoframe(hdmi, buffer, len);
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ return msm_hdmi_config_audio_infoframe(hdmi, buffer, len);
+ case HDMI_INFOFRAME_TYPE_SPD:
+ return msm_hdmi_config_spd_infoframe(hdmi, buffer, len);
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ return msm_hdmi_config_hdmi_infoframe(hdmi, buffer, len);
+ default:
+ drm_dbg_driver(hdmi_bridge->base.dev, "Unsupported infoframe type %x\n", type);
+ return 0;
+ }
+}
+
+static void msm_hdmi_set_timings(struct hdmi *hdmi,
+ const struct drm_display_mode *mode);
+
+static void msm_hdmi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
struct hdmi_phy *phy = hdmi->phy;
+ struct drm_encoder *encoder = bridge->encoder;
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
DBG("power up");
+ connector = drm_atomic_get_new_connector_for_encoder(state, encoder);
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+
+ hdmi->pixclock = conn_state->hdmi.tmds_char_rate;
+
+ msm_hdmi_set_timings(hdmi, &crtc_state->adjusted_mode);
+
if (!hdmi->power_on) {
msm_hdmi_phy_resource_enable(phy);
msm_hdmi_power_on(bridge);
hdmi->power_on = true;
- if (hdmi->hdmi_mode) {
- msm_hdmi_config_avi_infoframe(hdmi);
+ if (connector->display_info.is_hdmi)
msm_hdmi_audio_update(hdmi);
- }
}
+ drm_atomic_helper_connector_hdmi_update_infoframes(connector, state);
+
msm_hdmi_phy_powerup(phy, hdmi->pixclock);
msm_hdmi_set_mode(hdmi, true);
@@ -152,7 +338,8 @@ static void msm_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
msm_hdmi_hdcp_on(hdmi->hdcp_ctrl);
}
-static void msm_hdmi_bridge_post_disable(struct drm_bridge *bridge)
+static void msm_hdmi_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
@@ -169,25 +356,18 @@ static void msm_hdmi_bridge_post_disable(struct drm_bridge *bridge)
if (hdmi->power_on) {
power_off(bridge);
hdmi->power_on = false;
- if (hdmi->hdmi_mode)
+ if (hdmi->connector->display_info.is_hdmi)
msm_hdmi_audio_update(hdmi);
msm_hdmi_phy_resource_disable(phy);
}
}
-static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adjusted_mode)
+static void msm_hdmi_set_timings(struct hdmi *hdmi,
+ const struct drm_display_mode *mode)
{
- struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
- struct hdmi *hdmi = hdmi_bridge->hdmi;
int hstart, hend, vstart, vend;
uint32_t frame_ctrl;
- mode = adjusted_mode;
-
- hdmi->pixclock = mode->clock * 1000;
-
hstart = mode->htotal - mode->hsync_start;
hend = mode->htotal - mode->hsync_start + mode->hdisplay;
@@ -232,7 +412,7 @@ static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge,
DBG("frame_ctrl=%08x", frame_ctrl);
hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl);
- if (hdmi->hdmi_mode)
+ if (hdmi->connector->display_info.is_hdmi)
msm_hdmi_audio_update(hdmi);
}
@@ -251,32 +431,19 @@ static const struct drm_edid *msm_hdmi_bridge_edid_read(struct drm_bridge *bridg
hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
- if (drm_edid) {
- /*
- * FIXME: This should use connector->display_info.is_hdmi from a
- * path that has read the EDID and called
- * drm_edid_connector_update().
- */
- const struct edid *edid = drm_edid_raw(drm_edid);
-
- hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
- }
-
return drm_edid;
}
-static enum drm_mode_status msm_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
- const struct drm_display_info *info,
- const struct drm_display_mode *mode)
+static enum drm_mode_status msm_hdmi_bridge_tmds_char_rate_valid(const struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ unsigned long long tmds_rate)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct msm_drm_private *priv = bridge->dev->dev_private;
struct msm_kms *kms = priv->kms;
- long actual, requested;
-
- requested = 1000 * mode->clock;
+ long actual;
/* for mdp5/apq8074, we manage our own pixel clk (as opposed to
* mdp4/dtv stuff where pixel clk is assigned to mdp/encoder
@@ -284,27 +451,34 @@ static enum drm_mode_status msm_hdmi_bridge_mode_valid(struct drm_bridge *bridge
*/
if (kms->funcs->round_pixclk)
actual = kms->funcs->round_pixclk(kms,
- requested, hdmi_bridge->hdmi->encoder);
+ tmds_rate,
+ hdmi_bridge->hdmi->encoder);
else if (config->pwr_clk_cnt > 0)
- actual = clk_round_rate(hdmi->pwr_clks[0], requested);
+ actual = clk_round_rate(hdmi->pwr_clks[0], tmds_rate);
else
- actual = requested;
+ actual = tmds_rate;
- DBG("requested=%ld, actual=%ld", requested, actual);
+ DBG("requested=%lld, actual=%ld", tmds_rate, actual);
- if (actual != requested)
+ if (actual != tmds_rate)
return MODE_CLOCK_RANGE;
return 0;
}
static const struct drm_bridge_funcs msm_hdmi_bridge_funcs = {
- .pre_enable = msm_hdmi_bridge_pre_enable,
- .post_disable = msm_hdmi_bridge_post_disable,
- .mode_set = msm_hdmi_bridge_mode_set,
- .mode_valid = msm_hdmi_bridge_mode_valid,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_pre_enable = msm_hdmi_bridge_atomic_pre_enable,
+ .atomic_post_disable = msm_hdmi_bridge_atomic_post_disable,
.edid_read = msm_hdmi_bridge_edid_read,
.detect = msm_hdmi_bridge_detect,
+ .hdmi_tmds_char_rate_valid = msm_hdmi_bridge_tmds_char_rate_valid,
+ .hdmi_clear_infoframe = msm_hdmi_bridge_clear_infoframe,
+ .hdmi_write_infoframe = msm_hdmi_bridge_write_infoframe,
+ .hdmi_audio_prepare = msm_hdmi_bridge_audio_prepare,
+ .hdmi_audio_shutdown = msm_hdmi_bridge_audio_shutdown,
};
static void
@@ -336,9 +510,15 @@ int msm_hdmi_bridge_init(struct hdmi *hdmi)
bridge->funcs = &msm_hdmi_bridge_funcs;
bridge->ddc = hdmi->i2c;
bridge->type = DRM_MODE_CONNECTOR_HDMIA;
+ bridge->vendor = "Qualcomm";
+ bridge->product = "Snapdragon";
bridge->ops = DRM_BRIDGE_OP_HPD |
DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_HDMI |
DRM_BRIDGE_OP_EDID;
+ bridge->hdmi_audio_max_i2s_playback_channels = 8;
+ bridge->hdmi_audio_dev = &hdmi->pdev->dev;
+ bridge->hdmi_audio_dai_port = -1;
ret = devm_drm_bridge_add(hdmi->dev->dev, bridge);
if (ret)
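For reference, the word packing done by msm_hdmi_config_avi_infoframe() above can be read in isolation: the packed infoframe begins with a 3-byte header, and the block expects the checksum plus payload as 32-bit little-endian words, with the version byte folded into the top byte of the last word. A rough, driver-independent sketch (hypothetical example_pack_avi_words(); little-endian assumed, as on this hardware):

#include <linux/string.h>
#include <linux/types.h>

static void example_pack_avi_words(const u8 *buffer, size_t len, u32 words[4])
{
	memset(words, 0, 4 * sizeof(u32));
	/* skip the 3-byte header; copy checksum + payload into the words */
	memcpy(words, &buffer[3], len - 3);
	/* the infoframe version (buffer[1]) lands in the top byte of word 3 */
	words[3] |= (u32)buffer[1] << 24;
}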
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index a7a2384044ff..87a91148a731 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -183,10 +183,16 @@ static unsigned get_crtc_mask(struct drm_atomic_state *state)
int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_crtc *crtc;
- int i;
+ int i, ret = 0;
+ /*
+ * FIXME: stop setting allow_modeset and move this check to the DPU
+ * driver.
+ */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
if ((old_crtc_state->ctm && !new_crtc_state->ctm) ||
@@ -196,6 +202,11 @@ int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
}
}
+ if (kms && kms->funcs && kms->funcs->check_mode_changed)
+ ret = kms->funcs->check_mode_changed(kms, state);
+ if (ret)
+ return ret;
+
return drm_atomic_helper_check(dev, state);
}
@@ -221,6 +232,8 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
kms->funcs->wait_flush(kms, crtc_mask);
trace_msm_atomic_wait_flush_finish(crtc_mask);
+ atomic_set(&kms->fault_snapshot_capture, 0);
+
/*
* Now that there is no in-progress flush, prepare the
* current update:
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index ff7a7a9f7b0d..c3588dc9e537 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -894,6 +894,7 @@ static const struct drm_driver msm_driver = {
DRIVER_RENDER |
DRIVER_ATOMIC |
DRIVER_MODESET |
+ DRIVER_SYNCOBJ_TIMELINE |
DRIVER_SYNCOBJ,
.open = msm_open,
.postclose = msm_postclose,
diff --git a/drivers/gpu/drm/msm/msm_dsc_helper.h b/drivers/gpu/drm/msm/msm_dsc_helper.h
index b9049fe1e279..63f95523b2cb 100644
--- a/drivers/gpu/drm/msm/msm_dsc_helper.h
+++ b/drivers/gpu/drm/msm/msm_dsc_helper.h
@@ -13,17 +13,6 @@
#include <drm/display/drm_dsc_helper.h>
/**
- * msm_dsc_get_slices_per_intf() - calculate number of slices per interface
- * @dsc: Pointer to drm dsc config struct
- * @intf_width: interface width in pixels
- * Returns: Integer representing the number of slices for the given interface
- */
-static inline u32 msm_dsc_get_slices_per_intf(const struct drm_dsc_config *dsc, u32 intf_width)
-{
- return DIV_ROUND_UP(intf_width, dsc->slice_width);
-}
-
-/**
* msm_dsc_get_bytes_per_line() - calculate bytes per line
* @dsc: Pointer to drm dsc config struct
* Returns: Integer value representing bytes per line. DSI and DP need
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index 1a5d4f1c8b42..d41e5a6bbee0 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -65,8 +65,7 @@ msm_fence_context_alloc(struct drm_device *dev, volatile uint32_t *fenceptr,
fctx->completed_fence = fctx->last_fence;
*fctx->fenceptr = fctx->last_fence;
- hrtimer_init(&fctx->deadline_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- fctx->deadline_timer.function = deadline_timer;
+ hrtimer_setup(&fctx->deadline_timer, deadline_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
kthread_init_work(&fctx->deadline_work, deadline_work);
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index dee470403036..3e9aa2cc38ef 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -509,7 +509,7 @@ static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
}
if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) {
- ret = -SUBMIT_ERROR(EINVAL, submit, "invalid syncobj flags: %x", syncobj_desc.flags);
+ ret = SUBMIT_ERROR(EINVAL, submit, "invalid syncobj flags: %x", syncobj_desc.flags);
break;
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 8557998e0c92..c380d9d9f5af 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -281,6 +281,15 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
if (submit) {
int i;
+ if (state->fault_info.ttbr0) {
+ struct msm_gpu_fault_info *info = &state->fault_info;
+ struct msm_mmu *mmu = submit->aspace->mmu;
+
+ msm_iommu_pagetable_params(mmu, &info->pgtbl_ttbr0,
+ &info->asid);
+ msm_iommu_pagetable_walk(mmu, info->iova, info->ptes);
+ }
+
state->bos = kcalloc(submit->nr_bos,
sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 7cabc8480d7c..e25009150579 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -101,6 +101,14 @@ struct msm_gpu_fault_info {
int flags;
const char *type;
const char *block;
+
+ /* Information about what we think/expect is the current SMMU state,
+ * for example expected_ttbr0 should match smmu_info.ttbr0 which
+ * was read back from SMMU registers.
+ */
+ phys_addr_t pgtbl_ttbr0;
+ u64 ptes[4];
+ int asid;
};
/**
diff --git a/drivers/gpu/drm/msm/msm_io_utils.c b/drivers/gpu/drm/msm/msm_io_utils.c
index afedd61c3e28..a6efe1eac271 100644
--- a/drivers/gpu/drm/msm/msm_io_utils.c
+++ b/drivers/gpu/drm/msm/msm_io_utils.c
@@ -135,8 +135,7 @@ void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
clockid_t clock_id,
enum hrtimer_mode mode)
{
- hrtimer_init(&work->timer, clock_id, mode);
- work->timer.function = msm_hrtimer_worktimer;
+ hrtimer_setup(&work->timer, msm_hrtimer_worktimer, clock_id, mode);
work->worker = worker;
kthread_init_work(&work->work, fn);
}
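The two hrtimer changes above (msm_fence.c and msm_io_utils.c) are mechanical conversions to hrtimer_setup(), which takes the callback at initialization time instead of assigning ->function afterwards. A small self-contained example with hypothetical names:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
{
	/* one-shot: do the work and don't rearm */
	return HRTIMER_NORESTART;
}

static void example_arm_timer(struct hrtimer *t)
{
	hrtimer_setup(t, example_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_start(t, ms_to_ktime(10), HRTIMER_MODE_REL);
}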
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 2a94e82316f9..fd73dcd3f30e 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -195,6 +195,28 @@ struct iommu_domain_geometry *msm_iommu_get_geometry(struct msm_mmu *mmu)
return &iommu->domain->geometry;
}
+int
+msm_iommu_pagetable_walk(struct msm_mmu *mmu, unsigned long iova, uint64_t ptes[4])
+{
+ struct msm_iommu_pagetable *pagetable;
+ struct arm_lpae_io_pgtable_walk_data wd = {};
+
+ if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
+ return -EINVAL;
+
+ pagetable = to_pagetable(mmu);
+
+ if (!pagetable->pgtbl_ops->pgtable_walk)
+ return -EINVAL;
+
+ pagetable->pgtbl_ops->pgtable_walk(pagetable->pgtbl_ops, iova, &wd);
+
+ for (int i = 0; i < ARRAY_SIZE(wd.ptes); i++)
+ ptes[i] = wd.ptes[i];
+
+ return 0;
+}
+
static const struct msm_mmu_funcs pagetable_funcs = {
.map = msm_iommu_pagetable_map,
.unmap = msm_iommu_pagetable_unmap,
@@ -243,7 +265,7 @@ static const struct iommu_flush_ops tlb_ops = {
.tlb_add_page = msm_iommu_tlb_add_page,
};
-static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
+static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags, void *arg);
struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
@@ -319,7 +341,7 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
return &pagetable->base;
}
-static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
+static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags, void *arg)
{
struct msm_iommu *iommu = arg;
@@ -343,6 +365,17 @@ static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
return 0;
}
+static int msm_disp_fault_handler(struct iommu_domain *domain, struct device *dev,
+ unsigned long iova, int flags, void *arg)
+{
+ struct msm_iommu *iommu = arg;
+
+ if (iommu->base.handler)
+ return iommu->base.handler(iommu->base.arg, iova, flags, NULL);
+
+ return -ENOSYS;
+}
+
static void msm_iommu_resume_translation(struct msm_mmu *mmu)
{
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(mmu->dev);
@@ -437,6 +470,21 @@ struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks)
return &iommu->base;
}
+struct msm_mmu *msm_iommu_disp_new(struct device *dev, unsigned long quirks)
+{
+ struct msm_iommu *iommu;
+ struct msm_mmu *mmu;
+
+ mmu = msm_iommu_new(dev, quirks);
+ if (IS_ERR_OR_NULL(mmu))
+ return mmu;
+
+ iommu = to_msm_iommu(mmu);
+ iommu_set_fault_handler(iommu->domain, msm_disp_fault_handler, iommu);
+
+ return mmu;
+}
+
struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsigned long quirks)
{
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);
@@ -448,7 +496,7 @@ struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsig
return mmu;
iommu = to_msm_iommu(mmu);
- iommu_set_fault_handler(iommu->domain, msm_fault_handler, iommu);
+ iommu_set_fault_handler(iommu->domain, msm_gpu_fault_handler, iommu);
/* Enable stall on iommu fault: */
if (adreno_smmu->set_stall)
diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
index 38965e12a6bf..35d5397e73b4 100644
--- a/drivers/gpu/drm/msm/msm_kms.c
+++ b/drivers/gpu/drm/msm/msm_kms.c
@@ -164,12 +164,26 @@ void msm_crtc_disable_vblank(struct drm_crtc *crtc)
vblank_ctrl_queue_work(priv, crtc, false);
}
+static int msm_kms_fault_handler(void *arg, unsigned long iova, int flags, void *data)
+{
+ struct msm_kms *kms = arg;
+
+ if (atomic_read(&kms->fault_snapshot_capture) == 0) {
+ msm_disp_snapshot_state(kms->dev);
+ atomic_inc(&kms->fault_snapshot_capture);
+ }
+
+ return -ENOSYS;
+}
+
struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
{
struct msm_gem_address_space *aspace;
struct msm_mmu *mmu;
struct device *mdp_dev = dev->dev;
struct device *mdss_dev = mdp_dev->parent;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
struct device *iommu_dev;
/*
@@ -181,7 +195,7 @@ struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
else
iommu_dev = mdss_dev;
- mmu = msm_iommu_new(iommu_dev, 0);
+ mmu = msm_iommu_disp_new(iommu_dev, 0);
if (IS_ERR(mmu))
return ERR_CAST(mmu);
@@ -195,8 +209,11 @@ struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
if (IS_ERR(aspace)) {
dev_err(mdp_dev, "aspace create, error %pe\n", aspace);
mmu->funcs->destroy(mmu);
+ return aspace;
}
+ msm_mmu_set_fault_handler(aspace->mmu, kms, msm_kms_fault_handler);
+
return aspace;
}
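The display fault path above takes at most one snapshot per flush: msm_kms_fault_handler() captures on the first fault, and msm_atomic_commit_tail() clears the counter once the flush completes. The gist of that once-per-flush gate, reduced to a hypothetical standalone example:

#include <linux/atomic.h>

static atomic_t example_capture;

static void example_on_fault(void)
{
	if (atomic_read(&example_capture) == 0) {
		/* take the snapshot here, then suppress further captures */
		atomic_inc(&example_capture);
	}
}

static void example_on_flush_done(void)
{
	/* re-arm the capture for the next flush cycle */
	atomic_set(&example_capture, 0);
}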
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index e60162744c66..43b58d052ee6 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -60,6 +60,13 @@ struct msm_kms_funcs {
void (*disable_commit)(struct msm_kms *kms);
/**
+ * @check_mode_changed:
+ *
+ * Verify if the commit requires a full modeset on one of CRTCs.
+ */
+ int (*check_mode_changed)(struct msm_kms *kms, struct drm_atomic_state *state);
+
+ /**
* Prepare for atomic commit. This is called after any previous
* (async or otherwise) commit has completed.
*/
@@ -128,6 +135,9 @@ struct msm_kms {
int irq;
bool irq_requested;
+ /* rate limit the snapshot capture to once per attach */
+ atomic_t fault_snapshot_capture;
+
/* mapper-id used to request GEM buffer mapped for scanout: */
struct msm_gem_address_space *aspace;
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 88af4f490881..daf91529e02b 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -42,6 +42,7 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks);
struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsigned long quirks);
+struct msm_mmu *msm_iommu_disp_new(struct device *dev, unsigned long quirks);
static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
int (*handler)(void *arg, unsigned long iova, int flags, void *data))
@@ -53,7 +54,8 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent);
int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr,
- int *asid);
+ int *asid);
+int msm_iommu_pagetable_walk(struct msm_mmu *mmu, unsigned long iova, uint64_t ptes[4]);
struct iommu_domain_geometry *msm_iommu_get_geometry(struct msm_mmu *mmu);
#endif /* __MSM_MMU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index c803556a8f64..c5651c39ac2a 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -59,8 +59,14 @@ static const struct drm_sched_backend_ops msm_sched_ops = {
struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
void *memptrs, uint64_t memptrs_iova)
{
+ struct drm_sched_init_args args = {
+ .ops = &msm_sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = num_hw_submissions,
+ .timeout = MAX_SCHEDULE_TIMEOUT,
+ .dev = gpu->dev->dev,
+ };
struct msm_ringbuffer *ring;
- long sched_timeout;
char name[32];
int ret;
@@ -87,6 +93,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
}
msm_gem_object_set_name(ring->bo, "ring%d", id);
+ args.name = to_msm_bo(ring->bo)->name;
ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
ring->next = ring->start;
@@ -95,13 +102,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
ring->memptrs = memptrs;
ring->memptrs_iova = memptrs_iova;
- /* currently managing hangcheck ourselves: */
- sched_timeout = MAX_SCHEDULE_TIMEOUT;
-
- ret = drm_sched_init(&ring->sched, &msm_sched_ops, NULL,
- DRM_SCHED_PRIORITY_COUNT,
- num_hw_submissions, 0, sched_timeout,
- NULL, NULL, to_msm_bo(ring->bo)->name, gpu->dev->dev);
+ ret = drm_sched_init(&ring->sched, &args);
if (ret) {
goto fail;
}
diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
index 35f7f40e405b..d2c8c46bb041 100644
--- a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
+++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
@@ -17,6 +17,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<bitfield name="CLK_EN" pos="5" type="boolean"/>
<bitfield name="CLK_EN_SEL" pos="4" type="boolean"/>
<bitfield name="BITCLK_SEL" low="2" high="3" type="uint"/>
+ <bitfield name="DSICLK_SEL" low="0" high="1" type="uint"/>
</reg32>
<reg32 offset="0x00018" name="GLBL_CTRL"/>
<reg32 offset="0x0001c" name="RBUF_CTRL"/>
diff --git a/drivers/gpu/drm/msm/registers/display/hdmi.xml b/drivers/gpu/drm/msm/registers/display/hdmi.xml
index 1cf1b14fbd91..0ebb96297dae 100644
--- a/drivers/gpu/drm/msm/registers/display/hdmi.xml
+++ b/drivers/gpu/drm/msm/registers/display/hdmi.xml
@@ -131,7 +131,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
-->
<bitfield name="GENERIC0_SEND" pos="0" type="boolean"/>
<bitfield name="GENERIC0_CONT" pos="1" type="boolean"/>
- <bitfield name="GENERIC0_UPDATE" low="2" high="3" type="uint"/> <!-- ??? -->
+ <bitfield name="GENERIC0_UPDATE" pos="2" type="boolean"/>
<bitfield name="GENERIC1_SEND" pos="4" type="boolean"/>
<bitfield name="GENERIC1_CONT" pos="5" type="boolean"/>
<bitfield name="GENERIC0_LINE" low="16" high="21" type="uint"/>
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 1050a4617fc1..7b3e979c51ec 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -110,3 +110,21 @@ config DRM_NOUVEAU_GSP_DEFAULT
help
Say Y here if you want to use the GSP codepaths by default on
Turing and Ampere GPUs.
+
+config DRM_NOUVEAU_CH7006
+ tristate "Chrontel ch7006 TV encoder"
+ depends on DRM_NOUVEAU
+ default m
+ help
+ Support for Chrontel ch7006 and similar TV encoders.
+
+ This driver is currently only useful if you're also using
+ the nouveau driver.
+
+config DRM_NOUVEAU_SIL164
+ tristate "Silicon Image sil164 TMDS transmitter"
+ depends on DRM_NOUVEAU
+ default m
+ help
+ Support for sil164 and similar single-link (or dual-link
+ when used in pairs) TMDS transmitters.
diff --git a/drivers/gpu/drm/nouveau/dispnv04/Kbuild b/drivers/gpu/drm/nouveau/dispnv04/Kbuild
index 975c4e226936..4c7bc6bb81b3 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/Kbuild
+++ b/drivers/gpu/drm/nouveau/dispnv04/Kbuild
@@ -6,7 +6,10 @@ nouveau-y += dispnv04/dac.o
nouveau-y += dispnv04/dfp.o
nouveau-y += dispnv04/disp.o
nouveau-y += dispnv04/hw.o
+nouveau-y += dispnv04/nouveau_i2c_encoder.o
nouveau-y += dispnv04/overlay.o
nouveau-y += dispnv04/tvmodesnv17.o
nouveau-y += dispnv04/tvnv04.o
nouveau-y += dispnv04/tvnv17.o
+
+include $(src)/dispnv04/i2c/Kbuild
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 504c421aa176..c724bacc67f8 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -35,7 +35,7 @@
#include "hw.h"
#include "nvreg.h"
-#include <drm/i2c/sil164.h>
+#include <dispnv04/i2c/sil164.h>
#include <subdev/i2c.h>
@@ -171,7 +171,7 @@ static struct drm_encoder *get_tmds_slave(struct drm_encoder *encoder)
list_for_each_entry(slave, &dev->mode_config.encoder_list, head) {
struct dcb_output *slave_dcb = nouveau_encoder(slave)->dcb;
- if (slave_dcb->type == DCB_OUTPUT_TMDS && get_slave_funcs(slave) &&
+ if (slave_dcb->type == DCB_OUTPUT_TMDS && get_encoder_i2c_funcs(slave) &&
slave_dcb->tmdsconf.slave_addr == dcb->tmdsconf.slave_addr)
return slave;
}
@@ -473,8 +473,9 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
/* Init external transmitters */
slave_encoder = get_tmds_slave(encoder);
if (slave_encoder)
- get_slave_funcs(slave_encoder)->mode_set(
- slave_encoder, &nv_encoder->mode, &nv_encoder->mode);
+ get_encoder_i2c_funcs(slave_encoder)->mode_set(slave_encoder,
+ &nv_encoder->mode,
+ &nv_encoder->mode);
helper->dpms(encoder, DRM_MODE_DPMS_ON);
@@ -614,8 +615,8 @@ static void nv04_dfp_destroy(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- if (get_slave_funcs(encoder))
- get_slave_funcs(encoder)->destroy(encoder);
+ if (get_encoder_i2c_funcs(encoder))
+ get_encoder_i2c_funcs(encoder)->destroy(encoder);
drm_encoder_cleanup(encoder);
kfree(nv_encoder);
@@ -649,8 +650,8 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
if (type < 0)
return;
- drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
- &bus->i2c, &info[type].dev);
+ nouveau_i2c_encoder_init(dev, to_encoder_i2c(encoder),
+ &bus->i2c, &info[type].dev);
}
static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
diff --git a/drivers/gpu/drm/nouveau/dispnv04/i2c/Kbuild b/drivers/gpu/drm/nouveau/dispnv04/i2c/Kbuild
new file mode 100644
index 000000000000..3fddfc97bcb3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv04/i2c/Kbuild
@@ -0,0 +1,5 @@
+ch7006-y := dispnv04/i2c/ch7006_drv.o dispnv04/i2c/ch7006_mode.o
+obj-$(CONFIG_DRM_NOUVEAU_CH7006) += ch7006.o
+
+sil164-y := dispnv04/i2c/sil164_drv.o
+obj-$(CONFIG_DRM_NOUVEAU_SIL164) += sil164.o
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/nouveau/dispnv04/i2c/ch7006_drv.c
index fcb0fcd6c897..fd2150e07e36 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/i2c/ch7006_drv.c
@@ -47,14 +47,14 @@ static void ch7006_encoder_destroy(struct drm_encoder *encoder)
drm_property_destroy(encoder->dev, priv->scale_property);
kfree(priv);
- to_encoder_slave(encoder)->slave_priv = NULL;
+ to_encoder_i2c(encoder)->encoder_i2c_priv = NULL;
- drm_i2c_encoder_destroy(encoder);
+ nouveau_i2c_encoder_destroy(encoder);
}
static void ch7006_encoder_dpms(struct drm_encoder *encoder, int mode)
{
- struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct i2c_client *client = nouveau_i2c_encoder_get_client(encoder);
struct ch7006_priv *priv = to_ch7006_priv(encoder);
struct ch7006_state *state = &priv->state;
@@ -71,7 +71,7 @@ static void ch7006_encoder_dpms(struct drm_encoder *encoder, int mode)
static void ch7006_encoder_save(struct drm_encoder *encoder)
{
- struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct i2c_client *client = nouveau_i2c_encoder_get_client(encoder);
struct ch7006_priv *priv = to_ch7006_priv(encoder);
ch7006_dbg(client, "\n");
@@ -81,7 +81,7 @@ static void ch7006_encoder_save(struct drm_encoder *encoder)
static void ch7006_encoder_restore(struct drm_encoder *encoder)
{
- struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct i2c_client *client = nouveau_i2c_encoder_get_client(encoder);
struct ch7006_priv *priv = to_ch7006_priv(encoder);
ch7006_dbg(client, "\n");
@@ -104,7 +104,7 @@ static bool ch7006_encoder_mode_fixup(struct drm_encoder *encoder,
}
static int ch7006_encoder_mode_valid(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
if (ch7006_lookup_mode(encoder, mode))
return MODE_OK;
@@ -116,7 +116,7 @@ static void ch7006_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *drm_mode,
struct drm_display_mode *adjusted_mode)
{
- struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct i2c_client *client = nouveau_i2c_encoder_get_client(encoder);
struct ch7006_priv *priv = to_ch7006_priv(encoder);
struct ch7006_encoder_params *params = &priv->params;
struct ch7006_state *state = &priv->state;
@@ -179,7 +179,7 @@ static void ch7006_encoder_mode_set(struct drm_encoder *encoder,
static enum drm_connector_status ch7006_encoder_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
- struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct i2c_client *client = nouveau_i2c_encoder_get_client(encoder);
struct ch7006_priv *priv = to_ch7006_priv(encoder);
struct ch7006_state *state = &priv->state;
int det;
@@ -285,7 +285,7 @@ static int ch7006_encoder_set_property(struct drm_encoder *encoder,
struct drm_property *property,
uint64_t val)
{
- struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct i2c_client *client = nouveau_i2c_encoder_get_client(encoder);
struct ch7006_priv *priv = to_ch7006_priv(encoder);
struct ch7006_state *state = &priv->state;
struct drm_mode_config *conf = &encoder->dev->mode_config;
@@ -370,7 +370,7 @@ static int ch7006_encoder_set_property(struct drm_encoder *encoder,
return 0;
}
-static const struct drm_encoder_slave_funcs ch7006_encoder_funcs = {
+static const struct nouveau_i2c_encoder_funcs ch7006_encoder_funcs = {
.set_config = ch7006_encoder_set_config,
.destroy = ch7006_encoder_destroy,
.dpms = ch7006_encoder_dpms,
@@ -437,7 +437,7 @@ static int ch7006_resume(struct device *dev)
static int ch7006_encoder_init(struct i2c_client *client,
struct drm_device *dev,
- struct drm_encoder_slave *encoder)
+ struct nouveau_i2c_encoder *encoder)
{
struct ch7006_priv *priv;
int i;
@@ -448,8 +448,8 @@ static int ch7006_encoder_init(struct i2c_client *client,
if (!priv)
return -ENOMEM;
- encoder->slave_priv = priv;
- encoder->slave_funcs = &ch7006_encoder_funcs;
+ encoder->encoder_i2c_priv = priv;
+ encoder->encoder_i2c_funcs = &ch7006_encoder_funcs;
priv->norm = TV_NORM_PAL;
priv->select_subconnector = DRM_MODE_SUBCONNECTOR_Automatic;
@@ -495,7 +495,7 @@ static const struct dev_pm_ops ch7006_pm_ops = {
.resume = ch7006_resume,
};
-static struct drm_i2c_encoder_driver ch7006_driver = {
+static struct nouveau_i2c_encoder_driver ch7006_driver = {
.i2c_driver = {
.probe = ch7006_probe,
.remove = ch7006_remove,
@@ -516,12 +516,12 @@ static struct drm_i2c_encoder_driver ch7006_driver = {
static int __init ch7006_init(void)
{
- return drm_i2c_encoder_register(THIS_MODULE, &ch7006_driver);
+ return i2c_add_driver(&ch7006_driver.i2c_driver);
}
static void __exit ch7006_exit(void)
{
- drm_i2c_encoder_unregister(&ch7006_driver);
+ i2c_del_driver(&ch7006_driver.i2c_driver);
}
int ch7006_debug;
diff --git a/drivers/gpu/drm/i2c/ch7006_mode.c b/drivers/gpu/drm/nouveau/dispnv04/i2c/ch7006_mode.c
index 6afe6d0ee630..e58d94451959 100644
--- a/drivers/gpu/drm/i2c/ch7006_mode.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/i2c/ch7006_mode.c
@@ -198,7 +198,7 @@ const struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
void ch7006_setup_levels(struct drm_encoder *encoder)
{
- struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct i2c_client *client = nouveau_i2c_encoder_get_client(encoder);
struct ch7006_priv *priv = to_ch7006_priv(encoder);
uint8_t *regs = priv->state.regs;
const struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
@@ -229,7 +229,7 @@ void ch7006_setup_levels(struct drm_encoder *encoder)
void ch7006_setup_subcarrier(struct drm_encoder *encoder)
{
- struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct i2c_client *client = nouveau_i2c_encoder_get_client(encoder);
struct ch7006_priv *priv = to_ch7006_priv(encoder);
struct ch7006_state *state = &priv->state;
const struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
@@ -253,7 +253,7 @@ void ch7006_setup_subcarrier(struct drm_encoder *encoder)
void ch7006_setup_pll(struct drm_encoder *encoder)
{
- struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct i2c_client *client = nouveau_i2c_encoder_get_client(encoder);
struct ch7006_priv *priv = to_ch7006_priv(encoder);
uint8_t *regs = priv->state.regs;
const struct ch7006_mode *mode = priv->mode;
@@ -324,7 +324,7 @@ void ch7006_setup_power_state(struct drm_encoder *encoder)
void ch7006_setup_properties(struct drm_encoder *encoder)
{
- struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct i2c_client *client = nouveau_i2c_encoder_get_client(encoder);
struct ch7006_priv *priv = to_ch7006_priv(encoder);
struct ch7006_state *state = &priv->state;
const struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
diff --git a/drivers/gpu/drm/i2c/ch7006_priv.h b/drivers/gpu/drm/nouveau/dispnv04/i2c/ch7006_priv.h
index 052bdc48a339..5ad5157a2c02 100644
--- a/drivers/gpu/drm/i2c/ch7006_priv.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/i2c/ch7006_priv.h
@@ -24,12 +24,13 @@
*
*/
-#ifndef __DRM_I2C_CH7006_PRIV_H__
-#define __DRM_I2C_CH7006_PRIV_H__
+#ifndef __NOUVEAU_I2C_CH7006_PRIV_H__
+#define __NOUVEAU_I2C_CH7006_PRIV_H__
-#include <drm/drm_encoder_slave.h>
#include <drm/drm_probe_helper.h>
-#include <drm/i2c/ch7006.h>
+
+#include <dispnv04/i2c/encoder_i2c.h>
+#include <dispnv04/i2c/ch7006.h>
typedef int64_t fixed;
#define fixed1 (1LL << 32)
@@ -99,7 +100,7 @@ struct ch7006_priv {
};
#define to_ch7006_priv(x) \
- ((struct ch7006_priv *)to_encoder_slave(x)->slave_priv)
+ ((struct ch7006_priv *)to_encoder_i2c(x)->encoder_i2c_priv)
extern int ch7006_debug;
extern char *ch7006_tv_norm;
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/nouveau/dispnv04/i2c/sil164_drv.c
index c17afa025d9d..54ea8459332d 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/i2c/sil164_drv.c
@@ -27,10 +27,11 @@
#include <linux/module.h>
#include <drm/drm_drv.h>
-#include <drm/drm_encoder_slave.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <drm/i2c/sil164.h>
+
+#include <dispnv04/i2c/encoder_i2c.h>
+#include <dispnv04/i2c/sil164.h>
struct sil164_priv {
struct sil164_encoder_params config;
@@ -41,7 +42,7 @@ struct sil164_priv {
};
#define to_sil164_priv(x) \
- ((struct sil164_priv *)to_encoder_slave(x)->slave_priv)
+ ((struct sil164_priv *)to_encoder_i2c(x)->encoder_i2c_priv)
#define sil164_dbg(client, format, ...) do { \
if (drm_debug_enabled(DRM_UT_KMS)) \
@@ -221,7 +222,7 @@ sil164_encoder_dpms(struct drm_encoder *encoder, int mode)
bool on = (mode == DRM_MODE_DPMS_ON);
bool duallink = (on && encoder->crtc->mode.clock > 165000);
- sil164_set_power_state(drm_i2c_encoder_get_client(encoder), on);
+ sil164_set_power_state(nouveau_i2c_encoder_get_client(encoder), on);
if (priv->duallink_slave)
sil164_set_power_state(priv->duallink_slave, duallink);
@@ -232,7 +233,7 @@ sil164_encoder_save(struct drm_encoder *encoder)
{
struct sil164_priv *priv = to_sil164_priv(encoder);
- sil164_save_state(drm_i2c_encoder_get_client(encoder),
+ sil164_save_state(nouveau_i2c_encoder_get_client(encoder),
priv->saved_state);
if (priv->duallink_slave)
@@ -245,7 +246,7 @@ sil164_encoder_restore(struct drm_encoder *encoder)
{
struct sil164_priv *priv = to_sil164_priv(encoder);
- sil164_restore_state(drm_i2c_encoder_get_client(encoder),
+ sil164_restore_state(nouveau_i2c_encoder_get_client(encoder),
priv->saved_state);
if (priv->duallink_slave)
@@ -255,7 +256,7 @@ sil164_encoder_restore(struct drm_encoder *encoder)
static int
sil164_encoder_mode_valid(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct sil164_priv *priv = to_sil164_priv(encoder);
@@ -277,7 +278,7 @@ sil164_encoder_mode_set(struct drm_encoder *encoder,
struct sil164_priv *priv = to_sil164_priv(encoder);
bool duallink = adjusted_mode->clock > 165000;
- sil164_init_state(drm_i2c_encoder_get_client(encoder),
+ sil164_init_state(nouveau_i2c_encoder_get_client(encoder),
&priv->config, duallink);
if (priv->duallink_slave)
@@ -291,7 +292,7 @@ static enum drm_connector_status
sil164_encoder_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
- struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct i2c_client *client = nouveau_i2c_encoder_get_client(encoder);
if (sil164_read(client, SIL164_DETECT) & SIL164_DETECT_HOTPLUG_STAT)
return connector_status_connected;
@@ -330,10 +331,10 @@ sil164_encoder_destroy(struct drm_encoder *encoder)
i2c_unregister_device(priv->duallink_slave);
kfree(priv);
- drm_i2c_encoder_destroy(encoder);
+ nouveau_i2c_encoder_destroy(encoder);
}
-static const struct drm_encoder_slave_funcs sil164_encoder_funcs = {
+static const struct nouveau_i2c_encoder_funcs sil164_encoder_funcs = {
.set_config = sil164_encoder_set_config,
.destroy = sil164_encoder_destroy,
.dpms = sil164_encoder_dpms,
@@ -393,7 +394,7 @@ sil164_detect_slave(struct i2c_client *client)
static int
sil164_encoder_init(struct i2c_client *client,
struct drm_device *dev,
- struct drm_encoder_slave *encoder)
+ struct nouveau_i2c_encoder *encoder)
{
struct sil164_priv *priv;
struct i2c_client *slave_client;
@@ -402,8 +403,8 @@ sil164_encoder_init(struct i2c_client *client,
if (!priv)
return -ENOMEM;
- encoder->slave_priv = priv;
- encoder->slave_funcs = &sil164_encoder_funcs;
+ encoder->encoder_i2c_priv = priv;
+ encoder->encoder_i2c_funcs = &sil164_encoder_funcs;
slave_client = sil164_detect_slave(client);
if (!IS_ERR(slave_client))
@@ -418,7 +419,7 @@ static const struct i2c_device_id sil164_ids[] = {
};
MODULE_DEVICE_TABLE(i2c, sil164_ids);
-static struct drm_i2c_encoder_driver sil164_driver = {
+static struct nouveau_i2c_encoder_driver sil164_driver = {
.i2c_driver = {
.probe = sil164_probe,
.driver = {
@@ -434,13 +435,13 @@ static struct drm_i2c_encoder_driver sil164_driver = {
static int __init
sil164_init(void)
{
- return drm_i2c_encoder_register(THIS_MODULE, &sil164_driver);
+ return i2c_add_driver(&sil164_driver.i2c_driver);
}
static void __exit
sil164_exit(void)
{
- drm_i2c_encoder_unregister(&sil164_driver);
+ i2c_del_driver(&sil164_driver.i2c_driver);
}
MODULE_AUTHOR("Francisco Jerez <currojerez@riseup.net>");
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/nouveau/dispnv04/nouveau_i2c_encoder.c
index e464429d32df..e2bf99c43336 100644
--- a/drivers/gpu/drm/drm_encoder_slave.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/nouveau_i2c_encoder.c
@@ -26,10 +26,10 @@
#include <linux/module.h>
-#include <drm/drm_encoder_slave.h>
+#include <dispnv04/i2c/encoder_i2c.h>
/**
- * drm_i2c_encoder_init - Initialize an I2C slave encoder
+ * nouveau_i2c_encoder_init - Initialize an I2C slave encoder
* @dev: DRM device.
* @encoder: Encoder to be attached to the I2C device. You aren't
* required to have called drm_encoder_init() before.
@@ -40,7 +40,7 @@
*
* Create an I2C device on the specified bus (the module containing its
* driver is transparently loaded) and attach it to the specified
- * &drm_encoder_slave. The @slave_funcs field will be initialized with
+ * &nouveau_i2c_encoder. The @encoder_i2c_funcs field will be initialized with
* the hooks provided by the slave driver.
*
* If @info.platform_data is non-NULL it will be used as the initial
@@ -49,14 +49,14 @@
* Returns 0 on success or a negative errno on failure, in particular,
* -ENODEV is returned when no matching driver is found.
*/
-int drm_i2c_encoder_init(struct drm_device *dev,
- struct drm_encoder_slave *encoder,
- struct i2c_adapter *adap,
- const struct i2c_board_info *info)
+int nouveau_i2c_encoder_init(struct drm_device *dev,
+ struct nouveau_i2c_encoder *encoder,
+ struct i2c_adapter *adap,
+ const struct i2c_board_info *info)
{
struct module *module = NULL;
struct i2c_client *client;
- struct drm_i2c_encoder_driver *encoder_drv;
+ struct nouveau_i2c_encoder_driver *encoder_drv;
int err = 0;
request_module("%s%s", I2C_MODULE_PREFIX, info->type);
@@ -73,16 +73,16 @@ int drm_i2c_encoder_init(struct drm_device *dev,
goto fail_unregister;
}
- encoder->bus_priv = client;
+ encoder->i2c_client = client;
- encoder_drv = to_drm_i2c_encoder_driver(to_i2c_driver(client->dev.driver));
+ encoder_drv = to_nouveau_i2c_encoder_driver(to_i2c_driver(client->dev.driver));
err = encoder_drv->encoder_init(client, dev, encoder);
if (err)
goto fail_module_put;
if (info->platform_data)
- encoder->slave_funcs->set_config(&encoder->base,
+ encoder->encoder_i2c_funcs->set_config(&encoder->base,
info->platform_data);
return 0;
@@ -93,90 +93,53 @@ fail_unregister:
i2c_unregister_device(client);
return err;
}
-EXPORT_SYMBOL(drm_i2c_encoder_init);
/**
- * drm_i2c_encoder_destroy - Unregister the I2C device backing an encoder
+ * nouveau_i2c_encoder_destroy - Unregister the I2C device backing an encoder
* @drm_encoder: Encoder to be unregistered.
*
* This should be called from the @destroy method of an I2C slave
* encoder driver once I2C access is no longer needed.
*/
-void drm_i2c_encoder_destroy(struct drm_encoder *drm_encoder)
+void nouveau_i2c_encoder_destroy(struct drm_encoder *drm_encoder)
{
- struct drm_encoder_slave *encoder = to_encoder_slave(drm_encoder);
- struct i2c_client *client = drm_i2c_encoder_get_client(drm_encoder);
+ struct nouveau_i2c_encoder *encoder = to_encoder_i2c(drm_encoder);
+ struct i2c_client *client = nouveau_i2c_encoder_get_client(drm_encoder);
struct module *module = client->dev.driver->owner;
i2c_unregister_device(client);
- encoder->bus_priv = NULL;
+ encoder->i2c_client = NULL;
module_put(module);
}
-EXPORT_SYMBOL(drm_i2c_encoder_destroy);
+EXPORT_SYMBOL(nouveau_i2c_encoder_destroy);
/*
* Wrapper fxns which can be plugged in to drm_encoder_helper_funcs:
*/
-static inline const struct drm_encoder_slave_funcs *
-get_slave_funcs(struct drm_encoder *enc)
+bool nouveau_i2c_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- return to_encoder_slave(enc)->slave_funcs;
-}
-
-void drm_i2c_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
- get_slave_funcs(encoder)->dpms(encoder, mode);
-}
-EXPORT_SYMBOL(drm_i2c_encoder_dpms);
-
-bool drm_i2c_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- if (!get_slave_funcs(encoder)->mode_fixup)
+ if (!get_encoder_i2c_funcs(encoder)->mode_fixup)
return true;
- return get_slave_funcs(encoder)->mode_fixup(encoder, mode, adjusted_mode);
-}
-EXPORT_SYMBOL(drm_i2c_encoder_mode_fixup);
-
-void drm_i2c_encoder_prepare(struct drm_encoder *encoder)
-{
- drm_i2c_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-}
-EXPORT_SYMBOL(drm_i2c_encoder_prepare);
-
-void drm_i2c_encoder_commit(struct drm_encoder *encoder)
-{
- drm_i2c_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
-}
-EXPORT_SYMBOL(drm_i2c_encoder_commit);
-
-void drm_i2c_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- get_slave_funcs(encoder)->mode_set(encoder, mode, adjusted_mode);
+ return get_encoder_i2c_funcs(encoder)->mode_fixup(encoder, mode, adjusted_mode);
}
-EXPORT_SYMBOL(drm_i2c_encoder_mode_set);
-enum drm_connector_status drm_i2c_encoder_detect(struct drm_encoder *encoder,
- struct drm_connector *connector)
+enum drm_connector_status nouveau_i2c_encoder_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
{
- return get_slave_funcs(encoder)->detect(encoder, connector);
+ return get_encoder_i2c_funcs(encoder)->detect(encoder, connector);
}
-EXPORT_SYMBOL(drm_i2c_encoder_detect);
-void drm_i2c_encoder_save(struct drm_encoder *encoder)
+void nouveau_i2c_encoder_save(struct drm_encoder *encoder)
{
- get_slave_funcs(encoder)->save(encoder);
+ get_encoder_i2c_funcs(encoder)->save(encoder);
}
-EXPORT_SYMBOL(drm_i2c_encoder_save);
-void drm_i2c_encoder_restore(struct drm_encoder *encoder)
+void nouveau_i2c_encoder_restore(struct drm_encoder *encoder)
{
- get_slave_funcs(encoder)->restore(encoder);
+ get_encoder_i2c_funcs(encoder)->restore(encoder);
}
-EXPORT_SYMBOL(drm_i2c_encoder_restore);
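For orientation, a minimal sketch of the intended call pattern on the GPU side; the my_output_* names are hypothetical and only the functions declared by this file are real. The tvnv04.c changes below show the full helper table in context.

    #include <drm/drm_modeset_helper_vtables.h>
    #include <dispnv04/i2c/encoder_i2c.h>

    static void my_output_dpms(struct drm_encoder *encoder, int mode)
    {
            /* forward to the external encoder's hook */
            get_encoder_i2c_funcs(encoder)->dpms(encoder, mode);
    }

    static const struct drm_encoder_helper_funcs my_output_helper_funcs = {
            .dpms       = my_output_dpms,
            .mode_fixup = nouveau_i2c_encoder_mode_fixup,
            .detect     = nouveau_i2c_encoder_detect,
    };

    static int my_output_attach(struct drm_device *dev,
                                struct nouveau_i2c_encoder *enc,
                                struct i2c_adapter *adap,
                                const struct i2c_board_info *info)
    {
            int ret;

            /* Binds the I2C client and fills enc->encoder_i2c_funcs. */
            ret = nouveau_i2c_encoder_init(dev, enc, adap, info);
            if (ret)
                    return ret;     /* -ENODEV if no matching I2C driver */

            drm_encoder_helper_add(&enc->base, &my_output_helper_funcs);
            return 0;
    }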
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index d3014027a812..c61ab083f62e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -32,7 +32,7 @@
#include "hw.h"
#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/i2c/ch7006.h>
+#include <dispnv04/i2c/ch7006.h>
static struct nvkm_i2c_bus_probe nv04_tv_encoder_info[] = {
{
@@ -99,7 +99,7 @@ static void nv04_tv_dpms(struct drm_encoder *encoder, int mode)
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
- get_slave_funcs(encoder)->dpms(encoder, mode);
+ get_encoder_i2c_funcs(encoder)->dpms(encoder, mode);
}
static void nv04_tv_bind(struct drm_device *dev, int head, bool bind)
@@ -158,7 +158,7 @@ static void nv04_tv_mode_set(struct drm_encoder *encoder,
regp->tv_vskew = 1;
regp->tv_vsync_delay = 1;
- get_slave_funcs(encoder)->mode_set(encoder, mode, adjusted_mode);
+ get_encoder_i2c_funcs(encoder)->mode_set(encoder, mode, adjusted_mode);
}
static void nv04_tv_commit(struct drm_encoder *encoder)
@@ -178,7 +178,7 @@ static void nv04_tv_commit(struct drm_encoder *encoder)
static void nv04_tv_destroy(struct drm_encoder *encoder)
{
- get_slave_funcs(encoder)->destroy(encoder);
+ get_encoder_i2c_funcs(encoder)->destroy(encoder);
drm_encoder_cleanup(encoder);
kfree(encoder->helper_private);
@@ -191,11 +191,11 @@ static const struct drm_encoder_funcs nv04_tv_funcs = {
static const struct drm_encoder_helper_funcs nv04_tv_helper_funcs = {
.dpms = nv04_tv_dpms,
- .mode_fixup = drm_i2c_encoder_mode_fixup,
+ .mode_fixup = nouveau_i2c_encoder_mode_fixup,
.prepare = nv04_tv_prepare,
.commit = nv04_tv_commit,
.mode_set = nv04_tv_mode_set,
- .detect = drm_i2c_encoder_detect,
+ .detect = nouveau_i2c_encoder_detect,
};
int
@@ -226,8 +226,8 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
NULL);
drm_encoder_helper_add(encoder, &nv04_tv_helper_funcs);
- nv_encoder->enc_save = drm_i2c_encoder_save;
- nv_encoder->enc_restore = drm_i2c_encoder_restore;
+ nv_encoder->enc_save = nouveau_i2c_encoder_save;
+ nv_encoder->enc_restore = nouveau_i2c_encoder_restore;
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
@@ -235,14 +235,14 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
nv_encoder->or = ffs(entry->or) - 1;
/* Run the slave-specific initialization */
- ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
- &bus->i2c,
- &nv04_tv_encoder_info[type].dev);
+ ret = nouveau_i2c_encoder_init(dev, to_encoder_i2c(encoder),
+ &bus->i2c,
+ &nv04_tv_encoder_info[type].dev);
if (ret < 0)
goto fail_cleanup;
/* Attach it to the specified connector. */
- get_slave_funcs(encoder)->create_resources(encoder, connector);
+ get_encoder_i2c_funcs(encoder)->create_resources(encoder, connector);
drm_connector_attach_encoder(connector, encoder);
return 0;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 3ecb101d23e9..06de05fe5db6 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -308,7 +308,7 @@ static int nv17_tv_get_modes(struct drm_encoder *encoder,
}
static int nv17_tv_mode_valid(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
@@ -779,7 +779,7 @@ static const struct drm_encoder_helper_funcs nv17_tv_helper_funcs = {
.detect = nv17_tv_detect,
};
-static const struct drm_encoder_slave_funcs nv17_tv_slave_funcs = {
+static const struct nouveau_i2c_encoder_funcs nv17_tv_encoder_i2c_funcs = {
.get_modes = nv17_tv_get_modes,
.mode_valid = nv17_tv_mode_valid,
.create_resources = nv17_tv_create_resources,
@@ -818,7 +818,7 @@ nv17_tv_create(struct drm_connector *connector, struct dcb_output *entry)
drm_encoder_init(dev, encoder, &nv17_tv_funcs, DRM_MODE_ENCODER_TVDAC,
NULL);
drm_encoder_helper_add(encoder, &nv17_tv_helper_funcs);
- to_encoder_slave(encoder)->slave_funcs = &nv17_tv_slave_funcs;
+ to_encoder_i2c(encoder)->encoder_i2c_funcs = &nv17_tv_encoder_i2c_funcs;
tv_enc->base.enc_save = nv17_tv_save;
tv_enc->base.enc_restore = nv17_tv_restore;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 62d72b7a8d04..504cb3f2054b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1141,7 +1141,7 @@ nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
static enum drm_mode_status
nv50_mstc_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct nv50_mstc *mstc = nv50_mstc(connector);
struct nouveau_encoder *outp = mstc->mstm->outp;
diff --git a/drivers/gpu/drm/nouveau/include/dispnv04/i2c/ch7006.h b/drivers/gpu/drm/nouveau/include/dispnv04/i2c/ch7006.h
new file mode 100644
index 000000000000..1a6fa405f85b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/dispnv04/i2c/ch7006.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_I2C_CH7006_H__
+#define __NOUVEAU_I2C_CH7006_H__
+
+/**
+ * struct ch7006_encoder_params
+ *
+ * Describes how the ch7006 is wired up with the GPU. It should be
+ * used as the @params parameter of its @set_config method.
+ *
+ * See "http://www.chrontel.com/pdf/7006.pdf" for their precise
+ * meaning.
+ */
+struct ch7006_encoder_params {
+ /* private: FIXME: document the members */
+ enum {
+ CH7006_FORMAT_RGB16 = 0,
+ CH7006_FORMAT_YCrCb24m16,
+ CH7006_FORMAT_RGB24m16,
+ CH7006_FORMAT_RGB15,
+ CH7006_FORMAT_RGB24m12C,
+ CH7006_FORMAT_RGB24m12I,
+ CH7006_FORMAT_RGB24m8,
+ CH7006_FORMAT_RGB16m8,
+ CH7006_FORMAT_RGB15m8,
+ CH7006_FORMAT_YCrCb24m8,
+ } input_format;
+
+ enum {
+ CH7006_CLOCK_SLAVE = 0,
+ CH7006_CLOCK_MASTER,
+ } clock_mode;
+
+ enum {
+ CH7006_CLOCK_EDGE_NEG = 0,
+ CH7006_CLOCK_EDGE_POS,
+ } clock_edge;
+
+ int xcm, pcm;
+
+ enum {
+ CH7006_SYNC_SLAVE = 0,
+ CH7006_SYNC_MASTER,
+ } sync_direction;
+
+ enum {
+ CH7006_SYNC_SEPARATED = 0,
+ CH7006_SYNC_EMBEDDED,
+ } sync_encoding;
+
+ enum {
+ CH7006_POUT_1_8V = 0,
+ CH7006_POUT_3_3V,
+ } pout_level;
+
+ enum {
+ CH7006_ACTIVE_HSYNC = 0,
+ CH7006_ACTIVE_DSTART,
+ } active_detect;
+};
+
+#endif
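As an illustration only (the I2C address and parameter values are placeholders, not taken from any board table), a GPU driver describes the wiring in this structure and lets nouveau_i2c_encoder_init() forward it to the encoder's set_config() hook via the board info's platform_data:

    static struct ch7006_encoder_params my_ch7006_params = {
            .input_format   = CH7006_FORMAT_RGB16,
            .clock_mode     = CH7006_CLOCK_MASTER,
            .clock_edge     = CH7006_CLOCK_EDGE_NEG,
            .sync_direction = CH7006_SYNC_SLAVE,
            .sync_encoding  = CH7006_SYNC_SEPARATED,
            .pout_level     = CH7006_POUT_3_3V,
            .active_detect  = CH7006_ACTIVE_HSYNC,
    };

    static const struct i2c_board_info my_ch7006_info = {
            I2C_BOARD_INFO("ch7006", 0x75),         /* address is illustrative */
            .platform_data = &my_ch7006_params,
    };

    /* nouveau_i2c_encoder_init(dev, enc, adap, &my_ch7006_info) then calls
     * the driver's set_config() with .platform_data once the client binds. */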
diff --git a/drivers/gpu/drm/nouveau/include/dispnv04/i2c/encoder_i2c.h b/drivers/gpu/drm/nouveau/include/dispnv04/i2c/encoder_i2c.h
new file mode 100644
index 000000000000..31334aa90781
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/dispnv04/i2c/encoder_i2c.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_ENCODER_I2C_H__
+#define __NOUVEAU_ENCODER_I2C_H__
+
+#include <linux/i2c.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_encoder.h>
+
+/**
+ * struct nouveau_i2c_encoder_funcs - Entry points exposed by an I2C encoder driver
+ *
+ * Most of its members are analogous to the function pointers in
+ * &drm_encoder_helper_funcs and they can optionally be used to
+ * initialize the latter. Connector-like methods (e.g. @get_modes and
+ * @set_property) will typically be wrapped around and only be called
+ * if the encoder is the currently selected one for the connector.
+ */
+struct nouveau_i2c_encoder_funcs {
+ /**
+ * @set_config: Initialize any encoder-specific modesetting parameters.
+ * The meaning of the @params parameter is implementation dependent. It
+ * will usually be a structure with DVO port data format settings or
+ * timings. It's not required for the new parameters to take effect
+ * until the next mode is set.
+ */
+ void (*set_config)(struct drm_encoder *encoder,
+ void *params);
+
+ /**
+ * @destroy: Analogous to &drm_encoder_funcs @destroy callback.
+ */
+ void (*destroy)(struct drm_encoder *encoder);
+
+ /**
+ * @dpms: Analogous to &drm_encoder_helper_funcs @dpms callback.
+ */
+ void (*dpms)(struct drm_encoder *encoder, int mode);
+
+ /**
+ * @save: Save state. Wrapped by nouveau_i2c_encoder_save().
+ */
+ void (*save)(struct drm_encoder *encoder);
+
+ /**
+ * @restore: Restore state. Wrapped by nouveau_i2c_encoder_restore().
+ */
+ void (*restore)(struct drm_encoder *encoder);
+
+ /**
+ * @mode_fixup: Analogous to &drm_encoder_helper_funcs @mode_fixup
+ * callback. Wrapped by nouveau_i2c_encoder_mode_fixup().
+ */
+ bool (*mode_fixup)(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ /**
+ * @mode_valid: Analogous to &drm_encoder_helper_funcs @mode_valid.
+ */
+ int (*mode_valid)(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode);
+ /**
+ * @mode_set: Analogous to &drm_encoder_helper_funcs @mode_set
+ * callback.
+ */
+ void (*mode_set)(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ /**
+ * @detect: Analogous to &drm_encoder_helper_funcs @detect
+ * callback. Wrapped by nouveau_i2c_encoder_detect().
+ */
+ enum drm_connector_status (*detect)(struct drm_encoder *encoder,
+ struct drm_connector *connector);
+ /**
+ * @get_modes: Get modes.
+ */
+ int (*get_modes)(struct drm_encoder *encoder,
+ struct drm_connector *connector);
+ /**
+ * @create_resources: Create resources.
+ */
+ int (*create_resources)(struct drm_encoder *encoder,
+ struct drm_connector *connector);
+ /**
+ * @set_property: Set property.
+ */
+ int (*set_property)(struct drm_encoder *encoder,
+ struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val);
+};
+
+/**
+ * struct nouveau_i2c_encoder - I2C encoder struct
+ *
+ * A &nouveau_i2c_encoder has two sets of callbacks, @encoder_i2c_funcs and the
+ * ones in @base. The former are never actually called by the common
+ * CRTC code; they are just a convenience for splitting the encoder
+ * functions into an upper, GPU-specific layer and a (hopefully)
+ * GPU-agnostic lower layer. It is the GPU driver's responsibility to
+ * call the nouveau_i2c_encoder methods when appropriate.
+ *
+ * nouveau_i2c_encoder_init() provides a way to get an implementation of
+ * this.
+ */
+struct nouveau_i2c_encoder {
+ /**
+ * @base: DRM encoder object.
+ */
+ struct drm_encoder base;
+
+ /**
+ * @encoder_i2c_funcs: I2C encoder callbacks.
+ */
+ const struct nouveau_i2c_encoder_funcs *encoder_i2c_funcs;
+
+ /**
+ * @encoder_i2c_priv: I2C encoder private data.
+ */
+ void *encoder_i2c_priv;
+
+ /**
+ * @i2c_client: corresponding I2C client structure
+ */
+ struct i2c_client *i2c_client;
+};
+
+#define to_encoder_i2c(x) container_of((x), struct nouveau_i2c_encoder, base)
+
+int nouveau_i2c_encoder_init(struct drm_device *dev,
+ struct nouveau_i2c_encoder *encoder,
+ struct i2c_adapter *adap,
+ const struct i2c_board_info *info);
+
+static inline const struct nouveau_i2c_encoder_funcs *
+get_encoder_i2c_funcs(struct drm_encoder *enc)
+{
+ return to_encoder_i2c(enc)->encoder_i2c_funcs;
+}
+
+/**
+ * struct nouveau_i2c_encoder_driver
+ *
+ * Describes a device driver for an encoder connected to the GPU through an I2C
+ * bus.
+ */
+struct nouveau_i2c_encoder_driver {
+ /**
+ * @i2c_driver: I2C device driver description.
+ */
+ struct i2c_driver i2c_driver;
+
+ /**
+ * @encoder_init: Callback to allocate any per-encoder data structures
+ * and to initialize the @encoder_i2c_funcs and (optionally) @encoder_i2c_priv
+ * members of @encoder.
+ */
+ int (*encoder_init)(struct i2c_client *client,
+ struct drm_device *dev,
+ struct nouveau_i2c_encoder *encoder);
+
+};
+
+#define to_nouveau_i2c_encoder_driver(x) container_of((x), \
+ struct nouveau_i2c_encoder_driver, \
+ i2c_driver)
+
+/**
+ * nouveau_i2c_encoder_get_client - Get the I2C client corresponding to an encoder
+ * @encoder: The encoder
+ */
+static inline struct i2c_client *nouveau_i2c_encoder_get_client(struct drm_encoder *encoder)
+{
+ return to_encoder_i2c(encoder)->i2c_client;
+}
+
+void nouveau_i2c_encoder_destroy(struct drm_encoder *encoder);
+
+/*
+ * Wrapper fxns which can be plugged in to drm_encoder_helper_funcs:
+ */
+
+bool nouveau_i2c_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+enum drm_connector_status nouveau_i2c_encoder_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector);
+void nouveau_i2c_encoder_save(struct drm_encoder *encoder);
+void nouveau_i2c_encoder_restore(struct drm_encoder *encoder);
+
+
+#endif
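To make the contract concrete, here is a skeletal encoder driver built against this header. It is purely illustrative (all my_enc_* names are made up) and simply mirrors the shape of the ch7006/sil164 conversions above:

    #include <linux/i2c.h>
    #include <linux/module.h>

    #include <dispnv04/i2c/encoder_i2c.h>

    static void my_enc_dpms(struct drm_encoder *encoder, int mode)
    {
            /* program the external chip over I2C here */
    }

    static void my_enc_destroy(struct drm_encoder *encoder)
    {
            nouveau_i2c_encoder_destroy(encoder);   /* drops the I2C client */
    }

    static const struct nouveau_i2c_encoder_funcs my_enc_funcs = {
            .dpms    = my_enc_dpms,
            .destroy = my_enc_destroy,
    };

    static int my_enc_encoder_init(struct i2c_client *client,
                                   struct drm_device *dev,
                                   struct nouveau_i2c_encoder *encoder)
    {
            encoder->encoder_i2c_funcs = &my_enc_funcs;
            encoder->encoder_i2c_priv  = NULL;      /* or per-encoder state */
            return 0;
    }

    static int my_enc_probe(struct i2c_client *client)
    {
            return 0;
    }

    static const struct i2c_device_id my_enc_ids[] = {
            { "my_enc" },
            { }
    };
    MODULE_DEVICE_TABLE(i2c, my_enc_ids);

    static struct nouveau_i2c_encoder_driver my_enc_driver = {
            .i2c_driver = {
                    .probe    = my_enc_probe,
                    .id_table = my_enc_ids,
                    .driver   = { .name = "my_enc" },
            },
            .encoder_init = my_enc_encoder_init,
    };

    static int __init my_enc_init(void)
    {
            return i2c_add_driver(&my_enc_driver.i2c_driver);
    }

    static void __exit my_enc_exit(void)
    {
            i2c_del_driver(&my_enc_driver.i2c_driver);
    }

    module_init(my_enc_init);
    module_exit(my_enc_exit);
    MODULE_LICENSE("GPL");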
diff --git a/drivers/gpu/drm/nouveau/include/dispnv04/i2c/sil164.h b/drivers/gpu/drm/nouveau/include/dispnv04/i2c/sil164.h
new file mode 100644
index 000000000000..b86750d7abe1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/dispnv04/i2c/sil164.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_I2C_SIL164_H__
+#define __NOUVEAU_I2C_SIL164_H__
+
+/**
+ * struct sil164_encoder_params
+ *
+ * Describes how the sil164 is connected to the GPU. It should be used
+ * as the @params parameter of its @set_config method.
+ *
+ * See "http://www.siliconimage.com/docs/SiI-DS-0021-E-164.pdf".
+ */
+struct sil164_encoder_params {
+ /* private: FIXME: document the members */
+ enum {
+ SIL164_INPUT_EDGE_FALLING = 0,
+ SIL164_INPUT_EDGE_RISING
+ } input_edge;
+
+ enum {
+ SIL164_INPUT_WIDTH_12BIT = 0,
+ SIL164_INPUT_WIDTH_24BIT
+ } input_width;
+
+ enum {
+ SIL164_INPUT_SINGLE_EDGE = 0,
+ SIL164_INPUT_DUAL_EDGE
+ } input_dual;
+
+ enum {
+ SIL164_PLL_FILTER_ON = 0,
+ SIL164_PLL_FILTER_OFF,
+ } pll_filter;
+
+ int input_skew; /** < Allowed range [-4, 3], use 0 for no de-skew. */
+ int duallink_skew; /** < Allowed range [-4, 3]. */
+};
+
+#endif
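For example (values are illustrative, not from any board), a design feeding the sil164 with 24-bit single-edge data and no de-skew would be described as:

    static struct sil164_encoder_params my_sil164_params = {
            .input_edge  = SIL164_INPUT_EDGE_RISING,
            .input_width = SIL164_INPUT_WIDTH_24BIT,
            .input_dual  = SIL164_INPUT_SINGLE_EDGE,
            .pll_filter  = SIL164_PLL_FILTER_ON,
            .input_skew  = 0,       /* allowed range [-4, 3] */
    };
    /* Passed to the encoder through set_config(), e.g. as platform_data. */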
diff --git a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
index e825c8a1d9ca..00015412cb3e 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
@@ -3,25 +3,30 @@
#define __NVIF_IOCTL_H__
struct nvif_ioctl_v0 {
- __u8 version;
+ /* New members MUST be added within the struct_group() macro below. */
+ struct_group_tagged(nvif_ioctl_v0_hdr, __hdr,
+ __u8 version;
#define NVIF_IOCTL_V0_SCLASS 0x01
#define NVIF_IOCTL_V0_NEW 0x02
#define NVIF_IOCTL_V0_DEL 0x03
#define NVIF_IOCTL_V0_MTHD 0x04
#define NVIF_IOCTL_V0_MAP 0x07
#define NVIF_IOCTL_V0_UNMAP 0x08
- __u8 type;
- __u8 pad02[4];
+ __u8 type;
+ __u8 pad02[4];
#define NVIF_IOCTL_V0_OWNER_NVIF 0x00
#define NVIF_IOCTL_V0_OWNER_ANY 0xff
- __u8 owner;
+ __u8 owner;
#define NVIF_IOCTL_V0_ROUTE_NVIF 0x00
#define NVIF_IOCTL_V0_ROUTE_HIDDEN 0xff
- __u8 route;
- __u64 token;
- __u64 object;
+ __u8 route;
+ __u64 token;
+ __u64 object;
+ );
__u8 data[]; /* ioctl data (below) */
};
+static_assert(offsetof(struct nvif_ioctl_v0, data) == sizeof(struct nvif_ioctl_v0_hdr),
+ "struct member likely outside of struct_group()");
struct nvif_ioctl_sclass_v0 {
/* nvif_ioctl ... */
@@ -51,12 +56,17 @@ struct nvif_ioctl_del {
};
struct nvif_ioctl_mthd_v0 {
- /* nvif_ioctl ... */
- __u8 version;
- __u8 method;
- __u8 pad02[6];
+ /* New members MUST be added within the struct_group() macro below. */
+ struct_group_tagged(nvif_ioctl_mthd_v0_hdr, __hdr,
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 method;
+ __u8 pad02[6];
+ );
__u8 data[]; /* method data (class.h) */
};
+static_assert(offsetof(struct nvif_ioctl_mthd_v0, data) == sizeof(struct nvif_ioctl_mthd_v0_hdr),
+ "struct member likely outside of struct_group()");
struct nvif_ioctl_map_v0 {
/* nvif_ioctl ... */
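For readers unfamiliar with the pattern: struct_group_tagged() (from <linux/stddef.h>) keeps the existing flat member names valid while also emitting a standalone tagged struct for the fixed-size header, which is what lets nouveau_svm.c below embed just the headers ahead of other data. A toy sketch (hypothetical struct, not part of this patch):

    #include <linux/build_bug.h>
    #include <linux/stddef.h>
    #include <linux/types.h>

    struct msg_v0 {
            /* New members MUST be added within the struct_group() macro below. */
            struct_group_tagged(msg_v0_hdr, __hdr,
                    __u8 version;
                    __u8 type;
            );
            __u8 data[];            /* flexible payload follows the header */
    };
    static_assert(offsetof(struct msg_v0, data) == sizeof(struct msg_v0_hdr),
                  "struct member likely outside of struct_group()");

    static void fill(struct msg_v0 *m, const struct msg_v0_hdr *hdr)
    {
            m->__hdr = *hdr;        /* copy the whole header as one unit... */
            m->version = 0;         /* ...or keep using the flat member names */
    }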
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index 5c5f4607fcc9..746e126c3ecf 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -196,7 +196,7 @@ struct nvkm_gsp {
void (*rm_ctrl_done)(struct nvkm_gsp_object *, void *repv);
void *(*rm_alloc_get)(struct nvkm_gsp_object *, u32 oclass, u32 argc);
- void *(*rm_alloc_push)(struct nvkm_gsp_object *, void *argv, u32 repc);
+ void *(*rm_alloc_push)(struct nvkm_gsp_object *, void *argv);
void (*rm_alloc_done)(struct nvkm_gsp_object *, void *repv);
int (*rm_free)(struct nvkm_gsp_object *);
@@ -353,9 +353,9 @@ nvkm_gsp_rm_alloc_get(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, u3
}
static inline void *
-nvkm_gsp_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
+nvkm_gsp_rm_alloc_push(struct nvkm_gsp_object *object, void *argv)
{
- void *repv = object->client->gsp->rm->rm_alloc_push(object, argv, repc);
+ void *repv = object->client->gsp->rm->rm_alloc_push(object, argv);
if (IS_ERR(repv))
object->client = NULL;
@@ -366,7 +366,7 @@ nvkm_gsp_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
static inline int
nvkm_gsp_rm_alloc_wr(struct nvkm_gsp_object *object, void *argv)
{
- void *repv = nvkm_gsp_rm_alloc_push(object, argv, 0);
+ void *repv = nvkm_gsp_rm_alloc_push(object, argv);
if (IS_ERR(repv))
return PTR_ERR(repv);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index eac0d1d2dbda..1b10c6c12f46 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -796,8 +796,10 @@ nouveau_connector_set_property(struct drm_connector *connector,
property, value);
if (ret) {
if (nv_encoder && nv_encoder->dcb->type == DCB_OUTPUT_TV)
- return get_slave_funcs(encoder)->set_property(
- encoder, connector, property, value);
+ return get_encoder_i2c_funcs(encoder)->set_property(encoder,
+ connector,
+ property,
+ value);
return ret;
}
@@ -1014,7 +1016,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
nouveau_connector_detect_depth(connector);
if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
- ret = get_slave_funcs(encoder)->get_modes(encoder, connector);
+ ret = get_encoder_i2c_funcs(encoder)->get_modes(encoder, connector);
if (nv_connector->type == DCB_CONNECTOR_LVDS ||
nv_connector->type == DCB_CONNECTOR_LVDS_SPWG ||
@@ -1073,7 +1075,7 @@ get_tmds_link_bandwidth(struct drm_connector *connector)
static enum drm_mode_status
nouveau_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
@@ -1099,7 +1101,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
max_clock = 350000;
break;
case DCB_OUTPUT_TV:
- return get_slave_funcs(encoder)->mode_valid(encoder, mode);
+ return get_encoder_i2c_funcs(encoder)->mode_valid(encoder, mode);
case DCB_OUTPUT_DP:
return nv50_dp_mode_valid(nv_encoder, mode, NULL);
default:
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index bcda0105160f..55691ec44aba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -79,21 +79,8 @@ nouveau_dp_probe_dpcd(struct nouveau_connector *nv_connector,
!drm_dp_read_lttpr_common_caps(aux, dpcd, outp->dp.lttpr.caps)) {
int nr = drm_dp_lttpr_count(outp->dp.lttpr.caps);
- if (nr) {
- drm_dp_dpcd_writeb(aux, DP_PHY_REPEATER_MODE,
- DP_PHY_REPEATER_MODE_TRANSPARENT);
-
- if (nr > 0) {
- ret = drm_dp_dpcd_writeb(aux, DP_PHY_REPEATER_MODE,
- DP_PHY_REPEATER_MODE_NON_TRANSPARENT);
- if (ret != 1) {
- drm_dp_dpcd_writeb(aux, DP_PHY_REPEATER_MODE,
- DP_PHY_REPEATER_MODE_TRANSPARENT);
- } else {
- outp->dp.lttpr.nr = nr;
- }
- }
- }
+ if (!drm_dp_lttpr_init(aux, nr))
+ outp->dp.lttpr.nr = nr;
}
ret = drm_dp_read_dpcd_caps(aux, dpcd);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 5664c4c71faf..e154d08857c5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -118,7 +118,7 @@ static struct drm_driver driver_platform;
#ifdef CONFIG_DEBUG_FS
struct dentry *nouveau_debugfs_root;
-/**
+/*
* gsp_logs - list of nvif_log GSP-RM logging buffers
*
 * Head pointer to a list of nvif_log buffers that is created for each GPU
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 333042fc493f..dce8e5d9d496 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -31,7 +31,8 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_mst_helper.h>
-#include <drm/drm_encoder_slave.h>
+
+#include <dispnv04/i2c/encoder_i2c.h>
#include "dispnv04/disp.h"
@@ -43,7 +44,7 @@ struct nouveau_connector;
struct nvkm_i2c_port;
struct nouveau_encoder {
- struct drm_encoder_slave base;
+ struct nouveau_i2c_encoder base;
struct dcb_output *dcb;
struct nvif_outp outp;
@@ -137,7 +138,7 @@ find_encoder(struct drm_connector *connector, int type);
static inline struct nouveau_encoder *nouveau_encoder(struct drm_encoder *enc)
{
- struct drm_encoder_slave *slave = to_encoder_slave(enc);
+ struct nouveau_i2c_encoder *slave = to_encoder_i2c(enc);
return container_of(slave, struct nouveau_encoder, base);
}
@@ -147,12 +148,6 @@ static inline struct drm_encoder *to_drm_encoder(struct nouveau_encoder *enc)
return &enc->base.base;
}
-static inline const struct drm_encoder_slave_funcs *
-get_slave_funcs(struct drm_encoder *enc)
-{
- return to_encoder_slave(enc)->slave_funcs;
-}
-
/* nouveau_dp.c */
enum nouveau_dp_status {
NOUVEAU_DP_NONE,
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
index 4412f2711fb5..d326e55d2d24 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -404,7 +404,14 @@ nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
{
struct drm_gpu_scheduler *drm_sched = &sched->base;
struct drm_sched_entity *entity = &sched->entity;
- const long timeout = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS);
+ struct drm_sched_init_args args = {
+ .ops = &nouveau_sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = credit_limit,
+ .timeout = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS),
+ .name = "nouveau_sched",
+ .dev = drm->dev->dev
+ };
int ret;
if (!wq) {
@@ -416,10 +423,9 @@ nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
sched->wq = wq;
}
- ret = drm_sched_init(drm_sched, &nouveau_sched_ops, wq,
- NOUVEAU_SCHED_PRIORITY_COUNT,
- credit_limit, 0, timeout,
- NULL, NULL, "nouveau_sched", drm->dev->dev);
+ args.submit_wq = wq,
+
+ ret = drm_sched_init(drm_sched, &args);
if (ret)
goto fail_wq;
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index aa22d8458d22..e12e2596ed84 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -79,8 +79,8 @@ struct nouveau_svm {
#define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)
struct nouveau_pfnmap_args {
- struct nvif_ioctl_v0 i;
- struct nvif_ioctl_mthd_v0 m;
+ struct nvif_ioctl_v0_hdr i;
+ struct nvif_ioctl_mthd_v0_hdr m;
struct nvif_vmm_pfnmap_v0 p;
};
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c
index 0b87278ac0f8..70af63d70976 100644
--- a/drivers/gpu/drm/nouveau/nvif/object.c
+++ b/drivers/gpu/drm/nouveau/nvif/object.c
@@ -57,7 +57,7 @@ int
nvif_object_sclass_get(struct nvif_object *object, struct nvif_sclass **psclass)
{
struct {
- struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_v0_hdr ioctl;
struct nvif_ioctl_sclass_v0 sclass;
} *args = NULL;
int ret, cnt = 0, i;
@@ -101,7 +101,7 @@ int
nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size)
{
struct {
- struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_v0_hdr ioctl;
struct nvif_ioctl_mthd_v0 mthd;
} *args;
u32 args_size;
@@ -135,7 +135,7 @@ void
nvif_object_unmap_handle(struct nvif_object *object)
{
struct {
- struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_v0_hdr ioctl;
struct nvif_ioctl_unmap unmap;
} args = {
.ioctl.type = NVIF_IOCTL_V0_UNMAP,
@@ -149,7 +149,7 @@ nvif_object_map_handle(struct nvif_object *object, void *argv, u32 argc,
u64 *handle, u64 *length)
{
struct {
- struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_v0_hdr ioctl;
struct nvif_ioctl_map_v0 map;
} *args;
u32 argn = sizeof(*args) + argc;
@@ -211,7 +211,7 @@ void
nvif_object_dtor(struct nvif_object *object)
{
struct {
- struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_v0_hdr ioctl;
struct nvif_ioctl_del del;
} args = {
.ioctl.type = NVIF_IOCTL_V0_DEL,
@@ -230,7 +230,7 @@ nvif_object_ctor(struct nvif_object *parent, const char *name, u32 handle,
s32 oclass, void *data, u32 size, struct nvif_object *object)
{
struct {
- struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_v0_hdr ioctl;
struct nvif_ioctl_new_v0 new;
} *args;
int ret = 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
index 58502102926b..db2602e88006 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
@@ -61,7 +61,72 @@
extern struct dentry *nouveau_debugfs_root;
#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE
-#define GSP_MSG_MAX_SIZE GSP_PAGE_MIN_SIZE * 16
+#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16)
+
+/**
+ * DOC: GSP message queue element
+ *
+ * https://github.com/NVIDIA/open-gpu-kernel-modules/blob/535/src/nvidia/inc/kernel/gpu/gsp/message_queue_priv.h
+ *
+ * The GSP command queue and status queue are message queues for the
+ * communication between software and GSP. The software submits a GSP
+ * RPC via the GSP command queue, and GSP writes the status of the
+ * submitted RPC to the status queue.
+ *
+ * A GSP message queue element consists of three parts:
+ *
+ * - message element header (struct r535_gsp_msg), which mostly maintains
+ * the metadata for queuing the element.
+ *
+ * - RPC message header (struct nvfw_gsp_rpc), which maintains the info
+ * of the RPC. E.g., the RPC function number.
+ *
+ * - The payload, where the RPC message stays. E.g. the params of a
+ * specific RPC function. Some RPC functions also have their headers
+ * in the payload. E.g. rm_alloc, rm_control.
+ *
+ * The memory layout of a GSP message element can be illustrated below::
+ *
+ * +------------------------+
+ * | Message Element Header |
+ * | (r535_gsp_msg) |
+ * | |
+ * | (r535_gsp_msg.data) |
+ * | | |
+ * |----------V-------------|
+ * | GSP RPC Header |
+ * | (nvfw_gsp_rpc) |
+ * | |
+ * | (nvfw_gsp_rpc.data) |
+ * | | |
+ * |----------V-------------|
+ * | Payload |
+ * | |
+ * | header(optional) |
+ * | params |
+ * +------------------------+
+ *
+ * The max size of a message queue element is 16 pages (including the
+ * headers). When a GSP message to be sent is larger than 16 pages, the
+ * message should be split into multiple elements and sent accordingly.
+ *
+ * Among the split elements, the first element carries the expected
+ * function number, while the remaining elements are sent with the
+ * function number NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD.
+ *
+ * GSP consumes the elements from the cmdq and always writes the result
+ * back to the msgq. The result is also formed as split elements.
+ *
+ * Terminology:
+ *
+ * - gsp_msg(msg): GSP message element (element header + GSP RPC header +
+ * payload)
+ * - gsp_rpc(rpc): GSP RPC (RPC header + payload)
+ * - gsp_rpc_buf: buffer for (GSP RPC header + payload)
+ * - gsp_rpc_len: size of (GSP RPC header + payload)
+ * - params_size: size of params in the payload
+ * - payload_size: size of (header if exists + params) in the payload
+ */
struct r535_gsp_msg {
u8 auth_tag_buffer[16];
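The size limits above translate directly into how many queue elements a given RPC occupies. The helper below is written purely for illustration (it is not part of this patch); it only restates the arithmetic implied by the documentation: the first element carries the RPC header plus as much payload as fits in 16 pages, and every continuation element re-sends an RPC header (function NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD) before its share of the rest.

    /* Illustrative only: how many cmdq elements an RPC of gsp_rpc_len
     * bytes (GSP RPC header + payload) would occupy. */
    static u32 gsp_rpc_num_elements(u32 gsp_rpc_len)
    {
            const u32 first = GSP_MSG_MAX_SIZE - GSP_MSG_HDR_SIZE;
            const u32 cont  = first - sizeof(struct nvfw_gsp_rpc);

            if (gsp_rpc_len <= first)
                    return 1;

            return 1 + DIV_ROUND_UP(gsp_rpc_len - first, cont);
    }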
@@ -73,8 +138,29 @@ struct r535_gsp_msg {
u8 data[];
};
+struct nvfw_gsp_rpc {
+ u32 header_version;
+ u32 signature;
+ u32 length;
+ u32 function;
+ u32 rpc_result;
+ u32 rpc_result_private;
+ u32 sequence;
+ union {
+ u32 spare;
+ u32 cpuRmGfid;
+ };
+ u8 data[];
+};
+
#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data)
+#define to_gsp_hdr(p, header) \
+ container_of((void *)p, typeof(*header), data)
+
+#define to_payload_hdr(p, header) \
+ container_of((void *)p, typeof(*header), params)
+
static int
r535_rpc_status_to_errno(uint32_t rpc_status)
{
@@ -89,18 +175,16 @@ r535_rpc_status_to_errno(uint32_t rpc_status)
}
}
-static void *
-r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 repc, u32 *prepc, int *ptime)
+static int
+r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *ptime)
{
- struct r535_gsp_msg *mqe;
u32 size, rptr = *gsp->msgq.rptr;
int used;
- u8 *msg;
- u32 len;
- size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + repc, GSP_PAGE_SIZE);
+ size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + gsp_rpc_len,
+ GSP_PAGE_SIZE);
if (WARN_ON(!size || size >= gsp->msgq.cnt))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
do {
u32 wptr = *gsp->msgq.wptr;
@@ -115,70 +199,217 @@ r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 repc, u32 *prepc, int *ptime)
} while (--(*ptime));
if (WARN_ON(!*ptime))
- return ERR_PTR(-ETIMEDOUT);
+ return -ETIMEDOUT;
- mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + rptr * 0x1000);
+ return used;
+}
- if (prepc) {
- *prepc = (used * GSP_PAGE_SIZE) - sizeof(*mqe);
- return mqe->data;
- }
+static struct r535_gsp_msg *
+r535_gsp_msgq_get_entry(struct nvkm_gsp *gsp)
+{
+ u32 rptr = *gsp->msgq.rptr;
- size = ALIGN(repc + GSP_MSG_HDR_SIZE, GSP_PAGE_SIZE);
+ /* Skip the first page, which is the message queue info */
+ return (void *)((u8 *)gsp->shm.msgq.ptr + GSP_PAGE_SIZE +
+ rptr * GSP_PAGE_SIZE);
+}
- msg = kvmalloc(repc, GFP_KERNEL);
- if (!msg)
- return ERR_PTR(-ENOMEM);
+/**
+ * DOC: Receive a GSP message queue element
+ *
+ * Receiving a GSP message queue element from the message queue consists of
+ * the following steps:
+ *
+ * - Peek the element from the queue: r535_gsp_msgq_peek().
+ * Peek the first page of the element to determine the total size of the
+ * message before allocating the proper memory.
+ *
+ * - Allocate memory for the message.
+ * Once the total size of the message is determined from the GSP message
+ * queue element, the caller of r535_gsp_msgq_recv() allocates the
+ * required memory.
+ *
+ * - Receive the message: r535_gsp_msgq_recv().
+ * Copy the message into the allocated memory. Advance the read pointer.
+ * If the message is a large GSP message, r535_gsp_msgq_recv() calls
+ * r535_gsp_msgq_recv_one_elem() repeatedly to receive continuation parts
+ * until the complete message is received.
+ * r535_gsp_msgq_recv() assembles the payloads of the continuation parts
+ * into the buffer returned for the large GSP message.
+ *
+ * - Free the allocated memory: r535_gsp_msg_done().
+ * The user is responsible for freeing the memory allocated for the GSP
+ * message pages after they have been processed.
+ */
+static void *
+r535_gsp_msgq_peek(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
+{
+ struct r535_gsp_msg *mqe;
+ int ret;
+
+ ret = r535_gsp_msgq_wait(gsp, gsp_rpc_len, retries);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ mqe = r535_gsp_msgq_get_entry(gsp);
+
+ return mqe->data;
+}
+
+struct r535_gsp_msg_info {
+ int *retries;
+ u32 gsp_rpc_len;
+ void *gsp_rpc_buf;
+ bool continuation;
+};
+
+static void
+r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl);
+
+static void *
+r535_gsp_msgq_recv_one_elem(struct nvkm_gsp *gsp,
+ struct r535_gsp_msg_info *info)
+{
+ u8 *buf = info->gsp_rpc_buf;
+ u32 rptr = *gsp->msgq.rptr;
+ struct r535_gsp_msg *mqe;
+ u32 size, expected, len;
+ int ret;
+
+ expected = info->gsp_rpc_len;
+
+ ret = r535_gsp_msgq_wait(gsp, expected, info->retries);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ mqe = r535_gsp_msgq_get_entry(gsp);
+
+ if (info->continuation) {
+ struct nvfw_gsp_rpc *rpc = (struct nvfw_gsp_rpc *)mqe->data;
+
+ if (rpc->function != NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD) {
+ nvkm_error(&gsp->subdev,
+ "Not a continuation of a large RPC\n");
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
+ return ERR_PTR(-EIO);
+ }
+ }
+
+ size = ALIGN(expected + GSP_MSG_HDR_SIZE, GSP_PAGE_SIZE);
len = ((gsp->msgq.cnt - rptr) * GSP_PAGE_SIZE) - sizeof(*mqe);
- len = min_t(u32, repc, len);
- memcpy(msg, mqe->data, len);
+ len = min_t(u32, expected, len);
+
+ if (info->continuation)
+ memcpy(buf, mqe->data + sizeof(struct nvfw_gsp_rpc),
+ len - sizeof(struct nvfw_gsp_rpc));
+ else
+ memcpy(buf, mqe->data, len);
- repc -= len;
+ expected -= len;
- if (repc) {
+ if (expected) {
mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000);
- memcpy(msg + len, mqe, repc);
+ memcpy(buf + len, mqe, expected);
}
rptr = (rptr + DIV_ROUND_UP(size, GSP_PAGE_SIZE)) % gsp->msgq.cnt;
mb();
(*gsp->msgq.rptr) = rptr;
- return msg;
+ return buf;
}
static void *
-r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 repc, int *ptime)
+r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
{
- return r535_gsp_msgq_wait(gsp, repc, NULL, ptime);
+ struct r535_gsp_msg *mqe;
+ const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*mqe);
+ struct nvfw_gsp_rpc *rpc;
+ struct r535_gsp_msg_info info = {0};
+ u32 expected = gsp_rpc_len;
+ void *buf;
+
+ mqe = r535_gsp_msgq_get_entry(gsp);
+ rpc = (struct nvfw_gsp_rpc *)mqe->data;
+
+ if (WARN_ON(rpc->length > max_rpc_size))
+ return NULL;
+
+ buf = kvmalloc(max_t(u32, rpc->length, expected), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ info.gsp_rpc_buf = buf;
+ info.retries = retries;
+ info.gsp_rpc_len = rpc->length;
+
+ buf = r535_gsp_msgq_recv_one_elem(gsp, &info);
+ if (IS_ERR(buf)) {
+ kvfree(info.gsp_rpc_buf);
+ info.gsp_rpc_buf = NULL;
+ return buf;
+ }
+
+ if (expected <= max_rpc_size)
+ return buf;
+
+ info.gsp_rpc_buf += info.gsp_rpc_len;
+ expected -= info.gsp_rpc_len;
+
+ while (expected) {
+ u32 size;
+
+ rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), info.retries);
+ if (IS_ERR_OR_NULL(rpc)) {
+ kvfree(buf);
+ return rpc;
+ }
+
+ info.gsp_rpc_len = rpc->length;
+ info.continuation = true;
+
+ rpc = r535_gsp_msgq_recv_one_elem(gsp, &info);
+ if (IS_ERR_OR_NULL(rpc)) {
+ kvfree(buf);
+ return rpc;
+ }
+
+ size = info.gsp_rpc_len - sizeof(*rpc);
+ expected -= size;
+ info.gsp_rpc_buf += size;
+ }
+
+ rpc = buf;
+ rpc->length = gsp_rpc_len;
+ return buf;
}
static int
-r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *argv)
+r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *rpc)
{
- struct r535_gsp_msg *cmd = container_of(argv, typeof(*cmd), data);
+ struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
struct r535_gsp_msg *cqe;
- u32 argc = cmd->checksum;
- u64 *ptr = (void *)cmd;
+ u32 gsp_rpc_len = msg->checksum;
+ u64 *ptr = (void *)msg;
u64 *end;
u64 csum = 0;
int free, time = 1000000;
- u32 wptr, size, step;
+ u32 wptr, size, step, len;
u32 off = 0;
- argc = ALIGN(GSP_MSG_HDR_SIZE + argc, GSP_PAGE_SIZE);
+ len = ALIGN(GSP_MSG_HDR_SIZE + gsp_rpc_len, GSP_PAGE_SIZE);
- end = (u64 *)((char *)ptr + argc);
- cmd->pad = 0;
- cmd->checksum = 0;
- cmd->sequence = gsp->cmdq.seq++;
- cmd->elem_count = DIV_ROUND_UP(argc, 0x1000);
+ end = (u64 *)((char *)ptr + len);
+ msg->pad = 0;
+ msg->checksum = 0;
+ msg->sequence = gsp->cmdq.seq++;
+ msg->elem_count = DIV_ROUND_UP(len, 0x1000);
while (ptr < end)
csum ^= *ptr++;
- cmd->checksum = upper_32_bits(csum) ^ lower_32_bits(csum);
+ msg->checksum = upper_32_bits(csum) ^ lower_32_bits(csum);
wptr = *gsp->cmdq.wptr;
do {
@@ -193,23 +424,23 @@ r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *argv)
} while(--time);
if (WARN_ON(!time)) {
- kvfree(cmd);
+ kvfree(msg);
return -ETIMEDOUT;
}
cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000);
step = min_t(u32, free, (gsp->cmdq.cnt - wptr));
- size = min_t(u32, argc, step * GSP_PAGE_SIZE);
+ size = min_t(u32, len, step * GSP_PAGE_SIZE);
- memcpy(cqe, (u8 *)cmd + off, size);
+ memcpy(cqe, (u8 *)msg + off, size);
wptr += DIV_ROUND_UP(size, 0x1000);
if (wptr == gsp->cmdq.cnt)
wptr = 0;
off += size;
- argc -= size;
- } while(argc);
+ len -= size;
+ } while (len);
nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr);
wmb();
@@ -218,40 +449,25 @@ r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *argv)
nvkm_falcon_wr32(&gsp->falcon, 0xc00, 0x00000000);
- kvfree(cmd);
+ kvfree(msg);
return 0;
}
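/*
 * Illustrative sketch only (not part of the patch): the command-queue
 * checksum computed above is an XOR over every 64-bit word of the message,
 * folded down to 32 bits. Standalone, assuming 'len' is a multiple of 8
 * (which holds here because the message is padded to GSP_PAGE_SIZE):
 */
static u32
example_gsp_msg_checksum(const void *data, u32 len)
{
	const u64 *ptr = data;
	const u64 *end = (const void *)((const char *)data + len);
	u64 csum = 0;

	while (ptr < end)
		csum ^= *ptr++;

	return upper_32_bits(csum) ^ lower_32_bits(csum);
}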
static void *
-r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 argc)
+r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 gsp_rpc_len)
{
- struct r535_gsp_msg *cmd;
- u32 size = GSP_MSG_HDR_SIZE + argc;
+ struct r535_gsp_msg *msg;
+ u32 size = GSP_MSG_HDR_SIZE + gsp_rpc_len;
size = ALIGN(size, GSP_MSG_MIN_SIZE);
- cmd = kvzalloc(size, GFP_KERNEL);
- if (!cmd)
+ msg = kvzalloc(size, GFP_KERNEL);
+ if (!msg)
return ERR_PTR(-ENOMEM);
- cmd->checksum = argc;
- return cmd->data;
+ msg->checksum = gsp_rpc_len;
+ return msg->data;
}
-struct nvfw_gsp_rpc {
- u32 header_version;
- u32 signature;
- u32 length;
- u32 function;
- u32 rpc_result;
- u32 rpc_result_private;
- u32 sequence;
- union {
- u32 spare;
- u32 cpuRmGfid;
- };
- u8 data[];
-};
-
static void
r535_gsp_msg_done(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg)
{
@@ -272,61 +488,61 @@ r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl)
}
static struct nvfw_gsp_rpc *
-r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 repc)
+r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 gsp_rpc_len)
{
struct nvkm_subdev *subdev = &gsp->subdev;
- struct nvfw_gsp_rpc *msg;
- int time = 4000000, i;
- u32 size;
+ struct nvfw_gsp_rpc *rpc;
+ int retries = 4000000, i;
retry:
- msg = r535_gsp_msgq_wait(gsp, sizeof(*msg), &size, &time);
- if (IS_ERR_OR_NULL(msg))
- return msg;
+ rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), &retries);
+ if (IS_ERR_OR_NULL(rpc))
+ return rpc;
- msg = r535_gsp_msgq_recv(gsp, msg->length, &time);
- if (IS_ERR_OR_NULL(msg))
- return msg;
+ rpc = r535_gsp_msgq_recv(gsp, gsp_rpc_len, &retries);
+ if (IS_ERR_OR_NULL(rpc))
+ return rpc;
- if (msg->rpc_result) {
- r535_gsp_msg_dump(gsp, msg, NV_DBG_ERROR);
- r535_gsp_msg_done(gsp, msg);
+ if (rpc->rpc_result) {
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
+ r535_gsp_msg_done(gsp, rpc);
return ERR_PTR(-EINVAL);
}
- r535_gsp_msg_dump(gsp, msg, NV_DBG_TRACE);
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_TRACE);
- if (fn && msg->function == fn) {
- if (repc) {
- if (msg->length < sizeof(*msg) + repc) {
- nvkm_error(subdev, "msg len %d < %zd\n",
- msg->length, sizeof(*msg) + repc);
- r535_gsp_msg_dump(gsp, msg, NV_DBG_ERROR);
- r535_gsp_msg_done(gsp, msg);
+ if (fn && rpc->function == fn) {
+ if (gsp_rpc_len) {
+ if (rpc->length < gsp_rpc_len) {
+ nvkm_error(subdev, "rpc len %d < %d\n",
+ rpc->length, gsp_rpc_len);
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
+ r535_gsp_msg_done(gsp, rpc);
return ERR_PTR(-EIO);
}
- return msg;
+ return rpc;
}
- r535_gsp_msg_done(gsp, msg);
+ r535_gsp_msg_done(gsp, rpc);
return NULL;
}
for (i = 0; i < gsp->msgq.ntfy_nr; i++) {
struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i];
- if (ntfy->fn == msg->function) {
+ if (ntfy->fn == rpc->function) {
if (ntfy->func)
- ntfy->func(ntfy->priv, ntfy->fn, msg->data, msg->length - sizeof(*msg));
+ ntfy->func(ntfy->priv, ntfy->fn, rpc->data,
+ rpc->length - sizeof(*rpc));
break;
}
}
if (i == gsp->msgq.ntfy_nr)
- r535_gsp_msg_dump(gsp, msg, NV_DBG_WARN);
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_WARN);
- r535_gsp_msg_done(gsp, msg);
+ r535_gsp_msg_done(gsp, rpc);
if (fn)
goto retry;
@@ -369,9 +585,10 @@ r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn)
}
static void *
-r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
+r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload, bool wait,
+ u32 gsp_rpc_len)
{
- struct nvfw_gsp_rpc *rpc = container_of(argv, typeof(*rpc), data);
+ struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
struct nvfw_gsp_rpc *msg;
u32 fn = rpc->function;
void *repv = NULL;
@@ -389,7 +606,7 @@ r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
return ERR_PTR(ret);
if (wait) {
- msg = r535_gsp_msg_recv(gsp, fn, repc);
+ msg = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len);
if (!IS_ERR_OR_NULL(msg))
repv = msg->data;
else
@@ -584,21 +801,21 @@ r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object)
}
static void
-r535_gsp_rpc_rm_alloc_done(struct nvkm_gsp_object *object, void *repv)
+r535_gsp_rpc_rm_alloc_done(struct nvkm_gsp_object *object, void *params)
{
- rpc_gsp_rm_alloc_v03_00 *rpc = container_of(repv, typeof(*rpc), params);
+ rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc);
nvkm_gsp_rpc_done(object->client->gsp, rpc);
}
static void *
-r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
+r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *params)
{
- rpc_gsp_rm_alloc_v03_00 *rpc = container_of(argv, typeof(*rpc), params);
+ rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc);
struct nvkm_gsp *gsp = object->client->gsp;
- void *ret;
+ void *ret = NULL;
- rpc = nvkm_gsp_rpc_push(gsp, rpc, true, sizeof(*rpc) + repc);
+ rpc = nvkm_gsp_rpc_push(gsp, rpc, true, sizeof(*rpc));
if (IS_ERR_OR_NULL(rpc))
return rpc;
@@ -606,8 +823,6 @@ r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status));
if (PTR_ERR(ret) != -EAGAIN && PTR_ERR(ret) != -EBUSY)
nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
- } else {
- ret = repc ? rpc->params : NULL;
}
nvkm_gsp_rpc_done(gsp, rpc);
@@ -616,16 +831,22 @@ r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
}
static void *
-r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass, u32 argc)
+r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass,
+ u32 params_size)
{
struct nvkm_gsp_client *client = object->client;
struct nvkm_gsp *gsp = client->gsp;
rpc_gsp_rm_alloc_v03_00 *rpc;
- nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x new obj:0x%08x cls:0x%08x argc:%d\n",
- client->object.handle, object->parent->handle, object->handle, oclass, argc);
+ nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x new obj:0x%08x\n",
+ client->object.handle, object->parent->handle,
+ object->handle);
- rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC, sizeof(*rpc) + argc);
+ nvkm_debug(&gsp->subdev, "cls:0x%08x params_size:%d\n", oclass,
+ params_size);
+
+ rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC,
+ sizeof(*rpc) + params_size);
if (IS_ERR(rpc))
return rpc;
@@ -634,30 +855,30 @@ r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass, u32 argc)
rpc->hObject = object->handle;
rpc->hClass = oclass;
rpc->status = 0;
- rpc->paramsSize = argc;
+ rpc->paramsSize = params_size;
return rpc->params;
}
static void
-r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *repv)
+r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *params)
{
- rpc_gsp_rm_control_v03_00 *rpc = container_of(repv, typeof(*rpc), params);
+ rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr(params, rpc);
- if (!repv)
+ if (!params)
return;
nvkm_gsp_rpc_done(object->client->gsp, rpc);
}
static int
-r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **argv, u32 repc)
+r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **params, u32 repc)
{
- rpc_gsp_rm_control_v03_00 *rpc = container_of((*argv), typeof(*rpc), params);
+ rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr((*params), rpc);
struct nvkm_gsp *gsp = object->client->gsp;
int ret = 0;
rpc = nvkm_gsp_rpc_push(gsp, rpc, true, repc);
if (IS_ERR_OR_NULL(rpc)) {
- *argv = NULL;
+ *params = NULL;
return PTR_ERR(rpc);
}
@@ -669,7 +890,7 @@ r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **argv, u32 repc)
}
if (repc)
- *argv = rpc->params;
+ *params = rpc->params;
else
nvkm_gsp_rpc_done(gsp, rpc);
@@ -677,16 +898,17 @@ r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **argv, u32 repc)
}
static void *
-r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 argc)
+r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 params_size)
{
struct nvkm_gsp_client *client = object->client;
struct nvkm_gsp *gsp = client->gsp;
rpc_gsp_rm_control_v03_00 *rpc;
- nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x argc:%d\n",
- client->object.handle, object->handle, cmd, argc);
+ nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x params_size:%d\n",
+ client->object.handle, object->handle, cmd, params_size);
- rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL, sizeof(*rpc) + argc);
+ rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL,
+ sizeof(*rpc) + params_size);
if (IS_ERR(rpc))
return rpc;
@@ -694,7 +916,7 @@ r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 argc)
rpc->hObject = object->handle;
rpc->cmd = cmd;
rpc->status = 0;
- rpc->paramsSize = argc;
+ rpc->paramsSize = params_size;
return rpc->params;
}
@@ -707,11 +929,12 @@ r535_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv)
}
static void *
-r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc)
+r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 payload_size)
{
struct nvfw_gsp_rpc *rpc;
- rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + argc, sizeof(u64)));
+ rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + payload_size,
+ sizeof(u64)));
if (IS_ERR(rpc))
return ERR_CAST(rpc);
@@ -720,38 +943,41 @@ r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc)
rpc->function = fn;
rpc->rpc_result = 0xffffffff;
rpc->rpc_result_private = 0xffffffff;
- rpc->length = sizeof(*rpc) + argc;
+ rpc->length = sizeof(*rpc) + payload_size;
return rpc->data;
}
static void *
-r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
-{
- struct nvfw_gsp_rpc *rpc = container_of(argv, typeof(*rpc), data);
- struct r535_gsp_msg *cmd = container_of((void *)rpc, typeof(*cmd), data);
- const u32 max_msg_size = (16 * 0x1000) - sizeof(struct r535_gsp_msg);
- const u32 max_rpc_size = max_msg_size - sizeof(*rpc);
- u32 rpc_size = rpc->length - sizeof(*rpc);
+r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload, bool wait,
+ u32 gsp_rpc_len)
+{
+ struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
+ struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
+ const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*msg);
+ const u32 max_payload_size = max_rpc_size - sizeof(*rpc);
+ u32 payload_size = rpc->length - sizeof(*rpc);
void *repv;
mutex_lock(&gsp->cmdq.mutex);
- if (rpc_size > max_rpc_size) {
+ if (payload_size > max_payload_size) {
const u32 fn = rpc->function;
+ u32 remain_payload_size = payload_size;
/* Adjust length, and send initial RPC. */
- rpc->length = sizeof(*rpc) + max_rpc_size;
- cmd->checksum = rpc->length;
+ rpc->length = sizeof(*rpc) + max_payload_size;
+ msg->checksum = rpc->length;
- repv = r535_gsp_rpc_send(gsp, argv, false, 0);
+ repv = r535_gsp_rpc_send(gsp, payload, false, 0);
if (IS_ERR(repv))
goto done;
- argv += max_rpc_size;
- rpc_size -= max_rpc_size;
+ payload += max_payload_size;
+ remain_payload_size -= max_payload_size;
/* Remaining chunks sent as CONTINUATION_RECORD RPCs. */
- while (rpc_size) {
- u32 size = min(rpc_size, max_rpc_size);
+ while (remain_payload_size) {
+ u32 size = min(remain_payload_size,
+ max_payload_size);
void *next;
next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size);
@@ -760,28 +986,31 @@ r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
goto done;
}
- memcpy(next, argv, size);
+ memcpy(next, payload, size);
repv = r535_gsp_rpc_send(gsp, next, false, 0);
if (IS_ERR(repv))
goto done;
- argv += size;
- rpc_size -= size;
+ payload += size;
+ remain_payload_size -= size;
}
/* Wait for reply. */
- if (wait) {
- rpc = r535_gsp_msg_recv(gsp, fn, repc);
- if (!IS_ERR_OR_NULL(rpc))
+ rpc = r535_gsp_msg_recv(gsp, fn, payload_size +
+ sizeof(*rpc));
+ if (!IS_ERR_OR_NULL(rpc)) {
+ if (wait) {
repv = rpc->data;
- else
- repv = rpc;
+ } else {
+ nvkm_gsp_rpc_done(gsp, rpc);
+ repv = NULL;
+ }
} else {
- repv = NULL;
+ repv = wait ? rpc : NULL;
}
} else {
- repv = r535_gsp_rpc_send(gsp, argv, wait, repc);
+ repv = r535_gsp_rpc_send(gsp, payload, wait, gsp_rpc_len);
}
done:
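/*
 * Illustrative sketch only (not part of the patch): the loop above splits an
 * oversized RPC payload into an initial RPC carrying max_payload_size bytes
 * followed by NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD RPCs. The number of
 * continuation records it emits reduces to:
 */
static u32
example_num_continuations(u32 payload_size, u32 max_payload_size)
{
	if (payload_size <= max_payload_size)
		return 0;	/* fits in a single RPC, no continuations */

	/* everything beyond the first chunk goes out as continuation RPCs */
	return DIV_ROUND_UP(payload_size - max_payload_size, max_payload_size);
}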
@@ -1111,7 +1340,7 @@ enum registry_type {
#define REGISTRY_MAX_KEY_LENGTH 64
/**
- * registry_list_entry - linked list member for a registry key/value
+ * struct registry_list_entry - linked list member for a registry key/value
* @head: list_head struct
* @type: dword, binary, or string
* @klen: the length of name of the key
@@ -1327,7 +1556,7 @@ struct nv_gsp_registry_entries {
u32 value;
};
-/**
+/*
* r535_registry_entries - required registry entries for GSP-RM
*
* This array lists registry entries that are required for GSP-RM to
@@ -2101,7 +2330,7 @@ MODULE_PARM_DESC(keep_gsp_logging,
#define NV_GSP_MSG_EVENT_UCODE_LIBOS_CLASS_PMU 0xf3d722
/**
- * rpc_ucode_libos_print_v1E_08 - RPC payload for libos print buffers
+ * struct rpc_ucode_libos_print_v1e_08 - RPC payload for libos print buffers
* @ucode_eng_desc: the engine descriptor
* @libos_print_buf_size: the size of the libos_print_buf[]
* @libos_print_buf: the actual buffer
@@ -2162,7 +2391,7 @@ r535_gsp_msg_libos_print(void *priv, u32 fn, void *repv, u32 repc)
}
/**
- * create_debufgs - create a blob debugfs entry
+ * create_debugfs - create a blob debugfs entry
* @gsp: gsp pointer
* @name: name of this dentry
* @blob: blob wrapper
@@ -2788,6 +3017,10 @@ static bool is_empty(const struct debugfs_blob_wrapper *b)
/**
* r535_gsp_copy_log - preserve the logging buffers in a blob
+ * @parent: the top-level dentry for this GPU
+ * @name: name of debugfs entry to create
+ * @s: original wrapper object to copy from
+ * @t: new wrapper object to copy to
*
* When GSP shuts down, the nvkm_gsp object and all its memory is deleted.
* To preserve the logging buffers, the buffers need to be copied, but only
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 7b2df3185de4..692df747e2ae 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1236,21 +1236,15 @@ static int dss_video_pll_probe(struct dss_device *dss)
if (!np)
return 0;
- if (of_property_read_bool(np, "syscon-pll-ctrl")) {
- dss->syscon_pll_ctrl = syscon_regmap_lookup_by_phandle(np,
- "syscon-pll-ctrl");
+ if (of_property_present(np, "syscon-pll-ctrl")) {
+ dss->syscon_pll_ctrl =
+ syscon_regmap_lookup_by_phandle_args(np, "syscon-pll-ctrl",
+ 1, &dss->syscon_pll_ctrl_offset);
if (IS_ERR(dss->syscon_pll_ctrl)) {
dev_err(&pdev->dev,
"failed to get syscon-pll-ctrl regmap\n");
return PTR_ERR(dss->syscon_pll_ctrl);
}
-
- if (of_property_read_u32_index(np, "syscon-pll-ctrl", 1,
- &dss->syscon_pll_ctrl_offset)) {
- dev_err(&pdev->dev,
- "failed to get syscon-pll-ctrl offset\n");
- return -EINVAL;
- }
}
pll_regulator = devm_regulator_get(&pdev->dev, "vdda_video");
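/*
 * Illustrative sketch only (not part of the patch): the change above folds
 * the separate regmap lookup and of_property_read_u32_index() call into one
 * syscon_regmap_lookup_by_phandle_args() call, which reads both the regmap
 * and its trailing argument cell from a property of the form
 * "syscon-pll-ctrl = <&some_syscon offset>". A minimal caller, with
 * hypothetical names:
 */
static int example_syscon_lookup(struct device_node *np, struct regmap **map,
				 unsigned int *offset)
{
	/* one trailing argument cell holds the register offset */
	*map = syscon_regmap_lookup_by_phandle_args(np, "syscon-pll-ctrl",
						    1, offset);
	return PTR_ERR_OR_ZERO(*map);
}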
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index 4435f0027c78..e1ac447221ee 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -341,10 +341,9 @@ static void hdmi4_bridge_mode_set(struct drm_bridge *bridge,
}
static void hdmi4_bridge_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
- struct drm_atomic_state *state = bridge_state->base.state;
struct drm_connector_state *conn_state;
struct drm_connector *connector;
struct drm_crtc_state *crtc_state;
@@ -410,7 +409,7 @@ done:
}
static void hdmi4_bridge_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
unsigned long flags;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index a8c740df3146..fa9904e4c218 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -339,10 +339,9 @@ static void hdmi5_bridge_mode_set(struct drm_bridge *bridge,
}
static void hdmi5_bridge_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
- struct drm_atomic_state *state = bridge_state->base.state;
struct drm_connector_state *conn_state;
struct drm_connector *connector;
struct drm_crtc_state *crtc_state;
@@ -408,7 +407,7 @@ done:
}
static void hdmi5_bridge_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+ struct drm_atomic_state *state)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
unsigned long flags;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index 21564c38234f..12ef47cd232b 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -545,23 +545,6 @@ static void hdmi_core_enable_interrupts(struct hdmi_core_data *core)
REG_FLD_MOD(core->base, HDMI_CORE_IH_MUTE, 0x0, 1, 0);
}
-int hdmi5_core_handle_irqs(struct hdmi_core_data *core)
-{
- void __iomem *base = core->base;
-
- REG_FLD_MOD(base, HDMI_CORE_IH_FC_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_FC_STAT1, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_FC_STAT2, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_AS_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_PHY_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_I2CM_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_CEC_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_VP_STAT0, 0xff, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_IH_I2CMPHY_STAT0, 0xff, 7, 0);
-
- return 0;
-}
-
void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_config *cfg)
{
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h
index 070cbf5fb57d..b8ed21156e8c 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h
@@ -286,7 +286,6 @@ int hdmi5_core_ddc_read(void *data, u8 *buf, unsigned int block, size_t len);
void hdmi5_core_ddc_uninit(struct hdmi_core_data *core);
void hdmi5_core_dump(struct hdmi_core_data *core, struct seq_file *s);
-int hdmi5_core_handle_irqs(struct hdmi_core_data *core);
void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_config *cfg);
int hdmi5_core_init(struct platform_device *pdev, struct hdmi_core_data *core);
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index d7469c565d1d..e059b06e0239 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -573,6 +573,16 @@ config DRM_PANEL_RAYDIUM_RM67191
Say Y here if you want to enable support for Raydium RM67191 FHD
(1080x1920) DSI panel.
+config DRM_PANEL_RAYDIUM_RM67200
+ tristate "Raydium RM67200-based DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ help
+ Say Y here if you want to enable support for Raydium RM67200-based
+ DSI video mode panels. This panel controller can be found in the
+ Wanchanglong W552793BAA panel used on the Rockchip RK3588 EVB1
+ evaluation board.
+
config DRM_PANEL_RAYDIUM_RM68200
tristate "Raydium RM68200 720x1280 DSI video mode panel"
depends on OF
@@ -925,6 +935,15 @@ config DRM_PANEL_SIMPLE
that it can be automatically turned off when the panel goes into a
low power state.
+config DRM_PANEL_SUMMIT
+ tristate "Apple Summit display panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y if you want to enable support for the "Summit" display panel
+ used as a touchbar on certain Apple laptops.
+
config DRM_PANEL_SYNAPTICS_R63353
tristate "Synaptics R63353-based panels"
depends on OF
@@ -996,6 +1015,18 @@ config DRM_PANEL_VISIONOX_RM69299
Say Y here if you want to enable support for Visionox
RM69299 DSI Video Mode panel.
+config DRM_PANEL_VISIONOX_RM692E5
+ tristate "Visionox RM692E5"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select DRM_DISPLAY_DSC_HELPER
+ select DRM_DISPLAY_HELPER
+ help
+ Say Y here if you want to enable support for Visionox RM692E5 AMOLED
+ display panels, such as the one found in the Nothing Phone (1)
+ smartphone.
+
config DRM_PANEL_VISIONOX_VTDR6130
tristate "Visionox VTDR6130"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 7dcf72646cac..1bb8ae46b59b 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_DRM_PANEL_OSD_OSD101T2587_53TS) += panel-osd-osd101t2587-53ts.o
obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
obj-$(CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN) += panel-raspberrypi-touchscreen.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM67191) += panel-raydium-rm67191.o
+obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM67200) += panel-raydium-rm67200.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM692E5) += panel-raydium-rm692e5.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM69380) += panel-raydium-rm69380.o
@@ -89,6 +90,7 @@ obj-$(CONFIG_DRM_PANEL_SHARP_LS060T1SX01) += panel-sharp-ls060t1sx01.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7701) += panel-sitronix-st7701.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7703) += panel-sitronix-st7703.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
+obj-$(CONFIG_DRM_PANEL_SUMMIT) += panel-summit.o
obj-$(CONFIG_DRM_PANEL_SYNAPTICS_R63353) += panel-synaptics-r63353.o
obj-$(CONFIG_DRM_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
obj-$(CONFIG_DRM_PANEL_SONY_TD4353_JDI) += panel-sony-td4353-jdi.o
@@ -100,6 +102,7 @@ obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o
obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o
obj-$(CONFIG_DRM_PANEL_VISIONOX_RM69299) += panel-visionox-rm69299.o
+obj-$(CONFIG_DRM_PANEL_VISIONOX_RM692E5) += panel-visionox-rm692e5.o
obj-$(CONFIG_DRM_PANEL_VISIONOX_VTDR6130) += panel-visionox-vtdr6130.o
obj-$(CONFIG_DRM_PANEL_VISIONOX_R66451) += panel-visionox-r66451.o
obj-$(CONFIG_DRM_PANEL_WIDECHIPS_WS2401) += panel-widechips-ws2401.o
diff --git a/drivers/gpu/drm/panel/panel-ebbg-ft8719.c b/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
index e85d63a176d0..0bfed0ec0bbc 100644
--- a/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
+++ b/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
@@ -57,65 +57,39 @@ static void ebbg_ft8719_reset(struct ebbg_ft8719 *ctx)
static int ebbg_ft8719_on(struct ebbg_ft8719 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_brightness(dsi, 0x00ff);
- if (ret < 0) {
- dev_err(dev, "Failed to set display brightness: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0x00ff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 90);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- msleep(90);
-
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
-
- return 0;
+ return dsi_ctx.accum_err;
}
static int ebbg_ft8719_off(struct ebbg_ft8719 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- usleep_range(10000, 11000);
-
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- msleep(90);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 90);
- return 0;
+ return dsi_ctx.accum_err;
}
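/*
 * Illustrative sketch only (not part of the patch): the conversion above
 * relies on the mipi_dsi_*_multi() helpers becoming no-ops once an error has
 * been recorded in ctx.accum_err, so init/teardown sequences need no
 * per-call error handling. A minimal, hypothetical sequence:
 */
static int example_multi_ctx_sequence(struct mipi_dsi_device *dsi)
{
	struct mipi_dsi_multi_context ctx = { .dsi = dsi };

	mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);	/* skipped if ctx.accum_err is set */
	mipi_dsi_msleep(&ctx, 120);			/* the delay is also skipped on error */
	mipi_dsi_dcs_set_display_on_multi(&ctx);

	return ctx.accum_err;	/* 0 on success, first recorded error otherwise */
}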
static int ebbg_ft8719_prepare(struct drm_panel *panel)
{
struct ebbg_ft8719 *ctx = to_ebbg_ft8719(panel);
- struct device *dev = &ctx->dsi->dev;
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
@@ -126,7 +100,6 @@ static int ebbg_ft8719_prepare(struct drm_panel *panel)
ret = ebbg_ft8719_on(ctx);
if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
return ret;
}
@@ -137,18 +110,10 @@ static int ebbg_ft8719_prepare(struct drm_panel *panel)
static int ebbg_ft8719_unprepare(struct drm_panel *panel)
{
struct ebbg_ft8719 *ctx = to_ebbg_ft8719(panel);
- struct device *dev = &ctx->dsi->dev;
- int ret;
-
- ret = ebbg_ft8719_off(ctx);
- if (ret < 0)
- dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+ ebbg_ft8719_off(ctx);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
-
- ret = regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
- if (ret)
- dev_err(panel->dev, "Failed to disable regulators: %d\n", ret);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index f8511fe5fb0d..52028c8f8988 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -1808,6 +1808,20 @@ static const struct panel_delay delay_200_150_e50 = {
.enable = 50,
};
+static const struct panel_delay delay_200_500_e250 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 250,
+};
+
+static const struct panel_delay delay_50_500_e200_d200_po2e335 = {
+ .hpd_absent = 50,
+ .unprepare = 500,
+ .enable = 200,
+ .disable = 200,
+ .powered_on_to_enable = 335,
+};
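/*
 * Annotation (not part of the patch): the delay_* identifiers above encode
 * the field values -- <hpd_absent>_<unprepare>, then e<enable>, d<disable>
 * and po2e<powered_on_to_enable> where present, all in milliseconds.
 */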
+
#define EDP_PANEL_ENTRY(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name) \
{ \
.ident = { \
@@ -1862,6 +1876,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x73aa, &delay_200_500_e50, "B116XTN02.3"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xa199, &delay_200_500_e50, "B116XAN06.1"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0xa7b3, &delay_200_500_e50, "B140UAN04.4"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xc4b4, &delay_200_500_e50, "B116XAT04.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xd497, &delay_200_500_e50, "B120XAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xf390, &delay_200_500_e50, "B140XTN07.7"),
@@ -1914,6 +1929,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ae8, &delay_200_500_e50_p2e80, "NV140WUM-N41"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b09, &delay_200_500_e50_po2e200, "NV140FHM-NZ"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b34, &delay_200_500_e80, "NV122WUM-N41"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b43, &delay_200_500_e200, "NV140FHM-T09"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b56, &delay_200_500_e80, "NT140FHM-N47"),
@@ -1951,9 +1967,14 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"),
EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50_p2e200, "MNC207QS1-1"),
+ EDP_PANEL_ENTRY('C', 'S', 'O', 0x1413, &delay_200_500_e50_p2e200, "MNE007JA1-2"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1100, &delay_200_500_e80_d50, "MNB601LS1-1"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x1103, &delay_200_500_e80_d50, "MNB601LS1-3"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1104, &delay_200_500_e50, "MNB601LS1-4"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x1448, &delay_200_500_e50, "MNE007QS3-7"),
+
+ EDP_PANEL_ENTRY('E', 'T', 'C', 0x0000, &delay_50_500_e200_d200_po2e335, "LP079QX1-SP0V"),
EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d51, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5b, &delay_200_500_e200, "MB116AN01"),
@@ -1993,6 +2014,8 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x1593, &delay_200_500_p2e100, "LQ134N1"),
+ EDP_PANEL_ENTRY('S', 'T', 'A', 0x0004, &delay_200_500_e200, "116KHD024006"),
+ EDP_PANEL_ENTRY('S', 'T', 'A', 0x0009, &delay_200_500_e250, "116QHD024002"),
EDP_PANEL_ENTRY('S', 'T', 'A', 0x0100, &delay_100_500_e200, "2081116HHD028001-51D"),
{ /* sentinal */ }
diff --git a/drivers/gpu/drm/panel/panel-himax-hx83102.c b/drivers/gpu/drm/panel/panel-himax-hx83102.c
index 3644a7544b93..66abfc44e424 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx83102.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx83102.c
@@ -24,6 +24,8 @@
#define HX83102_SETPOWER 0xb1
#define HX83102_SETDISP 0xb2
#define HX83102_SETCYC 0xb4
+#define HX83102_UNKNOWN_B6 0xb6
+#define HX83102_UNKNOWN_B8 0xb8
#define HX83102_SETEXTC 0xb9
#define HX83102_SETMIPI 0xba
#define HX83102_SETVDC 0xbc
@@ -43,6 +45,7 @@
#define HX83102_SETGIP1 0xd5
#define HX83102_SETGIP2 0xd6
#define HX83102_SETGIP3 0xd8
+#define HX83102_UNKNOWN_D9 0xd9
#define HX83102_SETGMA 0xe0
#define HX83102_UNKNOWN_E1 0xe1
#define HX83102_SETTP1 0xe7
@@ -291,6 +294,103 @@ static int boe_nv110wum_init(struct hx83102 *ctx)
return dsi_ctx.accum_err;
};
+static int csot_pna957qt1_1_init(struct hx83102 *ctx)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
+ msleep(60);
+
+ hx83102_enable_extended_cmds(&dsi_ctx, true);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_UNKNOWN_D9, 0xd2);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x2c, 0xb3, 0xb3, 0x31, 0xf1, 0x33,
+ 0xe0, 0x54, 0x36, 0x36, 0x3a, 0x3a, 0x32, 0x8b, 0x11, 0xe5,
+ 0x98);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xd9);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x8b, 0x33);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETDISP, 0x00, 0x47, 0xb0, 0x80, 0x00, 0x2c,
+ 0x80, 0x3c, 0x9f, 0x22, 0x20, 0x00, 0x00, 0x98, 0x51);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCYC, 0x41, 0x41, 0x41, 0x41, 0x64, 0x64,
+ 0x40, 0x84, 0x64, 0x84, 0x01, 0x9d, 0x01, 0x02, 0x01, 0x00,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETVDC, 0x1b, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_UNKNOWN_BE, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPTBA, 0xfc, 0xc4, 0x80, 0x9c, 0x36, 0x00,
+ 0x0d, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSTBA, 0x32, 0x32, 0x22, 0x11, 0x22, 0xa0,
+ 0x31, 0x08, 0xf5, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xcc);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTCON, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc6);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETRAMDMY, 0x97);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPWM, 0x00, 0x1e, 0x13, 0x88, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCLOCK, 0x08, 0x13, 0x07, 0x00, 0x0f,
+ 0x36);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPANEL, 0x02, 0x03, 0x44);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPCTRL, 0x07, 0x06, 0x00, 0x02, 0x04, 0x2c,
+ 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP0, 0x06, 0x00, 0x00, 0x00, 0x40, 0x04,
+ 0x08, 0x04, 0x08, 0x37, 0x07, 0x44, 0x37, 0x2b, 0x2b, 0x03,
+ 0x03, 0x32, 0x10, 0x22, 0x00, 0x25, 0x32, 0x10, 0x29, 0x00,
+ 0x29, 0x32, 0x10, 0x08, 0x00, 0x08, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP1, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x07, 0x06, 0x07, 0x06, 0x05, 0x04,
+ 0x05, 0x04, 0x03, 0x02, 0x03, 0x02, 0x01, 0x00, 0x01, 0x00,
+ 0x18, 0x18, 0x25, 0x24, 0x25, 0x24, 0x1f, 0x1f, 0x1f, 0x1f,
+ 0x1e, 0x1e, 0x1e, 0x1e, 0x20, 0x20, 0x20, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP3, 0x0a, 0x2a, 0xaa, 0x8a, 0xaa, 0xa0,
+ 0x0a, 0x2a, 0xaa, 0x8a, 0xaa, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGMA, 0x0a, 0x0e, 0x1a, 0x21, 0x28, 0x46,
+ 0x5c, 0x61, 0x63, 0x5e, 0x78, 0x7d, 0x80, 0x8e, 0x89, 0x90,
+ 0x98, 0xaa, 0xa8, 0x52, 0x59, 0x60, 0x6f, 0x06, 0x0a, 0x16,
+ 0x1d, 0x24, 0x46, 0x5c, 0x61, 0x6b, 0x66, 0x7c, 0x7d, 0x80,
+ 0x8e, 0x89, 0x90, 0x98, 0xaa, 0xa8, 0x52, 0x59, 0x60, 0x6f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTP1, 0xe0, 0x10, 0x10, 0x0d, 0x1e, 0x9d,
+ 0x02, 0x52, 0x9d, 0x14, 0x14);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x01, 0x7f, 0x11, 0xfd);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETMIPI, 0x4f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCLOCK, 0x86);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_UNKNOWN_D2, 0x64);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP0, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP3, 0x0a, 0x2a, 0xaa, 0x8a, 0xaa, 0xa0,
+ 0x0a, 0x2a, 0xaa, 0x8a, 0xaa, 0xa0, 0x05, 0x15, 0x55, 0x45,
+ 0x55, 0x50, 0x05, 0x15, 0x55, 0x45, 0x55, 0x50);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTP1, 0x02, 0x00, 0x24, 0x01, 0x7e, 0x0f,
+ 0x7c, 0x10, 0xa0, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCLOCK, 0x03, 0x07, 0x00, 0x10, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP3, 0x0f, 0x3f, 0xff, 0xcf, 0xff, 0xf0,
+ 0x0f, 0x3f, 0xff, 0xcf, 0xff, 0xf0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTP1, 0xfe, 0x01, 0xfe, 0x01, 0xfe, 0x01,
+ 0x00, 0x00, 0x00, 0x23, 0x00, 0x23, 0x81, 0x02, 0x40, 0x00,
+ 0x20, 0x9d, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETDISP, 0x66, 0x81);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc6);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCYC, 0x03, 0xff, 0xf8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP3, 0x0a, 0x2a, 0xaa, 0x8a, 0xaa, 0xa0,
+ 0x0a, 0x2a, 0xaa, 0x8a, 0xaa, 0xa0, 0x0f, 0x2a, 0xaa, 0x8a,
+ 0xaa, 0xf0, 0x0f, 0x2a, 0xaa, 0x8a, 0xaa, 0xf0, 0x0a, 0x2a,
+ 0xaa, 0x8a, 0xaa, 0xa0, 0x0a, 0x2a, 0xaa, 0x8a, 0xaa, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x00);
+ hx83102_enable_extended_cmds(&dsi_ctx, false);
+
+ mipi_dsi_msleep(&dsi_ctx, 60);
+
+ return dsi_ctx.accum_err;
+};
+
static int ivo_t109nw41_init(struct hx83102 *ctx)
{
struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
@@ -396,6 +496,211 @@ static int ivo_t109nw41_init(struct hx83102 *ctx)
return dsi_ctx.accum_err;
};
+static int kingdisplay_kd110n11_51ie_init(struct hx83102 *ctx)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
+ msleep(50);
+
+ hx83102_enable_extended_cmds(&dsi_ctx, true);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_UNKNOWN_D9, 0xd1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x2c, 0xb3, 0xb3, 0x31, 0xf1,
+ 0x33, 0xe0, 0x54, 0x36, 0x36, 0x3a, 0x3a, 0x32, 0x8b,
+ 0x11, 0xe5, 0x98);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xd9);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x8b, 0x33);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETDISP, 0x00, 0x47, 0xb0, 0x80, 0x00, 0x2c,
+ 0x80, 0x3c, 0x9f, 0x22, 0x20, 0x00, 0x00, 0x98, 0x51);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCYC, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64,
+ 0x40, 0x84, 0x64, 0x84, 0x01, 0x9d, 0x01, 0x02, 0x01, 0x00,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETVDC, 0x1b, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_UNKNOWN_BE, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPTBA, 0xfc, 0xc4, 0x80, 0x9c, 0x36, 0x00,
+ 0x0d, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSTBA, 0x32, 0x32, 0x22, 0x11, 0x22, 0xa0,
+ 0x31, 0x08, 0xf5, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xcc);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTCON, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc6);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETRAMDMY, 0x97);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPWM, 0x00, 0x1e, 0x13, 0x88, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCLOCK, 0x08, 0x13, 0x07, 0x00,
+ 0x0f, 0x36);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPANEL, 0x02, 0x03, 0x44);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPCTRL, 0x07, 0x06, 0x00, 0x02,
+ 0x04, 0x2c, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP0, 0x06, 0x00, 0x00, 0x00, 0x40, 0x04,
+ 0x08, 0x04, 0x08, 0x37, 0x07, 0x44, 0x37, 0x2b, 0x2b, 0x03,
+ 0x03, 0x32, 0x10, 0x22, 0x00, 0x25, 0x32, 0x10, 0x29, 0x00,
+ 0x29, 0x32, 0x10, 0x08, 0x00, 0x08, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP1, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x07, 0x06, 0x07, 0x06, 0x05, 0x04,
+ 0x05, 0x04, 0x03, 0x02, 0x03, 0x02, 0x01, 0x00, 0x01, 0x00,
+ 0x18, 0x18, 0x25, 0x24, 0x25, 0x24, 0x1f, 0x1f, 0x1f, 0x1f,
+ 0x1e, 0x1e, 0x1e, 0x1e, 0x20, 0x20, 0x20, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP3, 0x0a, 0x2a, 0xaa, 0x8a, 0xaa, 0xa0,
+ 0x0a, 0x2a, 0xaa, 0x8a, 0xaa, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTP1, 0xe0, 0x10, 0x10, 0x0d, 0x1e, 0x9d,
+ 0x02, 0x52, 0x9d, 0x14, 0x14);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x01, 0x7f, 0x11, 0xfd);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETMIPI, 0x4f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCLOCK, 0x86);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_UNKNOWN_D2, 0x64);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP0, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP3, 0x0a, 0x2a, 0xaa, 0x8a, 0xaa, 0xa0,
+ 0x0a, 0x2a, 0xaa, 0x8a, 0xaa, 0xa0, 0x05, 0x15, 0x55, 0x45,
+ 0x55, 0x50, 0x05, 0x15, 0x55, 0x45, 0x55, 0x50);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTP1, 0x02, 0x00, 0x24, 0x01, 0x7e, 0x0f,
+ 0x7c, 0x10, 0xa0, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCLOCK, 0x03, 0x07, 0x00, 0x10, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP3, 0x0f, 0x3f, 0xff, 0xcf, 0xff, 0xf0,
+ 0x0f, 0x3f, 0xff, 0xcf, 0xff, 0xf0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTP1, 0xfe, 0x01, 0xfe, 0x01, 0xfe, 0x01,
+ 0x00, 0x00, 0x00, 0x23, 0x00, 0x23, 0x81, 0x02, 0x40, 0x00,
+ 0x20, 0x9d, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETDISP, 0x66, 0x81);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc6);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCYC, 0x03, 0xff, 0xf8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP3, 0x0a, 0x2a, 0xaa, 0x8a, 0xaa, 0xa0,
+ 0x0a, 0x2a, 0xaa, 0x8a, 0xaa, 0xa0, 0x0f, 0x2a, 0xaa, 0x8a,
+ 0xaa, 0xf0, 0x0f, 0x2a, 0xaa, 0x8a, 0xaa, 0xf0, 0x0a, 0x2a,
+ 0xaa, 0x8a, 0xaa, 0xa0, 0x0a, 0x2a, 0xaa, 0x8a, 0xaa, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x00);
+ hx83102_enable_extended_cmds(&dsi_ctx, false);
+
+ return dsi_ctx.accum_err;
+}
+
+static int starry_2082109qfh040022_50e_init(struct hx83102 *ctx)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
+ msleep(50);
+
+ hx83102_enable_extended_cmds(&dsi_ctx, true);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_UNKNOWN_D9, 0xd1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x2c, 0xb5, 0xb5, 0x31, 0xf1, 0x33,
+ 0xc3, 0x57, 0x36, 0x36, 0x36, 0x36, 0x1a, 0x8b, 0x11, 0x65,
+ 0x00, 0x88, 0xfa, 0xff, 0xff, 0x8f, 0xff, 0x08, 0x3c, 0x33);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETDISP, 0x00, 0x47, 0xb0, 0x80, 0x00, 0x22,
+ 0x70, 0x3c, 0xa1, 0x22, 0x00, 0x00, 0x00, 0x88, 0xf4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCYC, 0x14, 0x16, 0x14, 0x50, 0x14, 0x50,
+ 0x0d, 0x6a, 0x0d, 0x6a, 0x01, 0x9e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_UNKNOWN_B6, 0x34, 0x34, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_UNKNOWN_B8, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xcd);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETMIPI, 0x84);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETVDC, 0x1b, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_UNKNOWN_BE, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPTBA, 0xfc, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSTBA, 0x38, 0x38, 0x22, 0x11, 0x33, 0xa0,
+ 0x61, 0x08, 0xf5, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xcc);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTCON, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc6);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETRAMDMY, 0x97);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPWM, 0x00, 0x1e, 0x30, 0xd4, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCLOCK, 0x08, 0x13, 0x07, 0x00, 0x0f,
+ 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPANEL, 0x02, 0x03, 0x44);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCASCADE, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPCTRL, 0x37, 0x06, 0x00, 0x02, 0x04,
+ 0x2c, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP0, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x3b, 0x03, 0x73, 0x3b, 0x21, 0x21, 0x03,
+ 0x03, 0x98, 0x10, 0x1d, 0x00, 0x1d, 0x32, 0x17, 0xa1, 0x07,
+ 0xa1, 0x43, 0x17, 0xa6, 0x07, 0xa6, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP1, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x2a, 0x2b, 0x1f, 0x1f,
+ 0x1e, 0x1e, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09,
+ 0x0a, 0x0b, 0x20, 0x21, 0x18, 0x18, 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP3, 0x02, 0xaa, 0xea, 0xaa, 0xaa, 0x00,
+ 0x02, 0xaa, 0xea, 0xaa, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTP1, 0x07, 0x10, 0x10, 0x2a, 0x32, 0x9f,
+ 0x01, 0x5a, 0x91, 0x14, 0x14, 0x00, 0x00, 0x00, 0x00, 0x12,
+ 0x05, 0x02, 0x02, 0x10, 0x33, 0x02, 0x04, 0x18, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x01, 0x7f, 0x11, 0xfd);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCLOCK, 0x86);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_UNKNOWN_D2, 0x3d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP0, 0x00, 0x00, 0x00, 0x80, 0x80, 0x0c,
+ 0xa1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP3, 0x03, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x03, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTP1, 0x02, 0x00, 0x2d, 0x01, 0x7f, 0x0f,
+ 0x7c, 0x10, 0xa0, 0x00, 0x00, 0x77, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPTBA, 0xf2);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCLOCK, 0x02, 0x00, 0x00, 0x10, 0x58);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_UNKNOWN_D2, 0x0a, 0x0a, 0x05, 0x03, 0x0a,
+ 0x0a, 0x01, 0x03, 0x01, 0x01, 0x05, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xcc);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP0, 0x03, 0x1f, 0xe0, 0x11, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP3, 0xab, 0xff, 0xff, 0xff, 0xff, 0xa0,
+ 0xab, 0xff, 0xff, 0xff, 0xff, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTP1, 0xfe, 0x01, 0xfe, 0x01, 0xfe, 0x01,
+ 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x81, 0x02, 0x40, 0x00,
+ 0x20, 0x9e, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc6);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETCYC, 0x03, 0xff, 0xf8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGIP3, 0xaa, 0xab, 0xea, 0xaa, 0xaa, 0xa0,
+ 0xaa, 0xab, 0xea, 0xaa, 0xaa, 0xa0, 0xaa, 0xbf, 0xff, 0xff,
+ 0xfe, 0xa0, 0xaa, 0xbf, 0xff, 0xff, 0xfe, 0xa0, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xa0, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_UNKNOWN_E1, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETMIPI, 0x96);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xc5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETMIPI, 0x4f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0xcc);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETMIPI, 0x84);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETSPCCMD, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETBANK, 0x00);
+ hx83102_enable_extended_cmds(&dsi_ctx, false);
+
+ mipi_dsi_msleep(&dsi_ctx, 110);
+
+ return dsi_ctx.accum_err;
+}
+
static const struct drm_display_mode starry_mode = {
.clock = 162680,
.hdisplay = 1200,
@@ -440,6 +745,28 @@ static const struct hx83102_panel_desc boe_nv110wum_desc = {
.init = boe_nv110wum_init,
};
+static const struct drm_display_mode csot_pna957qt1_1_default_mode = {
+ .clock = 177958,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 124,
+ .hsync_end = 1200 + 124 + 80,
+ .htotal = 1200 + 124 + 80 + 40,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 88,
+ .vsync_end = 1920 + 88 + 8,
+ .vtotal = 1920 + 88 + 8 + 38,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct hx83102_panel_desc csot_pna957qt1_1_desc = {
+ .modes = &csot_pna957qt1_1_default_mode,
+ .size = {
+ .width_mm = 147,
+ .height_mm = 235,
+ },
+ .init = csot_pna957qt1_1_init,
+};
+
static const struct drm_display_mode ivo_t109nw41_default_mode = {
.clock = 167700,
.hdisplay = 1200,
@@ -462,6 +789,50 @@ static const struct hx83102_panel_desc ivo_t109nw41_desc = {
.init = ivo_t109nw41_init,
};
+static const struct drm_display_mode kingdisplay_kd110n11_51ie_default_mode = {
+ .clock = 182750,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 124,
+ .hsync_end = 1200 + 124 + 80,
+ .htotal = 1200 + 124 + 80 + 80,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 88,
+ .vsync_end = 1920 + 88 + 8,
+ .vtotal = 1920 + 88 + 8 + 38,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct hx83102_panel_desc kingdisplay_kd110n11_51ie_desc = {
+ .modes = &kingdisplay_kd110n11_51ie_default_mode,
+ .size = {
+ .width_mm = 147,
+ .height_mm = 235,
+ },
+ .init = kingdisplay_kd110n11_51ie_init,
+};
+
+static const struct drm_display_mode starry_2082109qfh040022_50e_default_mode = {
+ .clock = 192050,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 160,
+ .hsync_end = 1200 + 160 + 66,
+ .htotal = 1200 + 160 + 66 + 120,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 115,
+ .vsync_end = 1920 + 115 + 8,
+ .vtotal = 1920 + 115 + 8 + 28,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct hx83102_panel_desc starry_2082109qfh040022_50e_desc = {
+ .modes = &starry_2082109qfh040022_50e_default_mode,
+ .size = {
+ .width_mm = 147,
+ .height_mm = 235,
+ },
+ .init = starry_2082109qfh040022_50e_init,
+};
+
static int hx83102_enable(struct drm_panel *panel)
{
msleep(130);
@@ -683,9 +1054,18 @@ static const struct of_device_id hx83102_of_match[] = {
{ .compatible = "boe,nv110wum-l60",
.data = &boe_nv110wum_desc
},
+ { .compatible = "csot,pna957qt1-1",
+ .data = &csot_pna957qt1_1_desc
+ },
{ .compatible = "ivo,t109nw41",
.data = &ivo_t109nw41_desc
},
+ { .compatible = "kingdisplay,kd110n11-51ie",
+ .data = &kingdisplay_kd110n11_51ie_desc
+ },
+ { .compatible = "starry,2082109qfh040022-50e",
+ .data = &starry_2082109qfh040022_50e_desc
+ },
{ .compatible = "starry,himax83102-j02",
.data = &starry_desc
},
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c
index 266a087fe14c..3c24a63b6be8 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c
@@ -607,7 +607,7 @@ static int ili9882t_add(struct ili9882t *ili)
ili->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(ili->enable_gpio)) {
- dev_err(dev, "cannot get reset-gpios %ld\n",
+ dev_err(dev, "cannot get enable-gpios %ld\n",
PTR_ERR(ili->enable_gpio));
return PTR_ERR(ili->enable_gpio);
}
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm67200.c b/drivers/gpu/drm/panel/panel-raydium-rm67200.c
new file mode 100644
index 000000000000..64b685dc11f6
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-raydium-rm67200.c
@@ -0,0 +1,499 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2024 Collabora
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct raydium_rm67200_panel_info {
+ struct drm_display_mode mode;
+ const struct regulator_bulk_data *regulators;
+ int num_regulators;
+ void (*panel_setup)(struct mipi_dsi_multi_context *ctx);
+};
+
+struct raydium_rm67200 {
+ struct drm_panel panel;
+ const struct raydium_rm67200_panel_info *panel_info;
+ struct mipi_dsi_device *dsi;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data *supplies;
+ int num_supplies;
+};
+
+static inline struct raydium_rm67200 *to_raydium_rm67200(struct drm_panel *panel)
+{
+ return container_of(panel, struct raydium_rm67200, panel);
+}
+
+static void raydium_rm67200_reset(struct raydium_rm67200 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(60);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ msleep(60);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(60);
+}
+
+static void raydium_rm67200_write(struct mipi_dsi_multi_context *ctx,
+ u8 arg1, u8 arg2)
+{
+ u8 d[] = { arg1, arg2 };
+
+ mipi_dsi_generic_write_multi(ctx, d, ARRAY_SIZE(d));
+}
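/*
 * Annotation (not part of the patch): each raydium_rm67200_write() call in
 * the setup sequences below sends a two-byte generic DSI write, i.e. a
 * <register, value> pair; errors accumulate in the shared
 * mipi_dsi_multi_context rather than being checked per call.
 */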
+
+static void w552793baa_setup(struct mipi_dsi_multi_context *ctx)
+{
+ raydium_rm67200_write(ctx, 0xfe, 0x21);
+ raydium_rm67200_write(ctx, 0x04, 0x00);
+ raydium_rm67200_write(ctx, 0x00, 0x64);
+ raydium_rm67200_write(ctx, 0x2a, 0x00);
+ raydium_rm67200_write(ctx, 0x26, 0x64);
+ raydium_rm67200_write(ctx, 0x54, 0x00);
+ raydium_rm67200_write(ctx, 0x50, 0x64);
+ raydium_rm67200_write(ctx, 0x7b, 0x00);
+ raydium_rm67200_write(ctx, 0x77, 0x64);
+ raydium_rm67200_write(ctx, 0xa2, 0x00);
+ raydium_rm67200_write(ctx, 0x9d, 0x64);
+ raydium_rm67200_write(ctx, 0xc9, 0x00);
+ raydium_rm67200_write(ctx, 0xc5, 0x64);
+ raydium_rm67200_write(ctx, 0x01, 0x71);
+ raydium_rm67200_write(ctx, 0x27, 0x71);
+ raydium_rm67200_write(ctx, 0x51, 0x71);
+ raydium_rm67200_write(ctx, 0x78, 0x71);
+ raydium_rm67200_write(ctx, 0x9e, 0x71);
+ raydium_rm67200_write(ctx, 0xc6, 0x71);
+ raydium_rm67200_write(ctx, 0x02, 0x89);
+ raydium_rm67200_write(ctx, 0x28, 0x89);
+ raydium_rm67200_write(ctx, 0x52, 0x89);
+ raydium_rm67200_write(ctx, 0x79, 0x89);
+ raydium_rm67200_write(ctx, 0x9f, 0x89);
+ raydium_rm67200_write(ctx, 0xc7, 0x89);
+ raydium_rm67200_write(ctx, 0x03, 0x9e);
+ raydium_rm67200_write(ctx, 0x29, 0x9e);
+ raydium_rm67200_write(ctx, 0x53, 0x9e);
+ raydium_rm67200_write(ctx, 0x7a, 0x9e);
+ raydium_rm67200_write(ctx, 0xa0, 0x9e);
+ raydium_rm67200_write(ctx, 0xc8, 0x9e);
+ raydium_rm67200_write(ctx, 0x09, 0x00);
+ raydium_rm67200_write(ctx, 0x05, 0xb0);
+ raydium_rm67200_write(ctx, 0x31, 0x00);
+ raydium_rm67200_write(ctx, 0x2b, 0xb0);
+ raydium_rm67200_write(ctx, 0x5a, 0x00);
+ raydium_rm67200_write(ctx, 0x55, 0xb0);
+ raydium_rm67200_write(ctx, 0x80, 0x00);
+ raydium_rm67200_write(ctx, 0x7c, 0xb0);
+ raydium_rm67200_write(ctx, 0xa7, 0x00);
+ raydium_rm67200_write(ctx, 0xa3, 0xb0);
+ raydium_rm67200_write(ctx, 0xce, 0x00);
+ raydium_rm67200_write(ctx, 0xca, 0xb0);
+ raydium_rm67200_write(ctx, 0x06, 0xc0);
+ raydium_rm67200_write(ctx, 0x2d, 0xc0);
+ raydium_rm67200_write(ctx, 0x56, 0xc0);
+ raydium_rm67200_write(ctx, 0x7d, 0xc0);
+ raydium_rm67200_write(ctx, 0xa4, 0xc0);
+ raydium_rm67200_write(ctx, 0xcb, 0xc0);
+ raydium_rm67200_write(ctx, 0x07, 0xcf);
+ raydium_rm67200_write(ctx, 0x2f, 0xcf);
+ raydium_rm67200_write(ctx, 0x58, 0xcf);
+ raydium_rm67200_write(ctx, 0x7e, 0xcf);
+ raydium_rm67200_write(ctx, 0xa5, 0xcf);
+ raydium_rm67200_write(ctx, 0xcc, 0xcf);
+ raydium_rm67200_write(ctx, 0x08, 0xdd);
+ raydium_rm67200_write(ctx, 0x30, 0xdd);
+ raydium_rm67200_write(ctx, 0x59, 0xdd);
+ raydium_rm67200_write(ctx, 0x7f, 0xdd);
+ raydium_rm67200_write(ctx, 0xa6, 0xdd);
+ raydium_rm67200_write(ctx, 0xcd, 0xdd);
+ raydium_rm67200_write(ctx, 0x0e, 0x15);
+ raydium_rm67200_write(ctx, 0x0a, 0xe9);
+ raydium_rm67200_write(ctx, 0x36, 0x15);
+ raydium_rm67200_write(ctx, 0x32, 0xe9);
+ raydium_rm67200_write(ctx, 0x5f, 0x15);
+ raydium_rm67200_write(ctx, 0x5b, 0xe9);
+ raydium_rm67200_write(ctx, 0x85, 0x15);
+ raydium_rm67200_write(ctx, 0x81, 0xe9);
+ raydium_rm67200_write(ctx, 0xad, 0x15);
+ raydium_rm67200_write(ctx, 0xa9, 0xe9);
+ raydium_rm67200_write(ctx, 0xd3, 0x15);
+ raydium_rm67200_write(ctx, 0xcf, 0xe9);
+ raydium_rm67200_write(ctx, 0x0b, 0x14);
+ raydium_rm67200_write(ctx, 0x33, 0x14);
+ raydium_rm67200_write(ctx, 0x5c, 0x14);
+ raydium_rm67200_write(ctx, 0x82, 0x14);
+ raydium_rm67200_write(ctx, 0xaa, 0x14);
+ raydium_rm67200_write(ctx, 0xd0, 0x14);
+ raydium_rm67200_write(ctx, 0x0c, 0x36);
+ raydium_rm67200_write(ctx, 0x34, 0x36);
+ raydium_rm67200_write(ctx, 0x5d, 0x36);
+ raydium_rm67200_write(ctx, 0x83, 0x36);
+ raydium_rm67200_write(ctx, 0xab, 0x36);
+ raydium_rm67200_write(ctx, 0xd1, 0x36);
+ raydium_rm67200_write(ctx, 0x0d, 0x6b);
+ raydium_rm67200_write(ctx, 0x35, 0x6b);
+ raydium_rm67200_write(ctx, 0x5e, 0x6b);
+ raydium_rm67200_write(ctx, 0x84, 0x6b);
+ raydium_rm67200_write(ctx, 0xac, 0x6b);
+ raydium_rm67200_write(ctx, 0xd2, 0x6b);
+ raydium_rm67200_write(ctx, 0x13, 0x5a);
+ raydium_rm67200_write(ctx, 0x0f, 0x94);
+ raydium_rm67200_write(ctx, 0x3b, 0x5a);
+ raydium_rm67200_write(ctx, 0x37, 0x94);
+ raydium_rm67200_write(ctx, 0x64, 0x5a);
+ raydium_rm67200_write(ctx, 0x60, 0x94);
+ raydium_rm67200_write(ctx, 0x8a, 0x5a);
+ raydium_rm67200_write(ctx, 0x86, 0x94);
+ raydium_rm67200_write(ctx, 0xb2, 0x5a);
+ raydium_rm67200_write(ctx, 0xae, 0x94);
+ raydium_rm67200_write(ctx, 0xd8, 0x5a);
+ raydium_rm67200_write(ctx, 0xd4, 0x94);
+ raydium_rm67200_write(ctx, 0x10, 0xd1);
+ raydium_rm67200_write(ctx, 0x38, 0xd1);
+ raydium_rm67200_write(ctx, 0x61, 0xd1);
+ raydium_rm67200_write(ctx, 0x87, 0xd1);
+ raydium_rm67200_write(ctx, 0xaf, 0xd1);
+ raydium_rm67200_write(ctx, 0xd5, 0xd1);
+ raydium_rm67200_write(ctx, 0x11, 0x04);
+ raydium_rm67200_write(ctx, 0x39, 0x04);
+ raydium_rm67200_write(ctx, 0x62, 0x04);
+ raydium_rm67200_write(ctx, 0x88, 0x04);
+ raydium_rm67200_write(ctx, 0xb0, 0x04);
+ raydium_rm67200_write(ctx, 0xd6, 0x04);
+ raydium_rm67200_write(ctx, 0x12, 0x05);
+ raydium_rm67200_write(ctx, 0x3a, 0x05);
+ raydium_rm67200_write(ctx, 0x63, 0x05);
+ raydium_rm67200_write(ctx, 0x89, 0x05);
+ raydium_rm67200_write(ctx, 0xb1, 0x05);
+ raydium_rm67200_write(ctx, 0xd7, 0x05);
+ raydium_rm67200_write(ctx, 0x18, 0xaa);
+ raydium_rm67200_write(ctx, 0x14, 0x36);
+ raydium_rm67200_write(ctx, 0x42, 0xaa);
+ raydium_rm67200_write(ctx, 0x3d, 0x36);
+ raydium_rm67200_write(ctx, 0x69, 0xaa);
+ raydium_rm67200_write(ctx, 0x65, 0x36);
+ raydium_rm67200_write(ctx, 0x8f, 0xaa);
+ raydium_rm67200_write(ctx, 0x8b, 0x36);
+ raydium_rm67200_write(ctx, 0xb7, 0xaa);
+ raydium_rm67200_write(ctx, 0xb3, 0x36);
+ raydium_rm67200_write(ctx, 0xdd, 0xaa);
+ raydium_rm67200_write(ctx, 0xd9, 0x36);
+ raydium_rm67200_write(ctx, 0x15, 0x74);
+ raydium_rm67200_write(ctx, 0x3f, 0x74);
+ raydium_rm67200_write(ctx, 0x66, 0x74);
+ raydium_rm67200_write(ctx, 0x8c, 0x74);
+ raydium_rm67200_write(ctx, 0xb4, 0x74);
+ raydium_rm67200_write(ctx, 0xda, 0x74);
+ raydium_rm67200_write(ctx, 0x16, 0x9f);
+ raydium_rm67200_write(ctx, 0x40, 0x9f);
+ raydium_rm67200_write(ctx, 0x67, 0x9f);
+ raydium_rm67200_write(ctx, 0x8d, 0x9f);
+ raydium_rm67200_write(ctx, 0xb5, 0x9f);
+ raydium_rm67200_write(ctx, 0xdb, 0x9f);
+ raydium_rm67200_write(ctx, 0x17, 0xdc);
+ raydium_rm67200_write(ctx, 0x41, 0xdc);
+ raydium_rm67200_write(ctx, 0x68, 0xdc);
+ raydium_rm67200_write(ctx, 0x8e, 0xdc);
+ raydium_rm67200_write(ctx, 0xb6, 0xdc);
+ raydium_rm67200_write(ctx, 0xdc, 0xdc);
+ raydium_rm67200_write(ctx, 0x1d, 0xff);
+ raydium_rm67200_write(ctx, 0x19, 0x03);
+ raydium_rm67200_write(ctx, 0x47, 0xff);
+ raydium_rm67200_write(ctx, 0x43, 0x03);
+ raydium_rm67200_write(ctx, 0x6e, 0xff);
+ raydium_rm67200_write(ctx, 0x6a, 0x03);
+ raydium_rm67200_write(ctx, 0x94, 0xff);
+ raydium_rm67200_write(ctx, 0x90, 0x03);
+ raydium_rm67200_write(ctx, 0xbc, 0xff);
+ raydium_rm67200_write(ctx, 0xb8, 0x03);
+ raydium_rm67200_write(ctx, 0xe2, 0xff);
+ raydium_rm67200_write(ctx, 0xde, 0x03);
+ raydium_rm67200_write(ctx, 0x1a, 0x35);
+ raydium_rm67200_write(ctx, 0x44, 0x35);
+ raydium_rm67200_write(ctx, 0x6b, 0x35);
+ raydium_rm67200_write(ctx, 0x91, 0x35);
+ raydium_rm67200_write(ctx, 0xb9, 0x35);
+ raydium_rm67200_write(ctx, 0xdf, 0x35);
+ raydium_rm67200_write(ctx, 0x1b, 0x45);
+ raydium_rm67200_write(ctx, 0x45, 0x45);
+ raydium_rm67200_write(ctx, 0x6c, 0x45);
+ raydium_rm67200_write(ctx, 0x92, 0x45);
+ raydium_rm67200_write(ctx, 0xba, 0x45);
+ raydium_rm67200_write(ctx, 0xe0, 0x45);
+ raydium_rm67200_write(ctx, 0x1c, 0x55);
+ raydium_rm67200_write(ctx, 0x46, 0x55);
+ raydium_rm67200_write(ctx, 0x6d, 0x55);
+ raydium_rm67200_write(ctx, 0x93, 0x55);
+ raydium_rm67200_write(ctx, 0xbb, 0x55);
+ raydium_rm67200_write(ctx, 0xe1, 0x55);
+ raydium_rm67200_write(ctx, 0x22, 0xff);
+ raydium_rm67200_write(ctx, 0x1e, 0x68);
+ raydium_rm67200_write(ctx, 0x4c, 0xff);
+ raydium_rm67200_write(ctx, 0x48, 0x68);
+ raydium_rm67200_write(ctx, 0x73, 0xff);
+ raydium_rm67200_write(ctx, 0x6f, 0x68);
+ raydium_rm67200_write(ctx, 0x99, 0xff);
+ raydium_rm67200_write(ctx, 0x95, 0x68);
+ raydium_rm67200_write(ctx, 0xc1, 0xff);
+ raydium_rm67200_write(ctx, 0xbd, 0x68);
+ raydium_rm67200_write(ctx, 0xe7, 0xff);
+ raydium_rm67200_write(ctx, 0xe3, 0x68);
+ raydium_rm67200_write(ctx, 0x1f, 0x7e);
+ raydium_rm67200_write(ctx, 0x49, 0x7e);
+ raydium_rm67200_write(ctx, 0x70, 0x7e);
+ raydium_rm67200_write(ctx, 0x96, 0x7e);
+ raydium_rm67200_write(ctx, 0xbe, 0x7e);
+ raydium_rm67200_write(ctx, 0xe4, 0x7e);
+ raydium_rm67200_write(ctx, 0x20, 0x97);
+ raydium_rm67200_write(ctx, 0x4a, 0x97);
+ raydium_rm67200_write(ctx, 0x71, 0x97);
+ raydium_rm67200_write(ctx, 0x97, 0x97);
+ raydium_rm67200_write(ctx, 0xbf, 0x97);
+ raydium_rm67200_write(ctx, 0xe5, 0x97);
+ raydium_rm67200_write(ctx, 0x21, 0xb5);
+ raydium_rm67200_write(ctx, 0x4b, 0xb5);
+ raydium_rm67200_write(ctx, 0x72, 0xb5);
+ raydium_rm67200_write(ctx, 0x98, 0xb5);
+ raydium_rm67200_write(ctx, 0xc0, 0xb5);
+ raydium_rm67200_write(ctx, 0xe6, 0xb5);
+ raydium_rm67200_write(ctx, 0x25, 0xf0);
+ raydium_rm67200_write(ctx, 0x23, 0xe8);
+ raydium_rm67200_write(ctx, 0x4f, 0xf0);
+ raydium_rm67200_write(ctx, 0x4d, 0xe8);
+ raydium_rm67200_write(ctx, 0x76, 0xf0);
+ raydium_rm67200_write(ctx, 0x74, 0xe8);
+ raydium_rm67200_write(ctx, 0x9c, 0xf0);
+ raydium_rm67200_write(ctx, 0x9a, 0xe8);
+ raydium_rm67200_write(ctx, 0xc4, 0xf0);
+ raydium_rm67200_write(ctx, 0xc2, 0xe8);
+ raydium_rm67200_write(ctx, 0xea, 0xf0);
+ raydium_rm67200_write(ctx, 0xe8, 0xe8);
+ raydium_rm67200_write(ctx, 0x24, 0xff);
+ raydium_rm67200_write(ctx, 0x4e, 0xff);
+ raydium_rm67200_write(ctx, 0x75, 0xff);
+ raydium_rm67200_write(ctx, 0x9b, 0xff);
+ raydium_rm67200_write(ctx, 0xc3, 0xff);
+ raydium_rm67200_write(ctx, 0xe9, 0xff);
+ raydium_rm67200_write(ctx, 0xfe, 0x3d);
+ raydium_rm67200_write(ctx, 0x00, 0x04);
+ raydium_rm67200_write(ctx, 0xfe, 0x23);
+ raydium_rm67200_write(ctx, 0x08, 0x82);
+ raydium_rm67200_write(ctx, 0x0a, 0x00);
+ raydium_rm67200_write(ctx, 0x0b, 0x00);
+ raydium_rm67200_write(ctx, 0x0c, 0x01);
+ raydium_rm67200_write(ctx, 0x16, 0x00);
+ raydium_rm67200_write(ctx, 0x18, 0x02);
+ raydium_rm67200_write(ctx, 0x1b, 0x04);
+ raydium_rm67200_write(ctx, 0x19, 0x04);
+ raydium_rm67200_write(ctx, 0x1c, 0x81);
+ raydium_rm67200_write(ctx, 0x1f, 0x00);
+ raydium_rm67200_write(ctx, 0x20, 0x03);
+ raydium_rm67200_write(ctx, 0x23, 0x04);
+ raydium_rm67200_write(ctx, 0x21, 0x01);
+ raydium_rm67200_write(ctx, 0x54, 0x63);
+ raydium_rm67200_write(ctx, 0x55, 0x54);
+ raydium_rm67200_write(ctx, 0x6e, 0x45);
+ raydium_rm67200_write(ctx, 0x6d, 0x36);
+ raydium_rm67200_write(ctx, 0xfe, 0x3d);
+ raydium_rm67200_write(ctx, 0x55, 0x78);
+ raydium_rm67200_write(ctx, 0xfe, 0x20);
+ raydium_rm67200_write(ctx, 0x26, 0x30);
+ raydium_rm67200_write(ctx, 0xfe, 0x3d);
+ raydium_rm67200_write(ctx, 0x20, 0x71);
+ raydium_rm67200_write(ctx, 0x50, 0x8f);
+ raydium_rm67200_write(ctx, 0x51, 0x8f);
+ raydium_rm67200_write(ctx, 0xfe, 0x00);
+ raydium_rm67200_write(ctx, 0x35, 0x00);
+}
+
+static int raydium_rm67200_prepare(struct drm_panel *panel)
+{
+ struct raydium_rm67200 *ctx = to_raydium_rm67200(panel);
+ int ret;
+
+ ret = regulator_bulk_enable(ctx->num_supplies, ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ raydium_rm67200_reset(ctx);
+
+ msleep(60);
+
+ return 0;
+}
+
+static int raydium_rm67200_unprepare(struct drm_panel *panel)
+{
+ struct raydium_rm67200 *ctx = to_raydium_rm67200(panel);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ctx->num_supplies, ctx->supplies);
+
+ msleep(60);
+
+ return 0;
+}
+
+static int raydium_rm67200_enable(struct drm_panel *panel)
+{
+ struct raydium_rm67200 *rm67200 = to_raydium_rm67200(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = rm67200->dsi };
+
+ rm67200->panel_info->panel_setup(&ctx);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 120);
+ mipi_dsi_dcs_set_display_on_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 30);
+
+ return ctx.accum_err;
+}
+
+static int raydium_rm67200_disable(struct drm_panel *panel)
+{
+ struct raydium_rm67200 *rm67200 = to_raydium_rm67200(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = rm67200->dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&ctx);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 60);
+
+ return ctx.accum_err;
+}
+
+static int raydium_rm67200_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct raydium_rm67200 *ctx = to_raydium_rm67200(panel);
+
+ return drm_connector_helper_get_modes_fixed(connector, &ctx->panel_info->mode);
+}
+
+static const struct drm_panel_funcs raydium_rm67200_funcs = {
+ .prepare = raydium_rm67200_prepare,
+ .unprepare = raydium_rm67200_unprepare,
+ .get_modes = raydium_rm67200_get_modes,
+ .enable = raydium_rm67200_enable,
+ .disable = raydium_rm67200_disable,
+};
+
+static int raydium_rm67200_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct raydium_rm67200 *ctx;
+ int ret = 0;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->panel_info = device_get_match_data(dev);
+ if (!ctx->panel_info)
+ return -EINVAL;
+
+ ctx->num_supplies = ctx->panel_info->num_regulators;
+ ret = devm_regulator_bulk_get_const(&dsi->dev,
+ ctx->panel_info->num_regulators,
+ ctx->panel_info->regulators,
+ &ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM;
+ ctx->panel.prepare_prev_first = true;
+
+ drm_panel_init(&ctx->panel, dev, &raydium_rm67200_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ }
+
+ return ret;
+}
+
+static void raydium_rm67200_remove(struct mipi_dsi_device *dsi)
+{
+ struct raydium_rm67200 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct regulator_bulk_data w552793baa_regulators[] = {
+ { .supply = "vdd", }, /* 2.8V */
+ { .supply = "iovcc", }, /* 1.8V */
+ { .supply = "vsp", }, /* +5.5V */
+ { .supply = "vsn", }, /* -5.5V */
+};
+
+static const struct raydium_rm67200_panel_info w552793baa_info = {
+ .mode = {
+ .clock = 132000,
+ .hdisplay = 1080,
+ .hsync_start = 1095,
+ .hsync_end = 1125,
+ .htotal = 1129,
+ .vdisplay = 1920,
+ .vsync_start = 1935,
+ .vsync_end = 1950,
+ .vtotal = 1952,
+ .width_mm = 68, /* 68.04mm */
+ .height_mm = 121, /* 120.96mm */
+ .type = DRM_MODE_TYPE_DRIVER,
+ },
+ .regulators = w552793baa_regulators,
+ .num_regulators = ARRAY_SIZE(w552793baa_regulators),
+ .panel_setup = w552793baa_setup,
+};
+
+static const struct of_device_id raydium_rm67200_of_match[] = {
+ { .compatible = "wanchanglong,w552793baa", .data = &w552793baa_info },
+ { /*sentinel*/ }
+};
+MODULE_DEVICE_TABLE(of, raydium_rm67200_of_match);
+
+static struct mipi_dsi_driver raydium_rm67200_driver = {
+ .probe = raydium_rm67200_probe,
+ .remove = raydium_rm67200_remove,
+ .driver = {
+ .name = "panel-raydium-rm67200",
+ .of_match_table = raydium_rm67200_of_match,
+ },
+};
+module_mipi_dsi_driver(raydium_rm67200_driver);
+
+MODULE_AUTHOR("Sebastian Reichel <sebastian.reichel@collabora.com>");
+MODULE_DESCRIPTION("DRM driver for RM67200-equipped DSI panels");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
index d2df227abbea..57b1a899bbdc 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
@@ -39,91 +39,66 @@ static void s6e88a0_ams452ef01_reset(struct s6e88a0_ams452ef01 *ctx)
static int s6e88a0_ams452ef01_on(struct s6e88a0_ams452ef01 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a); // enable LEVEL2 commands
- mipi_dsi_dcs_write_seq(dsi, 0xcc, 0x4c); // set Pixel Clock Divider polarity
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a); // enable LEVEL2 commands
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcc, 0x4c); // set Pixel Clock Divider polarity
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- msleep(120);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
// set default brightness/gamma
- mipi_dsi_dcs_write_seq(dsi, 0xca,
- 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, // V255 RR,GG,BB
- 0x80, 0x80, 0x80, // V203 R,G,B
- 0x80, 0x80, 0x80, // V151 R,G,B
- 0x80, 0x80, 0x80, // V87 R,G,B
- 0x80, 0x80, 0x80, // V51 R,G,B
- 0x80, 0x80, 0x80, // V35 R,G,B
- 0x80, 0x80, 0x80, // V23 R,G,B
- 0x80, 0x80, 0x80, // V11 R,G,B
- 0x6b, 0x68, 0x71, // V3 R,G,B
- 0x00, 0x00, 0x00); // V1 R,G,B
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xca,
+ 0x01, 0x00, 0x01, 0x00, 0x01, 0x00,// V255 RR,GG,BB
+ 0x80, 0x80, 0x80, // V203 R,G,B
+ 0x80, 0x80, 0x80, // V151 R,G,B
+ 0x80, 0x80, 0x80, // V87 R,G,B
+ 0x80, 0x80, 0x80, // V51 R,G,B
+ 0x80, 0x80, 0x80, // V35 R,G,B
+ 0x80, 0x80, 0x80, // V23 R,G,B
+ 0x80, 0x80, 0x80, // V11 R,G,B
+ 0x6b, 0x68, 0x71, // V3 R,G,B
+ 0x00, 0x00, 0x00); // V1 R,G,B
// set default Amoled Off Ratio
- mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x40, 0x0a, 0x17, 0x00, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x2c, 0x0b); // set default elvss voltage
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xf7, 0x03); // gamma/aor update
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5); // disable LEVEL2 commands
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x40, 0x0a, 0x17, 0x00, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0x2c, 0x0b); // set default elvss voltage
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf7, 0x03); // gamma/aor update
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5); // disable LEVEL2 commands
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
- return 0;
+ return dsi_ctx.accum_err;
}
-static int s6e88a0_ams452ef01_off(struct s6e88a0_ams452ef01 *ctx)
+static void s6e88a0_ams452ef01_off(struct s6e88a0_ams452ef01 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- msleep(35);
-
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- msleep(120);
-
- return 0;
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 35);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
}
static int s6e88a0_ams452ef01_prepare(struct drm_panel *panel)
{
struct s6e88a0_ams452ef01 *ctx = to_s6e88a0_ams452ef01(panel);
- struct device *dev = &ctx->dsi->dev;
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
- if (ret < 0) {
- dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ if (ret < 0)
return ret;
- }
s6e88a0_ams452ef01_reset(ctx);
ret = s6e88a0_ams452ef01_on(ctx);
if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
gpiod_set_value_cansleep(ctx->reset_gpio, 0);
regulator_bulk_disable(ARRAY_SIZE(ctx->supplies),
ctx->supplies);
@@ -136,12 +111,8 @@ static int s6e88a0_ams452ef01_prepare(struct drm_panel *panel)
static int s6e88a0_ams452ef01_unprepare(struct drm_panel *panel)
{
struct s6e88a0_ams452ef01 *ctx = to_s6e88a0_ams452ef01(panel);
- struct device *dev = &ctx->dsi->dev;
- int ret;
- ret = s6e88a0_ams452ef01_off(ctx);
- if (ret < 0)
- dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+ s6e88a0_ams452ef01_off(ctx);
gpiod_set_value_cansleep(ctx->reset_gpio, 0);
regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c b/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
index 74c760ee0c2d..0b4e0983639b 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
@@ -44,60 +44,39 @@ static void sharp_ls060_reset(struct sharp_ls060 *ctx)
static int sharp_ls060_on(struct sharp_ls060 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x13);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_MEMORY_START);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbb, 0x13);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_MEMORY_START);
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- msleep(120);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
- msleep(50);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 50);
- return 0;
+ return dsi_ctx.accum_err;
}
-static int sharp_ls060_off(struct sharp_ls060 *ctx)
+static void sharp_ls060_off(struct sharp_ls060 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- usleep_range(2000, 3000);
-
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- msleep(121);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_usleep_range(&dsi_ctx, 2000, 3000);
- return 0;
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 121);
}
static int sharp_ls060_prepare(struct drm_panel *panel)
{
struct sharp_ls060 *ctx = to_sharp_ls060(panel);
- struct device *dev = &ctx->dsi->dev;
int ret;
ret = regulator_enable(ctx->vddi_supply);
@@ -125,10 +104,8 @@ static int sharp_ls060_prepare(struct drm_panel *panel)
sharp_ls060_reset(ctx);
ret = sharp_ls060_on(ctx);
- if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ if (ret < 0)
goto err_on;
- }
return 0;
@@ -154,12 +131,8 @@ err_avdd:
static int sharp_ls060_unprepare(struct drm_panel *panel)
{
struct sharp_ls060 *ctx = to_sharp_ls060(panel);
- struct device *dev = &ctx->dsi->dev;
- int ret;
- ret = sharp_ls060_off(ctx);
- if (ret < 0)
- dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+ sharp_ls060_off(ctx);
regulator_disable(ctx->vddh_supply);
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 9b2f128fd309..232b03c1a259 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -1374,6 +1374,64 @@ static const struct panel_desc bananapi_s070wv20_ct16 = {
},
};
+static const struct display_timing boe_av101hdt_a10_timing = {
+ .pixelclock = { 74210000, 75330000, 76780000, },
+ .hactive = { 1280, 1280, 1280, },
+ .hfront_porch = { 10, 42, 33, },
+ .hback_porch = { 10, 18, 33, },
+ .hsync_len = { 30, 10, 30, },
+ .vactive = { 720, 720, 720, },
+ .vfront_porch = { 200, 183, 200, },
+ .vback_porch = { 8, 8, 8, },
+ .vsync_len = { 2, 19, 2, },
+ .flags = DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
+};
+
+static const struct panel_desc boe_av101hdt_a10 = {
+ .timings = &boe_av101hdt_a10_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 224,
+ .height = 126,
+ },
+ .delay = {
+ .enable = 50,
+ .disable = 50,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
+static const struct display_timing boe_av123z7m_n17_timing = {
+ .pixelclock = { 86600000, 88000000, 90800000, },
+ .hactive = { 1920, 1920, 1920, },
+ .hfront_porch = { 10, 10, 10, },
+ .hback_porch = { 10, 10, 10, },
+ .hsync_len = { 9, 12, 25, },
+ .vactive = { 720, 720, 720, },
+ .vfront_porch = { 7, 10, 13, },
+ .vback_porch = { 7, 10, 13, },
+ .vsync_len = { 7, 11, 14, },
+ .flags = DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
+};
+
+static const struct panel_desc boe_av123z7m_n17 = {
+ .timings = &boe_av123z7m_n17_timing,
+ .bpc = 8,
+ .num_timings = 1,
+ .size = {
+ .width = 292,
+ .height = 110,
+ },
+ .delay = {
+ .prepare = 50,
+ .disable = 50,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode boe_bp101wx1_100_mode = {
.clock = 78945,
.hdisplay = 1280,
@@ -4814,6 +4872,12 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "bananapi,s070wv20-ct16",
.data = &bananapi_s070wv20_ct16,
}, {
+ .compatible = "boe,av101hdt-a10",
+ .data = &boe_av101hdt_a10,
+ }, {
+ .compatible = "boe,av123z7m-n17",
+ .data = &boe_av123z7m_n17,
+ }, {
.compatible = "boe,bp082wx1-100",
.data = &boe_bp082wx1_100,
}, {
diff --git a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
index 472195d4bbbe..97f4bb4e1029 100644
--- a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
+++ b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
@@ -47,93 +47,40 @@ static inline struct sony_td4353_jdi *to_sony_td4353_jdi(struct drm_panel *panel
static int sony_td4353_jdi_on(struct sony_td4353_jdi *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_column_address(dsi, 0x0000, 1080 - 1);
- if (ret < 0) {
- dev_err(dev, "Failed to set column address: %d\n", ret);
- return ret;
- }
-
- ret = mipi_dsi_dcs_set_page_address(dsi, 0x0000, 2160 - 1);
- if (ret < 0) {
- dev_err(dev, "Failed to set page address: %d\n", ret);
- return ret;
- }
-
- ret = mipi_dsi_dcs_set_tear_scanline(dsi, 0);
- if (ret < 0) {
- dev_err(dev, "Failed to set tear scanline: %d\n", ret);
- return ret;
- }
-
- ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (ret < 0) {
- dev_err(dev, "Failed to set tear on: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_column_address_multi(&dsi_ctx, 0x0000, 1080 - 1);
+ mipi_dsi_dcs_set_page_address_multi(&dsi_ctx, 0x0000, 2160 - 1);
+ mipi_dsi_dcs_set_tear_scanline_multi(&dsi_ctx, 0);
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
-
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x77);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS,
- 0x00, 0x00, 0x08, 0x6f);
-
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- msleep(70);
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x77);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS,
+ 0x00, 0x00, 0x08, 0x6f);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_MEMORY_START);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_MEMORY_START);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to turn display on: %d\n", ret);
- return ret;
- }
-
- return 0;
+ return dsi_ctx.accum_err;
}
-static int sony_td4353_jdi_off(struct sony_td4353_jdi *ctx)
+static void sony_td4353_jdi_off(struct sony_td4353_jdi *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- msleep(22);
-
- ret = mipi_dsi_dcs_set_tear_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set tear off: %d\n", ret);
- return ret;
- }
-
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- msleep(80);
-
- return 0;
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 22);
+ mipi_dsi_dcs_set_tear_off_multi(&dsi_ctx);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 80);
}
static void sony_td4353_assert_reset_gpios(struct sony_td4353_jdi *ctx, int mode)
@@ -146,14 +93,11 @@ static void sony_td4353_assert_reset_gpios(struct sony_td4353_jdi *ctx, int mode
static int sony_td4353_jdi_prepare(struct drm_panel *panel)
{
struct sony_td4353_jdi *ctx = to_sony_td4353_jdi(panel);
- struct device *dev = &ctx->dsi->dev;
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
- if (ret < 0) {
- dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ if (ret < 0)
return ret;
- }
msleep(100);
@@ -161,7 +105,6 @@ static int sony_td4353_jdi_prepare(struct drm_panel *panel)
ret = sony_td4353_jdi_on(ctx);
if (ret < 0) {
- dev_err(dev, "Failed to power on panel: %d\n", ret);
sony_td4353_assert_reset_gpios(ctx, 0);
regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
return ret;
@@ -173,12 +116,8 @@ static int sony_td4353_jdi_prepare(struct drm_panel *panel)
static int sony_td4353_jdi_unprepare(struct drm_panel *panel)
{
struct sony_td4353_jdi *ctx = to_sony_td4353_jdi(panel);
- struct device *dev = &ctx->dsi->dev;
- int ret;
- ret = sony_td4353_jdi_off(ctx);
- if (ret < 0)
- dev_err(dev, "Failed to power off panel: %d\n", ret);
+ sony_td4353_jdi_off(ctx);
sony_td4353_assert_reset_gpios(ctx, 0);
regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
diff --git a/drivers/gpu/drm/panel/panel-summit.c b/drivers/gpu/drm/panel/panel-summit.c
new file mode 100644
index 000000000000..e780faee1857
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-summit.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/backlight.h>
+#include <drm/drm_device.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+#include <video/mipi_display.h>
+
+struct summit_data {
+ struct mipi_dsi_device *dsi;
+ struct backlight_device *bl;
+ struct drm_panel panel;
+};
+
+static int summit_set_brightness(struct device *dev)
+{
+ struct summit_data *s_data = dev_get_drvdata(dev);
+ int level = backlight_get_brightness(s_data->bl);
+
+ return mipi_dsi_dcs_set_display_brightness(s_data->dsi, level);
+}
+
+static int summit_bl_update_status(struct backlight_device *dev)
+{
+ return summit_set_brightness(&dev->dev);
+}
+
+static const struct backlight_ops summit_bl_ops = {
+ .update_status = summit_bl_update_status,
+};
+
+static struct drm_display_mode summit_mode = {
+ .vdisplay = 2008,
+ .hdisplay = 60,
+ .hsync_start = 60 + 8,
+ .hsync_end = 60 + 8 + 80,
+ .htotal = 60 + 8 + 80 + 40,
+ .vsync_start = 2008 + 1,
+ .vsync_end = 2008 + 1 + 15,
+ .vtotal = 2008 + 1 + 15 + 6,
+ .clock = ((60 + 8 + 80 + 40) * (2008 + 1 + 15 + 6) * 60) / 1000,
+ .type = DRM_MODE_TYPE_DRIVER,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static int summit_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ connector->display_info.non_desktop = true;
+ drm_object_property_set_value(&connector->base,
+ connector->dev->mode_config.non_desktop_property,
+ connector->display_info.non_desktop);
+
+ return drm_connector_helper_get_modes_fixed(connector, &summit_mode);
+}
+
+static const struct drm_panel_funcs summit_panel_funcs = {
+ .get_modes = summit_get_modes,
+};
+
+static int summit_probe(struct mipi_dsi_device *dsi)
+{
+ struct backlight_properties props = { 0 };
+ struct device *dev = &dsi->dev;
+ struct summit_data *s_data;
+ int ret;
+
+ s_data = devm_kzalloc(dev, sizeof(*s_data), GFP_KERNEL);
+ if (!s_data)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, s_data);
+ s_data->dsi = dsi;
+
+ ret = device_property_read_u32(dev, "max-brightness", &props.max_brightness);
+ if (ret)
+ return ret;
+ props.type = BACKLIGHT_RAW;
+
+ s_data->bl = devm_backlight_device_register(dev, dev_name(dev),
+ dev, s_data, &summit_bl_ops, &props);
+ if (IS_ERR(s_data->bl))
+ return PTR_ERR(s_data->bl);
+
+ drm_panel_init(&s_data->panel, dev, &summit_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ drm_panel_add(&s_data->panel);
+
+ return mipi_dsi_attach(dsi);
+}
+
+static void summit_remove(struct mipi_dsi_device *dsi)
+{
+ struct summit_data *s_data = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&s_data->panel);
+}
+
+static int summit_suspend(struct device *dev)
+{
+ struct summit_data *s_data = dev_get_drvdata(dev);
+
+ return mipi_dsi_dcs_set_display_brightness(s_data->dsi, 0);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(summit_pm_ops, summit_suspend,
+ summit_set_brightness);
+
+static const struct of_device_id summit_of_match[] = {
+ { .compatible = "apple,summit" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, summit_of_match);
+
+static struct mipi_dsi_driver summit_driver = {
+ .probe = summit_probe,
+ .remove = summit_remove,
+ .driver = {
+ .name = "panel-summit",
+ .of_match_table = summit_of_match,
+ .pm = pm_sleep_ptr(&summit_pm_ops),
+ },
+};
+module_mipi_dsi_driver(summit_driver);
+
+MODULE_DESCRIPTION("Summit Display Panel Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-visionox-r66451.c b/drivers/gpu/drm/panel/panel-visionox-r66451.c
index 493f2a6076f8..3ea0a86f6e69 100644
--- a/drivers/gpu/drm/panel/panel-visionox-r66451.c
+++ b/drivers/gpu/drm/panel/panel-visionox-r66451.c
@@ -42,85 +42,84 @@ static void visionox_r66451_reset(struct visionox_r66451 *ctx)
static int visionox_r66451_on(struct visionox_r66451 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xc2,
- 0x09, 0x24, 0x0c, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00,
- 0x09, 0x3c);
- mipi_dsi_dcs_write_seq(dsi, 0xd7,
- 0x00, 0xb9, 0x3c, 0x00, 0x40, 0x04, 0x00, 0xa0, 0x0a,
- 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19,
- 0x3c, 0x00, 0x40, 0x04, 0x00, 0xa0, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x80);
- mipi_dsi_dcs_write_seq(dsi, 0xde,
- 0x40, 0x00, 0x18, 0x00, 0x18, 0x00, 0x18, 0x00, 0x18,
- 0x10, 0x00, 0x18, 0x00, 0x18, 0x00, 0x18, 0x02, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0xe8, 0x00, 0x02);
- mipi_dsi_dcs_write_seq(dsi, 0xe4, 0x00, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xc4,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x32);
- mipi_dsi_dcs_write_seq(dsi, 0xcf,
- 0x64, 0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
- 0x00, 0x0b, 0x77, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
- 0x02, 0x02, 0x02, 0x02, 0x02, 0x03);
- mipi_dsi_dcs_write_seq(dsi, 0xd3,
- 0x45, 0x00, 0x00, 0x01, 0x13, 0x15, 0x00, 0x15, 0x07,
- 0x0f, 0x77, 0x77, 0x77, 0x37, 0xb2, 0x11, 0x00, 0xa0,
- 0x3c, 0x9c);
- mipi_dsi_dcs_write_seq(dsi, 0xd7,
- 0x00, 0xb9, 0x34, 0x00, 0x40, 0x04, 0x00, 0xa0, 0x0a,
- 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19,
- 0x34, 0x00, 0x40, 0x04, 0x00, 0xa0, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0xd8,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x3a, 0x00, 0x3a, 0x00, 0x3a, 0x00, 0x3a, 0x00, 0x3a,
- 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x0a, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a,
- 0x00, 0x32, 0x00, 0x0a, 0x00, 0x22);
- mipi_dsi_dcs_write_seq(dsi, 0xdf,
- 0x50, 0x42, 0x58, 0x81, 0x2d, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x01, 0x0f, 0xff, 0xd4, 0x0e, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x0f, 0x53, 0xf1, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xf7, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x80);
- mipi_dsi_dcs_write_seq(dsi, 0xe4, 0x34, 0xb4, 0x00, 0x00, 0x00, 0x39, 0x04, 0x09, 0x34);
- mipi_dsi_dcs_write_seq(dsi, 0xe6, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x50, 0x40);
- mipi_dsi_dcs_write_seq(dsi, 0xf3, 0x50, 0x00, 0x00, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xf2, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0xf3, 0x01, 0x00, 0x00, 0x00, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xf4, 0x00, 0x02);
- mipi_dsi_dcs_write_seq(dsi, 0xf2, 0x19);
- mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x50, 0x42);
- mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- mipi_dsi_dcs_set_column_address(dsi, 0, 1080 - 1);
- mipi_dsi_dcs_set_page_address(dsi, 0, 2340 - 1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc2,
+ 0x09, 0x24, 0x0c, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00,
+ 0x09, 0x3c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd7,
+ 0x00, 0xb9, 0x3c, 0x00, 0x40, 0x04, 0x00, 0xa0, 0x0a,
+ 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19,
+ 0x3c, 0x00, 0x40, 0x04, 0x00, 0xa0, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xde,
+ 0x40, 0x00, 0x18, 0x00, 0x18, 0x00, 0x18, 0x00, 0x18,
+ 0x10, 0x00, 0x18, 0x00, 0x18, 0x00, 0x18, 0x02, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe8, 0x00, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe4, 0x00, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x32);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcf,
+ 0x64, 0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x0b, 0x77, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd3,
+ 0x45, 0x00, 0x00, 0x01, 0x13, 0x15, 0x00, 0x15, 0x07,
+ 0x0f, 0x77, 0x77, 0x77, 0x37, 0xb2, 0x11, 0x00, 0xa0,
+ 0x3c, 0x9c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd7,
+ 0x00, 0xb9, 0x34, 0x00, 0x40, 0x04, 0x00, 0xa0, 0x0a,
+ 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19,
+ 0x34, 0x00, 0x40, 0x04, 0x00, 0xa0, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x3a, 0x00, 0x3a, 0x00, 0x3a, 0x00, 0x3a, 0x00, 0x3a,
+ 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x0a, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a,
+ 0x00, 0x32, 0x00, 0x0a, 0x00, 0x22);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdf,
+ 0x50, 0x42, 0x58, 0x81, 0x2d, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x01, 0x0f, 0xff, 0xd4, 0x0e, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0f, 0x53, 0xf1, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf7, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe4, 0x34, 0xb4, 0x00, 0x00, 0x00, 0x39,
+ 0x04, 0x09, 0x34);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe6, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdf, 0x50, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf3, 0x50, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf2, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf3, 0x01, 0x00, 0x00, 0x00, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf4, 0x00, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf2, 0x19);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdf, 0x50, 0x42);
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ mipi_dsi_dcs_set_column_address_multi(&dsi_ctx, 0, 1080 - 1);
+ mipi_dsi_dcs_set_page_address_multi(&dsi_ctx, 0, 2340 - 1);
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- return 0;
+ return dsi_ctx.accum_err;
}
-static int visionox_r66451_off(struct visionox_r66451 *ctx)
+static void visionox_r66451_off(struct visionox_r66451 *ctx)
{
ctx->dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- return 0;
}
static int visionox_r66451_prepare(struct drm_panel *panel)
{
struct visionox_r66451 *ctx = to_visionox_r66451(panel);
- struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies),
@@ -132,7 +131,6 @@ static int visionox_r66451_prepare(struct drm_panel *panel)
ret = visionox_r66451_on(ctx);
if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
return ret;
@@ -146,12 +144,8 @@ static int visionox_r66451_prepare(struct drm_panel *panel)
static int visionox_r66451_unprepare(struct drm_panel *panel)
{
struct visionox_r66451 *ctx = to_visionox_r66451(panel);
- struct device *dev = &ctx->dsi->dev;
- int ret;
- ret = visionox_r66451_off(ctx);
- if (ret < 0)
- dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+ visionox_r66451_off(ctx);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
@@ -179,7 +173,7 @@ static int visionox_r66451_enable(struct drm_panel *panel)
struct visionox_r66451 *ctx = to_visionox_r66451(panel);
struct mipi_dsi_device *dsi = ctx->dsi;
struct drm_dsc_picture_parameter_set pps;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
if (!dsi->dsc) {
dev_err(&dsi->dev, "DSC not attached to DSI\n");
@@ -187,51 +181,30 @@ static int visionox_r66451_enable(struct drm_panel *panel)
}
drm_dsc_pps_payload_pack(&pps, dsi->dsc);
- ret = mipi_dsi_picture_parameter_set(dsi, &pps);
- if (ret) {
- dev_err(&dsi->dev, "Failed to set PPS\n");
- return ret;
- }
+ mipi_dsi_picture_parameter_set_multi(&dsi_ctx, &pps);
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(&dsi->dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- msleep(120);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(&dsi->dev, "Failed on set display on: %d\n", ret);
- return ret;
- }
- msleep(20);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
- return 0;
+ return dsi_ctx.accum_err;
}
static int visionox_r66451_disable(struct drm_panel *panel)
{
struct visionox_r66451 *ctx = to_visionox_r66451(panel);
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- msleep(20);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- msleep(120);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
- return 0;
+ return dsi_ctx.accum_err;
}
static int visionox_r66451_get_modes(struct drm_panel *panel,
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm692e5.c b/drivers/gpu/drm/panel/panel-visionox-rm692e5.c
new file mode 100644
index 000000000000..4db7fa8d74c4
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-visionox-rm692e5.c
@@ -0,0 +1,442 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2025, Eugene Lepshy <fekz115@gmail.com>
+ * Copyright (c) 2025, Danila Tikhonov <danila@jiaxyga.com>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/display/drm_dsc.h>
+#include <drm/display/drm_dsc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+struct visionox_rm692e5 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct drm_dsc_config dsc;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data *supplies;
+};
+
+static const struct regulator_bulk_data visionox_rm692e5_supplies[] = {
+ { .supply = "vddio" }, /* 1p8 */
+ { .supply = "vdd" }, /* 3p3 */
+};
+
+static inline
+struct visionox_rm692e5 *to_visionox_rm692e5(struct drm_panel *panel)
+{
+ return container_of(panel, struct visionox_rm692e5, panel);
+}
+
+static void visionox_rm692e5_reset(struct visionox_rm692e5 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(32);
+}
+
+static int visionox_rm692e5_on(struct visionox_rm692e5 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbd, 0x07);
+ mipi_dsi_usleep_range(&dsi_ctx, 17000, 18000);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0xd2);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x50, 0x11);
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0x00ab);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x52, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x09);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x54, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x56, 0x38);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x58, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x59, 0x14);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5f, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x60, 0xe8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x63, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x64, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x66, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x67, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x68, 0x18);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x69, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6a, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6b, 0xf0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6c, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6d, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6e, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x70, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x71, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x72, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x73, 0x33);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x74, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x75, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x76, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x77, 0x38);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x78, 0x46);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x79, 0x54);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7a, 0x62);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7b, 0x69);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7c, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7d, 0x77);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7e, 0x79);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7f, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x80, 0x7d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x81, 0x7e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x82, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x83, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x84, 0x22);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x85, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x86, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x87, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x88, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x89, 0xbe);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8a, 0x3a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8b, 0xfc);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8c, 0x3a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8d, 0xfa);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8e, 0x3a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8f, 0xf8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x90, 0x3b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x91, 0x38);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x92, 0x3b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x93, 0x78);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x94, 0x3b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x95, 0xb6);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x96, 0x4b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x97, 0xf6);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x98, 0x4c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x99, 0x34);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x9a, 0x4c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x9b, 0x74);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x9c, 0x5c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x9d, 0x74);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x9e, 0x8c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x9f, 0xf4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_READ_PPS_START, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa3, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa4, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa5, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa6, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa7, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_READ_PPS_CONTINUE, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xaa, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa0, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0xa1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcd, 0x6b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xce, 0xbb);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0xd1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb4, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x38);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x17, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfa, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc2, 0x08);
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0x000d);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 50);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_usleep_range(&dsi_ctx, 1000, 2000);
+
+ return dsi_ctx.accum_err;
+}
+
+static int visionox_rm692e5_disable(struct drm_panel *panel)
+{
+ struct visionox_rm692e5 *ctx = to_visionox_rm692e5(panel);
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_usleep_range(&dsi_ctx, 1000, 2000);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_usleep_range(&dsi_ctx, 1000, 2000);
+
+ return dsi_ctx.accum_err;
+}
+
+static int visionox_rm692e5_prepare(struct drm_panel *panel)
+{
+ struct visionox_rm692e5 *ctx = to_visionox_rm692e5(panel);
+ struct drm_dsc_picture_parameter_set pps;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(visionox_rm692e5_supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ visionox_rm692e5_reset(ctx);
+
+ ret = visionox_rm692e5_on(ctx);
+ if (ret < 0) {
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ goto err;
+ }
+
+ drm_dsc_pps_payload_pack(&pps, &ctx->dsc);
+ mipi_dsi_picture_parameter_set_multi(&dsi_ctx, &pps);
+ mipi_dsi_compression_mode_ext_multi(&dsi_ctx, true, MIPI_DSI_COMPRESSION_DSC, 0);
+
+ mipi_dsi_msleep(&dsi_ctx, 28);
+
+ if (dsi_ctx.accum_err < 0) {
+ ret = dsi_ctx.accum_err;
+ goto err;
+ }
+
+ return dsi_ctx.accum_err;
+err:
+ regulator_bulk_disable(ARRAY_SIZE(visionox_rm692e5_supplies),
+ ctx->supplies);
+ return ret;
+}
+
+static int visionox_rm692e5_unprepare(struct drm_panel *panel)
+{
+ struct visionox_rm692e5 *ctx = to_visionox_rm692e5(panel);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(visionox_rm692e5_supplies),
+ ctx->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode visionox_rm692e5_modes[] = {
+ /* Let's initialize the highest frequency first */
+ { /* 120Hz mode */
+ .clock = (1080 + 26 + 39 + 36) * (2400 + 16 + 21 + 16) * 120 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 26,
+ .hsync_end = 1080 + 26 + 39,
+ .htotal = 1080 + 26 + 39 + 36,
+ .vdisplay = 2400,
+ .vsync_start = 2400 + 16,
+ .vsync_end = 2400 + 16 + 21,
+ .vtotal = 2400 + 16 + 21 + 16,
+ .width_mm = 68,
+ .height_mm = 152,
+ .type = DRM_MODE_TYPE_DRIVER,
+ },
+ { /* 90Hz mode */
+ .clock = (1080 + 26 + 39 + 36) * (2400 + 16 + 21 + 16) * 90 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 26,
+ .hsync_end = 1080 + 26 + 39,
+ .htotal = 1080 + 26 + 39 + 36,
+ .vdisplay = 2400,
+ .vsync_start = 2400 + 16,
+ .vsync_end = 2400 + 16 + 21,
+ .vtotal = 2400 + 16 + 21 + 16,
+ .width_mm = 68,
+ .height_mm = 152,
+ .type = DRM_MODE_TYPE_DRIVER,
+ },
+ { /* 60Hz mode */
+ .clock = (1080 + 26 + 39 + 36) * (2400 + 16 + 21 + 16) * 60 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 26,
+ .hsync_end = 1080 + 26 + 39,
+ .htotal = 1080 + 26 + 39 + 36,
+ .vdisplay = 2400,
+ .vsync_start = 2400 + 16,
+ .vsync_end = 2400 + 16 + 21,
+ .vtotal = 2400 + 16 + 21 + 16,
+ .width_mm = 68,
+ .height_mm = 152,
+ .type = DRM_MODE_TYPE_DRIVER,
+ },
+};
+
+static int visionox_rm692e5_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ int count = 0;
+
+ for (int i = 0; i < ARRAY_SIZE(visionox_rm692e5_modes); i++)
+ count += drm_connector_helper_get_modes_fixed(connector,
+ &visionox_rm692e5_modes[i]);
+
+ return count;
+}
+
+static const struct drm_panel_funcs visionox_rm692e5_panel_funcs = {
+ .prepare = visionox_rm692e5_prepare,
+ .unprepare = visionox_rm692e5_unprepare,
+ .disable = visionox_rm692e5_disable,
+ .get_modes = visionox_rm692e5_get_modes,
+};
+
+static int visionox_rm692e5_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
+
+static int visionox_rm692e5_bl_get_brightness(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_get_display_brightness_large(dsi, &brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return brightness;
+}
+
+static const struct backlight_ops visionox_rm692e5_bl_ops = {
+ .update_status = visionox_rm692e5_bl_update_status,
+ .get_brightness = visionox_rm692e5_bl_get_brightness,
+};
+
+static struct backlight_device *
+visionox_rm692e5_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 2047,
+ .max_brightness = 4095,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &visionox_rm692e5_bl_ops, &props);
+}
+
+static int visionox_rm692e5_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct visionox_rm692e5 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ret = devm_regulator_bulk_get_const(&dsi->dev,
+ ARRAY_SIZE(visionox_rm692e5_supplies),
+ visionox_rm692e5_supplies,
+ &ctx->supplies);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get regulators\n");
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ drm_panel_init(&ctx->panel, dev, &visionox_rm692e5_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ ctx->panel.prepare_prev_first = true;
+
+ ctx->panel.backlight = visionox_rm692e5_create_backlight(dsi);
+ if (IS_ERR(ctx->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
+ "Failed to create backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ dsi->dsc = &ctx->dsc;
+ ctx->dsc.dsc_version_major = 1;
+ ctx->dsc.dsc_version_minor = 1;
+ ctx->dsc.slice_height = 20;
+ ctx->dsc.slice_width = 540;
+ ctx->dsc.slice_count = 1080 / ctx->dsc.slice_width;
+ ctx->dsc.bits_per_component = 10;
+ ctx->dsc.bits_per_pixel = 8 << 4;
+ ctx->dsc.block_pred_enable = true;
+
+ ret = devm_mipi_dsi_attach(dev, dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void visionox_rm692e5_remove(struct mipi_dsi_device *dsi)
+{
+ struct visionox_rm692e5 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id visionox_rm692e5_of_match[] = {
+ { .compatible = "visionox,rm692e5" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, visionox_rm692e5_of_match);
+
+static struct mipi_dsi_driver visionox_rm692e5_driver = {
+ .probe = visionox_rm692e5_probe,
+ .remove = visionox_rm692e5_remove,
+ .driver = {
+ .name = "panel-visionox-rm692e5",
+ .of_match_table = visionox_rm692e5_of_match,
+ },
+};
+module_mipi_dsi_driver(visionox_rm692e5_driver);
+
+MODULE_AUTHOR("Eugene Lepshy <fekz115@gmail.com>");
+MODULE_AUTHOR("Danila Tikhonov <danila@jiaxyga.com>");
+MODULE_DESCRIPTION("DRM driver for Visionox RM692E5 cmd mode dsi panel");
+MODULE_LICENSE("GPL");
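For reference, the .clock entries in the mode table above follow the usual DRM convention: a pixel clock in kHz computed from the full horizontal and vertical totals and the refresh rate. A minimal sketch of that arithmetic (the helper name is illustrative, not part of the driver):

static unsigned int panel_pixel_clock_khz(unsigned int htotal, unsigned int vtotal,
					   unsigned int refresh_hz)
{
	/* DRM expects kHz, hence the final division by 1000. */
	return htotal * vtotal * refresh_hz / 1000;
}

/* 60 Hz entry above: panel_pixel_clock_khz(1080 + 26 + 39 + 36, 2400 + 16 + 21 + 16, 60) */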
diff --git a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
index 22a14006765e..2b91414c2829 100644
--- a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
+++ b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
@@ -59,91 +59,80 @@ static inline struct xpp055c272 *panel_to_xpp055c272(struct drm_panel *panel)
return container_of(panel, struct xpp055c272, panel);
}
-static int xpp055c272_init_sequence(struct xpp055c272 *ctx)
+static void xpp055c272_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- struct device *dev = ctx->dev;
-
/*
* Init sequence was supplied by the panel vendor without much
* documentation.
*/
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETEXTC, 0xf1, 0x12, 0x83);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETMIPI,
- 0x33, 0x81, 0x05, 0xf9, 0x0e, 0x0e, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x25,
- 0x00, 0x91, 0x0a, 0x00, 0x00, 0x02, 0x4f, 0x01,
- 0x00, 0x00, 0x37);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETPOWER_EXT, 0x25);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETPCR, 0x02, 0x11, 0x00);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETRGBIF,
- 0x0c, 0x10, 0x0a, 0x50, 0x03, 0xff, 0x00, 0x00,
- 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETSCR,
- 0x73, 0x73, 0x50, 0x50, 0x00, 0x00, 0x08, 0x70,
- 0x00);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETVDC, 0x46);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETPANEL, 0x0b);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETCYC, 0x80);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETDISP, 0xc8, 0x12, 0x30);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETEQ,
- 0x07, 0x07, 0x0B, 0x0B, 0x03, 0x0B, 0x00, 0x00,
- 0x00, 0x00, 0xFF, 0x00, 0xC0, 0x10);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETPOWER,
- 0x53, 0x00, 0x1e, 0x1e, 0x77, 0xe1, 0xcc, 0xdd,
- 0x67, 0x77, 0x33, 0x33);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETECO, 0x00, 0x00, 0xff,
- 0xff, 0x01, 0xff);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETBGP, 0x09, 0x09);
- msleep(20);
-
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETVCOM, 0x87, 0x95);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETGIP1,
- 0xc2, 0x10, 0x05, 0x05, 0x10, 0x05, 0xa0, 0x12,
- 0x31, 0x23, 0x3f, 0x81, 0x0a, 0xa0, 0x37, 0x18,
- 0x00, 0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x80,
- 0x01, 0x00, 0x00, 0x00, 0x48, 0xf8, 0x86, 0x42,
- 0x08, 0x88, 0x88, 0x80, 0x88, 0x88, 0x88, 0x58,
- 0xf8, 0x87, 0x53, 0x18, 0x88, 0x88, 0x81, 0x88,
- 0x88, 0x88, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETGIP2,
- 0x00, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x1f, 0x88, 0x81, 0x35,
- 0x78, 0x88, 0x88, 0x85, 0x88, 0x88, 0x88, 0x0f,
- 0x88, 0x80, 0x24, 0x68, 0x88, 0x88, 0x84, 0x88,
- 0x88, 0x88, 0x23, 0x10, 0x00, 0x00, 0x1c, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x05,
- 0xa0, 0x00, 0x00, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETGAMMA,
- 0x00, 0x06, 0x08, 0x2a, 0x31, 0x3f, 0x38, 0x36,
- 0x07, 0x0c, 0x0d, 0x11, 0x13, 0x12, 0x13, 0x11,
- 0x18, 0x00, 0x06, 0x08, 0x2a, 0x31, 0x3f, 0x38,
- 0x36, 0x07, 0x0c, 0x0d, 0x11, 0x13, 0x12, 0x13,
- 0x11, 0x18);
-
- msleep(60);
-
- dev_dbg(dev, "Panel init sequence done\n");
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETEXTC, 0xf1, 0x12, 0x83);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETMIPI,
+ 0x33, 0x81, 0x05, 0xf9, 0x0e, 0x0e, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x25,
+ 0x00, 0x91, 0x0a, 0x00, 0x00, 0x02, 0x4f, 0x01,
+ 0x00, 0x00, 0x37);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETPOWER_EXT, 0x25);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETPCR, 0x02, 0x11, 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETRGBIF,
+ 0x0c, 0x10, 0x0a, 0x50, 0x03, 0xff, 0x00, 0x00,
+ 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETSCR,
+ 0x73, 0x73, 0x50, 0x50, 0x00, 0x00, 0x08, 0x70,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETVDC, 0x46);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETPANEL, 0x0b);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETCYC, 0x80);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETDISP, 0xc8, 0x12, 0x30);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETEQ,
+ 0x07, 0x07, 0x0b, 0x0b, 0x03, 0x0b, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0x00, 0xC0, 0x10);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETPOWER,
+ 0x53, 0x00, 0x1e, 0x1e, 0x77, 0xe1, 0xcc, 0xdd,
+ 0x67, 0x77, 0x33, 0x33);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETECO, 0x00, 0x00, 0xff,
+ 0xff, 0x01, 0xff);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETBGP, 0x09, 0x09);
+ mipi_dsi_msleep(dsi_ctx, 20);
+
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETVCOM, 0x87, 0x95);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETGIP1,
+ 0xc2, 0x10, 0x05, 0x05, 0x10, 0x05, 0xa0, 0x12,
+ 0x31, 0x23, 0x3f, 0x81, 0x0a, 0xa0, 0x37, 0x18,
+ 0x00, 0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x80,
+ 0x01, 0x00, 0x00, 0x00, 0x48, 0xf8, 0x86, 0x42,
+ 0x08, 0x88, 0x88, 0x80, 0x88, 0x88, 0x88, 0x58,
+ 0xf8, 0x87, 0x53, 0x18, 0x88, 0x88, 0x81, 0x88,
+ 0x88, 0x88, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETGIP2,
+ 0x00, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0x88, 0x81, 0x35,
+ 0x78, 0x88, 0x88, 0x85, 0x88, 0x88, 0x88, 0x0f,
+ 0x88, 0x80, 0x24, 0x68, 0x88, 0x88, 0x84, 0x88,
+ 0x88, 0x88, 0x23, 0x10, 0x00, 0x00, 0x1c, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x05,
+ 0xa0, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, XPP055C272_CMD_SETGAMMA,
+ 0x00, 0x06, 0x08, 0x2a, 0x31, 0x3f, 0x38, 0x36,
+ 0x07, 0x0c, 0x0d, 0x11, 0x13, 0x12, 0x13, 0x11,
+ 0x18, 0x00, 0x06, 0x08, 0x2a, 0x31, 0x3f, 0x38,
+ 0x36, 0x07, 0x0c, 0x0d, 0x11, 0x13, 0x12, 0x13,
+ 0x11, 0x18);
+
+ mipi_dsi_msleep(dsi_ctx, 60);
}
static int xpp055c272_unprepare(struct drm_panel *panel)
{
struct xpp055c272 *ctx = panel_to_xpp055c272(panel);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0)
- dev_err(ctx->dev, "failed to set display off: %d\n", ret);
-
- mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(ctx->dev, "failed to enter sleep mode: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ if (dsi_ctx.accum_err)
+ return dsi_ctx.accum_err;
regulator_disable(ctx->iovcc);
regulator_disable(ctx->vci);
@@ -155,17 +144,19 @@ static int xpp055c272_prepare(struct drm_panel *panel)
{
struct xpp055c272 *ctx = panel_to_xpp055c272(panel);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dev_dbg(ctx->dev, "Resetting the panel\n");
- ret = regulator_enable(ctx->vci);
- if (ret < 0) {
- dev_err(ctx->dev, "Failed to enable vci supply: %d\n", ret);
- return ret;
+ dsi_ctx.accum_err = regulator_enable(ctx->vci);
+ if (dsi_ctx.accum_err) {
+ dev_err(ctx->dev, "Failed to enable vci supply: %d\n",
+ dsi_ctx.accum_err);
+ return dsi_ctx.accum_err;
}
- ret = regulator_enable(ctx->iovcc);
- if (ret < 0) {
- dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
+ dsi_ctx.accum_err = regulator_enable(ctx->iovcc);
+ if (dsi_ctx.accum_err) {
+ dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n",
+ dsi_ctx.accum_err);
goto disable_vci;
}
@@ -177,26 +168,17 @@ static int xpp055c272_prepare(struct drm_panel *panel)
/* T8: 20ms */
msleep(20);
- ret = xpp055c272_init_sequence(ctx);
- if (ret < 0) {
- dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret);
- goto disable_iovcc;
- }
-
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
- goto disable_iovcc;
- }
+ xpp055c272_init_sequence(&dsi_ctx);
+ if (!dsi_ctx.accum_err)
+ dev_dbg(ctx->dev, "Panel init sequence done\n");
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
/* T9: 120ms */
- msleep(120);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(ctx->dev, "Failed to set display on: %d\n", ret);
+ if (dsi_ctx.accum_err)
goto disable_iovcc;
- }
msleep(50);
@@ -206,7 +188,7 @@ disable_iovcc:
regulator_disable(ctx->iovcc);
disable_vci:
regulator_disable(ctx->vci);
- return ret;
+ return dsi_ctx.accum_err;
}
static const struct drm_display_mode default_mode = {
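The conversion above moves the panel to the mipi_dsi_multi_context helpers: the first failure is latched in accum_err and every later *_multi call (and mipi_dsi_msleep()) becomes a no-op, so the per-call return-value checks disappear. A minimal sketch of the pattern, using a hypothetical panel function:

static int example_panel_power_on(struct mipi_dsi_device *dsi)
{
	struct mipi_dsi_multi_context ctx = { .dsi = dsi };

	/* Each helper below is skipped once ctx.accum_err is non-zero. */
	mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
	mipi_dsi_msleep(&ctx, 120);
	mipi_dsi_dcs_set_display_on_multi(&ctx);

	return ctx.accum_err; /* 0 on success, first recorded error otherwise */
}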
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 9b8e82fb8bc4..5657106c2f7d 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -836,8 +836,16 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
int panfrost_job_init(struct panfrost_device *pfdev)
{
+ struct drm_sched_init_args args = {
+ .ops = &panfrost_sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = 2,
+ .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
+ .timeout_wq = pfdev->reset.wq,
+ .name = "pan_js",
+ .dev = pfdev->dev,
+ };
struct panfrost_job_slot *js;
- unsigned int nentries = 2;
int ret, j;
/* All GPUs have two entries per queue, but without jobchain
@@ -845,7 +853,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
* so let's just advertise one entry in that case.
*/
if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION))
- nentries = 1;
+ args.credit_limit = 1;
pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
if (!js)
@@ -875,13 +883,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
for (j = 0; j < NUM_JOB_SLOTS; j++) {
js->queue[j].fence_context = dma_fence_context_alloc(1);
- ret = drm_sched_init(&js->queue[j].sched,
- &panfrost_sched_ops, NULL,
- DRM_SCHED_PRIORITY_COUNT,
- nentries, 0,
- msecs_to_jiffies(JOB_TIMEOUT_MS),
- pfdev->reset.wq,
- NULL, "pan_js", pfdev->dev);
+ ret = drm_sched_init(&js->queue[j].sched, &args);
if (ret) {
dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
goto err_sched;
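The hunk above (and the panthor hunks further down) replaces the long positional drm_sched_init() call with a struct drm_sched_init_args initializer; designated initializers leave every unmentioned member zero/NULL. A sketch of how the removed call maps onto the new form, using the argument order visible in the deleted lines:

/*
 * Old (removed): drm_sched_init(sched, ops, submit_wq, num_rqs, credit_limit,
 *                               hang_limit, timeout, timeout_wq, score, name, dev);
 * New (sketch):
 */
struct drm_sched_init_args args = {
	.ops		= &panfrost_sched_ops,
	.num_rqs	= DRM_SCHED_PRIORITY_COUNT,
	.credit_limit	= 2,	/* lowered to 1 when jobchain disambiguation is absent */
	.timeout	= msecs_to_jiffies(JOB_TIMEOUT_MS),
	.timeout_wq	= pfdev->reset.wq,
	.name		= "pan_js",
	.dev		= pfdev->dev,
};

ret = drm_sched_init(&js->queue[j].sched, &args);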
diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c
index 0a37cfeeb181..a9da1d1eeb70 100644
--- a/drivers/gpu/drm/panthor/panthor_device.c
+++ b/drivers/gpu/drm/panthor/panthor_device.c
@@ -128,14 +128,11 @@ static void panthor_device_reset_work(struct work_struct *work)
struct panthor_device *ptdev = container_of(work, struct panthor_device, reset.work);
int ret = 0, cookie;
- if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE) {
- /*
- * No need for a reset as the device has been (or will be)
- * powered down
- */
- atomic_set(&ptdev->reset.pending, 0);
+ /* If the device is entering suspend, we don't reset. A slow reset will
+ * be forced at resume time instead.
+ */
+ if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE)
return;
- }
if (!drm_dev_enter(&ptdev->base, &cookie))
return;
@@ -477,6 +474,14 @@ int panthor_device_resume(struct device *dev)
if (panthor_device_is_initialized(ptdev) &&
drm_dev_enter(&ptdev->base, &cookie)) {
+ /* If there was a reset pending at the time we suspended the
+ * device, we force a slow reset.
+ */
+ if (atomic_read(&ptdev->reset.pending)) {
+ ptdev->reset.fast = false;
+ atomic_set(&ptdev->reset.pending, 0);
+ }
+
ret = panthor_device_resume_hw_components(ptdev);
if (ret && ptdev->reset.fast) {
drm_err(&ptdev->base, "Fast reset failed, trying a slow reset");
@@ -493,9 +498,6 @@ int panthor_device_resume(struct device *dev)
goto err_suspend_devfreq;
}
- if (atomic_read(&ptdev->reset.pending))
- queue_work(ptdev->reset.wq, &ptdev->reset.work);
-
/* Clear all IOMEM mappings pointing to this device after we've
* resumed. This way the fake mappings pointing to the dummy pages
* are removed and the real iomem mapping will be restored on next
diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
index 08136e790ca0..06fe46e32073 100644
--- a/drivers/gpu/drm/panthor/panthor_drv.c
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -1458,12 +1458,26 @@ static void panthor_gpu_show_fdinfo(struct panthor_device *ptdev,
drm_printf(p, "drm-curfreq-panthor:\t%lu Hz\n", ptdev->current_frequency);
}
+static void panthor_show_internal_memory_stats(struct drm_printer *p, struct drm_file *file)
+{
+ char *drv_name = file->minor->dev->driver->name;
+ struct panthor_file *pfile = file->driver_priv;
+ struct drm_memory_stats stats = {0};
+
+ panthor_fdinfo_gather_group_mem_info(pfile, &stats);
+ panthor_vm_heaps_sizes(pfile, &stats);
+
+ drm_fdinfo_print_size(p, drv_name, "resident", "memory", stats.resident);
+ drm_fdinfo_print_size(p, drv_name, "active", "memory", stats.active);
+}
+
static void panthor_show_fdinfo(struct drm_printer *p, struct drm_file *file)
{
struct drm_device *dev = file->minor->dev;
struct panthor_device *ptdev = container_of(dev, struct panthor_device, base);
panthor_gpu_show_fdinfo(ptdev, file->driver_priv, p);
+ panthor_show_internal_memory_stats(p, file);
drm_show_memory_stats(p, file);
}
diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c
index 68eb4fb4d3a8..0f52766a3120 100644
--- a/drivers/gpu/drm/panthor/panthor_fw.c
+++ b/drivers/gpu/drm/panthor/panthor_fw.c
@@ -201,7 +201,6 @@ struct panthor_fw_section {
#define MIN_CS_PER_CSG 8
#define MIN_CSGS 3
-#define MAX_CSG_PRIO 0xf
#define CSF_IFACE_VERSION(major, minor, patch) \
(((major) << 24) | ((minor) << 16) | (patch))
@@ -637,8 +636,8 @@ static int panthor_fw_read_build_info(struct panthor_device *ptdev,
u32 ehdr)
{
struct panthor_fw_build_info_hdr hdr;
- char header[9];
- const char git_sha_header[sizeof(header)] = "git_sha: ";
+ static const char git_sha_header[] = "git_sha: ";
+ const int header_len = sizeof(git_sha_header) - 1;
int ret;
ret = panthor_fw_binary_iter_read(ptdev, iter, &hdr, sizeof(hdr));
@@ -652,8 +651,7 @@ static int panthor_fw_read_build_info(struct panthor_device *ptdev,
return 0;
}
- if (memcmp(git_sha_header, fw->data + hdr.meta_start,
- sizeof(git_sha_header))) {
+ if (memcmp(git_sha_header, fw->data + hdr.meta_start, header_len)) {
/* Not the expected header, this isn't metadata we understand */
return 0;
}
@@ -666,7 +664,7 @@ static int panthor_fw_read_build_info(struct panthor_device *ptdev,
}
drm_info(&ptdev->base, "Firmware git sha: %s\n",
- fw->data + hdr.meta_start + sizeof(git_sha_header));
+ fw->data + hdr.meta_start + header_len);
return 0;
}
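The build-info change above swaps the fixed char header[9] buffer for the standard sizeof(literal) - 1 idiom, so both the memcmp() and the offset past the "git_sha: " prefix count only the printable bytes, never the terminating NUL. A self-contained sketch of the idiom (names are illustrative):

#include <stdbool.h>
#include <string.h>

static const char git_sha_prefix[] = "git_sha: ";	/* sizeof == 10, including the NUL */

static bool has_git_sha_prefix(const char *meta)
{
	const size_t prefix_len = sizeof(git_sha_prefix) - 1;	/* 9 printable bytes */

	return memcmp(meta, git_sha_prefix, prefix_len) == 0;
}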
diff --git a/drivers/gpu/drm/panthor/panthor_fw.h b/drivers/gpu/drm/panthor/panthor_fw.h
index 22448abde992..6598d96c6d2a 100644
--- a/drivers/gpu/drm/panthor/panthor_fw.h
+++ b/drivers/gpu/drm/panthor/panthor_fw.h
@@ -102,9 +102,9 @@ struct panthor_fw_cs_output_iface {
#define CS_STATUS_BLOCKED_REASON_SB_WAIT 1
#define CS_STATUS_BLOCKED_REASON_PROGRESS_WAIT 2
#define CS_STATUS_BLOCKED_REASON_SYNC_WAIT 3
-#define CS_STATUS_BLOCKED_REASON_DEFERRED 5
-#define CS_STATUS_BLOCKED_REASON_RES 6
-#define CS_STATUS_BLOCKED_REASON_FLUSH 7
+#define CS_STATUS_BLOCKED_REASON_DEFERRED 4
+#define CS_STATUS_BLOCKED_REASON_RESOURCE 5
+#define CS_STATUS_BLOCKED_REASON_FLUSH 6
#define CS_STATUS_BLOCKED_REASON_MASK GENMASK(3, 0)
u32 status_blocked_reason;
u32 status_wait_sync_value_hi;
diff --git a/drivers/gpu/drm/panthor/panthor_gem.h b/drivers/gpu/drm/panthor/panthor_gem.h
index e43021cf6d45..5749ef2ebe03 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.h
+++ b/drivers/gpu/drm/panthor/panthor_gem.h
@@ -85,11 +85,6 @@ struct panthor_gem_object *to_panthor_bo(struct drm_gem_object *obj)
struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t size);
-struct drm_gem_object *
-panthor_gem_prime_import_sg_table(struct drm_device *ddev,
- struct dma_buf_attachment *attach,
- struct sg_table *sgt);
-
int
panthor_gem_create_with_handle(struct drm_file *file,
struct drm_device *ddev,
diff --git a/drivers/gpu/drm/panthor/panthor_heap.c b/drivers/gpu/drm/panthor/panthor_heap.c
index 3796a9eb22af..3bdf61c14264 100644
--- a/drivers/gpu/drm/panthor/panthor_heap.c
+++ b/drivers/gpu/drm/panthor/panthor_heap.c
@@ -97,6 +97,9 @@ struct panthor_heap_pool {
/** @gpu_contexts: Buffer object containing the GPU heap contexts. */
struct panthor_kernel_bo *gpu_contexts;
+
+ /** @size: Size of all chunks across all heaps in the pool. */
+ atomic_t size;
};
static int panthor_heap_ctx_stride(struct panthor_device *ptdev)
@@ -118,7 +121,7 @@ static void *panthor_get_heap_ctx(struct panthor_heap_pool *pool, int id)
panthor_get_heap_ctx_offset(pool, id);
}
-static void panthor_free_heap_chunk(struct panthor_vm *vm,
+static void panthor_free_heap_chunk(struct panthor_heap_pool *pool,
struct panthor_heap *heap,
struct panthor_heap_chunk *chunk)
{
@@ -127,12 +130,13 @@ static void panthor_free_heap_chunk(struct panthor_vm *vm,
heap->chunk_count--;
mutex_unlock(&heap->lock);
+ atomic_sub(heap->chunk_size, &pool->size);
+
panthor_kernel_bo_destroy(chunk->bo);
kfree(chunk);
}
-static int panthor_alloc_heap_chunk(struct panthor_device *ptdev,
- struct panthor_vm *vm,
+static int panthor_alloc_heap_chunk(struct panthor_heap_pool *pool,
struct panthor_heap *heap,
bool initial_chunk)
{
@@ -144,7 +148,7 @@ static int panthor_alloc_heap_chunk(struct panthor_device *ptdev,
if (!chunk)
return -ENOMEM;
- chunk->bo = panthor_kernel_bo_create(ptdev, vm, heap->chunk_size,
+ chunk->bo = panthor_kernel_bo_create(pool->ptdev, pool->vm, heap->chunk_size,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
PANTHOR_VM_KERNEL_AUTO_VA);
@@ -180,6 +184,8 @@ static int panthor_alloc_heap_chunk(struct panthor_device *ptdev,
heap->chunk_count++;
mutex_unlock(&heap->lock);
+ atomic_add(heap->chunk_size, &pool->size);
+
return 0;
err_destroy_bo:
@@ -191,17 +197,16 @@ err_free_chunk:
return ret;
}
-static void panthor_free_heap_chunks(struct panthor_vm *vm,
+static void panthor_free_heap_chunks(struct panthor_heap_pool *pool,
struct panthor_heap *heap)
{
struct panthor_heap_chunk *chunk, *tmp;
list_for_each_entry_safe(chunk, tmp, &heap->chunks, node)
- panthor_free_heap_chunk(vm, heap, chunk);
+ panthor_free_heap_chunk(pool, heap, chunk);
}
-static int panthor_alloc_heap_chunks(struct panthor_device *ptdev,
- struct panthor_vm *vm,
+static int panthor_alloc_heap_chunks(struct panthor_heap_pool *pool,
struct panthor_heap *heap,
u32 chunk_count)
{
@@ -209,7 +214,7 @@ static int panthor_alloc_heap_chunks(struct panthor_device *ptdev,
u32 i;
for (i = 0; i < chunk_count; i++) {
- ret = panthor_alloc_heap_chunk(ptdev, vm, heap, true);
+ ret = panthor_alloc_heap_chunk(pool, heap, true);
if (ret)
return ret;
}
@@ -226,7 +231,7 @@ panthor_heap_destroy_locked(struct panthor_heap_pool *pool, u32 handle)
if (!heap)
return -EINVAL;
- panthor_free_heap_chunks(pool->vm, heap);
+ panthor_free_heap_chunks(pool, heap);
mutex_destroy(&heap->lock);
kfree(heap);
return 0;
@@ -308,8 +313,7 @@ int panthor_heap_create(struct panthor_heap_pool *pool,
heap->max_chunks = max_chunks;
heap->target_in_flight = target_in_flight;
- ret = panthor_alloc_heap_chunks(pool->ptdev, vm, heap,
- initial_chunk_count);
+ ret = panthor_alloc_heap_chunks(pool, heap, initial_chunk_count);
if (ret)
goto err_free_heap;
@@ -342,7 +346,7 @@ int panthor_heap_create(struct panthor_heap_pool *pool,
return id;
err_free_heap:
- panthor_free_heap_chunks(pool->vm, heap);
+ panthor_free_heap_chunks(pool, heap);
mutex_destroy(&heap->lock);
kfree(heap);
@@ -389,6 +393,7 @@ int panthor_heap_return_chunk(struct panthor_heap_pool *pool,
removed = chunk;
list_del(&chunk->node);
heap->chunk_count--;
+ atomic_sub(heap->chunk_size, &pool->size);
break;
}
}
@@ -466,7 +471,7 @@ int panthor_heap_grow(struct panthor_heap_pool *pool,
* further jobs in this queue fail immediately instead of having to
* wait for the job timeout.
*/
- ret = panthor_alloc_heap_chunk(pool->ptdev, pool->vm, heap, false);
+ ret = panthor_alloc_heap_chunk(pool, heap, false);
if (ret)
goto out_unlock;
@@ -560,6 +565,8 @@ panthor_heap_pool_create(struct panthor_device *ptdev, struct panthor_vm *vm)
if (ret)
goto err_destroy_pool;
+ atomic_add(pool->gpu_contexts->obj->size, &pool->size);
+
return pool;
err_destroy_pool:
@@ -594,8 +601,10 @@ void panthor_heap_pool_destroy(struct panthor_heap_pool *pool)
xa_for_each(&pool->xa, i, heap)
drm_WARN_ON(&pool->ptdev->base, panthor_heap_destroy_locked(pool, i));
- if (!IS_ERR_OR_NULL(pool->gpu_contexts))
+ if (!IS_ERR_OR_NULL(pool->gpu_contexts)) {
+ atomic_sub(pool->gpu_contexts->obj->size, &pool->size);
panthor_kernel_bo_destroy(pool->gpu_contexts);
+ }
/* Reflects the fact the pool has been destroyed. */
pool->vm = NULL;
@@ -603,3 +612,18 @@ void panthor_heap_pool_destroy(struct panthor_heap_pool *pool)
panthor_heap_pool_put(pool);
}
+
+/**
+ * panthor_heap_pool_size() - Get a heap pool's total size
+ * @pool: Pool whose total chunk size to return
+ *
+ * Return: The aggregated size of all chunks across all heaps in the pool.
+ */
+size_t panthor_heap_pool_size(struct panthor_heap_pool *pool)
+{
+ if (!pool)
+ return 0;
+
+ return atomic_read(&pool->size);
+}
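The heap changes above keep a running byte count in pool->size: chunk allocation adds heap->chunk_size, freeing or returning a chunk subtracts it, and the GPU-context BO is counted at pool creation and destruction. Because the counter is an atomic_t, the fdinfo path can read it locklessly through panthor_heap_pool_size() instead of walking chunk lists under each heap's mutex. A condensed sketch of the invariant (field names follow the hunks above; the two wrapper helpers are hypothetical):

static void chunk_accounted(struct panthor_heap_pool *pool, struct panthor_heap *heap)
{
	atomic_add(heap->chunk_size, &pool->size);	/* pairs with chunk creation */
}

static void chunk_unaccounted(struct panthor_heap_pool *pool, struct panthor_heap *heap)
{
	atomic_sub(heap->chunk_size, &pool->size);	/* pairs with chunk release/return */
}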
diff --git a/drivers/gpu/drm/panthor/panthor_heap.h b/drivers/gpu/drm/panthor/panthor_heap.h
index 25a5f2bba445..e3358d4e8edb 100644
--- a/drivers/gpu/drm/panthor/panthor_heap.h
+++ b/drivers/gpu/drm/panthor/panthor_heap.h
@@ -27,6 +27,8 @@ struct panthor_heap_pool *
panthor_heap_pool_get(struct panthor_heap_pool *pool);
void panthor_heap_pool_put(struct panthor_heap_pool *pool);
+size_t panthor_heap_pool_size(struct panthor_heap_pool *pool);
+
int panthor_heap_grow(struct panthor_heap_pool *pool,
u64 heap_gpu_va,
u32 renderpasses_in_flight,
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index c39e3eb1c15d..12a02e28f50f 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -53,26 +53,27 @@ struct panthor_mmu {
/** @irq: The MMU irq. */
struct panthor_irq irq;
- /** @as: Address space related fields.
+ /**
+ * @as: Address space related fields.
*
* The GPU has a limited number of address spaces (AS) slots, forcing
 * us to re-assign them on demand.
*/
struct {
- /** @slots_lock: Lock protecting access to all other AS fields. */
+ /** @as.slots_lock: Lock protecting access to all other AS fields. */
struct mutex slots_lock;
- /** @alloc_mask: Bitmask encoding the allocated slots. */
+ /** @as.alloc_mask: Bitmask encoding the allocated slots. */
unsigned long alloc_mask;
- /** @faulty_mask: Bitmask encoding the faulty slots. */
+ /** @as.faulty_mask: Bitmask encoding the faulty slots. */
unsigned long faulty_mask;
- /** @slots: VMs currently bound to the AS slots. */
+ /** @as.slots: VMs currently bound to the AS slots. */
struct panthor_as_slot slots[MAX_AS_SLOTS];
/**
- * @lru_list: List of least recently used VMs.
+ * @as.lru_list: List of least recently used VMs.
*
* We use this list to pick a VM to evict when all slots are
* used.
@@ -87,16 +88,16 @@ struct panthor_mmu {
/** @vm: VMs management fields */
struct {
- /** @lock: Lock protecting access to list. */
+ /** @vm.lock: Lock protecting access to list. */
struct mutex lock;
- /** @list: List containing all VMs. */
+ /** @vm.list: List containing all VMs. */
struct list_head list;
- /** @reset_in_progress: True if a reset is in progress. */
+ /** @vm.reset_in_progress: True if a reset is in progress. */
bool reset_in_progress;
- /** @wq: Workqueue used for the VM_BIND queues. */
+ /** @vm.wq: Workqueue used for the VM_BIND queues. */
struct workqueue_struct *wq;
} vm;
};
@@ -143,14 +144,14 @@ struct panthor_vma {
struct panthor_vm_op_ctx {
/** @rsvd_page_tables: Pages reserved for the MMU page table update. */
struct {
- /** @count: Number of pages reserved. */
+ /** @rsvd_page_tables.count: Number of pages reserved. */
u32 count;
- /** @ptr: Point to the first unused page in the @pages table. */
+ /** @rsvd_page_tables.ptr: Points to the first unused page in the @pages table. */
u32 ptr;
/**
- * @page: Array of pages that can be used for an MMU page table update.
+ * @rsvd_page_tables.pages: Array of pages to be used for an MMU page table update.
*
* After an VM operation, there might be free pages left in this array.
* They should be returned to the pt_cache as part of the op_ctx cleanup.
@@ -172,10 +173,10 @@ struct panthor_vm_op_ctx {
/** @va: Virtual range targeted by the VM operation. */
struct {
- /** @addr: Start address. */
+ /** @va.addr: Start address. */
u64 addr;
- /** @range: Range size. */
+ /** @va.range: Range size. */
u64 range;
} va;
@@ -195,14 +196,14 @@ struct panthor_vm_op_ctx {
/** @map: Fields specific to a map operation. */
struct {
- /** @vm_bo: Buffer object to map. */
+ /** @map.vm_bo: Buffer object to map. */
struct drm_gpuvm_bo *vm_bo;
- /** @bo_offset: Offset in the buffer object. */
+ /** @map.bo_offset: Offset in the buffer object. */
u64 bo_offset;
/**
- * @sgt: sg-table pointing to pages backing the GEM object.
+ * @map.sgt: sg-table pointing to pages backing the GEM object.
*
* This is gathered at job creation time, such that we don't have
* to allocate in ::run_job().
@@ -210,7 +211,7 @@ struct panthor_vm_op_ctx {
struct sg_table *sgt;
/**
- * @new_vma: The new VMA object that will be inserted to the VA tree.
+ * @map.new_vma: The new VMA object that will be inserted to the VA tree.
*/
struct panthor_vma *new_vma;
} map;
@@ -304,27 +305,27 @@ struct panthor_vm {
/** @kernel_auto_va: Automatic VA-range for kernel BOs. */
struct {
- /** @start: Start of the automatic VA-range for kernel BOs. */
+ /** @kernel_auto_va.start: Start of the automatic VA-range for kernel BOs. */
u64 start;
- /** @size: Size of the automatic VA-range for kernel BOs. */
+ /** @kernel_auto_va.end: End of the automatic VA-range for kernel BOs. */
u64 end;
} kernel_auto_va;
/** @as: Address space related fields. */
struct {
/**
- * @id: ID of the address space this VM is bound to.
+ * @as.id: ID of the address space this VM is bound to.
*
* A value of -1 means the VM is inactive/not bound.
*/
int id;
- /** @active_cnt: Number of active users of this VM. */
+ /** @as.active_cnt: Number of active users of this VM. */
refcount_t active_cnt;
/**
- * @lru_node: Used to instead the VM in the panthor_mmu::as::lru_list.
+ * @as.lru_node: Used to insert the VM in the panthor_mmu::as::lru_list.
*
* Active VMs should not be inserted in the LRU list.
*/
@@ -336,13 +337,13 @@ struct panthor_vm {
*/
struct {
/**
- * @pool: The heap pool attached to this VM.
+ * @heaps.pool: The heap pool attached to this VM.
*
* Will stay NULL until someone creates a heap context on this VM.
*/
struct panthor_heap_pool *pool;
- /** @lock: Lock used to protect access to @pool. */
+ /** @heaps.lock: Lock used to protect access to @pool. */
struct mutex lock;
} heaps;
@@ -408,7 +409,7 @@ struct panthor_vm_bind_job {
struct panthor_vm_op_ctx ctx;
};
-/**
+/*
* @pt_cache: Cache used to allocate MMU page tables.
*
* The pre-allocation pattern forces us to over-allocate to plan for
@@ -478,7 +479,7 @@ static void *alloc_pt(void *cookie, size_t size, gfp_t gfp)
}
/**
- * @free_pt() - Custom page table free function
+ * free_pt() - Custom page table free function
* @cookie: Cookie passed at page table allocation time.
* @data: Page table to free.
* @size: Size of the page table. This size should be fixed,
@@ -697,7 +698,7 @@ static void panthor_vm_release_as_locked(struct panthor_vm *vm)
/**
* panthor_vm_active() - Flag a VM as active
- * @VM: VM to flag as active.
+ * @vm: VM to flag as active.
*
* Assigns an address space to a VM so it can be used by the GPU/MCU.
*
@@ -801,7 +802,7 @@ out_dev_exit:
/**
* panthor_vm_idle() - Flag a VM idle
- * @VM: VM to flag as idle.
+ * @vm: VM to flag as idle.
*
* When we know the GPU is done with the VM (no more jobs to process),
* we can relinquish the AS slot attached to this VM, if any.
@@ -1017,7 +1018,7 @@ static int flags_to_prot(u32 flags)
/**
* panthor_vm_alloc_va() - Allocate a region in the auto-va space
- * @VM: VM to allocate a region on.
+ * @vm: VM to allocate a region on.
* @va: start of the VA range. Can be PANTHOR_VM_KERNEL_AUTO_VA if the user
* wants the VA to be automatically allocated from the auto-VA range.
* @size: size of the VA range.
@@ -1063,7 +1064,7 @@ panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size,
/**
* panthor_vm_free_va() - Free a region allocated with panthor_vm_alloc_va()
- * @VM: VM to free the region on.
+ * @vm: VM to free the region on.
* @va_node: Memory node representing the region to free.
*/
void panthor_vm_free_va(struct panthor_vm *vm, struct drm_mm_node *va_node)
@@ -1492,9 +1493,9 @@ panthor_vm_create_check_args(const struct panthor_device *ptdev,
/**
* panthor_vm_pool_create_vm() - Create a VM
+ * @ptdev: The panthor device
 * @pool: The VM pool to create this VM on.
- * @kernel_va_start: Start of the region reserved for kernel objects.
- * @kernel_va_range: Size of the region reserved for kernel objects.
+ * @args: VM creation args.
*
* Return: a positive VM ID on success, a negative error code otherwise.
*/
@@ -1558,6 +1559,8 @@ static void panthor_vm_destroy(struct panthor_vm *vm)
*
* The VM resources are freed when the last reference on the VM object is
* dropped.
+ *
+ * Return: %0 for success, negative errno value for failure
*/
int panthor_vm_pool_destroy_vm(struct panthor_vm_pool *pool, u32 handle)
{
@@ -1941,6 +1944,33 @@ struct panthor_heap_pool *panthor_vm_get_heap_pool(struct panthor_vm *vm, bool c
return pool;
}
+/**
+ * panthor_vm_heaps_sizes() - Calculate size of all heap chunks across all
+ * heaps over all the heap pools in a VM
+ * @pfile: File.
+ * @stats: Memory stats to be updated.
+ *
+ * Calculate all heap chunk sizes in all heap pools bound to a VM. If the VM
+ * is active, record the size as active as well.
+ */
+void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats *stats)
+{
+ struct panthor_vm *vm;
+ unsigned long i;
+
+ if (!pfile->vms)
+ return;
+
+ xa_lock(&pfile->vms->xa);
+ xa_for_each(&pfile->vms->xa, i, vm) {
+ size_t size = panthor_heap_pool_size(vm->heaps.pool);
+ stats->resident += size;
+ if (vm->as.id >= 0)
+ stats->active += size;
+ }
+ xa_unlock(&pfile->vms->xa);
+}
+
static u64 mair_to_memattr(u64 mair, bool coherent)
{
u64 memattr = 0;
@@ -2275,6 +2305,16 @@ panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
u64 full_va_range = 1ull << va_bits;
struct drm_gem_object *dummy_gem;
struct drm_gpu_scheduler *sched;
+ const struct drm_sched_init_args sched_args = {
+ .ops = &panthor_vm_bind_ops,
+ .submit_wq = ptdev->mmu->vm.wq,
+ .num_rqs = 1,
+ .credit_limit = 1,
+ /* Bind operations are synchronous for now, no timeout needed. */
+ .timeout = MAX_SCHEDULE_TIMEOUT,
+ .name = "panthor-vm-bind",
+ .dev = ptdev->base.dev,
+ };
struct io_pgtable_cfg pgtbl_cfg;
u64 mair, min_va, va_range;
struct panthor_vm *vm;
@@ -2332,11 +2372,7 @@ panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
goto err_mm_takedown;
}
- /* Bind operations are synchronous for now, no timeout needed. */
- ret = drm_sched_init(&vm->sched, &panthor_vm_bind_ops, ptdev->mmu->vm.wq,
- 1, 1, 0,
- MAX_SCHEDULE_TIMEOUT, NULL, NULL,
- "panthor-vm-bind", ptdev->base.dev);
+ ret = drm_sched_init(&vm->sched, &sched_args);
if (ret)
goto err_free_io_pgtable;
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.h b/drivers/gpu/drm/panthor/panthor_mmu.h
index 8d21e83d8aba..fc274637114e 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.h
+++ b/drivers/gpu/drm/panthor/panthor_mmu.h
@@ -9,6 +9,7 @@
struct drm_exec;
struct drm_sched_job;
+struct drm_memory_stats;
struct panthor_gem_object;
struct panthor_heap_pool;
struct panthor_vm;
@@ -37,6 +38,8 @@ int panthor_vm_flush_all(struct panthor_vm *vm);
struct panthor_heap_pool *
panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create);
+void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats *stats);
+
struct panthor_vm *panthor_vm_get(struct panthor_vm *vm);
void panthor_vm_put(struct panthor_vm *vm);
struct panthor_vm *panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 77b184c3fb0c..4d31d1967716 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -9,6 +9,7 @@
#include <drm/panthor_drm.h>
#include <linux/build_bug.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
@@ -88,9 +89,6 @@
#define JOB_TIMEOUT_MS 5000
-#define MIN_CS_PER_CSG 8
-
-#define MIN_CSGS 3
#define MAX_CSG_PRIO 0xf
#define NUM_INSTRS_PER_CACHE_LINE (64 / sizeof(u64))
@@ -628,16 +626,19 @@ struct panthor_group {
*/
struct panthor_kernel_bo *syncobjs;
- /** @fdinfo: Per-file total cycle and timestamp values reference. */
+ /** @fdinfo: Per-file info exposed through /proc/<process>/fdinfo */
struct {
/** @data: Total sampled values for jobs in queues from this group. */
struct panthor_gpu_usage data;
/**
- * @lock: Mutex to govern concurrent access from drm file's fdinfo callback
- * and job post-completion processing function
+ * @fdinfo.lock: Spinlock to govern concurrent access from drm file's fdinfo
+ * callback and job post-completion processing function
*/
- struct mutex lock;
+ spinlock_t lock;
+
+ /** @fdinfo.kbo_sizes: Aggregate size of private kernel BOs held by the group. */
+ size_t kbo_sizes;
} fdinfo;
/** @state: Group state. */
@@ -910,8 +911,6 @@ static void group_release_work(struct work_struct *work)
release_work);
u32 i;
- mutex_destroy(&group->fdinfo.lock);
-
for (i = 0; i < group->queue_count; i++)
group_free_queue(group, group->queues[i]);
@@ -2861,12 +2860,12 @@ static void update_fdinfo_stats(struct panthor_job *job)
struct panthor_job_profiling_data *slots = queue->profiling.slots->kmap;
struct panthor_job_profiling_data *data = &slots[job->profiling.slot];
- mutex_lock(&group->fdinfo.lock);
- if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES)
- fdinfo->cycles += data->cycles.after - data->cycles.before;
- if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP)
- fdinfo->time += data->time.after - data->time.before;
- mutex_unlock(&group->fdinfo.lock);
+ scoped_guard(spinlock, &group->fdinfo.lock) {
+ if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES)
+ fdinfo->cycles += data->cycles.after - data->cycles.before;
+ if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP)
+ fdinfo->time += data->time.after - data->time.before;
+ }
}
void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile)
@@ -2878,14 +2877,15 @@ void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile)
if (IS_ERR_OR_NULL(gpool))
return;
+ xa_lock(&gpool->xa);
xa_for_each(&gpool->xa, i, group) {
- mutex_lock(&group->fdinfo.lock);
+ guard(spinlock)(&group->fdinfo.lock);
pfile->stats.cycles += group->fdinfo.data.cycles;
pfile->stats.time += group->fdinfo.data.time;
group->fdinfo.data.cycles = 0;
group->fdinfo.data.time = 0;
- mutex_unlock(&group->fdinfo.lock);
}
+ xa_unlock(&gpool->xa);
}
static void group_sync_upd_work(struct work_struct *work)
@@ -3287,6 +3287,22 @@ static struct panthor_queue *
group_create_queue(struct panthor_group *group,
const struct drm_panthor_queue_create *args)
{
+ const struct drm_sched_init_args sched_args = {
+ .ops = &panthor_queue_sched_ops,
+ .submit_wq = group->ptdev->scheduler->wq,
+ .num_rqs = 1,
+ /*
+ * The credit limit argument tells us the total number of
+ * instructions across all CS slots in the ringbuffer, with
+ * some jobs requiring twice as many as others, depending on
+ * their profiling status.
+ */
+ .credit_limit = args->ringbuf_size / sizeof(u64),
+ .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
+ .timeout_wq = group->ptdev->reset.wq,
+ .name = "panthor-queue",
+ .dev = group->ptdev->base.dev,
+ };
struct drm_gpu_scheduler *drm_sched;
struct panthor_queue *queue;
int ret;
@@ -3357,17 +3373,7 @@ group_create_queue(struct panthor_group *group,
if (ret)
goto err_free_queue;
- /*
- * Credit limit argument tells us the total number of instructions
- * across all CS slots in the ringbuffer, with some jobs requiring
- * twice as many as others, depending on their profiling status.
- */
- ret = drm_sched_init(&queue->scheduler, &panthor_queue_sched_ops,
- group->ptdev->scheduler->wq, 1,
- args->ringbuf_size / sizeof(u64),
- 0, msecs_to_jiffies(JOB_TIMEOUT_MS),
- group->ptdev->reset.wq,
- NULL, "panthor-queue", group->ptdev->base.dev);
+ ret = drm_sched_init(&queue->scheduler, &sched_args);
if (ret)
goto err_free_queue;
@@ -3381,6 +3387,29 @@ err_free_queue:
return ERR_PTR(ret);
}
+static void add_group_kbo_sizes(struct panthor_device *ptdev,
+ struct panthor_group *group)
+{
+ struct panthor_queue *queue;
+ int i;
+
+ if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(group)))
+ return;
+ if (drm_WARN_ON(&ptdev->base, ptdev != group->ptdev))
+ return;
+
+ group->fdinfo.kbo_sizes += group->suspend_buf->obj->size;
+ group->fdinfo.kbo_sizes += group->protm_suspend_buf->obj->size;
+ group->fdinfo.kbo_sizes += group->syncobjs->obj->size;
+
+ for (i = 0; i < group->queue_count; i++) {
+ queue = group->queues[i];
+ group->fdinfo.kbo_sizes += queue->ringbuf->obj->size;
+ group->fdinfo.kbo_sizes += queue->iface.mem->obj->size;
+ group->fdinfo.kbo_sizes += queue->profiling.slots->obj->size;
+ }
+}
+
#define MAX_GROUPS_PER_POOL 128
int panthor_group_create(struct panthor_file *pfile,
@@ -3505,7 +3534,8 @@ int panthor_group_create(struct panthor_file *pfile,
}
mutex_unlock(&sched->reset.lock);
- mutex_init(&group->fdinfo.lock);
+ add_group_kbo_sizes(group->ptdev, group);
+ spin_lock_init(&group->fdinfo.lock);
return gid;
@@ -3624,6 +3654,33 @@ void panthor_group_pool_destroy(struct panthor_file *pfile)
pfile->groups = NULL;
}
+/**
+ * panthor_fdinfo_gather_group_mem_info() - Retrieve aggregate size of all private kernel BOs
+ * belonging to all the groups owned by an open Panthor file
+ * @pfile: File.
+ * @stats: Memory statistics to be updated.
+ */
+void
+panthor_fdinfo_gather_group_mem_info(struct panthor_file *pfile,
+ struct drm_memory_stats *stats)
+{
+ struct panthor_group_pool *gpool = pfile->groups;
+ struct panthor_group *group;
+ unsigned long i;
+
+ if (IS_ERR_OR_NULL(gpool))
+ return;
+
+ xa_lock(&gpool->xa);
+ xa_for_each(&gpool->xa, i, group) {
+ stats->resident += group->fdinfo.kbo_sizes;
+ if (group->csg_id >= 0)
+ stats->active += group->fdinfo.kbo_sizes;
+ }
+ xa_unlock(&gpool->xa);
+}
+
static void job_release(struct kref *ref)
{
struct panthor_job *job = container_of(ref, struct panthor_job, refcount);
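The fdinfo locking above drops the mutex in favour of a spinlock driven by the <linux/cleanup.h> guards included at the top of the file: scoped_guard() holds the lock for exactly the braced block, guard() holds it until the end of the enclosing scope, and neither needs an explicit unlock on any exit path. A minimal sketch:

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static void example_guard_usage(spinlock_t *lock, u64 *counter)
{
	scoped_guard(spinlock, lock) {
		(*counter)++;		/* lock dropped when the block ends */
	}

	guard(spinlock)(lock);
	(*counter)++;			/* lock dropped when the function returns */
}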
diff --git a/drivers/gpu/drm/panthor/panthor_sched.h b/drivers/gpu/drm/panthor/panthor_sched.h
index 5ae6b4bde7c5..e650a445cf50 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.h
+++ b/drivers/gpu/drm/panthor/panthor_sched.h
@@ -9,6 +9,7 @@ struct dma_fence;
struct drm_file;
struct drm_gem_object;
struct drm_sched_job;
+struct drm_memory_stats;
struct drm_panthor_group_create;
struct drm_panthor_queue_create;
struct drm_panthor_group_get_state;
@@ -36,6 +37,8 @@ void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *job);
int panthor_group_pool_create(struct panthor_file *pfile);
void panthor_group_pool_destroy(struct panthor_file *pfile);
+void panthor_fdinfo_gather_group_mem_info(struct panthor_file *pfile,
+ struct drm_memory_stats *stats);
int panthor_sched_init(struct panthor_device *ptdev);
void panthor_sched_unplug(struct panthor_device *ptdev);
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index bc24af08dfcd..70aff64ced87 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -1044,7 +1044,7 @@ static int qxl_conn_get_modes(struct drm_connector *connector)
}
static enum drm_mode_status qxl_conn_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_device *ddev = connector->dev;
struct qxl_device *qdev = to_qxl(ddev);
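This hunk, and the radeon ones below, track the connector helper's .mode_valid callback now receiving a const display mode: validation may inspect the mode but must not modify it. A sketch of the updated callback shape (the limit is illustrative):

static enum drm_mode_status example_mode_valid(struct drm_connector *connector,
					       const struct drm_display_mode *mode)
{
	if (mode->clock > 300000)	/* example pixel-clock ceiling, in kHz */
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}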
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 6328627b7c34..fa78824931cc 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -467,7 +467,7 @@ void radeon_dp_set_link_config(struct drm_connector *connector,
}
int radeon_dp_mode_valid_helper(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index abe9d65cc460..7c3a960f486a 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -3405,12 +3405,8 @@ static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
&rdev->pm.dpm.dyn_state.cac_leakage_table;
u32 i;
- if (allowed_sclk_vddc_table == NULL)
- return -EINVAL;
if (allowed_sclk_vddc_table->count < 1)
return -EINVAL;
- if (allowed_mclk_table == NULL)
- return -EINVAL;
if (allowed_mclk_table->count < 1)
return -EINVAL;
@@ -3468,24 +3464,20 @@ static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
- if (allowed_mclk_table) {
- for (i = 0; i < allowed_mclk_table->count; i++) {
- pi->dpm_table.vddci_table.dpm_levels[i].value =
- allowed_mclk_table->entries[i].v;
- pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
- }
- pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
+ for (i = 0; i < allowed_mclk_table->count; i++) {
+ pi->dpm_table.vddci_table.dpm_levels[i].value =
+ allowed_mclk_table->entries[i].v;
+ pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
}
+ pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
- if (allowed_mclk_table) {
- for (i = 0; i < allowed_mclk_table->count; i++) {
- pi->dpm_table.mvdd_table.dpm_levels[i].value =
- allowed_mclk_table->entries[i].v;
- pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
- }
- pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
+ for (i = 0; i < allowed_mclk_table->count; i++) {
+ pi->dpm_table.mvdd_table.dpm_levels[i].value =
+ allowed_mclk_table->entries[i].v;
+ pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
}
+ pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
ci_setup_default_pcie_tables(rdev);
@@ -4880,16 +4872,10 @@ static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *
struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
- if (allowed_sclk_vddc_table == NULL)
- return -EINVAL;
if (allowed_sclk_vddc_table->count < 1)
return -EINVAL;
- if (allowed_mclk_vddc_table == NULL)
- return -EINVAL;
if (allowed_mclk_vddc_table->count < 1)
return -EINVAL;
- if (allowed_mclk_vddci_table == NULL)
- return -EINVAL;
if (allowed_mclk_vddci_table->count < 1)
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index f9996304d943..9f6a3df951ba 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -806,7 +806,7 @@ static int radeon_lvds_get_modes(struct drm_connector *connector)
}
static enum drm_mode_status radeon_lvds_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
@@ -968,7 +968,7 @@ static int radeon_vga_get_modes(struct drm_connector *connector)
}
static enum drm_mode_status radeon_vga_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -1116,7 +1116,7 @@ static int radeon_tv_get_modes(struct drm_connector *connector)
}
static enum drm_mode_status radeon_tv_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
if ((mode->hdisplay > 1024) || (mode->vdisplay > 768))
return MODE_CLOCK_RANGE;
@@ -1447,7 +1447,7 @@ static void radeon_dvi_force(struct drm_connector *connector)
}
static enum drm_mode_status radeon_dvi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -1723,7 +1723,7 @@ out:
}
static enum drm_mode_status radeon_dp_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 6f071e61f764..bbd39348a7ab 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -530,7 +530,7 @@ int radeon_wb_init(struct radeon_device *rdev)
* @mc: memory controller structure holding memory informations
* @base: base address at which to put VRAM
*
- * Function will place try to place VRAM at base address provided
+ * Function will try to place VRAM at base address provided
* as parameter (which is so far either PCI aperture address or
* for IGP TOM base address).
*
@@ -557,7 +557,7 @@ int radeon_wb_init(struct radeon_device *rdev)
*
 * Note 3: when limiting vram it's safe to overwrite real_vram_size because
* we are not in case where real_vram_size is inferior to mc_vram_size (ie
- * note afected by bogus hw of Novell bug 204882 + along with lots of ubuntu
+ * not affected by bogus hw of Novell bug 204882 + along with lots of ubuntu
* ones)
*
* Note 4: IGP TOM addr should be the same as the aperture addr, we don't
@@ -594,7 +594,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
* @rdev: radeon device structure holding all necessary informations
* @mc: memory controller structure holding memory informations
*
- * Function will place try to place GTT before or after VRAM.
+ * Function will try to place GTT before or after VRAM.
*
 * If GTT size is bigger than space left then we adjust GTT size.
 * Thus function will never fail.
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index daff61586be5..8ff4f18b51a9 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -840,7 +840,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
}
radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
rdev->fence_drv[ring].initialized = true;
- dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx\n",
+ dev_info(rdev->dev, "fence driver on ring %d uses gpu addr 0x%016llx\n",
ring, rdev->fence_drv[ring].gpu_addr);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 4063d3801e81..3102f6c2d055 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -706,7 +706,7 @@ extern int radeon_get_monitor_bpc(struct drm_connector *connector);
extern void radeon_connector_hotplug(struct drm_connector *connector);
extern int radeon_dp_mode_valid_helper(struct drm_connector *connector,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
extern void radeon_dp_set_link_config(struct drm_connector *connector,
const struct drm_display_mode *mode);
extern void radeon_dp_link_train(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 058a1c8451b2..ded5747a58d1 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -961,7 +961,7 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
unsigned optimal_score = ~0;
/* loop through vco from low to high */
- vco_min = max(max(vco_min, vclk), dclk);
+ vco_min = max3(vco_min, vclk, dclk);
for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {
uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
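max3() from <linux/minmax.h> is the type-checked maximum of three values, so the change above is behaviourally identical to the nested form it replaces, just easier to read:

/* Equivalent forms: */
vco_min = max(max(vco_min, vclk), dclk);	/* old */
vco_min = max3(vco_min, vclk, dclk);		/* new */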
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index d1871af967d4..2355a78e1b69 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -557,7 +557,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
{
int session_idx = -1;
bool destroyed = false, created = false, allocated = false;
- uint32_t tmp, handle = 0;
+ uint32_t tmp = 0, handle = 0;
uint32_t *size = &tmp;
int i, r = 0;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 6c95575ce109..26197aceb001 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6198,7 +6198,7 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
if (wptr & RB_OVERFLOW) {
wptr &= ~RB_OVERFLOW;
- /* When a ring buffer overflow happen start parsing interrupt
+ /* When a ring buffer overflow happens, start parsing interrupts
* from the last not overwritten vector (wptr + 16). Hopefully
 * this should allow us to catch up.
*/
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
index e8d64583e3bd..380a855b832a 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
@@ -582,9 +582,8 @@ EXPORT_SYMBOL_GPL(rcar_lvds_pclk_disable);
*/
static void rcar_lvds_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
- struct drm_atomic_state *state = old_bridge_state->base.state;
struct drm_connector *connector;
struct drm_crtc *crtc;
@@ -596,7 +595,7 @@ static void rcar_lvds_atomic_enable(struct drm_bridge *bridge,
}
static void rcar_lvds_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
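The bridge hooks above (and in the DSI bridges that follow) now take the full drm_atomic_state rather than the old bridge state. A bridge that still needs its old state can look it up from the atomic state; a hedged sketch of that pattern inside an enable hook (the function name is illustrative):

static void example_bridge_atomic_enable(struct drm_bridge *bridge,
					 struct drm_atomic_state *state)
{
	struct drm_bridge_state *old_bridge_state =
		drm_atomic_get_old_bridge_state(state, bridge);

	if (!old_bridge_state)
		return;

	/* ... program the hardware using state / old_bridge_state ... */
}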
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
index 3c0c18d5249a..d1e626068065 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
@@ -808,7 +808,7 @@ static int rcar_mipi_dsi_attach(struct drm_bridge *bridge,
}
static void rcar_mipi_dsi_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
@@ -816,7 +816,7 @@ static void rcar_mipi_dsi_atomic_enable(struct drm_bridge *bridge,
}
static void rcar_mipi_dsi_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
index fa7a1ae22aa3..4550c6d84796 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
@@ -532,9 +532,8 @@ static int rzg2l_mipi_dsi_attach(struct drm_bridge *bridge,
}
static void rzg2l_mipi_dsi_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
- struct drm_atomic_state *state = old_bridge_state->base.state;
struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge);
const struct drm_display_mode *mode;
struct drm_connector *connector;
@@ -568,7 +567,7 @@ err_stop:
}
static void rzg2l_mipi_dsi_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge);
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 0844175c37c5..a8265a1bf9ff 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -32,27 +32,32 @@
#include "rockchip_drm_drv.h"
-#define RK3288_GRF_SOC_CON6 0x25c
-#define RK3288_EDP_LCDC_SEL BIT(5)
-#define RK3399_GRF_SOC_CON20 0x6250
-#define RK3399_EDP_LCDC_SEL BIT(5)
-
-#define HIWORD_UPDATE(val, mask) (val | (mask) << 16)
-
#define PSR_WAIT_LINE_FLAG_TIMEOUT_MS 100
+#define GRF_REG_FIELD(_reg, _lsb, _msb) { \
+ .reg = _reg, \
+ .lsb = _lsb, \
+ .msb = _msb, \
+ .valid = true, \
+ }
+
+struct rockchip_grf_reg_field {
+ u32 reg;
+ u32 lsb;
+ u32 msb;
+ bool valid;
+};
+
/**
 * struct rockchip_dp_chip_data - split out the GRF settings for the different chips
- * @lcdsel_grf_reg: grf register offset of lcdc select
- * @lcdsel_big: reg value of selecting vop big for eDP
- * @lcdsel_lit: reg value of selecting vop little for eDP
+ * @lcdc_sel: grf register field of lcdc_sel
* @chip_type: specific chip type
+ * @reg: register base address
*/
struct rockchip_dp_chip_data {
- u32 lcdsel_grf_reg;
- u32 lcdsel_big;
- u32 lcdsel_lit;
+ const struct rockchip_grf_reg_field lcdc_sel;
u32 chip_type;
+ u32 reg;
};
struct rockchip_dp_device {
@@ -84,6 +89,26 @@ static struct rockchip_dp_device *pdata_encoder_to_dp(struct analogix_dp_plat_da
return container_of(plat_data, struct rockchip_dp_device, plat_data);
}
+static int rockchip_grf_write(struct regmap *grf, u32 reg, u32 mask, u32 val)
+{
+ return regmap_write(grf, reg, (mask << 16) | (val & mask));
+}
+
+static int rockchip_grf_field_write(struct regmap *grf,
+ const struct rockchip_grf_reg_field *field,
+ u32 val)
+{
+ u32 mask;
+
+ if (!field->valid)
+ return 0;
+
+ mask = GENMASK(field->msb, field->lsb);
+ val <<= field->lsb;
+
+ return rockchip_grf_write(grf, field->reg, mask, val);
+}
+
static int rockchip_dp_pre_init(struct rockchip_dp_device *dp)
{
reset_control_assert(dp->rst);
@@ -181,7 +206,6 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder,
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int ret;
- u32 val;
crtc = rockchip_dp_drm_get_new_crtc(encoder, state);
if (!crtc)
@@ -192,24 +216,19 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder,
if (old_crtc_state && old_crtc_state->self_refresh_active)
return;
- ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
- if (ret < 0)
- return;
-
- if (ret)
- val = dp->data->lcdsel_lit;
- else
- val = dp->data->lcdsel_big;
-
- DRM_DEV_DEBUG(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG");
-
ret = clk_prepare_enable(dp->grfclk);
if (ret < 0) {
DRM_DEV_ERROR(dp->dev, "failed to enable grfclk %d\n", ret);
return;
}
- ret = regmap_write(dp->grf, dp->data->lcdsel_grf_reg, val);
+ ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
+ if (ret < 0)
+ return;
+
+ DRM_DEV_DEBUG(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG");
+
+ ret = rockchip_grf_field_write(dp->grf, &dp->data->lcdc_sel, ret);
if (ret != 0)
DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
@@ -379,6 +398,8 @@ static int rockchip_dp_probe(struct platform_device *pdev)
const struct rockchip_dp_chip_data *dp_data;
struct drm_panel *panel = NULL;
struct rockchip_dp_device *dp;
+ struct resource *res;
+ int i;
int ret;
dp_data = of_device_get_match_data(dev);
@@ -393,9 +414,24 @@ static int rockchip_dp_probe(struct platform_device *pdev)
if (!dp)
return -ENOMEM;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ i = 0;
+ while (dp_data[i].reg) {
+ if (dp_data[i].reg == res->start) {
+ dp->data = &dp_data[i];
+ break;
+ }
+
+ i++;
+ }
+
+ if (!dp->data)
+ return dev_err_probe(dev, -EINVAL, "no chip-data for %s node\n",
+ dev->of_node->name);
+
dp->dev = dev;
dp->adp = ERR_PTR(-ENODEV);
- dp->data = dp_data;
dp->plat_data.panel = panel;
dp->plat_data.dev_type = dp->data->chip_type;
dp->plat_data.power_on = rockchip_dp_poweron;
@@ -447,18 +483,22 @@ static int rockchip_dp_resume(struct device *dev)
static DEFINE_RUNTIME_DEV_PM_OPS(rockchip_dp_pm_ops, rockchip_dp_suspend,
rockchip_dp_resume, NULL);
-static const struct rockchip_dp_chip_data rk3399_edp = {
- .lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
- .lcdsel_big = HIWORD_UPDATE(0, RK3399_EDP_LCDC_SEL),
- .lcdsel_lit = HIWORD_UPDATE(RK3399_EDP_LCDC_SEL, RK3399_EDP_LCDC_SEL),
- .chip_type = RK3399_EDP,
+static const struct rockchip_dp_chip_data rk3399_edp[] = {
+ {
+ .lcdc_sel = GRF_REG_FIELD(0x6250, 5, 5),
+ .chip_type = RK3399_EDP,
+ .reg = 0xff970000,
+ },
+ { /* sentinel */ }
};
-static const struct rockchip_dp_chip_data rk3288_dp = {
- .lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
- .lcdsel_big = HIWORD_UPDATE(0, RK3288_EDP_LCDC_SEL),
- .lcdsel_lit = HIWORD_UPDATE(RK3288_EDP_LCDC_SEL, RK3288_EDP_LCDC_SEL),
- .chip_type = RK3288_DP,
+static const struct rockchip_dp_chip_data rk3288_dp[] = {
+ {
+ .lcdc_sel = GRF_REG_FIELD(0x025c, 5, 5),
+ .chip_type = RK3288_DP,
+ .reg = 0xff970000,
+ },
+ { /* sentinel */ }
};
static const struct of_device_id rockchip_dp_dt_ids[] = {
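As an aside, a minimal usage sketch of the new GRF field helpers introduced above (not part of the patch; the field placement is the RK3399 lcdc_sel value taken from this diff). Rockchip GRF registers carry a write-enable mask in the upper 16 bits, so rockchip_grf_field_write() can update a single field without a read-modify-write cycle.

/* Illustrative only; relies on the helpers and macro added in this patch. */
#include <linux/regmap.h>

static const struct rockchip_grf_reg_field example_lcdc_sel =
	GRF_REG_FIELD(0x6250, 5, 5);		/* RK3399: GRF_SOC_CON20, bit 5 */

static int example_select_vop(struct regmap *grf, bool little_vop)
{
	/*
	 * The helper builds GENMASK(msb, lsb), shifts the value into place
	 * and writes (mask << 16) | value, so only the selected bits change.
	 */
	return rockchip_grf_field_write(grf, &example_lcdc_sel, little_vop);
}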
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index b17de83b988b..292c31de18f1 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -275,7 +275,7 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
cdn_dp_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct cdn_dp_device *dp = connector_to_dp(connector);
struct drm_display_info *display_info = &dp->connector.display_info;
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index e7a6669c46b0..f737e7d46e66 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -203,7 +203,7 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
if (IS_ERR(hdmi->regmap)) {
- drm_err(hdmi, "Unable to get rockchip,grf\n");
+ dev_err(hdmi->dev, "Unable to get rockchip,grf\n");
return PTR_ERR(hdmi->regmap);
}
@@ -214,7 +214,7 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
if (IS_ERR(hdmi->ref_clk)) {
ret = PTR_ERR(hdmi->ref_clk);
if (ret != -EPROBE_DEFER)
- drm_err(hdmi, "failed to get reference clock\n");
+ dev_err(hdmi->dev, "failed to get reference clock\n");
return ret;
}
@@ -222,7 +222,7 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
if (IS_ERR(hdmi->grf_clk)) {
ret = PTR_ERR(hdmi->grf_clk);
if (ret != -EPROBE_DEFER)
- drm_err(hdmi, "failed to get grf clock\n");
+ dev_err(hdmi->dev, "failed to get grf clock\n");
return ret;
}
@@ -302,16 +302,16 @@ static void dw_hdmi_rockchip_encoder_enable(struct drm_encoder *encoder)
ret = clk_prepare_enable(hdmi->grf_clk);
if (ret < 0) {
- drm_err(hdmi, "failed to enable grfclk %d\n", ret);
+ dev_err(hdmi->dev, "failed to enable grfclk %d\n", ret);
return;
}
ret = regmap_write(hdmi->regmap, hdmi->chip_data->lcdsel_grf_reg, val);
if (ret != 0)
- drm_err(hdmi, "Could not write to GRF: %d\n", ret);
+ dev_err(hdmi->dev, "Could not write to GRF: %d\n", ret);
clk_disable_unprepare(hdmi->grf_clk);
- drm_dbg(hdmi, "vop %s output to hdmi\n", ret ? "LIT" : "BIG");
+ dev_dbg(hdmi->dev, "vop %s output to hdmi\n", ret ? "LIT" : "BIG");
}
static int
@@ -574,7 +574,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
ret = rockchip_hdmi_parse_dt(hdmi);
if (ret) {
if (ret != -EPROBE_DEFER)
- drm_err(hdmi, "Unable to parse OF data\n");
+ dev_err(hdmi->dev, "Unable to parse OF data\n");
return ret;
}
@@ -582,7 +582,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
if (IS_ERR(hdmi->phy)) {
ret = PTR_ERR(hdmi->phy);
if (ret != -EPROBE_DEFER)
- drm_err(hdmi, "failed to get phy\n");
+ dev_err(hdmi->dev, "failed to get phy\n");
return ret;
}
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
index e498767a0a66..3d1dddb34603 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
@@ -25,6 +25,41 @@
#include "rockchip_drm_drv.h"
+#define RK3576_IOC_MISC_CON0 0xa400
+#define RK3576_HDMI_HPD_INT_MSK BIT(2)
+#define RK3576_HDMI_HPD_INT_CLR BIT(1)
+
+#define RK3576_IOC_HDMI_HPD_STATUS 0xa440
+#define RK3576_HDMI_LEVEL_INT BIT(3)
+
+#define RK3576_VO0_GRF_SOC_CON1 0x0004
+#define RK3576_HDMI_FRL_MOD BIT(0)
+#define RK3576_HDMI_HDCP14_MEM_EN BIT(15)
+
+#define RK3576_VO0_GRF_SOC_CON8 0x0020
+#define RK3576_COLOR_FORMAT_MASK (0xf << 4)
+#define RK3576_COLOR_DEPTH_MASK (0xf << 8)
+#define RK3576_RGB (0 << 4)
+#define RK3576_YUV422 (0x1 << 4)
+#define RK3576_YUV444 (0x2 << 4)
+#define RK3576_YUV420 (0x3 << 4)
+#define RK3576_8BPC (0x0 << 8)
+#define RK3576_10BPC (0x6 << 8)
+#define RK3576_CECIN_MASK BIT(3)
+
+#define RK3576_VO0_GRF_SOC_CON12 0x0030
+#define RK3576_GRF_OSDA_DLYN (0xf << 12)
+#define RK3576_GRF_OSDA_DIV (0x7f << 1)
+#define RK3576_GRF_OSDA_DLY_EN BIT(0)
+
+#define RK3576_VO0_GRF_SOC_CON14 0x0038
+#define RK3576_I2S_SEL_MASK BIT(0)
+#define RK3576_SPDIF_SEL_MASK BIT(1)
+#define HDCP0_P1_GPIO_IN BIT(2)
+#define RK3576_SCLIN_MASK BIT(4)
+#define RK3576_SDAIN_MASK BIT(5)
+#define RK3576_HDMI_GRANT_SEL BIT(6)
+
#define RK3588_GRF_SOC_CON2 0x0308
#define RK3588_HDMI0_HPD_INT_MSK BIT(13)
#define RK3588_HDMI0_HPD_INT_CLR BIT(12)
@@ -54,7 +89,6 @@ struct rockchip_hdmi_qp {
struct regmap *regmap;
struct regmap *vo_regmap;
struct rockchip_encoder encoder;
- struct clk *ref_clk;
struct dw_hdmi_qp *hdmi;
struct phy *phy;
struct gpio_desc *enable_gpio;
@@ -62,6 +96,12 @@ struct rockchip_hdmi_qp {
int port_id;
};
+struct rockchip_hdmi_qp_ctrl_ops {
+ void (*io_init)(struct rockchip_hdmi_qp *hdmi);
+ irqreturn_t (*irq_callback)(int irq, void *dev_id);
+ irqreturn_t (*hardirq_callback)(int irq, void *dev_id);
+};
+
static struct rockchip_hdmi_qp *to_rockchip_hdmi_qp(struct drm_encoder *encoder)
{
struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder);
@@ -81,7 +121,6 @@ static void dw_hdmi_qp_rockchip_encoder_enable(struct drm_encoder *encoder)
if (crtc && crtc->state) {
rate = drm_hdmi_compute_mode_clock(&crtc->state->adjusted_mode,
8, HDMI_COLORSPACE_RGB);
- clk_set_rate(hdmi->ref_clk, rate);
/*
* FIXME: Temporary workaround to pass pixel clock rate
* to the PHY driver until phy_configure_opts_hdmi
@@ -161,6 +200,37 @@ static const struct dw_hdmi_qp_phy_ops rk3588_hdmi_phy_ops = {
.setup_hpd = dw_hdmi_qp_rk3588_setup_hpd,
};
+static enum drm_connector_status
+dw_hdmi_qp_rk3576_read_hpd(struct dw_hdmi_qp *dw_hdmi, void *data)
+{
+ struct rockchip_hdmi_qp *hdmi = (struct rockchip_hdmi_qp *)data;
+ u32 val;
+
+ regmap_read(hdmi->regmap, RK3576_IOC_HDMI_HPD_STATUS, &val);
+
+ return val & RK3576_HDMI_LEVEL_INT ?
+ connector_status_connected : connector_status_disconnected;
+}
+
+static void dw_hdmi_qp_rk3576_setup_hpd(struct dw_hdmi_qp *dw_hdmi, void *data)
+{
+ struct rockchip_hdmi_qp *hdmi = (struct rockchip_hdmi_qp *)data;
+ u32 val;
+
+ val = HIWORD_UPDATE(RK3576_HDMI_HPD_INT_CLR,
+ RK3576_HDMI_HPD_INT_CLR | RK3576_HDMI_HPD_INT_MSK);
+
+ regmap_write(hdmi->regmap, RK3576_IOC_MISC_CON0, val);
+ regmap_write(hdmi->regmap, 0xa404, 0xffff0102); /* raw IOC register write, no named macro for this offset */
+}
+
+static const struct dw_hdmi_qp_phy_ops rk3576_hdmi_phy_ops = {
+ .init = dw_hdmi_qp_rk3588_phy_init,
+ .disable = dw_hdmi_qp_rk3588_phy_disable,
+ .read_hpd = dw_hdmi_qp_rk3576_read_hpd,
+ .setup_hpd = dw_hdmi_qp_rk3576_setup_hpd,
+};
+
static void dw_hdmi_qp_rk3588_hpd_work(struct work_struct *work)
{
struct rockchip_hdmi_qp *hdmi = container_of(work,
@@ -172,8 +242,45 @@ static void dw_hdmi_qp_rk3588_hpd_work(struct work_struct *work)
if (drm) {
changed = drm_helper_hpd_irq_event(drm);
if (changed)
- drm_dbg(hdmi, "connector status changed\n");
+ dev_dbg(hdmi->dev, "connector status changed\n");
+ }
+}
+
+static irqreturn_t dw_hdmi_qp_rk3576_hardirq(int irq, void *dev_id)
+{
+ struct rockchip_hdmi_qp *hdmi = dev_id;
+ u32 intr_stat, val;
+
+ regmap_read(hdmi->regmap, RK3576_IOC_HDMI_HPD_STATUS, &intr_stat);
+ if (intr_stat) {
+ val = HIWORD_UPDATE(RK3576_HDMI_HPD_INT_MSK, RK3576_HDMI_HPD_INT_MSK);
+
+ regmap_write(hdmi->regmap, RK3576_IOC_MISC_CON0, val);
+ return IRQ_WAKE_THREAD;
}
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t dw_hdmi_qp_rk3576_irq(int irq, void *dev_id)
+{
+ struct rockchip_hdmi_qp *hdmi = dev_id;
+ u32 intr_stat, val;
+
+ regmap_read(hdmi->regmap, RK3576_IOC_HDMI_HPD_STATUS, &intr_stat);
+
+ if (!intr_stat)
+ return IRQ_NONE;
+
+ val = HIWORD_UPDATE(RK3576_HDMI_HPD_INT_CLR, RK3576_HDMI_HPD_INT_CLR);
+ regmap_write(hdmi->regmap, RK3576_IOC_MISC_CON0, val);
+ mod_delayed_work(system_wq, &hdmi->hpd_work,
+ msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
+
+ val = HIWORD_UPDATE(0, RK3576_HDMI_HPD_INT_MSK);
+ regmap_write(hdmi->regmap, RK3576_IOC_MISC_CON0, val);
+
+ return IRQ_HANDLED;
}
static irqreturn_t dw_hdmi_qp_rk3588_hardirq(int irq, void *dev_id)
@@ -226,24 +333,95 @@ static irqreturn_t dw_hdmi_qp_rk3588_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void dw_hdmi_qp_rk3576_io_init(struct rockchip_hdmi_qp *hdmi)
+{
+ u32 val;
+
+ val = HIWORD_UPDATE(RK3576_SCLIN_MASK, RK3576_SCLIN_MASK) |
+ HIWORD_UPDATE(RK3576_SDAIN_MASK, RK3576_SDAIN_MASK) |
+ HIWORD_UPDATE(RK3576_HDMI_GRANT_SEL, RK3576_HDMI_GRANT_SEL) |
+ HIWORD_UPDATE(RK3576_I2S_SEL_MASK, RK3576_I2S_SEL_MASK);
+
+ regmap_write(hdmi->vo_regmap, RK3576_VO0_GRF_SOC_CON14, val);
+
+ val = HIWORD_UPDATE(0, RK3576_HDMI_HPD_INT_MSK);
+ regmap_write(hdmi->regmap, RK3576_IOC_MISC_CON0, val);
+}
+
+static void dw_hdmi_qp_rk3588_io_init(struct rockchip_hdmi_qp *hdmi)
+{
+ u32 val;
+
+ val = HIWORD_UPDATE(RK3588_SCLIN_MASK, RK3588_SCLIN_MASK) |
+ HIWORD_UPDATE(RK3588_SDAIN_MASK, RK3588_SDAIN_MASK) |
+ HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) |
+ HIWORD_UPDATE(RK3588_I2S_SEL_MASK, RK3588_I2S_SEL_MASK);
+ regmap_write(hdmi->vo_regmap,
+ hdmi->port_id ? RK3588_GRF_VO1_CON6 : RK3588_GRF_VO1_CON3,
+ val);
+
+ val = HIWORD_UPDATE(RK3588_SET_HPD_PATH_MASK, RK3588_SET_HPD_PATH_MASK);
+ regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON7, val);
+
+ if (hdmi->port_id)
+ val = HIWORD_UPDATE(RK3588_HDMI1_GRANT_SEL, RK3588_HDMI1_GRANT_SEL);
+ else
+ val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL, RK3588_HDMI0_GRANT_SEL);
+ regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON9, val);
+
+ if (hdmi->port_id)
+ val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_MSK, RK3588_HDMI1_HPD_INT_MSK);
+ else
+ val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_MSK, RK3588_HDMI0_HPD_INT_MSK);
+ regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
+}
+
+static const struct rockchip_hdmi_qp_ctrl_ops rk3576_hdmi_ctrl_ops = {
+ .io_init = dw_hdmi_qp_rk3576_io_init,
+ .irq_callback = dw_hdmi_qp_rk3576_irq,
+ .hardirq_callback = dw_hdmi_qp_rk3576_hardirq,
+};
+
+static const struct rockchip_hdmi_qp_ctrl_ops rk3588_hdmi_ctrl_ops = {
+ .io_init = dw_hdmi_qp_rk3588_io_init,
+ .irq_callback = dw_hdmi_qp_rk3588_irq,
+ .hardirq_callback = dw_hdmi_qp_rk3588_hardirq,
+};
+
struct rockchip_hdmi_qp_cfg {
unsigned int num_ports;
unsigned int port_ids[MAX_HDMI_PORT_NUM];
+ const struct rockchip_hdmi_qp_ctrl_ops *ctrl_ops;
const struct dw_hdmi_qp_phy_ops *phy_ops;
};
+static const struct rockchip_hdmi_qp_cfg rk3576_hdmi_cfg = {
+ .num_ports = 1,
+ .port_ids = {
+ 0x27da0000,
+ },
+ .ctrl_ops = &rk3576_hdmi_ctrl_ops,
+ .phy_ops = &rk3576_hdmi_phy_ops,
+};
+
static const struct rockchip_hdmi_qp_cfg rk3588_hdmi_cfg = {
.num_ports = 2,
.port_ids = {
0xfde80000,
0xfdea0000,
},
+ .ctrl_ops = &rk3588_hdmi_ctrl_ops,
.phy_ops = &rk3588_hdmi_phy_ops,
};
static const struct of_device_id dw_hdmi_qp_rockchip_dt_ids[] = {
- { .compatible = "rockchip,rk3588-dw-hdmi-qp",
- .data = &rk3588_hdmi_cfg },
+ {
+ .compatible = "rockchip,rk3576-dw-hdmi-qp",
+ .data = &rk3576_hdmi_cfg
+ }, {
+ .compatible = "rockchip,rk3588-dw-hdmi-qp",
+ .data = &rk3588_hdmi_cfg
+ },
{},
};
MODULE_DEVICE_TABLE(of, dw_hdmi_qp_rockchip_dt_ids);
@@ -261,7 +439,6 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
struct resource *res;
struct clk_bulk_data *clks;
int ret, irq, i;
- u32 val;
if (!pdev->dev.of_node)
return -ENODEV;
@@ -278,6 +455,12 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
if (!cfg)
return -ENODEV;
+ if (!cfg->ctrl_ops || !cfg->ctrl_ops->io_init ||
+ !cfg->ctrl_ops->irq_callback || !cfg->ctrl_ops->hardirq_callback) {
+ dev_err(dev, "Missing platform ctrl ops\n");
+ return -ENODEV;
+ }
+
hdmi->dev = &pdev->dev;
hdmi->port_id = -ENODEV;
@@ -289,7 +472,7 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
}
}
if (hdmi->port_id < 0) {
- drm_err(hdmi, "Failed to match HDMI port ID\n");
+ dev_err(hdmi->dev, "Failed to match HDMI port ID\n");
return hdmi->port_id;
}
@@ -313,39 +496,28 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
hdmi->regmap = syscon_regmap_lookup_by_phandle(dev->of_node,
"rockchip,grf");
if (IS_ERR(hdmi->regmap)) {
- drm_err(hdmi, "Unable to get rockchip,grf\n");
+ dev_err(hdmi->dev, "Unable to get rockchip,grf\n");
return PTR_ERR(hdmi->regmap);
}
hdmi->vo_regmap = syscon_regmap_lookup_by_phandle(dev->of_node,
"rockchip,vo-grf");
if (IS_ERR(hdmi->vo_regmap)) {
- drm_err(hdmi, "Unable to get rockchip,vo-grf\n");
+ dev_err(hdmi->dev, "Unable to get rockchip,vo-grf\n");
return PTR_ERR(hdmi->vo_regmap);
}
ret = devm_clk_bulk_get_all_enabled(hdmi->dev, &clks);
if (ret < 0) {
- drm_err(hdmi, "Failed to get clocks: %d\n", ret);
+ dev_err(hdmi->dev, "Failed to get clocks: %d\n", ret);
return ret;
}
- for (i = 0; i < ret; i++) {
- if (!strcmp(clks[i].id, "ref")) {
- hdmi->ref_clk = clks[1].clk;
- break;
- }
- }
- if (!hdmi->ref_clk) {
- drm_err(hdmi, "Missing ref clock\n");
- return -EINVAL;
- }
-
hdmi->enable_gpio = devm_gpiod_get_optional(hdmi->dev, "enable",
GPIOD_OUT_HIGH);
if (IS_ERR(hdmi->enable_gpio)) {
ret = PTR_ERR(hdmi->enable_gpio);
- drm_err(hdmi, "Failed to request enable GPIO: %d\n", ret);
+ dev_err(hdmi->dev, "Failed to request enable GPIO: %d\n", ret);
return ret;
}
@@ -353,35 +525,11 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
if (IS_ERR(hdmi->phy)) {
ret = PTR_ERR(hdmi->phy);
if (ret != -EPROBE_DEFER)
- drm_err(hdmi, "failed to get phy: %d\n", ret);
+ dev_err(hdmi->dev, "failed to get phy: %d\n", ret);
return ret;
}
- val = HIWORD_UPDATE(RK3588_SCLIN_MASK, RK3588_SCLIN_MASK) |
- HIWORD_UPDATE(RK3588_SDAIN_MASK, RK3588_SDAIN_MASK) |
- HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) |
- HIWORD_UPDATE(RK3588_I2S_SEL_MASK, RK3588_I2S_SEL_MASK);
- regmap_write(hdmi->vo_regmap,
- hdmi->port_id ? RK3588_GRF_VO1_CON6 : RK3588_GRF_VO1_CON3,
- val);
-
- val = HIWORD_UPDATE(RK3588_SET_HPD_PATH_MASK,
- RK3588_SET_HPD_PATH_MASK);
- regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON7, val);
-
- if (hdmi->port_id)
- val = HIWORD_UPDATE(RK3588_HDMI1_GRANT_SEL,
- RK3588_HDMI1_GRANT_SEL);
- else
- val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL,
- RK3588_HDMI0_GRANT_SEL);
- regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON9, val);
-
- if (hdmi->port_id)
- val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_MSK, RK3588_HDMI1_HPD_INT_MSK);
- else
- val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_MSK, RK3588_HDMI0_HPD_INT_MSK);
- regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
+ cfg->ctrl_ops->io_init(hdmi);
INIT_DELAYED_WORK(&hdmi->hpd_work, dw_hdmi_qp_rk3588_hpd_work);
@@ -394,8 +542,8 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
return irq;
ret = devm_request_threaded_irq(hdmi->dev, irq,
- dw_hdmi_qp_rk3588_hardirq,
- dw_hdmi_qp_rk3588_irq,
+ cfg->ctrl_ops->hardirq_callback,
+ cfg->ctrl_ops->irq_callback,
IRQF_SHARED, "dw-hdmi-qp-hpd",
hdmi);
if (ret)
@@ -416,7 +564,7 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
connector = drm_bridge_connector_init(drm, encoder);
if (IS_ERR(connector)) {
ret = PTR_ERR(connector);
- drm_err(hdmi, "failed to init bridge connector: %d\n", ret);
+ dev_err(hdmi->dev, "failed to init bridge connector: %d\n", ret);
return ret;
}
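A condensed sketch of the dispatch pattern the RK3576 support adds (simplified and illustrative, not the driver code; error paths trimmed): the OF match data now carries a rockchip_hdmi_qp_ctrl_ops table, bind() rejects configs with missing hooks, and the HPD interrupt handlers come from that table instead of being hard-wired to the RK3588 functions.

/* Sketch only; mirrors the ctrl_ops plumbing added above. */
static int example_bind(struct device *dev, struct rockchip_hdmi_qp *hdmi, int irq)
{
	const struct rockchip_hdmi_qp_cfg *cfg = of_device_get_match_data(dev);

	if (!cfg || !cfg->ctrl_ops || !cfg->ctrl_ops->io_init ||
	    !cfg->ctrl_ops->irq_callback || !cfg->ctrl_ops->hardirq_callback)
		return -ENODEV;

	cfg->ctrl_ops->io_init(hdmi);		/* SoC-specific GRF/IOC setup */

	return devm_request_threaded_irq(dev, irq,
					 cfg->ctrl_ops->hardirq_callback,
					 cfg->ctrl_ops->irq_callback,
					 IRQF_SHARED, "dw-hdmi-qp-hpd", hdmi);
}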
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index 898d90155057..483ecfeaebb0 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -471,7 +471,7 @@ static int inno_hdmi_setup(struct inno_hdmi *hdmi,
}
static enum drm_mode_status inno_hdmi_display_mode_valid(struct inno_hdmi *hdmi,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
unsigned long mpixelclk, max_tolerance;
long rounded_refclk;
@@ -577,7 +577,7 @@ static int inno_hdmi_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
inno_hdmi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct inno_hdmi *hdmi = connector_to_inno_hdmi(connector);
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index 403336397214..f7a460190313 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -482,7 +482,7 @@ static int rk3066_hdmi_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
rk3066_hdmi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
u32 vic = drm_match_cea_mode(mode);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 439edc165ff6..180fad5d49ad 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -484,9 +484,11 @@ static void rockchip_drm_platform_remove(struct platform_device *pdev)
static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
{
- struct drm_device *drm = platform_get_drvdata(pdev);
+ if (component_master_is_bound(&pdev->dev, &rockchip_drm_ops)) {
+ struct drm_device *drm = platform_get_drvdata(pdev);
- drm_atomic_helper_shutdown(drm);
+ drm_atomic_helper_shutdown(drm);
+ }
}
static const struct of_device_id rockchip_drm_dt_ids[] = {
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 57747f1cff26..e3596e2b557d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1072,7 +1072,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
}
static int vop_plane_atomic_async_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
+ struct drm_atomic_state *state, bool flip)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index 17a98845fd31..d0f5fea15e21 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -33,7 +33,6 @@
#include <drm/drm_vblank.h>
#include <uapi/linux/videodev2.h>
-#include <dt-bindings/soc/rockchip,vop2.h>
#include "rockchip_drm_gem.h"
#include "rockchip_drm_vop2.h"
@@ -102,142 +101,7 @@ enum vop2_afbc_format {
VOP2_AFBC_FMT_INVALID = -1,
};
-union vop2_alpha_ctrl {
- u32 val;
- struct {
- /* [0:1] */
- u32 color_mode:1;
- u32 alpha_mode:1;
- /* [2:3] */
- u32 blend_mode:2;
- u32 alpha_cal_mode:1;
- /* [5:7] */
- u32 factor_mode:3;
- /* [8:9] */
- u32 alpha_en:1;
- u32 src_dst_swap:1;
- u32 reserved:6;
- /* [16:23] */
- u32 glb_alpha:8;
- } bits;
-};
-
-struct vop2_alpha {
- union vop2_alpha_ctrl src_color_ctrl;
- union vop2_alpha_ctrl dst_color_ctrl;
- union vop2_alpha_ctrl src_alpha_ctrl;
- union vop2_alpha_ctrl dst_alpha_ctrl;
-};
-
-struct vop2_alpha_config {
- bool src_premulti_en;
- bool dst_premulti_en;
- bool src_pixel_alpha_en;
- bool dst_pixel_alpha_en;
- u16 src_glb_alpha_value;
- u16 dst_glb_alpha_value;
-};
-
-struct vop2_win {
- struct vop2 *vop2;
- struct drm_plane base;
- const struct vop2_win_data *data;
- struct regmap_field *reg[VOP2_WIN_MAX_REG];
-
- /**
- * @win_id: graphic window id, a cluster may be split into two
- * graphics windows.
- */
- u8 win_id;
- u8 delay;
- u32 offset;
-
- enum drm_plane_type type;
-};
-
-struct vop2_video_port {
- struct drm_crtc crtc;
- struct vop2 *vop2;
- struct clk *dclk;
- unsigned int id;
- const struct vop2_video_port_data *data;
-
- struct completion dsp_hold_completion;
-
- /**
- * @win_mask: Bitmask of windows attached to the video port;
- */
- u32 win_mask;
-
- struct vop2_win *primary_plane;
- struct drm_pending_vblank_event *event;
-
- unsigned int nlayers;
-};
-
-struct vop2 {
- struct device *dev;
- struct drm_device *drm;
- struct vop2_video_port vps[ROCKCHIP_MAX_CRTC];
-
- const struct vop2_data *data;
- /*
- * Number of windows that are registered as plane, may be less than the
- * total number of hardware windows.
- */
- u32 registered_num_wins;
-
- struct resource *res;
- void __iomem *regs;
- struct regmap *map;
-
- struct regmap *sys_grf;
- struct regmap *vop_grf;
- struct regmap *vo1_grf;
- struct regmap *sys_pmu;
-
- /* physical map length of vop2 register */
- u32 len;
-
- void __iomem *lut_regs;
-
- /* protects crtc enable/disable */
- struct mutex vop2_lock;
-
- int irq;
-
- /*
- * Some global resources are shared between all video ports(crtcs), so
- * we need a ref counter here.
- */
- unsigned int enable_count;
- struct clk *hclk;
- struct clk *aclk;
- struct clk *pclk;
-
- /* optional internal rgb encoder */
- struct rockchip_rgb *rgb;
-
- /* must be put at the end of the struct */
- struct vop2_win win[];
-};
-
-#define vop2_output_if_is_hdmi(x) ((x) == ROCKCHIP_VOP2_EP_HDMI0 || \
- (x) == ROCKCHIP_VOP2_EP_HDMI1)
-
-#define vop2_output_if_is_dp(x) ((x) == ROCKCHIP_VOP2_EP_DP0 || \
- (x) == ROCKCHIP_VOP2_EP_DP1)
-
-#define vop2_output_if_is_edp(x) ((x) == ROCKCHIP_VOP2_EP_EDP0 || \
- (x) == ROCKCHIP_VOP2_EP_EDP1)
-
-#define vop2_output_if_is_mipi(x) ((x) == ROCKCHIP_VOP2_EP_MIPI0 || \
- (x) == ROCKCHIP_VOP2_EP_MIPI1)
-
-#define vop2_output_if_is_lvds(x) ((x) == ROCKCHIP_VOP2_EP_LVDS0 || \
- (x) == ROCKCHIP_VOP2_EP_LVDS1)
-
-#define vop2_output_if_is_dpi(x) ((x) == ROCKCHIP_VOP2_EP_RGB0)
+#define VOP2_MAX_DCLK_RATE 600000000
/*
* bus-format types.
@@ -272,16 +136,6 @@ static DRM_ENUM_NAME_FN(drm_get_bus_format_name, drm_bus_format_enum_list)
static const struct regmap_config vop2_regmap_config;
-static struct vop2_video_port *to_vop2_video_port(struct drm_crtc *crtc)
-{
- return container_of(crtc, struct vop2_video_port, crtc);
-}
-
-static struct vop2_win *to_vop2_win(struct drm_plane *p)
-{
- return container_of(p, struct vop2_win, base);
-}
-
static void vop2_lock(struct vop2 *vop2)
{
mutex_lock(&vop2->vop2_lock);
@@ -292,44 +146,6 @@ static void vop2_unlock(struct vop2 *vop2)
mutex_unlock(&vop2->vop2_lock);
}
-static void vop2_writel(struct vop2 *vop2, u32 offset, u32 v)
-{
- regmap_write(vop2->map, offset, v);
-}
-
-static void vop2_vp_write(struct vop2_video_port *vp, u32 offset, u32 v)
-{
- regmap_write(vp->vop2->map, vp->data->offset + offset, v);
-}
-
-static u32 vop2_readl(struct vop2 *vop2, u32 offset)
-{
- u32 val;
-
- regmap_read(vop2->map, offset, &val);
-
- return val;
-}
-
-static u32 vop2_vp_read(struct vop2_video_port *vp, u32 offset)
-{
- u32 val;
-
- regmap_read(vp->vop2->map, vp->data->offset + offset, &val);
-
- return val;
-}
-
-static void vop2_win_write(const struct vop2_win *win, unsigned int reg, u32 v)
-{
- regmap_field_write(win->reg[reg], v);
-}
-
-static bool vop2_cluster_window(const struct vop2_win *win)
-{
- return win->data->feature & WIN_FEATURE_CLUSTER;
-}
-
/*
* Note:
* The write mask function is documented but missing on rk3566/8, writes
@@ -539,7 +355,7 @@ static bool vop2_output_uv_swap(u32 bus_format, u32 output_mode)
static bool vop2_output_rg_swap(struct vop2 *vop2, u32 bus_format)
{
- if (vop2->data->soc_id == 3588) {
+ if (vop2->version == VOP_VERSION_RK3588) {
if (bus_format == MEDIA_BUS_FMT_YUV8_1X24 ||
bus_format == MEDIA_BUS_FMT_YUV10_1X30)
return true;
@@ -592,7 +408,7 @@ static bool rockchip_vop2_mod_supported(struct drm_plane *plane, u32 format,
if (modifier == DRM_FORMAT_MOD_INVALID)
return false;
- if (vop2->data->soc_id == 3568 || vop2->data->soc_id == 3566) {
+ if (vop2->version == VOP_VERSION_RK3568) {
if (vop2_cluster_window(win)) {
if (modifier == DRM_FORMAT_MOD_LINEAR) {
drm_dbg_kms(vop2->drm,
@@ -603,7 +419,7 @@ static bool rockchip_vop2_mod_supported(struct drm_plane *plane, u32 format,
}
if (format == DRM_FORMAT_XRGB2101010 || format == DRM_FORMAT_XBGR2101010) {
- if (vop2->data->soc_id == 3588) {
+ if (vop2->version == VOP_VERSION_RK3588) {
if (!rockchip_afbc(plane, modifier)) {
drm_dbg_kms(vop2->drm, "Only support 32 bpp format with afbc\n");
return false;
@@ -1002,6 +818,7 @@ static void rk3588_vop2_power_domain_enable_all(struct vop2 *vop2)
static void vop2_enable(struct vop2 *vop2)
{
int ret;
+ u32 version;
ret = pm_runtime_resume_and_get(vop2->dev);
if (ret < 0) {
@@ -1021,10 +838,20 @@ static void vop2_enable(struct vop2 *vop2)
return;
}
+ version = vop2_readl(vop2, RK3568_VERSION_INFO);
+ if (version != vop2->version) {
+ drm_err(vop2->drm, "Hardware version(0x%08x) mismatch\n", version);
+ return;
+ }
+
+ /*
+ * rk3566 shares the same vop version as rk3568, so
+ * we need to use soc_id for identification here.
+ */
if (vop2->data->soc_id == 3566)
vop2_writel(vop2, RK3568_OTP_WIN_EN, 1);
- if (vop2->data->soc_id == 3588)
+ if (vop2->version == VOP_VERSION_RK3588)
rk3588_vop2_power_domain_enable_all(vop2);
vop2_writel(vop2, RK3568_REG_CFG_DONE, RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN);
@@ -1105,7 +932,7 @@ static void vop2_vp_dsp_lut_update_enable(struct vop2_video_port *vp)
static inline bool vop2_supports_seamless_gamma_lut_update(struct vop2 *vop2)
{
- return (vop2->data->soc_id != 3566 && vop2->data->soc_id != 3568);
+ return vop2->version != VOP_VERSION_RK3568;
}
static bool vop2_gamma_lut_in_use(struct vop2 *vop2, struct vop2_video_port *vp)
@@ -1155,6 +982,9 @@ static void vop2_crtc_atomic_disable(struct drm_crtc *crtc,
vop2_crtc_disable_irq(vp, VP_INT_DSP_HOLD_VALID);
+ if (vp->dclk_src)
+ clk_set_parent(vp->dclk, vp->dclk_src);
+
clk_disable_unprepare(vp->dclk);
vop2->enable_count--;
@@ -1444,12 +1274,15 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
&fb->format->format,
afbc_en ? "AFBC" : "", &yrgb_mst);
- if (vop2->data->soc_id > 3568) {
+ if (vop2->version > VOP_VERSION_RK3568) {
vop2_win_write(win, VOP2_WIN_AXI_BUS_ID, win->data->axi_bus_id);
vop2_win_write(win, VOP2_WIN_AXI_YRGB_R_ID, win->data->axi_yrgb_r_id);
vop2_win_write(win, VOP2_WIN_AXI_UV_R_ID, win->data->axi_uv_r_id);
}
+ if (vop2->version >= VOP_VERSION_RK3576)
+ vop2_win_write(win, VOP2_WIN_VP_SEL, vp->id);
+
if (vop2_cluster_window(win))
vop2_win_write(win, VOP2_WIN_AFBC_HALF_BLOCK_EN, half_block_en);
@@ -1504,7 +1337,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
* this bit is gating disable, we should write 1 to
* disable gating when enable afbc.
*/
- if (vop2->data->soc_id == 3566 || vop2->data->soc_id == 3568)
+ if (vop2->version == VOP_VERSION_RK3568)
vop2_win_write(win, VOP2_WIN_AFBC_AUTO_GATING_EN, 0);
else
vop2_win_write(win, VOP2_WIN_AFBC_AUTO_GATING_EN, 1);
@@ -1514,10 +1347,15 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
else
vop2_win_write(win, VOP2_WIN_AFBC_BLOCK_SPLIT_EN, 0);
+ if (vop2->version >= VOP_VERSION_RK3576) {
+ vop2_win_write(win, VOP2_WIN_AFBC_PLD_OFFSET_EN, 1);
+ vop2_win_write(win, VOP2_WIN_AFBC_PLD_OFFSET, yrgb_mst);
+ }
+
transform_offset = vop2_afbc_transform_offset(pstate, half_block_en);
vop2_win_write(win, VOP2_WIN_AFBC_HDR_PTR, yrgb_mst);
vop2_win_write(win, VOP2_WIN_AFBC_PIC_SIZE, act_info);
- vop2_win_write(win, VOP2_WIN_AFBC_TRANSFORM_OFFSET, transform_offset);
+ vop2_win_write(win, VOP2_WIN_TRANSFORM_OFFSET, transform_offset);
vop2_win_write(win, VOP2_WIN_AFBC_PIC_OFFSET, ((src->x1 >> 16) | src->y1));
vop2_win_write(win, VOP2_WIN_AFBC_DSP_OFFSET, (dest->x1 | (dest->y1 << 16)));
vop2_win_write(win, VOP2_WIN_AFBC_PIC_VIR_WIDTH, stride);
@@ -1528,7 +1366,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
} else {
if (vop2_cluster_window(win)) {
vop2_win_write(win, VOP2_WIN_AFBC_ENABLE, 0);
- vop2_win_write(win, VOP2_WIN_AFBC_TRANSFORM_OFFSET, 0);
+ vop2_win_write(win, VOP2_WIN_TRANSFORM_OFFSET, 0);
}
vop2_win_write(win, VOP2_WIN_YRGB_VIR, DIV_ROUND_UP(fb->pitches[0], 4));
@@ -1547,10 +1385,8 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
rb_swap = vop2_win_rb_swap(fb->format->format);
vop2_win_write(win, VOP2_WIN_RB_SWAP, rb_swap);
- if (!vop2_cluster_window(win)) {
- uv_swap = vop2_win_uv_swap(fb->format->format);
- vop2_win_write(win, VOP2_WIN_UV_SWAP, uv_swap);
- }
+ uv_swap = vop2_win_uv_swap(fb->format->format);
+ vop2_win_write(win, VOP2_WIN_UV_SWAP, uv_swap);
if (fb->format->is_yuv) {
vop2_win_write(win, VOP2_WIN_UV_VIR, DIV_ROUND_UP(fb->pitches[1], 4));
@@ -1724,6 +1560,7 @@ static void vop2_dither_setup(struct drm_crtc *crtc, u32 *dsp_ctrl)
static void vop2_post_config(struct drm_crtc *crtc)
{
struct vop2_video_port *vp = to_vop2_video_port(crtc);
+ struct vop2 *vop2 = vp->vop2;
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
u16 vtotal = mode->crtc_vtotal;
u16 hdisplay = mode->crtc_hdisplay;
@@ -1734,18 +1571,10 @@ static void vop2_post_config(struct drm_crtc *crtc)
u32 top_margin = 100, bottom_margin = 100;
u16 hsize = hdisplay * (left_margin + right_margin) / 200;
u16 vsize = vdisplay * (top_margin + bottom_margin) / 200;
- u16 hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
u16 hact_end, vact_end;
u32 val;
- u32 bg_dly;
- u32 pre_scan_dly;
-
- bg_dly = vp->data->pre_scan_max_dly[3];
- vop2_writel(vp->vop2, RK3568_VP_BG_MIX_CTRL(vp->id),
- FIELD_PREP(RK3568_VP_BG_MIX_CTRL__BG_DLY, bg_dly));
- pre_scan_dly = ((bg_dly + (hdisplay >> 1) - 1) << 16) | hsync_len;
- vop2_vp_write(vp, RK3568_VP_PRE_SCAN_HTIMING, pre_scan_dly);
+ vop2->ops->setup_bg_dly(vp);
vsize = rounddown(vsize, 2);
hsize = rounddown(hsize, 2);
@@ -1781,347 +1610,6 @@ static void vop2_post_config(struct drm_crtc *crtc)
vop2_vp_write(vp, RK3568_VP_DSP_BG, 0);
}
-static unsigned long rk3568_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags)
-{
- struct vop2 *vop2 = vp->vop2;
- struct drm_crtc *crtc = &vp->crtc;
- u32 die, dip;
-
- die = vop2_readl(vop2, RK3568_DSP_IF_EN);
- dip = vop2_readl(vop2, RK3568_DSP_IF_POL);
-
- switch (id) {
- case ROCKCHIP_VOP2_EP_RGB0:
- die &= ~RK3568_SYS_DSP_INFACE_EN_RGB_MUX;
- die |= RK3568_SYS_DSP_INFACE_EN_RGB |
- FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_RGB_MUX, vp->id);
- dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL;
- dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags);
- if (polflags & POLFLAG_DCLK_INV)
- regmap_write(vop2->sys_grf, RK3568_GRF_VO_CON1, BIT(3 + 16) | BIT(3));
- else
- regmap_write(vop2->sys_grf, RK3568_GRF_VO_CON1, BIT(3 + 16));
- break;
- case ROCKCHIP_VOP2_EP_HDMI0:
- die &= ~RK3568_SYS_DSP_INFACE_EN_HDMI_MUX;
- die |= RK3568_SYS_DSP_INFACE_EN_HDMI |
- FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_HDMI_MUX, vp->id);
- dip &= ~RK3568_DSP_IF_POL__HDMI_PIN_POL;
- dip |= FIELD_PREP(RK3568_DSP_IF_POL__HDMI_PIN_POL, polflags);
- break;
- case ROCKCHIP_VOP2_EP_EDP0:
- die &= ~RK3568_SYS_DSP_INFACE_EN_EDP_MUX;
- die |= RK3568_SYS_DSP_INFACE_EN_EDP |
- FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_EDP_MUX, vp->id);
- dip &= ~RK3568_DSP_IF_POL__EDP_PIN_POL;
- dip |= FIELD_PREP(RK3568_DSP_IF_POL__EDP_PIN_POL, polflags);
- break;
- case ROCKCHIP_VOP2_EP_MIPI0:
- die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX;
- die |= RK3568_SYS_DSP_INFACE_EN_MIPI0 |
- FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX, vp->id);
- dip &= ~RK3568_DSP_IF_POL__MIPI_PIN_POL;
- dip |= FIELD_PREP(RK3568_DSP_IF_POL__MIPI_PIN_POL, polflags);
- break;
- case ROCKCHIP_VOP2_EP_MIPI1:
- die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI1_MUX;
- die |= RK3568_SYS_DSP_INFACE_EN_MIPI1 |
- FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_MIPI1_MUX, vp->id);
- dip &= ~RK3568_DSP_IF_POL__MIPI_PIN_POL;
- dip |= FIELD_PREP(RK3568_DSP_IF_POL__MIPI_PIN_POL, polflags);
- break;
- case ROCKCHIP_VOP2_EP_LVDS0:
- die &= ~RK3568_SYS_DSP_INFACE_EN_LVDS0_MUX;
- die |= RK3568_SYS_DSP_INFACE_EN_LVDS0 |
- FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_LVDS0_MUX, vp->id);
- dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL;
- dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags);
- break;
- case ROCKCHIP_VOP2_EP_LVDS1:
- die &= ~RK3568_SYS_DSP_INFACE_EN_LVDS1_MUX;
- die |= RK3568_SYS_DSP_INFACE_EN_LVDS1 |
- FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_LVDS1_MUX, vp->id);
- dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL;
- dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags);
- break;
- default:
- drm_err(vop2->drm, "Invalid interface id %d on vp%d\n", id, vp->id);
- return 0;
- }
-
- dip |= RK3568_DSP_IF_POL__CFG_DONE_IMD;
-
- vop2_writel(vop2, RK3568_DSP_IF_EN, die);
- vop2_writel(vop2, RK3568_DSP_IF_POL, dip);
-
- return crtc->state->adjusted_mode.crtc_clock * 1000LL;
-}
-
-/*
- * calc the dclk on rk3588
- * the available div of dclk is 1, 2, 4
- */
-static unsigned long rk3588_calc_dclk(unsigned long child_clk, unsigned long max_dclk)
-{
- if (child_clk * 4 <= max_dclk)
- return child_clk * 4;
- else if (child_clk * 2 <= max_dclk)
- return child_clk * 2;
- else if (child_clk <= max_dclk)
- return child_clk;
- else
- return 0;
-}
-
-/*
- * 4 pixclk/cycle on rk3588
- * RGB/eDP/HDMI: if_pixclk >= dclk_core
- * DP: dp_pixclk = dclk_out <= dclk_core
- * DSI: mipi_pixclk <= dclk_out <= dclk_core
- */
-static unsigned long rk3588_calc_cru_cfg(struct vop2_video_port *vp, int id,
- int *dclk_core_div, int *dclk_out_div,
- int *if_pixclk_div, int *if_dclk_div)
-{
- struct vop2 *vop2 = vp->vop2;
- struct drm_crtc *crtc = &vp->crtc;
- struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
- struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(crtc->state);
- int output_mode = vcstate->output_mode;
- unsigned long v_pixclk = adjusted_mode->crtc_clock * 1000LL; /* video timing pixclk */
- unsigned long dclk_core_rate = v_pixclk >> 2;
- unsigned long dclk_rate = v_pixclk;
- unsigned long dclk_out_rate;
- unsigned long if_pixclk_rate;
- int K = 1;
-
- if (vop2_output_if_is_hdmi(id)) {
- /*
- * K = 2: dclk_core = if_pixclk_rate > if_dclk_rate
- * K = 1: dclk_core = hdmie_edp_dclk > if_pixclk_rate
- */
- if (output_mode == ROCKCHIP_OUT_MODE_YUV420) {
- dclk_rate = dclk_rate >> 1;
- K = 2;
- }
-
- if_pixclk_rate = (dclk_core_rate << 1) / K;
- /*
- * if_dclk_rate = dclk_core_rate / K;
- * *if_pixclk_div = dclk_rate / if_pixclk_rate;
- * *if_dclk_div = dclk_rate / if_dclk_rate;
- */
- *if_pixclk_div = 2;
- *if_dclk_div = 4;
- } else if (vop2_output_if_is_edp(id)) {
- /*
- * edp_pixclk = edp_dclk > dclk_core
- */
- if_pixclk_rate = v_pixclk / K;
- dclk_rate = if_pixclk_rate * K;
- /*
- * *if_pixclk_div = dclk_rate / if_pixclk_rate;
- * *if_dclk_div = *if_pixclk_div;
- */
- *if_pixclk_div = K;
- *if_dclk_div = K;
- } else if (vop2_output_if_is_dp(id)) {
- if (output_mode == ROCKCHIP_OUT_MODE_YUV420)
- dclk_out_rate = v_pixclk >> 3;
- else
- dclk_out_rate = v_pixclk >> 2;
-
- dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000000);
- if (!dclk_rate) {
- drm_err(vop2->drm, "DP dclk_out_rate out of range, dclk_out_rate: %ld Hz\n",
- dclk_out_rate);
- return 0;
- }
- *dclk_out_div = dclk_rate / dclk_out_rate;
- } else if (vop2_output_if_is_mipi(id)) {
- if_pixclk_rate = dclk_core_rate / K;
- /*
- * dclk_core = dclk_out * K = if_pixclk * K = v_pixclk / 4
- */
- dclk_out_rate = if_pixclk_rate;
- /*
- * dclk_rate = N * dclk_core_rate N = (1,2,4 ),
- * we get a little factor here
- */
- dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000000);
- if (!dclk_rate) {
- drm_err(vop2->drm, "MIPI dclk out of range, dclk_out_rate: %ld Hz\n",
- dclk_out_rate);
- return 0;
- }
- *dclk_out_div = dclk_rate / dclk_out_rate;
- /*
- * mipi pixclk == dclk_out
- */
- *if_pixclk_div = 1;
- } else if (vop2_output_if_is_dpi(id)) {
- dclk_rate = v_pixclk;
- }
-
- *dclk_core_div = dclk_rate / dclk_core_rate;
- *if_pixclk_div = ilog2(*if_pixclk_div);
- *if_dclk_div = ilog2(*if_dclk_div);
- *dclk_core_div = ilog2(*dclk_core_div);
- *dclk_out_div = ilog2(*dclk_out_div);
-
- drm_dbg(vop2->drm, "dclk: %ld, pixclk_div: %d, dclk_div: %d\n",
- dclk_rate, *if_pixclk_div, *if_dclk_div);
-
- return dclk_rate;
-}
-
-/*
- * MIPI port mux on rk3588:
- * 0: Video Port2
- * 1: Video Port3
- * 3: Video Port 1(MIPI1 only)
- */
-static u32 rk3588_get_mipi_port_mux(int vp_id)
-{
- if (vp_id == 1)
- return 3;
- else if (vp_id == 3)
- return 1;
- else
- return 0;
-}
-
-static u32 rk3588_get_hdmi_pol(u32 flags)
-{
- u32 val;
-
- val = (flags & DRM_MODE_FLAG_NHSYNC) ? BIT(HSYNC_POSITIVE) : 0;
- val |= (flags & DRM_MODE_FLAG_NVSYNC) ? BIT(VSYNC_POSITIVE) : 0;
-
- return val;
-}
-
-static unsigned long rk3588_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags)
-{
- struct vop2 *vop2 = vp->vop2;
- int dclk_core_div, dclk_out_div, if_pixclk_div, if_dclk_div;
- unsigned long clock;
- u32 die, dip, div, vp_clk_div, val;
-
- clock = rk3588_calc_cru_cfg(vp, id, &dclk_core_div, &dclk_out_div,
- &if_pixclk_div, &if_dclk_div);
- if (!clock)
- return 0;
-
- vp_clk_div = FIELD_PREP(RK3588_VP_CLK_CTRL__DCLK_CORE_DIV, dclk_core_div);
- vp_clk_div |= FIELD_PREP(RK3588_VP_CLK_CTRL__DCLK_OUT_DIV, dclk_out_div);
-
- die = vop2_readl(vop2, RK3568_DSP_IF_EN);
- dip = vop2_readl(vop2, RK3568_DSP_IF_POL);
- div = vop2_readl(vop2, RK3568_DSP_IF_CTRL);
-
- switch (id) {
- case ROCKCHIP_VOP2_EP_HDMI0:
- div &= ~RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV;
- div &= ~RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV;
- div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV, if_dclk_div);
- div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV, if_pixclk_div);
- die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX;
- die |= RK3588_SYS_DSP_INFACE_EN_HDMI0 |
- FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX, vp->id);
- val = rk3588_get_hdmi_pol(polflags);
- regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 1, 1));
- regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0, HIWORD_UPDATE(val, 6, 5));
- break;
- case ROCKCHIP_VOP2_EP_HDMI1:
- div &= ~RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV;
- div &= ~RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV;
- div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV, if_dclk_div);
- div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV, if_pixclk_div);
- die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX;
- die |= RK3588_SYS_DSP_INFACE_EN_HDMI1 |
- FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX, vp->id);
- val = rk3588_get_hdmi_pol(polflags);
- regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 4, 4));
- regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0, HIWORD_UPDATE(val, 8, 7));
- break;
- case ROCKCHIP_VOP2_EP_EDP0:
- div &= ~RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV;
- div &= ~RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV;
- div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV, if_dclk_div);
- div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV, if_pixclk_div);
- die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX;
- die |= RK3588_SYS_DSP_INFACE_EN_EDP0 |
- FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX, vp->id);
- regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 0, 0));
- break;
- case ROCKCHIP_VOP2_EP_EDP1:
- div &= ~RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV;
- div &= ~RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV;
- div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV, if_dclk_div);
- div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV, if_pixclk_div);
- die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX;
- die |= RK3588_SYS_DSP_INFACE_EN_EDP1 |
- FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX, vp->id);
- regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 3, 3));
- break;
- case ROCKCHIP_VOP2_EP_MIPI0:
- div &= ~RK3588_DSP_IF_MIPI0_PCLK_DIV;
- div |= FIELD_PREP(RK3588_DSP_IF_MIPI0_PCLK_DIV, if_pixclk_div);
- die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI0_MUX;
- val = rk3588_get_mipi_port_mux(vp->id);
- die |= RK3588_SYS_DSP_INFACE_EN_MIPI0 |
- FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI0_MUX, !!val);
- break;
- case ROCKCHIP_VOP2_EP_MIPI1:
- div &= ~RK3588_DSP_IF_MIPI1_PCLK_DIV;
- div |= FIELD_PREP(RK3588_DSP_IF_MIPI1_PCLK_DIV, if_pixclk_div);
- die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX;
- val = rk3588_get_mipi_port_mux(vp->id);
- die |= RK3588_SYS_DSP_INFACE_EN_MIPI1 |
- FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX, val);
- break;
- case ROCKCHIP_VOP2_EP_DP0:
- die &= ~RK3588_SYS_DSP_INFACE_EN_DP0_MUX;
- die |= RK3588_SYS_DSP_INFACE_EN_DP0 |
- FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_DP0_MUX, vp->id);
- dip &= ~RK3588_DSP_IF_POL__DP0_PIN_POL;
- dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP0_PIN_POL, polflags);
- break;
- case ROCKCHIP_VOP2_EP_DP1:
- die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX;
- die |= RK3588_SYS_DSP_INFACE_EN_MIPI1 |
- FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX, vp->id);
- dip &= ~RK3588_DSP_IF_POL__DP1_PIN_POL;
- dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP1_PIN_POL, polflags);
- break;
- default:
- drm_err(vop2->drm, "Invalid interface id %d on vp%d\n", id, vp->id);
- return 0;
- }
-
- dip |= RK3568_DSP_IF_POL__CFG_DONE_IMD;
-
- vop2_vp_write(vp, RK3588_VP_CLK_CTRL, vp_clk_div);
- vop2_writel(vop2, RK3568_DSP_IF_EN, die);
- vop2_writel(vop2, RK3568_DSP_IF_CTRL, div);
- vop2_writel(vop2, RK3568_DSP_IF_POL, dip);
-
- return clock;
-}
-
-static unsigned long vop2_set_intf_mux(struct vop2_video_port *vp, int ep_id, u32 polflags)
-{
- struct vop2 *vop2 = vp->vop2;
-
- if (vop2->data->soc_id == 3566 || vop2->data->soc_id == 3568)
- return rk3568_set_intf_mux(vp, ep_id, polflags);
- else if (vop2->data->soc_id == 3588)
- return rk3588_set_intf_mux(vp, ep_id, polflags);
- else
- return 0;
-}
-
static int us_to_vertical_line(struct drm_display_mode *mode, int us)
{
return us * mode->clock / mode->htotal / 1000;
@@ -2194,7 +1682,7 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc,
* process multi(1/2/4/8) pixels per cycle, so the dclk feed by the
* system cru may be the 1/2 or 1/4 of mode->clock.
*/
- clock = vop2_set_intf_mux(vp, rkencoder->crtc_endpoint_id, polflags);
+ clock = vop2->ops->setup_intf_mux(vp, rkencoder->crtc_endpoint_id, polflags);
}
if (!clock) {
@@ -2259,6 +1747,44 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc,
vop2_vp_write(vp, RK3568_VP_MIPI_CTRL, 0);
+ /*
+ * Switch to the HDMI PHY PLL as DCLK source for display modes up
+ * to 4K@60Hz if available; otherwise keep using the system CRU.
+ */
+ if ((vop2->pll_hdmiphy0 || vop2->pll_hdmiphy1) && clock <= VOP2_MAX_DCLK_RATE) {
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc_state->encoder_mask) {
+ struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder);
+
+ if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI0) {
+ if (!vop2->pll_hdmiphy0)
+ break;
+
+ if (!vp->dclk_src)
+ vp->dclk_src = clk_get_parent(vp->dclk);
+
+ ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy0);
+ if (ret < 0)
+ drm_warn(vop2->drm,
+ "Could not switch to HDMI0 PHY PLL: %d\n", ret);
+ break;
+ }
+
+ if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI1) {
+ if (!vop2->pll_hdmiphy1)
+ break;
+
+ if (!vp->dclk_src)
+ vp->dclk_src = clk_get_parent(vp->dclk);
+
+ ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy1);
+ if (ret < 0)
+ drm_warn(vop2->drm,
+ "Could not switch to HDMI1 PHY PLL: %d\n", ret);
+ break;
+ }
+ }
+ }
+
clk_set_rate(vp->dclk, clock);
vop2_post_config(crtc);
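The DCLK re-parenting added in this hunk follows a save/switch/restore shape; below is a condensed sketch under the same assumptions (PHY PLL clock pointers already looked up, 600 MHz cap from VOP2_MAX_DCLK_RATE), illustrative rather than the driver code itself.

/* Sketch of the dclk parent handling; the restore runs in atomic_disable. */
static void example_switch_dclk(struct vop2_video_port *vp, struct clk *phy_pll,
				unsigned long rate)
{
	if (!phy_pll || rate > VOP2_MAX_DCLK_RATE)
		return;

	if (!vp->dclk_src)				/* remember the CRU parent once */
		vp->dclk_src = clk_get_parent(vp->dclk);

	if (clk_set_parent(vp->dclk, phy_pll) < 0)	/* fall back to the CRU parent */
		return;

	clk_set_rate(vp->dclk, rate);
}

static void example_restore_dclk(struct vop2_video_port *vp)
{
	if (vp->dclk_src)
		clk_set_parent(vp->dclk, vp->dclk_src);
}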
@@ -2323,454 +1849,13 @@ static int vop2_crtc_atomic_check(struct drm_crtc *crtc,
return 0;
}
-static bool is_opaque(u16 alpha)
-{
- return (alpha >> 8) == 0xff;
-}
-
-static void vop2_parse_alpha(struct vop2_alpha_config *alpha_config,
- struct vop2_alpha *alpha)
-{
- int src_glb_alpha_en = is_opaque(alpha_config->src_glb_alpha_value) ? 0 : 1;
- int dst_glb_alpha_en = is_opaque(alpha_config->dst_glb_alpha_value) ? 0 : 1;
- int src_color_mode = alpha_config->src_premulti_en ?
- ALPHA_SRC_PRE_MUL : ALPHA_SRC_NO_PRE_MUL;
- int dst_color_mode = alpha_config->dst_premulti_en ?
- ALPHA_SRC_PRE_MUL : ALPHA_SRC_NO_PRE_MUL;
-
- alpha->src_color_ctrl.val = 0;
- alpha->dst_color_ctrl.val = 0;
- alpha->src_alpha_ctrl.val = 0;
- alpha->dst_alpha_ctrl.val = 0;
-
- if (!alpha_config->src_pixel_alpha_en)
- alpha->src_color_ctrl.bits.blend_mode = ALPHA_GLOBAL;
- else if (alpha_config->src_pixel_alpha_en && !src_glb_alpha_en)
- alpha->src_color_ctrl.bits.blend_mode = ALPHA_PER_PIX;
- else
- alpha->src_color_ctrl.bits.blend_mode = ALPHA_PER_PIX_GLOBAL;
-
- alpha->src_color_ctrl.bits.alpha_en = 1;
-
- if (alpha->src_color_ctrl.bits.blend_mode == ALPHA_GLOBAL) {
- alpha->src_color_ctrl.bits.color_mode = src_color_mode;
- alpha->src_color_ctrl.bits.factor_mode = SRC_FAC_ALPHA_SRC_GLOBAL;
- } else if (alpha->src_color_ctrl.bits.blend_mode == ALPHA_PER_PIX) {
- alpha->src_color_ctrl.bits.color_mode = src_color_mode;
- alpha->src_color_ctrl.bits.factor_mode = SRC_FAC_ALPHA_ONE;
- } else {
- alpha->src_color_ctrl.bits.color_mode = ALPHA_SRC_PRE_MUL;
- alpha->src_color_ctrl.bits.factor_mode = SRC_FAC_ALPHA_SRC_GLOBAL;
- }
- alpha->src_color_ctrl.bits.glb_alpha = alpha_config->src_glb_alpha_value >> 8;
- alpha->src_color_ctrl.bits.alpha_mode = ALPHA_STRAIGHT;
- alpha->src_color_ctrl.bits.alpha_cal_mode = ALPHA_SATURATION;
-
- alpha->dst_color_ctrl.bits.alpha_mode = ALPHA_STRAIGHT;
- alpha->dst_color_ctrl.bits.alpha_cal_mode = ALPHA_SATURATION;
- alpha->dst_color_ctrl.bits.blend_mode = ALPHA_GLOBAL;
- alpha->dst_color_ctrl.bits.glb_alpha = alpha_config->dst_glb_alpha_value >> 8;
- alpha->dst_color_ctrl.bits.color_mode = dst_color_mode;
- alpha->dst_color_ctrl.bits.factor_mode = ALPHA_SRC_INVERSE;
-
- alpha->src_alpha_ctrl.bits.alpha_mode = ALPHA_STRAIGHT;
- alpha->src_alpha_ctrl.bits.blend_mode = alpha->src_color_ctrl.bits.blend_mode;
- alpha->src_alpha_ctrl.bits.alpha_cal_mode = ALPHA_SATURATION;
- alpha->src_alpha_ctrl.bits.factor_mode = ALPHA_ONE;
-
- alpha->dst_alpha_ctrl.bits.alpha_mode = ALPHA_STRAIGHT;
- if (alpha_config->dst_pixel_alpha_en && !dst_glb_alpha_en)
- alpha->dst_alpha_ctrl.bits.blend_mode = ALPHA_PER_PIX;
- else
- alpha->dst_alpha_ctrl.bits.blend_mode = ALPHA_PER_PIX_GLOBAL;
- alpha->dst_alpha_ctrl.bits.alpha_cal_mode = ALPHA_NO_SATURATION;
- alpha->dst_alpha_ctrl.bits.factor_mode = ALPHA_SRC_INVERSE;
-}
-
-static int vop2_find_start_mixer_id_for_vp(struct vop2 *vop2, u8 port_id)
-{
- struct vop2_video_port *vp;
- int used_layer = 0;
- int i;
-
- for (i = 0; i < port_id; i++) {
- vp = &vop2->vps[i];
- used_layer += hweight32(vp->win_mask);
- }
-
- return used_layer;
-}
-
-static void vop2_setup_cluster_alpha(struct vop2 *vop2, struct vop2_win *main_win)
-{
- struct vop2_alpha_config alpha_config;
- struct vop2_alpha alpha;
- struct drm_plane_state *bottom_win_pstate;
- bool src_pixel_alpha_en = false;
- u16 src_glb_alpha_val, dst_glb_alpha_val;
- bool premulti_en = false;
- bool swap = false;
- u32 offset = 0;
-
- /* At one win mode, win0 is dst/bottom win, and win1 is a all zero src/top win */
- bottom_win_pstate = main_win->base.state;
- src_glb_alpha_val = 0;
- dst_glb_alpha_val = main_win->base.state->alpha;
-
- if (!bottom_win_pstate->fb)
- return;
-
- alpha_config.src_premulti_en = premulti_en;
- alpha_config.dst_premulti_en = false;
- alpha_config.src_pixel_alpha_en = src_pixel_alpha_en;
- alpha_config.dst_pixel_alpha_en = true; /* alpha value need transfer to next mix */
- alpha_config.src_glb_alpha_value = src_glb_alpha_val;
- alpha_config.dst_glb_alpha_value = dst_glb_alpha_val;
- vop2_parse_alpha(&alpha_config, &alpha);
-
- alpha.src_color_ctrl.bits.src_dst_swap = swap;
-
- switch (main_win->data->phys_id) {
- case ROCKCHIP_VOP2_CLUSTER0:
- offset = 0x0;
- break;
- case ROCKCHIP_VOP2_CLUSTER1:
- offset = 0x10;
- break;
- case ROCKCHIP_VOP2_CLUSTER2:
- offset = 0x20;
- break;
- case ROCKCHIP_VOP2_CLUSTER3:
- offset = 0x30;
- break;
- }
-
- vop2_writel(vop2, RK3568_CLUSTER0_MIX_SRC_COLOR_CTRL + offset,
- alpha.src_color_ctrl.val);
- vop2_writel(vop2, RK3568_CLUSTER0_MIX_DST_COLOR_CTRL + offset,
- alpha.dst_color_ctrl.val);
- vop2_writel(vop2, RK3568_CLUSTER0_MIX_SRC_ALPHA_CTRL + offset,
- alpha.src_alpha_ctrl.val);
- vop2_writel(vop2, RK3568_CLUSTER0_MIX_DST_ALPHA_CTRL + offset,
- alpha.dst_alpha_ctrl.val);
-}
-
-static void vop2_setup_alpha(struct vop2_video_port *vp)
-{
- struct vop2 *vop2 = vp->vop2;
- struct drm_framebuffer *fb;
- struct vop2_alpha_config alpha_config;
- struct vop2_alpha alpha;
- struct drm_plane *plane;
- int pixel_alpha_en;
- int premulti_en, gpremulti_en = 0;
- int mixer_id;
- u32 offset;
- bool bottom_layer_alpha_en = false;
- u32 dst_global_alpha = DRM_BLEND_ALPHA_OPAQUE;
-
- mixer_id = vop2_find_start_mixer_id_for_vp(vop2, vp->id);
- alpha_config.dst_pixel_alpha_en = true; /* alpha value need transfer to next mix */
-
- drm_atomic_crtc_for_each_plane(plane, &vp->crtc) {
- struct vop2_win *win = to_vop2_win(plane);
-
- if (plane->state->normalized_zpos == 0 &&
- !is_opaque(plane->state->alpha) &&
- !vop2_cluster_window(win)) {
- /*
- * If bottom layer have global alpha effect [except cluster layer,
- * because cluster have deal with bottom layer global alpha value
- * at cluster mix], bottom layer mix need deal with global alpha.
- */
- bottom_layer_alpha_en = true;
- dst_global_alpha = plane->state->alpha;
- }
- }
-
- drm_atomic_crtc_for_each_plane(plane, &vp->crtc) {
- struct vop2_win *win = to_vop2_win(plane);
- int zpos = plane->state->normalized_zpos;
-
- /*
- * Need to configure alpha from second layer.
- */
- if (zpos == 0)
- continue;
-
- if (plane->state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI)
- premulti_en = 1;
- else
- premulti_en = 0;
-
- plane = &win->base;
- fb = plane->state->fb;
-
- pixel_alpha_en = fb->format->has_alpha;
-
- alpha_config.src_premulti_en = premulti_en;
-
- if (bottom_layer_alpha_en && zpos == 1) {
- gpremulti_en = premulti_en;
- /* Cd = Cs + (1 - As) * Cd * Agd */
- alpha_config.dst_premulti_en = false;
- alpha_config.src_pixel_alpha_en = pixel_alpha_en;
- alpha_config.src_glb_alpha_value = plane->state->alpha;
- alpha_config.dst_glb_alpha_value = dst_global_alpha;
- } else if (vop2_cluster_window(win)) {
- /* Mix output data only have pixel alpha */
- alpha_config.dst_premulti_en = true;
- alpha_config.src_pixel_alpha_en = true;
- alpha_config.src_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE;
- alpha_config.dst_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE;
- } else {
- /* Cd = Cs + (1 - As) * Cd */
- alpha_config.dst_premulti_en = true;
- alpha_config.src_pixel_alpha_en = pixel_alpha_en;
- alpha_config.src_glb_alpha_value = plane->state->alpha;
- alpha_config.dst_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE;
- }
-
- vop2_parse_alpha(&alpha_config, &alpha);
-
- offset = (mixer_id + zpos - 1) * 0x10;
- vop2_writel(vop2, RK3568_MIX0_SRC_COLOR_CTRL + offset,
- alpha.src_color_ctrl.val);
- vop2_writel(vop2, RK3568_MIX0_DST_COLOR_CTRL + offset,
- alpha.dst_color_ctrl.val);
- vop2_writel(vop2, RK3568_MIX0_SRC_ALPHA_CTRL + offset,
- alpha.src_alpha_ctrl.val);
- vop2_writel(vop2, RK3568_MIX0_DST_ALPHA_CTRL + offset,
- alpha.dst_alpha_ctrl.val);
- }
-
- if (vp->id == 0) {
- if (bottom_layer_alpha_en) {
- /* Transfer pixel alpha to hdr mix */
- alpha_config.src_premulti_en = gpremulti_en;
- alpha_config.dst_premulti_en = true;
- alpha_config.src_pixel_alpha_en = true;
- alpha_config.src_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE;
- alpha_config.dst_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE;
- vop2_parse_alpha(&alpha_config, &alpha);
-
- vop2_writel(vop2, RK3568_HDR0_SRC_COLOR_CTRL,
- alpha.src_color_ctrl.val);
- vop2_writel(vop2, RK3568_HDR0_DST_COLOR_CTRL,
- alpha.dst_color_ctrl.val);
- vop2_writel(vop2, RK3568_HDR0_SRC_ALPHA_CTRL,
- alpha.src_alpha_ctrl.val);
- vop2_writel(vop2, RK3568_HDR0_DST_ALPHA_CTRL,
- alpha.dst_alpha_ctrl.val);
- } else {
- vop2_writel(vop2, RK3568_HDR0_SRC_COLOR_CTRL, 0);
- }
- }
-}
-
-static void vop2_setup_layer_mixer(struct vop2_video_port *vp)
-{
- struct vop2 *vop2 = vp->vop2;
- struct drm_plane *plane;
- u32 layer_sel = 0;
- u32 port_sel;
- u8 layer_id;
- u8 old_layer_id;
- u8 layer_sel_id;
- unsigned int ofs;
- u32 ovl_ctrl;
- int i;
- struct vop2_video_port *vp0 = &vop2->vps[0];
- struct vop2_video_port *vp1 = &vop2->vps[1];
- struct vop2_video_port *vp2 = &vop2->vps[2];
- struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(vp->crtc.state);
-
- ovl_ctrl = vop2_readl(vop2, RK3568_OVL_CTRL);
- ovl_ctrl |= RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD;
- if (vcstate->yuv_overlay)
- ovl_ctrl |= RK3568_OVL_CTRL__YUV_MODE(vp->id);
- else
- ovl_ctrl &= ~RK3568_OVL_CTRL__YUV_MODE(vp->id);
-
- vop2_writel(vop2, RK3568_OVL_CTRL, ovl_ctrl);
-
- port_sel = vop2_readl(vop2, RK3568_OVL_PORT_SEL);
- port_sel &= RK3568_OVL_PORT_SEL__SEL_PORT;
-
- if (vp0->nlayers)
- port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT0_MUX,
- vp0->nlayers - 1);
- else
- port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT0_MUX, 8);
-
- if (vp1->nlayers)
- port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT1_MUX,
- (vp0->nlayers + vp1->nlayers - 1));
- else
- port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT1_MUX, 8);
-
- if (vp2->nlayers)
- port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX,
- (vp2->nlayers + vp1->nlayers + vp0->nlayers - 1));
- else
- port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX, 8);
-
- layer_sel = vop2_readl(vop2, RK3568_OVL_LAYER_SEL);
-
- ofs = 0;
- for (i = 0; i < vp->id; i++)
- ofs += vop2->vps[i].nlayers;
-
- drm_atomic_crtc_for_each_plane(plane, &vp->crtc) {
- struct vop2_win *win = to_vop2_win(plane);
- struct vop2_win *old_win;
-
- layer_id = (u8)(plane->state->normalized_zpos + ofs);
-
- /*
- * Find the layer this win bind in old state.
- */
- for (old_layer_id = 0; old_layer_id < vop2->data->win_size; old_layer_id++) {
- layer_sel_id = (layer_sel >> (4 * old_layer_id)) & 0xf;
- if (layer_sel_id == win->data->layer_sel_id)
- break;
- }
-
- /*
- * Find the win bind to this layer in old state
- */
- for (i = 0; i < vop2->data->win_size; i++) {
- old_win = &vop2->win[i];
- layer_sel_id = (layer_sel >> (4 * layer_id)) & 0xf;
- if (layer_sel_id == old_win->data->layer_sel_id)
- break;
- }
-
- switch (win->data->phys_id) {
- case ROCKCHIP_VOP2_CLUSTER0:
- port_sel &= ~RK3568_OVL_PORT_SEL__CLUSTER0;
- port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__CLUSTER0, vp->id);
- break;
- case ROCKCHIP_VOP2_CLUSTER1:
- port_sel &= ~RK3568_OVL_PORT_SEL__CLUSTER1;
- port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__CLUSTER1, vp->id);
- break;
- case ROCKCHIP_VOP2_CLUSTER2:
- port_sel &= ~RK3588_OVL_PORT_SEL__CLUSTER2;
- port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__CLUSTER2, vp->id);
- break;
- case ROCKCHIP_VOP2_CLUSTER3:
- port_sel &= ~RK3588_OVL_PORT_SEL__CLUSTER3;
- port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__CLUSTER3, vp->id);
- break;
- case ROCKCHIP_VOP2_ESMART0:
- port_sel &= ~RK3568_OVL_PORT_SEL__ESMART0;
- port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__ESMART0, vp->id);
- break;
- case ROCKCHIP_VOP2_ESMART1:
- port_sel &= ~RK3568_OVL_PORT_SEL__ESMART1;
- port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__ESMART1, vp->id);
- break;
- case ROCKCHIP_VOP2_ESMART2:
- port_sel &= ~RK3588_OVL_PORT_SEL__ESMART2;
- port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__ESMART2, vp->id);
- break;
- case ROCKCHIP_VOP2_ESMART3:
- port_sel &= ~RK3588_OVL_PORT_SEL__ESMART3;
- port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__ESMART3, vp->id);
- break;
- case ROCKCHIP_VOP2_SMART0:
- port_sel &= ~RK3568_OVL_PORT_SEL__SMART0;
- port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__SMART0, vp->id);
- break;
- case ROCKCHIP_VOP2_SMART1:
- port_sel &= ~RK3568_OVL_PORT_SEL__SMART1;
- port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__SMART1, vp->id);
- break;
- }
-
- layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(layer_id, 0x7);
- layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(layer_id, win->data->layer_sel_id);
- /*
- * When we bind a window from layerM to layerN, we also need to move the old
- * window on layerN to layerM to avoid one window selected by two or more layers.
- */
- layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(old_layer_id, 0x7);
- layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(old_layer_id, old_win->data->layer_sel_id);
- }
-
- vop2_writel(vop2, RK3568_OVL_LAYER_SEL, layer_sel);
- vop2_writel(vop2, RK3568_OVL_PORT_SEL, port_sel);
-}
-
-static void vop2_setup_dly_for_windows(struct vop2 *vop2)
-{
- struct vop2_win *win;
- int i = 0;
- u32 cdly = 0, sdly = 0;
-
- for (i = 0; i < vop2->data->win_size; i++) {
- u32 dly;
-
- win = &vop2->win[i];
- dly = win->delay;
-
- switch (win->data->phys_id) {
- case ROCKCHIP_VOP2_CLUSTER0:
- cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER0_0, dly);
- cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER0_1, dly);
- break;
- case ROCKCHIP_VOP2_CLUSTER1:
- cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER1_0, dly);
- cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER1_1, dly);
- break;
- case ROCKCHIP_VOP2_ESMART0:
- sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__ESMART0, dly);
- break;
- case ROCKCHIP_VOP2_ESMART1:
- sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__ESMART1, dly);
- break;
- case ROCKCHIP_VOP2_SMART0:
- case ROCKCHIP_VOP2_ESMART2:
- sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__SMART0, dly);
- break;
- case ROCKCHIP_VOP2_SMART1:
- case ROCKCHIP_VOP2_ESMART3:
- sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__SMART1, dly);
- break;
- }
- }
-
- vop2_writel(vop2, RK3568_CLUSTER_DLY_NUM, cdly);
- vop2_writel(vop2, RK3568_SMART_DLY_NUM, sdly);
-}
-
static void vop2_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct vop2_video_port *vp = to_vop2_video_port(crtc);
struct vop2 *vop2 = vp->vop2;
- struct drm_plane *plane;
- vp->win_mask = 0;
-
- drm_atomic_crtc_for_each_plane(plane, crtc) {
- struct vop2_win *win = to_vop2_win(plane);
-
- win->delay = win->data->dly[VOP2_DLY_MODE_DEFAULT];
-
- vp->win_mask |= BIT(win->data->phys_id);
-
- if (vop2_cluster_window(win))
- vop2_setup_cluster_alpha(vop2, win);
- }
-
- if (!vp->win_mask)
- return;
-
- vop2_setup_layer_mixer(vp);
- vop2_setup_alpha(vp);
- vop2_setup_dly_for_windows(vop2);
+ vop2->ops->setup_overlay(vp);
}
static void vop2_crtc_atomic_flush(struct drm_crtc *crtc,
@@ -3082,6 +2167,52 @@ static const struct drm_crtc_funcs vop2_crtc_funcs = {
.late_register = vop2_crtc_late_register,
};
+static irqreturn_t rk3576_vp_isr(int irq, void *data)
+{
+ struct vop2_video_port *vp = data;
+ struct vop2 *vop2 = vp->vop2;
+ struct drm_crtc *crtc = &vp->crtc;
+ uint32_t irqs;
+ int ret = IRQ_NONE;
+
+ if (!pm_runtime_get_if_in_use(vop2->dev))
+ return IRQ_NONE;
+
+ irqs = vop2_readl(vop2, RK3568_VP_INT_STATUS(vp->id));
+ vop2_writel(vop2, RK3568_VP_INT_CLR(vp->id), irqs << 16 | irqs);
+
+ if (irqs & VP_INT_DSP_HOLD_VALID) {
+ complete(&vp->dsp_hold_completion);
+ ret = IRQ_HANDLED;
+ }
+
+ if (irqs & VP_INT_FS_FIELD) {
+ drm_crtc_handle_vblank(crtc);
+ spin_lock(&crtc->dev->event_lock);
+ if (vp->event) {
+ u32 val = vop2_readl(vop2, RK3568_REG_CFG_DONE);
+
+ if (!(val & BIT(vp->id))) {
+ drm_crtc_send_vblank_event(crtc, vp->event);
+ vp->event = NULL;
+ drm_crtc_vblank_put(crtc);
+ }
+ }
+ spin_unlock(&crtc->dev->event_lock);
+
+ ret = IRQ_HANDLED;
+ }
+
+ if (irqs & VP_INT_POST_BUF_EMPTY) {
+ drm_err_ratelimited(vop2->drm, "POST_BUF_EMPTY irq err at vp%d\n", vp->id);
+ ret = IRQ_HANDLED;
+ }
+
+ pm_runtime_put(vop2->dev);
+
+ return ret;
+}
+
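A note on the clear sequence above (not part of the patch): writing irqs << 16 | irqs mirrors the pending bits into the upper half of RK3568_VP_INT_CLR, which on Rockchip IP usually acts as a write-enable mask, so only the bits that were actually pending get cleared. A minimal standalone sketch of the idiom, with an assumed helper name:

	/* illustration only: build the value written to a VP interrupt-clear register */
	static u32 vp_int_clr_value(u32 pending)
	{
		/* bits [31:16]: write-enable mask, bits [15:0]: clear request */
		return (pending << 16) | pending;
	}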
static irqreturn_t vop2_isr(int irq, void *data)
{
struct vop2 *vop2 = data;
@@ -3097,41 +2228,43 @@ static irqreturn_t vop2_isr(int irq, void *data)
if (!pm_runtime_get_if_in_use(vop2->dev))
return IRQ_NONE;
- for (i = 0; i < vop2_data->nr_vps; i++) {
- struct vop2_video_port *vp = &vop2->vps[i];
- struct drm_crtc *crtc = &vp->crtc;
- u32 irqs;
-
- irqs = vop2_readl(vop2, RK3568_VP_INT_STATUS(vp->id));
- vop2_writel(vop2, RK3568_VP_INT_CLR(vp->id), irqs << 16 | irqs);
+ if (vop2->version < VOP_VERSION_RK3576) {
+ for (i = 0; i < vop2_data->nr_vps; i++) {
+ struct vop2_video_port *vp = &vop2->vps[i];
+ struct drm_crtc *crtc = &vp->crtc;
+ u32 irqs;
- if (irqs & VP_INT_DSP_HOLD_VALID) {
- complete(&vp->dsp_hold_completion);
- ret = IRQ_HANDLED;
- }
+ irqs = vop2_readl(vop2, RK3568_VP_INT_STATUS(vp->id));
+ vop2_writel(vop2, RK3568_VP_INT_CLR(vp->id), irqs << 16 | irqs);
- if (irqs & VP_INT_FS_FIELD) {
- drm_crtc_handle_vblank(crtc);
- spin_lock(&crtc->dev->event_lock);
- if (vp->event) {
- u32 val = vop2_readl(vop2, RK3568_REG_CFG_DONE);
+ if (irqs & VP_INT_DSP_HOLD_VALID) {
+ complete(&vp->dsp_hold_completion);
+ ret = IRQ_HANDLED;
+ }
- if (!(val & BIT(vp->id))) {
- drm_crtc_send_vblank_event(crtc, vp->event);
- vp->event = NULL;
- drm_crtc_vblank_put(crtc);
+ if (irqs & VP_INT_FS_FIELD) {
+ drm_crtc_handle_vblank(crtc);
+ spin_lock(&crtc->dev->event_lock);
+ if (vp->event) {
+ u32 val = vop2_readl(vop2, RK3568_REG_CFG_DONE);
+
+ if (!(val & BIT(vp->id))) {
+ drm_crtc_send_vblank_event(crtc, vp->event);
+ vp->event = NULL;
+ drm_crtc_vblank_put(crtc);
+ }
}
- }
- spin_unlock(&crtc->dev->event_lock);
+ spin_unlock(&crtc->dev->event_lock);
- ret = IRQ_HANDLED;
- }
+ ret = IRQ_HANDLED;
+ }
- if (irqs & VP_INT_POST_BUF_EMPTY) {
- drm_err_ratelimited(vop2->drm,
- "POST_BUF_EMPTY irq err at vp%d\n",
- vp->id);
- ret = IRQ_HANDLED;
+ if (irqs & VP_INT_POST_BUF_EMPTY) {
+ drm_err_ratelimited(vop2->drm,
+ "POST_BUF_EMPTY irq err at vp%d\n",
+ vp->id);
+ ret = IRQ_HANDLED;
+ }
}
}
@@ -3185,22 +2318,29 @@ static int vop2_plane_init(struct vop2 *vop2, struct vop2_win *win,
return 0;
}
-static struct vop2_video_port *find_vp_without_primary(struct vop2 *vop2)
+/*
+ * On RK3566 these windows don't have an independent
+ * framebuffer. They can only share/mirror the framebuffer
+ * with smart0, esmart0 and cluster0 respectively.
+ * And RK3566 shares the same VOP version with RK3568, so we
+ * need to use soc_id for identification here.
+ */
+static bool vop2_is_mirror_win(struct vop2_win *win)
{
- int i;
-
- for (i = 0; i < vop2->data->nr_vps; i++) {
- struct vop2_video_port *vp = &vop2->vps[i];
-
- if (!vp->crtc.port)
- continue;
- if (vp->primary_plane)
- continue;
+ struct vop2 *vop2 = win->vop2;
- return vp;
+ if (vop2->data->soc_id == 3566) {
+ switch (win->data->phys_id) {
+ case ROCKCHIP_VOP2_SMART1:
+ case ROCKCHIP_VOP2_ESMART1:
+ case ROCKCHIP_VOP2_CLUSTER1:
+ return true;
+ default:
+ return false;
+ }
+ } else {
+ return false;
}
-
- return NULL;
}
static int vop2_create_crtcs(struct vop2 *vop2)
@@ -3211,7 +2351,9 @@ static int vop2_create_crtcs(struct vop2 *vop2)
struct drm_plane *plane;
struct device_node *port;
struct vop2_video_port *vp;
- int i, nvp, nvps = 0;
+ struct vop2_win *win;
+ u32 possible_crtcs;
+ int i, j, nvp, nvps = 0;
int ret;
for (i = 0; i < vop2_data->nr_vps; i++) {
@@ -3227,10 +2369,9 @@ static int vop2_create_crtcs(struct vop2 *vop2)
snprintf(dclk_name, sizeof(dclk_name), "dclk_vp%d", vp->id);
vp->dclk = devm_clk_get(vop2->dev, dclk_name);
- if (IS_ERR(vp->dclk)) {
- drm_err(vop2->drm, "failed to get %s\n", dclk_name);
- return PTR_ERR(vp->dclk);
- }
+ if (IS_ERR(vp->dclk))
+ return dev_err_probe(drm->dev, PTR_ERR(vp->dclk),
+ "failed to get %s\n", dclk_name);
np = of_graph_get_remote_node(dev->of_node, i, -1);
if (!np) {
@@ -3240,55 +2381,79 @@ static int vop2_create_crtcs(struct vop2 *vop2)
of_node_put(np);
port = of_graph_get_port_by_id(dev->of_node, i);
- if (!port) {
- drm_err(vop2->drm, "no port node found for video_port%d\n", i);
- return -ENOENT;
- }
-
+ if (!port)
+ return dev_err_probe(drm->dev, -ENOENT,
+ "no port node found for video_port%d\n", i);
vp->crtc.port = port;
nvps++;
}
nvp = 0;
- for (i = 0; i < vop2->registered_num_wins; i++) {
- struct vop2_win *win = &vop2->win[i];
- u32 possible_crtcs = 0;
-
- if (vop2->data->soc_id == 3566) {
- /*
- * On RK3566 these windows don't have an independent
- * framebuffer. They share the framebuffer with smart0,
- * esmart0 and cluster0 respectively.
- */
- switch (win->data->phys_id) {
- case ROCKCHIP_VOP2_SMART1:
- case ROCKCHIP_VOP2_ESMART1:
- case ROCKCHIP_VOP2_CLUSTER1:
+ /* Register a primary plane for every crtc */
+ for (i = 0; i < vop2_data->nr_vps; i++) {
+ vp = &vop2->vps[i];
+
+ if (!vp->crtc.port)
+ continue;
+
+ for (j = 0; j < vop2->registered_num_wins; j++) {
+ win = &vop2->win[j];
+
+ /* Already registered as primary plane */
+ if (win->base.type == DRM_PLANE_TYPE_PRIMARY)
+ continue;
+
+ /* Skip if this win cannot be attached to this VP */
+ if (!(win->data->possible_vp_mask & BIT(vp->id)))
continue;
- }
- }
- if (win->type == DRM_PLANE_TYPE_PRIMARY) {
- vp = find_vp_without_primary(vop2);
- if (vp) {
+ if (vop2_is_mirror_win(win))
+ continue;
+
+ if (win->type == DRM_PLANE_TYPE_PRIMARY) {
possible_crtcs = BIT(nvp);
vp->primary_plane = win;
+ ret = vop2_plane_init(vop2, win, possible_crtcs);
+ if (ret)
+ return dev_err_probe(drm->dev, ret,
+ "failed to init primary plane %s\n",
+ win->data->name);
nvp++;
- } else {
- /* change the unused primary window to overlay window */
- win->type = DRM_PLANE_TYPE_OVERLAY;
+ break;
}
}
+ }
- if (win->type == DRM_PLANE_TYPE_OVERLAY)
- possible_crtcs = (1 << nvps) - 1;
+ /* Register all unused windows as overlay planes */
+ for (i = 0; i < vop2->registered_num_wins; i++) {
+ win = &vop2->win[i];
- ret = vop2_plane_init(vop2, win, possible_crtcs);
- if (ret) {
- drm_err(vop2->drm, "failed to init plane %s: %d\n",
- win->data->name, ret);
- return ret;
+ /* Already registered as primary plane */
+ if (win->base.type == DRM_PLANE_TYPE_PRIMARY)
+ continue;
+
+ if (vop2_is_mirror_win(win))
+ continue;
+
+ win->type = DRM_PLANE_TYPE_OVERLAY;
+
+ possible_crtcs = 0;
+ nvp = 0;
+ for (j = 0; j < vop2_data->nr_vps; j++) {
+ vp = &vop2->vps[j];
+
+ if (!vp->crtc.port)
+ continue;
+
+ if (win->data->possible_vp_mask & BIT(vp->id))
+ possible_crtcs |= BIT(nvp);
+ nvp++;
}
+
+ ret = vop2_plane_init(vop2, win, possible_crtcs);
+ if (ret)
+ return dev_err_probe(drm->dev, ret, "failed to init overlay plane %s\n",
+ win->data->name);
}
for (i = 0; i < vop2_data->nr_vps; i++) {
@@ -3302,10 +2467,9 @@ static int vop2_create_crtcs(struct vop2 *vop2)
ret = drm_crtc_init_with_planes(drm, &vp->crtc, plane, NULL,
&vop2_crtc_funcs,
"video_port%d", vp->id);
- if (ret) {
- drm_err(vop2->drm, "crtc init for video_port%d failed\n", i);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(drm->dev, ret,
+ "crtc init for video_port%d failed\n", i);
drm_crtc_helper_add(&vp->crtc, &vop2_crtc_helper_funcs);
if (vop2->lut_regs) {
@@ -3372,184 +2536,26 @@ static int vop2_find_rgb_encoder(struct vop2 *vop2)
return -ENOENT;
}
-static struct reg_field vop2_cluster_regs[VOP2_WIN_MAX_REG] = {
- [VOP2_WIN_ENABLE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 0, 0),
- [VOP2_WIN_FORMAT] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 1, 5),
- [VOP2_WIN_RB_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 14, 14),
- [VOP2_WIN_DITHER_UP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 18, 18),
- [VOP2_WIN_ACT_INFO] = REG_FIELD(RK3568_CLUSTER_WIN_ACT_INFO, 0, 31),
- [VOP2_WIN_DSP_INFO] = REG_FIELD(RK3568_CLUSTER_WIN_DSP_INFO, 0, 31),
- [VOP2_WIN_DSP_ST] = REG_FIELD(RK3568_CLUSTER_WIN_DSP_ST, 0, 31),
- [VOP2_WIN_YRGB_MST] = REG_FIELD(RK3568_CLUSTER_WIN_YRGB_MST, 0, 31),
- [VOP2_WIN_UV_MST] = REG_FIELD(RK3568_CLUSTER_WIN_CBR_MST, 0, 31),
- [VOP2_WIN_YUV_CLIP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 19, 19),
- [VOP2_WIN_YRGB_VIR] = REG_FIELD(RK3568_CLUSTER_WIN_VIR, 0, 15),
- [VOP2_WIN_UV_VIR] = REG_FIELD(RK3568_CLUSTER_WIN_VIR, 16, 31),
- [VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 8, 8),
- [VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 9, 9),
- [VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 10, 11),
- [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 0, 3),
- [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 5, 8),
- /* RK3588 only, reserved bit on rk3568*/
- [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3568_CLUSTER_CTRL, 13, 13),
-
- /* Scale */
- [VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 0, 15),
- [VOP2_WIN_SCALE_YRGB_Y] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 16, 31),
- [VOP2_WIN_YRGB_VER_SCL_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 14, 15),
- [VOP2_WIN_YRGB_HOR_SCL_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 12, 13),
- [VOP2_WIN_BIC_COE_SEL] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 2, 3),
- [VOP2_WIN_VSD_YRGB_GT2] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 28, 28),
- [VOP2_WIN_VSD_YRGB_GT4] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 29, 29),
-
- /* cluster regs */
- [VOP2_WIN_AFBC_ENABLE] = REG_FIELD(RK3568_CLUSTER_CTRL, 1, 1),
- [VOP2_WIN_CLUSTER_ENABLE] = REG_FIELD(RK3568_CLUSTER_CTRL, 0, 0),
- [VOP2_WIN_CLUSTER_LB_MODE] = REG_FIELD(RK3568_CLUSTER_CTRL, 4, 7),
-
- /* afbc regs */
- [VOP2_WIN_AFBC_FORMAT] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 2, 6),
- [VOP2_WIN_AFBC_RB_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 9, 9),
- [VOP2_WIN_AFBC_UV_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 10, 10),
- [VOP2_WIN_AFBC_AUTO_GATING_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_OUTPUT_CTRL, 4, 4),
- [VOP2_WIN_AFBC_HALF_BLOCK_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 7, 7),
- [VOP2_WIN_AFBC_BLOCK_SPLIT_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 8, 8),
- [VOP2_WIN_AFBC_HDR_PTR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_HDR_PTR, 0, 31),
- [VOP2_WIN_AFBC_PIC_SIZE] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_PIC_SIZE, 0, 31),
- [VOP2_WIN_AFBC_PIC_VIR_WIDTH] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_VIR_WIDTH, 0, 15),
- [VOP2_WIN_AFBC_TILE_NUM] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_VIR_WIDTH, 16, 31),
- [VOP2_WIN_AFBC_PIC_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_PIC_OFFSET, 0, 31),
- [VOP2_WIN_AFBC_DSP_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_DSP_OFFSET, 0, 31),
- [VOP2_WIN_AFBC_TRANSFORM_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_TRANSFORM_OFFSET, 0, 31),
- [VOP2_WIN_AFBC_ROTATE_90] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 0, 0),
- [VOP2_WIN_AFBC_ROTATE_270] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 1, 1),
- [VOP2_WIN_XMIRROR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 2, 2),
- [VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 3, 3),
- [VOP2_WIN_UV_SWAP] = { .reg = 0xffffffff },
- [VOP2_WIN_COLOR_KEY] = { .reg = 0xffffffff },
- [VOP2_WIN_COLOR_KEY_EN] = { .reg = 0xffffffff },
- [VOP2_WIN_SCALE_CBCR_X] = { .reg = 0xffffffff },
- [VOP2_WIN_SCALE_CBCR_Y] = { .reg = 0xffffffff },
- [VOP2_WIN_YRGB_HSCL_FILTER_MODE] = { .reg = 0xffffffff },
- [VOP2_WIN_YRGB_VSCL_FILTER_MODE] = { .reg = 0xffffffff },
- [VOP2_WIN_CBCR_VER_SCL_MODE] = { .reg = 0xffffffff },
- [VOP2_WIN_CBCR_HSCL_FILTER_MODE] = { .reg = 0xffffffff },
- [VOP2_WIN_CBCR_HOR_SCL_MODE] = { .reg = 0xffffffff },
- [VOP2_WIN_CBCR_VSCL_FILTER_MODE] = { .reg = 0xffffffff },
- [VOP2_WIN_VSD_CBCR_GT2] = { .reg = 0xffffffff },
- [VOP2_WIN_VSD_CBCR_GT4] = { .reg = 0xffffffff },
-};
-
-static int vop2_cluster_init(struct vop2_win *win)
+static int vop2_regmap_init(struct vop2_win *win, const struct reg_field *regs,
+ int nr_regs)
{
struct vop2 *vop2 = win->vop2;
- struct reg_field *cluster_regs;
- int ret, i;
-
- cluster_regs = kmemdup(vop2_cluster_regs, sizeof(vop2_cluster_regs),
- GFP_KERNEL);
- if (!cluster_regs)
- return -ENOMEM;
-
- for (i = 0; i < ARRAY_SIZE(vop2_cluster_regs); i++)
- if (cluster_regs[i].reg != 0xffffffff)
- cluster_regs[i].reg += win->offset;
-
- ret = devm_regmap_field_bulk_alloc(vop2->dev, vop2->map, win->reg,
- cluster_regs,
- ARRAY_SIZE(vop2_cluster_regs));
-
- kfree(cluster_regs);
-
- return ret;
-};
-
-static struct reg_field vop2_esmart_regs[VOP2_WIN_MAX_REG] = {
- [VOP2_WIN_ENABLE] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 0, 0),
- [VOP2_WIN_FORMAT] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 1, 5),
- [VOP2_WIN_DITHER_UP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 12, 12),
- [VOP2_WIN_RB_SWAP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 14, 14),
- [VOP2_WIN_UV_SWAP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 16, 16),
- [VOP2_WIN_ACT_INFO] = REG_FIELD(RK3568_SMART_REGION0_ACT_INFO, 0, 31),
- [VOP2_WIN_DSP_INFO] = REG_FIELD(RK3568_SMART_REGION0_DSP_INFO, 0, 31),
- [VOP2_WIN_DSP_ST] = REG_FIELD(RK3568_SMART_REGION0_DSP_ST, 0, 28),
- [VOP2_WIN_YRGB_MST] = REG_FIELD(RK3568_SMART_REGION0_YRGB_MST, 0, 31),
- [VOP2_WIN_UV_MST] = REG_FIELD(RK3568_SMART_REGION0_CBR_MST, 0, 31),
- [VOP2_WIN_YUV_CLIP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 17, 17),
- [VOP2_WIN_YRGB_VIR] = REG_FIELD(RK3568_SMART_REGION0_VIR, 0, 15),
- [VOP2_WIN_UV_VIR] = REG_FIELD(RK3568_SMART_REGION0_VIR, 16, 31),
- [VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_SMART_CTRL0, 0, 0),
- [VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_SMART_CTRL0, 1, 1),
- [VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_SMART_CTRL0, 2, 3),
- [VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_SMART_CTRL1, 31, 31),
- [VOP2_WIN_COLOR_KEY] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 0, 29),
- [VOP2_WIN_COLOR_KEY_EN] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 31, 31),
- [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 4, 8),
- [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 12, 16),
- /* RK3588 only, reserved register on rk3568 */
- [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3588_SMART_AXI_CTRL, 1, 1),
-
- /* Scale */
- [VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 0, 15),
- [VOP2_WIN_SCALE_YRGB_Y] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 16, 31),
- [VOP2_WIN_SCALE_CBCR_X] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_CBR, 0, 15),
- [VOP2_WIN_SCALE_CBCR_Y] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_CBR, 16, 31),
- [VOP2_WIN_YRGB_HOR_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 0, 1),
- [VOP2_WIN_YRGB_HSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 2, 3),
- [VOP2_WIN_YRGB_VER_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 4, 5),
- [VOP2_WIN_YRGB_VSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 6, 7),
- [VOP2_WIN_CBCR_HOR_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 8, 9),
- [VOP2_WIN_CBCR_HSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 10, 11),
- [VOP2_WIN_CBCR_VER_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 12, 13),
- [VOP2_WIN_CBCR_VSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 14, 15),
- [VOP2_WIN_BIC_COE_SEL] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 16, 17),
- [VOP2_WIN_VSD_YRGB_GT2] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 8, 8),
- [VOP2_WIN_VSD_YRGB_GT4] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 9, 9),
- [VOP2_WIN_VSD_CBCR_GT2] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 10, 10),
- [VOP2_WIN_VSD_CBCR_GT4] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 11, 11),
- [VOP2_WIN_XMIRROR] = { .reg = 0xffffffff },
- [VOP2_WIN_CLUSTER_ENABLE] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_ENABLE] = { .reg = 0xffffffff },
- [VOP2_WIN_CLUSTER_LB_MODE] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_FORMAT] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_RB_SWAP] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_UV_SWAP] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_AUTO_GATING_EN] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_BLOCK_SPLIT_EN] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_PIC_VIR_WIDTH] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_TILE_NUM] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_PIC_OFFSET] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_PIC_SIZE] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_DSP_OFFSET] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_TRANSFORM_OFFSET] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_HDR_PTR] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_HALF_BLOCK_EN] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_ROTATE_270] = { .reg = 0xffffffff },
- [VOP2_WIN_AFBC_ROTATE_90] = { .reg = 0xffffffff },
-};
-
-static int vop2_esmart_init(struct vop2_win *win)
-{
- struct vop2 *vop2 = win->vop2;
- struct reg_field *esmart_regs;
- int ret, i;
-
- esmart_regs = kmemdup(vop2_esmart_regs, sizeof(vop2_esmart_regs),
- GFP_KERNEL);
- if (!esmart_regs)
- return -ENOMEM;
-
- for (i = 0; i < ARRAY_SIZE(vop2_esmart_regs); i++)
- if (esmart_regs[i].reg != 0xffffffff)
- esmart_regs[i].reg += win->offset;
+ int i;
- ret = devm_regmap_field_bulk_alloc(vop2->dev, vop2->map, win->reg,
- esmart_regs,
- ARRAY_SIZE(vop2_esmart_regs));
+ for (i = 0; i < nr_regs; i++) {
+ const struct reg_field field = {
+ .reg = (regs[i].reg != 0xffffffff) ?
+ regs[i].reg + win->offset : regs[i].reg,
+ .lsb = regs[i].lsb,
+ .msb = regs[i].msb
+ };
- kfree(esmart_regs);
+ win->reg[i] = devm_regmap_field_alloc(vop2->dev, vop2->map, field);
+ if (IS_ERR(win->reg[i]))
+ return PTR_ERR(win->reg[i]);
+ }
- return ret;
+ return 0;
};
static int vop2_win_init(struct vop2 *vop2)
@@ -3568,9 +2574,11 @@ static int vop2_win_init(struct vop2 *vop2)
win->win_id = i;
win->vop2 = vop2;
if (vop2_cluster_window(win))
- ret = vop2_cluster_init(win);
+ ret = vop2_regmap_init(win, vop2->data->cluster_reg,
+ vop2->data->nr_cluster_regs);
else
- ret = vop2_esmart_init(win);
+ ret = vop2_regmap_init(win, vop2->data->smart_reg,
+ vop2->data->nr_smart_regs);
if (ret)
return ret;
}
@@ -3627,15 +2635,16 @@ static int vop2_bind(struct device *dev, struct device *master, void *data)
vop2->dev = dev;
vop2->data = vop2_data;
+ vop2->ops = vop2_data->ops;
+ vop2->version = vop2_data->version;
vop2->drm = drm;
dev_set_drvdata(dev, vop2);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vop");
- if (!res) {
- drm_err(vop2->drm, "failed to get vop2 register byname\n");
- return -EINVAL;
- }
+ if (!res)
+ return dev_err_probe(drm->dev, -EINVAL,
+ "failed to get vop2 register byname\n");
vop2->res = res;
vop2->regs = devm_ioremap_resource(dev, res);
@@ -3660,50 +2669,59 @@ static int vop2_bind(struct device *dev, struct device *master, void *data)
if (vop2_data->feature & VOP2_FEATURE_HAS_SYS_GRF) {
vop2->sys_grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
if (IS_ERR(vop2->sys_grf))
- return dev_err_probe(dev, PTR_ERR(vop2->sys_grf), "cannot get sys_grf");
+ return dev_err_probe(drm->dev, PTR_ERR(vop2->sys_grf),
+ "cannot get sys_grf\n");
}
if (vop2_data->feature & VOP2_FEATURE_HAS_VOP_GRF) {
vop2->vop_grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,vop-grf");
if (IS_ERR(vop2->vop_grf))
- return dev_err_probe(dev, PTR_ERR(vop2->vop_grf), "cannot get vop_grf");
+ return dev_err_probe(drm->dev, PTR_ERR(vop2->vop_grf),
+ "cannot get vop_grf\n");
}
if (vop2_data->feature & VOP2_FEATURE_HAS_VO1_GRF) {
vop2->vo1_grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,vo1-grf");
if (IS_ERR(vop2->vo1_grf))
- return dev_err_probe(dev, PTR_ERR(vop2->vo1_grf), "cannot get vo1_grf");
+ return dev_err_probe(drm->dev, PTR_ERR(vop2->vo1_grf),
+ "cannot get vo1_grf\n");
}
if (vop2_data->feature & VOP2_FEATURE_HAS_SYS_PMU) {
vop2->sys_pmu = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,pmu");
if (IS_ERR(vop2->sys_pmu))
- return dev_err_probe(dev, PTR_ERR(vop2->sys_pmu), "cannot get sys_pmu");
+ return dev_err_probe(drm->dev, PTR_ERR(vop2->sys_pmu),
+ "cannot get sys_pmu\n");
}
vop2->hclk = devm_clk_get(vop2->dev, "hclk");
- if (IS_ERR(vop2->hclk)) {
- drm_err(vop2->drm, "failed to get hclk source\n");
- return PTR_ERR(vop2->hclk);
- }
+ if (IS_ERR(vop2->hclk))
+ return dev_err_probe(drm->dev, PTR_ERR(vop2->hclk),
+ "failed to get hclk source\n");
vop2->aclk = devm_clk_get(vop2->dev, "aclk");
- if (IS_ERR(vop2->aclk)) {
- drm_err(vop2->drm, "failed to get aclk source\n");
- return PTR_ERR(vop2->aclk);
- }
+ if (IS_ERR(vop2->aclk))
+ return dev_err_probe(drm->dev, PTR_ERR(vop2->aclk),
+ "failed to get aclk source\n");
vop2->pclk = devm_clk_get_optional(vop2->dev, "pclk_vop");
- if (IS_ERR(vop2->pclk)) {
- drm_err(vop2->drm, "failed to get pclk source\n");
- return PTR_ERR(vop2->pclk);
- }
+ if (IS_ERR(vop2->pclk))
+ return dev_err_probe(drm->dev, PTR_ERR(vop2->pclk),
+ "failed to get pclk source\n");
+
+ vop2->pll_hdmiphy0 = devm_clk_get_optional(vop2->dev, "pll_hdmiphy0");
+ if (IS_ERR(vop2->pll_hdmiphy0))
+ return dev_err_probe(drm->dev, PTR_ERR(vop2->pll_hdmiphy0),
+ "failed to get pll_hdmiphy0\n");
+
+ vop2->pll_hdmiphy1 = devm_clk_get_optional(vop2->dev, "pll_hdmiphy1");
+ if (IS_ERR(vop2->pll_hdmiphy1))
+ return dev_err_probe(drm->dev, PTR_ERR(vop2->pll_hdmiphy1),
+ "failed to get pll_hdmiphy1\n");
vop2->irq = platform_get_irq(pdev, 0);
- if (vop2->irq < 0) {
- drm_err(vop2->drm, "cannot find irq for vop2\n");
- return vop2->irq;
- }
+ if (vop2->irq < 0)
+ return dev_err_probe(drm->dev, vop2->irq, "cannot find irq for vop2\n");
mutex_init(&vop2->vop2_lock);
@@ -3715,6 +2733,30 @@ static int vop2_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
+ if (vop2->version >= VOP_VERSION_RK3576) {
+ struct drm_crtc *crtc;
+
+ drm_for_each_crtc(crtc, drm) {
+ struct vop2_video_port *vp = to_vop2_video_port(crtc);
+ int vp_irq;
+ const char *irq_name = devm_kasprintf(dev, GFP_KERNEL, "vp%d", vp->id);
+
+ if (!irq_name)
+ return -ENOMEM;
+
+ vp_irq = platform_get_irq_byname(pdev, irq_name);
+ if (vp_irq < 0)
+ return dev_err_probe(drm->dev, vp_irq,
+ "cannot find irq for vop2 vp%d\n", vp->id);
+
+ ret = devm_request_irq(dev, vp_irq, rk3576_vp_isr, IRQF_SHARED, irq_name,
+ vp);
+ if (ret)
+ return dev_err_probe(drm->dev, ret,
+ "request irq for vop2 vp%d failed\n", vp->id);
+ }
+ }
+
ret = vop2_find_rgb_encoder(vop2);
if (ret >= 0) {
vop2->rgb = rockchip_rgb_init(dev, &vop2->vps[ret].crtc,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
index 29cc7fb8f6d8..680bedbb770e 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
@@ -9,9 +9,19 @@
#include <linux/regmap.h>
#include <drm/drm_modes.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
+#define VOP2_VERSION(major, minor, build) ((major) << 24 | (minor) << 16 | (build))
+
+/* The VOP version of a newer SoC is higher than that of an older one */
+#define VOP_VERSION_RK3568 VOP2_VERSION(0x40, 0x15, 0x8023)
+#define VOP_VERSION_RK3588 VOP2_VERSION(0x40, 0x17, 0x6786)
+#define VOP_VERSION_RK3528 VOP2_VERSION(0x50, 0x17, 0x1263)
+#define VOP_VERSION_RK3562 VOP2_VERSION(0x50, 0x17, 0x4350)
+#define VOP_VERSION_RK3576 VOP2_VERSION(0x50, 0x19, 0x9765)
+
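To make the comment above concrete: because the major/minor/build fields are packed from the most significant bits down, the encoded values sort newer IP above older IP. A standalone check, illustrative only:

	#include <assert.h>

	#define VOP2_VERSION(major, minor, build) ((major) << 24 | (minor) << 16 | (build))

	/* RK3576 (0x50, 0x19, 0x9765) encodes higher than RK3588 (0x40, 0x17, 0x6786) */
	static_assert(VOP2_VERSION(0x50, 0x19, 0x9765) > VOP2_VERSION(0x40, 0x17, 0x6786),
		      "newer VOP versions compare higher");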
#define VOP2_VP_FEATURE_OUTPUT_10BIT BIT(0)
#define VOP2_FEATURE_HAS_SYS_GRF BIT(0)
@@ -34,6 +44,13 @@ enum win_dly_mode {
VOP2_DLY_MODE_MAX,
};
+enum vop2_dly_module {
+ VOP2_DLY_WIN, /* Win delay cycle for this VP */
+ VOP2_DLY_LAYER_MIX, /* Layer Mix delay cycle for this VP */
+ VOP2_DLY_HDR_MIX, /* HDR delay cycle for this VP */
+ VOP2_DLY_MAX,
+};
+
enum vop2_scale_up_mode {
VOP2_SCALE_UP_NRST_NBOR,
VOP2_SCALE_UP_BIL,
@@ -58,6 +75,23 @@ enum vop2_scale_down_mode {
#define VOP2_PD_DSC_4K BIT(6)
#define VOP2_PD_ESMART BIT(7)
+#define vop2_output_if_is_hdmi(x) ((x) == ROCKCHIP_VOP2_EP_HDMI0 || \
+ (x) == ROCKCHIP_VOP2_EP_HDMI1)
+
+#define vop2_output_if_is_dp(x) ((x) == ROCKCHIP_VOP2_EP_DP0 || \
+ (x) == ROCKCHIP_VOP2_EP_DP1)
+
+#define vop2_output_if_is_edp(x) ((x) == ROCKCHIP_VOP2_EP_EDP0 || \
+ (x) == ROCKCHIP_VOP2_EP_EDP1)
+
+#define vop2_output_if_is_mipi(x) ((x) == ROCKCHIP_VOP2_EP_MIPI0 || \
+ (x) == ROCKCHIP_VOP2_EP_MIPI1)
+
+#define vop2_output_if_is_lvds(x) ((x) == ROCKCHIP_VOP2_EP_LVDS0 || \
+ (x) == ROCKCHIP_VOP2_EP_LVDS1)
+
+#define vop2_output_if_is_dpi(x) ((x) == ROCKCHIP_VOP2_EP_RGB0)
+
enum vop2_win_regs {
VOP2_WIN_ENABLE,
VOP2_WIN_FORMAT,
@@ -113,16 +147,22 @@ enum vop2_win_regs {
VOP2_WIN_AFBC_UV_SWAP,
VOP2_WIN_AFBC_AUTO_GATING_EN,
VOP2_WIN_AFBC_BLOCK_SPLIT_EN,
+ VOP2_WIN_AFBC_PLD_OFFSET_EN,
VOP2_WIN_AFBC_PIC_VIR_WIDTH,
VOP2_WIN_AFBC_TILE_NUM,
VOP2_WIN_AFBC_PIC_OFFSET,
VOP2_WIN_AFBC_PIC_SIZE,
VOP2_WIN_AFBC_DSP_OFFSET,
- VOP2_WIN_AFBC_TRANSFORM_OFFSET,
+ VOP2_WIN_AFBC_PLD_OFFSET,
+ VOP2_WIN_TRANSFORM_OFFSET,
VOP2_WIN_AFBC_HDR_PTR,
VOP2_WIN_AFBC_HALF_BLOCK_EN,
VOP2_WIN_AFBC_ROTATE_270,
VOP2_WIN_AFBC_ROTATE_90,
+
+ VOP2_WIN_VP_SEL,
+ VOP2_WIN_DLY_NUM,
+
VOP2_WIN_MAX_REG,
};
@@ -140,6 +180,7 @@ struct vop2_win_data {
unsigned int phys_id;
u32 base;
+ u32 possible_vp_mask;
enum drm_plane_type type;
u32 nformats;
@@ -148,9 +189,9 @@ struct vop2_win_data {
const unsigned int supported_rotations;
/**
- * @layer_sel_id: defined by register OVERLAY_LAYER_SEL of VOP2
+ * @layer_sel_id: defined by register OVERLAY_LAYER_SEL or PORTn_LAYER_SEL
*/
- unsigned int layer_sel_id;
+ unsigned int layer_sel_id[ROCKCHIP_MAX_CRTC];
uint64_t feature;
uint8_t axi_bus_id;
@@ -162,6 +203,23 @@ struct vop2_win_data {
const u8 dly[VOP2_DLY_MODE_MAX];
};
+struct vop2_win {
+ struct vop2 *vop2;
+ struct drm_plane base;
+ const struct vop2_win_data *data;
+ struct regmap_field *reg[VOP2_WIN_MAX_REG];
+
+ /**
+ * @win_id: graphics window id; a cluster may be split into two
+ * graphics windows.
+ */
+ u8 win_id;
+ u8 delay;
+ u32 offset;
+
+ enum drm_plane_type type;
+};
+
struct vop2_video_port_data {
unsigned int id;
u32 feature;
@@ -170,22 +228,116 @@ struct vop2_video_port_data {
struct vop_rect max_output;
const u8 pre_scan_max_dly[4];
unsigned int offset;
+ /**
+ * @pixel_rate: pixels per cycle
+ */
+ u8 pixel_rate;
+};
+
+struct vop2_video_port {
+ struct drm_crtc crtc;
+ struct vop2 *vop2;
+ struct clk *dclk;
+ struct clk *dclk_src;
+ unsigned int id;
+ const struct vop2_video_port_data *data;
+
+ struct completion dsp_hold_completion;
+
+ /**
+ * @win_mask: Bitmask of windows attached to the video port.
+ */
+ u32 win_mask;
+
+ struct vop2_win *primary_plane;
+ struct drm_pending_vblank_event *event;
+
+ unsigned int nlayers;
+};
+
+/**
+ * struct vop2_ops - helper operations for vop2 hardware
+ *
+ * These hooks are used by the common part of the vop2 driver to
+ * implement the proper behaviour of different variants.
+ */
+struct vop2_ops {
+ unsigned long (*setup_intf_mux)(struct vop2_video_port *vp, int ep_id, u32 polflags);
+ void (*setup_bg_dly)(struct vop2_video_port *vp);
+ void (*setup_overlay)(struct vop2_video_port *vp);
};
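For context, the common code dispatches through these hooks (for example vop2->ops->setup_overlay(vp) in vop2_crtc_atomic_begin() earlier in this patch). A rough sketch of how a variant would wire them up; the rkXXXX names are invented for illustration:

	static void rkXXXX_setup_overlay(struct vop2_video_port *vp)
	{
		/* program this variant's per-port overlay/layer-select registers */
	}

	static const struct vop2_ops rkXXXX_vop_ops = {
		.setup_overlay = rkXXXX_setup_overlay,
	};

	/* referenced from the matching struct vop2_data as .ops = &rkXXXX_vop_ops */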
struct vop2_data {
u8 nr_vps;
u64 feature;
+ u32 version;
+ const struct vop2_ops *ops;
const struct vop2_win_data *win;
const struct vop2_video_port_data *vp;
+ const struct reg_field *cluster_reg;
+ const struct reg_field *smart_reg;
const struct vop2_regs_dump *regs_dump;
struct vop_rect max_input;
struct vop_rect max_output;
+ unsigned int nr_cluster_regs;
+ unsigned int nr_smart_regs;
unsigned int win_size;
unsigned int regs_dump_size;
unsigned int soc_id;
};
+struct vop2 {
+ u32 version;
+ struct device *dev;
+ struct drm_device *drm;
+ struct vop2_video_port vps[ROCKCHIP_MAX_CRTC];
+
+ const struct vop2_data *data;
+ const struct vop2_ops *ops;
+ /*
+ * Number of windows that are registered as planes; may be less than the
+ * total number of hardware windows.
+ */
+ u32 registered_num_wins;
+
+ struct resource *res;
+ void __iomem *regs;
+ struct regmap *map;
+
+ struct regmap *sys_grf;
+ struct regmap *vop_grf;
+ struct regmap *vo1_grf;
+ struct regmap *sys_pmu;
+
+ /* physical map length of vop2 register */
+ u32 len;
+
+ void __iomem *lut_regs;
+
+ /* protects crtc enable/disable */
+ struct mutex vop2_lock;
+
+ int irq;
+
+ /*
+ * Some global resources are shared between all video ports (CRTCs), so
+ * we need a ref counter here.
+ */
+ unsigned int enable_count;
+ struct clk *hclk;
+ struct clk *aclk;
+ struct clk *pclk;
+ struct clk *pll_hdmiphy0;
+ struct clk *pll_hdmiphy1;
+
+ /* optional internal rgb encoder */
+ struct rockchip_rgb *rgb;
+
+ /* must be put at the end of the struct */
+ struct vop2_win win[];
+};
+
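Since win[] is a flexible array member, the structure is sized for the number of hardware windows when it is allocated; a typical allocation sketch (the exact call in vop2_bind() may differ):

	/* fragment, assuming dev and vop2_data are in scope as in vop2_bind() */
	vop2 = devm_kzalloc(dev, struct_size(vop2, win, vop2_data->win_size), GFP_KERNEL);
	if (!vop2)
		return -ENOMEM;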
/* interrupt define */
#define FS_NEW_INTR BIT(4)
#define ADDR_SAME_INTR BIT(5)
@@ -240,10 +392,13 @@ enum dst_factor_mode {
#define RK3568_REG_CFG_DONE 0x000
#define RK3568_VERSION_INFO 0x004
#define RK3568_SYS_AUTO_GATING_CTRL 0x008
+#define RK3576_SYS_MMU_CTRL_IMD 0x020
#define RK3568_SYS_AXI_LUT_CTRL 0x024
#define RK3568_DSP_IF_EN 0x028
+#define RK3576_SYS_PORT_CTRL_IMD 0x028
#define RK3568_DSP_IF_CTRL 0x02c
#define RK3568_DSP_IF_POL 0x030
+#define RK3576_SYS_CLUSTER_PD_CTRL_IMD 0x030
#define RK3588_SYS_PD_CTRL 0x034
#define RK3568_WB_CTRL 0x40
#define RK3568_WB_XSCAL_FACTOR 0x44
@@ -263,6 +418,55 @@ enum dst_factor_mode {
#define RK3568_VP_INT_CLR(vp) (0xA4 + (vp) * 0x10)
#define RK3568_VP_INT_STATUS(vp) (0xA8 + (vp) * 0x10)
#define RK3568_VP_INT_RAW_STATUS(vp) (0xAC + (vp) * 0x10)
+#define RK3576_WB_CTRL 0x100
+#define RK3576_WB_XSCAL_FACTOR 0x104
+#define RK3576_WB_YRGB_MST 0x108
+#define RK3576_WB_CBR_MST 0x10C
+#define RK3576_WB_VIR_STRIDE 0x110
+#define RK3576_WB_TIMEOUT_CTRL 0x114
+#define RK3576_MIPI0_IF_CTRL 0x180
+#define RK3576_HDMI0_IF_CTRL 0x184
+#define RK3576_EDP0_IF_CTRL 0x188
+#define RK3576_DP0_IF_CTRL 0x18C
+#define RK3576_RGB_IF_CTRL 0x194
+#define RK3576_DP1_IF_CTRL 0x1A4
+#define RK3576_DP2_IF_CTRL 0x1B0
+
+/* Extra OVL register definition */
+#define RK3576_SYS_EXTRA_ALPHA_CTRL 0x500
+#define RK3576_CLUSTER0_MIX_SRC_COLOR_CTRL 0x530
+#define RK3576_CLUSTER0_MIX_DST_COLOR_CTRL 0x534
+#define RK3576_CLUSTER0_MIX_SRC_ALPHA_CTRL 0x538
+#define RK3576_CLUSTER0_MIX_DST_ALPHA_CTRL 0x53c
+#define RK3576_CLUSTER1_MIX_SRC_COLOR_CTRL 0x540
+#define RK3576_CLUSTER1_MIX_DST_COLOR_CTRL 0x544
+#define RK3576_CLUSTER1_MIX_SRC_ALPHA_CTRL 0x548
+#define RK3576_CLUSTER1_MIX_DST_ALPHA_CTRL 0x54c
+
+/* OVL registers for Video Port definition */
+#define RK3576_OVL_CTRL(vp) (0x600 + (vp) * 0x100)
+#define RK3576_OVL_LAYER_SEL(vp) (0x604 + (vp) * 0x100)
+#define RK3576_OVL_MIX0_SRC_COLOR_CTRL(vp) (0x620 + (vp) * 0x100)
+#define RK3576_OVL_MIX0_DST_COLOR_CTRL(vp) (0x624 + (vp) * 0x100)
+#define RK3576_OVL_MIX0_SRC_ALPHA_CTRL(vp) (0x628 + (vp) * 0x100)
+#define RK3576_OVL_MIX0_DST_ALPHA_CTRL(vp) (0x62C + (vp) * 0x100)
+#define RK3576_OVL_MIX1_SRC_COLOR_CTRL(vp) (0x630 + (vp) * 0x100)
+#define RK3576_OVL_MIX1_DST_COLOR_CTRL(vp) (0x634 + (vp) * 0x100)
+#define RK3576_OVL_MIX1_SRC_ALPHA_CTRL(vp) (0x638 + (vp) * 0x100)
+#define RK3576_OVL_MIX1_DST_ALPHA_CTRL(vp) (0x63C + (vp) * 0x100)
+#define RK3576_OVL_MIX2_SRC_COLOR_CTRL(vp) (0x640 + (vp) * 0x100)
+#define RK3576_OVL_MIX2_DST_COLOR_CTRL(vp) (0x644 + (vp) * 0x100)
+#define RK3576_OVL_MIX2_SRC_ALPHA_CTRL(vp) (0x648 + (vp) * 0x100)
+#define RK3576_OVL_MIX2_DST_ALPHA_CTRL(vp) (0x64C + (vp) * 0x100)
+#define RK3576_EXTRA_OVL_SRC_COLOR_CTRL(vp) (0x650 + (vp) * 0x100)
+#define RK3576_EXTRA_OVL_DST_COLOR_CTRL(vp) (0x654 + (vp) * 0x100)
+#define RK3576_EXTRA_OVL_SRC_ALPHA_CTRL(vp) (0x658 + (vp) * 0x100)
+#define RK3576_EXTRA_OVL_DST_ALPHA_CTRL(vp) (0x65C + (vp) * 0x100)
+#define RK3576_OVL_HDR_SRC_COLOR_CTRL(vp) (0x660 + (vp) * 0x100)
+#define RK3576_OVL_HDR_DST_COLOR_CTRL(vp) (0x664 + (vp) * 0x100)
+#define RK3576_OVL_HDR_SRC_ALPHA_CTRL(vp) (0x668 + (vp) * 0x100)
+#define RK3576_OVL_HDR_DST_ALPHA_CTRL(vp) (0x66C + (vp) * 0x100)
+#define RK3576_OVL_BG_MIX_CTRL(vp) (0x670 + (vp) * 0x100)
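These per-port OVL registers stride by 0x100 per video port; worked values for illustration: RK3576_OVL_CTRL(0) is 0x600, RK3576_OVL_CTRL(1) is 0x700, and RK3576_OVL_BG_MIX_CTRL(2) is 0x670 + 2 * 0x100 = 0x870.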
/* Video Port registers definition */
#define RK3568_VP0_CTRL_BASE 0x0C00
@@ -335,7 +539,7 @@ enum dst_factor_mode {
#define RK3568_CLUSTER_WIN_DSP_INFO 0x24
#define RK3568_CLUSTER_WIN_DSP_ST 0x28
#define RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB 0x30
-#define RK3568_CLUSTER_WIN_AFBCD_TRANSFORM_OFFSET 0x3C
+#define RK3568_CLUSTER_WIN_TRANSFORM_OFFSET 0x3C
#define RK3568_CLUSTER_WIN_AFBCD_OUTPUT_CTRL 0x50
#define RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE 0x54
#define RK3568_CLUSTER_WIN_AFBCD_HDR_PTR 0x58
@@ -345,7 +549,11 @@ enum dst_factor_mode {
#define RK3568_CLUSTER_WIN_AFBCD_DSP_OFFSET 0x68
#define RK3568_CLUSTER_WIN_AFBCD_CTRL 0x6C
+#define RK3576_CLUSTER_WIN_AFBCD_PLD_PTR_OFFSET 0x78
+
#define RK3568_CLUSTER_CTRL 0x100
+#define RK3576_CLUSTER_PORT_SEL_IMD 0x1F4
+#define RK3576_CLUSTER_DLY_NUM 0x1F8
/* (E)smart register definition, offset relative to window base */
#define RK3568_SMART_CTRL0 0x00
@@ -396,6 +604,9 @@ enum dst_factor_mode {
#define RK3568_SMART_REGION3_SCL_FACTOR_CBR 0xC8
#define RK3568_SMART_REGION3_SCL_OFFSET 0xCC
#define RK3568_SMART_COLOR_KEY_CTRL 0xD0
+#define RK3576_SMART_ALPHA_MAP 0xD8
+#define RK3576_SMART_PORT_SEL_IMD 0xF4
+#define RK3576_SMART_DLY_NUM 0xF8
/* HDR register definition */
#define RK3568_HDR_LUT_CTRL 0x2000
@@ -544,6 +755,17 @@ enum dst_factor_mode {
#define POLFLAG_DCLK_INV BIT(3)
+#define RK3576_OVL_CTRL__YUV_MODE BIT(0)
+#define RK3576_OVL_BG_MIX_CTRL__BG_DLY GENMASK(31, 24)
+
+#define RK3576_DSP_IF_CFG_DONE_IMD BIT(31)
+#define RK3576_DSP_IF_DCLK_SEL_OUT BIT(21)
+#define RK3576_DSP_IF_PCLK_DIV BIT(20)
+#define RK3576_DSP_IF_PIN_POL GENMASK(5, 4)
+#define RK3576_DSP_IF_MUX GENMASK(3, 2)
+#define RK3576_DSP_IF_CLK_OUT_EN BIT(1)
+#define RK3576_DSP_IF_EN BIT(0)
+
enum vop2_layer_phy_id {
ROCKCHIP_VOP2_CLUSTER0 = 0,
ROCKCHIP_VOP2_CLUSTER1,
@@ -560,4 +782,52 @@ enum vop2_layer_phy_id {
extern const struct component_ops vop2_component_ops;
+static inline void vop2_writel(struct vop2 *vop2, u32 offset, u32 v)
+{
+ regmap_write(vop2->map, offset, v);
+}
+
+static inline void vop2_vp_write(struct vop2_video_port *vp, u32 offset, u32 v)
+{
+ regmap_write(vp->vop2->map, vp->data->offset + offset, v);
+}
+
+static inline u32 vop2_readl(struct vop2 *vop2, u32 offset)
+{
+ u32 val;
+
+ regmap_read(vop2->map, offset, &val);
+
+ return val;
+}
+
+static inline u32 vop2_vp_read(struct vop2_video_port *vp, u32 offset)
+{
+ u32 val;
+
+ regmap_read(vp->vop2->map, vp->data->offset + offset, &val);
+
+ return val;
+}
+
+static inline void vop2_win_write(const struct vop2_win *win, unsigned int reg, u32 v)
+{
+ regmap_field_write(win->reg[reg], v);
+}
+
+static inline bool vop2_cluster_window(const struct vop2_win *win)
+{
+ return win->data->feature & WIN_FEATURE_CLUSTER;
+}
+
+static inline struct vop2_video_port *to_vop2_video_port(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct vop2_video_port, crtc);
+}
+
+static inline struct vop2_win *to_vop2_win(struct drm_plane *p)
+{
+ return container_of(p, struct vop2_win, base);
+}
+
#endif /* _ROCKCHIP_DRM_VOP2_H */
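As a usage sketch for the accessors above (illustrative only; the function name and the 0x0 offset are placeholders, not taken from the driver): per-window fields go through the regmap_field table set up by vop2_regmap_init(), while per-port registers are addressed relative to the port's own register window.

	static void example_touch_port(struct vop2_video_port *vp, struct vop2_win *win)
	{
		u32 val;

		/* enable a window through its per-SoC reg_field table */
		vop2_win_write(win, VOP2_WIN_ENABLE, 1);

		/* read-modify-write a register inside this port's register window */
		val = vop2_vp_read(vp, 0x0);
		vop2_vp_write(vp, 0x0, val);
	}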
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index 385cf6881504..a673779de3d2 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -448,17 +448,14 @@ struct drm_encoder_helper_funcs px30_lvds_encoder_helper_funcs = {
static int rk3288_lvds_probe(struct platform_device *pdev,
struct rockchip_lvds *lvds)
{
- int ret;
-
lvds->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lvds->regs))
return PTR_ERR(lvds->regs);
- lvds->pclk = devm_clk_get(lvds->dev, "pclk_lvds");
- if (IS_ERR(lvds->pclk)) {
- DRM_DEV_ERROR(lvds->dev, "could not get pclk_lvds\n");
- return PTR_ERR(lvds->pclk);
- }
+ lvds->pclk = devm_clk_get_prepared(lvds->dev, "pclk_lvds");
+ if (IS_ERR(lvds->pclk))
+ return dev_err_probe(lvds->dev, PTR_ERR(lvds->pclk),
+ "could not get or prepare pclk_lvds\n");
lvds->pins = devm_kzalloc(lvds->dev, sizeof(*lvds->pins),
GFP_KERNEL);
@@ -467,25 +464,19 @@ static int rk3288_lvds_probe(struct platform_device *pdev,
lvds->pins->p = devm_pinctrl_get(lvds->dev);
if (IS_ERR(lvds->pins->p)) {
- DRM_DEV_ERROR(lvds->dev, "no pinctrl handle\n");
+ dev_warn(lvds->dev, "no pinctrl handle\n");
devm_kfree(lvds->dev, lvds->pins);
lvds->pins = NULL;
} else {
lvds->pins->default_state =
pinctrl_lookup_state(lvds->pins->p, "lcdc");
if (IS_ERR(lvds->pins->default_state)) {
- DRM_DEV_ERROR(lvds->dev, "no default pinctrl state\n");
+ dev_warn(lvds->dev, "no default pinctrl state\n");
devm_kfree(lvds->dev, lvds->pins);
lvds->pins = NULL;
}
}
- ret = clk_prepare(lvds->pclk);
- if (ret < 0) {
- DRM_DEV_ERROR(lvds->dev, "failed to prepare pclk_lvds\n");
- return ret;
- }
-
return 0;
}
@@ -555,11 +546,10 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
lvds->drm_dev = drm_dev;
port = of_graph_get_port_by_id(dev->of_node, 1);
- if (!port) {
- DRM_DEV_ERROR(dev,
- "can't found port point, please init lvds panel port!\n");
- return -EINVAL;
- }
+ if (!port)
+ return dev_err_probe(dev, -EINVAL,
+ "can't found port point, please init lvds panel port!\n");
+
for_each_child_of_node(port, endpoint) {
child_count++;
of_property_read_u32(endpoint, "reg", &endpoint_id);
@@ -571,8 +561,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
}
}
if (!child_count) {
- DRM_DEV_ERROR(dev, "lvds port does not have any children\n");
- ret = -EINVAL;
+ ret = dev_err_probe(dev, -EINVAL, "lvds port does not have any children\n");
goto err_put_port;
} else if (ret) {
dev_err_probe(dev, ret, "failed to find panel and bridge node\n");
@@ -589,8 +578,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
lvds->output = rockchip_lvds_name_to_output(name);
if (lvds->output < 0) {
- DRM_DEV_ERROR(dev, "invalid output type [%s]\n", name);
- ret = lvds->output;
+ ret = dev_err_probe(dev, lvds->output, "invalid output type [%s]\n", name);
goto err_put_remote;
}
@@ -601,8 +589,8 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
lvds->format = rockchip_lvds_name_to_format(name);
if (lvds->format < 0) {
- DRM_DEV_ERROR(dev, "invalid data-mapping format [%s]\n", name);
- ret = lvds->format;
+ ret = dev_err_probe(dev, lvds->format,
+ "invalid data-mapping format [%s]\n", name);
goto err_put_remote;
}
@@ -612,8 +600,8 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_LVDS);
if (ret < 0) {
- DRM_DEV_ERROR(drm_dev->dev,
- "failed to initialize encoder: %d\n", ret);
+ drm_err(drm_dev,
+ "failed to initialize encoder: %d\n", ret);
goto err_put_remote;
}
@@ -626,8 +614,8 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
&rockchip_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
if (ret < 0) {
- DRM_DEV_ERROR(drm_dev->dev,
- "failed to initialize connector: %d\n", ret);
+ drm_err(drm_dev,
+ "failed to initialize connector: %d\n", ret);
goto err_free_encoder;
}
@@ -641,9 +629,9 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
connector = drm_bridge_connector_init(lvds->drm_dev, encoder);
if (IS_ERR(connector)) {
- DRM_DEV_ERROR(drm_dev->dev,
- "failed to initialize bridge connector: %pe\n",
- connector);
+ drm_err(drm_dev,
+ "failed to initialize bridge connector: %pe\n",
+ connector);
ret = PTR_ERR(connector);
goto err_free_encoder;
}
@@ -651,8 +639,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0) {
- DRM_DEV_ERROR(drm_dev->dev,
- "failed to attach encoder: %d\n", ret);
+ drm_err(drm_dev, "failed to attach encoder: %d\n", ret);
goto err_free_connector;
}
@@ -714,34 +701,25 @@ static int rockchip_lvds_probe(struct platform_device *pdev)
lvds->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
"rockchip,grf");
- if (IS_ERR(lvds->grf)) {
- DRM_DEV_ERROR(dev, "missing rockchip,grf property\n");
- return PTR_ERR(lvds->grf);
- }
+ if (IS_ERR(lvds->grf))
+ return dev_err_probe(dev, PTR_ERR(lvds->grf), "missing rockchip,grf property\n");
ret = lvds->soc_data->probe(pdev, lvds);
- if (ret) {
- DRM_DEV_ERROR(dev, "Platform initialization failed\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Platform initialization failed\n");
dev_set_drvdata(dev, lvds);
ret = component_add(&pdev->dev, &rockchip_lvds_component_ops);
- if (ret < 0) {
- DRM_DEV_ERROR(dev, "failed to add component\n");
- clk_unprepare(lvds->pclk);
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to add component\n");
- return ret;
+ return 0;
}
static void rockchip_lvds_remove(struct platform_device *pdev)
{
- struct rockchip_lvds *lvds = platform_get_drvdata(pdev);
-
component_del(&pdev->dev, &rockchip_lvds_component_ops);
- clk_unprepare(lvds->pclk);
}
struct platform_driver rockchip_lvds_driver = {
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
index 65a88f489693..14958d6b3d2e 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
@@ -4,17 +4,56 @@
* Author: Andy Yan <andy.yan@rock-chips.com>
*/
+#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/component.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/of.h>
+#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_plane.h>
#include <drm/drm_print.h>
#include "rockchip_drm_vop2.h"
+union vop2_alpha_ctrl {
+ u32 val;
+ struct {
+ /* [0:1] */
+ u32 color_mode:1;
+ u32 alpha_mode:1;
+ /* [2:3] */
+ u32 blend_mode:2;
+ u32 alpha_cal_mode:1;
+ /* [5:7] */
+ u32 factor_mode:3;
+ /* [8:9] */
+ u32 alpha_en:1;
+ u32 src_dst_swap:1;
+ u32 reserved:6;
+ /* [16:23] */
+ u32 glb_alpha:8;
+ } bits;
+};
+
+struct vop2_alpha {
+ union vop2_alpha_ctrl src_color_ctrl;
+ union vop2_alpha_ctrl dst_color_ctrl;
+ union vop2_alpha_ctrl src_alpha_ctrl;
+ union vop2_alpha_ctrl dst_alpha_ctrl;
+};
+
+struct vop2_alpha_config {
+ bool src_premulti_en;
+ bool dst_premulti_en;
+ bool src_pixel_alpha_en;
+ bool dst_pixel_alpha_en;
+ u16 src_glb_alpha_value;
+ u16 dst_glb_alpha_value;
+};
+
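A small sketch of how a control word is composed through the union above, assuming the LSB-first bitfield layout the driver relies on; the helper name and field values are placeholders:

	static u32 example_src_alpha_ctrl(u8 global_alpha)
	{
		union vop2_alpha_ctrl ctrl = { .val = 0 };

		ctrl.bits.alpha_en = 1;
		ctrl.bits.glb_alpha = global_alpha;	/* bits [23:16] */

		return ctrl.val;	/* written to one of the *_ALPHA_CTRL registers */
	}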
static const uint32_t formats_cluster[] = {
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
@@ -32,6 +71,37 @@ static const uint32_t formats_cluster[] = {
DRM_FORMAT_Y210, /* yuv422_10bit non-Linear mode only */
};
+/*
+ * The cluster windows on rk3576 support:
+ * RGB: linear mode and afbc
+ * YUV: linear mode and rfbc
+ * rfbc is a Rockchip-defined non-linear mode produced by
+ * the video decoder.
+ */
+static const uint32_t formats_rk3576_cluster[] = {
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_NV12, /* yuv420_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV21, /* yvu420_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV16, /* yuv422_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV61, /* yvu422_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV24, /* yuv444_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV42, /* yvu444_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV15, /* yuv420_10bit linear mode, 2 plane, no padding */
+ DRM_FORMAT_NV20, /* yuv422_10bit linear mode, 2 plane, no padding */
+ DRM_FORMAT_NV30, /* yuv444_10bit linear mode, 2 plane, no padding */
+};
+
static const uint32_t formats_esmart[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
@@ -78,6 +148,41 @@ static const uint32_t formats_rk356x_esmart[] = {
DRM_FORMAT_VYUY, /* yuv422_8bit[VYUY] linear mode */
};
+/*
+ * Add XRGB2101010/ARGB2101010/ARGB1555/XRGB1555
+ */
+static const uint32_t formats_rk3576_esmart[] = {
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_NV12, /* yuv420_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV21, /* yvu420_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV16, /* yuv422_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV61, /* yvu422_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV20, /* yuv422_10bit linear mode, 2 plane, no padding */
+ DRM_FORMAT_NV24, /* yuv444_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV42, /* yvu444_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV30, /* yuv444_10bit linear mode, 2 plane, no padding */
+ DRM_FORMAT_NV15, /* yuv420_10bit linear mode, 2 plane, no padding */
+ DRM_FORMAT_YVYU, /* yuv422_8bit[YVYU] linear mode */
+ DRM_FORMAT_VYUY, /* yuv422_8bit[VYUY] linear mode */
+ DRM_FORMAT_YUYV, /* yuv422_8bit[YUYV] linear mode */
+ DRM_FORMAT_UYVY, /* yuv422_8bit[UYVY] linear mode */
+};
+
static const uint32_t formats_smart[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
@@ -131,6 +236,321 @@ static const uint64_t format_modifiers_afbc[] = {
DRM_FORMAT_MOD_INVALID,
};
+/* Used from rk3576 on: AFBC 32x8 half mode */
+static const uint64_t format_modifiers_rk3576_afbc[] = {
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
+ AFBC_FORMAT_MOD_SPLIT),
+
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
+ AFBC_FORMAT_MOD_SPARSE |
+ AFBC_FORMAT_MOD_SPLIT),
+
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
+ AFBC_FORMAT_MOD_YTR |
+ AFBC_FORMAT_MOD_SPLIT),
+
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
+ AFBC_FORMAT_MOD_CBR |
+ AFBC_FORMAT_MOD_SPLIT),
+
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
+ AFBC_FORMAT_MOD_CBR |
+ AFBC_FORMAT_MOD_SPARSE |
+ AFBC_FORMAT_MOD_SPLIT),
+
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
+ AFBC_FORMAT_MOD_YTR |
+ AFBC_FORMAT_MOD_CBR |
+ AFBC_FORMAT_MOD_SPLIT),
+
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
+ AFBC_FORMAT_MOD_YTR |
+ AFBC_FORMAT_MOD_CBR |
+ AFBC_FORMAT_MOD_SPARSE |
+ AFBC_FORMAT_MOD_SPLIT),
+
+ /* SPLIT mandates SPARSE, RGB modes mandate YTR */
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
+ AFBC_FORMAT_MOD_YTR |
+ AFBC_FORMAT_MOD_SPARSE |
+ AFBC_FORMAT_MOD_SPLIT),
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
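The modifier table is terminated by DRM_FORMAT_MOD_INVALID, so a linear scan tells whether a given framebuffer modifier is in the supported set. In practice the DRM core performs this check against the list passed at plane init; the helper below is only an illustration:

	static bool example_modifier_supported(u64 modifier)
	{
		int i;

		for (i = 0; format_modifiers_rk3576_afbc[i] != DRM_FORMAT_MOD_INVALID; i++)
			if (format_modifiers_rk3576_afbc[i] == modifier)
				return true;

		return false;
	}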
+static const struct reg_field rk3568_vop_cluster_regs[VOP2_WIN_MAX_REG] = {
+ [VOP2_WIN_ENABLE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 0, 0),
+ [VOP2_WIN_FORMAT] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 1, 5),
+ [VOP2_WIN_RB_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 14, 14),
+ [VOP2_WIN_DITHER_UP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 18, 18),
+ [VOP2_WIN_ACT_INFO] = REG_FIELD(RK3568_CLUSTER_WIN_ACT_INFO, 0, 31),
+ [VOP2_WIN_DSP_INFO] = REG_FIELD(RK3568_CLUSTER_WIN_DSP_INFO, 0, 31),
+ [VOP2_WIN_DSP_ST] = REG_FIELD(RK3568_CLUSTER_WIN_DSP_ST, 0, 31),
+ [VOP2_WIN_YRGB_MST] = REG_FIELD(RK3568_CLUSTER_WIN_YRGB_MST, 0, 31),
+ [VOP2_WIN_UV_MST] = REG_FIELD(RK3568_CLUSTER_WIN_CBR_MST, 0, 31),
+ [VOP2_WIN_YUV_CLIP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 19, 19),
+ [VOP2_WIN_YRGB_VIR] = REG_FIELD(RK3568_CLUSTER_WIN_VIR, 0, 15),
+ [VOP2_WIN_UV_VIR] = REG_FIELD(RK3568_CLUSTER_WIN_VIR, 16, 31),
+ [VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 8, 8),
+ [VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 9, 9),
+ [VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 10, 11),
+ [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 0, 3),
+ [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 5, 8),
+ /* RK3588 only, reserved bit on rk3568 */
+ [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3568_CLUSTER_CTRL, 13, 13),
+
+ /* Scale */
+ [VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 0, 15),
+ [VOP2_WIN_SCALE_YRGB_Y] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 16, 31),
+ [VOP2_WIN_YRGB_VER_SCL_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 14, 15),
+ [VOP2_WIN_YRGB_HOR_SCL_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 12, 13),
+ [VOP2_WIN_BIC_COE_SEL] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 2, 3),
+ [VOP2_WIN_VSD_YRGB_GT2] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 28, 28),
+ [VOP2_WIN_VSD_YRGB_GT4] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 29, 29),
+
+ /* cluster regs */
+ [VOP2_WIN_AFBC_ENABLE] = REG_FIELD(RK3568_CLUSTER_CTRL, 1, 1),
+ [VOP2_WIN_CLUSTER_ENABLE] = REG_FIELD(RK3568_CLUSTER_CTRL, 0, 0),
+ [VOP2_WIN_CLUSTER_LB_MODE] = REG_FIELD(RK3568_CLUSTER_CTRL, 4, 7),
+
+ /* afbc regs */
+ [VOP2_WIN_AFBC_FORMAT] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 2, 6),
+ [VOP2_WIN_AFBC_RB_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 9, 9),
+ [VOP2_WIN_AFBC_UV_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 10, 10),
+ [VOP2_WIN_AFBC_AUTO_GATING_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_OUTPUT_CTRL, 4, 4),
+ [VOP2_WIN_AFBC_HALF_BLOCK_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 7, 7),
+ [VOP2_WIN_AFBC_BLOCK_SPLIT_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 8, 8),
+ [VOP2_WIN_AFBC_HDR_PTR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_HDR_PTR, 0, 31),
+ [VOP2_WIN_AFBC_PIC_SIZE] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_PIC_SIZE, 0, 31),
+ [VOP2_WIN_AFBC_PIC_VIR_WIDTH] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_VIR_WIDTH, 0, 15),
+ [VOP2_WIN_AFBC_TILE_NUM] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_VIR_WIDTH, 16, 31),
+ [VOP2_WIN_AFBC_PIC_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_PIC_OFFSET, 0, 31),
+ [VOP2_WIN_AFBC_DSP_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_DSP_OFFSET, 0, 31),
+ [VOP2_WIN_TRANSFORM_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_TRANSFORM_OFFSET, 0, 31),
+ [VOP2_WIN_AFBC_ROTATE_90] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 0, 0),
+ [VOP2_WIN_AFBC_ROTATE_270] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 1, 1),
+ [VOP2_WIN_XMIRROR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 2, 2),
+ [VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 3, 3),
+ [VOP2_WIN_UV_SWAP] = { .reg = 0xffffffff },
+ [VOP2_WIN_COLOR_KEY] = { .reg = 0xffffffff },
+ [VOP2_WIN_COLOR_KEY_EN] = { .reg = 0xffffffff },
+ [VOP2_WIN_SCALE_CBCR_X] = { .reg = 0xffffffff },
+ [VOP2_WIN_SCALE_CBCR_Y] = { .reg = 0xffffffff },
+ [VOP2_WIN_YRGB_HSCL_FILTER_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_YRGB_VSCL_FILTER_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_CBCR_VER_SCL_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_CBCR_HSCL_FILTER_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_CBCR_HOR_SCL_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_CBCR_VSCL_FILTER_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_VSD_CBCR_GT2] = { .reg = 0xffffffff },
+ [VOP2_WIN_VSD_CBCR_GT4] = { .reg = 0xffffffff },
+};
+
+static const struct reg_field rk3568_vop_smart_regs[VOP2_WIN_MAX_REG] = {
+ [VOP2_WIN_ENABLE] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 0, 0),
+ [VOP2_WIN_FORMAT] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 1, 5),
+ [VOP2_WIN_DITHER_UP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 12, 12),
+ [VOP2_WIN_RB_SWAP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 14, 14),
+ [VOP2_WIN_UV_SWAP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 16, 16),
+ [VOP2_WIN_ACT_INFO] = REG_FIELD(RK3568_SMART_REGION0_ACT_INFO, 0, 31),
+ [VOP2_WIN_DSP_INFO] = REG_FIELD(RK3568_SMART_REGION0_DSP_INFO, 0, 31),
+ [VOP2_WIN_DSP_ST] = REG_FIELD(RK3568_SMART_REGION0_DSP_ST, 0, 28),
+ [VOP2_WIN_YRGB_MST] = REG_FIELD(RK3568_SMART_REGION0_YRGB_MST, 0, 31),
+ [VOP2_WIN_UV_MST] = REG_FIELD(RK3568_SMART_REGION0_CBR_MST, 0, 31),
+ [VOP2_WIN_YUV_CLIP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 17, 17),
+ [VOP2_WIN_YRGB_VIR] = REG_FIELD(RK3568_SMART_REGION0_VIR, 0, 15),
+ [VOP2_WIN_UV_VIR] = REG_FIELD(RK3568_SMART_REGION0_VIR, 16, 31),
+ [VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_SMART_CTRL0, 0, 0),
+ [VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_SMART_CTRL0, 1, 1),
+ [VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_SMART_CTRL0, 2, 3),
+ [VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_SMART_CTRL1, 31, 31),
+ [VOP2_WIN_COLOR_KEY] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 0, 29),
+ [VOP2_WIN_COLOR_KEY_EN] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 31, 31),
+ [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 4, 8),
+ [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 12, 16),
+ /* RK3588 only, reserved register on rk3568 */
+ [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3588_SMART_AXI_CTRL, 1, 1),
+
+ /* Scale */
+ [VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 0, 15),
+ [VOP2_WIN_SCALE_YRGB_Y] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 16, 31),
+ [VOP2_WIN_SCALE_CBCR_X] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_CBR, 0, 15),
+ [VOP2_WIN_SCALE_CBCR_Y] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_CBR, 16, 31),
+ [VOP2_WIN_YRGB_HOR_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 0, 1),
+ [VOP2_WIN_YRGB_HSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 2, 3),
+ [VOP2_WIN_YRGB_VER_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 4, 5),
+ [VOP2_WIN_YRGB_VSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 6, 7),
+ [VOP2_WIN_CBCR_HOR_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 8, 9),
+ [VOP2_WIN_CBCR_HSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 10, 11),
+ [VOP2_WIN_CBCR_VER_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 12, 13),
+ [VOP2_WIN_CBCR_VSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 14, 15),
+ [VOP2_WIN_BIC_COE_SEL] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 16, 17),
+ [VOP2_WIN_VSD_YRGB_GT2] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 8, 8),
+ [VOP2_WIN_VSD_YRGB_GT4] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 9, 9),
+ [VOP2_WIN_VSD_CBCR_GT2] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 10, 10),
+ [VOP2_WIN_VSD_CBCR_GT4] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 11, 11),
+ [VOP2_WIN_XMIRROR] = { .reg = 0xffffffff },
+ [VOP2_WIN_CLUSTER_ENABLE] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_ENABLE] = { .reg = 0xffffffff },
+ [VOP2_WIN_CLUSTER_LB_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_FORMAT] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_RB_SWAP] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_UV_SWAP] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_AUTO_GATING_EN] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_BLOCK_SPLIT_EN] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_PIC_VIR_WIDTH] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_TILE_NUM] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_PIC_OFFSET] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_PIC_SIZE] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_DSP_OFFSET] = { .reg = 0xffffffff },
+ [VOP2_WIN_TRANSFORM_OFFSET] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_HDR_PTR] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_HALF_BLOCK_EN] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_ROTATE_270] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_ROTATE_90] = { .reg = 0xffffffff },
+};
+
+static const struct reg_field rk3576_vop_cluster_regs[VOP2_WIN_MAX_REG] = {
+ [VOP2_WIN_ENABLE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 0, 0),
+ [VOP2_WIN_FORMAT] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 1, 5),
+ [VOP2_WIN_RB_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 14, 14),
+ [VOP2_WIN_UV_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 17, 17),
+ [VOP2_WIN_DITHER_UP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 18, 18),
+ [VOP2_WIN_ACT_INFO] = REG_FIELD(RK3568_CLUSTER_WIN_ACT_INFO, 0, 31),
+ [VOP2_WIN_DSP_INFO] = REG_FIELD(RK3568_CLUSTER_WIN_DSP_INFO, 0, 31),
+ [VOP2_WIN_DSP_ST] = REG_FIELD(RK3568_CLUSTER_WIN_DSP_ST, 0, 31),
+ [VOP2_WIN_YRGB_MST] = REG_FIELD(RK3568_CLUSTER_WIN_YRGB_MST, 0, 31),
+ [VOP2_WIN_UV_MST] = REG_FIELD(RK3568_CLUSTER_WIN_CBR_MST, 0, 31),
+ [VOP2_WIN_YUV_CLIP] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 19, 19),
+ [VOP2_WIN_YRGB_VIR] = REG_FIELD(RK3568_CLUSTER_WIN_VIR, 0, 15),
+ [VOP2_WIN_UV_VIR] = REG_FIELD(RK3568_CLUSTER_WIN_VIR, 16, 31),
+ [VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 8, 8),
+ [VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 9, 9),
+ [VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 10, 11),
+ [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 0, 4),
+ [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 5, 9),
+ /* Read-only bit on rk3576; writing to this bit has no effect. */
+ [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3568_CLUSTER_CTRL, 13, 13),
+
+ [VOP2_WIN_VP_SEL] = REG_FIELD(RK3576_CLUSTER_PORT_SEL_IMD, 0, 1),
+ [VOP2_WIN_DLY_NUM] = REG_FIELD(RK3576_CLUSTER_DLY_NUM, 0, 7),
+
+ /* Scale */
+ [VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 0, 15),
+ [VOP2_WIN_SCALE_YRGB_Y] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 16, 31),
+ [VOP2_WIN_BIC_COE_SEL] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 2, 3),
+ [VOP2_WIN_YRGB_VER_SCL_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 14, 15),
+ [VOP2_WIN_YRGB_HOR_SCL_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 22, 23),
+ [VOP2_WIN_VSD_YRGB_GT2] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 28, 28),
+ [VOP2_WIN_VSD_YRGB_GT4] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL1, 29, 29),
+
+ /* cluster regs */
+ [VOP2_WIN_AFBC_ENABLE] = REG_FIELD(RK3568_CLUSTER_CTRL, 1, 1),
+ [VOP2_WIN_CLUSTER_ENABLE] = REG_FIELD(RK3568_CLUSTER_CTRL, 0, 0),
+ [VOP2_WIN_CLUSTER_LB_MODE] = REG_FIELD(RK3568_CLUSTER_CTRL, 4, 7),
+
+ /* afbc regs */
+ [VOP2_WIN_AFBC_FORMAT] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 2, 6),
+ [VOP2_WIN_AFBC_RB_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 9, 9),
+ [VOP2_WIN_AFBC_UV_SWAP] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 10, 10),
+ [VOP2_WIN_AFBC_AUTO_GATING_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_OUTPUT_CTRL, 4, 4),
+ [VOP2_WIN_AFBC_HALF_BLOCK_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 7, 7),
+ [VOP2_WIN_AFBC_BLOCK_SPLIT_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 8, 8),
+ [VOP2_WIN_AFBC_PLD_OFFSET_EN] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_CTRL, 16, 16),
+ [VOP2_WIN_AFBC_HDR_PTR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_HDR_PTR, 0, 31),
+ [VOP2_WIN_AFBC_PIC_SIZE] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_PIC_SIZE, 0, 31),
+ [VOP2_WIN_AFBC_PIC_VIR_WIDTH] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_VIR_WIDTH, 0, 15),
+ [VOP2_WIN_AFBC_TILE_NUM] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_VIR_WIDTH, 16, 31),
+ [VOP2_WIN_AFBC_PIC_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_PIC_OFFSET, 0, 31),
+ [VOP2_WIN_AFBC_DSP_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_DSP_OFFSET, 0, 31),
+ [VOP2_WIN_AFBC_PLD_OFFSET] = REG_FIELD(RK3576_CLUSTER_WIN_AFBCD_PLD_PTR_OFFSET, 0, 31),
+ [VOP2_WIN_TRANSFORM_OFFSET] = REG_FIELD(RK3568_CLUSTER_WIN_TRANSFORM_OFFSET, 0, 31),
+ [VOP2_WIN_AFBC_ROTATE_90] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 0, 0),
+ [VOP2_WIN_AFBC_ROTATE_270] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 1, 1),
+ [VOP2_WIN_XMIRROR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 2, 2),
+ [VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_CLUSTER_WIN_AFBCD_ROTATE_MODE, 3, 3),
+ [VOP2_WIN_COLOR_KEY] = { .reg = 0xffffffff },
+ [VOP2_WIN_COLOR_KEY_EN] = { .reg = 0xffffffff },
+ [VOP2_WIN_SCALE_CBCR_X] = { .reg = 0xffffffff },
+ [VOP2_WIN_SCALE_CBCR_Y] = { .reg = 0xffffffff },
+ [VOP2_WIN_YRGB_HSCL_FILTER_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_YRGB_VSCL_FILTER_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_CBCR_VER_SCL_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_CBCR_HSCL_FILTER_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_CBCR_HOR_SCL_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_CBCR_VSCL_FILTER_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_VSD_CBCR_GT2] = { .reg = 0xffffffff },
+ [VOP2_WIN_VSD_CBCR_GT4] = { .reg = 0xffffffff },
+};
+
+static const struct reg_field rk3576_vop_smart_regs[VOP2_WIN_MAX_REG] = {
+ [VOP2_WIN_ENABLE] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 0, 0),
+ [VOP2_WIN_FORMAT] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 1, 5),
+ [VOP2_WIN_DITHER_UP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 12, 12),
+ [VOP2_WIN_RB_SWAP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 14, 14),
+ [VOP2_WIN_UV_SWAP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 16, 16),
+ [VOP2_WIN_ACT_INFO] = REG_FIELD(RK3568_SMART_REGION0_ACT_INFO, 0, 31),
+ [VOP2_WIN_DSP_INFO] = REG_FIELD(RK3568_SMART_REGION0_DSP_INFO, 0, 31),
+ [VOP2_WIN_DSP_ST] = REG_FIELD(RK3568_SMART_REGION0_DSP_ST, 0, 28),
+ [VOP2_WIN_YRGB_MST] = REG_FIELD(RK3568_SMART_REGION0_YRGB_MST, 0, 31),
+ [VOP2_WIN_UV_MST] = REG_FIELD(RK3568_SMART_REGION0_CBR_MST, 0, 31),
+ [VOP2_WIN_YUV_CLIP] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 17, 17),
+ [VOP2_WIN_YRGB_VIR] = REG_FIELD(RK3568_SMART_REGION0_VIR, 0, 15),
+ [VOP2_WIN_UV_VIR] = REG_FIELD(RK3568_SMART_REGION0_VIR, 16, 31),
+ [VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_SMART_CTRL0, 0, 0),
+ [VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_SMART_CTRL0, 1, 1),
+ [VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_SMART_CTRL0, 2, 3),
+ [VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_SMART_CTRL1, 31, 31),
+ [VOP2_WIN_COLOR_KEY] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 0, 29),
+ [VOP2_WIN_COLOR_KEY_EN] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 31, 31),
+ [VOP2_WIN_VP_SEL] = REG_FIELD(RK3576_SMART_PORT_SEL_IMD, 0, 1),
+ [VOP2_WIN_DLY_NUM] = REG_FIELD(RK3576_SMART_DLY_NUM, 0, 7),
+ [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 4, 8),
+ [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 12, 16),
+ [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3588_SMART_AXI_CTRL, 1, 1),
+
+ /* Scale */
+ [VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 0, 15),
+ [VOP2_WIN_SCALE_YRGB_Y] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 16, 31),
+ [VOP2_WIN_YRGB_HOR_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 0, 1),
+ [VOP2_WIN_YRGB_HSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 2, 3),
+ [VOP2_WIN_YRGB_VER_SCL_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 4, 5),
+ [VOP2_WIN_YRGB_VSCL_FILTER_MODE] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 6, 7),
+ [VOP2_WIN_BIC_COE_SEL] = REG_FIELD(RK3568_SMART_REGION0_SCL_CTRL, 16, 17),
+ [VOP2_WIN_VSD_YRGB_GT2] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 8, 8),
+ [VOP2_WIN_VSD_YRGB_GT4] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 9, 9),
+ [VOP2_WIN_VSD_CBCR_GT2] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 10, 10),
+ [VOP2_WIN_VSD_CBCR_GT4] = REG_FIELD(RK3568_SMART_REGION0_CTRL, 11, 11),
+ [VOP2_WIN_XMIRROR] = { .reg = 0xffffffff },
+
+ /* CBCR shares the same scale factor as YRGB */
+ [VOP2_WIN_SCALE_CBCR_X] = { .reg = 0xffffffff },
+ [VOP2_WIN_SCALE_CBCR_Y] = { .reg = 0xffffffff },
+ [VOP2_WIN_CBCR_HOR_SCL_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_CBCR_HSCL_FILTER_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_CBCR_VER_SCL_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_CBCR_VSCL_FILTER_MODE] = { .reg = 0xffffffff },
+
+ [VOP2_WIN_CLUSTER_ENABLE] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_ENABLE] = { .reg = 0xffffffff },
+ [VOP2_WIN_CLUSTER_LB_MODE] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_FORMAT] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_RB_SWAP] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_UV_SWAP] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_AUTO_GATING_EN] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_BLOCK_SPLIT_EN] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_PIC_VIR_WIDTH] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_TILE_NUM] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_PIC_OFFSET] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_PIC_SIZE] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_DSP_OFFSET] = { .reg = 0xffffffff },
+ [VOP2_WIN_TRANSFORM_OFFSET] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_HDR_PTR] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_HALF_BLOCK_EN] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_ROTATE_270] = { .reg = 0xffffffff },
+ [VOP2_WIN_AFBC_ROTATE_90] = { .reg = 0xffffffff },
+};
+
static const struct vop2_video_port_data rk3568_vop_video_ports[] = {
{
.id = 0,
@@ -177,10 +597,12 @@ static const struct vop2_win_data rk3568_vop_win_data[] = {
.name = "Smart0-win0",
.phys_id = ROCKCHIP_VOP2_SMART0,
.base = 0x1c00,
+ .possible_vp_mask = BIT(0) | BIT(1) | BIT(2),
.formats = formats_smart,
.nformats = ARRAY_SIZE(formats_smart),
.format_modifiers = format_modifiers,
- .layer_sel_id = 3,
+ /* 0xf means this layer can't be attached to this VP */
+ .layer_sel_id = { 3, 3, 3, 0xf },
.supported_rotations = DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_PRIMARY,
.max_upscale_factor = 8,
@@ -189,11 +611,12 @@ static const struct vop2_win_data rk3568_vop_win_data[] = {
}, {
.name = "Smart1-win0",
.phys_id = ROCKCHIP_VOP2_SMART1,
+ .possible_vp_mask = BIT(0) | BIT(1) | BIT(2),
.formats = formats_smart,
.nformats = ARRAY_SIZE(formats_smart),
.format_modifiers = format_modifiers,
.base = 0x1e00,
- .layer_sel_id = 7,
+ .layer_sel_id = { 7, 7, 7, 0xf },
.supported_rotations = DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_PRIMARY,
.max_upscale_factor = 8,
@@ -202,11 +625,12 @@ static const struct vop2_win_data rk3568_vop_win_data[] = {
}, {
.name = "Esmart1-win0",
.phys_id = ROCKCHIP_VOP2_ESMART1,
+ .possible_vp_mask = BIT(0) | BIT(1) | BIT(2),
.formats = formats_rk356x_esmart,
.nformats = ARRAY_SIZE(formats_rk356x_esmart),
.format_modifiers = format_modifiers,
.base = 0x1a00,
- .layer_sel_id = 6,
+ .layer_sel_id = { 6, 6, 6, 0xf },
.supported_rotations = DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_PRIMARY,
.max_upscale_factor = 8,
@@ -215,11 +639,12 @@ static const struct vop2_win_data rk3568_vop_win_data[] = {
}, {
.name = "Esmart0-win0",
.phys_id = ROCKCHIP_VOP2_ESMART0,
+ .possible_vp_mask = BIT(0) | BIT(1) | BIT(2),
.formats = formats_rk356x_esmart,
.nformats = ARRAY_SIZE(formats_rk356x_esmart),
.format_modifiers = format_modifiers,
.base = 0x1800,
- .layer_sel_id = 2,
+ .layer_sel_id = { 2, 2, 2, 0xf },
.supported_rotations = DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_PRIMARY,
.max_upscale_factor = 8,
@@ -229,10 +654,11 @@ static const struct vop2_win_data rk3568_vop_win_data[] = {
.name = "Cluster0-win0",
.phys_id = ROCKCHIP_VOP2_CLUSTER0,
.base = 0x1000,
+ .possible_vp_mask = BIT(0) | BIT(1) | BIT(2),
.formats = formats_cluster,
.nformats = ARRAY_SIZE(formats_cluster),
.format_modifiers = format_modifiers_afbc,
- .layer_sel_id = 0,
+ .layer_sel_id = { 0, 0, 0, 0xf },
.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
.max_upscale_factor = 4,
@@ -244,10 +670,11 @@ static const struct vop2_win_data rk3568_vop_win_data[] = {
.name = "Cluster1-win0",
.phys_id = ROCKCHIP_VOP2_CLUSTER1,
.base = 0x1200,
+ .possible_vp_mask = BIT(0) | BIT(1) | BIT(2),
.formats = formats_cluster,
.nformats = ARRAY_SIZE(formats_cluster),
.format_modifiers = format_modifiers_afbc,
- .layer_sel_id = 1,
+ .layer_sel_id = { 1, 1, 1, 0xf },
.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_OVERLAY,
@@ -340,6 +767,292 @@ static const struct vop2_regs_dump rk3568_regs_dump[] = {
},
};
+static const struct vop2_video_port_data rk3576_vop_video_ports[] = {
+ {
+ .id = 0,
+ .feature = VOP2_VP_FEATURE_OUTPUT_10BIT,
+ .gamma_lut_len = 1024,
+ .cubic_lut_len = 9 * 9 * 9, /* 9x9x9 */
+ .max_output = { 4096, 2304 },
+ /* win layer_mix hdr */
+ .pre_scan_max_dly = { 10, 8, 2, 0 },
+ .offset = 0xc00,
+ .pixel_rate = 2,
+ }, {
+ .id = 1,
+ .feature = VOP2_VP_FEATURE_OUTPUT_10BIT,
+ .gamma_lut_len = 1024,
+ .cubic_lut_len = 729, /* 9x9x9 */
+ .max_output = { 2560, 1600 },
+ /* win layer_mix hdr */
+ .pre_scan_max_dly = { 10, 6, 0, 0 },
+ .offset = 0xd00,
+ .pixel_rate = 1,
+ }, {
+ .id = 2,
+ .gamma_lut_len = 1024,
+ .max_output = { 1920, 1080 },
+ /* win layer_mix hdr */
+ .pre_scan_max_dly = { 10, 6, 0, 0 },
+ .offset = 0xe00,
+ .pixel_rate = 1,
+ },
+};
+
+/*
+ * The rk3576 VOP has 2 cluster windows and 4 esmart windows.
+ * Every cluster can work as one 4K window or be split into two 2K windows.
+ * All windows in a cluster support AFBCD.
+ *
+ * Every esmart window supports 4 multi-regions.
+ *
+ * VP0 can use Cluster0/1 and Esmart0/2
+ * VP1 can use Cluster0/1 and Esmart1/3
+ * VP2 can use Esmart0/1/2/3
+ *
+ * Scale filter mode:
+ *
+ * * Cluster:
+ * * Support prescale down:
+ * * H/V: gt2/avg2 or gt4/avg4
+ * * After prescale down:
+ * * nearest-neighbor/bilinear/multi-phase filter for scale up
+ * * nearest-neighbor/bilinear/multi-phase filter for scale down
+ *
+ * * Esmart:
+ * * Support prescale down:
+ * * H: gt2/avg2 or gt4/avg4
+ * * V: gt2 or gt4
+ * * After prescale down:
+ * * nearest-neighbor/bilinear/bicubic for scale up
+ * * nearest-neighbor/bilinear for scale down
+ *
+ * AXI config:
+ *
+ * * Cluster0 win0: 0xa, 0xb [AXI0]
+ * * Cluster0 win1: 0xc, 0xd [AXI0]
+ * * Cluster1 win0: 0x6, 0x7 [AXI0]
+ * * Cluster1 win1: 0x8, 0x9 [AXI0]
+ * * Esmart0: 0x10, 0x11 [AXI0]
+ * * Esmart1: 0x12, 0x13 [AXI0]
+ * * Esmart2: 0xa, 0xb [AXI1]
+ * * Esmart3: 0xc, 0xd [AXI1]
+ * * Lut dma rid: 0x1, 0x2, 0x3 [AXI0]
+ * * DCI dma rid: 0x4 [AXI0]
+ * * Metadata rid: 0x5 [AXI0]
+ *
+ * * Limits:
+ * * (1) Cluster0/1 are fixed on AXI0 by IC design
+ * * (2) 0x0 and 0xf can't be used;
+ * * (3) the read ID on each AXI bus is 5 bits wide;
+ * * (4) the cluster and lut/dci/metadata read IDs must be smaller than
+ * * 0xf; if a cluster read ID is bigger than 0xf, the VOP will hang
+ * * when system bandwidth is extremely tight.
+ */
+static const struct vop2_win_data rk3576_vop_win_data[] = {
+ {
+ .name = "Cluster0-win0",
+ .phys_id = ROCKCHIP_VOP2_CLUSTER0,
+ .base = 0x1000,
+ .possible_vp_mask = BIT(0) | BIT(1),
+ .formats = formats_rk3576_cluster,
+ .nformats = ARRAY_SIZE(formats_rk3576_cluster),
+ .format_modifiers = format_modifiers_rk3576_afbc,
+ .layer_sel_id = { 0, 0, 0xf, 0xf },
+ .supported_rotations = DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
+ .type = DRM_PLANE_TYPE_PRIMARY,
+ .axi_bus_id = 0,
+ .axi_yrgb_r_id = 0xa,
+ .axi_uv_r_id = 0xb,
+ .max_upscale_factor = 4,
+ .max_downscale_factor = 4,
+ .feature = WIN_FEATURE_AFBDC | WIN_FEATURE_CLUSTER,
+ }, {
+ .name = "Cluster1-win0",
+ .phys_id = ROCKCHIP_VOP2_CLUSTER1,
+ .base = 0x1200,
+ .possible_vp_mask = BIT(0) | BIT(1),
+ .formats = formats_rk3576_cluster,
+ .nformats = ARRAY_SIZE(formats_rk3576_cluster),
+ .format_modifiers = format_modifiers_rk3576_afbc,
+ .layer_sel_id = { 1, 1, 0xf, 0xf },
+ .supported_rotations = DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
+ .type = DRM_PLANE_TYPE_PRIMARY,
+ .axi_bus_id = 0,
+ .axi_yrgb_r_id = 6,
+ .axi_uv_r_id = 7,
+ .max_upscale_factor = 4,
+ .max_downscale_factor = 4,
+ .feature = WIN_FEATURE_AFBDC | WIN_FEATURE_CLUSTER,
+ }, {
+ .name = "Esmart0-win0",
+ .phys_id = ROCKCHIP_VOP2_ESMART0,
+ .base = 0x1800,
+ .possible_vp_mask = BIT(0) | BIT(2),
+ .formats = formats_rk3576_esmart,
+ .nformats = ARRAY_SIZE(formats_rk3576_esmart),
+ .format_modifiers = format_modifiers,
+ .layer_sel_id = { 2, 0xf, 0, 0xf },
+ .supported_rotations = DRM_MODE_REFLECT_Y,
+ .type = DRM_PLANE_TYPE_OVERLAY,
+ .axi_bus_id = 0,
+ .axi_yrgb_r_id = 0x10,
+ .axi_uv_r_id = 0x11,
+ .max_upscale_factor = 8,
+ .max_downscale_factor = 8,
+ }, {
+ .name = "Esmart1-win0",
+ .phys_id = ROCKCHIP_VOP2_ESMART1,
+ .base = 0x1a00,
+ .possible_vp_mask = BIT(1) | BIT(2),
+ .formats = formats_rk3576_esmart,
+ .nformats = ARRAY_SIZE(formats_rk3576_esmart),
+ .format_modifiers = format_modifiers,
+ .layer_sel_id = { 0xf, 2, 1, 0xf },
+ .supported_rotations = DRM_MODE_REFLECT_Y,
+ .type = DRM_PLANE_TYPE_OVERLAY,
+ .axi_bus_id = 0,
+ .axi_yrgb_r_id = 0x12,
+ .axi_uv_r_id = 0x13,
+ .max_upscale_factor = 8,
+ .max_downscale_factor = 8,
+ }, {
+ .name = "Esmart2-win0",
+ .phys_id = ROCKCHIP_VOP2_ESMART2,
+ .base = 0x1c00,
+ .possible_vp_mask = BIT(0) | BIT(2),
+ .formats = formats_rk3576_esmart,
+ .nformats = ARRAY_SIZE(formats_rk3576_esmart),
+ .format_modifiers = format_modifiers,
+ .layer_sel_id = { 3, 0xf, 2, 0xf },
+ .supported_rotations = DRM_MODE_REFLECT_Y,
+ .type = DRM_PLANE_TYPE_OVERLAY,
+ .axi_bus_id = 1,
+ .axi_yrgb_r_id = 0x0a,
+ .axi_uv_r_id = 0x0b,
+ .max_upscale_factor = 8,
+ .max_downscale_factor = 8,
+ }, {
+ .name = "Esmart3-win0",
+ .phys_id = ROCKCHIP_VOP2_ESMART3,
+ .base = 0x1e00,
+ .possible_vp_mask = BIT(1) | BIT(2),
+ .formats = formats_rk3576_esmart,
+ .nformats = ARRAY_SIZE(formats_rk3576_esmart),
+ .format_modifiers = format_modifiers,
+ .layer_sel_id = { 0xf, 3, 3, 0xf },
+ .supported_rotations = DRM_MODE_REFLECT_Y,
+ .type = DRM_PLANE_TYPE_OVERLAY,
+ .axi_bus_id = 1,
+ .axi_yrgb_r_id = 0x0c,
+ .axi_uv_r_id = 0x0d,
+ .max_upscale_factor = 8,
+ .max_downscale_factor = 8,
+ },
+};
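+
+/*
+ * Note on the per-VP encoding used in the table above: layer_sel_id mirrors
+ * possible_vp_mask.  For example, Esmart1-win0 has
+ * possible_vp_mask = BIT(1) | BIT(2) and layer_sel_id = { 0xf, 2, 1, 0xf }:
+ * it cannot be attached to VP0 or VP3 (0xf), and is selected as layer id 2
+ * on VP1 and layer id 1 on VP2.
+ */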
+
+static const struct vop2_regs_dump rk3576_regs_dump[] = {
+ {
+ .name = "SYS",
+ .base = RK3568_REG_CFG_DONE,
+ .size = 0x200,
+ .en_reg = 0,
+ .en_val = 0,
+ .en_mask = 0
+ }, {
+ .name = "OVL_SYS",
+ .base = RK3576_SYS_EXTRA_ALPHA_CTRL,
+ .size = 0x50,
+ .en_reg = 0,
+ .en_val = 0,
+ .en_mask = 0,
+ }, {
+ .name = "OVL_VP0",
+ .base = RK3576_OVL_CTRL(0),
+ .size = 0x80,
+ .en_reg = 0,
+ .en_val = 0,
+ .en_mask = 0,
+ }, {
+ .name = "OVL_VP1",
+ .base = RK3576_OVL_CTRL(1),
+ .size = 0x80,
+ .en_reg = 0,
+ .en_val = 0,
+ .en_mask = 0,
+ }, {
+ .name = "OVL_VP2",
+ .base = RK3576_OVL_CTRL(2),
+ .size = 0x80,
+ .en_reg = 0,
+ .en_val = 0,
+ .en_mask = 0,
+ }, {
+ .name = "VP0",
+ .base = RK3568_VP0_CTRL_BASE,
+ .size = 0x100,
+ .en_reg = RK3568_VP_DSP_CTRL,
+ .en_val = 0,
+ .en_mask = RK3568_VP_DSP_CTRL__STANDBY,
+ }, {
+ .name = "VP1",
+ .base = RK3568_VP1_CTRL_BASE,
+ .size = 0x100,
+ .en_reg = RK3568_VP_DSP_CTRL,
+ .en_val = 0,
+ .en_mask = RK3568_VP_DSP_CTRL__STANDBY,
+ }, {
+ .name = "VP2",
+ .base = RK3568_VP2_CTRL_BASE,
+ .size = 0x100,
+ .en_reg = RK3568_VP_DSP_CTRL,
+ .en_val = 0,
+ .en_mask = RK3568_VP_DSP_CTRL__STANDBY,
+ }, {
+ .name = "Cluster0",
+ .base = RK3568_CLUSTER0_CTRL_BASE,
+ .size = 0x200,
+ .en_reg = RK3568_CLUSTER_WIN_CTRL0,
+ .en_val = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ .en_mask = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ }, {
+ .name = "Cluster1",
+ .base = RK3568_CLUSTER1_CTRL_BASE,
+ .size = 0x200,
+ .en_reg = RK3568_CLUSTER_WIN_CTRL0,
+ .en_val = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ .en_mask = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ }, {
+ .name = "Esmart0",
+ .base = RK3568_ESMART0_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ }, {
+ .name = "Esmart1",
+ .base = RK3568_ESMART1_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ }, {
+ .name = "Esmart2",
+ .base = RK3588_ESMART2_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ }, {
+ .name = "Esmart3",
+ .base = RK3588_ESMART3_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ },
+};
+
static const struct vop2_video_port_data rk3588_vop_video_ports[] = {
{
.id = 0,
@@ -409,10 +1122,11 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.name = "Cluster0-win0",
.phys_id = ROCKCHIP_VOP2_CLUSTER0,
.base = 0x1000,
+ .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3),
.formats = formats_cluster,
.nformats = ARRAY_SIZE(formats_cluster),
.format_modifiers = format_modifiers_afbc,
- .layer_sel_id = 0,
+ .layer_sel_id = { 0, 0, 0, 0 },
.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
.axi_bus_id = 0,
@@ -427,10 +1141,11 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.name = "Cluster1-win0",
.phys_id = ROCKCHIP_VOP2_CLUSTER1,
.base = 0x1200,
+ .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3),
.formats = formats_cluster,
.nformats = ARRAY_SIZE(formats_cluster),
.format_modifiers = format_modifiers_afbc,
- .layer_sel_id = 1,
+ .layer_sel_id = { 1, 1, 1, 1 },
.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_PRIMARY,
@@ -445,10 +1160,11 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.name = "Cluster2-win0",
.phys_id = ROCKCHIP_VOP2_CLUSTER2,
.base = 0x1400,
+ .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3),
.formats = formats_cluster,
.nformats = ARRAY_SIZE(formats_cluster),
.format_modifiers = format_modifiers_afbc,
- .layer_sel_id = 4,
+ .layer_sel_id = { 4, 4, 4, 4 },
.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_PRIMARY,
@@ -463,10 +1179,11 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.name = "Cluster3-win0",
.phys_id = ROCKCHIP_VOP2_CLUSTER3,
.base = 0x1600,
+ .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3),
.formats = formats_cluster,
.nformats = ARRAY_SIZE(formats_cluster),
.format_modifiers = format_modifiers_afbc,
- .layer_sel_id = 5,
+ .layer_sel_id = { 5, 5, 5, 5 },
.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_PRIMARY,
@@ -480,11 +1197,12 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
}, {
.name = "Esmart0-win0",
.phys_id = ROCKCHIP_VOP2_ESMART0,
+ .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3),
.formats = formats_esmart,
.nformats = ARRAY_SIZE(formats_esmart),
.format_modifiers = format_modifiers,
.base = 0x1800,
- .layer_sel_id = 2,
+ .layer_sel_id = { 2, 2, 2, 2 },
.supported_rotations = DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_OVERLAY,
.axi_bus_id = 0,
@@ -496,11 +1214,12 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
}, {
.name = "Esmart1-win0",
.phys_id = ROCKCHIP_VOP2_ESMART1,
+ .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3),
.formats = formats_esmart,
.nformats = ARRAY_SIZE(formats_esmart),
.format_modifiers = format_modifiers,
.base = 0x1a00,
- .layer_sel_id = 3,
+ .layer_sel_id = { 3, 3, 3, 3 },
.supported_rotations = DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_OVERLAY,
.axi_bus_id = 0,
@@ -513,10 +1232,11 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.name = "Esmart2-win0",
.phys_id = ROCKCHIP_VOP2_ESMART2,
.base = 0x1c00,
+ .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3),
.formats = formats_esmart,
.nformats = ARRAY_SIZE(formats_esmart),
.format_modifiers = format_modifiers,
- .layer_sel_id = 6,
+ .layer_sel_id = { 6, 6, 6, 6 },
.supported_rotations = DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_OVERLAY,
.axi_bus_id = 1,
@@ -528,11 +1248,12 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
}, {
.name = "Esmart3-win0",
.phys_id = ROCKCHIP_VOP2_ESMART3,
+ .possible_vp_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3),
.formats = formats_esmart,
.nformats = ARRAY_SIZE(formats_esmart),
.format_modifiers = format_modifiers,
.base = 0x1e00,
- .layer_sel_id = 7,
+ .layer_sel_id = { 7, 7, 7, 7 },
.supported_rotations = DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_OVERLAY,
.axi_bus_id = 1,
@@ -647,7 +1368,1017 @@ static const struct vop2_regs_dump rk3588_regs_dump[] = {
},
};
+static unsigned long rk3568_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags)
+{
+ struct vop2 *vop2 = vp->vop2;
+ struct drm_crtc *crtc = &vp->crtc;
+ u32 die, dip;
+
+ die = vop2_readl(vop2, RK3568_DSP_IF_EN);
+ dip = vop2_readl(vop2, RK3568_DSP_IF_POL);
+
+ switch (id) {
+ case ROCKCHIP_VOP2_EP_RGB0:
+ die &= ~RK3568_SYS_DSP_INFACE_EN_RGB_MUX;
+ die |= RK3568_SYS_DSP_INFACE_EN_RGB |
+ FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_RGB_MUX, vp->id);
+ dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL;
+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags);
+ if (polflags & POLFLAG_DCLK_INV)
+ regmap_write(vop2->sys_grf, RK3568_GRF_VO_CON1, BIT(3 + 16) | BIT(3));
+ else
+ regmap_write(vop2->sys_grf, RK3568_GRF_VO_CON1, BIT(3 + 16));
+ break;
+ case ROCKCHIP_VOP2_EP_HDMI0:
+ die &= ~RK3568_SYS_DSP_INFACE_EN_HDMI_MUX;
+ die |= RK3568_SYS_DSP_INFACE_EN_HDMI |
+ FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_HDMI_MUX, vp->id);
+ dip &= ~RK3568_DSP_IF_POL__HDMI_PIN_POL;
+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__HDMI_PIN_POL, polflags);
+ break;
+ case ROCKCHIP_VOP2_EP_EDP0:
+ die &= ~RK3568_SYS_DSP_INFACE_EN_EDP_MUX;
+ die |= RK3568_SYS_DSP_INFACE_EN_EDP |
+ FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_EDP_MUX, vp->id);
+ dip &= ~RK3568_DSP_IF_POL__EDP_PIN_POL;
+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__EDP_PIN_POL, polflags);
+ break;
+ case ROCKCHIP_VOP2_EP_MIPI0:
+ die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX;
+ die |= RK3568_SYS_DSP_INFACE_EN_MIPI0 |
+ FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX, vp->id);
+ dip &= ~RK3568_DSP_IF_POL__MIPI_PIN_POL;
+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__MIPI_PIN_POL, polflags);
+ break;
+ case ROCKCHIP_VOP2_EP_MIPI1:
+ die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI1_MUX;
+ die |= RK3568_SYS_DSP_INFACE_EN_MIPI1 |
+ FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_MIPI1_MUX, vp->id);
+ dip &= ~RK3568_DSP_IF_POL__MIPI_PIN_POL;
+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__MIPI_PIN_POL, polflags);
+ break;
+ case ROCKCHIP_VOP2_EP_LVDS0:
+ die &= ~RK3568_SYS_DSP_INFACE_EN_LVDS0_MUX;
+ die |= RK3568_SYS_DSP_INFACE_EN_LVDS0 |
+ FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_LVDS0_MUX, vp->id);
+ dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL;
+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags);
+ break;
+ case ROCKCHIP_VOP2_EP_LVDS1:
+ die &= ~RK3568_SYS_DSP_INFACE_EN_LVDS1_MUX;
+ die |= RK3568_SYS_DSP_INFACE_EN_LVDS1 |
+ FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_LVDS1_MUX, vp->id);
+ dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL;
+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags);
+ break;
+ default:
+ drm_err(vop2->drm, "Invalid interface id %d on vp%d\n", id, vp->id);
+ return 0;
+ }
+
+ dip |= RK3568_DSP_IF_POL__CFG_DONE_IMD;
+
+ vop2_writel(vop2, RK3568_DSP_IF_EN, die);
+ vop2_writel(vop2, RK3568_DSP_IF_POL, dip);
+
+ return crtc->state->adjusted_mode.crtc_clock * 1000LL;
+}
+
+static unsigned long rk3576_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags)
+{
+ struct vop2 *vop2 = vp->vop2;
+ struct drm_crtc *crtc = &vp->crtc;
+ struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
+ struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(crtc->state);
+ u8 port_pix_rate = vp->data->pixel_rate;
+ int dclk_core_div, dclk_out_div, if_pixclk_div, if_dclk_sel;
+ u32 ctrl, vp_clk_div, reg, dclk_div;
+ unsigned long dclk_in_rate, dclk_core_rate;
+
+ if (vcstate->output_mode == ROCKCHIP_OUT_MODE_YUV420 || adjusted_mode->crtc_clock > 600000)
+ dclk_div = 2;
+ else
+ dclk_div = 1;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
+ dclk_core_rate = adjusted_mode->crtc_clock / 2;
+ else
+ dclk_core_rate = adjusted_mode->crtc_clock / port_pix_rate;
+
+ dclk_in_rate = adjusted_mode->crtc_clock / dclk_div;
+
+ dclk_core_div = dclk_in_rate > dclk_core_rate ? 1 : 0;
+
+ if (vop2_output_if_is_edp(id))
+ if_pixclk_div = port_pix_rate == 2 ? RK3576_DSP_IF_PCLK_DIV : 0;
+ else
+ if_pixclk_div = port_pix_rate == 1 ? RK3576_DSP_IF_PCLK_DIV : 0;
+
+ if (vcstate->output_mode == ROCKCHIP_OUT_MODE_YUV420) {
+ if_dclk_sel = RK3576_DSP_IF_DCLK_SEL_OUT;
+ dclk_out_div = 1;
+ } else {
+ if_dclk_sel = 0;
+ dclk_out_div = 0;
+ }
+
+ switch (id) {
+ case ROCKCHIP_VOP2_EP_HDMI0:
+ reg = RK3576_HDMI0_IF_CTRL;
+ break;
+ case ROCKCHIP_VOP2_EP_EDP0:
+ reg = RK3576_EDP0_IF_CTRL;
+ break;
+ case ROCKCHIP_VOP2_EP_MIPI0:
+ reg = RK3576_MIPI0_IF_CTRL;
+ break;
+ case ROCKCHIP_VOP2_EP_DP0:
+ reg = RK3576_DP0_IF_CTRL;
+ break;
+ case ROCKCHIP_VOP2_EP_DP1:
+ reg = RK3576_DP1_IF_CTRL;
+ break;
+ default:
+ drm_err(vop2->drm, "Invalid interface id %d on vp%d\n", id, vp->id);
+ return 0;
+ }
+
+ ctrl = vop2_readl(vop2, reg);
+ ctrl &= ~RK3576_DSP_IF_DCLK_SEL_OUT;
+ ctrl &= ~RK3576_DSP_IF_PCLK_DIV;
+ ctrl &= ~RK3576_DSP_IF_MUX;
+ ctrl |= RK3576_DSP_IF_CFG_DONE_IMD;
+ ctrl |= if_dclk_sel | if_pixclk_div;
+ ctrl |= RK3576_DSP_IF_CLK_OUT_EN | RK3576_DSP_IF_EN;
+ ctrl |= FIELD_PREP(RK3576_DSP_IF_MUX, vp->id);
+ ctrl |= FIELD_PREP(RK3576_DSP_IF_PIN_POL, polflags);
+ vop2_writel(vop2, reg, ctrl);
+
+ vp_clk_div = FIELD_PREP(RK3588_VP_CLK_CTRL__DCLK_CORE_DIV, dclk_core_div);
+ vp_clk_div |= FIELD_PREP(RK3588_VP_CLK_CTRL__DCLK_OUT_DIV, dclk_out_div);
+
+ vop2_vp_write(vp, RK3588_VP_CLK_CTRL, vp_clk_div);
+
+ return dclk_in_rate * 1000LL;
+}
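+
+/*
+ * Example of the dclk derivation above: HDMI0 on VP0 (pixel_rate = 2) with a
+ * 1080p60 mode (crtc_clock = 148500 kHz, RGB output) gives dclk_div = 1,
+ * dclk_core_rate = 148500 / 2 = 74250 and dclk_in_rate = 148500, so
+ * dclk_core_div = 1, if_pixclk_div stays 0 and the function returns 148.5 MHz.
+ */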
+
+/*
+ * Calculate the dclk on rk3588.
+ * The available dividers for dclk are 1, 2 and 4.
+ */
+static unsigned long rk3588_calc_dclk(unsigned long child_clk, unsigned long max_dclk)
+{
+ if (child_clk * 4 <= max_dclk)
+ return child_clk * 4;
+ else if (child_clk * 2 <= max_dclk)
+ return child_clk * 2;
+ else if (child_clk <= max_dclk)
+ return child_clk;
+ else
+ return 0;
+}
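+
+/*
+ * For example, with max_dclk = 600 MHz: a child clock of 148.5 MHz yields
+ * 594 MHz (x4), 297 MHz also yields 594 MHz (x2), and anything above
+ * 600 MHz yields 0 (out of range).
+ */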
+
+/*
+ * 4 pixclk/cycle on rk3588
+ * RGB/eDP/HDMI: if_pixclk >= dclk_core
+ * DP: dp_pixclk = dclk_out <= dclk_core
+ * DSI: mipi_pixclk <= dclk_out <= dclk_core
+ */
+static unsigned long rk3588_calc_cru_cfg(struct vop2_video_port *vp, int id,
+ int *dclk_core_div, int *dclk_out_div,
+ int *if_pixclk_div, int *if_dclk_div)
+{
+ struct vop2 *vop2 = vp->vop2;
+ struct drm_crtc *crtc = &vp->crtc;
+ struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
+ struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(crtc->state);
+ int output_mode = vcstate->output_mode;
+ unsigned long v_pixclk = adjusted_mode->crtc_clock * 1000LL; /* video timing pixclk */
+ unsigned long dclk_core_rate = v_pixclk >> 2;
+ unsigned long dclk_rate = v_pixclk;
+ unsigned long dclk_out_rate;
+ unsigned long if_pixclk_rate;
+ int K = 1;
+
+ if (vop2_output_if_is_hdmi(id)) {
+ /*
+ * K = 2: dclk_core = if_pixclk_rate > if_dclk_rate
+ * K = 1: dclk_core = hdmi_edp_dclk > if_pixclk_rate
+ */
+ if (output_mode == ROCKCHIP_OUT_MODE_YUV420) {
+ dclk_rate = dclk_rate >> 1;
+ K = 2;
+ }
+
+ /*
+ * if_pixclk_rate = (dclk_core_rate << 1) / K;
+ * if_dclk_rate = dclk_core_rate / K;
+ * *if_pixclk_div = dclk_rate / if_pixclk_rate;
+ * *if_dclk_div = dclk_rate / if_dclk_rate;
+ */
+ *if_pixclk_div = 2;
+ *if_dclk_div = 4;
+ } else if (vop2_output_if_is_edp(id)) {
+ /*
+ * edp_pixclk = edp_dclk > dclk_core
+ */
+ if_pixclk_rate = v_pixclk / K;
+ dclk_rate = if_pixclk_rate * K;
+ /*
+ * *if_pixclk_div = dclk_rate / if_pixclk_rate;
+ * *if_dclk_div = *if_pixclk_div;
+ */
+ *if_pixclk_div = K;
+ *if_dclk_div = K;
+ } else if (vop2_output_if_is_dp(id)) {
+ if (output_mode == ROCKCHIP_OUT_MODE_YUV420)
+ dclk_out_rate = v_pixclk >> 3;
+ else
+ dclk_out_rate = v_pixclk >> 2;
+
+ dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000000);
+ if (!dclk_rate) {
+ drm_err(vop2->drm, "DP dclk_out_rate out of range, dclk_out_rate: %ld Hz\n",
+ dclk_out_rate);
+ return 0;
+ }
+ *dclk_out_div = dclk_rate / dclk_out_rate;
+ } else if (vop2_output_if_is_mipi(id)) {
+ if_pixclk_rate = dclk_core_rate / K;
+ /*
+ * dclk_core = dclk_out * K = if_pixclk * K = v_pixclk / 4
+ */
+ dclk_out_rate = if_pixclk_rate;
+ /*
+ * dclk_rate = N * dclk_core_rate, N = (1, 2, 4);
+ * we end up with a small factor here.
+ */
+ dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000000);
+ if (!dclk_rate) {
+ drm_err(vop2->drm, "MIPI dclk out of range, dclk_out_rate: %ld Hz\n",
+ dclk_out_rate);
+ return 0;
+ }
+ *dclk_out_div = dclk_rate / dclk_out_rate;
+ /*
+ * mipi pixclk == dclk_out
+ */
+ *if_pixclk_div = 1;
+ } else if (vop2_output_if_is_dpi(id)) {
+ dclk_rate = v_pixclk;
+ }
+
+ *dclk_core_div = dclk_rate / dclk_core_rate;
+ *if_pixclk_div = ilog2(*if_pixclk_div);
+ *if_dclk_div = ilog2(*if_dclk_div);
+ *dclk_core_div = ilog2(*dclk_core_div);
+ *dclk_out_div = ilog2(*dclk_out_div);
+
+ drm_dbg(vop2->drm, "dclk: %ld, pixclk_div: %d, dclk_div: %d\n",
+ dclk_rate, *if_pixclk_div, *if_dclk_div);
+
+ return dclk_rate;
+}
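+
+/*
+ * HDMI example for the divider setup above: a 4K60 RGB mode has
+ * v_pixclk = 594 MHz, so dclk_core_rate = 148.5 MHz and dclk_rate stays at
+ * 594 MHz; if_pixclk_div = 2, if_dclk_div = 4 and
+ * dclk_core_div = 594 / 148.5 = 4 before the ilog2() conversion, i.e. the
+ * programmed fields become 1, 2 and 2 respectively.
+ */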
+
+/*
+ * MIPI port mux on rk3588:
+ * 0: Video Port2
+ * 1: Video Port3
+ * 3: Video Port1 (MIPI1 only)
+ */
+static u32 rk3588_get_mipi_port_mux(int vp_id)
+{
+ if (vp_id == 1)
+ return 3;
+ else if (vp_id == 3)
+ return 1;
+ else
+ return 0;
+}
+
+static u32 rk3588_get_hdmi_pol(u32 flags)
+{
+ u32 val;
+
+ val = (flags & DRM_MODE_FLAG_NHSYNC) ? BIT(HSYNC_POSITIVE) : 0;
+ val |= (flags & DRM_MODE_FLAG_NVSYNC) ? BIT(VSYNC_POSITIVE) : 0;
+
+ return val;
+}
+
+static unsigned long rk3588_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags)
+{
+ struct vop2 *vop2 = vp->vop2;
+ int dclk_core_div, dclk_out_div, if_pixclk_div, if_dclk_div;
+ unsigned long clock;
+ u32 die, dip, div, vp_clk_div, val;
+
+ clock = rk3588_calc_cru_cfg(vp, id, &dclk_core_div, &dclk_out_div,
+ &if_pixclk_div, &if_dclk_div);
+ if (!clock)
+ return 0;
+
+ vp_clk_div = FIELD_PREP(RK3588_VP_CLK_CTRL__DCLK_CORE_DIV, dclk_core_div);
+ vp_clk_div |= FIELD_PREP(RK3588_VP_CLK_CTRL__DCLK_OUT_DIV, dclk_out_div);
+
+ die = vop2_readl(vop2, RK3568_DSP_IF_EN);
+ dip = vop2_readl(vop2, RK3568_DSP_IF_POL);
+ div = vop2_readl(vop2, RK3568_DSP_IF_CTRL);
+
+ switch (id) {
+ case ROCKCHIP_VOP2_EP_HDMI0:
+ div &= ~RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV;
+ div &= ~RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV;
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV, if_dclk_div);
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV, if_pixclk_div);
+ die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX;
+ die |= RK3588_SYS_DSP_INFACE_EN_HDMI0 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX, vp->id);
+ val = rk3588_get_hdmi_pol(polflags);
+ regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 1, 1));
+ regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0, HIWORD_UPDATE(val, 6, 5));
+ break;
+ case ROCKCHIP_VOP2_EP_HDMI1:
+ div &= ~RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV;
+ div &= ~RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV;
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV, if_dclk_div);
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV, if_pixclk_div);
+ die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX;
+ die |= RK3588_SYS_DSP_INFACE_EN_HDMI1 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX, vp->id);
+ val = rk3588_get_hdmi_pol(polflags);
+ regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 4, 4));
+ regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0, HIWORD_UPDATE(val, 8, 7));
+ break;
+ case ROCKCHIP_VOP2_EP_EDP0:
+ div &= ~RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV;
+ div &= ~RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV;
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV, if_dclk_div);
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV, if_pixclk_div);
+ die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX;
+ die |= RK3588_SYS_DSP_INFACE_EN_EDP0 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX, vp->id);
+ regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 0, 0));
+ break;
+ case ROCKCHIP_VOP2_EP_EDP1:
+ div &= ~RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV;
+ div &= ~RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV;
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV, if_dclk_div);
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV, if_pixclk_div);
+ die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX;
+ die |= RK3588_SYS_DSP_INFACE_EN_EDP1 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX, vp->id);
+ regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 3, 3));
+ break;
+ case ROCKCHIP_VOP2_EP_MIPI0:
+ div &= ~RK3588_DSP_IF_MIPI0_PCLK_DIV;
+ div |= FIELD_PREP(RK3588_DSP_IF_MIPI0_PCLK_DIV, if_pixclk_div);
+ die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI0_MUX;
+ val = rk3588_get_mipi_port_mux(vp->id);
+ die |= RK3588_SYS_DSP_INFACE_EN_MIPI0 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI0_MUX, !!val);
+ break;
+ case ROCKCHIP_VOP2_EP_MIPI1:
+ div &= ~RK3588_DSP_IF_MIPI1_PCLK_DIV;
+ div |= FIELD_PREP(RK3588_DSP_IF_MIPI1_PCLK_DIV, if_pixclk_div);
+ die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX;
+ val = rk3588_get_mipi_port_mux(vp->id);
+ die |= RK3588_SYS_DSP_INFACE_EN_MIPI1 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX, val);
+ break;
+ case ROCKCHIP_VOP2_EP_DP0:
+ die &= ~RK3588_SYS_DSP_INFACE_EN_DP0_MUX;
+ die |= RK3588_SYS_DSP_INFACE_EN_DP0 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_DP0_MUX, vp->id);
+ dip &= ~RK3588_DSP_IF_POL__DP0_PIN_POL;
+ dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP0_PIN_POL, polflags);
+ break;
+ case ROCKCHIP_VOP2_EP_DP1:
+ die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX;
+ die |= RK3588_SYS_DSP_INFACE_EN_MIPI1 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX, vp->id);
+ dip &= ~RK3588_DSP_IF_POL__DP1_PIN_POL;
+ dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP1_PIN_POL, polflags);
+ break;
+ default:
+ drm_err(vop2->drm, "Invalid interface id %d on vp%d\n", id, vp->id);
+ return 0;
+ }
+
+ dip |= RK3568_DSP_IF_POL__CFG_DONE_IMD;
+
+ vop2_vp_write(vp, RK3588_VP_CLK_CTRL, vp_clk_div);
+ vop2_writel(vop2, RK3568_DSP_IF_EN, die);
+ vop2_writel(vop2, RK3568_DSP_IF_CTRL, div);
+ vop2_writel(vop2, RK3568_DSP_IF_POL, dip);
+
+ return clock;
+}
+
+static bool is_opaque(u16 alpha)
+{
+ return (alpha >> 8) == 0xff;
+}
+
+static void vop2_parse_alpha(struct vop2_alpha_config *alpha_config,
+ struct vop2_alpha *alpha)
+{
+ int src_glb_alpha_en = is_opaque(alpha_config->src_glb_alpha_value) ? 0 : 1;
+ int dst_glb_alpha_en = is_opaque(alpha_config->dst_glb_alpha_value) ? 0 : 1;
+ int src_color_mode = alpha_config->src_premulti_en ?
+ ALPHA_SRC_PRE_MUL : ALPHA_SRC_NO_PRE_MUL;
+ int dst_color_mode = alpha_config->dst_premulti_en ?
+ ALPHA_SRC_PRE_MUL : ALPHA_SRC_NO_PRE_MUL;
+
+ alpha->src_color_ctrl.val = 0;
+ alpha->dst_color_ctrl.val = 0;
+ alpha->src_alpha_ctrl.val = 0;
+ alpha->dst_alpha_ctrl.val = 0;
+
+ if (!alpha_config->src_pixel_alpha_en)
+ alpha->src_color_ctrl.bits.blend_mode = ALPHA_GLOBAL;
+ else if (alpha_config->src_pixel_alpha_en && !src_glb_alpha_en)
+ alpha->src_color_ctrl.bits.blend_mode = ALPHA_PER_PIX;
+ else
+ alpha->src_color_ctrl.bits.blend_mode = ALPHA_PER_PIX_GLOBAL;
+
+ alpha->src_color_ctrl.bits.alpha_en = 1;
+
+ if (alpha->src_color_ctrl.bits.blend_mode == ALPHA_GLOBAL) {
+ alpha->src_color_ctrl.bits.color_mode = src_color_mode;
+ alpha->src_color_ctrl.bits.factor_mode = SRC_FAC_ALPHA_SRC_GLOBAL;
+ } else if (alpha->src_color_ctrl.bits.blend_mode == ALPHA_PER_PIX) {
+ alpha->src_color_ctrl.bits.color_mode = src_color_mode;
+ alpha->src_color_ctrl.bits.factor_mode = SRC_FAC_ALPHA_ONE;
+ } else {
+ alpha->src_color_ctrl.bits.color_mode = ALPHA_SRC_PRE_MUL;
+ alpha->src_color_ctrl.bits.factor_mode = SRC_FAC_ALPHA_SRC_GLOBAL;
+ }
+ alpha->src_color_ctrl.bits.glb_alpha = alpha_config->src_glb_alpha_value >> 8;
+ alpha->src_color_ctrl.bits.alpha_mode = ALPHA_STRAIGHT;
+ alpha->src_color_ctrl.bits.alpha_cal_mode = ALPHA_SATURATION;
+
+ alpha->dst_color_ctrl.bits.alpha_mode = ALPHA_STRAIGHT;
+ alpha->dst_color_ctrl.bits.alpha_cal_mode = ALPHA_SATURATION;
+ alpha->dst_color_ctrl.bits.blend_mode = ALPHA_GLOBAL;
+ alpha->dst_color_ctrl.bits.glb_alpha = alpha_config->dst_glb_alpha_value >> 8;
+ alpha->dst_color_ctrl.bits.color_mode = dst_color_mode;
+ alpha->dst_color_ctrl.bits.factor_mode = ALPHA_SRC_INVERSE;
+
+ alpha->src_alpha_ctrl.bits.alpha_mode = ALPHA_STRAIGHT;
+ alpha->src_alpha_ctrl.bits.blend_mode = alpha->src_color_ctrl.bits.blend_mode;
+ alpha->src_alpha_ctrl.bits.alpha_cal_mode = ALPHA_SATURATION;
+ alpha->src_alpha_ctrl.bits.factor_mode = ALPHA_ONE;
+
+ alpha->dst_alpha_ctrl.bits.alpha_mode = ALPHA_STRAIGHT;
+ if (alpha_config->dst_pixel_alpha_en && !dst_glb_alpha_en)
+ alpha->dst_alpha_ctrl.bits.blend_mode = ALPHA_PER_PIX;
+ else
+ alpha->dst_alpha_ctrl.bits.blend_mode = ALPHA_PER_PIX_GLOBAL;
+ alpha->dst_alpha_ctrl.bits.alpha_cal_mode = ALPHA_NO_SATURATION;
+ alpha->dst_alpha_ctrl.bits.factor_mode = ALPHA_SRC_INVERSE;
+}
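+
+/*
+ * For instance, an opaque, non-premultiplied source plane with per-pixel
+ * alpha ends up with src blend_mode = ALPHA_PER_PIX, color_mode =
+ * ALPHA_SRC_NO_PRE_MUL and factor_mode = SRC_FAC_ALPHA_ONE, while a
+ * per-pixel alpha source that additionally has a non-opaque global alpha
+ * falls back to ALPHA_PER_PIX_GLOBAL.
+ */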
+
+static int vop2_find_start_mixer_id_for_vp(struct vop2 *vop2, u8 port_id)
+{
+ struct vop2_video_port *vp;
+ int used_layer = 0;
+ int i;
+
+ for (i = 0; i < port_id; i++) {
+ vp = &vop2->vps[i];
+ used_layer += hweight32(vp->win_mask);
+ }
+
+ return used_layer;
+}
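+
+/*
+ * For example, if VP0 uses two windows and VP1 uses three, the first mixer
+ * id for VP2 is 2 + 3 = 5; each video port's mixers follow directly after
+ * those of the lower-numbered ports.
+ */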
+
+static void vop2_setup_cluster_alpha(struct vop2 *vop2, struct vop2_win *main_win)
+{
+ struct vop2_alpha_config alpha_config;
+ struct vop2_alpha alpha;
+ struct drm_plane_state *bottom_win_pstate;
+ bool src_pixel_alpha_en = false;
+ u16 src_glb_alpha_val, dst_glb_alpha_val;
+ u32 src_color_ctrl_reg, dst_color_ctrl_reg, src_alpha_ctrl_reg, dst_alpha_ctrl_reg;
+ u32 offset = 0;
+ bool premulti_en = false;
+ bool swap = false;
+
+ /* In one-win mode, win0 is the dst/bottom win and win1 is an all-zero src/top win */
+ bottom_win_pstate = main_win->base.state;
+ src_glb_alpha_val = 0;
+ dst_glb_alpha_val = main_win->base.state->alpha;
+
+ if (!bottom_win_pstate->fb)
+ return;
+
+ alpha_config.src_premulti_en = premulti_en;
+ alpha_config.dst_premulti_en = false;
+ alpha_config.src_pixel_alpha_en = src_pixel_alpha_en;
+ alpha_config.dst_pixel_alpha_en = true; /* alpha value needs to be transferred to the next mix */
+ alpha_config.src_glb_alpha_value = src_glb_alpha_val;
+ alpha_config.dst_glb_alpha_value = dst_glb_alpha_val;
+ vop2_parse_alpha(&alpha_config, &alpha);
+
+ alpha.src_color_ctrl.bits.src_dst_swap = swap;
+
+ switch (main_win->data->phys_id) {
+ case ROCKCHIP_VOP2_CLUSTER0:
+ offset = 0x0;
+ break;
+ case ROCKCHIP_VOP2_CLUSTER1:
+ offset = 0x10;
+ break;
+ case ROCKCHIP_VOP2_CLUSTER2:
+ offset = 0x20;
+ break;
+ case ROCKCHIP_VOP2_CLUSTER3:
+ offset = 0x30;
+ break;
+ }
+
+ if (vop2->version <= VOP_VERSION_RK3588) {
+ src_color_ctrl_reg = RK3568_CLUSTER0_MIX_SRC_COLOR_CTRL;
+ dst_color_ctrl_reg = RK3568_CLUSTER0_MIX_DST_COLOR_CTRL;
+ src_alpha_ctrl_reg = RK3568_CLUSTER0_MIX_SRC_ALPHA_CTRL;
+ dst_alpha_ctrl_reg = RK3568_CLUSTER0_MIX_DST_ALPHA_CTRL;
+ } else {
+ src_color_ctrl_reg = RK3576_CLUSTER0_MIX_SRC_COLOR_CTRL;
+ dst_color_ctrl_reg = RK3576_CLUSTER0_MIX_DST_COLOR_CTRL;
+ src_alpha_ctrl_reg = RK3576_CLUSTER0_MIX_SRC_ALPHA_CTRL;
+ dst_alpha_ctrl_reg = RK3576_CLUSTER0_MIX_DST_ALPHA_CTRL;
+ }
+
+ vop2_writel(vop2, src_color_ctrl_reg + offset, alpha.src_color_ctrl.val);
+ vop2_writel(vop2, dst_color_ctrl_reg + offset, alpha.dst_color_ctrl.val);
+ vop2_writel(vop2, src_alpha_ctrl_reg + offset, alpha.src_alpha_ctrl.val);
+ vop2_writel(vop2, dst_alpha_ctrl_reg + offset, alpha.dst_alpha_ctrl.val);
+}
+
+static void vop2_setup_alpha(struct vop2_video_port *vp)
+{
+ struct vop2 *vop2 = vp->vop2;
+ struct drm_framebuffer *fb;
+ struct vop2_alpha_config alpha_config;
+ struct vop2_alpha alpha;
+ struct drm_plane *plane;
+ int pixel_alpha_en;
+ int premulti_en, gpremulti_en = 0;
+ int mixer_id;
+ u32 src_color_ctrl_reg, dst_color_ctrl_reg, src_alpha_ctrl_reg, dst_alpha_ctrl_reg;
+ u32 offset;
+ bool bottom_layer_alpha_en = false;
+ u32 dst_global_alpha = DRM_BLEND_ALPHA_OPAQUE;
+
+ if (vop2->version <= VOP_VERSION_RK3588)
+ mixer_id = vop2_find_start_mixer_id_for_vp(vop2, vp->id);
+ else
+ mixer_id = 0;
+
+ alpha_config.dst_pixel_alpha_en = true; /* alpha value needs to be transferred to the next mix */
+
+ drm_atomic_crtc_for_each_plane(plane, &vp->crtc) {
+ struct vop2_win *win = to_vop2_win(plane);
+
+ if (plane->state->normalized_zpos == 0 &&
+ !is_opaque(plane->state->alpha) &&
+ !vop2_cluster_window(win)) {
+ /*
+ * If the bottom layer has a global alpha effect (except for cluster
+ * layers, because the cluster mix has already handled the bottom
+ * layer's global alpha value), the bottom layer mix needs to handle
+ * the global alpha.
+ */
+ bottom_layer_alpha_en = true;
+ dst_global_alpha = plane->state->alpha;
+ }
+ }
+
+ if (vop2->version <= VOP_VERSION_RK3588) {
+ src_color_ctrl_reg = RK3568_MIX0_SRC_COLOR_CTRL;
+ dst_color_ctrl_reg = RK3568_MIX0_DST_COLOR_CTRL;
+ src_alpha_ctrl_reg = RK3568_MIX0_SRC_ALPHA_CTRL;
+ dst_alpha_ctrl_reg = RK3568_MIX0_DST_ALPHA_CTRL;
+ } else {
+ src_color_ctrl_reg = RK3576_OVL_MIX0_SRC_COLOR_CTRL(vp->id);
+ dst_color_ctrl_reg = RK3576_OVL_MIX0_DST_COLOR_CTRL(vp->id);
+ src_alpha_ctrl_reg = RK3576_OVL_MIX0_SRC_ALPHA_CTRL(vp->id);
+ dst_alpha_ctrl_reg = RK3576_OVL_MIX0_DST_ALPHA_CTRL(vp->id);
+ }
+
+ drm_atomic_crtc_for_each_plane(plane, &vp->crtc) {
+ struct vop2_win *win = to_vop2_win(plane);
+ int zpos = plane->state->normalized_zpos;
+
+ /*
+ * Alpha needs to be configured starting from the second layer.
+ */
+ if (zpos == 0)
+ continue;
+
+ if (plane->state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI)
+ premulti_en = 1;
+ else
+ premulti_en = 0;
+
+ plane = &win->base;
+ fb = plane->state->fb;
+
+ pixel_alpha_en = fb->format->has_alpha;
+
+ alpha_config.src_premulti_en = premulti_en;
+
+ if (bottom_layer_alpha_en && zpos == 1) {
+ gpremulti_en = premulti_en;
+ /* Cd = Cs + (1 - As) * Cd * Agd */
+ alpha_config.dst_premulti_en = false;
+ alpha_config.src_pixel_alpha_en = pixel_alpha_en;
+ alpha_config.src_glb_alpha_value = plane->state->alpha;
+ alpha_config.dst_glb_alpha_value = dst_global_alpha;
+ } else if (vop2_cluster_window(win)) {
+ /* Mix output data only has pixel alpha */
+ alpha_config.dst_premulti_en = true;
+ alpha_config.src_pixel_alpha_en = true;
+ alpha_config.src_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE;
+ alpha_config.dst_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE;
+ } else {
+ /* Cd = Cs + (1 - As) * Cd */
+ alpha_config.dst_premulti_en = true;
+ alpha_config.src_pixel_alpha_en = pixel_alpha_en;
+ alpha_config.src_glb_alpha_value = plane->state->alpha;
+ alpha_config.dst_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE;
+ }
+
+ vop2_parse_alpha(&alpha_config, &alpha);
+
+ offset = (mixer_id + zpos - 1) * 0x10;
+
+ vop2_writel(vop2, src_color_ctrl_reg + offset, alpha.src_color_ctrl.val);
+ vop2_writel(vop2, dst_color_ctrl_reg + offset, alpha.dst_color_ctrl.val);
+ vop2_writel(vop2, src_alpha_ctrl_reg + offset, alpha.src_alpha_ctrl.val);
+ vop2_writel(vop2, dst_alpha_ctrl_reg + offset, alpha.dst_alpha_ctrl.val);
+ }
+
+ if (vp->id == 0) {
+ if (vop2->version <= VOP_VERSION_RK3588) {
+ src_color_ctrl_reg = RK3568_HDR0_SRC_COLOR_CTRL;
+ dst_color_ctrl_reg = RK3568_HDR0_DST_COLOR_CTRL;
+ src_alpha_ctrl_reg = RK3568_HDR0_SRC_ALPHA_CTRL;
+ dst_alpha_ctrl_reg = RK3568_HDR0_DST_ALPHA_CTRL;
+ } else {
+ src_color_ctrl_reg = RK3576_OVL_HDR_SRC_COLOR_CTRL(vp->id);
+ dst_color_ctrl_reg = RK3576_OVL_HDR_DST_COLOR_CTRL(vp->id);
+ src_alpha_ctrl_reg = RK3576_OVL_HDR_SRC_ALPHA_CTRL(vp->id);
+ dst_alpha_ctrl_reg = RK3576_OVL_HDR_DST_ALPHA_CTRL(vp->id);
+ }
+
+ if (bottom_layer_alpha_en) {
+ /* Transfer pixel alpha to hdr mix */
+ alpha_config.src_premulti_en = gpremulti_en;
+ alpha_config.dst_premulti_en = true;
+ alpha_config.src_pixel_alpha_en = true;
+ alpha_config.src_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE;
+ alpha_config.dst_glb_alpha_value = DRM_BLEND_ALPHA_OPAQUE;
+
+ vop2_parse_alpha(&alpha_config, &alpha);
+
+ vop2_writel(vop2, src_color_ctrl_reg, alpha.src_color_ctrl.val);
+ vop2_writel(vop2, dst_color_ctrl_reg, alpha.dst_color_ctrl.val);
+ vop2_writel(vop2, src_alpha_ctrl_reg, alpha.src_alpha_ctrl.val);
+ vop2_writel(vop2, dst_alpha_ctrl_reg, alpha.dst_alpha_ctrl.val);
+ } else {
+ vop2_writel(vop2, src_color_ctrl_reg, 0);
+ }
+ }
+}
+
+static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp)
+{
+ struct vop2 *vop2 = vp->vop2;
+ struct drm_plane *plane;
+ u32 layer_sel = 0;
+ u32 port_sel;
+ u8 layer_id;
+ u8 old_layer_id;
+ u8 layer_sel_id;
+ unsigned int ofs;
+ u32 ovl_ctrl;
+ int i;
+ struct vop2_video_port *vp0 = &vop2->vps[0];
+ struct vop2_video_port *vp1 = &vop2->vps[1];
+ struct vop2_video_port *vp2 = &vop2->vps[2];
+ struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(vp->crtc.state);
+
+ ovl_ctrl = vop2_readl(vop2, RK3568_OVL_CTRL);
+ ovl_ctrl |= RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD;
+ if (vcstate->yuv_overlay)
+ ovl_ctrl |= RK3568_OVL_CTRL__YUV_MODE(vp->id);
+ else
+ ovl_ctrl &= ~RK3568_OVL_CTRL__YUV_MODE(vp->id);
+
+ vop2_writel(vop2, RK3568_OVL_CTRL, ovl_ctrl);
+
+ port_sel = vop2_readl(vop2, RK3568_OVL_PORT_SEL);
+ port_sel &= RK3568_OVL_PORT_SEL__SEL_PORT;
+
+ if (vp0->nlayers)
+ port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT0_MUX,
+ vp0->nlayers - 1);
+ else
+ port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT0_MUX, 8);
+
+ if (vp1->nlayers)
+ port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT1_MUX,
+ (vp0->nlayers + vp1->nlayers - 1));
+ else
+ port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT1_MUX, 8);
+
+ if (vp2->nlayers)
+ port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX,
+ (vp2->nlayers + vp1->nlayers + vp0->nlayers - 1));
+ else
+ port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX, 8);
+
+ layer_sel = vop2_readl(vop2, RK3568_OVL_LAYER_SEL);
+
+ ofs = 0;
+ for (i = 0; i < vp->id; i++)
+ ofs += vop2->vps[i].nlayers;
+
+ drm_atomic_crtc_for_each_plane(plane, &vp->crtc) {
+ struct vop2_win *win = to_vop2_win(plane);
+ struct vop2_win *old_win;
+
+ layer_id = (u8)(plane->state->normalized_zpos + ofs);
+ /*
+ * Find the layer this win was bound to in the old state.
+ */
+ for (old_layer_id = 0; old_layer_id < vop2->data->win_size; old_layer_id++) {
+ layer_sel_id = (layer_sel >> (4 * old_layer_id)) & 0xf;
+ if (layer_sel_id == win->data->layer_sel_id[vp->id])
+ break;
+ }
+
+ /*
+ * Find the win bound to this layer in the old state.
+ */
+ for (i = 0; i < vop2->data->win_size; i++) {
+ old_win = &vop2->win[i];
+ layer_sel_id = (layer_sel >> (4 * layer_id)) & 0xf;
+ if (layer_sel_id == old_win->data->layer_sel_id[vp->id])
+ break;
+ }
+
+ switch (win->data->phys_id) {
+ case ROCKCHIP_VOP2_CLUSTER0:
+ port_sel &= ~RK3568_OVL_PORT_SEL__CLUSTER0;
+ port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__CLUSTER0, vp->id);
+ break;
+ case ROCKCHIP_VOP2_CLUSTER1:
+ port_sel &= ~RK3568_OVL_PORT_SEL__CLUSTER1;
+ port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__CLUSTER1, vp->id);
+ break;
+ case ROCKCHIP_VOP2_CLUSTER2:
+ port_sel &= ~RK3588_OVL_PORT_SEL__CLUSTER2;
+ port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__CLUSTER2, vp->id);
+ break;
+ case ROCKCHIP_VOP2_CLUSTER3:
+ port_sel &= ~RK3588_OVL_PORT_SEL__CLUSTER3;
+ port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__CLUSTER3, vp->id);
+ break;
+ case ROCKCHIP_VOP2_ESMART0:
+ port_sel &= ~RK3568_OVL_PORT_SEL__ESMART0;
+ port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__ESMART0, vp->id);
+ break;
+ case ROCKCHIP_VOP2_ESMART1:
+ port_sel &= ~RK3568_OVL_PORT_SEL__ESMART1;
+ port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__ESMART1, vp->id);
+ break;
+ case ROCKCHIP_VOP2_ESMART2:
+ port_sel &= ~RK3588_OVL_PORT_SEL__ESMART2;
+ port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__ESMART2, vp->id);
+ break;
+ case ROCKCHIP_VOP2_ESMART3:
+ port_sel &= ~RK3588_OVL_PORT_SEL__ESMART3;
+ port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__ESMART3, vp->id);
+ break;
+ case ROCKCHIP_VOP2_SMART0:
+ port_sel &= ~RK3568_OVL_PORT_SEL__SMART0;
+ port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__SMART0, vp->id);
+ break;
+ case ROCKCHIP_VOP2_SMART1:
+ port_sel &= ~RK3568_OVL_PORT_SEL__SMART1;
+ port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__SMART1, vp->id);
+ break;
+ }
+
+ layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(layer_id, 0x7);
+ layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(layer_id, win->data->layer_sel_id[vp->id]);
+ /*
+ * When we bind a window from layerM to layerN, we also need to move the old
+ * window on layerN to layerM to avoid one window being selected by two or
+ * more layers.
+ */
+ layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(old_layer_id, 0x7);
+ layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(old_layer_id,
+ old_win->data->layer_sel_id[vp->id]);
+ }
+
+ vop2_writel(vop2, RK3568_OVL_LAYER_SEL, layer_sel);
+ vop2_writel(vop2, RK3568_OVL_PORT_SEL, port_sel);
+}
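+
+/*
+ * Swap example for the loop above: if Esmart0 (layer_sel_id 2) moves to the
+ * layer currently occupied by Cluster0 (layer_sel_id 0), that layer is
+ * reprogrammed to 2 and Esmart0's old layer gets 0, so no window ends up
+ * selected by two layers at once.
+ */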
+
+static void rk3568_vop2_setup_dly_for_windows(struct vop2_video_port *vp)
+{
+ struct vop2 *vop2 = vp->vop2;
+ struct vop2_win *win;
+ int i = 0;
+ u32 cdly = 0, sdly = 0;
+
+ for (i = 0; i < vop2->data->win_size; i++) {
+ u32 dly;
+
+ win = &vop2->win[i];
+ dly = win->delay;
+
+ switch (win->data->phys_id) {
+ case ROCKCHIP_VOP2_CLUSTER0:
+ cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER0_0, dly);
+ cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER0_1, dly);
+ break;
+ case ROCKCHIP_VOP2_CLUSTER1:
+ cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER1_0, dly);
+ cdly |= FIELD_PREP(RK3568_CLUSTER_DLY_NUM__CLUSTER1_1, dly);
+ break;
+ case ROCKCHIP_VOP2_ESMART0:
+ sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__ESMART0, dly);
+ break;
+ case ROCKCHIP_VOP2_ESMART1:
+ sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__ESMART1, dly);
+ break;
+ case ROCKCHIP_VOP2_SMART0:
+ case ROCKCHIP_VOP2_ESMART2:
+ sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__SMART0, dly);
+ break;
+ case ROCKCHIP_VOP2_SMART1:
+ case ROCKCHIP_VOP2_ESMART3:
+ sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__SMART1, dly);
+ break;
+ }
+ }
+
+ vop2_writel(vop2, RK3568_CLUSTER_DLY_NUM, cdly);
+ vop2_writel(vop2, RK3568_SMART_DLY_NUM, sdly);
+}
+
+static void rk3568_vop2_setup_overlay(struct vop2_video_port *vp)
+{
+ struct vop2 *vop2 = vp->vop2;
+ struct drm_crtc *crtc = &vp->crtc;
+ struct drm_plane *plane;
+
+ vp->win_mask = 0;
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ struct vop2_win *win = to_vop2_win(plane);
+
+ win->delay = win->data->dly[VOP2_DLY_MODE_DEFAULT];
+
+ vp->win_mask |= BIT(win->data->phys_id);
+
+ if (vop2_cluster_window(win))
+ vop2_setup_cluster_alpha(vop2, win);
+ }
+
+ if (!vp->win_mask)
+ return;
+
+ rk3568_vop2_setup_layer_mixer(vp);
+ vop2_setup_alpha(vp);
+ rk3568_vop2_setup_dly_for_windows(vp);
+}
+
+static void rk3576_vop2_setup_layer_mixer(struct vop2_video_port *vp)
+{
+ struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(vp->crtc.state);
+ struct vop2 *vop2 = vp->vop2;
+ struct drm_plane *plane;
+ u32 layer_sel = 0xffff; /* 0xf means this layer is disabled */
+ u32 ovl_ctrl;
+
+ ovl_ctrl = vop2_readl(vop2, RK3576_OVL_CTRL(vp->id));
+ if (vcstate->yuv_overlay)
+ ovl_ctrl |= RK3576_OVL_CTRL__YUV_MODE;
+ else
+ ovl_ctrl &= ~RK3576_OVL_CTRL__YUV_MODE;
+
+ vop2_writel(vop2, RK3576_OVL_CTRL(vp->id), ovl_ctrl);
+
+ drm_atomic_crtc_for_each_plane(plane, &vp->crtc) {
+ struct vop2_win *win = to_vop2_win(plane);
+
+ layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(plane->state->normalized_zpos,
+ 0xf);
+ layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(plane->state->normalized_zpos,
+ win->data->layer_sel_id[vp->id]);
+ }
+
+ vop2_writel(vop2, RK3576_OVL_LAYER_SEL(vp->id), layer_sel);
+}
+
+static void rk3576_vop2_setup_dly_for_windows(struct vop2_video_port *vp)
+{
+ struct drm_plane *plane;
+ struct vop2_win *win;
+
+ drm_atomic_crtc_for_each_plane(plane, &vp->crtc) {
+ win = to_vop2_win(plane);
+ vop2_win_write(win, VOP2_WIN_DLY_NUM, 0);
+ }
+}
+
+static void rk3576_vop2_setup_overlay(struct vop2_video_port *vp)
+{
+ struct vop2 *vop2 = vp->vop2;
+ struct drm_crtc *crtc = &vp->crtc;
+ struct drm_plane *plane;
+
+ vp->win_mask = 0;
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ struct vop2_win *win = to_vop2_win(plane);
+
+ win->delay = win->data->dly[VOP2_DLY_MODE_DEFAULT];
+ vp->win_mask |= BIT(win->data->phys_id);
+
+ if (vop2_cluster_window(win))
+ vop2_setup_cluster_alpha(vop2, win);
+ }
+
+ if (!vp->win_mask)
+ return;
+
+ rk3576_vop2_setup_layer_mixer(vp);
+ vop2_setup_alpha(vp);
+ rk3576_vop2_setup_dly_for_windows(vp);
+}
+
+static void rk3568_vop2_setup_bg_dly(struct vop2_video_port *vp)
+{
+ struct drm_crtc *crtc = &vp->crtc;
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ u16 hdisplay = mode->crtc_hdisplay;
+ u16 hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ u32 bg_dly;
+ u32 pre_scan_dly;
+
+ bg_dly = vp->data->pre_scan_max_dly[3];
+ vop2_writel(vp->vop2, RK3568_VP_BG_MIX_CTRL(vp->id),
+ FIELD_PREP(RK3568_VP_BG_MIX_CTRL__BG_DLY, bg_dly));
+
+ pre_scan_dly = ((bg_dly + (hdisplay >> 1) - 1) << 16) | hsync_len;
+ vop2_vp_write(vp, RK3568_VP_PRE_SCAN_HTIMING, pre_scan_dly);
+}
+
+static void rk3576_vop2_setup_bg_dly(struct vop2_video_port *vp)
+{
+ struct drm_crtc *crtc = &vp->crtc;
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ u16 hdisplay = mode->crtc_hdisplay;
+ u16 hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ u32 bg_dly;
+ u32 pre_scan_dly;
+
+ bg_dly = vp->data->pre_scan_max_dly[VOP2_DLY_WIN] +
+ vp->data->pre_scan_max_dly[VOP2_DLY_LAYER_MIX] +
+ vp->data->pre_scan_max_dly[VOP2_DLY_HDR_MIX];
+
+ vop2_writel(vp->vop2, RK3576_OVL_BG_MIX_CTRL(vp->id),
+ FIELD_PREP(RK3576_OVL_BG_MIX_CTRL__BG_DLY, bg_dly));
+
+ pre_scan_dly = ((bg_dly + (hdisplay >> 1) - 1) << 16) | hsync_len;
+ vop2_vp_write(vp, RK3568_VP_PRE_SCAN_HTIMING, pre_scan_dly);
+}
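+
+/*
+ * Both helpers above pack the background delay plus half the active width
+ * into the upper 16 bits of the pre-scan timing and the hsync length into
+ * the lower 16 bits.  On rk3576 VP1 (pre_scan_max_dly = 10 + 6 + 0 = 16)
+ * with a 1920x1080 CEA mode (hsync_len = 44) this yields
+ * ((16 + 960 - 1) << 16) | 44.
+ */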
+
+static const struct vop2_ops rk3568_vop_ops = {
+ .setup_intf_mux = rk3568_set_intf_mux,
+ .setup_bg_dly = rk3568_vop2_setup_bg_dly,
+ .setup_overlay = rk3568_vop2_setup_overlay,
+};
+
+static const struct vop2_ops rk3576_vop_ops = {
+ .setup_intf_mux = rk3576_set_intf_mux,
+ .setup_bg_dly = rk3576_vop2_setup_bg_dly,
+ .setup_overlay = rk3576_vop2_setup_overlay,
+};
+
+static const struct vop2_ops rk3588_vop_ops = {
+ .setup_intf_mux = rk3588_set_intf_mux,
+ .setup_bg_dly = rk3568_vop2_setup_bg_dly,
+ .setup_overlay = rk3568_vop2_setup_overlay,
+};
+
static const struct vop2_data rk3566_vop = {
+ .version = VOP_VERSION_RK3568,
.feature = VOP2_FEATURE_HAS_SYS_GRF,
.nr_vps = 3,
.max_input = { 4096, 2304 },
@@ -655,12 +2386,18 @@ static const struct vop2_data rk3566_vop = {
.vp = rk3568_vop_video_ports,
.win = rk3568_vop_win_data,
.win_size = ARRAY_SIZE(rk3568_vop_win_data),
+ .cluster_reg = rk3568_vop_cluster_regs,
+ .nr_cluster_regs = ARRAY_SIZE(rk3568_vop_cluster_regs),
+ .smart_reg = rk3568_vop_smart_regs,
+ .nr_smart_regs = ARRAY_SIZE(rk3568_vop_smart_regs),
.regs_dump = rk3568_regs_dump,
.regs_dump_size = ARRAY_SIZE(rk3568_regs_dump),
+ .ops = &rk3568_vop_ops,
.soc_id = 3566,
};
static const struct vop2_data rk3568_vop = {
+ .version = VOP_VERSION_RK3568,
.feature = VOP2_FEATURE_HAS_SYS_GRF,
.nr_vps = 3,
.max_input = { 4096, 2304 },
@@ -668,12 +2405,37 @@ static const struct vop2_data rk3568_vop = {
.vp = rk3568_vop_video_ports,
.win = rk3568_vop_win_data,
.win_size = ARRAY_SIZE(rk3568_vop_win_data),
+ .cluster_reg = rk3568_vop_cluster_regs,
+ .nr_cluster_regs = ARRAY_SIZE(rk3568_vop_cluster_regs),
+ .smart_reg = rk3568_vop_smart_regs,
+ .nr_smart_regs = ARRAY_SIZE(rk3568_vop_smart_regs),
.regs_dump = rk3568_regs_dump,
.regs_dump_size = ARRAY_SIZE(rk3568_regs_dump),
+ .ops = &rk3568_vop_ops,
.soc_id = 3568,
};
+static const struct vop2_data rk3576_vop = {
+ .version = VOP_VERSION_RK3576,
+ .feature = VOP2_FEATURE_HAS_SYS_PMU,
+ .nr_vps = 3,
+ .max_input = { 4096, 4320 },
+ .max_output = { 4096, 4320 },
+ .vp = rk3576_vop_video_ports,
+ .win = rk3576_vop_win_data,
+ .win_size = ARRAY_SIZE(rk3576_vop_win_data),
+ .cluster_reg = rk3576_vop_cluster_regs,
+ .nr_cluster_regs = ARRAY_SIZE(rk3576_vop_cluster_regs),
+ .smart_reg = rk3576_vop_smart_regs,
+ .nr_smart_regs = ARRAY_SIZE(rk3576_vop_smart_regs),
+ .regs_dump = rk3576_regs_dump,
+ .regs_dump_size = ARRAY_SIZE(rk3576_regs_dump),
+ .ops = &rk3576_vop_ops,
+ .soc_id = 3576,
+};
+
static const struct vop2_data rk3588_vop = {
+ .version = VOP_VERSION_RK3588,
.feature = VOP2_FEATURE_HAS_SYS_GRF | VOP2_FEATURE_HAS_VO1_GRF |
VOP2_FEATURE_HAS_VOP_GRF | VOP2_FEATURE_HAS_SYS_PMU,
.nr_vps = 4,
@@ -682,8 +2444,13 @@ static const struct vop2_data rk3588_vop = {
.vp = rk3588_vop_video_ports,
.win = rk3588_vop_win_data,
.win_size = ARRAY_SIZE(rk3588_vop_win_data),
+ .cluster_reg = rk3568_vop_cluster_regs,
+ .nr_cluster_regs = ARRAY_SIZE(rk3568_vop_cluster_regs),
+ .smart_reg = rk3568_vop_smart_regs,
+ .nr_smart_regs = ARRAY_SIZE(rk3568_vop_smart_regs),
.regs_dump = rk3588_regs_dump,
.regs_dump_size = ARRAY_SIZE(rk3588_regs_dump),
+ .ops = &rk3588_vop_ops,
.soc_id = 3588,
};
@@ -695,6 +2462,9 @@ static const struct of_device_id vop2_dt_match[] = {
.compatible = "rockchip,rk3568-vop",
.data = &rk3568_vop,
}, {
+ .compatible = "rockchip,rk3576-vop",
+ .data = &rk3576_vop,
+ }, {
.compatible = "rockchip,rk3588-vop",
.data = &rk3588_vop
}, {
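
The per-SoC vop2_ops tables above (rk3568_vop_ops, rk3576_vop_ops, rk3588_vop_ops) move overlay, background-delay and interface-mux programming behind a small vtable selected through vop2_data. The call sites are outside this hunk, so the following is only a minimal sketch of how such a table is typically dispatched; the assumption that struct vop2 keeps its matched vop2_data in a member named data, and the wrapper name itself, are illustrative rather than taken from this patch.

	/* Illustrative dispatch through the per-SoC ops table chosen at probe
	 * time via the of_device_id .data pointer. The vop2->data member name
	 * and this wrapper are assumptions, not part of the patch.
	 */
	static void vop2_dispatch_overlay(struct vop2_video_port *vp)
	{
		const struct vop2_ops *ops = vp->vop2->data->ops;

		ops->setup_bg_dly(vp);	/* rk3568 or rk3576 variant */
		ops->setup_overlay(vp);	/* layer mixer, alpha, window delays */
	}
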
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 69bcf0e99d57..bd39db7bb240 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -28,10 +28,9 @@
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
-#include "gpu_scheduler_trace.h"
+#include "sched_internal.h"
-#define to_drm_sched_job(sched_job) \
- container_of((sched_job), struct drm_sched_job, queue_node)
+#include "gpu_scheduler_trace.h"
/**
* drm_sched_entity_init - Init a context entity used by scheduler when
@@ -92,7 +91,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
* the lowest priority available.
*/
if (entity->priority >= sched_list[0]->num_rqs) {
- drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n",
+ dev_err(sched_list[0]->dev, "entity has out-of-bounds priority: %u. num_rqs: %u\n",
entity->priority, sched_list[0]->num_rqs);
entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
(s32) DRM_SCHED_PRIORITY_KERNEL);
@@ -152,18 +151,6 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
return false;
}
-/* Return true if entity could provide a job. */
-bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
-{
- if (spsc_queue_peek(&entity->job_queue) == NULL)
- return false;
-
- if (READ_ONCE(entity->dependency))
- return false;
-
- return true;
-}
-
/**
* drm_sched_entity_error - return error of last scheduled job
* @entity: scheduler entity to check
@@ -255,13 +242,20 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity)
/* The entity is guaranteed to not be used by the scheduler */
prev = rcu_dereference_check(entity->last_scheduled, true);
dma_fence_get(prev);
- while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
+ while ((job = drm_sched_entity_queue_pop(entity))) {
struct drm_sched_fence *s_fence = job->s_fence;
dma_fence_get(&s_fence->finished);
- if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
- drm_sched_entity_kill_jobs_cb))
+ if (!prev ||
+ dma_fence_add_callback(prev, &job->finish_cb,
+ drm_sched_entity_kill_jobs_cb)) {
+ /*
+ * Adding callback above failed.
+ * dma_fence_put() checks for NULL.
+ */
+ dma_fence_put(prev);
drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
+ }
prev = &s_fence->finished;
}
@@ -477,7 +471,7 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
struct drm_sched_job *sched_job;
- sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
+ sched_job = drm_sched_entity_queue_peek(entity);
if (!sched_job)
return NULL;
@@ -513,7 +507,7 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
struct drm_sched_job *next;
- next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
+ next = drm_sched_entity_queue_peek(entity);
if (next) {
struct drm_sched_rq *rq;
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 0f35f009b9d3..e971528504a5 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -29,6 +29,8 @@
#include <drm/gpu_scheduler.h>
+#include "sched_internal.h"
+
static struct kmem_cache *sched_fence_slab;
static int __init drm_sched_fence_slab_init(void)
diff --git a/drivers/gpu/drm/scheduler/sched_internal.h b/drivers/gpu/drm/scheduler/sched_internal.h
new file mode 100644
index 000000000000..599cf6e1bb74
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/sched_internal.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef _DRM_GPU_SCHEDULER_INTERNAL_H_
+#define _DRM_GPU_SCHEDULER_INTERNAL_H_
+
+
+/* Used to choose between FIFO and RR job-scheduling */
+extern int drm_sched_policy;
+
+#define DRM_SCHED_POLICY_RR 0
+#define DRM_SCHED_POLICY_FIFO 1
+
+void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
+
+void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
+ struct drm_sched_entity *entity);
+void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
+ struct drm_sched_entity *entity);
+
+void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
+ struct drm_sched_rq *rq, ktime_t ts);
+
+void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
+struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
+
+struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *s_entity,
+ void *owner);
+void drm_sched_fence_init(struct drm_sched_fence *fence,
+ struct drm_sched_entity *entity);
+void drm_sched_fence_free(struct drm_sched_fence *fence);
+
+void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
+ struct dma_fence *parent);
+void drm_sched_fence_finished(struct drm_sched_fence *fence, int result);
+
+/**
+ * drm_sched_entity_queue_pop - Low level helper for popping queued jobs
+ *
+ * @entity: scheduler entity
+ *
+ * Low level helper for popping queued jobs.
+ *
+ * Returns: The job dequeued or NULL.
+ */
+static inline struct drm_sched_job *
+drm_sched_entity_queue_pop(struct drm_sched_entity *entity)
+{
+ struct spsc_node *node;
+
+ node = spsc_queue_pop(&entity->job_queue);
+ if (!node)
+ return NULL;
+
+ return container_of(node, struct drm_sched_job, queue_node);
+}
+
+/**
+ * drm_sched_entity_queue_peek - Low level helper for peeking at the job queue
+ *
+ * @entity: scheduler entity
+ *
+ * Low level helper for peeking at the job queue.
+ *
+ * Returns: The job at the head of the queue or NULL.
+ */
+static inline struct drm_sched_job *
+drm_sched_entity_queue_peek(struct drm_sched_entity *entity)
+{
+ struct spsc_node *node;
+
+ node = spsc_queue_peek(&entity->job_queue);
+ if (!node)
+ return NULL;
+
+ return container_of(node, struct drm_sched_job, queue_node);
+}
+
+/* Return true if entity could provide a job. */
+static inline bool
+drm_sched_entity_is_ready(struct drm_sched_entity *entity)
+{
+ if (!spsc_queue_count(&entity->job_queue))
+ return false;
+
+ if (READ_ONCE(entity->dependency))
+ return false;
+
+ return true;
+}
+
+#endif
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 57da84908752..bfea608a7106 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -64,12 +64,6 @@
* credit limit, the job won't be executed. Instead, the scheduler will wait
* until the credit count has decreased enough to not overflow its credit limit.
* This implies waiting for previously executed jobs.
- *
- * Optionally, drivers may register a callback (update_job_credits) provided by
- * struct drm_sched_backend_ops to update the job's credits dynamically. The
- * scheduler executes this callback every time the scheduler considers a job for
- * execution and subsequently checks whether the job fits the scheduler's credit
- * limit.
*/
#include <linux/wait.h>
@@ -84,6 +78,8 @@
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>
+#include "sched_internal.h"
+
#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"
@@ -93,9 +89,6 @@ static struct lockdep_map drm_sched_lockdep_map = {
};
#endif
-#define to_drm_sched_job(sched_job) \
- container_of((sched_job), struct drm_sched_job, queue_node)
-
int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
/**
@@ -109,9 +102,9 @@ static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched)
{
u32 credits;
- drm_WARN_ON(sched, check_sub_overflow(sched->credit_limit,
- atomic_read(&sched->credit_count),
- &credits));
+ WARN_ON(check_sub_overflow(sched->credit_limit,
+ atomic_read(&sched->credit_count),
+ &credits));
return credits;
}
@@ -129,23 +122,18 @@ static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
{
struct drm_sched_job *s_job;
- s_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
+ s_job = drm_sched_entity_queue_peek(entity);
if (!s_job)
return false;
- if (sched->ops->update_job_credits) {
- s_job->credits = sched->ops->update_job_credits(s_job);
-
- drm_WARN(sched, !s_job->credits,
- "Jobs with zero credits bypass job-flow control.\n");
- }
-
/* If a job exceeds the credit limit, truncate it to the credit limit
* itself to guarantee forward progress.
*/
- if (drm_WARN(sched, s_job->credits > sched->credit_limit,
- "Jobs may not exceed the credit limit, truncate.\n"))
+ if (s_job->credits > sched->credit_limit) {
+ dev_WARN(sched->dev,
+ "Jobs may not exceed the credit limit, truncate.\n");
s_job->credits = sched->credit_limit;
+ }
return drm_sched_available_credits(sched) >= s_job->credits;
}
@@ -803,7 +791,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
* or worse--a blank screen--leave a trail in the
* logs, so this can be debugged easier.
*/
- drm_err(job->sched, "%s: entity has no rq!\n", __func__);
+ dev_err(job->sched->dev, "%s: entity has no rq!\n", __func__);
return -ENOENT;
}
@@ -998,17 +986,42 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
/**
+ * drm_sched_job_has_dependency - check whether fence is the job's dependency
+ * @job: scheduler job to check
+ * @fence: fence to look for
+ *
+ * Returns:
+ * True if @fence is found within the job's dependencies, or otherwise false.
+ */
+bool drm_sched_job_has_dependency(struct drm_sched_job *job,
+ struct dma_fence *fence)
+{
+ struct dma_fence *f;
+ unsigned long index;
+
+ xa_for_each(&job->dependencies, index, f) {
+ if (f == fence)
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(drm_sched_job_has_dependency);
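
drm_sched_job_has_dependency() gives drivers a way to query a job's dependency xarray before adding the same fence twice. A minimal, hypothetical caller is sketched below; the wrapper name and the deduplication policy are assumptions, only drm_sched_job_has_dependency() and drm_sched_job_add_dependency() are existing scheduler API.

	/* Hypothetical helper: track a fence only once per job.
	 * drm_sched_job_add_dependency() consumes a fence reference, so the
	 * caller's own reference is duplicated with dma_fence_get() first.
	 */
	static int example_add_dependency_once(struct drm_sched_job *job,
					       struct dma_fence *fence)
	{
		if (drm_sched_job_has_dependency(job, fence))
			return 0;

		return drm_sched_job_add_dependency(job, dma_fence_get(fence));
	}
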
+
+/**
* drm_sched_job_cleanup - clean up scheduler job resources
* @job: scheduler job to clean up
*
* Cleans up the resources allocated with drm_sched_job_init().
*
* Drivers should call this from their error unwind code if @job is aborted
- * before drm_sched_job_arm() is called.
+ * before it was submitted to an entity with drm_sched_entity_push_job().
*
- * After that point of no return @job is committed to be executed by the
- * scheduler, and this function should be called from the
- * &drm_sched_backend_ops.free_job callback.
+ * Since calling drm_sched_job_arm() causes the job's fences to be initialized,
+ * it is up to the driver to ensure that fences that were exposed to external
+ * parties get signaled. drm_sched_job_cleanup() does not ensure this.
+ *
+ * This function must also be called in &struct drm_sched_backend_ops.free_job.
*/
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
@@ -1019,7 +1032,7 @@ void drm_sched_job_cleanup(struct drm_sched_job *job)
/* drm_sched_job_arm() has been called */
dma_fence_put(&job->s_fence->finished);
} else {
- /* aborted job before committing to run it */
+ /* aborted job before arming */
drm_sched_fence_free(job->s_fence);
}
@@ -1166,9 +1179,6 @@ static void drm_sched_free_job_work(struct work_struct *w)
container_of(w, struct drm_gpu_scheduler, work_free_job);
struct drm_sched_job *job;
- if (READ_ONCE(sched->pause_submit))
- return;
-
job = drm_sched_get_finished_job(sched);
if (job)
sched->ops->free_job(job);
@@ -1192,9 +1202,6 @@ static void drm_sched_run_job_work(struct work_struct *w)
struct drm_sched_job *sched_job;
int r;
- if (READ_ONCE(sched->pause_submit))
- return;
-
/* Find entity with a ready job */
entity = drm_sched_select_entity(sched);
if (!entity)
@@ -1240,43 +1247,27 @@ static void drm_sched_run_job_work(struct work_struct *w)
* drm_sched_init - Init a gpu scheduler instance
*
* @sched: scheduler instance
- * @ops: backend operations for this scheduler
- * @submit_wq: workqueue to use for submission. If NULL, an ordered wq is
- * allocated and used
- * @num_rqs: number of runqueues, one for each priority, up to DRM_SCHED_PRIORITY_COUNT
- * @credit_limit: the number of credits this scheduler can hold from all jobs
- * @hang_limit: number of times to allow a job to hang before dropping it
- * @timeout: timeout value in jiffies for the scheduler
- * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
- * used
- * @score: optional score atomic shared with other schedulers
- * @name: name used for debugging
- * @dev: target &struct device
+ * @args: scheduler initialization arguments
*
* Return 0 on success, otherwise error code.
*/
-int drm_sched_init(struct drm_gpu_scheduler *sched,
- const struct drm_sched_backend_ops *ops,
- struct workqueue_struct *submit_wq,
- u32 num_rqs, u32 credit_limit, unsigned int hang_limit,
- long timeout, struct workqueue_struct *timeout_wq,
- atomic_t *score, const char *name, struct device *dev)
+int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_args *args)
{
int i;
- sched->ops = ops;
- sched->credit_limit = credit_limit;
- sched->name = name;
- sched->timeout = timeout;
- sched->timeout_wq = timeout_wq ? : system_wq;
- sched->hang_limit = hang_limit;
- sched->score = score ? score : &sched->_score;
- sched->dev = dev;
+ sched->ops = args->ops;
+ sched->credit_limit = args->credit_limit;
+ sched->name = args->name;
+ sched->timeout = args->timeout;
+ sched->hang_limit = args->hang_limit;
+ sched->timeout_wq = args->timeout_wq ? args->timeout_wq : system_wq;
+ sched->score = args->score ? args->score : &sched->_score;
+ sched->dev = args->dev;
- if (num_rqs > DRM_SCHED_PRIORITY_COUNT) {
+ if (args->num_rqs > DRM_SCHED_PRIORITY_COUNT) {
/* This is a gross violation--tell drivers what the problem is.
*/
- drm_err(sched, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
+ dev_err(sched->dev, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
__func__);
return -EINVAL;
} else if (sched->sched_rq) {
@@ -1284,20 +1275,20 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
* fine-tune their DRM calling order, and return all
* is good.
*/
- drm_warn(sched, "%s: scheduler already initialized!\n", __func__);
+ dev_warn(sched->dev, "%s: scheduler already initialized!\n", __func__);
return 0;
}
- if (submit_wq) {
- sched->submit_wq = submit_wq;
+ if (args->submit_wq) {
+ sched->submit_wq = args->submit_wq;
sched->own_submit_wq = false;
} else {
#ifdef CONFIG_LOCKDEP
- sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name,
+ sched->submit_wq = alloc_ordered_workqueue_lockdep_map(args->name,
WQ_MEM_RECLAIM,
&drm_sched_lockdep_map);
#else
- sched->submit_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+ sched->submit_wq = alloc_ordered_workqueue(args->name, WQ_MEM_RECLAIM);
#endif
if (!sched->submit_wq)
return -ENOMEM;
@@ -1305,11 +1296,11 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
sched->own_submit_wq = true;
}
- sched->sched_rq = kmalloc_array(num_rqs, sizeof(*sched->sched_rq),
+ sched->sched_rq = kmalloc_array(args->num_rqs, sizeof(*sched->sched_rq),
GFP_KERNEL | __GFP_ZERO);
if (!sched->sched_rq)
goto Out_check_own;
- sched->num_rqs = num_rqs;
+ sched->num_rqs = args->num_rqs;
for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
if (!sched->sched_rq[i])
@@ -1339,7 +1330,7 @@ Out_unroll:
Out_check_own:
if (sched->own_submit_wq)
destroy_workqueue(sched->submit_wq);
- drm_err(sched, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
+ dev_err(sched->dev, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
return -ENOMEM;
}
EXPORT_SYMBOL(drm_sched_init);
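
With this change drm_sched_init() takes a single const struct drm_sched_init_args instead of ten positional parameters. The field names below come straight from the args-> accesses in this hunk; the surrounding driver structure, values and ops table are placeholders meant only to show the shape of a converted call site.

	/* Hypothetical call-site conversion. Fields left unset (submit_wq,
	 * timeout_wq, score) keep the previous defaults: an ordered
	 * workqueue, system_wq and the scheduler's internal score counter.
	 */
	static int example_sched_setup(struct example_device *edev)
	{
		const struct drm_sched_init_args args = {
			.ops = &example_sched_ops,
			.num_rqs = DRM_SCHED_PRIORITY_COUNT,
			.credit_limit = 64,
			.timeout = msecs_to_jiffies(500),
			.name = "example-ring",
			.dev = edev->dev,
		};

		return drm_sched_init(&edev->sched, &args);
	}
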
diff --git a/drivers/gpu/drm/solomon/ssd130x-spi.c b/drivers/gpu/drm/solomon/ssd130x-spi.c
index 08334be38694..7c935870f7d2 100644
--- a/drivers/gpu/drm/solomon/ssd130x-spi.c
+++ b/drivers/gpu/drm/solomon/ssd130x-spi.c
@@ -151,7 +151,6 @@ static const struct of_device_id ssd130x_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ssd130x_of_match);
-#if IS_MODULE(CONFIG_DRM_SSD130X_SPI)
/*
* The SPI core always reports a MODALIAS uevent of the form "spi:<dev>", even
* if the device was registered via OF. This means that the module will not be
@@ -160,7 +159,7 @@ MODULE_DEVICE_TABLE(of, ssd130x_of_match);
* To workaround this issue, add a SPI device ID table. Even when this should
* not be needed for this driver to match the registered SPI devices.
*/
-static const struct spi_device_id ssd130x_spi_table[] = {
+static const struct spi_device_id ssd130x_spi_id[] = {
/* ssd130x family */
{ "sh1106", SH1106_ID },
{ "ssd1305", SSD1305_ID },
@@ -175,14 +174,14 @@ static const struct spi_device_id ssd130x_spi_table[] = {
{ "ssd1331", SSD1331_ID },
{ /* sentinel */ }
};
-MODULE_DEVICE_TABLE(spi, ssd130x_spi_table);
-#endif
+MODULE_DEVICE_TABLE(spi, ssd130x_spi_id);
static struct spi_driver ssd130x_spi_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = ssd130x_of_match,
},
+ .id_table = ssd130x_spi_id,
.probe = ssd130x_spi_probe,
.remove = ssd130x_spi_remove,
.shutdown = ssd130x_spi_shutdown,
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index b777690fd660..dd2006d51c7a 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -880,7 +880,7 @@ static int ssd132x_update_rect(struct ssd130x_device *ssd130x,
u8 n1 = buf[i * width + j];
u8 n2 = buf[i * width + j + 1];
- data_array[array_idx++] = (n2 << 4) | n1;
+ data_array[array_idx++] = (n2 & 0xf0) | (n1 >> 4);
}
}
@@ -1037,7 +1037,7 @@ static int ssd132x_fb_blit_rect(struct drm_framebuffer *fb,
struct drm_format_conv_state *fmtcnv_state)
{
struct ssd130x_device *ssd130x = drm_to_ssd130x(fb->dev);
- unsigned int dst_pitch = drm_rect_width(rect);
+ unsigned int dst_pitch;
struct iosys_map dst;
int ret = 0;
@@ -1046,6 +1046,8 @@ static int ssd132x_fb_blit_rect(struct drm_framebuffer *fb,
rect->x2 = min_t(unsigned int, round_up(rect->x2, SSD132X_SEGMENT_WIDTH),
ssd130x->width);
+ dst_pitch = drm_rect_width(rect);
+
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index c6c2abaa1891..4dcddd02629b 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -349,7 +349,7 @@ static int sti_dvo_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
sti_dvo_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
int target = mode->clock * 1000;
int target_min = target - CLK_TOLERANCE_HZ;
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index b12863bea955..14fdc00d2ba0 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -280,12 +280,12 @@ static void hda_write(struct sti_hda *hda, u32 val, int offset)
*
* Return true if mode is found
*/
-static bool hda_get_mode_idx(struct drm_display_mode mode, int *idx)
+static bool hda_get_mode_idx(const struct drm_display_mode *mode, int *idx)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(hda_supported_modes); i++)
- if (drm_mode_equal(&hda_supported_modes[i].mode, &mode)) {
+ if (drm_mode_equal(&hda_supported_modes[i].mode, mode)) {
*idx = i;
return true;
}
@@ -443,7 +443,7 @@ static void sti_hda_pre_enable(struct drm_bridge *bridge)
if (clk_prepare_enable(hda->clk_hddac))
DRM_ERROR("Failed to prepare/enable hda_hddac clk\n");
- if (!hda_get_mode_idx(hda->mode, &mode_idx)) {
+ if (!hda_get_mode_idx(&hda->mode, &mode_idx)) {
DRM_ERROR("Undefined mode\n");
return;
}
@@ -526,7 +526,7 @@ static void sti_hda_set_mode(struct drm_bridge *bridge,
drm_mode_copy(&hda->mode, mode);
- if (!hda_get_mode_idx(hda->mode, &mode_idx)) {
+ if (!hda_get_mode_idx(&hda->mode, &mode_idx)) {
DRM_ERROR("Undefined mode\n");
return;
}
@@ -603,7 +603,7 @@ static int sti_hda_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
sti_hda_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
int target = mode->clock * 1000;
int target_min = target - CLK_TOLERANCE_HZ;
@@ -614,7 +614,7 @@ sti_hda_connector_mode_valid(struct drm_connector *connector,
= to_sti_hda_connector(connector);
struct sti_hda *hda = hda_connector->hda;
- if (!hda_get_mode_idx(*mode, &idx)) {
+ if (!hda_get_mode_idx(mode, &idx)) {
return MODE_BAD;
} else {
result = clk_round_rate(hda->clk_pix, target);
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index ca2fe17de4a5..164a34d793d8 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -1011,7 +1011,7 @@ fail:
static enum drm_mode_status
sti_hdmi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
int target = mode->clock * 1000;
int target_min = target - CLK_TOLERANCE_HZ;
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 54a73753eff9..ba315c66a04d 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -1900,7 +1900,6 @@ int ltdc_load(struct drm_device *ddev)
struct drm_panel *panel;
struct drm_crtc *crtc;
struct reset_control *rstc;
- struct resource *res;
int irq, i, nb_endpoints;
int ret = -ENODEV;
@@ -1966,8 +1965,7 @@ int ltdc_load(struct drm_device *ddev)
reset_control_deassert(rstc);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ldev->regs = devm_ioremap_resource(dev, res);
+ ldev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ldev->regs)) {
DRM_ERROR("Unable to get ltdc registers\n");
ret = PTR_ERR(ldev->regs);
diff --git a/drivers/gpu/drm/stm/lvds.c b/drivers/gpu/drm/stm/lvds.c
index 06f2d7a56cc9..4613e8e3b8fd 100644
--- a/drivers/gpu/drm/stm/lvds.c
+++ b/drivers/gpu/drm/stm/lvds.c
@@ -980,9 +980,8 @@ static int lvds_attach(struct drm_bridge *bridge,
}
static void lvds_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
- struct drm_atomic_state *state = old_bridge_state->base.state;
struct stm_lvds *lvds = bridge_to_stm_lvds(bridge);
struct drm_connector_state *conn_state;
struct drm_connector *connector;
@@ -1017,7 +1016,7 @@ static void lvds_atomic_enable(struct drm_bridge *bridge,
}
static void lvds_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct stm_lvds *lvds = bridge_to_stm_lvds(bridge);
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 430b2eededb2..798507a8ae56 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1025,7 +1025,8 @@ static void tegra_cursor_atomic_disable(struct drm_plane *plane,
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
}
-static int tegra_cursor_atomic_async_check(struct drm_plane *plane, struct drm_atomic_state *state)
+static int tegra_cursor_atomic_async_check(struct drm_plane *plane, struct drm_atomic_state *state,
+ bool flip)
{
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_crtc_state *crtc_state;
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 4a8cd9ed0a94..9bb077558167 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -812,7 +812,7 @@ static const struct drm_connector_funcs tegra_dsi_connector_funcs = {
static enum drm_mode_status
tegra_dsi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
return MODE_OK;
}
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index e705f8590c13..8cd2969e7d4b 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -1137,7 +1137,7 @@ static const struct drm_connector_funcs tegra_hdmi_connector_funcs = {
static enum drm_mode_status
tegra_hdmi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct tegra_output *output = connector_to_output(connector);
struct tegra_hdmi *hdmi = to_hdmi(output);
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 802d2db7007a..f98f70eda906 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -1789,7 +1789,7 @@ static int tegra_sor_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
tegra_sor_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
return MODE_OK;
}
diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile
index 56dab563abd7..0109bcf7faa5 100644
--- a/drivers/gpu/drm/tests/Makefile
+++ b/drivers/gpu/drm/tests/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_DRM_KUNIT_TEST_HELPERS) += \
drm_kunit_helpers.o
obj-$(CONFIG_DRM_KUNIT_TEST) += \
+ drm_atomic_state_test.o \
drm_buddy_test.o \
drm_cmdline_parser_test.o \
drm_connector_test.o \
diff --git a/drivers/gpu/drm/tests/drm_atomic_state_test.c b/drivers/gpu/drm/tests/drm_atomic_state_test.c
new file mode 100644
index 000000000000..2f6ac7a09f44
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_atomic_state_test.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for the drm_atomic_state helpers
+ *
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_kunit_helpers.h>
+#include <drm/drm_probe_helper.h>
+
+#define DRM_TEST_ENC_0 BIT(0)
+#define DRM_TEST_ENC_1 BIT(1)
+#define DRM_TEST_ENC_2 BIT(2)
+
+#define DRM_TEST_CONN_0 BIT(0)
+
+struct drm_clone_mode_test {
+ const char *name;
+ u32 encoder_mask;
+ int expected_result;
+};
+
+static const struct drm_display_mode drm_atomic_test_mode = {
+ DRM_MODE("1024x768", 0, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
+};
+
+struct drm_atomic_test_priv {
+ struct drm_device drm;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder encoders[3];
+ struct drm_connector connectors[2];
+};
+
+static int modeset_counter;
+
+static void drm_test_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ modeset_counter++;
+}
+
+static const struct drm_encoder_helper_funcs drm_atomic_test_encoder_funcs = {
+ .atomic_mode_set = drm_test_encoder_mode_set,
+};
+
+static const struct drm_connector_funcs dummy_connector_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .reset = drm_atomic_helper_connector_reset,
+};
+
+static int drm_atomic_test_dummy_get_modes(struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector,
+ &drm_atomic_test_mode);
+}
+
+static const struct drm_connector_helper_funcs dummy_connector_helper_funcs = {
+ .get_modes = drm_atomic_test_dummy_get_modes,
+};
+
+static struct drm_atomic_test_priv *
+drm_atomic_test_init_drm_components(struct kunit *test, bool has_connectors)
+{
+ struct drm_atomic_test_priv *priv;
+ struct drm_encoder *enc;
+ struct drm_connector *conn;
+ struct drm_device *drm;
+ struct device *dev;
+ int ret;
+
+ dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ priv = drm_kunit_helper_alloc_drm_device(test, dev,
+ struct drm_atomic_test_priv,
+ drm,
+ DRIVER_MODESET | DRIVER_ATOMIC);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+ test->priv = priv;
+
+ drm = &priv->drm;
+ priv->plane = drm_kunit_helper_create_primary_plane(test, drm,
+ NULL,
+ NULL,
+ NULL, 0,
+ NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->plane);
+
+ priv->crtc = drm_kunit_helper_create_crtc(test, drm,
+ priv->plane, NULL,
+ NULL,
+ NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->crtc);
+
+ for (int i = 0; i < ARRAY_SIZE(priv->encoders); i++) {
+ enc = &priv->encoders[i];
+
+ ret = drmm_encoder_init(drm, enc, NULL,
+ DRM_MODE_ENCODER_DSI, NULL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ enc->possible_crtcs = drm_crtc_mask(priv->crtc);
+ }
+
+ priv->encoders[0].possible_clones = DRM_TEST_ENC_0 | DRM_TEST_ENC_1;
+ priv->encoders[1].possible_clones = DRM_TEST_ENC_0 | DRM_TEST_ENC_1;
+ priv->encoders[2].possible_clones = DRM_TEST_ENC_2;
+
+ if (!has_connectors)
+ goto done;
+
+ BUILD_BUG_ON(ARRAY_SIZE(priv->connectors) > ARRAY_SIZE(priv->encoders));
+
+ for (int i = 0; i < ARRAY_SIZE(priv->connectors); i++) {
+ conn = &priv->connectors[i];
+
+ ret = drmm_connector_init(drm, conn, &dummy_connector_funcs,
+ DRM_MODE_CONNECTOR_DSI, NULL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_connector_helper_add(conn, &dummy_connector_helper_funcs);
+ drm_encoder_helper_add(&priv->encoders[i],
+ &drm_atomic_test_encoder_funcs);
+
+ drm_connector_attach_encoder(conn, &priv->encoders[i]);
+ }
+
+done:
+ drm_mode_config_reset(drm);
+
+ return priv;
+}
+
+static int set_up_atomic_state(struct kunit *test,
+ struct drm_atomic_test_priv *priv,
+ struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_device *drm = &priv->drm;
+ struct drm_crtc *crtc = priv->crtc;
+ struct drm_atomic_state *state;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+ if (connector) {
+ conn_state = drm_atomic_get_connector_state(state, connector);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+
+ ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ }
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
+
+ ret = drm_atomic_set_mode_for_crtc(crtc_state, &drm_atomic_test_mode);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ crtc_state->enable = true;
+ crtc_state->active = true;
+
+ if (connector) {
+ ret = drm_atomic_commit(state);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ } else {
+ /* dummy connector mask */
+ crtc_state->connector_mask = DRM_TEST_CONN_0;
+ }
+
+ return 0;
+}
+
+/*
+ * Test that the DRM encoder mode_set() is called when the atomic state
+ * connectors are changed but the CRTC mode is not.
+ */
+static void drm_test_check_connector_changed_modeset(struct kunit *test)
+{
+ struct drm_atomic_test_priv *priv;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_connector *old_conn, *new_conn;
+ struct drm_atomic_state *state;
+ struct drm_device *drm;
+ struct drm_connector_state *new_conn_state, *old_conn_state;
+ int ret, initial_modeset_count;
+
+ priv = drm_atomic_test_init_drm_components(test, true);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ drm = &priv->drm;
+ old_conn = &priv->connectors[0];
+ new_conn = &priv->connectors[1];
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ /* first modeset to enable */
+ ret = set_up_atomic_state(test, priv, old_conn, &ctx);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+ new_conn_state = drm_atomic_get_connector_state(state, new_conn);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_conn_state);
+
+ old_conn_state = drm_atomic_get_connector_state(state, old_conn);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, old_conn_state);
+
+ ret = drm_atomic_set_crtc_for_connector(old_conn_state, NULL);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ ret = drm_atomic_set_crtc_for_connector(new_conn_state, priv->crtc);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ initial_modeset_count = modeset_counter;
+
+ /* modeset_disables is called as part of the atomic commit tail */
+ ret = drm_atomic_commit(state);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_EQ(test, modeset_counter, initial_modeset_count + 1);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+/*
+ * Test that the drm_crtc_in_clone_mode() helper can detect if a given CRTC
+ * state is in clone mode
+ */
+static void drm_test_check_in_clone_mode(struct kunit *test)
+{
+ bool ret;
+ const struct drm_clone_mode_test *param = test->param_value;
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = kunit_kzalloc(test, sizeof(*crtc_state), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, crtc_state);
+
+ crtc_state->encoder_mask = param->encoder_mask;
+
+ ret = drm_crtc_in_clone_mode(crtc_state);
+
+ KUNIT_ASSERT_EQ(test, ret, param->expected_result);
+}
+
+/*
+ * Test that the atomic commit path will succeed for valid clones (or non-cloned
+ * states) and fail for states where the cloned encoders are not possible_clones
+ * of each other.
+ */
+static void drm_test_check_valid_clones(struct kunit *test)
+{
+ int ret;
+ const struct drm_clone_mode_test *param = test->param_value;
+ struct drm_atomic_test_priv *priv;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_device *drm;
+ struct drm_atomic_state *state;
+ struct drm_crtc_state *crtc_state;
+
+ priv = drm_atomic_test_init_drm_components(test, false);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ drm = &priv->drm;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ ret = set_up_atomic_state(test, priv, NULL, &ctx);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+ crtc_state = drm_atomic_get_crtc_state(state, priv->crtc);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
+
+ crtc_state->encoder_mask = param->encoder_mask;
+
+ /* force modeset */
+ crtc_state->mode_changed = true;
+
+ ret = drm_atomic_helper_check_modeset(drm, state);
+ KUNIT_ASSERT_EQ(test, ret, param->expected_result);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+static void drm_check_in_clone_mode_desc(const struct drm_clone_mode_test *t,
+ char *desc)
+{
+ sprintf(desc, "%s", t->name);
+}
+
+static void drm_check_valid_clones_desc(const struct drm_clone_mode_test *t,
+ char *desc)
+{
+ sprintf(desc, "%s", t->name);
+}
+
+static const struct drm_clone_mode_test drm_clone_mode_tests[] = {
+ {
+ .name = "in_clone_mode",
+ .encoder_mask = DRM_TEST_ENC_0 | DRM_TEST_ENC_1,
+ .expected_result = true,
+ },
+ {
+ .name = "not_in_clone_mode",
+ .encoder_mask = DRM_TEST_ENC_0,
+ .expected_result = false,
+ },
+};
+
+static const struct drm_clone_mode_test drm_valid_clone_mode_tests[] = {
+ {
+ .name = "not_in_clone_mode",
+ .encoder_mask = DRM_TEST_ENC_0,
+ .expected_result = 0,
+ },
+
+ {
+ .name = "valid_clone",
+ .encoder_mask = DRM_TEST_ENC_0 | DRM_TEST_ENC_1,
+ .expected_result = 0,
+ },
+ {
+ .name = "invalid_clone",
+ .encoder_mask = DRM_TEST_ENC_0 | DRM_TEST_ENC_2,
+ .expected_result = -EINVAL,
+ },
+};
+
+KUNIT_ARRAY_PARAM(drm_check_in_clone_mode, drm_clone_mode_tests,
+ drm_check_in_clone_mode_desc);
+
+KUNIT_ARRAY_PARAM(drm_check_valid_clones, drm_valid_clone_mode_tests,
+ drm_check_valid_clones_desc);
+
+static struct kunit_case drm_test_check_modeset_test[] = {
+ KUNIT_CASE(drm_test_check_connector_changed_modeset),
+ {}
+};
+
+static struct kunit_case drm_in_clone_mode_check_test[] = {
+ KUNIT_CASE_PARAM(drm_test_check_in_clone_mode,
+ drm_check_in_clone_mode_gen_params),
+ KUNIT_CASE_PARAM(drm_test_check_valid_clones,
+ drm_check_valid_clones_gen_params),
+ {}
+};
+
+static struct kunit_suite drm_test_check_modeset_test_suite = {
+ .name = "drm_validate_modeset",
+ .test_cases = drm_test_check_modeset_test,
+};
+
+static struct kunit_suite drm_in_clone_mode_check_test_suite = {
+ .name = "drm_validate_clone_mode",
+ .test_cases = drm_in_clone_mode_check_test,
+};
+
+kunit_test_suites(&drm_in_clone_mode_check_test_suite,
+ &drm_test_check_modeset_test_suite);
+
+MODULE_AUTHOR("Jessica Zhang <quic_jesszhan@quicinc.com");
+MODULE_DESCRIPTION("Test cases for the drm_atomic_helper functions");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
index 9662c949d0e3..7a0e523651f0 100644
--- a/drivers/gpu/drm/tests/drm_buddy_test.c
+++ b/drivers/gpu/drm/tests/drm_buddy_test.c
@@ -261,7 +261,6 @@ static void drm_test_buddy_alloc_range_bias(struct kunit *test)
static void drm_test_buddy_alloc_clear(struct kunit *test)
{
unsigned long n_pages, total, i = 0;
- DRM_RND_STATE(prng, random_seed);
const unsigned long ps = SZ_4K;
struct drm_buddy_block *block;
const int max_order = 12;
@@ -385,17 +384,28 @@ static void drm_test_buddy_alloc_clear(struct kunit *test)
drm_buddy_fini(&mm);
/*
- * Create a new mm with a non power-of-two size. Allocate a random size, free as
- * cleared and then call fini. This will ensure the multi-root force merge during
- * fini.
+ * Create a new mm with a non power-of-two size. Allocate a random size from each
+ * root, free as cleared and then call fini. This will ensure the multi-root
+ * force merge during fini.
*/
- mm_size = 12 * SZ_4K;
- size = max(round_up(prandom_u32_state(&prng) % mm_size, ps), ps);
+ mm_size = (SZ_4K << max_order) + (SZ_4K << (max_order - 2));
+
KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
- KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
- size, ps, &allocated,
- DRM_BUDDY_TOPDOWN_ALLOCATION),
- "buddy_alloc hit an error size=%u\n", size);
+ KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, SZ_4K << max_order,
+ 4 * ps, ps, &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc hit an error size=%lu\n", 4 * ps);
+ drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, SZ_4K << max_order,
+ 2 * ps, ps, &allocated,
+ DRM_BUDDY_CLEAR_ALLOCATION),
+ "buddy_alloc hit an error size=%lu\n", 2 * ps);
+ drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, SZ_4K << max_order, mm_size,
+ ps, ps, &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc hit an error size=%lu\n", ps);
drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
drm_buddy_fini(&mm);
}
diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c
index 08992636ec05..35cd3405d045 100644
--- a/drivers/gpu/drm/tests/drm_format_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_format_helper_test.c
@@ -60,6 +60,11 @@ struct convert_to_rgb888_result {
const u8 expected[TEST_BUF_SIZE];
};
+struct convert_to_bgr888_result {
+ unsigned int dst_pitch;
+ const u8 expected[TEST_BUF_SIZE];
+};
+
struct convert_to_argb8888_result {
unsigned int dst_pitch;
const u32 expected[TEST_BUF_SIZE];
@@ -107,6 +112,7 @@ struct convert_xrgb8888_case {
struct convert_to_argb1555_result argb1555_result;
struct convert_to_rgba5551_result rgba5551_result;
struct convert_to_rgb888_result rgb888_result;
+ struct convert_to_bgr888_result bgr888_result;
struct convert_to_argb8888_result argb8888_result;
struct convert_to_xrgb2101010_result xrgb2101010_result;
struct convert_to_argb2101010_result argb2101010_result;
@@ -151,6 +157,10 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
.dst_pitch = TEST_USE_DEFAULT_PITCH,
.expected = { 0x00, 0x00, 0xFF },
},
+ .bgr888_result = {
+ .dst_pitch = TEST_USE_DEFAULT_PITCH,
+ .expected = { 0xFF, 0x00, 0x00 },
+ },
.argb8888_result = {
.dst_pitch = TEST_USE_DEFAULT_PITCH,
.expected = { 0xFFFF0000 },
@@ -217,6 +227,10 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
.dst_pitch = TEST_USE_DEFAULT_PITCH,
.expected = { 0x00, 0x00, 0xFF },
},
+ .bgr888_result = {
+ .dst_pitch = TEST_USE_DEFAULT_PITCH,
+ .expected = { 0xFF, 0x00, 0x00 },
+ },
.argb8888_result = {
.dst_pitch = TEST_USE_DEFAULT_PITCH,
.expected = { 0xFFFF0000 },
@@ -330,6 +344,15 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x00,
},
},
+ .bgr888_result = {
+ .dst_pitch = TEST_USE_DEFAULT_PITCH,
+ .expected = {
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00,
+ 0xFF, 0x00, 0x00, 0x00, 0xFF, 0x00,
+ 0x00, 0x00, 0xFF, 0xFF, 0x00, 0xFF,
+ 0xFF, 0xFF, 0x00, 0x00, 0xFF, 0xFF,
+ },
+ },
.argb8888_result = {
.dst_pitch = TEST_USE_DEFAULT_PITCH,
.expected = {
@@ -468,6 +491,17 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
},
},
+ .bgr888_result = {
+ .dst_pitch = 15,
+ .expected = {
+ 0x0E, 0x44, 0x9C, 0x11, 0x4D, 0x05, 0xA8, 0xF3, 0x03,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x6C, 0xF0, 0x73, 0x0E, 0x44, 0x9C, 0x11, 0x4D, 0x05,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xA8, 0x03, 0x03, 0x6C, 0xF0, 0x73, 0x0E, 0x44, 0x9C,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ },
+ },
.argb8888_result = {
.dst_pitch = 20,
.expected = {
@@ -914,6 +948,52 @@ static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test)
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
+static void drm_test_fb_xrgb8888_to_bgr888(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_bgr888_result *result = &params->bgr888_result;
+ size_t dst_size;
+ u8 *buf = NULL;
+ __le32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_BGR888, result->dst_pitch,
+ &params->clip, 0);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ /*
+ * BGR888 expected results are already in little-endian
+ * order, so there's no need to convert the test output.
+ */
+ drm_fb_xrgb8888_to_bgr888(&dst, &result->dst_pitch, &src, &fb, &params->clip,
+ &fmtcnv_state);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+
+ buf = dst.vaddr; /* restore original value of buf */
+ memset(buf, 0, dst_size);
+
+ int blit_result = 0;
+
+ blit_result = drm_fb_blit(&dst, &result->dst_pitch, DRM_FORMAT_BGR888, &src, &fb, &params->clip,
+ &fmtcnv_state);
+
+ KUNIT_EXPECT_FALSE(test, blit_result);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+}
+
static void drm_test_fb_xrgb8888_to_argb8888(struct kunit *test)
{
const struct convert_xrgb8888_case *params = test->param_value;
@@ -1851,6 +1931,7 @@ static struct kunit_case drm_format_helper_test_cases[] = {
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb1555, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgba5551, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb888, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_bgr888, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb8888, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_xrgb2101010, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb2101010, convert_xrgb8888_gen_params),
diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
index 23ecc00accb2..e97efd3af9ed 100644
--- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
@@ -273,7 +273,7 @@ drm_kunit_helper_connector_hdmi_init(struct kunit *test,
static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *old_conn_state;
struct drm_connector_state *new_conn_state;
struct drm_crtc_state *crtc_state;
@@ -296,13 +296,12 @@ static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test)
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
new_conn_state = drm_atomic_get_connector_state(state, conn);
@@ -327,6 +326,9 @@ static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test)
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
KUNIT_EXPECT_TRUE(test, crtc_state->mode_changed);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -337,7 +339,7 @@ static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test)
static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *old_conn_state;
struct drm_connector_state *new_conn_state;
struct drm_crtc_state *crtc_state;
@@ -360,13 +362,12 @@ static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *tes
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
new_conn_state = drm_atomic_get_connector_state(state, conn);
@@ -393,6 +394,9 @@ static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *tes
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
KUNIT_EXPECT_FALSE(test, crtc_state->mode_changed);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -403,7 +407,7 @@ static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *tes
static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_atomic_state *state;
struct drm_display_mode *preferred;
@@ -426,13 +430,12 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_NE(test, drm_match_cea_mode(preferred), 1);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
conn_state = drm_atomic_get_connector_state(state, conn);
@@ -449,6 +452,9 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
KUNIT_EXPECT_TRUE(test, conn_state->hdmi.is_limited_range);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -459,7 +465,7 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test)
static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_atomic_state *state;
struct drm_display_mode *mode;
@@ -477,17 +483,16 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
conn = &priv->connector;
KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, mode, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
conn_state = drm_atomic_get_connector_state(state, conn);
@@ -504,6 +509,9 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
KUNIT_EXPECT_FALSE(test, conn_state->hdmi.is_limited_range);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -514,7 +522,7 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_atomic_state *state;
struct drm_display_mode *preferred;
@@ -537,13 +545,12 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_NE(test, drm_match_cea_mode(preferred), 1);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
conn_state = drm_atomic_get_connector_state(state, conn);
@@ -562,6 +569,9 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test)
DRM_HDMI_BROADCAST_RGB_FULL);
KUNIT_EXPECT_FALSE(test, conn_state->hdmi.is_limited_range);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -572,7 +582,7 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test)
static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_atomic_state *state;
struct drm_display_mode *mode;
@@ -590,17 +600,16 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
conn = &priv->connector;
KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, mode, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
conn_state = drm_atomic_get_connector_state(state, conn);
@@ -619,6 +628,9 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
DRM_HDMI_BROADCAST_RGB_FULL);
KUNIT_EXPECT_FALSE(test, conn_state->hdmi.is_limited_range);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -629,7 +641,7 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_atomic_state *state;
struct drm_display_mode *preferred;
@@ -652,13 +664,12 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_NE(test, drm_match_cea_mode(preferred), 1);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
conn_state = drm_atomic_get_connector_state(state, conn);
@@ -677,6 +688,9 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test)
DRM_HDMI_BROADCAST_RGB_LIMITED);
KUNIT_EXPECT_TRUE(test, conn_state->hdmi.is_limited_range);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -687,7 +701,7 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test)
static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_atomic_state *state;
struct drm_display_mode *mode;
@@ -705,17 +719,16 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
conn = &priv->connector;
KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, mode, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
conn_state = drm_atomic_get_connector_state(state, conn);
@@ -734,6 +747,9 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
DRM_HDMI_BROADCAST_RGB_LIMITED);
KUNIT_EXPECT_TRUE(test, conn_state->hdmi.is_limited_range);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -744,7 +760,7 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *old_conn_state;
struct drm_connector_state *new_conn_state;
struct drm_crtc_state *crtc_state;
@@ -771,13 +787,12 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test)
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
new_conn_state = drm_atomic_get_connector_state(state, conn);
@@ -808,6 +823,9 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test)
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
KUNIT_EXPECT_TRUE(test, crtc_state->mode_changed);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -818,7 +836,7 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test)
static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *old_conn_state;
struct drm_connector_state *new_conn_state;
struct drm_crtc_state *crtc_state;
@@ -845,13 +863,12 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test)
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
new_conn_state = drm_atomic_get_connector_state(state, conn);
@@ -880,6 +897,9 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test)
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
KUNIT_EXPECT_FALSE(test, crtc_state->mode_changed);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -889,7 +909,7 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test)
static void drm_test_check_output_bpc_dvi(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_display_info *info;
struct drm_display_mode *preferred;
@@ -919,10 +939,9 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -930,6 +949,9 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 8);
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -939,7 +961,7 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_display_mode *preferred;
struct drm_connector *conn;
@@ -964,10 +986,9 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -976,6 +997,9 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test)
KUNIT_ASSERT_EQ(test, conn_state->hdmi.output_bpc, 8);
KUNIT_ASSERT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
KUNIT_EXPECT_EQ(test, conn_state->hdmi.tmds_char_rate, preferred->clock * 1000);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -986,7 +1010,7 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test)
static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_display_mode *preferred;
struct drm_connector *conn;
@@ -1011,10 +1035,9 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1023,6 +1046,9 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test)
KUNIT_ASSERT_EQ(test, conn_state->hdmi.output_bpc, 10);
KUNIT_ASSERT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
KUNIT_EXPECT_EQ(test, conn_state->hdmi.tmds_char_rate, preferred->clock * 1250);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -1033,7 +1059,7 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test)
static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_display_mode *preferred;
struct drm_connector *conn;
@@ -1058,10 +1084,9 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1070,6 +1095,9 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test)
KUNIT_ASSERT_EQ(test, conn_state->hdmi.output_bpc, 12);
KUNIT_ASSERT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
KUNIT_EXPECT_EQ(test, conn_state->hdmi.tmds_char_rate, preferred->clock * 1500);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -1083,7 +1111,7 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test)
static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *state;
struct drm_display_mode *preferred;
struct drm_crtc_state *crtc_state;
@@ -1104,16 +1132,15 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test)
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
/* You shouldn't be doing that at home. */
conn->hdmi.funcs = &reject_connector_hdmi_funcs;
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
crtc_state = drm_atomic_get_crtc_state(state, crtc);
@@ -1123,6 +1150,9 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test)
ret = drm_atomic_check_only(state);
KUNIT_EXPECT_LT(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -1139,7 +1169,7 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test)
static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_display_info *info;
struct drm_display_mode *preferred;
@@ -1176,10 +1206,9 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
rate = drm_hdmi_compute_mode_clock(preferred, 10, HDMI_COLORSPACE_RGB);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1188,6 +1217,9 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 10);
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
KUNIT_EXPECT_EQ(test, conn_state->hdmi.tmds_char_rate, preferred->clock * 1250);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -1206,7 +1238,7 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_display_info *info;
struct drm_display_mode *preferred;
@@ -1248,10 +1280,9 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV422);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1259,6 +1290,9 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 10);
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -1269,7 +1303,7 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_display_info *info;
struct drm_display_mode *mode;
@@ -1310,11 +1344,10 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
rate = mode->clock * 1500;
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, mode, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1322,6 +1355,9 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 8);
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -1331,7 +1367,7 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_display_info *info;
struct drm_display_mode *preferred;
@@ -1376,10 +1412,9 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV422);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1387,6 +1422,9 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
KUNIT_EXPECT_LT(test, conn_state->hdmi.output_bpc, 12);
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -1396,7 +1434,7 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_display_info *info;
struct drm_display_mode *preferred;
@@ -1443,10 +1481,9 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV422);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1454,6 +1491,9 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
KUNIT_EXPECT_LT(test, conn_state->hdmi.output_bpc, 12);
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -1464,7 +1504,7 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_display_info *info;
struct drm_display_mode *preferred;
@@ -1501,10 +1541,9 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_RGB);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1512,6 +1551,9 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 8);
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -1522,7 +1564,7 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_display_info *info;
struct drm_display_mode *preferred;
@@ -1561,10 +1603,9 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_RGB);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1572,13 +1613,16 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 8);
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/* Test that atomic check succeeds when disabling a connector. */
static void drm_test_check_disable_connector(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_connector_state *conn_state;
struct drm_crtc_state *crtc_state;
struct drm_atomic_state *state;
@@ -1593,8 +1637,7 @@ static void drm_test_check_disable_connector(struct kunit *test)
8);
KUNIT_ASSERT_NOT_NULL(test, priv);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
conn = &priv->connector;
preferred = find_preferred_mode(conn);
@@ -1602,10 +1645,10 @@ static void drm_test_check_disable_connector(struct kunit *test)
drm = &priv->drm;
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
crtc_state = drm_atomic_get_crtc_state(state, crtc);
@@ -1623,6 +1666,9 @@ static void drm_test_check_disable_connector(struct kunit *test)
ret = drm_atomic_check_only(state);
KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
static struct kunit_case drm_atomic_helper_connector_hdmi_check_tests[] = {
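Every test converted above follows the same shape: the kunit-managed context returned by drm_kunit_helper_acquire_ctx_alloc() becomes a struct drm_modeset_acquire_ctx on the stack, initialized with drm_modeset_acquire_init() and released explicitly at the end of the test with drm_modeset_drop_locks() and drm_modeset_acquire_fini(). A minimal sketch of that shape, with the actual test body elided (drm_test_example is a placeholder name, not one of the tests above):

static void drm_test_example(struct kunit *test)
{
        struct drm_modeset_acquire_ctx ctx;

        /* On-stack context replaces drm_kunit_helper_acquire_ctx_alloc(). */
        drm_modeset_acquire_init(&ctx, 0);

        /*
         * ... set up the connector, light it up and allocate the atomic
         * state, passing &ctx everywhere the old tests passed ctx ...
         */

        /* Explicit teardown replaces the removed kunit cleanup action. */
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
}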
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index 3c0b7824c0be..a4eb68f0decc 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -80,47 +80,6 @@ __drm_kunit_helper_alloc_drm_device_with_driver(struct kunit *test,
}
EXPORT_SYMBOL_GPL(__drm_kunit_helper_alloc_drm_device_with_driver);
-static void action_drm_release_context(void *ptr)
-{
- struct drm_modeset_acquire_ctx *ctx = ptr;
-
- drm_modeset_drop_locks(ctx);
- drm_modeset_acquire_fini(ctx);
-}
-
-/**
- * drm_kunit_helper_acquire_ctx_alloc - Allocates an acquire context
- * @test: The test context object
- *
- * Allocates and initializes a modeset acquire context.
- *
- * The context is tied to the kunit test context, so we must not call
- * drm_modeset_acquire_fini() on it, it will be done so automatically.
- *
- * Returns:
- * An ERR_PTR on error, a pointer to the newly allocated context otherwise
- */
-struct drm_modeset_acquire_ctx *
-drm_kunit_helper_acquire_ctx_alloc(struct kunit *test)
-{
- struct drm_modeset_acquire_ctx *ctx;
- int ret;
-
- ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
- KUNIT_ASSERT_NOT_NULL(test, ctx);
-
- drm_modeset_acquire_init(ctx, 0);
-
- ret = kunit_add_action_or_reset(test,
- action_drm_release_context,
- ctx);
- if (ret)
- return ERR_PTR(ret);
-
- return ctx;
-}
-EXPORT_SYMBOL_GPL(drm_kunit_helper_acquire_ctx_alloc);
-
static void kunit_action_drm_atomic_state_put(void *ptr)
{
struct drm_atomic_state *state = ptr;
diff --git a/drivers/gpu/drm/tidss/tidss_dispc_regs.h b/drivers/gpu/drm/tidss/tidss_dispc_regs.h
index 13feedfe5d6d..e88148e44937 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc_regs.h
+++ b/drivers/gpu/drm/tidss/tidss_dispc_regs.h
@@ -49,7 +49,7 @@ enum dispc_common_regs {
/*
* dispc_common_regmap should be defined as const u16 * and pointing
* to a valid dss common register map for the platform, before the
- * macros bellow can be used.
+ * macros below can be used.
*/
#define REG(r) (dispc_common_regmap[r ## _OFF])
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
index 116de124bddb..719412e6c346 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.c
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -67,7 +67,7 @@ static int tidss_plane_atomic_check(struct drm_plane *plane,
/*
* The HW is only able to start drawing at subpixel boundary
- * (the two first checks bellow). At the end of a row the HW
+ * (the two first checks below). At the end of a row the HW
* can only jump integer number of subpixels forward to the
* beginning of the next row. So we can only show picture with
* integer subpixel width (the third check). However, after
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index 94cbdb1337c0..54c84c9801c1 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -1,5 +1,17 @@
# SPDX-License-Identifier: GPL-2.0-only
+config DRM_APPLETBDRM
+ tristate "DRM support for Apple Touch Bars"
+ depends on DRM && USB && MMU
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ help
+ Say Y here if you want support for the display of Touch Bars on x86
+ MacBook Pros.
+
+ To compile this driver as a module, choose M here: the
+ module will be called appletbdrm.
+
config DRM_ARCPGU
tristate "ARC PGU"
depends on DRM && OF
diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile
index 60816d2eb4ff..0a3a7837a58b 100644
--- a/drivers/gpu/drm/tiny/Makefile
+++ b/drivers/gpu/drm/tiny/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_DRM_APPLETBDRM) += appletbdrm.o
obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o
obj-$(CONFIG_DRM_BOCHS) += bochs.o
obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus-qemu.o
diff --git a/drivers/gpu/drm/tiny/appletbdrm.c b/drivers/gpu/drm/tiny/appletbdrm.c
new file mode 100644
index 000000000000..4370ba22dd88
--- /dev/null
+++ b/drivers/gpu/drm/tiny/appletbdrm.c
@@ -0,0 +1,840 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Apple Touch Bar DRM Driver
+ *
+ * Copyright (c) 2023 Kerem Karabay <kekrby@gmail.com>
+ */
+
+#include <linux/align.h>
+#include <linux/array_size.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/container_of.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
+#include <linux/usb.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_format_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#define APPLETBDRM_PIXEL_FORMAT cpu_to_le32(0x52474241) /* RGBA, the actual format is BGR888 */
+#define APPLETBDRM_BITS_PER_PIXEL 24
+
+#define APPLETBDRM_MSG_CLEAR_DISPLAY cpu_to_le32(0x434c5244) /* CLRD */
+#define APPLETBDRM_MSG_GET_INFORMATION cpu_to_le32(0x47494e46) /* GINF */
+#define APPLETBDRM_MSG_UPDATE_COMPLETE cpu_to_le32(0x5544434c) /* UDCL */
+#define APPLETBDRM_MSG_SIGNAL_READINESS cpu_to_le32(0x52454459) /* REDY */
+
+#define APPLETBDRM_BULK_MSG_TIMEOUT 1000
+
+#define drm_to_adev(_drm) container_of(_drm, struct appletbdrm_device, drm)
+#define adev_to_udev(adev) interface_to_usbdev(to_usb_interface(adev->dmadev))
+
+struct appletbdrm_msg_request_header {
+ __le16 unk_00;
+ __le16 unk_02;
+ __le32 unk_04;
+ __le32 unk_08;
+ __le32 size;
+} __packed;
+
+struct appletbdrm_msg_response_header {
+ u8 unk_00[16];
+ __le32 msg;
+} __packed;
+
+struct appletbdrm_msg_simple_request {
+ struct appletbdrm_msg_request_header header;
+ __le32 msg;
+ u8 unk_14[8];
+ __le32 size;
+} __packed;
+
+struct appletbdrm_msg_information {
+ struct appletbdrm_msg_response_header header;
+ u8 unk_14[12];
+ __le32 width;
+ __le32 height;
+ u8 bits_per_pixel;
+ __le32 bytes_per_row;
+ __le32 orientation;
+ __le32 bitmap_info;
+ __le32 pixel_format;
+ __le32 width_inches; /* floating point */
+ __le32 height_inches; /* floating point */
+} __packed;
+
+struct appletbdrm_frame {
+ __le16 begin_x;
+ __le16 begin_y;
+ __le16 width;
+ __le16 height;
+ __le32 buf_size;
+ u8 buf[];
+} __packed;
+
+struct appletbdrm_fb_request_footer {
+ u8 unk_00[12];
+ __le32 unk_0c;
+ u8 unk_10[12];
+ __le32 unk_1c;
+ __le64 timestamp;
+ u8 unk_28[12];
+ __le32 unk_34;
+ u8 unk_38[20];
+ __le32 unk_4c;
+} __packed;
+
+struct appletbdrm_fb_request {
+ struct appletbdrm_msg_request_header header;
+ __le16 unk_10;
+ u8 msg_id;
+ u8 unk_13[29];
+ /*
+ * Contents of `data`:
+ * - struct appletbdrm_frame frames[];
+ * - struct appletbdrm_fb_request_footer footer;
+ * - padding to make the total size a multiple of 16
+ */
+ u8 data[];
+} __packed;
+
+struct appletbdrm_fb_request_response {
+ struct appletbdrm_msg_response_header header;
+ u8 unk_14[12];
+ __le64 timestamp;
+} __packed;
+
+struct appletbdrm_device {
+ struct device *dmadev;
+
+ unsigned int in_ep;
+ unsigned int out_ep;
+
+ unsigned int width;
+ unsigned int height;
+
+ struct drm_device drm;
+ struct drm_display_mode mode;
+ struct drm_connector connector;
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+};
+
+struct appletbdrm_plane_state {
+ struct drm_shadow_plane_state base;
+ struct appletbdrm_fb_request *request;
+ struct appletbdrm_fb_request_response *response;
+ size_t request_size;
+ size_t frames_size;
+};
+
+static inline struct appletbdrm_plane_state *to_appletbdrm_plane_state(struct drm_plane_state *state)
+{
+ return container_of(state, struct appletbdrm_plane_state, base.base);
+}
+
+static int appletbdrm_send_request(struct appletbdrm_device *adev,
+ struct appletbdrm_msg_request_header *request, size_t size)
+{
+ struct usb_device *udev = adev_to_udev(adev);
+ struct drm_device *drm = &adev->drm;
+ int ret, actual_size;
+
+ ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, adev->out_ep),
+ request, size, &actual_size, APPLETBDRM_BULK_MSG_TIMEOUT);
+ if (ret) {
+ drm_err(drm, "Failed to send message (%d)\n", ret);
+ return ret;
+ }
+
+ if (actual_size != size) {
+ drm_err(drm, "Actual size (%d) doesn't match expected size (%zu)\n",
+ actual_size, size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int appletbdrm_read_response(struct appletbdrm_device *adev,
+ struct appletbdrm_msg_response_header *response,
+ size_t size, __le32 expected_response)
+{
+ struct usb_device *udev = adev_to_udev(adev);
+ struct drm_device *drm = &adev->drm;
+ int ret, actual_size;
+ bool readiness_signal_received = false;
+
+retry:
+ ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, adev->in_ep),
+ response, size, &actual_size, APPLETBDRM_BULK_MSG_TIMEOUT);
+ if (ret) {
+ drm_err(drm, "Failed to read response (%d)\n", ret);
+ return ret;
+ }
+
+ /*
+ * The device responds with a readiness signal to the first request sent
+ * within a particular timeframe after the USB device configuration is
+ * set; in that case the response should be read again
+ */
+ if (response->msg == APPLETBDRM_MSG_SIGNAL_READINESS) {
+ if (!readiness_signal_received) {
+ readiness_signal_received = true;
+ goto retry;
+ }
+
+ drm_err(drm, "Encountered unexpected readiness signal\n");
+ return -EINTR;
+ }
+
+ if (actual_size != size) {
+ drm_err(drm, "Actual size (%d) doesn't match expected size (%zu)\n",
+ actual_size, size);
+ return -EBADMSG;
+ }
+
+ if (response->msg != expected_response) {
+ drm_err(drm, "Unexpected response from device (expected %p4cc found %p4cc)\n",
+ &expected_response, &response->msg);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int appletbdrm_send_msg(struct appletbdrm_device *adev, __le32 msg)
+{
+ struct appletbdrm_msg_simple_request *request;
+ int ret;
+
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+
+ request->header.unk_00 = cpu_to_le16(2);
+ request->header.unk_02 = cpu_to_le16(0x1512);
+ request->header.size = cpu_to_le32(sizeof(*request) - sizeof(request->header));
+ request->msg = msg;
+ request->size = request->header.size;
+
+ ret = appletbdrm_send_request(adev, &request->header, sizeof(*request));
+
+ kfree(request);
+
+ return ret;
+}
+
+static int appletbdrm_clear_display(struct appletbdrm_device *adev)
+{
+ return appletbdrm_send_msg(adev, APPLETBDRM_MSG_CLEAR_DISPLAY);
+}
+
+static int appletbdrm_signal_readiness(struct appletbdrm_device *adev)
+{
+ return appletbdrm_send_msg(adev, APPLETBDRM_MSG_SIGNAL_READINESS);
+}
+
+static int appletbdrm_get_information(struct appletbdrm_device *adev)
+{
+ struct appletbdrm_msg_information *info;
+ struct drm_device *drm = &adev->drm;
+ u8 bits_per_pixel;
+ __le32 pixel_format;
+ int ret;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ ret = appletbdrm_send_msg(adev, APPLETBDRM_MSG_GET_INFORMATION);
+ if (ret)
+ goto free_info;
+
+ ret = appletbdrm_read_response(adev, &info->header, sizeof(*info),
+ APPLETBDRM_MSG_GET_INFORMATION);
+ if (ret)
+ goto free_info;
+
+ bits_per_pixel = info->bits_per_pixel;
+ pixel_format = get_unaligned(&info->pixel_format);
+
+ adev->width = get_unaligned_le32(&info->width);
+ adev->height = get_unaligned_le32(&info->height);
+
+ if (bits_per_pixel != APPLETBDRM_BITS_PER_PIXEL) {
+ drm_err(drm, "Encountered unexpected bits per pixel value (%d)\n", bits_per_pixel);
+ ret = -EINVAL;
+ goto free_info;
+ }
+
+ if (pixel_format != APPLETBDRM_PIXEL_FORMAT) {
+ drm_err(drm, "Encountered unknown pixel format (%p4cc)\n", &pixel_format);
+ ret = -EINVAL;
+ goto free_info;
+ }
+
+free_info:
+ kfree(info);
+
+ return ret;
+}
+
+static u32 rect_size(struct drm_rect *rect)
+{
+ return drm_rect_width(rect) * drm_rect_height(rect) *
+ (BITS_TO_BYTES(APPLETBDRM_BITS_PER_PIXEL));
+}
+
+static int appletbdrm_connector_helper_get_modes(struct drm_connector *connector)
+{
+ struct appletbdrm_device *adev = drm_to_adev(connector->dev);
+
+ return drm_connector_helper_get_modes_fixed(connector, &adev->mode);
+}
+
+static const u32 appletbdrm_primary_plane_formats[] = {
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_XRGB8888, /* emulated */
+};
+
+static int appletbdrm_primary_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_crtc *new_crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+ struct appletbdrm_plane_state *appletbdrm_state = to_appletbdrm_plane_state(new_plane_state);
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect damage;
+ size_t frames_size = 0;
+ size_t request_size;
+ int ret;
+
+ if (new_crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ if (ret)
+ return ret;
+ else if (!new_plane_state->visible)
+ return 0;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, new_plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ frames_size += struct_size((struct appletbdrm_frame *)0, buf, rect_size(&damage));
+ }
+
+ if (!frames_size)
+ return 0;
+
+ request_size = ALIGN(sizeof(struct appletbdrm_fb_request) +
+ frames_size +
+ sizeof(struct appletbdrm_fb_request_footer), 16);
+
+ appletbdrm_state->request = kzalloc(request_size, GFP_KERNEL);
+
+ if (!appletbdrm_state->request)
+ return -ENOMEM;
+
+ appletbdrm_state->response = kzalloc(sizeof(*appletbdrm_state->response), GFP_KERNEL);
+
+ if (!appletbdrm_state->response)
+ return -ENOMEM;
+
+ appletbdrm_state->request_size = request_size;
+ appletbdrm_state->frames_size = frames_size;
+
+ return 0;
+}
+
+static int appletbdrm_flush_damage(struct appletbdrm_device *adev,
+ struct drm_plane_state *old_state,
+ struct drm_plane_state *state)
+{
+ struct appletbdrm_plane_state *appletbdrm_state = to_appletbdrm_plane_state(state);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
+ struct appletbdrm_fb_request_response *response = appletbdrm_state->response;
+ struct appletbdrm_fb_request_footer *footer;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_framebuffer *fb = state->fb;
+ struct appletbdrm_fb_request *request = appletbdrm_state->request;
+ struct drm_device *drm = &adev->drm;
+ struct appletbdrm_frame *frame;
+ u64 timestamp = ktime_get_ns();
+ struct drm_rect damage;
+ size_t frames_size = appletbdrm_state->frames_size;
+ size_t request_size = appletbdrm_state->request_size;
+ int ret;
+
+ if (!frames_size)
+ return 0;
+
+ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+ if (ret) {
+ drm_err(drm, "Failed to start CPU framebuffer access (%d)\n", ret);
+ goto end_fb_cpu_access;
+ }
+
+ request->header.unk_00 = cpu_to_le16(2);
+ request->header.unk_02 = cpu_to_le16(0x12);
+ request->header.unk_04 = cpu_to_le32(9);
+ request->header.size = cpu_to_le32(request_size - sizeof(request->header));
+ request->unk_10 = cpu_to_le16(1);
+ request->msg_id = timestamp;
+
+ frame = (struct appletbdrm_frame *)request->data;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_state, state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ struct drm_rect dst_clip = state->dst;
+ struct iosys_map dst = IOSYS_MAP_INIT_VADDR(frame->buf);
+ u32 buf_size = rect_size(&damage);
+
+ if (!drm_rect_intersect(&dst_clip, &damage))
+ continue;
+
+ /*
+ * The coordinates need to be translated to the coordinate
+ * system the device expects, see the comment in
+ * appletbdrm_setup_mode_config
+ */
+ frame->begin_x = cpu_to_le16(damage.y1);
+ frame->begin_y = cpu_to_le16(adev->height - damage.x2);
+ frame->width = cpu_to_le16(drm_rect_height(&damage));
+ frame->height = cpu_to_le16(drm_rect_width(&damage));
+ frame->buf_size = cpu_to_le32(buf_size);
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_XRGB8888:
+ drm_fb_xrgb8888_to_bgr888(&dst, NULL, &shadow_plane_state->data[0], fb, &damage, &shadow_plane_state->fmtcnv_state);
+ break;
+ default:
+ drm_fb_memcpy(&dst, NULL, &shadow_plane_state->data[0], fb, &damage);
+ break;
+ }
+
+ frame = (void *)frame + struct_size(frame, buf, buf_size);
+ }
+
+ footer = (struct appletbdrm_fb_request_footer *)&request->data[frames_size];
+
+ footer->unk_0c = cpu_to_le32(0xfffe);
+ footer->unk_1c = cpu_to_le32(0x80001);
+ footer->unk_34 = cpu_to_le32(0x80002);
+ footer->unk_4c = cpu_to_le32(0xffff);
+ footer->timestamp = cpu_to_le64(timestamp);
+
+ ret = appletbdrm_send_request(adev, &request->header, request_size);
+ if (ret)
+ goto end_fb_cpu_access;
+
+ ret = appletbdrm_read_response(adev, &response->header, sizeof(*response),
+ APPLETBDRM_MSG_UPDATE_COMPLETE);
+ if (ret)
+ goto end_fb_cpu_access;
+
+ if (response->timestamp != footer->timestamp) {
+ drm_err(drm, "Response timestamp (%llu) doesn't match request timestamp (%llu)\n",
+ le64_to_cpu(response->timestamp), timestamp);
+ goto end_fb_cpu_access;
+ }
+
+end_fb_cpu_access:
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+
+ return ret;
+}
+
+static void appletbdrm_primary_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *old_state)
+{
+ struct appletbdrm_device *adev = drm_to_adev(plane->dev);
+ struct drm_device *drm = plane->dev;
+ struct drm_plane_state *plane_state = plane->state;
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(old_state, plane);
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ appletbdrm_flush_damage(adev, old_plane_state, plane_state);
+
+ drm_dev_exit(idx);
+}
+
+static void appletbdrm_primary_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct appletbdrm_device *adev = drm_to_adev(dev);
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
+
+ appletbdrm_clear_display(adev);
+
+ drm_dev_exit(idx);
+}
+
+static void appletbdrm_primary_plane_reset(struct drm_plane *plane)
+{
+ struct appletbdrm_plane_state *appletbdrm_state;
+
+ WARN_ON(plane->state);
+
+ appletbdrm_state = kzalloc(sizeof(*appletbdrm_state), GFP_KERNEL);
+ if (!appletbdrm_state)
+ return;
+
+ __drm_gem_reset_shadow_plane(plane, &appletbdrm_state->base);
+}
+
+static struct drm_plane_state *appletbdrm_primary_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct drm_shadow_plane_state *new_shadow_plane_state;
+ struct appletbdrm_plane_state *appletbdrm_state;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ appletbdrm_state = kzalloc(sizeof(*appletbdrm_state), GFP_KERNEL);
+ if (!appletbdrm_state)
+ return NULL;
+
+ /* Request and response are not duplicated and are allocated in .atomic_check */
+ appletbdrm_state->request = NULL;
+ appletbdrm_state->response = NULL;
+
+ appletbdrm_state->request_size = 0;
+ appletbdrm_state->frames_size = 0;
+
+ new_shadow_plane_state = &appletbdrm_state->base;
+
+ __drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state);
+
+ return &new_shadow_plane_state->base;
+}
+
+static void appletbdrm_primary_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct appletbdrm_plane_state *appletbdrm_state = to_appletbdrm_plane_state(state);
+
+ kfree(appletbdrm_state->request);
+ kfree(appletbdrm_state->response);
+
+ __drm_gem_destroy_shadow_plane_state(&appletbdrm_state->base);
+
+ kfree(appletbdrm_state);
+}
+
+static const struct drm_plane_helper_funcs appletbdrm_primary_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = appletbdrm_primary_plane_helper_atomic_check,
+ .atomic_update = appletbdrm_primary_plane_helper_atomic_update,
+ .atomic_disable = appletbdrm_primary_plane_helper_atomic_disable,
+};
+
+static const struct drm_plane_funcs appletbdrm_primary_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .reset = appletbdrm_primary_plane_reset,
+ .atomic_duplicate_state = appletbdrm_primary_plane_duplicate_state,
+ .atomic_destroy_state = appletbdrm_primary_plane_destroy_state,
+ .destroy = drm_plane_cleanup,
+};
+
+static enum drm_mode_status appletbdrm_crtc_helper_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct appletbdrm_device *adev = drm_to_adev(crtc->dev);
+
+ return drm_crtc_helper_mode_valid_fixed(crtc, mode, &adev->mode);
+}
+
+static const struct drm_mode_config_funcs appletbdrm_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static const struct drm_connector_funcs appletbdrm_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .destroy = drm_connector_cleanup,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+};
+
+static const struct drm_connector_helper_funcs appletbdrm_connector_helper_funcs = {
+ .get_modes = appletbdrm_connector_helper_get_modes,
+};
+
+static const struct drm_crtc_helper_funcs appletbdrm_crtc_helper_funcs = {
+ .mode_valid = appletbdrm_crtc_helper_mode_valid,
+};
+
+static const struct drm_crtc_funcs appletbdrm_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static const struct drm_encoder_funcs appletbdrm_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static struct drm_gem_object *appletbdrm_driver_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct appletbdrm_device *adev = drm_to_adev(dev);
+
+ if (!adev->dmadev)
+ return ERR_PTR(-ENODEV);
+
+ return drm_gem_prime_import_dev(dev, dma_buf, adev->dmadev);
+}
+
+DEFINE_DRM_GEM_FOPS(appletbdrm_drm_fops);
+
+static const struct drm_driver appletbdrm_drm_driver = {
+ DRM_GEM_SHMEM_DRIVER_OPS,
+ .gem_prime_import = appletbdrm_driver_gem_prime_import,
+ .name = "appletbdrm",
+ .desc = "Apple Touch Bar DRM Driver",
+ .major = 1,
+ .minor = 0,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+ .fops = &appletbdrm_drm_fops,
+};
+
+static int appletbdrm_setup_mode_config(struct appletbdrm_device *adev)
+{
+ struct drm_connector *connector = &adev->connector;
+ struct drm_plane *primary_plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_device *drm = &adev->drm;
+ int ret;
+
+ ret = drmm_mode_config_init(drm);
+ if (ret) {
+ drm_err(drm, "Failed to initialize mode configuration\n");
+ return ret;
+ }
+
+ primary_plane = &adev->primary_plane;
+ ret = drm_universal_plane_init(drm, primary_plane, 0,
+ &appletbdrm_primary_plane_funcs,
+ appletbdrm_primary_plane_formats,
+ ARRAY_SIZE(appletbdrm_primary_plane_formats),
+ NULL,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ drm_err(drm, "Failed to initialize universal plane object\n");
+ return ret;
+ }
+
+ drm_plane_helper_add(primary_plane, &appletbdrm_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ crtc = &adev->crtc;
+ ret = drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
+ &appletbdrm_crtc_funcs, NULL);
+ if (ret) {
+ drm_err(drm, "Failed to initialize CRTC object\n");
+ return ret;
+ }
+
+ drm_crtc_helper_add(crtc, &appletbdrm_crtc_helper_funcs);
+
+ encoder = &adev->encoder;
+ ret = drm_encoder_init(drm, encoder, &appletbdrm_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ if (ret) {
+ drm_err(drm, "Failed to initialize encoder\n");
+ return ret;
+ }
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ /*
+ * The coordinate system used by the device is different from the
+ * coordinate system of the framebuffer in that the x and y axes are
+ * swapped, and that the y axis is inverted; so what the device reports
+ * as the height is actually the width of the framebuffer and vice
+ * versa.
+ */
+ drm->mode_config.max_width = max(adev->height, DRM_SHADOW_PLANE_MAX_WIDTH);
+ drm->mode_config.max_height = max(adev->width, DRM_SHADOW_PLANE_MAX_HEIGHT);
+ drm->mode_config.preferred_depth = APPLETBDRM_BITS_PER_PIXEL;
+ drm->mode_config.funcs = &appletbdrm_mode_config_funcs;
+
+ adev->mode = (struct drm_display_mode) {
+ DRM_MODE_INIT(60, adev->height, adev->width,
+ DRM_MODE_RES_MM(adev->height, 218),
+ DRM_MODE_RES_MM(adev->width, 218))
+ };
+
+ ret = drm_connector_init(drm, connector,
+ &appletbdrm_connector_funcs, DRM_MODE_CONNECTOR_USB);
+ if (ret) {
+ drm_err(drm, "Failed to initialize connector\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(connector, &appletbdrm_connector_helper_funcs);
+
+ ret = drm_connector_set_panel_orientation(connector,
+ DRM_MODE_PANEL_ORIENTATION_RIGHT_UP);
+ if (ret) {
+ drm_err(drm, "Failed to set panel orientation\n");
+ return ret;
+ }
+
+ connector->display_info.non_desktop = true;
+ ret = drm_object_property_set_value(&connector->base,
+ drm->mode_config.non_desktop_property, true);
+ if (ret) {
+ drm_err(drm, "Failed to set non-desktop property\n");
+ return ret;
+ }
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+
+ if (ret) {
+ drm_err(drm, "Failed to initialize simple display pipe\n");
+ return ret;
+ }
+
+ drm_mode_config_reset(drm);
+
+ return 0;
+}
+
+static int appletbdrm_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_endpoint_descriptor *bulk_in, *bulk_out;
+ struct device *dev = &intf->dev;
+ struct appletbdrm_device *adev;
+ struct drm_device *drm = NULL;
+ int ret;
+
+ ret = usb_find_common_endpoints(intf->cur_altsetting, &bulk_in, &bulk_out, NULL, NULL);
+ if (ret) {
+ drm_err(drm, "appletbdrm: Failed to find bulk endpoints\n");
+ return ret;
+ }
+
+ adev = devm_drm_dev_alloc(dev, &appletbdrm_drm_driver, struct appletbdrm_device, drm);
+ if (IS_ERR(adev))
+ return PTR_ERR(adev);
+
+ adev->in_ep = bulk_in->bEndpointAddress;
+ adev->out_ep = bulk_out->bEndpointAddress;
+ adev->dmadev = dev;
+
+ drm = &adev->drm;
+
+ usb_set_intfdata(intf, adev);
+
+ ret = appletbdrm_get_information(adev);
+ if (ret) {
+ drm_err(drm, "Failed to get display information\n");
+ return ret;
+ }
+
+ ret = appletbdrm_signal_readiness(adev);
+ if (ret) {
+ drm_err(drm, "Failed to signal readiness\n");
+ return ret;
+ }
+
+ ret = appletbdrm_setup_mode_config(adev);
+ if (ret) {
+ drm_err(drm, "Failed to setup mode config\n");
+ return ret;
+ }
+
+ ret = drm_dev_register(drm, 0);
+ if (ret) {
+ drm_err(drm, "Failed to register DRM device\n");
+ return ret;
+ }
+
+ ret = appletbdrm_clear_display(adev);
+ if (ret) {
+ drm_err(drm, "Failed to clear display\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void appletbdrm_disconnect(struct usb_interface *intf)
+{
+ struct appletbdrm_device *adev = usb_get_intfdata(intf);
+ struct drm_device *drm = &adev->drm;
+
+ drm_dev_unplug(drm);
+ drm_atomic_helper_shutdown(drm);
+}
+
+static void appletbdrm_shutdown(struct usb_interface *intf)
+{
+ struct appletbdrm_device *adev = usb_get_intfdata(intf);
+
+ /*
+ * The framebuffer needs to be cleared on shutdown since its content
+ * persists across boots
+ */
+ drm_atomic_helper_shutdown(&adev->drm);
+}
+
+static const struct usb_device_id appletbdrm_usb_id_table[] = {
+ { USB_DEVICE_INTERFACE_CLASS(0x05ac, 0x8302, USB_CLASS_AUDIO_VIDEO) },
+ {}
+};
+MODULE_DEVICE_TABLE(usb, appletbdrm_usb_id_table);
+
+static struct usb_driver appletbdrm_usb_driver = {
+ .name = "appletbdrm",
+ .probe = appletbdrm_probe,
+ .disconnect = appletbdrm_disconnect,
+ .shutdown = appletbdrm_shutdown,
+ .id_table = appletbdrm_usb_id_table,
+};
+module_usb_driver(appletbdrm_usb_driver);
+
+MODULE_AUTHOR("Kerem Karabay <kekrby@gmail.com>");
+MODULE_DESCRIPTION("Apple Touch Bar DRM Driver");
+MODULE_LICENSE("GPL");
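As the comments in appletbdrm_setup_mode_config() and appletbdrm_flush_damage() explain, the panel's coordinate system has the x and y axes swapped and the y axis inverted relative to the framebuffer, so every damage rectangle is translated before being sent to the device. The translation embedded in appletbdrm_flush_damage() can be factored out as the following hypothetical helper (not part of the driver, shown only to make the mapping explicit):

/* Hypothetical helper mirroring the translation done in appletbdrm_flush_damage(). */
static void appletbdrm_translate_rect(const struct appletbdrm_device *adev,
                                      const struct drm_rect *damage,
                                      struct appletbdrm_frame *frame)
{
        /* Device x runs along framebuffer y; device y runs against framebuffer x. */
        frame->begin_x = cpu_to_le16(damage->y1);
        frame->begin_y = cpu_to_le16(adev->height - damage->x2);
        frame->width = cpu_to_le16(drm_rect_height(damage));
        frame->height = cpu_to_le16(drm_rect_width(damage));
}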
diff --git a/drivers/gpu/drm/tiny/arcpgu.c b/drivers/gpu/drm/tiny/arcpgu.c
index 2748d1f21d86..7cf0f0ea1bfe 100644
--- a/drivers/gpu/drm/tiny/arcpgu.c
+++ b/drivers/gpu/drm/tiny/arcpgu.c
@@ -253,7 +253,6 @@ static int arcpgu_load(struct arcpgu_drm_private *arcpgu)
struct device_node *encoder_node = NULL, *endpoint_node = NULL;
struct drm_connector *connector = NULL;
struct drm_device *drm = &arcpgu->drm;
- struct resource *res;
int ret;
arcpgu->clk = devm_clk_get(drm->dev, "pxlclk");
@@ -270,8 +269,7 @@ static int arcpgu_load(struct arcpgu_drm_private *arcpgu)
drm->mode_config.max_height = 1080;
drm->mode_config.funcs = &arcpgu_drm_modecfg_funcs;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- arcpgu->regs = devm_ioremap_resource(&pdev->dev, res);
+ arcpgu->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(arcpgu->regs))
return PTR_ERR(arcpgu->regs);
diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index 52ba6c699bc8..5c3b51eb0a97 100644
--- a/drivers/gpu/drm/tiny/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -456,7 +456,7 @@ static void repaper_frame_fixed_repeat(struct repaper_epd *epd, u8 fixed_value,
enum repaper_stage stage)
{
u64 start = local_clock();
- u64 end = start + (epd->factored_stage_time * 1000 * 1000);
+ u64 end = start + ((u64)epd->factored_stage_time * 1000 * 1000);
do {
repaper_frame_fixed(epd, fixed_value, stage);
@@ -467,7 +467,7 @@ static void repaper_frame_data_repeat(struct repaper_epd *epd, const u8 *image,
const u8 *mask, enum repaper_stage stage)
{
u64 start = local_clock();
- u64 end = start + (epd->factored_stage_time * 1000 * 1000);
+ u64 end = start + ((u64)epd->factored_stage_time * 1000 * 1000);
do {
repaper_frame_data(epd, image, mask, stage);
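The two repaper hunks above add a u64 cast because factored_stage_time holds the stage time in milliseconds and the product with 1000 * 1000 (to obtain nanoseconds for comparison against local_clock()) is otherwise computed in 32 bits, wrapping once the stage time exceeds roughly 4.29 seconds. A stand-alone illustration of the wrap, with an arbitrary value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t stage_time_ms = 4800;                            /* arbitrary value > 4294 ms */
        uint32_t wrapped = stage_time_ms * 1000 * 1000;           /* 32-bit multiply wraps */
        uint64_t correct = (uint64_t)stage_time_ms * 1000 * 1000; /* widened before multiply */

        printf("wrapped=%u correct=%llu\n", wrapped, (unsigned long long)correct);
        return 0;
}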
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index dad298127226..40d07a35293a 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,7 +4,7 @@
ttm-y := ttm_tt.o ttm_bo.o ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
ttm_execbuf_util.o ttm_range_manager.o ttm_resource.o ttm_pool.o \
- ttm_device.o ttm_sys_manager.o
+ ttm_device.o ttm_sys_manager.o ttm_backup.o
ttm-$(CONFIG_AGP) += ttm_agp_backend.o
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_backup.c b/drivers/gpu/drm/ttm/ttm_backup.c
new file mode 100644
index 000000000000..93c007f18855
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_backup.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <drm/ttm/ttm_backup.h>
+#include <linux/page-flags.h>
+#include <linux/swap.h>
+
+/*
+ * Casting from randomized struct file * to struct ttm_backup * is fine since
+ * struct ttm_backup is never defined nor dereferenced.
+ */
+static struct file *ttm_backup_to_file(struct ttm_backup *backup)
+{
+ return (void *)backup;
+}
+
+static struct ttm_backup *ttm_file_to_backup(struct file *file)
+{
+ return (void *)file;
+}
+
+/*
+ * Need to map shmem indices to handle since a handle value
+ * of 0 means error, following the swp_entry_t convention.
+ */
+static unsigned long ttm_backup_shmem_idx_to_handle(pgoff_t idx)
+{
+ return (unsigned long)idx + 1;
+}
+
+static pgoff_t ttm_backup_handle_to_shmem_idx(pgoff_t handle)
+{
+ return handle - 1;
+}
+
+/**
+ * ttm_backup_drop() - release memory associated with a handle
+ * @backup: The struct backup pointer used to obtain the handle
+ * @handle: The handle obtained from the @backup_page function.
+ */
+void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle)
+{
+ loff_t start = ttm_backup_handle_to_shmem_idx(handle);
+
+ start <<= PAGE_SHIFT;
+ shmem_truncate_range(file_inode(ttm_backup_to_file(backup)), start,
+ start + PAGE_SIZE - 1);
+}
+
+/**
+ * ttm_backup_copy_page() - Copy the contents of a previously backed
+ * up page
+ * @backup: The struct backup pointer used to back up the page.
+ * @dst: The struct page to copy into.
+ * @handle: The handle returned when the page was backed up.
+ * @intr: Try to perform waits interruptible or at least killable.
+ *
+ * Return: 0 on success, Negative error code on failure, notably
+ * -EINTR if @intr was set to true and a signal is pending.
+ */
+int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst,
+ pgoff_t handle, bool intr)
+{
+ struct file *filp = ttm_backup_to_file(backup);
+ struct address_space *mapping = filp->f_mapping;
+ struct folio *from_folio;
+ pgoff_t idx = ttm_backup_handle_to_shmem_idx(handle);
+
+ from_folio = shmem_read_folio(mapping, idx);
+ if (IS_ERR(from_folio))
+ return PTR_ERR(from_folio);
+
+ copy_highpage(dst, folio_file_page(from_folio, idx));
+ folio_put(from_folio);
+
+ return 0;
+}
+
+/**
+ * ttm_backup_backup_page() - Backup a page
+ * @backup: The struct backup pointer to use.
+ * @page: The page to back up.
+ * @writeback: Whether to perform immediate writeback of the page.
+ * This may have performance implications.
+ * @idx: A unique integer for each page and each struct backup.
+ * This allows the backup implementation to avoid managing
+ * its address space separately.
+ * @page_gfp: The gfp value used when the page was allocated.
+ * This is used for accounting purposes.
+ * @alloc_gfp: The gfp to be used when allocating memory.
+ *
+ * Context: If called from reclaim context, the caller needs to
+ * assert that the shrinker gfp has __GFP_FS set, to avoid
+ * deadlocking on lock_page(). If @writeback is set to true and
+ * called from reclaim context, the caller also needs to assert
+ * that the shrinker gfp has __GFP_IO set, since without it,
+ * we're not allowed to start backup IO.
+ *
+ * Return: A handle on success. Negative error code on failure.
+ *
+ * Note: This function could be extended to back up a folio and
+ * implementations would then split the folio internally if needed.
+ * The drawback is that the caller would then have to keep track of
+ * the folio size and usage.
+ */
+s64
+ttm_backup_backup_page(struct ttm_backup *backup, struct page *page,
+ bool writeback, pgoff_t idx, gfp_t page_gfp,
+ gfp_t alloc_gfp)
+{
+ struct file *filp = ttm_backup_to_file(backup);
+ struct address_space *mapping = filp->f_mapping;
+ unsigned long handle = 0;
+ struct folio *to_folio;
+ int ret;
+
+ to_folio = shmem_read_folio_gfp(mapping, idx, alloc_gfp);
+ if (IS_ERR(to_folio))
+ return PTR_ERR(to_folio);
+
+ folio_mark_accessed(to_folio);
+ folio_lock(to_folio);
+ folio_mark_dirty(to_folio);
+ copy_highpage(folio_file_page(to_folio, idx), page);
+ handle = ttm_backup_shmem_idx_to_handle(idx);
+
+ if (writeback && !folio_mapped(to_folio) &&
+ folio_clear_dirty_for_io(to_folio)) {
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_NONE,
+ .nr_to_write = SWAP_CLUSTER_MAX,
+ .range_start = 0,
+ .range_end = LLONG_MAX,
+ .for_reclaim = 1,
+ };
+ folio_set_reclaim(to_folio);
+ ret = mapping->a_ops->writepage(folio_file_page(to_folio, idx), &wbc);
+ if (!folio_test_writeback(to_folio))
+ folio_clear_reclaim(to_folio);
+ /*
+ * If writepage succeeds, it unlocks the folio.
+ * writepage() errors are otherwise dropped, since writepage()
+ * is only best effort here.
+ */
+ if (ret)
+ folio_unlock(to_folio);
+ } else {
+ folio_unlock(to_folio);
+ }
+
+ folio_put(to_folio);
+
+ return handle;
+}
+
+/**
+ * ttm_backup_fini() - Free the struct backup resources after last use.
+ * @backup: Pointer to the struct backup whose resources to free.
+ *
+ * After a call to this function, it's illegal to use the @backup pointer.
+ */
+void ttm_backup_fini(struct ttm_backup *backup)
+{
+ fput(ttm_backup_to_file(backup));
+}
+
+/**
+ * ttm_backup_bytes_avail() - Report the approximate number of bytes of
+ * backup space left.
+ *
+ * This function is also intended for driver use, to indicate whether a
+ * backup attempt is meaningful.
+ *
+ * Return: An approximate size of backup space available.
+ */
+u64 ttm_backup_bytes_avail(void)
+{
+ /*
+ * The idea behind backing up to shmem is that shmem objects may
+ * eventually be swapped out. So there is no point in backing up if there
+ * is little or no swap space available. But the accuracy of this
+ * number also depends on shmem actually swapping out backed-up
+ * shmem objects without too much buffering.
+ */
+ return (u64)get_nr_swap_pages() << PAGE_SHIFT;
+}
+EXPORT_SYMBOL_GPL(ttm_backup_bytes_avail);
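As a rough, hedged illustration of the driver-use case mentioned in the kernel-doc above, a caller might gate a backup attempt on the available swap space. The helper below is hypothetical and not part of the patch; it only assumes the existing num_pages field of struct ttm_tt.

/* Illustrative only: is a full backup of @tt likely to be worthwhile? */
static bool example_backup_worthwhile(const struct ttm_tt *tt)
{
	return ttm_backup_bytes_avail() >= (u64)tt->num_pages << PAGE_SHIFT;
}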
+
+/**
+ * ttm_backup_shmem_create() - Create a shmem-based struct backup.
+ * @size: The maximum size (in bytes) to back up.
+ *
+ * Create a backup utilizing shmem objects.
+ *
+ * Return: A pointer to a struct ttm_backup on success,
+ * an error pointer on error.
+ */
+struct ttm_backup *ttm_backup_shmem_create(loff_t size)
+{
+ struct file *filp;
+
+ filp = shmem_file_setup("ttm shmem backup", size, 0);
+
+ return ttm_file_to_backup(filp);
+}
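
The helpers above compose into a simple create/backup/restore/fini lifecycle. The sketch below is an editorial illustration, not part of the patch; the function name, the gfp choices and the single-slot setup are assumptions.

static int example_backup_roundtrip(struct page *src, struct page *dst)
{
	struct ttm_backup *backup;
	s64 handle;
	int ret;

	backup = ttm_backup_shmem_create((loff_t)PAGE_SIZE);
	if (IS_ERR(backup))
		return PTR_ERR(backup);

	/* Copy @src into shmem slot 0, without immediate writeback. */
	handle = ttm_backup_backup_page(backup, src, false, 0, GFP_HIGHUSER,
					GFP_KERNEL);
	if (handle < 0) {
		ret = handle;
		goto out_fini;
	}

	/* Later: read the backed-up content into @dst, interruptibly. */
	ret = ttm_backup_copy_page(backup, dst, handle, true);

out_fini:
	ttm_backup_fini(backup);
	return ret;
}
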
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index ea5e49858857..95b86003c50d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -451,7 +451,8 @@ int ttm_bo_evict_first(struct ttm_device *bdev, struct ttm_resource_manager *man
int ret = 0;
spin_lock(&bdev->lru_lock);
- res = ttm_resource_manager_first(man, &cursor);
+ ttm_resource_cursor_init(&cursor, man);
+ res = ttm_resource_manager_first(&cursor);
ttm_resource_cursor_fini(&cursor);
if (!res) {
ret = -ENOENT;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index d939925efa81..15cab9bda17f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -28,7 +28,7 @@
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-
+#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <drm/ttm/ttm_bo.h>
@@ -769,12 +769,10 @@ error_destroy_tt:
return ret;
}
-static bool ttm_lru_walk_trylock(struct ttm_lru_walk *walk,
+static bool ttm_lru_walk_trylock(struct ttm_operation_ctx *ctx,
struct ttm_buffer_object *bo,
bool *needs_unlock)
{
- struct ttm_operation_ctx *ctx = walk->ctx;
-
*needs_unlock = false;
if (dma_resv_trylock(bo->base.resv)) {
@@ -865,7 +863,8 @@ s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
s64 lret;
spin_lock(&bdev->lru_lock);
- ttm_resource_manager_for_each_res(man, &cursor, res) {
+ ttm_resource_cursor_init(&cursor, man);
+ ttm_resource_manager_for_each_res(&cursor, res) {
struct ttm_buffer_object *bo = res->bo;
bool bo_needs_unlock = false;
bool bo_locked = false;
@@ -876,7 +875,7 @@ s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
* since if we do it the other way around, and the trylock fails,
* we need to drop the lru lock to put the bo.
*/
- if (ttm_lru_walk_trylock(walk, bo, &bo_needs_unlock))
+ if (ttm_lru_walk_trylock(walk->ctx, bo, &bo_needs_unlock))
bo_locked = true;
else if (!walk->ticket || walk->ctx->no_wait_gpu ||
walk->trylock_only)
@@ -919,3 +918,242 @@ s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
return progress;
}
+EXPORT_SYMBOL(ttm_lru_walk_for_evict);
+
+static void ttm_bo_lru_cursor_cleanup_bo(struct ttm_bo_lru_cursor *curs)
+{
+ struct ttm_buffer_object *bo = curs->bo;
+
+ if (bo) {
+ if (curs->needs_unlock)
+ dma_resv_unlock(bo->base.resv);
+ ttm_bo_put(bo);
+ curs->bo = NULL;
+ }
+}
+
+/**
+ * ttm_bo_lru_cursor_fini() - Stop using a struct ttm_bo_lru_cursor
+ * and clean up any iteration it was used for.
+ * @curs: The cursor.
+ */
+void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs)
+{
+ spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
+
+ ttm_bo_lru_cursor_cleanup_bo(curs);
+ spin_lock(lru_lock);
+ ttm_resource_cursor_fini(&curs->res_curs);
+ spin_unlock(lru_lock);
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_fini);
+
+/**
+ * ttm_bo_lru_cursor_init() - Initialize a struct ttm_bo_lru_cursor
+ * @curs: The ttm_bo_lru_cursor to initialize.
+ * @man: The ttm resource_manager whose LRU lists to iterate over.
+ * @ctx: The ttm_operation_ctx to govern the locking.
+ *
+ * Initialize a struct ttm_bo_lru_cursor. Currently only trylocked
+ * or prelocked buffer objects are returned, as detailed by
+ * @ctx::resv and @ctx::allow_res_evict. Ticketlocking is not
+ * supported.
+ *
+ * Return: Pointer to @curs. The function does not fail.
+ */
+struct ttm_bo_lru_cursor *
+ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs,
+ struct ttm_resource_manager *man,
+ struct ttm_operation_ctx *ctx)
+{
+ memset(curs, 0, sizeof(*curs));
+ ttm_resource_cursor_init(&curs->res_curs, man);
+ curs->ctx = ctx;
+
+ return curs;
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_init);
+
+static struct ttm_buffer_object *
+ttm_bo_from_res_reserved(struct ttm_resource *res, struct ttm_bo_lru_cursor *curs)
+{
+ struct ttm_buffer_object *bo = res->bo;
+
+ if (!ttm_lru_walk_trylock(curs->ctx, bo, &curs->needs_unlock))
+ return NULL;
+
+ if (!ttm_bo_get_unless_zero(bo)) {
+ if (curs->needs_unlock)
+ dma_resv_unlock(bo->base.resv);
+ return NULL;
+ }
+
+ curs->bo = bo;
+ return bo;
+}
+
+/**
+ * ttm_bo_lru_cursor_next() - Continue iterating a manager's LRU lists
+ * to find and lock a buffer object.
+ * @curs: The cursor initialized using ttm_bo_lru_cursor_init() and
+ * ttm_bo_lru_cursor_first().
+ *
+ * Return: A pointer to a locked and reference-counted buffer object,
+ * or NULL if none could be found and looping should be terminated.
+ */
+struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs)
+{
+ spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
+ struct ttm_resource *res = NULL;
+ struct ttm_buffer_object *bo;
+
+ ttm_bo_lru_cursor_cleanup_bo(curs);
+
+ spin_lock(lru_lock);
+ for (;;) {
+ res = ttm_resource_manager_next(&curs->res_curs);
+ if (!res)
+ break;
+
+ bo = ttm_bo_from_res_reserved(res, curs);
+ if (bo)
+ break;
+ }
+
+ spin_unlock(lru_lock);
+ return res ? bo : NULL;
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_next);
+
+/**
+ * ttm_bo_lru_cursor_first() - Start iterating a manager's LRU lists
+ * to find and lock a buffer object.
+ * @curs: The cursor initialized using ttm_bo_lru_cursor_init().
+ *
+ * Return: A pointer to a locked and reference-counted buffer object,
+ * or NULL if none could be found and looping should be terminated.
+ */
+struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs)
+{
+ spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
+ struct ttm_buffer_object *bo;
+ struct ttm_resource *res;
+
+ spin_lock(lru_lock);
+ res = ttm_resource_manager_first(&curs->res_curs);
+ if (!res) {
+ spin_unlock(lru_lock);
+ return NULL;
+ }
+
+ bo = ttm_bo_from_res_reserved(res, curs);
+ spin_unlock(lru_lock);
+
+ return bo ? bo : ttm_bo_lru_cursor_next(curs);
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_first);
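
Because ttm_bo_lru_cursor_init() returns its argument, the first/next pair composes naturally into a loop. The following caller-side sketch is illustrative only; the function name and the empty loop body are placeholders, and the trylock semantics are those described in the kernel-doc above.

static void example_lru_walk(struct ttm_resource_manager *man,
			     struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_lru_cursor curs;
	struct ttm_buffer_object *bo;

	for (bo = ttm_bo_lru_cursor_first(ttm_bo_lru_cursor_init(&curs, man, ctx));
	     bo; bo = ttm_bo_lru_cursor_next(&curs)) {
		/* @bo is locked and reference-counted here. */
	}

	/* Releases any remaining bo reference/lock and the cursor state. */
	ttm_bo_lru_cursor_fini(&curs);
}
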
+
+/**
+ * ttm_bo_shrink() - Helper to shrink a ttm buffer object.
+ * @ctx: The struct ttm_operation_ctx used for the shrinking operation.
+ * @bo: The buffer object.
+ * @flags: Flags governing the shrinking behaviour.
+ *
+ * The function uses the ttm_tt_backup() functionality to back up or
+ * purge a struct ttm_tt. If the bo is not in system memory, it is first
+ * moved there.
+ *
+ * Return: The number of pages shrunken or purged, or
+ * negative error code on failure.
+ */
+long ttm_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
+ const struct ttm_bo_shrink_flags flags)
+{
+ static const struct ttm_place sys_placement_flags = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .mem_type = TTM_PL_SYSTEM,
+ .flags = 0,
+ };
+ static struct ttm_placement sys_placement = {
+ .num_placement = 1,
+ .placement = &sys_placement_flags,
+ };
+ struct ttm_tt *tt = bo->ttm;
+ long lret;
+
+ dma_resv_assert_held(bo->base.resv);
+
+ if (flags.allow_move && bo->resource->mem_type != TTM_PL_SYSTEM) {
+ int ret = ttm_bo_validate(bo, &sys_placement, ctx);
+
+ /* Consider -ENOMEM and -ENOSPC non-fatal. */
+ if (ret) {
+ if (ret == -ENOMEM || ret == -ENOSPC)
+ ret = -EBUSY;
+ return ret;
+ }
+ }
+
+ ttm_bo_unmap_virtual(bo);
+ lret = ttm_bo_wait_ctx(bo, ctx);
+ if (lret < 0)
+ return lret;
+
+ if (bo->bulk_move) {
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_resource_del_bulk_move(bo->resource, bo);
+ spin_unlock(&bo->bdev->lru_lock);
+ }
+
+ lret = ttm_tt_backup(bo->bdev, tt, (struct ttm_backup_flags)
+ {.purge = flags.purge,
+ .writeback = flags.writeback});
+
+ if (lret <= 0 && bo->bulk_move) {
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_resource_add_bulk_move(bo->resource, bo);
+ spin_unlock(&bo->bdev->lru_lock);
+ }
+
+ if (lret < 0 && lret != -EINTR)
+ return -EBUSY;
+
+ return lret;
+}
+EXPORT_SYMBOL(ttm_bo_shrink);
+
+/**
+ * ttm_bo_shrink_suitable() - Whether a bo is suitable for shrinking
+ * @bo: The candidate for shrinking.
+ * @ctx: The struct ttm_operation_ctx governing the shrinking.
+ *
+ * Check whether the object, given the information available to TTM,
+ * is suitable for shrinking. This function can and should be used
+ * before attempting to shrink an object.
+ *
+ * Return: true if suitable. false if not.
+ */
+bool ttm_bo_shrink_suitable(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
+{
+ return bo->ttm && ttm_tt_is_populated(bo->ttm) && !bo->pin_count &&
+ (!ctx->no_wait_gpu ||
+ dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP));
+}
+EXPORT_SYMBOL(ttm_bo_shrink_suitable);
+
+/**
+ * ttm_bo_shrink_avoid_wait() - Whether to avoid waiting for GPU
+ * during shrinking
+ *
+ * In some situations, such as direct reclaim, waiting (in particular GPU waiting)
+ * should be avoided since it may stall a system that could otherwise make progress
+ * by shrinking something else that is less time-consuming.
+ *
+ * Return: true if gpu waiting should be avoided, false if not.
+ */
+bool ttm_bo_shrink_avoid_wait(void)
+{
+ return !current_is_kswapd();
+}
+EXPORT_SYMBOL(ttm_bo_shrink_avoid_wait);
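
A hedged sketch of how the three helpers above might be combined from a driver shrinker's scan path. The function name and flag choices are assumptions, and the caller is assumed to already hold the bo's reservation lock.

static long example_shrink_one(struct ttm_buffer_object *bo)
{
	struct ttm_operation_ctx ctx = {
		.no_wait_gpu = ttm_bo_shrink_avoid_wait(),
	};
	const struct ttm_bo_shrink_flags flags = {
		.allow_move = true,
		.writeback = true,
		.purge = false,
	};

	if (!ttm_bo_shrink_suitable(bo, &ctx))
		return 0;

	/* bo->base.resv must be held, see dma_resv_assert_held() above. */
	return ttm_bo_shrink(&ctx, bo, flags);
}
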
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 8504dbe19c1a..83b10706ba89 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -41,12 +41,20 @@
#include <asm/set_memory.h>
#endif
+#include <drm/ttm/ttm_backup.h>
#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_bo.h>
#include "ttm_module.h"
+#ifdef CONFIG_FAULT_INJECTION
+#include <linux/fault-inject.h>
+static DECLARE_FAULT_ATTR(backup_fault_inject);
+#else
+#define should_fail(...) false
+#endif
+
/**
* struct ttm_pool_dma - Helper object for coherent DMA mappings
*
@@ -58,6 +66,52 @@ struct ttm_pool_dma {
unsigned long vaddr;
};
+/**
+ * struct ttm_pool_alloc_state - Current state of the tt page allocation process
+ * @pages: Pointer to the next tt page pointer to populate.
+ * @caching_divide: Pointer to the first page pointer whose page has a staged but
+ * not committed caching transition from write-back to @tt_caching.
+ * @dma_addr: Pointer to the next tt dma_address entry to populate if any.
+ * @remaining_pages: Remaining pages to populate.
+ * @tt_caching: The requested cpu-caching for the pages allocated.
+ */
+struct ttm_pool_alloc_state {
+ struct page **pages;
+ struct page **caching_divide;
+ dma_addr_t *dma_addr;
+ pgoff_t remaining_pages;
+ enum ttm_caching tt_caching;
+};
+
+/**
+ * struct ttm_pool_tt_restore - State representing restore from backup
+ * @pool: The pool used for page allocation while restoring.
+ * @snapshot_alloc: A snapshot of the most recent struct ttm_pool_alloc_state.
+ * @alloced_page: Pointer to the page most recently allocated from a pool or system.
+ * @first_dma: The dma address corresponding to @alloced_page if dma_mapping
+ * is requested.
+ * @alloced_pages: The number of allocated pages present in the struct ttm_tt
+ * page vector from this restore session.
+ * @restored_pages: The number of 4K pages restored for @alloced_page (which
+ * is typically a multi-order page).
+ * @page_caching: The struct ttm_tt requested caching
+ * @order: The order of @alloced_page.
+ *
+ * Recovery from backup might fail when we've recovered less than the
+ * full ttm_tt. In order not to lose any data (yet), keep information
+ * around that allows us to restart a failed ttm backup recovery.
+ */
+struct ttm_pool_tt_restore {
+ struct ttm_pool *pool;
+ struct ttm_pool_alloc_state snapshot_alloc;
+ struct page *alloced_page;
+ dma_addr_t first_dma;
+ pgoff_t alloced_pages;
+ pgoff_t restored_pages;
+ enum ttm_caching page_caching;
+ unsigned int order;
+};
+
static unsigned long page_pool_size;
MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
@@ -160,34 +214,33 @@ static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
kfree(dma);
}
-/* Apply a new caching to an array of pages */
-static int ttm_pool_apply_caching(struct page **first, struct page **last,
- enum ttm_caching caching)
+/* Apply any cpu-caching deferred during page allocation */
+static int ttm_pool_apply_caching(struct ttm_pool_alloc_state *alloc)
{
#ifdef CONFIG_X86
- unsigned int num_pages = last - first;
+ unsigned int num_pages = alloc->pages - alloc->caching_divide;
if (!num_pages)
return 0;
- switch (caching) {
+ switch (alloc->tt_caching) {
case ttm_cached:
break;
case ttm_write_combined:
- return set_pages_array_wc(first, num_pages);
+ return set_pages_array_wc(alloc->caching_divide, num_pages);
case ttm_uncached:
- return set_pages_array_uc(first, num_pages);
+ return set_pages_array_uc(alloc->caching_divide, num_pages);
}
#endif
+ alloc->caching_divide = alloc->pages;
return 0;
}
-/* Map pages of 1 << order size and fill the DMA address array */
+/* DMA Map pages of 1 << order size and return the resulting dma_address. */
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
- struct page *p, dma_addr_t **dma_addr)
+ struct page *p, dma_addr_t *dma_addr)
{
dma_addr_t addr;
- unsigned int i;
if (pool->use_dma_alloc) {
struct ttm_pool_dma *dma = (void *)p->private;
@@ -201,10 +254,7 @@ static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
return -EFAULT;
}
- for (i = 1 << order; i ; --i) {
- *(*dma_addr)++ = addr;
- addr += PAGE_SIZE;
- }
+ *dma_addr = addr;
return 0;
}
@@ -354,24 +404,235 @@ static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
return p->private;
}
-/* Called when we got a page, either from a pool or newly allocated */
+/*
+ * Split larger pages so that we can free each PAGE_SIZE page as soon
+ * as it has been backed up, in order to avoid memory pressure during
+ * reclaim.
+ */
+static void ttm_pool_split_for_swap(struct ttm_pool *pool, struct page *p)
+{
+ unsigned int order = ttm_pool_page_order(pool, p);
+ pgoff_t nr;
+
+ if (!order)
+ return;
+
+ split_page(p, order);
+ nr = 1UL << order;
+ while (nr--)
+ (p++)->private = 0;
+}
+
+/**
+ * DOC: Partial backup and restoration of a struct ttm_tt.
+ *
+ * Swapout using ttm_backup_backup_page() and swapin using
+ * ttm_backup_copy_page() may fail.
+ * The former most likely due to lack of swap-space or memory, the latter due
+ * to lack of memory or because of signal interruption during waits.
+ *
+ * Backup failure is easily handled by using a ttm_tt pages vector that holds
+ * both backup handles and page pointers. This has to be taken into account when
+ * restoring such a ttm_tt from backup, and when freeing it while backed up.
+ * When restoring, for simplicity, new pages are actually allocated from the
+ * pool and the contents of any old pages are copied in, after which the old
+ * pages are released.
+ *
+ * For restoration failures, the struct ttm_pool_tt_restore holds sufficient state
+ * to be able to resume an interrupted restore, and that structure is freed once
+ * the restoration is complete. If the struct ttm_tt is destroyed while there
+ * is a valid struct ttm_pool_tt_restore attached, that is also properly taken
+ * care of.
+ */
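
A minimal sketch, assuming the page-pointer/handle helpers used later in this file, of how such a mixed ttm_tt page vector is interpreted. Illustrative only; the function name is hypothetical.

static void example_walk_mixed_vector(struct ttm_tt *tt)
{
	pgoff_t i;

	for (i = 0; i < tt->num_pages; ++i) {
		struct page *p = tt->pages[i];

		if (ttm_backup_page_ptr_is_handle(p)) {
			/* Entry is a backup handle; content lives in shmem. */
			unsigned long handle = ttm_backup_page_ptr_to_handle(p);

			(void)handle;
		} else if (p) {
			/* Entry is a regular, still-resident page. */
		}
	}
}
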
+
+/* Is restore ongoing for the currently allocated page? */
+static bool ttm_pool_restore_valid(const struct ttm_pool_tt_restore *restore)
+{
+ return restore && restore->restored_pages < (1 << restore->order);
+}
+
+/* DMA unmap and free a multi-order page, either to the relevant pool or to system. */
+static pgoff_t ttm_pool_unmap_and_free(struct ttm_pool *pool, struct page *page,
+ const dma_addr_t *dma_addr, enum ttm_caching caching)
+{
+ struct ttm_pool_type *pt = NULL;
+ unsigned int order;
+ pgoff_t nr;
+
+ if (pool) {
+ order = ttm_pool_page_order(pool, page);
+ nr = (1UL << order);
+ if (dma_addr)
+ ttm_pool_unmap(pool, *dma_addr, nr);
+
+ pt = ttm_pool_select_type(pool, caching, order);
+ } else {
+ order = page->private;
+ nr = (1UL << order);
+ }
+
+ if (pt)
+ ttm_pool_type_give(pt, page);
+ else
+ ttm_pool_free_page(pool, caching, order, page);
+
+ return nr;
+}
+
+/* Populate the page-array using the most recent allocated multi-order page. */
+static void ttm_pool_allocated_page_commit(struct page *allocated,
+ dma_addr_t first_dma,
+ struct ttm_pool_alloc_state *alloc,
+ pgoff_t nr)
+{
+ pgoff_t i;
+
+ for (i = 0; i < nr; ++i)
+ *alloc->pages++ = allocated++;
+
+ alloc->remaining_pages -= nr;
+
+ if (!alloc->dma_addr)
+ return;
+
+ for (i = 0; i < nr; ++i) {
+ *alloc->dma_addr++ = first_dma;
+ first_dma += PAGE_SIZE;
+ }
+}
+
+/*
+ * When restoring, restore backed-up content to the newly allocated page and
+ * if successful, populate the page-table and dma-address arrays.
+ */
+static int ttm_pool_restore_commit(struct ttm_pool_tt_restore *restore,
+ struct ttm_backup *backup,
+ const struct ttm_operation_ctx *ctx,
+ struct ttm_pool_alloc_state *alloc)
+
+{
+ pgoff_t i, nr = 1UL << restore->order;
+ struct page **first_page = alloc->pages;
+ struct page *p;
+ int ret = 0;
+
+ for (i = restore->restored_pages; i < nr; ++i) {
+ p = first_page[i];
+ if (ttm_backup_page_ptr_is_handle(p)) {
+ unsigned long handle = ttm_backup_page_ptr_to_handle(p);
+
+ if (IS_ENABLED(CONFIG_FAULT_INJECTION) && ctx->interruptible &&
+ should_fail(&backup_fault_inject, 1)) {
+ ret = -EINTR;
+ break;
+ }
+
+ if (handle == 0) {
+ restore->restored_pages++;
+ continue;
+ }
+
+ ret = ttm_backup_copy_page(backup, restore->alloced_page + i,
+ handle, ctx->interruptible);
+ if (ret)
+ break;
+
+ ttm_backup_drop(backup, handle);
+ } else if (p) {
+ /*
+ * We could probably avoid splitting the old page
+ * using clever logic, but ATM we don't care, as
+ * we prioritize releasing memory ASAP. Note that
+ * here, the old retained page is always write-back
+ * cached.
+ */
+ ttm_pool_split_for_swap(restore->pool, p);
+ copy_highpage(restore->alloced_page + i, p);
+ __free_pages(p, 0);
+ }
+
+ restore->restored_pages++;
+ first_page[i] = ttm_backup_handle_to_page_ptr(0);
+ }
+
+ if (ret) {
+ if (!restore->restored_pages) {
+ dma_addr_t *dma_addr = alloc->dma_addr ? &restore->first_dma : NULL;
+
+ ttm_pool_unmap_and_free(restore->pool, restore->alloced_page,
+ dma_addr, restore->page_caching);
+ restore->restored_pages = nr;
+ }
+ return ret;
+ }
+
+ ttm_pool_allocated_page_commit(restore->alloced_page, restore->first_dma,
+ alloc, nr);
+ if (restore->page_caching == alloc->tt_caching || PageHighMem(restore->alloced_page))
+ alloc->caching_divide = alloc->pages;
+ restore->snapshot_alloc = *alloc;
+ restore->alloced_pages += nr;
+
+ return 0;
+}
+
+/* If restoring, save information needed for ttm_pool_restore_commit(). */
+static void
+ttm_pool_page_allocated_restore(struct ttm_pool *pool, unsigned int order,
+ struct page *p,
+ enum ttm_caching page_caching,
+ dma_addr_t first_dma,
+ struct ttm_pool_tt_restore *restore,
+ const struct ttm_pool_alloc_state *alloc)
+{
+ restore->pool = pool;
+ restore->order = order;
+ restore->restored_pages = 0;
+ restore->page_caching = page_caching;
+ restore->first_dma = first_dma;
+ restore->alloced_page = p;
+ restore->snapshot_alloc = *alloc;
+}
+
+/*
+ * Called when we have obtained a page, either from a pool or newly allocated.
+ * If needed, dma-map the page and populate the dma address array, and
+ * populate the page address array.
+ * If the caching is consistent, apply any deferred caching. Otherwise,
+ * stage this page for an upcoming deferred caching update.
+ */
static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
- struct page *p, dma_addr_t **dma_addr,
- unsigned long *num_pages,
- struct page ***pages)
+ struct page *p, enum ttm_caching page_caching,
+ struct ttm_pool_alloc_state *alloc,
+ struct ttm_pool_tt_restore *restore)
{
- unsigned int i;
- int r;
+ bool caching_consistent;
+ dma_addr_t first_dma;
+ int r = 0;
+
+ caching_consistent = (page_caching == alloc->tt_caching) || PageHighMem(p);
- if (*dma_addr) {
- r = ttm_pool_map(pool, order, p, dma_addr);
+ if (caching_consistent) {
+ r = ttm_pool_apply_caching(alloc);
if (r)
return r;
}
- *num_pages -= 1 << order;
- for (i = 1 << order; i; --i, ++(*pages), ++p)
- **pages = p;
+ if (alloc->dma_addr) {
+ r = ttm_pool_map(pool, order, p, &first_dma);
+ if (r)
+ return r;
+ }
+
+ if (restore) {
+ ttm_pool_page_allocated_restore(pool, order, p, page_caching,
+ first_dma, restore, alloc);
+ } else {
+ ttm_pool_allocated_page_commit(p, first_dma, alloc, 1UL << order);
+
+ if (caching_consistent)
+ alloc->caching_divide = alloc->pages;
+ }
return 0;
}
@@ -394,53 +655,62 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
pgoff_t start_page, pgoff_t end_page)
{
struct page **pages = &tt->pages[start_page];
- unsigned int order;
+ struct ttm_backup *backup = tt->backup;
pgoff_t i, nr;
for (i = start_page; i < end_page; i += nr, pages += nr) {
- struct ttm_pool_type *pt = NULL;
+ struct page *p = *pages;
- order = ttm_pool_page_order(pool, *pages);
- nr = (1UL << order);
- if (tt->dma_address)
- ttm_pool_unmap(pool, tt->dma_address[i], nr);
+ nr = 1;
+ if (ttm_backup_page_ptr_is_handle(p)) {
+ unsigned long handle = ttm_backup_page_ptr_to_handle(p);
- pt = ttm_pool_select_type(pool, caching, order);
- if (pt)
- ttm_pool_type_give(pt, *pages);
- else
- ttm_pool_free_page(pool, caching, order, *pages);
+ if (handle != 0)
+ ttm_backup_drop(backup, handle);
+ } else if (p) {
+ dma_addr_t *dma_addr = tt->dma_address ?
+ tt->dma_address + i : NULL;
+
+ nr = ttm_pool_unmap_and_free(pool, p, dma_addr, caching);
+ }
}
}
-/**
- * ttm_pool_alloc - Fill a ttm_tt object
- *
- * @pool: ttm_pool to use
- * @tt: ttm_tt object to fill
- * @ctx: operation context
- *
- * Fill the ttm_tt object with pages and also make sure to DMA map them when
- * necessary.
- *
- * Returns: 0 on successe, negative error code otherwise.
+static void ttm_pool_alloc_state_init(const struct ttm_tt *tt,
+ struct ttm_pool_alloc_state *alloc)
+{
+ alloc->pages = tt->pages;
+ alloc->caching_divide = tt->pages;
+ alloc->dma_addr = tt->dma_address;
+ alloc->remaining_pages = tt->num_pages;
+ alloc->tt_caching = tt->caching;
+}
+
+/*
+ * Find a suitable allocation order based on highest desired order
+ * and number of remaining pages
*/
-int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
- struct ttm_operation_ctx *ctx)
+static unsigned int ttm_pool_alloc_find_order(unsigned int highest,
+ const struct ttm_pool_alloc_state *alloc)
+{
+ return min_t(unsigned int, highest, __fls(alloc->remaining_pages));
+}
+
+static int __ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ const struct ttm_operation_ctx *ctx,
+ struct ttm_pool_alloc_state *alloc,
+ struct ttm_pool_tt_restore *restore)
{
- pgoff_t num_pages = tt->num_pages;
- dma_addr_t *dma_addr = tt->dma_address;
- struct page **caching = tt->pages;
- struct page **pages = tt->pages;
enum ttm_caching page_caching;
gfp_t gfp_flags = GFP_USER;
pgoff_t caching_divide;
unsigned int order;
+ bool allow_pools;
struct page *p;
int r;
- WARN_ON(!num_pages || ttm_tt_is_populated(tt));
- WARN_ON(dma_addr && !pool->dev);
+ WARN_ON(!alloc->remaining_pages || ttm_tt_is_populated(tt));
+ WARN_ON(alloc->dma_addr && !pool->dev);
if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
gfp_flags |= __GFP_ZERO;
@@ -453,86 +723,155 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
else
gfp_flags |= GFP_HIGHUSER;
- for (order = min_t(unsigned int, MAX_PAGE_ORDER, __fls(num_pages));
- num_pages;
- order = min_t(unsigned int, order, __fls(num_pages))) {
+ page_caching = tt->caching;
+ allow_pools = true;
+ for (order = ttm_pool_alloc_find_order(MAX_PAGE_ORDER, alloc);
+ alloc->remaining_pages;
+ order = ttm_pool_alloc_find_order(order, alloc)) {
struct ttm_pool_type *pt;
- page_caching = tt->caching;
- pt = ttm_pool_select_type(pool, tt->caching, order);
- p = pt ? ttm_pool_type_take(pt) : NULL;
- if (p) {
- r = ttm_pool_apply_caching(caching, pages,
- tt->caching);
- if (r)
- goto error_free_page;
-
- caching = pages;
- do {
- r = ttm_pool_page_allocated(pool, order, p,
- &dma_addr,
- &num_pages,
- &pages);
- if (r)
- goto error_free_page;
-
- caching = pages;
- if (num_pages < (1 << order))
- break;
-
- p = ttm_pool_type_take(pt);
- } while (p);
- }
-
- page_caching = ttm_cached;
- while (num_pages >= (1 << order) &&
- (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {
-
- if (PageHighMem(p)) {
- r = ttm_pool_apply_caching(caching, pages,
- tt->caching);
- if (r)
- goto error_free_page;
- caching = pages;
- }
- r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
- &num_pages, &pages);
- if (r)
- goto error_free_page;
- if (PageHighMem(p))
- caching = pages;
+ /* First, try to allocate a page from a pool if one exists. */
+ p = NULL;
+ pt = ttm_pool_select_type(pool, page_caching, order);
+ if (pt && allow_pools)
+ p = ttm_pool_type_take(pt);
+ /*
+ * If that fails or previously failed, allocate from system.
+ * Note that this also disallows additional pool allocations using
+ * write-back cached pools of the same order. Consider removing
+ * that behaviour.
+ */
+ if (!p) {
+ page_caching = ttm_cached;
+ allow_pools = false;
+ p = ttm_pool_alloc_page(pool, gfp_flags, order);
}
-
+ /* If that fails, lower the order if possible and retry. */
if (!p) {
if (order) {
--order;
+ page_caching = tt->caching;
+ allow_pools = true;
continue;
}
r = -ENOMEM;
goto error_free_all;
}
+ r = ttm_pool_page_allocated(pool, order, p, page_caching, alloc,
+ restore);
+ if (r)
+ goto error_free_page;
+
+ if (ttm_pool_restore_valid(restore)) {
+ r = ttm_pool_restore_commit(restore, tt->backup, ctx, alloc);
+ if (r)
+ goto error_free_all;
+ }
}
- r = ttm_pool_apply_caching(caching, pages, tt->caching);
+ r = ttm_pool_apply_caching(alloc);
if (r)
goto error_free_all;
+ kfree(tt->restore);
+ tt->restore = NULL;
+
return 0;
error_free_page:
ttm_pool_free_page(pool, page_caching, order, p);
error_free_all:
- num_pages = tt->num_pages - num_pages;
- caching_divide = caching - tt->pages;
+ if (tt->restore)
+ return r;
+
+ caching_divide = alloc->caching_divide - tt->pages;
ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
- ttm_pool_free_range(pool, tt, ttm_cached, caching_divide, num_pages);
+ ttm_pool_free_range(pool, tt, ttm_cached, caching_divide,
+ tt->num_pages - alloc->remaining_pages);
return r;
}
+
+/**
+ * ttm_pool_alloc - Fill a ttm_tt object
+ *
+ * @pool: ttm_pool to use
+ * @tt: ttm_tt object to fill
+ * @ctx: operation context
+ *
+ * Fill the ttm_tt object with pages and also make sure to DMA map them when
+ * necessary.
+ *
+ * Returns: 0 on success, negative error code otherwise.
+ */
+int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ struct ttm_operation_ctx *ctx)
+{
+ struct ttm_pool_alloc_state alloc;
+
+ if (WARN_ON(ttm_tt_is_backed_up(tt)))
+ return -EINVAL;
+
+ ttm_pool_alloc_state_init(tt, &alloc);
+
+ return __ttm_pool_alloc(pool, tt, ctx, &alloc, NULL);
+}
EXPORT_SYMBOL(ttm_pool_alloc);
/**
+ * ttm_pool_restore_and_alloc - Fill a ttm_tt, restoring previously backed-up
+ * content.
+ *
+ * @pool: ttm_pool to use
+ * @tt: ttm_tt object to fill
+ * @ctx: operation context
+ *
+ * Fill the ttm_tt object with pages and also make sure to DMA map them when
+ * necessary. Read in backed-up content.
+ *
+ * Returns: 0 on success, negative error code otherwise.
+ */
+int ttm_pool_restore_and_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ const struct ttm_operation_ctx *ctx)
+{
+ struct ttm_pool_alloc_state alloc;
+
+ if (WARN_ON(!ttm_tt_is_backed_up(tt)))
+ return -EINVAL;
+
+ if (!tt->restore) {
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
+
+ ttm_pool_alloc_state_init(tt, &alloc);
+ if (ctx->gfp_retry_mayfail)
+ gfp |= __GFP_RETRY_MAYFAIL;
+
+ tt->restore = kzalloc(sizeof(*tt->restore), gfp);
+ if (!tt->restore)
+ return -ENOMEM;
+
+ tt->restore->snapshot_alloc = alloc;
+ tt->restore->pool = pool;
+ tt->restore->restored_pages = 1;
+ } else {
+ struct ttm_pool_tt_restore *restore = tt->restore;
+ int ret;
+
+ alloc = restore->snapshot_alloc;
+ if (ttm_pool_restore_valid(tt->restore)) {
+ ret = ttm_pool_restore_commit(restore, tt->backup, ctx, &alloc);
+ if (ret)
+ return ret;
+ }
+ if (!alloc.remaining_pages)
+ return 0;
+ }
+
+ return __ttm_pool_alloc(pool, tt, ctx, &alloc, tt->restore);
+}
+
+/**
* ttm_pool_free - Free the backing pages from a ttm_tt object
*
* @pool: Pool to give pages back to.
@@ -550,6 +889,169 @@ void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
EXPORT_SYMBOL(ttm_pool_free);
/**
+ * ttm_pool_drop_backed_up() - Release content of a swapped-out struct ttm_tt
+ * @tt: The struct ttm_tt.
+ *
+ * Release handles with associated content or any remaining pages of
+ * a backed-up struct ttm_tt.
+ */
+void ttm_pool_drop_backed_up(struct ttm_tt *tt)
+{
+ struct ttm_pool_tt_restore *restore;
+ pgoff_t start_page = 0;
+
+ WARN_ON(!ttm_tt_is_backed_up(tt));
+
+ restore = tt->restore;
+
+ /*
+ * Unmap and free any uncommitted restore page.
+ * Any tt page-array backup entries that have already been read back
+ * have been cleared at this point.
+ */
+ if (ttm_pool_restore_valid(restore)) {
+ dma_addr_t *dma_addr = tt->dma_address ? &restore->first_dma : NULL;
+
+ ttm_pool_unmap_and_free(restore->pool, restore->alloced_page,
+ dma_addr, restore->page_caching);
+ restore->restored_pages = 1UL << restore->order;
+ }
+
+ /*
+ * If a restore is ongoing, part of the tt pages may have a
+ * caching different from writeback.
+ */
+ if (restore) {
+ pgoff_t mid = restore->snapshot_alloc.caching_divide - tt->pages;
+
+ start_page = restore->alloced_pages;
+ WARN_ON(mid > start_page);
+ /* Pages that might be dma-mapped and non-cached */
+ ttm_pool_free_range(restore->pool, tt, tt->caching,
+ 0, mid);
+ /* Pages that might be dma-mapped but cached */
+ ttm_pool_free_range(restore->pool, tt, ttm_cached,
+ mid, restore->alloced_pages);
+ kfree(restore);
+ tt->restore = NULL;
+ }
+
+ ttm_pool_free_range(NULL, tt, ttm_cached, start_page, tt->num_pages);
+}
+
+/**
+ * ttm_pool_backup() - Back up or purge a struct ttm_tt
+ * @pool: The pool used when allocating the struct ttm_tt.
+ * @tt: The struct ttm_tt.
+ * @flags: Flags to govern the backup behaviour.
+ *
+ * Back up or purge a struct ttm_tt. If @purge is true, then
+ * all pages will be freed directly to the system rather than to the pool
+ * they were allocated from, making the function behave similarly to
+ * ttm_pool_free(). If @purge is false the pages will be backed up instead,
+ * exchanged for handles.
+ * A subsequent call to ttm_pool_restore_and_alloc() will then read back the content and
+ * a subsequent call to ttm_pool_drop_backed_up() will drop it.
+ * If backup of a page fails for whatever reason, @tt will still be
+ * partially backed up, retaining those pages for which backup failed.
+ * In that case, this function can be retried, possibly after freeing up
+ * memory resources.
+ *
+ * Return: Number of pages actually backed up or freed, or negative
+ * error code on error.
+ */
+long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt,
+ const struct ttm_backup_flags *flags)
+{
+ struct ttm_backup *backup = tt->backup;
+ struct page *page;
+ unsigned long handle;
+ gfp_t alloc_gfp;
+ gfp_t gfp;
+ int ret = 0;
+ pgoff_t shrunken = 0;
+ pgoff_t i, num_pages;
+
+ if (WARN_ON(ttm_tt_is_backed_up(tt)))
+ return -EINVAL;
+
+ if ((!ttm_backup_bytes_avail() && !flags->purge) ||
+ pool->use_dma_alloc || ttm_tt_is_backed_up(tt))
+ return -EBUSY;
+
+#ifdef CONFIG_X86
+ /* Anything returned to the system needs to be cached. */
+ if (tt->caching != ttm_cached)
+ set_pages_array_wb(tt->pages, tt->num_pages);
+#endif
+
+ if (tt->dma_address || flags->purge) {
+ for (i = 0; i < tt->num_pages; i += num_pages) {
+ unsigned int order;
+
+ page = tt->pages[i];
+ if (unlikely(!page)) {
+ num_pages = 1;
+ continue;
+ }
+
+ order = ttm_pool_page_order(pool, page);
+ num_pages = 1UL << order;
+ if (tt->dma_address)
+ ttm_pool_unmap(pool, tt->dma_address[i],
+ num_pages);
+ if (flags->purge) {
+ shrunken += num_pages;
+ page->private = 0;
+ __free_pages(page, order);
+ memset(tt->pages + i, 0,
+ num_pages * sizeof(*tt->pages));
+ }
+ }
+ }
+
+ if (flags->purge)
+ return shrunken;
+
+ if (pool->use_dma32)
+ gfp = GFP_DMA32;
+ else
+ gfp = GFP_HIGHUSER;
+
+ alloc_gfp = GFP_KERNEL | __GFP_HIGH | __GFP_NOWARN | __GFP_RETRY_MAYFAIL;
+
+ num_pages = tt->num_pages;
+
+ /* Simulate a partial backup failure via fault injection by shrinking only half of the pages. */
+ if (IS_ENABLED(CONFIG_FAULT_INJECTION) && should_fail(&backup_fault_inject, 1))
+ num_pages = DIV_ROUND_UP(num_pages, 2);
+
+ for (i = 0; i < num_pages; ++i) {
+ s64 shandle;
+
+ page = tt->pages[i];
+ if (unlikely(!page))
+ continue;
+
+ ttm_pool_split_for_swap(pool, page);
+
+ shandle = ttm_backup_backup_page(backup, page, flags->writeback, i,
+ gfp, alloc_gfp);
+ if (shandle < 0) {
+ /* We allow partially shrunken tts */
+ ret = shandle;
+ break;
+ }
+ handle = shandle;
+ tt->pages[i] = ttm_backup_handle_to_page_ptr(handle);
+ put_page(page);
+ shrunken++;
+ }
+
+ return shrunken ? shrunken : ret;
+}
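
A hedged sketch of the purge vs. back-up distinction described above, at the pool level. Drivers are expected to go through the higher-level ttm_bo_shrink()/ttm_tt_backup() paths, so this is illustrative only and the wrapper name is hypothetical.

static long example_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt,
				bool purge_only)
{
	const struct ttm_backup_flags flags = {
		.purge = purge_only,
		/* Writeback only makes sense when actually backing up. */
		.writeback = !purge_only,
	};

	return ttm_pool_backup(pool, tt, &flags);
}
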
+
+/**
* ttm_pool_init - Initialize a pool
*
* @pool: the pool to initialize
@@ -810,6 +1312,10 @@ int ttm_pool_mgr_init(unsigned long num_pages)
&ttm_pool_debugfs_globals_fops);
debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
&ttm_pool_debugfs_shrink_fops);
+#ifdef CONFIG_FAULT_INJECTION
+ fault_create_debugfs_attr("backup_fault_inject", ttm_debugfs_root,
+ &backup_fault_inject);
+#endif
#endif
mm_shrinker = shrinker_alloc(0, "drm-ttm_pool");
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index cc29bbf3eabb..7e5a60c55813 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -83,6 +83,23 @@ static void ttm_bulk_move_drop_cursors(struct ttm_lru_bulk_move *bulk)
}
/**
+ * ttm_resource_cursor_init() - Initialize a struct ttm_resource_cursor
+ * @cursor: The cursor to initialize.
+ * @man: The resource manager.
+ *
+ * Initialize the cursor before using it for iteration.
+ */
+void ttm_resource_cursor_init(struct ttm_resource_cursor *cursor,
+ struct ttm_resource_manager *man)
+{
+ cursor->priority = 0;
+ cursor->man = man;
+ ttm_lru_item_init(&cursor->hitch, TTM_LRU_HITCH);
+ INIT_LIST_HEAD(&cursor->bulk_link);
+ INIT_LIST_HEAD(&cursor->hitch.link);
+}
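
With initialization split out of ttm_resource_manager_first(), iteration now follows an init/iterate/fini pattern. A minimal sketch, mirroring the callers updated elsewhere in this series (illustrative only; the function name and empty loop body are placeholders):

static void example_resource_iter(struct ttm_resource_manager *man)
{
	struct ttm_resource_cursor cursor;
	struct ttm_resource *res;

	ttm_resource_cursor_init(&cursor, man);

	spin_lock(&man->bdev->lru_lock);
	ttm_resource_manager_for_each_res(&cursor, res) {
		/* Inspect @res here. */
	}
	ttm_resource_cursor_fini(&cursor);
	spin_unlock(&man->bdev->lru_lock);
}
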
+
+/**
* ttm_resource_cursor_fini() - Finalize the LRU list cursor usage
* @cursor: The struct ttm_resource_cursor to finalize.
*
@@ -252,11 +269,16 @@ static bool ttm_resource_is_swapped(struct ttm_resource *res, struct ttm_buffer_
return ttm_tt_is_swapped(bo->ttm);
}
+static bool ttm_resource_unevictable(struct ttm_resource *res, struct ttm_buffer_object *bo)
+{
+ return bo->pin_count || ttm_resource_is_swapped(res, bo);
+}
+
/* Add the resource to a bulk move if the BO is configured for it */
void ttm_resource_add_bulk_move(struct ttm_resource *res,
struct ttm_buffer_object *bo)
{
- if (bo->bulk_move && !bo->pin_count && !ttm_resource_is_swapped(res, bo))
+ if (bo->bulk_move && !ttm_resource_unevictable(res, bo))
ttm_lru_bulk_move_add(bo->bulk_move, res);
}
@@ -264,7 +286,7 @@ void ttm_resource_add_bulk_move(struct ttm_resource *res,
void ttm_resource_del_bulk_move(struct ttm_resource *res,
struct ttm_buffer_object *bo)
{
- if (bo->bulk_move && !bo->pin_count && !ttm_resource_is_swapped(res, bo))
+ if (bo->bulk_move && !ttm_resource_unevictable(res, bo))
ttm_lru_bulk_move_del(bo->bulk_move, res);
}
@@ -276,10 +298,10 @@ void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
lockdep_assert_held(&bo->bdev->lru_lock);
- if (bo->pin_count || ttm_resource_is_swapped(res, bo)) {
+ if (ttm_resource_unevictable(res, bo)) {
list_move_tail(&res->lru.link, &bdev->unevictable);
- } else if (bo->bulk_move) {
+ } else if (bo->bulk_move) {
struct ttm_lru_bulk_move_pos *pos =
ttm_lru_bulk_move_pos(bo->bulk_move, res);
@@ -318,7 +340,7 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
man = ttm_manager_type(bo->bdev, place->mem_type);
spin_lock(&bo->bdev->lru_lock);
- if (bo->pin_count || ttm_resource_is_swapped(res, bo))
+ if (ttm_resource_unevictable(res, bo))
list_add_tail(&res->lru.link, &bo->bdev->unevictable);
else
list_add_tail(&res->lru.link, &man->lru[bo->priority]);
@@ -612,7 +634,6 @@ ttm_resource_cursor_check_bulk(struct ttm_resource_cursor *cursor,
/**
* ttm_resource_manager_first() - Start iterating over the resources
* of a resource manager
- * @man: resource manager to iterate over
* @cursor: cursor to record the position
*
* Initializes the cursor and starts iterating. When done iterating,
@@ -621,17 +642,16 @@ ttm_resource_cursor_check_bulk(struct ttm_resource_cursor *cursor,
* Return: The first resource from the resource manager.
*/
struct ttm_resource *
-ttm_resource_manager_first(struct ttm_resource_manager *man,
- struct ttm_resource_cursor *cursor)
+ttm_resource_manager_first(struct ttm_resource_cursor *cursor)
{
- lockdep_assert_held(&man->bdev->lru_lock);
+ struct ttm_resource_manager *man = cursor->man;
- cursor->priority = 0;
- cursor->man = man;
- ttm_lru_item_init(&cursor->hitch, TTM_LRU_HITCH);
- INIT_LIST_HEAD(&cursor->bulk_link);
- list_add(&cursor->hitch.link, &man->lru[cursor->priority]);
+ if (WARN_ON_ONCE(!man))
+ return NULL;
+ lockdep_assert_held(&man->bdev->lru_lock);
+
+ list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
return ttm_resource_manager_next(cursor);
}
@@ -667,8 +687,6 @@ ttm_resource_manager_next(struct ttm_resource_cursor *cursor)
ttm_resource_cursor_clear_bulk(cursor);
}
- ttm_resource_cursor_fini(cursor);
-
return NULL;
}
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 3baf215eca23..df0aa6c4b8b8 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -40,6 +40,7 @@
#include <drm/drm_cache.h>
#include <drm/drm_device.h>
#include <drm/drm_util.h>
+#include <drm/ttm/ttm_backup.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_tt.h>
@@ -158,6 +159,8 @@ static void ttm_tt_init_fields(struct ttm_tt *ttm,
ttm->swap_storage = NULL;
ttm->sg = bo->sg;
ttm->caching = caching;
+ ttm->restore = NULL;
+ ttm->backup = NULL;
}
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
@@ -182,6 +185,13 @@ void ttm_tt_fini(struct ttm_tt *ttm)
fput(ttm->swap_storage);
ttm->swap_storage = NULL;
+ if (ttm_tt_is_backed_up(ttm))
+ ttm_pool_drop_backed_up(ttm);
+ if (ttm->backup) {
+ ttm_backup_fini(ttm->backup);
+ ttm->backup = NULL;
+ }
+
if (ttm->pages)
kvfree(ttm->pages);
else
@@ -254,6 +264,49 @@ out_err:
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapin);
/**
+ * ttm_tt_backup() - Helper to back up a struct ttm_tt.
+ * @bdev: The TTM device.
+ * @tt: The struct ttm_tt.
+ * @flags: Flags that govern the backup behaviour.
+ *
+ * Update the page accounting and call ttm_pool_backup() to free pages
+ * or back them up.
+ *
+ * Return: Number of pages freed or swapped out, or negative error code on
+ * error.
+ */
+long ttm_tt_backup(struct ttm_device *bdev, struct ttm_tt *tt,
+ const struct ttm_backup_flags flags)
+{
+ long ret;
+
+ if (WARN_ON(IS_ERR_OR_NULL(tt->backup)))
+ return 0;
+
+ ret = ttm_pool_backup(&bdev->pool, tt, &flags);
+ if (ret > 0) {
+ tt->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
+ tt->page_flags |= TTM_TT_FLAG_BACKED_UP;
+ }
+
+ return ret;
+}
+
+int ttm_tt_restore(struct ttm_device *bdev, struct ttm_tt *tt,
+ const struct ttm_operation_ctx *ctx)
+{
+ int ret = ttm_pool_restore_and_alloc(&bdev->pool, tt, ctx);
+
+ if (ret)
+ return ret;
+
+ tt->page_flags &= ~TTM_TT_FLAG_BACKED_UP;
+
+ return 0;
+}
+EXPORT_SYMBOL(ttm_tt_restore);
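
A hedged sketch of the intended pairing between ttm_tt_backup() and ttm_tt_restore(). Whether a driver calls these directly or via ttm_bo_shrink() is not shown here; the function name and flag choice are assumptions.

static int example_tt_roundtrip(struct ttm_device *bdev, struct ttm_tt *tt,
				struct ttm_operation_ctx *ctx)
{
	/* Requires a backup attached via ttm_tt_setup_backup(). */
	const struct ttm_backup_flags flags = { .writeback = true };
	long backed_up;

	backed_up = ttm_tt_backup(bdev, tt, flags);
	if (backed_up < 0)
		return backed_up;

	/* ... later, when the tt contents are needed again ... */
	return ttm_tt_restore(bdev, tt, ctx);
}
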
+
+/**
* ttm_tt_swapout - swap out tt object
*
* @bdev: TTM device structure.
@@ -348,6 +401,7 @@ int ttm_tt_populate(struct ttm_device *bdev,
goto error;
ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED;
+ ttm->page_flags &= ~TTM_TT_FLAG_BACKED_UP;
if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) {
ret = ttm_tt_swapin(ttm);
if (unlikely(ret != 0)) {
@@ -477,3 +531,32 @@ unsigned long ttm_tt_pages_limit(void)
return ttm_pages_limit;
}
EXPORT_SYMBOL(ttm_tt_pages_limit);
+
+/**
+ * ttm_tt_setup_backup() - Allocate and assign a backup structure for a ttm_tt
+ * @tt: The ttm_tt for which to allocate and assign a backup structure.
+ *
+ * Assign a backup structure to be used for tt backup. This should
+ * typically be done at bo creation, to avoid allocations at shrinking
+ * time.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ttm_tt_setup_backup(struct ttm_tt *tt)
+{
+ struct ttm_backup *backup =
+ ttm_backup_shmem_create(((loff_t)tt->num_pages) << PAGE_SHIFT);
+
+ if (WARN_ON_ONCE(!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)))
+ return -EINVAL;
+
+ if (IS_ERR(backup))
+ return PTR_ERR(backup);
+
+ if (tt->backup)
+ ttm_backup_fini(tt->backup);
+
+ tt->backup = backup;
+ return 0;
+}
+EXPORT_SYMBOL(ttm_tt_setup_backup);
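
A minimal sketch of wiring this up from a driver's ttm_tt_create hook, as recommended above. Forcing TTM_TT_FLAG_EXTERNAL_MAPPABLE and allocating a bare struct ttm_tt are simplifying assumptions; the function name is hypothetical.

static struct ttm_tt *example_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

	if (!tt)
		return NULL;

	/* ttm_tt_setup_backup() requires an external-mappable tt. */
	page_flags |= TTM_TT_FLAG_EXTERNAL_MAPPABLE;

	if (ttm_tt_init(tt, bo, page_flags, ttm_cached, 0)) {
		kfree(tt);
		return NULL;
	}

	if (ttm_tt_setup_backup(tt)) {
		ttm_tt_fini(tt);
		kfree(tt);
		return NULL;
	}

	return tt;
}
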
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 930737a9347b..852015214e97 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -295,11 +295,21 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
if (ret)
return ret;
+ v3d->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(v3d->clk))
+ return dev_err_probe(dev, PTR_ERR(v3d->clk), "Failed to get V3D clock\n");
+
+ ret = clk_prepare_enable(v3d->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't enable the V3D clock\n");
+ return ret;
+ }
+
mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO);
mask = DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH));
ret = dma_set_mask_and_coherent(dev, mask);
if (ret)
- return ret;
+ goto clk_disable;
v3d->va_width = 30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_VA_WIDTH);
@@ -319,28 +329,29 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
ret = PTR_ERR(v3d->reset);
if (ret == -EPROBE_DEFER)
- return ret;
+ goto clk_disable;
v3d->reset = NULL;
ret = map_regs(v3d, &v3d->bridge_regs, "bridge");
if (ret) {
dev_err(dev,
"Failed to get reset control or bridge regs\n");
- return ret;
+ goto clk_disable;
}
}
if (v3d->ver < 41) {
ret = map_regs(v3d, &v3d->gca_regs, "gca");
if (ret)
- return ret;
+ goto clk_disable;
}
v3d->mmu_scratch = dma_alloc_wc(dev, 4096, &v3d->mmu_scratch_paddr,
GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (!v3d->mmu_scratch) {
dev_err(dev, "Failed to allocate MMU scratch page\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto clk_disable;
}
ret = v3d_gem_init(drm);
@@ -369,6 +380,8 @@ gem_destroy:
v3d_gem_destroy(drm);
dma_free:
dma_free_wc(dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr);
+clk_disable:
+ clk_disable_unprepare(v3d->clk);
return ret;
}
@@ -386,6 +399,8 @@ static void v3d_platform_drm_remove(struct platform_device *pdev)
dma_free_wc(v3d->drm.dev, 4096, v3d->mmu_scratch,
v3d->mmu_scratch_paddr);
+
+ clk_disable_unprepare(v3d->clk);
}
static struct platform_driver v3d_platform_driver = {
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index dc1cfe2e14be..9deaefa0f95b 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -147,7 +147,6 @@ struct v3d_dev {
struct v3d_render_job *render_job;
struct v3d_tfu_job *tfu_job;
struct v3d_csd_job *csd_job;
- struct v3d_cpu_job *cpu_job;
struct v3d_queue_state queue[V3D_MAX_QUEUES];
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index da08ddb01d21..34c42d6e12cd 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -226,8 +226,12 @@ static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
struct dma_fence *fence;
unsigned long irqflags;
- if (unlikely(job->base.base.s_fence->finished.error))
+ if (unlikely(job->base.base.s_fence->finished.error)) {
+ spin_lock_irqsave(&v3d->job_lock, irqflags);
+ v3d->bin_job = NULL;
+ spin_unlock_irqrestore(&v3d->job_lock, irqflags);
return NULL;
+ }
/* Lock required around bin_job update vs
* v3d_overflow_mem_work().
@@ -281,8 +285,10 @@ static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
- if (unlikely(job->base.base.s_fence->finished.error))
+ if (unlikely(job->base.base.s_fence->finished.error)) {
+ v3d->render_job = NULL;
return NULL;
+ }
v3d->render_job = job;
@@ -327,11 +333,17 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
+ if (unlikely(job->base.base.s_fence->finished.error)) {
+ v3d->tfu_job = NULL;
+ return NULL;
+ }
+
+ v3d->tfu_job = job;
+
fence = v3d_fence_create(v3d, V3D_TFU);
if (IS_ERR(fence))
return NULL;
- v3d->tfu_job = job;
if (job->base.irq_fence)
dma_fence_put(job->base.irq_fence);
job->base.irq_fence = dma_fence_get(fence);
@@ -369,6 +381,11 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
struct dma_fence *fence;
int i, csd_cfg0_reg;
+ if (unlikely(job->base.base.s_fence->finished.error)) {
+ v3d->csd_job = NULL;
+ return NULL;
+ }
+
v3d->csd_job = job;
v3d_invalidate_caches(v3d);
@@ -656,8 +673,6 @@ v3d_cpu_job_run(struct drm_sched_job *sched_job)
struct v3d_cpu_job *job = to_cpu_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
- v3d->cpu_job = job;
-
if (job->job_type >= ARRAY_SIZE(cpu_job_function)) {
DRM_DEBUG_DRIVER("Unknown CPU job: %d\n", job->job_type);
return NULL;
@@ -822,67 +837,54 @@ static const struct drm_sched_backend_ops v3d_cpu_sched_ops = {
.free_job = v3d_cpu_job_free
};
+static int
+v3d_queue_sched_init(struct v3d_dev *v3d, const struct drm_sched_backend_ops *ops,
+ enum v3d_queue queue, const char *name)
+{
+ struct drm_sched_init_args args = {
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = 1,
+ .timeout = msecs_to_jiffies(500),
+ .dev = v3d->drm.dev,
+ };
+
+ args.ops = ops;
+ args.name = name;
+
+ return drm_sched_init(&v3d->queue[queue].sched, &args);
+}
+
int
v3d_sched_init(struct v3d_dev *v3d)
{
- int hw_jobs_limit = 1;
- int job_hang_limit = 0;
- int hang_limit_ms = 500;
int ret;
- ret = drm_sched_init(&v3d->queue[V3D_BIN].sched,
- &v3d_bin_sched_ops, NULL,
- DRM_SCHED_PRIORITY_COUNT,
- hw_jobs_limit, job_hang_limit,
- msecs_to_jiffies(hang_limit_ms), NULL,
- NULL, "v3d_bin", v3d->drm.dev);
+ ret = v3d_queue_sched_init(v3d, &v3d_bin_sched_ops, V3D_BIN, "v3d_bin");
if (ret)
return ret;
- ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched,
- &v3d_render_sched_ops, NULL,
- DRM_SCHED_PRIORITY_COUNT,
- hw_jobs_limit, job_hang_limit,
- msecs_to_jiffies(hang_limit_ms), NULL,
- NULL, "v3d_render", v3d->drm.dev);
+ ret = v3d_queue_sched_init(v3d, &v3d_render_sched_ops, V3D_RENDER,
+ "v3d_render");
if (ret)
goto fail;
- ret = drm_sched_init(&v3d->queue[V3D_TFU].sched,
- &v3d_tfu_sched_ops, NULL,
- DRM_SCHED_PRIORITY_COUNT,
- hw_jobs_limit, job_hang_limit,
- msecs_to_jiffies(hang_limit_ms), NULL,
- NULL, "v3d_tfu", v3d->drm.dev);
+ ret = v3d_queue_sched_init(v3d, &v3d_tfu_sched_ops, V3D_TFU, "v3d_tfu");
if (ret)
goto fail;
if (v3d_has_csd(v3d)) {
- ret = drm_sched_init(&v3d->queue[V3D_CSD].sched,
- &v3d_csd_sched_ops, NULL,
- DRM_SCHED_PRIORITY_COUNT,
- hw_jobs_limit, job_hang_limit,
- msecs_to_jiffies(hang_limit_ms), NULL,
- NULL, "v3d_csd", v3d->drm.dev);
+ ret = v3d_queue_sched_init(v3d, &v3d_csd_sched_ops, V3D_CSD,
+ "v3d_csd");
if (ret)
goto fail;
- ret = drm_sched_init(&v3d->queue[V3D_CACHE_CLEAN].sched,
- &v3d_cache_clean_sched_ops, NULL,
- DRM_SCHED_PRIORITY_COUNT,
- hw_jobs_limit, job_hang_limit,
- msecs_to_jiffies(hang_limit_ms), NULL,
- NULL, "v3d_cache_clean", v3d->drm.dev);
+ ret = v3d_queue_sched_init(v3d, &v3d_cache_clean_sched_ops,
+ V3D_CACHE_CLEAN, "v3d_cache_clean");
if (ret)
goto fail;
}
- ret = drm_sched_init(&v3d->queue[V3D_CPU].sched,
- &v3d_cpu_sched_ops, NULL,
- DRM_SCHED_PRIORITY_COUNT,
- 1, job_hang_limit,
- msecs_to_jiffies(hang_limit_ms), NULL,
- NULL, "v3d_cpu", v3d->drm.dev);
+ ret = v3d_queue_sched_init(v3d, &v3d_cpu_sched_ops, V3D_CPU, "v3d_cpu");
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/vboxvideo/hgsmi_base.c b/drivers/gpu/drm/vboxvideo/hgsmi_base.c
index 87dccaecc3e5..db994aeaa0f9 100644
--- a/drivers/gpu/drm/vboxvideo/hgsmi_base.c
+++ b/drivers/gpu/drm/vboxvideo/hgsmi_base.c
@@ -181,40 +181,3 @@ int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
return rc;
}
-
-/**
- * hgsmi_cursor_position - Report the guest cursor position. The host may
- * wish to use this information to re-position its
- * own cursor (though this is currently unlikely).
- * The current host cursor position is returned.
- * Return: 0 or negative errno value.
- * @ctx: The context containing the heap used.
- * @report_position: Are we reporting a position?
- * @x: Guest cursor X position.
- * @y: Guest cursor Y position.
- * @x_host: Host cursor X position is stored here. Optional.
- * @y_host: Host cursor Y position is stored here. Optional.
- */
-int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position,
- u32 x, u32 y, u32 *x_host, u32 *y_host)
-{
- struct vbva_cursor_position *p;
-
- p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
- VBVA_CURSOR_POSITION);
- if (!p)
- return -ENOMEM;
-
- p->report_position = report_position;
- p->x = x;
- p->y = y;
-
- hgsmi_buffer_submit(ctx, p);
-
- *x_host = p->x;
- *y_host = p->y;
-
- hgsmi_buffer_free(ctx, p);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/vboxvideo/vboxvideo_guest.h b/drivers/gpu/drm/vboxvideo/vboxvideo_guest.h
index 55fcee3a6470..643c4448bdcb 100644
--- a/drivers/gpu/drm/vboxvideo/vboxvideo_guest.h
+++ b/drivers/gpu/drm/vboxvideo/vboxvideo_guest.h
@@ -34,8 +34,6 @@ int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret);
int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
u32 hot_x, u32 hot_y, u32 width, u32 height,
u8 *pixels, u32 len);
-int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position,
- u32 x, u32 y, u32 *x_host, u32 *y_host);
bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
struct vbva_buffer *vbva, s32 screen);
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index 6cc7b7e6294a..123ab0ce1781 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -14,6 +14,7 @@ config DRM_VC4
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HDMI_STATE_HELPER
select DRM_DISPLAY_HELPER
+ select DRM_EXEC
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
select DRM_PANEL_BRIDGE
diff --git a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
index 40a05869a50e..992e8f5c5c6e 100644
--- a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
+++ b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
@@ -724,7 +724,7 @@ static void drm_vc4_test_pv_muxing_invalid(struct kunit *test)
static int vc4_pv_muxing_test_init(struct kunit *test)
{
const struct pv_muxing_param *params = test->param_value;
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct pv_muxing_priv *priv;
struct drm_device *drm;
struct vc4_dev *vc4;
@@ -737,13 +737,15 @@ static int vc4_pv_muxing_test_init(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
priv->vc4 = vc4;
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
drm = &vc4->base;
- priv->state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ priv->state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->state);
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
return 0;
}
@@ -782,7 +784,7 @@ static struct kunit_suite vc5_pv_muxing_test_suite = {
*/
static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *test)
{
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *state;
struct vc4_crtc_state *new_vc4_crtc_state;
struct vc4_hvs_state *new_hvs_state;
@@ -795,11 +797,10 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes
vc4 = vc5_mock_device(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
drm = &vc4->base;
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
@@ -822,7 +823,7 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes
ret = drm_atomic_helper_swap_state(state, false);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
@@ -843,6 +844,9 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes
KUNIT_ASSERT_TRUE(test, new_hvs_state->fifo_state[hdmi1_channel].in_use);
KUNIT_EXPECT_NE(test, hdmi0_channel, hdmi1_channel);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -854,7 +858,7 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes
*/
static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
{
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *state;
struct vc4_crtc_state *new_vc4_crtc_state;
struct vc4_hvs_state *new_hvs_state;
@@ -867,11 +871,10 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
vc4 = vc5_mock_device(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
drm = &vc4->base;
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
@@ -905,7 +908,7 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
ret = drm_atomic_helper_swap_state(state, false);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_del_output(test, state, VC4_ENCODER_TYPE_HDMI0);
@@ -929,6 +932,9 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
KUNIT_EXPECT_EQ(test, old_hdmi1_channel, hdmi1_channel);
}
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
/*
@@ -949,7 +955,7 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
static void
drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct kunit *test)
{
- struct drm_modeset_acquire_ctx *ctx;
+ struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *state;
struct vc4_crtc_state *new_vc4_crtc_state;
struct drm_device *drm;
@@ -959,11 +965,10 @@ drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct ku
vc4 = vc5_mock_device(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ drm_modeset_acquire_init(&ctx, 0);
drm = &vc4->base;
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
@@ -975,7 +980,7 @@ drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct ku
ret = drm_atomic_helper_swap_state(state, false);
KUNIT_ASSERT_EQ(test, ret, 0);
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
@@ -987,6 +992,9 @@ drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct ku
new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state,
VC4_ENCODER_TYPE_HDMI0);
KUNIT_EXPECT_NULL(test, new_vc4_crtc_state);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
static struct kunit_case vc5_pv_muxing_bugs_tests[] = {
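For reference, the conversion in these tests swaps the kunit-managed acquire-context allocation for a context on the stack; a minimal sketch of the pattern, with the atomic-state building elided (not the tests' exact code):

	struct drm_modeset_acquire_ctx ctx;

	drm_modeset_acquire_init(&ctx, 0);

	/* ... allocate the atomic state with &ctx, add outputs, check/commit;
	 * non-test callers would also handle -EDEADLK via drm_modeset_backoff()
	 */

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);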
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index cf40a53ad42e..2a48038abe7a 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -884,11 +884,7 @@ struct vc4_async_flip_state {
struct drm_framebuffer *fb;
struct drm_framebuffer *old_fb;
struct drm_pending_vblank_event *event;
-
- union {
- struct dma_fence_cb fence;
- struct vc4_seqno_cb seqno;
- } cb;
+ struct dma_fence_cb cb;
};
/* Called when the V3D execution for the BO being flipped to is done, so that
@@ -919,10 +915,11 @@ vc4_async_page_flip_complete(struct vc4_async_flip_state *flip_state)
kfree(flip_state);
}
-static void vc4_async_page_flip_seqno_complete(struct vc4_seqno_cb *cb)
+static void vc4_async_page_flip_complete_with_cleanup(struct dma_fence *fence,
+ struct dma_fence_cb *cb)
{
struct vc4_async_flip_state *flip_state =
- container_of(cb, struct vc4_async_flip_state, cb.seqno);
+ container_of(cb, struct vc4_async_flip_state, cb);
struct vc4_bo *bo = NULL;
if (flip_state->old_fb) {
@@ -932,6 +929,7 @@ static void vc4_async_page_flip_seqno_complete(struct vc4_seqno_cb *cb)
}
vc4_async_page_flip_complete(flip_state);
+ dma_fence_put(fence);
/*
* Decrement the BO usecnt in order to keep the inc/dec
@@ -950,7 +948,7 @@ static void vc4_async_page_flip_fence_complete(struct dma_fence *fence,
struct dma_fence_cb *cb)
{
struct vc4_async_flip_state *flip_state =
- container_of(cb, struct vc4_async_flip_state, cb.fence);
+ container_of(cb, struct vc4_async_flip_state, cb);
vc4_async_page_flip_complete(flip_state);
dma_fence_put(fence);
@@ -961,16 +959,15 @@ static int vc4_async_set_fence_cb(struct drm_device *dev,
{
struct drm_framebuffer *fb = flip_state->fb;
struct drm_gem_dma_object *dma_bo = drm_fb_dma_get_gem_obj(fb, 0);
+ dma_fence_func_t async_page_flip_complete_function;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct dma_fence *fence;
int ret;
- if (vc4->gen == VC4_GEN_4) {
- struct vc4_bo *bo = to_vc4_bo(&dma_bo->base);
-
- return vc4_queue_seqno_cb(dev, &flip_state->cb.seqno, bo->seqno,
- vc4_async_page_flip_seqno_complete);
- }
+ if (vc4->gen == VC4_GEN_4)
+ async_page_flip_complete_function = vc4_async_page_flip_complete_with_cleanup;
+ else
+ async_page_flip_complete_function = vc4_async_page_flip_fence_complete;
ret = dma_resv_get_singleton(dma_bo->base.resv, DMA_RESV_USAGE_READ, &fence);
if (ret)
@@ -978,14 +975,14 @@ static int vc4_async_set_fence_cb(struct drm_device *dev,
/* If there's no fence, complete the page flip immediately */
if (!fence) {
- vc4_async_page_flip_fence_complete(fence, &flip_state->cb.fence);
+ async_page_flip_complete_function(fence, &flip_state->cb);
return 0;
}
/* If the fence has already been completed, complete the page flip */
- if (dma_fence_add_callback(fence, &flip_state->cb.fence,
- vc4_async_page_flip_fence_complete))
- vc4_async_page_flip_fence_complete(fence, &flip_state->cb.fence);
+ if (dma_fence_add_callback(fence, &flip_state->cb,
+ async_page_flip_complete_function))
+ async_page_flip_complete_function(fence, &flip_state->cb);
return 0;
}
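For reference, the flip code above follows the usual dma_fence callback contract: dma_fence_add_callback() returns non-zero when the fence has already signalled, in which case the caller runs the callback itself, and the callback is expected to drop the fence reference with dma_fence_put() (a no-op for NULL). A sketch assuming an illustrative completion function complete_cb and a state object with an embedded struct dma_fence_cb (names are not the driver's):

	struct dma_fence *fence;
	int ret;

	ret = dma_resv_get_singleton(obj->resv, DMA_RESV_USAGE_READ, &fence);
	if (ret)
		return ret;

	if (!fence) {
		/* Nothing to wait for: complete right away. */
		complete_cb(NULL, &state->cb);
		return 0;
	}

	/* If the fence already signalled, run the callback ourselves. */
	if (dma_fence_add_callback(fence, &state->cb, complete_cb))
		complete_cb(fence, &state->cb);

	return 0;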
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 4a078ffd9f82..221d8e01d539 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -186,11 +186,6 @@ struct vc4_dev {
*/
struct vc4_perfmon *active_perfmon;
- /* List of struct vc4_seqno_cb for callbacks to be made from a
- * workqueue when the given seqno is passed.
- */
- struct list_head seqno_cb_list;
-
/* The memory used for storing binner tile alloc, tile state,
* and overflow memory allocations. This is freed when V3D
* powers down.
@@ -247,16 +242,6 @@ struct vc4_dev {
struct vc4_bo {
struct drm_gem_dma_object base;
- /* seqno of the last job to render using this BO. */
- uint64_t seqno;
-
- /* seqno of the last job to use the RCL to write to this BO.
- *
- * Note that this doesn't include binner overflow memory
- * writes.
- */
- uint64_t write_seqno;
-
bool t_format;
/* List entry for the BO's position in either
@@ -304,12 +289,6 @@ struct vc4_fence {
#define to_vc4_fence(_fence) \
container_of_const(_fence, struct vc4_fence, base)
-struct vc4_seqno_cb {
- struct work_struct work;
- uint64_t seqno;
- void (*func)(struct vc4_seqno_cb *cb);
-};
-
struct vc4_v3d {
struct vc4_dev *vc4;
struct platform_device *pdev;
@@ -695,9 +674,6 @@ struct vc4_exec_info {
/* Sequence number for this bin/render job. */
uint64_t seqno;
- /* Latest write_seqno of any BO that binning depends on. */
- uint64_t bin_dep_seqno;
-
struct dma_fence *fence;
/* Last current addresses the hardware was processing when the
@@ -1025,9 +1001,6 @@ void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
-int vc4_queue_seqno_cb(struct drm_device *dev,
- struct vc4_seqno_cb *cb, uint64_t seqno,
- void (*func)(struct vc4_seqno_cb *cb));
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 5eb293bdb363..779b22efe27b 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -800,7 +800,7 @@ dsi_esc_timing(u32 ns)
}
static void vc4_dsi_bridge_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *state)
+ struct drm_atomic_state *state)
{
struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
u32 disp0_ctrl;
@@ -811,7 +811,7 @@ static void vc4_dsi_bridge_disable(struct drm_bridge *bridge,
}
static void vc4_dsi_bridge_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *state)
+ struct drm_atomic_state *state)
{
struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
struct device *dev = &dsi->pdev->dev;
@@ -873,9 +873,8 @@ static bool vc4_dsi_bridge_mode_fixup(struct drm_bridge *bridge,
}
static void vc4_dsi_bridge_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
- struct drm_atomic_state *state = old_state->base.state;
struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
const struct drm_crtc_state *crtc_state;
struct device *dev = &dsi->pdev->dev;
@@ -1143,7 +1142,7 @@ static void vc4_dsi_bridge_pre_enable(struct drm_bridge *bridge,
}
static void vc4_dsi_bridge_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_state)
+ struct drm_atomic_state *state)
{
struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
bool debug_dump_regs = false;
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 22bccd69eb62..8125f87edc60 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -29,6 +29,7 @@
#include <linux/sched/signal.h>
#include <linux/dma-fence-array.h>
+#include <drm/drm_exec.h>
#include <drm/drm_syncobj.h>
#include "uapi/drm/vc4_drm.h"
@@ -552,45 +553,24 @@ vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
}
static void
-vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
+vc4_attach_fences(struct vc4_exec_info *exec)
{
struct vc4_bo *bo;
unsigned i;
for (i = 0; i < exec->bo_count; i++) {
bo = to_vc4_bo(exec->bo[i]);
- bo->seqno = seqno;
-
dma_resv_add_fence(bo->base.base.resv, exec->fence,
DMA_RESV_USAGE_READ);
}
- list_for_each_entry(bo, &exec->unref_list, unref_head) {
- bo->seqno = seqno;
- }
-
for (i = 0; i < exec->rcl_write_bo_count; i++) {
bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
- bo->write_seqno = seqno;
-
dma_resv_add_fence(bo->base.base.resv, exec->fence,
DMA_RESV_USAGE_WRITE);
}
}
-static void
-vc4_unlock_bo_reservations(struct drm_device *dev,
- struct vc4_exec_info *exec,
- struct ww_acquire_ctx *acquire_ctx)
-{
- int i;
-
- for (i = 0; i < exec->bo_count; i++)
- dma_resv_unlock(exec->bo[i]->resv);
-
- ww_acquire_fini(acquire_ctx);
-}
-
/* Takes the reservation lock on all the BOs being referenced, so that
* at queue submit time we can update the reservations.
*
@@ -599,70 +579,23 @@ vc4_unlock_bo_reservations(struct drm_device *dev,
* to vc4, so we don't attach dma-buf fences to them.
*/
static int
-vc4_lock_bo_reservations(struct drm_device *dev,
- struct vc4_exec_info *exec,
- struct ww_acquire_ctx *acquire_ctx)
+vc4_lock_bo_reservations(struct vc4_exec_info *exec,
+ struct drm_exec *exec_ctx)
{
- int contended_lock = -1;
- int i, ret;
- struct drm_gem_object *bo;
-
- ww_acquire_init(acquire_ctx, &reservation_ww_class);
-
-retry:
- if (contended_lock != -1) {
- bo = exec->bo[contended_lock];
- ret = dma_resv_lock_slow_interruptible(bo->resv, acquire_ctx);
- if (ret) {
- ww_acquire_done(acquire_ctx);
- return ret;
- }
- }
-
- for (i = 0; i < exec->bo_count; i++) {
- if (i == contended_lock)
- continue;
-
- bo = exec->bo[i];
-
- ret = dma_resv_lock_interruptible(bo->resv, acquire_ctx);
- if (ret) {
- int j;
-
- for (j = 0; j < i; j++) {
- bo = exec->bo[j];
- dma_resv_unlock(bo->resv);
- }
-
- if (contended_lock != -1 && contended_lock >= i) {
- bo = exec->bo[contended_lock];
-
- dma_resv_unlock(bo->resv);
- }
-
- if (ret == -EDEADLK) {
- contended_lock = i;
- goto retry;
- }
-
- ww_acquire_done(acquire_ctx);
- return ret;
- }
- }
-
- ww_acquire_done(acquire_ctx);
+ int ret;
/* Reserve space for our shared (read-only) fence references,
* before we commit the CL to the hardware.
*/
- for (i = 0; i < exec->bo_count; i++) {
- bo = exec->bo[i];
+ drm_exec_init(exec_ctx, DRM_EXEC_INTERRUPTIBLE_WAIT, exec->bo_count);
+ drm_exec_until_all_locked(exec_ctx) {
+ ret = drm_exec_prepare_array(exec_ctx, exec->bo,
+ exec->bo_count, 1);
+ }
- ret = dma_resv_reserve_fences(bo->resv, 1);
- if (ret) {
- vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
- return ret;
- }
+ if (ret) {
+ drm_exec_fini(exec_ctx);
+ return ret;
}
return 0;
@@ -679,7 +612,7 @@ retry:
*/
static int
vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
- struct ww_acquire_ctx *acquire_ctx,
+ struct drm_exec *exec_ctx,
struct drm_syncobj *out_sync)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -706,9 +639,9 @@ vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
if (out_sync)
drm_syncobj_replace_fence(out_sync, exec->fence);
- vc4_update_bo_seqnos(exec, seqno);
+ vc4_attach_fences(exec);
- vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
+ drm_exec_fini(exec_ctx);
list_add_tail(&exec->head, &vc4->bin_job_list);
@@ -904,12 +837,6 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
goto fail;
}
- /* Block waiting on any previous rendering into the CS's VBO,
- * IB, or textures, so that pixels are actually written by the
- * time we try to read them.
- */
- ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);
-
fail:
kvfree(temp);
return ret;
@@ -968,7 +895,6 @@ void
vc4_job_handle_completed(struct vc4_dev *vc4)
{
unsigned long irqflags;
- struct vc4_seqno_cb *cb, *cb_temp;
if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return;
@@ -985,46 +911,7 @@ vc4_job_handle_completed(struct vc4_dev *vc4)
spin_lock_irqsave(&vc4->job_lock, irqflags);
}
- list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
- if (cb->seqno <= vc4->finished_seqno) {
- list_del_init(&cb->work.entry);
- schedule_work(&cb->work);
- }
- }
-
- spin_unlock_irqrestore(&vc4->job_lock, irqflags);
-}
-
-static void vc4_seqno_cb_work(struct work_struct *work)
-{
- struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);
-
- cb->func(cb);
-}
-
-int vc4_queue_seqno_cb(struct drm_device *dev,
- struct vc4_seqno_cb *cb, uint64_t seqno,
- void (*func)(struct vc4_seqno_cb *cb))
-{
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- unsigned long irqflags;
-
- if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
- return -ENODEV;
-
- cb->func = func;
- INIT_WORK(&cb->work, vc4_seqno_cb_work);
-
- spin_lock_irqsave(&vc4->job_lock, irqflags);
- if (seqno > vc4->finished_seqno) {
- cb->seqno = seqno;
- list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
- } else {
- schedule_work(&cb->work);
- }
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
-
- return 0;
}
/* Scheduled when any job has been completed, this walks the list of
@@ -1079,8 +966,10 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
struct vc4_dev *vc4 = to_vc4_dev(dev);
int ret;
struct drm_vc4_wait_bo *args = data;
- struct drm_gem_object *gem_obj;
- struct vc4_bo *bo;
+ unsigned long timeout_jiffies =
+ usecs_to_jiffies(div_u64(args->timeout_ns, 1000));
+ ktime_t start = ktime_get();
+ u64 delta_ns;
if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
@@ -1088,17 +977,18 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
if (args->pad != 0)
return -EINVAL;
- gem_obj = drm_gem_object_lookup(file_priv, args->handle);
- if (!gem_obj) {
- DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
- return -EINVAL;
- }
- bo = to_vc4_bo(gem_obj);
+ ret = drm_gem_dma_resv_wait(file_priv, args->handle,
+ true, timeout_jiffies);
- ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
- &args->timeout_ns);
+ /* Decrement the user's timeout, in case we got interrupted
+ * such that the ioctl will be restarted.
+ */
+ delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
+ if (delta_ns < args->timeout_ns)
+ args->timeout_ns -= delta_ns;
+ else
+ args->timeout_ns = 0;
- drm_gem_object_put(gem_obj);
return ret;
}
@@ -1123,7 +1013,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
struct drm_vc4_submit_cl *args = data;
struct drm_syncobj *out_sync = NULL;
struct vc4_exec_info *exec;
- struct ww_acquire_ctx acquire_ctx;
+ struct drm_exec exec_ctx;
struct dma_fence *in_fence;
int ret = 0;
@@ -1216,7 +1106,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
if (ret)
goto fail;
- ret = vc4_lock_bo_reservations(dev, exec, &acquire_ctx);
+ ret = vc4_lock_bo_reservations(exec, &exec_ctx);
if (ret)
goto fail;
@@ -1224,7 +1114,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
out_sync = drm_syncobj_find(file_priv, args->out_sync);
if (!out_sync) {
ret = -EINVAL;
- goto fail;
+ goto fail_unreserve;
}
/* We replace the fence in out_sync in vc4_queue_submit since
@@ -1239,7 +1129,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
*/
exec->args = NULL;
- ret = vc4_queue_submit(dev, exec, &acquire_ctx, out_sync);
+ ret = vc4_queue_submit(dev, exec, &exec_ctx, out_sync);
/* The syncobj isn't part of the exec data and we need to free our
* reference even if job submission failed.
@@ -1248,13 +1138,15 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
drm_syncobj_put(out_sync);
if (ret)
- goto fail;
+ goto fail_unreserve;
/* Return the seqno for our job. */
args->seqno = vc4->emit_seqno;
return 0;
+fail_unreserve:
+ drm_exec_fini(&exec_ctx);
fail:
vc4_complete_exec(&vc4->base, exec);
@@ -1275,7 +1167,6 @@ int vc4_gem_init(struct drm_device *dev)
INIT_LIST_HEAD(&vc4->bin_job_list);
INIT_LIST_HEAD(&vc4->render_job_list);
INIT_LIST_HEAD(&vc4->job_done_list);
- INIT_LIST_HEAD(&vc4->seqno_cb_list);
spin_lock_init(&vc4->job_lock);
INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
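For reference, the drm_exec conversion above replaces the open-coded ww_mutex retry loop with the generic helper; the core pattern, sketched here with an illustrative array of GEM objects rather than the driver's exec structures, is roughly:

	struct drm_exec exec;
	int ret;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, bo_count);
	drm_exec_until_all_locked(&exec) {
		/* Lock every BO and reserve one fence slot per object;
		 * on ww_mutex contention the loop restarts transparently.
		 */
		ret = drm_exec_prepare_array(&exec, bos, bo_count, 1);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;
	}
	if (ret) {
		drm_exec_fini(&exec);
		return ret;
	}

	/* ... submit the job, dma_resv_add_fence() on each BO ... */

	drm_exec_fini(&exec);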
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 47d9ada98430..37238a12baa5 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -270,34 +270,6 @@ static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi)
static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi) {}
#endif
-static int reset_pipe(struct drm_crtc *crtc,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_atomic_state *state;
- struct drm_crtc_state *crtc_state;
- int ret;
-
- state = drm_atomic_state_alloc(crtc->dev);
- if (!state)
- return -ENOMEM;
-
- state->acquire_ctx = ctx;
-
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state)) {
- ret = PTR_ERR(crtc_state);
- goto out;
- }
-
- crtc_state->connectors_changed = true;
-
- ret = drm_atomic_commit(state);
-out:
- drm_atomic_state_put(state);
-
- return ret;
-}
-
static int vc4_hdmi_reset_link(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx)
{
@@ -376,7 +348,7 @@ static int vc4_hdmi_reset_link(struct drm_connector *connector,
* would be perfectly happy if were to just reconfigure
* the SCDC settings on the fly.
*/
- return reset_pipe(crtc, ctx);
+ return drm_atomic_helper_reset_crtc(crtc, ctx);
}
static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
@@ -2954,15 +2926,16 @@ static int vc5_hdmi_init_resources(struct drm_device *drm,
struct resource *res;
int ret;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi");
- if (!res)
- return -ENODEV;
-
- vc4_hdmi->hdmicore_regs = devm_ioremap(dev, res->start,
- resource_size(res));
- if (!vc4_hdmi->hdmicore_regs)
- return -ENOMEM;
+ vc4_hdmi->hdmicore_regs = devm_platform_ioremap_resource_byname(pdev,
+ "hdmi");
+ if (IS_ERR(vc4_hdmi->hdmicore_regs))
+ return PTR_ERR(vc4_hdmi->hdmicore_regs);
+	/* This region is shared between both HDMI controllers, so it
+	 * cannot be claimed by both instances. Let's not convert it to
+	 * devm_platform_ioremap_resource_byname() like the rest.
+	 */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hd");
if (!res)
return -ENODEV;
@@ -2971,53 +2944,35 @@ static int vc5_hdmi_init_resources(struct drm_device *drm,
if (!vc4_hdmi->hd_regs)
return -ENOMEM;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cec");
- if (!res)
- return -ENODEV;
-
- vc4_hdmi->cec_regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!vc4_hdmi->cec_regs)
- return -ENOMEM;
+ vc4_hdmi->cec_regs = devm_platform_ioremap_resource_byname(pdev,
+ "cec");
+ if (IS_ERR(vc4_hdmi->cec_regs))
+ return PTR_ERR(vc4_hdmi->cec_regs);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csc");
- if (!res)
- return -ENODEV;
+ vc4_hdmi->csc_regs = devm_platform_ioremap_resource_byname(pdev,
+ "csc");
+ if (IS_ERR(vc4_hdmi->csc_regs))
+ return PTR_ERR(vc4_hdmi->csc_regs);
- vc4_hdmi->csc_regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!vc4_hdmi->csc_regs)
- return -ENOMEM;
+ vc4_hdmi->dvp_regs = devm_platform_ioremap_resource_byname(pdev,
+ "dvp");
+ if (IS_ERR(vc4_hdmi->dvp_regs))
+ return PTR_ERR(vc4_hdmi->dvp_regs);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dvp");
- if (!res)
- return -ENODEV;
+ vc4_hdmi->phy_regs = devm_platform_ioremap_resource_byname(pdev,
+ "phy");
- vc4_hdmi->dvp_regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!vc4_hdmi->dvp_regs)
- return -ENOMEM;
+ if (IS_ERR(vc4_hdmi->phy_regs))
+ return PTR_ERR(vc4_hdmi->phy_regs);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
- if (!res)
- return -ENODEV;
+ vc4_hdmi->ram_regs = devm_platform_ioremap_resource_byname(pdev,
+ "packet");
+ if (IS_ERR(vc4_hdmi->ram_regs))
+ return PTR_ERR(vc4_hdmi->ram_regs);
- vc4_hdmi->phy_regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!vc4_hdmi->phy_regs)
- return -ENOMEM;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "packet");
- if (!res)
- return -ENODEV;
-
- vc4_hdmi->ram_regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!vc4_hdmi->ram_regs)
- return -ENOMEM;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rm");
- if (!res)
- return -ENODEV;
-
- vc4_hdmi->rm_regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!vc4_hdmi->rm_regs)
- return -ENOMEM;
+ vc4_hdmi->rm_regs = devm_platform_ioremap_resource_byname(pdev, "rm");
+ if (IS_ERR(vc4_hdmi->rm_regs))
+ return PTR_ERR(vc4_hdmi->rm_regs);
vc4_hdmi->hsm_clock = devm_clk_get(dev, "hdmi");
if (IS_ERR(vc4_hdmi->hsm_clock)) {
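For reference, devm_platform_ioremap_resource_byname() folds the named-resource lookup, region request and ioremap into a single call that returns an ERR_PTR on failure, which is what shrinks each block above to a two-line check. It also claims the region exclusively, which is why the shared "hd" range deliberately keeps the plain devm_ioremap(). A sketch using one of the region names from this patch:

	void __iomem *regs;

	regs = devm_platform_ioremap_resource_byname(pdev, "csc");
	if (IS_ERR(regs))
		return PTR_ERR(regs);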
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index d608860d525f..c5e84d3494d2 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -2338,7 +2338,7 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
}
static int vc4_plane_atomic_async_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
+ struct drm_atomic_state *state, bool flip)
{
struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index 4eab069cda75..42acac05fe47 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -196,7 +196,7 @@ static int vc4_txp_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
vc4_txp_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 5bf134968ade..1e7bdda55698 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -284,9 +284,6 @@ validate_indexed_prim_list(VALIDATE_ARGS)
if (!ib)
return -EINVAL;
- exec->bin_dep_seqno = max(exec->bin_dep_seqno,
- to_vc4_bo(&ib->base)->write_seqno);
-
if (offset > ib->base.size ||
(ib->base.size - offset) / index_size < length) {
DRM_DEBUG("IB access overflow (%d + %d*%d > %zd)\n",
@@ -738,11 +735,6 @@ reloc_tex(struct vc4_exec_info *exec,
*validated_p0 = tex->dma_addr + p0;
- if (is_cs) {
- exec->bin_dep_seqno = max(exec->bin_dep_seqno,
- to_vc4_bo(&tex->base)->write_seqno);
- }
-
return true;
fail:
DRM_INFO("Texture p0 at %d: 0x%08x\n", sample->p_offset[0], p0);
@@ -904,9 +896,6 @@ validate_gl_shader_rec(struct drm_device *dev,
uint32_t stride = *(uint8_t *)(pkt_u + o + 5);
uint32_t max_index;
- exec->bin_dep_seqno = max(exec->bin_dep_seqno,
- to_vc4_bo(&vbo->base)->write_seqno);
-
if (state->addr & 0x8)
stride |= (*(uint32_t *)(pkt_u + 100 + i * 4)) & ~0xff;
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 64baf2f22d9f..59a45e74a641 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -189,7 +189,7 @@ static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
}
static enum drm_mode_status virtio_gpu_conn_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct virtio_gpu_output *output =
drm_connector_to_virtio_gpu_output(connector);
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 6a67c6297d58..2d88e390feb4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -30,6 +30,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/poll.h>
+#include <linux/vgaarb.h>
#include <linux/wait.h>
#include <drm/clients/drm_client_setup.h>
@@ -41,6 +42,8 @@
#include "virtgpu_drv.h"
+#define PCI_DEVICE_ID_VIRTIO_GPU 0x1050
+
static const struct drm_driver driver;
static int virtio_gpu_modeset = -1;
@@ -162,7 +165,43 @@ static struct virtio_driver virtio_gpu_driver = {
.config_changed = virtio_gpu_config_changed
};
-module_virtio_driver(virtio_gpu_driver);
+static int __init virtio_gpu_driver_init(void)
+{
+ struct pci_dev *pdev;
+ int ret;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_REDHAT_QUMRANET,
+ PCI_DEVICE_ID_VIRTIO_GPU,
+ NULL);
+ if (pdev && pci_is_vga(pdev)) {
+ ret = vga_get_interruptible(pdev,
+ VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
+ if (ret) {
+ pci_dev_put(pdev);
+ return ret;
+ }
+ }
+
+ ret = register_virtio_driver(&virtio_gpu_driver);
+
+ if (pdev) {
+ if (pci_is_vga(pdev))
+ vga_put(pdev,
+ VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
+
+ pci_dev_put(pdev);
+ }
+
+ return ret;
+}
+
+static void __exit virtio_gpu_driver_exit(void)
+{
+ unregister_virtio_driver(&virtio_gpu_driver);
+}
+
+module_init(virtio_gpu_driver_init);
+module_exit(virtio_gpu_driver_exit);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio GPU driver");
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index f42ca9d8ed10..f17660a71a3e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -310,6 +310,7 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
+struct virtio_gpu_object_array *virtio_gpu_panic_array_alloc(void);
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents);
struct virtio_gpu_object_array*
virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents);
@@ -334,12 +335,21 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo);
+int virtio_gpu_panic_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
+ uint64_t offset,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y,
+ struct virtio_gpu_object_array *objs);
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
uint64_t offset,
uint32_t width, uint32_t height,
uint32_t x, uint32_t y,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
+void virtio_gpu_panic_cmd_resource_flush(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id,
+ uint32_t x, uint32_t y,
+ uint32_t width, uint32_t height);
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
uint32_t x, uint32_t y,
@@ -408,6 +418,7 @@ void virtio_gpu_ctrl_ack(struct virtqueue *vq);
void virtio_gpu_cursor_ack(struct virtqueue *vq);
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
+void virtio_gpu_panic_notify(struct virtio_gpu_device *vgdev);
void virtio_gpu_notify(struct virtio_gpu_device *vgdev);
int
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 5aab588fc400..dde8fc1a3689 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -148,6 +148,20 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
virtio_gpu_notify(vgdev);
}
+/* For drm panic */
+struct virtio_gpu_object_array *virtio_gpu_panic_array_alloc(void)
+{
+ struct virtio_gpu_object_array *objs;
+
+ objs = kmalloc(sizeof(struct virtio_gpu_object_array), GFP_ATOMIC);
+ if (!objs)
+ return NULL;
+
+ objs->nents = 0;
+ objs->total = 1;
+ return objs;
+}
+
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
{
struct virtio_gpu_object_array *objs;
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 42aa554eca9f..a6f5a78f436a 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -28,6 +28,8 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
#include <linux/virtio_dma_buf.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_panic.h>
#include "virtgpu_drv.h"
@@ -127,6 +129,30 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
return ret;
}
+/* For drm panic */
+static int virtio_gpu_panic_update_dumb_bo(struct virtio_gpu_device *vgdev,
+ struct drm_plane_state *state,
+ struct drm_rect *rect)
+{
+ struct virtio_gpu_object *bo =
+ gem_to_virtio_gpu_obj(state->fb->obj[0]);
+ struct virtio_gpu_object_array *objs;
+ uint32_t w = rect->x2 - rect->x1;
+ uint32_t h = rect->y2 - rect->y1;
+ uint32_t x = rect->x1;
+ uint32_t y = rect->y1;
+ uint32_t off = x * state->fb->format->cpp[0] +
+ y * state->fb->pitches[0];
+
+ objs = virtio_gpu_panic_array_alloc();
+ if (!objs)
+ return -ENOMEM;
+ virtio_gpu_array_add_obj(objs, &bo->base.base);
+
+ return virtio_gpu_panic_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
+ objs);
+}
+
static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
struct drm_plane_state *state,
struct drm_rect *rect)
@@ -150,6 +176,24 @@ static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
objs, NULL);
}
+/* For drm_panic */
+static void virtio_gpu_panic_resource_flush(struct drm_plane *plane,
+ uint32_t x, uint32_t y,
+ uint32_t width, uint32_t height)
+{
+ struct drm_device *dev = plane->dev;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_object *bo;
+
+ vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+ bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
+
+ virtio_gpu_panic_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
+ width, height);
+ virtio_gpu_panic_notify(vgdev);
+}
+
static void virtio_gpu_resource_flush(struct drm_plane *plane,
uint32_t x, uint32_t y,
uint32_t width, uint32_t height)
@@ -446,11 +490,63 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
virtio_gpu_cursor_ping(vgdev, output);
}
+static int virtio_drm_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct virtio_gpu_object *bo;
+
+ if (!plane->state || !plane->state->fb || !plane->state->visible)
+ return -ENODEV;
+
+ bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
+
+ /* Only support mapped shmem bo */
+ if (virtio_gpu_is_vram(bo) || bo->base.base.import_attach || !bo->base.vaddr)
+ return -ENODEV;
+
+ iosys_map_set_vaddr(&sb->map[0], bo->base.vaddr);
+
+ sb->format = plane->state->fb->format;
+ sb->height = plane->state->fb->height;
+ sb->width = plane->state->fb->width;
+ sb->pitch[0] = plane->state->fb->pitches[0];
+ return 0;
+}
+
+static void virtio_panic_flush(struct drm_plane *plane)
+{
+ struct virtio_gpu_object *bo;
+ struct drm_device *dev = plane->dev;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct drm_rect rect;
+
+ rect.x1 = 0;
+ rect.y1 = 0;
+ rect.x2 = plane->state->fb->width;
+ rect.y2 = plane->state->fb->height;
+
+ bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
+
+ if (bo->dumb) {
+ if (virtio_gpu_panic_update_dumb_bo(vgdev, plane->state,
+ &rect))
+ return;
+ }
+
+ virtio_gpu_panic_resource_flush(plane,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16);
+}
+
static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
.prepare_fb = virtio_gpu_plane_prepare_fb,
.cleanup_fb = virtio_gpu_plane_cleanup_fb,
.atomic_check = virtio_gpu_plane_atomic_check,
.atomic_update = virtio_gpu_primary_plane_update,
+ .get_scanout_buffer = virtio_drm_get_scanout_buffer,
+ .panic_flush = virtio_panic_flush,
};
static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index f92133a01195..fe6a0b018571 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -184,6 +184,23 @@ int virtgpu_dma_buf_import_sgt(struct virtio_gpu_mem_entry **ents,
return 0;
}
+static void virtgpu_dma_buf_unmap(struct virtio_gpu_object *bo)
+{
+ struct dma_buf_attachment *attach = bo->base.base.import_attach;
+
+ dma_resv_assert_held(attach->dmabuf->resv);
+
+ if (bo->created) {
+ virtio_gpu_detach_object_fenced(bo);
+
+ if (bo->sgt)
+ dma_buf_unmap_attachment(attach, bo->sgt,
+ DMA_BIDIRECTIONAL);
+
+ bo->sgt = NULL;
+ }
+}
+
static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
{
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
@@ -194,13 +211,7 @@ static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
struct dma_buf *dmabuf = attach->dmabuf;
dma_resv_lock(dmabuf->resv, NULL);
-
- virtio_gpu_detach_object_fenced(bo);
-
- if (bo->sgt)
- dma_buf_unmap_attachment(attach, bo->sgt,
- DMA_BIDIRECTIONAL);
-
+ virtgpu_dma_buf_unmap(bo);
dma_resv_unlock(dmabuf->resv);
dma_buf_detach(dmabuf, attach);
@@ -250,7 +261,6 @@ static int virtgpu_dma_buf_init_obj(struct drm_device *dev,
virtio_gpu_cmd_resource_create_blob(vgdev, bo, &params,
ents, nents);
bo->guest_blob = true;
- bo->attached = true;
dma_buf_unpin(attach);
dma_resv_unlock(resv);
@@ -274,15 +284,7 @@ static void virtgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
struct drm_gem_object *obj = attach->importer_priv;
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
- if (bo->created && kref_read(&obj->refcount)) {
- virtio_gpu_detach_object_fenced(bo);
-
- if (bo->sgt)
- dma_buf_unmap_attachment(attach, bo->sgt,
- DMA_BIDIRECTIONAL);
-
- bo->sgt = NULL;
- }
+ virtgpu_dma_buf_unmap(bo);
}
static const struct dma_buf_attach_ops virtgpu_dma_buf_attach_ops = {
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index ad91624df42d..55a15e247dd1 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -86,6 +86,22 @@ void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
vgdev->vbufs = NULL;
}
+/* For drm_panic */
+static struct virtio_gpu_vbuffer*
+virtio_gpu_panic_get_vbuf(struct virtio_gpu_device *vgdev, int size)
+{
+ struct virtio_gpu_vbuffer *vbuf;
+
+ vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_ATOMIC);
+
+ vbuf->buf = (void *)vbuf + sizeof(*vbuf);
+ vbuf->size = size;
+ vbuf->resp_cb = NULL;
+ vbuf->resp_size = sizeof(struct virtio_gpu_ctrl_hdr);
+ vbuf->resp_buf = (void *)vbuf->buf + size;
+ return vbuf;
+}
+
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
int size, int resp_size, void *resp_buf,
@@ -137,6 +153,18 @@ virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
return (struct virtio_gpu_update_cursor *)vbuf->buf;
}
+/* For drm_panic */
+static void *virtio_gpu_panic_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer **vbuffer_p,
+ int cmd_size)
+{
+ struct virtio_gpu_vbuffer *vbuf;
+
+ vbuf = virtio_gpu_panic_get_vbuf(vgdev, cmd_size);
+ *vbuffer_p = vbuf;
+ return (struct virtio_gpu_command *)vbuf->buf;
+}
+
static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
virtio_gpu_resp_cb cb,
struct virtio_gpu_vbuffer **vbuffer_p,
@@ -311,6 +339,34 @@ static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
return sgt;
}
+/* For drm_panic */
+static int virtio_gpu_panic_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf,
+ int elemcnt,
+ struct scatterlist **sgs,
+ int outcnt,
+ int incnt)
+{
+ struct virtqueue *vq = vgdev->ctrlq.vq;
+ int ret;
+
+ if (vgdev->has_indirect)
+ elemcnt = 1;
+
+ if (vq->num_free < elemcnt)
+ return -ENOMEM;
+
+ ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
+ WARN_ON(ret);
+
+ vbuf->seqno = ++vgdev->ctrlq.seqno;
+ trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno);
+
+ atomic_inc(&vgdev->pending_commands);
+
+ return 0;
+}
+
static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf,
struct virtio_gpu_fence *fence,
@@ -368,6 +424,32 @@ again:
return 0;
}
+/* For drm_panic */
+static int virtio_gpu_panic_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ struct scatterlist *sgs[3], vcmd, vresp;
+ int elemcnt = 0, outcnt = 0, incnt = 0;
+
+ /* set up vcmd */
+ sg_init_one(&vcmd, vbuf->buf, vbuf->size);
+ elemcnt++;
+ sgs[outcnt] = &vcmd;
+ outcnt++;
+
+ /* set up vresp */
+ if (vbuf->resp_size) {
+ sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
+ elemcnt++;
+ sgs[outcnt + incnt] = &vresp;
+ incnt++;
+ }
+
+ return virtio_gpu_panic_queue_ctrl_sgs(vgdev, vbuf,
+ elemcnt, sgs,
+ outcnt, incnt);
+}
+
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf,
struct virtio_gpu_fence *fence)
@@ -422,6 +504,21 @@ static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
return ret;
}
+/* For drm_panic */
+void virtio_gpu_panic_notify(struct virtio_gpu_device *vgdev)
+{
+ bool notify;
+
+ if (!atomic_read(&vgdev->pending_commands))
+ return;
+
+ atomic_set(&vgdev->pending_commands, 0);
+ notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
+
+ if (notify)
+ virtqueue_notify(vgdev->ctrlq.vq);
+}
+
void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
bool notify;
@@ -567,6 +664,29 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
+/* For drm_panic */
+void virtio_gpu_panic_cmd_resource_flush(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id,
+ uint32_t x, uint32_t y,
+ uint32_t width, uint32_t height)
+{
+ struct virtio_gpu_resource_flush *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_panic_alloc_cmd_resp(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = NULL;
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
+ cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->r.width = cpu_to_le32(width);
+ cmd_p->r.height = cpu_to_le32(height);
+ cmd_p->r.x = cpu_to_le32(x);
+ cmd_p->r.y = cpu_to_le32(y);
+
+ virtio_gpu_panic_queue_ctrl_buffer(vgdev, vbuf);
+}
+
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
uint32_t x, uint32_t y,
@@ -591,6 +711,37 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
+/* For drm_panic */
+int virtio_gpu_panic_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
+ uint64_t offset,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y,
+ struct virtio_gpu_object_array *objs)
+{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+ struct virtio_gpu_transfer_to_host_2d *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+ bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
+
+ if (virtio_gpu_is_shmem(bo) && use_dma_api)
+ dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
+ bo->base.sgt, DMA_TO_DEVICE);
+
+ cmd_p = virtio_gpu_panic_alloc_cmd_resp(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+ cmd_p->offset = cpu_to_le64(offset);
+ cmd_p->r.width = cpu_to_le32(width);
+ cmd_p->r.height = cpu_to_le32(height);
+ cmd_p->r.x = cpu_to_le32(x);
+ cmd_p->r.y = cpu_to_le32(y);
+
+ return virtio_gpu_panic_queue_ctrl_buffer(vgdev, vbuf);
+}
+
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
uint64_t offset,
uint32_t width, uint32_t height,
@@ -1300,6 +1451,9 @@ virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
bo->created = true;
+
+ if (nents)
+ bo->attached = true;
}
void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_vram.c b/drivers/gpu/drm/virtio/virtgpu_vram.c
index 25df81c02783..5ad3b7c6f73c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vram.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vram.c
@@ -37,6 +37,7 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
unsigned long vm_size = vma->vm_end - vma->vm_start;
+ unsigned long vm_end;
if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
return -EINVAL;
@@ -56,12 +57,14 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
else if (vram->map_info == VIRTIO_GPU_MAP_CACHE_UNCACHED)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- /* Partial mappings of GEM buffers don't happen much in practice. */
- if (vm_size != vram->vram_node.size)
+ if (check_add_overflow(vma->vm_pgoff << PAGE_SHIFT, vm_size, &vm_end))
+ return -EINVAL;
+
+ if (vm_end > vram->vram_node.size)
return -EINVAL;
ret = io_remap_pfn_range(vma, vma->vm_start,
- vram->vram_node.start >> PAGE_SHIFT,
+ (vram->vram_node.start >> PAGE_SHIFT) + vma->vm_pgoff,
vm_size, vma->vm_page_prot);
return ret;
}
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 28a57ae109fc..12034ec12029 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -4,6 +4,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -64,8 +65,8 @@ static int vkms_enable_vblank(struct drm_crtc *crtc)
struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
- hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- out->vblank_hrtimer.function = &vkms_vblank_simulate;
+ hrtimer_setup(&out->vblank_hrtimer, &vkms_vblank_simulate, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
out->period_ns = ktime_set(0, vblank->framedur_ns);
hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL);
@@ -83,9 +84,7 @@ static bool vkms_get_vblank_timestamp(struct drm_crtc *crtc,
int *max_error, ktime_t *vblank_time,
bool in_vblank_irq)
{
- struct drm_device *dev = crtc->dev;
- struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
- struct vkms_output *output = &vkmsdev->output;
+ struct vkms_output *output = drm_crtc_to_vkms_output(crtc);
struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
if (!READ_ONCE(vblank->enabled)) {
@@ -270,25 +269,29 @@ static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = {
.atomic_disable = vkms_crtc_atomic_disable,
};
-int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
- struct drm_plane *primary, struct drm_plane *cursor)
+struct vkms_output *vkms_crtc_init(struct drm_device *dev, struct drm_plane *primary,
+ struct drm_plane *cursor)
{
- struct vkms_output *vkms_out = drm_crtc_to_vkms_output(crtc);
+ struct vkms_output *vkms_out;
+ struct drm_crtc *crtc;
int ret;
- ret = drmm_crtc_init_with_planes(dev, crtc, primary, cursor,
- &vkms_crtc_funcs, NULL);
- if (ret) {
- DRM_ERROR("Failed to init CRTC\n");
- return ret;
+ vkms_out = drmm_crtc_alloc_with_planes(dev, struct vkms_output, crtc,
+ primary, cursor,
+ &vkms_crtc_funcs, NULL);
+ if (IS_ERR(vkms_out)) {
+ DRM_DEV_ERROR(dev->dev, "Failed to init CRTC\n");
+ return vkms_out;
}
+ crtc = &vkms_out->crtc;
+
drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);
ret = drm_mode_crtc_set_gamma_size(crtc, VKMS_LUT_SIZE);
if (ret) {
DRM_ERROR("Failed to set gamma size\n");
- return ret;
+ return ERR_PTR(ret);
}
drm_crtc_enable_color_mgmt(crtc, 0, false, VKMS_LUT_SIZE);
@@ -296,9 +299,11 @@ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
spin_lock_init(&vkms_out->lock);
spin_lock_init(&vkms_out->composer_lock);
- vkms_out->composer_workq = alloc_ordered_workqueue("vkms_composer", 0);
+ vkms_out->composer_workq = drmm_alloc_ordered_workqueue(dev, "vkms_composer", 0);
+ if (IS_ERR(vkms_out->composer_workq))
+ return ERR_CAST(vkms_out->composer_workq);
if (!vkms_out->composer_workq)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
- return ret;
+ return vkms_out;
}
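For reference, hrtimer_setup() merges the old two-step hrtimer_init() plus function-pointer assignment into one initializer; a minimal sketch with an illustrative callback (not vkms' exact code):

	static enum hrtimer_restart vblank_simulate(struct hrtimer *timer)
	{
		/* hrtimer_forward_now(timer, period) and signal the vblank here */
		return HRTIMER_RESTART;
	}

	hrtimer_setup(&out->vblank_hrtimer, &vblank_simulate, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);
	hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL);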
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index e0409aba9349..b6de91134a22 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -53,14 +53,6 @@ MODULE_PARM_DESC(enable_overlay, "Enable/Disable overlay support");
DEFINE_DRM_GEM_FOPS(vkms_driver_fops);
-static void vkms_release(struct drm_device *dev)
-{
- struct vkms_device *vkms = drm_device_to_vkms_device(dev);
-
- if (vkms->output.composer_workq)
- destroy_workqueue(vkms->output.composer_workq);
-}
-
static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
@@ -108,7 +100,6 @@ static const struct drm_debugfs_info vkms_config_debugfs_list[] = {
static const struct drm_driver vkms_driver = {
.driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM,
- .release = vkms_release,
.fops = &vkms_driver_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
DRM_FBDEV_SHMEM_DRIVER_OPS,
@@ -244,17 +235,19 @@ static int __init vkms_init(void)
if (!config)
return -ENOMEM;
- default_config = config;
-
config->cursor = enable_cursor;
config->writeback = enable_writeback;
config->overlay = enable_overlay;
ret = vkms_create(config);
- if (ret)
+ if (ret) {
kfree(config);
+ return ret;
+ }
- return ret;
+ default_config = config;
+
+ return 0;
}
static void vkms_destroy(struct vkms_config *config)
@@ -278,9 +271,10 @@ static void vkms_destroy(struct vkms_config *config)
static void __exit vkms_exit(void)
{
- if (default_config->dev)
- vkms_destroy(default_config);
+ if (!default_config)
+ return;
+ vkms_destroy(default_config);
kfree(default_config);
}
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 00541eff3d1b..abbb652be2b5 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -176,9 +176,8 @@ struct vkms_crtc_state {
*/
struct vkms_output {
struct drm_crtc crtc;
- struct drm_encoder encoder;
- struct drm_connector connector;
struct drm_writeback_connector wb_connector;
+ struct drm_encoder wb_encoder;
struct hrtimer vblank_hrtimer;
ktime_t period_ns;
struct workqueue_struct *composer_workq;
@@ -216,7 +215,6 @@ struct vkms_config {
struct vkms_device {
struct drm_device drm;
struct platform_device *platform;
- struct vkms_output output;
const struct vkms_config *config;
};
@@ -243,8 +241,9 @@ struct vkms_device {
* @primary: primary plane to attach to the CRTC
* @cursor: plane to attach to the CRTC
*/
-int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
- struct drm_plane *primary, struct drm_plane *cursor);
+struct vkms_output *vkms_crtc_init(struct drm_device *dev,
+ struct drm_plane *primary,
+ struct drm_plane *cursor);
/**
* vkms_output_init() - Initialize all sub-components needed for a VKMS device.
@@ -275,6 +274,6 @@ void vkms_set_composer(struct vkms_output *out, bool enabled);
void vkms_writeback_row(struct vkms_writeback_job *wb, const struct line_buffer *src_buffer, int y);
/* Writeback */
-int vkms_enable_writeback_connector(struct vkms_device *vkmsdev);
+int vkms_enable_writeback_connector(struct vkms_device *vkmsdev, struct vkms_output *vkms_out);
#endif /* _VKMS_DRV_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_formats.c b/drivers/gpu/drm/vkms/vkms_formats.c
index 39b1d7c97d45..30a64ecca87c 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.c
+++ b/drivers/gpu/drm/vkms/vkms_formats.c
@@ -253,6 +253,26 @@ static void XRGB8888_read_line(const struct vkms_plane_state *plane, int x_start
}
}
+static void ABGR8888_read_line(const struct vkms_plane_state *plane, int x_start, int y_start,
+ enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ struct pixel_argb_u16 *end = out_pixel + count;
+ u8 *src_pixels;
+
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
+
+ int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+
+ while (out_pixel < end) {
+ u8 *px = (u8 *)src_pixels;
+ /* Switch blue and red pixels. */
+ *out_pixel = argb_u16_from_u8888(px[3], px[0], px[1], px[2]);
+ out_pixel += 1;
+ src_pixels += step;
+ }
+}
+
static void ARGB16161616_read_line(const struct vkms_plane_state *plane, int x_start,
int y_start, enum pixel_read_direction direction, int count,
struct pixel_argb_u16 out_pixel[])
@@ -344,6 +364,14 @@ static void argb_u16_to_XRGB8888(u8 *out_pixel, const struct pixel_argb_u16 *in_
out_pixel[0] = DIV_ROUND_CLOSEST(in_pixel->b, 257);
}
+static void argb_u16_to_ABGR8888(u8 *out_pixel, const struct pixel_argb_u16 *in_pixel)
+{
+ out_pixel[3] = DIV_ROUND_CLOSEST(in_pixel->a, 257);
+ out_pixel[2] = DIV_ROUND_CLOSEST(in_pixel->b, 257);
+ out_pixel[1] = DIV_ROUND_CLOSEST(in_pixel->g, 257);
+ out_pixel[0] = DIV_ROUND_CLOSEST(in_pixel->r, 257);
+}
+
static void argb_u16_to_ARGB16161616(u8 *out_pixel, const struct pixel_argb_u16 *in_pixel)
{
__le16 *pixel = (__le16 *)out_pixel;
@@ -420,6 +448,8 @@ pixel_read_line_t get_pixel_read_line_function(u32 format)
return &ARGB8888_read_line;
case DRM_FORMAT_XRGB8888:
return &XRGB8888_read_line;
+ case DRM_FORMAT_ABGR8888:
+ return &ABGR8888_read_line;
case DRM_FORMAT_ARGB16161616:
return &ARGB16161616_read_line;
case DRM_FORMAT_XRGB16161616:
@@ -453,6 +483,8 @@ pixel_write_t get_pixel_write_function(u32 format)
return &argb_u16_to_ARGB8888;
case DRM_FORMAT_XRGB8888:
return &argb_u16_to_XRGB8888;
+ case DRM_FORMAT_ABGR8888:
+ return &argb_u16_to_ABGR8888;
case DRM_FORMAT_ARGB16161616:
return &argb_u16_to_ARGB16161616;
case DRM_FORMAT_XRGB16161616:
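For reference on the new ABGR8888 helpers above: DRM fourcc layouts are defined on the 32-bit word in little-endian order, so DRM_FORMAT_ABGR8888 (A:B:G:R) lands in memory as the byte sequence R, G, B, A, while ARGB8888 lands as B, G, R, A; hence the swapped indices relative to the ARGB8888 helpers. Sketch, where px is an illustrative pointer to one packed pixel:

	/* DRM_FORMAT_ABGR8888: word is A:B:G:R little endian,
	 * so bytes in memory are R, G, B, A.
	 */
	u8 r = px[0], g = px[1], b = px[2], a = px[3];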
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 8f4bd5aef087..22f0d678af3a 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -3,20 +3,16 @@
#include "vkms_drv.h"
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
static const struct drm_connector_funcs vkms_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static const struct drm_encoder_funcs vkms_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int vkms_conn_get_modes(struct drm_connector *connector)
{
int count;
@@ -34,11 +30,10 @@ static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = {
int vkms_output_init(struct vkms_device *vkmsdev)
{
- struct vkms_output *output = &vkmsdev->output;
struct drm_device *dev = &vkmsdev->drm;
- struct drm_connector *connector = &output->connector;
- struct drm_encoder *encoder = &output->encoder;
- struct drm_crtc *crtc = &output->crtc;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct vkms_output *output;
struct vkms_plane *primary, *overlay, *cursor = NULL;
int ret;
int writeback;
@@ -60,9 +55,12 @@ int vkms_output_init(struct vkms_device *vkmsdev)
return PTR_ERR(cursor);
}
- ret = vkms_crtc_init(dev, crtc, &primary->base, &cursor->base);
- if (ret)
- return ret;
+ output = vkms_crtc_init(dev, &primary->base,
+ cursor ? &cursor->base : NULL);
+ if (IS_ERR(output)) {
+ DRM_ERROR("Failed to allocate CRTC\n");
+ return PTR_ERR(output);
+ }
if (vkmsdev->config->overlay) {
for (n = 0; n < NUM_OVERLAY_PLANES; n++) {
@@ -71,12 +69,18 @@ int vkms_output_init(struct vkms_device *vkmsdev)
DRM_DEV_ERROR(dev->dev, "Failed to init vkms plane\n");
return PTR_ERR(overlay);
}
- overlay->base.possible_crtcs = drm_crtc_mask(crtc);
+ overlay->base.possible_crtcs = drm_crtc_mask(&output->crtc);
}
}
- ret = drm_connector_init(dev, connector, &vkms_connector_funcs,
- DRM_MODE_CONNECTOR_VIRTUAL);
+ connector = drmm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
+ if (!connector) {
+ DRM_ERROR("Failed to allocate connector\n");
+ return -ENOMEM;
+ }
+
+ ret = drmm_connector_init(dev, connector, &vkms_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL, NULL);
if (ret) {
DRM_ERROR("Failed to init connector\n");
return ret;
@@ -84,35 +88,34 @@ int vkms_output_init(struct vkms_device *vkmsdev)
drm_connector_helper_add(connector, &vkms_conn_helper_funcs);
- ret = drm_encoder_init(dev, encoder, &vkms_encoder_funcs,
- DRM_MODE_ENCODER_VIRTUAL, NULL);
+ encoder = drmm_kzalloc(dev, sizeof(*encoder), GFP_KERNEL);
+ if (!encoder) {
+ DRM_ERROR("Failed to allocate encoder\n");
+ return -ENOMEM;
+ }
+ ret = drmm_encoder_init(dev, encoder, NULL,
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
if (ret) {
DRM_ERROR("Failed to init encoder\n");
- goto err_encoder;
+ return ret;
}
- encoder->possible_crtcs = drm_crtc_mask(crtc);
+ encoder->possible_crtcs = drm_crtc_mask(&output->crtc);
+ /* Attach the encoder and the connector */
ret = drm_connector_attach_encoder(connector, encoder);
if (ret) {
DRM_ERROR("Failed to attach connector to encoder\n");
- goto err_attach;
+ return ret;
}
+ /* Initialize the writeback component */
if (vkmsdev->config->writeback) {
- writeback = vkms_enable_writeback_connector(vkmsdev);
+ writeback = vkms_enable_writeback_connector(vkmsdev, output);
if (writeback)
DRM_ERROR("Failed to init writeback connector\n");
}
drm_mode_config_reset(dev);
- return 0;
-
-err_attach:
- drm_encoder_cleanup(encoder);
-
-err_encoder:
- drm_connector_cleanup(connector);
-
return ret;
}
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index e2fce471870f..e34f8c7f83c3 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -15,6 +15,7 @@
static const u32 vkms_formats[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB16161616,
DRM_FORMAT_ARGB16161616,
DRM_FORMAT_RGB565
diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c
index 79918b44fedd..fe163271d5b5 100644
--- a/drivers/gpu/drm/vkms/vkms_writeback.c
+++ b/drivers/gpu/drm/vkms/vkms_writeback.c
@@ -17,6 +17,7 @@
static const u32 vkms_wb_formats[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB16161616,
DRM_FORMAT_ARGB16161616,
DRM_FORMAT_RGB565
@@ -24,7 +25,6 @@ static const u32 vkms_wb_formats[] = {
static const struct drm_connector_funcs vkms_wb_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -106,7 +106,9 @@ static void vkms_wb_cleanup_job(struct drm_writeback_connector *connector,
struct drm_writeback_job *job)
{
struct vkms_writeback_job *vkmsjob = job->priv;
- struct vkms_device *vkmsdev;
+ struct vkms_output *vkms_output = container_of(connector,
+ struct vkms_output,
+ wb_connector);
if (!job->fb)
return;
@@ -115,8 +117,7 @@ static void vkms_wb_cleanup_job(struct drm_writeback_connector *connector,
drm_framebuffer_put(vkmsjob->wb_frame_info.fb);
- vkmsdev = drm_device_to_vkms_device(job->fb->dev);
- vkms_set_composer(&vkmsdev->output, false);
+ vkms_set_composer(vkms_output, false);
kfree(vkmsjob);
}
@@ -125,8 +126,7 @@ static void vkms_wb_atomic_commit(struct drm_connector *conn,
{
struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
conn);
- struct vkms_device *vkmsdev = drm_device_to_vkms_device(conn->dev);
- struct vkms_output *output = &vkmsdev->output;
+ struct vkms_output *output = drm_crtc_to_vkms_output(connector_state->crtc);
struct drm_writeback_connector *wb_conn = &output->wb_connector;
struct drm_connector_state *conn_state = wb_conn->base.state;
struct vkms_crtc_state *crtc_state = output->composer_state;
@@ -140,7 +140,7 @@ static void vkms_wb_atomic_commit(struct drm_connector *conn,
if (!conn_state)
return;
- vkms_set_composer(&vkmsdev->output, true);
+ vkms_set_composer(output, true);
active_wb = conn_state->writeback_job->priv;
wb_frame_info = &active_wb->wb_frame_info;
@@ -163,16 +163,23 @@ static const struct drm_connector_helper_funcs vkms_wb_conn_helper_funcs = {
.atomic_check = vkms_wb_atomic_check,
};
-int vkms_enable_writeback_connector(struct vkms_device *vkmsdev)
+int vkms_enable_writeback_connector(struct vkms_device *vkmsdev,
+ struct vkms_output *vkms_output)
{
- struct drm_writeback_connector *wb = &vkmsdev->output.wb_connector;
+ struct drm_writeback_connector *wb = &vkms_output->wb_connector;
+ int ret;
+
+ ret = drmm_encoder_init(&vkmsdev->drm, &vkms_output->wb_encoder,
+ NULL, DRM_MODE_ENCODER_VIRTUAL, NULL);
+ if (ret)
+ return ret;
+ vkms_output->wb_encoder.possible_crtcs |= drm_crtc_mask(&vkms_output->crtc);
drm_connector_helper_add(&wb->base, &vkms_wb_conn_helper_funcs);
- return drm_writeback_connector_init(&vkmsdev->drm, wb,
- &vkms_wb_connector_funcs,
- NULL,
- vkms_wb_formats,
- ARRAY_SIZE(vkms_wb_formats),
- 1);
+ return drmm_writeback_connector_init(&vkmsdev->drm, wb,
+ &vkms_wb_connector_funcs,
+ &vkms_output->wb_encoder,
+ vkms_wb_formats,
+ ARRAY_SIZE(vkms_wb_formats));
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 800a79e035ed..1912ac1cde6d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -2670,7 +2670,7 @@ out_unref:
* Returns MODE_OK on success, or a drm_mode_status error code.
*/
enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
enum drm_mode_status ret;
struct drm_device *dev = connector->dev;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 2a6c6d6581e0..4eab581883e2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -435,7 +435,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
int increment,
struct vmw_kms_dirty *dirty);
enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
int vmw_connector_get_modes(struct drm_connector *connector);
void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 114a75069e1c..f5d2ed1b0a72 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -839,7 +839,7 @@ static void vmw_stdu_connector_destroy(struct drm_connector *connector)
static enum drm_mode_status
vmw_stdu_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
enum drm_mode_status ret;
struct drm_device *dev = connector->dev;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
index 8651b788e98b..aec774fa4d7b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
@@ -290,8 +290,8 @@ vmw_vkms_enable_vblank(struct drm_crtc *crtc)
drm_calc_timestamping_constants(crtc, &crtc->mode);
- hrtimer_init(&du->vkms.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- du->vkms.timer.function = &vmw_vkms_vblank_simulate;
+ hrtimer_setup(&du->vkms.timer, &vmw_vkms_vblank_simulate, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
du->vkms.period_ns = ktime_set(0, vblank->framedur_ns);
hrtimer_start(&du->vkms.timer, du->vkms.period_ns, HRTIMER_MODE_REL);
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index b51a2bde73e2..7d7995196702 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -39,6 +39,7 @@ config DRM_XE
select DRM_TTM_HELPER
select DRM_EXEC
select DRM_GPUVM
+ select DRM_GPUSVM if !UML && DEVICE_PRIVATE
select DRM_SCHED
select MMU_NOTIFIER
select WANT_DEV_COREDUMP
@@ -59,6 +60,29 @@ config DRM_XE_DISPLAY
help
Disable this option only if you want to compile out display support.
+config DRM_XE_DP_TUNNEL
+ bool "Enable DP tunnel support"
+ depends on DRM_XE_DISPLAY
+ depends on USB4
+ select DRM_DISPLAY_DP_TUNNEL
+ default y
+ help
+ Choose this option to detect DP tunnels and enable the Bandwidth
+ Allocation mode for such tunnels. This allows using the maximum
+ resolution allowed by the link BW on all displays sharing the
+ link BW, for instance on a Thunderbolt link.
+
+ If in doubt say "Y".
+
+config DRM_XE_DEVMEM_MIRROR
+ bool "Enable device memory mirror"
+ depends on DRM_XE
+ select GET_FREE_REGION
+ default y
+ help
+ Disable this option only if you want to compile out device memory
+ mirror support. Disabling it reduces the KMD memory footprint.
+
config DRM_XE_FORCE_PROBE
string "Force probe xe for selected Intel hardware IDs"
depends on DRM_XE
diff --git a/drivers/gpu/drm/xe/Kconfig.profile b/drivers/gpu/drm/xe/Kconfig.profile
index ba17a25e8db3..7530df998148 100644
--- a/drivers/gpu/drm/xe/Kconfig.profile
+++ b/drivers/gpu/drm/xe/Kconfig.profile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
config DRM_XE_JOB_TIMEOUT_MAX
int "Default max job timeout (ms)"
default 10000 # milliseconds
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 5c97ad6ed738..9699b08585f7 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -33,9 +33,10 @@ xe-y += xe_bb.o \
xe_device_sysfs.o \
xe_dma_buf.o \
xe_drm_client.o \
+ xe_eu_stall.o \
xe_exec.o \
- xe_execlist.o \
xe_exec_queue.o \
+ xe_execlist.o \
xe_force_wake.o \
xe_ggtt.o \
xe_gpu_scheduler.o \
@@ -56,9 +57,11 @@ xe-y += xe_bb.o \
xe_gt_topology.o \
xe_guc.o \
xe_guc_ads.o \
+ xe_guc_buf.o \
xe_guc_capture.o \
xe_guc_ct.o \
xe_guc_db_mgr.o \
+ xe_guc_engine_activity.o \
xe_guc_hwconfig.o \
xe_guc_id_mgr.o \
xe_guc_klv_helpers.o \
@@ -66,11 +69,11 @@ xe-y += xe_bb.o \
xe_guc_pc.o \
xe_guc_submit.o \
xe_heci_gsc.o \
+ xe_huc.o \
xe_hw_engine.o \
xe_hw_engine_class_sysfs.o \
xe_hw_engine_group.o \
xe_hw_fence.o \
- xe_huc.o \
xe_irq.o \
xe_lrc.o \
xe_migrate.o \
@@ -86,15 +89,20 @@ xe-y += xe_bb.o \
xe_preempt_fence.o \
xe_pt.o \
xe_pt_walk.o \
+ xe_pxp.o \
+ xe_pxp_debugfs.o \
+ xe_pxp_submit.o \
xe_query.o \
xe_range_fence.o \
xe_reg_sr.o \
xe_reg_whitelist.o \
- xe_rtp.o \
xe_ring_ops.o \
+ xe_rtp.o \
xe_sa.o \
xe_sched_job.o \
+ xe_shrinker.o \
xe_step.o \
+ xe_survivability_mode.o \
xe_sync.o \
xe_tile.o \
xe_tile_sysfs.o \
@@ -102,8 +110,8 @@ xe-y += xe_bb.o \
xe_trace_bo.o \
xe_trace_guc.o \
xe_trace_lrc.o \
- xe_ttm_sys_mgr.o \
xe_ttm_stolen_mgr.o \
+ xe_ttm_sys_mgr.o \
xe_ttm_vram_mgr.o \
xe_tuning.o \
xe_uc.o \
@@ -112,15 +120,18 @@ xe-y += xe_bb.o \
xe_vram.o \
xe_vram_freq.o \
xe_vsec.o \
- xe_wait_user_fence.o \
xe_wa.o \
+ xe_wait_user_fence.o \
xe_wopcm.o
xe-$(CONFIG_HMM_MIRROR) += xe_hmm.o
+xe-$(CONFIG_DRM_GPUSVM) += xe_svm.o
# graphics hardware monitoring (HWMON) support
xe-$(CONFIG_HWMON) += xe_hwmon.o
+xe-$(CONFIG_PERF_EVENTS) += xe_pmu.o
+
# graphics virtualization (SR-IOV) support
xe-y += \
xe_gt_sriov_vf.o \
@@ -199,6 +210,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_bios.o \
i915-display/intel_bw.o \
i915-display/intel_cdclk.o \
+ i915-display/intel_cmtg.o \
i915-display/intel_color.o \
i915-display/intel_combo_phy.o \
i915-display/intel_connector.o \
@@ -221,6 +233,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_display_wa.o \
i915-display/intel_dkl_phy.o \
i915-display/intel_dmc.o \
+ i915-display/intel_dmc_wl.o \
i915-display/intel_dp.o \
i915-display/intel_dp_aux.o \
i915-display/intel_dp_aux_backlight.o \
@@ -262,13 +275,13 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_psr.o \
i915-display/intel_qp_tables.o \
i915-display/intel_quirks.o \
+ i915-display/intel_snps_hdmi_pll.o \
i915-display/intel_snps_phy.o \
i915-display/intel_tc.o \
i915-display/intel_vblank.o \
i915-display/intel_vdsc.o \
i915-display/intel_vga.o \
i915-display/intel_vrr.o \
- i915-display/intel_dmc_wl.o \
i915-display/intel_wm.o \
i915-display/skl_scaler.o \
i915-display/skl_universal_plane.o \
@@ -301,6 +314,9 @@ ifeq ($(CONFIG_DEBUG_FS),y)
i915-display/intel_pipe_crc.o
endif
+xe-$(CONFIG_DRM_XE_DP_TUNNEL) += \
+ i915-display/intel_dp_tunnel.o
+
obj-$(CONFIG_DRM_XE) += xe.o
obj-$(CONFIG_DRM_XE_KUNIT_TEST) += tests/
diff --git a/drivers/gpu/drm/xe/abi/gsc_pxp_commands_abi.h b/drivers/gpu/drm/xe/abi/gsc_pxp_commands_abi.h
index 57520809e48d..290e431cf10d 100644
--- a/drivers/gpu/drm/xe/abi/gsc_pxp_commands_abi.h
+++ b/drivers/gpu/drm/xe/abi/gsc_pxp_commands_abi.h
@@ -6,6 +6,7 @@
#ifndef _ABI_GSC_PXP_COMMANDS_ABI_H
#define _ABI_GSC_PXP_COMMANDS_ABI_H
+#include <linux/sizes.h>
#include <linux/types.h>
/* Heci client ID for PXP commands */
@@ -14,6 +15,12 @@
#define PXP_APIVER(x, y) (((x) & 0xFFFF) << 16 | ((y) & 0xFFFF))
/*
+ * A PXP sub-section in an HECI packet can be up to 64K in size in each direction.
+ * This does not include the top-level GSC header.
+ */
+#define PXP_MAX_PACKET_SIZE SZ_64K
+
+/*
* there are a lot of status codes for PXP, but we only define the cross-API
* common ones that we actually can handle in the kernel driver. Other failure
* codes should be printed to error msg for debug.
@@ -24,6 +31,7 @@ enum pxp_status {
PXP_STATUS_NOT_READY = 0x100e,
PXP_STATUS_PLATFCONFIG_KF1_NOVERIF = 0x101a,
PXP_STATUS_PLATFCONFIG_KF1_BAD = 0x101f,
+ PXP_STATUS_PLATFCONFIG_FIXED_KF1_NOT_SUPPORTED = 0x1037,
PXP_STATUS_OP_NOT_PERMITTED = 0x4013
};
@@ -42,6 +50,8 @@ struct pxp_cmd_header {
u32 buffer_len;
} __packed;
+#define PXP43_CMDID_INVALIDATE_STREAM_KEY 0x00000007
+#define PXP43_CMDID_INIT_SESSION 0x00000036
#define PXP43_CMDID_NEW_HUC_AUTH 0x0000003F /* MTL+ */
/* PXP-Input-Packet: HUC Auth-only */
@@ -56,4 +66,35 @@ struct pxp43_huc_auth_out {
struct pxp_cmd_header header;
} __packed;
+/* PXP-Input-Packet: Init PXP session */
+struct pxp43_create_arb_in {
+ struct pxp_cmd_header header;
+ /* header.stream_id fields for version 4.3 of Init PXP session: */
+ #define PXP43_INIT_SESSION_VALID BIT(0)
+ #define PXP43_INIT_SESSION_APPTYPE BIT(1)
+ #define PXP43_INIT_SESSION_APPID GENMASK(17, 2)
+ u32 protection_mode;
+ #define PXP43_INIT_SESSION_PROTECTION_ARB 0x2
+ u32 sub_session_id;
+ u32 init_flags;
+ u32 rsvd[12];
+} __packed;
+
+/* PXP-Output-Packet: Init PXP session */
+struct pxp43_create_arb_out {
+ struct pxp_cmd_header header;
+ u32 rsvd[8];
+} __packed;
+
+/* PXP-Input-Packet: Invalidate Stream Key */
+struct pxp43_inv_stream_key_in {
+ struct pxp_cmd_header header;
+ u32 rsvd[3];
+} __packed;
+
+/* PXP-Output-Packet: Invalidate Stream Key */
+struct pxp43_inv_stream_key_out {
+ struct pxp_cmd_header header;
+ u32 rsvd;
+} __packed;
#endif
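To make the new session-init bitfields concrete, here is a minimal sketch of how a caller could compose header.stream_id for an ARB session from the masks above; the helper name and the session_id parameter are illustrative and not taken from this series.

#include <linux/bitfield.h>

/* Illustrative only: pack the ARB session identifier into stream_id. */
static u32 pxp43_arb_stream_id_example(u16 session_id)
{
	return PXP43_INIT_SESSION_VALID |
	       FIELD_PREP(PXP43_INIT_SESSION_APPID, session_id);
}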
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
index fee385532fb0..ec516e838ee8 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
@@ -140,6 +140,7 @@ enum xe_guc_action {
XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
XE_GUC_ACTION_CLIENT_SOFT_RESET = 0x5507,
XE_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
+ XE_GUC_ACTION_SET_DEVICE_ENGINE_ACTIVITY_BUFFER = 0x550C,
XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR = 0x6000,
XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC = 0x6002,
XE_GUC_ACTION_PAGE_FAULT_RES_DESC = 0x6003,
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
index 85abe4f09ae2..b28c8fa061f7 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
@@ -174,6 +174,9 @@ struct slpc_task_state_data {
};
} __packed;
+#define SLPC_CTX_FREQ_REQ_IS_COMPUTE REG_BIT(28)
+#define SLPC_OPTIMIZED_STRATEGY_COMPUTE REG_BIT(0)
+
struct slpc_shared_data_header {
/* Total size in bytes of this shared buffer. */
u32 size;
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
new file mode 100644
index 000000000000..8a048980ea38
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __I915_GEM_OBJECT_H__
+#define __I915_GEM_OBJECT_H__
+
+struct dma_fence;
+struct i915_sched_attr;
+
+static inline void i915_gem_fence_wait_priority(struct dma_fence *fence,
+ const struct i915_sched_attr *attr)
+{
+}
+
+#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
index 9c4cf050059a..41d39d67817a 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
@@ -1,3 +1,8 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
#ifndef _I915_GEM_STOLEN_H_
#define _I915_GEM_STOLEN_H_
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
index 84b0991b35b3..dfec5108d2c3 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
@@ -95,14 +95,6 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
#define HAS_128_BYTE_Y_TILING(xe) (xe || 1)
-#define I915_PRIORITY_DISPLAY 0
-struct i915_sched_attr {
- int priority;
-};
-#define i915_gem_fence_wait_priority(fence, attr) do { (void) attr; } while (0)
-
-#define FORCEWAKE_ALL XE_FORCEWAKE_ALL
-
#ifdef CONFIG_ARM64
/*
* arm64 indirectly includes linux/rtc.h,
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_gtt_view_types.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_gtt_view_types.h
new file mode 100644
index 000000000000..b261910cd6f9
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_gtt_view_types.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#include "../../i915/i915_gtt_view_types.h"
+
+/* Partial view not supported in xe, fail build if used. */
+#define I915_GTT_VIEW_PARTIAL
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h
new file mode 100644
index 000000000000..c11130440d31
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __I915_SCHEDULER_TYPES_H__
+#define __I915_SCHEDULER_TYPES_H__
+
+#define I915_PRIORITY_DISPLAY 0
+
+struct i915_sched_attr {
+ int priority;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
index bdae8392e125..4465c40f8134 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
@@ -10,6 +10,8 @@
#include "xe_ggtt_types.h"
+#include <linux/refcount.h>
+
/* We don't want these from i915_drm.h in case of Xe */
#undef I915_TILING_X
#undef I915_TILING_Y
@@ -19,6 +21,7 @@
struct xe_bo;
struct i915_vma {
+ refcount_t ref;
struct xe_bo *bo, *dpt;
struct xe_ggtt_node *node;
};
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma_types.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_vma_types.h
deleted file mode 100644
index e7aaf50f5485..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma_types.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#include <linux/types.h>
-#include <linux/build_bug.h>
-
-/* XX: Figure out how to handle this vma mapping in xe */
-struct intel_remapped_plane_info {
- /* in gtt pages */
- u32 offset:31;
- u32 linear:1;
- union {
- /* in gtt pages for !linear */
- struct {
- u16 width;
- u16 height;
- u16 src_stride;
- u16 dst_stride;
- };
-
- /* in gtt pages for linear */
- u32 size;
- };
-} __packed;
-
-struct intel_remapped_info {
- struct intel_remapped_plane_info plane[4];
- /* in gtt pages */
- u32 plane_alignment;
-} __packed;
-
-struct intel_rotation_info {
- struct intel_remapped_plane_info plane[2];
-} __packed;
-
-enum i915_gtt_view_type {
- I915_GTT_VIEW_NORMAL = 0,
- I915_GTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
- I915_GTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
-};
-
-static inline void assert_i915_gem_gtt_types(void)
-{
- BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 2 * sizeof(u32) + 8 * sizeof(u16));
- BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 5 * sizeof(u32) + 16 * sizeof(u16));
-
- /* Check that rotation/remapped shares offsets for simplicity */
- BUILD_BUG_ON(offsetof(struct intel_remapped_info, plane[0]) !=
- offsetof(struct intel_rotation_info, plane[0]));
- BUILD_BUG_ON(offsetofend(struct intel_remapped_info, plane[1]) !=
- offsetofend(struct intel_rotation_info, plane[1]));
-
- /* As we encode the size of each branch inside the union into its type,
- * we have to be careful that each branch has a unique size.
- */
- switch ((enum i915_gtt_view_type)0) {
- case I915_GTT_VIEW_NORMAL:
- case I915_GTT_VIEW_ROTATED:
- case I915_GTT_VIEW_REMAPPED:
- /* gcc complains if these are identical cases */
- break;
- }
-}
-
-struct i915_gtt_view {
- enum i915_gtt_view_type type;
- union {
- /* Members need to contain no holes/padding */
- struct intel_rotation_info rotated;
- struct intel_remapped_info remapped;
- };
-};
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
index 4fc3e535de91..0c1e88e36a1e 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
@@ -10,6 +10,8 @@
#include "xe_device_types.h"
#include "xe_mmio.h"
+#define FORCEWAKE_ALL XE_FORCEWAKE_ALL
+
static inline struct intel_uncore *to_intel_uncore(struct drm_device *drm)
{
return &to_xe_device(drm)->uncore;
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h b/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h
index 5dfc587c8237..97fd0ddf0b3a 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h
@@ -9,14 +9,21 @@
#include <linux/errno.h>
#include <linux/types.h>
+#include "xe_pxp.h"
+
struct drm_gem_object;
-struct intel_pxp;
-static inline int intel_pxp_key_check(struct intel_pxp *pxp,
- struct drm_gem_object *obj,
- bool assign)
+static inline int intel_pxp_key_check(struct drm_gem_object *obj, bool assign)
{
- return -ENODEV;
+ /*
+ * The assign variable is used in i915 to assign the key to the BO at
+ * first submission time. In Xe the key is instead assigned at BO
+ * creation time, so the assign variable must always be false.
+ */
+ if (assign)
+ return -EINVAL;
+
+ return xe_pxp_obj_key_check(obj);
}
#endif
diff --git a/drivers/gpu/drm/xe/display/ext/i915_irq.c b/drivers/gpu/drm/xe/display/ext/i915_irq.c
index ac4cda2d81c7..3c6bca66ddab 100644
--- a/drivers/gpu/drm/xe/display/ext/i915_irq.c
+++ b/drivers/gpu/drm/xe/display/ext/i915_irq.c
@@ -51,6 +51,29 @@ void gen2_irq_init(struct intel_uncore *uncore, struct i915_irq_regs regs,
intel_uncore_posting_read(uncore, regs.imr);
}
+void gen2_error_reset(struct intel_uncore *uncore, struct i915_error_regs regs)
+{
+ intel_uncore_write(uncore, regs.emr, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.emr);
+
+ intel_uncore_write(uncore, regs.eir, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.eir);
+ intel_uncore_write(uncore, regs.eir, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.eir);
+}
+
+void gen2_error_init(struct intel_uncore *uncore, struct i915_error_regs regs,
+ u32 emr_val)
+{
+ intel_uncore_write(uncore, regs.eir, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.eir);
+ intel_uncore_write(uncore, regs.eir, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.eir);
+
+ intel_uncore_write(uncore, regs.emr, emr_val);
+ intel_uncore_posting_read(uncore, regs.emr);
+}
+
bool intel_irqs_enabled(struct xe_device *xe)
{
return atomic_read(&xe->irq.enabled);
diff --git a/drivers/gpu/drm/xe/display/intel_bo.c b/drivers/gpu/drm/xe/display/intel_bo.c
index b463f5bd4eed..27437c22bd70 100644
--- a/drivers/gpu/drm/xe/display/intel_bo.c
+++ b/drivers/gpu/drm/xe/display/intel_bo.c
@@ -25,7 +25,7 @@ bool intel_bo_is_shmem(struct drm_gem_object *obj)
bool intel_bo_is_protected(struct drm_gem_object *obj)
{
- return false;
+ return xe_bo_is_protected(gem_to_xe_bo(obj));
}
void intel_bo_flush_if_display(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.c b/drivers/gpu/drm/xe/display/intel_fb_bo.c
index 4d209ebc26c2..ebdb22c9499d 100644
--- a/drivers/gpu/drm/xe/display/intel_fb_bo.c
+++ b/drivers/gpu/drm/xe/display/intel_fb_bo.c
@@ -24,7 +24,7 @@ void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj)
xe_bo_put(bo);
}
-int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
+int intel_fb_bo_framebuffer_init(struct drm_framebuffer *fb,
struct drm_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd)
{
@@ -50,10 +50,10 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
/*
* XE_BO_FLAG_SCANOUT should ideally be set at creation, or is
* automatically set when creating FB. We cannot change caching
- * mode when the boect is VM_BINDed, so we can only set
+ * mode when the bo is VM_BINDed, so we can only set
* coherency with display when unbound.
*/
- if (XE_IOCTL_DBG(xe, !list_empty(&bo->ttm.base.gpuva.list))) {
+ if (XE_IOCTL_DBG(xe, xe_bo_is_vm_bound(bo))) {
ttm_bo_unreserve(&bo->ttm);
ret = -EINVAL;
goto err;
@@ -68,10 +68,11 @@ err:
return ret;
}
-struct drm_gem_object *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
+struct drm_gem_object *intel_fb_bo_lookup_valid_bo(struct drm_device *drm,
struct drm_file *filp,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
+ struct xe_device *xe = to_xe_device(drm);
struct xe_bo *bo;
struct drm_gem_object *gem = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
@@ -80,7 +81,7 @@ struct drm_gem_object *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915
bo = gem_to_xe_bo(gem);
/* Require vram placement or dma-buf import */
- if (IS_DGFX(i915) &&
+ if (IS_DGFX(xe) &&
!xe_bo_can_migrate(bo, XE_PL_VRAM0) &&
bo->ttm.type != ttm_bo_type_sg) {
drm_gem_object_put(gem);
diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
index ca95fcd098ec..3a1e505ff182 100644
--- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
@@ -45,7 +45,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
NULL, size,
ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
XE_BO_FLAG_STOLEN |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_GGTT | XE_BO_FLAG_PINNED);
if (!IS_ERR(obj))
drm_info(&xe->drm, "Allocated fbdev into stolen\n");
else
@@ -56,7 +56,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, size,
ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_GGTT | XE_BO_FLAG_PINNED);
}
if (IS_ERR(obj)) {
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index b3921dbc52ff..0b0aca7a25af 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -8,6 +8,8 @@
#include <linux/fb.h>
+#include <drm/drm_client.h>
+#include <drm/drm_client_event.h>
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
@@ -29,6 +31,7 @@
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_opregion.h"
+#include "skl_watermark.h"
#include "xe_module.h"
/* Xe device functions */
@@ -66,6 +69,10 @@ void xe_display_driver_set_hooks(struct drm_driver *driver)
if (!xe_modparam.probe_display)
return;
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+ driver->fbdev_probe = intel_fbdev_driver_fbdev_probe;
+#endif
+
driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
}
@@ -101,19 +108,25 @@ int xe_display_create(struct xe_device *xe)
return drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
}
-static void xe_display_fini_nommio(struct drm_device *dev, void *dummy)
+static void xe_display_fini_early(void *arg)
{
- struct xe_device *xe = to_xe_device(dev);
+ struct xe_device *xe = arg;
struct intel_display *display = &xe->display;
if (!xe->info.probe_display)
return;
+ intel_display_driver_remove_nogem(display);
+ intel_display_driver_remove_noirq(display);
+ intel_opregion_cleanup(display);
intel_power_domains_cleanup(display);
}
-int xe_display_init_nommio(struct xe_device *xe)
+int xe_display_init_early(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+ int err;
+
if (!xe->info.probe_display)
return 0;
@@ -123,29 +136,6 @@ int xe_display_init_nommio(struct xe_device *xe)
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(xe);
- return drmm_add_action_or_reset(&xe->drm, xe_display_fini_nommio, xe);
-}
-
-static void xe_display_fini_noirq(void *arg)
-{
- struct xe_device *xe = arg;
- struct intel_display *display = &xe->display;
-
- if (!xe->info.probe_display)
- return;
-
- intel_display_driver_remove_noirq(display);
- intel_opregion_cleanup(display);
-}
-
-int xe_display_init_noirq(struct xe_device *xe)
-{
- struct intel_display *display = &xe->display;
- int err;
-
- if (!xe->info.probe_display)
- return 0;
-
intel_display_driver_early_probe(display);
/* Early display init.. */
@@ -162,26 +152,34 @@ int xe_display_init_noirq(struct xe_device *xe)
intel_display_device_info_runtime_init(display);
err = intel_display_driver_probe_noirq(display);
- if (err) {
- intel_opregion_cleanup(display);
- return err;
- }
+ if (err)
+ goto err_opregion;
+
+ err = intel_display_driver_probe_nogem(display);
+ if (err)
+ goto err_noirq;
- return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_noirq, xe);
+ return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_early, xe);
+err_noirq:
+ intel_display_driver_remove_noirq(display);
+ intel_power_domains_cleanup(display);
+err_opregion:
+ intel_opregion_cleanup(display);
+ return err;
}
-static void xe_display_fini_noaccel(void *arg)
+static void xe_display_fini(void *arg)
{
struct xe_device *xe = arg;
struct intel_display *display = &xe->display;
- if (!xe->info.probe_display)
- return;
-
- intel_display_driver_remove_nogem(display);
+ intel_hpd_poll_fini(xe);
+ intel_hdcp_component_fini(display);
+ intel_audio_deinit(display);
+ intel_display_driver_remove(display);
}
-int xe_display_init_noaccel(struct xe_device *xe)
+int xe_display_init(struct xe_device *xe)
{
struct intel_display *display = &xe->display;
int err;
@@ -189,34 +187,11 @@ int xe_display_init_noaccel(struct xe_device *xe)
if (!xe->info.probe_display)
return 0;
- err = intel_display_driver_probe_nogem(display);
+ err = intel_display_driver_probe(display);
if (err)
return err;
- return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_noaccel, xe);
-}
-
-int xe_display_init(struct xe_device *xe)
-{
- struct intel_display *display = &xe->display;
-
- if (!xe->info.probe_display)
- return 0;
-
- return intel_display_driver_probe(display);
-}
-
-void xe_display_fini(struct xe_device *xe)
-{
- struct intel_display *display = &xe->display;
-
- if (!xe->info.probe_display)
- return;
-
- intel_hpd_poll_fini(xe);
-
- intel_hdcp_component_fini(display);
- intel_audio_deinit(xe);
+ return devm_add_action_or_reset(xe->drm.dev, xe_display_fini, xe);
}
void xe_display_register(struct xe_device *xe)
@@ -228,7 +203,6 @@ void xe_display_register(struct xe_device *xe)
intel_display_driver_register(display);
intel_power_domains_enable(display);
- intel_register_dsm_handler();
}
void xe_display_unregister(struct xe_device *xe)
@@ -238,21 +212,10 @@ void xe_display_unregister(struct xe_device *xe)
if (!xe->info.probe_display)
return;
- intel_unregister_dsm_handler();
intel_power_domains_disable(display);
intel_display_driver_unregister(display);
}
-void xe_display_driver_remove(struct xe_device *xe)
-{
- struct intel_display *display = &xe->display;
-
- if (!xe->info.probe_display)
- return;
-
- intel_display_driver_remove(display);
-}
-
/* IRQ-related functions */
void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
@@ -322,11 +285,58 @@ static void xe_display_flush_cleanup_work(struct xe_device *xe)
}
}
-/* TODO: System and runtime suspend/resume sequences will be sanitized as a follow-up. */
-static void __xe_display_pm_suspend(struct xe_device *xe, bool runtime)
+static void xe_display_enable_d3cold(struct xe_device *xe)
+{
+ struct intel_display *display = &xe->display;
+
+ if (!xe->info.probe_display)
+ return;
+
+ /*
+ * We do a lot of poking in a lot of registers, make sure they work
+ * properly.
+ */
+ intel_power_domains_disable(display);
+
+ xe_display_flush_cleanup_work(xe);
+
+ intel_opregion_suspend(display, PCI_D3cold);
+
+ intel_dmc_suspend(display);
+
+ if (has_display(xe))
+ intel_hpd_poll_enable(xe);
+}
+
+static void xe_display_disable_d3cold(struct xe_device *xe)
+{
+ struct intel_display *display = &xe->display;
+
+ if (!xe->info.probe_display)
+ return;
+
+ intel_dmc_resume(display);
+
+ if (has_display(xe))
+ drm_mode_config_reset(&xe->drm);
+
+ intel_display_driver_init_hw(display);
+
+ intel_hpd_init(xe);
+
+ if (has_display(xe))
+ intel_hpd_poll_disable(xe);
+
+ intel_opregion_resume(display);
+
+ intel_power_domains_enable(display);
+}
+
+void xe_display_pm_suspend(struct xe_device *xe)
{
struct intel_display *display = &xe->display;
bool s2idle = suspend_to_idle();
+
if (!xe->info.probe_display)
return;
@@ -335,10 +345,9 @@ static void __xe_display_pm_suspend(struct xe_device *xe, bool runtime)
* properly.
*/
intel_power_domains_disable(display);
- if (!runtime)
- intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
+ drm_client_dev_suspend(&xe->drm, false);
- if (!runtime && has_display(xe)) {
+ if (has_display(xe)) {
drm_kms_helper_poll_disable(&xe->drm);
intel_display_driver_disable_user_access(display);
intel_display_driver_suspend(display);
@@ -348,7 +357,7 @@ static void __xe_display_pm_suspend(struct xe_device *xe, bool runtime)
intel_hpd_cancel_work(xe);
- if (!runtime && has_display(xe)) {
+ if (has_display(xe)) {
intel_display_driver_suspend_access(display);
intel_encoder_suspend_all(&xe->display);
}
@@ -356,14 +365,6 @@ static void __xe_display_pm_suspend(struct xe_device *xe, bool runtime)
intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);
intel_dmc_suspend(display);
-
- if (runtime && has_display(xe))
- intel_hpd_poll_enable(xe);
-}
-
-void xe_display_pm_suspend(struct xe_device *xe)
-{
- __xe_display_pm_suspend(xe, false);
}
void xe_display_pm_shutdown(struct xe_device *xe)
@@ -374,7 +375,8 @@ void xe_display_pm_shutdown(struct xe_device *xe)
return;
intel_power_domains_disable(display);
- intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
+ drm_client_dev_suspend(&xe->drm, false);
+
if (has_display(xe)) {
drm_kms_helper_poll_disable(&xe->drm);
intel_display_driver_disable_user_access(display);
@@ -402,7 +404,7 @@ void xe_display_pm_runtime_suspend(struct xe_device *xe)
return;
if (xe->d3cold.allowed) {
- __xe_display_pm_suspend(xe, true);
+ xe_display_enable_d3cold(xe);
return;
}
@@ -463,7 +465,7 @@ void xe_display_pm_resume_early(struct xe_device *xe)
intel_display_power_resume_early(display);
}
-static void __xe_display_pm_resume(struct xe_device *xe, bool runtime)
+void xe_display_pm_resume(struct xe_device *xe)
{
struct intel_display *display = &xe->display;
@@ -477,12 +479,12 @@ static void __xe_display_pm_resume(struct xe_device *xe, bool runtime)
intel_display_driver_init_hw(display);
- if (!runtime && has_display(xe))
+ if (has_display(xe))
intel_display_driver_resume_access(display);
intel_hpd_init(xe);
- if (!runtime && has_display(xe)) {
+ if (has_display(xe)) {
intel_display_driver_resume(display);
drm_kms_helper_poll_enable(&xe->drm);
intel_display_driver_enable_user_access(display);
@@ -493,29 +495,24 @@ static void __xe_display_pm_resume(struct xe_device *xe, bool runtime)
intel_opregion_resume(display);
- if (!runtime)
- intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_RUNNING, false);
+ drm_client_dev_resume(&xe->drm, false);
intel_power_domains_enable(display);
}
-void xe_display_pm_resume(struct xe_device *xe)
-{
- __xe_display_pm_resume(xe, false);
-}
-
void xe_display_pm_runtime_resume(struct xe_device *xe)
{
if (!xe->info.probe_display)
return;
if (xe->d3cold.allowed) {
- __xe_display_pm_resume(xe, true);
+ xe_display_disable_d3cold(xe);
return;
}
intel_hpd_init(xe);
intel_hpd_poll_disable(xe);
+ skl_watermark_ipc_update(xe);
}
diff --git a/drivers/gpu/drm/xe/display/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h
index 233f81a26c25..46e14f8dee28 100644
--- a/drivers/gpu/drm/xe/display/xe_display.h
+++ b/drivers/gpu/drm/xe/display/xe_display.h
@@ -14,17 +14,13 @@ struct drm_driver;
bool xe_display_driver_probe_defer(struct pci_dev *pdev);
void xe_display_driver_set_hooks(struct drm_driver *driver);
-void xe_display_driver_remove(struct xe_device *xe);
int xe_display_create(struct xe_device *xe);
int xe_display_probe(struct xe_device *xe);
-int xe_display_init_nommio(struct xe_device *xe);
-int xe_display_init_noirq(struct xe_device *xe);
-int xe_display_init_noaccel(struct xe_device *xe);
+int xe_display_init_early(struct xe_device *xe);
int xe_display_init(struct xe_device *xe);
-void xe_display_fini(struct xe_device *xe);
void xe_display_register(struct xe_device *xe);
void xe_display_unregister(struct xe_device *xe);
@@ -54,11 +50,8 @@ static inline int xe_display_create(struct xe_device *xe) { return 0; }
static inline int xe_display_probe(struct xe_device *xe) { return 0; }
-static inline int xe_display_init_nommio(struct xe_device *xe) { return 0; }
-static inline int xe_display_init_noirq(struct xe_device *xe) { return 0; }
-static inline int xe_display_init_noaccel(struct xe_device *xe) { return 0; }
+static inline int xe_display_init_early(struct xe_device *xe) { return 0; }
static inline int xe_display_init(struct xe_device *xe) { return 0; }
-static inline void xe_display_fini(struct xe_device *xe) {}
static inline void xe_display_register(struct xe_device *xe) {}
static inline void xe_display_unregister(struct xe_device *xe) {}
diff --git a/drivers/gpu/drm/xe/display/xe_display_rps.c b/drivers/gpu/drm/xe/display/xe_display_rps.c
index ab21c581c192..fa616f9688a5 100644
--- a/drivers/gpu/drm/xe/display/xe_display_rps.c
+++ b/drivers/gpu/drm/xe/display/xe_display_rps.c
@@ -10,7 +10,7 @@ void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
{
}
-void intel_display_rps_mark_interactive(struct drm_i915_private *i915,
+void intel_display_rps_mark_interactive(struct intel_display *display,
struct intel_atomic_state *state,
bool interactive)
{
diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index 9fa51b84737c..d918ae1c8061 100644
--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -5,10 +5,12 @@
#include <drm/ttm/ttm_bo.h>
+#include "i915_vma.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
+#include "intel_fbdev.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
@@ -80,7 +82,7 @@ write_dpt_remapped(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs,
static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
const struct i915_gtt_view *view,
struct i915_vma *vma,
- u64 physical_alignment)
+ unsigned int alignment)
{
struct xe_device *xe = to_xe_device(fb->base.dev);
struct xe_tile *tile0 = xe_device_get_root_tile(xe);
@@ -106,7 +108,7 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
XE_BO_FLAG_VRAM0 |
XE_BO_FLAG_GGTT |
XE_BO_FLAG_PAGETABLE,
- physical_alignment);
+ alignment);
else
dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL,
dpt_size, ~0ull,
@@ -114,7 +116,7 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
XE_BO_FLAG_STOLEN |
XE_BO_FLAG_GGTT |
XE_BO_FLAG_PAGETABLE,
- physical_alignment);
+ alignment);
if (IS_ERR(dpt))
dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL,
dpt_size, ~0ull,
@@ -122,7 +124,7 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
XE_BO_FLAG_SYSTEM |
XE_BO_FLAG_GGTT |
XE_BO_FLAG_PAGETABLE,
- physical_alignment);
+ alignment);
if (IS_ERR(dpt))
return PTR_ERR(dpt);
@@ -192,7 +194,7 @@ write_ggtt_rotated(struct xe_bo *bo, struct xe_ggtt *ggtt, u32 *ggtt_ofs, u32 bo
static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
const struct i915_gtt_view *view,
struct i915_vma *vma,
- u64 physical_alignment)
+ unsigned int alignment)
{
struct drm_gem_object *obj = intel_fb_bo(&fb->base);
struct xe_bo *bo = gem_to_xe_bo(obj);
@@ -275,7 +277,7 @@ out:
static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
const struct i915_gtt_view *view,
- u64 physical_alignment)
+ unsigned int alignment)
{
struct drm_device *dev = fb->base.dev;
struct xe_device *xe = to_xe_device(dev);
@@ -287,6 +289,7 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
if (!vma)
return ERR_PTR(-ENODEV);
+ refcount_set(&vma->ref, 1);
if (IS_DGFX(to_xe_device(bo->ttm.base.dev)) &&
intel_fb_rc_ccs_cc_plane(&fb->base) >= 0 &&
!(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS)) {
@@ -324,9 +327,9 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
vma->bo = bo;
if (intel_fb_uses_dpt(&fb->base))
- ret = __xe_pin_fb_vma_dpt(fb, view, vma, physical_alignment);
+ ret = __xe_pin_fb_vma_dpt(fb, view, vma, alignment);
else
- ret = __xe_pin_fb_vma_ggtt(fb, view, vma, physical_alignment);
+ ret = __xe_pin_fb_vma_ggtt(fb, view, vma, alignment);
if (ret)
goto err_unpin;
@@ -347,6 +350,9 @@ static void __xe_unpin_fb_vma(struct i915_vma *vma)
{
u8 tile_id = vma->node->ggtt->tile->id;
+ if (!refcount_dec_and_test(&vma->ref))
+ return;
+
if (vma->dpt)
xe_bo_unpin_map_no_vm(vma->dpt);
else if (!xe_ggtt_node_allocated(vma->bo->ggtt_node[tile_id]) ||
@@ -364,6 +370,7 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
const struct i915_gtt_view *view,
unsigned int alignment,
unsigned int phys_alignment,
+ unsigned int vtd_guard,
bool uses_fence,
unsigned long *out_flags)
{
@@ -377,25 +384,58 @@ void intel_fb_unpin_vma(struct i915_vma *vma, unsigned long flags)
__xe_unpin_fb_vma(vma);
}
-int intel_plane_pin_fb(struct intel_plane_state *plane_state)
+static bool reuse_vma(struct intel_plane_state *new_plane_state,
+ const struct intel_plane_state *old_plane_state)
{
- struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct intel_framebuffer *fb = to_intel_framebuffer(new_plane_state->hw.fb);
+ struct xe_device *xe = to_xe_device(fb->base.dev);
+ struct i915_vma *vma;
+
+ if (old_plane_state->hw.fb == new_plane_state->hw.fb &&
+ !memcmp(&old_plane_state->view.gtt,
+ &new_plane_state->view.gtt,
+ sizeof(new_plane_state->view.gtt))) {
+ vma = old_plane_state->ggtt_vma;
+ goto found;
+ }
+
+ if (fb == intel_fbdev_framebuffer(xe->display.fbdev.fbdev)) {
+ vma = intel_fbdev_vma_pointer(xe->display.fbdev.fbdev);
+ if (vma)
+ goto found;
+ }
+
+ return false;
+
+found:
+ refcount_inc(&vma->ref);
+ new_plane_state->ggtt_vma = vma;
+ return true;
+}
+
+int intel_plane_pin_fb(struct intel_plane_state *new_plane_state,
+ const struct intel_plane_state *old_plane_state)
+{
+ struct drm_framebuffer *fb = new_plane_state->hw.fb;
struct drm_gem_object *obj = intel_fb_bo(fb);
struct xe_bo *bo = gem_to_xe_bo(obj);
struct i915_vma *vma;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- u64 phys_alignment = plane->min_alignment(plane, fb, 0);
+ struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
+ unsigned int alignment = plane->min_alignment(plane, fb, 0);
+
+ if (reuse_vma(new_plane_state, old_plane_state))
+ return 0;
/* We reject creating !SCANOUT fb's, so this is weird.. */
drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_FLAG_SCANOUT));
- vma = __xe_pin_fb_vma(intel_fb, &plane_state->view.gtt, phys_alignment);
+ vma = __xe_pin_fb_vma(intel_fb, &new_plane_state->view.gtt, alignment);
if (IS_ERR(vma))
return PTR_ERR(vma);
- plane_state->ggtt_vma = vma;
+ new_plane_state->ggtt_vma = vma;
return 0;
}
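The net effect of the pin/reuse changes above is a simple reference-count protocol on the framebuffer VMA; a condensed sketch of that lifetime rule follows (names as in the hunk, control flow simplified for illustration).

/* Pin path: the first user owns one reference. */
refcount_set(&vma->ref, 1);

/* reuse_vma(): a plane state sharing the same fb and GTT view takes
 * another reference instead of re-pinning. */
refcount_inc(&vma->ref);

/* __xe_unpin_fb_vma(): only the last put tears the mapping down. */
if (!refcount_dec_and_test(&vma->ref))
	return;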
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index 2a2f250fa495..4ca0cb571194 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -8,7 +8,9 @@
#include "regs/xe_gtt_defs.h"
#include "xe_ggtt.h"
+#include "xe_mmio.h"
+#include "i915_reg.h"
#include "intel_atomic_plane.h"
#include "intel_crtc.h"
#include "intel_display.h"
@@ -22,6 +24,21 @@
#include <generated/xe_wa_oob.h>
+void intel_plane_initial_vblank_wait(struct intel_crtc *crtc)
+{
+ /* Early xe has no irq */
+ struct xe_device *xe = to_xe_device(crtc->base.dev);
+ struct xe_reg pipe_frmtmstmp = XE_REG(i915_mmio_reg_offset(PIPE_FRMTMSTMP(crtc->pipe)));
+ u32 timestamp;
+ int ret;
+
+ timestamp = xe_mmio_read32(xe_root_tile_mmio(xe), pipe_frmtmstmp);
+
+ ret = xe_mmio_wait32_not(xe_root_tile_mmio(xe), pipe_frmtmstmp, ~0U, timestamp, 40000U, &timestamp, false);
+ if (ret < 0)
+ drm_warn(&xe->drm, "waiting for early vblank failed with %i\n", ret);
+}
+
static bool
intel_reuse_initial_plane_obj(struct intel_crtc *this,
const struct intel_initial_plane_config plane_configs[],
@@ -215,7 +232,7 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
plane_state->uapi.rotation, &plane_state->view);
vma = intel_fb_pin_to_ggtt(fb, &plane_state->view.gtt,
- 0, 0, false, &plane_state->flags);
+ 0, 0, 0, false, &plane_state->flags);
if (IS_ERR(vma))
goto nofb;
@@ -293,7 +310,7 @@ void intel_initial_plane_config(struct intel_display *display)
intel_find_initial_plane_obj(crtc, plane_configs);
if (display->funcs.display->fixup_initial_plane_config(crtc, plane_config))
- intel_crtc_wait_for_next_vblank(crtc);
+ intel_plane_initial_vblank_wait(crtc);
plane_config_fini(plane_config);
}
diff --git a/drivers/gpu/drm/xe/display/xe_tdf.c b/drivers/gpu/drm/xe/display/xe_tdf.c
index 2c0d4e144e09..2a7fccbeb1d5 100644
--- a/drivers/gpu/drm/xe/display/xe_tdf.c
+++ b/drivers/gpu/drm/xe/display/xe_tdf.c
@@ -7,7 +7,9 @@
#include "intel_display_types.h"
#include "intel_tdf.h"
-void intel_td_flush(struct drm_i915_private *i915)
+void intel_td_flush(struct intel_display *display)
{
- xe_device_td_flush(i915);
+ struct xe_device *xe = to_xe_device(display->drm);
+
+ xe_device_td_flush(xe);
}
diff --git a/drivers/gpu/drm/xe/instructions/xe_instr_defs.h b/drivers/gpu/drm/xe/instructions/xe_instr_defs.h
index fd2ce7ace510..e559969468c4 100644
--- a/drivers/gpu/drm/xe/instructions/xe_instr_defs.h
+++ b/drivers/gpu/drm/xe/instructions/xe_instr_defs.h
@@ -16,6 +16,7 @@
#define XE_INSTR_CMD_TYPE GENMASK(31, 29)
#define XE_INSTR_MI REG_FIELD_PREP(XE_INSTR_CMD_TYPE, 0x0)
#define XE_INSTR_GSC REG_FIELD_PREP(XE_INSTR_CMD_TYPE, 0x2)
+#define XE_INSTR_VIDEOPIPE REG_FIELD_PREP(XE_INSTR_CMD_TYPE, 0x3)
#define XE_INSTR_GFXPIPE REG_FIELD_PREP(XE_INSTR_CMD_TYPE, 0x3)
#define XE_INSTR_GFX_STATE REG_FIELD_PREP(XE_INSTR_CMD_TYPE, 0x4)
diff --git a/drivers/gpu/drm/xe/instructions/xe_mfx_commands.h b/drivers/gpu/drm/xe/instructions/xe_mfx_commands.h
new file mode 100644
index 000000000000..3c0c97f78e90
--- /dev/null
+++ b/drivers/gpu/drm/xe/instructions/xe_mfx_commands.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_MFX_COMMANDS_H_
+#define _XE_MFX_COMMANDS_H_
+
+#include "instructions/xe_instr_defs.h"
+
+#define MFX_CMD_SUBTYPE REG_GENMASK(28, 27) /* A.K.A cmd pipe */
+#define MFX_CMD_OPCODE REG_GENMASK(26, 24)
+#define MFX_CMD_SUB_OPCODE REG_GENMASK(23, 16)
+#define MFX_FLAGS_AND_LEN REG_GENMASK(15, 0)
+
+#define XE_MFX_INSTR(subtype, op, sub_op) \
+ (XE_INSTR_VIDEOPIPE | \
+ REG_FIELD_PREP(MFX_CMD_SUBTYPE, subtype) | \
+ REG_FIELD_PREP(MFX_CMD_OPCODE, op) | \
+ REG_FIELD_PREP(MFX_CMD_SUB_OPCODE, sub_op))
+
+#define MFX_WAIT XE_MFX_INSTR(1, 0, 0)
+#define MFX_WAIT_DW0_PXP_SYNC_CONTROL_FLAG REG_BIT(9)
+#define MFX_WAIT_DW0_MFX_SYNC_CONTROL_FLAG REG_BIT(8)
+
+#define CRYPTO_KEY_EXCHANGE XE_MFX_INSTR(2, 6, 9)
+
+#endif
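For reference, the new MFX helpers compose a single command dword from the subtype/opcode fields plus flag bits; a small sketch of emitting an MFX_WAIT that also waits on the PXP sync flag (the cs buffer and index are illustrative).

/* Illustrative emission into a command stream buffer. */
static void emit_mfx_pxp_wait_example(u32 *cs, int *i)
{
	/* MFX_WAIT is XE_MFX_INSTR(1, 0, 0); the sync flags live in the
	 * low 16 bits (MFX_FLAGS_AND_LEN) of the same dword. */
	cs[(*i)++] = MFX_WAIT |
		     MFX_WAIT_DW0_PXP_SYNC_CONTROL_FLAG |
		     MFX_WAIT_DW0_MFX_SYNC_CONTROL_FLAG;
}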
diff --git a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
index 10ec2920d31b..167fb0f742de 100644
--- a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
+++ b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
@@ -48,6 +48,7 @@
#define MI_LRI_LEN(x) (((x) & 0xff) + 1)
#define MI_FLUSH_DW __MI_INSTR(0x26)
+#define MI_FLUSH_DW_PROTECTED_MEM_EN REG_BIT(22)
#define MI_FLUSH_DW_STORE_INDEX REG_BIT(21)
#define MI_INVALIDATE_TLB REG_BIT(18)
#define MI_FLUSH_DW_CCS REG_BIT(16)
@@ -66,4 +67,8 @@
#define MI_BATCH_BUFFER_START __MI_INSTR(0x31)
+#define MI_SET_APPID __MI_INSTR(0x0e)
+#define MI_SET_APPID_SESSION_ID_MASK REG_GENMASK(6, 0)
+#define MI_SET_APPID_SESSION_ID(x) REG_FIELD_PREP(MI_SET_APPID_SESSION_ID_MASK, x)
+
#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index b732c89816df..4f372dc2cb89 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -131,6 +131,7 @@
#define RING_EXECLIST_STATUS_HI(base) XE_REG((base) + 0x234 + 4)
#define RING_CONTEXT_CONTROL(base) XE_REG((base) + 0x244, XE_REG_OPTION_MASKED)
+#define CTX_CTRL_PXP_ENABLE REG_BIT(10)
#define CTX_CTRL_OAC_CONTEXT_ENABLE REG_BIT(8)
#define CTX_CTRL_RUN_ALONE REG_BIT(7)
#define CTX_CTRL_INDIRECT_RING_STATE_ENABLE REG_BIT(4)
diff --git a/drivers/gpu/drm/xe/regs/xe_eu_stall_regs.h b/drivers/gpu/drm/xe/regs/xe_eu_stall_regs.h
new file mode 100644
index 000000000000..c53f57fdde65
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_eu_stall_regs.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_EU_STALL_REGS_H_
+#define _XE_EU_STALL_REGS_H_
+
+#include "regs/xe_reg_defs.h"
+
+#define XEHPC_EUSTALL_BASE XE_REG_MCR(0xe520)
+#define XEHPC_EUSTALL_BASE_BUF_ADDR REG_GENMASK(31, 6)
+#define XEHPC_EUSTALL_BASE_XECORE_BUF_SZ REG_GENMASK(5, 3)
+#define XEHPC_EUSTALL_BASE_ENABLE_SAMPLING REG_BIT(1)
+
+#define XEHPC_EUSTALL_BASE_UPPER XE_REG_MCR(0xe524)
+
+#define XEHPC_EUSTALL_REPORT XE_REG_MCR(0xe528, XE_REG_OPTION_MASKED)
+#define XEHPC_EUSTALL_REPORT_WRITE_PTR_MASK REG_GENMASK(15, 2)
+#define XEHPC_EUSTALL_REPORT_OVERFLOW_DROP REG_BIT(1)
+
+#define XEHPC_EUSTALL_REPORT1 XE_REG_MCR(0xe52c, XE_REG_OPTION_MASKED)
+#define XEHPC_EUSTALL_REPORT1_READ_PTR_MASK REG_GENMASK(15, 2)
+
+#define XEHPC_EUSTALL_CTRL XE_REG_MCR(0xe53c, XE_REG_OPTION_MASKED)
+#define EUSTALL_MOCS REG_GENMASK(9, 3)
+#define EUSTALL_SAMPLE_RATE REG_GENMASK(2, 0)
+
+#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index 162f18e975da..da1f198ac107 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -221,6 +221,9 @@
#define MIRROR_FUSE1 XE_REG(0x911c)
+#define MIRROR_L3BANK_ENABLE XE_REG(0x9130)
+#define XE3_L3BANK_ENABLE REG_GENMASK(31, 0)
+
#define XELP_EU_ENABLE XE_REG(0x9134) /* "_DISABLE" on Xe_LP */
#define XELP_EU_MASK REG_GENMASK(7, 0)
#define XELP_GT_SLICE_ENABLE XE_REG(0x9138)
@@ -355,14 +358,18 @@
#define RENDER_AWAKE_STATUS REG_BIT(1)
#define MEDIA_SLICE0_AWAKE_STATUS REG_BIT(0)
+#define MISC_STATUS_0 XE_REG(0xa500)
+
#define FORCEWAKE_MEDIA_VDBOX(n) XE_REG(0xa540 + (n) * 4)
#define FORCEWAKE_MEDIA_VEBOX(n) XE_REG(0xa560 + (n) * 4)
#define FORCEWAKE_GSC XE_REG(0xa618)
+#define XELP_GARBCNTL XE_REG(0xb004)
+#define XELP_BUS_HASH_CTL_BIT_EXC REG_BIT(7)
+
#define XEHPC_LNCFMISCCFGREG0 XE_REG_MCR(0xb01c, XE_REG_OPTION_MASKED)
#define XEHPC_OVRLSCCC REG_BIT(0)
-/* L3 Cache Control */
#define LNCFCMOCS_REG_COUNT 32
#define XELP_LNCFCMOCS(i) XE_REG(0xb020 + (i) * 4)
#define XEHP_LNCFCMOCS(i) XE_REG_MCR(0xb020 + (i) * 4)
@@ -475,6 +482,7 @@
#define TDL_TSL_CHICKEN XE_REG_MCR(0xe4c4, XE_REG_OPTION_MASKED)
#define STK_ID_RESTRICT REG_BIT(12)
#define SLM_WMTP_RESTORE REG_BIT(11)
+#define RES_CHK_SPR_DIS REG_BIT(6)
#define ROW_CHICKEN XE_REG_MCR(0xe4f0, XE_REG_OPTION_MASKED)
#define UGM_BACKUP_MODE REG_BIT(13)
@@ -500,6 +508,9 @@
#define LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK REG_GENMASK(13, 11)
#define DIS_ATOMIC_CHAINING_TYPED_WRITES REG_BIT(3)
+#define TDL_CHICKEN XE_REG_MCR(0xe5f4, XE_REG_OPTION_MASKED)
+#define QID_WAIT_FOR_THREAD_NOT_RUN_DISABLE REG_BIT(12)
+
#define LSC_CHICKEN_BIT_0 XE_REG_MCR(0xe7c8)
#define DISABLE_D8_D16_COASLESCE REG_BIT(30)
#define WR_REQ_CHAINING_DIS REG_BIT(26)
diff --git a/drivers/gpu/drm/xe/regs/xe_irq_regs.h b/drivers/gpu/drm/xe/regs/xe_irq_regs.h
index 1776b3f78ccb..f0ecfcac4003 100644
--- a/drivers/gpu/drm/xe/regs/xe_irq_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_irq_regs.h
@@ -44,6 +44,7 @@
#define ENGINE1_MASK REG_GENMASK(31, 16)
#define ENGINE0_MASK REG_GENMASK(15, 0)
#define GPM_WGBOXPERF_INTR_ENABLE XE_REG(0x19003c, XE_REG_OPTION_VF)
+#define CRYPTO_RSVD_INTR_ENABLE XE_REG(0x190040)
#define GUNIT_GSC_INTR_ENABLE XE_REG(0x190044, XE_REG_OPTION_VF)
#define CCS_RSVD_INTR_ENABLE XE_REG(0x190048, XE_REG_OPTION_VF)
@@ -54,6 +55,7 @@
#define INTR_ENGINE_INTR(x) REG_FIELD_GET(GENMASK(15, 0), x)
#define OTHER_GUC_INSTANCE 0
#define OTHER_GSC_HECI2_INSTANCE 3
+#define OTHER_KCR_INSTANCE 4
#define OTHER_GSC_INSTANCE 6
#define IIR_REG_SELECTOR(x) XE_REG(0x190070 + ((x) * 4), XE_REG_OPTION_VF)
@@ -65,6 +67,7 @@
#define HECI2_RSVD_INTR_MASK XE_REG(0x1900e4)
#define GUC_SG_INTR_MASK XE_REG(0x1900e8, XE_REG_OPTION_VF)
#define GPM_WGBOXPERF_INTR_MASK XE_REG(0x1900ec, XE_REG_OPTION_VF)
+#define CRYPTO_RSVD_INTR_MASK XE_REG(0x1900f0)
#define GUNIT_GSC_INTR_MASK XE_REG(0x1900f4, XE_REG_OPTION_VF)
#define CCS0_CCS1_INTR_MASK XE_REG(0x190100)
#define CCS2_CCS3_INTR_MASK XE_REG(0x190104)
@@ -79,4 +82,9 @@
#define GT_CS_MASTER_ERROR_INTERRUPT REG_BIT(3)
#define GT_RENDER_USER_INTERRUPT REG_BIT(0)
+/* irqs for OTHER_KCR_INSTANCE */
+#define KCR_PXP_STATE_TERMINATED_INTERRUPT REG_BIT(1)
+#define KCR_APP_TERMINATED_PER_FW_REQ_INTERRUPT REG_BIT(2)
+#define KCR_PXP_STATE_RESET_COMPLETE_INTERRUPT REG_BIT(3)
+
#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
index 519dd1067a19..f5e5234857c1 100644
--- a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
@@ -34,6 +34,9 @@
#define PCU_CR_PACKAGE_ENERGY_STATUS XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x593c)
+#define PCU_CR_PACKAGE_TEMPERATURE XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5978)
+#define TEMP_MASK REG_GENMASK(7, 0)
+
#define PCU_CR_PACKAGE_RAPL_LIMIT XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x59a0)
#define PKG_PWR_LIM_1 REG_GENMASK(14, 0)
#define PKG_PWR_LIM_1_EN REG_BIT(15)
diff --git a/drivers/gpu/drm/xe/regs/xe_pcode_regs.h b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
index 0b0b49d850ae..8846eb9ce2a4 100644
--- a/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
@@ -21,6 +21,8 @@
#define BMG_PACKAGE_POWER_SKU XE_REG(0x138098)
#define BMG_PACKAGE_POWER_SKU_UNIT XE_REG(0x1380dc)
#define BMG_PACKAGE_ENERGY_STATUS XE_REG(0x138120)
+#define BMG_VRAM_TEMPERATURE XE_REG(0x1382c0)
+#define BMG_PACKAGE_TEMPERATURE XE_REG(0x138434)
#define BMG_PACKAGE_RAPL_LIMIT XE_REG(0x138440)
#define BMG_PLATFORM_ENERGY_STATUS XE_REG(0x138458)
#define BMG_PLATFORM_POWER_LIMIT XE_REG(0x138460)
diff --git a/drivers/gpu/drm/xe/regs/xe_pxp_regs.h b/drivers/gpu/drm/xe/regs/xe_pxp_regs.h
new file mode 100644
index 000000000000..aa158938b42e
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_pxp_regs.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2024, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __XE_PXP_REGS_H__
+#define __XE_PXP_REGS_H__
+
+#include "regs/xe_regs.h"
+
+/* The following registers are only valid on platforms with a media GT */
+
+/* KCR enable/disable control */
+#define KCR_INIT XE_REG(0x3860f0)
+#define KCR_INIT_ALLOW_DISPLAY_ME_WRITES REG_BIT(14)
+
+/* KCR hwdrm session in play status 0-31 */
+#define KCR_SIP XE_REG(0x386260)
+
+/* PXP global terminate register for session termination */
+#define KCR_GLOBAL_TERMINATE XE_REG(0x3860f8)
+
+#endif /* __XE_PXP_REGS_H__ */
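The KCR registers above are plain MMIO on the media GT; as a sketch (assuming a struct xe_mmio * for the media GT is at hand, as elsewhere in the driver), checking whether any HWDRM session is in play could look like the following. The helper name is illustrative.

/* Illustrative: a non-zero KCR_SIP means at least one of the 32
 * HWDRM sessions is currently in play. */
static bool kcr_any_session_in_play_example(struct xe_mmio *mmio)
{
	return xe_mmio_read32(mmio, KCR_SIP) != 0;
}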
diff --git a/drivers/gpu/drm/xe/regs/xe_reg_defs.h b/drivers/gpu/drm/xe/regs/xe_reg_defs.h
index 0eedd6c26b1b..c39aab843e35 100644
--- a/drivers/gpu/drm/xe/regs/xe_reg_defs.h
+++ b/drivers/gpu/drm/xe/regs/xe_reg_defs.h
@@ -7,10 +7,22 @@
#define _XE_REG_DEFS_H_
#include <linux/build_bug.h>
+#include <linux/log2.h>
+#include <linux/sizes.h>
#include "compat-i915-headers/i915_reg_defs.h"
/**
+ * XE_REG_ADDR_MAX - The upper limit on MMIO register address
+ *
+ * This macro specifies the upper limit (not inclusive) on MMIO register offset
+ * supported by struct xe_reg and functions based on struct xe_mmio.
+ *
+ * Currently this is defined as 4 MiB.
+ */
+#define XE_REG_ADDR_MAX SZ_4M
+
+/**
* struct xe_reg - Register definition
*
* Register definition to be used by the individual register. Although the same
@@ -21,7 +33,7 @@ struct xe_reg {
union {
struct {
/** @addr: address */
- u32 addr:28;
+ u32 addr:const_ilog2(XE_REG_ADDR_MAX);
/**
* @masked: register is "masked", with upper 16bits used
* to identify the bits that are updated on the lower
@@ -41,10 +53,6 @@ struct xe_reg {
* @vf: register is accessible from the Virtual Function.
*/
u32 vf:1;
- /**
- * @ext: access MMIO extension space for current register.
- */
- u32 ext:1;
};
/** @raw: Raw value with both address and options */
u32 raw;
@@ -112,16 +120,6 @@ struct xe_reg_mcr {
#define XE_REG(r_, ...) ((const struct xe_reg)XE_REG_INITIALIZER(r_, ##__VA_ARGS__))
/**
- * XE_REG_EXT - Create a struct xe_reg from extension offset and additional
- * flags
- * @r_: Register extension offset
- * @...: Additional options like access mode. See struct xe_reg for available
- * options.
- */
-#define XE_REG_EXT(r_, ...) \
- ((const struct xe_reg)XE_REG_INITIALIZER(r_, ##__VA_ARGS__, .ext = 1))
-
-/**
* XE_REG_MCR - Create a struct xe_reg_mcr from offset and additional flags
* @r_: Register offset
* @...: Additional options like access mode. See struct xe_reg for available
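One consequence of deriving the bitfield width from XE_REG_ADDR_MAX: with the limit at SZ_4M, addr is const_ilog2(SZ_4M) == 22 bits wide, exactly enough for offsets below 4 MiB. A sketch of that invariant as compile-time checks (illustrative, not part of the header):

static_assert(const_ilog2(XE_REG_ADDR_MAX) == 22);
static_assert(XE_REG_ADDR_MAX - 1 == GENMASK(const_ilog2(XE_REG_ADDR_MAX) - 1, 0));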
diff --git a/drivers/gpu/drm/xe/regs/xe_regs.h b/drivers/gpu/drm/xe/regs/xe_regs.h
index 6cf282618836..3abb17d2ca33 100644
--- a/drivers/gpu/drm/xe/regs/xe_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_regs.h
@@ -7,10 +7,6 @@
#include "regs/xe_reg_defs.h"
-#define TIMESTAMP_OVERRIDE XE_REG(0x44074)
-#define TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK REG_GENMASK(15, 12)
-#define TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK REG_GENMASK(9, 0)
-
#define GU_CNTL_PROTECTED XE_REG(0x10100C)
#define DRIVERINT_FLR_DIS REG_BIT(31)
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
index 6795d1d916e4..9fde67ca989f 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -514,8 +514,13 @@ static int shrink_test_run_device(struct xe_device *xe)
* other way around, they may not be subject to swapping...
*/
if (alloced < purgeable) {
+ xe_ttm_tt_account_subtract(&xe_tt->ttm);
xe_tt->purgeable = true;
+ xe_ttm_tt_account_add(&xe_tt->ttm);
bo->ttm.priority = 0;
+ spin_lock(&bo->ttm.bdev->lru_lock);
+ ttm_bo_move_to_lru_tail(&bo->ttm);
+ spin_unlock(&bo->ttm.bdev->lru_lock);
} else {
int ret = shrink_test_fill_random(bo, &prng, link);
@@ -570,7 +575,6 @@ static int shrink_test_run_device(struct xe_device *xe)
if (ret == -EINTR)
intr = true;
} while (ret == -EINTR && !signal_pending(current));
-
if (!ret && !purgeable)
failed = shrink_test_verify(test, bo, count, &prng, link);
diff --git a/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c b/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c
new file mode 100644
index 000000000000..6faffcd74869
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <kunit/static_stub.h>
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+
+#include "xe_device.h"
+#include "xe_ggtt.h"
+#include "xe_guc_ct.h"
+#include "xe_kunit_helpers.h"
+#include "xe_pci_test.h"
+
+#define DUT_GGTT_START SZ_1M
+#define DUT_GGTT_SIZE SZ_2M
+
+static struct xe_bo *replacement_xe_managed_bo_create_pin_map(struct xe_device *xe,
+ struct xe_tile *tile,
+ size_t size, u32 flags)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct xe_bo *bo;
+ void *buf;
+
+ bo = drmm_kzalloc(&xe->drm, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo);
+
+ buf = drmm_kzalloc(&xe->drm, size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+
+ bo->tile = tile;
+ bo->ttm.bdev = &xe->ttm;
+ bo->size = size;
+ iosys_map_set_vaddr(&bo->vmap, buf);
+
+ if (flags & XE_BO_FLAG_GGTT) {
+ struct xe_ggtt *ggtt = tile->mem.ggtt;
+
+ bo->ggtt_node[tile->id] = xe_ggtt_node_init(ggtt);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo->ggtt_node[tile->id]);
+
+ KUNIT_ASSERT_EQ(test, 0,
+ drm_mm_insert_node_in_range(&ggtt->mm,
+ &bo->ggtt_node[tile->id]->base,
+ bo->size, SZ_4K,
+ 0, 0, U64_MAX, 0));
+ }
+
+ return bo;
+}
+
+static int guc_buf_test_init(struct kunit *test)
+{
+ struct xe_pci_fake_data fake = {
+ .sriov_mode = XE_SRIOV_MODE_PF,
+ .platform = XE_TIGERLAKE, /* some random platform */
+ .subplatform = XE_SUBPLATFORM_NONE,
+ };
+ struct xe_ggtt *ggtt;
+ struct xe_guc *guc;
+
+ test->priv = &fake;
+ xe_kunit_helper_xe_device_test_init(test);
+
+ ggtt = xe_device_get_root_tile(test->priv)->mem.ggtt;
+ guc = &xe_device_get_gt(test->priv, 0)->uc.guc;
+
+ drm_mm_init(&ggtt->mm, DUT_GGTT_START, DUT_GGTT_SIZE);
+ mutex_init(&ggtt->lock);
+
+ kunit_activate_static_stub(test, xe_managed_bo_create_pin_map,
+ replacement_xe_managed_bo_create_pin_map);
+
+ KUNIT_ASSERT_EQ(test, 0, xe_guc_buf_cache_init(&guc->buf));
+
+ test->priv = &guc->buf;
+ return 0;
+}
+
+static void test_smallest(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf buf;
+
+ buf = xe_guc_buf_reserve(cache, 1);
+ KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
+ KUNIT_EXPECT_NOT_NULL(test, xe_guc_buf_cpu_ptr(buf));
+ KUNIT_EXPECT_NE(test, 0, xe_guc_buf_gpu_addr(buf));
+ KUNIT_EXPECT_LE(test, DUT_GGTT_START, xe_guc_buf_gpu_addr(buf));
+ KUNIT_EXPECT_GT(test, DUT_GGTT_START + DUT_GGTT_SIZE, xe_guc_buf_gpu_addr(buf));
+ xe_guc_buf_release(buf);
+}
+
+static void test_largest(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf buf;
+
+ buf = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache));
+ KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
+ KUNIT_EXPECT_NOT_NULL(test, xe_guc_buf_cpu_ptr(buf));
+ KUNIT_EXPECT_NE(test, 0, xe_guc_buf_gpu_addr(buf));
+ KUNIT_EXPECT_LE(test, DUT_GGTT_START, xe_guc_buf_gpu_addr(buf));
+ KUNIT_EXPECT_GT(test, DUT_GGTT_START + DUT_GGTT_SIZE, xe_guc_buf_gpu_addr(buf));
+ xe_guc_buf_release(buf);
+}
+
+static void test_granular(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf *bufs;
+ int n, dwords;
+
+ dwords = xe_guc_buf_cache_dwords(cache);
+ bufs = kunit_kcalloc(test, dwords, sizeof(*bufs), GFP_KERNEL);
+ KUNIT_EXPECT_NOT_NULL(test, bufs);
+
+ for (n = 0; n < dwords; n++)
+ bufs[n] = xe_guc_buf_reserve(cache, 1);
+
+ for (n = 0; n < dwords; n++)
+ KUNIT_EXPECT_TRUE_MSG(test, xe_guc_buf_is_valid(bufs[n]), "n=%d", n);
+
+ for (n = 0; n < dwords; n++)
+ xe_guc_buf_release(bufs[n]);
+}
+
+static void test_unique(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf *bufs;
+ int n, m, dwords;
+
+ dwords = xe_guc_buf_cache_dwords(cache);
+ bufs = kunit_kcalloc(test, dwords, sizeof(*bufs), GFP_KERNEL);
+ KUNIT_EXPECT_NOT_NULL(test, bufs);
+
+ for (n = 0; n < dwords; n++)
+ bufs[n] = xe_guc_buf_reserve(cache, 1);
+
+ for (n = 0; n < dwords; n++) {
+ for (m = n + 1; m < dwords; m++) {
+ KUNIT_EXPECT_PTR_NE_MSG(test, xe_guc_buf_cpu_ptr(bufs[n]),
+ xe_guc_buf_cpu_ptr(bufs[m]), "n=%d, m=%d", n, m);
+ KUNIT_ASSERT_NE_MSG(test, xe_guc_buf_gpu_addr(bufs[n]),
+ xe_guc_buf_gpu_addr(bufs[m]), "n=%d, m=%d", n, m);
+ }
+ }
+
+ for (n = 0; n < dwords; n++)
+ xe_guc_buf_release(bufs[n]);
+}
+
+static void test_overlap(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf b1, b2;
+ u32 dwords = xe_guc_buf_cache_dwords(cache) / 2;
+ u32 bytes = dwords * sizeof(u32);
+ void *p1, *p2;
+ u64 a1, a2;
+
+ b1 = xe_guc_buf_reserve(cache, dwords);
+ b2 = xe_guc_buf_reserve(cache, dwords);
+
+ p1 = xe_guc_buf_cpu_ptr(b1);
+ p2 = xe_guc_buf_cpu_ptr(b2);
+
+ a1 = xe_guc_buf_gpu_addr(b1);
+ a2 = xe_guc_buf_gpu_addr(b2);
+
+ KUNIT_EXPECT_PTR_NE(test, p1, p2);
+ if (p1 < p2)
+ KUNIT_EXPECT_LT(test, (uintptr_t)(p1 + bytes - 1), (uintptr_t)p2);
+ else
+ KUNIT_EXPECT_LT(test, (uintptr_t)(p2 + bytes - 1), (uintptr_t)p1);
+
+ KUNIT_EXPECT_NE(test, a1, a2);
+ if (a1 < a2)
+ KUNIT_EXPECT_LT(test, a1 + bytes - 1, a2);
+ else
+ KUNIT_EXPECT_LT(test, a2 + bytes - 1, a1);
+
+ xe_guc_buf_release(b1);
+ xe_guc_buf_release(b2);
+}
+
+static void test_reusable(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf b1, b2;
+ void *p1;
+ u64 a1;
+
+ b1 = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache));
+ KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(b1));
+ KUNIT_EXPECT_NOT_NULL(test, p1 = xe_guc_buf_cpu_ptr(b1));
+ KUNIT_EXPECT_NE(test, 0, a1 = xe_guc_buf_gpu_addr(b1));
+ xe_guc_buf_release(b1);
+
+ b2 = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache));
+ KUNIT_EXPECT_PTR_EQ(test, p1, xe_guc_buf_cpu_ptr(b2));
+ KUNIT_EXPECT_EQ(test, a1, xe_guc_buf_gpu_addr(b2));
+ xe_guc_buf_release(b2);
+}
+
+static void test_too_big(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf buf;
+
+ buf = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache) + 1);
+ KUNIT_EXPECT_FALSE(test, xe_guc_buf_is_valid(buf));
+ xe_guc_buf_release(buf); /* shouldn't crash */
+}
+
+static void test_flush(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf buf;
+ const u32 dwords = xe_guc_buf_cache_dwords(cache);
+ const u32 bytes = dwords * sizeof(u32);
+ u32 *s, *p, *d;
+ int n;
+
+ KUNIT_ASSERT_NOT_NULL(test, s = kunit_kcalloc(test, dwords, sizeof(u32), GFP_KERNEL));
+ KUNIT_ASSERT_NOT_NULL(test, d = kunit_kcalloc(test, dwords, sizeof(u32), GFP_KERNEL));
+
+ for (n = 0; n < dwords; n++)
+ s[n] = n;
+
+ buf = xe_guc_buf_reserve(cache, dwords);
+ KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
+ KUNIT_ASSERT_NOT_NULL(test, p = xe_guc_buf_cpu_ptr(buf));
+ KUNIT_EXPECT_PTR_NE(test, p, s);
+ KUNIT_EXPECT_PTR_NE(test, p, d);
+
+ memcpy(p, s, bytes);
+ KUNIT_EXPECT_NE(test, 0, xe_guc_buf_flush(buf));
+
+ iosys_map_memcpy_from(d, &cache->sam->bo->vmap, 0, bytes);
+ KUNIT_EXPECT_MEMEQ(test, s, d, bytes);
+
+ xe_guc_buf_release(buf);
+}
+
+static void test_lookup(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf buf;
+ u32 dwords;
+ u64 addr;
+ u32 *p;
+ int n;
+
+ dwords = xe_guc_buf_cache_dwords(cache);
+ buf = xe_guc_buf_reserve(cache, dwords);
+ KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
+ KUNIT_ASSERT_NOT_NULL(test, p = xe_guc_buf_cpu_ptr(buf));
+ KUNIT_ASSERT_NE(test, 0, addr = xe_guc_buf_gpu_addr(buf));
+
+ KUNIT_EXPECT_EQ(test, 0, xe_guc_cache_gpu_addr_from_ptr(cache, p - 1, sizeof(u32)));
+ KUNIT_EXPECT_EQ(test, 0, xe_guc_cache_gpu_addr_from_ptr(cache, p + dwords, sizeof(u32)));
+
+ for (n = 0; n < dwords; n++)
+ KUNIT_EXPECT_EQ_MSG(test, xe_guc_cache_gpu_addr_from_ptr(cache, p + n, sizeof(u32)),
+ addr + n * sizeof(u32), "n=%d", n);
+
+ xe_guc_buf_release(buf);
+}
+
+static void test_data(struct kunit *test)
+{
+ static const u32 data[] = { 1, 2, 3, 4, 5, 6 };
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf buf;
+ void *p;
+
+ buf = xe_guc_buf_from_data(cache, data, sizeof(data));
+ KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
+ KUNIT_ASSERT_NOT_NULL(test, p = xe_guc_buf_cpu_ptr(buf));
+ KUNIT_EXPECT_MEMEQ(test, p, data, sizeof(data));
+
+ xe_guc_buf_release(buf);
+}
+
+static void test_class(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ u32 dwords = xe_guc_buf_cache_dwords(cache);
+
+ {
+ CLASS(xe_guc_buf, buf)(cache, dwords);
+ KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
+ KUNIT_EXPECT_NOT_NULL(test, xe_guc_buf_cpu_ptr(buf));
+ KUNIT_EXPECT_NE(test, 0, xe_guc_buf_gpu_addr(buf));
+ KUNIT_EXPECT_LE(test, DUT_GGTT_START, xe_guc_buf_gpu_addr(buf));
+ KUNIT_EXPECT_GT(test, DUT_GGTT_START + DUT_GGTT_SIZE, xe_guc_buf_gpu_addr(buf));
+ }
+
+ {
+ CLASS(xe_guc_buf, buf)(cache, dwords);
+ KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
+ KUNIT_EXPECT_NOT_NULL(test, xe_guc_buf_cpu_ptr(buf));
+ KUNIT_EXPECT_NE(test, 0, xe_guc_buf_gpu_addr(buf));
+ KUNIT_EXPECT_LE(test, DUT_GGTT_START, xe_guc_buf_gpu_addr(buf));
+ KUNIT_EXPECT_GT(test, DUT_GGTT_START + DUT_GGTT_SIZE, xe_guc_buf_gpu_addr(buf));
+ }
+}
+
+static struct kunit_case guc_buf_test_cases[] = {
+ KUNIT_CASE(test_smallest),
+ KUNIT_CASE(test_largest),
+ KUNIT_CASE(test_granular),
+ KUNIT_CASE(test_unique),
+ KUNIT_CASE(test_overlap),
+ KUNIT_CASE(test_reusable),
+ KUNIT_CASE(test_too_big),
+ KUNIT_CASE(test_flush),
+ KUNIT_CASE(test_lookup),
+ KUNIT_CASE(test_data),
+ KUNIT_CASE(test_class),
+ {}
+};
+
+static struct kunit_suite guc_buf_suite = {
+ .name = "guc_buf",
+ .test_cases = guc_buf_test_cases,
+ .init = guc_buf_test_init,
+};
+
+kunit_test_suites(&guc_buf_suite);
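Editor's note: the test_class case above exercises the scoped CLASS(xe_guc_buf, ...) helper. As a hedged illustration only (not part of the patch), a driver-side helper built on the same cache API could look roughly like this; the helper name and the -ENOBUFS choice are assumptions.

static int example_send_dwords(struct xe_guc_buf_cache *cache,
			       const u32 *data, u32 dwords)
{
	/* Scoped reservation: the buffer is released when buf goes out of scope. */
	CLASS(xe_guc_buf, buf)(cache, dwords);

	if (!xe_guc_buf_is_valid(buf))
		return -ENOBUFS;

	/* Stage the payload in the cached suballocation. */
	memcpy(xe_guc_buf_cpu_ptr(buf), data, dwords * sizeof(u32));

	/* A real caller would flush and hand xe_guc_buf_gpu_addr(buf) to the GuC here. */
	return 0;
}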
diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c
index 67404863087e..1d3e2e50c355 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci.c
@@ -21,15 +21,15 @@
*/
void xe_call_for_each_graphics_ip(xe_graphics_fn xe_fn)
{
- const struct xe_graphics_desc *ip, *last = NULL;
+ const struct xe_graphics_desc *desc, *last = NULL;
- for (int i = 0; i < ARRAY_SIZE(graphics_ip_map); i++) {
- ip = graphics_ip_map[i].ip;
- if (ip == last)
+ for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++) {
+ desc = graphics_ips[i].desc;
+ if (desc == last)
continue;
- xe_fn(ip);
- last = ip;
+ xe_fn(desc);
+ last = desc;
}
}
EXPORT_SYMBOL_IF_KUNIT(xe_call_for_each_graphics_ip);
@@ -43,15 +43,15 @@ EXPORT_SYMBOL_IF_KUNIT(xe_call_for_each_graphics_ip);
*/
void xe_call_for_each_media_ip(xe_media_fn xe_fn)
{
- const struct xe_media_desc *ip, *last = NULL;
+ const struct xe_media_desc *desc, *last = NULL;
- for (int i = 0; i < ARRAY_SIZE(media_ip_map); i++) {
- ip = media_ip_map[i].ip;
- if (ip == last)
+ for (int i = 0; i < ARRAY_SIZE(media_ips); i++) {
+ desc = media_ips[i].desc;
+ if (desc == last)
continue;
- xe_fn(ip);
- last = ip;
+ xe_fn(desc);
+ last = desc;
}
}
EXPORT_SYMBOL_IF_KUNIT(xe_call_for_each_media_ip);
@@ -110,7 +110,7 @@ done:
kunit_activate_static_stub(test, read_gmdid, fake_read_gmdid);
xe_info_init_early(xe, desc, subplatform_desc);
- xe_info_init(xe, desc->graphics, desc->media);
+ xe_info_init(xe, desc);
return 0;
}
diff --git a/drivers/gpu/drm/xe/tests/xe_rtp_test.c b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
index 36a3b5420fef..b0254b014fe4 100644
--- a/drivers/gpu/drm/xe/tests/xe_rtp_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
@@ -320,7 +320,7 @@ static void xe_rtp_process_to_sr_tests(struct kunit *test)
count_rtp_entries++;
xe_rtp_process_ctx_enable_active_tracking(&ctx, &active, count_rtp_entries);
- xe_rtp_process_to_sr(&ctx, param->entries, reg_sr);
+ xe_rtp_process_to_sr(&ctx, param->entries, count_rtp_entries, reg_sr);
xa_for_each(&reg_sr->xa, idx, sre) {
if (idx == param->expected_reg.addr)
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 3f5391d416d4..64f9c936eea0 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -6,15 +6,19 @@
#include "xe_bo.h"
#include <linux/dma-buf.h>
+#include <linux/nospec.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_managed.h>
+#include <drm/ttm/ttm_backup.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
#include <uapi/drm/xe_drm.h>
+#include <kunit/static_stub.h>
+
#include "xe_device.h"
#include "xe_dma_buf.h"
#include "xe_drm_client.h"
@@ -24,7 +28,9 @@
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_preempt_fence.h"
+#include "xe_pxp.h"
#include "xe_res_cursor.h"
+#include "xe_shrinker.h"
#include "xe_trace_bo.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"
@@ -124,6 +130,22 @@ bool xe_bo_is_stolen_devmem(struct xe_bo *bo)
GRAPHICS_VERx100(xe_bo_device(bo)) >= 1270;
}
+/**
+ * xe_bo_is_vm_bound - check if BO has any mappings through VM_BIND
+ * @bo: The BO
+ *
+ * Check if a given bo is bound through VM_BIND. This requires the
+ * reservation lock for the BO to be held.
+ *
+ * Returns: boolean
+ */
+bool xe_bo_is_vm_bound(struct xe_bo *bo)
+{
+ xe_bo_assert_held(bo);
+
+ return !list_empty(&bo->ttm.base.gpuva.list);
+}
+
static bool xe_bo_is_user(struct xe_bo *bo)
{
return bo->flags & XE_BO_FLAG_USER;
@@ -139,14 +161,17 @@ mem_type_to_migrate(struct xe_device *xe, u32 mem_type)
return tile->migrate;
}
-static struct xe_mem_region *res_to_mem_region(struct ttm_resource *res)
+static struct xe_vram_region *res_to_mem_region(struct ttm_resource *res)
{
struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
struct ttm_resource_manager *mgr;
+ struct xe_ttm_vram_mgr *vram_mgr;
xe_assert(xe, resource_is_vram(res));
mgr = ttm_manager_type(&xe->ttm, res->mem_type);
- return to_xe_ttm_vram_mgr(mgr)->vram;
+ vram_mgr = to_xe_ttm_vram_mgr(mgr);
+
+ return container_of(vram_mgr, struct xe_vram_region, ttm);
}
static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
@@ -175,12 +200,15 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
struct ttm_place *places, u32 bo_flags, u32 mem_type, u32 *c)
{
struct ttm_place place = { .mem_type = mem_type };
- struct xe_mem_region *vram;
+ struct ttm_resource_manager *mgr = ttm_manager_type(&xe->ttm, mem_type);
+ struct xe_ttm_vram_mgr *vram_mgr = to_xe_ttm_vram_mgr(mgr);
+
+ struct xe_vram_region *vram;
u64 io_size;
xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
- vram = to_xe_ttm_vram_mgr(ttm_manager_type(&xe->ttm, mem_type))->vram;
+ vram = container_of(vram_mgr, struct xe_vram_region, ttm);
xe_assert(xe, vram && vram->usable_size);
io_size = vram->io_size;
@@ -253,6 +281,8 @@ int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
static void xe_evict_flags(struct ttm_buffer_object *tbo,
struct ttm_placement *placement)
{
+ struct xe_bo *bo;
+
if (!xe_bo_is_xe_bo(tbo)) {
/* Don't handle scatter gather BOs */
if (tbo->type == ttm_bo_type_sg) {
@@ -264,6 +294,12 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo,
return;
}
+ bo = ttm_to_xe_bo(tbo);
+ if (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) {
+ *placement = sys_placement;
+ return;
+ }
+
/*
* For xe, sg bos that are evicted to system just triggers a
* rebind of the sg list upon subsequent validation to XE_PL_TT.
@@ -281,9 +317,11 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo,
}
}
+/* struct xe_ttm_tt - Subclassed ttm_tt for xe */
struct xe_ttm_tt {
struct ttm_tt ttm;
- struct device *dev;
+ /** @xe: The xe device */
+ struct xe_device *xe;
struct sg_table sgt;
struct sg_table *sg;
/** @purgeable: Whether the content of the pages of @ttm is purgeable. */
@@ -296,7 +334,8 @@ static int xe_tt_map_sg(struct ttm_tt *tt)
unsigned long num_pages = tt->num_pages;
int ret;
- XE_WARN_ON(tt->page_flags & TTM_TT_FLAG_EXTERNAL);
+ XE_WARN_ON((tt->page_flags & TTM_TT_FLAG_EXTERNAL) &&
+ !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE));
if (xe_tt->sg)
return 0;
@@ -304,13 +343,13 @@ static int xe_tt_map_sg(struct ttm_tt *tt)
ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages,
num_pages, 0,
(u64)num_pages << PAGE_SHIFT,
- xe_sg_segment_size(xe_tt->dev),
+ xe_sg_segment_size(xe_tt->xe->drm.dev),
GFP_KERNEL);
if (ret)
return ret;
xe_tt->sg = &xe_tt->sgt;
- ret = dma_map_sgtable(xe_tt->dev, xe_tt->sg, DMA_BIDIRECTIONAL,
+ ret = dma_map_sgtable(xe_tt->xe->drm.dev, xe_tt->sg, DMA_BIDIRECTIONAL,
DMA_ATTR_SKIP_CPU_SYNC);
if (ret) {
sg_free_table(xe_tt->sg);
@@ -326,7 +365,7 @@ static void xe_tt_unmap_sg(struct ttm_tt *tt)
struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
if (xe_tt->sg) {
- dma_unmap_sgtable(xe_tt->dev, xe_tt->sg,
+ dma_unmap_sgtable(xe_tt->xe->drm.dev, xe_tt->sg,
DMA_BIDIRECTIONAL, 0);
sg_free_table(xe_tt->sg);
xe_tt->sg = NULL;
@@ -341,21 +380,47 @@ struct sg_table *xe_bo_sg(struct xe_bo *bo)
return xe_tt->sg;
}
+/*
+ * Account ttm pages against the device shrinker's shrinkable and
+ * purgeable counts.
+ */
+static void xe_ttm_tt_account_add(struct ttm_tt *tt)
+{
+ struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
+
+ if (xe_tt->purgeable)
+ xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, 0, tt->num_pages);
+ else
+ xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, tt->num_pages, 0);
+}
+
+static void xe_ttm_tt_account_subtract(struct ttm_tt *tt)
+{
+ struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
+
+ if (xe_tt->purgeable)
+ xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, 0, -(long)tt->num_pages);
+ else
+ xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, -(long)tt->num_pages, 0);
+}
+
static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
u32 page_flags)
{
struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
struct xe_device *xe = xe_bo_device(bo);
- struct xe_ttm_tt *tt;
+ struct xe_ttm_tt *xe_tt;
+ struct ttm_tt *tt;
unsigned long extra_pages;
enum ttm_caching caching = ttm_cached;
int err;
- tt = kzalloc(sizeof(*tt), GFP_KERNEL);
- if (!tt)
+ xe_tt = kzalloc(sizeof(*xe_tt), GFP_KERNEL);
+ if (!xe_tt)
return NULL;
- tt->dev = xe->drm.dev;
+ tt = &xe_tt->ttm;
+ xe_tt->xe = xe;
extra_pages = 0;
if (xe_bo_needs_ccs_pages(bo))
@@ -401,42 +466,66 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
caching = ttm_uncached;
}
- err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages);
+ if (ttm_bo->type != ttm_bo_type_sg)
+ page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;
+
+ err = ttm_tt_init(tt, &bo->ttm, page_flags, caching, extra_pages);
if (err) {
- kfree(tt);
+ kfree(xe_tt);
return NULL;
}
- return &tt->ttm;
+ if (ttm_bo->type != ttm_bo_type_sg) {
+ err = ttm_tt_setup_backup(tt);
+ if (err) {
+ ttm_tt_fini(tt);
+ kfree(xe_tt);
+ return NULL;
+ }
+ }
+
+ return tt;
}
static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt,
struct ttm_operation_ctx *ctx)
{
+ struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
int err;
/*
* dma-bufs are not populated with pages, and the dma-
* addresses are set up when moved to XE_PL_TT.
*/
- if (tt->page_flags & TTM_TT_FLAG_EXTERNAL)
+ if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) &&
+ !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE))
return 0;
- err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx);
+ if (ttm_tt_is_backed_up(tt) && !xe_tt->purgeable) {
+ err = ttm_tt_restore(ttm_dev, tt, ctx);
+ } else {
+ ttm_tt_clear_backed_up(tt);
+ err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx);
+ }
if (err)
return err;
- return err;
+ xe_tt->purgeable = false;
+ xe_ttm_tt_account_add(tt);
+
+ return 0;
}
static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt)
{
- if (tt->page_flags & TTM_TT_FLAG_EXTERNAL)
+ if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) &&
+ !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE))
return;
xe_tt_unmap_sg(tt);
- return ttm_pool_free(&ttm_dev->pool, tt);
+ ttm_pool_free(&ttm_dev->pool, tt);
+ xe_ttm_tt_account_subtract(tt);
}
static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt)
@@ -464,7 +553,7 @@ static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
return 0;
case XE_PL_VRAM0:
case XE_PL_VRAM1: {
- struct xe_mem_region *vram = res_to_mem_region(mem);
+ struct xe_vram_region *vram = res_to_mem_region(mem);
if (!xe_ttm_resource_visible(mem))
return -EINVAL;
@@ -708,11 +797,40 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
goto out;
}
+ if (!move_lacks_source && (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) &&
+ new_mem->mem_type == XE_PL_SYSTEM) {
+ ret = xe_svm_bo_evict(bo);
+ if (!ret) {
+ drm_dbg(&xe->drm, "Evict system allocator BO success\n");
+ ttm_bo_move_null(ttm_bo, new_mem);
+ } else {
+ drm_dbg(&xe->drm, "Evict system allocator BO failed=%pe\n",
+ ERR_PTR(ret));
+ }
+
+ goto out;
+ }
+
if (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT && !handle_system_ccs) {
ttm_bo_move_null(ttm_bo, new_mem);
goto out;
}
+ /* Reject BO eviction if BO is bound to current VM. */
+ if (evict && ctx->resv) {
+ struct drm_gpuvm_bo *vm_bo;
+
+ drm_gem_for_each_gpuvm_bo(vm_bo, &bo->ttm.base) {
+ struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
+
+ if (xe_vm_resv(vm) == ctx->resv &&
+ xe_vm_in_preempt_fence_mode(vm)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ }
+ }
+
/*
* Failed multi-hop where the old_mem is still marked as
* TTM_PL_FLAG_TEMPORARY, should just be a dummy move.
@@ -796,7 +914,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
/* Create a new VMAP once kernel BO back in VRAM */
if (!ret && resource_is_vram(new_mem)) {
- struct xe_mem_region *vram = res_to_mem_region(new_mem);
+ struct xe_vram_region *vram = res_to_mem_region(new_mem);
void __iomem *new_addr = vram->mapping +
(new_mem->start << PAGE_SHIFT);
@@ -871,6 +989,111 @@ out:
return ret;
}
+static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx,
+ struct ttm_buffer_object *bo,
+ unsigned long *scanned)
+{
+ long lret;
+
+ /* Fake move to system, without copying data. */
+ if (bo->resource->mem_type != XE_PL_SYSTEM) {
+ struct ttm_resource *new_resource;
+
+ lret = ttm_bo_wait_ctx(bo, ctx);
+ if (lret)
+ return lret;
+
+ lret = ttm_bo_mem_space(bo, &sys_placement, &new_resource, ctx);
+ if (lret)
+ return lret;
+
+ xe_tt_unmap_sg(bo->ttm);
+ ttm_bo_move_null(bo, new_resource);
+ }
+
+ *scanned += bo->ttm->num_pages;
+ lret = ttm_bo_shrink(ctx, bo, (struct ttm_bo_shrink_flags)
+ {.purge = true,
+ .writeback = false,
+ .allow_move = false});
+
+ if (lret > 0)
+ xe_ttm_tt_account_subtract(bo->ttm);
+
+ return lret;
+}
+
+/**
+ * xe_bo_shrink() - Try to shrink an xe bo.
+ * @ctx: The struct ttm_operation_ctx used for shrinking.
+ * @bo: The TTM buffer object whose pages to shrink.
+ * @flags: Flags governing the shrink behaviour.
+ * @scanned: Pointer to a counter of the number of pages
+ * attempted to shrink.
+ *
+ * Try to shrink or purge a bo, and if successful, unmap its DMA mappings.
+ * Note that we also need to be able to handle non-xe bos
+ * (ghost bos), but only if their struct ttm_tt is embedded in
+ * a struct xe_ttm_tt. When the function attempts to shrink
+ * the pages of a buffer object, the value pointed to by @scanned
+ * is updated.
+ *
+ * Return: The number of pages shrunken or purged, or negative error
+ * code on failure.
+ */
+long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
+ const struct xe_bo_shrink_flags flags,
+ unsigned long *scanned)
+{
+ struct ttm_tt *tt = bo->ttm;
+ struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
+ struct ttm_place place = {.mem_type = bo->resource->mem_type};
+ struct xe_bo *xe_bo = ttm_to_xe_bo(bo);
+ struct xe_device *xe = xe_tt->xe;
+ bool needs_rpm;
+ long lret = 0L;
+
+ if (!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE) ||
+ (flags.purge && !xe_tt->purgeable))
+ return -EBUSY;
+
+ if (!ttm_bo_eviction_valuable(bo, &place))
+ return -EBUSY;
+
+ if (!xe_bo_is_xe_bo(bo) || !xe_bo_get_unless_zero(xe_bo))
+ return xe_bo_shrink_purge(ctx, bo, scanned);
+
+ if (xe_tt->purgeable) {
+ if (bo->resource->mem_type != XE_PL_SYSTEM)
+ lret = xe_bo_move_notify(xe_bo, ctx);
+ if (!lret)
+ lret = xe_bo_shrink_purge(ctx, bo, scanned);
+ goto out_unref;
+ }
+
+ /* System CCS needs gpu copy when moving PL_TT -> PL_SYSTEM */
+ needs_rpm = (!IS_DGFX(xe) && bo->resource->mem_type != XE_PL_SYSTEM &&
+ xe_bo_needs_ccs_pages(xe_bo));
+ if (needs_rpm && !xe_pm_runtime_get_if_active(xe))
+ goto out_unref;
+
+ *scanned += tt->num_pages;
+ lret = ttm_bo_shrink(ctx, bo, (struct ttm_bo_shrink_flags)
+ {.purge = false,
+ .writeback = flags.writeback,
+ .allow_move = true});
+ if (needs_rpm)
+ xe_pm_runtime_put(xe);
+
+ if (lret > 0)
+ xe_ttm_tt_account_subtract(tt);
+
+out_unref:
+ xe_bo_put(xe_bo);
+
+ return lret;
+}
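A hedged caller sketch (not from this patch): how a shrinker scan path might drive xe_bo_shrink() for an LRU-walked TTM object. The helper name and the particular flag choices are illustrative assumptions; the xe_bo_shrink() signature and struct xe_bo_shrink_flags come from the code above.

static long example_scan_one(struct ttm_buffer_object *ttm_bo,
			     unsigned long *scanned)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
	};
	const struct xe_bo_shrink_flags flags = {
		.purge = false,		/* shrink non-purgeable BOs too */
		.writeback = true,	/* try to push content to swap immediately */
	};

	/* Returns the number of pages released, or a negative errno (e.g. -EBUSY). */
	return xe_bo_shrink(&ctx, ttm_bo, flags, scanned);
}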
+
/**
* xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
* @bo: The buffer object to move.
@@ -1006,7 +1229,7 @@ static unsigned long xe_ttm_io_mem_pfn(struct ttm_buffer_object *ttm_bo,
{
struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
struct xe_res_cursor cursor;
- struct xe_mem_region *vram;
+ struct xe_vram_region *vram;
if (ttm_bo->resource->mem_type == XE_PL_STOLEN)
return xe_ttm_stolen_io_offset(bo, page_offset << PAGE_SHIFT) >> PAGE_SHIFT;
@@ -1146,7 +1369,7 @@ static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo,
struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
struct iosys_map vmap;
struct xe_res_cursor cursor;
- struct xe_mem_region *vram;
+ struct xe_vram_region *vram;
int bytes_left = len;
xe_bo_assert_held(bo);
@@ -1644,6 +1867,7 @@ __xe_bo_create_locked(struct xe_device *xe,
}
}
+ trace_xe_bo_create(bo);
return bo;
err_unlock_put_bo:
@@ -1781,6 +2005,8 @@ struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile
struct xe_bo *bo;
int ret;
+ KUNIT_STATIC_STUB_REDIRECT(xe_managed_bo_create_pin_map, xe, tile, size, flags);
+
bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel, flags);
if (IS_ERR(bo))
return bo;
@@ -1885,6 +2111,8 @@ int xe_bo_pin_external(struct xe_bo *bo)
}
ttm_bo_pin(&bo->ttm);
+ if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
+ xe_ttm_tt_account_subtract(bo->ttm.ttm);
/*
* FIXME: If we always use the reserve / unreserve functions for locking
@@ -1944,6 +2172,8 @@ int xe_bo_pin(struct xe_bo *bo)
}
ttm_bo_pin(&bo->ttm);
+ if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
+ xe_ttm_tt_account_subtract(bo->ttm.ttm);
/*
* FIXME: If we always use the reserve / unreserve functions for locking
@@ -1978,6 +2208,8 @@ void xe_bo_unpin_external(struct xe_bo *bo)
spin_unlock(&xe->pinned.lock);
ttm_bo_unpin(&bo->ttm);
+ if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
+ xe_ttm_tt_account_add(bo->ttm.ttm);
/*
* FIXME: If we always use the reserve / unreserve functions for locking
@@ -2001,6 +2233,8 @@ void xe_bo_unpin(struct xe_bo *bo)
spin_unlock(&xe->pinned.lock);
}
ttm_bo_unpin(&bo->ttm);
+ if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
+ xe_ttm_tt_account_add(bo->ttm.ttm);
}
/**
@@ -2135,6 +2369,93 @@ void xe_bo_vunmap(struct xe_bo *bo)
__xe_bo_vunmap(bo);
}
+static int gem_create_set_pxp_type(struct xe_device *xe, struct xe_bo *bo, u64 value)
+{
+ if (value == DRM_XE_PXP_TYPE_NONE)
+ return 0;
+
+ /* we only support DRM_XE_PXP_TYPE_HWDRM for now */
+ if (XE_IOCTL_DBG(xe, value != DRM_XE_PXP_TYPE_HWDRM))
+ return -EINVAL;
+
+ return xe_pxp_key_assign(xe->pxp, bo);
+}
+
+typedef int (*xe_gem_create_set_property_fn)(struct xe_device *xe,
+ struct xe_bo *bo,
+ u64 value);
+
+static const xe_gem_create_set_property_fn gem_create_set_property_funcs[] = {
+ [DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY] = gem_create_set_pxp_type,
+};
+
+static int gem_create_user_ext_set_property(struct xe_device *xe,
+ struct xe_bo *bo,
+ u64 extension)
+{
+ u64 __user *address = u64_to_user_ptr(extension);
+ struct drm_xe_ext_set_property ext;
+ int err;
+ u32 idx;
+
+ err = __copy_from_user(&ext, address, sizeof(ext));
+ if (XE_IOCTL_DBG(xe, err))
+ return -EFAULT;
+
+ if (XE_IOCTL_DBG(xe, ext.property >=
+ ARRAY_SIZE(gem_create_set_property_funcs)) ||
+ XE_IOCTL_DBG(xe, ext.pad) ||
+ XE_IOCTL_DBG(xe, ext.property != DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY))
+ return -EINVAL;
+
+ idx = array_index_nospec(ext.property, ARRAY_SIZE(gem_create_set_property_funcs));
+ if (!gem_create_set_property_funcs[idx])
+ return -EINVAL;
+
+ return gem_create_set_property_funcs[idx](xe, bo, ext.value);
+}
+
+typedef int (*xe_gem_create_user_extension_fn)(struct xe_device *xe,
+ struct xe_bo *bo,
+ u64 extension);
+
+static const xe_gem_create_user_extension_fn gem_create_user_extension_funcs[] = {
+ [DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY] = gem_create_user_ext_set_property,
+};
+
+#define MAX_USER_EXTENSIONS 16
+static int gem_create_user_extensions(struct xe_device *xe, struct xe_bo *bo,
+ u64 extensions, int ext_number)
+{
+ u64 __user *address = u64_to_user_ptr(extensions);
+ struct drm_xe_user_extension ext;
+ int err;
+ u32 idx;
+
+ if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
+ return -E2BIG;
+
+ err = __copy_from_user(&ext, address, sizeof(ext));
+ if (XE_IOCTL_DBG(xe, err))
+ return -EFAULT;
+
+ if (XE_IOCTL_DBG(xe, ext.pad) ||
+ XE_IOCTL_DBG(xe, ext.name >= ARRAY_SIZE(gem_create_user_extension_funcs)))
+ return -EINVAL;
+
+ idx = array_index_nospec(ext.name,
+ ARRAY_SIZE(gem_create_user_extension_funcs));
+ err = gem_create_user_extension_funcs[idx](xe, bo, extensions);
+ if (XE_IOCTL_DBG(xe, err))
+ return err;
+
+ if (ext.next_extension)
+ return gem_create_user_extensions(xe, bo, ext.next_extension,
+ ++ext_number);
+
+ return 0;
+}
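For illustration only, a userspace-side sketch (not part of this patch) of chaining a SET_PROPERTY extension onto GEM create to request a PXP-protected BO. It assumes the uapi types struct drm_xe_gem_create and struct drm_xe_ext_set_property plus the constants referenced by the code above; the helper name, placement argument and error handling are assumptions, and the header path depends on the libdrm install.

#include <stdint.h>
#include <sys/ioctl.h>
#include "drm/xe_drm.h"

/* Hypothetical helper; 'fd' is an open xe render node. */
static int example_create_pxp_bo(int fd, __u32 placement, __u64 size, __u32 *handle)
{
	struct drm_xe_ext_set_property ext = {
		.base.name = DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY,
		.property = DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY,
		.value = DRM_XE_PXP_TYPE_HWDRM,
	};
	struct drm_xe_gem_create create = {
		.size = size,
		.placement = placement,
		.cpu_caching = DRM_XE_GEM_CPU_CACHING_WC,
		.extensions = (__u64)(uintptr_t)&ext,	/* single-entry chain */
	};
	int ret = ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create);

	if (!ret)
		*handle = create.handle;
	return ret;
}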
+
int xe_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -2142,13 +2463,13 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
struct xe_file *xef = to_xe_file(file);
struct drm_xe_gem_create *args = data;
struct xe_vm *vm = NULL;
+ ktime_t end = 0;
struct xe_bo *bo;
unsigned int bo_flags;
u32 handle;
int err;
- if (XE_IOCTL_DBG(xe, args->extensions) ||
- XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
+ if (XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
return -EINVAL;
@@ -2214,6 +2535,10 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
vm = xe_vm_lookup(xef, args->vm_id);
if (XE_IOCTL_DBG(xe, !vm))
return -ENOENT;
+ }
+
+retry:
+ if (vm) {
err = xe_vm_lock(vm, true);
if (err)
goto out_vm;
@@ -2227,9 +2552,17 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
+ if (xe_vm_validate_should_retry(NULL, err, &end))
+ goto retry;
goto out_vm;
}
+ if (args->extensions) {
+ err = gem_create_user_extensions(xe, bo, args->extensions, 0);
+ if (err)
+ goto out_bulk;
+ }
+
err = drm_gem_handle_create(file, &bo->ttm.base, &handle);
if (err)
goto out_bulk;
@@ -2263,9 +2596,26 @@ int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
return -EINVAL;
- if (XE_IOCTL_DBG(xe, args->flags))
+ if (XE_IOCTL_DBG(xe, args->flags &
+ ~DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER))
return -EINVAL;
+ if (args->flags & DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER) {
+ if (XE_IOCTL_DBG(xe, !IS_DGFX(xe)))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, args->handle))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, PAGE_SIZE > SZ_4K))
+ return -EINVAL;
+
+ BUILD_BUG_ON(((XE_PCI_BARRIER_MMAP_OFFSET >> XE_PTE_SHIFT) +
+ SZ_4K) >= DRM_FILE_PAGE_OFFSET_START);
+ args->offset = XE_PCI_BARRIER_MMAP_OFFSET;
+ return 0;
+ }
+
gem_obj = drm_gem_object_lookup(file, args->handle);
if (XE_IOCTL_DBG(xe, !gem_obj))
return -ENOENT;
@@ -2500,6 +2850,31 @@ void xe_bo_put_commit(struct llist_head *deferred)
drm_gem_object_free(&bo->ttm.base.refcount);
}
+static void xe_bo_dev_work_func(struct work_struct *work)
+{
+ struct xe_bo_dev *bo_dev = container_of(work, typeof(*bo_dev), async_free);
+
+ xe_bo_put_commit(&bo_dev->async_list);
+}
+
+/**
+ * xe_bo_dev_init() - Initialize BO dev to manage async BO freeing
+ * @bo_dev: The BO dev structure
+ */
+void xe_bo_dev_init(struct xe_bo_dev *bo_dev)
+{
+ INIT_WORK(&bo_dev->async_free, xe_bo_dev_work_func);
+}
+
+/**
+ * xe_bo_dev_fini() - Finalize BO dev managing async BO freeing
+ * @bo_dev: The BO dev structure
+ */
+void xe_bo_dev_fini(struct xe_bo_dev *bo_dev)
+{
+ flush_work(&bo_dev->async_free);
+}
+
void xe_bo_put(struct xe_bo *bo)
{
struct xe_tile *tile;
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index d9386ab03140..ec3e4446d027 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -47,6 +47,7 @@
XE_BO_FLAG_GGTT1 | \
XE_BO_FLAG_GGTT2 | \
XE_BO_FLAG_GGTT3)
+#define XE_BO_FLAG_CPU_ADDR_MIRROR BIT(22)
/* this one is trigger internally only */
#define XE_BO_FLAG_INTERNAL_TEST BIT(30)
@@ -75,6 +76,8 @@
#define XE_BO_PROPS_INVALID (-1)
+#define XE_PCI_BARRIER_MMAP_OFFSET (0x50 << XE_PTE_SHIFT)
+
struct sg_table;
struct xe_bo *xe_bo_alloc(void);
@@ -146,6 +149,28 @@ static inline struct xe_bo *xe_bo_get(struct xe_bo *bo)
void xe_bo_put(struct xe_bo *bo);
+/*
+ * xe_bo_get_unless_zero() - Conditionally obtain a GEM object refcount on an
+ * xe bo
+ * @bo: The bo for which we want to obtain a refcount.
+ *
+ * There is a short window between where the bo's GEM object refcount reaches
+ * zero and where we put the final ttm_bo reference. Code in the eviction and
+ * shrinking paths should therefore attempt to grab a gem object reference before
+ * trying to use members outside of the base class ttm object. This function is
+ * intended for that purpose. On successful return, this function must be paired
+ * with an xe_bo_put().
+ *
+ * Return: @bo on success, NULL on failure.
+ */
+static inline __must_check struct xe_bo *xe_bo_get_unless_zero(struct xe_bo *bo)
+{
+ if (!bo || !kref_get_unless_zero(&bo->ttm.base.refcount))
+ return NULL;
+
+ return bo;
+}
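Editor's sketch (an assumption, not in the patch): the pattern the comment above describes, as it might appear in an eviction or shrinker helper.

static bool example_try_use_bo(struct ttm_buffer_object *ttm_bo)
{
	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);

	if (!xe_bo_get_unless_zero(bo))
		return false;	/* GEM refcount already dropped to zero */

	/* Safe to use xe_bo members beyond the embedded ttm_bo here. */

	xe_bo_put(bo);
	return true;
}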
+
static inline void __xe_bo_unset_bulk_move(struct xe_bo *bo)
{
if (bo)
@@ -184,6 +209,11 @@ static inline bool xe_bo_is_pinned(struct xe_bo *bo)
return bo->ttm.pin_count;
}
+static inline bool xe_bo_is_protected(const struct xe_bo *bo)
+{
+ return bo->pxp_key_instance;
+}
+
static inline void xe_bo_unpin_map_no_vm(struct xe_bo *bo)
{
if (likely(bo)) {
@@ -234,6 +264,7 @@ bool mem_type_is_vram(u32 mem_type);
bool xe_bo_is_vram(struct xe_bo *bo);
bool xe_bo_is_stolen(struct xe_bo *bo);
bool xe_bo_is_stolen_devmem(struct xe_bo *bo);
+bool xe_bo_is_vm_bound(struct xe_bo *bo);
bool xe_bo_has_single_placement(struct xe_bo *bo);
uint64_t vram_region_gpu_offset(struct ttm_resource *res);
@@ -315,6 +346,25 @@ xe_bo_put_deferred(struct xe_bo *bo, struct llist_head *deferred)
void xe_bo_put_commit(struct llist_head *deferred);
+/**
+ * xe_bo_put_async() - Put BO async
+ * @bo: The bo to put.
+ *
+ * Put a BO asynchronously; the final put is deferred to a worker so that it
+ * can run outside IRQ context.
+ */
+static inline void
+xe_bo_put_async(struct xe_bo *bo)
+{
+ struct xe_bo_dev *bo_device = &xe_bo_device(bo)->bo_device;
+
+ if (xe_bo_put_deferred(bo, &bo_device->async_list))
+ schedule_work(&bo_device->async_free);
+}
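A hedged usage sketch (the wrapper struct and callback are hypothetical): dropping a BO reference from a dma-fence callback, which may run in IRQ context.

struct example_cb {			/* hypothetical wrapper */
	struct dma_fence_cb base;
	struct xe_bo *bo;
};

static void example_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct example_cb *ecb = container_of(cb, struct example_cb, base);

	/* Safe in IRQ context: the final free is deferred to the worker. */
	xe_bo_put_async(ecb->bo);
}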
+
+void xe_bo_dev_init(struct xe_bo_dev *bo_device);
+
+void xe_bo_dev_fini(struct xe_bo_dev *bo_device);
+
struct sg_table *xe_bo_sg(struct xe_bo *bo);
/*
@@ -341,7 +391,20 @@ static inline unsigned int xe_sg_segment_size(struct device *dev)
return round_down(max / 2, PAGE_SIZE);
}
-#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+/**
+ * struct xe_bo_shrink_flags - flags governing the shrink behaviour.
+ * @purge: Only purging allowed. Don't shrink if bo not purgeable.
+ * @writeback: Attempt to immediately move content to swap.
+ */
+struct xe_bo_shrink_flags {
+ u32 purge : 1;
+ u32 writeback : 1;
+};
+
+long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
+ const struct xe_bo_shrink_flags flags,
+ unsigned long *scanned);
+
/**
* xe_bo_is_mem_type - Whether the bo currently resides in the given
* TTM memory type
@@ -356,4 +419,3 @@ static inline bool xe_bo_is_mem_type(struct xe_bo *bo, u32 mem_type)
return bo->ttm.resource->mem_type == mem_type;
}
#endif
-#endif
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index 46dc9e4e3e46..15a92e3d4898 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -8,6 +8,7 @@
#include <linux/iosys-map.h>
+#include <drm/drm_gpusvm.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
@@ -57,6 +58,12 @@ struct xe_bo {
*/
struct list_head client_link;
#endif
+ /**
+ * @pxp_key_instance: PXP key instance this BO was created against. A
+ * 0 in this variable indicates that the BO does not use PXP encryption.
+ */
+ u32 pxp_key_instance;
+
/** @freed: List node for delayed put. */
struct llist_node freed;
/** @update_index: Update index if PT BO */
@@ -74,6 +81,9 @@ struct xe_bo {
*/
u16 cpu_caching;
+ /** @devmem_allocation: SVM device memory allocation */
+ struct drm_gpusvm_devmem devmem_allocation;
+
/** @vram_userfault_link: Link into @mem_access.vram_userfault.list */
struct list_head vram_userfault_link;
diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c
index 492b4877433f..d0503959a8ed 100644
--- a/drivers/gpu/drm/xe/xe_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_debugfs.c
@@ -18,6 +18,7 @@
#include "xe_gt_printk.h"
#include "xe_guc_ads.h"
#include "xe_pm.h"
+#include "xe_pxp_debugfs.h"
#include "xe_sriov.h"
#include "xe_step.h"
@@ -166,7 +167,7 @@ static ssize_t wedged_mode_set(struct file *f, const char __user *ubuf,
return -EINVAL;
if (xe->wedged.mode == wedged_mode)
- return 0;
+ return size;
xe->wedged.mode = wedged_mode;
@@ -175,6 +176,7 @@ static ssize_t wedged_mode_set(struct file *f, const char __user *ubuf,
ret = xe_guc_ads_scheduler_policy_toggle_reset(&gt->uc.guc.ads);
if (ret) {
xe_gt_err(gt, "Failed to update GuC ADS scheduler policy. GuC may still cause engine reset even with wedged_mode=2\n");
+ xe_pm_runtime_put(xe);
return -EIO;
}
}
@@ -230,5 +232,7 @@ void xe_debugfs_register(struct xe_device *xe)
for_each_gt(gt, xe, id)
xe_gt_debugfs_register(gt);
+ xe_pxp_debugfs_register(xe->pxp);
+
fault_create_debugfs_attr("fail_gt_reset", root, &gt_reset_failure);
}
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index 39fe485d2085..81b9d9bb3f57 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -237,7 +237,7 @@ static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
/*
* NB: Despite passing a GFP_ flags parameter here, more allocations are done
- * internally using GFP_KERNEL expliictly. Hence this call must be in the worker
+ * internally using GFP_KERNEL explicitly. Hence this call must be in the worker
* thread and not in the initial capture call.
*/
dev_coredumpm_timeout(gt_to_xe(ss->gt)->drm.dev, THIS_MODULE, coredump, 0, GFP_KERNEL,
@@ -423,11 +423,11 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, char suffi
if (size & 3)
drm_printf(p, "Size not word aligned: %zu", size);
if (offset & 3)
- drm_printf(p, "Offset not word aligned: %zu", size);
+ drm_printf(p, "Offset not word aligned: %zu", offset);
line_buff = kzalloc(DMESG_MAX_LINE_LEN, GFP_KERNEL);
- if (IS_ERR_OR_NULL(line_buff)) {
- drm_printf(p, "Failed to allocate line buffer: %pe", line_buff);
+ if (!line_buff) {
+ drm_printf(p, "Failed to allocate line buffer\n");
return;
}
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 4e1839b483a0..5d79b439dd62 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -49,7 +49,10 @@
#include "xe_pat.h"
#include "xe_pcode.h"
#include "xe_pm.h"
+#include "xe_pmu.h"
+#include "xe_pxp.h"
#include "xe_query.h"
+#include "xe_shrinker.h"
#include "xe_sriov.h"
#include "xe_tile.h"
#include "xe_ttm_stolen_mgr.h"
@@ -232,12 +235,117 @@ static long xe_drm_compat_ioctl(struct file *file, unsigned int cmd, unsigned lo
#define xe_drm_compat_ioctl NULL
#endif
+static void barrier_open(struct vm_area_struct *vma)
+{
+ drm_dev_get(vma->vm_private_data);
+}
+
+static void barrier_close(struct vm_area_struct *vma)
+{
+ drm_dev_put(vma->vm_private_data);
+}
+
+static void barrier_release_dummy_page(struct drm_device *dev, void *res)
+{
+ struct page *dummy_page = (struct page *)res;
+
+ __free_page(dummy_page);
+}
+
+static vm_fault_t barrier_fault(struct vm_fault *vmf)
+{
+ struct drm_device *dev = vmf->vma->vm_private_data;
+ struct vm_area_struct *vma = vmf->vma;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
+ pgprot_t prot;
+ int idx;
+
+ prot = vm_get_page_prot(vma->vm_flags);
+
+ if (drm_dev_enter(dev, &idx)) {
+ unsigned long pfn;
+
+#define LAST_DB_PAGE_OFFSET 0x7ff001
+ pfn = PHYS_PFN(pci_resource_start(to_pci_dev(dev->dev), 0) +
+ LAST_DB_PAGE_OFFSET);
+ ret = vmf_insert_pfn_prot(vma, vma->vm_start, pfn,
+ pgprot_noncached(prot));
+ drm_dev_exit(idx);
+ } else {
+ struct page *page;
+
+ /* Allocate a new dummy page to map the whole VA range in this VMA to it */
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ return VM_FAULT_OOM;
+
+ /* Set the page to be freed using drmm release action */
+ if (drmm_add_action_or_reset(dev, barrier_release_dummy_page, page))
+ return VM_FAULT_OOM;
+
+ ret = vmf_insert_pfn_prot(vma, vma->vm_start, page_to_pfn(page),
+ prot);
+ }
+
+ return ret;
+}
+
+static const struct vm_operations_struct vm_ops_barrier = {
+ .open = barrier_open,
+ .close = barrier_close,
+ .fault = barrier_fault,
+};
+
+static int xe_pci_barrier_mmap(struct file *filp,
+ struct vm_area_struct *vma)
+{
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+ struct xe_device *xe = to_xe_device(dev);
+
+ if (!IS_DGFX(xe))
+ return -EINVAL;
+
+ if (vma->vm_end - vma->vm_start > SZ_4K)
+ return -EINVAL;
+
+ if (is_cow_mapping(vma->vm_flags))
+ return -EINVAL;
+
+ if (vma->vm_flags & (VM_READ | VM_EXEC))
+ return -EINVAL;
+
+ vm_flags_clear(vma, VM_MAYREAD | VM_MAYEXEC);
+ vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
+ vma->vm_ops = &vm_ops_barrier;
+ vma->vm_private_data = dev;
+ drm_dev_get(vma->vm_private_data);
+
+ return 0;
+}
+
+static int xe_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+
+ if (drm_dev_is_unplugged(dev))
+ return -ENODEV;
+
+ switch (vma->vm_pgoff) {
+ case XE_PCI_BARRIER_MMAP_OFFSET >> XE_PTE_SHIFT:
+ return xe_pci_barrier_mmap(filp, vma);
+ }
+
+ return drm_gem_mmap(filp, vma);
+}
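Userspace-side sketch, for illustration only: requesting the PCI barrier offset via the mmap-offset ioctl and mapping it write-only. The helper name is an assumption and the header path depends on the libdrm install; the flag, the handle == 0 requirement and the 4K size limit follow from the code above.

#include <sys/ioctl.h>
#include <sys/mman.h>
#include "drm/xe_drm.h"

static void *example_map_pci_barrier(int fd)
{
	struct drm_xe_gem_mmap_offset mmo = {
		.handle = 0,	/* must be 0 for the barrier mapping */
		.flags = DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER,
	};

	if (ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo))
		return MAP_FAILED;

	/* Write-only, at most 4K, at the special offset returned above. */
	return mmap(NULL, 4096, PROT_WRITE, MAP_SHARED, fd, mmo.offset);
}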
+
static const struct file_operations xe_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release_noglobal,
.unlocked_ioctl = xe_drm_ioctl,
- .mmap = drm_gem_mmap,
+ .mmap = xe_mmap,
.poll = drm_poll,
.read = drm_read,
.compat_ioctl = xe_drm_compat_ioctl,
@@ -280,6 +388,8 @@ static void xe_device_destroy(struct drm_device *dev, void *dummy)
{
struct xe_device *xe = to_xe_device(dev);
+ xe_bo_dev_fini(&xe->bo_device);
+
if (xe->preempt_fence_wq)
destroy_workqueue(xe->preempt_fence_wq);
@@ -289,6 +399,9 @@ static void xe_device_destroy(struct drm_device *dev, void *dummy)
if (xe->unordered_wq)
destroy_workqueue(xe->unordered_wq);
+ if (!IS_ERR_OR_NULL(xe->mem.shrinker))
+ xe_shrinker_destroy(xe->mem.shrinker);
+
if (xe->destroy_wq)
destroy_workqueue(xe->destroy_wq);
@@ -317,10 +430,15 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
if (WARN_ON(err))
goto err;
+ xe_bo_dev_init(&xe->bo_device);
err = drmm_add_action_or_reset(&xe->drm, xe_device_destroy, NULL);
if (err)
goto err;
+ xe->mem.shrinker = xe_shrinker_create(xe);
+ if (IS_ERR(xe->mem.shrinker))
+ return ERR_CAST(xe->mem.shrinker);
+
xe->info.devid = pdev->device;
xe->info.revid = pdev->revision;
xe->info.force_execlist = xe_modparam.force_execlist;
@@ -553,7 +671,7 @@ static int wait_for_lmem_ready(struct xe_device *xe)
}
ALLOW_ERROR_INJECTION(wait_for_lmem_ready, ERRNO); /* See xe_pci_probe() */
-static void update_device_info(struct xe_device *xe)
+static void sriov_update_device_info(struct xe_device *xe)
{
/* disable features that are not available/applicable to VFs */
if (IS_SRIOV_VF(xe)) {
@@ -578,13 +696,13 @@ int xe_device_probe_early(struct xe_device *xe)
{
int err;
- err = xe_mmio_init(xe);
+ err = xe_mmio_probe_early(xe);
if (err)
return err;
xe_sriov_probe_early(xe);
- update_device_info(xe);
+ sriov_update_device_info(xe);
err = xe_pcode_probe_early(xe);
if (err)
@@ -623,6 +741,7 @@ static int probe_has_flat_ccs(struct xe_device *xe)
"Flat CCS has been disabled in bios, May lead to performance impact");
xe_force_wake_put(gt_to_fw(gt), fw_ref);
+
return 0;
}
@@ -631,7 +750,6 @@ int xe_device_probe(struct xe_device *xe)
struct xe_tile *tile;
struct xe_gt *gt;
int err;
- u8 last_gt;
u8 id;
xe_pat_init_early(xe);
@@ -641,9 +759,6 @@ int xe_device_probe(struct xe_device *xe)
return err;
xe->info.mem_region_mask = 1;
- err = xe_display_init_nommio(xe);
- if (err)
- return err;
err = xe_set_dma_info(xe);
if (err)
@@ -653,7 +768,9 @@ int xe_device_probe(struct xe_device *xe)
if (err)
return err;
- xe_ttm_sys_mgr_init(xe);
+ err = xe_ttm_sys_mgr_init(xe);
+ if (err)
+ return err;
for_each_gt(gt, xe, id) {
err = xe_gt_init_early(gt);
@@ -695,34 +812,32 @@ int xe_device_probe(struct xe_device *xe)
err = xe_devcoredump_init(xe);
if (err)
return err;
- err = devm_add_action_or_reset(xe->drm.dev, xe_driver_flr_fini, xe);
- if (err)
- return err;
- err = xe_display_init_noirq(xe);
+ /*
+ * From here on, if a step fails, make sure a Driver-FLR is triggered
+ */
+ err = devm_add_action_or_reset(xe->drm.dev, xe_driver_flr_fini, xe);
if (err)
return err;
- err = xe_irq_install(xe);
- if (err)
- goto err;
-
err = probe_has_flat_ccs(xe);
if (err)
- goto err;
+ return err;
err = xe_vram_probe(xe);
if (err)
- goto err;
+ return err;
for_each_tile(tile, xe, id) {
err = xe_tile_init_noalloc(tile);
if (err)
- goto err;
+ return err;
}
/* Allocate and map stolen after potential VRAM resize */
- xe_ttm_stolen_mgr_init(xe);
+ err = xe_ttm_stolen_mgr_init(xe);
+ if (err)
+ return err;
/*
* Now that GT is initialized (TTM in particular),
@@ -730,39 +845,61 @@ int xe_device_probe(struct xe_device *xe)
* This is the reason the first allocation needs to be done
* inside display.
*/
- err = xe_display_init_noaccel(xe);
+ err = xe_display_init_early(xe);
if (err)
- goto err;
+ return err;
- for_each_gt(gt, xe, id) {
- last_gt = id;
+ for_each_tile(tile, xe, id) {
+ err = xe_tile_init(tile);
+ if (err)
+ return err;
+ }
+
+ err = xe_irq_install(xe);
+ if (err)
+ return err;
+ for_each_gt(gt, xe, id) {
err = xe_gt_init(gt);
if (err)
- goto err_fini_gt;
+ return err;
}
- xe_heci_gsc_init(xe);
+ err = xe_heci_gsc_init(xe);
+ if (err)
+ return err;
err = xe_oa_init(xe);
if (err)
- goto err_fini_gt;
+ return err;
err = xe_display_init(xe);
if (err)
- goto err_fini_oa;
+ return err;
+
+ err = xe_pxp_init(xe);
+ if (err)
+ return err;
err = drm_dev_register(&xe->drm, 0);
if (err)
- goto err_fini_display;
+ return err;
xe_display_register(xe);
- xe_oa_register(xe);
+ err = xe_oa_register(xe);
+ if (err)
+ goto err_unregister_display;
+
+ err = xe_pmu_register(&xe->pmu);
+ if (err)
+ goto err_unregister_display;
xe_debugfs_register(xe);
- xe_hwmon_register(xe);
+ err = xe_hwmon_register(xe);
+ if (err)
+ goto err_unregister_display;
for_each_gt(gt, xe, id)
xe_gt_sanitize_freq(gt);
@@ -771,50 +908,17 @@ int xe_device_probe(struct xe_device *xe)
return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe);
-err_fini_display:
- xe_display_driver_remove(xe);
-
-err_fini_oa:
- xe_oa_fini(xe);
-
-err_fini_gt:
- for_each_gt(gt, xe, id) {
- if (id < last_gt)
- xe_gt_remove(gt);
- else
- break;
- }
+err_unregister_display:
+ xe_display_unregister(xe);
-err:
- xe_display_fini(xe);
return err;
}
-static void xe_device_remove_display(struct xe_device *xe)
+void xe_device_remove(struct xe_device *xe)
{
xe_display_unregister(xe);
drm_dev_unplug(&xe->drm);
- xe_display_driver_remove(xe);
-}
-
-void xe_device_remove(struct xe_device *xe)
-{
- struct xe_gt *gt;
- u8 id;
-
- xe_oa_unregister(xe);
-
- xe_device_remove_display(xe);
-
- xe_display_fini(xe);
-
- xe_oa_fini(xe);
-
- xe_heci_gsc_fini(xe);
-
- for_each_gt(gt, xe, id)
- xe_gt_remove(gt);
}
void xe_device_shutdown(struct xe_device *xe)
@@ -1003,7 +1107,8 @@ static void xe_device_wedged_fini(struct drm_device *drm, void *arg)
* re-probe (unbind + bind).
* In this state every IOCTL will be blocked so the GT cannot be used.
* In general it will be called upon any critical error such as gt reset
- * failure or guc loading failure.
+ * failure or guc loading failure. Userspace will be notified of this state
+ * through a device wedged uevent.
* If xe.wedged module parameter is set to 2, this function will be called
* on every single execution timeout (a.k.a. GPU hang) right after devcoredump
* snapshot capture. In this mode, GT reset won't be attempted so the state of
@@ -1033,6 +1138,10 @@ void xe_device_declare_wedged(struct xe_device *xe)
"IOCTLs and executions are blocked. Only a rebind may clear the failure\n"
"Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/xe/kernel/issues/new\n",
dev_name(xe->drm.dev));
+
+ /* Notify userspace of wedged device */
+ drm_dev_wedged_event(&xe->drm,
+ DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET);
}
for_each_gt(gt, xe, id)
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index fc3c2af3fb7f..0bc3bc8e6803 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -170,6 +170,11 @@ static inline bool xe_device_uses_memirq(struct xe_device *xe)
return xe_device_has_memirq(xe) && (IS_SRIOV_VF(xe) || xe_device_has_msix(xe));
}
+static inline bool xe_device_has_lmtt(struct xe_device *xe)
+{
+ return IS_DGFX(xe);
+}
+
u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size);
void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p);
diff --git a/drivers/gpu/drm/xe/xe_device_sysfs.c b/drivers/gpu/drm/xe/xe_device_sysfs.c
index 7375937934fa..7efbd4c52791 100644
--- a/drivers/gpu/drm/xe/xe_device_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_device_sysfs.c
@@ -32,9 +32,6 @@ vram_d3cold_threshold_show(struct device *dev,
struct xe_device *xe = pdev_to_xe_device(pdev);
int ret;
- if (!xe)
- return -EINVAL;
-
xe_pm_runtime_get(xe);
ret = sysfs_emit(buf, "%d\n", xe->d3cold.vram_threshold);
xe_pm_runtime_put(xe);
@@ -51,9 +48,6 @@ vram_d3cold_threshold_store(struct device *dev, struct device_attribute *attr,
u32 vram_d3cold_threshold;
int ret;
- if (!xe)
- return -EINVAL;
-
ret = kstrtou32(buff, 0, &vram_d3cold_threshold);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 8a7b15972413..72ef0b6fc425 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -10,6 +10,7 @@
#include <drm/drm_device.h>
#include <drm/drm_file.h>
+#include <drm/drm_pagemap.h>
#include <drm/ttm/ttm_device.h>
#include "xe_devcoredump_types.h"
@@ -18,9 +19,12 @@
#include "xe_memirq_types.h"
#include "xe_oa_types.h"
#include "xe_platform_types.h"
+#include "xe_pmu_types.h"
#include "xe_pt_types.h"
#include "xe_sriov_types.h"
#include "xe_step_types.h"
+#include "xe_survivability_mode_types.h"
+#include "xe_ttm_vram_mgr_types.h"
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
#define TEST_VM_OPS_ERROR
@@ -34,6 +38,7 @@
struct xe_ggtt;
struct xe_pat_ops;
+struct xe_pxp;
#define XE_BO_INVALID_OFFSET LONG_MAX
@@ -67,11 +72,11 @@ struct xe_pat_ops;
struct xe_tile * : (tile__)->xe)
/**
- * struct xe_mem_region - memory region structure
+ * struct xe_vram_region - memory region structure
* This is used to describe a memory region in xe
* device, such as HBM memory or CXL extension memory.
*/
-struct xe_mem_region {
+struct xe_vram_region {
/** @io_start: IO start address of this VRAM instance */
resource_size_t io_start;
/**
@@ -102,6 +107,21 @@ struct xe_mem_region {
resource_size_t actual_physical_size;
/** @mapping: pointer to VRAM mappable space */
void __iomem *mapping;
+ /** @pagemap: Used to remap device memory as ZONE_DEVICE */
+ struct dev_pagemap pagemap;
+ /**
+ * @dpagemap: The struct drm_pagemap of the ZONE_DEVICE memory
+ * pages of this tile.
+ */
+ struct drm_pagemap dpagemap;
+ /**
+ * @hpa_base: base host physical address
+ *
+ * This is generated when remapping device memory as ZONE_DEVICE
+ */
+ resource_size_t hpa_base;
+ /** @ttm: VRAM TTM manager */
+ struct xe_ttm_vram_mgr ttm;
};
/**
@@ -186,13 +206,6 @@ struct xe_tile {
*/
struct xe_mmio mmio;
- /**
- * @mmio_ext: MMIO-extension info for a tile.
- *
- * Each tile has its own additional 256MB (28-bit) MMIO-extension space.
- */
- struct xe_mmio mmio_ext;
-
/** @mem: memory management info for tile */
struct {
/**
@@ -201,10 +214,7 @@ struct xe_tile {
* Although VRAM is associated with a specific tile, it can
* still be accessed by all tiles' GTs.
*/
- struct xe_mem_region vram;
-
- /** @mem.vram_mgr: VRAM TTM manager */
- struct xe_ttm_vram_mgr *vram_mgr;
+ struct xe_vram_region vram;
/** @mem.ggtt: Global graphics translation table */
struct xe_ggtt *ggtt;
@@ -263,8 +273,6 @@ struct xe_device {
const char *graphics_name;
/** @info.media_name: media IP name */
const char *media_name;
- /** @info.tile_mmio_ext_size: size of MMIO extension space, per-tile */
- u32 tile_mmio_ext_size;
/** @info.graphics_verx100: graphics IP version */
u32 graphics_verx100;
/** @info.media_verx100: media IP version */
@@ -314,8 +322,8 @@ struct xe_device {
u8 has_heci_gscfi:1;
/** @info.has_llc: Device has a shared CPU+GPU last level cache */
u8 has_llc:1;
- /** @info.has_mmio_ext: Device has extra MMIO address range */
- u8 has_mmio_ext:1;
+ /** @info.has_pxp: Device has PXP support */
+ u8 has_pxp:1;
/** @info.has_range_tlb_invalidation: Has range based TLB invalidations */
u8 has_range_tlb_invalidation:1;
/** @info.has_sriov: Supports SR-IOV */
@@ -341,6 +349,9 @@ struct xe_device {
u8 skip_pcode:1;
} info;
+ /** @survivability: survivability information for device */
+ struct xe_survivability survivability;
+
/** @irq: device interrupt state */
struct {
/** @irq.lock: lock for processing irq's on this device */
@@ -372,9 +383,11 @@ struct xe_device {
/** @mem: memory info for device */
struct {
/** @mem.vram: VRAM info for device */
- struct xe_mem_region vram;
+ struct xe_vram_region vram;
/** @mem.sys_mgr: system TTM manager */
struct ttm_resource_manager sys_mgr;
+ /** @mem.shrinker: system memory shrinker. */
+ struct xe_shrinker *shrinker;
} mem;
/** @sriov: device level virtualization data */
@@ -514,6 +527,9 @@ struct xe_device {
/** @oa: oa observation subsystem */
struct xe_oa oa;
+ /** @pxp: Encapsulate Protected Xe Path support */
+ struct xe_pxp *pxp;
+
/** @needs_flr_on_fini: requests function-reset on fini */
bool needs_flr_on_fini;
@@ -525,6 +541,17 @@ struct xe_device {
int mode;
} wedged;
+ /** @bo_device: Struct to control async free of BOs */
+ struct xe_bo_dev {
+ /** @bo_device.async_free: Free worker */
+ struct work_struct async_free;
+ /** @bo_device.async_list: List of BOs to be freed */
+ struct llist_head async_list;
+ } bo_device;
+
+ /** @pmu: performance monitoring unit */
+ struct xe_pmu pmu;
+
#ifdef TEST_VM_OPS_ERROR
/**
* @vm_inject_error_position: inject errors at different places in VM
@@ -544,7 +571,6 @@ struct xe_device {
*/
struct intel_display display;
enum intel_pch pch_type;
- u16 pch_id;
struct dram_info {
bool wm_lv_0_adjust_needed;
@@ -586,8 +612,6 @@ struct xe_device {
unsigned int czclk_freq;
unsigned int fsb_freq, mem_freq, is_ddr3;
};
-
- void *pxp;
#endif
};
diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
index c5b95470fa32..f67803e15a0e 100644
--- a/drivers/gpu/drm/xe/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -58,7 +58,7 @@ static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
* 1) Avoid pinning in a placement not accessible to some importers.
* 2) Pinning in VRAM requires PIN accounting which is a to-do.
*/
- if (xe_bo_is_pinned(bo) && bo->ttm.resource->placement != XE_PL_TT) {
+ if (xe_bo_is_pinned(bo) && !xe_bo_is_mem_type(bo, XE_PL_TT)) {
drm_dbg(&xe->drm, "Can't migrate pinned bo for dma-buf pin.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index 2d4874d2b922..31f688e953d7 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -325,6 +325,14 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
unsigned int fw_ref;
/*
+ * RING_TIMESTAMP registers are inaccessible in VF mode.
+ * Without drm-total-cycles-*, other keys provide little value.
+ * Show all or none of the optional "run_ticks" keys in this case.
+ */
+ if (IS_SRIOV_VF(xe))
+ return;
+
+ /*
* Wait for any exec queue going away: their cycles will get updated on
* context switch out, so wait for that to happen
*/
diff --git a/drivers/gpu/drm/xe/xe_eu_stall.c b/drivers/gpu/drm/xe/xe_eu_stall.c
new file mode 100644
index 000000000000..88a92baf5c95
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_eu_stall.c
@@ -0,0 +1,960 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/anon_inodes.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/types.h>
+
+#include <drm/drm_drv.h>
+#include <generated/xe_wa_oob.h>
+#include <uapi/drm/xe_drm.h>
+
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_eu_stall.h"
+#include "xe_force_wake.h"
+#include "xe_gt_mcr.h"
+#include "xe_gt_printk.h"
+#include "xe_gt_topology.h"
+#include "xe_macros.h"
+#include "xe_observation.h"
+#include "xe_pm.h"
+#include "xe_trace.h"
+#include "xe_wa.h"
+
+#include "regs/xe_eu_stall_regs.h"
+#include "regs/xe_gt_regs.h"
+
+#define POLL_PERIOD_MS 5
+
+static size_t per_xecore_buf_size = SZ_512K;
+
+struct per_xecore_buf {
+ /* Buffer vaddr */
+ u8 *vaddr;
+ /* Write pointer */
+ u32 write;
+ /* Read pointer */
+ u32 read;
+};
+
+struct xe_eu_stall_data_stream {
+ bool pollin;
+ bool enabled;
+ int wait_num_reports;
+ int sampling_rate_mult;
+ wait_queue_head_t poll_wq;
+ size_t data_record_size;
+ size_t per_xecore_buf_size;
+
+ struct xe_gt *gt;
+ struct xe_bo *bo;
+ struct per_xecore_buf *xecore_buf;
+ struct {
+ bool reported_to_user;
+ xe_dss_mask_t mask;
+ } data_drop;
+ struct delayed_work buf_poll_work;
+};
+
+struct xe_eu_stall_gt {
+ /* Lock to protect stream */
+ struct mutex stream_lock;
+ /* EU stall data stream */
+ struct xe_eu_stall_data_stream *stream;
+ /* Workqueue to schedule buffer pointers polling work */
+ struct workqueue_struct *buf_ptr_poll_wq;
+};
+
+/**
+ * struct eu_stall_open_properties - EU stall sampling properties received
+ * from user space at open.
+ * @sampling_rate_mult: EU stall sampling rate multiplier.
+ * HW will sample every (sampling_rate_mult x 251) cycles.
+ * @wait_num_reports: Minimum number of EU stall data reports to unblock poll().
+ * @gt: GT on which EU stall data will be captured.
+ */
+struct eu_stall_open_properties {
+ int sampling_rate_mult;
+ int wait_num_reports;
+ struct xe_gt *gt;
+};
+
+/*
+ * EU stall data format for PVC
+ */
+struct xe_eu_stall_data_pvc {
+ __u64 ip_addr:29; /* Bits 0 to 28 */
+ __u64 active_count:8; /* Bits 29 to 36 */
+ __u64 other_count:8; /* Bits 37 to 44 */
+ __u64 control_count:8; /* Bits 45 to 52 */
+ __u64 pipestall_count:8; /* Bits 53 to 60 */
+ __u64 send_count:8; /* Bits 61 to 68 */
+ __u64 dist_acc_count:8; /* Bits 69 to 76 */
+ __u64 sbid_count:8; /* Bits 77 to 84 */
+ __u64 sync_count:8; /* Bits 85 to 92 */
+ __u64 inst_fetch_count:8; /* Bits 93 to 100 */
+ __u64 unused_bits:27;
+ __u64 unused[6];
+} __packed;
+
+/*
+ * EU stall data format for Xe2 arch GPUs (LNL, BMG).
+ */
+struct xe_eu_stall_data_xe2 {
+ __u64 ip_addr:29; /* Bits 0 to 28 */
+ __u64 tdr_count:8; /* Bits 29 to 36 */
+ __u64 other_count:8; /* Bits 37 to 44 */
+ __u64 control_count:8; /* Bits 45 to 52 */
+ __u64 pipestall_count:8; /* Bits 53 to 60 */
+ __u64 send_count:8; /* Bits 61 to 68 */
+ __u64 dist_acc_count:8; /* Bits 69 to 76 */
+ __u64 sbid_count:8; /* Bits 77 to 84 */
+ __u64 sync_count:8; /* Bits 85 to 92 */
+ __u64 inst_fetch_count:8; /* Bits 93 to 100 */
+ __u64 active_count:8; /* Bits 101 to 108 */
+ __u64 ex_id:3; /* Bits 109 to 111 */
+ __u64 end_flag:1; /* Bit 112 */
+ __u64 unused_bits:15;
+ __u64 unused[6];
+} __packed;
+
+const u64 eu_stall_sampling_rates[] = {251, 251 * 2, 251 * 3, 251 * 4, 251 * 5, 251 * 6, 251 * 7};
+
+/**
+ * xe_eu_stall_get_sampling_rates - get EU stall sampling rates information.
+ *
+ * @num_rates: Pointer to a u32 to return the number of sampling rates.
+ * @rates: Double u64 pointer which is set to point to the array of sampling rates.
+ *
+ * Stores the number of sampling rates and a pointer to the array of
+ * sampling rates in the input pointers.
+ *
+ * Returns: Size of the EU stall sampling rates array.
+ */
+size_t xe_eu_stall_get_sampling_rates(u32 *num_rates, const u64 **rates)
+{
+ *num_rates = ARRAY_SIZE(eu_stall_sampling_rates);
+ *rates = eu_stall_sampling_rates;
+
+ return sizeof(eu_stall_sampling_rates);
+}
+
+/**
+ * xe_eu_stall_get_per_xecore_buf_size - get per XeCore buffer size.
+ *
+ * Returns: The per XeCore buffer size used to allocate the per GT
+ * EU stall data buffer.
+ */
+size_t xe_eu_stall_get_per_xecore_buf_size(void)
+{
+ return per_xecore_buf_size;
+}
+
+/**
+ * xe_eu_stall_data_record_size - get EU stall data record size.
+ *
+ * @xe: Pointer to a Xe device.
+ *
+ * Returns: EU stall data record size.
+ */
+size_t xe_eu_stall_data_record_size(struct xe_device *xe)
+{
+ size_t record_size = 0;
+
+ if (xe->info.platform == XE_PVC)
+ record_size = sizeof(struct xe_eu_stall_data_pvc);
+ else if (GRAPHICS_VER(xe) >= 20)
+ record_size = sizeof(struct xe_eu_stall_data_xe2);
+
+ xe_assert(xe, is_power_of_2(record_size));
+
+ return record_size;
+}
+
+/**
+ * num_data_rows - Return the number of EU stall data rows of 64B each
+ * for a given data size.
+ *
+ * @data_size: EU stall data size
+ */
+static u32 num_data_rows(u32 data_size)
+{
+ return data_size >> 6;
+}
+
+static void xe_eu_stall_fini(void *arg)
+{
+ struct xe_gt *gt = arg;
+
+ destroy_workqueue(gt->eu_stall->buf_ptr_poll_wq);
+ mutex_destroy(&gt->eu_stall->stream_lock);
+ kfree(gt->eu_stall);
+}
+
+/**
+ * xe_eu_stall_init() - Allocate and initialize GT level EU stall data
+ * structure xe_eu_stall_gt within struct xe_gt.
+ *
+ * @gt: GT being initialized.
+ *
+ * Returns: zero on success or a negative error code.
+ */
+int xe_eu_stall_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ int ret;
+
+ gt->eu_stall = kzalloc(sizeof(*gt->eu_stall), GFP_KERNEL);
+ if (!gt->eu_stall) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ mutex_init(&gt->eu_stall->stream_lock);
+
+ gt->eu_stall->buf_ptr_poll_wq = alloc_ordered_workqueue("xe_eu_stall", 0);
+ if (!gt->eu_stall->buf_ptr_poll_wq) {
+ ret = -ENOMEM;
+ goto exit_free;
+ }
+
+ ret = devm_add_action_or_reset(xe->drm.dev, xe_eu_stall_fini, gt);
+ if (ret)
+ goto exit_destroy;
+
+ return 0;
+exit_destroy:
+ destroy_workqueue(gt->eu_stall->buf_ptr_poll_wq);
+exit_free:
+ mutex_destroy(&gt->eu_stall->stream_lock);
+ kfree(gt->eu_stall);
+exit:
+ return ret;
+}
+
+static int set_prop_eu_stall_sampling_rate(struct xe_device *xe, u64 value,
+ struct eu_stall_open_properties *props)
+{
+ value = div_u64(value, 251);
+ if (value == 0 || value > 7) {
+ drm_dbg(&xe->drm, "Invalid EU stall sampling rate %llu\n", value);
+ return -EINVAL;
+ }
+ props->sampling_rate_mult = value;
+ return 0;
+}
+
+static int set_prop_eu_stall_wait_num_reports(struct xe_device *xe, u64 value,
+ struct eu_stall_open_properties *props)
+{
+ props->wait_num_reports = value;
+
+ return 0;
+}
+
+static int set_prop_eu_stall_gt_id(struct xe_device *xe, u64 value,
+ struct eu_stall_open_properties *props)
+{
+ if (value >= xe->info.gt_count) {
+ drm_dbg(&xe->drm, "Invalid GT ID %llu for EU stall sampling\n", value);
+ return -EINVAL;
+ }
+ props->gt = xe_device_get_gt(xe, value);
+ return 0;
+}
+
+typedef int (*set_eu_stall_property_fn)(struct xe_device *xe, u64 value,
+ struct eu_stall_open_properties *props);
+
+static const set_eu_stall_property_fn xe_set_eu_stall_property_funcs[] = {
+ [DRM_XE_EU_STALL_PROP_SAMPLE_RATE] = set_prop_eu_stall_sampling_rate,
+ [DRM_XE_EU_STALL_PROP_WAIT_NUM_REPORTS] = set_prop_eu_stall_wait_num_reports,
+ [DRM_XE_EU_STALL_PROP_GT_ID] = set_prop_eu_stall_gt_id,
+};
+
+static int xe_eu_stall_user_ext_set_property(struct xe_device *xe, u64 extension,
+ struct eu_stall_open_properties *props)
+{
+ u64 __user *address = u64_to_user_ptr(extension);
+ struct drm_xe_ext_set_property ext;
+ int err;
+ u32 idx;
+
+ err = __copy_from_user(&ext, address, sizeof(ext));
+ if (XE_IOCTL_DBG(xe, err))
+ return -EFAULT;
+
+ if (XE_IOCTL_DBG(xe, ext.property >= ARRAY_SIZE(xe_set_eu_stall_property_funcs)) ||
+ XE_IOCTL_DBG(xe, ext.pad))
+ return -EINVAL;
+
+ idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_set_eu_stall_property_funcs));
+ return xe_set_eu_stall_property_funcs[idx](xe, ext.value, props);
+}
+
+typedef int (*xe_eu_stall_user_extension_fn)(struct xe_device *xe, u64 extension,
+ struct eu_stall_open_properties *props);
+static const xe_eu_stall_user_extension_fn xe_eu_stall_user_extension_funcs[] = {
+ [DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY] = xe_eu_stall_user_ext_set_property,
+};
+
+#define MAX_USER_EXTENSIONS 5
+static int xe_eu_stall_user_extensions(struct xe_device *xe, u64 extension,
+ int ext_number, struct eu_stall_open_properties *props)
+{
+ u64 __user *address = u64_to_user_ptr(extension);
+ struct drm_xe_user_extension ext;
+ int err;
+ u32 idx;
+
+ if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
+ return -E2BIG;
+
+ err = __copy_from_user(&ext, address, sizeof(ext));
+ if (XE_IOCTL_DBG(xe, err))
+ return -EFAULT;
+
+ if (XE_IOCTL_DBG(xe, ext.pad) ||
+ XE_IOCTL_DBG(xe, ext.name >= ARRAY_SIZE(xe_eu_stall_user_extension_funcs)))
+ return -EINVAL;
+
+ idx = array_index_nospec(ext.name, ARRAY_SIZE(xe_eu_stall_user_extension_funcs));
+ err = xe_eu_stall_user_extension_funcs[idx](xe, extension, props);
+ if (XE_IOCTL_DBG(xe, err))
+ return err;
+
+ if (ext.next_extension)
+ return xe_eu_stall_user_extensions(xe, ext.next_extension, ++ext_number, props);
+
+ return 0;
+}
+
+/**
+ * buf_data_size - Calculate the number of bytes in a circular buffer
+ * given the read and write pointers and the size of
+ * the buffer.
+ *
+ * @buf_size: Size of the circular buffer
+ * @read_ptr: Read pointer with an additional overflow bit
+ * @write_ptr: Write pointer with an additional overflow bit
+ *
+ * Since the read and write pointers have an additional overflow bit,
+ * this function calculates the offsets from the pointers and uses the
+ * offsets to calculate the data size in the buffer.
+ *
+ * Returns: number of bytes of data in the buffer
+ */
+static u32 buf_data_size(size_t buf_size, u32 read_ptr, u32 write_ptr)
+{
+ u32 read_offset, write_offset, size = 0;
+
+ if (read_ptr == write_ptr)
+ goto exit;
+
+ read_offset = read_ptr & (buf_size - 1);
+ write_offset = write_ptr & (buf_size - 1);
+
+ if (write_offset > read_offset)
+ size = write_offset - read_offset;
+ else
+ size = buf_size - read_offset + write_offset;
+exit:
+ return size;
+}
+
+/**
+ * eu_stall_data_buf_poll - Poll for EU stall data in the buffer.
+ *
+ * @stream: xe EU stall data stream instance
+ *
+ * Returns: true if the EU stall buffer contains at least the minimum
+ * amount of stall data specified by the event report count, else false.
+ */
+static bool eu_stall_data_buf_poll(struct xe_eu_stall_data_stream *stream)
+{
+ u32 read_ptr, write_ptr_reg, write_ptr, total_data = 0;
+ u32 buf_size = stream->per_xecore_buf_size;
+ struct per_xecore_buf *xecore_buf;
+ struct xe_gt *gt = stream->gt;
+ bool min_data_present = false;
+ u16 group, instance;
+ unsigned int xecore;
+
+ mutex_lock(&gt->eu_stall->stream_lock);
+ for_each_dss_steering(xecore, gt, group, instance) {
+ xecore_buf = &stream->xecore_buf[xecore];
+ read_ptr = xecore_buf->read;
+ write_ptr_reg = xe_gt_mcr_unicast_read(gt, XEHPC_EUSTALL_REPORT,
+ group, instance);
+ write_ptr = REG_FIELD_GET(XEHPC_EUSTALL_REPORT_WRITE_PTR_MASK, write_ptr_reg);
+ write_ptr <<= 6;
+ write_ptr &= ((buf_size << 1) - 1);
+ if (!min_data_present) {
+ total_data += buf_data_size(buf_size, read_ptr, write_ptr);
+ if (num_data_rows(total_data) >= stream->wait_num_reports)
+ min_data_present = true;
+ }
+ if (write_ptr_reg & XEHPC_EUSTALL_REPORT_OVERFLOW_DROP)
+ set_bit(xecore, stream->data_drop.mask);
+ xecore_buf->write = write_ptr;
+ }
+ mutex_unlock(&gt->eu_stall->stream_lock);
+
+ return min_data_present;
+}
+
+static void clear_dropped_eviction_line_bit(struct xe_gt *gt, u16 group, u16 instance)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 write_ptr_reg;
+
+ /* On PVC, the overflow bit has to be cleared by writing 1 to it.
+ * On Xe2 and later GPUs, the bit has to be cleared by writing 0 to it.
+ */
+ if (GRAPHICS_VER(xe) >= 20)
+ write_ptr_reg = _MASKED_BIT_DISABLE(XEHPC_EUSTALL_REPORT_OVERFLOW_DROP);
+ else
+ write_ptr_reg = _MASKED_BIT_ENABLE(XEHPC_EUSTALL_REPORT_OVERFLOW_DROP);
+
+ xe_gt_mcr_unicast_write(gt, XEHPC_EUSTALL_REPORT, write_ptr_reg, group, instance);
+}
+
+static int xe_eu_stall_data_buf_read(struct xe_eu_stall_data_stream *stream,
+ char __user *buf, size_t count,
+ size_t *total_data_size, struct xe_gt *gt,
+ u16 group, u16 instance, unsigned int xecore)
+{
+ size_t read_data_size, copy_size, buf_size;
+ u32 read_ptr_reg, read_ptr, write_ptr;
+ u8 *xecore_start_vaddr, *read_vaddr;
+ struct per_xecore_buf *xecore_buf;
+ u32 read_offset, write_offset;
+
+ /* Hardware increments the read and write pointers such that they can
+ * overflow into one additional bit. For example, a 256KB size buffer
+ * offset pointer needs 18 bits. But HW uses 19 bits for the read and
+ * write pointers. This technique avoids wasting a slot in the buffer.
+ * Read and write offsets are calculated from the pointers in order to
+ * check if the write pointer has wrapped around the array.
+ */
+ xecore_buf = &stream->xecore_buf[xecore];
+ xecore_start_vaddr = xecore_buf->vaddr;
+ read_ptr = xecore_buf->read;
+ write_ptr = xecore_buf->write;
+ buf_size = stream->per_xecore_buf_size;
+
+ read_data_size = buf_data_size(buf_size, read_ptr, write_ptr);
+ /* Read only the data that the user space buffer can accommodate */
+ read_data_size = min_t(size_t, count - *total_data_size, read_data_size);
+ if (read_data_size == 0)
+ goto exit_drop;
+
+ read_offset = read_ptr & (buf_size - 1);
+ write_offset = write_ptr & (buf_size - 1);
+ read_vaddr = xecore_start_vaddr + read_offset;
+
+ if (write_offset > read_offset) {
+ if (copy_to_user(buf + *total_data_size, read_vaddr, read_data_size))
+ return -EFAULT;
+ } else {
+ if (read_data_size >= buf_size - read_offset)
+ copy_size = buf_size - read_offset;
+ else
+ copy_size = read_data_size;
+ if (copy_to_user(buf + *total_data_size, read_vaddr, copy_size))
+ return -EFAULT;
+ if (copy_to_user(buf + *total_data_size + copy_size,
+ xecore_start_vaddr, read_data_size - copy_size))
+ return -EFAULT;
+ }
+
+ *total_data_size += read_data_size;
+ read_ptr += read_data_size;
+
+ /* Read pointer can overflow into one additional bit */
+ read_ptr &= (buf_size << 1) - 1;
+ read_ptr_reg = REG_FIELD_PREP(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, (read_ptr >> 6));
+ read_ptr_reg = _MASKED_FIELD(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, read_ptr_reg);
+ xe_gt_mcr_unicast_write(gt, XEHPC_EUSTALL_REPORT1, read_ptr_reg, group, instance);
+ xecore_buf->read = read_ptr;
+ trace_xe_eu_stall_data_read(group, instance, read_ptr, write_ptr,
+ read_data_size, *total_data_size);
+exit_drop:
+ /* Clear drop bit (if set) after any data was read or if the buffer was empty.
+ * Drop bit can be set even if the buffer is empty as the buffer may have been emptied
+ * in the previous read() and the data drop bit was set during the previous read().
+ */
+ if (test_bit(xecore, stream->data_drop.mask)) {
+ clear_dropped_eviction_line_bit(gt, group, instance);
+ clear_bit(xecore, stream->data_drop.mask);
+ }
+ return 0;
+}
+
+/**
+ * xe_eu_stall_stream_read_locked - copy EU stall counters data from the
+ * per xecore buffers to the userspace buffer
+ * @stream: A stream opened for EU stall count metrics
+ * @file: An xe EU stall data stream file
+ * @buf: destination buffer given by userspace
+ * @count: the number of bytes userspace wants to read
+ *
+ * Returns: Number of bytes copied or a negative error code
+ * If we've successfully copied any data then reporting that takes
+ * precedence over any internal error status, so the data isn't lost.
+ */
+static ssize_t xe_eu_stall_stream_read_locked(struct xe_eu_stall_data_stream *stream,
+ struct file *file, char __user *buf,
+ size_t count)
+{
+ struct xe_gt *gt = stream->gt;
+ size_t total_size = 0;
+ u16 group, instance;
+ unsigned int xecore;
+ int ret = 0;
+
+ if (bitmap_weight(stream->data_drop.mask, XE_MAX_DSS_FUSE_BITS)) {
+ if (!stream->data_drop.reported_to_user) {
+ stream->data_drop.reported_to_user = true;
+ xe_gt_dbg(gt, "EU stall data dropped in XeCores: %*pb\n",
+ XE_MAX_DSS_FUSE_BITS, stream->data_drop.mask);
+ return -EIO;
+ }
+ stream->data_drop.reported_to_user = false;
+ }
+
+ for_each_dss_steering(xecore, gt, group, instance) {
+ ret = xe_eu_stall_data_buf_read(stream, buf, count, &total_size,
+ gt, group, instance, xecore);
+ if (ret || count == total_size)
+ break;
+ }
+ return total_size ?: (ret ?: -EAGAIN);
+}
+
+/*
+ * Userspace must enable the EU stall stream with DRM_XE_OBSERVATION_IOCTL_ENABLE
+ * before calling read().
+ *
+ * Returns: The number of bytes copied or a negative error code on failure.
+ * -EIO if HW drops any EU stall data when the buffer is full.
+ */
+static ssize_t xe_eu_stall_stream_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct xe_eu_stall_data_stream *stream = file->private_data;
+ struct xe_gt *gt = stream->gt;
+ ssize_t ret, aligned_count;
+
+ aligned_count = ALIGN_DOWN(count, stream->data_record_size);
+ if (aligned_count == 0)
+ return -EINVAL;
+
+ if (!stream->enabled) {
+ xe_gt_dbg(gt, "EU stall data stream not enabled to read\n");
+ return -EINVAL;
+ }
+
+ if (!(file->f_flags & O_NONBLOCK)) {
+ do {
+ ret = wait_event_interruptible(stream->poll_wq, stream->pollin);
+ if (ret)
+ return -EINTR;
+
+ mutex_lock(&gt->eu_stall->stream_lock);
+ ret = xe_eu_stall_stream_read_locked(stream, file, buf, aligned_count);
+ mutex_unlock(&gt->eu_stall->stream_lock);
+ } while (ret == -EAGAIN);
+ } else {
+ mutex_lock(&gt->eu_stall->stream_lock);
+ ret = xe_eu_stall_stream_read_locked(stream, file, buf, aligned_count);
+ mutex_unlock(&gt->eu_stall->stream_lock);
+ }
+
+ /*
+ * This may not work correctly if the user buffer is very small.
+ * We don't want to block the next read() when there is data in the buffer
+ * now, but it couldn't be accommodated in the small user buffer.
+ */
+ stream->pollin = false;
+
+ return ret;
+}
+
+static void xe_eu_stall_stream_free(struct xe_eu_stall_data_stream *stream)
+{
+ struct xe_gt *gt = stream->gt;
+
+ gt->eu_stall->stream = NULL;
+ kfree(stream);
+}
+
+static void xe_eu_stall_data_buf_destroy(struct xe_eu_stall_data_stream *stream)
+{
+ xe_bo_unpin_map_no_vm(stream->bo);
+ kfree(stream->xecore_buf);
+}
+
+static int xe_eu_stall_data_buf_alloc(struct xe_eu_stall_data_stream *stream,
+ u16 last_xecore)
+{
+ struct xe_tile *tile = stream->gt->tile;
+ struct xe_bo *bo;
+ u32 size;
+
+ stream->xecore_buf = kcalloc(last_xecore, sizeof(*stream->xecore_buf), GFP_KERNEL);
+ if (!stream->xecore_buf)
+ return -ENOMEM;
+
+ size = stream->per_xecore_buf_size * last_xecore;
+
+ bo = xe_bo_create_pin_map_at_aligned(tile->xe, tile, NULL,
+ size, ~0ull, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT, SZ_64);
+ if (IS_ERR(bo)) {
+ kfree(stream->xecore_buf);
+ return PTR_ERR(bo);
+ }
+
+ XE_WARN_ON(!IS_ALIGNED(xe_bo_ggtt_addr(bo), SZ_64));
+ stream->bo = bo;
+
+ return 0;
+}
+
+static int xe_eu_stall_stream_enable(struct xe_eu_stall_data_stream *stream)
+{
+ u32 write_ptr_reg, write_ptr, read_ptr_reg, reg_value;
+ struct per_xecore_buf *xecore_buf;
+ struct xe_gt *gt = stream->gt;
+ u16 group, instance;
+ unsigned int fw_ref;
+ int xecore;
+
+ /* Take runtime pm ref and forcewake to disable RC6 */
+ xe_pm_runtime_get(gt_to_xe(gt));
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_RENDER);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_RENDER)) {
+ xe_gt_err(gt, "Failed to get RENDER forcewake\n");
+ xe_pm_runtime_put(gt_to_xe(gt));
+ return -ETIMEDOUT;
+ }
+
+ if (XE_WA(gt, 22016596838))
+ xe_gt_mcr_multicast_write(gt, ROW_CHICKEN2,
+ _MASKED_BIT_ENABLE(DISABLE_DOP_GATING));
+
+ for_each_dss_steering(xecore, gt, group, instance) {
+ write_ptr_reg = xe_gt_mcr_unicast_read(gt, XEHPC_EUSTALL_REPORT, group, instance);
+ /* Clear any drop bits set and not cleared in the previous session. */
+ if (write_ptr_reg & XEHPC_EUSTALL_REPORT_OVERFLOW_DROP)
+ clear_dropped_eviction_line_bit(gt, group, instance);
+ write_ptr = REG_FIELD_GET(XEHPC_EUSTALL_REPORT_WRITE_PTR_MASK, write_ptr_reg);
+ read_ptr_reg = REG_FIELD_PREP(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, write_ptr);
+ read_ptr_reg = _MASKED_FIELD(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, read_ptr_reg);
+ /* Initialize the read pointer to the write pointer */
+ xe_gt_mcr_unicast_write(gt, XEHPC_EUSTALL_REPORT1, read_ptr_reg, group, instance);
+ write_ptr <<= 6;
+ write_ptr &= (stream->per_xecore_buf_size << 1) - 1;
+ xecore_buf = &stream->xecore_buf[xecore];
+ xecore_buf->write = write_ptr;
+ xecore_buf->read = write_ptr;
+ }
+ stream->data_drop.reported_to_user = false;
+ bitmap_zero(stream->data_drop.mask, XE_MAX_DSS_FUSE_BITS);
+
+ reg_value = _MASKED_FIELD(EUSTALL_MOCS | EUSTALL_SAMPLE_RATE,
+ REG_FIELD_PREP(EUSTALL_MOCS, gt->mocs.uc_index << 1) |
+ REG_FIELD_PREP(EUSTALL_SAMPLE_RATE,
+ stream->sampling_rate_mult));
+ xe_gt_mcr_multicast_write(gt, XEHPC_EUSTALL_CTRL, reg_value);
+ /* GGTT addresses can never be > 32 bits */
+ xe_gt_mcr_multicast_write(gt, XEHPC_EUSTALL_BASE_UPPER, 0);
+ reg_value = xe_bo_ggtt_addr(stream->bo);
+ reg_value |= REG_FIELD_PREP(XEHPC_EUSTALL_BASE_XECORE_BUF_SZ,
+ stream->per_xecore_buf_size / SZ_256K);
+ reg_value |= XEHPC_EUSTALL_BASE_ENABLE_SAMPLING;
+ xe_gt_mcr_multicast_write(gt, XEHPC_EUSTALL_BASE, reg_value);
+
+ return 0;
+}
+
+static void eu_stall_data_buf_poll_work_fn(struct work_struct *work)
+{
+ struct xe_eu_stall_data_stream *stream =
+ container_of(work, typeof(*stream), buf_poll_work.work);
+ struct xe_gt *gt = stream->gt;
+
+ if (eu_stall_data_buf_poll(stream)) {
+ stream->pollin = true;
+ wake_up(&stream->poll_wq);
+ }
+ queue_delayed_work(gt->eu_stall->buf_ptr_poll_wq,
+ &stream->buf_poll_work,
+ msecs_to_jiffies(POLL_PERIOD_MS));
+}
+
+static int xe_eu_stall_stream_init(struct xe_eu_stall_data_stream *stream,
+ struct eu_stall_open_properties *props)
+{
+ unsigned int max_wait_num_reports, xecore, last_xecore, num_xecores;
+ struct per_xecore_buf *xecore_buf;
+ struct xe_gt *gt = stream->gt;
+ xe_dss_mask_t all_xecores;
+ u16 group, instance;
+ u32 vaddr_offset;
+ int ret;
+
+ bitmap_or(all_xecores, gt->fuse_topo.g_dss_mask, gt->fuse_topo.c_dss_mask,
+ XE_MAX_DSS_FUSE_BITS);
+ num_xecores = bitmap_weight(all_xecores, XE_MAX_DSS_FUSE_BITS);
+ last_xecore = xe_gt_topology_mask_last_dss(all_xecores) + 1;
+
+ max_wait_num_reports = num_data_rows(per_xecore_buf_size * num_xecores);
+ if (props->wait_num_reports == 0 || props->wait_num_reports > max_wait_num_reports) {
+ xe_gt_dbg(gt, "Invalid EU stall event report count %u\n",
+ props->wait_num_reports);
+ xe_gt_dbg(gt, "Minimum event report count is 1, maximum is %u\n",
+ max_wait_num_reports);
+ return -EINVAL;
+ }
+
+ init_waitqueue_head(&stream->poll_wq);
+ INIT_DELAYED_WORK(&stream->buf_poll_work, eu_stall_data_buf_poll_work_fn);
+ stream->per_xecore_buf_size = per_xecore_buf_size;
+ stream->sampling_rate_mult = props->sampling_rate_mult;
+ stream->wait_num_reports = props->wait_num_reports;
+ stream->data_record_size = xe_eu_stall_data_record_size(gt_to_xe(gt));
+
+ ret = xe_eu_stall_data_buf_alloc(stream, last_xecore);
+ if (ret)
+ return ret;
+
+ for_each_dss_steering(xecore, gt, group, instance) {
+ xecore_buf = &stream->xecore_buf[xecore];
+ vaddr_offset = xecore * stream->per_xecore_buf_size;
+ xecore_buf->vaddr = stream->bo->vmap.vaddr + vaddr_offset;
+ }
+ return 0;
+}
+
+static __poll_t xe_eu_stall_stream_poll_locked(struct xe_eu_stall_data_stream *stream,
+ struct file *file, poll_table *wait)
+{
+ __poll_t events = 0;
+
+ poll_wait(file, &stream->poll_wq, wait);
+
+ if (stream->pollin)
+ events |= EPOLLIN;
+
+ return events;
+}
+
+static __poll_t xe_eu_stall_stream_poll(struct file *file, poll_table *wait)
+{
+ struct xe_eu_stall_data_stream *stream = file->private_data;
+ struct xe_gt *gt = stream->gt;
+ __poll_t ret;
+
+ mutex_lock(&gt->eu_stall->stream_lock);
+ ret = xe_eu_stall_stream_poll_locked(stream, file, wait);
+ mutex_unlock(&gt->eu_stall->stream_lock);
+
+ return ret;
+}
+
+static int xe_eu_stall_enable_locked(struct xe_eu_stall_data_stream *stream)
+{
+ struct xe_gt *gt = stream->gt;
+ int ret = 0;
+
+ if (stream->enabled)
+ return ret;
+
+ stream->enabled = true;
+
+ ret = xe_eu_stall_stream_enable(stream);
+
+ queue_delayed_work(gt->eu_stall->buf_ptr_poll_wq,
+ &stream->buf_poll_work,
+ msecs_to_jiffies(POLL_PERIOD_MS));
+ return ret;
+}
+
+static int xe_eu_stall_disable_locked(struct xe_eu_stall_data_stream *stream)
+{
+ struct xe_gt *gt = stream->gt;
+
+ if (!stream->enabled)
+ return 0;
+
+ stream->enabled = false;
+
+ xe_gt_mcr_multicast_write(gt, XEHPC_EUSTALL_BASE, 0);
+
+ cancel_delayed_work_sync(&stream->buf_poll_work);
+
+ if (XE_WA(gt, 22016596838))
+ xe_gt_mcr_multicast_write(gt, ROW_CHICKEN2,
+ _MASKED_BIT_DISABLE(DISABLE_DOP_GATING));
+
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_RENDER);
+ xe_pm_runtime_put(gt_to_xe(gt));
+
+ return 0;
+}
+
+static long xe_eu_stall_stream_ioctl_locked(struct xe_eu_stall_data_stream *stream,
+ unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case DRM_XE_OBSERVATION_IOCTL_ENABLE:
+ return xe_eu_stall_enable_locked(stream);
+ case DRM_XE_OBSERVATION_IOCTL_DISABLE:
+ return xe_eu_stall_disable_locked(stream);
+ }
+
+ return -EINVAL;
+}
+
+static long xe_eu_stall_stream_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct xe_eu_stall_data_stream *stream = file->private_data;
+ struct xe_gt *gt = stream->gt;
+ long ret;
+
+ mutex_lock(&gt->eu_stall->stream_lock);
+ ret = xe_eu_stall_stream_ioctl_locked(stream, cmd, arg);
+ mutex_unlock(&gt->eu_stall->stream_lock);
+
+ return ret;
+}
+
+static int xe_eu_stall_stream_close(struct inode *inode, struct file *file)
+{
+ struct xe_eu_stall_data_stream *stream = file->private_data;
+ struct xe_gt *gt = stream->gt;
+
+ drm_dev_put(&gt->tile->xe->drm);
+
+ mutex_lock(&gt->eu_stall->stream_lock);
+ xe_eu_stall_disable_locked(stream);
+ xe_eu_stall_data_buf_destroy(stream);
+ xe_eu_stall_stream_free(stream);
+ mutex_unlock(&gt->eu_stall->stream_lock);
+
+ return 0;
+}
+
+static const struct file_operations fops_eu_stall = {
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+ .release = xe_eu_stall_stream_close,
+ .poll = xe_eu_stall_stream_poll,
+ .read = xe_eu_stall_stream_read,
+ .unlocked_ioctl = xe_eu_stall_stream_ioctl,
+ .compat_ioctl = xe_eu_stall_stream_ioctl,
+};
+
+static int xe_eu_stall_stream_open_locked(struct drm_device *dev,
+ struct eu_stall_open_properties *props,
+ struct drm_file *file)
+{
+ struct xe_eu_stall_data_stream *stream;
+ struct xe_gt *gt = props->gt;
+ unsigned long f_flags = 0;
+ int ret, stream_fd;
+
+ /* Only one session can be active at any time */
+ if (gt->eu_stall->stream) {
+ xe_gt_dbg(gt, "EU stall sampling session already active\n");
+ return -EBUSY;
+ }
+
+ stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+ if (!stream)
+ return -ENOMEM;
+
+ gt->eu_stall->stream = stream;
+ stream->gt = gt;
+
+ ret = xe_eu_stall_stream_init(stream, props);
+ if (ret) {
+ xe_gt_dbg(gt, "EU stall stream init failed : %d\n", ret);
+ goto err_free;
+ }
+
+ stream_fd = anon_inode_getfd("[xe_eu_stall]", &fops_eu_stall, stream, f_flags);
+ if (stream_fd < 0) {
+ ret = stream_fd;
+ xe_gt_dbg(gt, "EU stall inode get fd failed : %d\n", ret);
+ goto err_destroy;
+ }
+
+ /* Take a reference on the driver that will be kept with stream_fd
+ * until its release.
+ */
+ drm_dev_get(&gt->tile->xe->drm);
+
+ return stream_fd;
+
+err_destroy:
+ xe_eu_stall_data_buf_destroy(stream);
+err_free:
+ xe_eu_stall_stream_free(stream);
+ return ret;
+}
+
+/**
+ * xe_eu_stall_stream_open - Open a xe EU stall data stream fd
+ *
+ * @dev: DRM device pointer
+ * @data: pointer to first struct @drm_xe_ext_set_property in
+ * the chain of input properties from the user space.
+ * @file: DRM file pointer
+ *
+ * This function opens an EU stall data stream with input properties from
+ * the user space.
+ *
+ * Returns: EU stall data stream fd on success or a negative error code.
+ */
+int xe_eu_stall_stream_open(struct drm_device *dev, u64 data, struct drm_file *file)
+{
+ struct xe_device *xe = to_xe_device(dev);
+ struct eu_stall_open_properties props = {};
+ int ret;
+
+ if (!xe_eu_stall_supported_on_platform(xe)) {
+ drm_dbg(&xe->drm, "EU stall monitoring is not supported on this platform\n");
+ return -ENODEV;
+ }
+
+ if (xe_observation_paranoid && !perfmon_capable()) {
+ drm_dbg(&xe->drm, "Insufficient privileges for EU stall monitoring\n");
+ return -EACCES;
+ }
+
+ /* Initialize and set default values */
+ props.wait_num_reports = 1;
+ props.sampling_rate_mult = 4;
+
+ ret = xe_eu_stall_user_extensions(xe, data, 0, &props);
+ if (ret)
+ return ret;
+
+ if (!props.gt) {
+ drm_dbg(&xe->drm, "GT ID not provided for EU stall sampling\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&props.gt->eu_stall->stream_lock);
+ ret = xe_eu_stall_stream_open_locked(dev, &props, file);
+ mutex_unlock(&props.gt->eu_stall->stream_lock);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/xe/xe_eu_stall.h b/drivers/gpu/drm/xe/xe_eu_stall.h
new file mode 100644
index 000000000000..ed9d0f233566
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_eu_stall.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __XE_EU_STALL_H__
+#define __XE_EU_STALL_H__
+
+#include "xe_gt_types.h"
+
+size_t xe_eu_stall_get_per_xecore_buf_size(void);
+size_t xe_eu_stall_data_record_size(struct xe_device *xe);
+size_t xe_eu_stall_get_sampling_rates(u32 *num_rates, const u64 **rates);
+
+int xe_eu_stall_init(struct xe_gt *gt);
+int xe_eu_stall_stream_open(struct drm_device *dev,
+ u64 data,
+ struct drm_file *file);
+
+static inline bool xe_eu_stall_supported_on_platform(struct xe_device *xe)
+{
+ return xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20;
+}
+#endif
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index df8ce550deb4..b75adfc99fb7 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -262,6 +262,12 @@ retry:
goto err_exec;
}
+ if (xe_exec_queue_uses_pxp(q)) {
+ err = xe_vm_validate_protected(q->vm);
+ if (err)
+ goto err_exec;
+ }
+
job = xe_sched_job_create(q, xe_exec_queue_is_parallel(q) ?
addresses : &args->address);
if (IS_ERR(job)) {
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 7e1abbbfba12..606922d9dd73 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -25,6 +25,7 @@
#include "xe_ring_ops_types.h"
#include "xe_trace.h"
#include "xe_vm.h"
+#include "xe_pxp.h"
enum xe_exec_queue_sched_prop {
XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
@@ -38,6 +39,8 @@ static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue
static void __xe_exec_queue_free(struct xe_exec_queue *q)
{
+ if (xe_exec_queue_uses_pxp(q))
+ xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
if (q->vm)
xe_vm_put(q->vm);
@@ -78,6 +81,7 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
INIT_LIST_HEAD(&q->lr.link);
INIT_LIST_HEAD(&q->multi_gt_link);
INIT_LIST_HEAD(&q->hw_engine_group_link);
+ INIT_LIST_HEAD(&q->pxp.link);
q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
q->sched_props.preempt_timeout_us =
@@ -112,6 +116,21 @@ static int __xe_exec_queue_init(struct xe_exec_queue *q)
{
struct xe_vm *vm = q->vm;
int i, err;
+ u32 flags = 0;
+
+ /*
+ * PXP workloads executing on RCS or CCS must run in isolation (i.e. no
+ * other workload can use the EUs at the same time). On MTL this is done
+ * by setting the RUNALONE bit in the LRC, while starting on Xe2 there
+ * is a dedicated bit for it.
+ */
+ if (xe_exec_queue_uses_pxp(q) &&
+ (q->class == XE_ENGINE_CLASS_RENDER || q->class == XE_ENGINE_CLASS_COMPUTE)) {
+ if (GRAPHICS_VER(gt_to_xe(q->gt)) >= 20)
+ flags |= XE_LRC_CREATE_PXP;
+ else
+ flags |= XE_LRC_CREATE_RUNALONE;
+ }
if (vm) {
err = xe_vm_lock(vm, true);
@@ -120,7 +139,7 @@ static int __xe_exec_queue_init(struct xe_exec_queue *q)
}
for (i = 0; i < q->width; ++i) {
- q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec);
+ q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec, flags);
if (IS_ERR(q->lrc[i])) {
err = PTR_ERR(q->lrc[i]);
goto err_unlock;
@@ -153,6 +172,9 @@ struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *v
struct xe_exec_queue *q;
int err;
+ /* VMs for GSCCS queues (and only those) must have the XE_VM_FLAG_GSC flag */
+ xe_assert(xe, !vm || (!!(vm->flags & XE_VM_FLAG_GSC) == !!(hwe->engine_id == XE_HW_ENGINE_GSCCS0)));
+
q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags,
extensions);
if (IS_ERR(q))
@@ -162,12 +184,26 @@ struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *v
if (err)
goto err_post_alloc;
+ /*
+ * We can only add the queue to the PXP list after the init is complete,
+ * because the PXP termination can call exec_queue_kill and that will
+ * go bad if the queue is only half-initialized. This means that we
+ * can't do it when we handle the PXP extension in __xe_exec_queue_alloc
+ * and we need to do it here instead.
+ */
+ if (xe_exec_queue_uses_pxp(q)) {
+ err = xe_pxp_exec_queue_add(xe->pxp, q);
+ if (err)
+ goto err_post_alloc;
+ }
+
return q;
err_post_alloc:
__xe_exec_queue_free(q);
return ERR_PTR(err);
}
+ALLOW_ERROR_INJECTION(xe_exec_queue_create, ERRNO);
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
struct xe_vm *vm,
@@ -250,6 +286,9 @@ void xe_exec_queue_destroy(struct kref *ref)
struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
struct xe_exec_queue *eq, *next;
+ if (xe_exec_queue_uses_pxp(q))
+ xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
+
xe_exec_queue_last_fence_put_unlocked(q);
if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
list_for_each_entry_safe(eq, next, &q->multi_gt_list,
@@ -405,6 +444,22 @@ static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *
return 0;
}
+static int
+exec_queue_set_pxp_type(struct xe_device *xe, struct xe_exec_queue *q, u64 value)
+{
+ if (value == DRM_XE_PXP_TYPE_NONE)
+ return 0;
+
+ /* we only support HWDRM sessions right now */
+ if (XE_IOCTL_DBG(xe, value != DRM_XE_PXP_TYPE_HWDRM))
+ return -EINVAL;
+
+ if (!xe_pxp_is_enabled(xe->pxp))
+ return -ENODEV;
+
+ return xe_pxp_exec_queue_set_type(xe->pxp, q, DRM_XE_PXP_TYPE_HWDRM);
+}
+
typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
struct xe_exec_queue *q,
u64 value);
@@ -412,6 +467,7 @@ typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
+ [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE] = exec_queue_set_pxp_type,
};
static int exec_queue_user_ext_set_property(struct xe_device *xe,
@@ -431,7 +487,8 @@ static int exec_queue_user_ext_set_property(struct xe_device *xe,
ARRAY_SIZE(exec_queue_set_property_funcs)) ||
XE_IOCTL_DBG(xe, ext.pad) ||
XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
- ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE))
+ ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE &&
+ ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE))
return -EINVAL;
idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
@@ -483,7 +540,7 @@ static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue
return 0;
}
-static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
+static u32 calc_validate_logical_mask(struct xe_device *xe,
struct drm_xe_engine_class_instance *eci,
u16 width, u16 num_placements)
{
@@ -545,15 +602,15 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
u64_to_user_ptr(args->instances);
struct xe_hw_engine *hwe;
struct xe_vm *vm;
- struct xe_gt *gt;
struct xe_tile *tile;
struct xe_exec_queue *q = NULL;
u32 logical_mask;
+ u32 flags = 0;
u32 id;
u32 len;
int err;
- if (XE_IOCTL_DBG(xe, args->flags) ||
+ if (XE_IOCTL_DBG(xe, args->flags & ~DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT) ||
XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
return -EINVAL;
@@ -570,6 +627,9 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
return -EINVAL;
+ if (args->flags & DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT)
+ flags |= EXEC_QUEUE_FLAG_LOW_LATENCY;
+
if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
if (XE_IOCTL_DBG(xe, args->width != 1) ||
XE_IOCTL_DBG(xe, args->num_placements != 1) ||
@@ -578,8 +638,8 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
for_each_tile(tile, xe, id) {
struct xe_exec_queue *new;
- u32 flags = EXEC_QUEUE_FLAG_VM;
+ flags |= EXEC_QUEUE_FLAG_VM;
if (id)
flags |= EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD;
@@ -598,8 +658,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
&q->multi_gt_link);
}
} else {
- gt = xe_device_get_gt(xe, eci[0].gt_id);
- logical_mask = calc_validate_logical_mask(xe, gt, eci,
+ logical_mask = calc_validate_logical_mask(xe, eci,
args->width,
args->num_placements);
if (XE_IOCTL_DBG(xe, !logical_mask))
@@ -626,7 +685,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
}
q = xe_exec_queue_create(xe, vm, logical_mask,
- args->width, hwe, 0,
+ args->width, hwe, flags,
args->extensions);
up_read(&vm->lock);
xe_vm_put(vm);
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h
index 90c7f73eab88..17bc50a7f05a 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue.h
@@ -57,6 +57,11 @@ static inline bool xe_exec_queue_is_parallel(struct xe_exec_queue *q)
return q->width > 1;
}
+static inline bool xe_exec_queue_uses_pxp(struct xe_exec_queue *q)
+{
+ return q->pxp.type;
+}
+
bool xe_exec_queue_is_lr(struct xe_exec_queue *q);
bool xe_exec_queue_ring_full(struct xe_exec_queue *q);
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 5af5419cec7a..cc1cffb5c87f 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -85,6 +85,8 @@ struct xe_exec_queue {
#define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD BIT(3)
/* kernel exec_queue only, set priority to highest level */
#define EXEC_QUEUE_FLAG_HIGH_PRIORITY BIT(4)
+/* flag to indicate a low latency hint to GuC */
+#define EXEC_QUEUE_FLAG_LOW_LATENCY BIT(5)
/**
* @flags: flags for this exec queue, should statically setup aside from ban
@@ -130,6 +132,14 @@ struct xe_exec_queue {
struct list_head link;
} lr;
+ /** @pxp: PXP info tracking */
+ struct {
+ /** @pxp.type: PXP session type used by this queue */
+ u8 type;
+ /** @pxp.link: link into the list of PXP exec queues */
+ struct list_head link;
+ } pxp;
+
/** @ops: submission backend exec queue operations */
const struct xe_exec_queue_ops *ops;
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index 5ef96deaa881..9fbed1a2fcc6 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -269,7 +269,7 @@ struct xe_execlist_port *xe_execlist_port_create(struct xe_device *xe,
port->hwe = hwe;
- port->lrc = xe_lrc_create(hwe, NULL, SZ_16K, XE_IRQ_DEFAULT_MSIX);
+ port->lrc = xe_lrc_create(hwe, NULL, SZ_16K, XE_IRQ_DEFAULT_MSIX, 0);
if (IS_ERR(port->lrc)) {
err = PTR_ERR(port->lrc);
goto err;
@@ -336,6 +336,15 @@ static const struct drm_sched_backend_ops drm_sched_ops = {
static int execlist_exec_queue_init(struct xe_exec_queue *q)
{
struct drm_gpu_scheduler *sched;
+ const struct drm_sched_init_args args = {
+ .ops = &drm_sched_ops,
+ .num_rqs = 1,
+ .credit_limit = q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES,
+ .hang_limit = XE_SCHED_HANG_LIMIT,
+ .timeout = XE_SCHED_JOB_TIMEOUT,
+ .name = q->hwe->name,
+ .dev = gt_to_xe(q->gt)->drm.dev,
+ };
struct xe_execlist_exec_queue *exl;
struct xe_device *xe = gt_to_xe(q->gt);
int err;
@@ -350,11 +359,7 @@ static int execlist_exec_queue_init(struct xe_exec_queue *q)
exl->q = q;
- err = drm_sched_init(&exl->sched, &drm_sched_ops, NULL, 1,
- q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES,
- XE_SCHED_HANG_LIMIT, XE_SCHED_JOB_TIMEOUT,
- NULL, NULL, q->hwe->name,
- gt_to_xe(q->gt)->drm.dev);
+ err = drm_sched_init(&exl->sched, &args);
if (err)
goto err_free;
diff --git a/drivers/gpu/drm/xe/xe_gen_wa_oob.c b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
index 904cf47925aa..ed9183599e31 100644
--- a/drivers/gpu/drm/xe/xe_gen_wa_oob.c
+++ b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
@@ -28,10 +28,10 @@
"\n" \
"#endif\n"
-static void print_usage(FILE *f)
+static void print_usage(FILE *f, const char *progname)
{
fprintf(f, "usage: %s <input-rule-file> <generated-c-source-file> <generated-c-header-file>\n",
- program_invocation_short_name);
+ progname);
}
static void print_parse_error(const char *err_msg, const char *line,
@@ -144,7 +144,7 @@ int main(int argc, const char *argv[])
if (argc < 3) {
fprintf(stderr, "ERROR: wrong arguments\n");
- print_usage(stderr);
+ print_usage(stderr, argv[0]);
return 1;
}
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.c b/drivers/gpu/drm/xe/xe_gpu_scheduler.c
index 50361b4638f9..869b43a4151d 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.c
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.c
@@ -63,13 +63,24 @@ int xe_sched_init(struct xe_gpu_scheduler *sched,
atomic_t *score, const char *name,
struct device *dev)
{
+ const struct drm_sched_init_args args = {
+ .ops = ops,
+ .submit_wq = submit_wq,
+ .num_rqs = 1,
+ .credit_limit = hw_submission,
+ .hang_limit = hang_limit,
+ .timeout = timeout,
+ .timeout_wq = timeout_wq,
+ .score = score,
+ .name = name,
+ .dev = dev,
+ };
+
sched->ops = xe_ops;
INIT_LIST_HEAD(&sched->msgs);
INIT_WORK(&sched->work_process_msg, xe_sched_process_msg_work);
- return drm_sched_init(&sched->base, ops, submit_wq, 1, hw_submission,
- hang_limit, timeout, timeout_wq, score, name,
- dev);
+ return drm_sched_init(&sched->base, &args);
}
void xe_sched_fini(struct xe_gpu_scheduler *sched)
diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c
index 1eb791ddc375..fd41113f8572 100644
--- a/drivers/gpu/drm/xe/xe_gsc.c
+++ b/drivers/gpu/drm/xe/xe_gsc.c
@@ -555,15 +555,6 @@ void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc)
flush_work(&gsc->work);
}
-/**
- * xe_gsc_remove() - Clean up the GSC structures before driver removal
- * @gsc: the GSC uC
- */
-void xe_gsc_remove(struct xe_gsc *gsc)
-{
- xe_gsc_proxy_remove(gsc);
-}
-
/*
* wa_14015076503: if the GSC FW is loaded, we need to alert it before doing a
* GSC engine reset by writing a notification bit in the GS1 register and then
diff --git a/drivers/gpu/drm/xe/xe_gsc.h b/drivers/gpu/drm/xe/xe_gsc.h
index e282b9ef6ec4..d99f66c38075 100644
--- a/drivers/gpu/drm/xe/xe_gsc.h
+++ b/drivers/gpu/drm/xe/xe_gsc.h
@@ -17,7 +17,6 @@ int xe_gsc_init(struct xe_gsc *gsc);
int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc);
void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc);
void xe_gsc_load_start(struct xe_gsc *gsc);
-void xe_gsc_remove(struct xe_gsc *gsc);
void xe_gsc_hwe_irq_handler(struct xe_hw_engine *hwe, u16 intr_vec);
void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep);
diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c
index 24cc6a4f9a96..8cf70b228ff3 100644
--- a/drivers/gpu/drm/xe/xe_gsc_proxy.c
+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c
@@ -423,6 +423,34 @@ static int proxy_channel_alloc(struct xe_gsc *gsc)
return 0;
}
+static void xe_gsc_proxy_remove(void *arg)
+{
+ struct xe_gsc *gsc = arg;
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int fw_ref = 0;
+
+ if (!gsc->proxy.component_added)
+ return;
+
+ /* disable HECI2 IRQs */
+ xe_pm_runtime_get(xe);
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
+ if (!fw_ref)
+ xe_gt_err(gt, "failed to get forcewake to disable GSC interrupts\n");
+
+ /* try to disable the irq even if forcewake failed */
+ gsc_proxy_irq_toggle(gsc, false);
+
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ xe_pm_runtime_put(xe);
+
+ xe_gsc_wait_for_worker_completion(gsc);
+
+ component_del(xe->drm.dev, &xe_gsc_proxy_component_ops);
+ gsc->proxy.component_added = false;
+}
+
/**
* xe_gsc_proxy_init() - init objects and MEI component required by GSC proxy
* @gsc: the GSC uC
@@ -462,40 +490,7 @@ int xe_gsc_proxy_init(struct xe_gsc *gsc)
gsc->proxy.component_added = true;
- /* the component must be removed before unload, so can't use drmm for cleanup */
-
- return 0;
-}
-
-/**
- * xe_gsc_proxy_remove() - remove the GSC proxy MEI component
- * @gsc: the GSC uC
- */
-void xe_gsc_proxy_remove(struct xe_gsc *gsc)
-{
- struct xe_gt *gt = gsc_to_gt(gsc);
- struct xe_device *xe = gt_to_xe(gt);
- unsigned int fw_ref = 0;
-
- if (!gsc->proxy.component_added)
- return;
-
- /* disable HECI2 IRQs */
- xe_pm_runtime_get(xe);
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
- if (!fw_ref)
- xe_gt_err(gt, "failed to get forcewake to disable GSC interrupts\n");
-
- /* try do disable irq even if forcewake failed */
- gsc_proxy_irq_toggle(gsc, false);
-
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
- xe_pm_runtime_put(xe);
-
- xe_gsc_wait_for_worker_completion(gsc);
-
- component_del(xe->drm.dev, &xe_gsc_proxy_component_ops);
- gsc->proxy.component_added = false;
+ return devm_add_action_or_reset(xe->drm.dev, xe_gsc_proxy_remove, gsc);
}
/**
diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.h b/drivers/gpu/drm/xe/xe_gsc_proxy.h
index c511ade6b863..fdef56995cd4 100644
--- a/drivers/gpu/drm/xe/xe_gsc_proxy.h
+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.h
@@ -12,7 +12,6 @@ struct xe_gsc;
int xe_gsc_proxy_init(struct xe_gsc *gsc);
bool xe_gsc_proxy_init_done(struct xe_gsc *gsc);
-void xe_gsc_proxy_remove(struct xe_gsc *gsc);
int xe_gsc_proxy_start(struct xe_gsc *gsc);
int xe_gsc_proxy_request_handler(struct xe_gsc *gsc);
diff --git a/drivers/gpu/drm/xe/xe_gsc_types.h b/drivers/gpu/drm/xe/xe_gsc_types.h
index 5926de20214c..97c056656df0 100644
--- a/drivers/gpu/drm/xe/xe_gsc_types.h
+++ b/drivers/gpu/drm/xe/xe_gsc_types.h
@@ -13,6 +13,7 @@
#include <linux/workqueue.h>
#include "xe_uc_fw_types.h"
+#include "xe_device_types.h"
struct xe_bo;
struct xe_exec_queue;
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 9f4f27d1ef4a..10a9e3c72b36 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -19,6 +19,7 @@
#include "xe_bb.h"
#include "xe_bo.h"
#include "xe_device.h"
+#include "xe_eu_stall.h"
#include "xe_exec_queue.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
@@ -32,6 +33,7 @@
#include "xe_gt_pagefault.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_pf.h"
+#include "xe_gt_sriov_vf.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_gt_topology.h"
@@ -140,26 +142,6 @@ static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
-/**
- * xe_gt_remove() - Clean up the GT structures before driver removal
- * @gt: the GT object
- *
- * This function should only act on objects/structures that must be cleaned
- * before the driver removal callback is complete and therefore can't be
- * deferred to a drmm action.
- */
-void xe_gt_remove(struct xe_gt *gt)
-{
- int i;
-
- xe_uc_remove(&gt->uc);
-
- for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
- xe_hw_fence_irq_finish(&gt->fence_irq[i]);
-
- xe_gt_disable_host_l2_vram(gt);
-}
-
static void gt_reset_worker(struct work_struct *w);
static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
@@ -380,6 +362,10 @@ int xe_gt_init_early(struct xe_gt *gt)
if (err)
return err;
+ err = xe_tuning_init(gt);
+ if (err)
+ return err;
+
xe_wa_process_oob(gt);
xe_force_wake_init_gt(gt, gt_to_fw(gt));
@@ -406,13 +392,11 @@ static void dump_pat_on_error(struct xe_gt *gt)
static int gt_fw_domain_init(struct xe_gt *gt)
{
unsigned int fw_ref;
- int err, i;
+ int err;
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (!fw_ref) {
- err = -ETIMEDOUT;
- goto err_hw_fence_irq;
- }
+ if (!fw_ref)
+ return -ETIMEDOUT;
if (!xe_gt_is_media_type(gt)) {
err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
@@ -453,9 +437,6 @@ static int gt_fw_domain_init(struct xe_gt *gt)
err_force_wake:
dump_pat_on_error(gt);
xe_force_wake_put(gt_to_fw(gt), fw_ref);
-err_hw_fence_irq:
- for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
- xe_hw_fence_irq_finish(&gt->fence_irq[i]);
return err;
}
@@ -463,7 +444,7 @@ err_hw_fence_irq:
static int all_fw_domain_init(struct xe_gt *gt)
{
unsigned int fw_ref;
- int err, i;
+ int err;
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
@@ -543,8 +524,6 @@ static int all_fw_domain_init(struct xe_gt *gt)
err_force_wake:
xe_force_wake_put(gt_to_fw(gt), fw_ref);
- for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
- xe_hw_fence_irq_finish(&gt->fence_irq[i]);
return err;
}
@@ -582,6 +561,17 @@ out_fw:
return err;
}
+static void xe_gt_fini(void *arg)
+{
+ struct xe_gt *gt = arg;
+ int i;
+
+ for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
+ xe_hw_fence_irq_finish(&gt->fence_irq[i]);
+
+ xe_gt_disable_host_l2_vram(gt);
+}
+
int xe_gt_init(struct xe_gt *gt)
{
int err;
@@ -594,6 +584,10 @@ int xe_gt_init(struct xe_gt *gt)
xe_hw_fence_irq_init(&gt->fence_irq[i]);
}
+ err = devm_add_action_or_reset(gt_to_xe(gt)->drm.dev, xe_gt_fini, gt);
+ if (err)
+ return err;
+
err = xe_gt_pagefault_init(gt);
if (err)
return err;
@@ -624,6 +618,10 @@ int xe_gt_init(struct xe_gt *gt)
xe_gt_record_user_engines(gt);
+ err = xe_eu_stall_init(gt);
+ if (err)
+ return err;
+
return 0;
}
@@ -637,17 +635,19 @@ int xe_gt_init(struct xe_gt *gt)
void xe_gt_mmio_init(struct xe_gt *gt)
{
struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_device *xe = tile_to_xe(tile);
- gt->mmio.regs = tile->mmio.regs;
- gt->mmio.regs_size = tile->mmio.regs_size;
- gt->mmio.tile = tile;
+ xe_mmio_init(&gt->mmio, tile, tile->mmio.regs, tile->mmio.regs_size);
if (gt->info.type == XE_GT_TYPE_MEDIA) {
gt->mmio.adj_offset = MEDIA_GT_GSI_OFFSET;
gt->mmio.adj_limit = MEDIA_GT_GSI_LENGTH;
+ } else {
+ gt->mmio.adj_offset = 0;
+ gt->mmio.adj_limit = 0;
}
- if (IS_SRIOV_VF(gt_to_xe(gt)))
+ if (IS_SRIOV_VF(xe))
gt->mmio.sriov_vf_gt = gt;
}
@@ -676,6 +676,9 @@ static int do_gt_reset(struct xe_gt *gt)
{
int err;
+ if (IS_SRIOV_VF(gt_to_xe(gt)))
+ return xe_gt_sriov_vf_reset(gt);
+
xe_gsc_wa_14015076503(gt, true);
xe_mmio_write32(&gt->mmio, GDRST, GRDOM_FULL);
diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h
index e504cc33ade4..187fa6490eaf 100644
--- a/drivers/gpu/drm/xe/xe_gt.h
+++ b/drivers/gpu/drm/xe/xe_gt.h
@@ -54,7 +54,6 @@ int xe_gt_resume(struct xe_gt *gt);
void xe_gt_reset_async(struct xe_gt *gt);
void xe_gt_sanitize(struct xe_gt *gt);
int xe_gt_sanitize_freq(struct xe_gt *gt);
-void xe_gt_remove(struct xe_gt *gt);
/**
* xe_gt_wait_for_reset - wait for gt's async reset to finalize.
diff --git a/drivers/gpu/drm/xe/xe_gt_clock.c b/drivers/gpu/drm/xe/xe_gt_clock.c
index cc2ae159298e..2a958c92d8ea 100644
--- a/drivers/gpu/drm/xe/xe_gt_clock.c
+++ b/drivers/gpu/drm/xe/xe_gt_clock.c
@@ -12,25 +12,10 @@
#include "xe_assert.h"
#include "xe_device.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_macros.h"
#include "xe_mmio.h"
-static u32 read_reference_ts_freq(struct xe_gt *gt)
-{
- u32 ts_override = xe_mmio_read32(&gt->mmio, TIMESTAMP_OVERRIDE);
- u32 base_freq, frac_freq;
-
- base_freq = REG_FIELD_GET(TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK,
- ts_override) + 1;
- base_freq *= 1000000;
-
- frac_freq = REG_FIELD_GET(TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK,
- ts_override);
- frac_freq = 1000000 / (frac_freq + 1);
-
- return base_freq + frac_freq;
-}
-
static u32 get_crystal_clock_freq(u32 rpm_config_reg)
{
const u32 f19_2_mhz = 19200000;
@@ -57,26 +42,30 @@ static u32 get_crystal_clock_freq(u32 rpm_config_reg)
int xe_gt_clock_init(struct xe_gt *gt)
{
- u32 ctc_reg = xe_mmio_read32(&gt->mmio, CTC_MODE);
+ u32 c0 = xe_mmio_read32(&gt->mmio, RPM_CONFIG0);
u32 freq = 0;
- /* Assuming gen11+ so assert this assumption is correct */
- xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);
-
- if (ctc_reg & CTC_SOURCE_DIVIDE_LOGIC) {
- freq = read_reference_ts_freq(gt);
- } else {
- u32 c0 = xe_mmio_read32(&gt->mmio, RPM_CONFIG0);
-
- freq = get_crystal_clock_freq(c0);
-
- /*
- * Now figure out how the command stream's timestamp
- * register increments from this frequency (it might
- * increment only every few clock cycle).
- */
- freq >>= 3 - REG_FIELD_GET(RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, c0);
- }
+ /*
+ * CTC_MODE[0] = 1 is definitely not supported for Xe2 and later
+ * platforms. In theory it could be a valid setting for pre-Xe2
+ * platforms, but there's no documentation on how to properly handle
+ * this case. Reading TIMESTAMP_OVERRIDE, as the driver attempted in
+ * the past, has been confirmed as incorrect by the hardware architects.
+ *
+ * For now just warn if we ever encounter hardware in the wild that
+ * has this setting and move on as if it hadn't been set.
+ */
+ if (xe_mmio_read32(&gt->mmio, CTC_MODE) & CTC_SOURCE_DIVIDE_LOGIC)
+ xe_gt_warn(gt, "CTC_MODE[0] is set; this is unexpected and undocumented\n");
+
+ freq = get_crystal_clock_freq(c0);
+
+ /*
+ * Now figure out how the command stream's timestamp
+ * register increments from this frequency (it might
+ * increment only every few clock cycles).
+ */
+ freq >>= 3 - REG_FIELD_GET(RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, c0);
gt->info.reference_clock = freq;
return 0;
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index e7792858b1e4..2d63a69cbfa3 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -30,6 +30,7 @@
#include "xe_reg_sr.h"
#include "xe_reg_whitelist.h"
#include "xe_sriov.h"
+#include "xe_tuning.h"
#include "xe_uc_debugfs.h"
#include "xe_wa.h"
@@ -217,6 +218,15 @@ static int workarounds(struct xe_gt *gt, struct drm_printer *p)
return 0;
}
+static int tunings(struct xe_gt *gt, struct drm_printer *p)
+{
+ xe_pm_runtime_get(gt_to_xe(gt));
+ xe_tuning_dump(gt, p);
+ xe_pm_runtime_put(gt_to_xe(gt));
+
+ return 0;
+}
+
static int pat(struct xe_gt *gt, struct drm_printer *p)
{
xe_pm_runtime_get(gt_to_xe(gt));
@@ -300,6 +310,7 @@ static const struct drm_info_list debugfs_list[] = {
{"powergate_info", .show = xe_gt_debugfs_simple_show, .data = powergate_info},
{"register-save-restore", .show = xe_gt_debugfs_simple_show, .data = register_save_restore},
{"workarounds", .show = xe_gt_debugfs_simple_show, .data = workarounds},
+ {"tunings", .show = xe_gt_debugfs_simple_show, .data = tunings},
{"pat", .show = xe_gt_debugfs_simple_show, .data = pat},
{"mocs", .show = xe_gt_debugfs_simple_show, .data = mocs},
{"default_lrc_rcs", .show = xe_gt_debugfs_simple_show, .data = rcs_default_lrc},
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index ffd3ba7f6656..fbbace7b0b12 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -69,6 +69,8 @@ static u64 get_residency_ms(struct xe_gt_idle *gtidle, u64 cur_residency)
{
u64 delta, overflow_residency, prev_residency;
+ lockdep_assert_held(&gtidle->lock);
+
overflow_residency = BIT_ULL(32);
/*
@@ -275,8 +277,21 @@ static ssize_t idle_status_show(struct device *dev,
return sysfs_emit(buff, "%s\n", gt_idle_state_to_string(state));
}
-static DEVICE_ATTR_RO(idle_status);
+u64 xe_gt_idle_residency_msec(struct xe_gt_idle *gtidle)
+{
+ struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
+ u64 residency;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&gtidle->lock, flags);
+ residency = get_residency_ms(gtidle, gtidle->idle_residency(pc));
+ raw_spin_unlock_irqrestore(&gtidle->lock, flags);
+
+ return residency;
+}
+
+static DEVICE_ATTR_RO(idle_status);
static ssize_t idle_residency_ms_show(struct device *dev,
struct device_attribute *attr, char *buff)
{
@@ -285,10 +300,10 @@ static ssize_t idle_residency_ms_show(struct device *dev,
u64 residency;
xe_pm_runtime_get(pc_to_xe(pc));
- residency = gtidle->idle_residency(pc);
+ residency = xe_gt_idle_residency_msec(gtidle);
xe_pm_runtime_put(pc_to_xe(pc));
- return sysfs_emit(buff, "%llu\n", get_residency_ms(gtidle, residency));
+ return sysfs_emit(buff, "%llu\n", residency);
}
static DEVICE_ATTR_RO(idle_residency_ms);
@@ -331,6 +346,8 @@ int xe_gt_idle_init(struct xe_gt_idle *gtidle)
if (!kobj)
return -ENOMEM;
+ raw_spin_lock_init(&gtidle->lock);
+
if (xe_gt_is_media_type(gt)) {
snprintf(gtidle->name, sizeof(gtidle->name), "gt%d-mc", gt->info.id);
gtidle->idle_residency = xe_guc_pc_mc6_residency;
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.h b/drivers/gpu/drm/xe/xe_gt_idle.h
index 4455a6501cb0..591a01e181bc 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.h
+++ b/drivers/gpu/drm/xe/xe_gt_idle.h
@@ -17,5 +17,6 @@ void xe_gt_idle_disable_c6(struct xe_gt *gt);
void xe_gt_idle_enable_pg(struct xe_gt *gt);
void xe_gt_idle_disable_pg(struct xe_gt *gt);
int xe_gt_idle_pg_print(struct xe_gt *gt, struct drm_printer *p);
+u64 xe_gt_idle_residency_msec(struct xe_gt_idle *gtidle);
#endif /* _XE_GT_IDLE_H_ */
diff --git a/drivers/gpu/drm/xe/xe_gt_idle_types.h b/drivers/gpu/drm/xe/xe_gt_idle_types.h
index b8b297a3f884..a3667c567f8a 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_idle_types.h
@@ -6,6 +6,7 @@
#ifndef _XE_GT_IDLE_SYSFS_TYPES_H_
#define _XE_GT_IDLE_SYSFS_TYPES_H_
+#include <linux/spinlock.h>
#include <linux/types.h>
struct xe_guc_pc;
@@ -31,6 +32,8 @@ struct xe_gt_idle {
u64 cur_residency;
/** @prev_residency: previous residency counter */
u64 prev_residency;
+ /** @lock: Lock protecting idle residency counters */
+ raw_spinlock_t lock;
/** @idle_status: get the current idle state */
enum xe_gt_idle_state (*idle_status)(struct xe_guc_pc *pc);
/** @idle_residency: get idle residency counter */
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index a1676b787fdc..605aad3554e7 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -341,7 +341,13 @@ static unsigned int dss_per_group(struct xe_gt *gt)
return DIV_ROUND_UP(max_subslices, max_slices);
fallback:
- xe_gt_dbg(gt, "GuC hwconfig cannot provide dss/slice; using typical fallback values\n");
+ /*
+ * Some older platforms don't have tables or don't have complete tables.
+ * Newer platforms should always have the required info.
+ */
+ if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 2000)
+ xe_gt_err(gt, "Slice/Subslice counts missing from hwconfig table; using typical fallback values\n");
+
if (gt_to_xe(gt)->info.platform == XE_PVC)
return 8;
else if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1250)
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 2606cd396df5..c5ad9a0a89c2 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -14,10 +14,12 @@
#include "abi/guc_actions_abi.h"
#include "xe_bo.h"
#include "xe_gt.h"
+#include "xe_gt_stats.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_migrate.h"
+#include "xe_svm.h"
#include "xe_trace_bo.h"
#include "xe_vm.h"
@@ -124,18 +126,22 @@ static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
return 0;
}
-static int handle_vma_pagefault(struct xe_tile *tile, struct pagefault *pf,
- struct xe_vma *vma)
+static int handle_vma_pagefault(struct xe_gt *gt, struct xe_vma *vma,
+ bool atomic)
{
struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_tile *tile = gt_to_tile(gt);
struct drm_exec exec;
struct dma_fence *fence;
ktime_t end = 0;
int err;
- bool atomic;
+
+ lockdep_assert_held_write(&vm->lock);
+
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT, 1);
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_KB, xe_vma_size(vma) / 1024);
trace_xe_vma_pagefault(vma);
- atomic = access_is_atomic(pf->access_type);
/* Check if VMA is valid */
if (vma_is_valid(tile, vma) && !atomic)
@@ -202,10 +208,10 @@ static struct xe_vm *asid_to_vm(struct xe_device *xe, u32 asid)
static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
{
struct xe_device *xe = gt_to_xe(gt);
- struct xe_tile *tile = gt_to_tile(gt);
struct xe_vm *vm;
struct xe_vma *vma = NULL;
int err;
+ bool atomic;
/* SW isn't expected to handle TRTT faults */
if (pf->trva_fault)
@@ -231,7 +237,13 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
goto unlock_vm;
}
- err = handle_vma_pagefault(tile, pf, vma);
+ atomic = access_is_atomic(pf->access_type);
+
+ if (xe_vma_is_cpu_addr_mirror(vma))
+ err = xe_svm_handle_pagefault(vm, vma, gt_to_tile(gt),
+ pf->page_addr, atomic);
+ else
+ err = handle_vma_pagefault(gt, vma, atomic);
unlock_vm:
if (!err)
@@ -263,12 +275,13 @@ static void print_pagefault(struct xe_device *xe, struct pagefault *pf)
"\tFaultType: %d\n"
"\tAccessType: %d\n"
"\tFaultLevel: %d\n"
- "\tEngineClass: %d\n"
+ "\tEngineClass: %d %s\n"
"\tEngineInstance: %d\n",
pf->asid, pf->vfid, pf->pdata, upper_32_bits(pf->page_addr),
lower_32_bits(pf->page_addr),
pf->fault_type, pf->access_type, pf->fault_level,
- pf->engine_class, pf->engine_instance);
+ pf->engine_class, xe_hw_engine_class_to_str(pf->engine_class),
+ pf->engine_instance);
}
#define PF_MSG_LEN_DW 4
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
index 6f906c8e8108..c08efca6420e 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
@@ -15,7 +15,11 @@
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_migration.h"
#include "xe_gt_sriov_pf_service.h"
+#include "xe_gt_sriov_printk.h"
#include "xe_mmio.h"
+#include "xe_pm.h"
+
+static void pf_worker_restart_func(struct work_struct *w);
/*
* VF's metadata is maintained in the flexible array where:
@@ -41,6 +45,11 @@ static int pf_alloc_metadata(struct xe_gt *gt)
return 0;
}
+static void pf_init_workers(struct xe_gt *gt)
+{
+ INIT_WORK(&gt->sriov.pf.workers.restart, pf_worker_restart_func);
+}
+
/**
* xe_gt_sriov_pf_init_early - Prepare SR-IOV PF data structures on PF.
* @gt: the &xe_gt to initialize
@@ -65,6 +74,8 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
if (err)
return err;
+ pf_init_workers(gt);
+
return 0;
}
@@ -78,6 +89,12 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
*/
int xe_gt_sriov_pf_init(struct xe_gt *gt)
{
+ int err;
+
+ err = xe_gt_sriov_pf_config_init(gt);
+ if (err)
+ return err;
+
return xe_gt_sriov_pf_migration_init(gt);
}
@@ -155,6 +172,35 @@ void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid)
pf_clear_vf_scratch_regs(gt, vfid);
}
+static void pf_restart(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_pm_runtime_get(xe);
+ xe_gt_sriov_pf_config_restart(gt);
+ xe_gt_sriov_pf_control_restart(gt);
+ xe_pm_runtime_put(xe);
+
+ xe_gt_sriov_dbg(gt, "restart completed\n");
+}
+
+static void pf_worker_restart_func(struct work_struct *w)
+{
+ struct xe_gt *gt = container_of(w, typeof(*gt), sriov.pf.workers.restart);
+
+ pf_restart(gt);
+}
+
+static void pf_queue_restart(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_gt_assert(gt, IS_SRIOV_PF(xe));
+
+ if (!queue_work(xe->sriov.wq, &gt->sriov.pf.workers.restart))
+ xe_gt_sriov_dbg(gt, "restart already in queue!\n");
+}
+
/**
* xe_gt_sriov_pf_restart - Restart SR-IOV support after a GT reset.
* @gt: the &xe_gt
@@ -163,6 +209,5 @@ void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid)
*/
void xe_gt_sriov_pf_restart(struct xe_gt *gt)
{
- xe_gt_sriov_pf_config_restart(gt);
- xe_gt_sriov_pf_control_restart(gt);
+ pf_queue_restart(gt);
}
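Condensed from the hunks above, a hedged sketch of the complete deferral path: the GT-reset handler only queues work, and the actual PF restart runs later on the SR-IOV workqueue. All names are taken from this patch; pf_queue_restart() is inlined here for brevity.

static void pf_worker_restart_func(struct work_struct *w)
{
	struct xe_gt *gt = container_of(w, typeof(*gt), sriov.pf.workers.restart);

	pf_restart(gt);		/* takes its own runtime PM reference */
}

static void pf_init_workers(struct xe_gt *gt)
{
	INIT_WORK(&gt->sriov.pf.workers.restart, pf_worker_restart_func);
}

void xe_gt_sriov_pf_restart(struct xe_gt *gt)
{
	/* cheap and non-blocking, safe to call from the GT reset path */
	if (!queue_work(gt_to_xe(gt)->sriov.wq, &gt->sriov.pf.workers.restart))
		xe_gt_sriov_dbg(gt, "restart already in queue!\n");
}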
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 878e96281c03..10be109bf357 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -20,6 +20,7 @@
#include "xe_gt_sriov_pf_policy.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc.h"
+#include "xe_guc_buf.h"
#include "xe_guc_ct.h"
#include "xe_guc_db_mgr.h"
#include "xe_guc_fwif.h"
@@ -71,48 +72,27 @@ static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid)
* Return: number of KLVs that were successfully parsed and saved,
* negative error code on failure.
*/
-static int pf_send_vf_cfg_klvs(struct xe_gt *gt, u32 vfid, const u32 *klvs, u32 num_dwords)
+static int pf_send_vf_buf_klvs(struct xe_gt *gt, u32 vfid, struct xe_guc_buf buf, u32 num_dwords)
{
- const u32 bytes = num_dwords * sizeof(u32);
- struct xe_tile *tile = gt_to_tile(gt);
- struct xe_device *xe = tile_to_xe(tile);
struct xe_guc *guc = &gt->uc.guc;
- struct xe_bo *bo;
- int ret;
-
- bo = xe_bo_create_pin_map(xe, tile, NULL,
- ALIGN(bytes, PAGE_SIZE),
- ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
- if (IS_ERR(bo))
- return PTR_ERR(bo);
-
- xe_map_memcpy_to(xe, &bo->vmap, 0, klvs, bytes);
-
- ret = guc_action_update_vf_cfg(guc, vfid, xe_bo_ggtt_addr(bo), num_dwords);
- xe_bo_unpin_map_no_vm(bo);
-
- return ret;
+ return guc_action_update_vf_cfg(guc, vfid, xe_guc_buf_flush(buf), num_dwords);
}
/*
* Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
* negative error code on failure.
*/
-static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
- const u32 *klvs, u32 num_dwords)
+static int pf_push_vf_buf_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
+ struct xe_guc_buf buf, u32 num_dwords)
{
int ret;
- xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));
-
- ret = pf_send_vf_cfg_klvs(gt, vfid, klvs, num_dwords);
+ ret = pf_send_vf_buf_klvs(gt, vfid, buf, num_dwords);
if (ret != num_klvs) {
int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
+ void *klvs = xe_guc_buf_cpu_ptr(buf);
struct drm_printer p = xe_gt_info_printer(gt);
char name[8];
@@ -125,13 +105,35 @@ static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs
if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
struct drm_printer p = xe_gt_info_printer(gt);
+ void *klvs = xe_guc_buf_cpu_ptr(buf);
+ char name[8];
+ xe_gt_sriov_info(gt, "pushed %s config with %u KLV%s:\n",
+ xe_sriov_function_name(vfid, name, sizeof(name)),
+ num_klvs, str_plural(num_klvs));
xe_guc_klv_print(klvs, num_dwords, &p);
}
return 0;
}
+/*
+ * Return: 0 on success, -ENOBUFS if no free buffer for the indirect data,
+ * negative error code on failure.
+ */
+static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
+ const u32 *klvs, u32 num_dwords)
+{
+ CLASS(xe_guc_buf_from_data, buf)(&gt->uc.guc.buf, klvs, num_dwords * sizeof(u32));
+
+ xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));
+
+ if (!xe_guc_buf_is_valid(buf))
+ return -ENOBUFS;
+
+ return pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords);
+}
+
static int pf_push_vf_cfg_u32(struct xe_gt *gt, unsigned int vfid, u16 key, u32 value)
{
u32 klv[] = {
@@ -262,7 +264,7 @@ static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool
n += encode_config_ggtt(cfg, config, details);
- if (details) {
+ if (details && config->num_ctxs) {
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
cfg[n++] = config->begin_ctx;
}
@@ -270,7 +272,7 @@ static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS);
cfg[n++] = config->num_ctxs;
- if (details) {
+ if (details && config->num_dbs) {
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
cfg[n++] = config->begin_db;
}
@@ -304,16 +306,17 @@ static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool
static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
- u32 max_cfg_dwords = SZ_4K / sizeof(u32);
+ u32 max_cfg_dwords = xe_guc_buf_cache_dwords(&gt->uc.guc.buf);
+ CLASS(xe_guc_buf, buf)(&gt->uc.guc.buf, max_cfg_dwords);
u32 num_dwords;
int num_klvs;
u32 *cfg;
int err;
- cfg = kcalloc(max_cfg_dwords, sizeof(u32), GFP_KERNEL);
- if (!cfg)
- return -ENOMEM;
+ if (!xe_guc_buf_is_valid(buf))
+ return -ENOBUFS;
+ cfg = xe_guc_buf_cpu_ptr(buf);
num_dwords = encode_config(cfg, config, true);
xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
@@ -330,12 +333,31 @@ static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
num_klvs = xe_guc_klv_count(cfg, num_dwords);
- err = pf_push_vf_cfg_klvs(gt, vfid, num_klvs, cfg, num_dwords);
+ err = pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords);
- kfree(cfg);
return err;
}
+static int pf_push_vf_cfg(struct xe_gt *gt, unsigned int vfid, bool reset)
+{
+ int err = 0;
+
+ xe_gt_assert(gt, vfid);
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (reset)
+ err = pf_send_vf_cfg_reset(gt, vfid);
+ if (!err)
+ err = pf_push_full_vf_config(gt, vfid);
+
+ return err;
+}
+
+static int pf_refresh_vf_cfg(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_push_vf_cfg(gt, vfid, true);
+}
+
static u64 pf_get_ggtt_alignment(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
@@ -432,6 +454,10 @@ static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
return err;
pf_release_vf_config_ggtt(gt, config);
+
+ err = pf_refresh_vf_cfg(gt, vfid);
+ if (unlikely(err))
+ return err;
}
xe_gt_assert(gt, !xe_ggtt_node_allocated(config->ggtt_region));
@@ -757,6 +783,10 @@ static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctx
return ret;
pf_release_config_ctxs(gt, config);
+
+ ret = pf_refresh_vf_cfg(gt, vfid);
+ if (unlikely(ret))
+ return ret;
}
if (!num_ctxs)
@@ -1054,6 +1084,10 @@ static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
return ret;
pf_release_config_dbs(gt, config);
+
+ ret = pf_refresh_vf_cfg(gt, vfid);
+ if (unlikely(ret))
+ return ret;
}
if (!num_dbs)
@@ -1302,7 +1336,7 @@ static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
struct xe_tile *tile;
unsigned int tid;
- xe_assert(xe, IS_DGFX(xe));
+ xe_assert(xe, xe_device_has_lmtt(xe));
xe_assert(xe, IS_SRIOV_PF(xe));
for_each_tile(tile, xe, tid) {
@@ -1323,7 +1357,7 @@ static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
unsigned int tid;
int err;
- xe_assert(xe, IS_DGFX(xe));
+ xe_assert(xe, xe_device_has_lmtt(xe));
xe_assert(xe, IS_SRIOV_PF(xe));
total = 0;
@@ -1400,7 +1434,8 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
if (unlikely(err))
return err;
- pf_reset_vf_lmtt(xe, vfid);
+ if (xe_device_has_lmtt(xe))
+ pf_reset_vf_lmtt(xe, vfid);
pf_release_vf_config_lmem(gt, config);
}
xe_gt_assert(gt, !config->lmem_obj);
@@ -1420,9 +1455,11 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
config->lmem_obj = bo;
- err = pf_update_vf_lmtt(xe, vfid);
- if (unlikely(err))
- goto release;
+ if (xe_device_has_lmtt(xe)) {
+ err = pf_update_vf_lmtt(xe, vfid);
+ if (unlikely(err))
+ goto release;
+ }
err = pf_push_vf_cfg_lmem(gt, vfid, bo->size);
if (unlikely(err))
@@ -1433,7 +1470,8 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
return 0;
reset_lmtt:
- pf_reset_vf_lmtt(xe, vfid);
+ if (xe_device_has_lmtt(xe))
+ pf_reset_vf_lmtt(xe, vfid);
release:
pf_release_vf_config_lmem(gt, config);
return err;
@@ -1526,7 +1564,7 @@ static u64 pf_query_free_lmem(struct xe_gt *gt)
{
struct xe_tile *tile = gt->tile;
- return xe_ttm_vram_get_avail(&tile->mem.vram_mgr->manager);
+ return xe_ttm_vram_get_avail(&tile->mem.vram.ttm.manager);
}
static u64 pf_query_max_lmem(struct xe_gt *gt)
@@ -1947,7 +1985,8 @@ static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
pf_release_vf_config_ggtt(gt, config);
if (IS_DGFX(xe)) {
pf_release_vf_config_lmem(gt, config);
- pf_update_vf_lmtt(xe, vfid);
+ if (xe_device_has_lmtt(xe))
+ pf_update_vf_lmtt(xe, vfid);
}
}
pf_release_config_ctxs(gt, config);
@@ -2085,10 +2124,7 @@ int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh
xe_gt_assert(gt, vfid);
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
- if (refresh)
- err = pf_send_vf_cfg_reset(gt, vfid);
- if (!err)
- err = pf_push_full_vf_config(gt, vfid);
+ err = pf_push_vf_cfg(gt, vfid, refresh);
mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
if (unlikely(err)) {
@@ -2320,6 +2356,35 @@ int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
return err;
}
+static void fini_config(void *arg)
+{
+ struct xe_gt *gt = arg;
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(xe);
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ for (n = 1; n <= total_vfs; n++)
+ pf_release_vf_config(gt, n);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+}
+
+/**
+ * xe_gt_sriov_pf_config_init - Initialize SR-IOV configuration data.
+ * @gt: the &xe_gt
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_gt_assert(gt, IS_SRIOV_PF(xe));
+
+ return devm_add_action_or_reset(xe->drm.dev, fini_config, gt);
+}
+
/**
* xe_gt_sriov_pf_config_restart - Restart SR-IOV configurations after a GT reset.
* @gt: the &xe_gt
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
index f894e9d4abba..513e6512a575 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
@@ -63,6 +63,7 @@ int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_config_init(struct xe_gt *gt);
void xe_gt_sriov_pf_config_restart(struct xe_gt *gt);
int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c
index c00fb354705f..4445f660e6d1 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c
@@ -10,6 +10,7 @@
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_policy.h"
#include "xe_gt_sriov_printk.h"
+#include "xe_guc_buf.h"
#include "xe_guc_ct.h"
#include "xe_guc_klv_helpers.h"
#include "xe_pm.h"
@@ -34,48 +35,28 @@ static int guc_action_update_vgt_policy(struct xe_guc *guc, u64 addr, u32 size)
* Return: number of KLVs that were successfully parsed and saved,
* negative error code on failure.
*/
-static int pf_send_policy_klvs(struct xe_gt *gt, const u32 *klvs, u32 num_dwords)
+static int pf_send_policy_klvs(struct xe_gt *gt, struct xe_guc_buf buf, u32 num_dwords)
{
- const u32 bytes = num_dwords * sizeof(u32);
- struct xe_tile *tile = gt_to_tile(gt);
- struct xe_device *xe = tile_to_xe(tile);
struct xe_guc *guc = &gt->uc.guc;
- struct xe_bo *bo;
- int ret;
-
- bo = xe_bo_create_pin_map(xe, tile, NULL,
- ALIGN(bytes, PAGE_SIZE),
- ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_GGTT);
- if (IS_ERR(bo))
- return PTR_ERR(bo);
-
- xe_map_memcpy_to(xe, &bo->vmap, 0, klvs, bytes);
- ret = guc_action_update_vgt_policy(guc, xe_bo_ggtt_addr(bo), num_dwords);
-
- xe_bo_unpin_map_no_vm(bo);
-
- return ret;
+ return guc_action_update_vgt_policy(guc, xe_guc_buf_flush(buf), num_dwords);
}
/*
* Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
* negative error code on failure.
*/
-static int pf_push_policy_klvs(struct xe_gt *gt, u32 num_klvs,
- const u32 *klvs, u32 num_dwords)
+static int pf_push_policy_buf_klvs(struct xe_gt *gt, u32 num_klvs,
+ struct xe_guc_buf buf, u32 num_dwords)
{
int ret;
- xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));
-
- ret = pf_send_policy_klvs(gt, klvs, num_dwords);
+ ret = pf_send_policy_klvs(gt, buf, num_dwords);
if (ret != num_klvs) {
int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
struct drm_printer p = xe_gt_info_printer(gt);
+ void *klvs = xe_guc_buf_cpu_ptr(buf);
xe_gt_sriov_notice(gt, "Failed to push %u policy KLV%s (%pe)\n",
num_klvs, str_plural(num_klvs), ERR_PTR(err));
@@ -86,6 +67,23 @@ static int pf_push_policy_klvs(struct xe_gt *gt, u32 num_klvs,
return 0;
}
+/*
+ * Return: 0 on success, -ENOBUFS if there is no free buffer for the indirect data,
+ * negative error code on failure.
+ */
+static int pf_push_policy_klvs(struct xe_gt *gt, u32 num_klvs,
+ const u32 *klvs, u32 num_dwords)
+{
+ CLASS(xe_guc_buf_from_data, buf)(&gt->uc.guc.buf, klvs, num_dwords * sizeof(u32));
+
+ xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));
+
+ if (!xe_guc_buf_is_valid(buf))
+ return -ENOBUFS;
+
+ return pf_push_policy_buf_klvs(gt, num_klvs, buf, num_dwords);
+}
+
static int pf_push_policy_u32(struct xe_gt *gt, u16 key, u32 value)
{
u32 klv[] = {
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
index 924e75b94aec..4efde5f46b43 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
@@ -114,7 +114,6 @@ static const struct xe_reg tgl_runtime_regs[] = {
GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
- TIMESTAMP_OVERRIDE, /* _MMIO(0x44074) */
};
static const struct xe_reg ats_m_runtime_regs[] = {
@@ -127,7 +126,6 @@ static const struct xe_reg ats_m_runtime_regs[] = {
XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */
CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
- TIMESTAMP_OVERRIDE, /* _MMIO(0x44074) */
};
static const struct xe_reg pvc_runtime_regs[] = {
@@ -140,7 +138,6 @@ static const struct xe_reg pvc_runtime_regs[] = {
XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
CTC_MODE, /* _MMIO(0xA26C) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
- TIMESTAMP_OVERRIDE, /* _MMIO(0x44074) */
};
static const struct xe_reg ver_1270_runtime_regs[] = {
@@ -155,7 +152,6 @@ static const struct xe_reg ver_1270_runtime_regs[] = {
XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
- TIMESTAMP_OVERRIDE, /* _MMIO(0x44074) */
};
static const struct xe_reg ver_2000_runtime_regs[] = {
@@ -173,14 +169,34 @@ static const struct xe_reg ver_2000_runtime_regs[] = {
XE2_GT_GEOMETRY_DSS_2, /* _MMIO(0x9154) */
CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
- TIMESTAMP_OVERRIDE, /* _MMIO(0x44074) */
+};
+
+static const struct xe_reg ver_3000_runtime_regs[] = {
+ RPM_CONFIG0, /* _MMIO(0x0d00) */
+ XEHP_FUSE4, /* _MMIO(0x9114) */
+ MIRROR_FUSE3, /* _MMIO(0x9118) */
+ MIRROR_FUSE1, /* _MMIO(0x911c) */
+ MIRROR_L3BANK_ENABLE, /* _MMIO(0x9130) */
+ XELP_EU_ENABLE, /* _MMIO(0x9134) */
+ XELP_GT_GEOMETRY_DSS_ENABLE, /* _MMIO(0x913c) */
+ GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
+ XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */
+ XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
+ XE2_GT_COMPUTE_DSS_2, /* _MMIO(0x914c) */
+ XE2_GT_GEOMETRY_DSS_1, /* _MMIO(0x9150) */
+ XE2_GT_GEOMETRY_DSS_2, /* _MMIO(0x9154) */
+ CTC_MODE, /* _MMIO(0xa26c) */
+ HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
};
static const struct xe_reg *pick_runtime_regs(struct xe_device *xe, unsigned int *count)
{
const struct xe_reg *regs;
- if (GRAPHICS_VERx100(xe) >= 2000) {
+ if (GRAPHICS_VERx100(xe) >= 3000) {
+ *count = ARRAY_SIZE(ver_3000_runtime_regs);
+ regs = ver_3000_runtime_regs;
+ } else if (GRAPHICS_VERx100(xe) >= 2000) {
*count = ARRAY_SIZE(ver_2000_runtime_regs);
regs = ver_2000_runtime_regs;
} else if (GRAPHICS_VERx100(xe) >= 1270) {
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
index 0426b1a77069..a64a6835ad65 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
@@ -36,7 +36,16 @@ struct xe_gt_sriov_metadata {
};
/**
+ * struct xe_gt_sriov_pf_workers - GT level workers used by the PF.
+ */
+struct xe_gt_sriov_pf_workers {
+ /** @restart: worker that executes actions post GT reset */
+ struct work_struct restart;
+};
+
+/**
* struct xe_gt_sriov_pf - GT level PF virtualization data.
+ * @workers: workers data.
* @service: service data.
* @control: control data.
* @policy: policy data.
@@ -45,6 +54,7 @@ struct xe_gt_sriov_metadata {
* @vfs: metadata for all VFs.
*/
struct xe_gt_sriov_pf {
+ struct xe_gt_sriov_pf_workers workers;
struct xe_gt_sriov_pf_service service;
struct xe_gt_sriov_pf_control control;
struct xe_gt_sriov_pf_policy policy;
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index cca5d5732802..a439261bf4d7 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -47,17 +47,40 @@ static int guc_action_vf_reset(struct xe_guc *guc)
return ret > 0 ? -EPROTO : ret;
}
+#define GUC_RESET_VF_STATE_RETRY_MAX 10
static int vf_reset_guc_state(struct xe_gt *gt)
{
+ unsigned int retry = GUC_RESET_VF_STATE_RETRY_MAX;
struct xe_guc *guc = &gt->uc.guc;
int err;
- err = guc_action_vf_reset(guc);
+ do {
+ err = guc_action_vf_reset(guc);
+ if (!err || err != -ETIMEDOUT)
+ break;
+ } while (--retry);
+
if (unlikely(err))
xe_gt_sriov_err(gt, "Failed to reset GuC state (%pe)\n", ERR_PTR(err));
return err;
}
+/**
+ * xe_gt_sriov_vf_reset - Reset GuC VF internal state.
+ * @gt: the &xe_gt
+ *
+ * It requires functional `GuC MMIO based communication`_.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_vf_reset(struct xe_gt *gt)
+{
+ if (!xe_device_uc_enabled(gt_to_xe(gt)))
+ return -ENODEV;
+
+ return vf_reset_guc_state(gt);
+}
+
static int guc_action_match_version(struct xe_guc *guc,
u32 wanted_branch, u32 wanted_major, u32 wanted_minor,
u32 *branch, u32 *major, u32 *minor, u32 *patch)
@@ -213,6 +236,9 @@ int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt)
{
int err;
+ if (!xe_device_uc_enabled(gt_to_xe(gt)))
+ return -ENODEV;
+
err = vf_reset_guc_state(gt);
if (unlikely(err))
return err;
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
index 912d20814261..ba6c5d74e326 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
@@ -12,6 +12,7 @@ struct drm_printer;
struct xe_gt;
struct xe_reg;
+int xe_gt_sriov_vf_reset(struct xe_gt *gt);
int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt);
int xe_gt_sriov_vf_query_config(struct xe_gt *gt);
int xe_gt_sriov_vf_connect(struct xe_gt *gt);
diff --git a/drivers/gpu/drm/xe/xe_gt_stats.c b/drivers/gpu/drm/xe/xe_gt_stats.c
index 7a6c1d808e41..6155ea354432 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats.c
+++ b/drivers/gpu/drm/xe/xe_gt_stats.c
@@ -23,11 +23,13 @@ void xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, int incr)
if (id >= __XE_GT_STATS_NUM_IDS)
return;
- atomic_add(incr, &gt->stats.counters[id]);
+ atomic64_add(incr, &gt->stats.counters[id]);
}
static const char *const stat_description[__XE_GT_STATS_NUM_IDS] = {
"tlb_inval_count",
+ "vma_pagefault_count",
+ "vma_pagefault_kb",
};
/**
@@ -42,8 +44,8 @@ int xe_gt_stats_print_info(struct xe_gt *gt, struct drm_printer *p)
enum xe_gt_stats_id id;
for (id = 0; id < __XE_GT_STATS_NUM_IDS; ++id)
- drm_printf(p, "%s: %d\n", stat_description[id],
- atomic_read(&gt->stats.counters[id]));
+ drm_printf(p, "%s: %lld\n", stat_description[id],
+ atomic64_read(&gt->stats.counters[id]));
return 0;
}
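A note on the counter widening: assuming the motivation is the new per-VMA pagefault size stat, a 32-bit counter in KiB would wrap after roughly 4 TiB of faulted memory, which the atomic64_t counters avoid. A trivial sketch of the same pattern:

#include <linux/atomic.h>

static atomic64_t faulted_kb = ATOMIC64_INIT(0);

static void account_fault(u64 bytes)
{
	atomic64_add(bytes / 1024, &faulted_kb);	/* KiB granularity, 64-bit wide */
}

static u64 faulted_kb_read(void)
{
	return atomic64_read(&faulted_kb);
}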
diff --git a/drivers/gpu/drm/xe/xe_gt_stats_types.h b/drivers/gpu/drm/xe/xe_gt_stats_types.h
index 2fc055e39f27..d556771f99d6 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_stats_types.h
@@ -8,6 +8,8 @@
enum xe_gt_stats_id {
XE_GT_STATS_ID_TLB_INVAL,
+ XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT,
+ XE_GT_STATS_ID_VMA_PAGEFAULT_KB,
/* must be the last entry */
__XE_GT_STATS_NUM_IDS,
};
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 0a93831c0a02..03072e094991 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -411,6 +411,28 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
}
/**
+ * xe_gt_tlb_invalidation_vm - Issue a TLB invalidation on this GT for a VM
+ * @gt: GT structure
+ * @vm: VM to invalidate
+ *
+ * Invalidate the entire VM's address space
+ */
+void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm)
+{
+ struct xe_gt_tlb_invalidation_fence fence;
+ u64 range = 1ull << vm->xe->info.va_bits;
+ int ret;
+
+ xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
+
+ ret = xe_gt_tlb_invalidation_range(gt, &fence, 0, range, vm->usm.asid);
+ if (ret < 0)
+ return;
+
+ xe_gt_tlb_invalidation_fence_wait(&fence);
+}
+
+/**
* xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
* @gt: GT structure
* @fence: invalidation fence which will be signal on TLB invalidation
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
index 672acfcdf0d7..abe9b03d543e 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
@@ -12,6 +12,7 @@
struct xe_gt;
struct xe_guc;
+struct xe_vm;
struct xe_vma;
int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt);
@@ -21,6 +22,7 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt);
int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
struct xe_vma *vma);
+void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm);
int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
u64 start, u64 end, u32 asid);
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c
index df2042db7ee6..516c81e3b8dd 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.c
+++ b/drivers/gpu/drm/xe/xe_gt_topology.c
@@ -129,7 +129,8 @@ static void
load_l3_bank_mask(struct xe_gt *gt, xe_l3_bank_mask_t l3_bank_mask)
{
struct xe_device *xe = gt_to_xe(gt);
- u32 fuse3 = xe_mmio_read32(&gt->mmio, MIRROR_FUSE3);
+ struct xe_mmio *mmio = &gt->mmio;
+ u32 fuse3 = xe_mmio_read32(mmio, MIRROR_FUSE3);
/*
* PTL platforms with media version 30.00 do not provide proper values
@@ -143,7 +144,16 @@ load_l3_bank_mask(struct xe_gt *gt, xe_l3_bank_mask_t l3_bank_mask)
if (XE_WA(gt, no_media_l3))
return;
- if (GRAPHICS_VER(xe) >= 20) {
+ if (GRAPHICS_VER(xe) >= 30) {
+ xe_l3_bank_mask_t per_node = {};
+ u32 meml3_en = REG_FIELD_GET(XE2_NODE_ENABLE_MASK, fuse3);
+ u32 mirror_l3bank_enable = xe_mmio_read32(mmio, MIRROR_L3BANK_ENABLE);
+ u32 bank_val = REG_FIELD_GET(XE3_L3BANK_ENABLE, mirror_l3bank_enable);
+
+ bitmap_from_arr32(per_node, &bank_val, 32);
+ gen_l3_mask_from_pattern(xe, l3_bank_mask, per_node, 32,
+ meml3_en);
+ } else if (GRAPHICS_VER(xe) >= 20) {
xe_l3_bank_mask_t per_node = {};
u32 meml3_en = REG_FIELD_GET(XE2_NODE_ENABLE_MASK, fuse3);
u32 bank_val = REG_FIELD_GET(XE2_GT_L3_MODE_MASK, fuse3);
@@ -155,7 +165,7 @@ load_l3_bank_mask(struct xe_gt *gt, xe_l3_bank_mask_t l3_bank_mask)
xe_l3_bank_mask_t per_node = {};
xe_l3_bank_mask_t per_mask_bit = {};
u32 meml3_en = REG_FIELD_GET(MEML3_EN_MASK, fuse3);
- u32 fuse4 = xe_mmio_read32(&gt->mmio, XEHP_FUSE4);
+ u32 fuse4 = xe_mmio_read32(mmio, XEHP_FUSE4);
u32 bank_val = REG_FIELD_GET(GT_L3_EXC_MASK, fuse4);
bitmap_set_value8(per_mask_bit, 0x3, 0);
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.h b/drivers/gpu/drm/xe/xe_gt_topology.h
index 746b325bbf6e..a72d26ba0653 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.h
+++ b/drivers/gpu/drm/xe/xe_gt_topology.h
@@ -25,6 +25,19 @@ void xe_gt_topology_init(struct xe_gt *gt);
void xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p);
+/**
+ * xe_gt_topology_mask_last_dss() - Returns the index of the last DSS in a mask.
+ * @mask: Input DSS mask
+ *
+ * Return: Index of the last DSS in the input DSS mask,
+ * XE_MAX_DSS_FUSE_BITS if DSS mask is empty.
+ */
+static inline unsigned int
+xe_gt_topology_mask_last_dss(const xe_dss_mask_t mask)
+{
+ return find_last_bit(mask, XE_MAX_DSS_FUSE_BITS);
+}
+
unsigned int
xe_dss_mask_group_ffs(const xe_dss_mask_t mask, int groupsize, int groupnum);
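A hedged usage sketch of the new helper: find_last_bit() returns the bitmap length when no bit is set, so XE_MAX_DSS_FUSE_BITS doubles as the empty-mask sentinel. The g_dss_mask field name in the caller is an assumption.

static bool example_gt_has_geometry_dss(struct xe_gt *gt)
{
	unsigned int last = xe_gt_topology_mask_last_dss(gt->fuse_topo.g_dss_mask);

	return last != XE_MAX_DSS_FUSE_BITS;	/* sentinel value means the mask is empty */
}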
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index 6e66bf0e8b3f..e3cfb026ac88 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -139,7 +139,7 @@ struct xe_gt {
/** @stats: GT stats */
struct {
/** @stats.counters: counters for various GT stats */
- atomic_t counters[__XE_GT_STATS_NUM_IDS];
+ atomic64_t counters[__XE_GT_STATS_NUM_IDS];
} stats;
#endif
@@ -413,6 +413,16 @@ struct xe_gt {
bool oob_initialized;
} wa_active;
+ /** @tuning_active: keep track of active tunings */
+ struct {
+ /** @tuning_active.gt: bitmap with active GT tunings */
+ unsigned long *gt;
+ /** @tuning_active.engine: bitmap with active engine tunings */
+ unsigned long *engine;
+ /** @tuning_active.lrc: bitmap with active LRC tunings */
+ unsigned long *lrc;
+ } tuning_active;
+
/** @user_engines: engines present in GT and available to userspace */
struct {
/**
@@ -430,6 +440,9 @@ struct xe_gt {
/** @oa: oa observation subsystem per gt info */
struct xe_oa_gt oa;
+
+ /** @eu_stall: EU stall counters subsystem per gt info */
+ struct xe_eu_stall_gt *eu_stall;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index 408365dfe4ee..bc5714a5b36b 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -23,9 +23,11 @@
#include "xe_gt_sriov_vf.h"
#include "xe_gt_throttle.h"
#include "xe_guc_ads.h"
+#include "xe_guc_buf.h"
#include "xe_guc_capture.h"
#include "xe_guc_ct.h"
#include "xe_guc_db_mgr.h"
+#include "xe_guc_engine_activity.h"
#include "xe_guc_hwconfig.h"
#include "xe_guc_log.h"
#include "xe_guc_pc.h"
@@ -743,6 +745,14 @@ int xe_guc_init_post_hwconfig(struct xe_guc *guc)
if (ret)
return ret;
+ ret = xe_guc_engine_activity_init(guc);
+ if (ret)
+ return ret;
+
+ ret = xe_guc_buf_cache_init(&guc->buf);
+ if (ret)
+ return ret;
+
return xe_guc_ads_init_post_hwconfig(&guc->ads);
}
@@ -1486,14 +1496,6 @@ void xe_guc_stop(struct xe_guc *guc)
int xe_guc_start(struct xe_guc *guc)
{
- if (!IS_SRIOV_VF(guc_to_xe(guc))) {
- int err;
-
- err = xe_guc_pc_start(&guc->pc);
- xe_gt_WARN(guc_to_gt(guc), err, "Failed to start GuC PC: %pe\n",
- ERR_PTR(err));
- }
-
return xe_guc_submit_start(guc);
}
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index fab259adc380..e7c9e095a19f 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -342,7 +342,7 @@ static void guc_waklv_init(struct xe_guc_ads *ads)
offset = guc_ads_waklv_offset(ads);
remain = guc_ads_waklv_size(ads);
- if (XE_WA(gt, 14019882105))
+ if (XE_WA(gt, 14019882105) || XE_WA(gt, 16021333562))
guc_waklv_enable_simple(ads,
GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED,
&offset, &remain);
diff --git a/drivers/gpu/drm/xe/xe_guc_buf.c b/drivers/gpu/drm/xe/xe_guc_buf.c
new file mode 100644
index 000000000000..0193c94dd6a0
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_buf.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <linux/cleanup.h>
+#include <drm/drm_managed.h>
+
+#include "xe_assert.h"
+#include "xe_bo.h"
+#include "xe_gt_printk.h"
+#include "xe_guc.h"
+#include "xe_guc_buf.h"
+#include "xe_sa.h"
+
+static struct xe_guc *cache_to_guc(struct xe_guc_buf_cache *cache)
+{
+ return container_of(cache, struct xe_guc, buf);
+}
+
+static struct xe_gt *cache_to_gt(struct xe_guc_buf_cache *cache)
+{
+ return guc_to_gt(cache_to_guc(cache));
+}
+
+/**
+ * xe_guc_buf_cache_init() - Initialize the GuC Buffer Cache.
+ * @cache: the &xe_guc_buf_cache to initialize
+ *
+ * The Buffer Cache allows obtaining a reusable buffer that can be used to pass
+ * indirect H2G data to GuC without the need to create an ad-hoc allocation.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache)
+{
+ struct xe_gt *gt = cache_to_gt(cache);
+ struct xe_sa_manager *sam;
+
+ /* XXX: currently it's useful only for the PF actions */
+ if (!IS_SRIOV_PF(gt_to_xe(gt)))
+ return 0;
+
+ sam = __xe_sa_bo_manager_init(gt_to_tile(gt), SZ_8K, 0, sizeof(u32));
+ if (IS_ERR(sam))
+ return PTR_ERR(sam);
+ cache->sam = sam;
+
+ xe_gt_dbg(gt, "reusable buffer with %u dwords at %#x for %ps\n",
+ xe_guc_buf_cache_dwords(cache), xe_bo_ggtt_addr(sam->bo),
+ __builtin_return_address(0));
+ return 0;
+}
+
+/**
+ * xe_guc_buf_cache_dwords() - Number of dwords the GuC Buffer Cache supports.
+ * @cache: the &xe_guc_buf_cache to query
+ *
+ * Return: the size of the largest reusable buffer (in dwords)
+ */
+u32 xe_guc_buf_cache_dwords(struct xe_guc_buf_cache *cache)
+{
+ return cache->sam ? cache->sam->base.size / sizeof(u32) : 0;
+}
+
+/**
+ * xe_guc_buf_reserve() - Reserve a new sub-allocation.
+ * @cache: the &xe_guc_buf_cache in which to reserve the sub-allocation
+ * @dwords: the requested size of the buffer in dwords
+ *
+ * Use xe_guc_buf_is_valid() to check if the returned buffer reference is valid.
+ * Must use xe_guc_buf_release() to release a sub-allocation.
+ *
+ * Return: a &xe_guc_buf of new sub-allocation.
+ */
+struct xe_guc_buf xe_guc_buf_reserve(struct xe_guc_buf_cache *cache, u32 dwords)
+{
+ struct drm_suballoc *sa;
+
+ if (cache->sam)
+ sa = __xe_sa_bo_new(cache->sam, dwords * sizeof(u32), GFP_ATOMIC);
+ else
+ sa = ERR_PTR(-EOPNOTSUPP);
+
+ return (struct xe_guc_buf){ .sa = sa };
+}
+
+/**
+ * xe_guc_buf_from_data() - Reserve a new sub-allocation using data.
+ * @cache: the &xe_guc_buf_cache in which to reserve the sub-allocation
+ * @data: the data to copy into the sub-allocation
+ * @size: the size of the data
+ *
+ * Similar to xe_guc_buf_reserve() but also copies @data into the sub-allocation.
+ *
+ * Return: a &xe_guc_buf of new sub-allocation.
+ */
+struct xe_guc_buf xe_guc_buf_from_data(struct xe_guc_buf_cache *cache,
+ const void *data, size_t size)
+{
+ struct drm_suballoc *sa;
+
+ sa = __xe_sa_bo_new(cache->sam, size, GFP_ATOMIC);
+ if (!IS_ERR(sa))
+ memcpy(xe_sa_bo_cpu_addr(sa), data, size);
+
+ return (struct xe_guc_buf){ .sa = sa };
+}
+
+/**
+ * xe_guc_buf_release() - Release a sub-allocation.
+ * @buf: the &xe_guc_buf to release
+ *
+ * Releases a sub-allocation reserved by the xe_guc_buf_reserve().
+ */
+void xe_guc_buf_release(const struct xe_guc_buf buf)
+{
+ if (xe_guc_buf_is_valid(buf))
+ xe_sa_bo_free(buf.sa, NULL);
+}
+
+/**
+ * xe_guc_buf_flush() - Copy the data from the sub-allocation to the GPU memory.
+ * @buf: the &xe_guc_buf to flush
+ *
+ * Return: a GPU address of the sub-allocation.
+ */
+u64 xe_guc_buf_flush(const struct xe_guc_buf buf)
+{
+ xe_sa_bo_flush_write(buf.sa);
+ return xe_sa_bo_gpu_addr(buf.sa);
+}
+
+/**
+ * xe_guc_buf_cpu_ptr() - Obtain a CPU pointer to the sub-allocation.
+ * @buf: the &xe_guc_buf to query
+ *
+ * Return: a CPU pointer of the sub-allocation.
+ */
+void *xe_guc_buf_cpu_ptr(const struct xe_guc_buf buf)
+{
+ return xe_sa_bo_cpu_addr(buf.sa);
+}
+
+/**
+ * xe_guc_buf_gpu_addr() - Obtain a GPU address of the sub-allocation.
+ * @buf: the &xe_guc_buf to query
+ *
+ * Return: a GPU address of the sub-allocation.
+ */
+u64 xe_guc_buf_gpu_addr(const struct xe_guc_buf buf)
+{
+ return xe_sa_bo_gpu_addr(buf.sa);
+}
+
+/**
+ * xe_guc_cache_gpu_addr_from_ptr() - Look up a GPU address using the pointer.
+ * @cache: the &xe_guc_buf_cache with sub-allocations
+ * @ptr: the CPU pointer of the sub-allocation
+ * @size: the size of the data
+ *
+ * Return: a GPU address on success or 0 if the pointer was unrelated.
+ */
+u64 xe_guc_cache_gpu_addr_from_ptr(struct xe_guc_buf_cache *cache, const void *ptr, u32 size)
+{
+ ptrdiff_t offset = ptr - cache->sam->cpu_ptr;
+
+ if (offset < 0 || offset + size > cache->sam->base.size)
+ return 0;
+
+ return cache->sam->gpu_addr + offset;
+}
+
+#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
+#include "tests/xe_guc_buf_kunit.c"
+#endif
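A hedged usage sketch of the explicit buffer lifecycle, using only the functions defined above; the vfid and the guc_action_update_vf_cfg() call stand in for whichever H2G action consumes the indirect data.

static int example_send_klvs(struct xe_guc *guc, const u32 *klvs, u32 num_dwords)
{
	struct xe_guc_buf buf = xe_guc_buf_reserve(&guc->buf, num_dwords);
	int ret;

	if (!xe_guc_buf_is_valid(buf))
		return -ENOBUFS;

	memcpy(xe_guc_buf_cpu_ptr(buf), klvs, num_dwords * sizeof(u32));

	/* xe_guc_buf_flush() returns the GGTT address GuC reads the data from */
	ret = guc_action_update_vf_cfg(guc, 1 /* vfid */, xe_guc_buf_flush(buf), num_dwords);

	xe_guc_buf_release(buf);
	return ret;
}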
diff --git a/drivers/gpu/drm/xe/xe_guc_buf.h b/drivers/gpu/drm/xe/xe_guc_buf.h
new file mode 100644
index 000000000000..0d67604d96bd
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_buf.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GUC_BUF_H_
+#define _XE_GUC_BUF_H_
+
+#include <linux/cleanup.h>
+#include <linux/err.h>
+
+#include "xe_guc_buf_types.h"
+
+int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache);
+u32 xe_guc_buf_cache_dwords(struct xe_guc_buf_cache *cache);
+struct xe_guc_buf xe_guc_buf_reserve(struct xe_guc_buf_cache *cache, u32 dwords);
+struct xe_guc_buf xe_guc_buf_from_data(struct xe_guc_buf_cache *cache,
+ const void *data, size_t size);
+void xe_guc_buf_release(const struct xe_guc_buf buf);
+
+/**
+ * xe_guc_buf_is_valid() - Check if a buffer reference is valid.
+ * @buf: the &xe_guc_buf reference to check
+ *
+ * Return: true if @buf represents a valid sub-allocation.
+ */
+static inline bool xe_guc_buf_is_valid(const struct xe_guc_buf buf)
+{
+ return !IS_ERR_OR_NULL(buf.sa);
+}
+
+void *xe_guc_buf_cpu_ptr(const struct xe_guc_buf buf);
+u64 xe_guc_buf_flush(const struct xe_guc_buf buf);
+u64 xe_guc_buf_gpu_addr(const struct xe_guc_buf buf);
+u64 xe_guc_cache_gpu_addr_from_ptr(struct xe_guc_buf_cache *cache, const void *ptr, u32 size);
+
+DEFINE_CLASS(xe_guc_buf, struct xe_guc_buf,
+ xe_guc_buf_release(_T),
+ xe_guc_buf_reserve(cache, num),
+ struct xe_guc_buf_cache *cache, u32 num);
+
+DEFINE_CLASS(xe_guc_buf_from_data, struct xe_guc_buf,
+ xe_guc_buf_release(_T),
+ xe_guc_buf_from_data(cache, data, size),
+ struct xe_guc_buf_cache *cache, const void *data, size_t size);
+
+#endif
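And the scoped-cleanup variant, mirroring how pf_push_vf_cfg_klvs() and pf_push_policy_klvs() use it earlier in this patch: CLASS(xe_guc_buf_from_data) reserves and fills the buffer on declaration and releases it automatically when the variable leaves scope. The policy action here is just a placeholder consumer.

static int example_push_klvs(struct xe_gt *gt, const u32 *klvs, u32 num_dwords)
{
	CLASS(xe_guc_buf_from_data, buf)(&gt->uc.guc.buf, klvs, num_dwords * sizeof(u32));

	if (!xe_guc_buf_is_valid(buf))
		return -ENOBUFS;	/* cache not initialized or no free space */

	/* placeholder H2G action consuming the flushed GGTT address */
	return guc_action_update_vgt_policy(&gt->uc.guc, xe_guc_buf_flush(buf), num_dwords);
}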
diff --git a/drivers/gpu/drm/xe/xe_guc_buf_types.h b/drivers/gpu/drm/xe/xe_guc_buf_types.h
new file mode 100644
index 000000000000..9e123d71c064
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_buf_types.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GUC_BUF_TYPES_H_
+#define _XE_GUC_BUF_TYPES_H_
+
+struct drm_suballoc;
+struct xe_sa_manager;
+
+/**
+ * struct xe_guc_buf_cache - GuC Data Buffer Cache.
+ */
+struct xe_guc_buf_cache {
+ /* private: internal sub-allocation manager */
+ struct xe_sa_manager *sam;
+};
+
+/**
+ * struct xe_guc_buf - GuC Data Buffer Reference.
+ */
+struct xe_guc_buf {
+ /* private: internal sub-allocation reference */
+ struct drm_suballoc *sa;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_debugfs.c b/drivers/gpu/drm/xe/xe_guc_debugfs.c
index 995b306aced7..c569ff456e74 100644
--- a/drivers/gpu/drm/xe/xe_guc_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_guc_debugfs.c
@@ -13,6 +13,7 @@
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_guc_log.h"
+#include "xe_guc_pc.h"
#include "xe_macros.h"
#include "xe_pm.h"
@@ -47,6 +48,18 @@ static int guc_log(struct seq_file *m, void *data)
return 0;
}
+static int guc_log_dmesg(struct seq_file *m, void *data)
+{
+ struct xe_guc *guc = node_to_guc(m->private);
+ struct xe_device *xe = guc_to_xe(guc);
+
+ xe_pm_runtime_get(xe);
+ xe_guc_log_print_dmesg(&guc->log);
+ xe_pm_runtime_put(xe);
+
+ return 0;
+}
+
static int guc_ctb(struct seq_file *m, void *data)
{
struct xe_guc *guc = node_to_guc(m->private);
@@ -60,10 +73,25 @@ static int guc_ctb(struct seq_file *m, void *data)
return 0;
}
+static int guc_pc(struct seq_file *m, void *data)
+{
+ struct xe_guc *guc = node_to_guc(m->private);
+ struct xe_device *xe = guc_to_xe(guc);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ xe_pm_runtime_get(xe);
+ xe_guc_pc_print(&guc->pc, &p);
+ xe_pm_runtime_put(xe);
+
+ return 0;
+}
+
static const struct drm_info_list debugfs_list[] = {
{"guc_info", guc_info, 0},
{"guc_log", guc_log, 0},
+ {"guc_log_dmesg", guc_log_dmesg, 0},
{"guc_ctb", guc_ctb, 0},
+ {"guc_pc", guc_pc, 0},
};
void xe_guc_debugfs_register(struct xe_guc *guc, struct dentry *parent)
diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity.c b/drivers/gpu/drm/xe/xe_guc_engine_activity.c
new file mode 100644
index 000000000000..2a457dcf31d5
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_engine_activity.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "abi/guc_actions_abi.h"
+#include "regs/xe_gt_regs.h"
+
+#include "xe_bo.h"
+#include "xe_force_wake.h"
+#include "xe_gt_printk.h"
+#include "xe_guc.h"
+#include "xe_guc_engine_activity.h"
+#include "xe_guc_ct.h"
+#include "xe_hw_engine.h"
+#include "xe_map.h"
+#include "xe_mmio.h"
+#include "xe_trace_guc.h"
+
+#define TOTAL_QUANTA 0x8000
+
+static struct iosys_map engine_activity_map(struct xe_guc *guc, struct xe_hw_engine *hwe)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct engine_activity_buffer *buffer = &engine_activity->device_buffer;
+ u16 guc_class = xe_engine_class_to_guc_class(hwe->class);
+ size_t offset;
+
+ offset = offsetof(struct guc_engine_activity_data,
+ engine_activity[guc_class][hwe->logical_instance]);
+
+ return IOSYS_MAP_INIT_OFFSET(&buffer->activity_bo->vmap, offset);
+}
+
+static struct iosys_map engine_metadata_map(struct xe_guc *guc)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct engine_activity_buffer *buffer = &engine_activity->device_buffer;
+
+ return buffer->metadata_bo->vmap;
+}
+
+static int allocate_engine_activity_group(struct xe_guc *guc)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct xe_device *xe = guc_to_xe(guc);
+ u32 num_activity_group = 1; /* Will be modified for VF */
+
+ engine_activity->eag = drmm_kcalloc(&xe->drm, num_activity_group,
+ sizeof(struct engine_activity_group), GFP_KERNEL);
+
+ if (!engine_activity->eag)
+ return -ENOMEM;
+
+ engine_activity->num_activity_group = num_activity_group;
+
+ return 0;
+}
+
+static int allocate_engine_activity_buffers(struct xe_guc *guc,
+ struct engine_activity_buffer *buffer)
+{
+ u32 metadata_size = sizeof(struct guc_engine_activity_metadata);
+ u32 size = sizeof(struct guc_engine_activity_data);
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_bo *bo, *metadata_bo;
+
+ metadata_bo = xe_bo_create_pin_map(gt_to_xe(gt), tile, NULL, PAGE_ALIGN(metadata_size),
+ ttm_bo_type_kernel, XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE);
+
+ if (IS_ERR(metadata_bo))
+ return PTR_ERR(metadata_bo);
+
+ bo = xe_bo_create_pin_map(gt_to_xe(gt), tile, NULL, PAGE_ALIGN(size),
+ ttm_bo_type_kernel, XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE);
+
+ if (IS_ERR(bo)) {
+ xe_bo_unpin_map_no_vm(metadata_bo);
+ return PTR_ERR(bo);
+ }
+
+ buffer->metadata_bo = metadata_bo;
+ buffer->activity_bo = bo;
+ return 0;
+}
+
+static void free_engine_activity_buffers(struct engine_activity_buffer *buffer)
+{
+ xe_bo_unpin_map_no_vm(buffer->metadata_bo);
+ xe_bo_unpin_map_no_vm(buffer->activity_bo);
+}
+
+static bool is_engine_activity_supported(struct xe_guc *guc)
+{
+ struct xe_uc_fw_version *version = &guc->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY];
+ struct xe_uc_fw_version required = { 1, 14, 1 };
+ struct xe_gt *gt = guc_to_gt(guc);
+
+ if (IS_SRIOV_VF(gt_to_xe(gt))) {
+ xe_gt_info(gt, "engine activity stats not supported on VFs\n");
+ return false;
+ }
+
+ /* engine activity stats are supported from GuC interface version (1.14.1) */
+ if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER_STRUCT(required)) {
+ xe_gt_info(gt,
+ "engine activity stats unsupported in GuC interface v%u.%u.%u, need v%u.%u.%u or higher\n",
+ version->major, version->minor, version->patch, required.major,
+ required.minor, required.patch);
+ return false;
+ }
+
+ return true;
+}
+
+static struct engine_activity *hw_engine_to_engine_activity(struct xe_hw_engine *hwe)
+{
+ struct xe_guc *guc = &hwe->gt->uc.guc;
+ struct engine_activity_group *eag = &guc->engine_activity.eag[0];
+ u16 guc_class = xe_engine_class_to_guc_class(hwe->class);
+
+ return &eag->engine[guc_class][hwe->logical_instance];
+}
+
+static u64 cpu_ns_to_guc_tsc_tick(ktime_t ns, u32 freq)
+{
+ return mul_u64_u32_div(ns, freq, NSEC_PER_SEC);
+}
+
+#define read_engine_activity_record(xe_, map_, field_) \
+ xe_map_rd_field(xe_, map_, 0, struct guc_engine_activity, field_)
+
+#define read_metadata_record(xe_, map_, field_) \
+ xe_map_rd_field(xe_, map_, 0, struct guc_engine_activity_metadata, field_)
+
+static u64 get_engine_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
+{
+ struct engine_activity *ea = hw_engine_to_engine_activity(hwe);
+ struct guc_engine_activity *cached_activity = &ea->activity;
+ struct guc_engine_activity_metadata *cached_metadata = &ea->metadata;
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct iosys_map activity_map, metadata_map;
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
+ u32 last_update_tick, global_change_num;
+ u64 active_ticks, gpm_ts;
+ u16 change_num;
+
+ activity_map = engine_activity_map(guc, hwe);
+ metadata_map = engine_metadata_map(guc);
+ global_change_num = read_metadata_record(xe, &metadata_map, global_change_num);
+
+ /* GuC has not initialized activity data yet, return 0 */
+ if (!global_change_num)
+ goto update;
+
+ if (global_change_num == cached_metadata->global_change_num)
+ goto update;
+
+ cached_metadata->global_change_num = global_change_num;
+ change_num = read_engine_activity_record(xe, &activity_map, change_num);
+
+ if (!change_num || change_num == cached_activity->change_num)
+ goto update;
+
+ /* read engine activity values */
+ last_update_tick = read_engine_activity_record(xe, &activity_map, last_update_tick);
+ active_ticks = read_engine_activity_record(xe, &activity_map, active_ticks);
+
+ /* activity calculations */
+ ea->running = !!last_update_tick;
+ ea->total += active_ticks - cached_activity->active_ticks;
+ ea->active = 0;
+
+ /* cache the counter */
+ cached_activity->change_num = change_num;
+ cached_activity->last_update_tick = last_update_tick;
+ cached_activity->active_ticks = active_ticks;
+
+update:
+ if (ea->running) {
+ gpm_ts = xe_mmio_read64_2x32(&gt->mmio, MISC_STATUS_0) >>
+ engine_activity->gpm_timestamp_shift;
+ ea->active = lower_32_bits(gpm_ts) - cached_activity->last_update_tick;
+ }
+
+ trace_xe_guc_engine_activity(xe, ea, hwe->name, hwe->instance);
+
+ return ea->total + ea->active;
+}
+
+static u64 get_engine_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
+{
+ struct engine_activity *ea = hw_engine_to_engine_activity(hwe);
+ struct guc_engine_activity_metadata *cached_metadata = &ea->metadata;
+ struct guc_engine_activity *cached_activity = &ea->activity;
+ struct iosys_map activity_map, metadata_map;
+ struct xe_device *xe = guc_to_xe(guc);
+ ktime_t now, cpu_delta;
+ u64 numerator;
+ u16 quanta_ratio;
+
+ activity_map = engine_activity_map(guc, hwe);
+ metadata_map = engine_metadata_map(guc);
+
+ if (!cached_metadata->guc_tsc_frequency_hz)
+ cached_metadata->guc_tsc_frequency_hz = read_metadata_record(xe, &metadata_map,
+ guc_tsc_frequency_hz);
+
+ quanta_ratio = read_engine_activity_record(xe, &activity_map, quanta_ratio);
+ cached_activity->quanta_ratio = quanta_ratio;
+
+ /* Total ticks calculations */
+ now = ktime_get();
+ cpu_delta = now - ea->last_cpu_ts;
+ ea->last_cpu_ts = now;
+ numerator = (ea->quanta_remainder_ns + cpu_delta) * cached_activity->quanta_ratio;
+ ea->quanta_ns += numerator / TOTAL_QUANTA;
+ ea->quanta_remainder_ns = numerator % TOTAL_QUANTA;
+ ea->quanta = cpu_ns_to_guc_tsc_tick(ea->quanta_ns, cached_metadata->guc_tsc_frequency_hz);
+
+ trace_xe_guc_engine_activity(xe, ea, hwe->name, hwe->instance);
+
+ return ea->quanta;
+}
+
+static int enable_engine_activity_stats(struct xe_guc *guc)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct engine_activity_buffer *buffer = &engine_activity->device_buffer;
+ u32 action[] = {
+ XE_GUC_ACTION_SET_DEVICE_ENGINE_ACTIVITY_BUFFER,
+ xe_bo_ggtt_addr(buffer->metadata_bo),
+ 0,
+ xe_bo_ggtt_addr(buffer->activity_bo),
+ 0,
+ };
+
+ /* Blocking here to ensure the buffers are ready before reading them */
+ return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
+}
+
+static void engine_activity_set_cpu_ts(struct xe_guc *guc)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct engine_activity_group *eag = &engine_activity->eag[0];
+ int i, j;
+
+ for (i = 0; i < GUC_MAX_ENGINE_CLASSES; i++)
+ for (j = 0; j < GUC_MAX_INSTANCES_PER_CLASS; j++)
+ eag->engine[i][j].last_cpu_ts = ktime_get();
+}
+
+static u32 gpm_timestamp_shift(struct xe_gt *gt)
+{
+ u32 reg;
+
+ reg = xe_mmio_read32(&gt->mmio, RPM_CONFIG0);
+
+ return 3 - REG_FIELD_GET(RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, reg);
+}
+
+/**
+ * xe_guc_engine_activity_active_ticks - Get engine active ticks
+ * @guc: The GuC object
+ * @hwe: The hw_engine object
+ *
+ * Return: accumulated ticks @hwe was active since engine activity stats were enabled.
+ */
+u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
+{
+ if (!xe_guc_engine_activity_supported(guc))
+ return 0;
+
+ return get_engine_active_ticks(guc, hwe);
+}
+
+/**
+ * xe_guc_engine_activity_total_ticks - Get engine total ticks
+ * @guc: The GuC object
+ * @hwe: The hw_engine object
+ *
+ * Return: accumulated quanta of ticks allocated for the engine
+ */
+u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
+{
+ if (!xe_guc_engine_activity_supported(guc))
+ return 0;
+
+ return get_engine_total_ticks(guc, hwe);
+}
+
+/**
+ * xe_guc_engine_activity_supported - Check support for engine activity stats
+ * @guc: The GuC object
+ *
+ * Engine activity stats are supported from GuC interface version (1.14.1)
+ *
+ * Return: true if engine activity stats are supported, false otherwise
+ */
+bool xe_guc_engine_activity_supported(struct xe_guc *guc)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+
+ return engine_activity->supported;
+}
+
+/**
+ * xe_guc_engine_activity_enable_stats - Enable engine activity stats
+ * @guc: The GuC object
+ *
+ * Enable engine activity stats and set initial timestamps
+ */
+void xe_guc_engine_activity_enable_stats(struct xe_guc *guc)
+{
+ int ret;
+
+ if (!xe_guc_engine_activity_supported(guc))
+ return;
+
+ ret = enable_engine_activity_stats(guc);
+ if (ret)
+ xe_gt_err(guc_to_gt(guc), "failed to enable activity stats %d\n", ret);
+ else
+ engine_activity_set_cpu_ts(guc);
+}
+
+static void engine_activity_fini(void *arg)
+{
+ struct xe_guc_engine_activity *engine_activity = arg;
+ struct engine_activity_buffer *buffer = &engine_activity->device_buffer;
+
+ free_engine_activity_buffers(buffer);
+}
+
+/**
+ * xe_guc_engine_activity_init - Initialize the engine activity data
+ * @guc: The GuC object
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int xe_guc_engine_activity_init(struct xe_guc *guc)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct xe_gt *gt = guc_to_gt(guc);
+ int ret;
+
+ engine_activity->supported = is_engine_activity_supported(guc);
+ if (!engine_activity->supported)
+ return 0;
+
+ ret = allocate_engine_activity_group(guc);
+ if (ret) {
+ xe_gt_err(gt, "failed to allocate engine activity group (%pe)\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ ret = allocate_engine_activity_buffers(guc, &engine_activity->device_buffer);
+ if (ret) {
+ xe_gt_err(gt, "failed to allocate engine activity buffers (%pe)\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ engine_activity->gpm_timestamp_shift = gpm_timestamp_shift(gt);
+
+ return devm_add_action_or_reset(gt_to_xe(gt)->drm.dev, engine_activity_fini,
+ engine_activity);
+}
diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity.h b/drivers/gpu/drm/xe/xe_guc_engine_activity.h
new file mode 100644
index 000000000000..a042d4cb404c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_engine_activity.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_GUC_ENGINE_ACTIVITY_H_
+#define _XE_GUC_ENGINE_ACTIVITY_H_
+
+#include <linux/types.h>
+
+struct xe_hw_engine;
+struct xe_guc;
+
+int xe_guc_engine_activity_init(struct xe_guc *guc);
+bool xe_guc_engine_activity_supported(struct xe_guc *guc);
+void xe_guc_engine_activity_enable_stats(struct xe_guc *guc);
+u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe);
+u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe);
+#endif
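A hedged consumer sketch: busyness as a percentage of the quanta the engine was allotted, using only the two accessors declared above; the sampling cadence and the div64_u64() rounding choice are assumptions.

#include <linux/math64.h>

static u64 example_engine_busy_pct(struct xe_guc *guc, struct xe_hw_engine *hwe)
{
	u64 active = xe_guc_engine_activity_active_ticks(guc, hwe);
	u64 total = xe_guc_engine_activity_total_ticks(guc, hwe);

	return total ? div64_u64(active * 100, total) : 0;
}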
diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h b/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h
new file mode 100644
index 000000000000..5cdd034b6b70
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_GUC_ENGINE_ACTIVITY_TYPES_H_
+#define _XE_GUC_ENGINE_ACTIVITY_TYPES_H_
+
+#include <linux/types.h>
+
+#include "xe_guc_fwif.h"
+/**
+ * struct engine_activity - Engine specific activity data
+ *
+ * Contains engine specific activity data and snapshot of the
+ * structures from GuC
+ */
+struct engine_activity {
+ /** @active: current activity */
+ u64 active;
+
+ /** @last_cpu_ts: cpu timestamp in nsec of previous sample */
+ u64 last_cpu_ts;
+
+ /** @quanta: total quanta used on HW */
+ u64 quanta;
+
+ /** @quanta_ns: total quanta_ns used on HW */
+ u64 quanta_ns;
+
+ /**
+ * @quanta_remainder_ns: remainder when the CPU time is scaled as
+ * per the quanta_ratio. This remainder is used in subsequent
+ * quanta calculations.
+ */
+ u64 quanta_remainder_ns;
+
+ /** @total: total engine activity */
+ u64 total;
+
+ /** @running: true if engine is running some work */
+ bool running;
+
+ /** @metadata: snapshot of engine activity metadata */
+ struct guc_engine_activity_metadata metadata;
+
+ /** @activity: snapshot of engine activity counter */
+ struct guc_engine_activity activity;
+};
+
+/**
+ * struct engine_activity_group - Activity data for all engines
+ */
+struct engine_activity_group {
+ /** @engine: engine specific activity data */
+ struct engine_activity engine[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
+};
+
+/**
+ * struct engine_activity_buffer - engine activity buffers
+ *
+ * This contains the buffers allocated for metadata and activity data
+ */
+struct engine_activity_buffer {
+ /** @activity_bo: object allocated to hold activity data */
+ struct xe_bo *activity_bo;
+
+ /** @metadata_bo: object allocated to hold activity metadata */
+ struct xe_bo *metadata_bo;
+};
+
+/**
+ * struct xe_guc_engine_activity - Data used by engine activity implementation
+ */
+struct xe_guc_engine_activity {
+ /** @gpm_timestamp_shift: Right shift value for the gpm timestamp */
+ u32 gpm_timestamp_shift;
+
+ /** @num_activity_group: number of activity groups */
+ u32 num_activity_group;
+
+ /** @supported: indicates support for engine activity stats */
+ bool supported;
+
+ /** @eag: holds the device level engine activity data */
+ struct engine_activity_group *eag;
+
+ /** @device_buffer: buffer object for global engine activity */
+ struct engine_activity_buffer device_buffer;
+};
+#endif
+
diff --git a/drivers/gpu/drm/xe/xe_guc_fwif.h b/drivers/gpu/drm/xe/xe_guc_fwif.h
index 057153f89b30..6f57578b07cb 100644
--- a/drivers/gpu/drm/xe/xe_guc_fwif.h
+++ b/drivers/gpu/drm/xe/xe_guc_fwif.h
@@ -208,6 +208,25 @@ struct guc_engine_usage {
struct guc_engine_usage_record engines[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
} __packed;
+/* Engine Activity stats */
+struct guc_engine_activity {
+ u16 change_num;
+ u16 quanta_ratio;
+ u32 last_update_tick;
+ u64 active_ticks;
+} __packed;
+
+struct guc_engine_activity_data {
+ struct guc_engine_activity engine_activity[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
+} __packed;
+
+struct guc_engine_activity_metadata {
+ u32 guc_tsc_frequency_hz;
+ u32 lag_latency_usec;
+ u32 global_change_num;
+ u32 reserved;
+} __packed;
+
/* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */
enum xe_guc_recv_message {
XE_GUC_RECV_MSG_CRASH_DUMP_POSTED = BIT(1),
diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c
index 0ca3056d8bd3..80514a446ba2 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.c
+++ b/drivers/gpu/drm/xe/xe_guc_log.c
@@ -149,16 +149,12 @@ struct xe_guc_log_snapshot *xe_guc_log_snapshot_capture(struct xe_guc_log *log,
size_t remain;
int i;
- if (!log->bo) {
- xe_gt_err(gt, "GuC log buffer not allocated\n");
+ if (!log->bo)
return NULL;
- }
snapshot = xe_guc_log_snapshot_alloc(log, atomic);
- if (!snapshot) {
- xe_gt_err(gt, "GuC log snapshot not allocated\n");
+ if (!snapshot)
return NULL;
- }
remain = snapshot->size;
for (i = 0; i < snapshot->num_chunks; i++) {
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index df7f130fb663..85215313976c 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -6,8 +6,10 @@
#include "xe_guc_pc.h"
#include <linux/delay.h>
+#include <linux/ktime.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include <generated/xe_wa_oob.h>
#include "abi/guc_actions_slpc_abi.h"
@@ -19,6 +21,7 @@
#include "xe_gt.h"
#include "xe_gt_idle.h"
#include "xe_gt_printk.h"
+#include "xe_gt_throttle.h"
#include "xe_gt_types.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
@@ -49,6 +52,9 @@
#define LNL_MERT_FREQ_CAP 800
#define BMG_MERT_FREQ_CAP 2133
+#define SLPC_RESET_TIMEOUT_MS 5 /* roughly 5ms, but no need for precision */
+#define SLPC_RESET_EXTENDED_TIMEOUT_MS 1000 /* To be used only at pc_start */
+
/**
* DOC: GuC Power Conservation (PC)
*
@@ -113,9 +119,10 @@ static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc)
FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))
static int wait_for_pc_state(struct xe_guc_pc *pc,
- enum slpc_global_state state)
+ enum slpc_global_state state,
+ int timeout_ms)
{
- int timeout_us = 5000; /* rought 5ms, but no need for precision */
+ int timeout_us = 1000 * timeout_ms;
int slept, wait = 10;
xe_device_assert_mem_access(pc_to_xe(pc));
@@ -164,7 +171,8 @@ static int pc_action_query_task_state(struct xe_guc_pc *pc)
};
int ret;
- if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+ SLPC_RESET_TIMEOUT_MS))
return -EAGAIN;
/* Blocking here to ensure the results are ready before reading them */
@@ -187,7 +195,8 @@ static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
};
int ret;
- if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+ SLPC_RESET_TIMEOUT_MS))
return -EAGAIN;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
@@ -208,7 +217,8 @@ static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id)
struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
int ret;
- if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+ SLPC_RESET_TIMEOUT_MS))
return -EAGAIN;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
@@ -362,16 +372,17 @@ static void tgl_update_rpa_value(struct xe_guc_pc *pc)
u32 reg;
/*
- * For PVC we still need to use fused RP1 as the approximation for RPe
- * For other platforms than PVC we get the resolved RPe directly from
+ * For PVC we still need to use fused RP0 as the approximation for RPa
+ * For other platforms than PVC we get the resolved RPa directly from
* PCODE at a different register
*/
- if (xe->info.platform == XE_PVC)
+ if (xe->info.platform == XE_PVC) {
reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
- else
+ pc->rpa_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
+ } else {
reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
-
- pc->rpa_freq = REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
+ pc->rpa_freq = REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
+ }
}
static void tgl_update_rpe_value(struct xe_guc_pc *pc)
@@ -385,12 +396,13 @@ static void tgl_update_rpe_value(struct xe_guc_pc *pc)
* For other platforms than PVC we get the resolved RPe directly from
* PCODE at a different register
*/
- if (xe->info.platform == XE_PVC)
+ if (xe->info.platform == XE_PVC) {
reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
- else
+ pc->rpe_freq = REG_FIELD_GET(RP1_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
+ } else {
reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
-
- pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
+ pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
+ }
}
static void pc_update_rp_values(struct xe_guc_pc *pc)
@@ -440,6 +452,15 @@ u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
return freq;
}
+static u32 get_cur_freq(struct xe_gt *gt)
+{
+ u32 freq;
+
+ freq = xe_mmio_read32(&gt->mmio, RPNSWREQ);
+ freq = REG_FIELD_GET(REQ_RATIO_MASK, freq);
+ return decode_freq(freq);
+}
+
/**
* xe_guc_pc_get_cur_freq - Get Current requested frequency
* @pc: The GuC PC
@@ -463,10 +484,7 @@ int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
return -ETIMEDOUT;
}
- *freq = xe_mmio_read32(&gt->mmio, RPNSWREQ);
-
- *freq = REG_FIELD_GET(REQ_RATIO_MASK, *freq);
- *freq = decode_freq(*freq);
+ *freq = get_cur_freq(gt);
xe_force_wake_put(gt_to_fw(gt), fw_ref);
return 0;
@@ -992,6 +1010,17 @@ out:
return ret;
}
+static int pc_action_set_strategy(struct xe_guc_pc *pc, u32 val)
+{
+ int ret = 0;
+
+ ret = pc_action_set_param(pc,
+ SLPC_PARAM_STRATEGIES,
+ val);
+
+ return ret;
+}
+
/**
* xe_guc_pc_start - Start GuC's Power Conservation component
* @pc: Xe_GuC_PC instance
@@ -1002,6 +1031,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
struct xe_gt *gt = pc_to_gt(pc);
u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
unsigned int fw_ref;
+ ktime_t earlier;
int ret;
xe_gt_assert(gt, xe_device_uc_enabled(xe));
@@ -1026,14 +1056,25 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
memset(pc->bo->vmap.vaddr, 0, size);
slpc_shared_data_write(pc, header.size, size);
+ earlier = ktime_get();
ret = pc_action_reset(pc);
if (ret)
goto out;
- if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) {
- xe_gt_err(gt, "GuC PC Start failed\n");
- ret = -EIO;
- goto out;
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+ SLPC_RESET_TIMEOUT_MS)) {
+ xe_gt_warn(gt, "GuC PC start taking longer than normal [freq = %dMHz (req = %dMHz), perf_limit_reasons = 0x%08X]\n",
+ xe_guc_pc_get_act_freq(pc), get_cur_freq(gt),
+ xe_gt_throttle_get_limit_reasons(gt));
+
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+ SLPC_RESET_EXTENDED_TIMEOUT_MS)) {
+ xe_gt_err(gt, "GuC PC Start failed: Dynamic GT frequency control and GT sleep states are now disabled.\n");
+ goto out;
+ }
+
+ xe_gt_warn(gt, "GuC PC excessive start time: %lldms",
+ ktime_ms_delta(ktime_get(), earlier));
}
ret = pc_init_freqs(pc);
@@ -1051,6 +1092,11 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
}
ret = pc_action_setup_gucrc(pc, GUCRC_FIRMWARE_CONTROL);
+ if (ret)
+ goto out;
+
+ /* Enable SLPC Optimized Strategy for compute */
+ ret = pc_action_set_strategy(pc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);
out:
xe_force_wake_put(gt_to_fw(gt), fw_ref);
@@ -1131,3 +1177,61 @@ int xe_guc_pc_init(struct xe_guc_pc *pc)
return devm_add_action_or_reset(xe->drm.dev, xe_guc_pc_fini_hw, pc);
}
+
+static const char *pc_get_state_string(struct xe_guc_pc *pc)
+{
+ switch (slpc_shared_data_read(pc, header.global_state)) {
+ case SLPC_GLOBAL_STATE_NOT_RUNNING:
+ return "not running";
+ case SLPC_GLOBAL_STATE_INITIALIZING:
+ return "initializing";
+ case SLPC_GLOBAL_STATE_RESETTING:
+ return "resetting";
+ case SLPC_GLOBAL_STATE_RUNNING:
+ return "running";
+ case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
+ return "shutting down";
+ case SLPC_GLOBAL_STATE_ERROR:
+ return "error";
+ default:
+ return "unknown";
+ }
+}
+
+/**
+ * xe_guc_pc_print - Print GuC's Power Conservation information for debug
+ * @pc: Xe_GuC_PC instance
+ * @p: drm_printer
+ */
+void xe_guc_pc_print(struct xe_guc_pc *pc, struct drm_printer *p)
+{
+ drm_printf(p, "SLPC Shared Data Header:\n");
+ drm_printf(p, "\tSize: %x\n", slpc_shared_data_read(pc, header.size));
+ drm_printf(p, "\tGlobal State: %s\n", pc_get_state_string(pc));
+
+ if (pc_action_query_task_state(pc))
+ return;
+
+ drm_printf(p, "\nSLPC Tasks Status:\n");
+ drm_printf(p, "\tGTPERF enabled: %s\n",
+ str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
+ SLPC_GTPERF_TASK_ENABLED));
+ drm_printf(p, "\tDCC enabled: %s\n",
+ str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
+ SLPC_DCC_TASK_ENABLED));
+ drm_printf(p, "\tDCC in use: %s\n",
+ str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
+ SLPC_IN_DCC));
+ drm_printf(p, "\tBalancer enabled: %s\n",
+ str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
+ SLPC_BALANCER_ENABLED));
+ drm_printf(p, "\tIBC enabled: %s\n",
+ str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
+ SLPC_IBC_TASK_ENABLED));
+ drm_printf(p, "\tBalancer IA LMT enabled: %s\n",
+ str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
+ SLPC_BALANCER_IA_LMT_ENABLED));
+ drm_printf(p, "\tBalancer IA LMT active: %s\n",
+ str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
+ SLPC_BALANCER_IA_LMT_ACTIVE));
+}
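
The reworked xe_guc_pc_start() above polls for SLPC_GLOBAL_STATE_RUNNING with the normal 5 ms budget first, warns, and only then falls back to a 1 s extended wait, logging how long the slow path actually took. A minimal, self-contained sketch of that retry-with-extended-timeout pattern follows; the polling and state helpers are stand-ins, not xe or GuC code.

#include <stdbool.h>
#include <stdio.h>

#define SLPC_RESET_TIMEOUT_MS		5
#define SLPC_RESET_EXTENDED_TIMEOUT_MS	1000

/* Stand-in for the SLPC state poll: pretend it needs ~40 ms to reach RUNNING. */
static int elapsed_ms;

static bool pc_state_running(void)
{
	return elapsed_ms >= 40;
}

/* Fake poll loop: "sleeps" 10 ms per iteration by bumping the counter. */
static int wait_for_pc_running(int timeout_ms)
{
	for (int waited = 0; waited < timeout_ms; waited += 10, elapsed_ms += 10)
		if (pc_state_running())
			return 0;
	return pc_state_running() ? 0 : -1;
}

int main(void)
{
	if (wait_for_pc_running(SLPC_RESET_TIMEOUT_MS)) {
		fprintf(stderr, "PC start taking longer than normal\n");

		if (wait_for_pc_running(SLPC_RESET_EXTENDED_TIMEOUT_MS)) {
			fprintf(stderr, "PC start failed\n");
			return 1;
		}

		fprintf(stderr, "PC excessive start time: ~%d ms\n", elapsed_ms);
	}
	return 0;
}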
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.h b/drivers/gpu/drm/xe/xe_guc_pc.h
index 619f59cd633c..39102b79602f 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.h
+++ b/drivers/gpu/drm/xe/xe_guc_pc.h
@@ -10,6 +10,7 @@
struct xe_guc_pc;
enum slpc_gucrc_mode;
+struct drm_printer;
int xe_guc_pc_init(struct xe_guc_pc *pc);
int xe_guc_pc_start(struct xe_guc_pc *pc);
@@ -17,6 +18,7 @@ int xe_guc_pc_stop(struct xe_guc_pc *pc);
int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc);
int xe_guc_pc_override_gucrc_mode(struct xe_guc_pc *pc, enum slpc_gucrc_mode mode);
int xe_guc_pc_unset_gucrc_mode(struct xe_guc_pc *pc);
+void xe_guc_pc_print(struct xe_guc_pc *pc, struct drm_printer *p);
u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc);
int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq);
diff --git a/drivers/gpu/drm/xe/xe_guc_relay.c b/drivers/gpu/drm/xe/xe_guc_relay.c
index 8f62de026724..e5dc94f3e618 100644
--- a/drivers/gpu/drm/xe/xe_guc_relay.c
+++ b/drivers/gpu/drm/xe/xe_guc_relay.c
@@ -225,7 +225,7 @@ __relay_get_transaction(struct xe_guc_relay *relay, bool incoming, u32 remote, u
* with CTB lock held which is marked as used in the reclaim path.
* Btw, that's one of the reason why we use mempool here!
*/
- txn = mempool_alloc(&relay->pool, incoming ? GFP_ATOMIC : GFP_KERNEL);
+ txn = mempool_alloc(&relay->pool, incoming ? GFP_ATOMIC : GFP_NOWAIT);
if (!txn)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index b6a2dd742ebd..31bc2022bfc2 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -15,6 +15,7 @@
#include <drm/drm_managed.h>
#include "abi/guc_actions_abi.h"
+#include "abi/guc_actions_slpc_abi.h"
#include "abi/guc_klvs_abi.h"
#include "regs/xe_lrc_layout.h"
#include "xe_assert.h"
@@ -400,6 +401,7 @@ static void __guc_exec_queue_policy_add_##func(struct exec_queue_policy *policy,
MAKE_EXEC_QUEUE_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM)
MAKE_EXEC_QUEUE_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT)
MAKE_EXEC_QUEUE_POLICY_ADD(priority, SCHEDULING_PRIORITY)
+MAKE_EXEC_QUEUE_POLICY_ADD(slpc_exec_queue_freq_req, SLPM_GT_FREQUENCY)
#undef MAKE_EXEC_QUEUE_POLICY_ADD
static const int xe_exec_queue_prio_to_guc[] = {
@@ -414,14 +416,20 @@ static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
struct exec_queue_policy policy;
enum xe_exec_queue_priority prio = q->sched_props.priority;
u32 timeslice_us = q->sched_props.timeslice_us;
+ u32 slpc_exec_queue_freq_req = 0;
u32 preempt_timeout_us = q->sched_props.preempt_timeout_us;
xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
+ if (q->flags & EXEC_QUEUE_FLAG_LOW_LATENCY)
+ slpc_exec_queue_freq_req |= SLPC_CTX_FREQ_REQ_IS_COMPUTE;
+
__guc_exec_queue_policy_start_klv(&policy, q->guc->id);
__guc_exec_queue_policy_add_priority(&policy, xe_exec_queue_prio_to_guc[prio]);
__guc_exec_queue_policy_add_execution_quantum(&policy, timeslice_us);
__guc_exec_queue_policy_add_preemption_timeout(&policy, preempt_timeout_us);
+ __guc_exec_queue_policy_add_slpc_exec_queue_freq_req(&policy,
+ slpc_exec_queue_freq_req);
xe_guc_ct_send(&guc->ct, (u32 *)&policy.h2g,
__guc_exec_queue_policy_action_size(&policy), 0, 0);
@@ -1246,11 +1254,11 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
xe_pm_runtime_get(guc_to_xe(guc));
trace_xe_exec_queue_destroy(q);
+ release_guc_id(guc, q);
if (xe_exec_queue_is_lr(q))
cancel_work_sync(&ge->lr_tdr);
/* Confirm no work left behind accessing device structures */
cancel_delayed_work_sync(&ge->sched.base.work_tdr);
- release_guc_id(guc, q);
xe_sched_entity_fini(&ge->entity);
xe_sched_fini(&ge->sched);
diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
index 83a41ebcdc91..63bac64429a5 100644
--- a/drivers/gpu/drm/xe/xe_guc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_types.h
@@ -11,7 +11,9 @@
#include "regs/xe_reg_defs.h"
#include "xe_guc_ads_types.h"
+#include "xe_guc_buf_types.h"
#include "xe_guc_ct_types.h"
+#include "xe_guc_engine_activity_types.h"
#include "xe_guc_fwif.h"
#include "xe_guc_log_types.h"
#include "xe_guc_pc_types.h"
@@ -58,6 +60,8 @@ struct xe_guc {
struct xe_guc_ads ads;
/** @ct: GuC ct */
struct xe_guc_ct ct;
+ /** @buf: GuC Buffer Cache manager */
+ struct xe_guc_buf_cache buf;
/** @capture: the error-state-capture module's data and objects */
struct xe_guc_state_capture *capture;
/** @pc: GuC Power Conservation */
@@ -100,6 +104,9 @@ struct xe_guc {
/** @relay: GuC Relay Communication used in SR-IOV */
struct xe_guc_relay relay;
+ /** @engine_activity: Device specific engine activity */
+ struct xe_guc_engine_activity engine_activity;
+
/**
* @notify_reg: Register which is written to notify GuC of H2G messages
*/
diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.c b/drivers/gpu/drm/xe/xe_heci_gsc.c
index d765bfd3636b..27d11e06a82b 100644
--- a/drivers/gpu/drm/xe/xe_heci_gsc.c
+++ b/drivers/gpu/drm/xe/xe_heci_gsc.c
@@ -12,6 +12,7 @@
#include "xe_drv.h"
#include "xe_heci_gsc.h"
#include "xe_platform_types.h"
+#include "xe_survivability_mode.h"
#define GSC_BAR_LENGTH 0x00000FFC
@@ -88,12 +89,9 @@ static void heci_gsc_release_dev(struct device *dev)
kfree(adev);
}
-void xe_heci_gsc_fini(struct xe_device *xe)
+static void xe_heci_gsc_fini(void *arg)
{
- struct xe_heci_gsc *heci_gsc = &xe->heci_gsc;
-
- if (!xe->info.has_heci_gscfi && !xe->info.has_heci_cscfi)
- return;
+ struct xe_heci_gsc *heci_gsc = arg;
if (heci_gsc->adev) {
struct auxiliary_device *aux_dev = &heci_gsc->adev->aux_dev;
@@ -105,6 +103,7 @@ void xe_heci_gsc_fini(struct xe_device *xe)
if (heci_gsc->irq >= 0)
irq_free_desc(heci_gsc->irq);
+
heci_gsc->irq = -1;
}
@@ -171,14 +170,14 @@ static int heci_gsc_add_device(struct xe_device *xe, const struct heci_gsc_def *
return ret;
}
-void xe_heci_gsc_init(struct xe_device *xe)
+int xe_heci_gsc_init(struct xe_device *xe)
{
struct xe_heci_gsc *heci_gsc = &xe->heci_gsc;
- const struct heci_gsc_def *def;
+ const struct heci_gsc_def *def = NULL;
int ret;
if (!xe->info.has_heci_gscfi && !xe->info.has_heci_cscfi)
- return;
+ return 0;
heci_gsc->irq = -1;
@@ -190,29 +189,24 @@ void xe_heci_gsc_init(struct xe_device *xe)
def = &heci_gsc_def_dg2;
} else if (xe->info.platform == XE_DG1) {
def = &heci_gsc_def_dg1;
- } else {
- drm_warn_once(&xe->drm, "Unknown platform\n");
- return;
}
- if (!def->name) {
- drm_warn_once(&xe->drm, "HECI is not implemented!\n");
- return;
+ if (!def || !def->name) {
+ drm_warn(&xe->drm, "HECI is not implemented!\n");
+ return 0;
}
- if (!def->use_polling) {
+ ret = devm_add_action_or_reset(xe->drm.dev, xe_heci_gsc_fini, heci_gsc);
+ if (ret)
+ return ret;
+
+ if (!def->use_polling && !xe_survivability_mode_is_enabled(xe)) {
ret = heci_gsc_irq_setup(xe);
if (ret)
- goto fail;
+ return ret;
}
- ret = heci_gsc_add_device(xe, def);
- if (ret)
- goto fail;
-
- return;
-fail:
- xe_heci_gsc_fini(xe);
+ return heci_gsc_add_device(xe, def);
}
void xe_heci_gsc_irq_handler(struct xe_device *xe, u32 iir)
diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.h b/drivers/gpu/drm/xe/xe_heci_gsc.h
index 48b3b1838045..745eb6783942 100644
--- a/drivers/gpu/drm/xe/xe_heci_gsc.h
+++ b/drivers/gpu/drm/xe/xe_heci_gsc.h
@@ -33,8 +33,7 @@ struct xe_heci_gsc {
int irq;
};
-void xe_heci_gsc_init(struct xe_device *xe);
-void xe_heci_gsc_fini(struct xe_device *xe);
+int xe_heci_gsc_init(struct xe_device *xe);
void xe_heci_gsc_irq_handler(struct xe_device *xe, u32 iir);
void xe_heci_csc_irq_handler(struct xe_device *xe, u32 iir);
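
The heci_gsc change above (like the hwmon, OA and MMIO tile changes later in this series) moves teardown to devm_add_action_or_reset(), registered before the resources are created, so error paths and driver unbind share one cleanup and the callback must tolerate partially-initialized state. The toy model below illustrates that ordering only; it re-implements a miniature action list in plain C and does not use or describe the real devres API.

#include <stdio.h>
#include <stdlib.h>

typedef void (*action_fn)(void *);

struct action {
	action_fn fn;
	void *arg;
	struct action *next;
};

static struct action *actions;

/* Register a cleanup action; on allocation failure run it immediately. */
static int add_action_or_reset(action_fn fn, void *arg)
{
	struct action *a = malloc(sizeof(*a));

	if (!a) {
		fn(arg);
		return -1;
	}
	a->fn = fn;
	a->arg = arg;
	a->next = actions;
	actions = a;
	return 0;
}

/* Stand-in for driver unbind: run every registered action. */
static void unbind(void)
{
	while (actions) {
		struct action *a = actions;

		actions = a->next;
		a->fn(a->arg);
		free(a);
	}
}

/* Cleanup tolerates partially-initialized state, as xe_heci_gsc_fini() must. */
struct gsc { int irq; int have_adev; };

static void gsc_fini(void *arg)
{
	struct gsc *gsc = arg;

	if (gsc->have_adev)
		printf("removing aux device\n");
	if (gsc->irq >= 0)
		printf("freeing irq %d\n", gsc->irq);
	gsc->irq = -1;
}

int main(void)
{
	static struct gsc gsc = { .irq = -1 };

	/* Register teardown before the resources exist, as the patch does. */
	if (add_action_or_reset(gsc_fini, &gsc))
		return 1;

	gsc.irq = 42;	/* pretend irq setup succeeded */
	/* pretend the later add_device step failed: just fall through */
	unbind();	/* the registered action still runs at teardown */
	return 0;
}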
diff --git a/drivers/gpu/drm/xe/xe_hmm.c b/drivers/gpu/drm/xe/xe_hmm.c
index 392102515f3d..c3cc0fa105e8 100644
--- a/drivers/gpu/drm/xe/xe_hmm.c
+++ b/drivers/gpu/drm/xe/xe_hmm.c
@@ -138,13 +138,17 @@ static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
i += size;
if (unlikely(j == st->nents - 1)) {
+ xe_assert(xe, i >= npages);
if (i > npages)
size -= (i - npages);
+
sg_mark_end(sgl);
+ } else {
+ xe_assert(xe, i < npages);
}
+
sg_set_page(sgl, page, size << PAGE_SHIFT, 0);
}
- xe_assert(xe, i == npages);
return dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index fc447751fe78..223b95de388c 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -400,10 +400,9 @@ xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
PREEMPT_GPGPU_THREAD_GROUP_LEVEL)),
XE_RTP_ENTRY_FLAG(FOREACH_ENGINE)
},
- {}
};
- xe_rtp_process_to_sr(&ctx, lrc_setup, &hwe->reg_lrc);
+ xe_rtp_process_to_sr(&ctx, lrc_setup, ARRAY_SIZE(lrc_setup), &hwe->reg_lrc);
}
static void
@@ -459,10 +458,9 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe)
XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0), CS_PRIORITY_MEM_READ,
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
- {}
};
- xe_rtp_process_to_sr(&ctx, engine_entries, &hwe->reg_sr);
+ xe_rtp_process_to_sr(&ctx, engine_entries, ARRAY_SIZE(engine_entries), &hwe->reg_sr);
}
static const struct engine_info *find_engine_info(enum xe_engine_class class, int instance)
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.c b/drivers/gpu/drm/xe/xe_hw_engine_group.c
index 82750520a90a..2d68c5b5262a 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_group.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine_group.c
@@ -178,6 +178,7 @@ err_suspend:
up_write(&group->mode_sem);
return err;
}
+ALLOW_ERROR_INJECTION(xe_hw_engine_group_add_exec_queue, ERRNO);
/**
* xe_hw_engine_group_del_exec_queue() - Delete an exec queue from a hw engine group
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index fde56dad3ab7..48d80ffdf7bb 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -6,6 +6,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/hwmon.h>
#include <linux/types.h>
+#include <linux/units.h>
#include <drm/drm_managed.h>
#include "regs/xe_gt_regs.h"
@@ -20,6 +21,7 @@
#include "xe_pm.h"
enum xe_hwmon_reg {
+ REG_TEMP,
REG_PKG_RAPL_LIMIT,
REG_PKG_POWER_SKU,
REG_PKG_POWER_SKU_UNIT,
@@ -36,6 +38,7 @@ enum xe_hwmon_reg_operation {
enum xe_hwmon_channel {
CHANNEL_CARD,
CHANNEL_PKG,
+ CHANNEL_VRAM,
CHANNEL_MAX,
};
@@ -84,6 +87,19 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
struct xe_device *xe = hwmon->xe;
switch (hwmon_reg) {
+ case REG_TEMP:
+ if (xe->info.platform == XE_BATTLEMAGE) {
+ if (channel == CHANNEL_PKG)
+ return BMG_PACKAGE_TEMPERATURE;
+ else if (channel == CHANNEL_VRAM)
+ return BMG_VRAM_TEMPERATURE;
+ } else if (xe->info.platform == XE_DG2) {
+ if (channel == CHANNEL_PKG)
+ return PCU_CR_PACKAGE_TEMPERATURE;
+ else if (channel == CHANNEL_VRAM)
+ return BMG_VRAM_TEMPERATURE;
+ }
+ break;
case REG_PKG_RAPL_LIMIT:
if (xe->info.platform == XE_BATTLEMAGE) {
if (channel == CHANNEL_PKG)
@@ -431,6 +447,8 @@ static const struct attribute_group *hwmon_groups[] = {
};
static const struct hwmon_channel_info * const hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL,
HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT | HWMON_P_LABEL),
HWMON_CHANNEL_INFO(curr, HWMON_C_LABEL, HWMON_C_CRIT | HWMON_C_LABEL),
@@ -507,6 +525,36 @@ static void xe_hwmon_get_voltage(struct xe_hwmon *hwmon, int channel, long *valu
}
static umode_t
+xe_hwmon_temp_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
+{
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_label:
+ return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_TEMP, channel)) ? 0444 : 0;
+ default:
+ return 0;
+ }
+}
+
+static int
+xe_hwmon_temp_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
+{
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
+ u64 reg_val;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_TEMP, channel));
+
+ /* HW register value is in degrees Celsius, convert to millidegrees. */
+ *val = REG_FIELD_GET(TEMP_MASK, reg_val) * MILLIDEGREE_PER_DEGREE;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t
xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
u32 uval;
@@ -667,6 +715,9 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
xe_pm_runtime_get(hwmon->xe);
switch (type) {
+ case hwmon_temp:
+ ret = xe_hwmon_temp_is_visible(hwmon, attr, channel);
+ break;
case hwmon_power:
ret = xe_hwmon_power_is_visible(hwmon, attr, channel);
break;
@@ -699,6 +750,9 @@ xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
xe_pm_runtime_get(hwmon->xe);
switch (type) {
+ case hwmon_temp:
+ ret = xe_hwmon_temp_read(hwmon, attr, channel, val);
+ break;
case hwmon_power:
ret = xe_hwmon_power_read(hwmon, attr, channel, val);
break;
@@ -752,6 +806,12 @@ static int xe_hwmon_read_label(struct device *dev,
u32 attr, int channel, const char **str)
{
switch (type) {
+ case hwmon_temp:
+ if (channel == CHANNEL_PKG)
+ *str = "pkg";
+ else if (channel == CHANNEL_VRAM)
+ *str = "vram";
+ return 0;
case hwmon_power:
case hwmon_energy:
case hwmon_curr:
@@ -779,10 +839,9 @@ static const struct hwmon_chip_info hwmon_chip_info = {
};
static void
-xe_hwmon_get_preregistration_info(struct xe_device *xe)
+xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon)
{
- struct xe_mmio *mmio = xe_root_tile_mmio(xe);
- struct xe_hwmon *hwmon = xe->hwmon;
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
long energy;
u64 val_sku_unit = 0;
int channel;
@@ -816,33 +875,34 @@ static void xe_hwmon_mutex_destroy(void *arg)
mutex_destroy(&hwmon->hwmon_lock);
}
-void xe_hwmon_register(struct xe_device *xe)
+int xe_hwmon_register(struct xe_device *xe)
{
struct device *dev = xe->drm.dev;
struct xe_hwmon *hwmon;
+ int ret;
/* hwmon is available only for dGfx */
if (!IS_DGFX(xe))
- return;
+ return 0;
/* hwmon is not available on VFs */
if (IS_SRIOV_VF(xe))
- return;
+ return 0;
hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
if (!hwmon)
- return;
-
- xe->hwmon = hwmon;
+ return -ENOMEM;
mutex_init(&hwmon->hwmon_lock);
- if (devm_add_action_or_reset(dev, xe_hwmon_mutex_destroy, hwmon))
- return;
+ ret = devm_add_action_or_reset(dev, xe_hwmon_mutex_destroy, hwmon);
+ if (ret)
+ return ret;
/* There's only one instance of hwmon per device */
hwmon->xe = xe;
+ xe->hwmon = hwmon;
- xe_hwmon_get_preregistration_info(xe);
+ xe_hwmon_get_preregistration_info(hwmon);
drm_dbg(&xe->drm, "Register xe hwmon interface\n");
@@ -850,11 +910,12 @@ void xe_hwmon_register(struct xe_device *xe)
hwmon->hwmon_dev = devm_hwmon_device_register_with_info(dev, "xe", hwmon,
&hwmon_chip_info,
hwmon_groups);
-
if (IS_ERR(hwmon->hwmon_dev)) {
- drm_warn(&xe->drm, "Failed to register xe hwmon (%pe)\n", hwmon->hwmon_dev);
+ drm_err(&xe->drm, "Failed to register xe hwmon (%pe)\n", hwmon->hwmon_dev);
xe->hwmon = NULL;
- return;
+ return PTR_ERR(hwmon->hwmon_dev);
}
+
+ return 0;
}
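
The new hwmon temperature path reads the register, extracts the TEMP_MASK field (whole degrees Celsius) and scales it by MILLIDEGREE_PER_DEGREE for the hwmon ABI. A standalone check of just that conversion; the 8-bit field layout here is an assumption for illustration, since the real TEMP_MASK lives in the xe register headers, not in this patch.

#include <stdint.h>
#include <stdio.h>

/* Assumed layout for illustration only: temperature in the low 8 bits. */
#define TEMP_MASK		0xffu
#define MILLIDEGREE_PER_DEGREE	1000

static long temp_millicelsius(uint32_t reg_val)
{
	return (long)(reg_val & TEMP_MASK) * MILLIDEGREE_PER_DEGREE;
}

int main(void)
{
	/* 0x2e in the field -> 46 degC -> 46000, as hwmon reports it */
	printf("%ld\n", temp_millicelsius(0x0000002e));
	return 0;
}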
diff --git a/drivers/gpu/drm/xe/xe_hwmon.h b/drivers/gpu/drm/xe/xe_hwmon.h
index c42a1de2cd7a..d02c1bfe8c0a 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.h
+++ b/drivers/gpu/drm/xe/xe_hwmon.h
@@ -11,9 +11,9 @@
struct xe_device;
#if IS_REACHABLE(CONFIG_HWMON)
-void xe_hwmon_register(struct xe_device *xe);
+int xe_hwmon_register(struct xe_device *xe);
#else
-static inline void xe_hwmon_register(struct xe_device *xe) { };
+static inline int xe_hwmon_register(struct xe_device *xe) { return 0; };
#endif
#endif /* _XE_HWMON_H_ */
diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
index 08552ee3fb94..5362d3174b06 100644
--- a/drivers/gpu/drm/xe/xe_irq.c
+++ b/drivers/gpu/drm/xe/xe_irq.c
@@ -20,6 +20,7 @@
#include "xe_hw_engine.h"
#include "xe_memirq.h"
#include "xe_mmio.h"
+#include "xe_pxp.h"
#include "xe_sriov.h"
/*
@@ -208,6 +209,15 @@ void xe_irq_enable_hwe(struct xe_gt *gt)
}
if (heci_mask)
xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~(heci_mask << 16));
+
+ if (xe_pxp_is_supported(xe)) {
+ u32 kcr_mask = KCR_PXP_STATE_TERMINATED_INTERRUPT |
+ KCR_APP_TERMINATED_PER_FW_REQ_INTERRUPT |
+ KCR_PXP_STATE_RESET_COMPLETE_INTERRUPT;
+
+ xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_ENABLE, kcr_mask << 16);
+ xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_MASK, ~(kcr_mask << 16));
+ }
}
}
@@ -330,9 +340,15 @@ static void gt_irq_handler(struct xe_tile *tile,
}
if (class == XE_ENGINE_CLASS_OTHER) {
- /* HECI GSCFI interrupts come from outside of GT */
+ /*
+ * HECI GSCFI interrupts come from outside of GT.
+ * KCR irqs come from inside GT but are handled
+ * by the global PXP subsystem.
+ */
if (xe->info.has_heci_gscfi && instance == OTHER_GSC_INSTANCE)
xe_heci_gsc_irq_handler(xe, intr_vec);
+ else if (instance == OTHER_KCR_INSTANCE)
+ xe_pxp_irq_handler(xe, intr_vec);
else
gt_other_irq_handler(engine_gt, instance, intr_vec);
}
@@ -510,6 +526,8 @@ static void gt_irq_reset(struct xe_tile *tile)
xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, 0);
xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~0);
xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~0);
+ xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_ENABLE, 0);
+ xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_MASK, ~0);
}
xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_ENABLE, 0);
diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c
index a60ceae4c6dd..89393dcb53d9 100644
--- a/drivers/gpu/drm/xe/xe_lmtt.c
+++ b/drivers/gpu/drm/xe/xe_lmtt.c
@@ -164,7 +164,7 @@ int xe_lmtt_init(struct xe_lmtt *lmtt)
lmtt_assert(lmtt, IS_SRIOV_PF(xe));
lmtt_assert(lmtt, !lmtt->ops);
- if (!IS_DGFX(xe))
+ if (!xe_device_has_lmtt(xe))
return 0;
if (xe_has_multi_level_lmtt(xe))
@@ -486,7 +486,7 @@ u64 xe_lmtt_estimate_pt_size(struct xe_lmtt *lmtt, u64 size)
u64 pt_size;
lmtt_assert(lmtt, IS_SRIOV_PF(lmtt_to_xe(lmtt)));
- lmtt_assert(lmtt, IS_DGFX(lmtt_to_xe(lmtt)));
+ lmtt_assert(lmtt, xe_device_has_lmtt(lmtt_to_xe(lmtt)));
lmtt_assert(lmtt, lmtt->ops);
pt_size = PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) *
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index bbb9ffbf6367..df3ceddede07 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -883,7 +883,8 @@ static void xe_lrc_finish(struct xe_lrc *lrc)
#define PVC_CTX_ACC_CTR_THOLD (0x2a + 1)
static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
- struct xe_vm *vm, u32 ring_size, u16 msix_vec)
+ struct xe_vm *vm, u32 ring_size, u16 msix_vec,
+ u32 init_flags)
{
struct xe_gt *gt = hwe->gt;
struct xe_tile *tile = gt_to_tile(gt);
@@ -979,6 +980,16 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
RING_CTL_SIZE(lrc->ring.size) | RING_VALID);
}
+ if (init_flags & XE_LRC_CREATE_RUNALONE)
+ xe_lrc_write_ctx_reg(lrc, CTX_CONTEXT_CONTROL,
+ xe_lrc_read_ctx_reg(lrc, CTX_CONTEXT_CONTROL) |
+ _MASKED_BIT_ENABLE(CTX_CTRL_RUN_ALONE));
+
+ if (init_flags & XE_LRC_CREATE_PXP)
+ xe_lrc_write_ctx_reg(lrc, CTX_CONTEXT_CONTROL,
+ xe_lrc_read_ctx_reg(lrc, CTX_CONTEXT_CONTROL) |
+ _MASKED_BIT_ENABLE(CTX_CTRL_PXP_ENABLE));
+
xe_lrc_write_ctx_reg(lrc, CTX_TIMESTAMP, 0);
if (xe->info.has_asid && vm)
@@ -1021,6 +1032,7 @@ err_lrc_finish:
* @vm: The VM (address space)
* @ring_size: LRC ring size
* @msix_vec: MSI-X interrupt vector (for platforms that support it)
+ * @flags: LRC initialization flags
*
* Allocate and initialize the Logical Ring Context (LRC).
*
@@ -1028,7 +1040,7 @@ err_lrc_finish:
* upon failure.
*/
struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
- u32 ring_size, u16 msix_vec)
+ u32 ring_size, u16 msix_vec, u32 flags)
{
struct xe_lrc *lrc;
int err;
@@ -1037,7 +1049,7 @@ struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
if (!lrc)
return ERR_PTR(-ENOMEM);
- err = xe_lrc_init(lrc, hwe, vm, ring_size, msix_vec);
+ err = xe_lrc_init(lrc, hwe, vm, ring_size, msix_vec, flags);
if (err) {
kfree(lrc);
return ERR_PTR(err);
diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h
index 4206e6a8b50a..0b40f349ab95 100644
--- a/drivers/gpu/drm/xe/xe_lrc.h
+++ b/drivers/gpu/drm/xe/xe_lrc.h
@@ -39,10 +39,13 @@ struct xe_lrc_snapshot {
u32 ctx_job_timestamp;
};
-#define LRC_PPHWSP_SCRATCH_ADDR (0x34 * 4)
+#define LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR (0x34 * 4)
+#define LRC_PPHWSP_PXP_INVAL_SCRATCH_ADDR (0x40 * 4)
+#define XE_LRC_CREATE_RUNALONE 0x1
+#define XE_LRC_CREATE_PXP 0x2
struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
- u32 ring_size, u16 msix_vec);
+ u32 ring_size, u16 msix_vec, u32 flags);
void xe_lrc_destroy(struct kref *ref);
/**
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 278bc96cf593..df4282c71bf0 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -1544,6 +1544,181 @@ void xe_migrate_wait(struct xe_migrate *m)
dma_fence_wait(m->fence, false);
}
+static u32 pte_update_cmd_size(u64 size)
+{
+ u32 num_dword;
+ u64 entries = DIV_ROUND_UP(size, XE_PAGE_SIZE);
+
+ XE_WARN_ON(size > MAX_PREEMPTDISABLE_TRANSFER);
+ /*
+ * The MI_STORE_DATA_IMM command is used to update the page table. Each
+ * instruction can update at most 0x1ff PTE entries. To update
+ * n (n <= 0x1ff) PTE entries, we need:
+ * 1 dword for the MI_STORE_DATA_IMM command header (opcode etc.)
+ * 2 dwords for the page table's physical location
+ * 2*n dwords for the PTE values to fill (each PTE entry is 2 dwords)
+ */
+ num_dword = (1 + 2) * DIV_ROUND_UP(entries, 0x1ff);
+ num_dword += entries * 2;
+
+ return num_dword;
+}
+
+static void build_pt_update_batch_sram(struct xe_migrate *m,
+ struct xe_bb *bb, u32 pt_offset,
+ dma_addr_t *sram_addr, u32 size)
+{
+ u16 pat_index = tile_to_xe(m->tile)->pat.idx[XE_CACHE_WB];
+ u32 ptes;
+ int i = 0;
+
+ ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
+ while (ptes) {
+ u32 chunk = min(0x1ffU, ptes);
+
+ bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
+ bb->cs[bb->len++] = pt_offset;
+ bb->cs[bb->len++] = 0;
+
+ pt_offset += chunk * 8;
+ ptes -= chunk;
+
+ while (chunk--) {
+ u64 addr = sram_addr[i++] & PAGE_MASK;
+
+ xe_tile_assert(m->tile, addr);
+ addr = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
+ addr, pat_index,
+ 0, false, 0);
+ bb->cs[bb->len++] = lower_32_bits(addr);
+ bb->cs[bb->len++] = upper_32_bits(addr);
+ }
+ }
+}
+
+enum xe_migrate_copy_dir {
+ XE_MIGRATE_COPY_TO_VRAM,
+ XE_MIGRATE_COPY_TO_SRAM,
+};
+
+static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
+ unsigned long npages,
+ dma_addr_t *sram_addr, u64 vram_addr,
+ const enum xe_migrate_copy_dir dir)
+{
+ struct xe_gt *gt = m->tile->primary_gt;
+ struct xe_device *xe = gt_to_xe(gt);
+ struct dma_fence *fence = NULL;
+ u32 batch_size = 2;
+ u64 src_L0_ofs, dst_L0_ofs;
+ u64 round_update_size;
+ struct xe_sched_job *job;
+ struct xe_bb *bb;
+ u32 update_idx, pt_slot = 0;
+ int err;
+
+ if (npages * PAGE_SIZE > MAX_PREEMPTDISABLE_TRANSFER)
+ return ERR_PTR(-EINVAL);
+
+ round_update_size = npages * PAGE_SIZE;
+ batch_size += pte_update_cmd_size(round_update_size);
+ batch_size += EMIT_COPY_DW;
+
+ bb = xe_bb_new(gt, batch_size, true);
+ if (IS_ERR(bb)) {
+ err = PTR_ERR(bb);
+ return ERR_PTR(err);
+ }
+
+ build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE,
+ sram_addr, round_update_size);
+
+ if (dir == XE_MIGRATE_COPY_TO_VRAM) {
+ src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0);
+ dst_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
+
+ } else {
+ src_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
+ dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0);
+ }
+
+ bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
+ update_idx = bb->len;
+
+ emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, round_update_size,
+ XE_PAGE_SIZE);
+
+ job = xe_bb_create_migration_job(m->q, bb,
+ xe_migrate_batch_base(m, true),
+ update_idx);
+ if (IS_ERR(job)) {
+ err = PTR_ERR(job);
+ goto err;
+ }
+
+ xe_sched_job_add_migrate_flush(job, 0);
+
+ mutex_lock(&m->job_mutex);
+ xe_sched_job_arm(job);
+ fence = dma_fence_get(&job->drm.s_fence->finished);
+ xe_sched_job_push(job);
+
+ dma_fence_put(m->fence);
+ m->fence = dma_fence_get(fence);
+ mutex_unlock(&m->job_mutex);
+
+ xe_bb_free(bb, fence);
+
+ return fence;
+
+err:
+ xe_bb_free(bb, NULL);
+
+ return ERR_PTR(err);
+}
+
+/**
+ * xe_migrate_to_vram() - Migrate to VRAM
+ * @m: The migration context.
+ * @npages: Number of pages to migrate.
+ * @src_addr: Array of dma addresses (source of migrate)
+ * @dst_addr: Device physical address of VRAM (destination of migrate)
+ *
+ * Copy from an array of dma addresses to a VRAM device physical address
+ *
+ * Return: dma fence for the migration to signal completion on success, ERR_PTR on
+ * failure
+ */
+struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
+ unsigned long npages,
+ dma_addr_t *src_addr,
+ u64 dst_addr)
+{
+ return xe_migrate_vram(m, npages, src_addr, dst_addr,
+ XE_MIGRATE_COPY_TO_VRAM);
+}
+
+/**
+ * xe_migrate_from_vram() - Migrate from VRAM
+ * @m: The migration context.
+ * @npages: Number of pages to migrate.
+ * @src_addr: Device physical address of VRAM (source of migrate)
+ * @dst_addr: Array of dma addresses (destination of migrate)
+ *
+ * Copy from a VRAM device physical address to an array of dma addresses
+ *
+ * Return: dma fence for the migration to signal completion on success, ERR_PTR on
+ * failure
+ */
+struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
+ unsigned long npages,
+ u64 src_addr,
+ dma_addr_t *dst_addr)
+{
+ return xe_migrate_vram(m, npages, dst_addr, src_addr,
+ XE_MIGRATE_COPY_TO_SRAM);
+}
+
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_migrate.c"
#endif
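
pte_update_cmd_size() above reduces to a fixed formula: 3 command dwords per chunk of up to 0x1ff PTEs plus 2 dwords per PTE. A standalone check of that arithmetic, mirroring the patch's constants and assuming a 4 KiB XE_PAGE_SIZE:

#include <stdint.h>
#include <stdio.h>

#define XE_PAGE_SIZE	 4096u	/* assumed 4 KiB page */
#define MAX_PTES_PER_CMD 0x1ffu	/* per MI_STORE_DATA_IMM */

static uint32_t pte_update_cmd_size(uint64_t size)
{
	uint64_t entries = (size + XE_PAGE_SIZE - 1) / XE_PAGE_SIZE;
	uint64_t cmds = (entries + MAX_PTES_PER_CMD - 1) / MAX_PTES_PER_CMD;

	/* 1 dword header + 2 dwords PT address per command, 2 dwords per PTE */
	return (uint32_t)(3 * cmds + 2 * entries);
}

int main(void)
{
	/* 8 MiB -> 2048 PTEs -> 5 commands -> 3*5 + 2*2048 = 4111 dwords */
	printf("%u\n", pte_update_cmd_size(8u << 20));
	return 0;
}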
diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h
index 0109866e398a..6ff9a963425c 100644
--- a/drivers/gpu/drm/xe/xe_migrate.h
+++ b/drivers/gpu/drm/xe/xe_migrate.h
@@ -95,6 +95,16 @@ struct xe_migrate_pt_update {
struct xe_migrate *xe_migrate_init(struct xe_tile *tile);
+struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
+ unsigned long npages,
+ dma_addr_t *src_addr,
+ u64 dst_addr);
+
+struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
+ unsigned long npages,
+ u64 src_addr,
+ dma_addr_t *dst_addr);
+
struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
struct xe_bo *src_bo,
struct xe_bo *dst_bo,
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index a48f239cad1c..70a36e777546 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -55,12 +55,11 @@ static void tiles_fini(void *arg)
static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
{
struct xe_tile *tile;
- void __iomem *regs;
u8 id;
/*
* Nothing to be done as tile 0 has already been setup earlier with the
- * entire BAR mapped - see xe_mmio_init()
+ * entire BAR mapped - see xe_mmio_probe_early()
*/
if (xe->info.tile_count == 1)
return;
@@ -74,7 +73,7 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
/*
* Although the per-tile mmio regs are not yet initialized, this
* is fine as it's going to the root tile's mmio, that's
- * guaranteed to be initialized earlier in xe_mmio_init()
+ * guaranteed to be initialized earlier in xe_mmio_probe_early()
*/
mtcfg = xe_mmio_read64_2x32(mmio, XEHP_MTCFG_ADDR);
tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;
@@ -94,59 +93,15 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
}
}
- regs = xe->mmio.regs;
- for_each_tile(tile, xe, id) {
- tile->mmio.regs_size = SZ_4M;
- tile->mmio.regs = regs;
- tile->mmio.tile = tile;
- regs += tile_mmio_size;
- }
-}
-
-/*
- * On top of all the multi-tile MMIO space there can be a platform-dependent
- * extension for each tile, resulting in a layout like below:
- *
- * .----------------------. <- ext_base + tile_count * tile_mmio_ext_size
- * | .... |
- * |----------------------| <- ext_base + 2 * tile_mmio_ext_size
- * | tile1->mmio_ext.regs |
- * |----------------------| <- ext_base + 1 * tile_mmio_ext_size
- * | tile0->mmio_ext.regs |
- * |======================| <- ext_base = tile_count * tile_mmio_size
- * | |
- * | mmio.regs |
- * | |
- * '----------------------' <- 0MB
- *
- * Set up the tile[]->mmio_ext pointers/sizes.
- */
-static void mmio_extension_setup(struct xe_device *xe, size_t tile_mmio_size,
- size_t tile_mmio_ext_size)
-{
- struct xe_tile *tile;
- void __iomem *regs;
- u8 id;
-
- if (!xe->info.has_mmio_ext)
- return;
-
- regs = xe->mmio.regs + tile_mmio_size * xe->info.tile_count;
- for_each_tile(tile, xe, id) {
- tile->mmio_ext.regs_size = tile_mmio_ext_size;
- tile->mmio_ext.regs = regs;
- tile->mmio_ext.tile = tile;
- regs += tile_mmio_ext_size;
- }
+ for_each_remote_tile(tile, xe, id)
+ xe_mmio_init(&tile->mmio, tile, xe->mmio.regs + id * tile_mmio_size, SZ_4M);
}
int xe_mmio_probe_tiles(struct xe_device *xe)
{
size_t tile_mmio_size = SZ_16M;
- size_t tile_mmio_ext_size = xe->info.tile_mmio_ext_size;
mmio_multi_tile_setup(xe, tile_mmio_size);
- mmio_extension_setup(xe, tile_mmio_size, tile_mmio_ext_size);
return devm_add_action_or_reset(xe->drm.dev, tiles_fini, xe);
}
@@ -161,7 +116,7 @@ static void mmio_fini(void *arg)
root_tile->mmio.regs = NULL;
}
-int xe_mmio_init(struct xe_device *xe)
+int xe_mmio_probe_early(struct xe_device *xe)
{
struct xe_tile *root_tile = xe_device_get_root_tile(xe);
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
@@ -179,13 +134,29 @@ int xe_mmio_init(struct xe_device *xe)
}
/* Setup first tile; other tiles (if present) will be setup later. */
- root_tile->mmio.regs_size = SZ_4M;
- root_tile->mmio.regs = xe->mmio.regs;
- root_tile->mmio.tile = root_tile;
+ xe_mmio_init(&root_tile->mmio, root_tile, xe->mmio.regs, SZ_4M);
return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe);
}
+/**
+ * xe_mmio_init() - Initialize an MMIO instance
+ * @mmio: Pointer to the MMIO instance to initialize
+ * @tile: The tile to which the MMIO region belongs
+ * @ptr: Pointer to the start of the MMIO region
+ * @size: The size of the MMIO region in bytes
+ *
+ * This is a convenience function for minimal initialization of struct xe_mmio.
+ */
+void xe_mmio_init(struct xe_mmio *mmio, struct xe_tile *tile, void __iomem *ptr, u32 size)
+{
+ xe_tile_assert(tile, size <= XE_REG_ADDR_MAX);
+
+ mmio->regs = ptr;
+ mmio->regs_size = size;
+ mmio->tile = tile;
+}
+
static void mmio_flush_pending_writes(struct xe_mmio *mmio)
{
#define DUMMY_REG_OFFSET 0x130030
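
With the rework above, each remote tile's MMIO view is simply xe->mmio.regs plus id * 16 MiB, with 4 MiB mapped per tile, and xe_mmio_init() only records that pointer, size and owning tile. A tiny standalone check of the offset arithmetic, with the sizes taken from the patch's SZ_16M/SZ_4M constants:

#include <stdint.h>
#include <stdio.h>

#define TILE_MMIO_SIZE	(16u << 20)	/* SZ_16M stride per tile */
#define TILE_REGS_SIZE	(4u << 20)	/* SZ_4M mapped per tile */

int main(void)
{
	uintptr_t base = 0;	/* stand-in for xe->mmio.regs */

	for (unsigned int id = 0; id < 2; id++)
		printf("tile%u: regs at base+0x%08lx, size 0x%x\n",
		       id, (unsigned long)(base + id * TILE_MMIO_SIZE),
		       TILE_REGS_SIZE);
	return 0;
}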
diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h
index 8a46f4006a84..c151ba569003 100644
--- a/drivers/gpu/drm/xe/xe_mmio.h
+++ b/drivers/gpu/drm/xe/xe_mmio.h
@@ -11,9 +11,11 @@
struct xe_device;
struct xe_reg;
-int xe_mmio_init(struct xe_device *xe);
+int xe_mmio_probe_early(struct xe_device *xe);
int xe_mmio_probe_tiles(struct xe_device *xe);
+void xe_mmio_init(struct xe_mmio *mmio, struct xe_tile *tile, void __iomem *ptr, u32 size);
+
u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg);
u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg);
void xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val);
diff --git a/drivers/gpu/drm/xe/xe_mocs.c b/drivers/gpu/drm/xe/xe_mocs.c
index 54d199b5cfb2..31dade91a089 100644
--- a/drivers/gpu/drm/xe/xe_mocs.c
+++ b/drivers/gpu/drm/xe/xe_mocs.c
@@ -781,7 +781,9 @@ void xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p)
flags = get_mocs_settings(xe, &table);
xe_pm_runtime_get_noresume(xe);
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ fw_ref = xe_force_wake_get(gt_to_fw(gt),
+ flags & HAS_LNCF_MOCS ?
+ XE_FORCEWAKE_ALL : XE_FW_GT);
if (!fw_ref)
goto err_fw;
diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c
index 07b27114be9a..9f4632e39a1a 100644
--- a/drivers/gpu/drm/xe/xe_module.c
+++ b/drivers/gpu/drm/xe/xe_module.c
@@ -22,9 +22,16 @@ struct xe_modparam xe_modparam = {
.guc_log_level = 3,
.force_probe = CONFIG_DRM_XE_FORCE_PROBE,
.wedged_mode = 1,
+ .svm_notifier_size = 512,
/* the rest are 0 by default */
};
+module_param_named(svm_notifier_size, xe_modparam.svm_notifier_size, uint, 0600);
+MODULE_PARM_DESC(svm_notifier_size, "Set the svm notifier size (in MiB), must be a power of 2");
+
+module_param_named(always_migrate_to_vram, xe_modparam.always_migrate_to_vram, bool, 0444);
+MODULE_PARM_DESC(always_migrate_to_vram, "Always migrate to VRAM on GPU fault");
+
module_param_named_unsafe(force_execlist, xe_modparam.force_execlist, bool, 0444);
MODULE_PARM_DESC(force_execlist, "Force Execlist submission");
@@ -77,10 +84,6 @@ struct init_funcs {
void (*exit)(void);
};
-static void xe_dummy_exit(void)
-{
-}
-
static const struct init_funcs init_funcs[] = {
{
.init = xe_check_nomodeset,
@@ -103,7 +106,6 @@ static const struct init_funcs init_funcs[] = {
},
{
.init = xe_pm_module_init,
- .exit = xe_dummy_exit,
},
};
diff --git a/drivers/gpu/drm/xe/xe_module.h b/drivers/gpu/drm/xe/xe_module.h
index 161a5e6f717f..84339e509c80 100644
--- a/drivers/gpu/drm/xe/xe_module.h
+++ b/drivers/gpu/drm/xe/xe_module.h
@@ -12,6 +12,7 @@
struct xe_modparam {
bool force_execlist;
bool probe_display;
+ bool always_migrate_to_vram;
u32 force_vram_bar_size;
int guc_log_level;
char *guc_firmware_path;
@@ -22,6 +23,7 @@ struct xe_modparam {
unsigned int max_vfs;
#endif
int wedged_mode;
+ u32 svm_notifier_size;
};
extern struct xe_modparam xe_modparam;
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index eb6cd91e1e22..7ffc98f67e69 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -12,6 +12,8 @@
#include <drm/drm_managed.h>
#include <uapi/drm/xe_drm.h>
+#include <generated/xe_wa_oob.h>
+
#include "abi/guc_actions_slpc_abi.h"
#include "instructions/xe_mi_commands.h"
#include "regs/xe_engine_regs.h"
@@ -35,6 +37,7 @@
#include "xe_sched_job.h"
#include "xe_sriov.h"
#include "xe_sync.h"
+#include "xe_wa.h"
#define DEFAULT_POLL_FREQUENCY_HZ 200
#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)
@@ -548,6 +551,7 @@ static ssize_t xe_oa_read(struct file *file, char __user *buf,
mutex_unlock(&stream->stream_lock);
} while (!offset && !ret);
} else {
+ xe_oa_buffer_check_unlocked(stream);
mutex_lock(&stream->stream_lock);
ret = __xe_oa_read(stream, buf, count, &offset);
mutex_unlock(&stream->stream_lock);
@@ -811,11 +815,8 @@ static void xe_oa_disable_metric_set(struct xe_oa_stream *stream)
struct xe_mmio *mmio = &stream->gt->mmio;
u32 sqcnt1;
- /*
- * Wa_1508761755:xehpsdv, dg2
- * Enable thread stall DOP gating and EU DOP gating.
- */
- if (stream->oa->xe->info.platform == XE_DG2) {
+ /* Enable thread stall DOP gating and EU DOP gating. */
+ if (XE_WA(stream->gt, 1508761755)) {
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN,
_MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE));
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN2,
@@ -1064,11 +1065,10 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
int ret;
/*
- * Wa_1508761755:xehpsdv, dg2
* EU NOA signals behave incorrectly if EU clock gating is enabled.
* Disable thread stall DOP gating and EU DOP gating.
*/
- if (stream->oa->xe->info.platform == XE_DG2) {
+ if (XE_WA(stream->gt, 1508761755)) {
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN,
_MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN2,
@@ -1719,12 +1719,10 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
}
/*
- * Wa_1509372804:pvc
- *
* GuC reset of engines causes OA to lose configuration
* state. Prevent this by overriding GUCRC mode.
*/
- if (stream->oa->xe->info.platform == XE_PVC) {
+ if (XE_WA(stream->gt, 1509372804)) {
ret = xe_guc_pc_override_gucrc_mode(&gt->uc.guc.pc,
SLPC_GUCRC_MODE_GUCRC_NO_RC6);
if (ret)
@@ -1766,8 +1764,8 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
WRITE_ONCE(u->exclusive_stream, stream);
- hrtimer_init(&stream->poll_check_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- stream->poll_check_timer.function = xe_oa_poll_check_timer_cb;
+ hrtimer_setup(&stream->poll_check_timer, xe_oa_poll_check_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
init_waitqueue_head(&stream->poll_wq);
spin_lock_init(&stream->oa_buffer.ptr_lock);
@@ -1856,23 +1854,14 @@ u32 xe_oa_timestamp_frequency(struct xe_gt *gt)
{
u32 reg, shift;
- /*
- * Wa_18013179988:dg2
- * Wa_14015568240:pvc
- * Wa_14015846243:mtl
- */
- switch (gt_to_xe(gt)->info.platform) {
- case XE_DG2:
- case XE_PVC:
- case XE_METEORLAKE:
+ if (XE_WA(gt, 18013179988) || XE_WA(gt, 14015568240)) {
xe_pm_runtime_get(gt_to_xe(gt));
reg = xe_mmio_read32(&gt->mmio, RPM_CONFIG0);
xe_pm_runtime_put(gt_to_xe(gt));
shift = REG_FIELD_GET(RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, reg);
return gt->info.reference_clock << (3 - shift);
-
- default:
+ } else {
return gt->info.reference_clock;
}
}
@@ -2424,36 +2413,36 @@ err_unlock:
return ret;
}
+static void xe_oa_unregister(void *arg)
+{
+ struct xe_oa *oa = arg;
+
+ if (!oa->metrics_kobj)
+ return;
+
+ kobject_put(oa->metrics_kobj);
+ oa->metrics_kobj = NULL;
+}
+
/**
* xe_oa_register - Xe OA registration
* @xe: @xe_device
*
* Exposes the metrics sysfs directory upon completion of module initialization
*/
-void xe_oa_register(struct xe_device *xe)
+int xe_oa_register(struct xe_device *xe)
{
struct xe_oa *oa = &xe->oa;
if (!oa->xe)
- return;
+ return 0;
oa->metrics_kobj = kobject_create_and_add("metrics",
&xe->drm.primary->kdev->kobj);
-}
-
-/**
- * xe_oa_unregister - Xe OA de-registration
- * @xe: @xe_device
- */
-void xe_oa_unregister(struct xe_device *xe)
-{
- struct xe_oa *oa = &xe->oa;
-
if (!oa->metrics_kobj)
- return;
+ return -ENOMEM;
- kobject_put(oa->metrics_kobj);
- oa->metrics_kobj = NULL;
+ return devm_add_action_or_reset(xe->drm.dev, xe_oa_unregister, oa);
}
static u32 num_oa_units_per_gt(struct xe_gt *gt)
@@ -2642,6 +2631,27 @@ static void xe_oa_init_supported_formats(struct xe_oa *oa)
}
}
+static int destroy_config(int id, void *p, void *data)
+{
+ xe_oa_config_put(p);
+
+ return 0;
+}
+
+static void xe_oa_fini(void *arg)
+{
+ struct xe_device *xe = arg;
+ struct xe_oa *oa = &xe->oa;
+
+ if (!oa->xe)
+ return;
+
+ idr_for_each(&oa->metrics_idr, destroy_config, oa);
+ idr_destroy(&oa->metrics_idr);
+
+ oa->xe = NULL;
+}
+
/**
* xe_oa_init - OA initialization during device probe
* @xe: @xe_device
@@ -2673,31 +2683,10 @@ int xe_oa_init(struct xe_device *xe)
}
xe_oa_init_supported_formats(oa);
- return 0;
-exit:
- oa->xe = NULL;
- return ret;
-}
-static int destroy_config(int id, void *p, void *data)
-{
- xe_oa_config_put(p);
- return 0;
-}
-
-/**
- * xe_oa_fini - OA de-initialization during device remove
- * @xe: @xe_device
- */
-void xe_oa_fini(struct xe_device *xe)
-{
- struct xe_oa *oa = &xe->oa;
-
- if (!oa->xe)
- return;
-
- idr_for_each(&oa->metrics_idr, destroy_config, oa);
- idr_destroy(&oa->metrics_idr);
+ return devm_add_action_or_reset(xe->drm.dev, xe_oa_fini, xe);
+exit:
oa->xe = NULL;
+ return ret;
}
diff --git a/drivers/gpu/drm/xe/xe_oa.h b/drivers/gpu/drm/xe/xe_oa.h
index 87a38820c317..e510826f9efc 100644
--- a/drivers/gpu/drm/xe/xe_oa.h
+++ b/drivers/gpu/drm/xe/xe_oa.h
@@ -15,9 +15,7 @@ struct xe_gt;
struct xe_hw_engine;
int xe_oa_init(struct xe_device *xe);
-void xe_oa_fini(struct xe_device *xe);
-void xe_oa_register(struct xe_device *xe);
-void xe_oa_unregister(struct xe_device *xe);
+int xe_oa_register(struct xe_device *xe);
int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *file);
int xe_oa_add_config_ioctl(struct drm_device *dev, u64 data, struct drm_file *file);
int xe_oa_remove_config_ioctl(struct drm_device *dev, u64 data, struct drm_file *file);
diff --git a/drivers/gpu/drm/xe/xe_observation.c b/drivers/gpu/drm/xe/xe_observation.c
index 57cf01efc07f..e3f9b546207e 100644
--- a/drivers/gpu/drm/xe/xe_observation.c
+++ b/drivers/gpu/drm/xe/xe_observation.c
@@ -8,6 +8,7 @@
#include <uapi/drm/xe_drm.h>
+#include "xe_eu_stall.h"
#include "xe_oa.h"
#include "xe_observation.h"
@@ -29,6 +30,17 @@ static int xe_oa_ioctl(struct drm_device *dev, struct drm_xe_observation_param *
}
}
+static int xe_eu_stall_ioctl(struct drm_device *dev, struct drm_xe_observation_param *arg,
+ struct drm_file *file)
+{
+ switch (arg->observation_op) {
+ case DRM_XE_OBSERVATION_OP_STREAM_OPEN:
+ return xe_eu_stall_stream_open(dev, arg->param, file);
+ default:
+ return -EINVAL;
+ }
+}
+
/**
* xe_observation_ioctl - The top level observation layer ioctl
* @dev: @drm_device
@@ -51,6 +63,8 @@ int xe_observation_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
switch (arg->observation_type) {
case DRM_XE_OBSERVATION_TYPE_OA:
return xe_oa_ioctl(dev, arg, file);
+ case DRM_XE_OBSERVATION_TYPE_EU_STALL:
+ return xe_eu_stall_ioctl(dev, arg, file);
default:
return -EINVAL;
}
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 39be74848e44..da9679c8cf26 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -30,6 +30,7 @@
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_step.h"
+#include "xe_survivability_mode.h"
#include "xe_tile.h"
enum toggle_d3cold {
@@ -45,15 +46,18 @@ struct xe_subplatform_desc {
struct xe_device_desc {
/* Should only ever be set for platforms without GMD_ID */
- const struct xe_graphics_desc *graphics;
+ const struct xe_ip *pre_gmdid_graphics_ip;
/* Should only ever be set for platforms without GMD_ID */
- const struct xe_media_desc *media;
+ const struct xe_ip *pre_gmdid_media_ip;
const char *platform_name;
const struct xe_subplatform_desc *subplatforms;
enum xe_platform platform;
+ u8 dma_mask_size;
+ u8 max_remote_tiles:2;
+
u8 require_force_probe:1;
u8 is_dgfx:1;
@@ -61,7 +65,7 @@ struct xe_device_desc {
u8 has_heci_gscfi:1;
u8 has_heci_cscfi:1;
u8 has_llc:1;
- u8 has_mmio_ext:1;
+ u8 has_pxp:1;
u8 has_sriov:1;
u8 skip_guc_pc:1;
u8 skip_mtcfg:1;
@@ -78,40 +82,18 @@ __diag_ignore_all("-Woverride-init", "Allow field overrides in table");
#define NOP(x) x
static const struct xe_graphics_desc graphics_xelp = {
- .name = "Xe_LP",
- .ver = 12,
- .rel = 0,
-
.hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),
- .dma_mask_size = 39,
- .va_bits = 48,
- .vm_max_level = 3,
-};
-
-static const struct xe_graphics_desc graphics_xelpp = {
- .name = "Xe_LP+",
- .ver = 12,
- .rel = 10,
-
- .hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),
-
- .dma_mask_size = 39,
.va_bits = 48,
.vm_max_level = 3,
};
#define XE_HP_FEATURES \
.has_range_tlb_invalidation = true, \
- .dma_mask_size = 46, \
.va_bits = 48, \
.vm_max_level = 3
static const struct xe_graphics_desc graphics_xehpg = {
- .name = "Xe_HPG",
- .ver = 12,
- .rel = 55,
-
.hw_engine_mask =
BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
@@ -124,10 +106,6 @@ static const struct xe_graphics_desc graphics_xehpg = {
};
static const struct xe_graphics_desc graphics_xehpc = {
- .name = "Xe_HPC",
- .ver = 12,
- .rel = 60,
-
.hw_engine_mask =
BIT(XE_HW_ENGINE_BCS0) | BIT(XE_HW_ENGINE_BCS1) |
BIT(XE_HW_ENGINE_BCS2) | BIT(XE_HW_ENGINE_BCS3) |
@@ -138,8 +116,6 @@ static const struct xe_graphics_desc graphics_xehpc = {
BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),
XE_HP_FEATURES,
- .dma_mask_size = 52,
- .max_remote_tiles = 1,
.va_bits = 57,
.vm_max_level = 4,
.vram_flags = XE_VRAM_FLAGS_NEED64K,
@@ -150,7 +126,6 @@ static const struct xe_graphics_desc graphics_xehpc = {
};
static const struct xe_graphics_desc graphics_xelpg = {
- .name = "Xe_LPG",
.hw_engine_mask =
BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
BIT(XE_HW_ENGINE_CCS0),
@@ -159,7 +134,6 @@ static const struct xe_graphics_desc graphics_xelpg = {
};
#define XE2_GFX_FEATURES \
- .dma_mask_size = 46, \
.has_asid = 1, \
.has_atomic_enable_pte_bit = 1, \
.has_flat_ccs = 1, \
@@ -174,60 +148,66 @@ static const struct xe_graphics_desc graphics_xelpg = {
GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)
static const struct xe_graphics_desc graphics_xe2 = {
- .name = "Xe2_LPG / Xe2_HPG / Xe3_LPG",
-
XE2_GFX_FEATURES,
};
static const struct xe_media_desc media_xem = {
- .name = "Xe_M",
- .ver = 12,
- .rel = 0,
-
- .hw_engine_mask =
- GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
- GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
-};
-
-static const struct xe_media_desc media_xehpm = {
- .name = "Xe_HPM",
- .ver = 12,
- .rel = 55,
-
.hw_engine_mask =
GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
};
static const struct xe_media_desc media_xelpmp = {
- .name = "Xe_LPM+",
.hw_engine_mask =
GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
BIT(XE_HW_ENGINE_GSCCS0)
};
-static const struct xe_media_desc media_xe2 = {
- .name = "Xe2_LPM / Xe2_HPM / Xe3_LPM",
- .hw_engine_mask =
- GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
- GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
- BIT(XE_HW_ENGINE_GSCCS0)
+/* Pre-GMDID Graphics IPs */
+static const struct xe_ip graphics_ip_xelp = { 1200, "Xe_LP", &graphics_xelp };
+static const struct xe_ip graphics_ip_xelpp = { 1210, "Xe_LP+", &graphics_xelp };
+static const struct xe_ip graphics_ip_xehpg = { 1255, "Xe_HPG", &graphics_xehpg };
+static const struct xe_ip graphics_ip_xehpc = { 1260, "Xe_HPC", &graphics_xehpc };
+
+/* GMDID-based Graphics IPs */
+static const struct xe_ip graphics_ips[] = {
+ { 1270, "Xe_LPG", &graphics_xelpg },
+ { 1271, "Xe_LPG", &graphics_xelpg },
+ { 1274, "Xe_LPG+", &graphics_xelpg },
+ { 2001, "Xe2_HPG", &graphics_xe2 },
+ { 2004, "Xe2_LPG", &graphics_xe2 },
+ { 3000, "Xe3_LPG", &graphics_xe2 },
+ { 3001, "Xe3_LPG", &graphics_xe2 },
+};
+
+/* Pre-GMDID Media IPs */
+static const struct xe_ip media_ip_xem = { 1200, "Xe_M", &media_xem };
+static const struct xe_ip media_ip_xehpm = { 1255, "Xe_HPM", &media_xem };
+
+/* GMDID-based Media IPs */
+static const struct xe_ip media_ips[] = {
+ { 1300, "Xe_LPM+", &media_xelpmp },
+ { 1301, "Xe2_HPM", &media_xelpmp },
+ { 2000, "Xe2_LPM", &media_xelpmp },
+ { 3000, "Xe3_LPM", &media_xelpmp },
};
static const struct xe_device_desc tgl_desc = {
- .graphics = &graphics_xelp,
- .media = &media_xem,
+ .pre_gmdid_graphics_ip = &graphics_ip_xelp,
+ .pre_gmdid_media_ip = &media_ip_xem,
PLATFORM(TIGERLAKE),
+ .dma_mask_size = 39,
.has_display = true,
.has_llc = true,
.require_force_probe = true,
};
static const struct xe_device_desc rkl_desc = {
- .graphics = &graphics_xelp,
- .media = &media_xem,
+ .pre_gmdid_graphics_ip = &graphics_ip_xelp,
+ .pre_gmdid_media_ip = &media_ip_xem,
PLATFORM(ROCKETLAKE),
+ .dma_mask_size = 39,
.has_display = true,
.has_llc = true,
.require_force_probe = true,
@@ -236,9 +216,10 @@ static const struct xe_device_desc rkl_desc = {
static const u16 adls_rpls_ids[] = { INTEL_RPLS_IDS(NOP), 0 };
static const struct xe_device_desc adl_s_desc = {
- .graphics = &graphics_xelp,
- .media = &media_xem,
+ .pre_gmdid_graphics_ip = &graphics_ip_xelp,
+ .pre_gmdid_media_ip = &media_ip_xem,
PLATFORM(ALDERLAKE_S),
+ .dma_mask_size = 39,
.has_display = true,
.has_llc = true,
.require_force_probe = true,
@@ -251,9 +232,10 @@ static const struct xe_device_desc adl_s_desc = {
static const u16 adlp_rplu_ids[] = { INTEL_RPLU_IDS(NOP), 0 };
static const struct xe_device_desc adl_p_desc = {
- .graphics = &graphics_xelp,
- .media = &media_xem,
+ .pre_gmdid_graphics_ip = &graphics_ip_xelp,
+ .pre_gmdid_media_ip = &media_ip_xem,
PLATFORM(ALDERLAKE_P),
+ .dma_mask_size = 39,
.has_display = true,
.has_llc = true,
.require_force_probe = true,
@@ -264,9 +246,10 @@ static const struct xe_device_desc adl_p_desc = {
};
static const struct xe_device_desc adl_n_desc = {
- .graphics = &graphics_xelp,
- .media = &media_xem,
+ .pre_gmdid_graphics_ip = &graphics_ip_xelp,
+ .pre_gmdid_media_ip = &media_ip_xem,
PLATFORM(ALDERLAKE_N),
+ .dma_mask_size = 39,
.has_display = true,
.has_llc = true,
.require_force_probe = true,
@@ -276,10 +259,11 @@ static const struct xe_device_desc adl_n_desc = {
.is_dgfx = 1
static const struct xe_device_desc dg1_desc = {
- .graphics = &graphics_xelpp,
- .media = &media_xem,
+ .pre_gmdid_graphics_ip = &graphics_ip_xelpp,
+ .pre_gmdid_media_ip = &media_ip_xem,
DGFX_FEATURES,
PLATFORM(DG1),
+ .dma_mask_size = 39,
.has_display = true,
.has_heci_gscfi = 1,
.require_force_probe = true,
@@ -301,8 +285,9 @@ static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 };
}
static const struct xe_device_desc ats_m_desc = {
- .graphics = &graphics_xehpg,
- .media = &media_xehpm,
+ .pre_gmdid_graphics_ip = &graphics_ip_xehpg,
+ .pre_gmdid_media_ip = &media_ip_xehpm,
+ .dma_mask_size = 46,
.require_force_probe = true,
DG2_FEATURES,
@@ -310,8 +295,9 @@ static const struct xe_device_desc ats_m_desc = {
};
static const struct xe_device_desc dg2_desc = {
- .graphics = &graphics_xehpg,
- .media = &media_xehpm,
+ .pre_gmdid_graphics_ip = &graphics_ip_xehpg,
+ .pre_gmdid_media_ip = &media_ip_xehpm,
+ .dma_mask_size = 46,
.require_force_probe = true,
DG2_FEATURES,
@@ -319,11 +305,13 @@ static const struct xe_device_desc dg2_desc = {
};
static const __maybe_unused struct xe_device_desc pvc_desc = {
- .graphics = &graphics_xehpc,
+ .pre_gmdid_graphics_ip = &graphics_ip_xehpc,
DGFX_FEATURES,
PLATFORM(PVC),
+ .dma_mask_size = 52,
.has_display = false,
.has_heci_gscfi = 1,
+ .max_remote_tiles = 1,
.require_force_probe = true,
};
@@ -331,49 +319,37 @@ static const struct xe_device_desc mtl_desc = {
/* .graphics and .media determined via GMD_ID */
.require_force_probe = true,
PLATFORM(METEORLAKE),
+ .dma_mask_size = 46,
.has_display = true,
+ .has_pxp = true,
};
static const struct xe_device_desc lnl_desc = {
PLATFORM(LUNARLAKE),
+ .dma_mask_size = 46,
.has_display = true,
+ .has_pxp = true,
};
static const struct xe_device_desc bmg_desc = {
DGFX_FEATURES,
PLATFORM(BATTLEMAGE),
+ .dma_mask_size = 46,
.has_display = true,
.has_heci_cscfi = 1,
};
static const struct xe_device_desc ptl_desc = {
PLATFORM(PANTHERLAKE),
+ .dma_mask_size = 46,
.has_display = true,
+ .has_sriov = true,
.require_force_probe = true,
};
#undef PLATFORM
__diag_pop();
-/* Map of GMD_ID values to graphics IP */
-static const struct gmdid_map graphics_ip_map[] = {
- { 1270, &graphics_xelpg },
- { 1271, &graphics_xelpg },
- { 1274, &graphics_xelpg }, /* Xe_LPG+ */
- { 2001, &graphics_xe2 },
- { 2004, &graphics_xe2 },
- { 3000, &graphics_xe2 },
- { 3001, &graphics_xe2 },
-};
-
-/* Map of GMD_ID values to media IP */
-static const struct gmdid_map media_ip_map[] = {
- { 1300, &media_xelpmp },
- { 1301, &media_xe2 },
- { 2000, &media_xe2 },
- { 3000, &media_xe2 },
-};
-
/*
* Make sure any device matches here are from most specific to most
* general. For example, since the Quanta match is based on the subsystem
@@ -502,6 +478,7 @@ static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver,
gt->info.type = XE_GT_TYPE_MAIN;
}
+ xe_gt_mmio_init(gt);
xe_guc_comm_init_early(&gt->uc.guc);
/* Don't bother with GMDID if failed to negotiate the GuC ABI */
@@ -533,66 +510,49 @@ static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver,
}
/*
- * Pre-GMD_ID platform: device descriptor already points to the appropriate
- * graphics descriptor. Simply forward the description and calculate the version
- * appropriately. "graphics" should be present in all such platforms, while
- * media is optional.
- */
-static void handle_pre_gmdid(struct xe_device *xe,
- const struct xe_graphics_desc *graphics,
- const struct xe_media_desc *media)
-{
- xe->info.graphics_verx100 = graphics->ver * 100 + graphics->rel;
-
- if (media)
- xe->info.media_verx100 = media->ver * 100 + media->rel;
-
-}
-
-/*
- * GMD_ID platform: read IP version from hardware and select graphics descriptor
+ * Read IP version from hardware and select graphics/media IP descriptors
* based on the result.
*/
static void handle_gmdid(struct xe_device *xe,
- const struct xe_graphics_desc **graphics,
- const struct xe_media_desc **media,
+ const struct xe_ip **graphics_ip,
+ const struct xe_ip **media_ip,
u32 *graphics_revid,
u32 *media_revid)
{
u32 ver;
+ *graphics_ip = NULL;
+ *media_ip = NULL;
+
read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid);
- for (int i = 0; i < ARRAY_SIZE(graphics_ip_map); i++) {
- if (ver == graphics_ip_map[i].ver) {
- xe->info.graphics_verx100 = ver;
- *graphics = graphics_ip_map[i].ip;
+ for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++) {
+ if (ver == graphics_ips[i].verx100) {
+ *graphics_ip = &graphics_ips[i];
break;
}
}
- if (!xe->info.graphics_verx100) {
+ if (!*graphics_ip) {
drm_err(&xe->drm, "Hardware reports unknown graphics version %u.%02u\n",
ver / 100, ver % 100);
}
read_gmdid(xe, GMDID_MEDIA, &ver, media_revid);
-
/* Media may legitimately be fused off / not present */
if (ver == 0)
return;
- for (int i = 0; i < ARRAY_SIZE(media_ip_map); i++) {
- if (ver == media_ip_map[i].ver) {
- xe->info.media_verx100 = ver;
- *media = media_ip_map[i].ip;
+ for (int i = 0; i < ARRAY_SIZE(media_ips); i++) {
+ if (ver == media_ips[i].verx100) {
+ *media_ip = &media_ips[i];
break;
}
}
- if (!xe->info.media_verx100) {
+ if (!*media_ip) {
drm_err(&xe->drm, "Hardware reports unknown media version %u.%02u\n",
ver / 100, ver % 100);
}
@@ -613,11 +573,12 @@ static int xe_info_init_early(struct xe_device *xe,
xe->info.subplatform = subplatform_desc ?
subplatform_desc->subplatform : XE_SUBPLATFORM_NONE;
+ xe->info.dma_mask_size = desc->dma_mask_size;
xe->info.is_dgfx = desc->is_dgfx;
xe->info.has_heci_gscfi = desc->has_heci_gscfi;
xe->info.has_heci_cscfi = desc->has_heci_cscfi;
xe->info.has_llc = desc->has_llc;
- xe->info.has_mmio_ext = desc->has_mmio_ext;
+ xe->info.has_pxp = desc->has_pxp;
xe->info.has_sriov = desc->has_sriov;
xe->info.skip_guc_pc = desc->skip_guc_pc;
xe->info.skip_mtcfg = desc->skip_mtcfg;
@@ -626,6 +587,7 @@ static int xe_info_init_early(struct xe_device *xe,
xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
xe_modparam.probe_display &&
desc->has_display;
+ xe->info.tile_count = 1 + desc->max_remote_tiles;
err = xe_tile_init_early(xe_device_get_root_tile(xe), xe, 0);
if (err)
@@ -641,26 +603,31 @@ static int xe_info_init_early(struct xe_device *xe,
* present in device info.
*/
static int xe_info_init(struct xe_device *xe,
- const struct xe_graphics_desc *graphics_desc,
- const struct xe_media_desc *media_desc)
+ const struct xe_device_desc *desc)
{
u32 graphics_gmdid_revid = 0, media_gmdid_revid = 0;
+ const struct xe_ip *graphics_ip;
+ const struct xe_ip *media_ip;
+ const struct xe_graphics_desc *graphics_desc;
+ const struct xe_media_desc *media_desc;
struct xe_tile *tile;
struct xe_gt *gt;
u8 id;
/*
* If this platform supports GMD_ID, we'll detect the proper IP
- * descriptor to use from hardware registers. desc->graphics will only
- * ever be set at this point for platforms before GMD_ID. In that case
- * the IP descriptions and versions are simply derived from that.
+ * descriptor to use from hardware registers.
+ * desc->pre_gmdid_graphics_ip will only ever be set at this point for
+ * platforms before GMD_ID. In that case the IP descriptions and
+ * versions are simply derived from that.
*/
- if (graphics_desc) {
- handle_pre_gmdid(xe, graphics_desc, media_desc);
+ if (desc->pre_gmdid_graphics_ip) {
+ graphics_ip = desc->pre_gmdid_graphics_ip;
+ media_ip = desc->pre_gmdid_media_ip;
xe->info.step = xe_step_pre_gmdid_get(xe);
} else {
- xe_assert(xe, !media_desc);
- handle_gmdid(xe, &graphics_desc, &media_desc,
+ xe_assert(xe, !desc->pre_gmdid_media_ip);
+ handle_gmdid(xe, &graphics_ip, &media_ip,
&graphics_gmdid_revid, &media_gmdid_revid);
xe->info.step = xe_step_gmdid_get(xe,
graphics_gmdid_revid,
@@ -672,14 +639,22 @@ static int xe_info_init(struct xe_device *xe,
* error and we should abort driver load. Failing to detect media
* IP is non-fatal; we'll just proceed without enabling media support.
*/
- if (!graphics_desc)
+ if (!graphics_ip)
return -ENODEV;
- xe->info.graphics_name = graphics_desc->name;
- xe->info.media_name = media_desc ? media_desc->name : "none";
- xe->info.tile_mmio_ext_size = graphics_desc->tile_mmio_ext_size;
+ xe->info.graphics_verx100 = graphics_ip->verx100;
+ xe->info.graphics_name = graphics_ip->name;
+ graphics_desc = graphics_ip->desc;
+
+ if (media_ip) {
+ xe->info.media_verx100 = media_ip->verx100;
+ xe->info.media_name = media_ip->name;
+ media_desc = media_ip->desc;
+ } else {
+ xe->info.media_name = "none";
+ media_desc = NULL;
+ }
- xe->info.dma_mask_size = graphics_desc->dma_mask_size;
xe->info.vram_flags = graphics_desc->vram_flags;
xe->info.va_bits = graphics_desc->va_bits;
xe->info.vm_max_level = graphics_desc->vm_max_level;
@@ -694,17 +669,6 @@ static int xe_info_init(struct xe_device *xe,
xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;
xe->info.has_usm = graphics_desc->has_usm;
- /*
- * All platforms have at least one primary GT. Any platform with media
- * version 13 or higher has an additional dedicated media GT. And
- * depending on the graphics IP there may be additional "remote tiles."
- * All of these together determine the overall GT count.
- *
- * FIXME: 'tile_count' here is misnamed since the rest of the driver
- * treats it as the number of GTs rather than just the number of tiles.
- */
- xe->info.tile_count = 1 + graphics_desc->max_remote_tiles;
-
for_each_remote_tile(tile, xe, id) {
int err;
@@ -713,6 +677,12 @@ static int xe_info_init(struct xe_device *xe,
return err;
}
+ /*
+ * All platforms have at least one primary GT. Any platform with media
+ * version 13 or higher has an additional dedicated media GT. And
+ * depending on the graphics IP there may be additional "remote tiles."
+ * All of these together determine the overall GT count.
+ */
for_each_tile(tile, xe, id) {
gt = tile->primary_gt;
gt->info.id = xe->info.gt_count++;
@@ -754,18 +724,16 @@ static int xe_info_init(struct xe_device *xe,
static void xe_pci_remove(struct pci_dev *pdev)
{
- struct xe_device *xe;
-
- xe = pdev_to_xe_device(pdev);
- if (!xe) /* driver load aborted, nothing to cleanup */
- return;
+ struct xe_device *xe = pdev_to_xe_device(pdev);
if (IS_SRIOV_PF(xe))
xe_pci_sriov_configure(pdev, 0);
+ if (xe_survivability_mode_is_enabled(xe))
+ return;
+
xe_device_remove(xe);
xe_pm_runtime_fini(xe);
- pci_set_drvdata(pdev, NULL);
}
/*
@@ -835,10 +803,22 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
err = xe_device_probe_early(xe);
- if (err)
+
+	/*
+	 * In Boot Survivability mode, no drm card is exposed and the driver is
+	 * loaded with the bare minimum needed to allow firmware to be flashed
+	 * through mei. If early probe fails, check whether HW flags
+	 * survivability mode as required; in that case enable it and return
+	 * success.
+	 */
+ if (err) {
+ if (xe_survivability_mode_required(xe) &&
+ xe_survivability_mode_enable(xe))
+ return 0;
+
return err;
+ }
- err = xe_info_init(xe, desc->graphics, desc->media);
+ err = xe_info_init(xe, desc);
if (err)
return err;
@@ -923,9 +903,13 @@ static void d3cold_toggle(struct pci_dev *pdev, enum toggle_d3cold toggle)
static int xe_pci_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ struct xe_device *xe = pdev_to_xe_device(pdev);
int err;
- err = xe_pm_suspend(pdev_to_xe_device(pdev));
+ if (xe_survivability_mode_is_enabled(xe))
+ return -EBUSY;
+
+ err = xe_pm_suspend(xe);
if (err)
return err;
diff --git a/drivers/gpu/drm/xe/xe_pci_sriov.c b/drivers/gpu/drm/xe/xe_pci_sriov.c
index aaceee748287..09ee8a06fe2e 100644
--- a/drivers/gpu/drm/xe/xe_pci_sriov.c
+++ b/drivers/gpu/drm/xe/xe_pci_sriov.c
@@ -62,6 +62,55 @@ static void pf_reset_vfs(struct xe_device *xe, unsigned int num_vfs)
xe_gt_sriov_pf_control_trigger_flr(gt, n);
}
+static struct pci_dev *xe_pci_pf_get_vf_dev(struct xe_device *xe, unsigned int vf_id)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ /* caller must use pci_dev_put() */
+ return pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
+ pdev->bus->number,
+ pci_iov_virtfn_devfn(pdev, vf_id));
+}
+
+static void pf_link_vfs(struct xe_device *xe, int num_vfs)
+{
+ struct pci_dev *pdev_pf = to_pci_dev(xe->drm.dev);
+ struct device_link *link;
+ struct pci_dev *pdev_vf;
+ unsigned int n;
+
+	/*
+	 * When both PF and VF devices are enabled on the host, they resume in
+	 * parallel during system resume.
+	 *
+	 * However, the PF must finish provisioning the VFs before any VF can
+	 * successfully resume.
+	 *
+	 * Create a parent-child device link between the PF and each VF device
+	 * to enforce the correct resume order.
+	 */
+ for (n = 1; n <= num_vfs; n++) {
+ pdev_vf = xe_pci_pf_get_vf_dev(xe, n - 1);
+
+ /* unlikely, something weird is happening, abort */
+ if (!pdev_vf) {
+ xe_sriov_err(xe, "Cannot find VF%u device, aborting link%s creation!\n",
+ n, str_plural(num_vfs));
+ break;
+ }
+
+ link = device_link_add(&pdev_vf->dev, &pdev_pf->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ /* unlikely and harmless, continue with other VFs */
+ if (!link)
+ xe_sriov_notice(xe, "Failed linking VF%u\n", n);
+
+ pci_dev_put(pdev_vf);
+ }
+}
+
static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
@@ -92,6 +141,8 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
if (err < 0)
goto failed;
+ pf_link_vfs(xe, num_vfs);
+
xe_sriov_info(xe, "Enabled %u of %u VF%s\n",
num_vfs, total_vfs, str_plural(total_vfs));
return num_vfs;
diff --git a/drivers/gpu/drm/xe/xe_pci_types.h b/drivers/gpu/drm/xe/xe_pci_types.h
index 79b0f80376a4..e9b9bbc138d3 100644
--- a/drivers/gpu/drm/xe/xe_pci_types.h
+++ b/drivers/gpu/drm/xe/xe_pci_types.h
@@ -9,21 +9,12 @@
#include <linux/types.h>
struct xe_graphics_desc {
- const char *name;
- u8 ver;
- u8 rel;
-
- u8 dma_mask_size; /* available DMA address bits */
u8 va_bits;
u8 vm_max_level;
u8 vram_flags;
u64 hw_engine_mask; /* hardware engines provided by graphics IP */
- u32 tile_mmio_ext_size; /* size of MMIO extension space, per-tile */
-
- u8 max_remote_tiles:2;
-
u8 has_asid:1;
u8 has_atomic_enable_pte_bit:1;
u8 has_flat_ccs:1;
@@ -33,18 +24,15 @@ struct xe_graphics_desc {
};
struct xe_media_desc {
- const char *name;
- u8 ver;
- u8 rel;
-
u64 hw_engine_mask; /* hardware engines provided by media IP */
u8 has_indirect_ring_state:1;
};
-struct gmdid_map {
- unsigned int ver;
- const void *ip;
+struct xe_ip {
+	unsigned int verx100;	/* IP version: major * 100 + minor */
+	const char *name;	/* human-readable IP name, e.g. "Xe_LPG" */
+	const void *desc;	/* matching xe_graphics_desc or xe_media_desc */
+};
#endif
diff --git a/drivers/gpu/drm/xe/xe_pcode_api.h b/drivers/gpu/drm/xe/xe_pcode_api.h
index f153ce96f69a..2bae9afdbd35 100644
--- a/drivers/gpu/drm/xe/xe_pcode_api.h
+++ b/drivers/gpu/drm/xe/xe_pcode_api.h
@@ -49,6 +49,20 @@
/* Domain IDs (param2) */
#define PCODE_MBOX_DOMAIN_HBM 0x2
+#define PCODE_SCRATCH(x) XE_REG(0x138320 + ((x) * 4))
+/* PCODE_SCRATCH0 */
+#define AUXINFO_REG_OFFSET REG_GENMASK(17, 15)
+#define OVERFLOW_REG_OFFSET REG_GENMASK(14, 12)
+#define HISTORY_TRACKING REG_BIT(11)
+#define OVERFLOW_SUPPORT REG_BIT(10)
+#define AUXINFO_SUPPORT REG_BIT(9)
+#define BOOT_STATUS REG_GENMASK(3, 1)
+#define CRITICAL_FAILURE 4
+#define NON_CRITICAL_FAILURE 7
+
+/* Auxiliary info bits */
+#define AUXINFO_HISTORY_OFFSET REG_GENMASK(31, 29)
+
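+/*
+ * Illustrative sketch (not part of this patch) of how a failed boot could be
+ * detected from PCODE_SCRATCH(0), assuming the caller already has the root
+ * tile's MMIO handle; whether the survivability code does exactly this is an
+ * assumption:
+ *
+ *	u32 data = xe_mmio_read32(mmio, PCODE_SCRATCH(0));
+ *	u32 boot_status = REG_FIELD_GET(BOOT_STATUS, data);
+ *	bool failed = boot_status == CRITICAL_FAILURE ||
+ *		      boot_status == NON_CRITICAL_FAILURE;
+ */
+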
struct pcode_err_decode {
int errno;
const char *str;
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index c9cc0c091dfd..7b6b754ad6eb 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -22,6 +22,7 @@
#include "xe_guc.h"
#include "xe_irq.h"
#include "xe_pcode.h"
+#include "xe_pxp.h"
#include "xe_trace.h"
#include "xe_wa.h"
@@ -90,7 +91,7 @@ static struct lockdep_map xe_pm_runtime_nod3cold_map = {
*/
bool xe_rpm_reclaim_safe(const struct xe_device *xe)
{
- return !xe->d3cold.capable && !xe->info.has_sriov;
+ return !xe->d3cold.capable;
}
static void xe_rpm_lockmap_acquire(const struct xe_device *xe)
@@ -122,6 +123,10 @@ int xe_pm_suspend(struct xe_device *xe)
drm_dbg(&xe->drm, "Suspending device\n");
trace_xe_pm_suspend(xe, __builtin_return_address(0));
+ err = xe_pxp_pm_suspend(xe->pxp);
+ if (err)
+ goto err;
+
for_each_gt(gt, xe, id)
xe_gt_suspend_prepare(gt);
@@ -130,14 +135,12 @@ int xe_pm_suspend(struct xe_device *xe)
/* FIXME: Super racey... */
err = xe_bo_evict_all(xe);
if (err)
- goto err;
+ goto err_pxp;
for_each_gt(gt, xe, id) {
err = xe_gt_suspend(gt);
- if (err) {
- xe_display_pm_resume(xe);
- goto err;
- }
+ if (err)
+ goto err_display;
}
xe_irq_suspend(xe);
@@ -146,6 +149,11 @@ int xe_pm_suspend(struct xe_device *xe)
drm_dbg(&xe->drm, "Device suspended\n");
return 0;
+
+err_display:
+ xe_display_pm_resume(xe);
+err_pxp:
+ xe_pxp_pm_resume(xe->pxp);
err:
drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
return err;
@@ -195,6 +203,8 @@ int xe_pm_resume(struct xe_device *xe)
if (err)
goto err;
+ xe_pxp_pm_resume(xe->pxp);
+
drm_dbg(&xe->drm, "Device resumed\n");
return 0;
err:
@@ -267,6 +277,15 @@ int xe_pm_init_early(struct xe_device *xe)
}
ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */
+static u32 vram_threshold_value(struct xe_device *xe)
+{
+ /* FIXME: D3Cold temporarily disabled by default on BMG */
+ if (xe->info.platform == XE_BATTLEMAGE)
+ return 0;
+
+ return DEFAULT_VRAM_THRESHOLD;
+}
+
/**
* xe_pm_init - Initialize Xe Power Management
* @xe: xe device instance
@@ -277,6 +296,7 @@ ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */
*/
int xe_pm_init(struct xe_device *xe)
{
+ u32 vram_threshold;
int err;
/* For now suspend/resume is only allowed with GuC */
@@ -290,7 +310,8 @@ int xe_pm_init(struct xe_device *xe)
if (err)
return err;
- err = xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD);
+ vram_threshold = vram_threshold_value(xe);
+ err = xe_pm_set_vram_threshold(xe, vram_threshold);
if (err)
return err;
}
@@ -389,6 +410,10 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
*/
xe_rpm_lockmap_acquire(xe);
+ err = xe_pxp_pm_suspend(xe->pxp);
+ if (err)
+ goto out;
+
/*
* Applying lock for entire list op as xe_ttm_bo_destroy and xe_bo_move_notify
* also checks and deletes bo entry from user fault list.
@@ -404,22 +429,27 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
if (xe->d3cold.allowed) {
err = xe_bo_evict_all(xe);
if (err)
- goto out;
+ goto out_resume;
}
for_each_gt(gt, xe, id) {
err = xe_gt_suspend(gt);
if (err)
- goto out;
+ goto out_resume;
}
xe_irq_suspend(xe);
xe_display_pm_runtime_suspend_late(xe);
+ xe_rpm_lockmap_release(xe);
+ xe_pm_write_callback_task(xe, NULL);
+ return 0;
+
+out_resume:
+ xe_display_pm_runtime_resume(xe);
+ xe_pxp_pm_resume(xe->pxp);
out:
- if (err)
- xe_display_pm_runtime_resume(xe);
xe_rpm_lockmap_release(xe);
xe_pm_write_callback_task(xe, NULL);
return err;
@@ -472,6 +502,8 @@ int xe_pm_runtime_resume(struct xe_device *xe)
goto out;
}
+ xe_pxp_pm_resume(xe->pxp);
+
out:
xe_rpm_lockmap_release(xe);
xe_pm_write_callback_task(xe, NULL);
diff --git a/drivers/gpu/drm/xe/xe_pmu.c b/drivers/gpu/drm/xe/xe_pmu.c
new file mode 100644
index 000000000000..4f62a6e515d6
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pmu.c
@@ -0,0 +1,531 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <drm/drm_drv.h>
+#include <linux/device.h>
+
+#include "xe_device.h"
+#include "xe_force_wake.h"
+#include "xe_gt_idle.h"
+#include "xe_guc_engine_activity.h"
+#include "xe_hw_engine.h"
+#include "xe_pm.h"
+#include "xe_pmu.h"
+
+/**
+ * DOC: Xe PMU (Performance Monitoring Unit)
+ *
+ * Expose events/counters such as GT-C6 residency, GT frequency and per-class
+ * engine activity to userspace via the perf interface. Events are per device.
+ *
+ * All events are listed in sysfs:
+ *
+ * $ ls -ld /sys/bus/event_source/devices/xe_*
+ * $ ls /sys/bus/event_source/devices/xe_0000_00_02.0/events/
+ * $ ls /sys/bus/event_source/devices/xe_0000_00_02.0/format/
+ *
+ * The following format parameters are available for reading events,
+ * but only a few are valid for each event:
+ *
+ *	gt[60:63]		Selects the GT for the event
+ *	engine_class[20:27]	Selects the engine class for the event
+ *	engine_instance[12:19]	Selects the engine instance for the event
+ *
+ * For engine-specific events (engine-*), the gt, engine_class and engine_instance
+ * parameters must be set as populated by DRM_XE_DEVICE_QUERY_ENGINES.
+ *
+ * For GT-specific events (gt-*), only the gt parameter must be passed; all
+ * other parameters will be 0.
+ *
+ * The standard perf tool can be used to grep for a certain event as well.
+ * Example:
+ *
+ * $ perf list | grep gt-c6
+ *
+ * To count a specific event for a GT at regular intervals:
+ *
+ *	$ perf stat -e <pmu_name>/<event_name>,gt=<gt_id>/ -I <interval>
+ */
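+
+/*
+ * Illustrative userspace sketch (not part of the driver): counting
+ * gt-c6-residency for GT0 directly with perf_event_open(2). The PMU type must
+ * be read at runtime from /sys/bus/event_source/devices/xe_<bdf>/type (the
+ * value 42 below is a placeholder), and the config layout mirrors the format
+ * attributes above: event id in bits 0-11 (0x01 is gt-c6-residency, see
+ * XE_PMU_EVENT_GT_C6_RESIDENCY below) and gt in bits 60-63.
+ *
+ *	#include <linux/perf_event.h>
+ *	#include <stdint.h>
+ *	#include <stdio.h>
+ *	#include <string.h>
+ *	#include <sys/syscall.h>
+ *	#include <unistd.h>
+ *
+ *	int main(void)
+ *	{
+ *		struct perf_event_attr attr;
+ *		uint64_t count;
+ *		int fd;
+ *
+ *		memset(&attr, 0, sizeof(attr));
+ *		attr.size = sizeof(attr);
+ *		attr.type = 42;				// .../devices/xe_<bdf>/type
+ *		attr.config = (0ULL << 60) | 0x01;	// gt = 0, event = gt-c6-residency
+ *
+ *		// System-wide event: pid == -1 and cpu >= 0 (see xe_pmu_event_init()).
+ *		fd = syscall(SYS_perf_event_open, &attr, -1, 0, -1, 0);
+ *		if (fd < 0)
+ *			return 1;
+ *
+ *		if (read(fd, &count, sizeof(count)) == sizeof(count))
+ *			printf("gt-c6-residency: %llu ms\n", (unsigned long long)count);
+ *
+ *		close(fd);
+ *		return 0;
+ *	}
+ *
+ * The equivalent with the perf tool would be something like
+ * "perf stat -e xe_0000_00_02.0/gt-c6-residency,gt=0/ -I 1000".
+ */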
+
+#define XE_PMU_EVENT_GT_MASK GENMASK_ULL(63, 60)
+#define XE_PMU_EVENT_ENGINE_CLASS_MASK GENMASK_ULL(27, 20)
+#define XE_PMU_EVENT_ENGINE_INSTANCE_MASK GENMASK_ULL(19, 12)
+#define XE_PMU_EVENT_ID_MASK GENMASK_ULL(11, 0)
+
+static unsigned int config_to_event_id(u64 config)
+{
+ return FIELD_GET(XE_PMU_EVENT_ID_MASK, config);
+}
+
+static unsigned int config_to_engine_class(u64 config)
+{
+ return FIELD_GET(XE_PMU_EVENT_ENGINE_CLASS_MASK, config);
+}
+
+static unsigned int config_to_engine_instance(u64 config)
+{
+ return FIELD_GET(XE_PMU_EVENT_ENGINE_INSTANCE_MASK, config);
+}
+
+static unsigned int config_to_gt_id(u64 config)
+{
+ return FIELD_GET(XE_PMU_EVENT_GT_MASK, config);
+}
+
+#define XE_PMU_EVENT_GT_C6_RESIDENCY 0x01
+#define XE_PMU_EVENT_ENGINE_ACTIVE_TICKS 0x02
+#define XE_PMU_EVENT_ENGINE_TOTAL_TICKS 0x03
+
+static struct xe_gt *event_to_gt(struct perf_event *event)
+{
+ struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
+ u64 gt = config_to_gt_id(event->attr.config);
+
+ return xe_device_get_gt(xe, gt);
+}
+
+static struct xe_hw_engine *event_to_hwe(struct perf_event *event)
+{
+ struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
+ struct drm_xe_engine_class_instance eci;
+ u64 config = event->attr.config;
+ struct xe_hw_engine *hwe;
+
+ eci.engine_class = config_to_engine_class(config);
+ eci.engine_instance = config_to_engine_instance(config);
+ eci.gt_id = config_to_gt_id(config);
+
+ hwe = xe_hw_engine_lookup(xe, eci);
+ if (!hwe || xe_hw_engine_is_reserved(hwe))
+ return NULL;
+
+ return hwe;
+}
+
+static bool is_engine_event(u64 config)
+{
+ unsigned int event_id = config_to_event_id(config);
+
+ return (event_id == XE_PMU_EVENT_ENGINE_TOTAL_TICKS ||
+ event_id == XE_PMU_EVENT_ENGINE_ACTIVE_TICKS);
+}
+
+static bool event_gt_forcewake(struct perf_event *event)
+{
+ struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
+ u64 config = event->attr.config;
+ struct xe_gt *gt;
+ unsigned int *fw_ref;
+
+ if (!is_engine_event(config))
+ return true;
+
+ gt = xe_device_get_gt(xe, config_to_gt_id(config));
+
+ fw_ref = kzalloc(sizeof(*fw_ref), GFP_KERNEL);
+ if (!fw_ref)
+ return false;
+
+ *fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!*fw_ref) {
+ kfree(fw_ref);
+ return false;
+ }
+
+ event->pmu_private = fw_ref;
+
+ return true;
+}
+
+static bool event_supported(struct xe_pmu *pmu, unsigned int gt,
+ unsigned int id)
+{
+ if (gt >= XE_MAX_GT_PER_TILE)
+ return false;
+
+ return id < sizeof(pmu->supported_events) * BITS_PER_BYTE &&
+ pmu->supported_events & BIT_ULL(id);
+}
+
+static bool event_param_valid(struct perf_event *event)
+{
+ struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
+ unsigned int engine_class, engine_instance;
+ u64 config = event->attr.config;
+ struct xe_gt *gt;
+
+ gt = xe_device_get_gt(xe, config_to_gt_id(config));
+ if (!gt)
+ return false;
+
+ engine_class = config_to_engine_class(config);
+ engine_instance = config_to_engine_instance(config);
+
+ switch (config_to_event_id(config)) {
+ case XE_PMU_EVENT_GT_C6_RESIDENCY:
+ if (engine_class || engine_instance)
+ return false;
+ break;
+ case XE_PMU_EVENT_ENGINE_ACTIVE_TICKS:
+ case XE_PMU_EVENT_ENGINE_TOTAL_TICKS:
+ if (!event_to_hwe(event))
+ return false;
+ break;
+ }
+
+ return true;
+}
+
+static void xe_pmu_event_destroy(struct perf_event *event)
+{
+ struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
+ struct xe_gt *gt;
+ unsigned int *fw_ref = event->pmu_private;
+
+ if (fw_ref) {
+ gt = xe_device_get_gt(xe, config_to_gt_id(event->attr.config));
+ xe_force_wake_put(gt_to_fw(gt), *fw_ref);
+ kfree(fw_ref);
+ event->pmu_private = NULL;
+ }
+
+ drm_WARN_ON(&xe->drm, event->parent);
+ xe_pm_runtime_put(xe);
+ drm_dev_put(&xe->drm);
+}
+
+static int xe_pmu_event_init(struct perf_event *event)
+{
+ struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
+ struct xe_pmu *pmu = &xe->pmu;
+ unsigned int id, gt;
+
+ if (!pmu->registered)
+ return -ENODEV;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /* unsupported modes and filters */
+ if (event->attr.sample_period) /* no sampling */
+ return -EINVAL;
+
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ gt = config_to_gt_id(event->attr.config);
+ id = config_to_event_id(event->attr.config);
+ if (!event_supported(pmu, gt, id))
+ return -ENOENT;
+
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
+ if (!event_param_valid(event))
+ return -ENOENT;
+
+ if (!event->parent) {
+ drm_dev_get(&xe->drm);
+ xe_pm_runtime_get(xe);
+ if (!event_gt_forcewake(event)) {
+ xe_pm_runtime_put(xe);
+ drm_dev_put(&xe->drm);
+ return -EINVAL;
+ }
+ event->destroy = xe_pmu_event_destroy;
+ }
+
+ return 0;
+}
+
+static u64 read_engine_events(struct xe_gt *gt, struct perf_event *event)
+{
+ struct xe_hw_engine *hwe;
+ u64 val = 0;
+
+ hwe = event_to_hwe(event);
+ if (config_to_event_id(event->attr.config) == XE_PMU_EVENT_ENGINE_ACTIVE_TICKS)
+ val = xe_guc_engine_activity_active_ticks(&gt->uc.guc, hwe);
+ else
+ val = xe_guc_engine_activity_total_ticks(&gt->uc.guc, hwe);
+
+ return val;
+}
+
+static u64 __xe_pmu_event_read(struct perf_event *event)
+{
+ struct xe_gt *gt = event_to_gt(event);
+
+ if (!gt)
+ return 0;
+
+ switch (config_to_event_id(event->attr.config)) {
+ case XE_PMU_EVENT_GT_C6_RESIDENCY:
+ return xe_gt_idle_residency_msec(&gt->gtidle);
+ case XE_PMU_EVENT_ENGINE_ACTIVE_TICKS:
+ case XE_PMU_EVENT_ENGINE_TOTAL_TICKS:
+ return read_engine_events(gt, event);
+ }
+
+ return 0;
+}
+
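+/*
+ * Accumulate the delta between the last stored prev_count and the current
+ * hardware value. local64_try_cmpxchg() refreshes prev on failure, so a
+ * racing reader simply retries until the delta it adds is consistent with
+ * the prev_count it observed.
+ */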
+static void xe_pmu_event_update(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ u64 prev, new;
+
+ prev = local64_read(&hwc->prev_count);
+ do {
+ new = __xe_pmu_event_read(event);
+ } while (!local64_try_cmpxchg(&hwc->prev_count, &prev, new));
+
+ local64_add(new - prev, &event->count);
+}
+
+static void xe_pmu_event_read(struct perf_event *event)
+{
+ struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
+ struct xe_pmu *pmu = &xe->pmu;
+
+ if (!pmu->registered) {
+ event->hw.state = PERF_HES_STOPPED;
+ return;
+ }
+
+ xe_pmu_event_update(event);
+}
+
+static void xe_pmu_enable(struct perf_event *event)
+{
+	/*
+	 * Store the current counter value so we can report the correct delta
+	 * for all listeners, even when the event was already enabled and has
+	 * an existing non-zero value.
+	 */
+ local64_set(&event->hw.prev_count, __xe_pmu_event_read(event));
+}
+
+static void xe_pmu_event_start(struct perf_event *event, int flags)
+{
+ struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
+ struct xe_pmu *pmu = &xe->pmu;
+
+ if (!pmu->registered)
+ return;
+
+ xe_pmu_enable(event);
+ event->hw.state = 0;
+}
+
+static void xe_pmu_event_stop(struct perf_event *event, int flags)
+{
+ struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
+ struct xe_pmu *pmu = &xe->pmu;
+
+ if (pmu->registered)
+ if (flags & PERF_EF_UPDATE)
+ xe_pmu_event_update(event);
+
+ event->hw.state = PERF_HES_STOPPED;
+}
+
+static int xe_pmu_event_add(struct perf_event *event, int flags)
+{
+ struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
+ struct xe_pmu *pmu = &xe->pmu;
+
+ if (!pmu->registered)
+ return -ENODEV;
+
+ if (flags & PERF_EF_START)
+ xe_pmu_event_start(event, flags);
+
+ return 0;
+}
+
+static void xe_pmu_event_del(struct perf_event *event, int flags)
+{
+ xe_pmu_event_stop(event, PERF_EF_UPDATE);
+}
+
+PMU_FORMAT_ATTR(gt, "config:60-63");
+PMU_FORMAT_ATTR(engine_class, "config:20-27");
+PMU_FORMAT_ATTR(engine_instance, "config:12-19");
+PMU_FORMAT_ATTR(event, "config:0-11");
+
+static struct attribute *pmu_format_attrs[] = {
+ &format_attr_event.attr,
+ &format_attr_engine_class.attr,
+ &format_attr_engine_instance.attr,
+ &format_attr_gt.attr,
+ NULL,
+};
+
+static const struct attribute_group pmu_format_attr_group = {
+ .name = "format",
+ .attrs = pmu_format_attrs,
+};
+
+static ssize_t event_attr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct perf_pmu_events_attr *pmu_attr =
+ container_of(attr, struct perf_pmu_events_attr, attr);
+
+ return sprintf(buf, "event=%#04llx\n", pmu_attr->id);
+}
+
+#define XE_EVENT_ATTR(name_, v_, id_) \
+ PMU_EVENT_ATTR(name_, pmu_event_ ## v_, id_, event_attr_show)
+
+#define XE_EVENT_ATTR_UNIT(name_, v_, unit_) \
+ PMU_EVENT_ATTR_STRING(name_.unit, pmu_event_unit_ ## v_, unit_)
+
+#define XE_EVENT_ATTR_GROUP(v_, id_, ...) \
+ static struct attribute *pmu_attr_ ##v_[] = { \
+ __VA_ARGS__, \
+ NULL \
+ }; \
+ static umode_t is_visible_##v_(struct kobject *kobj, \
+ struct attribute *attr, int idx) \
+ { \
+ struct perf_pmu_events_attr *pmu_attr; \
+ struct xe_pmu *pmu; \
+ \
+ pmu_attr = container_of(attr, typeof(*pmu_attr), attr.attr); \
+ pmu = container_of(dev_get_drvdata(kobj_to_dev(kobj)), \
+ typeof(*pmu), base); \
+ \
+ return event_supported(pmu, 0, id_) ? attr->mode : 0; \
+ } \
+ static const struct attribute_group pmu_group_ ##v_ = { \
+ .name = "events", \
+ .attrs = pmu_attr_ ## v_, \
+ .is_visible = is_visible_ ## v_, \
+ }
+
+#define XE_EVENT_ATTR_SIMPLE(name_, v_, id_, unit_) \
+ XE_EVENT_ATTR(name_, v_, id_) \
+ XE_EVENT_ATTR_UNIT(name_, v_, unit_) \
+ XE_EVENT_ATTR_GROUP(v_, id_, &pmu_event_ ##v_.attr.attr, \
+ &pmu_event_unit_ ##v_.attr.attr)
+
+#define XE_EVENT_ATTR_NOUNIT(name_, v_, id_) \
+ XE_EVENT_ATTR(name_, v_, id_) \
+ XE_EVENT_ATTR_GROUP(v_, id_, &pmu_event_ ##v_.attr.attr)
+
+XE_EVENT_ATTR_SIMPLE(gt-c6-residency, gt_c6_residency, XE_PMU_EVENT_GT_C6_RESIDENCY, "ms");
+XE_EVENT_ATTR_NOUNIT(engine-active-ticks, engine_active_ticks, XE_PMU_EVENT_ENGINE_ACTIVE_TICKS);
+XE_EVENT_ATTR_NOUNIT(engine-total-ticks, engine_total_ticks, XE_PMU_EVENT_ENGINE_TOTAL_TICKS);
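+
+/*
+ * Each invocation above creates an "events" attribute group for one event.
+ * For example, gt-c6-residency results in the sysfs files
+ * events/gt-c6-residency (showing "event=0x01", see event_attr_show()) and
+ * events/gt-c6-residency.unit (showing "ms"); the generated is_visible()
+ * callback hides the files when the event is not in pmu->supported_events.
+ */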
+
+static struct attribute *pmu_empty_event_attrs[] = {
+ /* Empty - all events are added as groups with .attr_update() */
+ NULL,
+};
+
+static const struct attribute_group pmu_events_attr_group = {
+ .name = "events",
+ .attrs = pmu_empty_event_attrs,
+};
+
+static const struct attribute_group *pmu_events_attr_update[] = {
+ &pmu_group_gt_c6_residency,
+ &pmu_group_engine_active_ticks,
+ &pmu_group_engine_total_ticks,
+ NULL,
+};
+
+static void set_supported_events(struct xe_pmu *pmu)
+{
+ struct xe_device *xe = container_of(pmu, typeof(*xe), pmu);
+ struct xe_gt *gt = xe_device_get_gt(xe, 0);
+
+ if (!xe->info.skip_guc_pc)
+ pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_GT_C6_RESIDENCY);
+
+ if (xe_guc_engine_activity_supported(&gt->uc.guc)) {
+ pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_ENGINE_ACTIVE_TICKS);
+ pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_ENGINE_TOTAL_TICKS);
+ }
+}
+
+/**
+ * xe_pmu_unregister() - Remove/cleanup PMU registration
+ * @arg: Pointer to the xe_pmu structure to unregister
+ */
+static void xe_pmu_unregister(void *arg)
+{
+ struct xe_pmu *pmu = arg;
+ struct xe_device *xe = container_of(pmu, typeof(*xe), pmu);
+
+ if (!pmu->registered)
+ return;
+
+ pmu->registered = false;
+
+ perf_pmu_unregister(&pmu->base);
+ kfree(pmu->name);
+}
+
+/**
+ * xe_pmu_register() - Define basic PMU properties for Xe and add event callbacks.
+ * @pmu: the PMU object
+ *
+ * Return: 0 on success or a negative error code otherwise.
+ */
+int xe_pmu_register(struct xe_pmu *pmu)
+{
+ struct xe_device *xe = container_of(pmu, typeof(*xe), pmu);
+ static const struct attribute_group *attr_groups[] = {
+ &pmu_format_attr_group,
+ &pmu_events_attr_group,
+ NULL
+ };
+ int ret = -ENOMEM;
+ char *name;
+
+ BUILD_BUG_ON(XE_MAX_GT_PER_TILE != XE_PMU_MAX_GT);
+
+ if (IS_SRIOV_VF(xe))
+ return 0;
+
+ name = kasprintf(GFP_KERNEL, "xe_%s",
+ dev_name(xe->drm.dev));
+ if (!name)
+ goto err;
+
+ /* tools/perf reserves colons as special. */
+ strreplace(name, ':', '_');
+
+ pmu->name = name;
+ pmu->base.attr_groups = attr_groups;
+ pmu->base.attr_update = pmu_events_attr_update;
+ pmu->base.scope = PERF_PMU_SCOPE_SYS_WIDE;
+ pmu->base.module = THIS_MODULE;
+ pmu->base.task_ctx_nr = perf_invalid_context;
+ pmu->base.event_init = xe_pmu_event_init;
+ pmu->base.add = xe_pmu_event_add;
+ pmu->base.del = xe_pmu_event_del;
+ pmu->base.start = xe_pmu_event_start;
+ pmu->base.stop = xe_pmu_event_stop;
+ pmu->base.read = xe_pmu_event_read;
+
+ set_supported_events(pmu);
+
+ ret = perf_pmu_register(&pmu->base, pmu->name, -1);
+ if (ret)
+ goto err_name;
+
+ pmu->registered = true;
+
+ return devm_add_action_or_reset(xe->drm.dev, xe_pmu_unregister, pmu);
+
+err_name:
+ kfree(name);
+err:
+ drm_err(&xe->drm, "Failed to register PMU (ret=%d)!\n", ret);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/xe/xe_pmu.h b/drivers/gpu/drm/xe/xe_pmu.h
new file mode 100644
index 000000000000..60c37126f87e
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pmu.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_PMU_H_
+#define _XE_PMU_H_
+
+#include "xe_pmu_types.h"
+
+#if IS_ENABLED(CONFIG_PERF_EVENTS)
+int xe_pmu_register(struct xe_pmu *pmu);
+#else
+static inline int xe_pmu_register(struct xe_pmu *pmu) { return 0; }
+#endif
+
+#endif
+
diff --git a/drivers/gpu/drm/xe/xe_pmu_types.h b/drivers/gpu/drm/xe/xe_pmu_types.h
new file mode 100644
index 000000000000..f5ba4d56622c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pmu_types.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_PMU_TYPES_H_
+#define _XE_PMU_TYPES_H_
+
+#include <linux/perf_event.h>
+#include <linux/spinlock_types.h>
+
+#define XE_PMU_MAX_GT 2
+
+/**
+ * struct xe_pmu - PMU related data per Xe device
+ *
+ * Stores per device PMU info that includes event/perf attributes and sampling
+ * counters across all GTs for this device.
+ */
+struct xe_pmu {
+ /**
+ * @base: PMU base.
+ */
+ struct pmu base;
+ /**
+ * @registered: PMU is registered and not in the unregistering process.
+ */
+ bool registered;
+ /**
+ * @name: Name as registered with perf core.
+ */
+ const char *name;
+ /**
+ * @supported_events: Bitmap of supported events, indexed by event id
+ */
+ u64 supported_events;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index dc24baa84092..ffaf0d02dc7d 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -20,6 +20,7 @@
#include "xe_res_cursor.h"
#include "xe_sched_job.h"
#include "xe_sync.h"
+#include "xe_svm.h"
#include "xe_trace.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"
@@ -219,6 +220,20 @@ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
}
/**
+ * xe_pt_clear() - Clear a page-table.
+ * @xe: xe device.
+ * @pt: The page-table.
+ *
+ * Clears the page-table by setting it to zero.
+ */
+void xe_pt_clear(struct xe_device *xe, struct xe_pt *pt)
+{
+ struct iosys_map *map = &pt->bo->vmap;
+
+ xe_map_memset(xe, map, 0, 0, SZ_4K);
+}
+
+/**
* DOC: Pagetable building
*
* Below we use the term "page-table" for both page-directories, containing
@@ -593,6 +608,7 @@ static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
* range.
* @tile: The tile we're building for.
* @vma: The vma indicating the address range.
+ * @range: The SVM range indicating the address range, or NULL to use @vma's range.
* @entries: Storage for the update entries used for connecting the tree to
* the main tree at commit time.
* @num_entries: On output contains the number of @entries used.
@@ -608,6 +624,7 @@ static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
*/
static int
xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
+ struct xe_svm_range *range,
struct xe_vm_pgtable_update *entries, u32 *num_entries)
{
struct xe_device *xe = tile_to_xe(tile);
@@ -625,14 +642,43 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
.vm = xe_vma_vm(vma),
.tile = tile,
.curs = &curs,
- .va_curs_start = xe_vma_start(vma),
+ .va_curs_start = range ? range->base.itree.start :
+ xe_vma_start(vma),
.vma = vma,
.wupd.entries = entries,
- .needs_64K = (xe_vma_vm(vma)->flags & XE_VM_FLAG_64K) && is_devmem,
};
struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
int ret;
+ if (range) {
+ /* Move this entire thing to xe_svm.c? */
+ xe_svm_notifier_lock(xe_vma_vm(vma));
+ if (!xe_svm_range_pages_valid(range)) {
+ xe_svm_range_debug(range, "BIND PREPARE - RETRY");
+ xe_svm_notifier_unlock(xe_vma_vm(vma));
+ return -EAGAIN;
+ }
+ if (xe_svm_range_has_dma_mapping(range)) {
+ xe_res_first_dma(range->base.dma_addr, 0,
+ range->base.itree.last + 1 - range->base.itree.start,
+ &curs);
+ is_devmem = xe_res_is_vram(&curs);
+ if (is_devmem)
+ xe_svm_range_debug(range, "BIND PREPARE - DMA VRAM");
+ else
+ xe_svm_range_debug(range, "BIND PREPARE - DMA");
+ } else {
+ xe_assert(xe, false);
+ }
+		/*
+		 * Note that when the lock is dropped, the resource cursor's dma
+		 * addresses may become stale, but the bind will be aborted anyway
+		 * at commit time.
+		 */
+ xe_svm_notifier_unlock(xe_vma_vm(vma));
+ }
+
+ xe_walk.needs_64K = (xe_vma_vm(vma)->flags & XE_VM_FLAG_64K) && is_devmem;
+
/**
* Default atomic expectations for different allocation scenarios are as follows:
*
@@ -654,7 +700,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
* gets migrated to LMEM, bind such allocations with
* device atomics enabled.
*/
- else if (is_devmem && !xe_bo_has_single_placement(bo))
+ else if (is_devmem)
xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
} else {
xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
@@ -670,15 +716,16 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
if (is_devmem) {
xe_walk.default_pte |= XE_PPGTT_PTE_DM;
- xe_walk.dma_offset = vram_region_gpu_offset(bo->ttm.resource);
+ xe_walk.dma_offset = bo ? vram_region_gpu_offset(bo->ttm.resource) : 0;
}
if (!xe_vma_has_no_bo(vma) && xe_bo_is_stolen(bo))
xe_walk.dma_offset = xe_ttm_stolen_gpu_offset(xe_bo_device(bo));
- xe_bo_assert_held(bo);
+ if (!range)
+ xe_bo_assert_held(bo);
- if (!xe_vma_is_null(vma)) {
+ if (!xe_vma_is_null(vma) && !range) {
if (xe_vma_is_userptr(vma))
xe_res_first_sg(to_userptr_vma(vma)->userptr.sg, 0,
xe_vma_size(vma), &curs);
@@ -688,12 +735,14 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
else
xe_res_first_sg(xe_bo_sg(bo), xe_vma_bo_offset(vma),
xe_vma_size(vma), &curs);
- } else {
+ } else if (!range) {
curs.size = xe_vma_size(vma);
}
- ret = xe_pt_walk_range(&pt->base, pt->level, xe_vma_start(vma),
- xe_vma_end(vma), &xe_walk.base);
+ ret = xe_pt_walk_range(&pt->base, pt->level,
+ range ? range->base.itree.start : xe_vma_start(vma),
+ range ? range->base.itree.last + 1 : xe_vma_end(vma),
+ &xe_walk.base);
*num_entries = xe_walk.wupd.num_used_entries;
return ret;
@@ -837,6 +886,46 @@ bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma)
return xe_walk.needs_invalidate;
}
+/**
+ * xe_pt_zap_ptes_range() - Zap (zero) gpu ptes of a SVM range
+ * @tile: The tile we're zapping for.
+ * @vm: The VM we're zapping for.
+ * @range: The SVM range we're zapping for.
+ *
+ * SVM invalidation needs to be able to zap the GPU PTEs of a given address
+ * range. In order to do that, this function needs access to the shared
+ * page-table entries so it can either clear the leaf PTEs or clear the
+ * pointers to lower-level page-tables. The caller is required to hold the SVM
+ * notifier lock.
+ *
+ * Return: Whether ptes were actually updated and a TLB invalidation is
+ * required.
+ */
+bool xe_pt_zap_ptes_range(struct xe_tile *tile, struct xe_vm *vm,
+ struct xe_svm_range *range)
+{
+ struct xe_pt_zap_ptes_walk xe_walk = {
+ .base = {
+ .ops = &xe_pt_zap_ptes_ops,
+ .shifts = xe_normal_pt_shifts,
+ .max_level = XE_PT_HIGHEST_LEVEL,
+ },
+ .tile = tile,
+ };
+ struct xe_pt *pt = vm->pt_root[tile->id];
+ u8 pt_mask = (range->tile_present & ~range->tile_invalidated);
+
+ xe_svm_assert_in_notifier(vm);
+
+ if (!(pt_mask & BIT(tile->id)))
+ return false;
+
+ (void)xe_pt_walk_shared(&pt->base, pt->level, range->base.itree.start,
+ range->base.itree.last + 1, &xe_walk.base);
+
+ return xe_walk.needs_invalidate;
+}
+
static void
xe_vm_populate_pgtable(struct xe_migrate_pt_update *pt_update, struct xe_tile *tile,
struct iosys_map *map, void *data,
@@ -880,13 +969,19 @@ static void xe_pt_cancel_bind(struct xe_vma *vma,
}
}
+#define XE_INVALID_VMA ((struct xe_vma *)(0xdeaddeadull))
+
static void xe_pt_commit_prepare_locks_assert(struct xe_vma *vma)
{
- struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_vm *vm;
+
+ if (vma == XE_INVALID_VMA)
+ return;
+ vm = xe_vma_vm(vma);
lockdep_assert_held(&vm->lock);
- if (!xe_vma_is_userptr(vma) && !xe_vma_is_null(vma))
+ if (!xe_vma_has_no_bo(vma))
dma_resv_assert_held(xe_vma_bo(vma)->ttm.base.resv);
xe_vm_assert_held(vm);
@@ -894,8 +989,12 @@ static void xe_pt_commit_prepare_locks_assert(struct xe_vma *vma)
static void xe_pt_commit_locks_assert(struct xe_vma *vma)
{
- struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_vm *vm;
+
+ if (vma == XE_INVALID_VMA)
+ return;
+ vm = xe_vma_vm(vma);
xe_pt_commit_prepare_locks_assert(vma);
if (xe_vma_is_userptr(vma))
@@ -923,7 +1022,8 @@ static void xe_pt_commit(struct xe_vma *vma,
int j_ = j + entries[i].ofs;
pt_dir->children[j_] = pt_dir->staging[j_];
- xe_pt_destroy(oldpte, xe_vma_vm(vma)->flags, deferred);
+ xe_pt_destroy(oldpte, (vma == XE_INVALID_VMA) ? 0 :
+ xe_vma_vm(vma)->flags, deferred);
}
}
}
@@ -1002,12 +1102,13 @@ static void xe_pt_free_bind(struct xe_vm_pgtable_update *entries,
static int
xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
+ struct xe_svm_range *range,
struct xe_vm_pgtable_update *entries, u32 *num_entries)
{
int err;
*num_entries = 0;
- err = xe_pt_stage_bind(tile, vma, entries, num_entries);
+ err = xe_pt_stage_bind(tile, vma, range, entries, num_entries);
if (!err)
xe_tile_assert(tile, *num_entries);
@@ -1090,6 +1191,11 @@ static int op_add_deps(struct xe_vm *vm, struct xe_vma_op *op,
{
int err = 0;
+	/*
+	 * No need to check for is_cpu_addr_mirror here, as vma_add_deps() is a
+	 * NOP if the VMA is a CPU address mirror.
+	 */
+ */
+
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
if (!op->map.immediate && xe_vm_in_fault_mode(vm))
@@ -1108,6 +1214,8 @@ static int op_add_deps(struct xe_vm *vm, struct xe_vma_op *op,
case DRM_GPUVA_OP_PREFETCH:
err = vma_add_deps(gpuva_to_vma(op->base.prefetch.va), job);
break;
+ case DRM_GPUVA_OP_DRIVER:
+ break;
default:
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
}
@@ -1312,6 +1420,40 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
return err;
}
+static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
+{
+ struct xe_vm *vm = pt_update->vops->vm;
+ struct xe_vma_ops *vops = pt_update->vops;
+ struct xe_vma_op *op;
+ int err;
+
+ err = xe_pt_pre_commit(pt_update);
+ if (err)
+ return err;
+
+ xe_svm_notifier_lock(vm);
+
+ list_for_each_entry(op, &vops->list, link) {
+ struct xe_svm_range *range = op->map_range.range;
+
+ if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
+ continue;
+
+ xe_svm_range_debug(range, "PRE-COMMIT");
+
+ xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
+ xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE);
+
+ if (!xe_svm_range_pages_valid(range)) {
+ xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
+ xe_svm_notifier_unlock(vm);
+ return -EAGAIN;
+ }
+ }
+
+ return 0;
+}
+
struct invalidation_fence {
struct xe_gt_tlb_invalidation_fence base;
struct xe_gt *gt;
@@ -1497,7 +1639,9 @@ static const struct xe_pt_walk_ops xe_pt_stage_unbind_ops = {
* xe_pt_stage_unbind() - Build page-table update structures for an unbind
* operation
* @tile: The tile we're unbinding for.
+ * @vm: The vm
* @vma: The vma we're unbinding.
+ * @range: The range we're unbinding.
* @entries: Caller-provided storage for the update structures.
*
* Builds page-table update structures for an unbind operation. The function
@@ -1507,9 +1651,14 @@ static const struct xe_pt_walk_ops xe_pt_stage_unbind_ops = {
*
* Return: The number of entries used.
*/
-static unsigned int xe_pt_stage_unbind(struct xe_tile *tile, struct xe_vma *vma,
+static unsigned int xe_pt_stage_unbind(struct xe_tile *tile,
+ struct xe_vm *vm,
+ struct xe_vma *vma,
+ struct xe_svm_range *range,
struct xe_vm_pgtable_update *entries)
{
+ u64 start = range ? range->base.itree.start : xe_vma_start(vma);
+ u64 end = range ? range->base.itree.last + 1 : xe_vma_end(vma);
struct xe_pt_stage_unbind_walk xe_walk = {
.base = {
.ops = &xe_pt_stage_unbind_ops,
@@ -1518,14 +1667,14 @@ static unsigned int xe_pt_stage_unbind(struct xe_tile *tile, struct xe_vma *vma,
.staging = true,
},
.tile = tile,
- .modified_start = xe_vma_start(vma),
- .modified_end = xe_vma_end(vma),
+ .modified_start = start,
+ .modified_end = end,
.wupd.entries = entries,
};
- struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
+ struct xe_pt *pt = vm->pt_root[tile->id];
- (void)xe_pt_walk_shared(&pt->base, pt->level, xe_vma_start(vma),
- xe_vma_end(vma), &xe_walk.base);
+ (void)xe_pt_walk_shared(&pt->base, pt->level, start, end,
+ &xe_walk.base);
return xe_walk.wupd.num_used_entries;
}
@@ -1605,12 +1754,12 @@ xe_pt_commit_prepare_unbind(struct xe_vma *vma,
static void
xe_pt_update_ops_rfence_interval(struct xe_vm_pgtable_update_ops *pt_update_ops,
- struct xe_vma *vma)
+ u64 start, u64 end)
{
+ u64 last;
u32 current_op = pt_update_ops->current_op;
struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
int i, level = 0;
- u64 start, last;
for (i = 0; i < pt_op->num_entries; i++) {
const struct xe_vm_pgtable_update *entry = &pt_op->entries[i];
@@ -1620,8 +1769,8 @@ xe_pt_update_ops_rfence_interval(struct xe_vm_pgtable_update_ops *pt_update_ops,
}
/* Greedy (non-optimal) calculation but simple */
- start = ALIGN_DOWN(xe_vma_start(vma), 0x1ull << xe_pt_shift(level));
- last = ALIGN(xe_vma_end(vma), 0x1ull << xe_pt_shift(level)) - 1;
+ start = ALIGN_DOWN(start, 0x1ull << xe_pt_shift(level));
+ last = ALIGN(end, 0x1ull << xe_pt_shift(level)) - 1;
if (start < pt_update_ops->start)
pt_update_ops->start = start;
@@ -1648,6 +1797,7 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
int err;
+ xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma));
xe_bo_assert_held(xe_vma_bo(vma));
vm_dbg(&xe_vma_vm(vma)->xe->drm,
@@ -1662,7 +1812,7 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
if (err)
return err;
- err = xe_pt_prepare_bind(tile, vma, pt_op->entries,
+ err = xe_pt_prepare_bind(tile, vma, NULL, pt_op->entries,
&pt_op->num_entries);
if (!err) {
xe_tile_assert(tile, pt_op->num_entries <=
@@ -1670,7 +1820,9 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
pt_op->num_entries, true);
- xe_pt_update_ops_rfence_interval(pt_update_ops, vma);
+ xe_pt_update_ops_rfence_interval(pt_update_ops,
+ xe_vma_start(vma),
+ xe_vma_end(vma));
++pt_update_ops->current_op;
pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
@@ -1704,6 +1856,48 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
return err;
}
+static int bind_range_prepare(struct xe_vm *vm, struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma *vma, struct xe_svm_range *range)
+{
+ u32 current_op = pt_update_ops->current_op;
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
+ int err;
+
+ xe_tile_assert(tile, xe_vma_is_cpu_addr_mirror(vma));
+
+ vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ "Preparing bind, with range [%lx...%lx)\n",
+ range->base.itree.start, range->base.itree.last);
+
+ pt_op->vma = NULL;
+ pt_op->bind = true;
+ pt_op->rebind = BIT(tile->id) & range->tile_present;
+
+ err = xe_pt_prepare_bind(tile, vma, range, pt_op->entries,
+ &pt_op->num_entries);
+ if (!err) {
+ xe_tile_assert(tile, pt_op->num_entries <=
+ ARRAY_SIZE(pt_op->entries));
+ xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
+ pt_op->num_entries, true);
+
+ xe_pt_update_ops_rfence_interval(pt_update_ops,
+ range->base.itree.start,
+ range->base.itree.last + 1);
+ ++pt_update_ops->current_op;
+ pt_update_ops->needs_svm_lock = true;
+
+ pt_op->vma = vma;
+ xe_pt_commit_prepare_bind(vma, pt_op->entries,
+ pt_op->num_entries, pt_op->rebind);
+ } else {
+ xe_pt_cancel_bind(vma, pt_op->entries, pt_op->num_entries);
+ }
+
+ return err;
+}
+
static int unbind_op_prepare(struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
struct xe_vma *vma)
@@ -1715,19 +1909,13 @@ static int unbind_op_prepare(struct xe_tile *tile,
if (!((vma->tile_present | vma->tile_staged) & BIT(tile->id)))
return 0;
+ xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma));
xe_bo_assert_held(xe_vma_bo(vma));
vm_dbg(&xe_vma_vm(vma)->xe->drm,
"Preparing unbind, with range [%llx...%llx)\n",
xe_vma_start(vma), xe_vma_end(vma) - 1);
- /*
- * Wait for invalidation to complete. Can corrupt internal page table
- * state if an invalidation is running while preparing an unbind.
- */
- if (xe_vma_is_userptr(vma) && xe_vm_in_fault_mode(xe_vma_vm(vma)))
- mmu_interval_read_begin(&to_userptr_vma(vma)->userptr.notifier);
-
pt_op->vma = vma;
pt_op->bind = false;
pt_op->rebind = false;
@@ -1736,11 +1924,13 @@ static int unbind_op_prepare(struct xe_tile *tile,
if (err)
return err;
- pt_op->num_entries = xe_pt_stage_unbind(tile, vma, pt_op->entries);
+ pt_op->num_entries = xe_pt_stage_unbind(tile, xe_vma_vm(vma),
+ vma, NULL, pt_op->entries);
xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
pt_op->num_entries, false);
- xe_pt_update_ops_rfence_interval(pt_update_ops, vma);
+ xe_pt_update_ops_rfence_interval(pt_update_ops, xe_vma_start(vma),
+ xe_vma_end(vma));
++pt_update_ops->current_op;
pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
pt_update_ops->needs_invalidation = true;
@@ -1750,6 +1940,42 @@ static int unbind_op_prepare(struct xe_tile *tile,
return 0;
}
+static int unbind_range_prepare(struct xe_vm *vm,
+ struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_svm_range *range)
+{
+ u32 current_op = pt_update_ops->current_op;
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
+
+ if (!(range->tile_present & BIT(tile->id)))
+ return 0;
+
+ vm_dbg(&vm->xe->drm,
+ "Preparing unbind, with range [%lx...%lx)\n",
+ range->base.itree.start, range->base.itree.last);
+
+ pt_op->vma = XE_INVALID_VMA;
+ pt_op->bind = false;
+ pt_op->rebind = false;
+
+ pt_op->num_entries = xe_pt_stage_unbind(tile, vm, NULL, range,
+ pt_op->entries);
+
+ xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
+ pt_op->num_entries, false);
+ xe_pt_update_ops_rfence_interval(pt_update_ops, range->base.itree.start,
+ range->base.itree.last + 1);
+ ++pt_update_ops->current_op;
+ pt_update_ops->needs_svm_lock = true;
+ pt_update_ops->needs_invalidation = true;
+
+ xe_pt_commit_prepare_unbind(XE_INVALID_VMA, pt_op->entries,
+ pt_op->num_entries);
+
+ return 0;
+}
+
static int op_prepare(struct xe_vm *vm,
struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
@@ -1761,15 +1987,21 @@ static int op_prepare(struct xe_vm *vm,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
- if (!op->map.immediate && xe_vm_in_fault_mode(vm))
+ if ((!op->map.immediate && xe_vm_in_fault_mode(vm)) ||
+ op->map.is_cpu_addr_mirror)
break;
err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma);
pt_update_ops->wait_vm_kernel = true;
break;
case DRM_GPUVA_OP_REMAP:
- err = unbind_op_prepare(tile, pt_update_ops,
- gpuva_to_vma(op->base.remap.unmap->va));
+ {
+ struct xe_vma *old = gpuva_to_vma(op->base.remap.unmap->va);
+
+ if (xe_vma_is_cpu_addr_mirror(old))
+ break;
+
+ err = unbind_op_prepare(tile, pt_update_ops, old);
if (!err && op->remap.prev) {
err = bind_op_prepare(vm, tile, pt_update_ops,
@@ -1782,15 +2014,40 @@ static int op_prepare(struct xe_vm *vm,
pt_update_ops->wait_vm_bookkeep = true;
}
break;
+ }
case DRM_GPUVA_OP_UNMAP:
- err = unbind_op_prepare(tile, pt_update_ops,
- gpuva_to_vma(op->base.unmap.va));
+ {
+ struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
+
+ if (xe_vma_is_cpu_addr_mirror(vma))
+ break;
+
+ err = unbind_op_prepare(tile, pt_update_ops, vma);
break;
+ }
case DRM_GPUVA_OP_PREFETCH:
- err = bind_op_prepare(vm, tile, pt_update_ops,
- gpuva_to_vma(op->base.prefetch.va));
+ {
+ struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
+
+ if (xe_vma_is_cpu_addr_mirror(vma))
+ break;
+
+ err = bind_op_prepare(vm, tile, pt_update_ops, vma);
pt_update_ops->wait_vm_kernel = true;
break;
+ }
+ case DRM_GPUVA_OP_DRIVER:
+ if (op->subop == XE_VMA_SUBOP_MAP_RANGE) {
+ xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
+
+ err = bind_range_prepare(vm, tile, pt_update_ops,
+ op->map_range.vma,
+ op->map_range.range);
+ } else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) {
+ err = unbind_range_prepare(vm, tile, pt_update_ops,
+ op->unmap_range.range);
+ }
+ break;
default:
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
}
@@ -1860,6 +2117,8 @@ static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
struct xe_vma *vma, struct dma_fence *fence,
struct dma_fence *fence2)
{
+ xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma));
+
if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {
dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
pt_update_ops->wait_vm_bookkeep ?
@@ -1893,6 +2152,8 @@ static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
struct xe_vma *vma, struct dma_fence *fence,
struct dma_fence *fence2)
{
+ xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma));
+
if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {
dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
pt_update_ops->wait_vm_bookkeep ?
@@ -1927,16 +2188,21 @@ static void op_commit(struct xe_vm *vm,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
- if (!op->map.immediate && xe_vm_in_fault_mode(vm))
+ if ((!op->map.immediate && xe_vm_in_fault_mode(vm)) ||
+ op->map.is_cpu_addr_mirror)
break;
bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence,
fence2);
break;
case DRM_GPUVA_OP_REMAP:
- unbind_op_commit(vm, tile, pt_update_ops,
- gpuva_to_vma(op->base.remap.unmap->va), fence,
- fence2);
+ {
+ struct xe_vma *old = gpuva_to_vma(op->base.remap.unmap->va);
+
+ if (xe_vma_is_cpu_addr_mirror(old))
+ break;
+
+ unbind_op_commit(vm, tile, pt_update_ops, old, fence, fence2);
if (op->remap.prev)
bind_op_commit(vm, tile, pt_update_ops, op->remap.prev,
@@ -1945,14 +2211,35 @@ static void op_commit(struct xe_vm *vm,
bind_op_commit(vm, tile, pt_update_ops, op->remap.next,
fence, fence2);
break;
+ }
case DRM_GPUVA_OP_UNMAP:
- unbind_op_commit(vm, tile, pt_update_ops,
- gpuva_to_vma(op->base.unmap.va), fence, fence2);
+ {
+ struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
+
+ if (!xe_vma_is_cpu_addr_mirror(vma))
+ unbind_op_commit(vm, tile, pt_update_ops, vma, fence,
+ fence2);
break;
+ }
case DRM_GPUVA_OP_PREFETCH:
- bind_op_commit(vm, tile, pt_update_ops,
- gpuva_to_vma(op->base.prefetch.va), fence, fence2);
+ {
+ struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
+
+ if (!xe_vma_is_cpu_addr_mirror(vma))
+ bind_op_commit(vm, tile, pt_update_ops, vma, fence,
+ fence2);
+ break;
+ }
+ case DRM_GPUVA_OP_DRIVER:
+ {
+ if (op->subop == XE_VMA_SUBOP_MAP_RANGE) {
+ op->map_range.range->tile_present |= BIT(tile->id);
+ op->map_range.range->tile_invalidated &= ~BIT(tile->id);
+ } else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) {
+ op->unmap_range.range->tile_present &= ~BIT(tile->id);
+ }
break;
+ }
default:
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
}
@@ -1970,6 +2257,12 @@ static const struct xe_migrate_pt_update_ops userptr_migrate_ops = {
.pre_commit = xe_pt_userptr_pre_commit,
};
+static const struct xe_migrate_pt_update_ops svm_migrate_ops = {
+ .populate = xe_vm_populate_pgtable,
+ .clear = xe_migrate_clear_pgtable_callback,
+ .pre_commit = xe_pt_svm_pre_commit,
+};
+
/**
* xe_pt_update_ops_run() - Run PT update operations
* @tile: Tile of PT update operations
@@ -1995,7 +2288,9 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
struct xe_vma_op *op;
int err = 0, i;
struct xe_migrate_pt_update update = {
- .ops = pt_update_ops->needs_userptr_lock ?
+ .ops = pt_update_ops->needs_svm_lock ?
+ &svm_migrate_ops :
+ pt_update_ops->needs_userptr_lock ?
&userptr_migrate_ops :
&migrate_ops,
.vops = vops,
@@ -2116,6 +2411,8 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
&ifence->base.base, &mfence->base.base);
}
+ if (pt_update_ops->needs_svm_lock)
+ xe_svm_notifier_unlock(vm);
if (pt_update_ops->needs_userptr_lock)
up_read(&vm->userptr.notifier_lock);
diff --git a/drivers/gpu/drm/xe/xe_pt.h b/drivers/gpu/drm/xe/xe_pt.h
index 9ab386431cad..5ecf003d513c 100644
--- a/drivers/gpu/drm/xe/xe_pt.h
+++ b/drivers/gpu/drm/xe/xe_pt.h
@@ -13,6 +13,7 @@ struct dma_fence;
struct xe_bo;
struct xe_device;
struct xe_exec_queue;
+struct xe_svm_range;
struct xe_sync_entry;
struct xe_tile;
struct xe_vm;
@@ -35,6 +36,8 @@ void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred);
+void xe_pt_clear(struct xe_device *xe, struct xe_pt *pt);
+
int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops);
struct dma_fence *xe_pt_update_ops_run(struct xe_tile *tile,
struct xe_vma_ops *vops);
@@ -42,5 +45,7 @@ void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops);
void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops);
bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma);
+bool xe_pt_zap_ptes_range(struct xe_tile *tile, struct xe_vm *vm,
+ struct xe_svm_range *range);
#endif
diff --git a/drivers/gpu/drm/xe/xe_pt_types.h b/drivers/gpu/drm/xe/xe_pt_types.h
index 384cc04de719..69eab6f37cfe 100644
--- a/drivers/gpu/drm/xe/xe_pt_types.h
+++ b/drivers/gpu/drm/xe/xe_pt_types.h
@@ -104,6 +104,8 @@ struct xe_vm_pgtable_update_ops {
u32 num_ops;
/** @current_op: current operations */
u32 current_op;
+ /** @needs_svm_lock: Needs SVM lock */
+ bool needs_svm_lock;
/** @needs_userptr_lock: Needs userptr lock */
bool needs_userptr_lock;
/** @needs_invalidation: Needs invalidation */
diff --git a/drivers/gpu/drm/xe/xe_pxp.c b/drivers/gpu/drm/xe/xe_pxp.c
new file mode 100644
index 000000000000..454ea7dc08ac
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pxp.c
@@ -0,0 +1,919 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2024 Intel Corporation.
+ */
+
+#include "xe_pxp.h"
+
+#include <drm/drm_managed.h>
+#include <uapi/drm/xe_drm.h>
+
+#include "xe_bo.h"
+#include "xe_bo_types.h"
+#include "xe_device_types.h"
+#include "xe_exec_queue.h"
+#include "xe_force_wake.h"
+#include "xe_guc_submit.h"
+#include "xe_gsc_proxy.h"
+#include "xe_gt.h"
+#include "xe_gt_types.h"
+#include "xe_huc.h"
+#include "xe_mmio.h"
+#include "xe_pm.h"
+#include "xe_pxp_submit.h"
+#include "xe_pxp_types.h"
+#include "xe_uc_fw.h"
+#include "regs/xe_irq_regs.h"
+#include "regs/xe_pxp_regs.h"
+
+/**
+ * DOC: PXP
+ *
+ * PXP (Protected Xe Path) allows execution and flip to display of protected
+ * (i.e. encrypted) objects. This feature is currently only supported in
+ * integrated parts.
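+ *
+ * Userspace can check whether the feature is ready for use via the PXP
+ * status device query (see query_pxp_status() in xe_query.c).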
+ */
+
+#define ARB_SESSION DRM_XE_PXP_HWDRM_DEFAULT_SESSION /* shorter define */
+
+/*
+ * A submission to GSC can take up to 250ms to complete, so use a 300ms
+ * timeout for activation where only one of those is involved. Termination
+ * additionally requires a submission to VCS and an interaction with KCR, so
+ * bump the timeout to 500ms for that.
+ */
+#define PXP_ACTIVATION_TIMEOUT_MS 300
+#define PXP_TERMINATION_TIMEOUT_MS 500
+
+bool xe_pxp_is_supported(const struct xe_device *xe)
+{
+ return xe->info.has_pxp && IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY);
+}
+
+bool xe_pxp_is_enabled(const struct xe_pxp *pxp)
+{
+ return pxp;
+}
+
+static bool pxp_prerequisites_done(const struct xe_pxp *pxp)
+{
+ struct xe_gt *gt = pxp->gt;
+ unsigned int fw_ref;
+ bool ready;
+
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+
+ /*
+ * If force_wake fails we could falsely report the prerequisites as not
+ * done even if they are; the consequence of this would be that the
+ * callers won't go ahead with using PXP, but if force_wake doesn't work
+ * the GT is very likely in a bad state anyway, so aborting PXP is not
+ * really a problem. Therefore, we can just log the force_wake error and
+ * not escalate it.
+ */
+ XE_WARN_ON(!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL));
+
+ /* PXP requires both HuC authentication via GSC and GSC proxy initialized */
+ ready = xe_huc_is_authenticated(&gt->uc.huc, XE_HUC_AUTH_VIA_GSC) &&
+ xe_gsc_proxy_init_done(&gt->uc.gsc);
+
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+
+ return ready;
+}
+
+/**
+ * xe_pxp_get_readiness_status - check whether PXP is ready for userspace use
+ * @pxp: the xe_pxp pointer (can be NULL if PXP is disabled)
+ *
+ * Returns: 0 if PXP is not ready yet, 1 if it is ready, a negative errno value
+ * if PXP is not supported/enabled or if something went wrong in the
+ * initialization of the prerequisites. Note that the return values of this
+ * function follow the uapi (see drm_xe_query_pxp_status), so they can be used
+ * directly in the query ioctl.
+ */
+int xe_pxp_get_readiness_status(struct xe_pxp *pxp)
+{
+ int ret = 0;
+
+ if (!xe_pxp_is_enabled(pxp))
+ return -ENODEV;
+
+ /* if the GSC or HuC FW are in an error state, PXP will never work */
+ if (xe_uc_fw_status_to_error(pxp->gt->uc.huc.fw.status) ||
+ xe_uc_fw_status_to_error(pxp->gt->uc.gsc.fw.status))
+ return -EIO;
+
+ xe_pm_runtime_get(pxp->xe);
+
+ /* PXP requires both HuC loaded and GSC proxy initialized */
+ if (pxp_prerequisites_done(pxp))
+ ret = 1;
+
+ xe_pm_runtime_put(pxp->xe);
+ return ret;
+}
+
+static bool pxp_session_is_in_play(struct xe_pxp *pxp, u32 id)
+{
+ struct xe_gt *gt = pxp->gt;
+
+ return xe_mmio_read32(&gt->mmio, KCR_SIP) & BIT(id);
+}
+
+static int pxp_wait_for_session_state(struct xe_pxp *pxp, u32 id, bool in_play)
+{
+ struct xe_gt *gt = pxp->gt;
+ u32 mask = BIT(id);
+
+ return xe_mmio_wait32(&gt->mmio, KCR_SIP, mask, in_play ? mask : 0,
+ 250, NULL, false);
+}
+
+static void pxp_invalidate_queues(struct xe_pxp *pxp);
+
+static int pxp_terminate_hw(struct xe_pxp *pxp)
+{
+ struct xe_gt *gt = pxp->gt;
+ unsigned int fw_ref;
+ int ret = 0;
+
+ drm_dbg(&pxp->xe->drm, "Terminating PXP\n");
+
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ /* terminate the hw session */
+ ret = xe_pxp_submit_session_termination(pxp, ARB_SESSION);
+ if (ret)
+ goto out;
+
+ ret = pxp_wait_for_session_state(pxp, ARB_SESSION, false);
+ if (ret)
+ goto out;
+
+ /* Trigger full HW cleanup */
+ xe_mmio_write32(&gt->mmio, KCR_GLOBAL_TERMINATE, 1);
+
+ /* now we can tell the GSC to clean up its own state */
+ ret = xe_pxp_submit_session_invalidation(&pxp->gsc_res, ARB_SESSION);
+
+out:
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ return ret;
+}
+
+static void mark_termination_in_progress(struct xe_pxp *pxp)
+{
+ lockdep_assert_held(&pxp->mutex);
+
+ reinit_completion(&pxp->termination);
+ pxp->status = XE_PXP_TERMINATION_IN_PROGRESS;
+}
+
+static void pxp_terminate(struct xe_pxp *pxp)
+{
+ int ret = 0;
+ struct xe_device *xe = pxp->xe;
+
+ if (!wait_for_completion_timeout(&pxp->activation,
+ msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS)))
+ drm_err(&xe->drm, "failed to wait for PXP start before termination\n");
+
+ mutex_lock(&pxp->mutex);
+
+ if (pxp->status == XE_PXP_ACTIVE)
+ pxp->key_instance++;
+
+ /*
+ * we'll mark the status as needing termination on resume, so no need to
+ * emit a termination now.
+ */
+ if (pxp->status == XE_PXP_SUSPENDED) {
+ mutex_unlock(&pxp->mutex);
+ return;
+ }
+
+ /*
+ * If we have a termination already in progress, we need to wait for
+ * it to complete before queueing another one. Once the first
+ * termination is completed we'll set the state back to
+ * NEEDS_TERMINATION and leave it to the pxp start code to issue it.
+ */
+ if (pxp->status == XE_PXP_TERMINATION_IN_PROGRESS) {
+ pxp->status = XE_PXP_NEEDS_ADDITIONAL_TERMINATION;
+ mutex_unlock(&pxp->mutex);
+ return;
+ }
+
+ mark_termination_in_progress(pxp);
+
+ mutex_unlock(&pxp->mutex);
+
+ pxp_invalidate_queues(pxp);
+
+ ret = pxp_terminate_hw(pxp);
+ if (ret) {
+ drm_err(&xe->drm, "PXP termination failed: %pe\n", ERR_PTR(ret));
+ mutex_lock(&pxp->mutex);
+ pxp->status = XE_PXP_ERROR;
+ complete_all(&pxp->termination);
+ mutex_unlock(&pxp->mutex);
+ }
+}
+
+static void pxp_terminate_complete(struct xe_pxp *pxp)
+{
+ /*
+ * We expect PXP to be in one of 3 states when we get here:
+ * - XE_PXP_TERMINATION_IN_PROGRESS: a single termination event was
+ * requested and it is now completing, so we're ready to start.
+ * - XE_PXP_NEEDS_ADDITIONAL_TERMINATION: a second termination was
+ * requested while the first one was still being processed.
+ * - XE_PXP_SUSPENDED: PXP is now suspended, so we defer everything to
+ * when we come back on resume.
+ */
+ mutex_lock(&pxp->mutex);
+
+ switch (pxp->status) {
+ case XE_PXP_TERMINATION_IN_PROGRESS:
+ pxp->status = XE_PXP_READY_TO_START;
+ break;
+ case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
+ pxp->status = XE_PXP_NEEDS_TERMINATION;
+ break;
+ case XE_PXP_SUSPENDED:
+ /* Nothing to do */
+ break;
+ default:
+ drm_err(&pxp->xe->drm,
+ "PXP termination complete while status was %u\n",
+ pxp->status);
+ }
+
+ complete_all(&pxp->termination);
+
+ mutex_unlock(&pxp->mutex);
+}
+
+static void pxp_irq_work(struct work_struct *work)
+{
+ struct xe_pxp *pxp = container_of(work, typeof(*pxp), irq.work);
+ struct xe_device *xe = pxp->xe;
+ u32 events = 0;
+
+ spin_lock_irq(&xe->irq.lock);
+ events = pxp->irq.events;
+ pxp->irq.events = 0;
+ spin_unlock_irq(&xe->irq.lock);
+
+ if (!events)
+ return;
+
+ /*
+ * If we're processing a termination irq while suspending then don't
+ * bother, we're going to re-init everything on resume anyway.
+ */
+ if ((events & PXP_TERMINATION_REQUEST) && !xe_pm_runtime_get_if_active(xe))
+ return;
+
+ if (events & PXP_TERMINATION_REQUEST) {
+ events &= ~PXP_TERMINATION_COMPLETE;
+ pxp_terminate(pxp);
+ }
+
+ if (events & PXP_TERMINATION_COMPLETE)
+ pxp_terminate_complete(pxp);
+
+ if (events & PXP_TERMINATION_REQUEST)
+ xe_pm_runtime_put(xe);
+}
+
+/**
+ * xe_pxp_irq_handler - Handles PXP interrupts.
+ * @xe: the xe_device structure
+ * @iir: interrupt vector
+ */
+void xe_pxp_irq_handler(struct xe_device *xe, u16 iir)
+{
+ struct xe_pxp *pxp = xe->pxp;
+
+ if (!xe_pxp_is_enabled(pxp)) {
+ drm_err(&xe->drm, "PXP irq 0x%x received with PXP disabled!\n", iir);
+ return;
+ }
+
+ lockdep_assert_held(&xe->irq.lock);
+
+ if (unlikely(!iir))
+ return;
+
+ if (iir & (KCR_PXP_STATE_TERMINATED_INTERRUPT |
+ KCR_APP_TERMINATED_PER_FW_REQ_INTERRUPT))
+ pxp->irq.events |= PXP_TERMINATION_REQUEST;
+
+ if (iir & KCR_PXP_STATE_RESET_COMPLETE_INTERRUPT)
+ pxp->irq.events |= PXP_TERMINATION_COMPLETE;
+
+ if (pxp->irq.events)
+ queue_work(pxp->irq.wq, &pxp->irq.work);
+}
+
+static int kcr_pxp_set_status(const struct xe_pxp *pxp, bool enable)
+{
+ u32 val = enable ? _MASKED_BIT_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES) :
+ _MASKED_BIT_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES);
+ unsigned int fw_ref;
+
+ fw_ref = xe_force_wake_get(gt_to_fw(pxp->gt), XE_FW_GT);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT))
+ return -EIO;
+
+ xe_mmio_write32(&pxp->gt->mmio, KCR_INIT, val);
+ xe_force_wake_put(gt_to_fw(pxp->gt), fw_ref);
+
+ return 0;
+}
+
+static int kcr_pxp_enable(const struct xe_pxp *pxp)
+{
+ return kcr_pxp_set_status(pxp, true);
+}
+
+static int kcr_pxp_disable(const struct xe_pxp *pxp)
+{
+ return kcr_pxp_set_status(pxp, false);
+}
+
+static void pxp_fini(void *arg)
+{
+ struct xe_pxp *pxp = arg;
+
+ destroy_workqueue(pxp->irq.wq);
+ xe_pxp_destroy_execution_resources(pxp);
+
+ /* no need to explicitly disable KCR since we're going to do an FLR */
+}
+
+/**
+ * xe_pxp_init - initialize PXP support
+ * @xe: the xe_device structure
+ *
+ * Initialize the HW state and allocate the objects required for PXP support.
+ * Note that some of the requirements for PXP support (GSC proxy init, HuC auth)
+ * are performed asynchronously as part of the GSC init. PXP can only be used
+ * after both this function and the async worker have completed.
+ *
+ * Returns 0 if PXP is not supported or if PXP initialization is successful,
+ * a negative errno value if there is an error during the init.
+ */
+int xe_pxp_init(struct xe_device *xe)
+{
+ struct xe_gt *gt = xe->tiles[0].media_gt;
+ struct xe_pxp *pxp;
+ int err;
+
+ if (!xe_pxp_is_supported(xe))
+ return 0;
+
+ /* we only support PXP on single tile devices with a media GT */
+ if (xe->info.tile_count > 1 || !gt)
+ return 0;
+
+ /* The GSCCS is required for submissions to the GSC FW */
+ if (!(gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0)))
+ return 0;
+
+ /* PXP requires both GSC and HuC firmwares to be available */
+ if (!xe_uc_fw_is_loadable(&gt->uc.gsc.fw) ||
+ !xe_uc_fw_is_loadable(&gt->uc.huc.fw)) {
+ drm_info(&xe->drm, "skipping PXP init due to missing FW dependencies");
+ return 0;
+ }
+
+ pxp = drmm_kzalloc(&xe->drm, sizeof(struct xe_pxp), GFP_KERNEL);
+ if (!pxp) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&pxp->queues.list);
+ spin_lock_init(&pxp->queues.lock);
+ INIT_WORK(&pxp->irq.work, pxp_irq_work);
+ pxp->xe = xe;
+ pxp->gt = gt;
+
+ pxp->key_instance = 1;
+ pxp->last_suspend_key_instance = 1;
+
+ /*
+ * we'll use the completions to check if there is an action pending,
+ * so we start them as completed and reinit them when an action is
+ * triggered.
+ */
+ init_completion(&pxp->activation);
+ init_completion(&pxp->termination);
+ complete_all(&pxp->termination);
+ complete_all(&pxp->activation);
+
+ mutex_init(&pxp->mutex);
+
+ pxp->irq.wq = alloc_ordered_workqueue("pxp-wq", 0);
+ if (!pxp->irq.wq) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ err = kcr_pxp_enable(pxp);
+ if (err)
+ goto out_wq;
+
+ err = xe_pxp_allocate_execution_resources(pxp);
+ if (err)
+ goto out_kcr_disable;
+
+ xe->pxp = pxp;
+
+ return devm_add_action_or_reset(xe->drm.dev, pxp_fini, pxp);
+
+out_kcr_disable:
+ kcr_pxp_disable(pxp);
+out_wq:
+ destroy_workqueue(pxp->irq.wq);
+out_free:
+ drmm_kfree(&xe->drm, pxp);
+out:
+ drm_err(&xe->drm, "PXP initialization failed: %pe\n", ERR_PTR(err));
+ return err;
+}
+
+static int __pxp_start_arb_session(struct xe_pxp *pxp)
+{
+ int ret;
+ unsigned int fw_ref;
+
+ fw_ref = xe_force_wake_get(gt_to_fw(pxp->gt), XE_FW_GT);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT))
+ return -EIO;
+
+ if (pxp_session_is_in_play(pxp, ARB_SESSION)) {
+ ret = -EEXIST;
+ goto out_force_wake;
+ }
+
+ ret = xe_pxp_submit_session_init(&pxp->gsc_res, ARB_SESSION);
+ if (ret) {
+ drm_err(&pxp->xe->drm, "Failed to init PXP arb session: %pe\n", ERR_PTR(ret));
+ goto out_force_wake;
+ }
+
+ ret = pxp_wait_for_session_state(pxp, ARB_SESSION, true);
+ if (ret) {
+ drm_err(&pxp->xe->drm, "PXP ARB session failed to go in play: %pe\n", ERR_PTR(ret));
+ goto out_force_wake;
+ }
+
+ drm_dbg(&pxp->xe->drm, "PXP ARB session is active\n");
+
+out_force_wake:
+ xe_force_wake_put(gt_to_fw(pxp->gt), fw_ref);
+ return ret;
+}
+
+/**
+ * xe_pxp_exec_queue_set_type - Mark a queue as using PXP
+ * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
+ * @q: the queue to mark as using PXP
+ * @type: the type of PXP session this queue will use
+ *
+ * Returns 0 if the selected PXP type is supported, -ENODEV otherwise.
+ */
+int xe_pxp_exec_queue_set_type(struct xe_pxp *pxp, struct xe_exec_queue *q, u8 type)
+{
+ if (!xe_pxp_is_enabled(pxp))
+ return -ENODEV;
+
+ /* we only support HWDRM sessions right now */
+ xe_assert(pxp->xe, type == DRM_XE_PXP_TYPE_HWDRM);
+
+ q->pxp.type = type;
+
+ return 0;
+}
+
+static void __exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
+{
+ spin_lock_irq(&pxp->queues.lock);
+ list_add_tail(&q->pxp.link, &pxp->queues.list);
+ spin_unlock_irq(&pxp->queues.lock);
+}
+
+/**
+ * xe_pxp_exec_queue_add - add a queue to the PXP list
+ * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
+ * @q: the queue to add to the list
+ *
+ * If PXP is enabled and the prerequisites are done, start the PXP ARB
+ * session (if not already running) and add the queue to the PXP list. Note
+ * that the queue must have previously been marked as using PXP with
+ * xe_pxp_exec_queue_set_type.
+ *
+ * Returns 0 if the PXP ARB session is running and the queue is in the list,
+ * -ENODEV if PXP is disabled, -EBUSY if the PXP prerequisites are not done,
+ * or another errno value if something goes wrong during the session start.
+ */
+int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
+{
+ int ret = 0;
+
+ if (!xe_pxp_is_enabled(pxp))
+ return -ENODEV;
+
+ /* we only support HWDRM sessions right now */
+ xe_assert(pxp->xe, q->pxp.type == DRM_XE_PXP_TYPE_HWDRM);
+
+ /*
+ * Runtime suspend kills PXP, so we take a reference to prevent it from
+ * happening while we have active queues that use PXP.
+ */
+ xe_pm_runtime_get(pxp->xe);
+
+ if (!pxp_prerequisites_done(pxp)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+wait_for_idle:
+ /*
+ * if there is an action in progress, wait for it. We need to wait
+ * outside the lock because the completion is done from within the lock.
+ * Note that the two actions should never be pending at the same time.
+ */
+ if (!wait_for_completion_timeout(&pxp->termination,
+ msecs_to_jiffies(PXP_TERMINATION_TIMEOUT_MS))) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ if (!wait_for_completion_timeout(&pxp->activation,
+ msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS))) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ mutex_lock(&pxp->mutex);
+
+ /* If PXP is not already active, turn it on */
+ switch (pxp->status) {
+ case XE_PXP_ERROR:
+ ret = -EIO;
+ break;
+ case XE_PXP_ACTIVE:
+ __exec_queue_add(pxp, q);
+ mutex_unlock(&pxp->mutex);
+ goto out;
+ case XE_PXP_READY_TO_START:
+ pxp->status = XE_PXP_START_IN_PROGRESS;
+ reinit_completion(&pxp->activation);
+ break;
+ case XE_PXP_START_IN_PROGRESS:
+ /* If a start is in progress then the completion must not be done */
+ XE_WARN_ON(completion_done(&pxp->activation));
+ mutex_unlock(&pxp->mutex);
+ goto wait_for_idle;
+ case XE_PXP_NEEDS_TERMINATION:
+ mark_termination_in_progress(pxp);
+ break;
+ case XE_PXP_TERMINATION_IN_PROGRESS:
+ case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
+ /* If a termination is in progress then the completion must not be done */
+ XE_WARN_ON(completion_done(&pxp->termination));
+ mutex_unlock(&pxp->mutex);
+ goto wait_for_idle;
+ case XE_PXP_SUSPENDED:
+ default:
+ drm_err(&pxp->xe->drm, "unexpected state during PXP start: %u\n", pxp->status);
+ ret = -EIO;
+ break;
+ }
+
+ mutex_unlock(&pxp->mutex);
+
+ if (ret)
+ goto out;
+
+ if (!completion_done(&pxp->termination)) {
+ ret = pxp_terminate_hw(pxp);
+ if (ret) {
+ drm_err(&pxp->xe->drm, "PXP termination failed before start\n");
+ mutex_lock(&pxp->mutex);
+ pxp->status = XE_PXP_ERROR;
+ mutex_unlock(&pxp->mutex);
+
+ goto out;
+ }
+
+ goto wait_for_idle;
+ }
+
+ /* All the cases except for start should have exited earlier */
+ XE_WARN_ON(completion_done(&pxp->activation));
+ ret = __pxp_start_arb_session(pxp);
+
+ mutex_lock(&pxp->mutex);
+
+ complete_all(&pxp->activation);
+
+ /*
+ * Any other process should wait until the state moves away from
+ * XE_PXP_START_IN_PROGRESS, so if the state is anything else at this
+ * point something went wrong. Mark the status as needing termination
+ * and try again.
+ */
+ if (pxp->status != XE_PXP_START_IN_PROGRESS) {
+ drm_err(&pxp->xe->drm, "unexpected state after PXP start: %u\n", pxp->status);
+ pxp->status = XE_PXP_NEEDS_TERMINATION;
+ mutex_unlock(&pxp->mutex);
+ goto wait_for_idle;
+ }
+
+ /* If everything went ok, update the status and add the queue to the list */
+ if (!ret) {
+ pxp->status = XE_PXP_ACTIVE;
+ __exec_queue_add(pxp, q);
+ } else {
+ pxp->status = XE_PXP_ERROR;
+ }
+
+ mutex_unlock(&pxp->mutex);
+
+out:
+ /*
+ * in the successful case the PM ref is released from
+ * xe_pxp_exec_queue_remove
+ */
+ if (ret)
+ xe_pm_runtime_put(pxp->xe);
+
+ return ret;
+}
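As a rough usage sketch of the flow documented above (illustrative only, not part of this patch; the helper name and the -EBUSY handling are assumptions), a caller marks the queue type first and only then adds the queue before its first protected submission:

static int example_attach_queue_to_pxp(struct xe_pxp *pxp,
				       struct xe_exec_queue *q)
{
	int err;

	/* Must be done before the add; fails with -ENODEV if PXP is disabled */
	err = xe_pxp_exec_queue_set_type(pxp, q, DRM_XE_PXP_TYPE_HWDRM);
	if (err)
		return err;

	/* Starts the ARB session if needed and takes a runtime PM reference */
	err = xe_pxp_exec_queue_add(pxp, q);
	if (err == -EBUSY)
		return -EAGAIN;	/* GSC proxy init / HuC auth not completed yet */

	return err;
}

The runtime PM reference taken by a successful add is dropped from xe_pxp_exec_queue_remove(), so a caller following this pattern does not need to balance it itself.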
+
+static void __pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q, bool lock)
+{
+ bool need_pm_put = false;
+
+ if (!xe_pxp_is_enabled(pxp))
+ return;
+
+ if (lock)
+ spin_lock_irq(&pxp->queues.lock);
+
+ if (!list_empty(&q->pxp.link)) {
+ list_del_init(&q->pxp.link);
+ need_pm_put = true;
+ }
+
+ q->pxp.type = DRM_XE_PXP_TYPE_NONE;
+
+ if (lock)
+ spin_unlock_irq(&pxp->queues.lock);
+
+ if (need_pm_put)
+ xe_pm_runtime_put(pxp->xe);
+}
+
+/**
+ * xe_pxp_exec_queue_remove - remove a queue from the PXP list
+ * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
+ * @q: the queue to remove from the list
+ *
+ * If PXP is enabled and the exec_queue is in the list, the queue will be
+ * removed from the list and its PM reference will be released. It is safe to
+ * call this function multiple times for the same queue.
+ */
+void xe_pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q)
+{
+ __pxp_exec_queue_remove(pxp, q, true);
+}
+
+static void pxp_invalidate_queues(struct xe_pxp *pxp)
+{
+ struct xe_exec_queue *tmp, *q;
+ LIST_HEAD(to_clean);
+
+ spin_lock_irq(&pxp->queues.lock);
+
+ list_for_each_entry_safe(q, tmp, &pxp->queues.list, pxp.link) {
+ q = xe_exec_queue_get_unless_zero(q);
+ if (!q)
+ continue;
+
+ list_move_tail(&q->pxp.link, &to_clean);
+ }
+ spin_unlock_irq(&pxp->queues.lock);
+
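+ /*
+ * The kill/put is done outside of queues.lock on a private list snapshot,
+ * since those paths may sleep and must not run under the spinlock.
+ */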
+ list_for_each_entry_safe(q, tmp, &to_clean, pxp.link) {
+ xe_exec_queue_kill(q);
+
+ /*
+ * We hold a ref to the queue so there is no risk of racing with
+ * the calls to exec_queue_remove coming from exec_queue_destroy.
+ */
+ __pxp_exec_queue_remove(pxp, q, false);
+
+ xe_exec_queue_put(q);
+ }
+}
+
+/**
+ * xe_pxp_key_assign - mark a BO as using the current PXP key iteration
+ * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
+ * @bo: the BO to mark
+ *
+ * Returns: -ENODEV if PXP is disabled, 0 otherwise.
+ */
+int xe_pxp_key_assign(struct xe_pxp *pxp, struct xe_bo *bo)
+{
+ if (!xe_pxp_is_enabled(pxp))
+ return -ENODEV;
+
+ xe_assert(pxp->xe, !bo->pxp_key_instance);
+
+ /*
+ * Note that the PXP key handling is inherently racy, because the key
+ * can theoretically change at any time (although it's unlikely to do
+ * so without triggers), even right after we copy it. Taking a lock
+ * wouldn't help because the value might still change as soon as we
+ * release the lock.
+ * Userspace needs to handle the fact that their BOs can go invalid at
+ * any point.
+ */
+ bo->pxp_key_instance = pxp->key_instance;
+
+ return 0;
+}
+
+/**
+ * xe_pxp_bo_key_check - check if the key used by a xe_bo is valid
+ * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
+ * @bo: the BO we want to check
+ *
+ * Checks whether a BO was encrypted with the current key or an obsolete one.
+ *
+ * Returns: 0 if the key is valid, -ENODEV if PXP is disabled, -EINVAL if the
+ * BO is not using PXP, -ENOEXEC if the key is not valid.
+ */
+int xe_pxp_bo_key_check(struct xe_pxp *pxp, struct xe_bo *bo)
+{
+ if (!xe_pxp_is_enabled(pxp))
+ return -ENODEV;
+
+ if (!xe_bo_is_protected(bo))
+ return -EINVAL;
+
+ xe_assert(pxp->xe, bo->pxp_key_instance);
+
+ /*
+ * Note that the PXP key handling is inherently racy, because the key
+ * can theoretically change at any time (although it's unlikely to do
+ * so without triggers), even right after we check it. Taking a lock
+ * wouldn't help because the value might still change as soon as we
+ * release the lock.
+ * We mitigate the risk by checking the key at multiple points (on each
+ * submission involving the BO and right before flipping it on the
+ * display), but there is still a very small chance that we could
+ * operate on an invalid BO for a single submission or a single frame
+ * flip. This is a compromise made to protect the encrypted data (which
+ * is what the key termination is for).
+ */
+ if (bo->pxp_key_instance != pxp->key_instance)
+ return -ENOEXEC;
+
+ return 0;
+}
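To make the check above concrete, here is a minimal sketch (not from this patch; the helper and the BO array are hypothetical) of how a submission path could gate protected BOs on the current key instance right before execution:

static int example_validate_protected_bos(struct xe_pxp *pxp,
					  struct xe_bo **bos, unsigned int count)
{
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		if (!xe_bo_is_protected(bos[i]))
			continue;

		/* -ENOEXEC means the BO was encrypted with a terminated key */
		err = xe_pxp_bo_key_check(pxp, bos[i]);
		if (err)
			return err;
	}

	return 0;
}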
+
+/**
+ * xe_pxp_obj_key_check - check if the key used by a drm_gem_obj is valid
+ * @obj: the drm_gem_obj we want to check
+ *
+ * Checks whether a drm_gem_obj was encrypted with the current key or an
+ * obsolete one.
+ *
+ * Returns: 0 if the key is valid, -ENODEV if PXP is disabled, -EINVAL if the
+ * obj is not using PXP, -ENOEXEC if the key is not valid.
+ */
+int xe_pxp_obj_key_check(struct drm_gem_object *obj)
+{
+ struct xe_bo *bo = gem_to_xe_bo(obj);
+ struct xe_device *xe = xe_bo_device(bo);
+ struct xe_pxp *pxp = xe->pxp;
+
+ return xe_pxp_bo_key_check(pxp, bo);
+}
+
+/**
+ * xe_pxp_pm_suspend - prepare PXP for HW suspend
+ * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
+ *
+ * Makes sure all PXP actions have completed and invalidates all PXP queues
+ * and objects before we go into a suspend state.
+ *
+ * Returns: 0 if successful, a negative errno value otherwise.
+ */
+int xe_pxp_pm_suspend(struct xe_pxp *pxp)
+{
+ bool needs_queue_inval = false;
+ int ret = 0;
+
+ if (!xe_pxp_is_enabled(pxp))
+ return 0;
+
+wait_for_activation:
+ if (!wait_for_completion_timeout(&pxp->activation,
+ msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS)))
+ ret = -ETIMEDOUT;
+
+ mutex_lock(&pxp->mutex);
+
+ switch (pxp->status) {
+ case XE_PXP_ERROR:
+ case XE_PXP_READY_TO_START:
+ case XE_PXP_SUSPENDED:
+ case XE_PXP_TERMINATION_IN_PROGRESS:
+ case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
+ /*
+ * If PXP is not running there is nothing to clean up. If there
+ * is a termination pending then no need to issue another one.
+ */
+ break;
+ case XE_PXP_START_IN_PROGRESS:
+ mutex_unlock(&pxp->mutex);
+ goto wait_for_activation;
+ case XE_PXP_NEEDS_TERMINATION:
+ /* If PXP was never used we can skip the cleanup */
+ if (pxp->key_instance == pxp->last_suspend_key_instance)
+ break;
+ fallthrough;
+ case XE_PXP_ACTIVE:
+ pxp->key_instance++;
+ needs_queue_inval = true;
+ break;
+ default:
+ drm_err(&pxp->xe->drm, "unexpected state during PXP suspend: %u\n",
+ pxp->status);
+ ret = -EIO;
+ goto out;
+ }
+
+ /*
+ * We set this even if we were in error state, hoping the suspend clears
+ * the error. Worst case we fail again and go into the error state again.
+ */
+ pxp->status = XE_PXP_SUSPENDED;
+
+ mutex_unlock(&pxp->mutex);
+
+ if (needs_queue_inval)
+ pxp_invalidate_queues(pxp);
+
+ /*
+ * If there is a termination in progress, wait for it. We need to wait
+ * outside the lock because the completion is done from within the lock.
+ */
+ if (!wait_for_completion_timeout(&pxp->termination,
+ msecs_to_jiffies(PXP_TERMINATION_TIMEOUT_MS)))
+ ret = -ETIMEDOUT;
+
+ pxp->last_suspend_key_instance = pxp->key_instance;
+
+out:
+ return ret;
+}
+
+/**
+ * xe_pxp_pm_resume - re-init PXP after HW suspend
+ * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
+ */
+void xe_pxp_pm_resume(struct xe_pxp *pxp)
+{
+ int err;
+
+ if (!xe_pxp_is_enabled(pxp))
+ return;
+
+ err = kcr_pxp_enable(pxp);
+
+ mutex_lock(&pxp->mutex);
+
+ xe_assert(pxp->xe, pxp->status == XE_PXP_SUSPENDED);
+
+ if (err)
+ pxp->status = XE_PXP_ERROR;
+ else
+ pxp->status = XE_PXP_NEEDS_TERMINATION;
+
+ mutex_unlock(&pxp->mutex);
+}
diff --git a/drivers/gpu/drm/xe/xe_pxp.h b/drivers/gpu/drm/xe/xe_pxp.h
new file mode 100644
index 000000000000..71a23280b900
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pxp.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2024, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __XE_PXP_H__
+#define __XE_PXP_H__
+
+#include <linux/types.h>
+
+struct drm_gem_object;
+struct xe_bo;
+struct xe_device;
+struct xe_exec_queue;
+struct xe_pxp;
+
+bool xe_pxp_is_supported(const struct xe_device *xe);
+bool xe_pxp_is_enabled(const struct xe_pxp *pxp);
+int xe_pxp_get_readiness_status(struct xe_pxp *pxp);
+
+int xe_pxp_init(struct xe_device *xe);
+void xe_pxp_irq_handler(struct xe_device *xe, u16 iir);
+
+int xe_pxp_pm_suspend(struct xe_pxp *pxp);
+void xe_pxp_pm_resume(struct xe_pxp *pxp);
+
+int xe_pxp_exec_queue_set_type(struct xe_pxp *pxp, struct xe_exec_queue *q, u8 type);
+int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q);
+void xe_pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q);
+
+int xe_pxp_key_assign(struct xe_pxp *pxp, struct xe_bo *bo);
+int xe_pxp_bo_key_check(struct xe_pxp *pxp, struct xe_bo *bo);
+int xe_pxp_obj_key_check(struct drm_gem_object *obj);
+
+#endif /* __XE_PXP_H__ */
diff --git a/drivers/gpu/drm/xe/xe_pxp_debugfs.c b/drivers/gpu/drm/xe/xe_pxp_debugfs.c
new file mode 100644
index 000000000000..ccfbacf08efc
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pxp_debugfs.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include "xe_pxp_debugfs.h"
+
+#include <linux/debugfs.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+
+#include "xe_device.h"
+#include "xe_pxp.h"
+#include "xe_pxp_types.h"
+#include "regs/xe_irq_regs.h"
+
+static struct xe_pxp *node_to_pxp(struct drm_info_node *node)
+{
+ return node->info_ent->data;
+}
+
+static const char *pxp_status_to_str(struct xe_pxp *pxp)
+{
+ lockdep_assert_held(&pxp->mutex);
+
+ switch (pxp->status) {
+ case XE_PXP_ERROR:
+ return "error";
+ case XE_PXP_NEEDS_TERMINATION:
+ return "needs termination";
+ case XE_PXP_TERMINATION_IN_PROGRESS:
+ return "termination in progress";
+ case XE_PXP_READY_TO_START:
+ return "ready to start";
+ case XE_PXP_ACTIVE:
+ return "active";
+ case XE_PXP_SUSPENDED:
+ return "suspended";
+ default:
+ return "unknown";
+ }
+}
+
+static int pxp_info(struct seq_file *m, void *data)
+{
+ struct xe_pxp *pxp = node_to_pxp(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
+ const char *status;
+
+ if (!xe_pxp_is_enabled(pxp))
+ return -ENODEV;
+
+ mutex_lock(&pxp->mutex);
+ status = pxp_status_to_str(pxp);
+
+ drm_printf(&p, "status: %s\n", status);
+ drm_printf(&p, "instance counter: %u\n", pxp->key_instance);
+ mutex_unlock(&pxp->mutex);
+
+ return 0;
+}
+
+static int pxp_terminate(struct seq_file *m, void *data)
+{
+ struct xe_pxp *pxp = node_to_pxp(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ if (!xe_pxp_is_enabled(pxp))
+ return -ENODEV;
+
+ /* simulate a termination interrupt */
+ spin_lock_irq(&pxp->xe->irq.lock);
+ xe_pxp_irq_handler(pxp->xe, KCR_PXP_STATE_TERMINATED_INTERRUPT);
+ spin_unlock_irq(&pxp->xe->irq.lock);
+
+ drm_printf(&p, "PXP termination queued\n");
+
+ return 0;
+}
+
+static const struct drm_info_list debugfs_list[] = {
+ {"info", pxp_info, 0},
+ {"terminate", pxp_terminate, 0},
+};
+
+void xe_pxp_debugfs_register(struct xe_pxp *pxp)
+{
+ struct drm_minor *minor;
+ struct drm_info_list *local;
+ struct dentry *root;
+ int i;
+
+ if (!xe_pxp_is_enabled(pxp))
+ return;
+
+ minor = pxp->xe->drm.primary;
+ if (!minor->debugfs_root)
+ return;
+
+#define DEBUGFS_SIZE (ARRAY_SIZE(debugfs_list) * sizeof(struct drm_info_list))
+ local = drmm_kmalloc(&pxp->xe->drm, DEBUGFS_SIZE, GFP_KERNEL);
+ if (!local)
+ return;
+
+ memcpy(local, debugfs_list, DEBUGFS_SIZE);
+#undef DEBUGFS_SIZE
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_list); ++i)
+ local[i].data = pxp;
+
+ root = debugfs_create_dir("pxp", minor->debugfs_root);
+ if (IS_ERR(root))
+ return;
+
+ drm_debugfs_create_files(local,
+ ARRAY_SIZE(debugfs_list),
+ root, minor);
+}
diff --git a/drivers/gpu/drm/xe/xe_pxp_debugfs.h b/drivers/gpu/drm/xe/xe_pxp_debugfs.h
new file mode 100644
index 000000000000..988466aad50b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pxp_debugfs.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef __XE_PXP_DEBUGFS_H__
+#define __XE_PXP_DEBUGFS_H__
+
+struct xe_pxp;
+
+void xe_pxp_debugfs_register(struct xe_pxp *pxp);
+
+#endif /* __XE_PXP_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/xe/xe_pxp_submit.c b/drivers/gpu/drm/xe/xe_pxp_submit.c
new file mode 100644
index 000000000000..d92ec0f515b0
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pxp_submit.c
@@ -0,0 +1,588 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2024 Intel Corporation.
+ */
+
+#include "xe_pxp_submit.h"
+
+#include <linux/delay.h>
+#include <uapi/drm/xe_drm.h>
+
+#include "xe_device_types.h"
+#include "xe_bb.h"
+#include "xe_bo.h"
+#include "xe_exec_queue.h"
+#include "xe_gsc_submit.h"
+#include "xe_gt.h"
+#include "xe_lrc.h"
+#include "xe_map.h"
+#include "xe_pxp.h"
+#include "xe_pxp_types.h"
+#include "xe_sched_job.h"
+#include "xe_vm.h"
+#include "abi/gsc_command_header_abi.h"
+#include "abi/gsc_pxp_commands_abi.h"
+#include "instructions/xe_gsc_commands.h"
+#include "instructions/xe_mfx_commands.h"
+#include "instructions/xe_mi_commands.h"
+
+/*
+ * The VCS is used for kernel-owned GGTT submissions to issue key termination.
+ * Terminations are serialized, so we only need a single queue and a single
+ * batch.
+ */
+static int allocate_vcs_execution_resources(struct xe_pxp *pxp)
+{
+ struct xe_gt *gt = pxp->gt;
+ struct xe_device *xe = pxp->xe;
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_hw_engine *hwe;
+ struct xe_exec_queue *q;
+ struct xe_bo *bo;
+ int err;
+
+ hwe = xe_gt_hw_engine(gt, XE_ENGINE_CLASS_VIDEO_DECODE, 0, true);
+ if (!hwe)
+ return -ENODEV;
+
+ q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1, hwe,
+ EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_PERMANENT, 0);
+ if (IS_ERR(q))
+ return PTR_ERR(q);
+
+ /*
+ * Each termination is 16 DWORDS, so 4K is enough to contain a
+ * termination for each session.
+ */
+ bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED | XE_BO_FLAG_GGTT);
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ goto out_queue;
+ }
+
+ pxp->vcs_exec.q = q;
+ pxp->vcs_exec.bo = bo;
+
+ return 0;
+
+out_queue:
+ xe_exec_queue_put(q);
+ return err;
+}
+
+static void destroy_vcs_execution_resources(struct xe_pxp *pxp)
+{
+ if (pxp->vcs_exec.bo)
+ xe_bo_unpin_map_no_vm(pxp->vcs_exec.bo);
+
+ if (pxp->vcs_exec.q)
+ xe_exec_queue_put(pxp->vcs_exec.q);
+}
+
+#define PXP_BB_SIZE XE_PAGE_SIZE
+static int allocate_gsc_client_resources(struct xe_gt *gt,
+ struct xe_pxp_gsc_client_resources *gsc_res,
+ size_t inout_size)
+{
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_hw_engine *hwe;
+ struct xe_vm *vm;
+ struct xe_bo *bo;
+ struct xe_exec_queue *q;
+ struct dma_fence *fence;
+ long timeout;
+ int err = 0;
+
+ hwe = xe_gt_hw_engine(gt, XE_ENGINE_CLASS_OTHER, 0, true);
+
+ /* we shouldn't reach here if the GSC engine is not available */
+ xe_assert(xe, hwe);
+
+ /* PXP instructions must be issued from PPGTT */
+ vm = xe_vm_create(xe, XE_VM_FLAG_GSC);
+ if (IS_ERR(vm))
+ return PTR_ERR(vm);
+
+ /* We allocate a single object for the batch and the in/out memory */
+ xe_vm_lock(vm, false);
+ bo = xe_bo_create_pin_map(xe, tile, vm, PXP_BB_SIZE + inout_size * 2,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED | XE_BO_FLAG_NEEDS_UC);
+ xe_vm_unlock(vm);
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ goto vm_out;
+ }
+
+ fence = xe_vm_bind_kernel_bo(vm, bo, NULL, 0, XE_CACHE_WB);
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
+ goto bo_out;
+ }
+
+ timeout = dma_fence_wait_timeout(fence, false, HZ);
+ dma_fence_put(fence);
+ if (timeout <= 0) {
+ err = timeout ?: -ETIME;
+ goto bo_out;
+ }
+
+ q = xe_exec_queue_create(xe, vm, BIT(hwe->logical_instance), 1, hwe,
+ EXEC_QUEUE_FLAG_KERNEL |
+ EXEC_QUEUE_FLAG_PERMANENT, 0);
+ if (IS_ERR(q)) {
+ err = PTR_ERR(q);
+ goto bo_out;
+ }
+
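+ /*
+ * Layout of the single BO, bound at offset 0 of the GSC VM:
+ * [ batch: PXP_BB_SIZE | msg_in: inout_size | msg_out: inout_size ]
+ */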
+ gsc_res->vm = vm;
+ gsc_res->bo = bo;
+ gsc_res->inout_size = inout_size;
+ gsc_res->batch = IOSYS_MAP_INIT_OFFSET(&bo->vmap, 0);
+ gsc_res->msg_in = IOSYS_MAP_INIT_OFFSET(&bo->vmap, PXP_BB_SIZE);
+ gsc_res->msg_out = IOSYS_MAP_INIT_OFFSET(&bo->vmap, PXP_BB_SIZE + inout_size);
+ gsc_res->q = q;
+
+ /* initialize host-session-handle (for all Xe-to-gsc-firmware PXP cmds) */
+ gsc_res->host_session_handle = xe_gsc_create_host_session_id();
+
+ return 0;
+
+bo_out:
+ xe_bo_unpin_map_no_vm(bo);
+vm_out:
+ xe_vm_close_and_put(vm);
+
+ return err;
+}
+
+static void destroy_gsc_client_resources(struct xe_pxp_gsc_client_resources *gsc_res)
+{
+ if (!gsc_res->q)
+ return;
+
+ xe_exec_queue_put(gsc_res->q);
+ xe_bo_unpin_map_no_vm(gsc_res->bo);
+ xe_vm_close_and_put(gsc_res->vm);
+}
+
+/**
+ * xe_pxp_allocate_execution_resources - Allocate PXP submission objects
+ * @pxp: the xe_pxp structure
+ *
+ * Allocates exec_queue objects for VCS and GSCCS submission. The GSCCS
+ * submissions are done via PPGTT, so this function allocates a VM for it and
+ * maps the object into it.
+ *
+ * Returns 0 if the allocation and mapping is successful, an errno value
+ * otherwise.
+ */
+int xe_pxp_allocate_execution_resources(struct xe_pxp *pxp)
+{
+ int err;
+
+ err = allocate_vcs_execution_resources(pxp);
+ if (err)
+ return err;
+
+ /*
+ * PXP commands can require a lot of BO space (see PXP_MAX_PACKET_SIZE),
+ * but we currently only support a subset of commands that are small
+ * (< 20 dwords), so a single page is enough for now.
+ */
+ err = allocate_gsc_client_resources(pxp->gt, &pxp->gsc_res, XE_PAGE_SIZE);
+ if (err)
+ goto destroy_vcs_context;
+
+ return 0;
+
+destroy_vcs_context:
+ destroy_vcs_execution_resources(pxp);
+ return err;
+}
+
+void xe_pxp_destroy_execution_resources(struct xe_pxp *pxp)
+{
+ destroy_gsc_client_resources(&pxp->gsc_res);
+ destroy_vcs_execution_resources(pxp);
+}
+
+#define emit_cmd(xe_, map_, offset_, val_) \
+ xe_map_wr(xe_, map_, (offset_) * sizeof(u32), u32, val_)
+
+/* stall until prior PXP and MFX/HCP/HUC objects are completed */
+#define MFX_WAIT_PXP (MFX_WAIT | \
+ MFX_WAIT_DW0_PXP_SYNC_CONTROL_FLAG | \
+ MFX_WAIT_DW0_MFX_SYNC_CONTROL_FLAG)
+static u32 pxp_emit_wait(struct xe_device *xe, struct iosys_map *batch, u32 offset)
+{
+ /* wait for cmds to go through */
+ emit_cmd(xe, batch, offset++, MFX_WAIT_PXP);
+ emit_cmd(xe, batch, offset++, 0);
+
+ return offset;
+}
+
+static u32 pxp_emit_session_selection(struct xe_device *xe, struct iosys_map *batch,
+ u32 offset, u32 idx)
+{
+ offset = pxp_emit_wait(xe, batch, offset);
+
+ /* pxp off */
+ emit_cmd(xe, batch, offset++, MI_FLUSH_DW | MI_FLUSH_IMM_DW);
+ emit_cmd(xe, batch, offset++, 0);
+ emit_cmd(xe, batch, offset++, 0);
+ emit_cmd(xe, batch, offset++, 0);
+
+ /* select session */
+ emit_cmd(xe, batch, offset++, MI_SET_APPID | MI_SET_APPID_SESSION_ID(idx));
+ emit_cmd(xe, batch, offset++, 0);
+
+ offset = pxp_emit_wait(xe, batch, offset);
+
+ /* pxp on */
+ emit_cmd(xe, batch, offset++, MI_FLUSH_DW |
+ MI_FLUSH_DW_PROTECTED_MEM_EN |
+ MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX |
+ MI_FLUSH_IMM_DW);
+ emit_cmd(xe, batch, offset++, LRC_PPHWSP_PXP_INVAL_SCRATCH_ADDR |
+ MI_FLUSH_DW_USE_GTT);
+ emit_cmd(xe, batch, offset++, 0);
+ emit_cmd(xe, batch, offset++, 0);
+
+ offset = pxp_emit_wait(xe, batch, offset);
+
+ return offset;
+}
+
+static u32 pxp_emit_inline_termination(struct xe_device *xe,
+ struct iosys_map *batch, u32 offset)
+{
+ /* session inline termination */
+ emit_cmd(xe, batch, offset++, CRYPTO_KEY_EXCHANGE);
+ emit_cmd(xe, batch, offset++, 0);
+
+ return offset;
+}
+
+static u32 pxp_emit_session_termination(struct xe_device *xe, struct iosys_map *batch,
+ u32 offset, u32 idx)
+{
+ offset = pxp_emit_session_selection(xe, batch, offset, idx);
+ offset = pxp_emit_inline_termination(xe, batch, offset);
+
+ return offset;
+}
+
+/**
+ * xe_pxp_submit_session_termination - submits a PXP inline termination
+ * @pxp: the xe_pxp structure
+ * @id: the session to terminate
+ *
+ * Emit an inline termination via the VCS engine to terminate a session.
+ *
+ * Returns 0 if the submission is successful, an errno value otherwise.
+ */
+int xe_pxp_submit_session_termination(struct xe_pxp *pxp, u32 id)
+{
+ struct xe_sched_job *job;
+ struct dma_fence *fence;
+ long timeout;
+ u32 offset = 0;
+ u64 addr = xe_bo_ggtt_addr(pxp->vcs_exec.bo);
+
+ offset = pxp_emit_session_termination(pxp->xe, &pxp->vcs_exec.bo->vmap, offset, id);
+ offset = pxp_emit_wait(pxp->xe, &pxp->vcs_exec.bo->vmap, offset);
+ emit_cmd(pxp->xe, &pxp->vcs_exec.bo->vmap, offset, MI_BATCH_BUFFER_END);
+
+ job = xe_sched_job_create(pxp->vcs_exec.q, &addr);
+ if (IS_ERR(job))
+ return PTR_ERR(job);
+
+ xe_sched_job_arm(job);
+ fence = dma_fence_get(&job->drm.s_fence->finished);
+ xe_sched_job_push(job);
+
+ timeout = dma_fence_wait_timeout(fence, false, HZ);
+
+ dma_fence_put(fence);
+
+ if (!timeout)
+ return -ETIMEDOUT;
+ else if (timeout < 0)
+ return timeout;
+
+ return 0;
+}
+
+static bool
+is_fw_err_platform_config(u32 type)
+{
+ switch (type) {
+ case PXP_STATUS_ERROR_API_VERSION:
+ case PXP_STATUS_PLATFCONFIG_KF1_NOVERIF:
+ case PXP_STATUS_PLATFCONFIG_KF1_BAD:
+ case PXP_STATUS_PLATFCONFIG_FIXED_KF1_NOT_SUPPORTED:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+static const char *
+fw_err_to_string(u32 type)
+{
+ switch (type) {
+ case PXP_STATUS_ERROR_API_VERSION:
+ return "ERR_API_VERSION";
+ case PXP_STATUS_NOT_READY:
+ return "ERR_NOT_READY";
+ case PXP_STATUS_PLATFCONFIG_KF1_NOVERIF:
+ case PXP_STATUS_PLATFCONFIG_KF1_BAD:
+ case PXP_STATUS_PLATFCONFIG_FIXED_KF1_NOT_SUPPORTED:
+ return "ERR_PLATFORM_CONFIG";
+ default:
+ break;
+ }
+ return NULL;
+}
+
+static int pxp_pkt_submit(struct xe_exec_queue *q, u64 batch_addr)
+{
+ struct xe_gt *gt = q->gt;
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_sched_job *job;
+ struct dma_fence *fence;
+ long timeout;
+
+ xe_assert(xe, q->hwe->engine_id == XE_HW_ENGINE_GSCCS0);
+
+ job = xe_sched_job_create(q, &batch_addr);
+ if (IS_ERR(job))
+ return PTR_ERR(job);
+
+ xe_sched_job_arm(job);
+ fence = dma_fence_get(&job->drm.s_fence->finished);
+ xe_sched_job_push(job);
+
+ timeout = dma_fence_wait_timeout(fence, false, HZ);
+ dma_fence_put(fence);
+ if (timeout < 0)
+ return timeout;
+ else if (!timeout)
+ return -ETIME;
+
+ return 0;
+}
+
+static void emit_pxp_heci_cmd(struct xe_device *xe, struct iosys_map *batch,
+ u64 addr_in, u32 size_in, u64 addr_out, u32 size_out)
+{
+ u32 len = 0;
+
+ xe_map_wr(xe, batch, len++ * sizeof(u32), u32, GSC_HECI_CMD_PKT);
+ xe_map_wr(xe, batch, len++ * sizeof(u32), u32, lower_32_bits(addr_in));
+ xe_map_wr(xe, batch, len++ * sizeof(u32), u32, upper_32_bits(addr_in));
+ xe_map_wr(xe, batch, len++ * sizeof(u32), u32, size_in);
+ xe_map_wr(xe, batch, len++ * sizeof(u32), u32, lower_32_bits(addr_out));
+ xe_map_wr(xe, batch, len++ * sizeof(u32), u32, upper_32_bits(addr_out));
+ xe_map_wr(xe, batch, len++ * sizeof(u32), u32, size_out);
+ xe_map_wr(xe, batch, len++ * sizeof(u32), u32, 0);
+ xe_map_wr(xe, batch, len++ * sizeof(u32), u32, MI_BATCH_BUFFER_END);
+}
+
+#define GSC_PENDING_RETRY_MAXCOUNT 40
+#define GSC_PENDING_RETRY_PAUSE_MS 50
+static int gsccs_send_message(struct xe_pxp_gsc_client_resources *gsc_res,
+ void *msg_in, size_t msg_in_size,
+ void *msg_out, size_t msg_out_size_max)
+{
+ struct xe_device *xe = gsc_res->vm->xe;
+ const size_t max_msg_size = gsc_res->inout_size - sizeof(struct intel_gsc_mtl_header);
+ u32 wr_offset;
+ u32 rd_offset;
+ u32 reply_size;
+ u32 min_reply_size = 0;
+ int ret;
+ int retry = GSC_PENDING_RETRY_MAXCOUNT;
+
+ if (msg_in_size > max_msg_size || msg_out_size_max > max_msg_size)
+ return -ENOSPC;
+
+ wr_offset = xe_gsc_emit_header(xe, &gsc_res->msg_in, 0,
+ HECI_MEADDRESS_PXP,
+ gsc_res->host_session_handle,
+ msg_in_size);
+
+ /* NOTE: zero size packets are used for session-cleanups */
+ if (msg_in && msg_in_size) {
+ xe_map_memcpy_to(xe, &gsc_res->msg_in, wr_offset,
+ msg_in, msg_in_size);
+ min_reply_size = sizeof(struct pxp_cmd_header);
+ }
+
+ /* Make sure the reply header does not contain stale data */
+ xe_gsc_poison_header(xe, &gsc_res->msg_out, 0);
+
+ /*
+ * The BO is mapped at address 0 of the PPGTT, so no need to add its
+ * base offset when calculating the in/out addresses.
+ */
+ emit_pxp_heci_cmd(xe, &gsc_res->batch, PXP_BB_SIZE,
+ wr_offset + msg_in_size, PXP_BB_SIZE + gsc_res->inout_size,
+ wr_offset + msg_out_size_max);
+
+ xe_device_wmb(xe);
+
+ /*
+ * If the GSC needs to communicate with CSME to complete our request,
+ * it'll set the "pending" flag in the return header. In this scenario
+ * we're expected to wait 50ms to give some time to the proxy code to
+ * handle the GSC<->CSME communication and then try again. Note that,
+ * although in most cases the 50ms window is enough, the proxy flow is
+ * not actually guaranteed to complete within that time period, so we
+ * might have to try multiple times, up to a worst case of 2 seconds,
+ * after which the request is considered aborted.
+ */
+ do {
+ ret = pxp_pkt_submit(gsc_res->q, 0);
+ if (ret)
+ break;
+
+ if (xe_gsc_check_and_update_pending(xe, &gsc_res->msg_in, 0,
+ &gsc_res->msg_out, 0)) {
+ ret = -EAGAIN;
+ msleep(GSC_PENDING_RETRY_PAUSE_MS);
+ }
+ } while (--retry && ret == -EAGAIN);
+
+ if (ret) {
+ drm_err(&xe->drm, "failed to submit GSC PXP message (%pe)\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ ret = xe_gsc_read_out_header(xe, &gsc_res->msg_out, 0,
+ min_reply_size, &rd_offset);
+ if (ret) {
+ drm_err(&xe->drm, "invalid GSC reply for PXP (%pe)\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ if (msg_out && min_reply_size) {
+ reply_size = xe_map_rd_field(xe, &gsc_res->msg_out, rd_offset,
+ struct pxp_cmd_header, buffer_len);
+ reply_size += sizeof(struct pxp_cmd_header);
+
+ if (reply_size > msg_out_size_max) {
+ drm_warn(&xe->drm, "PXP reply size overflow: %u (%zu)\n",
+ reply_size, msg_out_size_max);
+ reply_size = msg_out_size_max;
+ }
+
+ xe_map_memcpy_from(xe, msg_out, &gsc_res->msg_out,
+ rd_offset, reply_size);
+ }
+
+ xe_gsc_poison_header(xe, &gsc_res->msg_in, 0);
+
+ return ret;
+}
+
+/**
+ * xe_pxp_submit_session_init - submits a PXP GSC session initialization
+ * @gsc_res: the pxp client resources
+ * @id: the session to initialize
+ *
+ * Submit a message to the GSC FW to initialize (i.e. start) a PXP session.
+ *
+ * Returns 0 if the submission is successful, an errno value otherwise.
+ */
+int xe_pxp_submit_session_init(struct xe_pxp_gsc_client_resources *gsc_res, u32 id)
+{
+ struct xe_device *xe = gsc_res->vm->xe;
+ struct pxp43_create_arb_in msg_in = {0};
+ struct pxp43_create_arb_out msg_out = {0};
+ int ret;
+
+ msg_in.header.api_version = PXP_APIVER(4, 3);
+ msg_in.header.command_id = PXP43_CMDID_INIT_SESSION;
+ msg_in.header.stream_id = (FIELD_PREP(PXP43_INIT_SESSION_APPID, id) |
+ FIELD_PREP(PXP43_INIT_SESSION_VALID, 1) |
+ FIELD_PREP(PXP43_INIT_SESSION_APPTYPE, 0));
+ msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header);
+
+ if (id == DRM_XE_PXP_HWDRM_DEFAULT_SESSION)
+ msg_in.protection_mode = PXP43_INIT_SESSION_PROTECTION_ARB;
+
+ ret = gsccs_send_message(gsc_res, &msg_in, sizeof(msg_in),
+ &msg_out, sizeof(msg_out));
+ if (ret) {
+ drm_err(&xe->drm, "Failed to init PXP session %u (%pe)\n", id, ERR_PTR(ret));
+ } else if (msg_out.header.status != 0) {
+ ret = -EIO;
+
+ if (is_fw_err_platform_config(msg_out.header.status))
+ drm_info_once(&xe->drm,
+ "Failed to init PXP session %u due to BIOS/SOC, s=0x%x(%s)\n",
+ id, msg_out.header.status,
+ fw_err_to_string(msg_out.header.status));
+ else
+ drm_dbg(&xe->drm, "Failed to init PXP session %u, s=0x%x\n",
+ id, msg_out.header.status);
+ }
+
+ return ret;
+}
+
+/**
+ * xe_pxp_submit_session_invalidation - submits a PXP GSC invalidation
+ * @gsc_res: the pxp client resources
+ * @id: the session to invalidate
+ *
+ * Submit a message to the GSC FW to notify it that a session has been
+ * terminated and is therefore invalid.
+ *
+ * Returns 0 if the submission is successful, an errno value otherwise.
+ */
+int xe_pxp_submit_session_invalidation(struct xe_pxp_gsc_client_resources *gsc_res, u32 id)
+{
+ struct xe_device *xe = gsc_res->vm->xe;
+ struct pxp43_inv_stream_key_in msg_in = {0};
+ struct pxp43_inv_stream_key_out msg_out = {0};
+ int ret = 0;
+
+ /*
+ * Stream key invalidation reuses the same version 4.2 input/output
+ * command format, but the firmware requires 4.3 API interaction.
+ */
+ msg_in.header.api_version = PXP_APIVER(4, 3);
+ msg_in.header.command_id = PXP43_CMDID_INVALIDATE_STREAM_KEY;
+ msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header);
+
+ msg_in.header.stream_id = FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_VALID, 1);
+ msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_APP_TYPE, 0);
+ msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_ID, id);
+
+ ret = gsccs_send_message(gsc_res, &msg_in, sizeof(msg_in),
+ &msg_out, sizeof(msg_out));
+ if (ret) {
+ drm_err(&xe->drm, "Failed to invalidate PXP stream-key %u (%pe)\n",
+ id, ERR_PTR(ret));
+ } else if (msg_out.header.status != 0) {
+ ret = -EIO;
+
+ if (is_fw_err_platform_config(msg_out.header.status))
+ drm_info_once(&xe->drm,
+ "Failed to invalidate PXP stream-key %u: BIOS/SOC 0x%08x(%s)\n",
+ id, msg_out.header.status,
+ fw_err_to_string(msg_out.header.status));
+ else
+ drm_dbg(&xe->drm, "Failed to invalidate stream-key %u, s=0x%08x\n",
+ id, msg_out.header.status);
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/xe/xe_pxp_submit.h b/drivers/gpu/drm/xe/xe_pxp_submit.h
new file mode 100644
index 000000000000..c9efda02f4b0
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pxp_submit.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2024, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __XE_PXP_SUBMIT_H__
+#define __XE_PXP_SUBMIT_H__
+
+#include <linux/types.h>
+
+struct xe_pxp;
+struct xe_pxp_gsc_client_resources;
+
+int xe_pxp_allocate_execution_resources(struct xe_pxp *pxp);
+void xe_pxp_destroy_execution_resources(struct xe_pxp *pxp);
+
+int xe_pxp_submit_session_init(struct xe_pxp_gsc_client_resources *gsc_res, u32 id);
+int xe_pxp_submit_session_termination(struct xe_pxp *pxp, u32 id);
+int xe_pxp_submit_session_invalidation(struct xe_pxp_gsc_client_resources *gsc_res,
+ u32 id);
+
+#endif /* __XE_PXP_SUBMIT_H__ */
diff --git a/drivers/gpu/drm/xe/xe_pxp_types.h b/drivers/gpu/drm/xe/xe_pxp_types.h
new file mode 100644
index 000000000000..53e9d48d10fb
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pxp_types.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2024, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __XE_PXP_TYPES_H__
+#define __XE_PXP_TYPES_H__
+
+#include <linux/completion.h>
+#include <linux/iosys-map.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+struct xe_bo;
+struct xe_exec_queue;
+struct xe_device;
+struct xe_gt;
+struct xe_vm;
+
+enum xe_pxp_status {
+ XE_PXP_ERROR = -1,
+ XE_PXP_NEEDS_TERMINATION = 0, /* starting status */
+ XE_PXP_NEEDS_ADDITIONAL_TERMINATION,
+ XE_PXP_TERMINATION_IN_PROGRESS,
+ XE_PXP_READY_TO_START,
+ XE_PXP_START_IN_PROGRESS,
+ XE_PXP_ACTIVE,
+ XE_PXP_SUSPENDED,
+};
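+
+/*
+ * Rough sketch of the transitions driven by xe_pxp.c (see the individual
+ * functions for the authoritative flow):
+ *
+ *   NEEDS_TERMINATION -> TERMINATION_IN_PROGRESS -> READY_TO_START ->
+ *   START_IN_PROGRESS -> ACTIVE (or ERROR on failure)
+ *
+ * A termination requested while one is already running moves the state to
+ * NEEDS_ADDITIONAL_TERMINATION, which falls back to NEEDS_TERMINATION once
+ * the first termination completes. Suspend forces SUSPENDED from any state;
+ * resume restarts the cycle at NEEDS_TERMINATION, or ERROR if KCR cannot be
+ * re-enabled.
+ */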
+
+/**
+ * struct xe_pxp_gsc_client_resources - resources for GSC submission by a PXP
+ * client. The GSC FW supports multiple GSC clients active at the same time.
+ */
+struct xe_pxp_gsc_client_resources {
+ /**
+ * @host_session_handle: handle used to identify the client in messages
+ * sent to the GSC firmware.
+ */
+ u64 host_session_handle;
+ /** @vm: VM used for PXP submissions to the GSCCS */
+ struct xe_vm *vm;
+ /** @q: GSCCS exec queue for PXP submissions */
+ struct xe_exec_queue *q;
+
+ /**
+ * @bo: BO used for submissions to the GSCCS and GSC FW. It includes
+ * space for the GSCCS batch and the input/output buffers read/written
+ * by the FW
+ */
+ struct xe_bo *bo;
+ /** @inout_size: size of each of the msg_in/out sections individually */
+ u32 inout_size;
+ /** @batch: iosys_map to the batch memory within the BO */
+ struct iosys_map batch;
+ /** @msg_in: iosys_map to the input memory within the BO */
+ struct iosys_map msg_in;
+ /** @msg_out: iosys_map to the output memory within the BO */
+ struct iosys_map msg_out;
+};
+
+/**
+ * struct xe_pxp - pxp state
+ */
+struct xe_pxp {
+ /** @xe: Backpointer to the xe_device struct */
+ struct xe_device *xe;
+
+ /**
+ * @gt: pointer to the gt that owns the submission-side of PXP
+ * (VDBOX, KCR and GSC)
+ */
+ struct xe_gt *gt;
+
+ /** @vcs_exec: kernel-owned objects for PXP submissions to the VCS */
+ struct {
+ /** @vcs_exec.q: kernel-owned VCS exec queue used for PXP terminations */
+ struct xe_exec_queue *q;
+ /** @vcs_exec.bo: BO used for submissions to the VCS */
+ struct xe_bo *bo;
+ } vcs_exec;
+
+ /** @gsc_res: kernel-owned objects for PXP submissions to the GSCCS */
+ struct xe_pxp_gsc_client_resources gsc_res;
+
+ /** @irq: wrapper for the worker and queue used for PXP irq support */
+ struct {
+ /** @irq.work: worker that manages irq events. */
+ struct work_struct work;
+ /** @irq.wq: workqueue on which to queue the irq work. */
+ struct workqueue_struct *wq;
+ /** @irq.events: pending events, protected with xe->irq.lock. */
+ u32 events;
+#define PXP_TERMINATION_REQUEST BIT(0)
+#define PXP_TERMINATION_COMPLETE BIT(1)
+ } irq;
+
+ /** @mutex: protects the pxp status and the queue list */
+ struct mutex mutex;
+ /** @status: the current pxp status */
+ enum xe_pxp_status status;
+ /** @activation: completion struct that tracks pxp start */
+ struct completion activation;
+ /** @termination: completion struct that tracks terminations */
+ struct completion termination;
+
+ /** @queues: management of exec_queues that use PXP */
+ struct {
+ /** @queues.lock: spinlock protecting the queue management */
+ spinlock_t lock;
+ /** @queues.list: list of exec_queues that use PXP */
+ struct list_head list;
+ } queues;
+
+ /**
+ * @key_instance: keep track of the current iteration of the PXP key.
+ * Note that, due to the time needed for PXP termination and re-start
+ * to complete, the minimum time between two subsequent increases of this
+ * variable is 50ms, and even that only if there is a continuous attack;
+ * normal behavior is for this to increase much more slowly than that.
+ * This means that we don't expect this to ever wrap and don't implement
+ * that case in the code.
+ */
+ u32 key_instance;
+ /**
+ * @last_suspend_key_instance: value of key_instance at the last
+ * suspend. Used to check if any PXP session has been created between
+ * suspend cycles.
+ */
+ u32 last_suspend_key_instance;
+};
+
+#endif /* __XE_PXP_TYPES_H__ */
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index c059639613f7..5e65830dad25 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -16,6 +16,7 @@
#include "regs/xe_gt_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
+#include "xe_eu_stall.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_ggtt.h"
@@ -24,6 +25,7 @@
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_oa.h"
+#include "xe_pxp.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_wa.h"
@@ -120,6 +122,9 @@ query_engine_cycles(struct xe_device *xe,
struct xe_gt *gt;
unsigned int fw_ref;
+ if (IS_SRIOV_VF(xe))
+ return -EOPNOTSUPP;
+
if (query->size == 0) {
query->size = size;
return 0;
@@ -333,8 +338,13 @@ static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] =
xe->info.devid | (xe->info.revid << 16);
if (xe_device_get_root_tile(xe)->mem.vram.usable_size)
- config->info[DRM_XE_QUERY_CONFIG_FLAGS] =
+ config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM;
+ if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_GPUSVM))
+ config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
+ DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR;
+ config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
+ DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY;
config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT] =
xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
config->info[DRM_XE_QUERY_CONFIG_VA_BITS] = xe->info.va_bits;
@@ -698,6 +708,74 @@ static int query_oa_units(struct xe_device *xe,
return ret ? -EFAULT : 0;
}
+static int query_pxp_status(struct xe_device *xe, struct drm_xe_device_query *query)
+{
+ struct drm_xe_query_pxp_status __user *query_ptr = u64_to_user_ptr(query->data);
+ size_t size = sizeof(struct drm_xe_query_pxp_status);
+ struct drm_xe_query_pxp_status resp = { 0 };
+ int ret;
+
+ if (query->size == 0) {
+ query->size = size;
+ return 0;
+ } else if (XE_IOCTL_DBG(xe, query->size != size)) {
+ return -EINVAL;
+ }
+
+ ret = xe_pxp_get_readiness_status(xe->pxp);
+ if (ret < 0)
+ return ret;
+
+ resp.status = ret;
+ resp.supported_session_types = BIT(DRM_XE_PXP_TYPE_HWDRM);
+
+ if (copy_to_user(query_ptr, &resp, size))
+ return -EFAULT;
+
+ return 0;
+}
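For reference, userspace is expected to drive this query with the usual two-call pattern (size discovery, then data). The sketch below is illustrative only and assumes the DRM_XE_DEVICE_QUERY_PXP_STATUS id and the drm_xe_query_pxp_status layout from the xe uapi header:

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

static int example_query_pxp_status(int fd)
{
	struct drm_xe_query_pxp_status pxp;
	struct drm_xe_device_query query = {
		.query = DRM_XE_DEVICE_QUERY_PXP_STATUS,
		.size = 0,	/* first call: ask the kernel for the size */
	};

	if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query))
		return -errno;

	if (query.size != sizeof(pxp))
		return -EPROTO;

	memset(&pxp, 0, sizeof(pxp));
	query.data = (uintptr_t)&pxp;

	/* second call: -ENODEV here means PXP is not supported/enabled */
	if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query))
		return -errno;

	/* 0: prerequisites still pending, 1: ready for PXP exec queues */
	return pxp.status;
}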
+
+static int query_eu_stall(struct xe_device *xe,
+ struct drm_xe_device_query *query)
+{
+ void __user *query_ptr = u64_to_user_ptr(query->data);
+ struct drm_xe_query_eu_stall *info;
+ size_t size, array_size;
+ const u64 *rates;
+ u32 num_rates;
+ int ret;
+
+ if (!xe_eu_stall_supported_on_platform(xe)) {
+ drm_dbg(&xe->drm, "EU stall monitoring is not supported on this platform\n");
+ return -ENODEV;
+ }
+
+ array_size = xe_eu_stall_get_sampling_rates(&num_rates, &rates);
+ size = sizeof(struct drm_xe_query_eu_stall) + array_size;
+
+ if (query->size == 0) {
+ query->size = size;
+ return 0;
+ } else if (XE_IOCTL_DBG(xe, query->size != size)) {
+ return -EINVAL;
+ }
+
+ info = kzalloc(size, GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->num_sampling_rates = num_rates;
+ info->capabilities = DRM_XE_EU_STALL_CAPS_BASE;
+ info->record_size = xe_eu_stall_data_record_size(xe);
+ info->per_xecore_buf_size = xe_eu_stall_get_per_xecore_buf_size();
+ memcpy(info->sampling_rates, rates, array_size);
+
+ ret = copy_to_user(query_ptr, info, size);
+ kfree(info);
+
+ return ret ? -EFAULT : 0;
+}
+
static int (* const xe_query_funcs[])(struct xe_device *xe,
struct drm_xe_device_query *query) = {
query_engines,
@@ -709,6 +787,8 @@ static int (* const xe_query_funcs[])(struct xe_device *xe,
query_engine_cycles,
query_uc_fw_version,
query_oa_units,
+ query_pxp_status,
+ query_eu_stall,
};
int xe_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
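
The two new queries follow the driver's existing two-call convention: a first ioctl with size == 0 reports the required buffer size, and a second call with that exact size fills the buffer. A minimal userspace sketch of that handshake follows; it assumes libdrm's drmIoctl() and the uapi query id DRM_XE_DEVICE_QUERY_EU_STALL, neither of which is part of this diff.

#include <stdint.h>
#include <stdlib.h>
#include <xf86drm.h>
#include <drm/xe_drm.h>

/* Size-probe then fetch the EU stall description (sketch, query id assumed). */
static struct drm_xe_query_eu_stall *get_eu_stall_info(int fd)
{
	struct drm_xe_device_query q = {
		.query = DRM_XE_DEVICE_QUERY_EU_STALL,
		.size = 0,				/* first call: ask for the size */
	};
	struct drm_xe_query_eu_stall *info;

	if (drmIoctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q) || !q.size)
		return NULL;

	info = calloc(1, q.size);
	if (!info)
		return NULL;

	q.data = (uintptr_t)info;			/* second call: fill the buffer */
	if (drmIoctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q)) {
		free(info);
		return NULL;
	}

	return info;					/* caller frees */
}
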
diff --git a/drivers/gpu/drm/xe/xe_reg_whitelist.c b/drivers/gpu/drm/xe/xe_reg_whitelist.c
index edab5d4e3ba5..23f6c81d9994 100644
--- a/drivers/gpu/drm/xe/xe_reg_whitelist.c
+++ b/drivers/gpu/drm/xe/xe_reg_whitelist.c
@@ -88,7 +88,6 @@ static const struct xe_rtp_entry_sr register_whitelist[] = {
RING_FORCE_TO_NONPRIV_ACCESS_RD |
RING_FORCE_TO_NONPRIV_RANGE_4))
},
- {}
};
static void whitelist_apply_to_hwe(struct xe_hw_engine *hwe)
@@ -137,7 +136,8 @@ void xe_reg_whitelist_process_engine(struct xe_hw_engine *hwe)
{
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
- xe_rtp_process_to_sr(&ctx, register_whitelist, &hwe->reg_whitelist);
+ xe_rtp_process_to_sr(&ctx, register_whitelist, ARRAY_SIZE(register_whitelist),
+ &hwe->reg_whitelist);
whitelist_apply_to_hwe(hwe);
}
diff --git a/drivers/gpu/drm/xe/xe_res_cursor.h b/drivers/gpu/drm/xe/xe_res_cursor.h
index dca374b6521c..d1a403cfb628 100644
--- a/drivers/gpu/drm/xe/xe_res_cursor.h
+++ b/drivers/gpu/drm/xe/xe_res_cursor.h
@@ -26,6 +26,7 @@
#include <linux/scatterlist.h>
+#include <drm/drm_pagemap.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_resource.h>
@@ -34,17 +35,38 @@
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_macros.h"
+#include "xe_svm.h"
#include "xe_ttm_vram_mgr.h"
-/* state back for walking over vram_mgr, stolen_mgr, and gtt_mgr allocations */
+/**
+ * struct xe_res_cursor - state for walking over dma mapping, vram_mgr,
+ * stolen_mgr, and gtt_mgr allocations
+ */
struct xe_res_cursor {
+ /** @start: Start of cursor */
u64 start;
+ /** @size: Size of the current segment. */
u64 size;
+ /** @remaining: Remaining bytes in cursor */
u64 remaining;
+ /** @node: Opaque pointer to the current node */
void *node;
+ /** @mem_type: Memory type */
u32 mem_type;
+ /** @sgl: Scatterlist for cursor */
struct scatterlist *sgl;
+ /** @dma_addr: Current element in a struct drm_pagemap_device_addr array */
+ const struct drm_pagemap_device_addr *dma_addr;
+ /** @mm: Buddy allocator for VRAM cursor */
struct drm_buddy *mm;
+ /**
+ * @dma_start: DMA start address for the current segment.
+ * This may be different to @dma_addr.addr since elements in
+ * the array may be coalesced to a single segment.
+ */
+ u64 dma_start;
+ /** @dma_seg_size: Size of the current DMA segment. */
+ u64 dma_seg_size;
};
static struct drm_buddy *xe_res_get_buddy(struct ttm_resource *res)
@@ -70,6 +92,7 @@ static inline void xe_res_first(struct ttm_resource *res,
struct xe_res_cursor *cur)
{
cur->sgl = NULL;
+ cur->dma_addr = NULL;
if (!res)
goto fallback;
@@ -142,6 +165,36 @@ static inline void __xe_res_sg_next(struct xe_res_cursor *cur)
}
/**
+ * __xe_res_dma_next() - Advance the cursor when end-of-segment is reached
+ * @cur: The cursor
+ */
+static inline void __xe_res_dma_next(struct xe_res_cursor *cur)
+{
+ const struct drm_pagemap_device_addr *addr = cur->dma_addr;
+ u64 start = cur->start;
+
+ while (start >= cur->dma_seg_size) {
+ start -= cur->dma_seg_size;
+ addr++;
+ cur->dma_seg_size = PAGE_SIZE << addr->order;
+ }
+ cur->dma_start = addr->addr;
+
+ /* Coalesce array elements */
+ while (cur->dma_seg_size - start < cur->remaining) {
+ if (cur->dma_start + cur->dma_seg_size != addr[1].addr ||
+ addr->proto != addr[1].proto)
+ break;
+ addr++;
+ cur->dma_seg_size += PAGE_SIZE << addr->order;
+ }
+
+ cur->dma_addr = addr;
+ cur->start = start;
+ cur->size = cur->dma_seg_size - start;
+}
+
+/**
* xe_res_first_sg - initialize a xe_res_cursor with a scatter gather table
*
* @sg: scatter gather table to walk
@@ -160,12 +213,43 @@ static inline void xe_res_first_sg(const struct sg_table *sg,
cur->start = start;
cur->remaining = size;
cur->size = 0;
+ cur->dma_addr = NULL;
cur->sgl = sg->sgl;
cur->mem_type = XE_PL_TT;
__xe_res_sg_next(cur);
}
/**
+ * xe_res_first_dma - initialize a xe_res_cursor with dma_addr array
+ *
+ * @dma_addr: struct drm_pagemap_device_addr array to walk
+ * @start: Start of the range
+ * @size: Size of the range
+ * @cur: cursor object to initialize
+ *
+ * Start walking over the range of allocations between @start and @size.
+ */
+static inline void xe_res_first_dma(const struct drm_pagemap_device_addr *dma_addr,
+ u64 start, u64 size,
+ struct xe_res_cursor *cur)
+{
+ XE_WARN_ON(!dma_addr);
+ XE_WARN_ON(!IS_ALIGNED(start, PAGE_SIZE) ||
+ !IS_ALIGNED(size, PAGE_SIZE));
+
+ cur->node = NULL;
+ cur->start = start;
+ cur->remaining = size;
+ cur->dma_seg_size = PAGE_SIZE << dma_addr->order;
+ cur->dma_start = 0;
+ cur->size = 0;
+ cur->dma_addr = dma_addr;
+ __xe_res_dma_next(cur);
+ cur->sgl = NULL;
+ cur->mem_type = XE_PL_TT;
+}
+
+/**
* xe_res_next - advance the cursor
*
* @cur: the cursor to advance
@@ -191,6 +275,12 @@ static inline void xe_res_next(struct xe_res_cursor *cur, u64 size)
return;
}
+ if (cur->dma_addr) {
+ cur->start += size;
+ __xe_res_dma_next(cur);
+ return;
+ }
+
if (cur->sgl) {
cur->start += size;
__xe_res_sg_next(cur);
@@ -232,6 +322,35 @@ static inline void xe_res_next(struct xe_res_cursor *cur, u64 size)
*/
static inline u64 xe_res_dma(const struct xe_res_cursor *cur)
{
- return cur->sgl ? sg_dma_address(cur->sgl) + cur->start : cur->start;
+ if (cur->dma_addr)
+ return cur->dma_start + cur->start;
+ else if (cur->sgl)
+ return sg_dma_address(cur->sgl) + cur->start;
+ else
+ return cur->start;
+}
+
+/**
+ * xe_res_is_vram() - Whether the cursor's current dma address points to
+ * same-device VRAM
+ * @cur: The cursor.
+ *
+ * Return: true iff the address returned by xe_res_dma() points to internal vram.
+ */
+static inline bool xe_res_is_vram(const struct xe_res_cursor *cur)
+{
+ if (cur->dma_addr)
+ return cur->dma_addr->proto == XE_INTERCONNECT_VRAM;
+
+ switch (cur->mem_type) {
+ case XE_PL_STOLEN:
+ case XE_PL_VRAM0:
+ case XE_PL_VRAM1:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
}
#endif
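
A short sketch of how the new DMA-array mode of the cursor is meant to be consumed: initialize with xe_res_first_dma(), read back xe_res_dma()/cur.size sized chunks, and advance with xe_res_next(). The emit_chunk() callback is a placeholder for whatever the caller does with each physically contiguous span; it is not part of this diff.

/* Assumes xe_res_cursor.h (above) is included. */
static void walk_dma_array(const struct drm_pagemap_device_addr *addrs, u64 size,
			   void (*emit_chunk)(u64 dma, u64 len, bool is_vram))
{
	struct xe_res_cursor cur;

	xe_res_first_dma(addrs, 0, size, &cur);
	while (cur.remaining) {
		/* cur.size covers the coalesced, physically contiguous segment. */
		emit_chunk(xe_res_dma(&cur), cur.size, xe_res_is_vram(&cur));
		xe_res_next(&cur, cur.size);
	}
}
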
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index 9f327f27c072..917fc16de866 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -90,11 +90,10 @@ static int emit_flush_dw(u32 *dw, int i)
return i;
}
-static int emit_flush_imm_ggtt(u32 addr, u32 value, bool invalidate_tlb,
- u32 *dw, int i)
+static int emit_flush_imm_ggtt(u32 addr, u32 value, u32 flags, u32 *dw, int i)
{
dw[i++] = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_IMM_DW |
- (invalidate_tlb ? MI_INVALIDATE_TLB : 0);
+ flags;
dw[i++] = addr | MI_FLUSH_DW_USE_GTT;
dw[i++] = 0;
dw[i++] = value;
@@ -111,16 +110,13 @@ static int emit_bb_start(u64 batch_addr, u32 ppgtt_flag, u32 *dw, int i)
return i;
}
-static int emit_flush_invalidate(u32 flag, u32 *dw, int i)
+static int emit_flush_invalidate(u32 *dw, int i)
{
- dw[i] = MI_FLUSH_DW;
- dw[i] |= flag;
- dw[i++] |= MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_IMM_DW |
- MI_FLUSH_DW_STORE_INDEX;
-
- dw[i++] = LRC_PPHWSP_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
+ dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW |
+ MI_FLUSH_IMM_DW | MI_FLUSH_DW_STORE_INDEX;
+ dw[i++] = LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR;
+ dw[i++] = 0;
dw[i++] = 0;
- dw[i++] = ~0U;
return i;
}
@@ -156,7 +152,7 @@ static int emit_pipe_invalidate(u32 mask_flags, bool invalidate_tlb, u32 *dw,
flags &= ~mask_flags;
- return emit_pipe_control(dw, i, 0, flags, LRC_PPHWSP_SCRATCH_ADDR, 0);
+ return emit_pipe_control(dw, i, 0, flags, LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR, 0);
}
static int emit_store_imm_ppgtt_posted(u64 addr, u64 value,
@@ -177,6 +173,10 @@ static int emit_render_cache_flush(struct xe_sched_job *job, u32 *dw, int i)
bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
u32 flags;
+ if (XE_WA(gt, 14016712196))
+ i = emit_pipe_control(dw, i, 0, PIPE_CONTROL_DEPTH_CACHE_FLUSH,
+ LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR, 0);
+
flags = (PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_TILE_CACHE_FLUSH |
PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
@@ -253,7 +253,7 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc
if (job->ring_ops_flush_tlb) {
dw[i++] = preparser_disable(true);
i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
- seqno, true, dw, i);
+ seqno, MI_INVALIDATE_TLB, dw, i);
dw[i++] = preparser_disable(false);
} else {
i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
@@ -269,7 +269,7 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc
dw, i);
}
- i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);
+ i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, 0, dw, i);
i = emit_user_interrupt(dw, i);
@@ -315,7 +315,7 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
if (job->ring_ops_flush_tlb)
i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
- seqno, true, dw, i);
+ seqno, MI_INVALIDATE_TLB, dw, i);
dw[i++] = preparser_disable(false);
@@ -332,7 +332,7 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
dw, i);
}
- i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);
+ i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, 0, dw, i);
i = emit_user_interrupt(dw, i);
@@ -409,7 +409,7 @@ static void emit_migration_job_gen12(struct xe_sched_job *job,
if (!IS_SRIOV_VF(gt_to_xe(job->q->gt))) {
/* XXX: Do we need this? Leaving for now. */
dw[i++] = preparser_disable(true);
- i = emit_flush_invalidate(0, dw, i);
+ i = emit_flush_invalidate(dw, i);
dw[i++] = preparser_disable(false);
}
diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c
index 7a1c78fdfc92..13bb62d3e615 100644
--- a/drivers/gpu/drm/xe/xe_rtp.c
+++ b/drivers/gpu/drm/xe/xe_rtp.c
@@ -237,6 +237,7 @@ static void rtp_mark_active(struct xe_device *xe,
* the save-restore argument.
* @ctx: The context for processing the table, with one of device, gt or hwe
* @entries: Table with RTP definitions
+ * @n_entries: Number of entries to process, usually ARRAY_SIZE(entries)
* @sr: Save-restore struct where matching rules execute the action. This can be
* viewed as the "coalesced view" of multiple the tables. The bits for each
* register set are expected not to collide with previously added entries
@@ -247,6 +248,7 @@ static void rtp_mark_active(struct xe_device *xe,
*/
void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
const struct xe_rtp_entry_sr *entries,
+ size_t n_entries,
struct xe_reg_sr *sr)
{
const struct xe_rtp_entry_sr *entry;
@@ -259,7 +261,9 @@ void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
if (IS_SRIOV_VF(xe))
return;
- for (entry = entries; entry && entry->name; entry++) {
+ xe_assert(xe, entries);
+
+ for (entry = entries; entry - entries < n_entries; entry++) {
bool match = false;
if (entry->flags & XE_RTP_ENTRY_FLAG_FOREACH_ENGINE) {
diff --git a/drivers/gpu/drm/xe/xe_rtp.h b/drivers/gpu/drm/xe/xe_rtp.h
index 38b9f13bba5e..4fe736a11c42 100644
--- a/drivers/gpu/drm/xe/xe_rtp.h
+++ b/drivers/gpu/drm/xe/xe_rtp.h
@@ -430,7 +430,7 @@ void xe_rtp_process_ctx_enable_active_tracking(struct xe_rtp_process_ctx *ctx,
void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
const struct xe_rtp_entry_sr *entries,
- struct xe_reg_sr *sr);
+ size_t n_entries, struct xe_reg_sr *sr);
void xe_rtp_process(struct xe_rtp_process_ctx *ctx,
const struct xe_rtp_entry *entries);
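
With the sentinel entry gone from the whitelist table, callers are expected to pass the table length explicitly, normally via ARRAY_SIZE(). A hedged sketch of the updated calling convention (table name and contents are illustrative only):

static const struct xe_rtp_entry_sr example_entries[] = {
	/* XE_RTP_NAME()/XE_RTP_RULES()/XE_RTP_ACTIONS() entries, no {} terminator */
};

static void example_process(struct xe_hw_engine *hwe, struct xe_reg_sr *sr)
{
	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);

	/* The entry count replaces the old empty-sentinel termination. */
	xe_rtp_process_to_sr(&ctx, example_entries, ARRAY_SIZE(example_entries), sr);
}
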
diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
index e055bed7ae55..f8fe61e25518 100644
--- a/drivers/gpu/drm/xe/xe_sa.c
+++ b/drivers/gpu/drm/xe/xe_sa.c
@@ -31,47 +31,55 @@ static void xe_sa_bo_manager_fini(struct drm_device *drm, void *arg)
sa_manager->bo = NULL;
}
-struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 align)
+/**
+ * __xe_sa_bo_manager_init() - Create and initialize the suballocator
+ * @tile: the &xe_tile where allocate
+ * @size: number of bytes to allocate
+ * @guard: number of bytes to exclude from suballocations
+ * @align: alignment for each suballocated chunk
+ *
+ * Prepares the suballocation manager for suballocations.
+ *
+ * Return: a pointer to the &xe_sa_manager or an ERR_PTR on failure.
+ */
+struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 guard, u32 align)
{
struct xe_device *xe = tile_to_xe(tile);
- u32 managed_size = size - SZ_4K;
+ struct xe_sa_manager *sa_manager;
+ u32 managed_size;
struct xe_bo *bo;
int ret;
- struct xe_sa_manager *sa_manager = drmm_kzalloc(&tile_to_xe(tile)->drm,
- sizeof(*sa_manager),
- GFP_KERNEL);
+ xe_tile_assert(tile, size > guard);
+ managed_size = size - guard;
+
+ sa_manager = drmm_kzalloc(&xe->drm, sizeof(*sa_manager), GFP_KERNEL);
if (!sa_manager)
return ERR_PTR(-ENOMEM);
- sa_manager->bo = NULL;
-
bo = xe_managed_bo_create_pin_map(xe, tile, size,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
XE_BO_FLAG_GGTT |
XE_BO_FLAG_GGTT_INVALIDATE);
if (IS_ERR(bo)) {
- drm_err(&xe->drm, "failed to allocate bo for sa manager: %ld\n",
- PTR_ERR(bo));
+ drm_err(&xe->drm, "Failed to prepare %uKiB BO for SA manager (%pe)\n",
+ size / SZ_1K, bo);
return ERR_CAST(bo);
}
sa_manager->bo = bo;
sa_manager->is_iomem = bo->vmap.is_iomem;
-
- drm_suballoc_manager_init(&sa_manager->base, managed_size, align);
sa_manager->gpu_addr = xe_bo_ggtt_addr(bo);
if (bo->vmap.is_iomem) {
sa_manager->cpu_ptr = kvzalloc(managed_size, GFP_KERNEL);
- if (!sa_manager->cpu_ptr) {
- sa_manager->bo = NULL;
+ if (!sa_manager->cpu_ptr)
return ERR_PTR(-ENOMEM);
- }
} else {
sa_manager->cpu_ptr = bo->vmap.vaddr;
memset(sa_manager->cpu_ptr, 0, bo->ttm.base.size);
}
+ drm_suballoc_manager_init(&sa_manager->base, managed_size, align);
ret = drmm_add_action_or_reset(&xe->drm, xe_sa_bo_manager_fini,
sa_manager);
if (ret)
@@ -80,8 +88,17 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
return sa_manager;
}
-struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
- unsigned int size)
+/**
+ * __xe_sa_bo_new() - Make a suballocation but use custom gfp flags.
+ * @sa_manager: the &xe_sa_manager
+ * @size: number of bytes we want to suballocate
+ * @gfp: gfp flags used for memory allocation. Typically GFP_KERNEL.
+ *
+ * Try to make a suballocation of size @size.
+ *
+ * Return: a &drm_suballoc, or an ERR_PTR.
+ */
+struct drm_suballoc *__xe_sa_bo_new(struct xe_sa_manager *sa_manager, u32 size, gfp_t gfp)
{
/*
* BB to large, return -ENOBUFS indicating user should split
@@ -90,7 +107,7 @@ struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
if (size > sa_manager->base.size)
return ERR_PTR(-ENOBUFS);
- return drm_suballoc_new(&sa_manager->base, size, GFP_KERNEL, true, 0);
+ return drm_suballoc_new(&sa_manager->base, size, gfp, true, 0);
}
void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo)
diff --git a/drivers/gpu/drm/xe/xe_sa.h b/drivers/gpu/drm/xe/xe_sa.h
index 4e96483057d7..1170ee5a81a8 100644
--- a/drivers/gpu/drm/xe/xe_sa.h
+++ b/drivers/gpu/drm/xe/xe_sa.h
@@ -5,19 +5,37 @@
#ifndef _XE_SA_H_
#define _XE_SA_H_
+#include <linux/sizes.h>
+#include <linux/types.h>
#include "xe_sa_types.h"
struct dma_fence;
-struct xe_bo;
struct xe_tile;
-struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 align);
+struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 guard, u32 align);
+struct drm_suballoc *__xe_sa_bo_new(struct xe_sa_manager *sa_manager, u32 size, gfp_t gfp);
+
+static inline struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 align)
+{
+ return __xe_sa_bo_manager_init(tile, size, SZ_4K, align);
+}
+
+/**
+ * xe_sa_bo_new() - Make a suballocation.
+ * @sa_manager: the &xe_sa_manager
+ * @size: number of bytes we want to suballocate
+ *
+ * Try to make a suballocation of size @size.
+ *
+ * Return: a &drm_suballoc, or an ERR_PTR.
+ */
+static inline struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager, u32 size)
+{
+ return __xe_sa_bo_new(sa_manager, size, GFP_KERNEL);
+}
-struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
- u32 size);
void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo);
-void xe_sa_bo_free(struct drm_suballoc *sa_bo,
- struct dma_fence *fence);
+void xe_sa_bo_free(struct drm_suballoc *sa_bo, struct dma_fence *fence);
static inline struct xe_sa_manager *
to_xe_sa_manager(struct drm_suballoc_manager *mng)
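
For reference, a brief sketch of the reworked suballocator API: create a manager with an explicit guard area, carve out a chunk, and release it against a fence. The sizes, alignment and the placement of this helper are illustrative only.

static int example_sa_usage(struct xe_tile *tile, struct dma_fence *fence)
{
	struct xe_sa_manager *mgr;
	struct drm_suballoc *sa;

	/* Back with SZ_8K of GGTT-mapped memory, keep SZ_4K as a guard area. */
	mgr = __xe_sa_bo_manager_init(tile, SZ_8K, SZ_4K, 16);
	if (IS_ERR(mgr))
		return PTR_ERR(mgr);

	sa = xe_sa_bo_new(mgr, SZ_1K);		/* GFP_KERNEL wrapper */
	if (IS_ERR(sa))
		return PTR_ERR(sa);

	/* ... write the suballocation through the manager's CPU pointer ... */
	xe_sa_bo_flush_write(sa);

	/* The chunk is recycled once @fence signals. */
	xe_sa_bo_free(sa, fence);
	return 0;
}
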
diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h
index d942b20a9f29..dbf260dded8d 100644
--- a/drivers/gpu/drm/xe/xe_sched_job_types.h
+++ b/drivers/gpu/drm/xe/xe_sched_job_types.h
@@ -18,9 +18,9 @@ struct dma_fence_chain;
* struct xe_job_ptrs - Per hw engine instance data
*/
struct xe_job_ptrs {
- /** @lrc_fence: Pre-allocated uinitialized lrc fence.*/
+ /** @lrc_fence: Pre-allocated uninitialized lrc fence.*/
struct dma_fence *lrc_fence;
- /** @chain_fence: Pre-allocated ninitialized fence chain node. */
+ /** @chain_fence: Pre-allocated uninitialized fence chain node. */
struct dma_fence_chain *chain_fence;
/** @batch_addr: Batch buffer address. */
u64 batch_addr;
diff --git a/drivers/gpu/drm/xe/xe_shrinker.c b/drivers/gpu/drm/xe/xe_shrinker.c
new file mode 100644
index 000000000000..8184390f9c7b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_shrinker.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <linux/shrinker.h>
+
+#include <drm/ttm/ttm_backup.h>
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_tt.h>
+
+#include "xe_bo.h"
+#include "xe_pm.h"
+#include "xe_shrinker.h"
+
+/**
+ * struct xe_shrinker - per-device shrinker
+ * @xe: Back pointer to the device.
+ * @lock: Lock protecting accounting.
+ * @shrinkable_pages: Number of pages that are currently shrinkable.
+ * @purgeable_pages: Number of pages that are currently purgeable.
+ * @shrink: Pointer to the mm shrinker.
+ * @pm_worker: Worker to wake up the device if required.
+ */
+struct xe_shrinker {
+ struct xe_device *xe;
+ rwlock_t lock;
+ long shrinkable_pages;
+ long purgeable_pages;
+ struct shrinker *shrink;
+ struct work_struct pm_worker;
+};
+
+static struct xe_shrinker *to_xe_shrinker(struct shrinker *shrink)
+{
+ return shrink->private_data;
+}
+
+/**
+ * xe_shrinker_mod_pages() - Modify shrinker page accounting
+ * @shrinker: Pointer to the struct xe_shrinker.
+ * @shrinkable: Shrinkable pages delta. May be negative.
+ * @purgeable: Purgeable page delta. May be negative.
+ *
+ * Modifies the shrinkable and purgeable pages accounting.
+ */
+void
+xe_shrinker_mod_pages(struct xe_shrinker *shrinker, long shrinkable, long purgeable)
+{
+ write_lock(&shrinker->lock);
+ shrinker->shrinkable_pages += shrinkable;
+ shrinker->purgeable_pages += purgeable;
+ write_unlock(&shrinker->lock);
+}
+
+static s64 xe_shrinker_walk(struct xe_device *xe,
+ struct ttm_operation_ctx *ctx,
+ const struct xe_bo_shrink_flags flags,
+ unsigned long to_scan, unsigned long *scanned)
+{
+ unsigned int mem_type;
+ s64 freed = 0, lret;
+
+ for (mem_type = XE_PL_SYSTEM; mem_type <= XE_PL_TT; ++mem_type) {
+ struct ttm_resource_manager *man = ttm_manager_type(&xe->ttm, mem_type);
+ struct ttm_bo_lru_cursor curs;
+ struct ttm_buffer_object *ttm_bo;
+
+ if (!man || !man->use_tt)
+ continue;
+
+ ttm_bo_lru_for_each_reserved_guarded(&curs, man, ctx, ttm_bo) {
+ if (!ttm_bo_shrink_suitable(ttm_bo, ctx))
+ continue;
+
+ lret = xe_bo_shrink(ctx, ttm_bo, flags, scanned);
+ if (lret < 0)
+ return lret;
+
+ freed += lret;
+ if (*scanned >= to_scan)
+ break;
+ }
+ }
+
+ return freed;
+}
+
+static unsigned long
+xe_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ struct xe_shrinker *shrinker = to_xe_shrinker(shrink);
+ unsigned long num_pages;
+ bool can_backup = !!(sc->gfp_mask & __GFP_FS);
+
+ num_pages = ttm_backup_bytes_avail() >> PAGE_SHIFT;
+ read_lock(&shrinker->lock);
+
+ if (can_backup)
+ num_pages = min_t(unsigned long, num_pages, shrinker->shrinkable_pages);
+ else
+ num_pages = 0;
+
+ num_pages += shrinker->purgeable_pages;
+ read_unlock(&shrinker->lock);
+
+ return num_pages ? num_pages : SHRINK_EMPTY;
+}
+
+/*
+ * Check if we need runtime pm, and if so try to grab a reference if
+ * already active. If grabbing a reference fails, queue a worker that
+ * does it for us outside of reclaim, but don't wait for it to complete.
+ * If bo shrinking needs an rpm reference and we don't have it (yet),
+ * that bo will be skipped anyway.
+ */
+static bool xe_shrinker_runtime_pm_get(struct xe_shrinker *shrinker, bool force,
+ unsigned long nr_to_scan, bool can_backup)
+{
+ struct xe_device *xe = shrinker->xe;
+
+ if (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe) ||
+ !ttm_backup_bytes_avail())
+ return false;
+
+ if (!force) {
+ read_lock(&shrinker->lock);
+ force = (nr_to_scan > shrinker->purgeable_pages && can_backup);
+ read_unlock(&shrinker->lock);
+ if (!force)
+ return false;
+ }
+
+ if (!xe_pm_runtime_get_if_active(xe)) {
+ if (xe_rpm_reclaim_safe(xe) && !ttm_bo_shrink_avoid_wait()) {
+ xe_pm_runtime_get(xe);
+ return true;
+ }
+ queue_work(xe->unordered_wq, &shrinker->pm_worker);
+ return false;
+ }
+
+ return true;
+}
+
+static void xe_shrinker_runtime_pm_put(struct xe_shrinker *shrinker, bool runtime_pm)
+{
+ if (runtime_pm)
+ xe_pm_runtime_put(shrinker->xe);
+}
+
+static unsigned long xe_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+ struct xe_shrinker *shrinker = to_xe_shrinker(shrink);
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = ttm_bo_shrink_avoid_wait(),
+ };
+ unsigned long nr_to_scan, nr_scanned = 0, freed = 0;
+ struct xe_bo_shrink_flags shrink_flags = {
+ .purge = true,
+ /* Don't request writeback without __GFP_IO. */
+ .writeback = !ctx.no_wait_gpu && (sc->gfp_mask & __GFP_IO),
+ };
+ bool runtime_pm;
+ bool purgeable;
+ bool can_backup = !!(sc->gfp_mask & __GFP_FS);
+ s64 lret;
+
+ nr_to_scan = sc->nr_to_scan;
+
+ read_lock(&shrinker->lock);
+ purgeable = !!shrinker->purgeable_pages;
+ read_unlock(&shrinker->lock);
+
+ /* Might need runtime PM. Try to wake early if it looks like it. */
+ runtime_pm = xe_shrinker_runtime_pm_get(shrinker, false, nr_to_scan, can_backup);
+
+ if (purgeable && nr_scanned < nr_to_scan) {
+ lret = xe_shrinker_walk(shrinker->xe, &ctx, shrink_flags,
+ nr_to_scan, &nr_scanned);
+ if (lret >= 0)
+ freed += lret;
+ }
+
+ sc->nr_scanned = nr_scanned;
+ if (nr_scanned >= nr_to_scan || !can_backup)
+ goto out;
+
+ /* If we didn't wake before, try to do it now if needed. */
+ if (!runtime_pm)
+ runtime_pm = xe_shrinker_runtime_pm_get(shrinker, true, 0, can_backup);
+
+ shrink_flags.purge = false;
+ lret = xe_shrinker_walk(shrinker->xe, &ctx, shrink_flags,
+ nr_to_scan, &nr_scanned);
+ if (lret >= 0)
+ freed += lret;
+
+ sc->nr_scanned = nr_scanned;
+out:
+ xe_shrinker_runtime_pm_put(shrinker, runtime_pm);
+ return nr_scanned ? freed : SHRINK_STOP;
+}
+
+/* Wake up the device for shrinking. */
+static void xe_shrinker_pm(struct work_struct *work)
+{
+ struct xe_shrinker *shrinker =
+ container_of(work, typeof(*shrinker), pm_worker);
+
+ xe_pm_runtime_get(shrinker->xe);
+ xe_pm_runtime_put(shrinker->xe);
+}
+
+/**
+ * xe_shrinker_create() - Create an xe per-device shrinker
+ * @xe: Pointer to the xe device.
+ *
+ * Return: A pointer to the created shrinker on success,
+ * negative error code on failure.
+ */
+struct xe_shrinker *xe_shrinker_create(struct xe_device *xe)
+{
+ struct xe_shrinker *shrinker = kzalloc(sizeof(*shrinker), GFP_KERNEL);
+
+ if (!shrinker)
+ return ERR_PTR(-ENOMEM);
+
+ shrinker->shrink = shrinker_alloc(0, "xe system shrinker");
+ if (!shrinker->shrink) {
+ kfree(shrinker);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_WORK(&shrinker->pm_worker, xe_shrinker_pm);
+ shrinker->xe = xe;
+ rwlock_init(&shrinker->lock);
+ shrinker->shrink->count_objects = xe_shrinker_count;
+ shrinker->shrink->scan_objects = xe_shrinker_scan;
+ shrinker->shrink->private_data = shrinker;
+ shrinker_register(shrinker->shrink);
+
+ return shrinker;
+}
+
+/**
+ * xe_shrinker_destroy() - Destroy an xe per-device shrinker
+ * @shrinker: Pointer to the shrinker to destroy.
+ */
+void xe_shrinker_destroy(struct xe_shrinker *shrinker)
+{
+ xe_assert(shrinker->xe, !shrinker->shrinkable_pages);
+ xe_assert(shrinker->xe, !shrinker->purgeable_pages);
+ shrinker_free(shrinker->shrink);
+ flush_work(&shrinker->pm_worker);
+ kfree(shrinker);
+}
diff --git a/drivers/gpu/drm/xe/xe_shrinker.h b/drivers/gpu/drm/xe/xe_shrinker.h
new file mode 100644
index 000000000000..28a038f4fcbf
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_shrinker.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_SHRINKER_H_
+#define _XE_SHRINKER_H_
+
+struct xe_shrinker;
+struct xe_device;
+
+void xe_shrinker_mod_pages(struct xe_shrinker *shrinker, long shrinkable, long purgeable);
+
+struct xe_shrinker *xe_shrinker_create(struct xe_device *xe);
+
+void xe_shrinker_destroy(struct xe_shrinker *shrinker);
+
+#endif
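
The accounting contract is straightforward: whatever code makes TT-backed pages shrinkable or purgeable reports the delta through xe_shrinker_mod_pages() and reverses it when the pages stop being eligible; xe_shrinker_destroy() asserts both counters have returned to zero. A minimal sketch of that pairing (helper names are illustrative):

static int example_shrinker_setup(struct xe_device *xe, struct xe_shrinker **out)
{
	struct xe_shrinker *shrinker = xe_shrinker_create(xe);

	if (IS_ERR(shrinker))
		return PTR_ERR(shrinker);

	*out = shrinker;
	return 0;
}

static void example_mark_purgeable(struct xe_shrinker *shrinker, long nr_pages)
{
	/* Move nr_pages from the shrinkable bucket to the purgeable bucket. */
	xe_shrinker_mod_pages(shrinker, -nr_pages, nr_pages);
}
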
diff --git a/drivers/gpu/drm/xe/xe_sriov.c b/drivers/gpu/drm/xe/xe_sriov.c
index 04e2f539ccd9..a0eab44c0e76 100644
--- a/drivers/gpu/drm/xe/xe_sriov.c
+++ b/drivers/gpu/drm/xe/xe_sriov.c
@@ -81,7 +81,7 @@ void xe_sriov_probe_early(struct xe_device *xe)
xe->sriov.__mode = mode;
xe_assert(xe, xe->sriov.__mode);
- if (has_sriov)
+ if (IS_SRIOV(xe))
drm_info(&xe->drm, "Running in %s mode\n",
xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));
}
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.c b/drivers/gpu/drm/xe/xe_survivability_mode.c
new file mode 100644
index 000000000000..d939ce70e6fa
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_survivability_mode.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "xe_survivability_mode.h"
+#include "xe_survivability_mode_types.h"
+
+#include <linux/kobject.h>
+#include <linux/pci.h>
+#include <linux/sysfs.h>
+
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_heci_gsc.h"
+#include "xe_mmio.h"
+#include "xe_pcode_api.h"
+#include "xe_vsec.h"
+
+#define MAX_SCRATCH_MMIO 8
+
+/**
+ * DOC: Xe Boot Survivability
+ *
+ * Boot Survivability is a software-based workflow for recovering a system in a failed boot state.
+ * Here, system recoverability is concerned with recovering the firmware responsible for boot.
+ *
+ * This is implemented by loading the driver with the bare minimum (no drm card) to allow the
+ * firmware to be flashed through mei and telemetry to be collected. The driver's probe flow is
+ * modified such that it enters survivability mode when pcode initialization is incomplete and the
+ * boot status denotes a failure. The driver then populates the survivability_mode PCI sysfs file,
+ * indicating survivability mode, and provides the additional information required for debug.
+ *
+ * In survivability mode, KMD exposes the following admin-only readable sysfs file:
+ *
+ * device/survivability_mode: The presence of this file indicates that the card is in survivability
+ * mode. It also provides additional information on why the driver entered
+ * survivability mode.
+ *
+ * Capability Information - Provides boot status
+ * Postcode Information - Provides information about the failure
+ * Overflow Information - Provides history of previous failures
+ * Auxiliary Information - Certain failures may have information in
+ * addition to postcode information
+ */
+
+static u32 aux_history_offset(u32 reg_value)
+{
+ return REG_FIELD_GET(AUXINFO_HISTORY_OFFSET, reg_value);
+}
+
+static void set_survivability_info(struct xe_mmio *mmio, struct xe_survivability_info *info,
+ int id, char *name)
+{
+ strscpy(info[id].name, name, sizeof(info[id].name));
+ info[id].reg = PCODE_SCRATCH(id).raw;
+ info[id].value = xe_mmio_read32(mmio, PCODE_SCRATCH(id));
+}
+
+static void populate_survivability_info(struct xe_device *xe)
+{
+ struct xe_survivability *survivability = &xe->survivability;
+ struct xe_survivability_info *info = survivability->info;
+ struct xe_mmio *mmio;
+ u32 id = 0, reg_value;
+ char name[NAME_MAX];
+ int index;
+
+ mmio = xe_root_tile_mmio(xe);
+ set_survivability_info(mmio, info, id, "Capability Info");
+ reg_value = info[id].value;
+
+ if (reg_value & HISTORY_TRACKING) {
+ id++;
+ set_survivability_info(mmio, info, id, "Postcode Info");
+
+ if (reg_value & OVERFLOW_SUPPORT) {
+ id = REG_FIELD_GET(OVERFLOW_REG_OFFSET, reg_value);
+ set_survivability_info(mmio, info, id, "Overflow Info");
+ }
+ }
+
+ if (reg_value & AUXINFO_SUPPORT) {
+ id = REG_FIELD_GET(AUXINFO_REG_OFFSET, reg_value);
+
+ for (index = 0; id && reg_value; index++, reg_value = info[id].value,
+ id = aux_history_offset(reg_value)) {
+ snprintf(name, NAME_MAX, "Auxiliary Info %d", index);
+ set_survivability_info(mmio, info, id, name);
+ }
+ }
+}
+
+static void log_survivability_info(struct pci_dev *pdev)
+{
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ struct xe_survivability *survivability = &xe->survivability;
+ struct xe_survivability_info *info = survivability->info;
+ int id;
+
+ dev_info(&pdev->dev, "Survivability Boot Status : Critical Failure (%d)\n",
+ survivability->boot_status);
+ for (id = 0; id < MAX_SCRATCH_MMIO; id++) {
+ if (info[id].reg)
+ dev_info(&pdev->dev, "%s: 0x%x - 0x%x\n", info[id].name,
+ info[id].reg, info[id].value);
+ }
+}
+
+static ssize_t survivability_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buff)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ struct xe_survivability *survivability = &xe->survivability;
+ struct xe_survivability_info *info = survivability->info;
+ int index = 0, count = 0;
+
+ for (index = 0; index < MAX_SCRATCH_MMIO; index++) {
+ if (info[index].reg)
+ count += sysfs_emit_at(buff, count, "%s: 0x%x - 0x%x\n", info[index].name,
+ info[index].reg, info[index].value);
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR_ADMIN_RO(survivability_mode);
+
+static void xe_survivability_mode_fini(void *arg)
+{
+ struct xe_device *xe = arg;
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ struct device *dev = &pdev->dev;
+
+ sysfs_remove_file(&dev->kobj, &dev_attr_survivability_mode.attr);
+}
+
+static int enable_survivability_mode(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ struct xe_survivability *survivability = &xe->survivability;
+ int ret = 0;
+
+ /* create survivability mode sysfs */
+ ret = sysfs_create_file(&dev->kobj, &dev_attr_survivability_mode.attr);
+ if (ret) {
+ dev_warn(dev, "Failed to create survivability sysfs files\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(xe->drm.dev,
+ xe_survivability_mode_fini, xe);
+ if (ret)
+ return ret;
+
+ ret = xe_heci_gsc_init(xe);
+ if (ret)
+ return ret;
+
+ xe_vsec_init(xe);
+
+ survivability->mode = true;
+ dev_err(dev, "In Survivability Mode\n");
+
+ return 0;
+}
+
+/**
+ * xe_survivability_mode_is_enabled - check if survivability mode is enabled
+ * @xe: xe device instance
+ *
+ * Returns true if in survivability mode, false otherwise
+ */
+bool xe_survivability_mode_is_enabled(struct xe_device *xe)
+{
+ return xe->survivability.mode;
+}
+
+/**
+ * xe_survivability_mode_required - checks if survivability mode is required
+ * @xe: xe device instance
+ *
+ * This function reads the boot status from Pcode
+ *
+ * Return: true if boot status indicates failure, false otherwise
+ */
+bool xe_survivability_mode_required(struct xe_device *xe)
+{
+ struct xe_survivability *survivability = &xe->survivability;
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
+ u32 data;
+
+ if (!IS_DGFX(xe) || xe->info.platform < XE_BATTLEMAGE || IS_SRIOV_VF(xe))
+ return false;
+
+ data = xe_mmio_read32(mmio, PCODE_SCRATCH(0));
+ survivability->boot_status = REG_FIELD_GET(BOOT_STATUS, data);
+
+ return survivability->boot_status == NON_CRITICAL_FAILURE ||
+ survivability->boot_status == CRITICAL_FAILURE;
+}
+
+/**
+ * xe_survivability_mode_enable - Initialize and enable the survivability mode
+ * @xe: xe device instance
+ *
+ * Initialize survivability information and enable survivability mode
+ *
+ * Return: 0 for success, negative error code otherwise.
+ */
+int xe_survivability_mode_enable(struct xe_device *xe)
+{
+ struct xe_survivability *survivability = &xe->survivability;
+ struct xe_survivability_info *info;
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+
+ survivability->size = MAX_SCRATCH_MMIO;
+
+ info = devm_kcalloc(xe->drm.dev, survivability->size, sizeof(*info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ survivability->info = info;
+
+ populate_survivability_info(xe);
+
+ /* Only log debug information and exit if it is a critical failure */
+ if (survivability->boot_status == CRITICAL_FAILURE) {
+ log_survivability_info(pdev);
+ return -ENXIO;
+ }
+
+ return enable_survivability_mode(pdev);
+}
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.h b/drivers/gpu/drm/xe/xe_survivability_mode.h
new file mode 100644
index 000000000000..f4df5f9025ce
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_survivability_mode.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SURVIVABILITY_MODE_H_
+#define _XE_SURVIVABILITY_MODE_H_
+
+#include <linux/types.h>
+
+struct xe_device;
+
+int xe_survivability_mode_enable(struct xe_device *xe);
+bool xe_survivability_mode_is_enabled(struct xe_device *xe);
+bool xe_survivability_mode_required(struct xe_device *xe);
+
+#endif /* _XE_SURVIVABILITY_MODE_H_ */
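
In the probe path these helpers are meant to be used together: check whether the boot status demands survivability mode and, if so, enable it and stop short of full driver bring-up. A hedged sketch of that flow (the surrounding probe function is illustrative, not copied from xe_pci.c):

static int example_probe_step(struct xe_device *xe)
{
	int err;

	if (xe_survivability_mode_required(xe)) {
		err = xe_survivability_mode_enable(xe);
		if (err)
			return err;
		/* Minimal bring-up only: mei flashing and telemetry. */
		return 0;
	}

	/* ... continue with the regular probe sequence ... */
	return 0;
}
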
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode_types.h b/drivers/gpu/drm/xe/xe_survivability_mode_types.h
new file mode 100644
index 000000000000..19d433e253df
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_survivability_mode_types.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SURVIVABILITY_MODE_TYPES_H_
+#define _XE_SURVIVABILITY_MODE_TYPES_H_
+
+#include <linux/limits.h>
+#include <linux/types.h>
+
+struct xe_survivability_info {
+ char name[NAME_MAX];
+ u32 reg;
+ u32 value;
+};
+
+/**
+ * struct xe_survivability: Contains survivability mode information
+ */
+struct xe_survivability {
+ /** @info: struct that holds survivability info from scratch registers */
+ struct xe_survivability_info *info;
+
+ /** @size: number of scratch registers */
+ u32 size;
+
+ /** @boot_status: indicates critical/non critical boot failure */
+ u8 boot_status;
+
+ /** @mode: boolean to indicate survivability mode */
+ bool mode;
+};
+
+#endif /* _XE_SURVIVABILITY_MODE_TYPES_H_ */
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
new file mode 100644
index 000000000000..3e829c87d7b4
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -0,0 +1,946 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include "xe_bo.h"
+#include "xe_gt_tlb_invalidation.h"
+#include "xe_migrate.h"
+#include "xe_module.h"
+#include "xe_pt.h"
+#include "xe_svm.h"
+#include "xe_ttm_vram_mgr.h"
+#include "xe_vm.h"
+#include "xe_vm_types.h"
+
+static bool xe_svm_range_in_vram(struct xe_svm_range *range)
+{
+ /* Not reliable without notifier lock */
+ return range->base.flags.has_devmem_pages;
+}
+
+static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
+{
+ /* Not reliable without notifier lock */
+ return xe_svm_range_in_vram(range) && range->tile_present;
+}
+
+static struct xe_vm *gpusvm_to_vm(struct drm_gpusvm *gpusvm)
+{
+ return container_of(gpusvm, struct xe_vm, svm.gpusvm);
+}
+
+static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r)
+{
+ return gpusvm_to_vm(r->gpusvm);
+}
+
+static unsigned long xe_svm_range_start(struct xe_svm_range *range)
+{
+ return drm_gpusvm_range_start(&range->base);
+}
+
+static unsigned long xe_svm_range_end(struct xe_svm_range *range)
+{
+ return drm_gpusvm_range_end(&range->base);
+}
+
+static unsigned long xe_svm_range_size(struct xe_svm_range *range)
+{
+ return drm_gpusvm_range_size(&range->base);
+}
+
+#define range_debug(r__, operation__) \
+ vm_dbg(&range_to_vm(&(r__)->base)->xe->drm, \
+ "%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
+ "start=0x%014lx, end=0x%014lx, size=%lu", \
+ (operation__), range_to_vm(&(r__)->base)->usm.asid, \
+ (r__)->base.gpusvm, \
+ xe_svm_range_in_vram((r__)) ? 1 : 0, \
+ xe_svm_range_has_vram_binding((r__)) ? 1 : 0, \
+ (r__)->base.notifier_seq, \
+ xe_svm_range_start((r__)), xe_svm_range_end((r__)), \
+ xe_svm_range_size((r__)))
+
+void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
+{
+ range_debug(range, operation);
+}
+
+static void *xe_svm_devm_owner(struct xe_device *xe)
+{
+ return xe;
+}
+
+static struct drm_gpusvm_range *
+xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
+{
+ struct xe_svm_range *range;
+
+ range = kzalloc(sizeof(*range), GFP_KERNEL);
+ if (!range)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&range->garbage_collector_link);
+ xe_vm_get(gpusvm_to_vm(gpusvm));
+
+ return &range->base;
+}
+
+static void xe_svm_range_free(struct drm_gpusvm_range *range)
+{
+ xe_vm_put(range_to_vm(range));
+ kfree(range);
+}
+
+static struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
+{
+ return container_of(r, struct xe_svm_range, base);
+}
+
+static void
+xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
+ const struct mmu_notifier_range *mmu_range)
+{
+ struct xe_device *xe = vm->xe;
+
+ range_debug(range, "GARBAGE COLLECTOR ADD");
+
+ drm_gpusvm_range_set_unmapped(&range->base, mmu_range);
+
+ spin_lock(&vm->svm.garbage_collector.lock);
+ if (list_empty(&range->garbage_collector_link))
+ list_add_tail(&range->garbage_collector_link,
+ &vm->svm.garbage_collector.range_list);
+ spin_unlock(&vm->svm.garbage_collector.lock);
+
+ queue_work(xe_device_get_root_tile(xe)->primary_gt->usm.pf_wq,
+ &vm->svm.garbage_collector.work);
+}
+
+static u8
+xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
+ const struct mmu_notifier_range *mmu_range,
+ u64 *adj_start, u64 *adj_end)
+{
+ struct xe_svm_range *range = to_xe_range(r);
+ struct xe_device *xe = vm->xe;
+ struct xe_tile *tile;
+ u8 tile_mask = 0;
+ u8 id;
+
+ xe_svm_assert_in_notifier(vm);
+
+ range_debug(range, "NOTIFIER");
+
+ /* Skip if already unmapped or if no binding exists */
+ if (range->base.flags.unmapped || !range->tile_present)
+ return 0;
+
+ range_debug(range, "NOTIFIER - EXECUTE");
+
+ /* Adjust invalidation to range boundaries */
+ *adj_start = min(xe_svm_range_start(range), mmu_range->start);
+ *adj_end = max(xe_svm_range_end(range), mmu_range->end);
+
+ /*
+ * XXX: Ideally would zap PTEs in one shot in xe_svm_invalidate but the
+ * invalidation code can't correctly cope with sparse ranges or
+ * invalidations spanning multiple ranges.
+ */
+ for_each_tile(tile, xe, id)
+ if (xe_pt_zap_ptes_range(tile, vm, range)) {
+ tile_mask |= BIT(id);
+ range->tile_invalidated |= BIT(id);
+ }
+
+ return tile_mask;
+}
+
+static void
+xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r,
+ const struct mmu_notifier_range *mmu_range)
+{
+ struct drm_gpusvm_ctx ctx = { .in_notifier = true, };
+
+ xe_svm_assert_in_notifier(vm);
+
+ drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx);
+ if (!xe_vm_is_closed(vm) && mmu_range->event == MMU_NOTIFY_UNMAP)
+ xe_svm_garbage_collector_add_range(vm, to_xe_range(r),
+ mmu_range);
+}
+
+static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_notifier *notifier,
+ const struct mmu_notifier_range *mmu_range)
+{
+ struct xe_vm *vm = gpusvm_to_vm(gpusvm);
+ struct xe_device *xe = vm->xe;
+ struct xe_tile *tile;
+ struct drm_gpusvm_range *r, *first;
+ struct xe_gt_tlb_invalidation_fence
+ fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
+ u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
+ u8 tile_mask = 0;
+ u8 id;
+ u32 fence_id = 0;
+ long err;
+
+ xe_svm_assert_in_notifier(vm);
+
+ vm_dbg(&gpusvm_to_vm(gpusvm)->xe->drm,
+ "INVALIDATE: asid=%u, gpusvm=%p, seqno=%lu, start=0x%016lx, end=0x%016lx, event=%d",
+ vm->usm.asid, gpusvm, notifier->notifier.invalidate_seq,
+ mmu_range->start, mmu_range->end, mmu_range->event);
+
+ /* Adjust invalidation to notifier boundaries */
+ adj_start = max(drm_gpusvm_notifier_start(notifier), adj_start);
+ adj_end = min(drm_gpusvm_notifier_end(notifier), adj_end);
+
+ first = drm_gpusvm_range_find(notifier, adj_start, adj_end);
+ if (!first)
+ return;
+
+ /*
+ * PTs may be getting destroyed, so it is not safe to touch them, but the
+ * PTs should already be invalidated at this point in time. Regardless, we
+ * still need to ensure any dma mappings are unmapped here.
+ */
+ if (xe_vm_is_closed(vm))
+ goto range_notifier_event_end;
+
+ /*
+ * XXX: Less than ideal to always wait on VM's resv slots if an
+ * invalidation is not required. Could walk the range list twice to figure
+ * out if an invalidation is needed, but that is also not ideal.
+ */
+ err = dma_resv_wait_timeout(xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+ XE_WARN_ON(err <= 0);
+
+ r = first;
+ drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
+ tile_mask |= xe_svm_range_notifier_event_begin(vm, r, mmu_range,
+ &adj_start,
+ &adj_end);
+ if (!tile_mask)
+ goto range_notifier_event_end;
+
+ xe_device_wmb(xe);
+
+ for_each_tile(tile, xe, id) {
+ if (tile_mask & BIT(id)) {
+ int err;
+
+ xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
+ &fence[fence_id], true);
+
+ err = xe_gt_tlb_invalidation_range(tile->primary_gt,
+ &fence[fence_id],
+ adj_start,
+ adj_end,
+ vm->usm.asid);
+ if (WARN_ON_ONCE(err < 0))
+ goto wait;
+ ++fence_id;
+
+ if (!tile->media_gt)
+ continue;
+
+ xe_gt_tlb_invalidation_fence_init(tile->media_gt,
+ &fence[fence_id], true);
+
+ err = xe_gt_tlb_invalidation_range(tile->media_gt,
+ &fence[fence_id],
+ adj_start,
+ adj_end,
+ vm->usm.asid);
+ if (WARN_ON_ONCE(err < 0))
+ goto wait;
+ ++fence_id;
+ }
+ }
+
+wait:
+ for (id = 0; id < fence_id; ++id)
+ xe_gt_tlb_invalidation_fence_wait(&fence[id]);
+
+range_notifier_event_end:
+ r = first;
+ drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
+ xe_svm_range_notifier_event_end(vm, r, mmu_range);
+}
+
+static int __xe_svm_garbage_collector(struct xe_vm *vm,
+ struct xe_svm_range *range)
+{
+ struct dma_fence *fence;
+
+ range_debug(range, "GARBAGE COLLECTOR");
+
+ xe_vm_lock(vm, false);
+ fence = xe_vm_range_unbind(vm, range);
+ xe_vm_unlock(vm);
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+ dma_fence_put(fence);
+
+ drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base);
+
+ return 0;
+}
+
+static int xe_svm_garbage_collector(struct xe_vm *vm)
+{
+ struct xe_svm_range *range;
+ int err;
+
+ lockdep_assert_held_write(&vm->lock);
+
+ if (xe_vm_is_closed_or_banned(vm))
+ return -ENOENT;
+
+ spin_lock(&vm->svm.garbage_collector.lock);
+ for (;;) {
+ range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
+ typeof(*range),
+ garbage_collector_link);
+ if (!range)
+ break;
+
+ list_del(&range->garbage_collector_link);
+ spin_unlock(&vm->svm.garbage_collector.lock);
+
+ err = __xe_svm_garbage_collector(vm, range);
+ if (err) {
+ drm_warn(&vm->xe->drm,
+ "Garbage collection failed: %pe\n",
+ ERR_PTR(err));
+ xe_vm_kill(vm, true);
+ return err;
+ }
+
+ spin_lock(&vm->svm.garbage_collector.lock);
+ }
+ spin_unlock(&vm->svm.garbage_collector.lock);
+
+ return 0;
+}
+
+static void xe_svm_garbage_collector_work_func(struct work_struct *w)
+{
+ struct xe_vm *vm = container_of(w, struct xe_vm,
+ svm.garbage_collector.work);
+
+ down_write(&vm->lock);
+ xe_svm_garbage_collector(vm);
+ up_write(&vm->lock);
+}
+
+static struct xe_vram_region *page_to_vr(struct page *page)
+{
+ return container_of(page_pgmap(page), struct xe_vram_region, pagemap);
+}
+
+static struct xe_tile *vr_to_tile(struct xe_vram_region *vr)
+{
+ return container_of(vr, struct xe_tile, mem.vram);
+}
+
+static u64 xe_vram_region_page_to_dpa(struct xe_vram_region *vr,
+ struct page *page)
+{
+ u64 dpa;
+ struct xe_tile *tile = vr_to_tile(vr);
+ u64 pfn = page_to_pfn(page);
+ u64 offset;
+
+ xe_tile_assert(tile, is_device_private_page(page));
+ xe_tile_assert(tile, (pfn << PAGE_SHIFT) >= vr->hpa_base);
+
+ offset = (pfn << PAGE_SHIFT) - vr->hpa_base;
+ dpa = vr->dpa_base + offset;
+
+ return dpa;
+}
+
+enum xe_svm_copy_dir {
+ XE_SVM_COPY_TO_VRAM,
+ XE_SVM_COPY_TO_SRAM,
+};
+
+static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
+ unsigned long npages, const enum xe_svm_copy_dir dir)
+{
+ struct xe_vram_region *vr = NULL;
+ struct xe_tile *tile;
+ struct dma_fence *fence = NULL;
+ unsigned long i;
+#define XE_VRAM_ADDR_INVALID ~0x0ull
+ u64 vram_addr = XE_VRAM_ADDR_INVALID;
+ int err = 0, pos = 0;
+ bool sram = dir == XE_SVM_COPY_TO_SRAM;
+
+ /*
+ * This flow is complex: it locates physically contiguous device pages,
+ * derives the starting physical address, and performs a single GPU copy
+ * for every 8M chunk in a DMA address array. Both device pages and
+ * DMA addresses may be sparsely populated. If either is NULL, a copy is
+ * triggered based on the current search state. The last GPU copy is
+ * waited on to ensure all copies are complete.
+ */
+
+ for (i = 0; i < npages; ++i) {
+ struct page *spage = pages[i];
+ struct dma_fence *__fence;
+ u64 __vram_addr;
+ bool match = false, chunk, last;
+
+#define XE_MIGRATE_CHUNK_SIZE SZ_8M
+ chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
+ last = (i + 1) == npages;
+
+ /* No CPU page and no device pages queued to copy */
+ if (!dma_addr[i] && vram_addr == XE_VRAM_ADDR_INVALID)
+ continue;
+
+ if (!vr && spage) {
+ vr = page_to_vr(spage);
+ tile = vr_to_tile(vr);
+ }
+ XE_WARN_ON(spage && page_to_vr(spage) != vr);
+
+ /*
+ * CPU page and device page valid, capture physical address on
+ * first device page, check if physically contiguous on subsequent
+ * device pages.
+ */
+ if (dma_addr[i] && spage) {
+ __vram_addr = xe_vram_region_page_to_dpa(vr, spage);
+ if (vram_addr == XE_VRAM_ADDR_INVALID) {
+ vram_addr = __vram_addr;
+ pos = i;
+ }
+
+ match = vram_addr + PAGE_SIZE * (i - pos) == __vram_addr;
+ }
+
+ /*
+ * Mismatched physical address, 8M copy chunk, or last page -
+ * trigger a copy.
+ */
+ if (!match || chunk || last) {
+ /*
+ * Extra page for first copy if last page and matching
+ * physical address.
+ */
+ int incr = (match && last) ? 1 : 0;
+
+ if (vram_addr != XE_VRAM_ADDR_INVALID) {
+ if (sram) {
+ vm_dbg(&tile->xe->drm,
+ "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
+ vram_addr, (u64)dma_addr[pos], i - pos + incr);
+ __fence = xe_migrate_from_vram(tile->migrate,
+ i - pos + incr,
+ vram_addr,
+ dma_addr + pos);
+ } else {
+ vm_dbg(&tile->xe->drm,
+ "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
+ (u64)dma_addr[pos], vram_addr, i - pos + incr);
+ __fence = xe_migrate_to_vram(tile->migrate,
+ i - pos + incr,
+ dma_addr + pos,
+ vram_addr);
+ }
+ if (IS_ERR(__fence)) {
+ err = PTR_ERR(__fence);
+ goto err_out;
+ }
+
+ dma_fence_put(fence);
+ fence = __fence;
+ }
+
+ /* Setup physical address of next device page */
+ if (dma_addr[i] && spage) {
+ vram_addr = __vram_addr;
+ pos = i;
+ } else {
+ vram_addr = XE_VRAM_ADDR_INVALID;
+ }
+
+ /* Extra mismatched device page, copy it */
+ if (!match && last && vram_addr != XE_VRAM_ADDR_INVALID) {
+ if (sram) {
+ vm_dbg(&tile->xe->drm,
+ "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
+ vram_addr, (u64)dma_addr[pos], 1);
+ __fence = xe_migrate_from_vram(tile->migrate, 1,
+ vram_addr,
+ dma_addr + pos);
+ } else {
+ vm_dbg(&tile->xe->drm,
+ "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
+ (u64)dma_addr[pos], vram_addr, 1);
+ __fence = xe_migrate_to_vram(tile->migrate, 1,
+ dma_addr + pos,
+ vram_addr);
+ }
+ if (IS_ERR(__fence)) {
+ err = PTR_ERR(__fence);
+ goto err_out;
+ }
+
+ dma_fence_put(fence);
+ fence = __fence;
+ }
+ }
+ }
+
+err_out:
+ /* Wait for all copies to complete */
+ if (fence) {
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ }
+
+ return err;
+#undef XE_MIGRATE_CHUNK_SIZE
+#undef XE_VRAM_ADDR_INVALID
+}
+
+static int xe_svm_copy_to_devmem(struct page **pages, dma_addr_t *dma_addr,
+ unsigned long npages)
+{
+ return xe_svm_copy(pages, dma_addr, npages, XE_SVM_COPY_TO_VRAM);
+}
+
+static int xe_svm_copy_to_ram(struct page **pages, dma_addr_t *dma_addr,
+ unsigned long npages)
+{
+ return xe_svm_copy(pages, dma_addr, npages, XE_SVM_COPY_TO_SRAM);
+}
+
+static struct xe_bo *to_xe_bo(struct drm_gpusvm_devmem *devmem_allocation)
+{
+ return container_of(devmem_allocation, struct xe_bo, devmem_allocation);
+}
+
+static void xe_svm_devmem_release(struct drm_gpusvm_devmem *devmem_allocation)
+{
+ struct xe_bo *bo = to_xe_bo(devmem_allocation);
+
+ xe_bo_put_async(bo);
+}
+
+static u64 block_offset_to_pfn(struct xe_vram_region *vr, u64 offset)
+{
+ return PHYS_PFN(offset + vr->hpa_base);
+}
+
+static struct drm_buddy *tile_to_buddy(struct xe_tile *tile)
+{
+ return &tile->mem.vram.ttm.mm;
+}
+
+static int xe_svm_populate_devmem_pfn(struct drm_gpusvm_devmem *devmem_allocation,
+ unsigned long npages, unsigned long *pfn)
+{
+ struct xe_bo *bo = to_xe_bo(devmem_allocation);
+ struct ttm_resource *res = bo->ttm.resource;
+ struct list_head *blocks = &to_xe_ttm_vram_mgr_resource(res)->blocks;
+ struct drm_buddy_block *block;
+ int j = 0;
+
+ list_for_each_entry(block, blocks, link) {
+ struct xe_vram_region *vr = block->private;
+ struct xe_tile *tile = vr_to_tile(vr);
+ struct drm_buddy *buddy = tile_to_buddy(tile);
+ u64 block_pfn = block_offset_to_pfn(vr, drm_buddy_block_offset(block));
+ int i;
+
+ for (i = 0; i < drm_buddy_block_size(buddy, block) >> PAGE_SHIFT; ++i)
+ pfn[j++] = block_pfn + i;
+ }
+
+ return 0;
+}
+
+static const struct drm_gpusvm_devmem_ops gpusvm_devmem_ops = {
+ .devmem_release = xe_svm_devmem_release,
+ .populate_devmem_pfn = xe_svm_populate_devmem_pfn,
+ .copy_to_devmem = xe_svm_copy_to_devmem,
+ .copy_to_ram = xe_svm_copy_to_ram,
+};
+
+static const struct drm_gpusvm_ops gpusvm_ops = {
+ .range_alloc = xe_svm_range_alloc,
+ .range_free = xe_svm_range_free,
+ .invalidate = xe_svm_invalidate,
+};
+
+static const unsigned long fault_chunk_sizes[] = {
+ SZ_2M,
+ SZ_64K,
+ SZ_4K,
+};
+
+/**
+ * xe_svm_init() - SVM initialize
+ * @vm: The VM.
+ *
+ * Initialize SVM state which is embedded within the VM.
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_svm_init(struct xe_vm *vm)
+{
+ int err;
+
+ spin_lock_init(&vm->svm.garbage_collector.lock);
+ INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
+ INIT_WORK(&vm->svm.garbage_collector.work,
+ xe_svm_garbage_collector_work_func);
+
+ err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
+ current->mm, xe_svm_devm_owner(vm->xe), 0,
+ vm->size, xe_modparam.svm_notifier_size * SZ_1M,
+ &gpusvm_ops, fault_chunk_sizes,
+ ARRAY_SIZE(fault_chunk_sizes));
+ if (err)
+ return err;
+
+ drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
+
+ return 0;
+}
+
+/**
+ * xe_svm_close() - SVM close
+ * @vm: The VM.
+ *
+ * Close SVM state (i.e., stop and flush all SVM actions).
+ */
+void xe_svm_close(struct xe_vm *vm)
+{
+ xe_assert(vm->xe, xe_vm_is_closed(vm));
+ flush_work(&vm->svm.garbage_collector.work);
+}
+
+/**
+ * xe_svm_fini() - SVM finalize
+ * @vm: The VM.
+ *
+ * Finalize SVM state which is embedded within the VM.
+ */
+void xe_svm_fini(struct xe_vm *vm)
+{
+ xe_assert(vm->xe, xe_vm_is_closed(vm));
+
+ drm_gpusvm_fini(&vm->svm.gpusvm);
+}
+
+static bool xe_svm_range_is_valid(struct xe_svm_range *range,
+ struct xe_tile *tile)
+{
+ return (range->tile_present & ~range->tile_invalidated) & BIT(tile->id);
+}
+
+static struct xe_vram_region *tile_to_vr(struct xe_tile *tile)
+{
+ return &tile->mem.vram;
+}
+
+static int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
+ struct xe_svm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ struct mm_struct *mm = vm->svm.gpusvm.mm;
+ struct xe_vram_region *vr = tile_to_vr(tile);
+ struct drm_buddy_block *block;
+ struct list_head *blocks;
+ struct xe_bo *bo;
+ ktime_t end = 0;
+ int err;
+
+ range_debug(range, "ALLOCATE VRAM");
+
+ if (!mmget_not_zero(mm))
+ return -EFAULT;
+ mmap_read_lock(mm);
+
+retry:
+ bo = xe_bo_create_locked(tile_to_xe(tile), NULL, NULL,
+ xe_svm_range_size(range),
+ ttm_bo_type_device,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_CPU_ADDR_MIRROR);
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ if (xe_vm_validate_should_retry(NULL, err, &end))
+ goto retry;
+ goto unlock;
+ }
+
+ drm_gpusvm_devmem_init(&bo->devmem_allocation,
+ vm->xe->drm.dev, mm,
+ &gpusvm_devmem_ops,
+ &tile->mem.vram.dpagemap,
+ xe_svm_range_size(range));
+
+ blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
+ list_for_each_entry(block, blocks, link)
+ block->private = vr;
+
+ err = drm_gpusvm_migrate_to_devmem(&vm->svm.gpusvm, &range->base,
+ &bo->devmem_allocation, ctx);
+ xe_bo_unlock(bo);
+ if (err)
+ xe_bo_put(bo); /* Creation ref */
+
+unlock:
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ return err;
+}
+
+/**
+ * xe_svm_handle_pagefault() - SVM handle page fault
+ * @vm: The VM.
+ * @vma: The CPU address mirror VMA.
+ * @tile: The tile upon which the fault occurred.
+ * @fault_addr: The GPU fault address.
+ * @atomic: The fault atomic access bit.
+ *
+ * Create GPU bindings for a SVM page fault. Optionally migrate to device
+ * memory.
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_tile *tile, u64 fault_addr,
+ bool atomic)
+{
+ struct drm_gpusvm_ctx ctx = {
+ .read_only = xe_vma_read_only(vma),
+ .devmem_possible = IS_DGFX(vm->xe) &&
+ IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR),
+ .check_pages_threshold = IS_DGFX(vm->xe) &&
+ IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? SZ_64K : 0,
+ };
+ struct xe_svm_range *range;
+ struct drm_gpusvm_range *r;
+ struct drm_exec exec;
+ struct dma_fence *fence;
+ ktime_t end = 0;
+ int err;
+
+ lockdep_assert_held_write(&vm->lock);
+ xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
+
+retry:
+ /* Always process UNMAPs first so the view of SVM ranges is current */
+ err = xe_svm_garbage_collector(vm);
+ if (err)
+ return err;
+
+ r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, fault_addr,
+ xe_vma_start(vma), xe_vma_end(vma),
+ &ctx);
+ if (IS_ERR(r))
+ return PTR_ERR(r);
+
+ range = to_xe_range(r);
+ if (xe_svm_range_is_valid(range, tile))
+ return 0;
+
+ range_debug(range, "PAGE FAULT");
+
+ /* XXX: Add migration policy, for now migrate range once */
+ if (!range->skip_migrate && range->base.flags.migrate_devmem &&
+ xe_svm_range_size(range) >= SZ_64K) {
+ range->skip_migrate = true;
+
+ err = xe_svm_alloc_vram(vm, tile, range, &ctx);
+ if (err) {
+ drm_dbg(&vm->xe->drm,
+ "VRAM allocation failed, falling back to "
+ "retrying fault, asid=%u, errno=%pe\n",
+ vm->usm.asid, ERR_PTR(err));
+ goto retry;
+ }
+ }
+
+ range_debug(range, "GET PAGES");
+ err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, r, &ctx);
+ /* Corner where CPU mappings have changed */
+ if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
+ if (err == -EOPNOTSUPP) {
+ range_debug(range, "PAGE FAULT - EVICT PAGES");
+ drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
+ }
+ drm_dbg(&vm->xe->drm,
+ "Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
+ vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
+ range_debug(range, "PAGE FAULT - RETRY PAGES");
+ goto retry;
+ }
+ if (err) {
+ range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
+ goto err_out;
+ }
+
+ range_debug(range, "PAGE FAULT - BIND");
+
+retry_bind:
+ drm_exec_init(&exec, 0, 0);
+ drm_exec_until_all_locked(&exec) {
+ err = drm_exec_lock_obj(&exec, vm->gpuvm.r_obj);
+ drm_exec_retry_on_contention(&exec);
+ if (err) {
+ drm_exec_fini(&exec);
+ goto err_out;
+ }
+
+ fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
+ if (IS_ERR(fence)) {
+ drm_exec_fini(&exec);
+ err = PTR_ERR(fence);
+ if (err == -EAGAIN) {
+ range_debug(range, "PAGE FAULT - RETRY BIND");
+ goto retry;
+ }
+ if (xe_vm_validate_should_retry(&exec, err, &end))
+ goto retry_bind;
+ goto err_out;
+ }
+ }
+ drm_exec_fini(&exec);
+
+ if (xe_modparam.always_migrate_to_vram)
+ range->skip_migrate = false;
+
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+
+err_out:
+
+ return err;
+}
+
+/**
+ * xe_svm_has_mapping() - SVM has mappings
+ * @vm: The VM.
+ * @start: Start address.
+ * @end: End address.
+ *
+ * Check if an address range has SVM mappings.
+ *
+ * Return: True if address range has a SVM mapping, False otherwise
+ */
+bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
+{
+ return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
+}
+
+/**
+ * xe_svm_bo_evict() - SVM evict BO to system memory
+ * @bo: BO to evict
+ *
+ * SVM evict BO to system memory. GPU SVM layer ensures all device pages
+ * are evicted before returning.
+ *
+ * Return: 0 on success, standard error code otherwise
+ */
+int xe_svm_bo_evict(struct xe_bo *bo)
+{
+ return drm_gpusvm_evict_to_ram(&bo->devmem_allocation);
+}
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
+static struct drm_pagemap_device_addr
+xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
+ struct device *dev,
+ struct page *page,
+ unsigned int order,
+ enum dma_data_direction dir)
+{
+ struct device *pgmap_dev = dpagemap->dev;
+ enum drm_interconnect_protocol prot;
+ dma_addr_t addr;
+
+ if (pgmap_dev == dev) {
+ addr = xe_vram_region_page_to_dpa(page_to_vr(page), page);
+ prot = XE_INTERCONNECT_VRAM;
+ } else {
+ addr = DMA_MAPPING_ERROR;
+ prot = 0;
+ }
+
+ return drm_pagemap_device_addr_encode(addr, prot, order, dir);
+}
+
+static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
+ .device_map = xe_drm_pagemap_device_map,
+};
+
+/**
+ * xe_devm_add() - Remap and provide memmap backing for device memory
+ * @tile: tile that the memory region belongs to
+ * @vr: vram memory region to remap
+ *
+ * This remaps device memory to the host physical address space and creates
+ * struct pages to back the device memory.
+ *
+ * Return: 0 on success, standard error code otherwise
+ */
+int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct device *dev = &to_pci_dev(xe->drm.dev)->dev;
+ struct resource *res;
+ void *addr;
+ int ret;
+
+ res = devm_request_free_mem_region(dev, &iomem_resource,
+ vr->usable_size);
+ if (IS_ERR(res)) {
+ ret = PTR_ERR(res);
+ return ret;
+ }
+
+ vr->pagemap.type = MEMORY_DEVICE_PRIVATE;
+ vr->pagemap.range.start = res->start;
+ vr->pagemap.range.end = res->end;
+ vr->pagemap.nr_range = 1;
+ vr->pagemap.ops = drm_gpusvm_pagemap_ops_get();
+ vr->pagemap.owner = xe_svm_devm_owner(xe);
+ addr = devm_memremap_pages(dev, &vr->pagemap);
+
+ vr->dpagemap.dev = dev;
+ vr->dpagemap.ops = &xe_drm_pagemap_ops;
+
+ if (IS_ERR(addr)) {
+ devm_release_mem_region(dev, res->start, resource_size(res));
+ ret = PTR_ERR(addr);
+ drm_err(&xe->drm, "Failed to remap tile %d memory, errno %pe\n",
+ tile->id, ERR_PTR(ret));
+ return ret;
+ }
+ vr->hpa_base = res->start;
+
+ drm_dbg(&xe->drm, "Added tile %d memory [%llx-%llx] to devm, remapped to %pr\n",
+ tile->id, vr->io_start, vr->io_start + vr->usable_size, res);
+ return 0;
+}
+#else
+int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
+{
+ return 0;
+}
+#endif
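A hedged usage sketch: on discrete parts with USM support, the device-memory region is registered once per tile at init time (the xe_tile.c hunk further below does exactly this, ignoring the return value; the error handling here is illustrative).

	if (xe->info.has_usm && IS_DGFX(xe)) {
		err = xe_devm_add(tile, &tile->mem.vram);
		if (err)
			drm_warn(&xe->drm, "SVM VRAM region init failed: %d\n", err);
	}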
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
new file mode 100644
index 000000000000..e059590e5076
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_SVM_H_
+#define _XE_SVM_H_
+
+#include <drm/drm_pagemap.h>
+#include <drm/drm_gpusvm.h>
+
+#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER
+
+struct xe_bo;
+struct xe_vram_region;
+struct xe_tile;
+struct xe_vm;
+struct xe_vma;
+
+/** struct xe_svm_range - SVM range */
+struct xe_svm_range {
+ /** @base: base drm_gpusvm_range */
+ struct drm_gpusvm_range base;
+ /**
+ * @garbage_collector_link: Link into VM's garbage collect SVM range
+ * list. Protected by VM's garbage collect lock.
+ */
+ struct list_head garbage_collector_link;
+ /**
+ * @tile_present: Tile mask indicating a binding is present for this range.
+ * Protected by GPU SVM notifier lock.
+ */
+ u8 tile_present;
+ /**
+ * @tile_invalidated: Tile mask indicating the binding is invalidated for this
+ * range. Protected by GPU SVM notifier lock.
+ */
+ u8 tile_invalidated;
+ /**
+ * @skip_migrate: Skip migration to VRAM, protected by GPU fault handler
+ * locking.
+ */
+ u8 skip_migrate :1;
+};
+
+#if IS_ENABLED(CONFIG_DRM_GPUSVM)
+/**
+ * xe_svm_range_pages_valid() - SVM range pages valid
+ * @range: SVM range
+ *
+ * Return: True if SVM range pages are valid, False otherwise
+ */
+static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
+{
+ return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);
+}
+
+int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr);
+
+int xe_svm_init(struct xe_vm *vm);
+
+void xe_svm_fini(struct xe_vm *vm);
+
+void xe_svm_close(struct xe_vm *vm);
+
+int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_tile *tile, u64 fault_addr,
+ bool atomic);
+
+bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);
+
+int xe_svm_bo_evict(struct xe_bo *bo);
+
+void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);
+#else
+static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
+{
+ return false;
+}
+
+static inline
+int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
+{
+ return 0;
+}
+
+static inline
+int xe_svm_init(struct xe_vm *vm)
+{
+ return 0;
+}
+
+static inline
+void xe_svm_fini(struct xe_vm *vm)
+{
+}
+
+static inline
+void xe_svm_close(struct xe_vm *vm)
+{
+}
+
+static inline
+int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_tile *tile, u64 fault_addr,
+ bool atomic)
+{
+ return 0;
+}
+
+static inline
+bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
+{
+ return false;
+}
+
+static inline
+int xe_svm_bo_evict(struct xe_bo *bo)
+{
+ return 0;
+}
+
+static inline
+void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
+{
+}
+#endif
+
+/**
+ * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
+ * @range: SVM range
+ *
+ * Return: True if SVM range has a DMA mapping, False otherwise
+ */
+static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
+{
+ lockdep_assert_held(&range->base.gpusvm->notifier_lock);
+ return range->base.flags.has_dma_mapping;
+}
+
+#define xe_svm_assert_in_notifier(vm__) \
+ lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)
+
+#define xe_svm_notifier_lock(vm__) \
+ drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)
+
+#define xe_svm_notifier_unlock(vm__) \
+ drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
+
+#endif
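A small usage sketch (an assumption, not part of this header): state protected by the GPU SVM notifier lock is read through the wrappers above.

	static bool svm_range_ready(struct xe_vm *vm, struct xe_svm_range *range)
	{
		bool ready;

		xe_svm_notifier_lock(vm);
		ready = xe_svm_range_has_dma_mapping(range) && range->tile_present;
		xe_svm_notifier_unlock(vm);

		return ready;
	}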
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index 42f5bebd09e5..f87276df18f2 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -210,6 +210,7 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
return 0;
}
+ALLOW_ERROR_INJECTION(xe_sync_entry_parse, ERRNO);
int xe_sync_entry_add_deps(struct xe_sync_entry *sync, struct xe_sched_job *job)
{
diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c
index 07cf7cfe4abd..0771acbbf367 100644
--- a/drivers/gpu/drm/xe/xe_tile.c
+++ b/drivers/gpu/drm/xe/xe_tile.c
@@ -13,6 +13,7 @@
#include "xe_migrate.h"
#include "xe_pcode.h"
#include "xe_sa.h"
+#include "xe_svm.h"
#include "xe_tile.h"
#include "xe_tile_sysfs.h"
#include "xe_ttm_vram_mgr.h"
@@ -94,10 +95,6 @@ static int xe_tile_alloc(struct xe_tile *tile)
return -ENOMEM;
tile->mem.ggtt->tile = tile;
- tile->mem.vram_mgr = drmm_kzalloc(drm, sizeof(*tile->mem.vram_mgr), GFP_KERNEL);
- if (!tile->mem.vram_mgr)
- return -ENOMEM;
-
return 0;
}
@@ -139,7 +136,7 @@ static int tile_ttm_mgr_init(struct xe_tile *tile)
int err;
if (tile->mem.vram.usable_size) {
- err = xe_ttm_vram_mgr_init(tile, tile->mem.vram_mgr);
+ err = xe_ttm_vram_mgr_init(tile, &tile->mem.vram.ttm);
if (err)
return err;
xe->info.mem_region_mask |= BIT(tile->id) << 1;
@@ -164,23 +161,29 @@ static int tile_ttm_mgr_init(struct xe_tile *tile)
*/
int xe_tile_init_noalloc(struct xe_tile *tile)
{
+ struct xe_device *xe = tile_to_xe(tile);
int err;
err = tile_ttm_mgr_init(tile);
if (err)
return err;
+ xe_wa_apply_tile_workarounds(tile);
+
+ if (xe->info.has_usm && IS_DGFX(xe))
+ xe_devm_add(tile, &tile->mem.vram);
+
+ return xe_tile_sysfs_init(tile);
+}
+
+int xe_tile_init(struct xe_tile *tile)
+{
tile->mem.kernel_bb_pool = xe_sa_bo_manager_init(tile, SZ_1M, 16);
if (IS_ERR(tile->mem.kernel_bb_pool))
return PTR_ERR(tile->mem.kernel_bb_pool);
- xe_wa_apply_tile_workarounds(tile);
-
- err = xe_tile_sysfs_init(tile);
-
return 0;
}
-
void xe_tile_migrate_wait(struct xe_tile *tile)
{
xe_migrate_wait(tile->migrate);
diff --git a/drivers/gpu/drm/xe/xe_tile.h b/drivers/gpu/drm/xe/xe_tile.h
index 1c9e42ade6b0..eb939316d55b 100644
--- a/drivers/gpu/drm/xe/xe_tile.h
+++ b/drivers/gpu/drm/xe/xe_tile.h
@@ -12,6 +12,7 @@ struct xe_tile;
int xe_tile_init_early(struct xe_tile *tile, struct xe_device *xe, u8 id);
int xe_tile_init_noalloc(struct xe_tile *tile);
+int xe_tile_init(struct xe_tile *tile);
void xe_tile_migrate_wait(struct xe_tile *tile);
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index d5281de04d54..b4a3577df70c 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -427,6 +427,36 @@ DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get_ioctl,
TP_ARGS(xe, caller)
);
+TRACE_EVENT(xe_eu_stall_data_read,
+ TP_PROTO(u8 slice, u8 subslice,
+ u32 read_ptr, u32 write_ptr,
+ size_t read_size, size_t total_size),
+ TP_ARGS(slice, subslice,
+ read_ptr, write_ptr,
+ read_size, total_size),
+
+ TP_STRUCT__entry(__field(u8, slice)
+ __field(u8, subslice)
+ __field(u32, read_ptr)
+ __field(u32, write_ptr)
+ __field(size_t, read_size)
+ __field(size_t, total_size)
+ ),
+
+ TP_fast_assign(__entry->slice = slice;
+ __entry->subslice = subslice;
+ __entry->read_ptr = read_ptr;
+ __entry->write_ptr = write_ptr;
+ __entry->read_size = read_size;
+ __entry->total_size = total_size;
+ ),
+
+ TP_printk("slice: %u subslice: %u read ptr: 0x%x write ptr: 0x%x read size: %zu total read size: %zu",
+ __entry->slice, __entry->subslice,
+ __entry->read_ptr, __entry->write_ptr,
+ __entry->read_size, __entry->total_size)
+);
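A hedged example of emitting this tracepoint from the EU stall read path; the local variable names are illustrative:

	trace_xe_eu_stall_data_read(slice, subslice, read_ptr, write_ptr,
				    read_size, total_size);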
+
#endif
/* This part must be outside protection */
diff --git a/drivers/gpu/drm/xe/xe_trace_bo.h b/drivers/gpu/drm/xe/xe_trace_bo.h
index ea50fee50c7d..ccebd5f0878e 100644
--- a/drivers/gpu/drm/xe/xe_trace_bo.h
+++ b/drivers/gpu/drm/xe/xe_trace_bo.h
@@ -53,6 +53,11 @@ DEFINE_EVENT(xe_bo, xe_bo_validate,
TP_ARGS(bo)
);
+DEFINE_EVENT(xe_bo, xe_bo_create,
+ TP_PROTO(struct xe_bo *bo),
+ TP_ARGS(bo)
+);
+
TRACE_EVENT(xe_bo_move,
TP_PROTO(struct xe_bo *bo, uint32_t new_placement, uint32_t old_placement,
bool move_lacks_source),
@@ -87,6 +92,7 @@ DECLARE_EVENT_CLASS(xe_vma,
TP_STRUCT__entry(
__string(dev, __dev_name_vma(vma))
__field(struct xe_vma *, vma)
+ __field(struct xe_vm *, vm)
__field(u32, asid)
__field(u64, start)
__field(u64, end)
@@ -96,14 +102,16 @@ DECLARE_EVENT_CLASS(xe_vma,
TP_fast_assign(
__assign_str(dev);
__entry->vma = vma;
+ __entry->vm = xe_vma_vm(vma);
__entry->asid = xe_vma_vm(vma)->usm.asid;
__entry->start = xe_vma_start(vma);
__entry->end = xe_vma_end(vma) - 1;
__entry->ptr = xe_vma_userptr(vma);
),
- TP_printk("dev=%s, vma=%p, asid=0x%05x, start=0x%012llx, end=0x%012llx, userptr=0x%012llx,",
- __get_str(dev), __entry->vma, __entry->asid, __entry->start,
+ TP_printk("dev=%s, vma=%p, vm=%p, asid=0x%05x, start=0x%012llx, end=0x%012llx, userptr=0x%012llx",
+ __get_str(dev), __entry->vma, __entry->vm,
+ __entry->asid, __entry->start,
__entry->end, __entry->ptr)
)
@@ -185,16 +193,19 @@ DECLARE_EVENT_CLASS(xe_vm,
__string(dev, __dev_name_vm(vm))
__field(struct xe_vm *, vm)
__field(u32, asid)
+ __field(u32, flags)
),
TP_fast_assign(
__assign_str(dev);
__entry->vm = vm;
__entry->asid = vm->usm.asid;
+ __entry->flags = vm->flags;
),
- TP_printk("dev=%s, vm=%p, asid=0x%05x", __get_str(dev),
- __entry->vm, __entry->asid)
+ TP_printk("dev=%s, vm=%p, asid=0x%05x, vm flags=0x%05x",
+ __get_str(dev), __entry->vm, __entry->asid,
+ __entry->flags)
);
DEFINE_EVENT(xe_vm, xe_vm_kill,
diff --git a/drivers/gpu/drm/xe/xe_trace_guc.h b/drivers/gpu/drm/xe/xe_trace_guc.h
index 23abdd55dc62..78949db9cfce 100644
--- a/drivers/gpu/drm/xe/xe_trace_guc.h
+++ b/drivers/gpu/drm/xe/xe_trace_guc.h
@@ -14,6 +14,7 @@
#include "xe_device_types.h"
#include "xe_guc_exec_queue_types.h"
+#include "xe_guc_engine_activity_types.h"
#define __dev_name_xe(xe) dev_name((xe)->drm.dev)
@@ -100,6 +101,54 @@ DEFINE_EVENT_PRINT(xe_guc_ctb, xe_guc_ctb_g2h,
);
+TRACE_EVENT(xe_guc_engine_activity,
+ TP_PROTO(struct xe_device *xe, struct engine_activity *ea, const char *name,
+ u16 instance),
+ TP_ARGS(xe, ea, name, instance),
+
+ TP_STRUCT__entry(
+ __string(dev, __dev_name_xe(xe))
+ __string(name, name)
+ __field(u32, global_change_num)
+ __field(u32, guc_tsc_frequency_hz)
+ __field(u32, lag_latency_usec)
+ __field(u16, instance)
+ __field(u16, change_num)
+ __field(u16, quanta_ratio)
+ __field(u32, last_update_tick)
+ __field(u64, active_ticks)
+ __field(u64, active)
+ __field(u64, total)
+ __field(u64, quanta)
+ __field(u64, last_cpu_ts)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev);
+ __assign_str(name);
+ __entry->global_change_num = ea->metadata.global_change_num;
+ __entry->guc_tsc_frequency_hz = ea->metadata.guc_tsc_frequency_hz;
+ __entry->lag_latency_usec = ea->metadata.lag_latency_usec;
+ __entry->instance = instance;
+ __entry->change_num = ea->activity.change_num;
+ __entry->quanta_ratio = ea->activity.quanta_ratio;
+ __entry->last_update_tick = ea->activity.last_update_tick;
+ __entry->active_ticks = ea->activity.active_ticks;
+ __entry->active = ea->active;
+ __entry->total = ea->total;
+ __entry->quanta = ea->quanta;
+ __entry->last_cpu_ts = ea->last_cpu_ts;
+ ),
+
+ TP_printk("dev=%s engine %s:%d Active=%llu, quanta=%llu, last_cpu_ts=%llu\n"
+ "Activity metadata: global_change_num=%u, guc_tsc_frequency_hz=%u lag_latency_usec=%u\n"
+ "Activity data: change_num=%u, quanta_ratio=0x%x, last_update_tick=%u, active_ticks=%llu\n",
+ __get_str(dev), __get_str(name), __entry->instance,
+ (__entry->active + __entry->total), __entry->quanta, __entry->last_cpu_ts,
+ __entry->global_change_num, __entry->guc_tsc_frequency_hz,
+ __entry->lag_latency_usec, __entry->change_num, __entry->quanta_ratio,
+ __entry->last_update_tick, __entry->active_ticks)
+);
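Likewise, a hedged example of emitting the engine-activity tracepoint when per-engine counters are sampled (the hw-engine fields used here are assumptions):

	trace_xe_guc_engine_activity(guc_to_xe(guc), ea, hwe->name, hwe->instance);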
#endif
/* This part must be outside protection */
diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
index d414421f8c13..d9c9d2547aad 100644
--- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
@@ -207,17 +207,16 @@ static u64 detect_stolen(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
#endif
}
-void xe_ttm_stolen_mgr_init(struct xe_device *xe)
+int xe_ttm_stolen_mgr_init(struct xe_device *xe)
{
- struct xe_ttm_stolen_mgr *mgr = drmm_kzalloc(&xe->drm, sizeof(*mgr), GFP_KERNEL);
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ struct xe_ttm_stolen_mgr *mgr;
u64 stolen_size, io_size;
int err;
- if (!mgr) {
- drm_dbg_kms(&xe->drm, "Stolen mgr init failed\n");
- return;
- }
+ mgr = drmm_kzalloc(&xe->drm, sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return -ENOMEM;
if (IS_SRIOV_VF(xe))
stolen_size = 0;
@@ -230,7 +229,7 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe)
if (!stolen_size) {
drm_dbg_kms(&xe->drm, "No stolen memory support\n");
- return;
+ return 0;
}
/*
@@ -246,7 +245,7 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe)
io_size, PAGE_SIZE);
if (err) {
drm_dbg_kms(&xe->drm, "Stolen mgr init failed: %i\n", err);
- return;
+ return err;
}
drm_dbg_kms(&xe->drm, "Initialized stolen memory support with %llu bytes\n",
@@ -254,6 +253,8 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe)
if (io_size)
mgr->mapping = devm_ioremap_wc(&pdev->dev, mgr->io_base, io_size);
+
+ return 0;
}
u64 xe_ttm_stolen_io_offset(struct xe_bo *bo, u32 offset)
diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h
index 1777245ff810..8e877d1e839b 100644
--- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h
+++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h
@@ -12,7 +12,7 @@ struct ttm_resource;
struct xe_bo;
struct xe_device;
-void xe_ttm_stolen_mgr_init(struct xe_device *xe);
+int xe_ttm_stolen_mgr_init(struct xe_device *xe);
int xe_ttm_stolen_io_mem_reserve(struct xe_device *xe, struct ttm_resource *mem);
bool xe_ttm_stolen_cpu_access_needs_ggtt(struct xe_device *xe);
u64 xe_ttm_stolen_io_offset(struct xe_bo *bo, u32 offset);
diff --git a/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c b/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
index 9844a8edbfe1..d38b91872da3 100644
--- a/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
@@ -108,9 +108,8 @@ int xe_ttm_sys_mgr_init(struct xe_device *xe)
u64 gtt_size;
si_meminfo(&si);
+ /* Potentially restrict amount of TT memory here. */
gtt_size = (u64)si.totalram * si.mem_unit;
- /* TTM limits allocation of all TTM devices by 50% of system memory */
- gtt_size /= 2;
man->use_tt = true;
man->func = &xe_ttm_sys_mgr_func;
diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
index f4a16e5fa770..9e375a40aee9 100644
--- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
@@ -340,9 +340,8 @@ int __xe_ttm_vram_mgr_init(struct xe_device *xe, struct xe_ttm_vram_mgr *mgr,
int xe_ttm_vram_mgr_init(struct xe_tile *tile, struct xe_ttm_vram_mgr *mgr)
{
struct xe_device *xe = tile_to_xe(tile);
- struct xe_mem_region *vram = &tile->mem.vram;
+ struct xe_vram_region *vram = &tile->mem.vram;
- mgr->vram = vram;
return __xe_ttm_vram_mgr_init(xe, mgr, XE_PL_VRAM0 + tile->id,
vram->usable_size, vram->io_size,
PAGE_SIZE);
diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h b/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h
index 2d75cf126289..1144f9232ebb 100644
--- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h
+++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h
@@ -9,8 +9,6 @@
#include <drm/drm_buddy.h>
#include <drm/ttm/ttm_device.h>
-struct xe_mem_region;
-
/**
* struct xe_ttm_vram_mgr - XE TTM VRAM manager
*
@@ -21,8 +19,6 @@ struct xe_ttm_vram_mgr {
struct ttm_resource_manager manager;
/** @mm: DRM buddy allocator which manages the VRAM */
struct drm_buddy mm;
- /** @vram: ptr to details of associated VRAM region */
- struct xe_mem_region *vram;
/** @visible_size: Probed size of the CPU visible portion */
u64 visible_size;
/** @visible_avail: CPU visible portion still unallocated */
diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c
index d449de0fb6ec..49ddbda7cdef 100644
--- a/drivers/gpu/drm/xe/xe_tuning.c
+++ b/drivers/gpu/drm/xe/xe_tuning.c
@@ -7,6 +7,8 @@
#include <kunit/visibility.h>
+#include <drm/drm_managed.h>
+
#include "regs/xe_gt_regs.h"
#include "xe_gt_types.h"
#include "xe_platform_types.h"
@@ -83,28 +85,22 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
XE_RTP_RULES(MEDIA_VERSION(2000)),
XE_RTP_ACTIONS(SET(XE2LPM_SCRATCH3_LBCF, RWFLUSHALLEN))
},
-
- {}
};
static const struct xe_rtp_entry_sr engine_tunings[] = {
+ { XE_RTP_NAME("Tuning: L3 Hashing Mask"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(CLR(XELP_GARBCNTL, XELP_BUS_HASH_CTL_BIT_EXC))
+ },
{ XE_RTP_NAME("Tuning: Set Indirect State Override"),
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1274),
ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(SAMPLER_MODE, INDIRECT_STATE_BASE_ADDR_OVERRIDE))
},
- {}
};
static const struct xe_rtp_entry_sr lrc_tunings[] = {
- { XE_RTP_NAME("Tuning: ganged timer, also known as 16011163337"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
- /* read verification is ignored due to 1608008084. */
- XE_RTP_ACTIONS(FIELD_SET_NO_READ_MASK(FF_MODE2,
- FF_MODE2_GS_TIMER_MASK,
- FF_MODE2_GS_TIMER_224))
- },
-
/* DG2 */
{ XE_RTP_NAME("Tuning: L3 cache"),
@@ -139,15 +135,47 @@ static const struct xe_rtp_entry_sr lrc_tunings[] = {
XE_RTP_ACTIONS(FIELD_SET(FF_MODE, VS_HIT_MAX_VALUE_MASK,
REG_FIELD_PREP(VS_HIT_MAX_VALUE_MASK, 0x3f)))
},
-
- {}
};
+/**
+ * xe_tuning_init - initialize gt with tunings bookkeeping
+ * @gt: GT instance to initialize
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
+int xe_tuning_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ size_t n_lrc, n_engine, n_gt, total;
+ unsigned long *p;
+
+ n_gt = BITS_TO_LONGS(ARRAY_SIZE(gt_tunings));
+ n_engine = BITS_TO_LONGS(ARRAY_SIZE(engine_tunings));
+ n_lrc = BITS_TO_LONGS(ARRAY_SIZE(lrc_tunings));
+ total = n_gt + n_engine + n_lrc;
+
+ p = drmm_kzalloc(&xe->drm, sizeof(*p) * total, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ gt->tuning_active.gt = p;
+ p += n_gt;
+ gt->tuning_active.engine = p;
+ p += n_engine;
+ gt->tuning_active.lrc = p;
+
+ return 0;
+}
+ALLOW_ERROR_INJECTION(xe_tuning_init, ERRNO); /* See xe_pci_probe() */
+
void xe_tuning_process_gt(struct xe_gt *gt)
{
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
- xe_rtp_process_to_sr(&ctx, gt_tunings, &gt->reg_sr);
+ xe_rtp_process_ctx_enable_active_tracking(&ctx,
+ gt->tuning_active.gt,
+ ARRAY_SIZE(gt_tunings));
+ xe_rtp_process_to_sr(&ctx, gt_tunings, ARRAY_SIZE(gt_tunings), &gt->reg_sr);
}
EXPORT_SYMBOL_IF_KUNIT(xe_tuning_process_gt);
@@ -155,7 +183,11 @@ void xe_tuning_process_engine(struct xe_hw_engine *hwe)
{
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
- xe_rtp_process_to_sr(&ctx, engine_tunings, &hwe->reg_sr);
+ xe_rtp_process_ctx_enable_active_tracking(&ctx,
+ hwe->gt->tuning_active.engine,
+ ARRAY_SIZE(engine_tunings));
+ xe_rtp_process_to_sr(&ctx, engine_tunings, ARRAY_SIZE(engine_tunings),
+ &hwe->reg_sr);
}
EXPORT_SYMBOL_IF_KUNIT(xe_tuning_process_engine);
@@ -171,5 +203,25 @@ void xe_tuning_process_lrc(struct xe_hw_engine *hwe)
{
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
- xe_rtp_process_to_sr(&ctx, lrc_tunings, &hwe->reg_lrc);
+ xe_rtp_process_ctx_enable_active_tracking(&ctx,
+ hwe->gt->tuning_active.lrc,
+ ARRAY_SIZE(lrc_tunings));
+ xe_rtp_process_to_sr(&ctx, lrc_tunings, ARRAY_SIZE(lrc_tunings), &hwe->reg_lrc);
+}
+
+void xe_tuning_dump(struct xe_gt *gt, struct drm_printer *p)
+{
+ size_t idx;
+
+ drm_printf(p, "GT Tunings\n");
+ for_each_set_bit(idx, gt->tuning_active.gt, ARRAY_SIZE(gt_tunings))
+ drm_printf_indent(p, 1, "%s\n", gt_tunings[idx].name);
+
+ drm_printf(p, "\nEngine Tunings\n");
+ for_each_set_bit(idx, gt->tuning_active.engine, ARRAY_SIZE(engine_tunings))
+ drm_printf_indent(p, 1, "%s\n", engine_tunings[idx].name);
+
+ drm_printf(p, "\nLRC Tunings\n");
+ for_each_set_bit(idx, gt->tuning_active.lrc, ARRAY_SIZE(lrc_tunings))
+ drm_printf_indent(p, 1, "%s\n", lrc_tunings[idx].name);
}
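A hedged sketch of consuming xe_tuning_dump() from a debugfs show callback through a seq_file-backed drm_printer; the callback wiring is an assumption:

	static int tunings_show(struct seq_file *m, void *data)
	{
		struct xe_gt *gt = m->private;	/* assumed: gt stashed in ->private */
		struct drm_printer p = drm_seq_file_printer(m);

		xe_tuning_dump(gt, &p);
		return 0;
	}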
diff --git a/drivers/gpu/drm/xe/xe_tuning.h b/drivers/gpu/drm/xe/xe_tuning.h
index 4f9c3ac3b516..dd0d3ccc9c65 100644
--- a/drivers/gpu/drm/xe/xe_tuning.h
+++ b/drivers/gpu/drm/xe/xe_tuning.h
@@ -6,11 +6,14 @@
#ifndef _XE_TUNING_
#define _XE_TUNING_
+struct drm_printer;
struct xe_gt;
struct xe_hw_engine;
+int xe_tuning_init(struct xe_gt *gt);
void xe_tuning_process_gt(struct xe_gt *gt);
void xe_tuning_process_engine(struct xe_hw_engine *hwe);
void xe_tuning_process_lrc(struct xe_hw_engine *hwe);
+void xe_tuning_dump(struct xe_gt *gt, struct drm_printer *p);
#endif
diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c
index 0d073a9987c2..c14bd2282044 100644
--- a/drivers/gpu/drm/xe/xe_uc.c
+++ b/drivers/gpu/drm/xe/xe_uc.c
@@ -14,6 +14,7 @@
#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
#include "xe_guc_pc.h"
+#include "xe_guc_engine_activity.h"
#include "xe_huc.h"
#include "xe_sriov.h"
#include "xe_uc_fw.h"
@@ -210,6 +211,8 @@ int xe_uc_init_hw(struct xe_uc *uc)
if (ret)
return ret;
+ xe_guc_engine_activity_enable_stats(&uc->guc);
+
/* We don't fail the driver load if HuC fails to auth, but let's warn */
ret = xe_huc_auth(&uc->huc, XE_HUC_AUTH_VIA_GUC);
xe_gt_assert(uc_to_gt(uc), !ret);
@@ -289,19 +292,6 @@ int xe_uc_suspend(struct xe_uc *uc)
}
/**
- * xe_uc_remove() - Clean up the UC structures before driver removal
- * @uc: the UC object
- *
- * This function should only act on objects/structures that must be cleaned
- * before the driver removal callback is complete and therefore can't be
- * deferred to a drmm action.
- */
-void xe_uc_remove(struct xe_uc *uc)
-{
- xe_gsc_remove(&uc->gsc);
-}
-
-/**
* xe_uc_declare_wedged() - Declare UC wedged
* @uc: the UC object
*
diff --git a/drivers/gpu/drm/xe/xe_uc.h b/drivers/gpu/drm/xe/xe_uc.h
index 506517c11333..3813c1ede450 100644
--- a/drivers/gpu/drm/xe/xe_uc.h
+++ b/drivers/gpu/drm/xe/xe_uc.h
@@ -20,7 +20,6 @@ void xe_uc_stop(struct xe_uc *uc);
int xe_uc_start(struct xe_uc *uc);
int xe_uc_suspend(struct xe_uc *uc);
int xe_uc_sanitize_reset(struct xe_uc *uc);
-void xe_uc_remove(struct xe_uc *uc);
void xe_uc_declare_wedged(struct xe_uc *uc);
#endif
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index ec6ec18ab3fa..60303998bd61 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -8,6 +8,7 @@
#include <linux/dma-fence-array.h>
#include <linux/nospec.h>
+#include <drm/drm_drv.h>
#include <drm/drm_exec.h>
#include <drm/drm_print.h>
#include <drm/ttm/ttm_tt.h>
@@ -33,7 +34,9 @@
#include "xe_pm.h"
#include "xe_preempt_fence.h"
#include "xe_pt.h"
+#include "xe_pxp.h"
#include "xe_res_cursor.h"
+#include "xe_svm.h"
#include "xe_sync.h"
#include "xe_trace_bo.h"
#include "xe_wa.h"
@@ -269,6 +272,7 @@ out_up_write:
return err;
}
+ALLOW_ERROR_INJECTION(xe_vm_add_compute_exec_queue, ERRNO);
/**
* xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
@@ -688,7 +692,6 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
{
struct xe_userptr_vma *uvma, *next;
int err = 0;
- LIST_HEAD(tmp_evict);
xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
lockdep_assert_held_write(&vm->lock);
@@ -947,6 +950,179 @@ free_ops:
return fence;
}
+static void xe_vm_populate_range_rebind(struct xe_vma_op *op,
+ struct xe_vma *vma,
+ struct xe_svm_range *range,
+ u8 tile_mask)
+{
+ INIT_LIST_HEAD(&op->link);
+ op->tile_mask = tile_mask;
+ op->base.op = DRM_GPUVA_OP_DRIVER;
+ op->subop = XE_VMA_SUBOP_MAP_RANGE;
+ op->map_range.vma = vma;
+ op->map_range.range = range;
+}
+
+static int
+xe_vm_ops_add_range_rebind(struct xe_vma_ops *vops,
+ struct xe_vma *vma,
+ struct xe_svm_range *range,
+ u8 tile_mask)
+{
+ struct xe_vma_op *op;
+
+ op = kzalloc(sizeof(*op), GFP_KERNEL);
+ if (!op)
+ return -ENOMEM;
+
+ xe_vm_populate_range_rebind(op, vma, range, tile_mask);
+ list_add_tail(&op->link, &vops->list);
+ xe_vma_ops_incr_pt_update_ops(vops, tile_mask);
+
+ return 0;
+}
+
+/**
+ * xe_vm_range_rebind() - VM range (re)bind
+ * @vm: The VM which the range belongs to.
+ * @vma: The VMA which the range belongs to.
+ * @range: SVM range to rebind.
+ * @tile_mask: Tile mask to bind the range to.
+ *
+ * (Re)bind an SVM range, setting up GPU page tables for the range.
+ *
+ * Return: dma fence for rebind to signal completion on success, ERR_PTR on
+ * failure
+ */
+struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
+ struct xe_vma *vma,
+ struct xe_svm_range *range,
+ u8 tile_mask)
+{
+ struct dma_fence *fence = NULL;
+ struct xe_vma_ops vops;
+ struct xe_vma_op *op, *next_op;
+ struct xe_tile *tile;
+ u8 id;
+ int err;
+
+ lockdep_assert_held(&vm->lock);
+ xe_vm_assert_held(vm);
+ xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
+ xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
+
+ xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
+ for_each_tile(tile, vm->xe, id) {
+ vops.pt_update_ops[id].wait_vm_bookkeep = true;
+ vops.pt_update_ops[tile->id].q =
+ xe_tile_migrate_exec_queue(tile);
+ }
+
+ err = xe_vm_ops_add_range_rebind(&vops, vma, range, tile_mask);
+ if (err)
+ return ERR_PTR(err);
+
+ err = xe_vma_ops_alloc(&vops, false);
+ if (err) {
+ fence = ERR_PTR(err);
+ goto free_ops;
+ }
+
+ fence = ops_execute(vm, &vops);
+
+free_ops:
+ list_for_each_entry_safe(op, next_op, &vops.list, link) {
+ list_del(&op->link);
+ kfree(op);
+ }
+ xe_vma_ops_fini(&vops);
+
+ return fence;
+}
+
+static void xe_vm_populate_range_unbind(struct xe_vma_op *op,
+ struct xe_svm_range *range)
+{
+ INIT_LIST_HEAD(&op->link);
+ op->tile_mask = range->tile_present;
+ op->base.op = DRM_GPUVA_OP_DRIVER;
+ op->subop = XE_VMA_SUBOP_UNMAP_RANGE;
+ op->unmap_range.range = range;
+}
+
+static int
+xe_vm_ops_add_range_unbind(struct xe_vma_ops *vops,
+ struct xe_svm_range *range)
+{
+ struct xe_vma_op *op;
+
+ op = kzalloc(sizeof(*op), GFP_KERNEL);
+ if (!op)
+ return -ENOMEM;
+
+ xe_vm_populate_range_unbind(op, range);
+ list_add_tail(&op->link, &vops->list);
+ xe_vma_ops_incr_pt_update_ops(vops, range->tile_present);
+
+ return 0;
+}
+
+/**
+ * xe_vm_range_unbind() - VM range unbind
+ * @vm: The VM which the range belongs to.
+ * @range: SVM range to unbind.
+ *
+ * Unbind an SVM range, removing the GPU page tables for the range.
+ *
+ * Return: dma fence for unbind to signal completion on success, ERR_PTR on
+ * failure
+ */
+struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
+ struct xe_svm_range *range)
+{
+ struct dma_fence *fence = NULL;
+ struct xe_vma_ops vops;
+ struct xe_vma_op *op, *next_op;
+ struct xe_tile *tile;
+ u8 id;
+ int err;
+
+ lockdep_assert_held(&vm->lock);
+ xe_vm_assert_held(vm);
+ xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
+
+ if (!range->tile_present)
+ return dma_fence_get_stub();
+
+ xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
+ for_each_tile(tile, vm->xe, id) {
+ vops.pt_update_ops[id].wait_vm_bookkeep = true;
+ vops.pt_update_ops[tile->id].q =
+ xe_tile_migrate_exec_queue(tile);
+ }
+
+ err = xe_vm_ops_add_range_unbind(&vops, range);
+ if (err)
+ return ERR_PTR(err);
+
+ err = xe_vma_ops_alloc(&vops, false);
+ if (err) {
+ fence = ERR_PTR(err);
+ goto free_ops;
+ }
+
+ fence = ops_execute(vm, &vops);
+
+free_ops:
+ list_for_each_entry_safe(op, next_op, &vops.list, link) {
+ list_del(&op->link);
+ kfree(op);
+ }
+ xe_vma_ops_fini(&vops);
+
+ return fence;
+}
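A hedged sketch of how a caller such as the SVM garbage collector might consume xe_vm_range_unbind(): with vm->lock and the VM dma-resv held, submit the unbind and wait the returned fence before destroying the range.

	static int unbind_range(struct xe_vm *vm, struct xe_svm_range *range)
	{
		struct dma_fence *fence;

		fence = xe_vm_range_unbind(vm, range);
		if (IS_ERR(fence))
			return PTR_ERR(fence);

		dma_fence_wait(fence, false);
		dma_fence_put(fence);

		return 0;
	}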
+
static void xe_vma_free(struct xe_vma *vma)
{
if (xe_vma_is_userptr(vma))
@@ -955,9 +1131,10 @@ static void xe_vma_free(struct xe_vma *vma)
kfree(vma);
}
-#define VMA_CREATE_FLAG_READ_ONLY BIT(0)
-#define VMA_CREATE_FLAG_IS_NULL BIT(1)
-#define VMA_CREATE_FLAG_DUMPABLE BIT(2)
+#define VMA_CREATE_FLAG_READ_ONLY BIT(0)
+#define VMA_CREATE_FLAG_IS_NULL BIT(1)
+#define VMA_CREATE_FLAG_DUMPABLE BIT(2)
+#define VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR BIT(3)
static struct xe_vma *xe_vma_create(struct xe_vm *vm,
struct xe_bo *bo,
@@ -971,6 +1148,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
+ bool is_cpu_addr_mirror =
+ (flags & VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR);
xe_assert(vm->xe, start < end);
xe_assert(vm->xe, end < vm->size);
@@ -979,7 +1158,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
* Allocate and ensure that the xe_vma_is_userptr() return
* matches what was allocated.
*/
- if (!bo && !is_null) {
+ if (!bo && !is_null && !is_cpu_addr_mirror) {
struct xe_userptr_vma *uvma = kzalloc(sizeof(*uvma), GFP_KERNEL);
if (!uvma)
@@ -991,6 +1170,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
if (!vma)
return ERR_PTR(-ENOMEM);
+ if (is_cpu_addr_mirror)
+ vma->gpuva.flags |= XE_VMA_SYSTEM_ALLOCATOR;
if (is_null)
vma->gpuva.flags |= DRM_GPUVA_SPARSE;
if (bo)
@@ -1033,7 +1214,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
drm_gpuva_link(&vma->gpuva, vm_bo);
drm_gpuvm_bo_put(vm_bo);
} else /* userptr or null */ {
- if (!is_null) {
+ if (!is_null && !is_cpu_addr_mirror) {
struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
u64 size = end - start + 1;
int err;
@@ -1085,7 +1266,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
mmu_interval_notifier_remove(&userptr->notifier);
mutex_destroy(&userptr->unmap_mutex);
xe_vm_put(vm);
- } else if (xe_vma_is_null(vma)) {
+ } else if (xe_vma_is_null(vma) || xe_vma_is_cpu_addr_mirror(vma)) {
xe_vm_put(vm);
} else {
xe_bo_put(xe_vma_bo(vma));
@@ -1125,7 +1306,7 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
xe_assert(vm->xe, list_empty(&to_userptr_vma(vma)->userptr.repin_link));
list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
spin_unlock(&vm->userptr.invalidated_lock);
- } else if (!xe_vma_is_null(vma)) {
+ } else if (!xe_vma_is_null(vma) && !xe_vma_is_cpu_addr_mirror(vma)) {
xe_bo_assert_held(xe_vma_bo(vma));
drm_gpuva_unlink(&vma->gpuva);
@@ -1439,6 +1620,12 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
struct xe_tile *tile;
u8 id;
+ /*
+ * Since the GSCCS is not user-accessible, we don't expect a GSC VM to
+ * ever be in faulting mode.
+ */
+ xe_assert(xe, !((flags & XE_VM_FLAG_GSC) && (flags & XE_VM_FLAG_FAULT_MODE)));
+
vm = kzalloc(sizeof(*vm), GFP_KERNEL);
if (!vm)
return ERR_PTR(-ENOMEM);
@@ -1449,7 +1636,21 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
vm->flags = flags;
- init_rwsem(&vm->lock);
+ /**
+ * GSC VMs are kernel-owned, only used for PXP ops and can sometimes be
+ * manipulated under the PXP mutex. However, the PXP mutex can be taken
+ * under a user-VM lock when the PXP session is started at exec_queue
+ * creation time. Those are different VMs and therefore there is no risk
+ * of deadlock, but we need to tell lockdep that this is the case or it
+ * will print a warning.
+ */
+ if (flags & XE_VM_FLAG_GSC) {
+ static struct lock_class_key gsc_vm_key;
+
+ __init_rwsem(&vm->lock, "gsc_vm", &gsc_vm_key);
+ } else {
+ init_rwsem(&vm->lock);
+ }
mutex_init(&vm->snap_mutex);
INIT_LIST_HEAD(&vm->rebind_list);
@@ -1556,6 +1757,12 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
}
}
+ if (flags & XE_VM_FLAG_FAULT_MODE) {
+ err = xe_svm_init(vm);
+ if (err)
+ goto err_close;
+ }
+
if (number_tiles > 1)
vm->composite_fence_ctx = dma_fence_context_alloc(1);
@@ -1582,9 +1789,44 @@ err_no_resv:
static void xe_vm_close(struct xe_vm *vm)
{
+ struct xe_device *xe = vm->xe;
+ bool bound;
+ int idx;
+
+ bound = drm_dev_enter(&xe->drm, &idx);
+
down_write(&vm->lock);
+ if (xe_vm_in_fault_mode(vm))
+ xe_svm_notifier_lock(vm);
+
vm->size = 0;
+
+ if (!((vm->flags & XE_VM_FLAG_MIGRATION))) {
+ struct xe_tile *tile;
+ struct xe_gt *gt;
+ u8 id;
+
+ /* Wait for pending binds */
+ dma_resv_wait_timeout(xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+
+ if (bound) {
+ for_each_tile(tile, xe, id)
+ if (vm->pt_root[id])
+ xe_pt_clear(xe, vm->pt_root[id]);
+
+ for_each_gt(gt, xe, id)
+ xe_gt_tlb_invalidation_vm(gt, vm);
+ }
+ }
+
+ if (xe_vm_in_fault_mode(vm))
+ xe_svm_notifier_unlock(vm);
up_write(&vm->lock);
+
+ if (bound)
+ drm_dev_exit(idx);
}
void xe_vm_close_and_put(struct xe_vm *vm)
@@ -1601,6 +1843,8 @@ void xe_vm_close_and_put(struct xe_vm *vm)
xe_vm_close(vm);
if (xe_vm_in_preempt_fence_mode(vm))
flush_work(&vm->preempt.rebind_work);
+ if (xe_vm_in_fault_mode(vm))
+ xe_svm_close(vm);
down_write(&vm->lock);
for_each_tile(tile, xe, id) {
@@ -1669,6 +1913,9 @@ void xe_vm_close_and_put(struct xe_vm *vm)
xe_vma_destroy_unlocked(vma);
}
+ if (xe_vm_in_fault_mode(vm))
+ xe_svm_fini(vm);
+
up_write(&vm->lock);
down_write(&xe->usm.lock);
@@ -1809,9 +2056,6 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
return -EINVAL;
- if (XE_IOCTL_DBG(xe, args->extensions))
- return -EINVAL;
-
if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
flags |= XE_VM_FLAG_SCRATCH_PAGE;
if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE)
@@ -2025,6 +2269,8 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
op->map.read_only =
flags & DRM_XE_VM_BIND_FLAG_READONLY;
op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
+ op->map.is_cpu_addr_mirror = flags &
+ DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;
op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
op->map.pat_index = pat_index;
} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
@@ -2217,6 +2463,8 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
VMA_CREATE_FLAG_IS_NULL : 0;
flags |= op->map.dumpable ?
VMA_CREATE_FLAG_DUMPABLE : 0;
+ flags |= op->map.is_cpu_addr_mirror ?
+ VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
vma = new_vma(vm, &op->base.map, op->map.pat_index,
flags);
@@ -2224,7 +2472,8 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
return PTR_ERR(vma);
op->map.vma = vma;
- if (op->map.immediate || !xe_vm_in_fault_mode(vm))
+ if ((op->map.immediate || !xe_vm_in_fault_mode(vm)) &&
+ !op->map.is_cpu_addr_mirror)
xe_vma_ops_incr_pt_update_ops(vops,
op->tile_mask);
break;
@@ -2233,21 +2482,35 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
{
struct xe_vma *old =
gpuva_to_vma(op->base.remap.unmap->va);
+ bool skip = xe_vma_is_cpu_addr_mirror(old);
+ u64 start = xe_vma_start(old), end = xe_vma_end(old);
+
+ if (op->base.remap.prev)
+ start = op->base.remap.prev->va.addr +
+ op->base.remap.prev->va.range;
+ if (op->base.remap.next)
+ end = op->base.remap.next->va.addr;
+
+ if (xe_vma_is_cpu_addr_mirror(old) &&
+ xe_svm_has_mapping(vm, start, end))
+ return -EBUSY;
op->remap.start = xe_vma_start(old);
op->remap.range = xe_vma_size(old);
- if (op->base.remap.prev) {
- flags |= op->base.remap.unmap->va->flags &
- XE_VMA_READ_ONLY ?
- VMA_CREATE_FLAG_READ_ONLY : 0;
- flags |= op->base.remap.unmap->va->flags &
- DRM_GPUVA_SPARSE ?
- VMA_CREATE_FLAG_IS_NULL : 0;
- flags |= op->base.remap.unmap->va->flags &
- XE_VMA_DUMPABLE ?
- VMA_CREATE_FLAG_DUMPABLE : 0;
+ flags |= op->base.remap.unmap->va->flags &
+ XE_VMA_READ_ONLY ?
+ VMA_CREATE_FLAG_READ_ONLY : 0;
+ flags |= op->base.remap.unmap->va->flags &
+ DRM_GPUVA_SPARSE ?
+ VMA_CREATE_FLAG_IS_NULL : 0;
+ flags |= op->base.remap.unmap->va->flags &
+ XE_VMA_DUMPABLE ?
+ VMA_CREATE_FLAG_DUMPABLE : 0;
+ flags |= xe_vma_is_cpu_addr_mirror(old) ?
+ VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
+ if (op->base.remap.prev) {
vma = new_vma(vm, op->base.remap.prev,
old->pat_index, flags);
if (IS_ERR(vma))
@@ -2259,9 +2522,10 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
* Userptr creates a new SG mapping so
* we must also rebind.
*/
- op->remap.skip_prev = !xe_vma_is_userptr(old) &&
+ op->remap.skip_prev = skip ||
+ (!xe_vma_is_userptr(old) &&
IS_ALIGNED(xe_vma_end(vma),
- xe_vma_max_pte_size(old));
+ xe_vma_max_pte_size(old)));
if (op->remap.skip_prev) {
xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
op->remap.range -=
@@ -2277,16 +2541,6 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
}
if (op->base.remap.next) {
- flags |= op->base.remap.unmap->va->flags &
- XE_VMA_READ_ONLY ?
- VMA_CREATE_FLAG_READ_ONLY : 0;
- flags |= op->base.remap.unmap->va->flags &
- DRM_GPUVA_SPARSE ?
- VMA_CREATE_FLAG_IS_NULL : 0;
- flags |= op->base.remap.unmap->va->flags &
- XE_VMA_DUMPABLE ?
- VMA_CREATE_FLAG_DUMPABLE : 0;
-
vma = new_vma(vm, op->base.remap.next,
old->pat_index, flags);
if (IS_ERR(vma))
@@ -2298,9 +2552,10 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
* Userptr creates a new SG mapping so
* we must also rebind.
*/
- op->remap.skip_next = !xe_vma_is_userptr(old) &&
+ op->remap.skip_next = skip ||
+ (!xe_vma_is_userptr(old) &&
IS_ALIGNED(xe_vma_start(vma),
- xe_vma_max_pte_size(old));
+ xe_vma_max_pte_size(old)));
if (op->remap.skip_next) {
xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
op->remap.range -=
@@ -2313,11 +2568,20 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
}
}
- xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ if (!skip)
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
break;
}
case DRM_GPUVA_OP_UNMAP:
- xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ vma = gpuva_to_vma(op->base.unmap.va);
+
+ if (xe_vma_is_cpu_addr_mirror(vma) &&
+ xe_svm_has_mapping(vm, xe_vma_start(vma),
+ xe_vma_end(vma)))
+ return -EBUSY;
+
+ if (!xe_vma_is_cpu_addr_mirror(vma))
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
break;
case DRM_GPUVA_OP_PREFETCH:
vma = gpuva_to_vma(op->base.prefetch.va);
@@ -2328,7 +2592,8 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
return err;
}
- xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ if (!xe_vma_is_cpu_addr_mirror(vma))
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
break;
default:
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
@@ -2554,6 +2819,8 @@ static void op_trace(struct xe_vma_op *op)
case DRM_GPUVA_OP_PREFETCH:
trace_xe_vma_bind(gpuva_to_vma(op->base.prefetch.va));
break;
+ case DRM_GPUVA_OP_DRIVER:
+ break;
default:
XE_WARN_ON("NOT POSSIBLE");
}
@@ -2731,14 +2998,15 @@ static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops,
}
if (ufence)
xe_sync_ufence_put(ufence);
- for (i = 0; i < vops->num_syncs; i++)
- xe_sync_entry_signal(vops->syncs + i, fence);
- xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
- dma_fence_put(fence);
+ if (fence) {
+ for (i = 0; i < vops->num_syncs; i++)
+ xe_sync_entry_signal(vops->syncs + i, fence);
+ xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
+ }
}
-static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
- struct xe_vma_ops *vops)
+static struct dma_fence *vm_bind_ioctl_ops_execute(struct xe_vm *vm,
+ struct xe_vma_ops *vops)
{
struct drm_exec exec;
struct dma_fence *fence;
@@ -2751,12 +3019,15 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
drm_exec_until_all_locked(&exec) {
err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, vops);
drm_exec_retry_on_contention(&exec);
- if (err)
+ if (err) {
+ fence = ERR_PTR(err);
goto unlock;
+ }
fence = ops_execute(vm, vops);
if (IS_ERR(fence)) {
- err = PTR_ERR(fence);
+ if (PTR_ERR(fence) == -ENODATA)
+ vm_bind_ioctl_ops_fini(vm, vops, NULL);
goto unlock;
}
@@ -2765,7 +3036,7 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
unlock:
drm_exec_fini(&exec);
- return err;
+ return fence;
}
ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);
@@ -2773,7 +3044,9 @@ ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);
(DRM_XE_VM_BIND_FLAG_READONLY | \
DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
DRM_XE_VM_BIND_FLAG_NULL | \
- DRM_XE_VM_BIND_FLAG_DUMPABLE)
+ DRM_XE_VM_BIND_FLAG_DUMPABLE | \
+ DRM_XE_VM_BIND_FLAG_CHECK_PXP | \
+ DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
#ifdef TEST_VM_OPS_ERROR
#define SUPPORTED_FLAGS (SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR)
@@ -2784,7 +3057,7 @@ ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);
#define XE_64K_PAGE_MASK 0xffffull
#define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
-static int vm_bind_ioctl_check_args(struct xe_device *xe,
+static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
struct drm_xe_vm_bind *args,
struct drm_xe_vm_bind_op **bind_ops)
{
@@ -2829,9 +3102,18 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
u64 obj_offset = (*bind_ops)[i].obj_offset;
u32 prefetch_region = (*bind_ops)[i].prefetch_mem_region_instance;
bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
+ bool is_cpu_addr_mirror = flags &
+ DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;
u16 pat_index = (*bind_ops)[i].pat_index;
u16 coh_mode;
+ if (XE_IOCTL_DBG(xe, is_cpu_addr_mirror &&
+ (!xe_vm_in_fault_mode(vm) ||
+ !IS_ENABLED(CONFIG_DRM_GPUSVM)))) {
+ err = -EINVAL;
+ goto free_bind_ops;
+ }
+
if (XE_IOCTL_DBG(xe, pat_index >= xe->pat.n_entries)) {
err = -EINVAL;
goto free_bind_ops;
@@ -2852,13 +3134,14 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
- XE_IOCTL_DBG(xe, obj && is_null) ||
- XE_IOCTL_DBG(xe, obj_offset && is_null) ||
+ XE_IOCTL_DBG(xe, obj && (is_null || is_cpu_addr_mirror)) ||
+ XE_IOCTL_DBG(xe, obj_offset && (is_null ||
+ is_cpu_addr_mirror)) ||
XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
- is_null) ||
+ (is_null || is_cpu_addr_mirror)) ||
XE_IOCTL_DBG(xe, !obj &&
op == DRM_XE_VM_BIND_OP_MAP &&
- !is_null) ||
+ !is_null && !is_cpu_addr_mirror) ||
XE_IOCTL_DBG(xe, !obj &&
op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
XE_IOCTL_DBG(xe, addr &&
@@ -2936,7 +3219,7 @@ static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
u64 addr, u64 range, u64 obj_offset,
- u16 pat_index)
+ u16 pat_index, u32 op, u32 bind_flags)
{
u16 coh_mode;
@@ -2980,6 +3263,12 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
return -EINVAL;
}
+ /* If a BO is protected it can only be mapped if the key is still valid */
+ if ((bind_flags & DRM_XE_VM_BIND_FLAG_CHECK_PXP) && xe_bo_is_protected(bo) &&
+ op != DRM_XE_VM_BIND_OP_UNMAP && op != DRM_XE_VM_BIND_OP_UNMAP_ALL)
+ if (XE_IOCTL_DBG(xe, xe_pxp_bo_key_check(xe->pxp, bo) != 0))
+ return -ENOEXEC;
+
return 0;
}
@@ -2997,18 +3286,23 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
struct xe_sync_entry *syncs = NULL;
struct drm_xe_vm_bind_op *bind_ops;
struct xe_vma_ops vops;
+ struct dma_fence *fence;
int err;
int i;
- err = vm_bind_ioctl_check_args(xe, args, &bind_ops);
+ vm = xe_vm_lookup(xef, args->vm_id);
+ if (XE_IOCTL_DBG(xe, !vm))
+ return -EINVAL;
+
+ err = vm_bind_ioctl_check_args(xe, vm, args, &bind_ops);
if (err)
- return err;
+ goto put_vm;
if (args->exec_queue_id) {
q = xe_exec_queue_lookup(xef, args->exec_queue_id);
if (XE_IOCTL_DBG(xe, !q)) {
err = -ENOENT;
- goto free_objs;
+ goto put_vm;
}
if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
@@ -3017,15 +3311,13 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
}
- vm = xe_vm_lookup(xef, args->vm_id);
- if (XE_IOCTL_DBG(xe, !vm)) {
- err = -EINVAL;
- goto put_exec_queue;
- }
+ /* Ensure all UNMAPs are visible */
+ if (xe_vm_in_fault_mode(vm))
+ flush_work(&vm->svm.garbage_collector.work);
err = down_write_killable(&vm->lock);
if (err)
- goto put_vm;
+ goto put_exec_queue;
if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
err = -ENOENT;
@@ -3068,6 +3360,8 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
u32 obj = bind_ops[i].obj;
u64 obj_offset = bind_ops[i].obj_offset;
u16 pat_index = bind_ops[i].pat_index;
+ u32 op = bind_ops[i].op;
+ u32 bind_flags = bind_ops[i].flags;
if (!obj)
continue;
@@ -3080,7 +3374,8 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
bos[i] = gem_to_xe_bo(gem_obj);
err = xe_vm_bind_ioctl_validate_bo(xe, bos[i], addr, range,
- obj_offset, pat_index);
+ obj_offset, pat_index, op,
+ bind_flags);
if (err)
goto put_obj;
}
@@ -3161,7 +3456,11 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (err)
goto unwind_ops;
- err = vm_bind_ioctl_ops_execute(vm, &vops);
+ fence = vm_bind_ioctl_ops_execute(vm, &vops);
+ if (IS_ERR(fence))
+ err = PTR_ERR(fence);
+ else
+ dma_fence_put(fence);
unwind_ops:
if (err && err != -ENODATA)
@@ -3182,12 +3481,11 @@ put_obj:
xe_bo_put(bos[i]);
release_vm_lock:
up_write(&vm->lock);
-put_vm:
- xe_vm_put(vm);
put_exec_queue:
if (q)
xe_exec_queue_put(q);
-free_objs:
+put_vm:
+ xe_vm_put(vm);
kvfree(bos);
kvfree(ops);
if (args->num_binds > 1)
@@ -3196,6 +3494,81 @@ free_objs:
}
/**
+ * xe_vm_bind_kernel_bo - bind a kernel BO to a VM
+ * @vm: VM to bind the BO to
+ * @bo: BO to bind
+ * @q: exec queue to use for the bind (optional)
+ * @addr: address at which to bind the BO
+ * @cache_lvl: PAT cache level to use
+ *
+ * Execute a VM bind map operation on a kernel-owned BO to bind it into a
+ * kernel-owned VM.
+ *
+ * Returns a dma_fence to track the binding completion if the job to do so was
+ * successfully submitted, an error pointer otherwise.
+ */
+struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo,
+ struct xe_exec_queue *q, u64 addr,
+ enum xe_cache_level cache_lvl)
+{
+ struct xe_vma_ops vops;
+ struct drm_gpuva_ops *ops = NULL;
+ struct dma_fence *fence;
+ int err;
+
+ xe_bo_get(bo);
+ xe_vm_get(vm);
+ if (q)
+ xe_exec_queue_get(q);
+
+ down_write(&vm->lock);
+
+ xe_vma_ops_init(&vops, vm, q, NULL, 0);
+
+ ops = vm_bind_ioctl_ops_create(vm, bo, 0, addr, bo->size,
+ DRM_XE_VM_BIND_OP_MAP, 0, 0,
+ vm->xe->pat.idx[cache_lvl]);
+ if (IS_ERR(ops)) {
+ err = PTR_ERR(ops);
+ goto release_vm_lock;
+ }
+
+ err = vm_bind_ioctl_ops_parse(vm, ops, &vops);
+ if (err)
+ goto release_vm_lock;
+
+ xe_assert(vm->xe, !list_empty(&vops.list));
+
+ err = xe_vma_ops_alloc(&vops, false);
+ if (err)
+ goto unwind_ops;
+
+ fence = vm_bind_ioctl_ops_execute(vm, &vops);
+ if (IS_ERR(fence))
+ err = PTR_ERR(fence);
+
+unwind_ops:
+ if (err && err != -ENODATA)
+ vm_bind_ioctl_ops_unwind(vm, &ops, 1);
+
+ xe_vma_ops_fini(&vops);
+ drm_gpuva_ops_free(&vm->gpuvm, ops);
+
+release_vm_lock:
+ up_write(&vm->lock);
+
+ if (q)
+ xe_exec_queue_put(q);
+ xe_vm_put(vm);
+ xe_bo_put(bo);
+
+ if (err)
+ fence = ERR_PTR(err);
+
+ return fence;
+}
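A hedged usage sketch (address and cache level are illustrative): bind a kernel BO on the default bind queue and wait for the bind to complete.

	fence = xe_vm_bind_kernel_bo(vm, bo, NULL, addr, XE_CACHE_WB);
	if (IS_ERR(fence))
		return PTR_ERR(fence);
	dma_fence_wait(fence, false);
	dma_fence_put(fence);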
+
+/**
* xe_vm_lock() - Lock the vm's dma_resv object
* @vm: The struct xe_vm whose lock is to be locked
* @intr: Whether to perform any wait interruptible
@@ -3244,6 +3617,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
int ret = 0;
xe_assert(xe, !xe_vma_is_null(vma));
+ xe_assert(xe, !xe_vma_is_cpu_addr_mirror(vma));
trace_xe_vma_invalidate(vma);
vm_dbg(&xe_vma_vm(vma)->xe->drm,
@@ -3301,6 +3675,35 @@ wait:
return ret;
}
+int xe_vm_validate_protected(struct xe_vm *vm)
+{
+ struct drm_gpuva *gpuva;
+ int err = 0;
+
+ if (!vm)
+ return -ENODEV;
+
+ mutex_lock(&vm->snap_mutex);
+
+ drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
+ struct xe_vma *vma = gpuva_to_vma(gpuva);
+ struct xe_bo *bo = vma->gpuva.gem.obj ?
+ gem_to_xe_bo(vma->gpuva.gem.obj) : NULL;
+
+ if (!bo)
+ continue;
+
+ if (xe_bo_is_protected(bo)) {
+ err = xe_pxp_bo_key_check(vm->xe->pxp, bo);
+ if (err)
+ break;
+ }
+ }
+
+ mutex_unlock(&vm->snap_mutex);
+ return err;
+}
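A hedged caller sketch: the exec path can use this to refuse submission once a protected BO's PXP key is no longer valid (q->vm being the submitting queue's VM is an assumption here).

	err = xe_vm_validate_protected(q->vm);
	if (err)
		return err;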
+
struct xe_vm_snapshot {
unsigned long num_snaps;
struct {
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index b882bfb31bd0..0ef811fc2bde 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -18,9 +18,12 @@ struct drm_file;
struct ttm_buffer_object;
+struct dma_fence;
+
struct xe_exec_queue;
struct xe_file;
struct xe_sync_entry;
+struct xe_svm_range;
struct drm_exec;
struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags);
@@ -150,6 +153,11 @@ static inline bool xe_vma_is_null(struct xe_vma *vma)
return vma->gpuva.flags & DRM_GPUVA_SPARSE;
}
+static inline bool xe_vma_is_cpu_addr_mirror(struct xe_vma *vma)
+{
+ return vma->gpuva.flags & XE_VMA_SYSTEM_ALLOCATOR;
+}
+
static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
{
return !xe_vma_bo(vma);
@@ -157,7 +165,8 @@ static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
static inline bool xe_vma_is_userptr(struct xe_vma *vma)
{
- return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma);
+ return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma) &&
+ !xe_vma_is_cpu_addr_mirror(vma);
}
/**
@@ -210,9 +219,17 @@ int xe_vm_userptr_check_repin(struct xe_vm *vm);
int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
u8 tile_mask);
+struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
+ struct xe_vma *vma,
+ struct xe_svm_range *range,
+ u8 tile_mask);
+struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
+ struct xe_svm_range *range);
int xe_vm_invalidate_vma(struct xe_vma *vma);
+int xe_vm_validate_protected(struct xe_vm *vm);
+
static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
{
xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
@@ -247,6 +264,10 @@ int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma);
int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
unsigned int num_fences);
+struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo,
+ struct xe_exec_queue *q, u64 addr,
+ enum xe_cache_level cache_lvl);
+
/**
* xe_vm_resv() - Return's the vm's reservation object
* @vm: The vm
diff --git a/drivers/gpu/drm/xe/xe_vm_doc.h b/drivers/gpu/drm/xe/xe_vm_doc.h
index 078786958403..1030ce214032 100644
--- a/drivers/gpu/drm/xe/xe_vm_doc.h
+++ b/drivers/gpu/drm/xe/xe_vm_doc.h
@@ -431,7 +431,7 @@
* bind path also acquires this lock in write while the exec / compute mode
* rebind worker acquires this lock in read mode.
*
- * VM dma-resv lock (vm->ttm.base.resv->lock) - WW lock. Protects VM dma-resv
+ * VM dma-resv lock (vm->gpuvm.r_obj->resv->lock) - WW lock. Protects VM dma-resv
* slots which is shared with any private BO in the VM. Expected to be acquired
* during VM binds, execs, and compute mode rebind worker. This lock is also
* held when private BOs are being evicted.
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index a4b4091cfd0d..84fa41b9fa20 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -6,6 +6,7 @@
#ifndef _XE_VM_TYPES_H_
#define _XE_VM_TYPES_H_
+#include <drm/drm_gpusvm.h>
#include <drm/drm_gpuvm.h>
#include <linux/dma-resv.h>
@@ -18,6 +19,7 @@
#include "xe_range_fence.h"
struct xe_bo;
+struct xe_svm_range;
struct xe_sync_entry;
struct xe_user_fence;
struct xe_vm;
@@ -42,6 +44,7 @@ struct xe_vm_pgtable_update_op;
#define XE_VMA_PTE_64K (DRM_GPUVA_USERBITS << 6)
#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 7)
#define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 8)
+#define XE_VMA_SYSTEM_ALLOCATOR (DRM_GPUVA_USERBITS << 9)
/** struct xe_userptr - User pointer */
struct xe_userptr {
@@ -143,6 +146,30 @@ struct xe_vm {
/** @gpuvm: base GPUVM used to track VMAs */
struct drm_gpuvm gpuvm;
+ /** @svm: Shared virtual memory state */
+ struct {
+ /** @svm.gpusvm: base GPUSVM used to track fault allocations */
+ struct drm_gpusvm gpusvm;
+ /**
+ * @svm.garbage_collector: Garbage collector which is used to unmap
+ * SVM ranges' GPU bindings and destroy the ranges.
+ */
+ struct {
+ * @svm.garbage_collector.lock: Protects the range list
+ spinlock_t lock;
+ /**
+ * @svm.garbage_collector.range_list: List of SVM ranges
+ * in the garbage collector.
+ */
+ struct list_head range_list;
+ /**
+ * @svm.garbage_collector.work: Worker which the
+ * garbage collector runs on.
+ */
+ struct work_struct work;
+ } garbage_collector;
+ } svm;
+
struct xe_device *xe;
/* exec queue used for (un)binding vma's */
@@ -168,6 +195,7 @@ struct xe_vm {
#define XE_VM_FLAG_BANNED BIT(5)
#define XE_VM_FLAG_TILE_ID(flags) FIELD_GET(GENMASK(7, 6), flags)
#define XE_VM_FLAG_SET_TILE_ID(tile) FIELD_PREP(GENMASK(7, 6), (tile)->id)
+#define XE_VM_FLAG_GSC BIT(8)
unsigned long flags;
/** @composite_fence_ctx: context composite fence */
@@ -298,6 +326,8 @@ struct xe_vma_op_map {
bool read_only;
/** @is_null: is NULL binding */
bool is_null;
+ /** @is_cpu_addr_mirror: is CPU address mirror binding */
+ bool is_cpu_addr_mirror;
/** @dumpable: whether BO is dumped on GPU hang */
bool dumpable;
/** @pat_index: The pat index to use for this operation. */
@@ -328,6 +358,20 @@ struct xe_vma_op_prefetch {
u32 region;
};
+/** struct xe_vma_op_map_range - VMA map range operation */
+struct xe_vma_op_map_range {
+ /** @vma: VMA to map (system allocator VMA) */
+ struct xe_vma *vma;
+ /** @range: SVM range to map */
+ struct xe_svm_range *range;
+};
+
+/** struct xe_vma_op_unmap_range - VMA unmap range operation */
+struct xe_vma_op_unmap_range {
+ /** @range: SVM range to unmap */
+ struct xe_svm_range *range;
+};
+
/** enum xe_vma_op_flags - flags for VMA operation */
enum xe_vma_op_flags {
/** @XE_VMA_OP_COMMITTED: VMA operation committed */
@@ -338,6 +382,14 @@ enum xe_vma_op_flags {
XE_VMA_OP_NEXT_COMMITTED = BIT(2),
};
+/** enum xe_vma_subop - VMA sub-operation */
+enum xe_vma_subop {
+ /** @XE_VMA_SUBOP_MAP_RANGE: Map range */
+ XE_VMA_SUBOP_MAP_RANGE,
+ /** @XE_VMA_SUBOP_UNMAP_RANGE: Unmap range */
+ XE_VMA_SUBOP_UNMAP_RANGE,
+};
+
/** struct xe_vma_op - VMA operation */
struct xe_vma_op {
/** @base: GPUVA base operation */
@@ -346,6 +398,8 @@ struct xe_vma_op {
struct list_head link;
/** @flags: operation flags */
enum xe_vma_op_flags flags;
+ /** @subop: user defined sub-operation */
+ enum xe_vma_subop subop;
/** @tile_mask: Tile mask for operation */
u8 tile_mask;
@@ -356,6 +410,10 @@ struct xe_vma_op {
struct xe_vma_op_remap remap;
/** @prefetch: VMA prefetch operation specific data */
struct xe_vma_op_prefetch prefetch;
+ /** @map_range: VMA map range operation specific data */
+ struct xe_vma_op_map_range map_range;
+ /** @unmap_range: VMA unmap range operation specific data */
+ struct xe_vma_op_unmap_range unmap_range;
};
};
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index 570fe0376402..a25afb757f70 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -279,8 +279,6 @@ static const struct xe_rtp_entry_sr gt_was[] = {
XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F10(0), RAMDFTUNIT_CLKGATE_DIS)),
XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
},
-
- {}
};
static const struct xe_rtp_entry_sr engine_was[] = {
@@ -599,7 +597,8 @@ static const struct xe_rtp_entry_sr engine_was[] = {
/* Xe3_LPG */
{ XE_RTP_NAME("14021402888"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE))
},
{ XE_RTP_NAME("18034896535"),
@@ -613,11 +612,33 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_ACTIONS(FIELD_SET(SAMPLER_MODE, SMP_WAIT_FETCH_MERGING_COUNTER,
SMP_FORCE_128B_OVERFETCH))
},
-
- {}
+ { XE_RTP_NAME("14023061436"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(TDL_CHICKEN, QID_WAIT_FOR_THREAD_NOT_RUN_DISABLE))
+ },
+ { XE_RTP_NAME("13012615864"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, RES_CHK_SPR_DIS))
+ },
};
static const struct xe_rtp_entry_sr lrc_was[] = {
+ { XE_RTP_NAME("16011163337"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
+ /* read verification is ignored due to 1608008084. */
+ XE_RTP_ACTIONS(FIELD_SET_NO_READ_MASK(FF_MODE2,
+ FF_MODE2_GS_TIMER_MASK,
+ FF_MODE2_GS_TIMER_224))
+ },
+ { XE_RTP_NAME("1604555607"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
+ /* read verification is ignored due to 1608008084. */
+ XE_RTP_ACTIONS(FIELD_SET_NO_READ_MASK(FF_MODE2,
+ FF_MODE2_TDS_TIMER_MASK,
+ FF_MODE2_TDS_TIMER_128))
+ },
{ XE_RTP_NAME("1409342910, 14010698770, 14010443199, 1408979724, 1409178076, 1409207793, 1409217633, 1409252684, 1409347922, 1409142259"),
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210)),
XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN3,
@@ -800,8 +821,6 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
DIS_PARTIAL_AUTOSTRIP |
DIS_AUTOSTRIP))
},
-
- {}
};
static __maybe_unused const struct xe_rtp_entry oob_was[] = {
@@ -843,7 +862,7 @@ void xe_wa_process_gt(struct xe_gt *gt)
xe_rtp_process_ctx_enable_active_tracking(&ctx, gt->wa_active.gt,
ARRAY_SIZE(gt_was));
- xe_rtp_process_to_sr(&ctx, gt_was, &gt->reg_sr);
+ xe_rtp_process_to_sr(&ctx, gt_was, ARRAY_SIZE(gt_was), &gt->reg_sr);
}
EXPORT_SYMBOL_IF_KUNIT(xe_wa_process_gt);
@@ -861,7 +880,7 @@ void xe_wa_process_engine(struct xe_hw_engine *hwe)
xe_rtp_process_ctx_enable_active_tracking(&ctx, hwe->gt->wa_active.engine,
ARRAY_SIZE(engine_was));
- xe_rtp_process_to_sr(&ctx, engine_was, &hwe->reg_sr);
+ xe_rtp_process_to_sr(&ctx, engine_was, ARRAY_SIZE(engine_was), &hwe->reg_sr);
}
/**
@@ -878,7 +897,7 @@ void xe_wa_process_lrc(struct xe_hw_engine *hwe)
xe_rtp_process_ctx_enable_active_tracking(&ctx, hwe->gt->wa_active.lrc,
ARRAY_SIZE(lrc_was));
- xe_rtp_process_to_sr(&ctx, lrc_was, &hwe->reg_lrc);
+ xe_rtp_process_to_sr(&ctx, lrc_was, ARRAY_SIZE(lrc_was), &hwe->reg_lrc);
}
/**
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index 40438c3d9b72..e0c5fa460487 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -5,6 +5,7 @@
22011391025 PLATFORM(DG2)
22012727170 SUBPLATFORM(DG2, G11)
22012727685 SUBPLATFORM(DG2, G11)
+22016596838 PLATFORM(PVC)
18020744125 PLATFORM(PVC)
1509372804 PLATFORM(PVC), GRAPHICS_STEP(A0, C0)
1409600907 GRAPHICS_VERSION_RANGE(1200, 1250)
@@ -28,6 +29,7 @@
16022287689 GRAPHICS_VERSION(2001)
GRAPHICS_VERSION(2004)
13011645652 GRAPHICS_VERSION(2004)
+ GRAPHICS_VERSION(3001)
14022293748 GRAPHICS_VERSION(2001)
GRAPHICS_VERSION(2004)
22019794406 GRAPHICS_VERSION(2001)
@@ -42,3 +44,12 @@
no_media_l3 MEDIA_VERSION(3000)
14022866841 GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0)
MEDIA_VERSION(3000), MEDIA_STEP(A0, B0)
+16021333562 GRAPHICS_VERSION_RANGE(1200, 1274)
+ MEDIA_VERSION(1300)
+14016712196 GRAPHICS_VERSION(1255)
+ GRAPHICS_VERSION_RANGE(1270, 1274)
+14015568240 GRAPHICS_VERSION_RANGE(1255, 1260)
+18013179988 GRAPHICS_VERSION(1255)
+ GRAPHICS_VERSION_RANGE(1270, 1274)
+1508761755 GRAPHICS_VERSION(1255)
+ GRAPHICS_VERSION(1260), GRAPHICS_STEP(A0, B0)
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index 979f6d3239ba..a6a4a871f197 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -1437,7 +1437,7 @@ zynqmp_dp_disp_connected_live_layer(struct zynqmp_dp *dp)
}
static void zynqmp_dp_disp_enable(struct zynqmp_dp *dp,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct zynqmp_disp_layer *layer;
struct drm_bridge_state *bridge_state;
@@ -1447,8 +1447,7 @@ static void zynqmp_dp_disp_enable(struct zynqmp_dp *dp,
if (!layer)
return;
- bridge_state = drm_atomic_get_new_bridge_state(old_bridge_state->base.state,
- old_bridge_state->bridge);
+ bridge_state = drm_atomic_get_new_bridge_state(state, &dp->bridge);
if (WARN_ON(!bridge_state))
return;
@@ -1534,10 +1533,10 @@ zynqmp_dp_bridge_mode_valid(struct drm_bridge *bridge,
}
/* Check with link rate and lane count */
- mutex_lock(&dp->lock);
- rate = zynqmp_dp_max_rate(dp->link_config.max_rate,
- dp->link_config.max_lanes, dp->config.bpp);
- mutex_unlock(&dp->lock);
+ scoped_guard(mutex, &dp->lock)
+ rate = zynqmp_dp_max_rate(dp->link_config.max_rate,
+ dp->link_config.max_lanes,
+ dp->config.bpp);
if (mode->clock > rate) {
dev_dbg(dp->dev, "filtered mode %s for high pixel rate\n",
mode->name);
@@ -1549,10 +1548,9 @@ zynqmp_dp_bridge_mode_valid(struct drm_bridge *bridge,
}
static void zynqmp_dp_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct zynqmp_dp *dp = bridge_to_dp(bridge);
- struct drm_atomic_state *state = old_bridge_state->base.state;
const struct drm_crtc_state *crtc_state;
const struct drm_display_mode *adjusted_mode;
const struct drm_display_mode *mode;
@@ -1565,7 +1563,7 @@ static void zynqmp_dp_bridge_atomic_enable(struct drm_bridge *bridge,
pm_runtime_get_sync(dp->dev);
guard(mutex)(&dp->lock);
- zynqmp_dp_disp_enable(dp, old_bridge_state);
+ zynqmp_dp_disp_enable(dp, state);
/*
* Retrieve the CRTC mode and adjusted mode. This requires a little
@@ -1627,8 +1625,10 @@ static void zynqmp_dp_bridge_atomic_enable(struct drm_bridge *bridge,
}
static void zynqmp_dp_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
+ struct drm_bridge_state *old_bridge_state = drm_atomic_get_old_bridge_state(state,
+ bridge);
struct zynqmp_dp *dp = bridge_to_dp(bridge);
mutex_lock(&dp->lock);
@@ -1722,13 +1722,9 @@ disconnected:
static enum drm_connector_status zynqmp_dp_bridge_detect(struct drm_bridge *bridge)
{
struct zynqmp_dp *dp = bridge_to_dp(bridge);
- enum drm_connector_status ret;
-
- mutex_lock(&dp->lock);
- ret = __zynqmp_dp_bridge_detect(dp);
- mutex_unlock(&dp->lock);
- return ret;
+ guard(mutex)(&dp->lock);
+ return __zynqmp_dp_bridge_detect(dp);
}
static const struct drm_edid *zynqmp_dp_bridge_edid_read(struct drm_bridge *bridge,
@@ -1881,10 +1877,9 @@ static ssize_t zynqmp_dp_pattern_read(struct file *file, char __user *user_buf,
if (unlikely(ret))
return ret;
- mutex_lock(&dp->lock);
- ret = snprintf(buf, sizeof(buf), "%s\n",
- test_pattern_str[dp->test.pattern]);
- mutex_unlock(&dp->lock);
+ scoped_guard(mutex, &dp->lock)
+ ret = snprintf(buf, sizeof(buf), "%s\n",
+ test_pattern_str[dp->test.pattern]);
debugfs_file_put(dentry);
return simple_read_from_buffer(user_buf, count, ppos, buf, ret);
@@ -1939,24 +1934,18 @@ static int zynqmp_dp_enhanced_get(void *data, u64 *val)
{
struct zynqmp_dp *dp = data;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
*val = dp->test.enhanced;
- mutex_unlock(&dp->lock);
return 0;
}
static int zynqmp_dp_enhanced_set(void *data, u64 val)
{
struct zynqmp_dp *dp = data;
- int ret = 0;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
dp->test.enhanced = val;
- if (dp->test.active)
- ret = zynqmp_dp_test_setup(dp);
- mutex_unlock(&dp->lock);
-
- return ret;
+ return dp->test.active ? zynqmp_dp_test_setup(dp) : 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_enhanced, zynqmp_dp_enhanced_get,
@@ -1966,24 +1955,19 @@ static int zynqmp_dp_downspread_get(void *data, u64 *val)
{
struct zynqmp_dp *dp = data;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
*val = dp->test.downspread;
- mutex_unlock(&dp->lock);
return 0;
}
static int zynqmp_dp_downspread_set(void *data, u64 val)
{
struct zynqmp_dp *dp = data;
- int ret = 0;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
dp->test.downspread = val;
- if (dp->test.active)
- ret = zynqmp_dp_test_setup(dp);
- mutex_unlock(&dp->lock);
- return ret;
+ return dp->test.active ? zynqmp_dp_test_setup(dp) : 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_downspread, zynqmp_dp_downspread_get,
@@ -1993,33 +1977,32 @@ static int zynqmp_dp_active_get(void *data, u64 *val)
{
struct zynqmp_dp *dp = data;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
*val = dp->test.active;
- mutex_unlock(&dp->lock);
return 0;
}
static int zynqmp_dp_active_set(void *data, u64 val)
{
struct zynqmp_dp *dp = data;
- int ret = 0;
+ int ret;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
if (val) {
if (val < 2) {
ret = zynqmp_dp_test_setup(dp);
if (ret)
- goto out;
+ return ret;
}
ret = zynqmp_dp_set_test_pattern(dp, dp->test.pattern,
dp->test.custom);
if (ret)
- goto out;
+ return ret;
ret = zynqmp_dp_update_vs_emph(dp, dp->test.train_set);
if (ret)
- goto out;
+ return ret;
dp->test.active = true;
} else {
@@ -2032,10 +2015,8 @@ static int zynqmp_dp_active_set(void *data, u64 val)
err);
zynqmp_dp_train_loop(dp);
}
-out:
- mutex_unlock(&dp->lock);
- return ret;
+ return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_active, zynqmp_dp_active_get,
@@ -2102,9 +2083,8 @@ static int zynqmp_dp_swing_get(void *data, u64 *val)
struct zynqmp_dp_train_set_priv *priv = data;
struct zynqmp_dp *dp = priv->dp;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
*val = dp->test.train_set[priv->lane] & DP_TRAIN_VOLTAGE_SWING_MASK;
- mutex_unlock(&dp->lock);
return 0;
}
@@ -2113,12 +2093,11 @@ static int zynqmp_dp_swing_set(void *data, u64 val)
struct zynqmp_dp_train_set_priv *priv = data;
struct zynqmp_dp *dp = priv->dp;
u8 *train_set = &dp->test.train_set[priv->lane];
- int ret = 0;
if (val > 3)
return -EINVAL;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
*train_set &= ~(DP_TRAIN_MAX_SWING_REACHED |
DP_TRAIN_VOLTAGE_SWING_MASK);
*train_set |= val;
@@ -2126,10 +2105,9 @@ static int zynqmp_dp_swing_set(void *data, u64 val)
*train_set |= DP_TRAIN_MAX_SWING_REACHED;
if (dp->test.active)
- ret = zynqmp_dp_update_vs_emph(dp, dp->test.train_set);
- mutex_unlock(&dp->lock);
+ return zynqmp_dp_update_vs_emph(dp, dp->test.train_set);
- return ret;
+ return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_swing, zynqmp_dp_swing_get,
@@ -2140,10 +2118,9 @@ static int zynqmp_dp_preemphasis_get(void *data, u64 *val)
struct zynqmp_dp_train_set_priv *priv = data;
struct zynqmp_dp *dp = priv->dp;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
*val = FIELD_GET(DP_TRAIN_PRE_EMPHASIS_MASK,
dp->test.train_set[priv->lane]);
- mutex_unlock(&dp->lock);
return 0;
}
@@ -2152,12 +2129,11 @@ static int zynqmp_dp_preemphasis_set(void *data, u64 val)
struct zynqmp_dp_train_set_priv *priv = data;
struct zynqmp_dp *dp = priv->dp;
u8 *train_set = &dp->test.train_set[priv->lane];
- int ret = 0;
if (val > 2)
return -EINVAL;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
*train_set &= ~(DP_TRAIN_MAX_PRE_EMPHASIS_REACHED |
DP_TRAIN_PRE_EMPHASIS_MASK);
*train_set |= val;
@@ -2165,10 +2141,9 @@ static int zynqmp_dp_preemphasis_set(void *data, u64 val)
*train_set |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
if (dp->test.active)
- ret = zynqmp_dp_update_vs_emph(dp, dp->test.train_set);
- mutex_unlock(&dp->lock);
+ return zynqmp_dp_update_vs_emph(dp, dp->test.train_set);
- return ret;
+ return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_preemphasis, zynqmp_dp_preemphasis_get,
@@ -2178,31 +2153,24 @@ static int zynqmp_dp_lanes_get(void *data, u64 *val)
{
struct zynqmp_dp *dp = data;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
*val = dp->test.link_cnt;
- mutex_unlock(&dp->lock);
return 0;
}
static int zynqmp_dp_lanes_set(void *data, u64 val)
{
struct zynqmp_dp *dp = data;
- int ret = 0;
if (val > ZYNQMP_DP_MAX_LANES)
return -EINVAL;
- mutex_lock(&dp->lock);
- if (val > dp->num_lanes) {
- ret = -EINVAL;
- } else {
- dp->test.link_cnt = val;
- if (dp->test.active)
- ret = zynqmp_dp_test_setup(dp);
- }
- mutex_unlock(&dp->lock);
+ guard(mutex)(&dp->lock);
+ if (val > dp->num_lanes)
+ return -EINVAL;
- return ret;
+ dp->test.link_cnt = val;
+ return dp->test.active ? zynqmp_dp_test_setup(dp) : 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_lanes, zynqmp_dp_lanes_get,
@@ -2212,9 +2180,8 @@ static int zynqmp_dp_rate_get(void *data, u64 *val)
{
struct zynqmp_dp *dp = data;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
*val = drm_dp_bw_code_to_link_rate(dp->test.bw_code) * 10000ULL;
- mutex_unlock(&dp->lock);
return 0;
}
@@ -2222,7 +2189,6 @@ static int zynqmp_dp_rate_set(void *data, u64 val)
{
struct zynqmp_dp *dp = data;
int link_rate;
- int ret = 0;
u8 bw_code;
if (do_div(val, 10000))
@@ -2237,13 +2203,9 @@ static int zynqmp_dp_rate_set(void *data, u64 val)
bw_code != DP_LINK_BW_5_4)
return -EINVAL;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
dp->test.bw_code = bw_code;
- if (dp->test.active)
- ret = zynqmp_dp_test_setup(dp);
- mutex_unlock(&dp->lock);
-
- return ret;
+ return dp->test.active ? zynqmp_dp_test_setup(dp) : 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_rate, zynqmp_dp_rate_get,
@@ -2253,9 +2215,8 @@ static int zynqmp_dp_ignore_aux_errors_get(void *data, u64 *val)
{
struct zynqmp_dp *dp = data;
- mutex_lock(&dp->aux.hw_mutex);
+ guard(mutex)(&dp->lock);
*val = dp->ignore_aux_errors;
- mutex_unlock(&dp->aux.hw_mutex);
return 0;
}
@@ -2266,9 +2227,8 @@ static int zynqmp_dp_ignore_aux_errors_set(void *data, u64 val)
if (val != !!val)
return -EINVAL;
- mutex_lock(&dp->aux.hw_mutex);
+ guard(mutex)(&dp->lock);
dp->ignore_aux_errors = val;
- mutex_unlock(&dp->aux.hw_mutex);
return 0;
}
@@ -2280,9 +2240,8 @@ static int zynqmp_dp_ignore_hpd_get(void *data, u64 *val)
{
struct zynqmp_dp *dp = data;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
*val = dp->ignore_hpd;
- mutex_unlock(&dp->lock);
return 0;
}
@@ -2293,9 +2252,8 @@ static int zynqmp_dp_ignore_hpd_set(void *data, u64 val)
if (val != !!val)
return -EINVAL;
- mutex_lock(&dp->lock);
+ guard(mutex)(&dp->lock);
dp->ignore_hpd = val;
- mutex_lock(&dp->lock);
return 0;
}
@@ -2391,14 +2349,12 @@ static void zynqmp_dp_hpd_work_func(struct work_struct *work)
struct zynqmp_dp *dp = container_of(work, struct zynqmp_dp, hpd_work);
enum drm_connector_status status;
- mutex_lock(&dp->lock);
- if (dp->ignore_hpd) {
- mutex_unlock(&dp->lock);
- return;
- }
+ scoped_guard(mutex, &dp->lock) {
+ if (dp->ignore_hpd)
+ return;
- status = __zynqmp_dp_bridge_detect(dp);
- mutex_unlock(&dp->lock);
+ status = __zynqmp_dp_bridge_detect(dp);
+ }
drm_bridge_hpd_notify(&dp->bridge, status);
}
@@ -2410,11 +2366,9 @@ static void zynqmp_dp_hpd_irq_work_func(struct work_struct *work)
u8 status[DP_LINK_STATUS_SIZE + 2];
int err;
- mutex_lock(&dp->lock);
- if (dp->ignore_hpd) {
- mutex_unlock(&dp->lock);
+ guard(mutex)(&dp->lock);
+ if (dp->ignore_hpd)
return;
- }
err = drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT, status,
DP_LINK_STATUS_SIZE + 2);
@@ -2428,7 +2382,6 @@ static void zynqmp_dp_hpd_irq_work_func(struct work_struct *work)
zynqmp_dp_train_loop(dp);
}
}
- mutex_unlock(&dp->lock);
}
static irqreturn_t zynqmp_dp_irq_handler(int irq, void *data)
@@ -2483,7 +2436,6 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
struct platform_device *pdev = to_platform_device(dpsub->dev);
struct drm_bridge *bridge;
struct zynqmp_dp *dp;
- struct resource *res;
int ret;
dp = kzalloc(sizeof(*dp), GFP_KERNEL);
@@ -2500,8 +2452,7 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
INIT_WORK(&dp->hpd_irq_work, zynqmp_dp_hpd_irq_work_func);
/* Acquire all resources (IOMEM, IRQ and PHYs). */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dp");
- dp->iomem = devm_ioremap_resource(dp->dev, res);
+ dp->iomem = devm_platform_ioremap_resource_byname(pdev, "dp");
if (IS_ERR(dp->iomem)) {
ret = PTR_ERR(dp->iomem);
goto err_free;
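The zynqmp_dp.c hunks above replace open-coded mutex_lock()/mutex_unlock() pairs with the scope-based guard helpers from <linux/cleanup.h>. A minimal sketch of that idiom, assuming a hypothetical struct foo with its own lock (not part of this patch):

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	struct foo {
		struct mutex lock;
		int value;
	};

	static int foo_get(struct foo *f)
	{
		/* Lock is taken here and dropped automatically on every return path. */
		guard(mutex)(&f->lock);
		return f->value;
	}

	static void foo_set(struct foo *f, int v)
	{
		/* Lock is held only for the duration of this block. */
		scoped_guard(mutex, &f->lock) {
			f->value = v;
		}
	}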
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c b/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c
index fa5f0ace6084..f07ff4eb3a6d 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c
@@ -323,12 +323,16 @@ int zynqmp_audio_init(struct zynqmp_dpsub *dpsub)
audio->dai_name = devm_kasprintf(dev, GFP_KERNEL,
"%s-dai", dev_name(dev));
+ if (!audio->dai_name)
+ return -ENOMEM;
for (unsigned int i = 0; i < ZYNQMP_NUM_PCMS; ++i) {
audio->link_names[i] = devm_kasprintf(dev, GFP_KERNEL,
"%s-dp-%u", dev_name(dev), i);
audio->pcm_names[i] = devm_kasprintf(dev, GFP_KERNEL,
"%s-pcm-%u", dev_name(dev), i);
+ if (!audio->link_names[i] || !audio->pcm_names[i])
+ return -ENOMEM;
}
audio->base = devm_platform_ioremap_resource_byname(pdev, "aud");
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
index f953ca48a930..3a9544b97bc5 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
@@ -201,6 +201,8 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev)
if (ret)
return ret;
+ dma_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
+
/* Try the reserved memory. Proceed if there's none. */
of_reserved_mem_device_init(&pdev->dev);
diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c
index a18cc8d8caf5..6433c00d5d7e 100644
--- a/drivers/gpu/host1x/debug.c
+++ b/drivers/gpu/host1x/debug.c
@@ -216,12 +216,3 @@ void host1x_debug_dump(struct host1x *host1x)
show_all(host1x, &o, true);
}
-
-void host1x_debug_dump_syncpts(struct host1x *host1x)
-{
- struct output o = {
- .fn = write_to_printk
- };
-
- show_syncpts(host1x, &o, false);
-}
diff --git a/drivers/gpu/host1x/debug.h b/drivers/gpu/host1x/debug.h
index 62bd8a091fa7..c43c61d876a9 100644
--- a/drivers/gpu/host1x/debug.h
+++ b/drivers/gpu/host1x/debug.h
@@ -41,6 +41,5 @@ extern unsigned int host1x_debug_trace_cmdbuf;
void host1x_debug_init(struct host1x *host1x);
void host1x_debug_deinit(struct host1x *host1x);
void host1x_debug_dump(struct host1x *host1x);
-void host1x_debug_dump_syncpts(struct host1x *host1x);
#endif
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 46cae925b095..1f93e5e276c0 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -361,6 +361,10 @@ static bool host1x_wants_iommu(struct host1x *host1x)
return true;
}
+/*
+ * Returns ERR_PTR on failure, NULL if the translation is IDENTITY, otherwise a
+ * valid paging domain.
+ */
static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
@@ -385,6 +389,8 @@ static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
* Similarly, if host1x is already attached to an IOMMU (via the DMA
* API), don't try to attach again.
*/
+ if (domain && domain->type == IOMMU_DOMAIN_IDENTITY)
+ domain = NULL;
if (!host1x_wants_iommu(host) || domain)
return domain;
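The comment added above documents a three-way return contract for host1x_iommu_attach(). A hedged sketch of how a caller distinguishes the cases; the function name and local flag below are illustrative only, the real host1x init path is not shown in this hunk:

	static int host1x_iommu_example(struct host1x *host)
	{
		struct iommu_domain *domain = host1x_iommu_attach(host);
		bool identity_mapping;	/* illustrative local, not from the patch */

		if (IS_ERR(domain))
			return PTR_ERR(domain);	/* paging-domain setup failed */

		/* NULL means identity/direct translation: no IOVA management needed. */
		identity_mapping = !domain;

		return 0;
	}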
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 947323f4a234..fa77e4e64f12 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -165,38 +165,6 @@ int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees,
}
EXPORT_SYMBOL_GPL(ipu_degrees_to_rot_mode);
-int ipu_rot_mode_to_degrees(int *degrees, enum ipu_rotate_mode mode,
- bool hflip, bool vflip)
-{
- u32 r90, vf, hf;
-
- r90 = ((u32)mode >> 2) & 0x1;
- hf = ((u32)mode >> 1) & 0x1;
- vf = ((u32)mode >> 0) & 0x1;
- hf ^= (u32)hflip;
- vf ^= (u32)vflip;
-
- switch ((enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf)) {
- case IPU_ROTATE_NONE:
- *degrees = 0;
- break;
- case IPU_ROTATE_90_RIGHT:
- *degrees = 90;
- break;
- case IPU_ROTATE_180:
- *degrees = 180;
- break;
- case IPU_ROTATE_90_LEFT:
- *degrees = 270;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ipu_rot_mode_to_degrees);
-
struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned num)
{
struct ipuv3_channel *channel;
@@ -516,12 +484,6 @@ int ipu_idmac_enable_channel(struct ipuv3_channel *channel)
}
EXPORT_SYMBOL_GPL(ipu_idmac_enable_channel);
-bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno)
-{
- return (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(chno)) & idma_mask(chno));
-}
-EXPORT_SYMBOL_GPL(ipu_idmac_channel_busy);
-
int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms)
{
struct ipu_soc *ipu = channel->ipu;
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index 82b244cb313e..07866b1369c6 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -337,12 +337,6 @@ void ipu_cpmem_set_axi_id(struct ipuv3_channel *ch, u32 id)
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_axi_id);
-int ipu_cpmem_get_burstsize(struct ipuv3_channel *ch)
-{
- return ipu_ch_param_read_field(ch, IPU_FIELD_NPB) + 1;
-}
-EXPORT_SYMBOL_GPL(ipu_cpmem_get_burstsize);
-
void ipu_cpmem_set_burstsize(struct ipuv3_channel *ch, int burstsize)
{
ipu_ch_param_write_field(ch, IPU_FIELD_NPB, burstsize - 1);
@@ -452,23 +446,6 @@ int ipu_cpmem_set_format_passthrough(struct ipuv3_channel *ch, int width)
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_format_passthrough);
-void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format)
-{
- switch (pixel_format) {
- case V4L2_PIX_FMT_UYVY:
- ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3); /* bits/pixel */
- ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0xA);/* pix fmt */
- ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);/* burst size */
- break;
- case V4L2_PIX_FMT_YUYV:
- ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3); /* bits/pixel */
- ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0x8);/* pix fmt */
- ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);/* burst size */
- break;
- }
-}
-EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_interleaved);
-
void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
unsigned int uv_stride,
unsigned int u_offset, unsigned int v_offset)
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index 778bc26d3ba5..d576b7d28437 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -186,32 +186,6 @@ static inline void ipu_csi_write(struct ipu_csi *csi, u32 value,
}
/*
- * Set mclk division ratio for generating test mode mclk. Only used
- * for test generator.
- */
-static int ipu_csi_set_testgen_mclk(struct ipu_csi *csi, u32 pixel_clk,
- u32 ipu_clk)
-{
- u32 temp;
- int div_ratio;
-
- div_ratio = (ipu_clk / pixel_clk) - 1;
-
- if (div_ratio > 0xFF || div_ratio < 0) {
- dev_err(csi->ipu->dev,
- "value of pixel_clk extends normal range\n");
- return -EINVAL;
- }
-
- temp = ipu_csi_read(csi, CSI_SENS_CONF);
- temp &= ~CSI_SENS_CONF_DIVRATIO_MASK;
- ipu_csi_write(csi, temp | (div_ratio << CSI_SENS_CONF_DIVRATIO_SHIFT),
- CSI_SENS_CONF);
-
- return 0;
-}
-
-/*
* Find the CSI data format and data width for the given V4L2 media
* bus pixel format code.
*/
@@ -538,56 +512,6 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(ipu_csi_init_interface);
-bool ipu_csi_is_interlaced(struct ipu_csi *csi)
-{
- unsigned long flags;
- u32 sensor_protocol;
-
- spin_lock_irqsave(&csi->lock, flags);
- sensor_protocol =
- (ipu_csi_read(csi, CSI_SENS_CONF) &
- CSI_SENS_CONF_SENS_PRTCL_MASK) >>
- CSI_SENS_CONF_SENS_PRTCL_SHIFT;
- spin_unlock_irqrestore(&csi->lock, flags);
-
- switch (sensor_protocol) {
- case IPU_CSI_CLK_MODE_GATED_CLK:
- case IPU_CSI_CLK_MODE_NONGATED_CLK:
- case IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE:
- case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR:
- case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR:
- return false;
- case IPU_CSI_CLK_MODE_CCIR656_INTERLACED:
- case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR:
- case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR:
- return true;
- default:
- dev_err(csi->ipu->dev,
- "CSI %d sensor protocol unsupported\n", csi->id);
- return false;
- }
-}
-EXPORT_SYMBOL_GPL(ipu_csi_is_interlaced);
-
-void ipu_csi_get_window(struct ipu_csi *csi, struct v4l2_rect *w)
-{
- unsigned long flags;
- u32 reg;
-
- spin_lock_irqsave(&csi->lock, flags);
-
- reg = ipu_csi_read(csi, CSI_ACT_FRM_SIZE);
- w->width = (reg & 0xFFFF) + 1;
- w->height = (reg >> 16 & 0xFFFF) + 1;
-
- reg = ipu_csi_read(csi, CSI_OUT_FRM_CTRL);
- w->left = (reg & CSI_HSC_MASK) >> CSI_HSC_SHIFT;
- w->top = (reg & CSI_VSC_MASK) >> CSI_VSC_SHIFT;
-
- spin_unlock_irqrestore(&csi->lock, flags);
-}
-EXPORT_SYMBOL_GPL(ipu_csi_get_window);
-
void ipu_csi_set_window(struct ipu_csi *csi, struct v4l2_rect *w)
{
unsigned long flags;
@@ -624,38 +548,6 @@ void ipu_csi_set_downsize(struct ipu_csi *csi, bool horiz, bool vert)
}
EXPORT_SYMBOL_GPL(ipu_csi_set_downsize);
-void ipu_csi_set_test_generator(struct ipu_csi *csi, bool active,
- u32 r_value, u32 g_value, u32 b_value,
- u32 pix_clk)
-{
- unsigned long flags;
- u32 ipu_clk = clk_get_rate(csi->clk_ipu);
- u32 temp;
-
- spin_lock_irqsave(&csi->lock, flags);
-
- temp = ipu_csi_read(csi, CSI_TST_CTRL);
-
- if (!active) {
- temp &= ~CSI_TEST_GEN_MODE_EN;
- ipu_csi_write(csi, temp, CSI_TST_CTRL);
- } else {
- /* Set sensb_mclk div_ratio */
- ipu_csi_set_testgen_mclk(csi, pix_clk, ipu_clk);
-
- temp &= ~(CSI_TEST_GEN_R_MASK | CSI_TEST_GEN_G_MASK |
- CSI_TEST_GEN_B_MASK);
- temp |= CSI_TEST_GEN_MODE_EN;
- temp |= (r_value << CSI_TEST_GEN_R_SHIFT) |
- (g_value << CSI_TEST_GEN_G_SHIFT) |
- (b_value << CSI_TEST_GEN_B_SHIFT);
- ipu_csi_write(csi, temp, CSI_TST_CTRL);
- }
-
- spin_unlock_irqrestore(&csi->lock, flags);
-}
-EXPORT_SYMBOL_GPL(ipu_csi_set_test_generator);
-
int ipu_csi_set_mipi_datatype(struct ipu_csi *csi, u32 vc,
struct v4l2_mbus_framefmt *mbus_fmt)
{
diff --git a/drivers/gpu/ipu-v3/ipu-ic.c b/drivers/gpu/ipu-v3/ipu-ic.c
index 846461bac70d..acd76ecc5221 100644
--- a/drivers/gpu/ipu-v3/ipu-ic.c
+++ b/drivers/gpu/ipu-v3/ipu-ic.c
@@ -321,79 +321,6 @@ void ipu_ic_task_disable(struct ipu_ic *ic)
}
EXPORT_SYMBOL_GPL(ipu_ic_task_disable);
-int ipu_ic_task_graphics_init(struct ipu_ic *ic,
- const struct ipu_ic_colorspace *g_in_cs,
- bool galpha_en, u32 galpha,
- bool colorkey_en, u32 colorkey)
-{
- struct ipu_ic_priv *priv = ic->priv;
- struct ipu_ic_csc csc2;
- unsigned long flags;
- u32 reg, ic_conf;
- int ret = 0;
-
- if (ic->task == IC_TASK_ENCODER)
- return -EINVAL;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- ic_conf = ipu_ic_read(ic, IC_CONF);
-
- if (!(ic_conf & ic->bit->ic_conf_csc1_en)) {
- struct ipu_ic_csc csc1;
-
- ret = ipu_ic_calc_csc(&csc1,
- V4L2_YCBCR_ENC_601,
- V4L2_QUANTIZATION_FULL_RANGE,
- IPUV3_COLORSPACE_RGB,
- V4L2_YCBCR_ENC_601,
- V4L2_QUANTIZATION_FULL_RANGE,
- IPUV3_COLORSPACE_RGB);
- if (ret)
- goto unlock;
-
- /* need transparent CSC1 conversion */
- ret = init_csc(ic, &csc1, 0);
- if (ret)
- goto unlock;
- }
-
- ic->g_in_cs = *g_in_cs;
- csc2.in_cs = ic->g_in_cs;
- csc2.out_cs = ic->out_cs;
-
- ret = __ipu_ic_calc_csc(&csc2);
- if (ret)
- goto unlock;
-
- ret = init_csc(ic, &csc2, 1);
- if (ret)
- goto unlock;
-
- if (galpha_en) {
- ic_conf |= IC_CONF_IC_GLB_LOC_A;
- reg = ipu_ic_read(ic, IC_CMBP_1);
- reg &= ~(0xff << ic->bit->ic_cmb_galpha_bit);
- reg |= (galpha << ic->bit->ic_cmb_galpha_bit);
- ipu_ic_write(ic, reg, IC_CMBP_1);
- } else
- ic_conf &= ~IC_CONF_IC_GLB_LOC_A;
-
- if (colorkey_en) {
- ic_conf |= IC_CONF_KEY_COLOR_EN;
- ipu_ic_write(ic, colorkey, IC_CMBP_2);
- } else
- ic_conf &= ~IC_CONF_KEY_COLOR_EN;
-
- ipu_ic_write(ic, ic_conf, IC_CONF);
-
- ic->graphics = true;
-unlock:
- spin_unlock_irqrestore(&priv->lock, flags);
- return ret;
-}
-EXPORT_SYMBOL_GPL(ipu_ic_task_graphics_init);
-
int ipu_ic_task_init_rsc(struct ipu_ic *ic,
const struct ipu_ic_csc *csc,
int in_width, int in_height,
diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
index 841316582ea9..3c33b4defab5 100644
--- a/drivers/gpu/ipu-v3/ipu-image-convert.c
+++ b/drivers/gpu/ipu-v3/ipu-image-convert.c
@@ -355,20 +355,6 @@ static void dump_format(struct ipu_image_convert_ctx *ctx,
(ic_image->fmt->fourcc >> 24) & 0xff);
}
-int ipu_image_convert_enum_format(int index, u32 *fourcc)
-{
- const struct ipu_image_pixfmt *fmt;
-
- if (index >= (int)ARRAY_SIZE(image_convert_formats))
- return -EINVAL;
-
- /* Format found */
- fmt = &image_convert_formats[index];
- *fourcc = fmt->fourcc;
- return 0;
-}
-EXPORT_SYMBOL_GPL(ipu_image_convert_enum_format);
-
static void free_dma_buf(struct ipu_image_convert_priv *priv,
struct ipu_image_convert_dma_buf *buf)
{
@@ -2437,40 +2423,6 @@ ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
}
EXPORT_SYMBOL_GPL(ipu_image_convert);
-/* "Canned" synchronous single image conversion */
-static void image_convert_sync_complete(struct ipu_image_convert_run *run,
- void *data)
-{
- struct completion *comp = data;
-
- complete(comp);
-}
-
-int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
- struct ipu_image *in, struct ipu_image *out,
- enum ipu_rotate_mode rot_mode)
-{
- struct ipu_image_convert_run *run;
- struct completion comp;
- int ret;
-
- init_completion(&comp);
-
- run = ipu_image_convert(ipu, ic_task, in, out, rot_mode,
- image_convert_sync_complete, &comp);
- if (IS_ERR(run))
- return PTR_ERR(run);
-
- ret = wait_for_completion_timeout(&comp, msecs_to_jiffies(10000));
- ret = (ret == 0) ? -ETIMEDOUT : 0;
-
- ipu_image_convert_unprepare(run->ctx);
- kfree(run);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(ipu_image_convert_sync);
-
int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev)
{
struct ipu_image_convert_priv *priv;
diff --git a/drivers/gpu/ipu-v3/ipu-prv.h b/drivers/gpu/ipu-v3/ipu-prv.h
index 3884acb7995a..16322b2137f8 100644
--- a/drivers/gpu/ipu-v3/ipu-prv.h
+++ b/drivers/gpu/ipu-v3/ipu-prv.h
@@ -216,8 +216,6 @@ void ipu_srm_dp_update(struct ipu_soc *ipu, bool sync);
int ipu_module_enable(struct ipu_soc *ipu, u32 mask);
int ipu_module_disable(struct ipu_soc *ipu, u32 mask);
-bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno);
-
int ipu_csi_init(struct ipu_soc *ipu, struct device *dev, int id,
unsigned long base, u32 module, struct clk *clk_ipu);
void ipu_csi_exit(struct ipu_soc *ipu, int id);
diff --git a/drivers/gpu/ipu-v3/ipu-vdi.c b/drivers/gpu/ipu-v3/ipu-vdi.c
index a593b232b6d3..af9631fd4256 100644
--- a/drivers/gpu/ipu-v3/ipu-vdi.c
+++ b/drivers/gpu/ipu-v3/ipu-vdi.c
@@ -150,17 +150,6 @@ void ipu_vdi_setup(struct ipu_vdi *vdi, u32 code, int xres, int yres)
}
EXPORT_SYMBOL_GPL(ipu_vdi_setup);
-void ipu_vdi_unsetup(struct ipu_vdi *vdi)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&vdi->lock, flags);
- ipu_vdi_write(vdi, 0, VDI_FSIZE);
- ipu_vdi_write(vdi, 0, VDI_C);
- spin_unlock_irqrestore(&vdi->lock, flags);
-}
-EXPORT_SYMBOL_GPL(ipu_vdi_unsetup);
-
int ipu_vdi_enable(struct ipu_vdi *vdi)
{
unsigned long flags;
diff --git a/drivers/gpu/nova-core/Kconfig b/drivers/gpu/nova-core/Kconfig
new file mode 100644
index 000000000000..ad0c06756516
--- /dev/null
+++ b/drivers/gpu/nova-core/Kconfig
@@ -0,0 +1,14 @@
+config NOVA_CORE
+ tristate "Nova Core GPU driver"
+ depends on PCI
+ depends on RUST
+ depends on RUST_FW_LOADER_ABSTRACTIONS
+ default n
+ help
+ Choose this if you want to build the Nova Core driver for Nvidia
+ GPUs based on the GPU System Processor (GSP). This is true for Turing
+ and later GPUs.
+
+ This driver is a work in progress and may not be functional.
+
+ If M is selected, the module will be called nova_core.
diff --git a/drivers/gpu/nova-core/Makefile b/drivers/gpu/nova-core/Makefile
new file mode 100644
index 000000000000..2d78c50126e1
--- /dev/null
+++ b/drivers/gpu/nova-core/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_NOVA_CORE) += nova_core.o
diff --git a/drivers/gpu/nova-core/driver.rs b/drivers/gpu/nova-core/driver.rs
new file mode 100644
index 000000000000..63c19f140fbd
--- /dev/null
+++ b/drivers/gpu/nova-core/driver.rs
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use kernel::{bindings, c_str, pci, prelude::*};
+
+use crate::gpu::Gpu;
+
+#[pin_data]
+pub(crate) struct NovaCore {
+ #[pin]
+ pub(crate) gpu: Gpu,
+}
+
+const BAR0_SIZE: usize = 8;
+pub(crate) type Bar0 = pci::Bar<BAR0_SIZE>;
+
+kernel::pci_device_table!(
+ PCI_TABLE,
+ MODULE_PCI_TABLE,
+ <NovaCore as pci::Driver>::IdInfo,
+ [(
+ pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_NVIDIA, bindings::PCI_ANY_ID as _),
+ ()
+ )]
+);
+
+impl pci::Driver for NovaCore {
+ type IdInfo = ();
+ const ID_TABLE: pci::IdTable<Self::IdInfo> = &PCI_TABLE;
+
+ fn probe(pdev: &mut pci::Device, _info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> {
+ dev_dbg!(pdev.as_ref(), "Probe Nova Core GPU driver.\n");
+
+ pdev.enable_device_mem()?;
+ pdev.set_master();
+
+ let bar = pdev.iomap_region_sized::<BAR0_SIZE>(0, c_str!("nova-core/bar0"))?;
+
+ let this = KBox::pin_init(
+ try_pin_init!(Self {
+ gpu <- Gpu::new(pdev, bar)?,
+ }),
+ GFP_KERNEL,
+ )?;
+
+ Ok(this)
+ }
+}
diff --git a/drivers/gpu/nova-core/firmware.rs b/drivers/gpu/nova-core/firmware.rs
new file mode 100644
index 000000000000..6e6361c59ca1
--- /dev/null
+++ b/drivers/gpu/nova-core/firmware.rs
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use crate::gpu;
+use kernel::firmware;
+
+pub(crate) struct ModInfoBuilder<const N: usize>(firmware::ModInfoBuilder<N>);
+
+impl<const N: usize> ModInfoBuilder<N> {
+ const VERSION: &'static str = "535.113.01";
+
+ const fn make_entry_file(self, chipset: &str, fw: &str) -> Self {
+ ModInfoBuilder(
+ self.0
+ .new_entry()
+ .push("nvidia/")
+ .push(chipset)
+ .push("/gsp/")
+ .push(fw)
+ .push("-")
+ .push(Self::VERSION)
+ .push(".bin"),
+ )
+ }
+
+ const fn make_entry_chipset(self, chipset: &str) -> Self {
+ self.make_entry_file(chipset, "booter_load")
+ .make_entry_file(chipset, "booter_unload")
+ .make_entry_file(chipset, "bootloader")
+ .make_entry_file(chipset, "gsp")
+ }
+
+ pub(crate) const fn create(
+ module_name: &'static kernel::str::CStr,
+ ) -> firmware::ModInfoBuilder<N> {
+ let mut this = Self(firmware::ModInfoBuilder::new(module_name));
+ let mut i = 0;
+
+ while i < gpu::Chipset::NAMES.len() {
+ this = this.make_entry_chipset(gpu::Chipset::NAMES[i]);
+ i += 1;
+ }
+
+ this.0
+ }
+}
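The builder above expands, for each lower-cased chipset name, the pattern nvidia/<chipset>/gsp/<firmware>-<version>.bin for the four GSP blobs. Purely as an illustration derived from that code (not strings added by the patch), the module-info entries generated for the GA102 chipset would be:

	/* Illustration only: firmware paths emitted for the GA102 chipset. */
	static const char * const ga102_gsp_firmware[] = {
		"nvidia/ga102/gsp/booter_load-535.113.01.bin",
		"nvidia/ga102/gsp/booter_unload-535.113.01.bin",
		"nvidia/ga102/gsp/bootloader-535.113.01.bin",
		"nvidia/ga102/gsp/gsp-535.113.01.bin",
	};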
diff --git a/drivers/gpu/nova-core/gpu.rs b/drivers/gpu/nova-core/gpu.rs
new file mode 100644
index 000000000000..17c9660da450
--- /dev/null
+++ b/drivers/gpu/nova-core/gpu.rs
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use kernel::{
+ device, devres::Devres, error::code::*, firmware, fmt, pci, prelude::*, str::CString,
+};
+
+use crate::driver::Bar0;
+use crate::regs;
+use crate::util;
+use core::fmt;
+
+macro_rules! define_chipset {
+ ({ $($variant:ident = $value:expr),* $(,)* }) =>
+ {
+ /// Enum representation of the GPU chipset.
+ #[derive(fmt::Debug)]
+ pub(crate) enum Chipset {
+ $($variant = $value),*,
+ }
+
+ impl Chipset {
+ pub(crate) const ALL: &'static [Chipset] = &[
+ $( Chipset::$variant, )*
+ ];
+
+ pub(crate) const NAMES: [&'static str; Self::ALL.len()] = [
+ $( util::const_bytes_to_str(
+ util::to_lowercase_bytes::<{ stringify!($variant).len() }>(
+ stringify!($variant)
+ ).as_slice()
+ ), )*
+ ];
+ }
+
+ // TODO replace with something like derive(FromPrimitive)
+ impl TryFrom<u32> for Chipset {
+ type Error = kernel::error::Error;
+
+ fn try_from(value: u32) -> Result<Self, Self::Error> {
+ match value {
+ $( $value => Ok(Chipset::$variant), )*
+ _ => Err(ENODEV),
+ }
+ }
+ }
+ }
+}
+
+define_chipset!({
+ // Turing
+ TU102 = 0x162,
+ TU104 = 0x164,
+ TU106 = 0x166,
+ TU117 = 0x167,
+ TU116 = 0x168,
+ // Ampere
+ GA102 = 0x172,
+ GA103 = 0x173,
+ GA104 = 0x174,
+ GA106 = 0x176,
+ GA107 = 0x177,
+ // Ada
+ AD102 = 0x192,
+ AD103 = 0x193,
+ AD104 = 0x194,
+ AD106 = 0x196,
+ AD107 = 0x197,
+});
+
+impl Chipset {
+ pub(crate) fn arch(&self) -> Architecture {
+ match self {
+ Self::TU102 | Self::TU104 | Self::TU106 | Self::TU117 | Self::TU116 => {
+ Architecture::Turing
+ }
+ Self::GA102 | Self::GA103 | Self::GA104 | Self::GA106 | Self::GA107 => {
+ Architecture::Ampere
+ }
+ Self::AD102 | Self::AD103 | Self::AD104 | Self::AD106 | Self::AD107 => {
+ Architecture::Ada
+ }
+ }
+ }
+}
+
+// TODO
+//
+// The resulting strings are used to generate firmware paths, hence the
+// generated strings have to be stable.
+//
+// Therefore, replace with something like strum_macros derive(Display).
+//
+// For now, redirect to fmt::Debug for convenience.
+impl fmt::Display for Chipset {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:?}", self)
+ }
+}
+
+/// Enum representation of the GPU generation.
+#[derive(fmt::Debug)]
+pub(crate) enum Architecture {
+ Turing,
+ Ampere,
+ Ada,
+}
+
+pub(crate) struct Revision {
+ major: u8,
+ minor: u8,
+}
+
+impl Revision {
+ fn from_boot0(boot0: regs::Boot0) -> Self {
+ Self {
+ major: boot0.major_rev(),
+ minor: boot0.minor_rev(),
+ }
+ }
+}
+
+impl fmt::Display for Revision {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:x}.{:x}", self.major, self.minor)
+ }
+}
+
+/// Structure holding the metadata of the GPU.
+pub(crate) struct Spec {
+ chipset: Chipset,
+ /// The revision of the chipset.
+ revision: Revision,
+}
+
+impl Spec {
+ fn new(bar: &Devres<Bar0>) -> Result<Spec> {
+ let bar = bar.try_access().ok_or(ENXIO)?;
+ let boot0 = regs::Boot0::read(&bar);
+
+ Ok(Self {
+ chipset: boot0.chipset().try_into()?,
+ revision: Revision::from_boot0(boot0),
+ })
+ }
+}
+
+/// Structure encapsulating the firmware blobs required for the GPU to operate.
+#[expect(dead_code)]
+pub(crate) struct Firmware {
+ booter_load: firmware::Firmware,
+ booter_unload: firmware::Firmware,
+ bootloader: firmware::Firmware,
+ gsp: firmware::Firmware,
+}
+
+impl Firmware {
+ fn new(dev: &device::Device, spec: &Spec, ver: &str) -> Result<Firmware> {
+ let mut chip_name = CString::try_from_fmt(fmt!("{}", spec.chipset))?;
+ chip_name.make_ascii_lowercase();
+
+ let request = |name_| {
+ CString::try_from_fmt(fmt!("nvidia/{}/gsp/{}-{}.bin", &*chip_name, name_, ver))
+ .and_then(|path| firmware::Firmware::request(&path, dev))
+ };
+
+ Ok(Firmware {
+ booter_load: request("booter_load")?,
+ booter_unload: request("booter_unload")?,
+ bootloader: request("bootloader")?,
+ gsp: request("gsp")?,
+ })
+ }
+}
+
+/// Structure holding the resources required to operate the GPU.
+#[pin_data]
+pub(crate) struct Gpu {
+ spec: Spec,
+ /// MMIO mapping of PCI BAR 0
+ bar: Devres<Bar0>,
+ fw: Firmware,
+}
+
+impl Gpu {
+ pub(crate) fn new(pdev: &pci::Device, bar: Devres<Bar0>) -> Result<impl PinInit<Self>> {
+ let spec = Spec::new(&bar)?;
+ let fw = Firmware::new(pdev.as_ref(), &spec, "535.113.01")?;
+
+ dev_info!(
+ pdev.as_ref(),
+ "NVIDIA (Chipset: {}, Architecture: {:?}, Revision: {})\n",
+ spec.chipset,
+ spec.chipset.arch(),
+ spec.revision
+ );
+
+ Ok(pin_init!(Self { spec, bar, fw }))
+ }
+}
diff --git a/drivers/gpu/nova-core/nova_core.rs b/drivers/gpu/nova-core/nova_core.rs
new file mode 100644
index 000000000000..a91cd924054b
--- /dev/null
+++ b/drivers/gpu/nova-core/nova_core.rs
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Nova Core GPU Driver
+
+mod driver;
+mod firmware;
+mod gpu;
+mod regs;
+mod util;
+
+kernel::module_pci_driver! {
+ type: driver::NovaCore,
+ name: "NovaCore",
+ author: "Danilo Krummrich",
+ description: "Nova Core GPU driver",
+ license: "GPL v2",
+ firmware: [],
+}
+
+kernel::module_firmware!(firmware::ModInfoBuilder);
diff --git a/drivers/gpu/nova-core/regs.rs b/drivers/gpu/nova-core/regs.rs
new file mode 100644
index 000000000000..50aefb150b0b
--- /dev/null
+++ b/drivers/gpu/nova-core/regs.rs
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use crate::driver::Bar0;
+
+// TODO
+//
+// Create register definitions via generic macros. See task "Generic register
+// abstraction" in Documentation/gpu/nova/core/todo.rst.
+
+const BOOT0_OFFSET: usize = 0x00000000;
+
+// 3:0 - chipset minor revision
+const BOOT0_MINOR_REV_SHIFT: u8 = 0;
+const BOOT0_MINOR_REV_MASK: u32 = 0x0000000f;
+
+// 7:4 - chipset major revision
+const BOOT0_MAJOR_REV_SHIFT: u8 = 4;
+const BOOT0_MAJOR_REV_MASK: u32 = 0x000000f0;
+
+// 23:20 - chipset implementation identifier (depends on architecture)
+const BOOT0_IMPL_SHIFT: u8 = 20;
+const BOOT0_IMPL_MASK: u32 = 0x00f00000;
+
+// 28:24 - chipset architecture identifier
+const BOOT0_ARCH_MASK: u32 = 0x1f000000;
+
+// 28:20 - chipset identifier (virtual register field combining BOOT0_IMPL and
+// BOOT0_ARCH)
+const BOOT0_CHIPSET_SHIFT: u8 = BOOT0_IMPL_SHIFT;
+const BOOT0_CHIPSET_MASK: u32 = BOOT0_IMPL_MASK | BOOT0_ARCH_MASK;
+
+#[derive(Copy, Clone)]
+pub(crate) struct Boot0(u32);
+
+impl Boot0 {
+ #[inline]
+ pub(crate) fn read(bar: &Bar0) -> Self {
+ Self(bar.readl(BOOT0_OFFSET))
+ }
+
+ #[inline]
+ pub(crate) fn chipset(&self) -> u32 {
+ (self.0 & BOOT0_CHIPSET_MASK) >> BOOT0_CHIPSET_SHIFT
+ }
+
+ #[inline]
+ pub(crate) fn minor_rev(&self) -> u8 {
+ ((self.0 & BOOT0_MINOR_REV_MASK) >> BOOT0_MINOR_REV_SHIFT) as u8
+ }
+
+ #[inline]
+ pub(crate) fn major_rev(&self) -> u8 {
+ ((self.0 & BOOT0_MAJOR_REV_MASK) >> BOOT0_MAJOR_REV_SHIFT) as u8
+ }
+}
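A worked example of the field extraction above, using a made-up BOOT0 readout of 0x172000a1; the masks and shifts mirror the constants defined in this file, and the arithmetic is shown in C style for brevity:

	/* Hypothetical register value, for illustration only. */
	u32 boot0 = 0x172000a1;

	u32 chipset = (boot0 & 0x1ff00000) >> 20;	/* IMPL|ARCH bits 28:20 -> 0x172 (GA102) */
	u8 major = (boot0 & 0x000000f0) >> 4;		/* 0xa */
	u8 minor = boot0 & 0x0000000f;			/* 0x1 -> printed as revision "a.1" */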
diff --git a/drivers/gpu/nova-core/util.rs b/drivers/gpu/nova-core/util.rs
new file mode 100644
index 000000000000..332a64cfc6a9
--- /dev/null
+++ b/drivers/gpu/nova-core/util.rs
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+
+pub(crate) const fn to_lowercase_bytes<const N: usize>(s: &str) -> [u8; N] {
+ let src = s.as_bytes();
+ let mut dst = [0; N];
+ let mut i = 0;
+
+ while i < src.len() && i < N {
+ dst[i] = (src[i] as char).to_ascii_lowercase() as u8;
+ i += 1;
+ }
+
+ dst
+}
+
+pub(crate) const fn const_bytes_to_str(bytes: &[u8]) -> &str {
+ match core::str::from_utf8(bytes) {
+ Ok(string) => string,
+ Err(_) => kernel::build_error!("Bytes are not valid UTF-8."),
+ }
+}
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index dfc245867a46..a503252702b7 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -148,6 +148,31 @@ config HID_APPLEIR
Say Y here if you want support for Apple infrared remote control.
+config HID_APPLETB_BL
+ tristate "Apple Touch Bar Backlight"
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want support for the backlight of Touch Bars on x86
+ MacBook Pros.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hid-appletb-bl.
+
+config HID_APPLETB_KBD
+ tristate "Apple Touch Bar Keyboard Mode"
+ depends on USB_HID
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on INPUT
+ select INPUT_SPARSEKMAP
+ select HID_APPLETB_BL
+ help
+ Say Y here if you want support for the keyboard mode (escape,
+ function, media and brightness keys) of Touch Bars on x86 MacBook
+ Pros.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hid-appletb-kbd.
+
config HID_ASUS
tristate "Asus"
depends on USB_HID
@@ -603,6 +628,7 @@ config HID_LOGITECH
tristate "Logitech devices"
depends on USB_HID
depends on LEDS_CLASS
+ depends on LEDS_CLASS_MULTICOLOR
default !EXPERT
help
Support for Logitech devices that are not fully compliant with HID standard.
@@ -1220,6 +1246,20 @@ config HID_U2FZERO
allow setting the brightness to anything but 1, which will
trigger a single blink and immediately reset back to 0.
+config HID_UNIVERSAL_PIDFF
+ tristate "universal-pidff: extended USB PID driver compatibility and usage"
+ depends on USB_HID
+ depends on HID_PID
+ help
+ Extended PID support for selected devices.
+
+ Contains report fixups, an extended usable button range and
+ pidff quirk management to improve compatibility with slightly
+ non-compliant USB PID devices, as well as better fuzz/flat
+ values for high-precision direct drive devices.
+
+ Supports Moza Racing, Cammus, VRS, FFBeast and more.
+
config HID_WACOM
tristate "Wacom Intuos/Graphire tablet support (USB)"
depends on USB_HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 482b096eea28..10ae5dedbd84 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -29,6 +29,8 @@ obj-$(CONFIG_HID_ALPS) += hid-alps.o
obj-$(CONFIG_HID_ACRUX) += hid-axff.o
obj-$(CONFIG_HID_APPLE) += hid-apple.o
obj-$(CONFIG_HID_APPLEIR) += hid-appleir.o
+obj-$(CONFIG_HID_APPLETB_BL) += hid-appletb-bl.o
+obj-$(CONFIG_HID_APPLETB_KBD) += hid-appletb-kbd.o
obj-$(CONFIG_HID_CREATIVE_SB0540) += hid-creative-sb0540.o
obj-$(CONFIG_HID_ASUS) += hid-asus.o
obj-$(CONFIG_HID_AUREAL) += hid-aureal.o
@@ -140,6 +142,7 @@ hid-uclogic-objs := hid-uclogic-core.o \
hid-uclogic-params.o
obj-$(CONFIG_HID_UCLOGIC) += hid-uclogic.o
obj-$(CONFIG_HID_UDRAW_PS3) += hid-udraw-ps3.o
+obj-$(CONFIG_HID_UNIVERSAL_PIDFF) += hid-universal-pidff.o
obj-$(CONFIG_HID_LED) += hid-led.o
obj-$(CONFIG_HID_XIAOMI) += hid-xiaomi.o
obj-$(CONFIG_HID_XINMO) += hid-xinmo.o
@@ -166,7 +169,6 @@ obj-$(CONFIG_USB_KBD) += usbhid/
obj-$(CONFIG_I2C_HID_CORE) += i2c-hid/
obj-$(CONFIG_INTEL_ISH_HID) += intel-ish-hid/
-obj-$(INTEL_ISH_FIRMWARE_DOWNLOADER) += intel-ish-hid/
obj-$(CONFIG_AMD_SFH_HID) += amd-sfh-hid/
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_common.h b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
index 799b8686a88a..f44a3bb2fbd4 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_common.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
@@ -42,6 +42,7 @@ struct amd_mp2_sensor_info {
struct sfh_dev_status {
bool is_hpd_present;
+ bool is_hpd_enabled;
bool is_als_present;
bool is_sra_present;
};
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 48cfd0c58241..1c1fd63330c9 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -18,6 +18,7 @@
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include "amd_sfh_pcie.h"
#include "sfh1_1/amd_sfh_init.h"
@@ -330,6 +331,57 @@ static const struct dmi_system_id dmi_nodevs[] = {
{ }
};
+static ssize_t hpd_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct amd_mp2_dev *mp2 = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", str_enabled_disabled(mp2->dev_en.is_hpd_enabled));
+}
+
+static ssize_t hpd_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct amd_mp2_dev *mp2 = dev_get_drvdata(dev);
+ bool enabled;
+ int ret;
+
+ ret = kstrtobool(buf, &enabled);
+ if (ret)
+ return ret;
+
+ mp2->sfh1_1_ops->toggle_hpd(mp2, enabled);
+
+ return count;
+}
+static DEVICE_ATTR_RW(hpd);
+
+static umode_t sfh_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct amd_mp2_dev *mp2 = dev_get_drvdata(dev);
+
+ if (!mp2->sfh1_1_ops || !mp2->dev_en.is_hpd_present)
+ return 0;
+
+ return attr->mode;
+}
+
+static struct attribute *sfh_attrs[] = {
+ &dev_attr_hpd.attr,
+ NULL,
+};
+
+static struct attribute_group sfh_attr_group = {
+ .attrs = sfh_attrs,
+ .is_visible = sfh_attr_is_visible,
+};
+
+static const struct attribute_group *amd_sfh_groups[] = {
+ &sfh_attr_group,
+ NULL,
+};
+
static void sfh1_1_init_work(struct work_struct *work)
{
struct amd_mp2_dev *mp2 = container_of(work, struct amd_mp2_dev, work);
@@ -341,6 +393,11 @@ static void sfh1_1_init_work(struct work_struct *work)
amd_sfh_clear_intr(mp2);
mp2->init_done = 1;
+
+ rc = sysfs_update_group(&mp2->pdev->dev.kobj, &sfh_attr_group);
+ if (rc)
+ dev_warn(&mp2->pdev->dev, "failed to update sysfs group\n");
+
}
static void sfh_init_work(struct work_struct *work)
@@ -487,6 +544,7 @@ static struct pci_driver amd_mp2_pci_driver = {
.driver.pm = &amd_mp2_pm_ops,
.shutdown = amd_sfh_shutdown,
.remove = amd_sfh_remove,
+ .dev_groups = amd_sfh_groups,
};
module_pci_driver(amd_mp2_pci_driver);
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
index e9929c4aa72e..25f0ebfcbd5f 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
@@ -212,6 +212,8 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
switch (cl_data->sensor_idx[i]) {
case HPD_IDX:
privdata->dev_en.is_hpd_present = true;
+ privdata->dev_en.is_hpd_enabled = true;
+ amd_sfh_toggle_hpd(privdata, false);
break;
case ALS_IDX:
privdata->dev_en.is_als_present = true;
@@ -255,6 +257,10 @@ static void amd_sfh_resume(struct amd_mp2_dev *mp2)
}
for (i = 0; i < cl_data->num_hid_devices; i++) {
+ /* leave HPD alone; policy is controlled by sysfs */
+ if (cl_data->sensor_idx[i] == HPD_IDX)
+ continue;
+
if (cl_data->sensor_sts[i] == SENSOR_DISABLED) {
info.sensor_idx = cl_data->sensor_idx[i];
mp2->mp2_ops->start(mp2, info);
@@ -285,8 +291,10 @@ static void amd_sfh_suspend(struct amd_mp2_dev *mp2)
}
for (i = 0; i < cl_data->num_hid_devices; i++) {
- if (cl_data->sensor_idx[i] != HPD_IDX &&
- cl_data->sensor_sts[i] == SENSOR_ENABLED) {
+ /* leave HPD alone; policy is controlled by sysfs */
+ if (cl_data->sensor_idx[i] == HPD_IDX)
+ continue;
+ if (cl_data->sensor_sts[i] == SENSOR_ENABLED) {
mp2->mp2_ops->stop(mp2, cl_data->sensor_idx[i]);
status = amd_sfh_wait_for_response
(mp2, cl_data->sensor_idx[i], DISABLE_SENSOR);
@@ -304,6 +312,44 @@ static void amd_sfh_suspend(struct amd_mp2_dev *mp2)
amd_sfh_clear_intr(mp2);
}
+void amd_sfh_toggle_hpd(struct amd_mp2_dev *mp2, bool enabled)
+{
+ struct amdtp_cl_data *cl_data = mp2->cl_data;
+ struct amd_mp2_sensor_info info;
+ int i, status;
+
+ if (mp2->dev_en.is_hpd_enabled == enabled)
+ return;
+
+ for (i = 0; i < cl_data->num_hid_devices; i++) {
+ if (cl_data->sensor_idx[i] != HPD_IDX)
+ continue;
+ info.sensor_idx = cl_data->sensor_idx[i];
+ if (enabled) {
+ mp2->mp2_ops->start(mp2, info);
+ status = amd_sfh_wait_for_response
+ (mp2, cl_data->sensor_idx[i], ENABLE_SENSOR);
+ if (status == 0)
+ status = SENSOR_ENABLED;
+ if (status == SENSOR_ENABLED)
+ cl_data->sensor_sts[i] = SENSOR_ENABLED;
+ } else {
+ mp2->mp2_ops->stop(mp2, cl_data->sensor_idx[i]);
+ status = amd_sfh_wait_for_response
+ (mp2, cl_data->sensor_idx[i], DISABLE_SENSOR);
+ if (status == 0)
+ status = SENSOR_DISABLED;
+ if (status != SENSOR_ENABLED)
+ cl_data->sensor_sts[i] = SENSOR_DISABLED;
+ }
+ dev_dbg(&mp2->pdev->dev, "toggle sid 0x%x (%s) status 0x%x\n",
+ cl_data->sensor_idx[i], get_sensor_name(cl_data->sensor_idx[i]),
+ cl_data->sensor_sts[i]);
+ break;
+ }
+ mp2->dev_en.is_hpd_enabled = enabled;
+}
+
static void amd_mp2_pci_remove(void *privdata)
{
struct amd_mp2_dev *mp2 = privdata;
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.h b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.h
index 21c44990bbeb..797d206641c6 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.h
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.h
@@ -15,12 +15,15 @@
struct amd_sfh1_1_ops {
int (*init)(struct amd_mp2_dev *mp2);
+ void (*toggle_hpd)(struct amd_mp2_dev *mp2, bool enable);
};
int amd_sfh1_1_init(struct amd_mp2_dev *mp2);
+void amd_sfh_toggle_hpd(struct amd_mp2_dev *mp2, bool enabled);
static const struct amd_sfh1_1_ops __maybe_unused sfh1_1_ops = {
.init = amd_sfh1_1_init,
+ .toggle_hpd = amd_sfh_toggle_hpd,
};
#endif
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
index ffb98b4c36cb..837d59e7a661 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
@@ -129,7 +129,7 @@ static int amd_sfh_hpd_info(u8 *user_present)
if (!user_present)
return -EINVAL;
- if (!emp2 || !emp2->dev_en.is_hpd_present)
+ if (!emp2 || !emp2->dev_en.is_hpd_present || !emp2->dev_en.is_hpd_enabled)
return -ENODEV;
hpdstatus.val = readl(emp2->mmio + amd_get_c2p_val(emp2, 4));
diff --git a/drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c b/drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c
index a4a4f324aedd..489cb4fcc2cd 100644
--- a/drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c
+++ b/drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c
@@ -41,7 +41,7 @@ static const __u8 fixed_rdesc[] = {
0x15, 0x00, // Logical Minimum (0) 22
0x25, 0x01, // Logical Maximum (1) 24
0x75, 0x01, // Report Size (1) 26
- 0x95, 0x05, // Report Count (5) 28 /* changed (was 5) */
+ 0x95, 0x05, // Report Count (5) 28 /* changed (was 6) */
0x81, 0x02, // Input (Data,Var,Abs) 30
0x05, 0x09, // Usage Page (Button) /* inserted */
0x09, 0x4a, // Usage (0x4a) /* inserted to be translated as input usage 0x149: BTN_STYLUS3 */
@@ -189,8 +189,68 @@ static const __u8 fixed_rdesc[] = {
0x96, 0x00, 0x01, // Report Count (256) 322
0xb1, 0x02, // Feature (Data,Var,Abs) 325
0xc0, // End Collection 327
+ /* New in Firmware Version: HUION_M220_240524 */
+ 0x05, 0x01, // Usage Page (Generic Desktop) 328
+ 0x09, 0x01, // Usage (Pointer) 330
+ 0xa1, 0x01, // Collection (Application) 332
+ 0x09, 0x01, // Usage (Pointer) 334
+ 0xa1, 0x00, // Collection (Physical) 336
+ 0x05, 0x09, // Usage Page (Button) 338
+ 0x19, 0x01, // UsageMinimum (1) 340
+ 0x29, 0x03, // UsageMaximum (3) 342
+ 0x15, 0x00, // Logical Minimum (0) 344
+ 0x25, 0x01, // Logical Maximum (1) 346
+ 0x85, 0x02, // Report ID (2) 348
+ 0x95, 0x03, // Report Count (3) 350
+ 0x75, 0x01, // Report Size (1) 352
+ 0x81, 0x02, // Input (Data,Var,Abs) 354
+ 0x95, 0x01, // Report Count (1) 356
+ 0x75, 0x05, // Report Size (5) 358
+ 0x81, 0x01, // Input (Cnst,Arr,Abs) 360
+ 0x05, 0x01, // Usage Page (Generic Desktop) 362
+ 0x09, 0x30, // Usage (X) 364
+ 0x09, 0x31, // Usage (Y) 366
+ 0x15, 0x81, // Logical Minimum (-127) 368
+ 0x25, 0x7f, // Logical Maximum (127) 370
+ 0x75, 0x08, // Report Size (8) 372
+ 0x95, 0x02, // Report Count (2) 374
+ 0x81, 0x06, // Input (Data,Var,Rel) 376
+ 0x95, 0x04, // Report Count (4) 378
+ 0x75, 0x08, // Report Size (8) 380
+ 0x81, 0x01, // Input (Cnst,Arr,Abs) 382
+ 0xc0, // End Collection 384
+ 0xc0, // End Collection 385
+ 0x05, 0x0d, // Usage Page (Digitizers) 386
+ 0x09, 0x05, // Usage (Touch Pad) 388
+ 0xa1, 0x01, // Collection (Application) 390
+ 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page FF00) 392
+ 0x09, 0x0c, // Usage (Vendor Usage 0x0c) 395
+ 0x15, 0x00, // Logical Minimum (0) 397
+ 0x26, 0xff, 0x00, // Logical Maximum (255) 399
+ 0x75, 0x08, // Report Size (8) 402
+ 0x95, 0x10, // Report Count (16) 404
+ 0x85, 0x3f, // Report ID (63) 406
+ 0x81, 0x22, // Input (Data,Var,Abs,NoPref) 408
+ 0xc0, // End Collection 410
+ 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page FF00) 411
+ 0x09, 0x0c, // Usage (Vendor Usage 0x0c) 414
+ 0xa1, 0x01, // Collection (Application) 416
+ 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page FF00) 418
+ 0x09, 0x0c, // Usage (Vendor Usage 0x0c) 421
+ 0x15, 0x00, // Logical Minimum (0) 423
+ 0x26, 0xff, 0x00, // Logical Maximum (255) 425
+ 0x85, 0x44, // Report ID (68) 428
+ 0x75, 0x08, // Report Size (8) 430
+ 0x96, 0x6b, 0x05, // Report Count (1387) 432
+ 0x81, 0x00, // Input (Data,Arr,Abs) 435
+ 0xc0, // End Collection 437
};
+#define PRE_240524_RDESC_SIZE 328
+#define PRE_240524_RDESC_FIXED_SIZE 338 /* The original bits of the descriptor */
+#define FW_240524_RDESC_SIZE 438
+#define FW_240524_RDESC_FIXED_SIZE sizeof(fixed_rdesc)
+
SEC(HID_BPF_RDESC_FIXUP)
int BPF_PROG(hid_fix_rdesc_huion_kamvas_pro_19, struct hid_bpf_ctx *hctx)
{
@@ -199,9 +259,14 @@ int BPF_PROG(hid_fix_rdesc_huion_kamvas_pro_19, struct hid_bpf_ctx *hctx)
if (!data)
return 0; /* EPERM check */
- __builtin_memcpy(data, fixed_rdesc, sizeof(fixed_rdesc));
+ if (hctx->size == FW_240524_RDESC_SIZE) {
+ __builtin_memcpy(data, fixed_rdesc, FW_240524_RDESC_FIXED_SIZE);
+ return sizeof(fixed_rdesc);
+ }
+
+ __builtin_memcpy(data, fixed_rdesc, PRE_240524_RDESC_FIXED_SIZE);
- return sizeof(fixed_rdesc);
+ return PRE_240524_RDESC_FIXED_SIZE;
}
/*
@@ -263,7 +328,9 @@ HID_BPF_OPS(huion_Kamvas_pro_19) = {
SEC("syscall")
int probe(struct hid_bpf_probe_args *ctx)
{
- ctx->retval = ctx->rdesc_size != 328;
+
+ ctx->retval = !((ctx->rdesc_size == PRE_240524_RDESC_SIZE) ||
+ (ctx->rdesc_size == FW_240524_RDESC_SIZE));
if (ctx->retval)
ctx->retval = -EINVAL;
diff --git a/drivers/hid/bpf/progs/Huion__KeydialK20.bpf.c b/drivers/hid/bpf/progs/Huion__KeydialK20.bpf.c
new file mode 100644
index 000000000000..ec360d71130f
--- /dev/null
+++ b/drivers/hid/bpf/progs/Huion__KeydialK20.bpf.c
@@ -0,0 +1,531 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Red Hat, Inc
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include "hid_report_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_HUION 0x256C
+#define PID_KEYDIAL_K20 0x0069
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_HUION, PID_KEYDIAL_K20),
+);
+
+/* Filled in by udev-hid-bpf */
+char UDEV_PROP_HUION_FIRMWARE_ID[64];
+
+/* The prefix of the firmware ID we expect for this device. The full firmware
+ * string has a date suffix, e.g. HUION_T21h_230511
+ */
+char EXPECTED_FIRMWARE_ID[] = "HUION_T21h_";
+
+/* How this BPF program works: the tablet has two modes, firmware mode and
+ * tablet mode. In firmware mode (out of the box) the tablet sends button events
+ * as keyboard shortcuts and the dial as a wheel (which is not forwarded by the kernel).
+ * In tablet mode it uses a vendor specific hid report to report everything instead.
+ * Depending on the mode some hid reports are never sent and the corresponding
+ * devices are mute.
+ *
+ * To switch the tablet use e.g. https://github.com/whot/huion-switcher
+ * or one of the tools from the digimend project
+ *
+ * This BPF works for both modes. The huion-switcher tool sets the
+ * HUION_FIRMWARE_ID udev property - if that is set then we disable the firmware
+ * pad and puck reports (by making them vendor collections that are ignored).
+ * If that property is not set we fix all hidraw nodes so the tablet works in
+ * either mode, though the drawback is that the device will show up twice if
+ * you bind it to all event nodes.
+ *
+ * Default report descriptor for the first exposed hidraw node:
+ *
+ * # HUION Huion Keydial_K20
+ * # Report descriptor length: 18 bytes
+ * # 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page 0xFF00) 0
+ * # 0x09, 0x01, // Usage (Vendor Usage 0x01) 3
+ * # 0xa1, 0x01, // Collection (Application) 5
+ * # 0x85, 0x08, // Report ID (8) 7
+ * # 0x75, 0x58, // Report Size (88) 9
+ * # 0x95, 0x01, // Report Count (1) 11
+ * # 0x09, 0x01, // Usage (Vendor Usage 0x01) 13
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 15
+ * # 0xc0, // End Collection 17
+ * R: 18 06 00 ff 09 01 a1 01 85 08 75 58 95 01 09 01 81 02 c0
+ *
+ * This report descriptor appears to be identical for all Huion devices.
+ *
+ * Second hidraw node is the Pad. This one sends the button events until the tablet is
+ * switched to raw mode, then it's mute.
+ *
+ * # HUION Huion Keydial_K20
+ * # Report descriptor length: 135 bytes
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 0
+ * # 0x09, 0x06, // Usage (Keyboard) 2
+ * # 0xa1, 0x01, // Collection (Application) 4
+ * # 0x85, 0x03, // Report ID (3) 6
+ * # 0x05, 0x07, // Usage Page (Keyboard/Keypad) 8
+ * # 0x19, 0xe0, // UsageMinimum (224) 10
+ * # 0x29, 0xe7, // UsageMaximum (231) 12
+ * # 0x15, 0x00, // Logical Minimum (0) 14
+ * # 0x25, 0x01, // Logical Maximum (1) 16
+ * # 0x75, 0x01, // Report Size (1) 18
+ * # 0x95, 0x08, // Report Count (8) 20
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 22
+ * # 0x05, 0x07, // Usage Page (Keyboard/Keypad) 24
+ * # 0x19, 0x00, // UsageMinimum (0) 26
+ * # 0x29, 0xff, // UsageMaximum (255) 28
+ * # 0x26, 0xff, 0x00, // Logical Maximum (255) 30
+ * # 0x75, 0x08, // Report Size (8) 33
+ * # 0x95, 0x06, // Report Count (6) 35
+ * # 0x81, 0x00, // Input (Data,Arr,Abs) 37
+ * # 0xc0, // End Collection 39
+ * # 0x05, 0x0c, // Usage Page (Consumer) 40
+ * # 0x09, 0x01, // Usage (Consumer Control) 42
+ * # 0xa1, 0x01, // Collection (Application) 44
+ * # 0x85, 0x04, // Report ID (4) 46
+ * # 0x05, 0x0c, // Usage Page (Consumer) 48
+ * # 0x19, 0x00, // UsageMinimum (0) 50
+ * # 0x2a, 0x80, 0x03, // UsageMaximum (896) 52
+ * # 0x15, 0x00, // Logical Minimum (0) 55
+ * # 0x26, 0x80, 0x03, // Logical Maximum (896) 57
+ * # 0x75, 0x10, // Report Size (16) 60
+ * # 0x95, 0x01, // Report Count (1) 62
+ * # 0x81, 0x00, // Input (Data,Arr,Abs) 64
+ * # 0xc0, // End Collection 66
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 67
+ * # 0x09, 0x02, // Usage (Mouse) 69
+ * # 0xa1, 0x01, // Collection (Application) 71
+ * # 0x09, 0x01, // Usage (Pointer) 73
+ * # 0x85, 0x05, // Report ID (5) 75
+ * # 0xa1, 0x00, // Collection (Physical) 77
+ * # 0x05, 0x09, // Usage Page (Button) 79
+ * # 0x19, 0x01, // UsageMinimum (1) 81
+ * # 0x29, 0x05, // UsageMaximum (5) 83
+ * # 0x15, 0x00, // Logical Minimum (0) 85
+ * # 0x25, 0x01, // Logical Maximum (1) 87
+ * # 0x95, 0x05, // Report Count (5) 89
+ * # 0x75, 0x01, // Report Size (1) 91
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 93
+ * # 0x95, 0x01, // Report Count (1) 95
+ * # 0x75, 0x03, // Report Size (3) 97
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 99
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 101
+ * # 0x09, 0x30, // Usage (X) 103
+ * # 0x09, 0x31, // Usage (Y) 105
+ * # 0x16, 0x00, 0x80, // Logical Minimum (-32768) 107
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 110
+ * # 0x75, 0x10, // Report Size (16) 113
+ * # 0x95, 0x02, // Report Count (2) 115
+ * # 0x81, 0x06, // Input (Data,Var,Rel) 117
+ * # 0x95, 0x01, // Report Count (1) 119
+ * # 0x75, 0x08, // Report Size (8) 121
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 123
+ * # 0x09, 0x38, // Usage (Wheel) 125
+ * # 0x15, 0x81, // Logical Minimum (-127) 127
+ * # 0x25, 0x7f, // Logical Maximum (127) 129
+ * # 0x81, 0x06, // Input (Data,Var,Rel) 131
+ * # 0xc0, // End Collection 133
+ * # 0xc0, // End Collection 134
+ * R: 135 05 01 09 06 a1 01 85 03 05 07 19 e0 29 e7 15 00 25 01 75 01 95 08 81 02 05 07 19 00 29 ff 26 ff 00 75 08 95 06 81 00 c0 05 0c 09 01 a1 01 85 04 05 0c 19 00 2a 80 03 15 00 26 80 03 75 10 95 01 81 00 c0 05 01 09 02 a1 01 09 01 85 05 a1 00 05 09 19 01 29 05 15 00 25 01 95 05 75 01 81 02 95 01 75 03 81 01 05 01 09 30 09 31 16 00 80 26 ff 7f 75 10 95 02 81 06 95 01 75 08 05 01 09 38 15 81 25 7f 81 06 c0 c0
+ *
+ * Third hidraw node is a multi-axis controller which sends the dial events
+ * and the button inside the dial. If the tablet is switched to raw mode it is mute.
+ *
+ * # HUION Huion Keydial_K20
+ * # Report descriptor length: 108 bytes
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 0
+ * # 0x09, 0x0e, // Usage (System Multi-Axis Controller) 2
+ * # 0xa1, 0x01, // Collection (Application) 4
+ * # 0x85, 0x11, // Report ID (17) 6
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 8
+ * # 0x09, 0x21, // Usage (Puck) 10
+ * # 0xa1, 0x02, // Collection (Logical) 12
+ * # 0x15, 0x00, // Logical Minimum (0) 14
+ * # 0x25, 0x01, // Logical Maximum (1) 16
+ * # 0x75, 0x01, // Report Size (1) 18
+ * # 0x95, 0x01, // Report Count (1) 20
+ * # 0xa1, 0x00, // Collection (Physical) 22
+ * # 0x05, 0x09, // Usage Page (Button) 24
+ * # 0x09, 0x01, // Usage (Button 1) 26
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 28
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 30
+ * # 0x09, 0x33, // Usage (Touch) 32
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 34
+ * # 0x95, 0x06, // Report Count (6) 36
+ * # 0x81, 0x03, // Input (Cnst,Var,Abs) 38
+ * # 0xa1, 0x02, // Collection (Logical) 40
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 42
+ * # 0x09, 0x37, // Usage (Dial) 44
+ * # 0x16, 0x00, 0x80, // Logical Minimum (-32768) 46
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 49
+ * # 0x75, 0x10, // Report Size (16) 52
+ * # 0x95, 0x01, // Report Count (1) 54
+ * # 0x81, 0x06, // Input (Data,Var,Rel) 56
+ * # 0x35, 0x00, // Physical Minimum (0) 58
+ * # 0x46, 0x10, 0x0e, // Physical Maximum (3600) 60
+ * # 0x15, 0x00, // Logical Minimum (0) 63
+ * # 0x26, 0x10, 0x0e, // Logical Maximum (3600) 65
+ * # 0x09, 0x48, // Usage (Resolution Multiplier) 68
+ * # 0xb1, 0x02, // Feature (Data,Var,Abs) 70
+ * # 0x45, 0x00, // Physical Maximum (0) 72
+ * # 0xc0, // End Collection 74
+ * # 0x75, 0x08, // Report Size (8) 75
+ * # 0x95, 0x01, // Report Count (1) 77
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 79
+ * # 0x75, 0x08, // Report Size (8) 81
+ * # 0x95, 0x01, // Report Count (1) 83
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 85
+ * # 0x75, 0x08, // Report Size (8) 87
+ * # 0x95, 0x01, // Report Count (1) 89
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 91
+ * # 0x75, 0x08, // Report Size (8) 93
+ * # 0x95, 0x01, // Report Count (1) 95
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 97
+ * # 0x75, 0x08, // Report Size (8) 99
+ * # 0x95, 0x01, // Report Count (1) 101
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 103
+ * # 0xc0, // End Collection 105
+ * # 0xc0, // End Collection 106
+ * # 0xc0, // End Collection 107
+ * R: 108 05 01 09 0e a1 01 85 11 05 0d 09 21 a1 02 15 00 25 01 75 01 95 01 a1 00 05 09 09 01 81 02 05 0d 09 33 81 02 95 06 81 03 a1 02 05 01 09 37 16 00 80 26 ff 7f 75 10 95 01 81 06 35 00 46 10 0e 15 00 26 10 0e 09 48 b1 02 45 00 c0 75 08 95 01 81 01 75 08 95 01 81 01 75 08 95 01 81 01 75 08 95 01 81 01 75 08 95 01 81 01 c0 c0 c0
+ *
+ */
+
+#define PAD_REPORT_DESCRIPTOR_LENGTH 135
+#define PUCK_REPORT_DESCRIPTOR_LENGTH 108
+#define VENDOR_REPORT_DESCRIPTOR_LENGTH 18
+#define PAD_KBD_REPORT_ID 3
+#define PAD_CC_REPORT_ID 4 // never sends events
+#define PAD_MOUSE_REPORT_ID 5 // never sends events
+#define PUCK_REPORT_ID 17
+#define VENDOR_REPORT_ID 8
+#define PAD_KBD_REPORT_LENGTH 8
+#define PAD_CC_REPORT_LENGTH 3
+#define PAD_MOUSE_REPORT_LENGTH 7
+#define PUCK_REPORT_LENGTH 9
+#define VENDOR_REPORT_LENGTH 12
+
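+/* Buttons and the wheel arrive in separate vendor reports; remember the last
+ * button state so that wheel-only reports can re-send it (see k20_fix_events()).
+ */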
+__u32 last_button_state;
+
+static const __u8 disabled_rdesc_puck[] = {
+ FixedSizeVendorReport(PUCK_REPORT_LENGTH)
+};
+
+static const __u8 disabled_rdesc_pad[] = {
+ FixedSizeVendorReport(PAD_KBD_REPORT_LENGTH)
+ FixedSizeVendorReport(PAD_CC_REPORT_LENGTH)
+ FixedSizeVendorReport(PAD_MOUSE_REPORT_LENGTH)
+};
+
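+/* Replacement descriptor for the vendor node: a tablet pad exposing the 19
+ * buttons and the wheel, matching struct pad_report in k20_fix_events().
+ */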
+static const __u8 fixed_rdesc_vendor[] = {
+ UsagePage_GenericDesktop
+ Usage_GD_Keypad
+ CollectionApplication(
+ // Byte 0
+ // We send our pad events on the vendor report id because why not
+ ReportId(VENDOR_REPORT_ID)
+ UsagePage_Digitizers
+ Usage_Dig_TabletFunctionKeys
+ CollectionPhysical(
+ // Byte 1 is a button so we look like a tablet
+ Usage_Dig_BarrelSwitch // BTN_STYLUS, needed so we get to be a tablet pad
+ ReportCount(1)
+ ReportSize(1)
+ Input(Var|Abs)
+ ReportCount(7) // Padding
+ Input(Const)
+ // Bytes 2/3 - x/y just exist so we get to be a tablet pad
+ UsagePage_GenericDesktop
+ Usage_GD_X
+ Usage_GD_Y
+ LogicalMinimum_i8(0x0)
+ LogicalMaximum_i8(0x1)
+ ReportCount(2)
+ ReportSize(8)
+ Input(Var|Abs)
+ // Bytes 4-7 are the button state for 19 buttons + pad out to u32
+ // We send the first 10 buttons as buttons 1-10 which is BTN_0 -> BTN_9
+ UsagePage_Button
+ UsageMinimum_i8(1)
+ UsageMaximum_i8(10)
+ LogicalMinimum_i8(0x0)
+ LogicalMaximum_i8(0x1)
+ ReportCount(10)
+ ReportSize(1)
+ Input(Var|Abs)
+ // We send the other 9 buttons as buttons 0x31 and above -> BTN_A - BTN_TL2
+ UsageMinimum_i8(0x31)
+ UsageMaximum_i8(0x3a)
+ ReportCount(9)
+ ReportSize(1)
+ Input(Var|Abs)
+ ReportCount(13)
+ ReportSize(1)
+ Input(Const) // padding
+ // Byte 8 is the wheel
+ UsagePage_GenericDesktop
+ Usage_GD_Wheel
+ LogicalMinimum_i8(-1)
+ LogicalMaximum_i8(1)
+ ReportCount(1)
+ ReportSize(8)
+ Input(Var|Rel)
+ )
+ // Make sure we match our original report length
+ FixedSizeVendorReport(VENDOR_REPORT_LENGTH)
+ )
+};
+
+/* Identical to fixed_rdesc_vendor but with different FixedSizeVendorReport */
+static const __u8 fixed_rdesc_pad[] = {
+ UsagePage_GenericDesktop
+ Usage_GD_Keypad
+ CollectionApplication(
+ // Byte 0
+ // We send our pad events on the vendor report id because why not
+ ReportId(VENDOR_REPORT_ID)
+ UsagePage_Digitizers
+ Usage_Dig_TabletFunctionKeys
+ CollectionPhysical(
+ // Byte 1 is a button so we look like a tablet
+ Usage_Dig_BarrelSwitch // BTN_STYLUS, needed so we get to be a tablet pad
+ ReportCount(1)
+ ReportSize(1)
+ Input(Var|Abs)
+ ReportCount(7) // Padding
+ Input(Const)
+ // Bytes 2/3 - x/y just exist so we get to be a tablet pad
+ UsagePage_GenericDesktop
+ Usage_GD_X
+ Usage_GD_Y
+ LogicalMinimum_i8(0x0)
+ LogicalMaximum_i8(0x1)
+ ReportCount(2)
+ ReportSize(8)
+ Input(Var|Abs)
+ // Bytes 4-7 are the button state for 19 buttons + pad out to u32
+ // We send the first 10 buttons as buttons 1-10 which is BTN_0 -> BTN_9
+ UsagePage_Button
+ UsageMinimum_i8(1)
+ UsageMaximum_i8(10)
+ LogicalMinimum_i8(0x0)
+ LogicalMaximum_i8(0x1)
+ ReportCount(10)
+ ReportSize(1)
+ Input(Var|Abs)
+ // We send the other 9 buttons as buttons 0x31 and above -> BTN_A - BTN_TL2
+ UsageMinimum_i8(0x31)
+ UsageMaximum_i8(0x3a)
+ ReportCount(9)
+ ReportSize(1)
+ Input(Var|Abs)
+ ReportCount(13)
+ ReportSize(1)
+ Input(Const) // padding
+ // Byte 8 is the wheel
+ UsagePage_GenericDesktop
+ Usage_GD_Wheel
+ LogicalMinimum_i8(-1)
+ LogicalMaximum_i8(1)
+ ReportCount(1)
+ ReportSize(8)
+ Input(Var|Rel)
+ )
+ // Make sure we match our original report lengths
+ FixedSizeVendorReport(PAD_KBD_REPORT_LENGTH)
+ FixedSizeVendorReport(PAD_CC_REPORT_LENGTH)
+ FixedSizeVendorReport(PAD_MOUSE_REPORT_LENGTH)
+ )
+};
+
+SEC(HID_BPF_RDESC_FIXUP)
+int BPF_PROG(k20_fix_rdesc, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */);
+ __s32 rdesc_size = hctx->size;
+ __u8 have_fw_id;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* If we have a firmware ID and it matches our expected prefix, we
+ * disable the default pad/puck nodes. They won't send events
+ * but cause duplicate devices.
+ */
+ have_fw_id = __builtin_memcmp(UDEV_PROP_HUION_FIRMWARE_ID,
+ EXPECTED_FIRMWARE_ID,
+ sizeof(EXPECTED_FIRMWARE_ID) - 1) == 0;
+ if (rdesc_size == PAD_REPORT_DESCRIPTOR_LENGTH) {
+ if (have_fw_id) {
+ __builtin_memcpy(data, disabled_rdesc_pad, sizeof(disabled_rdesc_pad));
+ return sizeof(disabled_rdesc_pad);
+ } else {
+ __builtin_memcpy(data, fixed_rdesc_pad, sizeof(fixed_rdesc_pad));
+ return sizeof(fixed_rdesc_pad);
+ }
+ }
+ if (rdesc_size == PUCK_REPORT_DESCRIPTOR_LENGTH) {
+ if (have_fw_id) {
+ __builtin_memcpy(data, disabled_rdesc_puck, sizeof(disabled_rdesc_puck));
+ return sizeof(disabled_rdesc_puck);
+ }
+ }
+ /* Always fix the vendor mode so the tablet will work even if nothing sets
+ * the udev property (e.g. huion-switcher run manually)
+ */
+ if (rdesc_size == VENDOR_REPORT_DESCRIPTOR_LENGTH) {
+ __builtin_memcpy(data, fixed_rdesc_vendor, sizeof(fixed_rdesc_vendor));
+ return sizeof(fixed_rdesc_vendor);
+ }
+ return 0;
+}
+
+SEC(HID_BPF_DEVICE_EVENT)
+int BPF_PROG(k20_fix_events, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 10 /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* Only sent if tablet is in raw mode */
+ if (data[0] == VENDOR_REPORT_ID) {
+ /* See fixed_rdesc_pad */
+ struct pad_report {
+ __u8 report_id;
+ __u8 btn_stylus:1;
+ __u8 pad:7;
+ __u8 x;
+ __u8 y;
+ __u32 buttons;
+ __u8 wheel;
+ } __attribute__((packed)) *pad_report;
+
+ __u8 wheel = 0;
+
+ /* Wheel report */
+ if (data[1] == 0xf1) {
+ if (data[5] == 2)
+ wheel = 0xff;
+ else
+ wheel = data[5];
+ } else {
+ /* data[4..6] are the buttons, mapped correctly */
+ last_button_state = data[4] | (data[5] << 8) | (data[6] << 16);
+ wheel = 0; // no wheel data in a button report
+ }
+
+ pad_report = (struct pad_report *)data;
+ pad_report->report_id = VENDOR_REPORT_ID;
+ pad_report->btn_stylus = 0;
+ pad_report->x = 0;
+ pad_report->y = 0;
+ pad_report->buttons = last_button_state;
+ pad_report->wheel = wheel;
+
+ return sizeof(struct pad_report);
+ }
+
+ if (data[0] == PAD_KBD_REPORT_ID) {
+ const __u8 button_mapping[] = {
+ 0x0e, /* Button 1: K */
+ 0x0a, /* Button 2: G */
+ 0x0f, /* Button 3: L */
+ 0x4c, /* Button 4: Delete */
+ 0x0c, /* Button 5: I */
+ 0x07, /* Button 6: D */
+ 0x05, /* Button 7: B */
+ 0x08, /* Button 8: E */
+ 0x16, /* Button 9: S */
+ 0x1d, /* Button 10: Z */
+ 0x06, /* Button 11: C */
+ 0x19, /* Button 12: V */
+ 0xff, /* Button 13: LeftControl */
+ 0xff, /* Button 14: LeftAlt */
+ 0xff, /* Button 15: LeftShift */
+ 0x28, /* Button 16: Return Enter */
+ 0x2c, /* Button 17: Spacebar */
+ 0x11, /* Button 18: N */
+ };
+ /* See fixed_rdesc_pad */
+ struct pad_report {
+ __u8 report_id;
+ __u8 btn_stylus:1;
+ __u8 pad:7;
+ __u8 x;
+ __u8 y;
+ __u32 buttons;
+ __u8 wheel;
+ } __attribute__((packed)) *pad_report;
+ int i, b;
+ __u8 modifiers = data[1];
+ __u32 buttons = 0;
+
+ if (modifiers & 0x01) { /* Control */
+ buttons |= BIT(12);
+ }
+ if (modifiers & 0x02) { /* Shift */
+ buttons |= BIT(14);
+ }
+ if (modifiers & 0x04) { /* Alt */
+ buttons |= BIT(13);
+ }
+
+ for (i = 2; i < PAD_KBD_REPORT_LENGTH; i++) {
+ if (!data[i])
+ break;
+
+ for (b = 0; b < ARRAY_SIZE(button_mapping); b++) {
+ if (data[i] == button_mapping[b]) {
+ buttons |= BIT(b);
+ break;
+ }
+ }
+ data[i] = 0;
+ }
+
+ pad_report = (struct pad_report *)data;
+ pad_report->report_id = VENDOR_REPORT_ID;
+ pad_report->btn_stylus = 0;
+ pad_report->x = 0;
+ pad_report->y = 0;
+ pad_report->buttons = buttons;
+ // The wheel happens on a different hidraw node but its
+ // values are unreliable (as is the button inside the wheel).
+ // So the wheel is simply always zero, if you want the wheel
+ // to work reliably, use the tablet mode.
+ pad_report->wheel = 0;
+
+ return sizeof(struct pad_report);
+ }
+
+ return 0;
+}
+
+HID_BPF_OPS(keydial_k20) = {
+ .hid_device_event = (void *)k20_fix_events,
+ .hid_rdesc_fixup = (void *)k20_fix_rdesc,
+};
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ switch (ctx->rdesc_size) {
+ case PAD_REPORT_DESCRIPTOR_LENGTH:
+ case PUCK_REPORT_DESCRIPTOR_LENGTH:
+ case VENDOR_REPORT_DESCRIPTOR_LENGTH:
+ ctx->retval = 0;
+ break;
+ default:
+ ctx->retval = -EINVAL;
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/drivers/hid/bpf/progs/TUXEDO__Sirius-16-Gen1-and-Gen2.bpf.c b/drivers/hid/bpf/progs/TUXEDO__Sirius-16-Gen1-and-Gen2.bpf.c
new file mode 100644
index 000000000000..a123003fb5fd
--- /dev/null
+++ b/drivers/hid/bpf/progs/TUXEDO__Sirius-16-Gen1-and-Gen2.bpf.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (c) 2025 TUXEDO Computers GmbH
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, 0x048D, 0x8910)
+);
+
+SEC(HID_BPF_DEVICE_EVENT)
+int BPF_PROG(ignore_key_fix_event, struct hid_bpf_ctx *hid_ctx)
+{
+ const int expected_length = 37;
+ const int expected_report_id = 1;
+ __u8 *data;
+ int i;
+
+ if (hid_ctx->size < expected_length)
+ return 0;
+
+ data = hid_bpf_get_data(hid_ctx, 0, expected_length);
+ if (!data || data[0] != expected_report_id)
+ return 0;
+
+ // Zero out F13 (HID usage ID: 0x68) key press.
+ // The first 6 parallel key presses (excluding modifier keys) are
+ // encoded in an array containing usage IDs.
+ for (i = 3; i < 9; ++i)
+ if (data[i] == 0x68)
+ data[i] = 0x00;
+ // Additional parallel key presses starting with the 7th (excluding
+ // modifier keys) are encoded as a bit flag with the offset being
+ // the usage ID.
+ data[22] &= 0xfe;
+
+ return 0;
+}
+
+HID_BPF_OPS(ignore_button) = {
+ .hid_device_event = (void *)ignore_key_fix_event,
+};
+
+char _license[] SEC("license") = "GPL";
diff --git a/drivers/hid/bpf/progs/XPPen__ACK05.bpf.c b/drivers/hid/bpf/progs/XPPen__ACK05.bpf.c
new file mode 100644
index 000000000000..1a0aeea6a081
--- /dev/null
+++ b/drivers/hid/bpf/progs/XPPen__ACK05.bpf.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Red Hat, Inc
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include "hid_report_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define HID_BPF_ASYNC_MAX_CTX 1
+#include "hid_bpf_async.h"
+
+#define VID_UGEE 0x28BD
+/* same PID whether connected directly or through the provided dongle: */
+#define PID_ACK05_REMOTE 0x0202
+
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_UGEE, PID_ACK05_REMOTE),
+);
+
+/*
+ * By default, the pad reports the buttons through a set of key sequences.
+ *
+ * The pad reports a classic keyboard report descriptor:
+ * # HANVON UGEE Shortcut Remote
+ * Report descriptor length: 102 bytes
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 0
+ * 0x09, 0x02, // Usage (Mouse) 2
+ * 0xa1, 0x01, // Collection (Application) 4
+ * 0x85, 0x09, // Report ID (9) 6
+ * 0x09, 0x01, // Usage (Pointer) 8
+ * 0xa1, 0x00, // Collection (Physical) 10
+ * 0x05, 0x09, // Usage Page (Button) 12
+ * 0x19, 0x01, // UsageMinimum (1) 14
+ * 0x29, 0x03, // UsageMaximum (3) 16
+ * 0x15, 0x00, // Logical Minimum (0) 18
+ * 0x25, 0x01, // Logical Maximum (1) 20
+ * 0x95, 0x03, // Report Count (3) 22
+ * 0x75, 0x01, // Report Size (1) 24
+ * 0x81, 0x02, // Input (Data,Var,Abs) 26
+ * 0x95, 0x05, // Report Count (5) 28
+ * 0x81, 0x01, // Input (Cnst,Arr,Abs) 30
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 32
+ * 0x09, 0x30, // Usage (X) 34
+ * 0x09, 0x31, // Usage (Y) 36
+ * 0x26, 0xff, 0x7f, // Logical Maximum (32767) 38
+ * 0x95, 0x02, // Report Count (2) 41
+ * 0x75, 0x10, // Report Size (16) 43
+ * 0x81, 0x02, // Input (Data,Var,Abs) 45
+ * 0x05, 0x0d, // Usage Page (Digitizers) 47
+ * 0x09, 0x30, // Usage (Tip Pressure) 49
+ * 0x26, 0xff, 0x07, // Logical Maximum (2047) 51
+ * 0x95, 0x01, // Report Count (1) 54
+ * 0x75, 0x10, // Report Size (16) 56
+ * 0x81, 0x02, // Input (Data,Var,Abs) 58
+ * 0xc0, // End Collection 60
+ * 0xc0, // End Collection 61
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 62
+ * 0x09, 0x06, // Usage (Keyboard) 64
+ * 0xa1, 0x01, // Collection (Application) 66
+ * 0x85, 0x06, // Report ID (6) 68
+ * 0x05, 0x07, // Usage Page (Keyboard/Keypad) 70
+ * 0x19, 0xe0, // UsageMinimum (224) 72
+ * 0x29, 0xe7, // UsageMaximum (231) 74
+ * 0x15, 0x00, // Logical Minimum (0) 76
+ * 0x25, 0x01, // Logical Maximum (1) 78
+ * 0x75, 0x01, // Report Size (1) 80
+ * 0x95, 0x08, // Report Count (8) 82
+ * 0x81, 0x02, // Input (Data,Var,Abs) 84
+ * 0x05, 0x07, // Usage Page (Keyboard/Keypad) 86
+ * 0x19, 0x00, // UsageMinimum (0) 88
+ * 0x29, 0xff, // UsageMaximum (255) 90
+ * 0x26, 0xff, 0x00, // Logical Maximum (255) 92
+ * 0x75, 0x08, // Report Size (8) 95
+ * 0x95, 0x06, // Report Count (6) 97
+ * 0x81, 0x00, // Input (Data,Arr,Abs) 99
+ * 0xc0, // End Collection 101
+ *
+ * Each button gets assigned the following events:
+ *
+ * Buttons released: 06 00 00 00 00 00 00 00
+ * Button 1: 06 01 12 00 00 00 00 00 -> LControl + o
+ * Button 2: 06 01 11 00 00 00 00 00 -> LControl + n
+ * Button 3: 06 00 3e 00 00 00 00 00 -> F5
+ * Button 4: 06 02 00 00 00 00 00 00 -> LShift
+ * Button 5: 06 01 00 00 00 00 00 00 -> LControl
+ * Button 6: 06 04 00 00 00 00 00 00 -> LAlt
+ * Button 7: 06 01 16 00 00 00 00 00 -> LControl + s
+ * Button 8: 06 01 1d 00 00 00 00 00 -> LControl + z
+ * Button 9: 06 00 2c 00 00 00 00 00 -> Space
+ * Button 10: 06 03 1d 00 00 00 00 00 -> LControl + LShift + z
+ * Wheel: 06 01 57 00 00 00 00 00 -> clockwise rotation (LControl + Keypad Plus)
+ * Wheel: 06 01 56 00 00 00 00 00 -> counter-clockwise rotation
+ * (LControl + Keypad Minus)
+ *
+ * However, multiple buttons can be pressed at the same time, and when this happens,
+ * each button gets assigned a new slot in the Input (Data,Arr,Abs):
+ *
+ * Button 1 + 3: 06 01 12 3e 00 00 00 00 -> LControl + o + F5
+ *
+ * When a modifier is pressed (Button 4, 5, or 6), the assigned key is set to 00:
+ *
+ * Button 5 + 7: 06 01 00 16 00 00 00 00 -> LControl + s
+ *
+ * This is mostly fine, but with Button 8 and Button 10 sharing the same
+ * key value ("z"), there are cases where we cannot know which is which.
+ * This is why we switch the pad into its raw vendor mode (see
+ * switch_to_raw_mode() below), where each button is reported as its own bit.
+ *
+ */
+
+#define PAD_WIRED_DESCRIPTOR_LENGTH 102
+#define PAD_DONGLE_DESCRIPTOR_LENGTH 177
+#define STYLUS_DESCRIPTOR_LENGTH 109
+#define VENDOR_DESCRIPTOR_LENGTH 36
+#define PAD_REPORT_ID 6
+#define RAW_PAD_REPORT_ID 0xf0
+#define RAW_BATTERY_REPORT_ID 0xf2
+#define VENDOR_REPORT_ID 2
+#define PAD_REPORT_LENGTH 8
+#define VENDOR_REPORT_LENGTH 12
+
+__u16 last_button_state;
+
+static const __u8 disabled_rdesc[] = {
+ // Make sure we match our original report length
+ FixedSizeVendorReport(VENDOR_REPORT_LENGTH)
+};
+
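+/* Descriptor appended after the vendor node's original one (see
+ * ack05_fix_rdesc()): a tablet pad report (RAW_PAD_REPORT_ID) plus a battery
+ * report (RAW_BATTERY_REPORT_ID) presented as a stylus.
+ */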
+static const __u8 fixed_rdesc_vendor[] = {
+ UsagePage_GenericDesktop
+ Usage_GD_Keypad
+ CollectionApplication(
+ // -- Byte 0 in report
+ ReportId(RAW_PAD_REPORT_ID)
+ // Byte 1 in report - same as the report ID
+ ReportCount(1)
+ ReportSize(8)
+ Input(Const) // padding (internal report ID)
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
+ UsagePage_Digitizers
+ Usage_Dig_TabletFunctionKeys
+ CollectionPhysical(
+ // Byte 2-3 is the button state
+ UsagePage_Button
+ UsageMinimum_i8(0x01)
+ UsageMaximum_i8(0x0a)
+ LogicalMinimum_i8(0x0)
+ LogicalMaximum_i8(0x1)
+ ReportCount(10)
+ ReportSize(1)
+ Input(Var|Abs)
+ Usage_i8(0x31) // will be mapped as BTN_A / BTN_SOUTH
+ ReportCount(1)
+ Input(Var|Abs)
+ ReportCount(5) // padding
+ Input(Const)
+ // Byte 4 in report - just exists so we get to be a tablet pad
+ Usage_Dig_BarrelSwitch // BTN_STYLUS
+ ReportCount(1)
+ ReportSize(1)
+ Input(Var|Abs)
+ ReportCount(7) // padding
+ Input(Const)
+ // Bytes 5/6 in report - just exists so we get to be a tablet pad
+ UsagePage_GenericDesktop
+ Usage_GD_X
+ Usage_GD_Y
+ ReportCount(2)
+ ReportSize(8)
+ Input(Var|Abs)
+ // Byte 7 in report is the dial
+ Usage_GD_Wheel
+ LogicalMinimum_i8(-1)
+ LogicalMaximum_i8(1)
+ ReportCount(1)
+ ReportSize(8)
+ Input(Var|Rel)
+ )
+ // -- Byte 0 in report
+ ReportId(RAW_BATTERY_REPORT_ID)
+ // Byte 1 in report - same as the report ID
+ ReportCount(1)
+ ReportSize(8)
+ Input(Const) // padding (internal report ID)
+ // Byte 2 in report - always 0x01
+ Input(Const) // padding (always 0x01)
+ UsagePage_Digitizers
+ /*
+ * We represent the device as a stylus to force the kernel to not
+ * directly query its battery state. Instead the kernel will rely
+ * only on the provided events.
+ */
+ Usage_Dig_Stylus
+ CollectionPhysical(
+ // Byte 3 in report - battery value
+ UsagePage_BatterySystem
+ Usage_BS_AbsoluteStateOfCharge
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(100)
+ ReportCount(1)
+ ReportSize(8)
+ Input(Var|Abs)
+ // Byte 4 in report - charging state
+ Usage_BS_Charging
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
+ ReportCount(1)
+ ReportSize(8)
+ Input(Var|Abs)
+ )
+ )
+};
+
+SEC(HID_BPF_RDESC_FIXUP)
+int BPF_PROG(ack05_fix_rdesc, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */);
+ __s32 rdesc_size = hctx->size;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ if (rdesc_size == VENDOR_DESCRIPTOR_LENGTH) {
+ /*
+ * The vendor fixed rdesc is appended after the current one,
+ * to keep the output reports working.
+ */
+ __builtin_memcpy(data + rdesc_size, fixed_rdesc_vendor, sizeof(fixed_rdesc_vendor));
+ return sizeof(fixed_rdesc_vendor) + rdesc_size;
+ }
+
+ hid_set_name(hctx->hid, "Disabled by HID-BPF Hanvon Ugee Shortcut Remote");
+
+ __builtin_memcpy(data, disabled_rdesc, sizeof(disabled_rdesc));
+ return sizeof(disabled_rdesc);
+}
+
+static int HID_BPF_ASYNC_FUN(switch_to_raw_mode)(struct hid_bpf_ctx *hid)
+{
+ static __u8 magic_0[32] = {0x02, 0xb0, 0x04, 0x00, 0x00};
+ int err;
+
+ /*
+ * The proprietary driver sends the 3 following packets after the
+ * above one.
+ * These don't seem to have any effect, so we don't send them to save
+ * some processing time.
+ *
+ * static __u8 magic_1[32] = {0x02, 0xb4, 0x01, 0x00, 0x01};
+ * static __u8 magic_2[32] = {0x02, 0xb4, 0x01, 0x00, 0xff};
+ * static __u8 magic_3[32] = {0x02, 0xb8, 0x04, 0x00, 0x00};
+ */
+
+ err = hid_bpf_hw_output_report(hid, magic_0, sizeof(magic_0));
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+SEC(HID_BPF_DEVICE_EVENT)
+int BPF_PROG(ack05_fix_events, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, PAD_REPORT_LENGTH);
+ int ret = 0;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ if (data[0] != VENDOR_REPORT_ID)
+ return 0;
+
+ /* reconnect event */
+ if (data[1] == 0xf8 && data[2] == 0x02 && data[3] == 0x01)
+ HID_BPF_ASYNC_DELAYED_CALL(switch_to_raw_mode, hctx, 10);
+
+ /* button event */
+ if (data[1] == RAW_PAD_REPORT_ID) {
+ data[0] = data[1];
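+ /* remap the out-of-range dial value 2 to -1 to match the Wheel
+ * range (-1..1) declared in fixed_rdesc_vendor
+ */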
+ if (data[7] == 0x02)
+ data[7] = 0xff;
+ ret = 8;
+ } else if (data[1] == RAW_BATTERY_REPORT_ID) {
+ data[0] = data[1];
+ ret = 5;
+ }
+
+ return ret;
+}
+
+HID_BPF_OPS(xppen_ack05_remote) = {
+ .hid_device_event = (void *)ack05_fix_events,
+ .hid_rdesc_fixup = (void *)ack05_fix_rdesc,
+};
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ switch (ctx->rdesc_size) {
+ case PAD_WIRED_DESCRIPTOR_LENGTH:
+ case PAD_DONGLE_DESCRIPTOR_LENGTH:
+ case STYLUS_DESCRIPTOR_LENGTH:
+ case VENDOR_DESCRIPTOR_LENGTH:
+ ctx->retval = 0;
+ break;
+ default:
+ ctx->retval = -EINVAL;
+ break;
+ }
+
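+ /* Initialise the async context and switch the remote to raw mode right
+ * away; ack05_fix_events() re-issues the switch on reconnect events.
+ */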
+ if (ctx->rdesc_size == VENDOR_DESCRIPTOR_LENGTH) {
+ struct hid_bpf_ctx *hctx = hid_bpf_allocate_context(ctx->hid);
+
+ if (!hctx) {
+ ctx->retval = -EINVAL;
+ return 0;
+ }
+
+ ctx->retval = HID_BPF_ASYNC_INIT(switch_to_raw_mode) ||
+ switch_to_raw_mode(hctx);
+
+ hid_bpf_release_context(hctx);
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/drivers/hid/bpf/progs/XPPen__ArtistPro16Gen2.bpf.c b/drivers/hid/bpf/progs/XPPen__ArtistPro16Gen2.bpf.c
index a669525691aa..0c7e5cc5dc7e 100644
--- a/drivers/hid/bpf/progs/XPPen__ArtistPro16Gen2.bpf.c
+++ b/drivers/hid/bpf/progs/XPPen__ArtistPro16Gen2.bpf.c
@@ -10,10 +10,12 @@
#define VID_UGEE 0x28BD /* VID is shared with SinoWealth and Glorious and prob others */
#define PID_ARTIST_PRO14_GEN2 0x095A
#define PID_ARTIST_PRO16_GEN2 0x095B
+#define PID_ARTIST_PRO19_GEN2 0x096A
HID_BPF_CONFIG(
HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_UGEE, PID_ARTIST_PRO14_GEN2),
- HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_UGEE, PID_ARTIST_PRO16_GEN2)
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_UGEE, PID_ARTIST_PRO16_GEN2),
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_UGEE, PID_ARTIST_PRO19_GEN2)
);
/*
@@ -22,7 +24,7 @@ HID_BPF_CONFIG(
* - when the eraser button is pressed and the stylus is touching the tablet,
* the device sends Tip Switch instead of sending Eraser
*
- * This descriptor uses physical dimensions of the 16" device.
+ * This descriptor uses the physical dimensions of the 16" device.
*/
static const __u8 fixed_rdesc[] = {
0x05, 0x0d, // Usage Page (Digitizers) 0
@@ -100,6 +102,12 @@ int BPF_PROG(hid_fix_rdesc_xppen_artistpro16gen2, struct hid_bpf_ctx *hctx)
data[62] = 0x62;
data[73] = 0x1c;
data[72] = 0xfd;
+ } else if (hctx->hid->product == PID_ARTIST_PRO19_GEN2) {
+ /* 19" screen reports 16.101" x 9.057" */
+ data[63] = 0x3e;
+ data[62] = 0xe5;
+ data[73] = 0x23;
+ data[72] = 0x61;
}
return sizeof(fixed_rdesc);
@@ -177,6 +185,27 @@ static const __u16 angle_offsets_vertical_16[128] = {
188, 186, 184, 182, 180, 178, 176, 174, 172
};
+/* 19" inch screen 16.101" x 9.057" */
+static const __u16 angle_offsets_horizontal_19[128] = {
+ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 25, 27, 29, 31, 33, 35, 37, 39, 41,
+ 42, 44, 46, 48, 50, 51, 53, 55, 57, 58, 60, 62, 63, 65, 67, 68, 70, 71, 73, 74, 76,
+ 77, 79, 80, 82, 83, 84, 86, 87, 88, 89, 90, 92, 93, 94, 95, 96, 97, 98, 99, 100,
+ 101, 102, 103, 104, 104, 105, 106, 106, 107, 108, 108, 109, 109, 110, 110, 111,
+ 111, 112, 112, 112, 112, 113, 113, 113, 113, 113, 113, 113, 113, 113, 113, 113,
+ 113, 113, 112, 112, 112, 112, 111, 111, 110, 110, 109, 109, 108, 108, 107, 106,
+ 106, 105, 104, 104, 103, 102, 101, 100, 99, 98, 97, 96, 95, 94, 93, 92, 90
+};
+static const __u16 angle_offsets_vertical_19[128] = {
+ 0, 4, 7, 11, 14, 18, 21, 25, 28, 32, 35, 38, 42, 45, 49, 52, 56, 59, 62, 66, 69, 72,
+ 75, 79, 82, 85, 88, 91, 95, 98, 101, 104, 107, 110, 113, 116, 118, 121, 124, 127,
+ 129, 132, 135, 137, 140, 142, 145, 147, 150, 152, 154, 157, 159, 161, 163, 165, 167,
+ 169, 171, 173, 174, 176, 178, 179, 181, 183, 184, 185, 187, 188, 189, 190, 192, 193,
+ 194, 195, 195, 196, 197, 198, 198, 199, 199, 200, 200, 201, 201, 201, 201, 201, 201,
+ 201, 201, 201, 201, 201, 200, 200, 199, 199, 198, 198, 197, 196, 195, 195, 194, 193,
+ 192, 190, 189, 188, 187, 185, 184, 183, 181, 179, 178, 176, 174, 173, 171, 169, 167,
+ 165, 163, 161
+};
+
static void compensate_coordinates_by_tilt(__u8 *data, const __u8 idx,
const __s8 tilt, const __u16 (*compensation_table)[128])
{
@@ -241,12 +270,19 @@ static int xppen_16_fix_angle_offset(struct hid_bpf_ctx *hctx)
__s8 tilt_x = (__s8) data[8];
__s8 tilt_y = (__s8) data[9];
- if (hctx->hid->product == PID_ARTIST_PRO14_GEN2) {
+ switch (hctx->hid->product) {
+ case PID_ARTIST_PRO14_GEN2:
compensate_coordinates_by_tilt(data, 2, tilt_x, &angle_offsets_horizontal_14);
compensate_coordinates_by_tilt(data, 4, tilt_y, &angle_offsets_vertical_14);
- } else if (hctx->hid->product == PID_ARTIST_PRO16_GEN2) {
+ break;
+ case PID_ARTIST_PRO16_GEN2:
compensate_coordinates_by_tilt(data, 2, tilt_x, &angle_offsets_horizontal_16);
compensate_coordinates_by_tilt(data, 4, tilt_y, &angle_offsets_vertical_16);
+ break;
+ case PID_ARTIST_PRO19_GEN2:
+ compensate_coordinates_by_tilt(data, 2, tilt_x, &angle_offsets_horizontal_19);
+ compensate_coordinates_by_tilt(data, 4, tilt_y, &angle_offsets_vertical_19);
+ break;
}
return 0;
diff --git a/drivers/hid/bpf/progs/hid_bpf_async.h b/drivers/hid/bpf/progs/hid_bpf_async.h
new file mode 100644
index 000000000000..9ab585434239
--- /dev/null
+++ b/drivers/hid/bpf/progs/hid_bpf_async.h
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (c) 2024 Benjamin Tissoires
+ */
+
+#ifndef __HID_BPF_ASYNC_H__
+#define __HID_BPF_ASYNC_H__
+
+#ifndef HID_BPF_ASYNC_MAX_CTX
+#error "HID_BPF_ASYNC_MAX_CTX should be set to the maximum number of concurrent async functions"
+#endif /* HID_BPF_ASYNC_MAX_CTX */
+
+#define CLOCK_MONOTONIC 1
+
+typedef int (*hid_bpf_async_callback_t)(void *map, int *key, void *value);
+
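+/* Lifecycle of an async context slot:
+ * UNSET -> INITIALIZING -> INITIALIZED -> STARTING -> RUNNING, then back to
+ * INITIALIZED once the callback returns.
+ */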
+enum hid_bpf_async_state {
+ HID_BPF_ASYNC_STATE_UNSET = 0,
+ HID_BPF_ASYNC_STATE_INITIALIZING,
+ HID_BPF_ASYNC_STATE_INITIALIZED,
+ HID_BPF_ASYNC_STATE_STARTING,
+ HID_BPF_ASYNC_STATE_RUNNING,
+};
+
+struct hid_bpf_async_map_elem {
+ struct bpf_spin_lock lock;
+ enum hid_bpf_async_state state;
+ struct bpf_timer t;
+ struct bpf_wq wq;
+ u32 hid;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, HID_BPF_ASYNC_MAX_CTX);
+ __type(key, u32);
+ __type(value, struct hid_bpf_async_map_elem);
+} hid_bpf_async_ctx_map SEC(".maps");
+
+/**
+ * HID_BPF_ASYNC_CB: macro to define an async callback used in a bpf_wq
+ *
+ * The caller is responsible for allocating a key in the async map
+ * with hid_bpf_async_get_ctx().
+ */
+#define HID_BPF_ASYNC_CB(cb) \
+cb(void *map, int *key, void *value); \
+static __always_inline int \
+____##cb(struct hid_bpf_ctx *ctx); \
+typeof(cb(0, 0, 0)) cb(void *map, int *key, void *value) \
+{ \
+ struct hid_bpf_async_map_elem *e; \
+ struct hid_bpf_ctx *ctx; \
+ \
+ e = (struct hid_bpf_async_map_elem *)value; \
+ ctx = hid_bpf_allocate_context(e->hid); \
+ if (!ctx) \
+ return 0; /* EPERM check */ \
+ \
+ e->state = HID_BPF_ASYNC_STATE_RUNNING; \
+ \
+ ____##cb(ctx); \
+ \
+ e->state = HID_BPF_ASYNC_STATE_INITIALIZED; \
+ hid_bpf_release_context(ctx); \
+ return 0; \
+} \
+static __always_inline int \
+____##cb
+
+/**
+ * HID_BPF_ASYNC_FUN: macro to automatically handle async callback contexts
+ *
+ * Needs to be used in conjunction with HID_BPF_ASYNC_INIT and HID_BPF_ASYNC_DELAYED_CALL
+ */
+#define HID_BPF_ASYNC_FUN(fun) \
+fun(struct hid_bpf_ctx *ctx); \
+int ____key__##fun; \
+static int ____async_init_##fun(void) \
+{ \
+ ____key__##fun = hid_bpf_async_get_ctx(); \
+ if (____key__##fun < 0) \
+ return ____key__##fun; \
+ return 0; \
+} \
+static int HID_BPF_ASYNC_CB(____##fun##_cb)(struct hid_bpf_ctx *hctx) \
+{ \
+ return fun(hctx); \
+} \
+typeof(fun(0)) fun
+
+#define HID_BPF_ASYNC_INIT(fun) ____async_init_##fun()
+#define HID_BPF_ASYNC_DELAYED_CALL(fun, ctx, delay) \
+ hid_bpf_async_delayed_call(ctx, delay, ____key__##fun, ____##fun##_cb)
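+
+/*
+ * Sketch of the intended usage (the function name below is illustrative;
+ * XPPen__ACK05.bpf.c uses this exact pattern with switch_to_raw_mode()):
+ *
+ *	#define HID_BPF_ASYNC_MAX_CTX 1
+ *	#include "hid_bpf_async.h"
+ *
+ *	static int HID_BPF_ASYNC_FUN(my_deferred_work)(struct hid_bpf_ctx *hid)
+ *	{
+ *		return 0; // runs later in a workqueue with a valid HID context
+ *	}
+ *
+ * then call HID_BPF_ASYNC_INIT(my_deferred_work) once (e.g. in probe()) and
+ * HID_BPF_ASYNC_DELAYED_CALL(my_deferred_work, hctx, delay_ms) whenever the
+ * work should be scheduled.
+ */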
+
+/*
+ * internal cb for starting the delayed work callback in a workqueue.
+ */
+static int __start_wq_timer_cb(void *map, int *key, void *value)
+{
+ struct hid_bpf_async_map_elem *e = (struct hid_bpf_async_map_elem *)value;
+
+ bpf_wq_start(&e->wq, 0);
+
+ return 0;
+}
+
+static int hid_bpf_async_find_empty_key(void)
+{
+ int i;
+
+ bpf_for(i, 0, HID_BPF_ASYNC_MAX_CTX) {
+ struct hid_bpf_async_map_elem *elem;
+ int key = i;
+
+ elem = bpf_map_lookup_elem(&hid_bpf_async_ctx_map, &key);
+ if (!elem)
+ return -ENOMEM; /* should never happen */
+
+ bpf_spin_lock(&elem->lock);
+
+ if (elem->state == HID_BPF_ASYNC_STATE_UNSET) {
+ elem->state = HID_BPF_ASYNC_STATE_INITIALIZING;
+ bpf_spin_unlock(&elem->lock);
+ return i;
+ }
+
+ bpf_spin_unlock(&elem->lock);
+ }
+
+ return -EINVAL;
+}
+
+static int hid_bpf_async_get_ctx(void)
+{
+ int key = hid_bpf_async_find_empty_key();
+ struct hid_bpf_async_map_elem *elem;
+ int err;
+
+ if (key < 0)
+ return key;
+
+ elem = bpf_map_lookup_elem(&hid_bpf_async_ctx_map, &key);
+ if (!elem)
+ return -EINVAL;
+
+ err = bpf_timer_init(&elem->t, &hid_bpf_async_ctx_map, CLOCK_MONOTONIC);
+ if (err)
+ return err;
+
+ err = bpf_timer_set_callback(&elem->t, __start_wq_timer_cb);
+ if (err)
+ return err;
+
+ err = bpf_wq_init(&elem->wq, &hid_bpf_async_ctx_map, 0);
+ if (err)
+ return err;
+
+ elem->state = HID_BPF_ASYNC_STATE_INITIALIZED;
+
+ return key;
+}
+
+static inline u64 ms_to_ns(u64 milliseconds)
+{
+ return (u64)milliseconds * 1000UL * 1000UL;
+}
+
+static int hid_bpf_async_delayed_call(struct hid_bpf_ctx *hctx, u64 milliseconds, int key,
+ hid_bpf_async_callback_t wq_cb)
+{
+ struct hid_bpf_async_map_elem *elem;
+ int err;
+
+ elem = bpf_map_lookup_elem(&hid_bpf_async_ctx_map, &key);
+ if (!elem)
+ return -EINVAL;
+
+ bpf_spin_lock(&elem->lock);
+ /* The wq must be:
+ * - HID_BPF_ASYNC_STATE_INITIALIZED -> it's been initialized and is ready to be called
+ * - HID_BPF_ASYNC_STATE_RUNNING -> possible re-entry from the wq itself
+ */
+ if (elem->state != HID_BPF_ASYNC_STATE_INITIALIZED &&
+ elem->state != HID_BPF_ASYNC_STATE_RUNNING) {
+ bpf_spin_unlock(&elem->lock);
+ return -EINVAL;
+ }
+ elem->state = HID_BPF_ASYNC_STATE_STARTING;
+ bpf_spin_unlock(&elem->lock);
+
+ elem->hid = hctx->hid->id;
+
+ err = bpf_wq_set_callback(&elem->wq, wq_cb, 0);
+ if (err)
+ return err;
+
+ if (milliseconds) {
+ /* needed for every call because a cancel might unset this */
+ err = bpf_timer_set_callback(&elem->t, __start_wq_timer_cb);
+ if (err)
+ return err;
+
+ err = bpf_timer_start(&elem->t, ms_to_ns(milliseconds), 0);
+ if (err)
+ return err;
+
+ return 0;
+ }
+
+ return bpf_wq_start(&elem->wq, 0);
+}
+
+static inline int hid_bpf_async_call(struct hid_bpf_ctx *ctx, int key,
+ hid_bpf_async_callback_t wq_cb)
+{
+ return hid_bpf_async_delayed_call(ctx, 0, key, wq_cb);
+}
+
+#endif /* __HID_BPF_ASYNC_H__ */
diff --git a/drivers/hid/bpf/progs/hid_bpf_helpers.h b/drivers/hid/bpf/progs/hid_bpf_helpers.h
index 3ba24d125a08..bf19785a6b06 100644
--- a/drivers/hid/bpf/progs/hid_bpf_helpers.h
+++ b/drivers/hid/bpf/progs/hid_bpf_helpers.h
@@ -19,6 +19,25 @@ extern int hid_bpf_hw_request(struct hid_bpf_ctx *ctx,
size_t buf__sz,
enum hid_report_type type,
enum hid_class_request reqtype) __ksym;
+extern int hid_bpf_hw_output_report(struct hid_bpf_ctx *ctx,
+ __u8 *buf, size_t buf__sz) __weak __ksym;
+extern int hid_bpf_input_report(struct hid_bpf_ctx *ctx,
+ enum hid_report_type type,
+ __u8 *data,
+ size_t buf__sz) __weak __ksym;
+extern int hid_bpf_try_input_report(struct hid_bpf_ctx *ctx,
+ enum hid_report_type type,
+ __u8 *data,
+ size_t buf__sz) __weak __ksym;
+
+/* bpf_wq implementation */
+extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __weak __ksym;
+extern int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) __weak __ksym;
+extern int bpf_wq_set_callback_impl(struct bpf_wq *wq,
+ int (callback_fn)(void *map, int *key, void *value),
+ unsigned int flags__k, void *aux__ign) __ksym;
+#define bpf_wq_set_callback(wq, cb, flags) \
+ bpf_wq_set_callback_impl(wq, cb, flags, NULL)
#define HID_MAX_DESCRIPTOR_SIZE 4096
#define HID_IGNORE_EVENT -1
diff --git a/drivers/hid/hid-appletb-bl.c b/drivers/hid/hid-appletb-bl.c
new file mode 100644
index 000000000000..bad2aead8780
--- /dev/null
+++ b/drivers/hid/hid-appletb-bl.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Apple Touch Bar Backlight Driver
+ *
+ * Copyright (c) 2017-2018 Ronald Tschalär
+ * Copyright (c) 2022-2023 Kerem Karabay <kekrby@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/hid.h>
+#include <linux/backlight.h>
+#include <linux/device.h>
+
+#include "hid-ids.h"
+
+#define APPLETB_BL_ON 1
+#define APPLETB_BL_DIM 3
+#define APPLETB_BL_OFF 4
+
+#define HID_UP_APPLEVENDOR_TB_BL 0xff120000
+
+#define HID_VD_APPLE_TB_BRIGHTNESS 0xff120001
+#define HID_USAGE_AUX1 0xff120020
+#define HID_USAGE_BRIGHTNESS 0xff120021
+
+static int appletb_bl_def_brightness = 2;
+module_param_named(brightness, appletb_bl_def_brightness, int, 0444);
+MODULE_PARM_DESC(brightness, "Default brightness:\n"
+ " 0 - Touchbar is off\n"
+ " 1 - Dim brightness\n"
+ " [2] - Full brightness");
+
+struct appletb_bl {
+ struct hid_field *aux1_field, *brightness_field;
+ struct backlight_device *bdev;
+
+ bool full_on;
+};
+
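+/* sysfs brightness levels 0, 1 and 2 map onto the device's off, dim and
+ * full-on codes respectively
+ */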
+static const u8 appletb_bl_brightness_map[] = {
+ APPLETB_BL_OFF,
+ APPLETB_BL_DIM,
+ APPLETB_BL_ON,
+};
+
+static int appletb_bl_set_brightness(struct appletb_bl *bl, u8 brightness)
+{
+ struct hid_report *report = bl->brightness_field->report;
+ struct hid_device *hdev = report->device;
+ int ret;
+
+ ret = hid_set_field(bl->aux1_field, 0, 1);
+ if (ret) {
+ hid_err(hdev, "Failed to set auxiliary field (%pe)\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ ret = hid_set_field(bl->brightness_field, 0, brightness);
+ if (ret) {
+ hid_err(hdev, "Failed to set brightness field (%pe)\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ if (!bl->full_on) {
+ ret = hid_hw_power(hdev, PM_HINT_FULLON);
+ if (ret < 0) {
+ hid_err(hdev, "Device didn't power on (%pe)\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ bl->full_on = true;
+ }
+
+ hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
+
+ if (brightness == APPLETB_BL_OFF) {
+ hid_hw_power(hdev, PM_HINT_NORMAL);
+ bl->full_on = false;
+ }
+
+ return 0;
+}
+
+static int appletb_bl_update_status(struct backlight_device *bdev)
+{
+ struct appletb_bl *bl = bl_get_data(bdev);
+ u8 brightness;
+
+ if (backlight_is_blank(bdev))
+ brightness = APPLETB_BL_OFF;
+ else
+ brightness = appletb_bl_brightness_map[backlight_get_brightness(bdev)];
+
+ return appletb_bl_set_brightness(bl, brightness);
+}
+
+static const struct backlight_ops appletb_bl_backlight_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .update_status = appletb_bl_update_status,
+};
+
+static int appletb_bl_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ struct hid_field *aux1_field, *brightness_field;
+ struct backlight_properties bl_props = { 0 };
+ struct device *dev = &hdev->dev;
+ struct appletb_bl *bl;
+ int ret;
+
+ ret = hid_parse(hdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "HID parse failed\n");
+
+ aux1_field = hid_find_field(hdev, HID_FEATURE_REPORT,
+ HID_VD_APPLE_TB_BRIGHTNESS, HID_USAGE_AUX1);
+
+ brightness_field = hid_find_field(hdev, HID_FEATURE_REPORT,
+ HID_VD_APPLE_TB_BRIGHTNESS, HID_USAGE_BRIGHTNESS);
+
+ if (!aux1_field || !brightness_field)
+ return -ENODEV;
+
+ if (aux1_field->report != brightness_field->report)
+ return dev_err_probe(dev, -ENODEV, "Encountered unexpected report structure\n");
+
+ bl = devm_kzalloc(dev, sizeof(*bl), GFP_KERNEL);
+ if (!bl)
+ return -ENOMEM;
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DRIVER);
+ if (ret)
+ return dev_err_probe(dev, ret, "HID hardware start failed\n");
+
+ ret = hid_hw_open(hdev);
+ if (ret) {
+ dev_err_probe(dev, ret, "HID hardware open failed\n");
+ goto stop_hw;
+ }
+
+ bl->aux1_field = aux1_field;
+ bl->brightness_field = brightness_field;
+
+ ret = appletb_bl_set_brightness(bl,
+ appletb_bl_brightness_map[(appletb_bl_def_brightness > 2) ? 2 : appletb_bl_def_brightness]);
+
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to set default touch bar brightness to %d\n",
+ appletb_bl_def_brightness);
+ goto close_hw;
+ }
+
+ bl_props.type = BACKLIGHT_RAW;
+ bl_props.max_brightness = ARRAY_SIZE(appletb_bl_brightness_map) - 1;
+
+ bl->bdev = devm_backlight_device_register(dev, "appletb_backlight", dev, bl,
+ &appletb_bl_backlight_ops, &bl_props);
+ if (IS_ERR(bl->bdev)) {
+ ret = PTR_ERR(bl->bdev);
+ dev_err_probe(dev, ret, "Failed to register backlight device\n");
+ goto close_hw;
+ }
+
+ hid_set_drvdata(hdev, bl);
+
+ return 0;
+
+close_hw:
+ hid_hw_close(hdev);
+stop_hw:
+ hid_hw_stop(hdev);
+
+ return ret;
+}
+
+static void appletb_bl_remove(struct hid_device *hdev)
+{
+ struct appletb_bl *bl = hid_get_drvdata(hdev);
+
+ appletb_bl_set_brightness(bl, APPLETB_BL_OFF);
+
+ hid_hw_close(hdev);
+ hid_hw_stop(hdev);
+}
+
+static const struct hid_device_id appletb_bl_hid_ids[] = {
+ /* MacBook Pros 2018, 2019, with T2 chip: iBridge DFR Brightness */
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, appletb_bl_hid_ids);
+
+static struct hid_driver appletb_bl_hid_driver = {
+ .name = "hid-appletb-bl",
+ .id_table = appletb_bl_hid_ids,
+ .probe = appletb_bl_probe,
+ .remove = appletb_bl_remove,
+};
+module_hid_driver(appletb_bl_hid_driver);
+
+MODULE_AUTHOR("Ronald Tschalär");
+MODULE_AUTHOR("Kerem Karabay <kekrby@gmail.com>");
+MODULE_DESCRIPTION("MacBook Pro Touch Bar Backlight driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-appletb-kbd.c b/drivers/hid/hid-appletb-kbd.c
new file mode 100644
index 000000000000..d4b95aa3eecb
--- /dev/null
+++ b/drivers/hid/hid-appletb-kbd.c
@@ -0,0 +1,507 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Apple Touch Bar Keyboard Mode Driver
+ *
+ * Copyright (c) 2017-2018 Ronald Tschalär
+ * Copyright (c) 2022-2023 Kerem Karabay <kekrby@gmail.com>
+ * Copyright (c) 2024-2025 Aditya Garg <gargaditya08@live.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/hid.h>
+#include <linux/usb.h>
+#include <linux/input.h>
+#include <linux/sysfs.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/backlight.h>
+#include <linux/timer.h>
+#include <linux/input/sparse-keymap.h>
+
+#include "hid-ids.h"
+
+#define APPLETB_KBD_MODE_ESC 0
+#define APPLETB_KBD_MODE_FN 1
+#define APPLETB_KBD_MODE_SPCL 2
+#define APPLETB_KBD_MODE_OFF 3
+#define APPLETB_KBD_MODE_MAX APPLETB_KBD_MODE_OFF
+
+#define APPLETB_DEVID_KEYBOARD 1
+#define APPLETB_DEVID_TRACKPAD 2
+
+#define HID_USAGE_MODE 0x00ff0004
+
+static int appletb_tb_def_mode = APPLETB_KBD_MODE_SPCL;
+module_param_named(mode, appletb_tb_def_mode, int, 0444);
+MODULE_PARM_DESC(mode, "Default touchbar mode:\n"
+ " 0 - escape key only\n"
+ " 1 - function-keys\n"
+ " [2] - special keys");
+
+static bool appletb_tb_fn_toggle = true;
+module_param_named(fntoggle, appletb_tb_fn_toggle, bool, 0644);
+MODULE_PARM_DESC(fntoggle, "Switch between Fn and media controls on pressing Fn key");
+
+static bool appletb_tb_autodim = true;
+module_param_named(autodim, appletb_tb_autodim, bool, 0644);
+MODULE_PARM_DESC(autodim, "Automatically dim and turn off the Touch Bar after some time");
+
+static int appletb_tb_dim_timeout = 60;
+module_param_named(dim_timeout, appletb_tb_dim_timeout, int, 0644);
+MODULE_PARM_DESC(dim_timeout, "Dim timeout in sec");
+
+static int appletb_tb_idle_timeout = 15;
+module_param_named(idle_timeout, appletb_tb_idle_timeout, int, 0644);
+MODULE_PARM_DESC(idle_timeout, "Idle timeout in sec");
+
+struct appletb_kbd {
+ struct hid_field *mode_field;
+ struct input_handler inp_handler;
+ struct input_handle kbd_handle;
+ struct input_handle tpd_handle;
+ struct backlight_device *backlight_dev;
+ struct timer_list inactivity_timer;
+ bool has_dimmed;
+ bool has_turned_off;
+ u8 saved_mode;
+ u8 current_mode;
+};
+
+static const struct key_entry appletb_kbd_keymap[] = {
+ { KE_KEY, KEY_ESC, { KEY_ESC } },
+ { KE_KEY, KEY_F1, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, KEY_F2, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, KEY_F3, { KEY_RESERVED } },
+ { KE_KEY, KEY_F4, { KEY_RESERVED } },
+ { KE_KEY, KEY_F5, { KEY_KBDILLUMDOWN } },
+ { KE_KEY, KEY_F6, { KEY_KBDILLUMUP } },
+ { KE_KEY, KEY_F7, { KEY_PREVIOUSSONG } },
+ { KE_KEY, KEY_F8, { KEY_PLAYPAUSE } },
+ { KE_KEY, KEY_F9, { KEY_NEXTSONG } },
+ { KE_KEY, KEY_F10, { KEY_MUTE } },
+ { KE_KEY, KEY_F11, { KEY_VOLUMEDOWN } },
+ { KE_KEY, KEY_F12, { KEY_VOLUMEUP } },
+ { KE_END, 0 }
+};
+
+static int appletb_kbd_set_mode(struct appletb_kbd *kbd, u8 mode)
+{
+ struct hid_report *report = kbd->mode_field->report;
+ struct hid_device *hdev = report->device;
+ int ret;
+
+ ret = hid_hw_power(hdev, PM_HINT_FULLON);
+ if (ret) {
+ hid_err(hdev, "Device didn't resume (%pe)\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ ret = hid_set_field(kbd->mode_field, 0, mode);
+ if (ret) {
+ hid_err(hdev, "Failed to set mode field to %u (%pe)\n", mode, ERR_PTR(ret));
+ goto power_normal;
+ }
+
+ hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
+
+ kbd->current_mode = mode;
+
+power_normal:
+ hid_hw_power(hdev, PM_HINT_NORMAL);
+
+ return ret;
+}
+
+static ssize_t mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct appletb_kbd *kbd = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", kbd->current_mode);
+}
+
+static ssize_t mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct appletb_kbd *kbd = dev_get_drvdata(dev);
+ u8 mode;
+ int ret;
+
+ ret = kstrtou8(buf, 0, &mode);
+ if (ret)
+ return ret;
+
+ if (mode > APPLETB_KBD_MODE_MAX)
+ return -EINVAL;
+
+ ret = appletb_kbd_set_mode(kbd, mode);
+
+ return ret < 0 ? ret : size;
+}
+static DEVICE_ATTR_RW(mode);
+
+static struct attribute *appletb_kbd_attrs[] = {
+ &dev_attr_mode.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(appletb_kbd);
+
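+/* Map a Touch Bar key code to its slot: ESC is slot 0, F1..F12 are slots
+ * 1..12; anything else is not a Touch Bar key.
+ */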
+static int appletb_tb_key_to_slot(unsigned int code)
+{
+ switch (code) {
+ case KEY_ESC:
+ return 0;
+ case KEY_F1 ... KEY_F10:
+ return code - KEY_F1 + 1;
+ case KEY_F11 ... KEY_F12:
+ return code - KEY_F11 + 11;
+
+ default:
+ return -EINVAL;
+ }
+}
+
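+/* Dims the Touch Bar after dim_timeout seconds of inactivity, then turns it
+ * off after a further idle_timeout seconds (see reset_inactivity_timer()).
+ */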
+static void appletb_inactivity_timer(struct timer_list *t)
+{
+ struct appletb_kbd *kbd = from_timer(kbd, t, inactivity_timer);
+
+ if (kbd->backlight_dev && appletb_tb_autodim) {
+ if (!kbd->has_dimmed) {
+ backlight_device_set_brightness(kbd->backlight_dev, 1);
+ kbd->has_dimmed = true;
+ mod_timer(&kbd->inactivity_timer, jiffies + msecs_to_jiffies(appletb_tb_idle_timeout * 1000));
+ } else if (!kbd->has_turned_off) {
+ backlight_device_set_brightness(kbd->backlight_dev, 0);
+ kbd->has_turned_off = true;
+ }
+ }
+}
+
+static void reset_inactivity_timer(struct appletb_kbd *kbd)
+{
+ if (kbd->backlight_dev && appletb_tb_autodim) {
+ if (kbd->has_dimmed || kbd->has_turned_off) {
+ backlight_device_set_brightness(kbd->backlight_dev, 2);
+ kbd->has_dimmed = false;
+ kbd->has_turned_off = false;
+ }
+ mod_timer(&kbd->inactivity_timer, jiffies + msecs_to_jiffies(appletb_tb_dim_timeout * 1000));
+ }
+}
+
+static int appletb_kbd_hid_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct appletb_kbd *kbd = hid_get_drvdata(hdev);
+ struct key_entry *translation;
+ struct input_dev *input;
+ int slot;
+
+ if ((usage->hid & HID_USAGE_PAGE) != HID_UP_KEYBOARD || usage->type != EV_KEY)
+ return 0;
+
+ input = field->hidinput->input;
+
+ /*
+ * Skip non-touch-bar keys.
+ *
+ * Either the touch bar itself or usbhid generates a slew of key-down
+ * events for all the meta keys, none of which we're interested in.
+ */
+ slot = appletb_tb_key_to_slot(usage->code);
+ if (slot < 0)
+ return 0;
+
+ reset_inactivity_timer(kbd);
+
+ translation = sparse_keymap_entry_from_scancode(input, usage->code);
+
+ if (translation && kbd->current_mode == APPLETB_KBD_MODE_SPCL) {
+ input_event(input, usage->type, translation->keycode, value);
+
+ return 1;
+ }
+
+ return kbd->current_mode == APPLETB_KBD_MODE_OFF;
+}
+
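+/* Watches the internal keyboard and trackpad: any activity resets the Touch
+ * Bar inactivity timer, and holding the Fn key temporarily flips between the
+ * special-keys and function-keys modes (restored on release).
+ */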
+static void appletb_kbd_inp_event(struct input_handle *handle, unsigned int type,
+ unsigned int code, int value)
+{
+ struct appletb_kbd *kbd = handle->private;
+
+ reset_inactivity_timer(kbd);
+
+ if (type == EV_KEY && code == KEY_FN && appletb_tb_fn_toggle &&
+ (kbd->current_mode == APPLETB_KBD_MODE_SPCL ||
+ kbd->current_mode == APPLETB_KBD_MODE_FN)) {
+ if (value == 1) {
+ kbd->saved_mode = kbd->current_mode;
+ appletb_kbd_set_mode(kbd, kbd->current_mode == APPLETB_KBD_MODE_SPCL
+ ? APPLETB_KBD_MODE_FN : APPLETB_KBD_MODE_SPCL);
+ } else if (value == 0) {
+ if (kbd->saved_mode != kbd->current_mode)
+ appletb_kbd_set_mode(kbd, kbd->saved_mode);
+ }
+ }
+}
+
+static int appletb_kbd_inp_connect(struct input_handler *handler,
+ struct input_dev *dev,
+ const struct input_device_id *id)
+{
+ struct appletb_kbd *kbd = handler->private;
+ struct input_handle *handle;
+ int rc;
+
+ if (id->driver_info == APPLETB_DEVID_KEYBOARD) {
+ handle = &kbd->kbd_handle;
+ handle->name = "tbkbd";
+ } else if (id->driver_info == APPLETB_DEVID_TRACKPAD) {
+ handle = &kbd->tpd_handle;
+ handle->name = "tbtpd";
+ } else {
+ return -ENOENT;
+ }
+
+ if (handle->dev)
+ return -EEXIST;
+
+ handle->open = 0;
+ handle->dev = input_get_device(dev);
+ handle->handler = handler;
+ handle->private = kbd;
+
+ rc = input_register_handle(handle);
+ if (rc)
+ goto err_free_dev;
+
+ rc = input_open_device(handle);
+ if (rc)
+ goto err_unregister_handle;
+
+ return 0;
+
+ err_unregister_handle:
+ input_unregister_handle(handle);
+ err_free_dev:
+ input_put_device(handle->dev);
+ handle->dev = NULL;
+ return rc;
+}
+
+static void appletb_kbd_inp_disconnect(struct input_handle *handle)
+{
+ input_close_device(handle);
+ input_unregister_handle(handle);
+
+ input_put_device(handle->dev);
+ handle->dev = NULL;
+}
+
+static int appletb_kbd_input_configured(struct hid_device *hdev, struct hid_input *hidinput)
+{
+ int idx;
+ struct input_dev *input = hidinput->input;
+
+ /*
+ * Clear various input capabilities that are blindly set by the hid
+ * driver (usbkbd.c)
+ */
+ memset(input->evbit, 0, sizeof(input->evbit));
+ memset(input->keybit, 0, sizeof(input->keybit));
+ memset(input->ledbit, 0, sizeof(input->ledbit));
+
+ __set_bit(EV_REP, input->evbit);
+
+ sparse_keymap_setup(input, appletb_kbd_keymap, NULL);
+
+ for (idx = 0; appletb_kbd_keymap[idx].type != KE_END; idx++)
+ input_set_capability(input, EV_KEY, appletb_kbd_keymap[idx].code);
+
+ return 0;
+}
+
+static const struct input_device_id appletb_kbd_input_devices[] = {
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_BUS |
+ INPUT_DEVICE_ID_MATCH_VENDOR |
+ INPUT_DEVICE_ID_MATCH_KEYBIT,
+ .bustype = BUS_USB,
+ .vendor = USB_VENDOR_ID_APPLE,
+ .keybit = { [BIT_WORD(KEY_FN)] = BIT_MASK(KEY_FN) },
+ .driver_info = APPLETB_DEVID_KEYBOARD,
+ },
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_BUS |
+ INPUT_DEVICE_ID_MATCH_VENDOR |
+ INPUT_DEVICE_ID_MATCH_KEYBIT,
+ .bustype = BUS_USB,
+ .vendor = USB_VENDOR_ID_APPLE,
+ .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
+ .driver_info = APPLETB_DEVID_TRACKPAD,
+ },
+ { }
+};
+
+static bool appletb_kbd_match_internal_device(struct input_handler *handler,
+ struct input_dev *inp_dev)
+{
+ struct device *dev = &inp_dev->dev;
+
+ /* in kernel: dev && !is_usb_device(dev) */
+ while (dev && !(dev->type && dev->type->name &&
+ !strcmp(dev->type->name, "usb_device")))
+ dev = dev->parent;
+
+ /*
+ * Apple labels all their internal keyboards and trackpads as such, so
+ * instead of maintaining an ever-expanding list of product IDs we just
+ * look at the device's product name.
+ */
+ if (dev)
+ return !!strstr(to_usb_device(dev)->product, "Internal Keyboard");
+
+ return false;
+}
+
+static int appletb_kbd_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ struct appletb_kbd *kbd;
+ struct device *dev = &hdev->dev;
+ struct hid_field *mode_field;
+ int ret;
+
+ ret = hid_parse(hdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "HID parse failed\n");
+
+ mode_field = hid_find_field(hdev, HID_OUTPUT_REPORT,
+ HID_GD_KEYBOARD, HID_USAGE_MODE);
+ if (!mode_field)
+ return -ENODEV;
+
+ kbd = devm_kzalloc(dev, sizeof(*kbd), GFP_KERNEL);
+ if (!kbd)
+ return -ENOMEM;
+
+ kbd->mode_field = mode_field;
+
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDINPUT);
+ if (ret)
+ return dev_err_probe(dev, ret, "HID hw start failed\n");
+
+ ret = hid_hw_open(hdev);
+ if (ret) {
+ dev_err_probe(dev, ret, "HID hw open failed\n");
+ goto stop_hw;
+ }
+
+ kbd->backlight_dev = backlight_device_get_by_name("appletb_backlight");
+ if (!kbd->backlight_dev) {
+ dev_err_probe(dev, -ENODEV, "Failed to get backlight device\n");
+ } else {
+ backlight_device_set_brightness(kbd->backlight_dev, 2);
+ timer_setup(&kbd->inactivity_timer, appletb_inactivity_timer, 0);
+ mod_timer(&kbd->inactivity_timer, jiffies + msecs_to_jiffies(appletb_tb_dim_timeout * 1000));
+ }
+
+ kbd->inp_handler.event = appletb_kbd_inp_event;
+ kbd->inp_handler.connect = appletb_kbd_inp_connect;
+ kbd->inp_handler.disconnect = appletb_kbd_inp_disconnect;
+ kbd->inp_handler.name = "appletb";
+ kbd->inp_handler.id_table = appletb_kbd_input_devices;
+ kbd->inp_handler.match = appletb_kbd_match_internal_device;
+ kbd->inp_handler.private = kbd;
+
+ ret = input_register_handler(&kbd->inp_handler);
+ if (ret) {
+ dev_err_probe(dev, ret, "Unable to register keyboard handler\n");
+ goto close_hw;
+ }
+
+ ret = appletb_kbd_set_mode(kbd, appletb_tb_def_mode);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to set touchbar mode\n");
+ goto close_hw;
+ }
+
+ hid_set_drvdata(hdev, kbd);
+
+ return 0;
+
+close_hw:
+ hid_hw_close(hdev);
+stop_hw:
+ hid_hw_stop(hdev);
+ return ret;
+}
+
+static void appletb_kbd_remove(struct hid_device *hdev)
+{
+ struct appletb_kbd *kbd = hid_get_drvdata(hdev);
+
+ appletb_kbd_set_mode(kbd, APPLETB_KBD_MODE_OFF);
+
+ input_unregister_handler(&kbd->inp_handler);
+ del_timer_sync(&kbd->inactivity_timer);
+
+ hid_hw_close(hdev);
+ hid_hw_stop(hdev);
+}
+
+#ifdef CONFIG_PM
+static int appletb_kbd_suspend(struct hid_device *hdev, pm_message_t msg)
+{
+ struct appletb_kbd *kbd = hid_get_drvdata(hdev);
+
+ kbd->saved_mode = kbd->current_mode;
+ appletb_kbd_set_mode(kbd, APPLETB_KBD_MODE_OFF);
+
+ return 0;
+}
+
+static int appletb_kbd_reset_resume(struct hid_device *hdev)
+{
+ struct appletb_kbd *kbd = hid_get_drvdata(hdev);
+
+ appletb_kbd_set_mode(kbd, kbd->saved_mode);
+
+ return 0;
+}
+#endif
+
+static const struct hid_device_id appletb_kbd_hid_ids[] = {
+ /* 2018 and 2019 MacBook Pros with the T2 chip: iBridge Display */
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_DISPLAY) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, appletb_kbd_hid_ids);
+
+static struct hid_driver appletb_kbd_hid_driver = {
+ .name = "hid-appletb-kbd",
+ .id_table = appletb_kbd_hid_ids,
+ .probe = appletb_kbd_probe,
+ .remove = appletb_kbd_remove,
+ .event = appletb_kbd_hid_event,
+ .input_configured = appletb_kbd_input_configured,
+#ifdef CONFIG_PM
+ .suspend = appletb_kbd_suspend,
+ .reset_resume = appletb_kbd_reset_resume,
+#endif
+ .driver.dev_groups = appletb_kbd_groups,
+};
+module_hid_driver(appletb_kbd_hid_driver);
+
+/* The backlight driver should be loaded before the keyboard driver is initialised */
+MODULE_SOFTDEP("pre: hid_appletb_bl");
+
+MODULE_AUTHOR("Ronald Tschalär");
+MODULE_AUTHOR("Kerem Karabay <kekrby@gmail.com>");
+MODULE_AUTHOR("Aditya Garg <gargaditya08@live.com>");
+MODULE_DESCRIPTION("MacBook Pro Touch Bar Keyboard Mode driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 4497b50799db..4741ff626771 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -657,7 +657,11 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
break;
default:
- hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag);
+ if (item->tag >= HID_MAIN_ITEM_TAG_RESERVED_MIN &&
+ item->tag <= HID_MAIN_ITEM_TAG_RESERVED_MAX)
+ hid_warn(parser->device, "reserved main item tag 0x%x\n", item->tag);
+ else
+ hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag);
ret = 0;
}
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index eb6fd2dc75d0..4c1ccf7a267a 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -22,7 +22,6 @@
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_device.h>
-#include <linux/pm_wakeup.h>
#include <linux/unaligned.h>
#include "hid-ids.h"
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 7e400624908e..288a2b864cc4 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -190,6 +190,12 @@
#define USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT 0x8102
#define USB_DEVICE_ID_APPLE_TOUCHBAR_DISPLAY 0x8302
+#define USB_VENDOR_ID_ASETEK 0x2433
+#define USB_DEVICE_ID_ASETEK_INVICTA 0xf300
+#define USB_DEVICE_ID_ASETEK_FORTE 0xf301
+#define USB_DEVICE_ID_ASETEK_LA_PRIMA 0xf303
+#define USB_DEVICE_ID_ASETEK_TONY_KANAAN 0xf306
+
#define USB_VENDOR_ID_ASUS 0x0486
#define USB_DEVICE_ID_ASUS_T91MT 0x0185
#define USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO 0x0186
@@ -262,6 +268,10 @@
#define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578
#define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2 0x5577
+#define USB_VENDOR_ID_CAMMUS 0x3416
+#define USB_DEVICE_ID_CAMMUS_C5 0x0301
+#define USB_DEVICE_ID_CAMMUS_C12 0x0302
+
#define USB_VENDOR_ID_CANDO 0x2087
#define USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH 0x0703
#define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01
@@ -453,6 +463,11 @@
#define USB_VENDOR_ID_EVISION 0x320f
#define USB_DEVICE_ID_EVISION_ICL01 0x5041
+#define USB_VENDOR_ID_FFBEAST 0x045b
+#define USB_DEVICE_ID_FFBEAST_JOYSTICK 0x58f9
+#define USB_DEVICE_ID_FFBEAST_RUDDER 0x5968
+#define USB_DEVICE_ID_FFBEAST_WHEEL 0x59d7
+
#define USB_VENDOR_ID_FLATFROG 0x25b5
#define USB_DEVICE_ID_MULTITOUCH_3200 0x0002
@@ -817,6 +832,13 @@
#define I2C_DEVICE_ID_LG_8001 0x8001
#define I2C_DEVICE_ID_LG_7010 0x7010
+#define USB_VENDOR_ID_LITE_STAR 0x11ff
+#define USB_DEVICE_ID_PXN_V10 0x3245
+#define USB_DEVICE_ID_PXN_V12 0x1212
+#define USB_DEVICE_ID_PXN_V12_LITE 0x1112
+#define USB_DEVICE_ID_PXN_V12_LITE_2 0x1211
+#define USB_DEVICE_LITE_STAR_GT987_FF 0x2141
+
#define USB_VENDOR_ID_LOGITECH 0x046d
#define USB_DEVICE_ID_LOGITECH_Z_10_SPK 0x0a07
#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
@@ -964,6 +986,18 @@
#define USB_VENDOR_ID_MONTEREY 0x0566
#define USB_DEVICE_ID_GENIUS_KB29E 0x3004
+#define USB_VENDOR_ID_MOZA 0x346e
+#define USB_DEVICE_ID_MOZA_R3 0x0005
+#define USB_DEVICE_ID_MOZA_R3_2 0x0015
+#define USB_DEVICE_ID_MOZA_R5 0x0004
+#define USB_DEVICE_ID_MOZA_R5_2 0x0014
+#define USB_DEVICE_ID_MOZA_R9 0x0002
+#define USB_DEVICE_ID_MOZA_R9_2 0x0012
+#define USB_DEVICE_ID_MOZA_R12 0x0006
+#define USB_DEVICE_ID_MOZA_R12_2 0x0016
+#define USB_DEVICE_ID_MOZA_R16_R21 0x0000
+#define USB_DEVICE_ID_MOZA_R16_R21_2 0x0010
+
#define USB_VENDOR_ID_MSI 0x1770
#define USB_DEVICE_ID_MSI_GT683R_LED_PANEL 0xff00
@@ -1377,6 +1411,9 @@
#define USB_DEVICE_ID_VELLEMAN_K8061_FIRST 0x8061
#define USB_DEVICE_ID_VELLEMAN_K8061_LAST 0x8068
+#define USB_VENDOR_ID_VRS 0x0483
+#define USB_DEVICE_ID_VRS_DFP 0xa355
+
#define USB_VENDOR_ID_VTL 0x0306
#define USB_DEVICE_ID_VTL_MULTITOUCH_FF3F 0xff3f
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index a7d9ca02779e..af29ba840522 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -728,11 +728,9 @@ static int lenovo_raw_event_TP_X12_tab(struct hid_device *hdev, u32 raw_data)
if (hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB) {
report_key_event(input, KEY_RFKILL);
return 1;
- } else {
- platform_profile_cycle();
- return 1;
}
- return 0;
+ platform_profile_cycle();
+ return 1;
case TP_X12_RAW_HOTKEY_FN_F10:
 /* TAB1 has PICKUP Phone and TAB2 uses Snipping tool */
(hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB) ?
@@ -778,7 +776,7 @@ static int lenovo_raw_event(struct hid_device *hdev,
if (unlikely((hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB
|| hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB2)
&& size >= 3 && report->id == 0x03))
- return lenovo_raw_event_TP_X12_tab(hdev, le32_to_cpu(*(u32 *)data));
+ return lenovo_raw_event_TP_X12_tab(hdev, le32_to_cpu(*(__le32 *)data));
return 0;
}
diff --git a/drivers/hid/hid-lg-g15.c b/drivers/hid/hid-lg-g15.c
index 53e7b90f9cc3..f8605656257b 100644
--- a/drivers/hid/hid-lg-g15.c
+++ b/drivers/hid/hid-lg-g15.c
@@ -8,11 +8,13 @@
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/leds.h>
+#include <linux/led-class-multicolor.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/usb.h>
#include <linux/wait.h>
+#include <dt-bindings/leds/common.h>
#include "hid-ids.h"
@@ -44,9 +46,13 @@ enum lg_g15_led_type {
};
struct lg_g15_led {
- struct led_classdev cdev;
+ union {
+ struct led_classdev cdev;
+ struct led_classdev_mc mcdev;
+ };
enum led_brightness brightness;
enum lg_g15_led_type led;
+ /* Used to store initial color intensities before subled_info is allocated */
u8 red, green, blue;
};
@@ -229,15 +235,15 @@ static int lg_g510_kbd_led_write(struct lg_g15_data *g15,
struct lg_g15_led *g15_led,
enum led_brightness brightness)
{
+ struct mc_subled *subleds = g15_led->mcdev.subled_info;
int ret;
+ led_mc_calc_color_components(&g15_led->mcdev, brightness);
+
g15->transfer_buf[0] = 5 + g15_led->led;
- g15->transfer_buf[1] =
- DIV_ROUND_CLOSEST(g15_led->red * brightness, 255);
- g15->transfer_buf[2] =
- DIV_ROUND_CLOSEST(g15_led->green * brightness, 255);
- g15->transfer_buf[3] =
- DIV_ROUND_CLOSEST(g15_led->blue * brightness, 255);
+ g15->transfer_buf[1] = subleds[0].brightness;
+ g15->transfer_buf[2] = subleds[1].brightness;
+ g15->transfer_buf[3] = subleds[2].brightness;
ret = hid_hw_raw_request(g15->hdev,
LG_G510_FEATURE_BACKLIGHT_RGB + g15_led->led,
@@ -258,8 +264,9 @@ static int lg_g510_kbd_led_write(struct lg_g15_data *g15,
static int lg_g510_kbd_led_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
+ struct led_classdev_mc *mc = lcdev_to_mccdev(led_cdev);
struct lg_g15_led *g15_led =
- container_of(led_cdev, struct lg_g15_led, cdev);
+ container_of(mc, struct lg_g15_led, mcdev);
struct lg_g15_data *g15 = dev_get_drvdata(led_cdev->dev->parent);
int ret;
@@ -276,82 +283,20 @@ static int lg_g510_kbd_led_set(struct led_classdev *led_cdev,
static enum led_brightness lg_g510_kbd_led_get(struct led_classdev *led_cdev)
{
+ struct led_classdev_mc *mc = lcdev_to_mccdev(led_cdev);
struct lg_g15_led *g15_led =
- container_of(led_cdev, struct lg_g15_led, cdev);
+ container_of(mc, struct lg_g15_led, mcdev);
return g15_led->brightness;
}
-static ssize_t color_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct lg_g15_led *g15_led =
- container_of(led_cdev, struct lg_g15_led, cdev);
- struct lg_g15_data *g15 = dev_get_drvdata(led_cdev->dev->parent);
- unsigned long value;
- int ret;
-
- if (count < 7 || (count == 8 && buf[7] != '\n') || count > 8)
- return -EINVAL;
-
- if (buf[0] != '#')
- return -EINVAL;
-
- ret = kstrtoul(buf + 1, 16, &value);
- if (ret)
- return ret;
-
- mutex_lock(&g15->mutex);
- g15_led->red = (value & 0xff0000) >> 16;
- g15_led->green = (value & 0x00ff00) >> 8;
- g15_led->blue = (value & 0x0000ff);
- ret = lg_g510_kbd_led_write(g15, g15_led, g15_led->brightness);
- mutex_unlock(&g15->mutex);
-
- return (ret < 0) ? ret : count;
-}
-
-static ssize_t color_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct lg_g15_led *g15_led =
- container_of(led_cdev, struct lg_g15_led, cdev);
- struct lg_g15_data *g15 = dev_get_drvdata(led_cdev->dev->parent);
- ssize_t ret;
-
- mutex_lock(&g15->mutex);
- ret = sprintf(buf, "#%02x%02x%02x\n",
- g15_led->red, g15_led->green, g15_led->blue);
- mutex_unlock(&g15->mutex);
-
- return ret;
-}
-
-static DEVICE_ATTR_RW(color);
-
-static struct attribute *lg_g510_kbd_led_attrs[] = {
- &dev_attr_color.attr,
- NULL,
-};
-
-static const struct attribute_group lg_g510_kbd_led_group = {
- .attrs = lg_g510_kbd_led_attrs,
-};
-
-static const struct attribute_group *lg_g510_kbd_led_groups[] = {
- &lg_g510_kbd_led_group,
- NULL,
-};
-
static void lg_g510_leds_sync_work(struct work_struct *work)
{
struct lg_g15_data *g15 = container_of(work, struct lg_g15_data, work);
+ struct lg_g15_led *g15_led = &g15->leds[LG_G15_KBD_BRIGHTNESS];
mutex_lock(&g15->mutex);
- lg_g510_kbd_led_write(g15, &g15->leds[LG_G15_KBD_BRIGHTNESS],
- g15->leds[LG_G15_KBD_BRIGHTNESS].brightness);
+ lg_g510_kbd_led_write(g15, g15_led, g15_led->brightness);
mutex_unlock(&g15->mutex);
}
@@ -667,8 +612,46 @@ static void lg_g15_input_close(struct input_dev *dev)
hid_hw_close(hdev);
}
+static void lg_g15_setup_led_rgb(struct lg_g15_data *g15, int index)
+{
+ int i;
+ struct mc_subled *subled_info;
+
+ g15->leds[index].mcdev.led_cdev.brightness_set_blocking =
+ lg_g510_kbd_led_set;
+ g15->leds[index].mcdev.led_cdev.brightness_get =
+ lg_g510_kbd_led_get;
+ g15->leds[index].mcdev.led_cdev.max_brightness = 255;
+ g15->leds[index].mcdev.num_colors = 3;
+
+ subled_info = devm_kcalloc(&g15->hdev->dev, 3, sizeof(*subled_info), GFP_KERNEL);
+ if (!subled_info)
+ return;
+
+ for (i = 0; i < 3; i++) {
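+ /* LED_COLOR_ID_RED, _GREEN and _BLUE are 1, 2 and 3, hence the i + 1 */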
+ switch (i + 1) {
+ case LED_COLOR_ID_RED:
+ subled_info[i].color_index = LED_COLOR_ID_RED;
+ subled_info[i].intensity = g15->leds[index].red;
+ break;
+ case LED_COLOR_ID_GREEN:
+ subled_info[i].color_index = LED_COLOR_ID_GREEN;
+ subled_info[i].intensity = g15->leds[index].green;
+ break;
+ case LED_COLOR_ID_BLUE:
+ subled_info[i].color_index = LED_COLOR_ID_BLUE;
+ subled_info[i].intensity = g15->leds[index].blue;
+ break;
+ }
+ subled_info[i].channel = i;
+ }
+ g15->leds[index].mcdev.subled_info = subled_info;
+}
+
static int lg_g15_register_led(struct lg_g15_data *g15, int i, const char *name)
{
+ int ret;
+
g15->leds[i].led = i;
g15->leds[i].cdev.name = name;
@@ -685,6 +668,7 @@ static int lg_g15_register_led(struct lg_g15_data *g15, int i, const char *name)
} else {
g15->leds[i].cdev.max_brightness = 1;
}
+ ret = devm_led_classdev_register(&g15->hdev->dev, &g15->leds[i].cdev);
break;
case LG_G510:
case LG_G510_USB_AUDIO:
@@ -697,12 +681,11 @@ static int lg_g15_register_led(struct lg_g15_data *g15, int i, const char *name)
g15->leds[i].cdev.name = "g15::power_on_backlight_val";
fallthrough;
case LG_G15_KBD_BRIGHTNESS:
- g15->leds[i].cdev.brightness_set_blocking =
- lg_g510_kbd_led_set;
- g15->leds[i].cdev.brightness_get =
- lg_g510_kbd_led_get;
- g15->leds[i].cdev.max_brightness = 255;
- g15->leds[i].cdev.groups = lg_g510_kbd_led_groups;
+ /* register multicolor LED */
+ lg_g15_setup_led_rgb(g15, i);
+ ret = devm_led_classdev_multicolor_register_ext(&g15->hdev->dev,
+ &g15->leds[i].mcdev,
+ NULL);
break;
default:
g15->leds[i].cdev.brightness_set_blocking =
@@ -710,11 +693,12 @@ static int lg_g15_register_led(struct lg_g15_data *g15, int i, const char *name)
g15->leds[i].cdev.brightness_get =
lg_g510_mkey_led_get;
g15->leds[i].cdev.max_brightness = 1;
+ ret = devm_led_classdev_register(&g15->hdev->dev, &g15->leds[i].cdev);
}
break;
}
- return devm_led_classdev_register(&g15->hdev->dev, &g15->leds[i].cdev);
+ return ret;
}
/* Common input device init code shared between keyboards and Z-10 speaker handling */
diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c
index 25cfd964dc25..acb9eb18f7cc 100644
--- a/drivers/hid/hid-plantronics.c
+++ b/drivers/hid/hid-plantronics.c
@@ -6,9 +6,6 @@
* Copyright (c) 2015-2018 Terry Junge <terry.junge@plantronics.com>
*/
-/*
- */
-
#include "hid-ids.h"
#include <linux/hid.h>
@@ -23,30 +20,28 @@
#define PLT_VOL_UP 0x00b1
#define PLT_VOL_DOWN 0x00b2
+#define PLT_MIC_MUTE 0x00b5
#define PLT1_VOL_UP (PLT_HID_1_0_PAGE | PLT_VOL_UP)
#define PLT1_VOL_DOWN (PLT_HID_1_0_PAGE | PLT_VOL_DOWN)
+#define PLT1_MIC_MUTE (PLT_HID_1_0_PAGE | PLT_MIC_MUTE)
#define PLT2_VOL_UP (PLT_HID_2_0_PAGE | PLT_VOL_UP)
#define PLT2_VOL_DOWN (PLT_HID_2_0_PAGE | PLT_VOL_DOWN)
+#define PLT2_MIC_MUTE (PLT_HID_2_0_PAGE | PLT_MIC_MUTE)
+#define HID_TELEPHONY_MUTE (HID_UP_TELEPHONY | 0x2f)
+#define HID_CONSUMER_MUTE (HID_UP_CONSUMER | 0xe2)
#define PLT_DA60 0xda60
#define PLT_BT300_MIN 0x0413
#define PLT_BT300_MAX 0x0418
-
-#define PLT_ALLOW_CONSUMER (field->application == HID_CP_CONSUMERCONTROL && \
- (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER)
-
-#define PLT_QUIRK_DOUBLE_VOLUME_KEYS BIT(0)
-#define PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS BIT(1)
-
#define PLT_DOUBLE_KEY_TIMEOUT 5 /* ms */
-#define PLT_FOLLOWED_OPPOSITE_KEY_TIMEOUT 220 /* ms */
struct plt_drv_data {
unsigned long device_type;
- unsigned long last_volume_key_ts;
- u32 quirks;
+ unsigned long last_key_ts;
+ unsigned long double_key_to;
+ __u16 last_key;
};
static int plantronics_input_mapping(struct hid_device *hdev,
@@ -58,34 +53,43 @@ static int plantronics_input_mapping(struct hid_device *hdev,
unsigned short mapped_key;
struct plt_drv_data *drv_data = hid_get_drvdata(hdev);
unsigned long plt_type = drv_data->device_type;
+ int allow_mute = usage->hid == HID_TELEPHONY_MUTE;
+ int allow_consumer = field->application == HID_CP_CONSUMERCONTROL &&
+ (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER &&
+ usage->hid != HID_CONSUMER_MUTE;
/* special case for PTT products */
if (field->application == HID_GD_JOYSTICK)
goto defaulted;
- /* handle volume up/down mapping */
/* non-standard types or multi-HID interfaces - plt_type is PID */
if (!(plt_type & HID_USAGE_PAGE)) {
switch (plt_type) {
case PLT_DA60:
- if (PLT_ALLOW_CONSUMER)
+ if (allow_consumer)
goto defaulted;
- goto ignored;
+ if (usage->hid == HID_CONSUMER_MUTE) {
+ mapped_key = KEY_MICMUTE;
+ goto mapped;
+ }
+ break;
default:
- if (PLT_ALLOW_CONSUMER)
+ if (allow_consumer || allow_mute)
goto defaulted;
}
+ goto ignored;
}
- /* handle standard types - plt_type is 0xffa0uuuu or 0xffa2uuuu */
- /* 'basic telephony compliant' - allow default consumer page map */
- else if ((plt_type & HID_USAGE) >= PLT_BASIC_TELEPHONY &&
- (plt_type & HID_USAGE) != PLT_BASIC_EXCEPTION) {
- if (PLT_ALLOW_CONSUMER)
- goto defaulted;
- }
- /* not 'basic telephony' - apply legacy mapping */
- /* only map if the field is in the device's primary vendor page */
- else if (!((field->application ^ plt_type) & HID_USAGE_PAGE)) {
+
+ /* handle standard consumer control mapping */
+ /* and standard telephony mic mute mapping */
+ if (allow_consumer || allow_mute)
+ goto defaulted;
+
+ /* handle vendor unique types - plt_type is 0xffa0uuuu or 0xffa2uuuu */
+ /* if not 'basic telephony compliant' - map vendor unique controls */
+ if (!((plt_type & HID_USAGE) >= PLT_BASIC_TELEPHONY &&
+ (plt_type & HID_USAGE) != PLT_BASIC_EXCEPTION) &&
+ !((field->application ^ plt_type) & HID_USAGE_PAGE))
switch (usage->hid) {
case PLT1_VOL_UP:
case PLT2_VOL_UP:
@@ -95,8 +99,11 @@ static int plantronics_input_mapping(struct hid_device *hdev,
case PLT2_VOL_DOWN:
mapped_key = KEY_VOLUMEDOWN;
goto mapped;
+ case PLT1_MIC_MUTE:
+ case PLT2_MIC_MUTE:
+ mapped_key = KEY_MICMUTE;
+ goto mapped;
}
- }
/*
* Future mapping of call control or other usages,
@@ -105,6 +112,8 @@ static int plantronics_input_mapping(struct hid_device *hdev,
*/
ignored:
+ hid_dbg(hdev, "usage: %08x (appl: %08x) - ignored\n",
+ usage->hid, field->application);
return -1;
defaulted:
@@ -123,38 +132,26 @@ static int plantronics_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
struct plt_drv_data *drv_data = hid_get_drvdata(hdev);
+ unsigned long prev_tsto, cur_ts;
+ __u16 prev_key, cur_key;
- if (drv_data->quirks & PLT_QUIRK_DOUBLE_VOLUME_KEYS) {
- unsigned long prev_ts, cur_ts;
+ /* Usages are filtered in plantronics_usages. */
- /* Usages are filtered in plantronics_usages. */
+ /* HZ too low for ms resolution - double key detection disabled */
+ /* or it is a key release - handle key presses only. */
+ if (!drv_data->double_key_to || !value)
+ return 0;
- if (!value) /* Handle key presses only. */
- return 0;
+ prev_tsto = drv_data->last_key_ts + drv_data->double_key_to;
+ cur_ts = drv_data->last_key_ts = jiffies;
+ prev_key = drv_data->last_key;
+ cur_key = drv_data->last_key = usage->code;
- prev_ts = drv_data->last_volume_key_ts;
- cur_ts = jiffies;
- if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_DOUBLE_KEY_TIMEOUT)
- return 1; /* Ignore the repeated key. */
-
- drv_data->last_volume_key_ts = cur_ts;
+ /* If the same key occurs in <= double_key_to -- ignore it */
+ if (prev_key == cur_key && time_before_eq(cur_ts, prev_tsto)) {
+ hid_dbg(hdev, "double key %d ignored\n", cur_key);
+ return 1; /* Ignore the repeated key. */
}
- if (drv_data->quirks & PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS) {
- unsigned long prev_ts, cur_ts;
-
- /* Usages are filtered in plantronics_usages. */
-
- if (!value) /* Handle key presses only. */
- return 0;
-
- prev_ts = drv_data->last_volume_key_ts;
- cur_ts = jiffies;
- if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_FOLLOWED_OPPOSITE_KEY_TIMEOUT)
- return 1; /* Ignore the followed opposite volume key. */
-
- drv_data->last_volume_key_ts = cur_ts;
- }
-
return 0;
}
@@ -196,12 +193,16 @@ static int plantronics_probe(struct hid_device *hdev,
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
- goto err;
+ return ret;
}
drv_data->device_type = plantronics_device_type(hdev);
- drv_data->quirks = id->driver_data;
- drv_data->last_volume_key_ts = jiffies - msecs_to_jiffies(PLT_DOUBLE_KEY_TIMEOUT);
+ drv_data->double_key_to = msecs_to_jiffies(PLT_DOUBLE_KEY_TIMEOUT);
+ drv_data->last_key_ts = jiffies - drv_data->double_key_to;
+
+ /* if HZ does not allow ms resolution - disable double key detection */
+ if (drv_data->double_key_to < PLT_DOUBLE_KEY_TIMEOUT)
+ drv_data->double_key_to = 0;
hid_set_drvdata(hdev, drv_data);
@@ -210,29 +211,10 @@ static int plantronics_probe(struct hid_device *hdev,
if (ret)
hid_err(hdev, "hw start failed\n");
-err:
return ret;
}
static const struct hid_device_id plantronics_devices[] = {
- { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
- USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3210_SERIES),
- .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
- { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
- USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES),
- .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
- { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
- USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3215_SERIES),
- .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
- { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
- USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES),
- .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
- { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
- USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3325_SERIES),
- .driver_data = PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS },
- { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
- USB_DEVICE_ID_PLANTRONICS_ENCOREPRO_500_SERIES),
- .driver_data = PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS },
{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
{ }
};
@@ -241,6 +223,14 @@ MODULE_DEVICE_TABLE(hid, plantronics_devices);
static const struct hid_usage_id plantronics_usages[] = {
{ HID_CP_VOLUMEUP, EV_KEY, HID_ANY_ID },
{ HID_CP_VOLUMEDOWN, EV_KEY, HID_ANY_ID },
+ { HID_TELEPHONY_MUTE, EV_KEY, HID_ANY_ID },
+ { HID_CONSUMER_MUTE, EV_KEY, HID_ANY_ID },
+ { PLT2_VOL_UP, EV_KEY, HID_ANY_ID },
+ { PLT2_VOL_DOWN, EV_KEY, HID_ANY_ID },
+ { PLT2_MIC_MUTE, EV_KEY, HID_ANY_ID },
+ { PLT1_VOL_UP, EV_KEY, HID_ANY_ID },
+ { PLT1_VOL_DOWN, EV_KEY, HID_ANY_ID },
+ { PLT1_MIC_MUTE, EV_KEY, HID_ANY_ID },
{ HID_TERMINATOR, HID_TERMINATOR, HID_TERMINATOR }
};
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 5d7a418ccdbe..646171598e41 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -328,8 +328,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_DISPLAY) },
#endif
#if IS_ENABLED(CONFIG_HID_APPLEIR)
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
@@ -338,6 +336,12 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) },
#endif
+#if IS_ENABLED(CONFIG_HID_APPLETB_BL)
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT) },
+#endif
+#if IS_ENABLED(CONFIG_HID_APPLETB_KBD)
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_DISPLAY) },
+#endif
#if IS_ENABLED(CONFIG_HID_ASUS)
{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD) },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) },
@@ -595,6 +599,17 @@ static const struct hid_device_id hid_have_special_driver[] = {
#if IS_ENABLED(CONFIG_HID_PLANTRONICS)
{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
#endif
+#if IS_ENABLED(CONFIG_HID_PLAYSTATION)
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) },
+#endif
#if IS_ENABLED(CONFIG_HID_PRIMAX)
{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
#endif
@@ -664,11 +679,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_BDREMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index 10460b7bde1a..dfd9d22ed559 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -559,15 +559,13 @@ static void steam_set_lizard_mode(struct steam_device *steam, bool enable)
if (steam->gamepad_mode)
enable = false;
+ mutex_lock(&steam->report_mutex);
if (enable) {
- mutex_lock(&steam->report_mutex);
/* enable esc, enter, cursors */
steam_send_report_byte(steam, ID_SET_DEFAULT_DIGITAL_MAPPINGS);
/* reset settings */
steam_send_report_byte(steam, ID_LOAD_DEFAULT_SETTINGS);
- mutex_unlock(&steam->report_mutex);
} else {
- mutex_lock(&steam->report_mutex);
/* disable esc, enter, cursor */
steam_send_report_byte(steam, ID_CLEAR_DIGITAL_MAPPINGS);
@@ -579,15 +577,14 @@ static void steam_set_lizard_mode(struct steam_device *steam, bool enable)
SETTING_RIGHT_TRACKPAD_CLICK_PRESSURE, 0xFFFF, /* disable haptic click */
SETTING_STEAM_WATCHDOG_ENABLE, 0, /* disable watchdog that tests if Steam is active */
0);
- mutex_unlock(&steam->report_mutex);
} else {
steam_write_settings(steam,
SETTING_LEFT_TRACKPAD_MODE, TRACKPAD_NONE, /* disable mouse */
SETTING_RIGHT_TRACKPAD_MODE, TRACKPAD_NONE, /* disable mouse */
0);
- mutex_unlock(&steam->report_mutex);
}
}
+ mutex_unlock(&steam->report_mutex);
}
static int steam_input_open(struct input_dev *dev)
diff --git a/drivers/hid/hid-universal-pidff.c b/drivers/hid/hid-universal-pidff.c
new file mode 100644
index 000000000000..001a0f5efb9d
--- /dev/null
+++ b/drivers/hid/hid-universal-pidff.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * HID UNIVERSAL PIDFF
+ * hid-pidff wrapper for PID-enabled devices
+ * Handles device reports, quirks and extends usable button range
+ *
+ * Copyright (c) 2024, 2025 Oleg Makarenko
+ * Copyright (c) 2024, 2025 Tomasz Pakuła
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/input-event-codes.h>
+#include "hid-ids.h"
+#include "usbhid/hid-pidff.h"
+
+#define JOY_RANGE (BTN_DEAD - BTN_JOYSTICK + 1)
+
+/*
+ * Map buttons manually to extend the default joystick button limit
+ */
+static int universal_pidff_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi, struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if ((usage->hid & HID_USAGE_PAGE) != HID_UP_BUTTON)
+ return 0;
+
+ if (field->application != HID_GD_JOYSTICK)
+ return 0;
+
+ int button = ((usage->hid - 1) & HID_USAGE);
+ int code = button + BTN_JOYSTICK;
+
+ /* Detect the end of JOYSTICK buttons range */
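+ /* Buttons past BTN_DEAD continue from KEY_NEXT_FAVORITE */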
+ if (code > BTN_DEAD)
+ code = button + KEY_NEXT_FAVORITE - JOY_RANGE;
+
+ /*
+ * Map overflowing buttons to KEY_RESERVED so they are not
+ * ignored and still trigger MSC_SCAN
+ */
+ if (code > KEY_MAX)
+ code = KEY_RESERVED;
+
+ hid_map_usage(hi, usage, bit, max, EV_KEY, code);
+ hid_dbg(hdev, "Button %d: usage %d", button, code);
+ return 1;
+}
+
+/*
+ * Check if the device is PID and initialize it
+ * Add quirks after initialisation
+ */
+static int universal_pidff_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int i, error;
+ error = hid_parse(hdev);
+ if (error) {
+ hid_err(hdev, "HID parse failed\n");
+ goto err;
+ }
+
+ error = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
+ if (error) {
+ hid_err(hdev, "HID hw start failed\n");
+ goto err;
+ }
+
+ /* Check if device contains PID usage page */
+ error = 1;
+ for (i = 0; i < hdev->collection_size; i++)
+ if ((hdev->collection[i].usage & HID_USAGE_PAGE) == HID_UP_PID) {
+ error = 0;
+ hid_dbg(hdev, "PID usage page found\n");
+ break;
+ }
+
+ /*
+ * Do not fail as this might be the second "device"
+ * just for additional buttons/axes. Exit cleanly if force
+ * feedback usage page wasn't found (included devices were
+ * tested and confirmed to be USB PID after all).
+ */
+ if (error) {
+ hid_dbg(hdev, "PID usage page not found in the descriptor\n");
+ return 0;
+ }
+
+ /* Check if HID_PID support is enabled */
+ int (*init_function)(struct hid_device *, u32);
+ init_function = hid_pidff_init_with_quirks;
+
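+ /* hid_pidff_init_with_quirks is presumably #defined to NULL when CONFIG_HID_PID is disabled */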
+ if (!init_function) {
+ hid_warn(hdev, "HID_PID support not enabled!\n");
+ return 0;
+ }
+
+ error = init_function(hdev, id->driver_data);
+ if (error) {
+ hid_warn(hdev, "Error initialising force feedback\n");
+ goto err;
+ }
+
+ hid_info(hdev, "Universal pidff driver loaded successfully!\n");
+
+ return 0;
+err:
+ return error;
+}
+
+static int universal_pidff_input_configured(struct hid_device *hdev,
+ struct hid_input *hidinput)
+{
+ int axis;
+ struct input_dev *input = hidinput->input;
+
+ if (!input->absinfo)
+ return 0;
+
+ /* Decrease fuzz and deadzone on available axes */
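+ /* ABS_X (typically the wheel/steering axis) keeps zero fuzz */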
+ for (axis = ABS_X; axis <= ABS_BRAKE; axis++) {
+ if (!test_bit(axis, input->absbit))
+ continue;
+
+ input_set_abs_params(input, axis,
+ input->absinfo[axis].minimum,
+ input->absinfo[axis].maximum,
+ axis == ABS_X ? 0 : 8, 0);
+ }
+
+ /* Remove fuzz and deadzone from the second joystick axis */
+ if (hdev->vendor == USB_VENDOR_ID_FFBEAST &&
+ hdev->product == USB_DEVICE_ID_FFBEAST_JOYSTICK)
+ input_set_abs_params(input, ABS_Y,
+ input->absinfo[ABS_Y].minimum,
+ input->absinfo[ABS_Y].maximum, 0, 0);
+
+ return 0;
+}
+
+static const struct hid_device_id universal_pidff_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R3),
+ .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R3_2),
+ .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R5),
+ .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R5_2),
+ .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R9),
+ .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R9_2),
+ .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R12),
+ .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R12_2),
+ .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R16_R21),
+ .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R16_R21_2),
+ .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CAMMUS, USB_DEVICE_ID_CAMMUS_C5) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CAMMUS, USB_DEVICE_ID_CAMMUS_C12) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_VRS, USB_DEVICE_ID_VRS_DFP),
+ .driver_data = HID_PIDFF_QUIRK_PERMISSIVE_CONTROL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_FFBEAST, USB_DEVICE_ID_FFBEAST_JOYSTICK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_FFBEAST, USB_DEVICE_ID_FFBEAST_RUDDER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_FFBEAST, USB_DEVICE_ID_FFBEAST_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V10),
+ .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V12),
+ .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V12_LITE),
+ .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V12_LITE_2),
+ .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_LITE_STAR_GT987_FF),
+ .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASETEK, USB_DEVICE_ID_ASETEK_INVICTA) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASETEK, USB_DEVICE_ID_ASETEK_FORTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASETEK, USB_DEVICE_ID_ASETEK_LA_PRIMA) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASETEK, USB_DEVICE_ID_ASETEK_TONY_KANAAN) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, universal_pidff_devices);
+
+static struct hid_driver universal_pidff = {
+ .name = "hid-universal-pidff",
+ .id_table = universal_pidff_devices,
+ .input_mapping = universal_pidff_input_mapping,
+ .probe = universal_pidff_probe,
+ .input_configured = universal_pidff_input_configured
+};
+module_hid_driver(universal_pidff);
+
+MODULE_DESCRIPTION("Universal driver for USB PID Force Feedback devices");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Oleg Makarenko <oleg@makarenk.ooo>");
+MODULE_AUTHOR("Tomasz Pakuła <tomasz.pakula.oficjalny@gmail.com>");
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
index 2de93f4a25ca..fa51155ebe39 100644
--- a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
@@ -557,20 +557,19 @@ static int quicki2c_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- ret = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
+ mem_addr = pcim_iomap_region(pdev, 0, KBUILD_MODNAME);
+ ret = PTR_ERR_OR_ZERO(mem_addr);
if (ret) {
dev_err_once(&pdev->dev, "Failed to get PCI regions, ret = %d.\n", ret);
goto disable_pci_device;
}
- mem_addr = pcim_iomap_table(pdev)[0];
-
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (ret) {
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err_once(&pdev->dev, "No usable DMA configuration %d\n", ret);
- goto unmap_io_region;
+ goto disable_pci_device;
}
}
@@ -578,7 +577,7 @@ static int quicki2c_probe(struct pci_dev *pdev,
if (ret < 0) {
dev_err_once(&pdev->dev,
"Failed to allocate IRQ vectors. ret = %d\n", ret);
- goto unmap_io_region;
+ goto disable_pci_device;
}
pdev->irq = pci_irq_vector(pdev, 0);
@@ -587,7 +586,7 @@ static int quicki2c_probe(struct pci_dev *pdev,
if (IS_ERR(qcdev)) {
dev_err_once(&pdev->dev, "QuickI2C device init failed\n");
ret = PTR_ERR(qcdev);
- goto unmap_io_region;
+ goto disable_pci_device;
}
pci_set_drvdata(pdev, qcdev);
@@ -666,8 +665,6 @@ dma_deinit:
quicki2c_dma_deinit(qcdev);
dev_deinit:
quicki2c_dev_deinit(qcdev);
-unmap_io_region:
- pcim_iounmap_regions(pdev, BIT(0));
disable_pci_device:
pci_clear_master(pdev);
@@ -697,7 +694,6 @@ static void quicki2c_remove(struct pci_dev *pdev)
quicki2c_dev_deinit(qcdev);
- pcim_iounmap_regions(pdev, BIT(0));
pci_clear_master(pdev);
}
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
index 6b2c7620be2b..d4f89f44c3b4 100644
--- a/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
@@ -426,7 +426,7 @@ static struct quickspi_device *quickspi_dev_init(struct pci_dev *pdev, void __io
thc_interrupt_enable(qsdev->thc_hw, true);
- qsdev->state = QUICKSPI_INITED;
+ qsdev->state = QUICKSPI_INITIATED;
return qsdev;
}
@@ -575,20 +575,19 @@ static int quickspi_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- ret = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
+ mem_addr = pcim_iomap_region(pdev, 0, KBUILD_MODNAME);
+ ret = PTR_ERR_OR_ZERO(mem_addr);
if (ret) {
dev_err(&pdev->dev, "Failed to get PCI regions, ret = %d.\n", ret);
goto disable_pci_device;
}
- mem_addr = pcim_iomap_table(pdev)[0];
-
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (ret) {
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pdev->dev, "No usable DMA configuration %d\n", ret);
- goto unmap_io_region;
+ goto disable_pci_device;
}
}
@@ -596,7 +595,7 @@ static int quickspi_probe(struct pci_dev *pdev,
if (ret < 0) {
dev_err(&pdev->dev,
"Failed to allocate IRQ vectors. ret = %d\n", ret);
- goto unmap_io_region;
+ goto disable_pci_device;
}
pdev->irq = pci_irq_vector(pdev, 0);
@@ -605,7 +604,7 @@ static int quickspi_probe(struct pci_dev *pdev,
if (IS_ERR(qsdev)) {
dev_err(&pdev->dev, "QuickSPI device init failed\n");
ret = PTR_ERR(qsdev);
- goto unmap_io_region;
+ goto disable_pci_device;
}
pci_set_drvdata(pdev, qsdev);
@@ -668,8 +667,6 @@ dma_deinit:
quickspi_dma_deinit(qsdev);
dev_deinit:
quickspi_dev_deinit(qsdev);
-unmap_io_region:
- pcim_iounmap_regions(pdev, BIT(0));
disable_pci_device:
pci_clear_master(pdev);
@@ -699,7 +696,6 @@ static void quickspi_remove(struct pci_dev *pdev)
quickspi_dev_deinit(qsdev);
- pcim_iounmap_regions(pdev, BIT(0));
pci_clear_master(pdev);
}
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h
index 75179bb26767..6fdf674b21c5 100644
--- a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h
@@ -57,9 +57,9 @@
enum quickspi_dev_state {
QUICKSPI_NONE,
+ QUICKSPI_INITIATED,
QUICKSPI_RESETING,
- QUICKSPI_RESETED,
- QUICKSPI_INITED,
+ QUICKSPI_RESET,
QUICKSPI_ENABLED,
QUICKSPI_DISABLED,
};
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c
index 918050af73e5..e6ba2ddcc9cb 100644
--- a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c
@@ -333,7 +333,7 @@ int reset_tic(struct quickspi_device *qsdev)
return -EINVAL;
}
- qsdev->state = QUICKSPI_RESETED;
+ qsdev->state = QUICKSPI_RESET;
ret = quickspi_get_device_descriptor(qsdev);
if (ret)
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c
index eb23bea77686..8f97e71df7f4 100644
--- a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c
@@ -295,7 +295,7 @@ static void release_dma_buffers(struct thc_device *dev,
return;
for (i = 0; i < config->prd_tbl_num; i++) {
- if (!config->sgls[i] | !config->sgls_nent[i])
+ if (!config->sgls[i] || !config->sgls_nent[i])
continue;
dma_unmap_sg(dev->dev, config->sgls[i],
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index a6eb6fe6130d..44c2351b870f 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -35,6 +35,7 @@
#include <linux/hid-debug.h>
#include <linux/hidraw.h>
#include "usbhid.h"
+#include "hid-pidff.h"
/*
* Version Information
diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c
index 3b4ee21cd811..8dfd2c554a27 100644
--- a/drivers/hid/usbhid/hid-pidff.c
+++ b/drivers/hid/usbhid/hid-pidff.c
@@ -3,27 +3,27 @@
* Force feedback driver for USB HID PID compliant devices
*
* Copyright (c) 2005, 2006 Anssi Hannula <anssi.hannula@gmail.com>
+ * Upgraded 2025 by Oleg Makarenko and Tomasz Pakuła
*/
-/*
- */
-
-/* #define DEBUG */
-
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include "hid-pidff.h"
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/usb.h>
-
#include <linux/hid.h>
+#include <linux/minmax.h>
-#include "usbhid.h"
#define PID_EFFECTS_MAX 64
+#define PID_INFINITE U16_MAX
-/* Report usage table used to put reports into an array */
+/* The Linux force feedback API uses milliseconds as its time unit */
+#define FF_TIME_EXPONENT -3
+#define FF_INFINITE 0
+/* Report usage table used to put reports into an array */
#define PID_SET_EFFECT 0
#define PID_EFFECT_OPERATION 1
#define PID_DEVICE_GAIN 2
@@ -44,12 +44,19 @@ static const u8 pidff_reports[] = {
0x21, 0x77, 0x7d, 0x7f, 0x89, 0x90, 0x96, 0xab,
0x5a, 0x5f, 0x6e, 0x73, 0x74
};
+/*
+ * device_control is really 0x95, but 0x96 specified
+ * as it is the usage of the only field in that report.
+ */
-/* device_control is really 0x95, but 0x96 specified as it is the usage of
-the only field in that report */
+/* PID special fields */
+#define PID_EFFECT_TYPE 0x25
+#define PID_DIRECTION 0x57
+#define PID_EFFECT_OPERATION_ARRAY 0x78
+#define PID_BLOCK_LOAD_STATUS 0x8b
+#define PID_DEVICE_CONTROL_ARRAY 0x96
/* Value usage tables used to put fields and values into arrays */
-
#define PID_EFFECT_BLOCK_INDEX 0
#define PID_DURATION 1
@@ -107,10 +114,13 @@ static const u8 pidff_device_gain[] = { 0x7e };
static const u8 pidff_pool[] = { 0x80, 0x83, 0xa9 };
/* Special field key tables used to put special field keys into arrays */
-
#define PID_ENABLE_ACTUATORS 0
-#define PID_RESET 1
-static const u8 pidff_device_control[] = { 0x97, 0x9a };
+#define PID_DISABLE_ACTUATORS 1
+#define PID_STOP_ALL_EFFECTS 2
+#define PID_RESET 3
+#define PID_PAUSE 4
+#define PID_CONTINUE 5
+static const u8 pidff_device_control[] = { 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c };
#define PID_CONSTANT 0
#define PID_RAMP 1
@@ -130,12 +140,16 @@ static const u8 pidff_effect_types[] = {
#define PID_BLOCK_LOAD_SUCCESS 0
#define PID_BLOCK_LOAD_FULL 1
-static const u8 pidff_block_load_status[] = { 0x8c, 0x8d };
+#define PID_BLOCK_LOAD_ERROR 2
+static const u8 pidff_block_load_status[] = { 0x8c, 0x8d, 0x8e };
#define PID_EFFECT_START 0
#define PID_EFFECT_STOP 1
static const u8 pidff_effect_operation_status[] = { 0x79, 0x7b };
+/* Polar direction 90 degrees (East) */
+#define PIDFF_FIXED_WHEEL_DIRECTION 0x4000
+
struct pidff_usage {
struct hid_field *field;
s32 *value;
@@ -159,8 +173,10 @@ struct pidff_device {
struct pidff_usage effect_operation[sizeof(pidff_effect_operation)];
struct pidff_usage block_free[sizeof(pidff_block_free)];
- /* Special field is a field that is not composed of
- usage<->value pairs that pidff_usage values are */
+ /*
+ * Special field is a field that is not composed of
+ * usage<->value pairs that pidff_usage values are
+ */
/* Special field in create_new_effect */
struct hid_field *create_new_effect_type;
@@ -184,30 +200,61 @@ struct pidff_device {
int operation_id[sizeof(pidff_effect_operation_status)];
int pid_id[PID_EFFECTS_MAX];
+
+ u32 quirks;
+ u8 effect_count;
};
/*
+ * Clamp value for a given field
+ */
+static s32 pidff_clamp(s32 i, struct hid_field *field)
+{
+ s32 clamped = clamp(i, field->logical_minimum, field->logical_maximum);
+ pr_debug("clamped from %d to %d", i, clamped);
+ return clamped;
+}
+
+/*
* Scale an unsigned value with range 0..max for the given field
*/
static int pidff_rescale(int i, int max, struct hid_field *field)
{
return i * (field->logical_maximum - field->logical_minimum) / max +
- field->logical_minimum;
+ field->logical_minimum;
}
/*
- * Scale a signed value in range -0x8000..0x7fff for the given field
+ * Scale a signed value in range S16_MIN..S16_MAX for the given field
*/
static int pidff_rescale_signed(int i, struct hid_field *field)
{
- return i == 0 ? 0 : i >
- 0 ? i * field->logical_maximum / 0x7fff : i *
- field->logical_minimum / -0x8000;
+ if (i > 0)
+ return i * field->logical_maximum / S16_MAX;
+ if (i < 0)
+ return i * field->logical_minimum / S16_MIN;
+ return 0;
+}
+
+/*
+ * Scale time value from Linux default (ms) to field units
+ */
+static u32 pidff_rescale_time(u16 time, struct hid_field *field)
+{
+ u32 scaled_time = time;
+ int exponent = field->unit_exponent;
+ pr_debug("time field exponent: %d\n", exponent);
+
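+ /* e.g. a field exponent of -4 (0.1 ms units) turns 20 ms into a value of 200 */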
+ for (; exponent < FF_TIME_EXPONENT; exponent++)
+ scaled_time *= 10;
+ for (; exponent > FF_TIME_EXPONENT; exponent--)
+ scaled_time /= 10;
+
+ pr_debug("time calculated from %d to %d\n", time, scaled_time);
+ return scaled_time;
}
static void pidff_set(struct pidff_usage *usage, u16 value)
{
- usage->value[0] = pidff_rescale(value, 0xffff, usage->field);
+ usage->value[0] = pidff_rescale(value, U16_MAX, usage->field);
pr_debug("calculated from %d to %d\n", value, usage->value[0]);
}
@@ -218,14 +265,35 @@ static void pidff_set_signed(struct pidff_usage *usage, s16 value)
else {
if (value < 0)
usage->value[0] =
- pidff_rescale(-value, 0x8000, usage->field);
+ pidff_rescale(-value, -S16_MIN, usage->field);
else
usage->value[0] =
- pidff_rescale(value, 0x7fff, usage->field);
+ pidff_rescale(value, S16_MAX, usage->field);
}
pr_debug("calculated from %d to %d\n", value, usage->value[0]);
}
+static void pidff_set_time(struct pidff_usage *usage, u16 time)
+{
+ u32 modified_time = pidff_rescale_time(time, usage->field);
+ usage->value[0] = pidff_clamp(modified_time, usage->field);
+}
+
+static void pidff_set_duration(struct pidff_usage *usage, u16 duration)
+{
+ /* Infinite value conversion from Linux API -> PID */
+ if (duration == FF_INFINITE)
+ duration = PID_INFINITE;
+
+ /* PID defines INFINITE as the max possible value for duration field */
+ if (duration == PID_INFINITE) {
+ usage->value[0] = (1U << usage->field->report_size) - 1;
+ return;
+ }
+
+ pidff_set_time(usage, duration);
+}
+
/*
* Send envelope report to the device
*/
@@ -233,19 +301,21 @@ static void pidff_set_envelope_report(struct pidff_device *pidff,
struct ff_envelope *envelope)
{
pidff->set_envelope[PID_EFFECT_BLOCK_INDEX].value[0] =
- pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0];
+ pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0];
pidff->set_envelope[PID_ATTACK_LEVEL].value[0] =
- pidff_rescale(envelope->attack_level >
- 0x7fff ? 0x7fff : envelope->attack_level, 0x7fff,
- pidff->set_envelope[PID_ATTACK_LEVEL].field);
+ pidff_rescale(envelope->attack_level >
+ S16_MAX ? S16_MAX : envelope->attack_level, S16_MAX,
+ pidff->set_envelope[PID_ATTACK_LEVEL].field);
pidff->set_envelope[PID_FADE_LEVEL].value[0] =
- pidff_rescale(envelope->fade_level >
- 0x7fff ? 0x7fff : envelope->fade_level, 0x7fff,
- pidff->set_envelope[PID_FADE_LEVEL].field);
+ pidff_rescale(envelope->fade_level >
+ S16_MAX ? S16_MAX : envelope->fade_level, S16_MAX,
+ pidff->set_envelope[PID_FADE_LEVEL].field);
- pidff->set_envelope[PID_ATTACK_TIME].value[0] = envelope->attack_length;
- pidff->set_envelope[PID_FADE_TIME].value[0] = envelope->fade_length;
+ pidff_set_time(&pidff->set_envelope[PID_ATTACK_TIME],
+ envelope->attack_length);
+ pidff_set_time(&pidff->set_envelope[PID_FADE_TIME],
+ envelope->fade_length);
hid_dbg(pidff->hid, "attack %u => %d\n",
envelope->attack_level,
@@ -261,10 +331,22 @@ static void pidff_set_envelope_report(struct pidff_device *pidff,
static int pidff_needs_set_envelope(struct ff_envelope *envelope,
struct ff_envelope *old)
{
- return envelope->attack_level != old->attack_level ||
- envelope->fade_level != old->fade_level ||
+ bool needs_new_envelope;
+ needs_new_envelope = envelope->attack_level != 0 ||
+ envelope->fade_level != 0 ||
+ envelope->attack_length != 0 ||
+ envelope->fade_length != 0;
+
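+ /* An all-zero envelope never needs to be sent to the device */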
+ if (!needs_new_envelope)
+ return false;
+
+ if (!old)
+ return needs_new_envelope;
+
+ return envelope->attack_level != old->attack_level ||
+ envelope->fade_level != old->fade_level ||
envelope->attack_length != old->attack_length ||
- envelope->fade_length != old->fade_length;
+ envelope->fade_length != old->fade_length;
}
/*
@@ -301,17 +383,27 @@ static void pidff_set_effect_report(struct pidff_device *pidff,
pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0];
pidff->set_effect_type->value[0] =
pidff->create_new_effect_type->value[0];
- pidff->set_effect[PID_DURATION].value[0] = effect->replay.length;
+
+ pidff_set_duration(&pidff->set_effect[PID_DURATION],
+ effect->replay.length);
+
pidff->set_effect[PID_TRIGGER_BUTTON].value[0] = effect->trigger.button;
- pidff->set_effect[PID_TRIGGER_REPEAT_INT].value[0] =
- effect->trigger.interval;
+ pidff_set_time(&pidff->set_effect[PID_TRIGGER_REPEAT_INT],
+ effect->trigger.interval);
pidff->set_effect[PID_GAIN].value[0] =
pidff->set_effect[PID_GAIN].field->logical_maximum;
pidff->set_effect[PID_DIRECTION_ENABLE].value[0] = 1;
- pidff->effect_direction->value[0] =
- pidff_rescale(effect->direction, 0xffff,
- pidff->effect_direction);
- pidff->set_effect[PID_START_DELAY].value[0] = effect->replay.delay;
+
+ /* Use fixed direction if needed */
+ pidff->effect_direction->value[0] = pidff_rescale(
+ pidff->quirks & HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION ?
+ PIDFF_FIXED_WHEEL_DIRECTION : effect->direction,
+ U16_MAX, pidff->effect_direction);
+
+ /* Omit setting delay field if it's missing */
+ if (!(pidff->quirks & HID_PIDFF_QUIRK_MISSING_DELAY))
+ pidff_set_time(&pidff->set_effect[PID_START_DELAY],
+ effect->replay.delay);
hid_hw_request(pidff->hid, pidff->reports[PID_SET_EFFECT],
HID_REQ_SET_REPORT);
@@ -343,11 +435,11 @@ static void pidff_set_periodic_report(struct pidff_device *pidff,
pidff_set_signed(&pidff->set_periodic[PID_OFFSET],
effect->u.periodic.offset);
pidff_set(&pidff->set_periodic[PID_PHASE], effect->u.periodic.phase);
- pidff->set_periodic[PID_PERIOD].value[0] = effect->u.periodic.period;
+ pidff_set_time(&pidff->set_periodic[PID_PERIOD],
+ effect->u.periodic.period);
hid_hw_request(pidff->hid, pidff->reports[PID_SET_PERIODIC],
HID_REQ_SET_REPORT);
-
}
/*
@@ -368,13 +460,19 @@ static int pidff_needs_set_periodic(struct ff_effect *effect,
static void pidff_set_condition_report(struct pidff_device *pidff,
struct ff_effect *effect)
{
- int i;
+ int i, max_axis;
+
+ /* Devices missing Parameter Block Offset can only have one axis */
+ max_axis = pidff->quirks & HID_PIDFF_QUIRK_MISSING_PBO ? 1 : 2;
pidff->set_condition[PID_EFFECT_BLOCK_INDEX].value[0] =
pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0];
- for (i = 0; i < 2; i++) {
- pidff->set_condition[PID_PARAM_BLOCK_OFFSET].value[0] = i;
+ for (i = 0; i < max_axis; i++) {
+ /* Omit Parameter Block Offset if missing */
+ if (!(pidff->quirks & HID_PIDFF_QUIRK_MISSING_PBO))
+ pidff->set_condition[PID_PARAM_BLOCK_OFFSET].value[0] = i;
+
pidff_set_signed(&pidff->set_condition[PID_CP_OFFSET],
effect->u.condition[i].center);
pidff_set_signed(&pidff->set_condition[PID_POS_COEFFICIENT],
@@ -442,8 +540,103 @@ static int pidff_needs_set_ramp(struct ff_effect *effect, struct ff_effect *old)
}
/*
+ * Set device gain
+ */
+static void pidff_set_gain_report(struct pidff_device *pidff, u16 gain)
+{
+ if (!pidff->device_gain[PID_DEVICE_GAIN_FIELD].field)
+ return;
+
+ pidff_set(&pidff->device_gain[PID_DEVICE_GAIN_FIELD], gain);
+ hid_hw_request(pidff->hid, pidff->reports[PID_DEVICE_GAIN],
+ HID_REQ_SET_REPORT);
+}
+
+/*
+ * Send device control report to the device
+ */
+static void pidff_set_device_control(struct pidff_device *pidff, int field)
+{
+ int i, index;
+ int field_index = pidff->control_id[field];
+
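+ /* control_id values are stored as usage index + 1; 0 means the control is missing */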
+ if (field_index < 1)
+ return;
+
+ /* Detect if the field is a bitmask variable or an array */
+ if (pidff->device_control->flags & HID_MAIN_ITEM_VARIABLE) {
+ hid_dbg(pidff->hid, "DEVICE_CONTROL is a bitmask\n");
+
+ /* Clear current bitmask */
+ for (i = 0; i < sizeof(pidff_device_control); i++) {
+ index = pidff->control_id[i];
+ if (index < 1)
+ continue;
+
+ pidff->device_control->value[index - 1] = 0;
+ }
+
+ pidff->device_control->value[field_index - 1] = 1;
+ } else {
+ hid_dbg(pidff->hid, "DEVICE_CONTROL is an array\n");
+ pidff->device_control->value[0] = field_index;
+ }
+
+ hid_hw_request(pidff->hid, pidff->reports[PID_DEVICE_CONTROL], HID_REQ_SET_REPORT);
+ hid_hw_wait(pidff->hid);
+}
+
+/*
+ * Modify actuators state
+ */
+static void pidff_set_actuators(struct pidff_device *pidff, bool enable)
+{
+ hid_dbg(pidff->hid, "%s actuators\n", enable ? "Enable" : "Disable");
+ pidff_set_device_control(pidff,
+ enable ? PID_ENABLE_ACTUATORS : PID_DISABLE_ACTUATORS);
+}
+
+/*
+ * Reset the device, stop all effects, enable actuators
+ */
+static void pidff_reset(struct pidff_device *pidff)
+{
+ /* We reset twice as sometimes hid_wait_io isn't waiting long enough */
+ pidff_set_device_control(pidff, PID_RESET);
+ pidff_set_device_control(pidff, PID_RESET);
+ pidff->effect_count = 0;
+
+ pidff_set_device_control(pidff, PID_STOP_ALL_EFFECTS);
+ pidff_set_actuators(pidff, 1);
+}
+
+/*
+ * Fetch pool report
+ */
+static void pidff_fetch_pool(struct pidff_device *pidff)
+{
+ int i;
+ struct hid_device *hid = pidff->hid;
+
+ /* Repeat if PID_SIMULTANEOUS_MAX < 2 to make sure it's correct */
+ for (i = 0; i < 20; i++) {
+ hid_hw_request(hid, pidff->reports[PID_POOL], HID_REQ_GET_REPORT);
+ hid_hw_wait(hid);
+
+ if (!pidff->pool[PID_SIMULTANEOUS_MAX].value)
+ return;
+ if (pidff->pool[PID_SIMULTANEOUS_MAX].value[0] >= 2)
+ return;
+ }
+ hid_warn(hid, "device reports %d simultaneous effects\n",
+ pidff->pool[PID_SIMULTANEOUS_MAX].value[0]);
+}
+
+/*
* Send a request for effect upload to the device
*
+ * Reset and enable actuators if no effects were present on the device
+ *
* Returns 0 if device reported success, -ENOSPC if the device reported memory
* is full. Upon unknown response the function will retry for 60 times, if
* still unsuccessful -EIO is returned.
@@ -452,6 +645,9 @@ static int pidff_request_effect_upload(struct pidff_device *pidff, int efnum)
{
int j;
+ if (!pidff->effect_count)
+ pidff_reset(pidff);
+
pidff->create_new_effect_type->value[0] = efnum;
hid_hw_request(pidff->hid, pidff->reports[PID_CREATE_NEW_EFFECT],
HID_REQ_SET_REPORT);
@@ -471,6 +667,8 @@ static int pidff_request_effect_upload(struct pidff_device *pidff, int efnum)
hid_dbg(pidff->hid, "device reported free memory: %d bytes\n",
pidff->block_load[PID_RAM_POOL_AVAILABLE].value ?
pidff->block_load[PID_RAM_POOL_AVAILABLE].value[0] : -1);
+
+ pidff->effect_count++;
return 0;
}
if (pidff->block_load_status->value[0] ==
@@ -480,6 +678,11 @@ static int pidff_request_effect_upload(struct pidff_device *pidff, int efnum)
pidff->block_load[PID_RAM_POOL_AVAILABLE].value[0] : -1);
return -ENOSPC;
}
+ if (pidff->block_load_status->value[0] ==
+ pidff->status_id[PID_BLOCK_LOAD_ERROR]) {
+ hid_dbg(pidff->hid, "device error during effect creation\n");
+ return -EREMOTEIO;
+ }
}
hid_err(pidff->hid, "pid_block_load failed 60 times\n");
return -EIO;
@@ -498,7 +701,8 @@ static void pidff_playback_pid(struct pidff_device *pidff, int pid_id, int n)
} else {
pidff->effect_operation_status->value[0] =
pidff->operation_id[PID_EFFECT_START];
- pidff->effect_operation[PID_LOOP_COUNT].value[0] = n;
+ pidff->effect_operation[PID_LOOP_COUNT].value[0] =
+ pidff_clamp(n, pidff->effect_operation[PID_LOOP_COUNT].field);
}
hid_hw_request(pidff->hid, pidff->reports[PID_EFFECT_OPERATION],
@@ -511,20 +715,22 @@ static void pidff_playback_pid(struct pidff_device *pidff, int pid_id, int n)
static int pidff_playback(struct input_dev *dev, int effect_id, int value)
{
struct pidff_device *pidff = dev->ff->private;
-
pidff_playback_pid(pidff, pidff->pid_id[effect_id], value);
-
return 0;
}
/*
* Erase effect with PID id
+ * Decrease the device effect counter
*/
static void pidff_erase_pid(struct pidff_device *pidff, int pid_id)
{
pidff->block_free[PID_EFFECT_BLOCK_INDEX].value[0] = pid_id;
hid_hw_request(pidff->hid, pidff->reports[PID_BLOCK_FREE],
HID_REQ_SET_REPORT);
+
+ if (pidff->effect_count > 0)
+ pidff->effect_count--;
}
/*
@@ -537,8 +743,11 @@ static int pidff_erase_effect(struct input_dev *dev, int effect_id)
hid_dbg(pidff->hid, "starting to erase %d/%d\n",
effect_id, pidff->pid_id[effect_id]);
- /* Wait for the queue to clear. We do not want a full fifo to
- prevent the effect removal. */
+
+ /*
+ * Wait for the queue to clear. We do not want
+ * a full fifo to prevent the effect removal.
+ */
hid_hw_wait(pidff->hid);
pidff_playback_pid(pidff, pid_id, 0);
pidff_erase_pid(pidff, pid_id);
@@ -574,11 +783,9 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect,
pidff_set_effect_report(pidff, effect);
if (!old || pidff_needs_set_constant(effect, old))
pidff_set_constant_force_report(pidff, effect);
- if (!old ||
- pidff_needs_set_envelope(&effect->u.constant.envelope,
- &old->u.constant.envelope))
- pidff_set_envelope_report(pidff,
- &effect->u.constant.envelope);
+ if (pidff_needs_set_envelope(&effect->u.constant.envelope,
+ old ? &old->u.constant.envelope : NULL))
+ pidff_set_envelope_report(pidff, &effect->u.constant.envelope);
break;
case FF_PERIODIC:
@@ -604,6 +811,9 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect,
return -EINVAL;
}
+ if (pidff->quirks & HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY)
+ type_id = PID_SINE;
+
error = pidff_request_effect_upload(pidff,
pidff->type_id[type_id]);
if (error)
@@ -613,11 +823,9 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect,
pidff_set_effect_report(pidff, effect);
if (!old || pidff_needs_set_periodic(effect, old))
pidff_set_periodic_report(pidff, effect);
- if (!old ||
- pidff_needs_set_envelope(&effect->u.periodic.envelope,
- &old->u.periodic.envelope))
- pidff_set_envelope_report(pidff,
- &effect->u.periodic.envelope);
+ if (pidff_needs_set_envelope(&effect->u.periodic.envelope,
+ old ? &old->u.periodic.envelope : NULL))
+ pidff_set_envelope_report(pidff, &effect->u.periodic.envelope);
break;
case FF_RAMP:
@@ -631,56 +839,32 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect,
pidff_set_effect_report(pidff, effect);
if (!old || pidff_needs_set_ramp(effect, old))
pidff_set_ramp_force_report(pidff, effect);
- if (!old ||
- pidff_needs_set_envelope(&effect->u.ramp.envelope,
- &old->u.ramp.envelope))
- pidff_set_envelope_report(pidff,
- &effect->u.ramp.envelope);
+ if (pidff_needs_set_envelope(&effect->u.ramp.envelope,
+ old ? &old->u.ramp.envelope : NULL))
+ pidff_set_envelope_report(pidff, &effect->u.ramp.envelope);
break;
case FF_SPRING:
- if (!old) {
- error = pidff_request_effect_upload(pidff,
- pidff->type_id[PID_SPRING]);
- if (error)
- return error;
- }
- if (!old || pidff_needs_set_effect(effect, old))
- pidff_set_effect_report(pidff, effect);
- if (!old || pidff_needs_set_condition(effect, old))
- pidff_set_condition_report(pidff, effect);
- break;
-
- case FF_FRICTION:
- if (!old) {
- error = pidff_request_effect_upload(pidff,
- pidff->type_id[PID_FRICTION]);
- if (error)
- return error;
- }
- if (!old || pidff_needs_set_effect(effect, old))
- pidff_set_effect_report(pidff, effect);
- if (!old || pidff_needs_set_condition(effect, old))
- pidff_set_condition_report(pidff, effect);
- break;
-
case FF_DAMPER:
- if (!old) {
- error = pidff_request_effect_upload(pidff,
- pidff->type_id[PID_DAMPER]);
- if (error)
- return error;
- }
- if (!old || pidff_needs_set_effect(effect, old))
- pidff_set_effect_report(pidff, effect);
- if (!old || pidff_needs_set_condition(effect, old))
- pidff_set_condition_report(pidff, effect);
- break;
-
case FF_INERTIA:
+ case FF_FRICTION:
if (!old) {
+ switch (effect->type) {
+ case FF_SPRING:
+ type_id = PID_SPRING;
+ break;
+ case FF_DAMPER:
+ type_id = PID_DAMPER;
+ break;
+ case FF_INERTIA:
+ type_id = PID_INERTIA;
+ break;
+ case FF_FRICTION:
+ type_id = PID_FRICTION;
+ break;
+ }
error = pidff_request_effect_upload(pidff,
- pidff->type_id[PID_INERTIA]);
+ pidff->type_id[type_id]);
if (error)
return error;
}
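
The envelope hunks above drop the explicit "!old ||" guard and instead pass a NULL old envelope into pidff_needs_set_envelope(). That helper is not part of this excerpt; one plausible shape of it, written as a hedged sketch over the uapi struct ff_envelope, is:

#include <linux/input.h>
#include <linux/string.h>

/* Assumed behaviour: on a first upload (old == NULL) only send the report
 * if the new envelope actually carries data; otherwise send it on change. */
static bool needs_set_envelope(const struct ff_envelope *env,
			       const struct ff_envelope *old)
{
	bool empty = !env->attack_level && !env->fade_level &&
		     !env->attack_length && !env->fade_length;

	if (!old)
		return !empty;
	return memcmp(env, old, sizeof(*env)) != 0;
}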
@@ -709,11 +893,7 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect,
*/
static void pidff_set_gain(struct input_dev *dev, u16 gain)
{
- struct pidff_device *pidff = dev->ff->private;
-
- pidff_set(&pidff->device_gain[PID_DEVICE_GAIN_FIELD], gain);
- hid_hw_request(pidff->hid, pidff->reports[PID_DEVICE_GAIN],
- HID_REQ_SET_REPORT);
+ pidff_set_gain_report(dev->ff->private, gain);
}
static void pidff_autocenter(struct pidff_device *pidff, u16 magnitude)
@@ -736,7 +916,10 @@ static void pidff_autocenter(struct pidff_device *pidff, u16 magnitude)
pidff->set_effect[PID_TRIGGER_REPEAT_INT].value[0] = 0;
pidff_set(&pidff->set_effect[PID_GAIN], magnitude);
pidff->set_effect[PID_DIRECTION_ENABLE].value[0] = 1;
- pidff->set_effect[PID_START_DELAY].value[0] = 0;
+
+ /* Omit setting delay field if it's missing */
+ if (!(pidff->quirks & HID_PIDFF_QUIRK_MISSING_DELAY))
+ pidff->set_effect[PID_START_DELAY].value[0] = 0;
hid_hw_request(pidff->hid, pidff->reports[PID_SET_EFFECT],
HID_REQ_SET_REPORT);
@@ -747,9 +930,7 @@ static void pidff_autocenter(struct pidff_device *pidff, u16 magnitude)
*/
static void pidff_set_autocenter(struct input_dev *dev, u16 magnitude)
{
- struct pidff_device *pidff = dev->ff->private;
-
- pidff_autocenter(pidff, magnitude);
+ pidff_autocenter(dev->ff->private, magnitude);
}
/*
@@ -758,7 +939,13 @@ static void pidff_set_autocenter(struct input_dev *dev, u16 magnitude)
static int pidff_find_fields(struct pidff_usage *usage, const u8 *table,
struct hid_report *report, int count, int strict)
{
int i, j, k, found;
+ int return_value = 0;
+
+ if (!report) {
+ pr_debug("pidff_find_fields, null report\n");
+ return -1;
+ }
for (k = 0; k < count; k++) {
found = 0;
@@ -783,12 +970,22 @@ static int pidff_find_fields(struct pidff_usage *usage, const u8 *table,
if (found)
break;
}
- if (!found && strict) {
+ if (!found && table[k] == pidff_set_effect[PID_START_DELAY]) {
+ pr_debug("Delay field not found, but that's OK\n");
+ pr_debug("Setting MISSING_DELAY quirk\n");
+ return_value |= HID_PIDFF_QUIRK_MISSING_DELAY;
+ } else if (!found && table[k] == pidff_set_condition[PID_PARAM_BLOCK_OFFSET]) {
+ pr_debug("PBO field not found, but that's OK\n");
+ pr_debug("Setting MISSING_PBO quirk\n");
+ return_value |= HID_PIDFF_QUIRK_MISSING_PBO;
+ } else if (!found && strict) {
pr_debug("failed to locate %d\n", k);
return -1;
}
}
- return 0;
+ return return_value;
}
/*
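
With the change above, pidff_find_fields() no longer returns plain 0/-1: soft misses (the optional delay and parameter-block-offset usages) come back as quirk bits, while hard misses still return -1. A reduced sketch of the convention the callers rely on (the boolean parameters are illustrative, not HID lookups):

static int find_fields_sketch(bool have_required, bool have_delay, bool have_pbo)
{
	int quirks = 0;

	if (!have_required)
		return -1;			/* caller bails out with -ENODEV */
	if (!have_delay)
		quirks |= HID_PIDFF_QUIRK_MISSING_DELAY;
	if (!have_pbo)
		quirks |= HID_PIDFF_QUIRK_MISSING_PBO;
	return quirks;				/* caller ORs this into pidff->quirks */
}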
@@ -871,6 +1068,11 @@ static int pidff_reports_ok(struct pidff_device *pidff)
static struct hid_field *pidff_find_special_field(struct hid_report *report,
int usage, int enforce_min)
{
int i;
+
+ if (!report) {
+ pr_debug("pidff_find_special_field, null report\n");
+ return NULL;
+ }
for (i = 0; i < report->maxfield; i++) {
@@ -923,22 +1125,24 @@ static int pidff_find_special_fields(struct pidff_device *pidff)
pidff->create_new_effect_type =
pidff_find_special_field(pidff->reports[PID_CREATE_NEW_EFFECT],
- 0x25, 1);
+ PID_EFFECT_TYPE, 1);
pidff->set_effect_type =
pidff_find_special_field(pidff->reports[PID_SET_EFFECT],
- 0x25, 1);
+ PID_EFFECT_TYPE, 1);
pidff->effect_direction =
pidff_find_special_field(pidff->reports[PID_SET_EFFECT],
- 0x57, 0);
+ PID_DIRECTION, 0);
pidff->device_control =
pidff_find_special_field(pidff->reports[PID_DEVICE_CONTROL],
- 0x96, 1);
+ PID_DEVICE_CONTROL_ARRAY,
+ !(pidff->quirks & HID_PIDFF_QUIRK_PERMISSIVE_CONTROL));
+
pidff->block_load_status =
pidff_find_special_field(pidff->reports[PID_BLOCK_LOAD],
- 0x8b, 1);
+ PID_BLOCK_LOAD_STATUS, 1);
pidff->effect_operation_status =
pidff_find_special_field(pidff->reports[PID_EFFECT_OPERATION],
- 0x78, 1);
+ PID_EFFECT_OPERATION_ARRAY, 1);
hid_dbg(pidff->hid, "search done\n");
@@ -967,10 +1171,6 @@ static int pidff_find_special_fields(struct pidff_device *pidff)
return -1;
}
- pidff_find_special_keys(pidff->control_id, pidff->device_control,
- pidff_device_control,
- sizeof(pidff_device_control));
-
PIDFF_FIND_SPECIAL_KEYS(control_id, device_control, device_control);
if (!PIDFF_FIND_SPECIAL_KEYS(type_id, create_new_effect_type,
@@ -1049,7 +1249,6 @@ static int pidff_find_effects(struct pidff_device *pidff,
set_bit(FF_FRICTION, dev->ffbit);
return 0;
-
}
#define PIDFF_FIND_FIELDS(name, report, strict) \
@@ -1062,12 +1261,19 @@ static int pidff_find_effects(struct pidff_device *pidff,
*/
static int pidff_init_fields(struct pidff_device *pidff, struct input_dev *dev)
{
- int envelope_ok = 0;
+ int status = 0;
- if (PIDFF_FIND_FIELDS(set_effect, PID_SET_EFFECT, 1)) {
+ /* Save info about the device not having the DELAY ffb field. */
+ status = PIDFF_FIND_FIELDS(set_effect, PID_SET_EFFECT, 1);
+ if (status == -1) {
hid_err(pidff->hid, "unknown set_effect report layout\n");
return -ENODEV;
}
+ pidff->quirks |= status;
+
+ if (status & HID_PIDFF_QUIRK_MISSING_DELAY)
+ hid_dbg(pidff->hid, "Adding MISSING_DELAY quirk\n");
+
PIDFF_FIND_FIELDS(block_load, PID_BLOCK_LOAD, 0);
if (!pidff->block_load[PID_EFFECT_BLOCK_INDEX].value) {
@@ -1085,13 +1291,10 @@ static int pidff_init_fields(struct pidff_device *pidff, struct input_dev *dev)
return -ENODEV;
}
- if (!PIDFF_FIND_FIELDS(set_envelope, PID_SET_ENVELOPE, 1))
- envelope_ok = 1;
-
if (pidff_find_special_fields(pidff) || pidff_find_effects(pidff, dev))
return -ENODEV;
- if (!envelope_ok) {
+ if (PIDFF_FIND_FIELDS(set_envelope, PID_SET_ENVELOPE, 1)) {
if (test_and_clear_bit(FF_CONSTANT, dev->ffbit))
hid_warn(pidff->hid,
"has constant effect but no envelope\n");
@@ -1116,16 +1319,20 @@ static int pidff_init_fields(struct pidff_device *pidff, struct input_dev *dev)
clear_bit(FF_RAMP, dev->ffbit);
}
- if ((test_bit(FF_SPRING, dev->ffbit) ||
- test_bit(FF_DAMPER, dev->ffbit) ||
- test_bit(FF_FRICTION, dev->ffbit) ||
- test_bit(FF_INERTIA, dev->ffbit)) &&
- PIDFF_FIND_FIELDS(set_condition, PID_SET_CONDITION, 1)) {
- hid_warn(pidff->hid, "unknown condition effect layout\n");
- clear_bit(FF_SPRING, dev->ffbit);
- clear_bit(FF_DAMPER, dev->ffbit);
- clear_bit(FF_FRICTION, dev->ffbit);
- clear_bit(FF_INERTIA, dev->ffbit);
+ if (test_bit(FF_SPRING, dev->ffbit) ||
+ test_bit(FF_DAMPER, dev->ffbit) ||
+ test_bit(FF_FRICTION, dev->ffbit) ||
+ test_bit(FF_INERTIA, dev->ffbit)) {
+ status = PIDFF_FIND_FIELDS(set_condition, PID_SET_CONDITION, 1);
+
+ if (status < 0) {
+ hid_warn(pidff->hid, "unknown condition effect layout\n");
+ clear_bit(FF_SPRING, dev->ffbit);
+ clear_bit(FF_DAMPER, dev->ffbit);
+ clear_bit(FF_FRICTION, dev->ffbit);
+ clear_bit(FF_INERTIA, dev->ffbit);
+ }
+ pidff->quirks |= status;
}
if (test_bit(FF_PERIODIC, dev->ffbit) &&
@@ -1143,46 +1350,6 @@ static int pidff_init_fields(struct pidff_device *pidff, struct input_dev *dev)
}
/*
- * Reset the device
- */
-static void pidff_reset(struct pidff_device *pidff)
-{
- struct hid_device *hid = pidff->hid;
- int i = 0;
-
- pidff->device_control->value[0] = pidff->control_id[PID_RESET];
- /* We reset twice as sometimes hid_wait_io isn't waiting long enough */
- hid_hw_request(hid, pidff->reports[PID_DEVICE_CONTROL], HID_REQ_SET_REPORT);
- hid_hw_wait(hid);
- hid_hw_request(hid, pidff->reports[PID_DEVICE_CONTROL], HID_REQ_SET_REPORT);
- hid_hw_wait(hid);
-
- pidff->device_control->value[0] =
- pidff->control_id[PID_ENABLE_ACTUATORS];
- hid_hw_request(hid, pidff->reports[PID_DEVICE_CONTROL], HID_REQ_SET_REPORT);
- hid_hw_wait(hid);
-
- /* pool report is sometimes messed up, refetch it */
- hid_hw_request(hid, pidff->reports[PID_POOL], HID_REQ_GET_REPORT);
- hid_hw_wait(hid);
-
- if (pidff->pool[PID_SIMULTANEOUS_MAX].value) {
- while (pidff->pool[PID_SIMULTANEOUS_MAX].value[0] < 2) {
- if (i++ > 20) {
- hid_warn(pidff->hid,
- "device reports %d simultaneous effects\n",
- pidff->pool[PID_SIMULTANEOUS_MAX].value[0]);
- break;
- }
- hid_dbg(pidff->hid, "pid_pool requested again\n");
- hid_hw_request(hid, pidff->reports[PID_POOL],
- HID_REQ_GET_REPORT);
- hid_hw_wait(hid);
- }
- }
-}
-
-/*
* Test if autocenter modification is using the supported method
*/
static int pidff_check_autocenter(struct pidff_device *pidff,
@@ -1206,24 +1373,23 @@ static int pidff_check_autocenter(struct pidff_device *pidff,
if (pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0] ==
pidff->block_load[PID_EFFECT_BLOCK_INDEX].field->logical_minimum + 1) {
- pidff_autocenter(pidff, 0xffff);
+ pidff_autocenter(pidff, U16_MAX);
set_bit(FF_AUTOCENTER, dev->ffbit);
} else {
hid_notice(pidff->hid,
"device has unknown autocenter control method\n");
}
-
pidff_erase_pid(pidff,
pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0]);
return 0;
-
}
/*
* Check if the device is PID and initialize it
+ * Set initial quirks
*/
-int hid_pidff_init(struct hid_device *hid)
+int hid_pidff_init_with_quirks(struct hid_device *hid, u32 initial_quirks)
{
struct pidff_device *pidff;
struct hid_input *hidinput = list_entry(hid->inputs.next,
@@ -1245,6 +1411,8 @@ int hid_pidff_init(struct hid_device *hid)
return -ENOMEM;
pidff->hid = hid;
+ pidff->quirks = initial_quirks;
+ pidff->effect_count = 0;
hid_device_io_start(hid);
@@ -1261,14 +1429,9 @@ int hid_pidff_init(struct hid_device *hid)
if (error)
goto fail;
- pidff_reset(pidff);
-
- if (test_bit(FF_GAIN, dev->ffbit)) {
- pidff_set(&pidff->device_gain[PID_DEVICE_GAIN_FIELD], 0xffff);
- hid_hw_request(hid, pidff->reports[PID_DEVICE_GAIN],
- HID_REQ_SET_REPORT);
- }
-
+ /* pool report is sometimes messed up, refetch it */
+ pidff_fetch_pool(pidff);
+ pidff_set_gain_report(pidff, U16_MAX);
error = pidff_check_autocenter(pidff, dev);
if (error)
goto fail;
@@ -1311,6 +1474,7 @@ int hid_pidff_init(struct hid_device *hid)
ff->playback = pidff_playback;
hid_info(dev, "Force feedback for USB HID PID devices by Anssi Hannula <anssi.hannula@gmail.com>\n");
+ hid_dbg(dev, "Active quirks mask: 0x%x\n", pidff->quirks);
hid_device_io_stop(hid);
@@ -1322,3 +1486,14 @@ int hid_pidff_init(struct hid_device *hid)
kfree(pidff);
return error;
}
+EXPORT_SYMBOL_GPL(hid_pidff_init_with_quirks);
+
+/*
+ * Check if the device is PID and initialize it
+ * Wrapper made to keep the compatibility with old
+ * init function
+ */
+int hid_pidff_init(struct hid_device *hid)
+{
+ return hid_pidff_init_with_quirks(hid, 0);
+}
diff --git a/drivers/hid/usbhid/hid-pidff.h b/drivers/hid/usbhid/hid-pidff.h
new file mode 100644
index 000000000000..dda571e0a5bd
--- /dev/null
+++ b/drivers/hid/usbhid/hid-pidff.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __HID_PIDFF_H
+#define __HID_PIDFF_H
+
+#include <linux/hid.h>
+
+/* HID PIDFF quirks */
+
+/* Delay field (0xA7) missing. Skip it during set effect report upload */
+#define HID_PIDFF_QUIRK_MISSING_DELAY BIT(0)
+
+/* Missing Parameter block offset (0x23). Skip it during SET_CONDITION
+ report upload */
+#define HID_PIDFF_QUIRK_MISSING_PBO BIT(1)
+
+/* Initialise device control field even if logical_minimum != 1 */
+#define HID_PIDFF_QUIRK_PERMISSIVE_CONTROL BIT(2)
+
+/* Use fixed 0x4000 direction during SET_EFFECT report upload */
+#define HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION BIT(3)
+
+/* Force all periodic effects to be uploaded as SINE */
+#define HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY BIT(4)
+
+#ifdef CONFIG_HID_PID
+int hid_pidff_init(struct hid_device *hid);
+int hid_pidff_init_with_quirks(struct hid_device *hid, u32 initial_quirks);
+#else
+#define hid_pidff_init NULL
+#define hid_pidff_init_with_quirks NULL
+#endif
+
+#endif
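
For a device driver that already knows its hardware's PID deviations, the new entry point is meant to be called with the quirk mask up front. A hypothetical caller follows; the hook name, include path usage and quirk choice are illustrative, not taken from an existing driver:

#include "usbhid/hid-pidff.h"

static int example_ff_init(struct hid_device *hdev)
{
	/* Hypothetical wheel: no delay field, periodic effects limited to sine */
	return hid_pidff_init_with_quirks(hdev,
			HID_PIDFF_QUIRK_MISSING_DELAY |
			HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY);
}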
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index c439ed2f16db..af6bc76dbf64 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -160,7 +160,7 @@ static int usb_kbd_event(struct input_dev *dev, unsigned int type,
return -1;
spin_lock_irqsave(&kbd->leds_lock, flags);
- kbd->newleds = (!!test_bit(LED_KANA, dev->led) << 3) | (!!test_bit(LED_COMPOSE, dev->led) << 3) |
+ kbd->newleds = (!!test_bit(LED_KANA, dev->led) << 4) | (!!test_bit(LED_COMPOSE, dev->led) << 3) |
(!!test_bit(LED_SCROLLL, dev->led) << 2) | (!!test_bit(LED_CAPSL, dev->led) << 1) |
(!!test_bit(LED_NUML, dev->led));
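
The one-line fix above matches the HID boot-protocol keyboard LED output report, where each LED has its own bit; Kana previously collided with Compose on bit 3. For reference, a summary of the bit layout of byte 0 of that report:

/* Boot keyboard LED output report, byte 0 */
#define BOOT_LED_NUM_LOCK	BIT(0)
#define BOOT_LED_CAPS_LOCK	BIT(1)
#define BOOT_LED_SCROLL_LOCK	BIT(2)
#define BOOT_LED_COMPOSE	BIT(3)
#define BOOT_LED_KANA		BIT(4)	/* was wrongly packed into bit 3 */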
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 8125383932ec..97393a3083ca 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -69,16 +69,27 @@ static void wacom_wac_queue_flush(struct hid_device *hdev,
struct kfifo_rec_ptr_2 *fifo)
{
while (!kfifo_is_empty(fifo)) {
- u8 buf[WACOM_PKGLEN_MAX];
- int size;
+ int size = kfifo_peek_len(fifo);
+ u8 *buf = kzalloc(size, GFP_KERNEL);
+ unsigned int count;
int err;
+
+ if (!buf) {
+ kfifo_skip(fifo);
+ continue;
+ }
- size = kfifo_out(fifo, buf, sizeof(buf));
+ count = kfifo_out(fifo, buf, size);
+ if (count != size) {
+ // Hard to say what is the "right" action in this
+ // circumstance. Skipping the entry and continuing
+ // to flush seems reasonable enough, however.
+ hid_warn(hdev, "%s: removed fifo entry with unexpected size\n",
+ __func__);
+ kfree(buf);
+ continue;
+ }
err = hid_report_raw_event(hdev, HID_INPUT_REPORT, buf, size, false);
if (err) {
hid_warn(hdev, "%s: unable to flush event due to error %d\n",
__func__, err);
}
+
+ kfree(buf);
}
}
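
The flush loop above sizes its buffer per record via kfifo_peek_len() instead of a fixed WACOM_PKGLEN_MAX array. A self-contained sketch of the same variable-length-record pattern (names are illustrative, not wacom code):

#include <linux/kfifo.h>
#include <linux/slab.h>

static void drain_records(struct kfifo_rec_ptr_2 *fifo,
			  void (*consume)(const u8 *buf, unsigned int len))
{
	while (!kfifo_is_empty(fifo)) {
		unsigned int len = kfifo_peek_len(fifo);
		u8 *buf = kzalloc(len, GFP_KERNEL);

		if (!buf) {
			kfifo_skip(fifo);	/* drop the record, keep going */
			continue;
		}
		if (kfifo_out(fifo, buf, len) == len)
			consume(buf, len);
		kfree(buf);
	}
}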
@@ -158,13 +169,10 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
if (wacom->wacom_wac.features.type == BOOTLOADER)
return 0;
- if (size > WACOM_PKGLEN_MAX)
- return 1;
-
if (wacom_wac_pen_serial_enforce(hdev, report, raw_data, size))
return -1;
- memcpy(wacom->wacom_wac.data, raw_data, size);
+ wacom->wacom_wac.data = raw_data;
wacom_wac_irq(&wacom->wacom_wac, size);
@@ -1286,6 +1294,7 @@ static void wacom_devm_kfifo_release(struct device *dev, void *res)
static int wacom_devm_kfifo_alloc(struct wacom *wacom)
{
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ int fifo_size = min(PAGE_SIZE, 10 * wacom_wac->features.pktlen);
struct kfifo_rec_ptr_2 *pen_fifo;
int error;
@@ -1296,7 +1305,7 @@ static int wacom_devm_kfifo_alloc(struct wacom *wacom)
if (!pen_fifo)
return -ENOMEM;
- error = kfifo_alloc(pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL);
+ error = kfifo_alloc(pen_fifo, fifo_size, GFP_KERNEL);
if (error) {
devres_free(pen_fifo);
return error;
@@ -2352,12 +2361,14 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
unsigned int connect_mask = HID_CONNECT_HIDRAW;
features->pktlen = wacom_compute_pktlen(hdev);
- if (features->pktlen > WACOM_PKGLEN_MAX)
- return -EINVAL;
if (!devres_open_group(&hdev->dev, wacom, GFP_KERNEL))
return -ENOMEM;
+ error = wacom_devm_kfifo_alloc(wacom);
+ if (error)
+ goto fail;
+
wacom->resources = true;
error = wacom_allocate_inputs(wacom);
@@ -2821,10 +2832,6 @@ static int wacom_probe(struct hid_device *hdev,
if (features->check_for_hid_type && features->hid_type != hdev->type)
return -ENODEV;
- error = wacom_devm_kfifo_alloc(wacom);
- if (error)
- return error;
-
wacom_wac->hid_data.inputmode = -1;
wacom_wac->mode_report = -1;
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index b60bfafc6a8f..5107a676e24f 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1201,12 +1201,10 @@ static void wacom_intuos_bt_process_data(struct wacom_wac *wacom,
static int wacom_intuos_bt_irq(struct wacom_wac *wacom, size_t len)
{
- unsigned char data[WACOM_PKGLEN_MAX];
+ u8 *data = kmemdup(wacom->data, len, GFP_KERNEL);
int i = 1;
unsigned power_raw, battery_capacity, bat_charging, ps_connected;
- memcpy(data, wacom->data, len);
-
+ if (!data)
+ return 0;
+
switch (data[0]) {
case 0x04:
wacom_intuos_bt_process_data(wacom, data + i);
@@ -1230,8 +1228,10 @@ static int wacom_intuos_bt_irq(struct wacom_wac *wacom, size_t len)
dev_dbg(wacom->pen_input->dev.parent,
"Unknown report: %d,%d size:%zu\n",
data[0], data[1], len);
- return 0;
+ break;
}
+
+ kfree(data);
return 0;
}
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 0c3c6a6aaae9..d4f7d8ca1e7e 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -7,9 +7,6 @@
#include <linux/hid.h>
#include <linux/kfifo.h>
-/* maximum packet length for USB/BT devices */
-#define WACOM_PKGLEN_MAX 361
-
#define WACOM_NAME_MAX 64
#define WACOM_MAX_REMOTES 5
#define WACOM_STATUS_UNKNOWN 255
@@ -277,7 +274,7 @@ struct wacom_features {
unsigned touch_max;
int oVid;
int oPid;
- int pktlen;
+ unsigned int pktlen;
bool check_for_hid_type;
int hid_type;
};
@@ -341,7 +338,7 @@ struct wacom_wac {
char pen_name[WACOM_NAME_MAX];
char touch_name[WACOM_NAME_MAX];
char pad_name[WACOM_NAME_MAX];
- unsigned char data[WACOM_PKGLEN_MAX];
+ u8 *data;
int tool[2];
int id[2];
__u64 serial[2];
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index afe470f3661c..6105ea9a6c6a 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -401,6 +401,7 @@ static void ssip_reset(struct hsi_client *cl)
del_timer(&ssi->rx_wd);
del_timer(&ssi->tx_wd);
del_timer(&ssi->keep_alive);
+ cancel_work_sync(&ssi->work);
ssi->main_state = 0;
ssi->send_state = 0;
ssi->recv_state = 0;
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index 862c47b191af..6c1416167bd2 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -55,4 +55,21 @@ config HYPERV_BALLOON
help
Select this option to enable Hyper-V Balloon driver.
+config MSHV_ROOT
+ tristate "Microsoft Hyper-V root partition support"
+ depends on HYPERV && (X86_64 || ARM64)
+ depends on !HYPERV_VTL_MODE
+ # The hypervisor interface operates on 4k pages. Enforcing it here
+ # simplifies many assumptions in the root partition code.
+ # e.g. When withdrawing memory, the hypervisor gives back 4k pages in
+ # no particular order, making it impossible to reassemble larger pages
+ depends on PAGE_SIZE_4KB
+ select EVENTFD
+ default n
+ help
+ Select this option to enable support for booting and running as root
+ partition on Microsoft Hyper-V.
+
+ If unsure, say N.
+
endmenu
diff --git a/drivers/hv/Makefile b/drivers/hv/Makefile
index b992c0ed182b..976189c725dc 100644
--- a/drivers/hv/Makefile
+++ b/drivers/hv/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_HYPERV) += hv_vmbus.o
obj-$(CONFIG_HYPERV_UTILS) += hv_utils.o
obj-$(CONFIG_HYPERV_BALLOON) += hv_balloon.o
+obj-$(CONFIG_MSHV_ROOT) += mshv_root.o
CFLAGS_hv_trace.o = -I$(src)
CFLAGS_hv_balloon.o = -I$(src)
@@ -11,6 +12,9 @@ hv_vmbus-y := vmbus_drv.o \
channel_mgmt.o ring_buffer.o hv_trace.o
hv_vmbus-$(CONFIG_HYPERV_TESTING) += hv_debugfs.o
hv_utils-y := hv_util.o hv_kvp.o hv_snapshot.o hv_utils_transport.o
+mshv_root-y := mshv_root_main.o mshv_synic.o mshv_eventfd.o mshv_irq.o \
+ mshv_root_hv_call.o mshv_portid_table.o
# Code that must be built-in
obj-$(subst m,y,$(CONFIG_HYPERV)) += hv_common.o
+obj-$(subst m,y,$(CONFIG_MSHV_ROOT)) += hv_proc.o mshv_common.o
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 36d9ba097ff5..308c8f279df8 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -144,7 +144,7 @@ int hv_synic_alloc(void)
* Synic message and event pages are allocated by paravisor.
* Skip these pages allocation here.
*/
- if (!ms_hyperv.paravisor_present && !hv_root_partition) {
+ if (!ms_hyperv.paravisor_present && !hv_root_partition()) {
hv_cpu->synic_message_page =
(void *)get_zeroed_page(GFP_ATOMIC);
if (!hv_cpu->synic_message_page) {
@@ -272,7 +272,7 @@ void hv_synic_enable_regs(unsigned int cpu)
simp.as_uint64 = hv_get_msr(HV_MSR_SIMP);
simp.simp_enabled = 1;
- if (ms_hyperv.paravisor_present || hv_root_partition) {
+ if (ms_hyperv.paravisor_present || hv_root_partition()) {
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) &
~ms_hyperv.shared_gpa_boundary;
@@ -291,7 +291,7 @@ void hv_synic_enable_regs(unsigned int cpu)
siefp.as_uint64 = hv_get_msr(HV_MSR_SIEFP);
siefp.siefp_enabled = 1;
- if (ms_hyperv.paravisor_present || hv_root_partition) {
+ if (ms_hyperv.paravisor_present || hv_root_partition()) {
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) &
~ms_hyperv.shared_gpa_boundary;
@@ -313,17 +313,7 @@ void hv_synic_enable_regs(unsigned int cpu)
shared_sint.vector = vmbus_interrupt;
shared_sint.masked = false;
-
- /*
- * On architectures where Hyper-V doesn't support AEOI (e.g., ARM64),
- * it doesn't provide a recommendation flag and AEOI must be disabled.
- */
-#ifdef HV_DEPRECATING_AEOI_RECOMMENDED
- shared_sint.auto_eoi =
- !(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED);
-#else
- shared_sint.auto_eoi = 0;
-#endif
+ shared_sint.auto_eoi = hv_recommend_using_aeoi();
hv_set_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
/* Enable the global synic bit */
@@ -367,7 +357,7 @@ void hv_synic_disable_regs(unsigned int cpu)
* addresses.
*/
simp.simp_enabled = 0;
- if (ms_hyperv.paravisor_present || hv_root_partition) {
+ if (ms_hyperv.paravisor_present || hv_root_partition()) {
iounmap(hv_cpu->synic_message_page);
hv_cpu->synic_message_page = NULL;
} else {
@@ -379,7 +369,7 @@ void hv_synic_disable_regs(unsigned int cpu)
siefp.as_uint64 = hv_get_msr(HV_MSR_SIEFP);
siefp.siefp_enabled = 0;
- if (ms_hyperv.paravisor_present || hv_root_partition) {
+ if (ms_hyperv.paravisor_present || hv_root_partition()) {
iounmap(hv_cpu->synic_event_page);
hv_cpu->synic_event_page = NULL;
} else {
@@ -433,13 +423,47 @@ retry:
return pending;
}
+static int hv_pick_new_cpu(struct vmbus_channel *channel)
+{
+ int ret = -EBUSY;
+ int start;
+ int cpu;
+
+ lockdep_assert_cpus_held();
+ lockdep_assert_held(&vmbus_connection.channel_mutex);
+
+ /*
+ * We can't assume that the relevant interrupts will be sent before
+ * the cpu is offlined on older versions of hyperv.
+ */
+ if (vmbus_proto_version < VERSION_WIN10_V5_3)
+ return -EBUSY;
+
+ start = get_random_u32_below(nr_cpu_ids);
+
+ for_each_cpu_wrap(cpu, cpu_online_mask, start) {
+ if (channel->target_cpu == cpu ||
+ channel->target_cpu == VMBUS_CONNECT_CPU)
+ continue;
+
+ ret = vmbus_channel_set_cpu(channel, cpu);
+ if (!ret)
+ break;
+ }
+
+ if (ret)
+ ret = vmbus_channel_set_cpu(channel, VMBUS_CONNECT_CPU);
+
+ return ret;
+}
+
/*
* hv_synic_cleanup - Cleanup routine for hv_synic_init().
*/
int hv_synic_cleanup(unsigned int cpu)
{
struct vmbus_channel *channel, *sc;
- bool channel_found = false;
+ int ret = 0;
if (vmbus_connection.conn_state != CONNECTED)
goto always_cleanup;
@@ -456,38 +480,34 @@ int hv_synic_cleanup(unsigned int cpu)
/*
* Search for channels which are bound to the CPU we're about to
- * cleanup. In case we find one and vmbus is still connected, we
- * fail; this will effectively prevent CPU offlining.
- *
- * TODO: Re-bind the channels to different CPUs.
+ * cleanup.
*/
mutex_lock(&vmbus_connection.channel_mutex);
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
if (channel->target_cpu == cpu) {
- channel_found = true;
- break;
+ ret = hv_pick_new_cpu(channel);
+ if (ret) {
+ mutex_unlock(&vmbus_connection.channel_mutex);
+ return ret;
+ }
}
list_for_each_entry(sc, &channel->sc_list, sc_list) {
if (sc->target_cpu == cpu) {
- channel_found = true;
- break;
+ ret = hv_pick_new_cpu(sc);
+ if (ret) {
+ mutex_unlock(&vmbus_connection.channel_mutex);
+ return ret;
+ }
}
}
- if (channel_found)
- break;
}
mutex_unlock(&vmbus_connection.channel_mutex);
- if (channel_found)
- return -EBUSY;
-
/*
- * channel_found == false means that any channels that were previously
- * assigned to the CPU have been reassigned elsewhere with a call of
- * vmbus_send_modifychannel(). Scan the event flags page looking for
- * bits that are set and waiting with a timeout for vmbus_chan_sched()
- * to process such bits. If bits are still set after this operation
- * and VMBus is connected, fail the CPU offlining operation.
+ * Scan the event flags page looking for bits that are set and waiting
+ * with a timeout for vmbus_chan_sched() to process such bits. If bits
+ * are still set after this operation and VMBus is connected, fail the
+ * CPU offlining operation.
*/
if (vmbus_proto_version >= VERSION_WIN10_V4_1 && hv_synic_event_pending())
return -EBUSY;
@@ -497,5 +517,5 @@ always_cleanup:
hv_synic_disable_regs(cpu);
- return 0;
+ return ret;
}
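
hv_pick_new_cpu() above spreads re-homed channels by starting at a random online CPU and wrapping around, before the caller falls back to VMBUS_CONNECT_CPU. The selection strategy in isolation, as a sketch in which retarget() stands in for vmbus_channel_set_cpu():

#include <linux/cpumask.h>
#include <linux/random.h>

static int pick_new_cpu_sketch(int avoid_cpu, int (*retarget)(int cpu))
{
	u32 start = get_random_u32_below(nr_cpu_ids);
	int cpu;

	for_each_cpu_wrap(cpu, cpu_online_mask, start) {
		if (cpu == avoid_cpu)
			continue;
		if (!retarget(cpu))
			return 0;	/* channel successfully re-homed */
	}
	return -EBUSY;			/* caller falls back to VMBUS_CONNECT_CPU */
}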
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index f2e6f55d6ca6..b3b11be11650 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -31,8 +31,14 @@
#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>
+u64 hv_current_partition_id = HV_PARTITION_ID_SELF;
+EXPORT_SYMBOL_GPL(hv_current_partition_id);
+
+enum hv_partition_type hv_curr_partition_type;
+EXPORT_SYMBOL_GPL(hv_curr_partition_type);
+
/*
- * hv_root_partition, ms_hyperv and hv_nested are defined here with other
+ * ms_hyperv and hv_nested are defined here with other
* Hyper-V specific globals so they are shared across all architectures and are
* built only when CONFIG_HYPERV is defined. But on x86,
* ms_hyperv_init_platform() is built even when CONFIG_HYPERV is not
@@ -40,9 +46,6 @@
* here, allowing for an overriding definition in the module containing
* ms_hyperv_init_platform().
*/
-bool __weak hv_root_partition;
-EXPORT_SYMBOL_GPL(hv_root_partition);
-
bool __weak hv_nested;
EXPORT_SYMBOL_GPL(hv_nested);
@@ -66,6 +69,16 @@ static void hv_kmsg_dump_unregister(void);
static struct ctl_table_header *hv_ctl_table_hdr;
/*
+ * Per-cpu array holding the tail pointer for the SynIC event ring buffer
+ * for each SINT.
+ *
+ * We cannot maintain this in mshv driver because the tail pointer should
+ * persist even if the mshv driver is unloaded.
+ */
+u8 * __percpu *hv_synic_eventring_tail;
+EXPORT_SYMBOL_GPL(hv_synic_eventring_tail);
+
+/*
* Hyper-V specific initialization and shutdown code that is
* common across all architectures. Called from architecture
* specific initialization functions.
@@ -87,6 +100,9 @@ void __init hv_common_free(void)
free_percpu(hyperv_pcpu_input_arg);
hyperv_pcpu_input_arg = NULL;
+
+ free_percpu(hv_synic_eventring_tail);
+ hv_synic_eventring_tail = NULL;
}
/*
@@ -280,7 +296,26 @@ static void hv_kmsg_dump_register(void)
static inline bool hv_output_page_exists(void)
{
- return hv_root_partition || IS_ENABLED(CONFIG_HYPERV_VTL_MODE);
+ return hv_root_partition() || IS_ENABLED(CONFIG_HYPERV_VTL_MODE);
+}
+
+void __init hv_get_partition_id(void)
+{
+ struct hv_output_get_partition_id *output;
+ unsigned long flags;
+ u64 status, pt_id;
+
+ local_irq_save(flags);
+ output = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL, output);
+ pt_id = output->partition_id;
+ local_irq_restore(flags);
+
+ if (hv_result_success(status))
+ hv_current_partition_id = pt_id;
+ else
+ pr_err("Hyper-V: failed to get partition ID: %#x\n",
+ hv_result(status));
}
int __init hv_common_init(void)
@@ -350,6 +385,11 @@ int __init hv_common_init(void)
BUG_ON(!hyperv_pcpu_output_arg);
}
+ if (hv_root_partition()) {
+ hv_synic_eventring_tail = alloc_percpu(u8 *);
+ BUG_ON(!hv_synic_eventring_tail);
+ }
+
hv_vp_index = kmalloc_array(nr_cpu_ids, sizeof(*hv_vp_index),
GFP_KERNEL);
if (!hv_vp_index) {
@@ -438,11 +478,12 @@ error:
int hv_common_cpu_init(unsigned int cpu)
{
void **inputarg, **outputarg;
+ u8 **synic_eventring_tail;
u64 msr_vp_index;
gfp_t flags;
const int pgcount = hv_output_page_exists() ? 2 : 1;
void *mem;
- int ret;
+ int ret = 0;
/* hv_cpu_init() can be called with IRQs disabled from hv_resume() */
flags = irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL;
@@ -450,8 +491,8 @@ int hv_common_cpu_init(unsigned int cpu)
inputarg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
/*
- * hyperv_pcpu_input_arg and hyperv_pcpu_output_arg memory is already
- * allocated if this CPU was previously online and then taken offline
+ * The per-cpu memory is already allocated if this CPU was previously
+ * online and then taken offline
*/
if (!*inputarg) {
mem = kmalloc(pgcount * HV_HYP_PAGE_SIZE, flags);
@@ -498,11 +539,21 @@ int hv_common_cpu_init(unsigned int cpu)
if (msr_vp_index > hv_max_vp_index)
hv_max_vp_index = msr_vp_index;
- return 0;
+ if (hv_root_partition()) {
+ synic_eventring_tail = (u8 **)this_cpu_ptr(hv_synic_eventring_tail);
+ *synic_eventring_tail = kcalloc(HV_SYNIC_SINT_COUNT,
+ sizeof(u8), flags);
+ /* No need to unwind any of the above on failure here */
+ if (unlikely(!*synic_eventring_tail))
+ ret = -ENOMEM;
+ }
+
+ return ret;
}
int hv_common_cpu_die(unsigned int cpu)
{
+ u8 **synic_eventring_tail;
/*
* The hyperv_pcpu_input_arg and hyperv_pcpu_output_arg memory
* is not freed when the CPU goes offline as the hyperv_pcpu_input_arg
@@ -515,6 +566,10 @@ int hv_common_cpu_die(unsigned int cpu)
* originally allocated memory is reused in hv_common_cpu_init().
*/
+ synic_eventring_tail = this_cpu_ptr(hv_synic_eventring_tail);
+ kfree(*synic_eventring_tail);
+ *synic_eventring_tail = NULL;
+
return 0;
}
@@ -572,7 +627,7 @@ EXPORT_SYMBOL_GPL(hv_setup_dma_ops);
bool hv_is_hibernation_supported(void)
{
- return !hv_root_partition && acpi_sleep_state_supported(ACPI_STATE_S4);
+ return !hv_root_partition() && acpi_sleep_state_supported(ACPI_STATE_S4);
}
EXPORT_SYMBOL_GPL(hv_is_hibernation_supported);
@@ -625,6 +680,11 @@ void __weak hv_remove_vmbus_handler(void)
}
EXPORT_SYMBOL_GPL(hv_remove_vmbus_handler);
+void __weak hv_setup_mshv_handler(void (*handler)(void))
+{
+}
+EXPORT_SYMBOL_GPL(hv_setup_mshv_handler);
+
void __weak hv_setup_kexec_handler(void (*handler)(void))
{
}
@@ -661,3 +721,121 @@ u64 __weak hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
return HV_STATUS_INVALID_PARAMETER;
}
EXPORT_SYMBOL_GPL(hv_tdx_hypercall);
+
+void hv_identify_partition_type(void)
+{
+ /* Assume guest role */
+ hv_curr_partition_type = HV_PARTITION_TYPE_GUEST;
+ /*
+ * Check partition creation and cpu management privileges
+ *
+ * Hyper-V should never specify running as root and as a Confidential
+ * VM. But to protect against a compromised/malicious Hyper-V trying
+ * to exploit root behavior to expose Confidential VM memory, ignore
+ * the root partition setting if also a Confidential VM.
+ */
+ if ((ms_hyperv.priv_high & HV_CREATE_PARTITIONS) &&
+ (ms_hyperv.priv_high & HV_CPU_MANAGEMENT) &&
+ !(ms_hyperv.priv_high & HV_ISOLATION)) {
+ pr_info("Hyper-V: running as root partition\n");
+ if (IS_ENABLED(CONFIG_MSHV_ROOT))
+ hv_curr_partition_type = HV_PARTITION_TYPE_ROOT;
+ else
+ pr_crit("Hyper-V: CONFIG_MSHV_ROOT not enabled!\n");
+ }
+}
+
+struct hv_status_info {
+ char *string;
+ int errno;
+ u16 code;
+};
+
+/*
+ * Note on the errno mappings:
+ * A failed hypercall is usually only recoverable (or loggable) near
+ * the call site where the HV_STATUS_* code is known. So the errno
+ * it gets converted to is not too useful further up the stack.
+ * Provide a few mappings that could be useful, and revert to -EIO
+ * as a fallback.
+ */
+static const struct hv_status_info hv_status_infos[] = {
+#define _STATUS_INFO(status, errno) { #status, (errno), (status) }
+ _STATUS_INFO(HV_STATUS_SUCCESS, 0),
+ _STATUS_INFO(HV_STATUS_INVALID_HYPERCALL_CODE, -EINVAL),
+ _STATUS_INFO(HV_STATUS_INVALID_HYPERCALL_INPUT, -EINVAL),
+ _STATUS_INFO(HV_STATUS_INVALID_ALIGNMENT, -EIO),
+ _STATUS_INFO(HV_STATUS_INVALID_PARAMETER, -EINVAL),
+ _STATUS_INFO(HV_STATUS_ACCESS_DENIED, -EIO),
+ _STATUS_INFO(HV_STATUS_INVALID_PARTITION_STATE, -EIO),
+ _STATUS_INFO(HV_STATUS_OPERATION_DENIED, -EIO),
+ _STATUS_INFO(HV_STATUS_UNKNOWN_PROPERTY, -EIO),
+ _STATUS_INFO(HV_STATUS_PROPERTY_VALUE_OUT_OF_RANGE, -EIO),
+ _STATUS_INFO(HV_STATUS_INSUFFICIENT_MEMORY, -ENOMEM),
+ _STATUS_INFO(HV_STATUS_INVALID_PARTITION_ID, -EINVAL),
+ _STATUS_INFO(HV_STATUS_INVALID_VP_INDEX, -EINVAL),
+ _STATUS_INFO(HV_STATUS_NOT_FOUND, -EIO),
+ _STATUS_INFO(HV_STATUS_INVALID_PORT_ID, -EINVAL),
+ _STATUS_INFO(HV_STATUS_INVALID_CONNECTION_ID, -EINVAL),
+ _STATUS_INFO(HV_STATUS_INSUFFICIENT_BUFFERS, -EIO),
+ _STATUS_INFO(HV_STATUS_NOT_ACKNOWLEDGED, -EIO),
+ _STATUS_INFO(HV_STATUS_INVALID_VP_STATE, -EIO),
+ _STATUS_INFO(HV_STATUS_NO_RESOURCES, -EIO),
+ _STATUS_INFO(HV_STATUS_PROCESSOR_FEATURE_NOT_SUPPORTED, -EIO),
+ _STATUS_INFO(HV_STATUS_INVALID_LP_INDEX, -EINVAL),
+ _STATUS_INFO(HV_STATUS_INVALID_REGISTER_VALUE, -EINVAL),
+ _STATUS_INFO(HV_STATUS_OPERATION_FAILED, -EIO),
+ _STATUS_INFO(HV_STATUS_TIME_OUT, -EIO),
+ _STATUS_INFO(HV_STATUS_CALL_PENDING, -EIO),
+ _STATUS_INFO(HV_STATUS_VTL_ALREADY_ENABLED, -EIO),
+#undef _STATUS_INFO
+};
+
+static inline const struct hv_status_info *find_hv_status_info(u64 hv_status)
+{
+ int i;
+ u16 code = hv_result(hv_status);
+
+ for (i = 0; i < ARRAY_SIZE(hv_status_infos); ++i) {
+ const struct hv_status_info *info = &hv_status_infos[i];
+
+ if (info->code == code)
+ return info;
+ }
+
+ return NULL;
+}
+
+/* Convert a hypercall result into a linux-friendly error code. */
+int hv_result_to_errno(u64 status)
+{
+ const struct hv_status_info *info;
+
+ /* hv_do_hypercall() may return U64_MAX when hypercalls aren't possible */
+ if (unlikely(status == U64_MAX))
+ return -EOPNOTSUPP;
+
+ info = find_hv_status_info(status);
+ if (info)
+ return info->errno;
+
+ return -EIO;
+}
+EXPORT_SYMBOL_GPL(hv_result_to_errno);
+
+const char *hv_result_to_string(u64 status)
+{
+ const struct hv_status_info *info;
+
+ if (unlikely(status == U64_MAX))
+ return "Hypercall page missing!";
+
+ info = find_hv_status_info(status);
+ if (info)
+ return info->string;
+
+ return "Unknown";
+}
+EXPORT_SYMBOL_GPL(hv_result_to_string);
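
The two helpers exported above are meant to keep hypercall call sites short: log the symbolic status once, then return a Linux errno. A typical caller would look roughly like this sketch (not taken from the series):

static int issue_hypercall(u64 code, void *input, void *output)
{
	u64 status = hv_do_hypercall(code, input, output);

	if (hv_result_success(status))
		return 0;

	pr_err("hypercall failed: %s\n", hv_result_to_string(status));
	return hv_result_to_errno(status);
}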
diff --git a/drivers/hv/hv_proc.c b/drivers/hv/hv_proc.c
new file mode 100644
index 000000000000..7d7ecb6f6137
--- /dev/null
+++ b/drivers/hv/hv_proc.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/clockchips.h>
+#include <linux/slab.h>
+#include <linux/cpuhotplug.h>
+#include <linux/minmax.h>
+#include <asm/mshyperv.h>
+
+/*
+ * See struct hv_deposit_memory. The first u64 is partition ID, the rest
+ * are GPAs.
+ */
+#define HV_DEPOSIT_MAX (HV_HYP_PAGE_SIZE / sizeof(u64) - 1)
+
+/* Deposits exact number of pages. Must be called with interrupts enabled. */
+int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages)
+{
+ struct page **pages, *page;
+ int *counts;
+ int num_allocations;
+ int i, j, page_count;
+ int order;
+ u64 status;
+ int ret;
+ u64 base_pfn;
+ struct hv_deposit_memory *input_page;
+ unsigned long flags;
+
+ if (num_pages > HV_DEPOSIT_MAX)
+ return -E2BIG;
+ if (!num_pages)
+ return 0;
+
+ /* One buffer for page pointers and counts */
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+ pages = page_address(page);
+
+ counts = kcalloc(HV_DEPOSIT_MAX, sizeof(int), GFP_KERNEL);
+ if (!counts) {
+ free_page((unsigned long)pages);
+ return -ENOMEM;
+ }
+
+ /* Allocate all the pages before disabling interrupts */
+ i = 0;
+
+ while (num_pages) {
+ /* Find highest order we can actually allocate */
+ order = 31 - __builtin_clz(num_pages);
+
+ while (1) {
+ pages[i] = alloc_pages_node(node, GFP_KERNEL, order);
+ if (pages[i])
+ break;
+ if (!order) {
+ ret = -ENOMEM;
+ num_allocations = i;
+ goto err_free_allocations;
+ }
+ --order;
+ }
+
+ split_page(pages[i], order);
+ counts[i] = 1 << order;
+ num_pages -= counts[i];
+ i++;
+ }
+ num_allocations = i;
+
+ local_irq_save(flags);
+
+ input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ input_page->partition_id = partition_id;
+
+ /* Populate gpa_page_list - these will fit on the input page */
+ for (i = 0, page_count = 0; i < num_allocations; ++i) {
+ base_pfn = page_to_pfn(pages[i]);
+ for (j = 0; j < counts[i]; ++j, ++page_count)
+ input_page->gpa_page_list[page_count] = base_pfn + j;
+ }
+ status = hv_do_rep_hypercall(HVCALL_DEPOSIT_MEMORY,
+ page_count, 0, input_page, NULL);
+ local_irq_restore(flags);
+ if (!hv_result_success(status)) {
+ hv_status_err(status, "\n");
+ ret = hv_result_to_errno(status);
+ goto err_free_allocations;
+ }
+
+ ret = 0;
+ goto free_buf;
+
+err_free_allocations:
+ for (i = 0; i < num_allocations; ++i) {
+ base_pfn = page_to_pfn(pages[i]);
+ for (j = 0; j < counts[i]; ++j)
+ __free_page(pfn_to_page(base_pfn + j));
+ }
+
+free_buf:
+ free_page((unsigned long)pages);
+ kfree(counts);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hv_call_deposit_pages);
+
+int hv_call_add_logical_proc(int node, u32 lp_index, u32 apic_id)
+{
+ struct hv_input_add_logical_processor *input;
+ struct hv_output_add_logical_processor *output;
+ u64 status;
+ unsigned long flags;
+ int ret = 0;
+
+ /*
+ * When adding a logical processor, the hypervisor may return
+ * HV_STATUS_INSUFFICIENT_MEMORY. When that happens, we deposit more
+ * pages and retry.
+ */
+ do {
+ local_irq_save(flags);
+
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ /* We don't do anything with the output right now */
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+
+ input->lp_index = lp_index;
+ input->apic_id = apic_id;
+ input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
+ status = hv_do_hypercall(HVCALL_ADD_LOGICAL_PROCESSOR,
+ input, output);
+ local_irq_restore(flags);
+
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ if (!hv_result_success(status)) {
+ hv_status_err(status, "cpu %u apic ID: %u\n",
+ lp_index, apic_id);
+ ret = hv_result_to_errno(status);
+ }
+ break;
+ }
+ ret = hv_call_deposit_pages(node, hv_current_partition_id, 1);
+ } while (!ret);
+
+ return ret;
+}
+
+int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
+{
+ struct hv_create_vp *input;
+ u64 status;
+ unsigned long irq_flags;
+ int ret = 0;
+
+ /* Root VPs don't seem to need pages deposited */
+ if (partition_id != hv_current_partition_id) {
+ /* The value 90 is empirically determined. It may change. */
+ ret = hv_call_deposit_pages(node, partition_id, 90);
+ if (ret)
+ return ret;
+ }
+
+ do {
+ local_irq_save(irq_flags);
+
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ input->partition_id = partition_id;
+ input->vp_index = vp_index;
+ input->flags = flags;
+ input->subnode_type = HV_SUBNODE_ANY;
+ input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
+ status = hv_do_hypercall(HVCALL_CREATE_VP, input, NULL);
+ local_irq_restore(irq_flags);
+
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ if (!hv_result_success(status)) {
+ hv_status_err(status, "vcpu: %u, lp: %u\n",
+ vp_index, flags);
+ ret = hv_result_to_errno(status);
+ }
+ break;
+ }
+ ret = hv_call_deposit_pages(node, partition_id, 1);
+
+ } while (!ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hv_call_create_vp);
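
hv_call_deposit_pages() above carves the requested count into the largest power-of-two chunks the page allocator will provide, stepping the order down on failure; "31 - __builtin_clz(n)" is simply ilog2(n). A compact sketch of the decomposition with the allocation itself elided:

#include <linux/log2.h>

/* e.g. 90 pages -> chunks of 64, 16, 8 and 2 when every order succeeds */
static void decompose_pages(unsigned int num_pages,
			    void (*take_chunk)(unsigned int order))
{
	while (num_pages) {
		unsigned int order = ilog2(num_pages);	/* == 31 - clz */

		take_chunk(order);
		num_pages -= 1U << order;
	}
}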
diff --git a/drivers/hv/mshv.h b/drivers/hv/mshv.h
new file mode 100644
index 000000000000..0340a67acd0a
--- /dev/null
+++ b/drivers/hv/mshv.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Microsoft Corporation.
+ */
+
+#ifndef _MSHV_H_
+#define _MSHV_H_
+
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <hyperv/hvhdk.h>
+
+#define mshv_field_nonzero(STRUCT, MEMBER) \
+ memchr_inv(&((STRUCT).MEMBER), \
+ 0, sizeof_field(typeof(STRUCT), MEMBER))
+
+int hv_call_get_vp_registers(u32 vp_index, u64 partition_id, u16 count,
+ union hv_input_vtl input_vtl,
+ struct hv_register_assoc *registers);
+
+int hv_call_set_vp_registers(u32 vp_index, u64 partition_id, u16 count,
+ union hv_input_vtl input_vtl,
+ struct hv_register_assoc *registers);
+
+int hv_call_get_partition_property(u64 partition_id, u64 property_code,
+ u64 *property_value);
+
+int mshv_do_pre_guest_mode_work(ulong th_flags);
+
+#endif /* _MSHV_H */
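
mshv_field_nonzero() wraps memchr_inv() so reserved fields in user-supplied structures can be checked in a single expression. A hedged usage sketch with a hypothetical argument struct:

#include <linux/types.h>
#include "mshv.h"

struct example_user_args {
	__u64 flags;
	__u8 rsvd[6];		/* must be zero */
};

static bool example_args_are_sane(struct example_user_args args)
{
	/* memchr_inv() returns NULL when every byte of rsvd is zero */
	return !mshv_field_nonzero(args, rsvd);
}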
diff --git a/drivers/hv/mshv_common.c b/drivers/hv/mshv_common.c
new file mode 100644
index 000000000000..2575e6d7a71f
--- /dev/null
+++ b/drivers/hv/mshv_common.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Microsoft Corporation.
+ *
+ * This file contains functions that will be called from one or more modules.
+ * If any of these modules are configured to build, this file is built and just
+ * statically linked in.
+ *
+ * Authors: Microsoft Linux virtualization team
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <asm/mshyperv.h>
+#include <linux/resume_user_mode.h>
+
+#include "mshv.h"
+
+#define HV_GET_REGISTER_BATCH_SIZE \
+ (HV_HYP_PAGE_SIZE / sizeof(union hv_register_value))
+#define HV_SET_REGISTER_BATCH_SIZE \
+ ((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_set_vp_registers)) \
+ / sizeof(struct hv_register_assoc))
+
+int hv_call_get_vp_registers(u32 vp_index, u64 partition_id, u16 count,
+ union hv_input_vtl input_vtl,
+ struct hv_register_assoc *registers)
+{
+ struct hv_input_get_vp_registers *input_page;
+ union hv_register_value *output_page;
+ u16 completed = 0;
+ unsigned long remaining = count;
+ int rep_count, i;
+ u64 status = HV_STATUS_SUCCESS;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output_page = *this_cpu_ptr(hyperv_pcpu_output_arg);
+
+ input_page->partition_id = partition_id;
+ input_page->vp_index = vp_index;
+ input_page->input_vtl.as_uint8 = input_vtl.as_uint8;
+ input_page->rsvd_z8 = 0;
+ input_page->rsvd_z16 = 0;
+
+ while (remaining) {
+ rep_count = min(remaining, HV_GET_REGISTER_BATCH_SIZE);
+ for (i = 0; i < rep_count; ++i)
+ input_page->names[i] = registers[i].name;
+
+ status = hv_do_rep_hypercall(HVCALL_GET_VP_REGISTERS, rep_count,
+ 0, input_page, output_page);
+ if (!hv_result_success(status))
+ break;
+
+ completed = hv_repcomp(status);
+ for (i = 0; i < completed; ++i)
+ registers[i].value = output_page[i];
+
+ registers += completed;
+ remaining -= completed;
+ }
+ local_irq_restore(flags);
+
+ return hv_result_to_errno(status);
+}
+EXPORT_SYMBOL_GPL(hv_call_get_vp_registers);
+
+int hv_call_set_vp_registers(u32 vp_index, u64 partition_id, u16 count,
+ union hv_input_vtl input_vtl,
+ struct hv_register_assoc *registers)
+{
+ struct hv_input_set_vp_registers *input_page;
+ u16 completed = 0;
+ unsigned long remaining = count;
+ int rep_count;
+ u64 status = HV_STATUS_SUCCESS;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ input_page->partition_id = partition_id;
+ input_page->vp_index = vp_index;
+ input_page->input_vtl.as_uint8 = input_vtl.as_uint8;
+ input_page->rsvd_z8 = 0;
+ input_page->rsvd_z16 = 0;
+
+ while (remaining) {
+ rep_count = min(remaining, HV_SET_REGISTER_BATCH_SIZE);
+ memcpy(input_page->elements, registers,
+ sizeof(struct hv_register_assoc) * rep_count);
+
+ status = hv_do_rep_hypercall(HVCALL_SET_VP_REGISTERS, rep_count,
+ 0, input_page, NULL);
+ if (!hv_result_success(status))
+ break;
+
+ completed = hv_repcomp(status);
+ registers += completed;
+ remaining -= completed;
+ }
+
+ local_irq_restore(flags);
+
+ return hv_result_to_errno(status);
+}
+EXPORT_SYMBOL_GPL(hv_call_set_vp_registers);
+
+int hv_call_get_partition_property(u64 partition_id,
+ u64 property_code,
+ u64 *property_value)
+{
+ u64 status;
+ unsigned long flags;
+ struct hv_input_get_partition_property *input;
+ struct hv_output_get_partition_property *output;
+
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+ memset(input, 0, sizeof(*input));
+ input->partition_id = partition_id;
+ input->property_code = property_code;
+ status = hv_do_hypercall(HVCALL_GET_PARTITION_PROPERTY, input, output);
+
+ if (!hv_result_success(status)) {
+ local_irq_restore(flags);
+ return hv_result_to_errno(status);
+ }
+ *property_value = output->property_value;
+
+ local_irq_restore(flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hv_call_get_partition_property);
+
+/*
+ * Handle any pre-processing before going into the guest mode on this cpu, most
+ * notably call schedule(). Must be invoked with both preemption and
+ * interrupts enabled.
+ *
+ * Returns: 0 on success, -errno on error.
+ */
+int mshv_do_pre_guest_mode_work(ulong th_flags)
+{
+ if (th_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
+ return -EINTR;
+
+ if (th_flags & _TIF_NEED_RESCHED)
+ schedule();
+
+ if (th_flags & _TIF_NOTIFY_RESUME)
+ resume_user_mode_work(NULL);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mshv_do_pre_guest_mode_work);
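
hv_call_get_vp_registers() batches name/value pairs through the rep hypercall, so a single-register read reduces to filling one hv_register_assoc. A hypothetical caller follows; the register-name parameter is a stand-in, and real callers use the name constants and exact union layout from hyperv/hvhdk.h:

static int read_vp_reg_sketch(u32 vp_index, u64 partition_id,
			      u32 reg_name, u64 *value_out)
{
	struct hv_register_assoc reg = { .name = reg_name };
	union hv_input_vtl vtl = { .as_uint8 = 0 };	/* current/self VTL */
	int ret;

	ret = hv_call_get_vp_registers(vp_index, partition_id, 1, vtl, &reg);
	if (!ret)
		*value_out = reg.value.reg64;	/* 64-bit view of the value union */
	return ret;
}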
diff --git a/drivers/hv/mshv_eventfd.c b/drivers/hv/mshv_eventfd.c
new file mode 100644
index 000000000000..8dd22be2ca0b
--- /dev/null
+++ b/drivers/hv/mshv_eventfd.c
@@ -0,0 +1,833 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * eventfd support for mshv
+ *
+ * Heavily inspired by the KVM implementation of irqfd/ioeventfd. The basic
+ * framework code is taken from the KVM implementation.
+ *
+ * All credit to the KVM developers.
+ */
+
+#include <linux/syscalls.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/file.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/eventfd.h>
+
+#if IS_ENABLED(CONFIG_X86_64)
+#include <asm/apic.h>
+#endif
+#include <asm/mshyperv.h>
+
+#include "mshv_eventfd.h"
+#include "mshv.h"
+#include "mshv_root.h"
+
+static struct workqueue_struct *irqfd_cleanup_wq;
+
+void mshv_register_irq_ack_notifier(struct mshv_partition *partition,
+ struct mshv_irq_ack_notifier *mian)
+{
+ mutex_lock(&partition->pt_irq_lock);
+ hlist_add_head_rcu(&mian->link, &partition->irq_ack_notifier_list);
+ mutex_unlock(&partition->pt_irq_lock);
+}
+
+void mshv_unregister_irq_ack_notifier(struct mshv_partition *partition,
+ struct mshv_irq_ack_notifier *mian)
+{
+ mutex_lock(&partition->pt_irq_lock);
+ hlist_del_init_rcu(&mian->link);
+ mutex_unlock(&partition->pt_irq_lock);
+ synchronize_rcu();
+}
+
+bool mshv_notify_acked_gsi(struct mshv_partition *partition, int gsi)
+{
+ struct mshv_irq_ack_notifier *mian;
+ bool acked = false;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(mian, &partition->irq_ack_notifier_list,
+ link) {
+ if (mian->irq_ack_gsi == gsi) {
+ mian->irq_acked(mian);
+ acked = true;
+ }
+ }
+ rcu_read_unlock();
+
+ return acked;
+}
+
+#if IS_ENABLED(CONFIG_ARM64)
+static inline bool hv_should_clear_interrupt(enum hv_interrupt_type type)
+{
+ return false;
+}
+#elif IS_ENABLED(CONFIG_X86_64)
+static inline bool hv_should_clear_interrupt(enum hv_interrupt_type type)
+{
+ return type == HV_X64_INTERRUPT_TYPE_EXTINT;
+}
+#endif
+
+static void mshv_irqfd_resampler_ack(struct mshv_irq_ack_notifier *mian)
+{
+ struct mshv_irqfd_resampler *resampler;
+ struct mshv_partition *partition;
+ struct mshv_irqfd *irqfd;
+ int idx;
+
+ resampler = container_of(mian, struct mshv_irqfd_resampler,
+ rsmplr_notifier);
+ partition = resampler->rsmplr_partn;
+
+ idx = srcu_read_lock(&partition->pt_irq_srcu);
+
+ hlist_for_each_entry_rcu(irqfd, &resampler->rsmplr_irqfd_list,
+ irqfd_resampler_hnode) {
+ if (hv_should_clear_interrupt(irqfd->irqfd_lapic_irq.lapic_control.interrupt_type))
+ hv_call_clear_virtual_interrupt(partition->pt_id);
+
+ eventfd_signal(irqfd->irqfd_resamplefd);
+ }
+
+ srcu_read_unlock(&partition->pt_irq_srcu, idx);
+}
+
+#if IS_ENABLED(CONFIG_X86_64)
+static bool
+mshv_vp_irq_vector_injected(union hv_vp_register_page_interrupt_vectors iv,
+ u32 vector)
+{
+ int i;
+
+ for (i = 0; i < iv.vector_count; i++) {
+ if (iv.vector[i] == vector)
+ return true;
+ }
+
+ return false;
+}
+
+static int mshv_vp_irq_try_set_vector(struct mshv_vp *vp, u32 vector)
+{
+ union hv_vp_register_page_interrupt_vectors iv, new_iv;
+
+ iv = vp->vp_register_page->interrupt_vectors;
+ new_iv = iv;
+
+ if (mshv_vp_irq_vector_injected(iv, vector))
+ return 0;
+
+ if (iv.vector_count >= HV_VP_REGISTER_PAGE_MAX_VECTOR_COUNT)
+ return -ENOSPC;
+
+ new_iv.vector[new_iv.vector_count++] = vector;
+
+ if (cmpxchg(&vp->vp_register_page->interrupt_vectors.as_uint64,
+ iv.as_uint64, new_iv.as_uint64) != iv.as_uint64)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int mshv_vp_irq_set_vector(struct mshv_vp *vp, u32 vector)
+{
+ int ret;
+
+ do {
+ ret = mshv_vp_irq_try_set_vector(vp, vector);
+ } while (ret == -EAGAIN && !need_resched());
+
+ return ret;
+}
+
+/*
+ * Try to raise irq for guest via shared vector array. hyp does the actual
+ * inject of the interrupt.
+ */
+static int mshv_try_assert_irq_fast(struct mshv_irqfd *irqfd)
+{
+ struct mshv_partition *partition = irqfd->irqfd_partn;
+ struct mshv_lapic_irq *irq = &irqfd->irqfd_lapic_irq;
+ struct mshv_vp *vp;
+
+ if (!(ms_hyperv.ext_features &
+ HV_VP_DISPATCH_INTERRUPT_INJECTION_AVAILABLE))
+ return -EOPNOTSUPP;
+
+ if (hv_scheduler_type != HV_SCHEDULER_TYPE_ROOT)
+ return -EOPNOTSUPP;
+
+ if (irq->lapic_control.logical_dest_mode)
+ return -EOPNOTSUPP;
+
+ vp = partition->pt_vp_array[irq->lapic_apic_id];
+
+ if (!vp->vp_register_page)
+ return -EOPNOTSUPP;
+
+ if (mshv_vp_irq_set_vector(vp, irq->lapic_vector))
+ return -EINVAL;
+
+ if (vp->run.flags.root_sched_dispatched &&
+ vp->vp_register_page->interrupt_vectors.as_uint64)
+ return -EBUSY;
+
+ wake_up(&vp->run.vp_suspend_queue);
+
+ return 0;
+}
+#else /* CONFIG_X86_64 */
+static int mshv_try_assert_irq_fast(struct mshv_irqfd *irqfd)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+static void mshv_assert_irq_slow(struct mshv_irqfd *irqfd)
+{
+ struct mshv_partition *partition = irqfd->irqfd_partn;
+ struct mshv_lapic_irq *irq = &irqfd->irqfd_lapic_irq;
+ unsigned int seq;
+ int idx;
+
+ WARN_ON(irqfd->irqfd_resampler &&
+ !irq->lapic_control.level_triggered);
+
+ idx = srcu_read_lock(&partition->pt_irq_srcu);
+ if (irqfd->irqfd_girq_ent.guest_irq_num) {
+ if (!irqfd->irqfd_girq_ent.girq_entry_valid) {
+ srcu_read_unlock(&partition->pt_irq_srcu, idx);
+ return;
+ }
+
+ do {
+ seq = read_seqcount_begin(&irqfd->irqfd_irqe_sc);
+ } while (read_seqcount_retry(&irqfd->irqfd_irqe_sc, seq));
+ }
+
+ hv_call_assert_virtual_interrupt(irqfd->irqfd_partn->pt_id,
+ irq->lapic_vector, irq->lapic_apic_id,
+ irq->lapic_control);
+ srcu_read_unlock(&partition->pt_irq_srcu, idx);
+}
+
+static void mshv_irqfd_resampler_shutdown(struct mshv_irqfd *irqfd)
+{
+ struct mshv_irqfd_resampler *rp = irqfd->irqfd_resampler;
+ struct mshv_partition *pt = rp->rsmplr_partn;
+
+ mutex_lock(&pt->irqfds_resampler_lock);
+
+ hlist_del_rcu(&irqfd->irqfd_resampler_hnode);
+ synchronize_srcu(&pt->pt_irq_srcu);
+
+ if (hlist_empty(&rp->rsmplr_irqfd_list)) {
+ hlist_del(&rp->rsmplr_hnode);
+ mshv_unregister_irq_ack_notifier(pt, &rp->rsmplr_notifier);
+ kfree(rp);
+ }
+
+ mutex_unlock(&pt->irqfds_resampler_lock);
+}
+
+/*
+ * Race-free decouple logic (ordering is critical)
+ */
+static void mshv_irqfd_shutdown(struct work_struct *work)
+{
+ struct mshv_irqfd *irqfd =
+ container_of(work, struct mshv_irqfd, irqfd_shutdown);
+
+ /*
+ * Synchronize with the wait-queue and unhook ourselves to prevent
+ * further events.
+ */
+ remove_wait_queue(irqfd->irqfd_wqh, &irqfd->irqfd_wait);
+
+ if (irqfd->irqfd_resampler) {
+ mshv_irqfd_resampler_shutdown(irqfd);
+ eventfd_ctx_put(irqfd->irqfd_resamplefd);
+ }
+
+ /*
+ * It is now safe to release the object's resources
+ */
+ eventfd_ctx_put(irqfd->irqfd_eventfd_ctx);
+ kfree(irqfd);
+}
+
+/* assumes partition->pt_irqfds_lock is held */
+static bool mshv_irqfd_is_active(struct mshv_irqfd *irqfd)
+{
+ return !hlist_unhashed(&irqfd->irqfd_hnode);
+}
+
+/*
+ * Mark the irqfd as inactive and schedule it for removal
+ *
+ * assumes partition->pt_irqfds_lock is held
+ */
+static void mshv_irqfd_deactivate(struct mshv_irqfd *irqfd)
+{
+ if (!mshv_irqfd_is_active(irqfd))
+ return;
+
+ hlist_del(&irqfd->irqfd_hnode);
+
+ queue_work(irqfd_cleanup_wq, &irqfd->irqfd_shutdown);
+}
+
+/*
+ * Called with wqh->lock held and interrupts disabled
+ */
+static int mshv_irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode,
+ int sync, void *key)
+{
+ struct mshv_irqfd *irqfd = container_of(wait, struct mshv_irqfd,
+ irqfd_wait);
+ unsigned long flags = (unsigned long)key;
+ int idx;
+ unsigned int seq;
+ struct mshv_partition *pt = irqfd->irqfd_partn;
+ int ret = 0;
+
+ if (flags & POLLIN) {
+ u64 cnt;
+
+ eventfd_ctx_do_read(irqfd->irqfd_eventfd_ctx, &cnt);
+ idx = srcu_read_lock(&pt->pt_irq_srcu);
+ do {
+ seq = read_seqcount_begin(&irqfd->irqfd_irqe_sc);
+ } while (read_seqcount_retry(&irqfd->irqfd_irqe_sc, seq));
+
+ /* An event has been signaled, raise an interrupt */
+ ret = mshv_try_assert_irq_fast(irqfd);
+ if (ret)
+ mshv_assert_irq_slow(irqfd);
+
+ srcu_read_unlock(&pt->pt_irq_srcu, idx);
+
+ ret = 1;
+ }
+
+ if (flags & POLLHUP) {
+ /* The eventfd is closing, detach from the partition */
+ unsigned long flags;
+
+ spin_lock_irqsave(&pt->pt_irqfds_lock, flags);
+
+ /*
+ * We must check if someone deactivated the irqfd before
+ * we could acquire the pt_irqfds_lock since the item is
+ * deactivated from the mshv side before it is unhooked from
+ * the wait-queue. If it is already deactivated, we can
+ * simply return knowing the other side will cleanup for us.
+ * We cannot race against the irqfd going away since the
+ * other side is required to acquire wqh->lock, which we hold
+ */
+ if (mshv_irqfd_is_active(irqfd))
+ mshv_irqfd_deactivate(irqfd);
+
+ spin_unlock_irqrestore(&pt->pt_irqfds_lock, flags);
+ }
+
+ return ret;
+}
+
+/* Must be called under pt_irqfds_lock */
+static void mshv_irqfd_update(struct mshv_partition *pt,
+ struct mshv_irqfd *irqfd)
+{
+ write_seqcount_begin(&irqfd->irqfd_irqe_sc);
+ irqfd->irqfd_girq_ent = mshv_ret_girq_entry(pt,
+ irqfd->irqfd_irqnum);
+ mshv_copy_girq_info(&irqfd->irqfd_girq_ent, &irqfd->irqfd_lapic_irq);
+ write_seqcount_end(&irqfd->irqfd_irqe_sc);
+}
+
+void mshv_irqfd_routing_update(struct mshv_partition *pt)
+{
+ struct mshv_irqfd *irqfd;
+
+ spin_lock_irq(&pt->pt_irqfds_lock);
+ hlist_for_each_entry(irqfd, &pt->pt_irqfds_list, irqfd_hnode)
+ mshv_irqfd_update(pt, irqfd);
+ spin_unlock_irq(&pt->pt_irqfds_lock);
+}
+
+static void mshv_irqfd_queue_proc(struct file *file, wait_queue_head_t *wqh,
+ poll_table *polltbl)
+{
+ struct mshv_irqfd *irqfd =
+ container_of(polltbl, struct mshv_irqfd, irqfd_polltbl);
+
+ irqfd->irqfd_wqh = wqh;
+ add_wait_queue_priority(wqh, &irqfd->irqfd_wait);
+}
+
+static int mshv_irqfd_assign(struct mshv_partition *pt,
+ struct mshv_user_irqfd *args)
+{
+ struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
+ struct mshv_irqfd *irqfd, *tmp;
+ unsigned int events;
+ struct fd f;
+ int ret;
+ int idx;
+
+ irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
+ if (!irqfd)
+ return -ENOMEM;
+
+ irqfd->irqfd_partn = pt;
+ irqfd->irqfd_irqnum = args->gsi;
+ INIT_WORK(&irqfd->irqfd_shutdown, mshv_irqfd_shutdown);
+ seqcount_spinlock_init(&irqfd->irqfd_irqe_sc, &pt->pt_irqfds_lock);
+
+ f = fdget(args->fd);
+ if (!fd_file(f)) {
+ ret = -EBADF;
+ goto out;
+ }
+
+ eventfd = eventfd_ctx_fileget(fd_file(f));
+ if (IS_ERR(eventfd)) {
+ ret = PTR_ERR(eventfd);
+ goto fail;
+ }
+
+ irqfd->irqfd_eventfd_ctx = eventfd;
+
+ if (args->flags & BIT(MSHV_IRQFD_BIT_RESAMPLE)) {
+ struct mshv_irqfd_resampler *rp;
+
+ resamplefd = eventfd_ctx_fdget(args->resamplefd);
+ if (IS_ERR(resamplefd)) {
+ ret = PTR_ERR(resamplefd);
+ goto fail;
+ }
+
+ irqfd->irqfd_resamplefd = resamplefd;
+
+ mutex_lock(&pt->irqfds_resampler_lock);
+
+ hlist_for_each_entry(rp, &pt->irqfds_resampler_list,
+ rsmplr_hnode) {
+ if (rp->rsmplr_notifier.irq_ack_gsi ==
+ irqfd->irqfd_irqnum) {
+ irqfd->irqfd_resampler = rp;
+ break;
+ }
+ }
+
+ if (!irqfd->irqfd_resampler) {
+ rp = kzalloc(sizeof(*rp), GFP_KERNEL_ACCOUNT);
+ if (!rp) {
+ ret = -ENOMEM;
+ mutex_unlock(&pt->irqfds_resampler_lock);
+ goto fail;
+ }
+
+ rp->rsmplr_partn = pt;
+ INIT_HLIST_HEAD(&rp->rsmplr_irqfd_list);
+ rp->rsmplr_notifier.irq_ack_gsi = irqfd->irqfd_irqnum;
+ rp->rsmplr_notifier.irq_acked =
+ mshv_irqfd_resampler_ack;
+
+ hlist_add_head(&rp->rsmplr_hnode,
+ &pt->irqfds_resampler_list);
+ mshv_register_irq_ack_notifier(pt,
+ &rp->rsmplr_notifier);
+ irqfd->irqfd_resampler = rp;
+ }
+
+ hlist_add_head_rcu(&irqfd->irqfd_resampler_hnode,
+ &irqfd->irqfd_resampler->rsmplr_irqfd_list);
+
+ mutex_unlock(&pt->irqfds_resampler_lock);
+ }
+
+ /*
+ * Install our own custom wake-up handling so we are notified via
+ * a callback whenever someone signals the underlying eventfd
+ */
+ init_waitqueue_func_entry(&irqfd->irqfd_wait, mshv_irqfd_wakeup);
+ init_poll_funcptr(&irqfd->irqfd_polltbl, mshv_irqfd_queue_proc);
+
+ spin_lock_irq(&pt->pt_irqfds_lock);
+ if (args->flags & BIT(MSHV_IRQFD_BIT_RESAMPLE) &&
+ !irqfd->irqfd_lapic_irq.lapic_control.level_triggered) {
+ /*
+ * A resample fd is only valid for a level-triggered interrupt;
+ * otherwise return with failure.
+ */
+ spin_unlock_irq(&pt->pt_irqfds_lock);
+ ret = -EINVAL;
+ goto fail;
+ }
+ ret = 0;
+ hlist_for_each_entry(tmp, &pt->pt_irqfds_list, irqfd_hnode) {
+ if (irqfd->irqfd_eventfd_ctx != tmp->irqfd_eventfd_ctx)
+ continue;
+ /* This fd is used for another irq already. */
+ ret = -EBUSY;
+ spin_unlock_irq(&pt->pt_irqfds_lock);
+ goto fail;
+ }
+
+ idx = srcu_read_lock(&pt->pt_irq_srcu);
+ mshv_irqfd_update(pt, irqfd);
+ hlist_add_head(&irqfd->irqfd_hnode, &pt->pt_irqfds_list);
+ spin_unlock_irq(&pt->pt_irqfds_lock);
+
+ /*
+ * Check if there was an event already pending on the eventfd
+ * before we registered, and trigger it as if we didn't miss it.
+ */
+ events = vfs_poll(fd_file(f), &irqfd->irqfd_polltbl);
+
+ if (events & POLLIN)
+ mshv_assert_irq_slow(irqfd);
+
+ srcu_read_unlock(&pt->pt_irq_srcu, idx);
+ /*
+ * Do not drop the file reference until the irqfd is fully initialized;
+ * otherwise we might race against POLLHUP.
+ */
+ fdput(f);
+
+ return 0;
+
+fail:
+ if (irqfd->irqfd_resampler)
+ mshv_irqfd_resampler_shutdown(irqfd);
+
+ if (resamplefd && !IS_ERR(resamplefd))
+ eventfd_ctx_put(resamplefd);
+
+ if (eventfd && !IS_ERR(eventfd))
+ eventfd_ctx_put(eventfd);
+
+ fdput(f);
+
+out:
+ kfree(irqfd);
+ return ret;
+}
+
+/*
+ * Shut down any irqfds that match the given fd and gsi
+ */
+static int mshv_irqfd_deassign(struct mshv_partition *pt,
+ struct mshv_user_irqfd *args)
+{
+ struct mshv_irqfd *irqfd;
+ struct hlist_node *n;
+ struct eventfd_ctx *eventfd;
+
+ eventfd = eventfd_ctx_fdget(args->fd);
+ if (IS_ERR(eventfd))
+ return PTR_ERR(eventfd);
+
+ hlist_for_each_entry_safe(irqfd, n, &pt->pt_irqfds_list,
+ irqfd_hnode) {
+ if (irqfd->irqfd_eventfd_ctx == eventfd &&
+ irqfd->irqfd_irqnum == args->gsi)
+
+ mshv_irqfd_deactivate(irqfd);
+ }
+
+ eventfd_ctx_put(eventfd);
+
+ /*
+ * Block until we know all outstanding shutdown jobs have completed
+ * so that we guarantee there will not be any more interrupts on this
+ * gsi once this deassign function returns.
+ */
+ flush_workqueue(irqfd_cleanup_wq);
+
+ return 0;
+}
+
+int mshv_set_unset_irqfd(struct mshv_partition *pt,
+ struct mshv_user_irqfd *args)
+{
+ if (args->flags & ~MSHV_IRQFD_FLAGS_MASK)
+ return -EINVAL;
+
+ if (args->flags & BIT(MSHV_IRQFD_BIT_DEASSIGN))
+ return mshv_irqfd_deassign(pt, args);
+
+ return mshv_irqfd_assign(pt, args);
+}
+
+/*
+ * This function is called as the mshv VM fd is being released.
+ * Shut down all irqfds that still remain open.
+ */
+static void mshv_irqfd_release(struct mshv_partition *pt)
+{
+ struct mshv_irqfd *irqfd;
+ struct hlist_node *n;
+
+ spin_lock_irq(&pt->pt_irqfds_lock);
+
+ hlist_for_each_entry_safe(irqfd, n, &pt->pt_irqfds_list, irqfd_hnode)
+ mshv_irqfd_deactivate(irqfd);
+
+ spin_unlock_irq(&pt->pt_irqfds_lock);
+
+ /*
+ * Block until we know all outstanding shutdown jobs have completed
+ * since we do not take a mshv_partition* reference.
+ */
+ flush_workqueue(irqfd_cleanup_wq);
+}
+
+int mshv_irqfd_wq_init(void)
+{
+ irqfd_cleanup_wq = alloc_workqueue("mshv-irqfd-cleanup", 0, 0);
+ if (!irqfd_cleanup_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void mshv_irqfd_wq_cleanup(void)
+{
+ destroy_workqueue(irqfd_cleanup_wq);
+}
+
+/*
+ * --------------------------------------------------------------------
+ * ioeventfd: translate an MMIO memory write into an eventfd signal.
+ *
+ * Userspace can register an MMIO address with an eventfd to receive a
+ * notification when that memory has been written.
+ * --------------------------------------------------------------------
+ */
+
+static void ioeventfd_release(struct mshv_ioeventfd *p, u64 partition_id)
+{
+ if (p->iovntfd_doorbell_id > 0)
+ mshv_unregister_doorbell(partition_id, p->iovntfd_doorbell_id);
+ eventfd_ctx_put(p->iovntfd_eventfd);
+ kfree(p);
+}
+
+/* MMIO writes trigger an event if the addr/val match */
+static void ioeventfd_mmio_write(int doorbell_id, void *data)
+{
+ struct mshv_partition *partition = (struct mshv_partition *)data;
+ struct mshv_ioeventfd *p;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(p, &partition->ioeventfds_list, iovntfd_hnode)
+ if (p->iovntfd_doorbell_id == doorbell_id) {
+ eventfd_signal(p->iovntfd_eventfd);
+ break;
+ }
+
+ rcu_read_unlock();
+}
+
+static bool ioeventfd_check_collision(struct mshv_partition *pt,
+ struct mshv_ioeventfd *p)
+ __must_hold(&pt->mutex)
+{
+ struct mshv_ioeventfd *_p;
+
+ hlist_for_each_entry(_p, &pt->ioeventfds_list, iovntfd_hnode)
+ if (_p->iovntfd_addr == p->iovntfd_addr &&
+ _p->iovntfd_length == p->iovntfd_length &&
+ (_p->iovntfd_wildcard || p->iovntfd_wildcard ||
+ _p->iovntfd_datamatch == p->iovntfd_datamatch))
+ return true;
+
+ return false;
+}
+
+static int mshv_assign_ioeventfd(struct mshv_partition *pt,
+ struct mshv_user_ioeventfd *args)
+ __must_hold(&pt->mutex)
+{
+ struct mshv_ioeventfd *p;
+ struct eventfd_ctx *eventfd;
+ u64 doorbell_flags = 0;
+ int ret;
+
+ /* The partition mutex (pt_mutex) protects the ioeventfds_list */
+ WARN_ON_ONCE(!mutex_is_locked(&pt->pt_mutex));
+
+ if (args->flags & BIT(MSHV_IOEVENTFD_BIT_PIO))
+ return -EOPNOTSUPP;
+
+ /* len must be 0 (match any size) or a natural word size */
+ switch (args->len) {
+ case 0:
+ doorbell_flags = HV_DOORBELL_FLAG_TRIGGER_SIZE_ANY;
+ break;
+ case 1:
+ doorbell_flags = HV_DOORBELL_FLAG_TRIGGER_SIZE_BYTE;
+ break;
+ case 2:
+ doorbell_flags = HV_DOORBELL_FLAG_TRIGGER_SIZE_WORD;
+ break;
+ case 4:
+ doorbell_flags = HV_DOORBELL_FLAG_TRIGGER_SIZE_DWORD;
+ break;
+ case 8:
+ doorbell_flags = HV_DOORBELL_FLAG_TRIGGER_SIZE_QWORD;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* check for range overflow */
+ if (args->addr + args->len < args->addr)
+ return -EINVAL;
+
+ /* check for extra flags that we don't understand */
+ if (args->flags & ~MSHV_IOEVENTFD_FLAGS_MASK)
+ return -EINVAL;
+
+ eventfd = eventfd_ctx_fdget(args->fd);
+ if (IS_ERR(eventfd))
+ return PTR_ERR(eventfd);
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ p->iovntfd_addr = args->addr;
+ p->iovntfd_length = args->len;
+ p->iovntfd_eventfd = eventfd;
+
+ /* The datamatch feature is optional, otherwise this is a wildcard */
+ if (args->flags & BIT(MSHV_IOEVENTFD_BIT_DATAMATCH)) {
+ p->iovntfd_datamatch = args->datamatch;
+ } else {
+ p->iovntfd_wildcard = true;
+ doorbell_flags |= HV_DOORBELL_FLAG_TRIGGER_ANY_VALUE;
+ }
+
+ if (ioeventfd_check_collision(pt, p)) {
+ ret = -EEXIST;
+ goto unlock_fail;
+ }
+
+ ret = mshv_register_doorbell(pt->pt_id, ioeventfd_mmio_write,
+ (void *)pt, p->iovntfd_addr,
+ p->iovntfd_datamatch, doorbell_flags);
+ if (ret < 0)
+ goto unlock_fail;
+
+ p->iovntfd_doorbell_id = ret;
+
+ hlist_add_head_rcu(&p->iovntfd_hnode, &pt->ioeventfds_list);
+
+ return 0;
+
+unlock_fail:
+ kfree(p);
+
+fail:
+ eventfd_ctx_put(eventfd);
+
+ return ret;
+}
+
+static int mshv_deassign_ioeventfd(struct mshv_partition *pt,
+ struct mshv_user_ioeventfd *args)
+ __must_hold(&pt->mutex)
+{
+ struct mshv_ioeventfd *p;
+ struct eventfd_ctx *eventfd;
+ struct hlist_node *n;
+ int ret = -ENOENT;
+
+ /* The partition mutex (pt_mutex) protects the ioeventfds_list */
+ WARN_ON_ONCE(!mutex_is_locked(&pt->pt_mutex));
+
+ eventfd = eventfd_ctx_fdget(args->fd);
+ if (IS_ERR(eventfd))
+ return PTR_ERR(eventfd);
+
+ hlist_for_each_entry_safe(p, n, &pt->ioeventfds_list, iovntfd_hnode) {
+ bool wildcard = !(args->flags & BIT(MSHV_IOEVENTFD_BIT_DATAMATCH));
+
+ if (p->iovntfd_eventfd != eventfd ||
+ p->iovntfd_addr != args->addr ||
+ p->iovntfd_length != args->len ||
+ p->iovntfd_wildcard != wildcard)
+ continue;
+
+ if (!p->iovntfd_wildcard &&
+ p->iovntfd_datamatch != args->datamatch)
+ continue;
+
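+ /*
+ * Unhook the entry and wait for in-flight doorbell callbacks
+ * (ioeventfd_mmio_write) before releasing it.
+ */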
+ hlist_del_rcu(&p->iovntfd_hnode);
+ synchronize_rcu();
+ ioeventfd_release(p, pt->pt_id);
+ ret = 0;
+ break;
+ }
+
+ eventfd_ctx_put(eventfd);
+
+ return ret;
+}
+
+int mshv_set_unset_ioeventfd(struct mshv_partition *pt,
+ struct mshv_user_ioeventfd *args)
+ __must_hold(&pt->mutex)
+{
+ if ((args->flags & ~MSHV_IOEVENTFD_FLAGS_MASK) ||
+ mshv_field_nonzero(*args, rsvd))
+ return -EINVAL;
+
+ /* PIO not yet implemented */
+ if (args->flags & BIT(MSHV_IOEVENTFD_BIT_PIO))
+ return -EOPNOTSUPP;
+
+ if (args->flags & BIT(MSHV_IOEVENTFD_BIT_DEASSIGN))
+ return mshv_deassign_ioeventfd(pt, args);
+
+ return mshv_assign_ioeventfd(pt, args);
+}
+
+void mshv_eventfd_init(struct mshv_partition *pt)
+{
+ spin_lock_init(&pt->pt_irqfds_lock);
+ INIT_HLIST_HEAD(&pt->pt_irqfds_list);
+
+ INIT_HLIST_HEAD(&pt->irqfds_resampler_list);
+ mutex_init(&pt->irqfds_resampler_lock);
+
+ INIT_HLIST_HEAD(&pt->ioeventfds_list);
+}
+
+void mshv_eventfd_release(struct mshv_partition *pt)
+{
+ struct hlist_head items;
+ struct hlist_node *n;
+ struct mshv_ioeventfd *p;
+
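+ /*
+ * Detach the whole list first and wait for any RCU readers
+ * before tearing the entries down.
+ */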
+ hlist_move_list(&pt->ioeventfds_list, &items);
+ synchronize_rcu();
+
+ hlist_for_each_entry_safe(p, n, &items, iovntfd_hnode) {
+ hlist_del(&p->iovntfd_hnode);
+ ioeventfd_release(p, pt->pt_id);
+ }
+
+ mshv_irqfd_release(pt);
+}
diff --git a/drivers/hv/mshv_eventfd.h b/drivers/hv/mshv_eventfd.h
new file mode 100644
index 000000000000..332e7670a344
--- /dev/null
+++ b/drivers/hv/mshv_eventfd.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * irqfd: Allows an fd to be used to inject an interrupt into the guest.
+ * ioeventfd: Allows an fd to be used to receive a signal from the guest.
+ * All credit goes to the KVM developers.
+ */
+
+#ifndef __LINUX_MSHV_EVENTFD_H
+#define __LINUX_MSHV_EVENTFD_H
+
+#include <linux/poll.h>
+
+#include "mshv.h"
+#include "mshv_root.h"
+
+/*
+ * Contains the list of irqfds sharing an irq. Updates are protected by
+ * partition->irqfds_resampler_lock.
+ */
+struct mshv_irqfd_resampler {
+ struct mshv_partition *rsmplr_partn;
+ struct hlist_head rsmplr_irqfd_list;
+ struct mshv_irq_ack_notifier rsmplr_notifier;
+ struct hlist_node rsmplr_hnode;
+};
+
+struct mshv_irqfd {
+ struct mshv_partition *irqfd_partn;
+ struct eventfd_ctx *irqfd_eventfd_ctx;
+ struct mshv_guest_irq_ent irqfd_girq_ent;
+ seqcount_spinlock_t irqfd_irqe_sc;
+ u32 irqfd_irqnum;
+ struct mshv_lapic_irq irqfd_lapic_irq;
+ struct hlist_node irqfd_hnode;
+ poll_table irqfd_polltbl;
+ wait_queue_head_t *irqfd_wqh;
+ wait_queue_entry_t irqfd_wait;
+ struct work_struct irqfd_shutdown;
+ struct mshv_irqfd_resampler *irqfd_resampler;
+ struct eventfd_ctx *irqfd_resamplefd;
+ struct hlist_node irqfd_resampler_hnode;
+};
+
+void mshv_eventfd_init(struct mshv_partition *partition);
+void mshv_eventfd_release(struct mshv_partition *partition);
+
+void mshv_register_irq_ack_notifier(struct mshv_partition *partition,
+ struct mshv_irq_ack_notifier *mian);
+void mshv_unregister_irq_ack_notifier(struct mshv_partition *partition,
+ struct mshv_irq_ack_notifier *mian);
+bool mshv_notify_acked_gsi(struct mshv_partition *partition, int gsi);
+
+int mshv_set_unset_irqfd(struct mshv_partition *partition,
+ struct mshv_user_irqfd *args);
+
+int mshv_irqfd_wq_init(void);
+void mshv_irqfd_wq_cleanup(void);
+
+struct mshv_ioeventfd {
+ struct hlist_node iovntfd_hnode;
+ u64 iovntfd_addr;
+ int iovntfd_length;
+ struct eventfd_ctx *iovntfd_eventfd;
+ u64 iovntfd_datamatch;
+ int iovntfd_doorbell_id;
+ bool iovntfd_wildcard;
+};
+
+int mshv_set_unset_ioeventfd(struct mshv_partition *pt,
+ struct mshv_user_ioeventfd *args);
+
+#endif /* __LINUX_MSHV_EVENTFD_H */
diff --git a/drivers/hv/mshv_irq.c b/drivers/hv/mshv_irq.c
new file mode 100644
index 000000000000..d0fb9ef734f4
--- /dev/null
+++ b/drivers/hv/mshv_irq.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Microsoft Corporation.
+ *
+ * Authors: Microsoft Linux virtualization team
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <asm/mshyperv.h>
+
+#include "mshv_eventfd.h"
+#include "mshv.h"
+#include "mshv_root.h"
+
+/* Called from the ioctl code when the user wants to update the guest irq table */
+int mshv_update_routing_table(struct mshv_partition *partition,
+ const struct mshv_user_irq_entry *ue,
+ unsigned int numents)
+{
+ struct mshv_girq_routing_table *new = NULL, *old;
+ u32 i, nr_rt_entries = 0;
+ int r = 0;
+
+ if (numents == 0)
+ goto swap_routes;
+
+ for (i = 0; i < numents; i++) {
+ if (ue[i].gsi >= MSHV_MAX_GUEST_IRQS)
+ return -EINVAL;
+
+ if (ue[i].address_hi)
+ return -EINVAL;
+
+ nr_rt_entries = max(nr_rt_entries, ue[i].gsi);
+ }
+ nr_rt_entries += 1;
+
+ new = kzalloc(struct_size(new, mshv_girq_info_tbl, nr_rt_entries),
+ GFP_KERNEL_ACCOUNT);
+ if (!new)
+ return -ENOMEM;
+
+ new->num_rt_entries = nr_rt_entries;
+ for (i = 0; i < numents; i++) {
+ struct mshv_guest_irq_ent *girq;
+
+ girq = &new->mshv_girq_info_tbl[ue[i].gsi];
+
+ /*
+ * Allow only a one-to-one mapping between GSI and MSI routing.
+ */
+ if (girq->guest_irq_num != 0) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ girq->guest_irq_num = ue[i].gsi;
+ girq->girq_addr_lo = ue[i].address_lo;
+ girq->girq_addr_hi = ue[i].address_hi;
+ girq->girq_irq_data = ue[i].data;
+ girq->girq_entry_valid = true;
+ }
+
+swap_routes:
+ mutex_lock(&partition->pt_irq_lock);
+ old = rcu_dereference_protected(partition->pt_girq_tbl, 1);
+ rcu_assign_pointer(partition->pt_girq_tbl, new);
+ mshv_irqfd_routing_update(partition);
+ mutex_unlock(&partition->pt_irq_lock);
+
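+ /*
+ * Wait for SRCU readers of the old table, then free it via the
+ * common exit path below (new now points at the old table).
+ */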
+ synchronize_srcu_expedited(&partition->pt_irq_srcu);
+ new = old;
+
+out:
+ kfree(new);
+
+ return r;
+}
+
+/* The VM is going away; free the irq routing table */
+void mshv_free_routing_table(struct mshv_partition *partition)
+{
+ struct mshv_girq_routing_table *rt =
+ rcu_access_pointer(partition->pt_girq_tbl);
+
+ kfree(rt);
+}
+
+struct mshv_guest_irq_ent
+mshv_ret_girq_entry(struct mshv_partition *partition, u32 irqnum)
+{
+ struct mshv_guest_irq_ent entry = { 0 };
+ struct mshv_girq_routing_table *girq_tbl;
+
+ girq_tbl = srcu_dereference_check(partition->pt_girq_tbl,
+ &partition->pt_irq_srcu,
+ lockdep_is_held(&partition->pt_irq_lock));
+ if (!girq_tbl || irqnum >= girq_tbl->num_rt_entries) {
+ /*
+ * Premature register_irqfd: girq_entry_valid stays 0, so this
+ * entry will be ignored anyway.
+ */
+ entry.guest_irq_num = irqnum;
+ return entry;
+ }
+
+ return girq_tbl->mshv_girq_info_tbl[irqnum];
+}
+
+void mshv_copy_girq_info(struct mshv_guest_irq_ent *ent,
+ struct mshv_lapic_irq *lirq)
+{
+ memset(lirq, 0, sizeof(*lirq));
+ if (!ent || !ent->girq_entry_valid)
+ return;
+
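+ /* Decode the MSI address/data fields into a local APIC interrupt */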
+ lirq->lapic_vector = ent->girq_irq_data & 0xFF;
+ lirq->lapic_apic_id = (ent->girq_addr_lo >> 12) & 0xFF;
+ lirq->lapic_control.interrupt_type = (ent->girq_irq_data & 0x700) >> 8;
+ lirq->lapic_control.level_triggered = (ent->girq_irq_data >> 15) & 0x1;
+ lirq->lapic_control.logical_dest_mode = (ent->girq_addr_lo >> 2) & 0x1;
+}
diff --git a/drivers/hv/mshv_portid_table.c b/drivers/hv/mshv_portid_table.c
new file mode 100644
index 000000000000..c349af1f0aaa
--- /dev/null
+++ b/drivers/hv/mshv_portid_table.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <asm/mshyperv.h>
+
+#include "mshv.h"
+#include "mshv_root.h"
+
+/*
+ * Ports and connections are hypervisor constructs used for inter-partition
+ * communication. A port represents the source and a connection represents
+ * the destination. Partitions are responsible for managing the port and
+ * connection ids.
+ */
+
+#define PORTID_MIN 1
+#define PORTID_MAX INT_MAX
+
+static DEFINE_IDR(port_table_idr);
+
+void
+mshv_port_table_fini(void)
+{
+ struct port_table_info *port_info;
+ unsigned long i, tmp;
+
+ idr_lock(&port_table_idr);
+ if (!idr_is_empty(&port_table_idr)) {
+ idr_for_each_entry_ul(&port_table_idr, port_info, tmp, i) {
+ port_info = idr_remove(&port_table_idr, i);
+ kfree_rcu(port_info, portbl_rcu);
+ }
+ }
+ idr_unlock(&port_table_idr);
+}
+
+int
+mshv_portid_alloc(struct port_table_info *info)
+{
+ int ret = 0;
+
+ idr_lock(&port_table_idr);
+ ret = idr_alloc(&port_table_idr, info, PORTID_MIN,
+ PORTID_MAX, GFP_KERNEL);
+ idr_unlock(&port_table_idr);
+
+ return ret;
+}
+
+void
+mshv_portid_free(int port_id)
+{
+ struct port_table_info *info;
+
+ idr_lock(&port_table_idr);
+ info = idr_remove(&port_table_idr, port_id);
+ WARN_ON(!info);
+ idr_unlock(&port_table_idr);
+
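+ /* Wait for any concurrent mshv_portid_lookup() before freeing */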
+ synchronize_rcu();
+ kfree(info);
+}
+
+int
+mshv_portid_lookup(int port_id, struct port_table_info *info)
+{
+ struct port_table_info *_info;
+ int ret = -ENOENT;
+
+ rcu_read_lock();
+ _info = idr_find(&port_table_idr, port_id);
+ rcu_read_unlock();
+
+ if (_info) {
+ *info = *_info;
+ ret = 0;
+ }
+
+ return ret;
+}
diff --git a/drivers/hv/mshv_root.h b/drivers/hv/mshv_root.h
new file mode 100644
index 000000000000..e3931b0f1269
--- /dev/null
+++ b/drivers/hv/mshv_root.h
@@ -0,0 +1,311 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Microsoft Corporation.
+ */
+
+#ifndef _MSHV_ROOT_H_
+#define _MSHV_ROOT_H_
+
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/semaphore.h>
+#include <linux/sched.h>
+#include <linux/srcu.h>
+#include <linux/wait.h>
+#include <linux/hashtable.h>
+#include <linux/dev_printk.h>
+#include <linux/build_bug.h>
+#include <uapi/linux/mshv.h>
+
+/*
+ * The hypervisor version must be between these version numbers (inclusive)
+ * to guarantee compatibility.
+ */
+#define MSHV_HV_MIN_VERSION (27744)
+#define MSHV_HV_MAX_VERSION (27751)
+
+static_assert(HV_HYP_PAGE_SIZE == MSHV_HV_PAGE_SIZE);
+
+#define MSHV_MAX_VPS 256
+
+#define MSHV_PARTITIONS_HASH_BITS 9
+
+#define MSHV_PIN_PAGES_BATCH_SIZE (0x10000000ULL / HV_HYP_PAGE_SIZE)
+
+struct mshv_vp {
+ u32 vp_index;
+ struct mshv_partition *vp_partition;
+ struct mutex vp_mutex;
+ struct hv_vp_register_page *vp_register_page;
+ struct hv_message *vp_intercept_msg_page;
+ void *vp_ghcb_page;
+ struct hv_stats_page *vp_stats_pages[2];
+ struct {
+ atomic64_t vp_signaled_count;
+ struct {
+ u64 intercept_suspend: 1;
+ u64 root_sched_blocked: 1; /* root scheduler only */
+ u64 root_sched_dispatched: 1; /* root scheduler only */
+ u64 reserved: 61;
+ } flags;
+ unsigned int kicked_by_hv;
+ wait_queue_head_t vp_suspend_queue;
+ } run;
+};
+
+#define vp_fmt(fmt) "p%lluvp%u: " fmt
+#define vp_devprintk(level, v, fmt, ...) \
+do { \
+ const struct mshv_vp *__vp = (v); \
+ const struct mshv_partition *__pt = __vp->vp_partition; \
+ dev_##level(__pt->pt_module_dev, vp_fmt(fmt), __pt->pt_id, \
+ __vp->vp_index, ##__VA_ARGS__); \
+} while (0)
+#define vp_emerg(v, fmt, ...) vp_devprintk(emerg, v, fmt, ##__VA_ARGS__)
+#define vp_crit(v, fmt, ...) vp_devprintk(crit, v, fmt, ##__VA_ARGS__)
+#define vp_alert(v, fmt, ...) vp_devprintk(alert, v, fmt, ##__VA_ARGS__)
+#define vp_err(v, fmt, ...) vp_devprintk(err, v, fmt, ##__VA_ARGS__)
+#define vp_warn(v, fmt, ...) vp_devprintk(warn, v, fmt, ##__VA_ARGS__)
+#define vp_notice(v, fmt, ...) vp_devprintk(notice, v, fmt, ##__VA_ARGS__)
+#define vp_info(v, fmt, ...) vp_devprintk(info, v, fmt, ##__VA_ARGS__)
+#define vp_dbg(v, fmt, ...) vp_devprintk(dbg, v, fmt, ##__VA_ARGS__)
+
+struct mshv_mem_region {
+ struct hlist_node hnode;
+ u64 nr_pages;
+ u64 start_gfn;
+ u64 start_uaddr;
+ u32 hv_map_flags;
+ struct {
+ u64 large_pages: 1; /* 2MiB */
+ u64 range_pinned: 1;
+ u64 reserved: 62;
+ } flags;
+ struct mshv_partition *partition;
+ struct page *pages[];
+};
+
+struct mshv_irq_ack_notifier {
+ struct hlist_node link;
+ unsigned int irq_ack_gsi;
+ void (*irq_acked)(struct mshv_irq_ack_notifier *mian);
+};
+
+struct mshv_partition {
+ struct device *pt_module_dev;
+
+ struct hlist_node pt_hnode;
+ u64 pt_id;
+ refcount_t pt_ref_count;
+ struct mutex pt_mutex;
+ struct hlist_head pt_mem_regions; /* not ordered */
+
+ u32 pt_vp_count;
+ struct mshv_vp *pt_vp_array[MSHV_MAX_VPS];
+
+ struct mutex pt_irq_lock;
+ struct srcu_struct pt_irq_srcu;
+ struct hlist_head irq_ack_notifier_list;
+
+ struct hlist_head pt_devices;
+
+ /*
+ * MSHV does not support more than one async hypercall in flight
+ * for a single partition. Thus, it is okay to define per-partition
+ * async hypercall status.
+ */
+ struct completion async_hypercall;
+ u64 async_hypercall_status;
+
+ spinlock_t pt_irqfds_lock;
+ struct hlist_head pt_irqfds_list;
+ struct mutex irqfds_resampler_lock;
+ struct hlist_head irqfds_resampler_list;
+
+ struct hlist_head ioeventfds_list;
+
+ struct mshv_girq_routing_table __rcu *pt_girq_tbl;
+ u64 isolation_type;
+ bool import_completed;
+ bool pt_initialized;
+};
+
+#define pt_fmt(fmt) "p%llu: " fmt
+#define pt_devprintk(level, p, fmt, ...) \
+do { \
+ const struct mshv_partition *__pt = (p); \
+ dev_##level(__pt->pt_module_dev, pt_fmt(fmt), __pt->pt_id, \
+ ##__VA_ARGS__); \
+} while (0)
+#define pt_emerg(p, fmt, ...) pt_devprintk(emerg, p, fmt, ##__VA_ARGS__)
+#define pt_crit(p, fmt, ...) pt_devprintk(crit, p, fmt, ##__VA_ARGS__)
+#define pt_alert(p, fmt, ...) pt_devprintk(alert, p, fmt, ##__VA_ARGS__)
+#define pt_err(p, fmt, ...) pt_devprintk(err, p, fmt, ##__VA_ARGS__)
+#define pt_warn(p, fmt, ...) pt_devprintk(warn, p, fmt, ##__VA_ARGS__)
+#define pt_notice(p, fmt, ...) pt_devprintk(notice, p, fmt, ##__VA_ARGS__)
+#define pt_info(p, fmt, ...) pt_devprintk(info, p, fmt, ##__VA_ARGS__)
+#define pt_dbg(p, fmt, ...) pt_devprintk(dbg, p, fmt, ##__VA_ARGS__)
+
+struct mshv_lapic_irq {
+ u32 lapic_vector;
+ u64 lapic_apic_id;
+ union hv_interrupt_control lapic_control;
+};
+
+#define MSHV_MAX_GUEST_IRQS 4096
+
+/* representation of one guest irq entry, either msi or legacy */
+struct mshv_guest_irq_ent {
+ u32 girq_entry_valid; /* vfio looks at this */
+ u32 guest_irq_num; /* a unique number for each irq */
+ u32 girq_addr_lo; /* guest irq msi address info */
+ u32 girq_addr_hi;
+ u32 girq_irq_data; /* idt vector in some cases */
+};
+
+struct mshv_girq_routing_table {
+ u32 num_rt_entries;
+ struct mshv_guest_irq_ent mshv_girq_info_tbl[];
+};
+
+struct hv_synic_pages {
+ struct hv_message_page *synic_message_page;
+ struct hv_synic_event_flags_page *synic_event_flags_page;
+ struct hv_synic_event_ring_page *synic_event_ring_page;
+};
+
+struct mshv_root {
+ struct hv_synic_pages __percpu *synic_pages;
+ spinlock_t pt_ht_lock;
+ DECLARE_HASHTABLE(pt_htable, MSHV_PARTITIONS_HASH_BITS);
+};
+
+/*
+ * Callback for doorbell events.
+ * NOTE: This is called in interrupt context. The callback
+ * should defer any slow or sleeping work.
+ */
+typedef void (*doorbell_cb_t) (int doorbell_id, void *);
+
+/*
+ * port table information
+ */
+struct port_table_info {
+ struct rcu_head portbl_rcu;
+ enum hv_port_type hv_port_type;
+ union {
+ struct {
+ u64 reserved[2];
+ } hv_port_message;
+ struct {
+ u64 reserved[2];
+ } hv_port_event;
+ struct {
+ u64 reserved[2];
+ } hv_port_monitor;
+ struct {
+ doorbell_cb_t doorbell_cb;
+ void *data;
+ } hv_port_doorbell;
+ };
+};
+
+int mshv_update_routing_table(struct mshv_partition *partition,
+ const struct mshv_user_irq_entry *entries,
+ unsigned int numents);
+void mshv_free_routing_table(struct mshv_partition *partition);
+
+struct mshv_guest_irq_ent mshv_ret_girq_entry(struct mshv_partition *partition,
+ u32 irq_num);
+
+void mshv_copy_girq_info(struct mshv_guest_irq_ent *src_irq,
+ struct mshv_lapic_irq *dest_irq);
+
+void mshv_irqfd_routing_update(struct mshv_partition *partition);
+
+void mshv_port_table_fini(void);
+int mshv_portid_alloc(struct port_table_info *info);
+int mshv_portid_lookup(int port_id, struct port_table_info *info);
+void mshv_portid_free(int port_id);
+
+int mshv_register_doorbell(u64 partition_id, doorbell_cb_t doorbell_cb,
+ void *data, u64 gpa, u64 val, u64 flags);
+void mshv_unregister_doorbell(u64 partition_id, int doorbell_portid);
+
+void mshv_isr(void);
+int mshv_synic_init(unsigned int cpu);
+int mshv_synic_cleanup(unsigned int cpu);
+
+static inline bool mshv_partition_encrypted(struct mshv_partition *partition)
+{
+ return partition->isolation_type == HV_PARTITION_ISOLATION_TYPE_SNP;
+}
+
+struct mshv_partition *mshv_partition_get(struct mshv_partition *partition);
+void mshv_partition_put(struct mshv_partition *partition);
+struct mshv_partition *mshv_partition_find(u64 partition_id) __must_hold(RCU);
+
+/* hypercalls */
+
+int hv_call_withdraw_memory(u64 count, int node, u64 partition_id);
+int hv_call_create_partition(u64 flags,
+ struct hv_partition_creation_properties creation_properties,
+ union hv_partition_isolation_properties isolation_properties,
+ u64 *partition_id);
+int hv_call_initialize_partition(u64 partition_id);
+int hv_call_finalize_partition(u64 partition_id);
+int hv_call_delete_partition(u64 partition_id);
+int hv_call_map_mmio_pages(u64 partition_id, u64 gfn, u64 mmio_spa, u64 numpgs);
+int hv_call_map_gpa_pages(u64 partition_id, u64 gpa_target, u64 page_count,
+ u32 flags, struct page **pages);
+int hv_call_unmap_gpa_pages(u64 partition_id, u64 gpa_target, u64 page_count,
+ u32 flags);
+int hv_call_delete_vp(u64 partition_id, u32 vp_index);
+int hv_call_assert_virtual_interrupt(u64 partition_id, u32 vector,
+ u64 dest_addr,
+ union hv_interrupt_control control);
+int hv_call_clear_virtual_interrupt(u64 partition_id);
+int hv_call_get_gpa_access_states(u64 partition_id, u32 count, u64 gpa_base_pfn,
+ union hv_gpa_page_access_state_flags state_flags,
+ int *written_total,
+ union hv_gpa_page_access_state *states);
+int hv_call_get_vp_state(u32 vp_index, u64 partition_id,
+ struct hv_vp_state_data state_data,
+ /* Choose between pages and ret_output */
+ u64 page_count, struct page **pages,
+ union hv_output_get_vp_state *ret_output);
+int hv_call_set_vp_state(u32 vp_index, u64 partition_id,
+ /* Choose between pages and bytes */
+ struct hv_vp_state_data state_data, u64 page_count,
+ struct page **pages, u32 num_bytes, u8 *bytes);
+int hv_call_map_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
+ union hv_input_vtl input_vtl,
+ struct page **state_page);
+int hv_call_unmap_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
+ union hv_input_vtl input_vtl);
+int hv_call_create_port(u64 port_partition_id, union hv_port_id port_id,
+ u64 connection_partition_id, struct hv_port_info *port_info,
+ u8 port_vtl, u8 min_connection_vtl, int node);
+int hv_call_delete_port(u64 port_partition_id, union hv_port_id port_id);
+int hv_call_connect_port(u64 port_partition_id, union hv_port_id port_id,
+ u64 connection_partition_id,
+ union hv_connection_id connection_id,
+ struct hv_connection_info *connection_info,
+ u8 connection_vtl, int node);
+int hv_call_disconnect_port(u64 connection_partition_id,
+ union hv_connection_id connection_id);
+int hv_call_notify_port_ring_empty(u32 sint_index);
+int hv_call_map_stat_page(enum hv_stats_object_type type,
+ const union hv_stats_object_identity *identity,
+ void **addr);
+int hv_call_unmap_stat_page(enum hv_stats_object_type type,
+ const union hv_stats_object_identity *identity);
+int hv_call_modify_spa_host_access(u64 partition_id, struct page **pages,
+ u64 page_struct_count, u32 host_access,
+ u32 flags, u8 acquire);
+
+extern struct mshv_root mshv_root;
+extern enum hv_scheduler_type hv_scheduler_type;
+extern u8 * __percpu *hv_synic_eventring_tail;
+
+#endif /* _MSHV_ROOT_H_ */
diff --git a/drivers/hv/mshv_root_hv_call.c b/drivers/hv/mshv_root_hv_call.c
new file mode 100644
index 000000000000..a222a16107f6
--- /dev/null
+++ b/drivers/hv/mshv_root_hv_call.c
@@ -0,0 +1,849 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Microsoft Corporation.
+ *
+ * Hypercall helper functions used by the mshv_root module.
+ *
+ * Authors: Microsoft Linux virtualization team
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <asm/mshyperv.h>
+
+#include "mshv_root.h"
+
+/* Determined empirically */
+#define HV_INIT_PARTITION_DEPOSIT_PAGES 208
+#define HV_MAP_GPA_DEPOSIT_PAGES 256
+#define HV_UMAP_GPA_PAGES 512
+
+#define HV_PAGE_COUNT_2M_ALIGNED(pg_count) (!((pg_count) & (0x200 - 1)))
+
+#define HV_WITHDRAW_BATCH_SIZE (HV_HYP_PAGE_SIZE / sizeof(u64))
+#define HV_MAP_GPA_BATCH_SIZE \
+ ((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_map_gpa_pages)) \
+ / sizeof(u64))
+#define HV_GET_VP_STATE_BATCH_SIZE \
+ ((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_get_vp_state)) \
+ / sizeof(u64))
+#define HV_SET_VP_STATE_BATCH_SIZE \
+ ((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_set_vp_state)) \
+ / sizeof(u64))
+#define HV_GET_GPA_ACCESS_STATES_BATCH_SIZE \
+ ((HV_HYP_PAGE_SIZE - sizeof(union hv_gpa_page_access_state)) \
+ / sizeof(union hv_gpa_page_access_state))
+#define HV_MODIFY_SPARSE_SPA_PAGE_HOST_ACCESS_MAX_PAGE_COUNT \
+ ((HV_HYP_PAGE_SIZE - \
+ sizeof(struct hv_input_modify_sparse_spa_page_host_access)) / \
+ sizeof(u64))
+
+int hv_call_withdraw_memory(u64 count, int node, u64 partition_id)
+{
+ struct hv_input_withdraw_memory *input_page;
+ struct hv_output_withdraw_memory *output_page;
+ struct page *page;
+ u16 completed;
+ unsigned long remaining = count;
+ u64 status;
+ int i;
+ unsigned long flags;
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+ output_page = page_address(page);
+
+ while (remaining) {
+ local_irq_save(flags);
+
+ input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ memset(input_page, 0, sizeof(*input_page));
+ input_page->partition_id = partition_id;
+ status = hv_do_rep_hypercall(HVCALL_WITHDRAW_MEMORY,
+ min(remaining, HV_WITHDRAW_BATCH_SIZE),
+ 0, input_page, output_page);
+
+ local_irq_restore(flags);
+
+ completed = hv_repcomp(status);
+
+ for (i = 0; i < completed; i++)
+ __free_page(pfn_to_page(output_page->gpa_page_list[i]));
+
+ if (!hv_result_success(status)) {
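+ /*
+ * HV_STATUS_NO_RESOURCES means there are no more pages to
+ * withdraw; treat it as success.
+ */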
+ if (hv_result(status) == HV_STATUS_NO_RESOURCES)
+ status = HV_STATUS_SUCCESS;
+ break;
+ }
+
+ remaining -= completed;
+ }
+ free_page((unsigned long)output_page);
+
+ return hv_result_to_errno(status);
+}
+
+int hv_call_create_partition(u64 flags,
+ struct hv_partition_creation_properties creation_properties,
+ union hv_partition_isolation_properties isolation_properties,
+ u64 *partition_id)
+{
+ struct hv_input_create_partition *input;
+ struct hv_output_create_partition *output;
+ u64 status;
+ int ret;
+ unsigned long irq_flags;
+
+ do {
+ local_irq_save(irq_flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+
+ memset(input, 0, sizeof(*input));
+ input->flags = flags;
+ input->compatibility_version = HV_COMPATIBILITY_21_H2;
+
+ memcpy(&input->partition_creation_properties, &creation_properties,
+ sizeof(creation_properties));
+
+ memcpy(&input->isolation_properties, &isolation_properties,
+ sizeof(isolation_properties));
+
+ status = hv_do_hypercall(HVCALL_CREATE_PARTITION,
+ input, output);
+
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ if (hv_result_success(status))
+ *partition_id = output->partition_id;
+ local_irq_restore(irq_flags);
+ ret = hv_result_to_errno(status);
+ break;
+ }
+ local_irq_restore(irq_flags);
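+ /* Not enough memory: deposit a page with the hypervisor and retry */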
+ ret = hv_call_deposit_pages(NUMA_NO_NODE,
+ hv_current_partition_id, 1);
+ } while (!ret);
+
+ return ret;
+}
+
+int hv_call_initialize_partition(u64 partition_id)
+{
+ struct hv_input_initialize_partition input;
+ u64 status;
+ int ret;
+
+ input.partition_id = partition_id;
+
+ ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id,
+ HV_INIT_PARTITION_DEPOSIT_PAGES);
+ if (ret)
+ return ret;
+
+ do {
+ status = hv_do_fast_hypercall8(HVCALL_INITIALIZE_PARTITION,
+ *(u64 *)&input);
+
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ ret = hv_result_to_errno(status);
+ break;
+ }
+ ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id, 1);
+ } while (!ret);
+
+ return ret;
+}
+
+int hv_call_finalize_partition(u64 partition_id)
+{
+ struct hv_input_finalize_partition input;
+ u64 status;
+
+ input.partition_id = partition_id;
+ status = hv_do_fast_hypercall8(HVCALL_FINALIZE_PARTITION,
+ *(u64 *)&input);
+
+ return hv_result_to_errno(status);
+}
+
+int hv_call_delete_partition(u64 partition_id)
+{
+ struct hv_input_delete_partition input;
+ u64 status;
+
+ input.partition_id = partition_id;
+ status = hv_do_fast_hypercall8(HVCALL_DELETE_PARTITION, *(u64 *)&input);
+
+ return hv_result_to_errno(status);
+}
+
+/* Ask the hypervisor to map guest ram pages or the guest mmio space */
+static int hv_do_map_gpa_hcall(u64 partition_id, u64 gfn, u64 page_struct_count,
+ u32 flags, struct page **pages, u64 mmio_spa)
+{
+ struct hv_input_map_gpa_pages *input_page;
+ u64 status, *pfnlist;
+ unsigned long irq_flags, large_shift = 0;
+ int ret = 0, done = 0;
+ u64 page_count = page_struct_count;
+
+ if (page_count == 0 || (pages && mmio_spa))
+ return -EINVAL;
+
+ if (flags & HV_MAP_GPA_LARGE_PAGE) {
+ if (mmio_spa)
+ return -EINVAL;
+
+ if (!HV_PAGE_COUNT_2M_ALIGNED(page_count))
+ return -EINVAL;
+
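+ /* Convert the 4K page count into a count of 2M large pages */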
+ large_shift = HV_HYP_LARGE_PAGE_SHIFT - HV_HYP_PAGE_SHIFT;
+ page_count >>= large_shift;
+ }
+
+ while (done < page_count) {
+ ulong i, completed, remain = page_count - done;
+ int rep_count = min(remain, HV_MAP_GPA_BATCH_SIZE);
+
+ local_irq_save(irq_flags);
+ input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ input_page->target_partition_id = partition_id;
+ input_page->target_gpa_base = gfn + (done << large_shift);
+ input_page->map_flags = flags;
+ pfnlist = input_page->source_gpa_page_list;
+
+ for (i = 0; i < rep_count; i++)
+ if (flags & HV_MAP_GPA_NO_ACCESS) {
+ pfnlist[i] = 0;
+ } else if (pages) {
+ u64 index = (done + i) << large_shift;
+
+ if (index >= page_struct_count) {
+ ret = -EINVAL;
+ break;
+ }
+ pfnlist[i] = page_to_pfn(pages[index]);
+ } else {
+ pfnlist[i] = mmio_spa + done + i;
+ }
+ if (ret)
+ break;
+
+ status = hv_do_rep_hypercall(HVCALL_MAP_GPA_PAGES, rep_count, 0,
+ input_page, NULL);
+ local_irq_restore(irq_flags);
+
+ completed = hv_repcomp(status);
+
+ if (hv_result(status) == HV_STATUS_INSUFFICIENT_MEMORY) {
+ ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id,
+ HV_MAP_GPA_DEPOSIT_PAGES);
+ if (ret)
+ break;
+
+ } else if (!hv_result_success(status)) {
+ ret = hv_result_to_errno(status);
+ break;
+ }
+
+ done += completed;
+ }
+
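+ /* Roll back any pages that were mapped before the failure */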
+ if (ret && done) {
+ u32 unmap_flags = 0;
+
+ if (flags & HV_MAP_GPA_LARGE_PAGE)
+ unmap_flags |= HV_UNMAP_GPA_LARGE_PAGE;
+ hv_call_unmap_gpa_pages(partition_id, gfn, done, unmap_flags);
+ }
+
+ return ret;
+}
+
+/* Ask the hypervisor to map guest ram pages */
+int hv_call_map_gpa_pages(u64 partition_id, u64 gpa_target, u64 page_count,
+ u32 flags, struct page **pages)
+{
+ return hv_do_map_gpa_hcall(partition_id, gpa_target, page_count,
+ flags, pages, 0);
+}
+
+/* Ask the hypervisor to map guest mmio space */
+int hv_call_map_mmio_pages(u64 partition_id, u64 gfn, u64 mmio_spa, u64 numpgs)
+{
+ int i;
+ u32 flags = HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE |
+ HV_MAP_GPA_NOT_CACHED;
+
+ for (i = 0; i < numpgs; i++)
+ if (page_is_ram(mmio_spa + i))
+ return -EINVAL;
+
+ return hv_do_map_gpa_hcall(partition_id, gfn, numpgs, flags, NULL,
+ mmio_spa);
+}
+
+int hv_call_unmap_gpa_pages(u64 partition_id, u64 gfn, u64 page_count_4k,
+ u32 flags)
+{
+ struct hv_input_unmap_gpa_pages *input_page;
+ u64 status, page_count = page_count_4k;
+ unsigned long irq_flags, large_shift = 0;
+ int ret = 0, done = 0;
+
+ if (page_count == 0)
+ return -EINVAL;
+
+ if (flags & HV_UNMAP_GPA_LARGE_PAGE) {
+ if (!HV_PAGE_COUNT_2M_ALIGNED(page_count))
+ return -EINVAL;
+
+ large_shift = HV_HYP_LARGE_PAGE_SHIFT - HV_HYP_PAGE_SHIFT;
+ page_count >>= large_shift;
+ }
+
+ while (done < page_count) {
+ ulong completed, remain = page_count - done;
+ int rep_count = min(remain, HV_UMAP_GPA_PAGES);
+
+ local_irq_save(irq_flags);
+ input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ input_page->target_partition_id = partition_id;
+ input_page->target_gpa_base = gfn + (done << large_shift);
+ input_page->unmap_flags = flags;
+ status = hv_do_rep_hypercall(HVCALL_UNMAP_GPA_PAGES, rep_count,
+ 0, input_page, NULL);
+ local_irq_restore(irq_flags);
+
+ completed = hv_repcomp(status);
+ if (!hv_result_success(status)) {
+ ret = hv_result_to_errno(status);
+ break;
+ }
+
+ done += completed;
+ }
+
+ return ret;
+}
+
+int hv_call_get_gpa_access_states(u64 partition_id, u32 count, u64 gpa_base_pfn,
+ union hv_gpa_page_access_state_flags state_flags,
+ int *written_total,
+ union hv_gpa_page_access_state *states)
+{
+ struct hv_input_get_gpa_pages_access_state *input_page;
+ union hv_gpa_page_access_state *output_page;
+ int completed = 0;
+ unsigned long remaining = count;
+ int rep_count, i;
+ u64 status = 0;
+ unsigned long flags;
+
+ *written_total = 0;
+ while (remaining) {
+ local_irq_save(flags);
+ input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output_page = *this_cpu_ptr(hyperv_pcpu_output_arg);
+
+ input_page->partition_id = partition_id;
+ input_page->hv_gpa_page_number = gpa_base_pfn + *written_total;
+ input_page->flags = state_flags;
+ rep_count = min(remaining, HV_GET_GPA_ACCESS_STATES_BATCH_SIZE);
+
+ status = hv_do_rep_hypercall(HVCALL_GET_GPA_PAGES_ACCESS_STATES, rep_count,
+ 0, input_page, output_page);
+ if (!hv_result_success(status)) {
+ local_irq_restore(flags);
+ break;
+ }
+ completed = hv_repcomp(status);
+ for (i = 0; i < completed; ++i)
+ states[i].as_uint8 = output_page[i].as_uint8;
+
+ local_irq_restore(flags);
+ states += completed;
+ *written_total += completed;
+ remaining -= completed;
+ }
+
+ return hv_result_to_errno(status);
+}
+
+int hv_call_assert_virtual_interrupt(u64 partition_id, u32 vector,
+ u64 dest_addr,
+ union hv_interrupt_control control)
+{
+ struct hv_input_assert_virtual_interrupt *input;
+ unsigned long flags;
+ u64 status;
+
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ memset(input, 0, sizeof(*input));
+ input->partition_id = partition_id;
+ input->vector = vector;
+ input->dest_addr = dest_addr;
+ input->control = control;
+ status = hv_do_hypercall(HVCALL_ASSERT_VIRTUAL_INTERRUPT, input, NULL);
+ local_irq_restore(flags);
+
+ return hv_result_to_errno(status);
+}
+
+int hv_call_delete_vp(u64 partition_id, u32 vp_index)
+{
+ union hv_input_delete_vp input = {};
+ u64 status;
+
+ input.partition_id = partition_id;
+ input.vp_index = vp_index;
+
+ status = hv_do_fast_hypercall16(HVCALL_DELETE_VP,
+ input.as_uint64[0], input.as_uint64[1]);
+
+ return hv_result_to_errno(status);
+}
+EXPORT_SYMBOL_GPL(hv_call_delete_vp);
+
+int hv_call_get_vp_state(u32 vp_index, u64 partition_id,
+ struct hv_vp_state_data state_data,
+ /* Choose between pages and ret_output */
+ u64 page_count, struct page **pages,
+ union hv_output_get_vp_state *ret_output)
+{
+ struct hv_input_get_vp_state *input;
+ union hv_output_get_vp_state *output;
+ u64 status;
+ int i;
+ u64 control;
+ unsigned long flags;
+ int ret = 0;
+
+ if (page_count > HV_GET_VP_STATE_BATCH_SIZE)
+ return -EINVAL;
+
+ if (!page_count && !ret_output)
+ return -EINVAL;
+
+ do {
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+ memset(input, 0, sizeof(*input));
+ memset(output, 0, sizeof(*output));
+
+ input->partition_id = partition_id;
+ input->vp_index = vp_index;
+ input->state_data = state_data;
+ for (i = 0; i < page_count; i++)
+ input->output_data_pfns[i] = page_to_pfn(pages[i]);
+
+ control = (HVCALL_GET_VP_STATE) |
+ (page_count << HV_HYPERCALL_VARHEAD_OFFSET);
+
+ status = hv_do_hypercall(control, input, output);
+
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ if (hv_result_success(status) && ret_output)
+ memcpy(ret_output, output, sizeof(*output));
+
+ local_irq_restore(flags);
+ ret = hv_result_to_errno(status);
+ break;
+ }
+ local_irq_restore(flags);
+
+ ret = hv_call_deposit_pages(NUMA_NO_NODE,
+ partition_id, 1);
+ } while (!ret);
+
+ return ret;
+}
+
+int hv_call_set_vp_state(u32 vp_index, u64 partition_id,
+ /* Choose between pages and bytes */
+ struct hv_vp_state_data state_data, u64 page_count,
+ struct page **pages, u32 num_bytes, u8 *bytes)
+{
+ struct hv_input_set_vp_state *input;
+ u64 status;
+ int i;
+ u64 control;
+ unsigned long flags;
+ int ret = 0;
+ u16 varhead_sz;
+
+ if (page_count > HV_SET_VP_STATE_BATCH_SIZE)
+ return -EINVAL;
+ if (sizeof(*input) + num_bytes > HV_HYP_PAGE_SIZE)
+ return -EINVAL;
+
+ if (num_bytes)
+ /* round up to 8 and divide by 8 */
+ varhead_sz = (num_bytes + 7) >> 3;
+ else if (page_count)
+ varhead_sz = page_count;
+ else
+ return -EINVAL;
+
+ do {
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ memset(input, 0, sizeof(*input));
+
+ input->partition_id = partition_id;
+ input->vp_index = vp_index;
+ input->state_data = state_data;
+ if (num_bytes) {
+ memcpy((u8 *)input->data, bytes, num_bytes);
+ } else {
+ for (i = 0; i < page_count; i++)
+ input->data[i].pfns = page_to_pfn(pages[i]);
+ }
+
+ control = (HVCALL_SET_VP_STATE) |
+ (varhead_sz << HV_HYPERCALL_VARHEAD_OFFSET);
+
+ status = hv_do_hypercall(control, input, NULL);
+
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ local_irq_restore(flags);
+ ret = hv_result_to_errno(status);
+ break;
+ }
+ local_irq_restore(flags);
+
+ ret = hv_call_deposit_pages(NUMA_NO_NODE,
+ partition_id, 1);
+ } while (!ret);
+
+ return ret;
+}
+
+int hv_call_map_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
+ union hv_input_vtl input_vtl,
+ struct page **state_page)
+{
+ struct hv_input_map_vp_state_page *input;
+ struct hv_output_map_vp_state_page *output;
+ u64 status;
+ int ret;
+ unsigned long flags;
+
+ do {
+ local_irq_save(flags);
+
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+
+ input->partition_id = partition_id;
+ input->vp_index = vp_index;
+ input->type = type;
+ input->input_vtl = input_vtl;
+
+ status = hv_do_hypercall(HVCALL_MAP_VP_STATE_PAGE, input, output);
+
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ if (hv_result_success(status))
+ *state_page = pfn_to_page(output->map_location);
+ local_irq_restore(flags);
+ ret = hv_result_to_errno(status);
+ break;
+ }
+
+ local_irq_restore(flags);
+
+ ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id, 1);
+ } while (!ret);
+
+ return ret;
+}
+
+int hv_call_unmap_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
+ union hv_input_vtl input_vtl)
+{
+ unsigned long flags;
+ u64 status;
+ struct hv_input_unmap_vp_state_page *input;
+
+ local_irq_save(flags);
+
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ memset(input, 0, sizeof(*input));
+
+ input->partition_id = partition_id;
+ input->vp_index = vp_index;
+ input->type = type;
+ input->input_vtl = input_vtl;
+
+ status = hv_do_hypercall(HVCALL_UNMAP_VP_STATE_PAGE, input, NULL);
+
+ local_irq_restore(flags);
+
+ return hv_result_to_errno(status);
+}
+
+int
+hv_call_clear_virtual_interrupt(u64 partition_id)
+{
+ int status;
+
+ status = hv_do_fast_hypercall8(HVCALL_CLEAR_VIRTUAL_INTERRUPT,
+ partition_id);
+
+ return hv_result_to_errno(status);
+}
+
+int
+hv_call_create_port(u64 port_partition_id, union hv_port_id port_id,
+ u64 connection_partition_id,
+ struct hv_port_info *port_info,
+ u8 port_vtl, u8 min_connection_vtl, int node)
+{
+ struct hv_input_create_port *input;
+ unsigned long flags;
+ int ret = 0;
+ int status;
+
+ do {
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ memset(input, 0, sizeof(*input));
+
+ input->port_partition_id = port_partition_id;
+ input->port_id = port_id;
+ input->connection_partition_id = connection_partition_id;
+ input->port_info = *port_info;
+ input->port_vtl = port_vtl;
+ input->min_connection_vtl = min_connection_vtl;
+ input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
+ status = hv_do_hypercall(HVCALL_CREATE_PORT, input, NULL);
+ local_irq_restore(flags);
+ if (hv_result_success(status))
+ break;
+
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ ret = hv_result_to_errno(status);
+ break;
+ }
+ ret = hv_call_deposit_pages(NUMA_NO_NODE, port_partition_id, 1);
+
+ } while (!ret);
+
+ return ret;
+}
+
+int
+hv_call_delete_port(u64 port_partition_id, union hv_port_id port_id)
+{
+ union hv_input_delete_port input = { 0 };
+ int status;
+
+ input.port_partition_id = port_partition_id;
+ input.port_id = port_id;
+ status = hv_do_fast_hypercall16(HVCALL_DELETE_PORT,
+ input.as_uint64[0],
+ input.as_uint64[1]);
+
+ return hv_result_to_errno(status);
+}
+
+int
+hv_call_connect_port(u64 port_partition_id, union hv_port_id port_id,
+ u64 connection_partition_id,
+ union hv_connection_id connection_id,
+ struct hv_connection_info *connection_info,
+ u8 connection_vtl, int node)
+{
+ struct hv_input_connect_port *input;
+ unsigned long flags;
+ int ret = 0, status;
+
+ do {
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ memset(input, 0, sizeof(*input));
+ input->port_partition_id = port_partition_id;
+ input->port_id = port_id;
+ input->connection_partition_id = connection_partition_id;
+ input->connection_id = connection_id;
+ input->connection_info = *connection_info;
+ input->connection_vtl = connection_vtl;
+ input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
+ status = hv_do_hypercall(HVCALL_CONNECT_PORT, input, NULL);
+
+ local_irq_restore(flags);
+ if (hv_result_success(status))
+ break;
+
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ ret = hv_result_to_errno(status);
+ break;
+ }
+ ret = hv_call_deposit_pages(NUMA_NO_NODE,
+ connection_partition_id, 1);
+ } while (!ret);
+
+ return ret;
+}
+
+int
+hv_call_disconnect_port(u64 connection_partition_id,
+ union hv_connection_id connection_id)
+{
+ union hv_input_disconnect_port input = { 0 };
+ int status;
+
+ input.connection_partition_id = connection_partition_id;
+ input.connection_id = connection_id;
+ input.is_doorbell = 1;
+ status = hv_do_fast_hypercall16(HVCALL_DISCONNECT_PORT,
+ input.as_uint64[0],
+ input.as_uint64[1]);
+
+ return hv_result_to_errno(status);
+}
+
+int
+hv_call_notify_port_ring_empty(u32 sint_index)
+{
+ union hv_input_notify_port_ring_empty input = { 0 };
+ int status;
+
+ input.sint_index = sint_index;
+ status = hv_do_fast_hypercall8(HVCALL_NOTIFY_PORT_RING_EMPTY,
+ input.as_uint64);
+
+ return hv_result_to_errno(status);
+}
+
+int hv_call_map_stat_page(enum hv_stats_object_type type,
+ const union hv_stats_object_identity *identity,
+ void **addr)
+{
+ unsigned long flags;
+ struct hv_input_map_stats_page *input;
+ struct hv_output_map_stats_page *output;
+ u64 status, pfn;
+ int ret = 0;
+
+ do {
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+
+ memset(input, 0, sizeof(*input));
+ input->type = type;
+ input->identity = *identity;
+
+ status = hv_do_hypercall(HVCALL_MAP_STATS_PAGE, input, output);
+ pfn = output->map_location;
+
+ local_irq_restore(flags);
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ ret = hv_result_to_errno(status);
+ if (hv_result_success(status))
+ break;
+ return ret;
+ }
+
+ ret = hv_call_deposit_pages(NUMA_NO_NODE,
+ hv_current_partition_id, 1);
+ if (ret)
+ return ret;
+ } while (!ret);
+
+ *addr = page_address(pfn_to_page(pfn));
+
+ return ret;
+}
+
+int hv_call_unmap_stat_page(enum hv_stats_object_type type,
+ const union hv_stats_object_identity *identity)
+{
+ unsigned long flags;
+ struct hv_input_unmap_stats_page *input;
+ u64 status;
+
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ memset(input, 0, sizeof(*input));
+ input->type = type;
+ input->identity = *identity;
+
+ status = hv_do_hypercall(HVCALL_UNMAP_STATS_PAGE, input, NULL);
+ local_irq_restore(flags);
+
+ return hv_result_to_errno(status);
+}
+
+int hv_call_modify_spa_host_access(u64 partition_id, struct page **pages,
+ u64 page_struct_count, u32 host_access,
+ u32 flags, u8 acquire)
+{
+ struct hv_input_modify_sparse_spa_page_host_access *input_page;
+ u64 status;
+ int done = 0;
+ unsigned long irq_flags, large_shift = 0;
+ u64 page_count = page_struct_count;
+ u16 code = acquire ? HVCALL_ACQUIRE_SPARSE_SPA_PAGE_HOST_ACCESS :
+ HVCALL_RELEASE_SPARSE_SPA_PAGE_HOST_ACCESS;
+
+ if (page_count == 0)
+ return -EINVAL;
+
+ if (flags & HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE) {
+ if (!HV_PAGE_COUNT_2M_ALIGNED(page_count))
+ return -EINVAL;
+ large_shift = HV_HYP_LARGE_PAGE_SHIFT - HV_HYP_PAGE_SHIFT;
+ page_count >>= large_shift;
+ }
+
+ while (done < page_count) {
+ ulong i, completed, remain = page_count - done;
+ int rep_count = min(remain,
+ HV_MODIFY_SPARSE_SPA_PAGE_HOST_ACCESS_MAX_PAGE_COUNT);
+
+ local_irq_save(irq_flags);
+ input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ memset(input_page, 0, sizeof(*input_page));
+ /*
+ * Only set the partition id if you are making the pages
+ * exclusive.
+ */
+ if (flags & HV_MODIFY_SPA_PAGE_HOST_ACCESS_MAKE_EXCLUSIVE)
+ input_page->partition_id = partition_id;
+ input_page->flags = flags;
+ input_page->host_access = host_access;
+
+ for (i = 0; i < rep_count; i++) {
+ u64 index = (done + i) << large_shift;
+
+ if (index >= page_struct_count)
+ return -EINVAL;
+
+ input_page->spa_page_list[i] =
+ page_to_pfn(pages[index]);
+ }
+
+ status = hv_do_rep_hypercall(code, rep_count, 0, input_page,
+ NULL);
+ local_irq_restore(irq_flags);
+
+ completed = hv_repcomp(status);
+
+ if (!hv_result_success(status))
+ return hv_result_to_errno(status);
+
+ done += completed;
+ }
+
+ return 0;
+}
diff --git a/drivers/hv/mshv_root_main.c b/drivers/hv/mshv_root_main.c
new file mode 100644
index 000000000000..72df774e410a
--- /dev/null
+++ b/drivers/hv/mshv_root_main.c
@@ -0,0 +1,2307 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Microsoft Corporation.
+ *
+ * The main part of the mshv_root module, providing APIs to create
+ * and manage guest partitions.
+ *
+ * Authors: Microsoft Linux virtualization team
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/anon_inodes.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/cpuhotplug.h>
+#include <linux/random.h>
+#include <asm/mshyperv.h>
+#include <linux/hyperv.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/kexec.h>
+#include <linux/page-flags.h>
+#include <linux/crash_dump.h>
+#include <linux/panic_notifier.h>
+#include <linux/vmalloc.h>
+
+#include "mshv_eventfd.h"
+#include "mshv.h"
+#include "mshv_root.h"
+
+MODULE_AUTHOR("Microsoft");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Microsoft Hyper-V root partition VMM interface /dev/mshv");
+
+/* TODO move this to mshyperv.h when needed outside driver */
+static inline bool hv_parent_partition(void)
+{
+ return hv_root_partition();
+}
+
+/* TODO move this to another file when debugfs code is added */
+enum hv_stats_vp_counters { /* HV_THREAD_COUNTER */
+#if defined(CONFIG_X86)
+ VpRootDispatchThreadBlocked = 201,
+#elif defined(CONFIG_ARM64)
+ VpRootDispatchThreadBlocked = 94,
+#endif
+ VpStatsMaxCounter
+};
+
+struct hv_stats_page {
+ union {
+ u64 vp_cntrs[VpStatsMaxCounter]; /* VP counters */
+ u8 data[HV_HYP_PAGE_SIZE];
+ };
+} __packed;
+
+struct mshv_root mshv_root;
+
+enum hv_scheduler_type hv_scheduler_type;
+
+/* These can go away once the fast extended hypercall ABI is implemented. */
+static void * __percpu *root_scheduler_input;
+static void * __percpu *root_scheduler_output;
+
+static long mshv_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg);
+static int mshv_dev_open(struct inode *inode, struct file *filp);
+static int mshv_dev_release(struct inode *inode, struct file *filp);
+static int mshv_vp_release(struct inode *inode, struct file *filp);
+static long mshv_vp_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg);
+static int mshv_partition_release(struct inode *inode, struct file *filp);
+static long mshv_partition_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg);
+static int mshv_vp_mmap(struct file *file, struct vm_area_struct *vma);
+static vm_fault_t mshv_vp_fault(struct vm_fault *vmf);
+static int mshv_init_async_handler(struct mshv_partition *partition);
+static void mshv_async_hvcall_handler(void *data, u64 *status);
+
+static const union hv_input_vtl input_vtl_zero;
+static const union hv_input_vtl input_vtl_normal = {
+ .target_vtl = HV_NORMAL_VTL,
+ .use_target_vtl = 1,
+};
+
+static const struct vm_operations_struct mshv_vp_vm_ops = {
+ .fault = mshv_vp_fault,
+};
+
+static const struct file_operations mshv_vp_fops = {
+ .owner = THIS_MODULE,
+ .release = mshv_vp_release,
+ .unlocked_ioctl = mshv_vp_ioctl,
+ .llseek = noop_llseek,
+ .mmap = mshv_vp_mmap,
+};
+
+static const struct file_operations mshv_partition_fops = {
+ .owner = THIS_MODULE,
+ .release = mshv_partition_release,
+ .unlocked_ioctl = mshv_partition_ioctl,
+ .llseek = noop_llseek,
+};
+
+static const struct file_operations mshv_dev_fops = {
+ .owner = THIS_MODULE,
+ .open = mshv_dev_open,
+ .release = mshv_dev_release,
+ .unlocked_ioctl = mshv_dev_ioctl,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice mshv_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "mshv",
+ .fops = &mshv_dev_fops,
+ .mode = 0600,
+};
+
+/*
+ * Only allow hypercalls that have a u64 partition id as the first member of
+ * the input structure.
+ * These are sorted by value.
+ */
+static u16 mshv_passthru_hvcalls[] = {
+ HVCALL_GET_PARTITION_PROPERTY,
+ HVCALL_SET_PARTITION_PROPERTY,
+ HVCALL_INSTALL_INTERCEPT,
+ HVCALL_GET_VP_REGISTERS,
+ HVCALL_SET_VP_REGISTERS,
+ HVCALL_TRANSLATE_VIRTUAL_ADDRESS,
+ HVCALL_CLEAR_VIRTUAL_INTERRUPT,
+ HVCALL_REGISTER_INTERCEPT_RESULT,
+ HVCALL_ASSERT_VIRTUAL_INTERRUPT,
+ HVCALL_GET_GPA_PAGES_ACCESS_STATES,
+ HVCALL_SIGNAL_EVENT_DIRECT,
+ HVCALL_POST_MESSAGE_DIRECT,
+ HVCALL_GET_VP_CPUID_VALUES,
+};
+
+static bool mshv_hvcall_is_async(u16 code)
+{
+ switch (code) {
+ case HVCALL_SET_PARTITION_PROPERTY:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+static int mshv_ioctl_passthru_hvcall(struct mshv_partition *partition,
+ bool partition_locked,
+ void __user *user_args)
+{
+ u64 status;
+ int ret = 0, i;
+ bool is_async;
+ struct mshv_root_hvcall args;
+ struct page *page;
+ unsigned int pages_order;
+ void *input_pg = NULL;
+ void *output_pg = NULL;
+
+ if (copy_from_user(&args, user_args, sizeof(args)))
+ return -EFAULT;
+
+ if (args.status || !args.in_ptr || args.in_sz < sizeof(u64) ||
+ mshv_field_nonzero(args, rsvd) || args.in_sz > HV_HYP_PAGE_SIZE)
+ return -EINVAL;
+
+ if (args.out_ptr && (!args.out_sz || args.out_sz > HV_HYP_PAGE_SIZE))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(mshv_passthru_hvcalls); ++i)
+ if (args.code == mshv_passthru_hvcalls[i])
+ break;
+
+ if (i >= ARRAY_SIZE(mshv_passthru_hvcalls))
+ return -EINVAL;
+
+ is_async = mshv_hvcall_is_async(args.code);
+ if (is_async) {
+ /* async hypercalls can only be called from partition fd */
+ if (!partition_locked)
+ return -EINVAL;
+ ret = mshv_init_async_handler(partition);
+ if (ret)
+ return ret;
+ }
+
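+ /*
+ * Allocate one page for the hypercall input, plus a second page
+ * when the caller expects output data back.
+ */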
+ pages_order = args.out_ptr ? 1 : 0;
+ page = alloc_pages(GFP_KERNEL, pages_order);
+ if (!page)
+ return -ENOMEM;
+ input_pg = page_address(page);
+
+ if (args.out_ptr)
+ output_pg = (char *)input_pg + PAGE_SIZE;
+ else
+ output_pg = NULL;
+
+ if (copy_from_user(input_pg, (void __user *)args.in_ptr,
+ args.in_sz)) {
+ ret = -EFAULT;
+ goto free_pages_out;
+ }
+
+ /*
+ * NOTE: This only works because all the allowed hypercalls' input
+ * structs begin with a u64 partition_id field.
+ */
+ *(u64 *)input_pg = partition->pt_id;
+
+ if (args.reps)
+ status = hv_do_rep_hypercall(args.code, args.reps, 0,
+ input_pg, output_pg);
+ else
+ status = hv_do_hypercall(args.code, input_pg, output_pg);
+
+ if (hv_result(status) == HV_STATUS_CALL_PENDING) {
+ if (is_async) {
+ mshv_async_hvcall_handler(partition, &status);
+ } else { /* Paranoia check. This shouldn't happen! */
+ ret = -EBADFD;
+ goto free_pages_out;
+ }
+ }
+
+ if (hv_result(status) == HV_STATUS_INSUFFICIENT_MEMORY) {
+ ret = hv_call_deposit_pages(NUMA_NO_NODE, partition->pt_id, 1);
+ if (!ret)
+ ret = -EAGAIN;
+ } else if (!hv_result_success(status)) {
+ ret = hv_result_to_errno(status);
+ }
+
+ /*
+ * Always return the status and output data regardless of result.
+ * The VMM may need it to determine how to proceed. E.g. the status may
+ * contain the number of reps completed if a rep hypercall partially
+ * succeeded.
+ */
+ args.status = hv_result(status);
+ args.reps = args.reps ? hv_repcomp(status) : 0;
+ if (copy_to_user(user_args, &args, sizeof(args)))
+ ret = -EFAULT;
+
+ if (output_pg &&
+ copy_to_user((void __user *)args.out_ptr, output_pg, args.out_sz))
+ ret = -EFAULT;
+
+free_pages_out:
+ free_pages((unsigned long)input_pg, pages_order);
+
+ return ret;
+}
+
+static inline bool is_ghcb_mapping_available(void)
+{
+#if IS_ENABLED(CONFIG_X86_64)
+ return ms_hyperv.ext_features & HV_VP_GHCB_ROOT_MAPPING_AVAILABLE;
+#else
+ return false;
+#endif
+}
+
+static int mshv_get_vp_registers(u32 vp_index, u64 partition_id, u16 count,
+ struct hv_register_assoc *registers)
+{
+ return hv_call_get_vp_registers(vp_index, partition_id,
+ count, input_vtl_zero, registers);
+}
+
+static int mshv_set_vp_registers(u32 vp_index, u64 partition_id, u16 count,
+ struct hv_register_assoc *registers)
+{
+ return hv_call_set_vp_registers(vp_index, partition_id,
+ count, input_vtl_zero, registers);
+}
+
+/*
+ * Explicit guest vCPU suspend is asynchronous by nature (as it is requested
+ * by a dom0 vCPU on behalf of a guest vCPU) and thus it can race with
+ * "intercept" suspend, done by the hypervisor.
+ * "Intercept" suspend leads to asynchronous message delivery to dom0, which
+ * should be awaited to keep the VP loop consistent (i.e. no message pending
+ * upon VP resume).
+ * VP intercept suspend can't happen while the VP is already explicitly
+ * suspended, so there are only two possible race scenarios:
+ *   1. implicit suspend bit set -> explicit suspend bit set -> message sent
+ *   2. implicit suspend bit set -> message sent -> explicit suspend bit set
+ * In either case, checking the implicit suspend bit after the explicit
+ * suspend request has succeeded reliably tells us whether there is a message
+ * to receive and deliver to the VMM.
+ */
+static int
+mshv_suspend_vp(const struct mshv_vp *vp, bool *message_in_flight)
+{
+ struct hv_register_assoc explicit_suspend = {
+ .name = HV_REGISTER_EXPLICIT_SUSPEND
+ };
+ struct hv_register_assoc intercept_suspend = {
+ .name = HV_REGISTER_INTERCEPT_SUSPEND
+ };
+ union hv_explicit_suspend_register *es =
+ &explicit_suspend.value.explicit_suspend;
+ union hv_intercept_suspend_register *is =
+ &intercept_suspend.value.intercept_suspend;
+ int ret;
+
+ es->suspended = 1;
+
+ ret = mshv_set_vp_registers(vp->vp_index, vp->vp_partition->pt_id,
+ 1, &explicit_suspend);
+ if (ret) {
+ vp_err(vp, "Failed to explicitly suspend vCPU\n");
+ return ret;
+ }
+
+ ret = mshv_get_vp_registers(vp->vp_index, vp->vp_partition->pt_id,
+ 1, &intercept_suspend);
+ if (ret) {
+ vp_err(vp, "Failed to get intercept suspend state\n");
+ return ret;
+ }
+
+ *message_in_flight = is->suspended;
+
+ return 0;
+}
+
+/*
+ * This function is used when VPs are scheduled by the hypervisor's
+ * scheduler.
+ *
+ * The caller has to make sure the registers array contains cleared
+ * HV_REGISTER_INTERCEPT_SUSPEND and HV_REGISTER_EXPLICIT_SUSPEND registers
+ * in exactly this order (the hypervisor clears them sequentially). With the
+ * opposite order, a newly arrived HV_REGISTER_INTERCEPT_SUSPEND could be
+ * cleared by mistake after the VP has already been released from
+ * HV_REGISTER_EXPLICIT_SUSPEND.
+ */
+static long mshv_run_vp_with_hyp_scheduler(struct mshv_vp *vp)
+{
+ long ret;
+ struct hv_register_assoc suspend_regs[2] = {
+ { .name = HV_REGISTER_INTERCEPT_SUSPEND },
+ { .name = HV_REGISTER_EXPLICIT_SUSPEND }
+ };
+ size_t count = ARRAY_SIZE(suspend_regs);
+
+ /* Resume VP execution */
+ ret = mshv_set_vp_registers(vp->vp_index, vp->vp_partition->pt_id,
+ count, suspend_regs);
+ if (ret) {
+ vp_err(vp, "Failed to resume vp execution. %lx\n", ret);
+ return ret;
+ }
+
+ ret = wait_event_interruptible(vp->run.vp_suspend_queue,
+ vp->run.kicked_by_hv == 1);
+ if (ret) {
+ bool message_in_flight;
+
+ /*
+ * The wait was interrupted by a signal: suspend the vCPU
+ * explicitly and pick up the message in flight (if any).
+ */
+ ret = mshv_suspend_vp(vp, &message_in_flight);
+ if (ret)
+ return ret;
+
+ /* Return if no message in flight */
+ if (!message_in_flight)
+ return -EINTR;
+
+ /* Wait for the message in flight. */
+ wait_event(vp->run.vp_suspend_queue, vp->run.kicked_by_hv == 1);
+ }
+
+ /*
+ * Reset the flag to make the wait_event call above work
+ * next time.
+ */
+ vp->run.kicked_by_hv = 0;
+
+ return 0;
+}
+
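+/*
+ * Hand the VP to the hypervisor for execution via HVCALL_DISPATCH_VP, using
+ * the per-cpu root scheduler input/output pages. Preemption is disabled
+ * around the hypercall so the per-cpu buffers cannot change under us, and
+ * run.flags.root_sched_dispatched is set for the duration of the call.
+ */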
+static int
+mshv_vp_dispatch(struct mshv_vp *vp, u32 flags,
+ struct hv_output_dispatch_vp *res)
+{
+ struct hv_input_dispatch_vp *input;
+ struct hv_output_dispatch_vp *output;
+ u64 status;
+
+ preempt_disable();
+ input = *this_cpu_ptr(root_scheduler_input);
+ output = *this_cpu_ptr(root_scheduler_output);
+
+ memset(input, 0, sizeof(*input));
+ memset(output, 0, sizeof(*output));
+
+ input->partition_id = vp->vp_partition->pt_id;
+ input->vp_index = vp->vp_index;
+ input->time_slice = 0; /* Run forever until something happens */
+ input->spec_ctrl = 0; /* TODO: set sensible flags */
+ input->flags = flags;
+
+ vp->run.flags.root_sched_dispatched = 1;
+ status = hv_do_hypercall(HVCALL_DISPATCH_VP, input, output);
+ vp->run.flags.root_sched_dispatched = 0;
+
+ *res = *output;
+ preempt_enable();
+
+ if (!hv_result_success(status))
+ vp_err(vp, "%s: status %s\n", __func__,
+ hv_result_to_string(status));
+
+ return hv_result_to_errno(status);
+}
+
+static int
+mshv_vp_clear_explicit_suspend(struct mshv_vp *vp)
+{
+ struct hv_register_assoc explicit_suspend = {
+ .name = HV_REGISTER_EXPLICIT_SUSPEND,
+ .value.explicit_suspend.suspended = 0,
+ };
+ int ret;
+
+ ret = mshv_set_vp_registers(vp->vp_index, vp->vp_partition->pt_id,
+ 1, &explicit_suspend);
+
+ if (ret)
+ vp_err(vp, "Failed to unsuspend\n");
+
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_X86_64)
+static u64 mshv_vp_interrupt_pending(struct mshv_vp *vp)
+{
+ if (!vp->vp_register_page)
+ return 0;
+ return vp->vp_register_page->interrupt_vectors.as_uint64;
+}
+#else
+static u64 mshv_vp_interrupt_pending(struct mshv_vp *vp)
+{
+ return 0;
+}
+#endif
+
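+/*
+ * Check the hypervisor-maintained VP stats pages (self area first, then the
+ * parent area) for a non-zero VpRootDispatchThreadBlocked counter, i.e.
+ * whether the hypervisor considers this VP's dispatch thread blocked.
+ */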
+static bool mshv_vp_dispatch_thread_blocked(struct mshv_vp *vp)
+{
+ struct hv_stats_page **stats = vp->vp_stats_pages;
+ u64 *self_vp_cntrs = stats[HV_STATS_AREA_SELF]->vp_cntrs;
+ u64 *parent_vp_cntrs = stats[HV_STATS_AREA_PARENT]->vp_cntrs;
+
+ if (self_vp_cntrs[VpRootDispatchThreadBlocked])
+ return self_vp_cntrs[VpRootDispatchThreadBlocked];
+ return parent_vp_cntrs[VpRootDispatchThreadBlocked];
+}
+
+static int
+mshv_vp_wait_for_hv_kick(struct mshv_vp *vp)
+{
+ int ret;
+
+ ret = wait_event_interruptible(vp->run.vp_suspend_queue,
+ (vp->run.kicked_by_hv == 1 &&
+ !mshv_vp_dispatch_thread_blocked(vp)) ||
+ mshv_vp_interrupt_pending(vp));
+ if (ret)
+ return -EINTR;
+
+ vp->run.flags.root_sched_blocked = 0;
+ vp->run.kicked_by_hv = 0;
+
+ return 0;
+}
+
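+/*
+ * Handle any pending thread work (signals, reschedule, notify-resume) before
+ * (re)entering guest mode, looping until the work flags are clear or
+ * mshv_do_pre_guest_mode_work() reports an error.
+ */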
+static int mshv_pre_guest_mode_work(struct mshv_vp *vp)
+{
+ const ulong work_flags = _TIF_NOTIFY_SIGNAL | _TIF_SIGPENDING |
+ _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME;
+ ulong th_flags;
+
+ th_flags = read_thread_flags();
+ while (th_flags & work_flags) {
+ int ret;
+
+ /* nb: following will call schedule */
+ ret = mshv_do_pre_guest_mode_work(th_flags);
+
+ if (ret)
+ return ret;
+
+ th_flags = read_thread_flags();
+ }
+
+ return 0;
+}
+
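+/*
+ * Root scheduler run loop: repeatedly dispatch the VP, waiting for a
+ * hypervisor kick whenever the dispatch state comes back blocked, until an
+ * intercept suspends the VP (the caller then hands the intercept message
+ * back to user space) or an error/signal breaks the loop.
+ */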
+/* Must be called with interrupts enabled */
+static long mshv_run_vp_with_root_scheduler(struct mshv_vp *vp)
+{
+ long ret;
+
+ if (vp->run.flags.root_sched_blocked) {
+ /*
+ * Dispatch state of this VP is blocked. Need to wait
+ * for the hypervisor to clear the blocked state before
+ * dispatching it.
+ */
+ ret = mshv_vp_wait_for_hv_kick(vp);
+ if (ret)
+ return ret;
+ }
+
+ do {
+ u32 flags = 0;
+ struct hv_output_dispatch_vp output;
+
+ ret = mshv_pre_guest_mode_work(vp);
+ if (ret)
+ break;
+
+ if (vp->run.flags.intercept_suspend)
+ flags |= HV_DISPATCH_VP_FLAG_CLEAR_INTERCEPT_SUSPEND;
+
+ if (mshv_vp_interrupt_pending(vp))
+ flags |= HV_DISPATCH_VP_FLAG_SCAN_INTERRUPT_INJECTION;
+
+ ret = mshv_vp_dispatch(vp, flags, &output);
+ if (ret)
+ break;
+
+ vp->run.flags.intercept_suspend = 0;
+
+ if (output.dispatch_state == HV_VP_DISPATCH_STATE_BLOCKED) {
+ if (output.dispatch_event ==
+ HV_VP_DISPATCH_EVENT_SUSPEND) {
+ /*
+ * TODO: remove the warning once VP canceling
+ * is supported
+ */
+ WARN_ONCE(atomic64_read(&vp->run.vp_signaled_count),
+ "%s: vp#%d: unexpected explicit suspend\n",
+ __func__, vp->vp_index);
+ /*
+ * Need to clear explicit suspend before
+ * dispatching.
+ * Explicit suspend is either:
+ * - set right after the first VP dispatch or
+ * - set explicitly via hypercall
+ * Since the latter case is not yet supported,
+ * simply clear it here.
+ */
+ ret = mshv_vp_clear_explicit_suspend(vp);
+ if (ret)
+ break;
+
+ ret = mshv_vp_wait_for_hv_kick(vp);
+ if (ret)
+ break;
+ } else {
+ vp->run.flags.root_sched_blocked = 1;
+ ret = mshv_vp_wait_for_hv_kick(vp);
+ if (ret)
+ break;
+ }
+ } else {
+ /* HV_VP_DISPATCH_STATE_READY */
+ if (output.dispatch_event ==
+ HV_VP_DISPATCH_EVENT_INTERCEPT)
+ vp->run.flags.intercept_suspend = 1;
+ }
+ } while (!vp->run.flags.intercept_suspend);
+
+ return ret;
+}
+
+static_assert(sizeof(struct hv_message) <= MSHV_RUN_VP_BUF_SZ,
+ "sizeof(struct hv_message) must not exceed MSHV_RUN_VP_BUF_SZ");
+
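+/*
+ * MSHV_RUN_VP: run the VP with whichever scheduler is in use until it is
+ * suspended by an intercept, then copy the intercept message page into the
+ * user-supplied buffer. A VMM would typically call this in a loop,
+ * e.g. (hypothetical):
+ *
+ *	struct hv_message msg;
+ *
+ *	while (!ioctl(vp_fd, MSHV_RUN_VP, &msg))
+ *		handle_intercept(&msg);
+ */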
+static long mshv_vp_ioctl_run_vp(struct mshv_vp *vp, void __user *ret_msg)
+{
+ long rc;
+
+ if (hv_scheduler_type == HV_SCHEDULER_TYPE_ROOT)
+ rc = mshv_run_vp_with_root_scheduler(vp);
+ else
+ rc = mshv_run_vp_with_hyp_scheduler(vp);
+
+ if (rc)
+ return rc;
+
+ if (copy_to_user(ret_msg, vp->vp_intercept_msg_page,
+ sizeof(struct hv_message)))
+ rc = -EFAULT;
+
+ return rc;
+}
+
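+/*
+ * Get/set VP state that is transferred via guest page frames: pin the
+ * user-space buffer (in batches, since pin_user_pages_fast() may return
+ * fewer pages than requested) and let the hypervisor read or write the
+ * pinned pages directly.
+ */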
+static int
+mshv_vp_ioctl_get_set_state_pfn(struct mshv_vp *vp,
+ struct hv_vp_state_data state_data,
+ unsigned long user_pfn, size_t page_count,
+ bool is_set)
+{
+ int completed, ret = 0;
+ unsigned long check;
+ struct page **pages;
+
+ if (page_count > INT_MAX)
+ return -EINVAL;
+ /*
+ * Check the arithmetic for wraparound/overflow.
+ * The last page address in the buffer is:
+ * (user_pfn + (page_count - 1)) * PAGE_SIZE
+ */
+ if (check_add_overflow(user_pfn, (page_count - 1), &check))
+ return -EOVERFLOW;
+ if (check_mul_overflow(check, PAGE_SIZE, &check))
+ return -EOVERFLOW;
+
+ /* Pin user pages so hypervisor can copy directly to them */
+ pages = kcalloc(page_count, sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ for (completed = 0; completed < page_count; completed += ret) {
+ unsigned long user_addr = (user_pfn + completed) * PAGE_SIZE;
+ int remaining = page_count - completed;
+
+ ret = pin_user_pages_fast(user_addr, remaining, FOLL_WRITE,
+ &pages[completed]);
+ if (ret < 0) {
+ vp_err(vp, "%s: Failed to pin user pages error %i\n",
+ __func__, ret);
+ goto unpin_pages;
+ }
+ }
+
+ if (is_set)
+ ret = hv_call_set_vp_state(vp->vp_index,
+ vp->vp_partition->pt_id,
+ state_data, page_count, pages,
+ 0, NULL);
+ else
+ ret = hv_call_get_vp_state(vp->vp_index,
+ vp->vp_partition->pt_id,
+ state_data, page_count, pages,
+ NULL);
+
+unpin_pages:
+ unpin_user_pages(pages, completed);
+ kfree(pages);
+ return ret;
+}
+
+static long
+mshv_vp_ioctl_get_set_state(struct mshv_vp *vp,
+ struct mshv_get_set_vp_state __user *user_args,
+ bool is_set)
+{
+ struct mshv_get_set_vp_state args;
+ long ret = 0;
+ union hv_output_get_vp_state vp_state;
+ u32 data_sz;
+ struct hv_vp_state_data state_data = {};
+
+ if (copy_from_user(&args, user_args, sizeof(args)))
+ return -EFAULT;
+
+ if (args.type >= MSHV_VP_STATE_COUNT || mshv_field_nonzero(args, rsvd) ||
+ !args.buf_sz || !PAGE_ALIGNED(args.buf_sz) ||
+ !PAGE_ALIGNED(args.buf_ptr))
+ return -EINVAL;
+
+ if (!access_ok((void __user *)args.buf_ptr, args.buf_sz))
+ return -EFAULT;
+
+ switch (args.type) {
+ case MSHV_VP_STATE_LAPIC:
+ state_data.type = HV_GET_SET_VP_STATE_LAPIC_STATE;
+ data_sz = HV_HYP_PAGE_SIZE;
+ break;
+ case MSHV_VP_STATE_XSAVE:
+ {
+ u64 data_sz_64;
+
+ ret = hv_call_get_partition_property(vp->vp_partition->pt_id,
+ HV_PARTITION_PROPERTY_XSAVE_STATES,
+ &state_data.xsave.states.as_uint64);
+ if (ret)
+ return ret;
+
+ ret = hv_call_get_partition_property(vp->vp_partition->pt_id,
+ HV_PARTITION_PROPERTY_MAX_XSAVE_DATA_SIZE,
+ &data_sz_64);
+ if (ret)
+ return ret;
+
+ data_sz = (u32)data_sz_64;
+ state_data.xsave.flags = 0;
+ /* Always request legacy states */
+ state_data.xsave.states.legacy_x87 = 1;
+ state_data.xsave.states.legacy_sse = 1;
+ state_data.type = HV_GET_SET_VP_STATE_XSAVE;
+ break;
+ }
+ case MSHV_VP_STATE_SIMP:
+ state_data.type = HV_GET_SET_VP_STATE_SIM_PAGE;
+ data_sz = HV_HYP_PAGE_SIZE;
+ break;
+ case MSHV_VP_STATE_SIEFP:
+ state_data.type = HV_GET_SET_VP_STATE_SIEF_PAGE;
+ data_sz = HV_HYP_PAGE_SIZE;
+ break;
+ case MSHV_VP_STATE_SYNTHETIC_TIMERS:
+ state_data.type = HV_GET_SET_VP_STATE_SYNTHETIC_TIMERS;
+ data_sz = sizeof(vp_state.synthetic_timers_state);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (copy_to_user(&user_args->buf_sz, &data_sz, sizeof(user_args->buf_sz)))
+ return -EFAULT;
+
+ if (data_sz > args.buf_sz)
+ return -EINVAL;
+
+ /* If the data is transmitted via pfns, delegate to helper */
+ if (state_data.type & HV_GET_SET_VP_STATE_TYPE_PFN) {
+ unsigned long user_pfn = PFN_DOWN(args.buf_ptr);
+ size_t page_count = PFN_DOWN(args.buf_sz);
+
+ return mshv_vp_ioctl_get_set_state_pfn(vp, state_data, user_pfn,
+ page_count, is_set);
+ }
+
+ /* Paranoia check - this shouldn't happen! */
+ if (data_sz > sizeof(vp_state)) {
+ vp_err(vp, "Invalid vp state data size!\n");
+ return -EINVAL;
+ }
+
+ if (is_set) {
+ if (copy_from_user(&vp_state, (void __user *)args.buf_ptr, data_sz))
+ return -EFAULT;
+
+ return hv_call_set_vp_state(vp->vp_index,
+ vp->vp_partition->pt_id,
+ state_data, 0, NULL,
+ sizeof(vp_state), (u8 *)&vp_state);
+ }
+
+ ret = hv_call_get_vp_state(vp->vp_index, vp->vp_partition->pt_id,
+ state_data, 0, NULL, &vp_state);
+ if (ret)
+ return ret;
+
+ if (copy_to_user((void __user *)args.buf_ptr, &vp_state, data_sz))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long
+mshv_vp_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+ struct mshv_vp *vp = filp->private_data;
+ long r = -ENOTTY;
+
+ if (mutex_lock_killable(&vp->vp_mutex))
+ return -EINTR;
+
+ switch (ioctl) {
+ case MSHV_RUN_VP:
+ r = mshv_vp_ioctl_run_vp(vp, (void __user *)arg);
+ break;
+ case MSHV_GET_VP_STATE:
+ r = mshv_vp_ioctl_get_set_state(vp, (void __user *)arg, false);
+ break;
+ case MSHV_SET_VP_STATE:
+ r = mshv_vp_ioctl_get_set_state(vp, (void __user *)arg, true);
+ break;
+ case MSHV_ROOT_HVCALL:
+ r = mshv_ioctl_passthru_hvcall(vp->vp_partition, false,
+ (void __user *)arg);
+ break;
+ default:
+ vp_warn(vp, "Invalid ioctl: %#x\n", ioctl);
+ break;
+ }
+ mutex_unlock(&vp->vp_mutex);
+
+ return r;
+}
+
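+/*
+ * mmap() of a VP fd exposes the shared VP pages to user space; vm_pgoff
+ * selects which one (register page, intercept message page or GHCB page).
+ * The fault handler simply hands back the corresponding kernel page.
+ */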
+static vm_fault_t mshv_vp_fault(struct vm_fault *vmf)
+{
+ struct mshv_vp *vp = vmf->vma->vm_file->private_data;
+
+ switch (vmf->vma->vm_pgoff) {
+ case MSHV_VP_MMAP_OFFSET_REGISTERS:
+ vmf->page = virt_to_page(vp->vp_register_page);
+ break;
+ case MSHV_VP_MMAP_OFFSET_INTERCEPT_MESSAGE:
+ vmf->page = virt_to_page(vp->vp_intercept_msg_page);
+ break;
+ case MSHV_VP_MMAP_OFFSET_GHCB:
+ vmf->page = virt_to_page(vp->vp_ghcb_page);
+ break;
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+
+ get_page(vmf->page);
+
+ return 0;
+}
+
+static int mshv_vp_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct mshv_vp *vp = file->private_data;
+
+ switch (vma->vm_pgoff) {
+ case MSHV_VP_MMAP_OFFSET_REGISTERS:
+ if (!vp->vp_register_page)
+ return -ENODEV;
+ break;
+ case MSHV_VP_MMAP_OFFSET_INTERCEPT_MESSAGE:
+ if (!vp->vp_intercept_msg_page)
+ return -ENODEV;
+ break;
+ case MSHV_VP_MMAP_OFFSET_GHCB:
+ if (!vp->vp_ghcb_page)
+ return -ENODEV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ vma->vm_ops = &mshv_vp_vm_ops;
+ return 0;
+}
+
+static int
+mshv_vp_release(struct inode *inode, struct file *filp)
+{
+ struct mshv_vp *vp = filp->private_data;
+
+ /* Rest of VP cleanup happens in destroy_partition() */
+ mshv_partition_put(vp->vp_partition);
+ return 0;
+}
+
+static void mshv_vp_stats_unmap(u64 partition_id, u32 vp_index)
+{
+ union hv_stats_object_identity identity = {
+ .vp.partition_id = partition_id,
+ .vp.vp_index = vp_index,
+ };
+
+ identity.vp.stats_area_type = HV_STATS_AREA_SELF;
+ hv_call_unmap_stat_page(HV_STATS_OBJECT_VP, &identity);
+
+ identity.vp.stats_area_type = HV_STATS_AREA_PARENT;
+ hv_call_unmap_stat_page(HV_STATS_OBJECT_VP, &identity);
+}
+
+static int mshv_vp_stats_map(u64 partition_id, u32 vp_index,
+ void *stats_pages[])
+{
+ union hv_stats_object_identity identity = {
+ .vp.partition_id = partition_id,
+ .vp.vp_index = vp_index,
+ };
+ int err;
+
+ identity.vp.stats_area_type = HV_STATS_AREA_SELF;
+ err = hv_call_map_stat_page(HV_STATS_OBJECT_VP, &identity,
+ &stats_pages[HV_STATS_AREA_SELF]);
+ if (err)
+ return err;
+
+ identity.vp.stats_area_type = HV_STATS_AREA_PARENT;
+ err = hv_call_map_stat_page(HV_STATS_OBJECT_VP, &identity,
+ &stats_pages[HV_STATS_AREA_PARENT]);
+ if (err)
+ goto unmap_self;
+
+ return 0;
+
+unmap_self:
+ identity.vp.stats_area_type = HV_STATS_AREA_SELF;
+ hv_call_unmap_stat_page(HV_STATS_OBJECT_VP, &identity);
+ return err;
+}
+
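+/*
+ * MSHV_CREATE_VP: create the VP in the hypervisor, map the shared state
+ * pages (intercept message page always; register page for non-encrypted
+ * partitions; GHCB page for encrypted partitions when available) and, when
+ * running as the parent partition, the stats pages, then expose the VP to
+ * user space as an anonymous fd.
+ */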
+static long
+mshv_partition_ioctl_create_vp(struct mshv_partition *partition,
+ void __user *arg)
+{
+ struct mshv_create_vp args;
+ struct mshv_vp *vp;
+ struct page *intercept_message_page, *register_page, *ghcb_page;
+ void *stats_pages[2];
+ long ret;
+
+ if (copy_from_user(&args, arg, sizeof(args)))
+ return -EFAULT;
+
+ if (args.vp_index >= MSHV_MAX_VPS)
+ return -EINVAL;
+
+ if (partition->pt_vp_array[args.vp_index])
+ return -EEXIST;
+
+ ret = hv_call_create_vp(NUMA_NO_NODE, partition->pt_id, args.vp_index,
+ 0 /* Only valid for root partition VPs */);
+ if (ret)
+ return ret;
+
+ ret = hv_call_map_vp_state_page(partition->pt_id, args.vp_index,
+ HV_VP_STATE_PAGE_INTERCEPT_MESSAGE,
+ input_vtl_zero,
+ &intercept_message_page);
+ if (ret)
+ goto destroy_vp;
+
+ if (!mshv_partition_encrypted(partition)) {
+ ret = hv_call_map_vp_state_page(partition->pt_id, args.vp_index,
+ HV_VP_STATE_PAGE_REGISTERS,
+ input_vtl_zero,
+ &register_page);
+ if (ret)
+ goto unmap_intercept_message_page;
+ }
+
+ if (mshv_partition_encrypted(partition) &&
+ is_ghcb_mapping_available()) {
+ ret = hv_call_map_vp_state_page(partition->pt_id, args.vp_index,
+ HV_VP_STATE_PAGE_GHCB,
+ input_vtl_normal,
+ &ghcb_page);
+ if (ret)
+ goto unmap_register_page;
+ }
+
+ if (hv_parent_partition()) {
+ ret = mshv_vp_stats_map(partition->pt_id, args.vp_index,
+ stats_pages);
+ if (ret)
+ goto unmap_ghcb_page;
+ }
+
+ vp = kzalloc(sizeof(*vp), GFP_KERNEL);
+ if (!vp) {
+ ret = -ENOMEM;
+ goto unmap_stats_pages;
+ }
+
+ vp->vp_partition = mshv_partition_get(partition);
+ if (!vp->vp_partition) {
+ ret = -EBADF;
+ goto free_vp;
+ }
+
+ mutex_init(&vp->vp_mutex);
+ init_waitqueue_head(&vp->run.vp_suspend_queue);
+ atomic64_set(&vp->run.vp_signaled_count, 0);
+
+ vp->vp_index = args.vp_index;
+ vp->vp_intercept_msg_page = page_to_virt(intercept_message_page);
+ if (!mshv_partition_encrypted(partition))
+ vp->vp_register_page = page_to_virt(register_page);
+
+ if (mshv_partition_encrypted(partition) && is_ghcb_mapping_available())
+ vp->vp_ghcb_page = page_to_virt(ghcb_page);
+
+ if (hv_parent_partition())
+ memcpy(vp->vp_stats_pages, stats_pages, sizeof(stats_pages));
+
+ /*
+ * Keep anon_inode_getfd last: it installs fd in the file struct and
+ * thus makes the state accessible in user space.
+ */
+ ret = anon_inode_getfd("mshv_vp", &mshv_vp_fops, vp,
+ O_RDWR | O_CLOEXEC);
+ if (ret < 0)
+ goto put_partition;
+
+ /* already exclusive with the partition mutex for all ioctls */
+ partition->pt_vp_count++;
+ partition->pt_vp_array[args.vp_index] = vp;
+
+ return ret;
+
+put_partition:
+ mshv_partition_put(partition);
+free_vp:
+ kfree(vp);
+unmap_stats_pages:
+ if (hv_parent_partition())
+ mshv_vp_stats_unmap(partition->pt_id, args.vp_index);
+unmap_ghcb_page:
+ if (mshv_partition_encrypted(partition) && is_ghcb_mapping_available()) {
+ hv_call_unmap_vp_state_page(partition->pt_id, args.vp_index,
+ HV_VP_STATE_PAGE_GHCB,
+ input_vtl_normal);
+ }
+unmap_register_page:
+ if (!mshv_partition_encrypted(partition)) {
+ hv_call_unmap_vp_state_page(partition->pt_id, args.vp_index,
+ HV_VP_STATE_PAGE_REGISTERS,
+ input_vtl_zero);
+ }
+unmap_intercept_message_page:
+ hv_call_unmap_vp_state_page(partition->pt_id, args.vp_index,
+ HV_VP_STATE_PAGE_INTERCEPT_MESSAGE,
+ input_vtl_zero);
+destroy_vp:
+ hv_call_delete_vp(partition->pt_id, args.vp_index);
+ return ret;
+}
+
+static int mshv_init_async_handler(struct mshv_partition *partition)
+{
+ if (completion_done(&partition->async_hypercall)) {
+ pt_err(partition,
+ "Cannot issue async hypercall while another one in progress!\n");
+ return -EPERM;
+ }
+
+ reinit_completion(&partition->async_hypercall);
+ return 0;
+}
+
+static void mshv_async_hvcall_handler(void *data, u64 *status)
+{
+ struct mshv_partition *partition = data;
+
+ wait_for_completion(&partition->async_hypercall);
+ pt_dbg(partition, "Async hypercall completed!\n");
+
+ *status = partition->async_hypercall_status;
+}
+
+static int
+mshv_partition_region_share(struct mshv_mem_region *region)
+{
+ u32 flags = HV_MODIFY_SPA_PAGE_HOST_ACCESS_MAKE_SHARED;
+
+ if (region->flags.large_pages)
+ flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
+
+ return hv_call_modify_spa_host_access(region->partition->pt_id,
+ region->pages, region->nr_pages,
+ HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE,
+ flags, true);
+}
+
+static int
+mshv_partition_region_unshare(struct mshv_mem_region *region)
+{
+ u32 flags = HV_MODIFY_SPA_PAGE_HOST_ACCESS_MAKE_EXCLUSIVE;
+
+ if (region->flags.large_pages)
+ flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
+
+ return hv_call_modify_spa_host_access(region->partition->pt_id,
+ region->pages, region->nr_pages,
+ 0,
+ flags, false);
+}
+
+static int
+mshv_region_remap_pages(struct mshv_mem_region *region, u32 map_flags,
+ u64 page_offset, u64 page_count)
+{
+ if (page_offset + page_count > region->nr_pages)
+ return -EINVAL;
+
+ if (region->flags.large_pages)
+ map_flags |= HV_MAP_GPA_LARGE_PAGE;
+
+ /* ask the hypervisor to map guest ram */
+ return hv_call_map_gpa_pages(region->partition->pt_id,
+ region->start_gfn + page_offset,
+ page_count, map_flags,
+ region->pages + page_offset);
+}
+
+static int
+mshv_region_map(struct mshv_mem_region *region)
+{
+ u32 map_flags = region->hv_map_flags;
+
+ return mshv_region_remap_pages(region, map_flags,
+ 0, region->nr_pages);
+}
+
+static void
+mshv_region_evict_pages(struct mshv_mem_region *region,
+ u64 page_offset, u64 page_count)
+{
+ if (region->flags.range_pinned)
+ unpin_user_pages(region->pages + page_offset, page_count);
+
+ memset(region->pages + page_offset, 0,
+ page_count * sizeof(struct page *));
+}
+
+static void
+mshv_region_evict(struct mshv_mem_region *region)
+{
+ mshv_region_evict_pages(region, 0, region->nr_pages);
+}
+
+static int
+mshv_region_populate_pages(struct mshv_mem_region *region,
+ u64 page_offset, u64 page_count)
+{
+ u64 done_count, nr_pages;
+ struct page **pages;
+ __u64 userspace_addr;
+ int ret;
+
+ if (page_offset + page_count > region->nr_pages)
+ return -EINVAL;
+
+ for (done_count = 0; done_count < page_count; done_count += ret) {
+ pages = region->pages + page_offset + done_count;
+ userspace_addr = region->start_uaddr +
+ (page_offset + done_count) *
+ HV_HYP_PAGE_SIZE;
+ nr_pages = min(page_count - done_count,
+ MSHV_PIN_PAGES_BATCH_SIZE);
+
+ /*
+ * Pinning assuming 4k pages works for large pages too.
+ * All page structs within the large page are returned.
+ *
+ * Pin requests are batched because pin_user_pages_fast
+ * with the FOLL_LONGTERM flag does a large temporary
+ * allocation of contiguous memory.
+ */
+ if (region->flags.range_pinned)
+ ret = pin_user_pages_fast(userspace_addr,
+ nr_pages,
+ FOLL_WRITE | FOLL_LONGTERM,
+ pages);
+ else
+ ret = -EOPNOTSUPP;
+
+ if (ret < 0)
+ goto release_pages;
+ }
+
+ if (PageHuge(region->pages[page_offset]))
+ region->flags.large_pages = true;
+
+ return 0;
+
+release_pages:
+ mshv_region_evict_pages(region, page_offset, done_count);
+ return ret;
+}
+
+static int
+mshv_region_populate(struct mshv_mem_region *region)
+{
+ return mshv_region_populate_pages(region, 0, region->nr_pages);
+}
+
+static struct mshv_mem_region *
+mshv_partition_region_by_gfn(struct mshv_partition *partition, u64 gfn)
+{
+ struct mshv_mem_region *region;
+
+ hlist_for_each_entry(region, &partition->pt_mem_regions, hnode) {
+ if (gfn >= region->start_gfn &&
+ gfn < region->start_gfn + region->nr_pages)
+ return region;
+ }
+
+ return NULL;
+}
+
+static struct mshv_mem_region *
+mshv_partition_region_by_uaddr(struct mshv_partition *partition, u64 uaddr)
+{
+ struct mshv_mem_region *region;
+
+ hlist_for_each_entry(region, &partition->pt_mem_regions, hnode) {
+ if (uaddr >= region->start_uaddr &&
+ uaddr < region->start_uaddr +
+ (region->nr_pages << HV_HYP_PAGE_SHIFT))
+ return region;
+ }
+
+ return NULL;
+}
+
+/*
+ * NB: caller checks and makes sure mem->size is page aligned
+ * Returns: 0 with regionpp updated on success, or -errno
+ */
+static int mshv_partition_create_region(struct mshv_partition *partition,
+ struct mshv_user_mem_region *mem,
+ struct mshv_mem_region **regionpp,
+ bool is_mmio)
+{
+ struct mshv_mem_region *region;
+ u64 nr_pages = HVPFN_DOWN(mem->size);
+
+ /* Reject overlapping regions */
+ if (mshv_partition_region_by_gfn(partition, mem->guest_pfn) ||
+ mshv_partition_region_by_gfn(partition, mem->guest_pfn + nr_pages - 1) ||
+ mshv_partition_region_by_uaddr(partition, mem->userspace_addr) ||
+ mshv_partition_region_by_uaddr(partition, mem->userspace_addr + mem->size - 1))
+ return -EEXIST;
+
+ region = vzalloc(sizeof(*region) + sizeof(struct page *) * nr_pages);
+ if (!region)
+ return -ENOMEM;
+
+ region->nr_pages = nr_pages;
+ region->start_gfn = mem->guest_pfn;
+ region->start_uaddr = mem->userspace_addr;
+ region->hv_map_flags = HV_MAP_GPA_READABLE | HV_MAP_GPA_ADJUSTABLE;
+ if (mem->flags & BIT(MSHV_SET_MEM_BIT_WRITABLE))
+ region->hv_map_flags |= HV_MAP_GPA_WRITABLE;
+ if (mem->flags & BIT(MSHV_SET_MEM_BIT_EXECUTABLE))
+ region->hv_map_flags |= HV_MAP_GPA_EXECUTABLE;
+
+ /* Note: large_pages flag populated when we pin the pages */
+ if (!is_mmio)
+ region->flags.range_pinned = true;
+
+ region->partition = partition;
+
+ *regionpp = region;
+
+ return 0;
+}
+
+/*
+ * Map guest RAM. For SNP partitions, host access to the memory is released first.
+ * Side Effects: In case of failure, pages are unpinned when feasible.
+ */
+static int
+mshv_partition_mem_region_map(struct mshv_mem_region *region)
+{
+ struct mshv_partition *partition = region->partition;
+ int ret;
+
+ ret = mshv_region_populate(region);
+ if (ret) {
+ pt_err(partition, "Failed to populate memory region: %d\n",
+ ret);
+ goto err_out;
+ }
+
+ /*
+ * For an SNP partition it is a requirement that for every memory region
+ * that we are going to map for this partition we should make sure that
+ * host access to that region is released. This is ensured by doing an
+ * additional hypercall which will update the SLAT to release host
+ * access to guest memory regions.
+ */
+ if (mshv_partition_encrypted(partition)) {
+ ret = mshv_partition_region_unshare(region);
+ if (ret) {
+ pt_err(partition,
+ "Failed to unshare memory region (guest_pfn: %llu): %d\n",
+ region->start_gfn, ret);
+ goto evict_region;
+ }
+ }
+
+ ret = mshv_region_map(region);
+ if (ret && mshv_partition_encrypted(partition)) {
+ int shrc;
+
+ shrc = mshv_partition_region_share(region);
+ if (!shrc)
+ goto evict_region;
+
+ pt_err(partition,
+ "Failed to share memory region (guest_pfn: %llu): %d\n",
+ region->start_gfn, shrc);
+ /*
+ * Don't unpin if marking shared failed because pages are no
+ * longer mapped in the host, ie root, anymore.
+ */
+ goto err_out;
+ }
+
+ return 0;
+
+evict_region:
+ mshv_region_evict(region);
+err_out:
+ return ret;
+}
+
+/*
+ * This maps two things: guest RAM and, for PCI passthrough, MMIO space.
+ *
+ * mmio:
+ * - vfio overloads vm_pgoff to store the mmio start pfn/spa.
+ * - Two things need to happen for mapping an mmio range:
+ *   1. mapped at the uaddr so the VMM can access it.
+ *   2. mapped in the hwpt (gfn <-> mmio phys addr) so the guest can access it.
+ *
+ * This function takes care of the second. The first is managed by vfio and
+ * hence is taken care of via vfio_pci_mmap_fault().
+ */
+static long
+mshv_map_user_memory(struct mshv_partition *partition,
+ struct mshv_user_mem_region mem)
+{
+ struct mshv_mem_region *region;
+ struct vm_area_struct *vma;
+ bool is_mmio;
+ ulong mmio_pfn;
+ long ret;
+
+ if (mem.flags & BIT(MSHV_SET_MEM_BIT_UNMAP) ||
+ !access_ok((const void *)mem.userspace_addr, mem.size))
+ return -EINVAL;
+
+ mmap_read_lock(current->mm);
+ vma = vma_lookup(current->mm, mem.userspace_addr);
+ is_mmio = vma ? !!(vma->vm_flags & (VM_IO | VM_PFNMAP)) : 0;
+ mmio_pfn = is_mmio ? vma->vm_pgoff : 0;
+ mmap_read_unlock(current->mm);
+
+ if (!vma)
+ return -EINVAL;
+
+ ret = mshv_partition_create_region(partition, &mem, &region,
+ is_mmio);
+ if (ret)
+ return ret;
+
+ if (is_mmio)
+ ret = hv_call_map_mmio_pages(partition->pt_id, mem.guest_pfn,
+ mmio_pfn, HVPFN_DOWN(mem.size));
+ else
+ ret = mshv_partition_mem_region_map(region);
+
+ if (ret)
+ goto errout;
+
+ /* Install the new region */
+ hlist_add_head(&region->hnode, &partition->pt_mem_regions);
+
+ return 0;
+
+errout:
+ vfree(region);
+ return ret;
+}
+
+/* Called for unmapping both the guest ram and the mmio space */
+static long
+mshv_unmap_user_memory(struct mshv_partition *partition,
+ struct mshv_user_mem_region mem)
+{
+ struct mshv_mem_region *region;
+ u32 unmap_flags = 0;
+
+ if (!(mem.flags & BIT(MSHV_SET_MEM_BIT_UNMAP)))
+ return -EINVAL;
+
+ region = mshv_partition_region_by_gfn(partition, mem.guest_pfn);
+ if (!region)
+ return -EINVAL;
+
+ /* Paranoia check */
+ if (region->start_uaddr != mem.userspace_addr ||
+ region->start_gfn != mem.guest_pfn ||
+ region->nr_pages != HVPFN_DOWN(mem.size))
+ return -EINVAL;
+
+ hlist_del(&region->hnode);
+
+ if (region->flags.large_pages)
+ unmap_flags |= HV_UNMAP_GPA_LARGE_PAGE;
+
+ /* ignore unmap failures and continue as process may be exiting */
+ hv_call_unmap_gpa_pages(partition->pt_id, region->start_gfn,
+ region->nr_pages, unmap_flags);
+
+ mshv_region_evict(region);
+
+ vfree(region);
+ return 0;
+}
+
+static long
+mshv_partition_ioctl_set_memory(struct mshv_partition *partition,
+ struct mshv_user_mem_region __user *user_mem)
+{
+ struct mshv_user_mem_region mem;
+
+ if (copy_from_user(&mem, user_mem, sizeof(mem)))
+ return -EFAULT;
+
+ if (!mem.size ||
+ !PAGE_ALIGNED(mem.size) ||
+ !PAGE_ALIGNED(mem.userspace_addr) ||
+ (mem.flags & ~MSHV_SET_MEM_FLAGS_MASK) ||
+ mshv_field_nonzero(mem, rsvd))
+ return -EINVAL;
+
+ if (mem.flags & BIT(MSHV_SET_MEM_BIT_UNMAP))
+ return mshv_unmap_user_memory(partition, mem);
+
+ return mshv_map_user_memory(partition, mem);
+}
+
+static long
+mshv_partition_ioctl_ioeventfd(struct mshv_partition *partition,
+ void __user *user_args)
+{
+ struct mshv_user_ioeventfd args;
+
+ if (copy_from_user(&args, user_args, sizeof(args)))
+ return -EFAULT;
+
+ return mshv_set_unset_ioeventfd(partition, &args);
+}
+
+static long
+mshv_partition_ioctl_irqfd(struct mshv_partition *partition,
+ void __user *user_args)
+{
+ struct mshv_user_irqfd args;
+
+ if (copy_from_user(&args, user_args, sizeof(args)))
+ return -EFAULT;
+
+ return mshv_set_unset_irqfd(partition, &args);
+}
+
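+/*
+ * MSHV_GET_GPAP_ACCESS_BITMAP: query (and optionally clear or set) the
+ * accessed or dirty state of a range of guest physical pages and return it
+ * to user space compressed into a bitmap with one bit per page.
+ */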
+static long
+mshv_partition_ioctl_get_gpap_access_bitmap(struct mshv_partition *partition,
+ void __user *user_args)
+{
+ struct mshv_gpap_access_bitmap args;
+ union hv_gpa_page_access_state *states;
+ long ret, i;
+ union hv_gpa_page_access_state_flags hv_flags = {};
+ u8 hv_type_mask;
+ ulong bitmap_buf_sz, states_buf_sz;
+ int written = 0;
+
+ if (copy_from_user(&args, user_args, sizeof(args)))
+ return -EFAULT;
+
+ if (args.access_type >= MSHV_GPAP_ACCESS_TYPE_COUNT ||
+ args.access_op >= MSHV_GPAP_ACCESS_OP_COUNT ||
+ mshv_field_nonzero(args, rsvd) || !args.page_count ||
+ !args.bitmap_ptr)
+ return -EINVAL;
+
+ if (check_mul_overflow(args.page_count, sizeof(*states), &states_buf_sz))
+ return -E2BIG;
+
+ /* Num bytes needed to store bitmap; one bit per page rounded up */
+ bitmap_buf_sz = DIV_ROUND_UP(args.page_count, 8);
+
+ /* Sanity check */
+ if (bitmap_buf_sz > states_buf_sz)
+ return -EBADFD;
+
+ switch (args.access_type) {
+ case MSHV_GPAP_ACCESS_TYPE_ACCESSED:
+ hv_type_mask = 1;
+ if (args.access_op == MSHV_GPAP_ACCESS_OP_CLEAR) {
+ hv_flags.clear_accessed = 1;
+ /* not accessed implies not dirty */
+ hv_flags.clear_dirty = 1;
+ } else { /* MSHV_GPAP_ACCESS_OP_SET */
+ hv_flags.set_accessed = 1;
+ }
+ break;
+ case MSHV_GPAP_ACCESS_TYPE_DIRTY:
+ hv_type_mask = 2;
+ if (args.access_op == MSHV_GPAP_ACCESS_OP_CLEAR) {
+ hv_flags.clear_dirty = 1;
+ } else { /* MSHV_GPAP_ACCESS_OP_SET */
+ hv_flags.set_dirty = 1;
+ /* dirty implies accessed */
+ hv_flags.set_accessed = 1;
+ }
+ break;
+ }
+
+ states = vzalloc(states_buf_sz);
+ if (!states)
+ return -ENOMEM;
+
+ ret = hv_call_get_gpa_access_states(partition->pt_id, args.page_count,
+ args.gpap_base, hv_flags, &written,
+ states);
+ if (ret)
+ goto free_return;
+
+ /*
+ * Overwrite states buffer with bitmap - the bits in hv_type_mask
+ * correspond to bitfields in hv_gpa_page_access_state
+ */
+ for (i = 0; i < written; ++i)
+ __assign_bit(i, (ulong *)states,
+ states[i].as_uint8 & hv_type_mask);
+
+ /* zero the unused bits in the last byte(s) of the returned bitmap */
+ for (i = written; i < bitmap_buf_sz * 8; ++i)
+ __clear_bit(i, (ulong *)states);
+
+ if (copy_to_user((void __user *)args.bitmap_ptr, states, bitmap_buf_sz))
+ ret = -EFAULT;
+
+free_return:
+ vfree(states);
+ return ret;
+}
+
+static long
+mshv_partition_ioctl_set_msi_routing(struct mshv_partition *partition,
+ void __user *user_args)
+{
+ struct mshv_user_irq_entry *entries = NULL;
+ struct mshv_user_irq_table args;
+ long ret;
+
+ if (copy_from_user(&args, user_args, sizeof(args)))
+ return -EFAULT;
+
+ if (args.nr > MSHV_MAX_GUEST_IRQS ||
+ mshv_field_nonzero(args, rsvd))
+ return -EINVAL;
+
+ if (args.nr) {
+ struct mshv_user_irq_table __user *urouting = user_args;
+
+ entries = vmemdup_user(urouting->entries,
+ array_size(sizeof(*entries),
+ args.nr));
+ if (IS_ERR(entries))
+ return PTR_ERR(entries);
+ }
+ ret = mshv_update_routing_table(partition, entries, args.nr);
+ kvfree(entries);
+
+ return ret;
+}
+
+static long
+mshv_partition_ioctl_initialize(struct mshv_partition *partition)
+{
+ long ret;
+
+ if (partition->pt_initialized)
+ return 0;
+
+ ret = hv_call_initialize_partition(partition->pt_id);
+ if (ret)
+ goto withdraw_mem;
+
+ partition->pt_initialized = true;
+
+ return 0;
+
+withdraw_mem:
+ hv_call_withdraw_memory(U64_MAX, NUMA_NO_NODE, partition->pt_id);
+
+ return ret;
+}
+
+static long
+mshv_partition_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+ struct mshv_partition *partition = filp->private_data;
+ long ret;
+ void __user *uarg = (void __user *)arg;
+
+ if (mutex_lock_killable(&partition->pt_mutex))
+ return -EINTR;
+
+ switch (ioctl) {
+ case MSHV_INITIALIZE_PARTITION:
+ ret = mshv_partition_ioctl_initialize(partition);
+ break;
+ case MSHV_SET_GUEST_MEMORY:
+ ret = mshv_partition_ioctl_set_memory(partition, uarg);
+ break;
+ case MSHV_CREATE_VP:
+ ret = mshv_partition_ioctl_create_vp(partition, uarg);
+ break;
+ case MSHV_IRQFD:
+ ret = mshv_partition_ioctl_irqfd(partition, uarg);
+ break;
+ case MSHV_IOEVENTFD:
+ ret = mshv_partition_ioctl_ioeventfd(partition, uarg);
+ break;
+ case MSHV_SET_MSI_ROUTING:
+ ret = mshv_partition_ioctl_set_msi_routing(partition, uarg);
+ break;
+ case MSHV_GET_GPAP_ACCESS_BITMAP:
+ ret = mshv_partition_ioctl_get_gpap_access_bitmap(partition,
+ uarg);
+ break;
+ case MSHV_ROOT_HVCALL:
+ ret = mshv_ioctl_passthru_hvcall(partition, true, uarg);
+ break;
+ default:
+ ret = -ENOTTY;
+ }
+
+ mutex_unlock(&partition->pt_mutex);
+ return ret;
+}
+
+static int
+disable_vp_dispatch(struct mshv_vp *vp)
+{
+ int ret;
+ struct hv_register_assoc dispatch_suspend = {
+ .name = HV_REGISTER_DISPATCH_SUSPEND,
+ .value.dispatch_suspend.suspended = 1,
+ };
+
+ ret = mshv_set_vp_registers(vp->vp_index, vp->vp_partition->pt_id,
+ 1, &dispatch_suspend);
+ if (ret)
+ vp_err(vp, "failed to suspend\n");
+
+ return ret;
+}
+
+static int
+get_vp_signaled_count(struct mshv_vp *vp, u64 *count)
+{
+ int ret;
+ struct hv_register_assoc root_signal_count = {
+ .name = HV_REGISTER_VP_ROOT_SIGNAL_COUNT,
+ };
+
+ ret = mshv_get_vp_registers(vp->vp_index, vp->vp_partition->pt_id,
+ 1, &root_signal_count);
+
+ if (ret) {
+ vp_err(vp, "Failed to get root signal count");
+ *count = 0;
+ return ret;
+ }
+
+ *count = root_signal_count.value.reg64;
+
+ return ret;
+}
+
+static void
+drain_vp_signals(struct mshv_vp *vp)
+{
+ u64 hv_signal_count;
+ u64 vp_signal_count;
+
+ get_vp_signaled_count(vp, &hv_signal_count);
+
+ vp_signal_count = atomic64_read(&vp->run.vp_signaled_count);
+
+ /*
+ * There should be at most 1 outstanding notification, but be extra
+ * careful anyway.
+ */
+ while (hv_signal_count != vp_signal_count) {
+ WARN_ON(hv_signal_count - vp_signal_count != 1);
+
+ if (wait_event_interruptible(vp->run.vp_suspend_queue,
+ vp->run.kicked_by_hv == 1))
+ break;
+ vp->run.kicked_by_hv = 0;
+ vp_signal_count = atomic64_read(&vp->run.vp_signaled_count);
+ }
+}
+
+static void drain_all_vps(const struct mshv_partition *partition)
+{
+ int i;
+ struct mshv_vp *vp;
+
+ /*
+ * VPs are reachable from ISR. It is safe to not take the partition
+ * lock because nobody else can enter this function and drop the
+ * partition from the list.
+ */
+ for (i = 0; i < MSHV_MAX_VPS; i++) {
+ vp = partition->pt_vp_array[i];
+ if (!vp)
+ continue;
+ /*
+ * Disable dispatching of the VP in the hypervisor. After this
+ * the hypervisor guarantees it won't generate any signals for
+ * the VP and the hypervisor's VP signal count won't change.
+ */
+ disable_vp_dispatch(vp);
+ drain_vp_signals(vp);
+ }
+}
+
+static void
+remove_partition(struct mshv_partition *partition)
+{
+ spin_lock(&mshv_root.pt_ht_lock);
+ hlist_del_rcu(&partition->pt_hnode);
+ spin_unlock(&mshv_root.pt_ht_lock);
+
+ synchronize_rcu();
+}
+
+/*
+ * Tear down a partition and remove it from the list.
+ * Partition's refcount must be 0
+ */
+static void destroy_partition(struct mshv_partition *partition)
+{
+ struct mshv_vp *vp;
+ struct mshv_mem_region *region;
+ int i, ret;
+ struct hlist_node *n;
+
+ if (refcount_read(&partition->pt_ref_count)) {
+ pt_err(partition,
+ "Attempt to destroy partition but refcount > 0\n");
+ return;
+ }
+
+ if (partition->pt_initialized) {
+ /*
+ * We only need to drain signals for root scheduler. This should be
+ * done before removing the partition from the partition list.
+ */
+ if (hv_scheduler_type == HV_SCHEDULER_TYPE_ROOT)
+ drain_all_vps(partition);
+
+ /* Remove vps */
+ for (i = 0; i < MSHV_MAX_VPS; ++i) {
+ vp = partition->pt_vp_array[i];
+ if (!vp)
+ continue;
+
+ if (hv_parent_partition())
+ mshv_vp_stats_unmap(partition->pt_id, vp->vp_index);
+
+ if (vp->vp_register_page) {
+ (void)hv_call_unmap_vp_state_page(partition->pt_id,
+ vp->vp_index,
+ HV_VP_STATE_PAGE_REGISTERS,
+ input_vtl_zero);
+ vp->vp_register_page = NULL;
+ }
+
+ (void)hv_call_unmap_vp_state_page(partition->pt_id,
+ vp->vp_index,
+ HV_VP_STATE_PAGE_INTERCEPT_MESSAGE,
+ input_vtl_zero);
+ vp->vp_intercept_msg_page = NULL;
+
+ if (vp->vp_ghcb_page) {
+ (void)hv_call_unmap_vp_state_page(partition->pt_id,
+ vp->vp_index,
+ HV_VP_STATE_PAGE_GHCB,
+ input_vtl_normal);
+ vp->vp_ghcb_page = NULL;
+ }
+
+ kfree(vp);
+
+ partition->pt_vp_array[i] = NULL;
+ }
+
+ /* Deallocates and unmaps everything including vcpus, GPA mappings etc */
+ hv_call_finalize_partition(partition->pt_id);
+
+ partition->pt_initialized = false;
+ }
+
+ remove_partition(partition);
+
+ /* Remove regions, regain access to the memory and unpin the pages */
+ hlist_for_each_entry_safe(region, n, &partition->pt_mem_regions,
+ hnode) {
+ hlist_del(&region->hnode);
+
+ if (mshv_partition_encrypted(partition)) {
+ ret = mshv_partition_region_share(region);
+ if (ret) {
+ pt_err(partition,
+ "Failed to regain access to memory, unpinning user pages will fail and crash the host error: %d\n",
+ ret);
+ return;
+ }
+ }
+
+ mshv_region_evict(region);
+
+ vfree(region);
+ }
+
+ /* Withdraw and free all pages we deposited */
+ hv_call_withdraw_memory(U64_MAX, NUMA_NO_NODE, partition->pt_id);
+ hv_call_delete_partition(partition->pt_id);
+
+ mshv_free_routing_table(partition);
+ kfree(partition);
+}
+
+struct
+mshv_partition *mshv_partition_get(struct mshv_partition *partition)
+{
+ if (refcount_inc_not_zero(&partition->pt_ref_count))
+ return partition;
+ return NULL;
+}
+
+struct
+mshv_partition *mshv_partition_find(u64 partition_id)
+ __must_hold(RCU)
+{
+ struct mshv_partition *p;
+
+ hash_for_each_possible_rcu(mshv_root.pt_htable, p, pt_hnode,
+ partition_id)
+ if (p->pt_id == partition_id)
+ return p;
+
+ return NULL;
+}
+
+void
+mshv_partition_put(struct mshv_partition *partition)
+{
+ if (refcount_dec_and_test(&partition->pt_ref_count))
+ destroy_partition(partition);
+}
+
+static int
+mshv_partition_release(struct inode *inode, struct file *filp)
+{
+ struct mshv_partition *partition = filp->private_data;
+
+ mshv_eventfd_release(partition);
+
+ cleanup_srcu_struct(&partition->pt_irq_srcu);
+
+ mshv_partition_put(partition);
+
+ return 0;
+}
+
+static int
+add_partition(struct mshv_partition *partition)
+{
+ spin_lock(&mshv_root.pt_ht_lock);
+
+ hash_add_rcu(mshv_root.pt_htable, &partition->pt_hnode,
+ partition->pt_id);
+
+ spin_unlock(&mshv_root.pt_ht_lock);
+
+ return 0;
+}
+
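+/*
+ * MSHV_CREATE_PARTITION: translate the user-supplied flags into hypervisor
+ * creation/isolation properties, create and register the partition, and
+ * return it to user space as an anonymous "mshv_partition" fd.
+ *
+ * Typical VMM bring-up sequence (illustrative only; not a complete or
+ * canonical ordering):
+ *
+ *	mshv_fd = open("/dev/mshv", O_RDWR | O_CLOEXEC);
+ *	pt_fd   = ioctl(mshv_fd, MSHV_CREATE_PARTITION, &create_args);
+ *	ioctl(pt_fd, MSHV_SET_GUEST_MEMORY, &mem_region);
+ *	ioctl(pt_fd, MSHV_INITIALIZE_PARTITION, 0);
+ *	vp_fd   = ioctl(pt_fd, MSHV_CREATE_VP, &create_vp_args);
+ */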
+static long
+mshv_ioctl_create_partition(void __user *user_arg, struct device *module_dev)
+{
+ struct mshv_create_partition args;
+ u64 creation_flags;
+ struct hv_partition_creation_properties creation_properties = {};
+ union hv_partition_isolation_properties isolation_properties = {};
+ struct mshv_partition *partition;
+ struct file *file;
+ int fd;
+ long ret;
+
+ if (copy_from_user(&args, user_arg, sizeof(args)))
+ return -EFAULT;
+
+ if ((args.pt_flags & ~MSHV_PT_FLAGS_MASK) ||
+ args.pt_isolation >= MSHV_PT_ISOLATION_COUNT)
+ return -EINVAL;
+
+ /* Only support EXO partitions */
+ creation_flags = HV_PARTITION_CREATION_FLAG_EXO_PARTITION |
+ HV_PARTITION_CREATION_FLAG_INTERCEPT_MESSAGE_PAGE_ENABLED;
+
+ if (args.pt_flags & BIT(MSHV_PT_BIT_LAPIC))
+ creation_flags |= HV_PARTITION_CREATION_FLAG_LAPIC_ENABLED;
+ if (args.pt_flags & BIT(MSHV_PT_BIT_X2APIC))
+ creation_flags |= HV_PARTITION_CREATION_FLAG_X2APIC_CAPABLE;
+ if (args.pt_flags & BIT(MSHV_PT_BIT_GPA_SUPER_PAGES))
+ creation_flags |= HV_PARTITION_CREATION_FLAG_GPA_SUPER_PAGES_ENABLED;
+
+ switch (args.pt_isolation) {
+ case MSHV_PT_ISOLATION_NONE:
+ isolation_properties.isolation_type =
+ HV_PARTITION_ISOLATION_TYPE_NONE;
+ break;
+ }
+
+ partition = kzalloc(sizeof(*partition), GFP_KERNEL);
+ if (!partition)
+ return -ENOMEM;
+
+ partition->pt_module_dev = module_dev;
+ partition->isolation_type = isolation_properties.isolation_type;
+
+ refcount_set(&partition->pt_ref_count, 1);
+
+ mutex_init(&partition->pt_mutex);
+
+ mutex_init(&partition->pt_irq_lock);
+
+ init_completion(&partition->async_hypercall);
+
+ INIT_HLIST_HEAD(&partition->irq_ack_notifier_list);
+
+ INIT_HLIST_HEAD(&partition->pt_devices);
+
+ INIT_HLIST_HEAD(&partition->pt_mem_regions);
+
+ mshv_eventfd_init(partition);
+
+ ret = init_srcu_struct(&partition->pt_irq_srcu);
+ if (ret)
+ goto free_partition;
+
+ ret = hv_call_create_partition(creation_flags,
+ creation_properties,
+ isolation_properties,
+ &partition->pt_id);
+ if (ret)
+ goto cleanup_irq_srcu;
+
+ ret = add_partition(partition);
+ if (ret)
+ goto delete_partition;
+
+ ret = mshv_init_async_handler(partition);
+ if (ret)
+ goto remove_partition;
+
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0) {
+ ret = fd;
+ goto remove_partition;
+ }
+
+ file = anon_inode_getfile("mshv_partition", &mshv_partition_fops,
+ partition, O_RDWR);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto put_fd;
+ }
+
+ fd_install(fd, file);
+
+ return fd;
+
+put_fd:
+ put_unused_fd(fd);
+remove_partition:
+ remove_partition(partition);
+delete_partition:
+ hv_call_delete_partition(partition->pt_id);
+cleanup_irq_srcu:
+ cleanup_srcu_struct(&partition->pt_irq_srcu);
+free_partition:
+ kfree(partition);
+
+ return ret;
+}
+
+static long mshv_dev_ioctl(struct file *filp, unsigned int ioctl,
+ unsigned long arg)
+{
+ struct miscdevice *misc = filp->private_data;
+
+ switch (ioctl) {
+ case MSHV_CREATE_PARTITION:
+ return mshv_ioctl_create_partition((void __user *)arg,
+ misc->this_device);
+ }
+
+ return -ENOTTY;
+}
+
+static int
+mshv_dev_open(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static int
+mshv_dev_release(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static int mshv_cpuhp_online;
+static int mshv_root_sched_online;
+
+static const char *scheduler_type_to_string(enum hv_scheduler_type type)
+{
+ switch (type) {
+ case HV_SCHEDULER_TYPE_LP:
+ return "classic scheduler without SMT";
+ case HV_SCHEDULER_TYPE_LP_SMT:
+ return "classic scheduler with SMT";
+ case HV_SCHEDULER_TYPE_CORE_SMT:
+ return "core scheduler";
+ case HV_SCHEDULER_TYPE_ROOT:
+ return "root scheduler";
+ default:
+ return "unknown scheduler";
+ }
+}
+
+/* TODO move this to hv_common.c when needed outside */
+static int __init hv_retrieve_scheduler_type(enum hv_scheduler_type *out)
+{
+ struct hv_input_get_system_property *input;
+ struct hv_output_get_system_property *output;
+ unsigned long flags;
+ u64 status;
+
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+
+ memset(input, 0, sizeof(*input));
+ memset(output, 0, sizeof(*output));
+ input->property_id = HV_SYSTEM_PROPERTY_SCHEDULER_TYPE;
+
+ status = hv_do_hypercall(HVCALL_GET_SYSTEM_PROPERTY, input, output);
+ if (!hv_result_success(status)) {
+ local_irq_restore(flags);
+ pr_err("%s: %s\n", __func__, hv_result_to_string(status));
+ return hv_result_to_errno(status);
+ }
+
+ *out = output->scheduler_type;
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+/* Retrieve and stash the supported scheduler type */
+static int __init mshv_retrieve_scheduler_type(struct device *dev)
+{
+ int ret;
+
+ ret = hv_retrieve_scheduler_type(&hv_scheduler_type);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "Hypervisor using %s\n",
+ scheduler_type_to_string(hv_scheduler_type));
+
+ switch (hv_scheduler_type) {
+ case HV_SCHEDULER_TYPE_CORE_SMT:
+ case HV_SCHEDULER_TYPE_LP_SMT:
+ case HV_SCHEDULER_TYPE_ROOT:
+ case HV_SCHEDULER_TYPE_LP:
+ /* Supported scheduler, nothing to do */
+ break;
+ default:
+ dev_err(dev, "unsupported scheduler 0x%x, bailing.\n",
+ hv_scheduler_type);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
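+/*
+ * Per-cpu setup/teardown for the root scheduler: each CPU gets a pair of
+ * adjacent pages used as the HVCALL_DISPATCH_VP input and output buffers
+ * (see mshv_vp_dispatch()).
+ */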
+static int mshv_root_scheduler_init(unsigned int cpu)
+{
+ void **inputarg, **outputarg, *p;
+
+ inputarg = (void **)this_cpu_ptr(root_scheduler_input);
+ outputarg = (void **)this_cpu_ptr(root_scheduler_output);
+
+ /* Allocate two consecutive pages. One for input, one for output. */
+ p = kmalloc(2 * HV_HYP_PAGE_SIZE, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ *inputarg = p;
+ *outputarg = (char *)p + HV_HYP_PAGE_SIZE;
+
+ return 0;
+}
+
+static int mshv_root_scheduler_cleanup(unsigned int cpu)
+{
+ void *p, **inputarg, **outputarg;
+
+ inputarg = (void **)this_cpu_ptr(root_scheduler_input);
+ outputarg = (void **)this_cpu_ptr(root_scheduler_output);
+
+ p = *inputarg;
+
+ *inputarg = NULL;
+ *outputarg = NULL;
+
+ kfree(p);
+
+ return 0;
+}
+
+/* Must be called after retrieving the scheduler type */
+static int
+root_scheduler_init(struct device *dev)
+{
+ int ret;
+
+ if (hv_scheduler_type != HV_SCHEDULER_TYPE_ROOT)
+ return 0;
+
+ root_scheduler_input = alloc_percpu(void *);
+ root_scheduler_output = alloc_percpu(void *);
+
+ if (!root_scheduler_input || !root_scheduler_output) {
+ dev_err(dev, "Failed to allocate root scheduler buffers\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mshv_root_sched",
+ mshv_root_scheduler_init,
+ mshv_root_scheduler_cleanup);
+
+ if (ret < 0) {
+ dev_err(dev, "Failed to setup root scheduler state: %i\n", ret);
+ goto out;
+ }
+
+ mshv_root_sched_online = ret;
+
+ return 0;
+
+out:
+ free_percpu(root_scheduler_input);
+ free_percpu(root_scheduler_output);
+ return ret;
+}
+
+static void
+root_scheduler_deinit(void)
+{
+ if (hv_scheduler_type != HV_SCHEDULER_TYPE_ROOT)
+ return;
+
+ cpuhp_remove_state(mshv_root_sched_online);
+ free_percpu(root_scheduler_input);
+ free_percpu(root_scheduler_output);
+}
+
+static int mshv_reboot_notify(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ cpuhp_remove_state(mshv_cpuhp_online);
+ return 0;
+}
+
+struct notifier_block mshv_reboot_nb = {
+ .notifier_call = mshv_reboot_notify,
+};
+
+static void mshv_root_partition_exit(void)
+{
+ unregister_reboot_notifier(&mshv_reboot_nb);
+ root_scheduler_deinit();
+}
+
+static int __init mshv_root_partition_init(struct device *dev)
+{
+ int err;
+
+ if (mshv_retrieve_scheduler_type(dev))
+ return -ENODEV;
+
+ err = root_scheduler_init(dev);
+ if (err)
+ return err;
+
+ err = register_reboot_notifier(&mshv_reboot_nb);
+ if (err)
+ goto root_sched_deinit;
+
+ return 0;
+
+root_sched_deinit:
+ root_scheduler_deinit();
+ return err;
+}
+
+static int __init mshv_parent_partition_init(void)
+{
+ int ret;
+ struct device *dev;
+ union hv_hypervisor_version_info version_info;
+
+ if (!hv_root_partition() || is_kdump_kernel())
+ return -ENODEV;
+
+ if (hv_get_hypervisor_version(&version_info))
+ return -ENODEV;
+
+ ret = misc_register(&mshv_dev);
+ if (ret)
+ return ret;
+
+ dev = mshv_dev.this_device;
+
+ if (version_info.build_number < MSHV_HV_MIN_VERSION ||
+ version_info.build_number > MSHV_HV_MAX_VERSION) {
+ dev_err(dev, "Running on unvalidated Hyper-V version\n");
+ dev_err(dev, "Versions: current: %u min: %u max: %u\n",
+ version_info.build_number, MSHV_HV_MIN_VERSION,
+ MSHV_HV_MAX_VERSION);
+ }
+
+ mshv_root.synic_pages = alloc_percpu(struct hv_synic_pages);
+ if (!mshv_root.synic_pages) {
+ dev_err(dev, "Failed to allocate percpu synic page\n");
+ ret = -ENOMEM;
+ goto device_deregister;
+ }
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mshv_synic",
+ mshv_synic_init,
+ mshv_synic_cleanup);
+ if (ret < 0) {
+ dev_err(dev, "Failed to setup cpu hotplug state: %i\n", ret);
+ goto free_synic_pages;
+ }
+
+ mshv_cpuhp_online = ret;
+
+ ret = mshv_root_partition_init(dev);
+ if (ret)
+ goto remove_cpu_state;
+
+ ret = mshv_irqfd_wq_init();
+ if (ret)
+ goto exit_partition;
+
+ spin_lock_init(&mshv_root.pt_ht_lock);
+ hash_init(mshv_root.pt_htable);
+
+ hv_setup_mshv_handler(mshv_isr);
+
+ return 0;
+
+exit_partition:
+ if (hv_root_partition())
+ mshv_root_partition_exit();
+remove_cpu_state:
+ cpuhp_remove_state(mshv_cpuhp_online);
+free_synic_pages:
+ free_percpu(mshv_root.synic_pages);
+device_deregister:
+ misc_deregister(&mshv_dev);
+ return ret;
+}
+
+static void __exit mshv_parent_partition_exit(void)
+{
+ hv_setup_mshv_handler(NULL);
+ mshv_port_table_fini();
+ misc_deregister(&mshv_dev);
+ mshv_irqfd_wq_cleanup();
+ if (hv_root_partition())
+ mshv_root_partition_exit();
+ cpuhp_remove_state(mshv_cpuhp_online);
+ free_percpu(mshv_root.synic_pages);
+}
+
+module_init(mshv_parent_partition_init);
+module_exit(mshv_parent_partition_exit);
diff --git a/drivers/hv/mshv_synic.c b/drivers/hv/mshv_synic.c
new file mode 100644
index 000000000000..e6b6381b7c36
--- /dev/null
+++ b/drivers/hv/mshv_synic.c
@@ -0,0 +1,665 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Microsoft Corporation.
+ *
+ * mshv_root module's main interrupt handler and associated functionality.
+ *
+ * Authors: Microsoft Linux virtualization team
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/random.h>
+#include <asm/mshyperv.h>
+
+#include "mshv_eventfd.h"
+#include "mshv.h"
+
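+/*
+ * Pop one queued port id from this CPU's SynIC event ring for the given
+ * SINT. Handles the "ring full" case by telling the hypervisor the ring has
+ * been drained, and re-checks once after unmasking the signal so that a
+ * message racing with the unmask is not lost. Returns 0 when the ring is
+ * empty.
+ */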
+static u32 synic_event_ring_get_queued_port(u32 sint_index)
+{
+ struct hv_synic_event_ring_page **event_ring_page;
+ volatile struct hv_synic_event_ring *ring;
+ struct hv_synic_pages *spages;
+ u8 **synic_eventring_tail;
+ u32 message;
+ u8 tail;
+
+ spages = this_cpu_ptr(mshv_root.synic_pages);
+ event_ring_page = &spages->synic_event_ring_page;
+ synic_eventring_tail = (u8 **)this_cpu_ptr(hv_synic_eventring_tail);
+
+ if (unlikely(!*synic_eventring_tail)) {
+ pr_debug("Missing synic event ring tail!\n");
+ return 0;
+ }
+ tail = (*synic_eventring_tail)[sint_index];
+
+ if (unlikely(!*event_ring_page)) {
+ pr_debug("Missing synic event ring page!\n");
+ return 0;
+ }
+
+ ring = &(*event_ring_page)->sint_event_ring[sint_index];
+
+ /*
+ * Get the message.
+ */
+ message = ring->data[tail];
+
+ if (!message) {
+ if (ring->ring_full) {
+ /*
+ * Ring is marked full, but it looks like we have consumed
+ * all the messages. Notify the hypervisor that the ring is
+ * now empty and check again.
+ */
+ ring->ring_full = 0;
+ hv_call_notify_port_ring_empty(sint_index);
+ message = ring->data[tail];
+ }
+
+ if (!message) {
+ ring->signal_masked = 0;
+ /*
+ * Unmask the signal and sync with hypervisor
+ * before one last check for any message.
+ */
+ mb();
+ message = ring->data[tail];
+
+ /*
+ * OK, let's bail out.
+ */
+ if (!message)
+ return 0;
+ }
+
+ ring->signal_masked = 1;
+ }
+
+ /*
+ * Clear the message in the ring buffer.
+ */
+ ring->data[tail] = 0;
+
+ if (++tail == HV_SYNIC_EVENT_RING_MESSAGE_COUNT)
+ tail = 0;
+
+ (*synic_eventring_tail)[sint_index] = tail;
+
+ return message;
+}
+
+static bool
+mshv_doorbell_isr(struct hv_message *msg)
+{
+ struct hv_notification_message_payload *notification;
+ u32 port;
+
+ if (msg->header.message_type != HVMSG_SYNIC_SINT_INTERCEPT)
+ return false;
+
+ notification = (struct hv_notification_message_payload *)msg->u.payload;
+ if (notification->sint_index != HV_SYNIC_DOORBELL_SINT_INDEX)
+ return false;
+
+ while ((port = synic_event_ring_get_queued_port(HV_SYNIC_DOORBELL_SINT_INDEX))) {
+ struct port_table_info ptinfo = { 0 };
+
+ if (mshv_portid_lookup(port, &ptinfo)) {
+ pr_debug("Failed to get port info from port_table!\n");
+ continue;
+ }
+
+ if (ptinfo.hv_port_type != HV_PORT_TYPE_DOORBELL) {
+ pr_debug("Not a doorbell port!, port: %d, port_type: %d\n",
+ port, ptinfo.hv_port_type);
+ continue;
+ }
+
+ /* Invoke the callback */
+ ptinfo.hv_port_doorbell.doorbell_cb(port,
+ ptinfo.hv_port_doorbell.data);
+ }
+
+ return true;
+}
+
+static bool mshv_async_call_completion_isr(struct hv_message *msg)
+{
+ bool handled = false;
+ struct hv_async_completion_message_payload *async_msg;
+ struct mshv_partition *partition;
+ u64 partition_id;
+
+ if (msg->header.message_type != HVMSG_ASYNC_CALL_COMPLETION)
+ goto out;
+
+ async_msg =
+ (struct hv_async_completion_message_payload *)msg->u.payload;
+
+ partition_id = async_msg->partition_id;
+
+ /*
+ * Hold the RCU read lock for the rest of the ISR, because the
+ * partition could be released at any time; e.g. the MSHV_RUN_VP
+ * thread could wake on another CPU and release the partition
+ * unless we hold this.
+ */
+ rcu_read_lock();
+
+ partition = mshv_partition_find(partition_id);
+
+ if (unlikely(!partition)) {
+ pr_debug("failed to find partition %llu\n", partition_id);
+ goto unlock_out;
+ }
+
+ partition->async_hypercall_status = async_msg->status;
+ complete(&partition->async_hypercall);
+
+ handled = true;
+
+unlock_out:
+ rcu_read_unlock();
+out:
+ return handled;
+}
+
+static void kick_vp(struct mshv_vp *vp)
+{
+ atomic64_inc(&vp->run.vp_signaled_count);
+ vp->run.kicked_by_hv = 1;
+ wake_up(&vp->run.vp_suspend_queue);
+}
+
+static void
+handle_bitset_message(const struct hv_vp_signal_bitset_scheduler_message *msg)
+{
+ int bank_idx, vps_signaled = 0, bank_mask_size;
+ struct mshv_partition *partition;
+ const struct hv_vpset *vpset;
+ const u64 *bank_contents;
+ u64 partition_id = msg->partition_id;
+
+ if (msg->vp_bitset.bitset.format != HV_GENERIC_SET_SPARSE_4K) {
+ pr_debug("scheduler message format is not HV_GENERIC_SET_SPARSE_4K");
+ return;
+ }
+
+ if (msg->vp_count == 0) {
+ pr_debug("scheduler message with no VP specified");
+ return;
+ }
+
+ rcu_read_lock();
+
+ partition = mshv_partition_find(partition_id);
+ if (unlikely(!partition)) {
+ pr_debug("failed to find partition %llu\n", partition_id);
+ goto unlock_out;
+ }
+
+ vpset = &msg->vp_bitset.bitset;
+
+ bank_idx = -1;
+ bank_contents = vpset->bank_contents;
+ bank_mask_size = sizeof(vpset->valid_bank_mask) * BITS_PER_BYTE;
+
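+ /* Walk each valid bank of the sparse VP set, then each set bit within the bank, to recover the VP indexes to kick. */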
+ while (true) {
+ int vp_bank_idx = -1;
+ int vp_bank_size = sizeof(*bank_contents) * BITS_PER_BYTE;
+ int vp_index;
+
+ bank_idx = find_next_bit((unsigned long *)&vpset->valid_bank_mask,
+ bank_mask_size, bank_idx + 1);
+ if (bank_idx == bank_mask_size)
+ break;
+
+ while (true) {
+ struct mshv_vp *vp;
+
+ vp_bank_idx = find_next_bit((unsigned long *)bank_contents,
+ vp_bank_size, vp_bank_idx + 1);
+ if (vp_bank_idx == vp_bank_size)
+ break;
+
+ vp_index = (bank_idx * vp_bank_size) + vp_bank_idx;
+
+ /* This shouldn't happen, but just in case. */
+ if (unlikely(vp_index >= MSHV_MAX_VPS)) {
+ pr_debug("VP index %u out of bounds\n",
+ vp_index);
+ goto unlock_out;
+ }
+
+ vp = partition->pt_vp_array[vp_index];
+ if (unlikely(!vp)) {
+ pr_debug("failed to find VP %u\n", vp_index);
+ goto unlock_out;
+ }
+
+ kick_vp(vp);
+ vps_signaled++;
+ }
+
+ bank_contents++;
+ }
+
+unlock_out:
+ rcu_read_unlock();
+
+ if (vps_signaled != msg->vp_count)
+ pr_debug("asked to signal %u VPs but only did %u\n",
+ msg->vp_count, vps_signaled);
+}
+
+static void
+handle_pair_message(const struct hv_vp_signal_pair_scheduler_message *msg)
+{
+ struct mshv_partition *partition = NULL;
+ struct mshv_vp *vp;
+ int idx;
+
+ rcu_read_lock();
+
+ for (idx = 0; idx < msg->vp_count; idx++) {
+ u64 partition_id = msg->partition_ids[idx];
+ u32 vp_index = msg->vp_indexes[idx];
+
+ if (idx == 0 || partition->pt_id != partition_id) {
+ partition = mshv_partition_find(partition_id);
+ if (unlikely(!partition)) {
+ pr_debug("failed to find partition %llu\n",
+ partition_id);
+ break;
+ }
+ }
+
+ /* This shouldn't happen, but just in case. */
+ if (unlikely(vp_index >= MSHV_MAX_VPS)) {
+ pr_debug("VP index %u out of bounds\n", vp_index);
+ break;
+ }
+
+ vp = partition->pt_vp_array[vp_index];
+ if (!vp) {
+ pr_debug("failed to find VP %u\n", vp_index);
+ break;
+ }
+
+ kick_vp(vp);
+ }
+
+ rcu_read_unlock();
+}
+
+static bool
+mshv_scheduler_isr(struct hv_message *msg)
+{
+ if (msg->header.message_type != HVMSG_SCHEDULER_VP_SIGNAL_BITSET &&
+ msg->header.message_type != HVMSG_SCHEDULER_VP_SIGNAL_PAIR)
+ return false;
+
+ if (msg->header.message_type == HVMSG_SCHEDULER_VP_SIGNAL_BITSET)
+ handle_bitset_message((struct hv_vp_signal_bitset_scheduler_message *)
+ msg->u.payload);
+ else
+ handle_pair_message((struct hv_vp_signal_pair_scheduler_message *)
+ msg->u.payload);
+
+ return true;
+}
+
+static bool
+mshv_intercept_isr(struct hv_message *msg)
+{
+ struct mshv_partition *partition;
+ bool handled = false;
+ struct mshv_vp *vp;
+ u64 partition_id;
+ u32 vp_index;
+
+ partition_id = msg->header.sender;
+
+ rcu_read_lock();
+
+ partition = mshv_partition_find(partition_id);
+ if (unlikely(!partition)) {
+ pr_debug("failed to find partition %llu\n",
+ partition_id);
+ goto unlock_out;
+ }
+
+ if (msg->header.message_type == HVMSG_X64_APIC_EOI) {
+ /*
+ * Check if this GSI is registered in the ack_notifier
+ * list and invoke the callback if it is.
+ *
+ * If there is a notifier, the ack callback is expected to
+ * handle the VMEXIT, so this message need not be passed on
+ * to the vCPU thread.
+ */
+ struct hv_x64_apic_eoi_message *eoi_msg =
+ (struct hv_x64_apic_eoi_message *)&msg->u.payload[0];
+
+ if (mshv_notify_acked_gsi(partition, eoi_msg->interrupt_vector)) {
+ handled = true;
+ goto unlock_out;
+ }
+ }
+
+ /*
+ * We should get an opaque intercept message here for all intercept
+ * messages, since we're using the mapped VP intercept message page.
+ *
+ * The intercept message will have been placed in the intercept
+ * message page at this point.
+ *
+ * Make sure the message type matches our expectation.
+ */
+ if (msg->header.message_type != HVMSG_OPAQUE_INTERCEPT) {
+ pr_debug("wrong message type %d", msg->header.message_type);
+ goto unlock_out;
+ }
+
+ /*
+ * Since we directly index the vp, and it has to exist for us to be here
+ * (because the vp is only deleted when the partition is), no additional
+ * locking is needed here
+ */
+ vp_index =
+ ((struct hv_opaque_intercept_message *)msg->u.payload)->vp_index;
+ vp = partition->pt_vp_array[vp_index];
+ if (unlikely(!vp)) {
+ pr_debug("failed to find VP %u\n", vp_index);
+ goto unlock_out;
+ }
+
+ kick_vp(vp);
+
+ handled = true;
+
+unlock_out:
+ rcu_read_unlock();
+
+ return handled;
+}
+
+void mshv_isr(void)
+{
+ struct hv_synic_pages *spages = this_cpu_ptr(mshv_root.synic_pages);
+ struct hv_message_page **msg_page = &spages->synic_message_page;
+ struct hv_message *msg;
+ bool handled;
+
+ if (unlikely(!(*msg_page))) {
+ pr_debug("Missing synic page!\n");
+ return;
+ }
+
+ msg = &((*msg_page)->sint_message[HV_SYNIC_INTERCEPTION_SINT_INDEX]);
+
+ /*
+ * If the type isn't set, there isn't really a message;
+ * it may be some other Hyper-V interrupt.
+ */
+ if (msg->header.message_type == HVMSG_NONE)
+ return;
+
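+ /* Try each handler in turn; the first one that recognizes the message type consumes it. */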
+ handled = mshv_doorbell_isr(msg);
+
+ if (!handled)
+ handled = mshv_scheduler_isr(msg);
+
+ if (!handled)
+ handled = mshv_async_call_completion_isr(msg);
+
+ if (!handled)
+ handled = mshv_intercept_isr(msg);
+
+ if (handled) {
+ /*
+ * Acknowledge message with hypervisor if another message is
+ * pending.
+ */
+ msg->header.message_type = HVMSG_NONE;
+ /*
+ * Ensure the write is complete so the hypervisor will deliver
+ * the next message if available.
+ */
+ mb();
+ if (msg->header.message_flags.msg_pending)
+ hv_set_non_nested_msr(HV_MSR_EOM, 0);
+
+#ifdef HYPERVISOR_CALLBACK_VECTOR
+ add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR);
+#endif
+ } else {
+ pr_warn_once("%s: unknown message type 0x%x\n", __func__,
+ msg->header.message_type);
+ }
+}
+
+int mshv_synic_init(unsigned int cpu)
+{
+ union hv_synic_simp simp;
+ union hv_synic_siefp siefp;
+ union hv_synic_sirbp sirbp;
+#ifdef HYPERVISOR_CALLBACK_VECTOR
+ union hv_synic_sint sint;
+#endif
+ union hv_synic_scontrol sctrl;
+ struct hv_synic_pages *spages = this_cpu_ptr(mshv_root.synic_pages);
+ struct hv_message_page **msg_page = &spages->synic_message_page;
+ struct hv_synic_event_flags_page **event_flags_page =
+ &spages->synic_event_flags_page;
+ struct hv_synic_event_ring_page **event_ring_page =
+ &spages->synic_event_ring_page;
+
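+ /* Map each per-CPU SynIC page, then program the matching MSR so the hypervisor starts using it. */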
+ /* Setup the Synic's message page */
+ simp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIMP);
+ simp.simp_enabled = true;
+ *msg_page = memremap(simp.base_simp_gpa << HV_HYP_PAGE_SHIFT,
+ HV_HYP_PAGE_SIZE,
+ MEMREMAP_WB);
+
+ if (!(*msg_page))
+ return -EFAULT;
+
+ hv_set_non_nested_msr(HV_MSR_SIMP, simp.as_uint64);
+
+ /* Setup the Synic's event flags page */
+ siefp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIEFP);
+ siefp.siefp_enabled = true;
+ *event_flags_page = memremap(siefp.base_siefp_gpa << PAGE_SHIFT,
+ PAGE_SIZE, MEMREMAP_WB);
+
+ if (!(*event_flags_page))
+ goto cleanup;
+
+ hv_set_non_nested_msr(HV_MSR_SIEFP, siefp.as_uint64);
+
+ /* Setup the Synic's event ring page */
+ sirbp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIRBP);
+ sirbp.sirbp_enabled = true;
+ *event_ring_page = memremap(sirbp.base_sirbp_gpa << PAGE_SHIFT,
+ PAGE_SIZE, MEMREMAP_WB);
+
+ if (!(*event_ring_page))
+ goto cleanup;
+
+ hv_set_non_nested_msr(HV_MSR_SIRBP, sirbp.as_uint64);
+
+#ifdef HYPERVISOR_CALLBACK_VECTOR
+ /* Enable intercepts */
+ sint.as_uint64 = 0;
+ sint.vector = HYPERVISOR_CALLBACK_VECTOR;
+ sint.masked = false;
+ sint.auto_eoi = hv_recommend_using_aeoi();
+ hv_set_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_INTERCEPTION_SINT_INDEX,
+ sint.as_uint64);
+
+ /* Doorbell SINT */
+ sint.as_uint64 = 0;
+ sint.vector = HYPERVISOR_CALLBACK_VECTOR;
+ sint.masked = false;
+ sint.as_intercept = 1;
+ sint.auto_eoi = hv_recommend_using_aeoi();
+ hv_set_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_DOORBELL_SINT_INDEX,
+ sint.as_uint64);
+#endif
+
+ /* Enable global synic bit */
+ sctrl.as_uint64 = hv_get_non_nested_msr(HV_MSR_SCONTROL);
+ sctrl.enable = 1;
+ hv_set_non_nested_msr(HV_MSR_SCONTROL, sctrl.as_uint64);
+
+ return 0;
+
+cleanup:
+ if (*event_ring_page) {
+ sirbp.sirbp_enabled = false;
+ hv_set_non_nested_msr(HV_MSR_SIRBP, sirbp.as_uint64);
+ memunmap(*event_ring_page);
+ }
+ if (*event_flags_page) {
+ siefp.siefp_enabled = false;
+ hv_set_non_nested_msr(HV_MSR_SIEFP, siefp.as_uint64);
+ memunmap(*event_flags_page);
+ }
+ if (*msg_page) {
+ simp.simp_enabled = false;
+ hv_set_non_nested_msr(HV_MSR_SIMP, simp.as_uint64);
+ memunmap(*msg_page);
+ }
+
+ return -EFAULT;
+}
+
+int mshv_synic_cleanup(unsigned int cpu)
+{
+ union hv_synic_sint sint;
+ union hv_synic_simp simp;
+ union hv_synic_siefp siefp;
+ union hv_synic_sirbp sirbp;
+ union hv_synic_scontrol sctrl;
+ struct hv_synic_pages *spages = this_cpu_ptr(mshv_root.synic_pages);
+ struct hv_message_page **msg_page = &spages->synic_message_page;
+ struct hv_synic_event_flags_page **event_flags_page =
+ &spages->synic_event_flags_page;
+ struct hv_synic_event_ring_page **event_ring_page =
+ &spages->synic_event_ring_page;
+
+ /* Disable the interrupt */
+ sint.as_uint64 = hv_get_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_INTERCEPTION_SINT_INDEX);
+ sint.masked = true;
+ hv_set_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_INTERCEPTION_SINT_INDEX,
+ sint.as_uint64);
+
+ /* Disable Doorbell SINT */
+ sint.as_uint64 = hv_get_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_DOORBELL_SINT_INDEX);
+ sint.masked = true;
+ hv_set_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_DOORBELL_SINT_INDEX,
+ sint.as_uint64);
+
+ /* Disable Synic's event ring page */
+ sirbp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIRBP);
+ sirbp.sirbp_enabled = false;
+ hv_set_non_nested_msr(HV_MSR_SIRBP, sirbp.as_uint64);
+ memunmap(*event_ring_page);
+
+ /* Disable Synic's event flags page */
+ siefp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIEFP);
+ siefp.siefp_enabled = false;
+ hv_set_non_nested_msr(HV_MSR_SIEFP, siefp.as_uint64);
+ memunmap(*event_flags_page);
+
+ /* Disable Synic's message page */
+ simp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIMP);
+ simp.simp_enabled = false;
+ hv_set_non_nested_msr(HV_MSR_SIMP, simp.as_uint64);
+ memunmap(*msg_page);
+
+ /* Disable global synic bit */
+ sctrl.as_uint64 = hv_get_non_nested_msr(HV_MSR_SCONTROL);
+ sctrl.enable = 0;
+ hv_set_non_nested_msr(HV_MSR_SCONTROL, sctrl.as_uint64);
+
+ return 0;
+}
+
+int
+mshv_register_doorbell(u64 partition_id, doorbell_cb_t doorbell_cb, void *data,
+ u64 gpa, u64 val, u64 flags)
+{
+ struct hv_connection_info connection_info = { 0 };
+ union hv_connection_id connection_id = { 0 };
+ struct port_table_info *port_table_info;
+ struct hv_port_info port_info = { 0 };
+ union hv_port_id port_id = { 0 };
+ int ret;
+
+ port_table_info = kmalloc(sizeof(*port_table_info), GFP_KERNEL);
+ if (!port_table_info)
+ return -ENOMEM;
+
+ port_table_info->hv_port_type = HV_PORT_TYPE_DOORBELL;
+ port_table_info->hv_port_doorbell.doorbell_cb = doorbell_cb;
+ port_table_info->hv_port_doorbell.data = data;
+ ret = mshv_portid_alloc(port_table_info);
+ if (ret < 0) {
+ kfree(port_table_info);
+ return ret;
+ }
+
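+ /* The port table index allocated above doubles as the hypervisor port ID and as the doorbell ID returned to the caller. */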
+ port_id.u.id = ret;
+ port_info.port_type = HV_PORT_TYPE_DOORBELL;
+ port_info.doorbell_port_info.target_sint = HV_SYNIC_DOORBELL_SINT_INDEX;
+ port_info.doorbell_port_info.target_vp = HV_ANY_VP;
+ ret = hv_call_create_port(hv_current_partition_id, port_id, partition_id,
+ &port_info,
+ 0, 0, NUMA_NO_NODE);
+
+ if (ret < 0) {
+ mshv_portid_free(port_id.u.id);
+ return ret;
+ }
+
+ connection_id.u.id = port_id.u.id;
+ connection_info.port_type = HV_PORT_TYPE_DOORBELL;
+ connection_info.doorbell_connection_info.gpa = gpa;
+ connection_info.doorbell_connection_info.trigger_value = val;
+ connection_info.doorbell_connection_info.flags = flags;
+
+ ret = hv_call_connect_port(hv_current_partition_id, port_id, partition_id,
+ connection_id, &connection_info, 0, NUMA_NO_NODE);
+ if (ret < 0) {
+ hv_call_delete_port(hv_current_partition_id, port_id);
+ mshv_portid_free(port_id.u.id);
+ return ret;
+ }
+
+ /* Use the port_id as the doorbell_id. */
+ return port_id.u.id;
+}
+
+void
+mshv_unregister_doorbell(u64 partition_id, int doorbell_portid)
+{
+ union hv_port_id port_id = { 0 };
+ union hv_connection_id connection_id = { 0 };
+
+ connection_id.u.id = doorbell_portid;
+ hv_call_disconnect_port(partition_id, connection_id);
+
+ port_id.u.id = doorbell_portid;
+ hv_call_delete_port(hv_current_partition_id, port_id);
+
+ mshv_portid_free(doorbell_portid);
+}
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 0f6cd44fff29..8d3cff42bdbb 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1611,16 +1611,16 @@ static ssize_t target_cpu_show(struct vmbus_channel *channel, char *buf)
{
return sprintf(buf, "%u\n", channel->target_cpu);
}
-static ssize_t target_cpu_store(struct vmbus_channel *channel,
- const char *buf, size_t count)
+
+int vmbus_channel_set_cpu(struct vmbus_channel *channel, u32 target_cpu)
{
- u32 target_cpu, origin_cpu;
- ssize_t ret = count;
+ u32 origin_cpu;
+ int ret = 0;
- if (vmbus_proto_version < VERSION_WIN10_V4_1)
- return -EIO;
+ lockdep_assert_cpus_held();
+ lockdep_assert_held(&vmbus_connection.channel_mutex);
- if (sscanf(buf, "%uu", &target_cpu) != 1)
+ if (vmbus_proto_version < VERSION_WIN10_V4_1)
return -EIO;
/* Validate target_cpu for the cpumask_test_cpu() operation below. */
@@ -1630,22 +1630,17 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel,
if (!cpumask_test_cpu(target_cpu, housekeeping_cpumask(HK_TYPE_MANAGED_IRQ)))
return -EINVAL;
- /* No CPUs should come up or down during this. */
- cpus_read_lock();
-
- if (!cpu_online(target_cpu)) {
- cpus_read_unlock();
+ if (!cpu_online(target_cpu))
return -EINVAL;
- }
/*
- * Synchronizes target_cpu_store() and channel closure:
+ * Synchronizes vmbus_channel_set_cpu() and channel closure:
*
* { Initially: state = CHANNEL_OPENED }
*
* CPU1 CPU2
*
- * [target_cpu_store()] [vmbus_disconnect_ring()]
+ * [vmbus_channel_set_cpu()] [vmbus_disconnect_ring()]
*
* LOCK channel_mutex LOCK channel_mutex
* LOAD r1 = state LOAD r2 = state
@@ -1660,7 +1655,6 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel,
* Note. The host processes the channel messages "sequentially", in
* the order in which they are received on a per-partition basis.
*/
- mutex_lock(&vmbus_connection.channel_mutex);
/*
* Hyper-V will ignore MODIFYCHANNEL messages for "non-open" channels;
@@ -1668,17 +1662,17 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel,
*/
if (channel->state != CHANNEL_OPENED_STATE) {
ret = -EIO;
- goto cpu_store_unlock;
+ goto end;
}
origin_cpu = channel->target_cpu;
if (target_cpu == origin_cpu)
- goto cpu_store_unlock;
+ goto end;
if (vmbus_send_modifychannel(channel,
hv_cpu_number_to_vp_number(target_cpu))) {
ret = -EIO;
- goto cpu_store_unlock;
+ goto end;
}
/*
@@ -1708,10 +1702,26 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel,
origin_cpu, target_cpu);
}
-cpu_store_unlock:
+end:
+ return ret;
+}
+
+static ssize_t target_cpu_store(struct vmbus_channel *channel,
+ const char *buf, size_t count)
+{
+ u32 target_cpu;
+ ssize_t ret;
+
+ if (sscanf(buf, "%uu", &target_cpu) != 1)
+ return -EIO;
+
+ cpus_read_lock();
+ mutex_lock(&vmbus_connection.channel_mutex);
+ ret = vmbus_channel_set_cpu(channel, target_cpu);
mutex_unlock(&vmbus_connection.channel_mutex);
cpus_read_unlock();
- return ret;
+
+ return ret ?: count;
}
static VMBUS_CHAN_ATTR(cpu, 0644, target_cpu_show, target_cpu_store);
@@ -2262,12 +2272,25 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size)
struct resource *iter;
mutex_lock(&hyperv_mmio_lock);
+
+ /*
+ * If all bytes of the MMIO range to be released are within the
+ * special case fb_mmio shadow region, skip releasing the shadow
+ * region since no corresponding __request_region() was done
+ * in vmbus_allocate_mmio().
+ */
+ if (fb_mmio && start >= fb_mmio->start &&
+ (start + size - 1 <= fb_mmio->end))
+ goto skip_shadow_release;
+
for (iter = hyperv_mmio; iter; iter = iter->sibling) {
if ((iter->start >= start + size) || (iter->end <= start))
continue;
__release_region(iter, start, size);
}
+
+skip_shadow_release:
release_mem_region(start, size);
mutex_unlock(&hyperv_mmio_lock);
@@ -2646,7 +2669,7 @@ static int __init hv_acpi_init(void)
if (!hv_is_hyperv_initialized())
return -ENODEV;
- if (hv_root_partition && !hv_nested)
+ if (hv_root_partition() && !hv_nested)
return 0;
/*
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 4cbaba15d86e..f91f713b0105 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -463,6 +463,16 @@ config SENSORS_BT1_PVT_ALARMS
the data conversion will be periodically performed and the data will be
saved in the internal driver cache.
+config SENSORS_CGBC
+ tristate "Congatec Board Controller Sensors"
+ depends on MFD_CGBC
+ help
+ Enable sensor support for the Congatec Board Controller. It provides
+ temperature, voltage, current and fan sensors.
+
+ This driver can also be built as a module. If so, the module will be
+ called cgbc-hwmon.
+
config SENSORS_CHIPCAP2
tristate "Amphenol ChipCap 2 relative humidity and temperature sensor"
depends on I2C
@@ -789,6 +799,17 @@ config SENSORS_HS3001
This driver can also be built as a module. If so, the module
will be called hs3001.
+config SENSORS_HTU31
+ tristate "Measurement Specialties HTU31 humidity and temperature sensor"
+ depends on I2C
+ select CRC8
+ help
+ If you say yes here you get support for the HTU31 humidity
+ and temperature sensors.
+
+ This driver can also be built as a module. If so, the module
+ will be called htu31.
+
config SENSORS_IBMAEM
tristate "IBM Active Energy Manager temperature/power sensors and control"
select IPMI_SI
@@ -1519,7 +1540,7 @@ config SENSORS_LM90
MAX6657, MAX6658, MAX6659, MAX6680, MAX6681, MAX6692, MAX6695,
MAX6696,
ON Semiconductor NCT1008, NCT210, NCT72, NCT214, NCT218,
- Winbond/Nuvoton W83L771W/G/AWG/ASG,
+ Winbond/Nuvoton W83L771W/G/AWG/ASG, NCT7716, NCT7717 and NCT7718,
Philips NE1618, SA56004, GMT G781, Texas Instruments TMP451 and TMP461
sensor chips.
@@ -1625,7 +1646,7 @@ config SENSORS_NTC_THERMISTOR
B57891S0103 from EPCOS.
This driver can also be built as a module. If so, the module
- will be called ntc-thermistor.
+ will be called ntc_thermistor.
config SENSORS_NCT6683
tristate "Nuvoton NCT6683D"
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index b7ef0f0562d3..766c652ef22b 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -59,6 +59,7 @@ obj-$(CONFIG_SENSORS_ASUS_ROG_RYUJIN) += asus_rog_ryujin.o
obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o
obj-$(CONFIG_SENSORS_AXI_FAN_CONTROL) += axi-fan-control.o
obj-$(CONFIG_SENSORS_BT1_PVT) += bt1-pvt.o
+obj-$(CONFIG_SENSORS_CGBC) += cgbc-hwmon.o
obj-$(CONFIG_SENSORS_CHIPCAP2) += chipcap2.o
obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o
obj-$(CONFIG_SENSORS_CORSAIR_CPRO) += corsair-cpro.o
@@ -91,6 +92,7 @@ obj-$(CONFIG_SENSORS_GPIO_FAN) += gpio-fan.o
obj-$(CONFIG_SENSORS_GXP_FAN_CTRL) += gxp-fan-ctrl.o
obj-$(CONFIG_SENSORS_HIH6130) += hih6130.o
obj-$(CONFIG_SENSORS_HS3001) += hs3001.o
+obj-$(CONFIG_SENSORS_HTU31) += htu31.o
obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o
obj-$(CONFIG_SENSORS_I5500) += i5500_temp.o
obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 44afb07409a4..29ccdc2fb7ff 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -87,25 +87,14 @@ struct acpi_power_meter_resource {
bool power_alarm;
int sensors_valid;
unsigned long sensors_last_updated;
- struct sensor_device_attribute sensors[NUM_SENSORS];
- int num_sensors;
+#define POWER_METER_TRIP_AVERAGE_MIN_IDX 0
+#define POWER_METER_TRIP_AVERAGE_MAX_IDX 1
s64 trip[2];
int num_domain_devices;
struct acpi_device **domain_devices;
struct kobject *holders_dir;
};
-struct sensor_template {
- char *label;
- ssize_t (*show)(struct device *dev,
- struct device_attribute *devattr,
- char *buf);
- ssize_t (*set)(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count);
- int index;
-};
-
/* Averaging interval */
static int update_avg_interval(struct acpi_power_meter_resource *resource)
{
@@ -124,62 +113,6 @@ static int update_avg_interval(struct acpi_power_meter_resource *resource)
return 0;
}
-static ssize_t show_avg_interval(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
-
- mutex_lock(&resource->lock);
- update_avg_interval(resource);
- mutex_unlock(&resource->lock);
-
- return sprintf(buf, "%llu\n", resource->avg_interval);
-}
-
-static ssize_t set_avg_interval(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
- union acpi_object arg0 = { ACPI_TYPE_INTEGER };
- struct acpi_object_list args = { 1, &arg0 };
- int res;
- unsigned long temp;
- unsigned long long data;
- acpi_status status;
-
- res = kstrtoul(buf, 10, &temp);
- if (res)
- return res;
-
- if (temp > resource->caps.max_avg_interval ||
- temp < resource->caps.min_avg_interval)
- return -EINVAL;
- arg0.integer.value = temp;
-
- mutex_lock(&resource->lock);
- status = acpi_evaluate_integer(resource->acpi_dev->handle, "_PAI",
- &args, &data);
- if (ACPI_SUCCESS(status))
- resource->avg_interval = temp;
- mutex_unlock(&resource->lock);
-
- if (ACPI_FAILURE(status)) {
- acpi_evaluation_failure_warn(resource->acpi_dev->handle, "_PAI",
- status);
- return -EINVAL;
- }
-
- /* _PAI returns 0 on success, nonzero otherwise */
- if (data)
- return -EINVAL;
-
- return count;
-}
-
/* Cap functions */
static int update_cap(struct acpi_power_meter_resource *resource)
{
@@ -198,61 +131,6 @@ static int update_cap(struct acpi_power_meter_resource *resource)
return 0;
}
-static ssize_t show_cap(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
-
- mutex_lock(&resource->lock);
- update_cap(resource);
- mutex_unlock(&resource->lock);
-
- return sprintf(buf, "%llu\n", resource->cap * 1000);
-}
-
-static ssize_t set_cap(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
- union acpi_object arg0 = { ACPI_TYPE_INTEGER };
- struct acpi_object_list args = { 1, &arg0 };
- int res;
- unsigned long temp;
- unsigned long long data;
- acpi_status status;
-
- res = kstrtoul(buf, 10, &temp);
- if (res)
- return res;
-
- temp = DIV_ROUND_CLOSEST(temp, 1000);
- if (temp > resource->caps.max_cap || temp < resource->caps.min_cap)
- return -EINVAL;
- arg0.integer.value = temp;
-
- mutex_lock(&resource->lock);
- status = acpi_evaluate_integer(resource->acpi_dev->handle, "_SHL",
- &args, &data);
- if (ACPI_SUCCESS(status))
- resource->cap = temp;
- mutex_unlock(&resource->lock);
-
- if (ACPI_FAILURE(status)) {
- acpi_evaluation_failure_warn(resource->acpi_dev->handle, "_SHL",
- status);
- return -EINVAL;
- }
-
- /* _SHL returns 0 on success, nonzero otherwise */
- if (data)
- return -EINVAL;
-
- return count;
-}
-
/* Power meter trip points */
static int set_acpi_trip(struct acpi_power_meter_resource *resource)
{
@@ -287,34 +165,6 @@ static int set_acpi_trip(struct acpi_power_meter_resource *resource)
return 0;
}
-static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
- unsigned long temp, trip_bk;
- int res;
-
- res = kstrtoul(buf, 10, &temp);
- if (res)
- return res;
-
- temp = DIV_ROUND_CLOSEST(temp, 1000);
-
- guard(mutex)(&resource->lock);
-
- trip_bk = resource->trip[attr->index - 7];
- resource->trip[attr->index - 7] = temp;
- res = set_acpi_trip(resource);
- if (res) {
- resource->trip[attr->index - 7] = trip_bk;
- return res;
- }
-
- return count;
-}
-
/* Power meter */
static int update_meter(struct acpi_power_meter_resource *resource)
{
@@ -341,202 +191,6 @@ static int update_meter(struct acpi_power_meter_resource *resource)
return 0;
}
-static ssize_t show_power(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
-
- mutex_lock(&resource->lock);
- update_meter(resource);
- mutex_unlock(&resource->lock);
-
- if (resource->power == UNKNOWN_POWER)
- return -ENODATA;
-
- return sprintf(buf, "%llu\n", resource->power * 1000);
-}
-
-/* Miscellaneous */
-static ssize_t show_str(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
- acpi_string val;
- int ret;
-
- mutex_lock(&resource->lock);
- switch (attr->index) {
- case 0:
- val = resource->model_number;
- break;
- case 1:
- val = resource->serial_number;
- break;
- case 2:
- val = resource->oem_info;
- break;
- default:
- WARN(1, "Implementation error: unexpected attribute index %d\n",
- attr->index);
- val = "";
- break;
- }
- ret = sprintf(buf, "%s\n", val);
- mutex_unlock(&resource->lock);
- return ret;
-}
-
-static ssize_t show_val(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
- u64 val = 0;
- int ret;
-
- guard(mutex)(&resource->lock);
-
- switch (attr->index) {
- case 0:
- val = resource->caps.min_avg_interval;
- break;
- case 1:
- val = resource->caps.max_avg_interval;
- break;
- case 2:
- val = resource->caps.min_cap * 1000;
- break;
- case 3:
- val = resource->caps.max_cap * 1000;
- break;
- case 4:
- if (resource->caps.hysteresis == UNKNOWN_HYSTERESIS)
- return sprintf(buf, "unknown\n");
-
- val = resource->caps.hysteresis * 1000;
- break;
- case 5:
- if (resource->caps.flags & POWER_METER_IS_BATTERY)
- val = 1;
- else
- val = 0;
- break;
- case 6:
- ret = update_meter(resource);
- if (ret)
- return ret;
- /* need to update cap if not to support the notification. */
- if (!(resource->caps.flags & POWER_METER_CAN_NOTIFY)) {
- ret = update_cap(resource);
- if (ret)
- return ret;
- }
- val = resource->power_alarm || resource->power > resource->cap;
- resource->power_alarm = resource->power > resource->cap;
- break;
- case 7:
- case 8:
- if (resource->trip[attr->index - 7] < 0)
- return sprintf(buf, "unknown\n");
-
- val = resource->trip[attr->index - 7] * 1000;
- break;
- default:
- WARN(1, "Implementation error: unexpected attribute index %d\n",
- attr->index);
- break;
- }
-
- return sprintf(buf, "%llu\n", val);
-}
-
-static ssize_t show_accuracy(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
- unsigned int acc = resource->caps.accuracy;
-
- return sprintf(buf, "%u.%u%%\n", acc / 1000, acc % 1000);
-}
-
-static ssize_t show_name(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- return sprintf(buf, "%s\n", ACPI_POWER_METER_NAME);
-}
-
-#define RO_SENSOR_TEMPLATE(_label, _show, _index) \
- { \
- .label = _label, \
- .show = _show, \
- .index = _index, \
- }
-
-#define RW_SENSOR_TEMPLATE(_label, _show, _set, _index) \
- { \
- .label = _label, \
- .show = _show, \
- .set = _set, \
- .index = _index, \
- }
-
-/* Sensor descriptions. If you add a sensor, update NUM_SENSORS above! */
-static struct sensor_template meter_attrs[] = {
- RO_SENSOR_TEMPLATE(POWER_AVERAGE_NAME, show_power, 0),
- RO_SENSOR_TEMPLATE("power1_accuracy", show_accuracy, 0),
- RO_SENSOR_TEMPLATE("power1_average_interval_min", show_val, 0),
- RO_SENSOR_TEMPLATE("power1_average_interval_max", show_val, 1),
- RO_SENSOR_TEMPLATE("power1_is_battery", show_val, 5),
- RW_SENSOR_TEMPLATE(POWER_AVG_INTERVAL_NAME, show_avg_interval,
- set_avg_interval, 0),
- {},
-};
-
-static struct sensor_template misc_cap_attrs[] = {
- RO_SENSOR_TEMPLATE("power1_cap_min", show_val, 2),
- RO_SENSOR_TEMPLATE("power1_cap_max", show_val, 3),
- RO_SENSOR_TEMPLATE("power1_cap_hyst", show_val, 4),
- RO_SENSOR_TEMPLATE(POWER_ALARM_NAME, show_val, 6),
- {},
-};
-
-static struct sensor_template ro_cap_attrs[] = {
- RO_SENSOR_TEMPLATE(POWER_CAP_NAME, show_cap, 0),
- {},
-};
-
-static struct sensor_template rw_cap_attrs[] = {
- RW_SENSOR_TEMPLATE(POWER_CAP_NAME, show_cap, set_cap, 0),
- {},
-};
-
-static struct sensor_template trip_attrs[] = {
- RW_SENSOR_TEMPLATE("power1_average_min", show_val, set_trip, 7),
- RW_SENSOR_TEMPLATE("power1_average_max", show_val, set_trip, 8),
- {},
-};
-
-static struct sensor_template misc_attrs[] = {
- RO_SENSOR_TEMPLATE("name", show_name, 0),
- RO_SENSOR_TEMPLATE("power1_model_number", show_str, 0),
- RO_SENSOR_TEMPLATE("power1_oem_info", show_str, 2),
- RO_SENSOR_TEMPLATE("power1_serial_number", show_str, 1),
- {},
-};
-
-#undef RO_SENSOR_TEMPLATE
-#undef RW_SENSOR_TEMPLATE
-
/* Read power domain data */
static void remove_domain_devices(struct acpi_power_meter_resource *resource)
{
@@ -638,109 +292,434 @@ end:
return res;
}
-/* Registration and deregistration */
-static int register_attrs(struct acpi_power_meter_resource *resource,
- struct sensor_template *attrs)
+static int set_trip(struct acpi_power_meter_resource *resource, u16 trip_idx,
+ unsigned long trip)
{
- struct device *dev = &resource->acpi_dev->dev;
- struct sensor_device_attribute *sensors =
- &resource->sensors[resource->num_sensors];
- int res = 0;
+ unsigned long trip_bk;
+ int ret;
- while (attrs->label) {
- sensors->dev_attr.attr.name = attrs->label;
- sensors->dev_attr.attr.mode = 0444;
- sensors->dev_attr.show = attrs->show;
- sensors->index = attrs->index;
+ trip = DIV_ROUND_CLOSEST(trip, 1000);
+ trip_bk = resource->trip[trip_idx];
- if (attrs->set) {
- sensors->dev_attr.attr.mode |= 0200;
- sensors->dev_attr.store = attrs->set;
- }
+ resource->trip[trip_idx] = trip;
+ ret = set_acpi_trip(resource);
+ if (ret) {
+ dev_err(&resource->acpi_dev->dev, "set %s failed.\n",
+ (trip_idx == POWER_METER_TRIP_AVERAGE_MIN_IDX) ?
+ "power1_average_min" : "power1_average_max");
+ resource->trip[trip_idx] = trip_bk;
+ }
- sysfs_attr_init(&sensors->dev_attr.attr);
- res = device_create_file(dev, &sensors->dev_attr);
- if (res) {
- sensors->dev_attr.attr.name = NULL;
- goto error;
- }
- sensors++;
- resource->num_sensors++;
- attrs++;
+ return ret;
+}
+
+static int set_cap(struct acpi_power_meter_resource *resource,
+ unsigned long cap)
+{
+ union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+ struct acpi_object_list args = { 1, &arg0 };
+ unsigned long long data;
+ acpi_status status;
+
+ cap = DIV_ROUND_CLOSEST(cap, 1000);
+ if (cap > resource->caps.max_cap || cap < resource->caps.min_cap)
+ return -EINVAL;
+
+ arg0.integer.value = cap;
+ status = acpi_evaluate_integer(resource->acpi_dev->handle, "_SHL",
+ &args, &data);
+ if (ACPI_FAILURE(status)) {
+ acpi_evaluation_failure_warn(resource->acpi_dev->handle, "_SHL",
+ status);
+ return -EINVAL;
}
+ resource->cap = cap;
-error:
- return res;
+ /* _SHL returns 0 on success, nonzero otherwise */
+ if (data)
+ return -EINVAL;
+
+ return 0;
}
-static void remove_attrs(struct acpi_power_meter_resource *resource)
+static int set_avg_interval(struct acpi_power_meter_resource *resource,
+ unsigned long val)
{
- int i;
+ union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+ struct acpi_object_list args = { 1, &arg0 };
+ unsigned long long data;
+ acpi_status status;
- for (i = 0; i < resource->num_sensors; i++) {
- if (!resource->sensors[i].dev_attr.attr.name)
- continue;
- device_remove_file(&resource->acpi_dev->dev,
- &resource->sensors[i].dev_attr);
+ if (val > resource->caps.max_avg_interval ||
+ val < resource->caps.min_avg_interval)
+ return -EINVAL;
+
+ arg0.integer.value = val;
+ status = acpi_evaluate_integer(resource->acpi_dev->handle, "_PAI",
+ &args, &data);
+ if (ACPI_FAILURE(status)) {
+ acpi_evaluation_failure_warn(resource->acpi_dev->handle, "_PAI",
+ status);
+ return -EINVAL;
}
+ resource->avg_interval = val;
- remove_domain_devices(resource);
+ /* _PAI returns 0 on success, nonzero otherwise */
+ if (data)
+ return -EINVAL;
- resource->num_sensors = 0;
+ return 0;
}
-static int setup_attrs(struct acpi_power_meter_resource *resource)
+static int get_power_alarm_state(struct acpi_power_meter_resource *resource,
+ long *val)
{
- int res = 0;
+ int ret;
- /* _PMD method is optional. */
- res = read_domain_devices(resource);
- if (res && res != -ENODEV)
- return res;
+ ret = update_meter(resource);
+ if (ret)
+ return ret;
- if (resource->caps.flags & POWER_METER_CAN_MEASURE) {
- res = register_attrs(resource, meter_attrs);
- if (res)
- goto error;
+ /* The cap needs to be updated here if notifications are not supported. */
+ if (!(resource->caps.flags & POWER_METER_CAN_NOTIFY)) {
+ ret = update_cap(resource);
+ if (ret)
+ return ret;
+ resource->power_alarm = resource->power > resource->cap;
+ *val = resource->power_alarm;
+ } else {
+ *val = resource->power_alarm || resource->power > resource->cap;
+ resource->power_alarm = resource->power > resource->cap;
}
- if (resource->caps.flags & POWER_METER_CAN_CAP) {
- if (!can_cap_in_hardware()) {
- dev_warn(&resource->acpi_dev->dev,
- "Ignoring unsafe software power cap!\n");
- goto skip_unsafe_cap;
+ return 0;
+}
+
+static umode_t power_meter_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct acpi_power_meter_resource *res = data;
+
+ if (type != hwmon_power)
+ return 0;
+
+ switch (attr) {
+ case hwmon_power_average:
+ case hwmon_power_average_interval_min:
+ case hwmon_power_average_interval_max:
+ if (res->caps.flags & POWER_METER_CAN_MEASURE)
+ return 0444;
+ break;
+ case hwmon_power_average_interval:
+ if (res->caps.flags & POWER_METER_CAN_MEASURE)
+ return 0644;
+ break;
+ case hwmon_power_cap_min:
+ case hwmon_power_cap_max:
+ case hwmon_power_alarm:
+ if (res->caps.flags & POWER_METER_CAN_CAP && can_cap_in_hardware())
+ return 0444;
+ break;
+ case hwmon_power_cap:
+ if (res->caps.flags & POWER_METER_CAN_CAP && can_cap_in_hardware()) {
+ if (res->caps.configurable_cap)
+ return 0644;
+ else
+ return 0444;
}
+ break;
+ default:
+ break;
+ }
- if (resource->caps.configurable_cap)
- res = register_attrs(resource, rw_cap_attrs);
- else
- res = register_attrs(resource, ro_cap_attrs);
+ return 0;
+}
- if (res)
- goto error;
+static int power_meter_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct acpi_power_meter_resource *res = dev_get_drvdata(dev);
+ int ret = 0;
- res = register_attrs(resource, misc_cap_attrs);
- if (res)
- goto error;
+ if (type != hwmon_power)
+ return -EINVAL;
+
+ guard(mutex)(&res->lock);
+
+ switch (attr) {
+ case hwmon_power_average:
+ ret = update_meter(res);
+ if (ret)
+ return ret;
+ if (res->power == UNKNOWN_POWER)
+ return -ENODATA;
+ *val = res->power * 1000;
+ break;
+ case hwmon_power_average_interval_min:
+ *val = res->caps.min_avg_interval;
+ break;
+ case hwmon_power_average_interval_max:
+ *val = res->caps.max_avg_interval;
+ break;
+ case hwmon_power_average_interval:
+ ret = update_avg_interval(res);
+ if (ret)
+ return ret;
+ *val = (res)->avg_interval;
+ break;
+ case hwmon_power_cap_min:
+ *val = res->caps.min_cap * 1000;
+ break;
+ case hwmon_power_cap_max:
+ *val = res->caps.max_cap * 1000;
+ break;
+ case hwmon_power_alarm:
+ ret = get_power_alarm_state(res, val);
+ if (ret)
+ return ret;
+ break;
+ case hwmon_power_cap:
+ ret = update_cap(res);
+ if (ret)
+ return ret;
+ *val = res->cap * 1000;
+ break;
+ default:
+ break;
}
-skip_unsafe_cap:
- if (resource->caps.flags & POWER_METER_CAN_TRIP) {
- res = register_attrs(resource, trip_attrs);
- if (res)
- goto error;
+ return 0;
+}
+
+static int power_meter_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct acpi_power_meter_resource *res = dev_get_drvdata(dev);
+ int ret;
+
+ if (type != hwmon_power)
+ return -EINVAL;
+
+ guard(mutex)(&res->lock);
+ switch (attr) {
+ case hwmon_power_cap:
+ ret = set_cap(res, val);
+ break;
+ case hwmon_power_average_interval:
+ ret = set_avg_interval(res, val);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
}
- res = register_attrs(resource, misc_attrs);
- if (res)
- goto error;
+ return ret;
+}
- return res;
-error:
- remove_attrs(resource);
- return res;
+static const struct hwmon_channel_info * const power_meter_info[] = {
+ HWMON_CHANNEL_INFO(power, HWMON_P_AVERAGE |
+ HWMON_P_AVERAGE_INTERVAL | HWMON_P_AVERAGE_INTERVAL_MIN |
+ HWMON_P_AVERAGE_INTERVAL_MAX | HWMON_P_CAP | HWMON_P_CAP_MIN |
+ HWMON_P_CAP_MAX | HWMON_P_ALARM),
+ NULL
+};
+
+static const struct hwmon_ops power_meter_ops = {
+ .is_visible = power_meter_is_visible,
+ .read = power_meter_read,
+ .write = power_meter_write,
+};
+
+static const struct hwmon_chip_info power_meter_chip_info = {
+ .ops = &power_meter_ops,
+ .info = power_meter_info,
+};
+
+static ssize_t power1_average_max_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_power_meter_resource *res = dev_get_drvdata(dev);
+ unsigned long trip;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &trip);
+ if (ret)
+ return ret;
+
+ mutex_lock(&res->lock);
+ ret = set_trip(res, POWER_METER_TRIP_AVERAGE_MAX_IDX, trip);
+ mutex_unlock(&res->lock);
+
+ return ret == 0 ? count : ret;
+}
+
+static ssize_t power1_average_min_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_power_meter_resource *res = dev_get_drvdata(dev);
+ unsigned long trip;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &trip);
+ if (ret)
+ return ret;
+
+ mutex_lock(&res->lock);
+ ret = set_trip(res, POWER_METER_TRIP_AVERAGE_MIN_IDX, trip);
+ mutex_unlock(&res->lock);
+
+ return ret == 0 ? count : ret;
+}
+
+static ssize_t power1_average_min_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_power_meter_resource *res = dev_get_drvdata(dev);
+
+ if (res->trip[POWER_METER_TRIP_AVERAGE_MIN_IDX] < 0)
+ return sysfs_emit(buf, "unknown\n");
+
+ return sysfs_emit(buf, "%lld\n",
+ res->trip[POWER_METER_TRIP_AVERAGE_MIN_IDX] * 1000);
+}
+
+static ssize_t power1_average_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_power_meter_resource *res = dev_get_drvdata(dev);
+
+ if (res->trip[POWER_METER_TRIP_AVERAGE_MAX_IDX] < 0)
+ return sysfs_emit(buf, "unknown\n");
+
+ return sysfs_emit(buf, "%lld\n",
+ res->trip[POWER_METER_TRIP_AVERAGE_MAX_IDX] * 1000);
+}
+
+static ssize_t power1_cap_hyst_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_power_meter_resource *res = dev_get_drvdata(dev);
+
+ if (res->caps.hysteresis == UNKNOWN_HYSTERESIS)
+ return sysfs_emit(buf, "unknown\n");
+
+ return sysfs_emit(buf, "%llu\n", res->caps.hysteresis * 1000);
+}
+
+static ssize_t power1_accuracy_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_power_meter_resource *res = dev_get_drvdata(dev);
+ unsigned int acc = res->caps.accuracy;
+
+ return sysfs_emit(buf, "%u.%u%%\n", acc / 1000, acc % 1000);
+}
+
+static ssize_t power1_is_battery_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_power_meter_resource *res = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%u\n",
+ res->caps.flags & POWER_METER_IS_BATTERY ? 1 : 0);
+}
+
+static ssize_t power1_model_number_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_power_meter_resource *res = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", res->model_number);
+}
+
+static ssize_t power1_oem_info_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_power_meter_resource *res = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", res->oem_info);
+}
+
+static ssize_t power1_serial_number_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_power_meter_resource *res = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", res->serial_number);
}
+/* depend on POWER_METER_CAN_TRIP */
+static DEVICE_ATTR_RW(power1_average_max);
+static DEVICE_ATTR_RW(power1_average_min);
+
+/* depend on POWER_METER_CAN_CAP */
+static DEVICE_ATTR_RO(power1_cap_hyst);
+
+/* depend on POWER_METER_CAN_MEASURE */
+static DEVICE_ATTR_RO(power1_accuracy);
+static DEVICE_ATTR_RO(power1_is_battery);
+
+static DEVICE_ATTR_RO(power1_model_number);
+static DEVICE_ATTR_RO(power1_oem_info);
+static DEVICE_ATTR_RO(power1_serial_number);
+
+static umode_t power_extra_is_visible(struct kobject *kobj,
+ struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct acpi_power_meter_resource *res = dev_get_drvdata(dev);
+
+ if (attr == &dev_attr_power1_is_battery.attr ||
+ attr == &dev_attr_power1_accuracy.attr) {
+ if ((res->caps.flags & POWER_METER_CAN_MEASURE) == 0)
+ return 0;
+ }
+
+ if (attr == &dev_attr_power1_cap_hyst.attr) {
+ if ((res->caps.flags & POWER_METER_CAN_CAP) == 0) {
+ return 0;
+ } else if (!can_cap_in_hardware()) {
+ dev_warn(&res->acpi_dev->dev,
+ "Ignoring unsafe software power cap!\n");
+ return 0;
+ }
+ }
+
+ if (attr == &dev_attr_power1_average_max.attr ||
+ attr == &dev_attr_power1_average_min.attr) {
+ if ((res->caps.flags & POWER_METER_CAN_TRIP) == 0)
+ return 0;
+ }
+
+ return attr->mode;
+}
+
+static struct attribute *power_extra_attrs[] = {
+ &dev_attr_power1_average_max.attr,
+ &dev_attr_power1_average_min.attr,
+ &dev_attr_power1_cap_hyst.attr,
+ &dev_attr_power1_accuracy.attr,
+ &dev_attr_power1_is_battery.attr,
+ &dev_attr_power1_model_number.attr,
+ &dev_attr_power1_oem_info.attr,
+ &dev_attr_power1_serial_number.attr,
+ NULL
+};
+
+static const struct attribute_group power_extra_group = {
+ .attrs = power_extra_attrs,
+ .is_visible = power_extra_is_visible,
+};
+
+__ATTRIBUTE_GROUPS(power_extra);
+
static void free_capabilities(struct acpi_power_meter_resource *resource)
{
acpi_string *str;
@@ -848,13 +827,23 @@ static void acpi_power_meter_notify(struct acpi_device *device, u32 event)
case METER_NOTIFY_CONFIG:
mutex_lock(&resource->lock);
free_capabilities(resource);
+ remove_domain_devices(resource);
+ hwmon_device_unregister(resource->hwmon_dev);
res = read_capabilities(resource);
- mutex_unlock(&resource->lock);
if (res)
- break;
-
- remove_attrs(resource);
- setup_attrs(resource);
+ dev_err_once(&device->dev, "read capabilities failed.\n");
+ res = read_domain_devices(resource);
+ if (res && res != -ENODEV)
+ dev_err_once(&device->dev, "read domain devices failed.\n");
+ resource->hwmon_dev =
+ hwmon_device_register_with_info(&device->dev,
+ ACPI_POWER_METER_NAME,
+ resource,
+ &power_meter_chip_info,
+ power_extra_groups);
+ if (IS_ERR(resource->hwmon_dev))
+ dev_err_once(&device->dev, "register hwmon device failed.\n");
+ mutex_unlock(&resource->lock);
break;
case METER_NOTIFY_TRIP:
sysfs_notify(&device->dev.kobj, NULL, POWER_AVERAGE_NAME);
@@ -916,7 +905,7 @@ static int acpi_power_meter_add(struct acpi_device *device)
struct acpi_device *ipi_device = acpi_dev_get_first_match_dev("IPI0001", NULL, -1);
if (ipi_device && acpi_wait_for_acpi_ipmi())
- dev_warn(&device->dev, "Waiting for ACPI IPMI timeout");
+ dev_warn(&device->dev, "Waiting for ACPI IPMI timeout");
acpi_dev_put(ipi_device);
}
#endif
@@ -928,11 +917,16 @@ static int acpi_power_meter_add(struct acpi_device *device)
resource->trip[0] = -1;
resource->trip[1] = -1;
- res = setup_attrs(resource);
- if (res)
+ /* _PMD method is optional. */
+ res = read_domain_devices(resource);
+ if (res && res != -ENODEV)
goto exit_free_capability;
- resource->hwmon_dev = hwmon_device_register(&device->dev);
+ resource->hwmon_dev =
+ hwmon_device_register_with_info(&device->dev,
+ ACPI_POWER_METER_NAME, resource,
+ &power_meter_chip_info,
+ power_extra_groups);
if (IS_ERR(resource->hwmon_dev)) {
res = PTR_ERR(resource->hwmon_dev);
goto exit_remove;
@@ -942,7 +936,7 @@ static int acpi_power_meter_add(struct acpi_device *device)
goto exit;
exit_remove:
- remove_attrs(resource);
+ remove_domain_devices(resource);
exit_free_capability:
free_capabilities(resource);
exit_free:
@@ -961,7 +955,7 @@ static void acpi_power_meter_remove(struct acpi_device *device)
resource = acpi_driver_data(device);
hwmon_device_unregister(resource->hwmon_dev);
- remove_attrs(resource);
+ remove_domain_devices(resource);
free_capabilities(resource);
kfree(resource);
diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
index 43e54dc513da..006ced5ab6e6 100644
--- a/drivers/hwmon/asus-ec-sensors.c
+++ b/drivers/hwmon/asus-ec-sensors.c
@@ -316,6 +316,14 @@ static const struct ec_board_info board_info_prime_x570_pro = {
.family = family_amd_500_series,
};
+static const struct ec_board_info board_info_prime_x670e_pro_wifi = {
+ .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
+ SENSOR_TEMP_MB | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT,
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .family = family_amd_600_series,
+};
+
static const struct ec_board_info board_info_pro_art_x570_creator_wifi = {
.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT |
@@ -503,6 +511,8 @@ static const struct dmi_system_id dmi_table[] = {
&board_info_prime_x470_pro),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X570-PRO",
&board_info_prime_x570_pro),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X670E-PRO WIFI",
+ &board_info_prime_x670e_pro_wifi),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt X570-CREATOR WIFI",
&board_info_pro_art_x570_creator_wifi),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt X670E-CREATOR WIFI",
diff --git a/drivers/hwmon/cgbc-hwmon.c b/drivers/hwmon/cgbc-hwmon.c
new file mode 100644
index 000000000000..772f44d56ccf
--- /dev/null
+++ b/drivers/hwmon/cgbc-hwmon.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * cgbc-hwmon - Congatec Board Controller hardware monitoring driver
+ *
+ * Copyright (C) 2024 Thomas Richard <thomas.richard@bootlin.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/mfd/cgbc.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define CGBC_HWMON_CMD_SENSOR 0x77
+#define CGBC_HWMON_CMD_SENSOR_DATA_SIZE 0x05
+
+#define CGBC_HWMON_TYPE_MASK GENMASK(6, 5)
+#define CGBC_HWMON_ID_MASK GENMASK(4, 0)
+#define CGBC_HWMON_ACTIVE_BIT BIT(7)
+
+struct cgbc_hwmon_sensor {
+ enum hwmon_sensor_types type;
+ bool active;
+ unsigned int index;
+ unsigned int channel;
+ const char *label;
+};
+
+struct cgbc_hwmon_data {
+ struct cgbc_device_data *cgbc;
+ unsigned int nb_sensors;
+ struct cgbc_hwmon_sensor *sensors;
+};
+
+enum cgbc_sensor_types {
+ CGBC_HWMON_TYPE_TEMP = 1,
+ CGBC_HWMON_TYPE_IN,
+ CGBC_HWMON_TYPE_FAN
+};
+
+static const char * const cgbc_hwmon_labels_temp[] = {
+ "CPU Temperature",
+ "Box Temperature",
+ "Ambient Temperature",
+ "Board Temperature",
+ "Carrier Temperature",
+ "Chipset Temperature",
+ "Video Temperature",
+ "Other Temperature",
+ "TOPDIM Temperature",
+ "BOTTOMDIM Temperature",
+};
+
+static const struct {
+ enum hwmon_sensor_types type;
+ const char *label;
+} cgbc_hwmon_labels_in[] = {
+ { hwmon_in, "CPU Voltage" },
+ { hwmon_in, "DC Runtime Voltage" },
+ { hwmon_in, "DC Standby Voltage" },
+ { hwmon_in, "CMOS Battery Voltage" },
+ { hwmon_in, "Battery Voltage" },
+ { hwmon_in, "AC Voltage" },
+ { hwmon_in, "Other Voltage" },
+ { hwmon_in, "5V Voltage" },
+ { hwmon_in, "5V Standby Voltage" },
+ { hwmon_in, "3V3 Voltage" },
+ { hwmon_in, "3V3 Standby Voltage" },
+ { hwmon_in, "VCore A Voltage" },
+ { hwmon_in, "VCore B Voltage" },
+ { hwmon_in, "12V Voltage" },
+ { hwmon_curr, "DC Current" },
+ { hwmon_curr, "5V Current" },
+ { hwmon_curr, "12V Current" },
+};
+
+#define CGBC_HWMON_NB_IN_SENSORS 14
+
+static const char * const cgbc_hwmon_labels_fan[] = {
+ "CPU Fan",
+ "Box Fan",
+ "Ambient Fan",
+ "Chipset Fan",
+ "Video Fan",
+ "Other Fan",
+};
+
+static int cgbc_hwmon_cmd(struct cgbc_device_data *cgbc, u8 index, u8 *data)
+{
+ u8 cmd[2] = {CGBC_HWMON_CMD_SENSOR, index};
+
+ return cgbc_command(cgbc, cmd, sizeof(cmd), data, CGBC_HWMON_CMD_SENSOR_DATA_SIZE, NULL);
+}
+
+static int cgbc_hwmon_probe_sensors(struct device *dev, struct cgbc_hwmon_data *hwmon)
+{
+ struct cgbc_device_data *cgbc = hwmon->cgbc;
+ struct cgbc_hwmon_sensor *sensor = hwmon->sensors;
+ u8 data[CGBC_HWMON_CMD_SENSOR_DATA_SIZE], nb_sensors, i;
+ int ret;
+
+ ret = cgbc_hwmon_cmd(cgbc, 0, &data[0]);
+ if (ret)
+ return ret;
+
+ nb_sensors = data[0];
+
+ hwmon->sensors = devm_kzalloc(dev, sizeof(*hwmon->sensors) * nb_sensors, GFP_KERNEL);
+ sensor = hwmon->sensors;
+
+ for (i = 0; i < nb_sensors; i++) {
+ enum cgbc_sensor_types type;
+ unsigned int channel;
+
+ /*
+ * No need to request data for the first sensor: its data was
+ * already returned when we asked the Board Controller for the
+ * number of sensors.
+ */
+ if (i) {
+ ret = cgbc_hwmon_cmd(cgbc, i, &data[0]);
+ if (ret)
+ return ret;
+ }
+
+ type = FIELD_GET(CGBC_HWMON_TYPE_MASK, data[1]);
+ channel = FIELD_GET(CGBC_HWMON_ID_MASK, data[1]) - 1;
+
+ if (type == CGBC_HWMON_TYPE_TEMP && channel < ARRAY_SIZE(cgbc_hwmon_labels_temp)) {
+ sensor->type = hwmon_temp;
+ sensor->label = cgbc_hwmon_labels_temp[channel];
+ } else if (type == CGBC_HWMON_TYPE_IN &&
+ channel < ARRAY_SIZE(cgbc_hwmon_labels_in)) {
+ /*
+ * The Board Controller doesn't differentiate current and voltage sensors.
+ * Get the sensor type from cgbc_hwmon_labels_in[channel].type instead.
+ */
+ sensor->type = cgbc_hwmon_labels_in[channel].type;
+ sensor->label = cgbc_hwmon_labels_in[channel].label;
+ } else if (type == CGBC_HWMON_TYPE_FAN &&
+ channel < ARRAY_SIZE(cgbc_hwmon_labels_fan)) {
+ sensor->type = hwmon_fan;
+ sensor->label = cgbc_hwmon_labels_fan[channel];
+ } else {
+ dev_warn(dev, "Board Controller returned an unknown sensor (type=%d, channel=%d), ignore it",
+ type, channel);
+ continue;
+ }
+
+ sensor->active = FIELD_GET(CGBC_HWMON_ACTIVE_BIT, data[1]);
+ sensor->channel = channel;
+ sensor->index = i;
+ sensor++;
+ hwmon->nb_sensors++;
+ }
+
+ return 0;
+}
+
+static struct cgbc_hwmon_sensor *cgbc_hwmon_find_sensor(struct cgbc_hwmon_data *hwmon,
+ enum hwmon_sensor_types type, int channel)
+{
+ struct cgbc_hwmon_sensor *sensor = NULL;
+ int i;
+
+ /*
+ * The Board Controller doesn't differentiate current and voltage
+ * sensors, so the channel value (from the Board Controller's point
+ * of view) must be offset for current sensors.
+ */
+ if (type == hwmon_curr)
+ channel += CGBC_HWMON_NB_IN_SENSORS;
+
+ for (i = 0; i < hwmon->nb_sensors; i++) {
+ if (hwmon->sensors[i].type == type && hwmon->sensors[i].channel == channel) {
+ sensor = &hwmon->sensors[i];
+ break;
+ }
+ }
+
+ return sensor;
+}
+
+static int cgbc_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel,
+ long *val)
+{
+ struct cgbc_hwmon_data *hwmon = dev_get_drvdata(dev);
+ struct cgbc_hwmon_sensor *sensor = cgbc_hwmon_find_sensor(hwmon, type, channel);
+ struct cgbc_device_data *cgbc = hwmon->cgbc;
+ u8 data[CGBC_HWMON_CMD_SENSOR_DATA_SIZE];
+ int ret;
+
+ ret = cgbc_hwmon_cmd(cgbc, sensor->index, &data[0]);
+ if (ret)
+ return ret;
+
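+ /* The raw sensor value is returned as a little-endian 16-bit word in data[2] and data[3]. */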
+ *val = (data[3] << 8) | data[2];
+
+ /*
+ * For the Board Controller, 1 LSB = 0.1 degree Celsius for
+ * temperature sensors. Other units are reported as expected.
+ */
+ if (sensor->type == hwmon_temp)
+ *val *= 100;
+
+ return 0;
+}
+
+static umode_t cgbc_hwmon_is_visible(const void *_data, enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ struct cgbc_hwmon_data *data = (struct cgbc_hwmon_data *)_data;
+ struct cgbc_hwmon_sensor *sensor;
+
+ sensor = cgbc_hwmon_find_sensor(data, type, channel);
+ if (!sensor)
+ return 0;
+
+ return sensor->active ? 0444 : 0;
+}
+
+static int cgbc_hwmon_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
+{
+ struct cgbc_hwmon_data *hwmon = dev_get_drvdata(dev);
+ struct cgbc_hwmon_sensor *sensor = cgbc_hwmon_find_sensor(hwmon, type, channel);
+
+ *str = sensor->label;
+
+ return 0;
+}
+
+static const struct hwmon_channel_info * const cgbc_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_LABEL, HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_LABEL, HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL, HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL, HWMON_F_INPUT | HWMON_F_LABEL),
+ NULL
+};
+
+static const struct hwmon_ops cgbc_hwmon_ops = {
+ .is_visible = cgbc_hwmon_is_visible,
+ .read = cgbc_hwmon_read,
+ .read_string = cgbc_hwmon_read_string,
+};
+
+static const struct hwmon_chip_info cgbc_chip_info = {
+ .ops = &cgbc_hwmon_ops,
+ .info = cgbc_hwmon_info,
+};
+
+static int cgbc_hwmon_probe(struct platform_device *pdev)
+{
+ struct cgbc_device_data *cgbc = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct cgbc_hwmon_data *data;
+ struct device *hwmon_dev;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->cgbc = cgbc;
+
+ ret = cgbc_hwmon_probe_sensors(dev, data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to probe sensors");
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, "cgbc_hwmon", data, &cgbc_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static struct platform_driver cgbc_hwmon_driver = {
+ .driver = {
+ .name = "cgbc-hwmon",
+ },
+ .probe = cgbc_hwmon_probe,
+};
+
+module_platform_driver(cgbc_hwmon_driver);
+
+MODULE_AUTHOR("Thomas Richard <thomas.richard@bootlin.com>");
+MODULE_DESCRIPTION("Congatec Board Controller Hardware Monitoring Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index cd00adaad1b4..79e5606e6d2f 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -73,7 +73,7 @@
#define DELL_SMM_LEGACY_EXECUTE 0x1
#define DELL_SMM_NO_TEMP 10
-#define DELL_SMM_NO_FANS 3
+#define DELL_SMM_NO_FANS 4
struct smm_regs {
unsigned int eax;
@@ -1074,11 +1074,14 @@ static const struct hwmon_channel_info * const dell_smm_info[] = {
HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX |
HWMON_F_TARGET,
HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX |
+ HWMON_F_TARGET,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX |
HWMON_F_TARGET
),
HWMON_CHANNEL_INFO(pwm,
HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
HWMON_PWM_INPUT
),
NULL
diff --git a/drivers/hwmon/emc2305.c b/drivers/hwmon/emc2305.c
index 4d39fbd83769..234c54956a4b 100644
--- a/drivers/hwmon/emc2305.c
+++ b/drivers/hwmon/emc2305.c
@@ -112,8 +112,6 @@ static char *emc2305_fan_name[] = {
"emc2305_fan5",
};
-static void emc2305_unset_tz(struct device *dev);
-
static int emc2305_get_max_channel(const struct emc2305_data *data)
{
return data->pwm_num;
@@ -293,8 +291,9 @@ static int emc2305_set_single_tz(struct device *dev, int idx)
pwm = data->pwm_min[cdev_idx];
data->cdev_data[cdev_idx].cdev =
- thermal_cooling_device_register(emc2305_fan_name[idx], data,
- &emc2305_cooling_ops);
+ devm_thermal_of_cooling_device_register(dev, dev->of_node,
+ emc2305_fan_name[idx], data,
+ &emc2305_cooling_ops);
if (IS_ERR(data->cdev_data[cdev_idx].cdev)) {
dev_err(dev, "Failed to register cooling device %s\n", emc2305_fan_name[idx]);
@@ -332,24 +331,9 @@ static int emc2305_set_tz(struct device *dev)
for (i = 0; i < data->pwm_num; i++) {
ret = emc2305_set_single_tz(dev, i + 1);
if (ret)
- goto thermal_cooling_device_register_fail;
+ return ret;
}
return 0;
-
-thermal_cooling_device_register_fail:
- emc2305_unset_tz(dev);
- return ret;
-}
-
-static void emc2305_unset_tz(struct device *dev)
-{
- struct emc2305_data *data = dev_get_drvdata(dev);
- int i;
-
- /* Unregister cooling device. */
- for (i = 0; i < EMC2305_PWM_MAX; i++)
- if (data->cdev_data[i].cdev)
- thermal_cooling_device_unregister(data->cdev_data[i].cdev);
}
static umode_t
@@ -599,20 +583,18 @@ static int emc2305_probe(struct i2c_client *client)
return 0;
}
-static void emc2305_remove(struct i2c_client *client)
-{
- struct device *dev = &client->dev;
-
- if (IS_REACHABLE(CONFIG_THERMAL))
- emc2305_unset_tz(dev);
-}
+static const struct of_device_id of_emc2305_match_table[] = {
+ { .compatible = "microchip,emc2305", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_emc2305_match_table);
static struct i2c_driver emc2305_driver = {
.driver = {
.name = "emc2305",
+ .of_match_table = of_emc2305_match_table,
},
.probe = emc2305_probe,
- .remove = emc2305_remove,
.id_table = emc2305_ids,
};
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index d92c536be9af..b779240328d5 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -393,7 +393,12 @@ static int gpio_fan_set_cur_state(struct thermal_cooling_device *cdev,
if (state >= fan_data->num_speed)
return -EINVAL;
+ mutex_lock(&fan_data->lock);
+
set_fan_speed(fan_data, state);
+
+ mutex_unlock(&fan_data->lock);
+
return 0;
}
@@ -489,7 +494,11 @@ MODULE_DEVICE_TABLE(of, of_gpio_fan_match);
static void gpio_fan_stop(void *data)
{
+ struct gpio_fan_data *fan_data = data;
+
+ mutex_lock(&fan_data->lock);
set_fan_speed(data, 0);
+ mutex_unlock(&fan_data->lock);
}
static int gpio_fan_probe(struct platform_device *pdev)
@@ -562,7 +571,9 @@ static int gpio_fan_suspend(struct device *dev)
if (fan_data->gpios) {
fan_data->resume_speed = fan_data->speed_index;
+ mutex_lock(&fan_data->lock);
set_fan_speed(fan_data, 0);
+ mutex_unlock(&fan_data->lock);
}
return 0;
@@ -572,8 +583,11 @@ static int gpio_fan_resume(struct device *dev)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
- if (fan_data->gpios)
+ if (fan_data->gpios) {
+ mutex_lock(&fan_data->lock);
set_fan_speed(fan_data, fan_data->resume_speed);
+ mutex_unlock(&fan_data->lock);
+ }
return 0;
}
diff --git a/drivers/hwmon/gsc-hwmon.c b/drivers/hwmon/gsc-hwmon.c
index 14a6385cd7cc..0f9af82cebec 100644
--- a/drivers/hwmon/gsc-hwmon.c
+++ b/drivers/hwmon/gsc-hwmon.c
@@ -47,7 +47,6 @@ static const struct regmap_bus gsc_hwmon_regmap_bus = {
static const struct regmap_config gsc_hwmon_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .cache_type = REGCACHE_NONE,
};
static ssize_t pwm_auto_point_temp_show(struct device *dev,
diff --git a/drivers/hwmon/hp-wmi-sensors.c b/drivers/hwmon/hp-wmi-sensors.c
index d6bdad26feb1..03c684ba83bd 100644
--- a/drivers/hwmon/hp-wmi-sensors.c
+++ b/drivers/hwmon/hp-wmi-sensors.c
@@ -1197,7 +1197,7 @@ static int hp_wmi_update_info(struct hp_wmi_sensors *state,
if (time_after(jiffies, info->last_updated + HZ)) {
mutex_lock(&state->lock);
- wobj = hp_wmi_get_wobj(HP_WMI_NUMERIC_SENSOR_GUID, instance);
+ wobj = wmidev_block_query(state->wdev, instance);
if (!wobj) {
ret = -EIO;
goto out_unlock;
@@ -1745,7 +1745,7 @@ static int init_numeric_sensors(struct hp_wmi_sensors *state,
return -ENOMEM;
for (i = 0, info = info_arr; i < icount; i++, info++) {
- wobj = hp_wmi_get_wobj(HP_WMI_NUMERIC_SENSOR_GUID, i);
+ wobj = wmidev_block_query(state->wdev, i);
if (!wobj)
return -EIO;
diff --git a/drivers/hwmon/htu31.c b/drivers/hwmon/htu31.c
new file mode 100644
index 000000000000..7521a371aa6c
--- /dev/null
+++ b/drivers/hwmon/htu31.c
@@ -0,0 +1,350 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for the Measurement Specialties HTU31 Temperature and Humidity sensor.
+ *
+ * Copyright (C) 2025
+ * Author: Andrei Lalaev <andrey.lalaev@gmail.com>
+ */
+
+#include <linux/array_size.h>
+#include <linux/cleanup.h>
+#include <linux/crc8.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#define HTU31_READ_TEMP_HUM_CMD 0x00
+#define HTU31_READ_SERIAL_CMD 0x0a
+#define HTU31_CONVERSION_CMD 0x5e
+#define HTU31_HEATER_OFF_CMD 0x02
+#define HTU31_HEATER_ON_CMD 0x04
+
+#define HTU31_TEMP_HUM_LEN 6
+
+/* Conversion time for the highest resolution */
+#define HTU31_HUMIDITY_CONV_TIME 10000 /* us */
+#define HTU31_TEMPERATURE_CONV_TIME 15000 /* us */
+
+#define HTU31_SERIAL_NUMBER_LEN 3
+#define HTU31_SERIAL_NUMBER_CRC_LEN 1
+#define HTU31_SERIAL_NUMBER_CRC_OFFSET 3
+
+#define HTU31_CRC8_INIT_VAL 0
+#define HTU31_CRC8_POLYNOMIAL 0x31
+DECLARE_CRC8_TABLE(htu31_crc8_table);
+
+/**
+ * struct htu31_data - all the data required to operate an HTU31 chip
+ * @client: the i2c client associated with the HTU31
+ * @lock: a mutex to prevent parallel access to the data
+ * @wait_time: the time needed by sensor to convert values
+ * @temperature: the latest temperature value in millidegrees
+ * @humidity: the latest relative humidity value in millipercent
+ * @serial_number: the serial number of the sensor
+ * @heater_enable: the internal state of the heater
+ */
+struct htu31_data {
+ struct i2c_client *client;
+ struct mutex lock; /* Used to protect against parallel data updates */
+ long wait_time;
+ long temperature;
+ long humidity;
+ u8 serial_number[HTU31_SERIAL_NUMBER_LEN];
+ bool heater_enable;
+};
+
+static long htu31_temp_to_millicelsius(u16 val)
+{
+ return -40000 + DIV_ROUND_CLOSEST_ULL(165000ULL * val, 65535);
+}
+
+static long htu31_relative_humidity(u16 val)
+{
+ return DIV_ROUND_CLOSEST_ULL(100000ULL * val, 65535);
+}
+
+static int htu31_data_fetch_command(struct htu31_data *data)
+{
+ struct i2c_client *client = data->client;
+ u8 conversion_on = HTU31_CONVERSION_CMD;
+ u8 read_data_cmd = HTU31_READ_TEMP_HUM_CMD;
+ u8 t_h_buf[HTU31_TEMP_HUM_LEN] = {};
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &read_data_cmd,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = sizeof(t_h_buf),
+ .buf = t_h_buf,
+ },
+ };
+ int ret;
+ u8 crc;
+
+ guard(mutex)(&data->lock);
+
+ ret = i2c_master_send(client, &conversion_on, 1);
+ if (ret != 1) {
+ ret = ret < 0 ? ret : -EIO;
+ dev_err(&client->dev,
+ "Conversion command is failed. Error code: %d\n", ret);
+ return ret;
+ }
+
+ fsleep(data->wait_time);
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs)) {
+ ret = ret < 0 ? ret : -EIO;
+ dev_err(&client->dev,
+ "T&H command is failed. Error code: %d\n", ret);
+ return ret;
+ }
+
+ crc = crc8(htu31_crc8_table, &t_h_buf[0], 2, HTU31_CRC8_INIT_VAL);
+ if (crc != t_h_buf[2]) {
+ dev_err(&client->dev, "Temperature CRC mismatch\n");
+ return -EIO;
+ }
+
+ crc = crc8(htu31_crc8_table, &t_h_buf[3], 2, HTU31_CRC8_INIT_VAL);
+ if (crc != t_h_buf[5]) {
+ dev_err(&client->dev, "Humidity CRC mismatch\n");
+ return -EIO;
+ }
+
+ data->temperature = htu31_temp_to_millicelsius(be16_to_cpup((__be16 *)&t_h_buf[0]));
+ data->humidity = htu31_relative_humidity(be16_to_cpup((__be16 *)&t_h_buf[3]));
+
+ return 0;
+}
+
+static umode_t htu31_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ case hwmon_humidity:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static int htu31_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct htu31_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ ret = htu31_data_fetch_command(data);
+ if (ret < 0)
+ return ret;
+
+ switch (type) {
+ case hwmon_temp:
+ if (attr != hwmon_temp_input)
+ return -EINVAL;
+
+ *val = data->temperature;
+ break;
+ case hwmon_humidity:
+ if (attr != hwmon_humidity_input)
+ return -EINVAL;
+
+ *val = data->humidity;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int htu31_read_serial_number(struct htu31_data *data)
+{
+ struct i2c_client *client = data->client;
+ u8 read_sn_cmd = HTU31_READ_SERIAL_CMD;
+ u8 sn_buf[HTU31_SERIAL_NUMBER_LEN + HTU31_SERIAL_NUMBER_CRC_LEN];
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &read_sn_cmd,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = sizeof(sn_buf),
+ .buf = sn_buf,
+ },
+ };
+ int ret;
+ u8 crc;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0)
+ return ret;
+
+ crc = crc8(htu31_crc8_table, sn_buf, HTU31_SERIAL_NUMBER_LEN, HTU31_CRC8_INIT_VAL);
+ if (crc != sn_buf[HTU31_SERIAL_NUMBER_CRC_OFFSET]) {
+ dev_err(&client->dev, "Serial number CRC mismatch\n");
+ return -EIO;
+ }
+
+ memcpy(data->serial_number, sn_buf, HTU31_SERIAL_NUMBER_LEN);
+
+ return 0;
+}
+
+static ssize_t heater_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct htu31_data *data = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", data->heater_enable);
+}
+
+static ssize_t heater_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct htu31_data *data = dev_get_drvdata(dev);
+ u8 heater_cmd;
+ bool status;
+ int ret;
+
+ ret = kstrtobool(buf, &status);
+ if (ret)
+ return ret;
+
+ heater_cmd = status ? HTU31_HEATER_ON_CMD : HTU31_HEATER_OFF_CMD;
+
+ guard(mutex)(&data->lock);
+
+ ret = i2c_master_send(data->client, &heater_cmd, 1);
+ if (ret < 0)
+ return ret;
+
+ data->heater_enable = status;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(heater_enable);
+
+static int serial_number_show(struct seq_file *seq_file,
+ void *unused)
+{
+ struct htu31_data *data = seq_file->private;
+
+ seq_printf(seq_file, "%X%X%X\n", data->serial_number[0],
+ data->serial_number[1], data->serial_number[2]);
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(serial_number);
+
+static struct attribute *htu31_attrs[] = {
+ &dev_attr_heater_enable.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(htu31);
+
+static const struct hwmon_channel_info * const htu31_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+ HWMON_CHANNEL_INFO(humidity, HWMON_H_INPUT),
+ NULL
+};
+
+static const struct hwmon_ops htu31_hwmon_ops = {
+ .is_visible = htu31_is_visible,
+ .read = htu31_read,
+};
+
+static const struct hwmon_chip_info htu31_chip_info = {
+ .info = htu31_info,
+ .ops = &htu31_hwmon_ops,
+};
+
+static int htu31_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct htu31_data *data;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ data->wait_time = HTU31_TEMPERATURE_CONV_TIME + HTU31_HUMIDITY_CONV_TIME;
+
+ ret = devm_mutex_init(dev, &data->lock);
+ if (ret)
+ return ret;
+
+ crc8_populate_msb(htu31_crc8_table, HTU31_CRC8_POLYNOMIAL);
+
+ ret = htu31_read_serial_number(data);
+ if (ret) {
+ dev_err(dev, "Failed to read serial number\n");
+ return ret;
+ }
+
+ debugfs_create_file("serial_number",
+ 0444,
+ client->debugfs,
+ data,
+ &serial_number_fops);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev,
+ client->name,
+ data,
+ &htu31_chip_info,
+ htu31_groups);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id htu31_id[] = {
+ { "htu31" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, htu31_id);
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id htu31_of_match[] = {
+ { .compatible = "meas,htu31" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, htu31_of_match);
+#endif
+
+static struct i2c_driver htu31_driver = {
+ .driver = {
+ .name = "htu31",
+ .of_match_table = of_match_ptr(htu31_of_match),
+ },
+ .probe = htu31_probe,
+ .id_table = htu31_id,
+};
+module_i2c_driver(htu31_driver);
+
+MODULE_AUTHOR("Andrei Lalaev <andrey.lalaev@gmail.com>");
+MODULE_DESCRIPTION("HTU31 Temperature and Humidity sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 9703d60e9bbf..1688c210888a 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -646,8 +646,8 @@ static const char * const hwmon_power_attr_templates[] = {
[hwmon_power_enable] = "power%d_enable",
[hwmon_power_average] = "power%d_average",
[hwmon_power_average_interval] = "power%d_average_interval",
- [hwmon_power_average_interval_max] = "power%d_interval_max",
- [hwmon_power_average_interval_min] = "power%d_interval_min",
+ [hwmon_power_average_interval_max] = "power%d_average_interval_max",
+ [hwmon_power_average_interval_min] = "power%d_average_interval_min",
[hwmon_power_average_highest] = "power%d_average_highest",
[hwmon_power_average_lowest] = "power%d_average_lowest",
[hwmon_power_average_max] = "power%d_average_max",
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
index 1bf479a0f793..ce0e3f214f5b 100644
--- a/drivers/hwmon/ina3221.c
+++ b/drivers/hwmon/ina3221.c
@@ -116,7 +116,6 @@ struct ina3221_input {
* @fields: Register fields of the device
* @inputs: Array of channel input source specific structures
* @lock: mutex lock to serialize sysfs attribute accesses
- * @debugfs: Pointer to debugfs entry for device
* @reg_config: Register value of INA3221_CONFIG
* @summation_shunt_resistor: equivalent shunt resistor value for summation
* @summation_channel_control: Value written to SCC field in INA3221_MASK_ENABLE
@@ -128,7 +127,6 @@ struct ina3221_data {
struct regmap_field *fields[F_MAX_FIELDS];
struct ina3221_input inputs[INA3221_NUM_CHANNELS];
struct mutex lock;
- struct dentry *debugfs;
u32 reg_config;
int summation_shunt_resistor;
u32 summation_channel_control;
@@ -913,12 +911,9 @@ static int ina3221_probe(struct i2c_client *client)
goto fail;
}
- scnprintf(name, sizeof(name), "%s-%s", INA3221_DRIVER_NAME, dev_name(dev));
- ina->debugfs = debugfs_create_dir(name, NULL);
-
for (i = 0; i < INA3221_NUM_CHANNELS; i++) {
scnprintf(name, sizeof(name), "in%d_summation_disable", i);
- debugfs_create_bool(name, 0400, ina->debugfs,
+ debugfs_create_bool(name, 0400, client->debugfs,
&ina->inputs[i].summation_disable);
}
@@ -940,8 +935,6 @@ static void ina3221_remove(struct i2c_client *client)
struct ina3221_data *ina = dev_get_drvdata(&client->dev);
int i;
- debugfs_remove_recursive(ina->debugfs);
-
pm_runtime_disable(ina->pm_dev);
pm_runtime_set_suspended(ina->pm_dev);
diff --git a/drivers/hwmon/isl28022.c b/drivers/hwmon/isl28022.c
index 3f9b4520b53e..1fb9864635db 100644
--- a/drivers/hwmon/isl28022.c
+++ b/drivers/hwmon/isl28022.c
@@ -324,26 +324,6 @@ static int shunt_voltage_show(struct seq_file *seqf, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(shunt_voltage);
-static struct dentry *isl28022_debugfs_root;
-
-static void isl28022_debugfs_remove(void *res)
-{
- debugfs_remove_recursive(res);
-}
-
-static void isl28022_debugfs_init(struct i2c_client *client, struct isl28022_data *data)
-{
- char name[16];
- struct dentry *debugfs;
-
- scnprintf(name, sizeof(name), "%d-%04hx", client->adapter->nr, client->addr);
-
- debugfs = debugfs_create_dir(name, isl28022_debugfs_root);
- debugfs_create_file("shunt_voltage", 0444, debugfs, data, &shunt_voltage_fops);
-
- devm_add_action_or_reset(&client->dev, isl28022_debugfs_remove, debugfs);
-}
-
/*
* read property values and make consistency checks.
*
@@ -475,7 +455,7 @@ static int isl28022_probe(struct i2c_client *client)
if (err)
return err;
- isl28022_debugfs_init(client, data);
+ debugfs_create_file("shunt_voltage", 0444, client->debugfs, data, &shunt_voltage_fops);
hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
data, &isl28022_chip_info, NULL);
@@ -505,27 +485,7 @@ static struct i2c_driver isl28022_driver = {
.probe = isl28022_probe,
.id_table = isl28022_ids,
};
-
-static int __init isl28022_init(void)
-{
- int err;
-
- isl28022_debugfs_root = debugfs_create_dir("isl28022", NULL);
- err = i2c_add_driver(&isl28022_driver);
- if (!err)
- return 0;
-
- debugfs_remove_recursive(isl28022_debugfs_root);
- return err;
-}
-module_init(isl28022_init);
-
-static void __exit isl28022_exit(void)
-{
- i2c_del_driver(&isl28022_driver);
- debugfs_remove_recursive(isl28022_debugfs_root);
-}
-module_exit(isl28022_exit);
+module_i2c_driver(isl28022_driver);
MODULE_AUTHOR("Carsten Spieß <mail@carsten-spiess.de>");
MODULE_DESCRIPTION("ISL28022 driver");
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index d0b4cc9a5011..3685906cc57c 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -467,6 +467,7 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
k10temp_get_ccd_support(data, 4);
break;
case 0x31: /* Zen2 Threadripper */
+ case 0x47: /* Cyan Skillfish */
case 0x60: /* Renoir */
case 0x68: /* Lucienne */
case 0x71: /* Zen2 */
@@ -535,6 +536,7 @@ static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M40H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3) },
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 511d95a0efb3..75f09553fd67 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -90,6 +90,9 @@
* This driver also supports NE1618 from Philips. It is similar to NE1617
* but supports 11 bit external temperature values.
*
+ * This driver also supports NCT7716, NCT7717 and NCT7718 from Nuvoton.
+ * The NCT7716 is similar to the NCT7717 but supports one additional address.
+ *
* Since the LM90 was the first chipset supported by this driver, most
* comments will refer to this chipset, but are actually general and
* concern all supported chipsets, unless mentioned otherwise.
@@ -119,13 +122,15 @@
* Address is fully defined internally and cannot be changed except for
* MAX6659, MAX6680 and MAX6681.
* LM86, LM89, LM90, LM99, ADM1032, ADM1032-1, ADT7461, ADT7461A, MAX6649,
- * MAX6657, MAX6658, NCT1008 and W83L771 have address 0x4c.
+ * MAX6657, MAX6658, NCT1008, NCT7718 and W83L771 have address 0x4c.
* ADM1032-2, ADT7461-2, ADT7461A-2, LM89-1, LM99-1, MAX6646, and NCT1008D
* have address 0x4d.
* MAX6647 has address 0x4e.
* MAX6659 can have address 0x4c, 0x4d or 0x4e.
* MAX6654, MAX6680, and MAX6681 can have address 0x18, 0x19, 0x1a, 0x29,
* 0x2a, 0x2b, 0x4c, 0x4d or 0x4e.
+ * NCT7716 can have address 0x48 or 0x49.
+ * NCT7717 has address 0x48.
* SA56004 can have address 0x48 through 0x4F.
*/
@@ -136,7 +141,7 @@ static const unsigned short normal_i2c[] = {
enum chips { adm1023, adm1032, adt7461, adt7461a, adt7481,
g781, lm84, lm90, lm99,
max1617, max6642, max6646, max6648, max6654, max6657, max6659, max6680, max6696,
- nct210, nct72, ne1618, sa56004, tmp451, tmp461, w83l771,
+ nct210, nct72, nct7716, nct7717, nct7718, ne1618, sa56004, tmp451, tmp461, w83l771,
};
/*
@@ -191,6 +196,9 @@ enum chips { adm1023, adm1032, adt7461, adt7461a, adt7481,
#define ADT7481_REG_MAN_ID 0x3e
#define ADT7481_REG_CHIP_ID 0x3d
+/* NCT7716/7717/7718 registers */
+#define NCT7716_REG_CHIP_ID 0xFD
+
/* Device features */
#define LM90_HAVE_EXTENDED_TEMP BIT(0) /* extended temperature support */
#define LM90_HAVE_OFFSET BIT(1) /* temperature offset register */
@@ -275,6 +283,9 @@ static const struct i2c_device_id lm90_id[] = {
{ "nct214", nct72 },
{ "nct218", nct72 },
{ "nct72", nct72 },
+ { "nct7716", nct7716 },
+ { "nct7717", nct7717 },
+ { "nct7718", nct7718 },
{ "ne1618", ne1618 },
{ "w83l771", w83l771 },
{ "sa56004", sa56004 },
@@ -383,6 +394,18 @@ static const struct of_device_id __maybe_unused lm90_of_match[] = {
.data = (void *)nct72
},
{
+ .compatible = "nuvoton,nct7716",
+ .data = (void *)nct7716
+ },
+ {
+ .compatible = "nuvoton,nct7717",
+ .data = (void *)nct7717
+ },
+ {
+ .compatible = "nuvoton,nct7718",
+ .data = (void *)nct7718
+ },
+ {
.compatible = "winbond,w83l771",
.data = (void *)w83l771
},
@@ -601,6 +624,26 @@ static const struct lm90_params lm90_params[] = {
.resolution = 11,
.max_convrate = 7,
},
+ [nct7716] = {
+ .flags = LM90_HAVE_ALARMS | LM90_HAVE_CONVRATE,
+ .alert_alarms = 0x40,
+ .resolution = 8,
+ .max_convrate = 8,
+ },
+ [nct7717] = {
+ .flags = LM90_HAVE_ALARMS | LM90_HAVE_CONVRATE,
+ .alert_alarms = 0x40,
+ .resolution = 8,
+ .max_convrate = 8,
+ },
+ [nct7718] = {
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_CRIT
+ | LM90_HAVE_ALARMS | LM90_HAVE_LOW | LM90_HAVE_CONVRATE
+ | LM90_HAVE_REMOTE_EXT,
+ .alert_alarms = 0x7c,
+ .resolution = 11,
+ .max_convrate = 8,
+ },
[ne1618] = {
.flags = LM90_PAUSE_FOR_CONFIG | LM90_HAVE_BROKEN_ALERT
| LM90_HAVE_LOW | LM90_HAVE_CONVRATE | LM90_HAVE_REMOTE_EXT,
@@ -2300,6 +2343,38 @@ static const char *lm90_detect_nuvoton(struct i2c_client *client, int chip_id,
return name;
}
+static const char *lm90_detect_nuvoton_50(struct i2c_client *client, int chip_id,
+ int config1, int convrate)
+{
+ int chip_id2 = i2c_smbus_read_byte_data(client, NCT7716_REG_CHIP_ID);
+ int config2 = i2c_smbus_read_byte_data(client, LM90_REG_CONFIG2);
+ int address = client->addr;
+ const char *name = NULL;
+
+ if (chip_id2 < 0 || config2 < 0)
+ return NULL;
+
+ if (chip_id2 != 0x50 || convrate > 0x08)
+ return NULL;
+
+ switch (chip_id) {
+ case 0x90:
+ if (address == 0x48 && !(config1 & 0x3e) && !(config2 & 0xfe))
+ name = "nct7717";
+ break;
+ case 0x91:
+ if ((address == 0x48 || address == 0x49) && !(config1 & 0x3e) &&
+ !(config2 & 0xfe))
+ name = "nct7716";
+ else if (address == 0x4c && !(config1 & 0x38) && !(config2 & 0xf8))
+ name = "nct7718";
+ break;
+ default:
+ break;
+ }
+ return name;
+}
+
static const char *lm90_detect_nxp(struct i2c_client *client, bool common_address,
int chip_id, int config1, int convrate)
{
@@ -2484,6 +2559,9 @@ static int lm90_detect(struct i2c_client *client, struct i2c_board_info *info)
name = lm90_detect_maxim(client, common_address, chip_id,
config1, convrate);
break;
+ case 0x50:
+ name = lm90_detect_nuvoton_50(client, chip_id, config1, convrate);
+ break;
case 0x54: /* ON MC1066, Microchip TC1068, TCM1617 (originally TelCom) */
if (common_address && !(config1 & 0x3f) && !(convrate & 0xf8))
name = "mc1066";
diff --git a/drivers/hwmon/ltc4282.c b/drivers/hwmon/ltc4282.c
index 4f608a3790fb..7f38d2696239 100644
--- a/drivers/hwmon/ltc4282.c
+++ b/drivers/hwmon/ltc4282.c
@@ -1674,47 +1674,19 @@ static int ltc4282_show_power1_bad_fault_log(void *arg, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(ltc4282_power1_bad_fault_log,
ltc4282_show_power1_bad_fault_log, NULL, "%llu\n");
-static void ltc4282_debugfs_remove(void *dir)
+static void ltc4282_debugfs_init(struct ltc4282_state *st, struct i2c_client *i2c)
{
- debugfs_remove_recursive(dir);
-}
-
-static void ltc4282_debugfs_init(struct ltc4282_state *st,
- struct i2c_client *i2c,
- const struct device *hwmon)
-{
- const char *debugfs_name;
- struct dentry *dentry;
- int ret;
-
- if (!IS_ENABLED(CONFIG_DEBUG_FS))
- return;
-
- debugfs_name = devm_kasprintf(&i2c->dev, GFP_KERNEL, "ltc4282-%s",
- dev_name(hwmon));
- if (!debugfs_name)
- return;
-
- dentry = debugfs_create_dir(debugfs_name, NULL);
- if (IS_ERR(dentry))
- return;
-
- ret = devm_add_action_or_reset(&i2c->dev, ltc4282_debugfs_remove,
- dentry);
- if (ret)
- return;
-
- debugfs_create_file_unsafe("power1_bad_fault_log", 0400, dentry, st,
+ debugfs_create_file_unsafe("power1_bad_fault_log", 0400, i2c->debugfs, st,
&ltc4282_power1_bad_fault_log);
- debugfs_create_file_unsafe("in0_fet_short_fault_log", 0400, dentry, st,
+ debugfs_create_file_unsafe("in0_fet_short_fault_log", 0400, i2c->debugfs, st,
&ltc4282_fet_short_fault_log);
- debugfs_create_file_unsafe("in0_fet_bad_fault_log", 0400, dentry, st,
+ debugfs_create_file_unsafe("in0_fet_bad_fault_log", 0400, i2c->debugfs, st,
&ltc4282_fet_bad_fault_log);
- debugfs_create_file_unsafe("in1_crit_fault_log", 0400, dentry, st,
+ debugfs_create_file_unsafe("in1_crit_fault_log", 0400, i2c->debugfs, st,
&ltc4282_in1_crit_fault_log);
- debugfs_create_file_unsafe("in1_lcrit_fault_log", 0400, dentry, st,
+ debugfs_create_file_unsafe("in1_lcrit_fault_log", 0400, i2c->debugfs, st,
&ltc4282_in1_lcrit_fault_log);
- debugfs_create_file_unsafe("curr1_crit_fault_log", 0400, dentry, st,
+ debugfs_create_file_unsafe("curr1_crit_fault_log", 0400, i2c->debugfs, st,
&ltc4282_curr1_crit_fault_log);
}
@@ -1757,7 +1729,7 @@ static int ltc4282_probe(struct i2c_client *i2c)
if (IS_ERR(hwmon))
return PTR_ERR(hwmon);
- ltc4282_debugfs_init(st, i2c, hwmon);
+ ltc4282_debugfs_init(st, i2c);
return 0;
}
diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
index 416ac02e9f74..6cda35388b24 100644
--- a/drivers/hwmon/nct6683.c
+++ b/drivers/hwmon/nct6683.c
@@ -176,6 +176,7 @@ superio_exit(int ioreg)
#define NCT6683_CUSTOMER_ID_MSI2 0x200
#define NCT6683_CUSTOMER_ID_MSI3 0x207
#define NCT6683_CUSTOMER_ID_MSI4 0x20d
+#define NCT6683_CUSTOMER_ID_AMD 0x162b
#define NCT6683_CUSTOMER_ID_ASROCK 0xe2c
#define NCT6683_CUSTOMER_ID_ASROCK2 0xe1b
#define NCT6683_CUSTOMER_ID_ASROCK3 0x1631
@@ -1231,6 +1232,8 @@ static int nct6683_probe(struct platform_device *pdev)
break;
case NCT6683_CUSTOMER_ID_MSI4:
break;
+ case NCT6683_CUSTOMER_ID_AMD:
+ break;
case NCT6683_CUSTOMER_ID_ASROCK:
break;
case NCT6683_CUSTOMER_ID_ASROCK2:
diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
index fa3351351825..79bc67ffb998 100644
--- a/drivers/hwmon/nct6775-core.c
+++ b/drivers/hwmon/nct6775-core.c
@@ -273,8 +273,8 @@ static const s8 NCT6776_BEEP_BITS[NUM_BEEP_BITS] = {
static const u16 NCT6776_REG_TOLERANCE_H[] = {
0x10c, 0x20c, 0x30c, 0x80c, 0x90c, 0xa0c, 0xb0c };
-static const u8 NCT6776_REG_PWM_MODE[] = { 0x04, 0, 0, 0, 0, 0 };
-static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0 };
+static const u8 NCT6776_REG_PWM_MODE[] = { 0x04, 0, 0, 0, 0, 0, 0 };
+static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0, 0 };
static const u16 NCT6776_REG_FAN_MIN[] = {
0x63a, 0x63c, 0x63e, 0x640, 0x642, 0x64a, 0x64c };
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 0d29c8f97ba7..d21f7266c411 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -387,12 +387,9 @@ static int get_ohm_of_thermistor(struct ntc_data *data, unsigned int uv)
puo = data->pullup_ohm;
pdo = data->pulldown_ohm;
- if (uv == 0)
- return (data->connect == NTC_CONNECTED_POSITIVE) ?
- INT_MAX : 0;
- if (uv >= puv)
- return (data->connect == NTC_CONNECTED_POSITIVE) ?
- 0 : INT_MAX;
+ /* Faulty ADC value */
+ if (uv == 0 || uv >= puv)
+ return -ENODATA;
if (data->connect == NTC_CONNECTED_POSITIVE && puo == 0)
n = div_u64(pdo * (puv - uv), uv);
@@ -404,8 +401,10 @@ static int get_ohm_of_thermistor(struct ntc_data *data, unsigned int uv)
else
n = div64_u64_safe(pdo * puo * uv, pdo * (puv - uv) - puo * uv);
- if (n > INT_MAX)
- n = INT_MAX;
+ /* sensor out of bounds */
+ if (n > data->comp[0].ohm || n < data->comp[data->n_comp - 1].ohm)
+ return -ENODATA;
+
return n;
}
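
The pull-down branch shown above is the usual voltage-divider inversion: with the thermistor on the supply side and pulldown_ohm to ground, uv = puv * pdo / (pdo + R), so R = pdo * (puv - uv) / uv, and readings of 0 or >= puv are now rejected as faulty. A small userspace check of that formula, not part of the patch and using made-up numbers, might be:

#include <stdint.h>
#include <stdio.h>

/* uv = puv * pdo / (pdo + R)  =>  R = pdo * (puv - uv) / uv */
static long long ntc_ohm(uint64_t puv, uint64_t pdo, uint64_t uv)
{
	if (uv == 0 || uv >= puv)
		return -61;	/* -ENODATA, as in the patched driver */

	return pdo * (puv - uv) / uv;
}

int main(void)
{
	/* 1.8 V reference, 10 kOhm pulldown, 0.6 V measured at the ADC node */
	printf("%lld ohm\n", ntc_ohm(1800000, 10000, 600000));	/* prints 20000 */
	return 0;
}
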
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 419469f40ba0..c9b3c3149982 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -133,6 +133,15 @@ config SENSORS_DPS920AB
This driver can also be built as a module. If so, the module will
be called dps920ab.
+config SENSORS_INA233
+ tristate "Texas Instruments INA233 and compatibles"
+ help
+ If you say yes here you get hardware monitoring support for Texas
+ Instruments INA233.
+
+ This driver can also be built as a module. If so, the module will
+ be called ina233.
+
config SENSORS_INSPUR_IPSPS
tristate "INSPUR Power System Power Supply"
help
@@ -233,9 +242,9 @@ config SENSORS_LTC2978_REGULATOR
depends on SENSORS_LTC2978 && REGULATOR
help
If you say yes here you get regulator support for Linear Technology
- LTC3880, LTC3883, LTC3884, LTC3886, LTC3887, LTC3889, LTC7841,
- LTC7880, LTM4644, LTM4675, LTM4676, LTM4677, LTM4678, LTM4680,
- LTM4686, and LTM4700.
+ LT7170, LT7171, LTC3880, LTC3883, LTC3884, LTC3886, LTC3887, LTC3889,
+ LTC7841, LTC7880, LTM4644, LTM4673, LTM4675, LTM4676, LTM4677,
+ LTM4678, LTM4680, LTM4686, and LTM4700.
config SENSORS_LTC3815
tristate "Linear Technologies LTC3815"
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index c7eb7739b7f8..56f128c4653e 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_SENSORS_DELTA_AHE50DC_FAN) += delta-ahe50dc-fan.o
obj-$(CONFIG_SENSORS_FSP_3Y) += fsp-3y.o
obj-$(CONFIG_SENSORS_IBM_CFFPS) += ibm-cffps.o
obj-$(CONFIG_SENSORS_DPS920AB) += dps920ab.o
+obj-$(CONFIG_SENSORS_INA233) += ina233.o
obj-$(CONFIG_SENSORS_INSPUR_IPSPS) += inspur-ipsps.o
obj-$(CONFIG_SENSORS_IR35221) += ir35221.o
obj-$(CONFIG_SENSORS_IR36021) += ir36021.o
diff --git a/drivers/hwmon/pmbus/ina233.c b/drivers/hwmon/pmbus/ina233.c
new file mode 100644
index 000000000000..dde1e1678394
--- /dev/null
+++ b/drivers/hwmon/pmbus/ina233.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for INA233
+ *
+ * Copyright (c) 2025 Leo Yang
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "pmbus.h"
+
+#define MFR_READ_VSHUNT 0xd1
+#define MFR_CALIBRATION 0xd4
+
+#define INA233_MAX_CURRENT_DEFAULT 32768000 /* uA */
+#define INA233_RSHUNT_DEFAULT 2000 /* uOhm */
+
+#define MAX_M_VAL 32767
+
+static void calculate_coef(int *m, int *R, u32 current_lsb, int power_coef)
+{
+ u64 scaled_m;
+ int scale_factor = 0;
+ int scale_coef = 1;
+
+ /*
+ * The factor 1000000 accounts for Current_LSB being given in uA rather than A.
+ * scale_coef scales the value up to minimize rounding errors;
+ * if 1000000 divides evenly by current_lsb, no scaling is needed.
+ */
+ if (1000000 % current_lsb) {
+ /* Scaling to keep integer precision */
+ scale_factor = -3;
+ scale_coef = 1000;
+ }
+
+ /*
+ * Convert the unit (Current_LSB A->uA) and apply scaling (scale_factor)
+ * to keep integer precision.
+ * Formulae referenced from the spec.
+ */
+ scaled_m = div64_u64(1000000 * scale_coef, (u64)current_lsb * power_coef);
+
+ /* Maximize while keeping it bounded. */
+ while (scaled_m > MAX_M_VAL) {
+ scaled_m = div_u64(scaled_m, 10);
+ scale_factor++;
+ }
+ /* Scale up only if fractional part exists. */
+ while (scaled_m * 10 < MAX_M_VAL && scale_coef != 1) {
+ scaled_m *= 10;
+ scale_factor--;
+ }
+
+ *m = scaled_m;
+ *R = scale_factor;
+}
+
+static int ina233_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIRT_READ_VMON:
+ ret = pmbus_read_word_data(client, 0, 0xff, MFR_READ_VSHUNT);
+
+ /* Adjust returned value to match VIN coefficients */
+ /* VIN LSB: 1.25 mV, VSHUNT LSB: 2.5 uV */
+ ret = DIV_ROUND_CLOSEST(ret * 25, 12500);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static int ina233_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ int ret, m, R;
+ u32 rshunt;
+ u32 max_current;
+ u32 current_lsb;
+ u16 calibration;
+ struct pmbus_driver_info *info;
+
+ info = devm_kzalloc(dev, sizeof(struct pmbus_driver_info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->pages = 1;
+ info->format[PSC_VOLTAGE_IN] = direct;
+ info->format[PSC_VOLTAGE_OUT] = direct;
+ info->format[PSC_CURRENT_OUT] = direct;
+ info->format[PSC_POWER] = direct;
+ info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_INPUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_POUT
+ | PMBUS_HAVE_VMON | PMBUS_HAVE_STATUS_VMON;
+ info->m[PSC_VOLTAGE_IN] = 8;
+ info->R[PSC_VOLTAGE_IN] = 2;
+ info->m[PSC_VOLTAGE_OUT] = 8;
+ info->R[PSC_VOLTAGE_OUT] = 2;
+ info->read_word_data = ina233_read_word_data;
+
+ /* If INA233 skips current/power, shunt-resistor and current-lsb aren't needed. */
+ /* read rshunt value (uOhm) */
+ ret = device_property_read_u32(dev, "shunt-resistor", &rshunt);
+ if (ret) {
+ if (ret != -EINVAL)
+ return dev_err_probe(dev, ret, "Shunt resistor property read fail.\n");
+ rshunt = INA233_RSHUNT_DEFAULT;
+ }
+ if (!rshunt)
+ return dev_err_probe(dev, -EINVAL,
+ "Shunt resistor cannot be zero.\n");
+
+ /* read Maximum expected current value (uA) */
+ ret = device_property_read_u32(dev, "ti,maximum-expected-current-microamp", &max_current);
+ if (ret) {
+ if (ret != -EINVAL)
+ return dev_err_probe(dev, ret,
+ "Maximum expected current property read fail.\n");
+ max_current = INA233_MAX_CURRENT_DEFAULT;
+ }
+ if (max_current < 32768)
+ return dev_err_probe(dev, -EINVAL,
+ "Maximum expected current cannot less then 32768.\n");
+
+ /* Calculate Current_LSB according to the spec formula */
+ current_lsb = max_current / 32768;
+
+ /* calculate current coefficient */
+ calculate_coef(&m, &R, current_lsb, 1);
+ info->m[PSC_CURRENT_OUT] = m;
+ info->R[PSC_CURRENT_OUT] = R;
+
+ /* calculate power coefficient */
+ calculate_coef(&m, &R, current_lsb, 25);
+ info->m[PSC_POWER] = m;
+ info->R[PSC_POWER] = R;
+
+ /* Write the MFR_CALIBRATION register, applying the formula from the spec with unit scaling. */
+ calibration = div64_u64(5120000000ULL, (u64)rshunt * current_lsb);
+ if (calibration > 0x7FFF)
+ return dev_err_probe(dev, -EINVAL,
+ "Product of Current_LSB %u and shunt resistor %u too small, MFR_CALIBRATION reg exceeds 0x7FFF.\n",
+ current_lsb, rshunt);
+ ret = i2c_smbus_write_word_data(client, MFR_CALIBRATION, calibration);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Unable to write calibration.\n");
+
+ dev_dbg(dev, "power monitor %s (Rshunt = %u uOhm, Current_LSB = %u uA/bit)\n",
+ client->name, rshunt, current_lsb);
+
+ return pmbus_do_probe(client, info);
+}
+
+static const struct i2c_device_id ina233_id[] = {
+ {"ina233", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ina233_id);
+
+static const struct of_device_id __maybe_unused ina233_of_match[] = {
+ { .compatible = "ti,ina233" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ina233_of_match);
+
+static struct i2c_driver ina233_driver = {
+ .driver = {
+ .name = "ina233",
+ .of_match_table = of_match_ptr(ina233_of_match),
+ },
+ .probe = ina233_probe,
+ .id_table = ina233_id,
+};
+
+module_i2c_driver(ina233_driver);
+
+MODULE_AUTHOR("Leo Yang <leo.yang.sy0@gmail.com>");
+MODULE_DESCRIPTION("PMBus driver for INA233 and compatible chips");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
index 4c306943383a..8f5be520a15d 100644
--- a/drivers/hwmon/pmbus/ltc2978.c
+++ b/drivers/hwmon/pmbus/ltc2978.c
@@ -23,11 +23,11 @@ enum chips {
/* Managers */
ltc2972, ltc2974, ltc2975, ltc2977, ltc2978, ltc2979, ltc2980,
/* Controllers */
- ltc3880, ltc3882, ltc3883, ltc3884, ltc3886, ltc3887, ltc3889, ltc7132,
- ltc7841, ltc7880,
+ lt7170, lt7171, ltc3880, ltc3882, ltc3883, ltc3884, ltc3886, ltc3887,
+ ltc3889, ltc7132, ltc7841, ltc7880,
/* Modules */
- ltm2987, ltm4664, ltm4675, ltm4676, ltm4677, ltm4678, ltm4680, ltm4686,
- ltm4700,
+ ltm2987, ltm4664, ltm4673, ltm4675, ltm4676, ltm4677, ltm4678, ltm4680,
+ ltm4686, ltm4700,
};
/* Common for all chips */
@@ -62,6 +62,7 @@ enum chips {
#define LTC2978_ID_MASK 0xfff0
+#define LT7170_ID 0x1C10
#define LTC2972_ID 0x0310
#define LTC2974_ID 0x0210
#define LTC2975_ID 0x0220
@@ -86,6 +87,8 @@ enum chips {
#define LTM2987_ID_A 0x8010 /* A/B for two die IDs */
#define LTM2987_ID_B 0x8020
#define LTM4664_ID 0x4120
+#define LTM4673_ID_REV1 0x0230
+#define LTM4673_ID 0x4480
#define LTM4675_ID 0x47a0
#define LTM4676_ID_REV1 0x4400
#define LTM4676_ID_REV2 0x4480
@@ -535,6 +538,8 @@ static int ltc2978_write_word_data(struct i2c_client *client, int page,
}
static const struct i2c_device_id ltc2978_id[] = {
+ {"lt7170", lt7170},
+ {"lt7171", lt7171},
{"ltc2972", ltc2972},
{"ltc2974", ltc2974},
{"ltc2975", ltc2975},
@@ -554,6 +559,7 @@ static const struct i2c_device_id ltc2978_id[] = {
{"ltc7880", ltc7880},
{"ltm2987", ltm2987},
{"ltm4664", ltm4664},
+ {"ltm4673", ltm4673},
{"ltm4675", ltm4675},
{"ltm4676", ltm4676},
{"ltm4677", ltm4677},
@@ -612,7 +618,7 @@ static int ltc2978_get_id(struct i2c_client *client)
ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, buf);
if (ret < 0)
return ret;
- if (ret < 3 || strncmp(buf, "LTC", 3))
+ if (ret < 3 || (strncmp(buf, "LTC", 3) && strncmp(buf, "ADI", 3)))
return -ENODEV;
ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, buf);
@@ -627,6 +633,25 @@ static int ltc2978_get_id(struct i2c_client *client)
chip_id &= LTC2978_ID_MASK;
+ if (chip_id == LT7170_ID) {
+ u8 buf[I2C_SMBUS_BLOCK_MAX];
+ int ret;
+
+ ret = i2c_smbus_read_i2c_block_data(client, PMBUS_IC_DEVICE_ID,
+ sizeof(buf), buf);
+ if (ret < 0)
+ return ret;
+
+ if (!strncmp(buf + 1, "LT7170", 6) ||
+ !strncmp(buf + 1, "LT7170-1", 8))
+ return lt7170;
+ if (!strncmp(buf + 1, "LT7171", 6) ||
+ !strncmp(buf + 1, "LT7171-1", 8))
+ return lt7171;
+
+ return -ENODEV;
+ }
+
if (chip_id == LTC2972_ID)
return ltc2972;
else if (chip_id == LTC2974_ID)
@@ -665,6 +690,8 @@ static int ltc2978_get_id(struct i2c_client *client)
return ltm2987;
else if (chip_id == LTM4664_ID)
return ltm4664;
+ else if (chip_id == LTM4673_ID || chip_id == LTM4673_ID_REV1)
+ return ltm4673;
else if (chip_id == LTM4675_ID)
return ltm4675;
else if (chip_id == LTM4676_ID_REV1 || chip_id == LTM4676_ID_REV2 ||
@@ -736,6 +763,20 @@ static int ltc2978_probe(struct i2c_client *client)
data->temp2_max = 0x7c00;
switch (data->id) {
+ case lt7170:
+ case lt7171:
+ data->features |= FEAT_CLEAR_PEAKS | FEAT_NEEDS_POLLING;
+ info->read_word_data = ltc3883_read_word_data;
+ info->pages = LTC3883_NUM_PAGES;
+ info->format[PSC_VOLTAGE_IN] = ieee754;
+ info->format[PSC_VOLTAGE_OUT] = ieee754;
+ info->format[PSC_CURRENT_OUT] = ieee754;
+ info->format[PSC_TEMPERATURE] = ieee754;
+ info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT
+ | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+ break;
case ltc2972:
info->read_word_data = ltc2975_read_word_data;
info->pages = LTC2972_NUM_PAGES;
@@ -869,6 +910,21 @@ static int ltc2978_probe(struct i2c_client *client)
| PMBUS_HAVE_IOUT
| PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
break;
+ case ltm4673:
+ data->features |= FEAT_NEEDS_POLLING;
+ info->read_word_data = ltc2975_read_word_data;
+ info->pages = LTC2974_NUM_PAGES;
+ info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT
+ | PMBUS_HAVE_TEMP2;
+ for (i = 0; i < info->pages; i++) {
+ info->func[i] |= PMBUS_HAVE_IIN
+ | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_PIN
+ | PMBUS_HAVE_POUT
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+ }
+ break;
default:
return -ENODEV;
}
@@ -907,6 +963,8 @@ static int ltc2978_probe(struct i2c_client *client)
#ifdef CONFIG_OF
static const struct of_device_id ltc2978_of_match[] = {
+ { .compatible = "lltc,lt7170" },
+ { .compatible = "lltc,lt7171" },
{ .compatible = "lltc,ltc2972" },
{ .compatible = "lltc,ltc2974" },
{ .compatible = "lltc,ltc2975" },
@@ -926,6 +984,7 @@ static const struct of_device_id ltc2978_of_match[] = {
{ .compatible = "lltc,ltc7880" },
{ .compatible = "lltc,ltm2987" },
{ .compatible = "lltc,ltm4664" },
+ { .compatible = "lltc,ltm4673" },
{ .compatible = "lltc,ltm4675" },
{ .compatible = "lltc,ltm4676" },
{ .compatible = "lltc,ltm4677" },
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 787683e83db6..cfeba2e4c5c3 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -8,6 +8,7 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/dcache.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/module.h>
@@ -44,8 +45,7 @@ struct pmbus_sensor {
enum pmbus_sensor_classes class; /* sensor class */
bool update; /* runtime sensor update needed */
bool convert; /* Whether or not to apply linear/vid/direct */
- int data; /* Sensor data.
- Negative if there was a read error */
+ int data; /* Sensor data; negative if there was a read error */
};
#define to_pmbus_sensor(_attr) \
container_of(_attr, struct pmbus_sensor, attribute)
@@ -100,7 +100,6 @@ struct pmbus_data {
int num_attributes;
struct attribute_group group;
const struct attribute_group **groups;
- struct dentry *debugfs; /* debugfs device directory */
struct pmbus_sensor *sensors;
@@ -192,11 +191,10 @@ static void pmbus_update_ts(struct i2c_client *client, bool write_op)
struct pmbus_data *data = i2c_get_clientdata(client);
const struct pmbus_driver_info *info = data->info;
- if (info->access_delay) {
+ if (info->access_delay)
data->access_time = ktime_get();
- } else if (info->write_delay && write_op) {
+ else if (info->write_delay && write_op)
data->write_time = ktime_get();
- }
}
int pmbus_set_page(struct i2c_client *client, int page, int phase)
@@ -292,7 +290,6 @@ int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg,
}
EXPORT_SYMBOL_NS_GPL(pmbus_write_word_data, "PMBUS");
-
static int pmbus_write_virt_reg(struct i2c_client *client, int page, int reg,
u16 word)
{
@@ -381,14 +378,14 @@ int pmbus_update_fan(struct i2c_client *client, int page, int id,
u8 to;
from = _pmbus_read_byte_data(client, page,
- pmbus_fan_config_registers[id]);
+ pmbus_fan_config_registers[id]);
if (from < 0)
return from;
to = (from & ~mask) | (config & mask);
if (to != from) {
rv = _pmbus_write_byte_data(client, page,
- pmbus_fan_config_registers[id], to);
+ pmbus_fan_config_registers[id], to);
if (rv < 0)
return rv;
}
@@ -563,7 +560,7 @@ static int pmbus_get_fan_rate(struct i2c_client *client, int page, int id,
}
config = _pmbus_read_byte_data(client, page,
- pmbus_fan_config_registers[id]);
+ pmbus_fan_config_registers[id]);
if (config < 0)
return config;
@@ -788,7 +785,7 @@ static s64 pmbus_reg2data_linear(struct pmbus_data *data,
if (sensor->class == PSC_VOLTAGE_OUT) { /* LINEAR16 */
exponent = data->exponent[sensor->page];
- mantissa = (u16) sensor->data;
+ mantissa = (u16)sensor->data;
} else { /* LINEAR11 */
exponent = ((s16)sensor->data) >> 11;
mantissa = ((s16)((sensor->data & 0x7ff) << 5)) >> 5;
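
The LINEAR11 handling shown in the context above splits the register word into a 5-bit signed exponent (bits 15:11) and an 11-bit signed mantissa, with value = mantissa * 2^exponent. A minimal standalone decoder, not part of the patch and fed with a hypothetical example word, might look like:

#include <stdint.h>
#include <stdio.h>

/* Decode a PMBus LINEAR11 word: 5-bit signed exponent, 11-bit signed mantissa */
static double linear11_decode(uint16_t word)
{
	int exponent = (int16_t)word >> 11;
	int mantissa = (int16_t)((word & 0x7ff) << 5) >> 5;

	return mantissa * (exponent >= 0 ? (double)(1 << exponent)
					 : 1.0 / (1 << -exponent));
}

int main(void)
{
	/* 0xd3e8: exponent -6, mantissa 1000 -> 15.625 (in the register's unit) */
	printf("%g\n", linear11_decode(0xd3e8));
	return 0;
}
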
@@ -1173,7 +1170,6 @@ static int pmbus_get_boolean(struct i2c_client *client, struct pmbus_boolean *b,
} else {
pmbus_clear_fault_page(client, page);
}
-
}
if (s1 && s2) {
s64 v1, v2;
@@ -1470,8 +1466,7 @@ static int pmbus_add_label(struct pmbus_data *data,
snprintf(label->name, sizeof(label->name), "%s%d_label", name, seq);
if (!index) {
if (phase == 0xff)
- strncpy(label->label, lstring,
- sizeof(label->label) - 1);
+ strscpy(label->label, lstring);
else
snprintf(label->label, sizeof(label->label), "%s.%d",
lstring, phase);
@@ -1500,8 +1495,7 @@ struct pmbus_limit_attr {
u16 reg; /* Limit register */
u16 sbit; /* Alarm attribute status bit */
bool update; /* True if register needs updates */
- bool low; /* True if low limit; for limits with compare
- functions only */
+ bool low; /* True if low limit; for limits with compare functions only */
const char *attr; /* Attribute name */
const char *alarm; /* Alarm attribute name */
};
@@ -2212,8 +2206,8 @@ static const u32 pmbus_fan_status_flags[] = {
/* Precondition: FAN_CONFIG_x_y and FAN_COMMAND_x must exist for the fan ID */
static int pmbus_add_fan_ctrl(struct i2c_client *client,
- struct pmbus_data *data, int index, int page, int id,
- u8 config)
+ struct pmbus_data *data, int index, int page,
+ int id, u8 config)
{
struct pmbus_sensor *sensor;
@@ -2225,7 +2219,7 @@ static int pmbus_add_fan_ctrl(struct i2c_client *client,
return -ENOMEM;
if (!((data->info->func[page] & PMBUS_HAVE_PWM12) ||
- (data->info->func[page] & PMBUS_HAVE_PWM34)))
+ (data->info->func[page] & PMBUS_HAVE_PWM34)))
return 0;
sensor = pmbus_add_sensor(data, "pwm", NULL, index, page,
@@ -2935,7 +2929,7 @@ static void pmbus_notify(struct pmbus_data *data, int page, int reg, int flags)
}
static int _pmbus_get_flags(struct pmbus_data *data, u8 page, unsigned int *flags,
- unsigned int *event, bool notify)
+ unsigned int *event, bool notify)
{
int i, status;
const struct pmbus_status_category *cat;
@@ -2964,7 +2958,6 @@ static int _pmbus_get_flags(struct pmbus_data *data, u8 page, unsigned int *flag
if (notify && status)
pmbus_notify(data, page, cat->reg, status);
-
}
/*
@@ -3015,7 +3008,6 @@ static int _pmbus_get_flags(struct pmbus_data *data, u8 page, unsigned int *flag
*event |= REGULATOR_EVENT_OVER_TEMP_WARN;
}
-
return 0;
}
@@ -3228,7 +3220,7 @@ static int pmbus_regulator_set_voltage(struct regulator_dev *rdev, int min_uv,
}
static int pmbus_regulator_list_voltage(struct regulator_dev *rdev,
- unsigned int selector)
+ unsigned int selector)
{
struct device *dev = rdev_get_dev(rdev);
struct i2c_client *client = to_i2c_client(dev->parent);
@@ -3320,17 +3312,16 @@ static int pmbus_regulator_register(struct pmbus_data *data)
return 0;
}
-static int pmbus_regulator_notify(struct pmbus_data *data, int page, int event)
+static void pmbus_regulator_notify(struct pmbus_data *data, int page, int event)
{
- int j;
+ int j;
- for (j = 0; j < data->info->num_regulators; j++) {
- if (page == rdev_get_id(data->rdevs[j])) {
- regulator_notifier_call_chain(data->rdevs[j], event, NULL);
- break;
- }
+ for (j = 0; j < data->info->num_regulators; j++) {
+ if (page == rdev_get_id(data->rdevs[j])) {
+ regulator_notifier_call_chain(data->rdevs[j], event, NULL);
+ break;
}
- return 0;
+ }
}
#else
static int pmbus_regulator_register(struct pmbus_data *data)
@@ -3338,9 +3329,8 @@ static int pmbus_regulator_register(struct pmbus_data *data)
return 0;
}
-static int pmbus_regulator_notify(struct pmbus_data *data, int page, int event)
+static void pmbus_regulator_notify(struct pmbus_data *data, int page, int event)
{
- return 0;
}
#endif
@@ -3363,8 +3353,8 @@ static irqreturn_t pmbus_fault_handler(int irq, void *pdata)
{
struct pmbus_data *data = pdata;
struct i2c_client *client = to_i2c_client(data->dev);
-
int i, status, event;
+
mutex_lock(&data->update_lock);
for (i = 0; i < data->info->pages; i++) {
_pmbus_get_flags(data, i, &status, &event, true);
@@ -3428,7 +3418,6 @@ static int pmbus_irq_setup(struct i2c_client *client, struct pmbus_data *data)
static struct dentry *pmbus_debugfs_dir; /* pmbus debugfs directory */
-#if IS_ENABLED(CONFIG_DEBUG_FS)
static int pmbus_debugfs_get(void *data, u64 *val)
{
int rc;
@@ -3471,8 +3460,8 @@ static int pmbus_debugfs_get_status(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(pmbus_debugfs_ops_status, pmbus_debugfs_get_status,
NULL, "0x%04llx\n");
-static ssize_t pmbus_debugfs_mfr_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t pmbus_debugfs_block_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
{
int rc;
struct pmbus_debugfs_entry *entry = file->private_data;
@@ -3497,51 +3486,98 @@ static ssize_t pmbus_debugfs_mfr_read(struct file *file, char __user *buf,
return simple_read_from_buffer(buf, count, ppos, data, rc);
}
-static const struct file_operations pmbus_debugfs_ops_mfr = {
+static const struct file_operations pmbus_debugfs_block_ops = {
.llseek = noop_llseek,
- .read = pmbus_debugfs_mfr_read,
+ .read = pmbus_debugfs_block_read,
.write = NULL,
.open = simple_open,
};
-static void pmbus_remove_debugfs(void *data)
+static void pmbus_remove_symlink(void *symlink)
{
- struct dentry *entry = data;
-
- debugfs_remove_recursive(entry);
+ debugfs_remove(symlink);
}
-static int pmbus_init_debugfs(struct i2c_client *client,
- struct pmbus_data *data)
+struct pmbus_debugfs_data {
+ u8 reg;
+ u32 flag;
+ const char *name;
+};
+
+static const struct pmbus_debugfs_data pmbus_debugfs_block_data[] = {
+ { .reg = PMBUS_MFR_ID, .name = "mfr_id" },
+ { .reg = PMBUS_MFR_MODEL, .name = "mfr_model" },
+ { .reg = PMBUS_MFR_REVISION, .name = "mfr_revision" },
+ { .reg = PMBUS_MFR_LOCATION, .name = "mfr_location" },
+ { .reg = PMBUS_MFR_DATE, .name = "mfr_date" },
+ { .reg = PMBUS_MFR_SERIAL, .name = "mfr_serial" },
+};
+
+static const struct pmbus_debugfs_data pmbus_debugfs_status_data[] = {
+ { .reg = PMBUS_STATUS_VOUT, .flag = PMBUS_HAVE_STATUS_VOUT, .name = "status%d_vout" },
+ { .reg = PMBUS_STATUS_IOUT, .flag = PMBUS_HAVE_STATUS_IOUT, .name = "status%d_iout" },
+ { .reg = PMBUS_STATUS_INPUT, .flag = PMBUS_HAVE_STATUS_INPUT, .name = "status%d_input" },
+ { .reg = PMBUS_STATUS_TEMPERATURE, .flag = PMBUS_HAVE_STATUS_TEMP,
+ .name = "status%d_temp" },
+ { .reg = PMBUS_STATUS_FAN_12, .flag = PMBUS_HAVE_STATUS_FAN12, .name = "status%d_fan12" },
+ { .reg = PMBUS_STATUS_FAN_34, .flag = PMBUS_HAVE_STATUS_FAN34, .name = "status%d_fan34" },
+ { .reg = PMBUS_STATUS_CML, .name = "status%d_cml" },
+ { .reg = PMBUS_STATUS_OTHER, .name = "status%d_other" },
+ { .reg = PMBUS_STATUS_MFR_SPECIFIC, .name = "status%d_mfr" },
+};
+
+static void pmbus_init_debugfs(struct i2c_client *client,
+ struct pmbus_data *data)
{
- int i, idx = 0;
- char name[PMBUS_NAME_SIZE];
+ struct dentry *symlink_d, *debugfs = client->debugfs;
struct pmbus_debugfs_entry *entries;
+ const char *pathname, *symlink;
+ char name[PMBUS_NAME_SIZE];
+ int page, i, idx = 0;
- if (!pmbus_debugfs_dir)
- return -ENODEV;
+ /*
+ * client->debugfs may be NULL or an ERR_PTR(). dentry_path_raw()
+ * does not check if its parameters are valid, so validate
+ * client->debugfs before using it.
+ */
+ if (!pmbus_debugfs_dir || IS_ERR_OR_NULL(debugfs))
+ return;
/*
- * Create the debugfs directory for this device. Use the hwmon device
- * name to avoid conflicts (hwmon numbers are globally unique).
+ * Backwards compatibility: Create symlink from /pmbus/<hwmon_device>
+ * to i2c debugfs directory.
*/
- data->debugfs = debugfs_create_dir(dev_name(data->hwmon_dev),
- pmbus_debugfs_dir);
- if (IS_ERR_OR_NULL(data->debugfs)) {
- data->debugfs = NULL;
- return -ENODEV;
- }
+ pathname = dentry_path_raw(debugfs, name, sizeof(name));
+ if (IS_ERR(pathname))
+ return;
+
+ /*
+ * The path returned by dentry_path_raw() starts with '/'. Prepend it
+ * with ".." to get the symlink relative to the pmbus root directory.
+ */
+ symlink = kasprintf(GFP_KERNEL, "..%s", pathname);
+ if (!symlink)
+ return;
+
+ symlink_d = debugfs_create_symlink(dev_name(data->hwmon_dev),
+ pmbus_debugfs_dir, symlink);
+ kfree(symlink);
+
+ devm_add_action_or_reset(data->dev, pmbus_remove_symlink, symlink_d);
/*
* Allocate the max possible entries we need.
- * 7 entries device-specific
- * 10 entries page-specific
+ * device specific:
+ * ARRAY_SIZE(pmbus_debugfs_block_data) + 2
+ * page specific:
+ * ARRAY_SIZE(pmbus_debugfs_status_data) + 1
*/
entries = devm_kcalloc(data->dev,
- 7 + data->info->pages * 10, sizeof(*entries),
- GFP_KERNEL);
+ ARRAY_SIZE(pmbus_debugfs_block_data) + 2 +
+ data->info->pages * (ARRAY_SIZE(pmbus_debugfs_status_data) + 1),
+ sizeof(*entries), GFP_KERNEL);
if (!entries)
- return -ENOMEM;
+ return;
/*
* Add device-specific entries.
@@ -3551,184 +3587,67 @@ static int pmbus_init_debugfs(struct i2c_client *client,
* assume that values of the following registers are the same for all
* pages and report values only for page 0.
*/
- if (pmbus_check_byte_register(client, 0, PMBUS_REVISION)) {
+ if (!(data->flags & PMBUS_NO_CAPABILITY) &&
+ pmbus_check_byte_register(client, 0, PMBUS_CAPABILITY)) {
entries[idx].client = client;
entries[idx].page = 0;
- entries[idx].reg = PMBUS_REVISION;
- debugfs_create_file("revision", 0444, data->debugfs,
+ entries[idx].reg = PMBUS_CAPABILITY;
+ debugfs_create_file("capability", 0444, debugfs,
&entries[idx++],
&pmbus_debugfs_ops);
}
-
- if (pmbus_check_block_register(client, 0, PMBUS_MFR_ID)) {
- entries[idx].client = client;
- entries[idx].page = 0;
- entries[idx].reg = PMBUS_MFR_ID;
- debugfs_create_file("mfr_id", 0444, data->debugfs,
- &entries[idx++],
- &pmbus_debugfs_ops_mfr);
- }
-
- if (pmbus_check_block_register(client, 0, PMBUS_MFR_MODEL)) {
- entries[idx].client = client;
- entries[idx].page = 0;
- entries[idx].reg = PMBUS_MFR_MODEL;
- debugfs_create_file("mfr_model", 0444, data->debugfs,
- &entries[idx++],
- &pmbus_debugfs_ops_mfr);
- }
-
- if (pmbus_check_block_register(client, 0, PMBUS_MFR_REVISION)) {
- entries[idx].client = client;
- entries[idx].page = 0;
- entries[idx].reg = PMBUS_MFR_REVISION;
- debugfs_create_file("mfr_revision", 0444, data->debugfs,
- &entries[idx++],
- &pmbus_debugfs_ops_mfr);
- }
-
- if (pmbus_check_block_register(client, 0, PMBUS_MFR_LOCATION)) {
+ if (pmbus_check_byte_register(client, 0, PMBUS_REVISION)) {
entries[idx].client = client;
entries[idx].page = 0;
- entries[idx].reg = PMBUS_MFR_LOCATION;
- debugfs_create_file("mfr_location", 0444, data->debugfs,
+ entries[idx].reg = PMBUS_REVISION;
+ debugfs_create_file("pmbus_revision", 0444, debugfs,
&entries[idx++],
- &pmbus_debugfs_ops_mfr);
+ &pmbus_debugfs_ops);
}
- if (pmbus_check_block_register(client, 0, PMBUS_MFR_DATE)) {
- entries[idx].client = client;
- entries[idx].page = 0;
- entries[idx].reg = PMBUS_MFR_DATE;
- debugfs_create_file("mfr_date", 0444, data->debugfs,
- &entries[idx++],
- &pmbus_debugfs_ops_mfr);
- }
+ for (i = 0; i < ARRAY_SIZE(pmbus_debugfs_block_data); i++) {
+ const struct pmbus_debugfs_data *d = &pmbus_debugfs_block_data[i];
- if (pmbus_check_block_register(client, 0, PMBUS_MFR_SERIAL)) {
- entries[idx].client = client;
- entries[idx].page = 0;
- entries[idx].reg = PMBUS_MFR_SERIAL;
- debugfs_create_file("mfr_serial", 0444, data->debugfs,
- &entries[idx++],
- &pmbus_debugfs_ops_mfr);
+ if (pmbus_check_block_register(client, 0, d->reg)) {
+ entries[idx].client = client;
+ entries[idx].page = 0;
+ entries[idx].reg = d->reg;
+ debugfs_create_file(d->name, 0444, debugfs,
+ &entries[idx++],
+ &pmbus_debugfs_block_ops);
+ }
}
/* Add page specific entries */
- for (i = 0; i < data->info->pages; ++i) {
+ for (page = 0; page < data->info->pages; ++page) {
/* Check accessibility of status register if it's not page 0 */
- if (!i || pmbus_check_status_register(client, i)) {
+ if (!page || pmbus_check_status_register(client, page)) {
/* No need to set reg as we have special read op. */
entries[idx].client = client;
- entries[idx].page = i;
- scnprintf(name, PMBUS_NAME_SIZE, "status%d", i);
- debugfs_create_file(name, 0444, data->debugfs,
+ entries[idx].page = page;
+ scnprintf(name, PMBUS_NAME_SIZE, "status%d", page);
+ debugfs_create_file(name, 0444, debugfs,
&entries[idx++],
&pmbus_debugfs_ops_status);
}
- if (data->info->func[i] & PMBUS_HAVE_STATUS_VOUT) {
- entries[idx].client = client;
- entries[idx].page = i;
- entries[idx].reg = PMBUS_STATUS_VOUT;
- scnprintf(name, PMBUS_NAME_SIZE, "status%d_vout", i);
- debugfs_create_file(name, 0444, data->debugfs,
- &entries[idx++],
- &pmbus_debugfs_ops);
- }
-
- if (data->info->func[i] & PMBUS_HAVE_STATUS_IOUT) {
- entries[idx].client = client;
- entries[idx].page = i;
- entries[idx].reg = PMBUS_STATUS_IOUT;
- scnprintf(name, PMBUS_NAME_SIZE, "status%d_iout", i);
- debugfs_create_file(name, 0444, data->debugfs,
- &entries[idx++],
- &pmbus_debugfs_ops);
- }
-
- if (data->info->func[i] & PMBUS_HAVE_STATUS_INPUT) {
- entries[idx].client = client;
- entries[idx].page = i;
- entries[idx].reg = PMBUS_STATUS_INPUT;
- scnprintf(name, PMBUS_NAME_SIZE, "status%d_input", i);
- debugfs_create_file(name, 0444, data->debugfs,
- &entries[idx++],
- &pmbus_debugfs_ops);
- }
-
- if (data->info->func[i] & PMBUS_HAVE_STATUS_TEMP) {
- entries[idx].client = client;
- entries[idx].page = i;
- entries[idx].reg = PMBUS_STATUS_TEMPERATURE;
- scnprintf(name, PMBUS_NAME_SIZE, "status%d_temp", i);
- debugfs_create_file(name, 0444, data->debugfs,
- &entries[idx++],
- &pmbus_debugfs_ops);
- }
-
- if (pmbus_check_byte_register(client, i, PMBUS_STATUS_CML)) {
- entries[idx].client = client;
- entries[idx].page = i;
- entries[idx].reg = PMBUS_STATUS_CML;
- scnprintf(name, PMBUS_NAME_SIZE, "status%d_cml", i);
- debugfs_create_file(name, 0444, data->debugfs,
- &entries[idx++],
- &pmbus_debugfs_ops);
- }
-
- if (pmbus_check_byte_register(client, i, PMBUS_STATUS_OTHER)) {
- entries[idx].client = client;
- entries[idx].page = i;
- entries[idx].reg = PMBUS_STATUS_OTHER;
- scnprintf(name, PMBUS_NAME_SIZE, "status%d_other", i);
- debugfs_create_file(name, 0444, data->debugfs,
- &entries[idx++],
- &pmbus_debugfs_ops);
- }
-
- if (pmbus_check_byte_register(client, i,
- PMBUS_STATUS_MFR_SPECIFIC)) {
- entries[idx].client = client;
- entries[idx].page = i;
- entries[idx].reg = PMBUS_STATUS_MFR_SPECIFIC;
- scnprintf(name, PMBUS_NAME_SIZE, "status%d_mfr", i);
- debugfs_create_file(name, 0444, data->debugfs,
- &entries[idx++],
- &pmbus_debugfs_ops);
- }
-
- if (data->info->func[i] & PMBUS_HAVE_STATUS_FAN12) {
- entries[idx].client = client;
- entries[idx].page = i;
- entries[idx].reg = PMBUS_STATUS_FAN_12;
- scnprintf(name, PMBUS_NAME_SIZE, "status%d_fan12", i);
- debugfs_create_file(name, 0444, data->debugfs,
- &entries[idx++],
- &pmbus_debugfs_ops);
- }
-
- if (data->info->func[i] & PMBUS_HAVE_STATUS_FAN34) {
- entries[idx].client = client;
- entries[idx].page = i;
- entries[idx].reg = PMBUS_STATUS_FAN_34;
- scnprintf(name, PMBUS_NAME_SIZE, "status%d_fan34", i);
- debugfs_create_file(name, 0444, data->debugfs,
- &entries[idx++],
- &pmbus_debugfs_ops);
+ for (i = 0; i < ARRAY_SIZE(pmbus_debugfs_status_data); i++) {
+ const struct pmbus_debugfs_data *d =
+ &pmbus_debugfs_status_data[i];
+
+ if ((data->info->func[page] & d->flag) ||
+ (!d->flag && pmbus_check_byte_register(client, page, d->reg))) {
+ entries[idx].client = client;
+ entries[idx].page = page;
+ entries[idx].reg = d->reg;
+ scnprintf(name, PMBUS_NAME_SIZE, d->name, page);
+ debugfs_create_file(name, 0444, debugfs,
+ &entries[idx++],
+ &pmbus_debugfs_ops);
+ }
}
}
-
- return devm_add_action_or_reset(data->dev,
- pmbus_remove_debugfs, data->debugfs);
-}
-#else
-static int pmbus_init_debugfs(struct i2c_client *client,
- struct pmbus_data *data)
-{
- return 0;
}
-#endif /* IS_ENABLED(CONFIG_DEBUG_FS) */
int pmbus_do_probe(struct i2c_client *client, struct pmbus_driver_info *info)
{
@@ -3800,8 +3719,8 @@ int pmbus_do_probe(struct i2c_client *client, struct pmbus_driver_info *info)
data->groups[0] = &data->group;
memcpy(data->groups + 1, info->groups, sizeof(void *) * groups_num);
- data->hwmon_dev = devm_hwmon_device_register_with_groups(dev,
- name, data, data->groups);
+ data->hwmon_dev = devm_hwmon_device_register_with_groups(dev, name,
+ data, data->groups);
if (IS_ERR(data->hwmon_dev)) {
dev_err(dev, "Failed to register hwmon device\n");
return PTR_ERR(data->hwmon_dev);
@@ -3815,9 +3734,7 @@ int pmbus_do_probe(struct i2c_client *client, struct pmbus_driver_info *info)
if (ret)
return ret;
- ret = pmbus_init_debugfs(client, data);
- if (ret)
- dev_warn(dev, "Failed to register debugfs\n");
+ pmbus_init_debugfs(client, data);
return 0;
}
@@ -3825,9 +3742,15 @@ EXPORT_SYMBOL_NS_GPL(pmbus_do_probe, "PMBUS");
struct dentry *pmbus_get_debugfs_dir(struct i2c_client *client)
{
- struct pmbus_data *data = i2c_get_clientdata(client);
-
- return data->debugfs;
+ /*
+ * client->debugfs may be an ERR_PTR(). Returning that to the
+ * calling code would only force extra error handling on it
+ * without adding any value. Return NULL in that case.
+ */
+ if (IS_ERR_OR_NULL(client->debugfs))
+ return NULL;
+ return client->debugfs;
}
EXPORT_SYMBOL_NS_GPL(pmbus_get_debugfs_dir, "PMBUS");
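The per-register if-blocks removed above are collapsed into two lookup tables, pmbus_debugfs_block_data and pmbus_debugfs_status_data, which the new loops walk. The tables themselves are outside this hunk; the following is only a minimal sketch of their likely shape, with the field names (name, reg, flag) inferred from how the loop bodies use them, so the real layout may differ.

struct pmbus_debugfs_data {
	const char *name;	/* file name; "%d" is replaced by the page */
	int reg;		/* PMBus register backing the file */
	u32 flag;		/* PMBUS_HAVE_* bit, or 0 to probe the register */
};

static const struct pmbus_debugfs_data pmbus_debugfs_status_data[] = {
	{ .name = "status%d_vout", .reg = PMBUS_STATUS_VOUT, .flag = PMBUS_HAVE_STATUS_VOUT },
	{ .name = "status%d_iout", .reg = PMBUS_STATUS_IOUT, .flag = PMBUS_HAVE_STATUS_IOUT },
	{ .name = "status%d_cml",  .reg = PMBUS_STATUS_CML },	/* no flag: checked at runtime */
};

Adding a new status file then means adding one table row instead of another dozen-line if-block.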
diff --git a/drivers/hwmon/pt5161l.c b/drivers/hwmon/pt5161l.c
index a9f0b23f9e76..20e3cfa625f1 100644
--- a/drivers/hwmon/pt5161l.c
+++ b/drivers/hwmon/pt5161l.c
@@ -63,7 +63,6 @@ struct pt5161l_fw_ver {
/* Each client has this additional data */
struct pt5161l_data {
struct i2c_client *client;
- struct dentry *debugfs;
struct pt5161l_fw_ver fw_ver;
struct mutex lock; /* for atomic I2C transactions */
bool init_done;
@@ -72,8 +71,6 @@ struct pt5161l_data {
bool mm_wide_reg_access; /* MM assisted wide register access */
};
-static struct dentry *pt5161l_debugfs_dir;
-
/*
* Write multiple data bytes to Aries over I2C
*/
@@ -568,21 +565,16 @@ static const struct file_operations pt5161l_debugfs_ops_hb_sts = {
.open = simple_open,
};
-static int pt5161l_init_debugfs(struct pt5161l_data *data)
+static void pt5161l_init_debugfs(struct i2c_client *client, struct pt5161l_data *data)
{
- data->debugfs = debugfs_create_dir(dev_name(&data->client->dev),
- pt5161l_debugfs_dir);
-
- debugfs_create_file("fw_ver", 0444, data->debugfs, data,
+ debugfs_create_file("fw_ver", 0444, client->debugfs, data,
&pt5161l_debugfs_ops_fw_ver);
- debugfs_create_file("fw_load_status", 0444, data->debugfs, data,
+ debugfs_create_file("fw_load_status", 0444, client->debugfs, data,
&pt5161l_debugfs_ops_fw_load_sts);
- debugfs_create_file("heartbeat_status", 0444, data->debugfs, data,
+ debugfs_create_file("heartbeat_status", 0444, client->debugfs, data,
&pt5161l_debugfs_ops_hb_sts);
-
- return 0;
}
static int pt5161l_probe(struct i2c_client *client)
@@ -604,17 +596,12 @@ static int pt5161l_probe(struct i2c_client *client)
data,
&pt5161l_chip_info,
NULL);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
- pt5161l_init_debugfs(data);
-
- return PTR_ERR_OR_ZERO(hwmon_dev);
-}
-
-static void pt5161l_remove(struct i2c_client *client)
-{
- struct pt5161l_data *data = i2c_get_clientdata(client);
+ pt5161l_init_debugfs(client, data);
- debugfs_remove_recursive(data->debugfs);
+ return 0;
}
static const struct of_device_id __maybe_unused pt5161l_of_match[] = {
@@ -643,24 +630,9 @@ static struct i2c_driver pt5161l_driver = {
.acpi_match_table = ACPI_PTR(pt5161l_acpi_match),
},
.probe = pt5161l_probe,
- .remove = pt5161l_remove,
.id_table = pt5161l_id,
};
-
-static int __init pt5161l_init(void)
-{
- pt5161l_debugfs_dir = debugfs_create_dir("pt5161l", NULL);
- return i2c_add_driver(&pt5161l_driver);
-}
-
-static void __exit pt5161l_exit(void)
-{
- i2c_del_driver(&pt5161l_driver);
- debugfs_remove_recursive(pt5161l_debugfs_dir);
-}
-
-module_init(pt5161l_init);
-module_exit(pt5161l_exit);
+module_i2c_driver(pt5161l_driver);
MODULE_AUTHOR("Cosmo Chou <cosmo.chou@quantatw.com>");
MODULE_DESCRIPTION("Hwmon driver for Astera Labs Aries PCIe retimer");
diff --git a/drivers/hwmon/sg2042-mcu.c b/drivers/hwmon/sg2042-mcu.c
index aa3fb773602c..105131c4acf7 100644
--- a/drivers/hwmon/sg2042-mcu.c
+++ b/drivers/hwmon/sg2042-mcu.c
@@ -50,12 +50,9 @@
struct sg2042_mcu_data {
struct i2c_client *client;
- struct dentry *debugfs;
struct mutex mutex;
};
-static struct dentry *sgmcu_debugfs;
-
static ssize_t reset_count_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -292,18 +289,15 @@ static const struct hwmon_chip_info sg2042_mcu_chip_info = {
.info = sg2042_mcu_info,
};
-static void sg2042_mcu_debugfs_init(struct sg2042_mcu_data *mcu,
- struct device *dev)
+static void sg2042_mcu_debugfs_init(struct sg2042_mcu_data *mcu)
{
- mcu->debugfs = debugfs_create_dir(dev_name(dev), sgmcu_debugfs);
-
- debugfs_create_file("firmware_version", 0444, mcu->debugfs,
+ debugfs_create_file("firmware_version", 0444, mcu->client->debugfs,
mcu, &firmware_version_fops);
- debugfs_create_file("pcb_version", 0444, mcu->debugfs, mcu,
+ debugfs_create_file("pcb_version", 0444, mcu->client->debugfs, mcu,
&pcb_version_fops);
- debugfs_create_file("mcu_type", 0444, mcu->debugfs, mcu,
+ debugfs_create_file("mcu_type", 0444, mcu->client->debugfs, mcu,
&mcu_type_fops);
- debugfs_create_file("board_type", 0444, mcu->debugfs, mcu,
+ debugfs_create_file("board_type", 0444, mcu->client->debugfs, mcu,
&board_type_fops);
}
@@ -333,18 +327,11 @@ static int sg2042_mcu_i2c_probe(struct i2c_client *client)
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
- sg2042_mcu_debugfs_init(mcu, dev);
+ sg2042_mcu_debugfs_init(mcu);
return 0;
}
-static void sg2042_mcu_i2c_remove(struct i2c_client *client)
-{
- struct sg2042_mcu_data *mcu = i2c_get_clientdata(client);
-
- debugfs_remove_recursive(mcu->debugfs);
-}
-
static const struct i2c_device_id sg2042_mcu_id[] = {
{ "sg2042-hwmon-mcu" },
{ }
@@ -364,24 +351,9 @@ static struct i2c_driver sg2042_mcu_driver = {
.dev_groups = sg2042_mcu_groups,
},
.probe = sg2042_mcu_i2c_probe,
- .remove = sg2042_mcu_i2c_remove,
.id_table = sg2042_mcu_id,
};
-
-static int __init sg2042_mcu_init(void)
-{
- sgmcu_debugfs = debugfs_create_dir("sg2042-mcu", NULL);
- return i2c_add_driver(&sg2042_mcu_driver);
-}
-
-static void __exit sg2042_mcu_exit(void)
-{
- debugfs_remove_recursive(sgmcu_debugfs);
- i2c_del_driver(&sg2042_mcu_driver);
-}
-
-module_init(sg2042_mcu_init);
-module_exit(sg2042_mcu_exit);
+module_i2c_driver(sg2042_mcu_driver);
MODULE_AUTHOR("Inochi Amaoto <inochiama@outlook.com>");
MODULE_DESCRIPTION("MCU I2C driver for SG2042 soc platform");
diff --git a/drivers/hwmon/sht3x.c b/drivers/hwmon/sht3x.c
index 650b0bcc2359..557ad3e7752a 100644
--- a/drivers/hwmon/sht3x.c
+++ b/drivers/hwmon/sht3x.c
@@ -44,8 +44,6 @@ static const unsigned char sht3x_cmd_read_status_reg[] = { 0xf3, 0x2d };
static const unsigned char sht3x_cmd_clear_status_reg[] = { 0x30, 0x41 };
static const unsigned char sht3x_cmd_read_serial_number[] = { 0x37, 0x80 };
-static struct dentry *debugfs;
-
/* delays for single-shot mode i2c commands, both in us */
#define SHT3X_SINGLE_WAIT_TIME_HPM 15000
#define SHT3X_SINGLE_WAIT_TIME_MPM 6000
@@ -167,7 +165,6 @@ struct sht3x_data {
enum sht3x_chips chip_id;
struct mutex i2c_lock; /* lock for sending i2c commands */
struct mutex data_lock; /* lock for updating driver data */
- struct dentry *sensor_dir;
u8 mode;
const unsigned char *command;
@@ -837,23 +834,7 @@ static int sht3x_write(struct device *dev, enum hwmon_sensor_types type,
}
}
-static void sht3x_debugfs_init(struct sht3x_data *data)
-{
- char name[32];
-
- snprintf(name, sizeof(name), "i2c%u-%02x",
- data->client->adapter->nr, data->client->addr);
- data->sensor_dir = debugfs_create_dir(name, debugfs);
- debugfs_create_u32("serial_number", 0444,
- data->sensor_dir, &data->serial_number);
-}
-
-static void sht3x_debugfs_remove(void *sensor_dir)
-{
- debugfs_remove_recursive(sensor_dir);
-}
-
-static int sht3x_serial_number_read(struct sht3x_data *data)
+static void sht3x_serial_number_read(struct sht3x_data *data)
{
int ret;
char buffer[SHT3X_RESPONSE_LENGTH];
@@ -864,11 +845,12 @@ static int sht3x_serial_number_read(struct sht3x_data *data)
buffer,
SHT3X_RESPONSE_LENGTH, 0);
if (ret)
- return ret;
+ return;
data->serial_number = (buffer[0] << 24) | (buffer[1] << 16) |
(buffer[3] << 8) | buffer[4];
- return ret;
+
+ debugfs_create_u32("serial_number", 0444, client->debugfs, &data->serial_number);
}
static const struct hwmon_ops sht3x_ops = {
@@ -930,28 +912,14 @@ static int sht3x_probe(struct i2c_client *client)
if (ret)
return ret;
- ret = sht3x_serial_number_read(data);
- if (ret) {
- dev_dbg(dev, "unable to read serial number\n");
- } else {
- sht3x_debugfs_init(data);
- ret = devm_add_action_or_reset(dev,
- sht3x_debugfs_remove,
- data->sensor_dir);
- if (ret)
- return ret;
- }
-
- hwmon_dev = devm_hwmon_device_register_with_info(dev,
- client->name,
- data,
- &sht3x_chip_info,
- sht3x_groups);
-
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, data,
+ &sht3x_chip_info, sht3x_groups);
if (IS_ERR(hwmon_dev))
- dev_dbg(dev, "unable to register hwmon device\n");
+ return PTR_ERR(hwmon_dev);
+
+ sht3x_serial_number_read(data);
- return PTR_ERR_OR_ZERO(hwmon_dev);
+ return 0;
}
/* device ID table */
@@ -968,20 +936,7 @@ static struct i2c_driver sht3x_i2c_driver = {
.probe = sht3x_probe,
.id_table = sht3x_ids,
};
-
-static int __init sht3x_init(void)
-{
- debugfs = debugfs_create_dir("sht3x", NULL);
- return i2c_add_driver(&sht3x_i2c_driver);
-}
-module_init(sht3x_init);
-
-static void __exit sht3x_cleanup(void)
-{
- debugfs_remove_recursive(debugfs);
- i2c_del_driver(&sht3x_i2c_driver);
-}
-module_exit(sht3x_cleanup);
+module_i2c_driver(sht3x_i2c_driver);
MODULE_AUTHOR("David Frey <david.frey@sensirion.com>");
MODULE_AUTHOR("Pascal Sachs <pascal.sachs@sensirion.com>");
diff --git a/drivers/hwmon/tps23861.c b/drivers/hwmon/tps23861.c
index 80fb03f30c30..4cb3960d5170 100644
--- a/drivers/hwmon/tps23861.c
+++ b/drivers/hwmon/tps23861.c
@@ -114,7 +114,6 @@ struct tps23861_data {
struct regmap *regmap;
u32 shunt_resistor;
struct i2c_client *client;
- struct dentry *debugfs_dir;
};
static const struct regmap_config tps23861_regmap_config = {
@@ -503,25 +502,6 @@ static int tps23861_port_status_show(struct seq_file *s, void *data)
DEFINE_SHOW_ATTRIBUTE(tps23861_port_status);
-static void tps23861_init_debugfs(struct tps23861_data *data,
- struct device *hwmon_dev)
-{
- const char *debugfs_name;
-
- debugfs_name = devm_kasprintf(&data->client->dev, GFP_KERNEL, "%s-%s",
- data->client->name, dev_name(hwmon_dev));
- if (!debugfs_name)
- return;
-
- data->debugfs_dir = debugfs_create_dir(debugfs_name, NULL);
-
- debugfs_create_file("port_status",
- 0400,
- data->debugfs_dir,
- data,
- &tps23861_port_status_fops);
-}
-
static int tps23861_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -562,18 +542,12 @@ static int tps23861_probe(struct i2c_client *client)
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
- tps23861_init_debugfs(data, hwmon_dev);
+ debugfs_create_file("port_status", 0400, client->debugfs, data,
+ &tps23861_port_status_fops);
return 0;
}
-static void tps23861_remove(struct i2c_client *client)
-{
- struct tps23861_data *data = i2c_get_clientdata(client);
-
- debugfs_remove_recursive(data->debugfs_dir);
-}
-
static const struct of_device_id __maybe_unused tps23861_of_match[] = {
{ .compatible = "ti,tps23861", },
{ },
@@ -582,7 +556,6 @@ MODULE_DEVICE_TABLE(of, tps23861_of_match);
static struct i2c_driver tps23861_driver = {
.probe = tps23861_probe,
- .remove = tps23861_remove,
.driver = {
.name = "tps23861",
.of_match_table = of_match_ptr(tps23861_of_match),
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
index 7087197383c9..2cdbd5f107a2 100644
--- a/drivers/hwmon/xgene-hwmon.c
+++ b/drivers/hwmon/xgene-hwmon.c
@@ -105,7 +105,7 @@ struct xgene_hwmon_dev {
phys_addr_t comm_base_addr;
void *pcc_comm_addr;
- u64 usecs_lat;
+ unsigned int usecs_lat;
};
/*
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index 6505261e6068..cc8e952a6772 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -710,66 +710,6 @@ static int __hwspin_lock_request(struct hwspinlock *hwlock)
}
/**
- * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
- * @hwlock: a valid hwspinlock instance
- *
- * Returns: the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
- */
-int hwspin_lock_get_id(struct hwspinlock *hwlock)
-{
- if (!hwlock) {
- pr_err("invalid hwlock\n");
- return -EINVAL;
- }
-
- return hwlock_to_id(hwlock);
-}
-EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
-
-/**
- * hwspin_lock_request() - request an hwspinlock
- *
- * This function should be called by users of the hwspinlock device,
- * in order to dynamically assign them an unused hwspinlock.
- * Usually the user of this lock will then have to communicate the lock's id
- * to the remote core before it can be used for synchronization (to get the
- * id of a given hwlock, use hwspin_lock_get_id()).
- *
- * Should be called from a process context (might sleep)
- *
- * Returns: the address of the assigned hwspinlock, or %NULL on error
- */
-struct hwspinlock *hwspin_lock_request(void)
-{
- struct hwspinlock *hwlock;
- int ret;
-
- mutex_lock(&hwspinlock_tree_lock);
-
- /* look for an unused lock */
- ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
- 0, 1, HWSPINLOCK_UNUSED);
- if (ret == 0) {
- pr_warn("a free hwspinlock is not available\n");
- hwlock = NULL;
- goto out;
- }
-
- /* sanity check that should never fail */
- WARN_ON(ret > 1);
-
- /* mark as used and power up */
- ret = __hwspin_lock_request(hwlock);
- if (ret < 0)
- hwlock = NULL;
-
-out:
- mutex_unlock(&hwspinlock_tree_lock);
- return hwlock;
-}
-EXPORT_SYMBOL_GPL(hwspin_lock_request);
-
-/**
* hwspin_lock_request_specific() - request for a specific hwspinlock
* @id: index of the specific hwspinlock that is requested
*
@@ -913,40 +853,6 @@ int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
EXPORT_SYMBOL_GPL(devm_hwspin_lock_free);
/**
- * devm_hwspin_lock_request() - request an hwspinlock for a managed device
- * @dev: the device to request an hwspinlock
- *
- * This function should be called by users of the hwspinlock device,
- * in order to dynamically assign them an unused hwspinlock.
- * Usually the user of this lock will then have to communicate the lock's id
- * to the remote core before it can be used for synchronization (to get the
- * id of a given hwlock, use hwspin_lock_get_id()).
- *
- * Should be called from a process context (might sleep)
- *
- * Returns: the address of the assigned hwspinlock, or %NULL on error
- */
-struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
-{
- struct hwspinlock **ptr, *hwlock;
-
- ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return NULL;
-
- hwlock = hwspin_lock_request();
- if (hwlock) {
- *ptr = hwlock;
- devres_add(dev, ptr);
- } else {
- devres_free(ptr);
- }
-
- return hwlock;
-}
-EXPORT_SYMBOL_GPL(devm_hwspin_lock_request);
-
-/**
* devm_hwspin_lock_request_specific() - request for a specific hwspinlock for
* a managed device
* @dev: the device to request the specific hwspinlock
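With the dynamic-allocation API (hwspin_lock_request() and its devm variant) removed, the only way left to obtain a lock is by explicit id, typically resolved from the device tree, via the *_request_specific() calls this hunk keeps. A hedged sketch of that remaining path, with error handling trimmed and names hypothetical:

#include <linux/device.h>
#include <linux/hwspinlock.h>
#include <linux/of.h>

static int example_take_lock(struct device *dev)
{
	struct hwspinlock *lock;
	unsigned long flags;
	int id, ret;

	/* Look up the lock id from the "hwlocks" phandle in DT. */
	id = of_hwspin_lock_get_id(dev->of_node, 0);
	if (id < 0)
		return id;

	lock = devm_hwspin_lock_request_specific(dev, id);
	if (!lock)
		return -EBUSY;

	/* Protect a resource shared with the remote core for up to 10 ms. */
	ret = hwspin_lock_timeout_irqsave(lock, 10, &flags);
	if (ret)
		return ret;

	/* ... access the shared memory or register ... */

	hwspin_unlock_irqrestore(lock, &flags);
	return 0;
}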
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index 2c1a60577728..3d98e3371fff 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -1216,7 +1216,7 @@ static void etm4_fixup_wrong_ccitmin(struct etmv4_drvdata *drvdata)
* recorded value for 'drvdata->ccitmin' to workaround
* this problem.
*/
- if (is_midr_in_range_list(read_cpuid_id(), etm_wrong_ccitmin_cpus)) {
+ if (is_midr_in_range_list(etm_wrong_ccitmin_cpus)) {
if (drvdata->ccitmin == 256)
drvdata->ccitmin = 4;
}
diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c
index e9496fe97baa..495eb1dc8ac5 100644
--- a/drivers/hwtracing/stm/heartbeat.c
+++ b/drivers/hwtracing/stm/heartbeat.c
@@ -81,10 +81,8 @@ static int stm_heartbeat_init(void)
stm_heartbeat[i].data.type = STM_USER;
stm_heartbeat[i].data.link = stm_heartbeat_link;
stm_heartbeat[i].data.unlink = stm_heartbeat_unlink;
- hrtimer_init(&stm_heartbeat[i].hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_ABS);
- stm_heartbeat[i].hrtimer.function =
- stm_heartbeat_hrtimer_handler;
+ hrtimer_setup(&stm_heartbeat[i].hrtimer, stm_heartbeat_hrtimer_handler,
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
ret = stm_source_register_device(NULL, &stm_heartbeat[i].data);
if (ret)
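This hrtimer conversion, like the ones in i2c-imx, ti-tsc2046 and iio-trig-hrtimer further down, replaces the two-step hrtimer_init() plus manual .function assignment with a single hrtimer_setup() call that takes the handler up front. A minimal sketch of the new form, with hypothetical names:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer example_timer;

static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
{
	/* periodic work would go here */
	return HRTIMER_NORESTART;
}

static void example_timer_start(void)
{
	/* Handler is passed directly; no separate .function assignment. */
	hrtimer_setup(&example_timer, example_timer_fn, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);
	hrtimer_start(&example_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
}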
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index 544c94e86b89..1eac35838040 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -485,6 +485,8 @@ MODULE_DEVICE_TABLE(pci, ali1535_ids);
static int ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
+ int ret;
+
if (ali1535_setup(dev)) {
dev_warn(&dev->dev,
"ALI1535 not detected, module not inserted.\n");
@@ -496,7 +498,15 @@ static int ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id)
snprintf(ali1535_adapter.name, sizeof(ali1535_adapter.name),
"SMBus ALI1535 adapter at %04x", ali1535_offset);
- return i2c_add_adapter(&ali1535_adapter);
+ ret = i2c_add_adapter(&ali1535_adapter);
+ if (ret)
+ goto release_region;
+
+ return 0;
+
+release_region:
+ release_region(ali1535_smba, ALI1535_SMB_IOSIZE);
+ return ret;
}
static void ali1535_remove(struct pci_dev *dev)
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index 4761c7208102..418d11266671 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -472,6 +472,8 @@ MODULE_DEVICE_TABLE (pci, ali15x3_ids);
static int ali15x3_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
+ int ret;
+
if (ali15x3_setup(dev)) {
dev_err(&dev->dev,
"ALI15X3 not detected, module not inserted.\n");
@@ -483,7 +485,15 @@ static int ali15x3_probe(struct pci_dev *dev, const struct pci_device_id *id)
snprintf(ali15x3_adapter.name, sizeof(ali15x3_adapter.name),
"SMBus ALI15X3 adapter at %04x", ali15x3_smba);
- return i2c_add_adapter(&ali15x3_adapter);
+ ret = i2c_add_adapter(&ali15x3_adapter);
+ if (ret)
+ goto release_region;
+
+ return 0;
+
+release_region:
+ release_region(ali15x3_smba, ALI15X3_SMB_IOSIZE);
+ return ret;
}
static void ali15x3_remove(struct pci_dev *dev)
diff --git a/drivers/i2c/busses/i2c-amd-mp2-pci.c b/drivers/i2c/busses/i2c-amd-mp2-pci.c
index 143165300949..ef7370d3dbea 100644
--- a/drivers/i2c/busses/i2c-amd-mp2-pci.c
+++ b/drivers/i2c/busses/i2c-amd-mp2-pci.c
@@ -327,13 +327,11 @@ static int amd_mp2_pci_init(struct amd_mp2_dev *privdata,
amd_mp2_irq_isr, irq_flag, dev_name(&pci_dev->dev), privdata);
if (rc) {
pci_err(pci_dev, "Failure requesting irq %i: %d\n", privdata->dev_irq, rc);
- goto free_irq_vectors;
+ goto err_dma_mask;
}
return rc;
-free_irq_vectors:
- free_irq(privdata->dev_irq, privdata);
err_dma_mask:
pci_clear_master(pci_dev);
err_pci_enable:
@@ -376,7 +374,6 @@ static void amd_mp2_pci_remove(struct pci_dev *pci_dev)
pm_runtime_forbid(&pci_dev->dev);
pm_runtime_get_noresume(&pci_dev->dev);
- free_irq(privdata->dev_irq, privdata);
pci_clear_master(pci_dev);
amd_mp2_clear_reg(privdata);
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index ee0d25b498cb..9e5d454d8318 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1723,8 +1723,8 @@ static int i2c_imx_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&i2c_imx->slave_lock);
- hrtimer_init(&i2c_imx->slave_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- i2c_imx->slave_timer.function = i2c_imx_slave_timeout;
+ hrtimer_setup(&i2c_imx->slave_timer, i2c_imx_slave_timeout, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS);
match = device_get_match_data(&pdev->dev);
if (match)
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 92faf03d64cf..f18c3e74b076 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1048,23 +1048,6 @@ static int omap_i2c_transmit_data(struct omap_i2c_dev *omap, u8 num_bytes,
return 0;
}
-static irqreturn_t
-omap_i2c_isr(int irq, void *dev_id)
-{
- struct omap_i2c_dev *omap = dev_id;
- irqreturn_t ret = IRQ_HANDLED;
- u16 mask;
- u16 stat;
-
- stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
- mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG) & ~OMAP_I2C_STAT_NACK;
-
- if (stat & mask)
- ret = IRQ_WAKE_THREAD;
-
- return ret;
-}
-
static int omap_i2c_xfer_data(struct omap_i2c_dev *omap)
{
u16 bits;
@@ -1095,8 +1078,13 @@ static int omap_i2c_xfer_data(struct omap_i2c_dev *omap)
}
if (stat & OMAP_I2C_STAT_NACK) {
- err |= OMAP_I2C_STAT_NACK;
+ omap->cmd_err |= OMAP_I2C_STAT_NACK;
omap_i2c_ack_stat(omap, OMAP_I2C_STAT_NACK);
+
+ if (!(stat & ~OMAP_I2C_STAT_NACK)) {
+ err = -EAGAIN;
+ break;
+ }
}
if (stat & OMAP_I2C_STAT_AL) {
@@ -1472,7 +1460,7 @@ omap_i2c_probe(struct platform_device *pdev)
IRQF_NO_SUSPEND, pdev->name, omap);
else
r = devm_request_threaded_irq(&pdev->dev, omap->irq,
- omap_i2c_isr, omap_i2c_isr_thread,
+ NULL, omap_i2c_isr_thread,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
pdev->name, omap);
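Dropping the hard handler relies on how the IRQ core treats a NULL primary handler in devm_request_threaded_irq(): the core installs a default primary handler that just returns IRQ_WAKE_THREAD, so every interrupt is handled in the thread, and IRQF_ONESHOT keeps the line masked until the thread finishes. A minimal sketch of that request form, names hypothetical:

#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t example_irq_thread(int irq, void *dev_id)
{
	/* all interrupt work runs in process context here */
	return IRQ_HANDLED;
}

static int example_request_irq(struct device *dev, int irq, void *priv)
{
	/*
	 * NULL primary handler: the core wakes the thread for us.
	 * IRQF_ONESHOT is mandatory in that case.
	 */
	return devm_request_threaded_irq(dev, irq, NULL, example_irq_thread,
					 IRQF_ONESHOT, dev_name(dev), priv);
}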
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 3505cf29cedd..a19c3d251804 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -509,6 +509,8 @@ MODULE_DEVICE_TABLE(pci, sis630_ids);
static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
+ int ret;
+
if (sis630_setup(dev)) {
dev_err(&dev->dev,
"SIS630 compatible bus not detected, "
@@ -522,7 +524,15 @@ static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
snprintf(sis630_adapter.name, sizeof(sis630_adapter.name),
"SMBus SIS630 adapter at %04x", smbus_base + SMB_STS);
- return i2c_add_adapter(&sis630_adapter);
+ ret = i2c_add_adapter(&sis630_adapter);
+ if (ret)
+ goto release_region;
+
+ return 0;
+
+release_region:
+ release_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION);
+ return ret;
}
static void sis630_remove(struct pci_dev *dev)
diff --git a/drivers/idle/Makefile b/drivers/idle/Makefile
index 0a3c37510079..a34af1ba09bd 100644
--- a/drivers/idle/Makefile
+++ b/drivers/idle/Makefile
@@ -1,3 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_INTEL_IDLE) += intel_idle.o
+# Branch profiling isn't noinstr-safe
+ccflags-$(CONFIG_TRACE_BRANCH_PROFILING) += -DDISABLE_BRANCH_PROFILING
+
+obj-$(CONFIG_INTEL_IDLE) += intel_idle.o
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 0fdb1d1316c4..976f5be54e36 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -58,6 +58,7 @@
#include <asm/spec-ctrl.h>
#include <asm/tsc.h>
#include <asm/fpu/api.h>
+#include <asm/smp.h>
#define INTEL_IDLE_VERSION "0.5.1"
@@ -90,7 +91,6 @@ struct idle_cpu {
* Indicate which enable bits to clear here.
*/
unsigned long auto_demotion_disable_flags;
- bool byt_auto_demotion_disable_flag;
bool disable_promotion_to_c1e;
bool use_acpi;
};
@@ -229,6 +229,15 @@ static __cpuidle int intel_idle_s2idle(struct cpuidle_device *dev,
return 0;
}
+static void intel_idle_enter_dead(struct cpuidle_device *dev, int index)
+{
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+ struct cpuidle_state *state = &drv->states[index];
+ unsigned long eax = flg2MWAIT(state->flags);
+
+ mwait_play_dead(eax);
+}
+
/*
* States are indexed by the cstate number,
* which is also the index into the MWAIT hint array.
@@ -1464,13 +1473,11 @@ static const struct idle_cpu idle_cpu_snx __initconst = {
static const struct idle_cpu idle_cpu_byt __initconst = {
.state_table = byt_cstates,
.disable_promotion_to_c1e = true,
- .byt_auto_demotion_disable_flag = true,
};
static const struct idle_cpu idle_cpu_cht __initconst = {
.state_table = cht_cstates,
.disable_promotion_to_c1e = true,
- .byt_auto_demotion_disable_flag = true,
};
static const struct idle_cpu idle_cpu_ivb __initconst = {
@@ -1696,6 +1703,10 @@ static bool force_use_acpi __read_mostly; /* No effect if no_acpi is set. */
module_param_named(use_acpi, force_use_acpi, bool, 0444);
MODULE_PARM_DESC(use_acpi, "Use ACPI _CST for building the idle states list");
+static bool no_native __read_mostly; /* No effect if no_acpi is set. */
+module_param_named(no_native, no_native, bool, 0444);
+MODULE_PARM_DESC(no_native, "Ignore CPU-specific (native) idle states in favor of ACPI idle states");
+
static struct acpi_processor_power acpi_state_table __initdata;
/**
@@ -1804,6 +1815,7 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
mark_tsc_unstable("TSC halts in idle");
state->enter = intel_idle;
+ state->enter_dead = intel_idle_enter_dead;
state->enter_s2idle = intel_idle_s2idle;
}
}
@@ -1838,6 +1850,11 @@ static bool __init intel_idle_off_by_default(unsigned int flags, u32 mwait_hint)
}
return true;
}
+
+static inline bool ignore_native(void)
+{
+ return no_native && !no_acpi;
+}
#else /* !CONFIG_ACPI_PROCESSOR_CSTATE */
#define force_use_acpi (false)
@@ -1847,6 +1864,7 @@ static inline bool intel_idle_off_by_default(unsigned int flags, u32 mwait_hint)
{
return false;
}
+static inline bool ignore_native(void) { return false; }
#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */
/**
@@ -2059,6 +2077,15 @@ static void __init spr_idle_state_table_update(void)
}
}
+/**
+ * byt_cht_auto_demotion_disable - Disable Bay/Cherry Trail auto-demotion.
+ */
+static void __init byt_cht_auto_demotion_disable(void)
+{
+ wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0);
+ wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0);
+}
+
static bool __init intel_idle_verify_cstate(unsigned int mwait_hint)
{
unsigned int mwait_cstate = (MWAIT_HINT2CSTATE(mwait_hint) + 1) &
@@ -2140,6 +2167,10 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
case INTEL_ATOM_GRACEMONT:
adl_idle_state_table_update();
break;
+ case INTEL_ATOM_SILVERMONT:
+ case INTEL_ATOM_AIRMONT:
+ byt_cht_auto_demotion_disable();
+ break;
}
for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
@@ -2153,6 +2184,9 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
!cpuidle_state_table[cstate].enter_s2idle)
break;
+ if (!cpuidle_state_table[cstate].enter_dead)
+ cpuidle_state_table[cstate].enter_dead = intel_idle_enter_dead;
+
/* If marked as unusable, skip this state. */
if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_UNUSABLE) {
pr_debug("state %s is disabled\n",
@@ -2182,11 +2216,6 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
drv->state_count++;
}
-
- if (icpu->byt_auto_demotion_disable_flag) {
- wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0);
- wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0);
- }
}
/**
@@ -2332,6 +2361,10 @@ static int __init intel_idle_init(void)
pr_debug("MWAIT substates: 0x%x\n", mwait_substates);
icpu = (const struct idle_cpu *)id->driver_data;
+ if (icpu && ignore_native()) {
+ pr_debug("ignoring native CPU idle states\n");
+ icpu = NULL;
+ }
if (icpu) {
if (icpu->state_table)
cpuidle_state_table = icpu->state_table;
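Two behavioural changes land in this intel_idle patch set: the Bay Trail/Cherry Trail auto-demotion quirk moves from a struct idle_cpu flag to an explicit table-update case, and every MWAIT-based state gains a default :enter_dead() handler so offlined CPUs can park in the requested C-state. After the init loop above runs, a state entry ends up shaped roughly like the following hedged sketch; the real tables are per-platform and the latency/residency numbers here are illustrative only.

static struct cpuidle_state example_cstates[] __initdata = {
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 85,
		.target_residency = 200,
		.enter = &intel_idle,
		.enter_dead = intel_idle_enter_dead,	/* filled in by the loop if unset */
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};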
diff --git a/drivers/iio/adc/ti-tsc2046.c b/drivers/iio/adc/ti-tsc2046.c
index 7dde5713973f..49560059f4b7 100644
--- a/drivers/iio/adc/ti-tsc2046.c
+++ b/drivers/iio/adc/ti-tsc2046.c
@@ -812,9 +812,7 @@ static int tsc2046_adc_probe(struct spi_device *spi)
spin_lock_init(&priv->state_lock);
priv->state = TSC2046_STATE_SHUTDOWN;
- hrtimer_init(&priv->trig_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_SOFT);
- priv->trig_timer.function = tsc2046_adc_timer;
+ hrtimer_setup(&priv->trig_timer, tsc2046_adc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
ret = devm_iio_trigger_register(dev, trig);
if (ret) {
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index cfbfcaefec0f..e1f8740ae688 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -1245,8 +1245,8 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, unsigned int *conf, int irq)
channel_templates = xadc_us_channels;
max_channels = ARRAY_SIZE(xadc_us_channels);
}
- channels = devm_kmemdup(dev, channel_templates,
- sizeof(channels[0]) * max_channels, GFP_KERNEL);
+ channels = devm_kmemdup_array(dev, channel_templates, max_channels,
+ sizeof(*channel_templates), GFP_KERNEL);
if (!channels)
return -ENOMEM;
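devm_kmemdup_array() is the array-aware variant of devm_kmemdup(): the element count and element size are passed separately and multiplied with overflow checking, in the same n-before-size order the call above uses. A hedged sketch of the call shape, with a made-up channel type:

#include <linux/array_size.h>
#include <linux/device.h>
#include <linux/slab.h>

struct example_channel {
	int id;
	int scale;
};

static const struct example_channel example_template[] = {
	{ .id = 0, .scale = 1 },
	{ .id = 1, .scale = 2 },
};

static struct example_channel *example_dup_channels(struct device *dev)
{
	/* count and element size stay separate; the helper checks overflow */
	return devm_kmemdup_array(dev, example_template,
				  ARRAY_SIZE(example_template),
				  sizeof(*example_template), GFP_KERNEL);
}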
diff --git a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c
index 0732cfa258c4..8cc071463249 100644
--- a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c
+++ b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c
@@ -7,7 +7,7 @@
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
*/
-#include <linux/device.h>
+#include <linux/device/devres.h>
#include <linux/err.h>
#include <linux/gfp_types.h>
#include <linux/i2c.h>
diff --git a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c
index 43ec57c1e604..806e55f75f65 100644
--- a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c
+++ b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c
@@ -7,7 +7,7 @@
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
*/
-#include <linux/device.h>
+#include <linux/device/devres.h>
#include <linux/err.h>
#include <linux/gfp_types.h>
#include <linux/module.h>
diff --git a/drivers/iio/trigger/iio-trig-hrtimer.c b/drivers/iio/trigger/iio-trig-hrtimer.c
index 716c795d08fb..82c72baccb62 100644
--- a/drivers/iio/trigger/iio-trig-hrtimer.c
+++ b/drivers/iio/trigger/iio-trig-hrtimer.c
@@ -145,8 +145,8 @@ static struct iio_sw_trigger *iio_trig_hrtimer_probe(const char *name)
trig_info->swt.trigger->ops = &iio_hrtimer_trigger_ops;
trig_info->swt.trigger->dev.groups = iio_hrtimer_attr_groups;
- hrtimer_init(&trig_info->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
- trig_info->timer.function = iio_hrtimer_trig_handler;
+ hrtimer_setup(&trig_info->timer, iio_hrtimer_trig_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_HARD);
trig_info->sampling_frequency[0] = HRTIMER_DEFAULT_SAMPLING_FREQUENCY;
trig_info->period = NSEC_PER_SEC / trig_info->sampling_frequency[0];
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 8ab4eea5a0a5..d49ded7e95f0 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -39,6 +39,7 @@ ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_marshall.o \
uverbs_std_types_async_fd.o \
uverbs_std_types_srq.o \
uverbs_std_types_wq.o \
- uverbs_std_types_qp.o
+ uverbs_std_types_qp.o \
+ ucaps.o
ib_uverbs-$(CONFIG_INFINIBAND_USER_MEM) += umem.o umem_dmabuf.o
ib_uverbs-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index f8413f8a9f26..9979a351577f 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -1501,6 +1501,12 @@ ib_cache_update(struct ib_device *device, u32 port, bool update_gids,
device->port_data[port].cache.pkey = pkey_cache;
}
device->port_data[port].cache.lmc = tprops->lmc;
+
+ if (device->port_data[port].cache.port_state != IB_PORT_NOP &&
+ device->port_data[port].cache.port_state != tprops->state)
+ ibdev_info(device, "Port: %d Link %s\n", port,
+ ib_port_state_to_str(tprops->state));
+
device->port_data[port].cache.port_state = tprops->state;
device->port_data[port].cache.subnet_prefix = tprops->subnet_prefix;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 91db10515d74..fedcdb56fb6b 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -739,12 +739,26 @@ cma_validate_port(struct ib_device *device, u32 port,
goto out;
}
- if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
- ndev = dev_get_by_index(dev_addr->net, bound_if_index);
- if (!ndev)
- goto out;
+ /*
+ * An RXE device should work with both TUN devices and normal
+ * Ethernet devices. Use driver_id to check whether a device is an
+ * RXE device. ARPHRD_NONE means a TUN device.
+ */
+ if (device->ops.driver_id == RDMA_DRIVER_RXE) {
+ if ((dev_type == ARPHRD_NONE || dev_type == ARPHRD_ETHER)
+ && rdma_protocol_roce(device, port)) {
+ ndev = dev_get_by_index(dev_addr->net, bound_if_index);
+ if (!ndev)
+ goto out;
+ }
} else {
- gid_type = IB_GID_TYPE_IB;
+ if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
+ ndev = dev_get_by_index(dev_addr->net, bound_if_index);
+ if (!ndev)
+ goto out;
+ } else {
+ gid_type = IB_GID_TYPE_IB;
+ }
}
sgid_attr = rdma_find_gid_by_port(device, gid, gid_type, port, ndev);
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index af59486fe418..e6ec7b7a40af 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -12,7 +12,8 @@
static int __counter_set_mode(struct rdma_port_counter *port_counter,
enum rdma_nl_counter_mode new_mode,
- enum rdma_nl_counter_mask new_mask)
+ enum rdma_nl_counter_mask new_mask,
+ bool bind_opcnt)
{
if (new_mode == RDMA_COUNTER_MODE_AUTO) {
if (new_mask & (~ALL_AUTO_MODE_MASKS))
@@ -23,6 +24,7 @@ static int __counter_set_mode(struct rdma_port_counter *port_counter,
port_counter->mode.mode = new_mode;
port_counter->mode.mask = new_mask;
+ port_counter->mode.bind_opcnt = bind_opcnt;
return 0;
}
@@ -41,6 +43,7 @@ static int __counter_set_mode(struct rdma_port_counter *port_counter,
*/
int rdma_counter_set_auto_mode(struct ib_device *dev, u32 port,
enum rdma_nl_counter_mask mask,
+ bool bind_opcnt,
struct netlink_ext_ack *extack)
{
struct rdma_port_counter *port_counter;
@@ -59,12 +62,13 @@ int rdma_counter_set_auto_mode(struct ib_device *dev, u32 port,
RDMA_COUNTER_MODE_NONE;
if (port_counter->mode.mode == mode &&
- port_counter->mode.mask == mask) {
+ port_counter->mode.mask == mask &&
+ port_counter->mode.bind_opcnt == bind_opcnt) {
ret = 0;
goto out;
}
- ret = __counter_set_mode(port_counter, mode, mask);
+ ret = __counter_set_mode(port_counter, mode, mask, bind_opcnt);
out:
mutex_unlock(&port_counter->lock);
@@ -89,7 +93,7 @@ static void auto_mode_init_counter(struct rdma_counter *counter,
}
static int __rdma_counter_bind_qp(struct rdma_counter *counter,
- struct ib_qp *qp)
+ struct ib_qp *qp, u32 port)
{
int ret;
@@ -100,7 +104,7 @@ static int __rdma_counter_bind_qp(struct rdma_counter *counter,
return -EOPNOTSUPP;
mutex_lock(&counter->lock);
- ret = qp->device->ops.counter_bind_qp(counter, qp);
+ ret = qp->device->ops.counter_bind_qp(counter, qp, port);
mutex_unlock(&counter->lock);
return ret;
@@ -140,7 +144,8 @@ out:
static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u32 port,
struct ib_qp *qp,
- enum rdma_nl_counter_mode mode)
+ enum rdma_nl_counter_mode mode,
+ bool bind_opcnt)
{
struct rdma_port_counter *port_counter;
struct rdma_counter *counter;
@@ -149,13 +154,15 @@ static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u32 port,
if (!dev->ops.counter_dealloc || !dev->ops.counter_alloc_stats)
return NULL;
- counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+ counter = rdma_zalloc_drv_obj(dev, rdma_counter);
if (!counter)
return NULL;
counter->device = dev;
counter->port = port;
+ dev->ops.counter_init(counter);
+
rdma_restrack_new(&counter->res, RDMA_RESTRACK_COUNTER);
counter->stats = dev->ops.counter_alloc_stats(counter);
if (!counter->stats)
@@ -166,7 +173,7 @@ static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u32 port,
switch (mode) {
case RDMA_COUNTER_MODE_MANUAL:
ret = __counter_set_mode(port_counter, RDMA_COUNTER_MODE_MANUAL,
- 0);
+ 0, bind_opcnt);
if (ret) {
mutex_unlock(&port_counter->lock);
goto err_mode;
@@ -185,10 +192,11 @@ static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u32 port,
mutex_unlock(&port_counter->lock);
counter->mode.mode = mode;
+ counter->mode.bind_opcnt = bind_opcnt;
kref_init(&counter->kref);
mutex_init(&counter->lock);
- ret = __rdma_counter_bind_qp(counter, qp);
+ ret = __rdma_counter_bind_qp(counter, qp, port);
if (ret)
goto err_mode;
@@ -213,7 +221,8 @@ static void rdma_counter_free(struct rdma_counter *counter)
port_counter->num_counters--;
if (!port_counter->num_counters &&
(port_counter->mode.mode == RDMA_COUNTER_MODE_MANUAL))
- __counter_set_mode(port_counter, RDMA_COUNTER_MODE_NONE, 0);
+ __counter_set_mode(port_counter, RDMA_COUNTER_MODE_NONE, 0,
+ false);
mutex_unlock(&port_counter->lock);
@@ -238,7 +247,7 @@ static bool auto_mode_match(struct ib_qp *qp, struct rdma_counter *counter,
return match;
}
-static int __rdma_counter_unbind_qp(struct ib_qp *qp)
+static int __rdma_counter_unbind_qp(struct ib_qp *qp, u32 port)
{
struct rdma_counter *counter = qp->counter;
int ret;
@@ -247,7 +256,7 @@ static int __rdma_counter_unbind_qp(struct ib_qp *qp)
return -EOPNOTSUPP;
mutex_lock(&counter->lock);
- ret = qp->device->ops.counter_unbind_qp(qp);
+ ret = qp->device->ops.counter_unbind_qp(qp, port);
mutex_unlock(&counter->lock);
return ret;
@@ -339,13 +348,14 @@ int rdma_counter_bind_qp_auto(struct ib_qp *qp, u32 port)
counter = rdma_get_counter_auto_mode(qp, port);
if (counter) {
- ret = __rdma_counter_bind_qp(counter, qp);
+ ret = __rdma_counter_bind_qp(counter, qp, port);
if (ret) {
kref_put(&counter->kref, counter_release);
return ret;
}
} else {
- counter = alloc_and_bind(dev, port, qp, RDMA_COUNTER_MODE_AUTO);
+ counter = alloc_and_bind(dev, port, qp, RDMA_COUNTER_MODE_AUTO,
+ port_counter->mode.bind_opcnt);
if (!counter)
return -ENOMEM;
}
@@ -358,7 +368,7 @@ int rdma_counter_bind_qp_auto(struct ib_qp *qp, u32 port)
* @force:
* true - Decrease the counter ref-count anyway (e.g., qp destroy)
*/
-int rdma_counter_unbind_qp(struct ib_qp *qp, bool force)
+int rdma_counter_unbind_qp(struct ib_qp *qp, u32 port, bool force)
{
struct rdma_counter *counter = qp->counter;
int ret;
@@ -366,7 +376,7 @@ int rdma_counter_unbind_qp(struct ib_qp *qp, bool force)
if (!counter)
return -EINVAL;
- ret = __rdma_counter_unbind_qp(qp);
+ ret = __rdma_counter_unbind_qp(qp, port);
if (ret && !force)
return ret;
@@ -513,7 +523,7 @@ int rdma_counter_bind_qpn(struct ib_device *dev, u32 port,
goto err_task;
}
- ret = __rdma_counter_bind_qp(counter, qp);
+ ret = __rdma_counter_bind_qp(counter, qp, port);
if (ret)
goto err_task;
@@ -558,7 +568,7 @@ int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u32 port,
goto err;
}
- counter = alloc_and_bind(dev, port, qp, RDMA_COUNTER_MODE_MANUAL);
+ counter = alloc_and_bind(dev, port, qp, RDMA_COUNTER_MODE_MANUAL, true);
if (!counter) {
ret = -ENOMEM;
goto err;
@@ -604,7 +614,7 @@ int rdma_counter_unbind_qpn(struct ib_device *dev, u32 port,
goto out;
}
- ret = rdma_counter_unbind_qp(qp, false);
+ ret = rdma_counter_unbind_qp(qp, port, false);
out:
rdma_restrack_put(&qp->res);
@@ -613,13 +623,15 @@ out:
int rdma_counter_get_mode(struct ib_device *dev, u32 port,
enum rdma_nl_counter_mode *mode,
- enum rdma_nl_counter_mask *mask)
+ enum rdma_nl_counter_mask *mask,
+ bool *opcnt)
{
struct rdma_port_counter *port_counter;
port_counter = &dev->port_data[port].port_counter;
*mode = port_counter->mode.mode;
*mask = port_counter->mode.mask;
+ *opcnt = port_counter->mode.bind_opcnt;
return 0;
}
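The counter rework threads two things through the core: the port number is now passed down to the bind/unbind driver ops, and counters are allocated as driver objects (rdma_zalloc_drv_obj() plus a new counter_init op) so a driver can embed its own state. The sketch below shows how a driver might wire that up; the struct and function names are hypothetical and the op signatures are inferred from the calls in this patch, so treat it as illustrative rather than the actual driver interface.

#include <rdma/ib_verbs.h>
#include <rdma/rdma_counter.h>

/* Driver-private counter wrapper embedding the core object first. */
struct example_counter {
	struct rdma_counter rdma;	/* must be the first member */
	u32 hw_set_id;
};

static void example_counter_init(struct rdma_counter *counter)
{
	struct example_counter *ec =
		container_of(counter, struct example_counter, rdma);

	ec->hw_set_id = 0;
}

static int example_counter_bind_qp(struct rdma_counter *counter,
				   struct ib_qp *qp, u32 port)
{
	/* program the HW counter set for this QP on @port */
	return 0;
}

static int example_counter_unbind_qp(struct ib_qp *qp, u32 port)
{
	return 0;
}

static const struct ib_device_ops example_dev_ops = {
	.counter_init = example_counter_init,
	.counter_bind_qp = example_counter_bind_qp,
	.counter_unbind_qp = example_counter_unbind_qp,
	/* tells the core how much to allocate per counter */
	INIT_RDMA_OBJ_SIZE(rdma_counter, example_counter, rdma),
};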
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 0ded91f056f3..b4e3e4beb7f4 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -528,6 +528,8 @@ static struct class ib_class = {
static void rdma_init_coredev(struct ib_core_device *coredev,
struct ib_device *dev, struct net *net)
{
+ bool is_full_dev = &dev->coredev == coredev;
+
/* This BUILD_BUG_ON is intended to catch layout change
* of union of ib_core_device and device.
* dev must be the first element as ib_core and providers
@@ -539,6 +541,13 @@ static void rdma_init_coredev(struct ib_core_device *coredev,
coredev->dev.class = &ib_class;
coredev->dev.groups = dev->groups;
+
+ /*
+ * Don't expose hw counters outside of the init namespace.
+ */
+ if (!is_full_dev && dev->hw_stats_attr_index)
+ coredev->dev.groups[dev->hw_stats_attr_index] = NULL;
+
device_initialize(&coredev->dev);
coredev->owner = dev;
INIT_LIST_HEAD(&coredev->port_list);
@@ -1341,9 +1350,11 @@ static void ib_device_notify_register(struct ib_device *device)
u32 port;
int ret;
+ down_read(&devices_rwsem);
+
ret = rdma_nl_notify_event(device, 0, RDMA_REGISTER_EVENT);
if (ret)
- return;
+ goto out;
rdma_for_each_port(device, port) {
netdev = ib_device_get_netdev(device, port);
@@ -1354,8 +1365,11 @@ static void ib_device_notify_register(struct ib_device *device)
RDMA_NETDEV_ATTACH_EVENT);
dev_put(netdev);
if (ret)
- return;
+ goto out;
}
+
+out:
+ up_read(&devices_rwsem);
}
/**
@@ -2669,6 +2683,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, counter_alloc_stats);
SET_DEVICE_OP(dev_ops, counter_bind_qp);
SET_DEVICE_OP(dev_ops, counter_dealloc);
+ SET_DEVICE_OP(dev_ops, counter_init);
SET_DEVICE_OP(dev_ops, counter_unbind_qp);
SET_DEVICE_OP(dev_ops, counter_update_stats);
SET_DEVICE_OP(dev_ops, create_ah);
@@ -2783,6 +2798,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_OBJ_SIZE(dev_ops, ib_srq);
SET_OBJ_SIZE(dev_ops, ib_ucontext);
SET_OBJ_SIZE(dev_ops, ib_xrcd);
+ SET_OBJ_SIZE(dev_ops, rdma_counter);
}
EXPORT_SYMBOL(ib_set_device_ops);
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 7e3a55349e10..f4486cbd8f45 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -109,7 +109,9 @@ static struct ctl_table iwcm_ctl_table[] = {
.data = &default_backlog,
.maxlen = sizeof(default_backlog),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_INT_MAX,
},
};
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 1fd54d5c4dd8..73f3a0b9a54b 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -2671,11 +2671,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
struct ib_mad_private *mad)
{
unsigned long flags;
- int post, ret;
struct ib_mad_private *mad_priv;
struct ib_sge sg_list;
struct ib_recv_wr recv_wr;
struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
+ int ret = 0;
/* Initialize common scatter list fields */
sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;
@@ -2685,7 +2685,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
recv_wr.sg_list = &sg_list;
recv_wr.num_sge = 1;
- do {
+ while (true) {
/* Allocate and map receive buffer */
if (mad) {
mad_priv = mad;
@@ -2693,10 +2693,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
} else {
mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
GFP_ATOMIC);
- if (!mad_priv) {
- ret = -ENOMEM;
- break;
- }
+ if (!mad_priv)
+ return -ENOMEM;
}
sg_list.length = mad_priv_dma_size(mad_priv);
sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
@@ -2705,37 +2703,41 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
DMA_FROM_DEVICE);
if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
sg_list.addr))) {
- kfree(mad_priv);
ret = -ENOMEM;
- break;
+ goto free_mad_priv;
}
mad_priv->header.mapping = sg_list.addr;
mad_priv->header.mad_list.mad_queue = recv_queue;
mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;
-
- /* Post receive WR */
spin_lock_irqsave(&recv_queue->lock, flags);
- post = (++recv_queue->count < recv_queue->max_active);
- list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
+ if (recv_queue->count >= recv_queue->max_active) {
+ /* Fully populated the receive queue */
+ spin_unlock_irqrestore(&recv_queue->lock, flags);
+ break;
+ }
+ recv_queue->count++;
+ list_add_tail(&mad_priv->header.mad_list.list,
+ &recv_queue->list);
spin_unlock_irqrestore(&recv_queue->lock, flags);
+
ret = ib_post_recv(qp_info->qp, &recv_wr, NULL);
if (ret) {
spin_lock_irqsave(&recv_queue->lock, flags);
list_del(&mad_priv->header.mad_list.list);
recv_queue->count--;
spin_unlock_irqrestore(&recv_queue->lock, flags);
- ib_dma_unmap_single(qp_info->port_priv->device,
- mad_priv->header.mapping,
- mad_priv_dma_size(mad_priv),
- DMA_FROM_DEVICE);
- kfree(mad_priv);
dev_err(&qp_info->port_priv->device->dev,
"ib_post_recv failed: %d\n", ret);
break;
}
- } while (post);
+ }
+ ib_dma_unmap_single(qp_info->port_priv->device,
+ mad_priv->header.mapping,
+ mad_priv_dma_size(mad_priv), DMA_FROM_DEVICE);
+free_mad_priv:
+ kfree(mad_priv);
return ret;
}
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index cb987ab0177c..a872643e8039 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -171,6 +171,7 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_ATTR_PARENT_NAME] = { .type = NLA_NUL_STRING },
[RDMA_NLDEV_ATTR_NAME_ASSIGN_TYPE] = { .type = NLA_U8 },
[RDMA_NLDEV_ATTR_EVENT_TYPE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_STAT_OPCOUNTER_ENABLED] = { .type = NLA_U8 },
};
static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
@@ -2028,6 +2029,7 @@ static int nldev_stat_set_mode_doit(struct sk_buff *msg,
struct ib_device *device, u32 port)
{
u32 mode, mask = 0, qpn, cntn = 0;
+ bool opcnt = false;
int ret;
/* Currently only counter for QP is supported */
@@ -2035,12 +2037,17 @@ static int nldev_stat_set_mode_doit(struct sk_buff *msg,
nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
return -EINVAL;
+ if (tb[RDMA_NLDEV_ATTR_STAT_OPCOUNTER_ENABLED])
+ opcnt = !!nla_get_u8(
+ tb[RDMA_NLDEV_ATTR_STAT_OPCOUNTER_ENABLED]);
+
mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]);
if (mode == RDMA_COUNTER_MODE_AUTO) {
if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK])
mask = nla_get_u32(
tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]);
- return rdma_counter_set_auto_mode(device, port, mask, extack);
+ return rdma_counter_set_auto_mode(device, port, mask, opcnt,
+ extack);
}
if (!tb[RDMA_NLDEV_ATTR_RES_LQPN])
@@ -2358,6 +2365,7 @@ static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
struct ib_device *device;
struct sk_buff *msg;
u32 index, port;
+ bool opcnt;
int ret;
if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID])
@@ -2393,7 +2401,7 @@ static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
goto err_msg;
}
- ret = rdma_counter_get_mode(device, port, &mode, &mask);
+ ret = rdma_counter_get_mode(device, port, &mode, &mask, &opcnt);
if (ret)
goto err_msg;
@@ -2410,6 +2418,12 @@ static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
goto err_msg;
}
+ if ((mode == RDMA_COUNTER_MODE_AUTO) &&
+ nla_put_u8(msg, RDMA_NLDEV_ATTR_STAT_OPCOUNTER_ENABLED, opcnt)) {
+ ret = -EMSGSIZE;
+ goto err_msg;
+ }
+
nlmsg_end(msg, nlh);
ib_device_put(device);
return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 9f97bef02149..0ed862b38b44 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -216,24 +216,12 @@ static ssize_t state_show(struct ib_device *ibdev, u32 port_num,
struct ib_port_attr attr;
ssize_t ret;
- static const char *state_name[] = {
- [IB_PORT_NOP] = "NOP",
- [IB_PORT_DOWN] = "DOWN",
- [IB_PORT_INIT] = "INIT",
- [IB_PORT_ARMED] = "ARMED",
- [IB_PORT_ACTIVE] = "ACTIVE",
- [IB_PORT_ACTIVE_DEFER] = "ACTIVE_DEFER"
- };
-
ret = ib_query_port(ibdev, port_num, &attr);
if (ret)
return ret;
return sysfs_emit(buf, "%d: %s\n", attr.state,
- attr.state >= 0 &&
- attr.state < ARRAY_SIZE(state_name) ?
- state_name[attr.state] :
- "UNKNOWN");
+ ib_port_state_to_str(attr.state));
}
static ssize_t lid_show(struct ib_device *ibdev, u32 port_num,
@@ -988,6 +976,7 @@ int ib_setup_device_attrs(struct ib_device *ibdev)
for (i = 0; i != ARRAY_SIZE(ibdev->groups); i++)
if (!ibdev->groups[i]) {
ibdev->groups[i] = &data->group;
+ ibdev->hw_stats_attr_index = i;
return 0;
}
WARN(true, "struct ib_device->groups is too small");
diff --git a/drivers/infiniband/core/ucaps.c b/drivers/infiniband/core/ucaps.c
new file mode 100644
index 000000000000..6853c6d078f9
--- /dev/null
+++ b/drivers/infiniband/core/ucaps.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#include <linux/kref.h>
+#include <linux/cdev.h>
+#include <linux/mutex.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <rdma/ib_ucaps.h>
+
+#define RDMA_UCAP_FIRST RDMA_UCAP_MLX5_CTRL_LOCAL
+
+static DEFINE_MUTEX(ucaps_mutex);
+static struct ib_ucap *ucaps_list[RDMA_UCAP_MAX];
+static bool ucaps_class_is_registered;
+static dev_t ucaps_base_dev;
+
+struct ib_ucap {
+ struct cdev cdev;
+ struct device dev;
+ struct kref ref;
+};
+
+static const char *ucap_names[RDMA_UCAP_MAX] = {
+ [RDMA_UCAP_MLX5_CTRL_LOCAL] = "mlx5_perm_ctrl_local",
+ [RDMA_UCAP_MLX5_CTRL_OTHER_VHCA] = "mlx5_perm_ctrl_other_vhca"
+};
+
+static char *ucaps_devnode(const struct device *dev, umode_t *mode)
+{
+ if (mode)
+ *mode = 0600;
+
+ return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
+}
+
+static const struct class ucaps_class = {
+ .name = "infiniband_ucaps",
+ .devnode = ucaps_devnode,
+};
+
+static const struct file_operations ucaps_cdev_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+};
+
+/**
+ * ib_cleanup_ucaps - clean up all ucap resources and the class.
+ *
+ * This is called once, when removing the ib_uverbs module.
+ */
+void ib_cleanup_ucaps(void)
+{
+ mutex_lock(&ucaps_mutex);
+ if (!ucaps_class_is_registered) {
+ mutex_unlock(&ucaps_mutex);
+ return;
+ }
+
+ for (int i = RDMA_UCAP_FIRST; i < RDMA_UCAP_MAX; i++)
+ WARN_ON(ucaps_list[i]);
+
+ class_unregister(&ucaps_class);
+ ucaps_class_is_registered = false;
+ unregister_chrdev_region(ucaps_base_dev, RDMA_UCAP_MAX);
+ mutex_unlock(&ucaps_mutex);
+}
+
+static int get_ucap_from_devt(dev_t devt, u64 *idx_mask)
+{
+ for (int type = RDMA_UCAP_FIRST; type < RDMA_UCAP_MAX; type++) {
+ if (ucaps_list[type] && ucaps_list[type]->dev.devt == devt) {
+ *idx_mask |= 1 << type;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int get_devt_from_fd(unsigned int fd, dev_t *ret_dev)
+{
+ struct file *file;
+
+ file = fget(fd);
+ if (!file)
+ return -EBADF;
+
+ *ret_dev = file_inode(file)->i_rdev;
+ fput(file);
+ return 0;
+}
+
+/**
+ * ib_ucaps_init - Initialization required before ucap creation.
+ *
+ * Return: 0 on success, or a negative errno value on failure
+ */
+static int ib_ucaps_init(void)
+{
+ int ret = 0;
+
+ if (ucaps_class_is_registered)
+ return ret;
+
+ ret = class_register(&ucaps_class);
+ if (ret)
+ return ret;
+
+ ret = alloc_chrdev_region(&ucaps_base_dev, 0, RDMA_UCAP_MAX,
+ ucaps_class.name);
+ if (ret < 0) {
+ class_unregister(&ucaps_class);
+ return ret;
+ }
+
+ ucaps_class_is_registered = true;
+
+ return 0;
+}
+
+static void ucap_dev_release(struct device *device)
+{
+ struct ib_ucap *ucap = container_of(device, struct ib_ucap, dev);
+
+ kfree(ucap);
+}
+
+/**
+ * ib_create_ucap - Add a ucap character device
+ * @type: UCAP type
+ *
+ * Creates a ucap character device in the /dev/infiniband directory. By default,
+ * the device has root-only read-write access.
+ *
+ * A driver may call this multiple times with the same UCAP type. A reference
+ * count tracks creations and deletions.
+ *
+ * Return: 0 on success, or a negative errno value on failure
+ */
+int ib_create_ucap(enum rdma_user_cap type)
+{
+ struct ib_ucap *ucap;
+ int ret;
+
+ if (type >= RDMA_UCAP_MAX)
+ return -EINVAL;
+
+ mutex_lock(&ucaps_mutex);
+ ret = ib_ucaps_init();
+ if (ret)
+ goto unlock;
+
+ ucap = ucaps_list[type];
+ if (ucap) {
+ kref_get(&ucap->ref);
+ mutex_unlock(&ucaps_mutex);
+ return 0;
+ }
+
+ ucap = kzalloc(sizeof(*ucap), GFP_KERNEL);
+ if (!ucap) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ device_initialize(&ucap->dev);
+ ucap->dev.class = &ucaps_class;
+ ucap->dev.devt = MKDEV(MAJOR(ucaps_base_dev), type);
+ ucap->dev.release = ucap_dev_release;
+ ret = dev_set_name(&ucap->dev, ucap_names[type]);
+ if (ret)
+ goto err_device;
+
+ cdev_init(&ucap->cdev, &ucaps_cdev_fops);
+ ucap->cdev.owner = THIS_MODULE;
+
+ ret = cdev_device_add(&ucap->cdev, &ucap->dev);
+ if (ret)
+ goto err_device;
+
+ kref_init(&ucap->ref);
+ ucaps_list[type] = ucap;
+ mutex_unlock(&ucaps_mutex);
+
+ return 0;
+
+err_device:
+ put_device(&ucap->dev);
+unlock:
+ mutex_unlock(&ucaps_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(ib_create_ucap);
+
+static void ib_release_ucap(struct kref *ref)
+{
+ struct ib_ucap *ucap = container_of(ref, struct ib_ucap, ref);
+ enum rdma_user_cap type;
+
+ for (type = RDMA_UCAP_FIRST; type < RDMA_UCAP_MAX; type++) {
+ if (ucaps_list[type] == ucap)
+ break;
+ }
+ WARN_ON(type == RDMA_UCAP_MAX);
+
+ ucaps_list[type] = NULL;
+ cdev_device_del(&ucap->cdev, &ucap->dev);
+ put_device(&ucap->dev);
+}
+
+/**
+ * ib_remove_ucap - Remove a ucap character device
+ * @type: User cap type
+ *
+ * Removes the ucap character device according to type. The device is completely
+ * removed from the filesystem when its reference count reaches 0.
+ */
+void ib_remove_ucap(enum rdma_user_cap type)
+{
+ struct ib_ucap *ucap;
+
+ mutex_lock(&ucaps_mutex);
+ ucap = ucaps_list[type];
+ if (WARN_ON(!ucap))
+ goto end;
+
+ kref_put(&ucap->ref, ib_release_ucap);
+end:
+ mutex_unlock(&ucaps_mutex);
+}
+EXPORT_SYMBOL(ib_remove_ucap);
+
+/**
+ * ib_get_ucaps - Get bitmask of ucap types from file descriptors
+ * @fds: Array of file descriptors
+ * @fd_count: Number of file descriptors in the array
+ * @idx_mask: Bitmask to be updated based on the ucaps in the fd list
+ *
+ * Given an array of file descriptors, this function returns a bitmask of
+ * the ucaps where a bit is set if an FD for that ucap type was in the array.
+ *
+ * Return: 0 on success, or a negative errno value on failure
+ */
+int ib_get_ucaps(int *fds, int fd_count, uint64_t *idx_mask)
+{
+ int ret = 0;
+ dev_t dev;
+
+ *idx_mask = 0;
+ mutex_lock(&ucaps_mutex);
+ for (int i = 0; i < fd_count; i++) {
+ ret = get_devt_from_fd(fds[i], &dev);
+ if (ret)
+ goto end;
+
+ ret = get_ucap_from_devt(dev, idx_mask);
+ if (ret)
+ goto end;
+ }
+
+end:
+ mutex_unlock(&ucaps_mutex);
+ return ret;
+}
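From a driver's point of view the new API is a pair of refcounted calls: ib_create_ucap() when the device can grant a given user capability and ib_remove_ucap() on teardown; userspace then proves it holds a capability by passing an open FD of the created char device through UVERBS_ATTR_GET_CONTEXT_FD_ARR (see the uverbs_cmd.c hunk below). A hedged sketch of the driver side, with the wrapper function names invented for illustration:

#include <rdma/ib_ucaps.h>

static int example_register_ucaps(void)
{
	int ret;

	/* Creates /dev/infiniband/mlx5_perm_ctrl_local (refcounted). */
	ret = ib_create_ucap(RDMA_UCAP_MLX5_CTRL_LOCAL);
	if (ret)
		return ret;

	ret = ib_create_ucap(RDMA_UCAP_MLX5_CTRL_OTHER_VHCA);
	if (ret)
		ib_remove_ucap(RDMA_UCAP_MLX5_CTRL_LOCAL);

	return ret;
}

static void example_unregister_ucaps(void)
{
	ib_remove_ucap(RDMA_UCAP_MLX5_CTRL_OTHER_VHCA);
	ib_remove_ucap(RDMA_UCAP_MLX5_CTRL_LOCAL);
}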
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 02f1666f3cba..6e700b974033 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -69,7 +69,9 @@ static struct ctl_table ucma_ctl_table[] = {
.data = &max_backlog,
.maxlen = sizeof max_backlog,
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_INT_MAX,
},
};
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 07c571c7b699..c5b686394760 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -80,9 +80,12 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
unsigned long pgsz_bitmap,
unsigned long virt)
{
- struct scatterlist *sg;
+ unsigned long curr_len = 0;
+ dma_addr_t curr_base = ~0;
unsigned long va, pgoff;
+ struct scatterlist *sg;
dma_addr_t mask;
+ dma_addr_t end;
int i;
umem->iova = va = virt;
@@ -107,17 +110,30 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
pgoff = umem->address & ~PAGE_MASK;
for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
- /* Walk SGL and reduce max page size if VA/PA bits differ
- * for any address.
+ /* If the current entry is physically contiguous with the previous
+ * one, there is no need to take its start address into consideration.
+ */
- mask |= (sg_dma_address(sg) + pgoff) ^ va;
+ if (check_add_overflow(curr_base, curr_len, &end) ||
+ end != sg_dma_address(sg)) {
+
+ curr_base = sg_dma_address(sg);
+ curr_len = 0;
+
+ /* Reduce max page size if VA/PA bits differ */
+ mask |= (curr_base + pgoff) ^ va;
+
+ /* Any VA that maps to a discontinuity point in the physical
+ * memory limits the maximum possible page size, since it must
+ * be the aligned start of a new page.
+ */
+ if (i != 0)
+ mask |= va;
+ }
+
+ curr_len += sg_dma_len(sg);
va += sg_dma_len(sg) - pgoff;
- /* Except for the last entry, the ending iova alignment sets
- * the maximum possible page size as the low bits of the iova
- * must be zero when starting the next chunk.
- */
- if (i != (umem->sgt_append.sgt.nents - 1))
- mask |= va;
+
pgoff = 0;
}
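The rewritten loop above only folds a new SG entry into the running range when it starts exactly where the previous range ends. A standalone sketch of that contiguity test (illustration only, not part of the patch):

static bool example_segments_contiguous(dma_addr_t prev_base,
					unsigned long prev_len,
					dma_addr_t next_base)
{
	dma_addr_t end;

	/* An overflowing end address is treated as a discontinuity */
	if (check_add_overflow(prev_base, (dma_addr_t)prev_len, &end))
		return false;

	return end == next_base;
}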
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 5ad14c39d48c..3c3bb670c805 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -42,6 +42,7 @@
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>
+#include <rdma/ib_ucaps.h>
#include "rdma_core.h"
#include "uverbs.h"
@@ -232,6 +233,8 @@ int ib_init_ucontext(struct uverbs_attr_bundle *attrs)
{
struct ib_ucontext *ucontext = attrs->context;
struct ib_uverbs_file *file = attrs->ufile;
+ int *fd_array;
+ int fd_count;
int ret;
if (!down_read_trylock(&file->hw_destroy_rwsem))
@@ -247,6 +250,22 @@ int ib_init_ucontext(struct uverbs_attr_bundle *attrs)
if (ret)
goto err;
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_GET_CONTEXT_FD_ARR)) {
+ fd_count = uverbs_attr_ptr_get_array_size(attrs,
+ UVERBS_ATTR_GET_CONTEXT_FD_ARR,
+ sizeof(int));
+ if (fd_count < 0) {
+ ret = fd_count;
+ goto err_uncharge;
+ }
+
+ fd_array = uverbs_attr_get_alloced_ptr(attrs,
+ UVERBS_ATTR_GET_CONTEXT_FD_ARR);
+ ret = ib_get_ucaps(fd_array, fd_count, &ucontext->enabled_caps);
+ if (ret)
+ goto err_uncharge;
+ }
+
ret = ucontext->device->ops.alloc_ucontext(ucontext,
&attrs->driver_udata);
if (ret)
@@ -716,8 +735,8 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
goto err_free;
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
- if (!pd) {
- ret = -EINVAL;
+ if (IS_ERR(pd)) {
+ ret = PTR_ERR(pd);
goto err_free;
}
@@ -807,8 +826,8 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
if (cmd.flags & IB_MR_REREG_PD) {
new_pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle,
attrs);
- if (!new_pd) {
- ret = -EINVAL;
+ if (IS_ERR(new_pd)) {
+ ret = PTR_ERR(new_pd);
goto put_uobjs;
}
} else {
@@ -917,8 +936,8 @@ static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
return PTR_ERR(uobj);
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
- if (!pd) {
- ret = -EINVAL;
+ if (IS_ERR(pd)) {
+ ret = PTR_ERR(pd);
goto err_free;
}
@@ -1125,8 +1144,8 @@ static int ib_uverbs_resize_cq(struct uverbs_attr_bundle *attrs)
return ret;
cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
- if (!cq)
- return -EINVAL;
+ if (IS_ERR(cq))
+ return PTR_ERR(cq);
ret = cq->device->ops.resize_cq(cq, cmd.cqe, &attrs->driver_udata);
if (ret)
@@ -1187,8 +1206,8 @@ static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs)
return ret;
cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
- if (!cq)
- return -EINVAL;
+ if (IS_ERR(cq))
+ return PTR_ERR(cq);
/* we copy a struct ib_uverbs_poll_cq_resp to user space */
header_ptr = attrs->ucore.outbuf;
@@ -1236,8 +1255,8 @@ static int ib_uverbs_req_notify_cq(struct uverbs_attr_bundle *attrs)
return ret;
cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
- if (!cq)
- return -EINVAL;
+ if (IS_ERR(cq))
+ return PTR_ERR(cq);
ib_req_notify_cq(cq, cmd.solicited_only ?
IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
@@ -1319,8 +1338,8 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
ind_tbl = uobj_get_obj_read(rwq_ind_table,
UVERBS_OBJECT_RWQ_IND_TBL,
cmd->rwq_ind_tbl_handle, attrs);
- if (!ind_tbl) {
- ret = -EINVAL;
+ if (IS_ERR(ind_tbl)) {
+ ret = PTR_ERR(ind_tbl);
goto err_put;
}
@@ -1358,8 +1377,10 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
if (cmd->is_srq) {
srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ,
cmd->srq_handle, attrs);
- if (!srq || srq->srq_type == IB_SRQT_XRC) {
- ret = -EINVAL;
+ if (IS_ERR(srq) ||
+ srq->srq_type == IB_SRQT_XRC) {
+ ret = IS_ERR(srq) ? PTR_ERR(srq) :
+ -EINVAL;
goto err_put;
}
}
@@ -1369,23 +1390,29 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
rcq = uobj_get_obj_read(
cq, UVERBS_OBJECT_CQ,
cmd->recv_cq_handle, attrs);
- if (!rcq) {
- ret = -EINVAL;
+ if (IS_ERR(rcq)) {
+ ret = PTR_ERR(rcq);
goto err_put;
}
}
}
}
- if (has_sq)
+ if (has_sq) {
scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
cmd->send_cq_handle, attrs);
+ if (IS_ERR(scq)) {
+ ret = PTR_ERR(scq);
+ goto err_put;
+ }
+ }
+
if (!ind_tbl && cmd->qp_type != IB_QPT_XRC_INI)
rcq = rcq ?: scq;
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle,
attrs);
- if (!pd || (!scq && has_sq)) {
- ret = -EINVAL;
+ if (IS_ERR(pd)) {
+ ret = PTR_ERR(pd);
goto err_put;
}
@@ -1480,18 +1507,18 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
err_put:
if (!IS_ERR(xrcd_uobj))
uobj_put_read(xrcd_uobj);
- if (pd)
+ if (!IS_ERR_OR_NULL(pd))
uobj_put_obj_read(pd);
- if (scq)
+ if (!IS_ERR_OR_NULL(scq))
rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,
UVERBS_LOOKUP_READ);
- if (rcq && rcq != scq)
+ if (!IS_ERR_OR_NULL(rcq) && rcq != scq)
rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject,
UVERBS_LOOKUP_READ);
- if (srq)
+ if (!IS_ERR_OR_NULL(srq))
rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
UVERBS_LOOKUP_READ);
- if (ind_tbl)
+ if (!IS_ERR_OR_NULL(ind_tbl))
uobj_put_obj_read(ind_tbl);
uobj_alloc_abort(&obj->uevent.uobject, attrs);
@@ -1653,8 +1680,8 @@ static int ib_uverbs_query_qp(struct uverbs_attr_bundle *attrs)
}
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
- if (!qp) {
- ret = -EINVAL;
+ if (IS_ERR(qp)) {
+ ret = PTR_ERR(qp);
goto out;
}
@@ -1759,8 +1786,8 @@ static int modify_qp(struct uverbs_attr_bundle *attrs,
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle,
attrs);
- if (!qp) {
- ret = -EINVAL;
+ if (IS_ERR(qp)) {
+ ret = PTR_ERR(qp);
goto out;
}
@@ -2026,8 +2053,8 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
return -ENOMEM;
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
- if (!qp) {
- ret = -EINVAL;
+ if (IS_ERR(qp)) {
+ ret = PTR_ERR(qp);
goto out;
}
@@ -2064,9 +2091,9 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
ud->ah = uobj_get_obj_read(ah, UVERBS_OBJECT_AH,
user_wr->wr.ud.ah, attrs);
- if (!ud->ah) {
+ if (IS_ERR(ud->ah)) {
+ ret = PTR_ERR(ud->ah);
kfree(ud);
- ret = -EINVAL;
goto out_put;
}
ud->remote_qpn = user_wr->wr.ud.remote_qpn;
@@ -2303,8 +2330,8 @@ static int ib_uverbs_post_recv(struct uverbs_attr_bundle *attrs)
return PTR_ERR(wr);
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
- if (!qp) {
- ret = -EINVAL;
+ if (IS_ERR(qp)) {
+ ret = PTR_ERR(qp);
goto out;
}
@@ -2354,8 +2381,8 @@ static int ib_uverbs_post_srq_recv(struct uverbs_attr_bundle *attrs)
return PTR_ERR(wr);
srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
- if (!srq) {
- ret = -EINVAL;
+ if (IS_ERR(srq)) {
+ ret = PTR_ERR(srq);
goto out;
}
@@ -2411,8 +2438,8 @@ static int ib_uverbs_create_ah(struct uverbs_attr_bundle *attrs)
}
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
- if (!pd) {
- ret = -EINVAL;
+ if (IS_ERR(pd)) {
+ ret = PTR_ERR(pd);
goto err;
}
@@ -2481,8 +2508,8 @@ static int ib_uverbs_attach_mcast(struct uverbs_attr_bundle *attrs)
return ret;
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
- if (!qp)
- return -EINVAL;
+ if (IS_ERR(qp))
+ return PTR_ERR(qp);
obj = qp->uobject;
@@ -2531,8 +2558,8 @@ static int ib_uverbs_detach_mcast(struct uverbs_attr_bundle *attrs)
return ret;
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
- if (!qp)
- return -EINVAL;
+ if (IS_ERR(qp))
+ return PTR_ERR(qp);
obj = qp->uobject;
mutex_lock(&obj->mcast_lock);
@@ -2666,8 +2693,8 @@ static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs,
UVERBS_OBJECT_FLOW_ACTION,
kern_spec->action.handle,
attrs);
- if (!ib_spec->action.act)
- return -EINVAL;
+ if (IS_ERR(ib_spec->action.act))
+ return PTR_ERR(ib_spec->action.act);
ib_spec->action.size =
sizeof(struct ib_flow_spec_action_handle);
flow_resources_add(uflow_res,
@@ -2684,8 +2711,8 @@ static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs,
UVERBS_OBJECT_COUNTERS,
kern_spec->flow_count.handle,
attrs);
- if (!ib_spec->flow_count.counters)
- return -EINVAL;
+ if (IS_ERR(ib_spec->flow_count.counters))
+ return PTR_ERR(ib_spec->flow_count.counters);
ib_spec->flow_count.size =
sizeof(struct ib_flow_spec_action_count);
flow_resources_add(uflow_res,
@@ -2903,14 +2930,14 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
return PTR_ERR(obj);
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
- if (!pd) {
- err = -EINVAL;
+ if (IS_ERR(pd)) {
+ err = PTR_ERR(pd);
goto err_uobj;
}
cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
- if (!cq) {
- err = -EINVAL;
+ if (IS_ERR(cq)) {
+ err = PTR_ERR(cq);
goto err_put_pd;
}
@@ -3011,8 +3038,8 @@ static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs)
return -EINVAL;
wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, attrs);
- if (!wq)
- return -EINVAL;
+ if (IS_ERR(wq))
+ return PTR_ERR(wq);
if (cmd.attr_mask & IB_WQ_FLAGS) {
wq_attr.flags = cmd.flags;
@@ -3095,8 +3122,8 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
num_read_wqs++) {
wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ,
wqs_handles[num_read_wqs], attrs);
- if (!wq) {
- err = -EINVAL;
+ if (IS_ERR(wq)) {
+ err = PTR_ERR(wq);
goto put_wqs;
}
@@ -3251,8 +3278,8 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
}
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
- if (!qp) {
- err = -EINVAL;
+ if (IS_ERR(qp)) {
+ err = PTR_ERR(qp);
goto err_uobj;
}
@@ -3398,15 +3425,15 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
if (ib_srq_has_cq(cmd->srq_type)) {
attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
cmd->cq_handle, attrs);
- if (!attr.ext.cq) {
- ret = -EINVAL;
+ if (IS_ERR(attr.ext.cq)) {
+ ret = PTR_ERR(attr.ext.cq);
goto err_put_xrcd;
}
}
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, attrs);
- if (!pd) {
- ret = -EINVAL;
+ if (IS_ERR(pd)) {
+ ret = PTR_ERR(pd);
goto err_put_cq;
}
@@ -3513,8 +3540,8 @@ static int ib_uverbs_modify_srq(struct uverbs_attr_bundle *attrs)
return ret;
srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
- if (!srq)
- return -EINVAL;
+ if (IS_ERR(srq))
+ return PTR_ERR(srq);
attr.max_wr = cmd.max_wr;
attr.srq_limit = cmd.srq_limit;
@@ -3541,8 +3568,8 @@ static int ib_uverbs_query_srq(struct uverbs_attr_bundle *attrs)
return ret;
srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
- if (!srq)
- return -EINVAL;
+ if (IS_ERR(srq))
+ return PTR_ERR(srq);
ret = ib_query_srq(srq, &attr);
@@ -3667,8 +3694,8 @@ static int ib_uverbs_ex_modify_cq(struct uverbs_attr_bundle *attrs)
return -EOPNOTSUPP;
cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
- if (!cq)
- return -EINVAL;
+ if (IS_ERR(cq))
+ return PTR_ERR(cq);
ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);
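All of the uverbs_cmd.c hunks above apply the same conversion: uobj_get_obj_read() is now expected to return an ERR_PTR() on failure rather than NULL, so callers propagate PTR_ERR() and the unwind paths switch to IS_ERR_OR_NULL() because a pointer may be unset, an error, or valid. A minimal sketch of the unwind convention (illustrative helper name):

static void example_put_read_pd(struct ib_pd *pd)
{
	/* Release the read reference only for a genuinely valid object */
	if (!IS_ERR_OR_NULL(pd))
		uobj_put_obj_read(pd);
}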
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 85cfc790a7bb..973fe2c7ef53 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -52,6 +52,7 @@
#include <rdma/ib.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/rdma_netlink.h>
+#include <rdma/ib_ucaps.h>
#include "uverbs.h"
#include "core_priv.h"
@@ -1345,6 +1346,7 @@ static void __exit ib_uverbs_cleanup(void)
IB_UVERBS_NUM_FIXED_MINOR);
unregister_chrdev_region(dynamic_uverbs_dev,
IB_UVERBS_NUM_DYNAMIC_MINOR);
+ ib_cleanup_ucaps();
mmu_notifier_synchronize();
}
diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c
index fb0555647336..c0fd283d9d6c 100644
--- a/drivers/infiniband/core/uverbs_std_types_device.c
+++ b/drivers/infiniband/core/uverbs_std_types_device.c
@@ -437,6 +437,10 @@ DECLARE_UVERBS_NAMED_METHOD(
UVERBS_ATTR_TYPE(u32), UA_OPTIONAL),
UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_GET_CONTEXT_CORE_SUPPORT,
UVERBS_ATTR_TYPE(u64), UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_GET_CONTEXT_FD_ARR,
+ UVERBS_ATTR_MIN_SIZE(sizeof(int)),
+ UA_OPTIONAL,
+ UA_ALLOC_AND_COPY),
UVERBS_ATTR_UHW());
DECLARE_UVERBS_NAMED_METHOD(
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 473ee0831307..c5e78bbefbd0 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -2105,7 +2105,7 @@ int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
if (!qp->uobject)
rdma_rw_cleanup_mrs(qp);
- rdma_counter_unbind_qp(qp, true);
+ rdma_counter_unbind_qp(qp, qp->port, true);
ret = qp->device->ops.destroy_qp(qp, udata);
if (ret) {
if (sec)
@@ -3109,22 +3109,23 @@ EXPORT_SYMBOL(__rdma_block_iter_start);
bool __rdma_block_iter_next(struct ib_block_iter *biter)
{
unsigned int block_offset;
- unsigned int sg_delta;
+ unsigned int delta;
if (!biter->__sg_nents || !biter->__sg)
return false;
biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
- sg_delta = BIT_ULL(biter->__pg_bit) - block_offset;
+ delta = BIT_ULL(biter->__pg_bit) - block_offset;
- if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) {
- biter->__sg_advance += sg_delta;
- } else {
+ while (biter->__sg_nents && biter->__sg &&
+ sg_dma_len(biter->__sg) - biter->__sg_advance <= delta) {
+ delta -= sg_dma_len(biter->__sg) - biter->__sg_advance;
biter->__sg_advance = 0;
biter->__sg = sg_next(biter->__sg);
biter->__sg_nents--;
}
+ biter->__sg_advance += delta;
return true;
}
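The reworked __rdma_block_iter_next() above can now cross several scatterlist entries when a single block spans them. A simplified, standalone sketch of the advance logic (illustration only; the real code operates on struct ib_block_iter):

static void example_advance_across_entries(struct scatterlist **sg, int *nents,
					   unsigned int *sg_advance,
					   unsigned int delta)
{
	/* Consume whole entries until the remaining delta fits in the current one */
	while (*nents && *sg &&
	       sg_dma_len(*sg) - *sg_advance <= delta) {
		delta -= sg_dma_len(*sg) - *sg_advance;
		*sg_advance = 0;
		*sg = sg_next(*sg);
		(*nents)--;
	}
	*sg_advance += delta;
}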
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 3721446c6ba4..6df5a2738c95 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -53,12 +53,6 @@
#define BNXT_RE_MAX_MR_SIZE_HIGH BIT_ULL(39)
#define BNXT_RE_MAX_MR_SIZE BNXT_RE_MAX_MR_SIZE_HIGH
-#define BNXT_RE_MAX_QPC_COUNT (64 * 1024)
-#define BNXT_RE_MAX_MRW_COUNT (64 * 1024)
-#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
-#define BNXT_RE_MAX_CQ_COUNT (64 * 1024)
-#define BNXT_RE_MAX_MRW_COUNT_64K (64 * 1024)
-#define BNXT_RE_MAX_MRW_COUNT_256K (256 * 1024)
/* Number of MRs to reserve for PF, leaving remainder for VFs */
#define BNXT_RE_RESVD_MR_FOR_PF (32 * 1024)
@@ -231,6 +225,8 @@ struct bnxt_re_dev {
unsigned long event_bitmap;
struct bnxt_qplib_cc_param cc_param;
struct workqueue_struct *dcb_wq;
+ struct dentry *cc_config;
+ struct bnxt_re_dbg_cc_config_params *cc_config_params;
};
#define to_bnxt_re_dev(ptr, member) \
@@ -243,6 +239,10 @@ struct bnxt_re_dev {
#define BNXT_RE_CHECK_RC(x) ((x) && ((x) != -ETIMEDOUT))
void bnxt_re_pacing_alert(struct bnxt_re_dev *rdev);
+int bnxt_re_assign_pma_port_counters(struct bnxt_re_dev *rdev, struct ib_mad *out_mad);
+int bnxt_re_assign_pma_port_ext_counters(struct bnxt_re_dev *rdev,
+ struct ib_mad *out_mad);
+
static inline struct device *rdev_to_dev(struct bnxt_re_dev *rdev)
{
if (rdev)
diff --git a/drivers/infiniband/hw/bnxt_re/debugfs.c b/drivers/infiniband/hw/bnxt_re/debugfs.c
index 7c47039044ef..af91d16c3c77 100644
--- a/drivers/infiniband/hw/bnxt_re/debugfs.c
+++ b/drivers/infiniband/hw/bnxt_re/debugfs.c
@@ -22,6 +22,23 @@
static struct dentry *bnxt_re_debugfs_root;
+static const char * const bnxt_re_cc_gen0_name[] = {
+ "enable_cc",
+ "run_avg_weight_g",
+ "num_phase_per_state",
+ "init_cr",
+ "init_tr",
+ "tos_ecn",
+ "tos_dscp",
+ "alt_vlan_pcp",
+ "alt_vlan_dscp",
+ "rtt",
+ "cc_mode",
+ "tcp_cp",
+ "tx_queue",
+ "inactivity_cp",
+};
+
static inline const char *bnxt_re_qp_state_str(u8 state)
{
switch (state) {
@@ -110,19 +127,215 @@ void bnxt_re_debug_rem_qpinfo(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp)
debugfs_remove(qp->dentry);
}
+static int map_cc_config_offset_gen0_ext0(u32 offset, struct bnxt_qplib_cc_param *ccparam, u32 *val)
+{
+ u64 map_offset;
+
+ map_offset = BIT(offset);
+
+ switch (map_offset) {
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC:
+ *val = ccparam->enable;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_G:
+ *val = ccparam->g;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_NUMPHASEPERSTATE:
+ *val = ccparam->nph_per_state;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INIT_CR:
+ *val = ccparam->init_cr;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INIT_TR:
+ *val = ccparam->init_tr;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN:
+ *val = ccparam->tos_ecn;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_DSCP:
+ *val = ccparam->tos_dscp;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_VLAN_PCP:
+ *val = ccparam->alt_vlan_pcp;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_TOS_DSCP:
+ *val = ccparam->alt_tos_dscp;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_RTT:
+ *val = ccparam->rtt;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_CC_MODE:
+ *val = ccparam->cc_mode;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TCP_CP:
+ *val = ccparam->tcp_cp;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static ssize_t bnxt_re_cc_config_get(struct file *filp, char __user *buffer,
+ size_t usr_buf_len, loff_t *ppos)
+{
+ struct bnxt_re_cc_param *dbg_cc_param = filp->private_data;
+ struct bnxt_re_dev *rdev = dbg_cc_param->rdev;
+ struct bnxt_qplib_cc_param ccparam = {};
+ u32 offset = dbg_cc_param->offset;
+ char buf[16];
+ u32 val;
+ int rc;
+
+ rc = bnxt_qplib_query_cc_param(&rdev->qplib_res, &ccparam);
+ if (rc)
+ return rc;
+
+ rc = map_cc_config_offset_gen0_ext0(offset, &ccparam, &val);
+ if (rc)
+ return rc;
+
+ rc = snprintf(buf, sizeof(buf), "%d\n", val);
+ if (rc < 0)
+ return rc;
+
+ return simple_read_from_buffer(buffer, usr_buf_len, ppos, (u8 *)(buf), rc);
+}
+
+static void bnxt_re_fill_gen0_ext0(struct bnxt_qplib_cc_param *ccparam, u32 offset, u32 val)
+{
+ u32 modify_mask;
+
+ modify_mask = BIT(offset);
+
+ switch (modify_mask) {
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC:
+ ccparam->enable = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_G:
+ ccparam->g = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_NUMPHASEPERSTATE:
+ ccparam->nph_per_state = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INIT_CR:
+ ccparam->init_cr = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INIT_TR:
+ ccparam->init_tr = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN:
+ ccparam->tos_ecn = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_DSCP:
+ ccparam->tos_dscp = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_VLAN_PCP:
+ ccparam->alt_vlan_pcp = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_TOS_DSCP:
+ ccparam->alt_tos_dscp = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_RTT:
+ ccparam->rtt = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_CC_MODE:
+ ccparam->cc_mode = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TCP_CP:
+ ccparam->tcp_cp = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TX_QUEUE:
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INACTIVITY_CP:
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TIME_PER_PHASE:
+ ccparam->time_pph = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_PKTS_PER_PHASE:
+ ccparam->pkts_pph = val;
+ break;
+ }
+
+ ccparam->mask = modify_mask;
+}
+
+static int bnxt_re_configure_cc(struct bnxt_re_dev *rdev, u32 gen_ext, u32 offset, u32 val)
+{
+ struct bnxt_qplib_cc_param ccparam = { };
+
+ /* Supporting only Gen 0 now */
+ if (gen_ext == CC_CONFIG_GEN0_EXT0)
+ bnxt_re_fill_gen0_ext0(&ccparam, offset, val);
+ else
+ return -EINVAL;
+
+ bnxt_qplib_modify_cc(&rdev->qplib_res, &ccparam);
+ return 0;
+}
+
+static ssize_t bnxt_re_cc_config_set(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct bnxt_re_cc_param *dbg_cc_param = filp->private_data;
+ struct bnxt_re_dev *rdev = dbg_cc_param->rdev;
+ u32 offset = dbg_cc_param->offset;
+ u8 cc_gen = dbg_cc_param->cc_gen;
+ char buf[16];
+ u32 val;
+ int rc;
+
+ if (count >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, buffer, count))
+ return -EFAULT;
+
+ buf[count] = '\0';
+ if (kstrtou32(buf, 0, &val))
+ return -EINVAL;
+
+ rc = bnxt_re_configure_cc(rdev, cc_gen, offset, val);
+ return rc ? rc : count;
+}
+
+static const struct file_operations bnxt_re_cc_config_ops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = bnxt_re_cc_config_get,
+ .write = bnxt_re_cc_config_set,
+};
+
void bnxt_re_debugfs_add_pdev(struct bnxt_re_dev *rdev)
{
struct pci_dev *pdev = rdev->en_dev->pdev;
+ struct bnxt_re_dbg_cc_config_params *cc_params;
+ int i;
rdev->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), bnxt_re_debugfs_root);
rdev->qp_debugfs = debugfs_create_dir("QPs", rdev->dbg_root);
+ rdev->cc_config = debugfs_create_dir("cc_config", rdev->dbg_root);
+
+ rdev->cc_config_params = kzalloc(sizeof(*cc_params), GFP_KERNEL);
+
+ for (i = 0; i < BNXT_RE_CC_PARAM_GEN0; i++) {
+ struct bnxt_re_cc_param *tmp_params = &rdev->cc_config_params->gen0_parms[i];
+
+ tmp_params->rdev = rdev;
+ tmp_params->offset = i;
+ tmp_params->cc_gen = CC_CONFIG_GEN0_EXT0;
+ tmp_params->dentry = debugfs_create_file(bnxt_re_cc_gen0_name[i], 0400,
+ rdev->cc_config, tmp_params,
+ &bnxt_re_cc_config_ops);
+ }
}
void bnxt_re_debugfs_rem_pdev(struct bnxt_re_dev *rdev)
{
debugfs_remove_recursive(rdev->qp_debugfs);
-
+ debugfs_remove_recursive(rdev->cc_config);
+ kfree(rdev->cc_config_params);
debugfs_remove_recursive(rdev->dbg_root);
rdev->dbg_root = NULL;
}
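Each cc_config debugfs file above shares one read/write implementation, and a file's index in the gen0 name array doubles as the bit position in the CMDQ modify mask. A small worked example of the write path (hypothetical helper; same effect as writing "1" to cc_config/enable_cc):

static void example_enable_cc(struct bnxt_qplib_cc_param *ccparam)
{
	/* offset 0 maps to BIT(0) == CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC */
	bnxt_re_fill_gen0_ext0(ccparam, 0, 1);
	/* now ccparam->enable == 1 and ccparam->mask selects only ENABLE_CC */
}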
diff --git a/drivers/infiniband/hw/bnxt_re/debugfs.h b/drivers/infiniband/hw/bnxt_re/debugfs.h
index cd3be0a9ec7e..8f101df4e838 100644
--- a/drivers/infiniband/hw/bnxt_re/debugfs.h
+++ b/drivers/infiniband/hw/bnxt_re/debugfs.h
@@ -18,4 +18,19 @@ void bnxt_re_debugfs_rem_pdev(struct bnxt_re_dev *rdev);
void bnxt_re_register_debugfs(void);
void bnxt_re_unregister_debugfs(void);
+#define CC_CONFIG_GEN_EXT(x, y) (((x) << 16) | (y))
+#define CC_CONFIG_GEN0_EXT0 CC_CONFIG_GEN_EXT(0, 0)
+
+#define BNXT_RE_CC_PARAM_GEN0 14
+
+struct bnxt_re_cc_param {
+ struct bnxt_re_dev *rdev;
+ struct dentry *dentry;
+ u32 offset;
+ u8 cc_gen;
+};
+
+struct bnxt_re_dbg_cc_config_params {
+ struct bnxt_re_cc_param gen0_parms[BNXT_RE_CC_PARAM_GEN0];
+};
#endif
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index f039aefcaf67..44bb082e0a60 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -39,6 +39,8 @@
#include <linux/types.h>
#include <linux/pci.h>
+#include <rdma/ib_mad.h>
+#include <rdma/ib_pma.h>
#include "roce_hsi.h"
#include "qplib_res.h"
@@ -285,6 +287,96 @@ static void bnxt_re_copy_db_pacing_stats(struct bnxt_re_dev *rdev,
readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off);
}
+int bnxt_re_assign_pma_port_ext_counters(struct bnxt_re_dev *rdev, struct ib_mad *out_mad)
+{
+ struct ib_pma_portcounters_ext *pma_cnt_ext;
+ struct bnxt_qplib_ext_stat *estat = &rdev->stats.rstat.ext_stat;
+ struct ctx_hw_stats *hw_stats = NULL;
+ int rc;
+
+ hw_stats = rdev->qplib_ctx.stats.dma;
+
+ pma_cnt_ext = (struct ib_pma_portcounters_ext *)(out_mad->data + 40);
+ if (_is_ext_stats_supported(rdev->dev_attr->dev_cap_flags)) {
+ u32 fid = PCI_FUNC(rdev->en_dev->pdev->devfn);
+
+ rc = bnxt_qplib_qext_stat(&rdev->rcfw, fid, estat);
+ if (rc)
+ return rc;
+ }
+
+ pma_cnt_ext = (struct ib_pma_portcounters_ext *)(out_mad->data + 40);
+ if ((bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) && rdev->is_virtfn) ||
+ !bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
+ pma_cnt_ext->port_xmit_data =
+ cpu_to_be64(le64_to_cpu(hw_stats->tx_ucast_bytes) / 4);
+ pma_cnt_ext->port_rcv_data =
+ cpu_to_be64(le64_to_cpu(hw_stats->rx_ucast_bytes) / 4);
+ pma_cnt_ext->port_xmit_packets =
+ cpu_to_be64(le64_to_cpu(hw_stats->tx_ucast_pkts));
+ pma_cnt_ext->port_rcv_packets =
+ cpu_to_be64(le64_to_cpu(hw_stats->rx_ucast_pkts));
+ pma_cnt_ext->port_unicast_rcv_packets =
+ cpu_to_be64(le64_to_cpu(hw_stats->rx_ucast_pkts));
+ pma_cnt_ext->port_unicast_xmit_packets =
+ cpu_to_be64(le64_to_cpu(hw_stats->tx_ucast_pkts));
+
+ } else {
+ pma_cnt_ext->port_rcv_packets = cpu_to_be64(estat->rx_roce_good_pkts);
+ pma_cnt_ext->port_rcv_data = cpu_to_be64(estat->rx_roce_good_bytes / 4);
+ pma_cnt_ext->port_xmit_packets = cpu_to_be64(estat->tx_roce_pkts);
+ pma_cnt_ext->port_xmit_data = cpu_to_be64(estat->tx_roce_bytes / 4);
+ pma_cnt_ext->port_unicast_rcv_packets = cpu_to_be64(estat->rx_roce_good_pkts);
+ pma_cnt_ext->port_unicast_xmit_packets = cpu_to_be64(estat->tx_roce_pkts);
+ }
+ return 0;
+}
+
+int bnxt_re_assign_pma_port_counters(struct bnxt_re_dev *rdev, struct ib_mad *out_mad)
+{
+ struct bnxt_qplib_ext_stat *estat = &rdev->stats.rstat.ext_stat;
+ struct ib_pma_portcounters *pma_cnt;
+ struct ctx_hw_stats *hw_stats = NULL;
+ int rc;
+
+ hw_stats = rdev->qplib_ctx.stats.dma;
+
+ pma_cnt = (struct ib_pma_portcounters *)(out_mad->data + 40);
+ if (_is_ext_stats_supported(rdev->dev_attr->dev_cap_flags)) {
+ u32 fid = PCI_FUNC(rdev->en_dev->pdev->devfn);
+
+ rc = bnxt_qplib_qext_stat(&rdev->rcfw, fid, estat);
+ if (rc)
+ return rc;
+ }
+ if ((bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) && rdev->is_virtfn) ||
+ !bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
+ pma_cnt->port_rcv_packets =
+ cpu_to_be32((u32)(le64_to_cpu(hw_stats->rx_ucast_pkts)) & 0xFFFFFFFF);
+ pma_cnt->port_rcv_data =
+ cpu_to_be32((u32)((le64_to_cpu(hw_stats->rx_ucast_bytes) &
+ 0xFFFFFFFF) / 4));
+ pma_cnt->port_xmit_packets =
+ cpu_to_be32((u32)(le64_to_cpu(hw_stats->tx_ucast_pkts)) & 0xFFFFFFFF);
+ pma_cnt->port_xmit_data =
+ cpu_to_be32((u32)((le64_to_cpu(hw_stats->tx_ucast_bytes)
+ & 0xFFFFFFFF) / 4));
+ } else {
+ pma_cnt->port_rcv_packets = cpu_to_be32(estat->rx_roce_good_pkts);
+ pma_cnt->port_rcv_data = cpu_to_be32((estat->rx_roce_good_bytes / 4));
+ pma_cnt->port_xmit_packets = cpu_to_be32(estat->tx_roce_pkts);
+ pma_cnt->port_xmit_data = cpu_to_be32((estat->tx_roce_bytes / 4));
+ }
+ pma_cnt->port_rcv_constraint_errors = (u8)(le64_to_cpu(hw_stats->rx_discard_pkts) & 0xFF);
+ pma_cnt->port_rcv_errors = cpu_to_be16((u16)(le64_to_cpu(hw_stats->rx_error_pkts)
+ & 0xFFFF));
+ pma_cnt->port_xmit_constraint_errors = (u8)(le64_to_cpu(hw_stats->tx_error_pkts) & 0xFF);
+ pma_cnt->port_xmit_discards = cpu_to_be16((u16)(le64_to_cpu(hw_stats->tx_discard_pkts)
+ & 0xFFFF));
+
+ return 0;
+}
+
int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
u32 port, int index)
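The PMA data counters filled in above are defined in units of 32-bit words, which is why every byte total is divided by four before being converted to big-endian. A one-line sketch of that conversion (illustrative helper):

static __be64 example_bytes_to_pma_dwords(u64 bytes)
{
	/* PortCounters/PortCountersExt data fields count dwords, not bytes */
	return cpu_to_be64(bytes / 4);
}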
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 6f5db32082dd..9082b3fd2b47 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -49,6 +49,7 @@
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>
+#include <rdma/ib_pma.h>
#include <rdma/uverbs_ioctl.h>
#include <linux/hashtable.h>
@@ -4491,6 +4492,41 @@ void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
kfree(bnxt_entry);
}
+int bnxt_re_process_mad(struct ib_device *ibdev, int mad_flags,
+ u32 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh,
+ const struct ib_mad *in_mad, struct ib_mad *out_mad,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ struct ib_class_port_info cpi = {};
+ int ret = IB_MAD_RESULT_SUCCESS;
+ int rc = 0;
+
+ if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
+ return ret;
+
+ switch (in_mad->mad_hdr.attr_id) {
+ case IB_PMA_CLASS_PORT_INFO:
+ cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
+ memcpy((out_mad->data + 40), &cpi, sizeof(cpi));
+ break;
+ case IB_PMA_PORT_COUNTERS_EXT:
+ rc = bnxt_re_assign_pma_port_ext_counters(rdev, out_mad);
+ break;
+ case IB_PMA_PORT_COUNTERS:
+ rc = bnxt_re_assign_pma_port_counters(rdev, out_mad);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ if (rc)
+ return IB_MAD_RESULT_FAILURE;
+ ret |= IB_MAD_RESULT_REPLY;
+ return ret;
+}
+
static int UVERBS_HANDLER(BNXT_RE_METHOD_NOTIFY_DRV)(struct uverbs_attr_bundle *attrs)
{
struct bnxt_re_ucontext *uctx;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index fbb16a411d6a..22c9eb8e9cfc 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -268,6 +268,12 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
+int bnxt_re_process_mad(struct ib_device *device, int process_mad_flags,
+ u32 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh,
+ const struct ib_mad *in_mad, struct ib_mad *out_mad,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
+
static inline u32 __to_ib_port_num(u16 port_id)
{
return (u32)port_id + 1;
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index a94c8c5387d9..293b0a96c8e3 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -1285,6 +1285,7 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
.post_recv = bnxt_re_post_recv,
.post_send = bnxt_re_post_send,
.post_srq_recv = bnxt_re_post_srq_recv,
+ .process_mad = bnxt_re_process_mad,
.query_ah = bnxt_re_query_ah,
.query_device = bnxt_re_query_device,
.modify_device = bnxt_re_modify_device,
@@ -2130,8 +2131,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
* memory for the function and all child VFs
*/
rc = bnxt_qplib_alloc_rcfw_channel(&rdev->qplib_res, &rdev->rcfw,
- &rdev->qplib_ctx,
- BNXT_RE_MAX_QPC_COUNT);
+ &rdev->qplib_ctx);
if (rc) {
ibdev_err(&rdev->ibdev,
"Failed to allocate RCFW Channel: %#x\n", rc);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 5336f74297f8..457eecb99f96 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -1217,8 +1217,6 @@ static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
qp->path_mtu =
CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
}
- qp->modify_flags &=
- ~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
/* Bono FW require the max_dest_rd_atomic to be >= 1 */
if (qp->max_dest_rd_atomic < 1)
qp->max_dest_rd_atomic = 1;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 17e62f22683b..d23074383428 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -915,7 +915,6 @@ skip_ctx_setup:
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
- kfree(rcfw->qp_tbl);
kfree(rcfw->crsqe_tbl);
bnxt_qplib_free_hwq(rcfw->res, &rcfw->cmdq.hwq);
bnxt_qplib_free_hwq(rcfw->res, &rcfw->creq.hwq);
@@ -924,8 +923,7 @@ void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_ctx *ctx,
- int qp_tbl_sz)
+ struct bnxt_qplib_ctx *ctx)
{
struct bnxt_qplib_hwq_attr hwq_attr = {};
struct bnxt_qplib_sg_info sginfo = {};
@@ -969,12 +967,6 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
if (!rcfw->crsqe_tbl)
goto fail;
- /* Allocate one extra to hold the QP1 entries */
- rcfw->qp_tbl_size = qp_tbl_sz + 1;
- rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node),
- GFP_KERNEL);
- if (!rcfw->qp_tbl)
- goto fail;
spin_lock_init(&rcfw->tbl_lock);
rcfw->max_timeout = res->cctx->hwrm_cmd_max_timeout;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 88814cb3aa74..ff873c5f1b25 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -262,8 +262,7 @@ static inline void bnxt_qplib_fill_cmdqmsg(struct bnxt_qplib_cmdqmsg *msg,
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_ctx *ctx,
- int qp_tbl_sz);
+ struct bnxt_qplib_ctx *ctx);
void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill);
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
@@ -285,9 +284,10 @@ int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx, int is_virtfn);
void bnxt_qplib_mark_qp_error(void *qp_handle);
+
static inline u32 map_qp_id_to_tbl_indx(u32 qid, struct bnxt_qplib_rcfw *rcfw)
{
/* Last index of the qp_tbl is for QP1, i.e. qp_tbl_size - 1 */
- return (qid == 1) ? rcfw->qp_tbl_size - 1 : qid % rcfw->qp_tbl_size - 2;
+ return (qid == 1) ? rcfw->qp_tbl_size - 1 : (qid % (rcfw->qp_tbl_size - 2));
}
#endif /* __BNXT_QPLIB_RCFW_H__ */
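The map_qp_id_to_tbl_indx() change above is purely an operator-precedence fix: the old expression subtracted 2 from the modulo result instead of from the table size. A worked illustration, assuming qp_tbl_size == 10:

/*
 *   old: qid % qp_tbl_size - 2   == (qid % 10) - 2  -> underflows when qid % 10 < 2
 *   new: qid % (qp_tbl_size - 2) ==  qid % 8        -> always in 0..7
 *
 * QP1 still maps to the reserved last slot, qp_tbl_size - 1.
 */
static u32 example_map_qp_id(u32 qid, u32 qp_tbl_size)
{
	return (qid == 1) ? qp_tbl_size - 1 : qid % (qp_tbl_size - 2);
}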
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 02922a0987ad..6cd05207ffed 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -871,6 +871,7 @@ int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
+ kfree(res->rcfw->qp_tbl);
bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
bnxt_qplib_free_pd_tbl(&res->pd_tbl);
bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
@@ -878,12 +879,20 @@ void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct net_device *netdev)
{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct bnxt_qplib_dev_attr *dev_attr;
int rc;
res->netdev = netdev;
dev_attr = res->dattr;
+ /* Allocate one extra to hold the QP1 entries */
+ rcfw->qp_tbl_size = max_t(u32, BNXT_RE_MAX_QPC_COUNT + 1, dev_attr->max_qp);
+ rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node),
+ GFP_KERNEL);
+ if (!rcfw->qp_tbl)
+ return -ENOMEM;
+
rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
if (rc)
goto fail;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 711990232de1..6a13927674b4 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -49,6 +49,13 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
#define CHIP_NUM_58818 0xd818
#define CHIP_NUM_57608 0x1760
+#define BNXT_RE_MAX_QPC_COUNT (64 * 1024)
+#define BNXT_RE_MAX_MRW_COUNT (64 * 1024)
+#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
+#define BNXT_RE_MAX_CQ_COUNT (64 * 1024)
+#define BNXT_RE_MAX_MRW_COUNT_64K (64 * 1024)
+#define BNXT_RE_MAX_MRW_COUNT_256K (256 * 1024)
+
#define BNXT_QPLIB_DBR_VALID (0x1UL << 26)
#define BNXT_QPLIB_DBR_EPOCH_SHIFT 24
#define BNXT_QPLIB_DBR_TOGGLE_SHIFT 25
@@ -600,4 +607,9 @@ static inline bool _is_cq_coalescing_supported(u16 dev_cap_ext_flags2)
return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_CQ_COALESCING_SUPPORTED;
}
+static inline bool _is_max_srq_ext_supported(u16 dev_cap_ext_flags_2)
+{
+ return !!(dev_cap_ext_flags_2 & CREQ_QUERY_FUNC_RESP_SB_MAX_SRQ_EXTENDED);
+}
+
#endif /* __BNXT_QPLIB_RES_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 4ccd4405355a..f231e886ad9d 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -176,6 +176,9 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags);
attr->dev_cap_flags2 = le16_to_cpu(sb->dev_cap_ext_flags_2);
+ if (_is_max_srq_ext_supported(attr->dev_cap_flags2))
+ attr->max_srq += le16_to_cpu(sb->max_srq_ext);
+
bnxt_qplib_query_version(rcfw, attr->fw_ver);
for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) {
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index 0ee60fdc18b3..7eceb3e9f4ce 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -2215,11 +2215,12 @@ struct creq_query_func_resp_sb {
#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE (0x2UL << 4)
#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_LAST \
CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE
+ #define CREQ_QUERY_FUNC_RESP_SB_MAX_SRQ_EXTENDED 0x40UL
#define CREQ_QUERY_FUNC_RESP_SB_MIN_RNR_RTR_RTS_OPT_SUPPORTED 0x1000UL
__le16 max_xp_qp_size;
__le16 create_qp_batch_size;
__le16 destroy_qp_batch_size;
- __le16 reserved16;
+ __le16 max_srq_ext;
__le64 reserved64;
};
diff --git a/drivers/infiniband/hw/erdma/erdma_cm.c b/drivers/infiniband/hw/erdma/erdma_cm.c
index 1b23c698ec25..e0acc185e719 100644
--- a/drivers/infiniband/hw/erdma/erdma_cm.c
+++ b/drivers/infiniband/hw/erdma/erdma_cm.c
@@ -709,7 +709,6 @@ error:
erdma_cancel_mpatimer(new_cep);
erdma_cep_put(new_cep);
- new_cep->sock = NULL;
}
if (new_s) {
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index a442eca498b8..368b6be3226f 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -12882,22 +12882,6 @@ u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
}
}
-/* return the OPA port logical state name */
-const char *opa_lstate_name(u32 lstate)
-{
- static const char * const port_logical_names[] = {
- "PORT_NOP",
- "PORT_DOWN",
- "PORT_INIT",
- "PORT_ARMED",
- "PORT_ACTIVE",
- "PORT_ACTIVE_DEFER",
- };
- if (lstate < ARRAY_SIZE(port_logical_names))
- return port_logical_names[lstate];
- return "unknown";
-}
-
/* return the OPA port physical state name */
const char *opa_pstate_name(u32 pstate)
{
@@ -12956,8 +12940,6 @@ static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
break;
}
}
- dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
- opa_lstate_name(state), state);
}
/**
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 8841db16bde7..6992f6d40255 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -771,7 +771,6 @@ int is_bx(struct hfi1_devdata *dd);
bool is_urg_masked(struct hfi1_ctxtdata *rcd);
u32 read_physical_state(struct hfi1_devdata *dd);
u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate);
-const char *opa_lstate_name(u32 lstate);
const char *opa_pstate_name(u32 pstate);
u32 driver_pstate(struct hfi1_pportdata *ppd);
u32 driver_lstate(struct hfi1_pportdata *ppd);
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 37a6794885d3..50826e7cdb7e 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -968,7 +968,7 @@ static bool __set_armed_to_active(struct hfi1_packet *packet)
if (hwstate != IB_PORT_ACTIVE) {
dd_dev_info(packet->rcd->dd,
"Unexpected link state %s\n",
- opa_lstate_name(hwstate));
+ ib_port_state_to_str(hwstate));
return false;
}
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index cbac4a442d9e..d6fbd9c2b8b4 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -635,12 +635,11 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
spin_lock_init(&ppd->cca_timer_lock);
for (i = 0; i < OPA_MAX_SLS; i++) {
- hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
ppd->cca_timer[i].ppd = ppd;
ppd->cca_timer[i].sl = i;
ppd->cca_timer[i].ccti = 0;
- ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
+ hrtimer_setup(&ppd->cca_timer[i].hrtimer, cca_timer_fn, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index a9883295f4af..b39f63ce6dfc 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -1160,8 +1160,8 @@ static int port_states_transition_allowed(struct hfi1_pportdata *ppd,
if (ret == HFI_TRANSITION_DISALLOWED ||
ret == HFI_TRANSITION_UNDEFINED) {
pr_warn("invalid logical state transition %s -> %s\n",
- opa_lstate_name(logical_old),
- opa_lstate_name(logical_new));
+ ib_port_state_to_str(logical_old),
+ ib_port_state_to_str(logical_new));
return ret;
}
diff --git a/drivers/infiniband/hw/hfi1/qsfp.c b/drivers/infiniband/hw/hfi1/qsfp.c
index 52cce1c8b76a..3b7842a7f634 100644
--- a/drivers/infiniband/hw/hfi1/qsfp.c
+++ b/drivers/infiniband/hw/hfi1/qsfp.c
@@ -405,26 +405,6 @@ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
}
/*
- * Perform a stand-alone single QSFP write. Acquire the resource, do the
- * write, then release the resource.
- */
-int one_qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
- int len)
-{
- struct hfi1_devdata *dd = ppd->dd;
- u32 resource = qsfp_resource(dd);
- int ret;
-
- ret = acquire_chip_resource(dd, resource, QSFP_WAIT);
- if (ret)
- return ret;
- ret = qsfp_write(ppd, target, addr, bp, len);
- release_chip_resource(dd, resource);
-
- return ret;
-}
-
-/*
* Access page n, offset m of QSFP memory as defined by SFF 8636
* by reading @addr = ((256 * n) + m)
*
diff --git a/drivers/infiniband/hw/hfi1/qsfp.h b/drivers/infiniband/hw/hfi1/qsfp.h
index df1389bad86b..5c59d53fcb63 100644
--- a/drivers/infiniband/hw/hfi1/qsfp.h
+++ b/drivers/infiniband/hw/hfi1/qsfp.h
@@ -195,8 +195,6 @@ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
int len);
int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
int len);
-int one_qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
- int len);
int one_qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
int len);
struct hfi1_asic_data;
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index 950c133d4220..6ee911f6885b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -175,8 +175,10 @@ void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
ida_destroy(&hr_dev->xrcd_ida.ida);
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
ida_destroy(&hr_dev->srq_table.srq_ida.ida);
+ xa_destroy(&hr_dev->srq_table.xa);
+ }
hns_roce_cleanup_qp_table(hr_dev);
hns_roce_cleanup_cq_table(hr_dev);
ida_destroy(&hr_dev->mr_table.mtpt_ida.ida);
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 4106423a1b39..3a5c93c9fb3e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -537,5 +537,6 @@ void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++)
ida_destroy(&hr_dev->cq_table.bank[i].ida);
+ xa_destroy(&hr_dev->cq_table.array);
mutex_destroy(&hr_dev->cq_table.bank_mutex);
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index 605562122ecc..ca0798224e56 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -1361,6 +1361,11 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
return ret;
}
+/* This is the number of bottom-level BT pages for a 100G MR on a 4K-page OS,
+ * assuming the BT page size is not expanded by cal_best_bt_pg_sz().
+ */
+#define RESCHED_LOOP_CNT_THRESHOLD_ON_4K 12800
+
/* construct the base address table and link them by address hop config */
int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list,
@@ -1369,6 +1374,7 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
{
const struct hns_roce_buf_region *r;
int ofs, end;
+ int loop;
int unit;
int ret;
int i;
@@ -1386,7 +1392,10 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
continue;
end = r->offset + r->count;
- for (ofs = r->offset; ofs < end; ofs += unit) {
+ for (ofs = r->offset, loop = 1; ofs < end; ofs += unit, loop++) {
+ if (!(loop % RESCHED_LOOP_CNT_THRESHOLD_ON_4K))
+ cond_resched();
+
ret = hem_list_alloc_mid_bt(hr_dev, r, unit, ofs,
hem_list->mid_bt[i],
&hem_list->btm_bt);
@@ -1443,9 +1452,14 @@ void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
struct list_head *head = &hem_list->btm_bt;
struct hns_roce_hem_item *hem, *temp_hem;
void *cpu_base = NULL;
+ int loop = 1;
int nr = 0;
list_for_each_entry_safe(hem, temp_hem, head, sibling) {
+ if (!(loop % RESCHED_LOOP_CNT_THRESHOLD_ON_4K))
+ cond_resched();
+ loop++;
+
if (hem_list_page_is_in_range(hem, offset)) {
nr = offset - hem->start;
cpu_base = hem->addr + nr * BA_BYTE_LEN;
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index ae24c81c9812..cf89a8db4f64 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -183,7 +183,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
IB_DEVICE_RC_RNR_NAK_GEN;
props->max_send_sge = hr_dev->caps.max_sq_sg;
props->max_recv_sge = hr_dev->caps.max_rq_sg;
- props->max_sge_rd = 1;
+ props->max_sge_rd = hr_dev->caps.max_sq_sg;
props->max_cq = hr_dev->caps.num_cqs;
props->max_cqe = hr_dev->caps.max_cqes;
props->max_mr = hr_dev->caps.num_mtpts;
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 55b9283bfc6f..09da3496843b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -998,7 +998,7 @@ static bool is_buf_attr_valid(struct hns_roce_dev *hr_dev,
if (attr->region_count > ARRAY_SIZE(attr->region) ||
attr->region_count < 1 || attr->page_shift < HNS_HW_PAGE_SHIFT) {
ibdev_err(ibdev,
- "invalid buf attr, region count %d, page shift %u.\n",
+ "invalid buf attr, region count %u, page shift %u.\n",
attr->region_count, attr->page_shift);
return false;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 9e2e76c59406..9f376a2232b0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -868,12 +868,14 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
struct hns_roce_ib_create_qp *ucmd,
struct hns_roce_ib_create_qp_resp *resp)
{
+ bool has_sdb = user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd);
struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata,
struct hns_roce_ucontext, ibucontext);
+ bool has_rdb = user_qp_has_rdb(hr_dev, init_attr, udata, resp);
struct ib_device *ibdev = &hr_dev->ib_dev;
int ret;
- if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
+ if (has_sdb) {
ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr, &hr_qp->sdb);
if (ret) {
ibdev_err(ibdev,
@@ -884,7 +886,7 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
}
- if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
+ if (has_rdb) {
ret = hns_roce_db_map_user(uctx, ucmd->db_addr, &hr_qp->rdb);
if (ret) {
ibdev_err(ibdev,
@@ -898,7 +900,7 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
return 0;
err_sdb:
- if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
+ if (has_sdb)
hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
err_out:
return ret;
@@ -1119,24 +1121,23 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
ibucontext);
hr_qp->config = uctx->config;
ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
- if (ret)
+ if (ret) {
ibdev_err(ibdev,
"failed to set user SQ size, ret = %d.\n",
ret);
+ return ret;
+ }
ret = set_congest_param(hr_dev, hr_qp, ucmd);
- if (ret)
- return ret;
} else {
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
hr_qp->config = HNS_ROCE_EXSGE_FLAGS;
+ default_congest_type(hr_dev, hr_qp);
ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
if (ret)
ibdev_err(ibdev,
"failed to set kernel SQ size, ret = %d.\n",
ret);
-
- default_congest_type(hr_dev, hr_qp);
}
return ret;
@@ -1219,7 +1220,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
min(udata->outlen, sizeof(resp)));
if (ret) {
ibdev_err(ibdev, "copy qp resp failed!\n");
- goto err_store;
+ goto err_flow_ctrl;
}
}
@@ -1319,7 +1320,7 @@ int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
ret = hns_roce_create_qp_common(hr_dev, init_attr, udata, hr_qp);
if (ret)
- ibdev_err(ibdev, "create QP type 0x%x failed(%d)\n",
+ ibdev_err(ibdev, "create QP type %d failed(%d)\n",
init_attr->qp_type, ret);
err_out:
@@ -1602,6 +1603,7 @@ void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++)
ida_destroy(&hr_dev->qp_table.bank[i].ida);
xa_destroy(&hr_dev->qp_table.dip_xa);
+ xa_destroy(&hr_dev->qp_table_xa);
mutex_destroy(&hr_dev->qp_table.bank_mutex);
mutex_destroy(&hr_dev->qp_table.scc_mutex);
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 70c06ef65603..1090051f493b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -51,7 +51,7 @@ static void hns_roce_ib_srq_event(struct hns_roce_srq *srq,
break;
default:
dev_err(hr_dev->dev,
- "hns_roce:Unexpected event type 0x%x on SRQ %06lx\n",
+ "hns_roce:Unexpected event type %d on SRQ %06lx\n",
event_type, srq->srqn);
return;
}
diff --git a/drivers/infiniband/hw/irdma/Kconfig b/drivers/infiniband/hw/irdma/Kconfig
index b6f9c41bca51..5f49a58590ed 100644
--- a/drivers/infiniband/hw/irdma/Kconfig
+++ b/drivers/infiniband/hw/irdma/Kconfig
@@ -7,6 +7,7 @@ config INFINIBAND_IRDMA
depends on ICE && I40E
select GENERIC_ALLOCATOR
select AUXILIARY_BUS
+ select CRC32
help
This is an Intel(R) Ethernet Protocol Driver for RDMA that
supports E810 (iWARP/RoCE) and X722 (iWARP) network devices.
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index ad50b77282f8..69ce1862eabe 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -498,8 +498,6 @@ static int irdma_save_msix_info(struct irdma_pci_f *rf)
iw_qvlist->num_vectors = rf->msix_count;
if (rf->msix_count <= num_online_cpus())
rf->msix_shared = true;
- else if (rf->msix_count > num_online_cpus() + 1)
- rf->msix_count = num_online_cpus() + 1;
pmsix = rf->msix_entries;
for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) {
diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c
index 3f13200ff71b..1ee8969595d3 100644
--- a/drivers/infiniband/hw/irdma/main.c
+++ b/drivers/infiniband/hw/irdma/main.c
@@ -206,6 +206,43 @@ static void irdma_lan_unregister_qset(struct irdma_sc_vsi *vsi,
ibdev_dbg(&iwdev->ibdev, "WS: LAN free_res for rdma qset failed.\n");
}
+static int irdma_init_interrupts(struct irdma_pci_f *rf, struct ice_pf *pf)
+{
+ int i;
+
+ rf->msix_count = num_online_cpus() + IRDMA_NUM_AEQ_MSIX;
+ rf->msix_entries = kcalloc(rf->msix_count, sizeof(*rf->msix_entries),
+ GFP_KERNEL);
+ if (!rf->msix_entries)
+ return -ENOMEM;
+
+ for (i = 0; i < rf->msix_count; i++)
+ if (ice_alloc_rdma_qvector(pf, &rf->msix_entries[i]))
+ break;
+
+ if (i < IRDMA_MIN_MSIX) {
+ for (; i > 0; i--)
+ ice_free_rdma_qvector(pf, &rf->msix_entries[i]);
+
+ kfree(rf->msix_entries);
+ return -ENOMEM;
+ }
+
+ rf->msix_count = i;
+
+ return 0;
+}
+
+static void irdma_deinit_interrupts(struct irdma_pci_f *rf, struct ice_pf *pf)
+{
+ int i;
+
+ for (i = 0; i < rf->msix_count; i++)
+ ice_free_rdma_qvector(pf, &rf->msix_entries[i]);
+
+ kfree(rf->msix_entries);
+}
+
static void irdma_remove(struct auxiliary_device *aux_dev)
{
struct iidc_auxiliary_dev *iidc_adev = container_of(aux_dev,
@@ -216,6 +253,7 @@ static void irdma_remove(struct auxiliary_device *aux_dev)
irdma_ib_unregister_device(iwdev);
ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, false);
+ irdma_deinit_interrupts(iwdev->rf, pf);
pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(pf->pdev->devfn));
}
@@ -230,9 +268,7 @@ static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf
rf->gen_ops.unregister_qset = irdma_lan_unregister_qset;
rf->hw.hw_addr = pf->hw.hw_addr;
rf->pcidev = pf->pdev;
- rf->msix_count = pf->num_rdma_msix;
rf->pf_id = pf->hw.pf_id;
- rf->msix_entries = &pf->msix_entries[pf->rdma_base_vector];
rf->default_vsi.vsi_idx = vsi->vsi_num;
rf->protocol_used = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ?
IRDMA_ROCE_PROTOCOL_ONLY : IRDMA_IWARP_PROTOCOL_ONLY;
@@ -281,6 +317,10 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
irdma_fill_device_info(iwdev, pf, vsi);
rf = iwdev->rf;
+ err = irdma_init_interrupts(rf, pf);
+ if (err)
+ goto err_init_interrupts;
+
err = irdma_ctrl_init_hw(rf);
if (err)
goto err_ctrl_init;
@@ -311,6 +351,8 @@ err_ibreg:
err_rt_init:
irdma_ctrl_deinit_hw(rf);
err_ctrl_init:
+ irdma_deinit_interrupts(rf, pf);
+err_init_interrupts:
kfree(iwdev->rf);
ib_dealloc_device(&iwdev->ibdev);
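irdma_init_interrupts() above requests one vector per online CPU plus a dedicated AEQ vector, and tolerates a partial allocation as long as at least IRDMA_MIN_MSIX vectors were obtained. A sketch of the sizing rule in isolation (constants are the ones added to main.h in this diff):

static int example_requested_vectors(void)
{
	/* one CEQ vector per online CPU plus one AEQ vector */
	return num_online_cpus() + IRDMA_NUM_AEQ_MSIX;
}

static bool example_enough_vectors(int allocated)
{
	/* need the AEQ plus at least one CEQ, otherwise the probe fails */
	return allocated >= IRDMA_MIN_MSIX;
}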
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
index 9f0ed6e84471..bb0b6494ccb2 100644
--- a/drivers/infiniband/hw/irdma/main.h
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -30,7 +30,6 @@
#endif
#include <linux/auxiliary_bus.h>
#include <linux/net/intel/iidc.h>
-#include <crypto/hash.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
@@ -117,6 +116,9 @@ extern struct auxiliary_driver i40iw_auxiliary_drv;
#define IRDMA_IRQ_NAME_STR_LEN (64)
+#define IRDMA_NUM_AEQ_MSIX 1
+#define IRDMA_MIN_MSIX 2
+
enum init_completion_state {
INVALID_STATE = 0,
INITIAL_STATE,
diff --git a/drivers/infiniband/hw/irdma/osdep.h b/drivers/infiniband/hw/irdma/osdep.h
index ddf02a462efa..4b4f78288d12 100644
--- a/drivers/infiniband/hw/irdma/osdep.h
+++ b/drivers/infiniband/hw/irdma/osdep.h
@@ -6,7 +6,6 @@
#include <linux/pci.h>
#include <linux/bitfield.h>
#include <linux/net/intel/iidc.h>
-#include <crypto/hash.h>
#include <rdma/ib_verbs.h>
#define STATS_TIMER_DELAY 60000
@@ -43,15 +42,12 @@ enum irdma_status_code irdma_vf_wait_vchnl_resp(struct irdma_sc_dev *dev);
bool irdma_vf_clear_to_send(struct irdma_sc_dev *dev);
void irdma_add_dev_ref(struct irdma_sc_dev *dev);
void irdma_put_dev_ref(struct irdma_sc_dev *dev);
-int irdma_ieq_check_mpacrc(struct shash_desc *desc, void *addr, u32 len,
- u32 val);
+int irdma_ieq_check_mpacrc(const void *addr, u32 len, u32 val);
struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev,
struct irdma_puda_buf *buf);
void irdma_send_ieq_ack(struct irdma_sc_qp *qp);
void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len,
u32 seqnum);
-void irdma_free_hash_desc(struct shash_desc *hash_desc);
-int irdma_init_hash_desc(struct shash_desc **hash_desc);
int irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
struct irdma_puda_buf *buf);
int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
diff --git a/drivers/infiniband/hw/irdma/puda.c b/drivers/infiniband/hw/irdma/puda.c
index 7e3f9bca2c23..694e5a9ed15d 100644
--- a/drivers/infiniband/hw/irdma/puda.c
+++ b/drivers/infiniband/hw/irdma/puda.c
@@ -923,8 +923,6 @@ void irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,
switch (rsrc->cmpl) {
case PUDA_HASH_CRC_COMPLETE:
- irdma_free_hash_desc(rsrc->hash_desc);
- fallthrough;
case PUDA_QP_CREATED:
irdma_qp_rem_qos(&rsrc->qp);
@@ -1095,15 +1093,12 @@ int irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
goto error;
if (info->type == IRDMA_PUDA_RSRC_TYPE_IEQ) {
- if (!irdma_init_hash_desc(&rsrc->hash_desc)) {
- rsrc->check_crc = true;
- rsrc->cmpl = PUDA_HASH_CRC_COMPLETE;
- ret = 0;
- }
+ rsrc->check_crc = true;
+ rsrc->cmpl = PUDA_HASH_CRC_COMPLETE;
}
irdma_sc_ccq_arm(&rsrc->cq);
- return ret;
+ return 0;
error:
irdma_puda_dele_rsrc(vsi, info->type, false);
@@ -1396,8 +1391,8 @@ static int irdma_ieq_handle_partial(struct irdma_puda_rsrc *ieq,
crcptr = txbuf->data + fpdu_len - 4;
mpacrc = *(u32 *)crcptr;
if (ieq->check_crc) {
- status = irdma_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,
- (fpdu_len - 4), mpacrc);
+ status = irdma_ieq_check_mpacrc(txbuf->data, fpdu_len - 4,
+ mpacrc);
if (status) {
ibdev_dbg(to_ibdev(ieq->dev), "IEQ: error bad crc\n");
goto error;
@@ -1465,8 +1460,8 @@ static int irdma_ieq_process_buf(struct irdma_puda_rsrc *ieq,
crcptr = datap + fpdu_len - 4;
mpacrc = *(u32 *)crcptr;
if (ieq->check_crc)
- ret = irdma_ieq_check_mpacrc(ieq->hash_desc, datap,
- fpdu_len - 4, mpacrc);
+ ret = irdma_ieq_check_mpacrc(datap, fpdu_len - 4,
+ mpacrc);
if (ret) {
list_add(&buf->list, rxlist);
ibdev_dbg(to_ibdev(ieq->dev),
diff --git a/drivers/infiniband/hw/irdma/puda.h b/drivers/infiniband/hw/irdma/puda.h
index bc6d9514c9c1..2fc638f2b143 100644
--- a/drivers/infiniband/hw/irdma/puda.h
+++ b/drivers/infiniband/hw/irdma/puda.h
@@ -119,7 +119,6 @@ struct irdma_puda_rsrc {
u32 rx_wqe_idx;
u32 rxq_invalid_cnt;
u32 tx_wqe_avail_cnt;
- struct shash_desc *hash_desc;
struct list_head txpend;
struct list_head bufpool; /* free buffers pool list for recv and xmit */
u32 alloc_buf_count;
@@ -163,10 +162,8 @@ struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev,
struct irdma_puda_buf *buf);
int irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
struct irdma_puda_buf *buf);
-int irdma_ieq_check_mpacrc(struct shash_desc *desc, void *addr, u32 len, u32 val);
-int irdma_init_hash_desc(struct shash_desc **desc);
+int irdma_ieq_check_mpacrc(const void *addr, u32 len, u32 val);
void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
-void irdma_free_hash_desc(struct shash_desc *desc);
void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len, u32 seqnum);
int irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
int irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq);
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index 0e594122baa7..e73b14fd95ef 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -1274,57 +1274,14 @@ void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
}
/**
- * irdma_init_hash_desc - initialize hash for crc calculation
- * @desc: cryption type
- */
-int irdma_init_hash_desc(struct shash_desc **desc)
-{
- struct crypto_shash *tfm;
- struct shash_desc *tdesc;
-
- tfm = crypto_alloc_shash("crc32c", 0, 0);
- if (IS_ERR(tfm))
- return -EINVAL;
-
- tdesc = kzalloc(sizeof(*tdesc) + crypto_shash_descsize(tfm),
- GFP_KERNEL);
- if (!tdesc) {
- crypto_free_shash(tfm);
- return -EINVAL;
- }
-
- tdesc->tfm = tfm;
- *desc = tdesc;
-
- return 0;
-}
-
-/**
- * irdma_free_hash_desc - free hash desc
- * @desc: to be freed
- */
-void irdma_free_hash_desc(struct shash_desc *desc)
-{
- if (desc) {
- crypto_free_shash(desc->tfm);
- kfree(desc);
- }
-}
-
-/**
* irdma_ieq_check_mpacrc - check if mpa crc is OK
- * @desc: desc for hash
* @addr: address of buffer for crc
* @len: length of buffer
* @val: value to be compared
*/
-int irdma_ieq_check_mpacrc(struct shash_desc *desc, void *addr, u32 len,
- u32 val)
+int irdma_ieq_check_mpacrc(const void *addr, u32 len, u32 val)
{
- u32 crc = 0;
-
- crypto_shash_digest(desc, addr, len, (u8 *)&crc);
- if (crc != val)
+ if ((__force u32)cpu_to_le32(~crc32c(~0, addr, len)) != val)
return -EINVAL;
return 0;
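
For reference, a minimal sketch (not part of the patch) of the convention the new irdma_ieq_check_mpacrc() relies on: the crc32c() library helper from <linux/crc32c.h> is seeded with ~0 and the result is inverted and stored little-endian, which matches what the previously allocated "crc32c" shash produced, so no per-IEQ transform allocation is needed.

#include <linux/crc32c.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Sketch only: same seed/finalize/endianness convention as the check above */
static int example_check_mpacrc(const void *addr, u32 len, __le32 wire_val)
{
	u32 crc = ~crc32c(~0, addr, len);	/* standard CRC32C finalization */

	return cpu_to_le32(crc) == wire_val ? 0 : -EINVAL;
}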
diff --git a/drivers/infiniband/hw/mana/Makefile b/drivers/infiniband/hw/mana/Makefile
index 88655fe5e398..921c05e08b11 100644
--- a/drivers/infiniband/hw/mana/Makefile
+++ b/drivers/infiniband/hw/mana/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_MANA_INFINIBAND) += mana_ib.o
-mana_ib-y := device.o main.o wq.o qp.o cq.o mr.o
+mana_ib-y := device.o main.o wq.o qp.o cq.o mr.o ah.o wr.o counters.o
diff --git a/drivers/infiniband/hw/mana/ah.c b/drivers/infiniband/hw/mana/ah.c
new file mode 100644
index 000000000000..f56952eebbaa
--- /dev/null
+++ b/drivers/infiniband/hw/mana/ah.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Microsoft Corporation. All rights reserved.
+ */
+
+#include "mana_ib.h"
+
+int mana_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct mana_ib_dev *mdev = container_of(ibah->device, struct mana_ib_dev, ib_dev);
+ struct mana_ib_ah *ah = container_of(ibah, struct mana_ib_ah, ibah);
+ struct rdma_ah_attr *ah_attr = attr->ah_attr;
+ const struct ib_global_route *grh;
+ enum rdma_network_type ntype;
+
+ if (ah_attr->type != RDMA_AH_ATTR_TYPE_ROCE ||
+ !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
+ return -EINVAL;
+
+ if (udata)
+ return -EINVAL;
+
+ ah->av = dma_pool_zalloc(mdev->av_pool, GFP_ATOMIC, &ah->dma_handle);
+ if (!ah->av)
+ return -ENOMEM;
+
+ grh = rdma_ah_read_grh(ah_attr);
+ ntype = rdma_gid_attr_network_type(grh->sgid_attr);
+
+ copy_in_reverse(ah->av->dest_mac, ah_attr->roce.dmac, ETH_ALEN);
+ ah->av->udp_src_port = rdma_flow_label_to_udp_sport(grh->flow_label);
+ ah->av->hop_limit = grh->hop_limit;
+ ah->av->dscp = (grh->traffic_class >> 2) & 0x3f;
+ ah->av->is_ipv6 = (ntype == RDMA_NETWORK_IPV6);
+
+ if (ah->av->is_ipv6) {
+ copy_in_reverse(ah->av->dest_ip, grh->dgid.raw, 16);
+ copy_in_reverse(ah->av->src_ip, grh->sgid_attr->gid.raw, 16);
+ } else {
+ ah->av->dest_ip[10] = 0xFF;
+ ah->av->dest_ip[11] = 0xFF;
+ copy_in_reverse(&ah->av->dest_ip[12], &grh->dgid.raw[12], 4);
+ copy_in_reverse(&ah->av->src_ip[12], &grh->sgid_attr->gid.raw[12], 4);
+ }
+
+ return 0;
+}
+
+int mana_ib_destroy_ah(struct ib_ah *ibah, u32 flags)
+{
+ struct mana_ib_dev *mdev = container_of(ibah->device, struct mana_ib_dev, ib_dev);
+ struct mana_ib_ah *ah = container_of(ibah, struct mana_ib_ah, ibah);
+
+ dma_pool_free(mdev->av_pool, ah->av, ah->dma_handle);
+
+ return 0;
+}
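
The address vector written by mana_ib_create_ah() lives in a dma_pool created at probe time (see the device.c hunk below), so each 64-byte AV is allocated and DMA-mapped in one step and returned to the pool in mana_ib_destroy_ah(). A minimal sketch of that lifecycle, assuming only the dmapool API (the example_* names are hypothetical, not part of the patch):

#include <linux/dmapool.h>
#include <linux/errno.h>

struct example_av_ctx {
	struct dma_pool *av_pool;
	void *av;
	dma_addr_t dma_handle;
};

static int example_av_alloc(struct device *dev, struct example_av_ctx *ctx)
{
	/* 64-byte, 64-byte-aligned entries, as with MANA_AV_BUFFER_SIZE */
	ctx->av_pool = dma_pool_create("example_av", dev, 64, 64, 0);
	if (!ctx->av_pool)
		return -ENOMEM;

	/* GFP_ATOMIC mirrors create_ah, which may run in atomic context */
	ctx->av = dma_pool_zalloc(ctx->av_pool, GFP_ATOMIC, &ctx->dma_handle);
	if (!ctx->av) {
		dma_pool_destroy(ctx->av_pool);
		return -ENOMEM;
	}
	return 0;
}

static void example_av_free(struct example_av_ctx *ctx)
{
	dma_pool_free(ctx->av_pool, ctx->av, ctx->dma_handle);
	dma_pool_destroy(ctx->av_pool);
}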
diff --git a/drivers/infiniband/hw/mana/counters.c b/drivers/infiniband/hw/mana/counters.c
new file mode 100644
index 000000000000..e533ce21013d
--- /dev/null
+++ b/drivers/infiniband/hw/mana/counters.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Microsoft Corporation. All rights reserved.
+ */
+
+#include "counters.h"
+
+static const struct rdma_stat_desc mana_ib_port_stats_desc[] = {
+ [MANA_IB_REQUESTER_TIMEOUT].name = "requester_timeout",
+ [MANA_IB_REQUESTER_OOS_NAK].name = "requester_oos_nak",
+ [MANA_IB_REQUESTER_RNR_NAK].name = "requester_rnr_nak",
+ [MANA_IB_RESPONDER_RNR_NAK].name = "responder_rnr_nak",
+ [MANA_IB_RESPONDER_OOS].name = "responder_oos",
+ [MANA_IB_RESPONDER_DUP_REQUEST].name = "responder_dup_request",
+ [MANA_IB_REQUESTER_IMPLICIT_NAK].name = "requester_implicit_nak",
+ [MANA_IB_REQUESTER_READRESP_PSN_MISMATCH].name = "requester_readresp_psn_mismatch",
+ [MANA_IB_NAK_INV_REQ].name = "nak_inv_req",
+ [MANA_IB_NAK_ACCESS_ERR].name = "nak_access_error",
+ [MANA_IB_NAK_OPP_ERR].name = "nak_opp_error",
+ [MANA_IB_NAK_INV_READ].name = "nak_inv_read",
+ [MANA_IB_RESPONDER_LOCAL_LEN_ERR].name = "responder_local_len_error",
+ [MANA_IB_REQUESTOR_LOCAL_PROT_ERR].name = "requestor_local_prot_error",
+ [MANA_IB_RESPONDER_REM_ACCESS_ERR].name = "responder_rem_access_error",
+ [MANA_IB_RESPONDER_LOCAL_QP_ERR].name = "responder_local_qp_error",
+ [MANA_IB_RESPONDER_MALFORMED_WQE].name = "responder_malformed_wqe",
+ [MANA_IB_GENERAL_HW_ERR].name = "general_hw_error",
+ [MANA_IB_REQUESTER_RNR_NAK_RETRIES_EXCEEDED].name = "requester_rnr_nak_retries_exceeded",
+ [MANA_IB_REQUESTER_RETRIES_EXCEEDED].name = "requester_retries_exceeded",
+ [MANA_IB_TOTAL_FATAL_ERR].name = "total_fatal_error",
+ [MANA_IB_RECEIVED_CNPS].name = "received_cnps",
+ [MANA_IB_NUM_QPS_CONGESTED].name = "num_qps_congested",
+ [MANA_IB_RATE_INC_EVENTS].name = "rate_inc_events",
+ [MANA_IB_NUM_QPS_RECOVERED].name = "num_qps_recovered",
+ [MANA_IB_CURRENT_RATE].name = "current_rate",
+};
+
+struct rdma_hw_stats *mana_ib_alloc_hw_port_stats(struct ib_device *ibdev,
+ u32 port_num)
+{
+ return rdma_alloc_hw_stats_struct(mana_ib_port_stats_desc,
+ ARRAY_SIZE(mana_ib_port_stats_desc),
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ u32 port_num, int index)
+{
+ struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev,
+ ib_dev);
+ struct mana_rnic_query_vf_cntrs_resp resp = {};
+ struct mana_rnic_query_vf_cntrs_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_QUERY_VF_COUNTERS,
+ sizeof(req), sizeof(resp));
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
+ req.adapter = mdev->adapter_handle;
+
+ err = mana_gd_send_request(mdev_to_gc(mdev), sizeof(req), &req,
+ sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to query vf counters err %d",
+ err);
+ return err;
+ }
+
+ stats->value[MANA_IB_REQUESTER_TIMEOUT] = resp.requester_timeout;
+ stats->value[MANA_IB_REQUESTER_OOS_NAK] = resp.requester_oos_nak;
+ stats->value[MANA_IB_REQUESTER_RNR_NAK] = resp.requester_rnr_nak;
+ stats->value[MANA_IB_RESPONDER_RNR_NAK] = resp.responder_rnr_nak;
+ stats->value[MANA_IB_RESPONDER_OOS] = resp.responder_oos;
+ stats->value[MANA_IB_RESPONDER_DUP_REQUEST] = resp.responder_dup_request;
+ stats->value[MANA_IB_REQUESTER_IMPLICIT_NAK] =
+ resp.requester_implicit_nak;
+ stats->value[MANA_IB_REQUESTER_READRESP_PSN_MISMATCH] =
+ resp.requester_readresp_psn_mismatch;
+ stats->value[MANA_IB_NAK_INV_REQ] = resp.nak_inv_req;
+ stats->value[MANA_IB_NAK_ACCESS_ERR] = resp.nak_access_err;
+ stats->value[MANA_IB_NAK_OPP_ERR] = resp.nak_opp_err;
+ stats->value[MANA_IB_NAK_INV_READ] = resp.nak_inv_read;
+ stats->value[MANA_IB_RESPONDER_LOCAL_LEN_ERR] =
+ resp.responder_local_len_err;
+ stats->value[MANA_IB_REQUESTOR_LOCAL_PROT_ERR] =
+ resp.requestor_local_prot_err;
+ stats->value[MANA_IB_RESPONDER_REM_ACCESS_ERR] =
+ resp.responder_rem_access_err;
+ stats->value[MANA_IB_RESPONDER_LOCAL_QP_ERR] =
+ resp.responder_local_qp_err;
+ stats->value[MANA_IB_RESPONDER_MALFORMED_WQE] =
+ resp.responder_malformed_wqe;
+ stats->value[MANA_IB_GENERAL_HW_ERR] = resp.general_hw_err;
+ stats->value[MANA_IB_REQUESTER_RNR_NAK_RETRIES_EXCEEDED] =
+ resp.requester_rnr_nak_retries_exceeded;
+ stats->value[MANA_IB_REQUESTER_RETRIES_EXCEEDED] =
+ resp.requester_retries_exceeded;
+ stats->value[MANA_IB_TOTAL_FATAL_ERR] = resp.total_fatal_err;
+
+ stats->value[MANA_IB_RECEIVED_CNPS] = resp.received_cnps;
+ stats->value[MANA_IB_NUM_QPS_CONGESTED] = resp.num_qps_congested;
+ stats->value[MANA_IB_RATE_INC_EVENTS] = resp.rate_inc_events;
+ stats->value[MANA_IB_NUM_QPS_RECOVERED] = resp.num_qps_recovered;
+ stats->value[MANA_IB_CURRENT_RATE] = resp.current_rate;
+
+ return ARRAY_SIZE(mana_ib_port_stats_desc);
+}
diff --git a/drivers/infiniband/hw/mana/counters.h b/drivers/infiniband/hw/mana/counters.h
new file mode 100644
index 000000000000..7ff92d27f6c3
--- /dev/null
+++ b/drivers/infiniband/hw/mana/counters.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 Microsoft Corporation. All rights reserved.
+ */
+
+#ifndef _COUNTERS_H_
+#define _COUNTERS_H_
+
+#include "mana_ib.h"
+
+enum mana_ib_port_counters {
+ MANA_IB_REQUESTER_TIMEOUT,
+ MANA_IB_REQUESTER_OOS_NAK,
+ MANA_IB_REQUESTER_RNR_NAK,
+ MANA_IB_RESPONDER_RNR_NAK,
+ MANA_IB_RESPONDER_OOS,
+ MANA_IB_RESPONDER_DUP_REQUEST,
+ MANA_IB_REQUESTER_IMPLICIT_NAK,
+ MANA_IB_REQUESTER_READRESP_PSN_MISMATCH,
+ MANA_IB_NAK_INV_REQ,
+ MANA_IB_NAK_ACCESS_ERR,
+ MANA_IB_NAK_OPP_ERR,
+ MANA_IB_NAK_INV_READ,
+ MANA_IB_RESPONDER_LOCAL_LEN_ERR,
+ MANA_IB_REQUESTOR_LOCAL_PROT_ERR,
+ MANA_IB_RESPONDER_REM_ACCESS_ERR,
+ MANA_IB_RESPONDER_LOCAL_QP_ERR,
+ MANA_IB_RESPONDER_MALFORMED_WQE,
+ MANA_IB_GENERAL_HW_ERR,
+ MANA_IB_REQUESTER_RNR_NAK_RETRIES_EXCEEDED,
+ MANA_IB_REQUESTER_RETRIES_EXCEEDED,
+ MANA_IB_TOTAL_FATAL_ERR,
+ MANA_IB_RECEIVED_CNPS,
+ MANA_IB_NUM_QPS_CONGESTED,
+ MANA_IB_RATE_INC_EVENTS,
+ MANA_IB_NUM_QPS_RECOVERED,
+ MANA_IB_CURRENT_RATE,
+};
+
+struct rdma_hw_stats *mana_ib_alloc_hw_port_stats(struct ib_device *ibdev,
+ u32 port_num);
+int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ u32 port_num, int index);
+#endif /* _COUNTERS_H_ */
diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index f04a679d2871..0fc4e2679218 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -15,42 +15,58 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_device *ibdev = ibcq->device;
struct mana_ib_create_cq ucmd = {};
struct mana_ib_dev *mdev;
+ struct gdma_context *gc;
bool is_rnic_cq;
u32 doorbell;
+ u32 buf_size;
int err;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ gc = mdev_to_gc(mdev);
cq->comp_vector = attr->comp_vector % ibdev->num_comp_vectors;
cq->cq_handle = INVALID_MANA_HANDLE;
- if (udata->inlen < offsetof(struct mana_ib_create_cq, flags))
- return -EINVAL;
+ if (udata) {
+ if (udata->inlen < offsetof(struct mana_ib_create_cq, flags))
+ return -EINVAL;
- err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
- if (err) {
- ibdev_dbg(ibdev,
- "Failed to copy from udata for create cq, %d\n", err);
- return err;
- }
+ err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
+ if (err) {
+ ibdev_dbg(ibdev, "Failed to copy from udata for create cq, %d\n", err);
+ return err;
+ }
- is_rnic_cq = !!(ucmd.flags & MANA_IB_CREATE_RNIC_CQ);
+ is_rnic_cq = !!(ucmd.flags & MANA_IB_CREATE_RNIC_CQ);
- if (!is_rnic_cq && attr->cqe > mdev->adapter_caps.max_qp_wr) {
- ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe);
- return -EINVAL;
- }
+ if ((!is_rnic_cq && attr->cqe > mdev->adapter_caps.max_qp_wr) ||
+ attr->cqe > U32_MAX / COMP_ENTRY_SIZE) {
+ ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe);
+ return -EINVAL;
+ }
- cq->cqe = attr->cqe;
- err = mana_ib_create_queue(mdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE, &cq->queue);
- if (err) {
- ibdev_dbg(ibdev, "Failed to create queue for create cq, %d\n", err);
- return err;
- }
+ cq->cqe = attr->cqe;
+ err = mana_ib_create_queue(mdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE,
+ &cq->queue);
+ if (err) {
+ ibdev_dbg(ibdev, "Failed to create queue for create cq, %d\n", err);
+ return err;
+ }
- mana_ucontext = rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
- ibucontext);
- doorbell = mana_ucontext->doorbell;
+ mana_ucontext = rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
+ ibucontext);
+ doorbell = mana_ucontext->doorbell;
+ } else {
+ is_rnic_cq = true;
+ buf_size = MANA_PAGE_ALIGN(roundup_pow_of_two(attr->cqe * COMP_ENTRY_SIZE));
+ cq->cqe = buf_size / COMP_ENTRY_SIZE;
+ err = mana_ib_create_kernel_queue(mdev, buf_size, GDMA_CQ, &cq->queue);
+ if (err) {
+ ibdev_dbg(ibdev, "Failed to create kernel queue for create cq, %d\n", err);
+ return err;
+ }
+ doorbell = gc->mana_ib.doorbell;
+ }
if (is_rnic_cq) {
err = mana_ib_gd_create_cq(mdev, cq, doorbell);
@@ -66,13 +82,19 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
}
}
- resp.cqid = cq->queue.id;
- err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
- if (err) {
- ibdev_dbg(&mdev->ib_dev, "Failed to copy to udata, %d\n", err);
- goto err_remove_cq_cb;
+ if (udata) {
+ resp.cqid = cq->queue.id;
+ err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
+ if (err) {
+ ibdev_dbg(&mdev->ib_dev, "Failed to copy to udata, %d\n", err);
+ goto err_remove_cq_cb;
+ }
}
+ spin_lock_init(&cq->cq_lock);
+ INIT_LIST_HEAD(&cq->list_send_qp);
+ INIT_LIST_HEAD(&cq->list_recv_qp);
+
return 0;
err_remove_cq_cb:
@@ -122,7 +144,10 @@ int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
return -EINVAL;
/* Create CQ table entry */
WARN_ON(gc->cq_table[cq->queue.id]);
- gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
+ if (cq->queue.kmem)
+ gdma_cq = cq->queue.kmem;
+ else
+ gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
if (!gdma_cq)
return -ENOMEM;
@@ -141,6 +166,153 @@ void mana_ib_remove_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
if (cq->queue.id >= gc->max_num_cqs || cq->queue.id == INVALID_QUEUE_ID)
return;
+ if (cq->queue.kmem)
+ /* Kernel queue: it will be cleaned up and removed by the mana GDMA layer */
+ return;
+
kfree(gc->cq_table[cq->queue.id]);
gc->cq_table[cq->queue.id] = NULL;
}
+
+int mana_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+ struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
+ struct gdma_queue *gdma_cq = cq->queue.kmem;
+
+ if (!gdma_cq)
+ return -EINVAL;
+
+ mana_gd_ring_cq(gdma_cq, SET_ARM_BIT);
+ return 0;
+}
+
+static inline void handle_ud_sq_cqe(struct mana_ib_qp *qp, struct gdma_comp *cqe)
+{
+ struct mana_rdma_cqe *rdma_cqe = (struct mana_rdma_cqe *)cqe->cqe_data;
+ struct gdma_queue *wq = qp->ud_qp.queues[MANA_UD_SEND_QUEUE].kmem;
+ struct ud_sq_shadow_wqe *shadow_wqe;
+
+ shadow_wqe = shadow_queue_get_next_to_complete(&qp->shadow_sq);
+ if (!shadow_wqe)
+ return;
+
+ shadow_wqe->header.error_code = rdma_cqe->ud_send.vendor_error;
+
+ wq->tail += shadow_wqe->header.posted_wqe_size;
+ shadow_queue_advance_next_to_complete(&qp->shadow_sq);
+}
+
+static inline void handle_ud_rq_cqe(struct mana_ib_qp *qp, struct gdma_comp *cqe)
+{
+ struct mana_rdma_cqe *rdma_cqe = (struct mana_rdma_cqe *)cqe->cqe_data;
+ struct gdma_queue *wq = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].kmem;
+ struct ud_rq_shadow_wqe *shadow_wqe;
+
+ shadow_wqe = shadow_queue_get_next_to_complete(&qp->shadow_rq);
+ if (!shadow_wqe)
+ return;
+
+ shadow_wqe->byte_len = rdma_cqe->ud_recv.msg_len;
+ shadow_wqe->src_qpn = rdma_cqe->ud_recv.src_qpn;
+ shadow_wqe->header.error_code = IB_WC_SUCCESS;
+
+ wq->tail += shadow_wqe->header.posted_wqe_size;
+ shadow_queue_advance_next_to_complete(&qp->shadow_rq);
+}
+
+static void mana_handle_cqe(struct mana_ib_dev *mdev, struct gdma_comp *cqe)
+{
+ struct mana_ib_qp *qp = mana_get_qp_ref(mdev, cqe->wq_num, cqe->is_sq);
+
+ if (!qp)
+ return;
+
+ if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_UD) {
+ if (cqe->is_sq)
+ handle_ud_sq_cqe(qp, cqe);
+ else
+ handle_ud_rq_cqe(qp, cqe);
+ }
+
+ mana_put_qp_ref(qp);
+}
+
+static void fill_verbs_from_shadow_wqe(struct mana_ib_qp *qp, struct ib_wc *wc,
+ const struct shadow_wqe_header *shadow_wqe)
+{
+ const struct ud_rq_shadow_wqe *ud_wqe = (const struct ud_rq_shadow_wqe *)shadow_wqe;
+
+ wc->wr_id = shadow_wqe->wr_id;
+ wc->status = shadow_wqe->error_code;
+ wc->opcode = shadow_wqe->opcode;
+ wc->vendor_err = shadow_wqe->error_code;
+ wc->wc_flags = 0;
+ wc->qp = &qp->ibqp;
+ wc->pkey_index = 0;
+
+ if (shadow_wqe->opcode == IB_WC_RECV) {
+ wc->byte_len = ud_wqe->byte_len;
+ wc->src_qp = ud_wqe->src_qpn;
+ wc->wc_flags |= IB_WC_GRH;
+ }
+}
+
+static int mana_process_completions(struct mana_ib_cq *cq, int nwc, struct ib_wc *wc)
+{
+ struct shadow_wqe_header *shadow_wqe;
+ struct mana_ib_qp *qp;
+ int wc_index = 0;
+
+ /* process send shadow queue completions */
+ list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
+ while ((shadow_wqe = shadow_queue_get_next_to_consume(&qp->shadow_sq))
+ != NULL) {
+ if (wc_index >= nwc)
+ goto out;
+
+ fill_verbs_from_shadow_wqe(qp, &wc[wc_index], shadow_wqe);
+ shadow_queue_advance_consumer(&qp->shadow_sq);
+ wc_index++;
+ }
+ }
+
+ /* process recv shadow queue completions */
+ list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
+ while ((shadow_wqe = shadow_queue_get_next_to_consume(&qp->shadow_rq))
+ != NULL) {
+ if (wc_index >= nwc)
+ goto out;
+
+ fill_verbs_from_shadow_wqe(qp, &wc[wc_index], shadow_wqe);
+ shadow_queue_advance_consumer(&qp->shadow_rq);
+ wc_index++;
+ }
+ }
+
+out:
+ return wc_index;
+}
+
+int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
+ struct mana_ib_dev *mdev = container_of(ibcq->device, struct mana_ib_dev, ib_dev);
+ struct gdma_queue *queue = cq->queue.kmem;
+ struct gdma_comp gdma_cqe;
+ unsigned long flags;
+ int num_polled = 0;
+ int comp_read, i;
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+ for (i = 0; i < num_entries; i++) {
+ comp_read = mana_gd_poll_cq(queue, &gdma_cqe, 1);
+ if (comp_read < 1)
+ break;
+ mana_handle_cqe(mdev, &gdma_cqe);
+ }
+
+ num_polled = mana_process_completions(cq, num_entries, wc);
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+ return num_polled;
+}
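
mana_ib_poll_cq() works in two phases under cq_lock: it first drains hardware CQEs and marks the matching shadow WQEs complete (mana_handle_cqe()), then copies completed shadow WQEs into the caller's ib_wc array. A minimal sketch of how a kernel consumer would drive the new poll/arm verbs; the CQ pointer and batch size are assumptions, not part of the patch:

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

static void example_drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc[8];
	int n, i;

	do {
		/* dispatched to mana_ib_poll_cq() via the new .poll_cq op */
		n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc);
		for (i = 0; i < n; i++)
			; /* consume wc[i].status / wc[i].opcode / wc[i].wr_id */
	} while (n > 0);

	/* re-arm the CQ; dispatched to mana_ib_arm_cq() via .req_notify_cq */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
}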
diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c
index 3416a85f8738..b31089320aa5 100644
--- a/drivers/infiniband/hw/mana/device.c
+++ b/drivers/infiniband/hw/mana/device.c
@@ -19,6 +19,7 @@ static const struct ib_device_ops mana_ib_dev_ops = {
.add_gid = mana_ib_gd_add_gid,
.alloc_pd = mana_ib_alloc_pd,
.alloc_ucontext = mana_ib_alloc_ucontext,
+ .create_ah = mana_ib_create_ah,
.create_cq = mana_ib_create_cq,
.create_qp = mana_ib_create_qp,
.create_rwq_ind_table = mana_ib_create_rwq_ind_table,
@@ -27,22 +28,30 @@ static const struct ib_device_ops mana_ib_dev_ops = {
.dealloc_ucontext = mana_ib_dealloc_ucontext,
.del_gid = mana_ib_gd_del_gid,
.dereg_mr = mana_ib_dereg_mr,
+ .destroy_ah = mana_ib_destroy_ah,
.destroy_cq = mana_ib_destroy_cq,
.destroy_qp = mana_ib_destroy_qp,
.destroy_rwq_ind_table = mana_ib_destroy_rwq_ind_table,
.destroy_wq = mana_ib_destroy_wq,
.disassociate_ucontext = mana_ib_disassociate_ucontext,
+ .get_dma_mr = mana_ib_get_dma_mr,
.get_link_layer = mana_ib_get_link_layer,
.get_port_immutable = mana_ib_get_port_immutable,
.mmap = mana_ib_mmap,
.modify_qp = mana_ib_modify_qp,
.modify_wq = mana_ib_modify_wq,
+ .poll_cq = mana_ib_poll_cq,
+ .post_recv = mana_ib_post_recv,
+ .post_send = mana_ib_post_send,
.query_device = mana_ib_query_device,
.query_gid = mana_ib_query_gid,
.query_pkey = mana_ib_query_pkey,
.query_port = mana_ib_query_port,
.reg_user_mr = mana_ib_reg_user_mr,
+ .reg_user_mr_dmabuf = mana_ib_reg_user_mr_dmabuf,
+ .req_notify_cq = mana_ib_arm_cq,
+ INIT_RDMA_OBJ_SIZE(ib_ah, mana_ib_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, mana_ib_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, mana_ib_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_qp, mana_ib_qp, ibqp),
@@ -51,6 +60,43 @@ static const struct ib_device_ops mana_ib_dev_ops = {
ib_ind_table),
};
+static const struct ib_device_ops mana_ib_stats_ops = {
+ .alloc_hw_port_stats = mana_ib_alloc_hw_port_stats,
+ .get_hw_stats = mana_ib_get_hw_stats,
+};
+
+static int mana_ib_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct mana_ib_dev *dev = container_of(this, struct mana_ib_dev, nb);
+ struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+ struct gdma_context *gc = dev->gdma_dev->gdma_context;
+ struct mana_context *mc = gc->mana.driver_data;
+ struct net_device *ndev;
+
+ /* Only process events from our parent device */
+ if (event_dev != mc->ports[0])
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ ndev = mana_get_primary_netdev(mc, 0, &dev->dev_tracker);
+ /*
+ * RDMA core will setup GID based on updated netdev.
+ * It's not possible to race with the core as rtnl lock is being
+ * held.
+ */
+ ib_device_set_netdev(&dev->ib_dev, ndev, 1);
+
+ /* mana_get_primary_netdev() returns ndev with refcount held */
+ netdev_put(ndev, &dev->dev_tracker);
+
+ return NOTIFY_OK;
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
static int mana_ib_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
@@ -84,10 +130,8 @@ static int mana_ib_probe(struct auxiliary_device *adev,
dev->ib_dev.num_comp_vectors = mdev->gdma_context->max_num_queues;
dev->ib_dev.dev.parent = mdev->gdma_context->dev;
- rcu_read_lock(); /* required to get primary netdev */
- ndev = mana_get_primary_netdev_rcu(mc, 0);
+ ndev = mana_get_primary_netdev(mc, 0, &dev->dev_tracker);
if (!ndev) {
- rcu_read_unlock();
ret = -ENODEV;
ibdev_err(&dev->ib_dev, "Failed to get netdev for IB port 1");
goto free_ib_device;
@@ -95,7 +139,8 @@ static int mana_ib_probe(struct auxiliary_device *adev,
ether_addr_copy(mac_addr, ndev->dev_addr);
addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, ndev->dev_addr);
ret = ib_device_set_netdev(&dev->ib_dev, ndev, 1);
- rcu_read_unlock();
+ /* mana_get_primary_netdev() returns ndev with refcount held */
+ netdev_put(ndev, &dev->dev_tracker);
if (ret) {
ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret);
goto free_ib_device;
@@ -109,17 +154,27 @@ static int mana_ib_probe(struct auxiliary_device *adev,
}
dev->gdma_dev = &mdev->gdma_context->mana_ib;
+ dev->nb.notifier_call = mana_ib_netdev_event;
+ ret = register_netdevice_notifier(&dev->nb);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to register net notifier, %d",
+ ret);
+ goto deregister_device;
+ }
+
ret = mana_ib_gd_query_adapter_caps(dev);
if (ret) {
ibdev_err(&dev->ib_dev, "Failed to query device caps, ret %d",
ret);
- goto deregister_device;
+ goto deregister_net_notifier;
}
+ ib_set_device_ops(&dev->ib_dev, &mana_ib_stats_ops);
+
ret = mana_ib_create_eqs(dev);
if (ret) {
ibdev_err(&dev->ib_dev, "Failed to create EQs, ret %d", ret);
- goto deregister_device;
+ goto deregister_net_notifier;
}
ret = mana_ib_gd_create_rnic_adapter(dev);
@@ -134,20 +189,31 @@ static int mana_ib_probe(struct auxiliary_device *adev,
goto destroy_rnic;
}
+ dev->av_pool = dma_pool_create("mana_ib_av", mdev->gdma_context->dev,
+ MANA_AV_BUFFER_SIZE, MANA_AV_BUFFER_SIZE, 0);
+ if (!dev->av_pool) {
+ ret = -ENOMEM;
+ goto destroy_rnic;
+ }
+
ret = ib_register_device(&dev->ib_dev, "mana_%d",
mdev->gdma_context->dev);
if (ret)
- goto destroy_rnic;
+ goto deallocate_pool;
dev_set_drvdata(&adev->dev, dev);
return 0;
+deallocate_pool:
+ dma_pool_destroy(dev->av_pool);
destroy_rnic:
xa_destroy(&dev->qp_table_wq);
mana_ib_gd_destroy_rnic_adapter(dev);
destroy_eqs:
mana_ib_destroy_eqs(dev);
+deregister_net_notifier:
+ unregister_netdevice_notifier(&dev->nb);
deregister_device:
mana_gd_deregister_device(dev->gdma_dev);
free_ib_device:
@@ -160,9 +226,11 @@ static void mana_ib_remove(struct auxiliary_device *adev)
struct mana_ib_dev *dev = dev_get_drvdata(&adev->dev);
ib_unregister_device(&dev->ib_dev);
+ dma_pool_destroy(dev->av_pool);
xa_destroy(&dev->qp_table_wq);
mana_ib_gd_destroy_rnic_adapter(dev);
mana_ib_destroy_eqs(dev);
+ unregister_netdevice_notifier(&dev->nb);
mana_gd_deregister_device(dev->gdma_dev);
ib_dealloc_device(&dev->ib_dev);
}
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 457cea6d9909..eda9c5b971de 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -82,6 +82,9 @@ int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_PD, sizeof(req),
sizeof(resp));
+ if (!udata)
+ flags |= GDMA_PD_FLAG_ALLOW_GPA_MR;
+
req.flags = flags;
err = mana_gd_send_request(gc, sizeof(req), &req,
sizeof(resp), &resp);
@@ -237,6 +240,27 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret);
}
+int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_queue_type type,
+ struct mana_ib_queue *queue)
+{
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct gdma_queue_spec spec = {};
+ int err;
+
+ queue->id = INVALID_QUEUE_ID;
+ queue->gdma_region = GDMA_INVALID_DMA_REGION;
+ spec.type = type;
+ spec.monitor_avl_buf = false;
+ spec.queue_size = size;
+ err = mana_gd_create_mana_wq_cq(&gc->mana_ib, &spec, &queue->kmem);
+ if (err)
+ return err;
+ /* transfer ownership of the DMA region from mana to mana_ib */
+ queue->gdma_region = queue->kmem->mem_info.dma_region_handle;
+ queue->kmem->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
+ return 0;
+}
+
int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
struct mana_ib_queue *queue)
{
@@ -276,6 +300,8 @@ void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue
*/
mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);
ib_umem_release(queue->umem);
+ if (queue->kmem)
+ mana_gd_destroy_queue(mdev_to_gc(mdev), queue->kmem);
}
static int
@@ -358,7 +384,7 @@ static int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem
unsigned int tail = 0;
u64 *page_addr_list;
void *request_buf;
- int err;
+ int err = 0;
gc = mdev_to_gc(dev);
hwc = gc->hwc.driver_data;
@@ -535,8 +561,10 @@ int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
- if (port_num == 1)
+ if (port_num == 1) {
immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ }
return 0;
}
@@ -595,8 +623,11 @@ int mana_ib_query_port(struct ib_device *ibdev, u32 port,
props->active_width = IB_WIDTH_4X;
props->active_speed = IB_SPEED_EDR;
props->pkey_tbl_len = 1;
- if (port == 1)
+ if (port == 1) {
props->gid_tbl_len = 16;
+ props->port_cap_flags = IB_PORT_CM_SUP;
+ props->ip_gids = true;
+ }
return 0;
}
@@ -634,7 +665,7 @@ int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
mana_gd_init_req_hdr(&req.hdr, MANA_IB_GET_ADAPTER_CAP, sizeof(req),
sizeof(resp));
- req.hdr.resp.msg_version = GDMA_MESSAGE_V3;
+ req.hdr.resp.msg_version = GDMA_MESSAGE_V4;
req.hdr.dev_id = dev->gdma_dev->dev_id;
err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req),
@@ -663,6 +694,7 @@ int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
caps->max_inline_data_size = resp.max_inline_data_size;
caps->max_send_sge_count = resp.max_send_sge_count;
caps->max_recv_sge_count = resp.max_recv_sge_count;
+ caps->feature_flags = resp.feature_flags;
return 0;
}
@@ -678,7 +710,7 @@ mana_ib_event_handler(void *ctx, struct gdma_queue *q, struct gdma_event *event)
switch (event->type) {
case GDMA_EQE_RNIC_QP_FATAL:
qpn = event->details[0];
- qp = mana_get_qp_ref(mdev, qpn);
+ qp = mana_get_qp_ref(mdev, qpn, false);
if (!qp)
break;
if (qp->ibqp.event_handler) {
@@ -762,6 +794,9 @@ int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev)
req.hdr.dev_id = gc->mana_ib.dev_id;
req.notify_eq_id = mdev->fatal_err_eq->id;
+ if (mdev->adapter_caps.feature_flags & MANA_IB_FEATURE_CLIENT_ERROR_CQE_SUPPORT)
+ req.feature_flags |= MANA_IB_FEATURE_CLIENT_ERROR_CQE_REQUEST;
+
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err) {
ibdev_err(&mdev->ib_dev, "Failed to create RNIC adapter err %d", err);
@@ -987,3 +1022,61 @@ int mana_ib_gd_destroy_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
}
return 0;
}
+
+int mana_ib_gd_create_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
+ struct ib_qp_init_attr *attr, u32 doorbell, u32 type)
+{
+ struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
+ struct mana_ib_cq *recv_cq = container_of(qp->ibqp.recv_cq, struct mana_ib_cq, ibcq);
+ struct mana_ib_pd *pd = container_of(qp->ibqp.pd, struct mana_ib_pd, ibpd);
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct mana_rnic_create_udqp_resp resp = {};
+ struct mana_rnic_create_udqp_req req = {};
+ int err, i;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_UD_QP, sizeof(req), sizeof(resp));
+ req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.adapter = mdev->adapter_handle;
+ req.pd_handle = pd->pd_handle;
+ req.send_cq_handle = send_cq->cq_handle;
+ req.recv_cq_handle = recv_cq->cq_handle;
+ for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; i++)
+ req.dma_region[i] = qp->ud_qp.queues[i].gdma_region;
+ req.doorbell_page = doorbell;
+ req.max_send_wr = attr->cap.max_send_wr;
+ req.max_recv_wr = attr->cap.max_recv_wr;
+ req.max_send_sge = attr->cap.max_send_sge;
+ req.max_recv_sge = attr->cap.max_recv_sge;
+ req.qp_type = type;
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to create ud qp err %d", err);
+ return err;
+ }
+ qp->qp_handle = resp.qp_handle;
+ for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; i++) {
+ qp->ud_qp.queues[i].id = resp.queue_ids[i];
+ /* The GDMA regions are now owned by the RNIC QP handle */
+ qp->ud_qp.queues[i].gdma_region = GDMA_INVALID_DMA_REGION;
+ }
+ return 0;
+}
+
+int mana_ib_gd_destroy_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
+{
+ struct mana_rnic_destroy_udqp_resp resp = {0};
+ struct mana_rnic_destroy_udqp_req req = {0};
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_UD_QP, sizeof(req), sizeof(resp));
+ req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.adapter = mdev->adapter_handle;
+ req.qp_handle = qp->qp_handle;
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to destroy ud qp err %d", err);
+ return err;
+ }
+ return 0;
+}
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index b53a5b4de908..6903946677e5 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -11,8 +11,11 @@
#include <rdma/ib_umem.h>
#include <rdma/mana-abi.h>
#include <rdma/uverbs_ioctl.h>
+#include <linux/dmapool.h>
#include <net/mana/mana.h>
+#include "shadow_queue.h"
+#include "counters.h"
#define PAGE_SZ_BM \
(SZ_4K | SZ_8K | SZ_16K | SZ_32K | SZ_64K | SZ_128K | SZ_256K | \
@@ -21,6 +24,9 @@
/* MANA doesn't have any limit for MR size */
#define MANA_IB_MAX_MR_SIZE U64_MAX
+/* Send queue ID mask */
+#define MANA_SENDQ_MASK BIT(31)
+
/*
* The hardware limit of number of MRs is greater than maximum number of MRs
* that can possibly represent in 24 bits
@@ -32,6 +38,11 @@
*/
#define MANA_CA_ACK_DELAY 16
+/*
+ * The buffer used for writing AV
+ */
+#define MANA_AV_BUFFER_SIZE 64
+
struct mana_ib_adapter_caps {
u32 max_sq_id;
u32 max_rq_id;
@@ -48,10 +59,12 @@ struct mana_ib_adapter_caps {
u32 max_send_sge_count;
u32 max_recv_sge_count;
u32 max_inline_data_size;
+ u64 feature_flags;
};
struct mana_ib_queue {
struct ib_umem *umem;
+ struct gdma_queue *kmem;
u64 gdma_region;
u64 id;
};
@@ -64,6 +77,9 @@ struct mana_ib_dev {
struct gdma_queue **eqs;
struct xarray qp_table_wq;
struct mana_ib_adapter_caps adapter_caps;
+ struct dma_pool *av_pool;
+ netdevice_tracker dev_tracker;
+ struct notifier_block nb;
};
struct mana_ib_wq {
@@ -87,6 +103,25 @@ struct mana_ib_pd {
u32 tx_vp_offset;
};
+struct mana_ib_av {
+ u8 dest_ip[16];
+ u8 dest_mac[ETH_ALEN];
+ u16 udp_src_port;
+ u8 src_ip[16];
+ u32 hop_limit : 8;
+ u32 reserved1 : 12;
+ u32 dscp : 6;
+ u32 reserved2 : 5;
+ u32 is_ipv6 : 1;
+ u32 reserved3 : 32;
+};
+
+struct mana_ib_ah {
+ struct ib_ah ibah;
+ struct mana_ib_av *av;
+ dma_addr_t dma_handle;
+};
+
struct mana_ib_mr {
struct ib_mr ibmr;
struct ib_umem *umem;
@@ -96,6 +131,10 @@ struct mana_ib_mr {
struct mana_ib_cq {
struct ib_cq ibcq;
struct mana_ib_queue queue;
+ /* protects CQ polling */
+ spinlock_t cq_lock;
+ struct list_head list_send_qp;
+ struct list_head list_recv_qp;
int cqe;
u32 comp_vector;
mana_handle_t cq_handle;
@@ -114,6 +153,17 @@ struct mana_ib_rc_qp {
struct mana_ib_queue queues[MANA_RC_QUEUE_TYPE_MAX];
};
+enum mana_ud_queue_type {
+ MANA_UD_SEND_QUEUE = 0,
+ MANA_UD_RECV_QUEUE,
+ MANA_UD_QUEUE_TYPE_MAX,
+};
+
+struct mana_ib_ud_qp {
+ struct mana_ib_queue queues[MANA_UD_QUEUE_TYPE_MAX];
+ u32 sq_psn;
+};
+
struct mana_ib_qp {
struct ib_qp ibqp;
@@ -121,11 +171,17 @@ struct mana_ib_qp {
union {
struct mana_ib_queue raw_sq;
struct mana_ib_rc_qp rc_qp;
+ struct mana_ib_ud_qp ud_qp;
};
/* The port on the IB device, starting with 1 */
u32 port;
+ struct list_head cq_send_list;
+ struct list_head cq_recv_list;
+ struct shadow_queue shadow_rq;
+ struct shadow_queue shadow_sq;
+
refcount_t refcount;
struct completion free;
};
@@ -145,17 +201,24 @@ enum mana_ib_command_code {
MANA_IB_DESTROY_ADAPTER = 0x30003,
MANA_IB_CONFIG_IP_ADDR = 0x30004,
MANA_IB_CONFIG_MAC_ADDR = 0x30005,
+ MANA_IB_CREATE_UD_QP = 0x30006,
+ MANA_IB_DESTROY_UD_QP = 0x30007,
MANA_IB_CREATE_CQ = 0x30008,
MANA_IB_DESTROY_CQ = 0x30009,
MANA_IB_CREATE_RC_QP = 0x3000a,
MANA_IB_DESTROY_RC_QP = 0x3000b,
MANA_IB_SET_QP_STATE = 0x3000d,
+ MANA_IB_QUERY_VF_COUNTERS = 0x30022,
};
struct mana_ib_query_adapter_caps_req {
struct gdma_req_hdr hdr;
}; /*HW Data */
+enum mana_ib_adapter_features {
+ MANA_IB_FEATURE_CLIENT_ERROR_CQE_SUPPORT = BIT(4),
+};
+
struct mana_ib_query_adapter_caps_resp {
struct gdma_resp_hdr hdr;
u32 max_sq_id;
@@ -176,8 +239,13 @@ struct mana_ib_query_adapter_caps_resp {
u32 max_send_sge_count;
u32 max_recv_sge_count;
u32 max_inline_data_size;
+ u64 feature_flags;
}; /* HW Data */
+enum mana_ib_adapter_features_request {
+ MANA_IB_FEATURE_CLIENT_ERROR_CQE_REQUEST = BIT(1),
+}; /*HW Data */
+
struct mana_rnic_create_adapter_req {
struct gdma_req_hdr hdr;
u32 notify_eq_id;
@@ -296,6 +364,37 @@ struct mana_rnic_destroy_rc_qp_resp {
struct gdma_resp_hdr hdr;
}; /* HW Data */
+struct mana_rnic_create_udqp_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+ mana_handle_t pd_handle;
+ mana_handle_t send_cq_handle;
+ mana_handle_t recv_cq_handle;
+ u64 dma_region[MANA_UD_QUEUE_TYPE_MAX];
+ u32 qp_type;
+ u32 doorbell_page;
+ u32 max_send_wr;
+ u32 max_recv_wr;
+ u32 max_send_sge;
+ u32 max_recv_sge;
+}; /* HW Data */
+
+struct mana_rnic_create_udqp_resp {
+ struct gdma_resp_hdr hdr;
+ mana_handle_t qp_handle;
+ u32 queue_ids[MANA_UD_QUEUE_TYPE_MAX];
+}; /* HW Data*/
+
+struct mana_rnic_destroy_udqp_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+ mana_handle_t qp_handle;
+}; /* HW Data */
+
+struct mana_rnic_destroy_udqp_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW Data */
+
struct mana_ib_ah_attr {
u8 src_addr[16];
u8 dest_addr[16];
@@ -332,17 +431,104 @@ struct mana_rnic_set_qp_state_resp {
struct gdma_resp_hdr hdr;
}; /* HW Data */
+enum WQE_OPCODE_TYPES {
+ WQE_TYPE_UD_SEND = 0,
+ WQE_TYPE_UD_RECV = 8,
+}; /* HW DATA */
+
+struct rdma_send_oob {
+ u32 wqe_type : 5;
+ u32 fence : 1;
+ u32 signaled : 1;
+ u32 solicited : 1;
+ u32 psn : 24;
+
+ u32 ssn_or_rqpn : 24;
+ u32 reserved1 : 8;
+ union {
+ struct {
+ u32 remote_qkey;
+ u32 immediate;
+ u32 reserved1;
+ u32 reserved2;
+ } ud_send;
+ };
+}; /* HW DATA */
+
+struct mana_rdma_cqe {
+ union {
+ struct {
+ u8 cqe_type;
+ u8 data[GDMA_COMP_DATA_SIZE - 1];
+ };
+ struct {
+ u32 cqe_type : 8;
+ u32 vendor_error : 9;
+ u32 reserved1 : 15;
+ u32 sge_offset : 5;
+ u32 tx_wqe_offset : 27;
+ } ud_send;
+ struct {
+ u32 cqe_type : 8;
+ u32 reserved1 : 24;
+ u32 msg_len;
+ u32 src_qpn : 24;
+ u32 reserved2 : 8;
+ u32 imm_data;
+ u32 rx_wqe_offset;
+ } ud_recv;
+ };
+}; /* HW DATA */
+
+struct mana_rnic_query_vf_cntrs_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+}; /* HW Data */
+
+struct mana_rnic_query_vf_cntrs_resp {
+ struct gdma_resp_hdr hdr;
+ u64 requester_timeout;
+ u64 requester_oos_nak;
+ u64 requester_rnr_nak;
+ u64 responder_rnr_nak;
+ u64 responder_oos;
+ u64 responder_dup_request;
+ u64 requester_implicit_nak;
+ u64 requester_readresp_psn_mismatch;
+ u64 nak_inv_req;
+ u64 nak_access_err;
+ u64 nak_opp_err;
+ u64 nak_inv_read;
+ u64 responder_local_len_err;
+ u64 requestor_local_prot_err;
+ u64 responder_rem_access_err;
+ u64 responder_local_qp_err;
+ u64 responder_malformed_wqe;
+ u64 general_hw_err;
+ u64 requester_rnr_nak_retries_exceeded;
+ u64 requester_retries_exceeded;
+ u64 total_fatal_err;
+ u64 received_cnps;
+ u64 num_qps_congested;
+ u64 rate_inc_events;
+ u64 num_qps_recovered;
+ u64 current_rate;
+}; /* HW Data */
+
static inline struct gdma_context *mdev_to_gc(struct mana_ib_dev *mdev)
{
return mdev->gdma_dev->gdma_context;
}
static inline struct mana_ib_qp *mana_get_qp_ref(struct mana_ib_dev *mdev,
- uint32_t qid)
+ u32 qid, bool is_sq)
{
struct mana_ib_qp *qp;
unsigned long flag;
+ if (is_sq)
+ qid |= MANA_SENDQ_MASK;
+
xa_lock_irqsave(&mdev->qp_table_wq, flag);
qp = xa_load(&mdev->qp_table_wq, qid);
if (qp)
@@ -388,6 +574,8 @@ int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
mana_handle_t gdma_region);
+int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_queue_type type,
+ struct mana_ib_queue *queue);
int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
struct mana_ib_queue *queue);
void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue);
@@ -480,4 +668,24 @@ int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq);
int mana_ib_gd_create_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
struct ib_qp_init_attr *attr, u32 doorbell, u64 flags);
int mana_ib_gd_destroy_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp);
+
+int mana_ib_gd_create_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
+ struct ib_qp_init_attr *attr, u32 doorbell, u32 type);
+int mana_ib_gd_destroy_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp);
+
+int mana_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata);
+int mana_ib_destroy_ah(struct ib_ah *ah, u32 flags);
+
+int mana_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
+int mana_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+
+int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int mana_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+
+struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 length,
+ u64 iova, int fd, int mr_access_flags,
+ struct uverbs_attr_bundle *attrs);
#endif
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
index 887b09dd86e7..f99557ec7767 100644
--- a/drivers/infiniband/hw/mana/mr.c
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -8,6 +8,8 @@
#define VALID_MR_FLAGS \
(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ)
+#define VALID_DMA_MR_FLAGS (IB_ACCESS_LOCAL_WRITE)
+
static enum gdma_mr_access_flags
mana_ib_verbs_to_gdma_access_flags(int access_flags)
{
@@ -39,6 +41,8 @@ static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
req.mr_type = mr_params->mr_type;
switch (mr_params->mr_type) {
+ case GDMA_MR_TYPE_GPA:
+ break;
case GDMA_MR_TYPE_GVA:
req.gva.dma_region_handle = mr_params->gva.dma_region_handle;
req.gva.virtual_address = mr_params->gva.virtual_address;
@@ -169,6 +173,107 @@ err_free:
return ERR_PTR(err);
}
+struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 length,
+ u64 iova, int fd, int access_flags,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
+ struct gdma_create_mr_params mr_params = {};
+ struct ib_device *ibdev = ibpd->device;
+ struct ib_umem_dmabuf *umem_dmabuf;
+ struct mana_ib_dev *dev;
+ struct mana_ib_mr *mr;
+ u64 dma_region_handle;
+ int err;
+
+ dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+
+ access_flags &= ~IB_ACCESS_OPTIONAL;
+ if (access_flags & ~VALID_MR_FLAGS)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, start, length, fd, access_flags);
+ if (IS_ERR(umem_dmabuf)) {
+ err = PTR_ERR(umem_dmabuf);
+ ibdev_dbg(ibdev, "Failed to get dmabuf umem, %d\n", err);
+ goto err_free;
+ }
+
+ mr->umem = &umem_dmabuf->umem;
+
+ err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova);
+ if (err) {
+ ibdev_dbg(ibdev, "Failed create dma region for user-mr, %d\n",
+ err);
+ goto err_umem;
+ }
+
+ mr_params.pd_handle = pd->pd_handle;
+ mr_params.mr_type = GDMA_MR_TYPE_GVA;
+ mr_params.gva.dma_region_handle = dma_region_handle;
+ mr_params.gva.virtual_address = iova;
+ mr_params.gva.access_flags =
+ mana_ib_verbs_to_gdma_access_flags(access_flags);
+
+ err = mana_ib_gd_create_mr(dev, mr, &mr_params);
+ if (err)
+ goto err_dma_region;
+
+ /*
+ * There is no need to keep track of dma_region_handle after MR is
+ * successfully created. The dma_region_handle is tracked in the PF
+ * as part of the lifecycle of this MR.
+ */
+
+ return &mr->ibmr;
+
+err_dma_region:
+ mana_gd_destroy_dma_region(mdev_to_gc(dev), dma_region_handle);
+
+err_umem:
+ ib_umem_release(mr->umem);
+
+err_free:
+ kfree(mr);
+ return ERR_PTR(err);
+}
+
+struct ib_mr *mana_ib_get_dma_mr(struct ib_pd *ibpd, int access_flags)
+{
+ struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
+ struct gdma_create_mr_params mr_params = {};
+ struct ib_device *ibdev = ibpd->device;
+ struct mana_ib_dev *dev;
+ struct mana_ib_mr *mr;
+ int err;
+
+ dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+
+ if (access_flags & ~VALID_DMA_MR_FLAGS)
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr_params.pd_handle = pd->pd_handle;
+ mr_params.mr_type = GDMA_MR_TYPE_GPA;
+
+ err = mana_ib_gd_create_mr(dev, mr, &mr_params);
+ if (err)
+ goto err_free;
+
+ return &mr->ibmr;
+
+err_free:
+ kfree(mr);
+ return ERR_PTR(err);
+}
+
int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
struct mana_ib_mr *mr = container_of(ibmr, struct mana_ib_mr, ibmr);
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index 73d67c853b6f..c928af58f38b 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -398,18 +398,128 @@ err_free_vport:
return err;
}
+static u32 mana_ib_wqe_size(u32 sge, u32 oob_size)
+{
+ u32 wqe_size = sge * sizeof(struct gdma_sge) + sizeof(struct gdma_wqe) + oob_size;
+
+ return ALIGN(wqe_size, GDMA_WQE_BU_SIZE);
+}
+
+static u32 mana_ib_queue_size(struct ib_qp_init_attr *attr, u32 queue_type)
+{
+ u32 queue_size;
+
+ switch (attr->qp_type) {
+ case IB_QPT_UD:
+ case IB_QPT_GSI:
+ if (queue_type == MANA_UD_SEND_QUEUE)
+ queue_size = attr->cap.max_send_wr *
+ mana_ib_wqe_size(attr->cap.max_send_sge, INLINE_OOB_LARGE_SIZE);
+ else
+ queue_size = attr->cap.max_recv_wr *
+ mana_ib_wqe_size(attr->cap.max_recv_sge, INLINE_OOB_SMALL_SIZE);
+ break;
+ default:
+ return 0;
+ }
+
+ return MANA_PAGE_ALIGN(roundup_pow_of_two(queue_size));
+}
+
+static enum gdma_queue_type mana_ib_queue_type(struct ib_qp_init_attr *attr, u32 queue_type)
+{
+ enum gdma_queue_type type;
+
+ switch (attr->qp_type) {
+ case IB_QPT_UD:
+ case IB_QPT_GSI:
+ if (queue_type == MANA_UD_SEND_QUEUE)
+ type = GDMA_SQ;
+ else
+ type = GDMA_RQ;
+ break;
+ default:
+ type = GDMA_INVALID_QUEUE;
+ }
+ return type;
+}
+
+static int mana_table_store_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
+{
+ return xa_insert_irq(&mdev->qp_table_wq, qp->ibqp.qp_num, qp,
+ GFP_KERNEL);
+}
+
+static void mana_table_remove_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
+{
+ xa_erase_irq(&mdev->qp_table_wq, qp->ibqp.qp_num);
+}
+
+static int mana_table_store_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
+{
+ u32 qids = qp->ud_qp.queues[MANA_UD_SEND_QUEUE].id | MANA_SENDQ_MASK;
+ u32 qidr = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].id;
+ int err;
+
+ err = xa_insert_irq(&mdev->qp_table_wq, qids, qp, GFP_KERNEL);
+ if (err)
+ return err;
+
+ err = xa_insert_irq(&mdev->qp_table_wq, qidr, qp, GFP_KERNEL);
+ if (err)
+ goto remove_sq;
+
+ return 0;
+
+remove_sq:
+ xa_erase_irq(&mdev->qp_table_wq, qids);
+ return err;
+}
+
+static void mana_table_remove_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
+{
+ u32 qids = qp->ud_qp.queues[MANA_UD_SEND_QUEUE].id | MANA_SENDQ_MASK;
+ u32 qidr = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].id;
+
+ xa_erase_irq(&mdev->qp_table_wq, qids);
+ xa_erase_irq(&mdev->qp_table_wq, qidr);
+}
+
static int mana_table_store_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
{
refcount_set(&qp->refcount, 1);
init_completion(&qp->free);
- return xa_insert_irq(&mdev->qp_table_wq, qp->ibqp.qp_num, qp,
- GFP_KERNEL);
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_RC:
+ return mana_table_store_rc_qp(mdev, qp);
+ case IB_QPT_UD:
+ case IB_QPT_GSI:
+ return mana_table_store_ud_qp(mdev, qp);
+ default:
+ ibdev_dbg(&mdev->ib_dev, "Unknown QP type for storing in mana table, %d\n",
+ qp->ibqp.qp_type);
+ }
+
+ return -EINVAL;
}
static void mana_table_remove_qp(struct mana_ib_dev *mdev,
struct mana_ib_qp *qp)
{
- xa_erase_irq(&mdev->qp_table_wq, qp->ibqp.qp_num);
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_RC:
+ mana_table_remove_rc_qp(mdev, qp);
+ break;
+ case IB_QPT_UD:
+ case IB_QPT_GSI:
+ mana_table_remove_ud_qp(mdev, qp);
+ break;
+ default:
+ ibdev_dbg(&mdev->ib_dev, "Unknown QP type for removing from mana table, %d\n",
+ qp->ibqp.qp_type);
+ return;
+ }
mana_put_qp_ref(qp);
wait_for_completion(&qp->free);
}
@@ -490,6 +600,105 @@ destroy_queues:
return err;
}
+static void mana_add_qp_to_cqs(struct mana_ib_qp *qp)
+{
+ struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
+ struct mana_ib_cq *recv_cq = container_of(qp->ibqp.recv_cq, struct mana_ib_cq, ibcq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&send_cq->cq_lock, flags);
+ list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
+ spin_unlock_irqrestore(&send_cq->cq_lock, flags);
+
+ spin_lock_irqsave(&recv_cq->cq_lock, flags);
+ list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
+ spin_unlock_irqrestore(&recv_cq->cq_lock, flags);
+}
+
+static void mana_remove_qp_from_cqs(struct mana_ib_qp *qp)
+{
+ struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
+ struct mana_ib_cq *recv_cq = container_of(qp->ibqp.recv_cq, struct mana_ib_cq, ibcq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&send_cq->cq_lock, flags);
+ list_del(&qp->cq_send_list);
+ spin_unlock_irqrestore(&send_cq->cq_lock, flags);
+
+ spin_lock_irqsave(&recv_cq->cq_lock, flags);
+ list_del(&qp->cq_recv_list);
+ spin_unlock_irqrestore(&recv_cq->cq_lock, flags);
+}
+
+static int mana_ib_create_ud_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
+ struct ib_qp_init_attr *attr, struct ib_udata *udata)
+{
+ struct mana_ib_dev *mdev = container_of(ibpd->device, struct mana_ib_dev, ib_dev);
+ struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ u32 doorbell, queue_size;
+ int i, err;
+
+ if (udata) {
+ ibdev_dbg(&mdev->ib_dev, "User-level UD QPs are not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; ++i) {
+ queue_size = mana_ib_queue_size(attr, i);
+ err = mana_ib_create_kernel_queue(mdev, queue_size, mana_ib_queue_type(attr, i),
+ &qp->ud_qp.queues[i]);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to create queue %d, err %d\n",
+ i, err);
+ goto destroy_queues;
+ }
+ }
+ doorbell = gc->mana_ib.doorbell;
+
+ err = create_shadow_queue(&qp->shadow_rq, attr->cap.max_recv_wr,
+ sizeof(struct ud_rq_shadow_wqe));
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to create shadow rq err %d\n", err);
+ goto destroy_queues;
+ }
+ err = create_shadow_queue(&qp->shadow_sq, attr->cap.max_send_wr,
+ sizeof(struct ud_sq_shadow_wqe));
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to create shadow sq err %d\n", err);
+ goto destroy_shadow_queues;
+ }
+
+ err = mana_ib_gd_create_ud_qp(mdev, qp, attr, doorbell, attr->qp_type);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to create ud qp %d\n", err);
+ goto destroy_shadow_queues;
+ }
+ qp->ibqp.qp_num = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].id;
+ qp->port = attr->port_num;
+
+ for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; ++i)
+ qp->ud_qp.queues[i].kmem->id = qp->ud_qp.queues[i].id;
+
+ err = mana_table_store_qp(mdev, qp);
+ if (err)
+ goto destroy_qp;
+
+ mana_add_qp_to_cqs(qp);
+
+ return 0;
+
+destroy_qp:
+ mana_ib_gd_destroy_ud_qp(mdev, qp);
+destroy_shadow_queues:
+ destroy_shadow_queue(&qp->shadow_rq);
+ destroy_shadow_queue(&qp->shadow_sq);
+destroy_queues:
+ while (i-- > 0)
+ mana_ib_destroy_queue(mdev, &qp->ud_qp.queues[i]);
+ return err;
+}
+
int mana_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
struct ib_udata *udata)
{
@@ -503,6 +712,9 @@ int mana_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
return mana_ib_create_qp_raw(ibqp, ibqp->pd, attr, udata);
case IB_QPT_RC:
return mana_ib_create_rc_qp(ibqp, ibqp->pd, attr, udata);
+ case IB_QPT_UD:
+ case IB_QPT_GSI:
+ return mana_ib_create_ud_qp(ibqp, ibqp->pd, attr, udata);
default:
ibdev_dbg(ibqp->device, "Creating QP type %u not supported\n",
attr->qp_type);
@@ -579,6 +791,8 @@ int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
{
switch (ibqp->qp_type) {
case IB_QPT_RC:
+ case IB_QPT_UD:
+ case IB_QPT_GSI:
return mana_ib_gd_modify_qp(ibqp, attr, attr_mask, udata);
default:
ibdev_dbg(ibqp->device, "Modify QP type %u not supported", ibqp->qp_type);
@@ -652,6 +866,28 @@ static int mana_ib_destroy_rc_qp(struct mana_ib_qp *qp, struct ib_udata *udata)
return 0;
}
+static int mana_ib_destroy_ud_qp(struct mana_ib_qp *qp, struct ib_udata *udata)
+{
+ struct mana_ib_dev *mdev =
+ container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
+ int i;
+
+ mana_remove_qp_from_cqs(qp);
+ mana_table_remove_qp(mdev, qp);
+
+ destroy_shadow_queue(&qp->shadow_rq);
+ destroy_shadow_queue(&qp->shadow_sq);
+
+ /* Ignore return code as there is not much we can do about it.
+ * The error message is printed inside.
+ */
+ mana_ib_gd_destroy_ud_qp(mdev, qp);
+ for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; ++i)
+ mana_ib_destroy_queue(mdev, &qp->ud_qp.queues[i]);
+
+ return 0;
+}
+
int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
@@ -665,6 +901,9 @@ int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
return mana_ib_destroy_qp_raw(qp, udata);
case IB_QPT_RC:
return mana_ib_destroy_rc_qp(qp, udata);
+ case IB_QPT_UD:
+ case IB_QPT_GSI:
+ return mana_ib_destroy_ud_qp(qp, udata);
default:
ibdev_dbg(ibqp->device, "Unexpected QP type %u\n",
ibqp->qp_type);
diff --git a/drivers/infiniband/hw/mana/shadow_queue.h b/drivers/infiniband/hw/mana/shadow_queue.h
new file mode 100644
index 000000000000..a4b3818f9c39
--- /dev/null
+++ b/drivers/infiniband/hw/mana/shadow_queue.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2024, Microsoft Corporation. All rights reserved.
+ */
+
+#ifndef _MANA_SHADOW_QUEUE_H_
+#define _MANA_SHADOW_QUEUE_H_
+
+struct shadow_wqe_header {
+ u16 opcode;
+ u16 error_code;
+ u32 posted_wqe_size;
+ u64 wr_id;
+};
+
+struct ud_rq_shadow_wqe {
+ struct shadow_wqe_header header;
+ u32 byte_len;
+ u32 src_qpn;
+};
+
+struct ud_sq_shadow_wqe {
+ struct shadow_wqe_header header;
+};
+
+struct shadow_queue {
+ /* Unmasked producer index, Incremented on wqe posting */
+ u64 prod_idx;
+ /* Unmasked consumer index, Incremented on cq polling */
+ u64 cons_idx;
+ /* Unmasked index of next-to-complete (from HW) shadow WQE */
+ u64 next_to_complete_idx;
+ /* queue size in wqes */
+ u32 length;
+ /* distance between elements in bytes */
+ u32 stride;
+ /* ring buffer holding wqes */
+ void *buffer;
+};
+
+static inline int create_shadow_queue(struct shadow_queue *queue, uint32_t length, uint32_t stride)
+{
+ queue->buffer = kvmalloc_array(length, stride, GFP_KERNEL);
+ if (!queue->buffer)
+ return -ENOMEM;
+
+ queue->length = length;
+ queue->stride = stride;
+
+ return 0;
+}
+
+static inline void destroy_shadow_queue(struct shadow_queue *queue)
+{
+ kvfree(queue->buffer);
+}
+
+static inline bool shadow_queue_full(struct shadow_queue *queue)
+{
+ return (queue->prod_idx - queue->cons_idx) >= queue->length;
+}
+
+static inline bool shadow_queue_empty(struct shadow_queue *queue)
+{
+ return queue->prod_idx == queue->cons_idx;
+}
+
+static inline void *
+shadow_queue_get_element(const struct shadow_queue *queue, u64 unmasked_index)
+{
+ u32 index = unmasked_index % queue->length;
+
+ return ((u8 *)queue->buffer + index * queue->stride);
+}
+
+static inline void *
+shadow_queue_producer_entry(struct shadow_queue *queue)
+{
+ return shadow_queue_get_element(queue, queue->prod_idx);
+}
+
+static inline void *
+shadow_queue_get_next_to_consume(const struct shadow_queue *queue)
+{
+ if (queue->cons_idx == queue->next_to_complete_idx)
+ return NULL;
+
+ return shadow_queue_get_element(queue, queue->cons_idx);
+}
+
+static inline void *
+shadow_queue_get_next_to_complete(struct shadow_queue *queue)
+{
+ if (queue->next_to_complete_idx == queue->prod_idx)
+ return NULL;
+
+ return shadow_queue_get_element(queue, queue->next_to_complete_idx);
+}
+
+static inline void shadow_queue_advance_producer(struct shadow_queue *queue)
+{
+ queue->prod_idx++;
+}
+
+static inline void shadow_queue_advance_consumer(struct shadow_queue *queue)
+{
+ queue->cons_idx++;
+}
+
+static inline void shadow_queue_advance_next_to_complete(struct shadow_queue *queue)
+{
+ queue->next_to_complete_idx++;
+}
+
+#endif
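
The header above tracks the ring with three monotonically increasing ("unmasked") indices that are reduced modulo the queue length only when an element is addressed: prod_idx advances when a WQE is posted, next_to_complete_idx when hardware reports a completion, and cons_idx when the consumer polls the CQ. A minimal standalone sketch of that index model follows (userspace C, illustrative only; the demo_queue type and wr_id values are hypothetical stand-ins, not part of the patch):

/* Standalone illustration of the shadow-queue three-index model. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_queue {
	uint64_t prod_idx;		/* incremented on posting */
	uint64_t cons_idx;		/* incremented on polling */
	uint64_t next_to_complete_idx;	/* incremented on HW completion */
	uint32_t length;		/* queue size in elements */
	uint32_t stride;		/* element size in bytes */
	void *buffer;
};

static void *demo_elem(struct demo_queue *q, uint64_t unmasked)
{
	/* Indices are masked only here, when an element is addressed. */
	return (uint8_t *)q->buffer + (unmasked % q->length) * q->stride;
}

int main(void)
{
	struct demo_queue q = { .length = 4, .stride = sizeof(uint64_t) };
	int i;

	q.buffer = calloc(q.length, q.stride);
	if (!q.buffer)
		return 1;

	/* Post three WQEs: the producer index runs ahead of the others. */
	for (i = 0; i < 3; i++) {
		*(uint64_t *)demo_elem(&q, q.prod_idx) = 100 + i; /* wr_id */
		q.prod_idx++;
	}

	/* Hardware completes two of them. */
	q.next_to_complete_idx += 2;

	/* Consumer polls: only completed entries are visible. */
	while (q.cons_idx != q.next_to_complete_idx) {
		printf("polled wr_id %llu\n",
		       (unsigned long long)*(uint64_t *)demo_elem(&q, q.cons_idx));
		q.cons_idx++;
	}

	free(q.buffer);
	return 0;
}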
diff --git a/drivers/infiniband/hw/mana/wr.c b/drivers/infiniband/hw/mana/wr.c
new file mode 100644
index 000000000000..1813567d3b16
--- /dev/null
+++ b/drivers/infiniband/hw/mana/wr.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Microsoft Corporation. All rights reserved.
+ */
+
+#include "mana_ib.h"
+
+#define MAX_WR_SGL_NUM (2)
+
+static int mana_ib_post_recv_ud(struct mana_ib_qp *qp, const struct ib_recv_wr *wr)
+{
+ struct mana_ib_dev *mdev = container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
+ struct gdma_queue *queue = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].kmem;
+ struct gdma_posted_wqe_info wqe_info = {0};
+ struct gdma_sge gdma_sgl[MAX_WR_SGL_NUM];
+ struct gdma_wqe_request wqe_req = {0};
+ struct ud_rq_shadow_wqe *shadow_wqe;
+ int err, i;
+
+ if (shadow_queue_full(&qp->shadow_rq))
+ return -EINVAL;
+
+ if (wr->num_sge > MAX_WR_SGL_NUM)
+ return -EINVAL;
+
+ for (i = 0; i < wr->num_sge; ++i) {
+ gdma_sgl[i].address = wr->sg_list[i].addr;
+ gdma_sgl[i].mem_key = wr->sg_list[i].lkey;
+ gdma_sgl[i].size = wr->sg_list[i].length;
+ }
+ wqe_req.num_sge = wr->num_sge;
+ wqe_req.sgl = gdma_sgl;
+
+ err = mana_gd_post_work_request(queue, &wqe_req, &wqe_info);
+ if (err)
+ return err;
+
+ shadow_wqe = shadow_queue_producer_entry(&qp->shadow_rq);
+ memset(shadow_wqe, 0, sizeof(*shadow_wqe));
+ shadow_wqe->header.opcode = IB_WC_RECV;
+ shadow_wqe->header.wr_id = wr->wr_id;
+ shadow_wqe->header.posted_wqe_size = wqe_info.wqe_size_in_bu;
+ shadow_queue_advance_producer(&qp->shadow_rq);
+
+ mana_gd_wq_ring_doorbell(mdev_to_gc(mdev), queue);
+ return 0;
+}
+
+int mana_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
+ int err = 0;
+
+ for (; wr; wr = wr->next) {
+ switch (ibqp->qp_type) {
+ case IB_QPT_UD:
+ case IB_QPT_GSI:
+ err = mana_ib_post_recv_ud(qp, wr);
+ if (unlikely(err)) {
+ *bad_wr = wr;
+ return err;
+ }
+ break;
+ default:
+ ibdev_dbg(ibqp->device, "Posting recv wr on qp type %u is not supported\n",
+ ibqp->qp_type);
+ return -EINVAL;
+ }
+ }
+
+ return err;
+}
+
+static int mana_ib_post_send_ud(struct mana_ib_qp *qp, const struct ib_ud_wr *wr)
+{
+ struct mana_ib_dev *mdev = container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
+ struct mana_ib_ah *ah = container_of(wr->ah, struct mana_ib_ah, ibah);
+ struct net_device *ndev = mana_ib_get_netdev(&mdev->ib_dev, qp->port);
+ struct gdma_queue *queue = qp->ud_qp.queues[MANA_UD_SEND_QUEUE].kmem;
+ struct gdma_sge gdma_sgl[MAX_WR_SGL_NUM + 1];
+ struct gdma_posted_wqe_info wqe_info = {0};
+ struct gdma_wqe_request wqe_req = {0};
+ struct rdma_send_oob send_oob = {0};
+ struct ud_sq_shadow_wqe *shadow_wqe;
+ int err, i;
+
+ if (!ndev) {
+ ibdev_dbg(&mdev->ib_dev, "Invalid port %u in QP %u\n",
+ qp->port, qp->ibqp.qp_num);
+ return -EINVAL;
+ }
+
+ if (wr->wr.opcode != IB_WR_SEND)
+ return -EINVAL;
+
+ if (shadow_queue_full(&qp->shadow_sq))
+ return -EINVAL;
+
+ if (wr->wr.num_sge > MAX_WR_SGL_NUM)
+ return -EINVAL;
+
+ gdma_sgl[0].address = ah->dma_handle;
+ gdma_sgl[0].mem_key = qp->ibqp.pd->local_dma_lkey;
+ gdma_sgl[0].size = sizeof(struct mana_ib_av);
+ for (i = 0; i < wr->wr.num_sge; ++i) {
+ gdma_sgl[i + 1].address = wr->wr.sg_list[i].addr;
+ gdma_sgl[i + 1].mem_key = wr->wr.sg_list[i].lkey;
+ gdma_sgl[i + 1].size = wr->wr.sg_list[i].length;
+ }
+
+ wqe_req.num_sge = wr->wr.num_sge + 1;
+ wqe_req.sgl = gdma_sgl;
+ wqe_req.inline_oob_size = sizeof(struct rdma_send_oob);
+ wqe_req.inline_oob_data = &send_oob;
+ wqe_req.flags = GDMA_WR_OOB_IN_SGL;
+ wqe_req.client_data_unit = ib_mtu_enum_to_int(ib_mtu_int_to_enum(ndev->mtu));
+
+ send_oob.wqe_type = WQE_TYPE_UD_SEND;
+ send_oob.fence = !!(wr->wr.send_flags & IB_SEND_FENCE);
+ send_oob.signaled = !!(wr->wr.send_flags & IB_SEND_SIGNALED);
+ send_oob.solicited = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
+ send_oob.psn = qp->ud_qp.sq_psn;
+ send_oob.ssn_or_rqpn = wr->remote_qpn;
+ send_oob.ud_send.remote_qkey =
+ qp->ibqp.qp_type == IB_QPT_GSI ? IB_QP1_QKEY : wr->remote_qkey;
+
+ err = mana_gd_post_work_request(queue, &wqe_req, &wqe_info);
+ if (err)
+ return err;
+
+ qp->ud_qp.sq_psn++;
+ shadow_wqe = shadow_queue_producer_entry(&qp->shadow_sq);
+ memset(shadow_wqe, 0, sizeof(*shadow_wqe));
+ shadow_wqe->header.opcode = IB_WC_SEND;
+ shadow_wqe->header.wr_id = wr->wr.wr_id;
+ shadow_wqe->header.posted_wqe_size = wqe_info.wqe_size_in_bu;
+ shadow_queue_advance_producer(&qp->shadow_sq);
+
+ mana_gd_wq_ring_doorbell(mdev_to_gc(mdev), queue);
+ return 0;
+}
+
+int mana_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ int err;
+ struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
+
+ for (; wr; wr = wr->next) {
+ switch (ibqp->qp_type) {
+ case IB_QPT_UD:
+ case IB_QPT_GSI:
+ err = mana_ib_post_send_ud(qp, ud_wr(wr));
+ if (unlikely(err)) {
+ *bad_wr = wr;
+ return err;
+ }
+ break;
+ default:
+ ibdev_dbg(ibqp->device, "Posting send wr on qp type %u is not supported\n",
+ ibqp->qp_type);
+ return -EINVAL;
+ }
+ }
+
+ return err;
+}
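
Since mana_ib_post_send_ud() prepends one SGE carrying the address vector, a caller may pass at most MAX_WR_SGL_NUM data SGEs per work request. A hedged sketch of how a kernel consumer might drive this path through the generic verbs entry point (the demo_post_ud_send() helper and its parameters are illustrative placeholders, not part of the patch):

#include <rdma/ib_verbs.h>

/* Post a single-SGE, signaled UD send on an already created UD QP.
 * dma_addr/lkey/len describe a DMA-mapped buffer owned by the caller.
 */
static int demo_post_ud_send(struct ib_qp *qp, struct ib_ah *ah,
			     u64 dma_addr, u32 lkey, u32 len,
			     u32 remote_qpn, u32 remote_qkey)
{
	struct ib_sge sge = {
		.addr = dma_addr,
		.length = len,
		.lkey = lkey,
	};
	struct ib_ud_wr wr = {
		.wr = {
			.wr_id = 1,
			.opcode = IB_WR_SEND,
			.send_flags = IB_SEND_SIGNALED,
			.sg_list = &sge,
			.num_sge = 1,	/* driver adds one more SGE for the AV */
		},
		.ah = ah,
		.remote_qpn = remote_qpn,
		.remote_qkey = remote_qkey,
	};
	const struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr.wr, &bad_wr);
}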
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index b38961f5058e..11878ddf7cc7 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -9,6 +9,7 @@ mlx5_ib-y := ah.o \
data_direct.o \
dm.o \
doorbell.o \
+ fs.o \
gsi.o \
ib_virt.o \
mad.o \
@@ -26,7 +27,6 @@ mlx5_ib-y := ah.o \
mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
mlx5_ib-$(CONFIG_MLX5_ESWITCH) += ib_rep.o
mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o \
- fs.o \
qos.o \
std_types.o
mlx5_ib-$(CONFIG_MLX5_MACSEC) += macsec.o
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
index 99036afb3aef..531a57f9ee7e 100644
--- a/drivers/infiniband/hw/mlx5/ah.c
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -50,11 +50,12 @@ static __be16 mlx5_ah_get_udp_sport(const struct mlx5_ib_dev *dev,
return sport;
}
-static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
+static int create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
struct rdma_ah_init_attr *init_attr)
{
struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
enum ib_gid_type gid_type;
+ int rate_val;
if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
@@ -67,8 +68,10 @@ static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
ah->av.tclass = grh->traffic_class;
}
- ah->av.stat_rate_sl =
- (mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah_attr)) << 4);
+ rate_val = mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah_attr));
+ if (rate_val < 0)
+ return rate_val;
+ ah->av.stat_rate_sl = rate_val << 4;
if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
if (init_attr->xmit_slave)
@@ -89,6 +92,8 @@ static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
ah->av.fl_mlid = rdma_ah_get_path_bits(ah_attr) & 0x7f;
ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0xf);
}
+
+ return 0;
}
int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
@@ -121,8 +126,7 @@ int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
return err;
}
- create_ib_ah(dev, ah, init_attr);
- return 0;
+ return create_ib_ah(dev, ah, init_attr);
}
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
index 81cfa74147a1..b847084dcd99 100644
--- a/drivers/infiniband/hw/mlx5/counters.c
+++ b/drivers/infiniband/hw/mlx5/counters.c
@@ -140,6 +140,13 @@ static const struct mlx5_ib_counter rdmatx_cnp_op_cnts[] = {
INIT_OP_COUNTER(cc_tx_cnp_pkts, CC_TX_CNP_PKTS),
};
+static const struct mlx5_ib_counter packets_op_cnts[] = {
+ INIT_OP_COUNTER(rdma_tx_packets, RDMA_TX_PACKETS),
+ INIT_OP_COUNTER(rdma_tx_bytes, RDMA_TX_BYTES),
+ INIT_OP_COUNTER(rdma_rx_packets, RDMA_RX_PACKETS),
+ INIT_OP_COUNTER(rdma_rx_bytes, RDMA_RX_BYTES),
+};
+
static int mlx5_ib_read_counters(struct ib_counters *counters,
struct ib_counters_read_attr *read_attr,
struct uverbs_attr_bundle *attrs)
@@ -427,6 +434,52 @@ done:
return num_counters;
}
+static bool is_rdma_bytes_counter(u32 type)
+{
+ if (type == MLX5_IB_OPCOUNTER_RDMA_TX_BYTES ||
+ type == MLX5_IB_OPCOUNTER_RDMA_RX_BYTES ||
+ type == MLX5_IB_OPCOUNTER_RDMA_TX_BYTES_PER_QP ||
+ type == MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP)
+ return true;
+
+ return false;
+}
+
+static int do_per_qp_get_op_stat(struct rdma_counter *counter)
+{
+ struct mlx5_ib_dev *dev = to_mdev(counter->device);
+ const struct mlx5_ib_counters *cnts = get_counters(dev, counter->port);
+ struct mlx5_rdma_counter *mcounter = to_mcounter(counter);
+ int i, ret, index, num_hw_counters;
+ u64 packets = 0, bytes = 0;
+
+ for (i = MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS_PER_QP;
+ i <= MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP; i++) {
+ if (!mcounter->fc[i])
+ continue;
+
+ ret = mlx5_fc_query(dev->mdev, mcounter->fc[i],
+ &packets, &bytes);
+ if (ret)
+ return ret;
+
+ num_hw_counters = cnts->num_q_counters +
+ cnts->num_cong_counters +
+ cnts->num_ext_ppcnt_counters;
+
+ index = i - MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS_PER_QP +
+ num_hw_counters;
+
+ if (is_rdma_bytes_counter(i))
+ counter->stats->value[index] = bytes;
+ else
+ counter->stats->value[index] = packets;
+
+ clear_bit(index, counter->stats->is_disabled);
+ }
+ return 0;
+}
+
static int do_get_op_stat(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
u32 port_num, int index)
@@ -434,7 +487,7 @@ static int do_get_op_stat(struct ib_device *ibdev,
struct mlx5_ib_dev *dev = to_mdev(ibdev);
const struct mlx5_ib_counters *cnts;
const struct mlx5_ib_op_fc *opfcs;
- u64 packets = 0, bytes;
+ u64 packets, bytes;
u32 type;
int ret;
@@ -453,8 +506,11 @@ static int do_get_op_stat(struct ib_device *ibdev,
if (ret)
return ret;
+ if (is_rdma_bytes_counter(type))
+ stats->value[index] = bytes;
+ else
+ stats->value[index] = packets;
out:
- stats->value[index] = packets;
return index;
}
@@ -523,19 +579,30 @@ static int mlx5_ib_counter_update_stats(struct rdma_counter *counter)
{
struct mlx5_ib_dev *dev = to_mdev(counter->device);
const struct mlx5_ib_counters *cnts = get_counters(dev, counter->port);
+ int ret;
+
+ ret = mlx5_ib_query_q_counters(dev->mdev, cnts, counter->stats,
+ counter->id);
+ if (ret)
+ return ret;
+
+ if (!counter->mode.bind_opcnt)
+ return 0;
- return mlx5_ib_query_q_counters(dev->mdev, cnts,
- counter->stats, counter->id);
+ return do_per_qp_get_op_stat(counter);
}
static int mlx5_ib_counter_dealloc(struct rdma_counter *counter)
{
+ struct mlx5_rdma_counter *mcounter = to_mcounter(counter);
struct mlx5_ib_dev *dev = to_mdev(counter->device);
u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
if (!counter->id)
return 0;
+ WARN_ON(!xa_empty(&mcounter->qpn_opfc_xa));
+ mlx5r_fs_destroy_fcs(dev, counter);
MLX5_SET(dealloc_q_counter_in, in, opcode,
MLX5_CMD_OP_DEALLOC_Q_COUNTER);
MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter->id);
@@ -543,7 +610,7 @@ static int mlx5_ib_counter_dealloc(struct rdma_counter *counter)
}
static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
- struct ib_qp *qp)
+ struct ib_qp *qp, u32 port)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
bool new = false;
@@ -568,8 +635,14 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
if (err)
goto fail_set_counter;
+ err = mlx5r_fs_bind_op_fc(qp, counter, port);
+ if (err)
+ goto fail_bind_op_fc;
+
return 0;
+fail_bind_op_fc:
+ mlx5_ib_qp_set_counter(qp, NULL);
fail_set_counter:
if (new) {
mlx5_ib_counter_dealloc(counter);
@@ -579,9 +652,22 @@ fail_set_counter:
return err;
}
-static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp)
+static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp, u32 port)
{
- return mlx5_ib_qp_set_counter(qp, NULL);
+ struct rdma_counter *counter = qp->counter;
+ int err;
+
+ mlx5r_fs_unbind_op_fc(qp, counter);
+
+ err = mlx5_ib_qp_set_counter(qp, NULL);
+ if (err)
+ goto fail_set_counter;
+
+ return 0;
+
+fail_set_counter:
+ mlx5r_fs_bind_op_fc(qp, counter, port);
+ return err;
}
static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
@@ -681,6 +767,12 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
descs[j].priv = &rdmatx_cnp_op_cnts[i].type;
}
}
+
+ for (i = 0; i < ARRAY_SIZE(packets_op_cnts); i++, j++) {
+ descs[j].name = packets_op_cnts[i].name;
+ descs[j].flags |= IB_STAT_FLAG_OPTIONAL;
+ descs[j].priv = &packets_op_cnts[i].type;
+ }
}
@@ -731,6 +823,8 @@ static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
num_op_counters = ARRAY_SIZE(basic_op_cnts);
+ num_op_counters += ARRAY_SIZE(packets_op_cnts);
+
if (MLX5_CAP_FLOWTABLE(dev->mdev,
ft_field_support_2_nic_receive_rdma.bth_opcode))
num_op_counters += ARRAY_SIZE(rdmarx_cnp_op_cnts);
@@ -760,10 +854,58 @@ err:
return -ENOMEM;
}
+/*
+ * Checks whether the given flow counter type shares its flow counter with
+ * another type and, if so, whether that other type's flow counter has
+ * already been created. If both conditions are met, returns true and the
+ * shared counter through @opfc; otherwise returns false.
+ */
+bool mlx5r_is_opfc_shared_and_in_use(struct mlx5_ib_op_fc *opfcs, u32 type,
+ struct mlx5_ib_op_fc **opfc)
+{
+ u32 shared_fc_type;
+
+ switch (type) {
+ case MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS:
+ shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_TX_BYTES;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_TX_BYTES:
+ shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS:
+ shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_RX_BYTES;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_RX_BYTES:
+ shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS_PER_QP:
+ shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_TX_BYTES_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_TX_BYTES_PER_QP:
+ shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS_PER_QP:
+ shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP:
+ shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS_PER_QP;
+ break;
+ default:
+ return false;
+ }
+
+ *opfc = &opfcs[shared_fc_type];
+ if (!(*opfc)->fc)
+ return false;
+
+ return true;
+}
+
static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
{
u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
int num_cnt_ports = dev->num_ports;
+ struct mlx5_ib_op_fc *in_use_opfc;
int i, j;
if (is_mdev_switchdev_mode(dev->mdev))
@@ -785,11 +927,15 @@ static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
if (!dev->port[i].cnts.opfcs[j].fc)
continue;
- if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
- mlx5_ib_fs_remove_op_fc(dev,
- &dev->port[i].cnts.opfcs[j], j);
+ if (mlx5r_is_opfc_shared_and_in_use(
+ dev->port[i].cnts.opfcs, j, &in_use_opfc))
+ goto skip;
+
+ mlx5_ib_fs_remove_op_fc(dev,
+ &dev->port[i].cnts.opfcs[j], j);
mlx5_fc_destroy(dev->mdev,
dev->port[i].cnts.opfcs[j].fc);
+skip:
dev->port[i].cnts.opfcs[j].fc = NULL;
}
}
@@ -983,8 +1129,8 @@ static int mlx5_ib_modify_stat(struct ib_device *device, u32 port,
unsigned int index, bool enable)
{
struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_ib_op_fc *opfc, *in_use_opfc;
struct mlx5_ib_counters *cnts;
- struct mlx5_ib_op_fc *opfc;
u32 num_hw_counters, type;
int ret;
@@ -1008,6 +1154,13 @@ static int mlx5_ib_modify_stat(struct ib_device *device, u32 port,
if (opfc->fc)
return -EEXIST;
+ if (mlx5r_is_opfc_shared_and_in_use(cnts->opfcs, type,
+ &in_use_opfc)) {
+ opfc->fc = in_use_opfc->fc;
+ opfc->rule[0] = in_use_opfc->rule[0];
+ return 0;
+ }
+
opfc->fc = mlx5_fc_create(dev->mdev, false);
if (IS_ERR(opfc->fc))
return PTR_ERR(opfc->fc);
@@ -1023,12 +1176,23 @@ static int mlx5_ib_modify_stat(struct ib_device *device, u32 port,
if (!opfc->fc)
return -EINVAL;
+ if (mlx5r_is_opfc_shared_and_in_use(cnts->opfcs, type, &in_use_opfc))
+ goto out;
+
mlx5_ib_fs_remove_op_fc(dev, opfc, type);
mlx5_fc_destroy(dev->mdev, opfc->fc);
+out:
opfc->fc = NULL;
return 0;
}
+static void mlx5_ib_counter_init(struct rdma_counter *counter)
+{
+ struct mlx5_rdma_counter *mcounter = to_mcounter(counter);
+
+ xa_init(&mcounter->qpn_opfc_xa);
+}
+
static const struct ib_device_ops hw_stats_ops = {
.alloc_hw_port_stats = mlx5_ib_alloc_hw_port_stats,
.get_hw_stats = mlx5_ib_get_hw_stats,
@@ -1037,8 +1201,10 @@ static const struct ib_device_ops hw_stats_ops = {
.counter_dealloc = mlx5_ib_counter_dealloc,
.counter_alloc_stats = mlx5_ib_counter_alloc_stats,
.counter_update_stats = mlx5_ib_counter_update_stats,
- .modify_hw_stat = IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) ?
- mlx5_ib_modify_stat : NULL,
+ .modify_hw_stat = mlx5_ib_modify_stat,
+ .counter_init = mlx5_ib_counter_init,
+
+ INIT_RDMA_OBJ_SIZE(rdma_counter, mlx5_rdma_counter, rdma_counter),
};
static const struct ib_device_ops hw_switchdev_vport_op = {
@@ -1053,6 +1219,9 @@ static const struct ib_device_ops hw_switchdev_stats_ops = {
.counter_dealloc = mlx5_ib_counter_dealloc,
.counter_alloc_stats = mlx5_ib_counter_alloc_stats,
.counter_update_stats = mlx5_ib_counter_update_stats,
+ .counter_init = mlx5_ib_counter_init,
+
+ INIT_RDMA_OBJ_SIZE(rdma_counter, mlx5_rdma_counter, rdma_counter),
};
static const struct ib_device_ops counters_ops = {
diff --git a/drivers/infiniband/hw/mlx5/counters.h b/drivers/infiniband/hw/mlx5/counters.h
index 6bcaaa52e2b2..bd03cee42014 100644
--- a/drivers/infiniband/hw/mlx5/counters.h
+++ b/drivers/infiniband/hw/mlx5/counters.h
@@ -8,10 +8,25 @@
#include "mlx5_ib.h"
+struct mlx5_rdma_counter {
+ struct rdma_counter rdma_counter;
+
+ struct mlx5_fc *fc[MLX5_IB_OPCOUNTER_MAX];
+ struct xarray qpn_opfc_xa;
+};
+
+static inline struct mlx5_rdma_counter *
+to_mcounter(struct rdma_counter *counter)
+{
+ return container_of(counter, struct mlx5_rdma_counter, rdma_counter);
+}
+
int mlx5_ib_counters_init(struct mlx5_ib_dev *dev);
void mlx5_ib_counters_cleanup(struct mlx5_ib_dev *dev);
void mlx5_ib_counters_clear_description(struct ib_counters *counters);
int mlx5_ib_flow_counters_set_data(struct ib_counters *ibcounters,
struct mlx5_ib_create_flow *ucmd);
u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u32 port_num);
+bool mlx5r_is_opfc_shared_and_in_use(struct mlx5_ib_op_fc *opfcs, u32 type,
+ struct mlx5_ib_op_fc **opfc);
#endif /* _MLX5_IB_COUNTERS_H */
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 4c54dc578069..1aa5311b03e9 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -490,7 +490,7 @@ repoll:
}
qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
- if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
+ if (!*cur_qp || (qpn != (*cur_qp)->trans_qp.base.mqp.qpn)) {
/* We do not have to take the QP table lock here,
* because CQs will be locked while QPs are removed
* from the table.
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 4186884c66e1..2479da8620ca 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -13,6 +13,7 @@
#include <rdma/uverbs_std_types.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
+#include <rdma/ib_ucaps.h>
#include "mlx5_ib.h"
#include "devx.h"
#include "qp.h"
@@ -122,7 +123,27 @@ devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
return to_mucontext(ib_uverbs_get_ucontext(attrs));
}
-int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
+static int set_uctx_ucaps(struct mlx5_ib_dev *dev, u64 req_ucaps, u32 *cap)
+{
+ if (UCAP_ENABLED(req_ucaps, RDMA_UCAP_MLX5_CTRL_LOCAL)) {
+ if (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RDMA_CTRL)
+ *cap |= MLX5_UCTX_CAP_RDMA_CTRL;
+ else
+ return -EOPNOTSUPP;
+ }
+
+ if (UCAP_ENABLED(req_ucaps, RDMA_UCAP_MLX5_CTRL_OTHER_VHCA)) {
+ if (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
+ MLX5_UCTX_CAP_RDMA_CTRL_OTHER_VHCA)
+ *cap |= MLX5_UCTX_CAP_RDMA_CTRL_OTHER_VHCA;
+ else
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user, u64 req_ucaps)
{
u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {};
u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
@@ -136,14 +157,22 @@ int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
return -EINVAL;
uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
- if (is_user && capable(CAP_NET_RAW) &&
- (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
+ if (is_user &&
+ (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX) &&
+ capable(CAP_NET_RAW))
cap |= MLX5_UCTX_CAP_RAW_TX;
- if (is_user && capable(CAP_SYS_RAWIO) &&
+ if (is_user &&
(MLX5_CAP_GEN(dev->mdev, uctx_cap) &
- MLX5_UCTX_CAP_INTERNAL_DEV_RES))
+ MLX5_UCTX_CAP_INTERNAL_DEV_RES) &&
+ capable(CAP_SYS_RAWIO))
cap |= MLX5_UCTX_CAP_INTERNAL_DEV_RES;
+ if (req_ucaps) {
+ err = set_uctx_ucaps(dev, req_ucaps, &cap);
+ if (err)
+ return err;
+ }
+
MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
MLX5_SET(uctx, uctx, cap, cap);
@@ -2573,7 +2602,7 @@ int mlx5_ib_devx_init(struct mlx5_ib_dev *dev)
struct mlx5_devx_event_table *table = &dev->devx_event_table;
int uid;
- uid = mlx5_ib_devx_create(dev, false);
+ uid = mlx5_ib_devx_create(dev, false, 0);
if (uid > 0) {
dev->devx_whitelist_uid = uid;
xa_init(&table->event_xa);
diff --git a/drivers/infiniband/hw/mlx5/devx.h b/drivers/infiniband/hw/mlx5/devx.h
index 1344bf4c9d21..ee9e7d3af93f 100644
--- a/drivers/infiniband/hw/mlx5/devx.h
+++ b/drivers/infiniband/hw/mlx5/devx.h
@@ -24,13 +24,14 @@ struct devx_obj {
struct list_head event_sub; /* holds devx_event_subscription entries */
};
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
-int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user);
+int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user, u64 req_ucaps);
void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid);
int mlx5_ib_devx_init(struct mlx5_ib_dev *dev);
void mlx5_ib_devx_cleanup(struct mlx5_ib_dev *dev);
void mlx5_ib_ufile_hw_cleanup(struct ib_uverbs_file *ufile);
#else
-static inline int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
+static inline int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user,
+ u64 req_ucaps)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 162814ae8cb4..251246c73b33 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -12,6 +12,7 @@
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/ib_hdrs.h>
#include <rdma/ib_umem.h>
+#include <rdma/ib_ucaps.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/fs_helpers.h>
@@ -32,6 +33,11 @@ enum {
MATCH_CRITERIA_ENABLE_MISC2_BIT
};
+
+struct mlx5_per_qp_opfc {
+ struct mlx5_ib_op_fc opfcs[MLX5_IB_OPCOUNTER_MAX];
+};
+
#define HEADER_IS_ZERO(match_criteria, headers) \
!(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
0, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
@@ -678,7 +684,7 @@ enum flow_table_type {
#define MLX5_FS_MAX_TYPES 6
#define MLX5_FS_MAX_ENTRIES BIT(16)
-static bool mlx5_ib_shared_ft_allowed(struct ib_device *device)
+static bool __maybe_unused mlx5_ib_shared_ft_allowed(struct ib_device *device)
{
struct mlx5_ib_dev *dev = to_mdev(device);
@@ -690,7 +696,7 @@ static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_ib_dev *dev,
struct mlx5_ib_flow_prio *prio,
int priority,
int num_entries, int num_groups,
- u32 flags)
+ u32 flags, u16 vport)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_table *ft;
@@ -698,6 +704,7 @@ static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_ib_dev *dev,
ft_attr.prio = priority;
ft_attr.max_fte = num_entries;
ft_attr.flags = flags;
+ ft_attr.vport = vport;
ft_attr.autogroup.max_num_groups = num_groups;
ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(ft))
@@ -792,18 +799,25 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
ft = prio->flow_table;
if (!ft)
return _get_prio(dev, ns, prio, priority, max_table_size,
- num_groups, flags);
+ num_groups, flags, 0);
return prio;
}
enum {
+ RDMA_RX_ECN_OPCOUNTER_PER_QP_PRIO,
+ RDMA_RX_CNP_OPCOUNTER_PER_QP_PRIO,
+ RDMA_RX_PKTS_BYTES_OPCOUNTER_PER_QP_PRIO,
RDMA_RX_ECN_OPCOUNTER_PRIO,
RDMA_RX_CNP_OPCOUNTER_PRIO,
+ RDMA_RX_PKTS_BYTES_OPCOUNTER_PRIO,
};
enum {
+ RDMA_TX_CNP_OPCOUNTER_PER_QP_PRIO,
+ RDMA_TX_PKTS_BYTES_OPCOUNTER_PER_QP_PRIO,
RDMA_TX_CNP_OPCOUNTER_PRIO,
+ RDMA_TX_PKTS_BYTES_OPCOUNTER_PRIO,
};
static int set_vhca_port_spec(struct mlx5_ib_dev *dev, u32 port_num,
@@ -867,6 +881,344 @@ static int set_cnp_spec(struct mlx5_ib_dev *dev, u32 port_num,
return 0;
}
+/* Returns the prio to use for the given optional counter type. For the
+ * bytes types we return the corresponding packets type's prio, since both
+ * share the same resources.
+ */
+static struct mlx5_ib_flow_prio *get_opfc_prio(struct mlx5_ib_dev *dev,
+ u32 type)
+{
+ u32 prio_type;
+
+ switch (type) {
+ case MLX5_IB_OPCOUNTER_RDMA_TX_BYTES:
+ prio_type = MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_RX_BYTES:
+ prio_type = MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_TX_BYTES_PER_QP:
+ prio_type = MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP:
+ prio_type = MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS_PER_QP;
+ break;
+ default:
+ prio_type = type;
+ }
+
+ return &dev->flow_db->opfcs[prio_type];
+}
+
+static void put_per_qp_prio(struct mlx5_ib_dev *dev,
+ enum mlx5_ib_optional_counter_type type)
+{
+ enum mlx5_ib_optional_counter_type per_qp_type;
+ struct mlx5_ib_flow_prio *prio;
+
+ switch (type) {
+ case MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS:
+ per_qp_type = MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS:
+ per_qp_type = MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS:
+ per_qp_type = MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS:
+ per_qp_type = MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_TX_BYTES:
+ per_qp_type = MLX5_IB_OPCOUNTER_RDMA_TX_BYTES_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS:
+ per_qp_type = MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_RX_BYTES:
+ per_qp_type = MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP;
+ break;
+ default:
+ return;
+ }
+
+ prio = get_opfc_prio(dev, per_qp_type);
+ put_flow_table(dev, prio, true);
+}
+
+static int get_per_qp_prio(struct mlx5_ib_dev *dev,
+ enum mlx5_ib_optional_counter_type type)
+{
+ enum mlx5_ib_optional_counter_type per_qp_type;
+ enum mlx5_flow_namespace_type fn_type;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_ib_flow_prio *prio;
+ int priority;
+
+ switch (type) {
+ case MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS:
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS;
+ priority = RDMA_RX_ECN_OPCOUNTER_PER_QP_PRIO;
+ per_qp_type = MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS:
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS;
+ priority = RDMA_RX_CNP_OPCOUNTER_PER_QP_PRIO;
+ per_qp_type = MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS:
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS;
+ priority = RDMA_TX_CNP_OPCOUNTER_PER_QP_PRIO;
+ per_qp_type = MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS:
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS;
+ priority = RDMA_TX_PKTS_BYTES_OPCOUNTER_PER_QP_PRIO;
+ per_qp_type = MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_TX_BYTES:
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS;
+ priority = RDMA_TX_PKTS_BYTES_OPCOUNTER_PER_QP_PRIO;
+ per_qp_type = MLX5_IB_OPCOUNTER_RDMA_TX_BYTES_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS:
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS;
+ priority = RDMA_RX_PKTS_BYTES_OPCOUNTER_PER_QP_PRIO;
+ per_qp_type = MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_RX_BYTES:
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS;
+ priority = RDMA_RX_PKTS_BYTES_OPCOUNTER_PER_QP_PRIO;
+ per_qp_type = MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ns = mlx5_get_flow_namespace(dev->mdev, fn_type);
+ if (!ns)
+ return -EOPNOTSUPP;
+
+ prio = get_opfc_prio(dev, per_qp_type);
+ if (prio->flow_table)
+ return 0;
+
+ prio = _get_prio(dev, ns, prio, priority, MLX5_FS_MAX_POOL_SIZE, 1, 0, 0);
+ if (IS_ERR(prio))
+ return PTR_ERR(prio);
+
+ prio->refcount = 1;
+
+ return 0;
+}
+
+static struct mlx5_per_qp_opfc *
+get_per_qp_opfc(struct mlx5_rdma_counter *mcounter, u32 qp_num, bool *new)
+{
+ struct mlx5_per_qp_opfc *per_qp_opfc;
+
+ *new = false;
+
+ per_qp_opfc = xa_load(&mcounter->qpn_opfc_xa, qp_num);
+ if (per_qp_opfc)
+ return per_qp_opfc;
+
+ per_qp_opfc = kzalloc(sizeof(*per_qp_opfc), GFP_KERNEL);
+ if (!per_qp_opfc)
+ return NULL;
+
+ *new = true;
+ return per_qp_opfc;
+}
+
+static int add_op_fc_rules(struct mlx5_ib_dev *dev,
+ struct mlx5_rdma_counter *mcounter,
+ struct mlx5_per_qp_opfc *per_qp_opfc,
+ struct mlx5_ib_flow_prio *prio,
+ enum mlx5_ib_optional_counter_type type,
+ u32 qp_num, u32 port_num)
+{
+ struct mlx5_ib_op_fc *opfc = &per_qp_opfc->opfcs[type], *in_use_opfc;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_destination dst;
+ struct mlx5_flow_spec *spec;
+ int i, err, spec_num;
+ bool is_tx;
+
+ if (opfc->fc)
+ return -EEXIST;
+
+ if (mlx5r_is_opfc_shared_and_in_use(per_qp_opfc->opfcs, type,
+ &in_use_opfc)) {
+ opfc->fc = in_use_opfc->fc;
+ opfc->rule[0] = in_use_opfc->rule[0];
+ return 0;
+ }
+
+ opfc->fc = mcounter->fc[type];
+
+ spec = kcalloc(MAX_OPFC_RULES, sizeof(*spec), GFP_KERNEL);
+ if (!spec) {
+ err = -ENOMEM;
+ goto null_fc;
+ }
+
+ switch (type) {
+ case MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS_PER_QP:
+ if (set_ecn_ce_spec(dev, port_num, &spec[0],
+ MLX5_FS_IPV4_VERSION) ||
+ set_ecn_ce_spec(dev, port_num, &spec[1],
+ MLX5_FS_IPV6_VERSION)) {
+ err = -EOPNOTSUPP;
+ goto free_spec;
+ }
+ spec_num = 2;
+ is_tx = false;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec[1].match_criteria,
+ misc_parameters.bth_dst_qp);
+ MLX5_SET(fte_match_param, spec[1].match_value,
+ misc_parameters.bth_dst_qp, qp_num);
+ spec[1].match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+ break;
+ case MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS_PER_QP:
+ if (!MLX5_CAP_FLOWTABLE(
+ dev->mdev,
+ ft_field_support_2_nic_receive_rdma.bth_opcode) ||
+ set_cnp_spec(dev, port_num, &spec[0])) {
+ err = -EOPNOTSUPP;
+ goto free_spec;
+ }
+ spec_num = 1;
+ is_tx = false;
+ break;
+ case MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS_PER_QP:
+ if (!MLX5_CAP_FLOWTABLE(
+ dev->mdev,
+ ft_field_support_2_nic_transmit_rdma.bth_opcode) ||
+ set_cnp_spec(dev, port_num, &spec[0])) {
+ err = -EOPNOTSUPP;
+ goto free_spec;
+ }
+ spec_num = 1;
+ is_tx = true;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS_PER_QP:
+ case MLX5_IB_OPCOUNTER_RDMA_TX_BYTES_PER_QP:
+ spec_num = 1;
+ is_tx = true;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS_PER_QP:
+ case MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP:
+ spec_num = 1;
+ is_tx = false;
+ break;
+ default:
+ err = -EINVAL;
+ goto free_spec;
+ }
+
+ if (is_tx) {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters.source_sqn);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters.source_sqn, qp_num);
+ } else {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters.bth_dst_qp);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters.bth_dst_qp, qp_num);
+ }
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
+ dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dst.counter = opfc->fc;
+
+ flow_act.action =
+ MLX5_FLOW_CONTEXT_ACTION_COUNT | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+
+ for (i = 0; i < spec_num; i++) {
+ opfc->rule[i] = mlx5_add_flow_rules(prio->flow_table, &spec[i],
+ &flow_act, &dst, 1);
+ if (IS_ERR(opfc->rule[i])) {
+ err = PTR_ERR(opfc->rule[i]);
+ goto del_rules;
+ }
+ }
+ prio->refcount += spec_num;
+
+ err = xa_err(xa_store(&mcounter->qpn_opfc_xa, qp_num, per_qp_opfc,
+ GFP_KERNEL));
+ if (err)
+ goto del_rules;
+
+ kfree(spec);
+
+ return 0;
+
+del_rules:
+ while (i--)
+ mlx5_del_flow_rules(opfc->rule[i]);
+ put_flow_table(dev, prio, false);
+free_spec:
+ kfree(spec);
+null_fc:
+ opfc->fc = NULL;
+ return err;
+}
+
+static bool is_fc_shared_and_in_use(struct mlx5_rdma_counter *mcounter,
+ u32 type, struct mlx5_fc **fc)
+{
+ u32 shared_fc_type;
+
+ switch (type) {
+ case MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS_PER_QP:
+ shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_TX_BYTES_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_TX_BYTES_PER_QP:
+ shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS_PER_QP:
+ shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP;
+ break;
+ case MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP:
+ shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS_PER_QP;
+ break;
+ default:
+ return false;
+ }
+
+ *fc = mcounter->fc[shared_fc_type];
+ if (!(*fc))
+ return false;
+
+ return true;
+}
+
+void mlx5r_fs_destroy_fcs(struct mlx5_ib_dev *dev,
+ struct rdma_counter *counter)
+{
+ struct mlx5_rdma_counter *mcounter = to_mcounter(counter);
+ struct mlx5_fc *in_use_fc;
+ int i;
+
+ for (i = MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS_PER_QP;
+ i <= MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP; i++) {
+ if (!mcounter->fc[i])
+ continue;
+
+ if (is_fc_shared_and_in_use(mcounter, i, &in_use_fc)) {
+ mcounter->fc[i] = NULL;
+ continue;
+ }
+
+ mlx5_fc_destroy(dev->mdev, mcounter->fc[i]);
+ mcounter->fc[i] = NULL;
+ }
+}
+
int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
struct mlx5_ib_op_fc *opfc,
enum mlx5_ib_optional_counter_type type)
@@ -921,6 +1273,20 @@ int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
priority = RDMA_TX_CNP_OPCOUNTER_PRIO;
break;
+ case MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS:
+ case MLX5_IB_OPCOUNTER_RDMA_TX_BYTES:
+ spec_num = 1;
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS;
+ priority = RDMA_TX_PKTS_BYTES_OPCOUNTER_PRIO;
+ break;
+
+ case MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS:
+ case MLX5_IB_OPCOUNTER_RDMA_RX_BYTES:
+ spec_num = 1;
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS;
+ priority = RDMA_RX_PKTS_BYTES_OPCOUNTER_PRIO;
+ break;
+
default:
err = -EOPNOTSUPP;
goto free;
@@ -932,13 +1298,17 @@ int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
goto free;
}
- prio = &dev->flow_db->opfcs[type];
+ prio = get_opfc_prio(dev, type);
if (!prio->flow_table) {
+ err = get_per_qp_prio(dev, type);
+ if (err)
+ goto free;
+
prio = _get_prio(dev, ns, prio, priority,
- dev->num_ports * MAX_OPFC_RULES, 1, 0);
+ dev->num_ports * MAX_OPFC_RULES, 1, 0, 0);
if (IS_ERR(prio)) {
err = PTR_ERR(prio);
- goto free;
+ goto put_prio;
}
}
@@ -965,6 +1335,8 @@ del_rules:
for (i -= 1; i >= 0; i--)
mlx5_del_flow_rules(opfc->rule[i]);
put_flow_table(dev, prio, false);
+put_prio:
+ put_per_qp_prio(dev, type);
free:
kfree(spec);
return err;
@@ -974,12 +1346,115 @@ void mlx5_ib_fs_remove_op_fc(struct mlx5_ib_dev *dev,
struct mlx5_ib_op_fc *opfc,
enum mlx5_ib_optional_counter_type type)
{
+ struct mlx5_ib_flow_prio *prio;
int i;
+ prio = get_opfc_prio(dev, type);
+
for (i = 0; i < MAX_OPFC_RULES && opfc->rule[i]; i++) {
mlx5_del_flow_rules(opfc->rule[i]);
- put_flow_table(dev, &dev->flow_db->opfcs[type], true);
+ put_flow_table(dev, prio, true);
}
+
+ put_per_qp_prio(dev, type);
+}
+
+void mlx5r_fs_unbind_op_fc(struct ib_qp *qp, struct rdma_counter *counter)
+{
+ struct mlx5_rdma_counter *mcounter = to_mcounter(counter);
+ struct mlx5_ib_dev *dev = to_mdev(counter->device);
+ struct mlx5_per_qp_opfc *per_qp_opfc;
+ struct mlx5_ib_op_fc *in_use_opfc;
+ struct mlx5_ib_flow_prio *prio;
+ int i, j;
+
+ per_qp_opfc = xa_load(&mcounter->qpn_opfc_xa, qp->qp_num);
+ if (!per_qp_opfc)
+ return;
+
+ for (i = MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS_PER_QP;
+ i <= MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP; i++) {
+ if (!per_qp_opfc->opfcs[i].fc)
+ continue;
+
+ if (mlx5r_is_opfc_shared_and_in_use(per_qp_opfc->opfcs, i,
+ &in_use_opfc)) {
+ per_qp_opfc->opfcs[i].fc = NULL;
+ continue;
+ }
+
+ for (j = 0; j < MAX_OPFC_RULES; j++) {
+ if (!per_qp_opfc->opfcs[i].rule[j])
+ continue;
+ mlx5_del_flow_rules(per_qp_opfc->opfcs[i].rule[j]);
+ prio = get_opfc_prio(dev, i);
+ put_flow_table(dev, prio, true);
+ }
+ per_qp_opfc->opfcs[i].fc = NULL;
+ }
+
+ kfree(per_qp_opfc);
+ xa_erase(&mcounter->qpn_opfc_xa, qp->qp_num);
+}
+
+int mlx5r_fs_bind_op_fc(struct ib_qp *qp, struct rdma_counter *counter,
+ u32 port)
+{
+ struct mlx5_rdma_counter *mcounter = to_mcounter(counter);
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_per_qp_opfc *per_qp_opfc;
+ struct mlx5_ib_flow_prio *prio;
+ struct mlx5_ib_counters *cnts;
+ struct mlx5_ib_op_fc *opfc;
+ struct mlx5_fc *in_use_fc;
+ int i, err, per_qp_type;
+ bool new;
+
+ if (!counter->mode.bind_opcnt)
+ return 0;
+
+ cnts = &dev->port[port - 1].cnts;
+
+ for (i = 0; i <= MLX5_IB_OPCOUNTER_RDMA_RX_BYTES; i++) {
+ opfc = &cnts->opfcs[i];
+ if (!opfc->fc)
+ continue;
+
+ per_qp_type = i + MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS_PER_QP;
+ prio = get_opfc_prio(dev, per_qp_type);
+ WARN_ON(!prio->flow_table);
+
+ if (is_fc_shared_and_in_use(mcounter, per_qp_type, &in_use_fc))
+ mcounter->fc[per_qp_type] = in_use_fc;
+
+ if (!mcounter->fc[per_qp_type]) {
+ mcounter->fc[per_qp_type] = mlx5_fc_create(dev->mdev,
+ false);
+ if (IS_ERR(mcounter->fc[per_qp_type]))
+ return PTR_ERR(mcounter->fc[per_qp_type]);
+ }
+
+ per_qp_opfc = get_per_qp_opfc(mcounter, qp->qp_num, &new);
+ if (!per_qp_opfc) {
+ err = -ENOMEM;
+ goto free_fc;
+ }
+ err = add_op_fc_rules(dev, mcounter, per_qp_opfc, prio,
+ per_qp_type, qp->qp_num, port);
+ if (err)
+ goto del_rules;
+ }
+
+ return 0;
+
+del_rules:
+ mlx5r_fs_unbind_op_fc(qp, counter);
+ if (new)
+ kfree(per_qp_opfc);
+free_fc:
+ if (xa_empty(&mcounter->qpn_opfc_xa))
+ mlx5r_fs_destroy_fcs(dev, counter);
+ return err;
}
static void set_underlay_qp(struct mlx5_ib_dev *dev,
@@ -1413,17 +1888,51 @@ free_ucmd:
return ERR_PTR(err);
}
+static int mlx5_ib_fill_transport_ns_info(struct mlx5_ib_dev *dev,
+ enum mlx5_flow_namespace_type type,
+ u32 *flags, u16 *vport_idx,
+ u16 *vport,
+ struct mlx5_core_dev **ft_mdev,
+ u32 ib_port)
+{
+ struct mlx5_core_dev *esw_mdev;
+
+ if (!is_mdev_switchdev_mode(dev->mdev))
+ return 0;
+
+ if (!MLX5_CAP_ADV_RDMA(dev->mdev, rdma_transport_manager))
+ return -EOPNOTSUPP;
+
+ if (!dev->port[ib_port - 1].rep)
+ return -EINVAL;
+
+ esw_mdev = mlx5_eswitch_get_core_dev(dev->port[ib_port - 1].rep->esw);
+ if (esw_mdev != dev->mdev)
+ return -EOPNOTSUPP;
+
+ *flags |= MLX5_FLOW_TABLE_OTHER_VPORT;
+ *ft_mdev = esw_mdev;
+ *vport = dev->port[ib_port - 1].rep->vport;
+ *vport_idx = dev->port[ib_port - 1].rep->vport_index;
+
+ return 0;
+}
+
static struct mlx5_ib_flow_prio *
_get_flow_table(struct mlx5_ib_dev *dev, u16 user_priority,
enum mlx5_flow_namespace_type ns_type,
- bool mcast)
+ bool mcast, u32 ib_port)
{
+ struct mlx5_core_dev *ft_mdev = dev->mdev;
struct mlx5_flow_namespace *ns = NULL;
struct mlx5_ib_flow_prio *prio = NULL;
int max_table_size = 0;
+ u16 vport_idx = 0;
bool esw_encap;
u32 flags = 0;
+ u16 vport = 0;
int priority;
+ int ret;
if (mcast)
priority = MLX5_IB_FLOW_MCAST_PRIO;
@@ -1471,13 +1980,38 @@ _get_flow_table(struct mlx5_ib_dev *dev, u16 user_priority,
MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev, log_max_ft_size));
priority = user_priority;
break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX:
+ case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX:
+ if (ib_port == 0 || user_priority > MLX5_RDMA_TRANSPORT_BYPASS_PRIO)
+ return ERR_PTR(-EINVAL);
+ ret = mlx5_ib_fill_transport_ns_info(dev, ns_type, &flags,
+ &vport_idx, &vport,
+ &ft_mdev, ib_port);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (ns_type == MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX)
+ max_table_size =
+ BIT(MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_RX(
+ ft_mdev, log_max_ft_size));
+ else
+ max_table_size =
+ BIT(MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_TX(
+ ft_mdev, log_max_ft_size));
+ priority = user_priority;
+ break;
default:
break;
}
max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES);
- ns = mlx5_get_flow_namespace(dev->mdev, ns_type);
+ if (ns_type == MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX ||
+ ns_type == MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX)
+ ns = mlx5_get_flow_vport_namespace(ft_mdev, ns_type, vport_idx);
+ else
+ ns = mlx5_get_flow_namespace(ft_mdev, ns_type);
+
if (!ns)
return ERR_PTR(-EOPNOTSUPP);
@@ -1497,6 +2031,12 @@ _get_flow_table(struct mlx5_ib_dev *dev, u16 user_priority,
case MLX5_FLOW_NAMESPACE_RDMA_TX:
prio = &dev->flow_db->rdma_tx[priority];
break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX:
+ prio = &dev->flow_db->rdma_transport_rx[ib_port - 1];
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX:
+ prio = &dev->flow_db->rdma_transport_tx[ib_port - 1];
+ break;
default: return ERR_PTR(-EINVAL);
}
@@ -1507,7 +2047,7 @@ _get_flow_table(struct mlx5_ib_dev *dev, u16 user_priority,
return prio;
return _get_prio(dev, ns, prio, priority, max_table_size,
- MLX5_FS_MAX_TYPES, flags);
+ MLX5_FS_MAX_TYPES, flags, vport);
}
static struct mlx5_ib_flow_handler *
@@ -1626,7 +2166,8 @@ static struct mlx5_ib_flow_handler *raw_fs_rule_add(
mutex_lock(&dev->flow_db->lock);
ft_prio = _get_flow_table(dev, fs_matcher->priority,
- fs_matcher->ns_type, mcast);
+ fs_matcher->ns_type, mcast,
+ fs_matcher->ib_port);
if (IS_ERR(ft_prio)) {
err = PTR_ERR(ft_prio);
goto unlock;
@@ -1742,6 +2283,12 @@ mlx5_ib_ft_type_to_namespace(enum mlx5_ib_uapi_flow_table_type table_type,
case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_TX:
*namespace = MLX5_FLOW_NAMESPACE_RDMA_TX;
break;
+ case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_TRANSPORT_RX:
+ *namespace = MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX;
+ break;
+ case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_TRANSPORT_TX:
+ *namespace = MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX;
+ break;
default:
return -EINVAL;
}
@@ -1831,7 +2378,8 @@ static int get_dests(struct uverbs_attr_bundle *attrs,
return -EINVAL;
/* Allow only DEVX object or QP as dest when inserting to RDMA_RX */
- if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) &&
+ if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX ||
+ fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX) &&
((!dest_devx && !dest_qp) || (dest_devx && dest_qp)))
return -EINVAL;
@@ -1848,7 +2396,8 @@ static int get_dests(struct uverbs_attr_bundle *attrs,
return -EINVAL;
/* Allow only flow table as dest when inserting to FDB or RDMA_RX */
if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB_BYPASS ||
- fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) &&
+ fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX ||
+ fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX) &&
*dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
return -EINVAL;
} else if (dest_qp) {
@@ -1869,14 +2418,16 @@ static int get_dests(struct uverbs_attr_bundle *attrs,
*dest_id = mqp->raw_packet_qp.rq.tirn;
*dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
} else if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
- fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) &&
+ fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX ||
+ fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX) &&
!(*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)) {
*dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
}
if (*dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
(fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
- fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX))
+ fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX ||
+ fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX))
return -EINVAL;
return 0;
@@ -2353,6 +2904,15 @@ static int mlx5_ib_matcher_ns(struct uverbs_attr_bundle *attrs,
return 0;
}
+static bool verify_context_caps(struct mlx5_ib_dev *dev, u64 enabled_caps)
+{
+ if (is_mdev_switchdev_mode(dev->mdev))
+ return UCAP_ENABLED(enabled_caps,
+ RDMA_UCAP_MLX5_CTRL_OTHER_VHCA);
+
+ return UCAP_ENABLED(enabled_caps, RDMA_UCAP_MLX5_CTRL_LOCAL);
+}
+
static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
struct uverbs_attr_bundle *attrs)
{
@@ -2401,6 +2961,26 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
goto end;
}
+ if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_IB_PORT)) {
+ err = uverbs_copy_from(&obj->ib_port, attrs,
+ MLX5_IB_ATTR_FLOW_MATCHER_IB_PORT);
+ if (err)
+ goto end;
+ if (!rdma_is_port_valid(&dev->ib_dev, obj->ib_port)) {
+ err = -EINVAL;
+ goto end;
+ }
+ if (obj->ns_type != MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX &&
+ obj->ns_type != MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX) {
+ err = -EINVAL;
+ goto end;
+ }
+ if (!verify_context_caps(dev, uobj->context->enabled_caps)) {
+ err = -EOPNOTSUPP;
+ goto end;
+ }
+ }
+
uobj->object = obj;
obj->mdev = dev->mdev;
atomic_set(&obj->usecnt, 0);
@@ -2448,7 +3028,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE)(
mutex_lock(&dev->flow_db->lock);
- ft_prio = _get_flow_table(dev, priority, ns_type, 0);
+ ft_prio = _get_flow_table(dev, priority, ns_type, 0, 0);
if (IS_ERR(ft_prio)) {
err = PTR_ERR(ft_prio);
goto free_obj;
@@ -2834,7 +3414,10 @@ DECLARE_UVERBS_NAMED_METHOD(
UA_OPTIONAL),
UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE,
enum mlx5_ib_uapi_flow_table_type,
- UA_OPTIONAL));
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_FLOW_MATCHER_IB_PORT,
+ UVERBS_ATTR_TYPE(u32),
+ UA_OPTIONAL));
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
MLX5_IB_METHOD_FLOW_MATCHER_DESTROY,
@@ -2878,6 +3461,7 @@ DECLARE_UVERBS_NAMED_OBJECT(
&UVERBS_METHOD(MLX5_IB_METHOD_STEERING_ANCHOR_DESTROY));
const struct uapi_definition mlx5_ib_flow_defs[] = {
+#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
MLX5_IB_OBJECT_FLOW_MATCHER),
UAPI_DEF_CHAIN_OBJ_TREE(
@@ -2888,6 +3472,7 @@ const struct uapi_definition mlx5_ib_flow_defs[] = {
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
MLX5_IB_OBJECT_STEERING_ANCHOR,
UAPI_DEF_IS_OBJ_SUPPORTED(mlx5_ib_shared_ft_allowed)),
+#endif
{},
};
@@ -2904,8 +3489,26 @@ int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
if (!dev->flow_db)
return -ENOMEM;
+ dev->flow_db->rdma_transport_rx = kcalloc(dev->num_ports,
+ sizeof(struct mlx5_ib_flow_prio),
+ GFP_KERNEL);
+ if (!dev->flow_db->rdma_transport_rx)
+ goto free_flow_db;
+
+ dev->flow_db->rdma_transport_tx = kcalloc(dev->num_ports,
+ sizeof(struct mlx5_ib_flow_prio),
+ GFP_KERNEL);
+ if (!dev->flow_db->rdma_transport_tx)
+ goto free_rdma_transport_rx;
+
mutex_init(&dev->flow_db->lock);
ib_set_device_ops(&dev->ib_dev, &flow_ops);
return 0;
+
+free_rdma_transport_rx:
+ kfree(dev->flow_db->rdma_transport_rx);
+free_flow_db:
+ kfree(dev->flow_db);
+ return -ENOMEM;
}
diff --git a/drivers/infiniband/hw/mlx5/fs.h b/drivers/infiniband/hw/mlx5/fs.h
index b9734904f5f0..2ebe86e5be10 100644
--- a/drivers/infiniband/hw/mlx5/fs.h
+++ b/drivers/infiniband/hw/mlx5/fs.h
@@ -8,23 +8,8 @@
#include "mlx5_ib.h"
-#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int mlx5_ib_fs_init(struct mlx5_ib_dev *dev);
void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev);
-#else
-static inline int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
-{
- dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);
-
- if (!dev->flow_db)
- return -ENOMEM;
-
- mutex_init(&dev->flow_db->lock);
- return 0;
-}
-
-inline void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev) {}
-#endif
static inline void mlx5_ib_fs_cleanup(struct mlx5_ib_dev *dev)
{
@@ -40,6 +25,8 @@ static inline void mlx5_ib_fs_cleanup(struct mlx5_ib_dev *dev)
* is a safe assumption that all references are gone.
*/
mlx5_ib_fs_cleanup_anchor(dev);
+ kfree(dev->flow_db->rdma_transport_tx);
+ kfree(dev->flow_db->rdma_transport_rx);
kfree(dev->flow_db);
}
#endif /* _MLX5_IB_FS_H */
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 81849eb671a1..d07cacaa0abd 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -47,6 +47,7 @@
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/ib_ucaps.h>
#include "macsec.h"
#include "data_direct.h"
@@ -1934,6 +1935,12 @@ static int set_ucontext_resp(struct ib_ucontext *uctx,
return 0;
}
+static bool uctx_rdma_ctrl_is_enabled(u64 enabled_caps)
+{
+ return UCAP_ENABLED(enabled_caps, RDMA_UCAP_MLX5_CTRL_LOCAL) ||
+ UCAP_ENABLED(enabled_caps, RDMA_UCAP_MLX5_CTRL_OTHER_VHCA);
+}
+
static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
struct ib_udata *udata)
{
@@ -1976,10 +1983,17 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
return -EINVAL;
if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
- err = mlx5_ib_devx_create(dev, true);
+ err = mlx5_ib_devx_create(dev, true, uctx->enabled_caps);
if (err < 0)
goto out_ctx;
context->devx_uid = err;
+
+ if (uctx_rdma_ctrl_is_enabled(uctx->enabled_caps)) {
+ err = mlx5_cmd_add_privileged_uid(dev->mdev,
+ context->devx_uid);
+ if (err)
+ goto out_devx;
+ }
}
lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
@@ -1994,7 +2008,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
/* updates req->total_num_bfregs */
err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
if (err)
- goto out_devx;
+ goto out_ucap;
mutex_init(&bfregi->lock);
bfregi->lib_uar_4k = lib_uar_4k;
@@ -2002,7 +2016,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
GFP_KERNEL);
if (!bfregi->count) {
err = -ENOMEM;
- goto out_devx;
+ goto out_ucap;
}
bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
@@ -2066,6 +2080,11 @@ out_sys_pages:
out_count:
kfree(bfregi->count);
+out_ucap:
+ if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX &&
+ uctx_rdma_ctrl_is_enabled(uctx->enabled_caps))
+ mlx5_cmd_remove_privileged_uid(dev->mdev, context->devx_uid);
+
out_devx:
if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
mlx5_ib_devx_destroy(dev, context->devx_uid);
@@ -2110,8 +2129,12 @@ static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
kfree(bfregi->sys_pages);
kfree(bfregi->count);
- if (context->devx_uid)
+ if (context->devx_uid) {
+ if (uctx_rdma_ctrl_is_enabled(ibcontext->enabled_caps))
+ mlx5_cmd_remove_privileged_uid(dev->mdev,
+ context->devx_uid);
mlx5_ib_devx_destroy(dev, context->devx_uid);
+ }
}
static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
@@ -4201,8 +4224,47 @@ static int mlx5_ib_init_var_table(struct mlx5_ib_dev *dev)
return (var_table->bitmap) ? 0 : -ENOMEM;
}
+static void mlx5_ib_cleanup_ucaps(struct mlx5_ib_dev *dev)
+{
+ if (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RDMA_CTRL)
+ ib_remove_ucap(RDMA_UCAP_MLX5_CTRL_LOCAL);
+
+ if (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
+ MLX5_UCTX_CAP_RDMA_CTRL_OTHER_VHCA)
+ ib_remove_ucap(RDMA_UCAP_MLX5_CTRL_OTHER_VHCA);
+}
+
+static int mlx5_ib_init_ucaps(struct mlx5_ib_dev *dev)
+{
+ int ret;
+
+ if (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RDMA_CTRL) {
+ ret = ib_create_ucap(RDMA_UCAP_MLX5_CTRL_LOCAL);
+ if (ret)
+ return ret;
+ }
+
+ if (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
+ MLX5_UCTX_CAP_RDMA_CTRL_OTHER_VHCA) {
+ ret = ib_create_ucap(RDMA_UCAP_MLX5_CTRL_OTHER_VHCA);
+ if (ret)
+ goto remove_local;
+ }
+
+ return 0;
+
+remove_local:
+ if (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RDMA_CTRL)
+ ib_remove_ucap(RDMA_UCAP_MLX5_CTRL_LOCAL);
+ return ret;
+}
+
static void mlx5_ib_stage_caps_cleanup(struct mlx5_ib_dev *dev)
{
+ if (MLX5_CAP_GEN_2_64(dev->mdev, general_obj_types_127_64) &
+ MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_RDMA_CTRL)
+ mlx5_ib_cleanup_ucaps(dev);
+
bitmap_free(dev->var_table.bitmap);
}
@@ -4253,6 +4315,13 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN_2_64(dev->mdev, general_obj_types_127_64) &
+ MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_RDMA_CTRL) {
+ err = mlx5_ib_init_ucaps(dev);
+ if (err)
+ return err;
+ }
+
dev->ib_dev.use_cq_dim = true;
return 0;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 974a45c92fbb..ace2df3e1d9f 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -276,6 +276,7 @@ struct mlx5_ib_flow_matcher {
struct mlx5_core_dev *mdev;
atomic_t usecnt;
u8 match_criteria_enable;
+ u32 ib_port;
};
struct mlx5_ib_steering_anchor {
@@ -293,6 +294,18 @@ enum mlx5_ib_optional_counter_type {
MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS,
MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS,
MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS,
+ MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS,
+ MLX5_IB_OPCOUNTER_RDMA_TX_BYTES,
+ MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS,
+ MLX5_IB_OPCOUNTER_RDMA_RX_BYTES,
+
+ MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS_PER_QP,
+ MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS_PER_QP,
+ MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS_PER_QP,
+ MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS_PER_QP,
+ MLX5_IB_OPCOUNTER_RDMA_TX_BYTES_PER_QP,
+ MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS_PER_QP,
+ MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP,
MLX5_IB_OPCOUNTER_MAX,
};
@@ -307,6 +320,8 @@ struct mlx5_ib_flow_db {
struct mlx5_ib_flow_prio rdma_tx[MLX5_IB_NUM_FLOW_FT];
struct mlx5_ib_flow_prio opfcs[MLX5_IB_OPCOUNTER_MAX];
struct mlx5_flow_table *lag_demux_ft;
+ struct mlx5_ib_flow_prio *rdma_transport_rx;
+ struct mlx5_ib_flow_prio *rdma_transport_tx;
/* Protect flow steering bypass flow tables
* when add/del flow rules.
* only single add/removal of flow steering rule could be done
@@ -883,6 +898,14 @@ void mlx5_ib_fs_remove_op_fc(struct mlx5_ib_dev *dev,
struct mlx5_ib_op_fc *opfc,
enum mlx5_ib_optional_counter_type type);
+int mlx5r_fs_bind_op_fc(struct ib_qp *qp, struct rdma_counter *counter,
+ u32 port);
+
+void mlx5r_fs_unbind_op_fc(struct ib_qp *qp, struct rdma_counter *counter);
+
+void mlx5r_fs_destroy_fcs(struct mlx5_ib_dev *dev,
+ struct rdma_counter *counter);
+
struct mlx5_ib_multiport_info;
struct mlx5_ib_multiport {
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 753faa9ad06a..b7c8c926c578 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -56,7 +56,7 @@ static void
create_mkey_callback(int status, struct mlx5_async_work *context);
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
u64 iova, int access_flags,
- unsigned int page_size, bool populate,
+ unsigned long page_size, bool populate,
int access_mode);
static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr);
@@ -718,8 +718,7 @@ mkey_cache_ent_from_rb_key(struct mlx5_ib_dev *dev,
}
static struct mlx5_ib_mr *_mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
- struct mlx5_cache_ent *ent,
- int access_flags)
+ struct mlx5_cache_ent *ent)
{
struct mlx5_ib_mr *mr;
int err;
@@ -794,7 +793,7 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
if (!ent)
return ERR_PTR(-EOPNOTSUPP);
- return _mlx5_mr_cache_alloc(dev, ent, access_flags);
+ return _mlx5_mr_cache_alloc(dev, ent);
}
static void mlx5_mkey_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
@@ -919,6 +918,25 @@ mkeys_err:
return ERR_PTR(ret);
}
+static void mlx5r_destroy_cache_entries(struct mlx5_ib_dev *dev)
+{
+ struct rb_root *root = &dev->cache.rb_root;
+ struct mlx5_cache_ent *ent;
+ struct rb_node *node;
+
+ mutex_lock(&dev->cache.rb_lock);
+ node = rb_first(root);
+ while (node) {
+ ent = rb_entry(node, struct mlx5_cache_ent, node);
+ node = rb_next(node);
+ clean_keys(dev, ent);
+ rb_erase(&ent->node, root);
+ mlx5r_mkeys_uninit(ent);
+ kfree(ent);
+ }
+ mutex_unlock(&dev->cache.rb_lock);
+}
+
int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
{
struct mlx5_mkey_cache *cache = &dev->cache;
@@ -970,6 +988,8 @@ int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
err:
mutex_unlock(&cache->rb_lock);
mlx5_mkey_cache_debugfs_cleanup(dev);
+ mlx5r_destroy_cache_entries(dev);
+ destroy_workqueue(cache->wq);
mlx5_ib_warn(dev, "failed to create mkey cache entry\n");
return ret;
}
@@ -1003,17 +1023,7 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
/* At this point all entries are disabled and have no concurrent work. */
- mutex_lock(&dev->cache.rb_lock);
- node = rb_first(root);
- while (node) {
- ent = rb_entry(node, struct mlx5_cache_ent, node);
- node = rb_next(node);
- clean_keys(dev, ent);
- rb_erase(&ent->node, root);
- mlx5r_mkeys_uninit(ent);
- kfree(ent);
- }
- mutex_unlock(&dev->cache.rb_lock);
+ mlx5r_destroy_cache_entries(dev);
destroy_workqueue(dev->cache.wq);
del_timer_sync(&dev->delay_timer);
@@ -1115,7 +1125,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
struct mlx5r_cache_rb_key rb_key = {};
struct mlx5_cache_ent *ent;
struct mlx5_ib_mr *mr;
- unsigned int page_size;
+ unsigned long page_size;
if (umem->is_dmabuf)
page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova);
@@ -1144,7 +1154,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
return mr;
}
- mr = _mlx5_mr_cache_alloc(dev, ent, access_flags);
+ mr = _mlx5_mr_cache_alloc(dev, ent);
if (IS_ERR(mr))
return mr;
@@ -1219,7 +1229,7 @@ err_1:
*/
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
u64 iova, int access_flags,
- unsigned int page_size, bool populate,
+ unsigned long page_size, bool populate,
int access_mode)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
@@ -1425,7 +1435,7 @@ static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem,
mr = alloc_cacheable_mr(pd, umem, iova, access_flags,
MLX5_MKC_ACCESS_MODE_MTT);
} else {
- unsigned int page_size =
+ unsigned long page_size =
mlx5_umem_mkc_find_best_pgsz(dev, umem, iova);
mutex_lock(&dev->slow_path_mutex);
@@ -1957,7 +1967,6 @@ static int cache_ent_find_and_store(struct mlx5_ib_dev *dev,
if (mr->mmkey.cache_ent) {
spin_lock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock);
- mr->mmkey.cache_ent->in_use--;
goto end;
}
@@ -2025,6 +2034,7 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
bool is_odp = is_odp_mr(mr);
bool is_odp_dma_buf = is_dmabuf_mr(mr) &&
!to_ib_umem_dmabuf(mr->umem)->pinned;
+ bool from_cache = !!ent;
int ret = 0;
if (is_odp)
@@ -2037,6 +2047,8 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
ent = mr->mmkey.cache_ent;
/* upon storing to a clean temp entry - schedule its cleanup */
spin_lock_irq(&ent->mkeys_queue.lock);
+ if (from_cache)
+ ent->in_use--;
if (ent->is_tmp && !ent->tmp_cleanup_scheduled) {
mod_delayed_work(ent->dev->cache.wq, &ent->dwork,
msecs_to_jiffies(30 * 1000));
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index e77c9280c07e..86d8fa63bf69 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -309,9 +309,6 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
blk_start_idx = idx;
in_block = 1;
}
-
- /* Count page invalidations */
- invalidations += idx - blk_start_idx + 1;
} else {
u64 umr_offset = idx & umr_block_mask;
@@ -321,14 +318,19 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
MLX5_IB_UPD_XLT_ZAP |
MLX5_IB_UPD_XLT_ATOMIC);
in_block = 0;
+ /* Count page invalidations */
+ invalidations += idx - blk_start_idx + 1;
}
}
}
- if (in_block)
+ if (in_block) {
mlx5r_umr_update_xlt(mr, blk_start_idx,
idx - blk_start_idx + 1, 0,
MLX5_IB_UPD_XLT_ZAP |
MLX5_IB_UPD_XLT_ATOMIC);
+ /* Count page invalidations */
+ invalidations += idx - blk_start_idx + 1;
+ }
mlx5_update_odp_stats_with_handled(mr, invalidations, invalidations);
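The odp.c hunk above defers the invalidation accounting until a contiguous block is actually zapped, including the trailing block handled after the loop, so pages are no longer counted for ranges that were never flushed. A minimal userspace sketch of that accounting pattern follows; the flush() helper and the toy page map are hypothetical stand-ins for mlx5r_umr_update_xlt() and the MR's page-presence state.

#include <stdbool.h>
#include <stdio.h>

/* Count entries only when a contiguous block of present indices is flushed,
 * mirroring the corrected accounting in mlx5_ib_invalidate_range(). */
static void flush(size_t start, size_t len, size_t *counted)
{
	*counted += len;	/* count at flush time, not while scanning */
	printf("flush [%zu, %zu)\n", start, start + len);
}

int main(void)
{
	bool present[] = { 1, 1, 0, 1, 1, 1 };	/* toy page map */
	size_t n = sizeof(present) / sizeof(present[0]);
	size_t counted = 0, blk_start = 0;
	bool in_block = false;

	for (size_t idx = 0; idx < n; idx++) {
		if (present[idx]) {
			if (!in_block) {
				blk_start = idx;
				in_block = true;
			}
		} else if (in_block) {
			flush(blk_start, idx - blk_start, &counted);
			in_block = false;
		}
	}
	if (in_block)	/* trailing block, as in the patch */
		flush(blk_start, n - blk_start, &counted);

	printf("counted %zu invalidations\n", counted);	/* prints 5 */
	return 0;
}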
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
index 9f54aa90a35a..bcd43dc30e21 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -237,34 +237,6 @@ enum rdma_link_layer pvrdma_port_link_layer(struct ib_device *ibdev,
return IB_LINK_LAYER_ETHERNET;
}
-int pvrdma_modify_device(struct ib_device *ibdev, int mask,
- struct ib_device_modify *props)
-{
- unsigned long flags;
-
- if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
- IB_DEVICE_MODIFY_NODE_DESC)) {
- dev_warn(&to_vdev(ibdev)->pdev->dev,
- "unsupported device modify mask %#x\n", mask);
- return -EOPNOTSUPP;
- }
-
- if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
- spin_lock_irqsave(&to_vdev(ibdev)->desc_lock, flags);
- memcpy(ibdev->node_desc, props->node_desc, 64);
- spin_unlock_irqrestore(&to_vdev(ibdev)->desc_lock, flags);
- }
-
- if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
- mutex_lock(&to_vdev(ibdev)->port_mutex);
- to_vdev(ibdev)->sys_image_guid =
- cpu_to_be64(props->sys_image_guid);
- mutex_unlock(&to_vdev(ibdev)->port_mutex);
- }
-
- return 0;
-}
-
/**
* pvrdma_modify_port - modify device port attributes
* @ibdev: the device to modify
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
index 4b9edc03d73d..fd47b0b1df5c 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
@@ -356,8 +356,6 @@ int pvrdma_query_pkey(struct ib_device *ibdev, u32 port,
u16 index, u16 *pkey);
enum rdma_link_layer pvrdma_port_link_layer(struct ib_device *ibdev,
u32 port);
-int pvrdma_modify_device(struct ib_device *ibdev, int mask,
- struct ib_device_modify *props);
int pvrdma_modify_port(struct ib_device *ibdev, u32 port,
int mask, struct ib_port_modify *props);
int pvrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index e6203e26cc06..614009fb9632 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1107,9 +1107,8 @@ int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
}
/* initialize timers needed for rc qp */
timer_setup(&qp->s_timer, rvt_rc_timeout, 0);
- hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- qp->s_rnr_timer.function = rvt_rc_rnr_retry;
+ hrtimer_setup(&qp->s_rnr_timer, rvt_rc_rnr_retry, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/*
* Driver needs to set up it's private QP structure and do any
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
index 06b8dc5093f7..c180e7ebcfc5 100644
--- a/drivers/infiniband/sw/rxe/Kconfig
+++ b/drivers/infiniband/sw/rxe/Kconfig
@@ -4,8 +4,7 @@ config RDMA_RXE
depends on INET && PCI && INFINIBAND
depends on INFINIBAND_VIRT_DMA
select NET_UDP_TUNNEL
- select CRYPTO
- select CRYPTO_CRC32
+ select CRC32
help
This driver implements the InfiniBand RDMA transport over
the Linux network stack. It enables a system with a
diff --git a/drivers/infiniband/sw/rxe/Makefile b/drivers/infiniband/sw/rxe/Makefile
index 5395a581f4bb..93134f1d1d0c 100644
--- a/drivers/infiniband/sw/rxe/Makefile
+++ b/drivers/infiniband/sw/rxe/Makefile
@@ -23,3 +23,5 @@ rdma_rxe-y := \
rxe_task.o \
rxe_net.o \
rxe_hw_counters.o
+
+rdma_rxe-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += rxe_odp.o
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 1ba4a0c8726a..b248c68bf9b1 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -31,17 +31,12 @@ void rxe_dealloc(struct ib_device *ib_dev)
WARN_ON(!RB_EMPTY_ROOT(&rxe->mcg_tree));
- if (rxe->tfm)
- crypto_free_shash(rxe->tfm);
-
mutex_destroy(&rxe->usdev_lock);
}
/* initialize rxe device parameters */
-static void rxe_init_device_param(struct rxe_dev *rxe)
+static void rxe_init_device_param(struct rxe_dev *rxe, struct net_device *ndev)
{
- struct net_device *ndev;
-
rxe->max_inline_data = RXE_MAX_INLINE_DATA;
rxe->attr.vendor_id = RXE_VENDOR_ID;
@@ -74,16 +69,39 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
rxe->attr.max_pkeys = RXE_MAX_PKEYS;
rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY;
- ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
- if (!ndev)
- return;
+ if (ndev->addr_len) {
+ memcpy(rxe->raw_gid, ndev->dev_addr,
+ min_t(unsigned int, ndev->addr_len, ETH_ALEN));
+ } else {
+ /*
+ * This device does not have a HW address, but
+ * connection management requires a unique gid.
+ */
+ eth_random_addr(rxe->raw_gid);
+ }
addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid,
- ndev->dev_addr);
-
- dev_put(ndev);
+ rxe->raw_gid);
rxe->max_ucontext = RXE_MAX_UCONTEXT;
+
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
+ rxe->attr.kernel_cap_flags |= IBK_ON_DEMAND_PAGING;
+
+ /* IB_ODP_SUPPORT_IMPLICIT is not supported right now. */
+ rxe->attr.odp_caps.general_caps |= IB_ODP_SUPPORT;
+
+ rxe->attr.odp_caps.per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
+ rxe->attr.odp_caps.per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_RECV;
+ rxe->attr.odp_caps.per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
+
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
+ }
}
/* initialize port attributes */
@@ -115,18 +133,13 @@ static void rxe_init_port_param(struct rxe_port *port)
/* initialize port state, note IB convention that HCA ports are always
* numbered from 1
*/
-static void rxe_init_ports(struct rxe_dev *rxe)
+static void rxe_init_ports(struct rxe_dev *rxe, struct net_device *ndev)
{
struct rxe_port *port = &rxe->port;
- struct net_device *ndev;
rxe_init_port_param(port);
- ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
- if (!ndev)
- return;
addrconf_addr_eui48((unsigned char *)&port->port_guid,
- ndev->dev_addr);
- dev_put(ndev);
+ rxe->raw_gid);
spin_lock_init(&port->port_lock);
}
@@ -144,12 +157,12 @@ static void rxe_init_pools(struct rxe_dev *rxe)
}
/* initialize rxe device state */
-static void rxe_init(struct rxe_dev *rxe)
+static void rxe_init(struct rxe_dev *rxe, struct net_device *ndev)
{
/* init default device parameters */
- rxe_init_device_param(rxe);
+ rxe_init_device_param(rxe, ndev);
- rxe_init_ports(rxe);
+ rxe_init_ports(rxe, ndev);
rxe_init_pools(rxe);
/* init pending mmap list */
@@ -184,7 +197,7 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name,
struct net_device *ndev)
{
- rxe_init(rxe);
+ rxe_init(rxe, ndev);
rxe_set_mtu(rxe, mtu);
return rxe_register_device(rxe, ibdev_name, ndev);
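The rxe.c hunk above caches the netdev MAC (or a random one when the device has no hardware address) in rxe->raw_gid and derives sys_image_guid and the port GUID from it via addrconf_addr_eui48(). A self-contained sketch of that EUI-48 to EUI-64 expansion, assuming the standard FF:FE insertion with the universal/local bit flipped; the example MAC is arbitrary.

#include <stdint.h>
#include <stdio.h>

/* Expand a 48-bit MAC into an EUI-64, as addrconf_addr_eui48() does for the
 * GUIDs built from rxe->raw_gid: insert FF:FE in the middle and toggle the
 * universal/local bit of the first octet. */
static void eui48_to_eui64(const uint8_t mac[6], uint8_t eui[8])
{
	eui[0] = mac[0] ^ 0x02;	/* flip U/L bit */
	eui[1] = mac[1];
	eui[2] = mac[2];
	eui[3] = 0xFF;
	eui[4] = 0xFE;
	eui[5] = mac[3];
	eui[6] = mac[4];
	eui[7] = mac[5];
}

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x42, 0xac, 0x11, 0x00, 0x02 };	/* arbitrary */
	uint8_t eui[8];

	eui48_to_eui64(mac, eui);
	for (int i = 0; i < 8; i++)
		printf("%02x%s", eui[i], i < 7 ? ":" : "\n");
	return 0;
}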
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index fe7f97066732..ff8cd53f5f28 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -21,7 +21,6 @@
#include <rdma/ib_umem.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
-#include <crypto/hash.h>
#include "rxe_net.h"
#include "rxe_opcode.h"
@@ -100,43 +99,6 @@
#define rxe_info_mw(mw, fmt, ...) ibdev_info_ratelimited((mw)->ibmw.device, \
"mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
-/* responder states */
-enum resp_states {
- RESPST_NONE,
- RESPST_GET_REQ,
- RESPST_CHK_PSN,
- RESPST_CHK_OP_SEQ,
- RESPST_CHK_OP_VALID,
- RESPST_CHK_RESOURCE,
- RESPST_CHK_LENGTH,
- RESPST_CHK_RKEY,
- RESPST_EXECUTE,
- RESPST_READ_REPLY,
- RESPST_ATOMIC_REPLY,
- RESPST_ATOMIC_WRITE_REPLY,
- RESPST_PROCESS_FLUSH,
- RESPST_COMPLETE,
- RESPST_ACKNOWLEDGE,
- RESPST_CLEANUP,
- RESPST_DUPLICATE_REQUEST,
- RESPST_ERR_MALFORMED_WQE,
- RESPST_ERR_UNSUPPORTED_OPCODE,
- RESPST_ERR_MISALIGNED_ATOMIC,
- RESPST_ERR_PSN_OUT_OF_SEQ,
- RESPST_ERR_MISSING_OPCODE_FIRST,
- RESPST_ERR_MISSING_OPCODE_LAST_C,
- RESPST_ERR_MISSING_OPCODE_LAST_D1E,
- RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
- RESPST_ERR_RNR,
- RESPST_ERR_RKEY_VIOLATION,
- RESPST_ERR_INVALIDATE_RKEY,
- RESPST_ERR_LENGTH,
- RESPST_ERR_CQ_OVERFLOW,
- RESPST_ERROR,
- RESPST_DONE,
- RESPST_EXIT,
-};
-
void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name,
diff --git a/drivers/infiniband/sw/rxe/rxe_icrc.c b/drivers/infiniband/sw/rxe/rxe_icrc.c
index fdf5f08cd8f1..76d760fbe7ea 100644
--- a/drivers/infiniband/sw/rxe/rxe_icrc.c
+++ b/drivers/infiniband/sw/rxe/rxe_icrc.c
@@ -10,28 +10,6 @@
#include "rxe_loc.h"
/**
- * rxe_icrc_init() - Initialize crypto function for computing crc32
- * @rxe: rdma_rxe device object
- *
- * Return: 0 on success else an error
- */
-int rxe_icrc_init(struct rxe_dev *rxe)
-{
- struct crypto_shash *tfm;
-
- tfm = crypto_alloc_shash("crc32", 0, 0);
- if (IS_ERR(tfm)) {
- rxe_dbg_dev(rxe, "failed to init crc32 algorithm err: %ld\n",
- PTR_ERR(tfm));
- return PTR_ERR(tfm);
- }
-
- rxe->tfm = tfm;
-
- return 0;
-}
-
-/**
* rxe_crc32() - Compute cumulative crc32 for a contiguous segment
* @rxe: rdma_rxe device object
* @crc: starting crc32 value from previous segments
@@ -42,23 +20,7 @@ int rxe_icrc_init(struct rxe_dev *rxe)
*/
static __be32 rxe_crc32(struct rxe_dev *rxe, __be32 crc, void *next, size_t len)
{
- __be32 icrc;
- int err;
-
- SHASH_DESC_ON_STACK(shash, rxe->tfm);
-
- shash->tfm = rxe->tfm;
- *(__be32 *)shash_desc_ctx(shash) = crc;
- err = crypto_shash_update(shash, next, len);
- if (unlikely(err)) {
- rxe_dbg_dev(rxe, "failed crc calculation, err: %d\n", err);
- return (__force __be32)crc32_le((__force u32)crc, next, len);
- }
-
- icrc = *(__be32 *)shash_desc_ctx(shash);
- barrier_data(shash_desc_ctx(shash));
-
- return icrc;
+ return (__force __be32)crc32_le((__force u32)crc, next, len);
}
/**
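With the crypto_shash path gone, rxe_crc32() feeds each segment straight into the crc32_le() library call. For reference, a userspace sketch of the same reflected CRC-32 (polynomial 0xEDB88320) computed bitwise; the kernel's crc32_le() is the table/SIMD-accelerated equivalent, so this is an illustration only.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise reflected CRC-32 (poly 0xEDB88320), the algorithm behind crc32_le(). */
static uint32_t crc32_le_bitwise(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
	}
	return crc;
}

int main(void)
{
	const char *msg = "123456789";
	/* Seeded with ~0 and finalized with ~crc this yields the standard
	 * CRC-32 check value 0xCBF43926; rxe_crc32() instead continues a
	 * caller-supplied running value across packet segments. */
	uint32_t crc = ~crc32_le_bitwise(~0u, (const uint8_t *)msg, strlen(msg));

	printf("0x%08X\n", crc);
	return 0;
}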
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index ded46119151b..feb386d98d1d 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -58,6 +58,7 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
/* rxe_mr.c */
u8 rxe_get_next_key(u32 last_key);
+void rxe_mr_init(int access, struct rxe_mr *mr);
void rxe_mr_init_dma(int access, struct rxe_mr *mr);
int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
int access, struct rxe_mr *mr);
@@ -80,6 +81,9 @@ int rxe_invalidate_mr(struct rxe_qp *qp, u32 key);
int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
void rxe_mr_cleanup(struct rxe_pool_elem *elem);
+/* defined in rxe_mr.c; used in rxe_mr.c and rxe_odp.c */
+extern spinlock_t atomic_ops_lock;
+
/* rxe_mw.c */
int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata);
int rxe_dealloc_mw(struct ib_mw *ibmw);
@@ -168,7 +172,6 @@ int rxe_sender(struct rxe_qp *qp);
int rxe_receiver(struct rxe_qp *qp);
/* rxe_icrc.c */
-int rxe_icrc_init(struct rxe_dev *rxe);
int rxe_icrc_check(struct sk_buff *skb, struct rxe_pkt_info *pkt);
void rxe_icrc_generate(struct sk_buff *skb, struct rxe_pkt_info *pkt);
@@ -181,4 +184,34 @@ static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp)
return rxe_wr_opcode_info[opcode].mask[qp->ibqp.qp_type];
}
+/* rxe_odp.c */
+extern const struct mmu_interval_notifier_ops rxe_mn_ops;
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
+ u64 iova, int access_flags, struct rxe_mr *mr);
+int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
+ enum rxe_mr_copy_dir dir);
+int rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val);
+#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
+static inline int
+rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
+ int access_flags, struct rxe_mr *mr)
+{
+ return -EOPNOTSUPP;
+}
+static inline int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
+ int length, enum rxe_mr_copy_dir dir)
+{
+ return -EOPNOTSUPP;
+}
+static inline int
+rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val)
+{
+ return RESPST_ERR_UNSUPPORTED_OPCODE;
+}
+#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
+
#endif /* RXE_LOC_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index da3dee520876..868d2f0b74e9 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -45,7 +45,7 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
}
}
-static void rxe_mr_init(int access, struct rxe_mr *mr)
+void rxe_mr_init(int access, struct rxe_mr *mr)
{
u32 key = mr->elem.index << 8 | rxe_get_next_key(-1);
@@ -323,7 +323,10 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
return err;
}
- return rxe_mr_copy_xarray(mr, iova, addr, length, dir);
+ if (mr->umem->is_odp)
+ return rxe_odp_mr_copy(mr, iova, addr, length, dir);
+ else
+ return rxe_mr_copy_xarray(mr, iova, addr, length, dir);
}
/* copy data in or out of a wqe, i.e. sg list
@@ -466,7 +469,7 @@ int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
}
/* Guarantee atomicity of atomic operations at the machine level. */
-static DEFINE_SPINLOCK(atomic_ops_lock);
+DEFINE_SPINLOCK(atomic_ops_lock);
int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
u64 compare, u64 swap_add, u64 *orig_val)
@@ -532,6 +535,10 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
struct page *page;
u64 *va;
+ /* ODP is not supported right now. WIP. */
+ if (mr->umem->is_odp)
+ return RESPST_ERR_UNSUPPORTED_OPCODE;
+
/* See IBA oA19-28 */
if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
rxe_dbg_mr(mr, "mr not in valid state\n");
diff --git a/drivers/infiniband/sw/rxe/rxe_odp.c b/drivers/infiniband/sw/rxe/rxe_odp.c
new file mode 100644
index 000000000000..9f6e2bb2a269
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_odp.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2022-2023 Fujitsu Ltd. All rights reserved.
+ */
+
+#include <linux/hmm.h>
+
+#include <rdma/ib_umem_odp.h>
+
+#include "rxe.h"
+
+static bool rxe_ib_invalidate_range(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct ib_umem_odp *umem_odp =
+ container_of(mni, struct ib_umem_odp, notifier);
+ unsigned long start, end;
+
+ if (!mmu_notifier_range_blockable(range))
+ return false;
+
+ mutex_lock(&umem_odp->umem_mutex);
+ mmu_interval_set_seq(mni, cur_seq);
+
+ start = max_t(u64, ib_umem_start(umem_odp), range->start);
+ end = min_t(u64, ib_umem_end(umem_odp), range->end);
+
+ /* update umem_odp->dma_list */
+ ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
+
+ mutex_unlock(&umem_odp->umem_mutex);
+ return true;
+}
+
+const struct mmu_interval_notifier_ops rxe_mn_ops = {
+ .invalidate = rxe_ib_invalidate_range,
+};
+
+#define RXE_PAGEFAULT_DEFAULT 0
+#define RXE_PAGEFAULT_RDONLY BIT(0)
+#define RXE_PAGEFAULT_SNAPSHOT BIT(1)
+static int rxe_odp_do_pagefault_and_lock(struct rxe_mr *mr, u64 user_va, int bcnt, u32 flags)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ bool fault = !(flags & RXE_PAGEFAULT_SNAPSHOT);
+ u64 access_mask;
+ int np;
+
+ access_mask = ODP_READ_ALLOWED_BIT;
+ if (umem_odp->umem.writable && !(flags & RXE_PAGEFAULT_RDONLY))
+ access_mask |= ODP_WRITE_ALLOWED_BIT;
+
+ /*
+ * ib_umem_odp_map_dma_and_lock() locks umem_mutex on success.
+ * Callers must release the lock later to let invalidation handler
+ * do its work again.
+ */
+ np = ib_umem_odp_map_dma_and_lock(umem_odp, user_va, bcnt,
+ access_mask, fault);
+ return np;
+}
+
+static int rxe_odp_init_pages(struct rxe_mr *mr)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ int ret;
+
+ ret = rxe_odp_do_pagefault_and_lock(mr, mr->umem->address,
+ mr->umem->length,
+ RXE_PAGEFAULT_SNAPSHOT);
+
+ if (ret >= 0)
+ mutex_unlock(&umem_odp->umem_mutex);
+
+ return ret >= 0 ? 0 : ret;
+}
+
+int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
+ u64 iova, int access_flags, struct rxe_mr *mr)
+{
+ struct ib_umem_odp *umem_odp;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+ return -EOPNOTSUPP;
+
+ rxe_mr_init(access_flags, mr);
+
+ if (!start && length == U64_MAX) {
+ if (iova != 0)
+ return -EINVAL;
+ if (!(rxe->attr.odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
+ return -EINVAL;
+
+ /* Never reached, since implicit ODP is not implemented. */
+ }
+
+ umem_odp = ib_umem_odp_get(&rxe->ib_dev, start, length, access_flags,
+ &rxe_mn_ops);
+ if (IS_ERR(umem_odp)) {
+ rxe_dbg_mr(mr, "Unable to create umem_odp err = %d\n",
+ (int)PTR_ERR(umem_odp));
+ return PTR_ERR(umem_odp);
+ }
+
+ umem_odp->private = mr;
+
+ mr->umem = &umem_odp->umem;
+ mr->access = access_flags;
+ mr->ibmr.length = length;
+ mr->ibmr.iova = iova;
+ mr->page_offset = ib_umem_offset(&umem_odp->umem);
+
+ err = rxe_odp_init_pages(mr);
+ if (err) {
+ ib_umem_odp_release(umem_odp);
+ return err;
+ }
+
+ mr->state = RXE_MR_STATE_VALID;
+ mr->ibmr.type = IB_MR_TYPE_USER;
+
+ return err;
+}
+
+static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp,
+ u64 iova, int length, u32 perm)
+{
+ bool need_fault = false;
+ u64 addr;
+ int idx;
+
+ addr = iova & (~(BIT(umem_odp->page_shift) - 1));
+
+ /* Skim through all pages that are to be accessed. */
+ while (addr < iova + length) {
+ idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
+
+ if (!(umem_odp->dma_list[idx] & perm)) {
+ need_fault = true;
+ break;
+ }
+
+ addr += BIT(umem_odp->page_shift);
+ }
+ return need_fault;
+}
+
+static int rxe_odp_map_range_and_lock(struct rxe_mr *mr, u64 iova, int length, u32 flags)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ bool need_fault;
+ u64 perm;
+ int err;
+
+ if (unlikely(length < 1))
+ return -EINVAL;
+
+ perm = ODP_READ_ALLOWED_BIT;
+ if (!(flags & RXE_PAGEFAULT_RDONLY))
+ perm |= ODP_WRITE_ALLOWED_BIT;
+
+ mutex_lock(&umem_odp->umem_mutex);
+
+ need_fault = rxe_check_pagefault(umem_odp, iova, length, perm);
+ if (need_fault) {
+ mutex_unlock(&umem_odp->umem_mutex);
+
+ /* umem_mutex is locked on success. */
+ err = rxe_odp_do_pagefault_and_lock(mr, iova, length,
+ flags);
+ if (err < 0)
+ return err;
+
+ need_fault = rxe_check_pagefault(umem_odp, iova, length, perm);
+ if (need_fault)
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int __rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
+ int length, enum rxe_mr_copy_dir dir)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ struct page *page;
+ int idx, bytes;
+ size_t offset;
+ u8 *user_va;
+
+ idx = (iova - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
+ offset = iova & (BIT(umem_odp->page_shift) - 1);
+
+ while (length > 0) {
+ u8 *src, *dest;
+
+ page = hmm_pfn_to_page(umem_odp->pfn_list[idx]);
+ user_va = kmap_local_page(page);
+ if (!user_va)
+ return -EFAULT;
+
+ src = (dir == RXE_TO_MR_OBJ) ? addr : user_va;
+ dest = (dir == RXE_TO_MR_OBJ) ? user_va : addr;
+
+ bytes = BIT(umem_odp->page_shift) - offset;
+ if (bytes > length)
+ bytes = length;
+
+ memcpy(dest, src, bytes);
+ kunmap_local(user_va);
+
+ length -= bytes;
+ idx++;
+ offset = 0;
+ }
+
+ return 0;
+}
+
+int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
+ enum rxe_mr_copy_dir dir)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ u32 flags = RXE_PAGEFAULT_DEFAULT;
+ int err;
+
+ if (length == 0)
+ return 0;
+
+ if (unlikely(!mr->umem->is_odp))
+ return -EOPNOTSUPP;
+
+ switch (dir) {
+ case RXE_TO_MR_OBJ:
+ break;
+
+ case RXE_FROM_MR_OBJ:
+ flags |= RXE_PAGEFAULT_RDONLY;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ err = rxe_odp_map_range_and_lock(mr, iova, length, flags);
+ if (err)
+ return err;
+
+ err = __rxe_odp_mr_copy(mr, iova, addr, length, dir);
+
+ mutex_unlock(&umem_odp->umem_mutex);
+
+ return err;
+}
+
+static int rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ unsigned int page_offset;
+ struct page *page;
+ unsigned int idx;
+ u64 value;
+ u64 *va;
+ int err;
+
+ if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
+ rxe_dbg_mr(mr, "mr not in valid state\n");
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+
+ err = mr_check_range(mr, iova, sizeof(value));
+ if (err) {
+ rxe_dbg_mr(mr, "iova out of range\n");
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+
+ idx = (iova - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
+ page_offset = iova & (BIT(umem_odp->page_shift) - 1);
+ page = hmm_pfn_to_page(umem_odp->pfn_list[idx]);
+ if (!page)
+ return RESPST_ERR_RKEY_VIOLATION;
+
+ if (unlikely(page_offset & 0x7)) {
+ rxe_dbg_mr(mr, "iova not aligned\n");
+ return RESPST_ERR_MISALIGNED_ATOMIC;
+ }
+
+ va = kmap_local_page(page);
+
+ spin_lock_bh(&atomic_ops_lock);
+ value = *orig_val = va[page_offset >> 3];
+
+ if (opcode == IB_OPCODE_RC_COMPARE_SWAP) {
+ if (value == compare)
+ va[page_offset >> 3] = swap_add;
+ } else {
+ value += swap_add;
+ va[page_offset >> 3] = value;
+ }
+ spin_unlock_bh(&atomic_ops_lock);
+
+ kunmap_local(va);
+
+ return 0;
+}
+
+int rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ int err;
+
+ err = rxe_odp_map_range_and_lock(mr, iova, sizeof(char),
+ RXE_PAGEFAULT_DEFAULT);
+ if (err < 0)
+ return RESPST_ERR_RKEY_VIOLATION;
+
+ err = rxe_odp_do_atomic_op(mr, iova, opcode, compare, swap_add,
+ orig_val);
+ mutex_unlock(&umem_odp->umem_mutex);
+
+ return err;
+}
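__rxe_odp_mr_copy() and rxe_odp_do_atomic_op() above locate the backing page with the usual (iova - umem_start) >> page_shift index plus an in-page offset, then walk page by page until the requested length is consumed. A small userspace sketch of that arithmetic; the 4 KiB page size, addresses and length are made-up values.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)

int main(void)
{
	uint64_t umem_start = 0x7f0000000000ULL;	/* hypothetical MR start */
	uint64_t iova = umem_start + 2 * PAGE_SIZE + 4000;
	int length = 300;				/* crosses a page boundary */

	uint64_t idx = (iova - umem_start) >> PAGE_SHIFT;
	uint64_t offset = iova & (PAGE_SIZE - 1);

	while (length > 0) {
		int bytes = PAGE_SIZE - offset;

		if (bytes > length)
			bytes = length;
		printf("copy %d bytes from page %llu at offset %llu\n",
		       bytes, (unsigned long long)idx, (unsigned long long)offset);
		length -= bytes;
		idx++;
		offset = 0;	/* subsequent pages start at offset 0 */
	}
	return 0;
}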
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 87a02f0deb00..9d0392df8a92 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -5,7 +5,6 @@
*/
#include <linux/skbuff.h>
-#include <crypto/hash.h>
#include "rxe.h"
#include "rxe_loc.h"
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index c11ab280551a..54ba9ee1acc5 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -649,6 +649,10 @@ static enum resp_states process_flush(struct rxe_qp *qp,
struct rxe_mr *mr = qp->resp.mr;
struct resp_res *res = qp->resp.res;
+ /* ODP is not supported right now. WIP. */
+ if (mr->umem->is_odp)
+ return RESPST_ERR_UNSUPPORTED_OPCODE;
+
/* oA19-14, oA19-15 */
if (res && res->replay)
return RESPST_ACKNOWLEDGE;
@@ -702,10 +706,16 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
if (!res->replay) {
u64 iova = qp->resp.va + qp->resp.offset;
- err = rxe_mr_do_atomic_op(mr, iova, pkt->opcode,
- atmeth_comp(pkt),
- atmeth_swap_add(pkt),
- &res->atomic.orig_val);
+ if (mr->umem->is_odp)
+ err = rxe_odp_atomic_op(mr, iova, pkt->opcode,
+ atmeth_comp(pkt),
+ atmeth_swap_add(pkt),
+ &res->atomic.orig_val);
+ else
+ err = rxe_mr_do_atomic_op(mr, iova, pkt->opcode,
+ atmeth_comp(pkt),
+ atmeth_swap_add(pkt),
+ &res->atomic.orig_val);
if (err)
return err;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 6152a0fdfc8c..2331e698a65b 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -80,6 +80,18 @@ err_out:
return err;
}
+static int rxe_query_gid(struct ib_device *ibdev, u32 port, int idx,
+ union ib_gid *gid)
+{
+ struct rxe_dev *rxe = to_rdev(ibdev);
+
+ /* subnet_prefix == interface_id == 0; */
+ memset(gid, 0, sizeof(*gid));
+ memcpy(gid->raw, rxe->raw_gid, ETH_ALEN);
+
+ return 0;
+}
+
static int rxe_query_pkey(struct ib_device *ibdev,
u32 port_num, u16 index, u16 *pkey)
{
@@ -1286,7 +1298,10 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
mr->ibmr.pd = ibpd;
mr->ibmr.device = ibpd->device;
- err = rxe_mr_init_user(rxe, start, length, access, mr);
+ if (access & IB_ACCESS_ON_DEMAND)
+ err = rxe_odp_mr_init_user(rxe, start, length, iova, access, mr);
+ else
+ err = rxe_mr_init_user(rxe, start, length, access, mr);
if (err) {
rxe_dbg_mr(mr, "reg_user_mr failed, err = %d\n", err);
goto err_cleanup;
@@ -1493,6 +1508,7 @@ static const struct ib_device_ops rxe_dev_ops = {
.query_ah = rxe_query_ah,
.query_device = rxe_query_device,
.query_pkey = rxe_query_pkey,
+ .query_gid = rxe_query_gid,
.query_port = rxe_query_port,
.query_qp = rxe_query_qp,
.query_srq = rxe_query_srq,
@@ -1523,7 +1539,7 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name,
dev->num_comp_vectors = num_possible_cpus();
dev->local_dma_lkey = 0;
addrconf_addr_eui48((unsigned char *)&dev->node_guid,
- ndev->dev_addr);
+ rxe->raw_gid);
dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND) |
BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ);
@@ -1533,10 +1549,6 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name,
if (err)
return err;
- err = rxe_icrc_init(rxe);
- if (err)
- return err;
-
err = ib_register_device(dev, ibdev_name, NULL);
if (err)
rxe_dbg_dev(rxe, "failed with error %d\n", err);
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 6573ceec0ef5..fd48075810dd 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -126,6 +126,43 @@ struct rxe_comp_info {
u32 rnr_retry;
};
+/* responder states */
+enum resp_states {
+ RESPST_NONE,
+ RESPST_GET_REQ,
+ RESPST_CHK_PSN,
+ RESPST_CHK_OP_SEQ,
+ RESPST_CHK_OP_VALID,
+ RESPST_CHK_RESOURCE,
+ RESPST_CHK_LENGTH,
+ RESPST_CHK_RKEY,
+ RESPST_EXECUTE,
+ RESPST_READ_REPLY,
+ RESPST_ATOMIC_REPLY,
+ RESPST_ATOMIC_WRITE_REPLY,
+ RESPST_PROCESS_FLUSH,
+ RESPST_COMPLETE,
+ RESPST_ACKNOWLEDGE,
+ RESPST_CLEANUP,
+ RESPST_DUPLICATE_REQUEST,
+ RESPST_ERR_MALFORMED_WQE,
+ RESPST_ERR_UNSUPPORTED_OPCODE,
+ RESPST_ERR_MISALIGNED_ATOMIC,
+ RESPST_ERR_PSN_OUT_OF_SEQ,
+ RESPST_ERR_MISSING_OPCODE_FIRST,
+ RESPST_ERR_MISSING_OPCODE_LAST_C,
+ RESPST_ERR_MISSING_OPCODE_LAST_D1E,
+ RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
+ RESPST_ERR_RNR,
+ RESPST_ERR_RKEY_VIOLATION,
+ RESPST_ERR_INVALIDATE_RKEY,
+ RESPST_ERR_LENGTH,
+ RESPST_ERR_CQ_OVERFLOW,
+ RESPST_ERROR,
+ RESPST_DONE,
+ RESPST_EXIT,
+};
+
enum rdatm_res_state {
rdatm_res_state_next,
rdatm_res_state_new,
@@ -376,7 +413,9 @@ struct rxe_dev {
struct ib_device_attr attr;
int max_ucontext;
int max_inline_data;
- struct mutex usdev_lock;
+ struct mutex usdev_lock;
+
+ char raw_gid[ETH_ALEN];
struct rxe_pool uc_pool;
struct rxe_pool pd_pool;
@@ -402,7 +441,6 @@ struct rxe_dev {
atomic64_t stats_counters[RXE_NUM_OF_COUNTERS];
struct rxe_port port;
- struct crypto_shash *tfm;
};
static inline struct net_device *rxe_ib_device_get_netdev(struct ib_device *dev)
diff --git a/drivers/infiniband/sw/siw/Kconfig b/drivers/infiniband/sw/siw/Kconfig
index 81b70a3eeb87..ae4a953e2a03 100644
--- a/drivers/infiniband/sw/siw/Kconfig
+++ b/drivers/infiniband/sw/siw/Kconfig
@@ -2,9 +2,7 @@ config RDMA_SIW
tristate "Software RDMA over TCP/IP (iWARP) driver"
depends on INET && INFINIBAND
depends on INFINIBAND_VIRT_DMA
- select LIBCRC32C
- select CRYPTO
- select CRYPTO_CRC32C
+ select CRC32
help
This driver implements the iWARP RDMA transport over
the Linux TCP/IP network stack. It enables a system with a
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index ea5eee50dc39..385067e07faf 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -10,9 +10,9 @@
#include <rdma/restrack.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
-#include <crypto/hash.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
+#include <linux/unaligned.h>
#include <rdma/siw-abi.h>
#include "iwarp.h"
@@ -289,7 +289,8 @@ struct siw_rx_stream {
union iwarp_hdr hdr;
struct mpa_trailer trailer;
- struct shash_desc *mpa_crc_hd;
+ u32 mpa_crc;
+ bool mpa_crc_enabled;
/*
* For each FPDU, main RX loop runs through 3 stages:
@@ -390,7 +391,8 @@ struct siw_iwarp_tx {
int burst;
int bytes_unsent; /* ddp payload bytes */
- struct shash_desc *mpa_crc_hd;
+ u32 mpa_crc;
+ bool mpa_crc_enabled;
u8 do_crc : 1; /* do crc for segment */
u8 use_sendpage : 1; /* send w/o copy */
@@ -496,7 +498,6 @@ extern u_char mpa_version;
extern const bool peer_to_peer;
extern struct task_struct *siw_tx_thread[];
-extern struct crypto_shash *siw_crypto_shash;
extern struct iwarp_msg_info iwarp_pktinfo[RDMAP_TERMINATE + 1];
/* QP general functions */
@@ -668,6 +669,30 @@ static inline struct siw_sqe *irq_alloc_free(struct siw_qp *qp)
return NULL;
}
+static inline void siw_crc_init(u32 *crc)
+{
+ *crc = ~0;
+}
+
+static inline void siw_crc_update(u32 *crc, const void *data, size_t len)
+{
+ *crc = crc32c(*crc, data, len);
+}
+
+static inline void siw_crc_final(u32 *crc, u8 out[4])
+{
+ put_unaligned_le32(~*crc, out);
+}
+
+static inline void siw_crc_oneshot(const void *data, size_t len, u8 out[4])
+{
+ u32 crc;
+
+ siw_crc_init(&crc);
+ siw_crc_update(&crc, data, len);
+ return siw_crc_final(&crc, out);
+}
+
static inline __wsum siw_csum_update(const void *buff, int len, __wsum sum)
{
return (__force __wsum)crc32c((__force __u32)sum, buff, len);
@@ -676,8 +701,8 @@ static inline __wsum siw_csum_update(const void *buff, int len, __wsum sum)
static inline __wsum siw_csum_combine(__wsum csum, __wsum csum2, int offset,
int len)
{
- return (__force __wsum)__crc32c_le_combine((__force __u32)csum,
- (__force __u32)csum2, len);
+ return (__force __wsum)crc32c_combine((__force __u32)csum,
+ (__force __u32)csum2, len);
}
static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
@@ -686,11 +711,11 @@ static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
.update = siw_csum_update,
.combine = siw_csum_combine,
};
- __wsum crc = *(u32 *)shash_desc_ctx(srx->mpa_crc_hd);
+ __wsum crc = (__force __wsum)srx->mpa_crc;
crc = __skb_checksum(srx->skb, srx->skb_offset, len, crc,
&siw_cs_ops);
- *(u32 *)shash_desc_ctx(srx->mpa_crc_hd) = crc;
+ srx->mpa_crc = (__force u32)crc;
}
#define siw_dbg(ibdev, fmt, ...) \
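The siw.h hunk replaces the per-QP shash descriptors with a plain running u32 driven by the crc32c() library helper: siw_crc_init() seeds with ~0, siw_crc_update() accumulates, and siw_crc_final() stores the complemented result little-endian into the MPA trailer. A self-contained sketch of that flow, using a bitwise CRC-32C (Castagnoli, reflected polynomial 0x82F63B78) as a stand-in for the kernel's accelerated crc32c().

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise CRC-32C (Castagnoli); stand-in for the kernel's crc32c(). */
static uint32_t crc32c_sw(uint32_t crc, const void *data, size_t len)
{
	const uint8_t *p = data;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1u));
	}
	return crc;
}

/* Mirror siw_crc_final(): complement and store little-endian. */
static void crc_final(uint32_t crc, uint8_t out[4])
{
	crc = ~crc;
	out[0] = crc;
	out[1] = crc >> 8;
	out[2] = crc >> 16;
	out[3] = crc >> 24;
}

int main(void)
{
	const char *hdr = "fpdu-header", *pay = "payload";
	uint8_t trailer[4];
	uint32_t crc = ~0u;				/* siw_crc_init() */

	crc = crc32c_sw(crc, hdr, strlen(hdr));		/* siw_crc_update() */
	crc = crc32c_sw(crc, pay, strlen(pay));
	crc_final(crc, trailer);			/* siw_crc_final() */

	printf("MPA CRC trailer: %02x %02x %02x %02x\n",
	       trailer[0], trailer[1], trailer[2], trailer[3]);
	return 0;
}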
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index b17752bd1ecc..5168307229a9 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -59,7 +59,6 @@ u_char mpa_version = MPA_REVISION_2;
const bool peer_to_peer;
struct task_struct *siw_tx_thread[NR_CPUS];
-struct crypto_shash *siw_crypto_shash;
static int siw_device_register(struct siw_device *sdev, const char *name)
{
@@ -467,20 +466,7 @@ static __init int siw_init_module(void)
rv = -ENOMEM;
goto out_error;
}
- /*
- * Locate CRC32 algorithm. If unsuccessful, fail
- * loading siw only, if CRC is required.
- */
- siw_crypto_shash = crypto_alloc_shash("crc32c", 0, 0);
- if (IS_ERR(siw_crypto_shash)) {
- pr_info("siw: Loading CRC32c failed: %ld\n",
- PTR_ERR(siw_crypto_shash));
- siw_crypto_shash = NULL;
- if (mpa_crc_required) {
- rv = -EOPNOTSUPP;
- goto out_error;
- }
- }
+
rv = register_netdevice_notifier(&siw_netdev_nb);
if (rv)
goto out_error;
@@ -493,9 +479,6 @@ static __init int siw_init_module(void)
out_error:
siw_stop_tx_threads();
- if (siw_crypto_shash)
- crypto_free_shash(siw_crypto_shash);
-
pr_info("SoftIWARP attach failed. Error: %d\n", rv);
siw_cm_exit();
@@ -516,9 +499,6 @@ static void __exit siw_exit_module(void)
siw_destroy_cpulist(siw_cpu_info.num_nodes);
- if (siw_crypto_shash)
- crypto_free_shash(siw_crypto_shash);
-
pr_info("SoftiWARP detached\n");
}
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index da92cfa2073d..c1e6e7d6e32f 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -226,33 +226,6 @@ static int siw_qp_readq_init(struct siw_qp *qp, int irq_size, int orq_size)
return 0;
}
-static int siw_qp_enable_crc(struct siw_qp *qp)
-{
- struct siw_rx_stream *c_rx = &qp->rx_stream;
- struct siw_iwarp_tx *c_tx = &qp->tx_ctx;
- int size;
-
- if (siw_crypto_shash == NULL)
- return -ENOENT;
-
- size = crypto_shash_descsize(siw_crypto_shash) +
- sizeof(struct shash_desc);
-
- c_tx->mpa_crc_hd = kzalloc(size, GFP_KERNEL);
- c_rx->mpa_crc_hd = kzalloc(size, GFP_KERNEL);
- if (!c_tx->mpa_crc_hd || !c_rx->mpa_crc_hd) {
- kfree(c_tx->mpa_crc_hd);
- kfree(c_rx->mpa_crc_hd);
- c_tx->mpa_crc_hd = NULL;
- c_rx->mpa_crc_hd = NULL;
- return -ENOMEM;
- }
- c_tx->mpa_crc_hd->tfm = siw_crypto_shash;
- c_rx->mpa_crc_hd->tfm = siw_crypto_shash;
-
- return 0;
-}
-
/*
* Send a non signalled READ or WRITE to peer side as negotiated
* with MPAv2 P2P setup protocol. The work request is only created
@@ -583,20 +556,15 @@ void siw_send_terminate(struct siw_qp *qp)
term->ctrl.mpa_len =
cpu_to_be16(len_terminate - (MPA_HDR_SIZE + MPA_CRC_SIZE));
- if (qp->tx_ctx.mpa_crc_hd) {
- crypto_shash_init(qp->tx_ctx.mpa_crc_hd);
- if (crypto_shash_update(qp->tx_ctx.mpa_crc_hd,
- (u8 *)iov[0].iov_base,
- iov[0].iov_len))
- goto out;
-
+ if (qp->tx_ctx.mpa_crc_enabled) {
+ siw_crc_init(&qp->tx_ctx.mpa_crc);
+ siw_crc_update(&qp->tx_ctx.mpa_crc,
+ iov[0].iov_base, iov[0].iov_len);
if (num_frags == 3) {
- if (crypto_shash_update(qp->tx_ctx.mpa_crc_hd,
- (u8 *)iov[1].iov_base,
- iov[1].iov_len))
- goto out;
+ siw_crc_update(&qp->tx_ctx.mpa_crc,
+ iov[1].iov_base, iov[1].iov_len);
}
- crypto_shash_final(qp->tx_ctx.mpa_crc_hd, (u8 *)&crc);
+ siw_crc_final(&qp->tx_ctx.mpa_crc, (u8 *)&crc);
}
rv = kernel_sendmsg(s, &msg, iov, num_frags, len_terminate);
@@ -604,7 +572,6 @@ void siw_send_terminate(struct siw_qp *qp)
rv == len_terminate ? "success" : "failure",
__rdmap_term_layer(term), __rdmap_term_etype(term),
__rdmap_term_ecode(term), rv);
-out:
kfree(term);
kfree(err_hdr);
}
@@ -643,9 +610,10 @@ static int siw_qp_nextstate_from_idle(struct siw_qp *qp,
switch (attrs->state) {
case SIW_QP_STATE_RTS:
if (attrs->flags & SIW_MPA_CRC) {
- rv = siw_qp_enable_crc(qp);
- if (rv)
- break;
+ siw_crc_init(&qp->tx_ctx.mpa_crc);
+ qp->tx_ctx.mpa_crc_enabled = true;
+ siw_crc_init(&qp->rx_stream.mpa_crc);
+ qp->rx_stream.mpa_crc_enabled = true;
}
if (!(mask & SIW_QP_ATTR_LLP_HANDLE)) {
siw_dbg_qp(qp, "no socket\n");
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index ed4fc39718b4..32554eba1eac 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -67,10 +67,10 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
return -EFAULT;
}
- if (srx->mpa_crc_hd) {
+ if (srx->mpa_crc_enabled) {
if (rdma_is_kernel_res(&rx_qp(srx)->base_qp.res)) {
- crypto_shash_update(srx->mpa_crc_hd,
- (u8 *)(dest + pg_off), bytes);
+ siw_crc_update(&srx->mpa_crc, dest + pg_off,
+ bytes);
kunmap_atomic(dest);
} else {
kunmap_atomic(dest);
@@ -114,8 +114,8 @@ static int siw_rx_kva(struct siw_rx_stream *srx, void *kva, int len)
return rv;
}
- if (srx->mpa_crc_hd)
- crypto_shash_update(srx->mpa_crc_hd, (u8 *)kva, len);
+ if (srx->mpa_crc_enabled)
+ siw_crc_update(&srx->mpa_crc, kva, len);
srx->skb_offset += len;
srx->skb_copied += len;
@@ -966,16 +966,16 @@ static int siw_get_trailer(struct siw_qp *qp, struct siw_rx_stream *srx)
if (srx->fpdu_part_rem)
return -EAGAIN;
- if (!srx->mpa_crc_hd)
+ if (!srx->mpa_crc_enabled)
return 0;
if (srx->pad)
- crypto_shash_update(srx->mpa_crc_hd, tbuf, srx->pad);
+ siw_crc_update(&srx->mpa_crc, tbuf, srx->pad);
/*
* CRC32 is computed, transmitted and received directly in NBO,
* so there's never a reason to convert byte order.
*/
- crypto_shash_final(srx->mpa_crc_hd, (u8 *)&crc_own);
+ siw_crc_final(&srx->mpa_crc, (u8 *)&crc_own);
crc_in = (__force __wsum)srx->trailer.crc;
if (unlikely(crc_in != crc_own)) {
@@ -1093,13 +1093,12 @@ static int siw_get_hdr(struct siw_rx_stream *srx)
* (tagged/untagged). E.g., a WRITE can get intersected by a SEND,
* but not by a READ RESPONSE etc.
*/
- if (srx->mpa_crc_hd) {
+ if (srx->mpa_crc_enabled) {
/*
* Restart CRC computation
*/
- crypto_shash_init(srx->mpa_crc_hd);
- crypto_shash_update(srx->mpa_crc_hd, (u8 *)c_hdr,
- srx->fpdu_part_rcvd);
+ siw_crc_init(&srx->mpa_crc);
+ siw_crc_update(&srx->mpa_crc, c_hdr, srx->fpdu_part_rcvd);
}
if (frx->more_ddp_segs) {
frx->first_ddp_seg = 0;
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index a034264c5669..6432bce7d083 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -248,10 +248,8 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
/*
* Do complete CRC if enabled and short packet
*/
- if (c_tx->mpa_crc_hd &&
- crypto_shash_digest(c_tx->mpa_crc_hd, (u8 *)&c_tx->pkt,
- c_tx->ctrl_len, (u8 *)crc) != 0)
- return -EINVAL;
+ if (c_tx->mpa_crc_enabled)
+ siw_crc_oneshot(&c_tx->pkt, c_tx->ctrl_len, (u8 *)crc);
c_tx->ctrl_len += MPA_CRC_SIZE;
return PKT_COMPLETE;
@@ -482,9 +480,8 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
iov[seg].iov_len = sge_len;
if (do_crc)
- crypto_shash_update(c_tx->mpa_crc_hd,
- iov[seg].iov_base,
- sge_len);
+ siw_crc_update(&c_tx->mpa_crc,
+ iov[seg].iov_base, sge_len);
sge_off += sge_len;
data_len -= sge_len;
seg++;
@@ -516,15 +513,14 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
iov[seg].iov_len = plen;
if (do_crc)
- crypto_shash_update(
- c_tx->mpa_crc_hd,
+ siw_crc_update(
+ &c_tx->mpa_crc,
iov[seg].iov_base,
plen);
} else if (do_crc) {
kaddr = kmap_local_page(p);
- crypto_shash_update(c_tx->mpa_crc_hd,
- kaddr + fp_off,
- plen);
+ siw_crc_update(&c_tx->mpa_crc,
+ kaddr + fp_off, plen);
kunmap_local(kaddr);
}
} else {
@@ -536,10 +532,9 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
page_array[seg] = ib_virt_dma_to_page(va);
if (do_crc)
- crypto_shash_update(
- c_tx->mpa_crc_hd,
- ib_virt_dma_to_ptr(va),
- plen);
+ siw_crc_update(&c_tx->mpa_crc,
+ ib_virt_dma_to_ptr(va),
+ plen);
}
sge_len -= plen;
@@ -576,14 +571,14 @@ sge_done:
if (c_tx->pad) {
*(u32 *)c_tx->trailer.pad = 0;
if (do_crc)
- crypto_shash_update(c_tx->mpa_crc_hd,
- (u8 *)&c_tx->trailer.crc - c_tx->pad,
- c_tx->pad);
+ siw_crc_update(&c_tx->mpa_crc,
+ (u8 *)&c_tx->trailer.crc - c_tx->pad,
+ c_tx->pad);
}
- if (!c_tx->mpa_crc_hd)
+ if (!c_tx->mpa_crc_enabled)
c_tx->trailer.crc = 0;
else if (do_crc)
- crypto_shash_final(c_tx->mpa_crc_hd, (u8 *)&c_tx->trailer.crc);
+ siw_crc_final(&c_tx->mpa_crc, (u8 *)&c_tx->trailer.crc);
data_len = c_tx->bytes_unsent;
@@ -736,10 +731,9 @@ static void siw_prepare_fpdu(struct siw_qp *qp, struct siw_wqe *wqe)
/*
* Init MPA CRC computation
*/
- if (c_tx->mpa_crc_hd) {
- crypto_shash_init(c_tx->mpa_crc_hd);
- crypto_shash_update(c_tx->mpa_crc_hd, (u8 *)&c_tx->pkt,
- c_tx->ctrl_len);
+ if (c_tx->mpa_crc_enabled) {
+ siw_crc_init(&c_tx->mpa_crc);
+ siw_crc_update(&c_tx->mpa_crc, &c_tx->pkt, c_tx->ctrl_len);
c_tx->do_crc = 1;
}
}
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 5ac8bd450d24..fd7b266a221b 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -631,9 +631,6 @@ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
}
up_write(&qp->state_lock);
- kfree(qp->tx_ctx.mpa_crc_hd);
- kfree(qp->rx_stream.mpa_crc_hd);
-
qp->scq = qp->rcq = NULL;
siw_qp_put(qp);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index 9ad8d9856275..53db7c8191e3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -97,10 +97,13 @@ out_err:
return ret;
}
-static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int ipoib_new_child_link(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct net_device *pdev;
struct ipoib_dev_priv *ppriv;
u16 child_pkey;
@@ -109,7 +112,7 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
if (!tb[IFLA_LINK])
return -EINVAL;
- pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+ pdev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK]));
if (!pdev || pdev->type != ARPHRD_INFINIBAND)
return -ENODEV;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index bb9aaff92ca3..a5be6f1ba12b 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -393,10 +393,10 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task)
* @task: iscsi task
* @sector: error sector if exists (output)
*
- * Return: zero if no data-integrity errors have occured
- * 0x1: data-integrity error occured in the guard-block
- * 0x2: data-integrity error occured in the reference tag
- * 0x3: data-integrity error occured in the application tag
+ * Return: zero if no data-integrity errors have occurred
+ * 0x1: data-integrity error occurred in the guard-block
+ * 0x2: data-integrity error occurred in the reference tag
+ * 0x3: data-integrity error occurred in the application tag
*
* In addition the error sector is marked.
*/
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 8fe2a51df649..c33e6f33265b 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -140,6 +140,7 @@ static const struct xpad_device {
{ 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
+ { 0x044f, 0xd01e, "ThrustMaster, Inc. ESWAP X 2 ELDEN RING EDITION", 0, XTYPE_XBOXONE },
{ 0x044f, 0x0f10, "Thrustmaster Modena GT Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 },
{ 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX },
@@ -177,6 +178,7 @@ static const struct xpad_device {
{ 0x06a3, 0x0200, "Saitek Racing Wheel", 0, XTYPE_XBOX },
{ 0x06a3, 0x0201, "Saitek Adrenalin", 0, XTYPE_XBOX },
{ 0x06a3, 0xf51a, "Saitek P3600", 0, XTYPE_XBOX360 },
+ { 0x0738, 0x4503, "Mad Catz Racing Wheel", 0, XTYPE_XBOXONE },
{ 0x0738, 0x4506, "Mad Catz 4506 Wireless Controller", 0, XTYPE_XBOX },
{ 0x0738, 0x4516, "Mad Catz Control Pad", 0, XTYPE_XBOX },
{ 0x0738, 0x4520, "Mad Catz Control Pad Pro", 0, XTYPE_XBOX },
@@ -238,6 +240,7 @@ static const struct xpad_device {
{ 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0147, "PDP Marvel Xbox One Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x015c, "PDP Xbox One Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
+ { 0x0e6f, 0x015d, "PDP Mirror's Edge Official Wired Controller for Xbox One", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0161, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0162, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0163, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
@@ -276,12 +279,15 @@ static const struct xpad_device {
{ 0x0f0d, 0x0078, "Hori Real Arcade Pro V Kai Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x0f0d, 0x00c5, "Hori Fighting Commander ONE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x0f0d, 0x00dc, "HORIPAD FPS for Nintendo Switch", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x0f0d, 0x0151, "Hori Racing Wheel Overdrive for Xbox Series X", 0, XTYPE_XBOXONE },
+ { 0x0f0d, 0x0152, "Hori Racing Wheel Overdrive for Xbox Series X", 0, XTYPE_XBOXONE },
{ 0x0f30, 0x010b, "Philips Recoil", 0, XTYPE_XBOX },
{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
{ 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
+ { 0x10f5, 0x7005, "Turtle Beach Recon Controller", 0, XTYPE_XBOXONE },
{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
{ 0x11ff, 0x0511, "PXN V900", 0, XTYPE_XBOX360 },
{ 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 },
@@ -306,7 +312,7 @@ static const struct xpad_device {
{ 0x1689, 0xfe00, "Razer Sabertooth", 0, XTYPE_XBOX360 },
{ 0x17ef, 0x6182, "Lenovo Legion Controller for Windows", 0, XTYPE_XBOX360 },
{ 0x1949, 0x041a, "Amazon Game Controller", 0, XTYPE_XBOX360 },
- { 0x1a86, 0xe310, "QH Electronics Controller", 0, XTYPE_XBOX360 },
+ { 0x1a86, 0xe310, "Legion Go S", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1bad, 0x0130, "Ion Drum Rocker", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
@@ -343,6 +349,7 @@ static const struct xpad_device {
{ 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
+ { 0x1ee9, 0x1590, "ZOTAC Gaming Zone", 0, XTYPE_XBOX360 },
{ 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
{ 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE },
{ 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
@@ -366,6 +373,7 @@ static const struct xpad_device {
{ 0x24c6, 0x5510, "Hori Fighting Commander ONE (Xbox 360/PC Mode)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x551a, "PowerA FUSION Pro Controller", 0, XTYPE_XBOXONE },
{ 0x24c6, 0x561a, "PowerA FUSION Controller", 0, XTYPE_XBOXONE },
+ { 0x24c6, 0x581a, "ThrustMaster XB1 Classic Controller", 0, XTYPE_XBOXONE },
{ 0x24c6, 0x5b00, "ThrustMaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5b02, "Thrustmaster, Inc. GPX Controller", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5b03, "Thrustmaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 },
@@ -374,10 +382,15 @@ static const struct xpad_device {
{ 0x2563, 0x058d, "OneXPlayer Gamepad", 0, XTYPE_XBOX360 },
{ 0x294b, 0x3303, "Snakebyte GAMEPAD BASE X", 0, XTYPE_XBOXONE },
{ 0x294b, 0x3404, "Snakebyte GAMEPAD RGB X", 0, XTYPE_XBOXONE },
+ { 0x2993, 0x2001, "TECNO Pocket Go", 0, XTYPE_XBOX360 },
{ 0x2dc8, 0x2000, "8BitDo Pro 2 Wired Controller for Xbox", 0, XTYPE_XBOXONE },
{ 0x2dc8, 0x3106, "8BitDo Ultimate Wireless / Pro 2 Wired Controller", 0, XTYPE_XBOX360 },
+ { 0x2dc8, 0x3109, "8BitDo Ultimate Wireless Bluetooth", 0, XTYPE_XBOX360 },
{ 0x2dc8, 0x310a, "8BitDo Ultimate 2C Wireless Controller", 0, XTYPE_XBOX360 },
+ { 0x2dc8, 0x6001, "8BitDo SN30 Pro", 0, XTYPE_XBOX360 },
{ 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
+ { 0x2e24, 0x1688, "Hyperkin X91 X-Box One pad", 0, XTYPE_XBOXONE },
+ { 0x2e95, 0x0504, "SCUF Gaming Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE },
{ 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1210, "Wooting Lekker", 0, XTYPE_XBOX360 },
@@ -385,11 +398,16 @@ static const struct xpad_device {
{ 0x31e3, 0x1230, "Wooting Two HE (ARM)", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1300, "Wooting 60HE (AVR)", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1310, "Wooting 60HE (ARM)", 0, XTYPE_XBOX360 },
+ { 0x3285, 0x0603, "Nacon Pro Compact controller for Xbox", 0, XTYPE_XBOXONE },
{ 0x3285, 0x0607, "Nacon GC-100", 0, XTYPE_XBOX360 },
+ { 0x3285, 0x0614, "Nacon Pro Compact", 0, XTYPE_XBOXONE },
{ 0x3285, 0x0646, "Nacon Pro Compact", 0, XTYPE_XBOXONE },
+ { 0x3285, 0x0662, "Nacon Revolution5 Pro", 0, XTYPE_XBOX360 },
{ 0x3285, 0x0663, "Nacon Evol-X", 0, XTYPE_XBOXONE },
{ 0x3537, 0x1004, "GameSir T4 Kaleid", 0, XTYPE_XBOX360 },
+ { 0x3537, 0x1010, "GameSir G7 SE", 0, XTYPE_XBOXONE },
{ 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
+ { 0x413d, 0x2104, "Black Shark Green Ghost Gamepad", 0, XTYPE_XBOX360 },
{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
{ 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
};
@@ -488,6 +506,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x03f0), /* HP HyperX Xbox 360 controllers */
XPAD_XBOXONE_VENDOR(0x03f0), /* HP HyperX Xbox One controllers */
XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster Xbox 360 controllers */
+ XPAD_XBOXONE_VENDOR(0x044f), /* Thrustmaster Xbox One controllers */
XPAD_XBOX360_VENDOR(0x045e), /* Microsoft Xbox 360 controllers */
XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft Xbox One controllers */
XPAD_XBOX360_VENDOR(0x046d), /* Logitech Xbox 360-style controllers */
@@ -519,8 +538,9 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
XPAD_XBOX360_VENDOR(0x17ef), /* Lenovo */
XPAD_XBOX360_VENDOR(0x1949), /* Amazon controllers */
- XPAD_XBOX360_VENDOR(0x1a86), /* QH Electronics */
+ XPAD_XBOX360_VENDOR(0x1a86), /* Nanjing Qinheng Microelectronics (WCH) */
XPAD_XBOX360_VENDOR(0x1bad), /* Harmonix Rock Band guitar and drums */
+ XPAD_XBOX360_VENDOR(0x1ee9), /* ZOTAC Technology Limited */
XPAD_XBOX360_VENDOR(0x20d6), /* PowerA controllers */
XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA controllers */
XPAD_XBOX360_VENDOR(0x2345), /* Machenike Controllers */
@@ -528,17 +548,20 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA controllers */
XPAD_XBOX360_VENDOR(0x2563), /* OneXPlayer Gamepad */
XPAD_XBOX360_VENDOR(0x260d), /* Dareu H101 */
- XPAD_XBOXONE_VENDOR(0x294b), /* Snakebyte */
+ XPAD_XBOXONE_VENDOR(0x294b), /* Snakebyte */
+ XPAD_XBOX360_VENDOR(0x2993), /* TECNO Mobile */
XPAD_XBOX360_VENDOR(0x2c22), /* Qanba Controllers */
- XPAD_XBOX360_VENDOR(0x2dc8), /* 8BitDo Pro 2 Wired Controller */
- XPAD_XBOXONE_VENDOR(0x2dc8), /* 8BitDo Pro 2 Wired Controller for Xbox */
- XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke Xbox One pad */
- XPAD_XBOX360_VENDOR(0x2f24), /* GameSir controllers */
+ XPAD_XBOX360_VENDOR(0x2dc8), /* 8BitDo Controllers */
+ XPAD_XBOXONE_VENDOR(0x2dc8), /* 8BitDo Controllers */
+ XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Controllers */
+ XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */
+ XPAD_XBOXONE_VENDOR(0x2e95), /* SCUF Gaming Controller */
XPAD_XBOX360_VENDOR(0x31e3), /* Wooting Keyboards */
XPAD_XBOX360_VENDOR(0x3285), /* Nacon GC-100 */
XPAD_XBOXONE_VENDOR(0x3285), /* Nacon Evol-X */
XPAD_XBOX360_VENDOR(0x3537), /* GameSir Controllers */
XPAD_XBOXONE_VENDOR(0x3537), /* GameSir Controllers */
+ XPAD_XBOX360_VENDOR(0x413d), /* Black Shark Green Ghost Controller */
{ }
};
@@ -691,7 +714,9 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init),
XBOXONE_INIT_PKT(0x045e, 0x0b00, extra_input_packet_init),
XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_led_on),
+ XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_led_on),
XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_auth),
+ XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_auth),
XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
diff --git a/drivers/input/keyboard/ipaq-micro-keys.c b/drivers/input/keyboard/ipaq-micro-keys.c
index 58631bf7ce55..ca7ec054b1ce 100644
--- a/drivers/input/keyboard/ipaq-micro-keys.c
+++ b/drivers/input/keyboard/ipaq-micro-keys.c
@@ -102,9 +102,8 @@ static int micro_key_probe(struct platform_device *pdev)
keys->input->keycodesize = sizeof(micro_keycodes[0]);
keys->input->keycodemax = ARRAY_SIZE(micro_keycodes);
- keys->codes = devm_kmemdup(&pdev->dev, micro_keycodes,
- keys->input->keycodesize * keys->input->keycodemax,
- GFP_KERNEL);
+ keys->codes = devm_kmemdup_array(&pdev->dev, micro_keycodes, keys->input->keycodemax,
+ keys->input->keycodesize, GFP_KERNEL);
if (!keys->codes)
return -ENOMEM;
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 13d135257e06..f5496ca0c0d2 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -240,12 +240,12 @@ config INPUT_MAX77650_ONKEY
will be called max77650-onkey.
config INPUT_MAX77693_HAPTIC
- tristate "MAXIM MAX77693/MAX77843 haptic controller support"
- depends on (MFD_MAX77693 || MFD_MAX77843) && PWM
+ tristate "MAXIM MAX77693/MAX77705/MAX77843 haptic controller support"
+ depends on (MFD_MAX77693 || MFD_MAX77705 || MFD_MAX77843) && PWM
select INPUT_FF_MEMLESS
help
This option enables support for the haptic controller on
- MAXIM MAX77693 and MAX77843 chips.
+ MAXIM MAX77693, MAX77705 and MAX77843 chips.
To compile this driver as module, choose M here: the
module will be called max77693-haptic.
diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c
index 22022d11470d..80b917944b51 100644
--- a/drivers/input/misc/iqs7222.c
+++ b/drivers/input/misc/iqs7222.c
@@ -100,11 +100,11 @@ enum iqs7222_reg_key_id {
enum iqs7222_reg_grp_id {
IQS7222_REG_GRP_STAT,
- IQS7222_REG_GRP_FILT,
IQS7222_REG_GRP_CYCLE,
IQS7222_REG_GRP_GLBL,
IQS7222_REG_GRP_BTN,
IQS7222_REG_GRP_CHAN,
+ IQS7222_REG_GRP_FILT,
IQS7222_REG_GRP_SLDR,
IQS7222_REG_GRP_TPAD,
IQS7222_REG_GRP_GPIO,
@@ -286,6 +286,7 @@ static const struct iqs7222_event_desc iqs7222_tp_events[] = {
struct iqs7222_reg_grp_desc {
u16 base;
+ u16 val_len;
int num_row;
int num_col;
};
@@ -342,6 +343,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xAC00,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -400,6 +402,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xAC00,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -454,6 +457,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xC400,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -496,6 +500,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xC400,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -543,6 +548,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xAA00,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -600,6 +606,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xAA00,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -656,6 +663,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xAE00,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -712,6 +720,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xAE00,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -768,6 +777,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xAE00,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -1604,7 +1614,7 @@ static int iqs7222_force_comms(struct iqs7222_private *iqs7222)
}
static int iqs7222_read_burst(struct iqs7222_private *iqs7222,
- u16 reg, void *val, u16 num_val)
+ u16 reg, void *val, u16 val_len)
{
u8 reg_buf[sizeof(__be16)];
int ret, i;
@@ -1619,7 +1629,7 @@ static int iqs7222_read_burst(struct iqs7222_private *iqs7222,
{
.addr = client->addr,
.flags = I2C_M_RD,
- .len = num_val * sizeof(__le16),
+ .len = val_len,
.buf = (u8 *)val,
},
};
@@ -1675,7 +1685,7 @@ static int iqs7222_read_word(struct iqs7222_private *iqs7222, u16 reg, u16 *val)
__le16 val_buf;
int error;
- error = iqs7222_read_burst(iqs7222, reg, &val_buf, 1);
+ error = iqs7222_read_burst(iqs7222, reg, &val_buf, sizeof(val_buf));
if (error)
return error;
@@ -1685,10 +1695,9 @@ static int iqs7222_read_word(struct iqs7222_private *iqs7222, u16 reg, u16 *val)
}
static int iqs7222_write_burst(struct iqs7222_private *iqs7222,
- u16 reg, const void *val, u16 num_val)
+ u16 reg, const void *val, u16 val_len)
{
int reg_len = reg > U8_MAX ? sizeof(reg) : sizeof(u8);
- int val_len = num_val * sizeof(__le16);
int msg_len = reg_len + val_len;
int ret, i;
struct i2c_client *client = iqs7222->client;
@@ -1747,7 +1756,7 @@ static int iqs7222_write_word(struct iqs7222_private *iqs7222, u16 reg, u16 val)
{
__le16 val_buf = cpu_to_le16(val);
- return iqs7222_write_burst(iqs7222, reg, &val_buf, 1);
+ return iqs7222_write_burst(iqs7222, reg, &val_buf, sizeof(val_buf));
}
static int iqs7222_ati_trigger(struct iqs7222_private *iqs7222)
@@ -1831,30 +1840,14 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
/*
* Acknowledge reset before writing any registers in case the device
- * suffers a spurious reset during initialization. Because this step
- * may change the reserved fields of the second filter beta register,
- * its cache must be updated.
- *
- * Writing the second filter beta register, in turn, may clobber the
- * system status register. As such, the filter beta register pair is
- * written first to protect against this hazard.
+ * suffers a spurious reset during initialization.
*/
if (dir == WRITE) {
- u16 reg = dev_desc->reg_grps[IQS7222_REG_GRP_FILT].base + 1;
- u16 filt_setup;
-
error = iqs7222_write_word(iqs7222, IQS7222_SYS_SETUP,
iqs7222->sys_setup[0] |
IQS7222_SYS_SETUP_ACK_RESET);
if (error)
return error;
-
- error = iqs7222_read_word(iqs7222, reg, &filt_setup);
- if (error)
- return error;
-
- iqs7222->filt_setup[1] &= GENMASK(7, 0);
- iqs7222->filt_setup[1] |= (filt_setup & ~GENMASK(7, 0));
}
/*
@@ -1883,6 +1876,7 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
int num_col = dev_desc->reg_grps[i].num_col;
u16 reg = dev_desc->reg_grps[i].base;
__le16 *val_buf;
+ u16 val_len = dev_desc->reg_grps[i].val_len ? : num_col * sizeof(*val_buf);
u16 *val;
if (!num_col)
@@ -1900,7 +1894,7 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
switch (dir) {
case READ:
error = iqs7222_read_burst(iqs7222, reg,
- val_buf, num_col);
+ val_buf, val_len);
for (k = 0; k < num_col; k++)
val[k] = le16_to_cpu(val_buf[k]);
break;
@@ -1909,7 +1903,7 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
for (k = 0; k < num_col; k++)
val_buf[k] = cpu_to_le16(val[k]);
error = iqs7222_write_burst(iqs7222, reg,
- val_buf, num_col);
+ val_buf, val_len);
break;
default:
@@ -1962,7 +1956,7 @@ static int iqs7222_dev_info(struct iqs7222_private *iqs7222)
int error, i;
error = iqs7222_read_burst(iqs7222, IQS7222_PROD_NUM, dev_id,
- ARRAY_SIZE(dev_id));
+ sizeof(dev_id));
if (error)
return error;
@@ -2915,7 +2909,7 @@ static int iqs7222_report(struct iqs7222_private *iqs7222)
__le16 status[IQS7222_MAX_COLS_STAT];
error = iqs7222_read_burst(iqs7222, IQS7222_SYS_STATUS, status,
- num_stat);
+ num_stat * sizeof(*status));
if (error)
return error;
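
The hunks above change iqs7222_read_burst() and iqs7222_write_burst() to take a byte count, with the FILT register groups overriding it to 3 bytes through the new val_len field while every other group keeps one 16-bit word per column. A standalone sketch of that length selection, with made-up group values:

#include <stdint.h>
#include <stdio.h>

struct reg_grp {
	uint16_t val_len;	/* explicit byte length, 0 means "derive it" */
	int num_col;
};

static uint16_t burst_len(const struct reg_grp *grp)
{
	/* Mirrors the hunk: an explicit val_len wins, otherwise fall back
	 * to one 16-bit word per column. */
	return grp->val_len ? grp->val_len : grp->num_col * sizeof(uint16_t);
}

int main(void)
{
	struct reg_grp filt = { .val_len = 3, .num_col = 2 };	/* 3-byte filter group */
	struct reg_grp chan = { .val_len = 0, .num_col = 2 };	/* plain 2 x 16-bit group */

	printf("filt burst: %u bytes, chan burst: %u bytes\n",
	       (unsigned)burst_len(&filt), (unsigned)burst_len(&chan));
	return 0;
}
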
diff --git a/drivers/input/misc/max77693-haptic.c b/drivers/input/misc/max77693-haptic.c
index cdb9be737e48..1dfd7b95a4ce 100644
--- a/drivers/input/misc/max77693-haptic.c
+++ b/drivers/input/misc/max77693-haptic.c
@@ -24,6 +24,7 @@
#include <linux/mfd/max77693.h>
#include <linux/mfd/max77693-common.h>
#include <linux/mfd/max77693-private.h>
+#include <linux/mfd/max77705-private.h>
#include <linux/mfd/max77843-private.h>
#define MAX_MAGNITUDE_SHIFT 16
@@ -116,6 +117,13 @@ static int max77693_haptic_configure(struct max77693_haptic *haptic,
MAX77693_HAPTIC_PWM_DIVISOR_128);
config_reg = MAX77693_HAPTIC_REG_CONFIG2;
break;
+ case TYPE_MAX77705:
+ value = ((haptic->type << MAX77693_CONFIG2_MODE) |
+ (enable << MAX77693_CONFIG2_MEN) |
+ (haptic->mode << MAX77693_CONFIG2_HTYP) |
+ MAX77693_HAPTIC_PWM_DIVISOR_128);
+ config_reg = MAX77705_PMIC_REG_MCONFIG;
+ break;
case TYPE_MAX77843:
value = (haptic->type << MCONFIG_MODE_SHIFT) |
(enable << MCONFIG_MEN_SHIFT) |
@@ -313,6 +321,7 @@ static int max77693_haptic_probe(struct platform_device *pdev)
case TYPE_MAX77693:
haptic->regmap_haptic = max77693->regmap_haptic;
break;
+ case TYPE_MAX77705:
case TYPE_MAX77843:
haptic->regmap_haptic = max77693->regmap;
break;
@@ -408,6 +417,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(max77693_haptic_pm_ops,
static const struct platform_device_id max77693_haptic_id[] = {
{ "max77693-haptic", },
+ { "max77705-haptic", },
{ "max77843-haptic", },
{},
};
@@ -415,6 +425,7 @@ MODULE_DEVICE_TABLE(platform, max77693_haptic_id);
static const struct of_device_id of_max77693_haptic_dt_match[] = {
{ .compatible = "maxim,max77693-haptic", },
+ { .compatible = "maxim,max77705-haptic", },
{ .compatible = "maxim,max77843-haptic", },
{ /* sentinel */ },
};
@@ -433,5 +444,5 @@ module_platform_driver(max77693_haptic_driver);
MODULE_AUTHOR("Jaewon Kim <jaewon02.kim@samsung.com>");
MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
-MODULE_DESCRIPTION("MAXIM 77693/77843 Haptic driver");
+MODULE_DESCRIPTION("MAXIM 77693/77705/77843 Haptic driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c
index 4fada5bc2a38..9c6ff04c46cf 100644
--- a/drivers/input/serio/gscps2.c
+++ b/drivers/input/serio/gscps2.c
@@ -251,6 +251,8 @@ static bool gscps2_report_data(struct gscps2port *ps2port)
/**
* gscps2_interrupt() - Interruption service routine
+ * @irq: interrupt number which triggered (unused)
+ * @dev: device pointer (unused)
*
* This function reads received PS/2 bytes and processes them on
* all interfaces.
@@ -329,6 +331,8 @@ static void gscps2_close(struct serio *port)
/**
* gscps2_probe() - Probes PS2 devices
+ * @dev: pointer to parisc_device struct which will be probed
+ *
* @return: success/error report
*/
@@ -420,6 +424,8 @@ fail_nomem:
/**
* gscps2_remove() - Removes PS2 devices
+ * @dev: pointer to parisc_device which shall be removed
+ *
* @return: success/error report
*/
diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
index 127cfdc8668a..6ed9fc34948c 100644
--- a/drivers/input/serio/i8042-acpipnpio.h
+++ b/drivers/input/serio/i8042-acpipnpio.h
@@ -1080,16 +1080,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"),
DMI_MATCH(DMI_BOARD_NAME, "AURA1501"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"),
DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/* Mivvy M310 */
@@ -1159,9 +1157,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
},
/*
* A lot of modern Clevo barebones have touchpad and/or keyboard issues
- * after suspend fixable with nomux + reset + noloop + nopnp. Luckily,
- * none of them have an external PS/2 port so this can safely be set for
- * all of them.
+ * after suspend fixable with the forcenorestore quirk.
* Clevo barebones come with board_vendor and/or system_vendor set to
* either the very generic string "Notebook" and/or a different value
* for each individual reseller. The only somewhat universal way to
@@ -1171,29 +1167,25 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "LAPQC71A"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "LAPQC71B"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "N140CU"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "N141CU"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
@@ -1205,29 +1197,19 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
- /*
- * Setting SERIO_QUIRK_NOMUX or SERIO_QUIRK_RESET_ALWAYS makes
- * the keyboard very laggy for ~5 seconds after boot and
- * sometimes also after resume.
- * However both are required for the keyboard to not fail
- * completely sometimes after boot or resume.
- */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NHxxRZQ"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
/*
* At least one modern Clevo barebone has the touchpad connected both
@@ -1243,17 +1225,15 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NS50MU"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX |
- SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP |
- SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX |
+ SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NS50_70MU"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX |
- SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP |
- SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX |
+ SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
@@ -1265,8 +1245,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NJ50_70CU"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "P640RE"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/*
@@ -1277,16 +1262,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "P65xH"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/*
@@ -1297,8 +1280,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "P65_P67H"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/*
@@ -1309,8 +1291,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "P65_67RP"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/*
@@ -1321,8 +1302,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "P65_67RS"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/*
@@ -1333,22 +1313,43 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "P67xRP"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "PB50_70DFx,DDx"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PB51RF"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PB71RD"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PC70DR"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "PCX0DX"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PCX0DX_GN20"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
/* See comment on TUXEDO InfinityBook S17 Gen6 / Clevo NS70MU above */
{
@@ -1361,15 +1362,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "X170SM"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "X170KM-G"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/*
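
The entries above collapse the per-board nomux + reset + noloop + nopnp combination into the single SERIO_QUIRK_FORCENORESTORE flag. A self-contained sketch of the lookup such a DMI quirk table implements; the flag bit is a placeholder, not the real i8042 value, and only the board names come from the hunks:

#include <stdio.h>
#include <string.h>

#define QUIRK_FORCENORESTORE (1 << 0)	/* placeholder bit, not the kernel value */

struct board_quirk {
	const char *board_name;
	unsigned int flags;
};

static const struct board_quirk quirks[] = {
	{ "LAPQC71A", QUIRK_FORCENORESTORE },
	{ "P640RE",   QUIRK_FORCENORESTORE },
	{ NULL, 0 }
};

int main(void)
{
	const char *dmi_board = "P640RE";	/* pretend DMI board name */

	for (const struct board_quirk *q = quirks; q->board_name; q++)
		if (!strcmp(q->board_name, dmi_board))
			printf("%s -> quirk flags 0x%x\n", dmi_board, q->flags);
	return 0;
}
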
diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c
index 25bf8be6e711..96f23ae57d5a 100644
--- a/drivers/input/sparse-keymap.c
+++ b/drivers/input/sparse-keymap.c
@@ -176,8 +176,7 @@ int sparse_keymap_setup(struct input_dev *dev,
for (e = keymap; e->type != KE_END; e++)
map_size++;
- map = devm_kmemdup(&dev->dev, keymap, map_size * sizeof(*map),
- GFP_KERNEL);
+ map = devm_kmemdup_array(&dev->dev, keymap, map_size, sizeof(*keymap), GFP_KERNEL);
if (!map)
return -ENOMEM;
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 066dc04003fa..67264c5b49cb 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -1021,7 +1021,7 @@ static int ads7846_setup_pendown(struct spi_device *spi,
if (pdata->get_pendown_state) {
ts->get_pendown_state = pdata->get_pendown_state;
} else {
- ts->gpio_pendown = gpiod_get(&spi->dev, "pendown", GPIOD_IN);
+ ts->gpio_pendown = devm_gpiod_get(&spi->dev, "pendown", GPIOD_IN);
if (IS_ERR(ts->gpio_pendown)) {
dev_err(&spi->dev, "failed to request pendown GPIO\n");
return PTR_ERR(ts->gpio_pendown);
diff --git a/drivers/input/touchscreen/goodix_berlin_core.c b/drivers/input/touchscreen/goodix_berlin_core.c
index 3fc03cf0ca23..7f8cfdd106fa 100644
--- a/drivers/input/touchscreen/goodix_berlin_core.c
+++ b/drivers/input/touchscreen/goodix_berlin_core.c
@@ -165,7 +165,7 @@ struct goodix_berlin_core {
struct device *dev;
struct regmap *regmap;
struct regulator *avdd;
- struct regulator *iovdd;
+ struct regulator *vddio;
struct gpio_desc *reset_gpio;
struct touchscreen_properties props;
struct goodix_berlin_fw_version fw_version;
@@ -248,22 +248,22 @@ static int goodix_berlin_power_on(struct goodix_berlin_core *cd)
{
int error;
- error = regulator_enable(cd->iovdd);
+ error = regulator_enable(cd->vddio);
if (error) {
- dev_err(cd->dev, "Failed to enable iovdd: %d\n", error);
+ dev_err(cd->dev, "Failed to enable vddio: %d\n", error);
return error;
}
- /* Vendor waits 3ms for IOVDD to settle */
+ /* Vendor waits 3ms for VDDIO to settle */
usleep_range(3000, 3100);
error = regulator_enable(cd->avdd);
if (error) {
dev_err(cd->dev, "Failed to enable avdd: %d\n", error);
- goto err_iovdd_disable;
+ goto err_vddio_disable;
}
- /* Vendor waits 15ms for IOVDD to settle */
+ /* Vendor waits 15ms for AVDD to settle */
usleep_range(15000, 15100);
gpiod_set_value_cansleep(cd->reset_gpio, 0);
@@ -283,8 +283,8 @@ static int goodix_berlin_power_on(struct goodix_berlin_core *cd)
err_dev_reset:
gpiod_set_value_cansleep(cd->reset_gpio, 1);
regulator_disable(cd->avdd);
-err_iovdd_disable:
- regulator_disable(cd->iovdd);
+err_vddio_disable:
+ regulator_disable(cd->vddio);
return error;
}
@@ -292,7 +292,7 @@ static void goodix_berlin_power_off(struct goodix_berlin_core *cd)
{
gpiod_set_value_cansleep(cd->reset_gpio, 1);
regulator_disable(cd->avdd);
- regulator_disable(cd->iovdd);
+ regulator_disable(cd->vddio);
}
static int goodix_berlin_read_version(struct goodix_berlin_core *cd)
@@ -744,10 +744,10 @@ int goodix_berlin_probe(struct device *dev, int irq, const struct input_id *id,
return dev_err_probe(dev, PTR_ERR(cd->avdd),
"Failed to request avdd regulator\n");
- cd->iovdd = devm_regulator_get(dev, "iovdd");
- if (IS_ERR(cd->iovdd))
- return dev_err_probe(dev, PTR_ERR(cd->iovdd),
- "Failed to request iovdd regulator\n");
+ cd->vddio = devm_regulator_get(dev, "vddio");
+ if (IS_ERR(cd->vddio))
+ return dev_err_probe(dev, PTR_ERR(cd->vddio),
+ "Failed to request vddio regulator\n");
error = goodix_berlin_power_on(cd);
if (error) {
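
The rename above is mechanical (iovdd becomes vddio), but the power-on ordering it preserves is the interesting part: enable VDDIO, let it settle for about 3 ms, enable AVDD, let it settle for about 15 ms, then release reset, unwinding in reverse on failure. A compilable sketch with stub functions standing in for the regulator and GPIO calls:

#include <stdio.h>

static int enable_vddio(void) { return 0; }	/* stand-in for regulator_enable(vddio) */
static int enable_avdd(void)  { return 0; }	/* stand-in for regulator_enable(avdd) */
static void disable_vddio(void) { }
static void release_reset(void) { puts("reset released"); }

static int power_on(void)
{
	int error;

	error = enable_vddio();
	if (error)
		return error;
	/* vendor-recommended ~3 ms settle time here */

	error = enable_avdd();
	if (error)
		goto err_vddio_disable;
	/* vendor-recommended ~15 ms settle time here */

	release_reset();
	return 0;

err_vddio_disable:
	disable_vddio();
	return error;
}

int main(void)
{
	return power_on();
}
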
diff --git a/drivers/input/touchscreen/imagis.c b/drivers/input/touchscreen/imagis.c
index abeae9102323..3c8bbe284b73 100644
--- a/drivers/input/touchscreen/imagis.c
+++ b/drivers/input/touchscreen/imagis.c
@@ -22,6 +22,7 @@
#define IST3032C_WHOAMI 0x32c
#define IST3038C_WHOAMI 0x38c
+#define IST3038H_WHOAMI 0x38d
#define IST3038B_REG_CHIPID 0x30
#define IST3038B_WHOAMI 0x30380b
@@ -428,11 +429,19 @@ static const struct imagis_properties imagis_3038c_data = {
.protocol_b = true,
};
+static const struct imagis_properties imagis_3038h_data = {
+ .interrupt_msg_cmd = IST3038C_REG_INTR_MESSAGE,
+ .touch_coord_cmd = IST3038C_REG_TOUCH_COORD,
+ .whoami_cmd = IST3038C_REG_CHIPID,
+ .whoami_val = IST3038H_WHOAMI,
+};
+
static const struct of_device_id imagis_of_match[] = {
{ .compatible = "imagis,ist3032c", .data = &imagis_3032c_data },
{ .compatible = "imagis,ist3038", .data = &imagis_3038_data },
{ .compatible = "imagis,ist3038b", .data = &imagis_3038b_data },
{ .compatible = "imagis,ist3038c", .data = &imagis_3038c_data },
+ { .compatible = "imagis,ist3038h", .data = &imagis_3038h_data },
{ },
};
MODULE_DEVICE_TABLE(of, imagis_of_match);
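
The new IST3038H entry follows the existing pattern: a per-variant properties struct keyed by a WHOAMI value (0x38d here) that probe compares against the chip ID it reads back. A tiny standalone sketch of that check, with the register read replaced by a constant:

#include <stdint.h>
#include <stdio.h>

struct variant_props {
	const char *compatible;
	uint32_t whoami;
};

static const struct variant_props ist3038h = {
	.compatible = "imagis,ist3038h",
	.whoami = 0x38d,	/* IST3038H_WHOAMI from the hunk above */
};

int main(void)
{
	uint32_t chip_id = 0x38d;	/* pretend value read from the CHIPID register */

	printf("%s: %s\n", ist3038h.compatible,
	       chip_id == ist3038h.whoami ? "chip ID matches" : "chip ID mismatch");
	return 0;
}
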
diff --git a/drivers/input/touchscreen/wdt87xx_i2c.c b/drivers/input/touchscreen/wdt87xx_i2c.c
index 27941245e962..88d376090e6e 100644
--- a/drivers/input/touchscreen/wdt87xx_i2c.c
+++ b/drivers/input/touchscreen/wdt87xx_i2c.c
@@ -1153,11 +1153,13 @@ static const struct i2c_device_id wdt87xx_dev_id[] = {
};
MODULE_DEVICE_TABLE(i2c, wdt87xx_dev_id);
+#ifdef CONFIG_ACPI
static const struct acpi_device_id wdt87xx_acpi_id[] = {
{ "WDHT0001", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, wdt87xx_acpi_id);
+#endif
static struct i2c_driver wdt87xx_driver = {
.probe = wdt87xx_ts_probe,
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index ec1b5e32b972..cd750f512dee 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -154,7 +154,6 @@ config IOMMU_DMA
select DMA_OPS_HELPERS
select IOMMU_API
select IOMMU_IOVA
- select IRQ_MSI_IOMMU
select NEED_SG_DMA_LENGTH
select NEED_SG_DMA_FLAGS if SWIOTLB
@@ -483,8 +482,7 @@ config MTK_IOMMU
config MTK_IOMMU_V1
tristate "MediaTek IOMMU Version 1 (M4U gen1) Support"
- depends on ARM
- depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on (ARCH_MEDIATEK && ARM) || COMPILE_TEST
select ARM_DMA_USE_IOMMU
select IOMMU_API
select MEMORY
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 68debf5ee2d7..220c598b7e14 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -47,7 +47,6 @@ extern unsigned long amd_iommu_pgsize_bitmap;
/* Protection domain ops */
void amd_iommu_init_identity_domain(void);
struct protection_domain *protection_domain_alloc(void);
-void protection_domain_free(struct protection_domain *domain);
struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
struct mm_struct *mm);
void amd_iommu_domain_free(struct iommu_domain *dom);
@@ -176,12 +175,11 @@ void amd_iommu_apply_ivrs_quirks(void);
#else
static inline void amd_iommu_apply_ivrs_quirks(void) { }
#endif
+struct dev_table_entry *amd_iommu_get_ivhd_dte_flags(u16 segid, u16 devid);
void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
u64 *root, int mode);
struct dev_table_entry *get_dev_table(struct amd_iommu *iommu);
-
-#endif
-
-struct dev_table_entry *amd_iommu_get_ivhd_dte_flags(u16 segid, u16 devid);
struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid);
+
+#endif /* AMD_IOMMU_H */
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 23caea22f8dc..5089b58e528a 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -112,6 +112,10 @@
#define FEATURE_SNPAVICSUP_GAM(x) \
(FIELD_GET(FEATURE_SNPAVICSUP, x) == 0x1)
+#define FEATURE_NUM_INT_REMAP_SUP GENMASK_ULL(9, 8)
+#define FEATURE_NUM_INT_REMAP_SUP_2K(x) \
+ (FIELD_GET(FEATURE_NUM_INT_REMAP_SUP, x) == 0x1)
+
/* Note:
* The current driver only support 16-bit PASID.
* Currently, hardware only implement upto 16-bit PASID
@@ -175,13 +179,16 @@
#define CONTROL_GAM_EN 25
#define CONTROL_GALOG_EN 28
#define CONTROL_GAINT_EN 29
+#define CONTROL_NUM_INT_REMAP_MODE 43
+#define CONTROL_NUM_INT_REMAP_MODE_MASK 0x03
+#define CONTROL_NUM_INT_REMAP_MODE_2K 0x01
#define CONTROL_EPH_EN 45
#define CONTROL_XT_EN 50
#define CONTROL_INTCAPXT_EN 51
#define CONTROL_IRTCACHEDIS 59
#define CONTROL_SNPAVIC_EN 61
-#define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT)
+#define CTRL_INV_TO_MASK 7
#define CTRL_INV_TO_NONE 0
#define CTRL_INV_TO_1MS 1
#define CTRL_INV_TO_10MS 2
@@ -309,15 +316,13 @@
#define DTE_IRQ_REMAP_INTCTL (2ULL << 60)
#define DTE_IRQ_REMAP_ENABLE 1ULL
-/*
- * AMD IOMMU hardware only support 512 IRTEs despite
- * the architectural limitation of 2048 entries.
- */
-#define DTE_INTTAB_ALIGNMENT 128
-#define DTE_INTTABLEN_VALUE 9ULL
-#define DTE_INTTABLEN (DTE_INTTABLEN_VALUE << 1)
#define DTE_INTTABLEN_MASK (0xfULL << 1)
-#define MAX_IRQS_PER_TABLE (1 << DTE_INTTABLEN_VALUE)
+#define DTE_INTTABLEN_VALUE_512 9ULL
+#define DTE_INTTABLEN_512 (DTE_INTTABLEN_VALUE_512 << 1)
+#define MAX_IRQS_PER_TABLE_512 BIT(DTE_INTTABLEN_VALUE_512)
+#define DTE_INTTABLEN_VALUE_2K 11ULL
+#define DTE_INTTABLEN_2K (DTE_INTTABLEN_VALUE_2K << 1)
+#define MAX_IRQS_PER_TABLE_2K BIT(DTE_INTTABLEN_VALUE_2K)
#define PAGE_MODE_NONE 0x00
#define PAGE_MODE_1_LEVEL 0x01
@@ -492,9 +497,6 @@ extern const struct iommu_ops amd_iommu_ops;
/* IVRS indicates that pre-boot remapping was enabled */
extern bool amdr_ivrs_remap_support;
-/* kmem_cache to get tables with 128 byte alignement */
-extern struct kmem_cache *amd_iommu_irq_cache;
-
#define PCI_SBDF_TO_SEGID(sbdf) (((sbdf) >> 16) & 0xffff)
#define PCI_SBDF_TO_DEVID(sbdf) ((sbdf) & 0xffff)
#define PCI_SEG_DEVID_TO_SBDF(seg, devid) ((((u32)(seg) & 0xffff) << 16) | \
@@ -851,6 +853,7 @@ struct iommu_dev_data {
struct device *dev;
u16 devid; /* PCI Device ID */
+ unsigned int max_irqs; /* Maximum IRQs supported by device */
u32 max_pasids; /* Max supported PASIDs */
u32 flags; /* Holds AMD_IOMMU_DEVICE_FLAG_<*> */
int ats_qdep;
@@ -928,9 +931,6 @@ struct unity_map_entry {
* Data structures for device handling
*/
-/* size of the dma_ops aperture as power of 2 */
-extern unsigned amd_iommu_aperture_order;
-
extern bool amd_iommu_force_isolation;
/* Max levels of glxval supported */
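
The table-length macros above encode log2(number of IRTEs) shifted left by one bit in the DTE, so the legacy value 9 describes a 512-entry table and the new value 11 a 2048-entry one. A quick arithmetic check:

#include <stdio.h>

int main(void)
{
	unsigned long long val_512 = 9, val_2k = 11;

	printf("DTE_INTTABLEN_VALUE_512: %llu entries, encoded field 0x%llx\n",
	       1ULL << val_512, val_512 << 1);
	printf("DTE_INTTABLEN_VALUE_2K:  %llu entries, encoded field 0x%llx\n",
	       1ULL << val_2k, val_2k << 1);
	return 0;
}
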
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index cb536d372b12..dd9e26b7b718 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -12,7 +12,6 @@
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
-#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
@@ -219,7 +218,6 @@ static bool __initdata cmdline_maps;
static enum iommu_init_state init_state = IOMMU_START_STATE;
static int amd_iommu_enable_interrupts(void);
-static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg);
static bool amd_iommu_pre_enabled = true;
@@ -412,33 +410,26 @@ static void iommu_set_device_table(struct amd_iommu *iommu)
&entry, sizeof(entry));
}
-/* Generic functions to enable/disable certain features of the IOMMU. */
-void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
+static void iommu_feature_set(struct amd_iommu *iommu, u64 val, u64 mask, u8 shift)
{
u64 ctrl;
ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
- ctrl |= (1ULL << bit);
+ mask <<= shift;
+ ctrl &= ~mask;
+ ctrl |= (val << shift) & mask;
writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}
-static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
+/* Generic functions to enable/disable certain features of the IOMMU. */
+void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
- u64 ctrl;
-
- ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
- ctrl &= ~(1ULL << bit);
- writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
+ iommu_feature_set(iommu, 1ULL, 1ULL, bit);
}
-static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
+static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
- u64 ctrl;
-
- ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
- ctrl &= ~CTRL_INV_TO_MASK;
- ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
- writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
+ iommu_feature_set(iommu, 0ULL, 1ULL, bit);
}
/* Function to enable the hardware */
@@ -1069,7 +1060,8 @@ static bool __copy_device_table(struct amd_iommu *iommu)
int_tab_len = old_devtb[devid].data[2] & DTE_INTTABLEN_MASK;
if (irq_v && (int_ctl || int_tab_len)) {
if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
- (int_tab_len != DTE_INTTABLEN)) {
+ (int_tab_len != DTE_INTTABLEN_512 &&
+ int_tab_len != DTE_INTTABLEN_2K)) {
pr_err("Wrong old irq remapping flag: %#x\n", devid);
memunmap(old_devtb);
return false;
@@ -2652,7 +2644,7 @@ static void iommu_init_flags(struct amd_iommu *iommu)
iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
/* Set IOTLB invalidation timeout to 1s */
- iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
+ iommu_feature_set(iommu, CTRL_INV_TO_1S, CTRL_INV_TO_MASK, CONTROL_INV_TIMEOUT);
/* Enable Enhanced Peripheral Page Request Handling */
if (check_feature(FEATURE_EPHSUP))
@@ -2745,6 +2737,17 @@ static void iommu_enable_irtcachedis(struct amd_iommu *iommu)
iommu->irtcachedis_enabled ? "disabled" : "enabled");
}
+static void iommu_enable_2k_int(struct amd_iommu *iommu)
+{
+ if (!FEATURE_NUM_INT_REMAP_SUP_2K(amd_iommu_efr2))
+ return;
+
+ iommu_feature_set(iommu,
+ CONTROL_NUM_INT_REMAP_MODE_2K,
+ CONTROL_NUM_INT_REMAP_MODE_MASK,
+ CONTROL_NUM_INT_REMAP_MODE);
+}
+
static void early_enable_iommu(struct amd_iommu *iommu)
{
iommu_disable(iommu);
@@ -2757,6 +2760,7 @@ static void early_enable_iommu(struct amd_iommu *iommu)
iommu_enable_ga(iommu);
iommu_enable_xt(iommu);
iommu_enable_irtcachedis(iommu);
+ iommu_enable_2k_int(iommu);
iommu_enable(iommu);
amd_iommu_flush_all_caches(iommu);
}
@@ -2813,6 +2817,7 @@ static void early_enable_iommus(void)
iommu_enable_ga(iommu);
iommu_enable_xt(iommu);
iommu_enable_irtcachedis(iommu);
+ iommu_enable_2k_int(iommu);
iommu_set_device_table(iommu);
amd_iommu_flush_all_caches(iommu);
}
@@ -2939,9 +2944,6 @@ static struct syscore_ops amd_iommu_syscore_ops = {
static void __init free_iommu_resources(void)
{
- kmem_cache_destroy(amd_iommu_irq_cache);
- amd_iommu_irq_cache = NULL;
-
free_iommu_all();
free_pci_segments();
}
@@ -3040,7 +3042,7 @@ static void __init ivinfo_init(void *ivrs)
static int __init early_amd_iommu_init(void)
{
struct acpi_table_header *ivrs_base;
- int remap_cache_sz, ret;
+ int ret;
acpi_status status;
if (!amd_iommu_detected)
@@ -3102,22 +3104,7 @@ static int __init early_amd_iommu_init(void)
if (amd_iommu_irq_remap) {
struct amd_iommu_pci_seg *pci_seg;
- /*
- * Interrupt remapping enabled, create kmem_cache for the
- * remapping tables.
- */
ret = -ENOMEM;
- if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
- remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
- else
- remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
- amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
- remap_cache_sz,
- DTE_INTTAB_ALIGNMENT,
- 0, NULL);
- if (!amd_iommu_irq_cache)
- goto out;
-
for_each_pci_segment(pci_seg) {
if (alloc_irq_lookup_table(pci_seg))
goto out;
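
iommu_feature_set() above folds the single-bit enable/disable helpers and the invalidation-timeout setter into one masked read-modify-write on the control register. A userspace sketch of that update, with a plain variable standing in for the MMIO register; the field positions below are illustrative except where noted:

#include <stdint.h>
#include <stdio.h>

static uint64_t ctrl_reg;	/* stand-in for readq()/writeq() on MMIO_CONTROL_OFFSET */

static void feature_set(uint64_t val, uint64_t mask, uint8_t shift)
{
	uint64_t ctrl = ctrl_reg;

	mask <<= shift;
	ctrl &= ~mask;			/* clear the field */
	ctrl |= (val << shift) & mask;	/* insert the new value */
	ctrl_reg = ctrl;
}

int main(void)
{
	feature_set(1, 1, 25);	/* single-bit enable, e.g. CONTROL_GAM_EN (bit 25) */
	feature_set(2, 7, 40);	/* 3-bit field (mask 7) at an illustrative shift */

	printf("control register: 0x%llx\n", (unsigned long long)ctrl_reg);
	return 0;
}
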
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index f3399087859f..26cf562dde11 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -47,13 +47,6 @@ static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
return fpte;
}
-/****************************************************************************
- *
- * The functions below are used the create the page table mappings for
- * unity mapped regions.
- *
- ****************************************************************************/
-
static void free_pt_page(u64 *pt, struct list_head *freelist)
{
struct page *p = virt_to_page(pt);
diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
index c616de2c5926..a56a27396305 100644
--- a/drivers/iommu/amd/io_pgtable_v2.c
+++ b/drivers/iommu/amd/io_pgtable_v2.c
@@ -254,7 +254,7 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
pte = v2_alloc_pte(cfg->amd.nid, pgtable->pgd,
iova, map_size, gfp, &updated);
if (!pte) {
- ret = -EINVAL;
+ ret = -ENOMEM;
goto out;
}
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index cd5116d8c3b2..be8761bbef0f 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -75,8 +75,6 @@ struct iommu_cmd {
*/
DEFINE_IDA(pdom_ids);
-struct kmem_cache *amd_iommu_irq_cache;
-
static int amd_iommu_attach_device(struct iommu_domain *dom,
struct device *dev);
@@ -868,7 +866,7 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
int type, devid, flags, tag;
volatile u32 *event = __evt;
int count = 0;
- u64 address;
+ u64 address, ctrl;
u32 pasid;
retry:
@@ -878,6 +876,7 @@ retry:
(event[1] & EVENT_DOMID_MASK_LO);
flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
address = (u64)(((u64)event[3]) << 32) | event[2];
+ ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
if (type == 0) {
/* Did we hit the erratum? */
@@ -899,6 +898,7 @@ retry:
dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
pasid, address, flags);
+ dev_err(dev, "Control Reg : 0x%llx\n", ctrl);
dump_dte_entry(iommu, devid);
break;
case EVENT_TYPE_DEV_TAB_ERR:
@@ -2394,8 +2394,14 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
}
out_err:
iommu_completion_wait(iommu);
+ if (FEATURE_NUM_INT_REMAP_SUP_2K(amd_iommu_efr2))
+ dev_data->max_irqs = MAX_IRQS_PER_TABLE_2K;
+ else
+ dev_data->max_irqs = MAX_IRQS_PER_TABLE_512;
+
if (dev_is_pci(dev))
pci_prepare_ats(to_pci_dev(dev), PAGE_SHIFT);
@@ -2432,15 +2438,6 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev)
*
*****************************************************************************/
-void protection_domain_free(struct protection_domain *domain)
-{
- WARN_ON(!list_empty(&domain->dev_list));
- if (domain->domain.type & __IOMMU_DOMAIN_PAGING)
- free_io_pgtable_ops(&domain->iop.pgtbl.ops);
- pdom_id_free(domain->id);
- kfree(domain);
-}
-
static void protection_domain_init(struct protection_domain *domain)
{
spin_lock_init(&domain->lock);
@@ -2578,7 +2575,11 @@ void amd_iommu_domain_free(struct iommu_domain *dom)
{
struct protection_domain *domain = to_pdomain(dom);
- protection_domain_free(domain);
+ WARN_ON(!list_empty(&domain->dev_list));
+ if (domain->domain.type & __IOMMU_DOMAIN_PAGING)
+ free_io_pgtable_ops(&domain->iop.pgtbl.ops);
+ pdom_id_free(domain->id);
+ kfree(domain);
}
static int blocked_domain_attach_device(struct iommu_domain *domain,
@@ -3081,6 +3082,13 @@ out:
raw_spin_unlock_irqrestore(&iommu->lock, flags);
}
+static inline u8 iommu_get_int_tablen(struct iommu_dev_data *dev_data)
+{
+ if (dev_data && dev_data->max_irqs == MAX_IRQS_PER_TABLE_2K)
+ return DTE_INTTABLEN_2K;
+ return DTE_INTTABLEN_512;
+}
+
static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
struct irq_remap_table *table)
{
@@ -3095,7 +3103,7 @@ static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
new &= ~DTE_IRQ_PHYS_ADDR_MASK;
new |= iommu_virt_to_phys(table->table);
new |= DTE_IRQ_REMAP_INTCTL;
- new |= DTE_INTTABLEN;
+ new |= iommu_get_int_tablen(dev_data);
new |= DTE_IRQ_REMAP_ENABLE;
WRITE_ONCE(dte->data[2], new);
@@ -3121,7 +3129,7 @@ static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)
return table;
}
-static struct irq_remap_table *__alloc_irq_table(void)
+static struct irq_remap_table *__alloc_irq_table(int nid, int order)
{
struct irq_remap_table *table;
@@ -3129,19 +3137,13 @@ static struct irq_remap_table *__alloc_irq_table(void)
if (!table)
return NULL;
- table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_KERNEL);
+ table->table = iommu_alloc_pages_node(nid, GFP_KERNEL, order);
if (!table->table) {
kfree(table);
return NULL;
}
raw_spin_lock_init(&table->lock);
- if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
- memset(table->table, 0,
- MAX_IRQS_PER_TABLE * sizeof(u32));
- else
- memset(table->table, 0,
- (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
return table;
}
@@ -3173,13 +3175,24 @@ static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias,
return 0;
}
+static inline size_t get_irq_table_size(unsigned int max_irqs)
+{
+ if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+ return max_irqs * sizeof(u32);
+
+ return max_irqs * (sizeof(u64) * 2);
+}
+
static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
- u16 devid, struct pci_dev *pdev)
+ u16 devid, struct pci_dev *pdev,
+ unsigned int max_irqs)
{
struct irq_remap_table *table = NULL;
struct irq_remap_table *new_table = NULL;
struct amd_iommu_pci_seg *pci_seg;
unsigned long flags;
+ int order = get_order(get_irq_table_size(max_irqs));
+ int nid = iommu && iommu->dev ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE;
u16 alias;
spin_lock_irqsave(&iommu_table_lock, flags);
@@ -3198,7 +3211,7 @@ static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
spin_unlock_irqrestore(&iommu_table_lock, flags);
/* Nothing there yet, allocate new irq remapping table */
- new_table = __alloc_irq_table();
+ new_table = __alloc_irq_table(nid, order);
if (!new_table)
return NULL;
@@ -3233,20 +3246,21 @@ out_unlock:
spin_unlock_irqrestore(&iommu_table_lock, flags);
if (new_table) {
- kmem_cache_free(amd_iommu_irq_cache, new_table->table);
+ iommu_free_pages(new_table->table, order);
kfree(new_table);
}
return table;
}
static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count,
- bool align, struct pci_dev *pdev)
+ bool align, struct pci_dev *pdev,
+ unsigned long max_irqs)
{
struct irq_remap_table *table;
int index, c, alignment = 1;
unsigned long flags;
- table = alloc_irq_table(iommu, devid, pdev);
+ table = alloc_irq_table(iommu, devid, pdev, max_irqs);
if (!table)
return -ENODEV;
@@ -3257,7 +3271,7 @@ static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count,
/* Scan table for free entries */
for (index = ALIGN(table->min_index, alignment), c = 0;
- index < MAX_IRQS_PER_TABLE;) {
+ index < max_irqs;) {
if (!iommu->irte_ops->is_allocated(table, index)) {
c += 1;
} else {
@@ -3527,6 +3541,14 @@ static void fill_msi_msg(struct msi_msg *msg, u32 index)
msg->data = index;
msg->address_lo = 0;
msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW;
+ /*
+ * The struct msi_msg.dest_mode_logical is used to set the DM bit
+ * in the MSI Message Address Register. For devices with 2K int-remap
+ * support, this bit must be set to 1 regardless of the actual destination
+ * mode, which is indicated by IRTE[DM].
+ */
+ if (FEATURE_NUM_INT_REMAP_SUP_2K(amd_iommu_efr2))
+ msg->arch_addr_lo.dest_mode_logical = true;
msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;
}
@@ -3589,6 +3611,8 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
struct amd_ir_data *data = NULL;
struct amd_iommu *iommu;
struct irq_cfg *cfg;
+ struct iommu_dev_data *dev_data;
+ unsigned long max_irqs;
int i, ret, devid, seg, sbdf;
int index;
@@ -3607,6 +3631,9 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
if (!iommu)
return -EINVAL;
+ dev_data = search_dev_data(iommu, devid);
+ max_irqs = dev_data ? dev_data->max_irqs : MAX_IRQS_PER_TABLE_512;
+
ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
if (ret < 0)
return ret;
@@ -3614,7 +3641,7 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
struct irq_remap_table *table;
- table = alloc_irq_table(iommu, devid, NULL);
+ table = alloc_irq_table(iommu, devid, NULL, max_irqs);
if (table) {
if (!table->min_index) {
/*
@@ -3635,9 +3662,11 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
bool align = (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI);
index = alloc_irq_index(iommu, devid, nr_irqs, align,
- msi_desc_to_pci_dev(info->desc));
+ msi_desc_to_pci_dev(info->desc),
+ max_irqs);
} else {
- index = alloc_irq_index(iommu, devid, nr_irqs, false, NULL);
+ index = alloc_irq_index(iommu, devid, nr_irqs, false, NULL,
+ max_irqs);
}
if (index < 0) {
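
alloc_irq_table() now sizes the remapping table per device instead of drawing from a fixed kmem_cache: the entry size depends on whether guest (GA) interrupt mode is in use (16 bytes per IRTE versus 4), and the result is rounded up to a page order for iommu_alloc_pages_node(). A standalone sketch of that sizing, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Mirrors get_irq_table_size(): 4-byte IRTEs in legacy mode,
 * 16-byte (128-bit) IRTEs when guest interrupt mode is enabled. */
static unsigned long irq_table_size(unsigned long max_irqs, int ga_mode)
{
	return ga_mode ? max_irqs * 16 : max_irqs * 4;
}

/* Simplified get_order(): smallest order whose page count covers size. */
static int order_for(unsigned long size)
{
	unsigned long pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	int order = 0;

	while ((1UL << order) < pages)
		order++;
	return order;
}

int main(void)
{
	printf("512 IRTEs, GA mode:  %lu bytes -> order %d\n",
	       irq_table_size(512, 1), order_for(irq_table_size(512, 1)));
	printf("2048 IRTEs, GA mode: %lu bytes -> order %d\n",
	       irq_table_size(2048, 1), order_for(irq_table_size(2048, 1)));
	return 0;
}
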
diff --git a/drivers/iommu/amd/pasid.c b/drivers/iommu/amd/pasid.c
index 11150cfd6718..77c8e9a91cbc 100644
--- a/drivers/iommu/amd/pasid.c
+++ b/drivers/iommu/amd/pasid.c
@@ -195,7 +195,7 @@ struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
ret = mmu_notifier_register(&pdom->mn, mm);
if (ret) {
- protection_domain_free(pdom);
+ amd_iommu_domain_free(&pdom->domain);
return ERR_PTR(ret);
}
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index 95ba3caeb401..e13501541fdd 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -36,7 +36,7 @@
#define DART_MAX_STREAMS 256
#define DART_MAX_TTBR 4
-#define MAX_DARTS_PER_DEVICE 2
+#define MAX_DARTS_PER_DEVICE 3
/* Common registers */
@@ -277,6 +277,9 @@ struct apple_dart_domain {
* @streams: streams for this device
*/
struct apple_dart_master_cfg {
+ /* Intersection of DART capabilities */
+ u32 supports_bypass : 1;
+
struct apple_dart_stream_map stream_maps[MAX_DARTS_PER_DEVICE];
};
@@ -684,7 +687,7 @@ static int apple_dart_attach_dev_identity(struct iommu_domain *domain,
struct apple_dart_stream_map *stream_map;
int i;
- if (!cfg->stream_maps[0].dart->supports_bypass)
+ if (!cfg->supports_bypass)
return -EINVAL;
for_each_stream_map(i, cfg, stream_map)
@@ -792,20 +795,23 @@ static int apple_dart_of_xlate(struct device *dev,
return -EINVAL;
sid = args->args[0];
- if (!cfg)
+ if (!cfg) {
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
- if (!cfg)
- return -ENOMEM;
+ if (!cfg)
+ return -ENOMEM;
+ /* Will be ANDed with DART capabilities */
+ cfg->supports_bypass = true;
+ }
dev_iommu_priv_set(dev, cfg);
cfg_dart = cfg->stream_maps[0].dart;
if (cfg_dart) {
- if (cfg_dart->supports_bypass != dart->supports_bypass)
- return -EINVAL;
if (cfg_dart->pgsize != dart->pgsize)
return -EINVAL;
}
+ cfg->supports_bypass &= dart->supports_bypass;
+
for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
if (cfg->stream_maps[i].dart == dart) {
set_bit(sid, cfg->stream_maps[i].sidmap);
@@ -945,7 +951,7 @@ static int apple_dart_def_domain_type(struct device *dev)
if (cfg->stream_maps[0].dart->pgsize > PAGE_SIZE)
return IOMMU_DOMAIN_IDENTITY;
- if (!cfg->stream_maps[0].dart->supports_bypass)
+ if (!cfg->supports_bypass)
return IOMMU_DOMAIN_DMA;
return 0;
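
The apple-dart change moves the bypass decision from the first DART in a master's config to the intersection of every DART backing the device: the flag starts as true when the config is allocated and is ANDed with each DART's capability during of_xlate. A minimal sketch of that intersection:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* Per-DART supports_bypass values; the third DART lacks bypass. */
	bool dart_caps[] = { true, true, false };
	bool supports_bypass = true;	/* set once when the master config is allocated */

	for (unsigned int i = 0; i < sizeof(dart_caps) / sizeof(dart_caps[0]); i++)
		supports_bypass &= dart_caps[i];

	printf("identity/bypass domain %s\n",
	       supports_bypass ? "allowed" : "not allowed");
	return 0;
}
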
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index de205a34ffc6..8f439c265a23 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -79,8 +79,11 @@ static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
- if (pm_runtime_enabled(smmu->dev))
- pm_runtime_put_autosuspend(smmu->dev);
+ if (pm_runtime_enabled(smmu->dev)) {
+ pm_runtime_mark_last_busy(smmu->dev);
+ __pm_runtime_put_autosuspend(smmu->dev);
+ }
}
static void arm_smmu_rpm_use_autosuspend(struct arm_smmu_device *smmu)
@@ -1195,7 +1198,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
/* Looks ok, so add the device to the domain */
arm_smmu_master_install_s2crs(cfg, S2CR_TYPE_TRANS,
smmu_domain->cfg.cbndx, fwspec);
- arm_smmu_rpm_use_autosuspend(smmu);
rpm_put:
arm_smmu_rpm_put(smmu);
return ret;
@@ -1218,7 +1220,6 @@ static int arm_smmu_attach_dev_type(struct device *dev,
return ret;
arm_smmu_master_install_s2crs(cfg, type, 0, fwspec);
- arm_smmu_rpm_use_autosuspend(smmu);
arm_smmu_rpm_put(smmu);
return 0;
}
@@ -1486,7 +1487,6 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
out_cfg_free:
kfree(cfg);
out_free:
- iommu_fwspec_free(dev);
return ERR_PTR(ret);
}
@@ -2246,6 +2246,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
if (dev->pm_domain) {
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
+ arm_smmu_rpm_use_autosuspend(smmu);
}
return 0;
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 2a9fa0c8cc00..0832998eca38 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -24,6 +24,7 @@
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/mutex.h>
+#include <linux/msi.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
@@ -86,7 +87,6 @@ struct iommu_dma_cookie {
struct iommu_domain *fq_domain;
/* Options for dma-iommu use */
struct iommu_dma_options options;
- struct mutex mutex;
};
static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
@@ -102,6 +102,9 @@ static int __init iommu_dma_forcedac_setup(char *str)
}
early_param("iommu.forcedac", iommu_dma_forcedac_setup);
+static int iommu_dma_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
+ phys_addr_t msi_addr);
+
/* Number of entries per flush queue */
#define IOVA_DEFAULT_FQ_SIZE 256
#define IOVA_SINGLE_FQ_SIZE 32768
@@ -397,7 +400,7 @@ int iommu_get_dma_cookie(struct iommu_domain *domain)
if (!domain->iova_cookie)
return -ENOMEM;
- mutex_init(&domain->iova_cookie->mutex);
+ iommu_domain_set_sw_msi(domain, iommu_dma_sw_msi);
return 0;
}
@@ -429,6 +432,7 @@ int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
cookie->msi_iova = base;
domain->iova_cookie = cookie;
+ iommu_domain_set_sw_msi(domain, iommu_dma_sw_msi);
return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
@@ -443,6 +447,11 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iommu_dma_msi_page *msi, *tmp;
+#if IS_ENABLED(CONFIG_IRQ_MSI_IOMMU)
+ if (domain->sw_msi != iommu_dma_sw_msi)
+ return;
+#endif
+
if (!cookie)
return;
@@ -698,23 +707,20 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev
domain->geometry.aperture_start >> order);
/* start_pfn is always nonzero for an already-initialised domain */
- mutex_lock(&cookie->mutex);
if (iovad->start_pfn) {
if (1UL << order != iovad->granule ||
base_pfn != iovad->start_pfn) {
pr_warn("Incompatible range for DMA domain\n");
- ret = -EFAULT;
- goto done_unlock;
+ return -EFAULT;
}
- ret = 0;
- goto done_unlock;
+ return 0;
}
init_iova_domain(iovad, 1UL << order, base_pfn);
ret = iova_domain_init_rcaches(iovad);
if (ret)
- goto done_unlock;
+ return ret;
iommu_dma_init_options(&cookie->options, dev);
@@ -723,11 +729,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev
(!device_iommu_capable(dev, IOMMU_CAP_DEFERRED_FLUSH) || iommu_dma_init_fq(domain)))
domain->type = IOMMU_DOMAIN_DMA;
- ret = iova_reserve_iommu_regions(dev, domain);
-
-done_unlock:
- mutex_unlock(&cookie->mutex);
- return ret;
+ return iova_reserve_iommu_regions(dev, domain);
}
/**
@@ -1800,60 +1802,26 @@ out_free_page:
return NULL;
}
-/**
- * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain
- * @desc: MSI descriptor, will store the MSI page
- * @msi_addr: MSI target address to be mapped
- *
- * Return: 0 on success or negative error code if the mapping failed.
- */
-int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
+static int iommu_dma_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
+ phys_addr_t msi_addr)
{
struct device *dev = msi_desc_to_dev(desc);
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct iommu_dma_msi_page *msi_page;
- static DEFINE_MUTEX(msi_prepare_lock); /* see below */
+ const struct iommu_dma_msi_page *msi_page;
- if (!domain || !domain->iova_cookie) {
- desc->iommu_cookie = NULL;
+ if (!domain->iova_cookie) {
+ msi_desc_set_iommu_msi_iova(desc, 0, 0);
return 0;
}
- /*
- * In fact the whole prepare operation should already be serialised by
- * irq_domain_mutex further up the callchain, but that's pretty subtle
- * on its own, so consider this locking as failsafe documentation...
- */
- mutex_lock(&msi_prepare_lock);
+ iommu_group_mutex_assert(dev);
msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
- mutex_unlock(&msi_prepare_lock);
-
- msi_desc_set_iommu_cookie(desc, msi_page);
-
if (!msi_page)
return -ENOMEM;
- return 0;
-}
-/**
- * iommu_dma_compose_msi_msg() - Apply translation to an MSI message
- * @desc: MSI descriptor prepared by iommu_dma_prepare_msi()
- * @msg: MSI message containing target physical address
- */
-void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
-{
- struct device *dev = msi_desc_to_dev(desc);
- const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- const struct iommu_dma_msi_page *msi_page;
-
- msi_page = msi_desc_get_iommu_cookie(desc);
-
- if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
- return;
-
- msg->address_hi = upper_32_bits(msi_page->iova);
- msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
- msg->address_lo += lower_32_bits(msi_page->iova);
+ msi_desc_set_iommu_msi_iova(
+ desc, msi_page->iova,
+ ilog2(cookie_msi_granule(domain->iova_cookie)));
+ return 0;
}
static int iommu_dma_init(void)
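
With iommu_dma_prepare_msi() and iommu_dma_compose_msi_msg() gone, the sw_msi callback records only the MSI page IOVA and log2 of the cookie granule, so the message address can later be recomposed as the IOVA plus the offset within the granule (as the removed compose helper did). A small arithmetic sketch with made-up addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t granule  = 4096;		/* cookie_msi_granule() */
	uint64_t msi_iova = 0x8000000;		/* IOVA of the mapped MSI doorbell page */
	uint32_t addr_lo  = 0xfee00418;		/* physical MSI address from the irqchip */
	unsigned int shift = 12;		/* ilog2(granule), stored in the descriptor */

	uint64_t translated = msi_iova + (addr_lo & (granule - 1));

	printf("granule shift %u, translated address_lo 0x%llx\n",
	       shift, (unsigned long long)(translated & 0xffffffffULL));
	return 0;
}
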
diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c
index 2a86aa5d54c6..761ab647f372 100644
--- a/drivers/iommu/hyperv-iommu.c
+++ b/drivers/iommu/hyperv-iommu.c
@@ -130,7 +130,7 @@ static int __init hyperv_prepare_irq_remapping(void)
x86_init.hyper.msi_ext_dest_id())
return -ENODEV;
- if (hv_root_partition) {
+ if (hv_root_partition()) {
name = "HYPERV-ROOT-IR";
ops = &hyperv_root_ir_domain_ops;
} else {
@@ -151,7 +151,7 @@ static int __init hyperv_prepare_irq_remapping(void)
return -ENOMEM;
}
- if (hv_root_partition)
+ if (hv_root_partition())
return 0; /* The rest is only relevant to guests */
/*
@@ -217,7 +217,7 @@ hyperv_root_ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
status = hv_unmap_ioapic_interrupt(ioapic_id, &entry);
if (status != HV_STATUS_SUCCESS)
- pr_debug("%s: unexpected unmap status %lld\n", __func__, status);
+ hv_status_debug(status, "failed to unmap\n");
data->entry.ioapic_rte.as_uint64 = 0;
data->entry.source = 0; /* Invalid source */
@@ -228,7 +228,7 @@ hyperv_root_ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
vector, &entry);
if (status != HV_STATUS_SUCCESS) {
- pr_err("%s: map hypercall failed, status %lld\n", __func__, status);
+ hv_status_err(status, "map failed\n");
return;
}
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index bf1f0c814348..ec2f385ae25b 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -737,7 +737,8 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
return NULL;
domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
- pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
+ pteval = virt_to_phys(tmp_page) | DMA_PTE_READ |
+ DMA_PTE_WRITE;
if (domain->use_first_level)
pteval |= DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
@@ -1172,32 +1173,59 @@ static bool dev_needs_extra_dtlb_flush(struct pci_dev *pdev)
return true;
}
-static void iommu_enable_pci_caps(struct device_domain_info *info)
+static void iommu_enable_pci_ats(struct device_domain_info *info)
{
struct pci_dev *pdev;
- if (!dev_is_pci(info->dev))
+ if (!info->ats_supported)
return;
pdev = to_pci_dev(info->dev);
- if (info->ats_supported && pci_ats_page_aligned(pdev) &&
- !pci_enable_ats(pdev, VTD_PAGE_SHIFT))
+ if (!pci_ats_page_aligned(pdev))
+ return;
+
+ if (!pci_enable_ats(pdev, VTD_PAGE_SHIFT))
info->ats_enabled = 1;
}
-static void iommu_disable_pci_caps(struct device_domain_info *info)
+static void iommu_disable_pci_ats(struct device_domain_info *info)
+{
+ if (!info->ats_enabled)
+ return;
+
+ pci_disable_ats(to_pci_dev(info->dev));
+ info->ats_enabled = 0;
+}
+
+static void iommu_enable_pci_pri(struct device_domain_info *info)
{
struct pci_dev *pdev;
- if (!dev_is_pci(info->dev))
+ if (!info->ats_enabled || !info->pri_supported)
return;
pdev = to_pci_dev(info->dev);
+ /* PASID is required in PRG Response Message. */
+ if (info->pasid_enabled && !pci_prg_resp_pasid_required(pdev))
+ return;
- if (info->ats_enabled) {
- pci_disable_ats(pdev);
- info->ats_enabled = 0;
- }
+ if (pci_reset_pri(pdev))
+ return;
+
+ if (!pci_enable_pri(pdev, PRQ_DEPTH))
+ info->pri_enabled = 1;
+}
+
+static void iommu_disable_pci_pri(struct device_domain_info *info)
+{
+ if (!info->pri_enabled)
+ return;
+
+ if (WARN_ON(info->iopf_refcount))
+ iopf_queue_remove_device(info->iommu->iopf_queue, info->dev);
+
+ pci_disable_pri(to_pci_dev(info->dev));
+ info->pri_enabled = 0;
}
static void intel_flush_iotlb_all(struct iommu_domain *domain)
@@ -1556,12 +1584,19 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev)
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
u8 bus = info->bus, devfn = info->devfn;
+ int ret;
if (!dev_is_pci(dev))
return domain_context_mapping_one(domain, iommu, bus, devfn);
- return pci_for_each_dma_alias(to_pci_dev(dev),
- domain_context_mapping_cb, domain);
+ ret = pci_for_each_dma_alias(to_pci_dev(dev),
+ domain_context_mapping_cb, domain);
+ if (ret)
+ return ret;
+
+ iommu_enable_pci_ats(info);
+
+ return 0;
}
/* Return largest possible superpage level for a given mapping */
@@ -1748,7 +1783,7 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
context_clear_entry(context);
__iommu_flush_cache(iommu, context, sizeof(*context));
spin_unlock(&iommu->lock);
- intel_context_flush_present(info, context, did, true);
+ intel_context_flush_no_pasid(info, context, did);
}
int __domain_setup_first_level(struct intel_iommu *iommu,
@@ -1843,8 +1878,6 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
if (ret)
goto out_block_translation;
- iommu_enable_pci_caps(info);
-
ret = cache_tag_assign_domain(domain, dev, IOMMU_NO_PASID);
if (ret)
goto out_block_translation;
@@ -2871,16 +2904,19 @@ void intel_iommu_shutdown(void)
if (no_iommu || dmar_disabled)
return;
- down_write(&dmar_global_lock);
+ /*
+ * All other CPUs were brought down and hotplug interrupts were disabled,
+ * so no locking or RCU list checking is needed anymore
+ */
+ list_for_each_entry(drhd, &dmar_drhd_units, list) {
+ iommu = drhd->iommu;
- /* Disable PMRs explicitly here. */
- for_each_iommu(iommu, drhd)
+ /* Disable PMRs explicitly here. */
iommu_disable_protect_mem_regions(iommu);
- /* Make sure the IOMMUs are switched off */
- intel_disable_iommus();
-
- up_write(&dmar_global_lock);
+ /* Make sure the IOMMUs are switched off */
+ iommu_disable_translation(iommu);
+ }
}
static struct intel_iommu *dev_to_intel_iommu(struct device *dev)
@@ -3013,6 +3049,7 @@ static int __init probe_acpi_namespace_devices(void)
if (dev->bus != &acpi_bus_type)
continue;
+ up_read(&dmar_global_lock);
adev = to_acpi_device(dev);
mutex_lock(&adev->physical_node_lock);
list_for_each_entry(pn,
@@ -3022,6 +3059,7 @@ static int __init probe_acpi_namespace_devices(void)
break;
}
mutex_unlock(&adev->physical_node_lock);
+ down_read(&dmar_global_lock);
if (ret)
return ret;
@@ -3205,6 +3243,7 @@ static void domain_context_clear(struct device_domain_info *info)
pci_for_each_dma_alias(to_pci_dev(info->dev),
&domain_context_clear_one_cb, info);
+ iommu_disable_pci_ats(info);
}
/*
@@ -3221,7 +3260,6 @@ void device_block_translation(struct device *dev)
if (info->domain)
cache_tag_unassign_domain(info->domain, dev, IOMMU_NO_PASID);
- iommu_disable_pci_caps(info);
if (!dev_is_real_dma_subdevice(dev)) {
if (sm_supported(iommu))
intel_pasid_tear_down_entry(iommu, dev,
@@ -3756,6 +3794,10 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
!pci_enable_pasid(pdev, info->pasid_supported & ~1))
info->pasid_enabled = 1;
+ if (sm_supported(iommu))
+ iommu_enable_pci_ats(info);
+ iommu_enable_pci_pri(info);
+
return &iommu->iommu;
free_table:
intel_pasid_free_table(dev);
@@ -3772,6 +3814,9 @@ static void intel_iommu_release_device(struct device *dev)
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
+ iommu_disable_pci_pri(info);
+ iommu_disable_pci_ats(info);
+
if (info->pasid_enabled) {
pci_disable_pasid(to_pci_dev(dev));
info->pasid_enabled = 0;
@@ -3858,151 +3903,41 @@ static struct iommu_group *intel_iommu_device_group(struct device *dev)
return generic_device_group(dev);
}
-static int intel_iommu_enable_sva(struct device *dev)
+int intel_iommu_enable_iopf(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct intel_iommu *iommu;
-
- if (!info || dmar_disabled)
- return -EINVAL;
-
- iommu = info->iommu;
- if (!iommu)
- return -EINVAL;
-
- if (!(iommu->flags & VTD_FLAG_SVM_CAPABLE))
- return -ENODEV;
-
- if (!info->pasid_enabled || !info->ats_enabled)
- return -EINVAL;
-
- /*
- * Devices having device-specific I/O fault handling should not
- * support PCI/PRI. The IOMMU side has no means to check the
- * capability of device-specific IOPF. Therefore, IOMMU can only
- * default that if the device driver enables SVA on a non-PRI
- * device, it will handle IOPF in its own way.
- */
- if (!info->pri_supported)
- return 0;
-
- /* Devices supporting PRI should have it enabled. */
- if (!info->pri_enabled)
- return -EINVAL;
-
- return 0;
-}
-
-static int context_flip_pri(struct device_domain_info *info, bool enable)
-{
struct intel_iommu *iommu = info->iommu;
- u8 bus = info->bus, devfn = info->devfn;
- struct context_entry *context;
- u16 did;
-
- spin_lock(&iommu->lock);
- if (context_copied(iommu, bus, devfn)) {
- spin_unlock(&iommu->lock);
- return -EINVAL;
- }
-
- context = iommu_context_addr(iommu, bus, devfn, false);
- if (!context || !context_present(context)) {
- spin_unlock(&iommu->lock);
- return -ENODEV;
- }
- did = context_domain_id(context);
-
- if (enable)
- context_set_sm_pre(context);
- else
- context_clear_sm_pre(context);
-
- if (!ecap_coherent(iommu->ecap))
- clflush_cache_range(context, sizeof(*context));
- intel_context_flush_present(info, context, did, true);
- spin_unlock(&iommu->lock);
-
- return 0;
-}
-
-static int intel_iommu_enable_iopf(struct device *dev)
-{
- struct pci_dev *pdev = dev_is_pci(dev) ? to_pci_dev(dev) : NULL;
- struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct intel_iommu *iommu;
int ret;
- if (!pdev || !info || !info->ats_enabled || !info->pri_supported)
+ if (!info->pri_enabled)
return -ENODEV;
- if (info->pri_enabled)
- return -EBUSY;
-
- iommu = info->iommu;
- if (!iommu)
- return -EINVAL;
-
- /* PASID is required in PRG Response Message. */
- if (info->pasid_enabled && !pci_prg_resp_pasid_required(pdev))
- return -EINVAL;
-
- ret = pci_reset_pri(pdev);
- if (ret)
- return ret;
+ if (info->iopf_refcount) {
+ info->iopf_refcount++;
+ return 0;
+ }
ret = iopf_queue_add_device(iommu->iopf_queue, dev);
if (ret)
return ret;
- ret = context_flip_pri(info, true);
- if (ret)
- goto err_remove_device;
-
- ret = pci_enable_pri(pdev, PRQ_DEPTH);
- if (ret)
- goto err_clear_pri;
-
- info->pri_enabled = 1;
+ info->iopf_refcount = 1;
return 0;
-err_clear_pri:
- context_flip_pri(info, false);
-err_remove_device:
- iopf_queue_remove_device(iommu->iopf_queue, dev);
-
- return ret;
}
-static int intel_iommu_disable_iopf(struct device *dev)
+void intel_iommu_disable_iopf(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
- if (!info->pri_enabled)
- return -EINVAL;
+ if (WARN_ON(!info->pri_enabled || !info->iopf_refcount))
+ return;
- /* Disable new PRI reception: */
- context_flip_pri(info, false);
+ if (--info->iopf_refcount)
+ return;
- /*
- * Remove device from fault queue and acknowledge all outstanding
- * PRQs to the device:
- */
iopf_queue_remove_device(iommu->iopf_queue, dev);
-
- /*
- * PCIe spec states that by clearing PRI enable bit, the Page
- * Request Interface will not issue new page requests, but has
- * outstanding page requests that have been transmitted or are
- * queued for transmission. This is supposed to be called after
- * the device driver has stopped DMA, all PASIDs have been
- * unbound and the outstanding PRQs have been drained.
- */
- pci_disable_pri(to_pci_dev(dev));
- info->pri_enabled = 0;
-
- return 0;
}
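
The rework above turns IOPF setup into a per-device reference count: the first enable adds the device to the IOMMU's iopf_queue, later enables only bump iopf_refcount, and the queue entry is removed when the last user disables. The following is a minimal sketch of the same idiom, not the driver code itself; the names are hypothetical and an explicit mutex stands in for whatever locking the real callers already hold.

#include <linux/mutex.h>
#include <linux/kernel.h>

struct iopf_ref {
        struct mutex lock;
        unsigned int refcount;
};

static int iopf_ref_get(struct iopf_ref *r, int (*first_enable)(void))
{
        int ret = 0;

        mutex_lock(&r->lock);
        if (r->refcount) {
                r->refcount++;          /* already enabled; just count the user */
        } else {
                ret = first_enable();   /* first user does the real work */
                if (!ret)
                        r->refcount = 1;
        }
        mutex_unlock(&r->lock);
        return ret;
}

static void iopf_ref_put(struct iopf_ref *r, void (*last_disable)(void))
{
        mutex_lock(&r->lock);
        if (!WARN_ON(!r->refcount) && --r->refcount == 0)
                last_disable();         /* last user tears it down */
        mutex_unlock(&r->lock);
}
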
static int
@@ -4013,7 +3948,7 @@ intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
return intel_iommu_enable_iopf(dev);
case IOMMU_DEV_FEAT_SVA:
- return intel_iommu_enable_sva(dev);
+ return 0;
default:
return -ENODEV;
@@ -4025,7 +3960,8 @@ intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
{
switch (feat) {
case IOMMU_DEV_FEAT_IOPF:
- return intel_iommu_disable_iopf(dev);
+ intel_iommu_disable_iopf(dev);
+ return 0;
case IOMMU_DEV_FEAT_SVA:
return 0;
@@ -4410,13 +4346,10 @@ static int identity_domain_attach_dev(struct iommu_domain *domain, struct device
if (dev_is_real_dma_subdevice(dev))
return 0;
- if (sm_supported(iommu)) {
+ if (sm_supported(iommu))
ret = intel_pasid_setup_pass_through(iommu, dev, IOMMU_NO_PASID);
- if (!ret)
- iommu_enable_pci_caps(info);
- } else {
+ else
ret = device_setup_pass_through(dev);
- }
return ret;
}
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index 6ea7bbe26b19..c4916886da5a 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -774,6 +774,7 @@ struct device_domain_info {
u8 ats_enabled:1;
u8 dtlb_extra_inval:1; /* Quirk for devices need extra flush */
u8 ats_qdep;
+ unsigned int iopf_refcount;
struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
struct intel_iommu *iommu; /* IOMMU used by this device */
struct dmar_domain *domain; /* pointer to domain */
@@ -953,25 +954,6 @@ static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}
-/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
- are never going to work. */
-static inline unsigned long mm_to_dma_pfn_start(unsigned long mm_pfn)
-{
- return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
-}
-static inline unsigned long mm_to_dma_pfn_end(unsigned long mm_pfn)
-{
- return ((mm_pfn + 1) << (PAGE_SHIFT - VTD_PAGE_SHIFT)) - 1;
-}
-static inline unsigned long page_to_dma_pfn(struct page *pg)
-{
- return mm_to_dma_pfn_start(page_to_pfn(pg));
-}
-static inline unsigned long virt_to_dma_pfn(void *p)
-{
- return page_to_dma_pfn(virt_to_page(p));
-}
-
static inline void context_set_present(struct context_entry *context)
{
context->lo |= 1;
@@ -1304,9 +1286,8 @@ void cache_tag_flush_all(struct dmar_domain *domain);
void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
unsigned long end);
-void intel_context_flush_present(struct device_domain_info *info,
- struct context_entry *context,
- u16 did, bool affect_domains);
+void intel_context_flush_no_pasid(struct device_domain_info *info,
+ struct context_entry *context, u16 did);
int intel_iommu_enable_prq(struct intel_iommu *iommu);
int intel_iommu_finish_prq(struct intel_iommu *iommu);
@@ -1314,6 +1295,9 @@ void intel_iommu_page_response(struct device *dev, struct iopf_fault *evt,
struct iommu_page_response *msg);
void intel_iommu_drain_pasid_prq(struct device *dev, u32 pasid);
+int intel_iommu_enable_iopf(struct device *dev);
+void intel_iommu_disable_iopf(struct device *dev);
+
#ifdef CONFIG_INTEL_IOMMU_SVM
void intel_svm_check(struct intel_iommu *iommu);
struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index ad795c772f21..ea3ca5203919 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -25,11 +25,6 @@
#include "../irq_remapping.h"
#include "../iommu-pages.h"
-enum irq_mode {
- IRQ_REMAPPING,
- IRQ_POSTING,
-};
-
struct ioapic_scope {
struct intel_iommu *iommu;
unsigned int id;
@@ -49,8 +44,8 @@ struct irq_2_iommu {
u16 irte_index;
u16 sub_handle;
u8 irte_mask;
- enum irq_mode mode;
bool posted_msi;
+ bool posted_vcpu;
};
struct intel_ir_data {
@@ -138,7 +133,6 @@ static int alloc_irte(struct intel_iommu *iommu,
irq_iommu->irte_index = index;
irq_iommu->sub_handle = 0;
irq_iommu->irte_mask = mask;
- irq_iommu->mode = IRQ_REMAPPING;
}
raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
@@ -193,8 +187,6 @@ static int modify_irte(struct irq_2_iommu *irq_iommu,
rc = qi_flush_iec(iommu, index, 0);
- /* Update iommu mode according to the IRTE mode */
- irq_iommu->mode = irte->pst ? IRQ_POSTING : IRQ_REMAPPING;
raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return rc;
@@ -1169,7 +1161,26 @@ static void intel_ir_reconfigure_irte_posted(struct irq_data *irqd)
static inline void intel_ir_reconfigure_irte_posted(struct irq_data *irqd) {}
#endif
-static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force)
+static void __intel_ir_reconfigure_irte(struct irq_data *irqd, bool force_host)
+{
+ struct intel_ir_data *ir_data = irqd->chip_data;
+
+ /*
+ * Don't modify IRTEs for IRQs that are being posted to vCPUs if the
+ * host CPU affinity changes.
+ */
+ if (ir_data->irq_2_iommu.posted_vcpu && !force_host)
+ return;
+
+ ir_data->irq_2_iommu.posted_vcpu = false;
+
+ if (ir_data->irq_2_iommu.posted_msi)
+ intel_ir_reconfigure_irte_posted(irqd);
+ else
+ modify_irte(&ir_data->irq_2_iommu, &ir_data->irte_entry);
+}
+
+static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force_host)
{
struct intel_ir_data *ir_data = irqd->chip_data;
struct irte *irte = &ir_data->irte_entry;
@@ -1182,10 +1193,7 @@ static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force)
irte->vector = cfg->vector;
irte->dest_id = IRTE_DEST(cfg->dest_apicid);
- if (ir_data->irq_2_iommu.posted_msi)
- intel_ir_reconfigure_irte_posted(irqd);
- else if (force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
- modify_irte(&ir_data->irq_2_iommu, irte);
+ __intel_ir_reconfigure_irte(irqd, force_host);
}
/*
@@ -1240,7 +1248,7 @@ static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
/* stop posting interrupts, back to the default mode */
if (!vcpu_pi_info) {
- modify_irte(&ir_data->irq_2_iommu, &ir_data->irte_entry);
+ __intel_ir_reconfigure_irte(data, true);
} else {
struct irte irte_pi;
@@ -1263,6 +1271,7 @@ static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
irte_pi.pda_h = (vcpu_pi_info->pi_desc_addr >> 32) &
~(-1UL << PDA_HIGH_BIT);
+ ir_data->irq_2_iommu.posted_vcpu = true;
modify_irte(&ir_data->irq_2_iommu, &irte_pi);
}
@@ -1489,6 +1498,9 @@ static void intel_irq_remapping_deactivate(struct irq_domain *domain,
struct intel_ir_data *data = irq_data->chip_data;
struct irte entry;
+ WARN_ON_ONCE(data->irq_2_iommu.posted_vcpu);
+ data->irq_2_iommu.posted_vcpu = false;
+
memset(&entry, 0, sizeof(entry));
modify_irte(&data->irq_2_iommu, &entry);
}
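
The removed IRQ_REMAPPING/IRQ_POSTING mode field is replaced by a posted_vcpu flag, and every IRTE rewrite now funnels through one helper that leaves vCPU-posted entries untouched unless the caller explicitly forces a return to host delivery, as intel_ir_set_vcpu_affinity() does when posting stops. A reduced sketch of that decision follows; the types and write helpers are placeholders, not the real IRTE programming.

struct irte_track {
        bool posted_msi;        /* posted through the software PID path */
        bool posted_vcpu;       /* posted directly to a vCPU */
};

static void write_posted_msi_irte(struct irte_track *t) { /* program posted format */ }
static void write_remapped_irte(struct irte_track *t)   { /* program remapped format */ }

static void reconfigure_irte(struct irte_track *t, bool force_host)
{
        /* Never clobber a vCPU-posted entry on a plain affinity change. */
        if (t->posted_vcpu && !force_host)
                return;

        t->posted_vcpu = false;

        if (t->posted_msi)
                write_posted_msi_irte(t);
        else
                write_remapped_irte(t);
}
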
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index fb59a7d35958..7ee18bb48bd4 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -932,7 +932,7 @@ static void device_pasid_table_teardown(struct device *dev, u8 bus, u8 devfn)
context_clear_entry(context);
__iommu_flush_cache(iommu, context, sizeof(*context));
spin_unlock(&iommu->lock);
- intel_context_flush_present(info, context, did, false);
+ intel_context_flush_no_pasid(info, context, did);
}
static int pci_pasid_table_teardown(struct pci_dev *pdev, u16 alias, void *data)
@@ -992,6 +992,8 @@ static int context_entry_set_pasid_table(struct context_entry *context,
context_set_sm_dte(context);
if (info->pasid_supported)
context_set_pasid(context);
+ if (info->pri_supported)
+ context_set_sm_pre(context);
context_set_fault_enable(context);
context_set_present(context);
@@ -1117,17 +1119,15 @@ static void __context_flush_dev_iotlb(struct device_domain_info *info)
/*
* Cache invalidations after change in a context table entry that was present
- * according to the Spec 6.5.3.3 (Guidance to Software for Invalidations). If
- * IOMMU is in scalable mode and all PASID table entries of the device were
- * non-present, set flush_domains to false. Otherwise, true.
+ * according to the Spec 6.5.3.3 (Guidance to Software for Invalidations).
+ * This helper can only be used when IOMMU is working in the legacy mode or
+ * IOMMU is in scalable mode but all PASID table entries of the device are
+ * non-present.
*/
-void intel_context_flush_present(struct device_domain_info *info,
- struct context_entry *context,
- u16 did, bool flush_domains)
+void intel_context_flush_no_pasid(struct device_domain_info *info,
+ struct context_entry *context, u16 did)
{
struct intel_iommu *iommu = info->iommu;
- struct pasid_entry *pte;
- int i;
/*
* Device-selective context-cache invalidation. The Domain-ID field
@@ -1150,30 +1150,5 @@ void intel_context_flush_present(struct device_domain_info *info,
return;
}
- /*
- * For scalable mode:
- * - Domain-selective PASID-cache invalidation to affected domains
- * - Domain-selective IOTLB invalidation to affected domains
- * - Global Device-TLB invalidation to affected functions
- */
- if (flush_domains) {
- /*
- * If the IOMMU is running in scalable mode and there might
- * be potential PASID translations, the caller should hold
- * the lock to ensure that context changes and cache flushes
- * are atomic.
- */
- assert_spin_locked(&iommu->lock);
- for (i = 0; i < info->pasid_table->max_pasid; i++) {
- pte = intel_pasid_get_entry(info->dev, i);
- if (!pte || !pasid_pte_is_present(pte))
- continue;
-
- did = pasid_get_domain_id(pte);
- qi_flush_pasid_cache(iommu, did, QI_PC_ALL_PASIDS, 0);
- iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
- }
- }
-
__context_flush_dev_iotlb(info);
}
diff --git a/drivers/iommu/intel/prq.c b/drivers/iommu/intel/prq.c
index 064194399b38..5b6a64d96850 100644
--- a/drivers/iommu/intel/prq.c
+++ b/drivers/iommu/intel/prq.c
@@ -67,7 +67,7 @@ void intel_iommu_drain_pasid_prq(struct device *dev, u32 pasid)
u16 sid, did;
info = dev_iommu_priv_get(dev);
- if (!info->pri_enabled)
+ if (!info->iopf_refcount)
return;
iommu = info->iommu;
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index f5569347591f..ba93123cb4eb 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -110,6 +110,41 @@ static const struct mmu_notifier_ops intel_mmuops = {
.free_notifier = intel_mm_free_notifier,
};
+static int intel_iommu_sva_supported(struct device *dev)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu;
+
+ if (!info || dmar_disabled)
+ return -EINVAL;
+
+ iommu = info->iommu;
+ if (!iommu)
+ return -EINVAL;
+
+ if (!(iommu->flags & VTD_FLAG_SVM_CAPABLE))
+ return -ENODEV;
+
+ if (!info->pasid_enabled || !info->ats_enabled)
+ return -EINVAL;
+
+ /*
+ * Devices having device-specific I/O fault handling should not
+ * support PCI/PRI. The IOMMU side has no means to check the
+ * capability of device-specific IOPF. Therefore, IOMMU can only
+ * default that if the device driver enables SVA on a non-PRI
+ * device, it will handle IOPF in its own way.
+ */
+ if (!info->pri_supported)
+ return 0;
+
+ /* Devices supporting PRI should have it enabled. */
+ if (!info->pri_enabled)
+ return -EINVAL;
+
+ return 0;
+}
+
static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid,
struct iommu_domain *old)
@@ -121,6 +156,10 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
unsigned long sflags;
int ret = 0;
+ ret = intel_iommu_sva_supported(dev);
+ if (ret)
+ return ret;
+
dev_pasid = domain_add_dev_pasid(domain, dev, pasid);
if (IS_ERR(dev_pasid))
return PTR_ERR(dev_pasid);
@@ -161,6 +200,10 @@ struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
struct dmar_domain *domain;
int ret;
+ ret = intel_iommu_sva_supported(dev);
+ if (ret)
+ return ERR_PTR(ret);
+
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return ERR_PTR(-ENOMEM);
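
With IOMMU_DEV_FEAT_SVA reduced to a no-op, the capability checks that used to live in intel_iommu_enable_sva() are consolidated into intel_iommu_sva_supported() and run from both SVA entry points, domain allocation and set_dev_pasid. A stripped-down sketch of the shared check is below; the dev_caps type is made up for illustration.

struct dev_caps {
        bool pasid_enabled, ats_enabled, pri_supported, pri_enabled;
};

/* Called from both the domain-alloc and the set_dev_pasid paths. */
static int sva_supported(const struct dev_caps *c)
{
        if (!c->pasid_enabled || !c->ats_enabled)
                return -EINVAL;
        /* Non-PRI devices are assumed to handle I/O faults on their own. */
        if (c->pri_supported && !c->pri_enabled)
                return -EINVAL;
        return 0;
}
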
diff --git a/drivers/iommu/io-pgtable-dart.c b/drivers/iommu/io-pgtable-dart.c
index c004640640ee..06aca9ab52f9 100644
--- a/drivers/iommu/io-pgtable-dart.c
+++ b/drivers/iommu/io-pgtable-dart.c
@@ -135,7 +135,6 @@ static int dart_init_pte(struct dart_io_pgtable *data,
pte |= FIELD_PREP(APPLE_DART_PTE_SUBPAGE_START, 0);
pte |= FIELD_PREP(APPLE_DART_PTE_SUBPAGE_END, 0xfff);
- pte |= APPLE_DART1_PTE_PROT_SP_DIS;
pte |= APPLE_DART_PTE_VALID;
for (i = 0; i < num_entries; i++)
@@ -211,6 +210,7 @@ static dart_iopte dart_prot_to_pte(struct dart_io_pgtable *data,
dart_iopte pte = 0;
if (data->iop.fmt == APPLE_DART) {
+ pte |= APPLE_DART1_PTE_PROT_SP_DIS;
if (!(prot & IOMMU_WRITE))
pte |= APPLE_DART1_PTE_PROT_NO_WRITE;
if (!(prot & IOMMU_READ))
diff --git a/drivers/iommu/iommu-priv.h b/drivers/iommu/iommu-priv.h
index de5b54eaa8bf..05fa6e682e88 100644
--- a/drivers/iommu/iommu-priv.h
+++ b/drivers/iommu/iommu-priv.h
@@ -17,6 +17,8 @@ static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
return dev->iommu->iommu_dev->ops;
}
+void dev_iommu_free(struct device *dev);
+
const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode);
static inline const struct iommu_ops *iommu_fwspec_ops(struct iommu_fwspec *fwspec)
@@ -24,8 +26,7 @@ static inline const struct iommu_ops *iommu_fwspec_ops(struct iommu_fwspec *fwsp
return iommu_ops_from_fwnode(fwspec ? fwspec->iommu_fwnode : NULL);
}
-int iommu_group_replace_domain(struct iommu_group *group,
- struct iommu_domain *new_domain);
+void iommu_fwspec_free(struct device *dev);
int iommu_device_register_bus(struct iommu_device *iommu,
const struct iommu_ops *ops,
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 60aed01e54f2..9e1b444246f8 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -45,6 +45,9 @@ static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
static u32 iommu_cmd_line __read_mostly;
+/* Tags used with xa_tag_pointer() in group->pasid_array */
+enum { IOMMU_PASID_ARRAY_DOMAIN = 0, IOMMU_PASID_ARRAY_HANDLE = 1 };
+
struct iommu_group {
struct kobject kobj;
struct kobject *devices_kobj;
@@ -352,7 +355,7 @@ static struct dev_iommu *dev_iommu_get(struct device *dev)
return param;
}
-static void dev_iommu_free(struct device *dev)
+void dev_iommu_free(struct device *dev)
{
struct dev_iommu *param = dev->iommu;
@@ -404,14 +407,40 @@ EXPORT_SYMBOL_GPL(dev_iommu_priv_set);
* Init the dev->iommu and dev->iommu_group in the struct device and get the
* driver probed
*/
-static int iommu_init_device(struct device *dev, const struct iommu_ops *ops)
+static int iommu_init_device(struct device *dev)
{
+ const struct iommu_ops *ops;
struct iommu_device *iommu_dev;
struct iommu_group *group;
int ret;
if (!dev_iommu_get(dev))
return -ENOMEM;
+ /*
+ * For FDT-based systems and ACPI IORT/VIOT, the common firmware parsing
+ * is buried in the bus dma_configure path. Properly unpicking that is
+ * still a big job, so for now just invoke the whole thing. The device
+ * already having a driver bound means dma_configure has already run and
+ * either found no IOMMU to wait for, or we're in its replay call right
+ * now, so either way there's no point calling it again.
+ */
+ if (!dev->driver && dev->bus->dma_configure) {
+ mutex_unlock(&iommu_probe_device_lock);
+ dev->bus->dma_configure(dev);
+ mutex_lock(&iommu_probe_device_lock);
+ }
+ /*
+ * At this point, relevant devices either now have a fwspec which will
+ * match ops registered with a non-NULL fwnode, or we can reasonably
+ * assume that only one of Intel, AMD, s390, PAMU or legacy SMMUv2 can
+ * be present, and that any of their registered instances has suitable
+ * ops for probing, and thus cheekily co-opt the same mechanism.
+ */
+ ops = iommu_fwspec_ops(dev->iommu->fwspec);
+ if (!ops) {
+ ret = -ENODEV;
+ goto err_free;
+ }
if (!try_module_get(ops->owner)) {
ret = -EINVAL;
@@ -514,23 +543,11 @@ DEFINE_MUTEX(iommu_probe_device_lock);
static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
- const struct iommu_ops *ops;
struct iommu_group *group;
struct group_device *gdev;
int ret;
/*
- * For FDT-based systems and ACPI IORT/VIOT, drivers register IOMMU
- * instances with non-NULL fwnodes, and client devices should have been
- * identified with a fwspec by this point. Otherwise, we can currently
- * assume that only one of Intel, AMD, s390, PAMU or legacy SMMUv2 can
- * be present, and that any of their registered instances has suitable
- * ops for probing, and thus cheekily co-opt the same mechanism.
- */
- ops = iommu_fwspec_ops(dev_iommu_fwspec_get(dev));
- if (!ops)
- return -ENODEV;
- /*
* Serialise to avoid races between IOMMU drivers registering in
* parallel and/or the "replay" calls from ACPI/OF code via client
* driver probe. Once the latter have been cleaned up we should
@@ -543,9 +560,15 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
if (dev->iommu_group)
return 0;
- ret = iommu_init_device(dev, ops);
+ ret = iommu_init_device(dev);
if (ret)
return ret;
+ /*
+ * And if we do now see any replay calls, they would indicate someone
+ * misusing the dma_configure path outside bus code.
+ */
+ if (dev->driver)
+ dev_WARN(dev, "late IOMMU probe at driver bind, something fishy here!\n");
group = dev->iommu_group;
gdev = iommu_group_alloc_device(group, dev);
@@ -2147,6 +2170,17 @@ struct iommu_domain *iommu_get_dma_domain(struct device *dev)
return dev->iommu_group->default_domain;
}
+static void *iommu_make_pasid_array_entry(struct iommu_domain *domain,
+ struct iommu_attach_handle *handle)
+{
+ if (handle) {
+ handle->domain = domain;
+ return xa_tag_pointer(handle, IOMMU_PASID_ARRAY_HANDLE);
+ }
+
+ return xa_tag_pointer(domain, IOMMU_PASID_ARRAY_DOMAIN);
+}
+
static int __iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group)
{
@@ -2187,32 +2221,6 @@ int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
}
EXPORT_SYMBOL_GPL(iommu_attach_group);
-/**
- * iommu_group_replace_domain - replace the domain that a group is attached to
- * @group: IOMMU group that will be attached to the new domain
- * @new_domain: new IOMMU domain to replace with
- *
- * This API allows the group to switch domains without being forced to go to
- * the blocking domain in-between.
- *
- * If the currently attached domain is a core domain (e.g. a default_domain),
- * it will act just like the iommu_attach_group().
- */
-int iommu_group_replace_domain(struct iommu_group *group,
- struct iommu_domain *new_domain)
-{
- int ret;
-
- if (!new_domain)
- return -EINVAL;
-
- mutex_lock(&group->mutex);
- ret = __iommu_group_set_domain(group, new_domain);
- mutex_unlock(&group->mutex);
- return ret;
-}
-EXPORT_SYMBOL_NS_GPL(iommu_group_replace_domain, "IOMMUFD_INTERNAL");
-
static int __iommu_device_set_domain(struct iommu_group *group,
struct device *dev,
struct iommu_domain *new_domain,
@@ -2849,7 +2857,6 @@ void iommu_fwspec_free(struct device *dev)
dev_iommu_fwspec_set(dev, NULL);
}
}
-EXPORT_SYMBOL_GPL(iommu_fwspec_free);
int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids)
{
@@ -3097,6 +3104,11 @@ int iommu_device_use_default_domain(struct device *dev)
return 0;
mutex_lock(&group->mutex);
+ /* We may race against bus_iommu_probe() finalising groups here */
+ if (!group->default_domain) {
+ ret = -EPROBE_DEFER;
+ goto unlock_out;
+ }
if (group->owner_cnt) {
if (group->domain != group->default_domain || group->owner ||
!xa_empty(&group->pasid_array)) {
@@ -3374,6 +3386,7 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
struct iommu_group *group = dev->iommu_group;
struct group_device *device;
const struct iommu_ops *ops;
+ void *entry;
int ret;
if (!group)
@@ -3397,16 +3410,31 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
}
}
- if (handle)
- handle->domain = domain;
+ entry = iommu_make_pasid_array_entry(domain, handle);
- ret = xa_insert(&group->pasid_array, pasid, handle, GFP_KERNEL);
+ /*
+ * Entry present is a failure case. Use xa_insert() instead of
+ * xa_reserve().
+ */
+ ret = xa_insert(&group->pasid_array, pasid, XA_ZERO_ENTRY, GFP_KERNEL);
if (ret)
goto out_unlock;
ret = __iommu_set_group_pasid(domain, group, pasid);
- if (ret)
- xa_erase(&group->pasid_array, pasid);
+ if (ret) {
+ xa_release(&group->pasid_array, pasid);
+ goto out_unlock;
+ }
+
+ /*
+ * The xa_insert() above reserved the memory, and the group->mutex is
+ * held, this cannot fail. The new domain cannot be visible until the
+ * operation succeeds as we cannot tolerate PRIs becoming concurrently
+ * queued and then failing attach.
+ */
+ WARN_ON(xa_is_err(xa_store(&group->pasid_array,
+ pasid, entry, GFP_KERNEL)));
+
out_unlock:
mutex_unlock(&group->mutex);
return ret;
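
The attach paths above now reserve the slot, attach, and only then publish the entry, so a page request can never look up a PASID whose attach later fails. The trick is xa_insert() of XA_ZERO_ENTRY, which still returns -EBUSY for an occupied index (unlike xa_reserve()), followed by xa_release() on error or an xa_store() that cannot fail because the slot's memory is already reserved. A condensed sketch with a placeholder attach callback:

#include <linux/xarray.h>

static int attach_and_publish(struct xarray *xa, unsigned long index,
                              void *entry, int (*do_attach)(void))
{
        int ret;

        /* Fails with -EBUSY if something is already published at this index. */
        ret = xa_insert(xa, index, XA_ZERO_ENTRY, GFP_KERNEL);
        if (ret)
                return ret;

        ret = do_attach();
        if (ret) {
                xa_release(xa, index);  /* drop the reservation */
                return ret;
        }

        /* The slot was reserved above, so this store cannot fail. */
        WARN_ON(xa_is_err(xa_store(xa, index, entry, GFP_KERNEL)));
        return 0;
}
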
@@ -3480,13 +3508,17 @@ struct iommu_attach_handle *
iommu_attach_handle_get(struct iommu_group *group, ioasid_t pasid, unsigned int type)
{
struct iommu_attach_handle *handle;
+ void *entry;
xa_lock(&group->pasid_array);
- handle = xa_load(&group->pasid_array, pasid);
- if (!handle)
+ entry = xa_load(&group->pasid_array, pasid);
+ if (!entry || xa_pointer_tag(entry) != IOMMU_PASID_ARRAY_HANDLE) {
handle = ERR_PTR(-ENOENT);
- else if (type && handle->domain->type != type)
- handle = ERR_PTR(-EBUSY);
+ } else {
+ handle = xa_untag_pointer(entry);
+ if (type && handle->domain->type != type)
+ handle = ERR_PTR(-EBUSY);
+ }
xa_unlock(&group->pasid_array);
return handle;
@@ -3509,25 +3541,35 @@ int iommu_attach_group_handle(struct iommu_domain *domain,
struct iommu_group *group,
struct iommu_attach_handle *handle)
{
+ void *entry;
int ret;
- if (handle)
- handle->domain = domain;
+ if (!handle)
+ return -EINVAL;
mutex_lock(&group->mutex);
- ret = xa_insert(&group->pasid_array, IOMMU_NO_PASID, handle, GFP_KERNEL);
+ entry = iommu_make_pasid_array_entry(domain, handle);
+ ret = xa_insert(&group->pasid_array,
+ IOMMU_NO_PASID, XA_ZERO_ENTRY, GFP_KERNEL);
if (ret)
- goto err_unlock;
+ goto out_unlock;
ret = __iommu_attach_group(domain, group);
- if (ret)
- goto err_erase;
- mutex_unlock(&group->mutex);
+ if (ret) {
+ xa_release(&group->pasid_array, IOMMU_NO_PASID);
+ goto out_unlock;
+ }
- return 0;
-err_erase:
- xa_erase(&group->pasid_array, IOMMU_NO_PASID);
-err_unlock:
+ /*
+ * The xa_insert() above reserved the memory, and the group->mutex is
+ * held, this cannot fail. The new domain cannot be visible until the
+ * operation succeeds as we cannot tolerate PRIs becoming concurrently
+ * queued and then failing attach.
+ */
+ WARN_ON(xa_is_err(xa_store(&group->pasid_array,
+ IOMMU_NO_PASID, entry, GFP_KERNEL)));
+
+out_unlock:
mutex_unlock(&group->mutex);
return ret;
}
@@ -3557,33 +3599,34 @@ EXPORT_SYMBOL_NS_GPL(iommu_detach_group_handle, "IOMMUFD_INTERNAL");
* @new_domain: new IOMMU domain to replace with
* @handle: attach handle
*
- * This is a variant of iommu_group_replace_domain(). It allows the caller to
- * provide an attach handle for the new domain and use it when the domain is
- * attached.
+ * This API allows the group to switch domains without being forced to go to
+ * the blocking domain in-between. It allows the caller to provide an attach
+ * handle for the new domain and use it when the domain is attached.
+ *
+ * If the currently attached domain is a core domain (e.g. a default_domain),
+ * it will act just like the iommu_attach_group_handle().
*/
int iommu_replace_group_handle(struct iommu_group *group,
struct iommu_domain *new_domain,
struct iommu_attach_handle *handle)
{
- void *curr;
+ void *curr, *entry;
int ret;
- if (!new_domain)
+ if (!new_domain || !handle)
return -EINVAL;
mutex_lock(&group->mutex);
- if (handle) {
- ret = xa_reserve(&group->pasid_array, IOMMU_NO_PASID, GFP_KERNEL);
- if (ret)
- goto err_unlock;
- handle->domain = new_domain;
- }
+ entry = iommu_make_pasid_array_entry(new_domain, handle);
+ ret = xa_reserve(&group->pasid_array, IOMMU_NO_PASID, GFP_KERNEL);
+ if (ret)
+ goto err_unlock;
ret = __iommu_group_set_domain(group, new_domain);
if (ret)
goto err_release;
- curr = xa_store(&group->pasid_array, IOMMU_NO_PASID, handle, GFP_KERNEL);
+ curr = xa_store(&group->pasid_array, IOMMU_NO_PASID, entry, GFP_KERNEL);
WARN_ON(xa_is_err(curr));
mutex_unlock(&group->mutex);
@@ -3596,3 +3639,32 @@ err_unlock:
return ret;
}
EXPORT_SYMBOL_NS_GPL(iommu_replace_group_handle, "IOMMUFD_INTERNAL");
+
+#if IS_ENABLED(CONFIG_IRQ_MSI_IOMMU)
+/**
+ * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain
+ * @desc: MSI descriptor, will store the MSI page
+ * @msi_addr: MSI target address to be mapped
+ *
+ * The implementation of sw_msi() should take msi_addr and map it to
+ * an IOVA in the domain and call msi_desc_set_iommu_msi_iova() with the
+ * mapping information.
+ *
+ * Return: 0 on success or negative error code if the mapping failed.
+ */
+int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
+{
+ struct device *dev = msi_desc_to_dev(desc);
+ struct iommu_group *group = dev->iommu_group;
+ int ret = 0;
+
+ if (!group)
+ return 0;
+
+ mutex_lock(&group->mutex);
+ if (group->domain && group->domain->sw_msi)
+ ret = group->domain->sw_msi(group->domain, desc, msi_addr);
+ mutex_unlock(&group->mutex);
+ return ret;
+}
+#endif /* CONFIG_IRQ_MSI_IOMMU */
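
iommu_dma_prepare_msi() is now just a dispatch under the group mutex to the domain's sw_msi() callback; the callback maps the doorbell page and records the resulting IOVA with msi_desc_set_iommu_msi_iova(), after which the MSI layer programs that IOVA in place of the physical address. A rough sketch of what a domain owner supplies is below; my_map_doorbell() is a made-up helper, not part of any API.

/* Hypothetical helper that maps phys into the domain and returns the IOVA. */
static int my_map_doorbell(struct iommu_domain *domain, phys_addr_t phys,
                           unsigned long *iova);

static int my_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
                     phys_addr_t msi_addr)
{
        unsigned long iova;
        int rc;

        /* Map the page-aligned doorbell into the domain somehow. */
        rc = my_map_doorbell(domain, msi_addr & PAGE_MASK, &iova);
        if (rc)
                return rc;

        /* Tell the MSI layer which IOVA to program in place of msi_addr. */
        msi_desc_set_iommu_msi_iova(desc, iova, PAGE_SHIFT);
        return 0;
}

/* Registered once per domain, e.g. iommu_domain_set_sw_msi(domain, my_sw_msi); */
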
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index dfd0898fb6c1..4e107f69f951 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -5,6 +5,7 @@
#include <linux/iommufd.h>
#include <linux/slab.h>
#include <uapi/linux/iommufd.h>
+#include <linux/msi.h>
#include "../iommu-priv.h"
#include "io_pagetable.h"
@@ -293,36 +294,152 @@ u32 iommufd_device_to_id(struct iommufd_device *idev)
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_to_id, "IOMMUFD");
+/*
+ * Get an iommufd_sw_msi_map for the MSI physical address requested by the irq
+ * layer. The mapping to IOVA is global to the iommufd file descriptor; every
+ * domain that is attached to a device using the same MSI parameters will use
+ * the same IOVA.
+ */
+static __maybe_unused struct iommufd_sw_msi_map *
+iommufd_sw_msi_get_map(struct iommufd_ctx *ictx, phys_addr_t msi_addr,
+ phys_addr_t sw_msi_start)
+{
+ struct iommufd_sw_msi_map *cur;
+ unsigned int max_pgoff = 0;
+
+ lockdep_assert_held(&ictx->sw_msi_lock);
+
+ list_for_each_entry(cur, &ictx->sw_msi_list, sw_msi_item) {
+ if (cur->sw_msi_start != sw_msi_start)
+ continue;
+ max_pgoff = max(max_pgoff, cur->pgoff + 1);
+ if (cur->msi_addr == msi_addr)
+ return cur;
+ }
+
+ if (ictx->sw_msi_id >=
+ BITS_PER_BYTE * sizeof_field(struct iommufd_sw_msi_maps, bitmap))
+ return ERR_PTR(-EOVERFLOW);
+
+ cur = kzalloc(sizeof(*cur), GFP_KERNEL);
+ if (!cur)
+ return ERR_PTR(-ENOMEM);
+
+ cur->sw_msi_start = sw_msi_start;
+ cur->msi_addr = msi_addr;
+ cur->pgoff = max_pgoff;
+ cur->id = ictx->sw_msi_id++;
+ list_add_tail(&cur->sw_msi_item, &ictx->sw_msi_list);
+ return cur;
+}
+
+static int iommufd_sw_msi_install(struct iommufd_ctx *ictx,
+ struct iommufd_hwpt_paging *hwpt_paging,
+ struct iommufd_sw_msi_map *msi_map)
+{
+ unsigned long iova;
+
+ lockdep_assert_held(&ictx->sw_msi_lock);
+
+ iova = msi_map->sw_msi_start + msi_map->pgoff * PAGE_SIZE;
+ if (!test_bit(msi_map->id, hwpt_paging->present_sw_msi.bitmap)) {
+ int rc;
+
+ rc = iommu_map(hwpt_paging->common.domain, iova,
+ msi_map->msi_addr, PAGE_SIZE,
+ IOMMU_WRITE | IOMMU_READ | IOMMU_MMIO,
+ GFP_KERNEL_ACCOUNT);
+ if (rc)
+ return rc;
+ __set_bit(msi_map->id, hwpt_paging->present_sw_msi.bitmap);
+ }
+ return 0;
+}
+
+/*
+ * Called by the irq code if the platform translates the MSI address through the
+ * IOMMU. msi_addr is the physical address of the MSI page. iommufd will
+ * allocate an fd-global IOVA for the physical page that is the same on all
+ * domains and devices.
+ */
+#ifdef CONFIG_IRQ_MSI_IOMMU
+int iommufd_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
+ phys_addr_t msi_addr)
+{
+ struct device *dev = msi_desc_to_dev(desc);
+ struct iommufd_hwpt_paging *hwpt_paging;
+ struct iommu_attach_handle *raw_handle;
+ struct iommufd_attach_handle *handle;
+ struct iommufd_sw_msi_map *msi_map;
+ struct iommufd_ctx *ictx;
+ unsigned long iova;
+ int rc;
+
+ /*
+ * It is safe to call iommu_attach_handle_get() here because the iommu
+ * core code invokes this under the group mutex which also prevents any
+ * change of the attach handle for the duration of this function.
+ */
+ iommu_group_mutex_assert(dev);
+
+ raw_handle =
+ iommu_attach_handle_get(dev->iommu_group, IOMMU_NO_PASID, 0);
+ if (IS_ERR(raw_handle))
+ return 0;
+ hwpt_paging = find_hwpt_paging(domain->iommufd_hwpt);
+
+ handle = to_iommufd_handle(raw_handle);
+ /* No IOMMU_RESV_SW_MSI means no change to the msi_msg */
+ if (handle->idev->igroup->sw_msi_start == PHYS_ADDR_MAX)
+ return 0;
+
+ ictx = handle->idev->ictx;
+ guard(mutex)(&ictx->sw_msi_lock);
+ /*
+ * The input msi_addr is the exact byte offset of the MSI doorbell; we
+ * assume the caller has checked that it is contained within an MMIO
+ * region that is secure to map at PAGE_SIZE.
+ */
+ msi_map = iommufd_sw_msi_get_map(handle->idev->ictx,
+ msi_addr & PAGE_MASK,
+ handle->idev->igroup->sw_msi_start);
+ if (IS_ERR(msi_map))
+ return PTR_ERR(msi_map);
+
+ rc = iommufd_sw_msi_install(ictx, hwpt_paging, msi_map);
+ if (rc)
+ return rc;
+ __set_bit(msi_map->id, handle->idev->igroup->required_sw_msi.bitmap);
+
+ iova = msi_map->sw_msi_start + msi_map->pgoff * PAGE_SIZE;
+ msi_desc_set_iommu_msi_iova(desc, iova, PAGE_SHIFT);
+ return 0;
+}
+#endif
+
static int iommufd_group_setup_msi(struct iommufd_group *igroup,
struct iommufd_hwpt_paging *hwpt_paging)
{
- phys_addr_t sw_msi_start = igroup->sw_msi_start;
- int rc;
+ struct iommufd_ctx *ictx = igroup->ictx;
+ struct iommufd_sw_msi_map *cur;
+
+ if (igroup->sw_msi_start == PHYS_ADDR_MAX)
+ return 0;
/*
- * If the IOMMU driver gives a IOMMU_RESV_SW_MSI then it is asking us to
- * call iommu_get_msi_cookie() on its behalf. This is necessary to setup
- * the MSI window so iommu_dma_prepare_msi() can install pages into our
- * domain after request_irq(). If it is not done interrupts will not
- * work on this domain.
- *
- * FIXME: This is conceptually broken for iommufd since we want to allow
- * userspace to change the domains, eg switch from an identity IOAS to a
- * DMA IOAS. There is currently no way to create a MSI window that
- * matches what the IRQ layer actually expects in a newly created
- * domain.
+ * Install all the MSI pages the device has been using into the domain
*/
- if (sw_msi_start != PHYS_ADDR_MAX && !hwpt_paging->msi_cookie) {
- rc = iommu_get_msi_cookie(hwpt_paging->common.domain,
- sw_msi_start);
+ guard(mutex)(&ictx->sw_msi_lock);
+ list_for_each_entry(cur, &ictx->sw_msi_list, sw_msi_item) {
+ int rc;
+
+ if (cur->sw_msi_start != igroup->sw_msi_start ||
+ !test_bit(cur->id, igroup->required_sw_msi.bitmap))
+ continue;
+
+ rc = iommufd_sw_msi_install(ictx, hwpt_paging, cur);
if (rc)
return rc;
-
- /*
- * iommu_get_msi_cookie() can only be called once per domain,
- * it returns -EBUSY on later calls.
- */
- hwpt_paging->msi_cookie = true;
}
return 0;
}
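
The msi_cookie flag is gone; the iommufd context instead keeps a list of every doorbell it has handed out an IOVA for (iova = sw_msi_start + pgoff * PAGE_SIZE), each paging domain keeps a present_sw_msi bitmap of the map IDs already installed in it, and each group records the IDs it requires so newly attached domains can be pre-populated. The install-at-most-once bookkeeping is the usual DECLARE_BITMAP()/test_bit()/__set_bit() pattern; here is a reduced sketch with a placeholder map callback, not the iommufd code.

#include <linux/bitmap.h>

#define MAX_DOORBELLS 64        /* mirrors the 64-bit id space in the patch */

struct domain_msi_state {
        DECLARE_BITMAP(installed, MAX_DOORBELLS);
};

static int install_once(struct domain_msi_state *st, unsigned int id,
                        int (*map_doorbell)(unsigned int id))
{
        int rc;

        if (test_bit(id, st->installed))
                return 0;               /* this doorbell is already mapped here */

        rc = map_doorbell(id);
        if (rc)
                return rc;

        __set_bit(id, st->installed);
        return 0;
}
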
@@ -352,6 +469,111 @@ iommufd_device_attach_reserved_iova(struct iommufd_device *idev,
return 0;
}
+/* The device attach/detach/replace helpers for attach_handle */
+
+static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
+ struct iommufd_device *idev)
+{
+ struct iommufd_attach_handle *handle;
+ int rc;
+
+ lockdep_assert_held(&idev->igroup->lock);
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ if (hwpt->fault) {
+ rc = iommufd_fault_iopf_enable(idev);
+ if (rc)
+ goto out_free_handle;
+ }
+
+ handle->idev = idev;
+ rc = iommu_attach_group_handle(hwpt->domain, idev->igroup->group,
+ &handle->handle);
+ if (rc)
+ goto out_disable_iopf;
+
+ return 0;
+
+out_disable_iopf:
+ if (hwpt->fault)
+ iommufd_fault_iopf_disable(idev);
+out_free_handle:
+ kfree(handle);
+ return rc;
+}
+
+static struct iommufd_attach_handle *
+iommufd_device_get_attach_handle(struct iommufd_device *idev)
+{
+ struct iommu_attach_handle *handle;
+
+ lockdep_assert_held(&idev->igroup->lock);
+
+ handle =
+ iommu_attach_handle_get(idev->igroup->group, IOMMU_NO_PASID, 0);
+ if (IS_ERR(handle))
+ return NULL;
+ return to_iommufd_handle(handle);
+}
+
+static void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt,
+ struct iommufd_device *idev)
+{
+ struct iommufd_attach_handle *handle;
+
+ handle = iommufd_device_get_attach_handle(idev);
+ iommu_detach_group_handle(hwpt->domain, idev->igroup->group);
+ if (hwpt->fault) {
+ iommufd_auto_response_faults(hwpt, handle);
+ iommufd_fault_iopf_disable(idev);
+ }
+ kfree(handle);
+}
+
+static int iommufd_hwpt_replace_device(struct iommufd_device *idev,
+ struct iommufd_hw_pagetable *hwpt,
+ struct iommufd_hw_pagetable *old)
+{
+ struct iommufd_attach_handle *handle, *old_handle =
+ iommufd_device_get_attach_handle(idev);
+ int rc;
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ if (hwpt->fault && !old->fault) {
+ rc = iommufd_fault_iopf_enable(idev);
+ if (rc)
+ goto out_free_handle;
+ }
+
+ handle->idev = idev;
+ rc = iommu_replace_group_handle(idev->igroup->group, hwpt->domain,
+ &handle->handle);
+ if (rc)
+ goto out_disable_iopf;
+
+ if (old->fault) {
+ iommufd_auto_response_faults(hwpt, old_handle);
+ if (!hwpt->fault)
+ iommufd_fault_iopf_disable(idev);
+ }
+ kfree(old_handle);
+
+ return 0;
+
+out_disable_iopf:
+ if (hwpt->fault && !old->fault)
+ iommufd_fault_iopf_disable(idev);
+out_free_handle:
+ kfree(handle);
+ return rc;
+}
+
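
The attach and replace helpers above share one shape: allocate the handle, enable IOPF only when the target page table has a fault object, attach, and unwind in exactly the reverse order on failure. A generic sketch of that goto ladder follows; every name in it is a stand-in, not an iommufd symbol.

struct my_handle { int dummy; };
static int enable_iopf(void);
static void disable_iopf(void);
static int do_attach(struct my_handle *handle);

static int attach_with_handle(bool need_iopf)
{
        struct my_handle *handle;
        int rc;

        handle = kzalloc(sizeof(*handle), GFP_KERNEL);
        if (!handle)
                return -ENOMEM;

        if (need_iopf) {
                rc = enable_iopf();
                if (rc)
                        goto out_free_handle;
        }

        rc = do_attach(handle);
        if (rc)
                goto out_disable_iopf;

        return 0;

out_disable_iopf:
        if (need_iopf)
                disable_iopf();
out_free_handle:
        kfree(handle);
        return rc;
}
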
int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
struct iommufd_device *idev)
{
diff --git a/drivers/iommu/iommufd/fault.c b/drivers/iommu/iommufd/fault.c
index d9a937450e55..c48d72c9668c 100644
--- a/drivers/iommu/iommufd/fault.c
+++ b/drivers/iommu/iommufd/fault.c
@@ -17,7 +17,7 @@
#include "../iommu-priv.h"
#include "iommufd_private.h"
-static int iommufd_fault_iopf_enable(struct iommufd_device *idev)
+int iommufd_fault_iopf_enable(struct iommufd_device *idev)
{
struct device *dev = idev->dev;
int ret;
@@ -50,7 +50,7 @@ static int iommufd_fault_iopf_enable(struct iommufd_device *idev)
return ret;
}
-static void iommufd_fault_iopf_disable(struct iommufd_device *idev)
+void iommufd_fault_iopf_disable(struct iommufd_device *idev)
{
mutex_lock(&idev->iopf_lock);
if (!WARN_ON(idev->iopf_enabled == 0)) {
@@ -60,46 +60,8 @@ static void iommufd_fault_iopf_disable(struct iommufd_device *idev)
mutex_unlock(&idev->iopf_lock);
}
-static int __fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev)
-{
- struct iommufd_attach_handle *handle;
- int ret;
-
- handle = kzalloc(sizeof(*handle), GFP_KERNEL);
- if (!handle)
- return -ENOMEM;
-
- handle->idev = idev;
- ret = iommu_attach_group_handle(hwpt->domain, idev->igroup->group,
- &handle->handle);
- if (ret)
- kfree(handle);
-
- return ret;
-}
-
-int iommufd_fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev)
-{
- int ret;
-
- if (!hwpt->fault)
- return -EINVAL;
-
- ret = iommufd_fault_iopf_enable(idev);
- if (ret)
- return ret;
-
- ret = __fault_domain_attach_dev(hwpt, idev);
- if (ret)
- iommufd_fault_iopf_disable(idev);
-
- return ret;
-}
-
-static void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_attach_handle *handle)
+void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
+ struct iommufd_attach_handle *handle)
{
struct iommufd_fault *fault = hwpt->fault;
struct iopf_group *group, *next;
@@ -135,88 +97,6 @@ static void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
mutex_unlock(&fault->mutex);
}
-static struct iommufd_attach_handle *
-iommufd_device_get_attach_handle(struct iommufd_device *idev)
-{
- struct iommu_attach_handle *handle;
-
- handle = iommu_attach_handle_get(idev->igroup->group, IOMMU_NO_PASID, 0);
- if (IS_ERR(handle))
- return NULL;
-
- return to_iommufd_handle(handle);
-}
-
-void iommufd_fault_domain_detach_dev(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev)
-{
- struct iommufd_attach_handle *handle;
-
- handle = iommufd_device_get_attach_handle(idev);
- iommu_detach_group_handle(hwpt->domain, idev->igroup->group);
- iommufd_auto_response_faults(hwpt, handle);
- iommufd_fault_iopf_disable(idev);
- kfree(handle);
-}
-
-static int __fault_domain_replace_dev(struct iommufd_device *idev,
- struct iommufd_hw_pagetable *hwpt,
- struct iommufd_hw_pagetable *old)
-{
- struct iommufd_attach_handle *handle, *curr = NULL;
- int ret;
-
- if (old->fault)
- curr = iommufd_device_get_attach_handle(idev);
-
- if (hwpt->fault) {
- handle = kzalloc(sizeof(*handle), GFP_KERNEL);
- if (!handle)
- return -ENOMEM;
-
- handle->idev = idev;
- ret = iommu_replace_group_handle(idev->igroup->group,
- hwpt->domain, &handle->handle);
- } else {
- ret = iommu_replace_group_handle(idev->igroup->group,
- hwpt->domain, NULL);
- }
-
- if (!ret && curr) {
- iommufd_auto_response_faults(old, curr);
- kfree(curr);
- }
-
- return ret;
-}
-
-int iommufd_fault_domain_replace_dev(struct iommufd_device *idev,
- struct iommufd_hw_pagetable *hwpt,
- struct iommufd_hw_pagetable *old)
-{
- bool iopf_off = !hwpt->fault && old->fault;
- bool iopf_on = hwpt->fault && !old->fault;
- int ret;
-
- if (iopf_on) {
- ret = iommufd_fault_iopf_enable(idev);
- if (ret)
- return ret;
- }
-
- ret = __fault_domain_replace_dev(idev, hwpt, old);
- if (ret) {
- if (iopf_on)
- iommufd_fault_iopf_disable(idev);
- return ret;
- }
-
- if (iopf_off)
- iommufd_fault_iopf_disable(idev);
-
- return 0;
-}
-
void iommufd_fault_destroy(struct iommufd_object *obj)
{
struct iommufd_fault *fault = container_of(obj, struct iommufd_fault, obj);
@@ -449,7 +329,7 @@ int iommufd_fault_iopf_handler(struct iopf_group *group)
struct iommufd_hw_pagetable *hwpt;
struct iommufd_fault *fault;
- hwpt = group->attach_handle->domain->fault_data;
+ hwpt = group->attach_handle->domain->iommufd_hwpt;
fault = hwpt->fault;
spin_lock(&fault->lock);
diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c
index 598be26a14e2..7de6e914232e 100644
--- a/drivers/iommu/iommufd/hw_pagetable.c
+++ b/drivers/iommu/iommufd/hw_pagetable.c
@@ -156,6 +156,7 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
goto out_abort;
}
}
+ iommu_domain_set_sw_msi(hwpt->domain, iommufd_sw_msi);
/*
* Set the coherency mode before we do iopt_table_add_domain() as some
@@ -251,6 +252,7 @@ iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
goto out_abort;
}
hwpt->domain->owner = ops;
+ iommu_domain_set_sw_msi(hwpt->domain, iommufd_sw_msi);
if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
rc = -EINVAL;
@@ -307,6 +309,7 @@ iommufd_viommu_alloc_hwpt_nested(struct iommufd_viommu *viommu, u32 flags,
goto out_abort;
}
hwpt->domain->owner = viommu->iommu_dev->ops;
+ iommu_domain_set_sw_msi(hwpt->domain, iommufd_sw_msi);
if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
rc = -EINVAL;
@@ -406,10 +409,10 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
}
hwpt->fault = fault;
hwpt->domain->iopf_handler = iommufd_fault_iopf_handler;
- hwpt->domain->fault_data = hwpt;
refcount_inc(&fault->obj.users);
iommufd_put_object(ucmd->ictx, &fault->obj);
}
+ hwpt->domain->iommufd_hwpt = hwpt;
cmd->out_hwpt_id = hwpt->obj.id;
rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index 0b1bafc7fd99..246297452a44 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -19,6 +19,22 @@ struct iommu_group;
struct iommu_option;
struct iommufd_device;
+struct iommufd_sw_msi_map {
+ struct list_head sw_msi_item;
+ phys_addr_t sw_msi_start;
+ phys_addr_t msi_addr;
+ unsigned int pgoff;
+ unsigned int id;
+};
+
+/* Bitmap of struct iommufd_sw_msi_map::id */
+struct iommufd_sw_msi_maps {
+ DECLARE_BITMAP(bitmap, 64);
+};
+
+int iommufd_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
+ phys_addr_t msi_addr);
+
struct iommufd_ctx {
struct file *file;
struct xarray objects;
@@ -26,6 +42,10 @@ struct iommufd_ctx {
wait_queue_head_t destroy_wait;
struct rw_semaphore ioas_creation_lock;
+ struct mutex sw_msi_lock;
+ struct list_head sw_msi_list;
+ unsigned int sw_msi_id;
+
u8 account_mode;
/* Compatibility with VFIO no iommu */
u8 no_iommu_mode;
@@ -283,10 +303,10 @@ struct iommufd_hwpt_paging {
struct iommufd_ioas *ioas;
bool auto_domain : 1;
bool enforce_cache_coherency : 1;
- bool msi_cookie : 1;
bool nest_parent : 1;
/* Head at iommufd_ioas::hwpt_list */
struct list_head hwpt_item;
+ struct iommufd_sw_msi_maps present_sw_msi;
};
struct iommufd_hwpt_nested {
@@ -383,6 +403,7 @@ struct iommufd_group {
struct iommu_group *group;
struct iommufd_hw_pagetable *hwpt;
struct list_head device_list;
+ struct iommufd_sw_msi_maps required_sw_msi;
phys_addr_t sw_msi_start;
};
@@ -496,43 +517,10 @@ int iommufd_fault_alloc(struct iommufd_ucmd *ucmd);
void iommufd_fault_destroy(struct iommufd_object *obj);
int iommufd_fault_iopf_handler(struct iopf_group *group);
-int iommufd_fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev);
-void iommufd_fault_domain_detach_dev(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev);
-int iommufd_fault_domain_replace_dev(struct iommufd_device *idev,
- struct iommufd_hw_pagetable *hwpt,
- struct iommufd_hw_pagetable *old);
-
-static inline int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev)
-{
- if (hwpt->fault)
- return iommufd_fault_domain_attach_dev(hwpt, idev);
-
- return iommu_attach_group(hwpt->domain, idev->igroup->group);
-}
-
-static inline void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev)
-{
- if (hwpt->fault) {
- iommufd_fault_domain_detach_dev(hwpt, idev);
- return;
- }
-
- iommu_detach_group(hwpt->domain, idev->igroup->group);
-}
-
-static inline int iommufd_hwpt_replace_device(struct iommufd_device *idev,
- struct iommufd_hw_pagetable *hwpt,
- struct iommufd_hw_pagetable *old)
-{
- if (old->fault || hwpt->fault)
- return iommufd_fault_domain_replace_dev(idev, hwpt, old);
-
- return iommu_group_replace_domain(idev->igroup->group, hwpt->domain);
-}
+int iommufd_fault_iopf_enable(struct iommufd_device *idev);
+void iommufd_fault_iopf_disable(struct iommufd_device *idev);
+void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
+ struct iommufd_attach_handle *handle);
static inline struct iommufd_viommu *
iommufd_get_viommu(struct iommufd_ucmd *ucmd, u32 id)
diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index ccf616462a1c..b6fa9fd11bc1 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -227,6 +227,8 @@ static int iommufd_fops_open(struct inode *inode, struct file *filp)
xa_init(&ictx->groups);
ictx->file = filp;
init_waitqueue_head(&ictx->destroy_wait);
+ mutex_init(&ictx->sw_msi_lock);
+ INIT_LIST_HEAD(&ictx->sw_msi_list);
filp->private_data = ictx;
return 0;
}
@@ -234,6 +236,8 @@ static int iommufd_fops_open(struct inode *inode, struct file *filp)
static int iommufd_fops_release(struct inode *inode, struct file *filp)
{
struct iommufd_ctx *ictx = filp->private_data;
+ struct iommufd_sw_msi_map *next;
+ struct iommufd_sw_msi_map *cur;
struct iommufd_object *obj;
/*
@@ -262,6 +266,11 @@ static int iommufd_fops_release(struct inode *inode, struct file *filp)
break;
}
WARN_ON(!xa_empty(&ictx->groups));
+
+ mutex_destroy(&ictx->sw_msi_lock);
+ list_for_each_entry_safe(cur, next, &ictx->sw_msi_list, sw_msi_item)
+ kfree(cur);
+
kfree(ictx);
return 0;
}
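
Release now walks the accumulated sw_msi_list and frees every node; because nodes disappear during the walk, the _safe iterator is required since it caches the next pointer before the current node is freed. A minimal sketch of the idiom, with hypothetical names:

#include <linux/list.h>
#include <linux/slab.h>

struct msi_map_node {
        struct list_head item;
};

static void free_all_nodes(struct list_head *head)
{
        struct msi_map_node *cur, *next;

        list_for_each_entry_safe(cur, next, head, item)
                kfree(cur);     /* safe: 'next' was read before 'cur' was freed */
}
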
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index a565b9e40f4a..66824982e05f 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -27,11 +27,20 @@
#include <linux/spinlock.h>
#include <linux/string_choices.h>
#include <asm/barrier.h>
-#include <asm/dma-iommu.h>
#include <dt-bindings/memory/mtk-memory-port.h>
#include <dt-bindings/memory/mt2701-larb-port.h>
#include <soc/mediatek/smi.h>
+#if defined(CONFIG_ARM)
+#include <asm/dma-iommu.h>
+#else
+#define arm_iommu_create_mapping(...) NULL
+#define arm_iommu_attach_device(...) -ENODEV
+struct dma_iommu_mapping {
+ struct iommu_domain *domain;
+};
+#endif
+
#define REG_MMU_PT_BASE_ADDR 0x000
#define F_ALL_INVLD 0x2
@@ -446,22 +455,13 @@ static int mtk_iommu_v1_create_mapping(struct device *dev,
static struct iommu_device *mtk_iommu_v1_probe_device(struct device *dev)
{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct iommu_fwspec *fwspec = NULL;
struct of_phandle_args iommu_spec;
struct mtk_iommu_v1_data *data;
int err, idx = 0, larbid, larbidx;
struct device_link *link;
struct device *larbdev;
- /*
- * In the deferred case, free the existed fwspec.
- * Always initialize the fwspec internally.
- */
- if (fwspec) {
- iommu_fwspec_free(dev);
- fwspec = dev_iommu_fwspec_get(dev);
- }
-
while (!of_parse_phandle_with_args(dev->of_node, "iommus",
"#iommu-cells",
idx, &iommu_spec)) {
@@ -476,6 +476,9 @@ static struct iommu_device *mtk_iommu_v1_probe_device(struct device *dev)
idx++;
}
+ if (!fwspec)
+ return ERR_PTR(-ENODEV);
+
data = dev_iommu_priv_get(dev);
/* Link the consumer device with the smi-larb device(supplier) */
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 97987cd78da9..6b989a62def2 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -116,6 +116,7 @@ static void of_pci_check_device_ats(struct device *dev, struct device_node *np)
int of_iommu_configure(struct device *dev, struct device_node *master_np,
const u32 *id)
{
+ bool dev_iommu_present;
int err;
if (!master_np)
@@ -127,6 +128,7 @@ int of_iommu_configure(struct device *dev, struct device_node *master_np,
mutex_unlock(&iommu_probe_device_lock);
return 0;
}
+ dev_iommu_present = dev->iommu;
/*
* We don't currently walk up the tree looking for a parent IOMMU.
@@ -147,11 +149,18 @@ int of_iommu_configure(struct device *dev, struct device_node *master_np,
err = of_iommu_configure_device(master_np, dev, id);
}
- if (err)
+ if (err && dev_iommu_present)
iommu_fwspec_free(dev);
+ else if (err && dev->iommu)
+ dev_iommu_free(dev);
mutex_unlock(&iommu_probe_device_lock);
- if (!err && dev->bus)
+ /*
+ * If we're not on the iommu_probe_device() path (as indicated by the
+ * initial dev->iommu) then try to simulate it. This should no longer
+ * happen unless of_dma_configure() is being misused outside bus code.
+ */
+ if (!err && dev->bus && !dev_iommu_present)
err = iommu_probe_device(dev);
if (err && err != -EPROBE_DEFER)
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 323cc665c357..af4cc91b2bbf 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -88,6 +88,7 @@ struct rk_iommu_domain {
dma_addr_t dt_dma;
spinlock_t iommus_lock; /* lock for iommus list */
spinlock_t dt_lock; /* lock for modifying page directory table */
+ struct device *dma_dev;
struct iommu_domain domain;
};
@@ -123,7 +124,6 @@ struct rk_iommudata {
struct rk_iommu *iommu;
};
-static struct device *dma_dev;
static const struct rk_iommu_ops *rk_ops;
static struct iommu_domain rk_identity_domain;
@@ -132,7 +132,7 @@ static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
{
size_t size = count * sizeof(u32); /* count of u32 entry */
- dma_sync_single_for_device(dma_dev, dma, size, DMA_TO_DEVICE);
+ dma_sync_single_for_device(dom->dma_dev, dma, size, DMA_TO_DEVICE);
}
static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
@@ -734,9 +734,9 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
if (!page_table)
return ERR_PTR(-ENOMEM);
- pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
- if (dma_mapping_error(dma_dev, pt_dma)) {
- dev_err(dma_dev, "DMA mapping error while allocating page table\n");
+ pt_dma = dma_map_single(rk_domain->dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(rk_domain->dma_dev, pt_dma)) {
+ dev_err(rk_domain->dma_dev, "DMA mapping error while allocating page table\n");
iommu_free_page(page_table);
return ERR_PTR(-ENOMEM);
}
@@ -1051,9 +1051,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
{
struct rk_iommu_domain *rk_domain;
-
- if (!dma_dev)
- return NULL;
+ struct rk_iommu *iommu;
rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
if (!rk_domain)
@@ -1068,10 +1066,12 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
if (!rk_domain->dt)
goto err_free_domain;
- rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
+ iommu = rk_iommu_from_dev(dev);
+ rk_domain->dma_dev = iommu->dev;
+ rk_domain->dt_dma = dma_map_single(rk_domain->dma_dev, rk_domain->dt,
SPAGE_SIZE, DMA_TO_DEVICE);
- if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) {
- dev_err(dma_dev, "DMA map error for DT\n");
+ if (dma_mapping_error(rk_domain->dma_dev, rk_domain->dt_dma)) {
+ dev_err(rk_domain->dma_dev, "DMA map error for DT\n");
goto err_free_dt;
}
@@ -1105,13 +1105,13 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
if (rk_dte_is_pt_valid(dte)) {
phys_addr_t pt_phys = rk_ops->pt_address(dte);
u32 *page_table = phys_to_virt(pt_phys);
- dma_unmap_single(dma_dev, pt_phys,
+ dma_unmap_single(rk_domain->dma_dev, pt_phys,
SPAGE_SIZE, DMA_TO_DEVICE);
iommu_free_page(page_table);
}
}
- dma_unmap_single(dma_dev, rk_domain->dt_dma,
+ dma_unmap_single(rk_domain->dma_dev, rk_domain->dt_dma,
SPAGE_SIZE, DMA_TO_DEVICE);
iommu_free_page(rk_domain->dt);
@@ -1148,12 +1148,12 @@ static int rk_iommu_of_xlate(struct device *dev,
struct platform_device *iommu_dev;
struct rk_iommudata *data;
- data = devm_kzalloc(dma_dev, sizeof(*data), GFP_KERNEL);
+ iommu_dev = of_find_device_by_node(args->np);
+
+ data = devm_kzalloc(&iommu_dev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- iommu_dev = of_find_device_by_node(args->np);
-
data->iommu = platform_get_drvdata(iommu_dev);
data->iommu->domain = &rk_identity_domain;
dev_iommu_priv_set(dev, data);
@@ -1256,22 +1256,6 @@ static int rk_iommu_probe(struct platform_device *pdev)
if (err)
return err;
- err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
- if (err)
- goto err_unprepare_clocks;
-
- err = iommu_device_register(&iommu->iommu, &rk_iommu_ops, dev);
- if (err)
- goto err_remove_sysfs;
-
- /*
- * Use the first registered IOMMU device for domain to use with DMA
- * API, since a domain might not physically correspond to a single
- * IOMMU device..
- */
- if (!dma_dev)
- dma_dev = &pdev->dev;
-
pm_runtime_enable(dev);
for (i = 0; i < iommu->num_irq; i++) {
@@ -1290,12 +1274,19 @@ static int rk_iommu_probe(struct platform_device *pdev)
dma_set_mask_and_coherent(dev, rk_ops->dma_bit_mask);
+ err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
+ if (err)
+ goto err_pm_disable;
+
+ err = iommu_device_register(&iommu->iommu, &rk_iommu_ops, dev);
+ if (err)
+ goto err_remove_sysfs;
+
return 0;
-err_pm_disable:
- pm_runtime_disable(dev);
err_remove_sysfs:
iommu_device_sysfs_remove(&iommu->iommu);
-err_unprepare_clocks:
+err_pm_disable:
+ pm_runtime_disable(dev);
clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
return err;
}
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index fbdeded3d48b..e1c76e0f9c2b 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -16,7 +16,7 @@
#include "dma-iommu.h"
-static const struct iommu_ops s390_iommu_ops;
+static const struct iommu_ops s390_iommu_ops, s390_iommu_rtr_ops;
static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
@@ -381,6 +381,46 @@ static void zdev_s390_domain_update(struct zpci_dev *zdev,
spin_unlock_irqrestore(&zdev->dom_lock, flags);
}
+static int s390_iommu_domain_reg_ioat(struct zpci_dev *zdev,
+ struct iommu_domain *domain, u8 *status)
+{
+ struct s390_domain *s390_domain;
+ int rc = 0;
+ u64 iota;
+
+ switch (domain->type) {
+ case IOMMU_DOMAIN_IDENTITY:
+ rc = zpci_register_ioat(zdev, 0, zdev->start_dma,
+ zdev->end_dma, 0, status);
+ break;
+ case IOMMU_DOMAIN_BLOCKED:
+ /* Nothing to do in this case */
+ break;
+ default:
+ s390_domain = to_s390_domain(domain);
+ iota = virt_to_phys(s390_domain->dma_table) |
+ ZPCI_IOTA_RTTO_FLAG;
+ rc = zpci_register_ioat(zdev, 0, zdev->start_dma,
+ zdev->end_dma, iota, status);
+ }
+
+ return rc;
+}
+
+int zpci_iommu_register_ioat(struct zpci_dev *zdev, u8 *status)
+{
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&zdev->dom_lock, flags);
+
+ rc = s390_iommu_domain_reg_ioat(zdev, zdev->s390_domain, status);
+
+ spin_unlock_irqrestore(&zdev->dom_lock, flags);
+
+ return rc;
+}
+
static int blocking_domain_attach_device(struct iommu_domain *domain,
struct device *dev)
{
@@ -392,9 +432,11 @@ static int blocking_domain_attach_device(struct iommu_domain *domain,
return 0;
s390_domain = to_s390_domain(zdev->s390_domain);
- spin_lock_irqsave(&s390_domain->list_lock, flags);
- list_del_rcu(&zdev->iommu_list);
- spin_unlock_irqrestore(&s390_domain->list_lock, flags);
+ if (zdev->dma_table) {
+ spin_lock_irqsave(&s390_domain->list_lock, flags);
+ list_del_rcu(&zdev->iommu_list);
+ spin_unlock_irqrestore(&s390_domain->list_lock, flags);
+ }
zpci_unregister_ioat(zdev, 0);
zdev->dma_table = NULL;
@@ -422,8 +464,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
blocking_domain_attach_device(&blocking_domain, dev);
/* If we fail now DMA remains blocked via blocking domain */
- cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
- virt_to_phys(s390_domain->dma_table), &status);
+ cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);
if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
return -EIO;
zdev->dma_table = s390_domain->dma_table;
@@ -723,7 +764,13 @@ int zpci_init_iommu(struct zpci_dev *zdev)
if (rc)
goto out_err;
- rc = iommu_device_register(&zdev->iommu_dev, &s390_iommu_ops, NULL);
+ if (zdev->rtr_avail) {
+ rc = iommu_device_register(&zdev->iommu_dev,
+ &s390_iommu_rtr_ops, NULL);
+ } else {
+ rc = iommu_device_register(&zdev->iommu_dev, &s390_iommu_ops,
+ NULL);
+ }
if (rc)
goto out_sysfs;
@@ -787,6 +834,39 @@ static int __init s390_iommu_init(void)
}
subsys_initcall(s390_iommu_init);
+static int s390_attach_dev_identity(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct zpci_dev *zdev = to_zpci_dev(dev);
+ u8 status;
+ int cc;
+
+ blocking_domain_attach_device(&blocking_domain, dev);
+
+ /* If we fail now DMA remains blocked via blocking domain */
+ cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);
+
+ /*
+ * If the device is undergoing error recovery the reset code
+ * will re-establish the new domain.
+ */
+ if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
+ return -EIO;
+
+ zdev_s390_domain_update(zdev, domain);
+
+ return 0;
+}
+
+static const struct iommu_domain_ops s390_identity_ops = {
+ .attach_dev = s390_attach_dev_identity,
+};
+
+static struct iommu_domain s390_identity_domain = {
+ .type = IOMMU_DOMAIN_IDENTITY,
+ .ops = &s390_identity_ops,
+};
+
static struct iommu_domain blocking_domain = {
.type = IOMMU_DOMAIN_BLOCKED,
.ops = &(const struct iommu_domain_ops) {
@@ -794,23 +874,31 @@ static struct iommu_domain blocking_domain = {
}
};
-static const struct iommu_ops s390_iommu_ops = {
- .blocked_domain = &blocking_domain,
- .release_domain = &blocking_domain,
- .capable = s390_iommu_capable,
- .domain_alloc_paging = s390_domain_alloc_paging,
- .probe_device = s390_iommu_probe_device,
- .device_group = generic_device_group,
- .pgsize_bitmap = SZ_4K,
- .get_resv_regions = s390_iommu_get_resv_regions,
- .default_domain_ops = &(const struct iommu_domain_ops) {
- .attach_dev = s390_iommu_attach_device,
- .map_pages = s390_iommu_map_pages,
- .unmap_pages = s390_iommu_unmap_pages,
- .flush_iotlb_all = s390_iommu_flush_iotlb_all,
- .iotlb_sync = s390_iommu_iotlb_sync,
- .iotlb_sync_map = s390_iommu_iotlb_sync_map,
- .iova_to_phys = s390_iommu_iova_to_phys,
- .free = s390_domain_free,
+#define S390_IOMMU_COMMON_OPS() \
+ .blocked_domain = &blocking_domain, \
+ .release_domain = &blocking_domain, \
+ .capable = s390_iommu_capable, \
+ .domain_alloc_paging = s390_domain_alloc_paging, \
+ .probe_device = s390_iommu_probe_device, \
+ .device_group = generic_device_group, \
+ .pgsize_bitmap = SZ_4K, \
+ .get_resv_regions = s390_iommu_get_resv_regions, \
+ .default_domain_ops = &(const struct iommu_domain_ops) { \
+ .attach_dev = s390_iommu_attach_device, \
+ .map_pages = s390_iommu_map_pages, \
+ .unmap_pages = s390_iommu_unmap_pages, \
+ .flush_iotlb_all = s390_iommu_flush_iotlb_all, \
+ .iotlb_sync = s390_iommu_iotlb_sync, \
+ .iotlb_sync_map = s390_iommu_iotlb_sync_map, \
+ .iova_to_phys = s390_iommu_iova_to_phys, \
+ .free = s390_domain_free, \
}
+
+static const struct iommu_ops s390_iommu_ops = {
+ S390_IOMMU_COMMON_OPS()
+};
+
+static const struct iommu_ops s390_iommu_rtr_ops = {
+ .identity_domain = &s390_identity_domain,
+ S390_IOMMU_COMMON_OPS()
};
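
The S390_IOMMU_COMMON_OPS() macro above shares one set of designated initializers between two const ops tables so that only the .identity_domain member differs. A standalone sketch of the same pattern, using an invented struct ops and COMMON_OPS() rather than the real iommu_ops layout:

/* Sketch: share designated initializers between two const ops tables. */
#include <stdio.h>

struct ops {
	const char *name;
	int (*probe)(void);
	int (*identity)(void);	/* only set in the second variant */
};

static int common_probe(void)    { return 0; }
static int identity_attach(void) { return 0; }

#define COMMON_OPS() \
	.name  = "demo", \
	.probe = common_probe

static const struct ops base_ops = {
	COMMON_OPS(),
};

static const struct ops rtr_ops = {
	.identity = identity_attach,
	COMMON_OPS(),
};

int main(void)
{
	/* Both tables share .name/.probe; only rtr_ops has .identity set. */
	printf("base: %s identity? %s\n", base_ops.name, base_ops.identity ? "yes" : "no");
	printf("rtr : %s identity? %s\n", rtr_ops.name,  rtr_ops.identity  ? "yes" : "no");
	return 0;
}
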
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 7f633bb5efef..69d353e1df84 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -846,7 +846,6 @@ static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
err = ops->of_xlate(dev, args);
if (err < 0) {
dev_err(dev, "failed to parse SW group ID: %d\n", err);
- iommu_fwspec_free(dev);
return err;
}
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index c11b9965c4ad..cec05e443083 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -28,6 +28,7 @@ config ARM_GIC_V2M
select ARM_GIC
select IRQ_MSI_LIB
select PCI_MSI
+ select IRQ_MSI_IOMMU
config GIC_NON_BANKED
bool
@@ -38,12 +39,14 @@ config ARM_GIC_V3
select PARTITION_PERCPU
select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
select HAVE_ARM_SMCCC_DISCOVERY
+ select IRQ_MSI_IOMMU
config ARM_GIC_V3_ITS
bool
select GENERIC_MSI_IRQ
select IRQ_MSI_LIB
default ARM_GIC_V3
+ select IRQ_MSI_IOMMU
config ARM_GIC_V3_ITS_FSL_MC
bool
@@ -109,6 +112,22 @@ config I8259
bool
select IRQ_DOMAIN
+config BCM2712_MIP
+ tristate "Broadcom BCM2712 MSI-X Interrupt Peripheral support"
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ default m if ARCH_BRCMSTB
+ depends on ARM_GIC
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN_HIERARCHY
+ select GENERIC_MSI_IRQ
+ select IRQ_MSI_LIB
+ help
+ Enable support for the Broadcom BCM2712 MSI-X target peripheral
+ (MIP) needed by brcmstb PCIe to handle MSI-X interrupts on
+ Raspberry Pi 5.
+
+ If unsure say n.
+
config BCM6345_L1_IRQ
bool
select GENERIC_IRQ_CHIP
@@ -408,6 +427,7 @@ config LS_EXTIRQ
config LS_SCFG_MSI
def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
+ select IRQ_MSI_IOMMU
depends on PCI_MSI
config PARTITION_PERCPU
@@ -590,13 +610,7 @@ config RISCV_IMSIC
select IRQ_DOMAIN_HIERARCHY
select GENERIC_IRQ_MATRIX_ALLOCATOR
select GENERIC_MSI_IRQ
-
-config RISCV_IMSIC_PCI
- bool
- depends on RISCV_IMSIC
- depends on PCI
- depends on PCI_MSI
- default RISCV_IMSIC
+ select IRQ_MSI_LIB
config SIFIVE_PLIC
bool
@@ -752,6 +766,18 @@ config MCHP_EIC
help
Support for Microchip External Interrupt Controller.
+config SOPHGO_SG2042_MSI
+ bool "Sophgo SG2042 MSI Controller"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ depends on PCI
+ select IRQ_DOMAIN_HIERARCHY
+ select IRQ_MSI_LIB
+ select PCI_MSI
+ help
+ Support for the Sophgo SG2042 MSI Controller.
+ This on-chip interrupt controller enables MSI sources to be
+	  routed to the primary PLIC controller on the SoC.
+
config SUNPLUS_SP7021_INTC
bool "Sunplus SP7021 interrupt controller" if COMPILE_TEST
default SOC_SP7021
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 25e9ad29b8c4..365bcea9a61f 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o
obj-$(CONFIG_XILINX_INTC) += irq-xilinx-intc.o
obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o
obj-$(CONFIG_SOC_VF610) += irq-vf610-mscm-ir.o
+obj-$(CONFIG_BCM2712_MIP) += irq-bcm2712-mip.o
obj-$(CONFIG_BCM6345_L1_IRQ) += irq-bcm6345-l1.o
obj-$(CONFIG_BCM7038_L1_IRQ) += irq-bcm7038-l1.o
obj-$(CONFIG_BCM7120_L2_IRQ) += irq-bcm7120-l2.o
@@ -128,4 +129,5 @@ obj-$(CONFIG_WPCM450_AIC) += irq-wpcm450-aic.o
obj-$(CONFIG_IRQ_IDT3243X) += irq-idt3243x.o
obj-$(CONFIG_APPLE_AIC) += irq-apple-aic.o
obj-$(CONFIG_MCHP_EIC) += irq-mchp-eic.o
+obj-$(CONFIG_SOPHGO_SG2042_MSI) += irq-sg2042-msi.o
obj-$(CONFIG_SUNPLUS_SP7021_INTC) += irq-sp7021-intc.o
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index 2b1684c60e3c..974dc088c853 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -409,15 +409,15 @@ static void __exception_irq_entry aic_handle_irq(struct pt_regs *regs)
* in use, and be cleared when coming back from the handler.
*/
if (is_kernel_in_hyp_mode() &&
- (read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EN) &&
+ (read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EL2_En) &&
read_sysreg_s(SYS_ICH_MISR_EL2) != 0) {
generic_handle_domain_irq(aic_irqc->hw_domain,
AIC_FIQ_HWIRQ(AIC_VGIC_MI));
- if (unlikely((read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EN) &&
+ if (unlikely((read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EL2_En) &&
read_sysreg_s(SYS_ICH_MISR_EL2))) {
pr_err_ratelimited("vGIC IRQ fired and not handled by KVM, disabling.\n");
- sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
+ sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EL2_En, 0);
}
}
}
@@ -841,7 +841,7 @@ static int aic_init_cpu(unsigned int cpu)
VM_TMR_FIQ_ENABLE_V | VM_TMR_FIQ_ENABLE_P, 0);
/* vGIC maintenance IRQ */
- sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
+ sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EL2_En, 0);
}
/* PMC FIQ */
diff --git a/drivers/irqchip/irq-bcm2712-mip.c b/drivers/irqchip/irq-bcm2712-mip.c
new file mode 100644
index 000000000000..49a19db2d1e1
--- /dev/null
+++ b/drivers/irqchip/irq-bcm2712-mip.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Raspberry Pi Ltd., All Rights Reserved.
+ * Copyright (c) 2024 SUSE
+ */
+
+#include <linux/bitmap.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+#include "irq-msi-lib.h"
+
+#define MIP_INT_RAISE 0x00
+#define MIP_INT_CLEAR 0x10
+#define MIP_INT_CFGL_HOST 0x20
+#define MIP_INT_CFGH_HOST 0x30
+#define MIP_INT_MASKL_HOST 0x40
+#define MIP_INT_MASKH_HOST 0x50
+#define MIP_INT_MASKL_VPU 0x60
+#define MIP_INT_MASKH_VPU 0x70
+#define MIP_INT_STATUSL_HOST 0x80
+#define MIP_INT_STATUSH_HOST 0x90
+#define MIP_INT_STATUSL_VPU 0xa0
+#define MIP_INT_STATUSH_VPU 0xb0
+
+/**
+ * struct mip_priv - MSI-X interrupt controller data
+ * @lock: Used to protect bitmap alloc/free
+ * @base: Base address of MMIO area
+ * @msg_addr: PCIe MSI-X address
+ * @msi_base: MSI base
+ * @num_msis: Count of MSIs
+ * @msi_offset: MSI offset
+ * @bitmap: A bitmap for hwirqs
+ * @parent: Parent domain (GIC)
+ * @dev: A device pointer
+ */
+struct mip_priv {
+ spinlock_t lock;
+ void __iomem *base;
+ u64 msg_addr;
+ u32 msi_base;
+ u32 num_msis;
+ u32 msi_offset;
+ unsigned long *bitmap;
+ struct irq_domain *parent;
+ struct device *dev;
+};
+
+static void mip_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct mip_priv *mip = irq_data_get_irq_chip_data(d);
+
+ msg->address_hi = upper_32_bits(mip->msg_addr);
+ msg->address_lo = lower_32_bits(mip->msg_addr);
+ msg->data = d->hwirq;
+}
+
+static struct irq_chip mip_middle_irq_chip = {
+ .name = "MIP",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_compose_msi_msg = mip_compose_msi_msg,
+};
+
+static int mip_alloc_hwirq(struct mip_priv *mip, unsigned int nr_irqs)
+{
+ guard(spinlock)(&mip->lock);
+ return bitmap_find_free_region(mip->bitmap, mip->num_msis, ilog2(nr_irqs));
+}
+
+static void mip_free_hwirq(struct mip_priv *mip, unsigned int hwirq,
+ unsigned int nr_irqs)
+{
+ guard(spinlock)(&mip->lock);
+ bitmap_release_region(mip->bitmap, hwirq, ilog2(nr_irqs));
+}
+
+static int mip_middle_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct mip_priv *mip = domain->host_data;
+ struct irq_fwspec fwspec = {0};
+ unsigned int hwirq, i;
+ struct irq_data *irqd;
+ int irq, ret;
+
+ irq = mip_alloc_hwirq(mip, nr_irqs);
+ if (irq < 0)
+ return irq;
+
+ hwirq = irq + mip->msi_offset;
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 3;
+ fwspec.param[0] = 0;
+ fwspec.param[1] = hwirq + mip->msi_base;
+ fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &fwspec);
+ if (ret)
+ goto err_free_hwirq;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irqd = irq_domain_get_irq_data(domain->parent, virq + i);
+ irqd->chip->irq_set_type(irqd, IRQ_TYPE_EDGE_RISING);
+
+ ret = irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &mip_middle_irq_chip, mip);
+ if (ret)
+ goto err_free;
+
+ irqd = irq_get_irq_data(virq + i);
+ irqd_set_single_target(irqd);
+ irqd_set_affinity_on_activate(irqd);
+ }
+
+ return 0;
+
+err_free:
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+err_free_hwirq:
+ mip_free_hwirq(mip, irq, nr_irqs);
+ return ret;
+}
+
+static void mip_middle_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *irqd = irq_domain_get_irq_data(domain, virq);
+ struct mip_priv *mip;
+ unsigned int hwirq;
+
+ if (!irqd)
+ return;
+
+ mip = irq_data_get_irq_chip_data(irqd);
+ hwirq = irqd_to_hwirq(irqd);
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+ mip_free_hwirq(mip, hwirq - mip->msi_offset, nr_irqs);
+}
+
+static const struct irq_domain_ops mip_middle_domain_ops = {
+ .select = msi_lib_irq_domain_select,
+ .alloc = mip_middle_domain_alloc,
+ .free = mip_middle_domain_free,
+};
+
+#define MIP_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT)
+
+#define MIP_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_MULTI_PCI_MSI | \
+ MSI_FLAG_PCI_MSIX)
+
+static const struct msi_parent_ops mip_msi_parent_ops = {
+ .supported_flags = MIP_MSI_FLAGS_SUPPORTED,
+ .required_flags = MIP_MSI_FLAGS_REQUIRED,
+ .bus_select_token = DOMAIN_BUS_GENERIC_MSI,
+ .bus_select_mask = MATCH_PCI_MSI,
+ .prefix = "MIP-MSI-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
+static int mip_init_domains(struct mip_priv *mip, struct device_node *np)
+{
+ struct irq_domain *middle;
+
+ middle = irq_domain_add_hierarchy(mip->parent, 0, mip->num_msis, np,
+ &mip_middle_domain_ops, mip);
+ if (!middle)
+ return -ENOMEM;
+
+ irq_domain_update_bus_token(middle, DOMAIN_BUS_GENERIC_MSI);
+ middle->dev = mip->dev;
+ middle->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+ middle->msi_parent_ops = &mip_msi_parent_ops;
+
+ /*
+ * All MSI-X unmasked for the host, masked for the VPU, and edge-triggered.
+ */
+ writel(0, mip->base + MIP_INT_MASKL_HOST);
+ writel(0, mip->base + MIP_INT_MASKH_HOST);
+ writel(~0, mip->base + MIP_INT_MASKL_VPU);
+ writel(~0, mip->base + MIP_INT_MASKH_VPU);
+ writel(~0, mip->base + MIP_INT_CFGL_HOST);
+ writel(~0, mip->base + MIP_INT_CFGH_HOST);
+
+ return 0;
+}
+
+static int mip_parse_dt(struct mip_priv *mip, struct device_node *np)
+{
+ struct of_phandle_args args;
+ u64 size;
+ int ret;
+
+ ret = of_property_read_u32(np, "brcm,msi-offset", &mip->msi_offset);
+ if (ret)
+ mip->msi_offset = 0;
+
+ ret = of_parse_phandle_with_args(np, "msi-ranges", "#interrupt-cells",
+ 0, &args);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32_index(np, "msi-ranges", args.args_count + 1,
+ &mip->num_msis);
+ if (ret)
+ goto err_put;
+
+ ret = of_property_read_reg(np, 1, &mip->msg_addr, &size);
+ if (ret)
+ goto err_put;
+
+ mip->msi_base = args.args[1];
+
+ mip->parent = irq_find_host(args.np);
+ if (!mip->parent)
+ ret = -EINVAL;
+
+err_put:
+ of_node_put(args.np);
+ return ret;
+}
+
+static int __init mip_of_msi_init(struct device_node *node, struct device_node *parent)
+{
+ struct platform_device *pdev;
+ struct mip_priv *mip;
+ int ret;
+
+ pdev = of_find_device_by_node(node);
+ of_node_put(node);
+ if (!pdev)
+ return -EPROBE_DEFER;
+
+ mip = kzalloc(sizeof(*mip), GFP_KERNEL);
+ if (!mip)
+ return -ENOMEM;
+
+ spin_lock_init(&mip->lock);
+ mip->dev = &pdev->dev;
+
+ ret = mip_parse_dt(mip, node);
+ if (ret)
+ goto err_priv;
+
+ mip->base = of_iomap(node, 0);
+ if (!mip->base) {
+ ret = -ENXIO;
+ goto err_priv;
+ }
+
+ mip->bitmap = bitmap_zalloc(mip->num_msis, GFP_KERNEL);
+ if (!mip->bitmap) {
+ ret = -ENOMEM;
+ goto err_base;
+ }
+
+ ret = mip_init_domains(mip, node);
+ if (ret)
+ goto err_map;
+
+ dev_dbg(&pdev->dev, "MIP: MSI-X count: %u, base: %u, offset: %u, msg_addr: %llx\n",
+ mip->num_msis, mip->msi_base, mip->msi_offset, mip->msg_addr);
+
+ return 0;
+
+err_map:
+ bitmap_free(mip->bitmap);
+err_base:
+ iounmap(mip->base);
+err_priv:
+ kfree(mip);
+ return ret;
+}
+
+IRQCHIP_PLATFORM_DRIVER_BEGIN(mip_msi)
+IRQCHIP_MATCH("brcm,bcm2712-mip", mip_of_msi_init)
+IRQCHIP_PLATFORM_DRIVER_END(mip_msi)
+MODULE_DESCRIPTION("Broadcom BCM2712 MSI-X interrupt controller");
+MODULE_AUTHOR("Phil Elwell <phil@raspberrypi.com>");
+MODULE_AUTHOR("Stanimir Varbanov <svarbanov@suse.de>");
+MODULE_LICENSE("GPL");
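
mip_alloc_hwirq() above hands out naturally aligned blocks of 2^order hwirqs via bitmap_find_free_region(bitmap, num_msis, ilog2(nr_irqs)), which is what PCI multi-MSI requires. A standalone approximation of that allocator in plain C (no kernel headers; the byte-array bitmap and helper names are ours, not the kernel API):

/* Sketch: find and claim a 2^order-aligned run of free bits. */
#include <stdio.h>
#include <stdbool.h>

#define NBITS 64

static bool test_bit(const unsigned char *map, unsigned int bit)
{
	return map[bit / 8] & (1u << (bit % 8));
}

static void set_bit(unsigned char *map, unsigned int bit)
{
	map[bit / 8] |= (1u << (bit % 8));
}

/* Returns the first free, 2^order-aligned region of 2^order bits, or -1. */
static int find_free_region(unsigned char *map, unsigned int bits, unsigned int order)
{
	unsigned int step = 1u << order, start, i;

	for (start = 0; start + step <= bits; start += step) {
		for (i = 0; i < step; i++)
			if (test_bit(map, start + i))
				break;
		if (i == step) {
			for (i = 0; i < step; i++)
				set_bit(map, start + i);
			return (int)start;
		}
	}
	return -1;
}

int main(void)
{
	unsigned char map[NBITS / 8] = { 0 };

	set_bit(map, 0);					/* pretend hwirq 0 is taken */
	printf("4 MSIs at %d\n", find_free_region(map, NBITS, 2));	/* -> 4 */
	printf("1 MSI  at %d\n", find_free_region(map, NBITS, 0));	/* -> 1 */
	return 0;
}
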
diff --git a/drivers/irqchip/irq-davinci-cp-intc.c b/drivers/irqchip/irq-davinci-cp-intc.c
index f4f8e9fadbbf..d7948c55f542 100644
--- a/drivers/irqchip/irq-davinci-cp-intc.c
+++ b/drivers/irqchip/irq-davinci-cp-intc.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
-#include <linux/irqchip/irq-davinci-cp-intc.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -154,24 +153,20 @@ static const struct irq_domain_ops davinci_cp_intc_irq_domain_ops = {
.xlate = irq_domain_xlate_onetwocell,
};
-static int __init
-davinci_cp_intc_do_init(const struct davinci_cp_intc_config *config,
- struct device_node *node)
+static int __init davinci_cp_intc_do_init(struct resource *res, unsigned int num_irqs,
+ struct device_node *node)
{
- unsigned int num_regs = BITS_TO_LONGS(config->num_irqs);
+ unsigned int num_regs = BITS_TO_LONGS(num_irqs);
int offset, irq_base;
void __iomem *req;
- req = request_mem_region(config->reg.start,
- resource_size(&config->reg),
- "davinci-cp-intc");
+ req = request_mem_region(res->start, resource_size(res), "davinci-cp-intc");
if (!req) {
pr_err("%s: register range busy\n", __func__);
return -EBUSY;
}
- davinci_cp_intc_base = ioremap(config->reg.start,
- resource_size(&config->reg));
+ davinci_cp_intc_base = ioremap(res->start, resource_size(res));
if (!davinci_cp_intc_base) {
pr_err("%s: unable to ioremap register range\n", __func__);
return -EINVAL;
@@ -184,8 +179,7 @@ davinci_cp_intc_do_init(const struct davinci_cp_intc_config *config,
/* Disable system interrupts */
for (offset = 0; offset < num_regs; offset++)
- davinci_cp_intc_write(~0,
- DAVINCI_CP_INTC_SYS_ENABLE_CLR(offset));
+ davinci_cp_intc_write(~0, DAVINCI_CP_INTC_SYS_ENABLE_CLR(offset));
/* Set to normal mode, no nesting, no priority hold */
davinci_cp_intc_write(0, DAVINCI_CP_INTC_CTRL);
@@ -193,28 +187,25 @@ davinci_cp_intc_do_init(const struct davinci_cp_intc_config *config,
/* Clear system interrupt status */
for (offset = 0; offset < num_regs; offset++)
- davinci_cp_intc_write(~0,
- DAVINCI_CP_INTC_SYS_STAT_CLR(offset));
+ davinci_cp_intc_write(~0, DAVINCI_CP_INTC_SYS_STAT_CLR(offset));
/* Enable nIRQ (what about nFIQ?) */
davinci_cp_intc_write(1, DAVINCI_CP_INTC_HOST_ENABLE_IDX_SET);
+ /* 4 channels per register */
+ num_regs = (num_irqs + 3) >> 2;
/* Default all priorities to channel 7. */
- num_regs = (config->num_irqs + 3) >> 2; /* 4 channels per register */
for (offset = 0; offset < num_regs; offset++)
- davinci_cp_intc_write(0x07070707,
- DAVINCI_CP_INTC_CHAN_MAP(offset));
+ davinci_cp_intc_write(0x07070707, DAVINCI_CP_INTC_CHAN_MAP(offset));
- irq_base = irq_alloc_descs(-1, 0, config->num_irqs, 0);
+ irq_base = irq_alloc_descs(-1, 0, num_irqs, 0);
if (irq_base < 0) {
- pr_err("%s: unable to allocate interrupt descriptors: %d\n",
- __func__, irq_base);
+ pr_err("%s: unable to allocate interrupt descriptors: %d\n", __func__, irq_base);
return irq_base;
}
- davinci_cp_intc_irq_domain = irq_domain_add_legacy(
- node, config->num_irqs, irq_base, 0,
- &davinci_cp_intc_irq_domain_ops, NULL);
+ davinci_cp_intc_irq_domain = irq_domain_add_legacy(node, num_irqs, irq_base, 0,
+ &davinci_cp_intc_irq_domain_ops, NULL);
if (!davinci_cp_intc_irq_domain) {
pr_err("%s: unable to create an interrupt domain\n", __func__);
@@ -229,31 +220,25 @@ davinci_cp_intc_do_init(const struct davinci_cp_intc_config *config,
return 0;
}
-int __init davinci_cp_intc_init(const struct davinci_cp_intc_config *config)
-{
- return davinci_cp_intc_do_init(config, NULL);
-}
-
static int __init davinci_cp_intc_of_init(struct device_node *node,
struct device_node *parent)
{
- struct davinci_cp_intc_config config = { };
+ unsigned int num_irqs;
+ struct resource res;
int ret;
- ret = of_address_to_resource(node, 0, &config.reg);
+ ret = of_address_to_resource(node, 0, &res);
if (ret) {
- pr_err("%s: unable to get the register range from device-tree\n",
- __func__);
+ pr_err("%s: unable to get the register range from device-tree\n", __func__);
return ret;
}
- ret = of_property_read_u32(node, "ti,intc-size", &config.num_irqs);
+ ret = of_property_read_u32(node, "ti,intc-size", &num_irqs);
if (ret) {
- pr_err("%s: unable to read the 'ti,intc-size' property\n",
- __func__);
+ pr_err("%s: unable to read the 'ti,intc-size' property\n", __func__);
return ret;
}
- return davinci_cp_intc_do_init(&config, node);
+ return davinci_cp_intc_do_init(&res, num_irqs, node);
}
IRQCHIP_DECLARE(cp_intc, "ti,cp-intc", davinci_cp_intc_of_init);
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index be35c5349986..c69894861866 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -87,9 +87,6 @@ static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
phys_addr_t addr = gicv2m_get_msi_addr(v2m, data->hwirq);
- msg->address_hi = upper_32_bits(addr);
- msg->address_lo = lower_32_bits(addr);
-
if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)
msg->data = 0;
else
@@ -97,7 +94,7 @@ static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
msg->data -= v2m->spi_offset;
- iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
+ msi_msg_set_addr(irq_data_get_msi_desc(data), msg, addr);
}
static struct irq_chip gicv2m_irq_chip = {
@@ -255,6 +252,7 @@ static void __init gicv2m_teardown(void)
static struct msi_parent_ops gicv2m_msi_parent_ops = {
.supported_flags = GICV2M_MSI_FLAGS_SUPPORTED,
.required_flags = GICV2M_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
.prefix = "GICv2m-",
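
The gicv2m hunk above replaces the open-coded address_hi/address_lo writes with msi_msg_set_addr(), which also takes care of any IOMMU-remapped MSI doorbell. A standalone sketch of just the 64-bit split the removed lines performed by hand (demo struct and helper names are ours; the IOMMU rewrite is not modelled):

/* Sketch: split a 64-bit doorbell address into MSI address_hi/address_lo. */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

struct demo_msi_msg {
	uint32_t address_lo;
	uint32_t address_hi;
	uint32_t data;
};

static void demo_msg_set_addr(struct demo_msi_msg *msg, uint64_t addr)
{
	msg->address_lo = (uint32_t)(addr & 0xffffffffu);	/* lower_32_bits() */
	msg->address_hi = (uint32_t)(addr >> 32);		/* upper_32_bits() */
}

int main(void)
{
	struct demo_msi_msg msg = { .data = 42 };

	demo_msg_set_addr(&msg, 0x0000000800040000ULL);
	printf("hi=0x%08" PRIx32 " lo=0x%08" PRIx32 " data=%" PRIu32 "\n",
	       msg.address_hi, msg.address_lo, msg.data);
	return 0;
}
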
diff --git a/drivers/irqchip/irq-gic-v3-its-msi-parent.c b/drivers/irqchip/irq-gic-v3-its-msi-parent.c
index e150365fbe89..bdb04c808148 100644
--- a/drivers/irqchip/irq-gic-v3-its-msi-parent.c
+++ b/drivers/irqchip/irq-gic-v3-its-msi-parent.c
@@ -203,6 +203,7 @@ static bool its_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
const struct msi_parent_ops gic_v3_its_msi_parent_ops = {
.supported_flags = ITS_MSI_FLAGS_SUPPORTED,
.required_flags = ITS_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
.prefix = "ITS-",
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 8c3ec5734f1e..0115ad6c8259 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -205,13 +205,15 @@ static DEFINE_IDA(its_vpeid_ida);
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
+static gfp_t gfp_flags_quirk;
+
static struct page *its_alloc_pages_node(int node, gfp_t gfp,
unsigned int order)
{
struct page *page;
int ret = 0;
- page = alloc_pages_node(node, gfp, order);
+ page = alloc_pages_node(node, gfp | gfp_flags_quirk, order);
if (!page)
return NULL;
@@ -1809,17 +1811,10 @@ static u64 its_irq_get_msi_base(struct its_device *its_dev)
static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
- struct its_node *its;
- u64 addr;
-
- its = its_dev->its;
- addr = its->get_msi_base(its_dev);
-
- msg->address_lo = lower_32_bits(addr);
- msg->address_hi = upper_32_bits(addr);
- msg->data = its_get_event_id(d);
- iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
+ msg->data = its_get_event_id(d);
+ msi_msg_set_addr(irq_data_get_msi_desc(d), msg,
+ its_dev->its->get_msi_base(its_dev));
}
static int its_irq_set_irqchip_state(struct irq_data *d,
@@ -4887,6 +4882,17 @@ static bool __maybe_unused its_enable_quirk_hip09_162100801(void *data)
return true;
}
+static bool __maybe_unused its_enable_rk3568002(void *data)
+{
+ if (!of_machine_is_compatible("rockchip,rk3566") &&
+ !of_machine_is_compatible("rockchip,rk3568"))
+ return false;
+
+ gfp_flags_quirk |= GFP_DMA32;
+
+ return true;
+}
+
static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
{
@@ -4954,6 +4960,14 @@ static const struct gic_quirk its_quirks[] = {
.property = "dma-noncoherent",
.init = its_set_non_coherent,
},
+#ifdef CONFIG_ROCKCHIP_ERRATUM_3568002
+ {
+ .desc = "ITS: Rockchip erratum RK3568002",
+ .iidr = 0x0201743b,
+ .mask = 0xffffffff,
+ .init = its_enable_rk3568002,
+ },
+#endif
{
}
};
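
The RK3568002 workaround above ORs GFP_DMA32 into every subsequent ITS page allocation once the quirk matches at init time. A standalone sketch of that "set a quirk flag once, constrain all later allocations" shape, using an invented FLAG_BELOW_4G instead of real GFP flags:

/* Sketch: a one-shot board quirk that widens the flags of all later allocations. */
#include <stdlib.h>
#include <stdio.h>

static unsigned int alloc_flags_quirk;	/* OR-ed into every allocation */
#define FLAG_BELOW_4G 0x1

static void *table_alloc(size_t size, unsigned int flags)
{
	flags |= alloc_flags_quirk;
	/* A real allocator would honour FLAG_BELOW_4G by picking a page
	 * below 4 GiB; here we only report what was requested. */
	printf("alloc %zu bytes, flags=%#x%s\n", size, flags,
	       (flags & FLAG_BELOW_4G) ? " (forced below 4G)" : "");
	return malloc(size);
}

static void apply_board_quirks(int is_rk356x)
{
	if (is_rk356x)
		alloc_flags_quirk |= FLAG_BELOW_4G;
}

int main(void)
{
	apply_board_quirks(1);
	free(table_alloc(4096, 0));
	return 0;
}
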
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
index 3fe870f8ee17..34e9ca77a8c3 100644
--- a/drivers/irqchip/irq-gic-v3-mbi.c
+++ b/drivers/irqchip/irq-gic-v3-mbi.c
@@ -147,22 +147,18 @@ static const struct irq_domain_ops mbi_domain_ops = {
static void mbi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
- msg[0].address_hi = upper_32_bits(mbi_phys_base + GICD_SETSPI_NSR);
- msg[0].address_lo = lower_32_bits(mbi_phys_base + GICD_SETSPI_NSR);
msg[0].data = data->parent_data->hwirq;
-
- iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
+ msi_msg_set_addr(irq_data_get_msi_desc(data), &msg[0],
+ mbi_phys_base + GICD_SETSPI_NSR);
}
static void mbi_compose_mbi_msg(struct irq_data *data, struct msi_msg *msg)
{
mbi_compose_msi_msg(data, msg);
- msg[1].address_hi = upper_32_bits(mbi_phys_base + GICD_CLRSPI_NSR);
- msg[1].address_lo = lower_32_bits(mbi_phys_base + GICD_CLRSPI_NSR);
msg[1].data = data->parent_data->hwirq;
-
- iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), &msg[1]);
+ msi_msg_set_addr(irq_data_get_msi_desc(data), &msg[1],
+ mbi_phys_base + GICD_CLRSPI_NSR);
}
static bool mbi_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
@@ -201,6 +197,7 @@ static bool mbi_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
static const struct msi_parent_ops gic_v3_mbi_msi_parent_ops = {
.supported_flags = MBI_MSI_FLAGS_SUPPORTED,
.required_flags = MBI_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
.prefix = "MBI-",
diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
index b0e9788c0045..afbfcce3b1e3 100644
--- a/drivers/irqchip/irq-imx-irqsteer.c
+++ b/drivers/irqchip/irq-imx-irqsteer.c
@@ -24,7 +24,7 @@
#define CHAN_MINTDIS(t) (CTRL_STRIDE_OFF(t, 3) + 0x4)
#define CHAN_MASTRSTAT(t) (CTRL_STRIDE_OFF(t, 3) + 0x8)
-#define CHAN_MAX_OUTPUT_INT 0x8
+#define CHAN_MAX_OUTPUT_INT 0xF
struct irqsteer_data {
void __iomem *regs;
@@ -228,10 +228,8 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
for (i = 0; i < data->irq_count; i++) {
data->irq[i] = irq_of_parse_and_map(np, i);
- if (!data->irq[i]) {
- ret = -EINVAL;
- goto out;
- }
+ if (!data->irq[i])
+ break;
irq_set_chained_handler_and_data(data->irq[i],
imx_irqsteer_irq_handler,
@@ -254,9 +252,13 @@ static void imx_irqsteer_remove(struct platform_device *pdev)
struct irqsteer_data *irqsteer_data = platform_get_drvdata(pdev);
int i;
- for (i = 0; i < irqsteer_data->irq_count; i++)
+ for (i = 0; i < irqsteer_data->irq_count; i++) {
+ if (!irqsteer_data->irq[i])
+ break;
+
irq_set_chained_handler_and_data(irqsteer_data->irq[i],
NULL, NULL);
+ }
irq_domain_remove(irqsteer_data->domain);
diff --git a/drivers/irqchip/irq-imx-mu-msi.c b/drivers/irqchip/irq-imx-mu-msi.c
index 4342a21de1eb..69aacdfc8bef 100644
--- a/drivers/irqchip/irq-imx-mu-msi.c
+++ b/drivers/irqchip/irq-imx-mu-msi.c
@@ -214,6 +214,7 @@ static void imx_mu_msi_irq_handler(struct irq_desc *desc)
static const struct msi_parent_ops imx_mu_msi_parent_ops = {
.supported_flags = IMX_MU_MSI_FLAGS_SUPPORTED,
.required_flags = IMX_MU_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PLATFORM_MSI,
.prefix = "MU-MSI-",
diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
index bd337ecddb40..9c62108b3ad5 100644
--- a/drivers/irqchip/irq-loongson-pch-msi.c
+++ b/drivers/irqchip/irq-loongson-pch-msi.c
@@ -146,6 +146,7 @@ static const struct irq_domain_ops pch_msi_middle_domain_ops = {
static struct msi_parent_ops pch_msi_parent_ops = {
.required_flags = PCH_MSI_FLAGS_REQUIRED,
.supported_flags = PCH_MSI_FLAGS_SUPPORTED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_mask = MATCH_PCI_MSI,
.bus_select_token = DOMAIN_BUS_NEXUS,
.prefix = "PCH-",
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
index c0e1aafe468c..3cb80796cc7c 100644
--- a/drivers/irqchip/irq-ls-scfg-msi.c
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -87,8 +87,6 @@ static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data);
- msg->address_hi = upper_32_bits(msi_data->msiir_addr);
- msg->address_lo = lower_32_bits(msi_data->msiir_addr);
msg->data = data->hwirq;
if (msi_affinity_flag) {
@@ -98,7 +96,8 @@ static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
msg->data |= cpumask_first(mask);
}
- iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
+ msi_msg_set_addr(irq_data_get_msi_desc(data), msg,
+ msi_data->msiir_addr);
}
static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index cd789fa51519..0a25536a5d07 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -26,8 +26,6 @@
/* use for A1 like chips */
#define REG_PIN_A1_SEL 0x04
-/* Used for s4 chips */
-#define REG_EDGE_POL_S4 0x1c
/*
* Note: The S905X3 datasheet reports that BOTH_EDGE is controlled by
@@ -72,6 +70,7 @@ struct meson_gpio_irq_params {
bool support_edge_both;
unsigned int edge_both_offset;
unsigned int edge_single_offset;
+ unsigned int edge_pol_reg;
unsigned int pol_low_offset;
unsigned int pin_sel_mask;
struct irq_ctl_ops ops;
@@ -105,6 +104,18 @@ struct meson_gpio_irq_params {
.pin_sel_mask = 0x7f, \
.nr_channels = 8, \
+#define INIT_MESON_A4_AO_COMMON_DATA(irqs) \
+ INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \
+ meson_a1_gpio_irq_sel_pin, \
+ meson_s4_gpio_irq_set_type) \
+ .support_edge_both = true, \
+ .edge_both_offset = 0, \
+ .edge_single_offset = 12, \
+ .edge_pol_reg = 0x8, \
+ .pol_low_offset = 0, \
+ .pin_sel_mask = 0xff, \
+ .nr_channels = 2, \
+
#define INIT_MESON_S4_COMMON_DATA(irqs) \
INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \
meson_a1_gpio_irq_sel_pin, \
@@ -112,6 +123,7 @@ struct meson_gpio_irq_params {
.support_edge_both = true, \
.edge_both_offset = 0, \
.edge_single_offset = 12, \
+ .edge_pol_reg = 0x1c, \
.pol_low_offset = 0, \
.pin_sel_mask = 0xff, \
.nr_channels = 12, \
@@ -146,6 +158,18 @@ static const struct meson_gpio_irq_params a1_params = {
INIT_MESON_A1_COMMON_DATA(62)
};
+static const struct meson_gpio_irq_params a4_params = {
+ INIT_MESON_S4_COMMON_DATA(81)
+};
+
+static const struct meson_gpio_irq_params a4_ao_params = {
+ INIT_MESON_A4_AO_COMMON_DATA(8)
+};
+
+static const struct meson_gpio_irq_params a5_params = {
+ INIT_MESON_S4_COMMON_DATA(99)
+};
+
static const struct meson_gpio_irq_params s4_params = {
INIT_MESON_S4_COMMON_DATA(82)
};
@@ -168,6 +192,9 @@ static const struct of_device_id meson_irq_gpio_matches[] __maybe_unused = {
{ .compatible = "amlogic,meson-sm1-gpio-intc", .data = &sm1_params },
{ .compatible = "amlogic,meson-a1-gpio-intc", .data = &a1_params },
{ .compatible = "amlogic,meson-s4-gpio-intc", .data = &s4_params },
+ { .compatible = "amlogic,a4-gpio-ao-intc", .data = &a4_ao_params },
+ { .compatible = "amlogic,a4-gpio-intc", .data = &a4_params },
+ { .compatible = "amlogic,a5-gpio-intc", .data = &a5_params },
{ .compatible = "amlogic,c3-gpio-intc", .data = &c3_params },
{ .compatible = "amlogic,t7-gpio-intc", .data = &t7_params },
{ }
@@ -299,11 +326,10 @@ meson_gpio_irq_release_channel(struct meson_gpio_irq_controller *ctl,
static int meson8_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
unsigned int type, u32 *channel_hwirq)
{
- u32 val = 0;
+ const struct meson_gpio_irq_params *params = ctl->params;
unsigned int idx;
- const struct meson_gpio_irq_params *params;
+ u32 val = 0;
- params = ctl->params;
idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
/*
@@ -356,19 +382,19 @@ static int meson8_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
static int meson_s4_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
unsigned int type, u32 *channel_hwirq)
{
- u32 val = 0;
+ const struct meson_gpio_irq_params *params = ctl->params;
unsigned int idx;
+ u32 val = 0;
idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
type &= IRQ_TYPE_SENSE_MASK;
- meson_gpio_irq_update_bits(ctl, REG_EDGE_POL_S4, BIT(idx), 0);
+ meson_gpio_irq_update_bits(ctl, params->edge_pol_reg, BIT(idx), 0);
if (type == IRQ_TYPE_EDGE_BOTH) {
- val |= BIT(ctl->params->edge_both_offset + idx);
- meson_gpio_irq_update_bits(ctl, REG_EDGE_POL_S4,
- BIT(ctl->params->edge_both_offset + idx), val);
+ val = BIT(ctl->params->edge_both_offset + idx);
+ meson_gpio_irq_update_bits(ctl, params->edge_pol_reg, val, val);
return 0;
}
@@ -378,7 +404,7 @@ static int meson_s4_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
val |= BIT(ctl->params->edge_single_offset + idx);
- meson_gpio_irq_update_bits(ctl, REG_EDGE_POL,
+ meson_gpio_irq_update_bits(ctl, params->edge_pol_reg,
BIT(idx) | BIT(12 + idx), val);
return 0;
};
diff --git a/drivers/irqchip/irq-msi-lib.c b/drivers/irqchip/irq-msi-lib.c
index d8e29fc0d406..51464c6257f3 100644
--- a/drivers/irqchip/irq-msi-lib.c
+++ b/drivers/irqchip/irq-msi-lib.c
@@ -28,6 +28,7 @@ bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
struct msi_domain_info *info)
{
const struct msi_parent_ops *pops = real_parent->msi_parent_ops;
+ struct irq_chip *chip = info->chip;
u32 required_flags;
/* Parent ops available? */
@@ -92,10 +93,10 @@ bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
info->flags |= required_flags;
/* Chip updates for all child bus types */
- if (!info->chip->irq_eoi)
- info->chip->irq_eoi = irq_chip_eoi_parent;
- if (!info->chip->irq_ack)
- info->chip->irq_ack = irq_chip_ack_parent;
+ if (!chip->irq_eoi && (pops->chip_flags & MSI_CHIP_FLAG_SET_EOI))
+ chip->irq_eoi = irq_chip_eoi_parent;
+ if (!chip->irq_ack && (pops->chip_flags & MSI_CHIP_FLAG_SET_ACK))
+ chip->irq_ack = irq_chip_ack_parent;
/*
* The device MSI domain can never have a set affinity callback. It
@@ -105,7 +106,7 @@ bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
* device MSI domain aside of mask/unmask which is provided e.g. by
* PCI/MSI device domains.
*/
- info->chip->irq_set_affinity = msi_domain_set_affinity;
+ chip->irq_set_affinity = msi_domain_set_affinity;
return true;
}
EXPORT_SYMBOL_GPL(msi_lib_init_dev_msi_info);
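
msi-lib now installs the default irq_eoi/irq_ack callbacks only when the parent advertises MSI_CHIP_FLAG_SET_EOI/MSI_CHIP_FLAG_SET_ACK. A standalone sketch of that flag-gated defaulting (demo struct, flag names and callbacks are ours):

/* Sketch: install default callbacks only when the parent opted in. */
#include <stdio.h>

#define FLAG_SET_EOI (1u << 0)
#define FLAG_SET_ACK (1u << 1)

struct demo_chip {
	void (*eoi)(void);
	void (*ack)(void);
};

static void parent_eoi(void) { puts("parent eoi"); }
static void parent_ack(void) { puts("parent ack"); }

static void init_chip(struct demo_chip *chip, unsigned int parent_flags)
{
	/* Fill in a default only if it is missing AND the parent wants it. */
	if (!chip->eoi && (parent_flags & FLAG_SET_EOI))
		chip->eoi = parent_eoi;
	if (!chip->ack && (parent_flags & FLAG_SET_ACK))
		chip->ack = parent_ack;
}

int main(void)
{
	struct demo_chip a = { 0 }, b = { 0 };

	init_chip(&a, FLAG_SET_EOI | FLAG_SET_ACK);
	init_chip(&b, 0);	/* parent did not opt in: callbacks stay NULL */

	printf("a: eoi=%s ack=%s\n", a.eoi ? "set" : "null", a.ack ? "set" : "null");
	printf("b: eoi=%s ack=%s\n", b.eoi ? "set" : "null", b.ack ? "set" : "null");
	return 0;
}
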
diff --git a/drivers/irqchip/irq-mvebu-gicp.c b/drivers/irqchip/irq-mvebu-gicp.c
index 2b6183919ea4..d67f93f6d750 100644
--- a/drivers/irqchip/irq-mvebu-gicp.c
+++ b/drivers/irqchip/irq-mvebu-gicp.c
@@ -161,6 +161,7 @@ static const struct irq_domain_ops gicp_domain_ops = {
static const struct msi_parent_ops gicp_msi_parent_ops = {
.supported_flags = GICP_MSI_FLAGS_SUPPORTED,
.required_flags = GICP_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_GENERIC_MSI,
.bus_select_mask = MATCH_PLATFORM_MSI,
.prefix = "GICP-",
diff --git a/drivers/irqchip/irq-mvebu-odmi.c b/drivers/irqchip/irq-mvebu-odmi.c
index ff19bfd258dc..28f7e81df94f 100644
--- a/drivers/irqchip/irq-mvebu-odmi.c
+++ b/drivers/irqchip/irq-mvebu-odmi.c
@@ -157,6 +157,7 @@ static const struct irq_domain_ops odmi_domain_ops = {
static const struct msi_parent_ops odmi_msi_parent_ops = {
.supported_flags = ODMI_MSI_FLAGS_SUPPORTED,
.required_flags = ODMI_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_GENERIC_MSI,
.bus_select_mask = MATCH_PLATFORM_MSI,
.prefix = "ODMI-",
diff --git a/drivers/irqchip/irq-mvebu-sei.c b/drivers/irqchip/irq-mvebu-sei.c
index 065166ab5dbc..ebd4a9014e8d 100644
--- a/drivers/irqchip/irq-mvebu-sei.c
+++ b/drivers/irqchip/irq-mvebu-sei.c
@@ -356,6 +356,7 @@ static void mvebu_sei_reset(struct mvebu_sei *sei)
static const struct msi_parent_ops sei_msi_parent_ops = {
.supported_flags = SEI_MSI_FLAGS_SUPPORTED,
.required_flags = SEI_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_mask = MATCH_PLATFORM_MSI,
.bus_select_token = DOMAIN_BUS_GENERIC_MSI,
.prefix = "SEI-",
diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
index 99e27e01b0b1..6a2e41f02446 100644
--- a/drivers/irqchip/irq-renesas-rzg2l.c
+++ b/drivers/irqchip/irq-renesas-rzg2l.c
@@ -541,43 +541,36 @@ static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *
return -ENODEV;
parent_domain = irq_find_host(parent);
- if (!parent_domain) {
- dev_err(&pdev->dev, "cannot find parent domain\n");
- return -ENODEV;
- }
+ if (!parent_domain)
+ return dev_err_probe(dev, -ENODEV, "cannot find parent domain\n");
- rzg2l_irqc_data = devm_kzalloc(&pdev->dev, sizeof(*rzg2l_irqc_data), GFP_KERNEL);
+ rzg2l_irqc_data = devm_kzalloc(dev, sizeof(*rzg2l_irqc_data), GFP_KERNEL);
if (!rzg2l_irqc_data)
return -ENOMEM;
rzg2l_irqc_data->irqchip = irq_chip;
- rzg2l_irqc_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
+ rzg2l_irqc_data->base = devm_of_iomap(dev, dev->of_node, 0, NULL);
if (IS_ERR(rzg2l_irqc_data->base))
return PTR_ERR(rzg2l_irqc_data->base);
ret = rzg2l_irqc_parse_interrupts(rzg2l_irqc_data, node);
- if (ret) {
- dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret);
- return ret;
- }
-
- resetn = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (IS_ERR(resetn))
- return PTR_ERR(resetn);
+ if (ret)
+ return dev_err_probe(dev, ret, "cannot parse interrupts: %d\n", ret);
- ret = reset_control_deassert(resetn);
- if (ret) {
- dev_err(&pdev->dev, "failed to deassert resetn pin, %d\n", ret);
- return ret;
+ resetn = devm_reset_control_get_exclusive_deasserted(dev, NULL);
+ if (IS_ERR(resetn)) {
+ return dev_err_probe(dev, PTR_ERR(resetn),
+				     "failed to acquire deasserted reset\n");
}
- pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "pm_runtime_resume_and_get failed: %d\n", ret);
- goto pm_disable;
- }
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "devm_pm_runtime_enable failed: %d\n", ret);
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "pm_runtime_resume_and_get failed: %d\n", ret);
raw_spin_lock_init(&rzg2l_irqc_data->lock);
@@ -585,9 +578,8 @@ static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *
node, &rzg2l_irqc_domain_ops,
rzg2l_irqc_data);
if (!irq_domain) {
- dev_err(&pdev->dev, "failed to add irq domain\n");
- ret = -ENOMEM;
- goto pm_put;
+ pm_runtime_put(dev);
+ return dev_err_probe(dev, -ENOMEM, "failed to add irq domain\n");
}
register_syscore_ops(&rzg2l_irqc_syscore_ops);
@@ -604,13 +596,6 @@ static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *
dev = NULL;
return 0;
-
-pm_put:
- pm_runtime_put(&pdev->dev);
-pm_disable:
- pm_runtime_disable(&pdev->dev);
- reset_control_assert(resetn);
- return ret;
}
static int __init rzg2l_irqc_init(struct device_node *node,
diff --git a/drivers/irqchip/irq-renesas-rzv2h.c b/drivers/irqchip/irq-renesas-rzv2h.c
index fe2d29e91026..3d5b5fdf9bde 100644
--- a/drivers/irqchip/irq-renesas-rzv2h.c
+++ b/drivers/irqchip/irq-renesas-rzv2h.c
@@ -64,11 +64,18 @@
#define ICU_TINT_LEVEL_HIGH 2
#define ICU_TINT_LEVEL_LOW 3
-#define ICU_TSSR_K(tint_nr) ((tint_nr) / 4)
-#define ICU_TSSR_TSSEL_N(tint_nr) ((tint_nr) % 4)
-#define ICU_TSSR_TSSEL_PREP(tssel, n) ((tssel) << ((n) * 8))
-#define ICU_TSSR_TSSEL_MASK(n) ICU_TSSR_TSSEL_PREP(0x7F, n)
-#define ICU_TSSR_TIEN(n) (BIT(7) << ((n) * 8))
+#define ICU_TSSR_TSSEL_PREP(tssel, n, field_width) ((tssel) << ((n) * (field_width)))
+#define ICU_TSSR_TSSEL_MASK(n, field_width) \
+({\
+ typeof(field_width) (_field_width) = (field_width); \
+ ICU_TSSR_TSSEL_PREP((GENMASK(((_field_width) - 2), 0)), (n), _field_width); \
+})
+
+#define ICU_TSSR_TIEN(n, field_width) \
+({\
+ typeof(field_width) (_field_width) = (field_width); \
+ BIT((_field_width) - 1) << ((n) * (_field_width)); \
+})
#define ICU_TITSR_K(tint_nr) ((tint_nr) / 16)
#define ICU_TITSR_TITSEL_N(tint_nr) ((tint_nr) % 16)
@@ -78,20 +85,36 @@
#define ICU_TINT_EXTRACT_HWIRQ(x) FIELD_GET(GENMASK(15, 0), (x))
#define ICU_TINT_EXTRACT_GPIOINT(x) FIELD_GET(GENMASK(31, 16), (x))
-#define ICU_PB5_TINT 0x55
+#define ICU_RZG3E_TINT_OFFSET 0x800
+#define ICU_RZG3E_TSSEL_MAX_VAL 0x8c
+#define ICU_RZV2H_TSSEL_MAX_VAL 0x55
+
+/**
+ * struct rzv2h_hw_info - Interrupt Control Unit controller hardware info structure.
+ * @tssel_lut: TINT lookup table
+ * @t_offs: TINT offset
+ * @max_tssel: TSSEL max value
+ * @field_width: TSSR field width
+ */
+struct rzv2h_hw_info {
+ const u8 *tssel_lut;
+ u16 t_offs;
+ u8 max_tssel;
+ u8 field_width;
+};
/**
* struct rzv2h_icu_priv - Interrupt Control Unit controller private data structure.
* @base: Controller's base address
- * @irqchip: Pointer to struct irq_chip
* @fwspec: IRQ firmware specific data
* @lock: Lock to serialize access to hardware registers
+ * @info: Pointer to struct rzv2h_hw_info
*/
struct rzv2h_icu_priv {
void __iomem *base;
- const struct irq_chip *irqchip;
struct irq_fwspec fwspec[ICU_NUM_IRQ];
raw_spinlock_t lock;
+ const struct rzv2h_hw_info *info;
};
static inline struct rzv2h_icu_priv *irq_data_to_priv(struct irq_data *data)
@@ -111,7 +134,7 @@ static void rzv2h_icu_eoi(struct irq_data *d)
tintirq_nr = hw_irq - ICU_TINT_START;
bit = BIT(tintirq_nr);
if (!irqd_is_level_type(d))
- writel_relaxed(bit, priv->base + ICU_TSCLR);
+ writel_relaxed(bit, priv->base + priv->info->t_offs + ICU_TSCLR);
} else if (hw_irq >= ICU_IRQ_START) {
tintirq_nr = hw_irq - ICU_IRQ_START;
bit = BIT(tintirq_nr);
@@ -130,21 +153,23 @@ static void rzv2h_tint_irq_endisable(struct irq_data *d, bool enable)
struct rzv2h_icu_priv *priv = irq_data_to_priv(d);
unsigned int hw_irq = irqd_to_hwirq(d);
u32 tint_nr, tssel_n, k, tssr;
+ u8 nr_tint;
if (hw_irq < ICU_TINT_START)
return;
tint_nr = hw_irq - ICU_TINT_START;
- k = ICU_TSSR_K(tint_nr);
- tssel_n = ICU_TSSR_TSSEL_N(tint_nr);
+ nr_tint = 32 / priv->info->field_width;
+ k = tint_nr / nr_tint;
+ tssel_n = tint_nr % nr_tint;
guard(raw_spinlock)(&priv->lock);
- tssr = readl_relaxed(priv->base + ICU_TSSR(k));
+ tssr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TSSR(k));
if (enable)
- tssr |= ICU_TSSR_TIEN(tssel_n);
+ tssr |= ICU_TSSR_TIEN(tssel_n, priv->info->field_width);
else
- tssr &= ~ICU_TSSR_TIEN(tssel_n);
- writel_relaxed(tssr, priv->base + ICU_TSSR(k));
+ tssr &= ~ICU_TSSR_TIEN(tssel_n, priv->info->field_width);
+ writel_relaxed(tssr, priv->base + priv->info->t_offs + ICU_TSSR(k));
}
static void rzv2h_icu_irq_disable(struct irq_data *d)
@@ -247,8 +272,8 @@ static void rzv2h_clear_tint_int(struct rzv2h_icu_priv *priv, unsigned int hwirq
u32 bit = BIT(tint_nr);
int k = tint_nr / 16;
- tsctr = readl_relaxed(priv->base + ICU_TSCTR);
- titsr = readl_relaxed(priv->base + ICU_TITSR(k));
+ tsctr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TSCTR);
+ titsr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TITSR(k));
titsel = ICU_TITSR_TITSEL_GET(titsr, titsel_n);
/*
@@ -257,7 +282,7 @@ static void rzv2h_clear_tint_int(struct rzv2h_icu_priv *priv, unsigned int hwirq
*/
if ((tsctr & bit) && ((titsel == ICU_TINT_EDGE_RISING) ||
(titsel == ICU_TINT_EDGE_FALLING)))
- writel_relaxed(bit, priv->base + ICU_TSCLR);
+ writel_relaxed(bit, priv->base + priv->info->t_offs + ICU_TSCLR);
}
static int rzv2h_tint_set_type(struct irq_data *d, unsigned int type)
@@ -268,6 +293,7 @@ static int rzv2h_tint_set_type(struct irq_data *d, unsigned int type)
unsigned int hwirq;
u32 tint, sense;
int tint_nr;
+ u8 nr_tint;
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_LEVEL_LOW:
@@ -290,39 +316,42 @@ static int rzv2h_tint_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
+ priv = irq_data_to_priv(d);
tint = (u32)(uintptr_t)irq_data_get_irq_chip_data(d);
- if (tint > ICU_PB5_TINT)
+ if (tint > priv->info->max_tssel)
return -EINVAL;
- priv = irq_data_to_priv(d);
- hwirq = irqd_to_hwirq(d);
+ if (priv->info->tssel_lut)
+ tint = priv->info->tssel_lut[tint];
+ hwirq = irqd_to_hwirq(d);
tint_nr = hwirq - ICU_TINT_START;
- tssr_k = ICU_TSSR_K(tint_nr);
- tssel_n = ICU_TSSR_TSSEL_N(tint_nr);
+ nr_tint = 32 / priv->info->field_width;
+ tssr_k = tint_nr / nr_tint;
+ tssel_n = tint_nr % nr_tint;
+ tien = ICU_TSSR_TIEN(tssel_n, priv->info->field_width);
titsr_k = ICU_TITSR_K(tint_nr);
titsel_n = ICU_TITSR_TITSEL_N(tint_nr);
- tien = ICU_TSSR_TIEN(titsel_n);
guard(raw_spinlock)(&priv->lock);
- tssr = readl_relaxed(priv->base + ICU_TSSR(tssr_k));
- tssr &= ~(ICU_TSSR_TSSEL_MASK(tssel_n) | tien);
- tssr |= ICU_TSSR_TSSEL_PREP(tint, tssel_n);
+ tssr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TSSR(tssr_k));
+ tssr &= ~(ICU_TSSR_TSSEL_MASK(tssel_n, priv->info->field_width) | tien);
+ tssr |= ICU_TSSR_TSSEL_PREP(tint, tssel_n, priv->info->field_width);
- writel_relaxed(tssr, priv->base + ICU_TSSR(tssr_k));
+ writel_relaxed(tssr, priv->base + priv->info->t_offs + ICU_TSSR(tssr_k));
- titsr = readl_relaxed(priv->base + ICU_TITSR(titsr_k));
+ titsr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TITSR(titsr_k));
titsr &= ~ICU_TITSR_TITSEL_MASK(titsel_n);
titsr |= ICU_TITSR_TITSEL_PREP(sense, titsel_n);
- writel_relaxed(titsr, priv->base + ICU_TITSR(titsr_k));
+ writel_relaxed(titsr, priv->base + priv->info->t_offs + ICU_TITSR(titsr_k));
rzv2h_clear_tint_int(priv, hwirq);
- writel_relaxed(tssr | tien, priv->base + ICU_TSSR(tssr_k));
+ writel_relaxed(tssr | tien, priv->base + priv->info->t_offs + ICU_TSSR(tssr_k));
return 0;
}
@@ -390,7 +419,7 @@ static int rzv2h_icu_alloc(struct irq_domain *domain, unsigned int virq, unsigne
if (hwirq > (ICU_NUM_IRQ - 1))
return -EINVAL;
- ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, priv->irqchip,
+ ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &rzv2h_icu_chip,
(void *)(uintptr_t)tint);
if (ret)
return ret;
@@ -421,7 +450,13 @@ static int rzv2h_icu_parse_interrupts(struct rzv2h_icu_priv *priv, struct device
return 0;
}
-static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
+static void rzv2h_icu_put_device(void *data)
+{
+ put_device(data);
+}
+
+static int rzv2h_icu_init_common(struct device_node *node, struct device_node *parent,
+ const struct rzv2h_hw_info *hw_info)
{
struct irq_domain *irq_domain, *parent_domain;
struct rzv2h_icu_priv *rzv2h_icu_data;
@@ -433,50 +468,48 @@ static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
if (!pdev)
return -ENODEV;
+ ret = devm_add_action_or_reset(&pdev->dev, rzv2h_icu_put_device,
+ &pdev->dev);
+ if (ret < 0)
+ return ret;
+
parent_domain = irq_find_host(parent);
if (!parent_domain) {
dev_err(&pdev->dev, "cannot find parent domain\n");
- ret = -ENODEV;
- goto put_dev;
+ return -ENODEV;
}
rzv2h_icu_data = devm_kzalloc(&pdev->dev, sizeof(*rzv2h_icu_data), GFP_KERNEL);
- if (!rzv2h_icu_data) {
- ret = -ENOMEM;
- goto put_dev;
- }
-
- rzv2h_icu_data->irqchip = &rzv2h_icu_chip;
+ if (!rzv2h_icu_data)
+ return -ENOMEM;
rzv2h_icu_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
- if (IS_ERR(rzv2h_icu_data->base)) {
- ret = PTR_ERR(rzv2h_icu_data->base);
- goto put_dev;
- }
+ if (IS_ERR(rzv2h_icu_data->base))
+ return PTR_ERR(rzv2h_icu_data->base);
ret = rzv2h_icu_parse_interrupts(rzv2h_icu_data, node);
if (ret) {
dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret);
- goto put_dev;
+ return ret;
}
- resetn = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ resetn = devm_reset_control_get_exclusive_deasserted(&pdev->dev, NULL);
if (IS_ERR(resetn)) {
ret = PTR_ERR(resetn);
- goto put_dev;
+ dev_err(&pdev->dev, "failed to acquire deasserted reset: %d\n", ret);
+ return ret;
}
- ret = reset_control_deassert(resetn);
- if (ret) {
- dev_err(&pdev->dev, "failed to deassert resetn pin, %d\n", ret);
- goto put_dev;
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "devm_pm_runtime_enable failed, %d\n", ret);
+ return ret;
}
- pm_runtime_enable(&pdev->dev);
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "pm_runtime_resume_and_get failed: %d\n", ret);
- goto pm_disable;
+ return ret;
}
raw_spin_lock_init(&rzv2h_icu_data->lock);
@@ -489,6 +522,8 @@ static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
goto pm_put;
}
+ rzv2h_icu_data->info = hw_info;
+
/*
* coccicheck complains about a missing put_device call before returning, but it's a false
* positive. We still need &pdev->dev after successfully returning from this function.
@@ -497,16 +532,61 @@ static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
pm_put:
pm_runtime_put(&pdev->dev);
-pm_disable:
- pm_runtime_disable(&pdev->dev);
- reset_control_assert(resetn);
-put_dev:
- put_device(&pdev->dev);
return ret;
}
+/* Mapping based on port index on Table 4.2-6 and TSSEL bits on Table 4.6-4 */
+static const u8 rzg3e_tssel_lut[] = {
+ 81, 82, 83, 84, 85, 86, 87, 88, /* P00-P07 */
+ 89, 90, 91, 92, 93, 94, 95, 96, /* P10-P17 */
+ 111, 112, /* P20-P21 */
+ 97, 98, 99, 100, 101, 102, 103, 104, /* P30-P37 */
+ 105, 106, 107, 108, 109, 110, /* P40-P45 */
+ 113, 114, 115, 116, 117, 118, 119, /* P50-P56 */
+ 120, 121, 122, 123, 124, 125, 126, /* P60-P66 */
+ 127, 128, 129, 130, 131, 132, 133, 134, /* P70-P77 */
+ 135, 136, 137, 138, 139, 140, /* P80-P85 */
+ 43, 44, 45, 46, 47, 48, 49, 50, /* PA0-PA7 */
+ 51, 52, 53, 54, 55, 56, 57, 58, /* PB0-PB7 */
+ 59, 60, 61, /* PC0-PC2 */
+ 62, 63, 64, 65, 66, 67, 68, 69, /* PD0-PD7 */
+ 70, 71, 72, 73, 74, 75, 76, 77, /* PE0-PE7 */
+ 78, 79, 80, /* PF0-PF2 */
+ 25, 26, 27, 28, 29, 30, 31, 32, /* PG0-PG7 */
+ 33, 34, 35, 36, 37, 38, /* PH0-PH5 */
+ 4, 5, 6, 7, 8, /* PJ0-PJ4 */
+ 39, 40, 41, 42, /* PK0-PK3 */
+ 9, 10, 11, 12, 21, 22, 23, 24, /* PL0-PL7 */
+ 13, 14, 15, 16, 17, 18, 19, 20, /* PM0-PM7 */
+ 0, 1, 2, 3 /* PS0-PS3 */
+};
+
+static const struct rzv2h_hw_info rzg3e_hw_params = {
+ .tssel_lut = rzg3e_tssel_lut,
+ .t_offs = ICU_RZG3E_TINT_OFFSET,
+ .max_tssel = ICU_RZG3E_TSSEL_MAX_VAL,
+ .field_width = 16,
+};
+
+static const struct rzv2h_hw_info rzv2h_hw_params = {
+ .t_offs = 0,
+ .max_tssel = ICU_RZV2H_TSSEL_MAX_VAL,
+ .field_width = 8,
+};
+
+static int rzg3e_icu_init(struct device_node *node, struct device_node *parent)
+{
+ return rzv2h_icu_init_common(node, parent, &rzg3e_hw_params);
+}
+
+static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
+{
+ return rzv2h_icu_init_common(node, parent, &rzv2h_hw_params);
+}
+
IRQCHIP_PLATFORM_DRIVER_BEGIN(rzv2h_icu)
+IRQCHIP_MATCH("renesas,r9a09g047-icu", rzg3e_icu_init)
IRQCHIP_MATCH("renesas,r9a09g057-icu", rzv2h_icu_init)
IRQCHIP_PLATFORM_DRIVER_END(rzv2h_icu)
MODULE_AUTHOR("Fabrizio Castro <fabrizio.castro.jz@renesas.com>");
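
The reworked ICU_TSSR_TSSEL_MASK()/ICU_TSSR_TIEN() macros above derive field positions from a per-SoC field width (8 bits on RZ/V2H, 16 bits on RZ/G3E) instead of hard-coding 8-bit fields. A standalone sketch of the same arithmetic written as plain functions:

/* Sketch: per-TINT field positions for a register with a variable field width. */
#include <stdio.h>
#include <stdint.h>

static uint32_t tssel_mask(unsigned int n, unsigned int field_width)
{
	/* Low (field_width - 1) bits of the field select the source. */
	uint32_t sel_bits = (1u << (field_width - 1)) - 1;

	return sel_bits << (n * field_width);
}

static uint32_t tien_bit(unsigned int n, unsigned int field_width)
{
	/* Top bit of the field is the enable bit. */
	return (1u << (field_width - 1)) << (n * field_width);
}

int main(void)
{
	/* RZ/V2H: 8-bit fields, 4 per 32-bit register. */
	printf("v2h n=1: mask=0x%08x tien=0x%08x\n", tssel_mask(1, 8), tien_bit(1, 8));
	/* RZ/G3E: 16-bit fields, 2 per 32-bit register. */
	printf("g3e n=1: mask=0x%08x tien=0x%08x\n", tssel_mask(1, 16), tien_bit(1, 16));
	return 0;
}
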
diff --git a/drivers/irqchip/irq-riscv-aplic-direct.c b/drivers/irqchip/irq-riscv-aplic-direct.c
index 7cd6b646774b..205ad61d15e4 100644
--- a/drivers/irqchip/irq-riscv-aplic-direct.c
+++ b/drivers/irqchip/irq-riscv-aplic-direct.c
@@ -31,7 +31,7 @@ struct aplic_direct {
};
struct aplic_idc {
- unsigned int hart_index;
+ u32 hart_index;
void __iomem *regs;
struct aplic_direct *direct;
};
@@ -219,6 +219,20 @@ static int aplic_direct_parse_parent_hwirq(struct device *dev, u32 index,
return 0;
}
+static int aplic_direct_get_hart_index(struct device *dev, u32 logical_index,
+ u32 *hart_index)
+{
+ const char *prop_hart_index = "riscv,hart-indexes";
+ struct device_node *np = to_of_node(dev->fwnode);
+
+ if (!np || !of_property_present(np, prop_hart_index)) {
+ *hart_index = logical_index;
+ return 0;
+ }
+
+ return of_property_read_u32_index(np, prop_hart_index, logical_index, hart_index);
+}
+
int aplic_direct_setup(struct device *dev, void __iomem *regs)
{
int i, j, rc, cpu, current_cpu, setup_count = 0;
@@ -265,8 +279,12 @@ int aplic_direct_setup(struct device *dev, void __iomem *regs)
cpumask_set_cpu(cpu, &direct->lmask);
idc = per_cpu_ptr(&aplic_idcs, cpu);
- idc->hart_index = i;
- idc->regs = priv->regs + APLIC_IDC_BASE + i * APLIC_IDC_SIZE;
+ rc = aplic_direct_get_hart_index(dev, i, &idc->hart_index);
+ if (rc) {
+ dev_warn(dev, "hart index not found for IDC%d\n", i);
+ continue;
+ }
+ idc->regs = priv->regs + APLIC_IDC_BASE + idc->hart_index * APLIC_IDC_SIZE;
idc->direct = direct;
aplic_idc_set_delivery(idc, true);
diff --git a/drivers/irqchip/irq-riscv-imsic-early.c b/drivers/irqchip/irq-riscv-imsic-early.c
index 275df5005705..d9ae87808651 100644
--- a/drivers/irqchip/irq-riscv-imsic-early.c
+++ b/drivers/irqchip/irq-riscv-imsic-early.c
@@ -73,10 +73,16 @@ static int __init imsic_ipi_domain_init(void) { return 0; }
static void imsic_handle_irq(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
- int err, cpu = smp_processor_id();
+ int cpu = smp_processor_id();
struct imsic_vector *vec;
unsigned long local_id;
+ /*
+ * Process pending local synchronization instead of waiting
+ * for per-CPU local timer to expire.
+ */
+ imsic_local_sync_all(false);
+
chained_irq_enter(chip, desc);
while ((local_id = csr_swap(CSR_TOPEI, 0))) {
@@ -97,9 +103,7 @@ static void imsic_handle_irq(struct irq_desc *desc)
continue;
}
- err = generic_handle_domain_irq(imsic->base_domain, vec->hwirq);
- if (unlikely(err))
- pr_warn_ratelimited("hwirq 0x%x mapping not found\n", vec->hwirq);
+ generic_handle_irq(vec->irq);
}
chained_irq_exit(chip, desc);
@@ -120,7 +124,7 @@ static int imsic_starting_cpu(unsigned int cpu)
* Interrupts identities might have been enabled/disabled while
* this CPU was not running so sync-up local enable/disable state.
*/
- imsic_local_sync_all();
+ imsic_local_sync_all(true);
/* Enable local interrupt delivery */
imsic_local_delivery(true);
diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c
index c708780e8760..b8ae67c25b37 100644
--- a/drivers/irqchip/irq-riscv-imsic-platform.c
+++ b/drivers/irqchip/irq-riscv-imsic-platform.c
@@ -20,6 +20,7 @@
#include <linux/spinlock.h>
#include <linux/smp.h>
+#include "irq-msi-lib.h"
#include "irq-riscv-imsic-state.h"
static bool imsic_cpu_page_phys(unsigned int cpu, unsigned int guest_index,
@@ -63,6 +64,11 @@ static int imsic_irq_retrigger(struct irq_data *d)
return 0;
}
+static void imsic_irq_ack(struct irq_data *d)
+{
+ irq_move_irq(d);
+}
+
static void imsic_irq_compose_vector_msg(struct imsic_vector *vec, struct msi_msg *msg)
{
phys_addr_t msi_addr;
@@ -96,9 +102,23 @@ static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask
bool force)
{
struct imsic_vector *old_vec, *new_vec;
- struct irq_data *pd = d->parent_data;
+ struct imsic_vector tmp_vec;
+
+ /*
+ * Requirements for the downstream irqdomains (or devices):
+ *
+ * 1) Downstream irqdomains (or devices) with atomic MSI update can
+ * happily do imsic_irq_set_affinity() in the process-context on
+ * any CPU so the irqchip of such irqdomains must not set the
+ * IRQCHIP_MOVE_DEFERRED flag.
+ *
+ * 2) Downstream irqdomains (or devices) with non-atomic MSI update
+	 *    must use imsic_irq_set_affinity() in interrupt-context upon
+ * the next device interrupt so the irqchip of such irqdomains
+ * must set the IRQCHIP_MOVE_DEFERRED flag.
+ */
- old_vec = irq_data_get_irq_chip_data(pd);
+ old_vec = irq_data_get_irq_chip_data(d);
if (WARN_ON(!old_vec))
return -ENOENT;
@@ -111,34 +131,95 @@ static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask
return -EBUSY;
/* Get a new vector on the desired set of CPUs */
- new_vec = imsic_vector_alloc(old_vec->hwirq, mask_val);
+ new_vec = imsic_vector_alloc(old_vec->irq, mask_val);
if (!new_vec)
return -ENOSPC;
+ /*
+ * Device having non-atomic MSI update might see an intermediate
+ * state when changing target IMSIC vector from one CPU to another.
+ *
+ * To avoid losing interrupt to such intermediate state, do the
+ * following (just like x86 APIC):
+ *
+ * 1) First write a temporary IMSIC vector to the device which
+ * has MSI address same as the old IMSIC vector but MSI data
+ * matches the new IMSIC vector.
+ *
+ * 2) Next write the new IMSIC vector to the device.
+ *
+ * Based on the above, __imsic_local_sync() must check pending
+ * status of both old MSI data and new MSI data on the old CPU.
+ */
+ if (!irq_can_move_in_process_context(d) &&
+ new_vec->local_id != old_vec->local_id) {
+ /* Setup temporary vector */
+ tmp_vec.cpu = old_vec->cpu;
+ tmp_vec.local_id = new_vec->local_id;
+
+ /* Point device to the temporary vector */
+ imsic_msi_update_msg(irq_get_irq_data(d->irq), &tmp_vec);
+ }
+
/* Point device to the new vector */
- imsic_msi_update_msg(d, new_vec);
+ imsic_msi_update_msg(irq_get_irq_data(d->irq), new_vec);
/* Update irq descriptors with the new vector */
- pd->chip_data = new_vec;
+ d->chip_data = new_vec;
- /* Update effective affinity of parent irq data */
- irq_data_update_effective_affinity(pd, cpumask_of(new_vec->cpu));
+ /* Update effective affinity */
+ irq_data_update_effective_affinity(d, cpumask_of(new_vec->cpu));
/* Move state of the old vector to the new vector */
imsic_vector_move(old_vec, new_vec);
return IRQ_SET_MASK_OK_DONE;
}
+
+static void imsic_irq_force_complete_move(struct irq_data *d)
+{
+ struct imsic_vector *mvec, *vec = irq_data_get_irq_chip_data(d);
+ unsigned int cpu = smp_processor_id();
+
+ if (WARN_ON(!vec))
+ return;
+
+ /* Do nothing if there is no in-flight move */
+ mvec = imsic_vector_get_move(vec);
+ if (!mvec)
+ return;
+
+ /* Do nothing if the old IMSIC vector does not belong to current CPU */
+ if (mvec->cpu != cpu)
+ return;
+
+ /*
+ * The best we can do is forcefully clean up the old IMSIC vector.
+ *
+ * The challenges here are the same as for the x86 vector domain, so
+ * refer to the comments in the irq_force_complete_move() function
+ * in arch/x86/kernel/apic/vector.c.
+ */
+
+ /* Force cleanup in-flight move */
+ pr_info("IRQ fixup: irq %d move in progress, old vector cpu %d local_id %d\n",
+ d->irq, mvec->cpu, mvec->local_id);
+ imsic_vector_force_move_cleanup(vec);
+}
#endif
static struct irq_chip imsic_irq_base_chip = {
- .name = "IMSIC",
- .irq_mask = imsic_irq_mask,
- .irq_unmask = imsic_irq_unmask,
- .irq_retrigger = imsic_irq_retrigger,
- .irq_compose_msi_msg = imsic_irq_compose_msg,
- .flags = IRQCHIP_SKIP_SET_WAKE |
- IRQCHIP_MASK_ON_SUSPEND,
+ .name = "IMSIC",
+ .irq_mask = imsic_irq_mask,
+ .irq_unmask = imsic_irq_unmask,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = imsic_irq_set_affinity,
+ .irq_force_complete_move = imsic_irq_force_complete_move,
+#endif
+ .irq_retrigger = imsic_irq_retrigger,
+ .irq_ack = imsic_irq_ack,
+ .irq_compose_msi_msg = imsic_irq_compose_msg,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
};
static int imsic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -155,7 +236,7 @@ static int imsic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
return -ENOSPC;
irq_domain_set_info(domain, virq, virq, &imsic_irq_base_chip, vec,
- handle_simple_irq, NULL, NULL);
+ handle_edge_irq, NULL, NULL);
irq_set_noprobe(virq);
irq_set_affinity(virq, cpu_online_mask);
irq_data_update_effective_affinity(irq_get_irq_data(virq), cpumask_of(vec->cpu));
@@ -172,22 +253,6 @@ static void imsic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
-static int imsic_irq_domain_select(struct irq_domain *domain, struct irq_fwspec *fwspec,
- enum irq_domain_bus_token bus_token)
-{
- const struct msi_parent_ops *ops = domain->msi_parent_ops;
- u32 busmask = BIT(bus_token);
-
- if (fwspec->fwnode != domain->fwnode || fwspec->param_count != 0)
- return 0;
-
- /* Handle pure domain searches */
- if (bus_token == ops->bus_select_token)
- return 1;
-
- return !!(ops->bus_select_mask & busmask);
-}
-
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void imsic_irq_debug_show(struct seq_file *m, struct irq_domain *d,
struct irq_data *irqd, int ind)
@@ -204,107 +269,37 @@ static void imsic_irq_debug_show(struct seq_file *m, struct irq_domain *d,
static const struct irq_domain_ops imsic_base_domain_ops = {
.alloc = imsic_irq_domain_alloc,
.free = imsic_irq_domain_free,
- .select = imsic_irq_domain_select,
+ .select = msi_lib_irq_domain_select,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
.debug_show = imsic_irq_debug_show,
#endif
};
-#ifdef CONFIG_RISCV_IMSIC_PCI
-
-static void imsic_pci_mask_irq(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void imsic_pci_unmask_irq(struct irq_data *d)
+static bool imsic_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent, struct msi_domain_info *info)
{
- irq_chip_unmask_parent(d);
- pci_msi_unmask_irq(d);
-}
-
-#define MATCH_PCI_MSI BIT(DOMAIN_BUS_PCI_MSI)
-
-#else
-
-#define MATCH_PCI_MSI 0
-
-#endif
-
-static bool imsic_init_dev_msi_info(struct device *dev,
- struct irq_domain *domain,
- struct irq_domain *real_parent,
- struct msi_domain_info *info)
-{
- const struct msi_parent_ops *pops = real_parent->msi_parent_ops;
-
- /* MSI parent domain specific settings */
- switch (real_parent->bus_token) {
- case DOMAIN_BUS_NEXUS:
- if (WARN_ON_ONCE(domain != real_parent))
- return false;
-#ifdef CONFIG_SMP
- info->chip->irq_set_affinity = imsic_irq_set_affinity;
-#endif
- break;
- default:
- WARN_ON_ONCE(1);
+ if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
return false;
- }
- /* Is the target supported? */
switch (info->bus_token) {
-#ifdef CONFIG_RISCV_IMSIC_PCI
case DOMAIN_BUS_PCI_DEVICE_MSI:
case DOMAIN_BUS_PCI_DEVICE_MSIX:
- info->chip->irq_mask = imsic_pci_mask_irq;
- info->chip->irq_unmask = imsic_pci_unmask_irq;
- break;
-#endif
- case DOMAIN_BUS_DEVICE_MSI:
- /*
- * Per-device MSI should never have any MSI feature bits
- * set. It's sole purpose is to create a dumb interrupt
- * chip which has a device specific irq_write_msi_msg()
- * callback.
- */
- if (WARN_ON_ONCE(info->flags))
- return false;
-
- /* Core managed MSI descriptors */
- info->flags |= MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS |
- MSI_FLAG_FREE_MSI_DESCS;
- break;
- case DOMAIN_BUS_WIRED_TO_MSI:
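+ /*
+ * PCI MSI/MSI-X message updates are not atomic, so defer the
+ * vector move to interrupt context (see imsic_irq_set_affinity()).
+ */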
+ info->chip->flags |= IRQCHIP_MOVE_DEFERRED;
break;
default:
- WARN_ON_ONCE(1);
- return false;
+ break;
}
- /* Use hierarchial chip operations re-trigger */
- info->chip->irq_retrigger = irq_chip_retrigger_hierarchy;
-
- /*
- * Mask out the domain specific MSI feature flags which are not
- * supported by the real parent.
- */
- info->flags &= pops->supported_flags;
-
- /* Enforce the required flags */
- info->flags |= pops->required_flags;
-
return true;
}
-#define MATCH_PLATFORM_MSI BIT(DOMAIN_BUS_PLATFORM_MSI)
-
static const struct msi_parent_ops imsic_msi_parent_ops = {
.supported_flags = MSI_GENERIC_FLAGS_MASK |
MSI_FLAG_PCI_MSIX,
.required_flags = MSI_FLAG_USE_DEF_DOM_OPS |
- MSI_FLAG_USE_DEF_CHIP_OPS,
+ MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSI_MASK_PARENT,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
.init_dev_msi_info = imsic_init_dev_msi_info,
diff --git a/drivers/irqchip/irq-riscv-imsic-state.c b/drivers/irqchip/irq-riscv-imsic-state.c
index b97e6cd89ed7..bdf5cd2037f2 100644
--- a/drivers/irqchip/irq-riscv-imsic-state.c
+++ b/drivers/irqchip/irq-riscv-imsic-state.c
@@ -124,10 +124,11 @@ void __imsic_eix_update(unsigned long base_id, unsigned long num_id, bool pend,
}
}
-static void __imsic_local_sync(struct imsic_local_priv *lpriv)
+static bool __imsic_local_sync(struct imsic_local_priv *lpriv)
{
- struct imsic_local_config *mlocal;
- struct imsic_vector *vec, *mvec;
+ struct imsic_local_config *tlocal, *mlocal;
+ struct imsic_vector *vec, *tvec, *mvec;
+ bool ret = true;
int i;
lockdep_assert_held(&lpriv->lock);
@@ -143,35 +144,97 @@ static void __imsic_local_sync(struct imsic_local_priv *lpriv)
__imsic_id_clear_enable(i);
/*
- * If the ID was being moved to a new ID on some other CPU
- * then we can get a MSI during the movement so check the
- * ID pending bit and re-trigger the new ID on other CPU
- * using MMIO write.
+ * Clear the previous vector pointer of the new vector only
+ * after the movement is complete on the old CPU.
*/
- mvec = READ_ONCE(vec->move);
- WRITE_ONCE(vec->move, NULL);
- if (mvec && mvec != vec) {
- if (__imsic_id_read_clear_pending(i)) {
+ mvec = READ_ONCE(vec->move_prev);
+ if (mvec) {
+ /*
+ * If the old vector has not been updated then
+ * try again in the next sync-up call.
+ */
+ if (READ_ONCE(mvec->move_next)) {
+ ret = false;
+ continue;
+ }
+
+ WRITE_ONCE(vec->move_prev, NULL);
+ }
+
+ /*
+ * If a vector was being moved to a new vector on some other
+ * CPU, then we can get an MSI during the movement, so check the
+ * ID pending bit and re-trigger the new ID on the other CPU
+ * using an MMIO write.
+ */
+ mvec = READ_ONCE(vec->move_next);
+ if (mvec) {
+ /*
+ * Devices with non-atomic MSI updates might see
+ * an intermediate state, so check both the old and
+ * the new ID for pending interrupts.
+ *
+ * For details, see imsic_irq_set_affinity().
+ */
+ tvec = vec->local_id == mvec->local_id ?
+ NULL : &lpriv->vectors[mvec->local_id];
+
+ if (tvec && !irq_can_move_in_process_context(irq_get_irq_data(vec->irq)) &&
+ __imsic_id_read_clear_pending(tvec->local_id)) {
+ /* Retrigger temporary vector if it was already in-use */
+ if (READ_ONCE(tvec->enable)) {
+ tlocal = per_cpu_ptr(imsic->global.local, tvec->cpu);
+ writel_relaxed(tvec->local_id, tlocal->msi_va);
+ }
+
+ mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu);
+ writel_relaxed(mvec->local_id, mlocal->msi_va);
+ }
+
+ if (__imsic_id_read_clear_pending(vec->local_id)) {
mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu);
writel_relaxed(mvec->local_id, mlocal->msi_va);
}
- imsic_vector_free(&lpriv->vectors[i]);
+ WRITE_ONCE(vec->move_next, NULL);
+ imsic_vector_free(vec);
}
skip:
bitmap_clear(lpriv->dirty_bitmap, i, 1);
}
+
+ return ret;
}
-void imsic_local_sync_all(void)
+#ifdef CONFIG_SMP
+static void __imsic_local_timer_start(struct imsic_local_priv *lpriv)
+{
+ lockdep_assert_held(&lpriv->lock);
+
+ if (!timer_pending(&lpriv->timer)) {
+ lpriv->timer.expires = jiffies + 1;
+ add_timer_on(&lpriv->timer, smp_processor_id());
+ }
+}
+#else
+static inline void __imsic_local_timer_start(struct imsic_local_priv *lpriv)
+{
+}
+#endif
+
+void imsic_local_sync_all(bool force_all)
{
struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
unsigned long flags;
raw_spin_lock_irqsave(&lpriv->lock, flags);
- bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1);
- __imsic_local_sync(lpriv);
+
+ if (force_all)
+ bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1);
+ if (!__imsic_local_sync(lpriv))
+ __imsic_local_timer_start(lpriv);
+
raw_spin_unlock_irqrestore(&lpriv->lock, flags);
}
@@ -190,12 +253,7 @@ void imsic_local_delivery(bool enable)
#ifdef CONFIG_SMP
static void imsic_local_timer_callback(struct timer_list *timer)
{
- struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&lpriv->lock, flags);
- __imsic_local_sync(lpriv);
- raw_spin_unlock_irqrestore(&lpriv->lock, flags);
+ imsic_local_sync_all(false);
}
static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu)
@@ -216,14 +274,11 @@ static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu
*/
if (cpu_online(cpu)) {
if (cpu == smp_processor_id()) {
- __imsic_local_sync(lpriv);
- return;
+ if (__imsic_local_sync(lpriv))
+ return;
}
- if (!timer_pending(&lpriv->timer)) {
- lpriv->timer.expires = jiffies + 1;
- add_timer_on(&lpriv->timer, cpu);
- }
+ __imsic_local_timer_start(lpriv);
}
}
#else
@@ -278,8 +333,26 @@ void imsic_vector_unmask(struct imsic_vector *vec)
raw_spin_unlock(&lpriv->lock);
}
-static bool imsic_vector_move_update(struct imsic_local_priv *lpriv, struct imsic_vector *vec,
- bool new_enable, struct imsic_vector *new_move)
+void imsic_vector_force_move_cleanup(struct imsic_vector *vec)
+{
+ struct imsic_local_priv *lpriv;
+ struct imsic_vector *mvec;
+ unsigned long flags;
+
+ lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
+ raw_spin_lock_irqsave(&lpriv->lock, flags);
+
+ mvec = READ_ONCE(vec->move_prev);
+ WRITE_ONCE(vec->move_prev, NULL);
+ if (mvec)
+ imsic_vector_free(mvec);
+
+ raw_spin_unlock_irqrestore(&lpriv->lock, flags);
+}
+
+static bool imsic_vector_move_update(struct imsic_local_priv *lpriv,
+ struct imsic_vector *vec, bool is_old_vec,
+ bool new_enable, struct imsic_vector *move_vec)
{
unsigned long flags;
bool enabled;
@@ -289,7 +362,10 @@ static bool imsic_vector_move_update(struct imsic_local_priv *lpriv, struct imsi
/* Update enable and move details */
enabled = READ_ONCE(vec->enable);
WRITE_ONCE(vec->enable, new_enable);
- WRITE_ONCE(vec->move, new_move);
+ if (is_old_vec)
+ WRITE_ONCE(vec->move_next, move_vec);
+ else
+ WRITE_ONCE(vec->move_prev, move_vec);
/* Mark the vector as dirty and synchronize */
bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
@@ -322,8 +398,8 @@ void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_ve
* interrupt on the old vector while device was being moved
* to the new vector.
*/
- enabled = imsic_vector_move_update(old_lpriv, old_vec, false, new_vec);
- imsic_vector_move_update(new_lpriv, new_vec, enabled, new_vec);
+ enabled = imsic_vector_move_update(old_lpriv, old_vec, true, false, new_vec);
+ imsic_vector_move_update(new_lpriv, new_vec, false, enabled, old_vec);
}
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
@@ -368,7 +444,7 @@ struct imsic_vector *imsic_vector_from_local_id(unsigned int cpu, unsigned int l
return &lpriv->vectors[local_id];
}
-struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask *mask)
+struct imsic_vector *imsic_vector_alloc(unsigned int irq, const struct cpumask *mask)
{
struct imsic_vector *vec = NULL;
struct imsic_local_priv *lpriv;
@@ -384,9 +460,10 @@ struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask
lpriv = per_cpu_ptr(imsic->lpriv, cpu);
vec = &lpriv->vectors[local_id];
- vec->hwirq = hwirq;
+ vec->irq = irq;
vec->enable = false;
- vec->move = NULL;
+ vec->move_next = NULL;
+ vec->move_prev = NULL;
return vec;
}
@@ -396,7 +473,7 @@ void imsic_vector_free(struct imsic_vector *vec)
unsigned long flags;
raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
- vec->hwirq = UINT_MAX;
+ vec->irq = 0;
irq_matrix_free(imsic->matrix, vec->cpu, vec->local_id, false);
raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);
}
@@ -455,7 +532,7 @@ static int __init imsic_local_init(void)
vec = &lpriv->vectors[i];
vec->cpu = cpu;
vec->local_id = i;
- vec->hwirq = UINT_MAX;
+ vec->irq = 0;
}
}
diff --git a/drivers/irqchip/irq-riscv-imsic-state.h b/drivers/irqchip/irq-riscv-imsic-state.h
index 391e44280827..3202ffa4e849 100644
--- a/drivers/irqchip/irq-riscv-imsic-state.h
+++ b/drivers/irqchip/irq-riscv-imsic-state.h
@@ -20,10 +20,11 @@ struct imsic_vector {
unsigned int cpu;
unsigned int local_id;
/* Details saved by driver in the vector */
- unsigned int hwirq;
+ unsigned int irq;
/* Details accessed using local lock held */
bool enable;
- struct imsic_vector *move;
+ struct imsic_vector *move_next;
+ struct imsic_vector *move_prev;
};
struct imsic_local_priv {
@@ -74,7 +75,7 @@ static inline void __imsic_id_clear_enable(unsigned long id)
__imsic_eix_update(id, 1, false, false);
}
-void imsic_local_sync_all(void);
+void imsic_local_sync_all(bool force_all);
void imsic_local_delivery(bool enable);
void imsic_vector_mask(struct imsic_vector *vec);
@@ -87,14 +88,15 @@ static inline bool imsic_vector_isenabled(struct imsic_vector *vec)
static inline struct imsic_vector *imsic_vector_get_move(struct imsic_vector *vec)
{
- return READ_ONCE(vec->move);
+ return READ_ONCE(vec->move_prev);
}
+void imsic_vector_force_move_cleanup(struct imsic_vector *vec);
void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_vec);
struct imsic_vector *imsic_vector_from_local_id(unsigned int cpu, unsigned int local_id);
-struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask *mask);
+struct imsic_vector *imsic_vector_alloc(unsigned int irq, const struct cpumask *mask);
void imsic_vector_free(struct imsic_vector *vector);
void imsic_vector_debug_show(struct seq_file *m, struct imsic_vector *vec, int ind);
diff --git a/drivers/irqchip/irq-sg2042-msi.c b/drivers/irqchip/irq-sg2042-msi.c
new file mode 100644
index 000000000000..ee682e87eb8b
--- /dev/null
+++ b/drivers/irqchip/irq-sg2042-msi.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SG2042 MSI Controller
+ *
+ * Copyright (C) 2024 Sophgo Technology Inc.
+ * Copyright (C) 2024 Chen Wang <unicorn_wang@outlook.com>
+ */
+
+#include <linux/cleanup.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
+#include "irq-msi-lib.h"
+
+#define SG2042_MAX_MSI_VECTOR 32
+
+struct sg2042_msi_chipdata {
+ void __iomem *reg_clr; // clear reg, see TRM, 10.1.33, GP_INTR0_CLR
+
+ phys_addr_t doorbell_addr; // see TRM, 10.1.32, GP_INTR0_SET
+
+ u32 irq_first; // The first vector number used for MSIs
+ u32 num_irqs; // The number of vectors for MSIs
+
+ DECLARE_BITMAP(msi_map, SG2042_MAX_MSI_VECTOR);
+ struct mutex msi_map_lock; // lock for msi_map
+};
+
+static int sg2042_msi_allocate_hwirq(struct sg2042_msi_chipdata *data, int num_req)
+{
+ int first;
+
+ guard(mutex)(&data->msi_map_lock);
+ first = bitmap_find_free_region(data->msi_map, data->num_irqs,
+ get_count_order(num_req));
+ return first >= 0 ? first : -ENOSPC;
+}
+
+static void sg2042_msi_free_hwirq(struct sg2042_msi_chipdata *data, int hwirq, int num_req)
+{
+ guard(mutex)(&data->msi_map_lock);
+ bitmap_release_region(data->msi_map, hwirq, get_count_order(num_req));
+}
+
+static void sg2042_msi_irq_ack(struct irq_data *d)
+{
+ struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d);
+ int bit_off = d->hwirq;
+
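+ /* Clear the pending bit for this vector (GP_INTR0_CLR) before acking the parent */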
+ writel(1 << bit_off, data->reg_clr);
+
+ irq_chip_ack_parent(d);
+}
+
+static void sg2042_msi_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d);
+
+ msg->address_hi = upper_32_bits(data->doorbell_addr);
+ msg->address_lo = lower_32_bits(data->doorbell_addr);
+ msg->data = 1 << d->hwirq;
+}
+
+static const struct irq_chip sg2042_msi_middle_irq_chip = {
+ .name = "SG2042 MSI",
+ .irq_ack = sg2042_msi_irq_ack,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#endif
+ .irq_compose_msi_msg = sg2042_msi_irq_compose_msi_msg,
+};
+
+static int sg2042_msi_parent_domain_alloc(struct irq_domain *domain, unsigned int virq, int hwirq)
+{
+ struct sg2042_msi_chipdata *data = domain->host_data;
+ struct irq_fwspec fwspec;
+ struct irq_data *d;
+ int ret;
+
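+ /* MSI vectors map 1:1 onto parent interrupts starting at irq_first, edge rising */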
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 2;
+ fwspec.param[0] = data->irq_first + hwirq;
+ fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (ret)
+ return ret;
+
+ d = irq_domain_get_irq_data(domain->parent, virq);
+ return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
+}
+
+static int sg2042_msi_middle_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct sg2042_msi_chipdata *data = domain->host_data;
+ int hwirq, err, i;
+
+ hwirq = sg2042_msi_allocate_hwirq(data, nr_irqs);
+ if (hwirq < 0)
+ return hwirq;
+
+ for (i = 0; i < nr_irqs; i++) {
+ err = sg2042_msi_parent_domain_alloc(domain, virq + i, hwirq + i);
+ if (err)
+ goto err_hwirq;
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &sg2042_msi_middle_irq_chip, data);
+ }
+
+ return 0;
+
+err_hwirq:
+ sg2042_msi_free_hwirq(data, hwirq, nr_irqs);
+ irq_domain_free_irqs_parent(domain, virq, i);
+
+ return err;
+}
+
+static void sg2042_msi_middle_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d);
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+ sg2042_msi_free_hwirq(data, d->hwirq, nr_irqs);
+}
+
+static const struct irq_domain_ops sg2042_msi_middle_domain_ops = {
+ .alloc = sg2042_msi_middle_domain_alloc,
+ .free = sg2042_msi_middle_domain_free,
+ .select = msi_lib_irq_domain_select,
+};
+
+#define SG2042_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS)
+
+#define SG2042_MSI_FLAGS_SUPPORTED MSI_GENERIC_FLAGS_MASK
+
+static const struct msi_parent_ops sg2042_msi_parent_ops = {
+ .required_flags = SG2042_MSI_FLAGS_REQUIRED,
+ .supported_flags = SG2042_MSI_FLAGS_SUPPORTED,
+ .bus_select_mask = MATCH_PCI_MSI,
+ .bus_select_token = DOMAIN_BUS_NEXUS,
+ .prefix = "SG2042-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
+static int sg2042_msi_init_domains(struct sg2042_msi_chipdata *data,
+ struct irq_domain *plic_domain, struct device *dev)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct irq_domain *middle_domain;
+
+ middle_domain = irq_domain_create_hierarchy(plic_domain, 0, data->num_irqs, fwnode,
+ &sg2042_msi_middle_domain_ops, data);
+ if (!middle_domain) {
+ pr_err("Failed to create the MSI middle domain\n");
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(middle_domain, DOMAIN_BUS_NEXUS);
+
+ middle_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+ middle_domain->msi_parent_ops = &sg2042_msi_parent_ops;
+
+ return 0;
+}
+
+static int sg2042_msi_probe(struct platform_device *pdev)
+{
+ struct fwnode_reference_args args = { };
+ struct sg2042_msi_chipdata *data;
+ struct device *dev = &pdev->dev;
+ struct irq_domain *plic_domain;
+ struct resource *res;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(struct sg2042_msi_chipdata), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->reg_clr = devm_platform_ioremap_resource_byname(pdev, "clr");
+ if (IS_ERR(data->reg_clr)) {
+ dev_err(dev, "Failed to map clear register\n");
+ return PTR_ERR(data->reg_clr);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "doorbell");
+ if (!res) {
+ dev_err(dev, "Failed get resource from set\n");
+ return -EINVAL;
+ }
+ data->doorbell_addr = res->start;
+
+ ret = fwnode_property_get_reference_args(dev_fwnode(dev), "msi-ranges",
+ "#interrupt-cells", 0, 0, &args);
+ if (ret) {
+ dev_err(dev, "Unable to parse MSI vec base\n");
+ return ret;
+ }
+ fwnode_handle_put(args.fwnode);
+
+ ret = fwnode_property_get_reference_args(dev_fwnode(dev), "msi-ranges", NULL,
+ args.nargs + 1, 0, &args);
+ if (ret) {
+ dev_err(dev, "Unable to parse MSI vec number\n");
+ return ret;
+ }
+
+ plic_domain = irq_find_matching_fwnode(args.fwnode, DOMAIN_BUS_ANY);
+ fwnode_handle_put(args.fwnode);
+ if (!plic_domain) {
+ pr_err("Failed to find the PLIC domain\n");
+ return -ENXIO;
+ }
+
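+ /* The first cell of "msi-ranges" is the base parent vector, the last cell the vector count */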
+ data->irq_first = (u32)args.args[0];
+ data->num_irqs = (u32)args.args[args.nargs - 1];
+
+ mutex_init(&data->msi_map_lock);
+
+ return sg2042_msi_init_domains(data, plic_domain, dev);
+}
+
+static const struct of_device_id sg2042_msi_of_match[] = {
+ { .compatible = "sophgo,sg2042-msi" },
+ { }
+};
+
+static struct platform_driver sg2042_msi_driver = {
+ .driver = {
+ .name = "sg2042-msi",
+ .of_match_table = sg2042_msi_of_match,
+ },
+ .probe = sg2042_msi_probe,
+};
+builtin_platform_driver(sg2042_msi_driver);
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index 0b4312152024..01b0d8321728 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -48,32 +48,41 @@ enum {
SUNXI_SRC_TYPE_EDGE_RISING,
};
-struct sunxi_sc_nmi_reg_offs {
- u32 ctrl;
- u32 pend;
- u32 enable;
+struct sunxi_sc_nmi_data {
+ struct {
+ u32 ctrl;
+ u32 pend;
+ u32 enable;
+ } reg_offs;
+ u32 enable_val;
};
-static const struct sunxi_sc_nmi_reg_offs sun6i_reg_offs __initconst = {
- .ctrl = SUN6I_NMI_CTRL,
- .pend = SUN6I_NMI_PENDING,
- .enable = SUN6I_NMI_ENABLE,
+static const struct sunxi_sc_nmi_data sun6i_data __initconst = {
+ .reg_offs.ctrl = SUN6I_NMI_CTRL,
+ .reg_offs.pend = SUN6I_NMI_PENDING,
+ .reg_offs.enable = SUN6I_NMI_ENABLE,
};
-static const struct sunxi_sc_nmi_reg_offs sun7i_reg_offs __initconst = {
- .ctrl = SUN7I_NMI_CTRL,
- .pend = SUN7I_NMI_PENDING,
- .enable = SUN7I_NMI_ENABLE,
+static const struct sunxi_sc_nmi_data sun7i_data __initconst = {
+ .reg_offs.ctrl = SUN7I_NMI_CTRL,
+ .reg_offs.pend = SUN7I_NMI_PENDING,
+ .reg_offs.enable = SUN7I_NMI_ENABLE,
};
-static const struct sunxi_sc_nmi_reg_offs sun9i_reg_offs __initconst = {
- .ctrl = SUN9I_NMI_CTRL,
- .pend = SUN9I_NMI_PENDING,
- .enable = SUN9I_NMI_ENABLE,
+static const struct sunxi_sc_nmi_data sun9i_data __initconst = {
+ .reg_offs.ctrl = SUN9I_NMI_CTRL,
+ .reg_offs.pend = SUN9I_NMI_PENDING,
+ .reg_offs.enable = SUN9I_NMI_ENABLE,
};
-static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off,
- u32 val)
+static const struct sunxi_sc_nmi_data sun55i_a523_data __initconst = {
+ .reg_offs.ctrl = SUN9I_NMI_CTRL,
+ .reg_offs.pend = SUN9I_NMI_PENDING,
+ .reg_offs.enable = SUN9I_NMI_ENABLE,
+ .enable_val = BIT(31),
+};
+
+static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off, u32 val)
{
irq_reg_writel(gc, val, off);
}
@@ -143,15 +152,13 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
}
static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
- const struct sunxi_sc_nmi_reg_offs *reg_offs)
+ const struct sunxi_sc_nmi_data *data)
{
- struct irq_domain *domain;
+ unsigned int irq, clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
struct irq_chip_generic *gc;
- unsigned int irq;
- unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ struct irq_domain *domain;
int ret;
-
domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL);
if (!domain) {
pr_err("Could not register interrupt domain.\n");
@@ -186,27 +193,28 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
gc->chip_types[0].chip.irq_eoi = irq_gc_ack_set_bit;
gc->chip_types[0].chip.irq_set_type = sunxi_sc_nmi_set_type;
- gc->chip_types[0].chip.flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED |
+ gc->chip_types[0].chip.flags = IRQCHIP_EOI_THREADED |
+ IRQCHIP_EOI_IF_HANDLED |
IRQCHIP_SKIP_SET_WAKE;
- gc->chip_types[0].regs.ack = reg_offs->pend;
- gc->chip_types[0].regs.mask = reg_offs->enable;
- gc->chip_types[0].regs.type = reg_offs->ctrl;
+ gc->chip_types[0].regs.ack = data->reg_offs.pend;
+ gc->chip_types[0].regs.mask = data->reg_offs.enable;
+ gc->chip_types[0].regs.type = data->reg_offs.ctrl;
gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH;
gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit;
gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit;
gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit;
gc->chip_types[1].chip.irq_set_type = sunxi_sc_nmi_set_type;
- gc->chip_types[1].regs.ack = reg_offs->pend;
- gc->chip_types[1].regs.mask = reg_offs->enable;
- gc->chip_types[1].regs.type = reg_offs->ctrl;
+ gc->chip_types[1].regs.ack = data->reg_offs.pend;
+ gc->chip_types[1].regs.mask = data->reg_offs.enable;
+ gc->chip_types[1].regs.type = data->reg_offs.ctrl;
gc->chip_types[1].handler = handle_edge_irq;
/* Disable any active interrupts */
- sunxi_sc_nmi_write(gc, reg_offs->enable, 0);
+ sunxi_sc_nmi_write(gc, data->reg_offs.enable, data->enable_val);
/* Clear any pending NMI interrupts */
- sunxi_sc_nmi_write(gc, reg_offs->pend, SUNXI_NMI_IRQ_BIT);
+ sunxi_sc_nmi_write(gc, data->reg_offs.pend, SUNXI_NMI_IRQ_BIT);
irq_set_chained_handler_and_data(irq, sunxi_sc_nmi_handle_irq, domain);
@@ -221,20 +229,27 @@ fail_irqd_remove:
static int __init sun6i_sc_nmi_irq_init(struct device_node *node,
struct device_node *parent)
{
- return sunxi_sc_nmi_irq_init(node, &sun6i_reg_offs);
+ return sunxi_sc_nmi_irq_init(node, &sun6i_data);
}
IRQCHIP_DECLARE(sun6i_sc_nmi, "allwinner,sun6i-a31-sc-nmi", sun6i_sc_nmi_irq_init);
static int __init sun7i_sc_nmi_irq_init(struct device_node *node,
struct device_node *parent)
{
- return sunxi_sc_nmi_irq_init(node, &sun7i_reg_offs);
+ return sunxi_sc_nmi_irq_init(node, &sun7i_data);
}
IRQCHIP_DECLARE(sun7i_sc_nmi, "allwinner,sun7i-a20-sc-nmi", sun7i_sc_nmi_irq_init);
static int __init sun9i_nmi_irq_init(struct device_node *node,
struct device_node *parent)
{
- return sunxi_sc_nmi_irq_init(node, &sun9i_reg_offs);
+ return sunxi_sc_nmi_irq_init(node, &sun9i_data);
}
IRQCHIP_DECLARE(sun9i_nmi, "allwinner,sun9i-a80-nmi", sun9i_nmi_irq_init);
+
+static int __init sun55i_nmi_irq_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return sunxi_sc_nmi_irq_init(node, &sun55i_a523_data);
+}
+IRQCHIP_DECLARE(sun55i_nmi, "allwinner,sun55i-a523-nmi", sun55i_nmi_irq_init);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 2b27d043921c..a104cbb0a001 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -778,6 +778,14 @@ config LEDS_MAX77650
help
LEDs driver for MAX77650 family of PMICs from Maxim Integrated.
+config LEDS_MAX77705
+ tristate "LED support for Maxim MAX77705 PMIC"
+ depends on MFD_MAX77705
+ depends on LEDS_CLASS
+ depends on LEDS_CLASS_MULTICOLOR
+ help
+ LED driver for MAX77705 PMIC from Maxim Integrated.
+
config LEDS_MAX8997
tristate "LED support for MAX8997 PMIC"
depends on LEDS_CLASS && MFD_MAX8997
@@ -924,7 +932,8 @@ config LEDS_USER
config LEDS_NIC78BX
tristate "LED support for NI PXI NIC78bx devices"
depends on LEDS_CLASS
- depends on X86 && ACPI
+ depends on HAS_IOPORT
+ depends on X86 || COMPILE_TEST
help
This option enables support for the User1 and User2 LEDs on NI
PXI NIC78bx devices.
@@ -971,6 +980,7 @@ config LEDS_ST1202
depends on I2C
depends on OF
select LEDS_TRIGGERS
+ select LEDS_TRIGGER_PATTERN
help
Say Y to enable support for LEDs connected to LED1202
LED driver chips accessed via the I2C bus.
@@ -1014,7 +1024,7 @@ source "drivers/leds/rgb/Kconfig"
comment "LED Triggers"
source "drivers/leds/trigger/Kconfig"
-comment "Simple LED drivers"
-source "drivers/leds/simple/Kconfig"
+comment "Simatic LED drivers"
+source "drivers/leds/simatic/Kconfig"
endif # NEW_LEDS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 6ad52e219ec6..2f170d69dcbf 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_LEDS_LP8864) += leds-lp8864.o
obj-$(CONFIG_LEDS_LT3593) += leds-lt3593.o
obj-$(CONFIG_LEDS_MAX5970) += leds-max5970.o
obj-$(CONFIG_LEDS_MAX77650) += leds-max77650.o
+obj-$(CONFIG_LEDS_MAX77705) += leds-max77705.o
obj-$(CONFIG_LEDS_MAX8997) += leds-max8997.o
obj-$(CONFIG_LEDS_MC13783) += leds-mc13783.o
obj-$(CONFIG_LEDS_MENF21BMC) += leds-menf21bmc.o
@@ -121,5 +122,5 @@ obj-$(CONFIG_LEDS_TRIGGERS) += trigger/
# LED Blink
obj-y += blink/
-# Simple LED drivers
-obj-y += simple/
+# Simatic LED drivers
+obj-y += simatic/
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index f6c46d2e5276..e3d8ddcff567 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -159,8 +159,19 @@ static void set_brightness_delayed(struct work_struct *ws)
* before this work item runs once. To make sure this works properly
* handle LED_SET_BRIGHTNESS_OFF first.
*/
- if (test_and_clear_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags))
+ if (test_and_clear_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags)) {
set_brightness_delayed_set_brightness(led_cdev, LED_OFF);
+ /*
+ * Consecutive led_set_brightness(LED_OFF) and
+ * led_set_brightness(LED_FULL) calls could have been executed
+ * out of order (LED_FULL first) if the work_flags bits were set
+ * between the LED_SET_BRIGHTNESS_OFF and LED_SET_BRIGHTNESS
+ * handling of this work. To avoid ending with the LED turned
+ * off, turn the LED on again.
+ */
+ if (led_cdev->delayed_set_value != LED_OFF)
+ set_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags);
+ }
if (test_and_clear_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags))
set_brightness_delayed_set_brightness(led_cdev, led_cdev->delayed_set_value);
@@ -331,10 +342,13 @@ void led_set_brightness_nopm(struct led_classdev *led_cdev, unsigned int value)
* change is done immediately afterwards (before the work runs),
* it uses a separate work_flag.
*/
- if (value) {
- led_cdev->delayed_set_value = value;
+ led_cdev->delayed_set_value = value;
+ /* Ensure delayed_set_value is seen before work_flags modification */
+ smp_mb__before_atomic();
+
+ if (value)
set_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags);
- } else {
+ else {
clear_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags);
clear_bit(LED_SET_BLINK, &led_cdev->work_flags);
set_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags);
diff --git a/drivers/leds/leds-aw200xx.c b/drivers/leds/leds-aw200xx.c
index 08cca128458c..fe223d363a5d 100644
--- a/drivers/leds/leds-aw200xx.c
+++ b/drivers/leds/leds-aw200xx.c
@@ -379,7 +379,7 @@ static void aw200xx_enable(const struct aw200xx *const chip)
static void aw200xx_disable(const struct aw200xx *const chip)
{
- return gpiod_set_value_cansleep(chip->hwen, 0);
+ gpiod_set_value_cansleep(chip->hwen, 0);
}
static int aw200xx_probe_get_display_rows(struct device *dev,
diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c
index 06196d851ade..995f2adf8569 100644
--- a/drivers/leds/leds-lp8860.c
+++ b/drivers/leds/leds-lp8860.c
@@ -331,7 +331,6 @@ static const struct regmap_config lp8860_regmap_config = {
.max_register = LP8860_EEPROM_UNLOCK,
.reg_defaults = lp8860_reg_defs,
.num_reg_defaults = ARRAY_SIZE(lp8860_reg_defs),
- .cache_type = REGCACHE_NONE,
};
static const struct reg_default lp8860_eeprom_defs[] = {
@@ -369,7 +368,6 @@ static const struct regmap_config lp8860_eeprom_regmap_config = {
.max_register = LP8860_EEPROM_REG_24,
.reg_defaults = lp8860_eeprom_defs,
.num_reg_defaults = ARRAY_SIZE(lp8860_eeprom_defs),
- .cache_type = REGCACHE_NONE,
};
static int lp8860_probe(struct i2c_client *client)
diff --git a/drivers/leds/leds-max77705.c b/drivers/leds/leds-max77705.c
new file mode 100644
index 000000000000..933cb4f19be9
--- /dev/null
+++ b/drivers/leds/leds-max77705.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Based on leds-max77650 driver
+ *
+ * LED driver for MAXIM 77705 PMIC.
+ * Copyright (C) 2025 Dzmitry Sankouski <dsankouski@gmail.org>
+ */
+
+#include <linux/i2c.h>
+#include <linux/led-class-multicolor.h>
+#include <linux/leds.h>
+#include <linux/mfd/max77705-private.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define MAX77705_LED_NUM_LEDS 4
+#define MAX77705_LED_EN_MASK GENMASK(1, 0)
+#define MAX77705_LED_MAX_BRIGHTNESS 0xff
+#define MAX77705_LED_EN_SHIFT(reg) (reg * MAX77705_RGBLED_EN_WIDTH)
+#define MAX77705_LED_REG_BRIGHTNESS(reg) (reg + MAX77705_RGBLED_REG_LED0BRT)
+
+struct max77705_led {
+ struct led_classdev cdev;
+ struct led_classdev_mc mcdev;
+ struct regmap *regmap;
+
+ struct mc_subled *subled_info;
+};
+
+static const struct regmap_config max77705_leds_regmap_config = {
+ .reg_base = MAX77705_RGBLED_REG_BASE,
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX77705_LED_REG_END,
+};
+
+static int max77705_rgb_blink(struct led_classdev *cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct max77705_led *led = container_of(cdev, struct max77705_led, cdev);
+ int value, on_value, off_value;
+
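+ /* Quantize the requested ON/OFF delays into the 4-bit step values used by the LEDBLNK register */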
+ if (*delay_on < MAX77705_RGB_DELAY_100_STEP)
+ on_value = 0;
+ else if (*delay_on < MAX77705_RGB_DELAY_100_STEP_LIM)
+ on_value = *delay_on / MAX77705_RGB_DELAY_100_STEP - 1;
+ else if (*delay_on < MAX77705_RGB_DELAY_250_STEP_LIM)
+ on_value = (*delay_on - MAX77705_RGB_DELAY_100_STEP_LIM) /
+ MAX77705_RGB_DELAY_250_STEP +
+ MAX77705_RGB_DELAY_100_STEP_COUNT;
+ else
+ on_value = 15;
+
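+ /* The ON time occupies the upper nibble, the OFF time the lower nibble */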
+ on_value <<= 4;
+
+ if (*delay_off < 1)
+ off_value = 0;
+ else if (*delay_off < MAX77705_RGB_DELAY_500_STEP)
+ off_value = 1;
+ else if (*delay_off < MAX77705_RGB_DELAY_500_STEP_LIM)
+ off_value = *delay_off / MAX77705_RGB_DELAY_500_STEP;
+ else if (*delay_off < MAX77705_RGB_DELAY_1000_STEP_LIM)
+ off_value = (*delay_off - MAX77705_RGB_DELAY_1000_STEP_LIM) /
+ MAX77705_RGB_DELAY_1000_STEP +
+ MAX77705_RGB_DELAY_500_STEP_COUNT;
+ else if (*delay_off < MAX77705_RGB_DELAY_2000_STEP_LIM)
+ off_value = (*delay_off - MAX77705_RGB_DELAY_2000_STEP_LIM) /
+ MAX77705_RGB_DELAY_2000_STEP +
+ MAX77705_RGB_DELAY_1000_STEP_COUNT;
+ else
+ off_value = 15;
+
+ value = on_value | off_value;
+ return regmap_write(led->regmap, MAX77705_RGBLED_REG_LEDBLNK, value);
+}
+
+static int max77705_led_brightness_set(struct regmap *regmap, struct mc_subled *subled,
+ int num_colors)
+{
+ int ret;
+
+ for (int i = 0; i < num_colors; i++) {
+ unsigned int channel, brightness;
+
+ channel = subled[i].channel;
+ brightness = subled[i].brightness;
+
+ if (brightness == LED_OFF) {
+ /* Flash OFF */
+ ret = regmap_update_bits(regmap,
+ MAX77705_RGBLED_REG_LEDEN,
+ MAX77705_LED_EN_MASK << MAX77705_LED_EN_SHIFT(channel), 0);
+ } else {
+ /* Set current */
+ ret = regmap_write(regmap, MAX77705_LED_REG_BRIGHTNESS(channel),
+ brightness);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(regmap,
+ MAX77705_RGBLED_REG_LEDEN,
+ LED_ON << MAX77705_LED_EN_SHIFT(channel),
+ MAX77705_LED_EN_MASK << MAX77705_LED_EN_SHIFT(channel));
+ }
+ }
+
+ return ret;
+}
+
+static int max77705_led_brightness_set_single(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct max77705_led *led = container_of(cdev, struct max77705_led, cdev);
+
+ led->subled_info->brightness = brightness;
+
+ return max77705_led_brightness_set(led->regmap, led->subled_info, 1);
+}
+
+static int max77705_led_brightness_set_multi(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct led_classdev_mc *mcdev = lcdev_to_mccdev(cdev);
+ struct max77705_led *led = container_of(mcdev, struct max77705_led, mcdev);
+
+ led_mc_calc_color_components(mcdev, brightness);
+
+ return max77705_led_brightness_set(led->regmap, led->mcdev.subled_info, mcdev->num_colors);
+}
+
+static int max77705_parse_subled(struct device *dev, struct fwnode_handle *np,
+ struct mc_subled *info)
+{
+ u32 color = LED_COLOR_ID_GREEN;
+ u32 reg;
+ int ret;
+
+ ret = fwnode_property_read_u32(np, "reg", &reg);
+ if (ret || !reg || reg >= MAX77705_LED_NUM_LEDS)
+ return dev_err_probe(dev, -EINVAL, "invalid \"reg\" of %pOFn\n", np);
+
+ info->channel = reg;
+
+ ret = fwnode_property_read_u32(np, "color", &color);
+ if (ret < 0 && ret != -EINVAL)
+ return dev_err_probe(dev, ret,
+ "failed to parse \"color\" of %pOF\n", np);
+
+ info->color_index = color;
+
+ return 0;
+}
+
+static int max77705_add_led(struct device *dev, struct regmap *regmap, struct fwnode_handle *np)
+{
+ int ret, i = 0;
+ unsigned int color, reg;
+ struct max77705_led *led;
+ struct led_classdev *cdev;
+ struct mc_subled *info;
+ struct fwnode_handle *child;
+ struct led_init_data init_data = {};
+
+ led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ ret = fwnode_property_read_u32(np, "color", &color);
+ if (ret < 0 && ret != -EINVAL)
+ return dev_err_probe(dev, ret,
+ "failed to parse \"color\" of %pOF\n", np);
+
+ led->regmap = regmap;
+ init_data.fwnode = np;
+
+ if (color == LED_COLOR_ID_RGB) {
+ int num_channels = of_get_available_child_count(to_of_node(np));
+
+ ret = fwnode_property_read_u32(np, "reg", &reg);
+ if (ret || reg >= MAX77705_LED_NUM_LEDS)
+ ret = -EINVAL;
+
+ info = devm_kcalloc(dev, num_channels, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ cdev = &led->mcdev.led_cdev;
+ cdev->max_brightness = MAX77705_LED_MAX_BRIGHTNESS;
+ cdev->brightness_set_blocking = max77705_led_brightness_set_multi;
+ cdev->blink_set = max77705_rgb_blink;
+
+ fwnode_for_each_available_child_node(np, child) {
+ ret = max77705_parse_subled(dev, child, &info[i]);
+ if (ret < 0)
+ return ret;
+
+ info[i].intensity = 0;
+ i++;
+ }
+
+ led->mcdev.subled_info = info;
+ led->mcdev.num_colors = num_channels;
+ led->cdev = *cdev;
+
+ ret = devm_led_classdev_multicolor_register_ext(dev, &led->mcdev, &init_data);
+ if (ret)
+ return ret;
+
+ ret = max77705_led_brightness_set_multi(&led->cdev, LED_OFF);
+ if (ret)
+ return ret;
+ } else {
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ max77705_parse_subled(dev, np, info);
+
+ led->subled_info = info;
+ led->cdev.brightness_set_blocking = max77705_led_brightness_set_single;
+ led->cdev.blink_set = max77705_rgb_blink;
+ led->cdev.max_brightness = MAX77705_LED_MAX_BRIGHTNESS;
+
+ ret = devm_led_classdev_register_ext(dev, &led->cdev, &init_data);
+ if (ret)
+ return ret;
+
+ ret = max77705_led_brightness_set_single(&led->cdev, LED_OFF);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int max77705_led_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct i2c_client *i2c = to_i2c_client(pdev->dev.parent);
+ struct regmap *regmap;
+ int ret;
+
+ regmap = devm_regmap_init_i2c(i2c, &max77705_leds_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap), "Failed to register LEDs regmap\n");
+
+ device_for_each_child_node_scoped(dev, child) {
+ ret = max77705_add_led(dev, regmap, child);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id max77705_led_of_match[] = {
+ { .compatible = "maxim,max77705-rgb" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max77705_led_of_match);
+
+static struct platform_driver max77705_led_driver = {
+ .driver = {
+ .name = "max77705-led",
+ .of_match_table = max77705_led_of_match,
+ },
+ .probe = max77705_led_probe,
+};
+module_platform_driver(max77705_led_driver);
+
+MODULE_DESCRIPTION("Maxim MAX77705 LED driver");
+MODULE_AUTHOR("Dzmitry Sankouski <dsankouski@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-mlxcpld.c b/drivers/leds/leds-mlxcpld.c
index 718f55096e90..f25f68789281 100644
--- a/drivers/leds/leds-mlxcpld.c
+++ b/drivers/leds/leds-mlxcpld.c
@@ -32,7 +32,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/dmi.h>
#include <linux/hwmon.h>
diff --git a/drivers/leds/leds-nic78bx.c b/drivers/leds/leds-nic78bx.c
index 282d9e4cf116..f3161266b8ad 100644
--- a/drivers/leds/leds-nic78bx.c
+++ b/drivers/leds/leds-nic78bx.c
@@ -3,11 +3,19 @@
* Copyright (C) 2016 National Instruments Corp.
*/
-#include <linux/acpi.h>
+#include <linux/array_size.h>
+#include <linux/bits.h>
+#include <linux/container_of.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
#include <linux/leds.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
+#include <linux/types.h>
#define NIC78BX_USER1_LED_MASK 0x3
#define NIC78BX_USER1_GREEN_LED BIT(0)
@@ -181,8 +189,8 @@ static int nic78bx_probe(struct platform_device *pdev)
}
static const struct acpi_device_id led_device_ids[] = {
- {"NIC78B3", 0},
- {"", 0},
+ { "NIC78B3" },
+ { }
};
MODULE_DEVICE_TABLE(acpi, led_device_ids);
@@ -190,7 +198,7 @@ static struct platform_driver led_driver = {
.probe = nic78bx_probe,
.driver = {
.name = KBUILD_MODNAME,
- .acpi_match_table = ACPI_PTR(led_device_ids),
+ .acpi_match_table = led_device_ids,
},
};
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index 94a9f8a54b35..e9cfde9fe4b1 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -62,6 +62,8 @@
#define PCA955X_GPIO_HIGH LED_OFF
#define PCA955X_GPIO_LOW LED_FULL
+#define PCA955X_BLINK_DEFAULT_MS 1000
+
enum pca955x_type {
pca9550,
pca9551,
@@ -74,6 +76,7 @@ struct pca955x_chipdef {
int bits;
u8 slv_addr; /* 7-bit slave address mask */
int slv_addr_shift; /* Number of bits to ignore */
+ int blink_div; /* PSC divider */
};
static const struct pca955x_chipdef pca955x_chipdefs[] = {
@@ -81,26 +84,31 @@ static const struct pca955x_chipdef pca955x_chipdefs[] = {
.bits = 2,
.slv_addr = /* 110000x */ 0x60,
.slv_addr_shift = 1,
+ .blink_div = 44,
},
[pca9551] = {
.bits = 8,
.slv_addr = /* 1100xxx */ 0x60,
.slv_addr_shift = 3,
+ .blink_div = 38,
},
[pca9552] = {
.bits = 16,
.slv_addr = /* 1100xxx */ 0x60,
.slv_addr_shift = 3,
+ .blink_div = 44,
},
[ibm_pca9552] = {
.bits = 16,
.slv_addr = /* 0110xxx */ 0x30,
.slv_addr_shift = 3,
+ .blink_div = 44,
},
[pca9553] = {
.bits = 4,
.slv_addr = /* 110001x */ 0x62,
.slv_addr_shift = 1,
+ .blink_div = 44,
},
};
@@ -109,7 +117,9 @@ struct pca955x {
struct pca955x_led *leds;
const struct pca955x_chipdef *chipdef;
struct i2c_client *client;
+ unsigned long active_blink;
unsigned long active_pins;
+ unsigned long blink_period;
#ifdef CONFIG_LEDS_PCA955X_GPIO
struct gpio_chip gpio;
#endif
@@ -124,17 +134,25 @@ struct pca955x_led {
struct fwnode_handle *fwnode;
};
+#define led_to_pca955x(l) container_of(l, struct pca955x_led, led_cdev)
+
struct pca955x_platform_data {
struct pca955x_led *leds;
int num_leds;
};
/* 8 bits per input register */
-static inline int pca95xx_num_input_regs(int bits)
+static inline int pca955x_num_input_regs(int bits)
{
return (bits + 7) / 8;
}
+/* 4 bits per LED selector register */
+static inline int pca955x_num_led_regs(int bits)
+{
+ return (bits + 3) / 4;
+}
+
/*
* Return an LED selector register value based on an existing one, with
* the appropriate 2-bit state value set for the given LED number (0-3).
@@ -145,20 +163,25 @@ static inline u8 pca955x_ledsel(u8 oldval, int led_num, int state)
((state & 0x3) << (led_num << 1));
}
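+/*
+ * Return the 2-bit state value for the given LED number (0-3) from an LED
+ * selector register value.
+ */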
+static inline int pca955x_ledstate(u8 ls, int led_num)
+{
+ return (ls >> (led_num << 1)) & 0x3;
+}
+
/*
* Write to frequency prescaler register, used to program the
- * period of the PWM output. period = (PSCx + 1) / 38
+ * period of the PWM output. period = (PSCx + 1) / coeff
+ * Where for pca9551 chips coeff = 38 and for all other chips coeff = 44
*/
-static int pca955x_write_psc(struct i2c_client *client, int n, u8 val)
+static int pca955x_write_psc(struct pca955x *pca955x, int n, u8 val)
{
- struct pca955x *pca955x = i2c_get_clientdata(client);
- u8 cmd = pca95xx_num_input_regs(pca955x->chipdef->bits) + (2 * n);
+ u8 cmd = pca955x_num_input_regs(pca955x->chipdef->bits) + (2 * n);
int ret;
- ret = i2c_smbus_write_byte_data(client, cmd, val);
+ ret = i2c_smbus_write_byte_data(pca955x->client, cmd, val);
if (ret < 0)
- dev_err(&client->dev, "%s: reg 0x%x, val 0x%x, err %d\n",
- __func__, n, val, ret);
+ dev_err(&pca955x->client->dev, "%s: reg 0x%x, val 0x%x, err %d\n", __func__, n,
+ val, ret);
return ret;
}
@@ -169,16 +192,15 @@ static int pca955x_write_psc(struct i2c_client *client, int n, u8 val)
*
* Duty cycle is (256 - PWMx) / 256
*/
-static int pca955x_write_pwm(struct i2c_client *client, int n, u8 val)
+static int pca955x_write_pwm(struct pca955x *pca955x, int n, u8 val)
{
- struct pca955x *pca955x = i2c_get_clientdata(client);
- u8 cmd = pca95xx_num_input_regs(pca955x->chipdef->bits) + 1 + (2 * n);
+ u8 cmd = pca955x_num_input_regs(pca955x->chipdef->bits) + 1 + (2 * n);
int ret;
- ret = i2c_smbus_write_byte_data(client, cmd, val);
+ ret = i2c_smbus_write_byte_data(pca955x->client, cmd, val);
if (ret < 0)
- dev_err(&client->dev, "%s: reg 0x%x, val 0x%x, err %d\n",
- __func__, n, val, ret);
+ dev_err(&pca955x->client->dev, "%s: reg 0x%x, val 0x%x, err %d\n", __func__, n,
+ val, ret);
return ret;
}
@@ -186,16 +208,15 @@ static int pca955x_write_pwm(struct i2c_client *client, int n, u8 val)
* Write to LED selector register, which determines the source that
* drives the LED output.
*/
-static int pca955x_write_ls(struct i2c_client *client, int n, u8 val)
+static int pca955x_write_ls(struct pca955x *pca955x, int n, u8 val)
{
- struct pca955x *pca955x = i2c_get_clientdata(client);
- u8 cmd = pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n;
+ u8 cmd = pca955x_num_input_regs(pca955x->chipdef->bits) + 4 + n;
int ret;
- ret = i2c_smbus_write_byte_data(client, cmd, val);
+ ret = i2c_smbus_write_byte_data(pca955x->client, cmd, val);
if (ret < 0)
- dev_err(&client->dev, "%s: reg 0x%x, val 0x%x, err %d\n",
- __func__, n, val, ret);
+ dev_err(&pca955x->client->dev, "%s: reg 0x%x, val 0x%x, err %d\n", __func__, n,
+ val, ret);
return ret;
}
@@ -203,32 +224,43 @@ static int pca955x_write_ls(struct i2c_client *client, int n, u8 val)
* Read the LED selector register, which determines the source that
* drives the LED output.
*/
-static int pca955x_read_ls(struct i2c_client *client, int n, u8 *val)
+static int pca955x_read_ls(struct pca955x *pca955x, int n, u8 *val)
{
- struct pca955x *pca955x = i2c_get_clientdata(client);
- u8 cmd = pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n;
+ u8 cmd = pca955x_num_input_regs(pca955x->chipdef->bits) + 4 + n;
int ret;
- ret = i2c_smbus_read_byte_data(client, cmd);
+ ret = i2c_smbus_read_byte_data(pca955x->client, cmd);
if (ret < 0) {
- dev_err(&client->dev, "%s: reg 0x%x, err %d\n",
- __func__, n, ret);
+ dev_err(&pca955x->client->dev, "%s: reg 0x%x, err %d\n", __func__, n, ret);
return ret;
}
*val = (u8)ret;
return 0;
}
-static int pca955x_read_pwm(struct i2c_client *client, int n, u8 *val)
+static int pca955x_read_pwm(struct pca955x *pca955x, int n, u8 *val)
{
- struct pca955x *pca955x = i2c_get_clientdata(client);
- u8 cmd = pca95xx_num_input_regs(pca955x->chipdef->bits) + 1 + (2 * n);
+ u8 cmd = pca955x_num_input_regs(pca955x->chipdef->bits) + 1 + (2 * n);
int ret;
- ret = i2c_smbus_read_byte_data(client, cmd);
+ ret = i2c_smbus_read_byte_data(pca955x->client, cmd);
if (ret < 0) {
- dev_err(&client->dev, "%s: reg 0x%x, err %d\n",
- __func__, n, ret);
+ dev_err(&pca955x->client->dev, "%s: reg 0x%x, err %d\n", __func__, n, ret);
+ return ret;
+ }
+ *val = (u8)ret;
+ return 0;
+}
+
+static int pca955x_read_psc(struct pca955x *pca955x, int n, u8 *val)
+{
+ int ret;
+ u8 cmd;
+
+ cmd = pca955x_num_input_regs(pca955x->chipdef->bits) + (2 * n);
+ ret = i2c_smbus_read_byte_data(pca955x->client, cmd);
+ if (ret < 0) {
+ dev_err(&pca955x->client->dev, "%s: reg 0x%x, err %d\n", __func__, n, ret);
return ret;
}
*val = (u8)ret;
@@ -237,30 +269,25 @@ static int pca955x_read_pwm(struct i2c_client *client, int n, u8 *val)
static enum led_brightness pca955x_led_get(struct led_classdev *led_cdev)
{
- struct pca955x_led *pca955x_led = container_of(led_cdev,
- struct pca955x_led,
- led_cdev);
+ struct pca955x_led *pca955x_led = led_to_pca955x(led_cdev);
struct pca955x *pca955x = pca955x_led->pca955x;
u8 ls, pwm;
int ret;
- ret = pca955x_read_ls(pca955x->client, pca955x_led->led_num / 4, &ls);
+ ret = pca955x_read_ls(pca955x, pca955x_led->led_num / 4, &ls);
if (ret)
return ret;
- ls = (ls >> ((pca955x_led->led_num % 4) << 1)) & 0x3;
- switch (ls) {
+ switch (pca955x_ledstate(ls, pca955x_led->led_num % 4)) {
case PCA955X_LS_LED_ON:
+ case PCA955X_LS_BLINK0:
ret = LED_FULL;
break;
case PCA955X_LS_LED_OFF:
ret = LED_OFF;
break;
- case PCA955X_LS_BLINK0:
- ret = LED_HALF;
- break;
case PCA955X_LS_BLINK1:
- ret = pca955x_read_pwm(pca955x->client, 1, &pwm);
+ ret = pca955x_read_pwm(pca955x, 1, &pwm);
if (ret)
return ret;
ret = 255 - pwm;
@@ -273,51 +300,150 @@ static enum led_brightness pca955x_led_get(struct led_classdev *led_cdev)
static int pca955x_led_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
- struct pca955x_led *pca955x_led;
- struct pca955x *pca955x;
+ struct pca955x_led *pca955x_led = led_to_pca955x(led_cdev);
+ struct pca955x *pca955x = pca955x_led->pca955x;
+ int reg = pca955x_led->led_num / 4;
+ int bit = pca955x_led->led_num % 4;
u8 ls;
- int chip_ls; /* which LSx to use (0-3 potentially) */
- int ls_led; /* which set of bits within LSx to use (0-3) */
int ret;
- pca955x_led = container_of(led_cdev, struct pca955x_led, led_cdev);
- pca955x = pca955x_led->pca955x;
-
- chip_ls = pca955x_led->led_num / 4;
- ls_led = pca955x_led->led_num % 4;
-
mutex_lock(&pca955x->lock);
- ret = pca955x_read_ls(pca955x->client, chip_ls, &ls);
+ ret = pca955x_read_ls(pca955x, reg, &ls);
if (ret)
goto out;
- switch (value) {
- case LED_FULL:
- ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_LED_ON);
- break;
- case LED_OFF:
- ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_LED_OFF);
- break;
- case LED_HALF:
- ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_BLINK0);
- break;
- default:
- /*
- * Use PWM1 for all other values. This has the unwanted
- * side effect of making all LEDs on the chip share the
- * same brightness level if set to a value other than
- * OFF, HALF, or FULL. But, this is probably better than
- * just turning off for all other values.
- */
- ret = pca955x_write_pwm(pca955x->client, 1, 255 - value);
- if (ret)
+ if (test_bit(pca955x_led->led_num, &pca955x->active_blink)) {
+ if (value == LED_OFF) {
+ clear_bit(pca955x_led->led_num, &pca955x->active_blink);
+ ls = pca955x_ledsel(ls, bit, PCA955X_LS_LED_OFF);
+ } else {
+ /* No variable brightness for blinking LEDs */
goto out;
- ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_BLINK1);
- break;
+ }
+ } else {
+ switch (value) {
+ case LED_FULL:
+ ls = pca955x_ledsel(ls, bit, PCA955X_LS_LED_ON);
+ break;
+ case LED_OFF:
+ ls = pca955x_ledsel(ls, bit, PCA955X_LS_LED_OFF);
+ break;
+ default:
+ /*
+ * Use PWM1 for all other values. This has the unwanted
+ * side effect of making all LEDs on the chip share the
+ * same brightness level if set to a value other than
+ * OFF or FULL. But, this is probably better than just
+ * turning off for all other values.
+ */
+ ret = pca955x_write_pwm(pca955x, 1, 255 - value);
+ if (ret)
+ goto out;
+ ls = pca955x_ledsel(ls, bit, PCA955X_LS_BLINK1);
+ break;
+ }
}
- ret = pca955x_write_ls(pca955x->client, chip_ls, ls);
+ ret = pca955x_write_ls(pca955x, reg, ls);
+
+out:
+ mutex_unlock(&pca955x->lock);
+
+ return ret;
+}
+
+static u8 pca955x_period_to_psc(struct pca955x *pca955x, unsigned long period)
+{
+ /* psc register value = (blink period * coeff) - 1 */
+ period *= pca955x->chipdef->blink_div;
+ period /= MSEC_PER_SEC;
+ period -= 1;
+
+ return period;
+}
+
+static unsigned long pca955x_psc_to_period(struct pca955x *pca955x, u8 psc)
+{
+ unsigned long period = psc;
+
+ /* blink period = (psc register value + 1) / coeff */
+ period += 1;
+ period *= MSEC_PER_SEC;
+ period /= pca955x->chipdef->blink_div;
+
+ return period;
+}
+
+static int pca955x_led_blink(struct led_classdev *led_cdev,
+ unsigned long *delay_on, unsigned long *delay_off)
+{
+ struct pca955x_led *pca955x_led = led_to_pca955x(led_cdev);
+ struct pca955x *pca955x = pca955x_led->pca955x;
+ unsigned long period = *delay_on + *delay_off;
+ int ret = 0;
+
+ mutex_lock(&pca955x->lock);
+
+ if (period) {
+ if (*delay_on != *delay_off) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (period < pca955x_psc_to_period(pca955x, 0) ||
+ period > pca955x_psc_to_period(pca955x, 0xff)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ } else {
+ period = pca955x->active_blink ? pca955x->blink_period :
+ PCA955X_BLINK_DEFAULT_MS;
+ }
+
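+ /*
+ * PSC0 is shared by all blinking LEDs, so only accept the request if
+ * no other LED is blinking or if it matches the current blink period.
+ */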
+ if (!pca955x->active_blink ||
+ pca955x->active_blink == BIT(pca955x_led->led_num) ||
+ pca955x->blink_period == period) {
+ u8 psc = pca955x_period_to_psc(pca955x, period);
+
+ if (!test_and_set_bit(pca955x_led->led_num,
+ &pca955x->active_blink)) {
+ u8 ls;
+ int reg = pca955x_led->led_num / 4;
+ int bit = pca955x_led->led_num % 4;
+
+ ret = pca955x_read_ls(pca955x, reg, &ls);
+ if (ret)
+ goto out;
+
+ ls = pca955x_ledsel(ls, bit, PCA955X_LS_BLINK0);
+ ret = pca955x_write_ls(pca955x, reg, ls);
+ if (ret)
+ goto out;
+
+ /*
+ * Force 50% duty cycle to maintain the specified
+ * blink rate.
+ */
+ ret = pca955x_write_pwm(pca955x, 0, 128);
+ if (ret)
+ goto out;
+ }
+
+ if (pca955x->blink_period != period) {
+ pca955x->blink_period = period;
+ ret = pca955x_write_psc(pca955x, 0, psc);
+ if (ret)
+ goto out;
+ }
+
+ period = pca955x_psc_to_period(pca955x, psc);
+ period /= 2;
+ *delay_on = period;
+ *delay_off = period;
+ } else {
+ ret = -EBUSY;
+ }
out:
mutex_unlock(&pca955x->lock);
@@ -455,10 +581,13 @@ static int pca955x_probe(struct i2c_client *client)
struct led_classdev *led;
struct led_init_data init_data;
struct i2c_adapter *adapter;
- int i, err;
+ int i, bit, err, nls, reg;
+ u8 ls1[4];
+ u8 ls2[4];
struct pca955x_platform_data *pdata;
+ u8 psc0;
+ bool keep_psc0 = false;
bool set_default_label = false;
- bool keep_pwm = false;
char default_label[8];
chip = i2c_get_match_data(client);
@@ -509,10 +638,22 @@ static int pca955x_probe(struct i2c_client *client)
mutex_init(&pca955x->lock);
pca955x->client = client;
pca955x->chipdef = chip;
+ pca955x->blink_period = PCA955X_BLINK_DEFAULT_MS;
init_data.devname_mandatory = false;
init_data.devicename = "pca955x";
+ nls = pca955x_num_led_regs(chip->bits);
+ /* Use auto-increment feature to read all the LED selectors at once. */
+ err = i2c_smbus_read_i2c_block_data(client,
+ 0x10 | (pca955x_num_input_regs(chip->bits) + 4), nls,
+ ls1);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < nls; i++)
+ ls2[i] = ls1[i];
+
for (i = 0; i < chip->bits; i++) {
pca955x_led = &pca955x->leds[i];
pca955x_led->led_num = i;
@@ -524,18 +665,20 @@ static int pca955x_probe(struct i2c_client *client)
case PCA955X_TYPE_GPIO:
break;
case PCA955X_TYPE_LED:
+ bit = i % 4;
+ reg = i / 4;
led = &pca955x_led->led_cdev;
led->brightness_set_blocking = pca955x_led_set;
led->brightness_get = pca955x_led_get;
-
- if (pdata->leds[i].default_state == LEDS_DEFSTATE_OFF) {
- err = pca955x_led_set(led, LED_OFF);
- if (err)
- return err;
- } else if (pdata->leds[i].default_state == LEDS_DEFSTATE_ON) {
- err = pca955x_led_set(led, LED_FULL);
- if (err)
- return err;
+ led->blink_set = pca955x_led_blink;
+
+ if (pdata->leds[i].default_state == LEDS_DEFSTATE_OFF)
+ ls2[reg] = pca955x_ledsel(ls2[reg], bit, PCA955X_LS_LED_OFF);
+ else if (pdata->leds[i].default_state == LEDS_DEFSTATE_ON)
+ ls2[reg] = pca955x_ledsel(ls2[reg], bit, PCA955X_LS_LED_ON);
+ else if (pca955x_ledstate(ls2[reg], bit) == PCA955X_LS_BLINK0) {
+ keep_psc0 = true;
+ set_bit(i, &pca955x->active_blink);
}
init_data.fwnode = pdata->leds[i].fwnode;
@@ -564,39 +707,31 @@ static int pca955x_probe(struct i2c_client *client)
return err;
set_bit(i, &pca955x->active_pins);
-
- /*
- * For default-state == "keep", let the core update the
- * brightness from the hardware, then check the
- * brightness to see if it's using PWM1. If so, PWM1
- * should not be written below.
- */
- if (pdata->leds[i].default_state == LEDS_DEFSTATE_KEEP) {
- if (led->brightness != LED_FULL &&
- led->brightness != LED_OFF &&
- led->brightness != LED_HALF)
- keep_pwm = true;
- }
}
}
- /* PWM0 is used for half brightness or 50% duty cycle */
- err = pca955x_write_pwm(client, 0, 255 - LED_HALF);
- if (err)
- return err;
+ for (i = 0; i < nls; i++) {
+ if (ls1[i] != ls2[i]) {
+ err = pca955x_write_ls(pca955x, i, ls2[i]);
+ if (err)
+ return err;
+ }
+ }
- if (!keep_pwm) {
- /* PWM1 is used for variable brightness, default to OFF */
- err = pca955x_write_pwm(client, 1, 0);
- if (err)
- return err;
+ if (keep_psc0) {
+ err = pca955x_read_psc(pca955x, 0, &psc0);
+ } else {
+ psc0 = pca955x_period_to_psc(pca955x, pca955x->blink_period);
+ err = pca955x_write_psc(pca955x, 0, psc0);
}
- /* Set to fast frequency so we do not see flashing */
- err = pca955x_write_psc(client, 0, 0);
if (err)
return err;
- err = pca955x_write_psc(client, 1, 0);
+
+ pca955x->blink_period = pca955x_psc_to_period(pca955x, psc0);
+
+ /* Set PWM1 to fast frequency so we do not see flashing */
+ err = pca955x_write_psc(pca955x, 1, 0);
if (err)
return err;
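The two conversion helpers added above are plain integer arithmetic: psc = period_ms * blink_div / 1000 - 1 and period_ms = (psc + 1) * 1000 / blink_div. A standalone sketch of the round-trip (not driver code; blink_div = 44 is an assumed, chip-dependent value used only for illustration):

#include <stdio.h>

#define MSEC_PER_SEC 1000UL

/* psc = period_ms * blink_div / 1000 - 1 */
static unsigned char period_to_psc(unsigned long period_ms, unsigned long blink_div)
{
	return (unsigned char)(period_ms * blink_div / MSEC_PER_SEC - 1);
}

/* period_ms = (psc + 1) * 1000 / blink_div */
static unsigned long psc_to_period(unsigned char psc, unsigned long blink_div)
{
	return ((unsigned long)psc + 1) * MSEC_PER_SEC / blink_div;
}

int main(void)
{
	unsigned long blink_div = 44;	/* assumed divider, for illustration only */

	/* the 1000 ms default period maps to PSC 43 with this divider */
	printf("1000 ms -> psc %u\n", (unsigned)period_to_psc(1000, blink_div));
	/* PSC 0xff gives the longest representable period, ~5818 ms here */
	printf("psc 0xff -> %lu ms\n", psc_to_period(0xff, blink_div));
	return 0;
}

With this assumed divider, the range check in pca955x_led_blink() would accept requested periods between psc_to_period(0) (roughly 22 ms) and psc_to_period(0xff) (roughly 5.8 s).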
diff --git a/drivers/leds/leds-st1202.c b/drivers/leds/leds-st1202.c
index b691c4886993..4e5dd76d714d 100644
--- a/drivers/leds/leds-st1202.c
+++ b/drivers/leds/leds-st1202.c
@@ -99,9 +99,9 @@ static int st1202_pwm_pattern_write(struct st1202_chip *chip, int led_num,
value_h = (u8)(value >> 8);
/*
- * Datasheet: Register address low = 1Eh + 2*(xh) + 18h*(yh),
- * where x is the channel number (led number) in hexadecimal (x = 00h .. 0Bh)
- * and y is the pattern number in hexadecimal (y = 00h .. 07h)
+ * Datasheet: Register address low = 1Eh + 2*(xh) + 18h*(yh),
+ * where x is the channel number (led number) in hexadecimal (x = 00h .. 0Bh)
+ * and y is the pattern number in hexadecimal (y = 00h .. 07h)
*/
ret = st1202_write_reg(chip, (ST1202_PATTERN_PWM + (led_num * 2) + 0x18 * pattern),
value_l);
@@ -189,9 +189,8 @@ static int st1202_channel_set(struct st1202_chip *chip, int led_num, bool active
static int st1202_led_set(struct led_classdev *ldev, enum led_brightness value)
{
struct st1202_led *led = cdev_to_st1202_led(ldev);
- struct st1202_chip *chip = led->chip;
- return st1202_channel_set(chip, led->led_num, value == LED_OFF ? false : true);
+ return st1202_channel_set(led->chip, led->led_num, !!value);
}
static int st1202_led_pattern_clear(struct led_classdev *ldev)
@@ -261,8 +260,6 @@ static int st1202_dt_init(struct st1202_chip *chip)
int err, reg;
for_each_available_child_of_node_scoped(dev_of_node(dev), child) {
- struct led_init_data init_data = {};
-
err = of_property_read_u32(child, "reg", &reg);
if (err)
return dev_err_probe(dev, err, "Invalid register\n");
@@ -276,15 +273,6 @@ static int st1202_dt_init(struct st1202_chip *chip)
led->led_cdev.pattern_set = st1202_led_pattern_set;
led->led_cdev.pattern_clear = st1202_led_pattern_clear;
led->led_cdev.default_trigger = "pattern";
-
- init_data.fwnode = led->fwnode;
- init_data.devicename = "st1202";
- init_data.default_label = ":";
-
- err = devm_led_classdev_register_ext(dev, &led->led_cdev, &init_data);
- if (err < 0)
- return dev_err_probe(dev, err, "Failed to register LED class device\n");
-
led->led_cdev.brightness_set = st1202_brightness_set;
led->led_cdev.brightness_get = st1202_brightness_get;
}
@@ -299,8 +287,8 @@ static int st1202_setup(struct st1202_chip *chip)
guard(mutex)(&chip->lock);
/*
- * Once the supply voltage is applied, the LED1202 executes some internal checks,
- * afterwords it stops the oscillator and puts the internal LDO in quiescent mode.
+ * Once the supply voltage is applied, the LED1202 executes some internal checks.
+ * Afterwards, it stops the oscillator and puts the internal LDO in quiescent mode.
* To start the device, EN bit must be set inside the “Device Enable” register at
* address 01h. As soon as EN is set, the LED1202 loads the adjustment parameters
* from the internal non-volatile memory and performs an auto-calibration procedure
@@ -356,18 +344,21 @@ static int st1202_probe(struct i2c_client *client)
if (!chip)
return -ENOMEM;
- devm_mutex_init(&client->dev, &chip->lock);
+ ret = devm_mutex_init(&client->dev, &chip->lock);
+ if (ret < 0)
+ return ret;
chip->client = client;
- ret = st1202_dt_init(chip);
+ ret = st1202_setup(chip);
if (ret < 0)
return ret;
- ret = st1202_setup(chip);
+ ret = st1202_dt_init(chip);
if (ret < 0)
return ret;
for (int i = 0; i < ST1202_MAX_LEDS; i++) {
+ struct led_init_data init_data = {};
led = &chip->leds[i];
led->chip = chip;
led->led_num = i;
@@ -384,6 +375,15 @@ static int st1202_probe(struct i2c_client *client)
if (ret < 0)
return dev_err_probe(&client->dev, ret,
"Failed to clear LED pattern\n");
+
+ init_data.fwnode = led->fwnode;
+ init_data.devicename = "st1202";
+ init_data.default_label = ":";
+
+ ret = devm_led_classdev_register_ext(&client->dev, &led->led_cdev, &init_data);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to register LED class device\n");
}
return 0;
diff --git a/drivers/leds/rgb/leds-pwm-multicolor.c b/drivers/leds/rgb/leds-pwm-multicolor.c
index f80a06cc31f8..1c7705bafdfc 100644
--- a/drivers/leds/rgb/leds-pwm-multicolor.c
+++ b/drivers/leds/rgb/leds-pwm-multicolor.c
@@ -141,8 +141,11 @@ static int led_pwm_mc_probe(struct platform_device *pdev)
/* init the multicolor's LED class device */
cdev = &priv->mc_cdev.led_cdev;
- fwnode_property_read_u32(mcnode, "max-brightness",
+ ret = fwnode_property_read_u32(mcnode, "max-brightness",
&cdev->max_brightness);
+ if (ret)
+ goto release_mcnode;
+
cdev->flags = LED_CORE_SUSPENDRESUME;
cdev->brightness_set_blocking = led_pwm_mc_set;
diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c
index f3c9ef2bfa57..4f2a178e3d26 100644
--- a/drivers/leds/rgb/leds-qcom-lpg.c
+++ b/drivers/leds/rgb/leds-qcom-lpg.c
@@ -24,6 +24,7 @@
#define LPG_PATTERN_CONFIG_REG 0x40
#define LPG_SIZE_CLK_REG 0x41
#define PWM_CLK_SELECT_MASK GENMASK(1, 0)
+#define PWM_SIZE_SELECT_MASK BIT(2)
#define PWM_CLK_SELECT_HI_RES_MASK GENMASK(2, 0)
#define PWM_SIZE_HI_RES_MASK GENMASK(6, 4)
#define LPG_PREDIV_CLK_REG 0x42
@@ -412,8 +413,8 @@ static int lpg_lut_sync(struct lpg *lpg, unsigned int mask)
static const unsigned int lpg_clk_rates[] = {0, 1024, 32768, 19200000};
static const unsigned int lpg_clk_rates_hi_res[] = {0, 1024, 32768, 19200000, 76800000};
static const unsigned int lpg_pre_divs[] = {1, 3, 5, 6};
-static const unsigned int lpg_pwm_resolution[] = {9};
-static const unsigned int lpg_pwm_resolution_hi_res[] = {8, 9, 10, 11, 12, 13, 14, 15};
+static const unsigned int lpg_pwm_resolution[] = {6, 9};
+static const unsigned int lpg_pwm_resolution_hi_res[] = {8, 9, 10, 11, 12, 13, 14, 15};
static int lpg_calc_freq(struct lpg_channel *chan, uint64_t period)
{
@@ -436,12 +437,12 @@ static int lpg_calc_freq(struct lpg_channel *chan, uint64_t period)
* period = --------------------------
* refclk
*
- * Resolution = 2^9 bits for PWM or
+ * Resolution = 2^{6 or 9} bits for PWM or
* 2^{8, 9, 10, 11, 12, 13, 14, 15} bits for high resolution PWM
* pre_div = {1, 3, 5, 6} and
* M = [0..7].
*
- * This allows for periods between 27uS and 384s for PWM channels and periods between
+ * This allows for periods between 3uS and 384s for PWM channels and periods between
* 3uS and 24576s for high resolution PWMs.
* The PWM framework wants a period of equal or lower length than requested,
* reject anything below minimum period.
@@ -461,7 +462,7 @@ static int lpg_calc_freq(struct lpg_channel *chan, uint64_t period)
max_res = LPG_RESOLUTION_9BIT;
}
- min_period = div64_u64((u64)NSEC_PER_SEC * (1 << pwm_resolution_arr[0]),
+ min_period = div64_u64((u64)NSEC_PER_SEC * ((1 << pwm_resolution_arr[0]) - 1),
clk_rate_arr[clk_len - 1]);
if (period <= min_period)
return -EINVAL;
@@ -482,7 +483,7 @@ static int lpg_calc_freq(struct lpg_channel *chan, uint64_t period)
*/
for (i = 0; i < pwm_resolution_count; i++) {
- resolution = 1 << pwm_resolution_arr[i];
+ resolution = (1 << pwm_resolution_arr[i]) - 1;
for (clk_sel = 1; clk_sel < clk_len; clk_sel++) {
u64 numerator = period * clk_rate_arr[clk_sel];
@@ -529,10 +530,10 @@ static void lpg_calc_duty(struct lpg_channel *chan, uint64_t duty)
unsigned int clk_rate;
if (chan->subtype == LPG_SUBTYPE_HI_RES_PWM) {
- max = LPG_RESOLUTION_15BIT - 1;
+ max = BIT(lpg_pwm_resolution_hi_res[chan->pwm_resolution_sel]) - 1;
clk_rate = lpg_clk_rates_hi_res[chan->clk_sel];
} else {
- max = LPG_RESOLUTION_9BIT - 1;
+ max = BIT(lpg_pwm_resolution[chan->pwm_resolution_sel]) - 1;
clk_rate = lpg_clk_rates[chan->clk_sel];
}
@@ -558,7 +559,7 @@ static void lpg_apply_freq(struct lpg_channel *chan)
val |= GENMASK(5, 4);
break;
case LPG_SUBTYPE_PWM:
- val |= BIT(2);
+ val |= FIELD_PREP(PWM_SIZE_SELECT_MASK, chan->pwm_resolution_sel);
break;
case LPG_SUBTYPE_HI_RES_PWM:
val |= FIELD_PREP(PWM_SIZE_HI_RES_MASK, chan->pwm_resolution_sel);
@@ -1276,7 +1277,7 @@ static int lpg_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
resolution = lpg_pwm_resolution_hi_res[FIELD_GET(PWM_SIZE_HI_RES_MASK, val)];
} else {
refclk = lpg_clk_rates[FIELD_GET(PWM_CLK_SELECT_MASK, val)];
- resolution = 9;
+ resolution = lpg_pwm_resolution[FIELD_GET(PWM_SIZE_SELECT_MASK, val)];
}
if (refclk) {
@@ -1291,7 +1292,7 @@ static int lpg_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
if (ret)
return ret;
- state->period = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * (1 << resolution) *
+ state->period = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * ((1 << resolution) - 1) *
pre_div * (1 << m), refclk);
state->duty_cycle = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * pwm_value * pre_div * (1 << m), refclk);
} else {
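Per the comment above, period = NSEC_PER_SEC * (2^resolution - 1) * pre_div * 2^M / refclk, so the shortest achievable period uses the smallest resolution, pre_div = 1, M = 0 and the fastest refclk. A standalone sketch (not driver code) of the minimum-period figures behind the updated comment:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t NSEC_PER_SEC = 1000000000ULL;
	const uint64_t refclk = 19200000ULL;	/* fastest ordinary-PWM clock, 19.2 MHz */
	const unsigned int res[] = { 6, 9 };	/* lpg_pwm_resolution[] entries */

	for (int i = 0; i < 2; i++) {
		/* minimum period with pre_div = 1 and M = 0 */
		uint64_t min_ns = NSEC_PER_SEC * ((1ULL << res[i]) - 1) / refclk;

		printf("%u-bit: minimum period %llu ns\n",
		       res[i], (unsigned long long)min_ns);
	}
	return 0;
}

This prints roughly 3281 ns for 6 bits and 26614 ns for 9 bits, i.e. the ~3 us lower bound quoted in the updated comment versus the ~27 us bound that applied while only 9-bit resolution was supported.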
diff --git a/drivers/leds/simple/Kconfig b/drivers/leds/simatic/Kconfig
index e616cc6d6051..e616cc6d6051 100644
--- a/drivers/leds/simple/Kconfig
+++ b/drivers/leds/simatic/Kconfig
diff --git a/drivers/leds/simple/Makefile b/drivers/leds/simatic/Makefile
index 783578f11bb0..783578f11bb0 100644
--- a/drivers/leds/simple/Makefile
+++ b/drivers/leds/simatic/Makefile
diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio-apollolake.c b/drivers/leds/simatic/simatic-ipc-leds-gpio-apollolake.c
index c98c370687c2..c98c370687c2 100644
--- a/drivers/leds/simple/simatic-ipc-leds-gpio-apollolake.c
+++ b/drivers/leds/simatic/simatic-ipc-leds-gpio-apollolake.c
diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio-core.c b/drivers/leds/simatic/simatic-ipc-leds-gpio-core.c
index 9bc5f361a06b..9bc5f361a06b 100644
--- a/drivers/leds/simple/simatic-ipc-leds-gpio-core.c
+++ b/drivers/leds/simatic/simatic-ipc-leds-gpio-core.c
diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio-elkhartlake.c b/drivers/leds/simatic/simatic-ipc-leds-gpio-elkhartlake.c
index 7f7cff275448..7f7cff275448 100644
--- a/drivers/leds/simple/simatic-ipc-leds-gpio-elkhartlake.c
+++ b/drivers/leds/simatic/simatic-ipc-leds-gpio-elkhartlake.c
diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio-f7188x.c b/drivers/leds/simatic/simatic-ipc-leds-gpio-f7188x.c
index bc23d701bcb7..bc23d701bcb7 100644
--- a/drivers/leds/simple/simatic-ipc-leds-gpio-f7188x.c
+++ b/drivers/leds/simatic/simatic-ipc-leds-gpio-f7188x.c
diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio.h b/drivers/leds/simatic/simatic-ipc-leds-gpio.h
index 6b2519809cee..6b2519809cee 100644
--- a/drivers/leds/simple/simatic-ipc-leds-gpio.h
+++ b/drivers/leds/simatic/simatic-ipc-leds-gpio.h
diff --git a/drivers/leds/simple/simatic-ipc-leds.c b/drivers/leds/simatic/simatic-ipc-leds.c
index 348679f0d1b7..348679f0d1b7 100644
--- a/drivers/leds/simple/simatic-ipc-leds.c
+++ b/drivers/leds/simatic/simatic-ipc-leds.c
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index c15efe3e5078..4e048e08c4fd 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -68,6 +68,7 @@ struct led_netdev_data {
unsigned int last_activity;
unsigned long mode;
+ unsigned long blink_delay;
int link_speed;
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported_link_modes);
u8 duplex;
@@ -86,6 +87,10 @@ static void set_baseline_state(struct led_netdev_data *trigger_data)
/* Already validated, hw control is possible with the requested mode */
if (trigger_data->hw_control) {
led_cdev->hw_control_set(led_cdev, trigger_data->mode);
+ if (led_cdev->blink_set) {
+ led_cdev->blink_set(led_cdev, &trigger_data->blink_delay,
+ &trigger_data->blink_delay);
+ }
return;
}
@@ -454,10 +459,11 @@ static ssize_t interval_store(struct device *dev,
size_t size)
{
struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev);
+ struct led_classdev *led_cdev = trigger_data->led_cdev;
unsigned long value;
int ret;
- if (trigger_data->hw_control)
+ if (trigger_data->hw_control && !led_cdev->blink_set)
return -EINVAL;
ret = kstrtoul(buf, 0, &value);
@@ -466,9 +472,13 @@ static ssize_t interval_store(struct device *dev,
/* impose some basic bounds on the timer interval */
if (value >= 5 && value <= 10000) {
- cancel_delayed_work_sync(&trigger_data->work);
+ if (trigger_data->hw_control) {
+ trigger_data->blink_delay = value;
+ } else {
+ cancel_delayed_work_sync(&trigger_data->work);
- atomic_set(&trigger_data->interval, msecs_to_jiffies(value));
+ atomic_set(&trigger_data->interval, msecs_to_jiffies(value));
+ }
set_baseline_state(trigger_data); /* resets timer */
}
diff --git a/drivers/leds/trigger/ledtrig-pattern.c b/drivers/leds/trigger/ledtrig-pattern.c
index aad48c2540fc..a594bd5e2233 100644
--- a/drivers/leds/trigger/ledtrig-pattern.c
+++ b/drivers/leds/trigger/ledtrig-pattern.c
@@ -483,8 +483,8 @@ static int pattern_trig_activate(struct led_classdev *led_cdev)
data->led_cdev = led_cdev;
led_set_trigger_data(led_cdev, data);
timer_setup(&data->timer, pattern_trig_timer_function, 0);
- hrtimer_init(&data->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- data->hrtimer.function = pattern_trig_hrtimer_function;
+ hrtimer_setup(&data->hrtimer, pattern_trig_hrtimer_function, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
led_cdev->activated = true;
if (led_cdev->flags & LED_INIT_DEFAULT_TRIGGER) {
diff --git a/drivers/mailbox/arm_mhu.c b/drivers/mailbox/arm_mhu.c
index 537f7bfb7b06..0950b7bce184 100644
--- a/drivers/mailbox/arm_mhu.c
+++ b/drivers/mailbox/arm_mhu.c
@@ -153,7 +153,7 @@ static int mhu_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
}
-static struct amba_id mhu_ids[] = {
+static const struct amba_id mhu_ids[] = {
{
.id = 0x1bb098,
.mask = 0xffffff,
diff --git a/drivers/mailbox/arm_mhu_db.c b/drivers/mailbox/arm_mhu_db.c
index 27a510d46908..9e937b09c5fb 100644
--- a/drivers/mailbox/arm_mhu_db.c
+++ b/drivers/mailbox/arm_mhu_db.c
@@ -328,7 +328,7 @@ static int mhu_db_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
}
-static struct amba_id mhu_ids[] = {
+static const struct amba_id mhu_ids[] = {
{
.id = 0x1bb098,
.mask = 0xffffff,
diff --git a/drivers/mailbox/arm_mhuv2.c b/drivers/mailbox/arm_mhuv2.c
index cff7c343ee08..f035284944c0 100644
--- a/drivers/mailbox/arm_mhuv2.c
+++ b/drivers/mailbox/arm_mhuv2.c
@@ -1107,7 +1107,7 @@ static void mhuv2_remove(struct amba_device *adev)
writel_relaxed(0x0, &mhu->send->access_request);
}
-static struct amba_id mhuv2_ids[] = {
+static const struct amba_id mhuv2_ids[] = {
{
/* 2.0 */
.id = 0xbb0d1,
diff --git a/drivers/mailbox/exynos-mailbox.c b/drivers/mailbox/exynos-mailbox.c
index 20049f0ec5ff..2320649bf60c 100644
--- a/drivers/mailbox/exynos-mailbox.c
+++ b/drivers/mailbox/exynos-mailbox.c
@@ -57,7 +57,7 @@ static int exynos_mbox_send_data(struct mbox_chan *chan, void *data)
if (msg->chan_type != EXYNOS_MBOX_CHAN_TYPE_DOORBELL) {
dev_err(dev, "Unsupported channel type [%d]\n", msg->chan_type);
return -EINVAL;
- };
+ }
writel(BIT(msg->chan_id), exynos_mbox->regs + EXYNOS_MBOX_INTGR1);
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index d3d26a2c9895..0593b4d03685 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -6,18 +6,15 @@
* Author: Jassi Brar <jassisinghbrar@gmail.com>
*/
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/module.h>
#include <linux/device.h>
-#include <linux/bitops.h>
+#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/spinlock.h>
#include "mailbox.h"
@@ -413,15 +410,15 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
return ERR_PTR(-ENODEV);
}
- mutex_lock(&con_mutex);
-
- if (of_parse_phandle_with_args(dev->of_node, "mboxes",
- "#mbox-cells", index, &spec)) {
+ ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells",
+ index, &spec);
+ if (ret) {
dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
- mutex_unlock(&con_mutex);
- return ERR_PTR(-ENODEV);
+ return ERR_PTR(ret);
}
+ mutex_lock(&con_mutex);
+
chan = ERR_PTR(-EPROBE_DEFER);
list_for_each_entry(mbox, &mbox_cons, node)
if (mbox->dev->of_node == spec.np) {
@@ -534,9 +531,7 @@ int mbox_controller_register(struct mbox_controller *mbox)
return -EINVAL;
}
- hrtimer_init(&mbox->poll_hrt, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- mbox->poll_hrt.function = txdone_hrtimer;
+ hrtimer_setup(&mbox->poll_hrt, txdone_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
spin_lock_init(&mbox->poll_hrt_lock);
}
diff --git a/drivers/mailbox/mailbox.h b/drivers/mailbox/mailbox.h
index 046d6d258b32..e1ec4efab693 100644
--- a/drivers/mailbox/mailbox.h
+++ b/drivers/mailbox/mailbox.h
@@ -3,6 +3,8 @@
#ifndef __MAILBOX_H
#define __MAILBOX_H
+#include <linux/bits.h>
+
#define TXDONE_BY_IRQ BIT(0) /* controller has remote RTR irq */
#define TXDONE_BY_POLL BIT(1) /* controller can read status of last TX */
#define TXDONE_BY_ACK BIT(2) /* S/W ACK received by Client ticks the TX */
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 82102a4c5d68..f6714c233f5a 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -117,8 +117,6 @@ struct pcc_chan_info {
static struct pcc_chan_info *chan_info;
static int pcc_chan_count;
-static int pcc_send_data(struct mbox_chan *chan, void *data);
-
/*
* PCC can be used with perf critical drivers such as CPPC
* So it makes sense to locally cache the virtual address and
@@ -245,13 +243,13 @@ static bool pcc_mbox_cmd_complete_check(struct pcc_chan_info *pchan)
u64 val;
int ret;
+ if (!pchan->cmd_complete.gas)
+ return true;
+
ret = pcc_chan_reg_read(&pchan->cmd_complete, &val);
if (ret)
return false;
- if (!pchan->cmd_complete.gas)
- return true;
-
/*
* Judge whether the channel responded to the interrupt based on the
* value of command complete.
@@ -269,33 +267,43 @@ static bool pcc_mbox_cmd_complete_check(struct pcc_chan_info *pchan)
return !!val;
}
-static void check_and_ack(struct pcc_chan_info *pchan, struct mbox_chan *chan)
+static int pcc_mbox_error_check_and_clear(struct pcc_chan_info *pchan)
+{
+ u64 val;
+ int ret;
+
+ ret = pcc_chan_reg_read(&pchan->error, &val);
+ if (ret)
+ return ret;
+
+ val &= pchan->error.status_mask;
+ if (val) {
+ val &= ~pchan->error.status_mask;
+ pcc_chan_reg_write(&pchan->error, val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void pcc_chan_acknowledge(struct pcc_chan_info *pchan)
{
- struct acpi_pcct_ext_pcc_shared_memory pcc_hdr;
+ struct acpi_pcct_ext_pcc_shared_memory __iomem *pcc_hdr;
if (pchan->type != ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
return;
- /* If the memory region has not been mapped, we cannot
- * determine if we need to send the message, but we still
- * need to set the cmd_update flag before returning.
- */
- if (pchan->chan.shmem == NULL) {
- pcc_chan_reg_read_modify_write(&pchan->cmd_update);
- return;
- }
- memcpy_fromio(&pcc_hdr, pchan->chan.shmem,
- sizeof(struct acpi_pcct_ext_pcc_shared_memory));
+
+ pcc_chan_reg_read_modify_write(&pchan->cmd_update);
+
+ pcc_hdr = pchan->chan.shmem;
+
/*
- * The PCC slave subspace channel needs to set the command complete bit
- * after processing message. If the PCC_ACK_FLAG is set, it should also
- * ring the doorbell.
- *
- * The PCC master subspace channel clears chan_in_use to free channel.
+ * The PCC slave subspace channel needs to set the command
+ * complete bit after processing a message. If the PCC_ACK_FLAG
+ * is set, it should also ring the doorbell.
*/
- if (le32_to_cpup(&pcc_hdr.flags) & PCC_ACK_FLAG_MASK)
- pcc_send_data(chan, NULL);
- else
- pcc_chan_reg_read_modify_write(&pchan->cmd_update);
+ if (ioread32(&pcc_hdr->flags) & PCC_CMD_COMPLETION_NOTIFY)
+ pcc_chan_reg_read_modify_write(&pchan->db);
}
/**
@@ -309,10 +317,12 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
{
struct pcc_chan_info *pchan;
struct mbox_chan *chan = p;
- u64 val;
- int ret;
pchan = chan->con_priv;
+
+ if (pcc_chan_reg_read_modify_write(&pchan->plat_irq_ack))
+ return IRQ_NONE;
+
if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE &&
!pchan->chan_in_use)
return IRQ_NONE;
@@ -320,23 +330,19 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
if (!pcc_mbox_cmd_complete_check(pchan))
return IRQ_NONE;
- ret = pcc_chan_reg_read(&pchan->error, &val);
- if (ret)
- return IRQ_NONE;
- val &= pchan->error.status_mask;
- if (val) {
- val &= ~pchan->error.status_mask;
- pcc_chan_reg_write(&pchan->error, val);
- return IRQ_NONE;
- }
-
- if (pcc_chan_reg_read_modify_write(&pchan->plat_irq_ack))
+ if (pcc_mbox_error_check_and_clear(pchan))
return IRQ_NONE;
+ /*
+ * Clear this flag after updating the interrupt ack register and just
+ * before mbox_chan_received_data(), which might call pcc_send_data(),
+ * where the flag is set again to start a new transfer. This is
+ * required to avoid any possible race when updating this flag.
+ */
+ pchan->chan_in_use = false;
mbox_chan_received_data(chan, NULL);
- check_and_ack(pchan, chan);
- pchan->chan_in_use = false;
+ pcc_chan_acknowledge(pchan);
return IRQ_HANDLED;
}
@@ -356,6 +362,7 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
struct pcc_mbox_chan *
pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
{
+ struct pcc_mbox_chan *pcc_mchan;
struct pcc_chan_info *pchan;
struct mbox_chan *chan;
int rc;
@@ -374,7 +381,14 @@ pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
if (rc)
return ERR_PTR(rc);
- return &pchan->chan;
+ pcc_mchan = &pchan->chan;
+ pcc_mchan->shmem = acpi_os_ioremap(pcc_mchan->shmem_base_addr,
+ pcc_mchan->shmem_size);
+ if (pcc_mchan->shmem)
+ return pcc_mchan;
+
+ mbox_free_channel(chan);
+ return ERR_PTR(-ENXIO);
}
EXPORT_SYMBOL_GPL(pcc_mbox_request_channel);
@@ -403,21 +417,6 @@ void pcc_mbox_free_channel(struct pcc_mbox_chan *pchan)
}
EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
-int pcc_mbox_ioremap(struct mbox_chan *chan)
-{
- struct pcc_chan_info *pchan_info;
- struct pcc_mbox_chan *pcc_mbox_chan;
-
- if (!chan || !chan->cl)
- return -1;
- pchan_info = chan->con_priv;
- pcc_mbox_chan = &pchan_info->chan;
- pcc_mbox_chan->shmem = ioremap(pcc_mbox_chan->shmem_base_addr,
- pcc_mbox_chan->shmem_size);
- return 0;
-}
-EXPORT_SYMBOL_GPL(pcc_mbox_ioremap);
-
/**
* pcc_send_data - Called from Mailbox Controller code. Used
* here only to ring the channel doorbell. The PCC client
diff --git a/drivers/mailbox/pl320-ipc.c b/drivers/mailbox/pl320-ipc.c
index fbcf07930390..606f26a2a6fd 100644
--- a/drivers/mailbox/pl320-ipc.c
+++ b/drivers/mailbox/pl320-ipc.c
@@ -45,18 +45,6 @@ static DEFINE_MUTEX(ipc_m1_lock);
static DECLARE_COMPLETION(ipc_completion);
static ATOMIC_NOTIFIER_HEAD(ipc_notifier);
-static inline void set_destination(int source, int mbox)
-{
- writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxDSET(mbox));
- writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxMSET(mbox));
-}
-
-static inline void clear_destination(int source, int mbox)
-{
- writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxDCLEAR(mbox));
- writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxMCLEAR(mbox));
-}
-
static void __ipc_send(int mbox, u32 *data)
{
int i;
@@ -164,7 +152,7 @@ err:
return ret;
}
-static struct amba_id pl320_ids[] = {
+static const struct amba_id pl320_ids[] = {
{
.id = 0x00041320,
.mask = 0x000fffff,
diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
index c1981f091bd1..ed9a0bb2bcd8 100644
--- a/drivers/mailbox/tegra-hsp.c
+++ b/drivers/mailbox/tegra-hsp.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2016-2023, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2025, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/delay.h>
@@ -28,12 +28,6 @@
#define HSP_INT_FULL_MASK 0xff
#define HSP_INT_DIMENSIONING 0x380
-#define HSP_nSM_SHIFT 0
-#define HSP_nSS_SHIFT 4
-#define HSP_nAS_SHIFT 8
-#define HSP_nDB_SHIFT 12
-#define HSP_nSI_SHIFT 16
-#define HSP_nINT_MASK 0xf
#define HSP_DB_TRIGGER 0x0
#define HSP_DB_ENABLE 0x4
@@ -97,6 +91,20 @@ struct tegra_hsp_soc {
bool has_per_mb_ie;
bool has_128_bit_mb;
unsigned int reg_stride;
+
+ /* Shifts for dimensioning register. */
+ unsigned int si_shift;
+ unsigned int db_shift;
+ unsigned int as_shift;
+ unsigned int ss_shift;
+ unsigned int sm_shift;
+
+ /* Masks for dimensioning register. */
+ unsigned int si_mask;
+ unsigned int db_mask;
+ unsigned int as_mask;
+ unsigned int ss_mask;
+ unsigned int sm_mask;
};
struct tegra_hsp {
@@ -747,11 +755,11 @@ static int tegra_hsp_probe(struct platform_device *pdev)
return PTR_ERR(hsp->regs);
value = tegra_hsp_readl(hsp, HSP_INT_DIMENSIONING);
- hsp->num_sm = (value >> HSP_nSM_SHIFT) & HSP_nINT_MASK;
- hsp->num_ss = (value >> HSP_nSS_SHIFT) & HSP_nINT_MASK;
- hsp->num_as = (value >> HSP_nAS_SHIFT) & HSP_nINT_MASK;
- hsp->num_db = (value >> HSP_nDB_SHIFT) & HSP_nINT_MASK;
- hsp->num_si = (value >> HSP_nSI_SHIFT) & HSP_nINT_MASK;
+ hsp->num_sm = (value >> hsp->soc->sm_shift) & hsp->soc->sm_mask;
+ hsp->num_ss = (value >> hsp->soc->ss_shift) & hsp->soc->ss_mask;
+ hsp->num_as = (value >> hsp->soc->as_shift) & hsp->soc->as_mask;
+ hsp->num_db = (value >> hsp->soc->db_shift) & hsp->soc->db_mask;
+ hsp->num_si = (value >> hsp->soc->si_shift) & hsp->soc->si_mask;
err = platform_get_irq_byname_optional(pdev, "doorbell");
if (err >= 0)
@@ -915,6 +923,16 @@ static const struct tegra_hsp_soc tegra186_hsp_soc = {
.has_per_mb_ie = false,
.has_128_bit_mb = false,
.reg_stride = 0x100,
+ .si_shift = 16,
+ .db_shift = 12,
+ .as_shift = 8,
+ .ss_shift = 4,
+ .sm_shift = 0,
+ .si_mask = 0xf,
+ .db_mask = 0xf,
+ .as_mask = 0xf,
+ .ss_mask = 0xf,
+ .sm_mask = 0xf,
};
static const struct tegra_hsp_soc tegra194_hsp_soc = {
@@ -922,6 +940,16 @@ static const struct tegra_hsp_soc tegra194_hsp_soc = {
.has_per_mb_ie = true,
.has_128_bit_mb = false,
.reg_stride = 0x100,
+ .si_shift = 16,
+ .db_shift = 12,
+ .as_shift = 8,
+ .ss_shift = 4,
+ .sm_shift = 0,
+ .si_mask = 0xf,
+ .db_mask = 0xf,
+ .as_mask = 0xf,
+ .ss_mask = 0xf,
+ .sm_mask = 0xf,
};
static const struct tegra_hsp_soc tegra234_hsp_soc = {
@@ -929,6 +957,16 @@ static const struct tegra_hsp_soc tegra234_hsp_soc = {
.has_per_mb_ie = false,
.has_128_bit_mb = true,
.reg_stride = 0x100,
+ .si_shift = 16,
+ .db_shift = 12,
+ .as_shift = 8,
+ .ss_shift = 4,
+ .sm_shift = 0,
+ .si_mask = 0xf,
+ .db_mask = 0xf,
+ .as_mask = 0xf,
+ .ss_mask = 0xf,
+ .sm_mask = 0xf,
};
static const struct tegra_hsp_soc tegra264_hsp_soc = {
@@ -936,6 +974,16 @@ static const struct tegra_hsp_soc tegra264_hsp_soc = {
.has_per_mb_ie = false,
.has_128_bit_mb = true,
.reg_stride = 0x1000,
+ .si_shift = 17,
+ .db_shift = 12,
+ .as_shift = 8,
+ .ss_shift = 4,
+ .sm_shift = 0,
+ .si_mask = 0x1f,
+ .db_mask = 0x1f,
+ .as_mask = 0xf,
+ .ss_mask = 0xf,
+ .sm_mask = 0xf,
};
static const struct of_device_id tegra_hsp_match[] = {
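With the shifts and masks moved into struct tegra_hsp_soc, the same probe-time decode of HSP_INT_DIMENSIONING yields different fields per SoC; Tegra264 places SI at bit 17 and widens the SI/DB fields to 5 bits. A standalone sketch (not driver code; the register value is made up purely for illustration):

#include <stdio.h>

struct dim_layout {
	unsigned int si_shift, db_shift, as_shift, ss_shift, sm_shift;
	unsigned int si_mask, db_mask, as_mask, ss_mask, sm_mask;
};

static const struct dim_layout tegra186 = {
	.si_shift = 16, .db_shift = 12, .as_shift = 8, .ss_shift = 4, .sm_shift = 0,
	.si_mask = 0xf, .db_mask = 0xf, .as_mask = 0xf, .ss_mask = 0xf, .sm_mask = 0xf,
};

static const struct dim_layout tegra264 = {
	.si_shift = 17, .db_shift = 12, .as_shift = 8, .ss_shift = 4, .sm_shift = 0,
	.si_mask = 0x1f, .db_mask = 0x1f, .as_mask = 0xf, .ss_mask = 0xf, .sm_mask = 0xf,
};

static void decode(const char *name, const struct dim_layout *l, unsigned int v)
{
	printf("%s: sm=%u ss=%u as=%u db=%u si=%u\n", name,
	       (v >> l->sm_shift) & l->sm_mask,
	       (v >> l->ss_shift) & l->ss_mask,
	       (v >> l->as_shift) & l->as_mask,
	       (v >> l->db_shift) & l->db_mask,
	       (v >> l->si_shift) & l->si_mask);
}

int main(void)
{
	const unsigned int val = 0x00048228;	/* arbitrary example value */

	decode("tegra186", &tegra186, val);
	decode("tegra264", &tegra264, val);
	return 0;
}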
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 731467d4ed10..b690905ab89f 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -426,7 +426,7 @@ static struct bio *clone_bio(struct dm_target *ti, struct flakey_c *fc, struct b
if (!clone)
return NULL;
- bio_init(clone, fc->dev->bdev, bio->bi_inline_vecs, nr_iovecs, bio->bi_opf);
+ bio_init(clone, fc->dev->bdev, clone->bi_inline_vecs, nr_iovecs, bio->bi_opf);
clone->bi_iter.bi_sector = flakey_map_sector(ti, bio->bi_iter.bi_sector);
clone->bi_private = bio;
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index c45464b6576a..c8c1a00e7d80 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -4811,23 +4811,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
ti->error = "Cannot allocate bio set";
goto bad;
}
- r = bioset_integrity_create(&ic->recheck_bios, RECHECK_POOL_SIZE);
- if (r) {
- ti->error = "Cannot allocate bio integrity set";
- r = -ENOMEM;
- goto bad;
- }
r = bioset_init(&ic->recalc_bios, 1, 0, BIOSET_NEED_BVECS);
if (r) {
ti->error = "Cannot allocate bio set";
goto bad;
}
- r = bioset_integrity_create(&ic->recalc_bios, 1);
- if (r) {
- ti->error = "Cannot allocate bio integrity set";
- r = -ENOMEM;
- goto bad;
- }
}
ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 0ef5203387b2..453803f1edf5 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1081,15 +1081,9 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
__alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
if (bioset_init(&pools->io_bs, pool_size, io_front_pad, bioset_flags))
goto out_free_pools;
- if (mempool_needs_integrity &&
- bioset_integrity_create(&pools->io_bs, pool_size))
- goto out_free_pools;
init_bs:
if (bioset_init(&pools->bs, pool_size, front_pad, 0))
goto out_free_pools;
- if (mempool_needs_integrity &&
- bioset_integrity_create(&pools->bs, pool_size))
- goto out_free_pools;
t->mempools = pools;
return 0;
@@ -1250,6 +1244,7 @@ static int dm_table_construct_crypto_profile(struct dm_table *t)
profile->max_dun_bytes_supported = UINT_MAX;
memset(profile->modes_supported, 0xFF,
sizeof(profile->modes_supported));
+ profile->key_types_supported = ~0;
for (i = 0; i < t->num_targets; i++) {
struct dm_target *ti = dm_table_get_target(t, i);
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 23c09d22fcdb..44ec9b17cfd3 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -29,8 +29,10 @@
#include <linux/buffer_head.h>
#include <linux/seq_file.h>
#include <trace/events/block.h>
+
#include "md.h"
#include "md-bitmap.h"
+#include "md-cluster.h"
#define BITMAP_MAJOR_LO 3
/* version 4 insists the bitmap is in little-endian order
@@ -426,8 +428,8 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap,
struct block_device *bdev;
struct mddev *mddev = bitmap->mddev;
struct bitmap_storage *store = &bitmap->storage;
- unsigned int bitmap_limit = (bitmap->storage.file_pages - pg_index) <<
- PAGE_SHIFT;
+ unsigned long num_pages = bitmap->storage.file_pages;
+ unsigned int bitmap_limit = (num_pages - pg_index % num_pages) << PAGE_SHIFT;
loff_t sboff, offset = mddev->bitmap_info.offset;
sector_t ps = pg_index * PAGE_SIZE / SECTOR_SIZE;
unsigned int size = PAGE_SIZE;
@@ -436,7 +438,7 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap,
bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;
/* we compare length (page numbers), not page offset. */
- if ((pg_index - store->sb_index) == store->file_pages - 1) {
+ if ((pg_index - store->sb_index) == num_pages - 1) {
unsigned int last_page_size = store->bytes & (PAGE_SIZE - 1);
if (last_page_size == 0)
@@ -942,7 +944,7 @@ out:
bmname(bitmap), err);
goto out_no_sb;
}
- bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
+ bitmap->cluster_slot = bitmap->mddev->cluster_ops->slot_number(bitmap->mddev);
goto re_read;
}
@@ -2021,7 +2023,7 @@ static void md_bitmap_free(void *data)
sysfs_put(bitmap->sysfs_can_clear);
if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
- bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
+ bitmap->cluster_slot == bitmap->mddev->cluster_ops->slot_number(bitmap->mddev))
md_cluster_stop(bitmap->mddev);
/* Shouldn't be needed - but just in case.... */
@@ -2229,7 +2231,7 @@ static int bitmap_load(struct mddev *mddev)
mddev_create_serial_pool(mddev, rdev);
if (mddev_is_clustered(mddev))
- md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);
+ mddev->cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);
/* Clear out old bitmap info first: Either there is none, or we
* are resuming after someone else has possibly changed things,
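The reworked bitmap_limit only differs from the old expression when pg_index can exceed file_pages, which is exactly what the added modulo guards against. A standalone sketch (not kernel code) of the two computations:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long num_pages = 3;	/* storage.file_pages */
	unsigned long pg_index = 5;	/* an index past the stored pages */

	unsigned int old_limit = (unsigned int)((num_pages - pg_index) << PAGE_SHIFT);
	unsigned int new_limit = (unsigned int)((num_pages - pg_index % num_pages) << PAGE_SHIFT);

	/* the old form wraps to a huge value; the new form yields 4096 */
	printf("old: %u, new: %u\n", old_limit, new_limit);
	return 0;
}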
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 6595f89becdb..94221d964d4f 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -1166,7 +1166,7 @@ static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsiz
struct dlm_lock_resource *bm_lockres;
char str[64];
- if (i == md_cluster_ops->slot_number(mddev))
+ if (i == slot_number(mddev))
continue;
bitmap = mddev->bitmap_ops->get_from_slot(mddev, i);
@@ -1216,7 +1216,7 @@ out:
*/
static int cluster_check_sync_size(struct mddev *mddev)
{
- int current_slot = md_cluster_ops->slot_number(mddev);
+ int current_slot = slot_number(mddev);
int node_num = mddev->bitmap_info.nodes;
struct dlm_lock_resource *bm_lockres;
struct md_bitmap_stats stats;
@@ -1612,7 +1612,14 @@ out:
return err;
}
-static const struct md_cluster_operations cluster_ops = {
+static struct md_cluster_operations cluster_ops = {
+ .head = {
+ .type = MD_CLUSTER,
+ .id = ID_CLUSTER,
+ .name = "cluster",
+ .owner = THIS_MODULE,
+ },
+
.join = join,
.leave = leave,
.slot_number = slot_number,
@@ -1642,13 +1649,12 @@ static int __init cluster_init(void)
{
pr_warn("md-cluster: support raid1 and raid10 (limited support)\n");
pr_info("Registering Cluster MD functions\n");
- register_md_cluster_operations(&cluster_ops, THIS_MODULE);
- return 0;
+ return register_md_submodule(&cluster_ops.head);
}
static void cluster_exit(void)
{
- unregister_md_cluster_operations();
+ unregister_md_submodule(&cluster_ops.head);
}
module_init(cluster_init);
diff --git a/drivers/md/md-cluster.h b/drivers/md/md-cluster.h
index 470bf18ffde5..8fb06d853173 100644
--- a/drivers/md/md-cluster.h
+++ b/drivers/md/md-cluster.h
@@ -10,6 +10,8 @@ struct mddev;
struct md_rdev;
struct md_cluster_operations {
+ struct md_submodule_head head;
+
int (*join)(struct mddev *mddev, int nodes);
int (*leave)(struct mddev *mddev);
int (*slot_number)(struct mddev *mddev);
@@ -35,4 +37,8 @@ struct md_cluster_operations {
void (*update_size)(struct mddev *mddev, sector_t old_dev_sectors);
};
+extern int md_setup_cluster(struct mddev *mddev, int nodes);
+extern void md_cluster_stop(struct mddev *mddev);
+extern void md_reload_sb(struct mddev *mddev, int raid_disk);
+
#endif /* _MD_CLUSTER_H */
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 369aed044b40..5d9b08115375 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -5,7 +5,6 @@
*/
#include <linux/blkdev.h>
-#include <linux/raid/md_u.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -320,9 +319,13 @@ static void linear_quiesce(struct mddev *mddev, int state)
}
static struct md_personality linear_personality = {
- .name = "linear",
- .level = LEVEL_LINEAR,
- .owner = THIS_MODULE,
+ .head = {
+ .type = MD_PERSONALITY,
+ .id = ID_LINEAR,
+ .name = "linear",
+ .owner = THIS_MODULE,
+ },
+
.make_request = linear_make_request,
.run = linear_run,
.free = linear_free,
@@ -335,12 +338,12 @@ static struct md_personality linear_personality = {
static int __init linear_init(void)
{
- return register_md_personality(&linear_personality);
+ return register_md_submodule(&linear_personality.head);
}
static void linear_exit(void)
{
- unregister_md_personality(&linear_personality);
+ unregister_md_submodule(&linear_personality.head);
}
module_init(linear_init);
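The personalities and the cluster code above now embed a struct md_submodule_head as their first member and register only that head; md.c (below) looks heads up by id or name and casts a matching head back to the outer type in get_pers(). A standalone sketch of the pattern (not kernel code; the names and the plain-array registry are illustrative stand-ins for the xarray):

#include <stdio.h>
#include <string.h>

enum sub_type { SUB_PERSONALITY, SUB_CLUSTER };

struct submodule_head {
	enum sub_type type;
	unsigned int id;
	const char *name;
};

struct personality {
	struct submodule_head head;	/* first member: a head pointer is also a personality pointer */
	int (*run)(void);
};

#define REGISTRY_SIZE 16
static struct submodule_head *registry[REGISTRY_SIZE];

static int register_submodule(struct submodule_head *h)
{
	/* mirrors xa_insert(): fail if the id is out of range or already taken */
	if (h->id >= REGISTRY_SIZE || registry[h->id])
		return -1;
	registry[h->id] = h;
	return 0;
}

static struct personality *find_pers_by_name(const char *name)
{
	for (unsigned int i = 0; i < REGISTRY_SIZE; i++) {
		struct submodule_head *h = registry[i];

		if (h && h->type == SUB_PERSONALITY && !strcmp(h->name, name))
			return (struct personality *)h;	/* cast back to the outer type */
	}
	return NULL;
}

static int linear_run(void) { return 0; }

static struct personality linear = {
	.head = { .type = SUB_PERSONALITY, .id = 1, .name = "linear" },
	.run = linear_run,
};

int main(void)
{
	register_submodule(&linear.head);
	printf("found personality: %s\n", find_pers_by_name("linear")->head.name);
	return 0;
}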
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 30b3dbbce2d2..438e71e45c16 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -79,16 +79,10 @@ static const char *action_name[NR_SYNC_ACTIONS] = {
[ACTION_IDLE] = "idle",
};
-/* pers_list is a list of registered personalities protected by pers_lock. */
-static LIST_HEAD(pers_list);
-static DEFINE_SPINLOCK(pers_lock);
+static DEFINE_XARRAY(md_submodule);
static const struct kobj_type md_ktype;
-const struct md_cluster_operations *md_cluster_ops;
-EXPORT_SYMBOL(md_cluster_ops);
-static struct module *md_cluster_mod;
-
static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
@@ -629,6 +623,12 @@ static void __mddev_put(struct mddev *mddev)
queue_work(md_misc_wq, &mddev->del_work);
}
+static void mddev_put_locked(struct mddev *mddev)
+{
+ if (atomic_dec_and_test(&mddev->active))
+ __mddev_put(mddev);
+}
+
void mddev_put(struct mddev *mddev)
{
if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
@@ -888,16 +888,40 @@ struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
}
EXPORT_SYMBOL_GPL(md_find_rdev_rcu);
-static struct md_personality *find_pers(int level, char *clevel)
+static struct md_personality *get_pers(int level, char *clevel)
{
- struct md_personality *pers;
- list_for_each_entry(pers, &pers_list, list) {
- if (level != LEVEL_NONE && pers->level == level)
- return pers;
- if (strcmp(pers->name, clevel)==0)
- return pers;
+ struct md_personality *ret = NULL;
+ struct md_submodule_head *head;
+ unsigned long i;
+
+ xa_lock(&md_submodule);
+ xa_for_each(&md_submodule, i, head) {
+ if (head->type != MD_PERSONALITY)
+ continue;
+ if ((level != LEVEL_NONE && head->id == level) ||
+ !strcmp(head->name, clevel)) {
+ if (try_module_get(head->owner))
+ ret = (void *)head;
+ break;
+ }
}
- return NULL;
+ xa_unlock(&md_submodule);
+
+ if (!ret) {
+ if (level != LEVEL_NONE)
+ pr_warn("md: personality for level %d is not loaded!\n",
+ level);
+ else
+ pr_warn("md: personality for level %s is not loaded!\n",
+ clevel);
+ }
+
+ return ret;
+}
+
+static void put_pers(struct md_personality *pers)
+{
+ module_put(pers->head.owner);
}
/* return the offset of the super block in 512byte sectors */
@@ -1180,7 +1204,7 @@ int md_check_no_bitmap(struct mddev *mddev)
if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
return 0;
pr_warn("%s: bitmaps are not supported for %s\n",
- mdname(mddev), mddev->pers->name);
+ mdname(mddev), mddev->pers->head.name);
return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);
@@ -1748,7 +1772,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
count <<= sb->bblog_shift;
if (bb + 1 == 0)
break;
- if (badblocks_set(&rdev->badblocks, sector, count, 1))
+ if (!badblocks_set(&rdev->badblocks, sector, count, 1))
return -EINVAL;
}
} else if (sb->bblog_offset != 0)
@@ -2359,19 +2383,6 @@ int md_integrity_register(struct mddev *mddev)
return 0; /* shouldn't register */
pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
- if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE) ||
- (mddev->level != 1 && mddev->level != 10 &&
- bioset_integrity_create(&mddev->io_clone_set, BIO_POOL_SIZE))) {
- /*
- * No need to handle the failure of bioset_integrity_create,
- * because the function is called by md_run() -> pers->run(),
- * md_run calls bioset_exit -> bioset_integrity_free in case
- * of failure case.
- */
- pr_err("md: failed to create integrity pool for %s\n",
- mdname(mddev));
- return -EINVAL;
- }
return 0;
}
EXPORT_SYMBOL(md_integrity_register);
@@ -2639,11 +2650,11 @@ repeat:
force_change = 1;
if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
nospares = 1;
- ret = md_cluster_ops->metadata_update_start(mddev);
+ ret = mddev->cluster_ops->metadata_update_start(mddev);
/* Has someone else updated the sb */
if (!does_sb_need_changing(mddev)) {
if (ret == 0)
- md_cluster_ops->metadata_update_cancel(mddev);
+ mddev->cluster_ops->metadata_update_cancel(mddev);
bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
BIT(MD_SB_CHANGE_DEVS) |
BIT(MD_SB_CHANGE_CLEAN));
@@ -2783,7 +2794,7 @@ rewrite:
/* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
if (mddev_is_clustered(mddev) && ret == 0)
- md_cluster_ops->metadata_update_finish(mddev);
+ mddev->cluster_ops->metadata_update_finish(mddev);
if (mddev->in_sync != sync_req ||
!bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
@@ -2942,7 +2953,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
else {
err = 0;
if (mddev_is_clustered(mddev))
- err = md_cluster_ops->remove_disk(mddev, rdev);
+ err = mddev->cluster_ops->remove_disk(mddev, rdev);
if (err == 0) {
md_kick_rdev_from_array(rdev);
@@ -3052,7 +3063,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
* by this node eventually
*/
if (!mddev_is_clustered(rdev->mddev) ||
- (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
+ (err = mddev->cluster_ops->gather_bitmaps(rdev)) == 0) {
clear_bit(Faulty, &rdev->flags);
err = add_bound_rdev(rdev);
}
@@ -3860,7 +3871,7 @@ level_show(struct mddev *mddev, char *page)
spin_lock(&mddev->lock);
p = mddev->pers;
if (p)
- ret = sprintf(page, "%s\n", p->name);
+ ret = sprintf(page, "%s\n", p->head.name);
else if (mddev->clevel[0])
ret = sprintf(page, "%s\n", mddev->clevel);
else if (mddev->level != LEVEL_NONE)
@@ -3917,7 +3928,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
rv = -EINVAL;
if (!mddev->pers->quiesce) {
pr_warn("md: %s: %s does not support online personality change\n",
- mdname(mddev), mddev->pers->name);
+ mdname(mddev), mddev->pers->head.name);
goto out_unlock;
}
@@ -3931,24 +3942,20 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
if (request_module("md-%s", clevel) != 0)
request_module("md-level-%s", clevel);
- spin_lock(&pers_lock);
- pers = find_pers(level, clevel);
- if (!pers || !try_module_get(pers->owner)) {
- spin_unlock(&pers_lock);
- pr_warn("md: personality %s not loaded\n", clevel);
+ pers = get_pers(level, clevel);
+ if (!pers) {
rv = -EINVAL;
goto out_unlock;
}
- spin_unlock(&pers_lock);
if (pers == mddev->pers) {
/* Nothing to do! */
- module_put(pers->owner);
+ put_pers(pers);
rv = len;
goto out_unlock;
}
if (!pers->takeover) {
- module_put(pers->owner);
+ put_pers(pers);
pr_warn("md: %s: %s does not support personality takeover\n",
mdname(mddev), clevel);
rv = -EINVAL;
@@ -3969,7 +3976,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
mddev->raid_disks -= mddev->delta_disks;
mddev->delta_disks = 0;
mddev->reshape_backwards = 0;
- module_put(pers->owner);
+ put_pers(pers);
pr_warn("md: %s: %s would not accept array\n",
mdname(mddev), clevel);
rv = PTR_ERR(priv);
@@ -3984,7 +3991,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
oldpriv = mddev->private;
mddev->pers = pers;
mddev->private = priv;
- strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
+ strscpy(mddev->clevel, pers->head.name, sizeof(mddev->clevel));
mddev->level = mddev->new_level;
mddev->layout = mddev->new_layout;
mddev->chunk_sectors = mddev->new_chunk_sectors;
@@ -4026,7 +4033,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
mddev->to_remove = &md_redundancy_group;
}
- module_put(oldpers->owner);
+ put_pers(oldpers);
rdev_for_each(rdev, mddev) {
if (rdev->raid_disk < 0)
@@ -5584,7 +5591,7 @@ __ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
static ssize_t serialize_policy_show(struct mddev *mddev, char *page)
{
- if (mddev->pers == NULL || (mddev->pers->level != 1))
+ if (mddev->pers == NULL || (mddev->pers->head.id != ID_RAID1))
return sprintf(page, "n/a\n");
else
return sprintf(page, "%d\n", mddev->serialize_policy);
@@ -5610,7 +5617,7 @@ serialize_policy_store(struct mddev *mddev, const char *buf, size_t len)
err = mddev_suspend_and_lock(mddev);
if (err)
return err;
- if (mddev->pers == NULL || (mddev->pers->level != 1)) {
+ if (mddev->pers == NULL || (mddev->pers->head.id != ID_RAID1)) {
pr_err("md: serialize_policy is only effective for raid1\n");
err = -EINVAL;
goto unlock;
@@ -6096,30 +6103,21 @@ int md_run(struct mddev *mddev)
goto exit_sync_set;
}
- spin_lock(&pers_lock);
- pers = find_pers(mddev->level, mddev->clevel);
- if (!pers || !try_module_get(pers->owner)) {
- spin_unlock(&pers_lock);
- if (mddev->level != LEVEL_NONE)
- pr_warn("md: personality for level %d is not loaded!\n",
- mddev->level);
- else
- pr_warn("md: personality for level %s is not loaded!\n",
- mddev->clevel);
+ pers = get_pers(mddev->level, mddev->clevel);
+ if (!pers) {
err = -EINVAL;
goto abort;
}
- spin_unlock(&pers_lock);
- if (mddev->level != pers->level) {
- mddev->level = pers->level;
- mddev->new_level = pers->level;
+ if (mddev->level != pers->head.id) {
+ mddev->level = pers->head.id;
+ mddev->new_level = pers->head.id;
}
- strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
+ strscpy(mddev->clevel, pers->head.name, sizeof(mddev->clevel));
if (mddev->reshape_position != MaxSector &&
pers->start_reshape == NULL) {
/* This personality cannot handle reshaping... */
- module_put(pers->owner);
+ put_pers(pers);
err = -EINVAL;
goto abort;
}
@@ -6246,7 +6244,7 @@ bitmap_abort:
if (mddev->private)
pers->free(mddev, mddev->private);
mddev->private = NULL;
- module_put(pers->owner);
+ put_pers(pers);
mddev->bitmap_ops->destroy(mddev);
abort:
bioset_exit(&mddev->io_clone_set);
@@ -6467,7 +6465,7 @@ static void __md_stop(struct mddev *mddev)
mddev->private = NULL;
if (pers->sync_request && mddev->to_remove == NULL)
mddev->to_remove = &md_redundancy_group;
- module_put(pers->owner);
+ put_pers(pers);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
bioset_exit(&mddev->bio_set);
@@ -6983,7 +6981,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
set_bit(Candidate, &rdev->flags);
else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
/* --add initiated by this node */
- err = md_cluster_ops->add_new_disk(mddev, rdev);
+ err = mddev->cluster_ops->add_new_disk(mddev, rdev);
if (err) {
export_rdev(rdev, mddev);
return err;
@@ -7000,14 +6998,14 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
if (mddev_is_clustered(mddev)) {
if (info->state & (1 << MD_DISK_CANDIDATE)) {
if (!err) {
- err = md_cluster_ops->new_disk_ack(mddev,
- err == 0);
+ err = mddev->cluster_ops->new_disk_ack(
+ mddev, err == 0);
if (err)
md_kick_rdev_from_array(rdev);
}
} else {
if (err)
- md_cluster_ops->add_new_disk_cancel(mddev);
+ mddev->cluster_ops->add_new_disk_cancel(mddev);
else
err = add_bound_rdev(rdev);
}
@@ -7087,10 +7085,9 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev)
goto busy;
kick_rdev:
- if (mddev_is_clustered(mddev)) {
- if (md_cluster_ops->remove_disk(mddev, rdev))
- goto busy;
- }
+ if (mddev_is_clustered(mddev) &&
+ mddev->cluster_ops->remove_disk(mddev, rdev))
+ goto busy;
md_kick_rdev_from_array(rdev);
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
@@ -7393,7 +7390,7 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
rv = mddev->pers->resize(mddev, num_sectors);
if (!rv) {
if (mddev_is_clustered(mddev))
- md_cluster_ops->update_size(mddev, old_dev_sectors);
+ mddev->cluster_ops->update_size(mddev, old_dev_sectors);
else if (!mddev_is_dm(mddev))
set_capacity_and_notify(mddev->gendisk,
mddev->array_sectors);
@@ -7441,6 +7438,28 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
return rv;
}
+static int get_cluster_ops(struct mddev *mddev)
+{
+ xa_lock(&md_submodule);
+ mddev->cluster_ops = xa_load(&md_submodule, ID_CLUSTER);
+ if (mddev->cluster_ops &&
+ !try_module_get(mddev->cluster_ops->head.owner))
+ mddev->cluster_ops = NULL;
+ xa_unlock(&md_submodule);
+
+ return mddev->cluster_ops == NULL ? -ENOENT : 0;
+}
+
+static void put_cluster_ops(struct mddev *mddev)
+{
+ if (!mddev->cluster_ops)
+ return;
+
+ mddev->cluster_ops->leave(mddev);
+ module_put(mddev->cluster_ops->head.owner);
+ mddev->cluster_ops = NULL;
+}
+
/*
* update_array_info is used to change the configuration of an
* on-line array.
@@ -7549,16 +7568,15 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
if (mddev->bitmap_info.nodes) {
/* hold PW on all the bitmap lock */
- if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
+ if (mddev->cluster_ops->lock_all_bitmaps(mddev) <= 0) {
pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
rv = -EPERM;
- md_cluster_ops->unlock_all_bitmaps(mddev);
+ mddev->cluster_ops->unlock_all_bitmaps(mddev);
goto err;
}
mddev->bitmap_info.nodes = 0;
- md_cluster_ops->leave(mddev);
- module_put(md_cluster_mod);
+ put_cluster_ops(mddev);
mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
}
mddev->bitmap_ops->destroy(mddev);
@@ -7842,7 +7860,7 @@ static int md_ioctl(struct block_device *bdev, blk_mode_t mode,
case CLUSTERED_DISK_NACK:
if (mddev_is_clustered(mddev))
- md_cluster_ops->new_disk_ack(mddev, false);
+ mddev->cluster_ops->new_disk_ack(mddev, false);
else
err = -EINVAL;
goto unlock;
@@ -8124,7 +8142,8 @@ void md_error(struct mddev *mddev, struct md_rdev *rdev)
return;
mddev->pers->error_handler(mddev, rdev);
- if (mddev->pers->level == 0 || mddev->pers->level == LEVEL_LINEAR)
+ if (mddev->pers->head.id == ID_RAID0 ||
+ mddev->pers->head.id == ID_LINEAR)
return;
if (mddev->degraded && !test_bit(MD_BROKEN, &mddev->flags))
@@ -8162,14 +8181,17 @@ static void status_unused(struct seq_file *seq)
static void status_personalities(struct seq_file *seq)
{
- struct md_personality *pers;
+ struct md_submodule_head *head;
+ unsigned long i;
seq_puts(seq, "Personalities : ");
- spin_lock(&pers_lock);
- list_for_each_entry(pers, &pers_list, list)
- seq_printf(seq, "[%s] ", pers->name);
- spin_unlock(&pers_lock);
+ xa_lock(&md_submodule);
+ xa_for_each(&md_submodule, i, head)
+ if (head->type == MD_PERSONALITY)
+ seq_printf(seq, "[%s] ", head->name);
+ xa_unlock(&md_submodule);
+
seq_puts(seq, "\n");
}
@@ -8392,7 +8414,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, " (read-only)");
if (mddev->ro == MD_AUTO_READ)
seq_printf(seq, " (auto-read-only)");
- seq_printf(seq, " %s", mddev->pers->name);
+ seq_printf(seq, " %s", mddev->pers->head.name);
} else {
seq_printf(seq, "inactive");
}
@@ -8461,9 +8483,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
if (mddev == list_last_entry(&all_mddevs, struct mddev, all_mddevs))
status_unused(seq);
- if (atomic_dec_and_test(&mddev->active))
- __mddev_put(mddev);
-
+ mddev_put_locked(mddev);
return 0;
}
@@ -8514,67 +8534,34 @@ static const struct proc_ops mdstat_proc_ops = {
.proc_poll = mdstat_poll,
};
-int register_md_personality(struct md_personality *p)
-{
- pr_debug("md: %s personality registered for level %d\n",
- p->name, p->level);
- spin_lock(&pers_lock);
- list_add_tail(&p->list, &pers_list);
- spin_unlock(&pers_lock);
- return 0;
-}
-EXPORT_SYMBOL(register_md_personality);
-
-int unregister_md_personality(struct md_personality *p)
+int register_md_submodule(struct md_submodule_head *msh)
{
- pr_debug("md: %s personality unregistered\n", p->name);
- spin_lock(&pers_lock);
- list_del_init(&p->list);
- spin_unlock(&pers_lock);
- return 0;
+ return xa_insert(&md_submodule, msh->id, msh, GFP_KERNEL);
}
-EXPORT_SYMBOL(unregister_md_personality);
+EXPORT_SYMBOL_GPL(register_md_submodule);
-int register_md_cluster_operations(const struct md_cluster_operations *ops,
- struct module *module)
+void unregister_md_submodule(struct md_submodule_head *msh)
{
- int ret = 0;
- spin_lock(&pers_lock);
- if (md_cluster_ops != NULL)
- ret = -EALREADY;
- else {
- md_cluster_ops = ops;
- md_cluster_mod = module;
- }
- spin_unlock(&pers_lock);
- return ret;
+ xa_erase(&md_submodule, msh->id);
}
-EXPORT_SYMBOL(register_md_cluster_operations);
-
-int unregister_md_cluster_operations(void)
-{
- spin_lock(&pers_lock);
- md_cluster_ops = NULL;
- spin_unlock(&pers_lock);
- return 0;
-}
-EXPORT_SYMBOL(unregister_md_cluster_operations);
+EXPORT_SYMBOL_GPL(unregister_md_submodule);
int md_setup_cluster(struct mddev *mddev, int nodes)
{
- int ret;
- if (!md_cluster_ops)
+ int ret = get_cluster_ops(mddev);
+
+ if (ret) {
request_module("md-cluster");
- spin_lock(&pers_lock);
+ ret = get_cluster_ops(mddev);
+ }
+
/* ensure module won't be unloaded */
- if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
+ if (ret) {
pr_warn("can't find md-cluster module or get its reference.\n");
- spin_unlock(&pers_lock);
- return -ENOENT;
+ return ret;
}
- spin_unlock(&pers_lock);
- ret = md_cluster_ops->join(mddev, nodes);
+ ret = mddev->cluster_ops->join(mddev, nodes);
if (!ret)
mddev->safemode_delay = 0;
return ret;
@@ -8582,10 +8569,7 @@ int md_setup_cluster(struct mddev *mddev, int nodes)
void md_cluster_stop(struct mddev *mddev)
{
- if (!md_cluster_ops)
- return;
- md_cluster_ops->leave(mddev);
- module_put(md_cluster_mod);
+ put_cluster_ops(mddev);
}
static int is_mddev_idle(struct mddev *mddev, int init)
@@ -8978,7 +8962,7 @@ void md_do_sync(struct md_thread *thread)
}
if (mddev_is_clustered(mddev)) {
- ret = md_cluster_ops->resync_start(mddev);
+ ret = mddev->cluster_ops->resync_start(mddev);
if (ret)
goto skip;
@@ -9005,7 +8989,7 @@ void md_do_sync(struct md_thread *thread)
*
*/
if (mddev_is_clustered(mddev))
- md_cluster_ops->resync_start_notify(mddev);
+ mddev->cluster_ops->resync_start_notify(mddev);
do {
int mddev2_minor = -1;
mddev->curr_resync = MD_RESYNC_DELAYED;
@@ -9460,6 +9444,13 @@ static bool md_choose_sync_action(struct mddev *mddev, int *spares)
return true;
}
+ /* Check if resync is in progress. */
+ if (mddev->recovery_cp < MaxSector) {
+ set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+ return true;
+ }
+
/*
* Remove any failed drives, then add spares if possible. Spares are
* also removed and re-added, to allow the personality to fail the
@@ -9476,13 +9467,6 @@ static bool md_choose_sync_action(struct mddev *mddev, int *spares)
return true;
}
- /* Check if recovery is in progress. */
- if (mddev->recovery_cp < MaxSector) {
- set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
- clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
- return true;
- }
-
/* Delay to choose resync/check/repair in md_do_sync(). */
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
return true;
@@ -9789,7 +9773,7 @@ void md_reap_sync_thread(struct mddev *mddev)
* call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
* clustered raid */
if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
- md_cluster_ops->resync_finish(mddev);
+ mddev->cluster_ops->resync_finish(mddev);
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
@@ -9797,13 +9781,13 @@ void md_reap_sync_thread(struct mddev *mddev)
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
/*
- * We call md_cluster_ops->update_size here because sync_size could
+ * We call mddev->cluster_ops->update_size here because sync_size could
* be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared,
* so it is time to update size across cluster.
*/
if (mddev_is_clustered(mddev) && is_reshaped
&& !test_bit(MD_CLOSING, &mddev->flags))
- md_cluster_ops->update_size(mddev, old_dev_sectors);
+ mddev->cluster_ops->update_size(mddev, old_dev_sectors);
/* flag recovery needed just to double check */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
sysfs_notify_dirent_safe(mddev->sysfs_completed);
@@ -9841,12 +9825,11 @@ EXPORT_SYMBOL(md_finish_reshape);
/* Bad block management */
-/* Returns 1 on success, 0 on failure */
-int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
- int is_new)
+/* Returns true on success, false on failure */
+bool rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
+ int is_new)
{
struct mddev *mddev = rdev->mddev;
- int rv;
/*
* Recording new badblocks for faulty rdev will force unnecessary
@@ -9856,50 +9839,51 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
* avoid it.
*/
if (test_bit(Faulty, &rdev->flags))
- return 1;
+ return true;
if (is_new)
s += rdev->new_data_offset;
else
s += rdev->data_offset;
- rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
- if (rv == 0) {
- /* Make sure they get written out promptly */
- if (test_bit(ExternalBbl, &rdev->flags))
- sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks);
- sysfs_notify_dirent_safe(rdev->sysfs_state);
- set_mask_bits(&mddev->sb_flags, 0,
- BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
- md_wakeup_thread(rdev->mddev->thread);
- return 1;
- } else
- return 0;
+
+ if (!badblocks_set(&rdev->badblocks, s, sectors, 0))
+ return false;
+
+ /* Make sure they get written out promptly */
+ if (test_bit(ExternalBbl, &rdev->flags))
+ sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks);
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
+ md_wakeup_thread(rdev->mddev->thread);
+ return true;
}
EXPORT_SYMBOL_GPL(rdev_set_badblocks);
-int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
- int is_new)
+void rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
+ int is_new)
{
- int rv;
if (is_new)
s += rdev->new_data_offset;
else
s += rdev->data_offset;
- rv = badblocks_clear(&rdev->badblocks, s, sectors);
- if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
+
+ if (!badblocks_clear(&rdev->badblocks, s, sectors))
+ return;
+
+ if (test_bit(ExternalBbl, &rdev->flags))
sysfs_notify_dirent_safe(rdev->sysfs_badblocks);
- return rv;
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
static int md_notify_reboot(struct notifier_block *this,
unsigned long code, void *x)
{
- struct mddev *mddev, *n;
+ struct mddev *mddev;
int need_delay = 0;
spin_lock(&all_mddevs_lock);
- list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
+ list_for_each_entry(mddev, &all_mddevs, all_mddevs) {
if (!mddev_get(mddev))
continue;
spin_unlock(&all_mddevs_lock);
@@ -9911,8 +9895,8 @@ static int md_notify_reboot(struct notifier_block *this,
mddev_unlock(mddev);
}
need_delay = 1;
- mddev_put(mddev);
spin_lock(&all_mddevs_lock);
+ mddev_put_locked(mddev);
}
spin_unlock(&all_mddevs_lock);
@@ -10029,7 +10013,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
if (rdev2->raid_disk == -1 && role != MD_DISK_ROLE_SPARE &&
!(le32_to_cpu(sb->feature_map) &
MD_FEATURE_RESHAPE_ACTIVE) &&
- !md_cluster_ops->resync_status_get(mddev)) {
+ !mddev->cluster_ops->resync_status_get(mddev)) {
/*
* -1 to make raid1_add_disk() set conf->fullsync
* to 1. This could avoid skipping sync when the
@@ -10245,7 +10229,7 @@ void md_autostart_arrays(int part)
static __exit void md_exit(void)
{
- struct mddev *mddev, *n;
+ struct mddev *mddev;
int delay = 1;
unregister_blkdev(MD_MAJOR,"md");
@@ -10266,7 +10250,7 @@ static __exit void md_exit(void)
remove_proc_entry("mdstat", NULL);
spin_lock(&all_mddevs_lock);
- list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
+ list_for_each_entry(mddev, &all_mddevs, all_mddevs) {
if (!mddev_get(mddev))
continue;
spin_unlock(&all_mddevs_lock);
@@ -10278,8 +10262,8 @@ static __exit void md_exit(void)
* the mddev for destruction by a workqueue, and the
* destroy_workqueue() below will wait for that to complete.
*/
- mddev_put(mddev);
spin_lock(&all_mddevs_lock);
+ mddev_put_locked(mddev);
}
spin_unlock(&all_mddevs_lock);
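
Both md_notify_reboot() and md_exit() above switch from list_for_each_entry_safe() to a plain list_for_each_entry() paired with mddev_put_locked(). A minimal sketch of the resulting traversal, distilled from those two hunks — the sketch function name and the work() callback are illustrative, and mddev_put_locked() is assumed to be the lock-held counterpart of mddev_put():

static void for_each_active_mddev_sketch(void (*work)(struct mddev *mddev))
{
	struct mddev *mddev;

	spin_lock(&all_mddevs_lock);
	list_for_each_entry(mddev, &all_mddevs, all_mddevs) {
		/* skip arrays that are already going away */
		if (!mddev_get(mddev))
			continue;
		spin_unlock(&all_mddevs_lock);

		work(mddev);			/* may sleep */

		spin_lock(&all_mddevs_lock);
		/*
		 * Drop the reference while still holding the list lock, so a
		 * freed mddev can never be used as the iteration cursor.
		 */
		mddev_put_locked(mddev);
	}
	spin_unlock(&all_mddevs_lock);
}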
diff --git a/drivers/md/md.h b/drivers/md/md.h
index def808064ad8..1cf00a04bcdd 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -18,11 +18,37 @@
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include <linux/raid/md_u.h>
#include <trace/events/block.h>
-#include "md-cluster.h"
#define MaxSector (~(sector_t)0)
+enum md_submodule_type {
+ MD_PERSONALITY = 0,
+ MD_CLUSTER,
+ MD_BITMAP, /* TODO */
+};
+
+enum md_submodule_id {
+ ID_LINEAR = LEVEL_LINEAR,
+ ID_RAID0 = 0,
+ ID_RAID1 = 1,
+ ID_RAID4 = 4,
+ ID_RAID5 = 5,
+ ID_RAID6 = 6,
+ ID_RAID10 = 10,
+ ID_CLUSTER,
+ ID_BITMAP, /* TODO */
+ ID_LLBITMAP, /* TODO */
+};
+
+struct md_submodule_head {
+ enum md_submodule_type type;
+ enum md_submodule_id id;
+ const char *name;
+ struct module *owner;
+};
+
/*
* These flags should really be called "NO_RETRY" rather than
* "FAILFAST" because they don't make any promise about time lapse,
@@ -266,8 +292,8 @@ enum flag_bits {
Nonrot, /* non-rotational device (SSD) */
};
-static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
- sector_t *first_bad, int *bad_sectors)
+static inline int is_badblock(struct md_rdev *rdev, sector_t s, sector_t sectors,
+ sector_t *first_bad, sector_t *bad_sectors)
{
if (unlikely(rdev->badblocks.count)) {
int rv = badblocks_check(&rdev->badblocks, rdev->data_offset + s,
@@ -284,16 +310,17 @@ static inline int rdev_has_badblock(struct md_rdev *rdev, sector_t s,
int sectors)
{
sector_t first_bad;
- int bad_sectors;
+ sector_t bad_sectors;
return is_badblock(rdev, s, sectors, &first_bad, &bad_sectors);
}
-extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
- int is_new);
-extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
- int is_new);
+extern bool rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
+ int is_new);
+extern void rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
+ int is_new);
struct md_cluster_info;
+struct md_cluster_operations;
/**
* enum mddev_flags - md device flags.
@@ -576,6 +603,7 @@ struct mddev {
mempool_t *serial_info_pool;
void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
struct md_cluster_info *cluster_info;
+ struct md_cluster_operations *cluster_ops;
unsigned int good_device_nr; /* good device num within cluster raid */
unsigned int noio_flag; /* for memalloc scope API */
@@ -699,10 +727,8 @@ static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
struct md_personality
{
- char *name;
- int level;
- struct list_head list;
- struct module *owner;
+ struct md_submodule_head head;
+
bool __must_check (*make_request)(struct mddev *mddev, struct bio *bio);
/*
* start up works that do NOT require md_thread. tasks that
@@ -843,13 +869,9 @@ static inline void safe_put_page(struct page *p)
if (p) put_page(p);
}
-extern int register_md_personality(struct md_personality *p);
-extern int unregister_md_personality(struct md_personality *p);
-extern int register_md_cluster_operations(const struct md_cluster_operations *ops,
- struct module *module);
-extern int unregister_md_cluster_operations(void);
-extern int md_setup_cluster(struct mddev *mddev, int nodes);
-extern void md_cluster_stop(struct mddev *mddev);
+int register_md_submodule(struct md_submodule_head *msh);
+void unregister_md_submodule(struct md_submodule_head *msh);
+
extern struct md_thread *md_register_thread(
void (*run)(struct md_thread *thread),
struct mddev *mddev,
@@ -906,7 +928,6 @@ extern void md_idle_sync_thread(struct mddev *mddev);
extern void md_frozen_sync_thread(struct mddev *mddev);
extern void md_unfrozen_sync_thread(struct mddev *mddev);
-extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
extern void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev);
extern void mddev_destroy_serial_pool(struct mddev *mddev,
@@ -928,7 +949,6 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
}
}
-extern const struct md_cluster_operations *md_cluster_ops;
static inline int mddev_is_clustered(struct mddev *mddev)
{
return mddev->cluster_info && mddev->bitmap_info.nodes > 1;
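
md.h now declares a single register_md_submodule()/unregister_md_submodule() pair in place of the per-type registration helpers removed above. A minimal sketch of one plausible backing store, assuming a global xarray keyed by md_submodule_head.id; the actual implementation lives in md.c and is not visible in the hunks shown here:

static DEFINE_XARRAY(md_submodules_sketch);

int register_md_submodule(struct md_submodule_head *msh)
{
	/* ids are unique across personalities, cluster and bitmap modules */
	return xa_insert(&md_submodules_sketch, msh->id, msh, GFP_KERNEL);
}

void unregister_md_submodule(struct md_submodule_head *msh)
{
	xa_erase(&md_submodules_sketch, msh->id);
}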
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 70bcc3cdf2cd..d8f639f4ae12 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -809,9 +809,13 @@ static void raid0_quiesce(struct mddev *mddev, int quiesce)
static struct md_personality raid0_personality=
{
- .name = "raid0",
- .level = 0,
- .owner = THIS_MODULE,
+ .head = {
+ .type = MD_PERSONALITY,
+ .id = ID_RAID0,
+ .name = "raid0",
+ .owner = THIS_MODULE,
+ },
+
.make_request = raid0_make_request,
.run = raid0_run,
.free = raid0_free,
@@ -822,14 +826,14 @@ static struct md_personality raid0_personality=
.error_handler = raid0_error,
};
-static int __init raid0_init (void)
+static int __init raid0_init(void)
{
- return register_md_personality (&raid0_personality);
+ return register_md_submodule(&raid0_personality.head);
}
-static void raid0_exit (void)
+static void __exit raid0_exit(void)
{
- unregister_md_personality (&raid0_personality);
+ unregister_md_submodule(&raid0_personality.head);
}
module_init(raid0_init);
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
index 4378d3250bd7..c7efd8aab675 100644
--- a/drivers/md/raid1-10.c
+++ b/drivers/md/raid1-10.c
@@ -247,7 +247,7 @@ static inline int raid1_check_read_range(struct md_rdev *rdev,
sector_t this_sector, int *len)
{
sector_t first_bad;
- int bad_sectors;
+ sector_t bad_sectors;
/* no bad block overlap */
if (!is_badblock(rdev, this_sector, *len, &first_bad, &bad_sectors))
@@ -287,8 +287,8 @@ static inline bool raid1_should_read_first(struct mddev *mddev,
return true;
if (mddev_is_clustered(mddev) &&
- md_cluster_ops->area_resyncing(mddev, READ, this_sector,
- this_sector + len))
+ mddev->cluster_ops->area_resyncing(mddev, READ, this_sector,
+ this_sector + len))
return true;
return false;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 10ea3af40991..0efc03cea24e 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -36,6 +36,7 @@
#include "md.h"
#include "raid1.h"
#include "md-bitmap.h"
+#include "md-cluster.h"
#define UNSUPPORTED_MDDEV_FLAGS \
((1L << MD_HAS_JOURNAL) | \
@@ -45,6 +46,7 @@
static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
+static void raid1_free(struct mddev *mddev, void *priv);
#define RAID_1_10_NAME "raid1"
#include "raid1-10.c"
@@ -1315,8 +1317,6 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
struct r1conf *conf = mddev->private;
struct raid1_info *mirror;
struct bio *read_bio;
- const enum req_op op = bio_op(bio);
- const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
int max_sectors;
int rdisk, error;
bool r1bio_existed = !!r1_bio;
@@ -1404,7 +1404,6 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
read_bio->bi_iter.bi_sector = r1_bio->sector +
mirror->rdev->data_offset;
read_bio->bi_end_io = raid1_end_read_request;
- read_bio->bi_opf = op | do_sync;
if (test_bit(FailFast, &mirror->rdev->flags) &&
test_bit(R1BIO_FailFast, &r1_bio->state))
read_bio->bi_opf |= MD_FAILFAST;
@@ -1467,7 +1466,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
bool is_discard = (bio_op(bio) == REQ_OP_DISCARD);
if (mddev_is_clustered(mddev) &&
- md_cluster_ops->area_resyncing(mddev, WRITE,
+ mddev->cluster_ops->area_resyncing(mddev, WRITE,
bio->bi_iter.bi_sector, bio_end_sector(bio))) {
DEFINE_WAIT(w);
@@ -1478,7 +1477,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
for (;;) {
prepare_to_wait(&conf->wait_barrier,
&w, TASK_IDLE);
- if (!md_cluster_ops->area_resyncing(mddev, WRITE,
+ if (!mddev->cluster_ops->area_resyncing(mddev, WRITE,
bio->bi_iter.bi_sector,
bio_end_sector(bio)))
break;
@@ -1537,7 +1536,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
atomic_inc(&rdev->nr_pending);
if (test_bit(WriteErrorSeen, &rdev->flags)) {
sector_t first_bad;
- int bad_sectors;
+ sector_t bad_sectors;
int is_bad;
is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
@@ -1653,8 +1652,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
mbio->bi_iter.bi_sector = (r1_bio->sector + rdev->data_offset);
mbio->bi_end_io = raid1_end_write_request;
- mbio->bi_opf = bio_op(bio) |
- (bio->bi_opf & (REQ_SYNC | REQ_FUA | REQ_ATOMIC));
if (test_bit(FailFast, &rdev->flags) &&
!test_bit(WriteMostly, &rdev->flags) &&
conf->raid_disks - mddev->degraded > 1)
@@ -2486,7 +2483,7 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
}
}
-static int narrow_write_error(struct r1bio *r1_bio, int i)
+static bool narrow_write_error(struct r1bio *r1_bio, int i)
{
struct mddev *mddev = r1_bio->mddev;
struct r1conf *conf = mddev->private;
@@ -2507,10 +2504,10 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
sector_t sector;
int sectors;
int sect_to_write = r1_bio->sectors;
- int ok = 1;
+ bool ok = true;
if (rdev->badblocks.shift < 0)
- return 0;
+ return false;
block_sectors = roundup(1 << rdev->badblocks.shift,
bdev_logical_block_size(rdev->bdev) >> 9);
@@ -2886,7 +2883,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
} else {
/* may need to read from here */
sector_t first_bad = MaxSector;
- int bad_sectors;
+ sector_t bad_sectors;
if (is_badblock(rdev, sector_nr, good_sectors,
&first_bad, &bad_sectors)) {
@@ -3038,9 +3035,9 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
conf->cluster_sync_low = mddev->curr_resync_completed;
conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
/* Send resync message */
- md_cluster_ops->resync_info_update(mddev,
- conf->cluster_sync_low,
- conf->cluster_sync_high);
+ mddev->cluster_ops->resync_info_update(mddev,
+ conf->cluster_sync_low,
+ conf->cluster_sync_high);
}
/* For a user-requested sync, we read all readable devices and do a
@@ -3256,8 +3253,11 @@ static int raid1_run(struct mddev *mddev)
if (!mddev_is_dm(mddev)) {
ret = raid1_set_limits(mddev);
- if (ret)
+ if (ret) {
+ if (!mddev->private)
+ raid1_free(mddev, conf);
return ret;
+ }
}
mddev->degraded = 0;
@@ -3271,6 +3271,8 @@ static int raid1_run(struct mddev *mddev)
*/
if (conf->raid_disks - mddev->degraded < 1) {
md_unregister_thread(mddev, &conf->thread);
+ if (!mddev->private)
+ raid1_free(mddev, conf);
return -EINVAL;
}
@@ -3491,9 +3493,13 @@ static void *raid1_takeover(struct mddev *mddev)
static struct md_personality raid1_personality =
{
- .name = "raid1",
- .level = 1,
- .owner = THIS_MODULE,
+ .head = {
+ .type = MD_PERSONALITY,
+ .id = ID_RAID1,
+ .name = "raid1",
+ .owner = THIS_MODULE,
+ },
+
.make_request = raid1_make_request,
.run = raid1_run,
.free = raid1_free,
@@ -3510,18 +3516,18 @@ static struct md_personality raid1_personality =
.takeover = raid1_takeover,
};
-static int __init raid_init(void)
+static int __init raid1_init(void)
{
- return register_md_personality(&raid1_personality);
+ return register_md_submodule(&raid1_personality.head);
}
-static void raid_exit(void)
+static void __exit raid1_exit(void)
{
- unregister_md_personality(&raid1_personality);
+ unregister_md_submodule(&raid1_personality.head);
}
-module_init(raid_init);
-module_exit(raid_exit);
+module_init(raid1_init);
+module_exit(raid1_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
MODULE_ALIAS("md-personality-3"); /* RAID1 */
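
The two raid1_free() calls added to raid1_run() close a conf leak on early failure. A short sketch of the convention they rely on, assuming that the md core only calls ->free() once mddev->private has been set; setup_conf() and raid1_free() are the raid1 helpers from this file, while some_setup_step_fails() is a stand-in for raid1_set_limits() or the degraded-disks check:

static int raid1_run_error_path_sketch(struct mddev *mddev)
{
	struct r1conf *conf = setup_conf(mddev);

	if (IS_ERR(conf))
		return PTR_ERR(conf);

	if (some_setup_step_fails(mddev)) {
		/*
		 * ->free() is only invoked by the core after mddev->private is
		 * set, so an early failure must release conf by hand.
		 */
		if (!mddev->private)
			raid1_free(mddev, conf);
		return -EINVAL;
	}

	mddev->private = conf;
	return 0;
}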
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 15b9ae5bf84d..846c5f29486e 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -24,6 +24,7 @@
#include "raid10.h"
#include "raid0.h"
#include "md-bitmap.h"
+#include "md-cluster.h"
/*
* RAID10 provides a combination of RAID0 and RAID1 functionality.
@@ -747,7 +748,7 @@ static struct md_rdev *read_balance(struct r10conf *conf,
for (slot = 0; slot < conf->copies ; slot++) {
sector_t first_bad;
- int bad_sectors;
+ sector_t bad_sectors;
sector_t dev_sector;
unsigned int pending;
bool nonrot;
@@ -1146,8 +1147,6 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
{
struct r10conf *conf = mddev->private;
struct bio *read_bio;
- const enum req_op op = bio_op(bio);
- const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
int max_sectors;
struct md_rdev *rdev;
char b[BDEVNAME_SIZE];
@@ -1228,7 +1227,6 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
choose_data_offset(r10_bio, rdev);
read_bio->bi_end_io = raid10_end_read_request;
- read_bio->bi_opf = op | do_sync;
if (test_bit(FailFast, &rdev->flags) &&
test_bit(R10BIO_FailFast, &r10_bio->state))
read_bio->bi_opf |= MD_FAILFAST;
@@ -1247,10 +1245,6 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
struct bio *bio, bool replacement,
int n_copy)
{
- const enum req_op op = bio_op(bio);
- const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
- const blk_opf_t do_fua = bio->bi_opf & REQ_FUA;
- const blk_opf_t do_atomic = bio->bi_opf & REQ_ATOMIC;
unsigned long flags;
struct r10conf *conf = mddev->private;
struct md_rdev *rdev;
@@ -1269,7 +1263,6 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr +
choose_data_offset(r10_bio, rdev));
mbio->bi_end_io = raid10_end_write_request;
- mbio->bi_opf = op | do_sync | do_fua | do_atomic;
if (!replacement && test_bit(FailFast,
&conf->mirrors[devnum].rdev->flags)
&& enough(conf, devnum))
@@ -1355,9 +1348,9 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
int error;
if ((mddev_is_clustered(mddev) &&
- md_cluster_ops->area_resyncing(mddev, WRITE,
- bio->bi_iter.bi_sector,
- bio_end_sector(bio)))) {
+ mddev->cluster_ops->area_resyncing(mddev, WRITE,
+ bio->bi_iter.bi_sector,
+ bio_end_sector(bio)))) {
DEFINE_WAIT(w);
/* Bail out if REQ_NOWAIT is set for the bio */
if (bio->bi_opf & REQ_NOWAIT) {
@@ -1367,7 +1360,7 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
for (;;) {
prepare_to_wait(&conf->wait_barrier,
&w, TASK_IDLE);
- if (!md_cluster_ops->area_resyncing(mddev, WRITE,
+ if (!mddev->cluster_ops->area_resyncing(mddev, WRITE,
bio->bi_iter.bi_sector, bio_end_sector(bio)))
break;
schedule();
@@ -1438,7 +1431,7 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
sector_t first_bad;
sector_t dev_sector = r10_bio->devs[i].addr;
- int bad_sectors;
+ sector_t bad_sectors;
int is_bad;
is_bad = is_badblock(rdev, dev_sector, max_sectors,
@@ -1631,11 +1624,10 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
return -EAGAIN;
- if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT)) {
+ if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) {
bio_wouldblock_error(bio);
return 0;
}
- wait_barrier(conf, false);
/*
* Check reshape again to avoid reshape happens after checking
@@ -2786,7 +2778,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
}
}
-static int narrow_write_error(struct r10bio *r10_bio, int i)
+static bool narrow_write_error(struct r10bio *r10_bio, int i)
{
struct bio *bio = r10_bio->master_bio;
struct mddev *mddev = r10_bio->mddev;
@@ -2807,10 +2799,10 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
sector_t sector;
int sectors;
int sect_to_write = r10_bio->sectors;
- int ok = 1;
+ bool ok = true;
if (rdev->badblocks.shift < 0)
- return 0;
+ return false;
block_sectors = roundup(1 << rdev->badblocks.shift,
bdev_logical_block_size(rdev->bdev) >> 9);
@@ -3413,7 +3405,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
sector_t from_addr, to_addr;
struct md_rdev *rdev = conf->mirrors[d].rdev;
sector_t sector, first_bad;
- int bad_sectors;
+ sector_t bad_sectors;
if (!rdev ||
!test_bit(In_sync, &rdev->flags))
continue;
@@ -3609,7 +3601,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
for (i = 0; i < conf->copies; i++) {
int d = r10_bio->devs[i].devnum;
sector_t first_bad, sector;
- int bad_sectors;
+ sector_t bad_sectors;
struct md_rdev *rdev;
if (r10_bio->devs[i].repl_bio)
@@ -3716,7 +3708,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
conf->cluster_sync_low = mddev->curr_resync_completed;
raid10_set_cluster_sync_high(conf);
/* Send resync message */
- md_cluster_ops->resync_info_update(mddev,
+ mddev->cluster_ops->resync_info_update(mddev,
conf->cluster_sync_low,
conf->cluster_sync_high);
}
@@ -3749,7 +3741,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
}
if (broadcast_msg) {
raid10_set_cluster_sync_high(conf);
- md_cluster_ops->resync_info_update(mddev,
+ mddev->cluster_ops->resync_info_update(mddev,
conf->cluster_sync_low,
conf->cluster_sync_high);
}
@@ -4541,7 +4533,7 @@ static int raid10_start_reshape(struct mddev *mddev)
if (ret)
goto abort;
- ret = md_cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
+ ret = mddev->cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
if (ret) {
mddev->bitmap_ops->resize(mddev, oldsize, 0, false);
goto abort;
@@ -4832,7 +4824,7 @@ read_more:
conf->cluster_sync_low = sb_reshape_pos;
}
- md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low,
+ mddev->cluster_ops->resync_info_update(mddev, conf->cluster_sync_low,
conf->cluster_sync_high);
}
@@ -4977,7 +4969,7 @@ static void raid10_update_reshape_pos(struct mddev *mddev)
struct r10conf *conf = mddev->private;
sector_t lo, hi;
- md_cluster_ops->resync_info_get(mddev, &lo, &hi);
+ mddev->cluster_ops->resync_info_get(mddev, &lo, &hi);
if (((mddev->reshape_position <= hi) && (mddev->reshape_position >= lo))
|| mddev->reshape_position == MaxSector)
conf->reshape_progress = mddev->reshape_position;
@@ -5123,9 +5115,13 @@ static void raid10_finish_reshape(struct mddev *mddev)
static struct md_personality raid10_personality =
{
- .name = "raid10",
- .level = 10,
- .owner = THIS_MODULE,
+ .head = {
+ .type = MD_PERSONALITY,
+ .id = ID_RAID10,
+ .name = "raid10",
+ .owner = THIS_MODULE,
+ },
+
.make_request = raid10_make_request,
.run = raid10_run,
.free = raid10_free,
@@ -5145,18 +5141,18 @@ static struct md_personality raid10_personality =
.update_reshape_pos = raid10_update_reshape_pos,
};
-static int __init raid_init(void)
+static int __init raid10_init(void)
{
- return register_md_personality(&raid10_personality);
+ return register_md_submodule(&raid10_personality.head);
}
-static void raid_exit(void)
+static void __exit raid10_exit(void)
{
- unregister_md_personality(&raid10_personality);
+ unregister_md_submodule(&raid10_personality.head);
}
-module_init(raid_init);
-module_exit(raid_exit);
+module_init(raid10_init);
+module_exit(raid10_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
MODULE_ALIAS("md-personality-9"); /* RAID10 */
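
raid10_handle_discard() above stops warning on REQ_NOWAIT and instead forwards the flag to wait_barrier(). A short sketch of the assumed contract — wait_barrier() returns false without sleeping when nowait is set and the barrier is raised; the function body itself is not part of these hunks:

static int discard_nowait_sketch(struct r10conf *conf, struct bio *bio)
{
	if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) {
		/* fail fast: completes the bio with BLK_STS_AGAIN */
		bio_wouldblock_error(bio);
		return 0;
	}

	/*
	 * ... discard handling runs with the barrier held; the matching
	 * barrier release is omitted from this sketch ...
	 */
	return 0;
}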
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index e530271cb86b..ba768ca7f422 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -714,7 +714,7 @@ static void r5l_submit_current_io(struct r5l_log *log)
block = page_address(io->meta_page);
block->meta_size = cpu_to_le32(io->meta_offset);
- crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
+ crc = crc32c(log->uuid_checksum, block, PAGE_SIZE);
block->checksum = cpu_to_le32(crc);
log->current_io = NULL;
@@ -1020,8 +1020,8 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
continue;
addr = kmap_local_page(sh->dev[i].page);
- sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
- addr, PAGE_SIZE);
+ sh->dev[i].log_checksum = crc32c(log->uuid_checksum,
+ addr, PAGE_SIZE);
kunmap_local(addr);
}
parity_pages = 1 + !!(sh->qd_idx >= 0);
@@ -1741,7 +1741,7 @@ static int r5l_recovery_read_meta_block(struct r5l_log *log,
le64_to_cpu(mb->position) != ctx->pos)
return -EINVAL;
- crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
+ crc = crc32c(log->uuid_checksum, mb, PAGE_SIZE);
if (stored_crc != crc)
return -EINVAL;
@@ -1780,8 +1780,7 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
return -ENOMEM;
r5l_recovery_create_empty_meta_block(log, page, pos, seq);
mb = page_address(page);
- mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
- mb, PAGE_SIZE));
+ mb->checksum = cpu_to_le32(crc32c(log->uuid_checksum, mb, PAGE_SIZE));
if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE |
REQ_SYNC | REQ_FUA, false)) {
__free_page(page);
@@ -1976,7 +1975,7 @@ r5l_recovery_verify_data_checksum(struct r5l_log *log,
r5l_recovery_read_page(log, ctx, page, log_offset);
addr = kmap_local_page(page);
- checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
+ checksum = crc32c(log->uuid_checksum, addr, PAGE_SIZE);
kunmap_local(addr);
return (le32_to_cpu(log_checksum) == checksum) ? 0 : -EINVAL;
}
@@ -2379,8 +2378,8 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
raid5_compute_blocknr(sh, i, 0));
addr = kmap_local_page(dev->page);
payload->checksum[0] = cpu_to_le32(
- crc32c_le(log->uuid_checksum, addr,
- PAGE_SIZE));
+ crc32c(log->uuid_checksum, addr,
+ PAGE_SIZE));
kunmap_local(addr);
sync_page_io(log->rdev, write_pos, PAGE_SIZE,
dev->page, REQ_OP_WRITE, false);
@@ -2392,8 +2391,8 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
}
}
mb->meta_size = cpu_to_le32(offset);
- mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
- mb, PAGE_SIZE));
+ mb->checksum = cpu_to_le32(crc32c(log->uuid_checksum,
+ mb, PAGE_SIZE));
sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
REQ_OP_WRITE | REQ_SYNC | REQ_FUA, false);
sh->log_start = ctx->pos;
@@ -2885,8 +2884,8 @@ int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
continue;
addr = kmap_local_page(sh->dev[i].page);
- sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
- addr, PAGE_SIZE);
+ sh->dev[i].log_checksum = crc32c(log->uuid_checksum,
+ addr, PAGE_SIZE);
kunmap_local(addr);
pages++;
}
@@ -2969,7 +2968,7 @@ static int r5l_load_log(struct r5l_log *log)
}
stored_crc = le32_to_cpu(mb->checksum);
mb->checksum = 0;
- expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
+ expected_crc = crc32c(log->uuid_checksum, mb, PAGE_SIZE);
if (stored_crc != expected_crc) {
create_super = true;
goto create;
@@ -3077,8 +3076,8 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
return -ENOMEM;
log->rdev = rdev;
log->need_cache_flush = bdev_write_cache(rdev->bdev);
- log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
- sizeof(rdev->mddev->uuid));
+ log->uuid_checksum = crc32c(~0, rdev->mddev->uuid,
+ sizeof(rdev->mddev->uuid));
mutex_init(&log->io_mutex);
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 37c4da5311ca..c0fb335311aa 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -346,9 +346,9 @@ static int ppl_log_stripe(struct ppl_log *log, struct stripe_head *sh)
if (!test_bit(STRIPE_FULL_WRITE, &sh->state)) {
le32_add_cpu(&e->pp_size, PAGE_SIZE);
io->pp_size += PAGE_SIZE;
- e->checksum = cpu_to_le32(crc32c_le(le32_to_cpu(e->checksum),
- page_address(sh->ppl_page),
- PAGE_SIZE));
+ e->checksum = cpu_to_le32(crc32c(le32_to_cpu(e->checksum),
+ page_address(sh->ppl_page),
+ PAGE_SIZE));
}
list_add_tail(&sh->log_list, &io->stripe_list);
@@ -454,7 +454,7 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
}
pplhdr->entries_count = cpu_to_le32(io->entries_count);
- pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PPL_HEADER_SIZE));
+ pplhdr->checksum = cpu_to_le32(~crc32c(~0, pplhdr, PPL_HEADER_SIZE));
/* Rewind the buffer if current PPL is larger then remaining space */
if (log->use_multippl &&
@@ -998,7 +998,7 @@ static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
goto out;
}
- crc = crc32c_le(crc, page_address(page), s);
+ crc = crc32c(crc, page_address(page), s);
pp_size -= s;
sector += s >> 9;
@@ -1052,7 +1052,7 @@ static int ppl_write_empty_header(struct ppl_log *log)
log->rdev->ppl.size, GFP_NOIO, 0);
memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
pplhdr->signature = cpu_to_le32(log->ppl_conf->signature);
- pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE));
+ pplhdr->checksum = cpu_to_le32(~crc32c(~0, pplhdr, PAGE_SIZE));
if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC |
@@ -1106,7 +1106,7 @@ static int ppl_load_distributed(struct ppl_log *log)
/* check header validity */
crc_stored = le32_to_cpu(pplhdr->checksum);
pplhdr->checksum = 0;
- crc = ~crc32c_le(~0, pplhdr, PAGE_SIZE);
+ crc = ~crc32c(~0, pplhdr, PAGE_SIZE);
if (crc_stored != crc) {
pr_debug("%s: ppl header crc does not match: stored: 0x%x calculated: 0x%x (offset: %llu)\n",
@@ -1390,7 +1390,7 @@ int ppl_init_log(struct r5conf *conf)
spin_lock_init(&ppl_conf->no_mem_stripes_lock);
if (!mddev->external) {
- ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
+ ppl_conf->signature = ~crc32c(~0, mddev->uuid, sizeof(mddev->uuid));
ppl_conf->block_size = 512;
} else {
ppl_conf->block_size =
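
The raid5-cache.c and raid5-ppl.c hunks are a mechanical rename from crc32c_le() to crc32c(); both names compute the same little-endian CRC32C, only the spelling changes. For the PPL header, the convention visible in ppl_write_empty_header() and ppl_load_distributed() is to seed with ~0 and store the bit-inverted result, so a store/verify pair (helper names are illustrative) looks like:

static __le32 ppl_hdr_checksum_sketch(const void *hdr, size_t len)
{
	/* seed with all-ones, store the bitwise inverse of the CRC */
	return cpu_to_le32(~crc32c(~0, hdr, len));
}

static bool ppl_hdr_checksum_ok_sketch(struct ppl_header *pplhdr)
{
	u32 stored = le32_to_cpu(pplhdr->checksum);

	/* the checksum field is zeroed before recomputing, as in the hunk */
	pplhdr->checksum = 0;
	return stored == ~crc32c(~0, pplhdr, PAGE_SIZE);
}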
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 5c79429acc64..6389383166c0 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5858,6 +5858,9 @@ static enum reshape_loc get_reshape_loc(struct mddev *mddev,
struct r5conf *conf, sector_t logical_sector)
{
sector_t reshape_progress, reshape_safe;
+
+ if (likely(conf->reshape_progress == MaxSector))
+ return LOC_NO_RESHAPE;
/*
* Spinlock is needed as reshape_progress may be
* 64bit on a 32bit platform, and so it might be
@@ -5935,22 +5938,19 @@ static enum stripe_result make_stripe_request(struct mddev *mddev,
const int rw = bio_data_dir(bi);
enum stripe_result ret;
struct stripe_head *sh;
+ enum reshape_loc loc;
sector_t new_sector;
int previous = 0, flags = 0;
int seq, dd_idx;
seq = read_seqcount_begin(&conf->gen_lock);
-
- if (unlikely(conf->reshape_progress != MaxSector)) {
- enum reshape_loc loc = get_reshape_loc(mddev, conf,
- logical_sector);
- if (loc == LOC_INSIDE_RESHAPE) {
- ret = STRIPE_SCHEDULE_AND_RETRY;
- goto out;
- }
- if (loc == LOC_AHEAD_OF_RESHAPE)
- previous = 1;
+ loc = get_reshape_loc(mddev, conf, logical_sector);
+ if (loc == LOC_INSIDE_RESHAPE) {
+ ret = STRIPE_SCHEDULE_AND_RETRY;
+ goto out;
}
+ if (loc == LOC_AHEAD_OF_RESHAPE)
+ previous = 1;
new_sector = raid5_compute_sector(conf, logical_sector, previous,
&dd_idx, NULL);
@@ -6127,7 +6127,6 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
/* Bail out if conflicts with reshape and REQ_NOWAIT is set */
if ((bi->bi_opf & REQ_NOWAIT) &&
- (conf->reshape_progress != MaxSector) &&
get_reshape_loc(mddev, conf, logical_sector) == LOC_INSIDE_RESHAPE) {
bio_wouldblock_error(bi);
if (rw == WRITE)
@@ -8954,9 +8953,13 @@ static void raid5_prepare_suspend(struct mddev *mddev)
static struct md_personality raid6_personality =
{
- .name = "raid6",
- .level = 6,
- .owner = THIS_MODULE,
+ .head = {
+ .type = MD_PERSONALITY,
+ .id = ID_RAID6,
+ .name = "raid6",
+ .owner = THIS_MODULE,
+ },
+
.make_request = raid5_make_request,
.run = raid5_run,
.start = raid5_start,
@@ -8980,9 +8983,13 @@ static struct md_personality raid6_personality =
};
static struct md_personality raid5_personality =
{
- .name = "raid5",
- .level = 5,
- .owner = THIS_MODULE,
+ .head = {
+ .type = MD_PERSONALITY,
+ .id = ID_RAID5,
+ .name = "raid5",
+ .owner = THIS_MODULE,
+ },
+
.make_request = raid5_make_request,
.run = raid5_run,
.start = raid5_start,
@@ -9007,9 +9014,13 @@ static struct md_personality raid5_personality =
static struct md_personality raid4_personality =
{
- .name = "raid4",
- .level = 4,
- .owner = THIS_MODULE,
+ .head = {
+ .type = MD_PERSONALITY,
+ .id = ID_RAID4,
+ .name = "raid4",
+ .owner = THIS_MODULE,
+ },
+
.make_request = raid5_make_request,
.run = raid5_run,
.start = raid5_start,
@@ -9045,21 +9056,39 @@ static int __init raid5_init(void)
"md/raid5:prepare",
raid456_cpu_up_prepare,
raid456_cpu_dead);
- if (ret) {
- destroy_workqueue(raid5_wq);
- return ret;
- }
- register_md_personality(&raid6_personality);
- register_md_personality(&raid5_personality);
- register_md_personality(&raid4_personality);
+ if (ret)
+ goto err_destroy_wq;
+
+ ret = register_md_submodule(&raid6_personality.head);
+ if (ret)
+ goto err_cpuhp_remove;
+
+ ret = register_md_submodule(&raid5_personality.head);
+ if (ret)
+ goto err_unregister_raid6;
+
+ ret = register_md_submodule(&raid4_personality.head);
+ if (ret)
+ goto err_unregister_raid5;
+
return 0;
+
+err_unregister_raid5:
+ unregister_md_submodule(&raid5_personality.head);
+err_unregister_raid6:
+ unregister_md_submodule(&raid6_personality.head);
+err_cpuhp_remove:
+ cpuhp_remove_multi_state(CPUHP_MD_RAID5_PREPARE);
+err_destroy_wq:
+ destroy_workqueue(raid5_wq);
+ return ret;
}
-static void raid5_exit(void)
+static void __exit raid5_exit(void)
{
- unregister_md_personality(&raid6_personality);
- unregister_md_personality(&raid5_personality);
- unregister_md_personality(&raid4_personality);
+ unregister_md_submodule(&raid6_personality.head);
+ unregister_md_submodule(&raid5_personality.head);
+ unregister_md_submodule(&raid4_personality.head);
cpuhp_remove_multi_state(CPUHP_MD_RAID5_PREPARE);
destroy_workqueue(raid5_wq);
}
diff --git a/drivers/media/cec/core/cec-api.c b/drivers/media/cec/core/cec-api.c
index c50299246fc4..2b50578d107e 100644
--- a/drivers/media/cec/core/cec-api.c
+++ b/drivers/media/cec/core/cec-api.c
@@ -222,7 +222,7 @@ static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
mutex_lock(&adap->lock);
if (adap->log_addrs.num_log_addrs == 0)
err = -EPERM;
- else if (adap->is_configuring)
+ else if (adap->is_configuring && !msg_is_raw(&msg))
err = -ENONET;
else if (cec_is_busy(adap, fh))
err = -EBUSY;
diff --git a/drivers/media/cec/core/cec-pin.c b/drivers/media/cec/core/cec-pin.c
index a70451d99ebc..59ac12113f3a 100644
--- a/drivers/media/cec/core/cec-pin.c
+++ b/drivers/media/cec/core/cec-pin.c
@@ -873,19 +873,19 @@ static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
if (pin->wait_usecs > 150) {
pin->wait_usecs -= 100;
pin->timer_ts = ktime_add_us(ts, 100);
- hrtimer_forward_now(timer, ns_to_ktime(100000));
+ hrtimer_forward_now(timer, us_to_ktime(100));
return HRTIMER_RESTART;
}
if (pin->wait_usecs > 100) {
pin->wait_usecs /= 2;
pin->timer_ts = ktime_add_us(ts, pin->wait_usecs);
hrtimer_forward_now(timer,
- ns_to_ktime(pin->wait_usecs * 1000));
+ us_to_ktime(pin->wait_usecs));
return HRTIMER_RESTART;
}
pin->timer_ts = ktime_add_us(ts, pin->wait_usecs);
hrtimer_forward_now(timer,
- ns_to_ktime(pin->wait_usecs * 1000));
+ us_to_ktime(pin->wait_usecs));
pin->wait_usecs = 0;
return HRTIMER_RESTART;
}
@@ -1020,13 +1020,12 @@ static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
if (!adap->monitor_pin_cnt || usecs <= 150) {
pin->wait_usecs = 0;
pin->timer_ts = ktime_add_us(ts, usecs);
- hrtimer_forward_now(timer,
- ns_to_ktime(usecs * 1000));
+ hrtimer_forward_now(timer, us_to_ktime(usecs));
return HRTIMER_RESTART;
}
pin->wait_usecs = usecs - 100;
pin->timer_ts = ktime_add_us(ts, 100);
- hrtimer_forward_now(timer, ns_to_ktime(100000));
+ hrtimer_forward_now(timer, us_to_ktime(100));
return HRTIMER_RESTART;
}
@@ -1346,9 +1345,8 @@ struct cec_adapter *cec_pin_allocate_adapter(const struct cec_pin_ops *pin_ops,
if (pin == NULL)
return ERR_PTR(-ENOMEM);
pin->ops = pin_ops;
- hrtimer_init(&pin->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
atomic_set(&pin->work_pin_num_events, 0);
- pin->timer.function = cec_pin_timer;
+ hrtimer_setup(&pin->timer, cec_pin_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
init_waitqueue_head(&pin->kthread_waitq);
pin->tx_custom_low_usecs = CEC_TIM_CUSTOM_DEFAULT;
pin->tx_custom_high_usecs = CEC_TIM_CUSTOM_DEFAULT;
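
cec_pin_allocate_adapter() above moves to hrtimer_setup(), which folds hrtimer_init() and the separate .function assignment into one call, while the timer callback switches from hand-scaled ns_to_ktime(usecs * 1000) to us_to_ktime(). A minimal sketch of the combined pattern (function names are illustrative):

static enum hrtimer_restart example_timer_fn(struct hrtimer *timer)
{
	/* re-arm 100 microseconds from now, as the CEC pin timer does */
	hrtimer_forward_now(timer, us_to_ktime(100));
	return HRTIMER_RESTART;
}

static void example_timer_init(struct hrtimer *timer)
{
	hrtimer_setup(timer, example_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}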
diff --git a/drivers/media/cec/i2c/Kconfig b/drivers/media/cec/i2c/Kconfig
index d912d143fb31..b9d21643eef1 100644
--- a/drivers/media/cec/i2c/Kconfig
+++ b/drivers/media/cec/i2c/Kconfig
@@ -13,3 +13,12 @@ config CEC_CH7322
generic CEC framework interface.
CEC bus is present in the HDMI connector and enables communication
between compatible devices.
+
+config CEC_NXP_TDA9950
+ tristate "NXP Semiconductors TDA9950/TDA998X HDMI CEC"
+ select CEC_NOTIFIER
+ select CEC_CORE
+ default DRM_I2C_NXP_TDA998X
+ help
+ This is a driver for the NXP TDA9950 CEC controller and for the CEC
+ controller block integrated into several NXP TDA998x HDMI encoders.
diff --git a/drivers/media/cec/i2c/Makefile b/drivers/media/cec/i2c/Makefile
index d7496dfd0fa4..95c9eda52583 100644
--- a/drivers/media/cec/i2c/Makefile
+++ b/drivers/media/cec/i2c/Makefile
@@ -3,3 +3,4 @@
# Makefile for the CEC I2C device drivers.
#
obj-$(CONFIG_CEC_CH7322) += ch7322.o
+obj-$(CONFIG_CEC_NXP_TDA9950) += tda9950.o
diff --git a/drivers/gpu/drm/i2c/tda9950.c b/drivers/media/cec/i2c/tda9950.c
index cbff851e0c85..cbff851e0c85 100644
--- a/drivers/gpu/drm/i2c/tda9950.c
+++ b/drivers/media/cec/i2c/tda9950.c
diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c
index 44d8fe8b220e..9b1a650ed055 100644
--- a/drivers/media/common/siano/smsdvb-main.c
+++ b/drivers/media/common/siano/smsdvb-main.c
@@ -1243,6 +1243,8 @@ static int __init smsdvb_module_init(void)
smsdvb_debugfs_register();
rc = smscore_register_hotplug(smsdvb_hotplug);
+ if (rc)
+ smsdvb_debugfs_unregister();
pr_debug("\n");
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index ded11cd8dbf7..931e5dc453b9 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -2249,10 +2249,10 @@ void tpg_log_status(struct tpg_data *tpg)
tpg->src_width, tpg->src_height,
tpg_color_enc_str(tpg->color_enc));
pr_info("tpg field: %u\n", tpg->field);
- pr_info("tpg crop: %ux%u@%dx%d\n", tpg->crop.width, tpg->crop.height,
- tpg->crop.left, tpg->crop.top);
- pr_info("tpg compose: %ux%u@%dx%d\n", tpg->compose.width, tpg->compose.height,
- tpg->compose.left, tpg->compose.top);
+ pr_info("tpg crop: (%d,%d)/%ux%u\n", tpg->crop.left, tpg->crop.top,
+ tpg->crop.width, tpg->crop.height);
+ pr_info("tpg compose: (%d,%d)/%ux%u\n", tpg->compose.left, tpg->compose.top,
+ tpg->compose.width, tpg->compose.height);
pr_info("tpg colorspace: %d\n", tpg->colorspace);
pr_info("tpg transfer function: %d/%d\n", tpg->xfer_func, tpg->real_xfer_func);
if (tpg->color_enc == TGP_COLOR_ENC_HSV)
diff --git a/drivers/media/dvb-frontends/dibx000_common.c b/drivers/media/dvb-frontends/dibx000_common.c
index 63a4c6a4afb5..bd5c5d7223aa 100644
--- a/drivers/media/dvb-frontends/dibx000_common.c
+++ b/drivers/media/dvb-frontends/dibx000_common.c
@@ -250,12 +250,12 @@ static int dibx000_i2c_master_xfer_gpio34(struct i2c_adapter *i2c_adap, struct i
return num;
}
-static struct i2c_algorithm dibx000_i2c_master_gpio12_xfer_algo = {
+static const struct i2c_algorithm dibx000_i2c_master_gpio12_xfer_algo = {
.master_xfer = dibx000_i2c_master_xfer_gpio12,
.functionality = dibx000_i2c_func,
};
-static struct i2c_algorithm dibx000_i2c_master_gpio34_xfer_algo = {
+static const struct i2c_algorithm dibx000_i2c_master_gpio34_xfer_algo = {
.master_xfer = dibx000_i2c_master_xfer_gpio34,
.functionality = dibx000_i2c_func,
};
@@ -324,7 +324,7 @@ static int dibx000_i2c_gated_gpio67_xfer(struct i2c_adapter *i2c_adap,
return ret;
}
-static struct i2c_algorithm dibx000_i2c_gated_gpio67_algo = {
+static const struct i2c_algorithm dibx000_i2c_gated_gpio67_algo = {
.master_xfer = dibx000_i2c_gated_gpio67_xfer,
.functionality = dibx000_i2c_func,
};
@@ -369,7 +369,7 @@ static int dibx000_i2c_gated_tuner_xfer(struct i2c_adapter *i2c_adap,
return ret;
}
-static struct i2c_algorithm dibx000_i2c_gated_tuner_algo = {
+static const struct i2c_algorithm dibx000_i2c_gated_tuner_algo = {
.master_xfer = dibx000_i2c_gated_tuner_xfer,
.functionality = dibx000_i2c_func,
};
@@ -422,7 +422,7 @@ void dibx000_reset_i2c_master(struct dibx000_i2c_master *mst)
EXPORT_SYMBOL(dibx000_reset_i2c_master);
static int i2c_adapter_init(struct i2c_adapter *i2c_adap,
- struct i2c_algorithm *algo, const char *name,
+ const struct i2c_algorithm *algo, const char *name,
struct dibx000_i2c_master *mst)
{
strscpy(i2c_adap->name, name, sizeof(i2c_adap->name));
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index 05254d8717db..0357624968f1 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -1363,6 +1363,7 @@ static int rtl2832_sdr_probe(struct platform_device *pdev)
dev->vb_queue.ops = &rtl2832_sdr_vb2_ops;
dev->vb_queue.mem_ops = &vb2_vmalloc_memops;
dev->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ dev->vb_queue.lock = &dev->vb_queue_lock;
ret = vb2_queue_init(&dev->vb_queue);
if (ret) {
dev_err(&pdev->dev, "Could not initialize vb2 queue\n");
@@ -1421,7 +1422,6 @@ static int rtl2832_sdr_probe(struct platform_device *pdev)
/* Init video_device structure */
dev->vdev = rtl2832_sdr_template;
dev->vdev.queue = &dev->vb_queue;
- dev->vdev.queue->lock = &dev->vb_queue_lock;
video_set_drvdata(&dev->vdev, dev);
/* Register the v4l2_device structure */
diff --git a/drivers/media/dvb-frontends/stv0299.c b/drivers/media/dvb-frontends/stv0299.c
index da7ff2c2e8e5..ba4bb3685095 100644
--- a/drivers/media/dvb-frontends/stv0299.c
+++ b/drivers/media/dvb-frontends/stv0299.c
@@ -250,7 +250,7 @@ static int stv0299_get_symbolrate (struct stv0299_state* state)
offset /= 128;
dprintk ("%s : srate = %i\n", __func__, srate);
- dprintk ("%s : ofset = %i\n", __func__, offset);
+ dprintk ("%s : offset = %i\n", __func__, offset);
srate += offset;
diff --git a/drivers/media/dvb-frontends/tda10048.c b/drivers/media/dvb-frontends/tda10048.c
index 3e725cdcc66b..1f87eb0dcf2a 100644
--- a/drivers/media/dvb-frontends/tda10048.c
+++ b/drivers/media/dvb-frontends/tda10048.c
@@ -328,7 +328,8 @@ static int tda10048_set_wref(struct dvb_frontend *fe, u32 sample_freq_hz,
u32 bw)
{
struct tda10048_state *state = fe->demodulator_priv;
- u64 t, z;
+ u64 t;
+ u32 z;
dprintk(1, "%s()\n", __func__);
@@ -341,6 +342,11 @@ static int tda10048_set_wref(struct dvb_frontend *fe, u32 sample_freq_hz,
/* t *= 2147483648 on 32bit platforms */
t *= (2048 * 1024);
t *= 1024;
+
+ /*
+ * Sample frequency is typically 55 MHz, with a theoretical maximum of
+ * 69 MHz. With a 32 bit z we have enough accuracy for up to 613 MHz.
+ */
z = 7 * sample_freq_hz;
do_div(t, z);
t += 5;
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 8ba096b8ebca..e576b213084d 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -140,6 +140,7 @@ config VIDEO_IMX214
tristate "Sony IMX214 sensor support"
depends on GPIOLIB
select REGMAP_I2C
+ select V4L2_CCI_I2C
help
This is a Video4Linux2 sensor driver for the Sony
IMX214 camera.
@@ -1146,6 +1147,17 @@ config VIDEO_ISL7998X
Support for Intersil ISL7998x analog to MIPI-CSI2 or
BT.656 decoder.
+config VIDEO_LT6911UXE
+ tristate "Lontium LT6911UXE decoder"
+ depends on ACPI && VIDEO_DEV
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor-level driver for the Lontium
+ LT6911UXE HDMI to MIPI CSI-2 bridge.
+
+ To compile this driver as a module, choose M here: the
+ module will be called lt6911uxe.
+
config VIDEO_KS0127
tristate "KS0127 video decoder"
depends on VIDEO_DEV && I2C
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index fbb988bd067a..6c23a4463527 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -64,6 +64,7 @@ obj-$(CONFIG_VIDEO_ISL7998X) += isl7998x.o
obj-$(CONFIG_VIDEO_KS0127) += ks0127.o
obj-$(CONFIG_VIDEO_LM3560) += lm3560.o
obj-$(CONFIG_VIDEO_LM3646) += lm3646.o
+obj-$(CONFIG_VIDEO_LT6911UXE) += lt6911uxe.o
obj-$(CONFIG_VIDEO_M52790) += m52790.o
obj-$(CONFIG_VIDEO_MAX9271_LIB) += max9271.o
obj-$(CONFIG_VIDEO_MAX9286) += max9286.o
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index ff7dfa0278a7..6e50b14f888f 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -195,6 +195,7 @@ struct adv7180_state;
#define ADV7180_FLAG_V2 BIT(1)
#define ADV7180_FLAG_MIPI_CSI2 BIT(2)
#define ADV7180_FLAG_I2P BIT(3)
+#define ADV7180_FLAG_TEST_PATTERN BIT(4)
struct adv7180_chip_info {
unsigned int flags;
@@ -682,11 +683,15 @@ static int adv7180_init_controls(struct adv7180_state *state)
ADV7180_HUE_MAX, 1, ADV7180_HUE_DEF);
v4l2_ctrl_new_custom(&state->ctrl_hdl, &adv7180_ctrl_fast_switch, NULL);
- v4l2_ctrl_new_std_menu_items(&state->ctrl_hdl, &adv7180_ctrl_ops,
- V4L2_CID_TEST_PATTERN,
- ARRAY_SIZE(test_pattern_menu) - 1,
- 0, ARRAY_SIZE(test_pattern_menu) - 1,
- test_pattern_menu);
+ if (state->chip_info->flags & ADV7180_FLAG_TEST_PATTERN) {
+ v4l2_ctrl_new_std_menu_items(&state->ctrl_hdl,
+ &adv7180_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(test_pattern_menu) - 1,
+ 0,
+ ARRAY_SIZE(test_pattern_menu) - 1,
+ test_pattern_menu);
+ }
state->sd.ctrl_handler = &state->ctrl_hdl;
if (state->ctrl_hdl.error) {
@@ -1221,7 +1226,7 @@ static const struct adv7180_chip_info adv7182_info = {
};
static const struct adv7180_chip_info adv7280_info = {
- .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P,
+ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P | ADV7180_FLAG_TEST_PATTERN,
.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
BIT(ADV7182_INPUT_CVBS_AIN2) |
BIT(ADV7182_INPUT_CVBS_AIN3) |
@@ -1235,7 +1240,8 @@ static const struct adv7180_chip_info adv7280_info = {
};
static const struct adv7180_chip_info adv7280_m_info = {
- .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P,
+ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P |
+ ADV7180_FLAG_TEST_PATTERN,
.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
BIT(ADV7182_INPUT_CVBS_AIN2) |
BIT(ADV7182_INPUT_CVBS_AIN3) |
@@ -1256,7 +1262,8 @@ static const struct adv7180_chip_info adv7280_m_info = {
};
static const struct adv7180_chip_info adv7281_info = {
- .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2,
+ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 |
+ ADV7180_FLAG_TEST_PATTERN,
.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
BIT(ADV7182_INPUT_CVBS_AIN2) |
BIT(ADV7182_INPUT_CVBS_AIN7) |
@@ -1271,7 +1278,8 @@ static const struct adv7180_chip_info adv7281_info = {
};
static const struct adv7180_chip_info adv7281_m_info = {
- .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2,
+ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 |
+ ADV7180_FLAG_TEST_PATTERN,
.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
BIT(ADV7182_INPUT_CVBS_AIN2) |
BIT(ADV7182_INPUT_CVBS_AIN3) |
@@ -1291,7 +1299,8 @@ static const struct adv7180_chip_info adv7281_m_info = {
};
static const struct adv7180_chip_info adv7281_ma_info = {
- .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2,
+ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 |
+ ADV7180_FLAG_TEST_PATTERN,
.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
BIT(ADV7182_INPUT_CVBS_AIN2) |
BIT(ADV7182_INPUT_CVBS_AIN3) |
@@ -1316,7 +1325,7 @@ static const struct adv7180_chip_info adv7281_ma_info = {
};
static const struct adv7180_chip_info adv7282_info = {
- .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P,
+ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P | ADV7180_FLAG_TEST_PATTERN,
.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
BIT(ADV7182_INPUT_CVBS_AIN2) |
BIT(ADV7182_INPUT_CVBS_AIN7) |
@@ -1331,7 +1340,8 @@ static const struct adv7180_chip_info adv7282_info = {
};
static const struct adv7180_chip_info adv7282_m_info = {
- .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P,
+ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P |
+ ADV7180_FLAG_TEST_PATTERN,
.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
BIT(ADV7182_INPUT_CVBS_AIN2) |
BIT(ADV7182_INPUT_CVBS_AIN3) |
diff --git a/drivers/media/i2c/adv748x/adv748x.h b/drivers/media/i2c/adv748x/adv748x.h
index 9bc0121d0eff..2c1db5968af8 100644
--- a/drivers/media/i2c/adv748x/adv748x.h
+++ b/drivers/media/i2c/adv748x/adv748x.h
@@ -320,7 +320,7 @@ struct adv748x_state {
/* Free run pattern select */
#define ADV748X_SDP_FRP 0x14
-#define ADV748X_SDP_FRP_MASK GENMASK(3, 1)
+#define ADV748X_SDP_FRP_MASK GENMASK(2, 0)
/* Saturation */
#define ADV748X_SDP_SD_SAT_U 0xe3 /* user_map_rw_reg_e3 */
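
The adv748x.h change narrows ADV748X_SDP_FRP_MASK from GENMASK(3, 1) to GENMASK(2, 0): GENMASK(h, l) spans bits h..l inclusive, so the old mask covered bits 3:1 (0x0e) while the free-run pattern select field addressed by this fix sits in bits 2:0 (0x07). A compile-time check makes the difference explicit:

#include <linux/bits.h>
#include <linux/build_bug.h>

static_assert(GENMASK(3, 1) == 0x0e);	/* old mask: bits 3..1, off by one bit */
static_assert(GENMASK(2, 0) == 0x07);	/* new ADV748X_SDP_FRP_MASK: bits 2..0 */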
diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
index 4036972af3a6..f95a99d85360 100644
--- a/drivers/media/i2c/adv7511-v4l2.c
+++ b/drivers/media/i2c/adv7511-v4l2.c
@@ -1664,7 +1664,9 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
if (!err) {
adv7511_dbg_dump_edid(2, debug, sd, segment, &state->edid.data[segment * 256]);
if (segment == 0) {
- state->edid.blocks = state->edid.data[0x7e] + 1;
+ state->edid.blocks =
+ v4l2_num_edid_blocks(state->edid.data,
+ EDID_MAX_SEGM * 2);
v4l2_dbg(1, debug, sd, "%s: %d blocks in total\n",
__func__, state->edid.blocks);
}
@@ -1682,7 +1684,7 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
/* one more segment read ok */
state->edid.segments = segment + 1;
v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x1);
- if (((state->edid.data[0x7e] >> 1) + 1) > state->edid.segments) {
+ if (state->edid.blocks > state->edid.segments * 2) {
/* Request next EDID segment */
v4l2_dbg(1, debug, sd, "%s: request segment %d\n", __func__, state->edid.segments);
adv7511_wr(sd, 0xc9, 0xf);
diff --git a/drivers/media/i2c/ccs-pll.c b/drivers/media/i2c/ccs-pll.c
index cf8858cb13d4..34ccda666524 100644
--- a/drivers/media/i2c/ccs-pll.c
+++ b/drivers/media/i2c/ccs-pll.c
@@ -75,11 +75,11 @@ static const char *pll_string(unsigned int which)
#define PLL_FL(f) CCS_PLL_FLAG_##f
-static void print_pll(struct device *dev, struct ccs_pll *pll)
+static void print_pll(struct device *dev, const struct ccs_pll *pll)
{
const struct {
- struct ccs_pll_branch_fr *fr;
- struct ccs_pll_branch_bk *bk;
+ const struct ccs_pll_branch_fr *fr;
+ const struct ccs_pll_branch_bk *bk;
unsigned int which;
} branches[] = {
{ &pll->vt_fr, &pll->vt_bk, PLL_VT },
@@ -150,10 +150,10 @@ static u32 op_pix_ddr(u32 flags)
static int check_fr_bounds(struct device *dev,
const struct ccs_pll_limits *lim,
- struct ccs_pll *pll, unsigned int which)
+ const struct ccs_pll *pll, unsigned int which)
{
const struct ccs_pll_branch_limits_fr *lim_fr;
- struct ccs_pll_branch_fr *pll_fr;
+ const struct ccs_pll_branch_fr *pll_fr;
const char *s = pll_string(which);
int rval;
@@ -190,10 +190,10 @@ static int check_fr_bounds(struct device *dev,
static int check_bk_bounds(struct device *dev,
const struct ccs_pll_limits *lim,
- struct ccs_pll *pll, unsigned int which)
+ const struct ccs_pll *pll, unsigned int which)
{
const struct ccs_pll_branch_limits_bk *lim_bk;
- struct ccs_pll_branch_bk *pll_bk;
+ const struct ccs_pll_branch_bk *pll_bk;
const char *s = pll_string(which);
int rval;
@@ -230,7 +230,7 @@ static int check_bk_bounds(struct device *dev,
return rval;
}
-static int check_ext_bounds(struct device *dev, struct ccs_pll *pll)
+static int check_ext_bounds(struct device *dev, const struct ccs_pll *pll)
{
if (!(pll->flags & CCS_PLL_FLAG_FIFO_DERATING) &&
pll->pixel_rate_pixel_array > pll->pixel_rate_csi) {
diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
index 2cdab2f3d9dc..004d28c33287 100644
--- a/drivers/media/i2c/ccs/ccs-core.c
+++ b/drivers/media/i2c/ccs/ccs-core.c
@@ -3566,6 +3566,7 @@ static int ccs_probe(struct i2c_client *client)
out_disable_runtime_pm:
pm_runtime_put_noidle(&client->dev);
pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
out_cleanup:
ccs_cleanup(sensor);
@@ -3595,9 +3596,10 @@ static void ccs_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(subdev);
pm_runtime_disable(&client->dev);
- if (!pm_runtime_status_suspended(&client->dev))
+ if (!pm_runtime_status_suspended(&client->dev)) {
ccs_power_off(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ }
for (i = 0; i < sensor->ssds_used; i++)
v4l2_device_unregister_subdev(&sensor->ssds[i].sd);
diff --git a/drivers/media/i2c/dw9719.c b/drivers/media/i2c/dw9719.c
index c626ed845928..032fbcb981f2 100644
--- a/drivers/media/i2c/dw9719.c
+++ b/drivers/media/i2c/dw9719.c
@@ -2,8 +2,10 @@
// Copyright (c) 2012 Intel Corporation
/*
- * Based on linux/modules/camera/drivers/media/i2c/imx/dw9719.c in this repo:
- * https://github.com/ZenfoneArea/android_kernel_asus_zenfone5
+ * Based on linux/modules/camera/drivers/media/i2c/imx/dw9719.c from:
+ * https://github.com/ZenfoneArea/android_kernel_asus_zenfone5 and
+ * latte-l-oss/drivers/external_drivers/camera/drivers/media/i2c/micam/dw9761.c
+ * from: https://github.com/MiCode/Xiaomi_Kernel_OpenSource/
*/
#include <linux/delay.h>
@@ -23,26 +25,45 @@
#define DW9719_INFO CCI_REG8(0)
#define DW9719_ID 0xF1
+#define DW9761_ID 0xF4
#define DW9719_CONTROL CCI_REG8(2)
+#define DW9719_STANDBY 0x00
+#define DW9719_SHUTDOWN 0x01
#define DW9719_ENABLE_RINGING 0x02
#define DW9719_VCM_CURRENT CCI_REG16(3)
+#define DW9719_STATUS CCI_REG16(5)
+#define DW9719_STATUS_BUSY BIT(0)
+
#define DW9719_MODE CCI_REG8(6)
#define DW9719_MODE_SAC_SHIFT 4
-#define DW9719_MODE_SAC3 4
+#define DW9719_DEFAULT_SAC 4
+#define DW9761_DEFAULT_SAC 6
#define DW9719_VCM_FREQ CCI_REG8(7)
#define DW9719_DEFAULT_VCM_FREQ 0x60
+#define DW9761_DEFAULT_VCM_FREQ 0x3E
+
+#define DW9761_VCM_PRELOAD CCI_REG8(8)
+#define DW9761_DEFAULT_VCM_PRELOAD 0x73
+
#define to_dw9719_device(x) container_of(x, struct dw9719_device, sd)
+enum dw9719_model {
+ DW9719,
+ DW9761,
+};
+
struct dw9719_device {
struct v4l2_subdev sd;
struct device *dev;
struct regmap *regmap;
struct regulator *regulator;
+ enum dw9719_model model;
+ u32 mode_low_bits;
u32 sac_mode;
u32 vcm_freq;
@@ -52,30 +73,14 @@ struct dw9719_device {
} ctrls;
};
-static int dw9719_detect(struct dw9719_device *dw9719)
-{
- int ret;
- u64 val;
-
- ret = cci_read(dw9719->regmap, DW9719_INFO, &val, NULL);
- if (ret < 0)
- return ret;
-
- if (val != DW9719_ID) {
- dev_err(dw9719->dev, "Failed to detect correct id\n");
- return -ENXIO;
- }
-
- return 0;
-}
-
static int dw9719_power_down(struct dw9719_device *dw9719)
{
return regulator_disable(dw9719->regulator);
}
-static int dw9719_power_up(struct dw9719_device *dw9719)
+static int dw9719_power_up(struct dw9719_device *dw9719, bool detect)
{
+ u64 val;
int ret;
ret = regulator_enable(dw9719->regulator);
@@ -83,16 +88,54 @@ static int dw9719_power_up(struct dw9719_device *dw9719)
return ret;
/* Jiggle SCL pin to wake up device */
- cci_write(dw9719->regmap, DW9719_CONTROL, 1, &ret);
-
+ cci_write(dw9719->regmap, DW9719_CONTROL, DW9719_SHUTDOWN, &ret);
+ fsleep(100);
+ cci_write(dw9719->regmap, DW9719_CONTROL, DW9719_STANDBY, &ret);
/* Need 100us to transit from SHUTDOWN to STANDBY */
fsleep(100);
+ if (detect) {
+ ret = cci_read(dw9719->regmap, DW9719_INFO, &val, NULL);
+ if (ret < 0)
+ return ret;
+
+ switch (val) {
+ case DW9719_ID:
+ dw9719->model = DW9719;
+ dw9719->mode_low_bits = 0x00;
+ dw9719->sac_mode = DW9719_DEFAULT_SAC;
+ dw9719->vcm_freq = DW9719_DEFAULT_VCM_FREQ;
+ break;
+ case DW9761_ID:
+ dw9719->model = DW9761;
+ dw9719->mode_low_bits = 0x01;
+ dw9719->sac_mode = DW9761_DEFAULT_SAC;
+ dw9719->vcm_freq = DW9761_DEFAULT_VCM_FREQ;
+ break;
+ default:
+ dev_err(dw9719->dev,
+ "Error unknown device id 0x%02llx\n", val);
+ return -ENXIO;
+ }
+
+ /* Optional indication of SAC mode select */
+ device_property_read_u32(dw9719->dev, "dongwoon,sac-mode",
+ &dw9719->sac_mode);
+
+ /* Optional indication of VCM frequency */
+ device_property_read_u32(dw9719->dev, "dongwoon,vcm-freq",
+ &dw9719->vcm_freq);
+ }
+
cci_write(dw9719->regmap, DW9719_CONTROL, DW9719_ENABLE_RINGING, &ret);
- cci_write(dw9719->regmap, DW9719_MODE,
- dw9719->sac_mode << DW9719_MODE_SAC_SHIFT, &ret);
+ cci_write(dw9719->regmap, DW9719_MODE, dw9719->mode_low_bits |
+ (dw9719->sac_mode << DW9719_MODE_SAC_SHIFT), &ret);
cci_write(dw9719->regmap, DW9719_VCM_FREQ, dw9719->vcm_freq, &ret);
+ if (dw9719->model == DW9761)
+ cci_write(dw9719->regmap, DW9761_VCM_PRELOAD,
+ DW9761_DEFAULT_VCM_PRELOAD, &ret);
+
if (ret)
dw9719_power_down(dw9719);
@@ -159,7 +202,7 @@ static int dw9719_resume(struct device *dev)
int ret;
int val;
- ret = dw9719_power_up(dw9719);
+ ret = dw9719_power_up(dw9719, false);
if (ret)
return ret;
@@ -237,16 +280,6 @@ static int dw9719_probe(struct i2c_client *client)
return PTR_ERR(dw9719->regmap);
dw9719->dev = &client->dev;
- dw9719->sac_mode = DW9719_MODE_SAC3;
- dw9719->vcm_freq = DW9719_DEFAULT_VCM_FREQ;
-
- /* Optional indication of SAC mode select */
- device_property_read_u32(&client->dev, "dongwoon,sac-mode",
- &dw9719->sac_mode);
-
- /* Optional indication of VCM frequency */
- device_property_read_u32(&client->dev, "dongwoon,vcm-freq",
- &dw9719->vcm_freq);
dw9719->regulator = devm_regulator_get(&client->dev, "vdd");
if (IS_ERR(dw9719->regulator))
@@ -274,14 +307,10 @@ static int dw9719_probe(struct i2c_client *client)
* will work.
*/
- ret = dw9719_power_up(dw9719);
+ ret = dw9719_power_up(dw9719, true);
if (ret)
goto err_cleanup_media;
- ret = dw9719_detect(dw9719);
- if (ret)
- goto err_powerdown;
-
pm_runtime_set_active(&client->dev);
pm_runtime_get_noresume(&client->dev);
pm_runtime_enable(&client->dev);
@@ -299,7 +328,6 @@ static int dw9719_probe(struct i2c_client *client)
err_pm_runtime:
pm_runtime_disable(&client->dev);
pm_runtime_put_noidle(&client->dev);
-err_powerdown:
dw9719_power_down(dw9719);
err_cleanup_media:
media_entity_cleanup(&dw9719->sd.entity);
@@ -327,6 +355,7 @@ static void dw9719_remove(struct i2c_client *client)
static const struct i2c_device_id dw9719_id_table[] = {
{ "dw9719" },
+ { "dw9761" },
{ }
};
MODULE_DEVICE_TABLE(i2c, dw9719_id_table);
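
Editor's note on the pattern used throughout the dw9719 rework above: the v4l2-cci helpers follow a chained-error convention, where cci_write()/cci_read() take an optional int *err, become a no-op once *err is already set, and latch the first failure, so a whole register sequence needs only one check at the end. The standalone sketch below mimics that convention with a hypothetical stub (fake_bus/stub_write are not kernel APIs); only the calling pattern mirrors the driver code.

/*
 * Illustrative stand-in for the cci_write() error-accumulation convention:
 * each helper takes an "int *err" and skips the access once a previous
 * call has failed, so the caller checks the error only once.
 */
#include <stdio.h>

struct fake_bus { int fail_at; int writes; };

static int stub_write(struct fake_bus *bus, unsigned int reg,
		      unsigned int val, int *err)
{
	if (err && *err)		/* earlier failure: skip the write */
		return *err;

	bus->writes++;
	if (bus->writes == bus->fail_at) {
		if (err)
			*err = -5;	/* pretend -EIO */
		return -5;
	}
	printf("wrote 0x%02x to reg 0x%04x\n", val, reg);
	return 0;
}

int main(void)
{
	struct fake_bus bus = { .fail_at = 2 };
	int ret = 0;

	stub_write(&bus, 0x02, 0x01, &ret);	/* succeeds */
	stub_write(&bus, 0x06, 0x40, &ret);	/* fails, latches ret */
	stub_write(&bus, 0x07, 0x60, &ret);	/* skipped */

	if (ret)
		printf("sequence failed: %d (checked once at the end)\n", ret);
	return 0;
}
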
diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c
index 3ac42d1ab8b4..aed258211b8a 100644
--- a/drivers/media/i2c/hi556.c
+++ b/drivers/media/i2c/hi556.c
@@ -719,7 +719,7 @@ static int hi556_write_reg_list(struct hi556 *hi556,
r_list->regs[i].val);
if (ret) {
dev_err_ratelimited(&client->dev,
- "failed to write reg 0x%4.4x. error = %d",
+ "failed to write reg 0x%4.4x. error = %d\n",
r_list->regs[i].address, ret);
return ret;
}
@@ -926,7 +926,7 @@ static int hi556_identify_module(struct hi556 *hi556)
return ret;
if (val != HI556_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
HI556_CHIP_ID, val);
return -ENXIO;
}
@@ -1002,14 +1002,14 @@ static int hi556_start_streaming(struct hi556 *hi556)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = hi556_write_reg_list(hi556, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set plls");
+ dev_err(&client->dev, "failed to set plls\n");
return ret;
}
reg_list = &hi556->cur_mode->reg_list;
ret = hi556_write_reg_list(hi556, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mode");
+ dev_err(&client->dev, "failed to set mode\n");
return ret;
}
@@ -1021,7 +1021,7 @@ static int hi556_start_streaming(struct hi556 *hi556)
HI556_REG_VALUE_16BIT, HI556_MODE_STREAMING);
if (ret) {
- dev_err(&client->dev, "failed to set stream");
+ dev_err(&client->dev, "failed to set stream\n");
return ret;
}
@@ -1034,7 +1034,7 @@ static void hi556_stop_streaming(struct hi556 *hi556)
if (hi556_write_reg(hi556, HI556_REG_MODE_SELECT,
HI556_REG_VALUE_16BIT, HI556_MODE_STANDBY))
- dev_err(&client->dev, "failed to set stream");
+ dev_err(&client->dev, "failed to set stream\n");
}
static int hi556_set_stream(struct v4l2_subdev *sd, int enable)
@@ -1053,7 +1053,6 @@ static int hi556_set_stream(struct v4l2_subdev *sd, int enable)
ret = hi556_start_streaming(hi556);
if (ret) {
- enable = 0;
hi556_stop_streaming(hi556);
pm_runtime_put(&client->dev);
}
@@ -1220,33 +1219,35 @@ static int hi556_check_hwcfg(struct device *dev)
*/
ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
if (!ep)
- return -EPROBE_DEFER;
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "waiting for fwnode graph endpoint\n");
ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
fwnode_handle_put(ep);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret, "parsing endpoint failed\n");
ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
if (ret) {
- dev_err(dev, "can't get clock frequency");
- return ret;
+ dev_err(dev, "can't get clock frequency\n");
+ goto check_hwcfg_error;
}
if (mclk != HI556_MCLK) {
- dev_err(dev, "external clock %d is not supported", mclk);
- return -EINVAL;
+ dev_err(dev, "external clock %d is not supported\n", mclk);
+ ret = -EINVAL;
+ goto check_hwcfg_error;
}
if (bus_cfg.bus.mipi_csi2.num_data_lanes != 2) {
- dev_err(dev, "number of CSI2 data lanes %d is not supported",
+ dev_err(dev, "number of CSI2 data lanes %d is not supported\n",
bus_cfg.bus.mipi_csi2.num_data_lanes);
ret = -EINVAL;
goto check_hwcfg_error;
}
if (!bus_cfg.nr_of_link_frequencies) {
- dev_err(dev, "no link frequencies defined");
+ dev_err(dev, "no link frequencies defined\n");
ret = -EINVAL;
goto check_hwcfg_error;
}
@@ -1259,7 +1260,7 @@ static int hi556_check_hwcfg(struct device *dev)
}
if (j == bus_cfg.nr_of_link_frequencies) {
- dev_err(dev, "no link frequency %lld supported",
+ dev_err(dev, "no link frequency %lld supported\n",
link_freq_menu_items[i]);
ret = -EINVAL;
goto check_hwcfg_error;
@@ -1332,11 +1333,8 @@ static int hi556_probe(struct i2c_client *client)
int ret;
ret = hi556_check_hwcfg(&client->dev);
- if (ret) {
- dev_err(&client->dev, "failed to check HW configuration: %d",
- ret);
+ if (ret)
return ret;
- }
hi556 = devm_kzalloc(&client->dev, sizeof(*hi556), GFP_KERNEL);
if (!hi556)
@@ -1371,7 +1369,7 @@ static int hi556_probe(struct i2c_client *client)
ret = hi556_identify_module(hi556);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(&client->dev, "failed to find sensor: %d\n", ret);
goto probe_error_power_off;
}
}
@@ -1380,7 +1378,7 @@ static int hi556_probe(struct i2c_client *client)
hi556->cur_mode = &supported_modes[0];
ret = hi556_init_controls(hi556);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(&client->dev, "failed to init controls: %d\n", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -1391,13 +1389,13 @@ static int hi556_probe(struct i2c_client *client)
hi556->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&hi556->sd.entity, 1, &hi556->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(&client->dev, "failed to init entity pads: %d\n", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
ret = v4l2_async_register_subdev_sensor(&hi556->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ dev_err(&client->dev, "failed to register V4L2 subdev: %d\n",
ret);
goto probe_error_media_entity_cleanup;
}
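
Editor's note on the hi556 probe-path changes above: folding the error print and the return into one statement with dev_err_probe() saves a line and, as far as I know, also records the reason when the error is -EPROBE_DEFER so it shows up in the devices_deferred debugfs listing. A minimal user-space stand-in for the idiom (log_and_return is hypothetical, not the kernel helper):

#include <stdio.h>

/* hypothetical stand-in for dev_err_probe(): log once, hand the code back */
static int log_and_return(const char *dev, int err, const char *msg)
{
	fprintf(stderr, "%s: %s (error %d)\n", dev, msg, err);
	return err;
}

static int parse_endpoint(int have_ep)
{
	if (!have_ep)
		return log_and_return("hi556", -517 /* -EPROBE_DEFER */,
				      "waiting for fwnode graph endpoint");
	return 0;
}

int main(void)
{
	return parse_endpoint(0) ? 1 : 0;
}
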
diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
index 4962cfe7c83d..dd7bc45523d8 100644
--- a/drivers/media/i2c/imx214.c
+++ b/drivers/media/i2c/imx214.c
@@ -15,26 +15,186 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <media/media-entity.h>
+#include <media/v4l2-cci.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
-#define IMX214_REG_MODE_SELECT 0x0100
+/* Chip ID */
+#define IMX214_REG_CHIP_ID CCI_REG16(0x0016)
+#define IMX214_CHIP_ID 0x0214
+
+#define IMX214_REG_MODE_SELECT CCI_REG8(0x0100)
#define IMX214_MODE_STANDBY 0x00
#define IMX214_MODE_STREAMING 0x01
+#define IMX214_REG_FAST_STANDBY_CTRL CCI_REG8(0x0106)
+
#define IMX214_DEFAULT_CLK_FREQ 24000000
-#define IMX214_DEFAULT_LINK_FREQ 480000000
+#define IMX214_DEFAULT_LINK_FREQ 600000000
+/* Keep wrong link frequency for backward compatibility */
+#define IMX214_DEFAULT_LINK_FREQ_LEGACY 480000000
#define IMX214_DEFAULT_PIXEL_RATE ((IMX214_DEFAULT_LINK_FREQ * 8LL) / 10)
#define IMX214_FPS 30
-#define IMX214_MBUS_CODE MEDIA_BUS_FMT_SRGGB10_1X10
+
+/* V-TIMING internal */
+#define IMX214_REG_FRM_LENGTH_LINES CCI_REG16(0x0340)
+#define IMX214_VTS_MAX 0xffff
+
+#define IMX214_VBLANK_MIN 890
+
+/* HBLANK control - read only */
+#define IMX214_PPL_DEFAULT 5008
/* Exposure control */
-#define IMX214_REG_EXPOSURE 0x0202
-#define IMX214_EXPOSURE_MIN 0
-#define IMX214_EXPOSURE_MAX 3184
+#define IMX214_REG_EXPOSURE CCI_REG16(0x0202)
+#define IMX214_EXPOSURE_OFFSET 10
+#define IMX214_EXPOSURE_MIN 1
#define IMX214_EXPOSURE_STEP 1
#define IMX214_EXPOSURE_DEFAULT 3184
+#define IMX214_REG_EXPOSURE_RATIO CCI_REG8(0x0222)
+#define IMX214_REG_SHORT_EXPOSURE CCI_REG16(0x0224)
+
+/* Analog gain control */
+#define IMX214_REG_ANALOG_GAIN CCI_REG16(0x0204)
+#define IMX214_REG_SHORT_ANALOG_GAIN CCI_REG16(0x0216)
+#define IMX214_ANA_GAIN_MIN 0
+#define IMX214_ANA_GAIN_MAX 448
+#define IMX214_ANA_GAIN_STEP 1
+#define IMX214_ANA_GAIN_DEFAULT 0x0
+
+/* Digital gain control */
+#define IMX214_REG_DIG_GAIN_GREENR CCI_REG16(0x020e)
+#define IMX214_REG_DIG_GAIN_RED CCI_REG16(0x0210)
+#define IMX214_REG_DIG_GAIN_BLUE CCI_REG16(0x0212)
+#define IMX214_REG_DIG_GAIN_GREENB CCI_REG16(0x0214)
+#define IMX214_DGTL_GAIN_MIN 0x0100
+#define IMX214_DGTL_GAIN_MAX 0x0fff
+#define IMX214_DGTL_GAIN_DEFAULT 0x0100
+#define IMX214_DGTL_GAIN_STEP 1
+
+#define IMX214_REG_ORIENTATION CCI_REG8(0x0101)
+
+#define IMX214_REG_MASK_CORR_FRAMES CCI_REG8(0x0105)
+#define IMX214_CORR_FRAMES_TRANSMIT 0
+#define IMX214_CORR_FRAMES_MASK 1
+
+#define IMX214_REG_CSI_DATA_FORMAT CCI_REG16(0x0112)
+#define IMX214_CSI_DATA_FORMAT_RAW8 0x0808
+#define IMX214_CSI_DATA_FORMAT_RAW10 0x0A0A
+#define IMX214_CSI_DATA_FORMAT_COMP6 0x0A06
+#define IMX214_CSI_DATA_FORMAT_COMP8 0x0A08
+
+#define IMX214_REG_CSI_LANE_MODE CCI_REG8(0x0114)
+#define IMX214_CSI_2_LANE_MODE 1
+#define IMX214_CSI_4_LANE_MODE 3
+
+#define IMX214_REG_EXCK_FREQ CCI_REG16(0x0136)
+#define IMX214_EXCK_FREQ(n) ((n) * 256) /* n expressed in MHz */
+
+#define IMX214_REG_TEMP_SENSOR_CONTROL CCI_REG8(0x0138)
+
+#define IMX214_REG_HDR_MODE CCI_REG8(0x0220)
+#define IMX214_HDR_MODE_OFF 0
+#define IMX214_HDR_MODE_ON 1
+
+#define IMX214_REG_HDR_RES_REDUCTION CCI_REG8(0x0221)
+#define IMX214_HDR_RES_REDU_THROUGH 0x11
+#define IMX214_HDR_RES_REDU_2_BINNING 0x22
+
+/* PLL settings */
+#define IMX214_REG_VTPXCK_DIV CCI_REG8(0x0301)
+#define IMX214_REG_VTSYCK_DIV CCI_REG8(0x0303)
+#define IMX214_REG_PREPLLCK_VT_DIV CCI_REG8(0x0305)
+#define IMX214_REG_PLL_VT_MPY CCI_REG16(0x0306)
+#define IMX214_REG_OPPXCK_DIV CCI_REG8(0x0309)
+#define IMX214_REG_OPSYCK_DIV CCI_REG8(0x030b)
+#define IMX214_REG_PLL_MULT_DRIV CCI_REG8(0x0310)
+#define IMX214_PLL_SINGLE 0
+#define IMX214_PLL_DUAL 1
+
+#define IMX214_REG_LINE_LENGTH_PCK CCI_REG16(0x0342)
+#define IMX214_REG_X_ADD_STA CCI_REG16(0x0344)
+#define IMX214_REG_Y_ADD_STA CCI_REG16(0x0346)
+#define IMX214_REG_X_ADD_END CCI_REG16(0x0348)
+#define IMX214_REG_Y_ADD_END CCI_REG16(0x034a)
+#define IMX214_REG_X_OUTPUT_SIZE CCI_REG16(0x034c)
+#define IMX214_REG_Y_OUTPUT_SIZE CCI_REG16(0x034e)
+#define IMX214_REG_X_EVEN_INC CCI_REG8(0x0381)
+#define IMX214_REG_X_ODD_INC CCI_REG8(0x0383)
+#define IMX214_REG_Y_EVEN_INC CCI_REG8(0x0385)
+#define IMX214_REG_Y_ODD_INC CCI_REG8(0x0387)
+
+#define IMX214_REG_SCALE_MODE CCI_REG8(0x0401)
+#define IMX214_SCALE_NONE 0
+#define IMX214_SCALE_HORIZONTAL 1
+#define IMX214_SCALE_FULL 2
+#define IMX214_REG_SCALE_M CCI_REG16(0x0404)
+
+#define IMX214_REG_DIG_CROP_X_OFFSET CCI_REG16(0x0408)
+#define IMX214_REG_DIG_CROP_Y_OFFSET CCI_REG16(0x040a)
+#define IMX214_REG_DIG_CROP_WIDTH CCI_REG16(0x040c)
+#define IMX214_REG_DIG_CROP_HEIGHT CCI_REG16(0x040e)
+
+#define IMX214_REG_REQ_LINK_BIT_RATE CCI_REG32(0x0820)
+#define IMX214_LINK_BIT_RATE_MBPS(n) ((n) << 16)
+
+/* Binning mode */
+#define IMX214_REG_BINNING_MODE CCI_REG8(0x0900)
+#define IMX214_BINNING_NONE 0
+#define IMX214_BINNING_ENABLE 1
+#define IMX214_REG_BINNING_TYPE CCI_REG8(0x0901)
+#define IMX214_REG_BINNING_WEIGHTING CCI_REG8(0x0902)
+#define IMX214_BINNING_AVERAGE 0x00
+#define IMX214_BINNING_SUMMED 0x01
+#define IMX214_BINNING_BAYER 0x02
+
+#define IMX214_REG_SING_DEF_CORR_EN CCI_REG8(0x0b06)
+#define IMX214_SING_DEF_CORR_OFF 0
+#define IMX214_SING_DEF_CORR_ON 1
+
+/* AWB control */
+#define IMX214_REG_ABS_GAIN_GREENR CCI_REG16(0x0b8e)
+#define IMX214_REG_ABS_GAIN_RED CCI_REG16(0x0b90)
+#define IMX214_REG_ABS_GAIN_BLUE CCI_REG16(0x0b92)
+#define IMX214_REG_ABS_GAIN_GREENB CCI_REG16(0x0b94)
+
+#define IMX214_REG_RMSC_NR_MODE CCI_REG8(0x3001)
+#define IMX214_REG_STATS_OUT_EN CCI_REG8(0x3013)
+#define IMX214_STATS_OUT_OFF 0
+#define IMX214_STATS_OUT_ON 1
+
+/* Chroma noise reduction */
+#define IMX214_REG_NML_NR_EN CCI_REG8(0x30a2)
+#define IMX214_NML_NR_OFF 0
+#define IMX214_NML_NR_ON 1
+
+#define IMX214_REG_EBD_SIZE_V CCI_REG8(0x5041)
+#define IMX214_EBD_NO 0
+#define IMX214_EBD_4_LINE 4
+
+#define IMX214_REG_RG_STATS_LMT CCI_REG16(0x6d12)
+#define IMX214_RG_STATS_LMT_10_BIT 0x03FF
+#define IMX214_RG_STATS_LMT_14_BIT 0x3FFF
+
+#define IMX214_REG_ATR_FAST_MOVE CCI_REG8(0x9300)
+
+/* Test Pattern Control */
+#define IMX214_REG_TEST_PATTERN CCI_REG16(0x0600)
+#define IMX214_TEST_PATTERN_DISABLE 0
+#define IMX214_TEST_PATTERN_SOLID_COLOR 1
+#define IMX214_TEST_PATTERN_COLOR_BARS 2
+#define IMX214_TEST_PATTERN_GREY_COLOR 3
+#define IMX214_TEST_PATTERN_PN9 4
+
+/* Test pattern colour components */
+#define IMX214_REG_TESTP_RED CCI_REG16(0x0602)
+#define IMX214_REG_TESTP_GREENR CCI_REG16(0x0604)
+#define IMX214_REG_TESTP_BLUE CCI_REG16(0x0606)
+#define IMX214_REG_TESTP_GREENB CCI_REG16(0x0608)
+#define IMX214_TESTP_COLOUR_MIN 0
+#define IMX214_TESTP_COLOUR_MAX 0x03ff
+#define IMX214_TESTP_COLOUR_STEP 1
/* IMX214 native and active pixel array size */
#define IMX214_NATIVE_WIDTH 4224U
@@ -52,6 +212,38 @@ static const char * const imx214_supply_name[] = {
#define IMX214_NUM_SUPPLIES ARRAY_SIZE(imx214_supply_name)
+/*
+ * The supported formats.
+ * This table MUST contain 4 entries per format, to cover the various flip
+ * combinations in the order
+ * - no flip
+ * - h flip
+ * - v flip
+ * - h&v flips
+ */
+static const u32 imx214_mbus_formats[] = {
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+};
+
+static const char * const imx214_test_pattern_menu[] = {
+ "Disabled",
+ "Color Bars",
+ "Solid Color",
+ "Grey Color Bars",
+ "PN9"
+};
+
+static const int imx214_test_pattern_val[] = {
+ IMX214_TEST_PATTERN_DISABLE,
+ IMX214_TEST_PATTERN_COLOR_BARS,
+ IMX214_TEST_PATTERN_SOLID_COLOR,
+ IMX214_TEST_PATTERN_GREY_COLOR,
+ IMX214_TEST_PATTERN_PN9,
+};
+
struct imx214 {
struct device *dev;
struct clk *xclk;
@@ -59,365 +251,262 @@ struct imx214 {
struct v4l2_subdev sd;
struct media_pad pad;
- struct v4l2_mbus_framefmt fmt;
- struct v4l2_rect crop;
struct v4l2_ctrl_handler ctrls;
struct v4l2_ctrl *pixel_rate;
struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
struct v4l2_ctrl *exposure;
struct v4l2_ctrl *unit_size;
+ struct {
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ };
struct regulator_bulk_data supplies[IMX214_NUM_SUPPLIES];
struct gpio_desc *enable_gpio;
-
- /*
- * Serialize control access, get/set format, get selection
- * and start streaming.
- */
- struct mutex mutex;
-};
-
-struct reg_8 {
- u16 addr;
- u8 val;
-};
-
-enum {
- IMX214_TABLE_WAIT_MS = 0,
- IMX214_TABLE_END,
- IMX214_MAX_RETRIES,
- IMX214_WAIT_MS
};
/*From imx214_mode_tbls.h*/
-static const struct reg_8 mode_4096x2304[] = {
- {0x0114, 0x03},
- {0x0220, 0x00},
- {0x0221, 0x11},
- {0x0222, 0x01},
- {0x0340, 0x0C},
- {0x0341, 0x7A},
- {0x0342, 0x13},
- {0x0343, 0x90},
- {0x0344, 0x00},
- {0x0345, 0x38},
- {0x0346, 0x01},
- {0x0347, 0x98},
- {0x0348, 0x10},
- {0x0349, 0x37},
- {0x034A, 0x0A},
- {0x034B, 0x97},
- {0x0381, 0x01},
- {0x0383, 0x01},
- {0x0385, 0x01},
- {0x0387, 0x01},
- {0x0900, 0x00},
- {0x0901, 0x00},
- {0x0902, 0x00},
- {0x3000, 0x35},
- {0x3054, 0x01},
- {0x305C, 0x11},
-
- {0x0112, 0x0A},
- {0x0113, 0x0A},
- {0x034C, 0x10},
- {0x034D, 0x00},
- {0x034E, 0x09},
- {0x034F, 0x00},
- {0x0401, 0x00},
- {0x0404, 0x00},
- {0x0405, 0x10},
- {0x0408, 0x00},
- {0x0409, 0x00},
- {0x040A, 0x00},
- {0x040B, 0x00},
- {0x040C, 0x10},
- {0x040D, 0x00},
- {0x040E, 0x09},
- {0x040F, 0x00},
-
- {0x0301, 0x05},
- {0x0303, 0x02},
- {0x0305, 0x03},
- {0x0306, 0x00},
- {0x0307, 0x96},
- {0x0309, 0x0A},
- {0x030B, 0x01},
- {0x0310, 0x00},
-
- {0x0820, 0x12},
- {0x0821, 0xC0},
- {0x0822, 0x00},
- {0x0823, 0x00},
-
- {0x3A03, 0x09},
- {0x3A04, 0x50},
- {0x3A05, 0x01},
-
- {0x0B06, 0x01},
- {0x30A2, 0x00},
-
- {0x30B4, 0x00},
-
- {0x3A02, 0xFF},
-
- {0x3011, 0x00},
- {0x3013, 0x01},
-
- {0x0202, 0x0C},
- {0x0203, 0x70},
- {0x0224, 0x01},
- {0x0225, 0xF4},
-
- {0x0204, 0x00},
- {0x0205, 0x00},
- {0x020E, 0x01},
- {0x020F, 0x00},
- {0x0210, 0x01},
- {0x0211, 0x00},
- {0x0212, 0x01},
- {0x0213, 0x00},
- {0x0214, 0x01},
- {0x0215, 0x00},
- {0x0216, 0x00},
- {0x0217, 0x00},
-
- {0x4170, 0x00},
- {0x4171, 0x10},
- {0x4176, 0x00},
- {0x4177, 0x3C},
- {0xAE20, 0x04},
- {0xAE21, 0x5C},
-
- {IMX214_TABLE_WAIT_MS, 10},
- {0x0138, 0x01},
- {IMX214_TABLE_END, 0x00}
+static const struct cci_reg_sequence mode_4096x2304[] = {
+ { IMX214_REG_HDR_MODE, IMX214_HDR_MODE_OFF },
+ { IMX214_REG_HDR_RES_REDUCTION, IMX214_HDR_RES_REDU_THROUGH },
+ { IMX214_REG_EXPOSURE_RATIO, 1 },
+ { IMX214_REG_X_ADD_STA, 56 },
+ { IMX214_REG_Y_ADD_STA, 408 },
+ { IMX214_REG_X_ADD_END, 4151 },
+ { IMX214_REG_Y_ADD_END, 2711 },
+ { IMX214_REG_X_EVEN_INC, 1 },
+ { IMX214_REG_X_ODD_INC, 1 },
+ { IMX214_REG_Y_EVEN_INC, 1 },
+ { IMX214_REG_Y_ODD_INC, 1 },
+ { IMX214_REG_BINNING_MODE, IMX214_BINNING_NONE },
+ { IMX214_REG_BINNING_TYPE, 0 },
+ { IMX214_REG_BINNING_WEIGHTING, IMX214_BINNING_AVERAGE },
+ { CCI_REG8(0x3000), 0x35 },
+ { CCI_REG8(0x3054), 0x01 },
+ { CCI_REG8(0x305C), 0x11 },
+
+ { IMX214_REG_CSI_DATA_FORMAT, IMX214_CSI_DATA_FORMAT_RAW10 },
+ { IMX214_REG_X_OUTPUT_SIZE, 4096 },
+ { IMX214_REG_Y_OUTPUT_SIZE, 2304 },
+ { IMX214_REG_SCALE_MODE, IMX214_SCALE_NONE },
+ { IMX214_REG_SCALE_M, 2 },
+ { IMX214_REG_DIG_CROP_X_OFFSET, 0 },
+ { IMX214_REG_DIG_CROP_Y_OFFSET, 0 },
+ { IMX214_REG_DIG_CROP_WIDTH, 4096 },
+ { IMX214_REG_DIG_CROP_HEIGHT, 2304 },
+
+ { IMX214_REG_VTPXCK_DIV, 5 },
+ { IMX214_REG_VTSYCK_DIV, 2 },
+ { IMX214_REG_PREPLLCK_VT_DIV, 3 },
+ { IMX214_REG_PLL_VT_MPY, 150 },
+ { IMX214_REG_OPPXCK_DIV, 10 },
+ { IMX214_REG_OPSYCK_DIV, 1 },
+ { IMX214_REG_PLL_MULT_DRIV, IMX214_PLL_SINGLE },
+
+ { IMX214_REG_REQ_LINK_BIT_RATE, IMX214_LINK_BIT_RATE_MBPS(4800) },
+
+ { CCI_REG8(0x3A03), 0x09 },
+ { CCI_REG8(0x3A04), 0x50 },
+ { CCI_REG8(0x3A05), 0x01 },
+
+ { IMX214_REG_SING_DEF_CORR_EN, IMX214_SING_DEF_CORR_ON },
+ { IMX214_REG_NML_NR_EN, IMX214_NML_NR_OFF },
+
+ { CCI_REG8(0x30B4), 0x00 },
+
+ { CCI_REG8(0x3A02), 0xFF },
+
+ { CCI_REG8(0x3011), 0x00 },
+ { IMX214_REG_STATS_OUT_EN, IMX214_STATS_OUT_ON },
+
+ { IMX214_REG_SHORT_EXPOSURE, 500 },
+
+ { CCI_REG8(0x4170), 0x00 },
+ { CCI_REG8(0x4171), 0x10 },
+ { CCI_REG8(0x4176), 0x00 },
+ { CCI_REG8(0x4177), 0x3C },
+ { CCI_REG8(0xAE20), 0x04 },
+ { CCI_REG8(0xAE21), 0x5C },
};
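
Editor's note: the named PLL registers in the table above, the requested link bit rate and the new IMX214_DEFAULT_LINK_FREQ / IMX214_DEFAULT_PIXEL_RATE constants are mutually consistent. The sketch below is only a back-of-the-envelope cross-check of values taken from this patch (assuming the usual CSI-2 D-PHY convention that the link frequency is half the per-lane DDR bit rate), not a datasheet derivation.

#include <stdio.h>

int main(void)
{
	const unsigned long long extclk = 24000000ULL;	/* IMX214_DEFAULT_CLK_FREQ */
	const unsigned int prepll_vt_div = 3, pll_vt_mpy = 150;
	const unsigned int lanes = 4, bpp = 10;
	const unsigned long long req_link_mbps = 4800;	/* IMX214_LINK_BIT_RATE_MBPS(4800) */

	unsigned long long vt_pll = extclk / prepll_vt_div * pll_vt_mpy;
	unsigned long long per_lane_bps = req_link_mbps * 1000000ULL / lanes;
	unsigned long long link_freq = per_lane_bps / 2;	/* DDR clock */
	unsigned long long pixel_rate = link_freq * 2 * lanes / bpp;

	printf("VT PLL:     %llu Hz\n", vt_pll);	/* 1200000000 */
	printf("link freq:  %llu Hz\n", link_freq);	/* 600000000  */
	printf("pixel rate: %llu pix/s\n", pixel_rate);	/* 480000000  */
	return 0;
}
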
-static const struct reg_8 mode_1920x1080[] = {
- {0x0114, 0x03},
- {0x0220, 0x00},
- {0x0221, 0x11},
- {0x0222, 0x01},
- {0x0340, 0x0C},
- {0x0341, 0x7A},
- {0x0342, 0x13},
- {0x0343, 0x90},
- {0x0344, 0x04},
- {0x0345, 0x78},
- {0x0346, 0x03},
- {0x0347, 0xFC},
- {0x0348, 0x0B},
- {0x0349, 0xF7},
- {0x034A, 0x08},
- {0x034B, 0x33},
- {0x0381, 0x01},
- {0x0383, 0x01},
- {0x0385, 0x01},
- {0x0387, 0x01},
- {0x0900, 0x00},
- {0x0901, 0x00},
- {0x0902, 0x00},
- {0x3000, 0x35},
- {0x3054, 0x01},
- {0x305C, 0x11},
-
- {0x0112, 0x0A},
- {0x0113, 0x0A},
- {0x034C, 0x07},
- {0x034D, 0x80},
- {0x034E, 0x04},
- {0x034F, 0x38},
- {0x0401, 0x00},
- {0x0404, 0x00},
- {0x0405, 0x10},
- {0x0408, 0x00},
- {0x0409, 0x00},
- {0x040A, 0x00},
- {0x040B, 0x00},
- {0x040C, 0x07},
- {0x040D, 0x80},
- {0x040E, 0x04},
- {0x040F, 0x38},
-
- {0x0301, 0x05},
- {0x0303, 0x02},
- {0x0305, 0x03},
- {0x0306, 0x00},
- {0x0307, 0x96},
- {0x0309, 0x0A},
- {0x030B, 0x01},
- {0x0310, 0x00},
-
- {0x0820, 0x12},
- {0x0821, 0xC0},
- {0x0822, 0x00},
- {0x0823, 0x00},
-
- {0x3A03, 0x04},
- {0x3A04, 0xF8},
- {0x3A05, 0x02},
-
- {0x0B06, 0x01},
- {0x30A2, 0x00},
-
- {0x30B4, 0x00},
-
- {0x3A02, 0xFF},
-
- {0x3011, 0x00},
- {0x3013, 0x01},
-
- {0x0202, 0x0C},
- {0x0203, 0x70},
- {0x0224, 0x01},
- {0x0225, 0xF4},
-
- {0x0204, 0x00},
- {0x0205, 0x00},
- {0x020E, 0x01},
- {0x020F, 0x00},
- {0x0210, 0x01},
- {0x0211, 0x00},
- {0x0212, 0x01},
- {0x0213, 0x00},
- {0x0214, 0x01},
- {0x0215, 0x00},
- {0x0216, 0x00},
- {0x0217, 0x00},
-
- {0x4170, 0x00},
- {0x4171, 0x10},
- {0x4176, 0x00},
- {0x4177, 0x3C},
- {0xAE20, 0x04},
- {0xAE21, 0x5C},
-
- {IMX214_TABLE_WAIT_MS, 10},
- {0x0138, 0x01},
- {IMX214_TABLE_END, 0x00}
+static const struct cci_reg_sequence mode_1920x1080[] = {
+ { IMX214_REG_HDR_MODE, IMX214_HDR_MODE_OFF },
+ { IMX214_REG_HDR_RES_REDUCTION, IMX214_HDR_RES_REDU_THROUGH },
+ { IMX214_REG_EXPOSURE_RATIO, 1 },
+ { IMX214_REG_X_ADD_STA, 1144 },
+ { IMX214_REG_Y_ADD_STA, 1020 },
+ { IMX214_REG_X_ADD_END, 3063 },
+ { IMX214_REG_Y_ADD_END, 2099 },
+ { IMX214_REG_X_EVEN_INC, 1 },
+ { IMX214_REG_X_ODD_INC, 1 },
+ { IMX214_REG_Y_EVEN_INC, 1 },
+ { IMX214_REG_Y_ODD_INC, 1 },
+ { IMX214_REG_BINNING_MODE, IMX214_BINNING_NONE },
+ { IMX214_REG_BINNING_TYPE, 0 },
+ { IMX214_REG_BINNING_WEIGHTING, IMX214_BINNING_AVERAGE },
+ { CCI_REG8(0x3000), 0x35 },
+ { CCI_REG8(0x3054), 0x01 },
+ { CCI_REG8(0x305C), 0x11 },
+
+ { IMX214_REG_CSI_DATA_FORMAT, IMX214_CSI_DATA_FORMAT_RAW10 },
+ { IMX214_REG_X_OUTPUT_SIZE, 1920 },
+ { IMX214_REG_Y_OUTPUT_SIZE, 1080 },
+ { IMX214_REG_SCALE_MODE, IMX214_SCALE_NONE },
+ { IMX214_REG_SCALE_M, 2 },
+ { IMX214_REG_DIG_CROP_X_OFFSET, 0 },
+ { IMX214_REG_DIG_CROP_Y_OFFSET, 0 },
+ { IMX214_REG_DIG_CROP_WIDTH, 1920 },
+ { IMX214_REG_DIG_CROP_HEIGHT, 1080 },
+
+ { IMX214_REG_VTPXCK_DIV, 5 },
+ { IMX214_REG_VTSYCK_DIV, 2 },
+ { IMX214_REG_PREPLLCK_VT_DIV, 3 },
+ { IMX214_REG_PLL_VT_MPY, 150 },
+ { IMX214_REG_OPPXCK_DIV, 10 },
+ { IMX214_REG_OPSYCK_DIV, 1 },
+ { IMX214_REG_PLL_MULT_DRIV, IMX214_PLL_SINGLE },
+
+ { IMX214_REG_REQ_LINK_BIT_RATE, IMX214_LINK_BIT_RATE_MBPS(4800) },
+
+ { CCI_REG8(0x3A03), 0x04 },
+ { CCI_REG8(0x3A04), 0xF8 },
+ { CCI_REG8(0x3A05), 0x02 },
+
+ { IMX214_REG_SING_DEF_CORR_EN, IMX214_SING_DEF_CORR_ON },
+ { IMX214_REG_NML_NR_EN, IMX214_NML_NR_OFF },
+
+ { CCI_REG8(0x30B4), 0x00 },
+
+ { CCI_REG8(0x3A02), 0xFF },
+
+ { CCI_REG8(0x3011), 0x00 },
+ { IMX214_REG_STATS_OUT_EN, IMX214_STATS_OUT_ON },
+
+ { IMX214_REG_SHORT_EXPOSURE, 500 },
+
+ { CCI_REG8(0x4170), 0x00 },
+ { CCI_REG8(0x4171), 0x10 },
+ { CCI_REG8(0x4176), 0x00 },
+ { CCI_REG8(0x4177), 0x3C },
+ { CCI_REG8(0xAE20), 0x04 },
+ { CCI_REG8(0xAE21), 0x5C },
};
-static const struct reg_8 mode_table_common[] = {
+static const struct cci_reg_sequence mode_table_common[] = {
/* software reset */
/* software standby settings */
- {0x0100, 0x00},
+ { IMX214_REG_MODE_SELECT, IMX214_MODE_STANDBY },
/* ATR setting */
- {0x9300, 0x02},
+ { IMX214_REG_ATR_FAST_MOVE, 2 },
/* external clock setting */
- {0x0136, 0x18},
- {0x0137, 0x00},
+ { IMX214_REG_EXCK_FREQ, IMX214_EXCK_FREQ(IMX214_DEFAULT_CLK_FREQ / 1000000) },
/* global setting */
/* basic config */
- {0x0101, 0x00},
- {0x0105, 0x01},
- {0x0106, 0x01},
- {0x4550, 0x02},
- {0x4601, 0x00},
- {0x4642, 0x05},
- {0x6227, 0x11},
- {0x6276, 0x00},
- {0x900E, 0x06},
- {0xA802, 0x90},
- {0xA803, 0x11},
- {0xA804, 0x62},
- {0xA805, 0x77},
- {0xA806, 0xAE},
- {0xA807, 0x34},
- {0xA808, 0xAE},
- {0xA809, 0x35},
- {0xA80A, 0x62},
- {0xA80B, 0x83},
- {0xAE33, 0x00},
+ { IMX214_REG_MASK_CORR_FRAMES, IMX214_CORR_FRAMES_MASK },
+ { IMX214_REG_FAST_STANDBY_CTRL, 1 },
+ { IMX214_REG_LINE_LENGTH_PCK, IMX214_PPL_DEFAULT },
+ { CCI_REG8(0x4550), 0x02 },
+ { CCI_REG8(0x4601), 0x00 },
+ { CCI_REG8(0x4642), 0x05 },
+ { CCI_REG8(0x6227), 0x11 },
+ { CCI_REG8(0x6276), 0x00 },
+ { CCI_REG8(0x900E), 0x06 },
+ { CCI_REG8(0xA802), 0x90 },
+ { CCI_REG8(0xA803), 0x11 },
+ { CCI_REG8(0xA804), 0x62 },
+ { CCI_REG8(0xA805), 0x77 },
+ { CCI_REG8(0xA806), 0xAE },
+ { CCI_REG8(0xA807), 0x34 },
+ { CCI_REG8(0xA808), 0xAE },
+ { CCI_REG8(0xA809), 0x35 },
+ { CCI_REG8(0xA80A), 0x62 },
+ { CCI_REG8(0xA80B), 0x83 },
+ { CCI_REG8(0xAE33), 0x00 },
/* analog setting */
- {0x4174, 0x00},
- {0x4175, 0x11},
- {0x4612, 0x29},
- {0x461B, 0x12},
- {0x461F, 0x06},
- {0x4635, 0x07},
- {0x4637, 0x30},
- {0x463F, 0x18},
- {0x4641, 0x0D},
- {0x465B, 0x12},
- {0x465F, 0x11},
- {0x4663, 0x11},
- {0x4667, 0x0F},
- {0x466F, 0x0F},
- {0x470E, 0x09},
- {0x4909, 0xAB},
- {0x490B, 0x95},
- {0x4915, 0x5D},
- {0x4A5F, 0xFF},
- {0x4A61, 0xFF},
- {0x4A73, 0x62},
- {0x4A85, 0x00},
- {0x4A87, 0xFF},
+ { CCI_REG8(0x4174), 0x00 },
+ { CCI_REG8(0x4175), 0x11 },
+ { CCI_REG8(0x4612), 0x29 },
+ { CCI_REG8(0x461B), 0x12 },
+ { CCI_REG8(0x461F), 0x06 },
+ { CCI_REG8(0x4635), 0x07 },
+ { CCI_REG8(0x4637), 0x30 },
+ { CCI_REG8(0x463F), 0x18 },
+ { CCI_REG8(0x4641), 0x0D },
+ { CCI_REG8(0x465B), 0x12 },
+ { CCI_REG8(0x465F), 0x11 },
+ { CCI_REG8(0x4663), 0x11 },
+ { CCI_REG8(0x4667), 0x0F },
+ { CCI_REG8(0x466F), 0x0F },
+ { CCI_REG8(0x470E), 0x09 },
+ { CCI_REG8(0x4909), 0xAB },
+ { CCI_REG8(0x490B), 0x95 },
+ { CCI_REG8(0x4915), 0x5D },
+ { CCI_REG8(0x4A5F), 0xFF },
+ { CCI_REG8(0x4A61), 0xFF },
+ { CCI_REG8(0x4A73), 0x62 },
+ { CCI_REG8(0x4A85), 0x00 },
+ { CCI_REG8(0x4A87), 0xFF },
/* embedded data */
- {0x5041, 0x04},
- {0x583C, 0x04},
- {0x620E, 0x04},
- {0x6EB2, 0x01},
- {0x6EB3, 0x00},
- {0x9300, 0x02},
+ { IMX214_REG_EBD_SIZE_V, IMX214_EBD_4_LINE },
+ { CCI_REG8(0x583C), 0x04 },
+ { CCI_REG8(0x620E), 0x04 },
+ { CCI_REG8(0x6EB2), 0x01 },
+ { CCI_REG8(0x6EB3), 0x00 },
+ { IMX214_REG_ATR_FAST_MOVE, 2 },
/* imagequality */
/* HDR setting */
- {0x3001, 0x07},
- {0x6D12, 0x3F},
- {0x6D13, 0xFF},
- {0x9344, 0x03},
- {0x9706, 0x10},
- {0x9707, 0x03},
- {0x9708, 0x03},
- {0x9E04, 0x01},
- {0x9E05, 0x00},
- {0x9E0C, 0x01},
- {0x9E0D, 0x02},
- {0x9E24, 0x00},
- {0x9E25, 0x8C},
- {0x9E26, 0x00},
- {0x9E27, 0x94},
- {0x9E28, 0x00},
- {0x9E29, 0x96},
+ { IMX214_REG_RMSC_NR_MODE, 0x07 },
+ { IMX214_REG_RG_STATS_LMT, IMX214_RG_STATS_LMT_14_BIT },
+ { CCI_REG8(0x9344), 0x03 },
+ { CCI_REG8(0x9706), 0x10 },
+ { CCI_REG8(0x9707), 0x03 },
+ { CCI_REG8(0x9708), 0x03 },
+ { CCI_REG8(0x9E04), 0x01 },
+ { CCI_REG8(0x9E05), 0x00 },
+ { CCI_REG8(0x9E0C), 0x01 },
+ { CCI_REG8(0x9E0D), 0x02 },
+ { CCI_REG8(0x9E24), 0x00 },
+ { CCI_REG8(0x9E25), 0x8C },
+ { CCI_REG8(0x9E26), 0x00 },
+ { CCI_REG8(0x9E27), 0x94 },
+ { CCI_REG8(0x9E28), 0x00 },
+ { CCI_REG8(0x9E29), 0x96 },
/* CNR parameter setting */
- {0x69DB, 0x01},
+ { CCI_REG8(0x69DB), 0x01 },
/* Moire reduction */
- {0x6957, 0x01},
+ { CCI_REG8(0x6957), 0x01 },
/* image enhancement */
- {0x6987, 0x17},
- {0x698A, 0x03},
- {0x698B, 0x03},
+ { CCI_REG8(0x6987), 0x17 },
+ { CCI_REG8(0x698A), 0x03 },
+ { CCI_REG8(0x698B), 0x03 },
/* white balance */
- {0x0B8E, 0x01},
- {0x0B8F, 0x00},
- {0x0B90, 0x01},
- {0x0B91, 0x00},
- {0x0B92, 0x01},
- {0x0B93, 0x00},
- {0x0B94, 0x01},
- {0x0B95, 0x00},
+ { IMX214_REG_ABS_GAIN_GREENR, 0x0100 },
+ { IMX214_REG_ABS_GAIN_RED, 0x0100 },
+ { IMX214_REG_ABS_GAIN_BLUE, 0x0100 },
+ { IMX214_REG_ABS_GAIN_GREENB, 0x0100 },
/* ATR setting */
- {0x6E50, 0x00},
- {0x6E51, 0x32},
- {0x9340, 0x00},
- {0x9341, 0x3C},
- {0x9342, 0x03},
- {0x9343, 0xFF},
- {IMX214_TABLE_END, 0x00}
+ { CCI_REG8(0x6E50), 0x00 },
+ { CCI_REG8(0x6E51), 0x32 },
+ { CCI_REG8(0x9340), 0x00 },
+ { CCI_REG8(0x9341), 0x3C },
+ { CCI_REG8(0x9342), 0x03 },
+ { CCI_REG8(0x9343), 0xFF },
};
/*
@@ -427,16 +516,25 @@ static const struct reg_8 mode_table_common[] = {
static const struct imx214_mode {
u32 width;
u32 height;
- const struct reg_8 *reg_table;
+
+ /* V-timing */
+ unsigned int vts_def;
+
+ unsigned int num_of_regs;
+ const struct cci_reg_sequence *reg_table;
} imx214_modes[] = {
{
.width = 4096,
.height = 2304,
+ .vts_def = 3194,
+ .num_of_regs = ARRAY_SIZE(mode_4096x2304),
.reg_table = mode_4096x2304,
},
{
.width = 1920,
.height = 1080,
+ .vts_def = 3194,
+ .num_of_regs = ARRAY_SIZE(mode_1920x1080),
.reg_table = mode_1920x1080,
},
};
@@ -490,14 +588,42 @@ static int __maybe_unused imx214_power_off(struct device *dev)
return 0;
}
+/* Get bayer order based on flip setting. */
+static u32 imx214_get_format_code(struct imx214 *imx214)
+{
+ unsigned int i;
+
+ i = (imx214->vflip->val ? 2 : 0) | (imx214->hflip->val ? 1 : 0);
+
+ return imx214_mbus_formats[i];
+}
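
Editor's note: the ordering constraint documented on imx214_mbus_formats (no flip, H, V, H+V) is exactly what makes the two-bit index in imx214_get_format_code() work. A tiny standalone illustration of that indexing; the strings below merely stand in for the MEDIA_BUS_FMT_* codes listed in the table.

#include <stdio.h>

static const char * const codes[] = {
	"SRGGB10",	/* no flip       */
	"SGRBG10",	/* hflip         */
	"SGBRG10",	/* vflip         */
	"SBGGR10",	/* hflip + vflip */
};

int main(void)
{
	for (int vflip = 0; vflip <= 1; vflip++)
		for (int hflip = 0; hflip <= 1; hflip++)
			printf("hflip=%d vflip=%d -> %s\n", hflip, vflip,
			       codes[(vflip ? 2 : 0) | (hflip ? 1 : 0)]);
	return 0;
}
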
+
+static void imx214_update_pad_format(struct imx214 *imx214,
+ const struct imx214_mode *mode,
+ struct v4l2_mbus_framefmt *fmt, u32 code)
+{
+ fmt->code = imx214_get_format_code(imx214);
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
+ fmt->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true,
+ fmt->colorspace,
+ fmt->ycbcr_enc);
+ fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace);
+}
+
static int imx214_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
- if (code->index > 0)
+ struct imx214 *imx214 = to_imx214(sd);
+
+ if (code->index >= (ARRAY_SIZE(imx214_mbus_formats) / 4))
return -EINVAL;
- code->code = IMX214_MBUS_CODE;
+ code->code = imx214_get_format_code(imx214);
return 0;
}
@@ -506,7 +632,11 @@ static int imx214_enum_frame_size(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
- if (fse->code != IMX214_MBUS_CODE)
+ struct imx214 *imx214 = to_imx214(subdev);
+ u32 code;
+
+ code = imx214_get_format_code(imx214);
+ if (fse->code != code)
return -EINVAL;
if (fse->index >= ARRAY_SIZE(imx214_modes))
@@ -549,52 +679,6 @@ static const struct v4l2_subdev_core_ops imx214_core_ops = {
#endif
};
-static struct v4l2_mbus_framefmt *
-__imx214_get_pad_format(struct imx214 *imx214,
- struct v4l2_subdev_state *sd_state,
- unsigned int pad,
- enum v4l2_subdev_format_whence which)
-{
- switch (which) {
- case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_state_get_format(sd_state, pad);
- case V4L2_SUBDEV_FORMAT_ACTIVE:
- return &imx214->fmt;
- default:
- return NULL;
- }
-}
-
-static int imx214_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *format)
-{
- struct imx214 *imx214 = to_imx214(sd);
-
- mutex_lock(&imx214->mutex);
- format->format = *__imx214_get_pad_format(imx214, sd_state,
- format->pad,
- format->which);
- mutex_unlock(&imx214->mutex);
-
- return 0;
-}
-
-static struct v4l2_rect *
-__imx214_get_pad_crop(struct imx214 *imx214,
- struct v4l2_subdev_state *sd_state,
- unsigned int pad, enum v4l2_subdev_format_whence which)
-{
- switch (which) {
- case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_state_get_crop(sd_state, pad);
- case V4L2_SUBDEV_FORMAT_ACTIVE:
- return &imx214->crop;
- default:
- return NULL;
- }
-}
-
static int imx214_set_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
@@ -604,34 +688,48 @@ static int imx214_set_format(struct v4l2_subdev *sd,
struct v4l2_rect *__crop;
const struct imx214_mode *mode;
- mutex_lock(&imx214->mutex);
-
- __crop = __imx214_get_pad_crop(imx214, sd_state, format->pad,
- format->which);
-
mode = v4l2_find_nearest_size(imx214_modes,
ARRAY_SIZE(imx214_modes), width, height,
format->format.width,
format->format.height);
- __crop->width = mode->width;
- __crop->height = mode->height;
+ imx214_update_pad_format(imx214, mode, &format->format,
+ format->format.code);
+ __format = v4l2_subdev_state_get_format(sd_state, 0);
- __format = __imx214_get_pad_format(imx214, sd_state, format->pad,
- format->which);
- __format->width = __crop->width;
- __format->height = __crop->height;
- __format->code = IMX214_MBUS_CODE;
- __format->field = V4L2_FIELD_NONE;
- __format->colorspace = V4L2_COLORSPACE_SRGB;
- __format->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(__format->colorspace);
- __format->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true,
- __format->colorspace, __format->ycbcr_enc);
- __format->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(__format->colorspace);
+ *__format = format->format;
- format->format = *__format;
+ __crop = v4l2_subdev_state_get_crop(sd_state, 0);
+ __crop->width = mode->width;
+ __crop->height = mode->height;
- mutex_unlock(&imx214->mutex);
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ int exposure_max;
+ int exposure_def;
+ int hblank;
+
+ /* Update blank limits */
+ __v4l2_ctrl_modify_range(imx214->vblank, IMX214_VBLANK_MIN,
+ IMX214_VTS_MAX - mode->height, 2,
+ mode->vts_def - mode->height);
+
+ /* Update max exposure while meeting expected vblanking */
+ exposure_max = mode->vts_def - IMX214_EXPOSURE_OFFSET;
+ exposure_def = min(exposure_max, IMX214_EXPOSURE_DEFAULT);
+ __v4l2_ctrl_modify_range(imx214->exposure,
+ imx214->exposure->minimum,
+ exposure_max, imx214->exposure->step,
+ exposure_def);
+
+ /*
+ * Currently PPL is fixed to IMX214_PPL_DEFAULT, so hblank
+ * depends on mode->width only, and is not changeable in any
+ * way other than changing the mode.
+ */
+ hblank = IMX214_PPL_DEFAULT - mode->width;
+ __v4l2_ctrl_modify_range(imx214->hblank, hblank, hblank, 1,
+ hblank);
+ }
return 0;
}
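
Editor's note: with VBLANK and HBLANK now exposed, the default frame rate follows from the per-mode vts_def and the fixed IMX214_PPL_DEFAULT rather than from a hard-coded interval. A quick arithmetic check, using only constants from this patch, confirms the defaults still land at roughly the legacy 30 fps:

#include <stdio.h>

int main(void)
{
	const double pixel_rate = 480000000.0;	/* IMX214_DEFAULT_PIXEL_RATE */
	const double ppl = 5008.0;		/* IMX214_PPL_DEFAULT */
	const double vts = 3194.0;		/* vts_def for both modes */

	printf("frame rate: %.2f fps\n", pixel_rate / (ppl * vts)); /* ~30.01 */
	return 0;
}
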
@@ -640,14 +738,9 @@ static int imx214_get_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
- struct imx214 *imx214 = to_imx214(sd);
-
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
- mutex_lock(&imx214->mutex);
- sel->r = *__imx214_get_pad_crop(imx214, sd_state, sel->pad,
- sel->which);
- mutex_unlock(&imx214->mutex);
+ sel->r = *v4l2_subdev_state_get_crop(sd_state, 0);
return 0;
case V4L2_SEL_TGT_NATIVE_SIZE:
@@ -675,6 +768,7 @@ static int imx214_entity_init_state(struct v4l2_subdev *subdev,
struct v4l2_subdev_format fmt = { };
fmt.which = sd_state ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt.format.code = MEDIA_BUS_FMT_SRGGB10_1X10;
fmt.format.width = imx214_modes[0].width;
fmt.format.height = imx214_modes[0].height;
@@ -683,12 +777,41 @@ static int imx214_entity_init_state(struct v4l2_subdev *subdev,
return 0;
}
+static int imx214_update_digital_gain(struct imx214 *imx214, u32 val)
+{
+ int ret = 0;
+
+ cci_write(imx214->regmap, IMX214_REG_DIG_GAIN_GREENR, val, &ret);
+ cci_write(imx214->regmap, IMX214_REG_DIG_GAIN_RED, val, &ret);
+ cci_write(imx214->regmap, IMX214_REG_DIG_GAIN_BLUE, val, &ret);
+ cci_write(imx214->regmap, IMX214_REG_DIG_GAIN_GREENB, val, &ret);
+
+ return ret;
+}
+
static int imx214_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct imx214 *imx214 = container_of(ctrl->handler,
struct imx214, ctrls);
- u8 vals[2];
- int ret;
+ const struct v4l2_mbus_framefmt *format = NULL;
+ struct v4l2_subdev_state *state;
+ int ret = 0;
+
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ int exposure_max, exposure_def;
+
+ state = v4l2_subdev_get_locked_active_state(&imx214->sd);
+ format = v4l2_subdev_state_get_format(state, 0);
+
+ /* Update max exposure while meeting expected vblanking */
+ exposure_max =
+ format->height + ctrl->val - IMX214_EXPOSURE_OFFSET;
+ exposure_def = min(exposure_max, IMX214_EXPOSURE_DEFAULT);
+ __v4l2_ctrl_modify_range(imx214->exposure,
+ imx214->exposure->minimum,
+ exposure_max, imx214->exposure->step,
+ exposure_def);
+ }
/*
* Applying V4L2 control value only happens
@@ -698,15 +821,47 @@ static int imx214_set_ctrl(struct v4l2_ctrl *ctrl)
return 0;
switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ cci_write(imx214->regmap, IMX214_REG_ANALOG_GAIN,
+ ctrl->val, &ret);
+ cci_write(imx214->regmap, IMX214_REG_SHORT_ANALOG_GAIN,
+ ctrl->val, &ret);
+ break;
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = imx214_update_digital_gain(imx214, ctrl->val);
+ break;
case V4L2_CID_EXPOSURE:
- vals[1] = ctrl->val;
- vals[0] = ctrl->val >> 8;
- ret = regmap_bulk_write(imx214->regmap, IMX214_REG_EXPOSURE, vals, 2);
- if (ret < 0)
- dev_err(imx214->dev, "Error %d\n", ret);
- ret = 0;
+ cci_write(imx214->regmap, IMX214_REG_EXPOSURE, ctrl->val, &ret);
+ break;
+ case V4L2_CID_HFLIP:
+ case V4L2_CID_VFLIP:
+ cci_write(imx214->regmap, IMX214_REG_ORIENTATION,
+ imx214->hflip->val | imx214->vflip->val << 1, &ret);
+ break;
+ case V4L2_CID_VBLANK:
+ cci_write(imx214->regmap, IMX214_REG_FRM_LENGTH_LINES,
+ format->height + ctrl->val, &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ cci_write(imx214->regmap, IMX214_REG_TEST_PATTERN,
+ imx214_test_pattern_val[ctrl->val], &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN_RED:
+ cci_write(imx214->regmap, IMX214_REG_TESTP_RED,
+ ctrl->val, &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN_GREENR:
+ cci_write(imx214->regmap, IMX214_REG_TESTP_GREENR,
+ ctrl->val, &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN_BLUE:
+ cci_write(imx214->regmap, IMX214_REG_TESTP_BLUE,
+ ctrl->val, &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN_GREENB:
+ cci_write(imx214->regmap, IMX214_REG_TESTP_GREENB,
+ ctrl->val, &ret);
break;
-
default:
ret = -EINVAL;
}
@@ -729,16 +884,19 @@ static int imx214_ctrls_init(struct imx214 *imx214)
.width = 1120,
.height = 1120,
};
+ const struct imx214_mode *mode = &imx214_modes[0];
struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl_handler *ctrl_hdlr;
- int ret;
+ int exposure_max, exposure_def;
+ int hblank;
+ int i, ret;
ret = v4l2_fwnode_device_parse(imx214->dev, &props);
if (ret < 0)
return ret;
ctrl_hdlr = &imx214->ctrls;
- ret = v4l2_ctrl_handler_init(&imx214->ctrls, 6);
+ ret = v4l2_ctrl_handler_init(&imx214->ctrls, 13);
if (ret)
return ret;
@@ -764,17 +922,75 @@ static int imx214_ctrls_init(struct imx214 *imx214)
*
* Yours sincerely, Ricardo.
*/
+
+ /* Initial vblank/hblank/exposure parameters based on current mode */
+ imx214->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &imx214_ctrl_ops,
+ V4L2_CID_VBLANK, IMX214_VBLANK_MIN,
+ IMX214_VTS_MAX - mode->height, 2,
+ mode->vts_def - mode->height);
+
+ hblank = IMX214_PPL_DEFAULT - mode->width;
+ imx214->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &imx214_ctrl_ops,
+ V4L2_CID_HBLANK, hblank, hblank,
+ 1, hblank);
+ if (imx214->hblank)
+ imx214->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ exposure_max = mode->vts_def - IMX214_EXPOSURE_OFFSET;
+ exposure_def = min(exposure_max, IMX214_EXPOSURE_DEFAULT);
imx214->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &imx214_ctrl_ops,
V4L2_CID_EXPOSURE,
IMX214_EXPOSURE_MIN,
- IMX214_EXPOSURE_MAX,
+ exposure_max,
IMX214_EXPOSURE_STEP,
- IMX214_EXPOSURE_DEFAULT);
+ exposure_def);
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &imx214_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ IMX214_ANA_GAIN_MIN, IMX214_ANA_GAIN_MAX,
+ IMX214_ANA_GAIN_STEP, IMX214_ANA_GAIN_DEFAULT);
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &imx214_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ IMX214_DGTL_GAIN_MIN, IMX214_DGTL_GAIN_MAX,
+ IMX214_DGTL_GAIN_STEP, IMX214_DGTL_GAIN_DEFAULT);
+
+ imx214->hflip = v4l2_ctrl_new_std(ctrl_hdlr, &imx214_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ if (imx214->hflip)
+ imx214->hflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+
+ imx214->vflip = v4l2_ctrl_new_std(ctrl_hdlr, &imx214_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ if (imx214->vflip)
+ imx214->vflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+
+ v4l2_ctrl_cluster(2, &imx214->hflip);
+
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &imx214_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(imx214_test_pattern_menu) - 1,
+ 0, 0, imx214_test_pattern_menu);
+ for (i = 0; i < 4; i++) {
+ /*
+ * The assumption is that
+ * V4L2_CID_TEST_PATTERN_GREENR == V4L2_CID_TEST_PATTERN_RED + 1
+ * V4L2_CID_TEST_PATTERN_BLUE == V4L2_CID_TEST_PATTERN_RED + 2
+ * V4L2_CID_TEST_PATTERN_GREENB == V4L2_CID_TEST_PATTERN_RED + 3
+ */
+ v4l2_ctrl_new_std(ctrl_hdlr, &imx214_ctrl_ops,
+ V4L2_CID_TEST_PATTERN_RED + i,
+ IMX214_TESTP_COLOUR_MIN,
+ IMX214_TESTP_COLOUR_MAX,
+ IMX214_TESTP_COLOUR_STEP,
+ IMX214_TESTP_COLOUR_MAX);
+ /* The "Solid color" pattern is white by default */
+ }
imx214->unit_size = v4l2_ctrl_new_std_compound(ctrl_hdlr,
NULL,
V4L2_CID_UNIT_CELL_SIZE,
- v4l2_ctrl_ptr_create((void *)&unit_size));
+ v4l2_ctrl_ptr_create((void *)&unit_size),
+ v4l2_ctrl_ptr_create(NULL),
+ v4l2_ctrl_ptr_create(NULL));
v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &imx214_ctrl_ops, &props);
@@ -790,76 +1006,52 @@ static int imx214_ctrls_init(struct imx214 *imx214)
return 0;
};
-#define MAX_CMD 4
-static int imx214_write_table(struct imx214 *imx214,
- const struct reg_8 table[])
-{
- u8 vals[MAX_CMD];
- int i;
- int ret;
-
- for (; table->addr != IMX214_TABLE_END ; table++) {
- if (table->addr == IMX214_TABLE_WAIT_MS) {
- usleep_range(table->val * 1000,
- table->val * 1000 + 500);
- continue;
- }
-
- for (i = 0; i < MAX_CMD; i++) {
- if (table[i].addr != (table[0].addr + i))
- break;
- vals[i] = table[i].val;
- }
-
- ret = regmap_bulk_write(imx214->regmap, table->addr, vals, i);
-
- if (ret) {
- dev_err(imx214->dev, "write_table error: %d\n", ret);
- return ret;
- }
-
- table += i - 1;
- }
-
- return 0;
-}
-
static int imx214_start_streaming(struct imx214 *imx214)
{
+ const struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_subdev_state *state;
const struct imx214_mode *mode;
int ret;
- mutex_lock(&imx214->mutex);
- ret = imx214_write_table(imx214, mode_table_common);
+ ret = cci_multi_reg_write(imx214->regmap, mode_table_common,
+ ARRAY_SIZE(mode_table_common), NULL);
if (ret < 0) {
dev_err(imx214->dev, "could not sent common table %d\n", ret);
- goto error;
+ return ret;
}
- mode = v4l2_find_nearest_size(imx214_modes,
- ARRAY_SIZE(imx214_modes), width, height,
- imx214->fmt.width, imx214->fmt.height);
- ret = imx214_write_table(imx214, mode->reg_table);
+ ret = cci_write(imx214->regmap, IMX214_REG_CSI_LANE_MODE,
+ IMX214_CSI_4_LANE_MODE, NULL);
+ if (ret) {
+ dev_err(imx214->dev, "failed to configure lanes\n");
+ return ret;
+ }
+
+ state = v4l2_subdev_get_locked_active_state(&imx214->sd);
+ fmt = v4l2_subdev_state_get_format(state, 0);
+ mode = v4l2_find_nearest_size(imx214_modes, ARRAY_SIZE(imx214_modes),
+ width, height, fmt->width, fmt->height);
+ ret = cci_multi_reg_write(imx214->regmap, mode->reg_table,
+ mode->num_of_regs, NULL);
if (ret < 0) {
dev_err(imx214->dev, "could not sent mode table %d\n", ret);
- goto error;
+ return ret;
}
+
+ usleep_range(10000, 10500);
+
+ cci_write(imx214->regmap, IMX214_REG_TEMP_SENSOR_CONTROL, 0x01, NULL);
+
ret = __v4l2_ctrl_handler_setup(&imx214->ctrls);
if (ret < 0) {
dev_err(imx214->dev, "could not sync v4l2 controls\n");
- goto error;
+ return ret;
}
- ret = regmap_write(imx214->regmap, IMX214_REG_MODE_SELECT, IMX214_MODE_STREAMING);
- if (ret < 0) {
+ ret = cci_write(imx214->regmap, IMX214_REG_MODE_SELECT,
+ IMX214_MODE_STREAMING, NULL);
+ if (ret < 0)
dev_err(imx214->dev, "could not sent start table %d\n", ret);
- goto error;
- }
-
- mutex_unlock(&imx214->mutex);
- return 0;
-error:
- mutex_unlock(&imx214->mutex);
return ret;
}
@@ -867,7 +1059,8 @@ static int imx214_stop_streaming(struct imx214 *imx214)
{
int ret;
- ret = regmap_write(imx214->regmap, IMX214_REG_MODE_SELECT, IMX214_MODE_STANDBY);
+ ret = cci_write(imx214->regmap, IMX214_REG_MODE_SELECT,
+ IMX214_MODE_STANDBY, NULL);
if (ret < 0)
dev_err(imx214->dev, "could not sent stop table %d\n", ret);
@@ -877,6 +1070,7 @@ static int imx214_stop_streaming(struct imx214 *imx214)
static int imx214_s_stream(struct v4l2_subdev *subdev, int enable)
{
struct imx214 *imx214 = to_imx214(subdev);
+ struct v4l2_subdev_state *state;
int ret;
if (enable) {
@@ -884,7 +1078,9 @@ static int imx214_s_stream(struct v4l2_subdev *subdev, int enable)
if (ret < 0)
return ret;
+ state = v4l2_subdev_lock_and_get_active_state(subdev);
ret = imx214_start_streaming(imx214);
+ v4l2_subdev_unlock_state(state);
if (ret < 0)
goto err_rpm_put;
} else {
@@ -918,12 +1114,22 @@ static int imx214_get_frame_interval(struct v4l2_subdev *subdev,
return 0;
}
+/*
+ * Raw sensors should be using the VBLANK and HBLANK controls to determine
+ * the frame rate. However this driver was initially added using the
+ * [S|G|ENUM]_FRAME_INTERVAL ioctls with a fixed rate of 30fps.
+ * Retain the frame_interval ops for backwards compatibility, but they do
+ * nothing.
+ */
static int imx214_enum_frame_interval(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
+ struct imx214 *imx214 = to_imx214(subdev);
const struct imx214_mode *mode;
+ dev_warn_once(imx214->dev, "frame_interval functions return an unreliable value for compatibility reasons. Use the VBLANK and HBLANK controls to determine the correct frame rate.\n");
+
if (fie->index != 0)
return -EINVAL;
@@ -931,7 +1137,7 @@ static int imx214_enum_frame_interval(struct v4l2_subdev *subdev,
ARRAY_SIZE(imx214_modes), width, height,
fie->width, fie->height);
- fie->code = IMX214_MBUS_CODE;
+ fie->code = imx214_get_format_code(imx214);
fie->width = mode->width;
fie->height = mode->height;
fie->interval.numerator = 1;
@@ -948,7 +1154,7 @@ static const struct v4l2_subdev_pad_ops imx214_subdev_pad_ops = {
.enum_mbus_code = imx214_enum_mbus_code,
.enum_frame_size = imx214_enum_frame_size,
.enum_frame_interval = imx214_enum_frame_interval,
- .get_fmt = imx214_get_format,
+ .get_fmt = v4l2_subdev_get_fmt,
.set_fmt = imx214_set_format,
.get_selection = imx214_get_selection,
.get_frame_interval = imx214_get_frame_interval,
@@ -965,12 +1171,6 @@ static const struct v4l2_subdev_internal_ops imx214_internal_ops = {
.init_state = imx214_entity_init_state,
};
-static const struct regmap_config sensor_regmap_config = {
- .reg_bits = 16,
- .val_bits = 8,
- .cache_type = REGCACHE_MAPLE,
-};
-
static int imx214_get_regulators(struct device *dev, struct imx214 *imx214)
{
unsigned int i;
@@ -982,6 +1182,27 @@ static int imx214_get_regulators(struct device *dev, struct imx214 *imx214)
imx214->supplies);
}
+/* Verify chip ID */
+static int imx214_identify_module(struct imx214 *imx214)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx214->sd);
+ int ret;
+ u64 val;
+
+ ret = cci_read(imx214->regmap, IMX214_REG_CHIP_ID, &val, NULL);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "failed to read chip id %x\n",
+ IMX214_CHIP_ID);
+
+ if (val != IMX214_CHIP_ID)
+ return dev_err_probe(&client->dev, -EIO,
+ "chip id mismatch: %x!=%llx\n",
+ IMX214_CHIP_ID, val);
+
+ return 0;
+}
+
static int imx214_parse_fwnode(struct device *dev)
{
struct fwnode_handle *endpoint;
@@ -992,28 +1213,42 @@ static int imx214_parse_fwnode(struct device *dev)
int ret;
endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
- if (!endpoint) {
- dev_err(dev, "endpoint node not found\n");
- return -EINVAL;
- }
+ if (!endpoint)
+ return dev_err_probe(dev, -EINVAL, "endpoint node not found\n");
ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, &bus_cfg);
if (ret) {
- dev_err(dev, "parsing endpoint node failed\n");
+ dev_err_probe(dev, ret, "parsing endpoint node failed\n");
goto done;
}
- for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++)
+ /* Check the number of MIPI CSI2 data lanes */
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != 4) {
+ ret = dev_err_probe(dev, -EINVAL,
+ "only 4 data lanes are currently supported\n");
+ goto done;
+ }
+
+ if (bus_cfg.nr_of_link_frequencies != 1)
+ dev_warn(dev, "Only one link-frequency supported, please review your DT. Continuing anyway\n");
+
+ for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++) {
if (bus_cfg.link_frequencies[i] == IMX214_DEFAULT_LINK_FREQ)
break;
-
- if (i == bus_cfg.nr_of_link_frequencies) {
- dev_err(dev, "link-frequencies %d not supported, Please review your DT\n",
- IMX214_DEFAULT_LINK_FREQ);
- ret = -EINVAL;
- goto done;
+ if (bus_cfg.link_frequencies[i] ==
+ IMX214_DEFAULT_LINK_FREQ_LEGACY) {
+ dev_warn(dev,
+ "link-frequencies %d not supported, please review your DT. Continuing anyway\n",
+ IMX214_DEFAULT_LINK_FREQ);
+ break;
+ }
}
+ if (i == bus_cfg.nr_of_link_frequencies)
+ ret = dev_err_probe(dev, -EINVAL,
+ "link-frequencies %d not supported, please review your DT\n",
+ IMX214_DEFAULT_LINK_FREQ);
+
done:
v4l2_fwnode_endpoint_free(&bus_cfg);
fwnode_handle_put(endpoint);
@@ -1037,34 +1272,28 @@ static int imx214_probe(struct i2c_client *client)
imx214->dev = dev;
imx214->xclk = devm_clk_get(dev, NULL);
- if (IS_ERR(imx214->xclk)) {
- dev_err(dev, "could not get xclk");
- return PTR_ERR(imx214->xclk);
- }
+ if (IS_ERR(imx214->xclk))
+ return dev_err_probe(dev, PTR_ERR(imx214->xclk),
+ "failed to get xclk\n");
ret = clk_set_rate(imx214->xclk, IMX214_DEFAULT_CLK_FREQ);
- if (ret) {
- dev_err(dev, "could not set xclk frequency\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to set xclk frequency\n");
ret = imx214_get_regulators(dev, imx214);
- if (ret < 0) {
- dev_err(dev, "cannot get regulators\n");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
imx214->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
- if (IS_ERR(imx214->enable_gpio)) {
- dev_err(dev, "cannot get enable gpio\n");
- return PTR_ERR(imx214->enable_gpio);
- }
+ if (IS_ERR(imx214->enable_gpio))
+ return dev_err_probe(dev, PTR_ERR(imx214->enable_gpio),
+ "failed to get enable gpio\n");
- imx214->regmap = devm_regmap_init_i2c(client, &sensor_regmap_config);
- if (IS_ERR(imx214->regmap)) {
- dev_err(dev, "regmap init failed\n");
- return PTR_ERR(imx214->regmap);
- }
+ imx214->regmap = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(imx214->regmap))
+ return dev_err_probe(dev, PTR_ERR(imx214->regmap),
+ "failed to initialize CCI\n");
v4l2_i2c_subdev_init(&imx214->sd, client, &imx214_subdev_ops);
imx214->sd.internal_ops = &imx214_internal_ops;
@@ -1075,17 +1304,14 @@ static int imx214_probe(struct i2c_client *client)
*/
imx214_power_on(imx214->dev);
- pm_runtime_set_active(imx214->dev);
- pm_runtime_enable(imx214->dev);
- pm_runtime_idle(imx214->dev);
+ ret = imx214_identify_module(imx214);
+ if (ret)
+ goto error_power_off;
ret = imx214_ctrls_init(imx214);
if (ret < 0)
goto error_power_off;
- mutex_init(&imx214->mutex);
- imx214->ctrls.lock = &imx214->mutex;
-
imx214->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
imx214->pad.flags = MEDIA_PAD_FL_SOURCE;
imx214->sd.dev = &client->dev;
@@ -1093,28 +1319,44 @@ static int imx214_probe(struct i2c_client *client)
ret = media_entity_pads_init(&imx214->sd.entity, 1, &imx214->pad);
if (ret < 0) {
- dev_err(dev, "could not register media entity\n");
+ dev_err_probe(dev, ret, "failed to init entity pads\n");
goto free_ctrl;
}
- imx214_entity_init_state(&imx214->sd, NULL);
+ imx214->sd.state_lock = imx214->ctrls.lock;
+ ret = v4l2_subdev_init_finalize(&imx214->sd);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "subdev init error\n");
+ goto free_entity;
+ }
+
+ pm_runtime_set_active(imx214->dev);
+ pm_runtime_enable(imx214->dev);
ret = v4l2_async_register_subdev_sensor(&imx214->sd);
if (ret < 0) {
- dev_err(dev, "could not register v4l2 device\n");
- goto free_entity;
+ dev_err_probe(dev, ret,
+ "failed to register sensor sub-device\n");
+ goto error_subdev_cleanup;
}
+ pm_runtime_idle(imx214->dev);
+
return 0;
+error_subdev_cleanup:
+ pm_runtime_disable(imx214->dev);
+ pm_runtime_set_suspended(&client->dev);
+ v4l2_subdev_cleanup(&imx214->sd);
+
free_entity:
media_entity_cleanup(&imx214->sd.entity);
+
free_ctrl:
- mutex_destroy(&imx214->mutex);
v4l2_ctrl_handler_free(&imx214->ctrls);
+
error_power_off:
- pm_runtime_disable(imx214->dev);
- regulator_bulk_disable(IMX214_NUM_SUPPLIES, imx214->supplies);
+ imx214_power_off(imx214->dev);
return ret;
}
@@ -1125,13 +1367,14 @@ static void imx214_remove(struct i2c_client *client)
struct imx214 *imx214 = to_imx214(sd);
v4l2_async_unregister_subdev(&imx214->sd);
+ v4l2_subdev_cleanup(sd);
media_entity_cleanup(&imx214->sd.entity);
v4l2_ctrl_handler_free(&imx214->ctrls);
-
pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
-
- mutex_destroy(&imx214->mutex);
+ if (!pm_runtime_status_suspended(&client->dev)) {
+ imx214_power_off(imx214->dev);
+ pm_runtime_set_suspended(&client->dev);
+ }
}
static const struct of_device_id imx214_of_match[] = {
diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
index 2d54cea113e1..04262bbf6306 100644
--- a/drivers/media/i2c/imx219.c
+++ b/drivers/media/i2c/imx219.c
@@ -70,15 +70,14 @@
#define IMX219_EXPOSURE_MAX 65535
/* V_TIMING internal */
-#define IMX219_REG_VTS CCI_REG16(0x0160)
-#define IMX219_VTS_MAX 0xffff
-
-#define IMX219_VBLANK_MIN 4
-
-/* HBLANK control - read only */
-#define IMX219_PPL_DEFAULT 3448
-
+#define IMX219_REG_FRM_LENGTH_A CCI_REG16(0x0160)
+#define IMX219_FLL_MAX 0xffff
+#define IMX219_VBLANK_MIN 32
#define IMX219_REG_LINE_LENGTH_A CCI_REG16(0x0162)
+#define IMX219_LLP_MIN 0x0d78
+#define IMX219_BINNED_LLP_MIN 0x0de8
+#define IMX219_LLP_MAX 0x7ff0
+
#define IMX219_REG_X_ADD_STA_A CCI_REG16(0x0164)
#define IMX219_REG_X_ADD_END_A CCI_REG16(0x0166)
#define IMX219_REG_Y_ADD_STA_A CCI_REG16(0x0168)
@@ -133,10 +132,11 @@
/* Pixel rate is fixed for all the modes */
#define IMX219_PIXEL_RATE 182400000
-#define IMX219_PIXEL_RATE_4LANE 280800000
+#define IMX219_PIXEL_RATE_4LANE 281600000
#define IMX219_DEFAULT_LINK_FREQ 456000000
-#define IMX219_DEFAULT_LINK_FREQ_4LANE 363000000
+#define IMX219_DEFAULT_LINK_FREQ_4LANE_UNSUPPORTED 363000000
+#define IMX219_DEFAULT_LINK_FREQ_4LANE 364000000
/* IMX219 native and active pixel array size. */
#define IMX219_NATIVE_WIDTH 3296U
@@ -154,7 +154,7 @@ struct imx219_mode {
unsigned int height;
/* V-timing */
- unsigned int vts_def;
+ unsigned int fll_def;
};
static const struct cci_reg_sequence imx219_common_regs[] = {
@@ -168,15 +168,6 @@ static const struct cci_reg_sequence imx219_common_regs[] = {
{ CCI_REG8(0x30eb), 0x05 },
{ CCI_REG8(0x30eb), 0x09 },
- /* PLL Clock Table */
- { IMX219_REG_VTPXCK_DIV, 5 },
- { IMX219_REG_VTSYCK_DIV, 1 },
- { IMX219_REG_PREPLLCK_VT_DIV, 3 }, /* 0x03 = AUTO set */
- { IMX219_REG_PREPLLCK_OP_DIV, 3 }, /* 0x03 = AUTO set */
- { IMX219_REG_PLL_VT_MPY, 57 },
- { IMX219_REG_OPSYCK_DIV, 1 },
- { IMX219_REG_PLL_OP_MPY, 114 },
-
/* Undocumented registers */
{ CCI_REG8(0x455e), 0x00 },
{ CCI_REG8(0x471e), 0x4b },
@@ -192,7 +183,6 @@ static const struct cci_reg_sequence imx219_common_regs[] = {
{ CCI_REG8(0x479b), 0x0e },
/* Frame Bank Register Group "A" */
- { IMX219_REG_LINE_LENGTH_A, 3448 },
{ IMX219_REG_X_ODD_INC_A, 1 },
{ IMX219_REG_Y_ODD_INC_A, 1 },
@@ -201,12 +191,45 @@ static const struct cci_reg_sequence imx219_common_regs[] = {
{ IMX219_REG_EXCK_FREQ, IMX219_EXCK_FREQ(IMX219_XCLK_FREQ / 1000000) },
};
+static const struct cci_reg_sequence imx219_2lane_regs[] = {
+ /* PLL Clock Table */
+ { IMX219_REG_VTPXCK_DIV, 5 },
+ { IMX219_REG_VTSYCK_DIV, 1 },
+ { IMX219_REG_PREPLLCK_VT_DIV, 3 }, /* 0x03 = AUTO set */
+ { IMX219_REG_PREPLLCK_OP_DIV, 3 }, /* 0x03 = AUTO set */
+ { IMX219_REG_PLL_VT_MPY, 57 },
+ { IMX219_REG_OPSYCK_DIV, 1 },
+ { IMX219_REG_PLL_OP_MPY, 114 },
+
+ /* 2-Lane CSI Mode */
+ { IMX219_REG_CSI_LANE_MODE, IMX219_CSI_2_LANE_MODE },
+};
+
+static const struct cci_reg_sequence imx219_4lane_regs[] = {
+ /* PLL Clock Table */
+ { IMX219_REG_VTPXCK_DIV, 5 },
+ { IMX219_REG_VTSYCK_DIV, 1 },
+ { IMX219_REG_PREPLLCK_VT_DIV, 3 }, /* 0x03 = AUTO set */
+ { IMX219_REG_PREPLLCK_OP_DIV, 3 }, /* 0x03 = AUTO set */
+ { IMX219_REG_PLL_VT_MPY, 88 },
+ { IMX219_REG_OPSYCK_DIV, 1 },
+ { IMX219_REG_PLL_OP_MPY, 91 },
+
+ /* 4-Lane CSI Mode */
+ { IMX219_REG_CSI_LANE_MODE, IMX219_CSI_4_LANE_MODE },
+};
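
Editor's note: the split 2-lane/4-lane PLL tables above line up with the link-frequency and pixel-rate constants. The sketch below assumes the 24 MHz external clock the driver normally uses (IMX219_XCLK_FREQ is defined earlier in the file and not shown here), and the factor of two applied to the video-timing pixel clock is an observed relationship between the table values and IMX219_PIXEL_RATE*, not a datasheet statement.

#include <stdio.h>

static void check(const char *name, unsigned int op_mpy, unsigned int vt_mpy)
{
	const unsigned long long xclk = 24000000ULL;	/* assumed IMX219_XCLK_FREQ */
	const unsigned int prediv = 3, vtpxck_div = 5;

	unsigned long long lane_bps = xclk / prediv * op_mpy;
	unsigned long long link_freq = lane_bps / 2;	/* DDR clock */
	unsigned long long pixel_rate = xclk / prediv * vt_mpy / vtpxck_div * 2;

	printf("%s: link %llu Hz, pixel rate %llu\n", name, link_freq, pixel_rate);
}

int main(void)
{
	check("2-lane", 114, 57);	/* 456000000, 182400000 */
	check("4-lane",  91, 88);	/* 364000000, 281600000 */
	return 0;
}
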
+
static const s64 imx219_link_freq_menu[] = {
IMX219_DEFAULT_LINK_FREQ,
};
static const s64 imx219_link_freq_4lane_menu[] = {
IMX219_DEFAULT_LINK_FREQ_4LANE,
+ /*
+ * This will never be advertised to userspace, but will be used for
+ * v4l2_link_freq_to_bitmap
+ */
+ IMX219_DEFAULT_LINK_FREQ_4LANE_UNSUPPORTED,
};
static const char * const imx219_test_pattern_menu[] = {
@@ -289,25 +312,25 @@ static const struct imx219_mode supported_modes[] = {
/* 8MPix 15fps mode */
.width = 3280,
.height = 2464,
- .vts_def = 3526,
+ .fll_def = 3526,
},
{
/* 1080P 30fps cropped */
.width = 1920,
.height = 1080,
- .vts_def = 1763,
+ .fll_def = 1763,
},
{
- /* 2x2 binned 30fps mode */
+ /* 2x2 binned 60fps mode */
.width = 1640,
.height = 1232,
- .vts_def = 1763,
+ .fll_def = 1707,
},
{
- /* 640x480 30fps mode */
+ /* 640x480 60fps mode */
.width = 640,
.height = 480,
- .vts_def = 1763,
+ .fll_def = 1707,
},
};
@@ -359,6 +382,62 @@ static u32 imx219_get_format_code(struct imx219 *imx219, u32 code)
return imx219_mbus_formats[i];
}
+static u32 imx219_get_format_bpp(const struct v4l2_mbus_framefmt *format)
+{
+ switch (format->code) {
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ return 8;
+
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ default:
+ return 10;
+ }
+}
+
+static void imx219_get_binning(struct v4l2_subdev_state *state, u8 *bin_h,
+ u8 *bin_v)
+{
+ const struct v4l2_mbus_framefmt *format =
+ v4l2_subdev_state_get_format(state, 0);
+ const struct v4l2_rect *crop = v4l2_subdev_state_get_crop(state, 0);
+ u32 hbin = crop->width / format->width;
+ u32 vbin = crop->height / format->height;
+
+ *bin_h = IMX219_BINNING_NONE;
+ *bin_v = IMX219_BINNING_NONE;
+
+ /*
+ * Use analog binning only if both dimensions are binned, as it crops
+ * the other dimension.
+ */
+ if (hbin == 2 && vbin == 2) {
+ *bin_h = IMX219_BINNING_X2_ANALOG;
+ *bin_v = IMX219_BINNING_X2_ANALOG;
+
+ return;
+ }
+
+ if (hbin == 2)
+ *bin_h = IMX219_BINNING_X2;
+ if (vbin == 2)
+ *bin_v = IMX219_BINNING_X2;
+}
+
+static inline u32 imx219_get_rate_factor(struct v4l2_subdev_state *state)
+{
+ u8 bin_h, bin_v;
+
+ imx219_get_binning(state, &bin_h, &bin_v);
+
+ return (bin_h & bin_v) == IMX219_BINNING_X2_ANALOG ? 2 : 1;
+}
+
/* -----------------------------------------------------------------------------
* Controls
*/
@@ -370,10 +449,12 @@ static int imx219_set_ctrl(struct v4l2_ctrl *ctrl)
struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
const struct v4l2_mbus_framefmt *format;
struct v4l2_subdev_state *state;
+ u32 rate_factor;
int ret = 0;
state = v4l2_subdev_get_locked_active_state(&imx219->sd);
format = v4l2_subdev_state_get_format(state, 0);
+ rate_factor = imx219_get_rate_factor(state);
if (ctrl->id == V4L2_CID_VBLANK) {
int exposure_max, exposure_def;
@@ -402,7 +483,7 @@ static int imx219_set_ctrl(struct v4l2_ctrl *ctrl)
break;
case V4L2_CID_EXPOSURE:
cci_write(imx219->regmap, IMX219_REG_EXPOSURE,
- ctrl->val, &ret);
+ ctrl->val / rate_factor, &ret);
break;
case V4L2_CID_DIGITAL_GAIN:
cci_write(imx219->regmap, IMX219_REG_DIGITAL_GAIN,
@@ -418,8 +499,12 @@ static int imx219_set_ctrl(struct v4l2_ctrl *ctrl)
imx219->hflip->val | imx219->vflip->val << 1, &ret);
break;
case V4L2_CID_VBLANK:
- cci_write(imx219->regmap, IMX219_REG_VTS,
- format->height + ctrl->val, &ret);
+ cci_write(imx219->regmap, IMX219_REG_FRM_LENGTH_A,
+ (format->height + ctrl->val) / rate_factor, &ret);
+ break;
+ case V4L2_CID_HBLANK:
+ cci_write(imx219->regmap, IMX219_REG_LINE_LENGTH_A,
+ format->width + ctrl->val, &ret);
break;
case V4L2_CID_TEST_PATTERN_RED:
cci_write(imx219->regmap, IMX219_REG_TESTP_RED,
@@ -466,7 +551,7 @@ static int imx219_init_controls(struct imx219 *imx219)
const struct imx219_mode *mode = &supported_modes[0];
struct v4l2_ctrl_handler *ctrl_hdlr;
struct v4l2_fwnode_device_properties props;
- int exposure_max, exposure_def, hblank;
+ int exposure_max, exposure_def;
int i, ret;
ctrl_hdlr = &imx219->ctrl_handler;
@@ -490,18 +575,17 @@ static int imx219_init_controls(struct imx219 *imx219)
if (imx219->link_freq)
imx219->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
- /* Initial vblank/hblank/exposure parameters based on current mode */
+ /* Initial blanking and exposure. Limits are updated during set_fmt */
imx219->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
V4L2_CID_VBLANK, IMX219_VBLANK_MIN,
- IMX219_VTS_MAX - mode->height, 1,
- mode->vts_def - mode->height);
- hblank = IMX219_PPL_DEFAULT - mode->width;
+ IMX219_FLL_MAX - mode->height, 1,
+ mode->fll_def - mode->height);
imx219->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
- V4L2_CID_HBLANK, hblank, hblank,
- 1, hblank);
- if (imx219->hblank)
- imx219->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
- exposure_max = mode->vts_def - 4;
+ V4L2_CID_HBLANK,
+ IMX219_LLP_MIN - mode->width,
+ IMX219_LLP_MAX - mode->width, 1,
+ IMX219_LLP_MIN - mode->width);
+ exposure_max = mode->fll_def - 4;
exposure_def = (exposure_max < IMX219_EXPOSURE_DEFAULT) ?
exposure_max : IMX219_EXPOSURE_DEFAULT;
imx219->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
@@ -587,29 +671,13 @@ static int imx219_set_framefmt(struct imx219 *imx219,
{
const struct v4l2_mbus_framefmt *format;
const struct v4l2_rect *crop;
- unsigned int bpp;
- u64 bin_h, bin_v;
+ u8 bin_h, bin_v;
+ u32 bpp;
int ret = 0;
format = v4l2_subdev_state_get_format(state, 0);
crop = v4l2_subdev_state_get_crop(state, 0);
-
- switch (format->code) {
- case MEDIA_BUS_FMT_SRGGB8_1X8:
- case MEDIA_BUS_FMT_SGRBG8_1X8:
- case MEDIA_BUS_FMT_SGBRG8_1X8:
- case MEDIA_BUS_FMT_SBGGR8_1X8:
- bpp = 8;
- break;
-
- case MEDIA_BUS_FMT_SRGGB10_1X10:
- case MEDIA_BUS_FMT_SGRBG10_1X10:
- case MEDIA_BUS_FMT_SGBRG10_1X10:
- case MEDIA_BUS_FMT_SBGGR10_1X10:
- default:
- bpp = 10;
- break;
- }
+ bpp = imx219_get_format_bpp(format);
cci_write(imx219->regmap, IMX219_REG_X_ADD_STA_A,
crop->left - IMX219_PIXEL_ARRAY_LEFT, &ret);
@@ -620,26 +688,7 @@ static int imx219_set_framefmt(struct imx219 *imx219,
cci_write(imx219->regmap, IMX219_REG_Y_ADD_END_A,
crop->top - IMX219_PIXEL_ARRAY_TOP + crop->height - 1, &ret);
- switch (crop->width / format->width) {
- case 1:
- default:
- bin_h = IMX219_BINNING_NONE;
- break;
- case 2:
- bin_h = bpp == 8 ? IMX219_BINNING_X2_ANALOG : IMX219_BINNING_X2;
- break;
- }
-
- switch (crop->height / format->height) {
- case 1:
- default:
- bin_v = IMX219_BINNING_NONE;
- break;
- case 2:
- bin_v = bpp == 8 ? IMX219_BINNING_X2_ANALOG : IMX219_BINNING_X2;
- break;
- }
-
+ imx219_get_binning(state, &bin_h, &bin_v);
cci_write(imx219->regmap, IMX219_REG_BINNING_MODE_H, bin_h, &ret);
cci_write(imx219->regmap, IMX219_REG_BINNING_MODE_V, bin_v, &ret);
@@ -662,9 +711,11 @@ static int imx219_set_framefmt(struct imx219 *imx219,
static int imx219_configure_lanes(struct imx219 *imx219)
{
- return cci_write(imx219->regmap, IMX219_REG_CSI_LANE_MODE,
- imx219->lanes == 2 ? IMX219_CSI_2_LANE_MODE :
- IMX219_CSI_4_LANE_MODE, NULL);
+ /* Write the appropriate PLL settings for the number of MIPI lanes */
+ return cci_multi_reg_write(imx219->regmap,
+ imx219->lanes == 2 ? imx219_2lane_regs : imx219_4lane_regs,
+ imx219->lanes == 2 ? ARRAY_SIZE(imx219_2lane_regs) :
+ ARRAY_SIZE(imx219_4lane_regs), NULL);
};
static int imx219_start_streaming(struct imx219 *imx219,
@@ -815,7 +866,11 @@ static int imx219_set_pad_format(struct v4l2_subdev *sd,
const struct imx219_mode *mode;
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
- unsigned int bin_h, bin_v;
+ u8 bin_h, bin_v;
+ u32 prev_line_len;
+
+ format = v4l2_subdev_state_get_format(state, 0);
+ prev_line_len = format->width + imx219->hblank->val;
mode = v4l2_find_nearest_size(supported_modes,
ARRAY_SIZE(supported_modes),
@@ -823,8 +878,6 @@ static int imx219_set_pad_format(struct v4l2_subdev *sd,
fmt->format.width, fmt->format.height);
imx219_update_pad_format(imx219, mode, &fmt->format, fmt->format.code);
-
- format = v4l2_subdev_state_get_format(state, 0);
*format = fmt->format;
/*
@@ -843,30 +896,51 @@ static int imx219_set_pad_format(struct v4l2_subdev *sd,
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
int exposure_max;
int exposure_def;
- int hblank;
+ int hblank, llp_min;
+ int pixel_rate;
/* Update limits and set FPS to default */
__v4l2_ctrl_modify_range(imx219->vblank, IMX219_VBLANK_MIN,
- IMX219_VTS_MAX - mode->height, 1,
- mode->vts_def - mode->height);
+ IMX219_FLL_MAX - mode->height, 1,
+ mode->fll_def - mode->height);
__v4l2_ctrl_s_ctrl(imx219->vblank,
- mode->vts_def - mode->height);
+ mode->fll_def - mode->height);
/* Update max exposure while meeting expected vblanking */
- exposure_max = mode->vts_def - 4;
+ exposure_max = mode->fll_def - 4;
exposure_def = (exposure_max < IMX219_EXPOSURE_DEFAULT) ?
exposure_max : IMX219_EXPOSURE_DEFAULT;
__v4l2_ctrl_modify_range(imx219->exposure,
imx219->exposure->minimum,
exposure_max, imx219->exposure->step,
exposure_def);
+
/*
- * Currently PPL is fixed to IMX219_PPL_DEFAULT, so hblank
- * depends on mode->width only, and is not changeble in any
- * way other than changing the mode.
+ * With analog binning the default minimum line length of 3448
+ * can cause artefacts with RAW10 formats, because the ADC
+ * operates on two lines together. So we switch to a higher
+ * minimum of 3560.
*/
- hblank = IMX219_PPL_DEFAULT - mode->width;
- __v4l2_ctrl_modify_range(imx219->hblank, hblank, hblank, 1,
- hblank);
+ imx219_get_binning(state, &bin_h, &bin_v);
+ llp_min = (bin_h & bin_v) == IMX219_BINNING_X2_ANALOG ?
+ IMX219_BINNED_LLP_MIN : IMX219_LLP_MIN;
+ __v4l2_ctrl_modify_range(imx219->hblank, llp_min - mode->width,
+ IMX219_LLP_MAX - mode->width, 1,
+ llp_min - mode->width);
+ /*
+ * Retain PPL setting from previous mode so that the
+ * line time does not change on a mode change.
+ * Limits have to be recomputed as the controls define
+ * the blanking only, so PPL values need to have the
+ * mode width subtracted.
+ */
+ hblank = prev_line_len - mode->width;
+ __v4l2_ctrl_s_ctrl(imx219->hblank, hblank);
+
+ /* Scale the pixel rate based on the mode specific factor */
+ pixel_rate = imx219_get_pixel_rate(imx219) *
+ imx219_get_rate_factor(state);
+ __v4l2_ctrl_modify_range(imx219->pixel_rate, pixel_rate,
+ pixel_rate, 1, pixel_rate);
}
return 0;
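As a worked example of the line-length retention above (illustrative numbers only): coming from the full-resolution mode at the minimum line length of 3448, hblank is 3448 - 3280 = 168; after switching to the 1920-wide crop the driver programs hblank = 3448 - 1920 = 1528, so the line time, and with it the exposure and frame-rate arithmetic done by userspace, is unchanged by the mode switch.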
@@ -877,10 +951,9 @@ static int imx219_get_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_selection *sel)
{
switch (sel->target) {
- case V4L2_SEL_TGT_CROP: {
+ case V4L2_SEL_TGT_CROP:
sel->r = *v4l2_subdev_state_get_crop(state, 0);
return 0;
- }
case V4L2_SEL_TGT_NATIVE_SIZE:
sel->r.top = 0;
@@ -1035,6 +1108,7 @@ static int imx219_check_hwcfg(struct device *dev, struct imx219 *imx219)
struct v4l2_fwnode_endpoint ep_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
+ unsigned long link_freq_bitmap;
int ret = -EINVAL;
endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
@@ -1056,23 +1130,40 @@ static int imx219_check_hwcfg(struct device *dev, struct imx219 *imx219)
imx219->lanes = ep_cfg.bus.mipi_csi2.num_data_lanes;
/* Check the link frequency set in device tree */
- if (!ep_cfg.nr_of_link_frequencies) {
- dev_err_probe(dev, -EINVAL,
- "link-frequency property not found in DT\n");
- goto error_out;
+ switch (imx219->lanes) {
+ case 2:
+ ret = v4l2_link_freq_to_bitmap(dev,
+ ep_cfg.link_frequencies,
+ ep_cfg.nr_of_link_frequencies,
+ imx219_link_freq_menu,
+ ARRAY_SIZE(imx219_link_freq_menu),
+ &link_freq_bitmap);
+ break;
+ case 4:
+ ret = v4l2_link_freq_to_bitmap(dev,
+ ep_cfg.link_frequencies,
+ ep_cfg.nr_of_link_frequencies,
+ imx219_link_freq_4lane_menu,
+ ARRAY_SIZE(imx219_link_freq_4lane_menu),
+ &link_freq_bitmap);
+
+ if (!ret && (link_freq_bitmap & BIT(1))) {
+ dev_warn(dev, "Link frequency of %d not supported, but has been incorrectly advertised previously\n",
+ IMX219_DEFAULT_LINK_FREQ_4LANE_UNSUPPORTED);
+ dev_warn(dev, "Using link frequency of %d\n",
+ IMX219_DEFAULT_LINK_FREQ_4LANE);
+ link_freq_bitmap |= BIT(0);
+ }
+ break;
}
- if (ep_cfg.nr_of_link_frequencies != 1 ||
- (ep_cfg.link_frequencies[0] != ((imx219->lanes == 2) ?
- IMX219_DEFAULT_LINK_FREQ : IMX219_DEFAULT_LINK_FREQ_4LANE))) {
+ if (ret || !(link_freq_bitmap & BIT(0))) {
+ ret = -EINVAL;
dev_err_probe(dev, -EINVAL,
"Link frequency not supported: %lld\n",
ep_cfg.link_frequencies[0]);
- goto error_out;
}
- ret = 0;
-
error_out:
v4l2_fwnode_endpoint_free(&ep_cfg);
fwnode_handle_put(endpoint);
@@ -1178,6 +1269,9 @@ static int imx219_probe(struct i2c_client *client)
goto error_media_entity;
}
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
ret = v4l2_async_register_subdev_sensor(&imx219->sd);
if (ret < 0) {
dev_err_probe(dev, ret,
@@ -1185,15 +1279,14 @@ static int imx219_probe(struct i2c_client *client)
goto error_subdev_cleanup;
}
- /* Enable runtime PM and turn off the device */
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
pm_runtime_idle(dev);
return 0;
error_subdev_cleanup:
v4l2_subdev_cleanup(&imx219->sd);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
error_media_entity:
media_entity_cleanup(&imx219->sd.entity);
@@ -1218,9 +1311,10 @@ static void imx219_remove(struct i2c_client *client)
imx219_free_controls(imx219);
pm_runtime_disable(&client->dev);
- if (!pm_runtime_status_suspended(&client->dev))
+ if (!pm_runtime_status_suspended(&client->dev)) {
imx219_power_off(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ }
}
static const struct of_device_id imx219_dt_ids[] = {
diff --git a/drivers/media/i2c/imx283.c b/drivers/media/i2c/imx283.c
index f676faf4b301..beb9169f93ad 100644
--- a/drivers/media/i2c/imx283.c
+++ b/drivers/media/i2c/imx283.c
@@ -1170,8 +1170,10 @@ static int imx283_disable_streams(struct v4l2_subdev *sd,
}
/* Power/clock management functions */
-static int imx283_power_on(struct imx283 *imx283)
+static int imx283_power_on(struct device *dev)
{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct imx283 *imx283 = to_imx283(sd);
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(imx283_supply_name),
@@ -1199,29 +1201,14 @@ reg_off:
return ret;
}
-static int imx283_power_off(struct imx283 *imx283)
-{
- gpiod_set_value_cansleep(imx283->reset_gpio, 1);
- regulator_bulk_disable(ARRAY_SIZE(imx283_supply_name), imx283->supplies);
- clk_disable_unprepare(imx283->xclk);
-
- return 0;
-}
-
-static int imx283_runtime_resume(struct device *dev)
+static int imx283_power_off(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct imx283 *imx283 = to_imx283(sd);
- return imx283_power_on(imx283);
-}
-
-static int imx283_runtime_suspend(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct imx283 *imx283 = to_imx283(sd);
-
- imx283_power_off(imx283);
+ gpiod_set_value_cansleep(imx283->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(imx283_supply_name), imx283->supplies);
+ clk_disable_unprepare(imx283->xclk);
return 0;
}
@@ -1516,7 +1503,7 @@ static int imx283_probe(struct i2c_client *client)
* The sensor must be powered for imx283_identify_module()
* to be able to read the CHIP_ID register
*/
- ret = imx283_power_on(imx283);
+ ret = imx283_power_on(imx283->dev);
if (ret)
return ret;
@@ -1589,7 +1576,7 @@ error_pm:
pm_runtime_disable(imx283->dev);
pm_runtime_set_suspended(imx283->dev);
error_power_off:
- imx283_power_off(imx283);
+ imx283_power_off(imx283->dev);
return ret;
}
@@ -1606,12 +1593,12 @@ static void imx283_remove(struct i2c_client *client)
pm_runtime_disable(imx283->dev);
if (!pm_runtime_status_suspended(imx283->dev))
- imx283_power_off(imx283);
+ imx283_power_off(imx283->dev);
pm_runtime_set_suspended(imx283->dev);
}
-static DEFINE_RUNTIME_DEV_PM_OPS(imx283_pm_ops, imx283_runtime_suspend,
- imx283_runtime_resume, NULL);
+static DEFINE_RUNTIME_DEV_PM_OPS(imx283_pm_ops, imx283_power_off,
+ imx283_power_on, NULL);
static const struct of_device_id imx283_dt_ids[] = {
{ .compatible = "sony,imx283" },
diff --git a/drivers/media/i2c/imx319.c b/drivers/media/i2c/imx319.c
index dd1b4ff983dc..701840f4a5cc 100644
--- a/drivers/media/i2c/imx319.c
+++ b/drivers/media/i2c/imx319.c
@@ -2442,17 +2442,19 @@ static int imx319_probe(struct i2c_client *client)
if (full_power)
pm_runtime_set_active(&client->dev);
pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
ret = v4l2_async_register_subdev_sensor(&imx319->sd);
if (ret < 0)
goto error_media_entity_pm;
+ pm_runtime_idle(&client->dev);
+
return 0;
error_media_entity_pm:
pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ if (full_power)
+ pm_runtime_set_suspended(&client->dev);
media_entity_cleanup(&imx319->sd.entity);
error_handler_free:
@@ -2474,7 +2476,8 @@ static void imx319_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(sd->ctrl_handler);
pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ if (!pm_runtime_status_suspended(&client->dev))
+ pm_runtime_set_suspended(&client->dev);
mutex_destroy(&imx319->mutex);
}
diff --git a/drivers/media/i2c/imx335.c b/drivers/media/i2c/imx335.c
index fcfd1d851bd4..0beb80b8c458 100644
--- a/drivers/media/i2c/imx335.c
+++ b/drivers/media/i2c/imx335.c
@@ -559,12 +559,14 @@ static int imx335_set_ctrl(struct v4l2_ctrl *ctrl)
imx335->vblank,
imx335->vblank + imx335->cur_mode->height);
- return __v4l2_ctrl_modify_range(imx335->exp_ctrl,
- IMX335_EXPOSURE_MIN,
- imx335->vblank +
- imx335->cur_mode->height -
- IMX335_EXPOSURE_OFFSET,
- 1, IMX335_EXPOSURE_DEFAULT);
+ ret = __v4l2_ctrl_modify_range(imx335->exp_ctrl,
+ IMX335_EXPOSURE_MIN,
+ imx335->vblank +
+ imx335->cur_mode->height -
+ IMX335_EXPOSURE_OFFSET,
+ 1, IMX335_EXPOSURE_DEFAULT);
+ if (ret)
+ return ret;
}
/*
@@ -575,6 +577,13 @@ static int imx335_set_ctrl(struct v4l2_ctrl *ctrl)
return 0;
switch (ctrl->id) {
+ case V4L2_CID_VBLANK:
+ exposure = imx335->exp_ctrl->val;
+ analog_gain = imx335->again_ctrl->val;
+
+ ret = imx335_update_exp_gain(imx335, exposure, analog_gain);
+
+ break;
case V4L2_CID_EXPOSURE:
exposure = ctrl->val;
analog_gain = imx335->again_ctrl->val;
diff --git a/drivers/media/i2c/imx415.c b/drivers/media/i2c/imx415.c
index 3f7924aa1bd3..9f37779bd611 100644
--- a/drivers/media/i2c/imx415.c
+++ b/drivers/media/i2c/imx415.c
@@ -26,6 +26,10 @@
#define IMX415_PIXEL_ARRAY_WIDTH 3864
#define IMX415_PIXEL_ARRAY_HEIGHT 2192
#define IMX415_PIXEL_ARRAY_VBLANK 58
+#define IMX415_EXPOSURE_OFFSET 8
+
+#define IMX415_PIXEL_RATE_74_25MHZ 891000000
+#define IMX415_PIXEL_RATE_72MHZ 864000000
#define IMX415_NUM_CLK_PARAM_REGS 11
@@ -51,7 +55,10 @@
#define IMX415_OUTSEL CCI_REG8(0x30c0)
#define IMX415_DRV CCI_REG8(0x30c1)
#define IMX415_VMAX CCI_REG24_LE(0x3024)
+#define IMX415_VMAX_MAX 0xfffff
#define IMX415_HMAX CCI_REG16_LE(0x3028)
+#define IMX415_HMAX_MAX 0xffff
+#define IMX415_HMAX_MULTIPLIER 12
#define IMX415_SHR0 CCI_REG24_LE(0x3050)
#define IMX415_GAIN_PCG_0 CCI_REG16_LE(0x3090)
#define IMX415_AGAIN_MIN 0
@@ -445,11 +452,8 @@ static const struct imx415_clk_params imx415_clk_params[] = {
},
};
-/* all-pixel 2-lane 720 Mbps 15.74 Hz mode */
-static const struct cci_reg_sequence imx415_mode_2_720[] = {
- { IMX415_VMAX, 0x08CA },
- { IMX415_HMAX, 0x07F0 },
- { IMX415_LANEMODE, IMX415_LANEMODE_2 },
+/* 720 Mbps CSI configuration */
+static const struct cci_reg_sequence imx415_linkrate_720mbps[] = {
{ IMX415_TCLKPOST, 0x006F },
{ IMX415_TCLKPREPARE, 0x002F },
{ IMX415_TCLKTRAIL, 0x002F },
@@ -461,11 +465,8 @@ static const struct cci_reg_sequence imx415_mode_2_720[] = {
{ IMX415_TLPX, 0x0027 },
};
-/* all-pixel 2-lane 1440 Mbps 30.01 Hz mode */
-static const struct cci_reg_sequence imx415_mode_2_1440[] = {
- { IMX415_VMAX, 0x08CA },
- { IMX415_HMAX, 0x042A },
- { IMX415_LANEMODE, IMX415_LANEMODE_2 },
+/* 1440 Mbps CSI configuration */
+static const struct cci_reg_sequence imx415_linkrate_1440mbps[] = {
{ IMX415_TCLKPOST, 0x009F },
{ IMX415_TCLKPREPARE, 0x0057 },
{ IMX415_TCLKTRAIL, 0x0057 },
@@ -477,11 +478,8 @@ static const struct cci_reg_sequence imx415_mode_2_1440[] = {
{ IMX415_TLPX, 0x004F },
};
-/* all-pixel 4-lane 891 Mbps 30 Hz mode */
-static const struct cci_reg_sequence imx415_mode_4_891[] = {
- { IMX415_VMAX, 0x08CA },
- { IMX415_HMAX, 0x044C },
- { IMX415_LANEMODE, IMX415_LANEMODE_4 },
+/* 891 Mbps CSI configuration */
+static const struct cci_reg_sequence imx415_linkrate_891mbps[] = {
{ IMX415_TCLKPOST, 0x007F },
{ IMX415_TCLKPREPARE, 0x0037 },
{ IMX415_TCLKTRAIL, 0x0037 },
@@ -498,39 +496,9 @@ struct imx415_mode_reg_list {
const struct cci_reg_sequence *regs;
};
-/*
- * Mode : number of lanes, lane rate and frame rate dependent settings
- *
- * pixel_rate and hmax_pix are needed to calculate hblank for the v4l2 ctrl
- * interface. These values can not be found in the data sheet and should be
- * treated as virtual values. Use following table when adding new modes.
- *
- * lane_rate lanes fps hmax_pix pixel_rate
- *
- * 594 2 10.000 4400 99000000
- * 891 2 15.000 4400 148500000
- * 720 2 15.748 4064 144000000
- * 1782 2 30.000 4400 297000000
- * 2079 2 30.000 4400 297000000
- * 1440 2 30.019 4510 304615385
- *
- * 594 4 20.000 5500 247500000
- * 594 4 25.000 4400 247500000
- * 720 4 25.000 4400 247500000
- * 720 4 30.019 4510 304615385
- * 891 4 30.000 4400 297000000
- * 1440 4 30.019 4510 304615385
- * 1440 4 60.038 4510 609230769
- * 1485 4 60.000 4400 594000000
- * 1782 4 60.000 4400 594000000
- * 2079 4 60.000 4400 594000000
- * 2376 4 90.164 4392 891000000
- */
struct imx415_mode {
u64 lane_rate;
- u32 lanes;
- u32 hmax_pix;
- u64 pixel_rate;
+ u32 hmax_min[2];
struct imx415_mode_reg_list reg_list;
};
@@ -538,32 +506,26 @@ struct imx415_mode {
static const struct imx415_mode supported_modes[] = {
{
.lane_rate = 720000000,
- .lanes = 2,
- .hmax_pix = 4064,
- .pixel_rate = 144000000,
+ .hmax_min = { 2032, 1066 },
.reg_list = {
- .num_of_regs = ARRAY_SIZE(imx415_mode_2_720),
- .regs = imx415_mode_2_720,
+ .num_of_regs = ARRAY_SIZE(imx415_linkrate_720mbps),
+ .regs = imx415_linkrate_720mbps,
},
},
{
.lane_rate = 1440000000,
- .lanes = 2,
- .hmax_pix = 4510,
- .pixel_rate = 304615385,
+ .hmax_min = { 1066, 533 },
.reg_list = {
- .num_of_regs = ARRAY_SIZE(imx415_mode_2_1440),
- .regs = imx415_mode_2_1440,
+ .num_of_regs = ARRAY_SIZE(imx415_linkrate_1440mbps),
+ .regs = imx415_linkrate_1440mbps,
},
},
{
.lane_rate = 891000000,
- .lanes = 4,
- .hmax_pix = 4400,
- .pixel_rate = 297000000,
+ .hmax_min = { 2200, 1100 },
.reg_list = {
- .num_of_regs = ARRAY_SIZE(imx415_mode_4_891),
- .regs = imx415_mode_4_891,
+ .num_of_regs = ARRAY_SIZE(imx415_linkrate_891mbps),
+ .regs = imx415_linkrate_891mbps,
},
},
};
@@ -587,6 +549,7 @@ static const char *const imx415_test_pattern_menu[] = {
struct imx415 {
struct device *dev;
struct clk *clk;
+ unsigned long pixel_rate;
struct regulator_bulk_data supplies[ARRAY_SIZE(imx415_supply_names)];
struct gpio_desc *reset;
struct regmap *regmap;
@@ -598,8 +561,10 @@ struct imx415 {
struct v4l2_ctrl_handler ctrls;
struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
struct v4l2_ctrl *hflip;
struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *exposure;
unsigned int cur_mode;
unsigned int num_data_lanes;
@@ -730,17 +695,38 @@ static int imx415_s_ctrl(struct v4l2_ctrl *ctrl)
ctrls);
const struct v4l2_mbus_framefmt *format;
struct v4l2_subdev_state *state;
+ u32 exposure_max;
unsigned int vmax;
unsigned int flip;
int ret;
- if (!pm_runtime_get_if_in_use(sensor->dev))
- return 0;
-
state = v4l2_subdev_get_locked_active_state(&sensor->subdev);
format = v4l2_subdev_state_get_format(state, 0);
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ exposure_max = format->height + ctrl->val -
+ IMX415_EXPOSURE_OFFSET;
+ __v4l2_ctrl_modify_range(sensor->exposure,
+ sensor->exposure->minimum,
+ exposure_max, sensor->exposure->step,
+ sensor->exposure->default_value);
+ }
+
+ if (!pm_runtime_get_if_in_use(sensor->dev))
+ return 0;
+
switch (ctrl->id) {
+ case V4L2_CID_VBLANK:
+ ret = cci_write(sensor->regmap, IMX415_VMAX,
+ format->height + ctrl->val, NULL);
+ if (ret)
+ return ret;
+ /*
+		 * Exposure is programmed relative to VMAX, which has just
+		 * changed, so reprogram the exposure register as well.
+ */
+ ctrl = sensor->exposure;
+ fallthrough;
case V4L2_CID_EXPOSURE:
/* clamp the exposure value to VMAX. */
vmax = format->height + sensor->vblank->cur.val;
@@ -766,6 +752,13 @@ static int imx415_s_ctrl(struct v4l2_ctrl *ctrl)
ret = imx415_set_testpattern(sensor, ctrl->val);
break;
+ case V4L2_CID_HBLANK:
+ ret = cci_write(sensor->regmap, IMX415_HMAX,
+ (format->width + ctrl->val) /
+ IMX415_HMAX_MULTIPLIER,
+ NULL);
+ break;
+
default:
ret = -EINVAL;
break;
@@ -784,11 +777,12 @@ static int imx415_ctrls_init(struct imx415 *sensor)
{
struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl *ctrl;
- u64 pixel_rate = supported_modes[sensor->cur_mode].pixel_rate;
- u64 lane_rate = supported_modes[sensor->cur_mode].lane_rate;
+ const struct imx415_mode *cur_mode = &supported_modes[sensor->cur_mode];
+ u64 lane_rate = cur_mode->lane_rate;
u32 exposure_max = IMX415_PIXEL_ARRAY_HEIGHT +
- IMX415_PIXEL_ARRAY_VBLANK - 8;
- u32 hblank;
+ IMX415_PIXEL_ARRAY_VBLANK -
+ IMX415_EXPOSURE_OFFSET;
+ u32 hblank_min, hblank_max;
unsigned int i;
int ret;
@@ -816,36 +810,33 @@ static int imx415_ctrls_init(struct imx415 *sensor)
if (ctrl)
ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
- v4l2_ctrl_new_std(&sensor->ctrls, &imx415_ctrl_ops, V4L2_CID_EXPOSURE,
- 4, exposure_max, 1, exposure_max);
+ sensor->exposure = v4l2_ctrl_new_std(&sensor->ctrls, &imx415_ctrl_ops,
+ V4L2_CID_EXPOSURE, 4,
+ exposure_max, 1, exposure_max);
v4l2_ctrl_new_std(&sensor->ctrls, &imx415_ctrl_ops,
V4L2_CID_ANALOGUE_GAIN, IMX415_AGAIN_MIN,
IMX415_AGAIN_MAX, IMX415_AGAIN_STEP,
IMX415_AGAIN_MIN);
- hblank = supported_modes[sensor->cur_mode].hmax_pix -
- IMX415_PIXEL_ARRAY_WIDTH;
+ hblank_min = (cur_mode->hmax_min[sensor->num_data_lanes == 2 ? 0 : 1] *
+ IMX415_HMAX_MULTIPLIER) - IMX415_PIXEL_ARRAY_WIDTH;
+ hblank_max = (IMX415_HMAX_MAX * IMX415_HMAX_MULTIPLIER) -
+ IMX415_PIXEL_ARRAY_WIDTH;
ctrl = v4l2_ctrl_new_std(&sensor->ctrls, &imx415_ctrl_ops,
- V4L2_CID_HBLANK, hblank, hblank, 1, hblank);
- if (ctrl)
- ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ V4L2_CID_HBLANK, hblank_min,
+ hblank_max, IMX415_HMAX_MULTIPLIER,
+ hblank_min);
sensor->vblank = v4l2_ctrl_new_std(&sensor->ctrls, &imx415_ctrl_ops,
V4L2_CID_VBLANK,
IMX415_PIXEL_ARRAY_VBLANK,
- IMX415_PIXEL_ARRAY_VBLANK, 1,
- IMX415_PIXEL_ARRAY_VBLANK);
- if (sensor->vblank)
- sensor->vblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ IMX415_VMAX_MAX - IMX415_PIXEL_ARRAY_HEIGHT,
+ 1, IMX415_PIXEL_ARRAY_VBLANK);
- /*
- * The pixel rate used here is a virtual value and can be used for
- * calculating the frame rate together with hblank. It may not
- * necessarily be the physically correct pixel clock.
- */
- v4l2_ctrl_new_std(&sensor->ctrls, NULL, V4L2_CID_PIXEL_RATE, pixel_rate,
- pixel_rate, 1, pixel_rate);
+ v4l2_ctrl_new_std(&sensor->ctrls, NULL, V4L2_CID_PIXEL_RATE,
+ sensor->pixel_rate, sensor->pixel_rate, 1,
+ sensor->pixel_rate);
sensor->hflip = v4l2_ctrl_new_std(&sensor->ctrls, &imx415_ctrl_ops,
V4L2_CID_HFLIP, 0, 1, 1, 0);
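The HMAX register counts in units of IMX415_HMAX_MULTIPLIER (12) pixels, which is where these limits come from. A worked example for the 891 Mbps configuration on 4 data lanes, using the hmax_min values from the tables above: hblank_min = 1100 * 12 - 3864 = 9336 and hblank_max = 0xffff * 12 - 3864 = 782556.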
@@ -890,7 +881,12 @@ static int imx415_set_mode(struct imx415 *sensor, int mode)
IMX415_NUM_CLK_PARAM_REGS,
&ret);
- return 0;
+ ret = cci_write(sensor->regmap, IMX415_LANEMODE,
+ sensor->num_data_lanes == 2 ? IMX415_LANEMODE_2 :
+ IMX415_LANEMODE_4,
+ NULL);
+
+ return ret;
}
static int imx415_setup(struct imx415 *sensor, struct v4l2_subdev_state *state)
@@ -1301,8 +1297,6 @@ static int imx415_parse_hw_config(struct imx415 *sensor)
}
for (j = 0; j < ARRAY_SIZE(supported_modes); ++j) {
- if (sensor->num_data_lanes != supported_modes[j].lanes)
- continue;
if (bus_cfg.link_frequencies[i] * 2 !=
supported_modes[j].lane_rate)
continue;
@@ -1317,6 +1311,17 @@ static int imx415_parse_hw_config(struct imx415 *sensor)
"no valid sensor mode defined\n");
goto done_endpoint_free;
}
+ switch (inck) {
+ case 27000000:
+ case 37125000:
+ case 74250000:
+ sensor->pixel_rate = IMX415_PIXEL_RATE_74_25MHZ;
+ break;
+ case 24000000:
+ case 72000000:
+ sensor->pixel_rate = IMX415_PIXEL_RATE_72MHZ;
+ break;
+ }
lane_rate = supported_modes[sensor->cur_mode].lane_rate;
for (i = 0; i < ARRAY_SIZE(imx415_clk_params); ++i) {
diff --git a/drivers/media/i2c/lt6911uxe.c b/drivers/media/i2c/lt6911uxe.c
new file mode 100644
index 000000000000..c5b40bb58a37
--- /dev/null
+++ b/drivers/media/i2c/lt6911uxe.c
@@ -0,0 +1,707 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2023 - 2025 Intel Corporation.
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/v4l2-dv-timings.h>
+
+#include <media/v4l2-cci.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+
+#define LT6911UXE_CHIP_ID 0x2102
+#define REG_CHIP_ID CCI_REG16(0xe100)
+
+#define REG_ENABLE_I2C CCI_REG8(0xe0ee)
+#define REG_HALF_PIX_CLK CCI_REG24(0xe085)
+#define REG_BYTE_CLK CCI_REG24(0xe092)
+#define REG_HALF_H_TOTAL CCI_REG16(0xe088)
+#define REG_V_TOTAL CCI_REG16(0xe08a)
+#define REG_HALF_H_ACTIVE CCI_REG16(0xe08c)
+#define REG_V_ACTIVE CCI_REG16(0xe08e)
+#define REG_MIPI_FORMAT CCI_REG8(0xe096)
+#define REG_MIPI_TX_CTRL CCI_REG8(0xe0b0)
+
+/* Interrupts */
+#define REG_INT_HDMI CCI_REG8(0xe084)
+#define INT_VIDEO_DISAPPEAR 0x0
+#define INT_VIDEO_READY 0x1
+
+#define LT6911UXE_DEFAULT_LANES 4
+#define LT6911_PAGE_CONTROL 0xff
+#define YUV422_8_BIT 0x7
+
+static const struct v4l2_dv_timings_cap lt6911uxe_timings_cap_4kp30 = {
+ .type = V4L2_DV_BT_656_1120,
+ /* keep this initialization for compatibility with CLANG */
+ .reserved = { 0 },
+ /* Pixel clock from REF_01 p. 20. Min/max height/width are unknown */
+ V4L2_INIT_BT_TIMINGS(160, 3840, /* min/max width */
+ 120, 2160, /* min/max height */
+ 50000000, 594000000, /* min/max pixelclock */
+ V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ V4L2_DV_BT_STD_CVT,
+ V4L2_DV_BT_CAP_PROGRESSIVE |
+ V4L2_DV_BT_CAP_CUSTOM |
+ V4L2_DV_BT_CAP_REDUCED_BLANKING)
+};
+
+static const struct regmap_range_cfg lt6911uxe_ranges[] = {
+ {
+ .name = "register_range",
+ .range_min = 0,
+ .range_max = 0xffff,
+ .selector_reg = LT6911_PAGE_CONTROL,
+ .selector_mask = 0xff,
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = 0x100,
+ },
+};
+
+static const struct regmap_config lt6911uxe_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xffff,
+ .ranges = lt6911uxe_ranges,
+ .num_ranges = ARRAY_SIZE(lt6911uxe_ranges),
+};
+
+struct lt6911uxe_mode {
+ u32 width;
+ u32 height;
+ u32 htotal;
+ u32 vtotal;
+ u32 code;
+ u32 fps;
+ u32 lanes;
+ s64 link_freq;
+ u64 pixel_clk;
+};
+
+struct lt6911uxe {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_dv_timings timings;
+ struct lt6911uxe_mode cur_mode;
+ struct regmap *regmap;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *irq_gpio;
+};
+
+static const struct v4l2_event lt6911uxe_ev_source_change = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+};
+
+static inline struct lt6911uxe *to_lt6911uxe(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct lt6911uxe, sd);
+}
+
+static s64 get_pixel_rate(struct lt6911uxe *lt6911uxe)
+{
+ s64 pixel_rate;
+
+ pixel_rate = (s64)lt6911uxe->cur_mode.width *
+ lt6911uxe->cur_mode.height *
+ lt6911uxe->cur_mode.fps * 16;
+ do_div(pixel_rate, lt6911uxe->cur_mode.lanes);
+
+ return pixel_rate;
+}
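As a worked example (assuming a 3840x2160 source at 30 fps carried as 16-bit UYVY over the 4 supported lanes): 3840 * 2160 * 30 * 16 / 4 = 995328000, which is the value exposed through V4L2_CID_PIXEL_RATE.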
+
+static int lt6911uxe_get_detected_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ struct lt6911uxe *lt6911uxe = to_lt6911uxe(sd);
+ struct v4l2_bt_timings *bt = &timings->bt;
+
+ memset(timings, 0, sizeof(struct v4l2_dv_timings));
+
+ timings->type = V4L2_DV_BT_656_1120;
+
+ bt->width = lt6911uxe->cur_mode.width;
+ bt->height = lt6911uxe->cur_mode.height;
+ bt->vsync = lt6911uxe->cur_mode.vtotal - lt6911uxe->cur_mode.height;
+ bt->hsync = lt6911uxe->cur_mode.htotal - lt6911uxe->cur_mode.width;
+ bt->pixelclock = lt6911uxe->cur_mode.pixel_clk;
+
+ return 0;
+}
+
+static int lt6911uxe_s_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_dv_timings *timings)
+{
+ struct lt6911uxe *lt6911uxe = to_lt6911uxe(sd);
+ struct v4l2_subdev_state *state;
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+ if (v4l2_match_dv_timings(&lt6911uxe->timings, timings, 0, false)) {
+ v4l2_subdev_unlock_state(state);
+ return 0;
+ }
+
+ if (!v4l2_valid_dv_timings(timings, &lt6911uxe_timings_cap_4kp30,
+ NULL, NULL)) {
+ v4l2_subdev_unlock_state(state);
+ return -ERANGE;
+ }
+ lt6911uxe->timings = *timings;
+ v4l2_subdev_unlock_state(state);
+
+ return 0;
+}
+
+static int lt6911uxe_g_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_dv_timings *timings)
+{
+ struct lt6911uxe *lt6911uxe = to_lt6911uxe(sd);
+ struct v4l2_subdev_state *state;
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+
+ *timings = lt6911uxe->timings;
+ v4l2_subdev_unlock_state(state);
+
+ return 0;
+}
+
+static int lt6911uxe_query_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_dv_timings *timings)
+{
+ struct v4l2_subdev_state *state;
+ int ret;
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+ ret = lt6911uxe_get_detected_timings(sd, timings);
+ if (ret) {
+ v4l2_subdev_unlock_state(state);
+ return ret;
+ }
+
+ if (!v4l2_valid_dv_timings(timings, &lt6911uxe_timings_cap_4kp30,
+ NULL, NULL)) {
+ v4l2_subdev_unlock_state(state);
+ return -ERANGE;
+ }
+
+ v4l2_subdev_unlock_state(state);
+ return 0;
+}
+
+static int lt6911uxe_enum_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_enum_dv_timings *timings)
+{
+ return v4l2_enum_dv_timings_cap(timings,
+ &lt6911uxe_timings_cap_4kp30, NULL, NULL);
+}
+
+static int lt6911uxe_dv_timings_cap(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings_cap *cap)
+{
+ *cap = lt6911uxe_timings_cap_4kp30;
+ return 0;
+}
+
+static int lt6911uxe_status_update(struct lt6911uxe *lt6911uxe)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&lt6911uxe->sd);
+ u64 int_event;
+ u64 byte_clk, half_pix_clk, fps, format;
+ u64 half_htotal, vtotal, half_width, height;
+ int ret = 0;
+
+ /* Read interrupt event */
+ cci_read(lt6911uxe->regmap, REG_INT_HDMI, &int_event, &ret);
+ if (ret) {
+ dev_err(&client->dev, "failed to read interrupt event: %d\n",
+ ret);
+ return ret;
+ }
+
+ switch (int_event) {
+ case INT_VIDEO_READY:
+ cci_read(lt6911uxe->regmap, REG_BYTE_CLK, &byte_clk, &ret);
+ byte_clk *= 1000;
+ cci_read(lt6911uxe->regmap, REG_HALF_PIX_CLK,
+ &half_pix_clk, &ret);
+ half_pix_clk *= 1000;
+
+ if (ret || byte_clk == 0 || half_pix_clk == 0) {
+ dev_dbg(&client->dev,
+ "invalid ByteClock or PixelClock\n");
+ return -EINVAL;
+ }
+
+ cci_read(lt6911uxe->regmap, REG_HALF_H_TOTAL,
+ &half_htotal, &ret);
+ cci_read(lt6911uxe->regmap, REG_V_TOTAL, &vtotal, &ret);
+ if (ret || half_htotal == 0 || vtotal == 0) {
+ dev_dbg(&client->dev, "invalid htotal or vtotal\n");
+ return -EINVAL;
+ }
+
+ fps = div_u64(half_pix_clk, half_htotal * vtotal);
+ if (fps > 60) {
+ dev_dbg(&client->dev,
+ "max fps is 60, current fps: %llu\n", fps);
+ return -EINVAL;
+ }
+
+ cci_read(lt6911uxe->regmap, REG_HALF_H_ACTIVE,
+ &half_width, &ret);
+ cci_read(lt6911uxe->regmap, REG_V_ACTIVE, &height, &ret);
+ if (ret || half_width == 0 || half_width * 2 > 3840 ||
+ height == 0 || height > 2160) {
+ dev_dbg(&client->dev, "invalid width or height\n");
+ return -EINVAL;
+ }
+
+ /*
+		 * Get the MIPI format; only YUV422_8_BIT is expected from the lt6911uxe
+ */
+ cci_read(lt6911uxe->regmap, REG_MIPI_FORMAT, &format, &ret);
+ if (format != YUV422_8_BIT) {
+ dev_dbg(&client->dev, "invalid MIPI format\n");
+ return -EINVAL;
+ }
+
+ lt6911uxe->cur_mode.height = height;
+ lt6911uxe->cur_mode.width = half_width * 2;
+ lt6911uxe->cur_mode.fps = fps;
+ /* MIPI Clock Rate = ByteClock × 4, defined in lt6911uxe spec */
+ lt6911uxe->cur_mode.link_freq = byte_clk * 4;
+ lt6911uxe->cur_mode.pixel_clk = half_pix_clk * 2;
+ lt6911uxe->cur_mode.vtotal = vtotal;
+ lt6911uxe->cur_mode.htotal = half_htotal * 2;
+ break;
+
+ case INT_VIDEO_DISAPPEAR:
+ cci_write(lt6911uxe->regmap, REG_MIPI_TX_CTRL, 0x0, &ret);
+ lt6911uxe->cur_mode.height = 0;
+ lt6911uxe->cur_mode.width = 0;
+ lt6911uxe->cur_mode.fps = 0;
+ lt6911uxe->cur_mode.link_freq = 0;
+ break;
+
+ default:
+ ret = -ENOLINK;
+ }
+ v4l2_subdev_notify_event(&lt6911uxe->sd, &lt6911uxe_ev_source_change);
+ return ret;
+}
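As a worked example of the INT_VIDEO_READY path, assuming a standard CTA-861 3840x2160p30 source (297 MHz pixel clock, htotal 4400, vtotal 2250): after the kHz-to-Hz scaling the bridge reports half_pix_clk = 148500000, half_htotal = 2200 and half_width = 1920, so fps = 148500000 / (2200 * 2250) = 30 and the restored width is 1920 * 2 = 3840.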
+
+static int lt6911uxe_init_controls(struct lt6911uxe *lt6911uxe)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ s64 pixel_rate;
+ int ret;
+
+ ctrl_hdlr = &lt6911uxe->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8);
+ if (ret)
+ return ret;
+
+ pixel_rate = get_pixel_rate(lt6911uxe);
+ lt6911uxe->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, NULL,
+ V4L2_CID_PIXEL_RATE,
+ pixel_rate, pixel_rate, 1,
+ pixel_rate);
+
+ if (ctrl_hdlr->error) {
+ ret = ctrl_hdlr->error;
+ goto hdlr_free;
+ }
+ lt6911uxe->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+
+hdlr_free:
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+ return ret;
+}
+
+static void lt6911uxe_update_pad_format(const struct lt6911uxe_mode *mode,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->code = mode->code;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+static int lt6911uxe_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct lt6911uxe *lt6911uxe = to_lt6911uxe(sd);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret < 0)
+ return ret;
+
+ cci_write(lt6911uxe->regmap, REG_MIPI_TX_CTRL, 0x1, &ret);
+ if (ret) {
+ dev_err(&client->dev, "failed to start stream: %d\n", ret);
+ goto err_rpm_put;
+ }
+
+ return 0;
+
+err_rpm_put:
+ pm_runtime_put(&client->dev);
+ return ret;
+}
+
+static int lt6911uxe_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct lt6911uxe *lt6911uxe = to_lt6911uxe(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(&lt6911uxe->sd);
+ int ret;
+
+ ret = cci_write(lt6911uxe->regmap, REG_MIPI_TX_CTRL, 0x0, NULL);
+ if (ret)
+ dev_err(&client->dev, "failed to stop stream: %d\n", ret);
+
+ pm_runtime_put(&client->dev);
+ return 0;
+}
+
+static int lt6911uxe_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct lt6911uxe *lt6911uxe = to_lt6911uxe(sd);
+ u64 pixel_rate;
+
+ lt6911uxe_update_pad_format(&lt6911uxe->cur_mode, &fmt->format);
+ *v4l2_subdev_state_get_format(sd_state, fmt->pad) = fmt->format;
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ return 0;
+
+ pixel_rate = get_pixel_rate(lt6911uxe);
+ __v4l2_ctrl_modify_range(lt6911uxe->pixel_rate, pixel_rate,
+ pixel_rate, 1, pixel_rate);
+
+ return 0;
+}
+
+static int lt6911uxe_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct lt6911uxe *lt6911uxe = to_lt6911uxe(sd);
+
+ if (code->index)
+ return -EINVAL;
+
+ code->code = lt6911uxe->cur_mode.code;
+
+ return 0;
+}
+
+static int lt6911uxe_get_mbus_config(struct v4l2_subdev *sd,
+ unsigned int pad,
+ struct v4l2_mbus_config *cfg)
+{
+ struct lt6911uxe *lt6911uxe = to_lt6911uxe(sd);
+ struct v4l2_subdev_state *state;
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+ cfg->type = V4L2_MBUS_CSI2_DPHY;
+ cfg->link_freq = lt6911uxe->cur_mode.link_freq;
+ v4l2_subdev_unlock_state(state);
+
+ return 0;
+}
+
+static int lt6911uxe_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = sd_state ? V4L2_SUBDEV_FORMAT_TRY
+ : V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ return lt6911uxe_set_format(sd, sd_state, &fmt);
+}
+
+static const struct v4l2_subdev_video_ops lt6911uxe_video_ops = {
+ .s_stream = v4l2_subdev_s_stream_helper,
+};
+
+/*
+ * The lt6911uxe provides an editable EDID for customers, but it can only be
+ * changed by updating the flash. Due to this limitation, EDID support is not
+ * implemented.
+ */
+static const struct v4l2_subdev_pad_ops lt6911uxe_pad_ops = {
+ .set_fmt = lt6911uxe_set_format,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .enable_streams = lt6911uxe_enable_streams,
+ .disable_streams = lt6911uxe_disable_streams,
+ .enum_mbus_code = lt6911uxe_enum_mbus_code,
+ .get_frame_interval = v4l2_subdev_get_frame_interval,
+ .s_dv_timings = lt6911uxe_s_dv_timings,
+ .g_dv_timings = lt6911uxe_g_dv_timings,
+ .query_dv_timings = lt6911uxe_query_dv_timings,
+ .enum_dv_timings = lt6911uxe_enum_dv_timings,
+ .dv_timings_cap = lt6911uxe_dv_timings_cap,
+ .get_mbus_config = lt6911uxe_get_mbus_config,
+};
+
+static const struct v4l2_subdev_core_ops lt6911uxe_subdev_core_ops = {
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_ops lt6911uxe_subdev_ops = {
+ .core = &lt6911uxe_subdev_core_ops,
+ .video = &lt6911uxe_video_ops,
+ .pad = &lt6911uxe_pad_ops,
+};
+
+static const struct media_entity_operations lt6911uxe_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops lt6911uxe_internal_ops = {
+ .init_state = lt6911uxe_init_state,
+};
+
+static int lt6911uxe_fwnode_parse(struct lt6911uxe *lt6911uxe,
+ struct device *dev)
+{
+ struct fwnode_handle *endpoint;
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ int ret;
+
+ endpoint = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 0, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!endpoint)
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "endpoint node not found\n");
+
+ ret = v4l2_fwnode_endpoint_parse(endpoint, &bus_cfg);
+ fwnode_handle_put(endpoint);
+ if (ret) {
+ dev_err(dev, "failed to parse endpoint node: %d\n", ret);
+ goto out_err;
+ }
+
+ /*
+	 * Check the number of MIPI CSI2 data lanes; the
+	 * lt6911uxe only supports 4 lanes.
+ */
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != LT6911UXE_DEFAULT_LANES) {
+ dev_err(dev, "only 4 data lanes are currently supported\n");
+ ret = -EINVAL;
+ goto out_err;
+ }
+ lt6911uxe->cur_mode.lanes = bus_cfg.bus.mipi_csi2.num_data_lanes;
+ lt6911uxe->cur_mode.code = MEDIA_BUS_FMT_UYVY8_1X16;
+
+ return 0;
+
+out_err:
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+ return ret;
+}
+
+static int lt6911uxe_identify_module(struct lt6911uxe *lt6911uxe,
+ struct device *dev)
+{
+ u64 val;
+ int ret = 0;
+
+ /* Chip ID should be confirmed when the I2C slave is active */
+ cci_write(lt6911uxe->regmap, REG_ENABLE_I2C, 0x1, &ret);
+ cci_read(lt6911uxe->regmap, REG_CHIP_ID, &val, &ret);
+ cci_write(lt6911uxe->regmap, REG_ENABLE_I2C, 0x0, &ret);
+ if (ret)
+		return dev_err_probe(dev, ret, "failed to read chip id\n");
+
+ if (val != LT6911UXE_CHIP_ID) {
+ return dev_err_probe(dev, -ENXIO, "chip id mismatch: %x!=%x\n",
+ LT6911UXE_CHIP_ID, (u16)val);
+ }
+
+ return 0;
+}
+
+static irqreturn_t lt6911uxe_threaded_irq_fn(int irq, void *dev_id)
+{
+ struct v4l2_subdev *sd = dev_id;
+ struct lt6911uxe *lt6911uxe = to_lt6911uxe(sd);
+ struct v4l2_subdev_state *state;
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE
+ };
+
+ lt6911uxe_status_update(lt6911uxe);
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+ /*
+	 * As an HDMI to CSI-2 bridge, the driver needs to update the active
+	 * format as soon as the HDMI source changes.
+ */
+ lt6911uxe_set_format(sd, state, &fmt);
+ v4l2_subdev_unlock_state(state);
+
+ return IRQ_HANDLED;
+}
+
+static void lt6911uxe_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct lt6911uxe *lt6911uxe = to_lt6911uxe(sd);
+
+ free_irq(gpiod_to_irq(lt6911uxe->irq_gpio), lt6911uxe);
+ v4l2_async_unregister_subdev(sd);
+ v4l2_subdev_cleanup(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(&lt6911uxe->ctrl_handler);
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+}
+
+static int lt6911uxe_probe(struct i2c_client *client)
+{
+ struct lt6911uxe *lt6911uxe;
+ struct device *dev = &client->dev;
+ int ret;
+
+ lt6911uxe = devm_kzalloc(dev, sizeof(*lt6911uxe), GFP_KERNEL);
+ if (!lt6911uxe)
+ return -ENOMEM;
+
+ lt6911uxe->regmap = devm_regmap_init_i2c(client,
+ &lt6911uxe_regmap_config);
+ if (IS_ERR(lt6911uxe->regmap))
+ return dev_err_probe(dev, PTR_ERR(lt6911uxe->regmap),
+ "failed to init CCI\n");
+
+ v4l2_i2c_subdev_init(&lt6911uxe->sd, client, &lt6911uxe_subdev_ops);
+
+ lt6911uxe->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_IN);
+ if (IS_ERR(lt6911uxe->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(lt6911uxe->reset_gpio),
+ "failed to get reset gpio\n");
+
+ lt6911uxe->irq_gpio = devm_gpiod_get(dev, "readystat", GPIOD_IN);
+ if (IS_ERR(lt6911uxe->irq_gpio))
+ return dev_err_probe(dev, PTR_ERR(lt6911uxe->irq_gpio),
+ "failed to get ready_stat gpio\n");
+
+ ret = lt6911uxe_fwnode_parse(lt6911uxe, dev);
+ if (ret)
+ return ret;
+
+ usleep_range(10000, 10500);
+
+ ret = lt6911uxe_identify_module(lt6911uxe, dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to find chip\n");
+
+ ret = lt6911uxe_init_controls(lt6911uxe);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to init control\n");
+
+ lt6911uxe->sd.dev = dev;
+ lt6911uxe->sd.internal_ops = &lt6911uxe_internal_ops;
+ lt6911uxe->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ lt6911uxe->sd.entity.ops = &lt6911uxe_subdev_entity_ops;
+ lt6911uxe->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ lt6911uxe->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&lt6911uxe->sd.entity, 1, &lt6911uxe->pad);
+ if (ret) {
+ dev_err(dev, "failed to init entity pads: %d\n", ret);
+ goto v4l2_ctrl_handler_free;
+ }
+
+ /*
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Enable runtime PM and turn off the device.
+ */
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_idle(dev);
+
+ ret = v4l2_subdev_init_finalize(&lt6911uxe->sd);
+ if (ret) {
+ dev_err(dev, "failed to init v4l2 subdev: %d\n", ret);
+ goto media_entity_cleanup;
+ }
+
+	/* Set up the IRQ */
+ ret = request_threaded_irq(gpiod_to_irq(lt6911uxe->irq_gpio), NULL,
+ lt6911uxe_threaded_irq_fn,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT, NULL, lt6911uxe);
+ if (ret) {
+ dev_err(dev, "failed to request IRQ: %d\n", ret);
+ goto subdev_cleanup;
+ }
+
+ ret = v4l2_async_register_subdev_sensor(&lt6911uxe->sd);
+ if (ret) {
+ dev_err(dev, "failed to register V4L2 subdev: %d\n", ret);
+ goto free_irq;
+ }
+
+ return 0;
+
+free_irq:
+ free_irq(gpiod_to_irq(lt6911uxe->irq_gpio), lt6911uxe);
+
+subdev_cleanup:
+ v4l2_subdev_cleanup(&lt6911uxe->sd);
+
+media_entity_cleanup:
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ media_entity_cleanup(&lt6911uxe->sd.entity);
+
+v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(lt6911uxe->sd.ctrl_handler);
+
+ return ret;
+}
+
+static const struct acpi_device_id lt6911uxe_acpi_ids[] = {
+ { "INTC10C5" },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, lt6911uxe_acpi_ids);
+
+static struct i2c_driver lt6911uxe_i2c_driver = {
+ .driver = {
+ .name = "lt6911uxe",
+ .acpi_match_table = ACPI_PTR(lt6911uxe_acpi_ids),
+ },
+ .probe = lt6911uxe_probe,
+ .remove = lt6911uxe_remove,
+};
+
+module_i2c_driver(lt6911uxe_i2c_driver);
+
+MODULE_AUTHOR("Yan Dongcheng <dongcheng.yan@intel.com>");
+MODULE_DESCRIPTION("Lontium lt6911uxe HDMI to MIPI Bridge Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/ov08x40.c b/drivers/media/i2c/ov08x40.c
index b9682264e2f5..cf0e41fc3071 100644
--- a/drivers/media/i2c/ov08x40.c
+++ b/drivers/media/i2c/ov08x40.c
@@ -1322,9 +1322,6 @@ static int ov08x40_power_on(struct device *dev)
struct ov08x40 *ov08x = to_ov08x40(sd);
int ret;
- if (is_acpi_node(dev_fwnode(dev)))
- return 0;
-
ret = clk_prepare_enable(ov08x->xvclk);
if (ret < 0) {
dev_err(dev, "failed to enable xvclk\n");
@@ -1360,9 +1357,6 @@ static int ov08x40_power_off(struct device *dev)
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct ov08x40 *ov08x = to_ov08x40(sd);
- if (is_acpi_node(dev_fwnode(dev)))
- return 0;
-
gpiod_set_value_cansleep(ov08x->reset_gpio, 1);
regulator_bulk_disable(ARRAY_SIZE(ov08x40_supply_names),
ov08x->supplies);
@@ -1400,7 +1394,7 @@ static int ov08x40_read_reg(struct ov08x40 *ov08x,
ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
if (ret != ARRAY_SIZE(msgs))
- return -EIO;
+ return ret < 0 ? ret : -EIO;
*val = be32_to_cpu(data_be);
@@ -1469,7 +1463,7 @@ static int ov08x40_write_reg(struct ov08x40 *ov08x,
u16 reg, u32 len, u32 __val)
{
struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
- int buf_i, val_i;
+ int buf_i, val_i, ret;
u8 buf[6], *val_p;
__be32 val;
@@ -1487,8 +1481,9 @@ static int ov08x40_write_reg(struct ov08x40 *ov08x,
while (val_i < 4)
buf[buf_i++] = val_p[val_i++];
- if (i2c_master_send(client, buf, len + 2) != len + 2)
- return -EIO;
+ ret = i2c_master_send(client, buf, len + 2);
+ if (ret != len + 2)
+ return ret < 0 ? ret : -EIO;
return 0;
}
@@ -1937,6 +1932,35 @@ static int ov08x40_stop_streaming(struct ov08x40 *ov08x)
OV08X40_REG_VALUE_08BIT, OV08X40_MODE_STANDBY);
}
+/* Verify chip ID */
+static int ov08x40_identify_module(struct ov08x40 *ov08x)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
+ int ret;
+ u32 val;
+
+ if (ov08x->identified)
+ return 0;
+
+ ret = ov08x40_read_reg(ov08x, OV08X40_REG_CHIP_ID,
+ OV08X40_REG_VALUE_24BIT, &val);
+ if (ret) {
+ dev_err(&client->dev, "error reading chip-id register: %d\n", ret);
+ return ret;
+ }
+
+ if (val != OV08X40_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ OV08X40_CHIP_ID, val);
+ return -ENXIO;
+ }
+
+ dev_dbg(&client->dev, "chip id 0x%x\n", val);
+ ov08x->identified = true;
+
+ return 0;
+}
+
static int ov08x40_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov08x40 *ov08x = to_ov08x40(sd);
@@ -1950,6 +1974,10 @@ static int ov08x40_set_stream(struct v4l2_subdev *sd, int enable)
if (ret < 0)
goto err_unlock;
+ ret = ov08x40_identify_module(ov08x);
+ if (ret)
+ goto err_rpm_put;
+
/*
* Apply default & customized values
* and then start streaming.
@@ -1974,32 +2002,6 @@ err_unlock:
return ret;
}
-/* Verify chip ID */
-static int ov08x40_identify_module(struct ov08x40 *ov08x)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
- int ret;
- u32 val;
-
- if (ov08x->identified)
- return 0;
-
- ret = ov08x40_read_reg(ov08x, OV08X40_REG_CHIP_ID,
- OV08X40_REG_VALUE_24BIT, &val);
- if (ret)
- return ret;
-
- if (val != OV08X40_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
- OV08X40_CHIP_ID, val);
- return -ENXIO;
- }
-
- ov08x->identified = true;
-
- return 0;
-}
-
static const struct v4l2_subdev_video_ops ov08x40_video_ops = {
.s_stream = ov08x40_set_stream,
};
@@ -2151,65 +2153,69 @@ static int ov08x40_check_hwcfg(struct ov08x40 *ov08x, struct device *dev)
int ret;
u32 xvclk_rate;
- if (!fwnode)
- return -ENXIO;
+ /*
+ * Sometimes the fwnode graph is initialized by the bridge driver.
+	 * Bridge drivers doing this also add the sensor properties, so wait for those.
+ */
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "waiting for fwnode graph endpoint\n");
- if (!is_acpi_node(fwnode)) {
- ov08x->xvclk = devm_clk_get(dev, NULL);
- if (IS_ERR(ov08x->xvclk)) {
- dev_err(dev, "could not get xvclk clock (%pe)\n",
- ov08x->xvclk);
- return PTR_ERR(ov08x->xvclk);
- }
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return dev_err_probe(dev, ret, "parsing endpoint failed\n");
- xvclk_rate = clk_get_rate(ov08x->xvclk);
+ ov08x->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ov08x->reset_gpio)) {
+ ret = dev_err_probe(dev, PTR_ERR(ov08x->reset_gpio),
+ "getting reset GPIO\n");
+ goto out_err;
+ }
- ov08x->reset_gpio = devm_gpiod_get_optional(dev, "reset",
- GPIOD_OUT_LOW);
- if (IS_ERR(ov08x->reset_gpio))
- return PTR_ERR(ov08x->reset_gpio);
+ for (i = 0; i < ARRAY_SIZE(ov08x40_supply_names); i++)
+ ov08x->supplies[i].supply = ov08x40_supply_names[i];
- for (i = 0; i < ARRAY_SIZE(ov08x40_supply_names); i++)
- ov08x->supplies[i].supply = ov08x40_supply_names[i];
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ov08x40_supply_names),
+ ov08x->supplies);
+ if (ret)
+ goto out_err;
- ret = devm_regulator_bulk_get(dev,
- ARRAY_SIZE(ov08x40_supply_names),
- ov08x->supplies);
- if (ret)
- return ret;
+ ov08x->xvclk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(ov08x->xvclk)) {
+ ret = dev_err_probe(dev, PTR_ERR(ov08x->xvclk),
+ "getting xvclk\n");
+ goto out_err;
+ }
+ if (ov08x->xvclk) {
+ xvclk_rate = clk_get_rate(ov08x->xvclk);
} else {
ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
&xvclk_rate);
if (ret) {
- dev_err(dev, "can't get clock frequency");
- return ret;
+ dev_err(dev, "can't get clock frequency\n");
+ goto out_err;
}
}
if (xvclk_rate != OV08X40_XVCLK) {
- dev_err(dev, "external clock %d is not supported",
+ dev_err(dev, "external clock %d is not supported\n",
xvclk_rate);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_err;
}
- ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
- if (!ep)
- return -ENXIO;
-
- ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
- fwnode_handle_put(ep);
- if (ret)
- return ret;
-
if (bus_cfg.bus.mipi_csi2.num_data_lanes != OV08X40_DATA_LANES) {
- dev_err(dev, "number of CSI2 data lanes %d is not supported",
+ dev_err(dev, "number of CSI2 data lanes %d is not supported\n",
bus_cfg.bus.mipi_csi2.num_data_lanes);
ret = -EINVAL;
goto out_err;
}
if (!bus_cfg.nr_of_link_frequencies) {
- dev_err(dev, "no link frequencies defined");
+ dev_err(dev, "no link frequencies defined\n");
ret = -EINVAL;
goto out_err;
}
@@ -2222,7 +2228,7 @@ static int ov08x40_check_hwcfg(struct ov08x40 *ov08x, struct device *dev)
}
if (j == bus_cfg.nr_of_link_frequencies) {
- dev_err(dev, "no link frequency %lld supported",
+ dev_err(dev, "no link frequency %lld supported\n",
link_freq_menu_items[i]);
ret = -EINVAL;
goto out_err;
@@ -2246,10 +2252,8 @@ static int ov08x40_probe(struct i2c_client *client)
/* Check HW config */
ret = ov08x40_check_hwcfg(ov08x, &client->dev);
- if (ret) {
- dev_err(&client->dev, "failed to check hwcfg: %d", ret);
+ if (ret)
return ret;
- }
/* Initialize subdev */
v4l2_i2c_subdev_init(&ov08x->sd, client, &ov08x40_subdev_ops);
@@ -2264,10 +2268,8 @@ static int ov08x40_probe(struct i2c_client *client)
/* Check module identity */
ret = ov08x40_identify_module(ov08x);
- if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d\n", ret);
+ if (ret)
goto probe_power_off;
- }
}
/* Set default mode to max resolution */
@@ -2324,11 +2326,14 @@ static void ov08x40_remove(struct i2c_client *client)
ov08x40_free_controls(ov08x);
pm_runtime_disable(&client->dev);
+ if (!pm_runtime_status_suspended(&client->dev))
+ ov08x40_power_off(&client->dev);
pm_runtime_set_suspended(&client->dev);
-
- ov08x40_power_off(&client->dev);
}
+static DEFINE_RUNTIME_DEV_PM_OPS(ov08x40_pm_ops, ov08x40_power_off,
+ ov08x40_power_on, NULL);
+
#ifdef CONFIG_ACPI
static const struct acpi_device_id ov08x40_acpi_ids[] = {
{"OVTI08F4"},
@@ -2349,6 +2354,7 @@ static struct i2c_driver ov08x40_i2c_driver = {
.name = "ov08x40",
.acpi_match_table = ACPI_PTR(ov08x40_acpi_ids),
.of_match_table = ov08x40_of_match,
+ .pm = pm_sleep_ptr(&ov08x40_pm_ops),
},
.probe = ov08x40_probe,
.remove = ov08x40_remove,
diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index 9a5d118b87b0..80d151e8ae29 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -83,8 +83,6 @@ static const char * const ov2740_supply_name[] = {
"DVDD",
};
-#define OV2740_NUM_SUPPLIES ARRAY_SIZE(ov2740_supply_name)
-
struct nvm_data {
struct nvmem_device *nvmem;
struct regmap *regmap;
@@ -536,7 +534,7 @@ struct ov2740 {
struct gpio_desc *reset_gpio;
struct gpio_desc *powerdown_gpio;
struct clk *clk;
- struct regulator_bulk_data supplies[OV2740_NUM_SUPPLIES];
+ struct regulator_bulk_data supplies[ARRAY_SIZE(ov2740_supply_name)];
/* Current mode */
const struct ov2740_mode *cur_mode;
@@ -655,7 +653,7 @@ static int ov2740_identify_module(struct ov2740 *ov2740)
return -ENXIO;
}
- dev_dbg(&client->dev, "chip id: %x\n", val);
+ dev_dbg(&client->dev, "chip id: 0x%x\n", val);
ov2740->identified = true;
@@ -828,8 +826,10 @@ static int ov2740_init_controls(struct ov2740 *ov2740)
0, 0, ov2740_test_pattern_menu);
ret = v4l2_fwnode_device_parse(&client->dev, &props);
- if (ret)
+ if (ret) {
+ v4l2_ctrl_handler_free(ctrl_hdlr);
return ret;
+ }
v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &ov2740_ctrl_ops, &props);
@@ -1319,7 +1319,8 @@ static int ov2740_suspend(struct device *dev)
gpiod_set_value_cansleep(ov2740->reset_gpio, 1);
gpiod_set_value_cansleep(ov2740->powerdown_gpio, 1);
clk_disable_unprepare(ov2740->clk);
- regulator_bulk_disable(OV2740_NUM_SUPPLIES, ov2740->supplies);
+ regulator_bulk_disable(ARRAY_SIZE(ov2740_supply_name),
+ ov2740->supplies);
return 0;
}
@@ -1329,13 +1330,15 @@ static int ov2740_resume(struct device *dev)
struct ov2740 *ov2740 = to_ov2740(sd);
int ret;
- ret = regulator_bulk_enable(OV2740_NUM_SUPPLIES, ov2740->supplies);
+ ret = regulator_bulk_enable(ARRAY_SIZE(ov2740_supply_name),
+ ov2740->supplies);
if (ret)
return ret;
ret = clk_prepare_enable(ov2740->clk);
if (ret) {
- regulator_bulk_disable(OV2740_NUM_SUPPLIES, ov2740->supplies);
+ regulator_bulk_disable(ARRAY_SIZE(ov2740_supply_name),
+ ov2740->supplies);
return ret;
}
@@ -1351,7 +1354,8 @@ static int ov2740_probe(struct i2c_client *client)
struct device *dev = &client->dev;
struct ov2740 *ov2740;
bool full_power;
- int i, ret;
+ unsigned int i;
+ int ret;
ov2740 = devm_kzalloc(&client->dev, sizeof(*ov2740), GFP_KERNEL);
if (!ov2740)
@@ -1389,10 +1393,11 @@ static int ov2740_probe(struct i2c_client *client)
return dev_err_probe(dev, PTR_ERR(ov2740->clk),
"failed to get clock\n");
- for (i = 0; i < OV2740_NUM_SUPPLIES; i++)
+ for (i = 0; i < ARRAY_SIZE(ov2740_supply_name); i++)
ov2740->supplies[i].supply = ov2740_supply_name[i];
- ret = devm_regulator_bulk_get(dev, OV2740_NUM_SUPPLIES, ov2740->supplies);
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ov2740_supply_name),
+ ov2740->supplies);
if (ret)
return dev_err_probe(dev, ret, "failed to get regulators\n");
diff --git a/drivers/media/i2c/ov7251.c b/drivers/media/i2c/ov7251.c
index 30f61e04ecaf..3226888d77e9 100644
--- a/drivers/media/i2c/ov7251.c
+++ b/drivers/media/i2c/ov7251.c
@@ -922,6 +922,8 @@ static int ov7251_set_power_on(struct device *dev)
return ret;
}
+ usleep_range(1000, 1100);
+
gpiod_set_value_cansleep(ov7251->enable_gpio, 1);
/* wait at least 65536 external clock cycles */
@@ -1696,7 +1698,7 @@ static int ov7251_probe(struct i2c_client *client)
return PTR_ERR(ov7251->analog_regulator);
}
- ov7251->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH);
+ ov7251->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(ov7251->enable_gpio)) {
dev_err(dev, "cannot get enable gpio\n");
return PTR_ERR(ov7251->enable_gpio);
diff --git a/drivers/media/i2c/ov9282.c b/drivers/media/i2c/ov9282.c
index 87e5d7ce5a47..c882a021cf18 100644
--- a/drivers/media/i2c/ov9282.c
+++ b/drivers/media/i2c/ov9282.c
@@ -44,6 +44,15 @@
#define OV9282_EXPOSURE_STEP 1
#define OV9282_EXPOSURE_DEFAULT 0x0282
+/* AEC/AGC manual */
+#define OV9282_REG_AEC_MANUAL 0x3503
+#define OV9282_DIGFRAC_GAIN_DELAY BIT(6)
+#define OV9282_GAIN_CHANGE_DELAY BIT(5)
+#define OV9282_GAIN_DELAY BIT(4)
+#define OV9282_GAIN_PREC16_EN BIT(3)
+#define OV9282_GAIN_MANUAL_AS_SENSGAIN BIT(2)
+#define OV9282_AEC_MANUAL_DEFAULT 0x00
+
/* Analog gain control */
#define OV9282_REG_AGAIN 0x3509
#define OV9282_AGAIN_MIN 0x10
@@ -214,7 +223,7 @@ static const struct ov9282_reg common_regs[] = {
{0x3030, 0x10},
{0x3039, 0x32},
{0x303a, 0x00},
- {0x3503, 0x08},
+ {OV9282_REG_AEC_MANUAL, OV9282_GAIN_PREC16_EN},
{0x3505, 0x8c},
{0x3507, 0x03},
{0x3508, 0x00},
@@ -296,8 +305,8 @@ static const struct ov9282_reg mode_1280x800_regs[] = {
{0x3813, 0x08},
{0x3814, 0x11},
{0x3815, 0x11},
- {0x3820, 0x40},
- {0x3821, 0x00},
+ {OV9282_REG_TIMING_FORMAT_1, 0x40},
+ {OV9282_REG_TIMING_FORMAT_2, 0x00},
{0x4003, 0x40},
{0x4008, 0x04},
{0x4009, 0x0b},
@@ -327,8 +336,8 @@ static const struct ov9282_reg mode_1280x720_regs[] = {
{0x3813, 0x08},
{0x3814, 0x11},
{0x3815, 0x11},
- {0x3820, 0x3c},
- {0x3821, 0x84},
+ {OV9282_REG_TIMING_FORMAT_1, 0x3c},
+ {OV9282_REG_TIMING_FORMAT_2, 0x84},
{0x4003, 0x40},
{0x4008, 0x02},
{0x4009, 0x05},
@@ -358,8 +367,8 @@ static const struct ov9282_reg mode_640x400_regs[] = {
{0x3813, 0x04},
{0x3814, 0x31},
{0x3815, 0x22},
- {0x3820, 0x60},
- {0x3821, 0x01},
+ {OV9282_REG_TIMING_FORMAT_1, 0x60},
+ {OV9282_REG_TIMING_FORMAT_2, 0x01},
{0x4008, 0x02},
{0x4009, 0x05},
{0x400c, 0x00},
diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
index f08db3cfe076..f4568e87f018 100644
--- a/drivers/media/i2c/st-mipid02.c
+++ b/drivers/media/i2c/st-mipid02.c
@@ -301,8 +301,9 @@ static int mipid02_detect(struct mipid02_dev *bridge)
static int mipid02_configure_from_rx_speed(struct mipid02_dev *bridge,
struct v4l2_mbus_framefmt *fmt)
{
+ struct media_pad *remote =
+ &bridge->s_subdev->entity.pads[bridge->s_subdev_pad_id];
struct i2c_client *client = bridge->i2c_client;
- struct v4l2_subdev *subdev = bridge->s_subdev;
struct v4l2_fwnode_endpoint *ep = &bridge->rx;
u32 bpp = bpp_from_code(fmt->code);
/*
@@ -312,7 +313,7 @@ static int mipid02_configure_from_rx_speed(struct mipid02_dev *bridge,
u64 ui_4 = 2000000000;
s64 link_freq;
- link_freq = v4l2_get_link_freq(subdev->ctrl_handler, bpp,
+ link_freq = v4l2_get_link_freq(remote, bpp,
2 * ep->bus.mipi_csi2.num_data_lanes);
if (link_freq < 0) {
dev_err(&client->dev, "Failed to get link frequency");
diff --git a/drivers/media/i2c/tc358746.c b/drivers/media/i2c/tc358746.c
index 389582420ba7..143aa1359aba 100644
--- a/drivers/media/i2c/tc358746.c
+++ b/drivers/media/i2c/tc358746.c
@@ -161,10 +161,6 @@ struct tc358746 {
u16 pll_pre_div;
u16 pll_mul;
-#define TC358746_VB_MAX_SIZE (511 * 32)
-#define TC358746_VB_DEFAULT_SIZE (1 * 32)
- unsigned int vb_size; /* Video buffer size in bits */
-
struct phy_configure_opts_mipi_dphy dphy_cfg;
};
@@ -202,6 +198,15 @@ enum {
PDFORMAT_YUV444,
};
+#define TC358746_FORMAT_RAW(_bpp, _code) \
+{ \
+ .code = _code, \
+ .bus_width = _bpp, \
+ .bpp = _bpp, \
+ .pdformat = PDFORMAT_RAW##_bpp, \
+ .pdataf = PDATAF_MODE0, /* don't care */ \
+}
+
/* Check tc358746_src_mbus_code() if you add new formats */
static const struct tc358746_format tc358746_formats[] = {
{
@@ -230,7 +235,23 @@ static const struct tc358746_format tc358746_formats[] = {
.bpp = 20,
.pdformat = PDFORMAT_YUV422_10BIT,
.pdataf = PDATAF_MODE0, /* don't care */
- }
+ },
+ TC358746_FORMAT_RAW(8, MEDIA_BUS_FMT_SBGGR8_1X8),
+ TC358746_FORMAT_RAW(8, MEDIA_BUS_FMT_SGBRG8_1X8),
+ TC358746_FORMAT_RAW(8, MEDIA_BUS_FMT_SGRBG8_1X8),
+ TC358746_FORMAT_RAW(8, MEDIA_BUS_FMT_SRGGB8_1X8),
+ TC358746_FORMAT_RAW(10, MEDIA_BUS_FMT_SBGGR10_1X10),
+ TC358746_FORMAT_RAW(10, MEDIA_BUS_FMT_SGBRG10_1X10),
+ TC358746_FORMAT_RAW(10, MEDIA_BUS_FMT_SGRBG10_1X10),
+ TC358746_FORMAT_RAW(10, MEDIA_BUS_FMT_SRGGB10_1X10),
+ TC358746_FORMAT_RAW(12, MEDIA_BUS_FMT_SBGGR12_1X12),
+ TC358746_FORMAT_RAW(12, MEDIA_BUS_FMT_SGBRG12_1X12),
+ TC358746_FORMAT_RAW(12, MEDIA_BUS_FMT_SGRBG12_1X12),
+ TC358746_FORMAT_RAW(12, MEDIA_BUS_FMT_SRGGB12_1X12),
+ TC358746_FORMAT_RAW(14, MEDIA_BUS_FMT_SBGGR14_1X14),
+ TC358746_FORMAT_RAW(14, MEDIA_BUS_FMT_SGBRG14_1X14),
+ TC358746_FORMAT_RAW(14, MEDIA_BUS_FMT_SGRBG14_1X14),
+ TC358746_FORMAT_RAW(14, MEDIA_BUS_FMT_SRGGB14_1X14),
};
/* Get n-th format for pad */
@@ -415,6 +436,70 @@ tc358746_apply_pll_config(struct tc358746 *tc358746)
return tc358746_set_bits(tc358746, PLLCTL1_REG, CKEN);
}
+#define TC358746_VB_PRECISION 10
+#define TC358746_VB_MAX_SIZE (511 * 32)
+#define TC358746_VB_DEFAULT_SIZE (1 * 32)
+
+static int tc358746_calc_vb_size(struct tc358746 *tc358746,
+ s64 source_link_freq,
+ const struct v4l2_mbus_framefmt *mbusfmt,
+ const struct tc358746_format *fmt)
+{
+ unsigned long csi_bitrate, source_bitrate;
+ unsigned int fifo_sz, tmp, n;
+ int vb_size; /* Video buffer size in bits */
+
+ source_bitrate = source_link_freq * fmt->bus_width;
+
+ csi_bitrate = tc358746->dphy_cfg.lanes * tc358746->pll_rate;
+
+ dev_dbg(tc358746->sd.dev,
+ "Fifo settings params: source-bitrate:%lu csi-bitrate:%lu",
+ source_bitrate, csi_bitrate);
+
+ /* Avoid possible FIFO overflows */
+ if (csi_bitrate < source_bitrate)
+ return -EINVAL;
+
+ /* Best case */
+ if (csi_bitrate == source_bitrate) {
+ fifo_sz = TC358746_VB_DEFAULT_SIZE;
+ vb_size = TC358746_VB_DEFAULT_SIZE;
+ } else {
+ /*
+ * Avoid a possible FIFO underflow when
+ * csi_bitrate > source_bitrate. In such a case the chip has an internal
+ * FIFO which can be used to delay the line output.
+ *
+ * Fifo size calculation (excluding precision):
+ *
+ * fifo-sz, image-width - in bits
+ * sbr - source_bitrate in bits/s
+ * csir - csi_bitrate in bits/s
+ *
+ * image-width / csir >= (image-width - fifo-sz) / sbr
+ * image-width * sbr / csir >= image-width - fifo-sz
+ * fifo-sz >= image-width - image-width * sbr / csir; with n = csir/sbr
+ * fifo-sz >= image-width - image-width / n
+ */
+ source_bitrate /= TC358746_VB_PRECISION;
+ n = csi_bitrate / source_bitrate;
+ tmp = (mbusfmt->width * TC358746_VB_PRECISION) / n;
+ fifo_sz = mbusfmt->width - tmp;
+ fifo_sz *= fmt->bpp;
+ vb_size = round_up(fifo_sz, 32);
+ }
+
+ dev_dbg(tc358746->sd.dev,
+ "Found FIFO size[bits]:%u -> aligned to size[bits]:%u\n",
+ fifo_sz, vb_size);
+
+ if (vb_size > TC358746_VB_MAX_SIZE)
+ return -EINVAL;
+
+ return vb_size;
+}
+
static int tc358746_apply_misc_config(struct tc358746 *tc358746)
{
const struct v4l2_mbus_framefmt *mbusfmt;
@@ -422,6 +507,9 @@ static int tc358746_apply_misc_config(struct tc358746 *tc358746)
struct v4l2_subdev_state *sink_state;
const struct tc358746_format *fmt;
struct device *dev = sd->dev;
+ struct media_pad *source_pad;
+ s64 source_link_freq;
+ int vb_size;
u32 val;
int err;
@@ -430,6 +518,21 @@ static int tc358746_apply_misc_config(struct tc358746 *tc358746)
mbusfmt = v4l2_subdev_state_get_format(sink_state, TC358746_SINK);
fmt = tc358746_get_format_by_code(TC358746_SINK, mbusfmt->code);
+ source_pad = media_entity_remote_source_pad_unique(&sd->entity);
+ if (IS_ERR(source_pad)) {
+ dev_err(dev, "Failed to get source pad of %s\n", sd->name);
+ err = PTR_ERR(source_pad);
+ goto out;
+ }
+ source_link_freq = v4l2_get_link_freq(source_pad, 0, 0);
+ if (source_link_freq <= 0) {
+ dev_err(dev,
+ "Failed to query or invalid source link frequency\n");
+ /* Return -EINVAL in case source_link_freq is 0 */
+ err = source_link_freq ?: -EINVAL;
+ goto out;
+ }
+
/* Self defined CSI user data type id's are not supported yet */
val = PDFMT(fmt->pdformat);
dev_dbg(dev, "DATAFMT: 0x%x\n", val);
@@ -443,7 +546,13 @@ static int tc358746_apply_misc_config(struct tc358746 *tc358746)
if (err)
goto out;
- val = tc358746->vb_size / 32;
+ vb_size = tc358746_calc_vb_size(tc358746, source_link_freq, mbusfmt, fmt);
+ if (vb_size < 0) {
+ err = vb_size;
+ goto out;
+ }
+
+ val = vb_size / 32;
dev_dbg(dev, "FIFOCTL: %u (0x%x)\n", val, val);
err = tc358746_write(tc358746, FIFOCTL_REG, val);
if (err)
@@ -460,24 +569,20 @@ out:
return err;
}
-/* Use MHz as base so the div needs no u64 */
-static u32 tc358746_cfg_to_cnt(unsigned int cfg_val,
- unsigned int clk_mhz,
- unsigned int time_base)
+static u32 tc358746_cfg_to_cnt(unsigned long cfg_val, unsigned long clk_hz,
+ unsigned long long time_base)
{
- return DIV_ROUND_UP(cfg_val * clk_mhz, time_base);
+ return div64_u64((u64)cfg_val * clk_hz + time_base - 1, time_base);
}
-static u32 tc358746_ps_to_cnt(unsigned int cfg_val,
- unsigned int clk_mhz)
+static u32 tc358746_ps_to_cnt(unsigned long cfg_val, unsigned long clk_hz)
{
- return tc358746_cfg_to_cnt(cfg_val, clk_mhz, USEC_PER_SEC);
+ return tc358746_cfg_to_cnt(cfg_val, clk_hz, PSEC_PER_SEC);
}
-static u32 tc358746_us_to_cnt(unsigned int cfg_val,
- unsigned int clk_mhz)
+static u32 tc358746_us_to_cnt(unsigned long cfg_val, unsigned long clk_hz)
{
- return tc358746_cfg_to_cnt(cfg_val, clk_mhz, 1);
+ return tc358746_cfg_to_cnt(cfg_val, clk_hz, USEC_PER_SEC);
}
static int tc358746_apply_dphy_config(struct tc358746 *tc358746)
@@ -492,7 +597,6 @@ static int tc358746_apply_dphy_config(struct tc358746 *tc358746)
/* The hs_byte_clk is also called SYSCLK in the excel sheet */
hs_byte_clk = cfg->hs_clk_rate / 8;
- hs_byte_clk /= HZ_PER_MHZ;
hf_clk = hs_byte_clk / 2;
val = tc358746_us_to_cnt(cfg->init, hf_clk) - 1;
@@ -882,97 +986,6 @@ static unsigned long tc358746_find_pll_settings(struct tc358746 *tc358746,
return best_freq;
}
-#define TC358746_PRECISION 10
-
-static int
-tc358746_link_validate(struct v4l2_subdev *sd, struct media_link *link,
- struct v4l2_subdev_format *source_fmt,
- struct v4l2_subdev_format *sink_fmt)
-{
- struct tc358746 *tc358746 = to_tc358746(sd);
- unsigned long csi_bitrate, source_bitrate;
- struct v4l2_subdev_state *sink_state;
- struct v4l2_mbus_framefmt *mbusfmt;
- const struct tc358746_format *fmt;
- unsigned int fifo_sz, tmp, n;
- struct v4l2_subdev *source;
- s64 source_link_freq;
- int err;
-
- err = v4l2_subdev_link_validate_default(sd, link, source_fmt, sink_fmt);
- if (err)
- return err;
-
- sink_state = v4l2_subdev_lock_and_get_active_state(sd);
- mbusfmt = v4l2_subdev_state_get_format(sink_state, TC358746_SINK);
-
- /* Check the FIFO settings */
- fmt = tc358746_get_format_by_code(TC358746_SINK, mbusfmt->code);
-
- source = media_entity_to_v4l2_subdev(link->source->entity);
- source_link_freq = v4l2_get_link_freq(source->ctrl_handler, 0, 0);
- if (source_link_freq <= 0) {
- dev_err(tc358746->sd.dev,
- "Failed to query or invalid source link frequency\n");
- v4l2_subdev_unlock_state(sink_state);
- /* Return -EINVAL in case of source_link_freq is 0 */
- return source_link_freq ? : -EINVAL;
- }
- source_bitrate = source_link_freq * fmt->bus_width;
-
- csi_bitrate = tc358746->dphy_cfg.lanes * tc358746->pll_rate;
-
- dev_dbg(tc358746->sd.dev,
- "Fifo settings params: source-bitrate:%lu csi-bitrate:%lu",
- source_bitrate, csi_bitrate);
-
- /* Avoid possible FIFO overflows */
- if (csi_bitrate < source_bitrate) {
- v4l2_subdev_unlock_state(sink_state);
- return -EINVAL;
- }
-
- /* Best case */
- if (csi_bitrate == source_bitrate) {
- fifo_sz = TC358746_VB_DEFAULT_SIZE;
- tc358746->vb_size = TC358746_VB_DEFAULT_SIZE;
- goto out;
- }
-
- /*
- * Avoid possible FIFO underflow in case of
- * csi_bitrate > source_bitrate. For such case the chip has a internal
- * fifo which can be used to delay the line output.
- *
- * Fifo size calculation (excluding precision):
- *
- * fifo-sz, image-width - in bits
- * sbr - source_bitrate in bits/s
- * csir - csi_bitrate in bits/s
- *
- * image-width / csir >= (image-width - fifo-sz) / sbr
- * image-width * sbr / csir >= image-width - fifo-sz
- * fifo-sz >= image-width - image-width * sbr / csir; with n = csir/sbr
- * fifo-sz >= image-width - image-width / n
- */
-
- source_bitrate /= TC358746_PRECISION;
- n = csi_bitrate / source_bitrate;
- tmp = (mbusfmt->width * TC358746_PRECISION) / n;
- fifo_sz = mbusfmt->width - tmp;
- fifo_sz *= fmt->bpp;
- tc358746->vb_size = round_up(fifo_sz, 32);
-
-out:
- dev_dbg(tc358746->sd.dev,
- "Found FIFO size[bits]:%u -> aligned to size[bits]:%u\n",
- fifo_sz, tc358746->vb_size);
-
- v4l2_subdev_unlock_state(sink_state);
-
- return tc358746->vb_size > TC358746_VB_MAX_SIZE ? -EINVAL : 0;
-}
-
static int tc358746_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
struct v4l2_mbus_config *config)
{
@@ -1040,7 +1053,7 @@ static const struct v4l2_subdev_pad_ops tc358746_pad_ops = {
.enum_mbus_code = tc358746_enum_mbus_code,
.set_fmt = tc358746_set_fmt,
.get_fmt = v4l2_subdev_get_fmt,
- .link_validate = tc358746_link_validate,
+ .link_validate = v4l2_subdev_link_validate_default,
.get_mbus_config = tc358746_get_mbus_config,
};
@@ -1352,8 +1365,6 @@ tc358746_init_output_port(struct tc358746 *tc358746, unsigned long refclk)
if (err)
goto err;
- tc358746->vb_size = TC358746_VB_DEFAULT_SIZE;
-
return 0;
err:
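The new tc358746_calc_vb_size() above carries over the fixed-point FIFO arithmetic that the removal of tc358746_link_validate() below drops. As a sanity check of that arithmetic, here is a stand-alone worked example with made-up numbers (1920-pixel-wide RAW10 source, 400 MHz link frequency, two CSI lanes at a 2.5 GHz PLL rate); it is an illustration only, not driver code:

#include <stdio.h>

#define PRECISION 10ULL			/* mirrors TC358746_VB_PRECISION */

int main(void)
{
	unsigned long long sbr = 400000000ULL * 10;	/* source bitrate = link_freq * bus_width */
	unsigned long long csir = 2ULL * 2500000000ULL;	/* CSI bitrate = lanes * pll_rate */
	unsigned long long width = 1920, bpp = 10;
	unsigned long long n, tmp, fifo_sz, vb_size;

	sbr /= PRECISION;			/* 400000000 */
	n = csir / sbr;				/* 12 */
	tmp = width * PRECISION / n;		/* 1600 */
	fifo_sz = (width - tmp) * bpp;		/* 320 pixels of slack -> 3200 bits */
	vb_size = (fifo_sz + 31) & ~31ULL;	/* round_up(fifo_sz, 32) -> 3200, below 511 * 32 */

	printf("vb_size = %llu bits, FIFOCTL = %llu\n", vb_size, vb_size / 32);
	return 0;
}

With these numbers the driver would program FIFOCTL with 100 (3200 / 32).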
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index 3b7e5ff5b010..959590afc80f 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -2315,11 +2315,10 @@ static int tda1997x_parse_dt(struct tda1997x_state *state)
return -EINVAL;
ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep), &bus_cfg);
- if (ret) {
- of_node_put(ep);
- return ret;
- }
of_node_put(ep);
+ if (ret)
+ return ret;
+
pdata->vidout_bus_type = bus_cfg.bus_type;
/* polarity of HS/VS/DE */
diff --git a/drivers/media/i2c/vgxy61.c b/drivers/media/i2c/vgxy61.c
index d77468c8587b..5b0479f3a3c0 100644
--- a/drivers/media/i2c/vgxy61.c
+++ b/drivers/media/i2c/vgxy61.c
@@ -892,8 +892,8 @@ static u32 vgxy61_get_expo_long_max(struct vgxy61_dev *sensor,
third_rot_max_expo = (sensor->frame_length / 71) * short_expo_ratio;
/* Take the minimum from all rules */
- return min(min(first_rot_max_expo, second_rot_max_expo),
- third_rot_max_expo);
+ return min3(first_rot_max_expo, second_rot_max_expo,
+ third_rot_max_expo);
}
static int vgxy61_update_exposure(struct vgxy61_dev *sensor, u16 new_expo_long,
diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c
index 036a6375627a..0dd991d70d53 100644
--- a/drivers/media/i2c/video-i2c.c
+++ b/drivers/media/i2c/video-i2c.c
@@ -264,18 +264,8 @@ static int amg88xx_set_power(struct video_i2c_data *data, bool on)
#if IS_REACHABLE(CONFIG_HWMON)
-static const u32 amg88xx_temp_config[] = {
- HWMON_T_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info amg88xx_temp = {
- .type = hwmon_temp,
- .config = amg88xx_temp_config,
-};
-
static const struct hwmon_channel_info * const amg88xx_info[] = {
- &amg88xx_temp,
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
NULL
};
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 9244b4320558..da23e7dfeef5 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -860,6 +860,31 @@ struct cx23885_board cx23885_boards[] = {
.amux = CX25840_AUDIO7,
} },
},
+ [CX23885_BOARD_AVERMEDIA_H789C] = {
+ .name = "AVerMedia H789-C",
+ .porta = CX23885_ANALOG_VIDEO,
+ .tuner_type = TUNER_NXP_TDA18271,
+ .tuner_addr = 0x63, /* 0xc0 >> 1 */
+ .tuner_bus = 1,
+ .input = {{
+ .type = CX23885_VMUX_TELEVISION,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN5_CH2 |
+ CX25840_VIN2_CH1 |
+ CX25840_DIF_ON,
+ .amux = CX25840_AUDIO8,
+ }, {
+ .type = CX23885_VMUX_COMPOSITE1,
+ .vmux = CX25840_VIN8_CH1,
+ .amux = CX25840_AUDIO7,
+ }, {
+ .type = CX23885_VMUX_SVIDEO,
+ .vmux = CX25840_VIN8_CH1 |
+ CX25840_VIN7_CH3 |
+ CX25840_SVIDEO_ON,
+ .amux = CX25840_AUDIO7,
+ }, },
+ },
};
const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards);
@@ -1187,6 +1212,10 @@ struct cx23885_subid cx23885_subids[] = {
.subvendor = 0x1461,
.subdevice = 0x3100,
.card = CX23885_BOARD_AVERMEDIA_CE310B,
+ }, {
+ .subvendor = 0x1461,
+ .subdevice = 0xe139,
+ .card = CX23885_BOARD_AVERMEDIA_H789C,
},
};
const unsigned int cx23885_idcount = ARRAY_SIZE(cx23885_subids);
@@ -2413,6 +2442,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_VIEWCAST_260E:
case CX23885_BOARD_VIEWCAST_460E:
case CX23885_BOARD_AVERMEDIA_CE310B:
+ case CX23885_BOARD_AVERMEDIA_H789C:
dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev,
&dev->i2c_bus[2].i2c_adap,
"cx25840", 0x88 >> 1, NULL);
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index c8705d786cdd..a39f445ce22a 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -2231,6 +2231,28 @@ static void cx23885_finidev(struct pci_dev *pci_dev)
kfree(dev);
}
+static int __maybe_unused cx23885_suspend(struct device *dev_d)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev_d);
+ struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
+ struct cx23885_dev *dev = to_cx23885(v4l2_dev);
+
+ cx23885_shutdown(dev);
+
+ return 0;
+}
+
+static int __maybe_unused cx23885_resume(struct device *dev_d)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev_d);
+ struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
+ struct cx23885_dev *dev = to_cx23885(v4l2_dev);
+
+ cx23885_reset(dev);
+
+ return 0;
+}
+
static const struct pci_device_id cx23885_pci_tbl[] = {
{
/* CX23885 */
@@ -2250,11 +2272,14 @@ static const struct pci_device_id cx23885_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
+static SIMPLE_DEV_PM_OPS(cx23885_pm_ops, cx23885_suspend, cx23885_resume);
+
static struct pci_driver cx23885_pci_driver = {
- .name = "cx23885",
- .id_table = cx23885_pci_tbl,
- .probe = cx23885_initdev,
- .remove = cx23885_finidev,
+ .name = "cx23885",
+ .id_table = cx23885_pci_tbl,
+ .probe = cx23885_initdev,
+ .remove = cx23885_finidev,
+ .driver.pm = &cx23885_pm_ops,
};
static int __init cx23885_init(void)
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 35d58328db56..14d219fd1d8a 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -261,7 +261,8 @@ static int cx23885_video_mux(struct cx23885_dev *dev, unsigned int input)
(dev->board == CX23885_BOARD_AVERMEDIA_HC81R) ||
(dev->board == CX23885_BOARD_VIEWCAST_260E) ||
(dev->board == CX23885_BOARD_VIEWCAST_460E) ||
- (dev->board == CX23885_BOARD_AVERMEDIA_CE310B)) {
+ (dev->board == CX23885_BOARD_AVERMEDIA_CE310B) ||
+ (dev->board == CX23885_BOARD_AVERMEDIA_H789C)) {
/* Configure audio routing */
v4l2_subdev_call(dev->sd_cx25840, audio, s_routing,
INPUT(input)->amux, 0, 0);
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index 349462ee2c48..8ba1f306238c 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -102,6 +102,7 @@
#define CX23885_BOARD_HAUPPAUGE_QUADHD_DVB_885 60
#define CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC_885 61
#define CX23885_BOARD_AVERMEDIA_CE310B 62
+#define CX23885_BOARD_AVERMEDIA_H789C 63
#define GPIO_0 0x00000001
#define GPIO_1 0x00000002
diff --git a/drivers/media/pci/cx23885/netup-eeprom.c b/drivers/media/pci/cx23885/netup-eeprom.c
index 26dd5b3884e6..335d150d81de 100644
--- a/drivers/media/pci/cx23885/netup-eeprom.c
+++ b/drivers/media/pci/cx23885/netup-eeprom.c
@@ -49,35 +49,6 @@ int netup_eeprom_read(struct i2c_adapter *i2c_adap, u8 addr)
return buf[1];
};
-int netup_eeprom_write(struct i2c_adapter *i2c_adap, u8 addr, u8 data)
-{
- int ret;
- unsigned char bufw[2];
-
- /* Write into EEPROM */
- struct i2c_msg msg[] = {
- {
- .addr = EEPROM_I2C_ADDR,
- .flags = 0,
- .buf = &bufw[0],
- .len = 2
- }
- };
-
- bufw[0] = addr;
- bufw[1] = data;
-
- ret = i2c_transfer(i2c_adap, msg, 1);
-
- if (ret != 1) {
- pr_err("eeprom i2c write error, status=%d\n", ret);
- return -1;
- }
-
- mdelay(10); /* prophylactic delay, datasheet write cycle time = 5 ms */
- return 0;
-};
-
void netup_get_card_info(struct i2c_adapter *i2c_adap,
struct netup_card_info *cinfo)
{
diff --git a/drivers/media/pci/cx23885/netup-eeprom.h b/drivers/media/pci/cx23885/netup-eeprom.h
index 549a033679f7..fb8217eb455c 100644
--- a/drivers/media/pci/cx23885/netup-eeprom.h
+++ b/drivers/media/pci/cx23885/netup-eeprom.h
@@ -21,7 +21,6 @@ struct netup_card_info {
};
extern int netup_eeprom_read(struct i2c_adapter *i2c_adap, u8 addr);
-extern int netup_eeprom_write(struct i2c_adapter *i2c_adap, u8 addr, u8 data);
extern void netup_get_card_info(struct i2c_adapter *i2c_adap,
struct netup_card_info *cinfo);
diff --git a/drivers/media/pci/cx88/cx88-input.c b/drivers/media/pci/cx88/cx88-input.c
index a04a1d33fadb..b9f2c14d62b4 100644
--- a/drivers/media/pci/cx88/cx88-input.c
+++ b/drivers/media/pci/cx88/cx88-input.c
@@ -190,8 +190,7 @@ static int __cx88_ir_start(void *priv)
ir = core->ir;
if (ir->polling) {
- hrtimer_init(&ir->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ir->timer.function = cx88_ir_work;
+ hrtimer_setup(&ir->timer, cx88_ir_work, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer_start(&ir->timer,
ktime_set(0, ir->polling * 1000000),
HRTIMER_MODE_REL);
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index dd73d534ac49..0c365eb59085 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -309,12 +309,17 @@ static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
unsigned int bpp, unsigned int lanes)
{
struct device *dev = &cio2->pci_dev->dev;
+ struct media_pad *src_pad;
s64 freq;
- if (!q->sensor)
- return -ENODEV;
+ src_pad = media_entity_remote_source_pad_unique(&q->subdev.entity);
+ if (IS_ERR(src_pad)) {
+ dev_err(dev, "can't get source pad of %s (%ld)\n",
+ q->subdev.name, PTR_ERR(src_pad));
+ return PTR_ERR(src_pad);
+ }
- freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes * 2);
+ freq = v4l2_get_link_freq(src_pad, bpp, lanes * 2);
if (freq < 0) {
dev_err(dev, "error %lld, invalid link_freq\n", freq);
return freq;
diff --git a/drivers/media/pci/intel/ipu6/ipu6-dma.c b/drivers/media/pci/intel/ipu6/ipu6-dma.c
index b34022bad83b..1ca60ca79dba 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-dma.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-dma.c
@@ -457,36 +457,3 @@ void ipu6_dma_unmap_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt,
ipu6_dma_unmap_sg(sys, sgt->sgl, sgt->nents, dir, attrs);
}
EXPORT_SYMBOL_NS_GPL(ipu6_dma_unmap_sgtable, "INTEL_IPU6");
-
-/*
- * Create scatter-list for the already allocated DMA buffer
- */
-int ipu6_dma_get_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t handle, size_t size,
- unsigned long attrs)
-{
- struct device *dev = &sys->auxdev.dev;
- struct ipu6_mmu *mmu = sys->mmu;
- struct vm_info *info;
- int n_pages;
- int ret = 0;
-
- info = get_vm_info(mmu, handle);
- if (!info)
- return -EFAULT;
-
- if (!info->vaddr)
- return -EFAULT;
-
- if (WARN_ON(!info->pages))
- return -ENOMEM;
-
- n_pages = PFN_UP(size);
-
- ret = sg_alloc_table_from_pages(sgt, info->pages, n_pages, 0, size,
- GFP_KERNEL);
- if (ret)
- dev_warn(dev, "get sgt table failed\n");
-
- return ret;
-}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-dma.h b/drivers/media/pci/intel/ipu6/ipu6-dma.h
index b51244add9e6..2882850d9366 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-dma.h
+++ b/drivers/media/pci/intel/ipu6/ipu6-dma.h
@@ -43,7 +43,4 @@ int ipu6_dma_map_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt,
enum dma_data_direction dir, unsigned long attrs);
void ipu6_dma_unmap_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt,
enum dma_data_direction dir, unsigned long attrs);
-int ipu6_dma_get_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t handle, size_t size,
- unsigned long attrs);
#endif /* IPU6_DMA_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
index 051898ce53f4..da8581a37e22 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
@@ -80,25 +80,19 @@ static const struct ipu6_csi2_error dphy_rx_errors[] = {
s64 ipu6_isys_csi2_get_link_freq(struct ipu6_isys_csi2 *csi2)
{
struct media_pad *src_pad;
- struct v4l2_subdev *ext_sd;
- struct device *dev;
if (!csi2)
return -EINVAL;
- dev = &csi2->isys->adev->auxdev.dev;
src_pad = media_entity_remote_source_pad_unique(&csi2->asd.sd.entity);
if (IS_ERR(src_pad)) {
- dev_err(dev, "can't get source pad of %s (%ld)\n",
+ dev_err(&csi2->isys->adev->auxdev.dev,
+ "can't get source pad of %s (%ld)\n",
csi2->asd.sd.name, PTR_ERR(src_pad));
return PTR_ERR(src_pad);
}
- ext_sd = media_entity_to_v4l2_subdev(src_pad->entity);
- if (WARN(!ext_sd, "Failed to get subdev for %s\n", csi2->asd.sd.name))
- return -ENODEV;
-
- return v4l2_get_link_freq(ext_sd->ctrl_handler, 0, 0);
+ return v4l2_get_link_freq(src_pad, 0, 0);
}
static int csi2_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
index bbb66b56ee88..72f5f987ef48 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
@@ -804,8 +804,6 @@ void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream,
static const struct vb2_ops ipu6_isys_queue_ops = {
.queue_setup = ipu6_isys_queue_setup,
- .wait_prepare = vb2_ops_wait_prepare,
- .wait_finish = vb2_ops_wait_finish,
.buf_init = ipu6_isys_buf_init,
.buf_prepare = ipu6_isys_buf_prepare,
.buf_cleanup = ipu6_isys_buf_cleanup,
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
index 387963529adb..959869a88556 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
@@ -1296,6 +1296,7 @@ int ipu6_isys_video_init(struct ipu6_isys_video *av)
av->vdev.release = video_device_release_empty;
av->vdev.fops = &isys_fops;
av->vdev.v4l2_dev = &av->isys->v4l2_dev;
+ av->vdev.dev_parent = &av->isys->adev->isp->pdev->dev;
if (!av->vdev.ioctl_ops)
av->vdev.ioctl_ops = &ipu6_v4l2_ioctl_ops;
av->vdev.queue = &av->aq.vbq;
diff --git a/drivers/media/pci/intel/ivsc/mei_csi.c b/drivers/media/pci/intel/ivsc/mei_csi.c
index 6a893c4547b2..92d871a378ba 100644
--- a/drivers/media/pci/intel/ivsc/mei_csi.c
+++ b/drivers/media/pci/intel/ivsc/mei_csi.c
@@ -35,8 +35,6 @@
#define MEI_CSI_ENTITY_NAME "Intel IVSC CSI"
-#define MEI_CSI_LINK_FREQ_400MHZ 400000000ULL
-
/* the 5s used here is based on experiment */
#define CSI_CMD_TIMEOUT (5 * HZ)
/* to setup CSI-2 link an extra delay needed and determined experimentally */
@@ -121,14 +119,13 @@ struct mei_csi {
struct mutex lock;
struct v4l2_subdev subdev;
- struct v4l2_subdev *remote;
+ struct media_pad *remote;
struct v4l2_async_notifier notifier;
struct v4l2_ctrl_handler ctrl_handler;
struct v4l2_ctrl *freq_ctrl;
struct v4l2_ctrl *privacy_ctrl;
/* lock for v4l2 controls */
struct mutex ctrl_lock;
- unsigned int remote_pad;
/* start streaming or not */
int streaming;
@@ -147,10 +144,6 @@ static const struct v4l2_mbus_framefmt mei_csi_format_mbus_default = {
.field = V4L2_FIELD_NONE,
};
-static s64 link_freq_menu_items[] = {
- MEI_CSI_LINK_FREQ_400MHZ
-};
-
static inline struct mei_csi *notifier_to_csi(struct v4l2_async_notifier *n)
{
return container_of(n, struct mei_csi, notifier);
@@ -161,11 +154,6 @@ static inline struct mei_csi *sd_to_csi(struct v4l2_subdev *sd)
return container_of(sd, struct mei_csi, subdev);
}
-static inline struct mei_csi *ctrl_to_csi(struct v4l2_ctrl *ctrl)
-{
- return container_of(ctrl->handler, struct mei_csi, ctrl_handler);
-}
-
/* send a command to firmware and mutex must be held by caller */
static int mei_csi_send(struct mei_csi *csi, u8 *buf, size_t len)
{
@@ -286,11 +274,13 @@ static void mei_csi_rx(struct mei_cl_device *cldev)
static int mei_csi_set_stream(struct v4l2_subdev *sd, int enable)
{
struct mei_csi *csi = sd_to_csi(sd);
+ struct v4l2_subdev *remote_sd =
+ media_entity_to_v4l2_subdev(csi->remote->entity);
s64 freq;
int ret;
if (enable && csi->streaming == 0) {
- freq = v4l2_get_link_freq(csi->remote->ctrl_handler, 0, 0);
+ freq = v4l2_get_link_freq(csi->remote, 0, 0);
if (freq < 0) {
dev_err(&csi->cldev->dev,
"error %lld, invalid link_freq\n", freq);
@@ -309,11 +299,11 @@ static int mei_csi_set_stream(struct v4l2_subdev *sd, int enable)
if (ret < 0)
goto err_switch;
- ret = v4l2_subdev_call(csi->remote, video, s_stream, 1);
+ ret = v4l2_subdev_call(remote_sd, video, s_stream, 1);
if (ret)
goto err_switch;
} else if (!enable && csi->streaming == 1) {
- v4l2_subdev_call(csi->remote, video, s_stream, 0);
+ v4l2_subdev_call(remote_sd, video, s_stream, 0);
/* switch CSI-2 link to IVSC */
ret = csi_set_link_owner(csi, CSI_LINK_IVSC);
@@ -470,34 +460,30 @@ static int mei_csi_set_fmt(struct v4l2_subdev *sd,
return 0;
}
-static int mei_csi_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+static int mei_csi_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_config *mbus_config)
{
- struct mei_csi *csi = ctrl_to_csi(ctrl);
+ struct mei_csi *csi = sd_to_csi(sd);
+ unsigned int i;
s64 freq;
- if (ctrl->id == V4L2_CID_LINK_FREQ) {
- if (!csi->remote)
- return -EINVAL;
+ mbus_config->type = V4L2_MBUS_CSI2_DPHY;
+ for (i = 0; i < V4L2_MBUS_CSI2_MAX_DATA_LANES; i++)
+ mbus_config->bus.mipi_csi2.data_lanes[i] = i + 1;
+ mbus_config->bus.mipi_csi2.num_data_lanes = csi->nr_of_lanes;
- freq = v4l2_get_link_freq(csi->remote->ctrl_handler, 0, 0);
- if (freq < 0) {
- dev_err(&csi->cldev->dev,
- "error %lld, invalid link_freq\n", freq);
- return -EINVAL;
- }
-
- link_freq_menu_items[0] = freq;
- ctrl->val = 0;
-
- return 0;
+ freq = v4l2_get_link_freq(csi->remote, 0, 0);
+ if (freq < 0) {
+ dev_err(&csi->cldev->dev,
+ "error %lld, invalid link_freq\n", freq);
+ return -EINVAL;
}
- return -EINVAL;
-}
+ csi->link_freq = freq;
+ mbus_config->link_freq = freq;
-static const struct v4l2_ctrl_ops mei_csi_ctrl_ops = {
- .g_volatile_ctrl = mei_csi_g_volatile_ctrl,
-};
+ return 0;
+}
static const struct v4l2_subdev_video_ops mei_csi_video_ops = {
.s_stream = mei_csi_set_stream,
@@ -506,6 +492,7 @@ static const struct v4l2_subdev_video_ops mei_csi_video_ops = {
static const struct v4l2_subdev_pad_ops mei_csi_pad_ops = {
.get_fmt = v4l2_subdev_get_fmt,
.set_fmt = mei_csi_set_fmt,
+ .get_mbus_config = mei_csi_get_mbus_config,
};
static const struct v4l2_subdev_ops mei_csi_subdev_ops = {
@@ -533,8 +520,7 @@ static int mei_csi_notify_bound(struct v4l2_async_notifier *notifier,
if (pad < 0)
return pad;
- csi->remote = subdev;
- csi->remote_pad = pad;
+ csi->remote = &subdev->entity.pads[pad];
return media_create_pad_link(&subdev->entity, pad,
&csi->subdev.entity, CSI_PAD_SINK,
@@ -558,28 +544,16 @@ static const struct v4l2_async_notifier_operations mei_csi_notify_ops = {
static int mei_csi_init_controls(struct mei_csi *csi)
{
- u32 max;
int ret;
mutex_init(&csi->ctrl_lock);
- ret = v4l2_ctrl_handler_init(&csi->ctrl_handler, 2);
+ ret = v4l2_ctrl_handler_init(&csi->ctrl_handler, 1);
if (ret)
return ret;
csi->ctrl_handler.lock = &csi->ctrl_lock;
- max = ARRAY_SIZE(link_freq_menu_items) - 1;
- csi->freq_ctrl = v4l2_ctrl_new_int_menu(&csi->ctrl_handler,
- &mei_csi_ctrl_ops,
- V4L2_CID_LINK_FREQ,
- max,
- 0,
- link_freq_menu_items);
- if (csi->freq_ctrl)
- csi->freq_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY |
- V4L2_CTRL_FLAG_VOLATILE;
-
csi->privacy_ctrl = v4l2_ctrl_new_std(&csi->ctrl_handler, NULL,
V4L2_CID_PRIVACY, 0, 1, 1, 0);
if (csi->privacy_ctrl)
diff --git a/drivers/media/pci/mgb4/mgb4_cmt.c b/drivers/media/pci/mgb4/mgb4_cmt.c
index a25b68403bc6..c22ef51436ed 100644
--- a/drivers/media/pci/mgb4/mgb4_cmt.c
+++ b/drivers/media/pci/mgb4/mgb4_cmt.c
@@ -135,8 +135,8 @@ static const u16 cmt_vals_out[][15] = {
};
static const u16 cmt_vals_in[][13] = {
- {0x1082, 0x0000, 0x5104, 0x0000, 0x11C7, 0x0000, 0x1041, 0x02BC, 0x7C01, 0xFFE9, 0x9900, 0x9908, 0x8100},
{0x1104, 0x0000, 0x9208, 0x0000, 0x138E, 0x0000, 0x1041, 0x015E, 0x7C01, 0xFFE9, 0x0100, 0x0908, 0x1000},
+ {0x1082, 0x0000, 0x5104, 0x0000, 0x11C7, 0x0000, 0x1041, 0x02BC, 0x7C01, 0xFFE9, 0x9900, 0x9908, 0x8100},
};
static const u32 cmt_addrs_out[][15] = {
@@ -206,10 +206,11 @@ u32 mgb4_cmt_set_vout_freq(struct mgb4_vout_dev *voutdev, unsigned int freq)
mgb4_write_reg(video, regs->config, 0x1 | (config & ~0x3));
+ mgb4_mask_reg(video, regs->config, 0x100, 0x100);
+
for (i = 0; i < ARRAY_SIZE(cmt_addrs_out[0]); i++)
mgb4_write_reg(&voutdev->mgbdev->cmt, addr[i], reg_set[i]);
- mgb4_mask_reg(video, regs->config, 0x100, 0x100);
mgb4_mask_reg(video, regs->config, 0x100, 0x0);
mgb4_write_reg(video, regs->config, config & ~0x1);
@@ -236,10 +237,11 @@ void mgb4_cmt_set_vin_freq_range(struct mgb4_vin_dev *vindev,
mgb4_write_reg(video, regs->config, 0x1 | (config & ~0x3));
+ mgb4_mask_reg(video, regs->config, 0x1000, 0x1000);
+
for (i = 0; i < ARRAY_SIZE(cmt_addrs_in[0]); i++)
mgb4_write_reg(&vindev->mgbdev->cmt, addr[i], reg_set[i]);
- mgb4_mask_reg(video, regs->config, 0x1000, 0x1000);
mgb4_mask_reg(video, regs->config, 0x1000, 0x0);
mgb4_write_reg(video, regs->config, config & ~0x1);
diff --git a/drivers/media/pci/mgb4/mgb4_core.c b/drivers/media/pci/mgb4/mgb4_core.c
index f90ffc4dad52..3ce6b717ca32 100644
--- a/drivers/media/pci/mgb4/mgb4_core.c
+++ b/drivers/media/pci/mgb4/mgb4_core.c
@@ -406,8 +406,9 @@ static int get_module_version(struct mgb4_dev *mgbdev)
dev_err(dev, "unknown module type\n");
return -EINVAL;
}
- fw_version = mgb4_read_reg(&mgbdev->video, 0xC4);
- if (fw_version >> 24 != mgbdev->module_version >> 4) {
+ fw_version = mgb4_read_reg(&mgbdev->video, 0xC4) >> 24;
+ if ((MGB4_IS_FPDL3(mgbdev) && fw_version != 1) ||
+ (MGB4_IS_GMSL(mgbdev) && fw_version != 2)) {
dev_err(dev, "module/firmware type mismatch\n");
return -EINVAL;
}
@@ -599,14 +600,18 @@ static int mgb4_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rv = get_module_version(mgbdev);
if (rv < 0)
goto exit;
+ /* Propagate the module type (version) to the FPGA */
+ mgb4_write_reg(&mgbdev->video, 0xD4, mgbdev->module_version);
/* Video input v4l2 devices */
for (i = 0; i < MGB4_VIN_DEVICES; i++)
mgbdev->vin[i] = mgb4_vin_create(mgbdev, i);
/* Video output v4l2 devices */
- for (i = 0; i < MGB4_VOUT_DEVICES; i++)
- mgbdev->vout[i] = mgb4_vout_create(mgbdev, i);
+ if (MGB4_HAS_VOUT(mgbdev)) {
+ for (i = 0; i < MGB4_VOUT_DEVICES; i++)
+ mgbdev->vout[i] = mgb4_vout_create(mgbdev, i);
+ }
/* Triggers */
mgbdev->indio_dev = mgb4_trigger_create(mgbdev);
diff --git a/drivers/media/pci/mgb4/mgb4_core.h b/drivers/media/pci/mgb4/mgb4_core.h
index e86742d7b6c4..cc24068400a2 100644
--- a/drivers/media/pci/mgb4/mgb4_core.h
+++ b/drivers/media/pci/mgb4/mgb4_core.h
@@ -19,9 +19,13 @@
#define MGB4_VOUT_DEVICES 2
#define MGB4_IS_GMSL(mgbdev) \
- ((mgbdev)->module_version >> 4 == 2)
+ ((((mgbdev)->module_version >> 4) >= 2) && \
+ (((mgbdev)->module_version >> 4) <= 4))
#define MGB4_IS_FPDL3(mgbdev) \
- ((mgbdev)->module_version >> 4 == 1)
+ (((mgbdev)->module_version >> 4) == 1)
+#define MGB4_HAS_VOUT(mgbdev) \
+ ((((mgbdev)->module_version >> 4) >= 1) && \
+ (((mgbdev)->module_version >> 4) <= 3))
struct mgb4_dma_channel {
struct dma_chan *chan;
diff --git a/drivers/media/pci/mgb4/mgb4_regs.c b/drivers/media/pci/mgb4/mgb4_regs.c
index 31befd722d72..b45537dbfafa 100644
--- a/drivers/media/pci/mgb4/mgb4_regs.c
+++ b/drivers/media/pci/mgb4/mgb4_regs.c
@@ -5,6 +5,7 @@
*/
#include <linux/ioport.h>
+#include <linux/errno.h>
#include "mgb4_regs.h"
int mgb4_regs_map(struct resource *res, struct mgb4_regs *regs)
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 85d2627776b6..9287faafdce5 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -85,6 +85,7 @@ source "drivers/media/platform/rockchip/Kconfig"
source "drivers/media/platform/samsung/Kconfig"
source "drivers/media/platform/st/Kconfig"
source "drivers/media/platform/sunxi/Kconfig"
+source "drivers/media/platform/synopsys/Kconfig"
source "drivers/media/platform/ti/Kconfig"
source "drivers/media/platform/verisilicon/Kconfig"
source "drivers/media/platform/via/Kconfig"
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index ace4e34483dd..6fd7db0541c7 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -28,6 +28,7 @@ obj-y += rockchip/
obj-y += samsung/
obj-y += st/
obj-y += sunxi/
+obj-y += synopsys/
obj-y += ti/
obj-y += verisilicon/
obj-y += via/
diff --git a/drivers/media/platform/allegro-dvt/allegro-core.c b/drivers/media/platform/allegro-dvt/allegro-core.c
index e491399afcc9..eb03df0d8652 100644
--- a/drivers/media/platform/allegro-dvt/allegro-core.c
+++ b/drivers/media/platform/allegro-dvt/allegro-core.c
@@ -3912,6 +3912,7 @@ static int allegro_probe(struct platform_device *pdev)
if (ret < 0) {
v4l2_err(&dev->v4l2_dev,
"failed to request firmware: %d\n", ret);
+ v4l2_device_unregister(&dev->v4l2_dev);
return ret;
}
diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
index 4d64df829e75..cebcae196eec 100644
--- a/drivers/media/platform/cadence/cdns-csi2rx.c
+++ b/drivers/media/platform/cadence/cdns-csi2rx.c
@@ -164,6 +164,8 @@ static void csi2rx_reset(struct csi2rx_priv *csi2rx)
static int csi2rx_configure_ext_dphy(struct csi2rx_priv *csi2rx)
{
+ struct media_pad *src_pad =
+ &csi2rx->source_subdev->entity.pads[csi2rx->source_pad];
union phy_configure_opts opts = { };
struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
struct v4l2_subdev_format sd_fmt = {
@@ -181,7 +183,7 @@ static int csi2rx_configure_ext_dphy(struct csi2rx_priv *csi2rx)
fmt = csi2rx_get_fmt_by_code(sd_fmt.format.code);
- link_freq = v4l2_get_link_freq(csi2rx->source_subdev->ctrl_handler,
+ link_freq = v4l2_get_link_freq(src_pad,
fmt->bpp, 2 * csi2rx->num_lanes);
if (link_freq < 0)
return link_freq;
diff --git a/drivers/media/platform/chips-media/coda/coda-common.c b/drivers/media/platform/chips-media/coda/coda-common.c
index 289a076c3bcc..e6e3f5ec24f6 100644
--- a/drivers/media/platform/chips-media/coda/coda-common.c
+++ b/drivers/media/platform/chips-media/coda/coda-common.c
@@ -3340,6 +3340,7 @@ static int coda_runtime_resume(struct device *dev)
static const struct dev_pm_ops coda_pm_ops = {
SET_RUNTIME_PM_OPS(NULL, coda_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};
static struct platform_driver coda_driver = {
diff --git a/drivers/media/platform/chips-media/wave5/wave5-hw.c b/drivers/media/platform/chips-media/wave5/wave5-hw.c
index c8a905994109..d94cf84c3ee5 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-hw.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-hw.c
@@ -585,7 +585,7 @@ int wave5_vpu_build_up_dec_param(struct vpu_instance *inst,
vpu_write_reg(inst->dev, W5_CMD_NUM_CQ_DEPTH_M1,
WAVE521_COMMAND_QUEUE_DEPTH - 1);
}
-
+ vpu_write_reg(inst->dev, W5_CMD_ERR_CONCEAL, 0);
ret = send_firmware_command(inst, W5_CREATE_INSTANCE, true, NULL, NULL);
if (ret) {
wave5_vdi_free_dma_memory(vpu_dev, &p_dec_info->vb_work);
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c b/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
index d3ff420c52ce..fd71f0c43ac3 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
@@ -1345,10 +1345,24 @@ static int wave5_vpu_dec_start_streaming(struct vb2_queue *q, unsigned int count
if (ret)
goto free_bitstream_vbuf;
} else if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ struct dec_initial_info *initial_info =
+ &inst->codec_info->dec_info.initial_info;
+
if (inst->state == VPU_INST_STATE_STOP)
ret = switch_state(inst, VPU_INST_STATE_INIT_SEQ);
if (ret)
goto return_buffers;
+
+ if (inst->state == VPU_INST_STATE_INIT_SEQ &&
+ inst->dev->product_code == WAVE521C_CODE) {
+ if (initial_info->luma_bitdepth != 8) {
+ dev_info(inst->dev->dev, "%s: no support for %d bit depth",
+ __func__, initial_info->luma_bitdepth);
+ ret = -EINVAL;
+ goto return_buffers;
+ }
+ }
+
}
pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
@@ -1369,6 +1383,16 @@ static int streamoff_output(struct vb2_queue *q)
struct vb2_v4l2_buffer *buf;
int ret;
dma_addr_t new_rd_ptr;
+ struct dec_output_info dec_info;
+ unsigned int i;
+
+ for (i = 0; i < v4l2_m2m_num_dst_bufs_ready(m2m_ctx); i++) {
+ ret = wave5_vpu_dec_set_disp_flag(inst, i);
+ if (ret)
+ dev_dbg(inst->dev->dev,
+ "%s: Setting display flag of buf index: %u, fail: %d\n",
+ __func__, i, ret);
+ }
while ((buf = v4l2_m2m_src_buf_remove(m2m_ctx))) {
dev_dbg(inst->dev->dev, "%s: (Multiplanar) buf type %4u | index %4u\n",
@@ -1376,6 +1400,11 @@ static int streamoff_output(struct vb2_queue *q)
v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
}
+ while (wave5_vpu_dec_get_output_info(inst, &dec_info) == 0) {
+ if (dec_info.index_frame_display >= 0)
+ wave5_vpu_dec_set_disp_flag(inst, dec_info.index_frame_display);
+ }
+
ret = wave5_vpu_flush_instance(inst);
if (ret)
return ret;
@@ -1459,7 +1488,7 @@ static void wave5_vpu_dec_stop_streaming(struct vb2_queue *q)
break;
if (wave5_vpu_dec_get_output_info(inst, &dec_output_info))
- dev_dbg(inst->dev->dev, "Getting decoding results from fw, fail\n");
+ dev_dbg(inst->dev->dev, "there is no output info\n");
}
v4l2_m2m_update_stop_streaming_state(m2m_ctx, q);
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu.c b/drivers/media/platform/chips-media/wave5/wave5-vpu.c
index d1320298a0f7..e1715d3f43b0 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpu.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu.c
@@ -55,12 +55,12 @@ static void wave5_vpu_handle_irq(void *dev_id)
struct vpu_device *dev = dev_id;
irq_reason = wave5_vdi_read_register(dev, W5_VPU_VINT_REASON);
+ seq_done = wave5_vdi_read_register(dev, W5_RET_SEQ_DONE_INSTANCE_INFO);
+ cmd_done = wave5_vdi_read_register(dev, W5_RET_QUEUE_CMD_DONE_INST);
wave5_vdi_write_register(dev, W5_VPU_VINT_REASON_CLR, irq_reason);
wave5_vdi_write_register(dev, W5_VPU_VINT_CLEAR, 0x1);
list_for_each_entry(inst, &dev->instances, list) {
- seq_done = wave5_vdi_read_register(dev, W5_RET_SEQ_DONE_INSTANCE_INFO);
- cmd_done = wave5_vdi_read_register(dev, W5_RET_QUEUE_CMD_DONE_INST);
if (irq_reason & BIT(INT_WAVE5_INIT_SEQ) ||
irq_reason & BIT(INT_WAVE5_ENC_SET_PARAM)) {
@@ -269,8 +269,8 @@ static int wave5_vpu_probe(struct platform_device *pdev)
dev->irq = platform_get_irq(pdev, 0);
if (dev->irq < 0) {
dev_err(&pdev->dev, "failed to get irq resource, falling back to polling\n");
- hrtimer_init(&dev->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
- dev->hrtimer.function = &wave5_vpu_timer_callback;
+ hrtimer_setup(&dev->hrtimer, &wave5_vpu_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
dev->worker = kthread_run_worker(0, "vpu_irq_thread");
if (IS_ERR(dev->worker)) {
dev_err(&pdev->dev, "failed to create vpu irq worker\n");
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpuapi.c b/drivers/media/platform/chips-media/wave5/wave5-vpuapi.c
index e16b990041c2..e5e879a13e8b 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpuapi.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpuapi.c
@@ -75,6 +75,16 @@ int wave5_vpu_flush_instance(struct vpu_instance *inst)
inst->type == VPU_INST_TYPE_DEC ? "DECODER" : "ENCODER", inst->id);
mutex_unlock(&inst->dev->hw_lock);
return -ETIMEDOUT;
+ } else if (ret == -EBUSY) {
+ struct dec_output_info dec_info;
+
+ mutex_unlock(&inst->dev->hw_lock);
+ wave5_vpu_dec_get_output_info(inst, &dec_info);
+ ret = mutex_lock_interruptible(&inst->dev->hw_lock);
+ if (ret)
+ return ret;
+ if (dec_info.index_frame_display > 0)
+ wave5_vpu_dec_set_disp_flag(inst, dec_info.index_frame_display);
}
} while (ret != 0);
mutex_unlock(&inst->dev->hw_lock);
diff --git a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c
index ff23b225db70..1b0bc47355c0 100644
--- a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c
+++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c
@@ -79,8 +79,11 @@ struct mtk_vcodec_fw *mtk_vcodec_fw_scp_init(void *priv, enum mtk_vcodec_fw_use
}
fw = devm_kzalloc(&plat_dev->dev, sizeof(*fw), GFP_KERNEL);
- if (!fw)
+ if (!fw) {
+ scp_put(scp);
return ERR_PTR(-ENOMEM);
+ }
+
fw->type = SCP;
fw->ops = &mtk_vcodec_rproc_msg;
fw->scp = scp;
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
index f8145998fcaf..8522f71fc901 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
@@ -594,7 +594,11 @@ static int h264_enc_init(struct mtk_vcodec_enc_ctx *ctx)
inst->ctx = ctx;
inst->vpu_inst.ctx = ctx;
- inst->vpu_inst.id = is_ext ? SCP_IPI_VENC_H264 : IPI_VENC_H264;
+ if (is_ext)
+ inst->vpu_inst.id = SCP_IPI_VENC_H264;
+ else
+ inst->vpu_inst.id = IPI_VENC_H264;
+
inst->hw_base = mtk_vcodec_get_reg_addr(inst->ctx->dev->reg_base, VENC_SYS);
ret = vpu_enc_init(&inst->vpu_inst);
diff --git a/drivers/media/platform/nuvoton/npcm-video.c b/drivers/media/platform/nuvoton/npcm-video.c
index 024cd8ee1709..7a9d8928ae40 100644
--- a/drivers/media/platform/nuvoton/npcm-video.c
+++ b/drivers/media/platform/nuvoton/npcm-video.c
@@ -1648,8 +1648,8 @@ rel_ctrl_handler:
static int npcm_video_ece_init(struct npcm_video *video)
{
+ struct device_node *ece_node __free(device_node) = NULL;
struct device *dev = video->dev;
- struct device_node *ece_node;
struct platform_device *ece_pdev;
void __iomem *regs;
@@ -1669,7 +1669,7 @@ static int npcm_video_ece_init(struct npcm_video *video)
dev_err(dev, "Failed to find ECE device\n");
return -ENODEV;
}
- of_node_put(ece_node);
+ struct device *ece_dev __free(put_device) = &ece_pdev->dev;
regs = devm_platform_ioremap_resource(ece_pdev, 0);
if (IS_ERR(regs)) {
@@ -1684,7 +1684,7 @@ static int npcm_video_ece_init(struct npcm_video *video)
return PTR_ERR(video->ece.regmap);
}
- video->ece.reset = devm_reset_control_get(&ece_pdev->dev, NULL);
+ video->ece.reset = devm_reset_control_get(ece_dev, NULL);
if (IS_ERR(video->ece.reset)) {
dev_err(dev, "Failed to get ECE reset control in DTS\n");
return PTR_ERR(video->ece.reset);
diff --git a/drivers/media/platform/nxp/imx-mipi-csis.c b/drivers/media/platform/nxp/imx-mipi-csis.c
index 29523bb84d95..d060eadebc7a 100644
--- a/drivers/media/platform/nxp/imx-mipi-csis.c
+++ b/drivers/media/platform/nxp/imx-mipi-csis.c
@@ -597,12 +597,13 @@ static void __mipi_csis_set_format(struct mipi_csis_device *csis,
static int mipi_csis_calculate_params(struct mipi_csis_device *csis,
const struct csis_pix_format *csis_fmt)
{
+ struct media_pad *src_pad =
+ &csis->source.sd->entity.pads[csis->source.pad->index];
s64 link_freq;
u32 lane_rate;
/* Calculate the line rate from the pixel rate. */
- link_freq = v4l2_get_link_freq(csis->source.sd->ctrl_handler,
- csis_fmt->width,
+ link_freq = v4l2_get_link_freq(src_pad, csis_fmt->width,
csis->bus.num_data_lanes * 2);
if (link_freq < 0) {
dev_err(csis->dev, "Unable to obtain link frequency: %d\n",
diff --git a/drivers/media/platform/nxp/imx8mq-mipi-csi2.c b/drivers/media/platform/nxp/imx8mq-mipi-csi2.c
index 1f2657cf6e82..a8bcf60e2f37 100644
--- a/drivers/media/platform/nxp/imx8mq-mipi-csi2.c
+++ b/drivers/media/platform/nxp/imx8mq-mipi-csi2.c
@@ -287,6 +287,7 @@ static int imx8mq_mipi_csi_calc_hs_settle(struct csi_state *state,
struct v4l2_subdev_state *sd_state,
u32 *hs_settle)
{
+ struct media_pad *src_pad;
s64 link_freq;
u32 lane_rate;
unsigned long esc_clk_rate;
@@ -294,13 +295,19 @@ static int imx8mq_mipi_csi_calc_hs_settle(struct csi_state *state,
const struct v4l2_mbus_framefmt *fmt;
const struct csi2_pix_format *csi2_fmt;
+ src_pad = media_entity_remote_source_pad_unique(&sd_state->sd->entity);
+ if (IS_ERR(src_pad)) {
+ dev_err(state->dev, "can't get source pad of %s (%ld)\n",
+ sd_state->sd->name, PTR_ERR(src_pad));
+ return PTR_ERR(src_pad);
+ }
+
/* Calculate the line rate from the pixel rate. */
fmt = v4l2_subdev_state_get_format(sd_state, MIPI_CSI2_PAD_SINK);
csi2_fmt = find_csi2_format(fmt->code);
- link_freq = v4l2_get_link_freq(state->src_sd->ctrl_handler,
- csi2_fmt->width,
+ link_freq = v4l2_get_link_freq(src_pad, csi2_fmt->width,
state->bus.num_data_lanes * 2);
if (link_freq < 0) {
dev_err(state->dev, "Unable to obtain link frequency: %d\n",
diff --git a/drivers/media/platform/qcom/Kconfig b/drivers/media/platform/qcom/Kconfig
index cc5799b9ea00..4f4d3a68e6e5 100644
--- a/drivers/media/platform/qcom/Kconfig
+++ b/drivers/media/platform/qcom/Kconfig
@@ -3,4 +3,5 @@
comment "Qualcomm media platform drivers"
source "drivers/media/platform/qcom/camss/Kconfig"
+source "drivers/media/platform/qcom/iris/Kconfig"
source "drivers/media/platform/qcom/venus/Kconfig"
diff --git a/drivers/media/platform/qcom/Makefile b/drivers/media/platform/qcom/Makefile
index 4f055c396e04..ea2221a202c0 100644
--- a/drivers/media/platform/qcom/Makefile
+++ b/drivers/media/platform/qcom/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y += camss/
+obj-y += iris/
obj-y += venus/
diff --git a/drivers/media/platform/qcom/camss/Makefile b/drivers/media/platform/qcom/camss/Makefile
index e636968a1126..f6db5b3b5ace 100644
--- a/drivers/media/platform/qcom/camss/Makefile
+++ b/drivers/media/platform/qcom/camss/Makefile
@@ -7,6 +7,7 @@ qcom-camss-objs += \
camss-csid-4-1.o \
camss-csid-4-7.o \
camss-csid-gen2.o \
+ camss-csid-780.o \
camss-csiphy-2ph-1-0.o \
camss-csiphy-3ph-1-0.o \
camss-csiphy.o \
@@ -16,6 +17,7 @@ qcom-camss-objs += \
camss-vfe-4-8.o \
camss-vfe-17x.o \
camss-vfe-480.o \
+ camss-vfe-780.o \
camss-vfe-gen1.o \
camss-vfe.o \
camss-video.o \
diff --git a/drivers/media/platform/qcom/camss/camss-csid-4-1.c b/drivers/media/platform/qcom/camss/camss-csid-4-1.c
index c95861420502..6998e1c52895 100644
--- a/drivers/media/platform/qcom/camss/camss-csid-4-1.c
+++ b/drivers/media/platform/qcom/camss/camss-csid-4-1.c
@@ -17,7 +17,6 @@
#include "camss-csid-gen1.h"
#include "camss.h"
-#define CAMSS_CSID_HW_VERSION 0x0
#define CAMSS_CSID_CORE_CTRL_0 0x004
#define CAMSS_CSID_CORE_CTRL_1 0x008
#define CAMSS_CSID_RST_CMD 0x00c
@@ -139,15 +138,6 @@ static int csid_configure_testgen_pattern(struct csid_device *csid, s32 val)
return 0;
}
-static u32 csid_hw_version(struct csid_device *csid)
-{
- u32 hw_version = readl_relaxed(csid->base + CAMSS_CSID_HW_VERSION);
-
- dev_dbg(csid->camss->dev, "CSID HW Version = 0x%08x\n", hw_version);
-
- return hw_version;
-}
-
static irqreturn_t csid_isr(int irq, void *dev)
{
struct csid_device *csid = dev;
@@ -180,15 +170,6 @@ static int csid_reset(struct csid_device *csid)
return 0;
}
-static u32 csid_src_pad_code(struct csid_device *csid, u32 sink_code,
- unsigned int match_format_idx, u32 match_code)
-{
- if (match_format_idx > 0)
- return 0;
-
- return sink_code;
-}
-
static void csid_subdev_init(struct csid_device *csid)
{
csid->testgen.modes = csid_testgen_modes;
diff --git a/drivers/media/platform/qcom/camss/camss-csid-4-7.c b/drivers/media/platform/qcom/camss/camss-csid-4-7.c
index 08578a143688..66054d4872e6 100644
--- a/drivers/media/platform/qcom/camss/camss-csid-4-7.c
+++ b/drivers/media/platform/qcom/camss/camss-csid-4-7.c
@@ -16,7 +16,6 @@
#include "camss-csid-gen1.h"
#include "camss.h"
-#define CAMSS_CSID_HW_VERSION 0x0
#define CAMSS_CSID_CORE_CTRL_0 0x004
#define CAMSS_CSID_CORE_CTRL_1 0x008
#define CAMSS_CSID_RST_CMD 0x010
@@ -151,15 +150,6 @@ static int csid_configure_testgen_pattern(struct csid_device *csid, s32 val)
return 0;
}
-static u32 csid_hw_version(struct csid_device *csid)
-{
- u32 hw_version = readl_relaxed(csid->base + CAMSS_CSID_HW_VERSION);
-
- dev_dbg(csid->camss->dev, "CSID HW Version = 0x%08x\n", hw_version);
-
- return hw_version;
-}
-
/*
* isr - CSID module interrupt service routine
* @irq: Interrupt line
@@ -205,38 +195,6 @@ static int csid_reset(struct csid_device *csid)
return 0;
}
-static u32 csid_src_pad_code(struct csid_device *csid, u32 sink_code,
- unsigned int match_format_idx, u32 match_code)
-{
- switch (sink_code) {
- case MEDIA_BUS_FMT_SBGGR10_1X10:
- {
- u32 src_code[] = {
- MEDIA_BUS_FMT_SBGGR10_1X10,
- MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
- };
-
- return csid_find_code(src_code, ARRAY_SIZE(src_code),
- match_format_idx, match_code);
- }
- case MEDIA_BUS_FMT_Y10_1X10:
- {
- u32 src_code[] = {
- MEDIA_BUS_FMT_Y10_1X10,
- MEDIA_BUS_FMT_Y10_2X8_PADHI_LE,
- };
-
- return csid_find_code(src_code, ARRAY_SIZE(src_code),
- match_format_idx, match_code);
- }
- default:
- if (match_format_idx > 0)
- return 0;
-
- return sink_code;
- }
-}
-
static void csid_subdev_init(struct csid_device *csid)
{
csid->testgen.modes = csid_testgen_modes;
diff --git a/drivers/media/platform/qcom/camss/camss-csid-780.c b/drivers/media/platform/qcom/camss/camss-csid-780.c
new file mode 100644
index 000000000000..4c720d177731
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-csid-780.c
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module
+ *
+ * Copyright (c) 2024 Qualcomm Technologies, Inc.
+ */
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+
+#include "camss.h"
+#include "camss-csid.h"
+#include "camss-csid-780.h"
+
+#define CSID_IO_PATH_CFG0(csid) (0x4 * (csid))
+#define OUTPUT_IFE_EN 0x100
+#define INTERNAL_CSID 1
+
+#define CSID_RST_CFG 0xC
+#define RST_MODE BIT(0)
+#define RST_LOCATION BIT(4)
+
+#define CSID_RST_CMD 0x10
+#define SELECT_HW_RST BIT(0)
+#define SELECT_IRQ_RST BIT(2)
+
+#define CSID_IRQ_CMD 0x14
+#define IRQ_CMD_CLEAR BIT(0)
+
+#define CSID_RUP_AUP_CMD 0x18
+#define CSID_RUP_AUP_RDI(rdi) ((BIT(4) | BIT(20)) << (rdi))
+
+#define CSID_TOP_IRQ_STATUS 0x7C
+#define TOP_IRQ_STATUS_RESET_DONE BIT(0)
+
+#define CSID_TOP_IRQ_MASK 0x80
+#define CSID_TOP_IRQ_CLEAR 0x84
+#define CSID_TOP_IRQ_SET 0x88
+
+#define CSID_CSI2_RX_IRQ_STATUS 0x9C
+#define CSID_CSI2_RX_IRQ_MASK 0xA0
+#define CSID_CSI2_RX_IRQ_CLEAR 0xA4
+#define CSID_CSI2_RX_IRQ_SET 0xA8
+
+#define CSID_BUF_DONE_IRQ_STATUS 0x8C
+#define BUF_DONE_IRQ_STATUS_RDI_OFFSET (csid_is_lite(csid) ? 1 : 14)
+#define CSID_BUF_DONE_IRQ_MASK 0x90
+#define CSID_BUF_DONE_IRQ_CLEAR 0x94
+#define CSID_BUF_DONE_IRQ_SET 0x98
+
+#define CSID_CSI2_RDIN_IRQ_STATUS(rdi) (0xEC + 0x10 * (rdi))
+#define RUP_DONE_IRQ_STATUS BIT(23)
+
+#define CSID_CSI2_RDIN_IRQ_CLEAR(rdi) (0xF4 + 0x10 * (rdi))
+#define CSID_CSI2_RDIN_IRQ_SET(rdi) (0xF8 + 0x10 * (rdi))
+
+#define CSID_CSI2_RX_CFG0 0x200
+#define CSI2_RX_CFG0_NUM_ACTIVE_LANES 0
+#define CSI2_RX_CFG0_DL0_INPUT_SEL 4
+#define CSI2_RX_CFG0_PHY_NUM_SEL 20
+
+#define CSID_CSI2_RX_CFG1 0x204
+#define CSI2_RX_CFG1_ECC_CORRECTION_EN BIT(0)
+#define CSI2_RX_CFG1_VC_MODE BIT(2)
+
+#define CSID_RDI_CFG0(rdi) (0x500 + 0x100 * (rdi))
+#define RDI_CFG0_TIMESTAMP_EN BIT(6)
+#define RDI_CFG0_TIMESTAMP_STB_SEL BIT(8)
+#define RDI_CFG0_DECODE_FORMAT 12
+#define RDI_CFG0_DT 16
+#define RDI_CFG0_VC 22
+#define RDI_CFG0_DT_ID 27
+#define RDI_CFG0_EN BIT(31)
+
+#define CSID_RDI_CTRL(rdi) (0x504 + 0x100 * (rdi))
+#define RDI_CTRL_START_CMD BIT(0)
+
+#define CSID_RDI_CFG1(rdi) (0x510 + 0x100 * (rdi))
+#define RDI_CFG1_DROP_H_EN BIT(5)
+#define RDI_CFG1_DROP_V_EN BIT(6)
+#define RDI_CFG1_CROP_H_EN BIT(7)
+#define RDI_CFG1_CROP_V_EN BIT(8)
+#define RDI_CFG1_PIX_STORE BIT(10)
+#define RDI_CFG1_PACKING_FORMAT_MIPI BIT(15)
+
+#define CSID_RDI_IRQ_SUBSAMPLE_PATTERN(rdi) (0x548 + 0x100 * (rdi))
+#define CSID_RDI_IRQ_SUBSAMPLE_PERIOD(rdi) (0x54C + 0x100 * (rdi))
+
+#define CSI2_RX_CFG0_PHY_SEL_BASE_IDX 1
+
+static void __csid_configure_rx(struct csid_device *csid,
+ struct csid_phy_config *phy, int vc)
+{
+ int val;
+
+ val = (phy->lane_cnt - 1) << CSI2_RX_CFG0_NUM_ACTIVE_LANES;
+ val |= phy->lane_assign << CSI2_RX_CFG0_DL0_INPUT_SEL;
+ val |= (phy->csiphy_id + CSI2_RX_CFG0_PHY_SEL_BASE_IDX) << CSI2_RX_CFG0_PHY_NUM_SEL;
+
+ writel(val, csid->base + CSID_CSI2_RX_CFG0);
+
+ val = CSI2_RX_CFG1_ECC_CORRECTION_EN;
+ if (vc > 3)
+ val |= CSI2_RX_CFG1_VC_MODE;
+
+ writel(val, csid->base + CSID_CSI2_RX_CFG1);
+}
+
+static void __csid_ctrl_rdi(struct csid_device *csid, int enable, u8 rdi)
+{
+ int val = 0;
+
+ if (enable)
+ val = RDI_CTRL_START_CMD;
+
+ writel(val, csid->base + CSID_RDI_CTRL(rdi));
+}
+
+static void __csid_configure_wrapper(struct csid_device *csid)
+{
+ u32 val;
+
+ /* csid lite doesn't need to configure top register */
+ if (csid->res->is_lite)
+ return;
+
+ val = OUTPUT_IFE_EN | INTERNAL_CSID;
+ writel(val, csid->camss->csid_wrapper_base + CSID_IO_PATH_CFG0(csid->id));
+}
+
+static void __csid_configure_rdi_stream(struct csid_device *csid, u8 enable, u8 vc)
+{
+ u32 val;
+ u8 lane_cnt = csid->phy.lane_cnt;
+ /* Source pads matching RDI channels on hardware. Pad 1 -> RDI0, Pad 2 -> RDI1, etc. */
+ struct v4l2_mbus_framefmt *input_format = &csid->fmt[MSM_CSID_PAD_FIRST_SRC + vc];
+ const struct csid_format_info *format = csid_get_fmt_entry(csid->res->formats->formats,
+ csid->res->formats->nformats,
+ input_format->code);
+
+ if (!lane_cnt)
+ lane_cnt = 4;
+
+ /*
+ * DT_ID is a two-bit field that is concatenated with
+ * the four least significant bits of the five-bit VC
+ * field to generate an internal CID value.
+ *
+ * CSID_RDI_CFG0(vc)
+ * DT_ID : 28:27
+ * VC : 26:22
+ * DT : 21:16
+ *
+ * CID : VC 3:0 << 2 | DT_ID 1:0
+ */
+ u8 dt_id = vc & 0x03;
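+ /* e.g. VC 5: dt_id = 5 & 0x3 = 1, so CID = (5 & 0xf) << 2 | 1 = 0x15 */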
+
+ val = RDI_CFG0_TIMESTAMP_EN;
+ val |= RDI_CFG0_TIMESTAMP_STB_SEL;
+ /* note: for non-RDI path, this should be format->decode_format */
+ val |= DECODE_FORMAT_PAYLOAD_ONLY << RDI_CFG0_DECODE_FORMAT;
+ val |= vc << RDI_CFG0_VC;
+ val |= format->data_type << RDI_CFG0_DT;
+ val |= dt_id << RDI_CFG0_DT_ID;
+
+ writel(val, csid->base + CSID_RDI_CFG0(vc));
+
+ val = RDI_CFG1_PACKING_FORMAT_MIPI;
+ val |= RDI_CFG1_PIX_STORE;
+ val |= RDI_CFG1_DROP_H_EN;
+ val |= RDI_CFG1_DROP_V_EN;
+ val |= RDI_CFG1_CROP_H_EN;
+ val |= RDI_CFG1_CROP_V_EN;
+
+ writel(val, csid->base + CSID_RDI_CFG1(vc));
+
+ val = 0;
+ writel(val, csid->base + CSID_RDI_IRQ_SUBSAMPLE_PERIOD(vc));
+
+ val = 1;
+ writel(val, csid->base + CSID_RDI_IRQ_SUBSAMPLE_PATTERN(vc));
+
+ val = 0;
+ writel(val, csid->base + CSID_RDI_CTRL(vc));
+
+ val = readl(csid->base + CSID_RDI_CFG0(vc));
+
+ if (enable)
+ val |= RDI_CFG0_EN;
+ writel(val, csid->base + CSID_RDI_CFG0(vc));
+}
+
+static void csid_configure_stream(struct csid_device *csid, u8 enable)
+{
+ u8 i;
+
+ __csid_configure_wrapper(csid);
+
+ /* Loop through all enabled VCs and configure stream for each */
+ for (i = 0; i < MSM_CSID_MAX_SRC_STREAMS; i++)
+ if (csid->phy.en_vc & BIT(i)) {
+ __csid_configure_rdi_stream(csid, enable, i);
+ __csid_configure_rx(csid, &csid->phy, i);
+ __csid_ctrl_rdi(csid, enable, i);
+ }
+}
+
+static int csid_configure_testgen_pattern(struct csid_device *csid, s32 val)
+{
+ return 0;
+}
+
+static void csid_subdev_reg_update(struct csid_device *csid, int port_id, bool clear)
+{
+ if (clear) {
+ csid->reg_update &= ~CSID_RUP_AUP_RDI(port_id);
+ } else {
+ csid->reg_update |= CSID_RUP_AUP_RDI(port_id);
+ writel(csid->reg_update, csid->base + CSID_RUP_AUP_CMD);
+ }
+}
+
+/*
+ * csid_isr - CSID module interrupt service routine
+ * @irq: Interrupt line
+ * @dev: CSID device
+ *
+ * Return IRQ_HANDLED on success
+ */
+static irqreturn_t csid_isr(int irq, void *dev)
+{
+ struct csid_device *csid = dev;
+ u32 val, buf_done_val;
+ u8 reset_done;
+ int i;
+
+ val = readl(csid->base + CSID_TOP_IRQ_STATUS);
+ writel(val, csid->base + CSID_TOP_IRQ_CLEAR);
+ reset_done = val & TOP_IRQ_STATUS_RESET_DONE;
+
+ val = readl(csid->base + CSID_CSI2_RX_IRQ_STATUS);
+ writel(val, csid->base + CSID_CSI2_RX_IRQ_CLEAR);
+
+ buf_done_val = readl(csid->base + CSID_BUF_DONE_IRQ_STATUS);
+ writel(buf_done_val, csid->base + CSID_BUF_DONE_IRQ_CLEAR);
+
+ /* Read and clear IRQ status for each enabled RDI channel */
+ for (i = 0; i < MSM_CSID_MAX_SRC_STREAMS; i++)
+ if (csid->phy.en_vc & BIT(i)) {
+ val = readl(csid->base + CSID_CSI2_RDIN_IRQ_STATUS(i));
+ writel(val, csid->base + CSID_CSI2_RDIN_IRQ_CLEAR(i));
+
+ if (val & RUP_DONE_IRQ_STATUS)
+ /* clear the reg update bit */
+ csid_subdev_reg_update(csid, i, true);
+
+ if (buf_done_val & BIT(BUF_DONE_IRQ_STATUS_RDI_OFFSET + i)) {
+ /*
+ * For Titan 780, the buf done and RUP IRQs have been moved
+ * from the VFE into the CSID. When the CSID receives a buf
+ * done, notify the VFE so it can run its buf done handling.
+ */
+ camss_buf_done(csid->camss, csid->id, i);
+ }
+ }
+
+ val = IRQ_CMD_CLEAR;
+ writel(val, csid->base + CSID_IRQ_CMD);
+
+ if (reset_done)
+ complete(&csid->reset_complete);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * csid_reset - Trigger reset on CSID module and wait to complete
+ * @csid: CSID device
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int csid_reset(struct csid_device *csid)
+{
+ unsigned long time;
+ u32 val;
+ int i;
+
+ reinit_completion(&csid->reset_complete);
+
+ writel(1, csid->base + CSID_TOP_IRQ_CLEAR);
+ writel(1, csid->base + CSID_IRQ_CMD);
+ writel(1, csid->base + CSID_TOP_IRQ_MASK);
+
+ for (i = 0; i < MSM_CSID_MAX_SRC_STREAMS; i++)
+ if (csid->phy.en_vc & BIT(i)) {
+ writel(BIT(BUF_DONE_IRQ_STATUS_RDI_OFFSET + i),
+ csid->base + CSID_BUF_DONE_IRQ_CLEAR);
+ writel(IRQ_CMD_CLEAR, csid->base + CSID_IRQ_CMD);
+ writel(BIT(BUF_DONE_IRQ_STATUS_RDI_OFFSET + i),
+ csid->base + CSID_BUF_DONE_IRQ_MASK);
+ }
+
+ /* preserve registers */
+ val = RST_LOCATION | RST_MODE;
+ writel(val, csid->base + CSID_RST_CFG);
+
+ val = SELECT_HW_RST | SELECT_IRQ_RST;
+ writel(val, csid->base + CSID_RST_CMD);
+
+ time = wait_for_completion_timeout(&csid->reset_complete,
+ msecs_to_jiffies(CSID_RESET_TIMEOUT_MS));
+ if (!time) {
+ dev_err(csid->camss->dev, "CSID reset timeout\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void csid_subdev_init(struct csid_device *csid)
+{
+ csid->testgen.nmodes = CSID_PAYLOAD_MODE_DISABLED;
+}
+
+const struct csid_hw_ops csid_ops_780 = {
+ .configure_stream = csid_configure_stream,
+ .configure_testgen_pattern = csid_configure_testgen_pattern,
+ .hw_version = csid_hw_version,
+ .isr = csid_isr,
+ .reset = csid_reset,
+ .src_pad_code = csid_src_pad_code,
+ .subdev_init = csid_subdev_init,
+ .reg_update = csid_subdev_reg_update,
+};
diff --git a/drivers/media/platform/qcom/camss/camss-csid-780.h b/drivers/media/platform/qcom/camss/camss-csid-780.h
new file mode 100644
index 000000000000..a990c66a60ff
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-csid-780.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * camss-csid-780.h
+ *
+ * Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module Generation 3
+ *
+ * Copyright (c) 2024 Qualcomm Technologies, Inc.
+ */
+#ifndef __QC_MSM_CAMSS_CSID_780_H__
+#define __QC_MSM_CAMSS_CSID_780_H__
+
+#define DECODE_FORMAT_UNCOMPRESSED_8_BIT 0x1
+#define DECODE_FORMAT_UNCOMPRESSED_10_BIT 0x2
+#define DECODE_FORMAT_UNCOMPRESSED_12_BIT 0x3
+#define DECODE_FORMAT_UNCOMPRESSED_14_BIT 0x4
+#define DECODE_FORMAT_UNCOMPRESSED_16_BIT 0x5
+#define DECODE_FORMAT_UNCOMPRESSED_20_BIT 0x6
+#define DECODE_FORMAT_UNCOMPRESSED_24_BIT 0x7
+#define DECODE_FORMAT_PAYLOAD_ONLY 0xf
+
+#define PLAIN_FORMAT_PLAIN8 0x0 /* supports DPCM, UNCOMPRESSED_6/8_BIT */
+#define PLAIN_FORMAT_PLAIN16 0x1 /* supports DPCM, UNCOMPRESSED_10/16_BIT */
+#define PLAIN_FORMAT_PLAIN32 0x2 /* supports UNCOMPRESSED_20_BIT */
+
+#endif /* __QC_MSM_CAMSS_CSID_780_H__ */
diff --git a/drivers/media/platform/qcom/camss/camss-csid-gen2.c b/drivers/media/platform/qcom/camss/camss-csid-gen2.c
index e1c757933e27..2a1746dcc1c5 100644
--- a/drivers/media/platform/qcom/camss/camss-csid-gen2.c
+++ b/drivers/media/platform/qcom/camss/camss-csid-gen2.c
@@ -22,11 +22,6 @@
* alternate register layout.
*/
-#define CSID_HW_VERSION 0x0
-#define HW_VERSION_STEPPING 0
-#define HW_VERSION_REVISION 16
-#define HW_VERSION_GENERATION 28
-
#define CSID_RST_STROBES 0x10
#define RST_STROBES 0
@@ -352,29 +347,6 @@ static int csid_configure_testgen_pattern(struct csid_device *csid, s32 val)
}
/*
- * csid_hw_version - CSID hardware version query
- * @csid: CSID device
- *
- * Return HW version or error
- */
-static u32 csid_hw_version(struct csid_device *csid)
-{
- u32 hw_version;
- u32 hw_gen;
- u32 hw_rev;
- u32 hw_step;
-
- hw_version = readl_relaxed(csid->base + CSID_HW_VERSION);
- hw_gen = (hw_version >> HW_VERSION_GENERATION) & 0xF;
- hw_rev = (hw_version >> HW_VERSION_REVISION) & 0xFFF;
- hw_step = (hw_version >> HW_VERSION_STEPPING) & 0xFFFF;
- dev_dbg(csid->camss->dev, "CSID HW Version = %u.%u.%u\n",
- hw_gen, hw_rev, hw_step);
-
- return hw_version;
-}
-
-/*
* csid_isr - CSID module interrupt service routine
* @irq: Interrupt line
* @dev: CSID device
@@ -443,38 +415,6 @@ static int csid_reset(struct csid_device *csid)
return 0;
}
-static u32 csid_src_pad_code(struct csid_device *csid, u32 sink_code,
- unsigned int match_format_idx, u32 match_code)
-{
- switch (sink_code) {
- case MEDIA_BUS_FMT_SBGGR10_1X10:
- {
- u32 src_code[] = {
- MEDIA_BUS_FMT_SBGGR10_1X10,
- MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
- };
-
- return csid_find_code(src_code, ARRAY_SIZE(src_code),
- match_format_idx, match_code);
- }
- case MEDIA_BUS_FMT_Y10_1X10:
- {
- u32 src_code[] = {
- MEDIA_BUS_FMT_Y10_1X10,
- MEDIA_BUS_FMT_Y10_2X8_PADHI_LE,
- };
-
- return csid_find_code(src_code, ARRAY_SIZE(src_code),
- match_format_idx, match_code);
- }
- default:
- if (match_format_idx > 0)
- return 0;
-
- return sink_code;
- }
-}
-
static void csid_subdev_init(struct csid_device *csid)
{
csid->testgen.modes = csid_testgen_modes;
diff --git a/drivers/media/platform/qcom/camss/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c
index 858db5d4ca75..d08117f46f3b 100644
--- a/drivers/media/platform/qcom/camss/camss-csid.c
+++ b/drivers/media/platform/qcom/camss/camss-csid.c
@@ -17,6 +17,7 @@
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <media/media-entity.h>
+#include <media/mipi-csi2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-subdev.h>
@@ -29,6 +30,11 @@
#define VFE_480_CSID_OFFSET 0x1200
#define VFE_480_LITE_CSID_OFFSET 0x200
+#define CSID_HW_VERSION 0x0
+#define HW_VERSION_STEPPING 0
+#define HW_VERSION_REVISION 16
+#define HW_VERSION_GENERATION 28
+
#define MSM_CSID_NAME "msm_csid"
const char * const csid_testgen_modes[] = {
@@ -48,119 +54,119 @@ const char * const csid_testgen_modes[] = {
static const struct csid_format_info formats_4_1[] = {
{
MEDIA_BUS_FMT_UYVY8_1X16,
- DATA_TYPE_YUV422_8BIT,
+ MIPI_CSI2_DT_YUV422_8B,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_VYUY8_1X16,
- DATA_TYPE_YUV422_8BIT,
+ MIPI_CSI2_DT_YUV422_8B,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_YUYV8_1X16,
- DATA_TYPE_YUV422_8BIT,
+ MIPI_CSI2_DT_YUV422_8B,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_YVYU8_1X16,
- DATA_TYPE_YUV422_8BIT,
+ MIPI_CSI2_DT_YUV422_8B,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_SBGGR8_1X8,
- DATA_TYPE_RAW_8BIT,
+ MIPI_CSI2_DT_RAW8,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SGBRG8_1X8,
- DATA_TYPE_RAW_8BIT,
+ MIPI_CSI2_DT_RAW8,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SGRBG8_1X8,
- DATA_TYPE_RAW_8BIT,
+ MIPI_CSI2_DT_RAW8,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SRGGB8_1X8,
- DATA_TYPE_RAW_8BIT,
+ MIPI_CSI2_DT_RAW8,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SBGGR10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SGBRG10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SGRBG10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SRGGB10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SBGGR12_1X12,
- DATA_TYPE_RAW_12BIT,
+ MIPI_CSI2_DT_RAW12,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SGBRG12_1X12,
- DATA_TYPE_RAW_12BIT,
+ MIPI_CSI2_DT_RAW12,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SGRBG12_1X12,
- DATA_TYPE_RAW_12BIT,
+ MIPI_CSI2_DT_RAW12,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SRGGB12_1X12,
- DATA_TYPE_RAW_12BIT,
+ MIPI_CSI2_DT_RAW12,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_Y10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
@@ -170,147 +176,147 @@ static const struct csid_format_info formats_4_1[] = {
static const struct csid_format_info formats_4_7[] = {
{
MEDIA_BUS_FMT_UYVY8_1X16,
- DATA_TYPE_YUV422_8BIT,
+ MIPI_CSI2_DT_YUV422_8B,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_VYUY8_1X16,
- DATA_TYPE_YUV422_8BIT,
+ MIPI_CSI2_DT_YUV422_8B,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_YUYV8_1X16,
- DATA_TYPE_YUV422_8BIT,
+ MIPI_CSI2_DT_YUV422_8B,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_YVYU8_1X16,
- DATA_TYPE_YUV422_8BIT,
+ MIPI_CSI2_DT_YUV422_8B,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_SBGGR8_1X8,
- DATA_TYPE_RAW_8BIT,
+ MIPI_CSI2_DT_RAW8,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SGBRG8_1X8,
- DATA_TYPE_RAW_8BIT,
+ MIPI_CSI2_DT_RAW8,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SGRBG8_1X8,
- DATA_TYPE_RAW_8BIT,
+ MIPI_CSI2_DT_RAW8,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SRGGB8_1X8,
- DATA_TYPE_RAW_8BIT,
+ MIPI_CSI2_DT_RAW8,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SBGGR10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SGBRG10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SGRBG10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SRGGB10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SBGGR12_1X12,
- DATA_TYPE_RAW_12BIT,
+ MIPI_CSI2_DT_RAW12,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SGBRG12_1X12,
- DATA_TYPE_RAW_12BIT,
+ MIPI_CSI2_DT_RAW12,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SGRBG12_1X12,
- DATA_TYPE_RAW_12BIT,
+ MIPI_CSI2_DT_RAW12,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SRGGB12_1X12,
- DATA_TYPE_RAW_12BIT,
+ MIPI_CSI2_DT_RAW12,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SBGGR14_1X14,
- DATA_TYPE_RAW_14BIT,
+ MIPI_CSI2_DT_RAW14,
DECODE_FORMAT_UNCOMPRESSED_14_BIT,
14,
1,
},
{
MEDIA_BUS_FMT_SGBRG14_1X14,
- DATA_TYPE_RAW_14BIT,
+ MIPI_CSI2_DT_RAW14,
DECODE_FORMAT_UNCOMPRESSED_14_BIT,
14,
1,
},
{
MEDIA_BUS_FMT_SGRBG14_1X14,
- DATA_TYPE_RAW_14BIT,
+ MIPI_CSI2_DT_RAW14,
DECODE_FORMAT_UNCOMPRESSED_14_BIT,
14,
1,
},
{
MEDIA_BUS_FMT_SRGGB14_1X14,
- DATA_TYPE_RAW_14BIT,
+ MIPI_CSI2_DT_RAW14,
DECODE_FORMAT_UNCOMPRESSED_14_BIT,
14,
1,
},
{
MEDIA_BUS_FMT_Y10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
@@ -320,154 +326,154 @@ static const struct csid_format_info formats_4_7[] = {
static const struct csid_format_info formats_gen2[] = {
{
MEDIA_BUS_FMT_UYVY8_1X16,
- DATA_TYPE_YUV422_8BIT,
+ MIPI_CSI2_DT_YUV422_8B,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_VYUY8_1X16,
- DATA_TYPE_YUV422_8BIT,
+ MIPI_CSI2_DT_YUV422_8B,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_YUYV8_1X16,
- DATA_TYPE_YUV422_8BIT,
+ MIPI_CSI2_DT_YUV422_8B,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_YVYU8_1X16,
- DATA_TYPE_YUV422_8BIT,
+ MIPI_CSI2_DT_YUV422_8B,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_SBGGR8_1X8,
- DATA_TYPE_RAW_8BIT,
+ MIPI_CSI2_DT_RAW8,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SGBRG8_1X8,
- DATA_TYPE_RAW_8BIT,
+ MIPI_CSI2_DT_RAW8,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SGRBG8_1X8,
- DATA_TYPE_RAW_8BIT,
+ MIPI_CSI2_DT_RAW8,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SRGGB8_1X8,
- DATA_TYPE_RAW_8BIT,
+ MIPI_CSI2_DT_RAW8,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SBGGR10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SGBRG10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SGRBG10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SRGGB10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_Y8_1X8,
- DATA_TYPE_RAW_8BIT,
+ MIPI_CSI2_DT_RAW8,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_Y10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SBGGR12_1X12,
- DATA_TYPE_RAW_12BIT,
+ MIPI_CSI2_DT_RAW12,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SGBRG12_1X12,
- DATA_TYPE_RAW_12BIT,
+ MIPI_CSI2_DT_RAW12,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SGRBG12_1X12,
- DATA_TYPE_RAW_12BIT,
+ MIPI_CSI2_DT_RAW12,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SRGGB12_1X12,
- DATA_TYPE_RAW_12BIT,
+ MIPI_CSI2_DT_RAW12,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SBGGR14_1X14,
- DATA_TYPE_RAW_14BIT,
+ MIPI_CSI2_DT_RAW14,
DECODE_FORMAT_UNCOMPRESSED_14_BIT,
14,
1,
},
{
MEDIA_BUS_FMT_SGBRG14_1X14,
- DATA_TYPE_RAW_14BIT,
+ MIPI_CSI2_DT_RAW14,
DECODE_FORMAT_UNCOMPRESSED_14_BIT,
14,
1,
},
{
MEDIA_BUS_FMT_SGRBG14_1X14,
- DATA_TYPE_RAW_14BIT,
+ MIPI_CSI2_DT_RAW14,
DECODE_FORMAT_UNCOMPRESSED_14_BIT,
14,
1,
},
{
MEDIA_BUS_FMT_SRGGB14_1X14,
- DATA_TYPE_RAW_14BIT,
+ MIPI_CSI2_DT_RAW14,
DECODE_FORMAT_UNCOMPRESSED_14_BIT,
14,
1,
@@ -591,6 +597,78 @@ static int csid_set_clock_rates(struct csid_device *csid)
}
/*
+ * csid_hw_version - CSID hardware version query
+ * @csid: CSID device
+ *
+ * Return HW version or error
+ */
+u32 csid_hw_version(struct csid_device *csid)
+{
+ u32 hw_version;
+ u32 hw_gen;
+ u32 hw_rev;
+ u32 hw_step;
+
+ hw_version = readl_relaxed(csid->base + CSID_HW_VERSION);
+ hw_gen = (hw_version >> HW_VERSION_GENERATION) & 0xF;
+ hw_rev = (hw_version >> HW_VERSION_REVISION) & 0xFFF;
+ hw_step = (hw_version >> HW_VERSION_STEPPING) & 0xFFFF;
+ dev_info(csid->camss->dev, "CSID:%d HW Version = %u.%u.%u\n",
+ csid->id, hw_gen, hw_rev, hw_step);
+
+ return hw_version;
+}
+
+/*
+ * csid_src_pad_code - Pick an output/src format based on the input/sink format
+ * @csid: CSID device
+ * @sink_code: The sink format of the input
+ * @match_format_idx: Index of the preferred format, as defined by the CSID
+ * subdevice formats; set @match_code to 0 when this is used
+ * @match_code: Preferred media bus code; set @match_format_idx to 0 when
+ * this is used
+ *
+ * Return the selected src format code, or 0 on failure
+ */
+u32 csid_src_pad_code(struct csid_device *csid, u32 sink_code,
+ unsigned int match_format_idx, u32 match_code)
+{
+ if (csid->camss->res->version == CAMSS_8x16) {
+ if (match_format_idx > 0)
+ return 0;
+
+ return sink_code;
+ }
+
+ switch (sink_code) {
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
+ };
+
+ return csid_find_code(src_code, ARRAY_SIZE(src_code),
+ match_format_idx, match_code);
+ }
+ case MEDIA_BUS_FMT_Y10_1X10:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_Y10_2X8_PADHI_LE,
+ };
+
+ return csid_find_code(src_code, ARRAY_SIZE(src_code),
+ match_format_idx, match_code);
+ }
+ default:
+ if (match_format_idx > 0)
+ return 0;
+
+ return sink_code;
+ }
+}
+
+/*
* csid_set_power - Power on/off CSID module
* @sd: CSID V4L2 subdevice
* @on: Requested power state
@@ -683,11 +761,13 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
int ret;
if (enable) {
- ret = v4l2_ctrl_handler_setup(&csid->ctrls);
- if (ret < 0) {
- dev_err(csid->camss->dev,
- "could not sync v4l2 controls: %d\n", ret);
- return ret;
+ if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED) {
+ ret = v4l2_ctrl_handler_setup(&csid->ctrls);
+ if (ret < 0) {
+ dev_err(csid->camss->dev,
+ "could not sync v4l2 controls: %d\n", ret);
+ return ret;
+ }
}
if (!csid->testgen.enabled &&
@@ -761,7 +841,8 @@ static void csid_try_format(struct csid_device *csid,
break;
case MSM_CSID_PAD_SRC:
- if (csid->testgen_mode->cur.val == 0) {
+ if (csid->testgen.nmodes == CSID_PAYLOAD_MODE_DISABLED ||
+ csid->testgen_mode->cur.val == 0) {
/* Test generator is disabled, */
/* keep pad formats in sync */
u32 code = fmt->code;
@@ -811,7 +892,8 @@ static int csid_enum_mbus_code(struct v4l2_subdev *sd,
code->code = csid->res->formats->formats[code->index].code;
} else {
- if (csid->testgen_mode->cur.val == 0) {
+ if (csid->testgen.nmodes == CSID_PAYLOAD_MODE_DISABLED ||
+ csid->testgen_mode->cur.val == 0) {
struct v4l2_mbus_framefmt *sink_fmt;
sink_fmt = __csid_get_format(csid, sd_state,
@@ -1190,7 +1272,8 @@ static int csid_link_setup(struct media_entity *entity,
/* If test generator is enabled */
/* do not allow a link from CSIPHY to CSID */
- if (csid->testgen_mode->cur.val != 0)
+ if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED &&
+ csid->testgen_mode->cur.val != 0)
return -EBUSY;
sd = media_entity_to_v4l2_subdev(remote->entity);
@@ -1283,24 +1366,27 @@ int msm_csid_register_entity(struct csid_device *csid,
MSM_CSID_NAME, csid->id);
v4l2_set_subdevdata(sd, csid);
- ret = v4l2_ctrl_handler_init(&csid->ctrls, 1);
- if (ret < 0) {
- dev_err(dev, "Failed to init ctrl handler: %d\n", ret);
- return ret;
- }
+ if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED) {
+ ret = v4l2_ctrl_handler_init(&csid->ctrls, 1);
+ if (ret < 0) {
+ dev_err(dev, "Failed to init ctrl handler: %d\n", ret);
+ return ret;
+ }
- csid->testgen_mode = v4l2_ctrl_new_std_menu_items(&csid->ctrls,
- &csid_ctrl_ops, V4L2_CID_TEST_PATTERN,
- csid->testgen.nmodes, 0, 0,
- csid->testgen.modes);
+ csid->testgen_mode =
+ v4l2_ctrl_new_std_menu_items(&csid->ctrls,
+ &csid_ctrl_ops, V4L2_CID_TEST_PATTERN,
+ csid->testgen.nmodes, 0, 0,
+ csid->testgen.modes);
- if (csid->ctrls.error) {
- dev_err(dev, "Failed to init ctrl: %d\n", csid->ctrls.error);
- ret = csid->ctrls.error;
- goto free_ctrl;
- }
+ if (csid->ctrls.error) {
+ dev_err(dev, "Failed to init ctrl: %d\n", csid->ctrls.error);
+ ret = csid->ctrls.error;
+ goto free_ctrl;
+ }
- csid->subdev.ctrl_handler = &csid->ctrls;
+ csid->subdev.ctrl_handler = &csid->ctrls;
+ }
ret = csid_init_formats(sd, NULL);
if (ret < 0) {
@@ -1331,7 +1417,8 @@ int msm_csid_register_entity(struct csid_device *csid,
media_cleanup:
media_entity_cleanup(&sd->entity);
free_ctrl:
- v4l2_ctrl_handler_free(&csid->ctrls);
+ if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED)
+ v4l2_ctrl_handler_free(&csid->ctrls);
return ret;
}
@@ -1344,7 +1431,8 @@ void msm_csid_unregister_entity(struct csid_device *csid)
{
v4l2_device_unregister_subdev(&csid->subdev);
media_entity_cleanup(&csid->subdev.entity);
- v4l2_ctrl_handler_free(&csid->ctrls);
+ if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED)
+ v4l2_ctrl_handler_free(&csid->ctrls);
}
inline bool csid_is_lite(struct csid_device *csid)
diff --git a/drivers/media/platform/qcom/camss/camss-csid.h b/drivers/media/platform/qcom/camss/camss-csid.h
index 8cdae98e4dca..90b8fc5852be 100644
--- a/drivers/media/platform/qcom/camss/camss-csid.h
+++ b/drivers/media/platform/qcom/camss/camss-csid.h
@@ -27,29 +27,6 @@
/* CSID hardware can demultiplex up to 4 outputs */
#define MSM_CSID_MAX_SRC_STREAMS 4
-#define DATA_TYPE_EMBEDDED_DATA_8BIT 0x12
-#define DATA_TYPE_YUV420_8BIT 0x18
-#define DATA_TYPE_YUV420_10BIT 0x19
-#define DATA_TYPE_YUV420_8BIT_LEGACY 0x1a
-#define DATA_TYPE_YUV420_8BIT_SHIFTED 0x1c /* Chroma Shifted Pixel Sampling */
-#define DATA_TYPE_YUV420_10BIT_SHIFTED 0x1d /* Chroma Shifted Pixel Sampling */
-#define DATA_TYPE_YUV422_8BIT 0x1e
-#define DATA_TYPE_YUV422_10BIT 0x1f
-#define DATA_TYPE_RGB444 0x20
-#define DATA_TYPE_RGB555 0x21
-#define DATA_TYPE_RGB565 0x22
-#define DATA_TYPE_RGB666 0x23
-#define DATA_TYPE_RGB888 0x24
-#define DATA_TYPE_RAW_24BIT 0x27
-#define DATA_TYPE_RAW_6BIT 0x28
-#define DATA_TYPE_RAW_7BIT 0x29
-#define DATA_TYPE_RAW_8BIT 0x2a
-#define DATA_TYPE_RAW_10BIT 0x2b
-#define DATA_TYPE_RAW_12BIT 0x2c
-#define DATA_TYPE_RAW_14BIT 0x2d
-#define DATA_TYPE_RAW_16BIT 0x2e
-#define DATA_TYPE_RAW_20BIT 0x2f
-
#define CSID_RESET_TIMEOUT_MS 500
enum csid_testgen_mode {
@@ -152,6 +129,14 @@ struct csid_hw_ops {
* @csid: CSID device
*/
void (*subdev_init)(struct csid_device *csid);
+
+ /*
+ * reg_update - Set or clear the register update (RUP/AUP) request for a port
+ * @csid: CSID device
+ * @port_id: Port (RDI) id
+ * @is_clear: true to clear the pending register update,
+ * false to set it and write the RUP/AUP command
+ */
+ void (*reg_update)(struct csid_device *csid, int port_id, bool is_clear);
};
struct csid_subdev_resources {
@@ -169,6 +154,7 @@ struct csid_device {
void __iomem *base;
u32 irq;
char irq_name[30];
+ u32 reg_update;
struct camss_clock *clock;
int nclocks;
struct regulator_bulk_data *supplies;
@@ -228,6 +214,7 @@ extern const struct csid_formats csid_formats_gen2;
extern const struct csid_hw_ops csid_ops_4_1;
extern const struct csid_hw_ops csid_ops_4_7;
extern const struct csid_hw_ops csid_ops_gen2;
+extern const struct csid_hw_ops csid_ops_780;
/*
* csid_is_lite - Check if CSID is CSID lite.
@@ -237,4 +224,25 @@ extern const struct csid_hw_ops csid_ops_gen2;
*/
bool csid_is_lite(struct csid_device *csid);
+/*
+ * csid_hw_version - CSID hardware version query
+ * @csid: CSID device
+ *
+ * Return HW version or error
+ */
+u32 csid_hw_version(struct csid_device *csid);
+
+/*
+ * csid_src_pad_code - Pick an output/src format based on the input/sink format
+ * @csid: CSID device
+ * @sink_code: The sink format of the input
+ * @match_format_idx: Index of the preferred format, as defined by the CSID
+ * subdevice formats; set @match_code to 0 when this is used
+ * @match_code: Preferred media bus code; set @match_format_idx to 0 when
+ * this is used
+ *
+ * Return the selected src format code, or 0 on failure
+ */
+u32 csid_src_pad_code(struct csid_device *csid, u32 sink_code,
+ unsigned int match_format_idx, u32 match_code);
+
#endif /* QC_MSM_CAMSS_CSID_H */
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
index cd4a8c369234..9d67e7fa6366 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
@@ -180,6 +180,11 @@ static irqreturn_t csiphy_isr(int irq, void *dev)
return IRQ_HANDLED;
}
+static int csiphy_init(struct csiphy_device *csiphy)
+{
+ return 0;
+}
+
const struct csiphy_hw_ops csiphy_ops_2ph_1_0 = {
.get_lane_mask = csiphy_get_lane_mask,
.hw_version_read = csiphy_hw_version_read,
@@ -187,4 +192,5 @@ const struct csiphy_hw_ops csiphy_ops_2ph_1_0 = {
.lanes_enable = csiphy_lanes_enable,
.lanes_disable = csiphy_lanes_disable,
.isr = csiphy_isr,
+ .init = csiphy_init,
};
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
index f341f7b7fd8a..a6cc957b986e 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
@@ -42,21 +42,21 @@
#define CSIPHY_3PH_LNn_CSI_LANE_CTRL15(n) (0x03c + 0x100 * (n))
#define CSIPHY_3PH_LNn_CSI_LANE_CTRL15_SWI_SOT_SYMBOL 0xb8
-#define CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(n) (0x800 + 0x4 * (n))
+#define CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(offset, n) ((offset) + 0x4 * (n))
#define CSIPHY_3PH_CMN_CSI_COMMON_CTRL5_CLK_ENABLE BIT(7)
#define CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_COMMON_PWRDN_B BIT(0)
#define CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_SHOW_REV_ID BIT(1)
-#define CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(n) (0x8b0 + 0x4 * (n))
+#define CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(offset, n) ((offset) + 0xb0 + 0x4 * (n))
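+/* with an offset of 0x800 these resolve to the previous fixed 0x800/0x8b0 layout */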
-#define CSIPHY_DEFAULT_PARAMS 0
-#define CSIPHY_LANE_ENABLE 1
-#define CSIPHY_SETTLE_CNT_LOWER_BYTE 2
-#define CSIPHY_SETTLE_CNT_HIGHER_BYTE 3
-#define CSIPHY_DNP_PARAMS 4
-#define CSIPHY_2PH_REGS 5
-#define CSIPHY_3PH_REGS 6
+#define CSIPHY_DEFAULT_PARAMS 0
+#define CSIPHY_LANE_ENABLE 1
+#define CSIPHY_SETTLE_CNT_LOWER_BYTE 2
+#define CSIPHY_SETTLE_CNT_HIGHER_BYTE 3
+#define CSIPHY_DNP_PARAMS 4
+#define CSIPHY_2PH_REGS 5
+#define CSIPHY_3PH_REGS 6
-struct csiphy_reg_t {
+struct csiphy_lane_regs {
s32 reg_addr;
s32 reg_data;
s32 delay;
@@ -65,305 +65,381 @@ struct csiphy_reg_t {
/* GEN2 1.0 2PH */
static const struct
-csiphy_reg_t lane_regs_sdm845[5][14] = {
- {
- {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0000, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0008, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x000c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0060, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
- {
- {0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0734, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0708, 0x14, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x070C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0760, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0764, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
- {
- {0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0234, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0200, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x020C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0260, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0264, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
- {
- {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0428, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0400, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0408, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x040C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0460, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
- {
- {0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0634, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0628, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0600, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x060C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0660, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0664, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
+csiphy_lane_regs lane_regs_sdm845[] = {
+ {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0000, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0008, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x000c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0060, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0734, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0708, 0x14, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x070C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0760, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0764, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0234, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0200, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x020C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0260, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0264, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0428, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0400, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0408, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x040C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0460, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0634, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0628, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0600, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x060C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0660, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0664, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
};
/* GEN2 1.1 2PH */
static const struct
-csiphy_reg_t lane_regs_sc8280xp[5][14] = {
- {
- {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0000, 0x90, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0008, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x000C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0060, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
- {
- {0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0734, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0708, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x070C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0760, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0764, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
- {
- {0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0234, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0200, 0x90, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0208, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x020C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0260, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0264, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
- {
- {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0428, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0400, 0x90, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0408, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x040C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0460, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
- {
- {0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0634, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0628, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0600, 0x90, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0608, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x060C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0660, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0664, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
+csiphy_lane_regs lane_regs_sc8280xp[] = {
+ {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0000, 0x90, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0008, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x000C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0060, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0734, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0708, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x070C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0760, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0764, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0234, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0200, 0x90, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0208, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x020C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0260, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0264, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0428, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0400, 0x90, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0408, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x040C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0460, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0634, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0628, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0600, 0x90, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0608, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x060C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0660, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0664, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
};
/* GEN2 1.2.1 2PH */
static const struct
-csiphy_reg_t lane_regs_sm8250[5][20] = {
- {
- {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0900, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0908, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0904, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0904, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0034, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0010, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x001C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0008, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x0000, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x000c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0024, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
- {
- {0x0730, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0C80, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0C88, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0C84, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0C84, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0734, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0710, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x071C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0708, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x070c, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0724, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
- {
- {0x0230, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0A00, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0A08, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0A04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0A04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0234, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0210, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x021C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0208, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x0200, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x020c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0224, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
- {
- {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0B00, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0B08, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0B04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0B04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0434, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0410, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x041C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0408, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x0400, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x040c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0428, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0424, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
- {
- {0x0630, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0C00, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0C08, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0C04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0C04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0634, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0610, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x061C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0608, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x0600, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x060c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0628, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0624, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
+csiphy_lane_regs lane_regs_sm8250[] = {
+ {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0900, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0908, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0904, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0904, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0034, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0010, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x001C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0008, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0000, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x000c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0024, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0730, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C80, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C88, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C84, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C84, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0734, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0710, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x071C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0708, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x070c, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0724, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0230, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A00, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A08, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0234, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0210, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x021C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0208, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0200, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x020c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0224, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B00, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B08, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0434, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0410, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x041C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0408, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0400, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x040c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0428, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0424, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0630, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C00, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C08, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0634, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0610, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x061C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0608, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0600, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x060c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0628, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0624, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
+/* GEN2 2.1.2 2PH DPHY mode */
+static const struct
+csiphy_lane_regs lane_regs_sm8550[] = {
+ {0x0E90, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E94, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS},
+ {0x00A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0090, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0098, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0094, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS},
+ {0x0494, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x04A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0490, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0498, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0494, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS},
+ {0x0894, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x08A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0890, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0898, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0894, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS},
+ {0x0C94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0CA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C90, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C94, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS},
+ {0x0E30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E28, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E08, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0E10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0000, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0008, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0400, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0408, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0830, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0800, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0838, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x082C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0834, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x081C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0814, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x083C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0804, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0820, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0808, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0810, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C00, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C38, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C08, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0C10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0094, 0xD7, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x005C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0060, 0xBD, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0494, 0xD7, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x045C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0460, 0xBD, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0894, 0xD7, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x085C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0860, 0xBD, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0864, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C94, 0xD7, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C5C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C60, 0xBD, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C64, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
};
static void csiphy_hw_version_read(struct csiphy_device *csiphy,
struct device *dev)
{
+ struct csiphy_device_regs *regs = csiphy->regs;
u32 hw_version;
- writel(CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_SHOW_REV_ID,
- csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(6));
+ writel(CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_SHOW_REV_ID, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(regs->offset, 6));
hw_version = readl_relaxed(csiphy->base +
- CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(12));
+ CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(regs->offset, 12));
hw_version |= readl_relaxed(csiphy->base +
- CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(13)) << 8;
+ CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(regs->offset, 13)) << 8;
hw_version |= readl_relaxed(csiphy->base +
- CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(14)) << 16;
+ CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(regs->offset, 14)) << 16;
hw_version |= readl_relaxed(csiphy->base +
- CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(15)) << 24;
+ CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(regs->offset, 15)) << 24;
dev_dbg(dev, "CSIPHY 3PH HW Version = 0x%08x\n", hw_version);
}
@@ -374,31 +450,39 @@ static void csiphy_hw_version_read(struct csiphy_device *csiphy,
*/
static void csiphy_reset(struct csiphy_device *csiphy)
{
- writel_relaxed(0x1, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(0));
+ struct csiphy_device_regs *regs = csiphy->regs;
+
+ writel_relaxed(0x1, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(regs->offset, 0));
usleep_range(5000, 8000);
- writel_relaxed(0x0, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(0));
+ writel_relaxed(0x0, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(regs->offset, 0));
}
static irqreturn_t csiphy_isr(int irq, void *dev)
{
struct csiphy_device *csiphy = dev;
+ struct csiphy_device_regs *regs = csiphy->regs;
int i;
for (i = 0; i < 11; i++) {
int c = i + 22;
u8 val = readl_relaxed(csiphy->base +
- CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(i));
+ CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(regs->offset, i));
writel_relaxed(val, csiphy->base +
- CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(c));
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(regs->offset, c));
}
- writel_relaxed(0x1, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(10));
- writel_relaxed(0x0, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(10));
+ writel_relaxed(0x1, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(regs->offset, 10));
+ writel_relaxed(0x0, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(regs->offset, 10));
- for (i = 22; i < 33; i++)
+ for (i = 22; i < 33; i++) {
writel_relaxed(0x0, csiphy->base +
- CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(i));
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(regs->offset, i));
+ }
return IRQ_HANDLED;
}
@@ -500,46 +584,22 @@ static void csiphy_gen1_config_lanes(struct csiphy_device *csiphy,
static void csiphy_gen2_config_lanes(struct csiphy_device *csiphy,
u8 settle_cnt)
{
- const struct csiphy_reg_t *r;
- int i, l, array_size;
+ const struct csiphy_lane_regs *r = csiphy->regs->lane_regs;
+ int i, array_size = csiphy->regs->lane_array_size;
u32 val;
- switch (csiphy->camss->res->version) {
- case CAMSS_7280:
- r = &lane_regs_sm8250[0][0];
- array_size = ARRAY_SIZE(lane_regs_sm8250[0]);
- break;
- case CAMSS_8250:
- r = &lane_regs_sm8250[0][0];
- array_size = ARRAY_SIZE(lane_regs_sm8250[0]);
- break;
- case CAMSS_8280XP:
- r = &lane_regs_sc8280xp[0][0];
- array_size = ARRAY_SIZE(lane_regs_sc8280xp[0]);
- break;
- case CAMSS_845:
- r = &lane_regs_sdm845[0][0];
- array_size = ARRAY_SIZE(lane_regs_sdm845[0]);
- break;
- default:
- WARN(1, "unknown cspi version\n");
- return;
- }
-
- for (l = 0; l < 5; l++) {
- for (i = 0; i < array_size; i++, r++) {
- switch (r->csiphy_param_type) {
- case CSIPHY_SETTLE_CNT_LOWER_BYTE:
- val = settle_cnt & 0xff;
- break;
- case CSIPHY_DNP_PARAMS:
- continue;
- default:
- val = r->reg_data;
- break;
- }
- writel_relaxed(val, csiphy->base + r->reg_addr);
+ for (i = 0; i < array_size; i++, r++) {
+ switch (r->csiphy_param_type) {
+ case CSIPHY_SETTLE_CNT_LOWER_BYTE:
+ val = settle_cnt & 0xff;
+ break;
+ case CSIPHY_DNP_PARAMS:
+ continue;
+ default:
+ val = r->reg_data;
+ break;
}
+ writel_relaxed(val, csiphy->base + r->reg_addr);
}
}
@@ -565,6 +625,7 @@ static bool csiphy_is_gen2(u32 version)
case CAMSS_8250:
case CAMSS_8280XP:
case CAMSS_845:
+ case CAMSS_8550:
ret = true;
break;
}
@@ -577,6 +638,7 @@ static void csiphy_lanes_enable(struct csiphy_device *csiphy,
s64 link_freq, u8 lane_mask)
{
struct csiphy_lanes_cfg *c = &cfg->csi2->lane_cfg;
+ struct csiphy_device_regs *regs = csiphy->regs;
u8 settle_cnt;
u8 val;
int i;
@@ -587,16 +649,20 @@ static void csiphy_lanes_enable(struct csiphy_device *csiphy,
for (i = 0; i < c->num_data; i++)
val |= BIT(c->data[i].pos * 2);
- writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(5));
+ writel_relaxed(val, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(regs->offset, 5));
val = CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_COMMON_PWRDN_B;
- writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(6));
+ writel_relaxed(val, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(regs->offset, 6));
val = 0x02;
- writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(7));
+ writel_relaxed(val, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(regs->offset, 7));
val = 0x00;
- writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(0));
+ writel_relaxed(val, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(regs->offset, 0));
if (csiphy_is_gen2(csiphy->camss->res->version))
csiphy_gen2_config_lanes(csiphy, settle_cnt);
@@ -604,18 +670,61 @@ static void csiphy_lanes_enable(struct csiphy_device *csiphy,
csiphy_gen1_config_lanes(csiphy, cfg, settle_cnt);
/* IRQ_MASK registers - disable all interrupts */
- for (i = 11; i < 22; i++)
- writel_relaxed(0, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(i));
+ for (i = 11; i < 22; i++) {
+ writel_relaxed(0, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(regs->offset, i));
+ }
}
static void csiphy_lanes_disable(struct csiphy_device *csiphy,
struct csiphy_config *cfg)
{
+ struct csiphy_device_regs *regs = csiphy->regs;
+
writel_relaxed(0, csiphy->base +
- CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(5));
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(regs->offset, 5));
writel_relaxed(0, csiphy->base +
- CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(6));
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(regs->offset, 6));
+}
+
+static int csiphy_init(struct csiphy_device *csiphy)
+{
+ struct device *dev = csiphy->camss->dev;
+ struct csiphy_device_regs *regs;
+
+ regs = devm_kmalloc(dev, sizeof(*regs), GFP_KERNEL);
+ if (!regs)
+ return -ENOMEM;
+
+ csiphy->regs = regs;
+ regs->offset = 0x800;
+
+ switch (csiphy->camss->res->version) {
+ case CAMSS_845:
+ regs->lane_regs = &lane_regs_sdm845[0];
+ regs->lane_array_size = ARRAY_SIZE(lane_regs_sdm845);
+ break;
+ case CAMSS_7280:
+ case CAMSS_8250:
+ regs->lane_regs = &lane_regs_sm8250[0];
+ regs->lane_array_size = ARRAY_SIZE(lane_regs_sm8250);
+ break;
+ case CAMSS_8280XP:
+ regs->lane_regs = &lane_regs_sc8280xp[0];
+ regs->lane_array_size = ARRAY_SIZE(lane_regs_sc8280xp);
+ break;
+ case CAMSS_8550:
+ regs->lane_regs = &lane_regs_sm8550[0];
+ regs->lane_array_size = ARRAY_SIZE(lane_regs_sm8550);
+ regs->offset = 0x1000;
+ break;
+ default:
+ WARN(1, "unknown csiphy version\n");
+ return -ENODEV;
+ }
+
+ return 0;
}
const struct csiphy_hw_ops csiphy_ops_3ph_1_0 = {
@@ -625,4 +734,5 @@ const struct csiphy_hw_ops csiphy_ops_3ph_1_0 = {
.lanes_enable = csiphy_lanes_enable,
.lanes_disable = csiphy_lanes_disable,
.isr = csiphy_isr,
+ .init = csiphy_init,
};
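
For reference, a minimal sketch of how the now offset-parameterised common-block macro plausibly expands. The real definition lives in the CSIPHY 3ph header, outside the hunks shown here, so the exact formula below is an assumption; what the diff does establish is that csiphy_init() selects 0x800 for the older SoCs and 0x1000 for SM8550, and every CTRLn/STATUSn access simply adds that base:

#include <stdio.h>

/* ASSUMED macro shape -- illustrative only, not copied from the driver */
#define CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(offset, n)  ((offset) + 0x4 * (n))

int main(void)
{
	/* csiphy_init() sets regs->offset = 0x800 (default) or 0x1000 (SM8550) */
	printf("CTRL6, legacy layout: 0x%x\n", CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(0x800, 6));
	printf("CTRL6, SM8550 layout: 0x%x\n", CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(0x1000, 6));
	return 0;
}
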
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
index 3791c2d8a6cf..c053616558a7 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
@@ -594,6 +594,10 @@ int msm_csiphy_subdev_init(struct camss *camss,
csiphy->cfg.combo_mode = 0;
csiphy->res = &res->csiphy;
+ ret = csiphy->res->hw_ops->init(csiphy);
+ if (ret)
+ return ret;
+
/* Memory */
csiphy->base = devm_platform_ioremap_resource_byname(pdev, res->reg[0]);
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.h b/drivers/media/platform/qcom/camss/camss-csiphy.h
index 90cc3f976643..86b98b37838e 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.h
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.h
@@ -77,6 +77,7 @@ struct csiphy_hw_ops {
void (*lanes_disable)(struct csiphy_device *csiphy,
struct csiphy_config *cfg);
irqreturn_t (*isr)(int irq, void *dev);
+ int (*init)(struct csiphy_device *csiphy);
};
struct csiphy_subdev_resources {
@@ -84,6 +85,12 @@ struct csiphy_subdev_resources {
const struct csiphy_formats *formats;
};
+struct csiphy_device_regs {
+ const struct csiphy_lane_regs *lane_regs;
+ int lane_array_size;
+ u32 offset;
+};
+
struct csiphy_device {
struct camss *camss;
u8 id;
@@ -102,6 +109,7 @@ struct csiphy_device {
struct csiphy_config cfg;
struct v4l2_mbus_framefmt fmt[MSM_CSIPHY_PADS_NUM];
const struct csiphy_subdev_resources *res;
+ struct csiphy_device_regs *regs;
};
struct camss_subdev_resources;
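
As a worked illustration of how the per-SoC lane table referenced by the new csiphy_device_regs is consumed, here is a small user-space sketch of the csiphy_gen2_config_lanes() walk shown earlier: entries tagged CSIPHY_SETTLE_CNT_LOWER_BYTE take the computed settle count, CSIPHY_DNP_PARAMS entries are skipped, and everything else is written verbatim. The struct and table below are simplified stand-ins covering only the fields the loop touches, with the register write replaced by a printf; they are not the driver's own definitions.

#include <stdio.h>

enum { CSIPHY_DEFAULT_PARAMS, CSIPHY_SETTLE_CNT_LOWER_BYTE, CSIPHY_DNP_PARAMS };

struct lane_reg {                /* simplified stand-in for struct csiphy_lane_regs */
	unsigned int reg_addr;
	unsigned int reg_data;
	int csiphy_param_type;
};

static const struct lane_reg demo_table[] = {   /* illustrative entries */
	{ 0x0030, 0x00, CSIPHY_DEFAULT_PARAMS },
	{ 0x0008, 0x10, CSIPHY_SETTLE_CNT_LOWER_BYTE },
	{ 0x0014, 0x00, CSIPHY_DNP_PARAMS },
};

int main(void)
{
	unsigned int settle_cnt = 0x23;   /* hypothetical value derived from link frequency */
	unsigned int i, val;

	for (i = 0; i < sizeof(demo_table) / sizeof(demo_table[0]); i++) {
		switch (demo_table[i].csiphy_param_type) {
		case CSIPHY_SETTLE_CNT_LOWER_BYTE:
			val = settle_cnt & 0xff;
			break;
		case CSIPHY_DNP_PARAMS:
			continue;                 /* "do not program" entries are skipped */
		default:
			val = demo_table[i].reg_data;
			break;
		}
		printf("write 0x%02x -> 0x%04x\n", val, demo_table[i].reg_addr);
	}
	return 0;
}
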
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-17x.c b/drivers/media/platform/qcom/camss/camss-vfe-17x.c
index 380c99321030..e5ee7e717b3b 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-17x.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-17x.c
@@ -14,8 +14,6 @@
#include "camss.h"
#include "camss-vfe.h"
-#define VFE_HW_VERSION (0x000)
-
#define VFE_GLOBAL_RESET_CMD (0x018)
#define GLOBAL_RESET_CMD_CORE BIT(0)
#define GLOBAL_RESET_CMD_CAMIF BIT(1)
@@ -176,20 +174,6 @@
#define VFE_BUS_WM_FRAME_INC(n) (0x2258 + (n) * 0x100)
#define VFE_BUS_WM_BURST_LIMIT(n) (0x225c + (n) * 0x100)
-static u32 vfe_hw_version(struct vfe_device *vfe)
-{
- u32 hw_version = readl_relaxed(vfe->base + VFE_HW_VERSION);
-
- u32 gen = (hw_version >> 28) & 0xF;
- u32 rev = (hw_version >> 16) & 0xFFF;
- u32 step = hw_version & 0xFFFF;
-
- dev_dbg(vfe->camss->dev, "VFE HW Version = %u.%u.%u\n",
- gen, rev, step);
-
- return hw_version;
-}
-
static inline void vfe_reg_set(struct vfe_device *vfe, u32 reg, u32 set_bits)
{
u32 bits = readl_relaxed(vfe->base + reg);
@@ -438,62 +422,6 @@ error:
return -EINVAL;
}
-static int vfe_enable_output(struct vfe_line *line)
-{
- struct vfe_device *vfe = to_vfe(line);
- struct vfe_output *output = &line->output;
- const struct vfe_hw_ops *ops = vfe->res->hw_ops;
- struct media_entity *sensor;
- unsigned long flags;
- unsigned int frame_skip = 0;
- unsigned int i;
-
- sensor = camss_find_sensor(&line->subdev.entity);
- if (sensor) {
- struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(sensor);
-
- v4l2_subdev_call(subdev, sensor, g_skip_frames, &frame_skip);
- /* Max frame skip is 29 frames */
- if (frame_skip > VFE_FRAME_DROP_VAL - 1)
- frame_skip = VFE_FRAME_DROP_VAL - 1;
- }
-
- spin_lock_irqsave(&vfe->output_lock, flags);
-
- ops->reg_update_clear(vfe, line->id);
-
- if (output->state > VFE_OUTPUT_RESERVED) {
- dev_err(vfe->camss->dev, "Output is not in reserved state %d\n",
- output->state);
- spin_unlock_irqrestore(&vfe->output_lock, flags);
- return -EINVAL;
- }
-
- WARN_ON(output->gen2.active_num);
-
- output->state = VFE_OUTPUT_ON;
-
- output->sequence = 0;
- output->wait_reg_update = 0;
- reinit_completion(&output->reg_update);
-
- vfe_wm_start(vfe, output->wm_idx[0], line);
-
- for (i = 0; i < 2; i++) {
- output->buf[i] = vfe_buf_get_pending(output);
- if (!output->buf[i])
- break;
- output->gen2.active_num++;
- vfe_wm_update(vfe, output->wm_idx[0], output->buf[i]->addr[0], line);
- }
-
- ops->reg_update(vfe, line->id);
-
- spin_unlock_irqrestore(&vfe->output_lock, flags);
-
- return 0;
-}
-
/*
* vfe_enable - Enable streaming on VFE line
* @line: VFE line
@@ -518,7 +446,7 @@ static int vfe_enable(struct vfe_line *line)
if (ret < 0)
goto error_get_output;
- ret = vfe_enable_output(line);
+ ret = vfe_enable_output_v2(line);
if (ret < 0)
goto error_enable_output;
@@ -627,40 +555,6 @@ out_unlock:
spin_unlock_irqrestore(&vfe->output_lock, flags);
}
-/*
- * vfe_queue_buffer - Add empty buffer
- * @vid: Video device structure
- * @buf: Buffer to be enqueued
- *
- * Add an empty buffer - depending on the current number of buffers it will be
- * put in pending buffer queue or directly given to the hardware to be filled.
- *
- * Return 0 on success or a negative error code otherwise
- */
-static int vfe_queue_buffer(struct camss_video *vid,
- struct camss_buffer *buf)
-{
- struct vfe_line *line = container_of(vid, struct vfe_line, video_out);
- struct vfe_device *vfe = to_vfe(line);
- struct vfe_output *output;
- unsigned long flags;
-
- output = &line->output;
-
- spin_lock_irqsave(&vfe->output_lock, flags);
-
- if (output->state == VFE_OUTPUT_ON && output->gen2.active_num < 2) {
- output->buf[output->gen2.active_num++] = buf;
- vfe_wm_update(vfe, output->wm_idx[0], buf->addr[0], line);
- } else {
- vfe_buf_add_pending(output, buf);
- }
-
- spin_unlock_irqrestore(&vfe->output_lock, flags);
-
- return 0;
-}
-
static const struct vfe_isr_ops vfe_isr_ops_170 = {
.reset_ack = vfe_isr_reset_ack,
.halt_ack = vfe_isr_halt_ack,
@@ -671,7 +565,7 @@ static const struct vfe_isr_ops vfe_isr_ops_170 = {
};
static const struct camss_video_ops vfe_video_ops_170 = {
- .queue_buffer = vfe_queue_buffer,
+ .queue_buffer = vfe_queue_buffer_v2,
.flush_buffers = vfe_flush_buffers,
};
@@ -695,5 +589,7 @@ const struct vfe_hw_ops vfe_ops_170 = {
.vfe_enable = vfe_enable,
.vfe_halt = vfe_halt,
.violation_read = vfe_violation_read,
+ .vfe_wm_start = vfe_wm_start,
.vfe_wm_stop = vfe_wm_stop,
+ .vfe_wm_update = vfe_wm_update,
};
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
index 9a9007c3ff33..901677293d97 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
@@ -210,15 +210,6 @@
#define MSM_VFE_VFE0_UB_SIZE 1023
#define MSM_VFE_VFE0_UB_SIZE_RDI (MSM_VFE_VFE0_UB_SIZE / 3)
-static u32 vfe_hw_version(struct vfe_device *vfe)
-{
- u32 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
-
- dev_dbg(vfe->camss->dev, "VFE HW Version = 0x%08x\n", hw_version);
-
- return hw_version;
-}
-
static u16 vfe_get_ub_size(u8 vfe_id)
{
if (vfe_id == 0)
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
index ce0719106bd3..76729607db02 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
@@ -18,8 +18,6 @@
#include "camss-vfe-gen1.h"
-#define VFE_0_HW_VERSION 0x000
-
#define VFE_0_GLOBAL_RESET_CMD 0x018
#define VFE_0_GLOBAL_RESET_CMD_CORE BIT(0)
#define VFE_0_GLOBAL_RESET_CMD_CAMIF BIT(1)
@@ -254,15 +252,6 @@
#define MSM_VFE_VFE1_UB_SIZE 1535
#define MSM_VFE_VFE1_UB_SIZE_RDI (MSM_VFE_VFE1_UB_SIZE / 3)
-static u32 vfe_hw_version(struct vfe_device *vfe)
-{
- u32 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
-
- dev_dbg(vfe->camss->dev, "VFE HW Version = 0x%08x\n", hw_version);
-
- return hw_version;
-}
-
static u16 vfe_get_ub_size(u8 vfe_id)
{
if (vfe_id == 0)
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-8.c b/drivers/media/platform/qcom/camss/camss-vfe-4-8.c
index 6b59c8107a3c..b2f7d855d8dd 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-8.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-8.c
@@ -17,8 +17,6 @@
#include "camss-vfe.h"
#include "camss-vfe-gen1.h"
-#define VFE_0_HW_VERSION 0x000
-
#define VFE_0_GLOBAL_RESET_CMD 0x018
#define VFE_0_GLOBAL_RESET_CMD_CORE BIT(0)
#define VFE_0_GLOBAL_RESET_CMD_CAMIF BIT(1)
@@ -247,15 +245,6 @@
#define MSM_VFE_VFE1_UB_SIZE 1535
#define MSM_VFE_VFE1_UB_SIZE_RDI (MSM_VFE_VFE1_UB_SIZE / 3)
-static u32 vfe_hw_version(struct vfe_device *vfe)
-{
- u32 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
-
- dev_dbg(vfe->camss->dev, "VFE HW Version = 0x%08x\n", hw_version);
-
- return hw_version;
-}
-
static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits)
{
u32 bits = readl_relaxed(vfe->base + reg);
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-480.c b/drivers/media/platform/qcom/camss/camss-vfe-480.c
index dc2735476c82..4feea590a47b 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-480.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-480.c
@@ -15,8 +15,6 @@
#include "camss.h"
#include "camss-vfe.h"
-#define VFE_HW_VERSION (0x00)
-
#define VFE_GLOBAL_RESET_CMD (vfe_is_lite(vfe) ? 0x0c : 0x1c)
#define GLOBAL_RESET_HW_AND_REG (vfe_is_lite(vfe) ? BIT(1) : BIT(0))
@@ -92,19 +90,6 @@ static inline int bus_irq_mask_0_comp_done(struct vfe_device *vfe, int n)
#define MAX_VFE_OUTPUT_LINES 4
-static u32 vfe_hw_version(struct vfe_device *vfe)
-{
- u32 hw_version = readl_relaxed(vfe->base + VFE_HW_VERSION);
-
- u32 gen = (hw_version >> 28) & 0xF;
- u32 rev = (hw_version >> 16) & 0xFFF;
- u32 step = hw_version & 0xFFFF;
-
- dev_dbg(vfe->camss->dev, "VFE HW Version = %u.%u.%u\n", gen, rev, step);
-
- return hw_version;
-}
-
static void vfe_global_reset(struct vfe_device *vfe)
{
writel_relaxed(IRQ_MASK_0_RESET_ACK, vfe->base + VFE_IRQ_MASK(0));
@@ -167,18 +152,16 @@ static inline void vfe_reg_update_clear(struct vfe_device *vfe,
vfe->reg_update &= ~REG_UPDATE_RDI(vfe, line_id);
}
-static void vfe_enable_irq_common(struct vfe_device *vfe)
-{
- /* enable reset ack IRQ and top BUS status IRQ */
- writel_relaxed(IRQ_MASK_0_RESET_ACK | IRQ_MASK_0_BUS_TOP_IRQ,
- vfe->base + VFE_IRQ_MASK(0));
-}
-
-static void vfe_enable_lines_irq(struct vfe_device *vfe)
+static void vfe_enable_irq(struct vfe_device *vfe)
{
int i;
u32 bus_irq_mask = 0;
+ if (!vfe->stream_count)
+ /* enable reset ack IRQ and top BUS status IRQ */
+ writel(IRQ_MASK_0_RESET_ACK | IRQ_MASK_0_BUS_TOP_IRQ,
+ vfe->base + VFE_IRQ_MASK(0));
+
for (i = 0; i < MAX_VFE_OUTPUT_LINES; i++) {
/* Enable IRQ for newly added lines, but also keep already running lines's IRQ */
if (vfe->line[i].output.state == VFE_OUTPUT_RESERVED ||
@@ -188,11 +171,10 @@ static void vfe_enable_lines_irq(struct vfe_device *vfe)
}
}
- writel_relaxed(bus_irq_mask, vfe->base + VFE_BUS_IRQ_MASK(0));
+ writel(bus_irq_mask, vfe->base + VFE_BUS_IRQ_MASK(0));
}
static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id);
-static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm);
/*
* vfe_isr - VFE module interrupt handler
@@ -226,7 +208,7 @@ static irqreturn_t vfe_isr(int irq, void *dev)
vfe_isr_reg_update(vfe, i);
if (status & BUS_IRQ_MASK_0_COMP_DONE(vfe, RDI_COMP_GROUP(i)))
- vfe_isr_wm_done(vfe, i);
+ vfe_buf_done(vfe, i);
}
}
@@ -245,132 +227,6 @@ static int vfe_halt(struct vfe_device *vfe)
return 0;
}
-static int vfe_get_output(struct vfe_line *line)
-{
- struct vfe_device *vfe = to_vfe(line);
- struct vfe_output *output;
- unsigned long flags;
-
- spin_lock_irqsave(&vfe->output_lock, flags);
-
- output = &line->output;
- if (output->state > VFE_OUTPUT_RESERVED) {
- dev_err(vfe->camss->dev, "Output is running\n");
- goto error;
- }
-
- output->wm_num = 1;
-
- /* Correspondence between VFE line number and WM number.
- * line 0 -> RDI 0, line 1 -> RDI1, line 2 -> RDI2, line 3 -> PIX/RDI3
- * Note this 1:1 mapping will not work for PIX streams.
- */
- output->wm_idx[0] = line->id;
- vfe->wm_output_map[line->id] = line->id;
-
- output->drop_update_idx = 0;
-
- spin_unlock_irqrestore(&vfe->output_lock, flags);
-
- return 0;
-
-error:
- spin_unlock_irqrestore(&vfe->output_lock, flags);
- output->state = VFE_OUTPUT_OFF;
-
- return -EINVAL;
-}
-
-static int vfe_enable_output(struct vfe_line *line)
-{
- struct vfe_device *vfe = to_vfe(line);
- struct vfe_output *output = &line->output;
- unsigned long flags;
- unsigned int i;
-
- spin_lock_irqsave(&vfe->output_lock, flags);
-
- vfe_reg_update_clear(vfe, line->id);
-
- if (output->state > VFE_OUTPUT_RESERVED) {
- dev_err(vfe->camss->dev, "Output is not in reserved state %d\n",
- output->state);
- spin_unlock_irqrestore(&vfe->output_lock, flags);
- return -EINVAL;
- }
-
- WARN_ON(output->gen2.active_num);
-
- output->state = VFE_OUTPUT_ON;
-
- output->sequence = 0;
- output->wait_reg_update = 0;
- reinit_completion(&output->reg_update);
-
- vfe_wm_start(vfe, output->wm_idx[0], line);
-
- for (i = 0; i < 2; i++) {
- output->buf[i] = vfe_buf_get_pending(output);
- if (!output->buf[i])
- break;
- output->gen2.active_num++;
- vfe_wm_update(vfe, output->wm_idx[0], output->buf[i]->addr[0], line);
- }
-
- vfe_reg_update(vfe, line->id);
-
- spin_unlock_irqrestore(&vfe->output_lock, flags);
-
- return 0;
-}
-
-/*
- * vfe_enable - Enable streaming on VFE line
- * @line: VFE line
- *
- * Return 0 on success or a negative error code otherwise
- */
-static int vfe_enable(struct vfe_line *line)
-{
- struct vfe_device *vfe = to_vfe(line);
- int ret;
-
- mutex_lock(&vfe->stream_lock);
-
- if (!vfe->stream_count)
- vfe_enable_irq_common(vfe);
-
- vfe->stream_count++;
-
- vfe_enable_lines_irq(vfe);
-
- mutex_unlock(&vfe->stream_lock);
-
- ret = vfe_get_output(line);
- if (ret < 0)
- goto error_get_output;
-
- ret = vfe_enable_output(line);
- if (ret < 0)
- goto error_enable_output;
-
- vfe->was_streaming = 1;
-
- return 0;
-
-error_enable_output:
- vfe_put_output(line);
-
-error_get_output:
- mutex_lock(&vfe->stream_lock);
-
- vfe->stream_count--;
-
- mutex_unlock(&vfe->stream_lock);
-
- return ret;
-}
-
/*
* vfe_isr_reg_update - Process reg update interrupt
* @vfe: VFE Device
@@ -394,114 +250,48 @@ static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
spin_unlock_irqrestore(&vfe->output_lock, flags);
}
-/*
- * vfe_isr_wm_done - Process write master done interrupt
- * @vfe: VFE Device
- * @wm: Write master id
- */
-static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm)
-{
- struct vfe_line *line = &vfe->line[vfe->wm_output_map[wm]];
- struct camss_buffer *ready_buf;
- struct vfe_output *output;
- unsigned long flags;
- u32 index;
- u64 ts = ktime_get_ns();
-
- spin_lock_irqsave(&vfe->output_lock, flags);
-
- if (vfe->wm_output_map[wm] == VFE_LINE_NONE) {
- dev_err_ratelimited(vfe->camss->dev,
- "Received wm done for unmapped index\n");
- goto out_unlock;
- }
- output = &vfe->line[vfe->wm_output_map[wm]].output;
-
- ready_buf = output->buf[0];
- if (!ready_buf) {
- dev_err_ratelimited(vfe->camss->dev,
- "Missing ready buf %d!\n", output->state);
- goto out_unlock;
- }
-
- ready_buf->vb.vb2_buf.timestamp = ts;
- ready_buf->vb.sequence = output->sequence++;
-
- index = 0;
- output->buf[0] = output->buf[1];
- if (output->buf[0])
- index = 1;
-
- output->buf[index] = vfe_buf_get_pending(output);
-
- if (output->buf[index])
- vfe_wm_update(vfe, output->wm_idx[0], output->buf[index]->addr[0], line);
- else
- output->gen2.active_num--;
-
- spin_unlock_irqrestore(&vfe->output_lock, flags);
-
- vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
-
- return;
+static const struct camss_video_ops vfe_video_ops_480 = {
+ .queue_buffer = vfe_queue_buffer_v2,
+ .flush_buffers = vfe_flush_buffers,
+};
-out_unlock:
- spin_unlock_irqrestore(&vfe->output_lock, flags);
+static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
+{
+ vfe->video_ops = vfe_video_ops_480;
}
-/*
- * vfe_queue_buffer - Add empty buffer
- * @vid: Video device structure
- * @buf: Buffer to be enqueued
- *
- * Add an empty buffer - depending on the current number of buffers it will be
- * put in pending buffer queue or directly given to the hardware to be filled.
- *
- * Return 0 on success or a negative error code otherwise
- */
-static int vfe_queue_buffer(struct camss_video *vid,
- struct camss_buffer *buf)
+static void vfe_isr_read(struct vfe_device *vfe, u32 *value0, u32 *value1)
{
- struct vfe_line *line = container_of(vid, struct vfe_line, video_out);
- struct vfe_device *vfe = to_vfe(line);
- struct vfe_output *output;
- unsigned long flags;
-
- output = &line->output;
-
- spin_lock_irqsave(&vfe->output_lock, flags);
-
- if (output->state == VFE_OUTPUT_ON && output->gen2.active_num < 2) {
- output->buf[output->gen2.active_num++] = buf;
- vfe_wm_update(vfe, output->wm_idx[0], buf->addr[0], line);
- } else {
- vfe_buf_add_pending(output, buf);
- }
-
- spin_unlock_irqrestore(&vfe->output_lock, flags);
-
- return 0;
+ /* nop */
}
-static const struct camss_video_ops vfe_video_ops_480 = {
- .queue_buffer = vfe_queue_buffer,
- .flush_buffers = vfe_flush_buffers,
-};
+static void vfe_violation_read(struct vfe_device *vfe)
+{
+ /* nop */
+}
-static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
+static void vfe_buf_done_480(struct vfe_device *vfe, int port_id)
{
- vfe->video_ops = vfe_video_ops_480;
+ /* nop */
}
const struct vfe_hw_ops vfe_ops_480 = {
+ .enable_irq = vfe_enable_irq,
.global_reset = vfe_global_reset,
.hw_version = vfe_hw_version,
.isr = vfe_isr,
+ .isr_read = vfe_isr_read,
+ .reg_update = vfe_reg_update,
+ .reg_update_clear = vfe_reg_update_clear,
.pm_domain_off = vfe_pm_domain_off,
.pm_domain_on = vfe_pm_domain_on,
.subdev_init = vfe_subdev_init,
.vfe_disable = vfe_disable,
- .vfe_enable = vfe_enable,
+ .vfe_enable = vfe_enable_v2,
.vfe_halt = vfe_halt,
+ .violation_read = vfe_violation_read,
+ .vfe_wm_start = vfe_wm_start,
.vfe_wm_stop = vfe_wm_stop,
+ .vfe_buf_done = vfe_buf_done_480,
+ .vfe_wm_update = vfe_wm_update,
};
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-780.c b/drivers/media/platform/qcom/camss/camss-vfe-780.c
new file mode 100644
index 000000000000..b9812d70f91b
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-vfe-780.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module v780 (SM8550)
+ *
+ * Copyright (c) 2024 Qualcomm Technologies, Inc.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+
+#include "camss.h"
+#include "camss-vfe.h"
+
+#define BUS_REG_BASE (vfe_is_lite(vfe) ? 0x200 : 0xC00)
+
+#define VFE_BUS_WM_CGC_OVERRIDE (BUS_REG_BASE + 0x08)
+#define WM_CGC_OVERRIDE_ALL (0x7FFFFFF)
+
+#define VFE_BUS_WM_TEST_BUS_CTRL (BUS_REG_BASE + 0xDC)
+
+#define VFE_BUS_WM_CFG(n) (BUS_REG_BASE + 0x200 + (n) * 0x100)
+#define WM_CFG_EN BIT(0)
+#define WM_VIR_FRM_EN BIT(1)
+#define WM_CFG_MODE BIT(16)
+#define VFE_BUS_WM_IMAGE_ADDR(n) (BUS_REG_BASE + 0x204 + (n) * 0x100)
+#define VFE_BUS_WM_FRAME_INCR(n) (BUS_REG_BASE + 0x208 + (n) * 0x100)
+#define VFE_BUS_WM_IMAGE_CFG_0(n) (BUS_REG_BASE + 0x20c + (n) * 0x100)
+#define WM_IMAGE_CFG_0_DEFAULT_WIDTH (0xFFFF)
+#define VFE_BUS_WM_IMAGE_CFG_2(n) (BUS_REG_BASE + 0x214 + (n) * 0x100)
+#define WM_IMAGE_CFG_2_DEFAULT_STRIDE (0xFFFF)
+#define VFE_BUS_WM_PACKER_CFG(n) (BUS_REG_BASE + 0x218 + (n) * 0x100)
+
+#define VFE_BUS_WM_IRQ_SUBSAMPLE_PERIOD(n) (BUS_REG_BASE + 0x230 + (n) * 0x100)
+#define VFE_BUS_WM_IRQ_SUBSAMPLE_PATTERN(n) (BUS_REG_BASE + 0x234 + (n) * 0x100)
+#define VFE_BUS_WM_FRAMEDROP_PERIOD(n) (BUS_REG_BASE + 0x238 + (n) * 0x100)
+#define VFE_BUS_WM_FRAMEDROP_PATTERN(n) (BUS_REG_BASE + 0x23c + (n) * 0x100)
+
+#define VFE_BUS_WM_MMU_PREFETCH_CFG(n) (BUS_REG_BASE + 0x260 + (n) * 0x100)
+#define VFE_BUS_WM_MMU_PREFETCH_MAX_OFFSET(n) (BUS_REG_BASE + 0x264 + (n) * 0x100)
+
+/*
+ * Bus client mapping:
+ *
+ * Full VFE:
+ * 23 = RDI0, 24 = RDI1, 25 = RDI2
+ *
+ * VFE LITE:
+ * 0 = RDI0, 1 = RDI1, 2 = RDI3, 4 = RDI4
+ */
+#define RDI_WM(n) ((vfe_is_lite(vfe) ? 0x0 : 0x17) + (n))
+
+static void vfe_wm_start(struct vfe_device *vfe, u8 wm, struct vfe_line *line)
+{
+ struct v4l2_pix_format_mplane *pix =
+ &line->video_out.active_fmt.fmt.pix_mp;
+
+ wm = RDI_WM(wm);
+
+ /* no clock gating at bus input */
+ writel(WM_CGC_OVERRIDE_ALL, vfe->base + VFE_BUS_WM_CGC_OVERRIDE);
+
+ writel(0x0, vfe->base + VFE_BUS_WM_TEST_BUS_CTRL);
+
+ writel(ALIGN(pix->plane_fmt[0].bytesperline, 16) * pix->height >> 8,
+ vfe->base + VFE_BUS_WM_FRAME_INCR(wm));
+ writel((WM_IMAGE_CFG_0_DEFAULT_WIDTH & 0xFFFF),
+ vfe->base + VFE_BUS_WM_IMAGE_CFG_0(wm));
+ writel(WM_IMAGE_CFG_2_DEFAULT_STRIDE,
+ vfe->base + VFE_BUS_WM_IMAGE_CFG_2(wm));
+ writel(0, vfe->base + VFE_BUS_WM_PACKER_CFG(wm));
+
+ /* no dropped frames, one irq per frame */
+ writel(0, vfe->base + VFE_BUS_WM_FRAMEDROP_PERIOD(wm));
+ writel(1, vfe->base + VFE_BUS_WM_FRAMEDROP_PATTERN(wm));
+ writel(0, vfe->base + VFE_BUS_WM_IRQ_SUBSAMPLE_PERIOD(wm));
+ writel(1, vfe->base + VFE_BUS_WM_IRQ_SUBSAMPLE_PATTERN(wm));
+
+ writel(1, vfe->base + VFE_BUS_WM_MMU_PREFETCH_CFG(wm));
+ writel(0xFFFFFFFF, vfe->base + VFE_BUS_WM_MMU_PREFETCH_MAX_OFFSET(wm));
+
+ writel(WM_CFG_EN | WM_CFG_MODE, vfe->base + VFE_BUS_WM_CFG(wm));
+}
+
+static void vfe_wm_stop(struct vfe_device *vfe, u8 wm)
+{
+ wm = RDI_WM(wm);
+ writel(0, vfe->base + VFE_BUS_WM_CFG(wm));
+}
+
+static void vfe_wm_update(struct vfe_device *vfe, u8 wm, u32 addr,
+ struct vfe_line *line)
+{
+ wm = RDI_WM(wm);
+ writel((addr >> 8) & 0xFFFFFFFF, vfe->base + VFE_BUS_WM_IMAGE_ADDR(wm));
+
+ dev_dbg(vfe->camss->dev, "wm:%d, image buf addr:0x%x\n",
+ wm, addr);
+}
+
+static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
+{
+ int port_id = line_id;
+
+ camss_reg_update(vfe->camss, vfe->id, port_id, false);
+}
+
+static inline void vfe_reg_update_clear(struct vfe_device *vfe,
+ enum vfe_line_id line_id)
+{
+ int port_id = line_id;
+
+ camss_reg_update(vfe->camss, vfe->id, port_id, true);
+}
+
+static const struct camss_video_ops vfe_video_ops_780 = {
+ .queue_buffer = vfe_queue_buffer_v2,
+ .flush_buffers = vfe_flush_buffers,
+};
+
+static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
+{
+ vfe->video_ops = vfe_video_ops_780;
+}
+
+static void vfe_global_reset(struct vfe_device *vfe)
+{
+ vfe_isr_reset_ack(vfe);
+}
+
+static irqreturn_t vfe_isr(int irq, void *dev)
+{
+ /* nop */
+ return IRQ_HANDLED;
+}
+
+static int vfe_halt(struct vfe_device *vfe)
+{
+ /* rely on vfe_disable_output() to stop the VFE */
+ return 0;
+}
+
+const struct vfe_hw_ops vfe_ops_780 = {
+ .global_reset = vfe_global_reset,
+ .hw_version = vfe_hw_version,
+ .isr = vfe_isr,
+ .pm_domain_off = vfe_pm_domain_off,
+ .pm_domain_on = vfe_pm_domain_on,
+ .reg_update = vfe_reg_update,
+ .reg_update_clear = vfe_reg_update_clear,
+ .subdev_init = vfe_subdev_init,
+ .vfe_disable = vfe_disable,
+ .vfe_enable = vfe_enable_v2,
+ .vfe_halt = vfe_halt,
+ .vfe_wm_start = vfe_wm_start,
+ .vfe_wm_stop = vfe_wm_stop,
+ .vfe_buf_done = vfe_buf_done,
+ .vfe_wm_update = vfe_wm_update,
+};
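
A small arithmetic sketch of the address programming used by vfe_wm_start() and vfe_wm_update() above: both the per-frame increment and the image address are shifted right by 8 before being written, which suggests the v780 bus registers are programmed in 256-byte units. The stride, height and buffer address below are made-up examples, not values taken from the driver.

#include <stdio.h>

#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int bytesperline = 3840;       /* hypothetical RDI line stride */
	unsigned int height = 2160;
	unsigned int dma_addr = 0xd0000000;     /* hypothetical buffer address */

	/* VFE_BUS_WM_FRAME_INCR: 16-byte-aligned frame size, >> 8 */
	unsigned int frame_incr = (ALIGN_UP(bytesperline, 16) * height) >> 8;
	/* VFE_BUS_WM_IMAGE_ADDR: buffer address, also >> 8 */
	unsigned int image_addr = dma_addr >> 8;

	printf("FRAME_INCR = 0x%08x (frame of %u bytes)\n",
	       frame_incr, ALIGN_UP(bytesperline, 16) * height);
	printf("IMAGE_ADDR = 0x%08x\n", image_addr);
	return 0;
}
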
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-gen1.c b/drivers/media/platform/qcom/camss/camss-vfe-gen1.c
index eb33c03df27e..d84a375e3318 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-gen1.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-gen1.c
@@ -170,7 +170,7 @@ static int vfe_enable_output(struct vfe_line *line)
struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output = &line->output;
const struct vfe_hw_ops *ops = vfe->res->hw_ops;
- struct media_entity *sensor;
+ struct media_pad *sensor_pad;
unsigned long flags;
unsigned int frame_skip = 0;
unsigned int i;
@@ -180,9 +180,10 @@ static int vfe_enable_output(struct vfe_line *line)
if (!ub_size)
return -EINVAL;
- sensor = camss_find_sensor(&line->subdev.entity);
- if (sensor) {
- struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(sensor);
+ sensor_pad = camss_find_sensor_pad(&line->subdev.entity);
+ if (sensor_pad) {
+ struct v4l2_subdev *subdev =
+ media_entity_to_v4l2_subdev(sensor_pad->entity);
v4l2_subdev_call(subdev, sensor, g_skip_frames, &frame_skip);
/* Max frame skip is 29 frames */
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
index 95f6a1ac7eaf..cf0e8f5c004a 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe.c
@@ -32,6 +32,11 @@
#define SCALER_RATIO_MAX 16
+#define VFE_HW_VERSION 0x0
+#define HW_VERSION_STEPPING 0
+#define HW_VERSION_REVISION 16
+#define HW_VERSION_GENERATION 28
+
static const struct camss_format_info formats_rdi_8x16[] = {
{ MEDIA_BUS_FMT_UYVY8_1X16, 8, V4L2_PIX_FMT_UYVY, 1,
PER_PLANE_DATA(0, 1, 1, 1, 1, 16) },
@@ -340,6 +345,7 @@ static u32 vfe_src_pad_code(struct vfe_line *line, u32 sink_code,
case CAMSS_8250:
case CAMSS_8280XP:
case CAMSS_845:
+ case CAMSS_8550:
switch (sink_code) {
case MEDIA_BUS_FMT_YUYV8_1X16:
{
@@ -400,10 +406,278 @@ static u32 vfe_src_pad_code(struct vfe_line *line, u32 sink_code,
return sink_code;
}
break;
+ default:
+ WARN(1, "Unsupported HW version: %x\n",
+ vfe->camss->res->version);
+ break;
}
return 0;
}
+/*
+ * vfe_hw_version - Read the VFE hardware version
+ * @vfe: VFE Device
+ *
+ * Return vfe hw version
+ */
+u32 vfe_hw_version(struct vfe_device *vfe)
+{
+ u32 hw_version = readl_relaxed(vfe->base + VFE_HW_VERSION);
+
+ u32 gen = (hw_version >> HW_VERSION_GENERATION) & 0xF;
+ u32 rev = (hw_version >> HW_VERSION_REVISION) & 0xFFF;
+ u32 step = (hw_version >> HW_VERSION_STEPPING) & 0xFFFF;
+
+ dev_info(vfe->camss->dev, "VFE:%d HW Version = %u.%u.%u\n",
+ vfe->id, gen, rev, step);
+
+ return hw_version;
+}
+
+/*
+ * vfe_buf_done - Process write master done interrupt
+ * @vfe: VFE Device
+ * @wm: Write master id
+ */
+void vfe_buf_done(struct vfe_device *vfe, int wm)
+{
+ struct vfe_line *line = &vfe->line[vfe->wm_output_map[wm]];
+ const struct vfe_hw_ops *ops = vfe->res->hw_ops;
+ struct camss_buffer *ready_buf;
+ struct vfe_output *output;
+ unsigned long flags;
+ u32 index;
+ u64 ts = ktime_get_ns();
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+
+ if (vfe->wm_output_map[wm] == VFE_LINE_NONE) {
+ dev_err_ratelimited(vfe->camss->dev,
+ "Received wm done for unmapped index\n");
+ goto out_unlock;
+ }
+ output = &vfe->line[vfe->wm_output_map[wm]].output;
+
+ ready_buf = output->buf[0];
+ if (!ready_buf) {
+ dev_err_ratelimited(vfe->camss->dev,
+ "Missing ready buf %d!\n", output->state);
+ goto out_unlock;
+ }
+
+ ready_buf->vb.vb2_buf.timestamp = ts;
+ ready_buf->vb.sequence = output->sequence++;
+
+ index = 0;
+ output->buf[0] = output->buf[1];
+ if (output->buf[0])
+ index = 1;
+
+ output->buf[index] = vfe_buf_get_pending(output);
+
+ if (output->buf[index]) {
+ ops->vfe_wm_update(vfe, output->wm_idx[0],
+ output->buf[index]->addr[0],
+ line);
+ ops->reg_update(vfe, line->id);
+ } else {
+ output->gen2.active_num--;
+ }
+
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+ vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+
+ return;
+
+out_unlock:
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+}
+
+int vfe_enable_output_v2(struct vfe_line *line)
+{
+ struct vfe_device *vfe = to_vfe(line);
+ struct vfe_output *output = &line->output;
+ const struct vfe_hw_ops *ops = vfe->res->hw_ops;
+ struct media_pad *sensor_pad;
+ unsigned long flags;
+ unsigned int frame_skip = 0;
+ unsigned int i;
+
+ sensor_pad = camss_find_sensor_pad(&line->subdev.entity);
+ if (sensor_pad) {
+ struct v4l2_subdev *subdev =
+ media_entity_to_v4l2_subdev(sensor_pad->entity);
+
+ v4l2_subdev_call(subdev, sensor, g_skip_frames, &frame_skip);
+ /* Max frame skip is 29 frames */
+ if (frame_skip > VFE_FRAME_DROP_VAL - 1)
+ frame_skip = VFE_FRAME_DROP_VAL - 1;
+ }
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+
+ ops->reg_update_clear(vfe, line->id);
+
+ if (output->state > VFE_OUTPUT_RESERVED) {
+ dev_err(vfe->camss->dev,
+ "Output is not in reserved state %d\n",
+ output->state);
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+ return -EINVAL;
+ }
+
+ WARN_ON(output->gen2.active_num);
+
+ output->state = VFE_OUTPUT_ON;
+
+ output->sequence = 0;
+ output->wait_reg_update = 0;
+ reinit_completion(&output->reg_update);
+
+ ops->vfe_wm_start(vfe, output->wm_idx[0], line);
+
+ for (i = 0; i < 2; i++) {
+ output->buf[i] = vfe_buf_get_pending(output);
+ if (!output->buf[i])
+ break;
+ output->gen2.active_num++;
+ ops->vfe_wm_update(vfe, output->wm_idx[0],
+ output->buf[i]->addr[0], line);
+ ops->reg_update(vfe, line->id);
+ }
+
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+ return 0;
+}
+
+/*
+ * vfe_queue_buffer_v2 - Add empty buffer
+ * @vid: Video device structure
+ * @buf: Buffer to be enqueued
+ *
+ * Add an empty buffer - depending on the current number of buffers it will be
+ * put in pending buffer queue or directly given to the hardware to be filled.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int vfe_queue_buffer_v2(struct camss_video *vid,
+ struct camss_buffer *buf)
+{
+ struct vfe_line *line = container_of(vid, struct vfe_line, video_out);
+ struct vfe_device *vfe = to_vfe(line);
+ const struct vfe_hw_ops *ops = vfe->res->hw_ops;
+ struct vfe_output *output;
+ unsigned long flags;
+
+ output = &line->output;
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+
+ if (output->state == VFE_OUTPUT_ON &&
+ output->gen2.active_num < 2) {
+ output->buf[output->gen2.active_num++] = buf;
+ ops->vfe_wm_update(vfe, output->wm_idx[0],
+ buf->addr[0], line);
+ ops->reg_update(vfe, line->id);
+ } else {
+ vfe_buf_add_pending(output, buf);
+ }
+
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+ return 0;
+}
+
+/*
+ * vfe_enable_v2 - Enable streaming on VFE line
+ * @line: VFE line
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int vfe_enable_v2(struct vfe_line *line)
+{
+ struct vfe_device *vfe = to_vfe(line);
+ const struct vfe_hw_ops *ops = vfe->res->hw_ops;
+ int ret;
+
+ mutex_lock(&vfe->stream_lock);
+
+ if (vfe->res->hw_ops->enable_irq)
+ ops->enable_irq(vfe);
+
+ vfe->stream_count++;
+
+ mutex_unlock(&vfe->stream_lock);
+
+ ret = vfe_get_output_v2(line);
+ if (ret < 0)
+ goto error_get_output;
+
+ ret = vfe_enable_output_v2(line);
+ if (ret < 0)
+ goto error_enable_output;
+
+ vfe->was_streaming = 1;
+
+ return 0;
+
+error_enable_output:
+ vfe_put_output(line);
+
+error_get_output:
+ mutex_lock(&vfe->stream_lock);
+
+ vfe->stream_count--;
+
+ mutex_unlock(&vfe->stream_lock);
+
+ return ret;
+}
+
+/*
+ * vfe_get_output_v2 - Get vfe output port for corresponding VFE line
+ * @line: VFE line
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int vfe_get_output_v2(struct vfe_line *line)
+{
+ struct vfe_device *vfe = to_vfe(line);
+ struct vfe_output *output;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+
+ output = &line->output;
+ if (output->state > VFE_OUTPUT_RESERVED) {
+ dev_err(vfe->camss->dev, "Output is running\n");
+ goto error;
+ }
+
+ output->wm_num = 1;
+
+ /* Correspondence between VFE line number and WM number.
+ * line 0 -> RDI 0, line 1 -> RDI1, line 2 -> RDI2, line 3 -> PIX/RDI3
+ * Note this 1:1 mapping will not work for PIX streams.
+ */
+ output->wm_idx[0] = line->id;
+ vfe->wm_output_map[line->id] = line->id;
+
+ output->drop_update_idx = 0;
+
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+ return 0;
+
+error:
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+ output->state = VFE_OUTPUT_OFF;
+
+ return -EINVAL;
+}
+
int vfe_reset(struct vfe_device *vfe)
{
unsigned long time;
@@ -1698,6 +1972,7 @@ static int vfe_bpl_align(struct vfe_device *vfe)
case CAMSS_8250:
case CAMSS_8280XP:
case CAMSS_845:
+ case CAMSS_8550:
ret = 16;
break;
default:
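
The shared vfe_hw_version() helper added above decodes a generation/revision/stepping triple from the version register. A standalone sketch of the same bitfield arithmetic, using a made-up register value rather than a real hardware readout:

#include <stdio.h>

#define HW_VERSION_STEPPING    0
#define HW_VERSION_REVISION    16
#define HW_VERSION_GENERATION  28

int main(void)
{
	unsigned int hw_version = 0x20110020;   /* hypothetical readout */

	unsigned int gen  = (hw_version >> HW_VERSION_GENERATION) & 0xF;
	unsigned int rev  = (hw_version >> HW_VERSION_REVISION) & 0xFFF;
	unsigned int step = (hw_version >> HW_VERSION_STEPPING) & 0xFFFF;

	printf("VFE HW Version = %u.%u.%u\n", gen, rev, step);   /* -> 2.17.32 */
	return 0;
}
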
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.h b/drivers/media/platform/qcom/camss/camss-vfe.h
index 10e2cc3c0b83..9dec5bc0d1b1 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.h
+++ b/drivers/media/platform/qcom/camss/camss-vfe.h
@@ -99,7 +99,7 @@ struct vfe_line {
struct vfe_device;
struct vfe_hw_ops {
- void (*enable_irq_common)(struct vfe_device *vfe);
+ void (*enable_irq)(struct vfe_device *vfe);
void (*global_reset)(struct vfe_device *vfe);
u32 (*hw_version)(struct vfe_device *vfe);
irqreturn_t (*isr)(int irq, void *dev);
@@ -114,7 +114,12 @@ struct vfe_hw_ops {
int (*vfe_enable)(struct vfe_line *line);
int (*vfe_halt)(struct vfe_device *vfe);
void (*violation_read)(struct vfe_device *vfe);
+ void (*vfe_wm_start)(struct vfe_device *vfe, u8 wm,
+ struct vfe_line *line);
void (*vfe_wm_stop)(struct vfe_device *vfe, u8 wm);
+ void (*vfe_buf_done)(struct vfe_device *vfe, int port_id);
+ void (*vfe_wm_update)(struct vfe_device *vfe, u8 wm, u32 addr,
+ struct vfe_line *line);
};
struct vfe_isr_ops {
@@ -238,6 +243,7 @@ extern const struct vfe_hw_ops vfe_ops_4_7;
extern const struct vfe_hw_ops vfe_ops_4_8;
extern const struct vfe_hw_ops vfe_ops_170;
extern const struct vfe_hw_ops vfe_ops_480;
+extern const struct vfe_hw_ops vfe_ops_780;
int vfe_get(struct vfe_device *vfe);
void vfe_put(struct vfe_device *vfe);
@@ -252,4 +258,55 @@ void vfe_put(struct vfe_device *vfe);
*/
bool vfe_is_lite(struct vfe_device *vfe);
+/*
+ * vfe_hw_version - Read the VFE hardware version
+ * @vfe: VFE Device
+ *
+ * Return vfe hw version
+ */
+u32 vfe_hw_version(struct vfe_device *vfe);
+/*
+ * vfe_enable_v2 - Enable streaming on VFE line
+ * @line: VFE line
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int vfe_enable_v2(struct vfe_line *line);
+
+/*
+ * vfe_buf_done - Process write master done interrupt
+ * @vfe: VFE Device
+ * @wm: Write master id
+ */
+void vfe_buf_done(struct vfe_device *vfe, int wm);
+
+/*
+ * vfe_get_output_v2 - Get vfe output port for corresponding VFE line
+ * @line: VFE line
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int vfe_get_output_v2(struct vfe_line *line);
+
+/*
+ * vfe_enable_output_v2 - Enable vfe output line
+ * @line: VFE line
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int vfe_enable_output_v2(struct vfe_line *line);
+
+/*
+ * vfe_queue_buffer_v2 - Add empty buffer
+ * @vid: Video device structure
+ * @buf: Buffer to be enqueued
+ *
+ * Add an empty buffer - depending on the current number of buffers it will be
+ * put in pending buffer queue or directly given to the hardware to be filled.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int vfe_queue_buffer_v2(struct camss_video *vid,
+ struct camss_buffer *buf);
+
#endif /* QC_MSM_CAMSS_VFE_H */
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index a85e9df0f301..6791dfea91b1 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -738,6 +738,185 @@ static const struct camss_subdev_resources vfe_res_660[] = {
}
};
+static const struct camss_subdev_resources csiphy_res_670[] = {
+ /* CSIPHY0 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "soc_ahb", "cpas_ahb",
+ "csiphy0", "csiphy0_timer" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 19200000, 240000000, 269333333 } },
+ .reg = { "csiphy0" },
+ .interrupt = { "csiphy0" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ },
+
+ /* CSIPHY1 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "soc_ahb", "cpas_ahb",
+ "csiphy1", "csiphy1_timer" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 19200000, 240000000, 269333333 } },
+ .reg = { "csiphy1" },
+ .interrupt = { "csiphy1" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ },
+
+ /* CSIPHY2 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "soc_ahb", "cpas_ahb",
+ "csiphy2", "csiphy2_timer" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 19200000, 240000000, 269333333 } },
+ .reg = { "csiphy2" },
+ .interrupt = { "csiphy2" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ }
+};
+
+static const struct camss_subdev_resources csid_res_670[] = {
+ /* CSID0 */
+ {
+ .regulators = {},
+ .clock = { "cpas_ahb", "soc_ahb", "vfe0",
+ "vfe0_cphy_rx", "csi0" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 100000000, 320000000, 404000000, 480000000, 600000000 },
+ { 384000000 },
+ { 19200000, 75000000, 384000000, 538666667 } },
+ .reg = { "csid0" },
+ .interrupt = { "csid0" },
+ .csid = {
+ .hw_ops = &csid_ops_gen2,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+
+ /* CSID1 */
+ {
+ .regulators = {},
+ .clock = { "cpas_ahb", "soc_ahb", "vfe1",
+ "vfe1_cphy_rx", "csi1" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 100000000, 320000000, 404000000, 480000000, 600000000 },
+ { 384000000 },
+ { 19200000, 75000000, 384000000, 538666667 } },
+ .reg = { "csid1" },
+ .interrupt = { "csid1" },
+ .csid = {
+ .hw_ops = &csid_ops_gen2,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+
+ /* CSID2 */
+ {
+ .regulators = {},
+ .clock = { "cpas_ahb", "soc_ahb", "vfe_lite",
+ "vfe_lite_cphy_rx", "csi2" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 100000000, 320000000, 404000000, 480000000, 600000000 },
+ { 384000000 },
+ { 19200000, 75000000, 384000000, 538666667 } },
+ .reg = { "csid2" },
+ .interrupt = { "csid2" },
+ .csid = {
+ .is_lite = true,
+ .hw_ops = &csid_ops_gen2,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ }
+};
+
+static const struct camss_subdev_resources vfe_res_670[] = {
+ /* VFE0 */
+ {
+ .regulators = {},
+ .clock = { "camnoc_axi", "cpas_ahb", "soc_ahb",
+ "vfe0", "vfe0_axi" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 320000000, 404000000, 480000000, 600000000 },
+ { 0 } },
+ .reg = { "vfe0" },
+ .interrupt = { "vfe0" },
+ .vfe = {
+ .line_num = 4,
+ .has_pd = true,
+ .pd_name = "ife0",
+ .hw_ops = &vfe_ops_170,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+
+ /* VFE1 */
+ {
+ .regulators = {},
+ .clock = { "camnoc_axi", "cpas_ahb", "soc_ahb",
+ "vfe1", "vfe1_axi" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 320000000, 404000000, 480000000, 600000000 },
+ { 0 } },
+ .reg = { "vfe1" },
+ .interrupt = { "vfe1" },
+ .vfe = {
+ .line_num = 4,
+ .has_pd = true,
+ .pd_name = "ife1",
+ .hw_ops = &vfe_ops_170,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+
+ /* VFE-lite */
+ {
+ .regulators = {},
+ .clock = { "camnoc_axi", "cpas_ahb", "soc_ahb",
+ "vfe_lite" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 320000000, 404000000, 480000000, 600000000 } },
+ .reg = { "vfe_lite" },
+ .interrupt = { "vfe_lite" },
+ .vfe = {
+ .is_lite = true,
+ .line_num = 4,
+ .hw_ops = &vfe_ops_170,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ }
+};
+
static const struct camss_subdev_resources csiphy_res_845[] = {
/* CSIPHY0 */
{
@@ -927,6 +1106,7 @@ static const struct camss_subdev_resources vfe_res_845[] = {
.interrupt = { "vfe0" },
.vfe = {
.line_num = 4,
+ .pd_name = "ife0",
.has_pd = true,
.hw_ops = &vfe_ops_170,
.formats_rdi = &vfe_formats_rdi_845,
@@ -954,6 +1134,7 @@ static const struct camss_subdev_resources vfe_res_845[] = {
.interrupt = { "vfe1" },
.vfe = {
.line_num = 4,
+ .pd_name = "ife1",
.has_pd = true,
.hw_ops = &vfe_ops_170,
.formats_rdi = &vfe_formats_rdi_845,
@@ -1443,12 +1624,13 @@ static const struct camss_subdev_resources vfe_res_7280[] = {
.regulators = {},
.clock = { "camnoc_axi", "cpas_ahb", "icp_ahb", "vfe0",
- "vfe0_axi", "gcc_cam_hf_axi" },
+ "vfe0_axi", "gcc_axi_hf", "gcc_axi_sf" },
.clock_rate = { { 150000000, 240000000, 320000000, 400000000, 480000000 },
{ 80000000 },
{ 0 },
{ 380000000, 510000000, 637000000, 760000000 },
{ 0 },
+ { 0 },
{ 0 } },
.reg = { "vfe0" },
@@ -1468,12 +1650,13 @@ static const struct camss_subdev_resources vfe_res_7280[] = {
.regulators = {},
.clock = { "camnoc_axi", "cpas_ahb", "icp_ahb", "vfe1",
- "vfe1_axi", "gcc_cam_hf_axi" },
+ "vfe1_axi", "gcc_axi_hf", "gcc_axi_sf" },
.clock_rate = { { 150000000, 240000000, 320000000, 400000000, 480000000 },
{ 80000000 },
{ 0 },
{ 380000000, 510000000, 637000000, 760000000 },
{ 0 },
+ { 0 },
{ 0 } },
.reg = { "vfe1" },
@@ -1493,12 +1676,13 @@ static const struct camss_subdev_resources vfe_res_7280[] = {
.regulators = {},
.clock = { "camnoc_axi", "cpas_ahb", "icp_ahb", "vfe2",
- "vfe2_axi", "gcc_cam_hf_axi" },
+ "vfe2_axi", "gcc_axi_hf", "gcc_axi_sf" },
.clock_rate = { { 150000000, 240000000, 320000000, 400000000, 480000000 },
{ 80000000 },
{ 0 },
{ 380000000, 510000000, 637000000, 760000000 },
{ 0 },
+ { 0 },
{ 0 } },
.reg = { "vfe2" },
@@ -1516,11 +1700,12 @@ static const struct camss_subdev_resources vfe_res_7280[] = {
/* VFE3 (lite) */
{
.clock = { "camnoc_axi", "cpas_ahb", "icp_ahb",
- "vfe_lite0", "gcc_cam_hf_axi" },
+ "vfe_lite0", "gcc_axi_hf", "gcc_axi_sf" },
.clock_rate = { { 150000000, 240000000, 320000000, 400000000, 480000000 },
{ 80000000 },
{ 0 },
{ 320000000, 400000000, 480000000, 600000000 },
+ { 0 },
{ 0 } },
.regulators = {},
@@ -1537,11 +1722,12 @@ static const struct camss_subdev_resources vfe_res_7280[] = {
/* VFE4 (lite) */
{
.clock = { "camnoc_axi", "cpas_ahb", "icp_ahb",
- "vfe_lite1", "gcc_cam_hf_axi" },
+ "vfe_lite1", "gcc_axi_hf", "gcc_axi_sf" },
.clock_rate = { { 150000000, 240000000, 320000000, 400000000, 480000000 },
{ 80000000 },
{ 0 },
{ 320000000, 400000000, 480000000, 600000000 },
+ { 0 },
{ 0 } },
.regulators = {},
@@ -1938,6 +2124,327 @@ static const struct resources_icc icc_res_sc8280xp[] = {
},
};
+static const struct camss_subdev_resources csiphy_res_8550[] = {
+ /* CSIPHY0 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "csiphy0", "csiphy0_timer" },
+ .clock_rate = { { 400000000, 480000000 },
+ { 400000000 } },
+ .reg = { "csiphy0" },
+ .interrupt = { "csiphy0" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ },
+ /* CSIPHY1 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "csiphy1", "csiphy1_timer" },
+ .clock_rate = { { 400000000, 480000000 },
+ { 400000000 } },
+ .reg = { "csiphy1" },
+ .interrupt = { "csiphy1" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ },
+ /* CSIPHY2 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "csiphy2", "csiphy2_timer" },
+ .clock_rate = { { 400000000, 480000000 },
+ { 400000000 } },
+ .reg = { "csiphy2" },
+ .interrupt = { "csiphy2" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ },
+ /* CSIPHY3 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "csiphy3", "csiphy3_timer" },
+ .clock_rate = { { 400000000, 480000000 },
+ { 400000000 } },
+ .reg = { "csiphy3" },
+ .interrupt = { "csiphy3" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ },
+ /* CSIPHY4 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "csiphy4", "csiphy4_timer" },
+ .clock_rate = { { 400000000, 480000000 },
+ { 400000000 } },
+ .reg = { "csiphy4" },
+ .interrupt = { "csiphy4" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ },
+ /* CSIPHY5 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "csiphy5", "csiphy5_timer" },
+ .clock_rate = { { 400000000, 480000000 },
+ { 400000000 } },
+ .reg = { "csiphy5" },
+ .interrupt = { "csiphy5" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ },
+ /* CSIPHY6 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "csiphy6", "csiphy6_timer" },
+ .clock_rate = { { 400000000, 480000000 },
+ { 400000000 } },
+ .reg = { "csiphy6" },
+ .interrupt = { "csiphy6" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ },
+ /* CSIPHY7 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "csiphy7", "csiphy7_timer" },
+ .clock_rate = { { 400000000, 480000000 },
+ { 400000000 } },
+ .reg = { "csiphy7" },
+ .interrupt = { "csiphy7" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ }
+};
+
+static const struct resources_wrapper csid_wrapper_res_sm8550 = {
+ .reg = "csid_wrapper",
+};
+
+static const struct camss_subdev_resources csid_res_8550[] = {
+ /* CSID0 */
+ {
+ .regulators = {},
+ .clock = { "csid", "csiphy_rx" },
+ .clock_rate = { { 400000000, 480000000 },
+ { 400000000, 480000000 } },
+ .reg = { "csid0" },
+ .interrupt = { "csid0" },
+ .csid = {
+ .is_lite = false,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .hw_ops = &csid_ops_780,
+ .formats = &csid_formats_gen2
+ }
+ },
+ /* CSID1 */
+ {
+ .regulators = {},
+ .clock = { "csid", "csiphy_rx" },
+ .clock_rate = { { 400000000, 480000000 },
+ { 400000000, 480000000 } },
+ .reg = { "csid1" },
+ .interrupt = { "csid1" },
+ .csid = {
+ .is_lite = false,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .hw_ops = &csid_ops_780,
+ .formats = &csid_formats_gen2
+ }
+ },
+ /* CSID2 */
+ {
+ .regulators = {},
+ .clock = { "csid", "csiphy_rx" },
+ .clock_rate = { { 400000000, 480000000 },
+ { 400000000, 480000000 } },
+ .reg = { "csid2" },
+ .interrupt = { "csid2" },
+ .csid = {
+ .is_lite = false,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .hw_ops = &csid_ops_780,
+ .formats = &csid_formats_gen2
+ }
+ },
+ /* CSID3 */
+ {
+ .regulators = {},
+ .clock = { "vfe_lite_csid", "vfe_lite_cphy_rx" },
+ .clock_rate = { { 400000000, 480000000 },
+ { 400000000, 480000000 } },
+ .reg = { "csid_lite0" },
+ .interrupt = { "csid_lite0" },
+ .csid = {
+ .is_lite = true,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .hw_ops = &csid_ops_780,
+ .formats = &csid_formats_gen2
+ }
+ },
+ /* CSID4 */
+ {
+ .regulators = {},
+ .clock = { "vfe_lite_csid", "vfe_lite_cphy_rx" },
+ .clock_rate = { { 400000000, 480000000 },
+ { 400000000, 480000000 } },
+ .reg = { "csid_lite1" },
+ .interrupt = { "csid_lite1" },
+ .csid = {
+ .is_lite = true,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .hw_ops = &csid_ops_780,
+ .formats = &csid_formats_gen2
+ }
+ }
+};
+
+static const struct camss_subdev_resources vfe_res_8550[] = {
+ /* VFE0 */
+ {
+ .regulators = {},
+ .clock = { "gcc_axi_hf", "cpas_ahb", "cpas_fast_ahb_clk", "vfe0_fast_ahb",
+ "vfe0", "cpas_vfe0", "camnoc_axi" },
+ .clock_rate = { { 0 },
+ { 80000000 },
+ { 300000000, 400000000 },
+ { 300000000, 400000000 },
+ { 466000000, 594000000, 675000000, 785000000 },
+ { 300000000, 400000000 },
+ { 300000000, 400000000 } },
+ .reg = { "vfe0" },
+ .interrupt = { "vfe0" },
+ .vfe = {
+ .line_num = 3,
+ .is_lite = false,
+ .has_pd = true,
+ .pd_name = "ife0",
+ .hw_ops = &vfe_ops_780,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+ /* VFE1 */
+ {
+ .regulators = {},
+ .clock = { "gcc_axi_hf", "cpas_ahb", "cpas_fast_ahb_clk", "vfe1_fast_ahb",
+ "vfe1", "cpas_vfe1", "camnoc_axi" },
+ .clock_rate = { { 0 },
+ { 80000000 },
+ { 300000000, 400000000 },
+ { 300000000, 400000000 },
+ { 466000000, 594000000, 675000000, 785000000 },
+ { 300000000, 400000000 },
+ { 300000000, 400000000 } },
+ .reg = { "vfe1" },
+ .interrupt = { "vfe1" },
+ .vfe = {
+ .line_num = 3,
+ .is_lite = false,
+ .has_pd = true,
+ .pd_name = "ife1",
+ .hw_ops = &vfe_ops_780,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+ /* VFE2 */
+ {
+ .regulators = {},
+ .clock = { "gcc_axi_hf", "cpas_ahb", "cpas_fast_ahb_clk", "vfe2_fast_ahb",
+ "vfe2", "cpas_vfe2", "camnoc_axi" },
+ .clock_rate = { { 0 },
+ { 80000000 },
+ { 300000000, 400000000 },
+ { 300000000, 400000000 },
+ { 466000000, 594000000, 675000000, 785000000 },
+ { 300000000, 400000000 },
+ { 300000000, 400000000 } },
+ .reg = { "vfe2" },
+ .interrupt = { "vfe2" },
+ .vfe = {
+ .line_num = 3,
+ .is_lite = false,
+ .has_pd = true,
+ .pd_name = "ife2",
+ .hw_ops = &vfe_ops_780,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+ /* VFE3 lite */
+ {
+ .regulators = {},
+ .clock = { "gcc_axi_hf", "cpas_ahb", "cpas_fast_ahb_clk", "vfe_lite_ahb",
+ "vfe_lite", "cpas_ife_lite", "camnoc_axi" },
+ .clock_rate = { { 0 },
+ { 80000000 },
+ { 300000000, 400000000 },
+ { 300000000, 400000000 },
+ { 400000000, 480000000 },
+ { 300000000, 400000000 },
+ { 300000000, 400000000 } },
+ .reg = { "vfe_lite0" },
+ .interrupt = { "vfe_lite0" },
+ .vfe = {
+ .line_num = 4,
+ .is_lite = true,
+ .hw_ops = &vfe_ops_780,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+ /* VFE4 lite */
+ {
+ .regulators = {},
+ .clock = { "gcc_axi_hf", "cpas_ahb", "cpas_fast_ahb_clk", "vfe_lite_ahb",
+ "vfe_lite", "cpas_ife_lite", "camnoc_axi" },
+ .clock_rate = { { 0 },
+ { 80000000 },
+ { 300000000, 400000000 },
+ { 300000000, 400000000 },
+ { 400000000, 480000000 },
+ { 300000000, 400000000 },
+ { 300000000, 400000000 } },
+ .reg = { "vfe_lite1" },
+ .interrupt = { "vfe_lite1" },
+ .vfe = {
+ .line_num = 4,
+ .is_lite = true,
+ .hw_ops = &vfe_ops_780,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+};
+
+static const struct resources_icc icc_res_sm8550[] = {
+ {
+ .name = "ahb",
+ .icc_bw_tbl.avg = 2097152,
+ .icc_bw_tbl.peak = 2097152,
+ },
+ {
+ .name = "hf_0_mnoc",
+ .icc_bw_tbl.avg = 2097152,
+ .icc_bw_tbl.peak = 2097152,
+ },
+};
+
/*
* camss_add_clock_margin - Add margin to clock frequency rate
* @rate: Clock frequency rate
@@ -1996,12 +2503,12 @@ void camss_disable_clocks(int nclocks, struct camss_clock *clock)
}
/*
- * camss_find_sensor - Find a linked media entity which represents a sensor
+ * camss_find_sensor_pad - Find the media pad via which the sensor is linked
* @entity: Media entity to start searching from
*
- * Return a pointer to sensor media entity or NULL if not found
+ * Return a pointer to sensor media pad or NULL if not found
*/
-struct media_entity *camss_find_sensor(struct media_entity *entity)
+struct media_pad *camss_find_sensor_pad(struct media_entity *entity)
{
struct media_pad *pad;
@@ -2017,7 +2524,7 @@ struct media_entity *camss_find_sensor(struct media_entity *entity)
entity = pad->entity;
if (entity->function == MEDIA_ENT_F_CAM_SENSOR)
- return entity;
+ return pad;
}
}
@@ -2032,16 +2539,13 @@ struct media_entity *camss_find_sensor(struct media_entity *entity)
s64 camss_get_link_freq(struct media_entity *entity, unsigned int bpp,
unsigned int lanes)
{
- struct media_entity *sensor;
- struct v4l2_subdev *subdev;
+ struct media_pad *sensor_pad;
- sensor = camss_find_sensor(entity);
- if (!sensor)
+ sensor_pad = camss_find_sensor_pad(entity);
+ if (!sensor_pad)
return -ENODEV;
- subdev = media_entity_to_v4l2_subdev(sensor);
-
- return v4l2_get_link_freq(subdev->ctrl_handler, bpp, 2 * lanes);
+ return v4l2_get_link_freq(sensor_pad, bpp, 2 * lanes);
}
/*
@@ -2053,15 +2557,15 @@ s64 camss_get_link_freq(struct media_entity *entity, unsigned int bpp,
*/
int camss_get_pixel_clock(struct media_entity *entity, u64 *pixel_clock)
{
- struct media_entity *sensor;
+ struct media_pad *sensor_pad;
struct v4l2_subdev *subdev;
struct v4l2_ctrl *ctrl;
- sensor = camss_find_sensor(entity);
- if (!sensor)
+ sensor_pad = camss_find_sensor_pad(entity);
+ if (!sensor_pad)
return -ENODEV;
- subdev = media_entity_to_v4l2_subdev(sensor);
+ subdev = media_entity_to_v4l2_subdev(sensor_pad->entity);
ctrl = v4l2_ctrl_find(subdev->ctrl_handler, V4L2_CID_PIXEL_RATE);
@@ -2403,6 +2907,28 @@ static int camss_link_entities(struct camss *camss)
return 0;
}
+void camss_reg_update(struct camss *camss, int hw_id, int port_id, bool is_clear)
+{
+ struct csid_device *csid;
+
+ if (hw_id < camss->res->csid_num) {
+ csid = &camss->csid[hw_id];
+
+ csid->res->hw_ops->reg_update(csid, port_id, is_clear);
+ }
+}
+
+void camss_buf_done(struct camss *camss, int hw_id, int port_id)
+{
+ struct vfe_device *vfe;
+
+ if (hw_id < camss->res->vfe_num) {
+ vfe = &camss->vfe[hw_id];
+
+ vfe->res->hw_ops->vfe_buf_done(vfe, port_id);
+ }
+}
+
/*
* camss_register_entities - Register subdev nodes and create links
* @camss: CAMSS device
@@ -2898,8 +3424,20 @@ static const struct camss_resources sdm660_resources = {
.link_entities = camss_link_entities
};
+static const struct camss_resources sdm670_resources = {
+ .version = CAMSS_845,
+ .csiphy_res = csiphy_res_670,
+ .csid_res = csid_res_670,
+ .vfe_res = vfe_res_670,
+ .csiphy_num = ARRAY_SIZE(csiphy_res_670),
+ .csid_num = ARRAY_SIZE(csid_res_670),
+ .vfe_num = ARRAY_SIZE(vfe_res_670),
+ .link_entities = camss_link_entities
+};
+
static const struct camss_resources sdm845_resources = {
.version = CAMSS_845,
+ .pd_name = "top",
.csiphy_res = csiphy_res_845,
.csid_res = csid_res_845,
.vfe_res = vfe_res_845,
@@ -2952,6 +3490,21 @@ static const struct camss_resources sc7280_resources = {
.link_entities = camss_link_entities
};
+static const struct camss_resources sm8550_resources = {
+ .version = CAMSS_8550,
+ .pd_name = "top",
+ .csiphy_res = csiphy_res_8550,
+ .csid_res = csid_res_8550,
+ .vfe_res = vfe_res_8550,
+ .csid_wrapper_res = &csid_wrapper_res_sm8550,
+ .icc_res = icc_res_sm8550,
+ .icc_path_num = ARRAY_SIZE(icc_res_sm8550),
+ .csiphy_num = ARRAY_SIZE(csiphy_res_8550),
+ .csid_num = ARRAY_SIZE(csid_res_8550),
+ .vfe_num = ARRAY_SIZE(vfe_res_8550),
+ .link_entities = camss_link_entities
+};
+
static const struct of_device_id camss_dt_match[] = {
{ .compatible = "qcom,msm8916-camss", .data = &msm8916_resources },
{ .compatible = "qcom,msm8953-camss", .data = &msm8953_resources },
@@ -2959,8 +3512,10 @@ static const struct of_device_id camss_dt_match[] = {
{ .compatible = "qcom,sc7280-camss", .data = &sc7280_resources },
{ .compatible = "qcom,sc8280xp-camss", .data = &sc8280xp_resources },
{ .compatible = "qcom,sdm660-camss", .data = &sdm660_resources },
+ { .compatible = "qcom,sdm670-camss", .data = &sdm670_resources },
{ .compatible = "qcom,sdm845-camss", .data = &sdm845_resources },
{ .compatible = "qcom,sm8250-camss", .data = &sm8250_resources },
+ { .compatible = "qcom,sm8550-camss", .data = &sm8550_resources },
{ }
};
diff --git a/drivers/media/platform/qcom/camss/camss.h b/drivers/media/platform/qcom/camss/camss.h
index 9a046eea334f..b284b910ce42 100644
--- a/drivers/media/platform/qcom/camss/camss.h
+++ b/drivers/media/platform/qcom/camss/camss.h
@@ -85,6 +85,7 @@ enum camss_version {
CAMSS_8250,
CAMSS_8280XP,
CAMSS_845,
+ CAMSS_8550,
};
enum icc_count {
@@ -153,7 +154,7 @@ void camss_add_clock_margin(u64 *rate);
int camss_enable_clocks(int nclocks, struct camss_clock *clock,
struct device *dev);
void camss_disable_clocks(int nclocks, struct camss_clock *clock);
-struct media_entity *camss_find_sensor(struct media_entity *entity);
+struct media_pad *camss_find_sensor_pad(struct media_entity *entity);
s64 camss_get_link_freq(struct media_entity *entity, unsigned int bpp,
unsigned int lanes);
int camss_get_pixel_clock(struct media_entity *entity, u64 *pixel_clock);
@@ -162,5 +163,8 @@ void camss_pm_domain_off(struct camss *camss, int id);
int camss_vfe_get(struct camss *camss, int id);
void camss_vfe_put(struct camss *camss, int id);
void camss_delete(struct camss *camss);
+void camss_buf_done(struct camss *camss, int hw_id, int port_id);
+void camss_reg_update(struct camss *camss, int hw_id,
+ int port_id, bool is_clear);
#endif /* QC_MSM_CAMSS_H */
diff --git a/drivers/media/platform/qcom/iris/Kconfig b/drivers/media/platform/qcom/iris/Kconfig
new file mode 100644
index 000000000000..3c803a05305a
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/Kconfig
@@ -0,0 +1,13 @@
+config VIDEO_QCOM_IRIS
+ tristate "Qualcomm iris V4L2 decoder driver"
+ depends on VIDEO_DEV
+ depends on ARCH_QCOM || COMPILE_TEST
+ select V4L2_MEM2MEM_DEV
+ select QCOM_MDT_LOADER if ARCH_QCOM
+ select QCOM_SCM
+ select VIDEOBUF2_DMA_CONTIG
+ help
+ This is a V4L2 driver for Qualcomm iris video accelerator
+ hardware. It accelerates decoding operations on various
+ Qualcomm SoCs.
+ To compile this driver as a module, choose M here.
diff --git a/drivers/media/platform/qcom/iris/Makefile b/drivers/media/platform/qcom/iris/Makefile
new file mode 100644
index 000000000000..35390534534e
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/Makefile
@@ -0,0 +1,31 @@
+qcom-iris-objs += \
+ iris_buffer.o \
+ iris_core.o \
+ iris_ctrls.o \
+ iris_firmware.o \
+ iris_hfi_common.o \
+ iris_hfi_gen1_command.o \
+ iris_hfi_gen1_response.o \
+ iris_hfi_gen2_command.o \
+ iris_hfi_gen2_packet.o \
+ iris_hfi_gen2_response.o \
+ iris_hfi_queue.o \
+ iris_platform_sm8550.o \
+ iris_power.o \
+ iris_probe.o \
+ iris_resources.o \
+ iris_state.o \
+ iris_utils.o \
+ iris_vidc.o \
+ iris_vb2.o \
+ iris_vdec.o \
+ iris_vpu2.o \
+ iris_vpu3.o \
+ iris_vpu_buffer.o \
+ iris_vpu_common.o \
+
+ifeq ($(CONFIG_VIDEO_QCOM_VENUS),)
+qcom-iris-objs += iris_platform_sm8250.o
+endif
+
+obj-$(CONFIG_VIDEO_QCOM_IRIS) += qcom-iris.o
diff --git a/drivers/media/platform/qcom/iris/iris_buffer.c b/drivers/media/platform/qcom/iris/iris_buffer.c
new file mode 100644
index 000000000000..e5c5a564fcb8
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_buffer.c
@@ -0,0 +1,623 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "iris_buffer.h"
+#include "iris_instance.h"
+#include "iris_power.h"
+#include "iris_vpu_buffer.h"
+
+#define PIXELS_4K 4096
+#define MAX_WIDTH 4096
+#define MAX_HEIGHT 2304
+#define Y_STRIDE_ALIGN 128
+#define UV_STRIDE_ALIGN 128
+#define Y_SCANLINE_ALIGN 32
+#define UV_SCANLINE_ALIGN 16
+#define UV_SCANLINE_ALIGN_QC08C 32
+#define META_STRIDE_ALIGNED 64
+#define META_SCANLINE_ALIGNED 16
+#define NUM_MBS_4K (DIV_ROUND_UP(MAX_WIDTH, 16) * DIV_ROUND_UP(MAX_HEIGHT, 16))
+
+/*
+ * NV12:
+ * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+ * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ * <-Y/UV_Stride (aligned to 128)->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | y_scanlines (aligned to 32)
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * U V U V U V U V U V U V . . . . ^
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . uv_scanlines (aligned to 16)
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . --> Buffer size aligned to 4K
+ *
+ * y_stride : Width aligned to 128
+ * uv_stride : Width aligned to 128
+ * y_scanlines: Height aligned to 32
+ * uv_scanlines: Height/2 aligned to 16
+ * Total size = align(y_stride * y_scanlines
+ * + uv_stride * uv_scanlines, 4096)
+ *
+ * Note: All the alignments are hardware requirements.
+ */
+static u32 iris_yuv_buffer_size_nv12(struct iris_inst *inst)
+{
+ u32 y_plane, uv_plane, y_stride, uv_stride, y_scanlines, uv_scanlines;
+ struct v4l2_format *f = inst->fmt_dst;
+
+ y_stride = ALIGN(f->fmt.pix_mp.width, Y_STRIDE_ALIGN);
+ uv_stride = ALIGN(f->fmt.pix_mp.width, UV_STRIDE_ALIGN);
+ y_scanlines = ALIGN(f->fmt.pix_mp.height, Y_SCANLINE_ALIGN);
+ uv_scanlines = ALIGN((f->fmt.pix_mp.height + 1) >> 1, UV_SCANLINE_ALIGN);
+ y_plane = y_stride * y_scanlines;
+ uv_plane = uv_stride * uv_scanlines;
+
+ return ALIGN(y_plane + uv_plane, PIXELS_4K);
+}
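
As a quick sanity check of the layout described above, assume an illustrative 1920x1080 stream (the resolution is only an assumption for this example, not something fixed by the driver); iris_yuv_buffer_size_nv12() then works out to:

    y_stride     = ALIGN(1920, 128)           = 1920
    uv_stride    = ALIGN(1920, 128)           = 1920
    y_scanlines  = ALIGN(1080, 32)            = 1088
    uv_scanlines = ALIGN((1080 + 1) >> 1, 16) = 544
    y_plane      = 1920 * 1088                = 2088960
    uv_plane     = 1920 * 544                 = 1044480
    total        = ALIGN(2088960 + 1044480, 4096) = 3133440 bytes (about 3 MiB)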
+
+/*
+ * QC08C:
+ * Compressed Macro-tile format for NV12.
+ * Contains 4 planes in the following order -
+ * (A) Y_Meta_Plane
+ * (B) Y_UBWC_Plane
+ * (C) UV_Meta_Plane
+ * (D) UV_UBWC_Plane
+ *
+ * Y_Meta_Plane consists of meta information to decode compressed
+ * tile data in Y_UBWC_Plane.
+ * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+ * UBWC decoder block will use the Y_Meta_Plane data together with
+ * Y_UBWC_Plane data to produce loss-less uncompressed 8 bit Y samples.
+ *
+ * UV_Meta_Plane consists of meta information to decode compressed
+ * tile data in UV_UBWC_Plane.
+ * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+ * UBWC decoder block will use UV_Meta_Plane data together with
+ * UV_UBWC_Plane data to produce loss-less uncompressed 8 bit 2x2
+ * subsampled color difference samples.
+ *
+ * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+ * and randomly accessible. There is no dependency between tiles.
+ *
+ * <----- y_meta_stride ----> (aligned to 64)
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | y_meta_scanlines (aligned to 16)
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <--Compressed tile y_stride---> (aligned to 128)
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile y_scanlines (aligned to 32)
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----- uv_meta_stride ----> (aligned to 64)
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . uv_meta_scanlines (aligned to 16)
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <--Compressed tile uv_stride---> (aligned to 128)
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . uv_scanlines (aligned to 32)
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ * y_stride: width aligned to 128
+ * uv_stride: width aligned to 128
+ * y_scanlines: height aligned to 32
+ * uv_scanlines: height aligned to 32
+ * y_plane: buffer size aligned to 4096
+ * uv_plane: buffer size aligned to 4096
+ * y_meta_stride: width aligned to 64
+ * y_meta_scanlines: height aligned to 16
+ * y_meta_plane: buffer size aligned to 4096
+ * uv_meta_stride: width aligned to 64
+ * uv_meta_scanlines: height aligned to 16
+ * uv_meta_plane: buffer size aligned to 4096
+ *
+ * Total size = align( y_plane + uv_plane +
+ * y_meta_plane + uv_meta_plane, 4096)
+ *
+ * Note: All the alignments are hardware requirements.
+ */
+static u32 iris_yuv_buffer_size_qc08c(struct iris_inst *inst)
+{
+ u32 y_plane, uv_plane, y_stride, uv_stride;
+ struct v4l2_format *f = inst->fmt_dst;
+ u32 uv_meta_stride, uv_meta_plane;
+ u32 y_meta_stride, y_meta_plane;
+
+ y_meta_stride = ALIGN(DIV_ROUND_UP(f->fmt.pix_mp.width, META_STRIDE_ALIGNED >> 1),
+ META_STRIDE_ALIGNED);
+ y_meta_plane = y_meta_stride * ALIGN(DIV_ROUND_UP(f->fmt.pix_mp.height,
+ META_SCANLINE_ALIGNED >> 1),
+ META_SCANLINE_ALIGNED);
+ y_meta_plane = ALIGN(y_meta_plane, PIXELS_4K);
+
+ y_stride = ALIGN(f->fmt.pix_mp.width, Y_STRIDE_ALIGN);
+ y_plane = ALIGN(y_stride * ALIGN(f->fmt.pix_mp.height, Y_SCANLINE_ALIGN), PIXELS_4K);
+
+ uv_meta_stride = ALIGN(DIV_ROUND_UP(f->fmt.pix_mp.width / 2, META_STRIDE_ALIGNED >> 2),
+ META_STRIDE_ALIGNED);
+ uv_meta_plane = uv_meta_stride * ALIGN(DIV_ROUND_UP(f->fmt.pix_mp.height / 2,
+ META_SCANLINE_ALIGNED >> 1),
+ META_SCANLINE_ALIGNED);
+ uv_meta_plane = ALIGN(uv_meta_plane, PIXELS_4K);
+
+ uv_stride = ALIGN(f->fmt.pix_mp.width, UV_STRIDE_ALIGN);
+ uv_plane = ALIGN(uv_stride * ALIGN(f->fmt.pix_mp.height / 2, UV_SCANLINE_ALIGN_QC08C),
+ PIXELS_4K);
+
+ return ALIGN(y_meta_plane + y_plane + uv_meta_plane + uv_plane, PIXELS_4K);
+}
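
For the same illustrative 1920x1080 case, the QC08C variant adds the two meta planes and uses the 32-line UV scanline alignment (UV_SCANLINE_ALIGN_QC08C):

    y_meta_stride  = ALIGN(DIV_ROUND_UP(1920, 32), 64)                  = 64
    y_meta_plane   = ALIGN(64 * ALIGN(DIV_ROUND_UP(1080, 8), 16), 4096) = 12288
    y_plane        = ALIGN(1920 * ALIGN(1080, 32), 4096)                = 2088960
    uv_meta_stride = ALIGN(DIV_ROUND_UP(960, 16), 64)                   = 64
    uv_meta_plane  = ALIGN(64 * ALIGN(DIV_ROUND_UP(540, 8), 16), 4096)  = 8192
    uv_plane       = ALIGN(1920 * ALIGN(540, 32), 4096)                 = 1044480
    total          = ALIGN(12288 + 2088960 + 8192 + 1044480, 4096)      = 3153920 bytes (about 3 MiB)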
+
+static u32 iris_bitstream_buffer_size(struct iris_inst *inst)
+{
+ struct platform_inst_caps *caps = inst->core->iris_platform_data->inst_caps;
+ u32 base_res_mbs = NUM_MBS_4K;
+ u32 frame_size, num_mbs;
+ u32 div_factor = 2;
+
+ num_mbs = iris_get_mbpf(inst);
+ if (num_mbs > NUM_MBS_4K) {
+ div_factor = 4;
+ base_res_mbs = caps->max_mbpf;
+ }
+
+ /*
+ * frame_size = YUVsize / div_factor
+ * where YUVsize = resolution_in_MBs * pixels_per_MB * 3 / 2
+ */
+ frame_size = base_res_mbs * (16 * 16) * 3 / 2 / div_factor;
+
+ return ALIGN(frame_size, PIXELS_4K);
+}
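
Assuming iris_get_mbpf() returns the macroblock count of the coded resolution (an assumption here, since the helper is defined elsewhere), the same illustrative 1920x1080 stream has 120 * 68 = 8160 MBs, which is below NUM_MBS_4K, so the bitstream buffer is still sized for the full 4K worst case:

    base_res_mbs = NUM_MBS_4K = 256 * 144        = 36864
    frame_size   = 36864 * (16 * 16) * 3 / 2 / 2 = 7077888
    total        = ALIGN(7077888, 4096)          = 7077888 bytes (6.75 MiB)

In other words, every sub-4K session gets the same 6.75 MiB bitstream buffer; only streams larger than NUM_MBS_4K switch to caps->max_mbpf and div_factor = 4.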
+
+int iris_get_buffer_size(struct iris_inst *inst,
+ enum iris_buffer_type buffer_type)
+{
+ switch (buffer_type) {
+ case BUF_INPUT:
+ return iris_bitstream_buffer_size(inst);
+ case BUF_OUTPUT:
+ return iris_yuv_buffer_size_nv12(inst);
+ case BUF_DPB:
+ return iris_yuv_buffer_size_qc08c(inst);
+ default:
+ return 0;
+ }
+}
+
+static void iris_fill_internal_buf_info(struct iris_inst *inst,
+ enum iris_buffer_type buffer_type)
+{
+ struct iris_buffers *buffers = &inst->buffers[buffer_type];
+
+ buffers->size = iris_vpu_buf_size(inst, buffer_type);
+ buffers->min_count = iris_vpu_buf_count(inst, buffer_type);
+}
+
+void iris_get_internal_buffers(struct iris_inst *inst, u32 plane)
+{
+ const struct iris_platform_data *platform_data = inst->core->iris_platform_data;
+ const u32 *internal_buf_type;
+ u32 internal_buffer_count, i;
+
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ internal_buf_type = platform_data->dec_ip_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;
+ for (i = 0; i < internal_buffer_count; i++)
+ iris_fill_internal_buf_info(inst, internal_buf_type[i]);
+ } else {
+ internal_buf_type = platform_data->dec_op_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_op_int_buf_tbl_size;
+ for (i = 0; i < internal_buffer_count; i++)
+ iris_fill_internal_buf_info(inst, internal_buf_type[i]);
+ }
+}
+
+static int iris_create_internal_buffer(struct iris_inst *inst,
+ enum iris_buffer_type buffer_type, u32 index)
+{
+ struct iris_buffers *buffers = &inst->buffers[buffer_type];
+ struct iris_core *core = inst->core;
+ struct iris_buffer *buffer;
+
+ if (!buffers->size)
+ return 0;
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&buffer->list);
+ buffer->type = buffer_type;
+ buffer->index = index;
+ buffer->buffer_size = buffers->size;
+ buffer->dma_attrs = DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_KERNEL_MAPPING;
+ list_add_tail(&buffer->list, &buffers->list);
+
+ buffer->kvaddr = dma_alloc_attrs(core->dev, buffer->buffer_size,
+ &buffer->device_addr, GFP_KERNEL, buffer->dma_attrs);
+ if (!buffer->kvaddr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int iris_create_internal_buffers(struct iris_inst *inst, u32 plane)
+{
+ const struct iris_platform_data *platform_data = inst->core->iris_platform_data;
+ u32 internal_buffer_count, i, j;
+ struct iris_buffers *buffers;
+ const u32 *internal_buf_type;
+ int ret;
+
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ internal_buf_type = platform_data->dec_ip_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;
+ } else {
+ internal_buf_type = platform_data->dec_op_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_op_int_buf_tbl_size;
+ }
+
+ for (i = 0; i < internal_buffer_count; i++) {
+ buffers = &inst->buffers[internal_buf_type[i]];
+ for (j = 0; j < buffers->min_count; j++) {
+ ret = iris_create_internal_buffer(inst, internal_buf_type[i], j);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int iris_queue_buffer(struct iris_inst *inst, struct iris_buffer *buf)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ int ret;
+
+ ret = hfi_ops->session_queue_buf(inst, buf);
+ if (ret)
+ return ret;
+
+ buf->attr &= ~BUF_ATTR_DEFERRED;
+ buf->attr |= BUF_ATTR_QUEUED;
+
+ return 0;
+}
+
+int iris_queue_internal_buffers(struct iris_inst *inst, u32 plane)
+{
+ const struct iris_platform_data *platform_data = inst->core->iris_platform_data;
+ struct iris_buffer *buffer, *next;
+ struct iris_buffers *buffers;
+ const u32 *internal_buf_type;
+ u32 internal_buffer_count, i;
+ int ret;
+
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ internal_buf_type = platform_data->dec_ip_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;
+ } else {
+ internal_buf_type = platform_data->dec_op_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_op_int_buf_tbl_size;
+ }
+
+ for (i = 0; i < internal_buffer_count; i++) {
+ buffers = &inst->buffers[internal_buf_type[i]];
+ list_for_each_entry_safe(buffer, next, &buffers->list, list) {
+ if (buffer->attr & BUF_ATTR_PENDING_RELEASE)
+ continue;
+ if (buffer->attr & BUF_ATTR_QUEUED)
+ continue;
+ ret = iris_queue_buffer(inst, buffer);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int iris_destroy_internal_buffer(struct iris_inst *inst, struct iris_buffer *buffer)
+{
+ struct iris_core *core = inst->core;
+
+ list_del(&buffer->list);
+ dma_free_attrs(core->dev, buffer->buffer_size, buffer->kvaddr,
+ buffer->device_addr, buffer->dma_attrs);
+ kfree(buffer);
+
+ return 0;
+}
+
+int iris_destroy_internal_buffers(struct iris_inst *inst, u32 plane)
+{
+ const struct iris_platform_data *platform_data = inst->core->iris_platform_data;
+ struct iris_buffer *buf, *next;
+ struct iris_buffers *buffers;
+ const u32 *internal_buf_type;
+ u32 i, len;
+ int ret;
+
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ internal_buf_type = platform_data->dec_ip_int_buf_tbl;
+ len = platform_data->dec_ip_int_buf_tbl_size;
+ } else {
+ internal_buf_type = platform_data->dec_op_int_buf_tbl;
+ len = platform_data->dec_op_int_buf_tbl_size;
+ }
+
+ for (i = 0; i < len; i++) {
+ buffers = &inst->buffers[internal_buf_type[i]];
+ list_for_each_entry_safe(buf, next, &buffers->list, list) {
+ ret = iris_destroy_internal_buffer(inst, buf);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int iris_release_internal_buffers(struct iris_inst *inst,
+ enum iris_buffer_type buffer_type)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ struct iris_buffers *buffers = &inst->buffers[buffer_type];
+ struct iris_buffer *buffer, *next;
+ int ret;
+
+ list_for_each_entry_safe(buffer, next, &buffers->list, list) {
+ if (buffer->attr & BUF_ATTR_PENDING_RELEASE)
+ continue;
+ if (!(buffer->attr & BUF_ATTR_QUEUED))
+ continue;
+ ret = hfi_ops->session_release_buf(inst, buffer);
+ if (ret)
+ return ret;
+ buffer->attr |= BUF_ATTR_PENDING_RELEASE;
+ }
+
+ return 0;
+}
+
+static int iris_release_input_internal_buffers(struct iris_inst *inst)
+{
+ const struct iris_platform_data *platform_data = inst->core->iris_platform_data;
+ const u32 *internal_buf_type;
+ u32 internal_buffer_count, i;
+ int ret;
+
+ internal_buf_type = platform_data->dec_ip_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;
+
+ for (i = 0; i < internal_buffer_count; i++) {
+ ret = iris_release_internal_buffers(inst, internal_buf_type[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int iris_alloc_and_queue_persist_bufs(struct iris_inst *inst)
+{
+ struct iris_buffers *buffers = &inst->buffers[BUF_PERSIST];
+ struct iris_buffer *buffer, *next;
+ int ret;
+ u32 i;
+
+ if (!list_empty(&buffers->list))
+ return 0;
+
+ iris_fill_internal_buf_info(inst, BUF_PERSIST);
+
+ for (i = 0; i < buffers->min_count; i++) {
+ ret = iris_create_internal_buffer(inst, BUF_PERSIST, i);
+ if (ret)
+ return ret;
+ }
+
+ list_for_each_entry_safe(buffer, next, &buffers->list, list) {
+ if (buffer->attr & BUF_ATTR_PENDING_RELEASE)
+ continue;
+ if (buffer->attr & BUF_ATTR_QUEUED)
+ continue;
+ ret = iris_queue_buffer(inst, buffer);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int iris_alloc_and_queue_input_int_bufs(struct iris_inst *inst)
+{
+ int ret;
+
+ iris_get_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ ret = iris_release_input_internal_buffers(inst);
+ if (ret)
+ return ret;
+
+ ret = iris_create_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ return iris_queue_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+}
+
+int iris_queue_deferred_buffers(struct iris_inst *inst, enum iris_buffer_type buf_type)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct v4l2_m2m_buffer *buffer, *n;
+ struct iris_buffer *buf;
+ int ret;
+
+ iris_scale_power(inst);
+
+ if (buf_type == BUF_INPUT) {
+ v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buffer, n) {
+ buf = to_iris_buffer(&buffer->vb);
+ if (!(buf->attr & BUF_ATTR_DEFERRED))
+ continue;
+ ret = iris_queue_buffer(inst, buf);
+ if (ret)
+ return ret;
+ }
+ } else {
+ v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buffer, n) {
+ buf = to_iris_buffer(&buffer->vb);
+ if (!(buf->attr & BUF_ATTR_DEFERRED))
+ continue;
+ ret = iris_queue_buffer(inst, buf);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void iris_vb2_queue_error(struct iris_inst *inst)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct vb2_queue *q;
+
+ q = v4l2_m2m_get_src_vq(m2m_ctx);
+ vb2_queue_error(q);
+ q = v4l2_m2m_get_dst_vq(m2m_ctx);
+ vb2_queue_error(q);
+}
+
+static struct vb2_v4l2_buffer *
+iris_helper_find_buf(struct iris_inst *inst, u32 type, u32 idx)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return v4l2_m2m_src_buf_remove_by_idx(m2m_ctx, idx);
+ else
+ return v4l2_m2m_dst_buf_remove_by_idx(m2m_ctx, idx);
+}
+
+static void iris_get_ts_metadata(struct iris_inst *inst, u64 timestamp_ns,
+ struct vb2_v4l2_buffer *vbuf)
+{
+ u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(inst->tss); ++i) {
+ if (inst->tss[i].ts_ns != timestamp_ns)
+ continue;
+
+ vbuf->flags &= ~mask;
+ vbuf->flags |= inst->tss[i].flags;
+ vbuf->timecode = inst->tss[i].tc;
+ return;
+ }
+
+ vbuf->flags &= ~mask;
+ vbuf->flags |= inst->tss[inst->metadata_idx].flags;
+ vbuf->timecode = inst->tss[inst->metadata_idx].tc;
+}
+
+int iris_vb2_buffer_done(struct iris_inst *inst, struct iris_buffer *buf)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct vb2_v4l2_buffer *vbuf;
+ struct vb2_buffer *vb2;
+ u32 type, state;
+
+ switch (buf->type) {
+ case BUF_INPUT:
+ type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ break;
+ case BUF_OUTPUT:
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ break;
+ default:
+ return 0; /* Internal DPB Buffers */
+ }
+
+ vbuf = iris_helper_find_buf(inst, type, buf->index);
+ if (!vbuf)
+ return -EINVAL;
+
+ vb2 = &vbuf->vb2_buf;
+
+ if (buf->flags & V4L2_BUF_FLAG_ERROR)
+ state = VB2_BUF_STATE_ERROR;
+ else
+ state = VB2_BUF_STATE_DONE;
+
+ vbuf->flags |= buf->flags;
+
+ if (V4L2_TYPE_IS_CAPTURE(type)) {
+ vb2_set_plane_payload(vb2, 0, buf->data_size);
+ vbuf->sequence = inst->sequence_cap++;
+ iris_get_ts_metadata(inst, buf->timestamp, vbuf);
+ } else {
+ vbuf->sequence = inst->sequence_out++;
+ }
+
+ if (vbuf->flags & V4L2_BUF_FLAG_LAST) {
+ if (!v4l2_m2m_has_stopped(m2m_ctx)) {
+ const struct v4l2_event ev = { .type = V4L2_EVENT_EOS };
+
+ v4l2_event_queue_fh(&inst->fh, &ev);
+ v4l2_m2m_mark_stopped(m2m_ctx);
+ }
+ }
+ vb2->timestamp = buf->timestamp;
+ v4l2_m2m_buf_done(vbuf, state);
+
+ return 0;
+}
diff --git a/drivers/media/platform/qcom/iris/iris_buffer.h b/drivers/media/platform/qcom/iris/iris_buffer.h
new file mode 100644
index 000000000000..c36b6347b077
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_buffer.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_BUFFER_H__
+#define __IRIS_BUFFER_H__
+
+#include <media/videobuf2-v4l2.h>
+
+struct iris_inst;
+
+#define to_iris_buffer(ptr) container_of(ptr, struct iris_buffer, vb2)
+
+/**
+ * enum iris_buffer_type
+ *
+ * @BUF_INPUT: input buffer to the iris hardware
+ * @BUF_OUTPUT: output buffer from the iris hardware
+ * @BUF_BIN: buffer to store intermediate bin data
+ * @BUF_ARP: buffer for auto register programming
+ * @BUF_COMV: buffer to store colocated motion vectors
+ * @BUF_NON_COMV: buffer to hold config data for HW
+ * @BUF_LINE: buffer to store decoding/encoding context data for HW
+ * @BUF_DPB: buffer to store display picture buffers for reference
+ * @BUF_PERSIST: buffer to store session context data
+ * @BUF_SCRATCH_1: buffer to store decoding/encoding context data for HW
+ * @BUF_TYPE_MAX: max buffer types
+ */
+enum iris_buffer_type {
+ BUF_INPUT = 1,
+ BUF_OUTPUT,
+ BUF_BIN,
+ BUF_ARP,
+ BUF_COMV,
+ BUF_NON_COMV,
+ BUF_LINE,
+ BUF_DPB,
+ BUF_PERSIST,
+ BUF_SCRATCH_1,
+ BUF_TYPE_MAX,
+};
+
+/*
+ * enum iris_buffer_attributes
+ *
+ * BUF_ATTR_DEFERRED: buffer queued by client but not submitted to firmware.
+ * BUF_ATTR_PENDING_RELEASE: buffers requested to be released from firmware.
+ * BUF_ATTR_QUEUED: buffers submitted to firmware.
+ * BUF_ATTR_DEQUEUED: buffers received from firmware.
+ * BUF_ATTR_BUFFER_DONE: buffers sent back to vb2.
+ */
+enum iris_buffer_attributes {
+ BUF_ATTR_DEFERRED = BIT(0),
+ BUF_ATTR_PENDING_RELEASE = BIT(1),
+ BUF_ATTR_QUEUED = BIT(2),
+ BUF_ATTR_DEQUEUED = BIT(3),
+ BUF_ATTR_BUFFER_DONE = BIT(4),
+};
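
Reading these flags together with iris_buffer.c above, the intended life cycle of a buffer appears to be roughly the following sketch (the DEQUEUED and BUFFER_DONE transitions are handled in the HFI response path, outside this excerpt):

    client queues buffer, not yet sent to firmware  ->  +BUF_ATTR_DEFERRED
    iris_queue_buffer()                             ->  -BUF_ATTR_DEFERRED, +BUF_ATTR_QUEUED
    firmware hands the buffer back                  ->  +BUF_ATTR_DEQUEUED
    buffer returned to vb2                          ->  +BUF_ATTR_BUFFER_DONE
    release requested from firmware                 ->  +BUF_ATTR_PENDING_RELEASE
                                                        (internal buffers, see iris_release_internal_buffers())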
+
+/**
+ * struct iris_buffer
+ *
+ * @vb2: v4l2 vb2 buffer
+ * @list: list head for the iris_buffers structure
+ * @inst: iris instance structure
+ * @type: enum for type of iris buffer
+ * @index: identifier for the iris buffer
+ * @fd: file descriptor of the buffer
+ * @buffer_size: accessible buffer size in bytes starting from data_offset
+ * @data_offset: accessible buffer offset from base address
+ * @data_size: data size in bytes
+ * @device_addr: device address of the buffer
+ * @kvaddr: kernel virtual address of the buffer
+ * @dma_attrs: dma attributes
+ * @flags: buffer flags. It is represented as bit masks.
+ * @timestamp: timestamp of the buffer in nano seconds (ns)
+ * @attr: enum for iris buffer attributes
+ */
+struct iris_buffer {
+ struct vb2_v4l2_buffer vb2;
+ struct list_head list;
+ struct iris_inst *inst;
+ enum iris_buffer_type type;
+ u32 index;
+ int fd;
+ size_t buffer_size;
+ u32 data_offset;
+ size_t data_size;
+ dma_addr_t device_addr;
+ void *kvaddr;
+ unsigned long dma_attrs;
+ u32 flags; /* V4L2_BUF_FLAG_* */
+ u64 timestamp;
+ enum iris_buffer_attributes attr;
+};
+
+struct iris_buffers {
+ struct list_head list;
+ u32 min_count;
+ u32 size;
+};
+
+int iris_get_buffer_size(struct iris_inst *inst, enum iris_buffer_type buffer_type);
+void iris_get_internal_buffers(struct iris_inst *inst, u32 plane);
+int iris_create_internal_buffers(struct iris_inst *inst, u32 plane);
+int iris_queue_internal_buffers(struct iris_inst *inst, u32 plane);
+int iris_destroy_internal_buffer(struct iris_inst *inst, struct iris_buffer *buffer);
+int iris_destroy_internal_buffers(struct iris_inst *inst, u32 plane);
+int iris_alloc_and_queue_persist_bufs(struct iris_inst *inst);
+int iris_alloc_and_queue_input_int_bufs(struct iris_inst *inst);
+int iris_queue_buffer(struct iris_inst *inst, struct iris_buffer *buf);
+int iris_queue_deferred_buffers(struct iris_inst *inst, enum iris_buffer_type buf_type);
+int iris_vb2_buffer_done(struct iris_inst *inst, struct iris_buffer *buf);
+void iris_vb2_queue_error(struct iris_inst *inst);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_core.c b/drivers/media/platform/qcom/iris/iris_core.c
new file mode 100644
index 000000000000..0fa0a3b549a2
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_core.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/pm_runtime.h>
+
+#include "iris_core.h"
+#include "iris_firmware.h"
+#include "iris_state.h"
+#include "iris_vpu_common.h"
+
+void iris_core_deinit(struct iris_core *core)
+{
+ pm_runtime_resume_and_get(core->dev);
+
+ mutex_lock(&core->lock);
+ iris_fw_unload(core);
+ iris_vpu_power_off(core);
+ iris_hfi_queues_deinit(core);
+ core->state = IRIS_CORE_DEINIT;
+ mutex_unlock(&core->lock);
+
+ pm_runtime_put_sync(core->dev);
+}
+
+static int iris_wait_for_system_response(struct iris_core *core)
+{
+ u32 hw_response_timeout_val = core->iris_platform_data->hw_response_timeout;
+ int ret;
+
+ if (core->state == IRIS_CORE_ERROR)
+ return -EIO;
+
+ ret = wait_for_completion_timeout(&core->core_init_done,
+ msecs_to_jiffies(hw_response_timeout_val));
+ if (!ret) {
+ core->state = IRIS_CORE_ERROR;
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int iris_core_init(struct iris_core *core)
+{
+ int ret;
+
+ mutex_lock(&core->lock);
+ if (core->state == IRIS_CORE_INIT) {
+ ret = 0;
+ goto exit;
+ } else if (core->state == IRIS_CORE_ERROR) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ core->state = IRIS_CORE_INIT;
+
+ ret = iris_hfi_queues_init(core);
+ if (ret)
+ goto error;
+
+ ret = iris_vpu_power_on(core);
+ if (ret)
+ goto error_queue_deinit;
+
+ ret = iris_fw_load(core);
+ if (ret)
+ goto error_power_off;
+
+ ret = iris_vpu_boot_firmware(core);
+ if (ret)
+ goto error_unload_fw;
+
+ ret = iris_hfi_core_init(core);
+ if (ret)
+ goto error_unload_fw;
+
+ mutex_unlock(&core->lock);
+
+ return iris_wait_for_system_response(core);
+
+error_unload_fw:
+ iris_fw_unload(core);
+error_power_off:
+ iris_vpu_power_off(core);
+error_queue_deinit:
+ iris_hfi_queues_deinit(core);
+error:
+ core->state = IRIS_CORE_DEINIT;
+exit:
+ mutex_unlock(&core->lock);
+
+ return ret;
+}
diff --git a/drivers/media/platform/qcom/iris/iris_core.h b/drivers/media/platform/qcom/iris/iris_core.h
new file mode 100644
index 000000000000..37fb4919fecc
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_core.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_CORE_H__
+#define __IRIS_CORE_H__
+
+#include <linux/types.h>
+#include <linux/pm_domain.h>
+#include <media/v4l2-device.h>
+
+#include "iris_hfi_common.h"
+#include "iris_hfi_queue.h"
+#include "iris_platform_common.h"
+#include "iris_resources.h"
+#include "iris_state.h"
+
+struct icc_info {
+ const char *name;
+ u32 bw_min_kbps;
+ u32 bw_max_kbps;
+};
+
+#define IRIS_FW_VERSION_LENGTH 128
+#define IFACEQ_CORE_PKT_SIZE (1024 * 4)
+
+/**
+ * struct iris_core - holds core parameters valid for all instances
+ *
+ * @dev: reference to device structure
+ * @reg_base: IO memory base address
+ * @irq: iris irq
+ * @v4l2_dev: a holder for v4l2 device structure
+ * @vdev_dec: iris video device structure for decoder
+ * @iris_v4l2_file_ops: iris v4l2 file ops
+ * @iris_v4l2_ioctl_ops: iris v4l2 ioctl ops
+ * @iris_vb2_ops: iris vb2 ops
+ * @icc_tbl: table of iris interconnects
+ * @icc_count: count of iris interconnects
+ * @pmdomain_tbl: table of iris power domains
+ * @opp_pmdomain_tbl: table of opp power domains
+ * @clock_tbl: table of iris clocks
+ * @clk_count: count of iris clocks
+ * @resets: table of iris reset clocks
+ * @iris_platform_data: a structure for platform data
+ * @state: current state of core
+ * @iface_q_table_daddr: device address for interface queue table memory
+ * @sfr_daddr: device address for SFR (Sub System Failure Reason) register memory
+ * @iface_q_table_vaddr: virtual address for interface queue table memory
+ * @sfr_vaddr: virtual address for SFR (Sub System Failure Reason) register memory
+ * @command_queue: shared interface queue to send commands to firmware
+ * @message_queue: shared interface queue to receive responses from firmware
+ * @debug_queue: shared interface queue to receive debug info from firmware
+ * @lock: a lock for this structure
+ * @response_packet: a pointer to response packet from fw to driver
+ * @header_id: id of packet header
+ * @packet_id: id of packet
+ * @power: a structure for clock and bw information
+ * @hfi_ops: iris hfi command ops
+ * @hfi_response_ops: iris hfi response ops
+ * @core_init_done: structure of signal completion for system response
+ * @intr_status: interrupt status
+ * @sys_error_handler: a delayed work for handling system fatal error
+ * @instances: a list_head of all instances
+ * @inst_fw_caps: an array of supported instance capabilities
+ */
+
+struct iris_core {
+ struct device *dev;
+ void __iomem *reg_base;
+ int irq;
+ struct v4l2_device v4l2_dev;
+ struct video_device *vdev_dec;
+ const struct v4l2_file_operations *iris_v4l2_file_ops;
+ const struct v4l2_ioctl_ops *iris_v4l2_ioctl_ops;
+ const struct vb2_ops *iris_vb2_ops;
+ struct icc_bulk_data *icc_tbl;
+ u32 icc_count;
+ struct dev_pm_domain_list *pmdomain_tbl;
+ struct dev_pm_domain_list *opp_pmdomain_tbl;
+ struct clk_bulk_data *clock_tbl;
+ u32 clk_count;
+ struct reset_control_bulk_data *resets;
+ const struct iris_platform_data *iris_platform_data;
+ enum iris_core_state state;
+ dma_addr_t iface_q_table_daddr;
+ dma_addr_t sfr_daddr;
+ void *iface_q_table_vaddr;
+ void *sfr_vaddr;
+ struct iris_iface_q_info command_queue;
+ struct iris_iface_q_info message_queue;
+ struct iris_iface_q_info debug_queue;
+ struct mutex lock; /* lock for core related operations */
+ u8 *response_packet;
+ u32 header_id;
+ u32 packet_id;
+ struct iris_core_power power;
+ const struct iris_hfi_command_ops *hfi_ops;
+ const struct iris_hfi_response_ops *hfi_response_ops;
+ struct completion core_init_done;
+ u32 intr_status;
+ struct delayed_work sys_error_handler;
+ struct list_head instances;
+ struct platform_inst_fw_cap inst_fw_caps[INST_FW_CAP_MAX];
+};
+
+int iris_core_init(struct iris_core *core);
+void iris_core_deinit(struct iris_core *core);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_ctrls.c b/drivers/media/platform/qcom/iris/iris_ctrls.c
new file mode 100644
index 000000000000..b690578256d5
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_ctrls.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "iris_ctrls.h"
+#include "iris_instance.h"
+
+static inline bool iris_valid_cap_id(enum platform_inst_fw_cap_type cap_id)
+{
+ return cap_id >= 1 && cap_id < INST_FW_CAP_MAX;
+}
+
+static enum platform_inst_fw_cap_type iris_get_cap_id(u32 id)
+{
+ switch (id) {
+ case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
+ return DEBLOCK;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ return PROFILE;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ return LEVEL;
+ default:
+ return INST_FW_CAP_MAX;
+ }
+}
+
+static u32 iris_get_v4l2_id(enum platform_inst_fw_cap_type cap_id)
+{
+ if (!iris_valid_cap_id(cap_id))
+ return 0;
+
+ switch (cap_id) {
+ case DEBLOCK:
+ return V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER;
+ case PROFILE:
+ return V4L2_CID_MPEG_VIDEO_H264_PROFILE;
+ case LEVEL:
+ return V4L2_CID_MPEG_VIDEO_H264_LEVEL;
+ default:
+ return 0;
+ }
+}
+
+static int iris_vdec_op_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct iris_inst *inst = container_of(ctrl->handler, struct iris_inst, ctrl_handler);
+ enum platform_inst_fw_cap_type cap_id;
+ struct platform_inst_fw_cap *cap;
+ struct vb2_queue *q;
+
+ cap = &inst->fw_caps[0];
+ cap_id = iris_get_cap_id(ctrl->id);
+ if (!iris_valid_cap_id(cap_id))
+ return -EINVAL;
+
+ q = v4l2_m2m_get_src_vq(inst->m2m_ctx);
+ if (vb2_is_streaming(q) &&
+ (!(inst->fw_caps[cap_id].flags & CAP_FLAG_DYNAMIC_ALLOWED)))
+ return -EINVAL;
+
+ cap[cap_id].flags |= CAP_FLAG_CLIENT_SET;
+
+ inst->fw_caps[cap_id].value = ctrl->val;
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops iris_ctrl_ops = {
+ .s_ctrl = iris_vdec_op_s_ctrl,
+};
+
+int iris_ctrls_init(struct iris_inst *inst)
+{
+ struct platform_inst_fw_cap *cap = &inst->fw_caps[0];
+ u32 num_ctrls = 0, ctrl_idx = 0, idx = 0;
+ u32 v4l2_id;
+ int ret;
+
+ for (idx = 1; idx < INST_FW_CAP_MAX; idx++) {
+ if (iris_get_v4l2_id(cap[idx].cap_id))
+ num_ctrls++;
+ }
+ if (!num_ctrls)
+ return -EINVAL;
+
+ /* Adding 1 to num_ctrls to include V4L2_CID_MIN_BUFFERS_FOR_CAPTURE */
+
+ ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, num_ctrls + 1);
+ if (ret)
+ return ret;
+
+ for (idx = 1; idx < INST_FW_CAP_MAX; idx++) {
+ struct v4l2_ctrl *ctrl;
+
+ v4l2_id = iris_get_v4l2_id(cap[idx].cap_id);
+ if (!v4l2_id)
+ continue;
+
+ if (ctrl_idx >= num_ctrls) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (cap[idx].flags & CAP_FLAG_MENU) {
+ ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler,
+ &iris_ctrl_ops,
+ v4l2_id,
+ cap[idx].max,
+ ~(cap[idx].step_or_mask),
+ cap[idx].value);
+ } else {
+ ctrl = v4l2_ctrl_new_std(&inst->ctrl_handler,
+ &iris_ctrl_ops,
+ v4l2_id,
+ cap[idx].min,
+ cap[idx].max,
+ cap[idx].step_or_mask,
+ cap[idx].value);
+ }
+ if (!ctrl) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ ctrl_idx++;
+ }
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, NULL,
+ V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 32, 1, 4);
+
+ ret = inst->ctrl_handler.error;
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ v4l2_ctrl_handler_free(&inst->ctrl_handler);
+
+ return ret;
+}
+
+void iris_session_init_caps(struct iris_core *core)
+{
+ struct platform_inst_fw_cap *caps;
+ u32 i, num_cap, cap_id;
+
+ caps = core->iris_platform_data->inst_fw_caps;
+ num_cap = core->iris_platform_data->inst_fw_caps_size;
+
+ for (i = 0; i < num_cap; i++) {
+ cap_id = caps[i].cap_id;
+ if (!iris_valid_cap_id(cap_id))
+ continue;
+
+ core->inst_fw_caps[cap_id].cap_id = caps[i].cap_id;
+ core->inst_fw_caps[cap_id].min = caps[i].min;
+ core->inst_fw_caps[cap_id].max = caps[i].max;
+ core->inst_fw_caps[cap_id].step_or_mask = caps[i].step_or_mask;
+ core->inst_fw_caps[cap_id].value = caps[i].value;
+ core->inst_fw_caps[cap_id].flags = caps[i].flags;
+ core->inst_fw_caps[cap_id].hfi_id = caps[i].hfi_id;
+ }
+}
+
+static u32 iris_get_port_info(struct iris_inst *inst,
+ enum platform_inst_fw_cap_type cap_id)
+{
+ if (inst->fw_caps[cap_id].flags & CAP_FLAG_INPUT_PORT)
+ return HFI_PORT_BITSTREAM;
+ else if (inst->fw_caps[cap_id].flags & CAP_FLAG_OUTPUT_PORT)
+ return HFI_PORT_RAW;
+
+ return HFI_PORT_NONE;
+}
+
+int iris_set_u32_enum(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 hfi_value = inst->fw_caps[cap_id].value;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32_ENUM,
+ &hfi_value, sizeof(u32));
+}
+
+int iris_set_u32(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 hfi_value = inst->fw_caps[cap_id].value;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32,
+ &hfi_value, sizeof(u32));
+}
+
+int iris_set_stage(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ struct v4l2_format *inp_f = inst->fmt_src;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+ u32 height = inp_f->fmt.pix_mp.height;
+ u32 width = inp_f->fmt.pix_mp.width;
+ u32 work_mode = STAGE_2;
+
+ if (iris_res_is_less_than(width, height, 1280, 720))
+ work_mode = STAGE_1;
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32,
+ &work_mode, sizeof(u32));
+}
+
+int iris_set_pipe(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 work_route = inst->fw_caps[PIPE].value;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32,
+ &work_route, sizeof(u32));
+}
+
+int iris_set_properties(struct iris_inst *inst, u32 plane)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ struct platform_inst_fw_cap *cap;
+ int ret;
+ u32 i;
+
+ ret = hfi_ops->session_set_config_params(inst, plane);
+ if (ret)
+ return ret;
+
+ for (i = 1; i < INST_FW_CAP_MAX; i++) {
+ cap = &inst->fw_caps[i];
+ if (!iris_valid_cap_id(cap->cap_id))
+ continue;
+
+ if (cap->cap_id && cap->set)
+ cap->set(inst, i);
+ }
+
+ return 0;
+}
diff --git a/drivers/media/platform/qcom/iris/iris_ctrls.h b/drivers/media/platform/qcom/iris/iris_ctrls.h
new file mode 100644
index 000000000000..9b5741868933
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_ctrls.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_CTRLS_H__
+#define __IRIS_CTRLS_H__
+
+#include "iris_platform_common.h"
+
+struct iris_core;
+struct iris_inst;
+
+int iris_ctrls_init(struct iris_inst *inst);
+void iris_session_init_caps(struct iris_core *core);
+int iris_set_u32_enum(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_stage(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_pipe(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_u32(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_properties(struct iris_inst *inst, u32 plane);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_firmware.c b/drivers/media/platform/qcom/iris/iris_firmware.c
new file mode 100644
index 000000000000..7c493b4a75db
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_firmware.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/firmware.h>
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/soc/qcom/mdt_loader.h>
+
+#include "iris_core.h"
+#include "iris_firmware.h"
+
+#define MAX_FIRMWARE_NAME_SIZE 128
+
+static int iris_load_fw_to_memory(struct iris_core *core, const char *fw_name)
+{
+ u32 pas_id = core->iris_platform_data->pas_id;
+ const struct firmware *firmware = NULL;
+ struct device *dev = core->dev;
+ struct reserved_mem *rmem;
+ struct device_node *node;
+ phys_addr_t mem_phys;
+ size_t res_size;
+ ssize_t fw_size;
+ void *mem_virt;
+ int ret;
+
+ if (strlen(fw_name) >= MAX_FIRMWARE_NAME_SIZE - 4)
+ return -EINVAL;
+
+ node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (!node)
+ return -EINVAL;
+
+ rmem = of_reserved_mem_lookup(node);
+ of_node_put(node);
+ if (!rmem)
+ return -EINVAL;
+
+ mem_phys = rmem->base;
+ res_size = rmem->size;
+
+ ret = request_firmware(&firmware, fw_name, dev);
+ if (ret)
+ return ret;
+
+ fw_size = qcom_mdt_get_size(firmware);
+ if (fw_size < 0 || res_size < (size_t)fw_size) {
+ ret = -EINVAL;
+ goto err_release_fw;
+ }
+
+ mem_virt = memremap(mem_phys, res_size, MEMREMAP_WC);
+ if (!mem_virt) {
+ ret = -ENOMEM;
+ goto err_release_fw;
+ }
+
+ ret = qcom_mdt_load(dev, firmware, fw_name,
+ pas_id, mem_virt, mem_phys, res_size, NULL);
+ if (ret)
+ goto err_mem_unmap;
+
+ ret = qcom_scm_pas_auth_and_reset(pas_id);
+ if (ret)
+ goto err_mem_unmap;
+
+ return ret;
+
+err_mem_unmap:
+ memunmap(mem_virt);
+err_release_fw:
+ release_firmware(firmware);
+
+ return ret;
+}
+
+int iris_fw_load(struct iris_core *core)
+{
+ struct tz_cp_config *cp_config = core->iris_platform_data->tz_cp_config_data;
+ const char *fwpath = NULL;
+ int ret;
+
+ ret = of_property_read_string_index(core->dev->of_node, "firmware-name", 0,
+ &fwpath);
+ if (ret)
+ fwpath = core->iris_platform_data->fwname;
+
+ ret = iris_load_fw_to_memory(core, fwpath);
+ if (ret) {
+ dev_err(core->dev, "firmware download failed\n");
+ return -ENOMEM;
+ }
+
+ ret = qcom_scm_mem_protect_video_var(cp_config->cp_start,
+ cp_config->cp_size,
+ cp_config->cp_nonpixel_start,
+ cp_config->cp_nonpixel_size);
+ if (ret) {
+ dev_err(core->dev, "protect memory failed\n");
+ qcom_scm_pas_shutdown(core->iris_platform_data->pas_id);
+ return ret;
+ }
+
+ return ret;
+}
+
+int iris_fw_unload(struct iris_core *core)
+{
+ return qcom_scm_pas_shutdown(core->iris_platform_data->pas_id);
+}
+
+int iris_set_hw_state(struct iris_core *core, bool resume)
+{
+ return qcom_scm_set_remote_state(resume, 0);
+}
diff --git a/drivers/media/platform/qcom/iris/iris_firmware.h b/drivers/media/platform/qcom/iris/iris_firmware.h
new file mode 100644
index 000000000000..e833ecd34887
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_firmware.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_FIRMWARE_H__
+#define __IRIS_FIRMWARE_H__
+
+struct iris_core;
+
+int iris_fw_load(struct iris_core *core);
+int iris_fw_unload(struct iris_core *core);
+int iris_set_hw_state(struct iris_core *core, bool resume);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_common.c b/drivers/media/platform/qcom/iris/iris_hfi_common.c
new file mode 100644
index 000000000000..92112eb16c11
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_hfi_common.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/pm_runtime.h>
+
+#include "iris_firmware.h"
+#include "iris_core.h"
+#include "iris_hfi_common.h"
+#include "iris_vpu_common.h"
+
+u32 iris_hfi_get_v4l2_color_primaries(u32 hfi_primaries)
+{
+ switch (hfi_primaries) {
+ case HFI_PRIMARIES_RESERVED:
+ return V4L2_COLORSPACE_DEFAULT;
+ case HFI_PRIMARIES_BT709:
+ return V4L2_COLORSPACE_REC709;
+ case HFI_PRIMARIES_BT470_SYSTEM_M:
+ return V4L2_COLORSPACE_470_SYSTEM_M;
+ case HFI_PRIMARIES_BT470_SYSTEM_BG:
+ return V4L2_COLORSPACE_470_SYSTEM_BG;
+ case HFI_PRIMARIES_BT601_525:
+ return V4L2_COLORSPACE_SMPTE170M;
+ case HFI_PRIMARIES_SMPTE_ST240M:
+ return V4L2_COLORSPACE_SMPTE240M;
+ case HFI_PRIMARIES_BT2020:
+ return V4L2_COLORSPACE_BT2020;
+ case HFI_PRIMARIES_SMPTE_RP431_2:
+ return V4L2_COLORSPACE_DCI_P3;
+ default:
+ return V4L2_COLORSPACE_DEFAULT;
+ }
+}
+
+u32 iris_hfi_get_v4l2_transfer_char(u32 hfi_characteristics)
+{
+ switch (hfi_characteristics) {
+ case HFI_TRANSFER_RESERVED:
+ return V4L2_XFER_FUNC_DEFAULT;
+ case HFI_TRANSFER_BT709:
+ return V4L2_XFER_FUNC_709;
+ case HFI_TRANSFER_SMPTE_ST240M:
+ return V4L2_XFER_FUNC_SMPTE240M;
+ case HFI_TRANSFER_SRGB_SYCC:
+ return V4L2_XFER_FUNC_SRGB;
+ case HFI_TRANSFER_SMPTE_ST2084_PQ:
+ return V4L2_XFER_FUNC_SMPTE2084;
+ default:
+ return V4L2_XFER_FUNC_DEFAULT;
+ }
+}
+
+u32 iris_hfi_get_v4l2_matrix_coefficients(u32 hfi_coefficients)
+{
+ switch (hfi_coefficients) {
+ case HFI_MATRIX_COEFF_RESERVED:
+ return V4L2_YCBCR_ENC_DEFAULT;
+ case HFI_MATRIX_COEFF_BT709:
+ return V4L2_YCBCR_ENC_709;
+ case HFI_MATRIX_COEFF_BT470_SYS_BG_OR_BT601_625:
+ return V4L2_YCBCR_ENC_XV601;
+ case HFI_MATRIX_COEFF_BT601_525_BT1358_525_OR_625:
+ return V4L2_YCBCR_ENC_601;
+ case HFI_MATRIX_COEFF_SMPTE_ST240:
+ return V4L2_YCBCR_ENC_SMPTE240M;
+ case HFI_MATRIX_COEFF_BT2020_NON_CONSTANT:
+ return V4L2_YCBCR_ENC_BT2020;
+ case HFI_MATRIX_COEFF_BT2020_CONSTANT:
+ return V4L2_YCBCR_ENC_BT2020_CONST_LUM;
+ default:
+ return V4L2_YCBCR_ENC_DEFAULT;
+ }
+}
+
+int iris_hfi_core_init(struct iris_core *core)
+{
+ const struct iris_hfi_command_ops *hfi_ops = core->hfi_ops;
+ int ret;
+
+ ret = hfi_ops->sys_init(core);
+ if (ret)
+ return ret;
+
+ ret = hfi_ops->sys_image_version(core);
+ if (ret)
+ return ret;
+
+ return hfi_ops->sys_interframe_powercollapse(core);
+}
+
+irqreturn_t iris_hfi_isr(int irq, void *data)
+{
+ disable_irq_nosync(irq);
+
+ return IRQ_WAKE_THREAD;
+}
+
+irqreturn_t iris_hfi_isr_handler(int irq, void *data)
+{
+ struct iris_core *core = data;
+
+ if (!core)
+ return IRQ_NONE;
+
+ mutex_lock(&core->lock);
+ pm_runtime_mark_last_busy(core->dev);
+ iris_vpu_clear_interrupt(core);
+ mutex_unlock(&core->lock);
+
+ core->hfi_response_ops->hfi_response_handler(core);
+
+ if (!iris_vpu_watchdog(core, core->intr_status))
+ enable_irq(irq);
+
+ return IRQ_HANDLED;
+}
+
+int iris_hfi_pm_suspend(struct iris_core *core)
+{
+ int ret;
+
+ ret = iris_vpu_prepare_pc(core);
+ if (ret) {
+ pm_runtime_mark_last_busy(core->dev);
+ ret = -EAGAIN;
+ goto error;
+ }
+
+ ret = iris_set_hw_state(core, false);
+ if (ret)
+ goto error;
+
+ iris_vpu_power_off(core);
+
+ return 0;
+
+error:
+ dev_err(core->dev, "failed to suspend\n");
+
+ return ret;
+}
+
+int iris_hfi_pm_resume(struct iris_core *core)
+{
+ const struct iris_hfi_command_ops *ops = core->hfi_ops;
+ int ret;
+
+ ret = iris_vpu_power_on(core);
+ if (ret)
+ goto error;
+
+ ret = iris_set_hw_state(core, true);
+ if (ret)
+ goto err_power_off;
+
+ ret = iris_vpu_boot_firmware(core);
+ if (ret)
+ goto err_suspend_hw;
+
+ ret = ops->sys_interframe_powercollapse(core);
+ if (ret)
+ goto err_suspend_hw;
+
+ return 0;
+
+err_suspend_hw:
+ iris_set_hw_state(core, false);
+err_power_off:
+ iris_vpu_power_off(core);
+error:
+ dev_err(core->dev, "failed to resume\n");
+
+ return -EBUSY;
+}
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_common.h b/drivers/media/platform/qcom/iris/iris_hfi_common.h
new file mode 100644
index 000000000000..b2c541367fc6
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_hfi_common.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_HFI_COMMON_H__
+#define __IRIS_HFI_COMMON_H__
+
+#include <linux/types.h>
+#include <media/v4l2-device.h>
+
+#include "iris_buffer.h"
+
+struct iris_inst;
+struct iris_core;
+
+enum hfi_packet_port_type {
+ HFI_PORT_NONE = 0x00000000,
+ HFI_PORT_BITSTREAM = 0x00000001,
+ HFI_PORT_RAW = 0x00000002,
+};
+
+enum hfi_packet_payload_info {
+ HFI_PAYLOAD_NONE = 0x00000000,
+ HFI_PAYLOAD_U32 = 0x00000001,
+ HFI_PAYLOAD_S32 = 0x00000002,
+ HFI_PAYLOAD_U64 = 0x00000003,
+ HFI_PAYLOAD_S64 = 0x00000004,
+ HFI_PAYLOAD_STRUCTURE = 0x00000005,
+ HFI_PAYLOAD_BLOB = 0x00000006,
+ HFI_PAYLOAD_STRING = 0x00000007,
+ HFI_PAYLOAD_Q16 = 0x00000008,
+ HFI_PAYLOAD_U32_ENUM = 0x00000009,
+ HFI_PAYLOAD_32_PACKED = 0x0000000a,
+ HFI_PAYLOAD_U32_ARRAY = 0x0000000b,
+ HFI_PAYLOAD_S32_ARRAY = 0x0000000c,
+ HFI_PAYLOAD_64_PACKED = 0x0000000d,
+};
+
+enum hfi_packet_host_flags {
+ HFI_HOST_FLAGS_NONE = 0x00000000,
+ HFI_HOST_FLAGS_INTR_REQUIRED = 0x00000001,
+ HFI_HOST_FLAGS_RESPONSE_REQUIRED = 0x00000002,
+ HFI_HOST_FLAGS_NON_DISCARDABLE = 0x00000004,
+ HFI_HOST_FLAGS_GET_PROPERTY = 0x00000008,
+};
+
+enum hfi_color_primaries {
+ HFI_PRIMARIES_RESERVED = 0,
+ HFI_PRIMARIES_BT709 = 1,
+ HFI_PRIMARIES_UNSPECIFIED = 2,
+ HFI_PRIMARIES_BT470_SYSTEM_M = 4,
+ HFI_PRIMARIES_BT470_SYSTEM_BG = 5,
+ HFI_PRIMARIES_BT601_525 = 6,
+ HFI_PRIMARIES_SMPTE_ST240M = 7,
+ HFI_PRIMARIES_GENERIC_FILM = 8,
+ HFI_PRIMARIES_BT2020 = 9,
+ HFI_PRIMARIES_SMPTE_ST428_1 = 10,
+ HFI_PRIMARIES_SMPTE_RP431_2 = 11,
+ HFI_PRIMARIES_SMPTE_EG431_1 = 12,
+ HFI_PRIMARIES_SMPTE_EBU_TECH = 22,
+};
+
+enum hfi_transfer_characteristics {
+ HFI_TRANSFER_RESERVED = 0,
+ HFI_TRANSFER_BT709 = 1,
+ HFI_TRANSFER_UNSPECIFIED = 2,
+ HFI_TRANSFER_BT470_SYSTEM_M = 4,
+ HFI_TRANSFER_BT470_SYSTEM_BG = 5,
+ HFI_TRANSFER_BT601_525_OR_625 = 6,
+ HFI_TRANSFER_SMPTE_ST240M = 7,
+ HFI_TRANSFER_LINEAR = 8,
+ HFI_TRANSFER_LOG_100_1 = 9,
+ HFI_TRANSFER_LOG_SQRT = 10,
+ HFI_TRANSFER_XVYCC = 11,
+ HFI_TRANSFER_BT1361_0 = 12,
+ HFI_TRANSFER_SRGB_SYCC = 13,
+ HFI_TRANSFER_BT2020_14 = 14,
+ HFI_TRANSFER_BT2020_15 = 15,
+ HFI_TRANSFER_SMPTE_ST2084_PQ = 16,
+ HFI_TRANSFER_SMPTE_ST428_1 = 17,
+ HFI_TRANSFER_BT2100_2_HLG = 18,
+};
+
+enum hfi_matrix_coefficients {
+ HFI_MATRIX_COEFF_SRGB_SMPTE_ST428_1 = 0,
+ HFI_MATRIX_COEFF_BT709 = 1,
+ HFI_MATRIX_COEFF_UNSPECIFIED = 2,
+ HFI_MATRIX_COEFF_RESERVED = 3,
+ HFI_MATRIX_COEFF_FCC_TITLE_47 = 4,
+ HFI_MATRIX_COEFF_BT470_SYS_BG_OR_BT601_625 = 5,
+ HFI_MATRIX_COEFF_BT601_525_BT1358_525_OR_625 = 6,
+ HFI_MATRIX_COEFF_SMPTE_ST240 = 7,
+ HFI_MATRIX_COEFF_YCGCO = 8,
+ HFI_MATRIX_COEFF_BT2020_NON_CONSTANT = 9,
+ HFI_MATRIX_COEFF_BT2020_CONSTANT = 10,
+ HFI_MATRIX_COEFF_SMPTE_ST2085 = 11,
+ HFI_MATRIX_COEFF_SMPTE_CHROM_DERV_NON_CONSTANT = 12,
+ HFI_MATRIX_COEFF_SMPTE_CHROM_DERV_CONSTANT = 13,
+ HFI_MATRIX_COEFF_BT2100 = 14,
+};
+
+struct iris_hfi_prop_type_handle {
+ u32 type;
+ int (*handle)(struct iris_inst *inst);
+};
+
+struct iris_hfi_command_ops {
+ int (*sys_init)(struct iris_core *core);
+ int (*sys_image_version)(struct iris_core *core);
+ int (*sys_interframe_powercollapse)(struct iris_core *core);
+ int (*sys_pc_prep)(struct iris_core *core);
+ int (*session_set_config_params)(struct iris_inst *inst, u32 plane);
+ int (*session_set_property)(struct iris_inst *inst,
+ u32 packet_type, u32 flag, u32 plane, u32 payload_type,
+ void *payload, u32 payload_size);
+ int (*session_open)(struct iris_inst *inst);
+ int (*session_start)(struct iris_inst *inst, u32 plane);
+ int (*session_queue_buf)(struct iris_inst *inst, struct iris_buffer *buffer);
+ int (*session_release_buf)(struct iris_inst *inst, struct iris_buffer *buffer);
+ int (*session_pause)(struct iris_inst *inst, u32 plane);
+ int (*session_resume_drc)(struct iris_inst *inst, u32 plane);
+ int (*session_stop)(struct iris_inst *inst, u32 plane);
+ int (*session_drain)(struct iris_inst *inst, u32 plane);
+ int (*session_resume_drain)(struct iris_inst *inst, u32 plane);
+ int (*session_close)(struct iris_inst *inst);
+};
+
+struct iris_hfi_response_ops {
+ void (*hfi_response_handler)(struct iris_core *core);
+};
+
+struct hfi_subscription_params {
+ u32 bitstream_resolution;
+ u32 crop_offsets[2];
+ u32 bit_depth;
+ u32 coded_frames;
+ u32 fw_min_count;
+ u32 pic_order_cnt;
+ u32 color_info;
+ u32 profile;
+ u32 level;
+};
+
+u32 iris_hfi_get_v4l2_color_primaries(u32 hfi_primaries);
+u32 iris_hfi_get_v4l2_transfer_char(u32 hfi_characteristics);
+u32 iris_hfi_get_v4l2_matrix_coefficients(u32 hfi_coefficients);
+int iris_hfi_core_init(struct iris_core *core);
+int iris_hfi_pm_suspend(struct iris_core *core);
+int iris_hfi_pm_resume(struct iris_core *core);
+
+irqreturn_t iris_hfi_isr(int irq, void *data);
+irqreturn_t iris_hfi_isr_handler(int irq, void *data);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen1.h b/drivers/media/platform/qcom/iris/iris_hfi_gen1.h
new file mode 100644
index 000000000000..19b8e9054a75
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen1.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_HFI_GEN1_H__
+#define __IRIS_HFI_GEN1_H__
+
+struct iris_core;
+struct iris_inst;
+
+void iris_hfi_gen1_command_ops_init(struct iris_core *core);
+void iris_hfi_gen1_response_ops_init(struct iris_core *core);
+struct iris_inst *iris_hfi_gen1_get_instance(void);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c b/drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c
new file mode 100644
index 000000000000..64f887d9a17d
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c
@@ -0,0 +1,826 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "iris_hfi_gen1.h"
+#include "iris_hfi_gen1_defines.h"
+#include "iris_instance.h"
+#include "iris_vpu_buffer.h"
+
+static u32 iris_hfi_gen1_buf_type_from_driver(enum iris_buffer_type buffer_type)
+{
+ switch (buffer_type) {
+ case BUF_INPUT:
+ return HFI_BUFFER_INPUT;
+ case BUF_OUTPUT:
+ return HFI_BUFFER_OUTPUT;
+ case BUF_PERSIST:
+ return HFI_BUFFER_INTERNAL_PERSIST_1;
+ case BUF_BIN:
+ return HFI_BUFFER_INTERNAL_SCRATCH;
+ case BUF_SCRATCH_1:
+ return HFI_BUFFER_INTERNAL_SCRATCH_1;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int iris_hfi_gen1_sys_init(struct iris_core *core)
+{
+ struct hfi_sys_init_pkt sys_init_pkt;
+
+ sys_init_pkt.hdr.size = sizeof(sys_init_pkt);
+ sys_init_pkt.hdr.pkt_type = HFI_CMD_SYS_INIT;
+ sys_init_pkt.arch_type = HFI_VIDEO_ARCH_OX;
+
+ return iris_hfi_queue_cmd_write_locked(core, &sys_init_pkt, sys_init_pkt.hdr.size);
+}
+
+static int iris_hfi_gen1_sys_image_version(struct iris_core *core)
+{
+ struct hfi_sys_get_property_pkt packet;
+
+ packet.hdr.size = sizeof(packet);
+ packet.hdr.pkt_type = HFI_CMD_SYS_GET_PROPERTY;
+ packet.num_properties = 1;
+ packet.data = HFI_PROPERTY_SYS_IMAGE_VERSION;
+
+ return iris_hfi_queue_cmd_write_locked(core, &packet, packet.hdr.size);
+}
+
+static int iris_hfi_gen1_sys_interframe_powercollapse(struct iris_core *core)
+{
+ struct hfi_sys_set_property_pkt *pkt;
+ struct hfi_enable *hfi;
+ u32 packet_size;
+ int ret;
+
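+ /* One property id in data[0], followed by the hfi_enable payload */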
+ packet_size = struct_size(pkt, data, 1) + sizeof(*hfi);
+ pkt = kzalloc(packet_size, GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+
+ hfi = (struct hfi_enable *)&pkt->data[1];
+
+ pkt->hdr.size = packet_size;
+ pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY;
+ pkt->num_properties = 1;
+ pkt->data[0] = HFI_PROPERTY_SYS_CODEC_POWER_PLANE_CTRL;
+ hfi->enable = true;
+
+ ret = iris_hfi_queue_cmd_write_locked(core, pkt, pkt->hdr.size);
+ kfree(pkt);
+
+ return ret;
+}
+
+static int iris_hfi_gen1_sys_pc_prep(struct iris_core *core)
+{
+ struct hfi_sys_pc_prep_pkt pkt;
+
+ pkt.hdr.size = sizeof(struct hfi_sys_pc_prep_pkt);
+ pkt.hdr.pkt_type = HFI_CMD_SYS_PC_PREP;
+
+ return iris_hfi_queue_cmd_write_locked(core, &pkt, pkt.hdr.size);
+}
+
+static int iris_hfi_gen1_session_open(struct iris_inst *inst)
+{
+ struct hfi_session_open_pkt packet;
+ int ret;
+
+ if (inst->state != IRIS_INST_DEINIT)
+ return -EALREADY;
+
+ packet.shdr.hdr.size = sizeof(struct hfi_session_open_pkt);
+ packet.shdr.hdr.pkt_type = HFI_CMD_SYS_SESSION_INIT;
+ packet.shdr.session_id = inst->session_id;
+ packet.session_domain = HFI_SESSION_TYPE_DEC;
+ packet.session_codec = HFI_VIDEO_CODEC_H264;
+
+ reinit_completion(&inst->completion);
+
+ ret = iris_hfi_queue_cmd_write(inst->core, &packet, packet.shdr.hdr.size);
+ if (ret)
+ return ret;
+
+ return iris_wait_for_session_response(inst, false);
+}
+
+static void iris_hfi_gen1_packet_session_cmd(struct iris_inst *inst,
+ struct hfi_session_pkt *packet,
+ u32 ptype)
+{
+ packet->shdr.hdr.size = sizeof(*packet);
+ packet->shdr.hdr.pkt_type = ptype;
+ packet->shdr.session_id = inst->session_id;
+}
+
+static int iris_hfi_gen1_session_close(struct iris_inst *inst)
+{
+ struct hfi_session_pkt packet;
+
+ iris_hfi_gen1_packet_session_cmd(inst, &packet, HFI_CMD_SYS_SESSION_END);
+
+ return iris_hfi_queue_cmd_write(inst->core, &packet, packet.shdr.hdr.size);
+}
+
+static int iris_hfi_gen1_session_start(struct iris_inst *inst, u32 plane)
+{
+ struct iris_core *core = inst->core;
+ struct hfi_session_pkt packet;
+ int ret;
+
+ if (!V4L2_TYPE_IS_OUTPUT(plane))
+ return 0;
+
+ if (inst->sub_state & IRIS_INST_SUB_LOAD_RESOURCES)
+ return 0;
+
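+ /* Load the resources first, then start; both commands are waited on synchronously */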
+ reinit_completion(&inst->completion);
+ iris_hfi_gen1_packet_session_cmd(inst, &packet, HFI_CMD_SESSION_LOAD_RESOURCES);
+
+ ret = iris_hfi_queue_cmd_write(core, &packet, packet.shdr.hdr.size);
+ if (ret)
+ return ret;
+
+ ret = iris_wait_for_session_response(inst, false);
+ if (ret)
+ return ret;
+
+ reinit_completion(&inst->completion);
+ iris_hfi_gen1_packet_session_cmd(inst, &packet, HFI_CMD_SESSION_START);
+
+ ret = iris_hfi_queue_cmd_write(core, &packet, packet.shdr.hdr.size);
+ if (ret)
+ return ret;
+
+ ret = iris_wait_for_session_response(inst, false);
+ if (ret)
+ return ret;
+
+ return iris_inst_change_sub_state(inst, 0, IRIS_INST_SUB_LOAD_RESOURCES);
+}
+
+static int iris_hfi_gen1_session_stop(struct iris_inst *inst, u32 plane)
+{
+ struct hfi_session_flush_pkt flush_pkt;
+ struct iris_core *core = inst->core;
+ struct hfi_session_pkt pkt;
+ u32 flush_type = 0;
+ int ret = 0;
+
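+ /*
+  * When the only streaming port is being stopped, or the instance is in
+  * error, tear the session down with STOP and RELEASE_RESOURCES and return
+  * all queued buffers. When both ports are streaming, a FLUSH of the
+  * affected port(s) is enough.
+  */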
+ if ((V4L2_TYPE_IS_OUTPUT(plane) &&
+ inst->state == IRIS_INST_INPUT_STREAMING) ||
+ (V4L2_TYPE_IS_CAPTURE(plane) &&
+ inst->state == IRIS_INST_OUTPUT_STREAMING) ||
+ inst->state == IRIS_INST_ERROR) {
+ reinit_completion(&inst->completion);
+ iris_hfi_gen1_packet_session_cmd(inst, &pkt, HFI_CMD_SESSION_STOP);
+ ret = iris_hfi_queue_cmd_write(core, &pkt, pkt.shdr.hdr.size);
+ if (!ret)
+ ret = iris_wait_for_session_response(inst, false);
+
+ reinit_completion(&inst->completion);
+ iris_hfi_gen1_packet_session_cmd(inst, &pkt, HFI_CMD_SESSION_RELEASE_RESOURCES);
+ ret = iris_hfi_queue_cmd_write(core, &pkt, pkt.shdr.hdr.size);
+ if (!ret)
+ ret = iris_wait_for_session_response(inst, false);
+
+ iris_inst_change_sub_state(inst, IRIS_INST_SUB_LOAD_RESOURCES, 0);
+
+ iris_helper_buffers_done(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ VB2_BUF_STATE_ERROR);
+ iris_helper_buffers_done(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ VB2_BUF_STATE_ERROR);
+ } else if (inst->state == IRIS_INST_STREAMING) {
+ if (V4L2_TYPE_IS_OUTPUT(plane))
+ flush_type = HFI_FLUSH_ALL;
+ else if (V4L2_TYPE_IS_CAPTURE(plane))
+ flush_type = HFI_FLUSH_OUTPUT;
+
+ reinit_completion(&inst->flush_completion);
+
+ flush_pkt.shdr.hdr.size = sizeof(struct hfi_session_flush_pkt);
+ flush_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_FLUSH;
+ flush_pkt.shdr.session_id = inst->session_id;
+ flush_pkt.flush_type = flush_type;
+
+ ret = iris_hfi_queue_cmd_write(core, &flush_pkt, flush_pkt.shdr.hdr.size);
+ if (!ret)
+ ret = iris_wait_for_session_response(inst, true);
+ }
+
+ return ret;
+}
+
+static int iris_hfi_gen1_session_continue(struct iris_inst *inst, u32 plane)
+{
+ struct hfi_session_pkt packet;
+
+ iris_hfi_gen1_packet_session_cmd(inst, &packet, HFI_CMD_SESSION_CONTINUE);
+
+ return iris_hfi_queue_cmd_write(inst->core, &packet, packet.shdr.hdr.size);
+}
+
+static int iris_hfi_gen1_queue_input_buffer(struct iris_inst *inst, struct iris_buffer *buf)
+{
+ struct hfi_session_empty_buffer_compressed_pkt ip_pkt;
+
+ ip_pkt.shdr.hdr.size = sizeof(struct hfi_session_empty_buffer_compressed_pkt);
+ ip_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER;
+ ip_pkt.shdr.session_id = inst->session_id;
+ ip_pkt.time_stamp_hi = upper_32_bits(buf->timestamp);
+ ip_pkt.time_stamp_lo = lower_32_bits(buf->timestamp);
+ ip_pkt.flags = buf->flags;
+ ip_pkt.mark_target = 0;
+ ip_pkt.mark_data = 0;
+ ip_pkt.offset = buf->data_offset;
+ ip_pkt.alloc_len = buf->buffer_size;
+ ip_pkt.filled_len = buf->data_size;
+ ip_pkt.input_tag = buf->index;
+ ip_pkt.packet_buffer = buf->device_addr;
+
+ return iris_hfi_queue_cmd_write(inst->core, &ip_pkt, ip_pkt.shdr.hdr.size);
+}
+
+static int iris_hfi_gen1_queue_output_buffer(struct iris_inst *inst, struct iris_buffer *buf)
+{
+ struct hfi_session_fill_buffer_pkt op_pkt;
+
+ op_pkt.shdr.hdr.size = sizeof(struct hfi_session_fill_buffer_pkt);
+ op_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_FILL_BUFFER;
+ op_pkt.shdr.session_id = inst->session_id;
+ op_pkt.output_tag = buf->index;
+ op_pkt.packet_buffer = buf->device_addr;
+ op_pkt.extradata_buffer = 0;
+ op_pkt.alloc_len = buf->buffer_size;
+ op_pkt.filled_len = buf->data_size;
+ op_pkt.offset = buf->data_offset;
+ op_pkt.data = 0;
+
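+ /* In split mode, capture (OPB) buffers are queued on stream 1 and DPB buffers on stream 0 */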
+ if (buf->type == BUF_OUTPUT && iris_split_mode_enabled(inst))
+ op_pkt.stream_id = 1;
+ else
+ op_pkt.stream_id = 0;
+
+ return iris_hfi_queue_cmd_write(inst->core, &op_pkt, op_pkt.shdr.hdr.size);
+}
+
+static int iris_hfi_gen1_queue_internal_buffer(struct iris_inst *inst, struct iris_buffer *buf)
+{
+ struct hfi_session_set_buffers_pkt *int_pkt;
+ u32 buffer_type, i;
+ u32 packet_size;
+ int ret;
+
+ packet_size = struct_size(int_pkt, buffer_info, 1);
+ int_pkt = kzalloc(packet_size, GFP_KERNEL);
+ if (!int_pkt)
+ return -ENOMEM;
+
+ int_pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_BUFFERS;
+ int_pkt->shdr.session_id = inst->session_id;
+ int_pkt->buffer_size = buf->buffer_size;
+ int_pkt->min_buffer_size = buf->buffer_size;
+ int_pkt->num_buffers = 1;
+ int_pkt->extradata_size = 0;
+ int_pkt->shdr.hdr.size = packet_size;
+ for (i = 0; i < int_pkt->num_buffers; i++)
+ int_pkt->buffer_info[i] = buf->device_addr;
+ buffer_type = iris_hfi_gen1_buf_type_from_driver(buf->type);
+ if (buffer_type == -EINVAL) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ int_pkt->buffer_type = buffer_type;
+ ret = iris_hfi_queue_cmd_write(inst->core, int_pkt, int_pkt->shdr.hdr.size);
+
+exit:
+ kfree(int_pkt);
+
+ return ret;
+}
+
+static int iris_hfi_gen1_session_queue_buffer(struct iris_inst *inst, struct iris_buffer *buf)
+{
+ switch (buf->type) {
+ case BUF_INPUT:
+ return iris_hfi_gen1_queue_input_buffer(inst, buf);
+ case BUF_OUTPUT:
+ case BUF_DPB:
+ return iris_hfi_gen1_queue_output_buffer(inst, buf);
+ case BUF_PERSIST:
+ case BUF_BIN:
+ case BUF_SCRATCH_1:
+ return iris_hfi_gen1_queue_internal_buffer(inst, buf);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int iris_hfi_gen1_session_unset_buffers(struct iris_inst *inst, struct iris_buffer *buf)
+{
+ struct hfi_session_release_buffer_pkt *pkt;
+ u32 packet_size, buffer_type, i;
+ int ret;
+
+ buffer_type = iris_hfi_gen1_buf_type_from_driver(buf->type);
+ if (buffer_type == -EINVAL)
+ return -EINVAL;
+
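+ /* Input buffers are not explicitly released to the firmware */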
+ if (buffer_type == HFI_BUFFER_INPUT)
+ return 0;
+
+ packet_size = sizeof(*pkt) + sizeof(struct hfi_buffer_info);
+ pkt = kzalloc(packet_size, GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_RELEASE_BUFFERS;
+ pkt->shdr.session_id = inst->session_id;
+ pkt->buffer_size = buf->buffer_size;
+ pkt->num_buffers = 1;
+
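+ /*
+  * Output buffers are described by an address/extradata pair per entry,
+  * internal buffers by a plain device address.
+  */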
+ if (buffer_type == HFI_BUFFER_OUTPUT ||
+ buffer_type == HFI_BUFFER_OUTPUT2) {
+ struct hfi_buffer_info *bi;
+
+ bi = (struct hfi_buffer_info *)pkt->buffer_info;
+ for (i = 0; i < pkt->num_buffers; i++) {
+ bi->buffer_addr = buf->device_addr;
+ bi->extradata_addr = 0;
+ }
+ pkt->shdr.hdr.size = packet_size;
+ } else {
+ for (i = 0; i < pkt->num_buffers; i++)
+ pkt->buffer_info[i] = buf->device_addr;
+ pkt->extradata_size = 0;
+ pkt->shdr.hdr.size =
+ sizeof(struct hfi_session_set_buffers_pkt) +
+ ((pkt->num_buffers) * sizeof(u32));
+ }
+
+ pkt->response_req = true;
+ pkt->buffer_type = buffer_type;
+
+ ret = iris_hfi_queue_cmd_write(inst->core, pkt, pkt->shdr.hdr.size);
+ if (ret)
+ goto exit;
+
+ ret = iris_wait_for_session_response(inst, false);
+
+exit:
+ kfree(pkt);
+
+ return ret;
+}
+
+static int iris_hfi_gen1_session_drain(struct iris_inst *inst, u32 plane)
+{
+ struct hfi_session_empty_buffer_compressed_pkt ip_pkt = {0};
+
+ ip_pkt.shdr.hdr.size = sizeof(struct hfi_session_empty_buffer_compressed_pkt);
+ ip_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER;
+ ip_pkt.shdr.session_id = inst->session_id;
+ ip_pkt.flags = HFI_BUFFERFLAG_EOS;
+
+ return iris_hfi_queue_cmd_write(inst->core, &ip_pkt, ip_pkt.shdr.hdr.size);
+}
+
+static int
+iris_hfi_gen1_packet_session_set_property(struct hfi_session_set_property_pkt *packet,
+ struct iris_inst *inst, u32 ptype, void *pdata)
+{
+ void *prop_data = &packet->data[1];
+
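+ /* data[0] carries the property id, the property payload starts at data[1] */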
+ packet->shdr.hdr.size = sizeof(*packet);
+ packet->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_PROPERTY;
+ packet->shdr.session_id = inst->session_id;
+ packet->num_properties = 1;
+ packet->data[0] = ptype;
+
+ switch (ptype) {
+ case HFI_PROPERTY_PARAM_FRAME_SIZE: {
+ struct hfi_framesize *in = pdata, *fsize = prop_data;
+
+ fsize->buffer_type = in->buffer_type;
+ fsize->height = in->height;
+ fsize->width = in->width;
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*fsize);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE: {
+ struct hfi_videocores_usage_type *in = pdata, *cu = prop_data;
+
+ cu->video_core_enable_mask = in->video_core_enable_mask;
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*cu);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT: {
+ struct hfi_uncompressed_format_select *in = pdata;
+ struct hfi_uncompressed_format_select *hfi = prop_data;
+
+ hfi->buffer_type = in->buffer_type;
+ hfi->format = in->format;
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*hfi);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO: {
+ struct hfi_uncompressed_plane_actual_constraints_info *info = prop_data;
+
+ info->buffer_type = HFI_BUFFER_OUTPUT2;
+ info->num_planes = 2;
+ info->plane_format[0].stride_multiples = 128;
+ info->plane_format[0].max_stride = 8192;
+ info->plane_format[0].min_plane_buffer_height_multiple = 32;
+ info->plane_format[0].buffer_alignment = 256;
+ if (info->num_planes > 1) {
+ info->plane_format[1].stride_multiples = 128;
+ info->plane_format[1].max_stride = 8192;
+ info->plane_format[1].min_plane_buffer_height_multiple = 16;
+ info->plane_format[1].buffer_alignment = 256;
+ }
+
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*info);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL: {
+ struct hfi_buffer_count_actual *in = pdata;
+ struct hfi_buffer_count_actual *count = prop_data;
+
+ count->type = in->type;
+ count->count_actual = in->count_actual;
+ count->count_min_host = in->count_min_host;
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*count);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM: {
+ struct hfi_multi_stream *in = pdata;
+ struct hfi_multi_stream *multi = prop_data;
+
+ multi->buffer_type = in->buffer_type;
+ multi->enable = in->enable;
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*multi);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL: {
+ struct hfi_buffer_size_actual *in = pdata, *sz = prop_data;
+
+ sz->size = in->size;
+ sz->type = in->type;
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*sz);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_WORK_ROUTE: {
+ struct hfi_video_work_route *wr = prop_data;
+ u32 *in = pdata;
+
+ wr->video_work_route = *in;
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*wr);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_WORK_MODE: {
+ struct hfi_video_work_mode *wm = prop_data;
+ u32 *in = pdata;
+
+ wm->video_work_mode = *in;
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*wm);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER: {
+ struct hfi_enable *en = prop_data;
+ u32 *in = pdata;
+
+ en->enable = *in;
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hfi_gen1_set_property(struct iris_inst *inst, u32 packet_type,
+ void *payload, u32 payload_size)
+{
+ struct hfi_session_set_property_pkt *pkt;
+ u32 packet_size;
+ int ret;
+
+ packet_size = sizeof(*pkt) + sizeof(u32) + payload_size;
+ pkt = kzalloc(packet_size, GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+
+ ret = iris_hfi_gen1_packet_session_set_property(pkt, inst, packet_type, payload);
+ if (ret == -EOPNOTSUPP) {
+ ret = 0;
+ goto exit;
+ }
+ if (ret)
+ goto exit;
+
+ ret = iris_hfi_queue_cmd_write(inst->core, pkt, pkt->shdr.hdr.size);
+
+exit:
+ kfree(pkt);
+
+ return ret;
+}
+
+static int iris_hfi_gen1_session_set_property(struct iris_inst *inst, u32 packet_type,
+ u32 flag, u32 plane, u32 payload_type,
+ void *payload, u32 payload_size)
+{
+ return hfi_gen1_set_property(inst, packet_type, payload, payload_size);
+}
+
+static int iris_hfi_gen1_set_resolution(struct iris_inst *inst)
+{
+ u32 ptype = HFI_PROPERTY_PARAM_FRAME_SIZE;
+ struct hfi_framesize fs;
+ int ret;
+
+ fs.buffer_type = HFI_BUFFER_INPUT;
+ fs.width = inst->fmt_src->fmt.pix_mp.width;
+ fs.height = inst->fmt_src->fmt.pix_mp.height;
+
+ ret = hfi_gen1_set_property(inst, ptype, &fs, sizeof(fs));
+ if (ret)
+ return ret;
+
+ fs.buffer_type = HFI_BUFFER_OUTPUT2;
+ fs.width = inst->fmt_dst->fmt.pix_mp.width;
+ fs.height = inst->fmt_dst->fmt.pix_mp.height;
+
+ return hfi_gen1_set_property(inst, ptype, &fs, sizeof(fs));
+}
+
+static int iris_hfi_gen1_decide_core(struct iris_inst *inst)
+{
+ const u32 ptype = HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE;
+ struct hfi_videocores_usage_type cu;
+
+ cu.video_core_enable_mask = HFI_CORE_ID_1;
+
+ return hfi_gen1_set_property(inst, ptype, &cu, sizeof(cu));
+}
+
+static int iris_hfi_gen1_set_raw_format(struct iris_inst *inst)
+{
+ const u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT;
+ u32 pixelformat = inst->fmt_dst->fmt.pix_mp.pixelformat;
+ struct hfi_uncompressed_format_select fmt;
+ int ret;
+
+ if (iris_split_mode_enabled(inst)) {
+ fmt.buffer_type = HFI_BUFFER_OUTPUT;
+ fmt.format = pixelformat == V4L2_PIX_FMT_NV12 ? HFI_COLOR_FORMAT_NV12_UBWC : 0;
+
+ ret = hfi_gen1_set_property(inst, ptype, &fmt, sizeof(fmt));
+ if (ret)
+ return ret;
+
+ fmt.buffer_type = HFI_BUFFER_OUTPUT2;
+ fmt.format = pixelformat == V4L2_PIX_FMT_NV12 ? HFI_COLOR_FORMAT_NV12 : 0;
+
+ ret = hfi_gen1_set_property(inst, ptype, &fmt, sizeof(fmt));
+ } else {
+ fmt.buffer_type = HFI_BUFFER_OUTPUT;
+ fmt.format = pixelformat == V4L2_PIX_FMT_NV12 ? HFI_COLOR_FORMAT_NV12 : 0;
+
+ ret = hfi_gen1_set_property(inst, ptype, &fmt, sizeof(fmt));
+ }
+
+ return ret;
+}
+
+static int iris_hfi_gen1_set_format_constraints(struct iris_inst *inst)
+{
+ const u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO;
+ struct hfi_uncompressed_plane_actual_constraints_info pconstraint;
+
+ pconstraint.buffer_type = HFI_BUFFER_OUTPUT2;
+ pconstraint.num_planes = 2;
+ pconstraint.plane_format[0].stride_multiples = 128;
+ pconstraint.plane_format[0].max_stride = 8192;
+ pconstraint.plane_format[0].min_plane_buffer_height_multiple = 32;
+ pconstraint.plane_format[0].buffer_alignment = 256;
+
+ pconstraint.plane_format[1].stride_multiples = 128;
+ pconstraint.plane_format[1].max_stride = 8192;
+ pconstraint.plane_format[1].min_plane_buffer_height_multiple = 16;
+ pconstraint.plane_format[1].buffer_alignment = 256;
+
+ return hfi_gen1_set_property(inst, ptype, &pconstraint, sizeof(pconstraint));
+}
+
+static int iris_hfi_gen1_set_num_bufs(struct iris_inst *inst)
+{
+ u32 ptype = HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL;
+ struct hfi_buffer_count_actual buf_count;
+ int ret;
+
+ buf_count.type = HFI_BUFFER_INPUT;
+ buf_count.count_actual = VIDEO_MAX_FRAME;
+ buf_count.count_min_host = VIDEO_MAX_FRAME;
+
+ ret = hfi_gen1_set_property(inst, ptype, &buf_count, sizeof(buf_count));
+ if (ret)
+ return ret;
+
+ if (iris_split_mode_enabled(inst)) {
+ buf_count.type = HFI_BUFFER_OUTPUT;
+ buf_count.count_actual = VIDEO_MAX_FRAME;
+ buf_count.count_min_host = VIDEO_MAX_FRAME;
+
+ ret = hfi_gen1_set_property(inst, ptype, &buf_count, sizeof(buf_count));
+ if (ret)
+ return ret;
+
+ buf_count.type = HFI_BUFFER_OUTPUT2;
+ buf_count.count_actual = iris_vpu_buf_count(inst, BUF_DPB);
+ buf_count.count_min_host = iris_vpu_buf_count(inst, BUF_DPB);
+
+ ret = hfi_gen1_set_property(inst, ptype, &buf_count, sizeof(buf_count));
+ } else {
+ buf_count.type = HFI_BUFFER_OUTPUT;
+ buf_count.count_actual = VIDEO_MAX_FRAME;
+ buf_count.count_min_host = VIDEO_MAX_FRAME;
+
+ ret = hfi_gen1_set_property(inst, ptype, &buf_count, sizeof(buf_count));
+ }
+
+ return ret;
+}
+
+static int iris_hfi_gen1_set_multistream(struct iris_inst *inst)
+{
+ u32 ptype = HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM;
+ struct hfi_multi_stream multi = {0};
+ int ret;
+
+ if (iris_split_mode_enabled(inst)) {
+ multi.buffer_type = HFI_BUFFER_OUTPUT;
+ multi.enable = 0;
+
+ ret = hfi_gen1_set_property(inst, ptype, &multi, sizeof(multi));
+ if (ret)
+ return ret;
+
+ multi.buffer_type = HFI_BUFFER_OUTPUT2;
+ multi.enable = 1;
+
+ ret = hfi_gen1_set_property(inst, ptype, &multi, sizeof(multi));
+ } else {
+ multi.buffer_type = HFI_BUFFER_OUTPUT;
+ multi.enable = 1;
+
+ ret = hfi_gen1_set_property(inst, ptype, &multi, sizeof(multi));
+ if (ret)
+ return ret;
+
+ multi.buffer_type = HFI_BUFFER_OUTPUT2;
+ multi.enable = 0;
+
+ ret = hfi_gen1_set_property(inst, ptype, &multi, sizeof(multi));
+ }
+
+ return ret;
+}
+
+static int iris_hfi_gen1_set_bufsize(struct iris_inst *inst)
+{
+ const u32 ptype = HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL;
+ struct hfi_buffer_size_actual bufsz;
+ int ret;
+
+ if (iris_split_mode_enabled(inst)) {
+ bufsz.type = HFI_BUFFER_OUTPUT;
+ bufsz.size = iris_vpu_buf_size(inst, BUF_DPB);
+
+ ret = hfi_gen1_set_property(inst, ptype, &bufsz, sizeof(bufsz));
+ if (ret)
+ return ret;
+
+ bufsz.type = HFI_BUFFER_OUTPUT2;
+ bufsz.size = inst->buffers[BUF_OUTPUT].size;
+
+ ret = hfi_gen1_set_property(inst, ptype, &bufsz, sizeof(bufsz));
+ } else {
+ bufsz.type = HFI_BUFFER_OUTPUT;
+ bufsz.size = inst->buffers[BUF_OUTPUT].size;
+
+ ret = hfi_gen1_set_property(inst, ptype, &bufsz, sizeof(bufsz));
+ if (ret)
+ return ret;
+
+ bufsz.type = HFI_BUFFER_OUTPUT2;
+ bufsz.size = 0;
+
+ ret = hfi_gen1_set_property(inst, ptype, &bufsz, sizeof(bufsz));
+ }
+
+ return ret;
+}
+
+static int iris_hfi_gen1_session_set_config_params(struct iris_inst *inst, u32 plane)
+{
+ struct iris_core *core = inst->core;
+ u32 config_params_size, i, j;
+ const u32 *config_params;
+ int ret;
+
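+ /* Per-port handler tables, applied in the order given by the platform config params */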
+ static const struct iris_hfi_prop_type_handle prop_type_handle_inp_arr[] = {
+ {HFI_PROPERTY_PARAM_FRAME_SIZE,
+ iris_hfi_gen1_set_resolution},
+ {HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE,
+ iris_hfi_gen1_decide_core},
+ {HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT,
+ iris_hfi_gen1_set_raw_format},
+ {HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO,
+ iris_hfi_gen1_set_format_constraints},
+ {HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL,
+ iris_hfi_gen1_set_num_bufs},
+ {HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM,
+ iris_hfi_gen1_set_multistream},
+ {HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL,
+ iris_hfi_gen1_set_bufsize},
+ };
+
+ static const struct iris_hfi_prop_type_handle prop_type_handle_out_arr[] = {
+ {HFI_PROPERTY_PARAM_FRAME_SIZE,
+ iris_hfi_gen1_set_resolution},
+ {HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT,
+ iris_hfi_gen1_set_raw_format},
+ {HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO,
+ iris_hfi_gen1_set_format_constraints},
+ {HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL,
+ iris_hfi_gen1_set_num_bufs},
+ {HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM,
+ iris_hfi_gen1_set_multistream},
+ {HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL,
+ iris_hfi_gen1_set_bufsize},
+ };
+
+ config_params = core->iris_platform_data->input_config_params;
+ config_params_size = core->iris_platform_data->input_config_params_size;
+
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ for (i = 0; i < config_params_size; i++) {
+ for (j = 0; j < ARRAY_SIZE(prop_type_handle_inp_arr); j++) {
+ if (prop_type_handle_inp_arr[j].type == config_params[i]) {
+ ret = prop_type_handle_inp_arr[j].handle(inst);
+ if (ret)
+ return ret;
+ break;
+ }
+ }
+ }
+ } else if (V4L2_TYPE_IS_CAPTURE(plane)) {
+ for (i = 0; i < config_params_size; i++) {
+ for (j = 0; j < ARRAY_SIZE(prop_type_handle_out_arr); j++) {
+ if (prop_type_handle_out_arr[j].type == config_params[i]) {
+ ret = prop_type_handle_out_arr[j].handle(inst);
+ if (ret)
+ return ret;
+ break;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static const struct iris_hfi_command_ops iris_hfi_gen1_command_ops = {
+ .sys_init = iris_hfi_gen1_sys_init,
+ .sys_image_version = iris_hfi_gen1_sys_image_version,
+ .sys_interframe_powercollapse = iris_hfi_gen1_sys_interframe_powercollapse,
+ .sys_pc_prep = iris_hfi_gen1_sys_pc_prep,
+ .session_open = iris_hfi_gen1_session_open,
+ .session_set_config_params = iris_hfi_gen1_session_set_config_params,
+ .session_set_property = iris_hfi_gen1_session_set_property,
+ .session_start = iris_hfi_gen1_session_start,
+ .session_queue_buf = iris_hfi_gen1_session_queue_buffer,
+ .session_release_buf = iris_hfi_gen1_session_unset_buffers,
+ .session_resume_drc = iris_hfi_gen1_session_continue,
+ .session_stop = iris_hfi_gen1_session_stop,
+ .session_drain = iris_hfi_gen1_session_drain,
+ .session_close = iris_hfi_gen1_session_close,
+};
+
+void iris_hfi_gen1_command_ops_init(struct iris_core *core)
+{
+ core->hfi_ops = &iris_hfi_gen1_command_ops;
+}
+
+struct iris_inst *iris_hfi_gen1_get_instance(void)
+{
+ return kzalloc(sizeof(struct iris_inst), GFP_KERNEL);
+}
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen1_defines.h b/drivers/media/platform/qcom/iris/iris_hfi_gen1_defines.h
new file mode 100644
index 000000000000..9f246816a286
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen1_defines.h
@@ -0,0 +1,448 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_HFI_GEN1_DEFINES_H__
+#define __IRIS_HFI_GEN1_DEFINES_H__
+
+#include <linux/types.h>
+
+#define HFI_VIDEO_ARCH_OX 0x1
+
+#define HFI_SESSION_TYPE_DEC 2
+
+#define HFI_VIDEO_CODEC_H264 0x00000002
+
+#define HFI_ERR_NONE 0x0
+
+#define HFI_CMD_SYS_INIT 0x10001
+#define HFI_CMD_SYS_PC_PREP 0x10002
+#define HFI_CMD_SYS_SET_PROPERTY 0x10005
+#define HFI_CMD_SYS_GET_PROPERTY 0x10006
+#define HFI_CMD_SYS_SESSION_INIT 0x10007
+#define HFI_CMD_SYS_SESSION_END 0x10008
+
+#define HFI_CMD_SESSION_SET_PROPERTY 0x11001
+#define HFI_CMD_SESSION_SET_BUFFERS 0x11002
+
+#define HFI_CMD_SESSION_LOAD_RESOURCES 0x211001
+#define HFI_CMD_SESSION_START 0x211002
+#define HFI_CMD_SESSION_STOP 0x211003
+#define HFI_CMD_SESSION_EMPTY_BUFFER 0x211004
+#define HFI_CMD_SESSION_FILL_BUFFER 0x211005
+#define HFI_CMD_SESSION_FLUSH 0x211008
+#define HFI_CMD_SESSION_RELEASE_BUFFERS 0x21100b
+#define HFI_CMD_SESSION_RELEASE_RESOURCES 0x21100c
+#define HFI_CMD_SESSION_CONTINUE 0x21100d
+
+#define HFI_ERR_SESSION_UNSUPPORTED_SETTING 0x1008
+#define HFI_ERR_SESSION_UNSUPPORTED_STREAM 0x100d
+#define HFI_ERR_SESSION_UNSUPPORT_BUFFERTYPE 0x1010
+#define HFI_ERR_SESSION_INVALID_SCALE_FACTOR 0x1012
+#define HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED 0x1013
+
+#define HFI_EVENT_SYS_ERROR 0x1
+#define HFI_EVENT_SESSION_ERROR 0x2
+
+#define HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUF_RESOURCES 0x1000001
+#define HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUF_RESOURCES 0x1000002
+#define HFI_EVENT_SESSION_SEQUENCE_CHANGED 0x1000003
+
+#define HFI_BUFFERFLAG_EOS 0x00000001
+#define HFI_BUFFERFLAG_TIMESTAMPINVALID 0x00000100
+
+#define HFI_FLUSH_OUTPUT 0x1000002
+#define HFI_FLUSH_OUTPUT2 0x1000003
+#define HFI_FLUSH_ALL 0x1000004
+
+#define HFI_INDEX_EXTRADATA_INPUT_CROP 0x0700000e
+
+#define HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL 0x201001
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO 0x201002
+#define HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE 0x201008
+#define HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL 0x20100c
+
+#define HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS 0x202001
+
+#define HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER 0x1200001
+#define HFI_PROPERTY_PARAM_VDEC_DPB_COUNTS 0x120300e
+#define HFI_PROPERTY_CONFIG_VDEC_ENTROPY 0x1204004
+
+#define HFI_BUFFER_INPUT 0x1
+#define HFI_BUFFER_OUTPUT 0x2
+#define HFI_BUFFER_OUTPUT2 0x3
+#define HFI_BUFFER_INTERNAL_PERSIST_1 0x5
+#define HFI_BUFFER_INTERNAL_SCRATCH 0x6
+#define HFI_BUFFER_INTERNAL_SCRATCH_1 0x7
+
+#define HFI_PROPERTY_SYS_CODEC_POWER_PLANE_CTRL 0x5
+#define HFI_PROPERTY_SYS_IMAGE_VERSION 0x6
+
+#define HFI_PROPERTY_PARAM_FRAME_SIZE 0x1001
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT 0x1003
+#define HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT 0x1005
+#define HFI_PROPERTY_PARAM_WORK_MODE 0x1015
+#define HFI_PROPERTY_PARAM_WORK_ROUTE 0x1017
+#define HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE 0x2002
+
+#define HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM 0x1003001
+#define HFI_PROPERTY_PARAM_VDEC_PIXEL_BITDEPTH 0x1003007
+#define HFI_PROPERTY_PARAM_VDEC_PIC_STRUCT 0x1003009
+#define HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE 0x100300a
+#define HFI_CORE_ID_1 1
+#define HFI_COLOR_FORMAT_NV12 0x02
+#define HFI_COLOR_FORMAT_NV12_UBWC 0x8002
+
+#define HFI_MSG_SYS_INIT 0x20001
+#define HFI_MSG_SYS_SESSION_INIT 0x20006
+#define HFI_MSG_SYS_SESSION_END 0x20007
+#define HFI_MSG_SYS_COV 0x20009
+#define HFI_MSG_SYS_PROPERTY_INFO 0x2000a
+
+#define HFI_MSG_EVENT_NOTIFY 0x21001
+#define HFI_MSG_SESSION_LOAD_RESOURCES 0x221001
+#define HFI_MSG_SESSION_START 0x221002
+#define HFI_MSG_SESSION_STOP 0x221003
+#define HFI_MSG_SESSION_FLUSH 0x221006
+#define HFI_MSG_SESSION_EMPTY_BUFFER 0x221007
+#define HFI_MSG_SESSION_FILL_BUFFER 0x221008
+#define HFI_MSG_SESSION_RELEASE_RESOURCES 0x22100a
+#define HFI_MSG_SESSION_RELEASE_BUFFERS 0x22100c
+
+#define HFI_PICTURE_I 0x00000001
+#define HFI_PICTURE_P 0x00000002
+#define HFI_PICTURE_B 0x00000004
+#define HFI_PICTURE_IDR 0x00000008
+#define HFI_FRAME_NOTCODED 0x7f002000
+#define HFI_FRAME_YUV 0x7f004000
+#define HFI_UNUSED_PICT 0x10000000
+
+struct hfi_pkt_hdr {
+ u32 size;
+ u32 pkt_type;
+};
+
+struct hfi_session_hdr_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 session_id;
+};
+
+struct hfi_session_open_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 session_domain;
+ u32 session_codec;
+};
+
+struct hfi_session_pkt {
+ struct hfi_session_hdr_pkt shdr;
+};
+
+struct hfi_sys_init_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 arch_type;
+};
+
+struct hfi_sys_set_property_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 num_properties;
+ u32 data[];
+};
+
+struct hfi_sys_get_property_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 num_properties;
+ u32 data;
+};
+
+struct hfi_session_set_property_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 num_properties;
+ u32 data[];
+};
+
+struct hfi_sys_pc_prep_pkt {
+ struct hfi_pkt_hdr hdr;
+};
+
+struct hfi_session_set_buffers_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 buffer_type;
+ u32 buffer_size;
+ u32 extradata_size;
+ u32 min_buffer_size;
+ u32 num_buffers;
+ u32 buffer_info[];
+};
+
+struct hfi_session_empty_buffer_compressed_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
+ u32 flags;
+ u32 mark_target;
+ u32 mark_data;
+ u32 offset;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 input_tag;
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 data;
+};
+
+struct hfi_session_fill_buffer_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 stream_id;
+ u32 offset;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 output_tag;
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 data;
+};
+
+struct hfi_session_flush_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 flush_type;
+};
+
+struct hfi_session_release_buffer_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 buffer_type;
+ u32 buffer_size;
+ u32 extradata_size;
+ u32 response_req;
+ u32 num_buffers;
+ u32 buffer_info[];
+};
+
+struct hfi_buffer_info {
+ u32 buffer_addr;
+ u32 extradata_addr;
+};
+
+struct hfi_msg_event_notify_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 event_id;
+ u32 event_data1;
+ u32 event_data2;
+ u32 ext_event_data[];
+};
+
+struct hfi_msg_sys_init_done_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 error_type;
+ u32 num_properties;
+ u32 data[];
+};
+
+struct hfi_msg_session_hdr_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+};
+
+struct hfi_msg_session_init_done_pkt {
+ struct hfi_msg_session_hdr_pkt shdr;
+ u32 num_properties;
+ u32 data[];
+};
+
+struct hfi_msg_sys_property_info_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 num_properties;
+ u32 property;
+ u8 data[];
+};
+
+struct hfi_msg_session_flush_done_pkt {
+ struct hfi_msg_session_hdr_pkt shdr;
+ u32 flush_type;
+};
+
+struct hfi_enable {
+ u32 enable;
+};
+
+struct hfi_profile_level {
+ u32 profile;
+ u32 level;
+};
+
+struct hfi_framesize {
+ u32 buffer_type;
+ u32 width;
+ u32 height;
+};
+
+struct hfi_videocores_usage_type {
+ u32 video_core_enable_mask;
+};
+
+struct hfi_video_work_mode {
+ u32 video_work_mode;
+};
+
+struct hfi_video_work_route {
+ u32 video_work_route;
+};
+
+struct hfi_bit_depth {
+ u32 buffer_type;
+ u32 bit_depth;
+};
+
+struct hfi_pic_struct {
+ u32 progressive_only;
+};
+
+struct hfi_colour_space {
+ u32 colour_space;
+};
+
+struct hfi_extradata_input_crop {
+ u32 size;
+ u32 version;
+ u32 port_index;
+ u32 left;
+ u32 top;
+ u32 width;
+ u32 height;
+};
+
+struct hfi_dpb_counts {
+ u32 max_dpb_count;
+ u32 max_ref_frames;
+ u32 max_dec_buffering;
+ u32 max_reorder_frames;
+ u32 fw_min_count;
+};
+
+struct hfi_uncompressed_format_select {
+ u32 buffer_type;
+ u32 format;
+};
+
+struct hfi_uncompressed_plane_constraints {
+ u32 stride_multiples;
+ u32 max_stride;
+ u32 min_plane_buffer_height_multiple;
+ u32 buffer_alignment;
+};
+
+struct hfi_uncompressed_plane_actual_constraints_info {
+ u32 buffer_type;
+ u32 num_planes;
+ struct hfi_uncompressed_plane_constraints plane_format[2];
+};
+
+struct hfi_buffer_count_actual {
+ u32 type;
+ u32 count_actual;
+ u32 count_min_host;
+};
+
+struct hfi_buffer_size_actual {
+ u32 type;
+ u32 size;
+};
+
+struct hfi_multi_stream {
+ u32 buffer_type;
+ u32 enable;
+};
+
+struct hfi_buffer_requirements {
+ u32 type;
+ u32 size;
+ u32 region_size;
+ u32 hold_count;
+ u32 count_min;
+ u32 count_actual;
+ u32 contiguous;
+ u32 alignment;
+};
+
+struct hfi_event_data {
+ u32 error;
+ u32 height;
+ u32 width;
+ u32 event_type;
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 tag;
+ u32 profile;
+ u32 level;
+ u32 bit_depth;
+ u32 pic_struct;
+ u32 colour_space;
+ u32 entropy_mode;
+ u32 buf_count;
+ struct {
+ u32 left, top;
+ u32 width, height;
+ } input_crop;
+};
+
+struct hfi_msg_session_empty_buffer_done_pkt {
+ struct hfi_msg_session_hdr_pkt shdr;
+ u32 offset;
+ u32 filled_len;
+ u32 input_tag;
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 data[];
+};
+
+struct hfi_msg_session_fbd_uncompressed_plane0_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 stream_id;
+ u32 view_id;
+ u32 error_type;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
+ u32 flags;
+ u32 mark_target;
+ u32 mark_data;
+ u32 stats;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 offset;
+ u32 frame_width;
+ u32 frame_height;
+ u32 start_x_coord;
+ u32 start_y_coord;
+ u32 input_tag;
+ u32 input_tag2;
+ u32 output_tag;
+ u32 picture_type;
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 data[];
+};
+
+struct hfi_msg_session_release_buffers_done_pkt {
+ struct hfi_msg_session_hdr_pkt shdr;
+ u32 num_buffers;
+ u32 buffer_info[];
+};
+
+struct hfi_msg_sys_debug_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 msg_type;
+ u32 msg_size;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
+ u8 msg_data[];
+};
+
+struct hfi_msg_sys_coverage_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 msg_size;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
+ u8 msg_data[];
+};
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c b/drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c
new file mode 100644
index 000000000000..b72d503dd740
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c
@@ -0,0 +1,666 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "iris_hfi_gen1.h"
+#include "iris_hfi_gen1_defines.h"
+#include "iris_instance.h"
+#include "iris_vdec.h"
+#include "iris_vpu_buffer.h"
+
+static void iris_hfi_gen1_read_changed_params(struct iris_inst *inst,
+ struct hfi_msg_event_notify_pkt *pkt)
+{
+ struct v4l2_pix_format_mplane *pixmp_ip = &inst->fmt_src->fmt.pix_mp;
+ struct v4l2_pix_format_mplane *pixmp_op = &inst->fmt_dst->fmt.pix_mp;
+ u32 num_properties_changed = pkt->event_data2;
+ u8 *data_ptr = (u8 *)&pkt->ext_event_data[0];
+ u32 primaries, matrix_coeff, transfer_char;
+ struct hfi_dpb_counts *iris_vpu_dpb_count;
+ struct hfi_profile_level *profile_level;
+ struct hfi_buffer_requirements *bufreq;
+ struct hfi_extradata_input_crop *crop;
+ struct hfi_colour_space *colour_info;
+ struct iris_core *core = inst->core;
+ u32 colour_description_present_flag;
+ u32 video_signal_type_present_flag;
+ struct hfi_event_data event = {0};
+ struct hfi_bit_depth *pixel_depth;
+ struct hfi_pic_struct *pic_struct;
+ struct hfi_framesize *frame_sz;
+ struct vb2_queue *dst_q;
+ struct v4l2_ctrl *ctrl;
+ u32 full_range, ptype;
+
+ do {
+ ptype = *((u32 *)data_ptr);
+ switch (ptype) {
+ case HFI_PROPERTY_PARAM_FRAME_SIZE:
+ data_ptr += sizeof(u32);
+ frame_sz = (struct hfi_framesize *)data_ptr;
+ event.width = frame_sz->width;
+ event.height = frame_sz->height;
+ data_ptr += sizeof(*frame_sz);
+ break;
+ case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT:
+ data_ptr += sizeof(u32);
+ profile_level = (struct hfi_profile_level *)data_ptr;
+ event.profile = profile_level->profile;
+ event.level = profile_level->level;
+ data_ptr += sizeof(*profile_level);
+ break;
+ case HFI_PROPERTY_PARAM_VDEC_PIXEL_BITDEPTH:
+ data_ptr += sizeof(u32);
+ pixel_depth = (struct hfi_bit_depth *)data_ptr;
+ event.bit_depth = pixel_depth->bit_depth;
+ data_ptr += sizeof(*pixel_depth);
+ break;
+ case HFI_PROPERTY_PARAM_VDEC_PIC_STRUCT:
+ data_ptr += sizeof(u32);
+ pic_struct = (struct hfi_pic_struct *)data_ptr;
+ event.pic_struct = pic_struct->progressive_only;
+ data_ptr += sizeof(*pic_struct);
+ break;
+ case HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE:
+ data_ptr += sizeof(u32);
+ colour_info = (struct hfi_colour_space *)data_ptr;
+ event.colour_space = colour_info->colour_space;
+ data_ptr += sizeof(*colour_info);
+ break;
+ case HFI_PROPERTY_CONFIG_VDEC_ENTROPY:
+ data_ptr += sizeof(u32);
+ event.entropy_mode = *(u32 *)data_ptr;
+ data_ptr += sizeof(u32);
+ break;
+ case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
+ data_ptr += sizeof(u32);
+ bufreq = (struct hfi_buffer_requirements *)data_ptr;
+ event.buf_count = bufreq->count_min;
+ data_ptr += sizeof(*bufreq);
+ break;
+ case HFI_INDEX_EXTRADATA_INPUT_CROP:
+ data_ptr += sizeof(u32);
+ crop = (struct hfi_extradata_input_crop *)data_ptr;
+ event.input_crop.left = crop->left;
+ event.input_crop.top = crop->top;
+ event.input_crop.width = crop->width;
+ event.input_crop.height = crop->height;
+ data_ptr += sizeof(*crop);
+ break;
+ case HFI_PROPERTY_PARAM_VDEC_DPB_COUNTS:
+ data_ptr += sizeof(u32);
+ iris_vpu_dpb_count = (struct hfi_dpb_counts *)data_ptr;
+ event.buf_count = iris_vpu_dpb_count->fw_min_count;
+ data_ptr += sizeof(*iris_vpu_dpb_count);
+ break;
+ default:
+ break;
+ }
+ num_properties_changed--;
+ } while (num_properties_changed > 0);
+
+ pixmp_ip->width = event.width;
+ pixmp_ip->height = event.height;
+
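+ /* Align the decoded output to the 128x32 stride/scanline multiples set in the plane constraints */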
+ pixmp_op->width = ALIGN(event.width, 128);
+ pixmp_op->height = ALIGN(event.height, 32);
+ pixmp_op->plane_fmt[0].bytesperline = ALIGN(event.width, 128);
+ pixmp_op->plane_fmt[0].sizeimage = iris_get_buffer_size(inst, BUF_OUTPUT);
+
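+ /*
+  * The colour description is packed into a single word: matrix
+  * coefficients, transfer characteristics, primaries and the
+  * range/description present flags.
+  */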
+ matrix_coeff = FIELD_GET(GENMASK(7, 0), event.colour_space);
+ transfer_char = FIELD_GET(GENMASK(15, 8), event.colour_space);
+ primaries = FIELD_GET(GENMASK(23, 16), event.colour_space);
+ colour_description_present_flag = FIELD_GET(GENMASK(24, 24), event.colour_space);
+ full_range = FIELD_GET(GENMASK(25, 25), event.colour_space);
+ video_signal_type_present_flag = FIELD_GET(GENMASK(29, 29), event.colour_space);
+
+ pixmp_op->colorspace = V4L2_COLORSPACE_DEFAULT;
+ pixmp_op->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ pixmp_op->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ pixmp_op->quantization = V4L2_QUANTIZATION_DEFAULT;
+
+ if (video_signal_type_present_flag) {
+ pixmp_op->quantization =
+ full_range ?
+ V4L2_QUANTIZATION_FULL_RANGE :
+ V4L2_QUANTIZATION_LIM_RANGE;
+ if (colour_description_present_flag) {
+ pixmp_op->colorspace =
+ iris_hfi_get_v4l2_color_primaries(primaries);
+ pixmp_op->xfer_func =
+ iris_hfi_get_v4l2_transfer_char(transfer_char);
+ pixmp_op->ycbcr_enc =
+ iris_hfi_get_v4l2_matrix_coefficients(matrix_coeff);
+ }
+ }
+
+ pixmp_ip->colorspace = pixmp_op->colorspace;
+ pixmp_ip->xfer_func = pixmp_op->xfer_func;
+ pixmp_ip->ycbcr_enc = pixmp_op->ycbcr_enc;
+ pixmp_ip->quantization = pixmp_op->quantization;
+
+ if (event.input_crop.width > 0 && event.input_crop.height > 0) {
+ inst->crop.left = event.input_crop.left;
+ inst->crop.top = event.input_crop.top;
+ inst->crop.width = event.input_crop.width;
+ inst->crop.height = event.input_crop.height;
+ } else {
+ inst->crop.left = 0;
+ inst->crop.top = 0;
+ inst->crop.width = event.width;
+ inst->crop.height = event.height;
+ }
+
+ inst->fw_min_count = event.buf_count;
+ inst->buffers[BUF_OUTPUT].min_count = iris_vpu_buf_count(inst, BUF_OUTPUT);
+ inst->buffers[BUF_OUTPUT].size = pixmp_op->plane_fmt[0].sizeimage;
+ ctrl = v4l2_ctrl_find(&inst->ctrl_handler, V4L2_CID_MIN_BUFFERS_FOR_CAPTURE);
+ if (ctrl)
+ v4l2_ctrl_s_ctrl(ctrl, inst->buffers[BUF_OUTPUT].min_count);
+
+ dst_q = v4l2_m2m_get_dst_vq(inst->m2m_ctx);
+ dst_q->min_reqbufs_allocation = inst->buffers[BUF_OUTPUT].min_count;
+
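+ /* Bail out on unsupported bit depth or non-progressive content */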
+ if (event.bit_depth || !event.pic_struct) {
+ dev_err(core->dev, "unsupported content, bit depth: %x, pic_struct = %x\n",
+ event.bit_depth, event.pic_struct);
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ }
+}
+
+static void iris_hfi_gen1_event_seq_changed(struct iris_inst *inst,
+ struct hfi_msg_event_notify_pkt *pkt)
+{
+ struct hfi_session_flush_pkt flush_pkt;
+ u32 num_properties_changed;
+ int ret;
+
+ ret = iris_inst_sub_state_change_drc(inst);
+ if (ret)
+ return;
+
+ switch (pkt->event_data1) {
+ case HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUF_RESOURCES:
+ case HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUF_RESOURCES:
+ break;
+ default:
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ return;
+ }
+
+ num_properties_changed = pkt->event_data2;
+ if (!num_properties_changed) {
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ return;
+ }
+
+ iris_hfi_gen1_read_changed_params(inst, pkt);
+
+ if (inst->state != IRIS_INST_ERROR) {
+ reinit_completion(&inst->flush_completion);
+
+ flush_pkt.shdr.hdr.size = sizeof(struct hfi_session_flush_pkt);
+ flush_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_FLUSH;
+ flush_pkt.shdr.session_id = inst->session_id;
+ flush_pkt.flush_type = HFI_FLUSH_OUTPUT;
+ iris_hfi_queue_cmd_write(inst->core, &flush_pkt, flush_pkt.shdr.hdr.size);
+ }
+
+ iris_vdec_src_change(inst);
+ iris_inst_sub_state_change_drc_last(inst);
+}
+
+static void
+iris_hfi_gen1_sys_event_notify(struct iris_core *core, void *packet)
+{
+ struct hfi_msg_event_notify_pkt *pkt = packet;
+ struct iris_inst *instance;
+
+ if (pkt->event_id == HFI_EVENT_SYS_ERROR)
+ dev_err(core->dev, "sys error (type: %x, session id:%x, data1:%x, data2:%x)\n",
+ pkt->event_id, pkt->shdr.session_id, pkt->event_data1,
+ pkt->event_data2);
+
+ core->state = IRIS_CORE_ERROR;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(instance, &core->instances, list)
+ iris_inst_change_state(instance, IRIS_INST_ERROR);
+ mutex_unlock(&core->lock);
+
+ schedule_delayed_work(&core->sys_error_handler, msecs_to_jiffies(10));
+}
+
+static void
+iris_hfi_gen1_event_session_error(struct iris_inst *inst, struct hfi_msg_event_notify_pkt *pkt)
+{
+ switch (pkt->event_data1) {
+ /* non-fatal session errors */
+ case HFI_ERR_SESSION_INVALID_SCALE_FACTOR:
+ case HFI_ERR_SESSION_UNSUPPORT_BUFFERTYPE:
+ case HFI_ERR_SESSION_UNSUPPORTED_SETTING:
+ case HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED:
+ dev_dbg(inst->core->dev, "session error: event id:%x, session id:%x\n",
+ pkt->event_data1, pkt->shdr.session_id);
+ break;
+ /* fatal session errors */
+ default:
+ /*
+ * The firmware fills event_data2 with additional information about the
+ * HFI command for which the session error occurred.
+ */
+ dev_err(inst->core->dev,
+ "session error for command: %x, event id:%x, session id:%x\n",
+ pkt->event_data2, pkt->event_data1,
+ pkt->shdr.session_id);
+ iris_vb2_queue_error(inst);
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ break;
+ }
+}
+
+static void iris_hfi_gen1_session_event_notify(struct iris_inst *inst, void *packet)
+{
+ struct hfi_msg_event_notify_pkt *pkt = packet;
+
+ switch (pkt->event_id) {
+ case HFI_EVENT_SESSION_ERROR:
+ iris_hfi_gen1_event_session_error(inst, pkt);
+ break;
+ case HFI_EVENT_SESSION_SEQUENCE_CHANGED:
+ iris_hfi_gen1_event_seq_changed(inst, pkt);
+ break;
+ default:
+ break;
+ }
+}
+
+static void iris_hfi_gen1_sys_init_done(struct iris_core *core, void *packet)
+{
+ struct hfi_msg_sys_init_done_pkt *pkt = packet;
+
+ if (pkt->error_type != HFI_ERR_NONE) {
+ core->state = IRIS_CORE_ERROR;
+ return;
+ }
+
+ complete(&core->core_init_done);
+}
+
+static void
+iris_hfi_gen1_sys_get_prop_image_version(struct iris_core *core,
+ struct hfi_msg_sys_property_info_pkt *pkt)
+{
+ int req_bytes = pkt->hdr.size - sizeof(*pkt);
+ char fw_version[IRIS_FW_VERSION_LENGTH];
+ u8 *str_image_version;
+ u32 i;
+
+ if (req_bytes < IRIS_FW_VERSION_LENGTH - 1 || !pkt->data[0] || pkt->num_properties > 1) {
+ dev_err(core->dev, "bad packet\n");
+ return;
+ }
+
+ str_image_version = pkt->data;
+ if (!str_image_version) {
+ dev_err(core->dev, "firmware version not available\n");
+ return;
+ }
+
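+ /* Copy the version string, replacing embedded NUL bytes with spaces */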
+ for (i = 0; i < IRIS_FW_VERSION_LENGTH - 1; i++) {
+ if (str_image_version[i] != '\0')
+ fw_version[i] = str_image_version[i];
+ else
+ fw_version[i] = ' ';
+ }
+ fw_version[i] = '\0';
+ dev_dbg(core->dev, "firmware version: %s\n", fw_version);
+}
+
+static void iris_hfi_gen1_sys_property_info(struct iris_core *core, void *packet)
+{
+ struct hfi_msg_sys_property_info_pkt *pkt = packet;
+
+ if (!pkt->num_properties) {
+ dev_dbg(core->dev, "no properties\n");
+ return;
+ }
+
+ switch (pkt->property) {
+ case HFI_PROPERTY_SYS_IMAGE_VERSION:
+ iris_hfi_gen1_sys_get_prop_image_version(core, pkt);
+ break;
+ default:
+ dev_dbg(core->dev, "unknown property data\n");
+ break;
+ }
+}
+
+static void iris_hfi_gen1_session_etb_done(struct iris_inst *inst, void *packet)
+{
+ struct hfi_msg_session_empty_buffer_done_pkt *pkt = packet;
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct v4l2_m2m_buffer *m2m_buffer, *n;
+ struct iris_buffer *buf = NULL;
+ bool found = false;
+
+ v4l2_m2m_for_each_src_buf_safe(m2m_ctx, m2m_buffer, n) {
+ buf = to_iris_buffer(&m2m_buffer->vb);
+ if (buf->index == pkt->input_tag) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ goto error;
+
+ if (pkt->shdr.error_type == HFI_ERR_SESSION_UNSUPPORTED_STREAM) {
+ buf->flags = V4L2_BUF_FLAG_ERROR;
+ iris_vb2_queue_error(inst);
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ }
+
+ if (!(buf->attr & BUF_ATTR_QUEUED))
+ return;
+
+ buf->attr &= ~BUF_ATTR_QUEUED;
+
+ if (!(buf->attr & BUF_ATTR_BUFFER_DONE)) {
+ buf->attr |= BUF_ATTR_BUFFER_DONE;
+ iris_vb2_buffer_done(inst, buf);
+ }
+
+ return;
+
+error:
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ dev_err(inst->core->dev, "error in etb done\n");
+}
+
+static void iris_hfi_gen1_session_ftb_done(struct iris_inst *inst, void *packet)
+{
+ struct hfi_msg_session_fbd_uncompressed_plane0_pkt *pkt = packet;
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct v4l2_m2m_buffer *m2m_buffer, *n;
+ struct hfi_session_flush_pkt flush_pkt;
+ u32 timestamp_hi = pkt->time_stamp_hi;
+ u32 timestamp_lo = pkt->time_stamp_lo;
+ struct iris_core *core = inst->core;
+ u32 filled_len = pkt->filled_len;
+ u32 pic_type = pkt->picture_type;
+ u32 output_tag = pkt->output_tag;
+ struct iris_buffer *buf, *iter;
+ struct iris_buffers *buffers;
+ u32 hfi_flags = pkt->flags;
+ u32 offset = pkt->offset;
+ u64 timestamp_us = 0;
+ bool found = false;
+ u32 flags = 0;
+
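+ /*
+  * An empty buffer carrying the EOS flag marks the end of a drain:
+  * flush the output port and signal the drain-last sub-state.
+  */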
+ if ((hfi_flags & HFI_BUFFERFLAG_EOS) && !filled_len) {
+ reinit_completion(&inst->flush_completion);
+
+ flush_pkt.shdr.hdr.size = sizeof(struct hfi_session_flush_pkt);
+ flush_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_FLUSH;
+ flush_pkt.shdr.session_id = inst->session_id;
+ flush_pkt.flush_type = HFI_FLUSH_OUTPUT;
+ iris_hfi_queue_cmd_write(core, &flush_pkt, flush_pkt.shdr.hdr.size);
+ iris_inst_sub_state_change_drain_last(inst);
+
+ return;
+ }
+
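+ /* Stream 0 carries DPB buffers in split mode; look them up in the internal DPB list */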
+ if (iris_split_mode_enabled(inst) && pkt->stream_id == 0) {
+ buffers = &inst->buffers[BUF_DPB];
+ if (!buffers)
+ goto error;
+
+ found = false;
+ list_for_each_entry(iter, &buffers->list, list) {
+ if (!(iter->attr & BUF_ATTR_QUEUED))
+ continue;
+
+ found = (iter->index == output_tag &&
+ iter->data_offset == offset);
+
+ if (found) {
+ buf = iter;
+ break;
+ }
+ }
+ } else {
+ v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, m2m_buffer, n) {
+ buf = to_iris_buffer(&m2m_buffer->vb);
+ if (!(buf->attr & BUF_ATTR_QUEUED))
+ continue;
+
+ found = (buf->index == output_tag &&
+ buf->data_offset == offset);
+
+ if (found)
+ break;
+ }
+ }
+ if (!found)
+ goto error;
+
+ buf->data_offset = offset;
+ buf->data_size = filled_len;
+
+ if (filled_len) {
+ timestamp_us = timestamp_hi;
+ timestamp_us = (timestamp_us << 32) | timestamp_lo;
+ } else {
+ flags |= V4L2_BUF_FLAG_LAST;
+ }
+ buf->timestamp = timestamp_us;
+
+ switch (pic_type) {
+ case HFI_PICTURE_IDR:
+ case HFI_PICTURE_I:
+ flags |= V4L2_BUF_FLAG_KEYFRAME;
+ break;
+ case HFI_PICTURE_P:
+ flags |= V4L2_BUF_FLAG_PFRAME;
+ break;
+ case HFI_PICTURE_B:
+ flags |= V4L2_BUF_FLAG_BFRAME;
+ break;
+ case HFI_FRAME_NOTCODED:
+ case HFI_UNUSED_PICT:
+ case HFI_FRAME_YUV:
+ default:
+ break;
+ }
+
+ buf->attr &= ~BUF_ATTR_QUEUED;
+ buf->attr |= BUF_ATTR_DEQUEUED;
+ buf->attr |= BUF_ATTR_BUFFER_DONE;
+
+ buf->flags |= flags;
+
+ iris_vb2_buffer_done(inst, buf);
+
+ return;
+
+error:
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ dev_err(core->dev, "error in ftb done\n");
+}
+
+struct iris_hfi_gen1_response_pkt_info {
+ u32 pkt;
+ u32 pkt_sz;
+};
+
+static const struct iris_hfi_gen1_response_pkt_info pkt_infos[] = {
+ {
+ .pkt = HFI_MSG_EVENT_NOTIFY,
+ .pkt_sz = sizeof(struct hfi_msg_event_notify_pkt),
+ },
+ {
+ .pkt = HFI_MSG_SYS_INIT,
+ .pkt_sz = sizeof(struct hfi_msg_sys_init_done_pkt),
+ },
+ {
+ .pkt = HFI_MSG_SYS_PROPERTY_INFO,
+ .pkt_sz = sizeof(struct hfi_msg_sys_property_info_pkt),
+ },
+ {
+ .pkt = HFI_MSG_SYS_SESSION_INIT,
+ .pkt_sz = sizeof(struct hfi_msg_session_init_done_pkt),
+ },
+ {
+ .pkt = HFI_MSG_SYS_SESSION_END,
+ .pkt_sz = sizeof(struct hfi_msg_session_hdr_pkt),
+ },
+ {
+ .pkt = HFI_MSG_SESSION_LOAD_RESOURCES,
+ .pkt_sz = sizeof(struct hfi_msg_session_hdr_pkt),
+ },
+ {
+ .pkt = HFI_MSG_SESSION_START,
+ .pkt_sz = sizeof(struct hfi_msg_session_hdr_pkt),
+ },
+ {
+ .pkt = HFI_MSG_SESSION_STOP,
+ .pkt_sz = sizeof(struct hfi_msg_session_hdr_pkt),
+ },
+ {
+ .pkt = HFI_MSG_SESSION_EMPTY_BUFFER,
+ .pkt_sz = sizeof(struct hfi_msg_session_empty_buffer_done_pkt),
+ },
+ {
+ .pkt = HFI_MSG_SESSION_FILL_BUFFER,
+ .pkt_sz = sizeof(struct hfi_msg_session_fbd_uncompressed_plane0_pkt),
+ },
+ {
+ .pkt = HFI_MSG_SESSION_FLUSH,
+ .pkt_sz = sizeof(struct hfi_msg_session_flush_done_pkt),
+ },
+ {
+ .pkt = HFI_MSG_SESSION_RELEASE_RESOURCES,
+ .pkt_sz = sizeof(struct hfi_msg_session_hdr_pkt),
+ },
+ {
+ .pkt = HFI_MSG_SESSION_RELEASE_BUFFERS,
+ .pkt_sz = sizeof(struct hfi_msg_session_release_buffers_done_pkt),
+ },
+};
+
+static void iris_hfi_gen1_handle_response(struct iris_core *core, void *response)
+{
+ struct hfi_pkt_hdr *hdr = (struct hfi_pkt_hdr *)response;
+ const struct iris_hfi_gen1_response_pkt_info *pkt_info;
+ struct device *dev = core->dev;
+ struct hfi_session_pkt *pkt;
+ struct completion *done;
+ struct iris_inst *inst;
+ bool found = false;
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(pkt_infos); i++) {
+ pkt_info = &pkt_infos[i];
+ if (pkt_info->pkt != hdr->pkt_type)
+ continue;
+ found = true;
+ break;
+ }
+
+ if (!found || hdr->size < pkt_info->pkt_sz) {
+ dev_err(dev, "bad packet size (%d should be %d, pkt type:%x, found %d)\n",
+ hdr->size, pkt_info->pkt_sz, hdr->pkt_type, found);
+
+ return;
+ }
+
+ switch (hdr->pkt_type) {
+ case HFI_MSG_SYS_INIT:
+ iris_hfi_gen1_sys_init_done(core, hdr);
+ break;
+ case HFI_MSG_SYS_PROPERTY_INFO:
+ iris_hfi_gen1_sys_property_info(core, hdr);
+ break;
+ case HFI_MSG_EVENT_NOTIFY:
+ pkt = (struct hfi_session_pkt *)hdr;
+ inst = iris_get_instance(core, pkt->shdr.session_id);
+ if (inst) {
+ mutex_lock(&inst->lock);
+ iris_hfi_gen1_session_event_notify(inst, hdr);
+ mutex_unlock(&inst->lock);
+ } else {
+ iris_hfi_gen1_sys_event_notify(core, hdr);
+ }
+
+ break;
+ default:
+ pkt = (struct hfi_session_pkt *)hdr;
+ inst = iris_get_instance(core, pkt->shdr.session_id);
+ if (!inst) {
+ dev_warn(dev, "no valid instance(pkt session_id:%x, pkt:%x)\n",
+ pkt->shdr.session_id,
+ pkt_info ? pkt_info->pkt : 0);
+ return;
+ }
+
+ mutex_lock(&inst->lock);
+ if (hdr->pkt_type == HFI_MSG_SESSION_EMPTY_BUFFER) {
+ iris_hfi_gen1_session_etb_done(inst, hdr);
+ } else if (hdr->pkt_type == HFI_MSG_SESSION_FILL_BUFFER) {
+ iris_hfi_gen1_session_ftb_done(inst, hdr);
+ } else {
+ struct hfi_msg_session_hdr_pkt *shdr;
+
+ shdr = (struct hfi_msg_session_hdr_pkt *)hdr;
+ if (shdr->error_type != HFI_ERR_NONE)
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+
+ done = pkt_info->pkt == HFI_MSG_SESSION_FLUSH ?
+ &inst->flush_completion : &inst->completion;
+ complete(done);
+ }
+ mutex_unlock(&inst->lock);
+
+ break;
+ }
+}
+
+static void iris_hfi_gen1_flush_debug_queue(struct iris_core *core, u8 *packet)
+{
+ struct hfi_msg_sys_coverage_pkt *pkt;
+
+ while (!iris_hfi_queue_dbg_read(core, packet)) {
+ pkt = (struct hfi_msg_sys_coverage_pkt *)packet;
+
+ if (pkt->hdr.pkt_type != HFI_MSG_SYS_COV) {
+ struct hfi_msg_sys_debug_pkt *pkt =
+ (struct hfi_msg_sys_debug_pkt *)packet;
+
+ dev_dbg(core->dev, "%s", pkt->msg_data);
+ }
+ }
+}
+
+static void iris_hfi_gen1_response_handler(struct iris_core *core)
+{
+ memset(core->response_packet, 0, sizeof(struct hfi_pkt_hdr));
+ while (!iris_hfi_queue_msg_read(core, core->response_packet)) {
+ iris_hfi_gen1_handle_response(core, core->response_packet);
+ memset(core->response_packet, 0, sizeof(struct hfi_pkt_hdr));
+ }
+
+ iris_hfi_gen1_flush_debug_queue(core, core->response_packet);
+}
+
+static const struct iris_hfi_response_ops iris_hfi_gen1_response_ops = {
+ .hfi_response_handler = iris_hfi_gen1_response_handler,
+};
+
+void iris_hfi_gen1_response_ops_init(struct iris_core *core)
+{
+ core->hfi_response_ops = &iris_hfi_gen1_response_ops;
+}
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen2.h b/drivers/media/platform/qcom/iris/iris_hfi_gen2.h
new file mode 100644
index 000000000000..b9d3749a10ef
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen2.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_HFI_GEN2_H__
+#define __IRIS_HFI_GEN2_H__
+
+#include "iris_instance.h"
+
+struct iris_core;
+
+#define to_iris_inst_hfi_gen2(ptr) \
+ container_of(ptr, struct iris_inst_hfi_gen2, inst)
+
+/**
+ * struct iris_inst_hfi_gen2 - holds per-video-instance parameters for hfi_gen2
+ *
+ * @inst: embedded iris instance structure
+ * @packet: pointer to the HFI packet buffer
+ * @ipsc_properties_set: true once the ipsc properties have been set to firmware
+ * @opsc_properties_set: true once the opsc properties have been set to firmware
+ * @hfi_frame_info: frame related information
+ * @src_subcr_params: subscription params to fw on input port
+ * @dst_subcr_params: subscription params to fw on output port
+ */
+struct iris_inst_hfi_gen2 {
+ struct iris_inst inst;
+ struct iris_hfi_header *packet;
+ bool ipsc_properties_set;
+ bool opsc_properties_set;
+ struct iris_hfi_frame_info hfi_frame_info;
+ struct hfi_subscription_params src_subcr_params;
+ struct hfi_subscription_params dst_subcr_params;
+};
+
+void iris_hfi_gen2_command_ops_init(struct iris_core *core);
+void iris_hfi_gen2_response_ops_init(struct iris_core *core);
+struct iris_inst *iris_hfi_gen2_get_instance(void);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen2_command.c b/drivers/media/platform/qcom/iris/iris_hfi_gen2_command.c
new file mode 100644
index 000000000000..a908b41e2868
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen2_command.c
@@ -0,0 +1,957 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+
+#include "iris_hfi_gen2.h"
+#include "iris_hfi_gen2_packet.h"
+
+#define UNSPECIFIED_COLOR_FORMAT 5
+#define NUM_SYS_INIT_PACKETS 8
+
+#define SYS_INIT_PKT_SIZE (sizeof(struct iris_hfi_header) + \
+ NUM_SYS_INIT_PACKETS * (sizeof(struct iris_hfi_packet) + sizeof(u32)))
+
+#define SYS_IFPC_PKT_SIZE (sizeof(struct iris_hfi_header) + \
+ sizeof(struct iris_hfi_packet) + sizeof(u32))
+
+#define SYS_NO_PAYLOAD_PKT_SIZE (sizeof(struct iris_hfi_header) + \
+ sizeof(struct iris_hfi_packet))
+
+static int iris_hfi_gen2_sys_init(struct iris_core *core)
+{
+ struct iris_hfi_header *hdr;
+ int ret;
+
+ hdr = kzalloc(SYS_INIT_PKT_SIZE, GFP_KERNEL);
+ if (!hdr)
+ return -ENOMEM;
+
+ iris_hfi_gen2_packet_sys_init(core, hdr);
+ ret = iris_hfi_queue_cmd_write_locked(core, hdr, hdr->size);
+
+ kfree(hdr);
+
+ return ret;
+}
+
+static int iris_hfi_gen2_sys_image_version(struct iris_core *core)
+{
+ struct iris_hfi_header *hdr;
+ int ret;
+
+ hdr = kzalloc(SYS_NO_PAYLOAD_PKT_SIZE, GFP_KERNEL);
+ if (!hdr)
+ return -ENOMEM;
+
+ iris_hfi_gen2_packet_image_version(core, hdr);
+ ret = iris_hfi_queue_cmd_write_locked(core, hdr, hdr->size);
+
+ kfree(hdr);
+
+ return ret;
+}
+
+static int iris_hfi_gen2_sys_interframe_powercollapse(struct iris_core *core)
+{
+ struct iris_hfi_header *hdr;
+ int ret;
+
+ hdr = kzalloc(SYS_IFPC_PKT_SIZE, GFP_KERNEL);
+ if (!hdr)
+ return -ENOMEM;
+
+ iris_hfi_gen2_packet_sys_interframe_powercollapse(core, hdr);
+ ret = iris_hfi_queue_cmd_write_locked(core, hdr, hdr->size);
+
+ kfree(hdr);
+
+ return ret;
+}
+
+static int iris_hfi_gen2_sys_pc_prep(struct iris_core *core)
+{
+ struct iris_hfi_header *hdr;
+ int ret;
+
+ hdr = kzalloc(SYS_NO_PAYLOAD_PKT_SIZE, GFP_KERNEL);
+ if (!hdr)
+ return -ENOMEM;
+
+ iris_hfi_gen2_packet_sys_pc_prep(core, hdr);
+ ret = iris_hfi_queue_cmd_write_locked(core, hdr, hdr->size);
+
+ kfree(hdr);
+
+ return ret;
+}
+
+static u32 iris_hfi_gen2_get_port(u32 plane)
+{
+ switch (plane) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return HFI_PORT_BITSTREAM;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ return HFI_PORT_RAW;
+ default:
+ return HFI_PORT_NONE;
+ }
+}
+
+static u32 iris_hfi_gen2_get_port_from_buf_type(enum iris_buffer_type buffer_type)
+{
+ switch (buffer_type) {
+ case BUF_INPUT:
+ case BUF_BIN:
+ case BUF_COMV:
+ case BUF_NON_COMV:
+ case BUF_LINE:
+ return HFI_PORT_BITSTREAM;
+ case BUF_OUTPUT:
+ case BUF_DPB:
+ return HFI_PORT_RAW;
+ case BUF_PERSIST:
+ default:
+ return HFI_PORT_NONE;
+ }
+}
+
+static int iris_hfi_gen2_session_set_property(struct iris_inst *inst, u32 packet_type, u32 flag,
+ u32 plane, u32 payload_type, void *payload,
+ u32 payload_size)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+
+ iris_hfi_gen2_packet_session_property(inst,
+ packet_type,
+ flag,
+ plane,
+ payload_type,
+ payload,
+ payload_size);
+
+ return iris_hfi_queue_cmd_write(inst->core, inst_hfi_gen2->packet,
+ inst_hfi_gen2->packet->size);
+}
+
+static int iris_hfi_gen2_set_bitstream_resolution(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 resolution = inst->fmt_src->fmt.pix_mp.width << 16 |
+ inst->fmt_src->fmt.pix_mp.height;
+
+ inst_hfi_gen2->src_subcr_params.bitstream_resolution = resolution;
+
+ return iris_hfi_gen2_session_set_property(inst,
+ HFI_PROP_BITSTREAM_RESOLUTION,
+ HFI_HOST_FLAGS_NONE,
+ port,
+ HFI_PAYLOAD_U32,
+ &resolution,
+ sizeof(u32));
+}
+
+static int iris_hfi_gen2_set_crop_offsets(struct iris_inst *inst)
+{
+ u32 bottom_offset = (inst->fmt_src->fmt.pix_mp.height - inst->crop.height);
+ u32 right_offset = (inst->fmt_src->fmt.pix_mp.width - inst->crop.width);
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 left_offset = inst->crop.left;
+ u32 top_offset = inst->crop.top;
+ u32 payload[2];
+
+ payload[0] = FIELD_PREP(GENMASK(31, 16), left_offset) | top_offset;
+ payload[1] = FIELD_PREP(GENMASK(31, 16), right_offset) | bottom_offset;
+ inst_hfi_gen2->src_subcr_params.crop_offsets[0] = payload[0];
+ inst_hfi_gen2->src_subcr_params.crop_offsets[1] = payload[1];
+
+ return iris_hfi_gen2_session_set_property(inst,
+ HFI_PROP_CROP_OFFSETS,
+ HFI_HOST_FLAGS_NONE,
+ port,
+ HFI_PAYLOAD_64_PACKED,
+ &payload,
+ sizeof(u64));
+}
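
Each crop word carries two 16-bit offsets: the high half holds left/right and the low half holds top/bottom. The decode response path (iris_hfi_gen2_read_input_subcr_params() in iris_hfi_gen2_response.c below) reverses this packing. A worked sketch, assuming a hypothetical 1920x1080 coded frame with a 1920x1072 crop at (0, 0):

        /* pack (as above): left = 0, top = 0, right = 0, bottom = 1080 - 1072 = 8 */
        u32 word0 = FIELD_PREP(GENMASK(31, 16), 0) | 0;      /* 0x00000000 */
        u32 word1 = FIELD_PREP(GENMASK(31, 16), 0) | 8;      /* 0x00000008 */
        /* unpack (as in the response handler): */
        u32 top    = word0 & 0xFFFF;                         /* 0    */
        u32 left   = (word0 >> 16) & 0xFFFF;                 /* 0    */
        u32 height = 1080 - (word1 & 0xFFFF) - top;          /* 1072 */
        u32 width  = 1920 - ((word1 >> 16) & 0xFFFF) - left; /* 1920 */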
+
+static int iris_hfi_gen2_set_bit_depth(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 bitdepth = BIT_DEPTH_8;
+
+ inst_hfi_gen2->src_subcr_params.bit_depth = bitdepth;
+
+ return iris_hfi_gen2_session_set_property(inst,
+ HFI_PROP_LUMA_CHROMA_BIT_DEPTH,
+ HFI_HOST_FLAGS_NONE,
+ port,
+ HFI_PAYLOAD_U32,
+ &bitdepth,
+ sizeof(u32));
+}
+
+static int iris_hfi_gen2_set_coded_frames(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 coded_frames = 0;
+
+ if (inst->fw_caps[CODED_FRAMES].value == CODED_FRAMES_PROGRESSIVE)
+ coded_frames = HFI_BITMASK_FRAME_MBS_ONLY_FLAG;
+ inst_hfi_gen2->src_subcr_params.coded_frames = coded_frames;
+
+ return iris_hfi_gen2_session_set_property(inst,
+ HFI_PROP_CODED_FRAMES,
+ HFI_HOST_FLAGS_NONE,
+ port,
+ HFI_PAYLOAD_U32,
+ &coded_frames,
+ sizeof(u32));
+}
+
+static int iris_hfi_gen2_set_min_output_count(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 min_output = inst->buffers[BUF_OUTPUT].min_count;
+
+ inst_hfi_gen2->src_subcr_params.fw_min_count = min_output;
+
+ return iris_hfi_gen2_session_set_property(inst,
+ HFI_PROP_BUFFER_FW_MIN_OUTPUT_COUNT,
+ HFI_HOST_FLAGS_NONE,
+ port,
+ HFI_PAYLOAD_U32,
+ &min_output,
+ sizeof(u32));
+}
+
+static int iris_hfi_gen2_set_picture_order_count(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 poc = 0;
+
+ inst_hfi_gen2->src_subcr_params.pic_order_cnt = poc;
+
+ return iris_hfi_gen2_session_set_property(inst,
+ HFI_PROP_PIC_ORDER_CNT_TYPE,
+ HFI_HOST_FLAGS_NONE,
+ port,
+ HFI_PAYLOAD_U32,
+ &poc,
+ sizeof(u32));
+}
+
+static int iris_hfi_gen2_set_colorspace(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ struct v4l2_pix_format_mplane *pixmp = &inst->fmt_src->fmt.pix_mp;
+ u32 video_signal_type_present_flag = 0, color_info;
+ u32 matrix_coeff = HFI_MATRIX_COEFF_RESERVED;
+ u32 video_format = UNSPECIFIED_COLOR_FORMAT;
+ u32 full_range = V4L2_QUANTIZATION_DEFAULT;
+ u32 transfer_char = HFI_TRANSFER_RESERVED;
+ u32 colour_description_present_flag = 0;
+ u32 primaries = HFI_PRIMARIES_RESERVED;
+
+ if (pixmp->colorspace != V4L2_COLORSPACE_DEFAULT ||
+ pixmp->ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT ||
+ pixmp->xfer_func != V4L2_XFER_FUNC_DEFAULT) {
+ colour_description_present_flag = 1;
+ video_signal_type_present_flag = 1;
+ primaries = iris_hfi_gen2_get_color_primaries(pixmp->colorspace);
+ matrix_coeff = iris_hfi_gen2_get_matrix_coefficients(pixmp->ycbcr_enc);
+ transfer_char = iris_hfi_gen2_get_transfer_char(pixmp->xfer_func);
+ }
+
+ if (pixmp->quantization != V4L2_QUANTIZATION_DEFAULT) {
+ video_signal_type_present_flag = 1;
+ full_range = pixmp->quantization == V4L2_QUANTIZATION_FULL_RANGE ? 1 : 0;
+ }
+
+ color_info = iris_hfi_gen2_get_color_info(matrix_coeff, transfer_char, primaries,
+ colour_description_present_flag, full_range,
+ video_format, video_signal_type_present_flag);
+
+ inst_hfi_gen2->src_subcr_params.color_info = color_info;
+
+ return iris_hfi_gen2_session_set_property(inst,
+ HFI_PROP_SIGNAL_COLOR_INFO,
+ HFI_HOST_FLAGS_NONE,
+ port,
+ HFI_PAYLOAD_32_PACKED,
+ &color_info,
+ sizeof(u32));
+}
+
+static int iris_hfi_gen2_set_profile(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 profile = inst->fw_caps[PROFILE].value;
+
+ inst_hfi_gen2->src_subcr_params.profile = profile;
+
+ return iris_hfi_gen2_session_set_property(inst,
+ HFI_PROP_PROFILE,
+ HFI_HOST_FLAGS_NONE,
+ port,
+ HFI_PAYLOAD_U32_ENUM,
+ &profile,
+ sizeof(u32));
+}
+
+static int iris_hfi_gen2_set_level(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 level = inst->fw_caps[LEVEL].value;
+
+ inst_hfi_gen2->src_subcr_params.level = level;
+
+ return iris_hfi_gen2_session_set_property(inst,
+ HFI_PROP_LEVEL,
+ HFI_HOST_FLAGS_NONE,
+ port,
+ HFI_PAYLOAD_U32_ENUM,
+ &level,
+ sizeof(u32));
+}
+
+static int iris_hfi_gen2_set_colorformat(struct iris_inst *inst)
+{
+ u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ u32 hfi_colorformat, pixelformat;
+
+ pixelformat = inst->fmt_dst->fmt.pix_mp.pixelformat;
+ hfi_colorformat = pixelformat == V4L2_PIX_FMT_NV12 ? HFI_COLOR_FMT_NV12 : 0;
+
+ return iris_hfi_gen2_session_set_property(inst,
+ HFI_PROP_COLOR_FORMAT,
+ HFI_HOST_FLAGS_NONE,
+ port,
+ HFI_PAYLOAD_U32,
+ &hfi_colorformat,
+ sizeof(u32));
+}
+
+static int iris_hfi_gen2_set_linear_stride_scanline(struct iris_inst *inst)
+{
+ u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ u32 pixelformat = inst->fmt_dst->fmt.pix_mp.pixelformat;
+ u32 scanline_y = inst->fmt_dst->fmt.pix_mp.height;
+ u32 stride_y = inst->fmt_dst->fmt.pix_mp.width;
+ u32 scanline_uv = scanline_y / 2;
+ u32 stride_uv = stride_y;
+ u32 payload[2];
+
+ if (pixelformat != V4L2_PIX_FMT_NV12)
+ return 0;
+
+ payload[0] = stride_y << 16 | scanline_y;
+ payload[1] = stride_uv << 16 | scanline_uv;
+
+ return iris_hfi_gen2_session_set_property(inst,
+ HFI_PROP_LINEAR_STRIDE_SCANLINE,
+ HFI_HOST_FLAGS_NONE,
+ port,
+ HFI_PAYLOAD_U64,
+ &payload,
+ sizeof(u64));
+}
+
+static int iris_hfi_gen2_session_set_config_params(struct iris_inst *inst, u32 plane)
+{
+ struct iris_core *core = inst->core;
+ u32 config_params_size, i, j;
+ const u32 *config_params;
+ int ret;
+
+ static const struct iris_hfi_prop_type_handle prop_type_handle_arr[] = {
+ {HFI_PROP_BITSTREAM_RESOLUTION, iris_hfi_gen2_set_bitstream_resolution },
+ {HFI_PROP_CROP_OFFSETS, iris_hfi_gen2_set_crop_offsets },
+ {HFI_PROP_CODED_FRAMES, iris_hfi_gen2_set_coded_frames },
+ {HFI_PROP_LUMA_CHROMA_BIT_DEPTH, iris_hfi_gen2_set_bit_depth },
+ {HFI_PROP_BUFFER_FW_MIN_OUTPUT_COUNT, iris_hfi_gen2_set_min_output_count },
+ {HFI_PROP_PIC_ORDER_CNT_TYPE, iris_hfi_gen2_set_picture_order_count },
+ {HFI_PROP_SIGNAL_COLOR_INFO, iris_hfi_gen2_set_colorspace },
+ {HFI_PROP_PROFILE, iris_hfi_gen2_set_profile },
+ {HFI_PROP_LEVEL, iris_hfi_gen2_set_level },
+ {HFI_PROP_COLOR_FORMAT, iris_hfi_gen2_set_colorformat },
+ {HFI_PROP_LINEAR_STRIDE_SCANLINE, iris_hfi_gen2_set_linear_stride_scanline },
+ };
+
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ config_params = core->iris_platform_data->input_config_params;
+ config_params_size = core->iris_platform_data->input_config_params_size;
+ } else {
+ config_params = core->iris_platform_data->output_config_params;
+ config_params_size = core->iris_platform_data->output_config_params_size;
+ }
+
+ if (!config_params || !config_params_size)
+ return -EINVAL;
+
+ for (i = 0; i < config_params_size; i++) {
+ for (j = 0; j < ARRAY_SIZE(prop_type_handle_arr); j++) {
+ if (prop_type_handle_arr[j].type == config_params[i]) {
+ ret = prop_type_handle_arr[j].handle(inst);
+ if (ret)
+ return ret;
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
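
The setter table above is driven by per-SoC platform data: each property ID listed in input_config_params/output_config_params is looked up and its handler invoked, so enabling a property on a new platform only needs the ID in the platform list plus a table entry. A hypothetical platform list, for illustration only (the real lists live in the iris platform data files):

        static const u32 example_input_config_params[] = {
                HFI_PROP_BITSTREAM_RESOLUTION,
                HFI_PROP_CROP_OFFSETS,
                HFI_PROP_CODED_FRAMES,
                HFI_PROP_BUFFER_FW_MIN_OUTPUT_COUNT,
                HFI_PROP_PIC_ORDER_CNT_TYPE,
                HFI_PROP_PROFILE,
                HFI_PROP_LEVEL,
        };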
+
+static int iris_hfi_gen2_session_set_codec(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 codec = HFI_CODEC_DECODE_AVC;
+
+ iris_hfi_gen2_packet_session_property(inst,
+ HFI_PROP_CODEC,
+ HFI_HOST_FLAGS_NONE,
+ HFI_PORT_NONE,
+ HFI_PAYLOAD_U32_ENUM,
+ &codec,
+ sizeof(u32));
+
+ return iris_hfi_queue_cmd_write(inst->core, inst_hfi_gen2->packet,
+ inst_hfi_gen2->packet->size);
+}
+
+static int iris_hfi_gen2_session_set_default_header(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 default_header = false;
+
+ iris_hfi_gen2_packet_session_property(inst,
+ HFI_PROP_DEC_DEFAULT_HEADER,
+ HFI_HOST_FLAGS_NONE,
+ HFI_PORT_BITSTREAM,
+ HFI_PAYLOAD_U32,
+ &default_header,
+ sizeof(u32));
+
+ return iris_hfi_queue_cmd_write(inst->core, inst_hfi_gen2->packet,
+ inst_hfi_gen2->packet->size);
+}
+
+static int iris_hfi_gen2_session_open(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ int ret;
+
+ if (inst->state != IRIS_INST_DEINIT)
+ return -EALREADY;
+
+ inst_hfi_gen2->ipsc_properties_set = false;
+ inst_hfi_gen2->opsc_properties_set = false;
+
+ inst_hfi_gen2->packet = kzalloc(4096, GFP_KERNEL);
+ if (!inst_hfi_gen2->packet)
+ return -ENOMEM;
+
+ iris_hfi_gen2_packet_session_command(inst,
+ HFI_CMD_OPEN,
+ HFI_HOST_FLAGS_RESPONSE_REQUIRED |
+ HFI_HOST_FLAGS_INTR_REQUIRED,
+ HFI_PORT_NONE,
+ 0,
+ HFI_PAYLOAD_U32,
+ &inst->session_id,
+ sizeof(u32));
+
+ ret = iris_hfi_queue_cmd_write(inst->core, inst_hfi_gen2->packet,
+ inst_hfi_gen2->packet->size);
+ if (ret)
+ goto fail_free_packet;
+
+ ret = iris_hfi_gen2_session_set_codec(inst);
+ if (ret)
+ goto fail_free_packet;
+
+ ret = iris_hfi_gen2_session_set_default_header(inst);
+ if (ret)
+ goto fail_free_packet;
+
+ return 0;
+
+fail_free_packet:
+ kfree(inst_hfi_gen2->packet);
+ inst_hfi_gen2->packet = NULL;
+
+ return ret;
+}
+
+static int iris_hfi_gen2_session_close(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ int ret;
+
+ if (!inst_hfi_gen2->packet)
+ return -EINVAL;
+
+ iris_hfi_gen2_packet_session_command(inst,
+ HFI_CMD_CLOSE,
+ (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
+ HFI_HOST_FLAGS_INTR_REQUIRED |
+ HFI_HOST_FLAGS_NON_DISCARDABLE),
+ HFI_PORT_NONE,
+ inst->session_id,
+ HFI_PAYLOAD_NONE,
+ NULL,
+ 0);
+
+ ret = iris_hfi_queue_cmd_write(inst->core, inst_hfi_gen2->packet,
+ inst_hfi_gen2->packet->size);
+
+ kfree(inst_hfi_gen2->packet);
+ inst_hfi_gen2->packet = NULL;
+
+ return ret;
+}
+
+static int iris_hfi_gen2_session_subscribe_mode(struct iris_inst *inst,
+ u32 cmd, u32 plane, u32 payload_type,
+ void *payload, u32 payload_size)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+
+ iris_hfi_gen2_packet_session_command(inst,
+ cmd,
+ (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
+ HFI_HOST_FLAGS_INTR_REQUIRED),
+ iris_hfi_gen2_get_port(plane),
+ inst->session_id,
+ payload_type,
+ payload,
+ payload_size);
+
+ return iris_hfi_queue_cmd_write(inst->core, inst_hfi_gen2->packet,
+ inst_hfi_gen2->packet->size);
+}
+
+static int iris_hfi_gen2_subscribe_change_param(struct iris_inst *inst, u32 plane)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ struct hfi_subscription_params subsc_params;
+ u32 prop_type, payload_size, payload_type;
+ struct iris_core *core = inst->core;
+ const u32 *change_param;
+ u32 change_param_size;
+ u32 payload[32] = {0};
+ u32 hfi_port = 0, i;
+ int ret;
+
+ if ((V4L2_TYPE_IS_OUTPUT(plane) && inst_hfi_gen2->ipsc_properties_set) ||
+ (V4L2_TYPE_IS_CAPTURE(plane) && inst_hfi_gen2->opsc_properties_set)) {
+ dev_err(core->dev, "subscription properties already set\n");
+ return 0;
+ }
+
+ change_param = core->iris_platform_data->input_config_params;
+ change_param_size = core->iris_platform_data->input_config_params_size;
+
+ payload[0] = HFI_MODE_PORT_SETTINGS_CHANGE;
+
+ for (i = 0; i < change_param_size; i++)
+ payload[i + 1] = change_param[i];
+
+ ret = iris_hfi_gen2_session_subscribe_mode(inst,
+ HFI_CMD_SUBSCRIBE_MODE,
+ plane,
+ HFI_PAYLOAD_U32_ARRAY,
+ &payload[0],
+ ((change_param_size + 1) * sizeof(u32)));
+ if (ret)
+ return ret;
+
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ inst_hfi_gen2->ipsc_properties_set = true;
+ } else {
+ hfi_port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ memcpy(&inst_hfi_gen2->dst_subcr_params,
+ &inst_hfi_gen2->src_subcr_params,
+ sizeof(inst_hfi_gen2->src_subcr_params));
+ subsc_params = inst_hfi_gen2->dst_subcr_params;
+ for (i = 0; i < change_param_size; i++) {
+ payload[0] = 0;
+ payload[1] = 0;
+ payload_size = 0;
+ payload_type = 0;
+ prop_type = change_param[i];
+ switch (prop_type) {
+ case HFI_PROP_BITSTREAM_RESOLUTION:
+ payload[0] = subsc_params.bitstream_resolution;
+ payload_size = sizeof(u32);
+ payload_type = HFI_PAYLOAD_U32;
+ break;
+ case HFI_PROP_CROP_OFFSETS:
+ payload[0] = subsc_params.crop_offsets[0];
+ payload[1] = subsc_params.crop_offsets[1];
+ payload_size = sizeof(u64);
+ payload_type = HFI_PAYLOAD_64_PACKED;
+ break;
+ case HFI_PROP_CODED_FRAMES:
+ payload[0] = subsc_params.coded_frames;
+ payload_size = sizeof(u32);
+ payload_type = HFI_PAYLOAD_U32;
+ break;
+ case HFI_PROP_BUFFER_FW_MIN_OUTPUT_COUNT:
+ payload[0] = subsc_params.fw_min_count;
+ payload_size = sizeof(u32);
+ payload_type = HFI_PAYLOAD_U32;
+ break;
+ case HFI_PROP_PIC_ORDER_CNT_TYPE:
+ payload[0] = subsc_params.pic_order_cnt;
+ payload_size = sizeof(u32);
+ payload_type = HFI_PAYLOAD_U32;
+ break;
+ case HFI_PROP_SIGNAL_COLOR_INFO:
+ payload[0] = subsc_params.color_info;
+ payload_size = sizeof(u32);
+ payload_type = HFI_PAYLOAD_U32;
+ break;
+ case HFI_PROP_PROFILE:
+ payload[0] = subsc_params.profile;
+ payload_size = sizeof(u32);
+ payload_type = HFI_PAYLOAD_U32;
+ break;
+ case HFI_PROP_LEVEL:
+ payload[0] = subsc_params.level;
+ payload_size = sizeof(u32);
+ payload_type = HFI_PAYLOAD_U32;
+ break;
+ default:
+ prop_type = 0;
+ ret = -EINVAL;
+ break;
+ }
+ if (prop_type) {
+ ret = iris_hfi_gen2_session_set_property(inst,
+ prop_type,
+ HFI_HOST_FLAGS_NONE,
+ hfi_port,
+ payload_type,
+ &payload,
+ payload_size);
+ if (ret)
+ return ret;
+ }
+ }
+ inst_hfi_gen2->opsc_properties_set = true;
+ }
+
+ return 0;
+}
+
+static int iris_hfi_gen2_subscribe_property(struct iris_inst *inst, u32 plane)
+{
+ struct iris_core *core = inst->core;
+ u32 subscribe_prop_size, i;
+ const u32 *subscribe_prop;
+ u32 payload[32] = {0};
+
+ payload[0] = HFI_MODE_PROPERTY;
+
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ subscribe_prop_size = core->iris_platform_data->dec_input_prop_size;
+ subscribe_prop = core->iris_platform_data->dec_input_prop;
+ } else {
+ subscribe_prop_size = core->iris_platform_data->dec_output_prop_size;
+ subscribe_prop = core->iris_platform_data->dec_output_prop;
+ }
+
+ for (i = 0; i < subscribe_prop_size; i++)
+ payload[i + 1] = subscribe_prop[i];
+
+ return iris_hfi_gen2_session_subscribe_mode(inst,
+ HFI_CMD_SUBSCRIBE_MODE,
+ plane,
+ HFI_PAYLOAD_U32_ARRAY,
+ &payload[0],
+ (subscribe_prop_size + 1) * sizeof(u32));
+}
+
+static int iris_hfi_gen2_session_start(struct iris_inst *inst, u32 plane)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ int ret = 0;
+
+ ret = iris_hfi_gen2_subscribe_change_param(inst, plane);
+ if (ret)
+ return ret;
+
+ ret = iris_hfi_gen2_subscribe_property(inst, plane);
+ if (ret)
+ return ret;
+
+ iris_hfi_gen2_packet_session_command(inst,
+ HFI_CMD_START,
+ (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
+ HFI_HOST_FLAGS_INTR_REQUIRED),
+ iris_hfi_gen2_get_port(plane),
+ inst->session_id,
+ HFI_PAYLOAD_NONE,
+ NULL,
+ 0);
+
+ return iris_hfi_queue_cmd_write(inst->core, inst_hfi_gen2->packet,
+ inst_hfi_gen2->packet->size);
+}
+
+static int iris_hfi_gen2_session_stop(struct iris_inst *inst, u32 plane)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ int ret = 0;
+
+ reinit_completion(&inst->completion);
+
+ iris_hfi_gen2_packet_session_command(inst,
+ HFI_CMD_STOP,
+ (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
+ HFI_HOST_FLAGS_INTR_REQUIRED |
+ HFI_HOST_FLAGS_NON_DISCARDABLE),
+ iris_hfi_gen2_get_port(plane),
+ inst->session_id,
+ HFI_PAYLOAD_NONE,
+ NULL,
+ 0);
+
+ ret = iris_hfi_queue_cmd_write(inst->core, inst_hfi_gen2->packet,
+ inst_hfi_gen2->packet->size);
+ if (ret)
+ return ret;
+
+ return iris_wait_for_session_response(inst, false);
+}
+
+static int iris_hfi_gen2_session_pause(struct iris_inst *inst, u32 plane)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+
+ iris_hfi_gen2_packet_session_command(inst,
+ HFI_CMD_PAUSE,
+ (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
+ HFI_HOST_FLAGS_INTR_REQUIRED),
+ iris_hfi_gen2_get_port(plane),
+ inst->session_id,
+ HFI_PAYLOAD_NONE,
+ NULL,
+ 0);
+
+ return iris_hfi_queue_cmd_write(inst->core, inst_hfi_gen2->packet,
+ inst_hfi_gen2->packet->size);
+}
+
+static int iris_hfi_gen2_session_resume_drc(struct iris_inst *inst, u32 plane)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 payload = HFI_CMD_SETTINGS_CHANGE;
+
+ iris_hfi_gen2_packet_session_command(inst,
+ HFI_CMD_RESUME,
+ (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
+ HFI_HOST_FLAGS_INTR_REQUIRED),
+ iris_hfi_gen2_get_port(plane),
+ inst->session_id,
+ HFI_PAYLOAD_U32,
+ &payload,
+ sizeof(u32));
+
+ return iris_hfi_queue_cmd_write(inst->core, inst_hfi_gen2->packet,
+ inst_hfi_gen2->packet->size);
+}
+
+static int iris_hfi_gen2_session_resume_drain(struct iris_inst *inst, u32 plane)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 payload = HFI_CMD_DRAIN;
+
+ iris_hfi_gen2_packet_session_command(inst,
+ HFI_CMD_RESUME,
+ (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
+ HFI_HOST_FLAGS_INTR_REQUIRED),
+ iris_hfi_gen2_get_port(plane),
+ inst->session_id,
+ HFI_PAYLOAD_U32,
+ &payload,
+ sizeof(u32));
+
+ return iris_hfi_queue_cmd_write(inst->core, inst_hfi_gen2->packet,
+ inst_hfi_gen2->packet->size);
+}
+
+static int iris_hfi_gen2_session_drain(struct iris_inst *inst, u32 plane)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+
+ if (!V4L2_TYPE_IS_OUTPUT(plane))
+ return 0;
+
+ iris_hfi_gen2_packet_session_command(inst,
+ HFI_CMD_DRAIN,
+ (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
+ HFI_HOST_FLAGS_INTR_REQUIRED |
+ HFI_HOST_FLAGS_NON_DISCARDABLE),
+ iris_hfi_gen2_get_port(plane),
+ inst->session_id,
+ HFI_PAYLOAD_NONE,
+ NULL,
+ 0);
+
+ return iris_hfi_queue_cmd_write(inst->core, inst_hfi_gen2->packet,
+ inst_hfi_gen2->packet->size);
+}
+
+static u32 iris_hfi_gen2_buf_type_from_driver(enum iris_buffer_type buffer_type)
+{
+ switch (buffer_type) {
+ case BUF_INPUT:
+ return HFI_BUFFER_BITSTREAM;
+ case BUF_OUTPUT:
+ return HFI_BUFFER_RAW;
+ case BUF_BIN:
+ return HFI_BUFFER_BIN;
+ case BUF_COMV:
+ return HFI_BUFFER_COMV;
+ case BUF_NON_COMV:
+ return HFI_BUFFER_NON_COMV;
+ case BUF_LINE:
+ return HFI_BUFFER_LINE;
+ case BUF_DPB:
+ return HFI_BUFFER_DPB;
+ case BUF_PERSIST:
+ return HFI_BUFFER_PERSIST;
+ default:
+ return 0;
+ }
+}
+
+static int iris_set_num_comv(struct iris_inst *inst)
+{
+ struct platform_inst_caps *caps;
+ struct iris_core *core = inst->core;
+ u32 num_comv;
+
+ caps = core->iris_platform_data->inst_caps;
+ num_comv = caps->num_comv;
+
+ return core->hfi_ops->session_set_property(inst,
+ HFI_PROP_COMV_BUFFER_COUNT,
+ HFI_HOST_FLAGS_NONE,
+ HFI_PORT_BITSTREAM,
+ HFI_PAYLOAD_U32,
+ &num_comv, sizeof(u32));
+}
+
+static void iris_hfi_gen2_get_buffer(struct iris_buffer *buffer, struct iris_hfi_buffer *buf)
+{
+ memset(buf, 0, sizeof(*buf));
+ buf->type = iris_hfi_gen2_buf_type_from_driver(buffer->type);
+ buf->index = buffer->index;
+ buf->base_address = buffer->device_addr;
+ buf->addr_offset = 0;
+ buf->buffer_size = buffer->buffer_size;
+
+ if (buffer->type == BUF_INPUT)
+ buf->buffer_size = ALIGN(buffer->buffer_size, 256);
+ buf->data_offset = buffer->data_offset;
+ buf->data_size = buffer->data_size;
+ if (buffer->attr & BUF_ATTR_PENDING_RELEASE)
+ buf->flags |= HFI_BUF_HOST_FLAG_RELEASE;
+ buf->flags |= HFI_BUF_HOST_FLAGS_CB_NON_SECURE;
+ buf->timestamp = buffer->timestamp;
+}
+
+static int iris_hfi_gen2_session_queue_buffer(struct iris_inst *inst, struct iris_buffer *buffer)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ struct iris_hfi_buffer hfi_buffer;
+ u32 port;
+ int ret;
+
+ iris_hfi_gen2_get_buffer(buffer, &hfi_buffer);
+ if (buffer->type == BUF_COMV) {
+ ret = iris_set_num_comv(inst);
+ if (ret)
+ return ret;
+ }
+
+ port = iris_hfi_gen2_get_port_from_buf_type(buffer->type);
+ iris_hfi_gen2_packet_session_command(inst,
+ HFI_CMD_BUFFER,
+ HFI_HOST_FLAGS_INTR_REQUIRED,
+ port,
+ inst->session_id,
+ HFI_PAYLOAD_STRUCTURE,
+ &hfi_buffer,
+ sizeof(hfi_buffer));
+
+ return iris_hfi_queue_cmd_write(inst->core, inst_hfi_gen2->packet,
+ inst_hfi_gen2->packet->size);
+}
+
+static int iris_hfi_gen2_session_release_buffer(struct iris_inst *inst, struct iris_buffer *buffer)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ struct iris_hfi_buffer hfi_buffer;
+ u32 port;
+
+ iris_hfi_gen2_get_buffer(buffer, &hfi_buffer);
+ hfi_buffer.flags |= HFI_BUF_HOST_FLAG_RELEASE;
+ port = iris_hfi_gen2_get_port_from_buf_type(buffer->type);
+
+ iris_hfi_gen2_packet_session_command(inst,
+ HFI_CMD_BUFFER,
+ (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
+ HFI_HOST_FLAGS_INTR_REQUIRED),
+ port,
+ inst->session_id,
+ HFI_PAYLOAD_STRUCTURE,
+ &hfi_buffer,
+ sizeof(hfi_buffer));
+
+ return iris_hfi_queue_cmd_write(inst->core, inst_hfi_gen2->packet,
+ inst_hfi_gen2->packet->size);
+}
+
+static const struct iris_hfi_command_ops iris_hfi_gen2_command_ops = {
+ .sys_init = iris_hfi_gen2_sys_init,
+ .sys_image_version = iris_hfi_gen2_sys_image_version,
+ .sys_interframe_powercollapse = iris_hfi_gen2_sys_interframe_powercollapse,
+ .sys_pc_prep = iris_hfi_gen2_sys_pc_prep,
+ .session_open = iris_hfi_gen2_session_open,
+ .session_set_config_params = iris_hfi_gen2_session_set_config_params,
+ .session_set_property = iris_hfi_gen2_session_set_property,
+ .session_start = iris_hfi_gen2_session_start,
+ .session_queue_buf = iris_hfi_gen2_session_queue_buffer,
+ .session_release_buf = iris_hfi_gen2_session_release_buffer,
+ .session_pause = iris_hfi_gen2_session_pause,
+ .session_resume_drc = iris_hfi_gen2_session_resume_drc,
+ .session_stop = iris_hfi_gen2_session_stop,
+ .session_drain = iris_hfi_gen2_session_drain,
+ .session_resume_drain = iris_hfi_gen2_session_resume_drain,
+ .session_close = iris_hfi_gen2_session_close,
+};
+
+void iris_hfi_gen2_command_ops_init(struct iris_core *core)
+{
+ core->hfi_ops = &iris_hfi_gen2_command_ops;
+}
+
+struct iris_inst *iris_hfi_gen2_get_instance(void)
+{
+ return kzalloc(sizeof(struct iris_inst_hfi_gen2), GFP_KERNEL);
+}
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen2_defines.h b/drivers/media/platform/qcom/iris/iris_hfi_gen2_defines.h
new file mode 100644
index 000000000000..806f8bb7f505
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen2_defines.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_HFI_GEN2_DEFINES_H__
+#define __IRIS_HFI_GEN2_DEFINES_H__
+
+#include <linux/types.h>
+
+#define HFI_VIDEO_ARCH_LX 0x1
+
+#define HFI_CMD_BEGIN 0x01000000
+#define HFI_CMD_INIT 0x01000001
+#define HFI_CMD_POWER_COLLAPSE 0x01000002
+#define HFI_CMD_OPEN 0x01000003
+#define HFI_CMD_CLOSE 0x01000004
+#define HFI_CMD_START 0x01000005
+#define HFI_CMD_STOP 0x01000006
+#define HFI_CMD_DRAIN 0x01000007
+#define HFI_CMD_RESUME 0x01000008
+#define HFI_CMD_BUFFER 0x01000009
+#define HFI_CMD_SUBSCRIBE_MODE 0x0100000B
+#define HFI_CMD_SETTINGS_CHANGE 0x0100000C
+#define HFI_CMD_PAUSE 0x01000011
+#define HFI_CMD_END 0x01FFFFFF
+
+#define HFI_BITMASK_BITSTREAM_WIDTH 0xffff0000
+#define HFI_BITMASK_BITSTREAM_HEIGHT 0x0000ffff
+#define HFI_BITMASK_FRAME_MBS_ONLY_FLAG 0x00000001
+
+#define HFI_PROP_BEGIN 0x03000000
+#define HFI_PROP_IMAGE_VERSION 0x03000001
+#define HFI_PROP_INTRA_FRAME_POWER_COLLAPSE 0x03000002
+#define HFI_PROP_UBWC_MAX_CHANNELS 0x03000003
+#define HFI_PROP_UBWC_MAL_LENGTH 0x03000004
+#define HFI_PROP_UBWC_HBB 0x03000005
+#define HFI_PROP_UBWC_BANK_SWZL_LEVEL1 0x03000006
+#define HFI_PROP_UBWC_BANK_SWZL_LEVEL2 0x03000007
+#define HFI_PROP_UBWC_BANK_SWZL_LEVEL3 0x03000008
+#define HFI_PROP_UBWC_BANK_SPREADING 0x03000009
+#define HFI_PROP_CODEC 0x03000100
+#define HFI_PROP_COLOR_FORMAT 0x03000101
+#define HFI_PROP_BITSTREAM_RESOLUTION 0x03000103
+#define HFI_PROP_LINEAR_STRIDE_SCANLINE 0x03000104
+#define HFI_PROP_CROP_OFFSETS 0x03000105
+#define HFI_PROP_PROFILE 0x03000107
+#define HFI_PROP_LEVEL 0x03000108
+#define HFI_PROP_STAGE 0x0300010a
+#define HFI_PROP_PIPE 0x0300010b
+#define HFI_PROP_LUMA_CHROMA_BIT_DEPTH 0x0300010f
+#define HFI_PROP_CODED_FRAMES 0x03000120
+#define HFI_PROP_CABAC_SESSION 0x03000121
+#define HFI_PROP_BUFFER_HOST_MAX_COUNT 0x03000123
+#define HFI_PROP_BUFFER_FW_MIN_OUTPUT_COUNT 0x03000124
+#define HFI_PROP_PIC_ORDER_CNT_TYPE 0x03000128
+#define HFI_PROP_QUALITY_MODE 0x03000148
+#define HFI_PROP_SIGNAL_COLOR_INFO 0x03000155
+#define HFI_PROP_PICTURE_TYPE 0x03000162
+#define HFI_PROP_DEC_DEFAULT_HEADER 0x03000168
+#define HFI_PROP_DEC_START_FROM_RAP_FRAME 0x03000169
+#define HFI_PROP_NO_OUTPUT 0x0300016a
+#define HFI_PROP_COMV_BUFFER_COUNT 0x03000193
+#define HFI_PROP_END 0x03FFFFFF
+
+#define HFI_SESSION_ERROR_BEGIN 0x04000000
+#define HFI_ERROR_UNKNOWN_SESSION 0x04000001
+#define HFI_ERROR_MAX_SESSIONS 0x04000002
+#define HFI_ERROR_FATAL 0x04000003
+#define HFI_ERROR_INVALID_STATE 0x04000004
+#define HFI_ERROR_INSUFFICIENT_RESOURCES 0x04000005
+#define HFI_ERROR_BUFFER_NOT_SET 0x04000006
+#define HFI_ERROR_STREAM_UNSUPPORTED 0x04000008
+#define HFI_SESSION_ERROR_END 0x04FFFFFF
+
+#define HFI_SYSTEM_ERROR_BEGIN 0x05000000
+#define HFI_SYS_ERROR_WD_TIMEOUT 0x05000001
+#define HFI_SYSTEM_ERROR_END 0x05FFFFFF
+
+#define HFI_INFORMATION_BEGIN 0x06000000
+#define HFI_INFO_UNSUPPORTED 0x06000001
+#define HFI_INFO_DATA_CORRUPT 0x06000002
+#define HFI_INFO_BUFFER_OVERFLOW 0x06000004
+#define HFI_INFO_HFI_FLAG_DRAIN_LAST 0x06000006
+#define HFI_INFO_HFI_FLAG_PSC_LAST 0x06000007
+#define HFI_INFORMATION_END 0x06FFFFFF
+
+enum hfi_property_mode_type {
+ HFI_MODE_PORT_SETTINGS_CHANGE = 0x00000001,
+ HFI_MODE_PROPERTY = 0x00000002,
+};
+
+enum hfi_color_format {
+ HFI_COLOR_FMT_OPAQUE = 0,
+ HFI_COLOR_FMT_NV12 = 1,
+ HFI_COLOR_FMT_NV12_UBWC = 2,
+ HFI_COLOR_FMT_P010 = 3,
+ HFI_COLOR_FMT_TP10_UBWC = 4,
+ HFI_COLOR_FMT_RGBA8888 = 5,
+ HFI_COLOR_FMT_RGBA8888_UBWC = 6,
+ HFI_COLOR_FMT_NV21 = 7,
+};
+
+enum hfi_codec_type {
+ HFI_CODEC_DECODE_AVC = 1,
+ HFI_CODEC_ENCODE_AVC = 2,
+};
+
+enum hfi_picture_type {
+ HFI_PICTURE_IDR = 0x00000001,
+ HFI_PICTURE_P = 0x00000002,
+ HFI_PICTURE_B = 0x00000004,
+ HFI_PICTURE_I = 0x00000008,
+ HFI_PICTURE_CRA = 0x00000010,
+ HFI_PICTURE_BLA = 0x00000020,
+};
+
+enum hfi_buffer_type {
+ HFI_BUFFER_BITSTREAM = 0x00000001,
+ HFI_BUFFER_RAW = 0x00000002,
+ HFI_BUFFER_METADATA = 0x00000003,
+ HFI_BUFFER_SUBCACHE = 0x00000004,
+ HFI_BUFFER_PARTIAL_DATA = 0x00000005,
+ HFI_BUFFER_DPB = 0x00000006,
+ HFI_BUFFER_BIN = 0x00000007,
+ HFI_BUFFER_LINE = 0x00000008,
+ HFI_BUFFER_ARP = 0x00000009,
+ HFI_BUFFER_COMV = 0x0000000A,
+ HFI_BUFFER_NON_COMV = 0x0000000B,
+ HFI_BUFFER_PERSIST = 0x0000000C,
+ HFI_BUFFER_VPSS = 0x0000000D,
+};
+
+enum hfi_buffer_host_flags {
+ HFI_BUF_HOST_FLAG_RELEASE = 0x00000001,
+ HFI_BUF_HOST_FLAG_READONLY = 0x00000010,
+ HFI_BUF_HOST_FLAG_CODEC_CONFIG = 0x00000100,
+ HFI_BUF_HOST_FLAGS_CB_NON_SECURE = 0x00000200,
+};
+
+enum hfi_buffer_firmware_flags {
+ HFI_BUF_FW_FLAG_RELEASE_DONE = 0x00000001,
+ HFI_BUF_FW_FLAG_READONLY = 0x00000010,
+ HFI_BUF_FW_FLAG_LAST = 0x10000000,
+ HFI_BUF_FW_FLAG_PSC_LAST = 0x20000000,
+};
+
+enum hfi_packet_firmware_flags {
+ HFI_FW_FLAGS_SUCCESS = 0x00000001,
+ HFI_FW_FLAGS_INFORMATION = 0x00000002,
+ HFI_FW_FLAGS_SESSION_ERROR = 0x00000004,
+ HFI_FW_FLAGS_SYSTEM_ERROR = 0x00000008,
+};
+
+struct hfi_debug_header {
+ u32 size;
+ u32 debug_level;
+ u32 reserved[2];
+};
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen2_packet.c b/drivers/media/platform/qcom/iris/iris_hfi_gen2_packet.c
new file mode 100644
index 000000000000..d77fa29f44fc
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen2_packet.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "iris_hfi_common.h"
+#include "iris_hfi_gen2.h"
+#include "iris_hfi_gen2_packet.h"
+
+u32 iris_hfi_gen2_get_color_primaries(u32 primaries)
+{
+ switch (primaries) {
+ case V4L2_COLORSPACE_DEFAULT:
+ return HFI_PRIMARIES_RESERVED;
+ case V4L2_COLORSPACE_REC709:
+ return HFI_PRIMARIES_BT709;
+ case V4L2_COLORSPACE_470_SYSTEM_M:
+ return HFI_PRIMARIES_BT470_SYSTEM_M;
+ case V4L2_COLORSPACE_470_SYSTEM_BG:
+ return HFI_PRIMARIES_BT470_SYSTEM_BG;
+ case V4L2_COLORSPACE_SMPTE170M:
+ return HFI_PRIMARIES_BT601_525;
+ case V4L2_COLORSPACE_SMPTE240M:
+ return HFI_PRIMARIES_SMPTE_ST240M;
+ case V4L2_COLORSPACE_BT2020:
+ return HFI_PRIMARIES_BT2020;
+ case V4L2_COLORSPACE_DCI_P3:
+ return HFI_PRIMARIES_SMPTE_RP431_2;
+ default:
+ return HFI_PRIMARIES_RESERVED;
+ }
+}
+
+u32 iris_hfi_gen2_get_transfer_char(u32 characteristics)
+{
+ switch (characteristics) {
+ case V4L2_XFER_FUNC_DEFAULT:
+ return HFI_TRANSFER_RESERVED;
+ case V4L2_XFER_FUNC_709:
+ return HFI_TRANSFER_BT709;
+ case V4L2_XFER_FUNC_SMPTE240M:
+ return HFI_TRANSFER_SMPTE_ST240M;
+ case V4L2_XFER_FUNC_SRGB:
+ return HFI_TRANSFER_SRGB_SYCC;
+ case V4L2_XFER_FUNC_SMPTE2084:
+ return HFI_TRANSFER_SMPTE_ST2084_PQ;
+ default:
+ return HFI_TRANSFER_RESERVED;
+ }
+}
+
+u32 iris_hfi_gen2_get_matrix_coefficients(u32 coefficients)
+{
+ switch (coefficients) {
+ case V4L2_YCBCR_ENC_DEFAULT:
+ return HFI_MATRIX_COEFF_RESERVED;
+ case V4L2_YCBCR_ENC_709:
+ return HFI_MATRIX_COEFF_BT709;
+ case V4L2_YCBCR_ENC_XV709:
+ return HFI_MATRIX_COEFF_BT709;
+ case V4L2_YCBCR_ENC_XV601:
+ return HFI_MATRIX_COEFF_BT470_SYS_BG_OR_BT601_625;
+ case V4L2_YCBCR_ENC_601:
+ return HFI_MATRIX_COEFF_BT601_525_BT1358_525_OR_625;
+ case V4L2_YCBCR_ENC_SMPTE240M:
+ return HFI_MATRIX_COEFF_SMPTE_ST240;
+ case V4L2_YCBCR_ENC_BT2020:
+ return HFI_MATRIX_COEFF_BT2020_NON_CONSTANT;
+ case V4L2_YCBCR_ENC_BT2020_CONST_LUM:
+ return HFI_MATRIX_COEFF_BT2020_CONSTANT;
+ default:
+ return HFI_MATRIX_COEFF_RESERVED;
+ }
+}
+
+u32 iris_hfi_gen2_get_color_info(u32 matrix_coeff, u32 transfer_char, u32 primaries,
+ u32 colour_description_present_flag, u32 full_range,
+ u32 video_format, u32 video_signal_type_present_flag)
+{
+ return (matrix_coeff & 0xFF) |
+ ((transfer_char << 8) & 0xFF00) |
+ ((primaries << 16) & 0xFF0000) |
+ ((colour_description_present_flag << 24) & 0x1000000) |
+ ((full_range << 25) & 0x2000000) |
+ ((video_format << 26) & 0x1C000000) |
+ ((video_signal_type_present_flag << 29) & 0x20000000);
+}
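
iris_hfi_gen2_get_color_info() packs seven signal-description fields into one word at fixed bit positions; the decoder response path extracts them with the matching shifts and masks. The resulting layout, derived from the masks above:

        /*
         * color_info bit layout:
         *   [7:0]   matrix_coeff
         *   [15:8]  transfer_char
         *   [23:16] primaries
         *   [24]    colour_description_present_flag
         *   [25]    full_range
         *   [28:26] video_format
         *   [29]    video_signal_type_present_flag
         */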
+
+static void iris_hfi_gen2_create_header(struct iris_hfi_header *hdr,
+ u32 session_id, u32 header_id)
+{
+ memset(hdr, 0, sizeof(*hdr));
+
+ hdr->size = sizeof(*hdr);
+ hdr->session_id = session_id;
+ hdr->header_id = header_id;
+ hdr->num_packets = 0;
+}
+
+static void iris_hfi_gen2_create_packet(struct iris_hfi_header *hdr, u32 pkt_type,
+ u32 pkt_flags, u32 payload_type, u32 port,
+ u32 packet_id, void *payload, u32 payload_size)
+{
+ struct iris_hfi_packet *pkt = (struct iris_hfi_packet *)((u8 *)hdr + hdr->size);
+ u32 pkt_size = sizeof(*pkt) + payload_size;
+
+ memset(pkt, 0, pkt_size);
+ pkt->size = pkt_size;
+ pkt->type = pkt_type;
+ pkt->flags = pkt_flags;
+ pkt->payload_info = payload_type;
+ pkt->port = port;
+ pkt->packet_id = packet_id;
+ if (payload_size)
+ memcpy(&pkt->payload[0], payload, payload_size);
+
+ hdr->num_packets++;
+ hdr->size += pkt->size;
+}
+
+void iris_hfi_gen2_packet_sys_init(struct iris_core *core, struct iris_hfi_header *hdr)
+{
+ u32 payload = 0;
+
+ iris_hfi_gen2_create_header(hdr, 0, core->header_id++);
+
+ payload = HFI_VIDEO_ARCH_LX;
+ iris_hfi_gen2_create_packet(hdr,
+ HFI_CMD_INIT,
+ (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
+ HFI_HOST_FLAGS_INTR_REQUIRED |
+ HFI_HOST_FLAGS_NON_DISCARDABLE),
+ HFI_PAYLOAD_U32,
+ HFI_PORT_NONE,
+ core->packet_id++,
+ &payload,
+ sizeof(u32));
+
+ payload = core->iris_platform_data->ubwc_config->max_channels;
+ iris_hfi_gen2_create_packet(hdr,
+ HFI_PROP_UBWC_MAX_CHANNELS,
+ HFI_HOST_FLAGS_NONE,
+ HFI_PAYLOAD_U32,
+ HFI_PORT_NONE,
+ core->packet_id++,
+ &payload,
+ sizeof(u32));
+
+ payload = core->iris_platform_data->ubwc_config->mal_length;
+ iris_hfi_gen2_create_packet(hdr,
+ HFI_PROP_UBWC_MAL_LENGTH,
+ HFI_HOST_FLAGS_NONE,
+ HFI_PAYLOAD_U32,
+ HFI_PORT_NONE,
+ core->packet_id++,
+ &payload,
+ sizeof(u32));
+
+ payload = core->iris_platform_data->ubwc_config->highest_bank_bit;
+ iris_hfi_gen2_create_packet(hdr,
+ HFI_PROP_UBWC_HBB,
+ HFI_HOST_FLAGS_NONE,
+ HFI_PAYLOAD_U32,
+ HFI_PORT_NONE,
+ core->packet_id++,
+ &payload,
+ sizeof(u32));
+
+ payload = core->iris_platform_data->ubwc_config->bank_swzl_level;
+ iris_hfi_gen2_create_packet(hdr,
+ HFI_PROP_UBWC_BANK_SWZL_LEVEL1,
+ HFI_HOST_FLAGS_NONE,
+ HFI_PAYLOAD_U32,
+ HFI_PORT_NONE,
+ core->packet_id++,
+ &payload,
+ sizeof(u32));
+
+ payload = core->iris_platform_data->ubwc_config->bank_swz2_level;
+ iris_hfi_gen2_create_packet(hdr,
+ HFI_PROP_UBWC_BANK_SWZL_LEVEL2,
+ HFI_HOST_FLAGS_NONE,
+ HFI_PAYLOAD_U32,
+ HFI_PORT_NONE,
+ core->packet_id++,
+ &payload,
+ sizeof(u32));
+
+ payload = core->iris_platform_data->ubwc_config->bank_swz3_level;
+ iris_hfi_gen2_create_packet(hdr,
+ HFI_PROP_UBWC_BANK_SWZL_LEVEL3,
+ HFI_HOST_FLAGS_NONE,
+ HFI_PAYLOAD_U32,
+ HFI_PORT_NONE,
+ core->packet_id++,
+ &payload,
+ sizeof(u32));
+
+ payload = core->iris_platform_data->ubwc_config->bank_spreading;
+ iris_hfi_gen2_create_packet(hdr,
+ HFI_PROP_UBWC_BANK_SPREADING,
+ HFI_HOST_FLAGS_NONE,
+ HFI_PAYLOAD_U32,
+ HFI_PORT_NONE,
+ core->packet_id++,
+ &payload,
+ sizeof(u32));
+}
+
+void iris_hfi_gen2_packet_image_version(struct iris_core *core, struct iris_hfi_header *hdr)
+{
+ iris_hfi_gen2_create_header(hdr, 0, core->header_id++);
+
+ iris_hfi_gen2_create_packet(hdr,
+ HFI_PROP_IMAGE_VERSION,
+ (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
+ HFI_HOST_FLAGS_INTR_REQUIRED |
+ HFI_HOST_FLAGS_GET_PROPERTY),
+ HFI_PAYLOAD_NONE,
+ HFI_PORT_NONE,
+ core->packet_id++,
+ NULL, 0);
+}
+
+void iris_hfi_gen2_packet_session_command(struct iris_inst *inst, u32 pkt_type,
+ u32 flags, u32 port, u32 session_id,
+ u32 payload_type, void *payload,
+ u32 payload_size)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ struct iris_core *core = inst->core;
+
+ iris_hfi_gen2_create_header(inst_hfi_gen2->packet, session_id, core->header_id++);
+
+ iris_hfi_gen2_create_packet(inst_hfi_gen2->packet,
+ pkt_type,
+ flags,
+ payload_type,
+ port,
+ core->packet_id++,
+ payload,
+ payload_size);
+}
+
+void iris_hfi_gen2_packet_session_property(struct iris_inst *inst,
+ u32 pkt_type, u32 flags, u32 port,
+ u32 payload_type, void *payload, u32 payload_size)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ struct iris_core *core = inst->core;
+
+ iris_hfi_gen2_create_header(inst_hfi_gen2->packet, inst->session_id, core->header_id++);
+
+ iris_hfi_gen2_create_packet(inst_hfi_gen2->packet,
+ pkt_type,
+ flags,
+ payload_type,
+ port,
+ core->packet_id++,
+ payload,
+ payload_size);
+}
+
+void iris_hfi_gen2_packet_sys_interframe_powercollapse(struct iris_core *core,
+ struct iris_hfi_header *hdr)
+{
+ u32 payload = 1; /* HFI_TRUE */
+
+ iris_hfi_gen2_create_header(hdr, 0 /*session_id*/, core->header_id++);
+
+ iris_hfi_gen2_create_packet(hdr,
+ HFI_PROP_INTRA_FRAME_POWER_COLLAPSE,
+ HFI_HOST_FLAGS_NONE,
+ HFI_PAYLOAD_U32,
+ HFI_PORT_NONE,
+ core->packet_id++,
+ &payload,
+ sizeof(u32));
+}
+
+void iris_hfi_gen2_packet_sys_pc_prep(struct iris_core *core, struct iris_hfi_header *hdr)
+{
+ iris_hfi_gen2_create_header(hdr, 0 /*session_id*/, core->header_id++);
+
+ iris_hfi_gen2_create_packet(hdr,
+ HFI_CMD_POWER_COLLAPSE,
+ HFI_HOST_FLAGS_NONE,
+ HFI_PAYLOAD_NONE,
+ HFI_PORT_NONE,
+ core->packet_id++,
+ NULL, 0);
+}
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen2_packet.h b/drivers/media/platform/qcom/iris/iris_hfi_gen2_packet.h
new file mode 100644
index 000000000000..25b9582349ca
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen2_packet.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_HFI_GEN2_PACKET_H__
+#define __IRIS_HFI_GEN2_PACKET_H__
+
+#include "iris_hfi_gen2_defines.h"
+
+struct iris_core;
+
+/**
+ * struct iris_hfi_header
+ *
+ * @size: size of the total packet in bytes including hfi_header
+ * @session_id: For session level hfi_header session_id is non-zero.
+ * For system level hfi_header session_id is zero.
+ * @header_id: unique header id for each hfi_header
+ * @reserved: reserved for future use
+ * @num_packets: number of hfi_packet structures included with the hfi_header
+ */
+struct iris_hfi_header {
+ u32 size;
+ u32 session_id;
+ u32 header_id;
+ u32 reserved[4];
+ u32 num_packets;
+};
+
+/**
+ * struct iris_hfi_packet
+ *
+ * @size: size of the hfi_packet in bytes including payload
+ * @type: one of the below hfi_packet types:
+ * HFI_CMD_*,
+ * HFI_PROP_*,
+ * HFI_ERROR_*,
+ * HFI_INFO_*,
+ * HFI_SYS_ERROR_*
+ * @flags: hfi_packet flags. It is represented as bit masks.
+ * host packet flags are "enum hfi_packet_host_flags"
+ * firmware packet flags are "enum hfi_packet_firmware_flags"
+ * @payload_info: payload information indicated by "enum hfi_packet_payload_info"
+ * @port: hfi_packet port type indicated by "enum hfi_packet_port_type"
+ * This is bitmask and may be applicable to multiple ports.
+ * @packet_id: host hfi_packet contains unique packet id.
+ * firmware returns host packet id in response packet
+ * wherever applicable. If not applicable firmware sets it to zero.
+ * @reserved: reserved for future use.
+ * @payload: flexible array of payload having additional packet information.
+ */
+struct iris_hfi_packet {
+ u32 size;
+ u32 type;
+ u32 flags;
+ u32 payload_info;
+ u32 port;
+ u32 packet_id;
+ u32 reserved[2];
+ u32 payload[];
+};
+
+/**
+ * struct iris_hfi_buffer
+ *
+ * @type: buffer type indicated by "enum hfi_buffer_type"
+ * FW needs to return proper type for any buffer command.
+ * @index: index of the buffer
+ * @base_address: base address of the buffer.
+ * This buffer address is always 4KBytes aligned.
+ * @addr_offset: accessible buffer offset from base address
+ * Decoder bitstream buffer: 256 Bytes aligned
+ * Firmware can uniquely identify a buffer based on
+ * base_address and addr_offset.
+ * HW can read memory only from base_address+addr_offset.
+ * @buffer_size: accessible buffer size in bytes starting from addr_offset
+ * @data_offset: data starts from "base_address + addr_offset + data_offset"
+ * RAW buffer: data_offset is 0. Restriction: 4KBytes aligned
+ * decoder bitstream buffer: no restriction (can be any value)
+ * @data_size: data size in bytes
+ * @flags: buffer flags. It is represented as bit masks.
+ * host buffer flags are "enum hfi_buffer_host_flags"
+ * firmware buffer flags are "enum hfi_buffer_firmware_flags"
+ * @timestamp: timestamp of the buffer in nanoseconds (ns).
+ * It is the presentation timestamp (PTS) for both encoder and decoder.
+ * Decoder: passed through from the bitstream buffer to the raw buffer;
+ * firmware does not need to return it as part of input buffer done.
+ * For internal buffers there is no timestamp and the host sets it to 0.
+ * @reserved: reserved for future use
+ */
+struct iris_hfi_buffer {
+ u32 type;
+ u32 index;
+ u64 base_address;
+ u32 addr_offset;
+ u32 buffer_size;
+ u32 data_offset;
+ u32 data_size;
+ u64 timestamp;
+ u32 flags;
+ u32 reserved[5];
+};
+
+u32 iris_hfi_gen2_get_color_primaries(u32 primaries);
+u32 iris_hfi_gen2_get_transfer_char(u32 characteristics);
+u32 iris_hfi_gen2_get_matrix_coefficients(u32 coefficients);
+u32 iris_hfi_gen2_get_color_info(u32 matrix_coeff, u32 transfer_char, u32 primaries,
+ u32 colour_description_present_flag, u32 full_range,
+ u32 video_format, u32 video_signal_type_present_flag);
+
+void iris_hfi_gen2_packet_sys_init(struct iris_core *core, struct iris_hfi_header *hdr);
+void iris_hfi_gen2_packet_image_version(struct iris_core *core, struct iris_hfi_header *hdr);
+void iris_hfi_gen2_packet_session_command(struct iris_inst *inst, u32 pkt_type,
+ u32 flags, u32 port, u32 session_id,
+ u32 payload_type, void *payload,
+ u32 payload_size);
+void iris_hfi_gen2_packet_session_property(struct iris_inst *inst,
+ u32 pkt_type, u32 flags, u32 port,
+ u32 payload_type, void *payload, u32 payload_size);
+void iris_hfi_gen2_packet_sys_interframe_powercollapse(struct iris_core *core,
+ struct iris_hfi_header *hdr);
+void iris_hfi_gen2_packet_sys_pc_prep(struct iris_core *core, struct iris_hfi_header *hdr);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c b/drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c
new file mode 100644
index 000000000000..b75a01641d5d
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c
@@ -0,0 +1,934 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <media/v4l2-mem2mem.h>
+
+#include "iris_hfi_gen2.h"
+#include "iris_hfi_gen2_defines.h"
+#include "iris_hfi_gen2_packet.h"
+#include "iris_vdec.h"
+#include "iris_vpu_buffer.h"
+#include "iris_vpu_common.h"
+
+struct iris_hfi_gen2_core_hfi_range {
+ u32 begin;
+ u32 end;
+ int (*handle)(struct iris_core *core, struct iris_hfi_packet *pkt);
+};
+
+struct iris_hfi_gen2_inst_hfi_range {
+ u32 begin;
+ u32 end;
+ int (*handle)(struct iris_inst *inst, struct iris_hfi_packet *pkt);
+};
+
+struct iris_hfi_gen2_packet_handle {
+ enum hfi_buffer_type type;
+ int (*handle)(struct iris_inst *inst, struct iris_hfi_packet *pkt);
+};
+
+static u32 iris_hfi_gen2_buf_type_to_driver(enum hfi_buffer_type buf_type)
+{
+ switch (buf_type) {
+ case HFI_BUFFER_BITSTREAM:
+ return BUF_INPUT;
+ case HFI_BUFFER_RAW:
+ return BUF_OUTPUT;
+ case HFI_BUFFER_BIN:
+ return BUF_BIN;
+ case HFI_BUFFER_ARP:
+ return BUF_ARP;
+ case HFI_BUFFER_COMV:
+ return BUF_COMV;
+ case HFI_BUFFER_NON_COMV:
+ return BUF_NON_COMV;
+ case HFI_BUFFER_LINE:
+ return BUF_LINE;
+ case HFI_BUFFER_DPB:
+ return BUF_DPB;
+ case HFI_BUFFER_PERSIST:
+ return BUF_PERSIST;
+ default:
+ return 0;
+ }
+}
+
+static bool iris_hfi_gen2_is_valid_hfi_buffer_type(u32 buffer_type)
+{
+ switch (buffer_type) {
+ case HFI_BUFFER_BITSTREAM:
+ case HFI_BUFFER_RAW:
+ case HFI_BUFFER_BIN:
+ case HFI_BUFFER_ARP:
+ case HFI_BUFFER_COMV:
+ case HFI_BUFFER_NON_COMV:
+ case HFI_BUFFER_LINE:
+ case HFI_BUFFER_DPB:
+ case HFI_BUFFER_PERSIST:
+ case HFI_BUFFER_VPSS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool iris_hfi_gen2_is_valid_hfi_port(u32 port, u32 buffer_type)
+{
+ if (port == HFI_PORT_NONE && buffer_type != HFI_BUFFER_PERSIST)
+ return false;
+
+ if (port != HFI_PORT_BITSTREAM && port != HFI_PORT_RAW)
+ return false;
+
+ return true;
+}
+
+static int iris_hfi_gen2_get_driver_buffer_flags(struct iris_inst *inst, u32 hfi_flags)
+{
+ u32 keyframe = HFI_PICTURE_IDR | HFI_PICTURE_I | HFI_PICTURE_CRA | HFI_PICTURE_BLA;
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 driver_flags = 0;
+
+ if (inst_hfi_gen2->hfi_frame_info.picture_type & keyframe)
+ driver_flags |= V4L2_BUF_FLAG_KEYFRAME;
+ else if (inst_hfi_gen2->hfi_frame_info.picture_type & HFI_PICTURE_P)
+ driver_flags |= V4L2_BUF_FLAG_PFRAME;
+ else if (inst_hfi_gen2->hfi_frame_info.picture_type & HFI_PICTURE_B)
+ driver_flags |= V4L2_BUF_FLAG_BFRAME;
+
+ if (inst_hfi_gen2->hfi_frame_info.data_corrupt || inst_hfi_gen2->hfi_frame_info.overflow)
+ driver_flags |= V4L2_BUF_FLAG_ERROR;
+
+ if (hfi_flags & HFI_BUF_FW_FLAG_LAST ||
+ hfi_flags & HFI_BUF_FW_FLAG_PSC_LAST)
+ driver_flags |= V4L2_BUF_FLAG_LAST;
+
+ return driver_flags;
+}
+
+static bool iris_hfi_gen2_validate_packet_payload(struct iris_hfi_packet *pkt)
+{
+ u32 payload_size = 0;
+
+ switch (pkt->payload_info) {
+ case HFI_PAYLOAD_U32:
+ case HFI_PAYLOAD_S32:
+ case HFI_PAYLOAD_Q16:
+ case HFI_PAYLOAD_U32_ENUM:
+ case HFI_PAYLOAD_32_PACKED:
+ payload_size = 4;
+ break;
+ case HFI_PAYLOAD_U64:
+ case HFI_PAYLOAD_S64:
+ case HFI_PAYLOAD_64_PACKED:
+ payload_size = 8;
+ break;
+ case HFI_PAYLOAD_STRUCTURE:
+ if (pkt->type == HFI_CMD_BUFFER)
+ payload_size = sizeof(struct iris_hfi_buffer);
+ break;
+ default:
+ payload_size = 0;
+ break;
+ }
+
+ if (pkt->size < sizeof(struct iris_hfi_packet) + payload_size)
+ return false;
+
+ return true;
+}
+
+static int iris_hfi_gen2_validate_packet(u8 *response_pkt, u8 *core_resp_pkt)
+{
+ u8 *response_limit = core_resp_pkt + IFACEQ_CORE_PKT_SIZE;
+ u32 response_pkt_size = *(u32 *)response_pkt;
+
+ if (!response_pkt_size)
+ return -EINVAL;
+
+ if (response_pkt_size < sizeof(struct iris_hfi_packet))
+ return -EINVAL;
+
+ if (response_pkt + response_pkt_size > response_limit)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int iris_hfi_gen2_validate_hdr_packet(struct iris_core *core, struct iris_hfi_header *hdr)
+{
+ struct iris_hfi_packet *packet;
+ int ret;
+ u8 *pkt;
+ u32 i;
+
+ if (hdr->size < sizeof(*hdr) + sizeof(*packet))
+ return -EINVAL;
+
+ pkt = (u8 *)((u8 *)hdr + sizeof(*hdr));
+
+ for (i = 0; i < hdr->num_packets; i++) {
+ packet = (struct iris_hfi_packet *)pkt;
+ ret = iris_hfi_gen2_validate_packet(pkt, core->response_packet);
+ if (ret)
+ return ret;
+
+ pkt += packet->size;
+ }
+
+ return 0;
+}
+
+static int iris_hfi_gen2_handle_session_info(struct iris_inst *inst,
+ struct iris_hfi_packet *pkt)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ struct iris_core *core = inst->core;
+ int ret = 0;
+ char *info;
+
+ switch (pkt->type) {
+ case HFI_INFO_UNSUPPORTED:
+ info = "unsupported";
+ break;
+ case HFI_INFO_DATA_CORRUPT:
+ info = "data corrupt";
+ inst_hfi_gen2->hfi_frame_info.data_corrupt = 1;
+ break;
+ case HFI_INFO_BUFFER_OVERFLOW:
+ info = "buffer overflow";
+ inst_hfi_gen2->hfi_frame_info.overflow = 1;
+ break;
+ case HFI_INFO_HFI_FLAG_DRAIN_LAST:
+ info = "drain last flag";
+ ret = iris_inst_sub_state_change_drain_last(inst);
+ break;
+ case HFI_INFO_HFI_FLAG_PSC_LAST:
+ info = "drc last flag";
+ ret = iris_inst_sub_state_change_drc_last(inst);
+ break;
+ default:
+ info = "unknown";
+ break;
+ }
+
+ dev_dbg(core->dev, "session info received %#x: %s\n",
+ pkt->type, info);
+
+ return ret;
+}
+
+static int iris_hfi_gen2_handle_session_error(struct iris_inst *inst,
+ struct iris_hfi_packet *pkt)
+{
+ struct iris_core *core = inst->core;
+ char *error;
+
+ switch (pkt->type) {
+ case HFI_ERROR_MAX_SESSIONS:
+ error = "exceeded max sessions";
+ break;
+ case HFI_ERROR_UNKNOWN_SESSION:
+ error = "unknown session id";
+ break;
+ case HFI_ERROR_INVALID_STATE:
+ error = "invalid operation for current state";
+ break;
+ case HFI_ERROR_INSUFFICIENT_RESOURCES:
+ error = "insufficient resources";
+ break;
+ case HFI_ERROR_BUFFER_NOT_SET:
+ error = "internal buffers not set";
+ break;
+ case HFI_ERROR_FATAL:
+ error = "fatal error";
+ break;
+ case HFI_ERROR_STREAM_UNSUPPORTED:
+ error = "unsupported stream";
+ break;
+ default:
+ error = "unknown";
+ break;
+ }
+
+ dev_err(core->dev, "session error received %#x: %s\n", pkt->type, error);
+ iris_vb2_queue_error(inst);
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+
+ return 0;
+}
+
+static int iris_hfi_gen2_handle_system_error(struct iris_core *core,
+ struct iris_hfi_packet *pkt)
+{
+ struct iris_inst *instance;
+
+ dev_err(core->dev, "received system error of type %#x\n", pkt->type);
+
+ core->state = IRIS_CORE_ERROR;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(instance, &core->instances, list)
+ iris_inst_change_state(instance, IRIS_INST_ERROR);
+ mutex_unlock(&core->lock);
+
+ schedule_delayed_work(&core->sys_error_handler, msecs_to_jiffies(10));
+
+ return 0;
+}
+
+static int iris_hfi_gen2_handle_system_init(struct iris_core *core,
+ struct iris_hfi_packet *pkt)
+{
+ if (!(pkt->flags & HFI_FW_FLAGS_SUCCESS)) {
+ core->state = IRIS_CORE_ERROR;
+ return 0;
+ }
+
+ complete(&core->core_init_done);
+
+ return 0;
+}
+
+static void iris_hfi_gen2_handle_session_close(struct iris_inst *inst,
+ struct iris_hfi_packet *pkt)
+{
+ if (!(pkt->flags & HFI_FW_FLAGS_SUCCESS)) {
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ return;
+ }
+
+ complete(&inst->completion);
+}
+
+static int iris_hfi_gen2_handle_input_buffer(struct iris_inst *inst,
+ struct iris_hfi_buffer *buffer)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct v4l2_m2m_buffer *m2m_buffer, *n;
+ struct iris_buffer *buf;
+ bool found = false;
+
+ v4l2_m2m_for_each_src_buf_safe(m2m_ctx, m2m_buffer, n) {
+ buf = to_iris_buffer(&m2m_buffer->vb);
+ if (buf->index == buffer->index) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ return -EINVAL;
+
+ if (!(buf->attr & BUF_ATTR_QUEUED))
+ return -EINVAL;
+
+ buf->attr &= ~BUF_ATTR_QUEUED;
+ buf->attr |= BUF_ATTR_DEQUEUED;
+
+ buf->flags = iris_hfi_gen2_get_driver_buffer_flags(inst, buffer->flags);
+
+ return 0;
+}
+
+static int iris_hfi_gen2_handle_output_buffer(struct iris_inst *inst,
+ struct iris_hfi_buffer *hfi_buffer)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct v4l2_m2m_buffer *m2m_buffer, *n;
+ struct iris_buffer *buf;
+ bool found = false;
+ int ret;
+
+ if (hfi_buffer->flags & HFI_BUF_FW_FLAG_LAST) {
+ ret = iris_inst_sub_state_change_drain_last(inst);
+ if (ret)
+ return ret;
+ }
+
+ if (hfi_buffer->flags & HFI_BUF_FW_FLAG_PSC_LAST) {
+ ret = iris_inst_sub_state_change_drc_last(inst);
+ if (ret)
+ return ret;
+ }
+
+ v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, m2m_buffer, n) {
+ buf = to_iris_buffer(&m2m_buffer->vb);
+ if (buf->index == hfi_buffer->index &&
+ buf->device_addr == hfi_buffer->base_address &&
+ buf->data_offset == hfi_buffer->data_offset) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ return -EINVAL;
+
+ if (!(buf->attr & BUF_ATTR_QUEUED))
+ return -EINVAL;
+
+ buf->data_offset = hfi_buffer->data_offset;
+ buf->data_size = hfi_buffer->data_size;
+ buf->timestamp = hfi_buffer->timestamp;
+
+ buf->attr &= ~BUF_ATTR_QUEUED;
+ buf->attr |= BUF_ATTR_DEQUEUED;
+
+ buf->flags = iris_hfi_gen2_get_driver_buffer_flags(inst, hfi_buffer->flags);
+
+ return 0;
+}
+
+static void iris_hfi_gen2_handle_dequeue_buffers(struct iris_inst *inst)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct v4l2_m2m_buffer *buffer, *n;
+ struct iris_buffer *buf = NULL;
+
+ v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buffer, n) {
+ buf = to_iris_buffer(&buffer->vb);
+ if (buf->attr & BUF_ATTR_DEQUEUED) {
+ buf->attr &= ~BUF_ATTR_DEQUEUED;
+ if (!(buf->attr & BUF_ATTR_BUFFER_DONE)) {
+ buf->attr |= BUF_ATTR_BUFFER_DONE;
+ iris_vb2_buffer_done(inst, buf);
+ }
+ }
+ }
+
+ v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buffer, n) {
+ buf = to_iris_buffer(&buffer->vb);
+ if (buf->attr & BUF_ATTR_DEQUEUED) {
+ buf->attr &= ~BUF_ATTR_DEQUEUED;
+ if (!(buf->attr & BUF_ATTR_BUFFER_DONE)) {
+ buf->attr |= BUF_ATTR_BUFFER_DONE;
+ iris_vb2_buffer_done(inst, buf);
+ }
+ }
+ }
+}
+
+static int iris_hfi_gen2_handle_release_internal_buffer(struct iris_inst *inst,
+ struct iris_hfi_buffer *buffer)
+{
+ u32 buf_type = iris_hfi_gen2_buf_type_to_driver(buffer->type);
+ struct iris_buffers *buffers = &inst->buffers[buf_type];
+ struct iris_buffer *buf, *iter;
+ bool found = false;
+ int ret = 0;
+
+ list_for_each_entry(iter, &buffers->list, list) {
+ if (iter->device_addr == buffer->base_address) {
+ found = true;
+ buf = iter;
+ break;
+ }
+ }
+ if (!found)
+ return -EINVAL;
+
+ buf->attr &= ~BUF_ATTR_QUEUED;
+ if (buf->attr & BUF_ATTR_PENDING_RELEASE)
+ ret = iris_destroy_internal_buffer(inst, buf);
+
+ return ret;
+}
+
+static int iris_hfi_gen2_handle_session_stop(struct iris_inst *inst,
+ struct iris_hfi_packet *pkt)
+{
+ int ret = 0;
+
+ if (pkt->port == HFI_PORT_RAW)
+ ret = iris_inst_sub_state_change_pause(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ else if (pkt->port == HFI_PORT_BITSTREAM)
+ ret = iris_inst_sub_state_change_pause(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ complete(&inst->completion);
+
+ return ret;
+}
+
+static int iris_hfi_gen2_handle_session_buffer(struct iris_inst *inst,
+ struct iris_hfi_packet *pkt)
+{
+ struct iris_hfi_buffer *buffer;
+
+ if (pkt->payload_info == HFI_PAYLOAD_NONE)
+ return 0;
+
+ if (!iris_hfi_gen2_validate_packet_payload(pkt)) {
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ return 0;
+ }
+
+ buffer = (struct iris_hfi_buffer *)((u8 *)pkt + sizeof(*pkt));
+ if (!iris_hfi_gen2_is_valid_hfi_buffer_type(buffer->type))
+ return 0;
+
+ if (!iris_hfi_gen2_is_valid_hfi_port(pkt->port, buffer->type))
+ return 0;
+
+ if (buffer->type == HFI_BUFFER_BITSTREAM)
+ return iris_hfi_gen2_handle_input_buffer(inst, buffer);
+ else if (buffer->type == HFI_BUFFER_RAW)
+ return iris_hfi_gen2_handle_output_buffer(inst, buffer);
+ else
+ return iris_hfi_gen2_handle_release_internal_buffer(inst, buffer);
+}
+
+static int iris_hfi_gen2_handle_session_drain(struct iris_inst *inst,
+ struct iris_hfi_packet *pkt)
+{
+ int ret = 0;
+
+ if (!(pkt->flags & HFI_FW_FLAGS_SUCCESS)) {
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ return 0;
+ }
+
+ if (inst->sub_state & IRIS_INST_SUB_DRAIN)
+ ret = iris_inst_change_sub_state(inst, 0, IRIS_INST_SUB_INPUT_PAUSE);
+
+ return ret;
+}
+
+static void iris_hfi_gen2_read_input_subcr_params(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ struct v4l2_pix_format_mplane *pixmp_ip = &inst->fmt_src->fmt.pix_mp;
+ struct v4l2_pix_format_mplane *pixmp_op = &inst->fmt_dst->fmt.pix_mp;
+ u32 primaries, matrix_coeff, transfer_char;
+ struct hfi_subscription_params subsc_params;
+ u32 colour_description_present_flag;
+ u32 video_signal_type_present_flag;
+ struct iris_core *core = inst->core;
+ u32 full_range, width, height;
+ struct vb2_queue *dst_q;
+ struct v4l2_ctrl *ctrl;
+
+ subsc_params = inst_hfi_gen2->src_subcr_params;
+ width = (subsc_params.bitstream_resolution &
+ HFI_BITMASK_BITSTREAM_WIDTH) >> 16;
+ height = subsc_params.bitstream_resolution &
+ HFI_BITMASK_BITSTREAM_HEIGHT;
+
+ pixmp_ip->width = width;
+ pixmp_ip->height = height;
+
+ pixmp_op->width = ALIGN(width, 128);
+ pixmp_op->height = ALIGN(height, 32);
+ pixmp_op->plane_fmt[0].bytesperline = ALIGN(width, 128);
+ pixmp_op->plane_fmt[0].sizeimage = iris_get_buffer_size(inst, BUF_OUTPUT);
+
+ matrix_coeff = subsc_params.color_info & 0xFF;
+ transfer_char = (subsc_params.color_info & 0xFF00) >> 8;
+ primaries = (subsc_params.color_info & 0xFF0000) >> 16;
+ colour_description_present_flag =
+ (subsc_params.color_info & 0x1000000) >> 24;
+ full_range = (subsc_params.color_info & 0x2000000) >> 25;
+ video_signal_type_present_flag =
+ (subsc_params.color_info & 0x20000000) >> 29;
+
+ pixmp_op->colorspace = V4L2_COLORSPACE_DEFAULT;
+ pixmp_op->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ pixmp_op->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ pixmp_op->quantization = V4L2_QUANTIZATION_DEFAULT;
+
+ if (video_signal_type_present_flag) {
+ pixmp_op->quantization =
+ full_range ?
+ V4L2_QUANTIZATION_FULL_RANGE :
+ V4L2_QUANTIZATION_LIM_RANGE;
+ if (colour_description_present_flag) {
+ pixmp_op->colorspace =
+ iris_hfi_get_v4l2_color_primaries(primaries);
+ pixmp_op->xfer_func =
+ iris_hfi_get_v4l2_transfer_char(transfer_char);
+ pixmp_op->ycbcr_enc =
+ iris_hfi_get_v4l2_matrix_coefficients(matrix_coeff);
+ }
+ }
+
+ pixmp_ip->colorspace = pixmp_op->colorspace;
+ pixmp_ip->xfer_func = pixmp_op->xfer_func;
+ pixmp_ip->ycbcr_enc = pixmp_op->ycbcr_enc;
+ pixmp_ip->quantization = pixmp_op->quantization;
+
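+ /*
+ * crop_offsets[0] packs left(31:16)/top(15:0) and crop_offsets[1]
+ * packs right(31:16)/bottom(15:0) offsets from the coded resolution.
+ */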
+ inst->crop.top = subsc_params.crop_offsets[0] & 0xFFFF;
+ inst->crop.left = (subsc_params.crop_offsets[0] >> 16) & 0xFFFF;
+ inst->crop.height = pixmp_ip->height -
+ (subsc_params.crop_offsets[1] & 0xFFFF) - inst->crop.top;
+ inst->crop.width = pixmp_ip->width -
+ ((subsc_params.crop_offsets[1] >> 16) & 0xFFFF) - inst->crop.left;
+
+ inst->fw_caps[PROFILE].value = subsc_params.profile;
+ inst->fw_caps[LEVEL].value = subsc_params.level;
+ inst->fw_caps[POC].value = subsc_params.pic_order_cnt;
+
+ if (subsc_params.bit_depth != BIT_DEPTH_8 ||
+ !(subsc_params.coded_frames & HFI_BITMASK_FRAME_MBS_ONLY_FLAG)) {
+ dev_err(core->dev, "unsupported content, bit depth: %x, coded_frames: %x\n",
+ subsc_params.bit_depth, subsc_params.coded_frames);
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ }
+
+ inst->fw_min_count = subsc_params.fw_min_count;
+ inst->buffers[BUF_OUTPUT].min_count = iris_vpu_buf_count(inst, BUF_OUTPUT);
+ inst->buffers[BUF_OUTPUT].size = pixmp_op->plane_fmt[0].sizeimage;
+ ctrl = v4l2_ctrl_find(&inst->ctrl_handler, V4L2_CID_MIN_BUFFERS_FOR_CAPTURE);
+ if (ctrl)
+ v4l2_ctrl_s_ctrl(ctrl, inst->buffers[BUF_OUTPUT].min_count);
+
+ dst_q = v4l2_m2m_get_dst_vq(inst->m2m_ctx);
+ dst_q->min_reqbufs_allocation = inst->buffers[BUF_OUTPUT].min_count;
+}
+
+static int iris_hfi_gen2_handle_src_change(struct iris_inst *inst,
+ struct iris_hfi_packet *pkt)
+{
+ int ret;
+
+ if (pkt->port != HFI_PORT_BITSTREAM)
+ return 0;
+
+ ret = iris_inst_sub_state_change_drc(inst);
+ if (ret)
+ return ret;
+
+ iris_hfi_gen2_read_input_subcr_params(inst);
+ iris_vdec_src_change(inst);
+
+ return 0;
+}
+
+static int iris_hfi_gen2_handle_session_command(struct iris_inst *inst,
+ struct iris_hfi_packet *pkt)
+{
+ int ret = 0;
+
+ switch (pkt->type) {
+ case HFI_CMD_CLOSE:
+ iris_hfi_gen2_handle_session_close(inst, pkt);
+ break;
+ case HFI_CMD_STOP:
+ iris_hfi_gen2_handle_session_stop(inst, pkt);
+ break;
+ case HFI_CMD_BUFFER:
+ ret = iris_hfi_gen2_handle_session_buffer(inst, pkt);
+ break;
+ case HFI_CMD_SETTINGS_CHANGE:
+ ret = iris_hfi_gen2_handle_src_change(inst, pkt);
+ break;
+ case HFI_CMD_DRAIN:
+ ret = iris_hfi_gen2_handle_session_drain(inst, pkt);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int iris_hfi_gen2_handle_session_property(struct iris_inst *inst,
+ struct iris_hfi_packet *pkt)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+
+ if (pkt->port != HFI_PORT_BITSTREAM)
+ return 0;
+
+ if (pkt->flags & HFI_FW_FLAGS_INFORMATION)
+ return 0;
+
+ switch (pkt->type) {
+ case HFI_PROP_BITSTREAM_RESOLUTION:
+ inst_hfi_gen2->src_subcr_params.bitstream_resolution = pkt->payload[0];
+ break;
+ case HFI_PROP_CROP_OFFSETS:
+ inst_hfi_gen2->src_subcr_params.crop_offsets[0] = pkt->payload[0];
+ inst_hfi_gen2->src_subcr_params.crop_offsets[1] = pkt->payload[1];
+ break;
+ case HFI_PROP_CODED_FRAMES:
+ inst_hfi_gen2->src_subcr_params.coded_frames = pkt->payload[0];
+ break;
+ case HFI_PROP_BUFFER_FW_MIN_OUTPUT_COUNT:
+ inst_hfi_gen2->src_subcr_params.fw_min_count = pkt->payload[0];
+ break;
+ case HFI_PROP_PIC_ORDER_CNT_TYPE:
+ inst_hfi_gen2->src_subcr_params.pic_order_cnt = pkt->payload[0];
+ break;
+ case HFI_PROP_SIGNAL_COLOR_INFO:
+ inst_hfi_gen2->src_subcr_params.color_info = pkt->payload[0];
+ break;
+ case HFI_PROP_PROFILE:
+ inst_hfi_gen2->src_subcr_params.profile = pkt->payload[0];
+ break;
+ case HFI_PROP_LEVEL:
+ inst_hfi_gen2->src_subcr_params.level = pkt->payload[0];
+ break;
+ case HFI_PROP_PICTURE_TYPE:
+ inst_hfi_gen2->hfi_frame_info.picture_type = pkt->payload[0];
+ break;
+ case HFI_PROP_NO_OUTPUT:
+ inst_hfi_gen2->hfi_frame_info.no_output = 1;
+ break;
+ case HFI_PROP_QUALITY_MODE:
+ case HFI_PROP_STAGE:
+ case HFI_PROP_PIPE:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int iris_hfi_gen2_handle_image_version_property(struct iris_core *core,
+ struct iris_hfi_packet *pkt)
+{
+ u8 *str_image_version = (u8 *)pkt + sizeof(*pkt);
+ u32 req_bytes = pkt->size - sizeof(*pkt);
+ char fw_version[IRIS_FW_VERSION_LENGTH];
+ u32 i;
+
+ if (req_bytes < IRIS_FW_VERSION_LENGTH - 1)
+ return -EINVAL;
+
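+ /* Copy the version string, replacing embedded NULs with spaces so it prints as one line */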
+ for (i = 0; i < IRIS_FW_VERSION_LENGTH - 1; i++) {
+ if (str_image_version[i] != '\0')
+ fw_version[i] = str_image_version[i];
+ else
+ fw_version[i] = ' ';
+ }
+ fw_version[i] = '\0';
+ dev_dbg(core->dev, "firmware version: %s\n", fw_version);
+
+ return 0;
+}
+
+static int iris_hfi_gen2_handle_system_property(struct iris_core *core,
+ struct iris_hfi_packet *pkt)
+{
+ switch (pkt->type) {
+ case HFI_PROP_IMAGE_VERSION:
+ return iris_hfi_gen2_handle_image_version_property(core, pkt);
+ default:
+ return 0;
+ }
+}
+
+static int iris_hfi_gen2_handle_system_response(struct iris_core *core,
+ struct iris_hfi_header *hdr)
+{
+ u8 *start_pkt = (u8 *)((u8 *)hdr + sizeof(*hdr));
+ struct iris_hfi_packet *packet;
+ u32 i, j;
+ u8 *pkt;
+ int ret;
+ static const struct iris_hfi_gen2_core_hfi_range range[] = {
+ {HFI_SYSTEM_ERROR_BEGIN, HFI_SYSTEM_ERROR_END, iris_hfi_gen2_handle_system_error },
+ {HFI_PROP_BEGIN, HFI_PROP_END, iris_hfi_gen2_handle_system_property },
+ {HFI_CMD_BEGIN, HFI_CMD_END, iris_hfi_gen2_handle_system_init },
+ };
+
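+ /*
+ * Walk the packets once per handler range; a packet flagged as a
+ * system error short-circuits the remaining processing.
+ */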
+ for (i = 0; i < ARRAY_SIZE(range); i++) {
+ pkt = start_pkt;
+ for (j = 0; j < hdr->num_packets; j++) {
+ packet = (struct iris_hfi_packet *)pkt;
+ if (packet->flags & HFI_FW_FLAGS_SYSTEM_ERROR) {
+ ret = iris_hfi_gen2_handle_system_error(core, packet);
+ return ret;
+ }
+
+ if (packet->type > range[i].begin && packet->type < range[i].end) {
+ ret = range[i].handle(core, packet);
+ if (ret)
+ return ret;
+
+ if (packet->type > HFI_SYSTEM_ERROR_BEGIN &&
+ packet->type < HFI_SYSTEM_ERROR_END)
+ return 0;
+ }
+ pkt += packet->size;
+ }
+ }
+
+ return 0;
+}
+
+static void iris_hfi_gen2_init_src_change_param(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ struct v4l2_pix_format_mplane *pixmp_ip = &inst->fmt_src->fmt.pix_mp;
+ struct v4l2_pix_format_mplane *pixmp_op = &inst->fmt_dst->fmt.pix_mp;
+ u32 bottom_offset = (pixmp_ip->height - inst->crop.height);
+ u32 right_offset = (pixmp_ip->width - inst->crop.width);
+ struct hfi_subscription_params *subsc_params;
+ u32 primaries, matrix_coeff, transfer_char;
+ u32 colour_description_present_flag = 0;
+ u32 video_signal_type_present_flag = 0;
+ u32 full_range, video_format = 0;
+ u32 left_offset = inst->crop.left;
+ u32 top_offset = inst->crop.top;
+
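+ /* Pack the current resolution (width 31:16, height 15:0) and crop offsets in the HFI layout */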
+ subsc_params = &inst_hfi_gen2->src_subcr_params;
+ subsc_params->bitstream_resolution =
+ pixmp_ip->width << 16 | pixmp_ip->height;
+ subsc_params->crop_offsets[0] =
+ left_offset << 16 | top_offset;
+ subsc_params->crop_offsets[1] =
+ right_offset << 16 | bottom_offset;
+ subsc_params->fw_min_count = inst->buffers[BUF_OUTPUT].min_count;
+
+ primaries = iris_hfi_gen2_get_color_primaries(pixmp_op->colorspace);
+ matrix_coeff = iris_hfi_gen2_get_matrix_coefficients(pixmp_op->ycbcr_enc);
+ transfer_char = iris_hfi_gen2_get_transfer_char(pixmp_op->xfer_func);
+ full_range = pixmp_op->quantization == V4L2_QUANTIZATION_FULL_RANGE ? 1 : 0;
+ subsc_params->color_info =
+ iris_hfi_gen2_get_color_info(matrix_coeff, transfer_char, primaries,
+ colour_description_present_flag,
+ full_range, video_format,
+ video_signal_type_present_flag);
+
+ subsc_params->profile = inst->fw_caps[PROFILE].value;
+ subsc_params->level = inst->fw_caps[LEVEL].value;
+ subsc_params->pic_order_cnt = inst->fw_caps[POC].value;
+ subsc_params->bit_depth = inst->fw_caps[BIT_DEPTH].value;
+ if (inst->fw_caps[CODED_FRAMES].value ==
+ CODED_FRAMES_PROGRESSIVE)
+ subsc_params->coded_frames = HFI_BITMASK_FRAME_MBS_ONLY_FLAG;
+ else
+ subsc_params->coded_frames = 0;
+}
+
+static int iris_hfi_gen2_handle_session_response(struct iris_core *core,
+ struct iris_hfi_header *hdr)
+{
+ u8 *pkt = (u8 *)((u8 *)hdr + sizeof(*hdr));
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2;
+ struct iris_hfi_packet *packet;
+ struct iris_inst *inst;
+ bool dequeue = false;
+ int ret = 0;
+ u32 i, j;
+ static const struct iris_hfi_gen2_inst_hfi_range range[] = {
+ {HFI_SESSION_ERROR_BEGIN, HFI_SESSION_ERROR_END,
+ iris_hfi_gen2_handle_session_error},
+ {HFI_INFORMATION_BEGIN, HFI_INFORMATION_END,
+ iris_hfi_gen2_handle_session_info},
+ {HFI_PROP_BEGIN, HFI_PROP_END,
+ iris_hfi_gen2_handle_session_property},
+ {HFI_CMD_BEGIN, HFI_CMD_END,
+ iris_hfi_gen2_handle_session_command },
+ };
+
+ inst = iris_get_instance(core, hdr->session_id);
+ if (!inst)
+ return -EINVAL;
+
+ mutex_lock(&inst->lock);
+ inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ memset(&inst_hfi_gen2->hfi_frame_info, 0, sizeof(struct iris_hfi_frame_info));
+
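+ /*
+ * If this response carries a settings change on the bitstream port,
+ * initialize the source subscription params before the property
+ * handlers below update them from the packets.
+ */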
+ for (i = 0; i < hdr->num_packets; i++) {
+ packet = (struct iris_hfi_packet *)pkt;
+ if (packet->type == HFI_CMD_SETTINGS_CHANGE) {
+ if (packet->port == HFI_PORT_BITSTREAM) {
+ iris_hfi_gen2_init_src_change_param(inst);
+ break;
+ }
+ }
+ pkt += packet->size;
+ }
+
+ pkt = (u8 *)((u8 *)hdr + sizeof(*hdr));
+ for (i = 0; i < ARRAY_SIZE(range); i++) {
+ pkt = (u8 *)((u8 *)hdr + sizeof(*hdr));
+ for (j = 0; j < hdr->num_packets; j++) {
+ packet = (struct iris_hfi_packet *)pkt;
+ if (packet->flags & HFI_FW_FLAGS_SESSION_ERROR)
+ iris_hfi_gen2_handle_session_error(inst, packet);
+
+ if (packet->type > range[i].begin && packet->type < range[i].end) {
+ dequeue |= (packet->type == HFI_CMD_BUFFER);
+ ret = range[i].handle(inst, packet);
+ if (ret)
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ }
+ pkt += packet->size;
+ }
+ }
+
+ if (dequeue)
+ iris_hfi_gen2_handle_dequeue_buffers(inst);
+
+ mutex_unlock(&inst->lock);
+
+ return ret;
+}
+
+static int iris_hfi_gen2_handle_response(struct iris_core *core, void *response)
+{
+ struct iris_hfi_header *hdr = (struct iris_hfi_header *)response;
+ int ret;
+
+ ret = iris_hfi_gen2_validate_hdr_packet(core, hdr);
+ if (ret)
+ return iris_hfi_gen2_handle_system_error(core, NULL);
+
+ if (!hdr->session_id)
+ return iris_hfi_gen2_handle_system_response(core, hdr);
+ else
+ return iris_hfi_gen2_handle_session_response(core, hdr);
+}
+
+static void iris_hfi_gen2_flush_debug_queue(struct iris_core *core, u8 *packet)
+{
+ struct hfi_debug_header *pkt;
+ u8 *log;
+
+ while (!iris_hfi_queue_dbg_read(core, packet)) {
+ pkt = (struct hfi_debug_header *)packet;
+
+ if (pkt->size < sizeof(*pkt))
+ continue;
+
+ if (pkt->size >= IFACEQ_CORE_PKT_SIZE)
+ continue;
+
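+ /* NUL-terminate the packet so the log payload can be printed as a string */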
+ packet[pkt->size] = '\0';
+ log = (u8 *)packet + sizeof(*pkt) + 1;
+ dev_dbg(core->dev, "%s", log);
+ }
+}
+
+static void iris_hfi_gen2_response_handler(struct iris_core *core)
+{
+ if (iris_vpu_watchdog(core, core->intr_status)) {
+ struct iris_hfi_packet pkt = {.type = HFI_SYS_ERROR_WD_TIMEOUT};
+
+ dev_err(core->dev, "cpu watchdog error received\n");
+ core->state = IRIS_CORE_ERROR;
+ iris_hfi_gen2_handle_system_error(core, &pkt);
+
+ return;
+ }
+
+ memset(core->response_packet, 0, sizeof(struct iris_hfi_header));
+ while (!iris_hfi_queue_msg_read(core, core->response_packet)) {
+ iris_hfi_gen2_handle_response(core, core->response_packet);
+ memset(core->response_packet, 0, sizeof(struct iris_hfi_header));
+ }
+
+ iris_hfi_gen2_flush_debug_queue(core, core->response_packet);
+}
+
+static const struct iris_hfi_response_ops iris_hfi_gen2_response_ops = {
+ .hfi_response_handler = iris_hfi_gen2_response_handler,
+};
+
+void iris_hfi_gen2_response_ops_init(struct iris_core *core)
+{
+ core->hfi_response_ops = &iris_hfi_gen2_response_ops;
+}
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_queue.c b/drivers/media/platform/qcom/iris/iris_hfi_queue.c
new file mode 100644
index 000000000000..fac7df0c4d1a
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_hfi_queue.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/pm_runtime.h>
+
+#include "iris_core.h"
+#include "iris_hfi_queue.h"
+#include "iris_vpu_common.h"
+
+static int iris_hfi_queue_write(struct iris_iface_q_info *qinfo, void *packet, u32 packet_size)
+{
+ struct iris_hfi_queue_header *queue = qinfo->qhdr;
+ u32 write_idx = queue->write_idx * sizeof(u32);
+ u32 read_idx = queue->read_idx * sizeof(u32);
+ u32 empty_space, new_write_idx, residue;
+ u32 *write_ptr;
+
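+ /* Compute free space in the ring buffer (indices were converted to bytes above) */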
+ if (write_idx < read_idx)
+ empty_space = read_idx - write_idx;
+ else
+ empty_space = IFACEQ_QUEUE_SIZE - (write_idx - read_idx);
+ if (empty_space < packet_size)
+ return -ENOSPC;
+
+ queue->tx_req = 0;
+
+ new_write_idx = write_idx + packet_size;
+ write_ptr = (u32 *)((u8 *)qinfo->kernel_vaddr + write_idx);
+
+ if (write_ptr < (u32 *)qinfo->kernel_vaddr ||
+ write_ptr > (u32 *)(qinfo->kernel_vaddr +
+ IFACEQ_QUEUE_SIZE))
+ return -EINVAL;
+
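+ /* Copy in two chunks when the packet wraps past the end of the ring */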
+ if (new_write_idx < IFACEQ_QUEUE_SIZE) {
+ memcpy(write_ptr, packet, packet_size);
+ } else {
+ residue = new_write_idx - IFACEQ_QUEUE_SIZE;
+ memcpy(write_ptr, packet, (packet_size - residue));
+ memcpy(qinfo->kernel_vaddr,
+ packet + (packet_size - residue), residue);
+ new_write_idx = residue;
+ }
+
+ /* Make sure packet is written before updating the write index */
+ mb();
+ queue->write_idx = new_write_idx / sizeof(u32);
+
+ /* Make sure write index is updated before an interrupt is raised */
+ mb();
+
+ return 0;
+}
+
+static int iris_hfi_queue_read(struct iris_iface_q_info *qinfo, void *packet)
+{
+ struct iris_hfi_queue_header *queue = qinfo->qhdr;
+ u32 write_idx = queue->write_idx * sizeof(u32);
+ u32 read_idx = queue->read_idx * sizeof(u32);
+ u32 packet_size, receive_request = 0;
+ u32 new_read_idx, residue;
+ u32 *read_ptr;
+ int ret = 0;
+
+ if (queue->queue_type == IFACEQ_MSGQ_ID)
+ receive_request = 1;
+
+ if (read_idx == write_idx) {
+ queue->rx_req = receive_request;
+ /* Ensure qhdr is updated in main memory */
+ mb();
+ return -ENODATA;
+ }
+
+ read_ptr = qinfo->kernel_vaddr + read_idx;
+ if (read_ptr < (u32 *)qinfo->kernel_vaddr ||
+ read_ptr > (u32 *)(qinfo->kernel_vaddr +
+ IFACEQ_QUEUE_SIZE - sizeof(*read_ptr)))
+ return -ENODATA;
+
+ packet_size = *read_ptr;
+ if (!packet_size)
+ return -EINVAL;
+
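+ /* Reject oversized packets; otherwise copy, handling a wrap past the end of the ring */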
+ new_read_idx = read_idx + packet_size;
+ if (packet_size <= IFACEQ_CORE_PKT_SIZE) {
+ if (new_read_idx < IFACEQ_QUEUE_SIZE) {
+ memcpy(packet, read_ptr, packet_size);
+ } else {
+ residue = new_read_idx - IFACEQ_QUEUE_SIZE;
+ memcpy(packet, read_ptr, (packet_size - residue));
+ memcpy((packet + (packet_size - residue)),
+ qinfo->kernel_vaddr, residue);
+ new_read_idx = residue;
+ }
+ } else {
+ new_read_idx = write_idx;
+ ret = -EBADMSG;
+ }
+
+ queue->rx_req = receive_request;
+
+ queue->read_idx = new_read_idx / sizeof(u32);
+ /* Ensure qhdr is updated in main memory */
+ mb();
+
+ return ret;
+}
+
+int iris_hfi_queue_cmd_write_locked(struct iris_core *core, void *pkt, u32 pkt_size)
+{
+ struct iris_iface_q_info *q_info = &core->command_queue;
+
+ if (core->state == IRIS_CORE_ERROR)
+ return -EINVAL;
+
+ if (!iris_hfi_queue_write(q_info, pkt, pkt_size)) {
+ iris_vpu_raise_interrupt(core);
+ } else {
+ dev_err(core->dev, "queue full\n");
+ return -ENODATA;
+ }
+
+ return 0;
+}
+
+int iris_hfi_queue_cmd_write(struct iris_core *core, void *pkt, u32 pkt_size)
+{
+ int ret;
+
+ ret = pm_runtime_resume_and_get(core->dev);
+ if (ret < 0)
+ goto exit;
+
+ mutex_lock(&core->lock);
+ ret = iris_hfi_queue_cmd_write_locked(core, pkt, pkt_size);
+ if (ret) {
+ mutex_unlock(&core->lock);
+ goto exit;
+ }
+ mutex_unlock(&core->lock);
+
+ pm_runtime_mark_last_busy(core->dev);
+ pm_runtime_put_autosuspend(core->dev);
+
+ return 0;
+
+exit:
+ pm_runtime_put_sync(core->dev);
+
+ return ret;
+}
+
+int iris_hfi_queue_msg_read(struct iris_core *core, void *pkt)
+{
+ struct iris_iface_q_info *q_info = &core->message_queue;
+ int ret = 0;
+
+ mutex_lock(&core->lock);
+ if (core->state != IRIS_CORE_INIT) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (iris_hfi_queue_read(q_info, pkt)) {
+ ret = -ENODATA;
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&core->lock);
+
+ return ret;
+}
+
+int iris_hfi_queue_dbg_read(struct iris_core *core, void *pkt)
+{
+ struct iris_iface_q_info *q_info = &core->debug_queue;
+ int ret = 0;
+
+ mutex_lock(&core->lock);
+ if (core->state != IRIS_CORE_INIT) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (iris_hfi_queue_read(q_info, pkt)) {
+ ret = -ENODATA;
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&core->lock);
+
+ return ret;
+}
+
+static void iris_hfi_queue_set_header(struct iris_core *core, u32 queue_id,
+ struct iris_iface_q_info *iface_q)
+{
+ iface_q->qhdr->status = 0x1;
+ iface_q->qhdr->start_addr = iface_q->device_addr;
+ iface_q->qhdr->header_type = IFACEQ_DFLT_QHDR;
+ iface_q->qhdr->queue_type = queue_id;
+ iface_q->qhdr->q_size = IFACEQ_QUEUE_SIZE / sizeof(u32);
+ iface_q->qhdr->pkt_size = 0; /* variable packet size */
+ iface_q->qhdr->rx_wm = 0x1;
+ iface_q->qhdr->tx_wm = 0x1;
+ iface_q->qhdr->rx_req = 0x1;
+ iface_q->qhdr->tx_req = 0x0;
+ iface_q->qhdr->rx_irq_status = 0x0;
+ iface_q->qhdr->tx_irq_status = 0x0;
+ iface_q->qhdr->read_idx = 0x0;
+ iface_q->qhdr->write_idx = 0x0;
+
+ /*
+ * Set receive request to zero on debug queue as there is no
+ * need of interrupt from video hardware for debug messages
+ */
+ if (queue_id == IFACEQ_DBGQ_ID)
+ iface_q->qhdr->rx_req = 0;
+}
+
+static void
+iris_hfi_queue_init(struct iris_core *core, u32 queue_id, struct iris_iface_q_info *iface_q)
+{
+ struct iris_hfi_queue_table_header *q_tbl_hdr = core->iface_q_table_vaddr;
+ u32 offset = sizeof(*q_tbl_hdr) + (queue_id * IFACEQ_QUEUE_SIZE);
+
+ iface_q->device_addr = core->iface_q_table_daddr + offset;
+ iface_q->kernel_vaddr =
+ (void *)((char *)core->iface_q_table_vaddr + offset);
+ iface_q->qhdr = &q_tbl_hdr->q_hdr[queue_id];
+
+ iris_hfi_queue_set_header(core, queue_id, iface_q);
+}
+
+static void iris_hfi_queue_deinit(struct iris_iface_q_info *iface_q)
+{
+ iface_q->qhdr = NULL;
+ iface_q->kernel_vaddr = NULL;
+ iface_q->device_addr = 0;
+}
+
+int iris_hfi_queues_init(struct iris_core *core)
+{
+ struct iris_hfi_queue_table_header *q_tbl_hdr;
+ u32 queue_size;
+
+ /* Iris hardware requires 4K queue alignment */
+ queue_size = ALIGN((sizeof(*q_tbl_hdr) + (IFACEQ_QUEUE_SIZE * IFACEQ_NUMQ)), SZ_4K);
+ core->iface_q_table_vaddr = dma_alloc_attrs(core->dev, queue_size,
+ &core->iface_q_table_daddr,
+ GFP_KERNEL, DMA_ATTR_WRITE_COMBINE);
+ if (!core->iface_q_table_vaddr) {
+ dev_err(core->dev, "queues alloc and map failed\n");
+ return -ENOMEM;
+ }
+
+ core->sfr_vaddr = dma_alloc_attrs(core->dev, SFR_SIZE,
+ &core->sfr_daddr,
+ GFP_KERNEL, DMA_ATTR_WRITE_COMBINE);
+ if (!core->sfr_vaddr) {
+ dev_err(core->dev, "sfr alloc and map failed\n");
+ dma_free_attrs(core->dev, queue_size, core->iface_q_table_vaddr,
+ core->iface_q_table_daddr, DMA_ATTR_WRITE_COMBINE);
+ return -ENOMEM;
+ }
+
+ iris_hfi_queue_init(core, IFACEQ_CMDQ_ID, &core->command_queue);
+ iris_hfi_queue_init(core, IFACEQ_MSGQ_ID, &core->message_queue);
+ iris_hfi_queue_init(core, IFACEQ_DBGQ_ID, &core->debug_queue);
+
+ q_tbl_hdr = (struct iris_hfi_queue_table_header *)core->iface_q_table_vaddr;
+ q_tbl_hdr->version = 0;
+ q_tbl_hdr->device_addr = (void *)core;
+ strscpy(q_tbl_hdr->name, "iris-hfi-queues", sizeof(q_tbl_hdr->name));
+ q_tbl_hdr->size = sizeof(*q_tbl_hdr);
+ q_tbl_hdr->qhdr0_offset = sizeof(*q_tbl_hdr) -
+ (IFACEQ_NUMQ * sizeof(struct iris_hfi_queue_header));
+ q_tbl_hdr->qhdr_size = sizeof(q_tbl_hdr->q_hdr[0]);
+ q_tbl_hdr->num_q = IFACEQ_NUMQ;
+ q_tbl_hdr->num_active_q = IFACEQ_NUMQ;
+
+ /* Write sfr size in first word to be used by firmware */
+ *((u32 *)core->sfr_vaddr) = SFR_SIZE;
+
+ return 0;
+}
+
+void iris_hfi_queues_deinit(struct iris_core *core)
+{
+ u32 queue_size;
+
+ if (!core->iface_q_table_vaddr)
+ return;
+
+ iris_hfi_queue_deinit(&core->debug_queue);
+ iris_hfi_queue_deinit(&core->message_queue);
+ iris_hfi_queue_deinit(&core->command_queue);
+
+ dma_free_attrs(core->dev, SFR_SIZE, core->sfr_vaddr,
+ core->sfr_daddr, DMA_ATTR_WRITE_COMBINE);
+
+ core->sfr_vaddr = NULL;
+ core->sfr_daddr = 0;
+
+ queue_size = ALIGN(sizeof(struct iris_hfi_queue_table_header) +
+ (IFACEQ_QUEUE_SIZE * IFACEQ_NUMQ), SZ_4K);
+
+ dma_free_attrs(core->dev, queue_size, core->iface_q_table_vaddr,
+ core->iface_q_table_daddr, DMA_ATTR_WRITE_COMBINE);
+
+ core->iface_q_table_vaddr = NULL;
+ core->iface_q_table_daddr = 0;
+}
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_queue.h b/drivers/media/platform/qcom/iris/iris_hfi_queue.h
new file mode 100644
index 000000000000..2174fc5ce618
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_hfi_queue.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_HFI_QUEUE_H__
+#define __IRIS_HFI_QUEUE_H__
+
+struct iris_core;
+
+/*
+ * A maximum of 64 buffers (32 input and 32 output buffers) can be
+ * queued by the V4L2 framework at any given time.
+ */
+#define IFACEQ_MAX_BUF_COUNT 64
+/*
+ * A maximum of 16 sessions are supported.
+ * This value is used to calculate the size of
+ * each shared queue.
+ */
+#define IFACE_MAX_PARALLEL_SESSIONS 16
+#define IFACEQ_DFLT_QHDR 0x0101
+#define IFACEQ_MAX_PKT_SIZE 1024 /* Maximum size of a packet in the queue */
+
+/*
+ * SFR: Subsystem Failure Reason
+ * When the hardware goes into a bad state or fails, the firmware fills this
+ * memory and the driver reads the actual failure reason from the SFR buffer.
+ */
+#define SFR_SIZE SZ_4K /* Iris hardware requires 4K queue alignment */
+
+#define IFACEQ_QUEUE_SIZE (IFACEQ_MAX_PKT_SIZE * \
+ IFACEQ_MAX_BUF_COUNT * IFACE_MAX_PARALLEL_SESSIONS)
+
+/*
+ * Memory layout of the shared queues:
+ *
+ * ||=================|| ^ ^ ^
+ * || || | | |
+ * || Queue Table || 288 Bytes | |
+ * || Header || | | |
+ * || || | | |
+ * ||-----------------|| V | |
+ * ||-----------------|| ^ | |
+ * || || | | |
+ * || Command Queue || 56 Bytes | |
+ * || Header || | | |
+ * || || | | |
+ * ||-----------------|| V 456 Bytes |
+ * ||-----------------|| ^ | |
+ * || || | | |
+ * || Message Queue || 56 Bytes | |
+ * || Header || | | |
+ * || || | | |
+ * ||-----------------|| V | Buffer size aligned to 4k
+ * ||-----------------|| ^ | Overall Queue Size = 2,404 KB
+ * || || | | |
+ * || Debug Queue || 56 Bytes | |
+ * || Header || | | |
+ * || || | | |
+ * ||=================|| V V |
+ * ||=================|| ^ |
+ * || || | |
+ * || Command || 800 KB |
+ * || Queue || | |
+ * || || | |
+ * ||=================|| V |
+ * ||=================|| ^ |
+ * || || | |
+ * || Message || 800 KB |
+ * || Queue || | |
+ * || || | |
+ * ||=================|| V |
+ * ||=================|| ^ |
+ * || || | |
+ * || Debug || 800 KB |
+ * || Queue || | |
+ * || || | |
+ * ||=================|| V |
+ * || || |
+ * ||=================|| V
+ */
+
+/*
+ * Shared queues are used for communication between driver and firmware.
+ * There are 3 types of queues:
+ * Command queue - driver to write any command to firmware.
+ * Message queue - firmware to send any response to driver.
+ * Debug queue - firmware to write debug message.
+ */
+
+/* Host-firmware shared queue ids */
+enum iris_iface_queue {
+ IFACEQ_CMDQ_ID,
+ IFACEQ_MSGQ_ID,
+ IFACEQ_DBGQ_ID,
+ IFACEQ_NUMQ, /* not an index */
+};
+
+/**
+ * struct iris_hfi_queue_header
+ *
+ * @status: Queue status, bits (7:0), 0x1 - active, 0x0 - inactive
+ * @start_addr: Queue start address in non cached memory
+ * @queue_type: Queue ID
+ * @header_type: Default queue header
+ * @q_size: Queue size
+ * Number of queue packets if pkt_size is non-zero
+ * Queue size in bytes if pkt_size is zero
+ * @pkt_size: Size of queue packet entries
+ * 0x0: variable queue packet size
+ * non zero: size of queue packet entry, fixed
+ * @pkt_drop_cnt: Number of packets dropped by sender
+ * @rx_wm: Receiver watermark, applicable in event driven mode
+ * @tx_wm: Sender watermark, applicable in event driven mode
+ * @rx_req: Receiver sets this bit if queue is empty
+ * @tx_req: Sender sets this bit if queue is full
+ * @rx_irq_status: Receiver sets this bit and triggers an interrupt to
+ * the sender after packets are dequeued. Sender clears this bit
+ * @tx_irq_status: Sender sets this bit and triggers an interrupt to
+ * the receiver after packets are queued. Receiver clears this bit
+ * @read_idx: Index up to which the receiver has consumed packets from the queue
+ * @write_idx: Index up to which the sender has written packets into the queue
+ */
+struct iris_hfi_queue_header {
+ u32 status;
+ u32 start_addr;
+ u16 queue_type;
+ u16 header_type;
+ u32 q_size;
+ u32 pkt_size;
+ u32 pkt_drop_cnt;
+ u32 rx_wm;
+ u32 tx_wm;
+ u32 rx_req;
+ u32 tx_req;
+ u32 rx_irq_status;
+ u32 tx_irq_status;
+ u32 read_idx;
+ u32 write_idx;
+};
+
+/**
+ * struct iris_hfi_queue_table_header
+ *
+ * @version: Queue table version number
+ * @size: Queue table size from version to last parameter in qhdr entry
+ * @qhdr0_offset: Offset to the start of first qhdr
+ * @qhdr_size: Queue header size in bytes
+ * @num_q: Total number of queues in Queue table
+ * @num_active_q: Total number of active queues
+ * @device_addr: Device address of the queue
+ * @name: Queue name in characters
+ * @q_hdr: Array of queue headers
+ */
+struct iris_hfi_queue_table_header {
+ u32 version;
+ u32 size;
+ u32 qhdr0_offset;
+ u32 qhdr_size;
+ u32 num_q;
+ u32 num_active_q;
+ void *device_addr;
+ char name[256]; /* NUL-terminated array of characters */
+ struct iris_hfi_queue_header q_hdr[IFACEQ_NUMQ];
+};
+
+struct iris_iface_q_info {
+ struct iris_hfi_queue_header *qhdr;
+ dma_addr_t device_addr;
+ void *kernel_vaddr;
+};
+
+int iris_hfi_queues_init(struct iris_core *core);
+void iris_hfi_queues_deinit(struct iris_core *core);
+
+int iris_hfi_queue_cmd_write_locked(struct iris_core *core, void *pkt, u32 pkt_size);
+int iris_hfi_queue_cmd_write(struct iris_core *core, void *pkt, u32 pkt_size);
+int iris_hfi_queue_msg_read(struct iris_core *core, void *pkt);
+int iris_hfi_queue_dbg_read(struct iris_core *core, void *pkt);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_instance.h b/drivers/media/platform/qcom/iris/iris_instance.h
new file mode 100644
index 000000000000..caa3c6507006
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_instance.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_INSTANCE_H__
+#define __IRIS_INSTANCE_H__
+
+#include <media/v4l2-ctrls.h>
+
+#include "iris_buffer.h"
+#include "iris_core.h"
+#include "iris_utils.h"
+
+/**
+ * struct iris_inst - holds per video instance parameters
+ *
+ * @list: used to attach an instance to the core
+ * @core: pointer to core structure
+ * @session_id: id of current video session
+ * @ctx_q_lock: lock to serialize queue-related ioctls
+ * @lock: lock to serialize forward and reverse threads
+ * @fh: reference of v4l2 file handler
+ * @fmt_src: structure of v4l2_format for source
+ * @fmt_dst: structure of v4l2_format for destination
+ * @ctrl_handler: reference of v4l2 ctrl handler
+ * @crop: structure of crop info
+ * @completion: completion to signal a command response from firmware
+ * @flush_completion: completion to signal the flush command response
+ * @fw_caps: array of supported instance firmware capabilities
+ * @buffers: array of different iris buffers
+ * @fw_min_count: minimum count of buffers needed by firmware
+ * @state: instance state
+ * @sub_state: instance sub state
+ * @once_per_session_set: flag to ensure once-per-session properties are set only once
+ * @max_input_data_size: max size of input data
+ * @power: structure of power info
+ * @icc_data: structure of interconnect data
+ * @m2m_dev: a reference to m2m device structure
+ * @m2m_ctx: a reference to m2m context structure
+ * @sequence_cap: a sequence counter for capture queue
+ * @sequence_out: a sequence counter for output queue
+ * @tss: timestamp metadata
+ * @metadata_idx: index for metadata buffer
+ */
+
+struct iris_inst {
+ struct list_head list;
+ struct iris_core *core;
+ u32 session_id;
+ struct mutex ctx_q_lock; /* lock to serialize queue-related ioctls */
+ struct mutex lock; /* lock to serialize forward and reverse threads */
+ struct v4l2_fh fh;
+ struct v4l2_format *fmt_src;
+ struct v4l2_format *fmt_dst;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct iris_hfi_rect_desc crop;
+ struct completion completion;
+ struct completion flush_completion;
+ struct platform_inst_fw_cap fw_caps[INST_FW_CAP_MAX];
+ struct iris_buffers buffers[BUF_TYPE_MAX];
+ u32 fw_min_count;
+ enum iris_inst_state state;
+ enum iris_inst_sub_state sub_state;
+ bool once_per_session_set;
+ size_t max_input_data_size;
+ struct iris_inst_power power;
+ struct icc_vote_data icc_data;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ u32 sequence_cap;
+ u32 sequence_out;
+ struct iris_ts_metadata tss[VIDEO_MAX_FRAME];
+ u32 metadata_idx;
+};
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_platform_common.h b/drivers/media/platform/qcom/iris/iris_platform_common.h
new file mode 100644
index 000000000000..f6b15d2805fb
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_platform_common.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_PLATFORM_COMMON_H__
+#define __IRIS_PLATFORM_COMMON_H__
+
+#include <linux/bits.h>
+
+struct iris_core;
+struct iris_inst;
+
+#define IRIS_PAS_ID 9
+#define HW_RESPONSE_TIMEOUT_VALUE (1000) /* milliseconds */
+#define AUTOSUSPEND_DELAY_VALUE (HW_RESPONSE_TIMEOUT_VALUE + 500) /* milliseconds */
+
+#define REGISTER_BIT_DEPTH(luma, chroma) ((luma) << 16 | (chroma))
+#define BIT_DEPTH_8 REGISTER_BIT_DEPTH(8, 8)
+#define CODED_FRAMES_PROGRESSIVE 0x0
+#define DEFAULT_MAX_HOST_BUF_COUNT 64
+#define DEFAULT_MAX_HOST_BURST_BUF_COUNT 256
+#define DEFAULT_FPS 30
+
+enum stage_type {
+ STAGE_1 = 1,
+ STAGE_2 = 2,
+};
+
+enum pipe_type {
+ PIPE_1 = 1,
+ PIPE_2 = 2,
+ PIPE_4 = 4,
+};
+
+extern struct iris_platform_data sm8250_data;
+extern struct iris_platform_data sm8550_data;
+
+enum platform_clk_type {
+ IRIS_AXI_CLK,
+ IRIS_CTRL_CLK,
+ IRIS_HW_CLK,
+};
+
+struct platform_clk_data {
+ enum platform_clk_type clk_type;
+ const char *clk_name;
+};
+
+struct tz_cp_config {
+ u32 cp_start;
+ u32 cp_size;
+ u32 cp_nonpixel_start;
+ u32 cp_nonpixel_size;
+};
+
+struct ubwc_config_data {
+ u32 max_channels;
+ u32 mal_length;
+ u32 highest_bank_bit;
+ u32 bank_swzl_level;
+ u32 bank_swz2_level;
+ u32 bank_swz3_level;
+ u32 bank_spreading;
+};
+
+struct platform_inst_caps {
+ u32 min_frame_width;
+ u32 max_frame_width;
+ u32 min_frame_height;
+ u32 max_frame_height;
+ u32 max_mbpf;
+ u32 mb_cycles_vsp;
+ u32 mb_cycles_vpp;
+ u32 mb_cycles_fw;
+ u32 mb_cycles_fw_vpp;
+ u32 num_comv;
+};
+
+enum platform_inst_fw_cap_type {
+ PROFILE = 1,
+ LEVEL,
+ INPUT_BUF_HOST_MAX_COUNT,
+ STAGE,
+ PIPE,
+ POC,
+ CODED_FRAMES,
+ BIT_DEPTH,
+ RAP_FRAME,
+ DEBLOCK,
+ INST_FW_CAP_MAX,
+};
+
+enum platform_inst_fw_cap_flags {
+ CAP_FLAG_DYNAMIC_ALLOWED = BIT(0),
+ CAP_FLAG_MENU = BIT(1),
+ CAP_FLAG_INPUT_PORT = BIT(2),
+ CAP_FLAG_OUTPUT_PORT = BIT(3),
+ CAP_FLAG_CLIENT_SET = BIT(4),
+ CAP_FLAG_BITMASK = BIT(5),
+ CAP_FLAG_VOLATILE = BIT(6),
+};
+
+struct platform_inst_fw_cap {
+ enum platform_inst_fw_cap_type cap_id;
+ s64 min;
+ s64 max;
+ s64 step_or_mask;
+ s64 value;
+ u32 hfi_id;
+ enum platform_inst_fw_cap_flags flags;
+ int (*set)(struct iris_inst *inst,
+ enum platform_inst_fw_cap_type cap_id);
+};
+
+struct bw_info {
+ u32 mbs_per_sec;
+ u32 bw_ddr;
+};
+
+struct iris_core_power {
+ u64 clk_freq;
+ u64 icc_bw;
+};
+
+struct iris_inst_power {
+ u64 min_freq;
+ u32 icc_bw;
+};
+
+struct icc_vote_data {
+ u32 height, width;
+ u32 fps;
+};
+
+enum platform_pm_domain_type {
+ IRIS_CTRL_POWER_DOMAIN,
+ IRIS_HW_POWER_DOMAIN,
+};
+
+struct iris_platform_data {
+ void (*init_hfi_command_ops)(struct iris_core *core);
+ void (*init_hfi_response_ops)(struct iris_core *core);
+ struct iris_inst *(*get_instance)(void);
+ const struct vpu_ops *vpu_ops;
+ void (*set_preset_registers)(struct iris_core *core);
+ const struct icc_info *icc_tbl;
+ unsigned int icc_tbl_size;
+ const struct bw_info *bw_tbl_dec;
+ unsigned int bw_tbl_dec_size;
+ const char * const *pmdomain_tbl;
+ unsigned int pmdomain_tbl_size;
+ const char * const *opp_pd_tbl;
+ unsigned int opp_pd_tbl_size;
+ const struct platform_clk_data *clk_tbl;
+ unsigned int clk_tbl_size;
+ const char * const *clk_rst_tbl;
+ unsigned int clk_rst_tbl_size;
+ u64 dma_mask;
+ const char *fwname;
+ u32 pas_id;
+ struct platform_inst_caps *inst_caps;
+ struct platform_inst_fw_cap *inst_fw_caps;
+ u32 inst_fw_caps_size;
+ struct tz_cp_config *tz_cp_config_data;
+ u32 core_arch;
+ u32 hw_response_timeout;
+ struct ubwc_config_data *ubwc_config;
+ u32 num_vpp_pipe;
+ u32 max_session_count;
+ u32 max_core_mbpf;
+ const u32 *input_config_params;
+ unsigned int input_config_params_size;
+ const u32 *output_config_params;
+ unsigned int output_config_params_size;
+ const u32 *dec_input_prop;
+ unsigned int dec_input_prop_size;
+ const u32 *dec_output_prop;
+ unsigned int dec_output_prop_size;
+ const u32 *dec_ip_int_buf_tbl;
+ unsigned int dec_ip_int_buf_tbl_size;
+ const u32 *dec_op_int_buf_tbl;
+ unsigned int dec_op_int_buf_tbl_size;
+};
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_platform_sm8250.c b/drivers/media/platform/qcom/iris/iris_platform_sm8250.c
new file mode 100644
index 000000000000..5c86fd7b7b6f
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_platform_sm8250.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "iris_core.h"
+#include "iris_ctrls.h"
+#include "iris_platform_common.h"
+#include "iris_resources.h"
+#include "iris_hfi_gen1.h"
+#include "iris_hfi_gen1_defines.h"
+#include "iris_vpu_common.h"
+
+static struct platform_inst_fw_cap inst_fw_cap_sm8250[] = {
+ {
+ .cap_id = PIPE,
+ .min = PIPE_1,
+ .max = PIPE_4,
+ .step_or_mask = 1,
+ .value = PIPE_4,
+ .hfi_id = HFI_PROPERTY_PARAM_WORK_ROUTE,
+ .set = iris_set_pipe,
+ },
+ {
+ .cap_id = STAGE,
+ .min = STAGE_1,
+ .max = STAGE_2,
+ .step_or_mask = 1,
+ .value = STAGE_2,
+ .hfi_id = HFI_PROPERTY_PARAM_WORK_MODE,
+ .set = iris_set_stage,
+ },
+ {
+ .cap_id = DEBLOCK,
+ .min = 0,
+ .max = 1,
+ .step_or_mask = 1,
+ .value = 0,
+ .hfi_id = HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER,
+ .set = iris_set_u32,
+ },
+};
+
+static struct platform_inst_caps platform_inst_cap_sm8250 = {
+ .min_frame_width = 128,
+ .max_frame_width = 8192,
+ .min_frame_height = 128,
+ .max_frame_height = 8192,
+ .max_mbpf = 138240,
+ .mb_cycles_vsp = 25,
+ .mb_cycles_vpp = 200,
+};
+
+static void iris_set_sm8250_preset_registers(struct iris_core *core)
+{
+ writel(0x0, core->reg_base + 0xB0088);
+}
+
+static const struct icc_info sm8250_icc_table[] = {
+ { "cpu-cfg", 1000, 1000 },
+ { "video-mem", 1000, 15000000 },
+};
+
+static const char * const sm8250_clk_reset_table[] = { "bus", "core" };
+
+static const struct bw_info sm8250_bw_table_dec[] = {
+ { ((4096 * 2160) / 256) * 60, 2403000 },
+ { ((4096 * 2160) / 256) * 30, 1224000 },
+ { ((1920 * 1080) / 256) * 60, 812000 },
+ { ((1920 * 1080) / 256) * 30, 416000 },
+};
+
+static const char * const sm8250_pmdomain_table[] = { "venus", "vcodec0" };
+
+static const char * const sm8250_opp_pd_table[] = { "mx" };
+
+static const struct platform_clk_data sm8250_clk_table[] = {
+ {IRIS_AXI_CLK, "iface" },
+ {IRIS_CTRL_CLK, "core" },
+ {IRIS_HW_CLK, "vcodec0_core" },
+};
+
+static struct tz_cp_config tz_cp_config_sm8250 = {
+ .cp_start = 0,
+ .cp_size = 0x25800000,
+ .cp_nonpixel_start = 0x01000000,
+ .cp_nonpixel_size = 0x24800000,
+};
+
+static const u32 sm8250_vdec_input_config_param_default[] = {
+ HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE,
+ HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT,
+ HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO,
+ HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL,
+ HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM,
+ HFI_PROPERTY_PARAM_FRAME_SIZE,
+ HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL,
+ HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE,
+};
+
+static const u32 sm8250_dec_ip_int_buf_tbl[] = {
+ BUF_BIN,
+ BUF_SCRATCH_1,
+};
+
+static const u32 sm8250_dec_op_int_buf_tbl[] = {
+ BUF_DPB,
+};
+
+struct iris_platform_data sm8250_data = {
+ .get_instance = iris_hfi_gen1_get_instance,
+ .init_hfi_command_ops = &iris_hfi_gen1_command_ops_init,
+ .init_hfi_response_ops = iris_hfi_gen1_response_ops_init,
+ .vpu_ops = &iris_vpu2_ops,
+ .set_preset_registers = iris_set_sm8250_preset_registers,
+ .icc_tbl = sm8250_icc_table,
+ .icc_tbl_size = ARRAY_SIZE(sm8250_icc_table),
+ .clk_rst_tbl = sm8250_clk_reset_table,
+ .clk_rst_tbl_size = ARRAY_SIZE(sm8250_clk_reset_table),
+ .bw_tbl_dec = sm8250_bw_table_dec,
+ .bw_tbl_dec_size = ARRAY_SIZE(sm8250_bw_table_dec),
+ .pmdomain_tbl = sm8250_pmdomain_table,
+ .pmdomain_tbl_size = ARRAY_SIZE(sm8250_pmdomain_table),
+ .opp_pd_tbl = sm8250_opp_pd_table,
+ .opp_pd_tbl_size = ARRAY_SIZE(sm8250_opp_pd_table),
+ .clk_tbl = sm8250_clk_table,
+ .clk_tbl_size = ARRAY_SIZE(sm8250_clk_table),
+ /* Upper bound of DMA address range */
+ .dma_mask = 0xe0000000 - 1,
+ .fwname = "qcom/vpu-1.0/venus.mbn",
+ .pas_id = IRIS_PAS_ID,
+ .inst_caps = &platform_inst_cap_sm8250,
+ .inst_fw_caps = inst_fw_cap_sm8250,
+ .inst_fw_caps_size = ARRAY_SIZE(inst_fw_cap_sm8250),
+ .tz_cp_config_data = &tz_cp_config_sm8250,
+ .hw_response_timeout = HW_RESPONSE_TIMEOUT_VALUE,
+ .num_vpp_pipe = 4,
+ .max_session_count = 16,
+ .max_core_mbpf = (8192 * 4352) / 256,
+ .input_config_params =
+ sm8250_vdec_input_config_param_default,
+ .input_config_params_size =
+ ARRAY_SIZE(sm8250_vdec_input_config_param_default),
+
+ .dec_ip_int_buf_tbl = sm8250_dec_ip_int_buf_tbl,
+ .dec_ip_int_buf_tbl_size = ARRAY_SIZE(sm8250_dec_ip_int_buf_tbl),
+ .dec_op_int_buf_tbl = sm8250_dec_op_int_buf_tbl,
+ .dec_op_int_buf_tbl_size = ARRAY_SIZE(sm8250_dec_op_int_buf_tbl),
+};
diff --git a/drivers/media/platform/qcom/iris/iris_platform_sm8550.c b/drivers/media/platform/qcom/iris/iris_platform_sm8550.c
new file mode 100644
index 000000000000..35d278996c43
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_platform_sm8550.c
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "iris_core.h"
+#include "iris_ctrls.h"
+#include "iris_hfi_gen2.h"
+#include "iris_hfi_gen2_defines.h"
+#include "iris_platform_common.h"
+#include "iris_vpu_common.h"
+
+#define VIDEO_ARCH_LX 1
+
+static struct platform_inst_fw_cap inst_fw_cap_sm8550[] = {
+ {
+ .cap_id = PROFILE,
+ .min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+ .max = V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_HIGH) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH),
+ .value = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ .hfi_id = HFI_PROP_PROFILE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_u32_enum,
+ },
+ {
+ .cap_id = LEVEL,
+ .min = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+ .max = V4L2_MPEG_VIDEO_H264_LEVEL_6_2,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1B) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_3) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_6_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_6_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_6_2),
+ .value = V4L2_MPEG_VIDEO_H264_LEVEL_6_1,
+ .hfi_id = HFI_PROP_LEVEL,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_u32_enum,
+ },
+ {
+ .cap_id = INPUT_BUF_HOST_MAX_COUNT,
+ .min = DEFAULT_MAX_HOST_BUF_COUNT,
+ .max = DEFAULT_MAX_HOST_BURST_BUF_COUNT,
+ .step_or_mask = 1,
+ .value = DEFAULT_MAX_HOST_BUF_COUNT,
+ .hfi_id = HFI_PROP_BUFFER_HOST_MAX_COUNT,
+ .flags = CAP_FLAG_INPUT_PORT,
+ .set = iris_set_u32,
+ },
+ {
+ .cap_id = STAGE,
+ .min = STAGE_1,
+ .max = STAGE_2,
+ .step_or_mask = 1,
+ .value = STAGE_2,
+ .hfi_id = HFI_PROP_STAGE,
+ .set = iris_set_stage,
+ },
+ {
+ .cap_id = PIPE,
+ .min = PIPE_1,
+ .max = PIPE_4,
+ .step_or_mask = 1,
+ .value = PIPE_4,
+ .hfi_id = HFI_PROP_PIPE,
+ .set = iris_set_pipe,
+ },
+ {
+ .cap_id = POC,
+ .min = 0,
+ .max = 2,
+ .step_or_mask = 1,
+ .value = 1,
+ .hfi_id = HFI_PROP_PIC_ORDER_CNT_TYPE,
+ },
+ {
+ .cap_id = CODED_FRAMES,
+ .min = CODED_FRAMES_PROGRESSIVE,
+ .max = CODED_FRAMES_PROGRESSIVE,
+ .step_or_mask = 0,
+ .value = CODED_FRAMES_PROGRESSIVE,
+ .hfi_id = HFI_PROP_CODED_FRAMES,
+ },
+ {
+ .cap_id = BIT_DEPTH,
+ .min = BIT_DEPTH_8,
+ .max = BIT_DEPTH_8,
+ .step_or_mask = 1,
+ .value = BIT_DEPTH_8,
+ .hfi_id = HFI_PROP_LUMA_CHROMA_BIT_DEPTH,
+ },
+ {
+ .cap_id = RAP_FRAME,
+ .min = 0,
+ .max = 1,
+ .step_or_mask = 1,
+ .value = 1,
+ .hfi_id = HFI_PROP_DEC_START_FROM_RAP_FRAME,
+ .flags = CAP_FLAG_INPUT_PORT,
+ .set = iris_set_u32,
+ },
+};
+
+static struct platform_inst_caps platform_inst_cap_sm8550 = {
+ .min_frame_width = 96,
+ .max_frame_width = 8192,
+ .min_frame_height = 96,
+ .max_frame_height = 8192,
+ .max_mbpf = (8192 * 4352) / 256,
+ .mb_cycles_vpp = 200,
+ .mb_cycles_fw = 489583,
+ .mb_cycles_fw_vpp = 66234,
+ .num_comv = 0,
+};
+
+static void iris_set_sm8550_preset_registers(struct iris_core *core)
+{
+ writel(0x0, core->reg_base + 0xB0088);
+}
+
+static const struct icc_info sm8550_icc_table[] = {
+ { "cpu-cfg", 1000, 1000 },
+ { "video-mem", 1000, 15000000 },
+};
+
+static const char * const sm8550_clk_reset_table[] = { "bus" };
+
+static const struct bw_info sm8550_bw_table_dec[] = {
+ { ((4096 * 2160) / 256) * 60, 1608000 },
+ { ((4096 * 2160) / 256) * 30, 826000 },
+ { ((1920 * 1080) / 256) * 60, 567000 },
+ { ((1920 * 1080) / 256) * 30, 294000 },
+};
+
+static const char * const sm8550_pmdomain_table[] = { "venus", "vcodec0" };
+
+static const char * const sm8550_opp_pd_table[] = { "mxc", "mmcx" };
+
+static const struct platform_clk_data sm8550_clk_table[] = {
+ {IRIS_AXI_CLK, "iface" },
+ {IRIS_CTRL_CLK, "core" },
+ {IRIS_HW_CLK, "vcodec0_core" },
+};
+
+static struct ubwc_config_data ubwc_config_sm8550 = {
+ .max_channels = 8,
+ .mal_length = 32,
+ .highest_bank_bit = 16,
+ .bank_swzl_level = 0,
+ .bank_swz2_level = 1,
+ .bank_swz3_level = 1,
+ .bank_spreading = 1,
+};
+
+static struct tz_cp_config tz_cp_config_sm8550 = {
+ .cp_start = 0,
+ .cp_size = 0x25800000,
+ .cp_nonpixel_start = 0x01000000,
+ .cp_nonpixel_size = 0x24800000,
+};
+
+static const u32 sm8550_vdec_input_config_params[] = {
+ HFI_PROP_BITSTREAM_RESOLUTION,
+ HFI_PROP_CROP_OFFSETS,
+ HFI_PROP_CODED_FRAMES,
+ HFI_PROP_BUFFER_FW_MIN_OUTPUT_COUNT,
+ HFI_PROP_PIC_ORDER_CNT_TYPE,
+ HFI_PROP_PROFILE,
+ HFI_PROP_LEVEL,
+ HFI_PROP_SIGNAL_COLOR_INFO,
+};
+
+static const u32 sm8550_vdec_output_config_params[] = {
+ HFI_PROP_COLOR_FORMAT,
+ HFI_PROP_LINEAR_STRIDE_SCANLINE,
+};
+
+static const u32 sm8550_vdec_subscribe_input_properties[] = {
+ HFI_PROP_NO_OUTPUT,
+};
+
+static const u32 sm8550_vdec_subscribe_output_properties[] = {
+ HFI_PROP_PICTURE_TYPE,
+ HFI_PROP_CABAC_SESSION,
+};
+
+static const u32 sm8550_dec_ip_int_buf_tbl[] = {
+ BUF_BIN,
+ BUF_COMV,
+ BUF_NON_COMV,
+ BUF_LINE,
+};
+
+static const u32 sm8550_dec_op_int_buf_tbl[] = {
+ BUF_DPB,
+};
+
+struct iris_platform_data sm8550_data = {
+ .get_instance = iris_hfi_gen2_get_instance,
+ .init_hfi_command_ops = iris_hfi_gen2_command_ops_init,
+ .init_hfi_response_ops = iris_hfi_gen2_response_ops_init,
+ .vpu_ops = &iris_vpu3_ops,
+ .set_preset_registers = iris_set_sm8550_preset_registers,
+ .icc_tbl = sm8550_icc_table,
+ .icc_tbl_size = ARRAY_SIZE(sm8550_icc_table),
+ .clk_rst_tbl = sm8550_clk_reset_table,
+ .clk_rst_tbl_size = ARRAY_SIZE(sm8550_clk_reset_table),
+ .bw_tbl_dec = sm8550_bw_table_dec,
+ .bw_tbl_dec_size = ARRAY_SIZE(sm8550_bw_table_dec),
+ .pmdomain_tbl = sm8550_pmdomain_table,
+ .pmdomain_tbl_size = ARRAY_SIZE(sm8550_pmdomain_table),
+ .opp_pd_tbl = sm8550_opp_pd_table,
+ .opp_pd_tbl_size = ARRAY_SIZE(sm8550_opp_pd_table),
+ .clk_tbl = sm8550_clk_table,
+ .clk_tbl_size = ARRAY_SIZE(sm8550_clk_table),
+ /* Upper bound of DMA address range */
+ .dma_mask = 0xe0000000 - 1,
+ .fwname = "qcom/vpu/vpu30_p4.mbn",
+ .pas_id = IRIS_PAS_ID,
+ .inst_caps = &platform_inst_cap_sm8550,
+ .inst_fw_caps = inst_fw_cap_sm8550,
+ .inst_fw_caps_size = ARRAY_SIZE(inst_fw_cap_sm8550),
+ .tz_cp_config_data = &tz_cp_config_sm8550,
+ .core_arch = VIDEO_ARCH_LX,
+ .hw_response_timeout = HW_RESPONSE_TIMEOUT_VALUE,
+ .ubwc_config = &ubwc_config_sm8550,
+ .num_vpp_pipe = 4,
+ .max_session_count = 16,
+ .max_core_mbpf = ((8192 * 4352) / 256) * 2,
+ .input_config_params =
+ sm8550_vdec_input_config_params,
+ .input_config_params_size =
+ ARRAY_SIZE(sm8550_vdec_input_config_params),
+ .output_config_params =
+ sm8550_vdec_output_config_params,
+ .output_config_params_size =
+ ARRAY_SIZE(sm8550_vdec_output_config_params),
+ .dec_input_prop = sm8550_vdec_subscribe_input_properties,
+ .dec_input_prop_size = ARRAY_SIZE(sm8550_vdec_subscribe_input_properties),
+ .dec_output_prop = sm8550_vdec_subscribe_output_properties,
+ .dec_output_prop_size = ARRAY_SIZE(sm8550_vdec_subscribe_output_properties),
+
+ .dec_ip_int_buf_tbl = sm8550_dec_ip_int_buf_tbl,
+ .dec_ip_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_ip_int_buf_tbl),
+ .dec_op_int_buf_tbl = sm8550_dec_op_int_buf_tbl,
+ .dec_op_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_op_int_buf_tbl),
+};
diff --git a/drivers/media/platform/qcom/iris/iris_power.c b/drivers/media/platform/qcom/iris/iris_power.c
new file mode 100644
index 000000000000..dbca42df0910
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_power.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "iris_buffer.h"
+#include "iris_instance.h"
+#include "iris_power.h"
+#include "iris_resources.h"
+#include "iris_vpu_common.h"
+
+static u32 iris_calc_bw(struct iris_inst *inst, struct icc_vote_data *data)
+{
+ const struct bw_info *bw_tbl = NULL;
+ struct iris_core *core = inst->core;
+ u32 num_rows, i, mbs, mbps;
+ u32 icc_bw = 0;
+
+ mbs = DIV_ROUND_UP(data->height, 16) * DIV_ROUND_UP(data->width, 16);
+ mbps = mbs * data->fps;
+ if (mbps == 0)
+ goto exit;
+
+ bw_tbl = core->iris_platform_data->bw_tbl_dec;
+ num_rows = core->iris_platform_data->bw_tbl_dec_size;
+
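+ /*
+ * The table is sorted by descending load; pick the smallest bandwidth
+ * entry that still covers the requested macroblock rate (the first
+ * row acts as the upper bound for higher loads).
+ */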
+ for (i = 0; i < num_rows; i++) {
+ if (i != 0 && mbps > bw_tbl[i].mbs_per_sec)
+ break;
+
+ icc_bw = bw_tbl[i].bw_ddr;
+ }
+
+exit:
+ return icc_bw;
+}
+
+static int iris_set_interconnects(struct iris_inst *inst)
+{
+ struct iris_core *core = inst->core;
+ struct iris_inst *instance;
+ u64 total_bw_ddr = 0;
+ int ret;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(instance, &core->instances, list) {
+ if (!instance->max_input_data_size)
+ continue;
+
+ total_bw_ddr += instance->power.icc_bw;
+ }
+
+ ret = iris_set_icc_bw(core, total_bw_ddr);
+
+ mutex_unlock(&core->lock);
+
+ return ret;
+}
+
+static int iris_vote_interconnects(struct iris_inst *inst)
+{
+ struct icc_vote_data *vote_data = &inst->icc_data;
+ struct v4l2_format *inp_f = inst->fmt_src;
+
+ vote_data->width = inp_f->fmt.pix_mp.width;
+ vote_data->height = inp_f->fmt.pix_mp.height;
+ vote_data->fps = DEFAULT_FPS;
+
+ inst->power.icc_bw = iris_calc_bw(inst, vote_data);
+
+ return iris_set_interconnects(inst);
+}
+
+static int iris_set_clocks(struct iris_inst *inst)
+{
+ struct iris_core *core = inst->core;
+ struct iris_inst *instance;
+ u64 freq = 0;
+ int ret;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(instance, &core->instances, list) {
+ if (!instance->max_input_data_size)
+ continue;
+
+ freq += instance->power.min_freq;
+ }
+
+ core->power.clk_freq = freq;
+ ret = dev_pm_opp_set_rate(core->dev, freq);
+ mutex_unlock(&core->lock);
+
+ return ret;
+}
+
+static int iris_scale_clocks(struct iris_inst *inst)
+{
+ const struct vpu_ops *vpu_ops = inst->core->iris_platform_data->vpu_ops;
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct v4l2_m2m_buffer *buffer, *n;
+ struct iris_buffer *buf;
+ size_t data_size = 0;
+
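+ /* Use the largest queued bitstream buffer as the load estimate for frequency scaling */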
+ v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buffer, n) {
+ buf = to_iris_buffer(&buffer->vb);
+ data_size = max(data_size, buf->data_size);
+ }
+
+ inst->max_input_data_size = data_size;
+ if (!inst->max_input_data_size)
+ return 0;
+
+ inst->power.min_freq = vpu_ops->calc_freq(inst, inst->max_input_data_size);
+
+ return iris_set_clocks(inst);
+}
+
+int iris_scale_power(struct iris_inst *inst)
+{
+ struct iris_core *core = inst->core;
+ int ret;
+
+ if (pm_runtime_suspended(core->dev)) {
+ ret = pm_runtime_resume_and_get(core->dev);
+ if (ret < 0)
+ return ret;
+
+ pm_runtime_put_autosuspend(core->dev);
+ }
+
+ ret = iris_scale_clocks(inst);
+ if (ret)
+ return ret;
+
+ return iris_vote_interconnects(inst);
+}
diff --git a/drivers/media/platform/qcom/iris/iris_power.h b/drivers/media/platform/qcom/iris/iris_power.h
new file mode 100644
index 000000000000..55212660e72d
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_power.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_POWER_H__
+#define __IRIS_POWER_H__
+
+struct iris_inst;
+
+int iris_scale_power(struct iris_inst *inst);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_probe.c b/drivers/media/platform/qcom/iris/iris_probe.c
new file mode 100644
index 000000000000..aca442dcc153
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_probe.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/interconnect.h>
+#include <linux/module.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include "iris_core.h"
+#include "iris_ctrls.h"
+#include "iris_vidc.h"
+
+static int iris_init_icc(struct iris_core *core)
+{
+ const struct icc_info *icc_tbl;
+ u32 i = 0;
+
+ icc_tbl = core->iris_platform_data->icc_tbl;
+
+ core->icc_count = core->iris_platform_data->icc_tbl_size;
+ core->icc_tbl = devm_kzalloc(core->dev,
+ sizeof(struct icc_bulk_data) * core->icc_count,
+ GFP_KERNEL);
+ if (!core->icc_tbl)
+ return -ENOMEM;
+
+ for (i = 0; i < core->icc_count; i++) {
+ core->icc_tbl[i].name = icc_tbl[i].name;
+ core->icc_tbl[i].avg_bw = icc_tbl[i].bw_min_kbps;
+ core->icc_tbl[i].peak_bw = 0;
+ }
+
+ return devm_of_icc_bulk_get(core->dev, core->icc_count, core->icc_tbl);
+}
+
+static int iris_init_power_domains(struct iris_core *core)
+{
+ const struct platform_clk_data *clk_tbl;
+ u32 clk_cnt, i;
+ int ret;
+
+ struct dev_pm_domain_attach_data iris_pd_data = {
+ .pd_names = core->iris_platform_data->pmdomain_tbl,
+ .num_pd_names = core->iris_platform_data->pmdomain_tbl_size,
+ .pd_flags = PD_FLAG_NO_DEV_LINK,
+ };
+
+ struct dev_pm_domain_attach_data iris_opp_pd_data = {
+ .pd_names = core->iris_platform_data->opp_pd_tbl,
+ .num_pd_names = core->iris_platform_data->opp_pd_tbl_size,
+ .pd_flags = PD_FLAG_DEV_LINK_ON,
+ };
+
+ ret = devm_pm_domain_attach_list(core->dev, &iris_pd_data, &core->pmdomain_tbl);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_pm_domain_attach_list(core->dev, &iris_opp_pd_data, &core->opp_pmdomain_tbl);
+ if (ret < 0)
+ return ret;
+
+ clk_tbl = core->iris_platform_data->clk_tbl;
+ clk_cnt = core->iris_platform_data->clk_tbl_size;
+
+ for (i = 0; i < clk_cnt; i++) {
+ if (clk_tbl[i].clk_type == IRIS_HW_CLK) {
+ ret = devm_pm_opp_set_clkname(core->dev, clk_tbl[i].clk_name);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return devm_pm_opp_of_add_table(core->dev);
+}
+
+static int iris_init_clocks(struct iris_core *core)
+{
+ int ret;
+
+ ret = devm_clk_bulk_get_all(core->dev, &core->clock_tbl);
+ if (ret < 0)
+ return ret;
+
+ core->clk_count = ret;
+
+ return 0;
+}
+
+static int iris_init_resets(struct iris_core *core)
+{
+ const char * const *rst_tbl;
+ u32 rst_tbl_size;
+ u32 i = 0;
+
+ rst_tbl = core->iris_platform_data->clk_rst_tbl;
+ rst_tbl_size = core->iris_platform_data->clk_rst_tbl_size;
+
+ core->resets = devm_kzalloc(core->dev,
+ sizeof(*core->resets) * rst_tbl_size,
+ GFP_KERNEL);
+ if (!core->resets)
+ return -ENOMEM;
+
+ for (i = 0; i < rst_tbl_size; i++)
+ core->resets[i].id = rst_tbl[i];
+
+ return devm_reset_control_bulk_get_exclusive(core->dev, rst_tbl_size, core->resets);
+}
+
+static int iris_init_resources(struct iris_core *core)
+{
+ int ret;
+
+ ret = iris_init_icc(core);
+ if (ret)
+ return ret;
+
+ ret = iris_init_power_domains(core);
+ if (ret)
+ return ret;
+
+ ret = iris_init_clocks(core);
+ if (ret)
+ return ret;
+
+ return iris_init_resets(core);
+}
+
+static int iris_register_video_device(struct iris_core *core)
+{
+ struct video_device *vdev;
+ int ret;
+
+ vdev = video_device_alloc();
+ if (!vdev)
+ return -ENOMEM;
+
+ strscpy(vdev->name, "qcom-iris-decoder", sizeof(vdev->name));
+ vdev->release = video_device_release;
+ vdev->fops = core->iris_v4l2_file_ops;
+ vdev->ioctl_ops = core->iris_v4l2_ioctl_ops;
+ vdev->vfl_dir = VFL_DIR_M2M;
+ vdev->v4l2_dev = &core->v4l2_dev;
+ vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (ret)
+ goto err_vdev_release;
+
+ core->vdev_dec = vdev;
+ video_set_drvdata(vdev, core);
+
+ return 0;
+
+err_vdev_release:
+ video_device_release(vdev);
+
+ return ret;
+}
+
+static void iris_remove(struct platform_device *pdev)
+{
+ struct iris_core *core;
+
+ core = platform_get_drvdata(pdev);
+ if (!core)
+ return;
+
+ iris_core_deinit(core);
+
+ video_unregister_device(core->vdev_dec);
+
+ v4l2_device_unregister(&core->v4l2_dev);
+
+ mutex_destroy(&core->lock);
+}
+
+static void iris_sys_error_handler(struct work_struct *work)
+{
+ struct iris_core *core =
+ container_of(work, struct iris_core, sys_error_handler.work);
+
+ iris_core_deinit(core);
+ iris_core_init(core);
+}
+
+static int iris_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct iris_core *core;
+ u64 dma_mask;
+ int ret;
+
+ core = devm_kzalloc(&pdev->dev, sizeof(*core), GFP_KERNEL);
+ if (!core)
+ return -ENOMEM;
+ core->dev = dev;
+
+ core->state = IRIS_CORE_DEINIT;
+ mutex_init(&core->lock);
+ init_completion(&core->core_init_done);
+
+ core->response_packet = devm_kzalloc(core->dev, IFACEQ_CORE_PKT_SIZE, GFP_KERNEL);
+ if (!core->response_packet)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&core->instances);
+ INIT_DELAYED_WORK(&core->sys_error_handler, iris_sys_error_handler);
+
+ core->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(core->reg_base))
+ return PTR_ERR(core->reg_base);
+
+ core->irq = platform_get_irq(pdev, 0);
+ if (core->irq < 0)
+ return core->irq;
+
+ core->iris_platform_data = of_device_get_match_data(core->dev);
+
+ ret = devm_request_threaded_irq(core->dev, core->irq, iris_hfi_isr,
+ iris_hfi_isr_handler, IRQF_TRIGGER_HIGH, "iris", core);
+ if (ret)
+ return ret;
+
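+ /* Request the IRQ up front but keep it disabled until the core is initialized. */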
+ disable_irq_nosync(core->irq);
+
+ iris_init_ops(core);
+ core->iris_platform_data->init_hfi_command_ops(core);
+ core->iris_platform_data->init_hfi_response_ops(core);
+
+ ret = iris_init_resources(core);
+ if (ret)
+ return ret;
+
+ iris_session_init_caps(core);
+
+ ret = v4l2_device_register(dev, &core->v4l2_dev);
+ if (ret)
+ return ret;
+
+ ret = iris_register_video_device(core);
+ if (ret)
+ goto err_v4l2_unreg;
+
+ platform_set_drvdata(pdev, core);
+
+ dma_mask = core->iris_platform_data->dma_mask;
+
+ ret = dma_set_mask_and_coherent(dev, dma_mask);
+ if (ret)
+ goto err_vdev_unreg;
+
+ dma_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
+ dma_set_seg_boundary(&pdev->dev, DMA_BIT_MASK(32));
+
+ pm_runtime_set_autosuspend_delay(core->dev, AUTOSUSPEND_DELAY_VALUE);
+ pm_runtime_use_autosuspend(core->dev);
+ ret = devm_pm_runtime_enable(core->dev);
+ if (ret)
+ goto err_vdev_unreg;
+
+ return 0;
+
+err_vdev_unreg:
+ video_unregister_device(core->vdev_dec);
+err_v4l2_unreg:
+ v4l2_device_unregister(&core->v4l2_dev);
+
+ return ret;
+}
+
+static int __maybe_unused iris_pm_suspend(struct device *dev)
+{
+ struct iris_core *core;
+ int ret = 0;
+
+ core = dev_get_drvdata(dev);
+
+ mutex_lock(&core->lock);
+ if (core->state != IRIS_CORE_INIT)
+ goto exit;
+
+ ret = iris_hfi_pm_suspend(core);
+
+exit:
+ mutex_unlock(&core->lock);
+
+ return ret;
+}
+
+static int __maybe_unused iris_pm_resume(struct device *dev)
+{
+ struct iris_core *core;
+ int ret = 0;
+
+ core = dev_get_drvdata(dev);
+
+ mutex_lock(&core->lock);
+ if (core->state != IRIS_CORE_INIT)
+ goto exit;
+
+ ret = iris_hfi_pm_resume(core);
+ pm_runtime_mark_last_busy(core->dev);
+
+exit:
+ mutex_unlock(&core->lock);
+
+ return ret;
+}
+
+static const struct dev_pm_ops iris_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(iris_pm_suspend, iris_pm_resume, NULL)
+};
+
+static const struct of_device_id iris_dt_match[] = {
+ {
+ .compatible = "qcom,sm8550-iris",
+ .data = &sm8550_data,
+ },
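+ /*
+ * SM8250 is also handled by the venus driver; match it here only when
+ * that driver is disabled so both never bind to the same device.
+ */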
+#if (!IS_ENABLED(CONFIG_VIDEO_QCOM_VENUS))
+ {
+ .compatible = "qcom,sm8250-venus",
+ .data = &sm8250_data,
+ },
+#endif
+ { },
+};
+MODULE_DEVICE_TABLE(of, iris_dt_match);
+
+static struct platform_driver qcom_iris_driver = {
+ .probe = iris_probe,
+ .remove = iris_remove,
+ .driver = {
+ .name = "qcom-iris",
+ .of_match_table = iris_dt_match,
+ .pm = &iris_pm_ops,
+ },
+};
+
+module_platform_driver(qcom_iris_driver);
+MODULE_DESCRIPTION("Qualcomm iris video driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/qcom/iris/iris_resources.c b/drivers/media/platform/qcom/iris/iris_resources.c
new file mode 100644
index 000000000000..cf32f268b703
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_resources.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/interconnect.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include "iris_core.h"
+#include "iris_resources.h"
+
+#define BW_THRESHOLD 50000
+
+int iris_set_icc_bw(struct iris_core *core, unsigned long icc_bw)
+{
+ unsigned long bw_kbps = 0, bw_prev = 0;
+ const struct icc_info *icc_tbl;
+ int ret = 0, i;
+
+ icc_tbl = core->iris_platform_data->icc_tbl;
+
+ for (i = 0; i < core->icc_count; i++) {
+ if (!strcmp(core->icc_tbl[i].name, "video-mem")) {
+ bw_kbps = icc_bw;
+ bw_prev = core->power.icc_bw;
+
+ bw_kbps = clamp_t(typeof(bw_kbps), bw_kbps,
+ icc_tbl[i].bw_min_kbps, icc_tbl[i].bw_max_kbps);
+
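+ /*
+ * Ignore requests within BW_THRESHOLD kbps of the current vote to
+ * avoid needless interconnect updates.
+ */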
+ if (bw_prev && abs_diff(bw_kbps, bw_prev) < BW_THRESHOLD)
+ return ret;
+
+ core->icc_tbl[i].avg_bw = bw_kbps;
+
+ core->power.icc_bw = bw_kbps;
+ break;
+ }
+ }
+
+ return icc_bulk_set_bw(core->icc_count, core->icc_tbl);
+}
+
+int iris_unset_icc_bw(struct iris_core *core)
+{
+ u32 i;
+
+ core->power.icc_bw = 0;
+
+ for (i = 0; i < core->icc_count; i++) {
+ core->icc_tbl[i].avg_bw = 0;
+ core->icc_tbl[i].peak_bw = 0;
+ }
+
+ return icc_bulk_set_bw(core->icc_count, core->icc_tbl);
+}
+
+int iris_enable_power_domains(struct iris_core *core, struct device *pd_dev)
+{
+ int ret;
+
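+ /* ULONG_MAX requests the highest available OPP before powering on the domain. */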
+ ret = dev_pm_opp_set_rate(core->dev, ULONG_MAX);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(pd_dev);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int iris_disable_power_domains(struct iris_core *core, struct device *pd_dev)
+{
+ int ret;
+
+ ret = dev_pm_opp_set_rate(core->dev, 0);
+ if (ret)
+ return ret;
+
+ pm_runtime_put_sync(pd_dev);
+
+ return 0;
+}
+
+static struct clk *iris_get_clk_by_type(struct iris_core *core, enum platform_clk_type clk_type)
+{
+ const struct platform_clk_data *clk_tbl;
+ u32 clk_cnt, i, j;
+
+ clk_tbl = core->iris_platform_data->clk_tbl;
+ clk_cnt = core->iris_platform_data->clk_tbl_size;
+
+ for (i = 0; i < clk_cnt; i++) {
+ if (clk_tbl[i].clk_type == clk_type) {
+ for (j = 0; core->clock_tbl && j < core->clk_count; j++) {
+ if (!strcmp(core->clock_tbl[j].id, clk_tbl[i].clk_name))
+ return core->clock_tbl[j].clk;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+int iris_prepare_enable_clock(struct iris_core *core, enum platform_clk_type clk_type)
+{
+ struct clk *clock;
+
+ clock = iris_get_clk_by_type(core, clk_type);
+ if (!clock)
+ return -EINVAL;
+
+ return clk_prepare_enable(clock);
+}
+
+int iris_disable_unprepare_clock(struct iris_core *core, enum platform_clk_type clk_type)
+{
+ struct clk *clock;
+
+ clock = iris_get_clk_by_type(core, clk_type);
+ if (!clock)
+ return -EINVAL;
+
+ clk_disable_unprepare(clock);
+
+ return 0;
+}
diff --git a/drivers/media/platform/qcom/iris/iris_resources.h b/drivers/media/platform/qcom/iris/iris_resources.h
new file mode 100644
index 000000000000..f723dfe5bd81
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_resources.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_RESOURCES_H__
+#define __IRIS_RESOURCES_H__
+
+struct iris_core;
+
+int iris_enable_power_domains(struct iris_core *core, struct device *pd_dev);
+int iris_disable_power_domains(struct iris_core *core, struct device *pd_dev);
+int iris_unset_icc_bw(struct iris_core *core);
+int iris_set_icc_bw(struct iris_core *core, unsigned long icc_bw);
+int iris_disable_unprepare_clock(struct iris_core *core, enum platform_clk_type clk_type);
+int iris_prepare_enable_clock(struct iris_core *core, enum platform_clk_type clk_type);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_state.c b/drivers/media/platform/qcom/iris/iris_state.c
new file mode 100644
index 000000000000..5976e926c83d
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_state.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <media/v4l2-mem2mem.h>
+
+#include "iris_instance.h"
+
+static bool iris_allow_inst_state_change(struct iris_inst *inst,
+ enum iris_inst_state req_state)
+{
+ switch (inst->state) {
+ case IRIS_INST_INIT:
+ if (req_state == IRIS_INST_INPUT_STREAMING ||
+ req_state == IRIS_INST_OUTPUT_STREAMING ||
+ req_state == IRIS_INST_DEINIT)
+ return true;
+ return false;
+ case IRIS_INST_INPUT_STREAMING:
+ if (req_state == IRIS_INST_INIT ||
+ req_state == IRIS_INST_STREAMING ||
+ req_state == IRIS_INST_DEINIT)
+ return true;
+ return false;
+ case IRIS_INST_OUTPUT_STREAMING:
+ if (req_state == IRIS_INST_INIT ||
+ req_state == IRIS_INST_STREAMING ||
+ req_state == IRIS_INST_DEINIT)
+ return true;
+ return false;
+ case IRIS_INST_STREAMING:
+ if (req_state == IRIS_INST_INPUT_STREAMING ||
+ req_state == IRIS_INST_OUTPUT_STREAMING ||
+ req_state == IRIS_INST_DEINIT)
+ return true;
+ return false;
+ case IRIS_INST_DEINIT:
+ if (req_state == IRIS_INST_INIT)
+ return true;
+ return false;
+ default:
+ return false;
+ }
+}
+
+int iris_inst_change_state(struct iris_inst *inst,
+ enum iris_inst_state request_state)
+{
+ if (inst->state == IRIS_INST_ERROR)
+ return 0;
+
+ if (inst->state == request_state)
+ return 0;
+
+ if (request_state == IRIS_INST_ERROR)
+ goto change_state;
+
+ if (!iris_allow_inst_state_change(inst, request_state))
+ return -EINVAL;
+
+change_state:
+ dev_dbg(inst->core->dev, "state changed from %x to %x\n",
+ inst->state, request_state);
+ inst->state = request_state;
+
+ return 0;
+}
+
+int iris_inst_state_change_streamon(struct iris_inst *inst, u32 plane)
+{
+ enum iris_inst_state new_state = IRIS_INST_ERROR;
+
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ if (inst->state == IRIS_INST_INIT)
+ new_state = IRIS_INST_INPUT_STREAMING;
+ else if (inst->state == IRIS_INST_OUTPUT_STREAMING)
+ new_state = IRIS_INST_STREAMING;
+ } else if (V4L2_TYPE_IS_CAPTURE(plane)) {
+ if (inst->state == IRIS_INST_INIT)
+ new_state = IRIS_INST_OUTPUT_STREAMING;
+ else if (inst->state == IRIS_INST_INPUT_STREAMING)
+ new_state = IRIS_INST_STREAMING;
+ }
+
+ return iris_inst_change_state(inst, new_state);
+}
+
+int iris_inst_state_change_streamoff(struct iris_inst *inst, u32 plane)
+{
+ enum iris_inst_state new_state = IRIS_INST_ERROR;
+
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ if (inst->state == IRIS_INST_INPUT_STREAMING)
+ new_state = IRIS_INST_INIT;
+ else if (inst->state == IRIS_INST_STREAMING)
+ new_state = IRIS_INST_OUTPUT_STREAMING;
+ } else if (V4L2_TYPE_IS_CAPTURE(plane)) {
+ if (inst->state == IRIS_INST_OUTPUT_STREAMING)
+ new_state = IRIS_INST_INIT;
+ else if (inst->state == IRIS_INST_STREAMING)
+ new_state = IRIS_INST_INPUT_STREAMING;
+ }
+
+ return iris_inst_change_state(inst, new_state);
+}
+
+static bool iris_inst_allow_sub_state(struct iris_inst *inst, enum iris_inst_sub_state sub_state)
+{
+ if (!sub_state)
+ return true;
+
+ switch (inst->state) {
+ case IRIS_INST_INIT:
+ if (sub_state & IRIS_INST_SUB_LOAD_RESOURCES)
+ return true;
+ return false;
+ case IRIS_INST_INPUT_STREAMING:
+ if (sub_state & (IRIS_INST_SUB_FIRST_IPSC | IRIS_INST_SUB_DRC |
+ IRIS_INST_SUB_DRAIN | IRIS_INST_SUB_INPUT_PAUSE))
+ return true;
+ return false;
+ case IRIS_INST_OUTPUT_STREAMING:
+ if (sub_state & (IRIS_INST_SUB_DRC_LAST |
+ IRIS_INST_SUB_DRAIN_LAST | IRIS_INST_SUB_OUTPUT_PAUSE))
+ return true;
+ return false;
+ case IRIS_INST_STREAMING:
+ if (sub_state & (IRIS_INST_SUB_DRC | IRIS_INST_SUB_DRAIN |
+ IRIS_INST_SUB_DRC_LAST | IRIS_INST_SUB_DRAIN_LAST |
+ IRIS_INST_SUB_INPUT_PAUSE | IRIS_INST_SUB_OUTPUT_PAUSE))
+ return true;
+ return false;
+ case IRIS_INST_DEINIT:
+ if (sub_state & (IRIS_INST_SUB_DRC | IRIS_INST_SUB_DRAIN |
+ IRIS_INST_SUB_DRC_LAST | IRIS_INST_SUB_DRAIN_LAST |
+ IRIS_INST_SUB_INPUT_PAUSE | IRIS_INST_SUB_OUTPUT_PAUSE))
+ return true;
+ return false;
+ default:
+ return false;
+ }
+}
+
+int iris_inst_change_sub_state(struct iris_inst *inst,
+ enum iris_inst_sub_state clear_sub_state,
+ enum iris_inst_sub_state set_sub_state)
+{
+ enum iris_inst_sub_state prev_sub_state;
+
+ if (inst->state == IRIS_INST_ERROR)
+ return 0;
+
+ if (!clear_sub_state && !set_sub_state)
+ return 0;
+
+ if ((clear_sub_state & set_sub_state) ||
+ set_sub_state > IRIS_INST_MAX_SUB_STATE_VALUE ||
+ clear_sub_state > IRIS_INST_MAX_SUB_STATE_VALUE)
+ return -EINVAL;
+
+ prev_sub_state = inst->sub_state;
+
+ if (!iris_inst_allow_sub_state(inst, set_sub_state))
+ return -EINVAL;
+
+ inst->sub_state |= set_sub_state;
+ inst->sub_state &= ~clear_sub_state;
+
+ if (inst->sub_state != prev_sub_state)
+ dev_dbg(inst->core->dev, "sub_state changed from %x to %x\n",
+ prev_sub_state, inst->sub_state);
+
+ return 0;
+}
+
+int iris_inst_sub_state_change_drc(struct iris_inst *inst)
+{
+ enum iris_inst_sub_state set_sub_state = 0;
+
+ if (inst->sub_state & IRIS_INST_SUB_DRC)
+ return -EINVAL;
+
+ if (inst->state == IRIS_INST_INPUT_STREAMING ||
+ inst->state == IRIS_INST_INIT)
+ set_sub_state = IRIS_INST_SUB_FIRST_IPSC | IRIS_INST_SUB_INPUT_PAUSE;
+ else
+ set_sub_state = IRIS_INST_SUB_DRC | IRIS_INST_SUB_INPUT_PAUSE;
+
+ return iris_inst_change_sub_state(inst, 0, set_sub_state);
+}
+
+int iris_inst_sub_state_change_drain_last(struct iris_inst *inst)
+{
+ enum iris_inst_sub_state set_sub_state;
+
+ if (inst->sub_state & IRIS_INST_SUB_DRAIN_LAST)
+ return -EINVAL;
+
+ if (!(inst->sub_state & IRIS_INST_SUB_DRAIN))
+ return -EINVAL;
+
+ set_sub_state = IRIS_INST_SUB_DRAIN_LAST | IRIS_INST_SUB_OUTPUT_PAUSE;
+
+ return iris_inst_change_sub_state(inst, 0, set_sub_state);
+}
+
+int iris_inst_sub_state_change_drc_last(struct iris_inst *inst)
+{
+ enum iris_inst_sub_state set_sub_state;
+
+ if (inst->sub_state & IRIS_INST_SUB_DRC_LAST)
+ return -EINVAL;
+
+ if (!(inst->sub_state & IRIS_INST_SUB_DRC) ||
+ !(inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE))
+ return -EINVAL;
+
+ if (inst->sub_state & IRIS_INST_SUB_FIRST_IPSC)
+ return 0;
+
+ set_sub_state = IRIS_INST_SUB_DRC_LAST | IRIS_INST_SUB_OUTPUT_PAUSE;
+
+ return iris_inst_change_sub_state(inst, 0, set_sub_state);
+}
+
+int iris_inst_sub_state_change_pause(struct iris_inst *inst, u32 plane)
+{
+ enum iris_inst_sub_state set_sub_state;
+
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ if (inst->sub_state & IRIS_INST_SUB_DRC &&
+ !(inst->sub_state & IRIS_INST_SUB_DRC_LAST))
+ return -EINVAL;
+
+ if (inst->sub_state & IRIS_INST_SUB_DRAIN &&
+ !(inst->sub_state & IRIS_INST_SUB_DRAIN_LAST))
+ return -EINVAL;
+
+ set_sub_state = IRIS_INST_SUB_INPUT_PAUSE;
+ } else {
+ set_sub_state = IRIS_INST_SUB_OUTPUT_PAUSE;
+ }
+
+ return iris_inst_change_sub_state(inst, 0, set_sub_state);
+}
+
+static inline bool iris_drc_pending(struct iris_inst *inst)
+{
+ return inst->sub_state & IRIS_INST_SUB_DRC &&
+ inst->sub_state & IRIS_INST_SUB_DRC_LAST;
+}
+
+static inline bool iris_drain_pending(struct iris_inst *inst)
+{
+ return inst->sub_state & IRIS_INST_SUB_DRAIN &&
+ inst->sub_state & IRIS_INST_SUB_DRAIN_LAST;
+}
+
+bool iris_allow_cmd(struct iris_inst *inst, u32 cmd)
+{
+ struct vb2_queue *src_q = v4l2_m2m_get_src_vq(inst->m2m_ctx);
+ struct vb2_queue *dst_q = v4l2_m2m_get_dst_vq(inst->m2m_ctx);
+
+ if (cmd == V4L2_DEC_CMD_START) {
+ if (vb2_is_streaming(src_q) || vb2_is_streaming(dst_q))
+ if (iris_drc_pending(inst) || iris_drain_pending(inst))
+ return true;
+ } else if (cmd == V4L2_DEC_CMD_STOP) {
+ if (vb2_is_streaming(src_q))
+ if (inst->sub_state != IRIS_INST_SUB_DRAIN)
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/media/platform/qcom/iris/iris_state.h b/drivers/media/platform/qcom/iris/iris_state.h
new file mode 100644
index 000000000000..78c61aac5e7e
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_state.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_STATE_H__
+#define __IRIS_STATE_H__
+
+struct iris_inst;
+
+/**
+ * enum iris_core_state
+ *
+ * @IRIS_CORE_DEINIT: default state.
+ * @IRIS_CORE_INIT: core state with core initialized. FW loaded and
+ * HW brought out of reset, shared queues established
+ * between host driver and firmware.
+ * @IRIS_CORE_ERROR: error state.
+ *
+ * -----------
+ * |
+ * V
+ * -----------
+ * +--->| DEINIT |<---+
+ * | ----------- |
+ * | | |
+ * | v |
+ * | ----------- |
+ * | / \ |
+ * | / \ |
+ * | / \ |
+ * | v v v
+ * ----------- -----------
+ * | INIT |--->| ERROR |
+ * ----------- -----------
+ */
+enum iris_core_state {
+ IRIS_CORE_DEINIT,
+ IRIS_CORE_INIT,
+ IRIS_CORE_ERROR,
+};
+
+/**
+ * enum iris_inst_state
+ *
+ * @IRIS_INST_INIT: video instance is opened.
+ * @IRIS_INST_INPUT_STREAMING: stream on is completed on output plane.
+ * @IRIS_INST_OUTPUT_STREAMING: stream on is completed on capture plane.
+ * @IRIS_INST_STREAMING: stream on is completed on both output and capture planes.
+ * @IRIS_INST_DEINIT: video instance is closed.
+ * @IRIS_INST_ERROR: error state.
+ * |
+ * V
+ * -------------
+ * +--------| INIT |----------+
+ * | ------------- |
+ * | ^ ^ |
+ * | / \ |
+ * | / \ |
+ * | v v |
+ * | ----------- ----------- |
+ * | | INPUT OUTPUT | |
+ * |---| STREAMING STREAMING |---|
+ * | ----------- ----------- |
+ * | ^ ^ |
+ * | \ / |
+ * | \ / |
+ * | v v |
+ * | ------------- |
+ * |--------| STREAMING |-----------|
+ * | ------------- |
+ * | | |
+ * | | |
+ * | v |
+ * | ----------- |
+ * +-------->| DEINIT |<----------+
+ * | ----------- |
+ * | | |
+ * | | |
+ * | v |
+ * | ---------- |
+ * +-------->| ERROR |<------------+
+ * ----------
+ */
+enum iris_inst_state {
+ IRIS_INST_DEINIT,
+ IRIS_INST_INIT,
+ IRIS_INST_INPUT_STREAMING,
+ IRIS_INST_OUTPUT_STREAMING,
+ IRIS_INST_STREAMING,
+ IRIS_INST_ERROR,
+};
+
+#define IRIS_INST_SUB_STATES 8
+#define IRIS_INST_MAX_SUB_STATE_VALUE ((1 << IRIS_INST_SUB_STATES) - 1)
+
+/**
+ * enum iris_inst_sub_state
+ *
+ * @IRIS_INST_SUB_FIRST_IPSC: indicates source change is received from firmware
+ * when output port is not yet streaming.
+ * @IRIS_INST_SUB_DRC: indicates source change is received from firmware
+ * when output port is streaming and source change event is
+ * sent to client.
+ * @IRIS_INST_SUB_DRC_LAST: indicates last buffer is received from firmware
+ * as part of source change.
+ * @IRIS_INST_SUB_DRAIN: indicates drain is in progress.
+ * @IRIS_INST_SUB_DRAIN_LAST: indicates last buffer is received from firmware
+ * as part of drain sequence.
+ * @IRIS_INST_SUB_INPUT_PAUSE: source change is received from firmware. This
+ * indicates that firmware has paused and will not
+ * process any further input frames.
+ * @IRIS_INST_SUB_OUTPUT_PAUSE: last buffer is received from firmware as part
+ * of the drc sequence. This indicates that firmware
+ * has paused and will not process any further output frames.
+ * @IRIS_INST_SUB_LOAD_RESOURCES: indicates all the resources have been loaded by the
+ * firmware and it is ready for processing.
+ */
+enum iris_inst_sub_state {
+ IRIS_INST_SUB_FIRST_IPSC = BIT(0),
+ IRIS_INST_SUB_DRC = BIT(1),
+ IRIS_INST_SUB_DRC_LAST = BIT(2),
+ IRIS_INST_SUB_DRAIN = BIT(3),
+ IRIS_INST_SUB_DRAIN_LAST = BIT(4),
+ IRIS_INST_SUB_INPUT_PAUSE = BIT(5),
+ IRIS_INST_SUB_OUTPUT_PAUSE = BIT(6),
+ IRIS_INST_SUB_LOAD_RESOURCES = BIT(7),
+};
+
+int iris_inst_change_state(struct iris_inst *inst,
+ enum iris_inst_state request_state);
+int iris_inst_change_sub_state(struct iris_inst *inst,
+ enum iris_inst_sub_state clear_sub_state,
+ enum iris_inst_sub_state set_sub_state);
+
+int iris_inst_state_change_streamon(struct iris_inst *inst, u32 plane);
+int iris_inst_state_change_streamoff(struct iris_inst *inst, u32 plane);
+int iris_inst_sub_state_change_drc(struct iris_inst *inst);
+int iris_inst_sub_state_change_drain_last(struct iris_inst *inst);
+int iris_inst_sub_state_change_drc_last(struct iris_inst *inst);
+int iris_inst_sub_state_change_pause(struct iris_inst *inst, u32 plane);
+bool iris_allow_cmd(struct iris_inst *inst, u32 cmd);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_utils.c b/drivers/media/platform/qcom/iris/iris_utils.c
new file mode 100644
index 000000000000..83c70d6a2d90
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_utils.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/pm_runtime.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "iris_instance.h"
+#include "iris_utils.h"
+
+bool iris_res_is_less_than(u32 width, u32 height,
+ u32 ref_width, u32 ref_height)
+{
+ u32 num_mbs = NUM_MBS_PER_FRAME(height, width);
+ u32 max_side = max(ref_width, ref_height);
+
+ if (num_mbs < NUM_MBS_PER_FRAME(ref_height, ref_width) &&
+ width < max_side &&
+ height < max_side)
+ return true;
+
+ return false;
+}
+
+int iris_get_mbpf(struct iris_inst *inst)
+{
+ struct v4l2_format *inp_f = inst->fmt_src;
+ u32 height = max(inp_f->fmt.pix_mp.height, inst->crop.height);
+ u32 width = max(inp_f->fmt.pix_mp.width, inst->crop.width);
+
+ return NUM_MBS_PER_FRAME(height, width);
+}
+
+bool iris_split_mode_enabled(struct iris_inst *inst)
+{
+ return inst->fmt_dst->fmt.pix_mp.pixelformat == V4L2_PIX_FMT_NV12;
+}
+
+void iris_helper_buffers_done(struct iris_inst *inst, unsigned int type,
+ enum vb2_buffer_state state)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct vb2_v4l2_buffer *buf;
+
+ if (V4L2_TYPE_IS_OUTPUT(type)) {
+ while ((buf = v4l2_m2m_src_buf_remove(m2m_ctx)))
+ v4l2_m2m_buf_done(buf, state);
+ } else if (V4L2_TYPE_IS_CAPTURE(type)) {
+ while ((buf = v4l2_m2m_dst_buf_remove(m2m_ctx)))
+ v4l2_m2m_buf_done(buf, state);
+ }
+}
+
+int iris_wait_for_session_response(struct iris_inst *inst, bool is_flush)
+{
+ struct iris_core *core = inst->core;
+ u32 hw_response_timeout_val;
+ struct completion *done;
+ int ret;
+
+ hw_response_timeout_val = core->iris_platform_data->hw_response_timeout;
+ done = is_flush ? &inst->flush_completion : &inst->completion;
+
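+ /* Wait with the instance lock dropped so the firmware response handler is not blocked. */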
+ mutex_unlock(&inst->lock);
+ ret = wait_for_completion_timeout(done, msecs_to_jiffies(hw_response_timeout_val));
+ mutex_lock(&inst->lock);
+ if (!ret) {
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+struct iris_inst *iris_get_instance(struct iris_core *core, u32 session_id)
+{
+ struct iris_inst *inst;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(inst, &core->instances, list) {
+ if (inst->session_id == session_id) {
+ mutex_unlock(&core->lock);
+ return inst;
+ }
+ }
+
+ mutex_unlock(&core->lock);
+ return NULL;
+}
diff --git a/drivers/media/platform/qcom/iris/iris_utils.h b/drivers/media/platform/qcom/iris/iris_utils.h
new file mode 100644
index 000000000000..49869cf7a376
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_utils.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_UTILS_H__
+#define __IRIS_UTILS_H__
+
+struct iris_core;
+#include "iris_buffer.h"
+
+struct iris_hfi_rect_desc {
+ u32 left;
+ u32 top;
+ u32 width;
+ u32 height;
+};
+
+struct iris_hfi_frame_info {
+ u32 picture_type;
+ u32 no_output;
+ u32 data_corrupt;
+ u32 overflow;
+};
+
+struct iris_ts_metadata {
+ u64 ts_ns;
+ u64 ts_us;
+ u32 flags;
+ struct v4l2_timecode tc;
+};
+
+#define NUM_MBS_PER_FRAME(height, width) \
+ (DIV_ROUND_UP(height, 16) * DIV_ROUND_UP(width, 16))
+
+static inline enum iris_buffer_type iris_v4l2_type_to_driver(u32 type)
+{
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return BUF_INPUT;
+ else
+ return BUF_OUTPUT;
+}
+
+bool iris_res_is_less_than(u32 width, u32 height,
+ u32 ref_width, u32 ref_height);
+int iris_get_mbpf(struct iris_inst *inst);
+bool iris_split_mode_enabled(struct iris_inst *inst);
+struct iris_inst *iris_get_instance(struct iris_core *core, u32 session_id);
+void iris_helper_buffers_done(struct iris_inst *inst, unsigned int type,
+ enum vb2_buffer_state state);
+int iris_wait_for_session_response(struct iris_inst *inst, bool is_flush);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_vb2.c b/drivers/media/platform/qcom/iris/iris_vb2.c
new file mode 100644
index 000000000000..cdf11feb590b
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_vb2.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "iris_instance.h"
+#include "iris_vb2.h"
+#include "iris_vdec.h"
+#include "iris_power.h"
+
+static int iris_check_core_mbpf(struct iris_inst *inst)
+{
+ struct iris_core *core = inst->core;
+ struct iris_inst *instance;
+ u32 total_mbpf = 0;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(instance, &core->instances, list)
+ total_mbpf += iris_get_mbpf(instance);
+ mutex_unlock(&core->lock);
+
+ if (total_mbpf > core->iris_platform_data->max_core_mbpf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int iris_check_inst_mbpf(struct iris_inst *inst)
+{
+ struct platform_inst_caps *caps;
+ u32 mbpf, max_mbpf;
+
+ caps = inst->core->iris_platform_data->inst_caps;
+ max_mbpf = caps->max_mbpf;
+ mbpf = iris_get_mbpf(inst);
+ if (mbpf > max_mbpf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int iris_check_resolution_supported(struct iris_inst *inst)
+{
+ u32 width, height, min_width, min_height, max_width, max_height;
+ struct platform_inst_caps *caps;
+
+ caps = inst->core->iris_platform_data->inst_caps;
+ width = inst->fmt_src->fmt.pix_mp.width;
+ height = inst->fmt_src->fmt.pix_mp.height;
+
+ min_width = caps->min_frame_width;
+ max_width = caps->max_frame_width;
+ min_height = caps->min_frame_height;
+ max_height = caps->max_frame_height;
+
+ if (!(min_width <= width && width <= max_width) ||
+ !(min_height <= height && height <= max_height))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int iris_check_session_supported(struct iris_inst *inst)
+{
+ struct iris_core *core = inst->core;
+ struct iris_inst *instance = NULL;
+ bool found = false;
+ int ret;
+
+ list_for_each_entry(instance, &core->instances, list) {
+ if (instance == inst)
+ found = true;
+ }
+
+ if (!found) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = iris_check_core_mbpf(inst);
+ if (ret)
+ goto exit;
+
+ ret = iris_check_inst_mbpf(inst);
+ if (ret)
+ goto exit;
+
+ ret = iris_check_resolution_supported(inst);
+ if (ret)
+ goto exit;
+
+ return 0;
+exit:
+ dev_err(inst->core->dev, "current session not supported (%d)\n", ret);
+
+ return ret;
+}
+
+int iris_vb2_buf_init(struct vb2_buffer *vb2)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
+ struct iris_buffer *buf = to_iris_buffer(vbuf);
+
+ buf->device_addr = vb2_dma_contig_plane_dma_addr(vb2, 0);
+
+ return 0;
+}
+
+int iris_vb2_queue_setup(struct vb2_queue *q,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct iris_inst *inst;
+ struct iris_core *core;
+ struct v4l2_format *f;
+ int ret = 0;
+
+ inst = vb2_get_drv_priv(q);
+
+ mutex_lock(&inst->lock);
+ if (inst->state == IRIS_INST_ERROR) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ core = inst->core;
+ f = V4L2_TYPE_IS_OUTPUT(q->type) ? inst->fmt_src : inst->fmt_dst;
+
+ if (*num_planes) {
+ if (*num_planes != f->fmt.pix_mp.num_planes ||
+ sizes[0] < f->fmt.pix_mp.plane_fmt[0].sizeimage)
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ret = iris_check_session_supported(inst);
+ if (ret)
+ goto unlock;
+
+ if (!inst->once_per_session_set) {
+ inst->once_per_session_set = true;
+
+ ret = core->hfi_ops->session_open(inst);
+ if (ret) {
+ ret = -EINVAL;
+ dev_err(core->dev, "session open failed\n");
+ goto unlock;
+ }
+
+ ret = iris_inst_change_state(inst, IRIS_INST_INIT);
+ if (ret)
+ goto unlock;
+ }
+
+ *num_planes = 1;
+ sizes[0] = f->fmt.pix_mp.plane_fmt[0].sizeimage;
+
+unlock:
+ mutex_unlock(&inst->lock);
+
+ return ret;
+}
+
+int iris_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ enum iris_buffer_type buf_type;
+ struct iris_inst *inst;
+ int ret = 0;
+
+ inst = vb2_get_drv_priv(q);
+
+ if (V4L2_TYPE_IS_CAPTURE(q->type) && inst->state == IRIS_INST_INIT)
+ return 0;
+
+ mutex_lock(&inst->lock);
+ if (inst->state == IRIS_INST_ERROR) {
+ ret = -EBUSY;
+ goto error;
+ }
+
+ if (!V4L2_TYPE_IS_OUTPUT(q->type) &&
+ !V4L2_TYPE_IS_CAPTURE(q->type)) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ iris_scale_power(inst);
+
+ ret = iris_check_session_supported(inst);
+ if (ret)
+ goto error;
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ ret = iris_vdec_streamon_input(inst);
+ else if (V4L2_TYPE_IS_CAPTURE(q->type))
+ ret = iris_vdec_streamon_output(inst);
+ if (ret)
+ goto error;
+
+ buf_type = iris_v4l2_type_to_driver(q->type);
+
+ ret = iris_queue_deferred_buffers(inst, buf_type);
+ if (ret)
+ goto error;
+
+ mutex_unlock(&inst->lock);
+
+ return ret;
+
+error:
+ iris_helper_buffers_done(inst, q->type, VB2_BUF_STATE_QUEUED);
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ mutex_unlock(&inst->lock);
+
+ return ret;
+}
+
+void iris_vb2_stop_streaming(struct vb2_queue *q)
+{
+ struct iris_inst *inst;
+ int ret = 0;
+
+ inst = vb2_get_drv_priv(q);
+
+ if (V4L2_TYPE_IS_CAPTURE(q->type) && inst->state == IRIS_INST_INIT)
+ return;
+
+ mutex_lock(&inst->lock);
+
+ if (!V4L2_TYPE_IS_OUTPUT(q->type) &&
+ !V4L2_TYPE_IS_CAPTURE(q->type))
+ goto exit;
+
+ ret = iris_vdec_session_streamoff(inst, q->type);
+ if (ret)
+ goto exit;
+
+exit:
+ iris_helper_buffers_done(inst, q->type, VB2_BUF_STATE_ERROR);
+ if (ret)
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+
+ mutex_unlock(&inst->lock);
+}
+
+int iris_vb2_buf_prepare(struct vb2_buffer *vb)
+{
+ struct iris_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE)
+ return -EINVAL;
+ }
+
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ vb2_plane_size(vb, 0) < iris_get_buffer_size(inst, BUF_OUTPUT))
+ return -EINVAL;
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+ vb2_plane_size(vb, 0) < iris_get_buffer_size(inst, BUF_INPUT))
+ return -EINVAL;
+
+ return 0;
+}
+
+int iris_vb2_buf_out_validate(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb);
+
+ v4l2_buf->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+void iris_vb2_buf_queue(struct vb2_buffer *vb2)
+{
+ static const struct v4l2_event eos = { .type = V4L2_EVENT_EOS };
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct iris_inst *inst;
+ int ret = 0;
+
+ inst = vb2_get_drv_priv(vb2->vb2_queue);
+
+ mutex_lock(&inst->lock);
+ if (inst->state == IRIS_INST_ERROR) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+
+ m2m_ctx = inst->m2m_ctx;
+
+ if (!vb2->planes[0].bytesused && V4L2_TYPE_IS_OUTPUT(vb2->type)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
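+ /*
+ * If a resolution change or drain has already delivered its last
+ * buffer, complete this capture buffer immediately with the LAST flag
+ * and signal EOS instead of queueing it to firmware.
+ */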
+ if (V4L2_TYPE_IS_CAPTURE(vb2->vb2_queue->type)) {
+ if ((inst->sub_state & IRIS_INST_SUB_DRC &&
+ inst->sub_state & IRIS_INST_SUB_DRC_LAST) ||
+ (inst->sub_state & IRIS_INST_SUB_DRAIN &&
+ inst->sub_state & IRIS_INST_SUB_DRAIN_LAST)) {
+ vbuf->flags |= V4L2_BUF_FLAG_LAST;
+ vbuf->sequence = inst->sequence_cap++;
+ vbuf->field = V4L2_FIELD_NONE;
+ vb2_set_plane_payload(vb2, 0, 0);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
+ if (!v4l2_m2m_has_stopped(m2m_ctx)) {
+ v4l2_event_queue_fh(&inst->fh, &eos);
+ v4l2_m2m_mark_stopped(m2m_ctx);
+ }
+ goto exit;
+ }
+ }
+
+ v4l2_m2m_buf_queue(m2m_ctx, vbuf);
+
+ ret = iris_vdec_qbuf(inst, vbuf);
+
+exit:
+ if (ret) {
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ }
+ mutex_unlock(&inst->lock);
+}
diff --git a/drivers/media/platform/qcom/iris/iris_vb2.h b/drivers/media/platform/qcom/iris/iris_vb2.h
new file mode 100644
index 000000000000..a88565fdd3e4
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_vb2.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_VB2_H__
+#define __IRIS_VB2_H__
+
+int iris_vb2_buf_init(struct vb2_buffer *vb2);
+int iris_vb2_queue_setup(struct vb2_queue *q,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[]);
+int iris_vb2_start_streaming(struct vb2_queue *q, unsigned int count);
+void iris_vb2_stop_streaming(struct vb2_queue *q);
+int iris_vb2_buf_prepare(struct vb2_buffer *vb);
+int iris_vb2_buf_out_validate(struct vb2_buffer *vb);
+void iris_vb2_buf_queue(struct vb2_buffer *vb2);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_vdec.c b/drivers/media/platform/qcom/iris/iris_vdec.c
new file mode 100644
index 000000000000..4143acedfc57
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_vdec.c
@@ -0,0 +1,659 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "iris_buffer.h"
+#include "iris_ctrls.h"
+#include "iris_instance.h"
+#include "iris_power.h"
+#include "iris_vdec.h"
+#include "iris_vpu_buffer.h"
+
+#define DEFAULT_WIDTH 320
+#define DEFAULT_HEIGHT 240
+#define DEFAULT_CODEC_ALIGNMENT 16
+
+int iris_vdec_inst_init(struct iris_inst *inst)
+{
+ struct iris_core *core = inst->core;
+ struct v4l2_format *f;
+
+ inst->fmt_src = kzalloc(sizeof(*inst->fmt_src), GFP_KERNEL);
+ inst->fmt_dst = kzalloc(sizeof(*inst->fmt_dst), GFP_KERNEL);
+ if (!inst->fmt_src || !inst->fmt_dst) {
+ kfree(inst->fmt_src);
+ kfree(inst->fmt_dst);
+ return -ENOMEM;
+ }
+
+ inst->fw_min_count = MIN_BUFFERS;
+
+ f = inst->fmt_src;
+ f->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ f->fmt.pix_mp.width = DEFAULT_WIDTH;
+ f->fmt.pix_mp.height = DEFAULT_HEIGHT;
+ f->fmt.pix_mp.pixelformat = V4L2_PIX_FMT_H264;
+ f->fmt.pix_mp.num_planes = 1;
+ f->fmt.pix_mp.plane_fmt[0].bytesperline = 0;
+ f->fmt.pix_mp.plane_fmt[0].sizeimage = iris_get_buffer_size(inst, BUF_INPUT);
+ f->fmt.pix_mp.field = V4L2_FIELD_NONE;
+ inst->buffers[BUF_INPUT].min_count = iris_vpu_buf_count(inst, BUF_INPUT);
+ inst->buffers[BUF_INPUT].size = f->fmt.pix_mp.plane_fmt[0].sizeimage;
+
+ f = inst->fmt_dst;
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ f->fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12;
+ f->fmt.pix_mp.width = ALIGN(DEFAULT_WIDTH, 128);
+ f->fmt.pix_mp.height = ALIGN(DEFAULT_HEIGHT, 32);
+ f->fmt.pix_mp.num_planes = 1;
+ f->fmt.pix_mp.plane_fmt[0].bytesperline = ALIGN(DEFAULT_WIDTH, 128);
+ f->fmt.pix_mp.plane_fmt[0].sizeimage = iris_get_buffer_size(inst, BUF_OUTPUT);
+ f->fmt.pix_mp.field = V4L2_FIELD_NONE;
+ f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_DEFAULT;
+ f->fmt.pix_mp.xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ f->fmt.pix_mp.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ f->fmt.pix_mp.quantization = V4L2_QUANTIZATION_DEFAULT;
+ inst->buffers[BUF_OUTPUT].min_count = iris_vpu_buf_count(inst, BUF_OUTPUT);
+ inst->buffers[BUF_OUTPUT].size = f->fmt.pix_mp.plane_fmt[0].sizeimage;
+
+ memcpy(&inst->fw_caps[0], &core->inst_fw_caps[0],
+ INST_FW_CAP_MAX * sizeof(struct platform_inst_fw_cap));
+
+ return iris_ctrls_init(inst);
+}
+
+void iris_vdec_inst_deinit(struct iris_inst *inst)
+{
+ kfree(inst->fmt_dst);
+ kfree(inst->fmt_src);
+}
+
+int iris_vdec_enum_fmt(struct iris_inst *inst, struct v4l2_fmtdesc *f)
+{
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ f->pixelformat = V4L2_PIX_FMT_H264;
+ f->flags = V4L2_FMT_FLAG_COMPRESSED | V4L2_FMT_FLAG_DYN_RESOLUTION;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ f->pixelformat = V4L2_PIX_FMT_NV12;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int iris_vdec_try_fmt(struct iris_inst *inst, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct v4l2_format *f_inst;
+ struct vb2_queue *src_q;
+
+ memset(pixmp->reserved, 0, sizeof(pixmp->reserved));
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (f->fmt.pix_mp.pixelformat != V4L2_PIX_FMT_H264) {
+ f_inst = inst->fmt_src;
+ f->fmt.pix_mp.width = f_inst->fmt.pix_mp.width;
+ f->fmt.pix_mp.height = f_inst->fmt.pix_mp.height;
+ f->fmt.pix_mp.pixelformat = f_inst->fmt.pix_mp.pixelformat;
+ }
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ if (f->fmt.pix_mp.pixelformat != V4L2_PIX_FMT_NV12) {
+ f_inst = inst->fmt_dst;
+ f->fmt.pix_mp.pixelformat = f_inst->fmt.pix_mp.pixelformat;
+ f->fmt.pix_mp.width = f_inst->fmt.pix_mp.width;
+ f->fmt.pix_mp.height = f_inst->fmt.pix_mp.height;
+ }
+
+ src_q = v4l2_m2m_get_src_vq(m2m_ctx);
+ if (vb2_is_streaming(src_q)) {
+ f_inst = inst->fmt_src;
+ f->fmt.pix_mp.height = f_inst->fmt.pix_mp.height;
+ f->fmt.pix_mp.width = f_inst->fmt.pix_mp.width;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (pixmp->field == V4L2_FIELD_ANY)
+ pixmp->field = V4L2_FIELD_NONE;
+
+ pixmp->num_planes = 1;
+
+ return 0;
+}
+
+int iris_vdec_s_fmt(struct iris_inst *inst, struct v4l2_format *f)
+{
+ struct v4l2_format *fmt, *output_fmt;
+ struct vb2_queue *q;
+ u32 codec_align;
+
+ q = v4l2_m2m_get_vq(inst->m2m_ctx, f->type);
+ if (!q)
+ return -EINVAL;
+
+ if (vb2_is_busy(q))
+ return -EBUSY;
+
+ iris_vdec_try_fmt(inst, f);
+
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (f->fmt.pix_mp.pixelformat != V4L2_PIX_FMT_H264)
+ return -EINVAL;
+
+ fmt = inst->fmt_src;
+ fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+
+ codec_align = DEFAULT_CODEC_ALIGNMENT;
+ fmt->fmt.pix_mp.width = ALIGN(f->fmt.pix_mp.width, codec_align);
+ fmt->fmt.pix_mp.height = ALIGN(f->fmt.pix_mp.height, codec_align);
+ fmt->fmt.pix_mp.num_planes = 1;
+ fmt->fmt.pix_mp.plane_fmt[0].bytesperline = 0;
+ fmt->fmt.pix_mp.plane_fmt[0].sizeimage = iris_get_buffer_size(inst, BUF_INPUT);
+ inst->buffers[BUF_INPUT].min_count = iris_vpu_buf_count(inst, BUF_INPUT);
+ inst->buffers[BUF_INPUT].size = fmt->fmt.pix_mp.plane_fmt[0].sizeimage;
+
+ fmt->fmt.pix_mp.colorspace = f->fmt.pix_mp.colorspace;
+ fmt->fmt.pix_mp.xfer_func = f->fmt.pix_mp.xfer_func;
+ fmt->fmt.pix_mp.ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
+ fmt->fmt.pix_mp.quantization = f->fmt.pix_mp.quantization;
+
+ output_fmt = inst->fmt_dst;
+ output_fmt->fmt.pix_mp.colorspace = f->fmt.pix_mp.colorspace;
+ output_fmt->fmt.pix_mp.xfer_func = f->fmt.pix_mp.xfer_func;
+ output_fmt->fmt.pix_mp.ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
+ output_fmt->fmt.pix_mp.quantization = f->fmt.pix_mp.quantization;
+
+ inst->crop.left = 0;
+ inst->crop.top = 0;
+ inst->crop.width = f->fmt.pix_mp.width;
+ inst->crop.height = f->fmt.pix_mp.height;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ fmt = inst->fmt_dst;
+ fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ if (fmt->fmt.pix_mp.pixelformat != V4L2_PIX_FMT_NV12)
+ return -EINVAL;
+ fmt->fmt.pix_mp.pixelformat = f->fmt.pix_mp.pixelformat;
+ fmt->fmt.pix_mp.width = ALIGN(f->fmt.pix_mp.width, 128);
+ fmt->fmt.pix_mp.height = ALIGN(f->fmt.pix_mp.height, 32);
+ fmt->fmt.pix_mp.num_planes = 1;
+ fmt->fmt.pix_mp.plane_fmt[0].bytesperline = ALIGN(f->fmt.pix_mp.width, 128);
+ fmt->fmt.pix_mp.plane_fmt[0].sizeimage = iris_get_buffer_size(inst, BUF_OUTPUT);
+ inst->buffers[BUF_OUTPUT].min_count = iris_vpu_buf_count(inst, BUF_OUTPUT);
+ inst->buffers[BUF_OUTPUT].size = fmt->fmt.pix_mp.plane_fmt[0].sizeimage;
+
+ inst->crop.top = 0;
+ inst->crop.left = 0;
+ inst->crop.width = f->fmt.pix_mp.width;
+ inst->crop.height = f->fmt.pix_mp.height;
+ break;
+ default:
+ return -EINVAL;
+ }
+ memcpy(f, fmt, sizeof(*fmt));
+
+ return 0;
+}
+
+int iris_vdec_subscribe_event(struct iris_inst *inst, const struct v4l2_event_subscription *sub)
+{
+ int ret = 0;
+
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ ret = v4l2_event_subscribe(&inst->fh, sub, 0, NULL);
+ break;
+ case V4L2_EVENT_SOURCE_CHANGE:
+ ret = v4l2_src_change_event_subscribe(&inst->fh, sub);
+ break;
+ case V4L2_EVENT_CTRL:
+ ret = v4l2_ctrl_subscribe_event(&inst->fh, sub);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+void iris_vdec_src_change(struct iris_inst *inst)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct v4l2_event event = {0};
+ struct vb2_queue *src_q;
+
+ src_q = v4l2_m2m_get_src_vq(m2m_ctx);
+ if (!vb2_is_streaming(src_q))
+ return;
+
+ event.type = V4L2_EVENT_SOURCE_CHANGE;
+ event.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION;
+ v4l2_event_queue_fh(&inst->fh, &event);
+}
+
+static int iris_vdec_get_num_queued_buffers(struct iris_inst *inst,
+ enum iris_buffer_type type)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct v4l2_m2m_buffer *buffer, *n;
+ struct iris_buffer *buf;
+ u32 count = 0;
+
+ switch (type) {
+ case BUF_INPUT:
+ v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buffer, n) {
+ buf = to_iris_buffer(&buffer->vb);
+ if (!(buf->attr & BUF_ATTR_QUEUED))
+ continue;
+ count++;
+ }
+ return count;
+ case BUF_OUTPUT:
+ v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buffer, n) {
+ buf = to_iris_buffer(&buffer->vb);
+ if (!(buf->attr & BUF_ATTR_QUEUED))
+ continue;
+ count++;
+ }
+ return count;
+ default:
+ return count;
+ }
+}
+
+static void iris_vdec_flush_deferred_buffers(struct iris_inst *inst,
+ enum iris_buffer_type type)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct v4l2_m2m_buffer *buffer, *n;
+ struct iris_buffer *buf;
+
+ if (type == BUF_INPUT) {
+ v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buffer, n) {
+ buf = to_iris_buffer(&buffer->vb);
+ if (buf->attr & BUF_ATTR_DEFERRED) {
+ if (!(buf->attr & BUF_ATTR_BUFFER_DONE)) {
+ buf->attr |= BUF_ATTR_BUFFER_DONE;
+ buf->data_size = 0;
+ iris_vb2_buffer_done(inst, buf);
+ }
+ }
+ }
+ } else {
+ v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buffer, n) {
+ buf = to_iris_buffer(&buffer->vb);
+ if (buf->attr & BUF_ATTR_DEFERRED) {
+ if (!(buf->attr & BUF_ATTR_BUFFER_DONE)) {
+ buf->attr |= BUF_ATTR_BUFFER_DONE;
+ buf->data_size = 0;
+ iris_vb2_buffer_done(inst, buf);
+ }
+ }
+ }
+ }
+}
+
+static void iris_vdec_kill_session(struct iris_inst *inst)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+
+ if (!inst->session_id)
+ return;
+
+ hfi_ops->session_close(inst);
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+}
+
+int iris_vdec_session_streamoff(struct iris_inst *inst, u32 plane)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ enum iris_buffer_type buffer_type;
+ u32 count;
+ int ret;
+
+ switch (plane) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ buffer_type = BUF_INPUT;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ buffer_type = BUF_OUTPUT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = hfi_ops->session_stop(inst, plane);
+ if (ret)
+ goto error;
+
+ count = iris_vdec_get_num_queued_buffers(inst, buffer_type);
+ if (count) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ ret = iris_inst_state_change_streamoff(inst, plane);
+ if (ret)
+ goto error;
+
+ iris_vdec_flush_deferred_buffers(inst, buffer_type);
+
+ return 0;
+
+error:
+ iris_vdec_kill_session(inst);
+ iris_vdec_flush_deferred_buffers(inst, buffer_type);
+
+ return ret;
+}
+
+static int iris_vdec_process_streamon_input(struct iris_inst *inst)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ enum iris_inst_sub_state set_sub_state = 0;
+ int ret;
+
+ iris_scale_power(inst);
+
+ ret = hfi_ops->session_start(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ if (inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE) {
+ ret = iris_inst_change_sub_state(inst, IRIS_INST_SUB_INPUT_PAUSE, 0);
+ if (ret)
+ return ret;
+ }
+
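+ /*
+ * If a resolution change, drain or initial source change is still
+ * pending, keep the input port paused so firmware does not consume
+ * more bitstream until the capture side has been reconfigured.
+ */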
+ if (inst->sub_state & IRIS_INST_SUB_DRC ||
+ inst->sub_state & IRIS_INST_SUB_DRAIN ||
+ inst->sub_state & IRIS_INST_SUB_FIRST_IPSC) {
+ if (!(inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE)) {
+ if (hfi_ops->session_pause) {
+ ret = hfi_ops->session_pause(inst,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+ }
+ set_sub_state = IRIS_INST_SUB_INPUT_PAUSE;
+ }
+ }
+
+ ret = iris_inst_state_change_streamon(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ return iris_inst_change_sub_state(inst, 0, set_sub_state);
+}
+
+int iris_vdec_streamon_input(struct iris_inst *inst)
+{
+ int ret;
+
+ ret = iris_set_properties(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ ret = iris_alloc_and_queue_persist_bufs(inst);
+ if (ret)
+ return ret;
+
+ iris_get_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ ret = iris_destroy_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ ret = iris_create_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ ret = iris_queue_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ return iris_vdec_process_streamon_input(inst);
+}
+
+static int iris_vdec_process_streamon_output(struct iris_inst *inst)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ bool drain_active = false, drc_active = false;
+ enum iris_inst_sub_state clear_sub_state = 0;
+ int ret = 0;
+
+ iris_scale_power(inst);
+
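+ /*
+ * Streaming on the capture port completes a pending resolution-change
+ * or drain sequence; the corresponding sub-states are cleared below
+ * once firmware has been resumed and the port restarted.
+ */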
+ drain_active = inst->sub_state & IRIS_INST_SUB_DRAIN &&
+ inst->sub_state & IRIS_INST_SUB_DRAIN_LAST;
+
+ drc_active = inst->sub_state & IRIS_INST_SUB_DRC &&
+ inst->sub_state & IRIS_INST_SUB_DRC_LAST;
+
+ if (drc_active)
+ clear_sub_state = IRIS_INST_SUB_DRC | IRIS_INST_SUB_DRC_LAST;
+ else if (drain_active)
+ clear_sub_state = IRIS_INST_SUB_DRAIN | IRIS_INST_SUB_DRAIN_LAST;
+
+ if (inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE) {
+ ret = iris_alloc_and_queue_input_int_bufs(inst);
+ if (ret)
+ return ret;
+ ret = iris_set_stage(inst, STAGE);
+ if (ret)
+ return ret;
+ ret = iris_set_pipe(inst, PIPE);
+ if (ret)
+ return ret;
+ }
+
+ if (inst->state == IRIS_INST_INPUT_STREAMING &&
+ inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE) {
+ if (!drain_active)
+ ret = hfi_ops->session_resume_drc(inst,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ else if (hfi_ops->session_resume_drain)
+ ret = hfi_ops->session_resume_drain(inst,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+ clear_sub_state |= IRIS_INST_SUB_INPUT_PAUSE;
+ }
+
+ if (inst->sub_state & IRIS_INST_SUB_FIRST_IPSC)
+ clear_sub_state |= IRIS_INST_SUB_FIRST_IPSC;
+
+ ret = hfi_ops->session_start(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ return ret;
+
+ if (inst->sub_state & IRIS_INST_SUB_OUTPUT_PAUSE)
+ clear_sub_state |= IRIS_INST_SUB_OUTPUT_PAUSE;
+
+ ret = iris_inst_state_change_streamon(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ return ret;
+
+ return iris_inst_change_sub_state(inst, clear_sub_state, 0);
+}
+
+int iris_vdec_streamon_output(struct iris_inst *inst)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ int ret;
+
+ ret = hfi_ops->session_set_config_params(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ return ret;
+
+ iris_get_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
+ ret = iris_destroy_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ return ret;
+
+ ret = iris_create_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ return ret;
+
+ ret = iris_vdec_process_streamon_output(inst);
+ if (ret)
+ goto error;
+
+ ret = iris_queue_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ goto error;
+
+ return ret;
+
+error:
+ iris_vdec_session_streamoff(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
+ return ret;
+}
+
+static int
+iris_vdec_vb2_buffer_to_driver(struct vb2_buffer *vb2, struct iris_buffer *buf)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
+
+ buf->type = iris_v4l2_type_to_driver(vb2->type);
+ buf->index = vb2->index;
+ buf->fd = vb2->planes[0].m.fd;
+ buf->buffer_size = vb2->planes[0].length;
+ buf->data_offset = vb2->planes[0].data_offset;
+ buf->data_size = vb2->planes[0].bytesused - vb2->planes[0].data_offset;
+ buf->flags = vbuf->flags;
+ buf->timestamp = vb2->timestamp;
+ buf->attr = 0;
+
+ return 0;
+}
+
+static void
+iris_set_ts_metadata(struct iris_inst *inst, struct vb2_v4l2_buffer *vbuf)
+{
+ u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ struct vb2_buffer *vb = &vbuf->vb2_buf;
+ u64 ts_us = vb->timestamp;
+
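+ /* Timestamp metadata lives in a small ring buffer; wrap the write index when it reaches the end. */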
+ if (inst->metadata_idx >= ARRAY_SIZE(inst->tss))
+ inst->metadata_idx = 0;
+
+ do_div(ts_us, NSEC_PER_USEC);
+
+ inst->tss[inst->metadata_idx].flags = vbuf->flags & mask;
+ inst->tss[inst->metadata_idx].tc = vbuf->timecode;
+ inst->tss[inst->metadata_idx].ts_us = ts_us;
+ inst->tss[inst->metadata_idx].ts_ns = vb->timestamp;
+
+ inst->metadata_idx++;
+}
+
+int iris_vdec_qbuf(struct iris_inst *inst, struct vb2_v4l2_buffer *vbuf)
+{
+ struct iris_buffer *buf = to_iris_buffer(vbuf);
+ struct vb2_buffer *vb2 = &vbuf->vb2_buf;
+ struct vb2_queue *q;
+ int ret;
+
+ ret = iris_vdec_vb2_buffer_to_driver(vb2, buf);
+ if (ret)
+ return ret;
+
+ if (buf->type == BUF_INPUT)
+ iris_set_ts_metadata(inst, vbuf);
+
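+ /*
+ * Buffers queued before the vb2 queue is streaming are only marked
+ * deferred here; they are sent to firmware later via
+ * iris_queue_deferred_buffers() from the start_streaming op.
+ */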
+ q = v4l2_m2m_get_vq(inst->m2m_ctx, vb2->type);
+ if (!vb2_is_streaming(q)) {
+ buf->attr |= BUF_ATTR_DEFERRED;
+ return 0;
+ }
+
+ iris_scale_power(inst);
+
+ return iris_queue_buffer(inst, buf);
+}
+
+int iris_vdec_start_cmd(struct iris_inst *inst)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ enum iris_inst_sub_state clear_sub_state = 0;
+ struct vb2_queue *dst_vq;
+ int ret;
+
+ dst_vq = v4l2_m2m_get_dst_vq(inst->m2m_ctx);
+
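+ /*
+ * V4L2_DEC_CMD_START restarts decoding after a resolution change or
+ * drain has delivered its last buffer: clear the pending sub-states
+ * and resume any port that firmware left paused.
+ */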
+ if (inst->sub_state & IRIS_INST_SUB_DRC &&
+ inst->sub_state & IRIS_INST_SUB_DRC_LAST) {
+ vb2_clear_last_buffer_dequeued(dst_vq);
+ clear_sub_state = IRIS_INST_SUB_DRC | IRIS_INST_SUB_DRC_LAST;
+
+ if (inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE) {
+ ret = hfi_ops->session_resume_drc(inst,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+ clear_sub_state |= IRIS_INST_SUB_INPUT_PAUSE;
+ }
+ if (inst->sub_state & IRIS_INST_SUB_OUTPUT_PAUSE) {
+ ret = hfi_ops->session_resume_drc(inst,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ return ret;
+ clear_sub_state |= IRIS_INST_SUB_OUTPUT_PAUSE;
+ }
+ } else if (inst->sub_state & IRIS_INST_SUB_DRAIN &&
+ inst->sub_state & IRIS_INST_SUB_DRAIN_LAST) {
+ vb2_clear_last_buffer_dequeued(dst_vq);
+ clear_sub_state = IRIS_INST_SUB_DRAIN | IRIS_INST_SUB_DRAIN_LAST;
+ if (inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE) {
+ if (hfi_ops->session_resume_drain) {
+ ret = hfi_ops->session_resume_drain(inst,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+ }
+
+ clear_sub_state |= IRIS_INST_SUB_INPUT_PAUSE;
+ }
+ if (inst->sub_state & IRIS_INST_SUB_OUTPUT_PAUSE) {
+ if (hfi_ops->session_resume_drain) {
+ ret = hfi_ops->session_resume_drain(inst,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ return ret;
+ }
+
+ clear_sub_state |= IRIS_INST_SUB_OUTPUT_PAUSE;
+ }
+ } else {
+ dev_err(inst->core->dev, "start called before receiving last_flag\n");
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ return -EBUSY;
+ }
+
+ return iris_inst_change_sub_state(inst, clear_sub_state, 0);
+}
+
+int iris_vdec_stop_cmd(struct iris_inst *inst)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ int ret;
+
+ ret = hfi_ops->session_drain(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ return iris_inst_change_sub_state(inst, 0, IRIS_INST_SUB_DRAIN);
+}
diff --git a/drivers/media/platform/qcom/iris/iris_vdec.h b/drivers/media/platform/qcom/iris/iris_vdec.h
new file mode 100644
index 000000000000..b24932dc511a
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_vdec.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_VDEC_H__
+#define __IRIS_VDEC_H__
+
+struct iris_inst;
+
+int iris_vdec_inst_init(struct iris_inst *inst);
+void iris_vdec_inst_deinit(struct iris_inst *inst);
+int iris_vdec_enum_fmt(struct iris_inst *inst, struct v4l2_fmtdesc *f);
+int iris_vdec_try_fmt(struct iris_inst *inst, struct v4l2_format *f);
+int iris_vdec_s_fmt(struct iris_inst *inst, struct v4l2_format *f);
+int iris_vdec_subscribe_event(struct iris_inst *inst, const struct v4l2_event_subscription *sub);
+void iris_vdec_src_change(struct iris_inst *inst);
+int iris_vdec_streamon_input(struct iris_inst *inst);
+int iris_vdec_streamon_output(struct iris_inst *inst);
+int iris_vdec_qbuf(struct iris_inst *inst, struct vb2_v4l2_buffer *vbuf);
+int iris_vdec_start_cmd(struct iris_inst *inst);
+int iris_vdec_stop_cmd(struct iris_inst *inst);
+int iris_vdec_session_streamoff(struct iris_inst *inst, u32 plane);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_vidc.c b/drivers/media/platform/qcom/iris/iris_vidc.c
new file mode 100644
index 000000000000..ca0f4e310f77
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_vidc.c
@@ -0,0 +1,453 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/pm_runtime.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "iris_vidc.h"
+#include "iris_instance.h"
+#include "iris_vdec.h"
+#include "iris_vb2.h"
+#include "iris_vpu_buffer.h"
+#include "iris_platform_common.h"
+
+#define IRIS_DRV_NAME "iris_driver"
+#define IRIS_BUS_NAME "platform:iris_icc"
+#define STEP_WIDTH 1
+#define STEP_HEIGHT 1
+
+static void iris_v4l2_fh_init(struct iris_inst *inst)
+{
+ v4l2_fh_init(&inst->fh, inst->core->vdev_dec);
+ inst->fh.ctrl_handler = &inst->ctrl_handler;
+ v4l2_fh_add(&inst->fh);
+}
+
+static void iris_v4l2_fh_deinit(struct iris_inst *inst)
+{
+ v4l2_fh_del(&inst->fh);
+ inst->fh.ctrl_handler = NULL;
+ v4l2_fh_exit(&inst->fh);
+}
+
+static void iris_add_session(struct iris_inst *inst)
+{
+ struct iris_core *core = inst->core;
+ struct iris_inst *iter;
+ u32 count = 0;
+
+ mutex_lock(&core->lock);
+
+ list_for_each_entry(iter, &core->instances, list)
+ count++;
+
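+ /*
+ * When the platform session limit is reached the instance is simply
+ * not added; iris_check_session_supported() will reject it once
+ * buffers are set up.
+ */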
+ if (count < core->iris_platform_data->max_session_count)
+ list_add_tail(&inst->list, &core->instances);
+
+ mutex_unlock(&core->lock);
+}
+
+static void iris_remove_session(struct iris_inst *inst)
+{
+ struct iris_core *core = inst->core;
+ struct iris_inst *iter, *temp;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry_safe(iter, temp, &core->instances, list) {
+ if (iter->session_id == inst->session_id) {
+ list_del_init(&iter->list);
+ break;
+ }
+ }
+ mutex_unlock(&core->lock);
+}
+
+static inline struct iris_inst *iris_get_inst(struct file *filp, void *fh)
+{
+ return container_of(filp->private_data, struct iris_inst, fh);
+}
+
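+ /*
+ * Buffers are handed to firmware directly from the vb2 .buf_queue op,
+ * so there is nothing to do when the m2m framework schedules a job.
+ */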
+static void iris_m2m_device_run(void *priv)
+{
+}
+
+static void iris_m2m_job_abort(void *priv)
+{
+ struct iris_inst *inst = priv;
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+
+ v4l2_m2m_job_finish(inst->m2m_dev, m2m_ctx);
+}
+
+static const struct v4l2_m2m_ops iris_m2m_ops = {
+ .device_run = iris_m2m_device_run,
+ .job_abort = iris_m2m_job_abort,
+};
+
+static int
+iris_m2m_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
+{
+ struct iris_inst *inst = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->ops = inst->core->iris_vb2_ops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->drv_priv = inst;
+ src_vq->buf_struct_size = sizeof(struct iris_buffer);
+ src_vq->min_reqbufs_allocation = MIN_BUFFERS;
+ src_vq->dev = inst->core->dev;
+ src_vq->lock = &inst->ctx_q_lock;
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->ops = inst->core->iris_vb2_ops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->drv_priv = inst;
+ dst_vq->buf_struct_size = sizeof(struct iris_buffer);
+ dst_vq->min_reqbufs_allocation = MIN_BUFFERS;
+ dst_vq->dev = inst->core->dev;
+ dst_vq->lock = &inst->ctx_q_lock;
+
+ return vb2_queue_init(dst_vq);
+}
+
+int iris_open(struct file *filp)
+{
+ struct iris_core *core = video_drvdata(filp);
+ struct iris_inst *inst;
+ int ret;
+
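+ /* Hold a runtime PM reference only while the core (firmware and queues) is initialized. */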
+ ret = pm_runtime_resume_and_get(core->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = iris_core_init(core);
+ if (ret) {
+ dev_err(core->dev, "core init failed\n");
+ pm_runtime_put_sync(core->dev);
+ return ret;
+ }
+
+ pm_runtime_put_sync(core->dev);
+
+ inst = core->iris_platform_data->get_instance();
+ if (!inst)
+ return -ENOMEM;
+
+ inst->core = core;
+ inst->session_id = hash32_ptr(inst);
+ inst->state = IRIS_INST_DEINIT;
+
+ mutex_init(&inst->lock);
+ mutex_init(&inst->ctx_q_lock);
+
+ INIT_LIST_HEAD(&inst->buffers[BUF_BIN].list);
+ INIT_LIST_HEAD(&inst->buffers[BUF_ARP].list);
+ INIT_LIST_HEAD(&inst->buffers[BUF_COMV].list);
+ INIT_LIST_HEAD(&inst->buffers[BUF_NON_COMV].list);
+ INIT_LIST_HEAD(&inst->buffers[BUF_LINE].list);
+ INIT_LIST_HEAD(&inst->buffers[BUF_DPB].list);
+ INIT_LIST_HEAD(&inst->buffers[BUF_PERSIST].list);
+ INIT_LIST_HEAD(&inst->buffers[BUF_SCRATCH_1].list);
+ init_completion(&inst->completion);
+ init_completion(&inst->flush_completion);
+
+ iris_v4l2_fh_init(inst);
+
+ inst->m2m_dev = v4l2_m2m_init(&iris_m2m_ops);
+ if (IS_ERR_OR_NULL(inst->m2m_dev)) {
+ ret = -EINVAL;
+ goto fail_v4l2_fh_deinit;
+ }
+
+ inst->m2m_ctx = v4l2_m2m_ctx_init(inst->m2m_dev, inst, iris_m2m_queue_init);
+ if (IS_ERR_OR_NULL(inst->m2m_ctx)) {
+ ret = -EINVAL;
+ goto fail_m2m_release;
+ }
+
+ ret = iris_vdec_inst_init(inst);
+ if (ret)
+ goto fail_m2m_ctx_release;
+
+ iris_add_session(inst);
+
+ inst->fh.m2m_ctx = inst->m2m_ctx;
+ filp->private_data = &inst->fh;
+
+ return 0;
+
+fail_m2m_ctx_release:
+ v4l2_m2m_ctx_release(inst->m2m_ctx);
+fail_m2m_release:
+ v4l2_m2m_release(inst->m2m_dev);
+fail_v4l2_fh_deinit:
+ iris_v4l2_fh_deinit(inst);
+ mutex_destroy(&inst->ctx_q_lock);
+ mutex_destroy(&inst->lock);
+ kfree(inst);
+
+ return ret;
+}
+
+static void iris_session_close(struct iris_inst *inst)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ bool wait_for_response = true;
+ int ret;
+
+ if (inst->state == IRIS_INST_DEINIT)
+ return;
+
+ reinit_completion(&inst->completion);
+
+ ret = hfi_ops->session_close(inst);
+ if (ret)
+ wait_for_response = false;
+
+ if (wait_for_response)
+ iris_wait_for_session_response(inst, false);
+}
+
+int iris_close(struct file *filp)
+{
+ struct iris_inst *inst = iris_get_inst(filp, NULL);
+
+ v4l2_ctrl_handler_free(&inst->ctrl_handler);
+ v4l2_m2m_ctx_release(inst->m2m_ctx);
+ v4l2_m2m_release(inst->m2m_dev);
+ mutex_lock(&inst->lock);
+ iris_vdec_inst_deinit(inst);
+ iris_session_close(inst);
+ iris_inst_change_state(inst, IRIS_INST_DEINIT);
+ iris_v4l2_fh_deinit(inst);
+ iris_destroy_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ iris_destroy_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ iris_remove_session(inst);
+ mutex_unlock(&inst->lock);
+ mutex_destroy(&inst->ctx_q_lock);
+ mutex_destroy(&inst->lock);
+ kfree(inst);
+ filp->private_data = NULL;
+
+ return 0;
+}
+
+static int iris_enum_fmt(struct file *filp, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct iris_inst *inst = iris_get_inst(filp, NULL);
+
+ if (f->index)
+ return -EINVAL;
+
+ return iris_vdec_enum_fmt(inst, f);
+}
+
+static int iris_try_fmt_vid_mplane(struct file *filp, void *fh, struct v4l2_format *f)
+{
+ struct iris_inst *inst = iris_get_inst(filp, NULL);
+ int ret;
+
+ mutex_lock(&inst->lock);
+ ret = iris_vdec_try_fmt(inst, f);
+ mutex_unlock(&inst->lock);
+
+ return ret;
+}
+
+static int iris_s_fmt_vid_mplane(struct file *filp, void *fh, struct v4l2_format *f)
+{
+ struct iris_inst *inst = iris_get_inst(filp, NULL);
+ int ret;
+
+ mutex_lock(&inst->lock);
+ ret = iris_vdec_s_fmt(inst, f);
+ mutex_unlock(&inst->lock);
+
+ return ret;
+}
+
+static int iris_g_fmt_vid_mplane(struct file *filp, void *fh, struct v4l2_format *f)
+{
+ struct iris_inst *inst = iris_get_inst(filp, NULL);
+ int ret = 0;
+
+ mutex_lock(&inst->lock);
+ if (V4L2_TYPE_IS_OUTPUT(f->type))
+ *f = *inst->fmt_src;
+ else if (V4L2_TYPE_IS_CAPTURE(f->type))
+ *f = *inst->fmt_dst;
+ else
+ ret = -EINVAL;
+
+ mutex_unlock(&inst->lock);
+
+ return ret;
+}
+
+static int iris_enum_framesizes(struct file *filp, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct iris_inst *inst = iris_get_inst(filp, NULL);
+ struct platform_inst_caps *caps;
+
+ if (fsize->index)
+ return -EINVAL;
+
+ if (fsize->pixel_format != V4L2_PIX_FMT_H264 &&
+ fsize->pixel_format != V4L2_PIX_FMT_NV12)
+ return -EINVAL;
+
+ caps = inst->core->iris_platform_data->inst_caps;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.min_width = caps->min_frame_width;
+ fsize->stepwise.max_width = caps->max_frame_width;
+ fsize->stepwise.step_width = STEP_WIDTH;
+ fsize->stepwise.min_height = caps->min_frame_height;
+ fsize->stepwise.max_height = caps->max_frame_height;
+ fsize->stepwise.step_height = STEP_HEIGHT;
+
+ return 0;
+}
+
+static int iris_querycap(struct file *filp, void *fh, struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, IRIS_DRV_NAME, sizeof(cap->driver));
+ strscpy(cap->card, "Iris Decoder", sizeof(cap->card));
+
+ return 0;
+}
+
+static int iris_g_selection(struct file *filp, void *fh, struct v4l2_selection *s)
+{
+ struct iris_inst *inst = iris_get_inst(filp, NULL);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_PADDED:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE:
+ s->r.left = inst->crop.left;
+ s->r.top = inst->crop.top;
+ s->r.width = inst->crop.width;
+ s->r.height = inst->crop.height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int iris_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub)
+{
+ struct iris_inst *inst = container_of(fh, struct iris_inst, fh);
+
+ return iris_vdec_subscribe_event(inst, sub);
+}
+
+static int iris_dec_cmd(struct file *filp, void *fh,
+ struct v4l2_decoder_cmd *dec)
+{
+ struct iris_inst *inst = iris_get_inst(filp, NULL);
+ int ret = 0;
+
+ mutex_lock(&inst->lock);
+
+ ret = v4l2_m2m_ioctl_decoder_cmd(filp, fh, dec);
+ if (ret)
+ goto unlock;
+
+ if (inst->state == IRIS_INST_DEINIT)
+ goto unlock;
+
+ if (!iris_allow_cmd(inst, dec->cmd)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ if (dec->cmd == V4L2_DEC_CMD_START)
+ ret = iris_vdec_start_cmd(inst);
+ else if (dec->cmd == V4L2_DEC_CMD_STOP)
+ ret = iris_vdec_stop_cmd(inst);
+ else
+ ret = -EINVAL;
+
+unlock:
+ mutex_unlock(&inst->lock);
+
+ return ret;
+}
+
+static struct v4l2_file_operations iris_v4l2_file_ops = {
+ .owner = THIS_MODULE,
+ .open = iris_open,
+ .release = iris_close,
+ .unlocked_ioctl = video_ioctl2,
+ .poll = v4l2_m2m_fop_poll,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct vb2_ops iris_vb2_ops = {
+ .buf_init = iris_vb2_buf_init,
+ .queue_setup = iris_vb2_queue_setup,
+ .start_streaming = iris_vb2_start_streaming,
+ .stop_streaming = iris_vb2_stop_streaming,
+ .buf_prepare = iris_vb2_buf_prepare,
+ .buf_out_validate = iris_vb2_buf_out_validate,
+ .buf_queue = iris_vb2_buf_queue,
+};
+
+static const struct v4l2_ioctl_ops iris_v4l2_ioctl_ops = {
+ .vidioc_enum_fmt_vid_cap = iris_enum_fmt,
+ .vidioc_enum_fmt_vid_out = iris_enum_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = iris_try_fmt_vid_mplane,
+ .vidioc_try_fmt_vid_out_mplane = iris_try_fmt_vid_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = iris_s_fmt_vid_mplane,
+ .vidioc_s_fmt_vid_out_mplane = iris_s_fmt_vid_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = iris_g_fmt_vid_mplane,
+ .vidioc_g_fmt_vid_out_mplane = iris_g_fmt_vid_mplane,
+ .vidioc_enum_framesizes = iris_enum_framesizes,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_remove_bufs = v4l2_m2m_ioctl_remove_bufs,
+ .vidioc_querycap = iris_querycap,
+ .vidioc_g_selection = iris_g_selection,
+ .vidioc_subscribe_event = iris_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_try_decoder_cmd,
+ .vidioc_decoder_cmd = iris_dec_cmd,
+};
+
+void iris_init_ops(struct iris_core *core)
+{
+ core->iris_v4l2_file_ops = &iris_v4l2_file_ops;
+ core->iris_vb2_ops = &iris_vb2_ops;
+ core->iris_v4l2_ioctl_ops = &iris_v4l2_ioctl_ops;
+}
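For reference, a minimal sketch of how a video_device is typically wired up to the ops tables that iris_init_ops() publishes. This is not the iris driver's actual probe path; only core->vdev_dec and the three ops pointers are taken from the code above, the rest is the standard V4L2 mem2mem registration pattern.

static int iris_register_vdev_sketch(struct iris_core *core)
{
	struct video_device *vdev = core->vdev_dec;	/* also used by iris_v4l2_fh_init() */

	iris_init_ops(core);
	vdev->fops = core->iris_v4l2_file_ops;
	vdev->ioctl_ops = core->iris_v4l2_ioctl_ops;
	vdev->vfl_dir = VFL_DIR_M2M;
	vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
	video_set_drvdata(vdev, core);			/* read back via video_drvdata() in iris_open() */

	return video_register_device(vdev, VFL_TYPE_VIDEO, -1);
}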
diff --git a/drivers/media/platform/qcom/iris/iris_vidc.h b/drivers/media/platform/qcom/iris/iris_vidc.h
new file mode 100644
index 000000000000..a26054ff55b5
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_vidc.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_VIDC_H__
+#define __IRIS_VIDC_H__
+
+struct iris_core;
+
+void iris_init_ops(struct iris_core *core);
+int iris_open(struct file *filp);
+int iris_close(struct file *filp);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_vpu2.c b/drivers/media/platform/qcom/iris/iris_vpu2.c
new file mode 100644
index 000000000000..8f502aed43ce
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_vpu2.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "iris_instance.h"
+#include "iris_vpu_common.h"
+
+static u64 iris_vpu2_calc_freq(struct iris_inst *inst, size_t data_size)
+{
+ struct platform_inst_caps *caps = inst->core->iris_platform_data->inst_caps;
+ struct v4l2_format *inp_f = inst->fmt_src;
+ u32 mbs_per_second, mbpf, height, width;
+ unsigned long vpp_freq, vsp_freq;
+ u32 fps = DEFAULT_FPS;
+
+ width = max(inp_f->fmt.pix_mp.width, inst->crop.width);
+ height = max(inp_f->fmt.pix_mp.height, inst->crop.height);
+
+ mbpf = NUM_MBS_PER_FRAME(height, width);
+ mbs_per_second = mbpf * fps;
+
+ vpp_freq = mbs_per_second * caps->mb_cycles_vpp;
+
+ /* 21 / 20 is overhead factor */
+ vpp_freq += vpp_freq / 20;
+ vsp_freq = mbs_per_second * caps->mb_cycles_vsp;
+
+ /* 10 / 7 is overhead factor */
+ vsp_freq += ((fps * data_size * 8) * 10) / 7;
+
+ return max(vpp_freq, vsp_freq);
+}
+
+const struct vpu_ops iris_vpu2_ops = {
+ .power_off_hw = iris_vpu_power_off_hw,
+ .calc_freq = iris_vpu2_calc_freq,
+};
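To make the arithmetic concrete, a worked example with hypothetical inputs (DEFAULT_FPS assumed to be 30, NUM_MBS_PER_FRAME() assumed to evaluate to 8160 for 1920x1088, and the mb_cycles_* platform values invented for illustration):

static u64 iris_vpu2_freq_example(void)
{
	u32 mbs_per_second = 8160 * 30;			/* 244800 MBs per second */
	u64 vpp_freq, vsp_freq;

	vpp_freq = (u64)mbs_per_second * 200;		/* mb_cycles_vpp = 200 (assumed): 48.96 MHz */
	vpp_freq += vpp_freq / 20;			/* 21/20 overhead: ~51.4 MHz */

	vsp_freq = (u64)mbs_per_second * 25;		/* mb_cycles_vsp = 25 (assumed): 6.12 MHz */
	vsp_freq += (30ULL * SZ_256K * 8 * 10) / 7;	/* 256 KiB frames: ~89.9 MHz bitstream term */

	return max(vpp_freq, vsp_freq);			/* ~96 MHz, later fed to dev_pm_opp_set_rate() */
}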
diff --git a/drivers/media/platform/qcom/iris/iris_vpu3.c b/drivers/media/platform/qcom/iris/iris_vpu3.c
new file mode 100644
index 000000000000..b484638e6105
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_vpu3.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/iopoll.h>
+
+#include "iris_instance.h"
+#include "iris_vpu_common.h"
+#include "iris_vpu_register_defines.h"
+
+#define AON_MVP_NOC_RESET 0x0001F000
+
+#define WRAPPER_CORE_CLOCK_CONFIG (WRAPPER_BASE_OFFS + 0x88)
+#define CORE_CLK_RUN 0x0
+
+#define CPU_CS_AHB_BRIDGE_SYNC_RESET (CPU_CS_BASE_OFFS + 0x160)
+#define CORE_BRIDGE_SW_RESET BIT(0)
+#define CORE_BRIDGE_HW_RESET_DISABLE BIT(1)
+
+#define AON_WRAPPER_MVP_NOC_RESET_REQ (AON_MVP_NOC_RESET + 0x000)
+#define VIDEO_NOC_RESET_REQ (BIT(0) | BIT(1))
+
+#define AON_WRAPPER_MVP_NOC_RESET_ACK (AON_MVP_NOC_RESET + 0x004)
+
+#define VCODEC_SS_IDLE_STATUSN (VCODEC_BASE_OFFS + 0x70)
+
+static bool iris_vpu3_hw_power_collapsed(struct iris_core *core)
+{
+ u32 value, pwr_status;
+
+ value = readl(core->reg_base + WRAPPER_CORE_POWER_STATUS);
+ pwr_status = value & BIT(1);
+
+ return !pwr_status;
+ return !pwr_status;
+}
+
+static void iris_vpu3_power_off_hardware(struct iris_core *core)
+{
+ u32 reg_val = 0, value, i;
+ int ret;
+
+ if (iris_vpu3_hw_power_collapsed(core))
+ goto disable_power;
+
+ dev_err(core->dev, "video hw is power on\n");
+
+ value = readl(core->reg_base + WRAPPER_CORE_CLOCK_CONFIG);
+ if (value)
+ writel(CORE_CLK_RUN, core->reg_base + WRAPPER_CORE_CLOCK_CONFIG);
+
+ for (i = 0; i < core->iris_platform_data->num_vpp_pipe; i++) {
+ ret = readl_poll_timeout(core->reg_base + VCODEC_SS_IDLE_STATUSN + 4 * i,
+ reg_val, reg_val & 0x400000, 2000, 20000);
+ if (ret)
+ goto disable_power;
+ }
+
+ writel(VIDEO_NOC_RESET_REQ, core->reg_base + AON_WRAPPER_MVP_NOC_RESET_REQ);
+
+ ret = readl_poll_timeout(core->reg_base + AON_WRAPPER_MVP_NOC_RESET_ACK,
+ reg_val, reg_val & 0x3, 200, 2000);
+ if (ret)
+ goto disable_power;
+
+ writel(0x0, core->reg_base + AON_WRAPPER_MVP_NOC_RESET_REQ);
+
+ ret = readl_poll_timeout(core->reg_base + AON_WRAPPER_MVP_NOC_RESET_ACK,
+ reg_val, !(reg_val & 0x3), 200, 2000);
+ if (ret)
+ goto disable_power;
+
+ writel(CORE_BRIDGE_SW_RESET | CORE_BRIDGE_HW_RESET_DISABLE,
+ core->reg_base + CPU_CS_AHB_BRIDGE_SYNC_RESET);
+ writel(CORE_BRIDGE_HW_RESET_DISABLE, core->reg_base + CPU_CS_AHB_BRIDGE_SYNC_RESET);
+ writel(0x0, core->reg_base + CPU_CS_AHB_BRIDGE_SYNC_RESET);
+
+disable_power:
+ iris_vpu_power_off_hw(core);
+}
+
+static u64 iris_vpu3_calculate_frequency(struct iris_inst *inst, size_t data_size)
+{
+ struct platform_inst_caps *caps = inst->core->iris_platform_data->inst_caps;
+ struct v4l2_format *inp_f = inst->fmt_src;
+ u32 height, width, mbs_per_second, mbpf;
+ u64 fw_cycles, fw_vpp_cycles;
+ u64 vsp_cycles, vpp_cycles;
+ u32 fps = DEFAULT_FPS;
+
+ width = max(inp_f->fmt.pix_mp.width, inst->crop.width);
+ height = max(inp_f->fmt.pix_mp.height, inst->crop.height);
+
+ mbpf = NUM_MBS_PER_FRAME(height, width);
+ mbs_per_second = mbpf * fps;
+
+ fw_cycles = fps * caps->mb_cycles_fw;
+ fw_vpp_cycles = fps * caps->mb_cycles_fw_vpp;
+
+ vpp_cycles = mult_frac(mbs_per_second, caps->mb_cycles_vpp, (u32)inst->fw_caps[PIPE].value);
+ /* 21 / 20 is minimum overhead factor */
+ vpp_cycles += max(div_u64(vpp_cycles, 20), fw_vpp_cycles);
+
+ /* 1.059 is multi-pipe overhead */
+ if (inst->fw_caps[PIPE].value > 1)
+ vpp_cycles += div_u64(vpp_cycles * 59, 1000);
+
+ vsp_cycles = fps * data_size * 8;
+ vsp_cycles = div_u64(vsp_cycles, 2);
+ /* VSP FW overhead 1.05 */
+ vsp_cycles = div_u64(vsp_cycles * 21, 20);
+
+ if (inst->fw_caps[STAGE].value == STAGE_1)
+ vsp_cycles = vsp_cycles * 3;
+
+ return max3(vpp_cycles, vsp_cycles, fw_cycles);
+}
+
+const struct vpu_ops iris_vpu3_ops = {
+ .power_off_hw = iris_vpu3_power_off_hardware,
+ .calc_freq = iris_vpu3_calculate_frequency,
+};
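The power-off path above drains the video NOC with a request/ack handshake: assert the reset request, poll for the ack bits, deassert, then poll until the ack clears. Condensed into a single illustrative helper (register names as defined above; the helper itself is not part of the driver):

static int iris_vpu3_noc_reset_sketch(struct iris_core *core)
{
	u32 val;
	int ret;

	writel(VIDEO_NOC_RESET_REQ, core->reg_base + AON_WRAPPER_MVP_NOC_RESET_REQ);
	ret = readl_poll_timeout(core->reg_base + AON_WRAPPER_MVP_NOC_RESET_ACK,
				 val, val & 0x3, 200, 2000);		/* wait for ack */
	if (ret)
		return ret;

	writel(0x0, core->reg_base + AON_WRAPPER_MVP_NOC_RESET_REQ);
	return readl_poll_timeout(core->reg_base + AON_WRAPPER_MVP_NOC_RESET_ACK,
				  val, !(val & 0x3), 200, 2000);	/* wait for ack to clear */
}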
diff --git a/drivers/media/platform/qcom/iris/iris_vpu_buffer.c b/drivers/media/platform/qcom/iris/iris_vpu_buffer.c
new file mode 100644
index 000000000000..dce25e410d80
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_vpu_buffer.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "iris_instance.h"
+#include "iris_vpu_buffer.h"
+
+static u32 size_h264d_hw_bin_buffer(u32 frame_width, u32 frame_height, u32 num_vpp_pipes)
+{
+ u32 size_yuv, size_bin_hdr, size_bin_res;
+
+ size_yuv = ((frame_width * frame_height) <= BIN_BUFFER_THRESHOLD) ?
+ ((BIN_BUFFER_THRESHOLD * 3) >> 1) :
+ ((frame_width * frame_height * 3) >> 1);
+ size_bin_hdr = size_yuv * H264_CABAC_HDR_RATIO_HD_TOT;
+ size_bin_res = size_yuv * H264_CABAC_RES_RATIO_HD_TOT;
+ size_bin_hdr = ALIGN(size_bin_hdr / num_vpp_pipes,
+ DMA_ALIGNMENT) * num_vpp_pipes;
+ size_bin_res = ALIGN(size_bin_res / num_vpp_pipes,
+ DMA_ALIGNMENT) * num_vpp_pipes;
+
+ return size_bin_hdr + size_bin_res;
+}
+
+static u32 hfi_buffer_bin_h264d(u32 frame_width, u32 frame_height, u32 num_vpp_pipes)
+{
+ u32 n_aligned_h = ALIGN(frame_height, 16);
+ u32 n_aligned_w = ALIGN(frame_width, 16);
+
+ return size_h264d_hw_bin_buffer(n_aligned_w, n_aligned_h, num_vpp_pipes);
+}
+
+static u32 hfi_buffer_comv_h264d(u32 frame_width, u32 frame_height, u32 _comv_bufcount)
+{
+ u32 frame_height_in_mbs = DIV_ROUND_UP(frame_height, 16);
+ u32 frame_width_in_mbs = DIV_ROUND_UP(frame_width, 16);
+ u32 col_zero_aligned_width = (frame_width_in_mbs << 2);
+ u32 col_mv_aligned_width = (frame_width_in_mbs << 7);
+ u32 col_zero_size, size_colloc;
+
+ col_mv_aligned_width = ALIGN(col_mv_aligned_width, 16);
+ col_zero_aligned_width = ALIGN(col_zero_aligned_width, 16);
+ col_zero_size = col_zero_aligned_width *
+ ((frame_height_in_mbs + 1) >> 1);
+ col_zero_size = ALIGN(col_zero_size, 64);
+ col_zero_size <<= 1;
+ col_zero_size = ALIGN(col_zero_size, 512);
+ size_colloc = col_mv_aligned_width * ((frame_height_in_mbs + 1) >> 1);
+ size_colloc = ALIGN(size_colloc, 64);
+ size_colloc <<= 1;
+ size_colloc = ALIGN(size_colloc, 512);
+ size_colloc += (col_zero_size + SIZE_H264D_BUFTAB_T * 2);
+
+ return (size_colloc * (_comv_bufcount)) + 512;
+}
+
+static u32 size_h264d_bse_cmd_buf(u32 frame_height)
+{
+ u32 height = ALIGN(frame_height, 32);
+
+ return min_t(u32, (DIV_ROUND_UP(height, 16) * 48), H264D_MAX_SLICE) *
+ SIZE_H264D_BSE_CMD_PER_BUF;
+}
+
+static u32 size_h264d_vpp_cmd_buf(u32 frame_height)
+{
+ u32 size, height = ALIGN(frame_height, 32);
+
+ size = min_t(u32, (DIV_ROUND_UP(height, 16) * 48), H264D_MAX_SLICE) *
+ SIZE_H264D_VPP_CMD_PER_BUF;
+
+ return size > VPP_CMD_MAX_SIZE ? VPP_CMD_MAX_SIZE : size;
+}
+
+static u32 hfi_buffer_persist_h264d(void)
+{
+ return ALIGN(SIZE_SLIST_BUF_H264 * NUM_SLIST_BUF_H264 +
+ H264_DISPLAY_BUF_SIZE * H264_NUM_FRM_INFO +
+ NUM_HW_PIC_BUF * SIZE_SEI_USERDATA,
+ DMA_ALIGNMENT);
+}
+
+static u32 hfi_buffer_non_comv_h264d(u32 frame_width, u32 frame_height, u32 num_vpp_pipes)
+{
+ u32 size_bse, size_vpp, size;
+
+ size_bse = size_h264d_bse_cmd_buf(frame_height);
+ size_vpp = size_h264d_vpp_cmd_buf(frame_height);
+ size = ALIGN(size_bse, DMA_ALIGNMENT) +
+ ALIGN(size_vpp, DMA_ALIGNMENT) +
+ ALIGN(SIZE_HW_PIC(SIZE_H264D_HW_PIC_T), DMA_ALIGNMENT);
+
+ return ALIGN(size, DMA_ALIGNMENT);
+}
+
+static u32 size_vpss_lb(u32 frame_width, u32 frame_height)
+{
+ u32 opb_lb_wr_llb_y_buffer_size, opb_lb_wr_llb_uv_buffer_size;
+ u32 opb_wr_top_line_chroma_buffer_size;
+ u32 opb_wr_top_line_luma_buffer_size;
+ u32 macrotiling_size = 32;
+
+ opb_wr_top_line_luma_buffer_size =
+ ALIGN(frame_width, macrotiling_size) / macrotiling_size * 256;
+ opb_wr_top_line_luma_buffer_size =
+ ALIGN(opb_wr_top_line_luma_buffer_size, DMA_ALIGNMENT) +
+ (MAX_TILE_COLUMNS - 1) * 256;
+ opb_wr_top_line_luma_buffer_size =
+ max_t(u32, opb_wr_top_line_luma_buffer_size, (32 * ALIGN(frame_height, 8)));
+ opb_wr_top_line_chroma_buffer_size = opb_wr_top_line_luma_buffer_size;
+ opb_lb_wr_llb_uv_buffer_size =
+ ALIGN((ALIGN(frame_height, 8) / (4 / 2)) * 64, 32);
+ opb_lb_wr_llb_y_buffer_size =
+ ALIGN((ALIGN(frame_height, 8) / (4 / 2)) * 64, 32);
+ return opb_wr_top_line_luma_buffer_size +
+ opb_wr_top_line_chroma_buffer_size +
+ opb_lb_wr_llb_uv_buffer_size +
+ opb_lb_wr_llb_y_buffer_size;
+}
+
+static u32 hfi_buffer_line_h264d(u32 frame_width, u32 frame_height,
+ bool is_opb, u32 num_vpp_pipes)
+{
+ u32 vpss_lb_size = 0;
+ u32 size;
+
+ size = ALIGN(size_h264d_lb_fe_top_data(frame_width), DMA_ALIGNMENT) +
+ ALIGN(size_h264d_lb_fe_top_ctrl(frame_width), DMA_ALIGNMENT) +
+ ALIGN(size_h264d_lb_fe_left_ctrl(frame_height), DMA_ALIGNMENT) * num_vpp_pipes +
+ ALIGN(size_h264d_lb_se_top_ctrl(frame_width), DMA_ALIGNMENT) +
+ ALIGN(size_h264d_lb_se_left_ctrl(frame_height), DMA_ALIGNMENT) * num_vpp_pipes +
+ ALIGN(size_h264d_lb_pe_top_data(frame_width), DMA_ALIGNMENT) +
+ ALIGN(size_h264d_lb_vsp_top(frame_width), DMA_ALIGNMENT) +
+ ALIGN(size_h264d_lb_recon_dma_metadata_wr(frame_height), DMA_ALIGNMENT) * 2 +
+ ALIGN(size_h264d_qp(frame_width, frame_height), DMA_ALIGNMENT);
+ size = ALIGN(size, DMA_ALIGNMENT);
+ if (is_opb)
+ vpss_lb_size = size_vpss_lb(frame_width, frame_height);
+
+ return ALIGN((size + vpss_lb_size), DMA_ALIGNMENT);
+}
+
+static u32 iris_vpu_dec_bin_size(struct iris_inst *inst)
+{
+ u32 num_vpp_pipes = inst->core->iris_platform_data->num_vpp_pipe;
+ struct v4l2_format *f = inst->fmt_src;
+ u32 height = f->fmt.pix_mp.height;
+ u32 width = f->fmt.pix_mp.width;
+
+ return hfi_buffer_bin_h264d(width, height, num_vpp_pipes);
+}
+
+static u32 iris_vpu_dec_comv_size(struct iris_inst *inst)
+{
+ u32 num_comv = VIDEO_MAX_FRAME;
+ struct v4l2_format *f = inst->fmt_src;
+ u32 height = f->fmt.pix_mp.height;
+ u32 width = f->fmt.pix_mp.width;
+
+ return hfi_buffer_comv_h264d(width, height, num_comv);
+}
+
+static u32 iris_vpu_dec_persist_size(struct iris_inst *inst)
+{
+ return hfi_buffer_persist_h264d();
+}
+
+static u32 iris_vpu_dec_dpb_size(struct iris_inst *inst)
+{
+ if (iris_split_mode_enabled(inst))
+ return iris_get_buffer_size(inst, BUF_DPB);
+ else
+ return 0;
+}
+
+static u32 iris_vpu_dec_non_comv_size(struct iris_inst *inst)
+{
+ u32 num_vpp_pipes = inst->core->iris_platform_data->num_vpp_pipe;
+ struct v4l2_format *f = inst->fmt_src;
+ u32 height = f->fmt.pix_mp.height;
+ u32 width = f->fmt.pix_mp.width;
+
+ return hfi_buffer_non_comv_h264d(width, height, num_vpp_pipes);
+}
+
+static u32 iris_vpu_dec_line_size(struct iris_inst *inst)
+{
+ u32 num_vpp_pipes = inst->core->iris_platform_data->num_vpp_pipe;
+ struct v4l2_format *f = inst->fmt_src;
+ u32 height = f->fmt.pix_mp.height;
+ u32 width = f->fmt.pix_mp.width;
+ bool is_opb = false;
+
+ if (iris_split_mode_enabled(inst))
+ is_opb = true;
+
+ return hfi_buffer_line_h264d(width, height, is_opb, num_vpp_pipes);
+}
+
+static u32 iris_vpu_dec_scratch1_size(struct iris_inst *inst)
+{
+ return iris_vpu_dec_comv_size(inst) +
+ iris_vpu_dec_non_comv_size(inst) +
+ iris_vpu_dec_line_size(inst);
+}
+
+struct iris_vpu_buf_type_handle {
+ enum iris_buffer_type type;
+ u32 (*handle)(struct iris_inst *inst);
+};
+
+int iris_vpu_buf_size(struct iris_inst *inst, enum iris_buffer_type buffer_type)
+{
+ const struct iris_vpu_buf_type_handle *buf_type_handle_arr;
+ u32 size = 0, buf_type_handle_size, i;
+
+ static const struct iris_vpu_buf_type_handle dec_internal_buf_type_handle[] = {
+ {BUF_BIN, iris_vpu_dec_bin_size },
+ {BUF_COMV, iris_vpu_dec_comv_size },
+ {BUF_NON_COMV, iris_vpu_dec_non_comv_size },
+ {BUF_LINE, iris_vpu_dec_line_size },
+ {BUF_PERSIST, iris_vpu_dec_persist_size },
+ {BUF_DPB, iris_vpu_dec_dpb_size },
+ {BUF_SCRATCH_1, iris_vpu_dec_scratch1_size },
+ };
+
+ buf_type_handle_size = ARRAY_SIZE(dec_internal_buf_type_handle);
+ buf_type_handle_arr = dec_internal_buf_type_handle;
+
+ for (i = 0; i < buf_type_handle_size; i++) {
+ if (buf_type_handle_arr[i].type == buffer_type) {
+ size = buf_type_handle_arr[i].handle(inst);
+ break;
+ }
+ }
+
+ return size;
+}
+
+static inline int iris_vpu_dpb_count(struct iris_inst *inst)
+{
+ if (iris_split_mode_enabled(inst)) {
+ return inst->fw_min_count ?
+ inst->fw_min_count : inst->buffers[BUF_OUTPUT].min_count;
+ }
+
+ return 0;
+}
+
+int iris_vpu_buf_count(struct iris_inst *inst, enum iris_buffer_type buffer_type)
+{
+ switch (buffer_type) {
+ case BUF_INPUT:
+ return MIN_BUFFERS;
+ case BUF_OUTPUT:
+ return inst->fw_min_count;
+ case BUF_BIN:
+ case BUF_COMV:
+ case BUF_NON_COMV:
+ case BUF_LINE:
+ case BUF_PERSIST:
+ case BUF_SCRATCH_1:
+ return 1; /* internal buffer count needed by firmware is 1 */
+ case BUF_DPB:
+ return iris_vpu_dpb_count(inst);
+ default:
+ return 0;
+ }
+}
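Plugging in the constants from iris_vpu_buffer.h below, the persistent buffer has a fixed, resolution-independent size: hfi_buffer_persist_h264d() = ALIGN(512 * 288 + 3328 * 66 + 32 * 4096, 256) = ALIGN(147456 + 219648 + 131072, 256) = 498176 bytes, already 256-byte aligned, i.e. roughly 486 KiB per decoder instance.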
diff --git a/drivers/media/platform/qcom/iris/iris_vpu_buffer.h b/drivers/media/platform/qcom/iris/iris_vpu_buffer.h
new file mode 100644
index 000000000000..62af6ea6ba1f
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_vpu_buffer.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_VPU_BUFFER_H__
+#define __IRIS_VPU_BUFFER_H__
+
+struct iris_inst;
+
+#define MIN_BUFFERS 4
+
+#define DMA_ALIGNMENT 256
+
+#define NUM_HW_PIC_BUF 32
+#define SIZE_HW_PIC(size_per_buf) (NUM_HW_PIC_BUF * (size_per_buf))
+
+#define MAX_TILE_COLUMNS 32
+#define BIN_BUFFER_THRESHOLD (1280 * 736)
+#define VPP_CMD_MAX_SIZE (BIT(20))
+#define H264D_MAX_SLICE 1800
+
+#define SIZE_H264D_BUFTAB_T 256
+#define SIZE_H264D_BSE_CMD_PER_BUF (32 * 4)
+#define SIZE_H264D_VPP_CMD_PER_BUF 512
+
+#define NUM_SLIST_BUF_H264 (256 + 32)
+#define SIZE_SLIST_BUF_H264 512
+#define H264_DISPLAY_BUF_SIZE 3328
+#define H264_NUM_FRM_INFO 66
+
+#define SIZE_SEI_USERDATA 4096
+
+#define H264_CABAC_HDR_RATIO_HD_TOT 1
+#define H264_CABAC_RES_RATIO_HD_TOT 3
+#define SIZE_H264D_HW_PIC_T (BIT(11))
+
+#define MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE 64
+#define MAX_SE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE 16
+#define MAX_PE_NBR_DATA_LCU64_LINE_BUFFER_SIZE 384
+#define MAX_FE_NBR_DATA_LUMA_LINE_BUFFER_SIZE 640
+
+static inline u32 size_h264d_lb_fe_top_data(u32 frame_width)
+{
+ return MAX_FE_NBR_DATA_LUMA_LINE_BUFFER_SIZE * ALIGN(frame_width, 16) * 3;
+}
+
+static inline u32 size_h264d_lb_fe_top_ctrl(u32 frame_width)
+{
+ return MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * DIV_ROUND_UP(frame_width, 16);
+}
+
+static inline u32 size_h264d_lb_fe_left_ctrl(u32 frame_height)
+{
+ return MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * DIV_ROUND_UP(frame_height, 16);
+}
+
+static inline u32 size_h264d_lb_se_top_ctrl(u32 frame_width)
+{
+ return MAX_SE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * DIV_ROUND_UP(frame_width, 16);
+}
+
+static inline u32 size_h264d_lb_se_left_ctrl(u32 frame_height)
+{
+ return MAX_SE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * DIV_ROUND_UP(frame_height, 16);
+}
+
+static inline u32 size_h264d_lb_pe_top_data(u32 frame_width)
+{
+ return MAX_PE_NBR_DATA_LCU64_LINE_BUFFER_SIZE * DIV_ROUND_UP(frame_width, 16);
+}
+
+static inline u32 size_h264d_lb_vsp_top(u32 frame_width)
+{
+ return (DIV_ROUND_UP(frame_width, 16) << 7);
+}
+
+static inline u32 size_h264d_lb_recon_dma_metadata_wr(u32 frame_height)
+{
+ return ALIGN(frame_height, 16) * 32;
+}
+
+static inline u32 size_h264d_qp(u32 frame_width, u32 frame_height)
+{
+ return DIV_ROUND_UP(frame_width, 64) * DIV_ROUND_UP(frame_height, 64) * 128;
+}
+
+int iris_vpu_buf_size(struct iris_inst *inst, enum iris_buffer_type buffer_type);
+int iris_vpu_buf_count(struct iris_inst *inst, enum iris_buffer_type buffer_type);
+
+#endif
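A typical consumer of these two helpers queries the count and size for an internal buffer type and allocates DMA memory accordingly; a sketch of that usage (the allocation call and the list bookkeeping are illustrative, not the driver's internal-buffer code):

static int iris_alloc_internal_buffers_sketch(struct iris_inst *inst,
					       enum iris_buffer_type type)
{
	int i, count = iris_vpu_buf_count(inst, type);
	u32 size = iris_vpu_buf_size(inst, type);

	for (i = 0; i < count; i++) {
		dma_addr_t daddr;
		void *vaddr;

		vaddr = dma_alloc_coherent(inst->core->dev, size, &daddr, GFP_KERNEL);
		if (!vaddr)
			return -ENOMEM;
		/* track (vaddr, daddr, size) on inst->buffers[type].list here */
	}

	return 0;
}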
diff --git a/drivers/media/platform/qcom/iris/iris_vpu_common.c b/drivers/media/platform/qcom/iris/iris_vpu_common.c
new file mode 100644
index 000000000000..fe9896d66848
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_vpu_common.c
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/iopoll.h>
+#include <linux/pm_opp.h>
+#include <linux/reset.h>
+
+#include "iris_core.h"
+#include "iris_vpu_common.h"
+#include "iris_vpu_register_defines.h"
+
+#define WRAPPER_TZ_BASE_OFFS 0x000C0000
+#define AON_BASE_OFFS 0x000E0000
+
+#define CPU_IC_BASE_OFFS (CPU_BASE_OFFS)
+
+#define CPU_CS_A2HSOFTINTCLR (CPU_CS_BASE_OFFS + 0x1C)
+#define CLEAR_XTENSA2HOST_INTR BIT(0)
+
+#define CTRL_INIT (CPU_CS_BASE_OFFS + 0x48)
+#define CTRL_STATUS (CPU_CS_BASE_OFFS + 0x4C)
+
+#define CTRL_INIT_IDLE_MSG_BMSK 0x40000000
+#define CTRL_ERROR_STATUS__M 0xfe
+#define CTRL_STATUS_PC_READY 0x100
+
+#define QTBL_INFO (CPU_CS_BASE_OFFS + 0x50)
+#define QTBL_ENABLE BIT(0)
+
+#define QTBL_ADDR (CPU_CS_BASE_OFFS + 0x54)
+#define CPU_CS_SCIACMDARG3 (CPU_CS_BASE_OFFS + 0x58)
+#define SFR_ADDR (CPU_CS_BASE_OFFS + 0x5C)
+#define UC_REGION_ADDR (CPU_CS_BASE_OFFS + 0x64)
+#define UC_REGION_SIZE (CPU_CS_BASE_OFFS + 0x68)
+
+#define CPU_CS_H2XSOFTINTEN (CPU_CS_BASE_OFFS + 0x148)
+#define HOST2XTENSA_INTR_ENABLE BIT(0)
+
+#define CPU_CS_X2RPMH (CPU_CS_BASE_OFFS + 0x168)
+#define MSK_SIGNAL_FROM_TENSILICA BIT(0)
+#define MSK_CORE_POWER_ON BIT(1)
+
+#define CPU_IC_SOFTINT (CPU_IC_BASE_OFFS + 0x150)
+#define CPU_IC_SOFTINT_H2A_SHFT 0x0
+
+#define WRAPPER_INTR_STATUS (WRAPPER_BASE_OFFS + 0x0C)
+#define WRAPPER_INTR_STATUS_A2HWD_BMSK BIT(3)
+#define WRAPPER_INTR_STATUS_A2H_BMSK BIT(2)
+
+#define WRAPPER_INTR_MASK (WRAPPER_BASE_OFFS + 0x10)
+#define WRAPPER_INTR_MASK_A2HWD_BMSK BIT(3)
+#define WRAPPER_INTR_MASK_A2HCPU_BMSK BIT(2)
+
+#define WRAPPER_DEBUG_BRIDGE_LPI_CONTROL (WRAPPER_BASE_OFFS + 0x54)
+#define WRAPPER_DEBUG_BRIDGE_LPI_STATUS (WRAPPER_BASE_OFFS + 0x58)
+#define WRAPPER_IRIS_CPU_NOC_LPI_CONTROL (WRAPPER_BASE_OFFS + 0x5C)
+#define WRAPPER_IRIS_CPU_NOC_LPI_STATUS (WRAPPER_BASE_OFFS + 0x60)
+
+#define WRAPPER_TZ_CPU_STATUS (WRAPPER_TZ_BASE_OFFS + 0x10)
+#define WRAPPER_TZ_CTL_AXI_CLOCK_CONFIG (WRAPPER_TZ_BASE_OFFS + 0x14)
+#define CTL_AXI_CLK_HALT BIT(0)
+#define CTL_CLK_HALT BIT(1)
+
+#define WRAPPER_TZ_QNS4PDXFIFO_RESET (WRAPPER_TZ_BASE_OFFS + 0x18)
+#define RESET_HIGH BIT(0)
+
+#define AON_WRAPPER_MVP_NOC_LPI_CONTROL (AON_BASE_OFFS)
+#define REQ_POWER_DOWN_PREP BIT(0)
+
+#define AON_WRAPPER_MVP_NOC_LPI_STATUS (AON_BASE_OFFS + 0x4)
+
+static void iris_vpu_interrupt_init(struct iris_core *core)
+{
+ u32 mask_val;
+
+ mask_val = readl(core->reg_base + WRAPPER_INTR_MASK);
+ mask_val &= ~(WRAPPER_INTR_MASK_A2HWD_BMSK |
+ WRAPPER_INTR_MASK_A2HCPU_BMSK);
+ writel(mask_val, core->reg_base + WRAPPER_INTR_MASK);
+}
+
+static void iris_vpu_setup_ucregion_memory_map(struct iris_core *core)
+{
+ u32 queue_size, value;
+
+ /* Iris hardware requires 4K queue alignment */
+ queue_size = ALIGN(sizeof(struct iris_hfi_queue_table_header) +
+ (IFACEQ_QUEUE_SIZE * IFACEQ_NUMQ), SZ_4K);
+
+ value = (u32)core->iface_q_table_daddr;
+ writel(value, core->reg_base + UC_REGION_ADDR);
+
+ /* Iris hardware requires 1M queue alignment */
+ value = ALIGN(SFR_SIZE + queue_size, SZ_1M);
+ writel(value, core->reg_base + UC_REGION_SIZE);
+
+ value = (u32)core->iface_q_table_daddr;
+ writel(value, core->reg_base + QTBL_ADDR);
+
+ writel(QTBL_ENABLE, core->reg_base + QTBL_INFO);
+
+ if (core->sfr_daddr) {
+ value = (u32)core->sfr_daddr + core->iris_platform_data->core_arch;
+ writel(value, core->reg_base + SFR_ADDR);
+ }
+}
+
+int iris_vpu_boot_firmware(struct iris_core *core)
+{
+ u32 ctrl_init = BIT(0), ctrl_status = 0, count = 0, max_tries = 1000;
+
+ iris_vpu_setup_ucregion_memory_map(core);
+
+ writel(ctrl_init, core->reg_base + CTRL_INIT);
+ writel(0x1, core->reg_base + CPU_CS_SCIACMDARG3);
+
+ while (!ctrl_status && count < max_tries) {
+ ctrl_status = readl(core->reg_base + CTRL_STATUS);
+ if ((ctrl_status & CTRL_ERROR_STATUS__M) == 0x4) {
+ dev_err(core->dev, "invalid setting for uc_region\n");
+ break;
+ }
+
+ usleep_range(50, 100);
+ count++;
+ }
+
+ if (count >= max_tries) {
+ dev_err(core->dev, "error booting up iris firmware\n");
+ return -ETIME;
+ }
+
+ writel(HOST2XTENSA_INTR_ENABLE, core->reg_base + CPU_CS_H2XSOFTINTEN);
+ writel(0x0, core->reg_base + CPU_CS_X2RPMH);
+
+ return 0;
+}
+
+void iris_vpu_raise_interrupt(struct iris_core *core)
+{
+ writel(1 << CPU_IC_SOFTINT_H2A_SHFT, core->reg_base + CPU_IC_SOFTINT);
+}
+
+void iris_vpu_clear_interrupt(struct iris_core *core)
+{
+ u32 intr_status, mask;
+
+ intr_status = readl(core->reg_base + WRAPPER_INTR_STATUS);
+ mask = (WRAPPER_INTR_STATUS_A2H_BMSK |
+ WRAPPER_INTR_STATUS_A2HWD_BMSK |
+ CTRL_INIT_IDLE_MSG_BMSK);
+
+ if (intr_status & mask)
+ core->intr_status |= intr_status;
+
+ writel(CLEAR_XTENSA2HOST_INTR, core->reg_base + CPU_CS_A2HSOFTINTCLR);
+}
+
+int iris_vpu_watchdog(struct iris_core *core, u32 intr_status)
+{
+ if (intr_status & WRAPPER_INTR_STATUS_A2HWD_BMSK) {
+ dev_err(core->dev, "received watchdog interrupt\n");
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+int iris_vpu_prepare_pc(struct iris_core *core)
+{
+ u32 wfi_status, idle_status, pc_ready;
+ u32 ctrl_status, val = 0;
+ int ret;
+
+ ctrl_status = readl(core->reg_base + CTRL_STATUS);
+ pc_ready = ctrl_status & CTRL_STATUS_PC_READY;
+ idle_status = ctrl_status & BIT(30);
+ if (pc_ready)
+ return 0;
+
+ wfi_status = readl(core->reg_base + WRAPPER_TZ_CPU_STATUS);
+ wfi_status &= BIT(0);
+ if (!wfi_status || !idle_status)
+ goto skip_power_off;
+
+ ret = core->hfi_ops->sys_pc_prep(core);
+ if (ret)
+ goto skip_power_off;
+
+ ret = readl_poll_timeout(core->reg_base + CTRL_STATUS, val,
+ val & CTRL_STATUS_PC_READY, 250, 2500);
+ if (ret)
+ goto skip_power_off;
+
+ ret = readl_poll_timeout(core->reg_base + WRAPPER_TZ_CPU_STATUS,
+ val, val & BIT(0), 250, 2500);
+ if (ret)
+ goto skip_power_off;
+
+ return 0;
+
+skip_power_off:
+ ctrl_status = readl(core->reg_base + CTRL_STATUS);
+ wfi_status = readl(core->reg_base + WRAPPER_TZ_CPU_STATUS);
+ wfi_status &= BIT(0);
+ dev_err(core->dev, "skip power collapse, wfi=%#x, idle=%#x, pcr=%#x, ctrl=%#x)\n",
+ wfi_status, idle_status, pc_ready, ctrl_status);
+
+ return -EAGAIN;
+}
+
+static int iris_vpu_power_off_controller(struct iris_core *core)
+{
+ u32 val = 0;
+ int ret;
+
+ writel(MSK_SIGNAL_FROM_TENSILICA | MSK_CORE_POWER_ON, core->reg_base + CPU_CS_X2RPMH);
+
+ writel(REQ_POWER_DOWN_PREP, core->reg_base + AON_WRAPPER_MVP_NOC_LPI_CONTROL);
+
+ ret = readl_poll_timeout(core->reg_base + AON_WRAPPER_MVP_NOC_LPI_STATUS,
+ val, val & BIT(0), 200, 2000);
+ if (ret)
+ goto disable_power;
+
+ writel(REQ_POWER_DOWN_PREP, core->reg_base + WRAPPER_IRIS_CPU_NOC_LPI_CONTROL);
+
+ ret = readl_poll_timeout(core->reg_base + WRAPPER_IRIS_CPU_NOC_LPI_STATUS,
+ val, val & BIT(0), 200, 2000);
+ if (ret)
+ goto disable_power;
+
+ writel(0x0, core->reg_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL);
+
+ ret = readl_poll_timeout(core->reg_base + WRAPPER_DEBUG_BRIDGE_LPI_STATUS,
+ val, val == 0, 200, 2000);
+ if (ret)
+ goto disable_power;
+
+ writel(CTL_AXI_CLK_HALT | CTL_CLK_HALT,
+ core->reg_base + WRAPPER_TZ_CTL_AXI_CLOCK_CONFIG);
+ writel(RESET_HIGH, core->reg_base + WRAPPER_TZ_QNS4PDXFIFO_RESET);
+ writel(0x0, core->reg_base + WRAPPER_TZ_QNS4PDXFIFO_RESET);
+ writel(0x0, core->reg_base + WRAPPER_TZ_CTL_AXI_CLOCK_CONFIG);
+
+disable_power:
+ iris_disable_unprepare_clock(core, IRIS_CTRL_CLK);
+ iris_disable_unprepare_clock(core, IRIS_AXI_CLK);
+ iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);
+
+ return 0;
+}
+
+void iris_vpu_power_off_hw(struct iris_core *core)
+{
+ dev_pm_genpd_set_hwmode(core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN], false);
+ iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN]);
+ iris_disable_unprepare_clock(core, IRIS_HW_CLK);
+}
+
+void iris_vpu_power_off(struct iris_core *core)
+{
+ dev_pm_opp_set_rate(core->dev, 0);
+ core->iris_platform_data->vpu_ops->power_off_hw(core);
+ iris_vpu_power_off_controller(core);
+ iris_unset_icc_bw(core);
+
+ if (!iris_vpu_watchdog(core, core->intr_status))
+ disable_irq_nosync(core->irq);
+}
+
+static int iris_vpu_power_on_controller(struct iris_core *core)
+{
+ u32 rst_tbl_size = core->iris_platform_data->clk_rst_tbl_size;
+ int ret;
+
+ ret = iris_enable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);
+ if (ret)
+ return ret;
+
+ ret = reset_control_bulk_reset(rst_tbl_size, core->resets);
+ if (ret)
+ goto err_disable_power;
+
+ ret = iris_prepare_enable_clock(core, IRIS_AXI_CLK);
+ if (ret)
+ goto err_disable_power;
+
+ ret = iris_prepare_enable_clock(core, IRIS_CTRL_CLK);
+ if (ret)
+ goto err_disable_clock;
+
+ return 0;
+
+err_disable_clock:
+ iris_disable_unprepare_clock(core, IRIS_AXI_CLK);
+err_disable_power:
+ iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);
+
+ return ret;
+}
+
+static int iris_vpu_power_on_hw(struct iris_core *core)
+{
+ int ret;
+
+ ret = iris_enable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN]);
+ if (ret)
+ return ret;
+
+ ret = iris_prepare_enable_clock(core, IRIS_HW_CLK);
+ if (ret)
+ goto err_disable_power;
+
+ ret = dev_pm_genpd_set_hwmode(core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN], true);
+ if (ret)
+ goto err_disable_clock;
+
+ return 0;
+
+err_disable_clock:
+ iris_disable_unprepare_clock(core, IRIS_HW_CLK);
+err_disable_power:
+ iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN]);
+
+ return ret;
+}
+
+int iris_vpu_power_on(struct iris_core *core)
+{
+ u32 freq;
+ int ret;
+
+ ret = iris_set_icc_bw(core, INT_MAX);
+ if (ret)
+ goto err;
+
+ ret = iris_vpu_power_on_controller(core);
+ if (ret)
+ goto err_unvote_icc;
+
+ ret = iris_vpu_power_on_hw(core);
+ if (ret)
+ goto err_power_off_ctrl;
+
+ freq = core->power.clk_freq ? core->power.clk_freq :
+ (u32)ULONG_MAX;
+
+ dev_pm_opp_set_rate(core->dev, freq);
+
+ core->iris_platform_data->set_preset_registers(core);
+
+ iris_vpu_interrupt_init(core);
+ core->intr_status = 0;
+ enable_irq(core->irq);
+
+ return 0;
+
+err_power_off_ctrl:
+ iris_vpu_power_off_controller(core);
+err_unvote_icc:
+ iris_unset_icc_bw(core);
+err:
+ dev_err(core->dev, "power on failed\n");
+
+ return ret;
+}
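The firmware boot wait in iris_vpu_boot_firmware() is open-coded so it can also report the invalid-uc_region error status while polling; the rest of this file expresses similar waits with readl_poll_timeout(). For comparison, the plain wait alone could be written as below (a sketch, not the driver's code; readl_poll_timeout() returns -ETIMEDOUT rather than -ETIME):

static int iris_vpu_wait_fw_init_sketch(struct iris_core *core)
{
	u32 ctrl_status;

	/* ~1000 iterations of 50-100 us in the open-coded loop, so a ~100 ms budget */
	return readl_poll_timeout(core->reg_base + CTRL_STATUS, ctrl_status,
				  ctrl_status, 50, 100 * USEC_PER_MSEC);
}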
diff --git a/drivers/media/platform/qcom/iris/iris_vpu_common.h b/drivers/media/platform/qcom/iris/iris_vpu_common.h
new file mode 100644
index 000000000000..63fa1fa5a498
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_vpu_common.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_VPU_COMMON_H__
+#define __IRIS_VPU_COMMON_H__
+
+struct iris_core;
+
+extern const struct vpu_ops iris_vpu2_ops;
+extern const struct vpu_ops iris_vpu3_ops;
+
+struct vpu_ops {
+ void (*power_off_hw)(struct iris_core *core);
+ u64 (*calc_freq)(struct iris_inst *inst, size_t data_size);
+};
+
+int iris_vpu_boot_firmware(struct iris_core *core);
+void iris_vpu_raise_interrupt(struct iris_core *core);
+void iris_vpu_clear_interrupt(struct iris_core *core);
+int iris_vpu_watchdog(struct iris_core *core, u32 intr_status);
+int iris_vpu_prepare_pc(struct iris_core *core);
+int iris_vpu_power_on(struct iris_core *core);
+void iris_vpu_power_off_hw(struct iris_core *core);
+void iris_vpu_power_off(struct iris_core *core);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_vpu_register_defines.h b/drivers/media/platform/qcom/iris/iris_vpu_register_defines.h
new file mode 100644
index 000000000000..fe8a39e5e5a3
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_vpu_register_defines.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_VPU_REGISTER_DEFINES_H__
+#define __IRIS_VPU_REGISTER_DEFINES_H__
+
+#define VCODEC_BASE_OFFS 0x00000000
+#define CPU_BASE_OFFS 0x000A0000
+#define WRAPPER_BASE_OFFS 0x000B0000
+
+#define CPU_CS_BASE_OFFS (CPU_BASE_OFFS)
+
+#define WRAPPER_CORE_POWER_STATUS (WRAPPER_BASE_OFFS + 0x80)
+
+#endif
diff --git a/drivers/media/platform/qcom/venus/Kconfig b/drivers/media/platform/qcom/venus/Kconfig
index bc2e410b29cb..ffb731ecd48c 100644
--- a/drivers/media/platform/qcom/venus/Kconfig
+++ b/drivers/media/platform/qcom/venus/Kconfig
@@ -2,7 +2,7 @@ config VIDEO_QCOM_VENUS
tristate "Qualcomm Venus V4L2 encoder/decoder driver"
depends on V4L_MEM2MEM_DRIVERS
depends on VIDEO_DEV && QCOM_SMEM
- depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
+ depends on (ARCH_QCOM && ARM64 && IOMMU_API) || COMPILE_TEST
select OF_DYNAMIC if ARCH_QCOM
select QCOM_MDT_LOADER if ARCH_QCOM
select QCOM_SCM
diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
index 3df241dc3a11..1b3db2caa99f 100644
--- a/drivers/media/platform/qcom/venus/hfi_parser.c
+++ b/drivers/media/platform/qcom/venus/hfi_parser.c
@@ -19,6 +19,8 @@ static void init_codecs(struct venus_core *core)
struct hfi_plat_caps *caps = core->caps, *cap;
unsigned long bit;
+ core->codecs_count = 0;
+
if (hweight_long(core->dec_codecs) + hweight_long(core->enc_codecs) > MAX_CODEC_NUM)
return;
@@ -62,7 +64,7 @@ fill_buf_mode(struct hfi_plat_caps *cap, const void *data, unsigned int num)
cap->cap_bufs_mode_dynamic = true;
}
-static void
+static int
parse_alloc_mode(struct venus_core *core, u32 codecs, u32 domain, void *data)
{
struct hfi_buffer_alloc_mode_supported *mode = data;
@@ -70,7 +72,7 @@ parse_alloc_mode(struct venus_core *core, u32 codecs, u32 domain, void *data)
u32 *type;
if (num_entries > MAX_ALLOC_MODE_ENTRIES)
- return;
+ return -EINVAL;
type = mode->data;
@@ -82,6 +84,8 @@ parse_alloc_mode(struct venus_core *core, u32 codecs, u32 domain, void *data)
type++;
}
+
+ return sizeof(*mode);
}
static void fill_profile_level(struct hfi_plat_caps *cap, const void *data,
@@ -96,7 +100,7 @@ static void fill_profile_level(struct hfi_plat_caps *cap, const void *data,
cap->num_pl += num;
}
-static void
+static int
parse_profile_level(struct venus_core *core, u32 codecs, u32 domain, void *data)
{
struct hfi_profile_level_supported *pl = data;
@@ -104,12 +108,14 @@ parse_profile_level(struct venus_core *core, u32 codecs, u32 domain, void *data)
struct hfi_profile_level pl_arr[HFI_MAX_PROFILE_COUNT] = {};
if (pl->profile_count > HFI_MAX_PROFILE_COUNT)
- return;
+ return -EINVAL;
memcpy(pl_arr, proflevel, pl->profile_count * sizeof(*proflevel));
for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain,
fill_profile_level, pl_arr, pl->profile_count);
+
+ return pl->profile_count * sizeof(*proflevel) + sizeof(u32);
}
static void
@@ -124,7 +130,7 @@ fill_caps(struct hfi_plat_caps *cap, const void *data, unsigned int num)
cap->num_caps += num;
}
-static void
+static int
parse_caps(struct venus_core *core, u32 codecs, u32 domain, void *data)
{
struct hfi_capabilities *caps = data;
@@ -133,12 +139,14 @@ parse_caps(struct venus_core *core, u32 codecs, u32 domain, void *data)
struct hfi_capability caps_arr[MAX_CAP_ENTRIES] = {};
if (num_caps > MAX_CAP_ENTRIES)
- return;
+ return -EINVAL;
memcpy(caps_arr, cap, num_caps * sizeof(*cap));
for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain,
fill_caps, caps_arr, num_caps);
+
+ return sizeof(*caps);
}
static void fill_raw_fmts(struct hfi_plat_caps *cap, const void *fmts,
@@ -153,7 +161,7 @@ static void fill_raw_fmts(struct hfi_plat_caps *cap, const void *fmts,
cap->num_fmts += num_fmts;
}
-static void
+static int
parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data)
{
struct hfi_uncompressed_format_supported *fmt = data;
@@ -162,7 +170,8 @@ parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data)
struct raw_formats rawfmts[MAX_FMT_ENTRIES] = {};
u32 entries = fmt->format_entries;
unsigned int i = 0;
- u32 num_planes;
+ u32 num_planes = 0;
+ u32 size;
while (entries) {
num_planes = pinfo->num_planes;
@@ -172,7 +181,7 @@ parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data)
i++;
if (i >= MAX_FMT_ENTRIES)
- return;
+ return -EINVAL;
if (pinfo->num_planes > MAX_PLANES)
break;
@@ -184,9 +193,13 @@ parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data)
for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain,
fill_raw_fmts, rawfmts, i);
+ size = fmt->format_entries * (sizeof(*constr) * num_planes + 2 * sizeof(u32))
+ + 2 * sizeof(u32);
+
+ return size;
}
-static void parse_codecs(struct venus_core *core, void *data)
+static int parse_codecs(struct venus_core *core, void *data)
{
struct hfi_codec_supported *codecs = data;
@@ -198,21 +211,27 @@ static void parse_codecs(struct venus_core *core, void *data)
core->dec_codecs &= ~HFI_VIDEO_CODEC_SPARK;
core->enc_codecs &= ~HFI_VIDEO_CODEC_HEVC;
}
+
+ return sizeof(*codecs);
}
-static void parse_max_sessions(struct venus_core *core, const void *data)
+static int parse_max_sessions(struct venus_core *core, const void *data)
{
const struct hfi_max_sessions_supported *sessions = data;
core->max_sessions_supported = sessions->max_sessions;
+
+ return sizeof(*sessions);
}
-static void parse_codecs_mask(u32 *codecs, u32 *domain, void *data)
+static int parse_codecs_mask(u32 *codecs, u32 *domain, void *data)
{
struct hfi_codec_mask_supported *mask = data;
*codecs = mask->codecs;
*domain = mask->video_domains;
+
+ return sizeof(*mask);
}
static void parser_init(struct venus_inst *inst, u32 *codecs, u32 *domain)
@@ -281,8 +300,9 @@ static int hfi_platform_parser(struct venus_core *core, struct venus_inst *inst)
u32 hfi_parser(struct venus_core *core, struct venus_inst *inst, void *buf,
u32 size)
{
- unsigned int words_count = size >> 2;
- u32 *word = buf, *data, codecs = 0, domain = 0;
+ u32 *words = buf, *payload, codecs = 0, domain = 0;
+ u32 *frame_size = buf + size;
+ u32 rem_bytes = size;
int ret;
ret = hfi_platform_parser(core, inst);
@@ -299,38 +319,66 @@ u32 hfi_parser(struct venus_core *core, struct venus_inst *inst, void *buf,
memset(core->caps, 0, sizeof(core->caps));
}
- while (words_count) {
- data = word + 1;
+ while (words < frame_size) {
+ payload = words + 1;
- switch (*word) {
+ switch (*words) {
case HFI_PROPERTY_PARAM_CODEC_SUPPORTED:
- parse_codecs(core, data);
+ if (rem_bytes <= sizeof(struct hfi_codec_supported))
+ return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
+
+ ret = parse_codecs(core, payload);
+ if (ret < 0)
+ return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
+
init_codecs(core);
break;
case HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED:
- parse_max_sessions(core, data);
+ if (rem_bytes <= sizeof(struct hfi_max_sessions_supported))
+ return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
+
+ ret = parse_max_sessions(core, payload);
break;
case HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED:
- parse_codecs_mask(&codecs, &domain, data);
+ if (rem_bytes <= sizeof(struct hfi_codec_mask_supported))
+ return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
+
+ ret = parse_codecs_mask(&codecs, &domain, payload);
break;
case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED:
- parse_raw_formats(core, codecs, domain, data);
+ if (rem_bytes <= sizeof(struct hfi_uncompressed_format_supported))
+ return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
+
+ ret = parse_raw_formats(core, codecs, domain, payload);
break;
case HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED:
- parse_caps(core, codecs, domain, data);
+ if (rem_bytes <= sizeof(struct hfi_capabilities))
+ return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
+
+ ret = parse_caps(core, codecs, domain, payload);
break;
case HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED:
- parse_profile_level(core, codecs, domain, data);
+ if (rem_bytes <= sizeof(struct hfi_profile_level_supported))
+ return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
+
+ ret = parse_profile_level(core, codecs, domain, payload);
break;
case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE_SUPPORTED:
- parse_alloc_mode(core, codecs, domain, data);
+ if (rem_bytes <= sizeof(struct hfi_buffer_alloc_mode_supported))
+ return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
+
+ ret = parse_alloc_mode(core, codecs, domain, payload);
break;
default:
+ ret = sizeof(u32);
break;
}
- word++;
- words_count--;
+ if (ret < 0)
+ return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
+
+ words += ret / sizeof(u32);
+ rem_bytes -= ret;
}
if (!core->max_sessions_supported)
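The reworked hfi_parser() loop treats the property payload as a sequence of 32-bit words: every parse_* helper now returns the number of bytes it consumed (or a negative error), the loop bounds-checks the remaining bytes before reading a payload, and then advances by the returned amount. Reduced to a self-contained sketch with a placeholder property (none of these names exist in the venus code):

struct demo_prop {
	u32 a;
	u32 b;
};

static int parse_demo_prop(const u32 *payload)
{
	/* consume one struct demo_prop worth of data */
	return sizeof(struct demo_prop);	/* bytes consumed, like parse_codecs() etc. */
}

static int parse_props_sketch(u32 *words, u32 size)
{
	u32 *end = words + size / sizeof(u32);
	u32 rem_bytes = size;
	int ret;

	while (words < end) {
		u32 *payload = words + 1;

		switch (*words) {
		case 0x1001:				/* placeholder property ID */
			if (rem_bytes <= sizeof(struct demo_prop))
				return -EINVAL;		/* truncated payload */
			ret = parse_demo_prop(payload);
			break;
		default:
			ret = sizeof(u32);		/* unknown key: skip one word */
			break;
		}

		if (ret < 0)
			return ret;

		words += ret / sizeof(u32);
		rem_bytes -= ret;
	}

	return 0;
}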
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
index a9167867063c..b5f2ea879950 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus.c
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -187,6 +187,9 @@ static int venus_write_queue(struct venus_hfi_device *hdev,
/* ensure rd/wr indices are read from memory */
rmb();
+ if (qsize > IFACEQ_QUEUE_SIZE / 4)
+ return -EINVAL;
+
if (wr_idx >= rd_idx)
empty_space = qsize - (wr_idx - rd_idx);
else
@@ -255,6 +258,9 @@ static int venus_read_queue(struct venus_hfi_device *hdev,
wr_idx = qhdr->write_idx;
qsize = qhdr->q_size;
+ if (qsize > IFACEQ_QUEUE_SIZE / 4)
+ return -EINVAL;
+
/* make sure data is valid before using it */
rmb();
@@ -1035,18 +1041,26 @@ static void venus_sfr_print(struct venus_hfi_device *hdev)
{
struct device *dev = hdev->core->dev;
struct hfi_sfr *sfr = hdev->sfr.kva;
+ u32 size;
void *p;
if (!sfr)
return;
- p = memchr(sfr->data, '\0', sfr->buf_size);
+ size = sfr->buf_size;
+ if (!size)
+ return;
+
+ if (size > ALIGNED_SFR_SIZE)
+ size = ALIGNED_SFR_SIZE;
+
+ p = memchr(sfr->data, '\0', size);
/*
* SFR isn't guaranteed to be NULL terminated since SYS_ERROR indicates
* that Venus is in the process of crashing.
*/
if (!p)
- sfr->data[sfr->buf_size - 1] = '\0';
+ sfr->data[size - 1] = '\0';
dev_err_ratelimited(dev, "SFR message from FW: %s\n", sfr->data);
}
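The venus_sfr_print() change is a defensive clamp: sfr->buf_size is written by the firmware into shared memory, so it is bounded by the host-side allocation before being used to limit memchr() or to NUL-terminate the string. The same check could be written as size = min_t(u32, sfr->buf_size, ALIGNED_SFR_SIZE) (an equivalent sketch, not the patch's wording), keeping the early return for an empty report.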
diff --git a/drivers/media/platform/qcom/venus/venc_ctrls.c b/drivers/media/platform/qcom/venus/venc_ctrls.c
index 51801a962ed2..4d36c44f9d44 100644
--- a/drivers/media/platform/qcom/venus/venc_ctrls.c
+++ b/drivers/media/platform/qcom/venus/venc_ctrls.c
@@ -662,11 +662,16 @@ int venc_ctrl_init(struct venus_inst *inst)
v4l2_ctrl_new_std_compound(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_COLORIMETRY_HDR10_CLL_INFO,
- v4l2_ctrl_ptr_create(&p_hdr10_cll));
+ v4l2_ctrl_ptr_create(&p_hdr10_cll),
+ v4l2_ctrl_ptr_create(NULL),
+ v4l2_ctrl_ptr_create(NULL));
v4l2_ctrl_new_std_compound(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY,
- v4l2_ctrl_ptr_create((void *)&p_hdr10_mastering));
+ v4l2_ctrl_ptr_create((void *)&p_hdr10_mastering),
+ v4l2_ctrl_ptr_create(NULL),
+ v4l2_ctrl_ptr_create(NULL));
+
v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE,
diff --git a/drivers/media/platform/raspberrypi/rp1-cfe/cfe.c b/drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
index 12660087b12f..69a5f23e7954 100644
--- a/drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
+++ b/drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
@@ -1102,6 +1102,8 @@ static void cfe_buffer_queue(struct vb2_buffer *vb)
static s64 cfe_get_source_link_freq(struct cfe_device *cfe)
{
+ struct media_pad *src_pad =
+ &cfe->source_sd->entity.pads[cfe->source_pad];
struct v4l2_subdev_state *state;
s64 link_freq;
u32 bpp;
@@ -1136,7 +1138,7 @@ static s64 cfe_get_source_link_freq(struct cfe_device *cfe)
bpp = 0;
}
- link_freq = v4l2_get_link_freq(cfe->source_sd->ctrl_handler, bpp,
+ link_freq = v4l2_get_link_freq(src_pad, bpp,
2 * cfe->csi2.dphy.active_lanes);
if (link_freq < 0)
cfe_err(cfe, "failed to get link freq for subdev '%s'\n",
@@ -1315,8 +1317,6 @@ static void cfe_stop_streaming(struct vb2_queue *vq)
}
static const struct vb2_ops cfe_video_qops = {
- .wait_prepare = vb2_ops_wait_prepare,
- .wait_finish = vb2_ops_wait_finish,
.queue_setup = cfe_queue_setup,
.buf_prepare = cfe_buffer_prepare,
.buf_queue = cfe_buffer_queue,
diff --git a/drivers/media/platform/renesas/rcar-csi2.c b/drivers/media/platform/renesas/rcar-csi2.c
index 0a53dd47d7bf..38a3149f9724 100644
--- a/drivers/media/platform/renesas/rcar-csi2.c
+++ b/drivers/media/platform/renesas/rcar-csi2.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
@@ -15,6 +16,7 @@
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sys_soc.h>
+#include <linux/units.h>
#include <media/mipi-csi2.h>
#include <media/v4l2-ctrls.h>
@@ -637,6 +639,10 @@ static const struct rcar_csi2_format rcar_csi2_formats[] = {
.datatype = MIPI_CSI2_DT_YUV422_8B,
.bpp = 20,
}, {
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .datatype = MIPI_CSI2_DT_RAW8,
+ .bpp = 8,
+ }, {
.code = MEDIA_BUS_FMT_Y10_1X10,
.datatype = MIPI_CSI2_DT_RAW10,
.bpp = 10,
@@ -657,9 +663,37 @@ static const struct rcar_csi2_format rcar_csi2_formats[] = {
.datatype = MIPI_CSI2_DT_RAW8,
.bpp = 8,
}, {
- .code = MEDIA_BUS_FMT_Y8_1X8,
- .datatype = MIPI_CSI2_DT_RAW8,
- .bpp = 8,
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .datatype = MIPI_CSI2_DT_RAW10,
+ .bpp = 10,
+ }, {
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .datatype = MIPI_CSI2_DT_RAW10,
+ .bpp = 10,
+ }, {
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .datatype = MIPI_CSI2_DT_RAW10,
+ .bpp = 10,
+ }, {
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .datatype = MIPI_CSI2_DT_RAW10,
+ .bpp = 10,
+ }, {
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .datatype = MIPI_CSI2_DT_RAW12,
+ .bpp = 12,
+ }, {
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .datatype = MIPI_CSI2_DT_RAW12,
+ .bpp = 12,
+ }, {
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .datatype = MIPI_CSI2_DT_RAW12,
+ .bpp = 12,
+ }, {
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .datatype = MIPI_CSI2_DT_RAW12,
+ .bpp = 12,
},
};
@@ -921,7 +955,7 @@ static int rcsi2_calc_mbps(struct rcar_csi2 *priv, unsigned int bpp,
unsigned int lanes)
{
struct v4l2_subdev *source;
- struct v4l2_ctrl *ctrl;
+ s64 freq;
u64 mbps;
if (!priv->remote)
@@ -929,21 +963,17 @@ static int rcsi2_calc_mbps(struct rcar_csi2 *priv, unsigned int bpp,
source = priv->remote;
- /* Read the pixel rate control from remote. */
- ctrl = v4l2_ctrl_find(source->ctrl_handler, V4L2_CID_PIXEL_RATE);
- if (!ctrl) {
- dev_err(priv->dev, "no pixel rate control in subdev %s\n",
- source->name);
- return -EINVAL;
+ freq = v4l2_get_link_freq(source->ctrl_handler, bpp, 2 * lanes);
+ if (freq < 0) {
+ int ret = (int)freq;
+
+ dev_err(priv->dev, "failed to get link freq for %s: %d\n",
+ source->name, ret);
+
+ return ret;
}
- /*
- * Calculate the phypll in mbps.
- * link_freq = (pixel_rate * bits_per_sample) / (2 * nr_of_lanes)
- * bps = link_freq * 2
- */
- mbps = v4l2_ctrl_g_ctrl_int64(ctrl) * bpp;
- do_div(mbps, lanes * 1000000);
+ mbps = div_u64(freq * 2, MEGA);
/* Adjust for C-PHY, divide by 2.8. */
if (priv->cphy)
@@ -1547,7 +1577,8 @@ static int rcsi2_start(struct rcar_csi2 *priv, struct v4l2_subdev_state *state)
return ret;
}
- ret = v4l2_subdev_call(priv->remote, video, s_stream, 1);
+ ret = v4l2_subdev_enable_streams(priv->remote, priv->remote_pad,
+ BIT_ULL(0));
if (ret) {
rcsi2_enter_standby(priv);
return ret;
@@ -1559,31 +1590,50 @@ static int rcsi2_start(struct rcar_csi2 *priv, struct v4l2_subdev_state *state)
static void rcsi2_stop(struct rcar_csi2 *priv)
{
rcsi2_enter_standby(priv);
- v4l2_subdev_call(priv->remote, video, s_stream, 0);
+ v4l2_subdev_disable_streams(priv->remote, priv->remote_pad, BIT_ULL(0));
}
-static int rcsi2_s_stream(struct v4l2_subdev *sd, int enable)
+static int rcsi2_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 source_pad,
+ u64 source_streams_mask)
{
struct rcar_csi2 *priv = sd_to_csi2(sd);
- struct v4l2_subdev_state *state;
int ret = 0;
+ if (source_streams_mask != 1)
+ return -EINVAL;
+
if (!priv->remote)
return -ENODEV;
- state = v4l2_subdev_lock_and_get_active_state(&priv->subdev);
-
- if (enable && priv->stream_count == 0) {
+ if (priv->stream_count == 0) {
ret = rcsi2_start(priv, state);
if (ret)
- goto out;
- } else if (!enable && priv->stream_count == 1) {
- rcsi2_stop(priv);
+ return ret;
}
- priv->stream_count += enable ? 1 : -1;
-out:
- v4l2_subdev_unlock_state(state);
+ priv->stream_count += 1;
+
+ return ret;
+}
+
+static int rcsi2_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 source_pad, u64 source_streams_mask)
+{
+ struct rcar_csi2 *priv = sd_to_csi2(sd);
+ int ret = 0;
+
+ if (source_streams_mask != 1)
+ return -EINVAL;
+
+ if (!priv->remote)
+ return -ENODEV;
+
+ if (priv->stream_count == 1)
+ rcsi2_stop(priv);
+
+ priv->stream_count -= 1;
return ret;
}
@@ -1610,17 +1660,15 @@ static int rcsi2_set_pad_format(struct v4l2_subdev *sd,
return 0;
}
-static const struct v4l2_subdev_video_ops rcar_csi2_video_ops = {
- .s_stream = rcsi2_s_stream,
-};
-
static const struct v4l2_subdev_pad_ops rcar_csi2_pad_ops = {
+ .enable_streams = rcsi2_enable_streams,
+ .disable_streams = rcsi2_disable_streams,
+
.set_fmt = rcsi2_set_pad_format,
.get_fmt = v4l2_subdev_get_fmt,
};
static const struct v4l2_subdev_ops rcar_csi2_subdev_ops = {
- .video = &rcar_csi2_video_ops,
.pad = &rcar_csi2_pad_ops,
};
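A worked example for the new rate derivation (sensor numbers are hypothetical): a RAW10 source delivering 600 Mpix/s over four D-PHY lanes yields link_freq = 600e6 * 10 / (2 * 4) = 750 MHz from v4l2_get_link_freq(), which uses the sensor's LINK_FREQ control when present and otherwise derives the value from its pixel rate. Since D-PHY data is double data rate, mbps = div_u64(750e6 * 2, MEGA) = 1500 Mbps per lane, the value the PHY PLL is programmed for; on C-PHY it is further divided by 2.8.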
diff --git a/drivers/media/platform/renesas/rcar-isp.c b/drivers/media/platform/renesas/rcar-isp.c
index c515278e3be5..4bc89d4757fa 100644
--- a/drivers/media/platform/renesas/rcar-isp.c
+++ b/drivers/media/platform/renesas/rcar-isp.c
@@ -76,6 +76,54 @@ static const struct rcar_isp_format rcar_isp_formats[] = {
.code = MEDIA_BUS_FMT_YUYV10_2X10,
.datatype = MIPI_CSI2_DT_YUV422_8B,
.procmode = 0x0c,
+ }, {
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .datatype = MIPI_CSI2_DT_RAW8,
+ .procmode = 0x00,
+ }, {
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .datatype = MIPI_CSI2_DT_RAW8,
+ .procmode = 0x00,
+ }, {
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .datatype = MIPI_CSI2_DT_RAW8,
+ .procmode = 0x00,
+ }, {
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .datatype = MIPI_CSI2_DT_RAW8,
+ .procmode = 0x00,
+ }, {
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .datatype = MIPI_CSI2_DT_RAW10,
+ .procmode = 0x01,
+ }, {
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .datatype = MIPI_CSI2_DT_RAW10,
+ .procmode = 0x01,
+ }, {
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .datatype = MIPI_CSI2_DT_RAW10,
+ .procmode = 0x01,
+ }, {
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .datatype = MIPI_CSI2_DT_RAW10,
+ .procmode = 0x01,
+ }, {
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .datatype = MIPI_CSI2_DT_RAW12,
+ .procmode = 0x02,
+ }, {
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .datatype = MIPI_CSI2_DT_RAW12,
+ .procmode = 0x02,
+ }, {
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .datatype = MIPI_CSI2_DT_RAW12,
+ .procmode = 0x02,
+ }, {
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .datatype = MIPI_CSI2_DT_RAW12,
+ .procmode = 0x02,
},
};
@@ -121,9 +169,8 @@ struct rcar_isp {
struct v4l2_async_notifier notifier;
struct v4l2_subdev *remote;
+ unsigned int remote_pad;
- struct mutex lock; /* Protects mf and stream_count. */
- struct v4l2_mbus_framefmt mf;
int stream_count;
};
@@ -170,14 +217,19 @@ static void risp_power_off(struct rcar_isp *isp)
pm_runtime_put(isp->dev);
}
-static int risp_start(struct rcar_isp *isp)
+static int risp_start(struct rcar_isp *isp, struct v4l2_subdev_state *state)
{
+ const struct v4l2_mbus_framefmt *fmt;
const struct rcar_isp_format *format;
unsigned int vc;
u32 sel_csi = 0;
int ret;
- format = risp_code_to_fmt(isp->mf.code);
+ fmt = v4l2_subdev_state_get_format(state, RCAR_ISP_SINK);
+ if (!fmt)
+ return -EINVAL;
+
+ format = risp_code_to_fmt(fmt->code);
if (!format) {
dev_err(isp->dev, "Unsupported bus format\n");
return -EINVAL;
@@ -219,7 +271,8 @@ static int risp_start(struct rcar_isp *isp)
/* Start ISP. */
risp_write(isp, ISPSTART_REG, ISPSTART_START);
- ret = v4l2_subdev_call(isp->remote, video, s_stream, 1);
+ ret = v4l2_subdev_enable_streams(isp->remote, isp->remote_pad,
+ BIT_ULL(0));
if (ret)
risp_power_off(isp);
@@ -228,7 +281,7 @@ static int risp_start(struct rcar_isp *isp)
static void risp_stop(struct rcar_isp *isp)
{
- v4l2_subdev_call(isp->remote, video, s_stream, 0);
+ v4l2_subdev_disable_streams(isp->remote, isp->remote_pad, BIT_ULL(0));
/* Stop ISP. */
risp_write(isp, ISPSTART_REG, ISPSTART_STOP);
@@ -236,87 +289,79 @@ static void risp_stop(struct rcar_isp *isp)
risp_power_off(isp);
}
-static int risp_s_stream(struct v4l2_subdev *sd, int enable)
+static int risp_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 source_pad,
+ u64 source_streams_mask)
{
struct rcar_isp *isp = sd_to_isp(sd);
int ret = 0;
- mutex_lock(&isp->lock);
+ if (source_streams_mask != 1)
+ return -EINVAL;
- if (!isp->remote) {
- ret = -ENODEV;
- goto out;
- }
+ if (!isp->remote)
+ return -ENODEV;
- if (enable && isp->stream_count == 0) {
- ret = risp_start(isp);
+ if (isp->stream_count == 0) {
+ ret = risp_start(isp, state);
if (ret)
- goto out;
- } else if (!enable && isp->stream_count == 1) {
- risp_stop(isp);
+ return ret;
}
- isp->stream_count += enable ? 1 : -1;
-out:
- mutex_unlock(&isp->lock);
+ isp->stream_count += 1;
return ret;
}
-static const struct v4l2_subdev_video_ops risp_video_ops = {
- .s_stream = risp_s_stream,
-};
-
-static int risp_set_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *format)
+static int risp_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 source_pad,
+ u64 source_streams_mask)
{
struct rcar_isp *isp = sd_to_isp(sd);
- struct v4l2_mbus_framefmt *framefmt;
- mutex_lock(&isp->lock);
+ if (source_streams_mask != 1)
+ return -EINVAL;
- if (!risp_code_to_fmt(format->format.code))
- format->format.code = rcar_isp_formats[0].code;
+ if (!isp->remote)
+ return -ENODEV;
- if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
- isp->mf = format->format;
- } else {
- framefmt = v4l2_subdev_state_get_format(sd_state, 0);
- *framefmt = format->format;
- }
+ if (isp->stream_count == 1)
+ risp_stop(isp);
- mutex_unlock(&isp->lock);
+ isp->stream_count -= 1;
return 0;
}
-static int risp_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
+static int risp_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format)
{
- struct rcar_isp *isp = sd_to_isp(sd);
+ struct v4l2_mbus_framefmt *framefmt;
- mutex_lock(&isp->lock);
+ if (format->pad > RCAR_ISP_SINK)
+ return v4l2_subdev_get_fmt(sd, state, format);
- if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- format->format = isp->mf;
- else
- format->format = *v4l2_subdev_state_get_format(sd_state, 0);
+ if (!risp_code_to_fmt(format->format.code))
+ format->format.code = rcar_isp_formats[0].code;
- mutex_unlock(&isp->lock);
+ for (unsigned int i = 0; i < RCAR_ISP_NUM_PADS; i++) {
+ framefmt = v4l2_subdev_state_get_format(state, i);
+ *framefmt = format->format;
+ }
return 0;
}
static const struct v4l2_subdev_pad_ops risp_pad_ops = {
+ .enable_streams = risp_enable_streams,
+ .disable_streams = risp_disable_streams,
.set_fmt = risp_set_pad_format,
- .get_fmt = risp_get_pad_format,
+ .get_fmt = v4l2_subdev_get_fmt,
.link_validate = v4l2_subdev_link_validate_default,
};
static const struct v4l2_subdev_ops rcar_isp_subdev_ops = {
- .video = &risp_video_ops,
.pad = &risp_pad_ops,
};
@@ -339,6 +384,7 @@ static int risp_notify_bound(struct v4l2_async_notifier *notifier,
}
isp->remote = subdev;
+ isp->remote_pad = pad;
dev_dbg(isp->dev, "Bound %s pad: %d\n", subdev->name, pad);
@@ -449,12 +495,10 @@ static int risp_probe(struct platform_device *pdev)
isp->dev = &pdev->dev;
- mutex_init(&isp->lock);
-
ret = risp_probe_resources(isp, pdev);
if (ret) {
dev_err(isp->dev, "Failed to get resources\n");
- goto error_mutex;
+ return ret;
}
platform_set_drvdata(pdev, isp);
@@ -485,20 +529,25 @@ static int risp_probe(struct platform_device *pdev)
if (ret)
goto error_notifier;
+ ret = v4l2_subdev_init_finalize(&isp->subdev);
+ if (ret)
+ goto error_notifier;
+
ret = v4l2_async_register_subdev(&isp->subdev);
if (ret < 0)
- goto error_notifier;
+ goto error_subdev;
dev_info(isp->dev, "Using CSI-2 input: %u\n", isp->csi_input);
return 0;
+
+error_subdev:
+ v4l2_subdev_cleanup(&isp->subdev);
error_notifier:
v4l2_async_nf_unregister(&isp->notifier);
v4l2_async_nf_cleanup(&isp->notifier);
error_pm:
pm_runtime_disable(&pdev->dev);
-error_mutex:
- mutex_destroy(&isp->lock);
return ret;
}
@@ -511,10 +560,9 @@ static void risp_remove(struct platform_device *pdev)
v4l2_async_nf_cleanup(&isp->notifier);
v4l2_async_unregister_subdev(&isp->subdev);
+ v4l2_subdev_cleanup(&isp->subdev);
pm_runtime_disable(&pdev->dev);
-
- mutex_destroy(&isp->lock);
}
static struct platform_driver rcar_isp_driver = {
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
index 8773998101ff..8de871240440 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
@@ -1397,7 +1397,7 @@ static int rvin_set_stream(struct rvin_dev *vin, int on)
if (!on) {
video_device_pipeline_stop(&vin->vdev);
- return v4l2_subdev_call(sd, video, s_stream, 0);
+ return v4l2_subdev_disable_streams(sd, pad->index, BIT_ULL(0));
}
ret = rvin_mc_validate_format(vin, sd, pad);
@@ -1408,7 +1408,7 @@ static int rvin_set_stream(struct rvin_dev *vin, int on)
if (ret)
return ret;
- ret = v4l2_subdev_call(sd, video, s_stream, 1);
+ ret = v4l2_subdev_enable_streams(sd, pad->index, BIT_ULL(0));
if (ret == -ENOIOCTLCMD)
ret = 0;
if (ret)
diff --git a/drivers/media/platform/rockchip/rga/rga-buf.c b/drivers/media/platform/rockchip/rga/rga-buf.c
index 4396348811c8..730bdf98565a 100644
--- a/drivers/media/platform/rockchip/rga/rga-buf.c
+++ b/drivers/media/platform/rockchip/rga/rga-buf.c
@@ -119,6 +119,13 @@ static int rga_buf_prepare(struct vb2_buffer *vb)
if (IS_ERR(f))
return PTR_ERR(f);
+ if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE)
+ return -EINVAL;
+ }
+
for (i = 0; i < vb->num_planes; i++) {
vb2_set_plane_payload(vb, i, f->pix.plane_fmt[i].sizeimage);
diff --git a/drivers/media/platform/rockchip/rga/rga-hw.c b/drivers/media/platform/rockchip/rga/rga-hw.c
index bf55beec0fac..43ed742a1649 100644
--- a/drivers/media/platform/rockchip/rga/rga-hw.c
+++ b/drivers/media/platform/rockchip/rga/rga-hw.c
@@ -376,7 +376,7 @@ static void rga_cmd_set_dst_info(struct rga_ctx *ctx,
* Configure the dest framebuffer base address with pixel offset.
*/
offsets = rga_get_addr_offset(&ctx->out, offset, dst_x, dst_y, dst_w, dst_h);
- dst_offset = rga_lookup_draw_pos(&offsets, mir_mode, rot_mode);
+ dst_offset = rga_lookup_draw_pos(&offsets, rot_mode, mir_mode);
dest[(RGA_DST_Y_RGB_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
dst_offset->y_off;
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c
index 73f7af674c01..0c636090d723 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c
@@ -549,8 +549,9 @@ static void s5p_mfc_enc_calc_src_size_v6(struct s5p_mfc_ctx *ctx)
case V4L2_PIX_FMT_NV21M:
ctx->stride[0] = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN_V6);
ctx->stride[1] = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN_V6);
- ctx->luma_size = ctx->stride[0] * ALIGN(ctx->img_height, 16);
- ctx->chroma_size = ctx->stride[0] * ALIGN(ctx->img_height / 2, 16);
+ ctx->luma_size = ALIGN(ctx->stride[0] * ALIGN(ctx->img_height, 16), 256);
+ ctx->chroma_size = ALIGN(ctx->stride[0] * ALIGN(ctx->img_height / 2, 16),
+ 256);
break;
case V4L2_PIX_FMT_YUV420M:
case V4L2_PIX_FMT_YVU420M:
diff --git a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
index 7b3a37957e3a..d151d2ed1f64 100644
--- a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
@@ -797,13 +797,12 @@ static int c8sectpfe_probe(struct platform_device *pdev)
}
tsin->i2c_adapter =
of_find_i2c_adapter_by_node(i2c_bus);
+ of_node_put(i2c_bus);
if (!tsin->i2c_adapter) {
dev_err(&pdev->dev, "No i2c adapter found\n");
- of_node_put(i2c_bus);
ret = -ENODEV;
goto err_node_put;
}
- of_node_put(i2c_bus);
/* Acquire reset GPIO and activate it */
tsin->rst_gpio = devm_fwnode_gpiod_get(dev,
diff --git a/drivers/media/platform/st/stm32/dma2d/dma2d.c b/drivers/media/platform/st/stm32/dma2d/dma2d.c
index b6c8400fb92d..48fa781aab06 100644
--- a/drivers/media/platform/st/stm32/dma2d/dma2d.c
+++ b/drivers/media/platform/st/stm32/dma2d/dma2d.c
@@ -490,7 +490,8 @@ static void device_run(void *prv)
dst->sequence = frm_cap->sequence++;
v4l2_m2m_buf_copy_metadata(src, dst, true);
- clk_enable(dev->gate);
+ if (clk_enable(dev->gate))
+ goto end;
dma2d_config_fg(dev, frm_out,
vb2_dma_contig_plane_dma_addr(&src->vb2_buf, 0));
diff --git a/drivers/media/platform/st/stm32/stm32-csi.c b/drivers/media/platform/st/stm32/stm32-csi.c
index 48941aae8c9b..b69048144cc1 100644
--- a/drivers/media/platform/st/stm32/stm32-csi.c
+++ b/drivers/media/platform/st/stm32/stm32-csi.c
@@ -325,7 +325,6 @@ static const struct stm32_csi_mbps_phy_reg snps_stm32mp25[] = {
{ .mbps = 2400, .hsfreqrange = 0x47, .osc_freq_target = 442 },
{ .mbps = 2450, .hsfreqrange = 0x48, .osc_freq_target = 451 },
{ .mbps = 2500, .hsfreqrange = 0x49, .osc_freq_target = 460 },
- { /* sentinel */ }
};
static const struct v4l2_mbus_framefmt fmt_default = {
@@ -358,7 +357,7 @@ static inline struct stm32_csi_dev *to_csidev(struct v4l2_subdev *sd)
static int stm32_csi_setup_lane_merger(struct stm32_csi_dev *csidev)
{
u32 lmcfgr = 0;
- int i;
+ unsigned int i;
for (i = 0; i < csidev->num_lanes; i++) {
if (!csidev->lanes[i] || csidev->lanes[i] > STM32_CSI_LANES_MAX) {
@@ -444,13 +443,15 @@ static void stm32_csi_phy_reg_write(struct stm32_csi_dev *csidev,
static int stm32_csi_start(struct stm32_csi_dev *csidev,
struct v4l2_subdev_state *state)
{
- const struct stm32_csi_mbps_phy_reg *phy_regs;
+ struct media_pad *src_pad =
+ &csidev->s_subdev->entity.pads[csidev->s_subdev_pad_nb];
+ const struct stm32_csi_mbps_phy_reg *phy_regs = NULL;
struct v4l2_mbus_framefmt *sink_fmt;
const struct stm32_csi_fmts *fmt;
unsigned long phy_clk_frate;
+ u32 lanes_ie, lanes_en;
unsigned int mbps;
- u32 lanes_ie = 0;
- u32 lanes_en = 0;
+ unsigned int i;
s64 link_freq;
int ret;
u32 ccfr;
@@ -465,7 +466,7 @@ static int stm32_csi_start(struct stm32_csi_dev *csidev,
if (!csidev->s_subdev)
return -EIO;
- link_freq = v4l2_get_link_freq(csidev->s_subdev->ctrl_handler,
+ link_freq = v4l2_get_link_freq(src_pad,
fmt->bpp, 2 * csidev->num_lanes);
if (link_freq < 0)
return link_freq;
@@ -474,11 +475,14 @@ static int stm32_csi_start(struct stm32_csi_dev *csidev,
mbps = div_s64(link_freq, 500000);
dev_dbg(csidev->dev, "Computed Mbps: %u\n", mbps);
- for (phy_regs = snps_stm32mp25; phy_regs->mbps != 0; phy_regs++)
- if (phy_regs->mbps >= mbps)
+ for (i = 0; i < ARRAY_SIZE(snps_stm32mp25); i++) {
+ if (snps_stm32mp25[i].mbps >= mbps) {
+ phy_regs = &snps_stm32mp25[i];
break;
+ }
+ }
- if (!phy_regs->mbps) {
+ if (!phy_regs) {
dev_err(csidev->dev, "Unsupported PHY speed (%u Mbps)", mbps);
return -ERANGE;
}
@@ -488,8 +492,8 @@ static int stm32_csi_start(struct stm32_csi_dev *csidev,
phy_regs->osc_freq_target);
/* Prepare lanes related configuration bits */
- lanes_ie |= STM32_CSI_SR1_DL0_ERRORS;
- lanes_en |= STM32_CSI_PCR_DL0EN;
+ lanes_ie = STM32_CSI_SR1_DL0_ERRORS;
+ lanes_en = STM32_CSI_PCR_DL0EN;
if (csidev->num_lanes == 2) {
lanes_ie |= STM32_CSI_SR1_DL1_ERRORS;
lanes_en |= STM32_CSI_PCR_DL1EN;
@@ -497,21 +501,19 @@ static int stm32_csi_start(struct stm32_csi_dev *csidev,
ret = pm_runtime_get_sync(csidev->dev);
if (ret < 0)
- return ret;
+ goto error_put;
/* Retrieve CSI2PHY clock rate to compute CCFR value */
phy_clk_frate = clk_get_rate(csidev->clks[STM32_CSI_CLK_CSI2PHY].clk);
if (!phy_clk_frate) {
- pm_runtime_put(csidev->dev);
dev_err(csidev->dev, "CSI2PHY clock rate invalid (0)\n");
- return ret;
+ ret = -EINVAL;
+ goto error_put;
}
ret = stm32_csi_setup_lane_merger(csidev);
- if (ret) {
- pm_runtime_put(csidev->dev);
- return ret;
- }
+ if (ret)
+ goto error_put;
/* Enable the CSI */
writel_relaxed(STM32_CSI_CR_CSIEN, csidev->base + STM32_CSI_CR);
@@ -567,6 +569,10 @@ static int stm32_csi_start(struct stm32_csi_dev *csidev,
writel_relaxed(0, csidev->base + STM32_CSI_PMCR);
return ret;
+
+error_put:
+ pm_runtime_put(csidev->dev);
+ return ret;
}
static void stm32_csi_stop(struct stm32_csi_dev *csidev)
@@ -591,20 +597,20 @@ static int stm32_csi_start_vc(struct stm32_csi_dev *csidev,
{
struct v4l2_mbus_framefmt *mbus_fmt;
const struct stm32_csi_fmts *fmt;
- u32 cfgr1 = 0;
- int ret = 0;
u32 status;
+ u32 cfgr1;
+ int ret;
mbus_fmt = v4l2_subdev_state_get_format(state, STM32_CSI_PAD_SOURCE);
fmt = stm32_csi_code_to_fmt(mbus_fmt->code);
/* If the mbus code is JPEG, don't enable filtering */
if (mbus_fmt->code == MEDIA_BUS_FMT_JPEG_1X8) {
- cfgr1 |= STM32_CSI_VCXCFGR1_ALLDT;
+ cfgr1 = STM32_CSI_VCXCFGR1_ALLDT;
cfgr1 |= fmt->input_fmt << STM32_CSI_VCXCFGR1_CDTFT_SHIFT;
dev_dbg(csidev->dev, "VC%d: enable AllDT mode\n", vc);
} else {
- cfgr1 |= fmt->datatype << STM32_CSI_VCXCFGR1_DT0_SHIFT;
+ cfgr1 = fmt->datatype << STM32_CSI_VCXCFGR1_DT0_SHIFT;
cfgr1 |= fmt->input_fmt << STM32_CSI_VCXCFGR1_DT0FT_SHIFT;
cfgr1 |= STM32_CSI_VCXCFGR1_DT0EN;
dev_dbg(csidev->dev, "VC%d: enable DT0(0x%x)/DT0FT(0x%x)\n",
@@ -630,8 +636,8 @@ static int stm32_csi_start_vc(struct stm32_csi_dev *csidev,
static int stm32_csi_stop_vc(struct stm32_csi_dev *csidev, u32 vc)
{
- int ret = 0;
u32 status;
+ int ret;
/* Stop the Virtual Channel */
writel_relaxed(STM32_CSI_CR_VCXSTOP(vc) | STM32_CSI_CR_CSIEN,
@@ -690,25 +696,27 @@ static int stm32_csi_enable_streams(struct v4l2_subdev *sd,
ret = stm32_csi_start_vc(csidev, state, 0);
if (ret) {
dev_err(csidev->dev, "Failed to start VC0\n");
- stm32_csi_stop(csidev);
- return ret;
+ goto failed_start_vc;
}
ret = v4l2_subdev_enable_streams(csidev->s_subdev,
csidev->s_subdev_pad_nb, BIT_ULL(0));
- if (ret) {
- stm32_csi_stop_vc(csidev, 0);
- stm32_csi_stop(csidev);
- return ret;
- }
+ if (ret)
+ goto failed_enable_streams;
return 0;
+
+failed_enable_streams:
+ stm32_csi_stop_vc(csidev, 0);
+failed_start_vc:
+ stm32_csi_stop(csidev);
+ return ret;
}
static int stm32_csi_init_state(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state)
{
- int i;
+ unsigned int i;
for (i = 0; i < sd->entity.num_pads; i++)
*v4l2_subdev_state_get_format(state, i) = fmt_default;
@@ -873,7 +881,8 @@ static irqreturn_t stm32_csi_irq_thread(int irq, void *arg)
static int stm32_csi_get_resources(struct stm32_csi_dev *csidev,
struct platform_device *pdev)
{
- int irq, ret, i;
+ unsigned int i;
+ int irq, ret;
csidev->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(csidev->base))
@@ -926,38 +935,32 @@ static int stm32_csi_parse_dt(struct stm32_csi_dev *csidev)
}
ret = v4l2_fwnode_endpoint_parse(ep, &v4l2_ep);
- fwnode_handle_put(ep);
if (ret) {
dev_err(csidev->dev, "Could not parse v4l2 endpoint\n");
- return ret;
+ goto out;
}
csidev->num_lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes;
if (csidev->num_lanes > STM32_CSI_LANES_MAX) {
dev_err(csidev->dev, "Unsupported number of data-lanes: %d\n",
csidev->num_lanes);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
memcpy(csidev->lanes, v4l2_ep.bus.mipi_csi2.data_lanes,
sizeof(csidev->lanes));
- ep = fwnode_graph_get_next_endpoint(dev_fwnode(csidev->dev), NULL);
- if (!ep) {
- dev_err(csidev->dev, "Failed to get next endpoint\n");
- return -EINVAL;
- }
-
v4l2_async_subdev_nf_init(&csidev->notifier, &csidev->sd);
asd = v4l2_async_nf_add_fwnode_remote(&csidev->notifier, ep,
struct v4l2_async_connection);
- fwnode_handle_put(ep);
if (IS_ERR(asd)) {
dev_err(csidev->dev, "Failed to add fwnode remote subdev\n");
- return PTR_ERR(asd);
+ ret = PTR_ERR(asd);
+ goto out;
}
csidev->notifier.ops = &stm32_csi_notifier_ops;
@@ -966,9 +969,11 @@ static int stm32_csi_parse_dt(struct stm32_csi_dev *csidev)
if (ret) {
dev_err(csidev->dev, "Failed to register notifier\n");
v4l2_async_nf_cleanup(&csidev->notifier);
- return ret;
+ goto out;
}
+out:
+ fwnode_handle_put(ep);
return ret;
}
@@ -989,11 +994,11 @@ static int stm32_csi_probe(struct platform_device *pdev)
ret = stm32_csi_get_resources(csidev, pdev);
if (ret)
- goto err_free_priv;
+ return ret;
ret = stm32_csi_parse_dt(csidev);
if (ret)
- goto err_free_priv;
+ return ret;
csidev->sd.owner = THIS_MODULE;
csidev->sd.dev = &pdev->dev;
@@ -1018,10 +1023,6 @@ static int stm32_csi_probe(struct platform_device *pdev)
if (ret < 0)
goto err_cleanup;
- ret = v4l2_async_register_subdev(&csidev->sd);
- if (ret < 0)
- goto err_cleanup;
-
/* Reset device */
rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(rstc)) {
@@ -1048,6 +1049,10 @@ static int stm32_csi_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
+ ret = v4l2_async_register_subdev(&csidev->sd);
+ if (ret < 0)
+ goto err_cleanup;
+
dev_info(&pdev->dev,
"Probed CSI with %u lanes\n", csidev->num_lanes);
@@ -1055,7 +1060,6 @@ static int stm32_csi_probe(struct platform_device *pdev)
err_cleanup:
v4l2_async_nf_cleanup(&csidev->notifier);
-err_free_priv:
return ret;
}
diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c
index 71acf539e1f3..1b7bae3266c8 100644
--- a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c
+++ b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c
@@ -89,6 +89,8 @@ struct dcmipp_pipeline_config {
const struct dcmipp_ent_link *links;
size_t num_links;
u32 hw_revision;
+ bool has_csi2;
+ bool needs_mclk;
};
/* --------------------------------------------------------------------------
@@ -164,7 +166,9 @@ static const struct dcmipp_pipeline_config stm32mp25_pipe_cfg = {
.num_ents = ARRAY_SIZE(stm32mp25_ent_config),
.links = stm32mp25_ent_links,
.num_links = ARRAY_SIZE(stm32mp25_ent_links),
- .hw_revision = DCMIPP_STM32MP25_VERR
+ .hw_revision = DCMIPP_STM32MP25_VERR,
+ .has_csi2 = true,
+ .needs_mclk = true
};
#define LINK_FLAG_TO_STR(f) ((f) == 0 ? "" :\
@@ -296,7 +300,7 @@ static int dcmipp_graph_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_async_connection *asd)
{
struct dcmipp_device *dcmipp = notifier_to_dcmipp(notifier);
- unsigned int ret;
+ int ret = -EINVAL;
int src_pad, i;
struct dcmipp_ent_device *sink;
struct v4l2_fwnode_endpoint vep = { 0 };
@@ -304,15 +308,9 @@ static int dcmipp_graph_notify_bound(struct v4l2_async_notifier *notifier,
enum v4l2_mbus_type supported_types[] = {
V4L2_MBUS_PARALLEL, V4L2_MBUS_BT656, V4L2_MBUS_CSI2_DPHY
};
- int supported_types_nb = ARRAY_SIZE(supported_types);
dev_dbg(dcmipp->dev, "Subdev \"%s\" bound\n", subdev->name);
- /* Only MP25 supports CSI input */
- if (!of_device_is_compatible(dcmipp->dev->of_node,
- "st,stm32mp25-dcmipp"))
- supported_types_nb--;
-
/*
* Link this sub-device to DCMIPP, it could be
* a parallel camera sensor or a CSI-2 to parallel bridge
@@ -330,7 +328,12 @@ static int dcmipp_graph_notify_bound(struct v4l2_async_notifier *notifier,
}
/* Check for supported MBUS type */
- for (i = 0; i < supported_types_nb; i++) {
+ for (i = 0; i < ARRAY_SIZE(supported_types); i++) {
+ /* Only MP25 supports CSI input */
+ if (supported_types[i] == V4L2_MBUS_CSI2_DPHY &&
+ !dcmipp->pipe_cfg->has_csi2)
+ continue;
+
vep.bus_type = supported_types[i];
ret = v4l2_fwnode_endpoint_parse(ep, &vep);
if (!ret)
@@ -529,7 +532,7 @@ static int dcmipp_probe(struct platform_device *pdev)
"Unable to get kclk\n");
dcmipp->kclk = kclk;
- if (!of_device_is_compatible(pdev->dev.of_node, "st,stm32mp13-dcmipp")) {
+ if (dcmipp->pipe_cfg->needs_mclk) {
mclk = devm_clk_get(&pdev->dev, "mclk");
if (IS_ERR(mclk))
return dev_err_probe(&pdev->dev, PTR_ERR(mclk),
diff --git a/drivers/media/platform/synopsys/Kconfig b/drivers/media/platform/synopsys/Kconfig
new file mode 100644
index 000000000000..4fd521f78425
--- /dev/null
+++ b/drivers/media/platform/synopsys/Kconfig
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+source "drivers/media/platform/synopsys/hdmirx/Kconfig"
diff --git a/drivers/media/platform/synopsys/Makefile b/drivers/media/platform/synopsys/Makefile
new file mode 100644
index 000000000000..3b12c574dd67
--- /dev/null
+++ b/drivers/media/platform/synopsys/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += hdmirx/
diff --git a/drivers/media/platform/synopsys/hdmirx/Kconfig b/drivers/media/platform/synopsys/hdmirx/Kconfig
new file mode 100644
index 000000000000..27e6706f84a3
--- /dev/null
+++ b/drivers/media/platform/synopsys/hdmirx/Kconfig
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config VIDEO_SYNOPSYS_HDMIRX
+ tristate "Synopsys DesignWare HDMI Receiver driver"
+ depends on VIDEO_DEV
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select VIDEOBUF2_DMA_CONTIG
+ select CEC_CORE
+ select HDMI
+ help
+ Support for the Synopsys DesignWare HDMI RX controller.
+ This driver supports HDMI 2.0.
+
+ To compile this driver as a module, choose M here. The module
+ will be called synopsys_hdmirx.
+
+config VIDEO_SYNOPSYS_HDMIRX_LOAD_DEFAULT_EDID
+ bool "Load default EDID"
+ depends on VIDEO_SYNOPSYS_HDMIRX
+ help
+ Preload a default EDID (Extended Display Identification Data)
+ branded by the Linux Foundation that exposes display modes up
+ to 4k@30Hz, which have the best compatibility with HDMI transmitters.
+
+ Enabling this option is recommended for non-production use cases,
+ as it makes the driver usable out of the box.
+
+ For higher display modes you will need to load a customized EDID
+ from userspace, using the v4l2-ctl tool or by other means.
+
+ Without this option the driver is practically non-functional
+ until an EDID is loaded from userspace, which is the desired
+ behavior when using this driver in a commercial product that
+ should provide its own branded EDID.
diff --git a/drivers/media/platform/synopsys/hdmirx/Makefile b/drivers/media/platform/synopsys/hdmirx/Makefile
new file mode 100644
index 000000000000..2fa2d9e25300
--- /dev/null
+++ b/drivers/media/platform/synopsys/hdmirx/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+synopsys-hdmirx-objs := snps_hdmirx.o snps_hdmirx_cec.o
+
+obj-$(CONFIG_VIDEO_SYNOPSYS_HDMIRX) += synopsys-hdmirx.o
diff --git a/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c b/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
new file mode 100644
index 000000000000..3d2913de9a86
--- /dev/null
+++ b/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
@@ -0,0 +1,2746 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 Collabora, Ltd.
+ * Author: Shreeya Patel <shreeya.patel@collabora.com>
+ * Author: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+ *
+ * Copyright (c) 2021 Rockchip Electronics Co. Ltd.
+ * Author: Dingxian Wen <shawn.wen@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/gpio/consumer.h>
+#include <linux/hdmi.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/v4l2-dv-timings.h>
+#include <linux/workqueue.h>
+
+#include <media/cec.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "snps_hdmirx.h"
+#include "snps_hdmirx_cec.h"
+
+#define EDID_NUM_BLOCKS_MAX 4
+#define EDID_BLOCK_SIZE 128
+#define HDMIRX_PLANE_Y 0
+#define HDMIRX_PLANE_CBCR 1
+#define FILTER_FRAME_CNT 6
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug level (0-3)");
+
+enum hdmirx_pix_fmt {
+ HDMIRX_RGB888 = 0,
+ HDMIRX_YUV422 = 1,
+ HDMIRX_YUV444 = 2,
+ HDMIRX_YUV420 = 3,
+};
+
+enum ddr_store_fmt {
+ STORE_RGB888 = 0,
+ STORE_RGBA_ARGB,
+ STORE_YUV420_8BIT,
+ STORE_YUV420_10BIT,
+ STORE_YUV422_8BIT,
+ STORE_YUV422_10BIT,
+ STORE_YUV444_8BIT,
+ STORE_YUV420_16BIT = 8,
+ STORE_YUV422_16BIT = 9,
+};
+
+enum hdmirx_reg_attr {
+ HDMIRX_ATTR_RW = 0,
+ HDMIRX_ATTR_RO = 1,
+ HDMIRX_ATTR_WO = 2,
+ HDMIRX_ATTR_RE = 3,
+};
+
+enum {
+ HDMIRX_RST_A,
+ HDMIRX_RST_P,
+ HDMIRX_RST_REF,
+ HDMIRX_RST_BIU,
+ HDMIRX_NUM_RST,
+};
+
+static const char *const pix_fmt_str[] = {
+ "RGB888",
+ "YUV422",
+ "YUV444",
+ "YUV420",
+};
+
+struct hdmirx_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head queue;
+ u32 buff_addr[VIDEO_MAX_PLANES];
+};
+
+struct hdmirx_stream {
+ struct snps_hdmirx_dev *hdmirx_dev;
+ struct video_device vdev;
+ struct vb2_queue buf_queue;
+ struct list_head buf_head;
+ struct hdmirx_buffer *curr_buf;
+ struct hdmirx_buffer *next_buf;
+ struct v4l2_pix_format_mplane pixm;
+ const struct v4l2_format_info *out_finfo;
+ struct mutex vlock; /* to lock resources associated with video buffer and video device */
+ spinlock_t vbq_lock; /* to lock video buffer queue */
+ bool stopping;
+ wait_queue_head_t wq_stopped;
+ u32 frame_idx;
+ u32 line_flag_int_cnt;
+ u32 irq_stat;
+};
+
+struct snps_hdmirx_dev {
+ struct device *dev;
+ struct hdmirx_stream stream;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler hdl;
+ struct v4l2_ctrl *detect_tx_5v_ctrl;
+ struct v4l2_ctrl *rgb_range;
+ struct v4l2_ctrl *content_type;
+ struct v4l2_dv_timings timings;
+ struct gpio_desc *detect_5v_gpio;
+ struct delayed_work delayed_work_hotplug;
+ struct delayed_work delayed_work_res_change;
+ struct hdmirx_cec *cec;
+ struct mutex stream_lock; /* to lock video stream capture */
+ struct mutex work_lock; /* to lock the critical section of hotplug event */
+ struct reset_control_bulk_data resets[HDMIRX_NUM_RST];
+ struct clk_bulk_data *clks;
+ struct regmap *grf;
+ struct regmap *vo1_grf;
+ struct completion cr_write_done;
+ struct completion timer_base_lock;
+ struct completion avi_pkt_rcv;
+ struct dentry *debugfs_dir;
+ struct v4l2_debugfs_if *infoframes;
+ enum hdmirx_pix_fmt pix_fmt;
+ void __iomem *regs;
+ int hdmi_irq;
+ int dma_irq;
+ int det_irq;
+ bool hpd_trigger_level_high;
+ bool tmds_clk_ratio;
+ bool plugged;
+ int num_clks;
+ u32 edid_blocks_written;
+ u32 cur_fmt_fourcc;
+ u32 color_depth;
+ spinlock_t rst_lock; /* to lock register access */
+ u8 edid[EDID_NUM_BLOCKS_MAX * EDID_BLOCK_SIZE];
+};
+
+static const struct v4l2_dv_timings cea640x480 = V4L2_DV_BT_CEA_640X480P59_94;
+
+static const struct v4l2_dv_timings_cap hdmirx_timings_cap = {
+ .type = V4L2_DV_BT_656_1120,
+ .reserved = { 0 },
+ V4L2_INIT_BT_TIMINGS(640, 4096, /* min/max width */
+ 480, 2160, /* min/max height */
+ 20000000, 600000000, /* min/max pixelclock */
+ /* standards */
+ V4L2_DV_BT_STD_CEA861,
+ /* capabilities */
+ V4L2_DV_BT_CAP_PROGRESSIVE |
+ V4L2_DV_BT_CAP_INTERLACED)
+};
+
+static void hdmirx_writel(struct snps_hdmirx_dev *hdmirx_dev, int reg, u32 val)
+{
+ guard(spinlock_irqsave)(&hdmirx_dev->rst_lock);
+
+ writel(val, hdmirx_dev->regs + reg);
+}
+
+static u32 hdmirx_readl(struct snps_hdmirx_dev *hdmirx_dev, int reg)
+{
+ guard(spinlock_irqsave)(&hdmirx_dev->rst_lock);
+
+ return readl(hdmirx_dev->regs + reg);
+}
+
+static void hdmirx_reset_dma(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ guard(spinlock_irqsave)(&hdmirx_dev->rst_lock);
+
+ reset_control_reset(hdmirx_dev->resets[0].rstc);
+}
+
+static void hdmirx_update_bits(struct snps_hdmirx_dev *hdmirx_dev, int reg,
+ u32 mask, u32 data)
+{
+ u32 val;
+
+ guard(spinlock_irqsave)(&hdmirx_dev->rst_lock);
+
+ val = readl(hdmirx_dev->regs + reg) & ~mask;
+ val |= (data & mask);
+ writel(val, hdmirx_dev->regs + reg);
+}
+
+static int hdmirx_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_src_change_event_subscribe(fh, sub);
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static bool tx_5v_power_present(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ const unsigned int detection_threshold = 7;
+ int val, i, cnt = 0;
+ bool ret;
+
+ for (i = 0; i < 10; i++) {
+ usleep_range(1000, 1100);
+ val = gpiod_get_value(hdmirx_dev->detect_5v_gpio);
+ if (val > 0)
+ cnt++;
+ if (cnt >= detection_threshold)
+ break;
+ }
+
+ ret = cnt >= detection_threshold;
+ v4l2_dbg(3, debug, &hdmirx_dev->v4l2_dev, "%s: %d\n", __func__, ret);
+
+ return ret;
+}
+
+static bool signal_not_lock(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ u32 mu_status, dma_st10, cmu_st;
+
+ mu_status = hdmirx_readl(hdmirx_dev, MAINUNIT_STATUS);
+ dma_st10 = hdmirx_readl(hdmirx_dev, DMA_STATUS10);
+ cmu_st = hdmirx_readl(hdmirx_dev, CMU_STATUS);
+
+ if ((mu_status & TMDSVALID_STABLE_ST) &&
+ (dma_st10 & HDMIRX_LOCK) &&
+ (cmu_st & TMDSQPCLK_LOCKED_ST))
+ return false;
+
+ return true;
+}
+
+static void hdmirx_get_timings(struct snps_hdmirx_dev *hdmirx_dev,
+ struct v4l2_bt_timings *bt)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ u32 hact, vact, htotal, vtotal, fps;
+ u32 hfp, hs, hbp, vfp, vs, vbp;
+ u32 val;
+
+ val = hdmirx_readl(hdmirx_dev, DMA_STATUS2);
+ hact = (val >> 16) & 0xffff;
+ vact = val & 0xffff;
+
+ val = hdmirx_readl(hdmirx_dev, DMA_STATUS3);
+ htotal = (val >> 16) & 0xffff;
+ vtotal = val & 0xffff;
+
+ val = hdmirx_readl(hdmirx_dev, DMA_STATUS4);
+ hs = (val >> 16) & 0xffff;
+ vs = val & 0xffff;
+
+ val = hdmirx_readl(hdmirx_dev, DMA_STATUS5);
+ hbp = (val >> 16) & 0xffff;
+ vbp = val & 0xffff;
+
+ if (hdmirx_dev->pix_fmt == HDMIRX_YUV420) {
+ htotal *= 2;
+ hbp *= 2;
+ hs *= 2;
+ }
+
+ hfp = htotal - hact - hs - hbp;
+ vfp = vtotal - vact - vs - vbp;
+
+ fps = div_u64(bt->pixelclock + (htotal * vtotal) / 2, htotal * vtotal);
+ bt->width = hact;
+ bt->height = vact;
+ bt->hfrontporch = hfp;
+ bt->hsync = hs;
+ bt->hbackporch = hbp;
+ bt->vfrontporch = vfp;
+ bt->vsync = vs;
+ bt->vbackporch = vbp;
+
+ v4l2_dbg(1, debug, v4l2_dev, "get timings from dma\n");
+ v4l2_dbg(1, debug, v4l2_dev,
+ "act:%ux%u%s, total:%ux%u, fps:%u, pixclk:%llu\n",
+ bt->width, bt->height, bt->interlaced ? "i" : "p",
+ htotal, vtotal, fps, bt->pixelclock);
+
+ v4l2_dbg(2, debug, v4l2_dev,
+ "hfp:%u, hact:%u, hs:%u, hbp:%u, vfp:%u, vact:%u, vs:%u, vbp:%u\n",
+ bt->hfrontporch, hact, bt->hsync, bt->hbackporch,
+ bt->vfrontporch, vact, bt->vsync, bt->vbackporch);
+
+ if (bt->interlaced == V4L2_DV_INTERLACED) {
+ bt->height *= 2;
+ bt->il_vfrontporch = bt->vfrontporch;
+ bt->il_vsync = bt->vsync + 1;
+ bt->il_vbackporch = bt->vbackporch;
+ }
+}
+
+static bool hdmirx_check_timing_valid(struct v4l2_bt_timings *bt)
+{
+ /*
+ * Sanity-check timing values. Some of the values will be outside
+ * of a valid range until the hardware becomes ready to perform capture.
+ */
+ if (bt->width < 100 || bt->width > 5000 ||
+ bt->height < 100 || bt->height > 5000)
+ return false;
+
+ if (!bt->hsync || bt->hsync > 200 ||
+ !bt->vsync || bt->vsync > 100)
+ return false;
+
+ /*
+ * According to the CEA-861, 1280x720p25 Hblank timing is up to 2680,
+ * and all standard video format timings are less than 3000.
+ */
+ if (!bt->hbackporch || bt->hbackporch > 3000 ||
+ !bt->vbackporch || bt->vbackporch > 3000)
+ return false;
+
+ if (!bt->hfrontporch || bt->hfrontporch > 3000 ||
+ !bt->vfrontporch || bt->vfrontporch > 3000)
+ return false;
+
+ return true;
+}
+
+static void hdmirx_toggle_polarity(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ u32 val = hdmirx_readl(hdmirx_dev, DMA_CONFIG6);
+
+ if (!(val & (VSYNC_TOGGLE_EN | HSYNC_TOGGLE_EN))) {
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG6,
+ VSYNC_TOGGLE_EN | HSYNC_TOGGLE_EN,
+ VSYNC_TOGGLE_EN | HSYNC_TOGGLE_EN);
+ hdmirx_update_bits(hdmirx_dev, VIDEO_CONFIG2,
+ VPROC_VSYNC_POL_OVR_VALUE |
+ VPROC_VSYNC_POL_OVR_EN |
+ VPROC_HSYNC_POL_OVR_VALUE |
+ VPROC_HSYNC_POL_OVR_EN,
+ VPROC_VSYNC_POL_OVR_EN |
+ VPROC_HSYNC_POL_OVR_EN);
+ return;
+ }
+
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG6,
+ VSYNC_TOGGLE_EN | HSYNC_TOGGLE_EN, 0);
+
+ hdmirx_update_bits(hdmirx_dev, VIDEO_CONFIG2,
+ VPROC_VSYNC_POL_OVR_VALUE |
+ VPROC_VSYNC_POL_OVR_EN |
+ VPROC_HSYNC_POL_OVR_VALUE |
+ VPROC_HSYNC_POL_OVR_EN, 0);
+}
+
+/*
+ * When querying DV timings during preview, if the DMA's timing is stable,
+ * we retrieve the timings directly from the DMA. However, if the current
+ * resolution is negative, obtaining the timing from CTRL may require a
+ * change in the sync polarity, potentially leading to DMA errors.
+ */
+static int hdmirx_get_detected_timings(struct snps_hdmirx_dev *hdmirx_dev,
+ struct v4l2_dv_timings *timings)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ struct v4l2_bt_timings *bt = &timings->bt;
+ u32 val, tmdsqpclk_freq, pix_clk;
+ unsigned int num_retries = 0;
+ u32 field_type, deframer_st;
+ u64 tmp_data, tmds_clk;
+ bool is_dvi_mode;
+ int ret;
+
+ mutex_lock(&hdmirx_dev->work_lock);
+retry:
+ memset(timings, 0, sizeof(struct v4l2_dv_timings));
+ timings->type = V4L2_DV_BT_656_1120;
+
+ val = hdmirx_readl(hdmirx_dev, DMA_STATUS11);
+ field_type = (val & HDMIRX_TYPE_MASK) >> 7;
+
+ if (field_type & BIT(0))
+ bt->interlaced = V4L2_DV_INTERLACED;
+ else
+ bt->interlaced = V4L2_DV_PROGRESSIVE;
+
+ deframer_st = hdmirx_readl(hdmirx_dev, DEFRAMER_STATUS);
+ is_dvi_mode = !(deframer_st & OPMODE_STS_MASK);
+
+ tmdsqpclk_freq = hdmirx_readl(hdmirx_dev, CMU_TMDSQPCLK_FREQ);
+ tmds_clk = tmdsqpclk_freq * 4 * 1000;
+ tmp_data = tmds_clk * 24;
+ do_div(tmp_data, hdmirx_dev->color_depth);
+ pix_clk = tmp_data;
+ bt->pixelclock = pix_clk;
+
+ if (hdmirx_dev->pix_fmt == HDMIRX_YUV420)
+ bt->pixelclock *= 2;
+
+ hdmirx_get_timings(hdmirx_dev, bt);
+
+ v4l2_dbg(2, debug, v4l2_dev, "tmds_clk:%llu, pix_clk:%d\n", tmds_clk, pix_clk);
+ v4l2_dbg(1, debug, v4l2_dev, "interlace:%d, fmt:%d, color:%d, mode:%s\n",
+ bt->interlaced, hdmirx_dev->pix_fmt,
+ hdmirx_dev->color_depth,
+ is_dvi_mode ? "dvi" : "hdmi");
+ v4l2_dbg(2, debug, v4l2_dev, "deframer_st:%#x\n", deframer_st);
+
+ /*
+ * Timings will be invalid until they are latched by the hardware,
+ * or if the signal's polarity doesn't match.
+ */
+ if (!hdmirx_check_timing_valid(bt)) {
+ if (num_retries++ < 20) {
+ if (num_retries == 10)
+ hdmirx_toggle_polarity(hdmirx_dev);
+
+ usleep_range(10 * 1000, 10 * 1100);
+ goto retry;
+ }
+
+ ret = -ERANGE;
+ } else {
+ ret = 0;
+ }
+
+ mutex_unlock(&hdmirx_dev->work_lock);
+
+ return ret;
+}
+
+static bool port_no_link(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ return !tx_5v_power_present(hdmirx_dev);
+}
+
+static int hdmirx_query_dv_timings(struct file *file, void *_fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct hdmirx_stream *stream = video_drvdata(file);
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ int ret;
+
+ if (port_no_link(hdmirx_dev)) {
+ v4l2_err(v4l2_dev, "%s: port has no link\n", __func__);
+ return -ENOLINK;
+ }
+
+ if (signal_not_lock(hdmirx_dev)) {
+ v4l2_err(v4l2_dev, "%s: signal is not locked\n", __func__);
+ return -ENOLCK;
+ }
+
+ ret = hdmirx_get_detected_timings(hdmirx_dev, timings);
+ if (ret)
+ return ret;
+
+ if (debug)
+ v4l2_print_dv_timings(hdmirx_dev->v4l2_dev.name,
+ "query_dv_timings: ", timings, false);
+
+ if (!v4l2_valid_dv_timings(timings, &hdmirx_timings_cap, NULL, NULL)) {
+ v4l2_dbg(1, debug, v4l2_dev, "%s: timings out of range\n", __func__);
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static void hdmirx_hpd_ctrl(struct snps_hdmirx_dev *hdmirx_dev, bool en)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+
+ v4l2_dbg(1, debug, v4l2_dev, "%s: %sable, hpd_trigger_level_high:%d\n",
+ __func__, en ? "en" : "dis", hdmirx_dev->hpd_trigger_level_high);
+
+ hdmirx_update_bits(hdmirx_dev, SCDC_CONFIG, HPDLOW, en ? 0 : HPDLOW);
+ hdmirx_writel(hdmirx_dev, CORE_CONFIG,
+ hdmirx_dev->hpd_trigger_level_high ? en : !en);
+
+ /* 100ms delay as per HDMI spec */
+ if (!en)
+ msleep(100);
+}
+
+static void hdmirx_write_edid_data(struct snps_hdmirx_dev *hdmirx_dev,
+ u8 *edid, unsigned int num_blocks)
+{
+ static u8 data[EDID_NUM_BLOCKS_MAX * EDID_BLOCK_SIZE];
+ unsigned int edid_len = num_blocks * EDID_BLOCK_SIZE;
+ unsigned int i;
+
+ cec_s_phys_addr_from_edid(hdmirx_dev->cec->adap,
+ (const struct edid *)edid);
+
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG11,
+ EDID_READ_EN_MASK |
+ EDID_WRITE_EN_MASK |
+ EDID_SLAVE_ADDR_MASK,
+ EDID_READ_EN(0) |
+ EDID_WRITE_EN(1) |
+ EDID_SLAVE_ADDR(0x50));
+ for (i = 0; i < edid_len; i++)
+ hdmirx_writel(hdmirx_dev, DMA_CONFIG10, edid[i]);
+
+ /* read out for debug */
+ if (debug >= 2) {
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG11,
+ EDID_READ_EN_MASK |
+ EDID_WRITE_EN_MASK,
+ EDID_READ_EN(1) |
+ EDID_WRITE_EN(0));
+
+ for (i = 0; i < edid_len; i++)
+ data[i] = hdmirx_readl(hdmirx_dev, DMA_STATUS14);
+
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, data,
+ edid_len, false);
+ }
+
+ /*
+ * The EDID_READ_EN and EDID_WRITE_EN bits must be set to 0 once
+ * the EDID read/write operation is completed. Otherwise, they
+ * affect the reading and writing of other registers.
+ */
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG11,
+ EDID_READ_EN_MASK | EDID_WRITE_EN_MASK,
+ EDID_READ_EN(0) | EDID_WRITE_EN(0));
+}
+
+static void hdmirx_write_edid(struct snps_hdmirx_dev *hdmirx_dev,
+ struct v4l2_edid *edid)
+{
+ memset(edid->reserved, 0, sizeof(edid->reserved));
+ memset(hdmirx_dev->edid, 0, sizeof(hdmirx_dev->edid));
+
+ hdmirx_write_edid_data(hdmirx_dev, edid->edid, edid->blocks);
+
+ hdmirx_dev->edid_blocks_written = edid->blocks;
+ memcpy(hdmirx_dev->edid, edid->edid, edid->blocks * EDID_BLOCK_SIZE);
+}
+
+/*
+ * Before clearing an interrupt, we need to read the interrupt status.
+ */
+static inline void hdmirx_clear_interrupt(struct snps_hdmirx_dev *hdmirx_dev,
+ u32 reg, u32 val)
+{
+ /* (interrupt status register) = (interrupt clear register) - 0x8 */
+ hdmirx_readl(hdmirx_dev, reg - 0x8);
+ hdmirx_writel(hdmirx_dev, reg, val);
+}
+
+static void hdmirx_interrupts_setup(struct snps_hdmirx_dev *hdmirx_dev, bool en)
+{
+ v4l2_dbg(1, debug, &hdmirx_dev->v4l2_dev, "%s: %sable\n",
+ __func__, en ? "en" : "dis");
+
+ disable_irq(hdmirx_dev->hdmi_irq);
+
+ /* Note: In DVI mode, it needs to be written twice to take effect. */
+ hdmirx_clear_interrupt(hdmirx_dev, MAINUNIT_0_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, MAINUNIT_2_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, MAINUNIT_0_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, MAINUNIT_2_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, AVPUNIT_0_INT_CLEAR, 0xffffffff);
+
+ if (en) {
+ hdmirx_update_bits(hdmirx_dev, MAINUNIT_0_INT_MASK_N,
+ TMDSQPCLK_OFF_CHG | TMDSQPCLK_LOCKED_CHG,
+ TMDSQPCLK_OFF_CHG | TMDSQPCLK_LOCKED_CHG);
+ hdmirx_update_bits(hdmirx_dev, MAINUNIT_2_INT_MASK_N,
+ TMDSVALID_STABLE_CHG, TMDSVALID_STABLE_CHG);
+ hdmirx_update_bits(hdmirx_dev, AVPUNIT_0_INT_MASK_N,
+ CED_DYN_CNT_CH2_IRQ |
+ CED_DYN_CNT_CH1_IRQ |
+ CED_DYN_CNT_CH0_IRQ,
+ CED_DYN_CNT_CH2_IRQ |
+ CED_DYN_CNT_CH1_IRQ |
+ CED_DYN_CNT_CH0_IRQ);
+ } else {
+ hdmirx_writel(hdmirx_dev, MAINUNIT_0_INT_MASK_N, 0);
+ hdmirx_writel(hdmirx_dev, MAINUNIT_2_INT_MASK_N, 0);
+ hdmirx_writel(hdmirx_dev, AVPUNIT_0_INT_MASK_N, 0);
+ }
+
+ enable_irq(hdmirx_dev->hdmi_irq);
+}
+
+static void hdmirx_plugout(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ if (!hdmirx_dev->plugged)
+ return;
+
+ hdmirx_update_bits(hdmirx_dev, SCDC_CONFIG, POWERPROVIDED, 0);
+ hdmirx_interrupts_setup(hdmirx_dev, false);
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG6, HDMIRX_DMA_EN, 0);
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG4,
+ LINE_FLAG_INT_EN |
+ HDMIRX_DMA_IDLE_INT |
+ HDMIRX_LOCK_DISABLE_INT |
+ LAST_FRAME_AXI_UNFINISH_INT_EN |
+ FIFO_OVERFLOW_INT_EN |
+ FIFO_UNDERFLOW_INT_EN |
+ HDMIRX_AXI_ERROR_INT_EN, 0);
+ hdmirx_reset_dma(hdmirx_dev);
+ hdmirx_update_bits(hdmirx_dev, PHY_CONFIG, HDMI_DISABLE | PHY_RESET |
+ PHY_PDDQ, HDMI_DISABLE);
+ hdmirx_writel(hdmirx_dev, PHYCREG_CONFIG0, 0x0);
+ cancel_delayed_work(&hdmirx_dev->delayed_work_res_change);
+
+ /* will be NULL on driver removal */
+ if (hdmirx_dev->rgb_range)
+ v4l2_ctrl_s_ctrl(hdmirx_dev->rgb_range, V4L2_DV_RGB_RANGE_AUTO);
+
+ if (hdmirx_dev->content_type)
+ v4l2_ctrl_s_ctrl(hdmirx_dev->content_type,
+ V4L2_DV_IT_CONTENT_TYPE_NO_ITC);
+
+ hdmirx_dev->plugged = false;
+}
+
+static int hdmirx_set_edid(struct file *file, void *fh, struct v4l2_edid *edid)
+{
+ struct hdmirx_stream *stream = video_drvdata(file);
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+ u16 phys_addr;
+ int err;
+
+ if (edid->pad)
+ return -EINVAL;
+
+ if (edid->start_block)
+ return -EINVAL;
+
+ if (edid->blocks > EDID_NUM_BLOCKS_MAX) {
+ edid->blocks = EDID_NUM_BLOCKS_MAX;
+ return -E2BIG;
+ }
+
+ if (edid->blocks) {
+ phys_addr = cec_get_edid_phys_addr(edid->edid,
+ edid->blocks * EDID_BLOCK_SIZE,
+ NULL);
+
+ err = v4l2_phys_addr_validate(phys_addr, &phys_addr, NULL);
+ if (err)
+ return err;
+ }
+
+ /*
+ * Touching HW registers in parallel with the plug-in/out handlers
+ * will bring the hardware into a bad state.
+ */
+ mutex_lock(&hdmirx_dev->work_lock);
+
+ hdmirx_hpd_ctrl(hdmirx_dev, false);
+
+ if (edid->blocks) {
+ hdmirx_write_edid(hdmirx_dev, edid);
+ hdmirx_hpd_ctrl(hdmirx_dev, true);
+ } else {
+ cec_phys_addr_invalidate(hdmirx_dev->cec->adap);
+ hdmirx_dev->edid_blocks_written = 0;
+ }
+
+ mutex_unlock(&hdmirx_dev->work_lock);
+
+ return 0;
+}
+
+static int hdmirx_get_edid(struct file *file, void *fh, struct v4l2_edid *edid)
+{
+ struct hdmirx_stream *stream = video_drvdata(file);
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+
+ memset(edid->reserved, 0, sizeof(edid->reserved));
+
+ if (edid->pad)
+ return -EINVAL;
+
+ if (!edid->start_block && !edid->blocks) {
+ edid->blocks = hdmirx_dev->edid_blocks_written;
+ return 0;
+ }
+
+ if (!hdmirx_dev->edid_blocks_written)
+ return -ENODATA;
+
+ if (edid->start_block >= hdmirx_dev->edid_blocks_written || !edid->blocks)
+ return -EINVAL;
+
+ if (edid->start_block + edid->blocks > hdmirx_dev->edid_blocks_written)
+ edid->blocks = hdmirx_dev->edid_blocks_written - edid->start_block;
+
+ memcpy(edid->edid, hdmirx_dev->edid, edid->blocks * EDID_BLOCK_SIZE);
+
+ v4l2_dbg(1, debug, v4l2_dev, "%s: read EDID:\n", __func__);
+ if (debug > 0)
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1,
+ edid->edid, edid->blocks * EDID_BLOCK_SIZE, false);
+
+ return 0;
+}
+
+static int hdmirx_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct hdmirx_stream *stream = video_drvdata(file);
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ parm->parm.capture.timeperframe = v4l2_calc_timeperframe(&hdmirx_dev->timings);
+
+ return 0;
+}
+
+static int hdmirx_dv_timings_cap(struct file *file, void *fh,
+ struct v4l2_dv_timings_cap *cap)
+{
+ *cap = hdmirx_timings_cap;
+ return 0;
+}
+
+static int hdmirx_enum_dv_timings(struct file *file, void *_fh,
+ struct v4l2_enum_dv_timings *timings)
+{
+ return v4l2_enum_dv_timings_cap(timings, &hdmirx_timings_cap, NULL, NULL);
+}
+
+static void hdmirx_scdc_init(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ hdmirx_update_bits(hdmirx_dev, I2C_SLAVE_CONFIG1,
+ I2C_SDA_OUT_HOLD_VALUE_QST_MASK |
+ I2C_SDA_IN_HOLD_VALUE_QST_MASK,
+ I2C_SDA_OUT_HOLD_VALUE_QST(0x80) |
+ I2C_SDA_IN_HOLD_VALUE_QST(0x15));
+ hdmirx_update_bits(hdmirx_dev, SCDC_REGBANK_CONFIG0,
+ SCDC_SINKVERSION_QST_MASK,
+ SCDC_SINKVERSION_QST(1));
+}
+
+static int wait_reg_bit_status(struct snps_hdmirx_dev *hdmirx_dev, u32 reg,
+ u32 bit_mask, u32 expect_val, bool is_grf,
+ u32 ms)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ u32 i, val;
+
+ for (i = 0; i < ms; i++) {
+ if (is_grf)
+ regmap_read(hdmirx_dev->grf, reg, &val);
+ else
+ val = hdmirx_readl(hdmirx_dev, reg);
+
+ if ((val & bit_mask) == expect_val) {
+ v4l2_dbg(2, debug, v4l2_dev,
+ "%s: i:%d, time: %dms\n", __func__, i, ms);
+ break;
+ }
+ usleep_range(1000, 1010);
+ }
+
+ if (i == ms)
+ return -1;
+
+ return 0;
+}
+
+static int hdmirx_phy_register_write(struct snps_hdmirx_dev *hdmirx_dev,
+ u32 phy_reg, u32 val)
+{
+ struct device *dev = hdmirx_dev->dev;
+
+ reinit_completion(&hdmirx_dev->cr_write_done);
+ /* clear irq status */
+ hdmirx_clear_interrupt(hdmirx_dev, MAINUNIT_2_INT_CLEAR, 0xffffffff);
+ /* en irq */
+ hdmirx_update_bits(hdmirx_dev, MAINUNIT_2_INT_MASK_N,
+ PHYCREG_CR_WRITE_DONE, PHYCREG_CR_WRITE_DONE);
+ /* write phy reg addr */
+ hdmirx_writel(hdmirx_dev, PHYCREG_CONFIG1, phy_reg);
+ /* write phy reg val */
+ hdmirx_writel(hdmirx_dev, PHYCREG_CONFIG2, val);
+ /* config write enable */
+ hdmirx_writel(hdmirx_dev, PHYCREG_CONTROL, PHYCREG_CR_PARA_WRITE_P);
+
+ if (!wait_for_completion_timeout(&hdmirx_dev->cr_write_done,
+ msecs_to_jiffies(20))) {
+ dev_err(dev, "%s wait cr write done failed\n", __func__);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void hdmirx_tmds_clk_ratio_config(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ u32 val;
+
+ val = hdmirx_readl(hdmirx_dev, SCDC_REGBANK_STATUS1);
+ v4l2_dbg(3, debug, v4l2_dev, "%s: scdc_regbank_st:%#x\n", __func__, val);
+ hdmirx_dev->tmds_clk_ratio = (val & SCDC_TMDSBITCLKRATIO) > 0;
+
+ if (hdmirx_dev->tmds_clk_ratio) {
+ v4l2_dbg(3, debug, v4l2_dev, "%s: HDMITX greater than 3.4Gbps\n", __func__);
+ hdmirx_update_bits(hdmirx_dev, PHY_CONFIG,
+ TMDS_CLOCK_RATIO, TMDS_CLOCK_RATIO);
+ } else {
+ v4l2_dbg(3, debug, v4l2_dev, "%s: HDMITX less than 3.4Gbps\n", __func__);
+ hdmirx_update_bits(hdmirx_dev, PHY_CONFIG,
+ TMDS_CLOCK_RATIO, 0);
+ }
+}
+
+static void hdmirx_phy_config(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ struct device *dev = hdmirx_dev->dev;
+
+ hdmirx_clear_interrupt(hdmirx_dev, SCDC_INT_CLEAR, 0xffffffff);
+ hdmirx_update_bits(hdmirx_dev, SCDC_INT_MASK_N, SCDCTMDSCCFG_CHG,
+ SCDCTMDSCCFG_CHG);
+ /* cr_para_clk 24M */
+ hdmirx_update_bits(hdmirx_dev, PHY_CONFIG, REFFREQ_SEL_MASK, REFFREQ_SEL(0));
+ /* rx data width 40bit valid */
+ hdmirx_update_bits(hdmirx_dev, PHY_CONFIG, RXDATA_WIDTH, RXDATA_WIDTH);
+ hdmirx_update_bits(hdmirx_dev, PHY_CONFIG, PHY_RESET, PHY_RESET);
+ usleep_range(100, 110);
+ hdmirx_update_bits(hdmirx_dev, PHY_CONFIG, PHY_RESET, 0);
+ usleep_range(100, 110);
+ /* select cr para interface */
+ hdmirx_writel(hdmirx_dev, PHYCREG_CONFIG0, 0x3);
+
+ if (wait_reg_bit_status(hdmirx_dev, SYS_GRF_SOC_STATUS1,
+ HDMIRXPHY_SRAM_INIT_DONE,
+ HDMIRXPHY_SRAM_INIT_DONE, true, 10))
+ dev_err(dev, "%s: phy SRAM init failed\n", __func__);
+
+ regmap_write(hdmirx_dev->grf, SYS_GRF_SOC_CON1,
+ (HDMIRXPHY_SRAM_EXT_LD_DONE << 16) |
+ HDMIRXPHY_SRAM_EXT_LD_DONE);
+ hdmirx_phy_register_write(hdmirx_dev, SUP_DIG_ANA_CREGS_SUP_ANA_NC, 2);
+ hdmirx_phy_register_write(hdmirx_dev, SUP_DIG_ANA_CREGS_SUP_ANA_NC, 3);
+ hdmirx_phy_register_write(hdmirx_dev, SUP_DIG_ANA_CREGS_SUP_ANA_NC, 2);
+ hdmirx_phy_register_write(hdmirx_dev, SUP_DIG_ANA_CREGS_SUP_ANA_NC, 2);
+ hdmirx_phy_register_write(hdmirx_dev, SUP_DIG_ANA_CREGS_SUP_ANA_NC, 3);
+ hdmirx_phy_register_write(hdmirx_dev, SUP_DIG_ANA_CREGS_SUP_ANA_NC, 2);
+ hdmirx_phy_register_write(hdmirx_dev, SUP_DIG_ANA_CREGS_SUP_ANA_NC, 0);
+ hdmirx_phy_register_write(hdmirx_dev, SUP_DIG_ANA_CREGS_SUP_ANA_NC, 1);
+ hdmirx_phy_register_write(hdmirx_dev, SUP_DIG_ANA_CREGS_SUP_ANA_NC, 0);
+ hdmirx_phy_register_write(hdmirx_dev, SUP_DIG_ANA_CREGS_SUP_ANA_NC, 0);
+
+ hdmirx_phy_register_write(hdmirx_dev,
+ HDMIPCS_DIG_CTRL_PATH_MAIN_FSM_RATE_CALC_HDMI14_CDR_SETTING_3_REG,
+ CDR_SETTING_BOUNDARY_3_DEFAULT);
+ hdmirx_phy_register_write(hdmirx_dev,
+ HDMIPCS_DIG_CTRL_PATH_MAIN_FSM_RATE_CALC_HDMI14_CDR_SETTING_4_REG,
+ CDR_SETTING_BOUNDARY_4_DEFAULT);
+ hdmirx_phy_register_write(hdmirx_dev,
+ HDMIPCS_DIG_CTRL_PATH_MAIN_FSM_RATE_CALC_HDMI14_CDR_SETTING_5_REG,
+ CDR_SETTING_BOUNDARY_5_DEFAULT);
+ hdmirx_phy_register_write(hdmirx_dev,
+ HDMIPCS_DIG_CTRL_PATH_MAIN_FSM_RATE_CALC_HDMI14_CDR_SETTING_6_REG,
+ CDR_SETTING_BOUNDARY_6_DEFAULT);
+ hdmirx_phy_register_write(hdmirx_dev,
+ HDMIPCS_DIG_CTRL_PATH_MAIN_FSM_RATE_CALC_HDMI14_CDR_SETTING_7_REG,
+ CDR_SETTING_BOUNDARY_7_DEFAULT);
+
+ hdmirx_update_bits(hdmirx_dev, PHY_CONFIG, PHY_PDDQ, 0);
+ if (wait_reg_bit_status(hdmirx_dev, PHY_STATUS, PDDQ_ACK, 0, false, 10))
+ dev_err(dev, "%s: wait pddq ack failed\n", __func__);
+
+ hdmirx_update_bits(hdmirx_dev, PHY_CONFIG, HDMI_DISABLE, 0);
+ if (wait_reg_bit_status(hdmirx_dev, PHY_STATUS, HDMI_DISABLE_ACK, 0,
+ false, 50))
+ dev_err(dev, "%s: wait hdmi disable ack failed\n", __func__);
+
+ hdmirx_tmds_clk_ratio_config(hdmirx_dev);
+}
+
+static void hdmirx_controller_init(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ const unsigned long iref_clk_freq_hz = 428571429;
+ struct device *dev = hdmirx_dev->dev;
+
+ reinit_completion(&hdmirx_dev->timer_base_lock);
+ hdmirx_clear_interrupt(hdmirx_dev, MAINUNIT_0_INT_CLEAR, 0xffffffff);
+ /* en irq */
+ hdmirx_update_bits(hdmirx_dev, MAINUNIT_0_INT_MASK_N,
+ TIMER_BASE_LOCKED_IRQ, TIMER_BASE_LOCKED_IRQ);
+ /* write irefclk freq */
+ hdmirx_writel(hdmirx_dev, GLOBAL_TIMER_REF_BASE, iref_clk_freq_hz);
+
+ if (!wait_for_completion_timeout(&hdmirx_dev->timer_base_lock,
+ msecs_to_jiffies(20)))
+ dev_err(dev, "%s wait timer base lock failed\n", __func__);
+
+ hdmirx_update_bits(hdmirx_dev, CMU_CONFIG0,
+ TMDSQPCLK_STABLE_FREQ_MARGIN_MASK |
+ AUDCLK_STABLE_FREQ_MARGIN_MASK,
+ TMDSQPCLK_STABLE_FREQ_MARGIN(2) |
+ AUDCLK_STABLE_FREQ_MARGIN(1));
+ hdmirx_update_bits(hdmirx_dev, DESCRAND_EN_CONTROL,
+ SCRAMB_EN_SEL_QST_MASK, SCRAMB_EN_SEL_QST(1));
+ hdmirx_update_bits(hdmirx_dev, CED_CONFIG,
+ CED_VIDDATACHECKEN_QST |
+ CED_DATAISCHECKEN_QST |
+ CED_GBCHECKEN_QST |
+ CED_CTRLCHECKEN_QST |
+ CED_CHLOCKMAXER_QST_MASK,
+ CED_VIDDATACHECKEN_QST |
+ CED_GBCHECKEN_QST |
+ CED_CTRLCHECKEN_QST |
+ CED_CHLOCKMAXER_QST(0x10));
+ hdmirx_update_bits(hdmirx_dev, DEFRAMER_CONFIG0,
+ VS_REMAPFILTER_EN_QST | VS_FILTER_ORDER_QST_MASK,
+ VS_REMAPFILTER_EN_QST | VS_FILTER_ORDER_QST(0x3));
+}
+
+static void hdmirx_get_colordepth(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ u32 val, color_depth_reg;
+
+ val = hdmirx_readl(hdmirx_dev, DMA_STATUS11);
+ color_depth_reg = (val & HDMIRX_COLOR_DEPTH_MASK) >> 3;
+
+ switch (color_depth_reg) {
+ case 0x4:
+ hdmirx_dev->color_depth = 24;
+ break;
+ case 0x5:
+ hdmirx_dev->color_depth = 30;
+ break;
+ case 0x6:
+ hdmirx_dev->color_depth = 36;
+ break;
+ case 0x7:
+ hdmirx_dev->color_depth = 48;
+ break;
+ default:
+ hdmirx_dev->color_depth = 24;
+ break;
+ }
+
+ v4l2_dbg(1, debug, v4l2_dev, "%s: color_depth: %d, reg_val:%d\n",
+ __func__, hdmirx_dev->color_depth, color_depth_reg);
+}
+
+static void hdmirx_get_pix_fmt(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ u32 val;
+
+ val = hdmirx_readl(hdmirx_dev, DMA_STATUS11);
+ hdmirx_dev->pix_fmt = val & HDMIRX_FORMAT_MASK;
+
+ switch (hdmirx_dev->pix_fmt) {
+ case HDMIRX_RGB888:
+ hdmirx_dev->cur_fmt_fourcc = V4L2_PIX_FMT_BGR24;
+ break;
+ case HDMIRX_YUV422:
+ hdmirx_dev->cur_fmt_fourcc = V4L2_PIX_FMT_NV16;
+ break;
+ case HDMIRX_YUV444:
+ hdmirx_dev->cur_fmt_fourcc = V4L2_PIX_FMT_NV24;
+ break;
+ case HDMIRX_YUV420:
+ hdmirx_dev->cur_fmt_fourcc = V4L2_PIX_FMT_NV12;
+ break;
+ default:
+ v4l2_err(v4l2_dev,
+ "%s: err pix_fmt: %d, set RGB888 as default\n",
+ __func__, hdmirx_dev->pix_fmt);
+ hdmirx_dev->pix_fmt = HDMIRX_RGB888;
+ hdmirx_dev->cur_fmt_fourcc = V4L2_PIX_FMT_BGR24;
+ break;
+ }
+
+ v4l2_dbg(1, debug, v4l2_dev, "%s: pix_fmt: %s\n", __func__,
+ pix_fmt_str[hdmirx_dev->pix_fmt]);
+}
+
+static void hdmirx_read_avi_infoframe(struct snps_hdmirx_dev *hdmirx_dev,
+ u8 *aviif)
+{
+ unsigned int i, b, itr = 0;
+ u32 val;
+
+ aviif[itr++] = HDMI_INFOFRAME_TYPE_AVI;
+ val = hdmirx_readl(hdmirx_dev, PKTDEC_AVIIF_PH2_1);
+ aviif[itr++] = val & 0xff;
+ aviif[itr++] = (val >> 8) & 0xff;
+
+ for (i = 0; i < 7; i++) {
+ val = hdmirx_readl(hdmirx_dev, PKTDEC_AVIIF_PB3_0 + 4 * i);
+
+ for (b = 0; b < 4; b++)
+ aviif[itr++] = (val >> (8 * b)) & 0xff;
+ }
+}
+
+static void hdmirx_get_avi_infoframe(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ union hdmi_infoframe frame = {};
+ u8 aviif[3 + 7 * 4];
+ int err;
+
+ hdmirx_read_avi_infoframe(hdmirx_dev, aviif);
+
+ err = hdmi_infoframe_unpack(&frame, aviif, sizeof(aviif));
+ if (err) {
+ v4l2_err(v4l2_dev, "failed to unpack AVI infoframe\n");
+ return;
+ }
+
+ v4l2_ctrl_s_ctrl(hdmirx_dev->rgb_range, frame.avi.quantization_range);
+
+ if (frame.avi.itc)
+ v4l2_ctrl_s_ctrl(hdmirx_dev->content_type,
+ frame.avi.content_type);
+ else
+ v4l2_ctrl_s_ctrl(hdmirx_dev->content_type,
+ V4L2_DV_IT_CONTENT_TYPE_NO_ITC);
+}
+
+static ssize_t
+hdmirx_debugfs_if_read(u32 type, void *priv, struct file *filp,
+ char __user *ubuf, size_t count, loff_t *ppos)
+{
+ struct snps_hdmirx_dev *hdmirx_dev = priv;
+ u8 aviif[V4L2_DEBUGFS_IF_MAX_LEN] = {};
+ int len;
+
+ if (type != V4L2_DEBUGFS_IF_AVI)
+ return 0;
+
+ hdmirx_read_avi_infoframe(hdmirx_dev, aviif);
+
+ len = aviif[2] + 4;
+ if (len > V4L2_DEBUGFS_IF_MAX_LEN)
+ len = -ENOENT;
+ else
+ len = simple_read_from_buffer(ubuf, count, ppos, aviif, len);
+
+ return len < 0 ? 0 : len;
+}
+
+static void hdmirx_format_change(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ struct hdmirx_stream *stream = &hdmirx_dev->stream;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ static const struct v4l2_event ev_src_chg = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
+
+ hdmirx_get_pix_fmt(hdmirx_dev);
+ hdmirx_get_colordepth(hdmirx_dev);
+ hdmirx_get_avi_infoframe(hdmirx_dev);
+
+ v4l2_dbg(1, debug, v4l2_dev, "%s: queue res_chg_event\n", __func__);
+ v4l2_event_queue(&stream->vdev, &ev_src_chg);
+}
+
+static void hdmirx_set_ddr_store_fmt(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ enum ddr_store_fmt store_fmt;
+ u32 dma_cfg1;
+
+ switch (hdmirx_dev->pix_fmt) {
+ case HDMIRX_RGB888:
+ store_fmt = STORE_RGB888;
+ break;
+ case HDMIRX_YUV444:
+ store_fmt = STORE_YUV444_8BIT;
+ break;
+ case HDMIRX_YUV422:
+ store_fmt = STORE_YUV422_8BIT;
+ break;
+ case HDMIRX_YUV420:
+ store_fmt = STORE_YUV420_8BIT;
+ break;
+ default:
+ store_fmt = STORE_RGB888;
+ break;
+ }
+
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG1,
+ DDR_STORE_FORMAT_MASK, DDR_STORE_FORMAT(store_fmt));
+ dma_cfg1 = hdmirx_readl(hdmirx_dev, DMA_CONFIG1);
+ v4l2_dbg(1, debug, v4l2_dev, "%s: pix_fmt: %s, DMA_CONFIG1:%#x\n",
+ __func__, pix_fmt_str[hdmirx_dev->pix_fmt], dma_cfg1);
+}
+
+static void hdmirx_dma_config(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ hdmirx_set_ddr_store_fmt(hdmirx_dev);
+
+ /* Note: uv_swap, rb can not swap, doc err */
+ if (hdmirx_dev->cur_fmt_fourcc != V4L2_PIX_FMT_NV16)
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG6, RB_SWAP_EN, RB_SWAP_EN);
+ else
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG6, RB_SWAP_EN, 0);
+
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG7,
+ LOCK_FRAME_NUM_MASK,
+ LOCK_FRAME_NUM(2));
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG1,
+ UV_WID_MASK | Y_WID_MASK | ABANDON_EN,
+ UV_WID(1) | Y_WID(2) | ABANDON_EN);
+}
+
+static void hdmirx_submodule_init(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ /* Note: if HDCP2_CONFIG is not configured, there will be errors */
+ hdmirx_update_bits(hdmirx_dev, HDCP2_CONFIG,
+ HDCP2_SWITCH_OVR_VALUE |
+ HDCP2_SWITCH_OVR_EN,
+ HDCP2_SWITCH_OVR_EN);
+ hdmirx_scdc_init(hdmirx_dev);
+ hdmirx_controller_init(hdmirx_dev);
+}
+
+static int hdmirx_enum_input(struct file *file, void *priv,
+ struct v4l2_input *input)
+{
+ if (input->index > 0)
+ return -EINVAL;
+
+ input->type = V4L2_INPUT_TYPE_CAMERA;
+ input->std = 0;
+ strscpy(input->name, "HDMI IN", sizeof(input->name));
+ input->capabilities = V4L2_IN_CAP_DV_TIMINGS;
+
+ return 0;
+}
+
+static int hdmirx_get_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int hdmirx_set_input(struct file *file, void *priv, unsigned int i)
+{
+ if (i)
+ return -EINVAL;
+ return 0;
+}
+
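+/*
+ * Derive the capture format from the detected DV timings: userspace may only
+ * choose the pixel format, while resolution and field order come from the
+ * signal. Bytesperline of each plane is aligned to 64 bytes for the DMA.
+ */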
+static void hdmirx_set_fmt(struct hdmirx_stream *stream,
+ struct v4l2_pix_format_mplane *pixm, bool try)
+{
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ struct v4l2_bt_timings *bt = &hdmirx_dev->timings.bt;
+ const struct v4l2_format_info *finfo;
+ unsigned int imagesize = 0;
+ unsigned int i;
+
+ memset(&pixm->plane_fmt[0], 0, sizeof(struct v4l2_plane_pix_format));
+ finfo = v4l2_format_info(pixm->pixelformat);
+ if (!finfo) {
+ finfo = v4l2_format_info(V4L2_PIX_FMT_BGR24);
+ v4l2_dbg(1, debug, v4l2_dev,
+ "%s: set_fmt:%#x not supported, use def_fmt:%x\n",
+ __func__, pixm->pixelformat, finfo->format);
+ }
+
+ if (!bt->width || !bt->height)
+ v4l2_dbg(1, debug, v4l2_dev, "%s: invalid resolution: %ux%u\n",
+ __func__, bt->width, bt->height);
+
+ pixm->pixelformat = finfo->format;
+ pixm->width = bt->width;
+ pixm->height = bt->height;
+ pixm->num_planes = finfo->mem_planes;
+ pixm->quantization = V4L2_QUANTIZATION_DEFAULT;
+ pixm->colorspace = V4L2_COLORSPACE_SRGB;
+ pixm->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+
+ if (bt->interlaced == V4L2_DV_INTERLACED)
+ pixm->field = V4L2_FIELD_INTERLACED_TB;
+ else
+ pixm->field = V4L2_FIELD_NONE;
+
+ memset(pixm->reserved, 0, sizeof(pixm->reserved));
+
+ v4l2_fill_pixfmt_mp(pixm, finfo->format, pixm->width, pixm->height);
+
+ for (i = 0; i < finfo->comp_planes; i++) {
+ struct v4l2_plane_pix_format *plane_fmt;
+ int width, height, bpl, size, bpp = 0;
+ const unsigned int hw_align = 64;
+
+ if (!i) {
+ width = pixm->width;
+ height = pixm->height;
+ } else {
+ width = pixm->width / finfo->hdiv;
+ height = pixm->height / finfo->vdiv;
+ }
+
+ switch (finfo->format) {
+ case V4L2_PIX_FMT_NV24:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_BGR24:
+ bpp = finfo->bpp[i];
+ break;
+ default:
+ v4l2_dbg(1, debug, v4l2_dev,
+ "fourcc: %#x is not supported\n",
+ finfo->format);
+ break;
+ }
+
+ bpl = ALIGN(width * bpp, hw_align);
+ size = bpl * height;
+ imagesize += size;
+
+ if (finfo->mem_planes > i) {
+ /* Set bpl and size for each mplane */
+ plane_fmt = pixm->plane_fmt + i;
+ plane_fmt->bytesperline = bpl;
+ plane_fmt->sizeimage = size;
+ }
+
+ v4l2_dbg(1, debug, v4l2_dev,
+ "C-Plane %u size: %d, Total imagesize: %d\n",
+ i, size, imagesize);
+ }
+
+ /*
+ * Single memory plane: sizeimage covers all component planes, which
+ * unifies the MPLANE and non-MPLANE handling.
+ */
+ if (finfo->mem_planes == 1)
+ pixm->plane_fmt[0].sizeimage = imagesize;
+
+ if (!try) {
+ stream->out_finfo = finfo;
+ stream->pixm = *pixm;
+ v4l2_dbg(1, debug, v4l2_dev,
+ "%s: req(%d, %d), out(%d, %d), fmt:%#x\n", __func__,
+ pixm->width, pixm->height, stream->pixm.width,
+ stream->pixm.height, finfo->format);
+ }
+}
+
+static int hdmirx_enum_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct hdmirx_stream *stream = video_drvdata(file);
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+
+ if (f->index >= 1)
+ return -EINVAL;
+
+ f->pixelformat = hdmirx_dev->cur_fmt_fourcc;
+
+ return 0;
+}
+
+static int hdmirx_s_fmt_vid_cap_mplane(struct file *file,
+ void *priv, struct v4l2_format *f)
+{
+ struct hdmirx_stream *stream = video_drvdata(file);
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+
+ if (vb2_is_busy(&stream->buf_queue)) {
+ v4l2_err(v4l2_dev, "%s: queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ hdmirx_set_fmt(stream, &f->fmt.pix_mp, false);
+
+ return 0;
+}
+
+static int hdmirx_g_fmt_vid_cap_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct hdmirx_stream *stream = video_drvdata(file);
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+ struct v4l2_pix_format_mplane pixm = {};
+
+ pixm.pixelformat = hdmirx_dev->cur_fmt_fourcc;
+ hdmirx_set_fmt(stream, &pixm, true);
+ f->fmt.pix_mp = pixm;
+
+ return 0;
+}
+
+static int hdmirx_g_dv_timings(struct file *file, void *_fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct hdmirx_stream *stream = video_drvdata(file);
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ u32 dma_cfg1;
+
+ *timings = hdmirx_dev->timings;
+ dma_cfg1 = hdmirx_readl(hdmirx_dev, DMA_CONFIG1);
+ v4l2_dbg(1, debug, v4l2_dev, "%s: pix_fmt: %s, DMA_CONFIG1:%#x\n",
+ __func__, pix_fmt_str[hdmirx_dev->pix_fmt], dma_cfg1);
+
+ return 0;
+}
+
+static int hdmirx_s_dv_timings(struct file *file, void *_fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct hdmirx_stream *stream = video_drvdata(file);
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+
+ if (!timings)
+ return -EINVAL;
+
+ if (debug)
+ v4l2_print_dv_timings(hdmirx_dev->v4l2_dev.name,
+ "s_dv_timings: ", timings, false);
+
+ if (!v4l2_valid_dv_timings(timings, &hdmirx_timings_cap, NULL, NULL)) {
+ v4l2_dbg(1, debug, v4l2_dev,
+ "%s: timings out of range\n", __func__);
+ return -ERANGE;
+ }
+
+ /* Check if the timings are part of the CEA-861 timings. */
+ v4l2_find_dv_timings_cap(timings, &hdmirx_timings_cap, 0, NULL, NULL);
+
+ if (v4l2_match_dv_timings(&hdmirx_dev->timings, timings, 0, false)) {
+ v4l2_dbg(1, debug, v4l2_dev, "%s: no change\n", __func__);
+ return 0;
+ }
+
+ /*
+ * Changing the timings implies a format change, which is not allowed
+ * while buffers for use with streaming have already been allocated.
+ */
+ if (vb2_is_busy(&stream->buf_queue))
+ return -EBUSY;
+
+ hdmirx_dev->timings = *timings;
+ /* Update the internal format */
+ hdmirx_set_fmt(stream, &stream->pixm, false);
+
+ return 0;
+}
+
+static int hdmirx_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct hdmirx_stream *stream = video_drvdata(file);
+ struct device *dev = stream->hdmirx_dev->dev;
+
+ strscpy(cap->driver, dev->driver->name, sizeof(cap->driver));
+ strscpy(cap->card, dev->driver->name, sizeof(cap->card));
+
+ return 0;
+}
+
+static int hdmirx_queue_setup(struct vb2_queue *queue,
+ unsigned int *num_buffers,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_ctxs[])
+{
+ struct hdmirx_stream *stream = vb2_get_drv_priv(queue);
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ const struct v4l2_pix_format_mplane *pixm = NULL;
+ const struct v4l2_format_info *out_finfo;
+ u32 i;
+
+ pixm = &stream->pixm;
+ out_finfo = stream->out_finfo;
+
+ if (!out_finfo) {
+ v4l2_err(v4l2_dev, "%s: out_fmt not set\n", __func__);
+ return -EINVAL;
+ }
+
+ if (*num_planes) {
+ if (*num_planes != pixm->num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < *num_planes; i++)
+ if (sizes[i] < pixm->plane_fmt[i].sizeimage)
+ return -EINVAL;
+ return 0;
+ }
+
+ *num_planes = out_finfo->mem_planes;
+
+ for (i = 0; i < out_finfo->mem_planes; i++)
+ sizes[i] = pixm->plane_fmt[i].sizeimage;
+
+ v4l2_dbg(1, debug, v4l2_dev, "%s: count %d, size %d\n",
+ v4l2_type_names[queue->type], *num_buffers, sizes[0]);
+
+ return 0;
+}
+
+/*
+ * The vb2_buffer is embedded in hdmirx_buffer in order to handle
+ * mplane and non-mplane buffers uniformly.
+ */
+static void hdmirx_buf_queue(struct vb2_buffer *vb)
+{
+ const struct v4l2_pix_format_mplane *pixm;
+ const struct v4l2_format_info *out_finfo;
+ struct hdmirx_buffer *hdmirx_buf;
+ struct vb2_v4l2_buffer *vbuf;
+ struct hdmirx_stream *stream;
+ struct vb2_queue *queue;
+ unsigned long flags;
+ unsigned int i;
+
+ vbuf = to_vb2_v4l2_buffer(vb);
+ hdmirx_buf = container_of(vbuf, struct hdmirx_buffer, vb);
+ queue = vb->vb2_queue;
+ stream = vb2_get_drv_priv(queue);
+ pixm = &stream->pixm;
+ out_finfo = stream->out_finfo;
+
+ memset(hdmirx_buf->buff_addr, 0, sizeof(hdmirx_buf->buff_addr));
+
+ /*
+ * If mem_planes > 1, every component plane has its own memory plane;
+ * otherwise all component planes share a single memory plane.
+ */
+ for (i = 0; i < out_finfo->mem_planes; i++)
+ hdmirx_buf->buff_addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
+
+ if (out_finfo->mem_planes == 1) {
+ if (out_finfo->comp_planes == 1) {
+ hdmirx_buf->buff_addr[HDMIRX_PLANE_CBCR] =
+ hdmirx_buf->buff_addr[HDMIRX_PLANE_Y];
+ } else {
+ for (i = 0; i < out_finfo->comp_planes - 1; i++)
+ hdmirx_buf->buff_addr[i + 1] =
+ hdmirx_buf->buff_addr[i] +
+ pixm->plane_fmt[i].bytesperline *
+ pixm->height;
+ }
+ }
+
+ spin_lock_irqsave(&stream->vbq_lock, flags);
+ list_add_tail(&hdmirx_buf->queue, &stream->buf_head);
+ spin_unlock_irqrestore(&stream->vbq_lock, flags);
+}
+
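+/* Give back every queued and in-flight buffer to vb2 with the given state */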
+static void return_all_buffers(struct hdmirx_stream *stream,
+ enum vb2_buffer_state state)
+{
+ struct hdmirx_buffer *buf, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&stream->vbq_lock, flags);
+ if (stream->curr_buf)
+ list_add_tail(&stream->curr_buf->queue, &stream->buf_head);
+ if (stream->next_buf && stream->next_buf != stream->curr_buf)
+ list_add_tail(&stream->next_buf->queue, &stream->buf_head);
+ stream->curr_buf = NULL;
+ stream->next_buf = NULL;
+
+ list_for_each_entry_safe(buf, tmp, &stream->buf_head, queue) {
+ list_del(&buf->queue);
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ }
+ spin_unlock_irqrestore(&stream->vbq_lock, flags);
+}
+
+static void hdmirx_stop_streaming(struct vb2_queue *queue)
+{
+ struct hdmirx_stream *stream = vb2_get_drv_priv(queue);
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, v4l2_dev, "stopping stream\n");
+ mutex_lock(&hdmirx_dev->stream_lock);
+ WRITE_ONCE(stream->stopping, true);
+
+ /* wait for the last IRQ to return the buffer */
+ ret = wait_event_timeout(stream->wq_stopped, !stream->stopping,
+ msecs_to_jiffies(500));
+ if (!ret)
+ v4l2_dbg(1, debug, v4l2_dev, "%s: timed out waiting for last irq\n",
+ __func__);
+
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG6, HDMIRX_DMA_EN, 0);
+ return_all_buffers(stream, VB2_BUF_STATE_ERROR);
+ mutex_unlock(&hdmirx_dev->stream_lock);
+ v4l2_dbg(1, debug, v4l2_dev, "stream stopped\n");
+}
+
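+/*
+ * Pick the first queued buffer, program its plane addresses into the DMA,
+ * set the line-flag threshold from the timings and enable the DMA interrupts.
+ */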
+static int hdmirx_start_streaming(struct vb2_queue *queue, unsigned int count)
+{
+ struct hdmirx_stream *stream = vb2_get_drv_priv(queue);
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ struct v4l2_dv_timings timings = hdmirx_dev->timings;
+ struct v4l2_bt_timings *bt = &timings.bt;
+ unsigned long lock_flags = 0;
+ int line_flag;
+
+ mutex_lock(&hdmirx_dev->stream_lock);
+ stream->frame_idx = 0;
+ stream->line_flag_int_cnt = 0;
+ stream->curr_buf = NULL;
+ stream->next_buf = NULL;
+ stream->irq_stat = 0;
+
+ WRITE_ONCE(stream->stopping, false);
+
+ spin_lock_irqsave(&stream->vbq_lock, lock_flags);
+ if (!stream->curr_buf) {
+ if (!list_empty(&stream->buf_head)) {
+ stream->curr_buf = list_first_entry(&stream->buf_head,
+ struct hdmirx_buffer,
+ queue);
+ list_del(&stream->curr_buf->queue);
+ } else {
+ stream->curr_buf = NULL;
+ }
+ }
+ spin_unlock_irqrestore(&stream->vbq_lock, lock_flags);
+
+ if (!stream->curr_buf) {
+ mutex_unlock(&hdmirx_dev->stream_lock);
+ return -ENOMEM;
+ }
+
+ v4l2_dbg(2, debug, v4l2_dev,
+ "%s: start_stream cur_buf y_addr:%#x, uv_addr:%#x\n",
+ __func__, stream->curr_buf->buff_addr[HDMIRX_PLANE_Y],
+ stream->curr_buf->buff_addr[HDMIRX_PLANE_CBCR]);
+ hdmirx_writel(hdmirx_dev, DMA_CONFIG2,
+ stream->curr_buf->buff_addr[HDMIRX_PLANE_Y]);
+ hdmirx_writel(hdmirx_dev, DMA_CONFIG3,
+ stream->curr_buf->buff_addr[HDMIRX_PLANE_CBCR]);
+
+ if (bt->height) {
+ if (bt->interlaced == V4L2_DV_INTERLACED)
+ line_flag = bt->height / 4;
+ else
+ line_flag = bt->height / 2;
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG7,
+ LINE_FLAG_NUM_MASK,
+ LINE_FLAG_NUM(line_flag));
+ } else {
+ v4l2_err(v4l2_dev, "invalid BT timing height=%d\n", bt->height);
+ }
+
+ hdmirx_writel(hdmirx_dev, DMA_CONFIG5, 0xffffffff);
+ hdmirx_writel(hdmirx_dev, CED_DYN_CONTROL, 0x1);
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG4,
+ LINE_FLAG_INT_EN |
+ HDMIRX_DMA_IDLE_INT |
+ HDMIRX_LOCK_DISABLE_INT |
+ LAST_FRAME_AXI_UNFINISH_INT_EN |
+ FIFO_OVERFLOW_INT_EN |
+ FIFO_UNDERFLOW_INT_EN |
+ HDMIRX_AXI_ERROR_INT_EN,
+ LINE_FLAG_INT_EN |
+ HDMIRX_DMA_IDLE_INT |
+ HDMIRX_LOCK_DISABLE_INT |
+ LAST_FRAME_AXI_UNFINISH_INT_EN |
+ FIFO_OVERFLOW_INT_EN |
+ FIFO_UNDERFLOW_INT_EN |
+ HDMIRX_AXI_ERROR_INT_EN);
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG6, HDMIRX_DMA_EN, HDMIRX_DMA_EN);
+ v4l2_dbg(1, debug, v4l2_dev, "%s: enable dma\n", __func__);
+ mutex_unlock(&hdmirx_dev->stream_lock);
+
+ return 0;
+}
+
+/* vb2 queue */
+static const struct vb2_ops hdmirx_vb2_ops = {
+ .queue_setup = hdmirx_queue_setup,
+ .buf_queue = hdmirx_buf_queue,
+ .stop_streaming = hdmirx_stop_streaming,
+ .start_streaming = hdmirx_start_streaming,
+};
+
+static int hdmirx_init_vb2_queue(struct vb2_queue *q,
+ struct hdmirx_stream *stream,
+ enum v4l2_buf_type buf_type)
+{
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+
+ q->type = buf_type;
+ q->io_modes = VB2_MMAP | VB2_DMABUF;
+ q->drv_priv = stream;
+ q->ops = &hdmirx_vb2_ops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct hdmirx_buffer);
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &stream->vlock;
+ q->dev = hdmirx_dev->dev;
+ q->min_queued_buffers = 1;
+
+ return vb2_queue_init(q);
+}
+
+/* video device */
+static const struct v4l2_ioctl_ops hdmirx_v4l2_ioctl_ops = {
+ .vidioc_querycap = hdmirx_querycap,
+ .vidioc_try_fmt_vid_cap_mplane = hdmirx_g_fmt_vid_cap_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = hdmirx_s_fmt_vid_cap_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = hdmirx_g_fmt_vid_cap_mplane,
+ .vidioc_enum_fmt_vid_cap = hdmirx_enum_fmt_vid_cap_mplane,
+
+ .vidioc_s_dv_timings = hdmirx_s_dv_timings,
+ .vidioc_g_dv_timings = hdmirx_g_dv_timings,
+ .vidioc_enum_dv_timings = hdmirx_enum_dv_timings,
+ .vidioc_query_dv_timings = hdmirx_query_dv_timings,
+ .vidioc_dv_timings_cap = hdmirx_dv_timings_cap,
+ .vidioc_enum_input = hdmirx_enum_input,
+ .vidioc_g_input = hdmirx_get_input,
+ .vidioc_s_input = hdmirx_set_input,
+ .vidioc_g_edid = hdmirx_get_edid,
+ .vidioc_s_edid = hdmirx_set_edid,
+ .vidioc_g_parm = hdmirx_g_parm,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = hdmirx_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct v4l2_file_operations hdmirx_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .unlocked_ioctl = video_ioctl2,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+};
+
+static int hdmirx_register_stream_vdev(struct hdmirx_stream *stream)
+{
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ struct video_device *vdev = &stream->vdev;
+ int ret;
+
+ strscpy(vdev->name, "stream_hdmirx", sizeof(vdev->name));
+ INIT_LIST_HEAD(&stream->buf_head);
+ spin_lock_init(&stream->vbq_lock);
+ mutex_init(&stream->vlock);
+ init_waitqueue_head(&stream->wq_stopped);
+ stream->curr_buf = NULL;
+ stream->next_buf = NULL;
+
+ vdev->ioctl_ops = &hdmirx_v4l2_ioctl_ops;
+ vdev->release = video_device_release_empty;
+ vdev->fops = &hdmirx_fops;
+ vdev->minor = -1;
+ vdev->v4l2_dev = v4l2_dev;
+ vdev->lock = &stream->vlock;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_STREAMING;
+ vdev->vfl_dir = VFL_DIR_RX;
+
+ video_set_drvdata(vdev, stream);
+
+ hdmirx_init_vb2_queue(&stream->buf_queue, stream,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ vdev->queue = &stream->buf_queue;
+
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "video_register_device failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
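+/* Stop and reset the DMA, mask its interrupts and schedule resolution-change handling */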
+static void process_signal_change(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG6, HDMIRX_DMA_EN, 0);
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG4,
+ LINE_FLAG_INT_EN |
+ HDMIRX_DMA_IDLE_INT |
+ HDMIRX_LOCK_DISABLE_INT |
+ LAST_FRAME_AXI_UNFINISH_INT_EN |
+ FIFO_OVERFLOW_INT_EN |
+ FIFO_UNDERFLOW_INT_EN |
+ HDMIRX_AXI_ERROR_INT_EN, 0);
+ hdmirx_reset_dma(hdmirx_dev);
+ queue_delayed_work(system_unbound_wq,
+ &hdmirx_dev->delayed_work_res_change,
+ msecs_to_jiffies(50));
+}
+
+static void avpunit_0_int_handler(struct snps_hdmirx_dev *hdmirx_dev,
+ int status, bool *handled)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+
+ if (status & (CED_DYN_CNT_CH2_IRQ |
+ CED_DYN_CNT_CH1_IRQ |
+ CED_DYN_CNT_CH0_IRQ)) {
+ process_signal_change(hdmirx_dev);
+ v4l2_dbg(2, debug, v4l2_dev, "%s: avp0_st:%#x\n",
+ __func__, status);
+ *handled = true;
+ }
+
+ hdmirx_clear_interrupt(hdmirx_dev, AVPUNIT_0_INT_CLEAR, 0xffffffff);
+ hdmirx_writel(hdmirx_dev, AVPUNIT_0_INT_FORCE, 0x0);
+}
+
+static void avpunit_1_int_handler(struct snps_hdmirx_dev *hdmirx_dev,
+ int status, bool *handled)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+
+ if (status & DEFRAMER_VSYNC_THR_REACHED_IRQ) {
+ v4l2_dbg(2, debug, v4l2_dev,
+ "Vertical Sync threshold reached interrupt %#x", status);
+ hdmirx_update_bits(hdmirx_dev, AVPUNIT_1_INT_MASK_N,
+ DEFRAMER_VSYNC_THR_REACHED_MASK_N, 0);
+ *handled = true;
+ }
+}
+
+static void mainunit_0_int_handler(struct snps_hdmirx_dev *hdmirx_dev,
+ int status, bool *handled)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+
+ v4l2_dbg(2, debug, v4l2_dev, "mu0_st:%#x\n", status);
+ if (status & TIMER_BASE_LOCKED_IRQ) {
+ hdmirx_update_bits(hdmirx_dev, MAINUNIT_0_INT_MASK_N,
+ TIMER_BASE_LOCKED_IRQ, 0);
+ complete(&hdmirx_dev->timer_base_lock);
+ *handled = true;
+ }
+
+ if (status & TMDSQPCLK_OFF_CHG) {
+ process_signal_change(hdmirx_dev);
+ v4l2_dbg(2, debug, v4l2_dev, "%s: TMDSQPCLK_OFF_CHG\n", __func__);
+ *handled = true;
+ }
+
+ if (status & TMDSQPCLK_LOCKED_CHG) {
+ process_signal_change(hdmirx_dev);
+ v4l2_dbg(2, debug, v4l2_dev, "%s: TMDSQPCLK_LOCKED_CHG\n", __func__);
+ *handled = true;
+ }
+
+ hdmirx_clear_interrupt(hdmirx_dev, MAINUNIT_0_INT_CLEAR, 0xffffffff);
+ hdmirx_writel(hdmirx_dev, MAINUNIT_0_INT_FORCE, 0x0);
+}
+
+static void mainunit_2_int_handler(struct snps_hdmirx_dev *hdmirx_dev,
+ int status, bool *handled)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+
+ v4l2_dbg(2, debug, v4l2_dev, "mu2_st:%#x\n", status);
+ if (status & PHYCREG_CR_WRITE_DONE) {
+ hdmirx_update_bits(hdmirx_dev, MAINUNIT_2_INT_MASK_N,
+ PHYCREG_CR_WRITE_DONE, 0);
+ complete(&hdmirx_dev->cr_write_done);
+ *handled = true;
+ }
+
+ if (status & TMDSVALID_STABLE_CHG) {
+ process_signal_change(hdmirx_dev);
+ v4l2_dbg(2, debug, v4l2_dev, "%s: TMDSVALID_STABLE_CHG\n", __func__);
+ *handled = true;
+ }
+
+ hdmirx_clear_interrupt(hdmirx_dev, MAINUNIT_2_INT_CLEAR, 0xffffffff);
+ hdmirx_writel(hdmirx_dev, MAINUNIT_2_INT_FORCE, 0x0);
+}
+
+static void pkt_2_int_handler(struct snps_hdmirx_dev *hdmirx_dev,
+ int status, bool *handled)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+
+ v4l2_dbg(2, debug, v4l2_dev, "%s: pk2_st:%#x\n", __func__, status);
+ if (status & PKTDEC_AVIIF_RCV_IRQ) {
+ hdmirx_update_bits(hdmirx_dev, PKT_2_INT_MASK_N,
+ PKTDEC_AVIIF_RCV_IRQ, 0);
+ complete(&hdmirx_dev->avi_pkt_rcv);
+ v4l2_dbg(2, debug, v4l2_dev, "%s: AVIIF_RCV_IRQ\n", __func__);
+ *handled = true;
+ }
+
+ hdmirx_clear_interrupt(hdmirx_dev, PKT_2_INT_CLEAR, 0xffffffff);
+}
+
+static void scdc_int_handler(struct snps_hdmirx_dev *hdmirx_dev,
+ int status, bool *handled)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+
+ v4l2_dbg(2, debug, v4l2_dev, "%s: scdc_st:%#x\n", __func__, status);
+ if (status & SCDCTMDSCCFG_CHG) {
+ hdmirx_tmds_clk_ratio_config(hdmirx_dev);
+ *handled = true;
+ }
+
+ hdmirx_clear_interrupt(hdmirx_dev, SCDC_INT_CLEAR, 0xffffffff);
+}
+
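+/*
+ * Top-level controller interrupt: read each unit's masked status and
+ * dispatch to the per-unit handlers above.
+ */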
+static irqreturn_t hdmirx_hdmi_irq_handler(int irq, void *dev_id)
+{
+ struct snps_hdmirx_dev *hdmirx_dev = dev_id;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ u32 mu0_st, mu2_st, pk2_st, scdc_st, avp1_st, avp0_st;
+ u32 mu0_mask, mu2_mask, pk2_mask, scdc_mask, avp1_msk, avp0_msk;
+ bool handled = false;
+
+ mu0_mask = hdmirx_readl(hdmirx_dev, MAINUNIT_0_INT_MASK_N);
+ mu2_mask = hdmirx_readl(hdmirx_dev, MAINUNIT_2_INT_MASK_N);
+ pk2_mask = hdmirx_readl(hdmirx_dev, PKT_2_INT_MASK_N);
+ scdc_mask = hdmirx_readl(hdmirx_dev, SCDC_INT_MASK_N);
+ mu0_st = hdmirx_readl(hdmirx_dev, MAINUNIT_0_INT_STATUS);
+ mu2_st = hdmirx_readl(hdmirx_dev, MAINUNIT_2_INT_STATUS);
+ pk2_st = hdmirx_readl(hdmirx_dev, PKT_2_INT_STATUS);
+ scdc_st = hdmirx_readl(hdmirx_dev, SCDC_INT_STATUS);
+ avp0_st = hdmirx_readl(hdmirx_dev, AVPUNIT_0_INT_STATUS);
+ avp1_st = hdmirx_readl(hdmirx_dev, AVPUNIT_1_INT_STATUS);
+ avp0_msk = hdmirx_readl(hdmirx_dev, AVPUNIT_0_INT_MASK_N);
+ avp1_msk = hdmirx_readl(hdmirx_dev, AVPUNIT_1_INT_MASK_N);
+ mu0_st &= mu0_mask;
+ mu2_st &= mu2_mask;
+ pk2_st &= pk2_mask;
+ avp1_st &= avp1_msk;
+ avp0_st &= avp0_msk;
+ scdc_st &= scdc_mask;
+
+ if (avp0_st)
+ avpunit_0_int_handler(hdmirx_dev, avp0_st, &handled);
+ if (avp1_st)
+ avpunit_1_int_handler(hdmirx_dev, avp1_st, &handled);
+ if (mu0_st)
+ mainunit_0_int_handler(hdmirx_dev, mu0_st, &handled);
+ if (mu2_st)
+ mainunit_2_int_handler(hdmirx_dev, mu2_st, &handled);
+ if (pk2_st)
+ pkt_2_int_handler(hdmirx_dev, pk2_st, &handled);
+ if (scdc_st)
+ scdc_int_handler(hdmirx_dev, scdc_st, &handled);
+
+ if (!handled) {
+ v4l2_dbg(2, debug, v4l2_dev, "%s: hdmi irq not handled\n", __func__);
+ v4l2_dbg(2, debug, v4l2_dev,
+ "avp0:%#x, avp1:%#x, mu0:%#x, mu2:%#x, pk2:%#x, scdc:%#x\n",
+ avp0_st, avp1_st, mu0_st, mu2_st, pk2_st, scdc_st);
+ }
+
+ v4l2_dbg(2, debug, v4l2_dev, "%s: en_fiq", __func__);
+
+ return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static void hdmirx_vb_done(struct hdmirx_stream *stream,
+ struct vb2_v4l2_buffer *vb_done)
+{
+ const struct v4l2_format_info *finfo = stream->out_finfo;
+ u32 i;
+
+ /* Dequeue a filled buffer */
+ for (i = 0; i < finfo->mem_planes; i++) {
+ vb2_set_plane_payload(&vb_done->vb2_buf, i,
+ stream->pixm.plane_fmt[i].sizeimage);
+ }
+
+ vb_done->vb2_buf.timestamp = ktime_get_ns();
+ vb2_buffer_done(&vb_done->vb2_buf, VB2_BUF_STATE_DONE);
+}
+
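+/*
+ * DMA idle: the current frame has been written out, so complete the current
+ * buffer (if a next one was armed) and promote the next buffer to current.
+ */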
+static void dma_idle_int_handler(struct snps_hdmirx_dev *hdmirx_dev,
+ bool *handled)
+{
+ struct hdmirx_stream *stream = &hdmirx_dev->stream;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ struct v4l2_dv_timings timings = hdmirx_dev->timings;
+ struct v4l2_bt_timings *bt = &timings.bt;
+ struct vb2_v4l2_buffer *vb_done = NULL;
+
+ if (!(stream->irq_stat & LINE_FLAG_INT_EN))
+ v4l2_dbg(1, debug, v4l2_dev,
+ "%s: last irq had no line_flag set\n", __func__);
+
+ /* skip first frames that are expected to come out zeroed from DMA */
+ if (stream->line_flag_int_cnt <= FILTER_FRAME_CNT)
+ goto DMA_IDLE_OUT;
+
+ if (bt->interlaced != V4L2_DV_INTERLACED ||
+ !(stream->line_flag_int_cnt % 2)) {
+ if (stream->next_buf) {
+ if (stream->curr_buf)
+ vb_done = &stream->curr_buf->vb;
+
+ if (vb_done) {
+ vb_done->vb2_buf.timestamp = ktime_get_ns();
+ vb_done->sequence = stream->frame_idx;
+
+ if (bt->interlaced)
+ vb_done->field = V4L2_FIELD_INTERLACED_TB;
+ else
+ vb_done->field = V4L2_FIELD_NONE;
+
+ hdmirx_vb_done(stream, vb_done);
+ stream->frame_idx++;
+ if (stream->frame_idx == 30)
+ v4l2_dbg(1, debug, v4l2_dev,
+ "received 30 frames\n");
+ }
+
+ stream->curr_buf = NULL;
+ if (stream->next_buf) {
+ stream->curr_buf = stream->next_buf;
+ stream->next_buf = NULL;
+ }
+ } else {
+ v4l2_dbg(3, debug, v4l2_dev,
+ "%s: next_buf NULL, skip vb_done\n", __func__);
+ }
+ }
+
+DMA_IDLE_OUT:
+ *handled = true;
+}
+
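+/*
+ * Line flag (mid-frame) interrupt: arm the next buffer's addresses in the
+ * DMA registers so the hardware can switch to it for the next frame.
+ */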
+static void line_flag_int_handler(struct snps_hdmirx_dev *hdmirx_dev,
+ bool *handled)
+{
+ struct hdmirx_stream *stream = &hdmirx_dev->stream;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ struct v4l2_dv_timings timings = hdmirx_dev->timings;
+ struct v4l2_bt_timings *bt = &timings.bt;
+ u32 dma_cfg6;
+
+ stream->line_flag_int_cnt++;
+ if (!(stream->irq_stat & HDMIRX_DMA_IDLE_INT))
+ v4l2_dbg(1, debug, v4l2_dev,
+ "%s: last irq had no dma_idle set\n", __func__);
+ dma_cfg6 = hdmirx_readl(hdmirx_dev, DMA_CONFIG6);
+ if (!(dma_cfg6 & HDMIRX_DMA_EN)) {
+ v4l2_dbg(2, debug, v4l2_dev, "%s: dma not on\n", __func__);
+ goto LINE_FLAG_OUT;
+ }
+
+ if (stream->line_flag_int_cnt <= FILTER_FRAME_CNT)
+ goto LINE_FLAG_OUT;
+
+ if (bt->interlaced != V4L2_DV_INTERLACED ||
+ !(stream->line_flag_int_cnt % 2)) {
+ if (!stream->next_buf) {
+ spin_lock(&stream->vbq_lock);
+ if (!list_empty(&stream->buf_head)) {
+ stream->next_buf = list_first_entry(&stream->buf_head,
+ struct hdmirx_buffer,
+ queue);
+ list_del(&stream->next_buf->queue);
+ } else {
+ stream->next_buf = NULL;
+ }
+ spin_unlock(&stream->vbq_lock);
+
+ if (stream->next_buf) {
+ hdmirx_writel(hdmirx_dev, DMA_CONFIG2,
+ stream->next_buf->buff_addr[HDMIRX_PLANE_Y]);
+ hdmirx_writel(hdmirx_dev, DMA_CONFIG3,
+ stream->next_buf->buff_addr[HDMIRX_PLANE_CBCR]);
+ } else {
+ v4l2_dbg(3, debug, v4l2_dev,
+ "%s: no buffer is available\n", __func__);
+ }
+ }
+ } else {
+ v4l2_dbg(3, debug, v4l2_dev, "%s: interlace:%d, line_flag_int_cnt:%d\n",
+ __func__, bt->interlaced, stream->line_flag_int_cnt);
+ }
+
+LINE_FLAG_OUT:
+ *handled = true;
+}
+
+static irqreturn_t hdmirx_dma_irq_handler(int irq, void *dev_id)
+{
+ struct snps_hdmirx_dev *hdmirx_dev = dev_id;
+ struct hdmirx_stream *stream = &hdmirx_dev->stream;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ u32 dma_stat1, dma_stat13;
+ bool handled = false;
+
+ dma_stat1 = hdmirx_readl(hdmirx_dev, DMA_STATUS1);
+ dma_stat13 = hdmirx_readl(hdmirx_dev, DMA_STATUS13);
+ v4l2_dbg(3, debug, v4l2_dev, "dma_irq st1:%#x, st13:%d\n",
+ dma_stat1, dma_stat13);
+
+ if (READ_ONCE(stream->stopping)) {
+ v4l2_dbg(1, debug, v4l2_dev, "%s: stop stream\n", __func__);
+ hdmirx_writel(hdmirx_dev, DMA_CONFIG5, 0xffffffff);
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG4,
+ LINE_FLAG_INT_EN |
+ HDMIRX_DMA_IDLE_INT |
+ HDMIRX_LOCK_DISABLE_INT |
+ LAST_FRAME_AXI_UNFINISH_INT_EN |
+ FIFO_OVERFLOW_INT_EN |
+ FIFO_UNDERFLOW_INT_EN |
+ HDMIRX_AXI_ERROR_INT_EN, 0);
+ WRITE_ONCE(stream->stopping, false);
+ wake_up(&stream->wq_stopped);
+ return IRQ_HANDLED;
+ }
+
+ if (dma_stat1 & HDMIRX_DMA_IDLE_INT)
+ dma_idle_int_handler(hdmirx_dev, &handled);
+
+ if (dma_stat1 & LINE_FLAG_INT_EN)
+ line_flag_int_handler(hdmirx_dev, &handled);
+
+ if (!handled)
+ v4l2_dbg(3, debug, v4l2_dev,
+ "%s: dma irq not handled, dma_stat1:%#x\n",
+ __func__, dma_stat1);
+
+ stream->irq_stat = dma_stat1;
+ hdmirx_writel(hdmirx_dev, DMA_CONFIG5, 0xffffffff);
+
+ return IRQ_HANDLED;
+}
+
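+/*
+ * Poll for TMDS, CMU and DMA lock (up to 300 iterations), reconfiguring the
+ * TMDS clock ratio on every pass, then wait for an AVI InfoFrame before
+ * reporting the new format to userspace.
+ */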
+static int hdmirx_wait_signal_lock(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ u32 mu_status, scdc_status, dma_st10, cmu_st;
+ u32 i;
+
+ for (i = 0; i < 300; i++) {
+ mu_status = hdmirx_readl(hdmirx_dev, MAINUNIT_STATUS);
+ scdc_status = hdmirx_readl(hdmirx_dev, SCDC_REGBANK_STATUS3);
+ dma_st10 = hdmirx_readl(hdmirx_dev, DMA_STATUS10);
+ cmu_st = hdmirx_readl(hdmirx_dev, CMU_STATUS);
+
+ if ((mu_status & TMDSVALID_STABLE_ST) &&
+ (dma_st10 & HDMIRX_LOCK) &&
+ (cmu_st & TMDSQPCLK_LOCKED_ST))
+ break;
+
+ if (!tx_5v_power_present(hdmirx_dev)) {
+ v4l2_dbg(1, debug, v4l2_dev,
+ "%s: HDMI pull out, return\n", __func__);
+ return -1;
+ }
+
+ hdmirx_tmds_clk_ratio_config(hdmirx_dev);
+ }
+
+ if (i == 300) {
+ v4l2_err(v4l2_dev, "%s: signal not lock, tmds_clk_ratio:%d\n",
+ __func__, hdmirx_dev->tmds_clk_ratio);
+ v4l2_err(v4l2_dev, "%s: mu_st:%#x, scdc_st:%#x, dma_st10:%#x\n",
+ __func__, mu_status, scdc_status, dma_st10);
+ return -1;
+ }
+
+ v4l2_dbg(1, debug, v4l2_dev, "%s: signal lock ok, i:%d\n", __func__, i);
+ hdmirx_writel(hdmirx_dev, GLOBAL_SWRESET_REQUEST, DATAPATH_SWRESETREQ);
+
+ reinit_completion(&hdmirx_dev->avi_pkt_rcv);
+ hdmirx_clear_interrupt(hdmirx_dev, PKT_2_INT_CLEAR, 0xffffffff);
+ hdmirx_update_bits(hdmirx_dev, PKT_2_INT_MASK_N,
+ PKTDEC_AVIIF_RCV_IRQ, PKTDEC_AVIIF_RCV_IRQ);
+
+ if (!wait_for_completion_timeout(&hdmirx_dev->avi_pkt_rcv,
+ msecs_to_jiffies(300))) {
+ v4l2_err(v4l2_dev, "%s wait avi_pkt_rcv failed\n", __func__);
+ hdmirx_update_bits(hdmirx_dev, PKT_2_INT_MASK_N,
+ PKTDEC_AVIIF_RCV_IRQ, 0);
+ }
+
+ msleep(50);
+ hdmirx_format_change(hdmirx_dev);
+
+ return 0;
+}
+
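+/* Bring up the controller and PHY for a newly connected source */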
+static void hdmirx_plugin(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ if (hdmirx_dev->plugged)
+ return;
+
+ hdmirx_submodule_init(hdmirx_dev);
+ hdmirx_update_bits(hdmirx_dev, SCDC_CONFIG, POWERPROVIDED,
+ POWERPROVIDED);
+ hdmirx_phy_config(hdmirx_dev);
+ hdmirx_interrupts_setup(hdmirx_dev, true);
+
+ hdmirx_dev->plugged = true;
+}
+
+static void hdmirx_delayed_work_hotplug(struct work_struct *work)
+{
+ struct snps_hdmirx_dev *hdmirx_dev;
+ bool plugin;
+
+ hdmirx_dev = container_of(work, struct snps_hdmirx_dev,
+ delayed_work_hotplug.work);
+
+ mutex_lock(&hdmirx_dev->work_lock);
+ plugin = tx_5v_power_present(hdmirx_dev);
+ v4l2_ctrl_s_ctrl(hdmirx_dev->detect_tx_5v_ctrl, plugin);
+ v4l2_dbg(1, debug, &hdmirx_dev->v4l2_dev, "%s: plugin:%d\n",
+ __func__, plugin);
+
+ hdmirx_plugout(hdmirx_dev);
+
+ if (plugin)
+ hdmirx_plugin(hdmirx_dev);
+
+ mutex_unlock(&hdmirx_dev->work_lock);
+}
+
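+/*
+ * Resolution change: re-initialize the controller and PHY, wait for the new
+ * signal to lock, and either reconfigure the DMA or fall back to a full
+ * hotplug cycle on failure.
+ */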
+static void hdmirx_delayed_work_res_change(struct work_struct *work)
+{
+ struct snps_hdmirx_dev *hdmirx_dev;
+ bool plugin;
+
+ hdmirx_dev = container_of(work, struct snps_hdmirx_dev,
+ delayed_work_res_change.work);
+
+ mutex_lock(&hdmirx_dev->work_lock);
+ plugin = tx_5v_power_present(hdmirx_dev);
+ v4l2_dbg(1, debug, &hdmirx_dev->v4l2_dev, "%s: plugin:%d\n",
+ __func__, plugin);
+ if (plugin) {
+ hdmirx_interrupts_setup(hdmirx_dev, false);
+ hdmirx_submodule_init(hdmirx_dev);
+ hdmirx_update_bits(hdmirx_dev, SCDC_CONFIG, POWERPROVIDED,
+ POWERPROVIDED);
+ hdmirx_phy_config(hdmirx_dev);
+
+ if (hdmirx_wait_signal_lock(hdmirx_dev)) {
+ hdmirx_plugout(hdmirx_dev);
+ queue_delayed_work(system_unbound_wq,
+ &hdmirx_dev->delayed_work_hotplug,
+ msecs_to_jiffies(200));
+ } else {
+ hdmirx_dma_config(hdmirx_dev);
+ hdmirx_interrupts_setup(hdmirx_dev, true);
+ }
+ }
+ mutex_unlock(&hdmirx_dev->work_lock);
+}
+
+static irqreturn_t hdmirx_5v_det_irq_handler(int irq, void *dev_id)
+{
+ struct snps_hdmirx_dev *hdmirx_dev = dev_id;
+ u32 val;
+
+ val = gpiod_get_value(hdmirx_dev->detect_5v_gpio);
+ v4l2_dbg(3, debug, &hdmirx_dev->v4l2_dev, "%s: 5v:%d\n", __func__, val);
+
+ queue_delayed_work(system_unbound_wq,
+ &hdmirx_dev->delayed_work_hotplug,
+ msecs_to_jiffies(10));
+
+ return IRQ_HANDLED;
+}
+
+static const struct hdmirx_cec_ops hdmirx_cec_ops = {
+ .write = hdmirx_writel,
+ .read = hdmirx_readl,
+};
+
+static void devm_hdmirx_of_reserved_mem_device_release(void *dev)
+{
+ of_reserved_mem_device_release(dev);
+}
+
+static int hdmirx_parse_dt(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ struct device *dev = hdmirx_dev->dev;
+ int ret;
+
+ hdmirx_dev->num_clks = devm_clk_bulk_get_all(dev, &hdmirx_dev->clks);
+ if (hdmirx_dev->num_clks < 1)
+ return -ENODEV;
+
+ hdmirx_dev->resets[HDMIRX_RST_A].id = "axi";
+ hdmirx_dev->resets[HDMIRX_RST_P].id = "apb";
+ hdmirx_dev->resets[HDMIRX_RST_REF].id = "ref";
+ hdmirx_dev->resets[HDMIRX_RST_BIU].id = "biu";
+
+ ret = devm_reset_control_bulk_get_exclusive(dev, HDMIRX_NUM_RST,
+ hdmirx_dev->resets);
+ if (ret < 0) {
+ dev_err(dev, "failed to get reset controls\n");
+ return ret;
+ }
+
+ hdmirx_dev->detect_5v_gpio =
+ devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
+
+ if (IS_ERR(hdmirx_dev->detect_5v_gpio)) {
+ dev_err(dev, "failed to get hdmirx hot plug detection gpio\n");
+ return PTR_ERR(hdmirx_dev->detect_5v_gpio);
+ }
+
+ hdmirx_dev->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,grf");
+ if (IS_ERR(hdmirx_dev->grf)) {
+ dev_err(dev, "failed to get rockchip,grf\n");
+ return PTR_ERR(hdmirx_dev->grf);
+ }
+
+ hdmirx_dev->vo1_grf = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,vo1-grf");
+ if (IS_ERR(hdmirx_dev->vo1_grf)) {
+ dev_err(dev, "failed to get rockchip,vo1-grf\n");
+ return PTR_ERR(hdmirx_dev->vo1_grf);
+ }
+
+ if (!device_property_read_bool(dev, "hpd-is-active-low"))
+ hdmirx_dev->hpd_trigger_level_high = true;
+
+ ret = of_reserved_mem_device_init(dev);
+ if (ret) {
+ dev_warn(dev, "no reserved memory for HDMIRX, use default CMA\n");
+ } else {
+ ret = devm_add_action_or_reset(dev,
+ devm_hdmirx_of_reserved_mem_device_release,
+ dev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hdmirx_disable_all_interrupts(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ hdmirx_writel(hdmirx_dev, MAINUNIT_0_INT_MASK_N, 0);
+ hdmirx_writel(hdmirx_dev, MAINUNIT_1_INT_MASK_N, 0);
+ hdmirx_writel(hdmirx_dev, MAINUNIT_2_INT_MASK_N, 0);
+ hdmirx_writel(hdmirx_dev, AVPUNIT_0_INT_MASK_N, 0);
+ hdmirx_writel(hdmirx_dev, AVPUNIT_1_INT_MASK_N, 0);
+ hdmirx_writel(hdmirx_dev, PKT_0_INT_MASK_N, 0);
+ hdmirx_writel(hdmirx_dev, PKT_1_INT_MASK_N, 0);
+ hdmirx_writel(hdmirx_dev, PKT_2_INT_MASK_N, 0);
+ hdmirx_writel(hdmirx_dev, SCDC_INT_MASK_N, 0);
+ hdmirx_writel(hdmirx_dev, CEC_INT_MASK_N, 0);
+
+ hdmirx_clear_interrupt(hdmirx_dev, MAINUNIT_0_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, MAINUNIT_1_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, MAINUNIT_2_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, AVPUNIT_0_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, AVPUNIT_1_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, PKT_0_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, PKT_1_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, PKT_2_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, SCDC_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, HDCP_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, HDCP_1_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, CEC_INT_CLEAR, 0xffffffff);
+}
+
+static void hdmirx_init(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ hdmirx_update_bits(hdmirx_dev, PHY_CONFIG, PHY_RESET | PHY_PDDQ, 0);
+
+ regmap_write(hdmirx_dev->vo1_grf, VO1_GRF_VO1_CON2,
+ (HDMIRX_SDAIN_MSK | HDMIRX_SCLIN_MSK) |
+ ((HDMIRX_SDAIN_MSK | HDMIRX_SCLIN_MSK) << 16));
+ /*
+ * Some interrupts are enabled by default, so disable them all
+ * and clear the interrupt status first.
+ */
+ hdmirx_disable_all_interrupts(hdmirx_dev);
+}
+
+/* hdmi-4k-300mhz EDID produced by v4l2-ctl tool */
+static u8 __maybe_unused edid_default[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x31, 0xd8, 0x34, 0x12, 0x00, 0x00, 0x00, 0x00,
+ 0x22, 0x1a, 0x01, 0x03, 0x80, 0x60, 0x36, 0x78,
+ 0x0f, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26,
+ 0x0f, 0x50, 0x54, 0x2f, 0xcf, 0x00, 0x31, 0x59,
+ 0x45, 0x59, 0x81, 0x80, 0x81, 0x40, 0x90, 0x40,
+ 0x95, 0x00, 0xa9, 0x40, 0xb3, 0x00, 0x04, 0x74,
+ 0x00, 0x30, 0xf2, 0x70, 0x5a, 0x80, 0xb0, 0x58,
+ 0x8a, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x55, 0x18,
+ 0x87, 0x1e, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x68,
+ 0x64, 0x6d, 0x69, 0x2d, 0x34, 0x6b, 0x2d, 0x33,
+ 0x30, 0x30, 0x0a, 0x20, 0x00, 0x00, 0x00, 0x10,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc5,
+
+ 0x02, 0x03, 0x40, 0xf1, 0x4f, 0x5f, 0x5e, 0x5d,
+ 0x10, 0x1f, 0x04, 0x13, 0x22, 0x21, 0x20, 0x05,
+ 0x14, 0x02, 0x11, 0x01, 0x23, 0x09, 0x07, 0x07,
+ 0x83, 0x01, 0x00, 0x00, 0x6d, 0x03, 0x0c, 0x00,
+ 0x10, 0x00, 0x00, 0x3c, 0x21, 0x00, 0x60, 0x01,
+ 0x02, 0x03, 0x67, 0xd8, 0x5d, 0xc4, 0x01, 0x00,
+ 0x00, 0x00, 0xe2, 0x00, 0xca, 0xe3, 0x05, 0x00,
+ 0x00, 0xe3, 0x06, 0x01, 0x00, 0xe2, 0x0d, 0x5f,
+ 0xa3, 0x66, 0x00, 0xa0, 0xf0, 0x70, 0x1f, 0x80,
+ 0x30, 0x20, 0x35, 0x00, 0xc0, 0x1c, 0x32, 0x00,
+ 0x00, 0x1e, 0x1a, 0x36, 0x80, 0xa0, 0x70, 0x38,
+ 0x1f, 0x40, 0x30, 0x20, 0x35, 0x00, 0xc0, 0x1c,
+ 0x32, 0x00, 0x00, 0x1a, 0x1a, 0x1d, 0x00, 0x80,
+ 0x51, 0xd0, 0x1c, 0x20, 0x40, 0x80, 0x35, 0x00,
+ 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1c, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa1,
+};
+
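+/*
+ * Deassert HPD and, when the default EDID is enabled in Kconfig, load it and
+ * re-assert HPD so the source sees a valid sink.
+ */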
+static void hdmirx_load_default_edid(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ struct v4l2_edid def_edid = {};
+
+ hdmirx_hpd_ctrl(hdmirx_dev, false);
+
+ if (!IS_ENABLED(CONFIG_VIDEO_SYNOPSYS_HDMIRX_LOAD_DEFAULT_EDID))
+ return;
+
+ /* write the default EDID while HPD is deasserted, then re-assert HPD */
+ def_edid.blocks = sizeof(edid_default) / EDID_BLOCK_SIZE;
+ def_edid.edid = edid_default;
+
+ hdmirx_write_edid(hdmirx_dev, &def_edid);
+ hdmirx_hpd_ctrl(hdmirx_dev, true);
+}
+
+static int hdmirx_disable(struct device *dev)
+{
+ struct snps_hdmirx_dev *hdmirx_dev = dev_get_drvdata(dev);
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+
+ hdmirx_plugout(hdmirx_dev);
+ hdmirx_hpd_ctrl(hdmirx_dev, false);
+
+ clk_bulk_disable_unprepare(hdmirx_dev->num_clks, hdmirx_dev->clks);
+
+ v4l2_dbg(2, debug, v4l2_dev, "%s: suspend\n", __func__);
+
+ return pinctrl_pm_select_sleep_state(dev);
+}
+
+static int hdmirx_enable(struct device *dev)
+{
+ struct snps_hdmirx_dev *hdmirx_dev = dev_get_drvdata(dev);
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ int ret;
+
+ v4l2_dbg(2, debug, v4l2_dev, "%s: resume\n", __func__);
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(hdmirx_dev->num_clks, hdmirx_dev->clks);
+ if (ret) {
+ dev_err(dev, "failed to enable hdmirx bulk clks: %d\n", ret);
+ return ret;
+ }
+
+ reset_control_bulk_assert(HDMIRX_NUM_RST, hdmirx_dev->resets);
+ usleep_range(150, 160);
+ reset_control_bulk_deassert(HDMIRX_NUM_RST, hdmirx_dev->resets);
+ usleep_range(150, 160);
+
+ return 0;
+}
+
+static void hdmirx_disable_irq(struct device *dev)
+{
+ struct snps_hdmirx_dev *hdmirx_dev = dev_get_drvdata(dev);
+
+ disable_irq(hdmirx_dev->det_irq);
+ disable_irq(hdmirx_dev->dma_irq);
+ disable_irq(hdmirx_dev->hdmi_irq);
+
+ cancel_delayed_work_sync(&hdmirx_dev->delayed_work_hotplug);
+ cancel_delayed_work_sync(&hdmirx_dev->delayed_work_res_change);
+}
+
+static void hdmirx_enable_irq(struct device *dev)
+{
+ struct snps_hdmirx_dev *hdmirx_dev = dev_get_drvdata(dev);
+
+ enable_irq(hdmirx_dev->hdmi_irq);
+ enable_irq(hdmirx_dev->dma_irq);
+ enable_irq(hdmirx_dev->det_irq);
+
+ queue_delayed_work(system_unbound_wq,
+ &hdmirx_dev->delayed_work_hotplug,
+ msecs_to_jiffies(110));
+}
+
+static __maybe_unused int hdmirx_suspend(struct device *dev)
+{
+ struct snps_hdmirx_dev *hdmirx_dev = dev_get_drvdata(dev);
+
+ hdmirx_disable_irq(dev);
+
+ /* TODO store CEC HW state */
+ disable_irq(hdmirx_dev->cec->irq);
+
+ return hdmirx_disable(dev);
+}
+
+static __maybe_unused int hdmirx_resume(struct device *dev)
+{
+ struct snps_hdmirx_dev *hdmirx_dev = dev_get_drvdata(dev);
+ int ret = hdmirx_enable(dev);
+
+ if (ret)
+ return ret;
+
+ if (hdmirx_dev->edid_blocks_written) {
+ hdmirx_write_edid_data(hdmirx_dev, hdmirx_dev->edid,
+ hdmirx_dev->edid_blocks_written);
+ hdmirx_hpd_ctrl(hdmirx_dev, true);
+ }
+
+ /* TODO restore CEC HW state */
+ enable_irq(hdmirx_dev->cec->irq);
+
+ hdmirx_enable_irq(dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops snps_hdmirx_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(hdmirx_suspend, hdmirx_resume)
+};
+
+static int hdmirx_setup_irq(struct snps_hdmirx_dev *hdmirx_dev,
+ struct platform_device *pdev)
+{
+ struct device *dev = hdmirx_dev->dev;
+ int ret, irq;
+
+ irq = platform_get_irq_byname(pdev, "hdmi");
+ if (irq < 0) {
+ dev_err_probe(dev, irq, "failed to get hdmi irq\n");
+ return irq;
+ }
+
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+
+ hdmirx_dev->hdmi_irq = irq;
+ ret = devm_request_irq(dev, irq, hdmirx_hdmi_irq_handler, 0,
+ "rk_hdmirx-hdmi", hdmirx_dev);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to request hdmi irq\n");
+ return ret;
+ }
+
+ irq = platform_get_irq_byname(pdev, "dma");
+ if (irq < 0) {
+ dev_err_probe(dev, irq, "failed to get dma irq\n");
+ return irq;
+ }
+
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+
+ hdmirx_dev->dma_irq = irq;
+ ret = devm_request_threaded_irq(dev, irq, NULL, hdmirx_dma_irq_handler,
+ IRQF_ONESHOT, "rk_hdmirx-dma",
+ hdmirx_dev);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to request dma irq\n");
+ return ret;
+ }
+
+ irq = gpiod_to_irq(hdmirx_dev->detect_5v_gpio);
+ if (irq < 0) {
+ dev_err_probe(dev, irq, "failed to get hdmirx-5v irq\n");
+ return irq;
+ }
+
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+
+ hdmirx_dev->det_irq = irq;
+ ret = devm_request_irq(dev, irq, hdmirx_5v_det_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "rk_hdmirx-5v", hdmirx_dev);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to request hdmirx-5v irq\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hdmirx_register_cec(struct snps_hdmirx_dev *hdmirx_dev,
+ struct platform_device *pdev)
+{
+ struct device *dev = hdmirx_dev->dev;
+ struct hdmirx_cec_data cec_data;
+ int irq;
+
+ irq = platform_get_irq_byname(pdev, "cec");
+ if (irq < 0) {
+ dev_err_probe(dev, irq, "failed to get cec irq\n");
+ return irq;
+ }
+
+ cec_data.hdmirx = hdmirx_dev;
+ cec_data.dev = hdmirx_dev->dev;
+ cec_data.ops = &hdmirx_cec_ops;
+ cec_data.irq = irq;
+
+ hdmirx_dev->cec = snps_hdmirx_cec_register(&cec_data);
+ if (IS_ERR(hdmirx_dev->cec))
+ return dev_err_probe(dev, PTR_ERR(hdmirx_dev->cec),
+ "failed to register cec\n");
+
+ return 0;
+}
+
+static int hdmirx_probe(struct platform_device *pdev)
+{
+ struct snps_hdmirx_dev *hdmirx_dev;
+ struct device *dev = &pdev->dev;
+ struct v4l2_ctrl_handler *hdl;
+ struct hdmirx_stream *stream;
+ struct v4l2_device *v4l2_dev;
+ int ret;
+
+ hdmirx_dev = devm_kzalloc(dev, sizeof(*hdmirx_dev), GFP_KERNEL);
+ if (!hdmirx_dev)
+ return -ENOMEM;
+
+ /*
+ * The RK3588 HDMIRX SoC integration doesn't use an IOMMU and can
+ * address only the first 32 bits of the physical address space.
+ */
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ hdmirx_dev->dev = dev;
+ dev_set_drvdata(dev, hdmirx_dev);
+
+ ret = hdmirx_parse_dt(hdmirx_dev);
+ if (ret)
+ return ret;
+
+ ret = hdmirx_setup_irq(hdmirx_dev, pdev);
+ if (ret)
+ return ret;
+
+ hdmirx_dev->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hdmirx_dev->regs))
+ return dev_err_probe(dev, PTR_ERR(hdmirx_dev->regs),
+ "failed to remap regs resource\n");
+
+ mutex_init(&hdmirx_dev->stream_lock);
+ mutex_init(&hdmirx_dev->work_lock);
+ spin_lock_init(&hdmirx_dev->rst_lock);
+
+ init_completion(&hdmirx_dev->cr_write_done);
+ init_completion(&hdmirx_dev->timer_base_lock);
+ init_completion(&hdmirx_dev->avi_pkt_rcv);
+
+ INIT_DELAYED_WORK(&hdmirx_dev->delayed_work_hotplug,
+ hdmirx_delayed_work_hotplug);
+ INIT_DELAYED_WORK(&hdmirx_dev->delayed_work_res_change,
+ hdmirx_delayed_work_res_change);
+
+ hdmirx_dev->cur_fmt_fourcc = V4L2_PIX_FMT_BGR24;
+ hdmirx_dev->timings = cea640x480;
+
+ hdmirx_enable(dev);
+ hdmirx_init(hdmirx_dev);
+
+ v4l2_dev = &hdmirx_dev->v4l2_dev;
+ strscpy(v4l2_dev->name, dev_name(dev), sizeof(v4l2_dev->name));
+
+ hdl = &hdmirx_dev->hdl;
+ v4l2_ctrl_handler_init(hdl, 3);
+
+ hdmirx_dev->detect_tx_5v_ctrl = v4l2_ctrl_new_std(hdl, NULL,
+ V4L2_CID_DV_RX_POWER_PRESENT,
+ 0, 1, 0, 0);
+
+ hdmirx_dev->rgb_range = v4l2_ctrl_new_std_menu(hdl, NULL,
+ V4L2_CID_DV_RX_RGB_RANGE,
+ V4L2_DV_RGB_RANGE_FULL, 0,
+ V4L2_DV_RGB_RANGE_AUTO);
+
+ hdmirx_dev->rgb_range->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ hdmirx_dev->content_type =
+ v4l2_ctrl_new_std_menu(hdl, NULL, V4L2_CID_DV_RX_IT_CONTENT_TYPE,
+ V4L2_DV_IT_CONTENT_TYPE_NO_ITC, 0,
+ V4L2_DV_IT_CONTENT_TYPE_NO_ITC);
+
+ if (hdl->error) {
+ ret = hdl->error;
+ dev_err_probe(dev, ret, "v4l2 ctrl handler init failed\n");
+ goto err_pm;
+ }
+ hdmirx_dev->v4l2_dev.ctrl_handler = hdl;
+
+ ret = v4l2_device_register(dev, &hdmirx_dev->v4l2_dev);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "v4l2 device registration failed\n");
+ goto err_hdl;
+ }
+
+ stream = &hdmirx_dev->stream;
+ stream->hdmirx_dev = hdmirx_dev;
+ ret = hdmirx_register_stream_vdev(stream);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "video device registration failed\n");
+ goto err_unreg_v4l2_dev;
+ }
+
+ ret = hdmirx_register_cec(hdmirx_dev, pdev);
+ if (ret)
+ goto err_unreg_video_dev;
+
+ hdmirx_load_default_edid(hdmirx_dev);
+
+ hdmirx_enable_irq(dev);
+
+ hdmirx_dev->debugfs_dir = debugfs_create_dir(hdmirx_dev->v4l2_dev.name,
+ v4l2_debugfs_root());
+
+ hdmirx_dev->infoframes = v4l2_debugfs_if_alloc(hdmirx_dev->debugfs_dir,
+ V4L2_DEBUGFS_IF_AVI, hdmirx_dev,
+ hdmirx_debugfs_if_read);
+
+ return 0;
+
+err_unreg_video_dev:
+ vb2_video_unregister_device(&hdmirx_dev->stream.vdev);
+err_unreg_v4l2_dev:
+ v4l2_device_unregister(&hdmirx_dev->v4l2_dev);
+err_hdl:
+ v4l2_ctrl_handler_free(&hdmirx_dev->hdl);
+err_pm:
+ hdmirx_disable(dev);
+
+ return ret;
+}
+
+static void hdmirx_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct snps_hdmirx_dev *hdmirx_dev = dev_get_drvdata(dev);
+
+ v4l2_debugfs_if_free(hdmirx_dev->infoframes);
+ debugfs_remove_recursive(hdmirx_dev->debugfs_dir);
+
+ snps_hdmirx_cec_unregister(hdmirx_dev->cec);
+
+ hdmirx_disable_irq(dev);
+
+ vb2_video_unregister_device(&hdmirx_dev->stream.vdev);
+ v4l2_ctrl_handler_free(&hdmirx_dev->hdl);
+ v4l2_device_unregister(&hdmirx_dev->v4l2_dev);
+
+ /* touched by hdmirx_disable()->hdmirx_plugout() */
+ hdmirx_dev->rgb_range = NULL;
+ hdmirx_dev->content_type = NULL;
+
+ hdmirx_disable(dev);
+
+ reset_control_bulk_assert(HDMIRX_NUM_RST, hdmirx_dev->resets);
+}
+
+static const struct of_device_id hdmirx_id[] = {
+ { .compatible = "rockchip,rk3588-hdmirx-ctrler" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, hdmirx_id);
+
+static struct platform_driver hdmirx_driver = {
+ .probe = hdmirx_probe,
+ .remove = hdmirx_remove,
+ .driver = {
+ .name = "snps_hdmirx",
+ .of_match_table = hdmirx_id,
+ .pm = &snps_hdmirx_pm_ops,
+ }
+};
+module_platform_driver(hdmirx_driver);
+
+MODULE_DESCRIPTION("Synopsys HDMI Receiver Driver");
+MODULE_AUTHOR("Dingxian Wen <shawn.wen@rock-chips.com>");
+MODULE_AUTHOR("Shreeya Patel <shreeya.patel@collabora.com>");
+MODULE_AUTHOR("Dmitry Osipenko <dmitry.osipenko@collabora.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.h b/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.h
new file mode 100644
index 000000000000..220ab99ca611
--- /dev/null
+++ b/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.h
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 Rockchip Electronics Co. Ltd.
+ *
+ * Author: Dingxian Wen <shawn.wen@rock-chips.com>
+ */
+
+#ifndef DW_HDMIRX_H
+#define DW_HDMIRX_H
+
+#include <linux/bitops.h>
+
+#define UPDATE(x, h, l) (((x) << (l)) & GENMASK((h), (l)))
+#define HIWORD_UPDATE(v, h, l) (((v) << (l)) | (GENMASK((h), (l)) << 16))
+
+/* SYS_GRF */
+#define SYS_GRF_SOC_CON1 0x0304
+#define HDMIRXPHY_SRAM_EXT_LD_DONE BIT(1)
+#define HDMIRXPHY_SRAM_BYPASS BIT(0)
+#define SYS_GRF_SOC_STATUS1 0x0384
+#define HDMIRXPHY_SRAM_INIT_DONE BIT(10)
+#define SYS_GRF_CHIP_ID 0x0600
+
+/* VO1_GRF */
+#define VO1_GRF_VO1_CON2 0x0008
+#define HDMIRX_SDAIN_MSK BIT(2)
+#define HDMIRX_SCLIN_MSK BIT(1)
+
+/* HDMIRX PHY */
+#define SUP_DIG_ANA_CREGS_SUP_ANA_NC 0x004f
+
+#define LANE0_DIG_ASIC_RX_OVRD_OUT_0 0x100f
+#define LANE1_DIG_ASIC_RX_OVRD_OUT_0 0x110f
+#define LANE2_DIG_ASIC_RX_OVRD_OUT_0 0x120f
+#define LANE3_DIG_ASIC_RX_OVRD_OUT_0 0x130f
+#define ASIC_ACK_OVRD_EN BIT(1)
+#define ASIC_ACK BIT(0)
+
+#define LANE0_DIG_RX_VCOCAL_RX_VCO_CAL_CTRL_2 0x104a
+#define LANE1_DIG_RX_VCOCAL_RX_VCO_CAL_CTRL_2 0x114a
+#define LANE2_DIG_RX_VCOCAL_RX_VCO_CAL_CTRL_2 0x124a
+#define LANE3_DIG_RX_VCOCAL_RX_VCO_CAL_CTRL_2 0x134a
+#define FREQ_TUNE_START_VAL_MASK GENMASK(9, 0)
+#define FREQ_TUNE_START_VAL(x) UPDATE(x, 9, 0)
+
+#define HDMIPCS_DIG_CTRL_PATH_MAIN_FSM_FSM_CONFIG 0x20c4
+#define HDMIPCS_DIG_CTRL_PATH_MAIN_FSM_ADAPT_REF_FOM 0x20c7
+#define HDMIPCS_DIG_CTRL_PATH_MAIN_FSM_RATE_CALC_HDMI14_CDR_SETTING_3_REG 0x20e9
+#define CDR_SETTING_BOUNDARY_3_DEFAULT 0x52da
+#define HDMIPCS_DIG_CTRL_PATH_MAIN_FSM_RATE_CALC_HDMI14_CDR_SETTING_4_REG 0x20ea
+#define CDR_SETTING_BOUNDARY_4_DEFAULT 0x43cd
+#define HDMIPCS_DIG_CTRL_PATH_MAIN_FSM_RATE_CALC_HDMI14_CDR_SETTING_5_REG 0x20eb
+#define CDR_SETTING_BOUNDARY_5_DEFAULT 0x35b3
+#define HDMIPCS_DIG_CTRL_PATH_MAIN_FSM_RATE_CALC_HDMI14_CDR_SETTING_6_REG 0x20fb
+#define CDR_SETTING_BOUNDARY_6_DEFAULT 0x2799
+#define HDMIPCS_DIG_CTRL_PATH_MAIN_FSM_RATE_CALC_HDMI14_CDR_SETTING_7_REG 0x20fc
+#define CDR_SETTING_BOUNDARY_7_DEFAULT 0x1b65
+
+#define RAWLANE0_DIG_PCS_XF_RX_OVRD_OUT 0x300e
+#define RAWLANE1_DIG_PCS_XF_RX_OVRD_OUT 0x310e
+#define RAWLANE2_DIG_PCS_XF_RX_OVRD_OUT 0x320e
+#define RAWLANE3_DIG_PCS_XF_RX_OVRD_OUT 0x330e
+#define PCS_ACK_WRITE_SELECT BIT(14)
+#define PCS_EN_CTL BIT(1)
+#define PCS_ACK BIT(0)
+
+#define RAWLANE0_DIG_AON_FAST_FLAGS 0x305c
+#define RAWLANE1_DIG_AON_FAST_FLAGS 0x315c
+#define RAWLANE2_DIG_AON_FAST_FLAGS 0x325c
+#define RAWLANE3_DIG_AON_FAST_FLAGS 0x335c
+
+/* HDMIRX Ctrler */
+#define GLOBAL_SWRESET_REQUEST 0x0020
+#define DATAPATH_SWRESETREQ BIT(12)
+#define GLOBAL_SWENABLE 0x0024
+#define PHYCTRL_ENABLE BIT(21)
+#define CEC_ENABLE BIT(16)
+#define TMDS_ENABLE BIT(13)
+#define DATAPATH_ENABLE BIT(12)
+#define PKTFIFO_ENABLE BIT(11)
+#define AVPUNIT_ENABLE BIT(8)
+#define MAIN_ENABLE BIT(0)
+#define GLOBAL_TIMER_REF_BASE 0x0028
+#define CORE_CONFIG 0x0050
+#define CMU_CONFIG0 0x0060
+#define TMDSQPCLK_STABLE_FREQ_MARGIN_MASK GENMASK(30, 16)
+#define TMDSQPCLK_STABLE_FREQ_MARGIN(x) UPDATE(x, 30, 16)
+#define AUDCLK_STABLE_FREQ_MARGIN_MASK GENMASK(11, 9)
+#define AUDCLK_STABLE_FREQ_MARGIN(x) UPDATE(x, 11, 9)
+#define CMU_STATUS 0x007c
+#define TMDSQPCLK_LOCKED_ST BIT(4)
+#define CMU_TMDSQPCLK_FREQ 0x0084
+#define PHY_CONFIG 0x00c0
+#define LDO_AFE_PROG_MASK GENMASK(24, 23)
+#define LDO_AFE_PROG(x) UPDATE(x, 24, 23)
+#define LDO_PWRDN BIT(21)
+#define TMDS_CLOCK_RATIO BIT(16)
+#define RXDATA_WIDTH BIT(15)
+#define REFFREQ_SEL_MASK GENMASK(11, 9)
+#define REFFREQ_SEL(x) UPDATE(x, 11, 9)
+#define HDMI_DISABLE BIT(8)
+#define PHY_PDDQ BIT(1)
+#define PHY_RESET BIT(0)
+#define PHY_STATUS 0x00c8
+#define HDMI_DISABLE_ACK BIT(1)
+#define PDDQ_ACK BIT(0)
+#define PHYCREG_CONFIG0 0x00e0
+#define PHYCREG_CR_PARA_SELECTION_MODE_MASK GENMASK(1, 0)
+#define PHYCREG_CR_PARA_SELECTION_MODE(x) UPDATE(x, 1, 0)
+#define PHYCREG_CONFIG1 0x00e4
+#define PHYCREG_CONFIG2 0x00e8
+#define PHYCREG_CONFIG3 0x00ec
+#define PHYCREG_CONTROL 0x00f0
+#define PHYCREG_CR_PARA_WRITE_P BIT(1)
+#define PHYCREG_CR_PARA_READ_P BIT(0)
+#define PHYCREG_STATUS 0x00f4
+
+#define MAINUNIT_STATUS 0x0150
+#define TMDSVALID_STABLE_ST BIT(1)
+#define DESCRAND_EN_CONTROL 0x0210
+#define SCRAMB_EN_SEL_QST_MASK GENMASK(1, 0)
+#define SCRAMB_EN_SEL_QST(x) UPDATE(x, 1, 0)
+#define DESCRAND_SYNC_CONTROL 0x0214
+#define RECOVER_UNSYNC_STREAM_QST BIT(0)
+#define DESCRAND_SYNC_SEQ_CONFIG 0x022c
+#define DESCRAND_SYNC_SEQ_ERR_CNT_EN BIT(0)
+#define DESCRAND_SYNC_SEQ_STATUS 0x0234
+#define DEFRAMER_CONFIG0 0x0270
+#define VS_CNT_THR_QST_MASK GENMASK(27, 20)
+#define VS_CNT_THR_QST(x) UPDATE(x, 27, 20)
+#define HS_POL_QST_MASK GENMASK(19, 18)
+#define HS_POL_QST(x) UPDATE(x, 19, 18)
+#define VS_POL_QST_MASK GENMASK(17, 16)
+#define VS_POL_QST(x) UPDATE(x, 17, 16)
+#define VS_REMAPFILTER_EN_QST BIT(8)
+#define VS_FILTER_ORDER_QST_MASK GENMASK(1, 0)
+#define VS_FILTER_ORDER_QST(x) UPDATE(x, 1, 0)
+#define DEFRAMER_VSYNC_CNT_CLEAR 0x0278
+#define VSYNC_CNT_CLR_P BIT(0)
+#define DEFRAMER_STATUS 0x027c
+#define OPMODE_STS_MASK GENMASK(6, 4)
+#define I2C_SLAVE_CONFIG1 0x0164
+#define I2C_SDA_OUT_HOLD_VALUE_QST_MASK GENMASK(15, 8)
+#define I2C_SDA_OUT_HOLD_VALUE_QST(x) UPDATE(x, 15, 8)
+#define I2C_SDA_IN_HOLD_VALUE_QST_MASK GENMASK(7, 0)
+#define I2C_SDA_IN_HOLD_VALUE_QST(x) UPDATE(x, 7, 0)
+#define REPEATER_QST BIT(28)
+#define FASTREAUTH_QST BIT(27)
+#define FEATURES_1DOT1_QST BIT(26)
+#define FASTI2C_QST BIT(25)
+#define EESS_CTL_THR_QST_MASK GENMASK(19, 16)
+#define EESS_CTL_THR_QST(x) UPDATE(x, 19, 16)
+#define OESS_CTL3_THR_QST_MASK GENMASK(11, 8)
+#define OESS_CTL3_THR_QST(x) UPDATE(x, 11, 8)
+#define EESS_OESS_SEL_QST_MASK GENMASK(5, 4)
+#define EESS_OESS_SEL_QST(x) UPDATE(x, 5, 4)
+#define KEY_DECRYPT_EN_QST BIT(0)
+#define KEY_DECRYPT_SEED_QST_MASK GENMASK(15, 0)
+#define KEY_DECRYPT_SEED_QST(x) UPDATE(x, 15, 0)
+#define HDCP_INT_CLEAR 0x50d8
+#define HDCP_1_INT_CLEAR 0x50e8
+#define HDCP2_CONFIG 0x02f0
+#define HDCP2_SWITCH_OVR_VALUE BIT(2)
+#define HDCP2_SWITCH_OVR_EN BIT(1)
+
+#define VIDEO_CONFIG2 0x042c
+#define VPROC_VSYNC_POL_OVR_VALUE BIT(19)
+#define VPROC_VSYNC_POL_OVR_EN BIT(18)
+#define VPROC_HSYNC_POL_OVR_VALUE BIT(17)
+#define VPROC_HSYNC_POL_OVR_EN BIT(16)
+#define VPROC_FMT_OVR_VALUE_MASK GENMASK(6, 4)
+#define VPROC_FMT_OVR_VALUE(x) UPDATE(x, 6, 4)
+#define VPROC_FMT_OVR_EN BIT(0)
+
+#define AFIFO_FILL_RESTART BIT(0)
+#define AFIFO_INIT_P BIT(0)
+#define AFIFO_THR_LOW_QST_MASK GENMASK(25, 16)
+#define AFIFO_THR_LOW_QST(x) UPDATE(x, 25, 16)
+#define AFIFO_THR_HIGH_QST_MASK GENMASK(9, 0)
+#define AFIFO_THR_HIGH_QST(x) UPDATE(x, 9, 0)
+#define AFIFO_THR_MUTE_LOW_QST_MASK GENMASK(25, 16)
+#define AFIFO_THR_MUTE_LOW_QST(x) UPDATE(x, 25, 16)
+#define AFIFO_THR_MUTE_HIGH_QST_MASK GENMASK(9, 0)
+#define AFIFO_THR_MUTE_HIGH_QST(x) UPDATE(x, 9, 0)
+
+#define AFIFO_UNDERFLOW_ST BIT(25)
+#define AFIFO_OVERFLOW_ST BIT(24)
+
+#define SPEAKER_ALLOC_OVR_EN BIT(16)
+#define I2S_BPCUV_EN BIT(4)
+#define SPDIF_EN BIT(2)
+#define I2S_EN BIT(1)
+#define AFIFO_THR_PASS_DEMUTEMASK_N BIT(24)
+#define AVMUTE_DEMUTEMASK_N BIT(16)
+#define AFIFO_THR_MUTE_LOW_MUTEMASK_N BIT(9)
+#define AFIFO_THR_MUTE_HIGH_MUTEMASK_N BIT(8)
+#define AVMUTE_MUTEMASK_N BIT(0)
+#define SCDC_CONFIG 0x0580
+#define HPDLOW BIT(1)
+#define POWERPROVIDED BIT(0)
+#define SCDC_REGBANK_STATUS1 0x058c
+#define SCDC_TMDSBITCLKRATIO BIT(1)
+#define SCDC_REGBANK_STATUS3 0x0594
+#define SCDC_REGBANK_CONFIG0 0x05c0
+#define SCDC_SINKVERSION_QST_MASK GENMASK(7, 0)
+#define SCDC_SINKVERSION_QST(x) UPDATE(x, 7, 0)
+#define AGEN_LAYOUT BIT(4)
+#define AGEN_SPEAKER_ALLOC GENMASK(15, 8)
+
+#define CED_CONFIG 0x0760
+#define CED_VIDDATACHECKEN_QST BIT(27)
+#define CED_DATAISCHECKEN_QST BIT(26)
+#define CED_GBCHECKEN_QST BIT(25)
+#define CED_CTRLCHECKEN_QST BIT(24)
+#define CED_CHLOCKMAXER_QST_MASK GENMASK(14, 0)
+#define CED_CHLOCKMAXER_QST(x) UPDATE(x, 14, 0)
+#define CED_DYN_CONFIG 0x0768
+#define CED_DYN_CONTROL 0x076c
+#define PKTEX_BCH_ERRFILT_CONFIG 0x07c4
+#define PKTEX_CHKSUM_ERRFILT_CONFIG 0x07c8
+
+#define PKTDEC_ACR_PH2_1 0x1100
+#define PKTDEC_ACR_PB3_0 0x1104
+#define PKTDEC_ACR_PB7_4 0x1108
+#define PKTDEC_AVIIF_PH2_1 0x1200
+#define PKTDEC_AVIIF_PB3_0 0x1204
+#define PKTDEC_AVIIF_PB7_4 0x1208
+#define VIC_VAL_MASK GENMASK(6, 0)
+#define PKTDEC_AVIIF_PB11_8 0x120c
+#define PKTDEC_AVIIF_PB15_12 0x1210
+#define PKTDEC_AVIIF_PB19_16 0x1214
+#define PKTDEC_AVIIF_PB23_20 0x1218
+#define PKTDEC_AVIIF_PB27_24 0x121c
+
+#define PKTFIFO_CONFIG 0x1500
+#define PKTFIFO_STORE_FILT_CONFIG 0x1504
+#define PKTFIFO_THR_CONFIG0 0x1508
+#define PKTFIFO_THR_CONFIG1 0x150c
+#define PKTFIFO_CONTROL 0x1510
+
+#define VMON_STATUS1 0x1580
+#define VMON_STATUS2 0x1584
+#define VMON_STATUS3 0x1588
+#define VMON_STATUS4 0x158c
+#define VMON_STATUS5 0x1590
+#define VMON_STATUS6 0x1594
+#define VMON_STATUS7 0x1598
+#define VMON_ILACE_DETECT BIT(4)
+
+#define CEC_TX_CONTROL 0x2000
+#define CEC_STATUS 0x2004
+#define CEC_CONFIG 0x2008
+#define RX_AUTO_DRIVE_ACKNOWLEDGE BIT(9)
+#define CEC_ADDR 0x200c
+#define CEC_TX_COUNT 0x2020
+#define CEC_TX_DATA3_0 0x2024
+#define CEC_RX_COUNT_STATUS 0x2040
+#define CEC_RX_DATA3_0 0x2044
+#define CEC_LOCK_CONTROL 0x2054
+#define CEC_RXQUAL_BITTIME_CONFIG 0x2060
+#define CEC_RX_BITTIME_CONFIG 0x2064
+#define CEC_TX_BITTIME_CONFIG 0x2068
+
+#define DMA_CONFIG1 0x4400
+#define UV_WID_MASK GENMASK(31, 28)
+#define UV_WID(x) UPDATE(x, 31, 28)
+#define Y_WID_MASK GENMASK(27, 24)
+#define Y_WID(x) UPDATE(x, 27, 24)
+#define DDR_STORE_FORMAT_MASK GENMASK(15, 12)
+#define DDR_STORE_FORMAT(x) UPDATE(x, 15, 12)
+#define ABANDON_EN BIT(0)
+#define DMA_CONFIG2 0x4404
+#define DMA_CONFIG3 0x4408
+#define DMA_CONFIG4 0x440c /* DMA irq enable */
+#define DMA_CONFIG5 0x4410 /* DMA irq clear status */
+#define LINE_FLAG_INT_EN BIT(8)
+#define HDMIRX_DMA_IDLE_INT BIT(7)
+#define HDMIRX_LOCK_DISABLE_INT BIT(6)
+#define LAST_FRAME_AXI_UNFINISH_INT_EN BIT(5)
+#define FIFO_OVERFLOW_INT_EN BIT(2)
+#define FIFO_UNDERFLOW_INT_EN BIT(1)
+#define HDMIRX_AXI_ERROR_INT_EN BIT(0)
+#define DMA_CONFIG6 0x4414
+#define RB_SWAP_EN BIT(9)
+#define HSYNC_TOGGLE_EN BIT(5)
+#define VSYNC_TOGGLE_EN BIT(4)
+#define HDMIRX_DMA_EN BIT(1)
+#define DMA_CONFIG7 0x4418
+#define LINE_FLAG_NUM_MASK GENMASK(31, 16)
+#define LINE_FLAG_NUM(x) UPDATE(x, 31, 16)
+#define LOCK_FRAME_NUM_MASK GENMASK(11, 0)
+#define LOCK_FRAME_NUM(x) UPDATE(x, 11, 0)
+#define DMA_CONFIG8 0x441c
+#define REG_MIRROR_EN BIT(0)
+#define DMA_CONFIG9 0x4420
+#define DMA_CONFIG10 0x4424
+#define DMA_CONFIG11 0x4428
+#define EDID_READ_EN_MASK BIT(8)
+#define EDID_READ_EN(x) UPDATE(x, 8, 8)
+#define EDID_WRITE_EN_MASK BIT(7)
+#define EDID_WRITE_EN(x) UPDATE(x, 7, 7)
+#define EDID_SLAVE_ADDR_MASK GENMASK(6, 0)
+#define EDID_SLAVE_ADDR(x) UPDATE(x, 6, 0)
+#define DMA_STATUS1 0x4430 /* DMA irq status */
+#define DMA_STATUS2 0x4434
+#define DMA_STATUS3 0x4438
+#define DMA_STATUS4 0x443c
+#define DMA_STATUS5 0x4440
+#define DMA_STATUS6 0x4444
+#define DMA_STATUS7 0x4448
+#define DMA_STATUS8 0x444c
+#define DMA_STATUS9 0x4450
+#define DMA_STATUS10 0x4454
+#define HDMIRX_LOCK BIT(3)
+#define DMA_STATUS11 0x4458
+#define HDMIRX_TYPE_MASK GENMASK(8, 7)
+#define HDMIRX_COLOR_DEPTH_MASK GENMASK(6, 3)
+#define HDMIRX_FORMAT_MASK GENMASK(2, 0)
+#define DMA_STATUS12 0x445c
+#define DMA_STATUS13 0x4460
+#define DMA_STATUS14 0x4464
+
+#define MAINUNIT_INTVEC_INDEX 0x5000
+#define MAINUNIT_0_INT_STATUS 0x5010
+#define CECRX_NOTIFY_ERR BIT(12)
+#define CECRX_EOM BIT(11)
+#define CECTX_DRIVE_ERR BIT(10)
+#define CECRX_BUSY BIT(9)
+#define CECTX_BUSY BIT(8)
+#define CECTX_FRAME_DISCARDED BIT(5)
+#define CECTX_NRETRANSMIT_FAIL BIT(4)
+#define CECTX_LINE_ERR BIT(3)
+#define CECTX_ARBLOST BIT(2)
+#define CECTX_NACK BIT(1)
+#define CECTX_DONE BIT(0)
+#define MAINUNIT_0_INT_MASK_N 0x5014
+#define MAINUNIT_0_INT_CLEAR 0x5018
+#define MAINUNIT_0_INT_FORCE 0x501c
+#define TIMER_BASE_LOCKED_IRQ BIT(26)
+#define TMDSQPCLK_OFF_CHG BIT(5)
+#define TMDSQPCLK_LOCKED_CHG BIT(4)
+#define MAINUNIT_1_INT_STATUS 0x5020
+#define MAINUNIT_1_INT_MASK_N 0x5024
+#define MAINUNIT_1_INT_CLEAR 0x5028
+#define MAINUNIT_1_INT_FORCE 0x502c
+#define MAINUNIT_2_INT_STATUS 0x5030
+#define MAINUNIT_2_INT_MASK_N 0x5034
+#define MAINUNIT_2_INT_CLEAR 0x5038
+#define MAINUNIT_2_INT_FORCE 0x503c
+#define PHYCREG_CR_READ_DONE BIT(11)
+#define PHYCREG_CR_WRITE_DONE BIT(10)
+#define TMDSVALID_STABLE_CHG BIT(1)
+
+#define AVPUNIT_0_INT_STATUS 0x5040
+#define AVPUNIT_0_INT_MASK_N 0x5044
+#define AVPUNIT_0_INT_CLEAR 0x5048
+#define AVPUNIT_0_INT_FORCE 0x504c
+#define CED_DYN_CNT_CH2_IRQ BIT(22)
+#define CED_DYN_CNT_CH1_IRQ BIT(21)
+#define CED_DYN_CNT_CH0_IRQ BIT(20)
+#define AVPUNIT_1_INT_STATUS 0x5050
+#define DEFRAMER_VSYNC_THR_REACHED_IRQ BIT(1)
+#define AVPUNIT_1_INT_MASK_N 0x5054
+#define DEFRAMER_VSYNC_THR_REACHED_MASK_N BIT(1)
+#define DEFRAMER_VSYNC_MASK_N BIT(0)
+#define AVPUNIT_1_INT_CLEAR 0x5058
+#define DEFRAMER_VSYNC_THR_REACHED_CLEAR BIT(1)
+#define PKT_0_INT_STATUS 0x5080
+#define PKTDEC_ACR_CHG_IRQ BIT(3)
+#define PKT_0_INT_MASK_N 0x5084
+#define PKTDEC_ACR_CHG_MASK_N BIT(3)
+#define PKT_0_INT_CLEAR 0x5088
+#define PKT_1_INT_STATUS 0x5090
+#define PKT_1_INT_MASK_N 0x5094
+#define PKT_1_INT_CLEAR 0x5098
+#define PKT_2_INT_STATUS 0x50a0
+#define PKTDEC_ACR_RCV_IRQ BIT(3)
+#define PKT_2_INT_MASK_N 0x50a4
+#define PKTDEC_AVIIF_RCV_IRQ BIT(11)
+#define PKTDEC_ACR_RCV_MASK_N BIT(3)
+#define PKT_2_INT_CLEAR 0x50a8
+#define PKTDEC_AVIIF_RCV_CLEAR BIT(11)
+#define PKTDEC_ACR_RCV_CLEAR BIT(3)
+#define SCDC_INT_STATUS 0x50c0
+#define SCDC_INT_MASK_N 0x50c4
+#define SCDC_INT_CLEAR 0x50c8
+#define SCDCTMDSCCFG_CHG BIT(2)
+
+#define CEC_INT_STATUS 0x5100
+#define CEC_INT_MASK_N 0x5104
+#define CEC_INT_CLEAR 0x5108
+
+#endif
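
A note on the *_MASK / *(x) macro pairs above: each register field is described by a GENMASK() plus an UPDATE(x, h, l) helper, presumably defined earlier in this header (not visible in this hunk), that shifts a value into bits [h:l]. The same pattern can be expressed with the generic FIELD_PREP()/FIELD_GET() helpers from <linux/bitfield.h>; the sketch below is illustrative only and is not part of the patch.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define LINE_FLAG_NUM_MASK	GENMASK(31, 16)
#define LOCK_FRAME_NUM_MASK	GENMASK(11, 0)

/* Compose DMA_CONFIG7 from its two fields. */
static inline u32 dma_config7_compose(u32 line_flag_num, u32 lock_frame_num)
{
	return FIELD_PREP(LINE_FLAG_NUM_MASK, line_flag_num) |
	       FIELD_PREP(LOCK_FRAME_NUM_MASK, lock_frame_num);
}

/* Extract the line-flag field back out of a raw register value. */
static inline u32 dma_config7_line_flag(u32 reg)
{
	return FIELD_GET(LINE_FLAG_NUM_MASK, reg);
}
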
diff --git a/drivers/media/platform/synopsys/hdmirx/snps_hdmirx_cec.c b/drivers/media/platform/synopsys/hdmirx/snps_hdmirx_cec.c
new file mode 100644
index 000000000000..8e470c0376d6
--- /dev/null
+++ b/drivers/media/platform/synopsys/hdmirx/snps_hdmirx_cec.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 Rockchip Electronics Co. Ltd.
+ *
+ * Author: Shunqing Chen <csq@rock-chips.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <media/cec.h>
+
+#include "snps_hdmirx.h"
+#include "snps_hdmirx_cec.h"
+
+static void hdmirx_cec_write(struct hdmirx_cec *cec, int reg, u32 val)
+{
+ cec->ops->write(cec->hdmirx, reg, val);
+}
+
+static u32 hdmirx_cec_read(struct hdmirx_cec *cec, int reg)
+{
+ return cec->ops->read(cec->hdmirx, reg);
+}
+
+static void hdmirx_cec_update_bits(struct hdmirx_cec *cec, int reg, u32 mask,
+ u32 data)
+{
+ u32 val = hdmirx_cec_read(cec, reg) & ~mask;
+
+ val |= (data & mask);
+ hdmirx_cec_write(cec, reg, val);
+}
+
+static int hdmirx_cec_log_addr(struct cec_adapter *adap, u8 logical_addr)
+{
+ struct hdmirx_cec *cec = cec_get_drvdata(adap);
+
+ if (logical_addr == CEC_LOG_ADDR_INVALID)
+ cec->addresses = 0;
+ else
+ cec->addresses |= BIT(logical_addr) | BIT(15);
+
+ hdmirx_cec_write(cec, CEC_ADDR, cec->addresses);
+
+ return 0;
+}
+
+/* signal_free_time is handled by the Synopsys DesignWare
+ * HDMI RX controller hardware.
+ */
+static int hdmirx_cec_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct hdmirx_cec *cec = cec_get_drvdata(adap);
+ u32 data[4] = {0};
+ int i, data_len, msg_len;
+
+ msg_len = msg->len;
+
+ hdmirx_cec_write(cec, CEC_TX_COUNT, msg_len - 1);
+ for (i = 0; i < msg_len; i++)
+ data[i / 4] |= msg->msg[i] << ((i % 4) * 8);
+
+ data_len = DIV_ROUND_UP(msg_len, 4);
+
+ for (i = 0; i < data_len; i++)
+ hdmirx_cec_write(cec, CEC_TX_DATA3_0 + i * 4, data[i]);
+
+ hdmirx_cec_write(cec, CEC_TX_CONTROL, 0x1);
+
+ return 0;
+}
+
+static irqreturn_t hdmirx_cec_hardirq(int irq, void *data)
+{
+ struct cec_adapter *adap = data;
+ struct hdmirx_cec *cec = cec_get_drvdata(adap);
+ u32 stat = hdmirx_cec_read(cec, CEC_INT_STATUS);
+ irqreturn_t ret = IRQ_HANDLED;
+ u32 val;
+
+ if (!stat)
+ return IRQ_NONE;
+
+ hdmirx_cec_write(cec, CEC_INT_CLEAR, stat);
+
+ if (stat & CECTX_LINE_ERR) {
+ cec->tx_status = CEC_TX_STATUS_ERROR;
+ cec->tx_done = true;
+ ret = IRQ_WAKE_THREAD;
+ } else if (stat & CECTX_DONE) {
+ cec->tx_status = CEC_TX_STATUS_OK;
+ cec->tx_done = true;
+ ret = IRQ_WAKE_THREAD;
+ } else if (stat & CECTX_NACK) {
+ cec->tx_status = CEC_TX_STATUS_NACK;
+ cec->tx_done = true;
+ ret = IRQ_WAKE_THREAD;
+ } else if (stat & CECTX_ARBLOST) {
+ cec->tx_status = CEC_TX_STATUS_ARB_LOST;
+ cec->tx_done = true;
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ if (stat & CECRX_EOM) {
+ unsigned int len, i;
+
+ val = hdmirx_cec_read(cec, CEC_RX_COUNT_STATUS);
+ /* RX buffer locked status */
+ if (val & 0x80)
+ return ret;
+
+ len = (val & 0xf) + 1;
+ if (len > sizeof(cec->rx_msg.msg))
+ len = sizeof(cec->rx_msg.msg);
+
+ for (i = 0; i < len; i++) {
+ if (!(i % 4))
+ val = hdmirx_cec_read(cec, CEC_RX_DATA3_0 + i / 4 * 4);
+ cec->rx_msg.msg[i] = (val >> ((i % 4) * 8)) & 0xff;
+ }
+
+ cec->rx_msg.len = len;
+ smp_wmb(); /* make rx_msg visible before rx_done is set */
+ cec->rx_done = true;
+ hdmirx_cec_write(cec, CEC_LOCK_CONTROL, 0x1);
+
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ return ret;
+}
+
+static irqreturn_t hdmirx_cec_thread(int irq, void *data)
+{
+ struct cec_adapter *adap = data;
+ struct hdmirx_cec *cec = cec_get_drvdata(adap);
+
+ if (cec->tx_done) {
+ cec->tx_done = false;
+ cec_transmit_attempt_done(adap, cec->tx_status);
+ }
+ if (cec->rx_done) {
+ cec->rx_done = false;
+ smp_rmb(); /* pairs with smp_wmb(): read rx_msg only after rx_done */
+ cec_received_msg(adap, &cec->rx_msg);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int hdmirx_cec_enable(struct cec_adapter *adap, bool enable)
+{
+ struct hdmirx_cec *cec = cec_get_drvdata(adap);
+
+ if (!enable) {
+ hdmirx_cec_write(cec, CEC_INT_MASK_N, 0);
+ hdmirx_cec_write(cec, CEC_INT_CLEAR, 0);
+ if (cec->ops->disable)
+ cec->ops->disable(cec->hdmirx);
+ } else {
+ unsigned int irqs;
+
+ hdmirx_cec_log_addr(cec->adap, CEC_LOG_ADDR_INVALID);
+ if (cec->ops->enable)
+ cec->ops->enable(cec->hdmirx);
+ hdmirx_cec_update_bits(cec, GLOBAL_SWENABLE, CEC_ENABLE, CEC_ENABLE);
+
+ irqs = CECTX_LINE_ERR | CECTX_NACK | CECRX_EOM | CECTX_DONE;
+ hdmirx_cec_write(cec, CEC_INT_MASK_N, irqs);
+ }
+
+ return 0;
+}
+
+static const struct cec_adap_ops hdmirx_cec_ops = {
+ .adap_enable = hdmirx_cec_enable,
+ .adap_log_addr = hdmirx_cec_log_addr,
+ .adap_transmit = hdmirx_cec_transmit,
+};
+
+static void hdmirx_cec_del(void *data)
+{
+ struct hdmirx_cec *cec = data;
+
+ cec_delete_adapter(cec->adap);
+}
+
+struct hdmirx_cec *snps_hdmirx_cec_register(struct hdmirx_cec_data *data)
+{
+ struct hdmirx_cec *cec;
+ unsigned int irqs;
+ int ret;
+
+ /*
+ * Our device is just a convenience - we want to link to the real
+ * hardware device here, so that userspace can see the association
+ * between the HDMI hardware and its associated CEC chardev.
+ */
+ cec = devm_kzalloc(data->dev, sizeof(*cec), GFP_KERNEL);
+ if (!cec)
+ return ERR_PTR(-ENOMEM);
+
+ cec->dev = data->dev;
+ cec->irq = data->irq;
+ cec->ops = data->ops;
+ cec->hdmirx = data->hdmirx;
+
+ hdmirx_cec_update_bits(cec, GLOBAL_SWENABLE, CEC_ENABLE, CEC_ENABLE);
+ hdmirx_cec_update_bits(cec, CEC_CONFIG, RX_AUTO_DRIVE_ACKNOWLEDGE,
+ RX_AUTO_DRIVE_ACKNOWLEDGE);
+
+ hdmirx_cec_write(cec, CEC_TX_COUNT, 0);
+ hdmirx_cec_write(cec, CEC_INT_MASK_N, 0);
+ hdmirx_cec_write(cec, CEC_INT_CLEAR, ~0);
+
+ cec->adap = cec_allocate_adapter(&hdmirx_cec_ops, cec, "snps-hdmirx",
+ CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL,
+ CEC_MAX_LOG_ADDRS);
+ if (IS_ERR(cec->adap)) {
+ dev_err(cec->dev, "cec adapter allocation failed\n");
+ return ERR_CAST(cec->adap);
+ }
+
+ /* override the module pointer */
+ cec->adap->owner = THIS_MODULE;
+
+ ret = devm_add_action(cec->dev, hdmirx_cec_del, cec);
+ if (ret) {
+ cec_delete_adapter(cec->adap);
+ return ERR_PTR(ret);
+ }
+
+ irq_set_status_flags(cec->irq, IRQ_NOAUTOEN);
+
+ ret = devm_request_threaded_irq(cec->dev, cec->irq,
+ hdmirx_cec_hardirq,
+ hdmirx_cec_thread, IRQF_ONESHOT,
+ "rk_hdmirx_cec", cec->adap);
+ if (ret) {
+ dev_err(cec->dev, "cec irq request failed\n");
+ return ERR_PTR(ret);
+ }
+
+ ret = cec_register_adapter(cec->adap, cec->dev);
+ if (ret < 0) {
+ dev_err(cec->dev, "cec adapter registration failed\n");
+ return ERR_PTR(ret);
+ }
+
+ irqs = CECTX_LINE_ERR | CECTX_NACK | CECRX_EOM | CECTX_DONE;
+ hdmirx_cec_write(cec, CEC_INT_MASK_N, irqs);
+
+ /*
+ * CEC documentation says we must not call cec_delete_adapter
+ * after a successful call to cec_register_adapter().
+ */
+ devm_remove_action(cec->dev, hdmirx_cec_del, cec);
+
+ enable_irq(cec->irq);
+
+ return cec;
+}
+
+void snps_hdmirx_cec_unregister(struct hdmirx_cec *cec)
+{
+ disable_irq(cec->irq);
+
+ cec_unregister_adapter(cec->adap);
+}
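
The TX path in hdmirx_cec_transmit() above packs up to 16 CEC message bytes little-endian into four 32-bit data registers, and hdmirx_cec_hardirq() reverses that on receive. A standalone userspace sketch of just that arithmetic (not driver code; the example frame bytes are arbitrary) is below.

/* Standalone illustration of the CEC byte packing/unpacking arithmetic
 * used above: data[i / 4] |= byte << ((i % 4) * 8) and its inverse.
 * Plain userspace C for clarity, not driver code.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const uint8_t msg[] = { 0x40, 0x44, 0x41 };	/* example CEC frame */
	size_t len = sizeof(msg);
	uint32_t data[4] = { 0 };
	uint8_t out[16] = { 0 };
	size_t i;

	/* pack: byte i lands in word i/4, byte lane i%4 (little-endian) */
	for (i = 0; i < len; i++)
		data[i / 4] |= (uint32_t)msg[i] << ((i % 4) * 8);

	/* unpack: mirror of the RX path in hdmirx_cec_hardirq() */
	for (i = 0; i < len; i++)
		out[i] = (data[i / 4] >> ((i % 4) * 8)) & 0xff;

	printf("round trip %s\n",
	       memcmp(msg, out, len) == 0 ? "ok" : "mismatch");
	return 0;
}
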
diff --git a/drivers/media/platform/synopsys/hdmirx/snps_hdmirx_cec.h b/drivers/media/platform/synopsys/hdmirx/snps_hdmirx_cec.h
new file mode 100644
index 000000000000..1b10da5b8fd4
--- /dev/null
+++ b/drivers/media/platform/synopsys/hdmirx/snps_hdmirx_cec.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 Rockchip Electronics Co. Ltd.
+ *
+ * Author: Shunqing Chen <csq@rock-chips.com>
+ */
+
+#ifndef DW_HDMI_RX_CEC_H
+#define DW_HDMI_RX_CEC_H
+
+struct snps_hdmirx_dev;
+
+struct hdmirx_cec_ops {
+ void (*write)(struct snps_hdmirx_dev *hdmirx_dev, int reg, u32 val);
+ u32 (*read)(struct snps_hdmirx_dev *hdmirx_dev, int reg);
+ void (*enable)(struct snps_hdmirx_dev *hdmirx);
+ void (*disable)(struct snps_hdmirx_dev *hdmirx);
+};
+
+struct hdmirx_cec_data {
+ struct snps_hdmirx_dev *hdmirx;
+ const struct hdmirx_cec_ops *ops;
+ struct device *dev;
+ int irq;
+};
+
+struct hdmirx_cec {
+ struct snps_hdmirx_dev *hdmirx;
+ struct device *dev;
+ const struct hdmirx_cec_ops *ops;
+ u32 addresses;
+ struct cec_adapter *adap;
+ struct cec_msg rx_msg;
+ unsigned int tx_status;
+ bool tx_done;
+ bool rx_done;
+ int irq;
+};
+
+struct hdmirx_cec *snps_hdmirx_cec_register(struct hdmirx_cec_data *data);
+void snps_hdmirx_cec_unregister(struct hdmirx_cec *cec);
+
+#endif /* DW_HDMI_RX_CEC_H */
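
For context, this is roughly how a parent HDMI RX driver would be expected to hook the above interface up. Only the types and functions from snps_hdmirx_cec.h are taken from the patch; the demo_* names are hypothetical and the register-access callbacks are stubs that a real driver would forward to its own MMIO helpers.

#include <linux/device.h>
#include <linux/err.h>
#include <media/cec.h>

#include "snps_hdmirx_cec.h"

static void demo_cec_write(struct snps_hdmirx_dev *hdmirx, int reg, u32 val)
{
	/* forward to the parent driver's register write helper (omitted) */
}

static u32 demo_cec_read(struct snps_hdmirx_dev *hdmirx, int reg)
{
	/* forward to the parent driver's register read helper (omitted) */
	return 0;
}

static const struct hdmirx_cec_ops demo_cec_ops = {
	.write = demo_cec_write,
	.read = demo_cec_read,
	/* .enable and .disable are optional; NULL is handled by the core */
};

static int demo_hdmirx_cec_init(struct device *dev,
				struct snps_hdmirx_dev *hdmirx, int cec_irq)
{
	struct hdmirx_cec_data data = {
		.hdmirx = hdmirx,
		.dev = dev,
		.ops = &demo_cec_ops,
		.irq = cec_irq,
	};
	struct hdmirx_cec *cec;

	cec = snps_hdmirx_cec_register(&data);
	return PTR_ERR_OR_ZERO(cec);
}
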
diff --git a/drivers/media/platform/ti/cal/cal-camerarx.c b/drivers/media/platform/ti/cal/cal-camerarx.c
index 42dfe08b765f..9cc875665695 100644
--- a/drivers/media/platform/ti/cal/cal-camerarx.c
+++ b/drivers/media/platform/ti/cal/cal-camerarx.c
@@ -65,7 +65,8 @@ static s64 cal_camerarx_get_ext_link_freq(struct cal_camerarx *phy)
bpp = fmtinfo->bpp;
- freq = v4l2_get_link_freq(phy->source->ctrl_handler, bpp, 2 * num_lanes);
+ freq = v4l2_get_link_freq(&phy->source->entity.pads[phy->source_pad],
+ bpp, 2 * num_lanes);
if (freq < 0) {
phy_err(phy, "failed to get link freq for subdev '%s'\n",
phy->source->name);
diff --git a/drivers/media/platform/ti/cal/cal.c b/drivers/media/platform/ti/cal/cal.c
index 4bd2092e0255..6cb3e5f49686 100644
--- a/drivers/media/platform/ti/cal/cal.c
+++ b/drivers/media/platform/ti/cal/cal.c
@@ -798,7 +798,6 @@ static int cal_async_notifier_bound(struct v4l2_async_notifier *notifier,
return 0;
}
- phy->source = subdev;
phy_dbg(1, phy, "Using source %s for capture\n", subdev->name);
pad = media_entity_get_fwnode_pad(&subdev->entity,
@@ -820,6 +819,9 @@ static int cal_async_notifier_bound(struct v4l2_async_notifier *notifier,
return ret;
}
+ phy->source = subdev;
+ phy->source_pad = pad;
+
return 0;
}
diff --git a/drivers/media/platform/ti/cal/cal.h b/drivers/media/platform/ti/cal/cal.h
index 0856297adc0b..72a246a64d9e 100644
--- a/drivers/media/platform/ti/cal/cal.h
+++ b/drivers/media/platform/ti/cal/cal.h
@@ -174,6 +174,7 @@ struct cal_camerarx {
struct device_node *source_ep_node;
struct device_node *source_node;
struct v4l2_subdev *source;
+ unsigned int source_pad;
struct v4l2_subdev subdev;
struct media_pad pads[CAL_CAMERARX_NUM_PADS];
diff --git a/drivers/media/platform/ti/omap3isp/isp.c b/drivers/media/platform/ti/omap3isp/isp.c
index 405ca215179d..f51cf6119e97 100644
--- a/drivers/media/platform/ti/omap3isp/isp.c
+++ b/drivers/media/platform/ti/omap3isp/isp.c
@@ -1475,43 +1475,6 @@ void omap3isp_put(struct isp_device *isp)
* Platform device driver
*/
-/*
- * omap3isp_print_status - Prints the values of the ISP Control Module registers
- * @isp: OMAP3 ISP device
- */
-#define ISP_PRINT_REGISTER(isp, name)\
- dev_dbg(isp->dev, "###ISP " #name "=0x%08x\n", \
- isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_##name))
-#define SBL_PRINT_REGISTER(isp, name)\
- dev_dbg(isp->dev, "###SBL " #name "=0x%08x\n", \
- isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_##name))
-
-void omap3isp_print_status(struct isp_device *isp)
-{
- dev_dbg(isp->dev, "-------------ISP Register dump--------------\n");
-
- ISP_PRINT_REGISTER(isp, SYSCONFIG);
- ISP_PRINT_REGISTER(isp, SYSSTATUS);
- ISP_PRINT_REGISTER(isp, IRQ0ENABLE);
- ISP_PRINT_REGISTER(isp, IRQ0STATUS);
- ISP_PRINT_REGISTER(isp, TCTRL_GRESET_LENGTH);
- ISP_PRINT_REGISTER(isp, TCTRL_PSTRB_REPLAY);
- ISP_PRINT_REGISTER(isp, CTRL);
- ISP_PRINT_REGISTER(isp, TCTRL_CTRL);
- ISP_PRINT_REGISTER(isp, TCTRL_FRAME);
- ISP_PRINT_REGISTER(isp, TCTRL_PSTRB_DELAY);
- ISP_PRINT_REGISTER(isp, TCTRL_STRB_DELAY);
- ISP_PRINT_REGISTER(isp, TCTRL_SHUT_DELAY);
- ISP_PRINT_REGISTER(isp, TCTRL_PSTRB_LENGTH);
- ISP_PRINT_REGISTER(isp, TCTRL_STRB_LENGTH);
- ISP_PRINT_REGISTER(isp, TCTRL_SHUT_LENGTH);
-
- SBL_PRINT_REGISTER(isp, PCR);
- SBL_PRINT_REGISTER(isp, SDR_REQ_EXP);
-
- dev_dbg(isp->dev, "--------------------------------------------\n");
-}
-
#ifdef CONFIG_PM
/*
@@ -1961,6 +1924,13 @@ static int isp_attach_iommu(struct isp_device *isp)
struct dma_iommu_mapping *mapping;
int ret;
+ /* We always want to replace any default mapping from the arch code */
+ mapping = to_dma_iommu_mapping(isp->dev);
+ if (mapping) {
+ arm_iommu_detach_device(isp->dev);
+ arm_iommu_release_mapping(mapping);
+ }
+
/*
* Create the ARM mapping, used by the ARM DMA mapping core to allocate
* VAs. This will allocate a corresponding IOMMU domain.
@@ -2272,18 +2242,14 @@ static int isp_probe(struct platform_device *pdev)
if (ret)
goto error_release_isp;
- isp->syscon = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "syscon");
+ isp->syscon = syscon_regmap_lookup_by_phandle_args(pdev->dev.of_node,
+ "syscon", 1,
+ &isp->syscon_offset);
if (IS_ERR(isp->syscon)) {
ret = PTR_ERR(isp->syscon);
goto error_release_isp;
}
- ret = of_property_read_u32_index(pdev->dev.of_node,
- "syscon", 1, &isp->syscon_offset);
- if (ret)
- goto error_release_isp;
-
isp->autoidle = autoidle;
mutex_init(&isp->isp_mutex);
diff --git a/drivers/media/platform/ti/omap3isp/isp.h b/drivers/media/platform/ti/omap3isp/isp.h
index b4793631ad97..60acf3401ac9 100644
--- a/drivers/media/platform/ti/omap3isp/isp.h
+++ b/drivers/media/platform/ti/omap3isp/isp.h
@@ -260,8 +260,6 @@ void omap3isp_configure_bridge(struct isp_device *isp,
struct isp_device *omap3isp_get(struct isp_device *isp);
void omap3isp_put(struct isp_device *isp);
-void omap3isp_print_status(struct isp_device *isp);
-
void omap3isp_sbl_enable(struct isp_device *isp, enum isp_sbl_resource res);
void omap3isp_sbl_disable(struct isp_device *isp, enum isp_sbl_resource res);
diff --git a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c
index 85a44143b378..0e212198dd65 100644
--- a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c
+++ b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c
@@ -518,6 +518,7 @@ static void set_buffers(struct hantro_ctx *ctx)
hantro_reg_write(vpu, &g2_stream_len, src_len);
hantro_reg_write(vpu, &g2_strm_buffer_len, src_buf_len);
hantro_reg_write(vpu, &g2_strm_start_offset, 0);
+ hantro_reg_write(vpu, &g2_start_bit, 0);
hantro_reg_write(vpu, &g2_write_mvs_e, 1);
hantro_write_addr(vpu, G2_TILE_SIZES_ADDR, ctx->hevc_dec.tile_sizes.dma);
diff --git a/drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c b/drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
index 342e543dee4c..82a478ac645e 100644
--- a/drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
+++ b/drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
@@ -776,15 +776,15 @@ config_source(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_para
struct vb2_v4l2_buffer *vb2_src)
{
dma_addr_t stream_base, tmp_addr;
- unsigned int headres_size;
+ unsigned int headers_size;
u32 src_len, start_bit, src_buf_len;
- headres_size = dec_params->uncompressed_header_size
+ headers_size = dec_params->uncompressed_header_size
+ dec_params->compressed_header_size;
stream_base = vb2_dma_contig_plane_dma_addr(&vb2_src->vb2_buf, 0);
- tmp_addr = stream_base + headres_size;
+ tmp_addr = stream_base + headers_size;
if (ctx->dev->variant->legacy_regs)
hantro_write_addr(ctx->dev, G2_STREAM_ADDR, (tmp_addr & ~0xf));
else
@@ -794,7 +794,7 @@ config_source(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_para
hantro_reg_write(ctx->dev, &g2_start_bit, start_bit);
src_len = vb2_get_plane_payload(&vb2_src->vb2_buf, 0);
- src_len += start_bit / 8 - headres_size;
+ src_len += start_bit / 8 - headers_size;
hantro_reg_write(ctx->dev, &g2_stream_len, src_len);
if (!ctx->dev->variant->legacy_regs) {
diff --git a/drivers/media/platform/xilinx/xilinx-tpg.c b/drivers/media/platform/xilinx/xilinx-tpg.c
index cb93711ea3e3..7deec6e37edc 100644
--- a/drivers/media/platform/xilinx/xilinx-tpg.c
+++ b/drivers/media/platform/xilinx/xilinx-tpg.c
@@ -722,7 +722,6 @@ static int xtpg_parse_of(struct xtpg_device *xtpg)
format = xvip_of_get_format(port);
if (IS_ERR(format)) {
dev_err(dev, "invalid format in DT");
- of_node_put(port);
return PTR_ERR(format);
}
@@ -731,7 +730,6 @@ static int xtpg_parse_of(struct xtpg_device *xtpg)
xtpg->vip_format = format;
} else if (xtpg->vip_format != format) {
dev_err(dev, "in/out format mismatch in DT");
- of_node_put(port);
return -EINVAL;
}
diff --git a/drivers/media/radio/radio-aztech.c b/drivers/media/radio/radio-aztech.c
index 4909c337b027..d989c0b3966f 100644
--- a/drivers/media/radio/radio-aztech.c
+++ b/drivers/media/radio/radio-aztech.c
@@ -2,7 +2,7 @@
/*
* radio-aztech.c - Aztech radio card driver
*
- * Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@xs4all.nl>
+ * Converted to the radio-isa framework by Hans Verkuil <hverkuil@xs4all.nl>
* Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
* Adapted to support the Video for Linux API by
* Russell Kroll <rkroll@exploits.org>. Based on original tuner code by:
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 511a8ede05ec..f55217ccf2b8 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1407,7 +1407,7 @@ static inline struct wl1273_device *to_radio(struct v4l2_ctrl *ctrl)
return container_of(ctrl->handler, struct wl1273_device, ctrl_handler);
}
-static int wl1273_fm_vidioc_s_ctrl(struct v4l2_ctrl *ctrl)
+static int wl1273_fm_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct wl1273_device *radio = to_radio(ctrl);
struct wl1273_core *core = radio->core;
@@ -1945,7 +1945,7 @@ static void wl1273_vdev_release(struct video_device *dev)
}
static const struct v4l2_ctrl_ops wl1273_ctrl_ops = {
- .s_ctrl = wl1273_fm_vidioc_s_ctrl,
+ .s_ctrl = wl1273_fm_s_ctrl,
.g_volatile_ctrl = wl1273_fm_g_volatile_ctrl,
};
diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
index 7fdf0d9edbfd..d04572627cdd 100644
--- a/drivers/media/rc/keymaps/Makefile
+++ b/drivers/media/rc/keymaps/Makefile
@@ -106,6 +106,7 @@ obj-$(CONFIG_RC_MAP) += \
rc-rc6-mce.o \
rc-real-audio-220-32-keys.o \
rc-reddo.o \
+ rc-siemens-gigaset-rc20.o \
rc-snapstream-firefly.o \
rc-streamzap.o \
rc-su3000.o \
diff --git a/drivers/media/rc/keymaps/rc-siemens-gigaset-rc20.c b/drivers/media/rc/keymaps/rc-siemens-gigaset-rc20.c
new file mode 100644
index 000000000000..defc77932e10
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-siemens-gigaset-rc20.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* rc-siemens-gigaset-rc20.c - Keytable for the Siemens Gigaset RC 20 remote
+ *
+ * Copyright (c) 2025 by Michael Klein
+ */
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+static struct rc_map_table siemens_gigaset_rc20[] = {
+ { 0x1501, KEY_POWER },
+ { 0x1502, KEY_MUTE },
+ { 0x1503, KEY_NUMERIC_1 },
+ { 0x1504, KEY_NUMERIC_2 },
+ { 0x1505, KEY_NUMERIC_3 },
+ { 0x1506, KEY_NUMERIC_4 },
+ { 0x1507, KEY_NUMERIC_5 },
+ { 0x1508, KEY_NUMERIC_6 },
+ { 0x1509, KEY_NUMERIC_7 },
+ { 0x150a, KEY_NUMERIC_8 },
+ { 0x150b, KEY_NUMERIC_9 },
+ { 0x150c, KEY_NUMERIC_0 },
+ { 0x150d, KEY_UP },
+ { 0x150e, KEY_LEFT },
+ { 0x150f, KEY_OK },
+ { 0x1510, KEY_RIGHT },
+ { 0x1511, KEY_DOWN },
+ { 0x1512, KEY_SHUFFLE }, /* double-arrow */
+ { 0x1513, KEY_EXIT },
+ { 0x1514, KEY_RED },
+ { 0x1515, KEY_GREEN },
+ { 0x1516, KEY_YELLOW }, /* OPT */
+ { 0x1517, KEY_BLUE },
+ { 0x1518, KEY_MENU },
+ { 0x1519, KEY_TEXT },
+ { 0x151a, KEY_MODE }, /* TV/Radio */
+
+ { 0x1521, KEY_EPG },
+ { 0x1522, KEY_FAVORITES },
+ { 0x1523, KEY_CHANNELUP },
+ { 0x1524, KEY_CHANNELDOWN },
+ { 0x1525, KEY_VOLUMEUP },
+ { 0x1526, KEY_VOLUMEDOWN },
+ { 0x1527, KEY_INFO },
+};
+
+static struct rc_map_list siemens_gigaset_rc20_map = {
+ .map = {
+ .scan = siemens_gigaset_rc20,
+ .size = ARRAY_SIZE(siemens_gigaset_rc20),
+ .rc_proto = RC_PROTO_RC5,
+ .name = RC_MAP_SIEMENS_GIGASET_RC20,
+ }
+};
+
+static int __init init_rc_map_siemens_gigaset_rc20(void)
+{
+ return rc_map_register(&siemens_gigaset_rc20_map);
+}
+
+static void __exit exit_rc_map_siemens_gigaset_rc20(void)
+{
+ rc_map_unregister(&siemens_gigaset_rc20_map);
+}
+
+module_init(init_rc_map_siemens_gigaset_rc20)
+module_exit(exit_rc_map_siemens_gigaset_rc20)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michael Klein");
+MODULE_DESCRIPTION("Siemens Gigaset RC20 remote keytable");
diff --git a/drivers/media/rc/pwm-ir-tx.c b/drivers/media/rc/pwm-ir-tx.c
index fe368aebbc13..84533fdd61aa 100644
--- a/drivers/media/rc/pwm-ir-tx.c
+++ b/drivers/media/rc/pwm-ir-tx.c
@@ -172,8 +172,7 @@ static int pwm_ir_probe(struct platform_device *pdev)
rcdev->tx_ir = pwm_ir_tx_sleep;
} else {
init_completion(&pwm_ir->tx_done);
- hrtimer_init(&pwm_ir->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- pwm_ir->timer.function = pwm_ir_timer;
+ hrtimer_setup(&pwm_ir->timer, pwm_ir_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rcdev->tx_ir = pwm_ir_tx_atomic;
}
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 7df949fc65e2..4967d87ec4b7 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -85,8 +85,8 @@ struct ir_raw_event_ctrl {
struct rc6_dec {
int state;
u8 header;
- u32 body;
bool toggle;
+ u32 body;
unsigned count;
unsigned wanted_bits;
} rc6;
@@ -127,8 +127,8 @@ struct ir_raw_event_ctrl {
struct mce_kbd_dec {
/* locks key up timer */
spinlock_t keylock;
- struct timer_list rx_timeout;
int state;
+ struct timer_list rx_timeout;
u8 header;
u32 body;
unsigned count;
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index 9b209e687f25..d3b48a0dd1f4 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -138,39 +138,10 @@ static void sz_push_half_space(struct streamzap_ir *sz,
sz_push_full_space(sz, value & SZ_SPACE_MASK);
}
-/*
- * streamzap_callback - usb IRQ handler callback
- *
- * This procedure is invoked on reception of data from
- * the usb remote.
- */
-static void streamzap_callback(struct urb *urb)
+static void sz_process_ir_data(struct streamzap_ir *sz, int len)
{
- struct streamzap_ir *sz;
unsigned int i;
- int len;
- if (!urb)
- return;
-
- sz = urb->context;
- len = urb->actual_length;
-
- switch (urb->status) {
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /*
- * this urb is terminated, clean up.
- * sz might already be invalid at this point
- */
- dev_err(sz->dev, "urb terminated, status: %d\n", urb->status);
- return;
- default:
- break;
- }
-
- dev_dbg(sz->dev, "%s: received urb, len %d\n", __func__, len);
for (i = 0; i < len; i++) {
dev_dbg(sz->dev, "sz->buf_in[%d]: %x\n",
i, (unsigned char)sz->buf_in[i]);
@@ -219,6 +190,43 @@ static void streamzap_callback(struct urb *urb)
}
ir_raw_event_handle(sz->rdev);
+}
+
+/*
+ * streamzap_callback - usb IRQ handler callback
+ *
+ * This procedure is invoked on reception of data from
+ * the usb remote.
+ */
+static void streamzap_callback(struct urb *urb)
+{
+ struct streamzap_ir *sz;
+ int len;
+
+ if (!urb)
+ return;
+
+ sz = urb->context;
+ len = urb->actual_length;
+
+ switch (urb->status) {
+ case 0:
+ dev_dbg(sz->dev, "%s: received urb, len %d\n", __func__, len);
+ sz_process_ir_data(sz, len);
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ /*
+ * this urb is terminated, clean up.
+ * sz might already be invalid at this point
+ */
+ dev_err(sz->dev, "urb terminated, status: %d\n", urb->status);
+ return;
+ default:
+ break;
+ }
+
usb_submit_urb(urb, GFP_ATOMIC);
}
@@ -385,8 +393,8 @@ static void streamzap_disconnect(struct usb_interface *interface)
if (!sz)
return;
- rc_unregister_device(sz->rdev);
usb_kill_urb(sz->urb_in);
+ rc_unregister_device(sz->rdev);
usb_free_urb(sz->urb_in);
usb_free_coherent(usbdev, sz->buf_in_len, sz->buf_in, sz->dma_in);
diff --git a/drivers/media/test-drivers/vim2m.c b/drivers/media/test-drivers/vim2m.c
index 6c24dcf27eb0..0fe97e208c02 100644
--- a/drivers/media/test-drivers/vim2m.c
+++ b/drivers/media/test-drivers/vim2m.c
@@ -1314,9 +1314,6 @@ static int vim2m_probe(struct platform_device *pdev)
vfd->v4l2_dev = &dev->v4l2_dev;
video_set_drvdata(vfd, dev);
- v4l2_info(&dev->v4l2_dev,
- "Device registered as /dev/video%d\n", vfd->num);
-
platform_set_drvdata(pdev, dev);
dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
@@ -1343,6 +1340,9 @@ static int vim2m_probe(struct platform_device *pdev)
goto error_m2m;
}
+ v4l2_info(&dev->v4l2_dev,
+ "Device registered as /dev/video%d\n", vfd->num);
+
#ifdef CONFIG_MEDIA_CONTROLLER
ret = v4l2_m2m_register_media_controller(dev->m2m_dev, vfd,
MEDIA_ENT_F_PROC_VIDEO_SCALER);
diff --git a/drivers/media/test-drivers/vimc/vimc-streamer.c b/drivers/media/test-drivers/vimc/vimc-streamer.c
index 807551a5143b..15d863f97cbf 100644
--- a/drivers/media/test-drivers/vimc/vimc-streamer.c
+++ b/drivers/media/test-drivers/vimc/vimc-streamer.c
@@ -59,6 +59,12 @@ static void vimc_streamer_pipeline_terminate(struct vimc_stream *stream)
continue;
sd = media_entity_to_v4l2_subdev(ved->ent);
+ /*
+ * Do not call .s_stream() to stop an already
+ * stopped/unstarted subdev.
+ */
+ if (!v4l2_subdev_is_streaming(sd))
+ continue;
v4l2_subdev_call(sd, video, s_stream, 0);
}
}
diff --git a/drivers/media/test-drivers/visl/visl-core.c b/drivers/media/test-drivers/visl/visl-core.c
index 01c964ea6f76..5bf3136b36eb 100644
--- a/drivers/media/test-drivers/visl/visl-core.c
+++ b/drivers/media/test-drivers/visl/visl-core.c
@@ -161,9 +161,15 @@ static const struct visl_ctrl_desc visl_h264_ctrl_descs[] = {
},
{
.cfg.id = V4L2_CID_STATELESS_H264_DECODE_MODE,
+ .cfg.min = V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED,
+ .cfg.max = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
+ .cfg.def = V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED,
},
{
.cfg.id = V4L2_CID_STATELESS_H264_START_CODE,
+ .cfg.min = V4L2_STATELESS_H264_START_CODE_NONE,
+ .cfg.max = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
+ .cfg.def = V4L2_STATELESS_H264_START_CODE_NONE,
},
{
.cfg.id = V4L2_CID_STATELESS_H264_SLICE_PARAMS,
@@ -198,9 +204,15 @@ static const struct visl_ctrl_desc visl_hevc_ctrl_descs[] = {
},
{
.cfg.id = V4L2_CID_STATELESS_HEVC_DECODE_MODE,
+ .cfg.min = V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED,
+ .cfg.max = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
+ .cfg.def = V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED,
},
{
.cfg.id = V4L2_CID_STATELESS_HEVC_START_CODE,
+ .cfg.min = V4L2_STATELESS_HEVC_START_CODE_NONE,
+ .cfg.max = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
+ .cfg.def = V4L2_STATELESS_HEVC_START_CODE_NONE,
},
{
.cfg.id = V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS,
diff --git a/drivers/media/test-drivers/vivid/Kconfig b/drivers/media/test-drivers/vivid/Kconfig
index ec2e71d76965..e95edc0f22bf 100644
--- a/drivers/media/test-drivers/vivid/Kconfig
+++ b/drivers/media/test-drivers/vivid/Kconfig
@@ -1,9 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_VIVID
tristate "Virtual Video Test Driver"
- depends on VIDEO_DEV && !SPARC32 && !SPARC64 && FB
+ depends on VIDEO_DEV && !SPARC32 && !SPARC64
depends on HAS_DMA
- select FB_IOMEM_HELPERS
select FONT_SUPPORT
select FONT_8x16
select VIDEOBUF2_VMALLOC
@@ -31,6 +30,15 @@ config VIDEO_VIVID_CEC
When selected the vivid module will emulate the optional
HDMI CEC feature.
+config VIDEO_VIVID_OSD
+ bool "Enable Framebuffer for testing Output Overlay"
+ depends on VIDEO_VIVID && FB
+ default y
+ select FB_IOMEM_HELPERS
+ help
+ When selected, the vivid module will emulate a framebuffer for
+ testing output overlays.
+
config VIDEO_VIVID_MAX_DEVS
int "Maximum number of devices"
depends on VIDEO_VIVID
diff --git a/drivers/media/test-drivers/vivid/Makefile b/drivers/media/test-drivers/vivid/Makefile
index b12ad0152a3e..284a59e97335 100644
--- a/drivers/media/test-drivers/vivid/Makefile
+++ b/drivers/media/test-drivers/vivid/Makefile
@@ -3,10 +3,13 @@ vivid-objs := vivid-core.o vivid-ctrls.o vivid-vid-common.o vivid-vbi-gen.o \
vivid-vid-cap.o vivid-vid-out.o vivid-kthread-cap.o vivid-kthread-out.o \
vivid-radio-rx.o vivid-radio-tx.o vivid-radio-common.o \
vivid-rds-gen.o vivid-sdr-cap.o vivid-vbi-cap.o vivid-vbi-out.o \
- vivid-osd.o vivid-meta-cap.o vivid-meta-out.o \
+ vivid-meta-cap.o vivid-meta-out.o \
vivid-kthread-touch.o vivid-touch-cap.o
ifeq ($(CONFIG_VIDEO_VIVID_CEC),y)
vivid-objs += vivid-cec.o
endif
+ifeq ($(CONFIG_VIDEO_VIVID_OSD),y)
+ vivid-objs += vivid-osd.o
+endif
obj-$(CONFIG_VIDEO_VIVID) += vivid.o
diff --git a/drivers/media/test-drivers/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c
index 7477ac8cb955..8d56168c72aa 100644
--- a/drivers/media/test-drivers/vivid/vivid-core.c
+++ b/drivers/media/test-drivers/vivid/vivid-core.c
@@ -125,7 +125,9 @@ MODULE_PARM_DESC(node_types, " node types, default is 0xe1d3d. Bitmask with the
"\t\t bit 8: Video Output node\n"
"\t\t bit 10-11: VBI Output node: 0 = none, 1 = raw vbi, 2 = sliced vbi, 3 = both\n"
"\t\t bit 12: Radio Transmitter node\n"
+#ifdef CONFIG_VIDEO_VIVID_OSD
"\t\t bit 16: Framebuffer for testing output overlays\n"
+#endif
"\t\t bit 17: Metadata Capture node\n"
"\t\t bit 18: Metadata Output node\n"
"\t\t bit 19: Touch Capture node\n");
@@ -1071,9 +1073,11 @@ static int vivid_detect_feature_set(struct vivid_dev *dev, int inst,
/* do we have a modulator? */
*has_modulator = dev->has_radio_tx;
+#ifdef CONFIG_VIDEO_VIVID_OSD
if (dev->has_vid_cap)
/* do we have a framebuffer for overlay testing? */
dev->has_fb = node_type & 0x10000;
+#endif
/* can we do crop/compose/scaling while capturing? */
if (no_error_inj && *ccs_cap == -1)
@@ -1410,8 +1414,6 @@ static int vivid_create_queues(struct vivid_dev *dev)
ret = vivid_fb_init(dev);
if (ret)
return ret;
- v4l2_info(&dev->v4l2_dev, "Framebuffer device registered as fb%d\n",
- dev->fb_info.node);
}
return 0;
}
@@ -2197,12 +2199,8 @@ static void vivid_remove(struct platform_device *pdev)
video_device_node_name(&dev->radio_tx_dev));
video_unregister_device(&dev->radio_tx_dev);
}
- if (dev->has_fb) {
- v4l2_info(&dev->v4l2_dev, "unregistering fb%d\n",
- dev->fb_info.node);
- unregister_framebuffer(&dev->fb_info);
- vivid_fb_release_buffers(dev);
- }
+ if (dev->has_fb)
+ vivid_fb_deinit(dev);
if (dev->has_meta_cap) {
v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
video_device_node_name(&dev->meta_cap_dev));
diff --git a/drivers/media/test-drivers/vivid/vivid-core.h b/drivers/media/test-drivers/vivid/vivid-core.h
index d2d52763b119..571a6c222969 100644
--- a/drivers/media/test-drivers/vivid/vivid-core.h
+++ b/drivers/media/test-drivers/vivid/vivid-core.h
@@ -403,9 +403,11 @@ struct vivid_dev {
int display_byte_stride;
int bits_per_pixel;
int bytes_per_pixel;
+#ifdef CONFIG_VIDEO_VIVID_OSD
struct fb_info fb_info;
struct fb_var_screeninfo fb_defined;
struct fb_fix_screeninfo fb_fix;
+#endif
/* Error injection */
bool disconnect_error;
diff --git a/drivers/media/test-drivers/vivid/vivid-ctrls.c b/drivers/media/test-drivers/vivid/vivid-ctrls.c
index 2b5c8fbcd0a2..e340df0b6261 100644
--- a/drivers/media/test-drivers/vivid/vivid-ctrls.c
+++ b/drivers/media/test-drivers/vivid/vivid-ctrls.c
@@ -37,6 +37,7 @@
#define VIVID_CID_U8_PIXEL_ARRAY (VIVID_CID_CUSTOM_BASE + 14)
#define VIVID_CID_S32_ARRAY (VIVID_CID_CUSTOM_BASE + 15)
#define VIVID_CID_S64_ARRAY (VIVID_CID_CUSTOM_BASE + 16)
+#define VIVID_CID_RECT (VIVID_CID_CUSTOM_BASE + 17)
#define VIVID_CID_VIVID_BASE (0x00f00000 | 0xf000)
#define VIVID_CID_VIVID_CLASS (0x00f00000 | 1)
@@ -360,6 +361,38 @@ static const struct v4l2_ctrl_config vivid_ctrl_ro_int32 = {
.step = 1,
};
+static const struct v4l2_rect rect_def = {
+ .top = 100,
+ .left = 200,
+ .width = 300,
+ .height = 400,
+};
+
+static const struct v4l2_rect rect_min = {
+ .top = 0,
+ .left = 0,
+ .width = 1,
+ .height = 1,
+};
+
+static const struct v4l2_rect rect_max = {
+ .top = 0,
+ .left = 0,
+ .width = 1000,
+ .height = 2000,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_rect = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_RECT,
+ .name = "Rect",
+ .type = V4L2_CTRL_TYPE_RECT,
+ .flags = V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX,
+ .p_def.p_const = &rect_def,
+ .p_min.p_const = &rect_min,
+ .p_max.p_const = &rect_max,
+};
+
/* Framebuffer Controls */
static int vivid_fb_s_ctrl(struct v4l2_ctrl *ctrl)
@@ -369,7 +402,7 @@ static int vivid_fb_s_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case VIVID_CID_CLEAR_FB:
- vivid_clear_fb(dev);
+ vivid_fb_clear(dev);
break;
}
return 0;
@@ -1685,6 +1718,7 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
dev->int_menu = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_int_menu, NULL);
dev->ro_int32 = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_ro_int32, NULL);
v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_area, NULL);
+ v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_rect, NULL);
v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u32_array, NULL);
v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u32_dyn_array, NULL);
v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u16_matrix, NULL);
diff --git a/drivers/media/test-drivers/vivid/vivid-kthread-cap.c b/drivers/media/test-drivers/vivid/vivid-kthread-cap.c
index 669bd96da4c7..273e8ed8c2a9 100644
--- a/drivers/media/test-drivers/vivid/vivid-kthread-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-kthread-cap.c
@@ -789,9 +789,14 @@ static int vivid_thread_vid_cap(void *data)
next_jiffies_since_start = jiffies_since_start;
wait_jiffies = next_jiffies_since_start - jiffies_since_start;
- while (time_is_after_jiffies(cur_jiffies + wait_jiffies) &&
- !kthread_should_stop())
- schedule();
+ if (!time_is_after_jiffies(cur_jiffies + wait_jiffies))
+ continue;
+
+ wait_queue_head_t wait;
+
+ init_waitqueue_head(&wait);
+ wait_event_interruptible_timeout(wait, kthread_should_stop(),
+ cur_jiffies + wait_jiffies - jiffies);
}
dprintk(dev, 1, "Video Capture Thread End\n");
return 0;
diff --git a/drivers/media/test-drivers/vivid/vivid-kthread-out.c b/drivers/media/test-drivers/vivid/vivid-kthread-out.c
index fac6208b51da..015a7b166a1e 100644
--- a/drivers/media/test-drivers/vivid/vivid-kthread-out.c
+++ b/drivers/media/test-drivers/vivid/vivid-kthread-out.c
@@ -235,9 +235,14 @@ static int vivid_thread_vid_out(void *data)
next_jiffies_since_start = jiffies_since_start;
wait_jiffies = next_jiffies_since_start - jiffies_since_start;
- while (time_is_after_jiffies(cur_jiffies + wait_jiffies) &&
- !kthread_should_stop())
- schedule();
+ if (!time_is_after_jiffies(cur_jiffies + wait_jiffies))
+ continue;
+
+ wait_queue_head_t wait;
+
+ init_waitqueue_head(&wait);
+ wait_event_interruptible_timeout(wait, kthread_should_stop(),
+ cur_jiffies + wait_jiffies - jiffies);
}
dprintk(dev, 1, "Video Output Thread End\n");
return 0;
diff --git a/drivers/media/test-drivers/vivid/vivid-kthread-touch.c b/drivers/media/test-drivers/vivid/vivid-kthread-touch.c
index fa711ee36a3f..c862689786b6 100644
--- a/drivers/media/test-drivers/vivid/vivid-kthread-touch.c
+++ b/drivers/media/test-drivers/vivid/vivid-kthread-touch.c
@@ -135,9 +135,14 @@ static int vivid_thread_touch_cap(void *data)
next_jiffies_since_start = jiffies_since_start;
wait_jiffies = next_jiffies_since_start - jiffies_since_start;
- while (time_is_after_jiffies(cur_jiffies + wait_jiffies) &&
- !kthread_should_stop())
- schedule();
+ if (!time_is_after_jiffies(cur_jiffies + wait_jiffies))
+ continue;
+
+ wait_queue_head_t wait;
+
+ init_waitqueue_head(&wait);
+ wait_event_interruptible_timeout(wait, kthread_should_stop(),
+ cur_jiffies + wait_jiffies - jiffies);
}
dprintk(dev, 1, "Touch Capture Thread End\n");
return 0;
diff --git a/drivers/media/test-drivers/vivid/vivid-osd.c b/drivers/media/test-drivers/vivid/vivid-osd.c
index 5c931b94a7b5..91ad9b314f2e 100644
--- a/drivers/media/test-drivers/vivid/vivid-osd.c
+++ b/drivers/media/test-drivers/vivid/vivid-osd.c
@@ -45,13 +45,18 @@ static const u16 rgb565[16] = {
0xffff, 0xffe0, 0x07ff, 0x07e0, 0xf81f, 0xf800, 0x001f, 0x0000
};
-void vivid_clear_fb(struct vivid_dev *dev)
+unsigned int vivid_fb_green_bits(struct vivid_dev *dev)
+{
+ return dev->fb_defined.green.length;
+}
+
+void vivid_fb_clear(struct vivid_dev *dev)
{
void *p = dev->video_vbase;
const u16 *rgb = rgb555;
unsigned x, y;
- if (dev->fb_defined.green.length == 6)
+ if (vivid_fb_green_bits(dev) == 6)
rgb = rgb565;
for (y = 0; y < dev->display_height; y++) {
@@ -333,7 +338,7 @@ static int vivid_fb_init_vidmode(struct vivid_dev *dev)
}
/* Release any memory we've grabbed */
-void vivid_fb_release_buffers(struct vivid_dev *dev)
+static void vivid_fb_release_buffers(struct vivid_dev *dev)
{
if (dev->video_vbase == NULL)
return;
@@ -370,7 +375,7 @@ int vivid_fb_init(struct vivid_dev *dev)
return ret;
}
- vivid_clear_fb(dev);
+ vivid_fb_clear(dev);
/* Register the framebuffer */
if (register_framebuffer(&dev->fb_info) < 0) {
@@ -380,6 +385,17 @@ int vivid_fb_init(struct vivid_dev *dev)
/* Set the card to the requested mode */
vivid_fb_set_par(&dev->fb_info);
+
+ v4l2_info(&dev->v4l2_dev, "Framebuffer device registered as fb%d\n",
+ dev->fb_info.node);
+
return 0;
}
+
+void vivid_fb_deinit(struct vivid_dev *dev)
+{
+ v4l2_info(&dev->v4l2_dev, "unregistering fb%d\n", dev->fb_info.node);
+ unregister_framebuffer(&dev->fb_info);
+ vivid_fb_release_buffers(dev);
+}
diff --git a/drivers/media/test-drivers/vivid/vivid-osd.h b/drivers/media/test-drivers/vivid/vivid-osd.h
index f9ac1af25dd3..c52280ebcd03 100644
--- a/drivers/media/test-drivers/vivid/vivid-osd.h
+++ b/drivers/media/test-drivers/vivid/vivid-osd.h
@@ -8,8 +8,23 @@
#ifndef _VIVID_OSD_H_
#define _VIVID_OSD_H_
+#ifdef CONFIG_VIDEO_VIVID_OSD
int vivid_fb_init(struct vivid_dev *dev);
-void vivid_fb_release_buffers(struct vivid_dev *dev);
-void vivid_clear_fb(struct vivid_dev *dev);
+void vivid_fb_deinit(struct vivid_dev *dev);
+void vivid_fb_clear(struct vivid_dev *dev);
+unsigned int vivid_fb_green_bits(struct vivid_dev *dev);
+#else
+static inline int vivid_fb_init(struct vivid_dev *dev)
+{
+ return -ENODEV;
+}
+
+static inline void vivid_fb_deinit(struct vivid_dev *dev) {}
+static inline void vivid_fb_clear(struct vivid_dev *dev) {}
+static inline unsigned int vivid_fb_green_bits(struct vivid_dev *dev)
+{
+ return 5;
+}
+#endif
#endif
diff --git a/drivers/media/test-drivers/vivid/vivid-sdr-cap.c b/drivers/media/test-drivers/vivid/vivid-sdr-cap.c
index 74a91d28c8be..c633fc2ed664 100644
--- a/drivers/media/test-drivers/vivid/vivid-sdr-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-sdr-cap.c
@@ -206,9 +206,14 @@ static int vivid_thread_sdr_cap(void *data)
next_jiffies_since_start = jiffies_since_start;
wait_jiffies = next_jiffies_since_start - jiffies_since_start;
- while (time_is_after_jiffies(cur_jiffies + wait_jiffies) &&
- !kthread_should_stop())
- schedule();
+ if (!time_is_after_jiffies(cur_jiffies + wait_jiffies))
+ continue;
+
+ wait_queue_head_t wait;
+
+ init_waitqueue_head(&wait);
+ wait_event_interruptible_timeout(wait, kthread_should_stop(),
+ cur_jiffies + wait_jiffies - jiffies);
}
dprintk(dev, 1, "SDR Capture Thread End\n");
return 0;
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-out.c b/drivers/media/test-drivers/vivid/vivid-vid-out.c
index 5ec84db934d6..c3398bce6c15 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-out.c
+++ b/drivers/media/test-drivers/vivid/vivid-vid-out.c
@@ -16,6 +16,7 @@
#include <media/v4l2-rect.h>
#include "vivid-core.h"
+#include "vivid-osd.h"
#include "vivid-vid-common.h"
#include "vivid-kthread-out.h"
#include "vivid-vid-out.h"
@@ -907,7 +908,7 @@ int vivid_vid_out_g_fbuf(struct file *file, void *fh,
a->base = (void *)dev->video_pbase;
a->fmt.width = dev->display_width;
a->fmt.height = dev->display_height;
- if (dev->fb_defined.green.length == 5)
+ if (vivid_fb_green_bits(dev) == 5)
a->fmt.pixelformat = V4L2_PIX_FMT_ARGB555;
else
a->fmt.pixelformat = V4L2_PIX_FMT_RGB565;
diff --git a/drivers/media/tuners/tuner-simple.c b/drivers/media/tuners/tuner-simple.c
index 8fb186b25d6a..b52cd8bd07dd 100644
--- a/drivers/media/tuners/tuner-simple.c
+++ b/drivers/media/tuners/tuner-simple.c
@@ -112,7 +112,7 @@ struct tuner_simple_priv {
struct list_head hybrid_tuner_instance_list;
unsigned int type;
- struct tunertype *tun;
+ const struct tunertype *tun;
u32 frequency;
u32 bandwidth;
@@ -232,11 +232,11 @@ static inline char *tuner_param_name(enum param_type type)
return name;
}
-static struct tuner_params *simple_tuner_params(struct dvb_frontend *fe,
- enum param_type desired_type)
+static const struct tuner_params *simple_tuner_params(struct dvb_frontend *fe,
+ enum param_type desired_type)
{
struct tuner_simple_priv *priv = fe->tuner_priv;
- struct tunertype *tun = priv->tun;
+ const struct tunertype *tun = priv->tun;
int i;
for (i = 0; i < tun->count; i++)
@@ -257,7 +257,7 @@ static struct tuner_params *simple_tuner_params(struct dvb_frontend *fe,
}
static int simple_config_lookup(struct dvb_frontend *fe,
- struct tuner_params *t_params,
+ const struct tuner_params *t_params,
unsigned *frequency, u8 *config, u8 *cb)
{
struct tuner_simple_priv *priv = fe->tuner_priv;
@@ -549,7 +549,7 @@ static int simple_set_tv_freq(struct dvb_frontend *fe,
u8 buffer[4];
int rc, IFPCoff, i;
enum param_type desired_type;
- struct tuner_params *t_params;
+ const struct tuner_params *t_params;
/* IFPCoff = Video Intermediate Frequency - Vif:
940 =16*58.75 NTSC/J (Japan)
@@ -664,12 +664,12 @@ static int simple_set_tv_freq(struct dvb_frontend *fe,
static int simple_set_radio_freq(struct dvb_frontend *fe,
struct analog_parameters *params)
{
- struct tunertype *tun;
+ const struct tunertype *tun;
struct tuner_simple_priv *priv = fe->tuner_priv;
u8 buffer[4];
u16 div;
int rc, j;
- struct tuner_params *t_params;
+ const struct tuner_params *t_params;
unsigned int freq = params->frequency;
bool mono = params->audmode == V4L2_TUNER_MODE_MONO;
@@ -848,8 +848,8 @@ static u32 simple_dvb_configure(struct dvb_frontend *fe, u8 *buf,
{
/* This function returns the tuned frequency on success, 0 on error */
struct tuner_simple_priv *priv = fe->tuner_priv;
- struct tunertype *tun = priv->tun;
- struct tuner_params *t_params;
+ const struct tunertype *tun = priv->tun;
+ const struct tuner_params *t_params;
u8 config, cb;
u32 div;
int ret;
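
The constification above covers the tuner_range tables that follow: each entry gives an upper frequency limit in units of 1/16 MHz plus the two config bytes to program below that limit. The sketch below illustrates how such a table is conventionally walked; it is a simplified stand-in under assumed field names (struct range, pick_range), not the driver's actual simple_config_lookup().

/* Illustrative walk of a range table: frequencies are in 1/16 MHz units,
 * and the first entry whose limit is not exceeded supplies the config/cb
 * bytes; past the last limit, the final (catch-all) entry is used.
 */
struct range {
	unsigned int limit;	/* upper bound, 1/16 MHz units */
	unsigned char config;
	unsigned char cb;
};

static int pick_range(const struct range *r, unsigned int count,
		      unsigned int freq, unsigned char *config,
		      unsigned char *cb)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		if (freq <= r[i].limit)
			break;
	if (i == count)
		i = count - 1;	/* clamp to the last entry */

	*config = r[i].config;
	*cb = r[i].cb;
	return 0;
}
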
diff --git a/drivers/media/tuners/tuner-types.c b/drivers/media/tuners/tuner-types.c
index c26f1296e18f..0716cc028212 100644
--- a/drivers/media/tuners/tuner-types.c
+++ b/drivers/media/tuners/tuner-types.c
@@ -61,13 +61,13 @@ static u8 tua603x_agc112[] = { 2, 0x80|0x40|0x18|0x04|0x01, 0x80|0x20 };
/* 0-9 */
/* ------------ TUNER_TEMIC_PAL - TEMIC PAL ------------ */
-static struct tuner_range tuner_temic_pal_ranges[] = {
+static const struct tuner_range tuner_temic_pal_ranges[] = {
{ 16 * 140.25 /*MHz*/, 0x8e, 0x02, },
{ 16 * 463.25 /*MHz*/, 0x8e, 0x04, },
{ 16 * 999.99 , 0x8e, 0x01, },
};
-static struct tuner_params tuner_temic_pal_params[] = {
+static const struct tuner_params tuner_temic_pal_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_temic_pal_ranges,
@@ -77,13 +77,13 @@ static struct tuner_params tuner_temic_pal_params[] = {
/* ------------ TUNER_PHILIPS_PAL_I - Philips PAL_I ------------ */
-static struct tuner_range tuner_philips_pal_i_ranges[] = {
+static const struct tuner_range tuner_philips_pal_i_ranges[] = {
{ 16 * 140.25 /*MHz*/, 0x8e, 0xa0, },
{ 16 * 463.25 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_philips_pal_i_params[] = {
+static const struct tuner_params tuner_philips_pal_i_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_philips_pal_i_ranges,
@@ -93,13 +93,13 @@ static struct tuner_params tuner_philips_pal_i_params[] = {
/* ------------ TUNER_PHILIPS_NTSC - Philips NTSC ------------ */
-static struct tuner_range tuner_philips_ntsc_ranges[] = {
+static const struct tuner_range tuner_philips_ntsc_ranges[] = {
{ 16 * 157.25 /*MHz*/, 0x8e, 0xa0, },
{ 16 * 451.25 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_philips_ntsc_params[] = {
+static const struct tuner_params tuner_philips_ntsc_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_philips_ntsc_ranges,
@@ -110,13 +110,13 @@ static struct tuner_params tuner_philips_ntsc_params[] = {
/* ------------ TUNER_PHILIPS_SECAM - Philips SECAM ------------ */
-static struct tuner_range tuner_philips_secam_ranges[] = {
+static const struct tuner_range tuner_philips_secam_ranges[] = {
{ 16 * 168.25 /*MHz*/, 0x8e, 0xa7, },
{ 16 * 447.25 /*MHz*/, 0x8e, 0x97, },
{ 16 * 999.99 , 0x8e, 0x37, },
};
-static struct tuner_params tuner_philips_secam_params[] = {
+static const struct tuner_params tuner_philips_secam_params[] = {
{
.type = TUNER_PARAM_TYPE_SECAM,
.ranges = tuner_philips_secam_ranges,
@@ -127,13 +127,13 @@ static struct tuner_params tuner_philips_secam_params[] = {
/* ------------ TUNER_PHILIPS_PAL - Philips PAL ------------ */
-static struct tuner_range tuner_philips_pal_ranges[] = {
+static const struct tuner_range tuner_philips_pal_ranges[] = {
{ 16 * 168.25 /*MHz*/, 0x8e, 0xa0, },
{ 16 * 447.25 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_philips_pal_params[] = {
+static const struct tuner_params tuner_philips_pal_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_philips_pal_ranges,
@@ -144,13 +144,13 @@ static struct tuner_params tuner_philips_pal_params[] = {
/* ------------ TUNER_TEMIC_NTSC - TEMIC NTSC ------------ */
-static struct tuner_range tuner_temic_ntsc_ranges[] = {
+static const struct tuner_range tuner_temic_ntsc_ranges[] = {
{ 16 * 157.25 /*MHz*/, 0x8e, 0x02, },
{ 16 * 463.25 /*MHz*/, 0x8e, 0x04, },
{ 16 * 999.99 , 0x8e, 0x01, },
};
-static struct tuner_params tuner_temic_ntsc_params[] = {
+static const struct tuner_params tuner_temic_ntsc_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_temic_ntsc_ranges,
@@ -160,13 +160,13 @@ static struct tuner_params tuner_temic_ntsc_params[] = {
/* ------------ TUNER_TEMIC_PAL_I - TEMIC PAL_I ------------ */
-static struct tuner_range tuner_temic_pal_i_ranges[] = {
+static const struct tuner_range tuner_temic_pal_i_ranges[] = {
{ 16 * 170.00 /*MHz*/, 0x8e, 0x02, },
{ 16 * 450.00 /*MHz*/, 0x8e, 0x04, },
{ 16 * 999.99 , 0x8e, 0x01, },
};
-static struct tuner_params tuner_temic_pal_i_params[] = {
+static const struct tuner_params tuner_temic_pal_i_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_temic_pal_i_ranges,
@@ -176,13 +176,13 @@ static struct tuner_params tuner_temic_pal_i_params[] = {
/* ------------ TUNER_TEMIC_4036FY5_NTSC - TEMIC NTSC ------------ */
-static struct tuner_range tuner_temic_4036fy5_ntsc_ranges[] = {
+static const struct tuner_range tuner_temic_4036fy5_ntsc_ranges[] = {
{ 16 * 157.25 /*MHz*/, 0x8e, 0xa0, },
{ 16 * 463.25 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_temic_4036fy5_ntsc_params[] = {
+static const struct tuner_params tuner_temic_4036fy5_ntsc_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_temic_4036fy5_ntsc_ranges,
@@ -192,13 +192,13 @@ static struct tuner_params tuner_temic_4036fy5_ntsc_params[] = {
/* ------------ TUNER_ALPS_TSBH1_NTSC - TEMIC NTSC ------------ */
-static struct tuner_range tuner_alps_tsb_1_ranges[] = {
+static const struct tuner_range tuner_alps_tsb_1_ranges[] = {
{ 16 * 137.25 /*MHz*/, 0x8e, 0x01, },
{ 16 * 385.25 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x08, },
};
-static struct tuner_params tuner_alps_tsbh1_ntsc_params[] = {
+static const struct tuner_params tuner_alps_tsbh1_ntsc_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_alps_tsb_1_ranges,
@@ -209,7 +209,7 @@ static struct tuner_params tuner_alps_tsbh1_ntsc_params[] = {
/* 10-19 */
/* ------------ TUNER_ALPS_TSBE1_PAL - TEMIC PAL ------------ */
-static struct tuner_params tuner_alps_tsb_1_params[] = {
+static const struct tuner_params tuner_alps_tsb_1_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_alps_tsb_1_ranges,
@@ -219,13 +219,13 @@ static struct tuner_params tuner_alps_tsb_1_params[] = {
/* ------------ TUNER_ALPS_TSBB5_PAL_I - Alps PAL_I ------------ */
-static struct tuner_range tuner_alps_tsb_5_pal_ranges[] = {
+static const struct tuner_range tuner_alps_tsb_5_pal_ranges[] = {
{ 16 * 133.25 /*MHz*/, 0x8e, 0x01, },
{ 16 * 351.25 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x08, },
};
-static struct tuner_params tuner_alps_tsbb5_params[] = {
+static const struct tuner_params tuner_alps_tsbb5_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_alps_tsb_5_pal_ranges,
@@ -235,7 +235,7 @@ static struct tuner_params tuner_alps_tsbb5_params[] = {
/* ------------ TUNER_ALPS_TSBE5_PAL - Alps PAL ------------ */
-static struct tuner_params tuner_alps_tsbe5_params[] = {
+static const struct tuner_params tuner_alps_tsbe5_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_alps_tsb_5_pal_ranges,
@@ -245,7 +245,7 @@ static struct tuner_params tuner_alps_tsbe5_params[] = {
/* ------------ TUNER_ALPS_TSBC5_PAL - Alps PAL ------------ */
-static struct tuner_params tuner_alps_tsbc5_params[] = {
+static const struct tuner_params tuner_alps_tsbc5_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_alps_tsb_5_pal_ranges,
@@ -255,13 +255,13 @@ static struct tuner_params tuner_alps_tsbc5_params[] = {
/* ------------ TUNER_TEMIC_4006FH5_PAL - TEMIC PAL ------------ */
-static struct tuner_range tuner_lg_pal_ranges[] = {
+static const struct tuner_range tuner_lg_pal_ranges[] = {
{ 16 * 170.00 /*MHz*/, 0x8e, 0xa0, },
{ 16 * 450.00 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_temic_4006fh5_params[] = {
+static const struct tuner_params tuner_temic_4006fh5_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_lg_pal_ranges,
@@ -271,13 +271,13 @@ static struct tuner_params tuner_temic_4006fh5_params[] = {
/* ------------ TUNER_ALPS_TSHC6_NTSC - Alps NTSC ------------ */
-static struct tuner_range tuner_alps_tshc6_ntsc_ranges[] = {
+static const struct tuner_range tuner_alps_tshc6_ntsc_ranges[] = {
{ 16 * 137.25 /*MHz*/, 0x8e, 0x14, },
{ 16 * 385.25 /*MHz*/, 0x8e, 0x12, },
{ 16 * 999.99 , 0x8e, 0x11, },
};
-static struct tuner_params tuner_alps_tshc6_params[] = {
+static const struct tuner_params tuner_alps_tshc6_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_alps_tshc6_ntsc_ranges,
@@ -287,13 +287,13 @@ static struct tuner_params tuner_alps_tshc6_params[] = {
/* ------------ TUNER_TEMIC_PAL_DK - TEMIC PAL ------------ */
-static struct tuner_range tuner_temic_pal_dk_ranges[] = {
+static const struct tuner_range tuner_temic_pal_dk_ranges[] = {
{ 16 * 168.25 /*MHz*/, 0x8e, 0xa0, },
{ 16 * 456.25 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_temic_pal_dk_params[] = {
+static const struct tuner_params tuner_temic_pal_dk_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_temic_pal_dk_ranges,
@@ -303,13 +303,13 @@ static struct tuner_params tuner_temic_pal_dk_params[] = {
/* ------------ TUNER_PHILIPS_NTSC_M - Philips NTSC ------------ */
-static struct tuner_range tuner_philips_ntsc_m_ranges[] = {
+static const struct tuner_range tuner_philips_ntsc_m_ranges[] = {
{ 16 * 160.00 /*MHz*/, 0x8e, 0xa0, },
{ 16 * 454.00 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_philips_ntsc_m_params[] = {
+static const struct tuner_params tuner_philips_ntsc_m_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_philips_ntsc_m_ranges,
@@ -319,13 +319,13 @@ static struct tuner_params tuner_philips_ntsc_m_params[] = {
/* ------------ TUNER_TEMIC_4066FY5_PAL_I - TEMIC PAL_I ------------ */
-static struct tuner_range tuner_temic_40x6f_5_pal_ranges[] = {
+static const struct tuner_range tuner_temic_40x6f_5_pal_ranges[] = {
{ 16 * 169.00 /*MHz*/, 0x8e, 0xa0, },
{ 16 * 454.00 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_temic_4066fy5_pal_i_params[] = {
+static const struct tuner_params tuner_temic_4066fy5_pal_i_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_temic_40x6f_5_pal_ranges,
@@ -335,7 +335,7 @@ static struct tuner_params tuner_temic_4066fy5_pal_i_params[] = {
/* ------------ TUNER_TEMIC_4006FN5_MULTI_PAL - TEMIC PAL ------------ */
-static struct tuner_params tuner_temic_4006fn5_multi_params[] = {
+static const struct tuner_params tuner_temic_4006fn5_multi_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_temic_40x6f_5_pal_ranges,
@@ -346,13 +346,13 @@ static struct tuner_params tuner_temic_4006fn5_multi_params[] = {
/* 20-29 */
/* ------------ TUNER_TEMIC_4009FR5_PAL - TEMIC PAL ------------ */
-static struct tuner_range tuner_temic_4009f_5_pal_ranges[] = {
+static const struct tuner_range tuner_temic_4009f_5_pal_ranges[] = {
{ 16 * 141.00 /*MHz*/, 0x8e, 0xa0, },
{ 16 * 464.00 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_temic_4009f_5_params[] = {
+static const struct tuner_params tuner_temic_4009f_5_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_temic_4009f_5_pal_ranges,
@@ -362,13 +362,13 @@ static struct tuner_params tuner_temic_4009f_5_params[] = {
/* ------------ TUNER_TEMIC_4039FR5_NTSC - TEMIC NTSC ------------ */
-static struct tuner_range tuner_temic_4x3x_f_5_ntsc_ranges[] = {
+static const struct tuner_range tuner_temic_4x3x_f_5_ntsc_ranges[] = {
{ 16 * 158.00 /*MHz*/, 0x8e, 0xa0, },
{ 16 * 453.00 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_temic_4039fr5_params[] = {
+static const struct tuner_params tuner_temic_4039fr5_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_temic_4x3x_f_5_ntsc_ranges,
@@ -378,7 +378,7 @@ static struct tuner_params tuner_temic_4039fr5_params[] = {
/* ------------ TUNER_TEMIC_4046FM5 - TEMIC PAL ------------ */
-static struct tuner_params tuner_temic_4046fm5_params[] = {
+static const struct tuner_params tuner_temic_4046fm5_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_temic_40x6f_5_pal_ranges,
@@ -388,7 +388,7 @@ static struct tuner_params tuner_temic_4046fm5_params[] = {
/* ------------ TUNER_PHILIPS_PAL_DK - Philips PAL ------------ */
-static struct tuner_params tuner_philips_pal_dk_params[] = {
+static const struct tuner_params tuner_philips_pal_dk_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_lg_pal_ranges,
@@ -398,7 +398,7 @@ static struct tuner_params tuner_philips_pal_dk_params[] = {
/* ------------ TUNER_PHILIPS_FQ1216ME - Philips PAL ------------ */
-static struct tuner_params tuner_philips_fq1216me_params[] = {
+static const struct tuner_params tuner_philips_fq1216me_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_lg_pal_ranges,
@@ -412,7 +412,7 @@ static struct tuner_params tuner_philips_fq1216me_params[] = {
/* ------------ TUNER_LG_PAL_I_FM - LGINNOTEK PAL_I ------------ */
-static struct tuner_params tuner_lg_pal_i_fm_params[] = {
+static const struct tuner_params tuner_lg_pal_i_fm_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_lg_pal_ranges,
@@ -422,7 +422,7 @@ static struct tuner_params tuner_lg_pal_i_fm_params[] = {
/* ------------ TUNER_LG_PAL_I - LGINNOTEK PAL_I ------------ */
-static struct tuner_params tuner_lg_pal_i_params[] = {
+static const struct tuner_params tuner_lg_pal_i_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_lg_pal_ranges,
@@ -432,13 +432,13 @@ static struct tuner_params tuner_lg_pal_i_params[] = {
/* ------------ TUNER_LG_NTSC_FM - LGINNOTEK NTSC ------------ */
-static struct tuner_range tuner_lg_ntsc_fm_ranges[] = {
+static const struct tuner_range tuner_lg_ntsc_fm_ranges[] = {
{ 16 * 210.00 /*MHz*/, 0x8e, 0xa0, },
{ 16 * 497.00 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_lg_ntsc_fm_params[] = {
+static const struct tuner_params tuner_lg_ntsc_fm_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_lg_ntsc_fm_ranges,
@@ -448,7 +448,7 @@ static struct tuner_params tuner_lg_ntsc_fm_params[] = {
/* ------------ TUNER_LG_PAL_FM - LGINNOTEK PAL ------------ */
-static struct tuner_params tuner_lg_pal_fm_params[] = {
+static const struct tuner_params tuner_lg_pal_fm_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_lg_pal_ranges,
@@ -458,7 +458,7 @@ static struct tuner_params tuner_lg_pal_fm_params[] = {
/* ------------ TUNER_LG_PAL - LGINNOTEK PAL ------------ */
-static struct tuner_params tuner_lg_pal_params[] = {
+static const struct tuner_params tuner_lg_pal_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_lg_pal_ranges,
@@ -469,7 +469,7 @@ static struct tuner_params tuner_lg_pal_params[] = {
/* 30-39 */
/* ------------ TUNER_TEMIC_4009FN5_MULTI_PAL_FM - TEMIC PAL ------------ */
-static struct tuner_params tuner_temic_4009_fn5_multi_pal_fm_params[] = {
+static const struct tuner_params tuner_temic_4009_fn5_multi_pal_fm_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_temic_4009f_5_pal_ranges,
@@ -479,13 +479,13 @@ static struct tuner_params tuner_temic_4009_fn5_multi_pal_fm_params[] = {
/* ------------ TUNER_SHARP_2U5JF5540_NTSC - SHARP NTSC ------------ */
-static struct tuner_range tuner_sharp_2u5jf5540_ntsc_ranges[] = {
+static const struct tuner_range tuner_sharp_2u5jf5540_ntsc_ranges[] = {
{ 16 * 137.25 /*MHz*/, 0x8e, 0x01, },
{ 16 * 317.25 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x08, },
};
-static struct tuner_params tuner_sharp_2u5jf5540_params[] = {
+static const struct tuner_params tuner_sharp_2u5jf5540_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_sharp_2u5jf5540_ntsc_ranges,
@@ -495,13 +495,13 @@ static struct tuner_params tuner_sharp_2u5jf5540_params[] = {
/* ------------ TUNER_Samsung_PAL_TCPM9091PD27 - Samsung PAL ------------ */
-static struct tuner_range tuner_samsung_pal_tcpm9091pd27_ranges[] = {
+static const struct tuner_range tuner_samsung_pal_tcpm9091pd27_ranges[] = {
{ 16 * 169 /*MHz*/, 0x8e, 0xa0, },
{ 16 * 464 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_samsung_pal_tcpm9091pd27_params[] = {
+static const struct tuner_params tuner_samsung_pal_tcpm9091pd27_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_samsung_pal_tcpm9091pd27_ranges,
@@ -511,7 +511,7 @@ static struct tuner_params tuner_samsung_pal_tcpm9091pd27_params[] = {
/* ------------ TUNER_TEMIC_4106FH5 - TEMIC PAL ------------ */
-static struct tuner_params tuner_temic_4106fh5_params[] = {
+static const struct tuner_params tuner_temic_4106fh5_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_temic_4009f_5_pal_ranges,
@@ -521,7 +521,7 @@ static struct tuner_params tuner_temic_4106fh5_params[] = {
/* ------------ TUNER_TEMIC_4012FY5 - TEMIC PAL ------------ */
-static struct tuner_params tuner_temic_4012fy5_params[] = {
+static const struct tuner_params tuner_temic_4012fy5_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_temic_pal_ranges,
@@ -531,7 +531,7 @@ static struct tuner_params tuner_temic_4012fy5_params[] = {
/* ------------ TUNER_TEMIC_4136FY5 - TEMIC NTSC ------------ */
-static struct tuner_params tuner_temic_4136_fy5_params[] = {
+static const struct tuner_params tuner_temic_4136_fy5_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_temic_4x3x_f_5_ntsc_ranges,
@@ -541,13 +541,13 @@ static struct tuner_params tuner_temic_4136_fy5_params[] = {
/* ------------ TUNER_LG_PAL_NEW_TAPC - LGINNOTEK PAL ------------ */
-static struct tuner_range tuner_lg_new_tapc_ranges[] = {
+static const struct tuner_range tuner_lg_new_tapc_ranges[] = {
{ 16 * 170.00 /*MHz*/, 0x8e, 0x01, },
{ 16 * 450.00 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x08, },
};
-static struct tuner_params tuner_lg_pal_new_tapc_params[] = {
+static const struct tuner_params tuner_lg_pal_new_tapc_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_lg_new_tapc_ranges,
@@ -557,13 +557,13 @@ static struct tuner_params tuner_lg_pal_new_tapc_params[] = {
/* ------------ TUNER_PHILIPS_FM1216ME_MK3 - Philips PAL ------------ */
-static struct tuner_range tuner_fm1216me_mk3_pal_ranges[] = {
+static const struct tuner_range tuner_fm1216me_mk3_pal_ranges[] = {
{ 16 * 158.00 /*MHz*/, 0x8e, 0x01, },
{ 16 * 442.00 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x04, },
};
-static struct tuner_params tuner_fm1216me_mk3_params[] = {
+static const struct tuner_params tuner_fm1216me_mk3_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_fm1216me_mk3_pal_ranges,
@@ -582,13 +582,13 @@ static struct tuner_params tuner_fm1216me_mk3_params[] = {
/* ------------ TUNER_PHILIPS_FM1216MK5 - Philips PAL ------------ */
-static struct tuner_range tuner_fm1216mk5_pal_ranges[] = {
+static const struct tuner_range tuner_fm1216mk5_pal_ranges[] = {
{ 16 * 158.00 /*MHz*/, 0xce, 0x01, },
{ 16 * 441.00 /*MHz*/, 0xce, 0x02, },
{ 16 * 864.00 , 0xce, 0x04, },
};
-static struct tuner_params tuner_fm1216mk5_params[] = {
+static const struct tuner_params tuner_fm1216mk5_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_fm1216mk5_pal_ranges,
@@ -607,7 +607,7 @@ static struct tuner_params tuner_fm1216mk5_params[] = {
/* ------------ TUNER_LG_NTSC_NEW_TAPC - LGINNOTEK NTSC ------------ */
-static struct tuner_params tuner_lg_ntsc_new_tapc_params[] = {
+static const struct tuner_params tuner_lg_ntsc_new_tapc_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_lg_new_tapc_ranges,
@@ -618,7 +618,7 @@ static struct tuner_params tuner_lg_ntsc_new_tapc_params[] = {
/* 40-49 */
/* ------------ TUNER_HITACHI_NTSC - HITACHI NTSC ------------ */
-static struct tuner_params tuner_hitachi_ntsc_params[] = {
+static const struct tuner_params tuner_hitachi_ntsc_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_lg_new_tapc_ranges,
@@ -628,13 +628,13 @@ static struct tuner_params tuner_hitachi_ntsc_params[] = {
/* ------------ TUNER_PHILIPS_PAL_MK - Philips PAL ------------ */
-static struct tuner_range tuner_philips_pal_mk_pal_ranges[] = {
+static const struct tuner_range tuner_philips_pal_mk_pal_ranges[] = {
{ 16 * 140.25 /*MHz*/, 0x8e, 0x01, },
{ 16 * 463.25 /*MHz*/, 0x8e, 0xc2, },
{ 16 * 999.99 , 0x8e, 0xcf, },
};
-static struct tuner_params tuner_philips_pal_mk_params[] = {
+static const struct tuner_params tuner_philips_pal_mk_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_philips_pal_mk_pal_ranges,
@@ -644,19 +644,19 @@ static struct tuner_params tuner_philips_pal_mk_params[] = {
/* ---- TUNER_PHILIPS_FCV1236D - Philips FCV1236D (ATSC/NTSC) ---- */
-static struct tuner_range tuner_philips_fcv1236d_ntsc_ranges[] = {
+static const struct tuner_range tuner_philips_fcv1236d_ntsc_ranges[] = {
{ 16 * 157.25 /*MHz*/, 0x8e, 0xa2, },
{ 16 * 451.25 /*MHz*/, 0x8e, 0x92, },
{ 16 * 999.99 , 0x8e, 0x32, },
};
-static struct tuner_range tuner_philips_fcv1236d_atsc_ranges[] = {
+static const struct tuner_range tuner_philips_fcv1236d_atsc_ranges[] = {
{ 16 * 159.00 /*MHz*/, 0x8e, 0xa0, },
{ 16 * 453.00 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_philips_fcv1236d_params[] = {
+static const struct tuner_params tuner_philips_fcv1236d_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_philips_fcv1236d_ntsc_ranges,
@@ -672,13 +672,13 @@ static struct tuner_params tuner_philips_fcv1236d_params[] = {
/* ------------ TUNER_PHILIPS_FM1236_MK3 - Philips NTSC ------------ */
-static struct tuner_range tuner_fm1236_mk3_ntsc_ranges[] = {
+static const struct tuner_range tuner_fm1236_mk3_ntsc_ranges[] = {
{ 16 * 160.00 /*MHz*/, 0x8e, 0x01, },
{ 16 * 442.00 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x04, },
};
-static struct tuner_params tuner_fm1236_mk3_params[] = {
+static const struct tuner_params tuner_fm1236_mk3_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_fm1236_mk3_ntsc_ranges,
@@ -693,7 +693,7 @@ static struct tuner_params tuner_fm1236_mk3_params[] = {
/* ------------ TUNER_PHILIPS_4IN1 - Philips NTSC ------------ */
-static struct tuner_params tuner_philips_4in1_params[] = {
+static const struct tuner_params tuner_philips_4in1_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_fm1236_mk3_ntsc_ranges,
@@ -703,7 +703,7 @@ static struct tuner_params tuner_philips_4in1_params[] = {
/* ------------ TUNER_MICROTUNE_4049FM5 - Microtune PAL ------------ */
-static struct tuner_params tuner_microtune_4049_fm5_params[] = {
+static const struct tuner_params tuner_microtune_4049_fm5_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_temic_4009f_5_pal_ranges,
@@ -718,13 +718,13 @@ static struct tuner_params tuner_microtune_4049_fm5_params[] = {
/* ------------ TUNER_PANASONIC_VP27 - Panasonic NTSC ------------ */
-static struct tuner_range tuner_panasonic_vp27_ntsc_ranges[] = {
+static const struct tuner_range tuner_panasonic_vp27_ntsc_ranges[] = {
{ 16 * 160.00 /*MHz*/, 0xce, 0x01, },
{ 16 * 454.00 /*MHz*/, 0xce, 0x02, },
{ 16 * 999.99 , 0xce, 0x08, },
};
-static struct tuner_params tuner_panasonic_vp27_params[] = {
+static const struct tuner_params tuner_panasonic_vp27_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_panasonic_vp27_ntsc_ranges,
@@ -739,13 +739,13 @@ static struct tuner_params tuner_panasonic_vp27_params[] = {
/* ------------ TUNER_TNF_8831BGFF - Philips PAL ------------ */
-static struct tuner_range tuner_tnf_8831bgff_pal_ranges[] = {
+static const struct tuner_range tuner_tnf_8831bgff_pal_ranges[] = {
{ 16 * 161.25 /*MHz*/, 0x8e, 0xa0, },
{ 16 * 463.25 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_tnf_8831bgff_params[] = {
+static const struct tuner_params tuner_tnf_8831bgff_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_tnf_8831bgff_pal_ranges,
@@ -755,19 +755,19 @@ static struct tuner_params tuner_tnf_8831bgff_params[] = {
/* ------------ TUNER_MICROTUNE_4042FI5 - Microtune NTSC ------------ */
-static struct tuner_range tuner_microtune_4042fi5_ntsc_ranges[] = {
+static const struct tuner_range tuner_microtune_4042fi5_ntsc_ranges[] = {
{ 16 * 162.00 /*MHz*/, 0x8e, 0xa2, },
{ 16 * 457.00 /*MHz*/, 0x8e, 0x94, },
{ 16 * 999.99 , 0x8e, 0x31, },
};
-static struct tuner_range tuner_microtune_4042fi5_atsc_ranges[] = {
+static const struct tuner_range tuner_microtune_4042fi5_atsc_ranges[] = {
{ 16 * 162.00 /*MHz*/, 0x8e, 0xa1, },
{ 16 * 457.00 /*MHz*/, 0x8e, 0x91, },
{ 16 * 999.99 , 0x8e, 0x31, },
};
-static struct tuner_params tuner_microtune_4042fi5_params[] = {
+static const struct tuner_params tuner_microtune_4042fi5_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_microtune_4042fi5_ntsc_ranges,
@@ -784,13 +784,13 @@ static struct tuner_params tuner_microtune_4042fi5_params[] = {
/* 50-59 */
/* ------------ TUNER_TCL_2002N - TCL NTSC ------------ */
-static struct tuner_range tuner_tcl_2002n_ntsc_ranges[] = {
+static const struct tuner_range tuner_tcl_2002n_ntsc_ranges[] = {
{ 16 * 172.00 /*MHz*/, 0x8e, 0x01, },
{ 16 * 448.00 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x08, },
};
-static struct tuner_params tuner_tcl_2002n_params[] = {
+static const struct tuner_params tuner_tcl_2002n_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_tcl_2002n_ntsc_ranges,
@@ -801,7 +801,7 @@ static struct tuner_params tuner_tcl_2002n_params[] = {
/* ------------ TUNER_PHILIPS_FM1256_IH3 - Philips PAL ------------ */
-static struct tuner_params tuner_philips_fm1256_ih3_params[] = {
+static const struct tuner_params tuner_philips_fm1256_ih3_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_fm1236_mk3_ntsc_ranges,
@@ -813,13 +813,13 @@ static struct tuner_params tuner_philips_fm1256_ih3_params[] = {
/* ------------ TUNER_THOMSON_DTT7610 - THOMSON ATSC ------------ */
/* single range used for both ntsc and atsc */
-static struct tuner_range tuner_thomson_dtt7610_ntsc_ranges[] = {
+static const struct tuner_range tuner_thomson_dtt7610_ntsc_ranges[] = {
{ 16 * 157.25 /*MHz*/, 0x8e, 0x39, },
{ 16 * 454.00 /*MHz*/, 0x8e, 0x3a, },
{ 16 * 999.99 , 0x8e, 0x3c, },
};
-static struct tuner_params tuner_thomson_dtt7610_params[] = {
+static const struct tuner_params tuner_thomson_dtt7610_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_thomson_dtt7610_ntsc_ranges,
@@ -835,13 +835,13 @@ static struct tuner_params tuner_thomson_dtt7610_params[] = {
/* ------------ TUNER_PHILIPS_FQ1286 - Philips NTSC ------------ */
-static struct tuner_range tuner_philips_fq1286_ntsc_ranges[] = {
+static const struct tuner_range tuner_philips_fq1286_ntsc_ranges[] = {
{ 16 * 160.00 /*MHz*/, 0x8e, 0x41, },
{ 16 * 454.00 /*MHz*/, 0x8e, 0x42, },
{ 16 * 999.99 , 0x8e, 0x04, },
};
-static struct tuner_params tuner_philips_fq1286_params[] = {
+static const struct tuner_params tuner_philips_fq1286_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_philips_fq1286_ntsc_ranges,
@@ -851,13 +851,13 @@ static struct tuner_params tuner_philips_fq1286_params[] = {
/* ------------ TUNER_TCL_2002MB - TCL PAL ------------ */
-static struct tuner_range tuner_tcl_2002mb_pal_ranges[] = {
+static const struct tuner_range tuner_tcl_2002mb_pal_ranges[] = {
{ 16 * 170.00 /*MHz*/, 0xce, 0x01, },
{ 16 * 450.00 /*MHz*/, 0xce, 0x02, },
{ 16 * 999.99 , 0xce, 0x08, },
};
-static struct tuner_params tuner_tcl_2002mb_params[] = {
+static const struct tuner_params tuner_tcl_2002mb_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_tcl_2002mb_pal_ranges,
@@ -867,13 +867,13 @@ static struct tuner_params tuner_tcl_2002mb_params[] = {
/* ------------ TUNER_PHILIPS_FQ1216AME_MK4 - Philips PAL ------------ */
-static struct tuner_range tuner_philips_fq12_6a___mk4_pal_ranges[] = {
+static const struct tuner_range tuner_philips_fq12_6a___mk4_pal_ranges[] = {
{ 16 * 160.00 /*MHz*/, 0xce, 0x01, },
{ 16 * 442.00 /*MHz*/, 0xce, 0x02, },
{ 16 * 999.99 , 0xce, 0x04, },
};
-static struct tuner_params tuner_philips_fq1216ame_mk4_params[] = {
+static const struct tuner_params tuner_philips_fq1216ame_mk4_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_philips_fq12_6a___mk4_pal_ranges,
@@ -890,7 +890,7 @@ static struct tuner_params tuner_philips_fq1216ame_mk4_params[] = {
/* ------------ TUNER_PHILIPS_FQ1236A_MK4 - Philips NTSC ------------ */
-static struct tuner_params tuner_philips_fq1236a_mk4_params[] = {
+static const struct tuner_params tuner_philips_fq1236a_mk4_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_fm1236_mk3_ntsc_ranges,
@@ -900,7 +900,7 @@ static struct tuner_params tuner_philips_fq1236a_mk4_params[] = {
/* ------------ TUNER_YMEC_TVF_8531MF - Philips NTSC ------------ */
-static struct tuner_params tuner_ymec_tvf_8531mf_params[] = {
+static const struct tuner_params tuner_ymec_tvf_8531mf_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_philips_ntsc_m_ranges,
@@ -910,13 +910,13 @@ static struct tuner_params tuner_ymec_tvf_8531mf_params[] = {
/* ------------ TUNER_YMEC_TVF_5533MF - Philips NTSC ------------ */
-static struct tuner_range tuner_ymec_tvf_5533mf_ntsc_ranges[] = {
+static const struct tuner_range tuner_ymec_tvf_5533mf_ntsc_ranges[] = {
{ 16 * 160.00 /*MHz*/, 0x8e, 0x01, },
{ 16 * 454.00 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x04, },
};
-static struct tuner_params tuner_ymec_tvf_5533mf_params[] = {
+static const struct tuner_params tuner_ymec_tvf_5533mf_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_ymec_tvf_5533mf_ntsc_ranges,
@@ -928,19 +928,19 @@ static struct tuner_params tuner_ymec_tvf_5533mf_params[] = {
/* ------------ TUNER_THOMSON_DTT761X - THOMSON ATSC ------------ */
/* DTT 7611 7611A 7612 7613 7613A 7614 7615 7615A */
-static struct tuner_range tuner_thomson_dtt761x_ntsc_ranges[] = {
+static const struct tuner_range tuner_thomson_dtt761x_ntsc_ranges[] = {
{ 16 * 145.25 /*MHz*/, 0x8e, 0x39, },
{ 16 * 415.25 /*MHz*/, 0x8e, 0x3a, },
{ 16 * 999.99 , 0x8e, 0x3c, },
};
-static struct tuner_range tuner_thomson_dtt761x_atsc_ranges[] = {
+static const struct tuner_range tuner_thomson_dtt761x_atsc_ranges[] = {
{ 16 * 147.00 /*MHz*/, 0x8e, 0x39, },
{ 16 * 417.00 /*MHz*/, 0x8e, 0x3a, },
{ 16 * 999.99 , 0x8e, 0x3c, },
};
-static struct tuner_params tuner_thomson_dtt761x_params[] = {
+static const struct tuner_params tuner_thomson_dtt761x_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_thomson_dtt761x_ntsc_ranges,
@@ -959,13 +959,13 @@ static struct tuner_params tuner_thomson_dtt761x_params[] = {
/* ------------ TUNER_TENA_9533_DI - Philips PAL ------------ */
-static struct tuner_range tuner_tena_9533_di_pal_ranges[] = {
+static const struct tuner_range tuner_tena_9533_di_pal_ranges[] = {
{ 16 * 160.25 /*MHz*/, 0x8e, 0x01, },
{ 16 * 464.25 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x04, },
};
-static struct tuner_params tuner_tena_9533_di_params[] = {
+static const struct tuner_params tuner_tena_9533_di_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_tena_9533_di_pal_ranges,
@@ -975,13 +975,13 @@ static struct tuner_params tuner_tena_9533_di_params[] = {
/* ------------ TUNER_TENA_TNF_5337 - Tena tnf5337MFD STD M/N ------------ */
-static struct tuner_range tuner_tena_tnf_5337_ntsc_ranges[] = {
+static const struct tuner_range tuner_tena_tnf_5337_ntsc_ranges[] = {
{ 16 * 166.25 /*MHz*/, 0x86, 0x01, },
{ 16 * 466.25 /*MHz*/, 0x86, 0x02, },
{ 16 * 999.99 , 0x86, 0x08, },
};
-static struct tuner_params tuner_tena_tnf_5337_params[] = {
+static const struct tuner_params tuner_tena_tnf_5337_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_tena_tnf_5337_ntsc_ranges,
@@ -991,13 +991,13 @@ static struct tuner_params tuner_tena_tnf_5337_params[] = {
/* ------------ TUNER_PHILIPS_FMD1216ME(X)_MK3 - Philips PAL ------------ */
-static struct tuner_range tuner_philips_fmd1216me_mk3_pal_ranges[] = {
+static const struct tuner_range tuner_philips_fmd1216me_mk3_pal_ranges[] = {
{ 16 * 160.00 /*MHz*/, 0x86, 0x51, },
{ 16 * 442.00 /*MHz*/, 0x86, 0x52, },
{ 16 * 999.99 , 0x86, 0x54, },
};
-static struct tuner_range tuner_philips_fmd1216me_mk3_dvb_ranges[] = {
+static const struct tuner_range tuner_philips_fmd1216me_mk3_dvb_ranges[] = {
{ 16 * 143.87 /*MHz*/, 0xbc, 0x41 },
{ 16 * 158.87 /*MHz*/, 0xf4, 0x41 },
{ 16 * 329.87 /*MHz*/, 0xbc, 0x42 },
@@ -1007,7 +1007,7 @@ static struct tuner_range tuner_philips_fmd1216me_mk3_dvb_ranges[] = {
{ 16 * 999.99 , 0xfc, 0x44 },
};
-static struct tuner_params tuner_philips_fmd1216me_mk3_params[] = {
+static const struct tuner_params tuner_philips_fmd1216me_mk3_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_philips_fmd1216me_mk3_pal_ranges,
@@ -1027,7 +1027,7 @@ static struct tuner_params tuner_philips_fmd1216me_mk3_params[] = {
},
};
-static struct tuner_params tuner_philips_fmd1216mex_mk3_params[] = {
+static const struct tuner_params tuner_philips_fmd1216mex_mk3_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_philips_fmd1216me_mk3_pal_ranges,
@@ -1051,19 +1051,19 @@ static struct tuner_params tuner_philips_fmd1216mex_mk3_params[] = {
/* ------ TUNER_LG_TDVS_H06XF - LG INNOTEK / INFINEON ATSC ----- */
-static struct tuner_range tuner_tua6034_ntsc_ranges[] = {
+static const struct tuner_range tuner_tua6034_ntsc_ranges[] = {
{ 16 * 165.00 /*MHz*/, 0x8e, 0x01 },
{ 16 * 450.00 /*MHz*/, 0x8e, 0x02 },
{ 16 * 999.99 , 0x8e, 0x04 },
};
-static struct tuner_range tuner_tua6034_atsc_ranges[] = {
+static const struct tuner_range tuner_tua6034_atsc_ranges[] = {
{ 16 * 165.00 /*MHz*/, 0xce, 0x01 },
{ 16 * 450.00 /*MHz*/, 0xce, 0x02 },
{ 16 * 999.99 , 0xce, 0x04 },
};
-static struct tuner_params tuner_lg_tdvs_h06xf_params[] = {
+static const struct tuner_params tuner_lg_tdvs_h06xf_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_tua6034_ntsc_ranges,
@@ -1079,13 +1079,13 @@ static struct tuner_params tuner_lg_tdvs_h06xf_params[] = {
/* ------------ TUNER_YMEC_TVF66T5_B_DFF - Philips PAL ------------ */
-static struct tuner_range tuner_ymec_tvf66t5_b_dff_pal_ranges[] = {
+static const struct tuner_range tuner_ymec_tvf66t5_b_dff_pal_ranges[] = {
{ 16 * 160.25 /*MHz*/, 0x8e, 0x01, },
{ 16 * 464.25 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x08, },
};
-static struct tuner_params tuner_ymec_tvf66t5_b_dff_params[] = {
+static const struct tuner_params tuner_ymec_tvf66t5_b_dff_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_ymec_tvf66t5_b_dff_pal_ranges,
@@ -1095,19 +1095,19 @@ static struct tuner_params tuner_ymec_tvf66t5_b_dff_params[] = {
/* ------------ TUNER_LG_NTSC_TALN_MINI - LGINNOTEK NTSC ------------ */
-static struct tuner_range tuner_lg_taln_ntsc_ranges[] = {
+static const struct tuner_range tuner_lg_taln_ntsc_ranges[] = {
{ 16 * 137.25 /*MHz*/, 0x8e, 0x01, },
{ 16 * 373.25 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x08, },
};
-static struct tuner_range tuner_lg_taln_pal_secam_ranges[] = {
+static const struct tuner_range tuner_lg_taln_pal_secam_ranges[] = {
{ 16 * 150.00 /*MHz*/, 0x8e, 0x01, },
{ 16 * 425.00 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x08, },
};
-static struct tuner_params tuner_lg_taln_params[] = {
+static const struct tuner_params tuner_lg_taln_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_lg_taln_ntsc_ranges,
@@ -1121,13 +1121,13 @@ static struct tuner_params tuner_lg_taln_params[] = {
/* ------------ TUNER_PHILIPS_TD1316 - Philips PAL ------------ */
-static struct tuner_range tuner_philips_td1316_pal_ranges[] = {
+static const struct tuner_range tuner_philips_td1316_pal_ranges[] = {
{ 16 * 160.00 /*MHz*/, 0xc8, 0xa1, },
{ 16 * 442.00 /*MHz*/, 0xc8, 0xa2, },
{ 16 * 999.99 , 0xc8, 0xa4, },
};
-static struct tuner_range tuner_philips_td1316_dvb_ranges[] = {
+static const struct tuner_range tuner_philips_td1316_dvb_ranges[] = {
{ 16 * 93.834 /*MHz*/, 0xca, 0x60, },
{ 16 * 123.834 /*MHz*/, 0xca, 0xa0, },
{ 16 * 163.834 /*MHz*/, 0xca, 0xc0, },
@@ -1139,7 +1139,7 @@ static struct tuner_range tuner_philips_td1316_dvb_ranges[] = {
{ 16 * 999.999 , 0xca, 0xe0, },
};
-static struct tuner_params tuner_philips_td1316_params[] = {
+static const struct tuner_params tuner_philips_td1316_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_philips_td1316_pal_ranges,
@@ -1155,19 +1155,19 @@ static struct tuner_params tuner_philips_td1316_params[] = {
/* ------------ TUNER_PHILIPS_TUV1236D - Philips ATSC ------------ */
-static struct tuner_range tuner_tuv1236d_ntsc_ranges[] = {
+static const struct tuner_range tuner_tuv1236d_ntsc_ranges[] = {
{ 16 * 157.25 /*MHz*/, 0xce, 0x01, },
{ 16 * 454.00 /*MHz*/, 0xce, 0x02, },
{ 16 * 999.99 , 0xce, 0x04, },
};
-static struct tuner_range tuner_tuv1236d_atsc_ranges[] = {
+static const struct tuner_range tuner_tuv1236d_atsc_ranges[] = {
{ 16 * 157.25 /*MHz*/, 0xc6, 0x41, },
{ 16 * 454.00 /*MHz*/, 0xc6, 0x42, },
{ 16 * 999.99 , 0xc6, 0x44, },
};
-static struct tuner_params tuner_tuv1236d_params[] = {
+static const struct tuner_params tuner_tuv1236d_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_tuv1236d_ntsc_ranges,
@@ -1187,19 +1187,19 @@ static struct tuner_params tuner_tuv1236d_params[] = {
* models based on TI SN 761677 chip on both PAL and NTSC
*/
-static struct tuner_range tuner_tnf_5335_d_if_pal_ranges[] = {
+static const struct tuner_range tuner_tnf_5335_d_if_pal_ranges[] = {
{ 16 * 168.25 /*MHz*/, 0x8e, 0x01, },
{ 16 * 471.25 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x08, },
};
-static struct tuner_range tuner_tnf_5335mf_ntsc_ranges[] = {
+static const struct tuner_range tuner_tnf_5335mf_ntsc_ranges[] = {
{ 16 * 169.25 /*MHz*/, 0x8e, 0x01, },
{ 16 * 469.25 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x08, },
};
-static struct tuner_params tuner_tnf_5335mf_params[] = {
+static const struct tuner_params tuner_tnf_5335mf_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_tnf_5335mf_ntsc_ranges,
@@ -1216,13 +1216,13 @@ static struct tuner_params tuner_tnf_5335mf_params[] = {
/* ------------ TUNER_SAMSUNG_TCPN_2121P30A - Samsung NTSC ------------ */
/* '+ 4' turns on the Low Noise Amplifier */
-static struct tuner_range tuner_samsung_tcpn_2121p30a_ntsc_ranges[] = {
+static const struct tuner_range tuner_samsung_tcpn_2121p30a_ntsc_ranges[] = {
{ 16 * 130.00 /*MHz*/, 0xce, 0x01 + 4, },
{ 16 * 364.50 /*MHz*/, 0xce, 0x02 + 4, },
{ 16 * 999.99 , 0xce, 0x08 + 4, },
};
-static struct tuner_params tuner_samsung_tcpn_2121p30a_params[] = {
+static const struct tuner_params tuner_samsung_tcpn_2121p30a_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_samsung_tcpn_2121p30a_ntsc_ranges,
@@ -1232,20 +1232,20 @@ static struct tuner_params tuner_samsung_tcpn_2121p30a_params[] = {
/* ------------ TUNER_THOMSON_FE6600 - DViCO Hybrid PAL ------------ */
-static struct tuner_range tuner_thomson_fe6600_pal_ranges[] = {
+static const struct tuner_range tuner_thomson_fe6600_pal_ranges[] = {
{ 16 * 160.00 /*MHz*/, 0xfe, 0x11, },
{ 16 * 442.00 /*MHz*/, 0xf6, 0x12, },
{ 16 * 999.99 , 0xf6, 0x18, },
};
-static struct tuner_range tuner_thomson_fe6600_dvb_ranges[] = {
+static const struct tuner_range tuner_thomson_fe6600_dvb_ranges[] = {
{ 16 * 250.00 /*MHz*/, 0xb4, 0x12, },
{ 16 * 455.00 /*MHz*/, 0xfe, 0x11, },
{ 16 * 775.50 /*MHz*/, 0xbc, 0x18, },
{ 16 * 999.99 , 0xf4, 0x18, },
};
-static struct tuner_params tuner_thomson_fe6600_params[] = {
+static const struct tuner_params tuner_thomson_fe6600_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_thomson_fe6600_pal_ranges,
@@ -1262,13 +1262,13 @@ static struct tuner_params tuner_thomson_fe6600_params[] = {
/* ------------ TUNER_SAMSUNG_TCPG_6121P30A - Samsung PAL ------------ */
/* '+ 4' turns on the Low Noise Amplifier */
-static struct tuner_range tuner_samsung_tcpg_6121p30a_pal_ranges[] = {
+static const struct tuner_range tuner_samsung_tcpg_6121p30a_pal_ranges[] = {
{ 16 * 146.25 /*MHz*/, 0xce, 0x01 + 4, },
{ 16 * 428.50 /*MHz*/, 0xce, 0x02 + 4, },
{ 16 * 999.99 , 0xce, 0x08 + 4, },
};
-static struct tuner_params tuner_samsung_tcpg_6121p30a_params[] = {
+static const struct tuner_params tuner_samsung_tcpg_6121p30a_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_samsung_tcpg_6121p30a_pal_ranges,
@@ -1282,13 +1282,13 @@ static struct tuner_params tuner_samsung_tcpg_6121p30a_params[] = {
/* ------------ TUNER_TCL_MF02GIP-5N-E - TCL MF02GIP-5N ------------ */
-static struct tuner_range tuner_tcl_mf02gip_5n_ntsc_ranges[] = {
+static const struct tuner_range tuner_tcl_mf02gip_5n_ntsc_ranges[] = {
{ 16 * 172.00 /*MHz*/, 0x8e, 0x01, },
{ 16 * 448.00 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x04, },
};
-static struct tuner_params tuner_tcl_mf02gip_5n_params[] = {
+static const struct tuner_params tuner_tcl_mf02gip_5n_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_tcl_mf02gip_5n_ntsc_ranges,
@@ -1300,7 +1300,7 @@ static struct tuner_params tuner_tcl_mf02gip_5n_params[] = {
/* 80-89 */
/* --------- TUNER_PHILIPS_FQ1216LME_MK3 -- active loopthrough, no FM ------- */
-static struct tuner_params tuner_fq1216lme_mk3_params[] = {
+static const struct tuner_params tuner_fq1216lme_mk3_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_fm1216me_mk3_pal_ranges,
@@ -1321,7 +1321,7 @@ static struct tuner_params tuner_fq1216lme_mk3_params[] = {
/* ----- TUNER_PARTSNIC_PTI_5NF05 - Partsnic (Daewoo) PTI-5NF05 NTSC ----- */
-static struct tuner_range tuner_partsnic_pti_5nf05_ranges[] = {
+static const struct tuner_range tuner_partsnic_pti_5nf05_ranges[] = {
/* The datasheet specified channel ranges and the bandswitch byte */
/* The control byte value of 0x8e is just a guess */
{ 16 * 133.25 /*MHz*/, 0x8e, 0x01, }, /* Channels 2 - B */
@@ -1329,7 +1329,7 @@ static struct tuner_range tuner_partsnic_pti_5nf05_ranges[] = {
{ 16 * 999.99 , 0x8e, 0x08, }, /* Channels W+12 - 69 */
};
-static struct tuner_params tuner_partsnic_pti_5nf05_params[] = {
+static const struct tuner_params tuner_partsnic_pti_5nf05_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_partsnic_pti_5nf05_ranges,
@@ -1340,13 +1340,13 @@ static struct tuner_params tuner_partsnic_pti_5nf05_params[] = {
/* --------- TUNER_PHILIPS_CU1216L - DVB-C NIM ------------------------- */
-static struct tuner_range tuner_cu1216l_ranges[] = {
+static const struct tuner_range tuner_cu1216l_ranges[] = {
{ 16 * 160.25 /*MHz*/, 0xce, 0x01 },
{ 16 * 444.25 /*MHz*/, 0xce, 0x02 },
{ 16 * 999.99 , 0xce, 0x04 },
};
-static struct tuner_params tuner_philips_cu1216l_params[] = {
+static const struct tuner_params tuner_philips_cu1216l_params[] = {
{
.type = TUNER_PARAM_TYPE_DIGITAL,
.ranges = tuner_cu1216l_ranges,
@@ -1357,13 +1357,13 @@ static struct tuner_params tuner_philips_cu1216l_params[] = {
/* ---------------------- TUNER_SONY_BTF_PXN01Z ------------------------ */
-static struct tuner_range tuner_sony_btf_pxn01z_ranges[] = {
+static const struct tuner_range tuner_sony_btf_pxn01z_ranges[] = {
{ 16 * 137.25 /*MHz*/, 0x8e, 0x01, },
{ 16 * 367.25 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x04, },
};
-static struct tuner_params tuner_sony_btf_pxn01z_params[] = {
+static const struct tuner_params tuner_sony_btf_pxn01z_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_sony_btf_pxn01z_ranges,
@@ -1373,7 +1373,7 @@ static struct tuner_params tuner_sony_btf_pxn01z_params[] = {
/* ------------ TUNER_PHILIPS_FQ1236_MK5 - Philips NTSC ------------ */
-static struct tuner_params tuner_philips_fq1236_mk5_params[] = {
+static const struct tuner_params tuner_philips_fq1236_mk5_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_fm1236_mk3_ntsc_ranges,
@@ -1384,13 +1384,13 @@ static struct tuner_params tuner_philips_fq1236_mk5_params[] = {
/* --------- Sony BTF-PG472Z PAL/SECAM ------- */
-static struct tuner_range tuner_sony_btf_pg472z_ranges[] = {
+static const struct tuner_range tuner_sony_btf_pg472z_ranges[] = {
{ 16 * 144.25 /*MHz*/, 0xc6, 0x01, },
{ 16 * 427.25 /*MHz*/, 0xc6, 0x02, },
{ 16 * 999.99 , 0xc6, 0x04, },
};
-static struct tuner_params tuner_sony_btf_pg472z_params[] = {
+static const struct tuner_params tuner_sony_btf_pg472z_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_sony_btf_pg472z_ranges,
@@ -1404,13 +1404,13 @@ static struct tuner_params tuner_sony_btf_pg472z_params[] = {
/* 90-99 */
/* --------- Sony BTF-PG467Z NTSC-M-JP ------- */
-static struct tuner_range tuner_sony_btf_pg467z_ranges[] = {
+static const struct tuner_range tuner_sony_btf_pg467z_ranges[] = {
{ 16 * 220.25 /*MHz*/, 0xc6, 0x01, },
{ 16 * 467.25 /*MHz*/, 0xc6, 0x02, },
{ 16 * 999.99 , 0xc6, 0x04, },
};
-static struct tuner_params tuner_sony_btf_pg467z_params[] = {
+static const struct tuner_params tuner_sony_btf_pg467z_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_sony_btf_pg467z_ranges,
@@ -1420,13 +1420,13 @@ static struct tuner_params tuner_sony_btf_pg467z_params[] = {
/* --------- Sony BTF-PG463Z NTSC-M ------- */
-static struct tuner_range tuner_sony_btf_pg463z_ranges[] = {
+static const struct tuner_range tuner_sony_btf_pg463z_ranges[] = {
{ 16 * 130.25 /*MHz*/, 0xc6, 0x01, },
{ 16 * 364.25 /*MHz*/, 0xc6, 0x02, },
{ 16 * 999.99 , 0xc6, 0x04, },
};
-static struct tuner_params tuner_sony_btf_pg463z_params[] = {
+static const struct tuner_params tuner_sony_btf_pg463z_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_sony_btf_pg463z_ranges,
@@ -1436,13 +1436,13 @@ static struct tuner_params tuner_sony_btf_pg463z_params[] = {
/* ------------- TUNER_TENA_TNF_931D_DFDR1 - NXP TDA6509A ------------- */
-static struct tuner_range tuner_tena_tnf_931d_dfdr1_ranges[] = {
+static const struct tuner_range tuner_tena_tnf_931d_dfdr1_ranges[] = {
{ 16 * 161.15 /*MHz*/, 0x8e, 0x01, },
{ 16 * 463.15 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x08, },
};
-static struct tuner_params tuner_tena_tnf_931d_dfdr1_params[] = {
+static const struct tuner_params tuner_tena_tnf_931d_dfdr1_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_tena_tnf_931d_dfdr1_ranges,
@@ -1452,7 +1452,7 @@ static struct tuner_params tuner_tena_tnf_931d_dfdr1_params[] = {
/* --------------------------------------------------------------------- */
-struct tunertype tuners[] = {
+const struct tunertype tuners[] = {
/* 0-9 */
[TUNER_TEMIC_PAL] = { /* TEMIC PAL */
.name = "Temic PAL (4002 FH5)",
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index a4a9781328c5..e585c8f6e4c5 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -1538,20 +1538,6 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
return 0;
}
-static int vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctl)
-{
- struct cx231xx *dev = video_drvdata(file);
- struct v4l2_subdev *sd;
-
- dprintk(3, "enter vidioc_s_ctrl()\n");
- /* Update the A/V core */
- v4l2_device_for_each_subdev(sd, &dev->v4l2_dev)
- v4l2_s_ctrl(NULL, sd->ctrl_handler, ctl);
- dprintk(3, "exit vidioc_s_ctrl()\n");
- return 0;
-}
-
static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
@@ -1627,7 +1613,6 @@ static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
.vidioc_enum_input = cx231xx_enum_input,
.vidioc_g_input = cx231xx_g_input,
.vidioc_s_input = cx231xx_s_input,
- .vidioc_s_ctrl = vidioc_s_ctrl,
.vidioc_g_pixelaspect = vidioc_g_pixelaspect,
.vidioc_g_selection = vidioc_g_selection,
.vidioc_querycap = cx231xx_querycap,
@@ -1720,6 +1705,8 @@ static void cx231xx_video_dev_init(
vfd->lock = &dev->lock;
vfd->release = video_device_release_empty;
vfd->ctrl_handler = &dev->mpeg_ctrl_handler.hdl;
+ vfd->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_CAPTURE;
video_set_drvdata(vfd, dev);
if (dev->tuner_type == TUNER_ABSENT) {
v4l2_disable_ioctl(vfd, VIDIOC_G_FREQUENCY);
diff --git a/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.h b/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.h
index 5bc44f194d0a..62ffa16bb82c 100644
--- a/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.h
+++ b/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.h
@@ -57,19 +57,17 @@ enum USB_SPEED{
};
#define TS_MASK 0x6
-enum TS_PORT{
- NO_TS_PORT = 0x0, /* 2'b00: Neither port used. PCB not a Hybrid,
+#define NO_TS_PORT 0x0 /* 2'b00: Neither port used. PCB not a Hybrid,
only offers Analog TV or Video */
- TS1_PORT = 0x4, /* 2'b10: TS1 Input (Hybrid mode :
+#define TS1_PORT 0x4 /* 2'b10: TS1 Input (Hybrid mode :
Digital or External Analog/Compressed source) */
- TS1_TS2_PORT = 0x6, /* 2'b11: TS1 & TS2 Inputs
+#define TS1_TS2_PORT 0x6 /* 2'b11: TS1 & TS2 Inputs
(Dual inputs from Digital and/or
External Analog/Compressed sources) */
- TS1_EXT_CLOCK = 0x6, /* 2'b11: TS1 & TS2 as selector
+#define TS1_EXT_CLOCK 0x6 /* 2'b11: TS1 & TS2 as selector
to external clock */
- TS1VIP_TS2_PORT = 0x2 /* 2'b01: TS1 used as 656/VIP Output,
+#define TS1VIP_TS2_PORT 0x2 /* 2'b01: TS1 used as 656/VIP Output,
TS2 Input (from Compressor) */
-};
#define EAVP_MASK 0x8
enum EAV_PRESENT{
@@ -89,10 +87,8 @@ enum AT_MODE{
};
#define PWR_SEL_MASK 0x40
-enum POWE_TYPE{
- SELF_POWER = 0x0, /* 0: self power */
- BUS_POWER = 0x40 /* 1: bus power */
-};
+#define SELF_POWER 0x0 /* 0: self power */
+#define BUS_POWER 0x40 /* 1: bus power */
enum USB_POWE_TYPE{
USB_SELF_POWER = 0,
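
The cx231xx-pcb-cfg.h hunk above turns the single-use enums into plain #defines without changing any values, so PCB configuration bytes are still decoded with the same masks: TS_MASK (0x6) selects the transport-stream wiring and PWR_SEL_MASK (0x40) the power source. A self-contained sketch of that decoding — decode_pcb_config() and the sample value are invented for illustration, and TS1_EXT_CLOCK is omitted because it shares the value 0x6 with TS1_TS2_PORT and so cannot be a second case label:

	#include <stdio.h>

	/* Values copied from the hunk above. */
	#define TS_MASK		0x6
	#define NO_TS_PORT	0x0
	#define TS1VIP_TS2_PORT	0x2
	#define TS1_PORT	0x4
	#define TS1_TS2_PORT	0x6

	#define PWR_SEL_MASK	0x40
	#define SELF_POWER	0x0
	#define BUS_POWER	0x40

	/* Illustrative only: report what a given PCB config byte selects. */
	static void decode_pcb_config(unsigned int cfg)
	{
		switch (cfg & TS_MASK) {
		case NO_TS_PORT:
			printf("analog/video only, no TS port\n");
			break;
		case TS1VIP_TS2_PORT:
			printf("TS1 as 656/VIP output, TS2 input\n");
			break;
		case TS1_PORT:
			printf("TS1 input (hybrid)\n");
			break;
		case TS1_TS2_PORT:
			printf("TS1 and TS2 inputs\n");
			break;
		}

		printf("%s powered\n",
		       (cfg & PWR_SEL_MASK) == BUS_POWER ? "bus" : "self");
	}

	int main(void)
	{
		decode_pcb_config(TS1_PORT | BUS_POWER);	/* hypothetical value */
		return 0;
	}
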
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c
index 4014f7d07330..3eddc40377bf 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.c
+++ b/drivers/media/usb/dvb-usb-v2/af9015.c
@@ -260,7 +260,7 @@ static u32 af9015_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm af9015_i2c_algo = {
+static const struct i2c_algorithm af9015_i2c_algo = {
.master_xfer = af9015_i2c_xfer,
.functionality = af9015_i2c_func,
};
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 218f712f56b1..17062672ea06 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -483,7 +483,7 @@ static u32 af9035_i2c_functionality(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm af9035_i2c_algo = {
+static const struct i2c_algorithm af9035_i2c_algo = {
.master_xfer = af9035_i2c_master_xfer,
.functionality = af9035_i2c_functionality,
};
diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c
index bea12cdc85e8..64bddca5303c 100644
--- a/drivers/media/usb/dvb-usb-v2/anysee.c
+++ b/drivers/media/usb/dvb-usb-v2/anysee.c
@@ -235,7 +235,7 @@ static u32 anysee_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm anysee_i2c_algo = {
+static const struct i2c_algorithm anysee_i2c_algo = {
.master_xfer = anysee_master_xfer,
.functionality = anysee_i2c_func,
};
diff --git a/drivers/media/usb/dvb-usb-v2/au6610.c b/drivers/media/usb/dvb-usb-v2/au6610.c
index be223fc8aa14..c20a9469f564 100644
--- a/drivers/media/usb/dvb-usb-v2/au6610.c
+++ b/drivers/media/usb/dvb-usb-v2/au6610.c
@@ -115,7 +115,7 @@ static u32 au6610_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm au6610_i2c_algo = {
+static const struct i2c_algorithm au6610_i2c_algo = {
.master_xfer = au6610_i2c_xfer,
.functionality = au6610_i2c_func,
};
diff --git a/drivers/media/usb/dvb-usb-v2/az6007.c b/drivers/media/usb/dvb-usb-v2/az6007.c
index 2410054ddb2c..65ef045b74ca 100644
--- a/drivers/media/usb/dvb-usb-v2/az6007.c
+++ b/drivers/media/usb/dvb-usb-v2/az6007.c
@@ -838,7 +838,7 @@ static u32 az6007_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm az6007_i2c_algo = {
+static const struct i2c_algorithm az6007_i2c_algo = {
.master_xfer = az6007_i2c_xfer,
.functionality = az6007_i2c_func,
};
diff --git a/drivers/media/usb/dvb-usb-v2/ce6230.c b/drivers/media/usb/dvb-usb-v2/ce6230.c
index d3b5cb4a24da..7ebaf3ee4491 100644
--- a/drivers/media/usb/dvb-usb-v2/ce6230.c
+++ b/drivers/media/usb/dvb-usb-v2/ce6230.c
@@ -154,7 +154,7 @@ static u32 ce6230_i2c_functionality(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm ce6230_i2c_algorithm = {
+static const struct i2c_algorithm ce6230_i2c_algorithm = {
.master_xfer = ce6230_i2c_master_xfer,
.functionality = ce6230_i2c_functionality,
};
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb.h b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
index 288c15a7d72b..ecdc20d45132 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb.h
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
@@ -243,7 +243,7 @@ struct dvb_usb_device_properties {
int (*download_firmware) (struct dvb_usb_device *,
const struct firmware *);
- struct i2c_algorithm *i2c_algo;
+ const struct i2c_algorithm *i2c_algo;
unsigned int num_adapters;
int (*get_adapter_count) (struct dvb_usb_device *);
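
The dvb_usb.h change above is what lets every per-driver algorithm table in this series become const: once the i2c_algo member is a pointer to const, a read-only object can legally be assigned to it. A minimal illustration with stand-in types (the structs and callbacks below are simplified placeholders, not the real i2c or dvb-usb definitions):

	#include <stdio.h>

	struct i2c_msg;
	struct i2c_adapter;

	/* Simplified stand-in for the real struct i2c_algorithm. */
	struct i2c_algorithm {
		int (*master_xfer)(struct i2c_adapter *, struct i2c_msg *, int);
		unsigned int (*functionality)(struct i2c_adapter *);
	};

	/* Simplified stand-in for the device properties structure. */
	struct properties {
		const struct i2c_algorithm *i2c_algo;	/* const-qualified member */
	};

	static int xfer(struct i2c_adapter *a, struct i2c_msg *m, int n)
	{
		return n;				/* dummy callback */
	}

	static unsigned int func(struct i2c_adapter *a)
	{
		return 0;				/* dummy callback */
	}

	/* With the member const-qualified, the object itself can live in .rodata. */
	static const struct i2c_algorithm algo = {
		.master_xfer = xfer,
		.functionality = func,
	};

	int main(void)
	{
		struct properties props = { .i2c_algo = &algo };

		printf("functionality: %u\n", props.i2c_algo->functionality(0));
		return 0;
	}
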
diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c
index 1221c924312a..ceac0ea21dab 100644
--- a/drivers/media/usb/dvb-usb-v2/dvbsky.c
+++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c
@@ -169,7 +169,7 @@ static u32 dvbsky_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm dvbsky_i2c_algo = {
+static const struct i2c_algorithm dvbsky_i2c_algo = {
.master_xfer = dvbsky_i2c_xfer,
.functionality = dvbsky_i2c_func,
};
diff --git a/drivers/media/usb/dvb-usb-v2/ec168.c b/drivers/media/usb/dvb-usb-v2/ec168.c
index 0e4773fc025c..973b32356b17 100644
--- a/drivers/media/usb/dvb-usb-v2/ec168.c
+++ b/drivers/media/usb/dvb-usb-v2/ec168.c
@@ -176,7 +176,7 @@ static u32 ec168_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm ec168_i2c_algo = {
+static const struct i2c_algorithm ec168_i2c_algo = {
.master_xfer = ec168_i2c_xfer,
.functionality = ec168_i2c_func,
};
diff --git a/drivers/media/usb/dvb-usb-v2/gl861.c b/drivers/media/usb/dvb-usb-v2/gl861.c
index c71e7b93476d..0538170ccf29 100644
--- a/drivers/media/usb/dvb-usb-v2/gl861.c
+++ b/drivers/media/usb/dvb-usb-v2/gl861.c
@@ -162,7 +162,7 @@ static u32 gl861_i2c_functionality(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm gl861_i2c_algo = {
+static const struct i2c_algorithm gl861_i2c_algo = {
.master_xfer = gl861_i2c_master_xfer,
.functionality = gl861_i2c_functionality,
};
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index f0537b741d13..0c510035805b 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -559,7 +559,7 @@ static u32 lme2510_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm lme2510_i2c_algo = {
+static const struct i2c_algorithm lme2510_i2c_algo = {
.master_xfer = lme2510_i2c_xfer,
.functionality = lme2510_i2c_func,
};
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index cd5861a30b6f..870ac3c8b085 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -911,7 +911,7 @@ static u32 mxl111sf_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm mxl111sf_i2c_algo = {
+static const struct i2c_algorithm mxl111sf_i2c_algo = {
.master_xfer = mxl111sf_i2c_xfer,
.functionality = mxl111sf_i2c_func,
#ifdef NEED_ALGO_CONTROL
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index f7884bb56fcc..487c6ab784ab 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -290,7 +290,7 @@ static u32 rtl28xxu_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm rtl28xxu_i2c_algo = {
+static const struct i2c_algorithm rtl28xxu_i2c_algo = {
.master_xfer = rtl28xxu_i2c_xfer,
.functionality = rtl28xxu_i2c_func,
};
diff --git a/drivers/media/usb/dvb-usb/a800.c b/drivers/media/usb/dvb-usb/a800.c
index 5f294784923c..c5f95e48f1d5 100644
--- a/drivers/media/usb/dvb-usb/a800.c
+++ b/drivers/media/usb/dvb-usb/a800.c
@@ -77,7 +77,7 @@ enum {
AVERMEDIA_DVBT_USB2_WARM,
};
-static struct usb_device_id a800_table[] = {
+static const struct usb_device_id a800_table[] = {
DVB_USB_DEV(AVERMEDIA, AVERMEDIA_DVBT_USB2_COLD),
DVB_USB_DEV(AVERMEDIA, AVERMEDIA_DVBT_USB2_WARM),
{ }
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
index 13604e6acdb8..a4bede7e8a1d 100644
--- a/drivers/media/usb/dvb-usb/af9005.c
+++ b/drivers/media/usb/dvb-usb/af9005.c
@@ -445,7 +445,7 @@ static u32 af9005_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm af9005_i2c_algo = {
+static const struct i2c_algorithm af9005_i2c_algo = {
.master_xfer = af9005_i2c_xfer,
.functionality = af9005_i2c_func,
};
@@ -1005,7 +1005,7 @@ enum {
ANSONIC_DVBT_USB,
};
-static struct usb_device_id af9005_usb_table[] = {
+static const struct usb_device_id af9005_usb_table[] = {
DVB_USB_DEV(AFATECH, AFATECH_AF9005),
DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_T_USB_XE),
DVB_USB_DEV(ANSONIC, ANSONIC_DVBT_USB),
diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c
index 2bc27710427d..056935d3cbd6 100644
--- a/drivers/media/usb/dvb-usb/az6027.c
+++ b/drivers/media/usb/dvb-usb/az6027.c
@@ -1062,7 +1062,7 @@ static u32 az6027_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm az6027_i2c_algo = {
+static const struct i2c_algorithm az6027_i2c_algo = {
.master_xfer = az6027_i2c_xfer,
.functionality = az6027_i2c_func,
};
@@ -1107,7 +1107,7 @@ enum {
ELGATO_EYETV_SAT_V3,
};
-static struct usb_device_id az6027_usb_table[] = {
+static const struct usb_device_id az6027_usb_table[] = {
DVB_USB_DEV(AZUREWAVE, AZUREWAVE_AZ6027),
DVB_USB_DEV(TERRATEC, TERRATEC_DVBS2CI_V1),
DVB_USB_DEV(TERRATEC, TERRATEC_DVBS2CI_V2),
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
index 4926c954e29a..d86c279e2dce 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
@@ -197,7 +197,7 @@ enum {
TERRATEC_CINERGY_T2,
};
-static struct usb_device_id cinergyt2_usb_table[] = {
+static const struct usb_device_id cinergyt2_usb_table[] = {
DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_T2),
{ }
};
diff --git a/drivers/media/usb/dvb-usb/cxusb-analog.c b/drivers/media/usb/dvb-usb/cxusb-analog.c
index 8253046cd6e6..3bbee1fcbc8d 100644
--- a/drivers/media/usb/dvb-usb/cxusb-analog.c
+++ b/drivers/media/usb/dvb-usb/cxusb-analog.c
@@ -817,8 +817,8 @@ static int cxusb_medion_v_start_streaming(struct vb2_queue *q,
* doing a large continuous allocation when (if)
* s-g isochronous USB transfers are supported
*/
- streambuf = kmalloc(npackets * CXUSB_VIDEO_PKT_SIZE,
- GFP_KERNEL);
+ streambuf = kmalloc_array(npackets, CXUSB_VIDEO_PKT_SIZE,
+ GFP_KERNEL);
if (!streambuf) {
if (i < 2) {
ret = -ENOMEM;
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 1d98d3465e28..f44529b40989 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -78,7 +78,7 @@ enum cxusb_table_index {
NR__cxusb_table_index
};
-static struct usb_device_id cxusb_table[];
+static const struct usb_device_id cxusb_table[];
int cxusb_ctrl_msg(struct dvb_usb_device *d,
u8 cmd, const u8 *wbuf, int wlen, u8 *rbuf, int rlen)
@@ -287,7 +287,7 @@ static u32 cxusb_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
-static struct i2c_algorithm cxusb_i2c_algo = {
+static const struct i2c_algorithm cxusb_i2c_algo = {
.master_xfer = cxusb_i2c_xfer,
.functionality = cxusb_i2c_func,
};
@@ -1692,7 +1692,7 @@ static void cxusb_disconnect(struct usb_interface *intf)
dvb_usb_device_exit(intf);
}
-static struct usb_device_id cxusb_table[] = {
+static const struct usb_device_id cxusb_table[] = {
DVB_USB_DEV(MEDION, MEDION_MD95700),
DVB_USB_DEV(DVICO, DVICO_BLUEBIRD_LG064F_COLD),
DVB_USB_DEV(DVICO, DVICO_BLUEBIRD_LG064F_WARM),
diff --git a/drivers/media/usb/dvb-usb/dibusb-mb.c b/drivers/media/usb/dvb-usb/dibusb-mb.c
index 2cd88cab4c98..431766f19931 100644
--- a/drivers/media/usb/dvb-usb/dibusb-mb.c
+++ b/drivers/media/usb/dvb-usb/dibusb-mb.c
@@ -155,7 +155,7 @@ enum {
ULTIMA_TVBOX_ANCHOR_COLD,
};
-static struct usb_device_id dibusb_dib3000mb_table[] = {
+static const struct usb_device_id dibusb_dib3000mb_table[] = {
DVB_USB_DEV(WIDEVIEW, WIDEVIEW_DVBT_USB_COLD),
DVB_USB_DEV(WIDEVIEW, WIDEVIEW_DVBT_USB_WARM),
DVB_USB_DEV(COMPRO, COMPRO_DVBU2000_COLD),
diff --git a/drivers/media/usb/dvb-usb/dibusb-mc.c b/drivers/media/usb/dvb-usb/dibusb-mc.c
index 00cb016f6266..01eece2687d6 100644
--- a/drivers/media/usb/dvb-usb/dibusb-mc.c
+++ b/drivers/media/usb/dvb-usb/dibusb-mc.c
@@ -43,7 +43,7 @@ enum {
HUMAX_DVB_T_STICK_HIGH_SPEED_WARM,
};
-static struct usb_device_id dibusb_dib3000mc_table[] = {
+static const struct usb_device_id dibusb_dib3000mc_table[] = {
DVB_USB_DEV(DIBCOM, DIBCOM_MOD3001_COLD),
DVB_USB_DEV(DIBCOM, DIBCOM_MOD3001_WARM),
DVB_USB_DEV(ULTIMA_ELECTRONIC, ULTIMA_TVBOX_USB2_COLD),
diff --git a/drivers/media/usb/dvb-usb/digitv.c b/drivers/media/usb/dvb-usb/digitv.c
index 32134be16914..ab229ab1a858 100644
--- a/drivers/media/usb/dvb-usb/digitv.c
+++ b/drivers/media/usb/dvb-usb/digitv.c
@@ -88,7 +88,7 @@ static u32 digitv_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm digitv_i2c_algo = {
+static const struct i2c_algorithm digitv_i2c_algo = {
.master_xfer = digitv_i2c_xfer,
.functionality = digitv_i2c_func,
};
@@ -299,7 +299,7 @@ enum {
ANCHOR_NEBULA_DIGITV,
};
-static struct usb_device_id digitv_table[] = {
+static const struct usb_device_id digitv_table[] = {
DVB_USB_DEV(ANCHOR, ANCHOR_NEBULA_DIGITV),
{ }
};
diff --git a/drivers/media/usb/dvb-usb/dtt200u.c b/drivers/media/usb/dvb-usb/dtt200u.c
index e6ee56b3a9dd..83a69df384f2 100644
--- a/drivers/media/usb/dvb-usb/dtt200u.c
+++ b/drivers/media/usb/dvb-usb/dtt200u.c
@@ -171,7 +171,7 @@ enum {
MIGLIA_WT220U_ZAP250_COLD,
};
-static struct usb_device_id dtt200u_usb_table[] = {
+static const struct usb_device_id dtt200u_usb_table[] = {
DVB_USB_DEV(WIDEVIEW, WIDEVIEW_DTT200U_COLD),
DVB_USB_DEV(WIDEVIEW, WIDEVIEW_DTT200U_WARM),
DVB_USB_DEV(WIDEVIEW, WIDEVIEW_WT220U_COLD),
diff --git a/drivers/media/usb/dvb-usb/dtv5100.c b/drivers/media/usb/dvb-usb/dtv5100.c
index 56c9d521a34a..3d85c6f7f6ec 100644
--- a/drivers/media/usb/dvb-usb/dtv5100.c
+++ b/drivers/media/usb/dvb-usb/dtv5100.c
@@ -97,7 +97,7 @@ static u32 dtv5100_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm dtv5100_i2c_algo = {
+static const struct i2c_algorithm dtv5100_i2c_algo = {
.master_xfer = dtv5100_i2c_xfer,
.functionality = dtv5100_i2c_func,
};
@@ -166,7 +166,7 @@ enum {
AME_DTV5100,
};
-static struct usb_device_id dtv5100_table[] = {
+static const struct usb_device_id dtv5100_table[] = {
DVB_USB_DEV(AME, AME_DTV5100),
{ }
};
diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
index cbb0541d4dc1..550006a8d86f 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb.h
+++ b/drivers/media/usb/dvb-usb/dvb-usb.h
@@ -73,8 +73,8 @@ struct dvb_usb_device_description {
const char *name;
#define DVB_USB_ID_MAX_NUM 15
- struct usb_device_id *cold_ids[DVB_USB_ID_MAX_NUM];
- struct usb_device_id *warm_ids[DVB_USB_ID_MAX_NUM];
+ const struct usb_device_id *cold_ids[DVB_USB_ID_MAX_NUM];
+ const struct usb_device_id *warm_ids[DVB_USB_ID_MAX_NUM];
};
static inline u8 rc5_custom(struct rc_map_table *key)
@@ -309,7 +309,7 @@ struct dvb_usb_device_properties {
struct dvb_rc core;
} rc;
- struct i2c_algorithm *i2c_algo;
+ const struct i2c_algorithm *i2c_algo;
int generic_bulk_ctrl_endpoint;
int generic_bulk_ctrl_endpoint_response;
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 79e2ccf974c9..4fecf2f965e9 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -839,37 +839,37 @@ static u32 dw210x_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm dw2102_i2c_algo = {
+static const struct i2c_algorithm dw2102_i2c_algo = {
.master_xfer = dw2102_i2c_transfer,
.functionality = dw210x_i2c_func,
};
-static struct i2c_algorithm dw2102_serit_i2c_algo = {
+static const struct i2c_algorithm dw2102_serit_i2c_algo = {
.master_xfer = dw2102_serit_i2c_transfer,
.functionality = dw210x_i2c_func,
};
-static struct i2c_algorithm dw2102_earda_i2c_algo = {
+static const struct i2c_algorithm dw2102_earda_i2c_algo = {
.master_xfer = dw2102_earda_i2c_transfer,
.functionality = dw210x_i2c_func,
};
-static struct i2c_algorithm dw2104_i2c_algo = {
+static const struct i2c_algorithm dw2104_i2c_algo = {
.master_xfer = dw2104_i2c_transfer,
.functionality = dw210x_i2c_func,
};
-static struct i2c_algorithm dw3101_i2c_algo = {
+static const struct i2c_algorithm dw3101_i2c_algo = {
.master_xfer = dw3101_i2c_transfer,
.functionality = dw210x_i2c_func,
};
-static struct i2c_algorithm s6x0_i2c_algo = {
+static const struct i2c_algorithm s6x0_i2c_algo = {
.master_xfer = s6x0_i2c_transfer,
.functionality = dw210x_i2c_func,
};
-static struct i2c_algorithm su3000_i2c_algo = {
+static const struct i2c_algorithm su3000_i2c_algo = {
.master_xfer = su3000_i2c_transfer,
.functionality = dw210x_i2c_func,
};
@@ -1836,7 +1836,7 @@ enum dw2102_table_entry {
TEVII_S662
};
-static struct usb_device_id dw2102_table[] = {
+static const struct usb_device_id dw2102_table[] = {
DVB_USB_DEV(CYPRESS, CYPRESS_DW2102),
DVB_USB_DEV(CYPRESS, CYPRESS_DW2101),
DVB_USB_DEV(CYPRESS, CYPRESS_DW2104),
diff --git a/drivers/media/usb/dvb-usb/gp8psk.c b/drivers/media/usb/dvb-usb/gp8psk.c
index 4cd21bb8805e..96a255500b38 100644
--- a/drivers/media/usb/dvb-usb/gp8psk.c
+++ b/drivers/media/usb/dvb-usb/gp8psk.c
@@ -319,7 +319,7 @@ enum {
GENPIX_SKYWALKER_CW3K,
};
-static struct usb_device_id gp8psk_usb_table[] = {
+static const struct usb_device_id gp8psk_usb_table[] = {
DVB_USB_DEV(GENPIX, GENPIX_8PSK_REV_1_COLD),
DVB_USB_DEV(GENPIX, GENPIX_8PSK_REV_1_WARM),
DVB_USB_DEV(GENPIX, GENPIX_8PSK_REV_2),
diff --git a/drivers/media/usb/dvb-usb/m920x.c b/drivers/media/usb/dvb-usb/m920x.c
index a2054b1b100f..45337ba0a0a3 100644
--- a/drivers/media/usb/dvb-usb/m920x.c
+++ b/drivers/media/usb/dvb-usb/m920x.c
@@ -319,7 +319,7 @@ static u32 m920x_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm m920x_i2c_algo = {
+static const struct i2c_algorithm m920x_i2c_algo = {
.master_xfer = m920x_i2c_xfer,
.functionality = m920x_i2c_func,
};
@@ -909,7 +909,7 @@ enum {
AZUREWAVE_TWINHAN_VP7049,
};
-static struct usb_device_id m920x_table[] = {
+static const struct usb_device_id m920x_table[] = {
DVB_USB_DEV(MSI, MSI_MEGASKY580),
DVB_USB_DEV(ANUBIS_ELECTRONIC, ANUBIS_MSI_DIGI_VOX_MINI_II),
DVB_USB_DEV(ANUBIS_ELECTRONIC, ANUBIS_LIFEVIEW_TV_WALKER_TWIN_COLD),
diff --git a/drivers/media/usb/dvb-usb/nova-t-usb2.c b/drivers/media/usb/dvb-usb/nova-t-usb2.c
index 4782d0780913..2e5cbfacbeed 100644
--- a/drivers/media/usb/dvb-usb/nova-t-usb2.c
+++ b/drivers/media/usb/dvb-usb/nova-t-usb2.c
@@ -165,7 +165,7 @@ enum {
HAUPPAUGE_WINTV_NOVA_T_USB2_WARM,
};
-static struct usb_device_id nova_t_table[] = {
+static const struct usb_device_id nova_t_table[] = {
DVB_USB_DEV(HAUPPAUGE, HAUPPAUGE_WINTV_NOVA_T_USB2_COLD),
DVB_USB_DEV(HAUPPAUGE, HAUPPAUGE_WINTV_NOVA_T_USB2_WARM),
{ }
diff --git a/drivers/media/usb/dvb-usb/opera1.c b/drivers/media/usb/dvb-usb/opera1.c
index 268f05fc8691..3c79cc6848b4 100644
--- a/drivers/media/usb/dvb-usb/opera1.c
+++ b/drivers/media/usb/dvb-usb/opera1.c
@@ -155,7 +155,7 @@ static u32 opera1_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm opera1_i2c_algo = {
+static const struct i2c_algorithm opera1_i2c_algo = {
.master_xfer = opera1_i2c_xfer,
.functionality = opera1_i2c_func,
};
@@ -425,7 +425,7 @@ enum {
OPERA1_WARM,
};
-static struct usb_device_id opera1_table[] = {
+static const struct usb_device_id opera1_table[] = {
DVB_USB_DEV(CYPRESS, CYPRESS_OPERA1_COLD),
DVB_USB_DEV(OPERA1, OPERA1_WARM),
{ }
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
index 2aab49003493..5094de9a312e 100644
--- a/drivers/media/usb/dvb-usb/pctv452e.c
+++ b/drivers/media/usb/dvb-usb/pctv452e.c
@@ -906,14 +906,14 @@ static struct stb6100_config stb6100_config = {
};
-static struct i2c_algorithm pctv452e_i2c_algo = {
+static const struct i2c_algorithm pctv452e_i2c_algo = {
.master_xfer = pctv452e_i2c_xfer,
.functionality = pctv452e_i2c_func
};
static int pctv452e_frontend_attach(struct dvb_usb_adapter *a)
{
- struct usb_device_id *id;
+ const struct usb_device_id *id;
a->fe_adap[0].fe = dvb_attach(stb0899_attach, &stb0899_config,
&a->dev->i2c_adap);
@@ -959,7 +959,7 @@ enum {
TECHNOTREND_CONNECT_S2_3650_CI,
};
-static struct usb_device_id pctv452e_usb_table[] = {
+static const struct usb_device_id pctv452e_usb_table[] = {
DVB_USB_DEV(PINNACLE, PINNACLE_PCTV_452E),
DVB_USB_DEV(TECHNOTREND, TECHNOTREND_CONNECT_S2_3600),
DVB_USB_DEV(TECHNOTREND, TECHNOTREND_CONNECT_S2_3650_CI),
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index df90c6c5f3b9..1e43aab2bc27 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -199,7 +199,7 @@ static u32 technisat_usb2_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm technisat_usb2_i2c_algo = {
+static const struct i2c_algorithm technisat_usb2_i2c_algo = {
.master_xfer = technisat_usb2_i2c_xfer,
.functionality = technisat_usb2_i2c_func,
};
@@ -693,7 +693,7 @@ enum {
TECHNISAT_USB2_DVB_S2,
};
-static struct usb_device_id technisat_usb2_id_table[] = {
+static const struct usb_device_id technisat_usb2_id_table[] = {
DVB_USB_DEV(TECHNISAT, TECHNISAT_USB2_DVB_S2),
{ }
};
diff --git a/drivers/media/usb/dvb-usb/ttusb2.c b/drivers/media/usb/dvb-usb/ttusb2.c
index 373ffa7f641e..acde6149d278 100644
--- a/drivers/media/usb/dvb-usb/ttusb2.c
+++ b/drivers/media/usb/dvb-usb/ttusb2.c
@@ -434,7 +434,7 @@ static u32 ttusb2_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm ttusb2_i2c_algo = {
+static const struct i2c_algorithm ttusb2_i2c_algo = {
.master_xfer = ttusb2_i2c_xfer,
.functionality = ttusb2_i2c_func,
};
@@ -638,7 +638,7 @@ enum {
TECHNOTREND_CONNECT_S2400_8KEEPROM,
};
-static struct usb_device_id ttusb2_table[] = {
+static const struct usb_device_id ttusb2_table[] = {
DVB_USB_DEV(PINNACLE, PINNACLE_PCTV_400E),
DVB_USB_DEV(PINNACLE, PINNACLE_PCTV_450E),
DVB_USB_DEV(TECHNOTREND, TECHNOTREND_CONNECT_S2400),
diff --git a/drivers/media/usb/dvb-usb/umt-010.c b/drivers/media/usb/dvb-usb/umt-010.c
index 464699b0b75b..8f23f92946d4 100644
--- a/drivers/media/usb/dvb-usb/umt-010.c
+++ b/drivers/media/usb/dvb-usb/umt-010.c
@@ -86,7 +86,7 @@ enum {
HANFTEK_UMT_010_WARM,
};
-static struct usb_device_id umt_table[] = {
+static const struct usb_device_id umt_table[] = {
DVB_USB_DEV(HANFTEK, HANFTEK_UMT_010_COLD),
DVB_USB_DEV(HANFTEK, HANFTEK_UMT_010_WARM),
{ }
diff --git a/drivers/media/usb/dvb-usb/vp702x.c b/drivers/media/usb/dvb-usb/vp702x.c
index 5b6740cbd1d1..034b0652b9a1 100644
--- a/drivers/media/usb/dvb-usb/vp702x.c
+++ b/drivers/media/usb/dvb-usb/vp702x.c
@@ -375,7 +375,7 @@ enum {
VISIONPLUS_VP7020_WARM,
};
-static struct usb_device_id vp702x_usb_table[] = {
+static const struct usb_device_id vp702x_usb_table[] = {
DVB_USB_DEV(VISIONPLUS, VISIONPLUS_VP7021_COLD),
// DVB_USB_DEV(VISIONPLUS, VISIONPLUS_VP7020_COLD),
// DVB_USB_DEV(VISIONPLUS, VISIONPLUS_VP7020_WARM),
diff --git a/drivers/media/usb/dvb-usb/vp7045.c b/drivers/media/usb/dvb-usb/vp7045.c
index 1dc2b18d44d8..5224c3233f8c 100644
--- a/drivers/media/usb/dvb-usb/vp7045.c
+++ b/drivers/media/usb/dvb-usb/vp7045.c
@@ -179,7 +179,7 @@ enum {
VISIONPLUS_TINYUSB2_WARM,
};
-static struct usb_device_id vp7045_usb_table[] = {
+static const struct usb_device_id vp7045_usb_table[] = {
DVB_USB_DEV(VISIONPLUS, VISIONPLUS_VP7045_COLD),
DVB_USB_DEV(VISIONPLUS, VISIONPLUS_VP7045_WARM),
DVB_USB_DEV(VISIONPLUS, VISIONPLUS_TINYUSB2_COLD),
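
The hunks above only add const qualifiers; this is safe because the consuming structures already take pointers to const (struct usb_driver::id_table is a const struct usb_device_id *, and struct i2c_adapter::algo is a const struct i2c_algorithm *), so the tables can move to rodata. A minimal sketch of the pattern, not part of the patch, with a hypothetical driver name and placeholder VID/PID:

#include <linux/module.h>
#include <linux/usb.h>

/* Hypothetical device table; the USB_DEVICE() values are placeholders. */
static const struct usb_device_id example_id_table[] = {
	{ USB_DEVICE(0x1234, 0x5678) },
	{ }
};
MODULE_DEVICE_TABLE(usb, example_id_table);

static struct usb_driver example_usb_driver = {
	.name     = "example",
	/*
	 * .id_table is declared as const struct usb_device_id *, so the
	 * const table above is accepted.  probe/disconnect omitted here.
	 */
	.id_table = example_id_table,
};
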
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index d608b793fa84..ad38e1240541 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -497,8 +497,8 @@ static int pvr2_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
return pvr2_hdw_set_streaming(hdw, 0);
}
-static int pvr2_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *vc)
+static int pvr2_query_ext_ctrl(struct file *file, void *priv,
+ struct v4l2_query_ext_ctrl *vc)
{
struct pvr2_v4l2_fh *fh = file->private_data;
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
@@ -521,13 +521,16 @@ static int pvr2_queryctrl(struct file *file, void *priv,
}
pvr2_trace(PVR2_TRACE_V4LIOCTL,
- "QUERYCTRL id=0x%x mapping name=%s (%s)",
+ "QUERYEXTCTRL id=0x%x mapping name=%s (%s)",
vc->id, pvr2_ctrl_get_name(cptr),
pvr2_ctrl_get_desc(cptr));
strscpy(vc->name, pvr2_ctrl_get_desc(cptr), sizeof(vc->name));
vc->flags = pvr2_ctrl_get_v4lflags(cptr);
pvr2_ctrl_get_def(cptr, &val);
vc->default_value = val;
+ vc->nr_of_dims = 0;
+ vc->elems = 1;
+ vc->elem_size = 4;
switch (pvr2_ctrl_get_type(cptr)) {
case pvr2_ctl_enum:
vc->type = V4L2_CTRL_TYPE_MENU;
@@ -549,7 +552,7 @@ static int pvr2_queryctrl(struct file *file, void *priv,
break;
default:
pvr2_trace(PVR2_TRACE_V4LIOCTL,
- "QUERYCTRL id=0x%x name=%s not mappable",
+ "QUERYEXTCTRL id=0x%x name=%s not mappable",
vc->id, pvr2_ctrl_get_name(cptr));
return -EINVAL;
}
@@ -571,31 +574,6 @@ static int pvr2_querymenu(struct file *file, void *priv, struct v4l2_querymenu *
return ret;
}
-static int pvr2_g_ctrl(struct file *file, void *priv, struct v4l2_control *vc)
-{
- struct pvr2_v4l2_fh *fh = file->private_data;
- struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
- int val = 0;
- int ret;
-
- ret = pvr2_ctrl_get_value(pvr2_hdw_get_ctrl_v4l(hdw, vc->id),
- &val);
- vc->value = val;
- return ret;
-}
-
-static int pvr2_s_ctrl(struct file *file, void *priv, struct v4l2_control *vc)
-{
- struct pvr2_v4l2_fh *fh = file->private_data;
- struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
- int ret;
-
- ret = pvr2_ctrl_set_value(pvr2_hdw_get_ctrl_v4l(hdw, vc->id),
- vc->value);
- pvr2_hdw_commit_ctl(hdw);
- return ret;
-}
-
static int pvr2_g_ext_ctrls(struct file *file, void *priv,
struct v4l2_ext_controls *ctls)
{
@@ -812,10 +790,8 @@ static const struct v4l2_ioctl_ops pvr2_ioctl_ops = {
.vidioc_try_fmt_vid_cap = pvr2_try_fmt_vid_cap,
.vidioc_streamon = pvr2_streamon,
.vidioc_streamoff = pvr2_streamoff,
- .vidioc_queryctrl = pvr2_queryctrl,
+ .vidioc_query_ext_ctrl = pvr2_query_ext_ctrl,
.vidioc_querymenu = pvr2_querymenu,
- .vidioc_g_ctrl = pvr2_g_ctrl,
- .vidioc_s_ctrl = pvr2_s_ctrl,
.vidioc_g_ext_ctrls = pvr2_g_ext_ctrls,
.vidioc_s_ext_ctrls = pvr2_s_ext_ctrls,
.vidioc_try_ext_ctrls = pvr2_try_ext_ctrls,
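
With pvrusb2 now wired to vidioc_query_ext_ctrl, controls are enumerated from user space through VIDIOC_QUERY_EXT_CTRL. A rough user-space sketch (the device path is an assumption, error handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_query_ext_ctrl qec;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&qec, 0, sizeof(qec));
	/* Walk all controls, including compound ones. */
	qec.id = V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND;
	while (ioctl(fd, VIDIOC_QUERY_EXT_CTRL, &qec) == 0) {
		printf("0x%08x %s (type %u)\n", qec.id, qec.name, qec.type);
		qec.id |= V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND;
	}
	return 0;
}
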
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index 3ec9eb5956ed..c6e5d031f068 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -56,7 +56,6 @@
#endif
#include <linux/vmalloc.h>
#include <asm/io.h>
-#include <linux/kernel.h> /* simple_strtol() */
#include "pwc.h"
#include "pwc-kiara.h"
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index 4e58476d305e..cbf19aa1d823 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -358,6 +358,24 @@ static const struct uvc_control_info uvc_ctrls[] = {
.flags = UVC_CTRL_FLAG_GET_CUR
| UVC_CTRL_FLAG_AUTO_UPDATE,
},
+ /*
+ * UVC_CTRL_FLAG_AUTO_UPDATE is needed because the RoI may get updated
+ * by sensors.
+ * "This RoI should be the same as specified in most recent SET_CUR
+ * except in the case where the ‘Auto Detect and Track’ and/or
+ * ‘Image Stabilization’ bit have been set."
+ * 4.2.2.1.20 Digital Region of Interest (ROI) Control
+ */
+ {
+ .entity = UVC_GUID_UVC_CAMERA,
+ .selector = UVC_CT_REGION_OF_INTEREST_CONTROL,
+ .index = 21,
+ .size = 10,
+ .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR
+ | UVC_CTRL_FLAG_GET_MIN | UVC_CTRL_FLAG_GET_MAX
+ | UVC_CTRL_FLAG_GET_DEF
+ | UVC_CTRL_FLAG_AUTO_UPDATE,
+ },
};
static const u32 uvc_control_classes[] = {
@@ -367,6 +385,27 @@ static const u32 uvc_control_classes[] = {
static const int exposure_auto_mapping[] = { 2, 1, 4, 8 };
+static bool uvc_ctrl_mapping_is_compound(struct uvc_control_mapping *mapping)
+{
+ return mapping->v4l2_type >= V4L2_CTRL_COMPOUND_TYPES;
+}
+
+static s32 uvc_mapping_get_s32(struct uvc_control_mapping *mapping,
+ u8 query, const void *data_in)
+{
+ s32 data_out = 0;
+
+ mapping->get(mapping, query, data_in, sizeof(data_out), &data_out);
+
+ return data_out;
+}
+
+static void uvc_mapping_set_s32(struct uvc_control_mapping *mapping,
+ s32 data_in, void *data_out)
+{
+ mapping->set(mapping, sizeof(data_in), &data_in, data_out);
+}
+
/*
* This function translates the V4L2 menu index @idx, as exposed to userspace as
* the V4L2 control value, to the corresponding UVC control value used by the
@@ -405,58 +444,93 @@ uvc_mapping_get_menu_name(const struct uvc_control_mapping *mapping, u32 idx)
return v4l2_ctrl_get_menu(mapping->id)[idx];
}
-static s32 uvc_ctrl_get_zoom(struct uvc_control_mapping *mapping,
- u8 query, const u8 *data)
+static int uvc_ctrl_get_zoom(struct uvc_control_mapping *mapping, u8 query,
+ const void *uvc_in, size_t v4l2_size,
+ void *v4l2_out)
{
- s8 zoom = (s8)data[0];
+ u8 value = ((u8 *)uvc_in)[2];
+ s8 sign = ((s8 *)uvc_in)[0];
+ s32 *out = v4l2_out;
+
+ if (WARN_ON(v4l2_size != sizeof(s32)))
+ return -EINVAL;
switch (query) {
case UVC_GET_CUR:
- return (zoom == 0) ? 0 : (zoom > 0 ? data[2] : -data[2]);
+ *out = (sign == 0) ? 0 : (sign > 0 ? value : -value);
+ return 0;
case UVC_GET_MIN:
case UVC_GET_MAX:
case UVC_GET_RES:
case UVC_GET_DEF:
default:
- return data[2];
+ *out = value;
+ return 0;
}
}
-static void uvc_ctrl_set_zoom(struct uvc_control_mapping *mapping,
- s32 value, u8 *data)
+static int uvc_ctrl_set_zoom(struct uvc_control_mapping *mapping,
+ size_t v4l2_size, const void *v4l2_in,
+ void *uvc_out)
{
- data[0] = value == 0 ? 0 : (value > 0) ? 1 : 0xff;
- data[2] = min((int)abs(value), 0xff);
+ u8 *out = uvc_out;
+ s32 value;
+
+ if (WARN_ON(v4l2_size != sizeof(s32)))
+ return -EINVAL;
+
+ value = *(u32 *)v4l2_in;
+ out[0] = value == 0 ? 0 : (value > 0) ? 1 : 0xff;
+ out[2] = min_t(int, abs(value), 0xff);
+
+ return 0;
}
-static s32 uvc_ctrl_get_rel_speed(struct uvc_control_mapping *mapping,
- u8 query, const u8 *data)
+static int uvc_ctrl_get_rel_speed(struct uvc_control_mapping *mapping,
+ u8 query, const void *uvc_in,
+ size_t v4l2_size, void *v4l2_out)
{
unsigned int first = mapping->offset / 8;
- s8 rel = (s8)data[first];
+ u8 value = ((u8 *)uvc_in)[first + 1];
+ s8 sign = ((s8 *)uvc_in)[first];
+ s32 *out = v4l2_out;
+
+ if (WARN_ON(v4l2_size != sizeof(s32)))
+ return -EINVAL;
switch (query) {
case UVC_GET_CUR:
- return (rel == 0) ? 0 : (rel > 0 ? data[first+1]
- : -data[first+1]);
+ *out = (sign == 0) ? 0 : (sign > 0 ? value : -value);
+ return 0;
case UVC_GET_MIN:
- return -data[first+1];
+ *out = -value;
+ return 0;
case UVC_GET_MAX:
case UVC_GET_RES:
case UVC_GET_DEF:
default:
- return data[first+1];
+ *out = value;
+ return 0;
}
}
-static void uvc_ctrl_set_rel_speed(struct uvc_control_mapping *mapping,
- s32 value, u8 *data)
+static int uvc_ctrl_set_rel_speed(struct uvc_control_mapping *mapping,
+ size_t v4l2_size, const void *v4l2_in,
+ void *uvc_out)
{
unsigned int first = mapping->offset / 8;
+ u8 *out = uvc_out;
+ s32 value;
+
+ if (WARN_ON(v4l2_size != sizeof(s32)))
+ return -EINVAL;
- data[first] = value == 0 ? 0 : (value > 0) ? 1 : 0xff;
- data[first+1] = min_t(int, abs(value), 0xff);
+ value = *(u32 *)v4l2_in;
+ out[first] = value == 0 ? 0 : (value > 0) ? 1 : 0xff;
+ out[first + 1] = min_t(int, abs(value), 0xff);
+
+ return 0;
}
static const struct uvc_control_mapping uvc_ctrl_power_line_mapping_limited = {
@@ -547,6 +621,44 @@ end:
return out_mapping;
}
+static int uvc_get_rect(struct uvc_control_mapping *mapping, u8 query,
+ const void *uvc_in, size_t v4l2_size, void *v4l2_out)
+{
+ const struct uvc_rect *uvc_rect = uvc_in;
+ struct v4l2_rect *v4l2_rect = v4l2_out;
+
+ if (WARN_ON(v4l2_size != sizeof(struct v4l2_rect)))
+ return -EINVAL;
+
+ if (uvc_rect->left > uvc_rect->right ||
+ uvc_rect->top > uvc_rect->bottom)
+ return -EIO;
+
+ v4l2_rect->top = uvc_rect->top;
+ v4l2_rect->left = uvc_rect->left;
+ v4l2_rect->height = uvc_rect->bottom - uvc_rect->top + 1;
+ v4l2_rect->width = uvc_rect->right - uvc_rect->left + 1;
+
+ return 0;
+}
+
+static int uvc_set_rect(struct uvc_control_mapping *mapping, size_t v4l2_size,
+ const void *v4l2_in, void *uvc_out)
+{
+ struct uvc_rect *uvc_rect = uvc_out;
+ const struct v4l2_rect *v4l2_rect = v4l2_in;
+
+ if (WARN_ON(v4l2_size != sizeof(struct v4l2_rect)))
+ return -EINVAL;
+
+ uvc_rect->top = min(0xffff, v4l2_rect->top);
+ uvc_rect->left = min(0xffff, v4l2_rect->left);
+ uvc_rect->bottom = min(0xffff, v4l2_rect->top + v4l2_rect->height - 1);
+ uvc_rect->right = min(0xffff, v4l2_rect->left + v4l2_rect->width - 1);
+
+ return 0;
+}
+
static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
{
.id = V4L2_CID_BRIGHTNESS,
@@ -841,6 +953,28 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
.selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
.filter_mapping = uvc_ctrl_filter_plf_mapping,
},
+ {
+ .id = V4L2_CID_UVC_REGION_OF_INTEREST_RECT,
+ .entity = UVC_GUID_UVC_CAMERA,
+ .selector = UVC_CT_REGION_OF_INTEREST_CONTROL,
+ .size = sizeof(struct uvc_rect) * 8,
+ .offset = 0,
+ .v4l2_type = V4L2_CTRL_TYPE_RECT,
+ .data_type = UVC_CTRL_DATA_TYPE_RECT,
+ .get = uvc_get_rect,
+ .set = uvc_set_rect,
+ .name = "Region of Interest Rectangle",
+ },
+ {
+ .id = V4L2_CID_UVC_REGION_OF_INTEREST_AUTO,
+ .entity = UVC_GUID_UVC_CAMERA,
+ .selector = UVC_CT_REGION_OF_INTEREST_CONTROL,
+ .size = 16,
+ .offset = 64,
+ .v4l2_type = V4L2_CTRL_TYPE_BITMASK,
+ .data_type = UVC_CTRL_DATA_TYPE_BITMASK,
+ .name = "Region of Interest Auto Ctrls",
+ },
};
/* ------------------------------------------------------------------------
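
uvc_get_rect()/uvc_set_rect() translate between the UVC wire format, which carries inclusive top/left/bottom/right coordinates, and struct v4l2_rect, which uses top/left plus width/height. A standalone worked example of that arithmetic (the local structs are stand-ins for the kernel types, values are arbitrary):

#include <assert.h>
#include <stdint.h>

struct rect_uvc  { uint16_t top, left, bottom, right; };	/* wire layout */
struct rect_v4l2 { int32_t top, left; uint32_t width, height; };

int main(void)
{
	/* 640x480 region anchored at (left 8, top 16). */
	struct rect_v4l2 v = { .top = 16, .left = 8, .width = 640, .height = 480 };
	struct rect_uvc u = {
		.top    = v.top,
		.left   = v.left,
		.bottom = v.top + v.height - 1,		/* 495, inclusive */
		.right  = v.left + v.width - 1,		/* 647, inclusive */
	};

	/* The reverse mapping, as in uvc_get_rect(), restores width/height. */
	assert(u.bottom - u.top + 1 == v.height);
	assert(u.right - u.left + 1 == v.width);
	return 0;
}
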
@@ -862,20 +996,45 @@ static inline void uvc_clear_bit(u8 *data, int bit)
data[bit >> 3] &= ~(1 << (bit & 7));
}
+static s32 uvc_menu_to_v4l2_menu(struct uvc_control_mapping *mapping, s32 val)
+{
+ unsigned int i;
+
+ for (i = 0; BIT(i) <= mapping->menu_mask; ++i) {
+ u32 menu_value;
+
+ if (!test_bit(i, &mapping->menu_mask))
+ continue;
+
+ menu_value = uvc_mapping_get_menu_value(mapping, i);
+
+ if (menu_value == val)
+ return i;
+ }
+
+ return val;
+}
+
/*
* Extract the bit string specified by mapping->offset and mapping->size
* from the little-endian data stored at 'data' and return the result as
* a signed 32bit integer. Sign extension will be performed if the mapping
* references a signed data type.
*/
-static s32 uvc_get_le_value(struct uvc_control_mapping *mapping,
- u8 query, const u8 *data)
+static int uvc_get_le_value(struct uvc_control_mapping *mapping,
+ u8 query, const void *uvc_in, size_t v4l2_size,
+ void *v4l2_out)
{
- int bits = mapping->size;
int offset = mapping->offset;
+ int bits = mapping->size;
+ const u8 *data = uvc_in;
+ s32 *out = v4l2_out;
s32 value = 0;
u8 mask;
+ if (WARN_ON(v4l2_size != sizeof(s32)))
+ return -EINVAL;
+
data += offset / 8;
offset &= 7;
mask = ((1LL << bits) - 1) << offset;
@@ -896,28 +1055,58 @@ static s32 uvc_get_le_value(struct uvc_control_mapping *mapping,
if (mapping->data_type == UVC_CTRL_DATA_TYPE_SIGNED)
value |= -(value & (1 << (mapping->size - 1)));
- return value;
+ /* If it is a menu, convert from uvc to v4l2. */
+ if (mapping->v4l2_type != V4L2_CTRL_TYPE_MENU) {
+ *out = value;
+ return 0;
+ }
+
+ switch (query) {
+ case UVC_GET_CUR:
+ case UVC_GET_DEF:
+ *out = uvc_menu_to_v4l2_menu(mapping, value);
+ return 0;
+ }
+
+ *out = value;
+ return 0;
}
/*
* Set the bit string specified by mapping->offset and mapping->size
* in the little-endian data stored at 'data' to the value 'value'.
*/
-static void uvc_set_le_value(struct uvc_control_mapping *mapping,
- s32 value, u8 *data)
+static int uvc_set_le_value(struct uvc_control_mapping *mapping,
+ size_t v4l2_size, const void *v4l2_in,
+ void *uvc_out)
{
- int bits = mapping->size;
int offset = mapping->offset;
+ int bits = mapping->size;
+ u8 *data = uvc_out;
+ s32 value;
u8 mask;
- /*
- * According to the v4l2 spec, writing any value to a button control
- * should result in the action belonging to the button control being
- * triggered. UVC devices however want to see a 1 written -> override
- * value.
- */
- if (mapping->v4l2_type == V4L2_CTRL_TYPE_BUTTON)
+ if (WARN_ON(v4l2_size != sizeof(s32)))
+ return -EINVAL;
+
+ value = *(s32 *)v4l2_in;
+
+ switch (mapping->v4l2_type) {
+ case V4L2_CTRL_TYPE_MENU:
+ value = uvc_mapping_get_menu_value(mapping, value);
+ break;
+ case V4L2_CTRL_TYPE_BUTTON:
+ /*
+ * According to the v4l2 spec, writing any value to a button
+ * control should result in the action belonging to the button
+ * control being triggered. UVC devices however want to see a 1
+ * written -> override value.
+ */
value = -1;
+ break;
+ default:
+ break;
+ }
data += offset / 8;
offset &= 7;
@@ -929,6 +1118,8 @@ static void uvc_set_le_value(struct uvc_control_mapping *mapping,
bits -= 8 - offset;
offset = 0;
}
+
+ return 0;
}
/* ------------------------------------------------------------------------
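
uvc_get_le_value() still extracts a mapping->size-bit field starting at bit mapping->offset from the little-endian UVC payload; only the calling convention changed to the void-pointer/size interface. A standalone worked example of the extraction for a hypothetical 10-bit field at bit offset 6:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Payload bytes as they would arrive from the device. */
	const uint8_t data[2] = { 0x40, 0xa9 };
	uint16_t le = data[0] | (data[1] << 8);	/* little-endian view: 0xa940 */
	/* Shift out the 6 offset bits, keep the 10 field bits. */
	uint16_t field = (le >> 6) & 0x3ff;

	/* Matches what uvc_get_le_value() yields for an unsigned mapping. */
	assert(field == 0x2a5);
	return 0;
}
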
@@ -947,7 +1138,7 @@ static int uvc_entity_match_guid(const struct uvc_entity *entity,
static void __uvc_find_control(struct uvc_entity *entity, u32 v4l2_id,
struct uvc_control_mapping **mapping, struct uvc_control **control,
- int next)
+ int next, int next_compound)
{
struct uvc_control *ctrl;
struct uvc_control_mapping *map;
@@ -962,14 +1153,16 @@ static void __uvc_find_control(struct uvc_entity *entity, u32 v4l2_id,
continue;
list_for_each_entry(map, &ctrl->info.mappings, list) {
- if ((map->id == v4l2_id) && !next) {
+ if (map->id == v4l2_id && !next && !next_compound) {
*control = ctrl;
*mapping = map;
return;
}
if ((*mapping == NULL || (*mapping)->id > map->id) &&
- (map->id > v4l2_id) && next) {
+ (map->id > v4l2_id) &&
+ (uvc_ctrl_mapping_is_compound(map) ?
+ next_compound : next)) {
*control = ctrl;
*mapping = map;
}
@@ -983,6 +1176,7 @@ static struct uvc_control *uvc_find_control(struct uvc_video_chain *chain,
struct uvc_control *ctrl = NULL;
struct uvc_entity *entity;
int next = v4l2_id & V4L2_CTRL_FLAG_NEXT_CTRL;
+ int next_compound = v4l2_id & V4L2_CTRL_FLAG_NEXT_COMPOUND;
*mapping = NULL;
@@ -991,12 +1185,13 @@ static struct uvc_control *uvc_find_control(struct uvc_video_chain *chain,
/* Find the control. */
list_for_each_entry(entity, &chain->entities, chain) {
- __uvc_find_control(entity, v4l2_id, mapping, &ctrl, next);
- if (ctrl && !next)
+ __uvc_find_control(entity, v4l2_id, mapping, &ctrl, next,
+ next_compound);
+ if (ctrl && !next && !next_compound)
return ctrl;
}
- if (ctrl == NULL && !next)
+ if (!ctrl && !next && !next_compound)
uvc_dbg(chain->dev, CONTROL, "Control 0x%08x not found\n",
v4l2_id);
@@ -1060,32 +1255,6 @@ static int uvc_ctrl_populate_cache(struct uvc_video_chain *chain,
return 0;
}
-static s32 __uvc_ctrl_get_value(struct uvc_control_mapping *mapping,
- const u8 *data)
-{
- s32 value = mapping->get(mapping, UVC_GET_CUR, data);
-
- if (mapping->v4l2_type == V4L2_CTRL_TYPE_MENU) {
- unsigned int i;
-
- for (i = 0; BIT(i) <= mapping->menu_mask; ++i) {
- u32 menu_value;
-
- if (!test_bit(i, &mapping->menu_mask))
- continue;
-
- menu_value = uvc_mapping_get_menu_value(mapping, i);
-
- if (menu_value == value) {
- value = i;
- break;
- }
- }
- }
-
- return value;
-}
-
static int __uvc_ctrl_load_cur(struct uvc_video_chain *chain,
struct uvc_control *ctrl)
{
@@ -1136,8 +1305,8 @@ static int __uvc_ctrl_get(struct uvc_video_chain *chain,
if (ret < 0)
return ret;
- *value = __uvc_ctrl_get_value(mapping,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
+ *value = uvc_mapping_get_s32(mapping, UVC_GET_CUR,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
return 0;
}
@@ -1145,7 +1314,8 @@ static int __uvc_ctrl_get(struct uvc_video_chain *chain,
static int __uvc_query_v4l2_class(struct uvc_video_chain *chain, u32 req_id,
u32 found_id)
{
- bool find_next = req_id & V4L2_CTRL_FLAG_NEXT_CTRL;
+ bool find_next = req_id &
+ (V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND);
unsigned int i;
req_id &= V4L2_CTRL_ID_MASK;
@@ -1167,7 +1337,8 @@ static int __uvc_query_v4l2_class(struct uvc_video_chain *chain, u32 req_id,
}
static int uvc_query_v4l2_class(struct uvc_video_chain *chain, u32 req_id,
- u32 found_id, struct v4l2_queryctrl *v4l2_ctrl)
+ u32 found_id,
+ struct v4l2_query_ext_ctrl *v4l2_ctrl)
{
int idx;
@@ -1185,6 +1356,37 @@ static int uvc_query_v4l2_class(struct uvc_video_chain *chain, u32 req_id,
return 0;
}
+static bool uvc_ctrl_is_readable(u32 which, struct uvc_control *ctrl,
+ struct uvc_control_mapping *mapping)
+{
+ if (which == V4L2_CTRL_WHICH_CUR_VAL)
+ return !!(ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR);
+
+ if (which == V4L2_CTRL_WHICH_DEF_VAL)
+ return !!(ctrl->info.flags & UVC_CTRL_FLAG_GET_DEF);
+
+ /* Types with implicit boundaries. */
+ switch (mapping->v4l2_type) {
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ case V4L2_CTRL_TYPE_BUTTON:
+ return true;
+ case V4L2_CTRL_TYPE_BITMASK:
+ return (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES) ||
+ (ctrl->info.flags & UVC_CTRL_FLAG_GET_MAX);
+ default:
+ break;
+ }
+
+ if (which == V4L2_CTRL_WHICH_MIN_VAL)
+ return !!(ctrl->info.flags & UVC_CTRL_FLAG_GET_MIN);
+
+ if (which == V4L2_CTRL_WHICH_MAX_VAL)
+ return !!(ctrl->info.flags & UVC_CTRL_FLAG_GET_MAX);
+
+ return false;
+}
+
/*
* Check if control @v4l2_id can be accessed by the given control @ioctl
* (VIDIOC_G_EXT_CTRLS, VIDIOC_TRY_EXT_CTRLS or VIDIOC_S_EXT_CTRLS).
@@ -1203,7 +1405,6 @@ int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
struct uvc_control *master_ctrl = NULL;
struct uvc_control_mapping *mapping;
struct uvc_control *ctrl;
- bool read = ioctl == VIDIOC_G_EXT_CTRLS;
s32 val;
int ret;
int i;
@@ -1215,10 +1416,10 @@ int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
if (!ctrl)
return -EINVAL;
- if (!(ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR) && read)
- return -EACCES;
+ if (ioctl == VIDIOC_G_EXT_CTRLS)
+ return uvc_ctrl_is_readable(ctrls->which, ctrl, mapping);
- if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR) && !read)
+ if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR))
return -EACCES;
if (ioctl != VIDIOC_S_EXT_CTRLS || !mapping->master_id)
@@ -1235,10 +1436,12 @@ int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
}
__uvc_find_control(ctrl->entity, mapping->master_id, &master_map,
- &master_ctrl, 0);
+ &master_ctrl, 0, 0);
if (!master_ctrl || !(master_ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR))
return 0;
+ if (WARN_ON(uvc_ctrl_mapping_is_compound(master_map)))
+ return -EIO;
ret = __uvc_ctrl_get(chain, master_ctrl, master_map, &val);
if (ret >= 0 && val != mapping->master_manual)
@@ -1270,50 +1473,21 @@ static u32 uvc_get_ctrl_bitmap(struct uvc_control *ctrl,
* as supported.
*/
if (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)
- return mapping->get(mapping, UVC_GET_RES,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
+ return uvc_mapping_get_s32(mapping, UVC_GET_RES,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
if (ctrl->info.flags & UVC_CTRL_FLAG_GET_MAX)
- return mapping->get(mapping, UVC_GET_MAX,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX));
+ return uvc_mapping_get_s32(mapping, UVC_GET_MAX,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX));
return ~0;
}
-static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
- struct uvc_control *ctrl,
- struct uvc_control_mapping *mapping,
- struct v4l2_queryctrl *v4l2_ctrl)
+static int __uvc_queryctrl_boundaries(struct uvc_video_chain *chain,
+ struct uvc_control *ctrl,
+ struct uvc_control_mapping *mapping,
+ struct v4l2_query_ext_ctrl *v4l2_ctrl)
{
- struct uvc_control_mapping *master_map = NULL;
- struct uvc_control *master_ctrl = NULL;
- unsigned int i;
-
- memset(v4l2_ctrl, 0, sizeof(*v4l2_ctrl));
- v4l2_ctrl->id = mapping->id;
- v4l2_ctrl->type = mapping->v4l2_type;
- strscpy(v4l2_ctrl->name, uvc_map_get_name(mapping),
- sizeof(v4l2_ctrl->name));
- v4l2_ctrl->flags = 0;
-
- if (!(ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR))
- v4l2_ctrl->flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
- if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR))
- v4l2_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
-
- if (mapping->master_id)
- __uvc_find_control(ctrl->entity, mapping->master_id,
- &master_map, &master_ctrl, 0);
- if (master_ctrl && (master_ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR)) {
- s32 val;
- int ret = __uvc_ctrl_get(chain, master_ctrl, master_map, &val);
- if (ret < 0)
- return ret;
-
- if (val != mapping->master_manual)
- v4l2_ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
- }
-
if (!ctrl->cached) {
int ret = uvc_ctrl_populate_cache(chain, ctrl);
if (ret < 0)
@@ -1321,8 +1495,8 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
}
if (ctrl->info.flags & UVC_CTRL_FLAG_GET_DEF) {
- v4l2_ctrl->default_value = mapping->get(mapping, UVC_GET_DEF,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_DEF));
+ v4l2_ctrl->default_value = uvc_mapping_get_s32(mapping,
+ UVC_GET_DEF, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_DEF));
}
switch (mapping->v4l2_type) {
@@ -1330,21 +1504,6 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
v4l2_ctrl->minimum = ffs(mapping->menu_mask) - 1;
v4l2_ctrl->maximum = fls(mapping->menu_mask) - 1;
v4l2_ctrl->step = 1;
-
- for (i = 0; BIT(i) <= mapping->menu_mask; ++i) {
- u32 menu_value;
-
- if (!test_bit(i, &mapping->menu_mask))
- continue;
-
- menu_value = uvc_mapping_get_menu_value(mapping, i);
-
- if (menu_value == v4l2_ctrl->default_value) {
- v4l2_ctrl->default_value = i;
- break;
- }
- }
-
return 0;
case V4L2_CTRL_TYPE_BOOLEAN:
@@ -1370,22 +1529,95 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
}
if (ctrl->info.flags & UVC_CTRL_FLAG_GET_MIN)
- v4l2_ctrl->minimum = mapping->get(mapping, UVC_GET_MIN,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MIN));
+ v4l2_ctrl->minimum = uvc_mapping_get_s32(mapping, UVC_GET_MIN,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MIN));
+ else
+ v4l2_ctrl->minimum = 0;
if (ctrl->info.flags & UVC_CTRL_FLAG_GET_MAX)
- v4l2_ctrl->maximum = mapping->get(mapping, UVC_GET_MAX,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX));
+ v4l2_ctrl->maximum = uvc_mapping_get_s32(mapping, UVC_GET_MAX,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX));
+ else
+ v4l2_ctrl->maximum = 0;
if (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)
- v4l2_ctrl->step = mapping->get(mapping, UVC_GET_RES,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
+ v4l2_ctrl->step = uvc_mapping_get_s32(mapping, UVC_GET_RES,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
+ else
+ v4l2_ctrl->step = 0;
return 0;
}
+static size_t uvc_mapping_v4l2_size(struct uvc_control_mapping *mapping)
+{
+ if (mapping->v4l2_type == V4L2_CTRL_TYPE_RECT)
+ return sizeof(struct v4l2_rect);
+
+ if (uvc_ctrl_mapping_is_compound(mapping))
+ return DIV_ROUND_UP(mapping->size, 8);
+
+ return sizeof(s32);
+}
+
+static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
+ struct uvc_control *ctrl,
+ struct uvc_control_mapping *mapping,
+ struct v4l2_query_ext_ctrl *v4l2_ctrl)
+{
+ struct uvc_control_mapping *master_map = NULL;
+ struct uvc_control *master_ctrl = NULL;
+
+ memset(v4l2_ctrl, 0, sizeof(*v4l2_ctrl));
+ v4l2_ctrl->id = mapping->id;
+ v4l2_ctrl->type = mapping->v4l2_type;
+ strscpy(v4l2_ctrl->name, uvc_map_get_name(mapping),
+ sizeof(v4l2_ctrl->name));
+ v4l2_ctrl->flags = 0;
+
+ if (!(ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR))
+ v4l2_ctrl->flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
+ if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR))
+ v4l2_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ if ((ctrl->info.flags & UVC_CTRL_FLAG_GET_MAX) &&
+ (ctrl->info.flags & UVC_CTRL_FLAG_GET_MIN))
+ v4l2_ctrl->flags |= V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX;
+
+ if (mapping->master_id)
+ __uvc_find_control(ctrl->entity, mapping->master_id,
+ &master_map, &master_ctrl, 0, 0);
+ if (master_ctrl && (master_ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR)) {
+ s32 val;
+ int ret;
+
+ if (WARN_ON(uvc_ctrl_mapping_is_compound(master_map)))
+ return -EIO;
+
+ ret = __uvc_ctrl_get(chain, master_ctrl, master_map, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val != mapping->master_manual)
+ v4l2_ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
+ }
+
+ v4l2_ctrl->elem_size = uvc_mapping_v4l2_size(mapping);
+ v4l2_ctrl->elems = 1;
+
+ if (v4l2_ctrl->type >= V4L2_CTRL_COMPOUND_TYPES) {
+ v4l2_ctrl->flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
+ v4l2_ctrl->default_value = 0;
+ v4l2_ctrl->minimum = 0;
+ v4l2_ctrl->maximum = 0;
+ v4l2_ctrl->step = 0;
+ return 0;
+ }
+
+ return __uvc_queryctrl_boundaries(chain, ctrl, mapping, v4l2_ctrl);
+}
+
int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
- struct v4l2_queryctrl *v4l2_ctrl)
+ struct v4l2_query_ext_ctrl *v4l2_ctrl)
{
struct uvc_control *ctrl;
struct uvc_control_mapping *mapping;
@@ -1511,7 +1743,7 @@ static void uvc_ctrl_fill_event(struct uvc_video_chain *chain,
struct uvc_control_mapping *mapping,
s32 value, u32 changes)
{
- struct v4l2_queryctrl v4l2_ctrl;
+ struct v4l2_query_ext_ctrl v4l2_ctrl;
__uvc_query_v4l2_ctrl(chain, ctrl, mapping, &v4l2_ctrl);
@@ -1569,11 +1801,12 @@ static void uvc_ctrl_send_slave_event(struct uvc_video_chain *chain,
u32 changes = V4L2_EVENT_CTRL_CH_FLAGS;
s32 val = 0;
- __uvc_find_control(master->entity, slave_id, &mapping, &ctrl, 0);
+ __uvc_find_control(master->entity, slave_id, &mapping, &ctrl, 0, 0);
if (ctrl == NULL)
return;
- if (__uvc_ctrl_get(chain, ctrl, mapping, &val) == 0)
+ if (uvc_ctrl_mapping_is_compound(mapping) ||
+ __uvc_ctrl_get(chain, ctrl, mapping, &val) == 0)
changes |= V4L2_EVENT_CTRL_CH_VALUE;
uvc_ctrl_send_event(chain, handle, ctrl, mapping, val, changes);
@@ -1630,7 +1863,12 @@ void uvc_ctrl_status_event(struct uvc_video_chain *chain,
uvc_ctrl_set_handle(handle, ctrl, NULL);
list_for_each_entry(mapping, &ctrl->info.mappings, list) {
- s32 value = __uvc_ctrl_get_value(mapping, data);
+ s32 value;
+
+ if (uvc_ctrl_mapping_is_compound(mapping))
+ value = 0;
+ else
+ value = uvc_mapping_get_s32(mapping, UVC_GET_CUR, data);
/*
* handle may be NULL here if the device sends auto-update
@@ -1714,6 +1952,7 @@ static void uvc_ctrl_send_events(struct uvc_fh *handle,
for (i = 0; i < xctrls_count; ++i) {
u32 changes = V4L2_EVENT_CTRL_CH_VALUE;
+ s32 value;
ctrl = uvc_find_control(handle->chain, xctrls[i].id, &mapping);
if (ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
@@ -1738,6 +1977,10 @@ static void uvc_ctrl_send_events(struct uvc_fh *handle,
slave_id);
}
+ if (uvc_ctrl_mapping_is_compound(mapping))
+ value = 0;
+ else
+ value = xctrls[i].value;
/*
* If the master is being modified in the same transaction
* flags may change too.
@@ -1748,7 +1991,7 @@ static void uvc_ctrl_send_events(struct uvc_fh *handle,
changes |= V4L2_EVENT_CTRL_CH_FLAGS;
uvc_ctrl_send_event(handle->chain, handle, ctrl, mapping,
- xctrls[i].value, changes);
+ value, changes);
}
}
@@ -1780,7 +2023,8 @@ static int uvc_ctrl_add_event(struct v4l2_subscribed_event *sev, unsigned elems)
u32 changes = V4L2_EVENT_CTRL_CH_FLAGS;
s32 val = 0;
- if (__uvc_ctrl_get(handle->chain, ctrl, mapping, &val) == 0)
+ if (uvc_ctrl_mapping_is_compound(mapping) ||
+ __uvc_ctrl_get(handle->chain, ctrl, mapping, &val) == 0)
changes |= V4L2_EVENT_CTRL_CH_VALUE;
uvc_ctrl_fill_event(handle->chain, &ev, ctrl, mapping, val,
@@ -1920,7 +2164,7 @@ static int uvc_ctrl_find_ctrl_idx(struct uvc_entity *entity,
for (i = 0; i < ctrls->count; i++) {
__uvc_find_control(entity, ctrls->controls[i].id, &mapping,
- &ctrl_found, 0);
+ &ctrl_found, 0, 0);
if (uvc_control == ctrl_found)
return i;
}
@@ -1956,8 +2200,120 @@ done:
return ret;
}
-int uvc_ctrl_get(struct uvc_video_chain *chain,
- struct v4l2_ext_control *xctrl)
+static int uvc_mapping_get_xctrl_compound(struct uvc_video_chain *chain,
+ struct uvc_control *ctrl,
+ struct uvc_control_mapping *mapping,
+ u32 which,
+ struct v4l2_ext_control *xctrl)
+{
+ u8 *data __free(kfree) = NULL;
+ size_t size;
+ u8 query;
+ int ret;
+ int id;
+
+ switch (which) {
+ case V4L2_CTRL_WHICH_CUR_VAL:
+ id = UVC_CTRL_DATA_CURRENT;
+ query = UVC_GET_CUR;
+ break;
+ case V4L2_CTRL_WHICH_MIN_VAL:
+ id = UVC_CTRL_DATA_MIN;
+ query = UVC_GET_MIN;
+ break;
+ case V4L2_CTRL_WHICH_MAX_VAL:
+ id = UVC_CTRL_DATA_MAX;
+ query = UVC_GET_MAX;
+ break;
+ case V4L2_CTRL_WHICH_DEF_VAL:
+ id = UVC_CTRL_DATA_DEF;
+ query = UVC_GET_DEF;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ size = uvc_mapping_v4l2_size(mapping);
+ if (xctrl->size < size) {
+ xctrl->size = size;
+ return -ENOSPC;
+ }
+
+ data = kmalloc(size, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (which == V4L2_CTRL_WHICH_CUR_VAL)
+ ret = __uvc_ctrl_load_cur(chain, ctrl);
+ else
+ ret = uvc_ctrl_populate_cache(chain, ctrl);
+
+ if (ret < 0)
+ return ret;
+
+ ret = mapping->get(mapping, query, uvc_ctrl_data(ctrl, id), size, data);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * v4l2_ext_control does not have enough room to fit a compound control.
+ * Instead, the value is in the user memory at xctrl->ptr. The v4l2
+ * ioctl helper does not copy it for us.
+ */
+ return copy_to_user(xctrl->ptr, data, size) ? -EFAULT : 0;
+}
+
+static int uvc_mapping_get_xctrl_std(struct uvc_video_chain *chain,
+ struct uvc_control *ctrl,
+ struct uvc_control_mapping *mapping,
+ u32 which, struct v4l2_ext_control *xctrl)
+{
+ struct v4l2_query_ext_ctrl qec;
+ int ret;
+
+ switch (which) {
+ case V4L2_CTRL_WHICH_CUR_VAL:
+ return __uvc_ctrl_get(chain, ctrl, mapping, &xctrl->value);
+ case V4L2_CTRL_WHICH_DEF_VAL:
+ case V4L2_CTRL_WHICH_MIN_VAL:
+ case V4L2_CTRL_WHICH_MAX_VAL:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = __uvc_queryctrl_boundaries(chain, ctrl, mapping, &qec);
+ if (ret < 0)
+ return ret;
+
+ switch (which) {
+ case V4L2_CTRL_WHICH_DEF_VAL:
+ xctrl->value = qec.default_value;
+ break;
+ case V4L2_CTRL_WHICH_MIN_VAL:
+ xctrl->value = qec.minimum;
+ break;
+ case V4L2_CTRL_WHICH_MAX_VAL:
+ xctrl->value = qec.maximum;
+ break;
+ }
+
+ return 0;
+}
+
+static int uvc_mapping_get_xctrl(struct uvc_video_chain *chain,
+ struct uvc_control *ctrl,
+ struct uvc_control_mapping *mapping,
+ u32 which, struct v4l2_ext_control *xctrl)
+{
+ if (uvc_ctrl_mapping_is_compound(mapping))
+ return uvc_mapping_get_xctrl_compound(chain, ctrl, mapping,
+ which, xctrl);
+ return uvc_mapping_get_xctrl_std(chain, ctrl, mapping, which, xctrl);
+}
+
+int uvc_ctrl_get(struct uvc_video_chain *chain, u32 which,
+ struct v4l2_ext_control *xctrl)
{
struct uvc_control *ctrl;
struct uvc_control_mapping *mapping;
@@ -1966,36 +2322,23 @@ int uvc_ctrl_get(struct uvc_video_chain *chain,
return -EACCES;
ctrl = uvc_find_control(chain, xctrl->id, &mapping);
- if (ctrl == NULL)
+ if (!ctrl)
return -EINVAL;
- return __uvc_ctrl_get(chain, ctrl, mapping, &xctrl->value);
+ return uvc_mapping_get_xctrl(chain, ctrl, mapping, which, xctrl);
}
-int uvc_ctrl_set(struct uvc_fh *handle,
- struct v4l2_ext_control *xctrl)
+static int uvc_ctrl_clamp(struct uvc_video_chain *chain,
+ struct uvc_control *ctrl,
+ struct uvc_control_mapping *mapping,
+ s32 *value_in_out)
{
- struct uvc_video_chain *chain = handle->chain;
- struct uvc_control *ctrl;
- struct uvc_control_mapping *mapping;
- s32 value;
+ s32 value = *value_in_out;
u32 step;
s32 min;
s32 max;
int ret;
- lockdep_assert_held(&chain->ctrl_mutex);
-
- if (__uvc_query_v4l2_class(chain, xctrl->id, 0) >= 0)
- return -EACCES;
-
- ctrl = uvc_find_control(chain, xctrl->id, &mapping);
- if (ctrl == NULL)
- return -EINVAL;
- if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR))
- return -EACCES;
-
- /* Clamp out of range values. */
switch (mapping->v4l2_type) {
case V4L2_CTRL_TYPE_INTEGER:
if (!ctrl->cached) {
@@ -2004,23 +2347,22 @@ int uvc_ctrl_set(struct uvc_fh *handle,
return ret;
}
- min = mapping->get(mapping, UVC_GET_MIN,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MIN));
- max = mapping->get(mapping, UVC_GET_MAX,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX));
- step = mapping->get(mapping, UVC_GET_RES,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
+ min = uvc_mapping_get_s32(mapping, UVC_GET_MIN,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MIN));
+ max = uvc_mapping_get_s32(mapping, UVC_GET_MAX,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX));
+ step = uvc_mapping_get_s32(mapping, UVC_GET_RES,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
if (step == 0)
step = 1;
- xctrl->value = min + DIV_ROUND_CLOSEST((u32)(xctrl->value - min),
- step) * step;
+ value = min + DIV_ROUND_CLOSEST((u32)(value - min), step) * step;
if (mapping->data_type == UVC_CTRL_DATA_TYPE_SIGNED)
- xctrl->value = clamp(xctrl->value, min, max);
+ value = clamp(value, min, max);
else
- xctrl->value = clamp_t(u32, xctrl->value, min, max);
- value = xctrl->value;
- break;
+ value = clamp_t(u32, value, min, max);
+ *value_in_out = value;
+ return 0;
case V4L2_CTRL_TYPE_BITMASK:
if (!ctrl->cached) {
@@ -2029,47 +2371,102 @@ int uvc_ctrl_set(struct uvc_fh *handle,
return ret;
}
- xctrl->value &= uvc_get_ctrl_bitmap(ctrl, mapping);
- value = xctrl->value;
- break;
+ value &= uvc_get_ctrl_bitmap(ctrl, mapping);
+ *value_in_out = value;
+ return 0;
case V4L2_CTRL_TYPE_BOOLEAN:
- xctrl->value = clamp(xctrl->value, 0, 1);
- value = xctrl->value;
- break;
+ *value_in_out = clamp(value, 0, 1);
+ return 0;
case V4L2_CTRL_TYPE_MENU:
- if (xctrl->value < (ffs(mapping->menu_mask) - 1) ||
- xctrl->value > (fls(mapping->menu_mask) - 1))
+ if (value < (ffs(mapping->menu_mask) - 1) ||
+ value > (fls(mapping->menu_mask) - 1))
return -ERANGE;
- if (!test_bit(xctrl->value, &mapping->menu_mask))
+ if (!test_bit(value, &mapping->menu_mask))
return -EINVAL;
- value = uvc_mapping_get_menu_value(mapping, xctrl->value);
-
/*
* Valid menu indices are reported by the GET_RES request for
* UVC controls that support it.
*/
if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK) {
+ int val = uvc_mapping_get_menu_value(mapping, value);
if (!ctrl->cached) {
ret = uvc_ctrl_populate_cache(chain, ctrl);
if (ret < 0)
return ret;
}
- if (!(uvc_get_ctrl_bitmap(ctrl, mapping) & value))
+ if (!(uvc_get_ctrl_bitmap(ctrl, mapping) & val))
return -EINVAL;
}
-
- break;
+ return 0;
default:
- value = xctrl->value;
- break;
+ return 0;
}
+ return 0;
+}
+
+static int uvc_mapping_set_xctrl_compound(struct uvc_control *ctrl,
+ struct uvc_control_mapping *mapping,
+ struct v4l2_ext_control *xctrl)
+{
+ u8 *data __free(kfree) = NULL;
+ size_t size = uvc_mapping_v4l2_size(mapping);
+
+ if (xctrl->size != size)
+ return -EINVAL;
+
+ /*
+ * v4l2_ext_control does not have enough room to fit a compound control.
+ * Instead, the value is in the user memory at xctrl->ptr. The v4l2
+ * ioctl helper does not copy it for us.
+ */
+ data = memdup_user(xctrl->ptr, size);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return mapping->set(mapping, size, data,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
+}
+
+static int uvc_mapping_set_xctrl(struct uvc_control *ctrl,
+ struct uvc_control_mapping *mapping,
+ struct v4l2_ext_control *xctrl)
+{
+ if (uvc_ctrl_mapping_is_compound(mapping))
+ return uvc_mapping_set_xctrl_compound(ctrl, mapping, xctrl);
+
+ uvc_mapping_set_s32(mapping, xctrl->value,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
+ return 0;
+}
+
+int uvc_ctrl_set(struct uvc_fh *handle, struct v4l2_ext_control *xctrl)
+{
+ struct uvc_video_chain *chain = handle->chain;
+ struct uvc_control_mapping *mapping;
+ struct uvc_control *ctrl;
+ int ret;
+
+ lockdep_assert_held(&chain->ctrl_mutex);
+
+ if (__uvc_query_v4l2_class(chain, xctrl->id, 0) >= 0)
+ return -EACCES;
+
+ ctrl = uvc_find_control(chain, xctrl->id, &mapping);
+ if (!ctrl)
+ return -EINVAL;
+ if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR))
+ return -EACCES;
+
+ ret = uvc_ctrl_clamp(chain, ctrl, mapping, &xctrl->value);
+ if (ret)
+ return ret;
/*
* If the mapping doesn't span the whole UVC control, the current value
* needs to be loaded from the device to perform the read-modify-write
@@ -2088,8 +2485,9 @@ int uvc_ctrl_set(struct uvc_fh *handle,
ctrl->info.size);
}
- mapping->set(mapping, value,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
+ ret = uvc_mapping_set_xctrl(ctrl, mapping, xctrl);
+ if (ret)
+ return ret;
ctrl->dirty = 1;
ctrl->modified = 1;
@@ -2464,6 +2862,7 @@ static int __uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
struct uvc_control_mapping *map;
unsigned int size;
unsigned int i;
+ int ret;
/*
* Most mappings come from static kernel data, and need to be duplicated.
@@ -2504,6 +2903,12 @@ static int __uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
goto err_nomem;
}
+ if (uvc_ctrl_mapping_is_compound(map))
+ if (WARN_ON(!map->set || !map->get)) {
+ ret = -EIO;
+ goto free_mem;
+ }
+
if (map->get == NULL)
map->get = uvc_get_le_value;
if (map->set == NULL)
@@ -2525,11 +2930,13 @@ static int __uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
return 0;
err_nomem:
+ ret = -ENOMEM;
+free_mem:
kfree(map->menu_names);
kfree(map->menu_mapping);
kfree(map->name);
kfree(map);
- return -ENOMEM;
+ return ret;
}
int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
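
uvc_mapping_get_xctrl_compound() returns compound payloads through xctrl->ptr, so user space supplies a buffer and its size instead of reading xctrl->value. A rough user-space sketch for the new region-of-interest rectangle; it assumes V4L2_CID_UVC_REGION_OF_INTEREST_RECT comes from the UAPI header updated alongside this series, and trims error handling:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <linux/uvcvideo.h>	/* assumed home of the ROI control ID */

static int get_roi(int fd)
{
	struct v4l2_rect rect;
	struct v4l2_ext_control ctrl = {
		.id   = V4L2_CID_UVC_REGION_OF_INTEREST_RECT,
		.size = sizeof(rect),	/* a too-small size comes back as ENOSPC */
		.ptr  = &rect,
	};
	struct v4l2_ext_controls ctrls = {
		.which    = V4L2_CTRL_WHICH_CUR_VAL,	/* MIN_VAL/MAX_VAL now accepted too */
		.count    = 1,
		.controls = &ctrl,
	};

	if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls))
		return -1;

	printf("ROI %ux%u at (%d,%d)\n", rect.width, rect.height,
	       rect.left, rect.top);
	return 0;
}
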
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index deadbcea5e22..107e0fafd80f 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -37,6 +37,8 @@ static unsigned int uvc_quirks_param = -1;
unsigned int uvc_dbg_param;
unsigned int uvc_timeout_param = UVC_CTRL_STREAMING_TIMEOUT;
+static struct usb_driver uvc_driver;
+
/* ------------------------------------------------------------------------
* Utility functions
*/
@@ -546,7 +548,7 @@ static int uvc_parse_streaming(struct uvc_device *dev,
return -EINVAL;
}
- if (usb_driver_claim_interface(&uvc_driver.driver, intf, dev)) {
+ if (usb_driver_claim_interface(&uvc_driver, intf, dev)) {
uvc_dbg(dev, DESCR,
"device %d interface %d is already claimed\n",
dev->udev->devnum,
@@ -556,7 +558,7 @@ static int uvc_parse_streaming(struct uvc_device *dev,
streaming = uvc_stream_new(dev, intf);
if (streaming == NULL) {
- usb_driver_release_interface(&uvc_driver.driver, intf);
+ usb_driver_release_interface(&uvc_driver, intf);
return -ENOMEM;
}
@@ -779,7 +781,7 @@ static int uvc_parse_streaming(struct uvc_device *dev,
return 0;
error:
- usb_driver_release_interface(&uvc_driver.driver, intf);
+ usb_driver_release_interface(&uvc_driver, intf);
uvc_stream_delete(streaming);
return ret;
}
@@ -1922,8 +1924,7 @@ static void uvc_delete(struct kref *kref)
struct uvc_streaming *streaming;
streaming = list_entry(p, struct uvc_streaming, list);
- usb_driver_release_interface(&uvc_driver.driver,
- streaming->intf);
+ usb_driver_release_interface(&uvc_driver, streaming->intf);
uvc_stream_delete(streaming);
}
@@ -3062,6 +3063,15 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceProtocol = 0,
.driver_info = UVC_INFO_QUIRK(UVC_QUIRK_PROBE_MINMAX
| UVC_QUIRK_IGNORE_SELECTOR_UNIT) },
+ /* Actions Microelectronics Co. Display capture-UVC05 */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x1de1,
+ .idProduct = 0xf105,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_DISABLE_AUTOSUSPEND) },
/* NXP Semiconductors IR VIDEO */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
@@ -3196,17 +3206,15 @@ static const struct usb_device_id uvc_ids[] = {
MODULE_DEVICE_TABLE(usb, uvc_ids);
-struct uvc_driver uvc_driver = {
- .driver = {
- .name = "uvcvideo",
- .probe = uvc_probe,
- .disconnect = uvc_disconnect,
- .suspend = uvc_suspend,
- .resume = uvc_resume,
- .reset_resume = uvc_reset_resume,
- .id_table = uvc_ids,
- .supports_autosuspend = 1,
- },
+static struct usb_driver uvc_driver = {
+ .name = "uvcvideo",
+ .probe = uvc_probe,
+ .disconnect = uvc_disconnect,
+ .suspend = uvc_suspend,
+ .resume = uvc_resume,
+ .reset_resume = uvc_reset_resume,
+ .id_table = uvc_ids,
+ .supports_autosuspend = 1,
};
static int __init uvc_init(void)
@@ -3215,7 +3223,7 @@ static int __init uvc_init(void)
uvc_debugfs_init();
- ret = usb_register(&uvc_driver.driver);
+ ret = usb_register(&uvc_driver);
if (ret < 0) {
uvc_debugfs_cleanup();
return ret;
@@ -3226,7 +3234,7 @@ static int __init uvc_init(void)
static void __exit uvc_cleanup(void)
{
- usb_deregister(&uvc_driver.driver);
+ usb_deregister(&uvc_driver);
uvc_debugfs_cleanup();
}
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 93c6cdb23881..39065db44e86 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -108,6 +108,12 @@ static int uvc_ioctl_xu_ctrl_map(struct uvc_video_chain *chain,
struct uvc_control_mapping *map;
int ret;
+ if (xmap->data_type > UVC_CTRL_DATA_TYPE_BITMASK) {
+ uvc_dbg(chain->dev, CONTROL,
+ "Unsupported UVC data type %u\n", xmap->data_type);
+ return -EINVAL;
+ }
+
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (map == NULL)
return -ENOMEM;
@@ -963,42 +969,13 @@ static int uvc_ioctl_s_input(struct file *file, void *fh, unsigned int input)
return ret;
}
-static int uvc_ioctl_queryctrl(struct file *file, void *fh,
- struct v4l2_queryctrl *qc)
-{
- struct uvc_fh *handle = fh;
- struct uvc_video_chain *chain = handle->chain;
-
- return uvc_query_v4l2_ctrl(chain, qc);
-}
-
static int uvc_ioctl_query_ext_ctrl(struct file *file, void *fh,
struct v4l2_query_ext_ctrl *qec)
{
struct uvc_fh *handle = fh;
struct uvc_video_chain *chain = handle->chain;
- struct v4l2_queryctrl qc = { qec->id };
- int ret;
-
- ret = uvc_query_v4l2_ctrl(chain, &qc);
- if (ret)
- return ret;
-
- qec->id = qc.id;
- qec->type = qc.type;
- strscpy(qec->name, qc.name, sizeof(qec->name));
- qec->minimum = qc.minimum;
- qec->maximum = qc.maximum;
- qec->step = qc.step;
- qec->default_value = qc.default_value;
- qec->flags = qc.flags;
- qec->elem_size = 4;
- qec->elems = 1;
- qec->nr_of_dims = 0;
- memset(qec->dims, 0, sizeof(qec->dims));
- memset(qec->reserved, 0, sizeof(qec->reserved));
- return 0;
+ return uvc_query_v4l2_ctrl(chain, qec);
}
static int uvc_ctrl_check_access(struct uvc_video_chain *chain,
@@ -1027,34 +1004,33 @@ static int uvc_ioctl_g_ext_ctrls(struct file *file, void *fh,
struct uvc_video_chain *chain = handle->chain;
struct v4l2_ext_control *ctrl = ctrls->controls;
unsigned int i;
+ u32 which;
int ret;
+ if (!ctrls->count)
+ return 0;
+
+ switch (ctrls->which) {
+ case V4L2_CTRL_WHICH_DEF_VAL:
+ case V4L2_CTRL_WHICH_CUR_VAL:
+ case V4L2_CTRL_WHICH_MAX_VAL:
+ case V4L2_CTRL_WHICH_MIN_VAL:
+ which = ctrls->which;
+ break;
+ default:
+ which = V4L2_CTRL_WHICH_CUR_VAL;
+ }
+
ret = uvc_ctrl_check_access(chain, ctrls, VIDIOC_G_EXT_CTRLS);
if (ret < 0)
return ret;
- if (ctrls->which == V4L2_CTRL_WHICH_DEF_VAL) {
- for (i = 0; i < ctrls->count; ++ctrl, ++i) {
- struct v4l2_queryctrl qc = { .id = ctrl->id };
-
- ret = uvc_query_v4l2_ctrl(chain, &qc);
- if (ret < 0) {
- ctrls->error_idx = i;
- return ret;
- }
-
- ctrl->value = qc.default_value;
- }
-
- return 0;
- }
-
ret = uvc_ctrl_begin(chain);
if (ret < 0)
return ret;
for (i = 0; i < ctrls->count; ++ctrl, ++i) {
- ret = uvc_ctrl_get(chain, ctrl);
+ ret = uvc_ctrl_get(chain, which, ctrl);
if (ret < 0) {
uvc_ctrl_rollback(handle);
ctrls->error_idx = i;
@@ -1076,6 +1052,9 @@ static int uvc_ioctl_s_try_ext_ctrls(struct uvc_fh *handle,
unsigned int i;
int ret;
+ if (!ctrls->count)
+ return 0;
+
ret = uvc_ctrl_check_access(chain, ctrls, ioctl);
if (ret < 0)
return ret;
@@ -1491,7 +1470,6 @@ const struct v4l2_ioctl_ops uvc_ioctl_ops = {
.vidioc_enum_input = uvc_ioctl_enum_input,
.vidioc_g_input = uvc_ioctl_g_input,
.vidioc_s_input = uvc_ioctl_s_input,
- .vidioc_queryctrl = uvc_ioctl_queryctrl,
.vidioc_query_ext_ctrl = uvc_ioctl_query_ext_ctrl,
.vidioc_g_ext_ctrls = uvc_ioctl_g_ext_ctrls,
.vidioc_s_ext_ctrls = uvc_ioctl_s_ext_ctrls,
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 5e388f05f3fc..b4ee701835fc 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -116,7 +116,12 @@ struct uvc_control_mapping {
u8 entity[16];
u8 selector;
+ /*
+ * Size of the control data in the payload of the UVC control GET and
+ * SET requests, expressed in bits.
+ */
u8 size;
+
u8 offset;
enum v4l2_ctrl_type v4l2_type;
u32 data_type;
@@ -132,10 +137,10 @@ struct uvc_control_mapping {
const struct uvc_control_mapping *(*filter_mapping)
(struct uvc_video_chain *chain,
struct uvc_control *ctrl);
- s32 (*get)(struct uvc_control_mapping *mapping, u8 query,
- const u8 *data);
- void (*set)(struct uvc_control_mapping *mapping, s32 value,
- u8 *data);
+ int (*get)(struct uvc_control_mapping *mapping, u8 query,
+ const void *uvc_in, size_t v4l2_size, void *v4l2_out);
+ int (*set)(struct uvc_control_mapping *mapping, size_t v4l2_size,
+ const void *v4l2_in, void *uvc_out);
};
struct uvc_control {
@@ -538,6 +543,13 @@ struct uvc_device_info {
u16 uvc_version;
};
+struct uvc_rect {
+ u16 top;
+ u16 left;
+ u16 bottom;
+ u16 right;
+} __packed;
+
struct uvc_status_streaming {
u8 button;
} __packed;
@@ -620,10 +632,6 @@ struct uvc_fh {
unsigned int pending_async_ctrls;
};
-struct uvc_driver {
- struct usb_driver driver;
-};
-
/* ------------------------------------------------------------------------
* Debugging, printing and logging
*/
@@ -674,9 +682,6 @@ do { \
* Internal functions.
*/
-/* Core driver */
-extern struct uvc_driver uvc_driver;
-
struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id);
/* Video buffers queue management. */
@@ -766,7 +771,7 @@ void uvc_status_put(struct uvc_device *dev);
extern const struct v4l2_subscribed_event_ops uvc_ctrl_sub_ev_ops;
int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
- struct v4l2_queryctrl *v4l2_ctrl);
+ struct v4l2_query_ext_ctrl *v4l2_ctrl);
int uvc_query_v4l2_menu(struct uvc_video_chain *chain,
struct v4l2_querymenu *query_menu);
@@ -793,7 +798,8 @@ static inline int uvc_ctrl_rollback(struct uvc_fh *handle)
return __uvc_ctrl_commit(handle, 1, NULL);
}
-int uvc_ctrl_get(struct uvc_video_chain *chain, struct v4l2_ext_control *xctrl);
+int uvc_ctrl_get(struct uvc_video_chain *chain, u32 which,
+ struct v4l2_ext_control *xctrl);
int uvc_ctrl_set(struct uvc_fh *handle, struct v4l2_ext_control *xctrl);
int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
const struct v4l2_ext_controls *ctrls,
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 0a2f4f0d0a07..e4b2de3833ee 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -466,8 +466,8 @@ int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt);
-s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
- unsigned int div)
+s64 __v4l2_get_link_freq_ctrl(struct v4l2_ctrl_handler *handler,
+ unsigned int mul, unsigned int div)
{
struct v4l2_ctrl *ctrl;
s64 freq;
@@ -502,7 +502,33 @@ s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
return freq > 0 ? freq : -EINVAL;
}
-EXPORT_SYMBOL_GPL(v4l2_get_link_freq);
+EXPORT_SYMBOL_GPL(__v4l2_get_link_freq_ctrl);
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+s64 __v4l2_get_link_freq_pad(struct media_pad *pad, unsigned int mul,
+ unsigned int div)
+{
+ struct v4l2_mbus_config mbus_config = {};
+ struct v4l2_subdev *sd;
+ int ret;
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+ ret = v4l2_subdev_call(sd, pad, get_mbus_config, pad->index,
+ &mbus_config);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ if (mbus_config.link_freq)
+ return mbus_config.link_freq;
+
+ /*
+ * Fall back to using the link frequency control if the media bus config
+ * doesn't provide a link frequency.
+ */
+ return __v4l2_get_link_freq_ctrl(sd->ctrl_handler, mul, div);
+}
+EXPORT_SYMBOL_GPL(__v4l2_get_link_freq_pad);
+#endif /* CONFIG_MEDIA_CONTROLLER */
/*
* Simplify a fraction using a simple continued fraction decomposition. The
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-api.c b/drivers/media/v4l2-core/v4l2-ctrls-api.c
index 95a2202879d8..d49a68b36c28 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-api.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-api.c
@@ -94,6 +94,22 @@ static int def_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
return ptr_to_user(c, ctrl, ctrl->p_new);
}
+/* Helper function: copy the minimum control value back to the caller */
+static int min_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
+{
+ ctrl->type_ops->minimum(ctrl, 0, ctrl->p_new);
+
+ return ptr_to_user(c, ctrl, ctrl->p_new);
+}
+
+/* Helper function: copy the maximum control value back to the caller */
+static int max_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
+{
+ ctrl->type_ops->maximum(ctrl, 0, ctrl->p_new);
+
+ return ptr_to_user(c, ctrl, ctrl->p_new);
+}
+
/* Helper function: copy the caller-provider value as the new control value */
static int user_to_new(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
{
@@ -229,8 +245,8 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
cs->error_idx = i;
if (cs->which &&
- cs->which != V4L2_CTRL_WHICH_DEF_VAL &&
- cs->which != V4L2_CTRL_WHICH_REQUEST_VAL &&
+ (cs->which < V4L2_CTRL_WHICH_DEF_VAL ||
+ cs->which > V4L2_CTRL_WHICH_MAX_VAL) &&
V4L2_CTRL_ID2WHICH(id) != cs->which) {
dprintk(vdev,
"invalid which 0x%x or control id 0x%x\n",
@@ -259,6 +275,15 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
return -EINVAL;
}
+ if (!(ctrl->flags & V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX) &&
+ (cs->which == V4L2_CTRL_WHICH_MIN_VAL ||
+ cs->which == V4L2_CTRL_WHICH_MAX_VAL)) {
+ dprintk(vdev,
+ "invalid which 0x%x or control id 0x%x\n",
+ cs->which, id);
+ return -EINVAL;
+ }
+
if (ctrl->cluster[0]->ncontrols > 1)
have_clusters = true;
if (ctrl->cluster[0] != ctrl)
@@ -368,8 +393,8 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
*/
static int class_check(struct v4l2_ctrl_handler *hdl, u32 which)
{
- if (which == 0 || which == V4L2_CTRL_WHICH_DEF_VAL ||
- which == V4L2_CTRL_WHICH_REQUEST_VAL)
+ if (which == 0 || (which >= V4L2_CTRL_WHICH_DEF_VAL &&
+ which <= V4L2_CTRL_WHICH_MAX_VAL))
return 0;
return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL;
}
@@ -389,10 +414,12 @@ int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
struct v4l2_ctrl_helper *helpers = helper;
int ret;
int i, j;
- bool is_default, is_request;
+ bool is_default, is_request, is_min, is_max;
is_default = (cs->which == V4L2_CTRL_WHICH_DEF_VAL);
is_request = (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL);
+ is_min = (cs->which == V4L2_CTRL_WHICH_MIN_VAL);
+ is_max = (cs->which == V4L2_CTRL_WHICH_MAX_VAL);
cs->error_idx = cs->count;
cs->which = V4L2_CTRL_ID2WHICH(cs->which);
@@ -432,13 +459,14 @@ int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
/*
* g_volatile_ctrl will update the new control values.
- * This makes no sense for V4L2_CTRL_WHICH_DEF_VAL and
+ * This makes no sense for V4L2_CTRL_WHICH_DEF_VAL,
+ * V4L2_CTRL_WHICH_MIN_VAL, V4L2_CTRL_WHICH_MAX_VAL and
* V4L2_CTRL_WHICH_REQUEST_VAL. In the case of requests
* it is v4l2_ctrl_request_complete() that copies the
* volatile controls at the time of request completion
* to the request, so you don't want to do that again.
*/
- if (!is_default && !is_request &&
+ if (!is_default && !is_request && !is_min && !is_max &&
((master->flags & V4L2_CTRL_FLAG_VOLATILE) ||
(master->has_volatiles && !is_cur_manual(master)))) {
for (j = 0; j < master->ncontrols; j++)
@@ -467,6 +495,10 @@ int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
ret = -ENOMEM;
else if (is_request && ref->p_req_valid)
ret = req_to_user(cs->controls + idx, ref);
+ else if (is_min)
+ ret = min_to_user(cs->controls + idx, ref->ctrl);
+ else if (is_max)
+ ret = max_to_user(cs->controls + idx, ref->ctrl);
else if (is_volatile)
ret = new_to_user(cs->controls + idx, ref->ctrl);
else
@@ -564,9 +596,11 @@ int try_set_ext_ctrls_common(struct v4l2_fh *fh,
cs->error_idx = cs->count;
- /* Default value cannot be changed */
- if (cs->which == V4L2_CTRL_WHICH_DEF_VAL) {
- dprintk(vdev, "%s: cannot change default value\n",
+ /* Default/minimum/maximum values cannot be changed */
+ if (cs->which == V4L2_CTRL_WHICH_DEF_VAL ||
+ cs->which == V4L2_CTRL_WHICH_MIN_VAL ||
+ cs->which == V4L2_CTRL_WHICH_MAX_VAL) {
+ dprintk(vdev, "%s: cannot change default/min/max value\n",
video_device_node_name(vdev));
return -EINVAL;
}
@@ -1123,39 +1157,48 @@ int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctr
}
EXPORT_SYMBOL(v4l2_query_ext_ctrl);
-/* Implement VIDIOC_QUERYCTRL */
-int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc)
+void v4l2_query_ext_ctrl_to_v4l2_queryctrl(struct v4l2_queryctrl *to,
+ const struct v4l2_query_ext_ctrl *from)
{
- struct v4l2_query_ext_ctrl qec = { qc->id };
- int rc;
-
- rc = v4l2_query_ext_ctrl(hdl, &qec);
- if (rc)
- return rc;
+ to->id = from->id;
+ to->type = from->type;
+ to->flags = from->flags;
+ strscpy(to->name, from->name, sizeof(to->name));
- qc->id = qec.id;
- qc->type = qec.type;
- qc->flags = qec.flags;
- strscpy(qc->name, qec.name, sizeof(qc->name));
- switch (qc->type) {
+ switch (from->type) {
case V4L2_CTRL_TYPE_INTEGER:
case V4L2_CTRL_TYPE_BOOLEAN:
case V4L2_CTRL_TYPE_MENU:
case V4L2_CTRL_TYPE_INTEGER_MENU:
case V4L2_CTRL_TYPE_STRING:
case V4L2_CTRL_TYPE_BITMASK:
- qc->minimum = qec.minimum;
- qc->maximum = qec.maximum;
- qc->step = qec.step;
- qc->default_value = qec.default_value;
+ to->minimum = from->minimum;
+ to->maximum = from->maximum;
+ to->step = from->step;
+ to->default_value = from->default_value;
break;
default:
- qc->minimum = 0;
- qc->maximum = 0;
- qc->step = 0;
- qc->default_value = 0;
+ to->minimum = 0;
+ to->maximum = 0;
+ to->step = 0;
+ to->default_value = 0;
break;
}
+}
+EXPORT_SYMBOL(v4l2_query_ext_ctrl_to_v4l2_queryctrl);
+
+/* Implement VIDIOC_QUERYCTRL */
+int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc)
+{
+ struct v4l2_query_ext_ctrl qec = { qc->id };
+ int rc;
+
+ rc = v4l2_query_ext_ctrl(hdl, &qec);
+ if (rc)
+ return rc;
+
+ v4l2_query_ext_ctrl_to_v4l2_queryctrl(qc, &qec);
+
return 0;
}
EXPORT_SYMBOL(v4l2_queryctrl);
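
The v4l2-ctrls-api.c changes above let userspace read a control's minimum or maximum payload through VIDIOC_G_EXT_CTRLS by passing the new "which" values. A minimal userspace sketch, assuming a hypothetical compound rect control ID and a videodev2.h that already carries V4L2_CTRL_WHICH_MIN_VAL/_MAX_VAL and the p_rect union member from this series:

	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	/* Fetch the min or max payload of a rect compound control. */
	static int get_rect_limit(int fd, __u32 cid, __u32 which, struct v4l2_rect *out)
	{
		struct v4l2_ext_control ctrl = {
			.id = cid,
			.size = sizeof(*out),	/* payload size of the compound value */
			.p_rect = out,
		};
		struct v4l2_ext_controls ctrls = {
			.which = which,		/* V4L2_CTRL_WHICH_MIN_VAL or _MAX_VAL */
			.count = 1,
			.controls = &ctrl,
		};

		return ioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls);
	}

Setting such a control with one of these "which" values is rejected, as the try_set path above now makes explicit.
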
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c
index eeab6a5eb7ba..90d25329661e 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-core.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c
@@ -182,29 +182,66 @@ static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
}
}
-void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx,
- union v4l2_ctrl_ptr ptr)
+static void std_min_compound(const struct v4l2_ctrl *ctrl, u32 idx,
+ union v4l2_ctrl_ptr ptr)
+{
+ void *p = ptr.p + idx * ctrl->elem_size;
+
+ if (ctrl->p_min.p_const)
+ memcpy(p, ctrl->p_min.p_const, ctrl->elem_size);
+ else
+ memset(p, 0, ctrl->elem_size);
+}
+
+static void std_max_compound(const struct v4l2_ctrl *ctrl, u32 idx,
+ union v4l2_ctrl_ptr ptr)
+{
+ void *p = ptr.p + idx * ctrl->elem_size;
+
+ if (ctrl->p_max.p_const)
+ memcpy(p, ctrl->p_max.p_const, ctrl->elem_size);
+ else
+ memset(p, 0, ctrl->elem_size);
+}
+
+static void __v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx,
+ u32 which, union v4l2_ctrl_ptr ptr)
{
unsigned int i;
u32 tot_elems = ctrl->elems;
u32 elems = tot_elems - from_idx;
+ s64 value;
- if (from_idx >= tot_elems)
+ switch (which) {
+ case V4L2_CTRL_WHICH_DEF_VAL:
+ value = ctrl->default_value;
+ break;
+ case V4L2_CTRL_WHICH_MAX_VAL:
+ value = ctrl->maximum;
+ break;
+ case V4L2_CTRL_WHICH_MIN_VAL:
+ value = ctrl->minimum;
+ break;
+ default:
return;
+ }
switch (ctrl->type) {
case V4L2_CTRL_TYPE_STRING:
+ if (which == V4L2_CTRL_WHICH_DEF_VAL)
+ value = ctrl->minimum;
+
for (i = from_idx; i < tot_elems; i++) {
unsigned int offset = i * ctrl->elem_size;
- memset(ptr.p_char + offset, ' ', ctrl->minimum);
- ptr.p_char[offset + ctrl->minimum] = '\0';
+ memset(ptr.p_char + offset, ' ', value);
+ ptr.p_char[offset + value] = '\0';
}
break;
case V4L2_CTRL_TYPE_INTEGER64:
- if (ctrl->default_value) {
+ if (value) {
for (i = from_idx; i < tot_elems; i++)
- ptr.p_s64[i] = ctrl->default_value;
+ ptr.p_s64[i] = value;
} else {
memset(ptr.p_s64 + from_idx, 0, elems * sizeof(s64));
}
@@ -214,9 +251,9 @@ void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx,
case V4L2_CTRL_TYPE_MENU:
case V4L2_CTRL_TYPE_BITMASK:
case V4L2_CTRL_TYPE_BOOLEAN:
- if (ctrl->default_value) {
+ if (value) {
for (i = from_idx; i < tot_elems; i++)
- ptr.p_s32[i] = ctrl->default_value;
+ ptr.p_s32[i] = value;
} else {
memset(ptr.p_s32 + from_idx, 0, elems * sizeof(s32));
}
@@ -226,32 +263,61 @@ void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx,
memset(ptr.p_s32 + from_idx, 0, elems * sizeof(s32));
break;
case V4L2_CTRL_TYPE_U8:
- memset(ptr.p_u8 + from_idx, ctrl->default_value, elems);
+ memset(ptr.p_u8 + from_idx, value, elems);
break;
case V4L2_CTRL_TYPE_U16:
- if (ctrl->default_value) {
+ if (value) {
for (i = from_idx; i < tot_elems; i++)
- ptr.p_u16[i] = ctrl->default_value;
+ ptr.p_u16[i] = value;
} else {
memset(ptr.p_u16 + from_idx, 0, elems * sizeof(u16));
}
break;
case V4L2_CTRL_TYPE_U32:
- if (ctrl->default_value) {
+ if (value) {
for (i = from_idx; i < tot_elems; i++)
- ptr.p_u32[i] = ctrl->default_value;
+ ptr.p_u32[i] = value;
} else {
memset(ptr.p_u32 + from_idx, 0, elems * sizeof(u32));
}
break;
default:
- for (i = from_idx; i < tot_elems; i++)
- std_init_compound(ctrl, i, ptr);
+ for (i = from_idx; i < tot_elems; i++) {
+ switch (which) {
+ case V4L2_CTRL_WHICH_DEF_VAL:
+ std_init_compound(ctrl, i, ptr);
+ break;
+ case V4L2_CTRL_WHICH_MAX_VAL:
+ std_max_compound(ctrl, i, ptr);
+ break;
+ case V4L2_CTRL_WHICH_MIN_VAL:
+ std_min_compound(ctrl, i, ptr);
+ break;
+ }
+ }
break;
}
}
+
+void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx,
+ union v4l2_ctrl_ptr ptr)
+{
+ __v4l2_ctrl_type_op_init(ctrl, from_idx, V4L2_CTRL_WHICH_DEF_VAL, ptr);
+}
EXPORT_SYMBOL(v4l2_ctrl_type_op_init);
+static void v4l2_ctrl_type_op_minimum(const struct v4l2_ctrl *ctrl,
+ u32 from_idx, union v4l2_ctrl_ptr ptr)
+{
+ __v4l2_ctrl_type_op_init(ctrl, from_idx, V4L2_CTRL_WHICH_MIN_VAL, ptr);
+}
+
+static void v4l2_ctrl_type_op_maximum(const struct v4l2_ctrl *ctrl,
+ u32 from_idx, union v4l2_ctrl_ptr ptr)
+{
+ __v4l2_ctrl_type_op_init(ctrl, from_idx, V4L2_CTRL_WHICH_MAX_VAL, ptr);
+}
+
void v4l2_ctrl_type_op_log(const struct v4l2_ctrl *ctrl)
{
union v4l2_ctrl_ptr ptr = ctrl->p_cur;
@@ -370,7 +436,11 @@ void v4l2_ctrl_type_op_log(const struct v4l2_ctrl *ctrl)
case V4L2_CTRL_TYPE_AV1_FILM_GRAIN:
pr_cont("AV1_FILM_GRAIN");
break;
-
+ case V4L2_CTRL_TYPE_RECT:
+ pr_cont("(%d,%d)/%ux%u",
+ ptr.p_rect->left, ptr.p_rect->top,
+ ptr.p_rect->width, ptr.p_rect->height);
+ break;
default:
pr_cont("unknown type %d", ctrl->type);
break;
@@ -815,6 +885,7 @@ static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
struct v4l2_ctrl_hdr10_mastering_display *p_hdr10_mastering;
struct v4l2_ctrl_hevc_decode_params *p_hevc_decode_params;
struct v4l2_area *area;
+ struct v4l2_rect *rect;
void *p = ptr.p + idx * ctrl->elem_size;
unsigned int i;
@@ -1172,6 +1243,12 @@ static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
return -EINVAL;
break;
+ case V4L2_CTRL_TYPE_RECT:
+ rect = p;
+ if (!rect->width || !rect->height)
+ return -EINVAL;
+ break;
+
default:
return -EINVAL;
}
@@ -1285,6 +1362,8 @@ EXPORT_SYMBOL(v4l2_ctrl_type_op_validate);
static const struct v4l2_ctrl_type_ops std_type_ops = {
.equal = v4l2_ctrl_type_op_equal,
.init = v4l2_ctrl_type_op_init,
+ .minimum = v4l2_ctrl_type_op_minimum,
+ .maximum = v4l2_ctrl_type_op_maximum,
.log = v4l2_ctrl_type_op_log,
.validate = v4l2_ctrl_type_op_validate,
};
@@ -1757,7 +1836,10 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
s64 min, s64 max, u64 step, s64 def,
const u32 dims[V4L2_CTRL_MAX_DIMS], u32 elem_size,
u32 flags, const char * const *qmenu,
- const s64 *qmenu_int, const union v4l2_ctrl_ptr p_def,
+ const s64 *qmenu_int,
+ const union v4l2_ctrl_ptr p_def,
+ const union v4l2_ctrl_ptr p_min,
+ const union v4l2_ctrl_ptr p_max,
void *priv)
{
struct v4l2_ctrl *ctrl;
@@ -1872,12 +1954,21 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
case V4L2_CTRL_TYPE_AREA:
elem_size = sizeof(struct v4l2_area);
break;
+ case V4L2_CTRL_TYPE_RECT:
+ elem_size = sizeof(struct v4l2_rect);
+ break;
default:
if (type < V4L2_CTRL_COMPOUND_TYPES)
elem_size = sizeof(s32);
break;
}
+ if (type < V4L2_CTRL_COMPOUND_TYPES &&
+ type != V4L2_CTRL_TYPE_BUTTON &&
+ type != V4L2_CTRL_TYPE_CTRL_CLASS &&
+ type != V4L2_CTRL_TYPE_STRING)
+ flags |= V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX;
+
/* Sanity checks */
if (id == 0 || name == NULL || !elem_size ||
id >= V4L2_CID_PRIVATE_BASE ||
@@ -1886,6 +1977,7 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
handler_set_err(hdl, -ERANGE);
return NULL;
}
+
err = check_range(type, min, max, step, def);
if (err) {
handler_set_err(hdl, err);
@@ -1927,6 +2019,10 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const)
sz_extra += elem_size;
+ if (type >= V4L2_CTRL_COMPOUND_TYPES && p_min.p_const)
+ sz_extra += elem_size;
+ if (type >= V4L2_CTRL_COMPOUND_TYPES && p_max.p_const)
+ sz_extra += elem_size;
ctrl = kvzalloc(sizeof(*ctrl) + sz_extra, GFP_KERNEL);
if (ctrl == NULL) {
@@ -1992,6 +2088,22 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
memcpy(ctrl->p_def.p, p_def.p_const, elem_size);
}
+ if (flags & V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX) {
+ void *ptr = ctrl->p_def.p;
+
+ if (p_min.p_const) {
+ ptr += elem_size;
+ ctrl->p_min.p = ptr;
+ memcpy(ctrl->p_min.p, p_min.p_const, elem_size);
+ }
+
+ if (p_max.p_const) {
+ ptr += elem_size;
+ ctrl->p_max.p = ptr;
+ memcpy(ctrl->p_max.p, p_max.p_const, elem_size);
+ }
+ }
+
ctrl->type_ops->init(ctrl, 0, ctrl->p_cur);
cur_to_new(ctrl);
@@ -2042,7 +2154,8 @@ struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
type, min, max,
is_menu ? cfg->menu_skip_mask : step, def,
cfg->dims, cfg->elem_size,
- flags, qmenu, qmenu_int, cfg->p_def, priv);
+ flags, qmenu, qmenu_int, cfg->p_def, cfg->p_min,
+ cfg->p_max, priv);
if (ctrl)
ctrl->is_private = cfg->is_private;
return ctrl;
@@ -2067,7 +2180,8 @@ struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl,
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
min, max, step, def, NULL, 0,
- flags, NULL, NULL, ptr_null, NULL);
+ flags, NULL, NULL, ptr_null, ptr_null,
+ ptr_null, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std);
@@ -2100,7 +2214,8 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
0, max, mask, def, NULL, 0,
- flags, qmenu, qmenu_int, ptr_null, NULL);
+ flags, qmenu, qmenu_int, ptr_null, ptr_null,
+ ptr_null, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std_menu);
@@ -2132,7 +2247,8 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl,
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
0, max, mask, def, NULL, 0,
- flags, qmenu, NULL, ptr_null, NULL);
+ flags, qmenu, NULL, ptr_null, ptr_null,
+ ptr_null, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std_menu_items);
@@ -2140,7 +2256,9 @@ EXPORT_SYMBOL(v4l2_ctrl_new_std_menu_items);
/* Helper function for standard compound controls */
struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops, u32 id,
- const union v4l2_ctrl_ptr p_def)
+ const union v4l2_ctrl_ptr p_def,
+ const union v4l2_ctrl_ptr p_min,
+ const union v4l2_ctrl_ptr p_max)
{
const char *name;
enum v4l2_ctrl_type type;
@@ -2154,7 +2272,7 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl,
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
min, max, step, def, NULL, 0,
- flags, NULL, NULL, p_def, NULL);
+ flags, NULL, NULL, p_def, p_min, p_max, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std_compound);
@@ -2178,7 +2296,8 @@ struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
0, max, 0, def, NULL, 0,
- flags, NULL, qmenu_int, ptr_null, NULL);
+ flags, NULL, qmenu_int, ptr_null, ptr_null,
+ ptr_null, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_int_menu);
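
On the driver side, v4l2_ctrl_new_std_compound() now accepts p_min/p_max payloads next to p_def, and v4l2_ctrl_new() stores them after the default payload in the same allocation. A minimal sketch of registering a rect control with explicit limits; the control ID, ops name and rect values are made-up placeholders, not part of this series:

	static const struct v4l2_rect my_rect_def = { .left = 0, .top = 0, .width = 640, .height = 480 };
	static const struct v4l2_rect my_rect_min = { .left = 0, .top = 0, .width = 16, .height = 16 };
	static const struct v4l2_rect my_rect_max = { .left = 0, .top = 0, .width = 4096, .height = 2160 };

	v4l2_ctrl_new_std_compound(hdl, &my_ctrl_ops, V4L2_CID_MY_CROP_RECT,
				   v4l2_ctrl_ptr_create((void *)&my_rect_def),
				   v4l2_ctrl_ptr_create((void *)&my_rect_min),
				   v4l2_ctrl_ptr_create((void *)&my_rect_max));

The new std_min_compound()/std_max_compound() helpers then serve these payloads when userspace asks for the min/max value of the control.
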
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 5bcaeeba4d09..b40c08ce909d 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -572,13 +572,13 @@ static void determine_valid_ioctls(struct video_device *vdev)
and that can't be tested here. If the bit for these control ioctls
is set, then the ioctl is valid. But if it is 0, then it can still
be valid if the filehandle passed the control handler. */
- if (vdev->ctrl_handler || ops->vidioc_queryctrl)
+ if (vdev->ctrl_handler || ops->vidioc_query_ext_ctrl)
__set_bit(_IOC_NR(VIDIOC_QUERYCTRL), valid_ioctls);
if (vdev->ctrl_handler || ops->vidioc_query_ext_ctrl)
__set_bit(_IOC_NR(VIDIOC_QUERY_EXT_CTRL), valid_ioctls);
- if (vdev->ctrl_handler || ops->vidioc_g_ctrl || ops->vidioc_g_ext_ctrls)
+ if (vdev->ctrl_handler || ops->vidioc_g_ext_ctrls)
__set_bit(_IOC_NR(VIDIOC_G_CTRL), valid_ioctls);
- if (vdev->ctrl_handler || ops->vidioc_s_ctrl || ops->vidioc_s_ext_ctrls)
+ if (vdev->ctrl_handler || ops->vidioc_s_ext_ctrls)
__set_bit(_IOC_NR(VIDIOC_S_CTRL), valid_ioctls);
if (vdev->ctrl_handler || ops->vidioc_g_ext_ctrls)
__set_bit(_IOC_NR(VIDIOC_G_EXT_CTRLS), valid_ioctls);
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index d26edf157e64..7710cb26bea0 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -764,7 +764,7 @@ bool v4l2_detect_gtf(unsigned int frame_height,
u64 num;
u32 den;
- num = ((image_width * GTF_D_C_PRIME * (u64)hfreq) -
+ num = (((u64)image_width * GTF_D_C_PRIME * hfreq) -
((u64)image_width * GTF_D_M_PRIME * 1000));
den = (hfreq * (100 - GTF_D_C_PRIME) + GTF_D_M_PRIME * 1000) *
(2 * GTF_CELL_GRAN);
@@ -774,7 +774,7 @@ bool v4l2_detect_gtf(unsigned int frame_height,
u64 num;
u32 den;
- num = ((image_width * GTF_S_C_PRIME * (u64)hfreq) -
+ num = (((u64)image_width * GTF_S_C_PRIME * hfreq) -
((u64)image_width * GTF_S_M_PRIME * 1000));
den = (hfreq * (100 - GTF_S_C_PRIME) + GTF_S_M_PRIME * 1000) *
(2 * GTF_CELL_GRAN);
@@ -1018,6 +1018,42 @@ v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi,
EXPORT_SYMBOL_GPL(v4l2_hdmi_rx_colorimetry);
/**
+ * v4l2_num_edid_blocks() - return the number of EDID blocks
+ *
+ * @edid: pointer to the EDID data
+ * @max_blocks: maximum number of supported EDID blocks
+ *
+ * Return: the number of EDID blocks based on the contents of the EDID.
+ * This supports the HDMI Forum EDID Extension Override Data Block.
+ */
+unsigned int v4l2_num_edid_blocks(const u8 *edid, unsigned int max_blocks)
+{
+ unsigned int blocks;
+
+ if (!edid || !max_blocks)
+ return 0;
+
+ // The number of extension blocks is recorded at byte 126 of the
+ // first 128-byte block in the EDID.
+ //
+ // If there is an HDMI Forum EDID Extension Override Data Block
+ // present, then it is in bytes 4-6 of the first CTA-861 extension
+ // block of the EDID.
+ blocks = edid[126] + 1;
+ // Check for HDMI Forum EDID Extension Override Data Block
+ if (blocks >= 2 && // The EDID must be at least 2 blocks
+ max_blocks >= 3 && // The caller supports at least 3 blocks
+ edid[128] == 2 && // The first extension block is type CTA-861
+ edid[133] == 0x78 && // Identifier for the EEODB
+ (edid[132] & 0xe0) == 0xe0 && // Tag Code == 7
+ (edid[132] & 0x1f) >= 2 && // Length >= 2
+ edid[134] > 1) // Number of extension blocks is sane
+ blocks = edid[134] + 1;
+ return blocks > max_blocks ? max_blocks : blocks;
+}
+EXPORT_SYMBOL_GPL(v4l2_num_edid_blocks);
+
+/**
* v4l2_get_edid_phys_addr() - find and return the physical address
*
* @edid: pointer to the EDID data
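
v4l2_num_edid_blocks() lets EDID consumers honour the EEODB-advertised extension count instead of trusting only byte 126 of the base block. A short usage sketch, assuming edid already holds at least the base block and first extension, and MY_MAX_EDID_BLOCKS is a driver-chosen cap:

	unsigned int blocks = v4l2_num_edid_blocks(edid, MY_MAX_EDID_BLOCKS);
	size_t edid_len = blocks * 128;	/* each EDID block is 128 bytes */
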
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 0304daa8471d..a16fb44c7246 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -310,8 +310,8 @@ static void v4l_print_format(const void *arg, bool write_only)
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
win = &p->fmt.win;
- pr_cont(", wxh=%dx%d, x,y=%d,%d, field=%s, chromakey=0x%08x, global_alpha=0x%02x\n",
- win->w.width, win->w.height, win->w.left, win->w.top,
+ pr_cont(", (%d,%d)/%ux%u, field=%s, chromakey=0x%08x, global_alpha=0x%02x\n",
+ win->w.left, win->w.top, win->w.width, win->w.height,
prt_names(win->field, v4l2_field_names),
win->chromakey, win->global_alpha);
break;
@@ -589,12 +589,12 @@ static void v4l_print_cropcap(const void *arg, bool write_only)
{
const struct v4l2_cropcap *p = arg;
- pr_cont("type=%s, bounds wxh=%dx%d, x,y=%d,%d, defrect wxh=%dx%d, x,y=%d,%d, pixelaspect %d/%d\n",
+ pr_cont("type=%s, bounds (%d,%d)/%ux%u, defrect (%d,%d)/%ux%u, pixelaspect %d/%d\n",
prt_names(p->type, v4l2_type_names),
- p->bounds.width, p->bounds.height,
p->bounds.left, p->bounds.top,
- p->defrect.width, p->defrect.height,
+ p->bounds.width, p->bounds.height,
p->defrect.left, p->defrect.top,
+ p->defrect.width, p->defrect.height,
p->pixelaspect.numerator, p->pixelaspect.denominator);
}
@@ -602,20 +602,20 @@ static void v4l_print_crop(const void *arg, bool write_only)
{
const struct v4l2_crop *p = arg;
- pr_cont("type=%s, wxh=%dx%d, x,y=%d,%d\n",
+ pr_cont("type=%s, crop=(%d,%d)/%ux%u\n",
prt_names(p->type, v4l2_type_names),
- p->c.width, p->c.height,
- p->c.left, p->c.top);
+ p->c.left, p->c.top,
+ p->c.width, p->c.height);
}
static void v4l_print_selection(const void *arg, bool write_only)
{
const struct v4l2_selection *p = arg;
- pr_cont("type=%s, target=%d, flags=0x%x, wxh=%dx%d, x,y=%d,%d\n",
+ pr_cont("type=%s, target=%d, flags=0x%x, rect=(%d,%d)/%ux%u\n",
prt_names(p->type, v4l2_type_names),
p->target, p->flags,
- p->r.width, p->r.height, p->r.left, p->r.top);
+ p->r.left, p->r.top, p->r.width, p->r.height);
}
static void v4l_print_jpegcompression(const void *arg, bool write_only)
@@ -893,7 +893,9 @@ static bool check_ext_ctrls(struct v4l2_ext_controls *c, unsigned long ioctl)
return false;
break;
case V4L2_CTRL_WHICH_DEF_VAL:
- /* Default value cannot be changed */
+ case V4L2_CTRL_WHICH_MIN_VAL:
+ case V4L2_CTRL_WHICH_MAX_VAL:
+ /* Default, minimum or maximum value cannot be changed */
if (ioctl == VIDIOC_S_EXT_CTRLS ||
ioctl == VIDIOC_TRY_EXT_CTRLS) {
c->error_idx = c->count;
@@ -2284,17 +2286,26 @@ static int v4l_queryctrl(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
+ struct v4l2_query_ext_ctrl qec = {};
struct v4l2_queryctrl *p = arg;
struct v4l2_fh *vfh =
test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ int ret;
if (vfh && vfh->ctrl_handler)
return v4l2_queryctrl(vfh->ctrl_handler, p);
if (vfd->ctrl_handler)
return v4l2_queryctrl(vfd->ctrl_handler, p);
- if (ops->vidioc_queryctrl)
- return ops->vidioc_queryctrl(file, fh, p);
- return -ENOTTY;
+ if (!ops->vidioc_query_ext_ctrl)
+ return -ENOTTY;
+
+ /* Simulate queryctrl using query_ext_ctrl. */
+ qec.id = p->id;
+ ret = ops->vidioc_query_ext_ctrl(file, fh, &qec);
+ if (ret)
+ return ret;
+ v4l2_query_ext_ctrl_to_v4l2_queryctrl(p, &qec);
+ return ret;
}
static int v4l_query_ext_ctrl(const struct v4l2_ioctl_ops *ops,
@@ -2345,8 +2356,6 @@ static int v4l_g_ctrl(const struct v4l2_ioctl_ops *ops,
return v4l2_g_ctrl(vfh->ctrl_handler, p);
if (vfd->ctrl_handler)
return v4l2_g_ctrl(vfd->ctrl_handler, p);
- if (ops->vidioc_g_ctrl)
- return ops->vidioc_g_ctrl(file, fh, p);
if (ops->vidioc_g_ext_ctrls == NULL)
return -ENOTTY;
@@ -2380,8 +2389,6 @@ static int v4l_s_ctrl(const struct v4l2_ioctl_ops *ops,
return v4l2_s_ctrl(vfh, vfh->ctrl_handler, p);
if (vfd->ctrl_handler)
return v4l2_s_ctrl(NULL, vfd->ctrl_handler, p);
- if (ops->vidioc_s_ctrl)
- return ops->vidioc_s_ctrl(file, fh, p);
if (ops->vidioc_s_ext_ctrls == NULL)
return -ENOTTY;
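
With the vidioc_queryctrl/vidioc_g_ctrl/vidioc_s_ctrl fallbacks removed above, a driver that does not use a control handler is expected to provide the extended-control ops; the legacy ioctls are then emulated on top of them. A sketch with placeholder handler names:

	static const struct v4l2_ioctl_ops my_ioctl_ops = {
		.vidioc_query_ext_ctrl	= my_query_ext_ctrl,
		.vidioc_g_ext_ctrls	= my_g_ext_ctrls,
		.vidioc_s_ext_ctrls	= my_s_ext_ctrls,
		.vidioc_try_ext_ctrls	= my_try_ext_ctrls,
		/* ...remaining ioctl ops... */
	};
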
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index cde1774c9098..a3074f469b15 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -444,6 +444,8 @@ static int call_enum_dv_timings(struct v4l2_subdev *sd,
static int call_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
struct v4l2_mbus_config *config)
{
+ memset(config, 0, sizeof(*config));
+
return check_pad(sd, pad) ? :
sd->ops->pad->get_mbus_config(sd, pad, config);
}
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index 5710348f72f6..a8f5467d6b31 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -332,6 +332,38 @@ static const u8 mtk_smi_larb_mt8188_ostd[][SMI_LARB_PORT_NR_MAX] = {
[25] = {0x01},
};
+static const u8 mtk_smi_larb_mt8192_ostd[][SMI_LARB_PORT_NR_MAX] = {
+ [0] = {0x2, 0x2, 0x28, 0xa, 0xc, 0x28,},
+ [1] = {0x2, 0x2, 0x18, 0x18, 0x18, 0xa, 0xc, 0x28,},
+ [2] = {0x5, 0x5, 0x5, 0x5, 0x1,},
+ [3] = {},
+ [4] = {0x28, 0x19, 0xb, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x4, 0x1,},
+ [5] = {0x1, 0x1, 0x4, 0x1, 0x1, 0x1, 0x1, 0x16,},
+ [6] = {},
+ [7] = {0x1, 0x3, 0x2, 0x1, 0x1, 0x5, 0x2, 0x12, 0x13, 0x4, 0x4, 0x1,
+ 0x4, 0x2, 0x1,},
+ [8] = {},
+ [9] = {0xa, 0x7, 0xf, 0x8, 0x1, 0x8, 0x9, 0x3, 0x3, 0x6, 0x7, 0x4,
+ 0xa, 0x3, 0x4, 0xe, 0x1, 0x7, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1, 0x1,},
+ [10] = {},
+ [11] = {0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0xe, 0x1, 0x7, 0x8, 0x7, 0x7, 0x1, 0x6, 0x2,
+ 0xf, 0x8, 0x1, 0x1, 0x1,},
+ [12] = {},
+ [13] = {0x2, 0xc, 0xc, 0xe, 0x6, 0x6, 0x6, 0x6, 0x6, 0x12, 0x6, 0x28,
+ 0x2, 0xc, 0xc, 0x28, 0x12, 0x6,},
+ [14] = {},
+ [15] = {0x28, 0x14, 0x2, 0xc, 0x18, 0x4, 0x28, 0x14, 0x4, 0x4, 0x4, 0x2,
+ 0x4, 0x2, 0x8, 0x4, 0x4,},
+ [16] = {0x28, 0x14, 0x2, 0xc, 0x18, 0x4, 0x28, 0x14, 0x4, 0x4, 0x4, 0x2,
+ 0x4, 0x2, 0x8, 0x4, 0x4,},
+ [17] = {0x28, 0x14, 0x2, 0xc, 0x18, 0x4, 0x28, 0x14, 0x4, 0x4, 0x4, 0x2,
+ 0x4, 0x2, 0x8, 0x4, 0x4,},
+ [18] = {0x2, 0x2, 0x4, 0x2,},
+ [19] = {0x9, 0x9, 0x5, 0x5, 0x1, 0x1,},
+};
+
static const u8 mtk_smi_larb_mt8195_ostd[][SMI_LARB_PORT_NR_MAX] = {
[0] = {0x0a, 0xc, 0x22, 0x22, 0x01, 0x0a,}, /* larb0 */
[1] = {0x0a, 0xc, 0x22, 0x22, 0x01, 0x0a,}, /* larb1 */
@@ -427,6 +459,7 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt8188 = {
static const struct mtk_smi_larb_gen mtk_smi_larb_mt8192 = {
.config_port = mtk_smi_larb_config_port_gen2_general,
+ .ostd = mtk_smi_larb_mt8192_ostd,
};
static const struct mtk_smi_larb_gen mtk_smi_larb_mt8195 = {
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index e2a75a52563f..53f1888cc84f 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -2226,26 +2226,6 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
goto err;
}
- if (of_node_name_eq(child, "nand")) {
- /* Warn about older DT blobs with no compatible property */
- if (!of_property_read_bool(child, "compatible")) {
- dev_warn(&pdev->dev,
- "Incompatible NAND node: missing compatible");
- ret = -EINVAL;
- goto err;
- }
- }
-
- if (of_node_name_eq(child, "onenand")) {
- /* Warn about older DT blobs with no compatible property */
- if (!of_property_read_bool(child, "compatible")) {
- dev_warn(&pdev->dev,
- "Incompatible OneNAND node: missing compatible");
- ret = -EINVAL;
- goto err;
- }
- }
-
if (of_match_node(omap_nand_ids, child)) {
/* NAND specific setup */
val = 8;
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index 9b7d30a21a5b..44ac55feacd3 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -1191,10 +1191,8 @@ static int tegra_emc_probe(struct platform_device *pdev)
int irq, err;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "please update your device tree\n");
+ if (irq < 0)
return irq;
- }
emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
if (!emc)
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 5b617c1f6789..f4398383ae06 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -1904,7 +1904,7 @@ static void msb_io_work(struct work_struct *work)
/* process the request */
dbg_verbose("IO: processing new request");
- blk_rq_map_sg(msb->queue, req, sg);
+ blk_rq_map_sg(req, sg);
lba = blk_rq_pos(req);
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 634d343b6bdb..c9853d887d28 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -627,9 +627,7 @@ static int mspro_block_issue_req(struct memstick_dev *card)
while (true) {
msb->current_page = 0;
msb->current_seg = 0;
- msb->seg_count = blk_rq_map_sg(msb->block_req->q,
- msb->block_req,
- msb->req_sg);
+ msb->seg_count = blk_rq_map_sg(msb->block_req, msb->req_sg);
if (!msb->seg_count) {
unsigned int bytes = blk_rq_cur_bytes(msb->block_req);
diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
index 6eb892fd4d34..3878136227e4 100644
--- a/drivers/memstick/host/rtsx_usb_ms.c
+++ b/drivers/memstick/host/rtsx_usb_ms.c
@@ -813,6 +813,7 @@ static void rtsx_usb_ms_drv_remove(struct platform_device *pdev)
host->eject = true;
cancel_work_sync(&host->handle_req);
+ cancel_delayed_work_sync(&host->poll_card);
mutex_lock(&host->host_mutex);
if (host->req) {
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 7e79da9684ed..185c08eab4ca 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -2834,10 +2834,10 @@ struct rep_manu_reply{
u8 sas_format:1;
u8 reserved1:7;
u8 reserved2[3];
- u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
- u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
- u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
- u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN] __nonstring;
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN] __nonstring;
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN] __nonstring;
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN] __nonstring;
u16 component_id;
u8 component_revision_id;
u8 reserved3;
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index a9604ba3c805..3a64dc7a7e27 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1843,65 +1843,6 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
return FAILED;
}
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/**
- * mptscsih_target_reset - Perform a SCSI TARGET_RESET!
- * @SCpnt: Pointer to scsi_cmnd structure, IO which reset is due to
- *
- * (linux scsi_host_template.eh_target_reset_handler routine)
- *
- * Returns SUCCESS or FAILED.
- **/
-int
-mptscsih_target_reset(struct scsi_cmnd * SCpnt)
-{
- MPT_SCSI_HOST *hd;
- int retval;
- VirtDevice *vdevice;
- MPT_ADAPTER *ioc;
-
- /* If we can't locate our host adapter structure, return FAILED status.
- */
- if ((hd = shost_priv(SCpnt->device->host)) == NULL){
- printk(KERN_ERR MYNAM ": target reset: "
- "Can't locate host! (sc=%p)\n", SCpnt);
- return FAILED;
- }
-
- ioc = hd->ioc;
- printk(MYIOC_s_INFO_FMT "attempting target reset! (sc=%p)\n",
- ioc->name, SCpnt);
- scsi_print_command(SCpnt);
-
- vdevice = SCpnt->device->hostdata;
- if (!vdevice || !vdevice->vtarget) {
- retval = 0;
- goto out;
- }
-
- /* Target reset to hidden raid component is not supported
- */
- if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
- retval = FAILED;
- goto out;
- }
-
- retval = mptscsih_IssueTaskMgmt(hd,
- MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
- vdevice->vtarget->channel,
- vdevice->vtarget->id, 0, 0,
- mptscsih_get_tm_timeout(ioc));
-
- out:
- printk (MYIOC_s_INFO_FMT "target reset: %s (sc=%p)\n",
- ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
-
- if (retval == 0)
- return SUCCESS;
- else
- return FAILED;
-}
-
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
@@ -2915,14 +2856,14 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
timeout = 10;
break;
- case RESERVE:
+ case RESERVE_6:
cmdLen = 6;
dir = MPI_SCSIIO_CONTROL_READ;
CDB[0] = cmd;
timeout = 10;
break;
- case RELEASE:
+ case RELEASE_6:
cmdLen = 6;
dir = MPI_SCSIIO_CONTROL_READ;
CDB[0] = cmd;
@@ -3306,7 +3247,6 @@ EXPORT_SYMBOL(mptscsih_sdev_destroy);
EXPORT_SYMBOL(mptscsih_sdev_configure);
EXPORT_SYMBOL(mptscsih_abort);
EXPORT_SYMBOL(mptscsih_dev_reset);
-EXPORT_SYMBOL(mptscsih_target_reset);
EXPORT_SYMBOL(mptscsih_bus_reset);
EXPORT_SYMBOL(mptscsih_host_reset);
EXPORT_SYMBOL(mptscsih_bios_param);
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index ece451c575e1..8c2bb2331fc1 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -121,7 +121,6 @@ extern int mptscsih_sdev_configure(struct scsi_device *device,
struct queue_limits *lim);
extern int mptscsih_abort(struct scsi_cmnd * SCpnt);
extern int mptscsih_dev_reset(struct scsi_cmnd * SCpnt);
-extern int mptscsih_target_reset(struct scsi_cmnd * SCpnt);
extern int mptscsih_bus_reset(struct scsi_cmnd * SCpnt);
extern int mptscsih_host_reset(struct scsi_cmnd *SCpnt);
extern int mptscsih_bios_param(struct scsi_device * sdev, struct block_device *bdev, sector_t capacity, int geom[]);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 6b0682af6e32..22b936310039 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -138,7 +138,7 @@ config MFD_AAT2870_CORE
config MFD_AT91_USART
tristate "AT91 USART Driver"
select MFD_CORE
- depends on ARCH_AT91 || COMPILE_TEST
+ depends on ARCH_AT91 || ARCH_LAN969X || COMPILE_TEST
help
Select this to get support for AT91 USART IP. This is a wrapper
over at91-usart-serial driver and usart-spi-driver. Only one function
@@ -858,7 +858,7 @@ config MFD_MAX77541
There are regulators and adc.
config MFD_MAX77620
- bool "Maxim Semiconductor MAX77620 and MAX20024 PMIC Support"
+ tristate "Maxim Semiconductor MAX77620 and MAX20024 PMIC Support"
depends on I2C=y
depends on OF
select MFD_CORE
@@ -916,6 +916,19 @@ config MFD_MAX77693
additional drivers must be enabled in order to use the functionality
of the device.
+config MFD_MAX77705
+ tristate "Maxim MAX77705 PMIC Support"
+ depends on I2C
+ select MFD_CORE
+ select MFD_SIMPLE_MFD_I2C
+ help
+ Say yes here to add support for Maxim Integrated MAX77705 PMIC.
+ This is a Power Management IC with Charger, safe LDOs, Flash, Haptic
+ and MUIC controls on chip.
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the functionality
+ of the device.
+
config MFD_MAX77714
tristate "Maxim Semiconductor MAX77714 PMIC Support"
depends on I2C
@@ -1119,30 +1132,6 @@ config MFD_RETU
Retu and Tahvo are a multi-function devices found on Nokia
Internet Tablets (770, N800 and N810).
-config MFD_PCF50633
- tristate "NXP PCF50633"
- depends on I2C
- select REGMAP_I2C
- help
- Say yes here if you have NXP PCF50633 chip on your board.
- This core driver provides register access and IRQ handling
- facilities, and registers devices for the various functions
- so that function-specific drivers can bind to them.
-
-config PCF50633_ADC
- tristate "NXP PCF50633 ADC"
- depends on MFD_PCF50633
- help
- Say yes here if you want to include support for ADC in the
- NXP PCF50633 chip.
-
-config PCF50633_GPIO
- tristate "NXP PCF50633 GPIO"
- depends on MFD_PCF50633
- help
- Say yes here if you want to include support GPIO for pins on
- the PCF50633 chip.
-
config MFD_PM8XXX
tristate "Qualcomm PM8xxx PMIC chips driver"
depends on ARM || HEXAGON || COMPILE_TEST
@@ -1495,12 +1484,6 @@ config STMPE_SPI
This is used to enable SPI interface of STMPE
endmenu
-config MFD_STA2X11
- bool "STMicroelectronics STA2X11"
- depends on STA2X11
- select MFD_CORE
- select REGMAP_MMIO
-
config MFD_SUN6I_PRCM
bool "Allwinner A31/A23/A33 PRCM controller"
depends on ARCH_SUNXI || COMPILE_TEST
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 9220eaf7cf12..948cbdf42a18 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -26,7 +26,6 @@ obj-$(CONFIG_MFD_TI_LP873X) += lp873x.o
obj-$(CONFIG_MFD_TI_LP87565) += lp87565.o
obj-$(CONFIG_MFD_TI_AM335X_TSCADC) += ti_am335x_tscadc.o
-obj-$(CONFIG_MFD_STA2X11) += sta2x11-mfd.o
obj-$(CONFIG_MFD_STMPE) += stmpe.o
obj-$(CONFIG_STMPE_I2C) += stmpe-i2c.o
obj-$(CONFIG_STMPE_SPI) += stmpe-spi.o
@@ -168,6 +167,7 @@ obj-$(CONFIG_MFD_MAX77620) += max77620.o
obj-$(CONFIG_MFD_MAX77650) += max77650.o
obj-$(CONFIG_MFD_MAX77686) += max77686.o
obj-$(CONFIG_MFD_MAX77693) += max77693.o
+obj-$(CONFIG_MFD_MAX77705) += max77705.o
obj-$(CONFIG_MFD_MAX77714) += max77714.o
obj-$(CONFIG_MFD_MAX77843) += max77843.o
obj-$(CONFIG_MFD_MAX8907) += max8907.o
@@ -183,10 +183,6 @@ obj-$(CONFIG_MFD_MT6370) += mt6370.o
mt6397-objs := mt6397-core.o mt6397-irq.o mt6358-irq.o
obj-$(CONFIG_MFD_MT6397) += mt6397.o
-pcf50633-objs := pcf50633-core.o pcf50633-irq.o
-obj-$(CONFIG_MFD_PCF50633) += pcf50633.o
-obj-$(CONFIG_PCF50633_ADC) += pcf50633-adc.o
-obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o
obj-$(CONFIG_RZ_MTU3) += rz-mtu3.o
obj-$(CONFIG_ABX500_CORE) += abx500-core.o
obj-$(CONFIG_MFD_DB8500_PRCMU) += db8500-prcmu.o
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index cff56deba24f..e9914e8a29a3 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -224,6 +224,7 @@ static const struct regmap_range axp717_writeable_ranges[] = {
regmap_reg_range(AXP717_VSYS_V_POWEROFF, AXP717_VSYS_V_POWEROFF),
regmap_reg_range(AXP717_IRQ0_EN, AXP717_IRQ4_EN),
regmap_reg_range(AXP717_IRQ0_STATE, AXP717_IRQ4_STATE),
+ regmap_reg_range(AXP717_TS_PIN_CFG, AXP717_TS_PIN_CFG),
regmap_reg_range(AXP717_ICC_CHG_SET, AXP717_CV_CHG_SET),
regmap_reg_range(AXP717_DCDC_OUTPUT_CONTROL, AXP717_CPUSLDO_CONTROL),
regmap_reg_range(AXP717_ADC_CH_EN_CONTROL, AXP717_ADC_CH_EN_CONTROL),
diff --git a/drivers/mfd/cgbc-core.c b/drivers/mfd/cgbc-core.c
index 85283c8dde25..4782ff1114a9 100644
--- a/drivers/mfd/cgbc-core.c
+++ b/drivers/mfd/cgbc-core.c
@@ -96,7 +96,7 @@ static int cgbc_session_command(struct cgbc_device_data *cgbc, u8 cmd)
static int cgbc_session_request(struct cgbc_device_data *cgbc)
{
- unsigned int ret;
+ int ret;
ret = cgbc_wait_device(cgbc);
@@ -236,6 +236,7 @@ static struct mfd_cell cgbc_devs[] = {
{ .name = "cgbc-gpio" },
{ .name = "cgbc-i2c", .id = 1 },
{ .name = "cgbc-i2c", .id = 2 },
+ { .name = "cgbc-hwmon" },
};
static int cgbc_map(struct cgbc_device_data *cgbc)
@@ -384,6 +385,13 @@ static const struct dmi_system_id cgbc_dmi_table[] __initconst = {
DMI_MATCH(DMI_BOARD_NAME, "conga-SA7"),
},
},
+ {
+ .ident = "SA8",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "congatec"),
+ DMI_MATCH(DMI_BOARD_NAME, "conga-SA8"),
+ },
+ },
{}
};
MODULE_DEVICE_TABLE(dmi, cgbc_dmi_table);
diff --git a/drivers/mfd/ene-kb3930.c b/drivers/mfd/ene-kb3930.c
index fa0ad2f14a39..9460a67acb0b 100644
--- a/drivers/mfd/ene-kb3930.c
+++ b/drivers/mfd/ene-kb3930.c
@@ -162,7 +162,7 @@ static int kb3930_probe(struct i2c_client *client)
devm_gpiod_get_array_optional(dev, "off", GPIOD_IN);
if (IS_ERR(ddata->off_gpios))
return PTR_ERR(ddata->off_gpios);
- if (ddata->off_gpios->ndescs < 2) {
+ if (ddata->off_gpios && ddata->off_gpios->ndescs < 2) {
dev_err(dev, "invalid off-gpios property\n");
return -EINVAL;
}
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index 8d006f6be48c..1be4557b7bdd 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -25,11 +25,6 @@ struct pcap_adc_request {
void *data;
};
-struct pcap_adc_sync_request {
- u16 res[2];
- struct completion completion;
-};
-
struct pcap_chip {
struct spi_device *spi;
@@ -335,34 +330,6 @@ int pcap_adc_async(struct pcap_chip *pcap, u8 bank, u32 flags, u8 ch[],
}
EXPORT_SYMBOL_GPL(pcap_adc_async);
-static void pcap_adc_sync_cb(void *param, u16 res[])
-{
- struct pcap_adc_sync_request *req = param;
-
- req->res[0] = res[0];
- req->res[1] = res[1];
- complete(&req->completion);
-}
-
-int pcap_adc_sync(struct pcap_chip *pcap, u8 bank, u32 flags, u8 ch[],
- u16 res[])
-{
- struct pcap_adc_sync_request sync_data;
- int ret;
-
- init_completion(&sync_data.completion);
- ret = pcap_adc_async(pcap, bank, flags, ch, pcap_adc_sync_cb,
- &sync_data);
- if (ret)
- return ret;
- wait_for_completion(&sync_data.completion);
- res[0] = sync_data.res[0];
- res[1] = sync_data.res[1];
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pcap_adc_sync);
-
/* subdevs */
static int pcap_remove_subdev(struct device *dev, void *unused)
{
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 3ba05ebb9035..63d6694f7145 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -480,7 +480,7 @@ EXPORT_SYMBOL_NS_GPL(intel_lpss_remove, "INTEL_LPSS");
static int resume_lpss_device(struct device *dev, void *data)
{
- if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
+ if (!dev_pm_smart_suspend(dev))
pm_runtime_resume(dev);
return 0;
diff --git a/drivers/mfd/intel_soc_pmic_chtdc_ti.c b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
index 8582ae65a802..4c1a68c9f575 100644
--- a/drivers/mfd/intel_soc_pmic_chtdc_ti.c
+++ b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
@@ -82,7 +82,6 @@ static const struct regmap_config chtdc_ti_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0xff,
- .cache_type = REGCACHE_NONE,
};
static const struct regmap_irq chtdc_ti_irqs[] = {
diff --git a/drivers/mfd/intel_soc_pmic_crc.c b/drivers/mfd/intel_soc_pmic_crc.c
index 879fbf5cd162..41429f9bcb69 100644
--- a/drivers/mfd/intel_soc_pmic_crc.c
+++ b/drivers/mfd/intel_soc_pmic_crc.c
@@ -113,7 +113,6 @@ static const struct regmap_config crystal_cove_regmap_config = {
.val_bits = 8,
.max_register = CRYSTAL_COVE_MAX_REGISTER,
- .cache_type = REGCACHE_NONE,
};
static const struct regmap_irq crystal_cove_irqs[] = {
diff --git a/drivers/mfd/ipaq-micro.c b/drivers/mfd/ipaq-micro.c
index 2370b44e2214..4b757d847282 100644
--- a/drivers/mfd/ipaq-micro.c
+++ b/drivers/mfd/ipaq-micro.c
@@ -22,6 +22,7 @@
#include <linux/mfd/core.h>
#include <linux/mfd/ipaq-micro.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/list.h>
@@ -267,7 +268,7 @@ static void __init ipaq_micro_eeprom_dump(struct ipaq_micro *micro)
dev_info(micro->dev, "page mode: %u\n", ipaq_micro_to_u16(dump+84));
dev_info(micro->dev, "country ID: %u\n", ipaq_micro_to_u16(dump+86));
dev_info(micro->dev, "color display: %s\n",
- ipaq_micro_to_u16(dump+88) ? "yes" : "no");
+ str_yes_no(ipaq_micro_to_u16(dump + 88)));
dev_info(micro->dev, "ROM size: %u MiB\n", ipaq_micro_to_u16(dump+90));
dev_info(micro->dev, "RAM size: %u KiB\n", ipaq_micro_to_u16(dump+92));
dev_info(micro->dev, "screen: %u x %u\n",
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index 89b30ef91f4f..21d2ab3db254 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -29,6 +29,7 @@
#include <linux/mfd/core.h>
#include <linux/mfd/max77620.h>
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -700,3 +701,7 @@ static struct i2c_driver max77620_driver = {
.id_table = max77620_id,
};
builtin_i2c_driver(max77620_driver);
+
+MODULE_DESCRIPTION("Maxim Semiconductor MAX77620 and MAX20024 PMIC Support");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/max77705.c b/drivers/mfd/max77705.c
new file mode 100644
index 000000000000..60c457c21d95
--- /dev/null
+++ b/drivers/mfd/max77705.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Maxim MAX77705 PMIC core driver
+ *
+ * Copyright (C) 2025 Dzmitry Sankouski <dsankouski@gmail.com>
+ **/
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/max77705-private.h>
+#include <linux/mfd/max77693-common.h>
+#include <linux/pm.h>
+#include <linux/power/max17042_battery.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+
+static struct mfd_cell max77705_devs[] = {
+ MFD_CELL_OF("max77705-rgb", NULL, NULL, 0, 0, "maxim,max77705-rgb"),
+ MFD_CELL_OF("max77705-charger", NULL, NULL, 0, 0, "maxim,max77705-charger"),
+ MFD_CELL_OF("max77705-haptic", NULL, NULL, 0, 0, "maxim,max77705-haptic"),
+};
+
+static const struct regmap_range max77705_readable_ranges[] = {
+ regmap_reg_range(MAX77705_PMIC_REG_PMICID1, MAX77705_PMIC_REG_BSTOUT_MASK),
+ regmap_reg_range(MAX77705_PMIC_REG_INTSRC, MAX77705_PMIC_REG_RESERVED_29),
+ regmap_reg_range(MAX77705_PMIC_REG_BOOSTCONTROL1, MAX77705_PMIC_REG_BOOSTCONTROL1),
+ regmap_reg_range(MAX77705_PMIC_REG_MCONFIG, MAX77705_PMIC_REG_MCONFIG2),
+ regmap_reg_range(MAX77705_PMIC_REG_FORCE_EN_MASK, MAX77705_PMIC_REG_FORCE_EN_MASK),
+ regmap_reg_range(MAX77705_PMIC_REG_BOOSTCONTROL1, MAX77705_PMIC_REG_BOOSTCONTROL1),
+ regmap_reg_range(MAX77705_PMIC_REG_BOOSTCONTROL2, MAX77705_PMIC_REG_BOOSTCONTROL2),
+ regmap_reg_range(MAX77705_PMIC_REG_SW_RESET, MAX77705_PMIC_REG_USBC_RESET),
+};
+
+static const struct regmap_range max77705_writable_ranges[] = {
+ regmap_reg_range(MAX77705_PMIC_REG_MAINCTRL1, MAX77705_PMIC_REG_BSTOUT_MASK),
+ regmap_reg_range(MAX77705_PMIC_REG_INTSRC, MAX77705_PMIC_REG_RESERVED_29),
+ regmap_reg_range(MAX77705_PMIC_REG_BOOSTCONTROL1, MAX77705_PMIC_REG_BOOSTCONTROL1),
+ regmap_reg_range(MAX77705_PMIC_REG_MCONFIG, MAX77705_PMIC_REG_MCONFIG2),
+ regmap_reg_range(MAX77705_PMIC_REG_FORCE_EN_MASK, MAX77705_PMIC_REG_FORCE_EN_MASK),
+ regmap_reg_range(MAX77705_PMIC_REG_BOOSTCONTROL1, MAX77705_PMIC_REG_BOOSTCONTROL1),
+ regmap_reg_range(MAX77705_PMIC_REG_BOOSTCONTROL2, MAX77705_PMIC_REG_BOOSTCONTROL2),
+ regmap_reg_range(MAX77705_PMIC_REG_SW_RESET, MAX77705_PMIC_REG_USBC_RESET),
+};
+
+static const struct regmap_access_table max77705_readable_table = {
+ .yes_ranges = max77705_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(max77705_readable_ranges),
+};
+
+static const struct regmap_access_table max77705_writable_table = {
+ .yes_ranges = max77705_writable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(max77705_writable_ranges),
+};
+
+static const struct regmap_config max77705_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .rd_table = &max77705_readable_table,
+ .wr_table = &max77705_writable_table,
+ .max_register = MAX77705_PMIC_REG_USBC_RESET,
+};
+
+static const struct regmap_irq max77705_topsys_irqs[] = {
+ { .mask = MAX77705_SYSTEM_IRQ_BSTEN_INT, },
+ { .mask = MAX77705_SYSTEM_IRQ_SYSUVLO_INT, },
+ { .mask = MAX77705_SYSTEM_IRQ_SYSOVLO_INT, },
+ { .mask = MAX77705_SYSTEM_IRQ_TSHDN_INT, },
+ { .mask = MAX77705_SYSTEM_IRQ_TM_INT, },
+};
+
+static const struct regmap_irq_chip max77705_topsys_irq_chip = {
+ .name = "max77705-topsys",
+ .status_base = MAX77705_PMIC_REG_SYSTEM_INT,
+ .mask_base = MAX77705_PMIC_REG_SYSTEM_INT_MASK,
+ .num_regs = 1,
+ .irqs = max77705_topsys_irqs,
+ .num_irqs = ARRAY_SIZE(max77705_topsys_irqs),
+};
+
+static int max77705_i2c_probe(struct i2c_client *i2c)
+{
+ struct device *dev = &i2c->dev;
+ struct max77693_dev *max77705;
+ struct regmap_irq_chip_data *irq_data;
+ struct irq_domain *domain;
+ enum max77705_hw_rev pmic_rev;
+ unsigned int pmic_rev_value;
+ int ret;
+
+ max77705 = devm_kzalloc(dev, sizeof(*max77705), GFP_KERNEL);
+ if (!max77705)
+ return -ENOMEM;
+
+ max77705->i2c = i2c;
+ max77705->type = TYPE_MAX77705;
+ i2c_set_clientdata(i2c, max77705);
+
+ max77705->regmap = devm_regmap_init_i2c(i2c, &max77705_regmap_config);
+ if (IS_ERR(max77705->regmap))
+ return PTR_ERR(max77705->regmap);
+
+ ret = regmap_read(max77705->regmap, MAX77705_PMIC_REG_PMICREV, &pmic_rev_value);
+ if (ret < 0)
+ return -ENODEV;
+
+ pmic_rev = pmic_rev_value & MAX77705_REVISION_MASK;
+ if (pmic_rev != MAX77705_PASS3)
+ return dev_err_probe(dev, -ENODEV, "Rev.0x%x is not tested\n", pmic_rev);
+
+ ret = devm_regmap_add_irq_chip(dev, max77705->regmap,
+ i2c->irq,
+ IRQF_ONESHOT | IRQF_SHARED, 0,
+ &max77705_topsys_irq_chip,
+ &irq_data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add IRQ chip\n");
+
+ /* Unmask interrupts from all blocks in interrupt source register */
+ ret = regmap_update_bits(max77705->regmap,
+ MAX77705_PMIC_REG_INTSRC_MASK,
+ MAX77705_SRC_IRQ_ALL, (unsigned int)~MAX77705_SRC_IRQ_ALL);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Could not unmask interrupts in INTSRC\n");
+
+ domain = regmap_irq_get_domain(irq_data);
+
+ ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE,
+ max77705_devs, ARRAY_SIZE(max77705_devs),
+ NULL, 0, domain);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register child devices\n");
+
+ device_init_wakeup(dev, true);
+
+ return 0;
+}
+
+static int max77705_suspend(struct device *dev)
+{
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ disable_irq(i2c->irq);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(i2c->irq);
+
+ return 0;
+}
+
+static int max77705_resume(struct device *dev)
+{
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(i2c->irq);
+
+ enable_irq(i2c->irq);
+
+ return 0;
+}
+DEFINE_SIMPLE_DEV_PM_OPS(max77705_pm_ops, max77705_suspend, max77705_resume);
+
+static const struct of_device_id max77705_i2c_of_match[] = {
+ { .compatible = "maxim,max77705" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max77705_i2c_of_match);
+
+static struct i2c_driver max77705_i2c_driver = {
+ .driver = {
+ .name = "max77705",
+ .of_match_table = max77705_i2c_of_match,
+ .pm = pm_sleep_ptr(&max77705_pm_ops),
+ },
+ .probe = max77705_i2c_probe
+};
+module_i2c_driver(max77705_i2c_driver);
+
+MODULE_DESCRIPTION("Maxim MAX77705 PMIC core driver");
+MODULE_AUTHOR("Dzmitry Sankouski <dsankouski@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/max8997-irq.c b/drivers/mfd/max8997-irq.c
index 93a3b1698d9c..92e348df03d1 100644
--- a/drivers/mfd/max8997-irq.c
+++ b/drivers/mfd/max8997-irq.c
@@ -335,7 +335,8 @@ int max8997_irq_init(struct max8997_dev *max8997)
}
max8997->irq_domain = domain;
- ret = request_threaded_irq(max8997->irq, NULL, max8997_irq_thread,
+ ret = devm_request_threaded_irq(max8997->dev, max8997->irq, NULL,
+ max8997_irq_thread,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"max8997-irq", max8997);
@@ -348,7 +349,8 @@ int max8997_irq_init(struct max8997_dev *max8997)
if (!max8997->ono)
return 0;
- ret = request_threaded_irq(max8997->ono, NULL, max8997_irq_thread,
+ ret = devm_request_threaded_irq(max8997->dev, max8997->ono, NULL,
+ max8997_irq_thread,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
IRQF_ONESHOT, "max8997-ono", max8997);
@@ -358,12 +360,3 @@ int max8997_irq_init(struct max8997_dev *max8997)
return 0;
}
-
-void max8997_irq_exit(struct max8997_dev *max8997)
-{
- if (max8997->ono)
- free_irq(max8997->ono, max8997);
-
- if (max8997->irq)
- free_irq(max8997->irq, max8997);
-}
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 0e5d59ae064a..5f8ed8988907 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -84,6 +84,12 @@ static const struct resource mt6359_keys_resources[] = {
DEFINE_RES_IRQ_NAMED(MT6359_IRQ_HOMEKEY_R, "homekey_r"),
};
+static const struct resource mt6359_accdet_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MT6359_IRQ_ACCDET, "accdet_irq"),
+ DEFINE_RES_IRQ_NAMED(MT6359_IRQ_ACCDET_EINT0, "accdet_eint0"),
+ DEFINE_RES_IRQ_NAMED(MT6359_IRQ_ACCDET_EINT1, "accdet_eint1"),
+};
+
static const struct resource mt6323_keys_resources[] = {
DEFINE_RES_IRQ_NAMED(MT6323_IRQ_STATUS_PWRKEY, "powerkey"),
DEFINE_RES_IRQ_NAMED(MT6323_IRQ_STATUS_FCHRKEY, "homekey"),
@@ -239,6 +245,12 @@ static const struct mfd_cell mt6359_devs[] = {
.resources = mt6359_keys_resources,
.of_compatible = "mediatek,mt6359-keys"
},
+ {
+ .name = "mt6359-accdet",
+ .of_compatible = "mediatek,mt6359-accdet",
+ .num_resources = ARRAY_SIZE(mt6359_accdet_resources),
+ .resources = mt6359_accdet_resources,
+ },
};
static const struct mfd_cell mt6397_devs[] = {
diff --git a/drivers/mfd/pcf50633-adc.c b/drivers/mfd/pcf50633-adc.c
deleted file mode 100644
index 1fbba0e666d5..000000000000
--- a/drivers/mfd/pcf50633-adc.c
+++ /dev/null
@@ -1,255 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* NXP PCF50633 ADC Driver
- *
- * (C) 2006-2008 by Openmoko, Inc.
- * Author: Balaji Rao <balajirrao@openmoko.org>
- * All rights reserved.
- *
- * Broken down from monstrous PCF50633 driver mainly by
- * Harald Welte, Andy Green and Werner Almesberger
- *
- * NOTE: This driver does not yet support subtractive ADC mode, which means
- * you can do only one measurement per read request.
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/completion.h>
-
-#include <linux/mfd/pcf50633/core.h>
-#include <linux/mfd/pcf50633/adc.h>
-
-struct pcf50633_adc_request {
- int mux;
- int avg;
- void (*callback)(struct pcf50633 *, void *, int);
- void *callback_param;
-};
-
-struct pcf50633_adc_sync_request {
- int result;
- struct completion completion;
-};
-
-#define PCF50633_MAX_ADC_FIFO_DEPTH 8
-
-struct pcf50633_adc {
- struct pcf50633 *pcf;
-
- /* Private stuff */
- struct pcf50633_adc_request *queue[PCF50633_MAX_ADC_FIFO_DEPTH];
- int queue_head;
- int queue_tail;
- struct mutex queue_mutex;
-};
-
-static inline struct pcf50633_adc *__to_adc(struct pcf50633 *pcf)
-{
- return platform_get_drvdata(pcf->adc_pdev);
-}
-
-static void adc_setup(struct pcf50633 *pcf, int channel, int avg)
-{
- channel &= PCF50633_ADCC1_ADCMUX_MASK;
-
- /* kill ratiometric, but enable ACCSW biasing */
- pcf50633_reg_write(pcf, PCF50633_REG_ADCC2, 0x00);
- pcf50633_reg_write(pcf, PCF50633_REG_ADCC3, 0x01);
-
- /* start ADC conversion on selected channel */
- pcf50633_reg_write(pcf, PCF50633_REG_ADCC1, channel | avg |
- PCF50633_ADCC1_ADCSTART | PCF50633_ADCC1_RES_10BIT);
-}
-
-static void trigger_next_adc_job_if_any(struct pcf50633 *pcf)
-{
- struct pcf50633_adc *adc = __to_adc(pcf);
- int head;
-
- head = adc->queue_head;
-
- if (!adc->queue[head])
- return;
-
- adc_setup(pcf, adc->queue[head]->mux, adc->queue[head]->avg);
-}
-
-static int
-adc_enqueue_request(struct pcf50633 *pcf, struct pcf50633_adc_request *req)
-{
- struct pcf50633_adc *adc = __to_adc(pcf);
- int head, tail;
-
- mutex_lock(&adc->queue_mutex);
-
- head = adc->queue_head;
- tail = adc->queue_tail;
-
- if (adc->queue[tail]) {
- mutex_unlock(&adc->queue_mutex);
- dev_err(pcf->dev, "ADC queue is full, dropping request\n");
- return -EBUSY;
- }
-
- adc->queue[tail] = req;
- if (head == tail)
- trigger_next_adc_job_if_any(pcf);
- adc->queue_tail = (tail + 1) & (PCF50633_MAX_ADC_FIFO_DEPTH - 1);
-
- mutex_unlock(&adc->queue_mutex);
-
- return 0;
-}
-
-static void pcf50633_adc_sync_read_callback(struct pcf50633 *pcf, void *param,
- int result)
-{
- struct pcf50633_adc_sync_request *req = param;
-
- req->result = result;
- complete(&req->completion);
-}
-
-int pcf50633_adc_sync_read(struct pcf50633 *pcf, int mux, int avg)
-{
- struct pcf50633_adc_sync_request req;
- int ret;
-
- init_completion(&req.completion);
-
- ret = pcf50633_adc_async_read(pcf, mux, avg,
- pcf50633_adc_sync_read_callback, &req);
- if (ret)
- return ret;
-
- wait_for_completion(&req.completion);
-
- return req.result;
-}
-EXPORT_SYMBOL_GPL(pcf50633_adc_sync_read);
-
-int pcf50633_adc_async_read(struct pcf50633 *pcf, int mux, int avg,
- void (*callback)(struct pcf50633 *, void *, int),
- void *callback_param)
-{
- struct pcf50633_adc_request *req;
- int ret;
-
- /* req is freed when the result is ready, in interrupt handler */
- req = kmalloc(sizeof(*req), GFP_KERNEL);
- if (!req)
- return -ENOMEM;
-
- req->mux = mux;
- req->avg = avg;
- req->callback = callback;
- req->callback_param = callback_param;
-
- ret = adc_enqueue_request(pcf, req);
- if (ret)
- kfree(req);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(pcf50633_adc_async_read);
-
-static int adc_result(struct pcf50633 *pcf)
-{
- u8 adcs1, adcs3;
- u16 result;
-
- adcs1 = pcf50633_reg_read(pcf, PCF50633_REG_ADCS1);
- adcs3 = pcf50633_reg_read(pcf, PCF50633_REG_ADCS3);
- result = (adcs1 << 2) | (adcs3 & PCF50633_ADCS3_ADCDAT1L_MASK);
-
- dev_dbg(pcf->dev, "adc result = %d\n", result);
-
- return result;
-}
-
-static void pcf50633_adc_irq(int irq, void *data)
-{
- struct pcf50633_adc *adc = data;
- struct pcf50633 *pcf = adc->pcf;
- struct pcf50633_adc_request *req;
- int head, res;
-
- mutex_lock(&adc->queue_mutex);
- head = adc->queue_head;
-
- req = adc->queue[head];
- if (WARN_ON(!req)) {
- dev_err(pcf->dev, "pcf50633-adc irq: ADC queue empty!\n");
- mutex_unlock(&adc->queue_mutex);
- return;
- }
- adc->queue[head] = NULL;
- adc->queue_head = (head + 1) &
- (PCF50633_MAX_ADC_FIFO_DEPTH - 1);
-
- res = adc_result(pcf);
- trigger_next_adc_job_if_any(pcf);
-
- mutex_unlock(&adc->queue_mutex);
-
- req->callback(pcf, req->callback_param, res);
- kfree(req);
-}
-
-static int pcf50633_adc_probe(struct platform_device *pdev)
-{
- struct pcf50633_adc *adc;
-
- adc = devm_kzalloc(&pdev->dev, sizeof(*adc), GFP_KERNEL);
- if (!adc)
- return -ENOMEM;
-
- adc->pcf = dev_to_pcf50633(pdev->dev.parent);
- platform_set_drvdata(pdev, adc);
-
- pcf50633_register_irq(adc->pcf, PCF50633_IRQ_ADCRDY,
- pcf50633_adc_irq, adc);
-
- mutex_init(&adc->queue_mutex);
-
- return 0;
-}
-
-static void pcf50633_adc_remove(struct platform_device *pdev)
-{
- struct pcf50633_adc *adc = platform_get_drvdata(pdev);
- int i, head;
-
- pcf50633_free_irq(adc->pcf, PCF50633_IRQ_ADCRDY);
-
- mutex_lock(&adc->queue_mutex);
- head = adc->queue_head;
-
- if (WARN_ON(adc->queue[head]))
- dev_err(adc->pcf->dev,
- "adc driver removed with request pending\n");
-
- for (i = 0; i < PCF50633_MAX_ADC_FIFO_DEPTH; i++)
- kfree(adc->queue[i]);
-
- mutex_unlock(&adc->queue_mutex);
-}
-
-static struct platform_driver pcf50633_adc_driver = {
- .driver = {
- .name = "pcf50633-adc",
- },
- .probe = pcf50633_adc_probe,
- .remove = pcf50633_adc_remove,
-};
-
-module_platform_driver(pcf50633_adc_driver);
-
-MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
-MODULE_DESCRIPTION("PCF50633 adc driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:pcf50633-adc");
-
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
deleted file mode 100644
index 014a68711b18..000000000000
--- a/drivers/mfd/pcf50633-core.c
+++ /dev/null
@@ -1,304 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* NXP PCF50633 Power Management Unit (PMU) driver
- *
- * (C) 2006-2008 by Openmoko, Inc.
- * Author: Harald Welte <laforge@openmoko.org>
- * Balaji Rao <balajirrao@openmoko.org>
- * All rights reserved.
- */
-
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/sysfs.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/workqueue.h>
-#include <linux/platform_device.h>
-#include <linux/i2c.h>
-#include <linux/pm.h>
-#include <linux/slab.h>
-#include <linux/regmap.h>
-#include <linux/err.h>
-
-#include <linux/mfd/pcf50633/core.h>
-
-/* Read a block of up to 32 regs */
-int pcf50633_read_block(struct pcf50633 *pcf, u8 reg,
- int nr_regs, u8 *data)
-{
- int ret;
-
- ret = regmap_raw_read(pcf->regmap, reg, data, nr_regs);
- if (ret != 0)
- return ret;
-
- return nr_regs;
-}
-EXPORT_SYMBOL_GPL(pcf50633_read_block);
-
-/* Write a block of up to 32 regs */
-int pcf50633_write_block(struct pcf50633 *pcf , u8 reg,
- int nr_regs, u8 *data)
-{
- return regmap_raw_write(pcf->regmap, reg, data, nr_regs);
-}
-EXPORT_SYMBOL_GPL(pcf50633_write_block);
-
-u8 pcf50633_reg_read(struct pcf50633 *pcf, u8 reg)
-{
- unsigned int val;
- int ret;
-
- ret = regmap_read(pcf->regmap, reg, &val);
- if (ret < 0)
- return -1;
-
- return val;
-}
-EXPORT_SYMBOL_GPL(pcf50633_reg_read);
-
-int pcf50633_reg_write(struct pcf50633 *pcf, u8 reg, u8 val)
-{
- return regmap_write(pcf->regmap, reg, val);
-}
-EXPORT_SYMBOL_GPL(pcf50633_reg_write);
-
-int pcf50633_reg_set_bit_mask(struct pcf50633 *pcf, u8 reg, u8 mask, u8 val)
-{
- return regmap_update_bits(pcf->regmap, reg, mask, val);
-}
-EXPORT_SYMBOL_GPL(pcf50633_reg_set_bit_mask);
-
-int pcf50633_reg_clear_bits(struct pcf50633 *pcf, u8 reg, u8 val)
-{
- return regmap_update_bits(pcf->regmap, reg, val, 0);
-}
-EXPORT_SYMBOL_GPL(pcf50633_reg_clear_bits);
-
-/* sysfs attributes */
-static ssize_t dump_regs_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pcf50633 *pcf = dev_get_drvdata(dev);
- u8 dump[16];
- int n, n1, idx = 0;
- char *buf1 = buf;
- static u8 address_no_read[] = { /* must be ascending */
- PCF50633_REG_INT1,
- PCF50633_REG_INT2,
- PCF50633_REG_INT3,
- PCF50633_REG_INT4,
- PCF50633_REG_INT5,
- 0 /* terminator */
- };
-
- for (n = 0; n < 256; n += sizeof(dump)) {
- for (n1 = 0; n1 < sizeof(dump); n1++)
- if (n == address_no_read[idx]) {
- idx++;
- dump[n1] = 0x00;
- } else
- dump[n1] = pcf50633_reg_read(pcf, n + n1);
-
- buf1 += sprintf(buf1, "%*ph\n", (int)sizeof(dump), dump);
- }
-
- return buf1 - buf;
-}
-static DEVICE_ATTR_ADMIN_RO(dump_regs);
-
-static ssize_t resume_reason_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pcf50633 *pcf = dev_get_drvdata(dev);
- int n;
-
- n = sprintf(buf, "%02x%02x%02x%02x%02x\n",
- pcf->resume_reason[0],
- pcf->resume_reason[1],
- pcf->resume_reason[2],
- pcf->resume_reason[3],
- pcf->resume_reason[4]);
-
- return n;
-}
-static DEVICE_ATTR_ADMIN_RO(resume_reason);
-
-static struct attribute *pcf_sysfs_entries[] = {
- &dev_attr_dump_regs.attr,
- &dev_attr_resume_reason.attr,
- NULL,
-};
-
-static struct attribute_group pcf_attr_group = {
- .name = NULL, /* put in device directory */
- .attrs = pcf_sysfs_entries,
-};
-
-static void
-pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name,
- struct platform_device **pdev)
-{
- int ret;
-
- *pdev = platform_device_alloc(name, -1);
- if (!*pdev) {
- dev_err(pcf->dev, "Failed to allocate %s\n", name);
- return;
- }
-
- (*pdev)->dev.parent = pcf->dev;
-
- ret = platform_device_add(*pdev);
- if (ret) {
- dev_err(pcf->dev, "Failed to register %s: %d\n", name, ret);
- platform_device_put(*pdev);
- *pdev = NULL;
- }
-}
-
-static const struct regmap_config pcf50633_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-};
-
-static int pcf50633_probe(struct i2c_client *client)
-{
- struct pcf50633 *pcf;
- struct platform_device *pdev;
- struct pcf50633_platform_data *pdata = dev_get_platdata(&client->dev);
- int i, j, ret;
- int version, variant;
-
- if (!client->irq) {
- dev_err(&client->dev, "Missing IRQ\n");
- return -ENOENT;
- }
-
- pcf = devm_kzalloc(&client->dev, sizeof(*pcf), GFP_KERNEL);
- if (!pcf)
- return -ENOMEM;
-
- i2c_set_clientdata(client, pcf);
- pcf->dev = &client->dev;
- pcf->pdata = pdata;
-
- mutex_init(&pcf->lock);
-
- pcf->regmap = devm_regmap_init_i2c(client, &pcf50633_regmap_config);
- if (IS_ERR(pcf->regmap)) {
- ret = PTR_ERR(pcf->regmap);
- dev_err(pcf->dev, "Failed to allocate register map: %d\n", ret);
- return ret;
- }
-
- version = pcf50633_reg_read(pcf, 0);
- variant = pcf50633_reg_read(pcf, 1);
- if (version < 0 || variant < 0) {
- dev_err(pcf->dev, "Unable to probe pcf50633\n");
- ret = -ENODEV;
- return ret;
- }
-
- dev_info(pcf->dev, "Probed device version %d variant %d\n",
- version, variant);
-
- pcf50633_irq_init(pcf, client->irq);
-
- /* Create sub devices */
- pcf50633_client_dev_register(pcf, "pcf50633-input", &pcf->input_pdev);
- pcf50633_client_dev_register(pcf, "pcf50633-rtc", &pcf->rtc_pdev);
- pcf50633_client_dev_register(pcf, "pcf50633-mbc", &pcf->mbc_pdev);
- pcf50633_client_dev_register(pcf, "pcf50633-adc", &pcf->adc_pdev);
- pcf50633_client_dev_register(pcf, "pcf50633-backlight", &pcf->bl_pdev);
-
-
- for (i = 0; i < PCF50633_NUM_REGULATORS; i++) {
- pdev = platform_device_alloc("pcf50633-regulator", i);
- if (!pdev) {
- ret = -ENOMEM;
- goto err2;
- }
-
- pdev->dev.parent = pcf->dev;
- ret = platform_device_add_data(pdev, &pdata->reg_init_data[i],
- sizeof(pdata->reg_init_data[i]));
- if (ret)
- goto err;
-
- ret = platform_device_add(pdev);
- if (ret)
- goto err;
-
- pcf->regulator_pdev[i] = pdev;
- }
-
- ret = sysfs_create_group(&client->dev.kobj, &pcf_attr_group);
- if (ret)
- dev_warn(pcf->dev, "error creating sysfs entries\n");
-
- if (pdata->probe_done)
- pdata->probe_done(pcf);
-
- return 0;
-
-err:
- platform_device_put(pdev);
-err2:
- for (j = 0; j < i; j++)
- platform_device_put(pcf->regulator_pdev[j]);
-
- return ret;
-}
-
-static void pcf50633_remove(struct i2c_client *client)
-{
- struct pcf50633 *pcf = i2c_get_clientdata(client);
- int i;
-
- sysfs_remove_group(&client->dev.kobj, &pcf_attr_group);
- pcf50633_irq_free(pcf);
-
- platform_device_unregister(pcf->input_pdev);
- platform_device_unregister(pcf->rtc_pdev);
- platform_device_unregister(pcf->mbc_pdev);
- platform_device_unregister(pcf->adc_pdev);
- platform_device_unregister(pcf->bl_pdev);
-
- for (i = 0; i < PCF50633_NUM_REGULATORS; i++)
- platform_device_unregister(pcf->regulator_pdev[i]);
-}
-
-static const struct i2c_device_id pcf50633_id_table[] = {
- {"pcf50633", 0x73},
- {/* end of list */}
-};
-MODULE_DEVICE_TABLE(i2c, pcf50633_id_table);
-
-static struct i2c_driver pcf50633_driver = {
- .driver = {
- .name = "pcf50633",
- .pm = pm_sleep_ptr(&pcf50633_pm),
- },
- .id_table = pcf50633_id_table,
- .probe = pcf50633_probe,
- .remove = pcf50633_remove,
-};
-
-static int __init pcf50633_init(void)
-{
- return i2c_add_driver(&pcf50633_driver);
-}
-
-static void __exit pcf50633_exit(void)
-{
- i2c_del_driver(&pcf50633_driver);
-}
-
-MODULE_DESCRIPTION("I2C chip driver for NXP PCF50633 PMU");
-MODULE_AUTHOR("Harald Welte <laforge@openmoko.org>");
-MODULE_LICENSE("GPL");
-
-subsys_initcall(pcf50633_init);
-module_exit(pcf50633_exit);
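
The register helpers deleted above are thin wrappers around the regmap API. For reference, a minimal sketch of the same read-modify-write pattern expressed directly on a regmap; the function and parameter names below are placeholders, not part of this patch.

#include <linux/regmap.h>

/* Equivalent of pcf50633_reg_set_bit_mask(): set the bits in @mask to @val. */
static int demo_set_bit_mask(struct regmap *map, unsigned int reg,
			     unsigned int mask, unsigned int val)
{
	return regmap_update_bits(map, reg, mask, val);
}

/* Equivalent of pcf50633_reg_clear_bits(): clear the bits in @bits. */
static int demo_clear_bits(struct regmap *map, unsigned int reg,
			   unsigned int bits)
{
	return regmap_update_bits(map, reg, bits, 0);
}
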
diff --git a/drivers/mfd/pcf50633-gpio.c b/drivers/mfd/pcf50633-gpio.c
deleted file mode 100644
index 3e368219479a..000000000000
--- a/drivers/mfd/pcf50633-gpio.c
+++ /dev/null
@@ -1,92 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* NXP PCF50633 GPIO Driver
- *
- * (C) 2006-2008 by Openmoko, Inc.
- * Author: Balaji Rao <balajirrao@openmoko.org>
- * All rights reserved.
- *
- * Broken down from monstrous PCF50633 driver mainly by
- * Harald Welte, Andy Green and Werner Almesberger
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include <linux/mfd/pcf50633/core.h>
-#include <linux/mfd/pcf50633/gpio.h>
-#include <linux/mfd/pcf50633/pmic.h>
-
-static const u8 pcf50633_regulator_registers[PCF50633_NUM_REGULATORS] = {
- [PCF50633_REGULATOR_AUTO] = PCF50633_REG_AUTOOUT,
- [PCF50633_REGULATOR_DOWN1] = PCF50633_REG_DOWN1OUT,
- [PCF50633_REGULATOR_DOWN2] = PCF50633_REG_DOWN2OUT,
- [PCF50633_REGULATOR_MEMLDO] = PCF50633_REG_MEMLDOOUT,
- [PCF50633_REGULATOR_LDO1] = PCF50633_REG_LDO1OUT,
- [PCF50633_REGULATOR_LDO2] = PCF50633_REG_LDO2OUT,
- [PCF50633_REGULATOR_LDO3] = PCF50633_REG_LDO3OUT,
- [PCF50633_REGULATOR_LDO4] = PCF50633_REG_LDO4OUT,
- [PCF50633_REGULATOR_LDO5] = PCF50633_REG_LDO5OUT,
- [PCF50633_REGULATOR_LDO6] = PCF50633_REG_LDO6OUT,
- [PCF50633_REGULATOR_HCLDO] = PCF50633_REG_HCLDOOUT,
-};
-
-int pcf50633_gpio_set(struct pcf50633 *pcf, int gpio, u8 val)
-{
- u8 reg;
-
- reg = gpio - PCF50633_GPIO1 + PCF50633_REG_GPIO1CFG;
-
- return pcf50633_reg_set_bit_mask(pcf, reg, 0x07, val);
-}
-EXPORT_SYMBOL_GPL(pcf50633_gpio_set);
-
-u8 pcf50633_gpio_get(struct pcf50633 *pcf, int gpio)
-{
- u8 reg, val;
-
- reg = gpio - PCF50633_GPIO1 + PCF50633_REG_GPIO1CFG;
- val = pcf50633_reg_read(pcf, reg) & 0x07;
-
- return val;
-}
-EXPORT_SYMBOL_GPL(pcf50633_gpio_get);
-
-int pcf50633_gpio_invert_set(struct pcf50633 *pcf, int gpio, int invert)
-{
- u8 val, reg;
-
- reg = gpio - PCF50633_GPIO1 + PCF50633_REG_GPIO1CFG;
- val = !!invert << 3;
-
- return pcf50633_reg_set_bit_mask(pcf, reg, 1 << 3, val);
-}
-EXPORT_SYMBOL_GPL(pcf50633_gpio_invert_set);
-
-int pcf50633_gpio_invert_get(struct pcf50633 *pcf, int gpio)
-{
- u8 reg, val;
-
- reg = gpio - PCF50633_GPIO1 + PCF50633_REG_GPIO1CFG;
- val = pcf50633_reg_read(pcf, reg);
-
- return val & (1 << 3);
-}
-EXPORT_SYMBOL_GPL(pcf50633_gpio_invert_get);
-
-int pcf50633_gpio_power_supply_set(struct pcf50633 *pcf,
- int gpio, int regulator, int on)
-{
- u8 reg, val, mask;
-
- /* the *ENA register is always one after the *OUT register */
- reg = pcf50633_regulator_registers[regulator] + 1;
-
- val = !!on << (gpio - PCF50633_GPIO1);
- mask = 1 << (gpio - PCF50633_GPIO1);
-
- return pcf50633_reg_set_bit_mask(pcf, reg, mask, val);
-}
-EXPORT_SYMBOL_GPL(pcf50633_gpio_power_supply_set);
-
-MODULE_DESCRIPTION("NXP PCF50633 GPIO Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/pcf50633-irq.c b/drivers/mfd/pcf50633-irq.c
deleted file mode 100644
index e85af7f1cb0b..000000000000
--- a/drivers/mfd/pcf50633-irq.c
+++ /dev/null
@@ -1,312 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* NXP PCF50633 Power Management Unit (PMU) driver
- *
- * (C) 2006-2008 by Openmoko, Inc.
- * Author: Harald Welte <laforge@openmoko.org>
- * Balaji Rao <balajirrao@openmoko.org>
- * All rights reserved.
- */
-
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/mutex.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-
-#include <linux/mfd/pcf50633/core.h>
-#include <linux/mfd/pcf50633/mbc.h>
-
-int pcf50633_register_irq(struct pcf50633 *pcf, int irq,
- void (*handler) (int, void *), void *data)
-{
- if (irq < 0 || irq >= PCF50633_NUM_IRQ || !handler)
- return -EINVAL;
-
- if (WARN_ON(pcf->irq_handler[irq].handler))
- return -EBUSY;
-
- mutex_lock(&pcf->lock);
- pcf->irq_handler[irq].handler = handler;
- pcf->irq_handler[irq].data = data;
- mutex_unlock(&pcf->lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pcf50633_register_irq);
-
-int pcf50633_free_irq(struct pcf50633 *pcf, int irq)
-{
- if (irq < 0 || irq >= PCF50633_NUM_IRQ)
- return -EINVAL;
-
- mutex_lock(&pcf->lock);
- pcf->irq_handler[irq].handler = NULL;
- mutex_unlock(&pcf->lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pcf50633_free_irq);
-
-static int __pcf50633_irq_mask_set(struct pcf50633 *pcf, int irq, u8 mask)
-{
- u8 reg, bit;
- int idx;
-
- idx = irq >> 3;
- reg = PCF50633_REG_INT1M + idx;
- bit = 1 << (irq & 0x07);
-
- pcf50633_reg_set_bit_mask(pcf, reg, bit, mask ? bit : 0);
-
- mutex_lock(&pcf->lock);
-
- if (mask)
- pcf->mask_regs[idx] |= bit;
- else
- pcf->mask_regs[idx] &= ~bit;
-
- mutex_unlock(&pcf->lock);
-
- return 0;
-}
-
-int pcf50633_irq_mask(struct pcf50633 *pcf, int irq)
-{
- dev_dbg(pcf->dev, "Masking IRQ %d\n", irq);
-
- return __pcf50633_irq_mask_set(pcf, irq, 1);
-}
-EXPORT_SYMBOL_GPL(pcf50633_irq_mask);
-
-int pcf50633_irq_unmask(struct pcf50633 *pcf, int irq)
-{
- dev_dbg(pcf->dev, "Unmasking IRQ %d\n", irq);
-
- return __pcf50633_irq_mask_set(pcf, irq, 0);
-}
-EXPORT_SYMBOL_GPL(pcf50633_irq_unmask);
-
-int pcf50633_irq_mask_get(struct pcf50633 *pcf, int irq)
-{
- u8 reg, bits;
-
- reg = irq >> 3;
- bits = 1 << (irq & 0x07);
-
- return pcf->mask_regs[reg] & bits;
-}
-EXPORT_SYMBOL_GPL(pcf50633_irq_mask_get);
-
-static void pcf50633_irq_call_handler(struct pcf50633 *pcf, int irq)
-{
- if (pcf->irq_handler[irq].handler)
- pcf->irq_handler[irq].handler(irq, pcf->irq_handler[irq].data);
-}
-
-/* Maximum amount of time ONKEY is held before emergency action is taken */
-#define PCF50633_ONKEY1S_TIMEOUT 8
-
-static irqreturn_t pcf50633_irq(int irq, void *data)
-{
- struct pcf50633 *pcf = data;
- int ret, i, j;
- u8 pcf_int[5], chgstat;
-
- /* Read the 5 INT regs in one transaction */
- ret = pcf50633_read_block(pcf, PCF50633_REG_INT1,
- ARRAY_SIZE(pcf_int), pcf_int);
- if (ret != ARRAY_SIZE(pcf_int)) {
- dev_err(pcf->dev, "Error reading INT registers\n");
-
- /*
- * If this doesn't ACK the interrupt to the chip, we'll be
- * called once again as we're level triggered.
- */
- goto out;
- }
-
- /* defeat 8s death from lowsys on A5 */
- pcf50633_reg_write(pcf, PCF50633_REG_OOCSHDWN, 0x04);
-
-	/* We immediately read the USB and adapter status, so that only one
-	 * of the USBINS/USBREM IRQ handlers is called */
- if (pcf_int[0] & (PCF50633_INT1_USBINS | PCF50633_INT1_USBREM)) {
- chgstat = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2);
- if (chgstat & (0x3 << 4))
- pcf_int[0] &= ~PCF50633_INT1_USBREM;
- else
- pcf_int[0] &= ~PCF50633_INT1_USBINS;
- }
-
- /* Make sure only one of ADPINS or ADPREM is set */
- if (pcf_int[0] & (PCF50633_INT1_ADPINS | PCF50633_INT1_ADPREM)) {
- chgstat = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2);
- if (chgstat & (0x3 << 4))
- pcf_int[0] &= ~PCF50633_INT1_ADPREM;
- else
- pcf_int[0] &= ~PCF50633_INT1_ADPINS;
- }
-
- dev_dbg(pcf->dev, "INT1=0x%02x INT2=0x%02x INT3=0x%02x "
- "INT4=0x%02x INT5=0x%02x\n", pcf_int[0],
- pcf_int[1], pcf_int[2], pcf_int[3], pcf_int[4]);
-
-	/* Some revisions of the chip don't have an 8s standby mode on
-	 * ONKEY1S press. We try to do it manually in such cases. */
- if ((pcf_int[0] & PCF50633_INT1_SECOND) && pcf->onkey1s_held) {
- dev_info(pcf->dev, "ONKEY1S held for %d secs\n",
- pcf->onkey1s_held);
- if (pcf->onkey1s_held++ == PCF50633_ONKEY1S_TIMEOUT)
- if (pcf->pdata->force_shutdown)
- pcf->pdata->force_shutdown(pcf);
- }
-
- if (pcf_int[2] & PCF50633_INT3_ONKEY1S) {
- dev_info(pcf->dev, "ONKEY1S held\n");
-		pcf->onkey1s_held = 1;
-
- /* Unmask IRQ_SECOND */
- pcf50633_reg_clear_bits(pcf, PCF50633_REG_INT1M,
- PCF50633_INT1_SECOND);
-
- /* Unmask IRQ_ONKEYR */
- pcf50633_reg_clear_bits(pcf, PCF50633_REG_INT2M,
- PCF50633_INT2_ONKEYR);
- }
-
- if ((pcf_int[1] & PCF50633_INT2_ONKEYR) && pcf->onkey1s_held) {
- pcf->onkey1s_held = 0;
-
- /* Mask SECOND and ONKEYR interrupts */
- if (pcf->mask_regs[0] & PCF50633_INT1_SECOND)
- pcf50633_reg_set_bit_mask(pcf,
- PCF50633_REG_INT1M,
- PCF50633_INT1_SECOND,
- PCF50633_INT1_SECOND);
-
- if (pcf->mask_regs[1] & PCF50633_INT2_ONKEYR)
- pcf50633_reg_set_bit_mask(pcf,
- PCF50633_REG_INT2M,
- PCF50633_INT2_ONKEYR,
- PCF50633_INT2_ONKEYR);
- }
-
-	/* Have we just resumed? */
- if (pcf->is_suspended) {
- pcf->is_suspended = 0;
-
- /* Set the resume reason filtering out non resumers */
- for (i = 0; i < ARRAY_SIZE(pcf_int); i++)
- pcf->resume_reason[i] = pcf_int[i] &
- pcf->pdata->resumers[i];
-
- /* Make sure we don't pass on any ONKEY events to
- * userspace now */
- pcf_int[1] &= ~(PCF50633_INT2_ONKEYR | PCF50633_INT2_ONKEYF);
- }
-
- for (i = 0; i < ARRAY_SIZE(pcf_int); i++) {
- /* Unset masked interrupts */
- pcf_int[i] &= ~pcf->mask_regs[i];
-
- for (j = 0; j < 8 ; j++)
- if (pcf_int[i] & (1 << j))
- pcf50633_irq_call_handler(pcf, (i * 8) + j);
- }
-
-out:
- return IRQ_HANDLED;
-}
-
-static int pcf50633_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct pcf50633 *pcf = i2c_get_clientdata(client);
- int ret;
- int i;
- u8 res[5];
-
-
- /* Make sure our interrupt handlers are not called
- * henceforth */
- disable_irq(pcf->irq);
-
- /* Save the masks */
- ret = pcf50633_read_block(pcf, PCF50633_REG_INT1M,
- ARRAY_SIZE(pcf->suspend_irq_masks),
- pcf->suspend_irq_masks);
- if (ret < 0) {
- dev_err(pcf->dev, "error saving irq masks\n");
- goto out;
- }
-
- /* Write wakeup irq masks */
- for (i = 0; i < ARRAY_SIZE(res); i++)
- res[i] = ~pcf->pdata->resumers[i];
-
- ret = pcf50633_write_block(pcf, PCF50633_REG_INT1M,
- ARRAY_SIZE(res), &res[0]);
- if (ret < 0) {
- dev_err(pcf->dev, "error writing wakeup irq masks\n");
- goto out;
- }
-
- pcf->is_suspended = 1;
-
-out:
- return ret;
-}
-
-static int pcf50633_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct pcf50633 *pcf = i2c_get_clientdata(client);
- int ret;
-
- /* Write the saved mask registers */
- ret = pcf50633_write_block(pcf, PCF50633_REG_INT1M,
- ARRAY_SIZE(pcf->suspend_irq_masks),
- pcf->suspend_irq_masks);
- if (ret < 0)
- dev_err(pcf->dev, "Error restoring saved suspend masks\n");
-
- enable_irq(pcf->irq);
-
- return ret;
-}
-
-EXPORT_GPL_SIMPLE_DEV_PM_OPS(pcf50633_pm, pcf50633_suspend, pcf50633_resume);
-
-int pcf50633_irq_init(struct pcf50633 *pcf, int irq)
-{
- int ret;
-
- pcf->irq = irq;
-
- /* Enable all interrupts except RTC SECOND */
- pcf->mask_regs[0] = 0x80;
- pcf50633_reg_write(pcf, PCF50633_REG_INT1M, pcf->mask_regs[0]);
- pcf50633_reg_write(pcf, PCF50633_REG_INT2M, 0x00);
- pcf50633_reg_write(pcf, PCF50633_REG_INT3M, 0x00);
- pcf50633_reg_write(pcf, PCF50633_REG_INT4M, 0x00);
- pcf50633_reg_write(pcf, PCF50633_REG_INT5M, 0x00);
-
- ret = request_threaded_irq(irq, NULL, pcf50633_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "pcf50633", pcf);
-
- if (ret)
-		dev_err(pcf->dev, "Failed to request IRQ %d: %d\n", irq, ret);
-
- if (enable_irq_wake(irq) < 0)
-		dev_err(pcf->dev, "IRQ %u cannot be enabled as wake-up source "
-			"in this hardware revision\n", irq);
-
- return ret;
-}
-
-void pcf50633_irq_free(struct pcf50633 *pcf)
-{
- free_irq(pcf->irq, pcf);
-}
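
The interrupt plumbing removed above boils down to the standard threaded-IRQ pattern for a slow I2C device on a level-triggered line. A minimal, hedged sketch of that pattern; the handler and device names are illustrative only.

#include <linux/interrupt.h>

static irqreturn_t demo_pmu_irq_thread(int irq, void *data)
{
	/* Read and acknowledge the chip's interrupt status registers here,
	 * then dispatch to the per-source handlers. */
	return IRQ_HANDLED;
}

static int demo_pmu_irq_init(void *pmu, int irq)
{
	/* No hard handler; IRQF_ONESHOT keeps the level-triggered line
	 * masked until the thread has acknowledged the chip. */
	return request_threaded_irq(irq, NULL, demo_pmu_irq_thread,
				    IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				    "demo-pmu", pmu);
}
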
diff --git a/drivers/mfd/qnap-mcu.c b/drivers/mfd/qnap-mcu.c
index 4be39d8b2905..89a8a1913d42 100644
--- a/drivers/mfd/qnap-mcu.c
+++ b/drivers/mfd/qnap-mcu.c
@@ -158,9 +158,9 @@ int qnap_mcu_exec(struct qnap_mcu *mcu,
mutex_lock(&mcu->bus_lock);
- reply->data = rx,
- reply->length = length,
- reply->received = 0,
+ reply->data = rx;
+ reply->length = length;
+ reply->received = 0;
reinit_completion(&reply->done);
qnap_mcu_write(mcu, cmd_data, cmd_data_size);
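
The qnap-mcu hunk replaces trailing commas with semicolons. The commas happened to compile because the three assignments formed one expression statement via the comma operator, but they were clearly a typo and confuse both readers and static checkers. A standalone illustration of the corrected form, not taken from the driver:

#include <stddef.h>

struct demo_reply {
	unsigned char *data;
	size_t length;
	size_t received;
};

static void demo_fill(struct demo_reply *r, unsigned char *rx, size_t length)
{
	r->data = rx;		/* previously chained with ','       */
	r->length = length;	/* each assignment is now its own    */
	r->received = 0;	/* statement, terminated by ';'      */
}
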
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index cdfe738e1d76..3e9b65c988a7 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -83,6 +83,11 @@ static const struct mfd_cell s2mpu02_devs[] = {
{ .name = "s2mpu02-regulator", },
};
+static const struct mfd_cell s2mpu05_devs[] = {
+ { .name = "s2mpu05-regulator", },
+ { .name = "s2mps15-rtc", },
+};
+
static const struct of_device_id sec_dt_match[] = {
{
.compatible = "samsung,s5m8767-pmic",
@@ -109,6 +114,9 @@ static const struct of_device_id sec_dt_match[] = {
.compatible = "samsung,s2mpu02-pmic",
.data = (void *)S2MPU02,
}, {
+ .compatible = "samsung,s2mpu05-pmic",
+ .data = (void *)S2MPU05,
+ }, {
/* Sentinel */
},
};
@@ -374,6 +382,10 @@ static int sec_pmic_probe(struct i2c_client *i2c)
sec_devs = s2mpu02_devs;
num_sec_devs = ARRAY_SIZE(s2mpu02_devs);
break;
+ case S2MPU05:
+ sec_devs = s2mpu05_devs;
+ num_sec_devs = ARRAY_SIZE(s2mpu05_devs);
+ break;
default:
dev_err(&i2c->dev, "Unsupported device type (%lu)\n",
sec_pmic->device_type);
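
The sec-core hunks follow the driver's existing per-variant pattern: the compatible entry selects the device type, and the matching mfd_cell array is handed to the MFD core at probe time. A hedged sketch of the consuming call; the real probe may pass different id or resource arguments.

#include <linux/mfd/core.h>
#include <linux/platform_device.h>

static int demo_register_cells(struct device *dev,
			       const struct mfd_cell *cells, int n_cells)
{
	/* Registers one platform device per cell, parented to @dev. */
	return devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE,
				    cells, n_cells, NULL, 0, NULL);
}
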
diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c
index e191aeb0c07c..047fc065fcf1 100644
--- a/drivers/mfd/sec-irq.c
+++ b/drivers/mfd/sec-irq.c
@@ -14,6 +14,7 @@
#include <linux/mfd/samsung/s2mps11.h>
#include <linux/mfd/samsung/s2mps14.h>
#include <linux/mfd/samsung/s2mpu02.h>
+#include <linux/mfd/samsung/s2mpu05.h>
#include <linux/mfd/samsung/s5m8767.h>
static const struct regmap_irq s2mps11_irqs[] = {
@@ -225,6 +226,26 @@ static const struct regmap_irq s2mpu02_irqs[] = {
},
};
+static const struct regmap_irq s2mpu05_irqs[] = {
+ REGMAP_IRQ_REG(S2MPU05_IRQ_PWRONF, 0, S2MPU05_IRQ_PWRONF_MASK),
+ REGMAP_IRQ_REG(S2MPU05_IRQ_PWRONR, 0, S2MPU05_IRQ_PWRONR_MASK),
+ REGMAP_IRQ_REG(S2MPU05_IRQ_JIGONBF, 0, S2MPU05_IRQ_JIGONBF_MASK),
+ REGMAP_IRQ_REG(S2MPU05_IRQ_JIGONBR, 0, S2MPU05_IRQ_JIGONBR_MASK),
+ REGMAP_IRQ_REG(S2MPU05_IRQ_ACOKF, 0, S2MPU05_IRQ_ACOKF_MASK),
+ REGMAP_IRQ_REG(S2MPU05_IRQ_ACOKR, 0, S2MPU05_IRQ_ACOKR_MASK),
+ REGMAP_IRQ_REG(S2MPU05_IRQ_PWRON1S, 0, S2MPU05_IRQ_PWRON1S_MASK),
+ REGMAP_IRQ_REG(S2MPU05_IRQ_MRB, 0, S2MPU05_IRQ_MRB_MASK),
+ REGMAP_IRQ_REG(S2MPU05_IRQ_RTC60S, 1, S2MPU05_IRQ_RTC60S_MASK),
+ REGMAP_IRQ_REG(S2MPU05_IRQ_RTCA1, 1, S2MPU05_IRQ_RTCA1_MASK),
+ REGMAP_IRQ_REG(S2MPU05_IRQ_RTCA0, 1, S2MPU05_IRQ_RTCA0_MASK),
+ REGMAP_IRQ_REG(S2MPU05_IRQ_SMPL, 1, S2MPU05_IRQ_SMPL_MASK),
+ REGMAP_IRQ_REG(S2MPU05_IRQ_RTC1S, 1, S2MPU05_IRQ_RTC1S_MASK),
+ REGMAP_IRQ_REG(S2MPU05_IRQ_WTSR, 1, S2MPU05_IRQ_WTSR_MASK),
+ REGMAP_IRQ_REG(S2MPU05_IRQ_INT120C, 2, S2MPU05_IRQ_INT120C_MASK),
+ REGMAP_IRQ_REG(S2MPU05_IRQ_INT140C, 2, S2MPU05_IRQ_INT140C_MASK),
+ REGMAP_IRQ_REG(S2MPU05_IRQ_TSD, 2, S2MPU05_IRQ_TSD_MASK),
+};
+
static const struct regmap_irq s5m8767_irqs[] = {
[S5M8767_IRQ_PWRR] = {
.reg_offset = 0,
@@ -339,6 +360,16 @@ static const struct regmap_irq_chip s2mpu02_irq_chip = {
.ack_base = S2MPU02_REG_INT1,
};
+static const struct regmap_irq_chip s2mpu05_irq_chip = {
+ .name = "s2mpu05",
+ .irqs = s2mpu05_irqs,
+ .num_irqs = ARRAY_SIZE(s2mpu05_irqs),
+ .num_regs = 3,
+ .status_base = S2MPU05_REG_INT1,
+ .mask_base = S2MPU05_REG_INT1M,
+ .ack_base = S2MPU05_REG_INT1,
+};
+
static const struct regmap_irq_chip s5m8767_irq_chip = {
.name = "s5m8767",
.irqs = s5m8767_irqs,
@@ -383,6 +414,9 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic)
case S2MPU02:
sec_irq_chip = &s2mpu02_irq_chip;
break;
+ case S2MPU05:
+ sec_irq_chip = &s2mpu05_irq_chip;
+ break;
default:
dev_err(sec_pmic->dev, "Unknown device type %lu\n",
sec_pmic->device_type);
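
The new s2mpu05 tables use the regmap-irq framework: each regmap_irq maps one interrupt source onto a (status register offset, mask bit) pair, and the regmap_irq_chip ties the status/mask/ack register banks together. A minimal, hedged sketch of registering such a chip; the trigger flags and function name are illustrative, not taken from sec-irq.c.

#include <linux/interrupt.h>
#include <linux/regmap.h>

static int demo_add_pmic_irq_chip(struct device *dev, struct regmap *map,
				  int irq, const struct regmap_irq_chip *chip,
				  struct regmap_irq_chip_data **data)
{
	/* The regmap-irq core reads the status registers, masks/acks them
	 * and demultiplexes into per-source virtual interrupts. */
	return devm_regmap_add_irq_chip(dev, map, irq,
					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					0, chip, data);
}
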
diff --git a/drivers/mfd/simple-mfd-i2c.c b/drivers/mfd/simple-mfd-i2c.c
index 6eda79533208..22159913bea0 100644
--- a/drivers/mfd/simple-mfd-i2c.c
+++ b/drivers/mfd/simple-mfd-i2c.c
@@ -83,11 +83,22 @@ static const struct simple_mfd_data maxim_max5970 = {
.mfd_cell_size = ARRAY_SIZE(max5970_cells),
};
+static const struct mfd_cell max77705_sensor_cells[] = {
+ { .name = "max77705-battery" },
+ { .name = "max77705-hwmon", },
+};
+
+static const struct simple_mfd_data maxim_mon_max77705 = {
+ .mfd_cell = max77705_sensor_cells,
+ .mfd_cell_size = ARRAY_SIZE(max77705_sensor_cells),
+};
+
static const struct of_device_id simple_mfd_i2c_of_match[] = {
{ .compatible = "kontron,sl28cpld" },
{ .compatible = "silergy,sy7636a", .data = &silergy_sy7636a},
{ .compatible = "maxim,max5970", .data = &maxim_max5970},
{ .compatible = "maxim,max5978", .data = &maxim_max5970},
+ { .compatible = "maxim,max77705-battery", .data = &maxim_mon_max77705},
{}
};
MODULE_DEVICE_TABLE(of, simple_mfd_i2c_of_match);
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 0469e85d72cf..7ee293b09f62 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -920,7 +920,7 @@ static void sm501_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct sm501_gpio_chip *smchip = gpiochip_get_data(chip);
struct sm501_gpio *smgpio = smchip->ourgpio;
- unsigned long bit = 1 << offset;
+ unsigned long bit = BIT(offset);
void __iomem *regs = smchip->regbase;
unsigned long save;
unsigned long val;
@@ -946,7 +946,7 @@ static int sm501_gpio_input(struct gpio_chip *chip, unsigned offset)
struct sm501_gpio_chip *smchip = gpiochip_get_data(chip);
struct sm501_gpio *smgpio = smchip->ourgpio;
void __iomem *regs = smchip->regbase;
- unsigned long bit = 1 << offset;
+ unsigned long bit = BIT(offset);
unsigned long save;
unsigned long ddr;
@@ -971,7 +971,7 @@ static int sm501_gpio_output(struct gpio_chip *chip,
{
struct sm501_gpio_chip *smchip = gpiochip_get_data(chip);
struct sm501_gpio *smgpio = smchip->ourgpio;
- unsigned long bit = 1 << offset;
+ unsigned long bit = BIT(offset);
void __iomem *regs = smchip->regbase;
unsigned long save;
unsigned long val;
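
The sm501 hunks are a mechanical conversion: BIT(offset) expands to an unsigned long shift, which avoids the implementation-defined "1 << 31" case and matches the unsigned long variables it is assigned to. Equivalent spelling, for reference:

#include <linux/bits.h>

static unsigned long demo_gpio_mask(unsigned int offset)
{
	return BIT(offset);	/* (1UL << offset), was "1 << offset" */
}
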
diff --git a/drivers/mfd/sta2x11-mfd.c b/drivers/mfd/sta2x11-mfd.c
deleted file mode 100644
index 02cc49daf2e3..000000000000
--- a/drivers/mfd/sta2x11-mfd.c
+++ /dev/null
@@ -1,645 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * STA2x11 mfd for GPIO, SCTL and APBREG
- *
- * Copyright (c) 2009-2011 Wind River Systems, Inc.
- * Copyright (c) 2011 ST Microelectronics (Alessandro Rubini, Davide Ciminaghi)
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/spinlock.h>
-#include <linux/errno.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/seq_file.h>
-#include <linux/platform_device.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/sta2x11-mfd.h>
-#include <linux/regmap.h>
-
-#include <asm/sta2x11.h>
-
-static inline int __reg_within_range(unsigned int r,
- unsigned int start,
- unsigned int end)
-{
- return ((r >= start) && (r <= end));
-}
-
-/* This describes STA2X11 MFD chip for us, we may have several */
-struct sta2x11_mfd {
- struct sta2x11_instance *instance;
- struct regmap *regmap[sta2x11_n_mfd_plat_devs];
- spinlock_t lock[sta2x11_n_mfd_plat_devs];
- struct list_head list;
- void __iomem *regs[sta2x11_n_mfd_plat_devs];
-};
-
-static LIST_HEAD(sta2x11_mfd_list);
-
-/* Three functions to act on the list */
-static struct sta2x11_mfd *sta2x11_mfd_find(struct pci_dev *pdev)
-{
- struct sta2x11_instance *instance;
- struct sta2x11_mfd *mfd;
-
- if (!pdev && !list_empty(&sta2x11_mfd_list)) {
- pr_warn("%s: Unspecified device, using first instance\n",
- __func__);
- return list_entry(sta2x11_mfd_list.next,
- struct sta2x11_mfd, list);
- }
-
- instance = sta2x11_get_instance(pdev);
- if (!instance)
- return NULL;
- list_for_each_entry(mfd, &sta2x11_mfd_list, list) {
- if (mfd->instance == instance)
- return mfd;
- }
- return NULL;
-}
-
-static int sta2x11_mfd_add(struct pci_dev *pdev, gfp_t flags)
-{
- int i;
- struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
- struct sta2x11_instance *instance;
-
- if (mfd)
- return -EBUSY;
- instance = sta2x11_get_instance(pdev);
- if (!instance)
- return -EINVAL;
- mfd = kzalloc(sizeof(*mfd), flags);
- if (!mfd)
- return -ENOMEM;
- INIT_LIST_HEAD(&mfd->list);
- for (i = 0; i < ARRAY_SIZE(mfd->lock); i++)
- spin_lock_init(&mfd->lock[i]);
- mfd->instance = instance;
- list_add(&mfd->list, &sta2x11_mfd_list);
- return 0;
-}
-
-/* This function is exported and is not expected to fail */
-u32 __sta2x11_mfd_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val,
- enum sta2x11_mfd_plat_dev index)
-{
- struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
- u32 r;
- unsigned long flags;
- void __iomem *regs;
-
- if (!mfd) {
- dev_warn(&pdev->dev, ": can't access sctl regs\n");
- return 0;
- }
-
- regs = mfd->regs[index];
- if (!regs) {
- dev_warn(&pdev->dev, ": system ctl not initialized\n");
- return 0;
- }
- spin_lock_irqsave(&mfd->lock[index], flags);
- r = readl(regs + reg);
- r &= ~mask;
- r |= val;
- if (mask)
- writel(r, regs + reg);
- spin_unlock_irqrestore(&mfd->lock[index], flags);
- return r;
-}
-EXPORT_SYMBOL(__sta2x11_mfd_mask);
-
-int sta2x11_mfd_get_regs_data(struct platform_device *dev,
- enum sta2x11_mfd_plat_dev index,
- void __iomem **regs,
- spinlock_t **lock)
-{
- struct pci_dev *pdev = *(struct pci_dev **)dev_get_platdata(&dev->dev);
- struct sta2x11_mfd *mfd;
-
- if (!pdev)
- return -ENODEV;
- mfd = sta2x11_mfd_find(pdev);
- if (!mfd)
- return -ENODEV;
- if (index >= sta2x11_n_mfd_plat_devs)
- return -ENODEV;
- *regs = mfd->regs[index];
- *lock = &mfd->lock[index];
- pr_debug("%s %d *regs = %p\n", __func__, __LINE__, *regs);
- return *regs ? 0 : -ENODEV;
-}
-EXPORT_SYMBOL(sta2x11_mfd_get_regs_data);
-
-/*
- * Special sta2x11-mfd regmap lock/unlock functions
- */
-
-static void sta2x11_regmap_lock(void *__lock)
-{
- spinlock_t *lock = __lock;
- spin_lock(lock);
-}
-
-static void sta2x11_regmap_unlock(void *__lock)
-{
- spinlock_t *lock = __lock;
- spin_unlock(lock);
-}
-
-/* OTP (one-time programmable) registers do not require locking */
-static void sta2x11_regmap_nolock(void *__lock)
-{
-}
-
-static const char *sta2x11_mfd_names[sta2x11_n_mfd_plat_devs] = {
- [sta2x11_sctl] = STA2X11_MFD_SCTL_NAME,
- [sta2x11_apbreg] = STA2X11_MFD_APBREG_NAME,
- [sta2x11_apb_soc_regs] = STA2X11_MFD_APB_SOC_REGS_NAME,
- [sta2x11_scr] = STA2X11_MFD_SCR_NAME,
-};
-
-static bool sta2x11_sctl_writeable_reg(struct device *dev, unsigned int reg)
-{
- return !__reg_within_range(reg, SCTL_SCPCIECSBRST, SCTL_SCRSTSTA);
-}
-
-static struct regmap_config sta2x11_sctl_regmap_config = {
- .reg_bits = 32,
- .reg_stride = 4,
- .val_bits = 32,
- .lock = sta2x11_regmap_lock,
- .unlock = sta2x11_regmap_unlock,
- .max_register = SCTL_SCRSTSTA,
- .writeable_reg = sta2x11_sctl_writeable_reg,
-};
-
-static bool sta2x11_scr_readable_reg(struct device *dev, unsigned int reg)
-{
- return (reg == STA2X11_SECR_CR) ||
- __reg_within_range(reg, STA2X11_SECR_FVR0, STA2X11_SECR_FVR1);
-}
-
-static bool sta2x11_scr_writeable_reg(struct device *dev, unsigned int reg)
-{
- return false;
-}
-
-static struct regmap_config sta2x11_scr_regmap_config = {
- .reg_bits = 32,
- .reg_stride = 4,
- .val_bits = 32,
- .lock = sta2x11_regmap_nolock,
- .unlock = sta2x11_regmap_nolock,
- .max_register = STA2X11_SECR_FVR1,
- .readable_reg = sta2x11_scr_readable_reg,
- .writeable_reg = sta2x11_scr_writeable_reg,
-};
-
-static bool sta2x11_apbreg_readable_reg(struct device *dev, unsigned int reg)
-{
- /* Two blocks (CAN and MLB, SARAC) 0x100 bytes apart */
- if (reg >= APBREG_BSR_SARAC)
- reg -= APBREG_BSR_SARAC;
- switch (reg) {
- case APBREG_BSR:
- case APBREG_PAER:
- case APBREG_PWAC:
- case APBREG_PRAC:
- case APBREG_PCG:
- case APBREG_PUR:
- case APBREG_EMU_PCG:
- return true;
- default:
- return false;
- }
-}
-
-static bool sta2x11_apbreg_writeable_reg(struct device *dev, unsigned int reg)
-{
- if (reg >= APBREG_BSR_SARAC)
- reg -= APBREG_BSR_SARAC;
- if (!sta2x11_apbreg_readable_reg(dev, reg))
- return false;
- return reg != APBREG_PAER;
-}
-
-static struct regmap_config sta2x11_apbreg_regmap_config = {
- .reg_bits = 32,
- .reg_stride = 4,
- .val_bits = 32,
- .lock = sta2x11_regmap_lock,
- .unlock = sta2x11_regmap_unlock,
- .max_register = APBREG_EMU_PCG_SARAC,
- .readable_reg = sta2x11_apbreg_readable_reg,
- .writeable_reg = sta2x11_apbreg_writeable_reg,
-};
-
-static bool sta2x11_apb_soc_regs_readable_reg(struct device *dev,
- unsigned int reg)
-{
- return reg <= PCIE_SoC_INT_ROUTER_STATUS3_REG ||
- __reg_within_range(reg, DMA_IP_CTRL_REG, SPARE3_RESERVED) ||
- __reg_within_range(reg, MASTER_LOCK_REG,
- SYSTEM_CONFIG_STATUS_REG) ||
- reg == MSP_CLK_CTRL_REG ||
- __reg_within_range(reg, COMPENSATION_REG1, TEST_CTL_REG);
-}
-
-static bool sta2x11_apb_soc_regs_writeable_reg(struct device *dev,
- unsigned int reg)
-{
- if (!sta2x11_apb_soc_regs_readable_reg(dev, reg))
- return false;
- switch (reg) {
- case PCIE_COMMON_CLOCK_CONFIG_0_4_0:
- case SYSTEM_CONFIG_STATUS_REG:
- case COMPENSATION_REG1:
- case PCIE_SoC_INT_ROUTER_STATUS0_REG...PCIE_SoC_INT_ROUTER_STATUS3_REG:
- case PCIE_PM_STATUS_0_PORT_0_4...PCIE_PM_STATUS_7_0_EP4:
- return false;
- default:
- return true;
- }
-}
-
-static struct regmap_config sta2x11_apb_soc_regs_regmap_config = {
- .reg_bits = 32,
- .reg_stride = 4,
- .val_bits = 32,
- .lock = sta2x11_regmap_lock,
- .unlock = sta2x11_regmap_unlock,
- .max_register = TEST_CTL_REG,
- .readable_reg = sta2x11_apb_soc_regs_readable_reg,
- .writeable_reg = sta2x11_apb_soc_regs_writeable_reg,
-};
-
-static struct regmap_config *
-sta2x11_mfd_regmap_configs[sta2x11_n_mfd_plat_devs] = {
- [sta2x11_sctl] = &sta2x11_sctl_regmap_config,
- [sta2x11_apbreg] = &sta2x11_apbreg_regmap_config,
- [sta2x11_apb_soc_regs] = &sta2x11_apb_soc_regs_regmap_config,
- [sta2x11_scr] = &sta2x11_scr_regmap_config,
-};
-
-/* Probe for the four platform devices */
-
-static int sta2x11_mfd_platform_probe(struct platform_device *dev,
- enum sta2x11_mfd_plat_dev index)
-{
- struct pci_dev **pdev;
- struct sta2x11_mfd *mfd;
- struct resource *res;
- const char *name = sta2x11_mfd_names[index];
- struct regmap_config *regmap_config = sta2x11_mfd_regmap_configs[index];
-
- pdev = dev_get_platdata(&dev->dev);
- mfd = sta2x11_mfd_find(*pdev);
- if (!mfd)
- return -ENODEV;
- if (!regmap_config)
- return -ENODEV;
-
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENOMEM;
-
- if (!request_mem_region(res->start, resource_size(res), name))
- return -EBUSY;
-
- mfd->regs[index] = ioremap(res->start, resource_size(res));
- if (!mfd->regs[index]) {
- release_mem_region(res->start, resource_size(res));
- return -ENOMEM;
- }
-	regmap_config->lock_arg = &mfd->lock[index];
-	/*
-	 * No caching: registers can be reached both via regmap and via
-	 * void __iomem *.
-	 */
- regmap_config->cache_type = REGCACHE_NONE;
- mfd->regmap[index] = devm_regmap_init_mmio(&dev->dev, mfd->regs[index],
- regmap_config);
- WARN_ON(IS_ERR(mfd->regmap[index]));
-
- return 0;
-}
-
-static int sta2x11_sctl_probe(struct platform_device *dev)
-{
- return sta2x11_mfd_platform_probe(dev, sta2x11_sctl);
-}
-
-static int sta2x11_apbreg_probe(struct platform_device *dev)
-{
- return sta2x11_mfd_platform_probe(dev, sta2x11_apbreg);
-}
-
-static int sta2x11_apb_soc_regs_probe(struct platform_device *dev)
-{
- return sta2x11_mfd_platform_probe(dev, sta2x11_apb_soc_regs);
-}
-
-static int sta2x11_scr_probe(struct platform_device *dev)
-{
- return sta2x11_mfd_platform_probe(dev, sta2x11_scr);
-}
-
-/* The four platform drivers */
-static struct platform_driver sta2x11_sctl_platform_driver = {
- .driver = {
- .name = STA2X11_MFD_SCTL_NAME,
- },
- .probe = sta2x11_sctl_probe,
-};
-
-static struct platform_driver sta2x11_platform_driver = {
- .driver = {
- .name = STA2X11_MFD_APBREG_NAME,
- },
- .probe = sta2x11_apbreg_probe,
-};
-
-static struct platform_driver sta2x11_apb_soc_regs_platform_driver = {
- .driver = {
- .name = STA2X11_MFD_APB_SOC_REGS_NAME,
- },
- .probe = sta2x11_apb_soc_regs_probe,
-};
-
-static struct platform_driver sta2x11_scr_platform_driver = {
- .driver = {
- .name = STA2X11_MFD_SCR_NAME,
- },
- .probe = sta2x11_scr_probe,
-};
-
-static struct platform_driver * const drivers[] = {
- &sta2x11_platform_driver,
- &sta2x11_sctl_platform_driver,
- &sta2x11_apb_soc_regs_platform_driver,
- &sta2x11_scr_platform_driver,
-};
-
-static int __init sta2x11_drivers_init(void)
-{
- return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
-}
-
-/*
- * What follows are the PCI devices that host the above pdevs.
- * Each logic block is 4kB and they are all consecutive: we use this info.
- */
-
-/* Mfd 0 device */
-
-/* Mfd 0, Bar 0 */
-enum mfd0_bar0_cells {
- STA2X11_GPIO_0 = 0,
- STA2X11_GPIO_1,
- STA2X11_GPIO_2,
- STA2X11_GPIO_3,
- STA2X11_SCTL,
- STA2X11_SCR,
- STA2X11_TIME,
-};
-/* Mfd 0, Bar 1 */
-enum mfd0_bar1_cells {
- STA2X11_APBREG = 0,
-};
-#define CELL_4K(_name, _cell) { \
- .name = _name, \
- .start = _cell * 4096, .end = _cell * 4096 + 4095, \
- .flags = IORESOURCE_MEM, \
- }
-
-static const struct resource gpio_resources[] = {
- {
- /* 4 consecutive cells, 1 driver */
- .name = STA2X11_MFD_GPIO_NAME,
- .start = 0,
- .end = (4 * 4096) - 1,
- .flags = IORESOURCE_MEM,
- }
-};
-static const struct resource sctl_resources[] = {
- CELL_4K(STA2X11_MFD_SCTL_NAME, STA2X11_SCTL),
-};
-static const struct resource scr_resources[] = {
- CELL_4K(STA2X11_MFD_SCR_NAME, STA2X11_SCR),
-};
-static const struct resource time_resources[] = {
- CELL_4K(STA2X11_MFD_TIME_NAME, STA2X11_TIME),
-};
-
-static const struct resource apbreg_resources[] = {
- CELL_4K(STA2X11_MFD_APBREG_NAME, STA2X11_APBREG),
-};
-
-#define DEV(_name, _r) \
- { .name = _name, .num_resources = ARRAY_SIZE(_r), .resources = _r, }
-
-static struct mfd_cell sta2x11_mfd0_bar0[] = {
- /* offset 0: we add pdata later */
- DEV(STA2X11_MFD_GPIO_NAME, gpio_resources),
- DEV(STA2X11_MFD_SCTL_NAME, sctl_resources),
- DEV(STA2X11_MFD_SCR_NAME, scr_resources),
- DEV(STA2X11_MFD_TIME_NAME, time_resources),
-};
-
-static struct mfd_cell sta2x11_mfd0_bar1[] = {
- DEV(STA2X11_MFD_APBREG_NAME, apbreg_resources),
-};
-
-/* Mfd 1 devices */
-
-/* Mfd 1, Bar 0 */
-enum mfd1_bar0_cells {
- STA2X11_VIC = 0,
-};
-
-/* Mfd 1, Bar 1 */
-enum mfd1_bar1_cells {
- STA2X11_APB_SOC_REGS = 0,
-};
-
-static const struct resource vic_resources[] = {
- CELL_4K(STA2X11_MFD_VIC_NAME, STA2X11_VIC),
-};
-
-static const struct resource apb_soc_regs_resources[] = {
- CELL_4K(STA2X11_MFD_APB_SOC_REGS_NAME, STA2X11_APB_SOC_REGS),
-};
-
-static struct mfd_cell sta2x11_mfd1_bar0[] = {
- DEV(STA2X11_MFD_VIC_NAME, vic_resources),
-};
-
-static struct mfd_cell sta2x11_mfd1_bar1[] = {
- DEV(STA2X11_MFD_APB_SOC_REGS_NAME, apb_soc_regs_resources),
-};
-
-
-static int sta2x11_mfd_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
- return 0;
-}
-
-static int sta2x11_mfd_resume(struct pci_dev *pdev)
-{
- int err;
-
- pci_set_power_state(pdev, PCI_D0);
- err = pci_enable_device(pdev);
- if (err)
- return err;
- pci_restore_state(pdev);
-
- return 0;
-}
-
-struct sta2x11_mfd_bar_setup_data {
- struct mfd_cell *cells;
- int ncells;
-};
-
-struct sta2x11_mfd_setup_data {
- struct sta2x11_mfd_bar_setup_data bars[2];
-};
-
-#define STA2X11_MFD0 0
-#define STA2X11_MFD1 1
-
-static struct sta2x11_mfd_setup_data mfd_setup_data[] = {
- /* Mfd 0: gpio, sctl, scr, timers / apbregs */
- [STA2X11_MFD0] = {
- .bars = {
- [0] = {
- .cells = sta2x11_mfd0_bar0,
- .ncells = ARRAY_SIZE(sta2x11_mfd0_bar0),
- },
- [1] = {
- .cells = sta2x11_mfd0_bar1,
- .ncells = ARRAY_SIZE(sta2x11_mfd0_bar1),
- },
- },
- },
- /* Mfd 1: vic / apb-soc-regs */
- [STA2X11_MFD1] = {
- .bars = {
- [0] = {
- .cells = sta2x11_mfd1_bar0,
- .ncells = ARRAY_SIZE(sta2x11_mfd1_bar0),
- },
- [1] = {
- .cells = sta2x11_mfd1_bar1,
- .ncells = ARRAY_SIZE(sta2x11_mfd1_bar1),
- },
- },
- },
-};
-
-static void sta2x11_mfd_setup(struct pci_dev *pdev,
- struct sta2x11_mfd_setup_data *sd)
-{
- int i, j;
- for (i = 0; i < ARRAY_SIZE(sd->bars); i++)
- for (j = 0; j < sd->bars[i].ncells; j++) {
- sd->bars[i].cells[j].pdata_size = sizeof(pdev);
- sd->bars[i].cells[j].platform_data = &pdev;
- }
-}
-
-static int sta2x11_mfd_probe(struct pci_dev *pdev,
- const struct pci_device_id *pci_id)
-{
- int err, i;
- struct sta2x11_mfd_setup_data *setup_data;
-
- dev_info(&pdev->dev, "%s\n", __func__);
-
- err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "Can't enable device.\n");
- return err;
- }
-
- err = pci_enable_msi(pdev);
- if (err)
- dev_info(&pdev->dev, "Enable msi failed\n");
-
- setup_data = pci_id->device == PCI_DEVICE_ID_STMICRO_GPIO ?
- &mfd_setup_data[STA2X11_MFD0] :
- &mfd_setup_data[STA2X11_MFD1];
-
- /* platform data is the pci device for all of them */
- sta2x11_mfd_setup(pdev, setup_data);
-
- /* Record this pdev before mfd_add_devices: their probe looks for it */
- if (!sta2x11_mfd_find(pdev))
- sta2x11_mfd_add(pdev, GFP_KERNEL);
-
- /* Just 2 bars for all mfd's at present */
- for (i = 0; i < 2; i++) {
- err = mfd_add_devices(&pdev->dev, -1,
- setup_data->bars[i].cells,
- setup_data->bars[i].ncells,
- &pdev->resource[i],
- 0, NULL);
- if (err) {
- dev_err(&pdev->dev,
- "mfd_add_devices[%d] failed: %d\n", i, err);
- goto err_disable;
- }
- }
-
- return 0;
-
-err_disable:
- mfd_remove_devices(&pdev->dev);
- pci_disable_device(pdev);
- pci_disable_msi(pdev);
- return err;
-}
-
-static const struct pci_device_id sta2x11_mfd_tbl[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_GPIO)},
- {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_VIC)},
- {0,},
-};
-
-static struct pci_driver sta2x11_mfd_driver = {
- .name = "sta2x11-mfd",
- .id_table = sta2x11_mfd_tbl,
- .probe = sta2x11_mfd_probe,
- .suspend = sta2x11_mfd_suspend,
- .resume = sta2x11_mfd_resume,
-};
-
-static int __init sta2x11_mfd_init(void)
-{
- pr_info("%s\n", __func__);
- return pci_register_driver(&sta2x11_mfd_driver);
-}
-
-/*
- * All of this must be ready before "normal" devices like MMCI appear.
- * But MFD (the pci device) can't be too early. The following choice
- * prepares platform drivers very early and probes the PCI device later,
- * but before other PCI devices.
- */
-subsys_initcall(sta2x11_drivers_init);
-rootfs_initcall(sta2x11_mfd_init);
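
The removed sta2x11 driver relied on a two-stage bring-up: its platform drivers were registered at subsys_initcall time so they already exist when the later rootfs_initcall probes the PCI device and spawns the matching platform devices. A bare sketch of that ordering with placeholder drivers (no probe logic shown):

#include <linux/init.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

static struct platform_driver demo_cell_driver = {
	.driver = { .name = "demo-cell" },
};

static struct pci_driver demo_mfd_driver = {
	.name = "demo-mfd",
};

static int __init demo_cells_init(void)
{
	/* Platform drivers first ... */
	return platform_driver_register(&demo_cell_driver);
}
subsys_initcall(demo_cells_init);

static int __init demo_mfd_init(void)
{
	/* ... then the PCI driver whose probe adds the cell devices. */
	return pci_register_driver(&demo_mfd_driver);
}
rootfs_initcall(demo_mfd_init);
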
diff --git a/drivers/mfd/stm32-timers.c b/drivers/mfd/stm32-timers.c
index 650724e19b88..e3c116ee4034 100644
--- a/drivers/mfd/stm32-timers.c
+++ b/drivers/mfd/stm32-timers.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/reset.h>
#define STM32_TIMERS_MAX_REGISTERS 0x3fc
@@ -173,6 +174,31 @@ static void stm32_timers_get_arr_size(struct stm32_timers *ddata)
regmap_write(ddata->regmap, TIM_ARR, arr);
}
+static int stm32_timers_probe_hwcfgr(struct device *dev, struct stm32_timers *ddata)
+{
+ u32 val;
+
+ ddata->ipidr = (uintptr_t)device_get_match_data(dev);
+ if (!ddata->ipidr) {
+ /* Fallback to legacy method for probing counter width */
+ stm32_timers_get_arr_size(ddata);
+ return 0;
+ }
+
+ regmap_read(ddata->regmap, TIM_IPIDR, &val);
+ if (val != ddata->ipidr) {
+ dev_err(dev, "Unsupported device detected: %u\n", val);
+ return -EINVAL;
+ }
+
+ regmap_read(ddata->regmap, TIM_HWCFGR2, &val);
+
+ /* Counter width in bits, max reload value is BIT(width) - 1 */
+ ddata->max_arr = BIT(FIELD_GET(TIM_HWCFGR2_CNT_WIDTH, val)) - 1;
+
+ return 0;
+}
+
static int stm32_timers_dma_probe(struct device *dev,
struct stm32_timers *ddata)
{
@@ -285,7 +311,9 @@ static int stm32_timers_probe(struct platform_device *pdev)
if (IS_ERR(ddata->clk))
return PTR_ERR(ddata->clk);
- stm32_timers_get_arr_size(ddata);
+ ret = stm32_timers_probe_hwcfgr(dev, ddata);
+ if (ret)
+ return ret;
ret = stm32_timers_irq_probe(pdev, ddata);
if (ret)
@@ -320,6 +348,7 @@ static void stm32_timers_remove(struct platform_device *pdev)
static const struct of_device_id stm32_timers_of_match[] = {
{ .compatible = "st,stm32-timers", },
+ { .compatible = "st,stm32mp25-timers", .data = (void *)STM32MP25_TIM_IPIDR },
{ /* end node */ },
};
MODULE_DEVICE_TABLE(of, stm32_timers_of_match);
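
On SoCs whose compatible provides an expected IPIDR, stm32_timers_probe_hwcfgr() reads the counter width from HWCFGR2 instead of probing it by writing TIM_ARR. As a worked example of the arithmetic, a width of 16 yields BIT(16) - 1 = 0xffff. The field layout below is a placeholder; the real TIM_HWCFGR2_CNT_WIDTH mask lives in the stm32 timers header.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_CNT_WIDTH	GENMASK(7, 0)	/* placeholder field definition */

static u32 demo_max_reload(u32 hwcfgr2)
{
	/* e.g. FIELD_GET() == 16  ->  BIT(16) - 1 == 0xffff */
	return BIT(FIELD_GET(DEMO_CNT_WIDTH, hwcfgr2)) - 1;
}
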
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index aa4a9940b569..ae71a2710bed 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -47,6 +47,7 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_res)
struct regmap_config syscon_config = syscon_regmap_config;
struct resource res;
struct reset_control *reset;
+ resource_size_t res_size;
WARN_ON(!mutex_is_locked(&syscon_list_lock));
@@ -96,6 +97,12 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_res)
}
}
+ res_size = resource_size(&res);
+ if (res_size < reg_io_width) {
+ ret = -EFAULT;
+ goto err_regmap;
+ }
+
syscon_config.name = kasprintf(GFP_KERNEL, "%pOFn@%pa", np, &res.start);
if (!syscon_config.name) {
ret = -ENOMEM;
@@ -103,7 +110,7 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_res)
}
syscon_config.reg_stride = reg_io_width;
syscon_config.val_bits = reg_io_width * 8;
- syscon_config.max_register = resource_size(&res) - reg_io_width;
+ syscon_config.max_register = res_size - reg_io_width;
if (!syscon_config.max_register)
syscon_config.max_register_is_0 = true;
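
The syscon check rejects regions smaller than one register, so the max_register subtraction below it can no longer underflow. Illustrative arithmetic only, not driver code: with 4-byte registers, a 4-byte region yields max_register = 0 (hence max_register_is_0), and anything smaller now fails early.

#include <linux/errno.h>

static long demo_max_register(unsigned long res_size, unsigned int reg_io_width)
{
	if (res_size < reg_io_width)
		return -EFAULT;			/* too small for one register */
	return res_size - reg_io_width;		/* 4-byte region, 4-byte regs -> 0 */
}
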
diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
index 710364435b6b..00fb12c4f491 100644
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -16,6 +16,7 @@
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/string_choices.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
@@ -250,7 +251,7 @@ static int dbg_show(struct seq_file *s, void *_)
v2 = i2c_smbus_read_byte_data(tps->client, TPS_LED1_PER);
seq_printf(s, "led1 %s, on=%02x, per=%02x, %d/%d msec\n",
(value & 0x80)
- ? ((v2 & 0x80) ? "on" : "off")
+ ? str_on_off(v2 & 0x80)
: ((v2 & 0x80) ? "blink" : "(nPG)"),
value, v2,
(value & 0x7f) * 10, (v2 & 0x7f) * 100);
@@ -259,7 +260,7 @@ static int dbg_show(struct seq_file *s, void *_)
v2 = i2c_smbus_read_byte_data(tps->client, TPS_LED2_PER);
seq_printf(s, "led2 %s, on=%02x, per=%02x, %d/%d msec\n",
(value & 0x80)
- ? ((v2 & 0x80) ? "on" : "off")
+ ? str_on_off(v2 & 0x80)
: ((v2 & 0x80) ? "blink" : "off"),
value, v2,
(value & 0x7f) * 10, (v2 & 0x7f) * 100);
@@ -738,7 +739,7 @@ int tps65010_set_gpio_out_value(unsigned gpio, unsigned value)
TPS_DEFGPIO, defgpio);
pr_debug("%s: gpio%dout = %s, defgpio 0x%02x\n", DRIVER_NAME,
- gpio, value ? "high" : "low",
+ gpio, str_high_low(value),
i2c_smbus_read_byte_data(the_tps->client, TPS_DEFGPIO));
mutex_unlock(&the_tps->lock);
@@ -850,7 +851,7 @@ int tps65010_set_vib(unsigned value)
status = i2c_smbus_write_byte_data(the_tps->client,
TPS_VDCDC2, vdcdc2);
- pr_debug("%s: vibrator %s\n", DRIVER_NAME, value ? "on" : "off");
+ pr_debug("%s: vibrator %s\n", DRIVER_NAME, str_on_off(value));
mutex_unlock(&the_tps->lock);
return status;
@@ -872,7 +873,7 @@ int tps65010_set_low_pwr(unsigned mode)
mutex_lock(&the_tps->lock);
pr_debug("%s: %s low_pwr, vdcdc1 0x%02x\n", DRIVER_NAME,
- mode ? "enable" : "disable",
+ str_enable_disable(mode),
i2c_smbus_read_byte_data(the_tps->client, TPS_VDCDC1));
vdcdc1 = i2c_smbus_read_byte_data(the_tps->client, TPS_VDCDC1);
@@ -984,7 +985,7 @@ int tps65013_set_low_pwr(unsigned mode)
pr_debug("%s: %s low_pwr, chgconfig 0x%02x vdcdc1 0x%02x\n",
DRIVER_NAME,
- mode ? "enable" : "disable",
+ str_enable_disable(mode),
i2c_smbus_read_byte_data(the_tps->client, TPS_CHGCONFIG),
i2c_smbus_read_byte_data(the_tps->client, TPS_VDCDC1));
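
The tps65010 hunks only swap open-coded ternaries for the <linux/string_choices.h> helpers, which return the matching string literal for a boolean. Roughly, in a standalone sketch:

#include <linux/printk.h>
#include <linux/string_choices.h>
#include <linux/types.h>

static void demo_report(bool v)
{
	pr_debug("led %s, gpio %s, low_pwr %s\n",
		 str_on_off(v),			/* "on" / "off"         */
		 str_high_low(v),		/* "high" / "low"       */
		 str_enable_disable(v));	/* "enable" / "disable" */
}
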
diff --git a/drivers/mfd/tps65219.c b/drivers/mfd/tps65219.c
index 081c5a30b04a..fd390600fbf0 100644
--- a/drivers/mfd/tps65219.c
+++ b/drivers/mfd/tps65219.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
//
-// Driver for TPS65219 Integrated Power Management Integrated Chips (PMIC)
+// Driver for TPS65214/TPS65215/TPS65219 Power Management Integrated Chips
//
// Copyright (C) 2022 BayLibre Incorporated - https://www.baylibre.com/
+// Copyright (C) 2024 Texas Instruments Incorporated - https://www.ti.com/
#include <linux/i2c.h>
#include <linux/reboot.h>
@@ -59,6 +60,84 @@ static const struct resource tps65219_pwrbutton_resources[] = {
DEFINE_RES_IRQ_NAMED(TPS65219_INT_PB_RISING_EDGE_DETECT, "rising"),
};
+static const struct resource tps65214_regulator_resources[] = {
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO1_SCG, "LDO1_SCG"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO1_OC, "LDO1_OC"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO1_UV, "LDO1_UV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO2_SCG, "LDO2_SCG"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO2_OC, "LDO2_OC"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO2_UV, "LDO2_UV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK3_SCG, "BUCK3_SCG"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK3_OC, "BUCK3_OC"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK3_NEG_OC, "BUCK3_NEG_OC"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK3_UV, "BUCK3_UV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK1_SCG, "BUCK1_SCG"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK1_OC, "BUCK1_OC"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK1_NEG_OC, "BUCK1_NEG_OC"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK1_UV, "BUCK1_UV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK2_SCG, "BUCK2_SCG"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK2_OC, "BUCK2_OC"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK2_NEG_OC, "BUCK2_NEG_OC"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK2_UV, "BUCK2_UV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK1_RV, "BUCK1_RV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK2_RV, "BUCK2_RV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK3_RV, "BUCK3_RV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO1_RV, "LDO1_RV"),
+ DEFINE_RES_IRQ_NAMED(TPS65214_INT_LDO2_RV, "LDO2_RV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK1_RV_SD, "BUCK1_RV_SD"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK2_RV_SD, "BUCK2_RV_SD"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK3_RV_SD, "BUCK3_RV_SD"),
+ DEFINE_RES_IRQ_NAMED(TPS65214_INT_LDO1_RV_SD, "LDO1_RV_SD"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO2_RV_SD, "LDO2_RV_SD"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_TIMEOUT, "TIMEOUT"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_SENSOR_2_WARM, "SENSOR_2_WARM"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_SENSOR_1_WARM, "SENSOR_1_WARM"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_SENSOR_0_WARM, "SENSOR_0_WARM"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_SENSOR_2_HOT, "SENSOR_2_HOT"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_SENSOR_1_HOT, "SENSOR_1_HOT"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_SENSOR_0_HOT, "SENSOR_0_HOT"),
+};
+
+static const struct resource tps65215_regulator_resources[] = {
+ DEFINE_RES_IRQ_NAMED(TPS65215_INT_LDO1_SCG, "LDO1_SCG"),
+ DEFINE_RES_IRQ_NAMED(TPS65215_INT_LDO1_OC, "LDO1_OC"),
+ DEFINE_RES_IRQ_NAMED(TPS65215_INT_LDO1_UV, "LDO1_UV"),
+ DEFINE_RES_IRQ_NAMED(TPS65215_INT_LDO2_SCG, "LDO2_SCG"),
+ DEFINE_RES_IRQ_NAMED(TPS65215_INT_LDO2_OC, "LDO2_OC"),
+ DEFINE_RES_IRQ_NAMED(TPS65215_INT_LDO2_UV, "LDO2_UV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK3_SCG, "BUCK3_SCG"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK3_OC, "BUCK3_OC"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK3_NEG_OC, "BUCK3_NEG_OC"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK3_UV, "BUCK3_UV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK1_SCG, "BUCK1_SCG"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK1_OC, "BUCK1_OC"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK1_NEG_OC, "BUCK1_NEG_OC"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK1_UV, "BUCK1_UV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK2_SCG, "BUCK2_SCG"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK2_OC, "BUCK2_OC"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK2_NEG_OC, "BUCK2_NEG_OC"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK2_UV, "BUCK2_UV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK1_RV, "BUCK1_RV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK2_RV, "BUCK2_RV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK3_RV, "BUCK3_RV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO1_RV, "LDO1_RV"),
+ DEFINE_RES_IRQ_NAMED(TPS65215_INT_LDO2_RV, "LDO2_RV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK1_RV_SD, "BUCK1_RV_SD"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK2_RV_SD, "BUCK2_RV_SD"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK3_RV_SD, "BUCK3_RV_SD"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO1_RV_SD, "LDO1_RV_SD"),
+ DEFINE_RES_IRQ_NAMED(TPS65215_INT_LDO2_RV_SD, "LDO2_RV_SD"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_TIMEOUT, "TIMEOUT"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_SENSOR_3_WARM, "SENSOR_3_WARM"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_SENSOR_2_WARM, "SENSOR_2_WARM"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_SENSOR_1_WARM, "SENSOR_1_WARM"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_SENSOR_0_WARM, "SENSOR_0_WARM"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_SENSOR_3_HOT, "SENSOR_3_HOT"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_SENSOR_2_HOT, "SENSOR_2_HOT"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_SENSOR_1_HOT, "SENSOR_1_HOT"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_SENSOR_0_HOT, "SENSOR_0_HOT"),
+};
+
static const struct resource tps65219_regulator_resources[] = {
DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO3_SCG, "LDO3_SCG"),
DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO3_OC, "LDO3_OC"),
@@ -109,6 +188,16 @@ static const struct resource tps65219_regulator_resources[] = {
DEFINE_RES_IRQ_NAMED(TPS65219_INT_SENSOR_0_HOT, "SENSOR_0_HOT"),
};
+static const struct mfd_cell tps65214_cells[] = {
+ MFD_CELL_RES("tps65214-regulator", tps65214_regulator_resources),
+ MFD_CELL_NAME("tps65215-gpio"),
+};
+
+static const struct mfd_cell tps65215_cells[] = {
+ MFD_CELL_RES("tps65215-regulator", tps65215_regulator_resources),
+ MFD_CELL_NAME("tps65215-gpio"),
+};
+
static const struct mfd_cell tps65219_cells[] = {
MFD_CELL_RES("tps65219-regulator", tps65219_regulator_resources),
MFD_CELL_NAME("tps65219-gpio"),
@@ -136,8 +225,19 @@ static unsigned int bit3_offsets[] = { TPS65219_REG_INT_BUCK_1_2_POS }; /* Buck
static unsigned int bit4_offsets[] = { TPS65219_REG_INT_BUCK_3_POS }; /* Buck 3 */
static unsigned int bit5_offsets[] = { TPS65219_REG_INT_LDO_1_2_POS }; /* LDO 1-2 */
static unsigned int bit6_offsets[] = { TPS65219_REG_INT_LDO_3_4_POS }; /* LDO 3-4 */
+static unsigned int tps65215_bit5_offsets[] = { TPS65215_REG_INT_LDO_1_POS };
+static unsigned int tps65215_bit6_offsets[] = { TPS65215_REG_INT_LDO_2_POS };
static unsigned int bit7_offsets[] = { TPS65219_REG_INT_PB_POS }; /* Power Button */
+/* TPS65214 INT_SOURCE bit 6 is 'RESERVED' */
+static unsigned int tps65214_bit0_offsets[] = { TPS65214_REG_INT_TO_RV_POS };
+static unsigned int tps65214_bit1_offsets[] = { TPS65214_REG_INT_RV_POS };
+static unsigned int tps65214_bit2_offsets[] = { TPS65214_REG_INT_SYS_POS };
+static unsigned int tps65214_bit3_offsets[] = { TPS65214_REG_INT_BUCK_1_2_POS };
+static unsigned int tps65214_bit4_offsets[] = { TPS65214_REG_INT_BUCK_3_POS };
+static unsigned int tps65214_bit5_offsets[] = { TPS65214_REG_INT_LDO_1_2_POS };
+static unsigned int tps65214_bit7_offsets[] = { TPS65214_REG_INT_PB_POS };
+
static struct regmap_irq_sub_irq_map tps65219_sub_irq_offsets[] = {
REGMAP_IRQ_MAIN_REG_OFFSET(bit0_offsets),
REGMAP_IRQ_MAIN_REG_OFFSET(bit1_offsets),
@@ -149,9 +249,112 @@ static struct regmap_irq_sub_irq_map tps65219_sub_irq_offsets[] = {
REGMAP_IRQ_MAIN_REG_OFFSET(bit7_offsets),
};
+static struct regmap_irq_sub_irq_map tps65215_sub_irq_offsets[] = {
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit0_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit1_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit2_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit3_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit4_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(tps65215_bit5_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(tps65215_bit6_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit7_offsets),
+};
+
+static struct regmap_irq_sub_irq_map tps65214_sub_irq_offsets[] = {
+ REGMAP_IRQ_MAIN_REG_OFFSET(tps65214_bit0_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(tps65214_bit1_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(tps65214_bit2_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(tps65214_bit3_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(tps65214_bit4_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(tps65214_bit5_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(tps65214_bit7_offsets),
+};
+
#define TPS65219_REGMAP_IRQ_REG(int_name, register_position) \
REGMAP_IRQ_REG(int_name, register_position, int_name##_MASK)
+static const struct regmap_irq tps65214_irqs[] = {
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_LDO1_SCG, TPS65214_REG_INT_LDO_1_2_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_LDO1_OC, TPS65214_REG_INT_LDO_1_2_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_LDO1_UV, TPS65214_REG_INT_LDO_1_2_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_LDO2_SCG, TPS65214_REG_INT_LDO_1_2_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_LDO2_OC, TPS65214_REG_INT_LDO_1_2_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_LDO2_UV, TPS65214_REG_INT_LDO_1_2_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK3_SCG, TPS65214_REG_INT_BUCK_3_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK3_OC, TPS65214_REG_INT_BUCK_3_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK3_NEG_OC, TPS65214_REG_INT_BUCK_3_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK3_UV, TPS65214_REG_INT_BUCK_3_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK2_SCG, TPS65214_REG_INT_BUCK_1_2_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK2_OC, TPS65214_REG_INT_BUCK_1_2_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK2_NEG_OC, TPS65214_REG_INT_BUCK_1_2_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK2_UV, TPS65214_REG_INT_BUCK_1_2_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK1_SCG, TPS65214_REG_INT_BUCK_1_2_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK1_OC, TPS65214_REG_INT_BUCK_1_2_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK1_NEG_OC, TPS65214_REG_INT_BUCK_1_2_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK1_UV, TPS65214_REG_INT_BUCK_1_2_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_SENSOR_2_WARM, TPS65214_REG_INT_SYS_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_SENSOR_1_WARM, TPS65214_REG_INT_SYS_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_SENSOR_0_WARM, TPS65214_REG_INT_SYS_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_SENSOR_2_HOT, TPS65214_REG_INT_SYS_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_SENSOR_1_HOT, TPS65214_REG_INT_SYS_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_SENSOR_0_HOT, TPS65214_REG_INT_SYS_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK1_RV, TPS65214_REG_INT_RV_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK2_RV, TPS65214_REG_INT_RV_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK3_RV, TPS65214_REG_INT_RV_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_LDO1_RV, TPS65214_REG_INT_RV_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65214_INT_LDO2_RV, TPS65214_REG_INT_RV_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK1_RV_SD, TPS65214_REG_INT_TO_RV_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK2_RV_SD, TPS65214_REG_INT_TO_RV_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK3_RV_SD, TPS65214_REG_INT_TO_RV_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65214_INT_LDO1_RV_SD, TPS65214_REG_INT_TO_RV_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_LDO2_RV_SD, TPS65214_REG_INT_TO_RV_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_TIMEOUT, TPS65214_REG_INT_TO_RV_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_PB_FALLING_EDGE_DETECT, TPS65214_REG_INT_PB_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_PB_RISING_EDGE_DETECT, TPS65214_REG_INT_PB_POS),
+};
+
+static const struct regmap_irq tps65215_irqs[] = {
+ TPS65219_REGMAP_IRQ_REG(TPS65215_INT_LDO1_SCG, TPS65215_REG_INT_LDO_1_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65215_INT_LDO1_OC, TPS65215_REG_INT_LDO_1_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65215_INT_LDO1_UV, TPS65215_REG_INT_LDO_1_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65215_INT_LDO2_SCG, TPS65215_REG_INT_LDO_2_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65215_INT_LDO2_OC, TPS65215_REG_INT_LDO_2_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65215_INT_LDO2_UV, TPS65215_REG_INT_LDO_2_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK3_SCG, TPS65219_REG_INT_BUCK_3_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK3_OC, TPS65219_REG_INT_BUCK_3_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK3_NEG_OC, TPS65219_REG_INT_BUCK_3_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK3_UV, TPS65219_REG_INT_BUCK_3_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK2_SCG, TPS65219_REG_INT_BUCK_1_2_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK2_OC, TPS65219_REG_INT_BUCK_1_2_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK2_NEG_OC, TPS65219_REG_INT_BUCK_1_2_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK2_UV, TPS65219_REG_INT_BUCK_1_2_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK1_SCG, TPS65219_REG_INT_BUCK_1_2_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK1_OC, TPS65219_REG_INT_BUCK_1_2_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK1_NEG_OC, TPS65219_REG_INT_BUCK_1_2_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK1_UV, TPS65219_REG_INT_BUCK_1_2_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_SENSOR_3_WARM, TPS65219_REG_INT_SYS_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_SENSOR_2_WARM, TPS65219_REG_INT_SYS_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_SENSOR_1_WARM, TPS65219_REG_INT_SYS_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_SENSOR_0_WARM, TPS65219_REG_INT_SYS_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_SENSOR_3_HOT, TPS65219_REG_INT_SYS_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_SENSOR_2_HOT, TPS65219_REG_INT_SYS_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_SENSOR_1_HOT, TPS65219_REG_INT_SYS_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_SENSOR_0_HOT, TPS65219_REG_INT_SYS_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK1_RV, TPS65219_REG_INT_RV_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK2_RV, TPS65219_REG_INT_RV_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK3_RV, TPS65219_REG_INT_RV_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_LDO1_RV, TPS65219_REG_INT_RV_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65215_INT_LDO2_RV, TPS65219_REG_INT_RV_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK1_RV_SD, TPS65219_REG_INT_TO_RV_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK2_RV_SD, TPS65219_REG_INT_TO_RV_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_BUCK3_RV_SD, TPS65219_REG_INT_TO_RV_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_LDO1_RV_SD, TPS65219_REG_INT_TO_RV_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65215_INT_LDO2_RV_SD, TPS65219_REG_INT_TO_RV_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_TIMEOUT, TPS65219_REG_INT_TO_RV_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_PB_FALLING_EDGE_DETECT, TPS65219_REG_INT_PB_POS),
+ TPS65219_REGMAP_IRQ_REG(TPS65219_INT_PB_RISING_EDGE_DETECT, TPS65219_REG_INT_PB_POS),
+};
+
static const struct regmap_irq tps65219_irqs[] = {
TPS65219_REGMAP_IRQ_REG(TPS65219_INT_LDO3_SCG, TPS65219_REG_INT_LDO_3_4_POS),
TPS65219_REGMAP_IRQ_REG(TPS65219_INT_LDO3_OC, TPS65219_REG_INT_LDO_3_4_POS),
@@ -204,6 +407,34 @@ static const struct regmap_irq tps65219_irqs[] = {
TPS65219_REGMAP_IRQ_REG(TPS65219_INT_PB_RISING_EDGE_DETECT, TPS65219_REG_INT_PB_POS),
};
+static const struct regmap_irq_chip tps65214_irq_chip = {
+ .name = "tps65214_irq",
+ .main_status = TPS65219_REG_INT_SOURCE,
+ .num_main_regs = 1,
+ .num_main_status_bits = 8,
+ .irqs = tps65214_irqs,
+ .num_irqs = ARRAY_SIZE(tps65214_irqs),
+ .status_base = TPS65214_REG_INT_LDO_1_2,
+ .ack_base = TPS65214_REG_INT_LDO_1_2,
+ .clear_ack = 1,
+ .num_regs = 8,
+ .sub_reg_offsets = tps65214_sub_irq_offsets,
+};
+
+static const struct regmap_irq_chip tps65215_irq_chip = {
+ .name = "tps65215_irq",
+ .main_status = TPS65219_REG_INT_SOURCE,
+ .num_main_regs = 1,
+ .num_main_status_bits = 8,
+ .irqs = tps65215_irqs,
+ .num_irqs = ARRAY_SIZE(tps65215_irqs),
+ .status_base = TPS65215_REG_INT_LDO_2,
+ .ack_base = TPS65215_REG_INT_LDO_2,
+ .clear_ack = 1,
+ .num_regs = 8,
+ .sub_reg_offsets = tps65215_sub_irq_offsets,
+};
+
static const struct regmap_irq_chip tps65219_irq_chip = {
.name = "tps65219_irq",
.main_status = TPS65219_REG_INT_SOURCE,
@@ -218,10 +449,34 @@ static const struct regmap_irq_chip tps65219_irq_chip = {
.sub_reg_offsets = tps65219_sub_irq_offsets,
};
+struct tps65219_chip_data {
+ const struct regmap_irq_chip *irq_chip;
+ const struct mfd_cell *cells;
+ int n_cells;
+};
+
+static struct tps65219_chip_data chip_info_table[] = {
+ [TPS65214] = {
+ .irq_chip = &tps65214_irq_chip,
+ .cells = tps65214_cells,
+ .n_cells = ARRAY_SIZE(tps65214_cells),
+ },
+ [TPS65215] = {
+ .irq_chip = &tps65215_irq_chip,
+ .cells = tps65215_cells,
+ .n_cells = ARRAY_SIZE(tps65215_cells),
+ },
+ [TPS65219] = {
+ .irq_chip = &tps65219_irq_chip,
+ .cells = tps65219_cells,
+ .n_cells = ARRAY_SIZE(tps65219_cells),
+ },
+};
+
static int tps65219_probe(struct i2c_client *client)
{
struct tps65219 *tps;
- unsigned int chipid;
+ struct tps65219_chip_data *pmic;
bool pwr_button;
int ret;
@@ -232,6 +487,8 @@ static int tps65219_probe(struct i2c_client *client)
i2c_set_clientdata(client, tps);
tps->dev = &client->dev;
+ tps->chip_id = (uintptr_t)i2c_get_match_data(client);
+ pmic = &chip_info_table[tps->chip_id];
tps->regmap = devm_regmap_init_i2c(client, &tps65219_regmap_config);
if (IS_ERR(tps->regmap)) {
@@ -240,20 +497,14 @@ static int tps65219_probe(struct i2c_client *client)
return ret;
}
- ret = devm_regmap_add_irq_chip(&client->dev, tps->regmap, client->irq,
- IRQF_ONESHOT, 0, &tps65219_irq_chip,
+ ret = devm_regmap_add_irq_chip(tps->dev, tps->regmap, client->irq,
+ IRQF_ONESHOT, 0, pmic->irq_chip,
&tps->irq_data);
if (ret)
return ret;
- ret = regmap_read(tps->regmap, TPS65219_REG_TI_DEV_ID, &chipid);
- if (ret) {
- dev_err(tps->dev, "Failed to read device ID: %d\n", ret);
- return ret;
- }
-
ret = devm_mfd_add_devices(tps->dev, PLATFORM_DEVID_AUTO,
- tps65219_cells, ARRAY_SIZE(tps65219_cells),
+ pmic->cells, pmic->n_cells,
NULL, 0, regmap_irq_get_domain(tps->irq_data));
if (ret) {
dev_err(tps->dev, "Failed to add child devices: %d\n", ret);
@@ -291,7 +542,9 @@ static int tps65219_probe(struct i2c_client *client)
}
static const struct of_device_id of_tps65219_match_table[] = {
- { .compatible = "ti,tps65219", },
+ { .compatible = "ti,tps65214", .data = (void *)TPS65214, },
+ { .compatible = "ti,tps65215", .data = (void *)TPS65215, },
+ { .compatible = "ti,tps65219", .data = (void *)TPS65219, },
{}
};
MODULE_DEVICE_TABLE(of, of_tps65219_match_table);
@@ -306,5 +559,5 @@ static struct i2c_driver tps65219_driver = {
module_i2c_driver(tps65219_driver);
MODULE_AUTHOR("Jerome Neanne <jneanne@baylibre.com>");
-MODULE_DESCRIPTION("TPS65219 power management IC driver");
+MODULE_DESCRIPTION("TPS65214/TPS65215/TPS65219 PMIC driver");
MODULE_LICENSE("GPL");
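Context for the probe rework above: the chip variant now comes from the OF match entry's .data, handed back by i2c_get_match_data() and narrowed through uintptr_t, instead of a runtime read of the device-ID register. A standalone sketch of that round trip; the enum, table and names below are local stand-ins, not the driver's symbols:

	#include <stdint.h>
	#include <stdio.h>

	enum chip_id { CHIP_A, CHIP_B, CHIP_C };

	struct match_entry {
		const char *compatible;
		const void *data;	/* variant encoded directly in the pointer */
	};

	static const struct match_entry table[] = {
		{ "vendor,chip-a", (void *)(uintptr_t)CHIP_A },
		{ "vendor,chip-b", (void *)(uintptr_t)CHIP_B },
		{ "vendor,chip-c", (void *)(uintptr_t)CHIP_C },
	};

	int main(void)
	{
		/* what i2c_get_match_data() would hand back for the matched entry */
		const void *match_data = table[1].data;
		enum chip_id id = (enum chip_id)(uintptr_t)match_data;

		printf("selected variant %d\n", id);	/* prints 1 */
		return 0;
	}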
diff --git a/drivers/mfd/upboard-fpga.c b/drivers/mfd/upboard-fpga.c
index 5a330e2f2229..afce623bbba5 100644
--- a/drivers/mfd/upboard-fpga.c
+++ b/drivers/mfd/upboard-fpga.c
@@ -11,7 +11,6 @@
* Author: Thomas Richard <thomas.richard@bootlin.com>
*/
-#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -311,7 +310,7 @@ MODULE_DEVICE_TABLE(acpi, upboard_fpga_acpi_match);
static struct platform_driver upboard_fpga_driver = {
.driver = {
.name = "upboard-fpga",
- .acpi_match_table = ACPI_PTR(upboard_fpga_acpi_match),
+ .acpi_match_table = upboard_fpga_acpi_match,
.dev_groups = upboard_fpga_groups,
},
.probe = upboard_fpga_probe,
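A note on the ACPI_PTR() removal above: the wrapper resolves to its argument only under CONFIG_ACPI and to NULL otherwise, so pointing .acpi_match_table at the table directly makes the reference unconditional and drops the need for <linux/acpi.h> here. A tiny sketch of the wrapper's behaviour, assuming the stock definition from <linux/acpi.h>; the string is a placeholder for the real match table:

	#include <stdio.h>

	/* stock convention, reproduced locally for illustration */
	#ifdef CONFIG_ACPI
	#define ACPI_PTR(_ptr)	(_ptr)
	#else
	#define ACPI_PTR(_ptr)	(NULL)
	#endif

	static const char acpi_match[] = "AANT0000";	/* placeholder table */

	int main(void)
	{
		/* built without -DCONFIG_ACPI this prints a null pointer */
		printf("acpi_match_table = %p\n", (const void *)ACPI_PTR(acpi_match));
		return 0;
	}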
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index 67d9391f1855..7575fee96cc6 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -3,7 +3,7 @@
config INTEL_MEI
tristate "Intel Management Engine Interface"
depends on X86 && PCI
- default GENERIC_CPU || MCORE2 || MATOM || X86_GENERIC
+ default X86_64 || MATOM
help
The Intel Management Engine (Intel ME) provides Manageability,
Security and Media services for system containing Intel chipsets.
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index d5ac71a49386..d294850a35a1 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -28,11 +28,6 @@
#define DRV_MODULE_NAME "pci-endpoint-test"
-#define IRQ_TYPE_UNDEFINED -1
-#define IRQ_TYPE_INTX 0
-#define IRQ_TYPE_MSI 1
-#define IRQ_TYPE_MSIX 2
-
#define PCI_ENDPOINT_TEST_MAGIC 0x0
#define PCI_ENDPOINT_TEST_COMMAND 0x4
@@ -71,6 +66,9 @@
#define PCI_ENDPOINT_TEST_CAPS 0x30
#define CAP_UNALIGNED_ACCESS BIT(0)
+#define CAP_MSI BIT(1)
+#define CAP_MSIX BIT(2)
+#define CAP_INTX BIT(3)
#define PCI_DEVICE_ID_TI_AM654 0xb00c
#define PCI_DEVICE_ID_TI_J7200 0xb00f
@@ -88,7 +86,6 @@
#define PCI_DEVICE_ID_RENESAS_R8A774E1 0x0025
#define PCI_DEVICE_ID_RENESAS_R8A779F0 0x0031
-#define PCI_VENDOR_ID_ROCKCHIP 0x1d87
#define PCI_DEVICE_ID_ROCKCHIP_RK3588 0x3588
static DEFINE_IDA(pci_endpoint_test_ida);
@@ -96,14 +93,6 @@ static DEFINE_IDA(pci_endpoint_test_ida);
#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
miscdev)
-static bool no_msi;
-module_param(no_msi, bool, 0444);
-MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");
-
-static int irq_type = IRQ_TYPE_MSI;
-module_param(irq_type, int, 0444);
-MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");
-
enum pci_barno {
BAR_0,
BAR_1,
@@ -126,6 +115,7 @@ struct pci_endpoint_test {
struct miscdevice miscdev;
enum pci_barno test_reg_bar;
size_t alignment;
+ u32 ep_caps;
const char *name;
};
@@ -166,7 +156,7 @@ static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
struct pci_dev *pdev = test->pdev;
pci_free_irq_vectors(pdev);
- test->irq_type = IRQ_TYPE_UNDEFINED;
+ test->irq_type = PCITEST_IRQ_TYPE_UNDEFINED;
}
static int pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
@@ -177,7 +167,7 @@ static int pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
struct device *dev = &pdev->dev;
switch (type) {
- case IRQ_TYPE_INTX:
+ case PCITEST_IRQ_TYPE_INTX:
irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
if (irq < 0) {
dev_err(dev, "Failed to get Legacy interrupt\n");
@@ -185,7 +175,7 @@ static int pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
}
break;
- case IRQ_TYPE_MSI:
+ case PCITEST_IRQ_TYPE_MSI:
irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
if (irq < 0) {
dev_err(dev, "Failed to get MSI interrupts\n");
@@ -193,7 +183,7 @@ static int pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
}
break;
- case IRQ_TYPE_MSIX:
+ case PCITEST_IRQ_TYPE_MSIX:
irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
if (irq < 0) {
dev_err(dev, "Failed to get MSI-X interrupts\n");
@@ -216,10 +206,9 @@ static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
{
int i;
struct pci_dev *pdev = test->pdev;
- struct device *dev = &pdev->dev;
for (i = 0; i < test->num_irqs; i++)
- devm_free_irq(dev, pci_irq_vector(pdev, i), test);
+ free_irq(pci_irq_vector(pdev, i), test);
test->num_irqs = 0;
}
@@ -232,9 +221,9 @@ static int pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
struct device *dev = &pdev->dev;
for (i = 0; i < test->num_irqs; i++) {
- ret = devm_request_irq(dev, pci_irq_vector(pdev, i),
- pci_endpoint_test_irqhandler,
- IRQF_SHARED, test->name, test);
+ ret = request_irq(pci_irq_vector(pdev, i),
+ pci_endpoint_test_irqhandler, IRQF_SHARED,
+ test->name, test);
if (ret)
goto fail;
}
@@ -242,23 +231,26 @@ static int pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
return 0;
fail:
- switch (irq_type) {
- case IRQ_TYPE_INTX:
+ switch (test->irq_type) {
+ case PCITEST_IRQ_TYPE_INTX:
dev_err(dev, "Failed to request IRQ %d for Legacy\n",
pci_irq_vector(pdev, i));
break;
- case IRQ_TYPE_MSI:
+ case PCITEST_IRQ_TYPE_MSI:
dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
pci_irq_vector(pdev, i),
i + 1);
break;
- case IRQ_TYPE_MSIX:
+ case PCITEST_IRQ_TYPE_MSIX:
dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
pci_irq_vector(pdev, i),
i + 1);
break;
}
+ test->num_irqs = i;
+ pci_endpoint_test_release_irq(test);
+
return ret;
}
@@ -272,9 +264,9 @@ static const u32 bar_test_pattern[] = {
};
static int pci_endpoint_test_bar_memcmp(struct pci_endpoint_test *test,
- enum pci_barno barno, int offset,
- void *write_buf, void *read_buf,
- int size)
+ enum pci_barno barno,
+ resource_size_t offset, void *write_buf,
+ void *read_buf, int size)
{
memset(write_buf, bar_test_pattern[barno], size);
memcpy_toio(test->bar[barno] + offset, write_buf, size);
@@ -287,16 +279,19 @@ static int pci_endpoint_test_bar_memcmp(struct pci_endpoint_test *test,
static int pci_endpoint_test_bar(struct pci_endpoint_test *test,
enum pci_barno barno)
{
- int j, bar_size, buf_size, iters;
+ resource_size_t bar_size, offset = 0;
void *write_buf __free(kfree) = NULL;
void *read_buf __free(kfree) = NULL;
struct pci_dev *pdev = test->pdev;
+ int buf_size;
+
+ bar_size = pci_resource_len(pdev, barno);
+ if (!bar_size)
+ return -ENODATA;
if (!test->bar[barno])
return -ENOMEM;
- bar_size = pci_resource_len(pdev, barno);
-
if (barno == test->test_reg_bar)
bar_size = 0x4;
@@ -314,11 +309,12 @@ static int pci_endpoint_test_bar(struct pci_endpoint_test *test,
if (!read_buf)
return -ENOMEM;
- iters = bar_size / buf_size;
- for (j = 0; j < iters; j++)
- if (pci_endpoint_test_bar_memcmp(test, barno, buf_size * j,
- write_buf, read_buf, buf_size))
+ while (offset < bar_size) {
+ if (pci_endpoint_test_bar_memcmp(test, barno, offset, write_buf,
+ read_buf, buf_size))
return -EIO;
+ offset += buf_size;
+ }
return 0;
}
@@ -382,7 +378,7 @@ static int pci_endpoint_test_bars_read_bar(struct pci_endpoint_test *test,
static int pci_endpoint_test_bars(struct pci_endpoint_test *test)
{
enum pci_barno bar;
- bool ret;
+ int ret;
/* Write all BARs in order (without reading). */
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
@@ -398,7 +394,7 @@ static int pci_endpoint_test_bars(struct pci_endpoint_test *test)
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (test->bar[bar]) {
ret = pci_endpoint_test_bars_read_bar(test, bar);
- if (!ret)
+ if (ret)
return ret;
}
}
@@ -411,7 +407,7 @@ static int pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
u32 val;
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
- IRQ_TYPE_INTX);
+ PCITEST_IRQ_TYPE_INTX);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
COMMAND_RAISE_INTX_IRQ);
@@ -431,7 +427,8 @@ static int pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
int ret;
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
- msix ? IRQ_TYPE_MSIX : IRQ_TYPE_MSI);
+ msix ? PCITEST_IRQ_TYPE_MSIX :
+ PCITEST_IRQ_TYPE_MSI);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
msix ? COMMAND_RAISE_MSIX_IRQ :
@@ -507,7 +504,8 @@ static int pci_endpoint_test_copy(struct pci_endpoint_test *test,
if (use_dma)
flags |= FLAG_USE_DMA;
- if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
+ if (irq_type < PCITEST_IRQ_TYPE_INTX ||
+ irq_type > PCITEST_IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
return -EINVAL;
}
@@ -639,7 +637,8 @@ static int pci_endpoint_test_write(struct pci_endpoint_test *test,
if (use_dma)
flags |= FLAG_USE_DMA;
- if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
+ if (irq_type < PCITEST_IRQ_TYPE_INTX ||
+ irq_type > PCITEST_IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
return -EINVAL;
}
@@ -735,7 +734,8 @@ static int pci_endpoint_test_read(struct pci_endpoint_test *test,
if (use_dma)
flags |= FLAG_USE_DMA;
- if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
+ if (irq_type < PCITEST_IRQ_TYPE_INTX ||
+ irq_type > PCITEST_IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
return -EINVAL;
}
@@ -805,11 +805,24 @@ static int pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
struct device *dev = &pdev->dev;
int ret;
- if (req_irq_type < IRQ_TYPE_INTX || req_irq_type > IRQ_TYPE_MSIX) {
+ if (req_irq_type < PCITEST_IRQ_TYPE_INTX ||
+ req_irq_type > PCITEST_IRQ_TYPE_AUTO) {
dev_err(dev, "Invalid IRQ type option\n");
return -EINVAL;
}
+ if (req_irq_type == PCITEST_IRQ_TYPE_AUTO) {
+ if (test->ep_caps & CAP_MSI)
+ req_irq_type = PCITEST_IRQ_TYPE_MSI;
+ else if (test->ep_caps & CAP_MSIX)
+ req_irq_type = PCITEST_IRQ_TYPE_MSIX;
+ else if (test->ep_caps & CAP_INTX)
+ req_irq_type = PCITEST_IRQ_TYPE_INTX;
+ else
+ /* fall back to MSI if no caps are defined */
+ req_irq_type = PCITEST_IRQ_TYPE_MSI;
+ }
+
if (test->irq_type == req_irq_type)
return 0;
@@ -874,7 +887,7 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
ret = pci_endpoint_test_set_irq(test, arg);
break;
case PCITEST_GET_IRQTYPE:
- ret = irq_type;
+ ret = test->irq_type;
break;
case PCITEST_CLEAR_IRQ:
ret = pci_endpoint_test_clear_irq(test);
@@ -895,13 +908,12 @@ static void pci_endpoint_test_get_capabilities(struct pci_endpoint_test *test)
{
struct pci_dev *pdev = test->pdev;
struct device *dev = &pdev->dev;
- u32 caps;
- caps = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CAPS);
- dev_dbg(dev, "PCI_ENDPOINT_TEST_CAPS: %#x\n", caps);
+ test->ep_caps = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CAPS);
+ dev_dbg(dev, "PCI_ENDPOINT_TEST_CAPS: %#x\n", test->ep_caps);
/* CAP_UNALIGNED_ACCESS is set if the EP can do unaligned access */
- if (caps & CAP_UNALIGNED_ACCESS)
+ if (test->ep_caps & CAP_UNALIGNED_ACCESS)
test->alignment = 0;
}
@@ -910,7 +922,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
{
int ret;
int id;
- char name[24];
+ char name[29];
enum pci_barno bar;
void __iomem *base;
struct device *dev = &pdev->dev;
@@ -929,17 +941,14 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
test->test_reg_bar = 0;
test->alignment = 0;
test->pdev = pdev;
- test->irq_type = IRQ_TYPE_UNDEFINED;
-
- if (no_msi)
- irq_type = IRQ_TYPE_INTX;
+ test->irq_type = PCITEST_IRQ_TYPE_UNDEFINED;
data = (struct pci_endpoint_test_data *)ent->driver_data;
if (data) {
test_reg_bar = data->test_reg_bar;
test->test_reg_bar = test_reg_bar;
test->alignment = data->alignment;
- irq_type = data->irq_type;
+ test->irq_type = data->irq_type;
}
init_completion(&test->irq_raised);
@@ -961,7 +970,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- ret = pci_endpoint_test_alloc_irq_vectors(test, irq_type);
+ ret = pci_endpoint_test_alloc_irq_vectors(test, test->irq_type);
if (ret)
goto err_disable_irq;
@@ -1083,23 +1092,23 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
static const struct pci_endpoint_test_data default_data = {
.test_reg_bar = BAR_0,
.alignment = SZ_4K,
- .irq_type = IRQ_TYPE_MSI,
+ .irq_type = PCITEST_IRQ_TYPE_MSI,
};
static const struct pci_endpoint_test_data am654_data = {
.test_reg_bar = BAR_2,
.alignment = SZ_64K,
- .irq_type = IRQ_TYPE_MSI,
+ .irq_type = PCITEST_IRQ_TYPE_MSI,
};
static const struct pci_endpoint_test_data j721e_data = {
.alignment = 256,
- .irq_type = IRQ_TYPE_MSI,
+ .irq_type = PCITEST_IRQ_TYPE_MSI,
};
static const struct pci_endpoint_test_data rk3588_data = {
.alignment = SZ_64K,
- .irq_type = IRQ_TYPE_MSI,
+ .irq_type = PCITEST_IRQ_TYPE_MSI,
};
/*
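The PCITEST_IRQ_TYPE_AUTO handling above resolves the requested type from the endpoint's capability bits, preferring MSI, then MSI-X, then INTX, and defaulting to MSI when the endpoint advertises nothing. A standalone sketch of that priority order; the constants below are local stand-ins for the driver's CAP_* and PCITEST_IRQ_TYPE_* values:

	#include <stdint.h>
	#include <stdio.h>

	#define CAP_MSI		(1u << 1)
	#define CAP_MSIX	(1u << 2)
	#define CAP_INTX	(1u << 3)

	enum irq_type { IRQ_INTX, IRQ_MSI, IRQ_MSIX };

	static enum irq_type pick_irq_type(uint32_t ep_caps)
	{
		if (ep_caps & CAP_MSI)
			return IRQ_MSI;
		if (ep_caps & CAP_MSIX)
			return IRQ_MSIX;
		if (ep_caps & CAP_INTX)
			return IRQ_INTX;
		return IRQ_MSI;		/* nothing advertised: fall back to MSI */
	}

	int main(void)
	{
		printf("%d\n", pick_irq_type(CAP_MSIX | CAP_INTX));	/* 2 == MSI-X */
		return 0;
	}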
diff --git a/drivers/misc/vcpu_stall_detector.c b/drivers/misc/vcpu_stall_detector.c
index f0b1fc87490e..26166357b255 100644
--- a/drivers/misc/vcpu_stall_detector.c
+++ b/drivers/misc/vcpu_stall_detector.c
@@ -111,8 +111,7 @@ static int start_stall_detector_cpu(unsigned int cpu)
ping_timeout_ms = vcpu_stall_config.stall_timeout_sec *
MSEC_PER_SEC / 2;
- hrtimer_init(vcpu_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- vcpu_hrtimer->function = vcpu_stall_detect_timer_fn;
+ hrtimer_setup(vcpu_hrtimer, vcpu_stall_detect_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
vcpu_stall_detector->is_initialized = true;
hrtimer_start(vcpu_hrtimer, ms_to_ktime(ping_timeout_ms),
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 5241528f8b90..ce08e0ea7fc1 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -335,7 +335,7 @@ int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
int err;
- if (mrq->cmd && mrq->cmd->has_ext_addr)
+ if (mrq->cmd->has_ext_addr)
mmc_send_ext_addr(host, mrq->cmd->ext_addr);
init_completion(&mrq->cmd_completion);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 6a23be214543..1522fd2b517d 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -11,6 +11,7 @@
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/stat.h>
+#include <linux/string.h>
#include <linux/pm_runtime.h>
#include <linux/random.h>
#include <linux/sysfs.h>
@@ -66,7 +67,7 @@ static int mmc_decode_cid(struct mmc_card *card)
/*
* The selection of the format here is based upon published
- * specs from sandisk and from what people have reported.
+ * specs from SanDisk and from what people have reported.
*/
switch (card->csd.mmca_vsn) {
case 0: /* MMC v1.0 - v1.2 */
@@ -109,6 +110,9 @@ static int mmc_decode_cid(struct mmc_card *card)
return -EINVAL;
}
+ /* some product names include trailing whitespace */
+ strim(card->cid.prod_name);
+
return 0;
}
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
index 37cd858df0f4..4b47e6c3b04b 100644
--- a/drivers/mmc/core/pwrseq_simple.c
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -54,8 +54,7 @@ static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq,
else
bitmap_zero(values, nvalues);
- gpiod_set_array_value_cansleep(nvalues, reset_gpios->desc,
- reset_gpios->info, values);
+ gpiod_multi_set_value_cansleep(reset_gpios, values);
bitmap_free(values);
}
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index ab662f502fe7..3ba62f825b84 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -523,5 +523,5 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
struct request *req = mmc_queue_req_to_req(mqrq);
- return blk_rq_map_sg(mq->queue, req, mqrq->sg);
+ return blk_rq_map_sg(req, mqrq->sg);
}
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index cc757b850e79..8eba697d3d86 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -11,6 +11,7 @@
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/stat.h>
+#include <linux/string.h>
#include <linux/pm_runtime.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
@@ -95,6 +96,9 @@ void mmc_decode_cid(struct mmc_card *card)
card->cid.month = unstuff_bits(resp, 8, 4);
card->cid.year += 2000; /* SD cards year offset */
+
+ /* some product names may include trailing whitespace */
+ strim(card->cid.prod_name);
}
/*
diff --git a/drivers/mmc/core/sdio_uart.c b/drivers/mmc/core/sdio_uart.c
index 6b7471dba3bf..7423a601e1e5 100644
--- a/drivers/mmc/core/sdio_uart.c
+++ b/drivers/mmc/core/sdio_uart.c
@@ -471,7 +471,7 @@ static void sdio_uart_check_modem_status(struct sdio_uart_port *port)
port->icount.cts++;
tty = tty_port_tty_get(&port->port);
if (tty && C_CRTSCTS(tty)) {
- int cts = (status & UART_MSR_CTS);
+ bool cts = status & UART_MSR_CTS;
if (tty->hw_stopped) {
if (cts) {
tty->hw_stopped = false;
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 12247219e1c2..5fd455816393 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -159,18 +159,6 @@ int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on)
}
EXPORT_SYMBOL(mmc_gpio_set_cd_wake);
-/* Register an alternate interrupt service routine for
- * the card-detect GPIO.
- */
-void mmc_gpio_set_cd_isr(struct mmc_host *host, irq_handler_t isr)
-{
- struct mmc_gpio *ctx = host->slot.handler_priv;
-
- WARN_ON(ctx->cd_gpio_isr);
- ctx->cd_gpio_isr = isr;
-}
-EXPORT_SYMBOL(mmc_gpio_set_cd_isr);
-
/**
* mmc_gpiod_request_cd - request a gpio descriptor for card-detection
* @host: mmc host
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index fc360902729d..24fffc702a94 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -2499,8 +2499,10 @@ static int atmci_probe(struct platform_device *pdev)
/* Get MCI capabilities and set operations according to it */
atmci_get_cap(host);
ret = atmci_configure_dma(host);
- if (ret == -EPROBE_DEFER)
+ if (ret == -EPROBE_DEFER) {
+ clk_disable_unprepare(host->mck);
goto err_dma_probe_defer;
+ }
if (ret == 0) {
host->prepare_data = &atmci_prepare_data_dma;
host->submit_data = &atmci_submit_data_dma;
diff --git a/drivers/mmc/host/cqhci-crypto.c b/drivers/mmc/host/cqhci-crypto.c
index cb8044093402..5a467098a0d6 100644
--- a/drivers/mmc/host/cqhci-crypto.c
+++ b/drivers/mmc/host/cqhci-crypto.c
@@ -84,11 +84,11 @@ static int cqhci_crypto_keyslot_program(struct blk_crypto_profile *profile,
if (ccap_array[cap_idx].algorithm_id == CQHCI_CRYPTO_ALG_AES_XTS) {
/* In XTS mode, the blk_crypto_key's size is already doubled */
- memcpy(cfg.crypto_key, key->raw, key->size/2);
+ memcpy(cfg.crypto_key, key->bytes, key->size/2);
memcpy(cfg.crypto_key + CQHCI_CRYPTO_KEY_MAX_SIZE/2,
- key->raw + key->size/2, key->size/2);
+ key->bytes + key->size/2, key->size/2);
} else {
- memcpy(cfg.crypto_key, key->raw, key->size);
+ memcpy(cfg.crypto_key, key->bytes, key->size);
}
cqhci_crypto_program_key(cq_host, &cfg, slot);
@@ -204,6 +204,8 @@ int cqhci_crypto_init(struct cqhci_host *cq_host)
/* Unfortunately, CQHCI crypto only supports 32 DUN bits. */
profile->max_dun_bytes_supported = 4;
+ profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
+
/*
* Cache all the crypto capabilities and advertise the supported crypto
* modes and data unit sizes to the block layer.
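For the XTS branch above: blk-crypto passes one buffer holding both XTS keys back to back (key->size already being the doubled length), and the controller expects the second half at a fixed offset of CQHCI_CRYPTO_KEY_MAX_SIZE/2. A standalone sketch of that split, with a made-up size standing in for the real constant:

	#include <stdio.h>
	#include <string.h>

	#define KEY_MAX_SIZE	64	/* stand-in for CQHCI_CRYPTO_KEY_MAX_SIZE */

	int main(void)
	{
		unsigned char raw[64];			/* doubled XTS key: key1 || key2 */
		unsigned char cfg_key[KEY_MAX_SIZE] = { 0 };
		size_t size = sizeof(raw);		/* already the doubled size */

		memset(raw, 0x11, size / 2);			/* key1 */
		memset(raw + size / 2, 0x22, size / 2);		/* key2 */

		/* key1 at offset 0, key2 at the fixed halfway point */
		memcpy(cfg_key, raw, size / 2);
		memcpy(cfg_key + KEY_MAX_SIZE / 2, raw + size / 2, size / 2);

		printf("%02x %02x\n", cfg_key[0], cfg_key[KEY_MAX_SIZE / 2]);	/* 11 22 */
		return 0;
	}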
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 53d32d0f2709..e3548408ca39 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -27,6 +27,8 @@ enum dw_mci_exynos_type {
DW_MCI_TYPE_EXYNOS5420_SMU,
DW_MCI_TYPE_EXYNOS7,
DW_MCI_TYPE_EXYNOS7_SMU,
+ DW_MCI_TYPE_EXYNOS7870,
+ DW_MCI_TYPE_EXYNOS7870_SMU,
DW_MCI_TYPE_ARTPEC8,
};
@@ -70,6 +72,12 @@ static struct dw_mci_exynos_compatible {
.compatible = "samsung,exynos7-dw-mshc-smu",
.ctrl_type = DW_MCI_TYPE_EXYNOS7_SMU,
}, {
+ .compatible = "samsung,exynos7870-dw-mshc",
+ .ctrl_type = DW_MCI_TYPE_EXYNOS7870,
+ }, {
+ .compatible = "samsung,exynos7870-dw-mshc-smu",
+ .ctrl_type = DW_MCI_TYPE_EXYNOS7870_SMU,
+ }, {
.compatible = "axis,artpec8-dw-mshc",
.ctrl_type = DW_MCI_TYPE_ARTPEC8,
},
@@ -85,6 +93,8 @@ static inline u8 dw_mci_exynos_get_ciu_div(struct dw_mci *host)
return EXYNOS4210_FIXED_CIU_CLK_DIV;
else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
return SDMMC_CLKSEL_GET_DIV(mci_readl(host, CLKSEL64)) + 1;
else
@@ -100,7 +110,8 @@ static void dw_mci_exynos_config_smu(struct dw_mci *host)
 * set for non-encryption mode at this time.
*/
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420_SMU ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU) {
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU) {
mci_writel(host, MPSBEGIN0, 0);
mci_writel(host, MPSEND0, SDMMC_ENDING_SEC_NR_MAX);
mci_writel(host, MPSCTRL0, SDMMC_MPSCTRL_SECURE_WRITE_BIT |
@@ -126,6 +137,12 @@ static int dw_mci_exynos_priv_init(struct dw_mci *host)
DQS_CTRL_GET_RD_DELAY(priv->saved_strobe_ctrl);
}
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU) {
+ /* The 64-bit FIFO on these controllers must be accessed as two 32-bit words */
+ host->quirks |= DW_MMC_QUIRK_FIFO64_32;
+ }
+
if (priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) {
/* Quirk needed for the ARTPEC-8 SoC */
host->quirks |= DW_MMC_QUIRK_EXTENDED_TMOUT;
@@ -143,6 +160,8 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
clksel = mci_readl(host, CLKSEL64);
else
@@ -152,6 +171,8 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
mci_writel(host, CLKSEL64, clksel);
else
@@ -222,6 +243,8 @@ static int dw_mci_exynos_resume_noirq(struct device *dev)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
clksel = mci_readl(host, CLKSEL64);
else
@@ -230,6 +253,8 @@ static int dw_mci_exynos_resume_noirq(struct device *dev)
if (clksel & SDMMC_CLKSEL_WAKEUP_INT) {
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
mci_writel(host, CLKSEL64, clksel);
else
@@ -409,6 +434,8 @@ static inline u8 dw_mci_exynos_get_clksmpl(struct dw_mci *host)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL64));
else
@@ -422,6 +449,8 @@ static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
clksel = mci_readl(host, CLKSEL64);
else
@@ -429,6 +458,8 @@ static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample)
clksel = SDMMC_CLKSEL_UP_SAMPLE(clksel, sample);
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
mci_writel(host, CLKSEL64, clksel);
else
@@ -443,6 +474,8 @@ static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
clksel = mci_readl(host, CLKSEL64);
else
@@ -453,6 +486,8 @@ static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
mci_writel(host, CLKSEL64, clksel);
else
@@ -632,6 +667,10 @@ static const struct of_device_id dw_mci_exynos_match[] = {
.data = &exynos_drv_data, },
{ .compatible = "samsung,exynos7-dw-mshc-smu",
.data = &exynos_drv_data, },
+ { .compatible = "samsung,exynos7870-dw-mshc",
+ .data = &exynos_drv_data, },
+ { .compatible = "samsung,exynos7870-dw-mshc-smu",
+ .data = &exynos_drv_data, },
{ .compatible = "axis,artpec8-dw-mshc",
.data = &artpec_drv_data, },
{},
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 3cbda98d08d2..bb596d169420 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1875,8 +1875,7 @@ static void dw_mci_init_fault(struct dw_mci *host)
{
host->fail_data_crc = (struct fault_attr) FAULT_ATTR_INITIALIZER;
- hrtimer_init(&host->fault_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- host->fault_timer.function = dw_mci_fault_timer;
+ hrtimer_setup(&host->fault_timer, dw_mci_fault_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
#else
static void dw_mci_init_fault(struct dw_mci *host)
@@ -2579,6 +2578,91 @@ static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
}
}
+static void dw_mci_push_data64_32(struct dw_mci *host, void *buf, int cnt)
+{
+ struct mmc_data *data = host->data;
+ int init_cnt = cnt;
+
+ /* try and push anything in the part_buf */
+ if (unlikely(host->part_buf_count)) {
+ int len = dw_mci_push_part_bytes(host, buf, cnt);
+
+ buf += len;
+ cnt -= len;
+
+ if (host->part_buf_count == 8) {
+ mci_fifo_l_writeq(host->fifo_reg, host->part_buf);
+ host->part_buf_count = 0;
+ }
+ }
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (unlikely((unsigned long)buf & 0x7)) {
+ while (cnt >= 8) {
+ u64 aligned_buf[16];
+ int len = min(cnt & -8, (int)sizeof(aligned_buf));
+ int items = len >> 3;
+ int i;
+ /* memcpy from input buffer into aligned buffer */
+ memcpy(aligned_buf, buf, len);
+ buf += len;
+ cnt -= len;
+ /* push data from aligned buffer into fifo */
+ for (i = 0; i < items; ++i)
+ mci_fifo_l_writeq(host->fifo_reg, aligned_buf[i]);
+ }
+ } else
+#endif
+ {
+ u64 *pdata = buf;
+
+ for (; cnt >= 8; cnt -= 8)
+ mci_fifo_l_writeq(host->fifo_reg, *pdata++);
+ buf = pdata;
+ }
+ /* put anything remaining in the part_buf */
+ if (cnt) {
+ dw_mci_set_part_bytes(host, buf, cnt);
+ /* Push data if we have reached the expected data length */
+ if ((data->bytes_xfered + init_cnt) ==
+ (data->blksz * data->blocks))
+ mci_fifo_l_writeq(host->fifo_reg, host->part_buf);
+ }
+}
+
+static void dw_mci_pull_data64_32(struct dw_mci *host, void *buf, int cnt)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (unlikely((unsigned long)buf & 0x7)) {
+ while (cnt >= 8) {
+ /* pull data from fifo into aligned buffer */
+ u64 aligned_buf[16];
+ int len = min(cnt & -8, (int)sizeof(aligned_buf));
+ int items = len >> 3;
+ int i;
+
+ for (i = 0; i < items; ++i)
+ aligned_buf[i] = mci_fifo_l_readq(host->fifo_reg);
+
+ /* memcpy from aligned buffer into output buffer */
+ memcpy(buf, aligned_buf, len);
+ buf += len;
+ cnt -= len;
+ }
+ } else
+#endif
+ {
+ u64 *pdata = buf;
+
+ for (; cnt >= 8; cnt -= 8)
+ *pdata++ = mci_fifo_l_readq(host->fifo_reg);
+ buf = pdata;
+ }
+ if (cnt) {
+ host->part_buf = mci_fifo_l_readq(host->fifo_reg);
+ dw_mci_pull_final_bytes(host, buf, cnt);
+ }
+}
+
static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
int len;
@@ -3379,8 +3463,13 @@ int dw_mci_probe(struct dw_mci *host)
width = 16;
host->data_shift = 1;
} else if (i == 2) {
- host->push_data = dw_mci_push_data64;
- host->pull_data = dw_mci_pull_data64;
+ if ((host->quirks & DW_MMC_QUIRK_FIFO64_32)) {
+ host->push_data = dw_mci_push_data64_32;
+ host->pull_data = dw_mci_pull_data64_32;
+ } else {
+ host->push_data = dw_mci_push_data64;
+ host->pull_data = dw_mci_pull_data64;
+ }
width = 64;
host->data_shift = 3;
} else {
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 6447b916990d..5463392dc811 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -281,6 +281,8 @@ struct dw_mci_board {
/* Support for longer data read timeout */
#define DW_MMC_QUIRK_EXTENDED_TMOUT BIT(0)
+/* Force 32-bit access to the FIFO */
+#define DW_MMC_QUIRK_FIFO64_32 BIT(1)
#define DW_MMC_240A 0x240a
#define DW_MMC_280A 0x280a
@@ -472,6 +474,31 @@ struct dw_mci_board {
#define mci_fifo_writel(__value, __reg) __raw_writel(__reg, __value)
#define mci_fifo_writeq(__value, __reg) __raw_writeq(__reg, __value)
+/*
+ * Some dw_mmc devices have 64-bit FIFOs, but expect them to be
+ * accessed using two 32-bit accesses. If such a controller is used
+ * with a 64-bit kernel, the split has to be done explicitly.
+ */
+static inline u64 mci_fifo_l_readq(void __iomem *addr)
+{
+ u64 ans;
+ u32 proxy[2];
+
+ proxy[0] = mci_fifo_readl(addr);
+ proxy[1] = mci_fifo_readl(addr + 4);
+ memcpy(&ans, proxy, 8);
+ return ans;
+}
+
+static inline void mci_fifo_l_writeq(void __iomem *addr, u64 value)
+{
+ u32 proxy[2];
+
+ memcpy(proxy, &value, 8);
+ mci_fifo_writel(addr, proxy[0]);
+ mci_fifo_writel(addr + 4, proxy[1]);
+}
+
/* Register access macros */
#define mci_readl(dev, reg) \
readl_relaxed((dev)->regs + SDMMC_##reg)
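The mci_fifo_l_readq()/mci_fifo_l_writeq() helpers above lean on memcpy() between a u64 and a u32[2] so the byte layout is preserved: on a little-endian kernel the low word lands in proxy[0], and issuing the two 32-bit accesses in array order matches what a native 64-bit access would produce. A standalone round-trip sketch of just that split/join, little-endian assumption spelled out:

	#include <inttypes.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		uint64_t value = 0x1122334455667788ULL;
		uint32_t proxy[2];
		uint64_t back;

		/* split, as mci_fifo_l_writeq() does before its two 32-bit writes */
		memcpy(proxy, &value, sizeof(value));

		/* on little-endian, proxy[0] is the low word, proxy[1] the high word */
		printf("low=%08" PRIx32 " high=%08" PRIx32 "\n", proxy[0], proxy[1]);

		/* join, as mci_fifo_l_readq() does after its two 32-bit reads */
		memcpy(&back, proxy, sizeof(back));
		printf("round trip ok: %d\n", back == value);
		return 0;
	}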
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 62252ad4e20d..3cdb2fc44965 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1272,19 +1272,25 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
/* Check for some optional GPIO controls */
slot->vsd = devm_gpiod_get_index_optional(host->dev, "vsd",
id, GPIOD_OUT_LOW);
- if (IS_ERR(slot->vsd))
- return dev_err_probe(host->dev, PTR_ERR(slot->vsd),
+ if (IS_ERR(slot->vsd)) {
+ r = dev_err_probe(host->dev, PTR_ERR(slot->vsd),
"error looking up VSD GPIO\n");
+ goto err_free_host;
+ }
slot->vio = devm_gpiod_get_index_optional(host->dev, "vio",
id, GPIOD_OUT_LOW);
- if (IS_ERR(slot->vio))
- return dev_err_probe(host->dev, PTR_ERR(slot->vio),
+ if (IS_ERR(slot->vio)) {
+ r = dev_err_probe(host->dev, PTR_ERR(slot->vio),
"error looking up VIO GPIO\n");
+ goto err_free_host;
+ }
slot->cover = devm_gpiod_get_index_optional(host->dev, "cover",
id, GPIOD_IN);
- if (IS_ERR(slot->cover))
- return dev_err_probe(host->dev, PTR_ERR(slot->cover),
+ if (IS_ERR(slot->cover)) {
+ r = dev_err_probe(host->dev, PTR_ERR(slot->cover),
"error looking up cover switch GPIO\n");
+ goto err_free_host;
+ }
host->slots[id] = slot;
@@ -1344,6 +1350,7 @@ err_remove_slot_name:
device_remove_file(&mmc->class_dev, &dev_attr_slot_name);
err_remove_host:
mmc_remove_host(mmc);
+err_free_host:
mmc_free_host(mmc);
return r;
}
diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
index f12a87442338..291ddb4ad9be 100644
--- a/drivers/mmc/host/renesas_sdhi.h
+++ b/drivers/mmc/host/renesas_sdhi.h
@@ -95,6 +95,7 @@ struct renesas_sdhi {
struct reset_control *rstc;
struct tmio_mmc_host *host;
+ struct regulator_dev *rdev;
};
#define host_to_priv(host) \
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index f73b84bae0c4..fa6526be3638 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -32,6 +32,8 @@
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
#include <linux/reset.h>
#include <linux/sh_dma.h>
#include <linux/slab.h>
@@ -581,12 +583,24 @@ static void renesas_sdhi_reset(struct tmio_mmc_host *host, bool preserve)
if (!preserve) {
if (priv->rstc) {
+ u32 sd_status;
+ /*
+ * HW reset might have toggled the regulator state in
+ * HW, which the regulator core might be unaware of, so save
+ * and restore the regulator state across the HW reset.
+ */
+ if (priv->rdev)
+ sd_status = sd_ctrl_read32(host, CTL_SD_STATUS);
+
reset_control_reset(priv->rstc);
/* Unknown why but without polling reset status, it will hang */
read_poll_timeout(reset_control_status, ret, ret == 0, 1, 100,
false, priv->rstc);
/* At least SDHI_VER_GEN2_SDR50 needs manual release of reset */
sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
+ if (priv->rdev)
+ sd_ctrl_write32(host, CTL_SD_STATUS, sd_status);
+
priv->needs_adjust_hs400 = false;
renesas_sdhi_set_clock(host, host->clk_cache);
@@ -904,6 +918,102 @@ static void renesas_sdhi_enable_dma(struct tmio_mmc_host *host, bool enable)
renesas_sdhi_sdbuf_width(host, enable ? width : 16);
}
+static const unsigned int renesas_sdhi_vqmmc_voltages[] = {
+ 3300000, 1800000
+};
+
+static int renesas_sdhi_regulator_disable(struct regulator_dev *rdev)
+{
+ struct tmio_mmc_host *host = rdev_get_drvdata(rdev);
+ u32 sd_status;
+
+ sd_status = sd_ctrl_read32(host, CTL_SD_STATUS);
+ sd_status &= ~SD_STATUS_PWEN;
+ sd_ctrl_write32(host, CTL_SD_STATUS, sd_status);
+
+ return 0;
+}
+
+static int renesas_sdhi_regulator_enable(struct regulator_dev *rdev)
+{
+ struct tmio_mmc_host *host = rdev_get_drvdata(rdev);
+ u32 sd_status;
+
+ sd_status = sd_ctrl_read32(host, CTL_SD_STATUS);
+ sd_status |= SD_STATUS_PWEN;
+ sd_ctrl_write32(host, CTL_SD_STATUS, sd_status);
+
+ return 0;
+}
+
+static int renesas_sdhi_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct tmio_mmc_host *host = rdev_get_drvdata(rdev);
+ u32 sd_status;
+
+ sd_status = sd_ctrl_read32(host, CTL_SD_STATUS);
+
+ return (sd_status & SD_STATUS_PWEN) ? 1 : 0;
+}
+
+static int renesas_sdhi_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct tmio_mmc_host *host = rdev_get_drvdata(rdev);
+ u32 sd_status;
+
+ sd_status = sd_ctrl_read32(host, CTL_SD_STATUS);
+
+ return (sd_status & SD_STATUS_IOVS) ? 1800000 : 3300000;
+}
+
+static int renesas_sdhi_regulator_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV,
+ unsigned int *selector)
+{
+ struct tmio_mmc_host *host = rdev_get_drvdata(rdev);
+ u32 sd_status;
+
+ sd_status = sd_ctrl_read32(host, CTL_SD_STATUS);
+ if (min_uV >= 1700000 && max_uV <= 1950000) {
+ sd_status |= SD_STATUS_IOVS;
+ *selector = 1;
+ } else {
+ sd_status &= ~SD_STATUS_IOVS;
+ *selector = 0;
+ }
+ sd_ctrl_write32(host, CTL_SD_STATUS, sd_status);
+
+ return 0;
+}
+
+static int renesas_sdhi_regulator_list_voltage(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ if (selector >= ARRAY_SIZE(renesas_sdhi_vqmmc_voltages))
+ return -EINVAL;
+
+ return renesas_sdhi_vqmmc_voltages[selector];
+}
+
+static const struct regulator_ops renesas_sdhi_regulator_voltage_ops = {
+ .enable = renesas_sdhi_regulator_enable,
+ .disable = renesas_sdhi_regulator_disable,
+ .is_enabled = renesas_sdhi_regulator_is_enabled,
+ .list_voltage = renesas_sdhi_regulator_list_voltage,
+ .get_voltage = renesas_sdhi_regulator_get_voltage,
+ .set_voltage = renesas_sdhi_regulator_set_voltage,
+};
+
+static const struct regulator_desc renesas_sdhi_vqmmc_regulator = {
+ .name = "sdhi-vqmmc-regulator",
+ .of_match = of_match_ptr("vqmmc-regulator"),
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .ops = &renesas_sdhi_regulator_voltage_ops,
+ .volt_table = renesas_sdhi_vqmmc_voltages,
+ .n_voltages = ARRAY_SIZE(renesas_sdhi_vqmmc_voltages),
+};
+
int renesas_sdhi_probe(struct platform_device *pdev,
const struct tmio_mmc_dma_ops *dma_ops,
const struct renesas_sdhi_of_data *of_data,
@@ -911,7 +1021,10 @@ int renesas_sdhi_probe(struct platform_device *pdev,
{
struct tmio_mmc_data *mmd = pdev->dev.platform_data;
struct tmio_mmc_data *mmc_data;
+ struct regulator_config rcfg = { .dev = &pdev->dev, };
+ struct regulator_dev *rdev;
struct renesas_sdhi_dma *dma_priv;
+ struct device *dev = &pdev->dev;
struct tmio_mmc_host *host;
struct renesas_sdhi *priv;
int num_irqs, irq, ret, i;
@@ -1053,6 +1166,24 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (ret)
goto efree;
+ rcfg.of_node = of_get_child_by_name(dev->of_node, "vqmmc-regulator");
+ if (!of_device_is_available(rcfg.of_node)) {
+ of_node_put(rcfg.of_node);
+ rcfg.of_node = NULL;
+ }
+
+ if (rcfg.of_node) {
+ rcfg.driver_data = priv->host;
+ rdev = devm_regulator_register(dev, &renesas_sdhi_vqmmc_regulator, &rcfg);
+ of_node_put(rcfg.of_node);
+ if (IS_ERR(rdev)) {
+ dev_err(dev, "regulator register failed err=%ld", PTR_ERR(rdev));
+ ret = PTR_ERR(rdev);
+ goto efree;
+ }
+ priv->rdev = rdev;
+ }
+
ver = sd_ctrl_read16(host, CTL_VERSION);
/* GEN2_SDR104 is first known SDHI to use 32bit block count */
if (ver < SDHI_VER_GEN2_SDR104 && mmc_data->max_blk_count > U16_MAX)
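The regulator ops added above treat two bits of one status register as the whole regulator: PWEN is the enable state and IOVS selects 1.8 V versus 3.3 V I/O. A toy model of that idea in plain C; the bit names and register variable are local stand-ins, not the driver's:

	#include <stdint.h>
	#include <stdio.h>

	#define PWEN	(1u << 0)	/* power enable */
	#define IOVS	(1u << 16)	/* 1.8 V I/O voltage select */

	static uint32_t sd_status;	/* models CTL_SD_STATUS */

	static void reg_enable(void)      { sd_status |= PWEN; }
	static int  reg_is_enabled(void)  { return !!(sd_status & PWEN); }
	static int  reg_get_voltage(void) { return (sd_status & IOVS) ? 1800000 : 3300000; }
	static void reg_select_1v8(int on)
	{
		sd_status = on ? (sd_status | IOVS) : (sd_status & ~IOVS);
	}

	int main(void)
	{
		reg_enable();
		reg_select_1v8(1);
		printf("enabled=%d voltage=%d uV\n", reg_is_enabled(), reg_get_voltage());
		return 0;
	}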
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index 0ef4d578ade8..48cdcba0f39c 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -503,8 +503,15 @@ static int sdhci_brcmstb_suspend(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ int ret;
clk_disable_unprepare(priv->base_clk);
+ if (host->mmc->caps2 & MMC_CAP2_CQE) {
+ ret = cqhci_suspend(host->mmc);
+ if (ret)
+ return ret;
+ }
+
return sdhci_pltfm_suspend(dev);
}
@@ -529,6 +536,9 @@ static int sdhci_brcmstb_resume(struct device *dev)
ret = clk_set_rate(priv->base_clk, priv->base_freq_hz);
}
+ if (host->mmc->caps2 & MMC_CAP2_CQE)
+ ret = cqhci_resume(host->mmc);
+
return ret;
}
#endif
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index e3d39311fdc7..57bd49eea777 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1873,7 +1873,7 @@ static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
if (!(cqhci_readl(cq_host, CQHCI_CAP) & CQHCI_CAP_CS))
return 0;
- ice = of_qcom_ice_get(dev);
+ ice = devm_of_qcom_ice_get(dev);
if (ice == ERR_PTR(-EOPNOTSUPP)) {
dev_warn(dev, "Disabling inline encryption support\n");
ice = NULL;
@@ -1895,6 +1895,7 @@ static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
profile->ll_ops = sdhci_msm_crypto_ops;
profile->max_dun_bytes_supported = 4;
+ profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
profile->dev = dev;
/*
@@ -1968,7 +1969,7 @@ static int sdhci_msm_ice_keyslot_program(struct blk_crypto_profile *profile,
return qcom_ice_program_key(msm_host->ice,
QCOM_ICE_CRYPTO_ALG_AES_XTS,
QCOM_ICE_CRYPTO_KEY_SIZE_256,
- key->raw,
+ key->bytes,
key->crypto_cfg.data_unit_size / 512,
slot);
}
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index 7ea3da45db32..09b9ab15e499 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -328,12 +328,17 @@ static void dwcmshc_request(struct mmc_host *mmc, struct mmc_request *mrq)
sdhci_request(mmc, mrq);
}
-static void dwcmshc_phy_1_8v_init(struct sdhci_host *host)
+static void dwcmshc_phy_init(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ u32 rxsel = PHY_PAD_RXSEL_3V3;
u32 val;
+ if (priv->flags & FLAG_IO_FIXED_1V8 ||
+ host->mmc->ios.timing & MMC_SIGNAL_VOLTAGE_180)
+ rxsel = PHY_PAD_RXSEL_1V8;
+
/* deassert phy reset & set tx drive strength */
val = PHY_CNFG_RSTN_DEASSERT;
val |= FIELD_PREP(PHY_CNFG_PAD_SP_MASK, PHY_CNFG_PAD_SP);
@@ -353,7 +358,7 @@ static void dwcmshc_phy_1_8v_init(struct sdhci_host *host)
sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R);
/* configure phy pads */
- val = PHY_PAD_RXSEL_1V8;
+ val = rxsel;
val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLUP);
val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N);
@@ -365,65 +370,22 @@ static void dwcmshc_phy_1_8v_init(struct sdhci_host *host)
val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N);
sdhci_writew(host, val, PHY_CLKPAD_CNFG_R);
- val = PHY_PAD_RXSEL_1V8;
+ val = rxsel;
val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLDOWN);
val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N);
sdhci_writew(host, val, PHY_STBPAD_CNFG_R);
/* enable data strobe mode */
- sdhci_writeb(host, FIELD_PREP(PHY_DLLDL_CNFG_SLV_INPSEL_MASK, PHY_DLLDL_CNFG_SLV_INPSEL),
- PHY_DLLDL_CNFG_R);
-
- /* enable phy dll */
- sdhci_writeb(host, PHY_DLL_CTRL_ENABLE, PHY_DLL_CTRL_R);
-}
-
-static void dwcmshc_phy_3_3v_init(struct sdhci_host *host)
-{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
- u32 val;
-
- /* deassert phy reset & set tx drive strength */
- val = PHY_CNFG_RSTN_DEASSERT;
- val |= FIELD_PREP(PHY_CNFG_PAD_SP_MASK, PHY_CNFG_PAD_SP);
- val |= FIELD_PREP(PHY_CNFG_PAD_SN_MASK, PHY_CNFG_PAD_SN);
- sdhci_writel(host, val, PHY_CNFG_R);
-
- /* disable delay line */
- sdhci_writeb(host, PHY_SDCLKDL_CNFG_UPDATE, PHY_SDCLKDL_CNFG_R);
-
- /* set delay line */
- sdhci_writeb(host, priv->delay_line, PHY_SDCLKDL_DC_R);
- sdhci_writeb(host, PHY_DLL_CNFG2_JUMPSTEP, PHY_DLL_CNFG2_R);
-
- /* enable delay lane */
- val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R);
- val &= ~(PHY_SDCLKDL_CNFG_UPDATE);
- sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R);
+ if (rxsel == PHY_PAD_RXSEL_1V8) {
+ u8 sel = FIELD_PREP(PHY_DLLDL_CNFG_SLV_INPSEL_MASK, PHY_DLLDL_CNFG_SLV_INPSEL);
- /* configure phy pads */
- val = PHY_PAD_RXSEL_3V3;
- val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLUP);
- val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
- val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N);
- sdhci_writew(host, val, PHY_CMDPAD_CNFG_R);
- sdhci_writew(host, val, PHY_DATAPAD_CNFG_R);
- sdhci_writew(host, val, PHY_RSTNPAD_CNFG_R);
-
- val = FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
- val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N);
- sdhci_writew(host, val, PHY_CLKPAD_CNFG_R);
-
- val = PHY_PAD_RXSEL_3V3;
- val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLDOWN);
- val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
- val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N);
- sdhci_writew(host, val, PHY_STBPAD_CNFG_R);
+ sdhci_writeb(host, sel, PHY_DLLDL_CNFG_R);
+ }
/* enable phy dll */
sdhci_writeb(host, PHY_DLL_CTRL_ENABLE, PHY_DLL_CTRL_R);
+
}
static void th1520_sdhci_set_phy(struct sdhci_host *host)
@@ -433,11 +395,7 @@ static void th1520_sdhci_set_phy(struct sdhci_host *host)
u32 emmc_caps = MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
u16 emmc_ctrl;
- /* Before power on, set PHY configs */
- if (priv->flags & FLAG_IO_FIXED_1V8)
- dwcmshc_phy_1_8v_init(host);
- else
- dwcmshc_phy_3_3v_init(host);
+ dwcmshc_phy_init(host);
if ((host->mmc->caps2 & emmc_caps) == emmc_caps) {
emmc_ctrl = sdhci_readw(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL);
@@ -1163,7 +1121,7 @@ static const struct sdhci_ops sdhci_dwcmshc_th1520_ops = {
.get_max_clock = dwcmshc_get_max_clock,
.reset = th1520_sdhci_reset,
.adma_write_desc = dwcmshc_adma_write_desc,
- .voltage_switch = dwcmshc_phy_1_8v_init,
+ .voltage_switch = dwcmshc_phy_init,
.platform_execute_tuning = th1520_execute_tuning,
};
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index 54d795205fb4..26a9a8b5682a 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -1339,8 +1339,8 @@ static int sdhci_omap_probe(struct platform_device *pdev)
/* R1B responses is required to properly manage HW busy detection. */
mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
- /* Allow card power off and runtime PM for eMMC/SD card devices */
- mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_AGGRESSIVE_PM;
+ /* Enable SDIO card power off. */
+ mmc->caps |= MMC_CAP_POWER_OFF_CARD;
ret = sdhci_setup_host(host);
if (ret)
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 1f0bd723f011..13a84b9309e0 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -610,8 +610,12 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
sdhci_set_power(host, mode, vdd);
- if (mode == MMC_POWER_OFF)
+ if (mode == MMC_POWER_OFF) {
+ if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD ||
+ slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BYT_SD)
+ usleep_range(15000, 17500);
return;
+ }
/*
* Bus power might not enable after D3 -> D0 transition due to the
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 990723a008ae..3fb56face3d8 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -399,6 +399,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
if (!IS_ERR(pxa->clk_core))
clk_prepare_enable(pxa->clk_core);
+ host->mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
/* enable 1/8V DDR capable */
host->mmc->caps |= MMC_CAP_1_8V_DDR;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index f4a7733a8ad2..5f91b44891f9 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2065,10 +2065,15 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
host->mmc->actual_clock = 0;
- sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ if (clk & SDHCI_CLOCK_CARD_EN)
+ sdhci_writew(host, clk & ~SDHCI_CLOCK_CARD_EN,
+ SDHCI_CLOCK_CONTROL);
- if (clock == 0)
+ if (clock == 0) {
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
return;
+ }
clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
sdhci_enable_clk(host, clk);
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index a75755f31d31..41787ea77a13 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -44,6 +44,7 @@
#define CTL_RESET_SD 0xe0
#define CTL_VERSION 0xe2
#define CTL_SDIF_MODE 0xe6 /* only known on R-Car 2+ */
+#define CTL_SD_STATUS 0xf2 /* only known on RZ/{G2L,G3E,V2H} */
/* Definitions for values the CTL_STOP_INTERNAL_ACTION register can take */
#define TMIO_STOP_STP BIT(0)
@@ -103,6 +104,10 @@
/* Definitions for values the CTL_SDIF_MODE register can take */
#define SDIF_MODE_HS400 BIT(0) /* only known on R-Car 2+ */
+/* Definitions for values the CTL_SD_STATUS register can take */
+#define SD_STATUS_PWEN BIT(0) /* only known on RZ/{G3E,V2H} */
+#define SD_STATUS_IOVS BIT(16) /* only known on RZ/{G3E,V2H} */
+
/* Define some IRQ masks */
/* This is the mask used at reset by the chip */
#define TMIO_MASK_ALL 0x837f031d
@@ -226,6 +231,11 @@ static inline u32 sd_ctrl_read16_and_16_as_32(struct tmio_mmc_host *host,
ioread16(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
}
+static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
+{
+ return ioread32(host->ctl + (addr << host->bus_shift));
+}
+
static inline void sd_ctrl_read32_rep(struct tmio_mmc_host *host, int addr,
u32 *buf, int count)
{
diff --git a/drivers/mtd/devices/mchp48l640.c b/drivers/mtd/devices/mchp48l640.c
index 7584d0ba9396..4af9208f9690 100644
--- a/drivers/mtd/devices/mchp48l640.c
+++ b/drivers/mtd/devices/mchp48l640.c
@@ -23,6 +23,7 @@
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
#include <linux/of.h>
+#include <linux/string_choices.h>
struct mchp48_caps {
unsigned int size;
@@ -128,11 +129,11 @@ static int mchp48l640_write_prepare(struct mchp48l640_flash *flash, bool enable)
mutex_unlock(&flash->lock);
if (ret)
- dev_err(&flash->spi->dev, "write %sable failed ret: %d",
- (enable ? "en" : "dis"), ret);
+ dev_err(&flash->spi->dev, "write %s failed ret: %d",
+ str_enable_disable(enable), ret);
- dev_dbg(&flash->spi->dev, "write %sable success ret: %d",
- (enable ? "en" : "dis"), ret);
+ dev_dbg(&flash->spi->dev, "write %s success ret: %d",
+ str_enable_disable(enable), ret);
if (enable)
return mchp48l640_waitforbit(flash, MCHP48L640_STATUS_WEL, true);
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 724f917f91ba..5ba9a741f5ac 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -741,7 +741,9 @@ int add_mtd_device(struct mtd_info *mtd)
mtd->dev.type = &mtd_devtype;
mtd->dev.class = &mtd_class;
mtd->dev.devt = MTD_DEVT(i);
- dev_set_name(&mtd->dev, "mtd%d", i);
+ error = dev_set_name(&mtd->dev, "mtd%d", i);
+ if (error)
+ goto fail_devname;
dev_set_drvdata(&mtd->dev, mtd);
mtd_check_of_node(mtd);
of_node_get(mtd_get_of_node(mtd));
@@ -790,6 +792,7 @@ fail_nvmem_add:
device_unregister(&mtd->dev);
fail_added:
of_node_put(mtd_get_of_node(mtd));
+fail_devname:
idr_remove(&mtd_idr, i);
fail_locked:
mutex_unlock(&mtd_table_mutex);
@@ -1053,7 +1056,7 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
const struct mtd_partition *parts,
int nr_parts)
{
- int ret;
+ int ret, err;
mtd_set_dev_defaults(mtd);
@@ -1105,8 +1108,11 @@ out:
nvmem_unregister(mtd->otp_factory_nvmem);
}
- if (ret && device_is_registered(&mtd->dev))
- del_mtd_device(mtd);
+ if (ret && device_is_registered(&mtd->dev)) {
+ err = del_mtd_device(mtd);
+ if (err)
+ pr_err("Error when deleting MTD device (%d)\n", err);
+ }
return ret;
}
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 6811a714349d..994e8c51e674 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -690,10 +690,9 @@ int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
parser = mtd_part_parser_get(*types);
if (!parser && !request_module("%s", *types))
parser = mtd_part_parser_get(*types);
- pr_debug("%s: got parser %s\n", master->name,
- parser ? parser->name : NULL);
if (!parser)
continue;
+ pr_debug("%s: got parser %s\n", master->name, parser->name);
ret = mtd_part_do_parse(parser, master, &pparts, data);
if (ret <= 0)
mtd_part_parser_put(parser);
diff --git a/drivers/mtd/mtdpstore.c b/drivers/mtd/mtdpstore.c
index 7ac8ac901306..9cf3872e37ae 100644
--- a/drivers/mtd/mtdpstore.c
+++ b/drivers/mtd/mtdpstore.c
@@ -417,11 +417,14 @@ static void mtdpstore_notify_add(struct mtd_info *mtd)
}
longcnt = BITS_TO_LONGS(div_u64(mtd->size, info->kmsg_size));
- cxt->rmmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
- cxt->usedmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
+ cxt->rmmap = devm_kcalloc(&mtd->dev, longcnt, sizeof(long), GFP_KERNEL);
+ cxt->usedmap = devm_kcalloc(&mtd->dev, longcnt, sizeof(long), GFP_KERNEL);
longcnt = BITS_TO_LONGS(div_u64(mtd->size, mtd->erasesize));
- cxt->badmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
+ cxt->badmap = devm_kcalloc(&mtd->dev, longcnt, sizeof(long), GFP_KERNEL);
+
+ if (!cxt->rmmap || !cxt->usedmap || !cxt->badmap)
+ return;
/* just support dmesg right now */
cxt->dev.flags = PSTORE_FLAGS_DMESG;
@@ -527,9 +530,6 @@ static void mtdpstore_notify_remove(struct mtd_info *mtd)
mtdpstore_flush_removed(cxt);
unregister_pstore_device(&cxt->dev);
- kfree(cxt->badmap);
- kfree(cxt->usedmap);
- kfree(cxt->rmmap);
cxt->mtd = NULL;
cxt->index = -1;
}
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index da1586a36574..db516a45f0c5 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -3,7 +3,11 @@
nandcore-objs := core.o bbt.o
obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o
+ifeq ($(CONFIG_SPI_QPIC_SNAND),y)
+obj-$(CONFIG_SPI_QPIC_SNAND) += qpic_common.o
+else
obj-$(CONFIG_MTD_NAND_QCOM) += qpic_common.o
+endif
obj-y += onenand/
obj-y += raw/
obj-y += spi/
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index fea5b6119956..17f6d9723df9 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -3008,7 +3008,7 @@ static int brcmnand_resume(struct device *dev)
brcmnand_save_restore_cs_config(host, 1);
/* Reset the chip, required by some chips after power-up */
- nand_reset_op(chip);
+ nand_reset(chip, 0);
}
return 0;
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index d76802944453..f4e68008ea03 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -17,6 +17,7 @@
#include <linux/pm_runtime.h>
#include <linux/pinctrl/consumer.h>
#include <linux/dma/mxs-dma.h>
+#include <linux/string_choices.h>
#include "gpmi-nand.h"
#include "gpmi-regs.h"
#include "bch-regs.h"
@@ -2319,8 +2320,8 @@ static int gpmi_nand_attach_chip(struct nand_chip *chip)
"fsl,no-blockmark-swap"))
this->swap_block_mark = false;
}
- dev_dbg(this->dev, "Blockmark swapping %sabled\n",
- this->swap_block_mark ? "en" : "dis");
+ dev_dbg(this->dev, "Blockmark swapping %s\n",
+ str_enabled_disabled(this->swap_block_mark));
ret = gpmi_init_last(this);
if (ret)
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 53e16d39af4b..13e4060bd1b6 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -1833,7 +1833,7 @@ int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
/* READ_ID data bytes are received twice in NV-DDR mode */
if (len && nand_interface_is_nvddr(conf)) {
- ddrbuf = kzalloc(len * 2, GFP_KERNEL);
+ ddrbuf = kcalloc(2, len, GFP_KERNEL);
if (!ddrbuf)
return -ENOMEM;
@@ -2203,7 +2203,7 @@ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
* twice.
*/
if (force_8bit && nand_interface_is_nvddr(conf)) {
- ddrbuf = kzalloc(len * 2, GFP_KERNEL);
+ ddrbuf = kcalloc(2, len, GFP_KERNEL);
if (!ddrbuf)
return -ENOMEM;
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 6720b547892b..5eaa0be367cd 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -165,9 +165,9 @@ static void nandc_set_read_loc_first(struct nand_chip *chip,
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
__le32 locreg_val;
- u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
- ((read_size) << READ_LOCATION_SIZE) |
- ((is_last_read_loc) << READ_LOCATION_LAST));
+ u32 val = FIELD_PREP(READ_LOCATION_OFFSET_MASK, cw_offset) |
+ FIELD_PREP(READ_LOCATION_SIZE_MASK, read_size) |
+ FIELD_PREP(READ_LOCATION_LAST_MASK, is_last_read_loc);
locreg_val = cpu_to_le32(val);
@@ -197,9 +197,9 @@ static void nandc_set_read_loc_last(struct nand_chip *chip,
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
__le32 locreg_val;
- u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
- ((read_size) << READ_LOCATION_SIZE) |
- ((is_last_read_loc) << READ_LOCATION_LAST));
+ u32 val = FIELD_PREP(READ_LOCATION_OFFSET_MASK, cw_offset) |
+ FIELD_PREP(READ_LOCATION_SIZE_MASK, read_size) |
+ FIELD_PREP(READ_LOCATION_LAST_MASK, is_last_read_loc);
locreg_val = cpu_to_le32(val);
@@ -271,14 +271,14 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, i
}
if (host->use_ecc) {
- cfg0 = cpu_to_le32((host->cfg0 & ~(7U << CW_PER_PAGE)) |
- (num_cw - 1) << CW_PER_PAGE);
+ cfg0 = cpu_to_le32((host->cfg0 & ~CW_PER_PAGE_MASK) |
+ FIELD_PREP(CW_PER_PAGE_MASK, (num_cw - 1)));
cfg1 = cpu_to_le32(host->cfg1);
ecc_bch_cfg = cpu_to_le32(host->ecc_bch_cfg);
} else {
- cfg0 = cpu_to_le32((host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
- (num_cw - 1) << CW_PER_PAGE);
+ cfg0 = cpu_to_le32((host->cfg0_raw & ~CW_PER_PAGE_MASK) |
+ FIELD_PREP(CW_PER_PAGE_MASK, (num_cw - 1)));
cfg1 = cpu_to_le32(host->cfg1_raw);
ecc_bch_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
@@ -882,12 +882,12 @@ static void qcom_nandc_codeword_fixup(struct qcom_nand_host *host, int page)
host->bbm_size - host->cw_data;
host->cfg0 &= ~(SPARE_SIZE_BYTES_MASK | UD_SIZE_BYTES_MASK);
- host->cfg0 |= host->spare_bytes << SPARE_SIZE_BYTES |
- host->cw_data << UD_SIZE_BYTES;
+ host->cfg0 |= FIELD_PREP(SPARE_SIZE_BYTES_MASK, host->spare_bytes) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_data);
host->ecc_bch_cfg &= ~ECC_NUM_DATA_BYTES_MASK;
- host->ecc_bch_cfg |= host->cw_data << ECC_NUM_DATA_BYTES;
- host->ecc_buf_cfg = (host->cw_data - 1) << NUM_STEPS;
+ host->ecc_bch_cfg |= FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, host->cw_data);
+ host->ecc_buf_cfg = FIELD_PREP(NUM_STEPS_MASK, host->cw_data - 1);
}
/* implements ecc->read_page() */
@@ -1531,7 +1531,7 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, host->ecc_bytes_hw);
if (!nandc->props->qpic_version2)
- host->ecc_buf_cfg = 0x203 << NUM_STEPS;
+ host->ecc_buf_cfg = FIELD_PREP(NUM_STEPS_MASK, 0x203);
host->clrflashstatus = FS_READY_BSY_N;
host->clrreadstatus = 0xc0;
@@ -1817,7 +1817,7 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE);
nandc->regs->addr0 = q_op.addr1_reg;
nandc->regs->addr1 = q_op.addr2_reg;
- nandc->regs->cfg0 = cpu_to_le32(host->cfg0_raw & ~(7 << CW_PER_PAGE));
+ nandc->regs->cfg0 = cpu_to_le32(host->cfg0_raw & ~CW_PER_PAGE_MASK);
nandc->regs->cfg1 = cpu_to_le32(host->cfg1_raw);
instrs = 3;
} else if (q_op.cmd_reg != cpu_to_le32(OP_RESET_DEVICE)) {
@@ -1900,8 +1900,8 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
/* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
if (!nandc->props->qpic_version2) {
nandc->regs->vld = cpu_to_le32((nandc->vld & ~READ_START_VLD));
- nandc->regs->cmd1 = cpu_to_le32((nandc->cmd1 & ~(0xFF << READ_ADDR))
- | NAND_CMD_PARAM << READ_ADDR);
+ nandc->regs->cmd1 = cpu_to_le32((nandc->cmd1 & ~READ_ADDR_MASK) |
+ FIELD_PREP(READ_ADDR_MASK, NAND_CMD_PARAM));
}
nandc->regs->exec = cpu_to_le32(1);
diff --git a/drivers/mtd/nand/spi/Makefile b/drivers/mtd/nand/spi/Makefile
index 1e61ab21893a..258da42451a4 100644
--- a/drivers/mtd/nand/spi/Makefile
+++ b/drivers/mtd/nand/spi/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-spinand-objs := core.o alliancememory.o ato.o esmt.o foresee.o gigadevice.o macronix.o
+spinand-objs := core.o otp.o
+spinand-objs += alliancememory.o ato.o esmt.o foresee.o gigadevice.o macronix.o
spinand-objs += micron.o paragon.o skyhigh.o toshiba.o winbond.o xtx.o
obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index da4713692674..d16e42cf8fae 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -534,10 +534,20 @@ static int spinand_erase_op(struct spinand_device *spinand,
return spi_mem_exec_op(spinand->spimem, &op);
}
-static int spinand_wait(struct spinand_device *spinand,
- unsigned long initial_delay_us,
- unsigned long poll_delay_us,
- u8 *s)
+/**
+ * spinand_wait() - Poll memory device status
+ * @spinand: the spinand device
+ * @initial_delay_us: delay in us before starting to poll
+ * @poll_delay_us: time to sleep between reads in us
+ * @s: the pointer to variable to store the value of REG_STATUS
+ *
+ * This function polls the status register (REG_STATUS) and returns when
+ * the operation-in-progress (busy) bit is cleared or when the timeout has
+ * expired.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int spinand_wait(struct spinand_device *spinand, unsigned long initial_delay_us,
+ unsigned long poll_delay_us, u8 *s)
{
struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
spinand->scratchbuf);
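Making spinand_wait() non-static is what lets the vendor OTP lock hooks added later in this series poll the device themselves after a PROG EXECUTE. A minimal caller sketch, reusing the SPINAND_WRITE_INITIAL_DELAY_US / SPINAND_WRITE_POLL_DELAY_US constants and the STATUS_PROG_FAILED flag that already appear elsewhere in this diff:

	u8 status;
	int ret;

	/* Block until the busy (OIP) bit clears or the poll times out. */
	ret = spinand_wait(spinand, SPINAND_WRITE_INITIAL_DELAY_US,
			   SPINAND_WRITE_POLL_DELAY_US, &status);
	if (!ret && (status & STATUS_PROG_FAILED))
		ret = -EIO;	/* the device reported a failed program */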
@@ -604,8 +614,16 @@ static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}
-static int spinand_read_page(struct spinand_device *spinand,
- const struct nand_page_io_req *req)
+/**
+ * spinand_read_page() - Read a page
+ * @spinand: the spinand device
+ * @req: the I/O request
+ *
+ * Return: 0 or a positive number of bitflips corrected on success.
+ * A negative error code otherwise.
+ */
+int spinand_read_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 status;
@@ -635,8 +653,16 @@ static int spinand_read_page(struct spinand_device *spinand,
return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
-static int spinand_write_page(struct spinand_device *spinand,
- const struct nand_page_io_req *req)
+/**
+ * spinand_write_page() - Write a page
+ * @spinand: the spinand device
+ * @req: the I/O request
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int spinand_write_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 status;
@@ -674,11 +700,15 @@ static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
{
struct spinand_device *spinand = mtd_to_spinand(mtd);
struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct mtd_ecc_stats old_stats;
struct nand_io_iter iter;
bool disable_ecc = false;
bool ecc_failed = false;
+ unsigned int retry_mode = 0;
int ret;
+ old_stats = mtd->ecc_stats;
+
if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
disable_ecc = true;
@@ -690,18 +720,43 @@ static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
if (ret)
break;
+read_retry:
ret = spinand_read_page(spinand, &iter.req);
if (ret < 0 && ret != -EBADMSG)
break;
- if (ret == -EBADMSG)
+ if (ret == -EBADMSG && spinand->set_read_retry) {
+ if (spinand->read_retries && (++retry_mode <= spinand->read_retries)) {
+ ret = spinand->set_read_retry(spinand, retry_mode);
+ if (ret < 0) {
+ spinand->set_read_retry(spinand, 0);
+ return ret;
+ }
+
+ /* Reset ecc_stats; retry */
+ mtd->ecc_stats = old_stats;
+ goto read_retry;
+ } else {
+ /* No more retry modes; real failure */
+ ecc_failed = true;
+ }
+ } else if (ret == -EBADMSG) {
ecc_failed = true;
- else
+ } else {
*max_bitflips = max_t(unsigned int, *max_bitflips, ret);
+ }
ret = 0;
ops->retlen += iter.req.datalen;
ops->oobretlen += iter.req.ooblen;
+
+ /* Reset to retry mode 0 */
+ if (retry_mode) {
+ retry_mode = 0;
+ ret = spinand->set_read_retry(spinand, retry_mode);
+ if (ret < 0)
+ return ret;
+ }
}
if (ecc_failed && !ret)
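Worked example of the retry loop above: with read_retries = 5 (the value the Macronix entries below register via SPINAND_READ_RETRY()), a page that keeps returning -EBADMSG is read at most 1 + 5 = 6 times, once in the default mode and once in each of retry modes 1 through 5; ecc_failed is only set after mode 5 also fails, and the device is put back into mode 0 at the end of the iteration, so no non-default read level leaks into the next page.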
@@ -1292,6 +1347,10 @@ int spinand_match_and_init(struct spinand_device *spinand,
spinand->id.len = 1 + table[i].devid.len;
spinand->select_target = table[i].select_target;
spinand->set_cont_read = table[i].set_cont_read;
+ spinand->fact_otp = &table[i].fact_otp;
+ spinand->user_otp = &table[i].user_otp;
+ spinand->read_retries = table[i].read_retries;
+ spinand->set_read_retry = table[i].set_read_retry;
op = spinand_select_op_variant(spinand,
info->op_variants.read_cache);
@@ -1478,6 +1537,12 @@ static int spinand_init(struct spinand_device *spinand)
mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
mtd->_resume = spinand_mtd_resume;
+ if (spinand_user_otp_size(spinand) || spinand_fact_otp_size(spinand)) {
+ ret = spinand_set_mtd_otp_ops(spinand);
+ if (ret)
+ goto err_cleanup_ecc_engine;
+ }
+
if (nand->ecc.engine) {
ret = mtd_ooblayout_count_freebytes(mtd);
if (ret < 0)
diff --git a/drivers/mtd/nand/spi/esmt.c b/drivers/mtd/nand/spi/esmt.c
index 323a20901fc9..a164d821464d 100644
--- a/drivers/mtd/nand/spi/esmt.c
+++ b/drivers/mtd/nand/spi/esmt.c
@@ -8,10 +8,15 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
+#include <linux/spi/spi-mem.h>
/* ESMT uses GigaDevice 0xc8 JEDEC ID on some SPI NANDs */
#define SPINAND_MFR_ESMT_C8 0xc8
+#define ESMT_F50L1G41LB_CFG_OTP_PROTECT BIT(7)
+#define ESMT_F50L1G41LB_CFG_OTP_LOCK \
+ (CFG_OTP_ENABLE | ESMT_F50L1G41LB_CFG_OTP_PROTECT)
+
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
@@ -102,6 +107,83 @@ static const struct mtd_ooblayout_ops f50l1g41lb_ooblayout = {
.free = f50l1g41lb_ooblayout_free,
};
+static int f50l1g41lb_otp_info(struct spinand_device *spinand, size_t len,
+ struct otp_info *buf, size_t *retlen, bool user)
+{
+ if (len < sizeof(*buf))
+ return -EINVAL;
+
+ buf->locked = 0;
+ buf->start = 0;
+ buf->length = user ? spinand_user_otp_size(spinand) :
+ spinand_fact_otp_size(spinand);
+
+ *retlen = sizeof(*buf);
+ return 0;
+}
+
+static int f50l1g41lb_fact_otp_info(struct spinand_device *spinand, size_t len,
+ struct otp_info *buf, size_t *retlen)
+{
+ return f50l1g41lb_otp_info(spinand, len, buf, retlen, false);
+}
+
+static int f50l1g41lb_user_otp_info(struct spinand_device *spinand, size_t len,
+ struct otp_info *buf, size_t *retlen)
+{
+ return f50l1g41lb_otp_info(spinand, len, buf, retlen, true);
+}
+
+static int f50l1g41lb_otp_lock(struct spinand_device *spinand, loff_t from,
+ size_t len)
+{
+ struct spi_mem_op write_op = SPINAND_WR_EN_DIS_OP(true);
+ struct spi_mem_op exec_op = SPINAND_PROG_EXEC_OP(0);
+ u8 status;
+ int ret;
+
+ ret = spinand_upd_cfg(spinand, ESMT_F50L1G41LB_CFG_OTP_LOCK,
+ ESMT_F50L1G41LB_CFG_OTP_LOCK);
+ if (ret)
+ return ret;
+
+ ret = spi_mem_exec_op(spinand->spimem, &write_op);
+ if (ret)
+ goto out;
+
+ ret = spi_mem_exec_op(spinand->spimem, &exec_op);
+ if (ret)
+ goto out;
+
+ ret = spinand_wait(spinand,
+ SPINAND_WRITE_INITIAL_DELAY_US,
+ SPINAND_WRITE_POLL_DELAY_US,
+ &status);
+ if (!ret && (status & STATUS_PROG_FAILED))
+ ret = -EIO;
+
+out:
+ if (spinand_upd_cfg(spinand, ESMT_F50L1G41LB_CFG_OTP_LOCK, 0)) {
+ dev_warn(&spinand_to_mtd(spinand)->dev,
+ "Can not disable OTP mode\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static const struct spinand_user_otp_ops f50l1g41lb_user_otp_ops = {
+ .info = f50l1g41lb_user_otp_info,
+ .lock = f50l1g41lb_otp_lock,
+ .read = spinand_user_otp_read,
+ .write = spinand_user_otp_write,
+};
+
+static const struct spinand_fact_otp_ops f50l1g41lb_fact_otp_ops = {
+ .info = f50l1g41lb_fact_otp_info,
+ .read = spinand_fact_otp_read,
+};
+
static const struct spinand_info esmt_c8_spinand_table[] = {
SPINAND_INFO("F50L1G41LB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x01, 0x7f,
@@ -112,7 +194,9 @@ static const struct spinand_info esmt_c8_spinand_table[] = {
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)),
+ SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL),
+ SPINAND_USER_OTP_INFO(28, 2, &f50l1g41lb_user_otp_ops),
+ SPINAND_FACT_OTP_INFO(2, 0, &f50l1g41lb_fact_otp_ops)),
SPINAND_INFO("F50D1G41LB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11, 0x7f,
0x7f, 0x7f),
@@ -122,7 +206,9 @@ static const struct spinand_info esmt_c8_spinand_table[] = {
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)),
+ SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL),
+ SPINAND_USER_OTP_INFO(28, 2, &f50l1g41lb_user_otp_ops),
+ SPINAND_FACT_OTP_INFO(2, 0, &f50l1g41lb_fact_otp_ops)),
SPINAND_INFO("F50D2G41KA",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x51, 0x7f,
0x7f, 0x7f),
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index 3dc4d63d6832..1ef08ad850a2 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -14,6 +14,8 @@
#define MACRONIX_ECCSR_BF_LAST_PAGE(eccsr) FIELD_GET(GENMASK(3, 0), eccsr)
#define MACRONIX_ECCSR_BF_ACCUMULATED_PAGES(eccsr) FIELD_GET(GENMASK(7, 4), eccsr)
#define MACRONIX_CFG_CONT_READ BIT(2)
+#define MACRONIX_FEATURE_ADDR_READ_RETRY 0x70
+#define MACRONIX_NUM_READ_RETRY_MODES 5
#define STATUS_ECC_HAS_BITFLIPS_THRESHOLD (3 << 4)
@@ -136,6 +138,23 @@ static int macronix_set_cont_read(struct spinand_device *spinand, bool enable)
return 0;
}
+/**
+ * macronix_set_read_retry() - Set the retry mode
+ * @spinand: SPI NAND device
+ * @retry_mode: Specify which retry mode to set
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int macronix_set_read_retry(struct spinand_device *spinand,
+ unsigned int retry_mode)
+{
+ struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MACRONIX_FEATURE_ADDR_READ_RETRY,
+ spinand->scratchbuf);
+
+ *spinand->scratchbuf = retry_mode;
+ return spi_mem_exec_op(spinand->spimem, &op);
+}
+
static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO("MX35LF1GE4AB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x12),
@@ -168,7 +187,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
macronix_ecc_get_status),
- SPINAND_CONT_READ(macronix_set_cont_read)),
+ SPINAND_CONT_READ(macronix_set_cont_read),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35LF4GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x37, 0x03),
NAND_MEMORG(1, 4096, 128, 64, 2048, 40, 1, 1, 1),
@@ -179,7 +200,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
macronix_ecc_get_status),
- SPINAND_CONT_READ(macronix_set_cont_read)),
+ SPINAND_CONT_READ(macronix_set_cont_read),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35LF1G24AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
@@ -188,7 +211,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
- SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35LF2G24AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
@@ -198,7 +223,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT |
SPINAND_HAS_PROG_PLANE_SELECT_BIT,
- SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35LF2G24AD-Z4I8",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x64, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
@@ -207,7 +234,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
- SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35LF4G24AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 2, 1, 1),
@@ -217,7 +246,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT |
SPINAND_HAS_PROG_PLANE_SELECT_BIT,
- SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35LF4G24AD-Z4I8",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x75, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
@@ -226,7 +257,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
- SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX31LF1GE4BC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x1e),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
@@ -270,7 +303,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT |
SPINAND_HAS_PROG_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- macronix_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF4G24AD-Z4I8",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xf5, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
@@ -280,7 +315,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- macronix_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF4GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb7, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
@@ -291,7 +328,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
macronix_ecc_get_status),
- SPINAND_CONT_READ(macronix_set_cont_read)),
+ SPINAND_CONT_READ(macronix_set_cont_read),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF2G14AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa0),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
@@ -314,7 +353,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT |
SPINAND_HAS_PROG_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- macronix_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF2G24AD-Z4I8",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe4, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
@@ -324,7 +365,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- macronix_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF2GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa6, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
@@ -335,7 +378,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
macronix_ecc_get_status),
- SPINAND_CONT_READ(macronix_set_cont_read)),
+ SPINAND_CONT_READ(macronix_set_cont_read),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF2GE4AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa2, 0x01),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
@@ -366,7 +411,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- macronix_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF1GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x96, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
@@ -377,7 +424,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
macronix_ecc_get_status),
- SPINAND_CONT_READ(macronix_set_cont_read)),
+ SPINAND_CONT_READ(macronix_set_cont_read),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF1GE4AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x92, 0x01),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
index ad0bb9755a09..691f8a2e0791 100644
--- a/drivers/mtd/nand/spi/micron.c
+++ b/drivers/mtd/nand/spi/micron.c
@@ -9,6 +9,8 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/string.h>
#define SPINAND_MFR_MICRON 0x2c
@@ -28,6 +30,10 @@
#define MICRON_SELECT_DIE(x) ((x) << 6)
+#define MICRON_MT29F2G01ABAGD_CFG_OTP_STATE BIT(7)
+#define MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK \
+ (CFG_OTP_ENABLE | MICRON_MT29F2G01ABAGD_CFG_OTP_STATE)
+
static SPINAND_OP_VARIANTS(quadio_read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
@@ -168,6 +174,131 @@ static int micron_8_ecc_get_status(struct spinand_device *spinand,
return -EINVAL;
}
+static int mt29f2g01abagd_otp_is_locked(struct spinand_device *spinand)
+{
+ size_t bufsize = spinand_otp_page_size(spinand);
+ size_t retlen;
+ u8 *buf;
+ int ret;
+
+ buf = kmalloc(bufsize, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = spinand_upd_cfg(spinand,
+ MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK,
+ MICRON_MT29F2G01ABAGD_CFG_OTP_STATE);
+ if (ret)
+ goto free_buf;
+
+ ret = spinand_user_otp_read(spinand, 0, bufsize, &retlen, buf);
+
+ if (spinand_upd_cfg(spinand, MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK,
+ 0)) {
+ dev_warn(&spinand_to_mtd(spinand)->dev,
+ "Can not disable OTP mode\n");
+ ret = -EIO;
+ }
+
+ if (ret)
+ goto free_buf;
+
+ /* If all zeros, then the OTP area is locked. */
+ if (mem_is_zero(buf, bufsize))
+ ret = 1;
+
+free_buf:
+ kfree(buf);
+ return ret;
+}
+
+static int mt29f2g01abagd_otp_info(struct spinand_device *spinand, size_t len,
+ struct otp_info *buf, size_t *retlen,
+ bool user)
+{
+ int locked;
+
+ if (len < sizeof(*buf))
+ return -EINVAL;
+
+ locked = mt29f2g01abagd_otp_is_locked(spinand);
+ if (locked < 0)
+ return locked;
+
+ buf->locked = locked;
+ buf->start = 0;
+ buf->length = user ? spinand_user_otp_size(spinand) :
+ spinand_fact_otp_size(spinand);
+
+ *retlen = sizeof(*buf);
+ return 0;
+}
+
+static int mt29f2g01abagd_fact_otp_info(struct spinand_device *spinand,
+ size_t len, struct otp_info *buf,
+ size_t *retlen)
+{
+ return mt29f2g01abagd_otp_info(spinand, len, buf, retlen, false);
+}
+
+static int mt29f2g01abagd_user_otp_info(struct spinand_device *spinand,
+ size_t len, struct otp_info *buf,
+ size_t *retlen)
+{
+ return mt29f2g01abagd_otp_info(spinand, len, buf, retlen, true);
+}
+
+static int mt29f2g01abagd_otp_lock(struct spinand_device *spinand, loff_t from,
+ size_t len)
+{
+ struct spi_mem_op write_op = SPINAND_WR_EN_DIS_OP(true);
+ struct spi_mem_op exec_op = SPINAND_PROG_EXEC_OP(0);
+ u8 status;
+ int ret;
+
+ ret = spinand_upd_cfg(spinand,
+ MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK,
+ MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK);
+ if (ret)
+ return ret;
+
+ ret = spi_mem_exec_op(spinand->spimem, &write_op);
+ if (ret)
+ goto out;
+
+ ret = spi_mem_exec_op(spinand->spimem, &exec_op);
+ if (ret)
+ goto out;
+
+ ret = spinand_wait(spinand,
+ SPINAND_WRITE_INITIAL_DELAY_US,
+ SPINAND_WRITE_POLL_DELAY_US,
+ &status);
+ if (!ret && (status & STATUS_PROG_FAILED))
+ ret = -EIO;
+
+out:
+ if (spinand_upd_cfg(spinand, MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK, 0)) {
+ dev_warn(&spinand_to_mtd(spinand)->dev,
+ "Can not disable OTP mode\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static const struct spinand_user_otp_ops mt29f2g01abagd_user_otp_ops = {
+ .info = mt29f2g01abagd_user_otp_info,
+ .lock = mt29f2g01abagd_otp_lock,
+ .read = spinand_user_otp_read,
+ .write = spinand_user_otp_write,
+};
+
+static const struct spinand_fact_otp_ops mt29f2g01abagd_fact_otp_ops = {
+ .info = mt29f2g01abagd_fact_otp_info,
+ .read = spinand_fact_otp_read,
+};
+
static const struct spinand_info micron_spinand_table[] = {
/* M79A 2Gb 3.3V */
SPINAND_INFO("MT29F2G01ABAGD",
@@ -179,7 +310,9 @@ static const struct spinand_info micron_spinand_table[] = {
&x4_update_cache_variants),
0,
SPINAND_ECCINFO(&micron_8_ooblayout,
- micron_8_ecc_get_status)),
+ micron_8_ecc_get_status),
+ SPINAND_USER_OTP_INFO(12, 2, &mt29f2g01abagd_user_otp_ops),
+ SPINAND_FACT_OTP_INFO(2, 0, &mt29f2g01abagd_fact_otp_ops)),
/* M79A 2Gb 1.8V */
SPINAND_INFO("MT29F2G01ABBGD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x25),
diff --git a/drivers/mtd/nand/spi/otp.c b/drivers/mtd/nand/spi/otp.c
new file mode 100644
index 000000000000..ce41bca86ea9
--- /dev/null
+++ b/drivers/mtd/nand/spi/otp.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2025, SaluteDevices. All Rights Reserved.
+ *
+ * Author: Martin Kurbanov <mmkurbanov@salutedevices.com>
+ */
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/spinand.h>
+
+/**
+ * spinand_otp_page_size() - Get SPI-NAND OTP page size
+ * @spinand: the spinand device
+ *
+ * Return: the OTP page size.
+ */
+size_t spinand_otp_page_size(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+
+ return nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
+}
+
+static size_t spinand_otp_size(struct spinand_device *spinand,
+ const struct spinand_otp_layout *layout)
+{
+ return layout->npages * spinand_otp_page_size(spinand);
+}
+
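+/*
+ * Worked example (hypothetical geometry): with 2 KiB pages and 64 bytes of
+ * OOB, spinand_otp_page_size() is 2048 + 64 = 2112 bytes, so a layout that
+ * declares 28 OTP pages reports an OTP area of 28 * 2112 = 59136 bytes.
+ */
+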
+/**
+ * spinand_fact_otp_size() - Get SPI-NAND factory OTP area size
+ * @spinand: the spinand device
+ *
+ * Return: the OTP size.
+ */
+size_t spinand_fact_otp_size(struct spinand_device *spinand)
+{
+ return spinand_otp_size(spinand, &spinand->fact_otp->layout);
+}
+
+/**
+ * spinand_user_otp_size() - Get SPI-NAND user OTP area size
+ * @spinand: the spinand device
+ *
+ * Return: the OTP size.
+ */
+size_t spinand_user_otp_size(struct spinand_device *spinand)
+{
+ return spinand_otp_size(spinand, &spinand->user_otp->layout);
+}
+
+static int spinand_otp_check_bounds(struct spinand_device *spinand, loff_t ofs,
+ size_t len,
+ const struct spinand_otp_layout *layout)
+{
+ if (ofs < 0 || ofs + len > spinand_otp_size(spinand, layout))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int spinand_user_otp_check_bounds(struct spinand_device *spinand,
+ loff_t ofs, size_t len)
+{
+ return spinand_otp_check_bounds(spinand, ofs, len,
+ &spinand->user_otp->layout);
+}
+
+static int spinand_otp_rw(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf, bool is_write,
+ const struct spinand_otp_layout *layout)
+{
+ struct nand_page_io_req req = {};
+ unsigned long long page;
+ size_t copied = 0;
+ size_t otp_pagesize = spinand_otp_page_size(spinand);
+ int ret;
+
+ if (!len)
+ return 0;
+
+ ret = spinand_otp_check_bounds(spinand, ofs, len, layout);
+ if (ret)
+ return ret;
+
+ ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, CFG_OTP_ENABLE);
+ if (ret)
+ return ret;
+
+ page = ofs;
+ req.dataoffs = do_div(page, otp_pagesize);
+ req.pos.page = page + layout->start_page;
+ req.type = is_write ? NAND_PAGE_WRITE : NAND_PAGE_READ;
+ req.mode = MTD_OPS_RAW;
+ req.databuf.in = buf;
+
+ while (copied < len) {
+ req.datalen = min_t(unsigned int,
+ otp_pagesize - req.dataoffs,
+ len - copied);
+
+ if (is_write)
+ ret = spinand_write_page(spinand, &req);
+ else
+ ret = spinand_read_page(spinand, &req);
+
+ if (ret < 0)
+ break;
+
+ req.databuf.in += req.datalen;
+ req.pos.page++;
+ req.dataoffs = 0;
+ copied += req.datalen;
+ }
+
+ *retlen = copied;
+
+ if (spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0)) {
+ dev_warn(&spinand_to_mtd(spinand)->dev,
+ "Can not disable OTP mode\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
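+/*
+ * Offset math example (hypothetical 2112-byte OTP page, start_page = 2):
+ * for ofs = 5000, do_div() yields page = 2 and dataoffs = 776, so the
+ * access starts at byte 776 of physical OTP page 4 (page + start_page).
+ */
+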
+/**
+ * spinand_fact_otp_read() - Read from OTP area
+ * @spinand: the spinand device
+ * @ofs: the offset to read
+ * @len: the number of data bytes to read
+ * @retlen: the pointer to variable to store the number of read bytes
+ * @buf: the buffer to store the read data
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+int spinand_fact_otp_read(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf)
+{
+ return spinand_otp_rw(spinand, ofs, len, retlen, buf, false,
+ &spinand->fact_otp->layout);
+}
+
+/**
+ * spinand_user_otp_read() - Read from OTP area
+ * @spinand: the spinand device
+ * @ofs: the offset to read
+ * @len: the number of data bytes to read
+ * @retlen: the pointer to variable to store the number of read bytes
+ * @buf: the buffer to store the read data
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+int spinand_user_otp_read(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf)
+{
+ return spinand_otp_rw(spinand, ofs, len, retlen, buf, false,
+ &spinand->user_otp->layout);
+}
+
+/**
+ * spinand_user_otp_write() - Write to OTP area
+ * @spinand: the spinand device
+ * @ofs: the offset to write to
+ * @len: the number of bytes to write
+ * @retlen: the pointer to variable to store the number of written bytes
+ * @buf: the buffer with data to write
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+int spinand_user_otp_write(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, const u8 *buf)
+{
+ return spinand_otp_rw(spinand, ofs, len, retlen, (u8 *)buf, true,
+ &spinand->user_otp->layout);
+}
+
+static int spinand_mtd_otp_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf,
+ bool is_fact)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ int ret;
+
+ *retlen = 0;
+
+ mutex_lock(&spinand->lock);
+
+ if (is_fact)
+ ret = spinand->fact_otp->ops->info(spinand, len, buf, retlen);
+ else
+ ret = spinand->user_otp->ops->info(spinand, len, buf, retlen);
+
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static int spinand_mtd_fact_otp_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+{
+ return spinand_mtd_otp_info(mtd, len, retlen, buf, true);
+}
+
+static int spinand_mtd_user_otp_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+{
+ return spinand_mtd_otp_info(mtd, len, retlen, buf, false);
+}
+
+static int spinand_mtd_otp_read(struct mtd_info *mtd, loff_t ofs, size_t len,
+ size_t *retlen, u8 *buf, bool is_fact)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ int ret;
+
+ *retlen = 0;
+
+ if (!len)
+ return 0;
+
+ ret = spinand_otp_check_bounds(spinand, ofs, len,
+ is_fact ? &spinand->fact_otp->layout :
+ &spinand->user_otp->layout);
+ if (ret)
+ return ret;
+
+ mutex_lock(&spinand->lock);
+
+ if (is_fact)
+ ret = spinand->fact_otp->ops->read(spinand, ofs, len, retlen,
+ buf);
+ else
+ ret = spinand->user_otp->ops->read(spinand, ofs, len, retlen,
+ buf);
+
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static int spinand_mtd_fact_otp_read(struct mtd_info *mtd, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf)
+{
+ return spinand_mtd_otp_read(mtd, ofs, len, retlen, buf, true);
+}
+
+static int spinand_mtd_user_otp_read(struct mtd_info *mtd, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf)
+{
+ return spinand_mtd_otp_read(mtd, ofs, len, retlen, buf, false);
+}
+
+static int spinand_mtd_user_otp_write(struct mtd_info *mtd, loff_t ofs,
+ size_t len, size_t *retlen, const u8 *buf)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ const struct spinand_user_otp_ops *ops = spinand->user_otp->ops;
+ int ret;
+
+ *retlen = 0;
+
+ if (!len)
+ return 0;
+
+ ret = spinand_user_otp_check_bounds(spinand, ofs, len);
+ if (ret)
+ return ret;
+
+ mutex_lock(&spinand->lock);
+ ret = ops->write(spinand, ofs, len, retlen, buf);
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static int spinand_mtd_user_otp_erase(struct mtd_info *mtd, loff_t ofs,
+ size_t len)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ const struct spinand_user_otp_ops *ops = spinand->user_otp->ops;
+ int ret;
+
+ if (!len)
+ return 0;
+
+ ret = spinand_user_otp_check_bounds(spinand, ofs, len);
+ if (ret)
+ return ret;
+
+ mutex_lock(&spinand->lock);
+ ret = ops->erase(spinand, ofs, len);
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static int spinand_mtd_user_otp_lock(struct mtd_info *mtd, loff_t ofs,
+ size_t len)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ const struct spinand_user_otp_ops *ops = spinand->user_otp->ops;
+ int ret;
+
+ if (!len)
+ return 0;
+
+ ret = spinand_user_otp_check_bounds(spinand, ofs, len);
+ if (ret)
+ return ret;
+
+ mutex_lock(&spinand->lock);
+ ret = ops->lock(spinand, ofs, len);
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+/**
+ * spinand_set_mtd_otp_ops() - Setup OTP methods
+ * @spinand: the spinand device
+ *
+ * Setup OTP methods.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int spinand_set_mtd_otp_ops(struct spinand_device *spinand)
+{
+ struct mtd_info *mtd = spinand_to_mtd(spinand);
+ const struct spinand_fact_otp_ops *fact_ops = spinand->fact_otp->ops;
+ const struct spinand_user_otp_ops *user_ops = spinand->user_otp->ops;
+
+ if (!user_ops && !fact_ops)
+ return -EINVAL;
+
+ if (user_ops) {
+ if (user_ops->info)
+ mtd->_get_user_prot_info = spinand_mtd_user_otp_info;
+
+ if (user_ops->read)
+ mtd->_read_user_prot_reg = spinand_mtd_user_otp_read;
+
+ if (user_ops->write)
+ mtd->_write_user_prot_reg = spinand_mtd_user_otp_write;
+
+ if (user_ops->lock)
+ mtd->_lock_user_prot_reg = spinand_mtd_user_otp_lock;
+
+ if (user_ops->erase)
+ mtd->_erase_user_prot_reg = spinand_mtd_user_otp_erase;
+ }
+
+ if (fact_ops) {
+ if (fact_ops->info)
+ mtd->_get_fact_prot_info = spinand_mtd_fact_otp_info;
+
+ if (fact_ops->read)
+ mtd->_read_fact_prot_reg = spinand_mtd_fact_otp_read;
+ }
+
+ return 0;
+}
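With these hooks wired up, the OTP areas become reachable through the regular MTD OTP ABI. A minimal userspace sketch (the /dev/mtd0 node is hypothetical; OTPSELECT and MTD_OTP_USER come from <mtd/mtd-user.h>):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <mtd/mtd-user.h>

	static int dump_user_otp(const char *dev, unsigned char *buf, size_t len)
	{
		int mode = MTD_OTP_USER;
		ssize_t n;
		int fd;

		fd = open(dev, O_RDONLY);
		if (fd < 0)
			return -1;

		/* Switch this file descriptor into user-OTP mode. */
		if (ioctl(fd, OTPSELECT, &mode) < 0) {
			close(fd);
			return -1;
		}

		/* read() is now served by mtd->_read_user_prot_reg. */
		n = read(fd, buf, len);
		close(fd);
		return n < 0 ? -1 : (int)n;
	}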
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 19eb98bd6821..ac4b960101cc 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -7,16 +7,17 @@
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
-#include <linux/err.h>
-#include <linux/errno.h>
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/spi-nor.h>
#include <linux/mutex.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/sched/task_stack.h>
#include <linux/sizes.h>
@@ -639,32 +640,26 @@ static bool spi_nor_use_parallel_locking(struct spi_nor *nor)
static int spi_nor_rww_start_rdst(struct spi_nor *nor)
{
struct spi_nor_rww *rww = &nor->rww;
- int ret = -EAGAIN;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
if (rww->ongoing_io || rww->ongoing_rd)
- goto busy;
+ return -EAGAIN;
rww->ongoing_io = true;
rww->ongoing_rd = true;
- ret = 0;
-busy:
- mutex_unlock(&nor->lock);
- return ret;
+ return 0;
}
static void spi_nor_rww_end_rdst(struct spi_nor *nor)
{
struct spi_nor_rww *rww = &nor->rww;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
rww->ongoing_io = false;
rww->ongoing_rd = false;
-
- mutex_unlock(&nor->lock);
}
static int spi_nor_lock_rdst(struct spi_nor *nor)
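The rewrites in this file lean on the scope-based lock guards from <linux/cleanup.h> (added to the include list above): guard(mutex)(&nor->lock) acquires the mutex and schedules the unlock for when the enclosing scope is left, which is why the early-return paths can drop their explicit mutex_unlock() calls and 'busy:' labels. A minimal sketch of the pattern with a hypothetical lock and flag:

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(demo_lock);
	static bool demo_busy;

	static bool demo_try_claim(void)
	{
		guard(mutex)(&demo_lock);	/* released on every return below */

		if (demo_busy)
			return false;		/* no explicit unlock needed */

		demo_busy = true;
		return true;
	}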
@@ -1212,26 +1207,21 @@ static void spi_nor_offset_to_banks(u64 bank_size, loff_t start, size_t len,
static bool spi_nor_rww_start_io(struct spi_nor *nor)
{
struct spi_nor_rww *rww = &nor->rww;
- bool start = false;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
if (rww->ongoing_io)
- goto busy;
+ return false;
rww->ongoing_io = true;
- start = true;
-busy:
- mutex_unlock(&nor->lock);
- return start;
+ return true;
}
static void spi_nor_rww_end_io(struct spi_nor *nor)
{
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
nor->rww.ongoing_io = false;
- mutex_unlock(&nor->lock);
}
static int spi_nor_lock_device(struct spi_nor *nor)
@@ -1254,32 +1244,27 @@ static void spi_nor_unlock_device(struct spi_nor *nor)
static bool spi_nor_rww_start_exclusive(struct spi_nor *nor)
{
struct spi_nor_rww *rww = &nor->rww;
- bool start = false;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe)
- goto busy;
+ return false;
rww->ongoing_io = true;
rww->ongoing_rd = true;
rww->ongoing_pe = true;
- start = true;
-busy:
- mutex_unlock(&nor->lock);
- return start;
+ return true;
}
static void spi_nor_rww_end_exclusive(struct spi_nor *nor)
{
struct spi_nor_rww *rww = &nor->rww;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
rww->ongoing_io = false;
rww->ongoing_rd = false;
rww->ongoing_pe = false;
- mutex_unlock(&nor->lock);
}
int spi_nor_prep_and_lock(struct spi_nor *nor)
@@ -1316,30 +1301,26 @@ static bool spi_nor_rww_start_pe(struct spi_nor *nor, loff_t start, size_t len)
{
struct spi_nor_rww *rww = &nor->rww;
unsigned int used_banks = 0;
- bool started = false;
u8 first, last;
int bank;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe)
- goto busy;
+ return false;
spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
for (bank = first; bank <= last; bank++) {
if (rww->used_banks & BIT(bank))
- goto busy;
+ return false;
used_banks |= BIT(bank);
}
rww->used_banks |= used_banks;
rww->ongoing_pe = true;
- started = true;
-busy:
- mutex_unlock(&nor->lock);
- return started;
+ return true;
}
static void spi_nor_rww_end_pe(struct spi_nor *nor, loff_t start, size_t len)
@@ -1348,15 +1329,13 @@ static void spi_nor_rww_end_pe(struct spi_nor *nor, loff_t start, size_t len)
u8 first, last;
int bank;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
for (bank = first; bank <= last; bank++)
rww->used_banks &= ~BIT(bank);
rww->ongoing_pe = false;
-
- mutex_unlock(&nor->lock);
}
static int spi_nor_prep_and_lock_pe(struct spi_nor *nor, loff_t start, size_t len)
@@ -1393,19 +1372,18 @@ static bool spi_nor_rww_start_rd(struct spi_nor *nor, loff_t start, size_t len)
{
struct spi_nor_rww *rww = &nor->rww;
unsigned int used_banks = 0;
- bool started = false;
u8 first, last;
int bank;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
if (rww->ongoing_io || rww->ongoing_rd)
- goto busy;
+ return false;
spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
for (bank = first; bank <= last; bank++) {
if (rww->used_banks & BIT(bank))
- goto busy;
+ return false;
used_banks |= BIT(bank);
}
@@ -1413,11 +1391,8 @@ static bool spi_nor_rww_start_rd(struct spi_nor *nor, loff_t start, size_t len)
rww->used_banks |= used_banks;
rww->ongoing_io = true;
rww->ongoing_rd = true;
- started = true;
-busy:
- mutex_unlock(&nor->lock);
- return started;
+ return true;
}
static void spi_nor_rww_end_rd(struct spi_nor *nor, loff_t start, size_t len)
@@ -1426,7 +1401,7 @@ static void spi_nor_rww_end_rd(struct spi_nor *nor, loff_t start, size_t len)
u8 first, last;
int bank;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
for (bank = first; bank <= last; bank++)
@@ -1434,8 +1409,6 @@ static void spi_nor_rww_end_rd(struct spi_nor *nor, loff_t start, size_t len)
rww->ongoing_io = false;
rww->ongoing_rd = false;
-
- mutex_unlock(&nor->lock);
}
static int spi_nor_prep_and_lock_rd(struct spi_nor *nor, loff_t start, size_t len)
diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
index 99936fd25d43..55644a3cd88c 100644
--- a/drivers/mtd/spi-nor/macronix.c
+++ b/drivers/mtd/spi-nor/macronix.c
@@ -45,8 +45,26 @@ mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
return 0;
}
+static int
+macronix_qpp4b_post_sfdp_fixups(struct spi_nor *nor)
+{
+ /* PP_1_1_4_4B is supported but missing in 4BAIT. */
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_4],
+ SPINOR_OP_PP_1_1_4_4B, SNOR_PROTO_1_1_4);
+
+ return 0;
+}
+
static const struct spi_nor_fixups mx25l25635_fixups = {
.post_bfpt = mx25l25635_post_bfpt_fixups,
+ .post_sfdp = macronix_qpp4b_post_sfdp_fixups,
+};
+
+static const struct spi_nor_fixups macronix_qpp4b_fixups = {
+ .post_sfdp = macronix_qpp4b_post_sfdp_fixups,
};
static const struct flash_info macronix_nor_parts[] = {
@@ -102,11 +120,17 @@ static const struct flash_info macronix_nor_parts[] = {
.size = SZ_64M,
.no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
.fixup_flags = SPI_NOR_4B_OPCODES,
+ .fixups = &macronix_qpp4b_fixups,
}, {
.id = SNOR_ID(0xc2, 0x20, 0x1b),
.name = "mx66l1g45g",
.size = SZ_128M,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .fixups = &macronix_qpp4b_fixups,
+ }, {
+ /* MX66L2G45G */
+ .id = SNOR_ID(0xc2, 0x20, 0x1c),
+ .fixups = &macronix_qpp4b_fixups,
}, {
.id = SNOR_ID(0xc2, 0x23, 0x14),
.name = "mx25v8035f",
@@ -148,18 +172,25 @@ static const struct flash_info macronix_nor_parts[] = {
.size = SZ_64M,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
.fixup_flags = SPI_NOR_4B_OPCODES,
+ .fixups = &macronix_qpp4b_fixups,
}, {
.id = SNOR_ID(0xc2, 0x25, 0x3a),
.name = "mx66u51235f",
.size = SZ_64M,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
.fixup_flags = SPI_NOR_4B_OPCODES,
+ .fixups = &macronix_qpp4b_fixups,
+ }, {
+ /* MX66U1G45G */
+ .id = SNOR_ID(0xc2, 0x25, 0x3b),
+ .fixups = &macronix_qpp4b_fixups,
}, {
.id = SNOR_ID(0xc2, 0x25, 0x3c),
.name = "mx66u2g45g",
.size = SZ_256M,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
.fixup_flags = SPI_NOR_4B_OPCODES,
+ .fixups = &macronix_qpp4b_fixups,
}, {
.id = SNOR_ID(0xc2, 0x26, 0x18),
.name = "mx25l12855e",
diff --git a/drivers/mtd/spi-nor/otp.c b/drivers/mtd/spi-nor/otp.c
index 9a729aa3452d..7d0b145d78d8 100644
--- a/drivers/mtd/spi-nor/otp.c
+++ b/drivers/mtd/spi-nor/otp.c
@@ -6,6 +6,7 @@
*/
#include <linux/log2.h>
+#include <linux/math64.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/spi-nor.h>
diff --git a/drivers/mtd/spi-nor/swp.c b/drivers/mtd/spi-nor/swp.c
index e48c3cff247a..9c9328478d8a 100644
--- a/drivers/mtd/spi-nor/swp.c
+++ b/drivers/mtd/spi-nor/swp.c
@@ -5,6 +5,7 @@
* Copyright (C) 2005, Intec Automation Inc.
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
+#include <linux/math64.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/spi-nor.h>
diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c
index 8d0a00d69e12..63a93c9eb917 100644
--- a/drivers/mtd/spi-nor/winbond.c
+++ b/drivers/mtd/spi-nor/winbond.c
@@ -10,6 +10,7 @@
#define WINBOND_NOR_OP_RDEAR 0xc8 /* Read Extended Address Register */
#define WINBOND_NOR_OP_WREAR 0xc5 /* Write Extended Address Register */
+#define WINBOND_NOR_OP_SELDIE 0xc2 /* Select active die */
#define WINBOND_NOR_WREAR_OP(buf) \
SPI_MEM_OP(SPI_MEM_OP_CMD(WINBOND_NOR_OP_WREAR, 0), \
@@ -17,6 +18,12 @@
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_OUT(1, buf, 0))
+#define WINBOND_NOR_SELDIE_OP(buf) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(WINBOND_NOR_OP_SELDIE, 0), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(1, buf, 0))
+
static int
w25q128_post_bfpt_fixups(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
@@ -66,6 +73,79 @@ static const struct spi_nor_fixups w25q256_fixups = {
.post_bfpt = w25q256_post_bfpt_fixups,
};
+/**
+ * winbond_nor_select_die() - Set active die.
+ * @nor: pointer to 'struct spi_nor'.
+ * @die: die to set active.
+ *
+ * Certain Winbond chips feature more than a single die. This is mostly hidden
+ * from the user, except that some chips may experience time deviation when
+ * modifying the status bits between dies, which in some corner cases may
+ * produce problematic races. Being able to explicitly select a die to check its
+ * state in this case may be useful.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int winbond_nor_select_die(struct spi_nor *nor, u8 die)
+{
+ int ret;
+
+ nor->bouncebuf[0] = die;
+
+ if (nor->spimem) {
+ struct spi_mem_op op = WINBOND_NOR_SELDIE_OP(nor->bouncebuf);
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = spi_nor_controller_ops_write_reg(nor,
+ WINBOND_NOR_OP_SELDIE,
+ nor->bouncebuf, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d selecting die %d\n", ret, die);
+
+ return ret;
+}
+
+static int winbond_nor_multi_die_ready(struct spi_nor *nor)
+{
+ int ret, i;
+
+ for (i = 0; i < nor->params->n_dice; i++) {
+ ret = winbond_nor_select_die(nor, i);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_sr_ready(nor);
+ if (ret <= 0)
+ return ret;
+ }
+
+ return 1;
+}
+
+static int
+winbond_nor_multi_die_post_sfdp_fixups(struct spi_nor *nor)
+{
+ /*
+ * SFDP supports dice numbers, but this information is only available in
+ * optional additional tables which are not provided by these chips.
+ * Dice number has an impact though, because these devices need extra
+ * care when reading the busy bit.
+ */
+ nor->params->n_dice = nor->params->size / SZ_64M;
+ nor->params->ready = winbond_nor_multi_die_ready;
+
+ return 0;
+}
+
+static const struct spi_nor_fixups winbond_nor_multi_die_fixups = {
+ .post_sfdp = winbond_nor_multi_die_post_sfdp_fixups,
+};
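+
+/*
+ * Worked example: the 1 Gbit (128 MiB) W25Q01JV added below resolves to
+ * n_dice = 128 MiB / 64 MiB = 2, so winbond_nor_multi_die_ready() selects
+ * and polls both dies before reporting the flash as ready.
+ */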
+
static const struct flash_info winbond_nor_parts[] = {
{
.id = SNOR_ID(0xef, 0x30, 0x10),
@@ -147,6 +227,10 @@ static const struct flash_info winbond_nor_parts[] = {
.size = SZ_64M,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
}, {
+ /* W25Q01JV */
+ .id = SNOR_ID(0xef, 0x40, 0x21),
+ .fixups = &winbond_nor_multi_die_fixups,
+ }, {
.id = SNOR_ID(0xef, 0x50, 0x12),
.name = "w25q20bw",
.size = SZ_256K,
@@ -222,6 +306,10 @@ static const struct flash_info winbond_nor_parts[] = {
.id = SNOR_ID(0xef, 0x70, 0x19),
.name = "w25q256jvm",
}, {
+ /* W25Q02JV */
+ .id = SNOR_ID(0xef, 0x70, 0x22),
+ .fixups = &winbond_nor_multi_die_fixups,
+ }, {
.id = SNOR_ID(0xef, 0x71, 0x19),
.name = "w25m512jv",
.size = SZ_64M,
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 2836905f0152..39cc0a6a4d37 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -199,7 +199,7 @@ static blk_status_t ubiblock_read(struct request *req)
* and ubi_read_sg() will check that limit.
*/
ubi_sgl_init(&pdu->usgl);
- blk_rq_map_sg(req->q, req, pdu->usgl.sg);
+ blk_rq_map_sg(req, pdu->usgl.sg);
while (bytes_left) {
/*
diff --git a/drivers/mux/gpio.c b/drivers/mux/gpio.c
index cc5f2c1861d4..5710879cd47f 100644
--- a/drivers/mux/gpio.c
+++ b/drivers/mux/gpio.c
@@ -28,9 +28,7 @@ static int mux_gpio_set(struct mux_control *mux, int state)
bitmap_from_arr32(values, &value, BITS_PER_TYPE(value));
- gpiod_set_array_value_cansleep(mux_gpio->gpios->ndescs,
- mux_gpio->gpios->desc,
- mux_gpio->gpios->info, values);
+ gpiod_multi_set_value_cansleep(mux_gpio->gpios, values);
return 0;
}
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 1fd5acdc73c6..271520510b5f 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -518,30 +518,6 @@ source "drivers/net/hippi/Kconfig"
source "drivers/net/ipa/Kconfig"
-config NET_SB1000
- tristate "General Instruments Surfboard 1000"
- depends on ISA && PNP
- help
- This is a driver for the General Instrument (also known as
- NextLevel) SURFboard 1000 internal
- cable modem. This is an ISA card which is used by a number of cable
- TV companies to provide cable modem access. It's a one-way
- downstream-only cable modem, meaning that your upstream net link is
- provided by your regular phone modem.
-
- At present this driver only compiles as a module, so say M here if
- you have this card. The module will be called sb1000. Then read
- <file:Documentation/networking/device_drivers/cable/sb1000.rst> for
- information on how to use this module, as it needs special ppp
- scripts for establishing a connection. Further documentation
- and the necessary scripts can be found at:
-
- <http://www.jacksonville.net/~fventuri/>
- <http://home.adelphia.net/~siglercm/sb1000.html>
- <http://linuxpower.cx/~cable/>
-
- If you don't have this card, of course say N.
-
source "drivers/net/phy/Kconfig"
source "drivers/net/pse-pd/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 13743d0e83b5..75333251a01a 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -69,7 +69,6 @@ obj-$(CONFIG_PPPOL2TP) += ppp/
obj-$(CONFIG_PPTP) += ppp/
obj-$(CONFIG_SLIP) += slip/
obj-$(CONFIG_SLHC) += slip/
-obj-$(CONFIG_NET_SB1000) += sb1000.o
obj-$(CONFIG_SUNGEM_PHY) += sungem_phy.o
obj-$(CONFIG_WAN) += wan/
obj-$(CONFIG_WLAN) += wireless/
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index 98c6205ed19f..734a0b3242a9 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -3099,7 +3099,7 @@ static void amt_link_setup(struct net_device *dev)
dev->addr_len = 0;
dev->priv_flags |= IFF_NO_QUEUE;
dev->lltx = true;
- dev->netns_local = true;
+ dev->netns_immutable = true;
dev->features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
dev->hw_features |= NETIF_F_FRAGLIST | NETIF_F_RXCSUM;
@@ -3161,14 +3161,17 @@ static int amt_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
-static int amt_newlink(struct net *net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int amt_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
struct amt_dev *amt = netdev_priv(dev);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
int err = -EINVAL;
- amt->net = net;
+ amt->net = link_net;
amt->mode = nla_get_u32(data[IFLA_AMT_MODE]);
if (data[IFLA_AMT_MAX_TUNNELS] &&
@@ -3183,7 +3186,7 @@ static int amt_newlink(struct net *net, struct net_device *dev,
amt->hash_buckets = AMT_HSIZE;
amt->nr_tunnels = 0;
get_random_bytes(&amt->hash_seed, sizeof(amt->hash_seed));
- amt->stream_dev = dev_get_by_index(net,
+ amt->stream_dev = dev_get_by_index(link_net,
nla_get_u32(data[IFLA_AMT_LINK]));
if (!amt->stream_dev) {
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK],
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 70814303aab8..d1473c5f8eef 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -698,10 +698,13 @@ static void bareudp_dellink(struct net_device *dev, struct list_head *head)
unregister_netdevice_queue(dev, head);
}
-static int bareudp_newlink(struct net *net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int bareudp_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct bareudp_conf conf;
int err;
@@ -709,7 +712,7 @@ static int bareudp_newlink(struct net *net, struct net_device *dev,
if (err)
return err;
- err = bareudp_configure(net, dev, &conf, extack);
+ err = bareudp_configure(link_net, dev, &conf, extack);
if (err)
return err;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e45bba240cbc..950d8e4d86f8 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -90,6 +90,7 @@
#include <net/tls.h>
#endif
#include <net/ip6_route.h>
+#include <net/netdev_lock.h>
#include <net/xdp.h>
#include "bonding_priv.h"
@@ -322,9 +323,9 @@ static bool bond_sk_check(struct bonding *bond)
}
}
-static bool bond_xdp_check(struct bonding *bond)
+bool bond_xdp_check(struct bonding *bond, int mode)
{
- switch (BOND_MODE(bond)) {
+ switch (mode) {
case BOND_MODE_ROUNDROBIN:
case BOND_MODE_ACTIVEBACKUP:
return true;
@@ -432,9 +433,6 @@ static struct net_device *bond_ipsec_dev(struct xfrm_state *xs)
struct bonding *bond;
struct slave *slave;
- if (!bond_dev)
- return NULL;
-
bond = netdev_priv(bond_dev);
if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)
return NULL;
@@ -676,22 +674,16 @@ out:
static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
struct net_device *real_dev;
- bool ok = false;
rcu_read_lock();
real_dev = bond_ipsec_dev(xs);
- if (!real_dev)
- goto out;
-
- if (!real_dev->xfrmdev_ops ||
- !real_dev->xfrmdev_ops->xdo_dev_offload_ok ||
- netif_is_bond_master(real_dev))
- goto out;
+ if (!real_dev || netif_is_bond_master(real_dev)) {
+ rcu_read_unlock();
+ return false;
+ }
- ok = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
-out:
rcu_read_unlock();
- return ok;
+ return true;
}
/**
@@ -858,7 +850,6 @@ static int bond_check_dev_link(struct bonding *bond,
struct net_device *slave_dev, int reporting)
{
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
- int (*ioctl)(struct net_device *, struct ifreq *, int);
struct ifreq ifr;
struct mii_ioctl_data *mii;
@@ -874,8 +865,7 @@ static int bond_check_dev_link(struct bonding *bond,
BMSR_LSTATUS : 0;
/* Ethtool can't be used, fallback to MII ioctls. */
- ioctl = slave_ops->ndo_eth_ioctl;
- if (ioctl) {
+ if (slave_ops->ndo_eth_ioctl) {
/* TODO: set pointer to correct ioctl on a per team member
* bases to make this more efficient. that is, once
* we determine the correct ioctl, we will always
@@ -891,9 +881,10 @@ static int bond_check_dev_link(struct bonding *bond,
/* Yes, the mii is overlaid on the ifreq.ifr_ifru */
strscpy_pad(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
mii = if_mii(&ifr);
- if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
+
+ if (dev_eth_ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
mii->reg_num = MII_BMSR;
- if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
+ if (dev_eth_ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
return mii->val_out & BMSR_LSTATUS;
}
}
@@ -1937,7 +1928,7 @@ void bond_xdp_set_features(struct net_device *bond_dev)
ASSERT_RTNL();
- if (!bond_xdp_check(bond) || !bond_has_slaves(bond)) {
+ if (!bond_xdp_check(bond, BOND_MODE(bond)) || !bond_has_slaves(bond)) {
xdp_clear_features_flag(bond_dev);
return;
}
@@ -2551,7 +2542,7 @@ static int __bond_release_one(struct net_device *bond_dev,
RCU_INIT_POINTER(bond->current_arp_slave, NULL);
- if (!all && (!bond->params.fail_over_mac ||
+ if (!all && (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
bond_has_slaves(bond))
@@ -2648,10 +2639,13 @@ static int __bond_release_one(struct net_device *bond_dev,
dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
}
- if (unregister)
+ if (unregister) {
+ netdev_lock_ops(slave_dev);
__dev_set_mtu(slave_dev, slave->original_mtu);
- else
+ netdev_unlock_ops(slave_dev);
+ } else {
dev_set_mtu(slave_dev, slave->original_mtu);
+ }
if (!netif_is_bond_master(slave_dev))
slave_dev->priv_flags &= ~IFF_BONDING;
@@ -4217,7 +4211,7 @@ static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, const void *
}
if (l34 && *ip_proto >= 0)
- fk->ports.ports = __skb_flow_get_ports(skb, *nhoff, *ip_proto, data, hlen);
+ fk->ports.ports = skb_flow_get_ports(skb, *nhoff, *ip_proto, data, hlen);
return true;
}
@@ -5699,7 +5693,7 @@ static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog,
ASSERT_RTNL();
- if (!bond_xdp_check(bond)) {
+ if (!bond_xdp_check(bond, BOND_MODE(bond))) {
BOND_NL_ERR(dev, extack,
"No native XDP support for the current bonding mode");
return -EOPNOTSUPP;
@@ -6028,7 +6022,7 @@ void bond_setup(struct net_device *bond_dev)
bond_dev->lltx = true;
/* Don't allow bond devices to change network namespaces. */
- bond_dev->netns_local = true;
+ bond_dev->netns_immutable = true;
/* By default, we declare the bond to be fully
* VLAN hardware accelerated capable. Special
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 2a6a424806aa..ac5e402c34bc 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -564,10 +564,12 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
return 0;
}
-static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int bond_newlink(struct net_device *bond_dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
int err;
err = bond_changelink(bond_dev, tb, data, extack);
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 327b6ecdc77e..91893c29b899 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -868,6 +868,9 @@ static bool bond_set_xfrm_features(struct bonding *bond)
static int bond_option_mode_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
+ if (bond->xdp_prog && !bond_xdp_check(bond, newval->value))
+ return -EOPNOTSUPP;
+
if (!bond_mode_uses_arp(newval->value)) {
if (bond->params.arp_interval) {
netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
@@ -1242,10 +1245,28 @@ static bool slave_can_set_ns_maddr(const struct bonding *bond, struct slave *sla
slave->dev->flags & IFF_MULTICAST;
}
+/**
+ * slave_set_ns_maddrs - add/del all NS mac addresses for slave
+ * @bond: bond device
+ * @slave: slave device
+ * @add: add or remove all the NS mac addresses
+ *
+ * This function tries to add or delete all the NS mac addresses on the slave.
+ *
+ * Note: the IPv6 NS target address is the unicast address carried in the
+ * Neighbor Solicitation (NS) message. The destination address of an NS
+ * message is the solicited-node multicast address of the target, and the
+ * destination MAC of the NS message is derived from that multicast address.
+ *
+ * This function is called when
+ * * arp_validate changes
+ * * a slave is enslaved or released
+ */
static void slave_set_ns_maddrs(struct bonding *bond, struct slave *slave, bool add)
{
struct in6_addr *targets = bond->params.ns_targets;
char slot_maddr[MAX_ADDR_LEN];
+ struct in6_addr mcaddr;
int i;
if (!slave_can_set_ns_maddr(bond, slave))
@@ -1255,7 +1276,8 @@ static void slave_set_ns_maddrs(struct bonding *bond, struct slave *slave, bool
if (ipv6_addr_any(&targets[i]))
break;
- if (!ndisc_mc_map(&targets[i], slot_maddr, slave->dev, 0)) {
+ addrconf_addr_solict_mult(&targets[i], &mcaddr);
+ if (!ndisc_mc_map(&mcaddr, slot_maddr, slave->dev, 0)) {
if (add)
dev_mc_add(slave->dev, slot_maddr);
else
@@ -1278,23 +1300,43 @@ void bond_slave_ns_maddrs_del(struct bonding *bond, struct slave *slave)
slave_set_ns_maddrs(bond, slave, false);
}
+/**
+ * slave_set_ns_maddr - set new NS mac address for slave
+ * @bond: bond device
+ * @slave: slave device
+ * @target: the new IPv6 target
+ * @slot: the old IPv6 target in the slot
+ *
+ * This function tries to replace the old mac address with the new one on
+ * the slave.
+ *
+ * Note: the target/slot IPv6 addresses are the unicast addresses carried in
+ * Neighbor Solicitation (NS) messages. The destination address of an NS
+ * message is the solicited-node multicast address of the target, and the
+ * destination MAC of the NS message is derived from that multicast address.
+ *
+ * This function is called when
+ * * An IPv6 NS target is added or removed.
+ */
static void slave_set_ns_maddr(struct bonding *bond, struct slave *slave,
struct in6_addr *target, struct in6_addr *slot)
{
- char target_maddr[MAX_ADDR_LEN], slot_maddr[MAX_ADDR_LEN];
+ char mac_addr[MAX_ADDR_LEN];
+ struct in6_addr mcast_addr;
if (!bond->params.arp_validate || !slave_can_set_ns_maddr(bond, slave))
return;
- /* remove the previous maddr from slave */
+ /* remove the previous mac addr from slave */
+ addrconf_addr_solict_mult(slot, &mcast_addr);
if (!ipv6_addr_any(slot) &&
- !ndisc_mc_map(slot, slot_maddr, slave->dev, 0))
- dev_mc_del(slave->dev, slot_maddr);
+ !ndisc_mc_map(&mcast_addr, mac_addr, slave->dev, 0))
+ dev_mc_del(slave->dev, mac_addr);
- /* add new maddr on slave if target is set */
+ /* add new mac addr on slave if target is set */
+ addrconf_addr_solict_mult(target, &mcast_addr);
if (!ipv6_addr_any(target) &&
- !ndisc_mc_map(target, target_maddr, slave->dev, 0))
- dev_mc_add(slave->dev, target_maddr);
+ !ndisc_mc_map(&mcast_addr, mac_addr, slave->dev, 0))
+ dev_mc_add(slave->dev, mac_addr);
}
static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot,
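
The fix in the two functions above is that the slave's multicast filter must be programmed with the link-layer mapping of the solicited-node multicast address of each NS target, not of the unicast target itself. A minimal sketch of that mapping for a single target; sketch_subscribe_ns_target() is a hypothetical helper, not bonding code:

#include <linux/errno.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>
#include <net/ndisc.h>

static int sketch_subscribe_ns_target(struct net_device *dev,
                                      const struct in6_addr *target)
{
        char maddr[MAX_ADDR_LEN];
        struct in6_addr mcaddr;

        /* target (unicast) -> solicited-node multicast (ff02::1:ffXX:XXXX) */
        addrconf_addr_solict_mult(target, &mcaddr);

        /* multicast IPv6 address -> link-layer multicast address */
        if (ndisc_mc_map(&mcaddr, maddr, dev, 0))
                return -EINVAL;

        return dev_mc_add(dev, maddr);
}
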
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index ed3a589def6b..90ea3dc0fb10 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -126,15 +126,6 @@ static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
ser->rx_blob.data = ser->rx_data;
ser->rx_blob.size = size;
}
-
-static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
-{
- if (size > sizeof(ser->tx_data))
- size = sizeof(ser->tx_data);
- memcpy(ser->tx_data, data, size);
- ser->tx_blob.data = ser->tx_data;
- ser->tx_blob.size = size;
-}
#else
static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
{
@@ -151,11 +142,6 @@ static inline void update_tty_status(struct ser_device *ser)
static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
{
}
-
-static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
-{
-}
-
#endif
static void ldisc_receive(struct tty_struct *tty, const u8 *data,
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 399844809bbe..19c86b94a40e 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -269,30 +269,22 @@ static int c_can_plat_probe(struct platform_device *pdev)
/* get the appropriate clk */
clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- goto exit;
- }
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
/* get the platform data */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- goto exit;
- }
+ if (irq < 0)
+ return irq;
addr = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
- if (IS_ERR(addr)) {
- ret = PTR_ERR(addr);
- goto exit;
- }
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
/* allocate the c_can device */
dev = alloc_c_can_dev(drvdata->msg_obj_num);
- if (!dev) {
- ret = -ENOMEM;
- goto exit;
- }
+ if (!dev)
+ return -ENOMEM;
priv = netdev_priv(dev);
switch (drvdata->id) {
@@ -324,33 +316,22 @@ static int c_can_plat_probe(struct platform_device *pdev)
/* Check if we need custom RAMINIT via syscon. Mostly for TI
* platforms. Only supported with DT boot.
*/
- if (np && of_property_read_bool(np, "syscon-raminit")) {
+ if (np && of_property_present(np, "syscon-raminit")) {
+ unsigned int args[2];
u32 id;
struct c_can_raminit *raminit = &priv->raminit_sys;
ret = -EINVAL;
- raminit->syscon = syscon_regmap_lookup_by_phandle(np,
- "syscon-raminit");
+ raminit->syscon = syscon_regmap_lookup_by_phandle_args(np,
+ "syscon-raminit",
+ 2, args);
if (IS_ERR(raminit->syscon)) {
- /* can fail with -EPROBE_DEFER */
ret = PTR_ERR(raminit->syscon);
- free_c_can_dev(dev);
- return ret;
- }
-
- if (of_property_read_u32_index(np, "syscon-raminit", 1,
- &raminit->reg)) {
- dev_err(&pdev->dev,
- "couldn't get the RAMINIT reg. offset!\n");
goto exit_free_device;
}
- if (of_property_read_u32_index(np, "syscon-raminit", 2,
- &id)) {
- dev_err(&pdev->dev,
- "couldn't get the CAN instance ID\n");
- goto exit_free_device;
- }
+ raminit->reg = args[0];
+ id = args[1];
if (id >= drvdata->raminit_num) {
dev_err(&pdev->dev,
@@ -396,8 +377,6 @@ exit_pm_runtime:
pm_runtime_disable(priv->device);
exit_free_device:
free_c_can_dev(dev);
-exit:
- dev_err(&pdev->dev, "probe failed\n");
return ret;
}
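
The probe cleanup above folds three devicetree lookups into one syscon_regmap_lookup_by_phandle_args() call, which returns the syscon regmap and the phandle's argument cells in a single step. A minimal sketch under the same assumptions (a "syscon-raminit" phandle with two argument cells: register offset and CAN instance id); sketch_get_raminit() is hypothetical:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/types.h>

static int sketch_get_raminit(struct device_node *np, struct regmap **map,
                              u32 *offset, u32 *id)
{
        unsigned int args[2];

        *map = syscon_regmap_lookup_by_phandle_args(np, "syscon-raminit",
                                                    ARRAY_SIZE(args), args);
        if (IS_ERR(*map))
                return PTR_ERR(*map);   /* may be -EPROBE_DEFER */

        *offset = args[0];              /* RAMINIT register offset */
        *id = args[1];                  /* CAN instance id */
        return 0;
}
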
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index 01aacdcda260..f1db9b7ffd4d 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -624,8 +624,8 @@ nla_put_failure:
return -EMSGSIZE;
}
-static int can_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int can_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index ac1a860986df..6d80c341b26f 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -26,6 +26,7 @@
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/can/platform/flexcan.h>
+#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/regmap.h>
@@ -386,6 +387,16 @@ static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = {
FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
+static const struct flexcan_devtype_data nxp_s32g2_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_FD |
+ FLEXCAN_QUIRK_SUPPORT_ECC | FLEXCAN_QUIRK_NR_IRQ_3 |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR |
+ FLEXCAN_QUIRK_SECONDARY_MB_IRQ,
+};
+
static const struct can_bittiming_const flexcan_bittiming_const = {
.name = DRV_NAME,
.tseg1_min = 4,
@@ -634,18 +645,22 @@ static void flexcan_clks_disable(const struct flexcan_priv *priv)
static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
{
- if (!priv->reg_xceiver)
- return 0;
+ if (priv->reg_xceiver)
+ return regulator_enable(priv->reg_xceiver);
+ else if (priv->transceiver)
+ return phy_power_on(priv->transceiver);
- return regulator_enable(priv->reg_xceiver);
+ return 0;
}
static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv)
{
- if (!priv->reg_xceiver)
- return 0;
+ if (priv->reg_xceiver)
+ return regulator_disable(priv->reg_xceiver);
+ else if (priv->transceiver)
+ return phy_power_off(priv->transceiver);
- return regulator_disable(priv->reg_xceiver);
+ return 0;
}
static int flexcan_chip_enable(struct flexcan_priv *priv)
@@ -1762,14 +1777,25 @@ static int flexcan_open(struct net_device *dev)
goto out_free_irq_boff;
}
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ) {
+ err = request_irq(priv->irq_secondary_mb,
+ flexcan_irq, IRQF_SHARED, dev->name, dev);
+ if (err)
+ goto out_free_irq_err;
+ }
+
flexcan_chip_interrupts_enable(dev);
netif_start_queue(dev);
return 0;
+ out_free_irq_err:
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3)
+ free_irq(priv->irq_err, dev);
out_free_irq_boff:
- free_irq(priv->irq_boff, dev);
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3)
+ free_irq(priv->irq_boff, dev);
out_free_irq:
free_irq(dev->irq, dev);
out_can_rx_offload_disable:
@@ -1794,6 +1820,9 @@ static int flexcan_close(struct net_device *dev)
netif_stop_queue(dev);
flexcan_chip_interrupts_disable(dev);
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ)
+ free_irq(priv->irq_secondary_mb, dev);
+
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
free_irq(priv->irq_err, dev);
free_irq(priv->irq_boff, dev);
@@ -2041,6 +2070,7 @@ static const struct of_device_id flexcan_of_match[] = {
{ .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
{ .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, },
{ .compatible = "fsl,lx2160ar1-flexcan", .data = &fsl_lx2160a_r1_devtype_data, },
+ { .compatible = "nxp,s32g2-flexcan", .data = &nxp_s32g2_devtype_data, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, flexcan_of_match);
@@ -2061,6 +2091,7 @@ static int flexcan_probe(struct platform_device *pdev)
struct net_device *dev;
struct flexcan_priv *priv;
struct regulator *reg_xceiver;
+ struct phy *transceiver;
struct clk *clk_ipg = NULL, *clk_per = NULL;
struct flexcan_regs __iomem *regs;
struct flexcan_platform_data *pdata;
@@ -2076,6 +2107,11 @@ static int flexcan_probe(struct platform_device *pdev)
else if (IS_ERR(reg_xceiver))
return PTR_ERR(reg_xceiver);
+ transceiver = devm_phy_optional_get(&pdev->dev, NULL);
+ if (IS_ERR(transceiver))
+ return dev_err_probe(&pdev->dev, PTR_ERR(transceiver),
+ "failed to get phy\n");
+
if (pdev->dev.of_node) {
of_property_read_u32(pdev->dev.of_node,
"clock-frequency", &clock_freq);
@@ -2173,6 +2209,10 @@ static int flexcan_probe(struct platform_device *pdev)
priv->clk_per = clk_per;
priv->clk_src = clk_src;
priv->reg_xceiver = reg_xceiver;
+ priv->transceiver = transceiver;
+
+ if (transceiver)
+ priv->can.bitrate_max = transceiver->attrs.max_link_rate;
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
priv->irq_boff = platform_get_irq(pdev, 1);
@@ -2187,6 +2227,14 @@ static int flexcan_probe(struct platform_device *pdev)
}
}
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ) {
+ priv->irq_secondary_mb = platform_get_irq_byname(pdev, "mb-1");
+ if (priv->irq_secondary_mb < 0) {
+ err = priv->irq_secondary_mb;
+ goto failed_platform_get_irq;
+ }
+ }
+
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SUPPORT_FD) {
priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
CAN_CTRLMODE_FD_NON_ISO;
@@ -2260,14 +2308,19 @@ static int __maybe_unused flexcan_suspend(struct device *device)
flexcan_chip_interrupts_disable(dev);
+ err = flexcan_transceiver_disable(priv);
+ if (err)
+ return err;
+
err = pinctrl_pm_select_sleep_state(device);
if (err)
return err;
}
netif_stop_queue(dev);
netif_device_detach(dev);
+
+ priv->can.state = CAN_STATE_SLEEPING;
}
- priv->can.state = CAN_STATE_SLEEPING;
return 0;
}
@@ -2278,7 +2331,6 @@ static int __maybe_unused flexcan_resume(struct device *device)
struct flexcan_priv *priv = netdev_priv(dev);
int err;
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(dev)) {
netif_device_attach(dev);
netif_start_queue(dev);
@@ -2292,12 +2344,20 @@ static int __maybe_unused flexcan_resume(struct device *device)
if (err)
return err;
- err = flexcan_chip_start(dev);
+ err = flexcan_transceiver_enable(priv);
if (err)
return err;
+ err = flexcan_chip_start(dev);
+ if (err) {
+ flexcan_transceiver_disable(priv);
+ return err;
+ }
+
flexcan_chip_interrupts_enable(dev);
}
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
return 0;
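
With the changes above the driver powers the CAN transceiver either through the legacy regulator supply or through an optional generic PHY; devm_phy_optional_get() returns NULL when no PHY is described, so at most one of the two pointers is set. A minimal sketch of the enable side under that assumption (the disable side mirrors it with regulator_disable()/phy_power_off()); sketch_transceiver_enable() is not a flexcan function:

#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>

static int sketch_transceiver_enable(struct regulator *reg_xceiver,
                                     struct phy *transceiver)
{
        if (reg_xceiver)                /* legacy regulator supply */
                return regulator_enable(reg_xceiver);
        if (transceiver)                /* optional generic PHY */
                return phy_power_on(transceiver);
        return 0;                       /* nothing to power up */
}
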
diff --git a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h
index 4933d8c7439e..16692a2502eb 100644
--- a/drivers/net/can/flexcan/flexcan.h
+++ b/drivers/net/can/flexcan/flexcan.h
@@ -70,6 +70,10 @@
#define FLEXCAN_QUIRK_SUPPORT_RX_FIFO BIT(16)
/* Setup stop mode with ATF SCMI protocol to support wakeup */
#define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI BIT(17)
+/* Device has two separate interrupt lines for two mailbox ranges, which
+ * both need to have an interrupt handler registered.
+ */
+#define FLEXCAN_QUIRK_SECONDARY_MB_IRQ BIT(18)
struct flexcan_devtype_data {
u32 quirks; /* quirks needed for different IP cores */
@@ -103,10 +107,12 @@ struct flexcan_priv {
struct clk *clk_per;
struct flexcan_devtype_data devtype_data;
struct regulator *reg_xceiver;
+ struct phy *transceiver;
struct flexcan_stop_mode stm;
int irq_boff;
int irq_err;
+ int irq_secondary_mb;
/* IPC handle when setup stop mode by System Controller firmware(scfw) */
struct imx_sc_ipc *sc_ipc_handle;
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index d025d4163fd1..884a6352c42b 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -2420,12 +2420,11 @@ int m_can_class_register(struct m_can_classdev *cdev)
if (!cdev->net->irq) {
dev_dbg(cdev->dev, "Polling enabled, initialize hrtimer");
- hrtimer_init(&cdev->hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- cdev->hrtimer.function = &hrtimer_callback;
+ hrtimer_setup(&cdev->hrtimer, &hrtimer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
} else {
- hrtimer_init(&cdev->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- cdev->hrtimer.function = m_can_coalescing_timer;
+ hrtimer_setup(&cdev->hrtimer, m_can_coalescing_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
ret = m_can_dev_setup(cdev);
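
Both hrtimer conversions in this section (m_can above, mcp251xfd further down) replace the hrtimer_init() plus .function assignment pair with hrtimer_setup(), which takes the callback up front so it can never be left unset. A small self-contained sketch of the new form, with hypothetical names and a 10 ms one-shot timer:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static enum hrtimer_restart sketch_timer_fn(struct hrtimer *t)
{
        /* runs in hrtimer context; do the periodic work here */
        return HRTIMER_NORESTART;
}

static void sketch_timer_init(struct hrtimer *timer)
{
        /* one call instead of hrtimer_init() + timer->function = ... */
        hrtimer_setup(timer, sketch_timer_fn, CLOCK_MONOTONIC,
                      HRTIMER_MODE_REL);
        hrtimer_start(timer, ms_to_ktime(10), HRTIMER_MODE_REL);
}
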
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index df1a5d0b37b2..aa3df0d05b85 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -787,22 +787,14 @@ static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv)
}
static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
- u32 ch)
+ u32 ch, u32 rule_entry)
{
- u32 cfg;
- int offset, start, page, num_rules = RCANFD_CHANNEL_NUMRULES;
+ int offset, page, num_rules = RCANFD_CHANNEL_NUMRULES;
+ u32 rule_entry_index = rule_entry % 16;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
- if (ch == 0) {
- start = 0; /* Channel 0 always starts from 0th rule */
- } else {
- /* Get number of Channel 0 rules and adjust */
- cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG(ch));
- start = RCANFD_GAFLCFG_GETRNC(gpriv, 0, cfg);
- }
-
/* Enable write access to entry */
- page = RCANFD_GAFL_PAGENUM(start);
+ page = RCANFD_GAFL_PAGENUM(rule_entry);
rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLECTR,
(RCANFD_GAFLECTR_AFLPN(gpriv, page) |
RCANFD_GAFLECTR_AFLDAE));
@@ -818,13 +810,13 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
offset = RCANFD_C_GAFL_OFFSET;
/* Accept all IDs */
- rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, start), 0);
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, rule_entry_index), 0);
/* IDE or RTR is not considered for matching */
- rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, start), 0);
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, rule_entry_index), 0);
/* Any data length accepted */
- rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, start), 0);
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, rule_entry_index), 0);
/* Place the msg in corresponding Rx FIFO entry */
- rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, start),
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, rule_entry_index),
RCANFD_GAFLP1_GAFLFDP(ridx));
/* Disable write access to page */
@@ -1851,6 +1843,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
unsigned long channels_mask = 0;
int err, ch_irq, g_irq;
int g_err_irq, g_recc_irq;
+ u32 rule_entry = 0;
bool fdmode = true; /* CAN FD only mode - default */
char name[9] = "channelX";
int i;
@@ -2023,7 +2016,8 @@ static int rcar_canfd_probe(struct platform_device *pdev)
rcar_canfd_configure_tx(gpriv, ch);
/* Configure receive rules */
- rcar_canfd_configure_afl_rules(gpriv, ch);
+ rcar_canfd_configure_afl_rules(gpriv, ch, rule_entry);
+ rule_entry += RCANFD_CHANNEL_NUMRULES;
}
/* Configure common interrupts */
diff --git a/drivers/net/can/rockchip/rockchip_canfd-core.c b/drivers/net/can/rockchip/rockchip_canfd-core.c
index d9a937ba126c..46201c126703 100644
--- a/drivers/net/can/rockchip/rockchip_canfd-core.c
+++ b/drivers/net/can/rockchip/rockchip_canfd-core.c
@@ -236,11 +236,6 @@ static void rkcanfd_chip_fifo_setup(struct rkcanfd_priv *priv)
{
u32 reg;
- /* TXE FIFO */
- reg = rkcanfd_read(priv, RKCANFD_REG_RX_FIFO_CTRL);
- reg |= RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_ENABLE;
- rkcanfd_write(priv, RKCANFD_REG_RX_FIFO_CTRL, reg);
-
/* RX FIFO */
reg = rkcanfd_read(priv, RKCANFD_REG_RX_FIFO_CTRL);
reg |= RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_ENABLE;
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
index 7209a831f0f2..c34f2067a989 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
@@ -541,11 +541,11 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
}
priv->rx_ring_num = i;
- hrtimer_init(&priv->rx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- priv->rx_irq_timer.function = mcp251xfd_rx_irq_timer;
+ hrtimer_setup(&priv->rx_irq_timer, mcp251xfd_rx_irq_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
- hrtimer_init(&priv->tx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- priv->tx_irq_timer.function = mcp251xfd_tx_irq_timer;
+ hrtimer_setup(&priv->tx_irq_timer, mcp251xfd_tx_irq_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
return 0;
}
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index b6f4de375df7..3ccac6781b98 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -43,6 +43,9 @@
#define USB_XYLANTA_SAINT3_VENDOR_ID 0x16d0
#define USB_XYLANTA_SAINT3_PRODUCT_ID 0x0f30
+#define USB_CANNECTIVITY_VENDOR_ID 0x1209
+#define USB_CANNECTIVITY_PRODUCT_ID 0xca01
+
/* Timestamp 32 bit timer runs at 1 MHz (1 µs tick). Worker accounts
* for timer overflow (will be after ~71 minutes)
*/
@@ -1546,6 +1549,8 @@ static const struct usb_device_id gs_usb_table[] = {
USB_ABE_CANDEBUGGER_FD_PRODUCT_ID, 0) },
{ USB_DEVICE_INTERFACE_NUMBER(USB_XYLANTA_SAINT3_VENDOR_ID,
USB_XYLANTA_SAINT3_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_CANNECTIVITY_VENDOR_ID,
+ USB_CANNECTIVITY_PRODUCT_ID, 0) },
{} /* Terminating entry */
};
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index 39a63b7313a4..07406daf7c88 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -186,7 +186,7 @@ union ucan_ctl_payload {
*/
struct ucan_ctl_cmd_get_protocol_version cmd_get_protocol_version;
- u8 raw[128];
+ u8 fw_str[128];
} __packed;
enum {
@@ -424,18 +424,20 @@ static int ucan_ctrl_command_out(struct ucan_priv *up,
UCAN_USB_CTL_PIPE_TIMEOUT);
}
-static int ucan_device_request_in(struct ucan_priv *up,
- u8 cmd, u16 subcmd, u16 datalen)
+static void ucan_get_fw_str(struct ucan_priv *up, char *fw_str, size_t size)
{
- return usb_control_msg(up->udev,
- usb_rcvctrlpipe(up->udev, 0),
- cmd,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- subcmd,
- 0,
- up->ctl_msg_buffer,
- datalen,
- UCAN_USB_CTL_PIPE_TIMEOUT);
+ int ret;
+
+ ret = usb_control_msg(up->udev, usb_rcvctrlpipe(up->udev, 0),
+ UCAN_DEVICE_GET_FW_STRING,
+ USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE,
+ 0, 0, fw_str, size - 1,
+ UCAN_USB_CTL_PIPE_TIMEOUT);
+ if (ret > 0)
+ fw_str[ret] = '\0';
+ else
+ strscpy(fw_str, "unknown", size);
}
/* Parse the device information structure reported by the device and
@@ -1314,7 +1316,6 @@ static int ucan_probe(struct usb_interface *intf,
u8 in_ep_addr;
u8 out_ep_addr;
union ucan_ctl_payload *ctl_msg_buffer;
- char firmware_str[sizeof(union ucan_ctl_payload) + 1];
udev = interface_to_usbdev(intf);
@@ -1527,17 +1528,6 @@ static int ucan_probe(struct usb_interface *intf,
*/
ucan_parse_device_info(up, &ctl_msg_buffer->cmd_get_device_info);
- /* just print some device information - if available */
- ret = ucan_device_request_in(up, UCAN_DEVICE_GET_FW_STRING, 0,
- sizeof(union ucan_ctl_payload));
- if (ret > 0) {
- /* copy string while ensuring zero termination */
- strscpy(firmware_str, up->ctl_msg_buffer->raw,
- sizeof(union ucan_ctl_payload) + 1);
- } else {
- strcpy(firmware_str, "unknown");
- }
-
/* device is compatible, reset it */
ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0);
if (ret < 0)
@@ -1555,7 +1545,10 @@ static int ucan_probe(struct usb_interface *intf,
/* initialisation complete, log device info */
netdev_info(up->netdev, "registered device\n");
- netdev_info(up->netdev, "firmware string: %s\n", firmware_str);
+ ucan_get_fw_str(up, up->ctl_msg_buffer->fw_str,
+ sizeof(up->ctl_msg_buffer->fw_str));
+ netdev_info(up->netdev, "firmware string: %s\n",
+ up->ctl_msg_buffer->fw_str);
/* success */
return 0;
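
The ucan rework above fetches the firmware string on demand into the driver's DMA-capable control buffer, reads at most size - 1 bytes so there is always room for the terminating NUL, and falls back to "unknown" on error. A minimal sketch of that pattern; the vendor request code and the 1000 ms timeout are placeholders, and buf must be a DMA-capable (kmalloc'ed) buffer as it is in the driver:

#include <linux/string.h>
#include <linux/types.h>
#include <linux/usb.h>

static void sketch_get_fw_str(struct usb_device *udev, u8 request,
                              char *buf, size_t size)
{
        int ret;

        ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
                              USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                              0, 0, buf, size - 1, 1000);
        if (ret > 0)
                buf[ret] = '\0';        /* ret <= size - 1, always in bounds */
        else
                strscpy(buf, "unknown", size);
}
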
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index ca8811941085..99a78a757167 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -172,10 +172,13 @@ static void vxcan_setup(struct net_device *dev)
/* forward declaration for rtnl_create_link() */
static struct rtnl_link_ops vxcan_link_ops;
-static int vxcan_newlink(struct net *peer_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int vxcan_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *peer_net = rtnl_newlink_peer_net(params);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct vxcan_priv *priv;
struct net_device *peer;
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 2d10b4d6cfbb..bb9812b3b0e8 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -37,6 +37,7 @@ config NET_DSA_LANTIQ_GSWIP
config NET_DSA_MT7530
tristate "MediaTek MT7530 and MT7531 Ethernet switch support"
select NET_DSA_TAG_MTK
+ select REGMAP_IRQ
imply NET_DSA_MT7530_MDIO
imply NET_DSA_MT7530_MMIO
help
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 79dc77835681..61d164ffb3ae 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -2410,6 +2410,19 @@ static const struct b53_chip_data b53_switch_chips[] = {
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
},
{
+ .chip_id = BCM53101_DEVICE_ID,
+ .dev_name = "BCM53101",
+ .vlans = 4096,
+ .enabled_ports = 0x11f,
+ .arl_bins = 4,
+ .arl_buckets = 512,
+ .vta_regs = B53_VTA_REGS,
+ .imp_port = 8,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
.chip_id = BCM53115_DEVICE_ID,
.dev_name = "BCM53115",
.vlans = 4096,
@@ -2789,6 +2802,7 @@ int b53_switch_detect(struct b53_device *dev)
return ret;
switch (id32) {
+ case BCM53101_DEVICE_ID:
case BCM53115_DEVICE_ID:
case BCM53125_DEVICE_ID:
case BCM53128_DEVICE_ID:
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
index 31d070bf161a..43a3b37b731b 100644
--- a/drivers/net/dsa/b53/b53_mdio.c
+++ b/drivers/net/dsa/b53/b53_mdio.c
@@ -374,6 +374,7 @@ static void b53_mdio_shutdown(struct mdio_device *mdiodev)
static const struct of_device_id b53_of_match[] = {
{ .compatible = "brcm,bcm5325" },
+ { .compatible = "brcm,bcm53101" },
{ .compatible = "brcm,bcm53115" },
{ .compatible = "brcm,bcm53125" },
{ .compatible = "brcm,bcm53128" },
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 9e9b5bc0c5d6..0166c37a13a7 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -66,6 +66,7 @@ enum {
BCM5395_DEVICE_ID = 0x95,
BCM5397_DEVICE_ID = 0x97,
BCM5398_DEVICE_ID = 0x98,
+ BCM53101_DEVICE_ID = 0x53101,
BCM53115_DEVICE_ID = 0x53115,
BCM53125_DEVICE_ID = 0x53125,
BCM53128_DEVICE_ID = 0x53128,
@@ -188,6 +189,7 @@ static inline int is531x5(struct b53_device *dev)
{
return dev->chip_id == BCM53115_DEVICE_ID ||
dev->chip_id == BCM53125_DEVICE_ID ||
+ dev->chip_id == BCM53101_DEVICE_ID ||
dev->chip_id == BCM53128_DEVICE_ID ||
dev->chip_id == BCM53134_DEVICE_ID;
}
diff --git a/drivers/net/dsa/b53/b53_serdes.c b/drivers/net/dsa/b53/b53_serdes.c
index 4730982b6840..7460122f6abc 100644
--- a/drivers/net/dsa/b53/b53_serdes.c
+++ b/drivers/net/dsa/b53/b53_serdes.c
@@ -239,7 +239,6 @@ int b53_serdes_init(struct b53_device *dev, int port)
pcs->dev = dev;
pcs->lane = lane;
pcs->pcs.ops = &b53_pcs_ops;
- pcs->pcs.neg_mode = true;
return 0;
}
diff --git a/drivers/net/dsa/microchip/ksz8.c b/drivers/net/dsa/microchip/ksz8.c
index da7110d67558..be433b4e2b1c 100644
--- a/drivers/net/dsa/microchip/ksz8.c
+++ b/drivers/net/dsa/microchip/ksz8.c
@@ -1625,7 +1625,6 @@ void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
const u16 *regs = dev->info->regs;
struct dsa_switch *ds = dev->ds;
const u32 *masks;
- int queues;
u8 member;
masks = dev->info->masks;
@@ -1633,15 +1632,7 @@ void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
/* enable broadcast storm limit */
ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
- /* For KSZ88x3 enable only one queue by default, otherwise we won't
- * be able to get rid of PCP prios on Port 2.
- */
- if (ksz_is_ksz88x3(dev))
- queues = 1;
- else
- queues = dev->info->num_tx_queues;
-
- ksz8_port_queue_split(dev, port, queues);
+ ksz8_port_queue_split(dev, port, dev->info->num_tx_queues);
/* replace priority */
ksz_port_cfg(dev, port, P_802_1P_CTRL,
diff --git a/drivers/net/dsa/microchip/ksz_dcb.c b/drivers/net/dsa/microchip/ksz_dcb.c
index 30b4a6186e38..c3b501997ac9 100644
--- a/drivers/net/dsa/microchip/ksz_dcb.c
+++ b/drivers/net/dsa/microchip/ksz_dcb.c
@@ -10,7 +10,12 @@
#include "ksz_dcb.h"
#include "ksz8.h"
-#define KSZ8_REG_PORT_1_CTRL_0 0x10
+/* Port X Control 0 register.
+ * The datasheet specifies: Port 1 - 0x10, Port 2 - 0x20, Port 3 - 0x30.
+ * However, the driver uses get_port_addr(), which maps Port 1 to offset 0.
+ * Therefore, we define the base offset as 0x00 here to align with that logic.
+ */
+#define KSZ8_REG_PORT_1_CTRL_0 0x00
#define KSZ8_PORT_DIFFSERV_ENABLE BIT(6)
#define KSZ8_PORT_802_1P_ENABLE BIT(5)
#define KSZ8_PORT_BASED_PRIO_M GENMASK(4, 3)
@@ -182,49 +187,6 @@ int ksz_port_get_default_prio(struct dsa_switch *ds, int port)
}
/**
- * ksz88x3_port_set_default_prio_quirks - Quirks for default priority
- * @dev: Pointer to the KSZ switch device structure
- * @port: Port number for which to set the default priority
- * @prio: Priority value to set
- *
- * This function implements quirks for setting the default priority on KSZ88x3
- * devices. On Port 2, no other priority providers are working
- * except of PCP. So, configuring default priority on Port 2 is not possible.
- * On Port 1, it is not possible to configure port priority if PCP
- * apptrust on Port 2 is disabled. Since we disable multiple queues on the
- * switch to disable PCP on Port 2, we need to ensure that the default priority
- * configuration on Port 1 is in agreement with the configuration on Port 2.
- *
- * Return: 0 on success, or a negative error code on failure
- */
-static int ksz88x3_port_set_default_prio_quirks(struct ksz_device *dev, int port,
- u8 prio)
-{
- if (!prio)
- return 0;
-
- if (port == KSZ_PORT_2) {
- dev_err(dev->dev, "Port priority configuration is not working on Port 2\n");
- return -EINVAL;
- } else if (port == KSZ_PORT_1) {
- u8 port2_data;
- int ret;
-
- ret = ksz_pread8(dev, KSZ_PORT_2, KSZ8_REG_PORT_1_CTRL_0,
- &port2_data);
- if (ret)
- return ret;
-
- if (!(port2_data & KSZ8_PORT_802_1P_ENABLE)) {
- dev_err(dev->dev, "Not possible to configure port priority on Port 1 if PCP apptrust on Port 2 is disabled\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-/**
* ksz_port_set_default_prio - Sets the default priority for a port on a KSZ
* switch
* @ds: Pointer to the DSA switch structure
@@ -239,18 +201,12 @@ static int ksz88x3_port_set_default_prio_quirks(struct ksz_device *dev, int port
int ksz_port_set_default_prio(struct dsa_switch *ds, int port, u8 prio)
{
struct ksz_device *dev = ds->priv;
- int reg, shift, ret;
+ int reg, shift;
u8 mask;
if (prio >= dev->info->num_ipms)
return -EINVAL;
- if (ksz_is_ksz88x3(dev)) {
- ret = ksz88x3_port_set_default_prio_quirks(dev, port, prio);
- if (ret)
- return ret;
- }
-
ksz_get_default_port_prio_reg(dev, &reg, &mask, &shift);
return ksz_prmw8(dev, port, reg, mask, (prio << shift) & mask);
@@ -519,155 +475,6 @@ err_sel_not_vaild:
}
/**
- * ksz88x3_port1_apptrust_quirk - Quirk for apptrust configuration on Port 1
- * of KSZ88x3 devices
- * @dev: Pointer to the KSZ switch device structure
- * @port: Port number for which to set the apptrust selectors
- * @reg: Register address for the apptrust configuration
- * @port1_data: Data to set for the apptrust configuration
- *
- * This function implements a quirk for apptrust configuration on Port 1 of
- * KSZ88x3 devices. It ensures that apptrust configuration on Port 1 is not
- * possible if PCP apptrust on Port 2 is disabled. This is because the Port 2
- * seems to be permanently hardwired to PCP classification, so we need to
- * do Port 1 configuration always in agreement with Port 2 configuration.
- *
- * Return: 0 on success, or a negative error code on failure
- */
-static int ksz88x3_port1_apptrust_quirk(struct ksz_device *dev, int port,
- int reg, u8 port1_data)
-{
- u8 port2_data;
- int ret;
-
- /* If no apptrust is requested for Port 1, no need to care about Port 2
- * configuration.
- */
- if (!(port1_data & (KSZ8_PORT_802_1P_ENABLE | KSZ8_PORT_DIFFSERV_ENABLE)))
- return 0;
-
- /* We got request to enable any apptrust on Port 1. To make it possible,
- * we need to enable multiple queues on the switch. If we enable
- * multiqueue support, PCP classification on Port 2 will be
- * automatically activated by HW.
- */
- ret = ksz_pread8(dev, KSZ_PORT_2, reg, &port2_data);
- if (ret)
- return ret;
-
- /* If KSZ8_PORT_802_1P_ENABLE bit is set on Port 2, the driver showed
- * the interest in PCP classification on Port 2. In this case,
- * multiqueue support is enabled and we can enable any apptrust on
- * Port 1.
- * If KSZ8_PORT_802_1P_ENABLE bit is not set on Port 2, the PCP
- * classification on Port 2 is still active, but the driver disabled
- * multiqueue support and made frame prioritization inactive for
- * all ports. In this case, we can't enable any apptrust on Port 1.
- */
- if (!(port2_data & KSZ8_PORT_802_1P_ENABLE)) {
- dev_err(dev->dev, "Not possible to enable any apptrust on Port 1 if PCP apptrust on Port 2 is disabled\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * ksz88x3_port2_apptrust_quirk - Quirk for apptrust configuration on Port 2
- * of KSZ88x3 devices
- * @dev: Pointer to the KSZ switch device structure
- * @port: Port number for which to set the apptrust selectors
- * @reg: Register address for the apptrust configuration
- * @port2_data: Data to set for the apptrust configuration
- *
- * This function implements a quirk for apptrust configuration on Port 2 of
- * KSZ88x3 devices. It ensures that DSCP apptrust is not working on Port 2 and
- * that it is not possible to disable PCP on Port 2. The only way to disable PCP
- * on Port 2 is to disable multiple queues on the switch.
- *
- * Return: 0 on success, or a negative error code on failure
- */
-static int ksz88x3_port2_apptrust_quirk(struct ksz_device *dev, int port,
- int reg, u8 port2_data)
-{
- struct dsa_switch *ds = dev->ds;
- u8 port1_data;
- int ret;
-
- /* First validate Port 2 configuration. DiffServ/DSCP is not working
- * on this port.
- */
- if (port2_data & KSZ8_PORT_DIFFSERV_ENABLE) {
- dev_err(dev->dev, "DSCP apptrust is not working on Port 2\n");
- return -EINVAL;
- }
-
- /* If PCP support is requested, we need to enable all queues on the
- * switch to make PCP priority working on Port 2.
- */
- if (port2_data & KSZ8_PORT_802_1P_ENABLE)
- return ksz8_all_queues_split(dev, dev->info->num_tx_queues);
-
- /* We got request to disable PCP priority on Port 2.
- * Now, we need to compare Port 2 configuration with Port 1
- * configuration.
- */
- ret = ksz_pread8(dev, KSZ_PORT_1, reg, &port1_data);
- if (ret)
- return ret;
-
- /* If Port 1 has any apptrust enabled, we can't disable multiple queues
- * on the switch, so we can't disable PCP on Port 2.
- */
- if (port1_data & (KSZ8_PORT_802_1P_ENABLE | KSZ8_PORT_DIFFSERV_ENABLE)) {
- dev_err(dev->dev, "Not possible to disable PCP on Port 2 if any apptrust is enabled on Port 1\n");
- return -EINVAL;
- }
-
- /* Now we need to ensure that default priority on Port 1 is set to 0
- * otherwise we can't disable multiqueue support on the switch.
- */
- ret = ksz_port_get_default_prio(ds, KSZ_PORT_1);
- if (ret < 0) {
- return ret;
- } else if (ret) {
- dev_err(dev->dev, "Not possible to disable PCP on Port 2 if non zero default priority is set on Port 1\n");
- return -EINVAL;
- }
-
- /* Port 1 has no apptrust or default priority set and we got request to
- * disable PCP on Port 2. We can disable multiqueue support to disable
- * PCP on Port 2.
- */
- return ksz8_all_queues_split(dev, 1);
-}
-
-/**
- * ksz88x3_port_apptrust_quirk - Quirk for apptrust configuration on KSZ88x3
- * devices
- * @dev: Pointer to the KSZ switch device structure
- * @port: Port number for which to set the apptrust selectors
- * @reg: Register address for the apptrust configuration
- * @data: Data to set for the apptrust configuration
- *
- * This function implements a quirk for apptrust configuration on KSZ88x3
- * devices. It ensures that apptrust configuration on Port 1 and
- * Port 2 is done in agreement with each other.
- *
- * Return: 0 on success, or a negative error code on failure
- */
-static int ksz88x3_port_apptrust_quirk(struct ksz_device *dev, int port,
- int reg, u8 data)
-{
- if (port == KSZ_PORT_1)
- return ksz88x3_port1_apptrust_quirk(dev, port, reg, data);
- else if (port == KSZ_PORT_2)
- return ksz88x3_port2_apptrust_quirk(dev, port, reg, data);
-
- return 0;
-}
-
-/**
* ksz_port_set_apptrust - Sets the apptrust selectors for a port on a KSZ
* switch
* @ds: Pointer to the DSA switch structure
@@ -707,12 +514,6 @@ int ksz_port_set_apptrust(struct dsa_switch *ds, int port,
}
}
- if (ksz_is_ksz88x3(dev)) {
- ret = ksz88x3_port_apptrust_quirk(dev, port, reg, data);
- if (ret)
- return ret;
- }
-
return ksz_prmw8(dev, port, reg, mask, data);
}
@@ -799,21 +600,5 @@ int ksz_dcb_init_port(struct ksz_device *dev, int port)
*/
int ksz_dcb_init(struct ksz_device *dev)
{
- int ret;
-
- ret = ksz_init_global_dscp_map(dev);
- if (ret)
- return ret;
-
- /* Enable 802.1p priority control on Port 2 during switch initialization.
- * This setup is critical for the apptrust functionality on Port 1, which
- * relies on the priority settings of Port 2. Note: Port 1 is naturally
- * configured before Port 2, necessitating this configuration order.
- */
- if (ksz_is_ksz88x3(dev))
- return ksz_prmw8(dev, KSZ_PORT_2, KSZ8_REG_PORT_1_CTRL_0,
- KSZ8_PORT_802_1P_ENABLE,
- KSZ8_PORT_802_1P_ENABLE);
-
- return 0;
+ return ksz_init_global_dscp_map(dev);
}
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 5883eb93efb1..d70399bce5b9 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -2050,131 +2050,6 @@ mt7530_setup_gpio(struct mt7530_priv *priv)
}
#endif /* CONFIG_GPIOLIB */
-static irqreturn_t
-mt7530_irq_thread_fn(int irq, void *dev_id)
-{
- struct mt7530_priv *priv = dev_id;
- bool handled = false;
- u32 val;
- int p;
-
- mt7530_mutex_lock(priv);
- val = mt7530_mii_read(priv, MT7530_SYS_INT_STS);
- mt7530_mii_write(priv, MT7530_SYS_INT_STS, val);
- mt7530_mutex_unlock(priv);
-
- for (p = 0; p < MT7530_NUM_PHYS; p++) {
- if (BIT(p) & val) {
- unsigned int irq;
-
- irq = irq_find_mapping(priv->irq_domain, p);
- handle_nested_irq(irq);
- handled = true;
- }
- }
-
- return IRQ_RETVAL(handled);
-}
-
-static void
-mt7530_irq_mask(struct irq_data *d)
-{
- struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
-
- priv->irq_enable &= ~BIT(d->hwirq);
-}
-
-static void
-mt7530_irq_unmask(struct irq_data *d)
-{
- struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
-
- priv->irq_enable |= BIT(d->hwirq);
-}
-
-static void
-mt7530_irq_bus_lock(struct irq_data *d)
-{
- struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
-
- mt7530_mutex_lock(priv);
-}
-
-static void
-mt7530_irq_bus_sync_unlock(struct irq_data *d)
-{
- struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
-
- mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable);
- mt7530_mutex_unlock(priv);
-}
-
-static struct irq_chip mt7530_irq_chip = {
- .name = KBUILD_MODNAME,
- .irq_mask = mt7530_irq_mask,
- .irq_unmask = mt7530_irq_unmask,
- .irq_bus_lock = mt7530_irq_bus_lock,
- .irq_bus_sync_unlock = mt7530_irq_bus_sync_unlock,
-};
-
-static int
-mt7530_irq_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_set_chip_data(irq, domain->host_data);
- irq_set_chip_and_handler(irq, &mt7530_irq_chip, handle_simple_irq);
- irq_set_nested_thread(irq, true);
- irq_set_noprobe(irq);
-
- return 0;
-}
-
-static const struct irq_domain_ops mt7530_irq_domain_ops = {
- .map = mt7530_irq_map,
- .xlate = irq_domain_xlate_onecell,
-};
-
-static void
-mt7988_irq_mask(struct irq_data *d)
-{
- struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
-
- priv->irq_enable &= ~BIT(d->hwirq);
- mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable);
-}
-
-static void
-mt7988_irq_unmask(struct irq_data *d)
-{
- struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
-
- priv->irq_enable |= BIT(d->hwirq);
- mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable);
-}
-
-static struct irq_chip mt7988_irq_chip = {
- .name = KBUILD_MODNAME,
- .irq_mask = mt7988_irq_mask,
- .irq_unmask = mt7988_irq_unmask,
-};
-
-static int
-mt7988_irq_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_set_chip_data(irq, domain->host_data);
- irq_set_chip_and_handler(irq, &mt7988_irq_chip, handle_simple_irq);
- irq_set_nested_thread(irq, true);
- irq_set_noprobe(irq);
-
- return 0;
-}
-
-static const struct irq_domain_ops mt7988_irq_domain_ops = {
- .map = mt7988_irq_map,
- .xlate = irq_domain_xlate_onecell,
-};
-
static void
mt7530_setup_mdio_irq(struct mt7530_priv *priv)
{
@@ -2191,49 +2066,72 @@ mt7530_setup_mdio_irq(struct mt7530_priv *priv)
}
}
+static const struct regmap_irq mt7530_irqs[] = {
+ REGMAP_IRQ_REG_LINE(0, 32), /* PHY0_LC */
+ REGMAP_IRQ_REG_LINE(1, 32), /* PHY1_LC */
+ REGMAP_IRQ_REG_LINE(2, 32), /* PHY2_LC */
+ REGMAP_IRQ_REG_LINE(3, 32), /* PHY3_LC */
+ REGMAP_IRQ_REG_LINE(4, 32), /* PHY4_LC */
+ REGMAP_IRQ_REG_LINE(5, 32), /* PHY5_LC */
+ REGMAP_IRQ_REG_LINE(6, 32), /* PHY6_LC */
+ REGMAP_IRQ_REG_LINE(16, 32), /* MAC_PC */
+ REGMAP_IRQ_REG_LINE(17, 32), /* BMU */
+ REGMAP_IRQ_REG_LINE(18, 32), /* MIB */
+ REGMAP_IRQ_REG_LINE(22, 32), /* ARL_COL_FULL_COL */
+ REGMAP_IRQ_REG_LINE(23, 32), /* ARL_COL_FULL */
+ REGMAP_IRQ_REG_LINE(24, 32), /* ARL_TBL_ERR */
+ REGMAP_IRQ_REG_LINE(25, 32), /* ARL_PKT_QERR */
+ REGMAP_IRQ_REG_LINE(26, 32), /* ARL_EQ_ERR */
+ REGMAP_IRQ_REG_LINE(27, 32), /* ARL_PKT_BC */
+ REGMAP_IRQ_REG_LINE(28, 32), /* ARL_SEC_IG1X */
+ REGMAP_IRQ_REG_LINE(29, 32), /* ARL_SEC_VLAN */
+ REGMAP_IRQ_REG_LINE(30, 32), /* ARL_SEC_TAG */
+ REGMAP_IRQ_REG_LINE(31, 32), /* ACL */
+};
+
+static const struct regmap_irq_chip mt7530_regmap_irq_chip = {
+ .name = KBUILD_MODNAME,
+ .status_base = MT7530_SYS_INT_STS,
+ .unmask_base = MT7530_SYS_INT_EN,
+ .ack_base = MT7530_SYS_INT_STS,
+ .init_ack_masked = true,
+ .irqs = mt7530_irqs,
+ .num_irqs = ARRAY_SIZE(mt7530_irqs),
+ .num_regs = 1,
+};
+
static int
mt7530_setup_irq(struct mt7530_priv *priv)
{
+ struct regmap_irq_chip_data *irq_data;
struct device *dev = priv->dev;
struct device_node *np = dev->of_node;
- int ret;
+ int irq, ret;
if (!of_property_read_bool(np, "interrupt-controller")) {
dev_info(dev, "no interrupt support\n");
return 0;
}
- priv->irq = of_irq_get(np, 0);
- if (priv->irq <= 0) {
- dev_err(dev, "failed to get parent IRQ: %d\n", priv->irq);
- return priv->irq ? : -EINVAL;
- }
-
- if (priv->id == ID_MT7988 || priv->id == ID_EN7581)
- priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS,
- &mt7988_irq_domain_ops,
- priv);
- else
- priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS,
- &mt7530_irq_domain_ops,
- priv);
-
- if (!priv->irq_domain) {
- dev_err(dev, "failed to create IRQ domain\n");
- return -ENOMEM;
+ irq = of_irq_get(np, 0);
+ if (irq <= 0) {
+ dev_err(dev, "failed to get parent IRQ: %d\n", irq);
+ return irq ? : -EINVAL;
}
/* This register must be set for MT7530 to properly fire interrupts */
if (priv->id == ID_MT7530 || priv->id == ID_MT7621)
mt7530_set(priv, MT7530_TOP_SIG_CTRL, TOP_SIG_CTRL_NORMAL);
- ret = request_threaded_irq(priv->irq, NULL, mt7530_irq_thread_fn,
- IRQF_ONESHOT, KBUILD_MODNAME, priv);
- if (ret) {
- irq_domain_remove(priv->irq_domain);
- dev_err(dev, "failed to request IRQ: %d\n", ret);
+ ret = devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev),
+ priv->regmap, irq,
+ IRQF_ONESHOT,
+ 0, &mt7530_regmap_irq_chip,
+ &irq_data);
+ if (ret)
return ret;
- }
+
+ priv->irq_domain = regmap_irq_get_domain(irq_data);
return 0;
}
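
The hand-rolled irq_chip, thread handler and irq_domain removed above are replaced by the generic regmap-irq helper: the status/unmask/ack registers are described once in a regmap_irq_chip, devm_regmap_add_irq_chip_fwnode() registers the threaded handler, and the domain is fetched back for the MDIO PHY interrupts. A condensed sketch of that shape with hypothetical register offsets and only two interrupt lines:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/property.h>
#include <linux/regmap.h>

static const struct regmap_irq sketch_irqs[] = {
        REGMAP_IRQ_REG_LINE(0, 32),     /* bit 0 of the 32-bit status reg */
        REGMAP_IRQ_REG_LINE(1, 32),     /* bit 1 */
};

static const struct regmap_irq_chip sketch_irq_chip = {
        .name = "sketch",
        .status_base = 0x0c,            /* hypothetical register offsets */
        .unmask_base = 0x10,
        .ack_base = 0x0c,
        .init_ack_masked = true,
        .irqs = sketch_irqs,
        .num_irqs = ARRAY_SIZE(sketch_irqs),
        .num_regs = 1,
};

static int sketch_add_irq_chip(struct device *dev, struct regmap *map,
                               int irq, struct irq_domain **domain)
{
        struct regmap_irq_chip_data *data;
        int ret;

        ret = devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev), map, irq,
                                              IRQF_ONESHOT, 0,
                                              &sketch_irq_chip, &data);
        if (ret)
                return ret;

        *domain = regmap_irq_get_domain(data);  /* for child irq mapping */
        return 0;
}
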
@@ -2253,26 +2151,6 @@ mt7530_free_mdio_irq(struct mt7530_priv *priv)
}
}
-static void
-mt7530_free_irq_common(struct mt7530_priv *priv)
-{
- free_irq(priv->irq, priv);
- irq_domain_remove(priv->irq_domain);
-}
-
-static void
-mt7530_free_irq(struct mt7530_priv *priv)
-{
- struct device_node *mnp, *np = priv->dev->of_node;
-
- mnp = of_get_child_by_name(np, "mdio");
- if (!mnp)
- mt7530_free_mdio_irq(priv);
- of_node_put(mnp);
-
- mt7530_free_irq_common(priv);
-}
-
static int
mt7530_setup_mdio(struct mt7530_priv *priv)
{
@@ -2307,13 +2185,13 @@ mt7530_setup_mdio(struct mt7530_priv *priv)
bus->parent = dev;
bus->phy_mask = ~ds->phys_mii_mask;
- if (priv->irq && !mnp)
+ if (priv->irq_domain && !mnp)
mt7530_setup_mdio_irq(priv);
ret = devm_of_mdiobus_register(dev, bus, mnp);
if (ret) {
dev_err(dev, "failed to register MDIO bus: %d\n", ret);
- if (priv->irq && !mnp)
+ if (priv->irq_domain && !mnp)
mt7530_free_mdio_irq(priv);
}
@@ -2586,6 +2464,11 @@ mt7531_setup_common(struct dsa_switch *ds)
/* Allow mirroring frames received on the local port (monitor port). */
mt7530_set(priv, MT753X_AGC, LOCAL_EN);
+ /* Enable Special Tag for rx frames */
+ if (priv->id == ID_EN7581)
+ mt7530_write(priv, MT753X_CPORT_SPTAG_CFG,
+ CPORT_SW2FE_STAG_EN | CPORT_FE2SW_STAG_EN);
+
/* Flush the FDB table */
ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
if (ret < 0)
@@ -2953,28 +2836,61 @@ static void mt753x_phylink_mac_link_up(struct phylink_config *config,
mcr |= PMCR_FORCE_RX_FC_EN;
}
- if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, false) >= 0) {
- switch (speed) {
- case SPEED_1000:
- case SPEED_2500:
- mcr |= PMCR_FORCE_EEE1G;
- break;
- case SPEED_100:
- mcr |= PMCR_FORCE_EEE100;
- break;
- }
- }
-
mt7530_set(priv, MT753X_PMCR_P(dp->index), mcr);
}
+static void mt753x_phylink_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mt7530_priv *priv = dp->ds->priv;
+
+ mt7530_clear(priv, MT753X_PMCR_P(dp->index),
+ PMCR_FORCE_EEE1G | PMCR_FORCE_EEE100);
+}
+
+static int mt753x_phylink_mac_enable_tx_lpi(struct phylink_config *config,
+ u32 timer, bool tx_clock_stop)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mt7530_priv *priv = dp->ds->priv;
+ u32 val;
+
+ /* If the timer is zero, then set LPI_MODE_EN, which allows the
+ * system to enter LPI mode immediately rather than waiting for
+ * the LPI threshold.
+ */
+ if (!timer)
+ val = LPI_MODE_EN;
+ else if (FIELD_FIT(LPI_THRESH_MASK, timer))
+ val = FIELD_PREP(LPI_THRESH_MASK, timer);
+ else
+ val = LPI_THRESH_MASK;
+
+ mt7530_rmw(priv, MT753X_PMEEECR_P(dp->index),
+ LPI_THRESH_MASK | LPI_MODE_EN, val);
+
+ mt7530_set(priv, MT753X_PMCR_P(dp->index),
+ PMCR_FORCE_EEE1G | PMCR_FORCE_EEE100);
+
+ return 0;
+}
+
static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
struct mt7530_priv *priv = ds->priv;
+ u32 eeecr;
config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE;
+ config->lpi_capabilities = MAC_100FD | MAC_1000FD | MAC_2500FD;
+
+ eeecr = mt7530_read(priv, MT753X_PMEEECR_P(port));
+ /* tx_lpi_timer should be in microseconds. The time units for
+ * LPI threshold are unspecified.
+ */
+ config->lpi_timer_default = FIELD_GET(LPI_THRESH_MASK, eeecr);
+
priv->info->mac_port_get_caps(ds, port, config);
}
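
With the hunks above, EEE programming moves out of the ethtool set_mac_eee path and into phylink's LPI hooks: phylink calls mac_enable_tx_lpi() with the tx_lpi_timer value and mac_disable_tx_lpi() when LPI should stop, while get_caps only advertises lpi_capabilities and the default timer. A minimal sketch of the callback shapes for a hypothetical MAC, with the actual register writes elided:

#include <linux/phylink.h>
#include <linux/types.h>

static int sketch_mac_enable_tx_lpi(struct phylink_config *config,
                                    u32 timer, bool tx_clock_stop)
{
        /* program the LPI entry timer ("timer" is tx_lpi_timer) and set the
         * MAC's LPI/EEE enable bits here
         */
        return 0;
}

static void sketch_mac_disable_tx_lpi(struct phylink_config *config)
{
        /* clear the MAC's LPI/EEE enable bits here */
}

static const struct phylink_mac_ops sketch_phylink_mac_ops = {
        /* .mac_config, .mac_link_up, ... omitted from this sketch */
        .mac_enable_tx_lpi = sketch_mac_enable_tx_lpi,
        .mac_disable_tx_lpi = sketch_mac_disable_tx_lpi,
};
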
@@ -3059,24 +2975,21 @@ mt753x_setup(struct dsa_switch *ds)
return ret;
ret = mt7530_setup_mdio(priv);
- if (ret && priv->irq)
- mt7530_free_irq_common(priv);
if (ret)
return ret;
/* Initialise the PCS devices */
for (i = 0; i < priv->ds->num_ports; i++) {
priv->pcs[i].pcs.ops = priv->info->pcs_ops;
- priv->pcs[i].pcs.neg_mode = true;
priv->pcs[i].priv = priv;
priv->pcs[i].port = i;
}
- if (priv->create_sgmii) {
+ if (priv->create_sgmii)
ret = priv->create_sgmii(priv);
- if (ret && priv->irq)
- mt7530_free_irq(priv);
- }
+
+ if (ret && priv->irq_domain)
+ mt7530_free_mdio_irq(priv);
return ret;
}
@@ -3084,18 +2997,9 @@ mt753x_setup(struct dsa_switch *ds)
static int mt753x_set_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_keee *e)
{
- struct mt7530_priv *priv = ds->priv;
- u32 set, mask = LPI_THRESH_MASK | LPI_MODE_EN;
-
if (e->tx_lpi_timer > 0xFFF)
return -EINVAL;
- set = LPI_THRESH_SET(e->tx_lpi_timer);
- if (!e->tx_lpi_enabled)
- /* Force LPI Mode without a delay */
- set |= LPI_MODE_EN;
- mt7530_rmw(priv, MT753X_PMEEECR_P(port), mask, set);
-
return 0;
}
@@ -3234,6 +3138,8 @@ static const struct phylink_mac_ops mt753x_phylink_mac_ops = {
.mac_config = mt753x_phylink_mac_config,
.mac_link_down = mt753x_phylink_mac_link_down,
.mac_link_up = mt753x_phylink_mac_link_up,
+ .mac_disable_tx_lpi = mt753x_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = mt753x_phylink_mac_enable_tx_lpi,
};
const struct mt753x_info mt753x_table[] = {
@@ -3327,8 +3233,8 @@ EXPORT_SYMBOL_GPL(mt7530_probe_common);
void
mt7530_remove_common(struct mt7530_priv *priv)
{
- if (priv->irq)
- mt7530_free_irq(priv);
+ if (priv->irq_domain)
+ mt7530_free_mdio_irq(priv);
dsa_unregister_switch(priv->ds);
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 448200689f49..c3ea403d7acf 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -627,6 +627,10 @@ enum mt7531_xtal_fsel {
#define MT7531_GPIO12_RG_RXD3_MASK GENMASK(19, 16)
#define MT7531_EXT_P_MDIO_12 (2 << 16)
+#define MT753X_CPORT_SPTAG_CFG 0x7c10
+#define CPORT_SW2FE_STAG_EN BIT(1)
+#define CPORT_FE2SW_STAG_EN BIT(0)
+
/* Registers for LED GPIO control (MT7530 only)
* All registers follow this pattern:
* [ 2: 0] port 0
@@ -815,9 +819,7 @@ struct mt753x_info {
* @p5_mode: Holding the current mode of port 5 of the MT7530 switch
* @p5_sgmii: Flag for distinguishing if port 5 of the MT7531 switch
* has got SGMII
- * @irq: IRQ number of the switch
* @irq_domain: IRQ domain of the switch irq_chip
- * @irq_enable: IRQ enable bits, synced to SYS_INT_EN
* @create_sgmii: Pointer to function creating SGMII PCS instance(s)
* @active_cpu_ports: Holding the active CPU ports
* @mdiodev: The pointer to the MDIO device structure
@@ -842,9 +844,7 @@ struct mt7530_priv {
struct mt753x_pcs pcs[MT7530_NUM_PORTS];
/* protect among processes for registers access*/
struct mutex reg_mutex;
- int irq;
struct irq_domain *irq_domain;
- u32 irq_enable;
int (*create_sgmii)(struct mt7530_priv *priv);
u8 active_cpu_ports;
struct mdio_device *mdiodev;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 68d1e891752b..901929f96b38 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2208,13 +2208,11 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
return err;
}
-static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
- const unsigned char *addr, u16 vid,
- u8 state)
+static int mv88e6xxx_port_db_get(struct mv88e6xxx_chip *chip,
+ const unsigned char *addr, u16 vid,
+ u16 *fid, struct mv88e6xxx_atu_entry *entry)
{
- struct mv88e6xxx_atu_entry entry;
struct mv88e6xxx_vtu_entry vlan;
- u16 fid;
int err;
/* Ports have two private address databases: one for when the port is
@@ -2225,7 +2223,7 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
* VLAN ID into the port's database used for VLAN-unaware bridging.
*/
if (vid == 0) {
- fid = MV88E6XXX_FID_BRIDGED;
+ *fid = MV88E6XXX_FID_BRIDGED;
} else {
err = mv88e6xxx_vtu_get(chip, vid, &vlan);
if (err)
@@ -2235,14 +2233,39 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
if (!vlan.valid)
return -EOPNOTSUPP;
- fid = vlan.fid;
+ *fid = vlan.fid;
}
- entry.state = 0;
- ether_addr_copy(entry.mac, addr);
- eth_addr_dec(entry.mac);
+ entry->state = 0;
+ ether_addr_copy(entry->mac, addr);
+ eth_addr_dec(entry->mac);
+
+ return mv88e6xxx_g1_atu_getnext(chip, *fid, entry);
+}
+
+static bool mv88e6xxx_port_db_find(struct mv88e6xxx_chip *chip,
+ const unsigned char *addr, u16 vid)
+{
+ struct mv88e6xxx_atu_entry entry;
+ u16 fid;
+ int err;
+
+ err = mv88e6xxx_port_db_get(chip, addr, vid, &fid, &entry);
+ if (err)
+ return false;
+
+ return entry.state && ether_addr_equal(entry.mac, addr);
+}
+
+static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
+ const unsigned char *addr, u16 vid,
+ u8 state)
+{
+ struct mv88e6xxx_atu_entry entry;
+ u16 fid;
+ int err;
- err = mv88e6xxx_g1_atu_getnext(chip, fid, &entry);
+ err = mv88e6xxx_port_db_get(chip, addr, vid, &fid, &entry);
if (err)
return err;
@@ -2846,6 +2869,13 @@ static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid,
MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC);
+ if (err)
+ goto out;
+
+ if (!mv88e6xxx_port_db_find(chip, addr, vid))
+ err = -ENOSPC;
+
+out:
mv88e6xxx_reg_unlock(chip);
return err;
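
The fdb add path above (and the matching mdb hunk below) exists because an ATU load can "succeed" even when the hardware table is full; the entry is therefore read back and a silent drop is converted into -ENOSPC. The same write-then-verify pattern in isolation, with hypothetical table helpers standing in for the ATU load/getnext accessors:

#include <linux/errno.h>
#include <linux/types.h>

struct sketch_table;    /* stand-in for the switch's address table */

/* Hypothetical accessors, loosely modelled on the load_purge/find pair above */
int sketch_table_write(struct sketch_table *t, const u8 *addr, u16 vid);
bool sketch_table_contains(struct sketch_table *t, const u8 *addr, u16 vid);

static int sketch_add_entry(struct sketch_table *t, const u8 *addr, u16 vid)
{
        int err;

        err = sketch_table_write(t, addr, vid);
        if (err)
                return err;

        /* the write does not fail on a full table, so verify the entry stuck */
        if (!sketch_table_contains(t, addr, vid))
                return -ENOSPC;

        return 0;
}
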
@@ -3644,6 +3674,21 @@ static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_stats_clear(chip);
}
+static int mv88e6320_setup_errata(struct mv88e6xxx_chip *chip)
+{
+ u16 dummy;
+ int err;
+
+ /* Workaround for erratum
+ * 3.3 RGMII timing may be out of spec when transmit delay is enabled
+ */
+ err = mv88e6xxx_port_hidden_write(chip, 0, 0xf, 0x7, 0xe000);
+ if (err)
+ return err;
+
+ return mv88e6xxx_port_hidden_read(chip, 0, 0xf, 0x7, &dummy);
+}
+
/* Check if the errata has already been applied. */
static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip)
{
@@ -5100,6 +5145,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
static const struct mv88e6xxx_ops mv88e6320_ops = {
/* MV88E6XXX_FAMILY_6320 */
+ .setup_errata = mv88e6320_setup_errata,
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
@@ -5115,6 +5161,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -5139,8 +5186,10 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
- .vtu_getnext = mv88e6185_g1_vtu_getnext,
- .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -5149,6 +5198,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
static const struct mv88e6xxx_ops mv88e6321_ops = {
/* MV88E6XXX_FAMILY_6320 */
+ .setup_errata = mv88e6320_setup_errata,
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
@@ -5164,6 +5214,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -5187,8 +5238,10 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
- .vtu_getnext = mv88e6185_g1_vtu_getnext,
- .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -5788,7 +5841,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
- .atu_move_port_mask = 0x1f,
+ .atu_move_port_mask = 0xf,
.g1_irqs = 9,
.g2_irqs = 10,
.stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
@@ -6206,9 +6259,11 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_macs = 8192,
.num_ports = 7,
- .num_internal_phys = 5,
+ .num_internal_phys = 2,
+ .internal_phys_offset = 3,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -6232,9 +6287,11 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_macs = 8192,
.num_ports = 7,
- .num_internal_phys = 5,
+ .num_internal_phys = 2,
+ .internal_phys_offset = 3,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -6244,6 +6301,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.g2_irqs = 10,
.stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0xf,
+ .pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ptp_support = true,
@@ -6266,7 +6324,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
- .atu_move_port_mask = 0x1f,
+ .atu_move_port_mask = 0xf,
.g1_irqs = 9,
.g2_irqs = 10,
.stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
@@ -6614,6 +6672,13 @@ static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid,
MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC);
+ if (err)
+ goto out;
+
+ if (!mv88e6xxx_port_db_find(chip, mdb->addr, mdb->vid))
+ err = -ENOSPC;
+
+out:
mv88e6xxx_reg_unlock(chip);
return err;
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-6185.c b/drivers/net/dsa/mv88e6xxx/pcs-6185.c
index 75ed1fa500a5..af7e06d265f7 100644
--- a/drivers/net/dsa/mv88e6xxx/pcs-6185.c
+++ b/drivers/net/dsa/mv88e6xxx/pcs-6185.c
@@ -138,7 +138,6 @@ static int mv88e6185_pcs_init(struct mv88e6xxx_chip *chip, int port)
mpcs->chip = chip;
mpcs->port = port;
mpcs->phylink_pcs.ops = &mv88e6185_phylink_pcs_ops;
- mpcs->phylink_pcs.neg_mode = true;
irq = mv88e6xxx_serdes_irq_mapping(chip, port);
if (irq) {
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-6352.c b/drivers/net/dsa/mv88e6xxx/pcs-6352.c
index 143fe21d1834..36993400837e 100644
--- a/drivers/net/dsa/mv88e6xxx/pcs-6352.c
+++ b/drivers/net/dsa/mv88e6xxx/pcs-6352.c
@@ -275,7 +275,6 @@ static struct marvell_c22_pcs *marvell_c22_pcs_alloc(struct device *dev,
mpcs->mdio.bus = bus;
mpcs->mdio.addr = addr;
mpcs->phylink_pcs.ops = &marvell_c22_pcs_ops;
- mpcs->phylink_pcs.neg_mode = true;
return mpcs;
}
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-639x.c b/drivers/net/dsa/mv88e6xxx/pcs-639x.c
index 59f63d6beec8..5db17c0b77f5 100644
--- a/drivers/net/dsa/mv88e6xxx/pcs-639x.c
+++ b/drivers/net/dsa/mv88e6xxx/pcs-639x.c
@@ -565,9 +565,7 @@ static int mv88e6390_pcs_init(struct mv88e6xxx_chip *chip, int port)
return -ENOMEM;
mpcs->sgmii_pcs.ops = &mv88e639x_sgmii_pcs_ops;
- mpcs->sgmii_pcs.neg_mode = true;
mpcs->xg_pcs.ops = &mv88e6390_xg_pcs_ops;
- mpcs->xg_pcs.neg_mode = true;
if (chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6190X ||
chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6390X)
@@ -945,9 +943,7 @@ static int mv88e6393x_pcs_init(struct mv88e6xxx_chip *chip, int port)
return -ENOMEM;
mpcs->sgmii_pcs.ops = &mv88e6393x_sgmii_pcs_ops;
- mpcs->sgmii_pcs.neg_mode = true;
mpcs->xg_pcs.ops = &mv88e6393x_xg_pcs_ops;
- mpcs->xg_pcs.neg_mode = true;
mpcs->supports_5g = true;
err = mv88e6393x_erratum_4_6(mpcs);
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index e8cb4da15dbe..a36b8b07030e 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -1634,7 +1634,6 @@ static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
int port)
{
qpcs->pcs.ops = &qca8k_pcs_ops;
- qpcs->pcs.neg_mode = true;
/* We don't have interrupts for link changes, so we need to poll */
qpcs->pcs.poll = true;
diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c
index 66974379334a..31ea8130a495 100644
--- a/drivers/net/dsa/rzn1_a5psw.c
+++ b/drivers/net/dsa/rzn1_a5psw.c
@@ -1248,18 +1248,16 @@ static int a5psw_probe(struct platform_device *pdev)
if (ret)
goto clk_disable;
- mdio = of_get_child_by_name(dev->of_node, "mdio");
- if (of_device_is_available(mdio)) {
+ mdio = of_get_available_child_by_name(dev->of_node, "mdio");
+ if (mdio) {
ret = a5psw_probe_mdio(a5psw, mdio);
+ of_node_put(mdio);
if (ret) {
- of_node_put(mdio);
dev_err(dev, "Failed to register MDIO: %d\n", ret);
goto hclk_disable;
}
}
- of_node_put(mdio);
-
ds = &a5psw->ds;
ds->dev = dev;
ds->num_ports = A5PSW_PORTS_NUM;
diff --git a/drivers/net/dsa/sja1105/sja1105_ethtool.c b/drivers/net/dsa/sja1105/sja1105_ethtool.c
index 2ea64b1d026d..84d7d3f66bd0 100644
--- a/drivers/net/dsa/sja1105/sja1105_ethtool.c
+++ b/drivers/net/dsa/sja1105/sja1105_ethtool.c
@@ -571,6 +571,9 @@ void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
max_ctr = __MAX_SJA1105PQRS_PORT_COUNTER;
for (i = 0; i < max_ctr; i++) {
+ if (!strlen(sja1105_port_counters[i].name))
+ continue;
+
rc = sja1105_port_counter_read(priv, port, i, &data[k++]);
if (rc) {
dev_err(ds->dev,
@@ -596,8 +599,12 @@ void sja1105_get_strings(struct dsa_switch *ds, int port,
else
max_ctr = __MAX_SJA1105PQRS_PORT_COUNTER;
- for (i = 0; i < max_ctr; i++)
+ for (i = 0; i < max_ctr; i++) {
+ if (!strlen(sja1105_port_counters[i].name))
+ continue;
+
ethtool_puts(&data, sja1105_port_counters[i].name);
+ }
}
int sja1105_get_sset_count(struct dsa_switch *ds, int port, int sset)
diff --git a/drivers/net/dsa/sja1105/sja1105_mdio.c b/drivers/net/dsa/sja1105/sja1105_mdio.c
index 84b7169f2974..8d535c033cef 100644
--- a/drivers/net/dsa/sja1105/sja1105_mdio.c
+++ b/drivers/net/dsa/sja1105/sja1105_mdio.c
@@ -468,13 +468,10 @@ int sja1105_mdiobus_register(struct dsa_switch *ds)
if (rc)
return rc;
- mdio_node = of_get_child_by_name(switch_node, "mdios");
+ mdio_node = of_get_available_child_by_name(switch_node, "mdios");
if (!mdio_node)
return 0;
- if (!of_device_is_available(mdio_node))
- goto out_put_mdio_node;
-
if (regs->mdio_100base_tx != SJA1105_RSV_ADDR) {
rc = sja1105_mdiobus_base_tx_register(priv, mdio_node);
if (rc)
@@ -487,7 +484,6 @@ int sja1105_mdiobus_register(struct dsa_switch *ds)
goto err_free_base_tx_mdiobus;
}
-out_put_mdio_node:
of_node_put(mdio_node);
return 0;
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index a1f4ca6ad888..08b45fdd1d24 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -61,17 +61,21 @@ enum sja1105_ptp_clk_mode {
int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
{
struct sja1105_private *priv = ds->priv;
+ unsigned long hwts_tx_en, hwts_rx_en;
struct hwtstamp_config config;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
+ hwts_tx_en = priv->hwts_tx_en;
+ hwts_rx_en = priv->hwts_rx_en;
+
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
- priv->hwts_tx_en &= ~BIT(port);
+ hwts_tx_en &= ~BIT(port);
break;
case HWTSTAMP_TX_ON:
- priv->hwts_tx_en |= BIT(port);
+ hwts_tx_en |= BIT(port);
break;
default:
return -ERANGE;
@@ -79,15 +83,21 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
- priv->hwts_rx_en &= ~BIT(port);
+ hwts_rx_en &= ~BIT(port);
break;
- default:
- priv->hwts_rx_en |= BIT(port);
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ hwts_rx_en |= BIT(port);
break;
+ default:
+ return -ERANGE;
}
if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
return -EFAULT;
+
+ priv->hwts_tx_en = hwts_tx_en;
+ priv->hwts_rx_en = hwts_rx_en;
+
return 0;
}
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
index 3d790f8c6f4d..ffece8a400a6 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@@ -1917,8 +1917,10 @@ int sja1105_table_delete_entry(struct sja1105_table *table, int i)
if (i > table->entry_count)
return -ERANGE;
- memmove(entries + i * entry_size, entries + (i + 1) * entry_size,
- (table->entry_count - i) * entry_size);
+ if (i + 1 < table->entry_count) {
+ memmove(entries + i * entry_size, entries + (i + 1) * entry_size,
+ (table->entry_count - i - 1) * entry_size);
+ }
table->entry_count--;
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 005d79975f3b..a4938c6a5ebb 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -38,6 +38,7 @@
#include <linux/moduleparam.h>
#include <linux/rtnetlink.h>
#include <linux/net_tstamp.h>
+#include <net/netdev_lock.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 977b42bc1e8c..f86d4557d8d7 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -20,6 +20,7 @@ source "drivers/net/ethernet/actions/Kconfig"
source "drivers/net/ethernet/adaptec/Kconfig"
source "drivers/net/ethernet/aeroflex/Kconfig"
source "drivers/net/ethernet/agere/Kconfig"
+source "drivers/net/ethernet/airoha/Kconfig"
source "drivers/net/ethernet/alacritech/Kconfig"
source "drivers/net/ethernet/allwinner/Kconfig"
source "drivers/net/ethernet/alteon/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 99fa180dedb8..67182339469a 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/
obj-$(CONFIG_GRETH) += aeroflex/
obj-$(CONFIG_NET_VENDOR_ADI) += adi/
obj-$(CONFIG_NET_VENDOR_AGERE) += agere/
+obj-$(CONFIG_NET_VENDOR_AIROHA) += airoha/
obj-$(CONFIG_NET_VENDOR_ALACRITECH) += alacritech/
obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c
index 115f48b3342c..0a08da799255 100644
--- a/drivers/net/ethernet/actions/owl-emac.c
+++ b/drivers/net/ethernet/actions/owl-emac.c
@@ -1325,15 +1325,10 @@ static int owl_emac_mdio_init(struct net_device *netdev)
struct device_node *mdio_node;
int ret;
- mdio_node = of_get_child_by_name(dev->of_node, "mdio");
+ mdio_node = of_get_available_child_by_name(dev->of_node, "mdio");
if (!mdio_node)
return -ENODEV;
- if (!of_device_is_available(mdio_node)) {
- ret = -ENODEV;
- goto err_put_node;
- }
-
priv->mii = devm_mdiobus_alloc(dev);
if (!priv->mii) {
ret = -ENOMEM;
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index 68fad5575fd4..30f9d271e595 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -1599,7 +1599,7 @@ static int adin1110_probe_netdevs(struct adin1110_priv *priv)
netdev->netdev_ops = &adin1110_netdev_ops;
netdev->ethtool_ops = &adin1110_ethtool_ops;
netdev->priv_flags |= IFF_UNICAST_FLT;
- netdev->netns_local = true;
+ netdev->netns_immutable = true;
port_priv->phydev = get_phy_device(priv->mii_bus, i + 1, false);
if (IS_ERR(port_priv->phydev)) {
diff --git a/drivers/net/ethernet/airoha/Kconfig b/drivers/net/ethernet/airoha/Kconfig
new file mode 100644
index 000000000000..1a4cf6a259f6
--- /dev/null
+++ b/drivers/net/ethernet/airoha/Kconfig
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_VENDOR_AIROHA
+ bool "Airoha devices"
+ depends on ARCH_AIROHA || COMPILE_TEST
+ help
+ If you have an Airoha SoC with ethernet, say Y.
+
+if NET_VENDOR_AIROHA
+
+config NET_AIROHA_NPU
+ tristate "Airoha NPU support"
+ select WANT_DEV_COREDUMP
+ select REGMAP_MMIO
+ help
+ This driver supports the Airoha Network Processor (NPU) available
+ on the Airoha SoC family.
+
+config NET_AIROHA
+ tristate "Airoha SoC Gigabit Ethernet support"
+ depends on NET_DSA || !NET_DSA
+ select NET_AIROHA_NPU
+ select PAGE_POOL
+ help
+ This driver supports the gigabit ethernet MACs in the
+ Airoha SoC family.
+
+endif #NET_VENDOR_AIROHA
diff --git a/drivers/net/ethernet/airoha/Makefile b/drivers/net/ethernet/airoha/Makefile
new file mode 100644
index 000000000000..94468053e34b
--- /dev/null
+++ b/drivers/net/ethernet/airoha/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Airoha SoCs built-in ethernet MACs
+#
+
+obj-$(CONFIG_NET_AIROHA) += airoha-eth.o
+airoha-eth-y := airoha_eth.o airoha_ppe.o
+airoha-eth-$(CONFIG_DEBUG_FS) += airoha_ppe_debugfs.o
+obj-$(CONFIG_NET_AIROHA_NPU) += airoha_npu.o
diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index 09f448f29124..c0a642568ac1 100644
--- a/drivers/net/ethernet/mediatek/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -3,925 +3,30 @@
* Copyright (c) 2024 AIROHA Inc
* Author: Lorenzo Bianconi <lorenzo@kernel.org>
*/
-#include <linux/etherdevice.h>
-#include <linux/iopoll.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
-#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/u64_stats_sync.h>
-#include <net/dsa.h>
+#include <net/dst_metadata.h>
#include <net/page_pool/helpers.h>
#include <net/pkt_cls.h>
#include <uapi/linux/ppp_defs.h>
-#define AIROHA_MAX_NUM_GDM_PORTS 1
-#define AIROHA_MAX_NUM_QDMA 2
-#define AIROHA_MAX_NUM_RSTS 3
-#define AIROHA_MAX_NUM_XSI_RSTS 5
-#define AIROHA_MAX_MTU 2000
-#define AIROHA_MAX_PACKET_SIZE 2048
-#define AIROHA_NUM_QOS_CHANNELS 4
-#define AIROHA_NUM_QOS_QUEUES 8
-#define AIROHA_NUM_TX_RING 32
-#define AIROHA_NUM_RX_RING 32
-#define AIROHA_NUM_NETDEV_TX_RINGS (AIROHA_NUM_TX_RING + \
- AIROHA_NUM_QOS_CHANNELS)
-#define AIROHA_FE_MC_MAX_VLAN_TABLE 64
-#define AIROHA_FE_MC_MAX_VLAN_PORT 16
-#define AIROHA_NUM_TX_IRQ 2
-#define HW_DSCP_NUM 2048
-#define IRQ_QUEUE_LEN(_n) ((_n) ? 1024 : 2048)
-#define TX_DSCP_NUM 1024
-#define RX_DSCP_NUM(_n) \
- ((_n) == 2 ? 128 : \
- (_n) == 11 ? 128 : \
- (_n) == 15 ? 128 : \
- (_n) == 0 ? 1024 : 16)
-
-#define PSE_RSV_PAGES 128
-#define PSE_QUEUE_RSV_PAGES 64
-
-#define QDMA_METER_IDX(_n) ((_n) & 0xff)
-#define QDMA_METER_GROUP(_n) (((_n) >> 8) & 0x3)
-
-/* FE */
-#define PSE_BASE 0x0100
-#define CSR_IFC_BASE 0x0200
-#define CDM1_BASE 0x0400
-#define GDM1_BASE 0x0500
-#define PPE1_BASE 0x0c00
-
-#define CDM2_BASE 0x1400
-#define GDM2_BASE 0x1500
-
-#define GDM3_BASE 0x1100
-#define GDM4_BASE 0x2500
-
-#define GDM_BASE(_n) \
- ((_n) == 4 ? GDM4_BASE : \
- (_n) == 3 ? GDM3_BASE : \
- (_n) == 2 ? GDM2_BASE : GDM1_BASE)
-
-#define REG_FE_DMA_GLO_CFG 0x0000
-#define FE_DMA_GLO_L2_SPACE_MASK GENMASK(7, 4)
-#define FE_DMA_GLO_PG_SZ_MASK BIT(3)
-
-#define REG_FE_RST_GLO_CFG 0x0004
-#define FE_RST_GDM4_MBI_ARB_MASK BIT(3)
-#define FE_RST_GDM3_MBI_ARB_MASK BIT(2)
-#define FE_RST_CORE_MASK BIT(0)
-
-#define REG_FE_WAN_MAC_H 0x0030
-#define REG_FE_LAN_MAC_H 0x0040
-
-#define REG_FE_MAC_LMIN(_n) ((_n) + 0x04)
-#define REG_FE_MAC_LMAX(_n) ((_n) + 0x08)
-
-#define REG_FE_CDM1_OQ_MAP0 0x0050
-#define REG_FE_CDM1_OQ_MAP1 0x0054
-#define REG_FE_CDM1_OQ_MAP2 0x0058
-#define REG_FE_CDM1_OQ_MAP3 0x005c
-
-#define REG_FE_PCE_CFG 0x0070
-#define PCE_DPI_EN_MASK BIT(2)
-#define PCE_KA_EN_MASK BIT(1)
-#define PCE_MC_EN_MASK BIT(0)
-
-#define REG_FE_PSE_QUEUE_CFG_WR 0x0080
-#define PSE_CFG_PORT_ID_MASK GENMASK(27, 24)
-#define PSE_CFG_QUEUE_ID_MASK GENMASK(20, 16)
-#define PSE_CFG_WR_EN_MASK BIT(8)
-#define PSE_CFG_OQRSV_SEL_MASK BIT(0)
-
-#define REG_FE_PSE_QUEUE_CFG_VAL 0x0084
-#define PSE_CFG_OQ_RSV_MASK GENMASK(13, 0)
-
-#define PSE_FQ_CFG 0x008c
-#define PSE_FQ_LIMIT_MASK GENMASK(14, 0)
-
-#define REG_FE_PSE_BUF_SET 0x0090
-#define PSE_SHARE_USED_LTHD_MASK GENMASK(31, 16)
-#define PSE_ALLRSV_MASK GENMASK(14, 0)
-
-#define REG_PSE_SHARE_USED_THD 0x0094
-#define PSE_SHARE_USED_MTHD_MASK GENMASK(31, 16)
-#define PSE_SHARE_USED_HTHD_MASK GENMASK(15, 0)
-
-#define REG_GDM_MISC_CFG 0x0148
-#define GDM2_RDM_ACK_WAIT_PREF_MASK BIT(9)
-#define GDM2_CHN_VLD_MODE_MASK BIT(5)
-
-#define REG_FE_CSR_IFC_CFG CSR_IFC_BASE
-#define FE_IFC_EN_MASK BIT(0)
-
-#define REG_FE_VIP_PORT_EN 0x01f0
-#define REG_FE_IFC_PORT_EN 0x01f4
-
-#define REG_PSE_IQ_REV1 (PSE_BASE + 0x08)
-#define PSE_IQ_RES1_P2_MASK GENMASK(23, 16)
-
-#define REG_PSE_IQ_REV2 (PSE_BASE + 0x0c)
-#define PSE_IQ_RES2_P5_MASK GENMASK(15, 8)
-#define PSE_IQ_RES2_P4_MASK GENMASK(7, 0)
-
-#define REG_FE_VIP_EN(_n) (0x0300 + ((_n) << 3))
-#define PATN_FCPU_EN_MASK BIT(7)
-#define PATN_SWP_EN_MASK BIT(6)
-#define PATN_DP_EN_MASK BIT(5)
-#define PATN_SP_EN_MASK BIT(4)
-#define PATN_TYPE_MASK GENMASK(3, 1)
-#define PATN_EN_MASK BIT(0)
-
-#define REG_FE_VIP_PATN(_n) (0x0304 + ((_n) << 3))
-#define PATN_DP_MASK GENMASK(31, 16)
-#define PATN_SP_MASK GENMASK(15, 0)
-
-#define REG_CDM1_VLAN_CTRL CDM1_BASE
-#define CDM1_VLAN_MASK GENMASK(31, 16)
-
-#define REG_CDM1_FWD_CFG (CDM1_BASE + 0x08)
-#define CDM1_VIP_QSEL_MASK GENMASK(24, 20)
-
-#define REG_CDM1_CRSN_QSEL(_n) (CDM1_BASE + 0x10 + ((_n) << 2))
-#define CDM1_CRSN_QSEL_REASON_MASK(_n) \
- GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
-
-#define REG_CDM2_FWD_CFG (CDM2_BASE + 0x08)
-#define CDM2_OAM_QSEL_MASK GENMASK(31, 27)
-#define CDM2_VIP_QSEL_MASK GENMASK(24, 20)
-
-#define REG_CDM2_CRSN_QSEL(_n) (CDM2_BASE + 0x10 + ((_n) << 2))
-#define CDM2_CRSN_QSEL_REASON_MASK(_n) \
- GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
-
-#define REG_GDM_FWD_CFG(_n) GDM_BASE(_n)
-#define GDM_DROP_CRC_ERR BIT(23)
-#define GDM_IP4_CKSUM BIT(22)
-#define GDM_TCP_CKSUM BIT(21)
-#define GDM_UDP_CKSUM BIT(20)
-#define GDM_UCFQ_MASK GENMASK(15, 12)
-#define GDM_BCFQ_MASK GENMASK(11, 8)
-#define GDM_MCFQ_MASK GENMASK(7, 4)
-#define GDM_OCFQ_MASK GENMASK(3, 0)
-
-#define REG_GDM_INGRESS_CFG(_n) (GDM_BASE(_n) + 0x10)
-#define GDM_INGRESS_FC_EN_MASK BIT(1)
-#define GDM_STAG_EN_MASK BIT(0)
-
-#define REG_GDM_LEN_CFG(_n) (GDM_BASE(_n) + 0x14)
-#define GDM_SHORT_LEN_MASK GENMASK(13, 0)
-#define GDM_LONG_LEN_MASK GENMASK(29, 16)
-
-#define REG_FE_CPORT_CFG (GDM1_BASE + 0x40)
-#define FE_CPORT_PAD BIT(26)
-#define FE_CPORT_PORT_XFC_MASK BIT(25)
-#define FE_CPORT_QUEUE_XFC_MASK BIT(24)
-
-#define REG_FE_GDM_MIB_CLEAR(_n) (GDM_BASE(_n) + 0xf0)
-#define FE_GDM_MIB_RX_CLEAR_MASK BIT(1)
-#define FE_GDM_MIB_TX_CLEAR_MASK BIT(0)
-
-#define REG_FE_GDM1_MIB_CFG (GDM1_BASE + 0xf4)
-#define FE_STRICT_RFC2819_MODE_MASK BIT(31)
-#define FE_GDM1_TX_MIB_SPLIT_EN_MASK BIT(17)
-#define FE_GDM1_RX_MIB_SPLIT_EN_MASK BIT(16)
-#define FE_TX_MIB_ID_MASK GENMASK(15, 8)
-#define FE_RX_MIB_ID_MASK GENMASK(7, 0)
-
-#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x104)
-#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x10c)
-#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x110)
-#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x114)
-#define REG_FE_GDM_TX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x118)
-#define REG_FE_GDM_TX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x11c)
-#define REG_FE_GDM_TX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x120)
-#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x124)
-#define REG_FE_GDM_TX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x128)
-#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x12c)
-#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x130)
-#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x134)
-#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x138)
-#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x13c)
-#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x140)
-
-#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x148)
-#define REG_FE_GDM_RX_FC_DROP_CNT(_n) (GDM_BASE(_n) + 0x14c)
-#define REG_FE_GDM_RX_RC_DROP_CNT(_n) (GDM_BASE(_n) + 0x150)
-#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n) (GDM_BASE(_n) + 0x154)
-#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n) (GDM_BASE(_n) + 0x158)
-#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x15c)
-#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x160)
-#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x164)
-#define REG_FE_GDM_RX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x168)
-#define REG_FE_GDM_RX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x16c)
-#define REG_FE_GDM_RX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x170)
-#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n) (GDM_BASE(_n) + 0x174)
-#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n) (GDM_BASE(_n) + 0x178)
-#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n) (GDM_BASE(_n) + 0x17c)
-#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x180)
-#define REG_FE_GDM_RX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x184)
-#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x188)
-#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x18c)
-#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x190)
-#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x194)
-#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x198)
-#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x19c)
-
-#define REG_PPE1_TB_HASH_CFG (PPE1_BASE + 0x250)
-#define PPE1_SRAM_TABLE_EN_MASK BIT(0)
-#define PPE1_SRAM_HASH1_EN_MASK BIT(8)
-#define PPE1_DRAM_TABLE_EN_MASK BIT(16)
-#define PPE1_DRAM_HASH1_EN_MASK BIT(24)
-
-#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280)
-#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284)
-#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288)
-#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c)
-
-#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290)
-#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294)
-#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298)
-#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c)
-#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2b8)
-#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc)
-#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0)
-#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4)
-#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8)
-#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc)
-#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8)
-#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec)
-#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0)
-#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4)
-#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8)
-#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc)
-
-#define REG_GDM2_CHN_RLS (GDM2_BASE + 0x20)
-#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25)
-#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17)
-
-#define REG_GDM3_FWD_CFG GDM3_BASE
-#define GDM3_PAD_EN_MASK BIT(28)
-
-#define REG_GDM4_FWD_CFG GDM4_BASE
-#define GDM4_PAD_EN_MASK BIT(28)
-#define GDM4_SPORT_OFFSET0_MASK GENMASK(11, 8)
-
-#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x23c)
-#define GDM4_SPORT_OFF2_MASK GENMASK(19, 16)
-#define GDM4_SPORT_OFF1_MASK GENMASK(15, 12)
-#define GDM4_SPORT_OFF0_MASK GENMASK(11, 8)
-
-#define REG_IP_FRAG_FP 0x2010
-#define IP_ASSEMBLE_PORT_MASK GENMASK(24, 21)
-#define IP_ASSEMBLE_NBQ_MASK GENMASK(20, 16)
-#define IP_FRAGMENT_PORT_MASK GENMASK(8, 5)
-#define IP_FRAGMENT_NBQ_MASK GENMASK(4, 0)
-
-#define REG_MC_VLAN_EN 0x2100
-#define MC_VLAN_EN_MASK BIT(0)
-
-#define REG_MC_VLAN_CFG 0x2104
-#define MC_VLAN_CFG_CMD_DONE_MASK BIT(31)
-#define MC_VLAN_CFG_TABLE_ID_MASK GENMASK(21, 16)
-#define MC_VLAN_CFG_PORT_ID_MASK GENMASK(11, 8)
-#define MC_VLAN_CFG_TABLE_SEL_MASK BIT(4)
-#define MC_VLAN_CFG_RW_MASK BIT(0)
-
-#define REG_MC_VLAN_DATA 0x2108
-
-#define REG_CDM5_RX_OQ1_DROP_CNT 0x29d4
-
-/* QDMA */
-#define REG_QDMA_GLOBAL_CFG 0x0004
-#define GLOBAL_CFG_RX_2B_OFFSET_MASK BIT(31)
-#define GLOBAL_CFG_DMA_PREFERENCE_MASK GENMASK(30, 29)
-#define GLOBAL_CFG_CPU_TXR_RR_MASK BIT(28)
-#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK BIT(27)
-#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK BIT(26)
-#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK BIT(25)
-#define GLOBAL_CFG_OAM_MODIFY_MASK BIT(24)
-#define GLOBAL_CFG_RESET_MASK BIT(23)
-#define GLOBAL_CFG_RESET_DONE_MASK BIT(22)
-#define GLOBAL_CFG_MULTICAST_EN_MASK BIT(21)
-#define GLOBAL_CFG_IRQ1_EN_MASK BIT(20)
-#define GLOBAL_CFG_IRQ0_EN_MASK BIT(19)
-#define GLOBAL_CFG_LOOPCNT_EN_MASK BIT(18)
-#define GLOBAL_CFG_RD_BYPASS_WR_MASK BIT(17)
-#define GLOBAL_CFG_QDMA_LOOPBACK_MASK BIT(16)
-#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK GENMASK(13, 8)
-#define GLOBAL_CFG_CHECK_DONE_MASK BIT(7)
-#define GLOBAL_CFG_TX_WB_DONE_MASK BIT(6)
-#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK GENMASK(5, 4)
-#define GLOBAL_CFG_RX_DMA_BUSY_MASK BIT(3)
-#define GLOBAL_CFG_RX_DMA_EN_MASK BIT(2)
-#define GLOBAL_CFG_TX_DMA_BUSY_MASK BIT(1)
-#define GLOBAL_CFG_TX_DMA_EN_MASK BIT(0)
-
-#define REG_FWD_DSCP_BASE 0x0010
-#define REG_FWD_BUF_BASE 0x0014
-
-#define REG_HW_FWD_DSCP_CFG 0x0018
-#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK GENMASK(29, 28)
-#define HW_FWD_DSCP_SCATTER_LEN_MASK GENMASK(17, 16)
-#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK GENMASK(15, 0)
-
-#define REG_INT_STATUS(_n) \
- (((_n) == 4) ? 0x0730 : \
- ((_n) == 3) ? 0x0724 : \
- ((_n) == 2) ? 0x0720 : \
- ((_n) == 1) ? 0x0024 : 0x0020)
-
-#define REG_INT_ENABLE(_n) \
- (((_n) == 4) ? 0x0750 : \
- ((_n) == 3) ? 0x0744 : \
- ((_n) == 2) ? 0x0740 : \
- ((_n) == 1) ? 0x002c : 0x0028)
-
-/* QDMA_CSR_INT_ENABLE1 */
-#define RX15_COHERENT_INT_MASK BIT(31)
-#define RX14_COHERENT_INT_MASK BIT(30)
-#define RX13_COHERENT_INT_MASK BIT(29)
-#define RX12_COHERENT_INT_MASK BIT(28)
-#define RX11_COHERENT_INT_MASK BIT(27)
-#define RX10_COHERENT_INT_MASK BIT(26)
-#define RX9_COHERENT_INT_MASK BIT(25)
-#define RX8_COHERENT_INT_MASK BIT(24)
-#define RX7_COHERENT_INT_MASK BIT(23)
-#define RX6_COHERENT_INT_MASK BIT(22)
-#define RX5_COHERENT_INT_MASK BIT(21)
-#define RX4_COHERENT_INT_MASK BIT(20)
-#define RX3_COHERENT_INT_MASK BIT(19)
-#define RX2_COHERENT_INT_MASK BIT(18)
-#define RX1_COHERENT_INT_MASK BIT(17)
-#define RX0_COHERENT_INT_MASK BIT(16)
-#define TX7_COHERENT_INT_MASK BIT(15)
-#define TX6_COHERENT_INT_MASK BIT(14)
-#define TX5_COHERENT_INT_MASK BIT(13)
-#define TX4_COHERENT_INT_MASK BIT(12)
-#define TX3_COHERENT_INT_MASK BIT(11)
-#define TX2_COHERENT_INT_MASK BIT(10)
-#define TX1_COHERENT_INT_MASK BIT(9)
-#define TX0_COHERENT_INT_MASK BIT(8)
-#define CNT_OVER_FLOW_INT_MASK BIT(7)
-#define IRQ1_FULL_INT_MASK BIT(5)
-#define IRQ1_INT_MASK BIT(4)
-#define HWFWD_DSCP_LOW_INT_MASK BIT(3)
-#define HWFWD_DSCP_EMPTY_INT_MASK BIT(2)
-#define IRQ0_FULL_INT_MASK BIT(1)
-#define IRQ0_INT_MASK BIT(0)
-
-#define TX_DONE_INT_MASK(_n) \
- ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \
- : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
-
-#define INT_TX_MASK \
- (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \
- IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
-
-#define INT_IDX0_MASK \
- (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK | \
- TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK | \
- TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK | \
- TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK | \
- RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK | \
- RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK | \
- RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK | \
- RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK | \
- RX15_COHERENT_INT_MASK | INT_TX_MASK)
-
-/* QDMA_CSR_INT_ENABLE2 */
-#define RX15_NO_CPU_DSCP_INT_MASK BIT(31)
-#define RX14_NO_CPU_DSCP_INT_MASK BIT(30)
-#define RX13_NO_CPU_DSCP_INT_MASK BIT(29)
-#define RX12_NO_CPU_DSCP_INT_MASK BIT(28)
-#define RX11_NO_CPU_DSCP_INT_MASK BIT(27)
-#define RX10_NO_CPU_DSCP_INT_MASK BIT(26)
-#define RX9_NO_CPU_DSCP_INT_MASK BIT(25)
-#define RX8_NO_CPU_DSCP_INT_MASK BIT(24)
-#define RX7_NO_CPU_DSCP_INT_MASK BIT(23)
-#define RX6_NO_CPU_DSCP_INT_MASK BIT(22)
-#define RX5_NO_CPU_DSCP_INT_MASK BIT(21)
-#define RX4_NO_CPU_DSCP_INT_MASK BIT(20)
-#define RX3_NO_CPU_DSCP_INT_MASK BIT(19)
-#define RX2_NO_CPU_DSCP_INT_MASK BIT(18)
-#define RX1_NO_CPU_DSCP_INT_MASK BIT(17)
-#define RX0_NO_CPU_DSCP_INT_MASK BIT(16)
-#define RX15_DONE_INT_MASK BIT(15)
-#define RX14_DONE_INT_MASK BIT(14)
-#define RX13_DONE_INT_MASK BIT(13)
-#define RX12_DONE_INT_MASK BIT(12)
-#define RX11_DONE_INT_MASK BIT(11)
-#define RX10_DONE_INT_MASK BIT(10)
-#define RX9_DONE_INT_MASK BIT(9)
-#define RX8_DONE_INT_MASK BIT(8)
-#define RX7_DONE_INT_MASK BIT(7)
-#define RX6_DONE_INT_MASK BIT(6)
-#define RX5_DONE_INT_MASK BIT(5)
-#define RX4_DONE_INT_MASK BIT(4)
-#define RX3_DONE_INT_MASK BIT(3)
-#define RX2_DONE_INT_MASK BIT(2)
-#define RX1_DONE_INT_MASK BIT(1)
-#define RX0_DONE_INT_MASK BIT(0)
-
-#define RX_DONE_INT_MASK \
- (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK | \
- RX2_DONE_INT_MASK | RX3_DONE_INT_MASK | \
- RX4_DONE_INT_MASK | RX7_DONE_INT_MASK | \
- RX8_DONE_INT_MASK | RX9_DONE_INT_MASK | \
- RX15_DONE_INT_MASK)
-#define INT_IDX1_MASK \
- (RX_DONE_INT_MASK | \
- RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK | \
- RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK | \
- RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK | \
- RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK | \
- RX15_NO_CPU_DSCP_INT_MASK)
-
-/* QDMA_CSR_INT_ENABLE5 */
-#define TX31_COHERENT_INT_MASK BIT(31)
-#define TX30_COHERENT_INT_MASK BIT(30)
-#define TX29_COHERENT_INT_MASK BIT(29)
-#define TX28_COHERENT_INT_MASK BIT(28)
-#define TX27_COHERENT_INT_MASK BIT(27)
-#define TX26_COHERENT_INT_MASK BIT(26)
-#define TX25_COHERENT_INT_MASK BIT(25)
-#define TX24_COHERENT_INT_MASK BIT(24)
-#define TX23_COHERENT_INT_MASK BIT(23)
-#define TX22_COHERENT_INT_MASK BIT(22)
-#define TX21_COHERENT_INT_MASK BIT(21)
-#define TX20_COHERENT_INT_MASK BIT(20)
-#define TX19_COHERENT_INT_MASK BIT(19)
-#define TX18_COHERENT_INT_MASK BIT(18)
-#define TX17_COHERENT_INT_MASK BIT(17)
-#define TX16_COHERENT_INT_MASK BIT(16)
-#define TX15_COHERENT_INT_MASK BIT(15)
-#define TX14_COHERENT_INT_MASK BIT(14)
-#define TX13_COHERENT_INT_MASK BIT(13)
-#define TX12_COHERENT_INT_MASK BIT(12)
-#define TX11_COHERENT_INT_MASK BIT(11)
-#define TX10_COHERENT_INT_MASK BIT(10)
-#define TX9_COHERENT_INT_MASK BIT(9)
-#define TX8_COHERENT_INT_MASK BIT(8)
-
-#define INT_IDX4_MASK \
- (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK | \
- TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK | \
- TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK | \
- TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK | \
- TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK | \
- TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK | \
- TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK | \
- TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK | \
- TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK | \
- TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK | \
- TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK | \
- TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK)
-
-#define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050)
-
-#define REG_TX_IRQ_CFG(_n) ((_n) ? 0x004c : 0x0054)
-#define TX_IRQ_THR_MASK GENMASK(27, 16)
-#define TX_IRQ_DEPTH_MASK GENMASK(11, 0)
-
-#define REG_IRQ_CLEAR_LEN(_n) ((_n) ? 0x0064 : 0x0058)
-#define IRQ_CLEAR_LEN_MASK GENMASK(7, 0)
-
-#define REG_IRQ_STATUS(_n) ((_n) ? 0x0068 : 0x005c)
-#define IRQ_ENTRY_LEN_MASK GENMASK(27, 16)
-#define IRQ_HEAD_IDX_MASK GENMASK(11, 0)
-
-#define REG_TX_RING_BASE(_n) \
- (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))
-
-#define REG_TX_RING_BLOCKING(_n) \
- (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))
-
-#define TX_RING_IRQ_BLOCKING_MAP_MASK BIT(6)
-#define TX_RING_IRQ_BLOCKING_CFG_MASK BIT(4)
-#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK BIT(2)
-#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK BIT(1)
-#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK BIT(0)
-
-#define REG_TX_CPU_IDX(_n) \
- (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))
-
-#define TX_RING_CPU_IDX_MASK GENMASK(15, 0)
-
-#define REG_TX_DMA_IDX(_n) \
- (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))
-
-#define TX_RING_DMA_IDX_MASK GENMASK(15, 0)
-
-#define IRQ_RING_IDX_MASK GENMASK(20, 16)
-#define IRQ_DESC_IDX_MASK GENMASK(15, 0)
-
-#define REG_RX_RING_BASE(_n) \
- (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))
-
-#define REG_RX_RING_SIZE(_n) \
- (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))
-
-#define RX_RING_THR_MASK GENMASK(31, 16)
-#define RX_RING_SIZE_MASK GENMASK(15, 0)
-
-#define REG_RX_CPU_IDX(_n) \
- (((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))
-
-#define RX_RING_CPU_IDX_MASK GENMASK(15, 0)
-
-#define REG_RX_DMA_IDX(_n) \
- (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))
-
-#define REG_RX_DELAY_INT_IDX(_n) \
- (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
-
-#define RX_DELAY_INT_MASK GENMASK(15, 0)
-
-#define RX_RING_DMA_IDX_MASK GENMASK(15, 0)
-
-#define REG_INGRESS_TRTCM_CFG 0x0070
-#define INGRESS_TRTCM_EN_MASK BIT(31)
-#define INGRESS_TRTCM_MODE_MASK BIT(30)
-#define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
-#define INGRESS_FAST_TICK_MASK GENMASK(15, 0)
-
-#define REG_QUEUE_CLOSE_CFG(_n) (0x00a0 + ((_n) & 0xfc))
-#define TXQ_DISABLE_CHAN_QUEUE_MASK(_n, _m) BIT((_m) + (((_n) & 0x3) << 3))
-
-#define REG_TXQ_DIS_CFG_BASE(_n) ((_n) ? 0x20a0 : 0x00a0)
-#define REG_TXQ_DIS_CFG(_n, _m) (REG_TXQ_DIS_CFG_BASE((_n)) + (_m) << 2)
-
-#define REG_CNTR_CFG(_n) (0x0400 + ((_n) << 3))
-#define CNTR_EN_MASK BIT(31)
-#define CNTR_ALL_CHAN_EN_MASK BIT(30)
-#define CNTR_ALL_QUEUE_EN_MASK BIT(29)
-#define CNTR_ALL_DSCP_RING_EN_MASK BIT(28)
-#define CNTR_SRC_MASK GENMASK(27, 24)
-#define CNTR_DSCP_RING_MASK GENMASK(20, 16)
-#define CNTR_CHAN_MASK GENMASK(7, 3)
-#define CNTR_QUEUE_MASK GENMASK(2, 0)
-
-#define REG_CNTR_VAL(_n) (0x0404 + ((_n) << 3))
-
-#define REG_LMGR_INIT_CFG 0x1000
-#define LMGR_INIT_START BIT(31)
-#define LMGR_SRAM_MODE_MASK BIT(30)
-#define HW_FWD_PKTSIZE_OVERHEAD_MASK GENMASK(27, 20)
-#define HW_FWD_DESC_NUM_MASK GENMASK(16, 0)
-
-#define REG_FWD_DSCP_LOW_THR 0x1004
-#define FWD_DSCP_LOW_THR_MASK GENMASK(17, 0)
-
-#define REG_EGRESS_RATE_METER_CFG 0x100c
-#define EGRESS_RATE_METER_EN_MASK BIT(31)
-#define EGRESS_RATE_METER_EQ_RATE_EN_MASK BIT(17)
-#define EGRESS_RATE_METER_WINDOW_SZ_MASK GENMASK(16, 12)
-#define EGRESS_RATE_METER_TIMESLICE_MASK GENMASK(10, 0)
-
-#define REG_EGRESS_TRTCM_CFG 0x1010
-#define EGRESS_TRTCM_EN_MASK BIT(31)
-#define EGRESS_TRTCM_MODE_MASK BIT(30)
-#define EGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
-#define EGRESS_FAST_TICK_MASK GENMASK(15, 0)
-
-#define TRTCM_PARAM_RW_MASK BIT(31)
-#define TRTCM_PARAM_RW_DONE_MASK BIT(30)
-#define TRTCM_PARAM_TYPE_MASK GENMASK(29, 28)
-#define TRTCM_METER_GROUP_MASK GENMASK(27, 26)
-#define TRTCM_PARAM_INDEX_MASK GENMASK(23, 17)
-#define TRTCM_PARAM_RATE_TYPE_MASK BIT(16)
-
-#define REG_TRTCM_CFG_PARAM(_n) ((_n) + 0x4)
-#define REG_TRTCM_DATA_LOW(_n) ((_n) + 0x8)
-#define REG_TRTCM_DATA_HIGH(_n) ((_n) + 0xc)
-
-#define REG_TXWRR_MODE_CFG 0x1020
-#define TWRR_WEIGHT_SCALE_MASK BIT(31)
-#define TWRR_WEIGHT_BASE_MASK BIT(3)
-
-#define REG_TXWRR_WEIGHT_CFG 0x1024
-#define TWRR_RW_CMD_MASK BIT(31)
-#define TWRR_RW_CMD_DONE BIT(30)
-#define TWRR_CHAN_IDX_MASK GENMASK(23, 19)
-#define TWRR_QUEUE_IDX_MASK GENMASK(18, 16)
-#define TWRR_VALUE_MASK GENMASK(15, 0)
-
-#define REG_PSE_BUF_USAGE_CFG 0x1028
-#define PSE_BUF_ESTIMATE_EN_MASK BIT(29)
-
-#define REG_CHAN_QOS_MODE(_n) (0x1040 + ((_n) << 2))
-#define CHAN_QOS_MODE_MASK(_n) GENMASK(2 + ((_n) << 2), (_n) << 2)
-
-#define REG_GLB_TRTCM_CFG 0x1080
-#define GLB_TRTCM_EN_MASK BIT(31)
-#define GLB_TRTCM_MODE_MASK BIT(30)
-#define GLB_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
-#define GLB_FAST_TICK_MASK GENMASK(15, 0)
-
-#define REG_TXQ_CNGST_CFG 0x10a0
-#define TXQ_CNGST_DROP_EN BIT(31)
-#define TXQ_CNGST_DEI_DROP_EN BIT(30)
-
-#define REG_SLA_TRTCM_CFG 0x1150
-#define SLA_TRTCM_EN_MASK BIT(31)
-#define SLA_TRTCM_MODE_MASK BIT(30)
-#define SLA_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
-#define SLA_FAST_TICK_MASK GENMASK(15, 0)
-
-/* CTRL */
-#define QDMA_DESC_DONE_MASK BIT(31)
-#define QDMA_DESC_DROP_MASK BIT(30) /* tx: drop - rx: overflow */
-#define QDMA_DESC_MORE_MASK BIT(29) /* more SG elements */
-#define QDMA_DESC_DEI_MASK BIT(25)
-#define QDMA_DESC_NO_DROP_MASK BIT(24)
-#define QDMA_DESC_LEN_MASK GENMASK(15, 0)
-/* DATA */
-#define QDMA_DESC_NEXT_ID_MASK GENMASK(15, 0)
-/* TX MSG0 */
-#define QDMA_ETH_TXMSG_MIC_IDX_MASK BIT(30)
-#define QDMA_ETH_TXMSG_SP_TAG_MASK GENMASK(29, 14)
-#define QDMA_ETH_TXMSG_ICO_MASK BIT(13)
-#define QDMA_ETH_TXMSG_UCO_MASK BIT(12)
-#define QDMA_ETH_TXMSG_TCO_MASK BIT(11)
-#define QDMA_ETH_TXMSG_TSO_MASK BIT(10)
-#define QDMA_ETH_TXMSG_FAST_MASK BIT(9)
-#define QDMA_ETH_TXMSG_OAM_MASK BIT(8)
-#define QDMA_ETH_TXMSG_CHAN_MASK GENMASK(7, 3)
-#define QDMA_ETH_TXMSG_QUEUE_MASK GENMASK(2, 0)
-/* TX MSG1 */
-#define QDMA_ETH_TXMSG_NO_DROP BIT(31)
-#define QDMA_ETH_TXMSG_METER_MASK GENMASK(30, 24) /* 0x7f no meters */
-#define QDMA_ETH_TXMSG_FPORT_MASK GENMASK(23, 20)
-#define QDMA_ETH_TXMSG_NBOQ_MASK GENMASK(19, 15)
-#define QDMA_ETH_TXMSG_HWF_MASK BIT(14)
-#define QDMA_ETH_TXMSG_HOP_MASK BIT(13)
-#define QDMA_ETH_TXMSG_PTP_MASK BIT(12)
-#define QDMA_ETH_TXMSG_ACNT_G1_MASK GENMASK(10, 6) /* 0x1f do not count */
-#define QDMA_ETH_TXMSG_ACNT_G0_MASK GENMASK(5, 0) /* 0x3f do not count */
-
-/* RX MSG1 */
-#define QDMA_ETH_RXMSG_DEI_MASK BIT(31)
-#define QDMA_ETH_RXMSG_IP6_MASK BIT(30)
-#define QDMA_ETH_RXMSG_IP4_MASK BIT(29)
-#define QDMA_ETH_RXMSG_IP4F_MASK BIT(28)
-#define QDMA_ETH_RXMSG_L4_VALID_MASK BIT(27)
-#define QDMA_ETH_RXMSG_L4F_MASK BIT(26)
-#define QDMA_ETH_RXMSG_SPORT_MASK GENMASK(25, 21)
-#define QDMA_ETH_RXMSG_CRSN_MASK GENMASK(20, 16)
-#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0)
-
-struct airoha_qdma_desc {
- __le32 rsv;
- __le32 ctrl;
- __le32 addr;
- __le32 data;
- __le32 msg0;
- __le32 msg1;
- __le32 msg2;
- __le32 msg3;
-};
-
-/* CTRL0 */
-#define QDMA_FWD_DESC_CTX_MASK BIT(31)
-#define QDMA_FWD_DESC_RING_MASK GENMASK(30, 28)
-#define QDMA_FWD_DESC_IDX_MASK GENMASK(27, 16)
-#define QDMA_FWD_DESC_LEN_MASK GENMASK(15, 0)
-/* CTRL1 */
-#define QDMA_FWD_DESC_FIRST_IDX_MASK GENMASK(15, 0)
-/* CTRL2 */
-#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK GENMASK(2, 0)
-
-struct airoha_qdma_fwd_desc {
- __le32 addr;
- __le32 ctrl0;
- __le32 ctrl1;
- __le32 ctrl2;
- __le32 msg0;
- __le32 msg1;
- __le32 rsv0;
- __le32 rsv1;
-};
-
-enum {
- QDMA_INT_REG_IDX0,
- QDMA_INT_REG_IDX1,
- QDMA_INT_REG_IDX2,
- QDMA_INT_REG_IDX3,
- QDMA_INT_REG_IDX4,
- QDMA_INT_REG_MAX
-};
-
-enum {
- XSI_PCIE0_PORT,
- XSI_PCIE1_PORT,
- XSI_USB_PORT,
- XSI_AE_PORT,
- XSI_ETH_PORT,
-};
-
-enum {
- XSI_PCIE0_VIP_PORT_MASK = BIT(22),
- XSI_PCIE1_VIP_PORT_MASK = BIT(23),
- XSI_USB_VIP_PORT_MASK = BIT(25),
- XSI_ETH_VIP_PORT_MASK = BIT(24),
-};
-
-enum {
- DEV_STATE_INITIALIZED,
-};
-
-enum {
- CDM_CRSN_QSEL_Q1 = 1,
- CDM_CRSN_QSEL_Q5 = 5,
- CDM_CRSN_QSEL_Q6 = 6,
- CDM_CRSN_QSEL_Q15 = 15,
-};
-
-enum {
- CRSN_08 = 0x8,
- CRSN_21 = 0x15, /* KA */
- CRSN_22 = 0x16, /* hit bind and force route to CPU */
- CRSN_24 = 0x18,
- CRSN_25 = 0x19,
-};
-
-enum {
- FE_PSE_PORT_CDM1,
- FE_PSE_PORT_GDM1,
- FE_PSE_PORT_GDM2,
- FE_PSE_PORT_GDM3,
- FE_PSE_PORT_PPE1,
- FE_PSE_PORT_CDM2,
- FE_PSE_PORT_CDM3,
- FE_PSE_PORT_CDM4,
- FE_PSE_PORT_PPE2,
- FE_PSE_PORT_GDM4,
- FE_PSE_PORT_CDM5,
- FE_PSE_PORT_DROP = 0xf,
-};
-
-enum tx_sched_mode {
- TC_SCH_WRR8,
- TC_SCH_SP,
- TC_SCH_WRR7,
- TC_SCH_WRR6,
- TC_SCH_WRR5,
- TC_SCH_WRR4,
- TC_SCH_WRR3,
- TC_SCH_WRR2,
-};
-
-enum trtcm_param_type {
- TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */
- TRTCM_TOKEN_RATE_MODE,
- TRTCM_BUCKETSIZE_SHIFT_MODE,
- TRTCM_BUCKET_COUNTER_MODE,
-};
-
-enum trtcm_mode_type {
- TRTCM_COMMIT_MODE,
- TRTCM_PEAK_MODE,
-};
-
-enum trtcm_param {
- TRTCM_TICK_SEL = BIT(0),
- TRTCM_PKT_MODE = BIT(1),
- TRTCM_METER_MODE = BIT(2),
-};
-
-#define MIN_TOKEN_SIZE 4096
-#define MAX_TOKEN_SIZE_OFFSET 17
-#define TRTCM_TOKEN_RATE_MASK GENMASK(23, 6)
-#define TRTCM_TOKEN_RATE_FRACTION_MASK GENMASK(5, 0)
-
-struct airoha_queue_entry {
- union {
- void *buf;
- struct sk_buff *skb;
- };
- dma_addr_t dma_addr;
- u16 dma_len;
-};
-
-struct airoha_queue {
- struct airoha_qdma *qdma;
-
- /* protect concurrent queue accesses */
- spinlock_t lock;
- struct airoha_queue_entry *entry;
- struct airoha_qdma_desc *desc;
- u16 head;
- u16 tail;
-
- int queued;
- int ndesc;
- int free_thr;
- int buf_size;
-
- struct napi_struct napi;
- struct page_pool *page_pool;
-};
-
-struct airoha_tx_irq_queue {
- struct airoha_qdma *qdma;
-
- struct napi_struct napi;
-
- int size;
- u32 *q;
-};
-
-struct airoha_hw_stats {
- /* protect concurrent hw_stats accesses */
- spinlock_t lock;
- struct u64_stats_sync syncp;
-
- /* get_stats64 */
- u64 rx_ok_pkts;
- u64 tx_ok_pkts;
- u64 rx_ok_bytes;
- u64 tx_ok_bytes;
- u64 rx_multicast;
- u64 rx_errors;
- u64 rx_drops;
- u64 tx_drops;
- u64 rx_crc_error;
- u64 rx_over_errors;
- /* ethtool stats */
- u64 tx_broadcast;
- u64 tx_multicast;
- u64 tx_len[7];
- u64 rx_broadcast;
- u64 rx_fragment;
- u64 rx_jabber;
- u64 rx_len[7];
-};
-
-struct airoha_qdma {
- struct airoha_eth *eth;
- void __iomem *regs;
-
- /* protect concurrent irqmask accesses */
- spinlock_t irq_lock;
- u32 irqmask[QDMA_INT_REG_MAX];
- int irq;
+#include "airoha_regs.h"
+#include "airoha_eth.h"
- struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
-
- struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
- struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
-
- /* descriptor and packet buffers for qdma hw forward */
- struct {
- void *desc;
- void *q;
- } hfwd;
-};
-
-struct airoha_gdm_port {
- struct airoha_qdma *qdma;
- struct net_device *dev;
- int id;
-
- struct airoha_hw_stats stats;
-
- DECLARE_BITMAP(qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS);
-
- /* qos stats counters */
- u64 cpu_tx_packets;
- u64 fwd_tx_packets;
-};
-
-struct airoha_eth {
- struct device *dev;
-
- unsigned long state;
- void __iomem *fe_regs;
-
- struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
- struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
-
- struct net_device *napi_dev;
-
- struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
- struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
-};
-
-static u32 airoha_rr(void __iomem *base, u32 offset)
+u32 airoha_rr(void __iomem *base, u32 offset)
{
return readl(base + offset);
}
-static void airoha_wr(void __iomem *base, u32 offset, u32 val)
+void airoha_wr(void __iomem *base, u32 offset, u32 val)
{
writel(val, base + offset);
}
-static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
+u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
{
val |= (airoha_rr(base, offset) & ~mask);
airoha_wr(base, offset, val);
@@ -929,28 +34,6 @@ static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
return val;
}
-#define airoha_fe_rr(eth, offset) \
- airoha_rr((eth)->fe_regs, (offset))
-#define airoha_fe_wr(eth, offset, val) \
- airoha_wr((eth)->fe_regs, (offset), (val))
-#define airoha_fe_rmw(eth, offset, mask, val) \
- airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
-#define airoha_fe_set(eth, offset, val) \
- airoha_rmw((eth)->fe_regs, (offset), 0, (val))
-#define airoha_fe_clear(eth, offset, val) \
- airoha_rmw((eth)->fe_regs, (offset), (val), 0)
-
-#define airoha_qdma_rr(qdma, offset) \
- airoha_rr((qdma)->regs, (offset))
-#define airoha_qdma_wr(qdma, offset, val) \
- airoha_wr((qdma)->regs, (offset), (val))
-#define airoha_qdma_rmw(qdma, offset, mask, val) \
- airoha_rmw((qdma)->regs, (offset), (mask), (val))
-#define airoha_qdma_set(qdma, offset, val) \
- airoha_rmw((qdma)->regs, (offset), 0, (val))
-#define airoha_qdma_clear(qdma, offset, val) \
- airoha_rmw((qdma)->regs, (offset), (val), 0)
-
static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
u32 clear, u32 set)
{
@@ -1021,30 +104,23 @@ static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
FIELD_PREP(GDM_UCFQ_MASK, val));
}
-static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable)
+static int airoha_set_vip_for_gdm_port(struct airoha_gdm_port *port,
+ bool enable)
{
- u32 val = enable ? FE_PSE_PORT_PPE1 : FE_PSE_PORT_DROP;
- u32 vip_port, cfg_addr;
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 vip_port;
- switch (port) {
- case XSI_PCIE0_PORT:
+ switch (port->id) {
+ case 3:
+ /* FIXME: handle XSI_PCIE1_PORT */
vip_port = XSI_PCIE0_VIP_PORT_MASK;
- cfg_addr = REG_GDM_FWD_CFG(3);
- break;
- case XSI_PCIE1_PORT:
- vip_port = XSI_PCIE1_VIP_PORT_MASK;
- cfg_addr = REG_GDM_FWD_CFG(3);
- break;
- case XSI_USB_PORT:
- vip_port = XSI_USB_VIP_PORT_MASK;
- cfg_addr = REG_GDM_FWD_CFG(4);
break;
- case XSI_ETH_PORT:
+ case 4:
+ /* FIXME: handle XSI_USB_PORT */
vip_port = XSI_ETH_VIP_PORT_MASK;
- cfg_addr = REG_GDM_FWD_CFG(4);
break;
default:
- return -EINVAL;
+ return 0;
}
if (enable) {
@@ -1055,51 +131,17 @@ static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable)
airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port);
}
- airoha_set_gdm_port_fwd_cfg(eth, cfg_addr, val);
-
return 0;
}
-static int airoha_set_gdm_ports(struct airoha_eth *eth, bool enable)
-{
- const int port_list[] = {
- XSI_PCIE0_PORT,
- XSI_PCIE1_PORT,
- XSI_USB_PORT,
- XSI_ETH_PORT
- };
- int i, err;
-
- for (i = 0; i < ARRAY_SIZE(port_list); i++) {
- err = airoha_set_gdm_port(eth, port_list[i], enable);
- if (err)
- goto error;
- }
-
- return 0;
-
-error:
- for (i--; i >= 0; i--)
- airoha_set_gdm_port(eth, port_list[i], false);
-
- return err;
-}
-
static void airoha_fe_maccr_init(struct airoha_eth *eth)
{
int p;
- for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) {
+ for (p = 1; p <= ARRAY_SIZE(eth->ports); p++)
airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM |
GDM_DROP_CRC_ERR);
- airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(p),
- FE_PSE_PORT_CDM1);
- airoha_fe_rmw(eth, REG_GDM_LEN_CFG(p),
- GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
- FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
- FIELD_PREP(GDM_LONG_LEN_MASK, 4004));
- }
airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK,
FIELD_PREP(CDM1_VLAN_MASK, 0x8100));
@@ -1547,7 +589,7 @@ static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
switch (sport) {
- case 0x10 ... 0x13:
+ case 0x10 ... 0x14:
port = 0;
break;
case 0x2 ... 0x4:
@@ -1571,10 +613,12 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
while (done < budget) {
struct airoha_queue_entry *e = &q->entry[q->tail];
struct airoha_qdma_desc *desc = &q->desc[q->tail];
+ u32 hash, reason, msg1 = le32_to_cpu(desc->msg1);
dma_addr_t dma_addr = le32_to_cpu(desc->addr);
+ struct page *page = virt_to_head_page(e->buf);
u32 desc_ctrl = le32_to_cpu(desc->ctrl);
- struct sk_buff *skb;
- int len, p;
+ struct airoha_gdm_port *port;
+ int data_len, len, p;
if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
break;
@@ -1592,32 +636,74 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
dma_sync_single_for_cpu(eth->dev, dma_addr,
SKB_WITH_OVERHEAD(q->buf_size), dir);
+ data_len = q->skb ? q->buf_size
+ : SKB_WITH_OVERHEAD(q->buf_size);
+ if (data_len < len)
+ goto free_frag;
+
p = airoha_qdma_get_gdm_port(eth, desc);
- if (p < 0 || !eth->ports[p]) {
- page_pool_put_full_page(q->page_pool,
- virt_to_head_page(e->buf),
- true);
- continue;
+ if (p < 0 || !eth->ports[p])
+ goto free_frag;
+
+ port = eth->ports[p];
+ if (!q->skb) { /* first buffer */
+ q->skb = napi_build_skb(e->buf, q->buf_size);
+ if (!q->skb)
+ goto free_frag;
+
+ __skb_put(q->skb, len);
+ skb_mark_for_recycle(q->skb);
+ q->skb->dev = port->dev;
+ q->skb->protocol = eth_type_trans(q->skb, port->dev);
+ q->skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_record_rx_queue(q->skb, qid);
+ } else { /* scattered frame */
+ struct skb_shared_info *shinfo = skb_shinfo(q->skb);
+ int nr_frags = shinfo->nr_frags;
+
+ if (nr_frags >= ARRAY_SIZE(shinfo->frags))
+ goto free_frag;
+
+ skb_add_rx_frag(q->skb, nr_frags, page,
+ e->buf - page_address(page), len,
+ q->buf_size);
}
- skb = napi_build_skb(e->buf, q->buf_size);
- if (!skb) {
- page_pool_put_full_page(q->page_pool,
- virt_to_head_page(e->buf),
- true);
- break;
+ if (FIELD_GET(QDMA_DESC_MORE_MASK, desc_ctrl))
+ continue;
+
+ if (netdev_uses_dsa(port->dev)) {
+ /* PPE module requires untagged packets to work
+ * properly and it provides DSA port index via the
+ * DMA descriptor. Report DSA tag to the DSA stack
+ * via skb dst info.
+ */
+ u32 sptag = FIELD_GET(QDMA_ETH_RXMSG_SPTAG,
+ le32_to_cpu(desc->msg0));
+
+ if (sptag < ARRAY_SIZE(port->dsa_meta) &&
+ port->dsa_meta[sptag])
+ skb_dst_set_noref(q->skb,
+ &port->dsa_meta[sptag]->dst);
}
- skb_reserve(skb, 2);
- __skb_put(skb, len);
- skb_mark_for_recycle(skb);
- skb->dev = eth->ports[p]->dev;
- skb->protocol = eth_type_trans(skb, skb->dev);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb_record_rx_queue(skb, qid);
- napi_gro_receive(&q->napi, skb);
+ hash = FIELD_GET(AIROHA_RXD4_FOE_ENTRY, msg1);
+ if (hash != AIROHA_RXD4_FOE_ENTRY)
+ skb_set_hash(q->skb, jhash_1word(hash, 0),
+ PKT_HASH_TYPE_L4);
+
+ reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1);
+ if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+ airoha_ppe_check_skb(eth->ppe, hash);
done++;
+ napi_gro_receive(&q->napi, q->skb);
+ q->skb = NULL;
+ continue;
+free_frag:
+ page_pool_put_full_page(q->page_pool, page, true);
+ dev_kfree_skb(q->skb);
+ q->skb = NULL;
}
airoha_qdma_fill_rx_queue(q);
@@ -1692,6 +778,7 @@ static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
FIELD_PREP(RX_RING_THR_MASK, thr));
airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
+ airoha_qdma_set(qdma, REG_RX_SCATTER_CFG(qid), RX_RING_SG_EN_MASK);
airoha_qdma_fill_rx_queue(q);
@@ -2091,7 +1178,6 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
}
airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
- GLOBAL_CFG_RX_2B_OFFSET_MASK |
FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
GLOBAL_CFG_CPU_TXR_RR_MASK |
GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
@@ -2235,6 +1321,10 @@ static int airoha_hw_init(struct platform_device *pdev,
return err;
}
+ err = airoha_ppe_init(eth);
+ if (err)
+ return err;
+
set_bit(DEV_STATE_INITIALIZED, &eth->state);
return 0;
@@ -2441,12 +1531,12 @@ static void airoha_update_hw_stats(struct airoha_gdm_port *port)
static int airoha_dev_open(struct net_device *dev)
{
+ int err, len = ETH_HLEN + dev->mtu + ETH_FCS_LEN;
struct airoha_gdm_port *port = netdev_priv(dev);
struct airoha_qdma *qdma = port->qdma;
- int err;
netif_tx_start_all_queues(dev);
- err = airoha_set_gdm_ports(qdma->eth, true);
+ err = airoha_set_vip_for_gdm_port(port, true);
if (err)
return err;
@@ -2457,9 +1547,15 @@ static int airoha_dev_open(struct net_device *dev)
airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
GDM_STAG_EN_MASK);
+ airoha_fe_rmw(qdma->eth, REG_GDM_LEN_CFG(port->id),
+ GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
+ FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
+ FIELD_PREP(GDM_LONG_LEN_MASK, len));
+
airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
GLOBAL_CFG_TX_DMA_EN_MASK |
GLOBAL_CFG_RX_DMA_EN_MASK);
+ atomic_inc(&qdma->users);
return 0;
}
@@ -2471,20 +1567,24 @@ static int airoha_dev_stop(struct net_device *dev)
int i, err;
netif_tx_disable(dev);
- err = airoha_set_gdm_ports(qdma->eth, false);
+ err = airoha_set_vip_for_gdm_port(port, false);
if (err)
return err;
- airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
- GLOBAL_CFG_TX_DMA_EN_MASK |
- GLOBAL_CFG_RX_DMA_EN_MASK);
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++)
+ netdev_tx_reset_subqueue(dev, i);
- for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
- if (!qdma->q_tx[i].ndesc)
- continue;
+ if (atomic_dec_and_test(&qdma->users)) {
+ airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
+ GLOBAL_CFG_TX_DMA_EN_MASK |
+ GLOBAL_CFG_RX_DMA_EN_MASK);
- airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
- netdev_tx_reset_subqueue(dev, i);
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+ if (!qdma->q_tx[i].ndesc)
+ continue;
+
+ airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
+ }
}
return 0;
@@ -2504,12 +1604,82 @@ static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
return 0;
}
+static void airoha_set_gdm2_loopback(struct airoha_gdm_port *port)
+{
+ u32 pse_port = port->id == 3 ? FE_PSE_PORT_GDM3 : FE_PSE_PORT_GDM4;
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 chan = port->id == 3 ? 4 : 0;
+
+ /* Forward the traffic to the proper GDM port */
+ airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(2), pse_port);
+ airoha_fe_clear(eth, REG_GDM_FWD_CFG(2), GDM_STRIP_CRC);
+
+ /* Enable GDM2 loopback */
+ airoha_fe_wr(eth, REG_GDM_TXCHN_EN(2), 0xffffffff);
+ airoha_fe_wr(eth, REG_GDM_RXCHN_EN(2), 0xffff);
+ airoha_fe_rmw(eth, REG_GDM_LPBK_CFG(2),
+ LPBK_CHAN_MASK | LPBK_MODE_MASK | LPBK_EN_MASK,
+ FIELD_PREP(LPBK_CHAN_MASK, chan) | LPBK_EN_MASK);
+ airoha_fe_rmw(eth, REG_GDM_LEN_CFG(2),
+ GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
+ FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
+ FIELD_PREP(GDM_LONG_LEN_MASK, AIROHA_MAX_MTU));
+
+ /* Disable VIP and IFC for GDM2 */
+ airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, BIT(2));
+ airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, BIT(2));
+
+ if (port->id == 3) {
+ /* FIXME: handle XSI_PCIE1_PORT */
+ airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(0), 0x5500);
+ airoha_fe_rmw(eth, REG_FE_WAN_PORT,
+ WAN1_EN_MASK | WAN1_MASK | WAN0_MASK,
+ FIELD_PREP(WAN0_MASK, HSGMII_LAN_PCIE0_SRCPORT));
+ airoha_fe_rmw(eth,
+ REG_SP_DFT_CPORT(HSGMII_LAN_PCIE0_SRCPORT >> 3),
+ SP_CPORT_PCIE0_MASK,
+ FIELD_PREP(SP_CPORT_PCIE0_MASK,
+ FE_PSE_PORT_CDM2));
+ } else {
+ /* FIXME: handle XSI_USB_PORT */
+ airoha_fe_rmw(eth, REG_SRC_PORT_FC_MAP6,
+ FC_ID_OF_SRC_PORT24_MASK,
+ FIELD_PREP(FC_ID_OF_SRC_PORT24_MASK, 2));
+ airoha_fe_rmw(eth, REG_FE_WAN_PORT,
+ WAN1_EN_MASK | WAN1_MASK | WAN0_MASK,
+ FIELD_PREP(WAN0_MASK, HSGMII_LAN_ETH_SRCPORT));
+ airoha_fe_rmw(eth,
+ REG_SP_DFT_CPORT(HSGMII_LAN_ETH_SRCPORT >> 3),
+ SP_CPORT_ETH_MASK,
+ FIELD_PREP(SP_CPORT_ETH_MASK, FE_PSE_PORT_CDM2));
+ }
+}
+
static int airoha_dev_init(struct net_device *dev)
{
struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 pse_port;
airoha_set_macaddr(port, dev->dev_addr);
+ switch (port->id) {
+ case 3:
+ case 4:
+ /* If GDM2 is active we can't enable loopback */
+ if (!eth->ports[1])
+ airoha_set_gdm2_loopback(port);
+ fallthrough;
+ case 2:
+ pse_port = FE_PSE_PORT_PPE2;
+ break;
+ default:
+ pse_port = FE_PSE_PORT_PPE1;
+ break;
+ }
+
+ airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(port->id), pse_port);
+
return 0;
}
@@ -2535,6 +1705,20 @@ static void airoha_dev_get_stats64(struct net_device *dev,
} while (u64_stats_fetch_retry(&port->stats.syncp, start));
}
+static int airoha_dev_change_mtu(struct net_device *dev, int mtu)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 len = ETH_HLEN + mtu + ETH_FCS_LEN;
+
+ airoha_fe_rmw(eth, REG_GDM_LEN_CFG(port->id),
+ GDM_LONG_LEN_MASK,
+ FIELD_PREP(GDM_LONG_LEN_MASK, len));
+ WRITE_ONCE(dev->mtu, mtu);
+
+ return 0;
+}
+
static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
@@ -2553,26 +1737,71 @@ static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
return queue < dev->num_tx_queues ? queue : 0;
}
+static u32 airoha_get_dsa_tag(struct sk_buff *skb, struct net_device *dev)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+ struct ethhdr *ehdr;
+ u8 xmit_tpid;
+ u16 tag;
+
+ if (!netdev_uses_dsa(dev))
+ return 0;
+
+ if (dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
+ return 0;
+
+ if (skb_cow_head(skb, 0))
+ return 0;
+
+ ehdr = (struct ethhdr *)skb->data;
+ tag = be16_to_cpu(ehdr->h_proto);
+ xmit_tpid = tag >> 8;
+
+ switch (xmit_tpid) {
+ case MTK_HDR_XMIT_TAGGED_TPID_8100:
+ ehdr->h_proto = cpu_to_be16(ETH_P_8021Q);
+ tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_8100 << 8);
+ break;
+ case MTK_HDR_XMIT_TAGGED_TPID_88A8:
+ ehdr->h_proto = cpu_to_be16(ETH_P_8021AD);
+ tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_88A8 << 8);
+ break;
+ default:
+ /* PPE module requires untagged DSA packets to work properly,
+ * so move DSA tag to DMA descriptor.
+ */
+ memmove(skb->data + MTK_HDR_LEN, skb->data, 2 * ETH_ALEN);
+ __skb_pull(skb, MTK_HDR_LEN);
+ break;
+ }
+
+ return tag;
+#else
+ return 0;
+#endif
+}
+
static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct skb_shared_info *sinfo = skb_shinfo(skb);
struct airoha_gdm_port *port = netdev_priv(dev);
- u32 msg0, msg1, len = skb_headlen(skb);
struct airoha_qdma *qdma = port->qdma;
- u32 nr_frags = 1 + sinfo->nr_frags;
+ u32 nr_frags, tag, msg0, msg1, len;
struct netdev_queue *txq;
struct airoha_queue *q;
- void *data = skb->data;
+ void *data;
int i, qid;
u16 index;
u8 fport;
qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx);
+ tag = airoha_get_dsa_tag(skb, dev);
+
msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK,
qid / AIROHA_NUM_QOS_QUEUES) |
FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK,
- qid % AIROHA_NUM_QOS_QUEUES);
+ qid % AIROHA_NUM_QOS_QUEUES) |
+ FIELD_PREP(QDMA_ETH_TXMSG_SP_TAG_MASK, tag);
if (skb->ip_summed == CHECKSUM_PARTIAL)
msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
@@ -2583,8 +1812,9 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
if (skb_cow_head(skb, 0))
goto error;
- if (sinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
- __be16 csum = cpu_to_be16(sinfo->gso_size);
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 |
+ SKB_GSO_TCPV6)) {
+ __be16 csum = cpu_to_be16(skb_shinfo(skb)->gso_size);
tcp_hdr(skb)->check = (__force __sum16)csum;
msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1);
@@ -2602,6 +1832,8 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
spin_lock_bh(&q->lock);
txq = netdev_get_tx_queue(dev, qid);
+ nr_frags = 1 + skb_shinfo(skb)->nr_frags;
+
if (q->queued + nr_frags > q->ndesc) {
/* not enough space in the queue */
netif_tx_stop_queue(txq);
@@ -2609,11 +1841,14 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ len = skb_headlen(skb);
+ data = skb->data;
index = q->head;
+
for (i = 0; i < nr_frags; i++) {
struct airoha_qdma_desc *desc = &q->desc[index];
struct airoha_queue_entry *e = &q->entry[index];
- skb_frag_t *frag = &sinfo->frags[i];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr_t addr;
u32 val;
@@ -3038,6 +2273,47 @@ static int airoha_tc_htb_alloc_leaf_queue(struct airoha_gdm_port *port,
return 0;
}
+static int airoha_dev_setup_tc_block(struct airoha_gdm_port *port,
+ struct flow_block_offload *f)
+{
+ flow_setup_cb_t *cb = airoha_ppe_setup_tc_block_cb;
+ static LIST_HEAD(block_cb_list);
+ struct flow_block_cb *block_cb;
+
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ f->driver_block_list = &block_cb_list;
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ block_cb = flow_block_cb_lookup(f->block, cb, port->dev);
+ if (block_cb) {
+ flow_block_cb_incref(block_cb);
+ return 0;
+ }
+ block_cb = flow_block_cb_alloc(cb, port->dev, port->dev, NULL);
+ if (IS_ERR(block_cb))
+ return PTR_ERR(block_cb);
+
+ flow_block_cb_incref(block_cb);
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &block_cb_list);
+ return 0;
+ case FLOW_BLOCK_UNBIND:
+ block_cb = flow_block_cb_lookup(f->block, cb, port->dev);
+ if (!block_cb)
+ return -ENOENT;
+
+ if (!flow_block_cb_decref(block_cb)) {
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ }
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static void airoha_tc_remove_htb_queue(struct airoha_gdm_port *port, int queue)
{
struct net_device *dev = port->dev;
@@ -3121,6 +2397,9 @@ static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type,
return airoha_tc_setup_qdisc_ets(port, type_data);
case TC_SETUP_QDISC_HTB:
return airoha_tc_setup_qdisc_htb(port, type_data);
+ case TC_SETUP_BLOCK:
+ case TC_SETUP_FT:
+ return airoha_dev_setup_tc_block(port, type_data);
default:
return -EOPNOTSUPP;
}
@@ -3130,6 +2409,7 @@ static const struct net_device_ops airoha_netdev_ops = {
.ndo_init = airoha_dev_init,
.ndo_open = airoha_dev_open,
.ndo_stop = airoha_dev_stop,
+ .ndo_change_mtu = airoha_dev_change_mtu,
.ndo_select_queue = airoha_dev_select_queue,
.ndo_start_xmit = airoha_dev_xmit,
.ndo_get_stats64 = airoha_dev_get_stats64,
@@ -3143,13 +2423,45 @@ static const struct ethtool_ops airoha_ethtool_ops = {
.get_rmon_stats = airoha_ethtool_get_rmon_stats,
};
-static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
+static int airoha_metadata_dst_alloc(struct airoha_gdm_port *port)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) {
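+ /* preallocate one HW_PORT_MUX metadata dst for each possible DSA port */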
+ struct metadata_dst *md_dst;
+
+ md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
+ GFP_KERNEL);
+ if (!md_dst)
+ return -ENOMEM;
+
+ md_dst->u.port_info.port_id = i;
+ port->dsa_meta[i] = md_dst;
+ }
+
+ return 0;
+}
+
+static void airoha_metadata_dst_free(struct airoha_gdm_port *port)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) {
+ if (!port->dsa_meta[i])
+ continue;
+
+ metadata_dst_free(port->dsa_meta[i]);
+ }
+}
+
+static int airoha_alloc_gdm_port(struct airoha_eth *eth,
+ struct device_node *np, int index)
{
const __be32 *id_ptr = of_get_property(np, "reg", NULL);
struct airoha_gdm_port *port;
struct airoha_qdma *qdma;
struct net_device *dev;
- int err, index;
+ int err, p;
u32 id;
if (!id_ptr) {
@@ -3158,14 +2470,14 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
}
id = be32_to_cpup(id_ptr);
- index = id - 1;
+ p = id - 1;
if (!id || id > ARRAY_SIZE(eth->ports)) {
dev_err(eth->dev, "invalid gdm port id: %d\n", id);
return -EINVAL;
}
- if (eth->ports[index]) {
+ if (eth->ports[p]) {
dev_err(eth->dev, "duplicate gdm port id: %d\n", id);
return -EINVAL;
}
@@ -3188,6 +2500,7 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
NETIF_F_SG | NETIF_F_TSO |
NETIF_F_HW_TC;
dev->features |= dev->hw_features;
+ dev->vlan_features = dev->hw_features;
dev->dev.of_node = np;
dev->irq = qdma->irq;
SET_NETDEV_DEV(dev, eth->dev);
@@ -3213,7 +2526,11 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
port->qdma = qdma;
port->dev = dev;
port->id = id;
- eth->ports[index] = port;
+ eth->ports[p] = port;
+
+ err = airoha_metadata_dst_alloc(port);
+ if (err)
+ return err;
return register_netdev(dev);
}
@@ -3281,6 +2598,7 @@ static int airoha_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
airoha_qdma_start_napi(&eth->qdma[i]);
+ i = 0;
for_each_child_of_node(pdev->dev.of_node, np) {
if (!of_device_is_compatible(np, "airoha,eth-mac"))
continue;
@@ -3288,7 +2606,7 @@ static int airoha_probe(struct platform_device *pdev)
if (!of_device_is_available(np))
continue;
- err = airoha_alloc_gdm_port(eth, np);
+ err = airoha_alloc_gdm_port(eth, np, i++);
if (err) {
of_node_put(np);
goto error_napi_stop;
@@ -3307,8 +2625,10 @@ error_hw_cleanup:
for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
struct airoha_gdm_port *port = eth->ports[i];
- if (port && port->dev->reg_state == NETREG_REGISTERED)
+ if (port && port->dev->reg_state == NETREG_REGISTERED) {
unregister_netdev(port->dev);
+ airoha_metadata_dst_free(port);
+ }
}
free_netdev(eth->napi_dev);
platform_set_drvdata(pdev, NULL);
@@ -3334,9 +2654,11 @@ static void airoha_remove(struct platform_device *pdev)
airoha_dev_stop(port->dev);
unregister_netdev(port->dev);
+ airoha_metadata_dst_free(port);
}
free_netdev(eth->napi_dev);
+ airoha_ppe_deinit(eth);
platform_set_drvdata(pdev, NULL);
}
diff --git a/drivers/net/ethernet/airoha/airoha_eth.h b/drivers/net/ethernet/airoha/airoha_eth.h
new file mode 100644
index 000000000000..60690b685710
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_eth.h
@@ -0,0 +1,552 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#ifndef AIROHA_ETH_H
+#define AIROHA_ETH_H
+
+#include <linux/debugfs.h>
+#include <linux/etherdevice.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/reset.h>
+#include <net/dsa.h>
+
+#define AIROHA_MAX_NUM_GDM_PORTS 4
+#define AIROHA_MAX_NUM_QDMA 2
+#define AIROHA_MAX_DSA_PORTS 7
+#define AIROHA_MAX_NUM_RSTS 3
+#define AIROHA_MAX_NUM_XSI_RSTS 5
+#define AIROHA_MAX_MTU 9216
+#define AIROHA_MAX_PACKET_SIZE 2048
+#define AIROHA_NUM_QOS_CHANNELS 4
+#define AIROHA_NUM_QOS_QUEUES 8
+#define AIROHA_NUM_TX_RING 32
+#define AIROHA_NUM_RX_RING 32
+#define AIROHA_NUM_NETDEV_TX_RINGS (AIROHA_NUM_TX_RING + \
+ AIROHA_NUM_QOS_CHANNELS)
+#define AIROHA_FE_MC_MAX_VLAN_TABLE 64
+#define AIROHA_FE_MC_MAX_VLAN_PORT 16
+#define AIROHA_NUM_TX_IRQ 2
+#define HW_DSCP_NUM 2048
+#define IRQ_QUEUE_LEN(_n) ((_n) ? 1024 : 2048)
+#define TX_DSCP_NUM 1024
+#define RX_DSCP_NUM(_n) \
+ ((_n) == 2 ? 128 : \
+ (_n) == 11 ? 128 : \
+ (_n) == 15 ? 128 : \
+ (_n) == 0 ? 1024 : 16)
+
+#define PSE_RSV_PAGES 128
+#define PSE_QUEUE_RSV_PAGES 64
+
+#define QDMA_METER_IDX(_n) ((_n) & 0xff)
+#define QDMA_METER_GROUP(_n) (((_n) >> 8) & 0x3)
+
+#define PPE_NUM 2
+#define PPE1_SRAM_NUM_ENTRIES (8 * 1024)
+#define PPE_SRAM_NUM_ENTRIES (2 * PPE1_SRAM_NUM_ENTRIES)
+#define PPE_DRAM_NUM_ENTRIES (16 * 1024)
+#define PPE_NUM_ENTRIES (PPE_SRAM_NUM_ENTRIES + PPE_DRAM_NUM_ENTRIES)
+#define PPE_HASH_MASK (PPE_NUM_ENTRIES - 1)
+#define PPE_ENTRY_SIZE 80
+#define PPE_RAM_NUM_ENTRIES_SHIFT(_n) (__ffs((_n) >> 10))
+
+#define MTK_HDR_LEN 4
+#define MTK_HDR_XMIT_TAGGED_TPID_8100 1
+#define MTK_HDR_XMIT_TAGGED_TPID_88A8 2
+
+enum {
+ QDMA_INT_REG_IDX0,
+ QDMA_INT_REG_IDX1,
+ QDMA_INT_REG_IDX2,
+ QDMA_INT_REG_IDX3,
+ QDMA_INT_REG_IDX4,
+ QDMA_INT_REG_MAX
+};
+
+enum {
+ HSGMII_LAN_PCIE0_SRCPORT = 0x16,
+ HSGMII_LAN_PCIE1_SRCPORT,
+ HSGMII_LAN_ETH_SRCPORT,
+ HSGMII_LAN_USB_SRCPORT,
+};
+
+enum {
+ XSI_PCIE0_VIP_PORT_MASK = BIT(22),
+ XSI_PCIE1_VIP_PORT_MASK = BIT(23),
+ XSI_USB_VIP_PORT_MASK = BIT(25),
+ XSI_ETH_VIP_PORT_MASK = BIT(24),
+};
+
+enum {
+ DEV_STATE_INITIALIZED,
+};
+
+enum {
+ CDM_CRSN_QSEL_Q1 = 1,
+ CDM_CRSN_QSEL_Q5 = 5,
+ CDM_CRSN_QSEL_Q6 = 6,
+ CDM_CRSN_QSEL_Q15 = 15,
+};
+
+enum {
+ CRSN_08 = 0x8,
+ CRSN_21 = 0x15, /* KA */
+ CRSN_22 = 0x16, /* hit bind and force route to CPU */
+ CRSN_24 = 0x18,
+ CRSN_25 = 0x19,
+};
+
+enum {
+ FE_PSE_PORT_CDM1,
+ FE_PSE_PORT_GDM1,
+ FE_PSE_PORT_GDM2,
+ FE_PSE_PORT_GDM3,
+ FE_PSE_PORT_PPE1,
+ FE_PSE_PORT_CDM2,
+ FE_PSE_PORT_CDM3,
+ FE_PSE_PORT_CDM4,
+ FE_PSE_PORT_PPE2,
+ FE_PSE_PORT_GDM4,
+ FE_PSE_PORT_CDM5,
+ FE_PSE_PORT_DROP = 0xf,
+};
+
+enum tx_sched_mode {
+ TC_SCH_WRR8,
+ TC_SCH_SP,
+ TC_SCH_WRR7,
+ TC_SCH_WRR6,
+ TC_SCH_WRR5,
+ TC_SCH_WRR4,
+ TC_SCH_WRR3,
+ TC_SCH_WRR2,
+};
+
+enum trtcm_param_type {
+ TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */
+ TRTCM_TOKEN_RATE_MODE,
+ TRTCM_BUCKETSIZE_SHIFT_MODE,
+ TRTCM_BUCKET_COUNTER_MODE,
+};
+
+enum trtcm_mode_type {
+ TRTCM_COMMIT_MODE,
+ TRTCM_PEAK_MODE,
+};
+
+enum trtcm_param {
+ TRTCM_TICK_SEL = BIT(0),
+ TRTCM_PKT_MODE = BIT(1),
+ TRTCM_METER_MODE = BIT(2),
+};
+
+#define MIN_TOKEN_SIZE 4096
+#define MAX_TOKEN_SIZE_OFFSET 17
+#define TRTCM_TOKEN_RATE_MASK GENMASK(23, 6)
+#define TRTCM_TOKEN_RATE_FRACTION_MASK GENMASK(5, 0)
+
+struct airoha_queue_entry {
+ union {
+ void *buf;
+ struct sk_buff *skb;
+ };
+ dma_addr_t dma_addr;
+ u16 dma_len;
+};
+
+struct airoha_queue {
+ struct airoha_qdma *qdma;
+
+ /* protect concurrent queue accesses */
+ spinlock_t lock;
+ struct airoha_queue_entry *entry;
+ struct airoha_qdma_desc *desc;
+ u16 head;
+ u16 tail;
+
+ int queued;
+ int ndesc;
+ int free_thr;
+ int buf_size;
+
+ struct napi_struct napi;
+ struct page_pool *page_pool;
+ struct sk_buff *skb;
+};
+
+struct airoha_tx_irq_queue {
+ struct airoha_qdma *qdma;
+
+ struct napi_struct napi;
+
+ int size;
+ u32 *q;
+};
+
+struct airoha_hw_stats {
+ /* protect concurrent hw_stats accesses */
+ spinlock_t lock;
+ struct u64_stats_sync syncp;
+
+ /* get_stats64 */
+ u64 rx_ok_pkts;
+ u64 tx_ok_pkts;
+ u64 rx_ok_bytes;
+ u64 tx_ok_bytes;
+ u64 rx_multicast;
+ u64 rx_errors;
+ u64 rx_drops;
+ u64 tx_drops;
+ u64 rx_crc_error;
+ u64 rx_over_errors;
+ /* ethtool stats */
+ u64 tx_broadcast;
+ u64 tx_multicast;
+ u64 tx_len[7];
+ u64 rx_broadcast;
+ u64 rx_fragment;
+ u64 rx_jabber;
+ u64 rx_len[7];
+};
+
+enum {
+ PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED = 0x0f,
+};
+
+enum {
+ AIROHA_FOE_STATE_INVALID,
+ AIROHA_FOE_STATE_UNBIND,
+ AIROHA_FOE_STATE_BIND,
+ AIROHA_FOE_STATE_FIN
+};
+
+enum {
+ PPE_PKT_TYPE_IPV4_HNAPT = 0,
+ PPE_PKT_TYPE_IPV4_ROUTE = 1,
+ PPE_PKT_TYPE_BRIDGE = 2,
+ PPE_PKT_TYPE_IPV4_DSLITE = 3,
+ PPE_PKT_TYPE_IPV6_ROUTE_3T = 4,
+ PPE_PKT_TYPE_IPV6_ROUTE_5T = 5,
+ PPE_PKT_TYPE_IPV6_6RD = 7,
+};
+
+#define AIROHA_FOE_MAC_SMAC_ID GENMASK(20, 16)
+#define AIROHA_FOE_MAC_PPPOE_ID GENMASK(15, 0)
+
+struct airoha_foe_mac_info_common {
+ u16 vlan1;
+ u16 etype;
+
+ u32 dest_mac_hi;
+
+ u16 vlan2;
+ u16 dest_mac_lo;
+
+ u32 src_mac_hi;
+};
+
+struct airoha_foe_mac_info {
+ struct airoha_foe_mac_info_common common;
+
+ u16 pppoe_id;
+ u16 src_mac_lo;
+};
+
+#define AIROHA_FOE_IB1_UNBIND_PREBIND BIT(24)
+#define AIROHA_FOE_IB1_UNBIND_PACKETS GENMASK(23, 8)
+#define AIROHA_FOE_IB1_UNBIND_TIMESTAMP GENMASK(7, 0)
+
+#define AIROHA_FOE_IB1_BIND_STATIC BIT(31)
+#define AIROHA_FOE_IB1_BIND_UDP BIT(30)
+#define AIROHA_FOE_IB1_BIND_STATE GENMASK(29, 28)
+#define AIROHA_FOE_IB1_BIND_PACKET_TYPE GENMASK(27, 25)
+#define AIROHA_FOE_IB1_BIND_TTL BIT(24)
+#define AIROHA_FOE_IB1_BIND_TUNNEL_DECAP BIT(23)
+#define AIROHA_FOE_IB1_BIND_PPPOE BIT(22)
+#define AIROHA_FOE_IB1_BIND_VPM GENMASK(21, 20)
+#define AIROHA_FOE_IB1_BIND_VLAN_LAYER GENMASK(19, 16)
+#define AIROHA_FOE_IB1_BIND_KEEPALIVE BIT(15)
+#define AIROHA_FOE_IB1_BIND_TIMESTAMP GENMASK(14, 0)
+
+#define AIROHA_FOE_IB2_DSCP GENMASK(31, 24)
+#define AIROHA_FOE_IB2_PORT_AG GENMASK(23, 13)
+#define AIROHA_FOE_IB2_PCP BIT(12)
+#define AIROHA_FOE_IB2_MULTICAST BIT(11)
+#define AIROHA_FOE_IB2_FAST_PATH BIT(10)
+#define AIROHA_FOE_IB2_PSE_QOS BIT(9)
+#define AIROHA_FOE_IB2_PSE_PORT GENMASK(8, 5)
+#define AIROHA_FOE_IB2_NBQ GENMASK(4, 0)
+
+#define AIROHA_FOE_ACTDP GENMASK(31, 24)
+#define AIROHA_FOE_SHAPER_ID GENMASK(23, 16)
+#define AIROHA_FOE_CHANNEL GENMASK(15, 11)
+#define AIROHA_FOE_QID GENMASK(10, 8)
+#define AIROHA_FOE_DPI BIT(7)
+#define AIROHA_FOE_TUNNEL BIT(6)
+#define AIROHA_FOE_TUNNEL_ID GENMASK(5, 0)
+
+struct airoha_foe_bridge {
+ u32 dest_mac_hi;
+
+ u16 src_mac_hi;
+ u16 dest_mac_lo;
+
+ u32 src_mac_lo;
+
+ u32 ib2;
+
+ u32 rsv[5];
+
+ u32 data;
+
+ struct airoha_foe_mac_info l2;
+};
+
+struct airoha_foe_ipv4_tuple {
+ u32 src_ip;
+ u32 dest_ip;
+ union {
+ struct {
+ u16 dest_port;
+ u16 src_port;
+ };
+ struct {
+ u8 protocol;
+ u8 _pad[3]; /* fill with 0xa5a5a5 */
+ };
+ u32 ports;
+ };
+};
+
+struct airoha_foe_ipv4 {
+ struct airoha_foe_ipv4_tuple orig_tuple;
+
+ u32 ib2;
+
+ struct airoha_foe_ipv4_tuple new_tuple;
+
+ u32 rsv[2];
+
+ u32 data;
+
+ struct airoha_foe_mac_info l2;
+};
+
+struct airoha_foe_ipv4_dslite {
+ struct airoha_foe_ipv4_tuple ip4;
+
+ u32 ib2;
+
+ u8 flow_label[3];
+ u8 priority;
+
+ u32 rsv[4];
+
+ u32 data;
+
+ struct airoha_foe_mac_info l2;
+};
+
+struct airoha_foe_ipv6 {
+ u32 src_ip[4];
+ u32 dest_ip[4];
+
+ union {
+ struct {
+ u16 dest_port;
+ u16 src_port;
+ };
+ struct {
+ u8 protocol;
+ u8 pad[3];
+ };
+ u32 ports;
+ };
+
+ u32 data;
+
+ u32 ib2;
+
+ struct airoha_foe_mac_info_common l2;
+};
+
+struct airoha_foe_entry {
+ union {
+ struct {
+ u32 ib1;
+ union {
+ struct airoha_foe_bridge bridge;
+ struct airoha_foe_ipv4 ipv4;
+ struct airoha_foe_ipv4_dslite dslite;
+ struct airoha_foe_ipv6 ipv6;
+ DECLARE_FLEX_ARRAY(u32, d);
+ };
+ };
+ u8 data[PPE_ENTRY_SIZE];
+ };
+};
+
+struct airoha_flow_data {
+ struct ethhdr eth;
+
+ union {
+ struct {
+ __be32 src_addr;
+ __be32 dst_addr;
+ } v4;
+
+ struct {
+ struct in6_addr src_addr;
+ struct in6_addr dst_addr;
+ } v6;
+ };
+
+ __be16 src_port;
+ __be16 dst_port;
+
+ struct {
+ struct {
+ u16 id;
+ __be16 proto;
+ } hdr[2];
+ u8 num;
+ } vlan;
+ struct {
+ u16 sid;
+ u8 num;
+ } pppoe;
+};
+
+struct airoha_flow_table_entry {
+ struct hlist_node list;
+
+ struct airoha_foe_entry data;
+ u32 hash;
+
+ struct rhash_head node;
+ unsigned long cookie;
+};
+
+struct airoha_qdma {
+ struct airoha_eth *eth;
+ void __iomem *regs;
+
+ /* protect concurrent irqmask accesses */
+ spinlock_t irq_lock;
+ u32 irqmask[QDMA_INT_REG_MAX];
+ int irq;
+
+ atomic_t users;
+
+ struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
+
+ struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
+ struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
+
+ /* descriptor and packet buffers for qdma hw forward */
+ struct {
+ void *desc;
+ void *q;
+ } hfwd;
+};
+
+struct airoha_gdm_port {
+ struct airoha_qdma *qdma;
+ struct net_device *dev;
+ int id;
+
+ struct airoha_hw_stats stats;
+
+ DECLARE_BITMAP(qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS);
+
+ /* qos stats counters */
+ u64 cpu_tx_packets;
+ u64 fwd_tx_packets;
+
+ struct metadata_dst *dsa_meta[AIROHA_MAX_DSA_PORTS];
+};
+
+#define AIROHA_RXD4_PPE_CPU_REASON GENMASK(20, 16)
+#define AIROHA_RXD4_FOE_ENTRY GENMASK(15, 0)
+
+struct airoha_ppe {
+ struct airoha_eth *eth;
+
+ void *foe;
+ dma_addr_t foe_dma;
+
+ struct hlist_head *foe_flow;
+ u16 foe_check_time[PPE_NUM_ENTRIES];
+
+ struct dentry *debugfs_dir;
+};
+
+struct airoha_eth {
+ struct device *dev;
+
+ unsigned long state;
+ void __iomem *fe_regs;
+
+ struct airoha_npu __rcu *npu;
+
+ struct airoha_ppe *ppe;
+ struct rhashtable flow_table;
+
+ struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
+ struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
+
+ struct net_device *napi_dev;
+
+ struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
+ struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
+};
+
+u32 airoha_rr(void __iomem *base, u32 offset);
+void airoha_wr(void __iomem *base, u32 offset, u32 val);
+u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val);
+
+#define airoha_fe_rr(eth, offset) \
+ airoha_rr((eth)->fe_regs, (offset))
+#define airoha_fe_wr(eth, offset, val) \
+ airoha_wr((eth)->fe_regs, (offset), (val))
+#define airoha_fe_rmw(eth, offset, mask, val) \
+ airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
+#define airoha_fe_set(eth, offset, val) \
+ airoha_rmw((eth)->fe_regs, (offset), 0, (val))
+#define airoha_fe_clear(eth, offset, val) \
+ airoha_rmw((eth)->fe_regs, (offset), (val), 0)
+
+#define airoha_qdma_rr(qdma, offset) \
+ airoha_rr((qdma)->regs, (offset))
+#define airoha_qdma_wr(qdma, offset, val) \
+ airoha_wr((qdma)->regs, (offset), (val))
+#define airoha_qdma_rmw(qdma, offset, mask, val) \
+ airoha_rmw((qdma)->regs, (offset), (mask), (val))
+#define airoha_qdma_set(qdma, offset, val) \
+ airoha_rmw((qdma)->regs, (offset), 0, (val))
+#define airoha_qdma_clear(qdma, offset, val) \
+ airoha_rmw((qdma)->regs, (offset), (val), 0)
+
+void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash);
+int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv);
+int airoha_ppe_init(struct airoha_eth *eth);
+void airoha_ppe_deinit(struct airoha_eth *eth);
+struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
+ u32 hash);
+
+#ifdef CONFIG_DEBUG_FS
+int airoha_ppe_debugfs_init(struct airoha_ppe *ppe);
+#else
+static inline int airoha_ppe_debugfs_init(struct airoha_ppe *ppe)
+{
+ return 0;
+}
+#endif
+
+#endif /* AIROHA_ETH_H */
diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c
new file mode 100644
index 000000000000..7a5710f9ccf6
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_npu.c
@@ -0,0 +1,520 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/devcoredump.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/regmap.h>
+
+#include "airoha_npu.h"
+
+#define NPU_EN7581_FIRMWARE_DATA "airoha/en7581_npu_data.bin"
+#define NPU_EN7581_FIRMWARE_RV32 "airoha/en7581_npu_rv32.bin"
+#define NPU_EN7581_FIRMWARE_RV32_MAX_SIZE 0x200000
+#define NPU_EN7581_FIRMWARE_DATA_MAX_SIZE 0x10000
+#define NPU_DUMP_SIZE 512
+
+#define REG_NPU_LOCAL_SRAM 0x0
+
+#define NPU_PC_BASE_ADDR 0x305000
+#define REG_PC_DBG(_n) (0x305000 + ((_n) * 0x100))
+
+#define NPU_CLUSTER_BASE_ADDR 0x306000
+
+#define REG_CR_BOOT_TRIGGER (NPU_CLUSTER_BASE_ADDR + 0x000)
+#define REG_CR_BOOT_CONFIG (NPU_CLUSTER_BASE_ADDR + 0x004)
+#define REG_CR_BOOT_BASE(_n) (NPU_CLUSTER_BASE_ADDR + 0x020 + ((_n) << 2))
+
+#define NPU_MBOX_BASE_ADDR 0x30c000
+
+#define REG_CR_MBOX_INT_STATUS (NPU_MBOX_BASE_ADDR + 0x000)
+#define MBOX_INT_STATUS_MASK BIT(8)
+
+#define REG_CR_MBOX_INT_MASK(_n) (NPU_MBOX_BASE_ADDR + 0x004 + ((_n) << 2))
+#define REG_CR_MBQ0_CTRL(_n) (NPU_MBOX_BASE_ADDR + 0x030 + ((_n) << 2))
+#define REG_CR_MBQ8_CTRL(_n) (NPU_MBOX_BASE_ADDR + 0x0b0 + ((_n) << 2))
+#define REG_CR_NPU_MIB(_n) (NPU_MBOX_BASE_ADDR + 0x140 + ((_n) << 2))
+
+#define NPU_TIMER_BASE_ADDR 0x310100
+#define REG_WDT_TIMER_CTRL(_n) (NPU_TIMER_BASE_ADDR + ((_n) * 0x100))
+#define WDT_EN_MASK BIT(25)
+#define WDT_INTR_MASK BIT(21)
+
+enum {
+ NPU_OP_SET = 1,
+ NPU_OP_SET_NO_WAIT,
+ NPU_OP_GET,
+ NPU_OP_GET_NO_WAIT,
+};
+
+enum {
+ NPU_FUNC_WIFI,
+ NPU_FUNC_TUNNEL,
+ NPU_FUNC_NOTIFY,
+ NPU_FUNC_DBA,
+ NPU_FUNC_TR471,
+ NPU_FUNC_PPE,
+};
+
+enum {
+ NPU_MBOX_ERROR,
+ NPU_MBOX_SUCCESS,
+};
+
+enum {
+ PPE_FUNC_SET_WAIT,
+ PPE_FUNC_SET_WAIT_HWNAT_INIT,
+ PPE_FUNC_SET_WAIT_HWNAT_DEINIT,
+ PPE_FUNC_SET_WAIT_API,
+};
+
+enum {
+ PPE2_SRAM_SET_ENTRY,
+ PPE_SRAM_SET_ENTRY,
+ PPE_SRAM_SET_VAL,
+ PPE_SRAM_RESET_VAL,
+};
+
+enum {
+ QDMA_WAN_ETHER = 1,
+ QDMA_WAN_PON_XDSL,
+};
+
+#define MBOX_MSG_FUNC_ID GENMASK(14, 11)
+#define MBOX_MSG_STATIC_BUF BIT(5)
+#define MBOX_MSG_STATUS GENMASK(4, 2)
+#define MBOX_MSG_DONE BIT(1)
+#define MBOX_MSG_WAIT_RSP BIT(0)
+
+#define PPE_TYPE_L2B_IPV4 2
+#define PPE_TYPE_L2B_IPV4_IPV6 3
+
+struct ppe_mbox_data {
+ u32 func_type;
+ u32 func_id;
+ union {
+ struct {
+ u8 cds;
+ u8 xpon_hal_api;
+ u8 wan_xsi;
+ u8 ct_joyme4;
+ int ppe_type;
+ int wan_mode;
+ int wan_sel;
+ } init_info;
+ struct {
+ int func_id;
+ u32 size;
+ u32 data;
+ } set_info;
+ };
+};
+
+static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id,
+ void *p, int size)
+{
+ u16 core = 0; /* FIXME */
+ u32 val, offset = core << 4;
+ dma_addr_t dma_addr;
+ void *addr;
+ int ret;
+
+ addr = kmemdup(p, size, GFP_ATOMIC);
+ if (!addr)
+ return -ENOMEM;
+
+ dma_addr = dma_map_single(npu->dev, addr, size, DMA_TO_DEVICE);
+ ret = dma_mapping_error(npu->dev, dma_addr);
+ if (ret)
+ goto out;
+
+ spin_lock_bh(&npu->cores[core].lock);
+
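+ /* mailbox handshake: post the message address and size, bump the sequence counter, then kick the function id with WAIT_RSP set and poll for DONE */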
+ regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(0) + offset, dma_addr);
+ regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(1) + offset, size);
+ regmap_read(npu->regmap, REG_CR_MBQ0_CTRL(2) + offset, &val);
+ regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(2) + offset, val + 1);
+ val = FIELD_PREP(MBOX_MSG_FUNC_ID, func_id) | MBOX_MSG_WAIT_RSP;
+ regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(3) + offset, val);
+
+ ret = regmap_read_poll_timeout_atomic(npu->regmap,
+ REG_CR_MBQ0_CTRL(3) + offset,
+ val, (val & MBOX_MSG_DONE),
+ 100, 100 * MSEC_PER_SEC);
+ if (!ret && FIELD_GET(MBOX_MSG_STATUS, val) != NPU_MBOX_SUCCESS)
+ ret = -EINVAL;
+
+ spin_unlock_bh(&npu->cores[core].lock);
+
+ dma_unmap_single(npu->dev, dma_addr, size, DMA_TO_DEVICE);
+out:
+ kfree(addr);
+
+ return ret;
+}
+
+static int airoha_npu_run_firmware(struct device *dev, void __iomem *base,
+ struct reserved_mem *rmem)
+{
+ const struct firmware *fw;
+ void __iomem *addr;
+ int ret;
+
+ ret = request_firmware(&fw, NPU_EN7581_FIRMWARE_RV32, dev);
+ if (ret)
+ return ret == -ENOENT ? -EPROBE_DEFER : ret;
+
+ if (fw->size > NPU_EN7581_FIRMWARE_RV32_MAX_SIZE) {
+ dev_err(dev, "%s: fw size too overlimit (%zu)\n",
+ NPU_EN7581_FIRMWARE_RV32, fw->size);
+ ret = -E2BIG;
+ goto out;
+ }
+
+ addr = devm_ioremap(dev, rmem->base, rmem->size);
+ if (!addr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
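+ /* copy the RV32 core image into the reserved DRAM region */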
+ memcpy_toio(addr, fw->data, fw->size);
+ release_firmware(fw);
+
+ ret = request_firmware(&fw, NPU_EN7581_FIRMWARE_DATA, dev);
+ if (ret)
+ return ret == -ENOENT ? -EPROBE_DEFER : ret;
+
+ if (fw->size > NPU_EN7581_FIRMWARE_DATA_MAX_SIZE) {
+ dev_err(dev, "%s: fw size too overlimit (%zu)\n",
+ NPU_EN7581_FIRMWARE_DATA, fw->size);
+ ret = -E2BIG;
+ goto out;
+ }
+
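+ /* copy the NPU data image into the on-chip SRAM */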
+ memcpy_toio(base + REG_NPU_LOCAL_SRAM, fw->data, fw->size);
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static irqreturn_t airoha_npu_mbox_handler(int irq, void *npu_instance)
+{
+ struct airoha_npu *npu = npu_instance;
+
+ /* clear mbox interrupt status */
+ regmap_write(npu->regmap, REG_CR_MBOX_INT_STATUS,
+ MBOX_INT_STATUS_MASK);
+
+ /* acknowledge npu */
+ regmap_update_bits(npu->regmap, REG_CR_MBQ8_CTRL(3),
+ MBOX_MSG_STATUS | MBOX_MSG_DONE, MBOX_MSG_DONE);
+
+ return IRQ_HANDLED;
+}
+
+static void airoha_npu_wdt_work(struct work_struct *work)
+{
+ struct airoha_npu_core *core;
+ struct airoha_npu *npu;
+ void *dump;
+ u32 val[3];
+ int c;
+
+ core = container_of(work, struct airoha_npu_core, wdt_work);
+ npu = core->npu;
+
+ dump = vzalloc(NPU_DUMP_SIZE);
+ if (!dump)
+ return;
+
+ c = core - &npu->cores[0];
+ regmap_bulk_read(npu->regmap, REG_PC_DBG(c), val, ARRAY_SIZE(val));
+ snprintf(dump, NPU_DUMP_SIZE, "PC: %08x SP: %08x LR: %08x\n",
+ val[0], val[1], val[2]);
+
+ dev_coredumpv(npu->dev, dump, NPU_DUMP_SIZE, GFP_KERNEL);
+}
+
+static irqreturn_t airoha_npu_wdt_handler(int irq, void *core_instance)
+{
+ struct airoha_npu_core *core = core_instance;
+ struct airoha_npu *npu = core->npu;
+ int c = core - &npu->cores[0];
+ u32 val;
+
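+ /* ack the watchdog interrupt and schedule a coredump only if the timer is still enabled */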
+ regmap_set_bits(npu->regmap, REG_WDT_TIMER_CTRL(c), WDT_INTR_MASK);
+ if (!regmap_read(npu->regmap, REG_WDT_TIMER_CTRL(c), &val) &&
+ FIELD_GET(WDT_EN_MASK, val))
+ schedule_work(&core->wdt_work);
+
+ return IRQ_HANDLED;
+}
+
+static int airoha_npu_ppe_init(struct airoha_npu *npu)
+{
+ struct ppe_mbox_data ppe_data = {
+ .func_type = NPU_OP_SET,
+ .func_id = PPE_FUNC_SET_WAIT_HWNAT_INIT,
+ .init_info = {
+ .ppe_type = PPE_TYPE_L2B_IPV4_IPV6,
+ .wan_mode = QDMA_WAN_ETHER,
+ },
+ };
+
+ return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
+ sizeof(struct ppe_mbox_data));
+}
+
+static int airoha_npu_ppe_deinit(struct airoha_npu *npu)
+{
+ struct ppe_mbox_data ppe_data = {
+ .func_type = NPU_OP_SET,
+ .func_id = PPE_FUNC_SET_WAIT_HWNAT_DEINIT,
+ };
+
+ return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
+ sizeof(struct ppe_mbox_data));
+}
+
+static int airoha_npu_ppe_flush_sram_entries(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ int sram_num_entries)
+{
+ struct ppe_mbox_data ppe_data = {
+ .func_type = NPU_OP_SET,
+ .func_id = PPE_FUNC_SET_WAIT_API,
+ .set_info = {
+ .func_id = PPE_SRAM_RESET_VAL,
+ .data = foe_addr,
+ .size = sram_num_entries,
+ },
+ };
+
+ return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
+ sizeof(struct ppe_mbox_data));
+}
+
+static int airoha_npu_foe_commit_entry(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ u32 entry_size, u32 hash, bool ppe2)
+{
+ struct ppe_mbox_data ppe_data = {
+ .func_type = NPU_OP_SET,
+ .func_id = PPE_FUNC_SET_WAIT_API,
+ .set_info = {
+ .data = foe_addr,
+ .size = entry_size,
+ },
+ };
+ int err;
+
+ ppe_data.set_info.func_id = ppe2 ? PPE2_SRAM_SET_ENTRY
+ : PPE_SRAM_SET_ENTRY;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
+ sizeof(struct ppe_mbox_data));
+ if (err)
+ return err;
+
+ ppe_data.set_info.func_id = PPE_SRAM_SET_VAL;
+ ppe_data.set_info.data = hash;
+ ppe_data.set_info.size = sizeof(u32);
+
+ return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
+ sizeof(struct ppe_mbox_data));
+}
+
+struct airoha_npu *airoha_npu_get(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ struct airoha_npu *npu;
+
+ np = of_parse_phandle(dev->of_node, "airoha,npu", 0);
+ if (!np)
+ return ERR_PTR(-ENODEV);
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ dev_err(dev, "cannot find device node %s\n", np->name);
+ of_node_put(np);
+ return ERR_PTR(-ENODEV);
+ }
+ of_node_put(np);
+
+ if (!try_module_get(THIS_MODULE)) {
+ dev_err(dev, "failed to get the device driver module\n");
+ npu = ERR_PTR(-ENODEV);
+ goto error_pdev_put;
+ }
+
+ npu = platform_get_drvdata(pdev);
+ if (!npu) {
+ npu = ERR_PTR(-ENODEV);
+ goto error_module_put;
+ }
+
+ if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER)) {
+ dev_err(&pdev->dev,
+ "failed to create device link to consumer %s\n",
+ dev_name(dev));
+ npu = ERR_PTR(-EINVAL);
+ goto error_module_put;
+ }
+
+ return npu;
+
+error_module_put:
+ module_put(THIS_MODULE);
+error_pdev_put:
+ platform_device_put(pdev);
+
+ return npu;
+}
+EXPORT_SYMBOL_GPL(airoha_npu_get);
+
+void airoha_npu_put(struct airoha_npu *npu)
+{
+ module_put(THIS_MODULE);
+ put_device(npu->dev);
+}
+EXPORT_SYMBOL_GPL(airoha_npu_put);
+
+static const struct of_device_id of_airoha_npu_match[] = {
+ { .compatible = "airoha,en7581-npu" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_airoha_npu_match);
+
+static const struct regmap_config regmap_config = {
+ .name = "npu",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .disable_locking = true,
+};
+
+static int airoha_npu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct reserved_mem *rmem;
+ struct airoha_npu *npu;
+ struct device_node *np;
+ void __iomem *base;
+ int i, irq, err;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ npu = devm_kzalloc(dev, sizeof(*npu), GFP_KERNEL);
+ if (!npu)
+ return -ENOMEM;
+
+ npu->dev = dev;
+ npu->ops.ppe_init = airoha_npu_ppe_init;
+ npu->ops.ppe_deinit = airoha_npu_ppe_deinit;
+ npu->ops.ppe_flush_sram_entries = airoha_npu_ppe_flush_sram_entries;
+ npu->ops.ppe_foe_commit_entry = airoha_npu_foe_commit_entry;
+
+ npu->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
+ if (IS_ERR(npu->regmap))
+ return PTR_ERR(npu->regmap);
+
+ np = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (!np)
+ return -ENODEV;
+
+ rmem = of_reserved_mem_lookup(np);
+ of_node_put(np);
+
+ if (!rmem)
+ return -ENODEV;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(dev, irq, airoha_npu_mbox_handler,
+ IRQF_SHARED, "airoha-npu-mbox", npu);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(npu->cores); i++) {
+ struct airoha_npu_core *core = &npu->cores[i];
+
+ spin_lock_init(&core->lock);
+ core->npu = npu;
+
+ irq = platform_get_irq(pdev, i + 1);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(dev, irq, airoha_npu_wdt_handler,
+ IRQF_SHARED, "airoha-npu-wdt", core);
+ if (err)
+ return err;
+
+ INIT_WORK(&core->wdt_work, airoha_npu_wdt_work);
+ }
+
+ err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+
+ err = airoha_npu_run_firmware(dev, base, rmem);
+ if (err)
+ return dev_err_probe(dev, err, "failed to run npu firmware\n");
+
+ regmap_write(npu->regmap, REG_CR_NPU_MIB(10),
+ rmem->base + NPU_EN7581_FIRMWARE_RV32_MAX_SIZE);
+ regmap_write(npu->regmap, REG_CR_NPU_MIB(11), 0x40000); /* SRAM 256K */
+ regmap_write(npu->regmap, REG_CR_NPU_MIB(12), 0);
+ regmap_write(npu->regmap, REG_CR_NPU_MIB(21), 1);
+ msleep(100);
+
+ /* set the boot address for each NPU core */
+ for (i = 0; i < NPU_NUM_CORES; i++)
+ regmap_write(npu->regmap, REG_CR_BOOT_BASE(i), rmem->base);
+ usleep_range(1000, 2000);
+
+ /* enable NPU cores */
+ /* do not start core3 since it is used for WiFi offloading */
+ regmap_write(npu->regmap, REG_CR_BOOT_CONFIG, 0xf7);
+ regmap_write(npu->regmap, REG_CR_BOOT_TRIGGER, 0x1);
+ msleep(100);
+
+ platform_set_drvdata(pdev, npu);
+
+ return 0;
+}
+
+static void airoha_npu_remove(struct platform_device *pdev)
+{
+ struct airoha_npu *npu = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(npu->cores); i++)
+ cancel_work_sync(&npu->cores[i].wdt_work);
+}
+
+static struct platform_driver airoha_npu_driver = {
+ .probe = airoha_npu_probe,
+ .remove = airoha_npu_remove,
+ .driver = {
+ .name = "airoha-npu",
+ .of_match_table = of_airoha_npu_match,
+ },
+};
+module_platform_driver(airoha_npu_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("Airoha Network Processor Unit driver");
diff --git a/drivers/net/ethernet/airoha/airoha_npu.h b/drivers/net/ethernet/airoha/airoha_npu.h
new file mode 100644
index 000000000000..a2b8ae4d9473
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_npu.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#define NPU_NUM_CORES 8
+
+struct airoha_npu {
+ struct device *dev;
+ struct regmap *regmap;
+
+ struct airoha_npu_core {
+ struct airoha_npu *npu;
+ /* protect concurrent npu memory accesses */
+ spinlock_t lock;
+ struct work_struct wdt_work;
+ } cores[NPU_NUM_CORES];
+
+ struct {
+ int (*ppe_init)(struct airoha_npu *npu);
+ int (*ppe_deinit)(struct airoha_npu *npu);
+ int (*ppe_flush_sram_entries)(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ int sram_num_entries);
+ int (*ppe_foe_commit_entry)(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ u32 entry_size, u32 hash,
+ bool ppe2);
+ } ops;
+};
+
+struct airoha_npu *airoha_npu_get(struct device *dev);
+void airoha_npu_put(struct airoha_npu *npu);
diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
new file mode 100644
index 000000000000..8b55e871352d
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
@@ -0,0 +1,910 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/rhashtable.h>
+#include <net/ipv6.h>
+#include <net/pkt_cls.h>
+
+#include "airoha_npu.h"
+#include "airoha_regs.h"
+#include "airoha_eth.h"
+
+static DEFINE_MUTEX(flow_offload_mutex);
+static DEFINE_SPINLOCK(ppe_lock);
+
+static const struct rhashtable_params airoha_flow_table_params = {
+ .head_offset = offsetof(struct airoha_flow_table_entry, node),
+ .key_offset = offsetof(struct airoha_flow_table_entry, cookie),
+ .key_len = sizeof(unsigned long),
+ .automatic_shrinking = true,
+};
+
+static bool airoha_ppe2_is_enabled(struct airoha_eth *eth)
+{
+ return airoha_fe_rr(eth, REG_PPE_GLO_CFG(1)) & PPE_GLO_CFG_EN_MASK;
+}
+
+static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe)
+{
+ u16 timestamp = airoha_fe_rr(ppe->eth, REG_FE_FOE_TS);
+
+ return FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, timestamp);
+}
+
+static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
+{
+ u32 sram_tb_size, sram_num_entries, dram_num_entries;
+ struct airoha_eth *eth = ppe->eth;
+ int i;
+
+ sram_tb_size = PPE_SRAM_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
+ dram_num_entries = PPE_RAM_NUM_ENTRIES_SHIFT(PPE_DRAM_NUM_ENTRIES);
+
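+ /* per-PPE setup: table base, bind ageing deltas, hash configuration, seed and per-port egress MTU */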
+ for (i = 0; i < PPE_NUM; i++) {
+ int p;
+
+ airoha_fe_wr(eth, REG_PPE_TB_BASE(i),
+ ppe->foe_dma + sram_tb_size);
+
+ airoha_fe_rmw(eth, REG_PPE_BND_AGE0(i),
+ PPE_BIND_AGE0_DELTA_NON_L4 |
+ PPE_BIND_AGE0_DELTA_UDP,
+ FIELD_PREP(PPE_BIND_AGE0_DELTA_NON_L4, 1) |
+ FIELD_PREP(PPE_BIND_AGE0_DELTA_UDP, 12));
+ airoha_fe_rmw(eth, REG_PPE_BND_AGE1(i),
+ PPE_BIND_AGE1_DELTA_TCP_FIN |
+ PPE_BIND_AGE1_DELTA_TCP,
+ FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
+ FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP, 7));
+
+ airoha_fe_rmw(eth, REG_PPE_TB_HASH_CFG(i),
+ PPE_SRAM_TABLE_EN_MASK |
+ PPE_SRAM_HASH1_EN_MASK |
+ PPE_DRAM_TABLE_EN_MASK |
+ PPE_SRAM_HASH0_MODE_MASK |
+ PPE_SRAM_HASH1_MODE_MASK |
+ PPE_DRAM_HASH0_MODE_MASK |
+ PPE_DRAM_HASH1_MODE_MASK,
+ FIELD_PREP(PPE_SRAM_TABLE_EN_MASK, 1) |
+ FIELD_PREP(PPE_SRAM_HASH1_EN_MASK, 1) |
+ FIELD_PREP(PPE_SRAM_HASH1_MODE_MASK, 1) |
+ FIELD_PREP(PPE_DRAM_HASH1_MODE_MASK, 3));
+
+ airoha_fe_rmw(eth, REG_PPE_TB_CFG(i),
+ PPE_TB_CFG_SEARCH_MISS_MASK |
+ PPE_TB_ENTRY_SIZE_MASK,
+ FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) |
+ FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0));
+
+ airoha_fe_wr(eth, REG_PPE_HASH_SEED(i), PPE_HASH_SEED);
+
+ for (p = 0; p < ARRAY_SIZE(eth->ports); p++)
+ airoha_fe_rmw(eth, REG_PPE_MTU(i, p),
+ FP0_EGRESS_MTU_MASK |
+ FP1_EGRESS_MTU_MASK,
+ FIELD_PREP(FP0_EGRESS_MTU_MASK,
+ AIROHA_MAX_MTU) |
+ FIELD_PREP(FP1_EGRESS_MTU_MASK,
+ AIROHA_MAX_MTU));
+ }
+
+ if (airoha_ppe2_is_enabled(eth)) {
+ sram_num_entries =
+ PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_ENTRIES);
+ airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
+ PPE_SRAM_TB_NUM_ENTRY_MASK |
+ PPE_DRAM_TB_NUM_ENTRY_MASK,
+ FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
+ sram_num_entries) |
+ FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
+ dram_num_entries));
+ airoha_fe_rmw(eth, REG_PPE_TB_CFG(1),
+ PPE_SRAM_TB_NUM_ENTRY_MASK |
+ PPE_DRAM_TB_NUM_ENTRY_MASK,
+ FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
+ sram_num_entries) |
+ FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
+ dram_num_entries));
+ } else {
+ sram_num_entries =
+ PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_ENTRIES);
+ airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
+ PPE_SRAM_TB_NUM_ENTRY_MASK |
+ PPE_DRAM_TB_NUM_ENTRY_MASK,
+ FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
+ sram_num_entries) |
+ FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
+ dram_num_entries));
+ }
+}
+
+static void airoha_ppe_flow_mangle_eth(const struct flow_action_entry *act, void *eth)
+{
+ void *dest = eth + act->mangle.offset;
+ const void *src = &act->mangle.val;
+
+ if (act->mangle.offset > 8)
+ return;
+
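+ /* a 16-bit mask means only half of the 4-byte mangle word is copied into the header */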
+ if (act->mangle.mask == 0xffff) {
+ src += 2;
+ dest += 2;
+ }
+
+ memcpy(dest, src, act->mangle.mask ? 2 : 4);
+}
+
+static int airoha_ppe_flow_mangle_ports(const struct flow_action_entry *act,
+ struct airoha_flow_data *data)
+{
+ u32 val = be32_to_cpu((__force __be32)act->mangle.val);
+
+ switch (act->mangle.offset) {
+ case 0:
+ if ((__force __be32)act->mangle.mask == ~cpu_to_be32(0xffff))
+ data->dst_port = cpu_to_be16(val);
+ else
+ data->src_port = cpu_to_be16(val >> 16);
+ break;
+ case 2:
+ data->dst_port = cpu_to_be16(val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_flow_mangle_ipv4(const struct flow_action_entry *act,
+ struct airoha_flow_data *data)
+{
+ __be32 *dest;
+
+ switch (act->mangle.offset) {
+ case offsetof(struct iphdr, saddr):
+ dest = &data->v4.src_addr;
+ break;
+ case offsetof(struct iphdr, daddr):
+ dest = &data->v4.dst_addr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ memcpy(dest, &act->mangle.val, sizeof(u32));
+
+ return 0;
+}
+
+static int airoha_get_dsa_port(struct net_device **dev)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+ struct dsa_port *dp = dsa_port_from_netdev(*dev);
+
+ if (IS_ERR(dp))
+ return -ENODEV;
+
+ *dev = dsa_port_to_conduit(dp);
+ return dp->index;
+#else
+ return -ENODEV;
+#endif
+}
+
+static int airoha_ppe_foe_entry_prepare(struct airoha_foe_entry *hwe,
+ struct net_device *dev, int type,
+ struct airoha_flow_data *data,
+ int l4proto)
+{
+ int dsa_port = airoha_get_dsa_port(&dev);
+ struct airoha_foe_mac_info_common *l2;
+ u32 qdata, ports_pad, val;
+
+ memset(hwe, 0, sizeof(*hwe));
+
+ val = FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE, AIROHA_FOE_STATE_BIND) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_PACKET_TYPE, type) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_UDP, l4proto == IPPROTO_UDP) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_VLAN_LAYER, data->vlan.num) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_VPM, data->vlan.num) |
+ AIROHA_FOE_IB1_BIND_TTL;
+ hwe->ib1 = val;
+
+ val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f) |
+ AIROHA_FOE_IB2_PSE_QOS;
+ if (dsa_port >= 0)
+ val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, dsa_port);
+
+ if (dev) {
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ u8 pse_port;
+
+ if (dsa_port >= 0)
+ pse_port = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
+ else
+ pse_port = 2; /* uplink relies on GDM2 loopback */
+ val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port);
+ }
+
+ if (is_multicast_ether_addr(data->eth.h_dest))
+ val |= AIROHA_FOE_IB2_MULTICAST;
+
+ ports_pad = 0xa5a5a500 | (l4proto & 0xff);
+ if (type == PPE_PKT_TYPE_IPV4_ROUTE)
+ hwe->ipv4.orig_tuple.ports = ports_pad;
+ if (type == PPE_PKT_TYPE_IPV6_ROUTE_3T)
+ hwe->ipv6.ports = ports_pad;
+
+ qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f);
+ if (type == PPE_PKT_TYPE_BRIDGE) {
+ hwe->bridge.dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
+ hwe->bridge.dest_mac_lo =
+ get_unaligned_be16(data->eth.h_dest + 4);
+ hwe->bridge.src_mac_hi =
+ get_unaligned_be16(data->eth.h_source);
+ hwe->bridge.src_mac_lo =
+ get_unaligned_be32(data->eth.h_source + 2);
+ hwe->bridge.data = qdata;
+ hwe->bridge.ib2 = val;
+ l2 = &hwe->bridge.l2.common;
+ } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ hwe->ipv6.data = qdata;
+ hwe->ipv6.ib2 = val;
+ l2 = &hwe->ipv6.l2;
+ } else {
+ hwe->ipv4.data = qdata;
+ hwe->ipv4.ib2 = val;
+ l2 = &hwe->ipv4.l2.common;
+ }
+
+ l2->dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
+ l2->dest_mac_lo = get_unaligned_be16(data->eth.h_dest + 4);
+ if (type <= PPE_PKT_TYPE_IPV4_DSLITE) {
+ l2->src_mac_hi = get_unaligned_be32(data->eth.h_source);
+ hwe->ipv4.l2.src_mac_lo =
+ get_unaligned_be16(data->eth.h_source + 4);
+ } else {
+ l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, 0xf);
+ }
+
+ if (data->vlan.num) {
+ l2->etype = dsa_port >= 0 ? BIT(dsa_port) : 0;
+ l2->vlan1 = data->vlan.hdr[0].id;
+ if (data->vlan.num == 2)
+ l2->vlan2 = data->vlan.hdr[1].id;
+ } else if (dsa_port >= 0) {
+ l2->etype = BIT(15) | BIT(dsa_port);
+ } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ l2->etype = ETH_P_IPV6;
+ } else {
+ l2->etype = ETH_P_IP;
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_foe_entry_set_ipv4_tuple(struct airoha_foe_entry *hwe,
+ struct airoha_flow_data *data,
+ bool egress)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ struct airoha_foe_ipv4_tuple *t;
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV4_HNAPT:
+ if (egress) {
+ t = &hwe->ipv4.new_tuple;
+ break;
+ }
+ fallthrough;
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ case PPE_PKT_TYPE_IPV4_ROUTE:
+ t = &hwe->ipv4.orig_tuple;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ t->src_ip = be32_to_cpu(data->v4.src_addr);
+ t->dest_ip = be32_to_cpu(data->v4.dst_addr);
+
+ if (type != PPE_PKT_TYPE_IPV4_ROUTE) {
+ t->src_port = be16_to_cpu(data->src_port);
+ t->dest_port = be16_to_cpu(data->dst_port);
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_foe_entry_set_ipv6_tuple(struct airoha_foe_entry *hwe,
+ struct airoha_flow_data *data)
+
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ u32 *src, *dest;
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV6_ROUTE_5T:
+ case PPE_PKT_TYPE_IPV6_6RD:
+ hwe->ipv6.src_port = be16_to_cpu(data->src_port);
+ hwe->ipv6.dest_port = be16_to_cpu(data->dst_port);
+ fallthrough;
+ case PPE_PKT_TYPE_IPV6_ROUTE_3T:
+ src = hwe->ipv6.src_ip;
+ dest = hwe->ipv6.dest_ip;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ ipv6_addr_be32_to_cpu(src, data->v6.src_addr.s6_addr32);
+ ipv6_addr_be32_to_cpu(dest, data->v6.dst_addr.s6_addr32);
+
+ return 0;
+}
+
+static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ u32 hash, hv1, hv2, hv3;
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV4_ROUTE:
+ case PPE_PKT_TYPE_IPV4_HNAPT:
+ hv1 = hwe->ipv4.orig_tuple.ports;
+ hv2 = hwe->ipv4.orig_tuple.dest_ip;
+ hv3 = hwe->ipv4.orig_tuple.src_ip;
+ break;
+ case PPE_PKT_TYPE_IPV6_ROUTE_3T:
+ case PPE_PKT_TYPE_IPV6_ROUTE_5T:
+ hv1 = hwe->ipv6.src_ip[3] ^ hwe->ipv6.dest_ip[3];
+ hv1 ^= hwe->ipv6.ports;
+
+ hv2 = hwe->ipv6.src_ip[2] ^ hwe->ipv6.dest_ip[2];
+ hv2 ^= hwe->ipv6.dest_ip[0];
+
+ hv3 = hwe->ipv6.src_ip[1] ^ hwe->ipv6.dest_ip[1];
+ hv3 ^= hwe->ipv6.src_ip[0];
+ break;
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ case PPE_PKT_TYPE_IPV6_6RD:
+ default:
+ WARN_ON_ONCE(1);
+ return PPE_HASH_MASK;
+ }
+
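+ /* fold the three tuple words into a single FOE table index */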
+ hash = (hv1 & hv2) | ((~hv1) & hv3);
+ hash = (hash >> 24) | ((hash & 0xffffff) << 8);
+ hash ^= hv1 ^ hv2 ^ hv3;
+ hash ^= hash >> 16;
+ hash &= PPE_NUM_ENTRIES - 1;
+
+ return hash;
+}
+
+struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
+ u32 hash)
+{
+ if (hash < PPE_SRAM_NUM_ENTRIES) {
+ u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
+ struct airoha_eth *eth = ppe->eth;
+ bool ppe2;
+ u32 val;
+ int i;
+
+ ppe2 = airoha_ppe2_is_enabled(ppe->eth) &&
+ hash >= PPE1_SRAM_NUM_ENTRIES;
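+ /* SRAM entries are not read directly: issue a read-back request and copy the entry words into the CPU-side table */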
+ airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
+ FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
+ PPE_SRAM_CTRL_REQ_MASK);
+ if (read_poll_timeout_atomic(airoha_fe_rr, val,
+ val & PPE_SRAM_CTRL_ACK_MASK,
+ 10, 100, false, eth,
+ REG_PPE_RAM_CTRL(ppe2)))
+ return NULL;
+
+ for (i = 0; i < sizeof(struct airoha_foe_entry) / 4; i++)
+ hwe[i] = airoha_fe_rr(eth,
+ REG_PPE_RAM_ENTRY(ppe2, i));
+ }
+
+ return ppe->foe + hash * sizeof(struct airoha_foe_entry);
+}
+
+static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
+ struct airoha_foe_entry *hwe)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
+ int len;
+
+ if ((hwe->ib1 ^ e->data.ib1) & AIROHA_FOE_IB1_BIND_UDP)
+ return false;
+
+ if (type > PPE_PKT_TYPE_IPV4_DSLITE)
+ len = offsetof(struct airoha_foe_entry, ipv6.data);
+ else
+ len = offsetof(struct airoha_foe_entry, ipv4.ib2);
+
+ return !memcmp(&e->data.d, &hwe->d, len - sizeof(hwe->ib1));
+}
+
+static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
+ struct airoha_foe_entry *e,
+ u32 hash)
+{
+ struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
+ u32 ts = airoha_ppe_get_timestamp(ppe);
+ struct airoha_eth *eth = ppe->eth;
+
+ memcpy(&hwe->d, &e->d, sizeof(*hwe) - sizeof(hwe->ib1));
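+ /* make sure the entry body is written before ib1 publishes the new state */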
+ wmb();
+
+ e->ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
+ e->ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_TIMESTAMP, ts);
+ hwe->ib1 = e->ib1;
+
+ if (hash < PPE_SRAM_NUM_ENTRIES) {
+ dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe);
+ bool ppe2 = airoha_ppe2_is_enabled(eth) &&
+ hash >= PPE1_SRAM_NUM_ENTRIES;
+ struct airoha_npu *npu;
+ int err = -ENODEV;
+
+ rcu_read_lock();
+ npu = rcu_dereference(eth->npu);
+ if (npu)
+ err = npu->ops.ppe_foe_commit_entry(npu, addr,
+ sizeof(*hwe), hash,
+ ppe2);
+ rcu_read_unlock();
+
+ return err;
+ }
+
+ return 0;
+}
+
+static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe, u32 hash)
+{
+ struct airoha_flow_table_entry *e;
+ struct airoha_foe_entry *hwe;
+ struct hlist_node *n;
+ u32 index, state;
+
+ spin_lock_bh(&ppe_lock);
+
+ hwe = airoha_ppe_foe_get_entry(ppe, hash);
+ if (!hwe)
+ goto unlock;
+
+ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
+ if (state == AIROHA_FOE_STATE_BIND)
+ goto unlock;
+
+ index = airoha_ppe_foe_get_entry_hash(hwe);
+ hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) {
+ if (airoha_ppe_foe_compare_entry(e, hwe)) {
+ airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
+ e->hash = hash;
+ break;
+ }
+ }
+unlock:
+ spin_unlock_bh(&ppe_lock);
+}
+
+static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ u32 hash = airoha_ppe_foe_get_entry_hash(&e->data);
+
+ e->hash = 0xffff;
+
+ spin_lock_bh(&ppe_lock);
+ hlist_add_head(&e->list, &ppe->foe_flow[hash]);
+ spin_unlock_bh(&ppe_lock);
+
+ return 0;
+}
+
+static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ spin_lock_bh(&ppe_lock);
+
+ hlist_del_init(&e->list);
+ if (e->hash != 0xffff) {
+ e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
+ e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
+ AIROHA_FOE_STATE_INVALID);
+ airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash);
+ e->hash = 0xffff;
+ }
+
+ spin_unlock_bh(&ppe_lock);
+}
+
+static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port,
+ struct flow_cls_offload *f)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct airoha_eth *eth = port->qdma->eth;
+ struct airoha_flow_table_entry *e;
+ struct airoha_flow_data data = {};
+ struct net_device *odev = NULL;
+ struct flow_action_entry *act;
+ struct airoha_foe_entry hwe;
+ int err, i, offload_type;
+ u16 addr_type = 0;
+ u8 l4proto = 0;
+
+ if (rhashtable_lookup(&eth->flow_table, &f->cookie,
+ airoha_flow_table_params))
+ return -EEXIST;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
+ return -EOPNOTSUPP;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
+ if (flow_rule_has_control_flags(match.mask->flags,
+ f->common.extack))
+ return -EOPNOTSUPP;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ l4proto = match.key->ip_proto;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ switch (addr_type) {
+ case 0:
+ offload_type = PPE_PKT_TYPE_BRIDGE;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
+ memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
+ } else {
+ return -EOPNOTSUPP;
+ }
+ break;
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ offload_type = PPE_PKT_TYPE_IPV4_HNAPT;
+ break;
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+ offload_type = PPE_PKT_TYPE_IPV6_ROUTE_5T;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_MANGLE:
+ if (offload_type == PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
+ if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
+ airoha_ppe_flow_mangle_eth(act, &data.eth);
+ break;
+ case FLOW_ACTION_REDIRECT:
+ odev = act->dev;
+ break;
+ case FLOW_ACTION_CSUM:
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ if (data.vlan.num == 2 ||
+ act->vlan.proto != htons(ETH_P_8021Q))
+ return -EOPNOTSUPP;
+
+ data.vlan.hdr[data.vlan.num].id = act->vlan.vid;
+ data.vlan.hdr[data.vlan.num].proto = act->vlan.proto;
+ data.vlan.num++;
+ break;
+ case FLOW_ACTION_VLAN_POP:
+ break;
+ case FLOW_ACTION_PPPOE_PUSH:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (!is_valid_ether_addr(data.eth.h_source) ||
+ !is_valid_ether_addr(data.eth.h_dest))
+ return -EINVAL;
+
+ err = airoha_ppe_foe_entry_prepare(&hwe, odev, offload_type,
+ &data, l4proto);
+ if (err)
+ return err;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports ports;
+
+ if (offload_type == PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
+ flow_rule_match_ports(rule, &ports);
+ data.src_port = ports.key->src;
+ data.dst_port = ports.key->dst;
+ } else if (offload_type != PPE_PKT_TYPE_BRIDGE) {
+ return -EOPNOTSUPP;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs addrs;
+
+ flow_rule_match_ipv4_addrs(rule, &addrs);
+ data.v4.src_addr = addrs.key->src;
+ data.v4.dst_addr = addrs.key->dst;
+ airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, false);
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs addrs;
+
+ flow_rule_match_ipv6_addrs(rule, &addrs);
+
+ data.v6.src_addr = addrs.key->src;
+ data.v6.dst_addr = addrs.key->dst;
+ airoha_ppe_foe_entry_set_ipv6_tuple(&hwe, &data);
+ }
+
+ flow_action_for_each(i, act, &rule->action) {
+ if (act->id != FLOW_ACTION_MANGLE)
+ continue;
+
+ if (offload_type == PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
+ switch (act->mangle.htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ err = airoha_ppe_flow_mangle_ports(act, &data);
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ err = airoha_ppe_flow_mangle_ipv4(act, &data);
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+ /* handled earlier */
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (err)
+ return err;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ err = airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, true);
+ if (err)
+ return err;
+ }
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->cookie = f->cookie;
+ memcpy(&e->data, &hwe, sizeof(e->data));
+
+ err = airoha_ppe_foe_flow_commit_entry(eth->ppe, e);
+ if (err)
+ goto free_entry;
+
+ err = rhashtable_insert_fast(&eth->flow_table, &e->node,
+ airoha_flow_table_params);
+ if (err < 0)
+ goto remove_foe_entry;
+
+ return 0;
+
+remove_foe_entry:
+ airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
+free_entry:
+ kfree(e);
+
+ return err;
+}
+
+static int airoha_ppe_flow_offload_destroy(struct airoha_gdm_port *port,
+ struct flow_cls_offload *f)
+{
+ struct airoha_eth *eth = port->qdma->eth;
+ struct airoha_flow_table_entry *e;
+
+ e = rhashtable_lookup(&eth->flow_table, &f->cookie,
+ airoha_flow_table_params);
+ if (!e)
+ return -ENOENT;
+
+ airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
+ rhashtable_remove_fast(&eth->flow_table, &e->node,
+ airoha_flow_table_params);
+ kfree(e);
+
+ return 0;
+}
+
+static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port,
+ struct flow_cls_offload *f)
+{
+ switch (f->command) {
+ case FLOW_CLS_REPLACE:
+ return airoha_ppe_flow_offload_replace(port, f);
+ case FLOW_CLS_DESTROY:
+ return airoha_ppe_flow_offload_destroy(port, f);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe,
+ struct airoha_npu *npu)
+{
+ int i, sram_num_entries = PPE_SRAM_NUM_ENTRIES;
+ struct airoha_foe_entry *hwe = ppe->foe;
+
+ if (airoha_ppe2_is_enabled(ppe->eth))
+ sram_num_entries = sram_num_entries / 2;
+
+ for (i = 0; i < sram_num_entries; i++)
+ memset(&hwe[i], 0, sizeof(*hwe));
+
+ return npu->ops.ppe_flush_sram_entries(npu, ppe->foe_dma,
+ PPE_SRAM_NUM_ENTRIES);
+}
+
+static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
+{
+ struct airoha_npu *npu = airoha_npu_get(eth->dev);
+
+ if (IS_ERR(npu)) {
+ request_module("airoha-npu");
+ npu = airoha_npu_get(eth->dev);
+ }
+
+ return npu;
+}
+
+static int airoha_ppe_offload_setup(struct airoha_eth *eth)
+{
+ struct airoha_npu *npu = airoha_ppe_npu_get(eth);
+ int err;
+
+ if (IS_ERR(npu))
+ return PTR_ERR(npu);
+
+ err = npu->ops.ppe_init(npu);
+ if (err)
+ goto error_npu_put;
+
+ airoha_ppe_hw_init(eth->ppe);
+ err = airoha_ppe_flush_sram_entries(eth->ppe, npu);
+ if (err)
+ goto error_npu_put;
+
+ rcu_assign_pointer(eth->npu, npu);
+ synchronize_rcu();
+
+ return 0;
+
+error_npu_put:
+ airoha_npu_put(npu);
+
+ return err;
+}
+
+int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct flow_cls_offload *cls = type_data;
+ struct net_device *dev = cb_priv;
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_eth *eth = port->qdma->eth;
+ int err = 0;
+
+ if (!tc_can_offload(dev) || type != TC_SETUP_CLSFLOWER)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&flow_offload_mutex);
+
+ if (!eth->npu)
+ err = airoha_ppe_offload_setup(eth);
+ if (!err)
+ err = airoha_ppe_flow_offload_cmd(port, cls);
+
+ mutex_unlock(&flow_offload_mutex);
+
+ return err;
+}
+
+void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash)
+{
+ u16 now, diff;
+
+ if (hash > PPE_HASH_MASK)
+ return;
+
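+ /* rate-limit entry binding to at most once every 100ms per hash index */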
+ now = (u16)jiffies;
+ diff = now - ppe->foe_check_time[hash];
+ if (diff < HZ / 10)
+ return;
+
+ ppe->foe_check_time[hash] = now;
+ airoha_ppe_foe_insert_entry(ppe, hash);
+}
+
+int airoha_ppe_init(struct airoha_eth *eth)
+{
+ struct airoha_ppe *ppe;
+ int foe_size, err;
+
+ ppe = devm_kzalloc(eth->dev, sizeof(*ppe), GFP_KERNEL);
+ if (!ppe)
+ return -ENOMEM;
+
+ foe_size = PPE_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
+ ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma,
+ GFP_KERNEL);
+ if (!ppe->foe)
+ return -ENOMEM;
+
+ ppe->eth = eth;
+ eth->ppe = ppe;
+
+ ppe->foe_flow = devm_kzalloc(eth->dev,
+ PPE_NUM_ENTRIES * sizeof(*ppe->foe_flow),
+ GFP_KERNEL);
+ if (!ppe->foe_flow)
+ return -ENOMEM;
+
+ err = rhashtable_init(&eth->flow_table, &airoha_flow_table_params);
+ if (err)
+ return err;
+
+ err = airoha_ppe_debugfs_init(ppe);
+ if (err)
+ rhashtable_destroy(&eth->flow_table);
+
+ return err;
+}
+
+void airoha_ppe_deinit(struct airoha_eth *eth)
+{
+ struct airoha_npu *npu;
+
+ rcu_read_lock();
+ npu = rcu_dereference(eth->npu);
+ if (npu) {
+ npu->ops.ppe_deinit(npu);
+ airoha_npu_put(npu);
+ }
+ rcu_read_unlock();
+
+ rhashtable_destroy(&eth->flow_table);
+ debugfs_remove(eth->ppe->debugfs_dir);
+}
diff --git a/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
new file mode 100644
index 000000000000..3cdc6fd53fc7
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include "airoha_eth.h"
+
+static void airoha_debugfs_ppe_print_tuple(struct seq_file *m,
+ void *src_addr, void *dest_addr,
+ u16 *src_port, u16 *dest_port,
+ bool ipv6)
+{
+ __be32 n_addr[IPV6_ADDR_WORDS];
+
+ if (ipv6) {
+ ipv6_addr_cpu_to_be32(n_addr, src_addr);
+ seq_printf(m, "%pI6", n_addr);
+ } else {
+ seq_printf(m, "%pI4h", src_addr);
+ }
+ if (src_port)
+ seq_printf(m, ":%d", *src_port);
+
+ seq_puts(m, "->");
+
+ if (ipv6) {
+ ipv6_addr_cpu_to_be32(n_addr, dest_addr);
+ seq_printf(m, "%pI6", n_addr);
+ } else {
+ seq_printf(m, "%pI4h", dest_addr);
+ }
+ if (dest_port)
+ seq_printf(m, ":%d", *dest_port);
+}
+
+static int airoha_ppe_debugfs_foe_show(struct seq_file *m, void *private,
+ bool bind)
+{
+ static const char *const ppe_type_str[] = {
+ [PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
+ [PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
+ [PPE_PKT_TYPE_BRIDGE] = "L2B",
+ [PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
+ [PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
+ [PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
+ [PPE_PKT_TYPE_IPV6_6RD] = "6RD",
+ };
+ static const char *const ppe_state_str[] = {
+ [AIROHA_FOE_STATE_INVALID] = "INV",
+ [AIROHA_FOE_STATE_UNBIND] = "UNB",
+ [AIROHA_FOE_STATE_BIND] = "BND",
+ [AIROHA_FOE_STATE_FIN] = "FIN",
+ };
+ struct airoha_ppe *ppe = m->private;
+ int i;
+
+ for (i = 0; i < PPE_NUM_ENTRIES; i++) {
+ const char *state_str, *type_str = "UNKNOWN";
+ void *src_addr = NULL, *dest_addr = NULL;
+ u16 *src_port = NULL, *dest_port = NULL;
+ struct airoha_foe_mac_info_common *l2;
+ unsigned char h_source[ETH_ALEN] = {};
+ unsigned char h_dest[ETH_ALEN];
+ struct airoha_foe_entry *hwe;
+ u32 type, state, ib2, data;
+ bool ipv6 = false;
+
+ hwe = airoha_ppe_foe_get_entry(ppe, i);
+ if (!hwe)
+ continue;
+
+ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
+ if (!state)
+ continue;
+
+ if (bind && state != AIROHA_FOE_STATE_BIND)
+ continue;
+
+ state_str = ppe_state_str[state % ARRAY_SIZE(ppe_state_str)];
+ type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ if (type < ARRAY_SIZE(ppe_type_str) && ppe_type_str[type])
+ type_str = ppe_type_str[type];
+
+ seq_printf(m, "%05x %s %7s", i, state_str, type_str);
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV4_HNAPT:
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ src_port = &hwe->ipv4.orig_tuple.src_port;
+ dest_port = &hwe->ipv4.orig_tuple.dest_port;
+ fallthrough;
+ case PPE_PKT_TYPE_IPV4_ROUTE:
+ src_addr = &hwe->ipv4.orig_tuple.src_ip;
+ dest_addr = &hwe->ipv4.orig_tuple.dest_ip;
+ break;
+ case PPE_PKT_TYPE_IPV6_ROUTE_5T:
+ src_port = &hwe->ipv6.src_port;
+ dest_port = &hwe->ipv6.dest_port;
+ fallthrough;
+ case PPE_PKT_TYPE_IPV6_ROUTE_3T:
+ case PPE_PKT_TYPE_IPV6_6RD:
+ src_addr = &hwe->ipv6.src_ip;
+ dest_addr = &hwe->ipv6.dest_ip;
+ ipv6 = true;
+ break;
+ default:
+ break;
+ }
+
+ if (src_addr && dest_addr) {
+ seq_puts(m, " orig=");
+ airoha_debugfs_ppe_print_tuple(m, src_addr, dest_addr,
+ src_port, dest_port, ipv6);
+ }
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV4_HNAPT:
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ src_port = &hwe->ipv4.new_tuple.src_port;
+ dest_port = &hwe->ipv4.new_tuple.dest_port;
+ fallthrough;
+ case PPE_PKT_TYPE_IPV4_ROUTE:
+ src_addr = &hwe->ipv4.new_tuple.src_ip;
+ dest_addr = &hwe->ipv4.new_tuple.dest_ip;
+ seq_puts(m, " new=");
+ airoha_debugfs_ppe_print_tuple(m, src_addr, dest_addr,
+ src_port, dest_port,
+ ipv6);
+ break;
+ default:
+ break;
+ }
+
+ if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ data = hwe->ipv6.data;
+ ib2 = hwe->ipv6.ib2;
+ l2 = &hwe->ipv6.l2;
+ } else {
+ data = hwe->ipv4.data;
+ ib2 = hwe->ipv4.ib2;
+ l2 = &hwe->ipv4.l2.common;
+ *((__be16 *)&h_source[4]) =
+ cpu_to_be16(hwe->ipv4.l2.src_mac_lo);
+ }
+
+ *((__be32 *)h_dest) = cpu_to_be32(l2->dest_mac_hi);
+ *((__be16 *)&h_dest[4]) = cpu_to_be16(l2->dest_mac_lo);
+ *((__be32 *)h_source) = cpu_to_be32(l2->src_mac_hi);
+
+ seq_printf(m, " eth=%pM->%pM etype=%04x data=%08x"
+ " vlan=%d,%d ib1=%08x ib2=%08x\n",
+ h_source, h_dest, l2->etype, data,
+ l2->vlan1, l2->vlan2, hwe->ib1, ib2);
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_debugfs_foe_all_show(struct seq_file *m, void *private)
+{
+ return airoha_ppe_debugfs_foe_show(m, private, false);
+}
+DEFINE_SHOW_ATTRIBUTE(airoha_ppe_debugfs_foe_all);
+
+static int airoha_ppe_debugfs_foe_bind_show(struct seq_file *m, void *private)
+{
+ return airoha_ppe_debugfs_foe_show(m, private, true);
+}
+DEFINE_SHOW_ATTRIBUTE(airoha_ppe_debugfs_foe_bind);
+
+int airoha_ppe_debugfs_init(struct airoha_ppe *ppe)
+{
+ ppe->debugfs_dir = debugfs_create_dir("ppe", NULL);
+ debugfs_create_file("entries", 0444, ppe->debugfs_dir, ppe,
+ &airoha_ppe_debugfs_foe_all_fops);
+ debugfs_create_file("bind", 0444, ppe->debugfs_dir, ppe,
+ &airoha_ppe_debugfs_foe_bind_fops);
+
+ return 0;
+}
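
Note: the dump above rebuilds each MAC address from the split hi/lo words stored in the FOE entry, writing the big-endian forms into the h_dest/h_source byte arrays. A minimal user-space sketch of that packing, using htonl()/htons() and memcpy() in place of cpu_to_be32()/cpu_to_be16() and the in-place pointer casts; the field values are made up for illustration:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Rebuild a 6-byte MAC from a 32-bit "hi" word (first four octets) and a
 * 16-bit "lo" word (last two octets); the words are converted to
 * big-endian so mac[0] ends up as the most significant octet of "hi". */
static void mac_from_hi_lo(uint8_t mac[6], uint32_t hi, uint16_t lo)
{
	uint32_t be_hi = htonl(hi);
	uint16_t be_lo = htons(lo);

	memcpy(mac, &be_hi, sizeof(be_hi));
	memcpy(mac + 4, &be_lo, sizeof(be_lo));
}

int main(void)
{
	uint8_t mac[6];

	mac_from_hi_lo(mac, 0x001122aa, 0xbbcc);	/* example hi/lo values */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}

Using memcpy() keeps the sketch free of the pointer casts used in the debugfs code and portable to any alignment.
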
diff --git a/drivers/net/ethernet/airoha/airoha_regs.h b/drivers/net/ethernet/airoha/airoha_regs.h
new file mode 100644
index 000000000000..8146cde4e8ba
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_regs.h
@@ -0,0 +1,803 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#ifndef AIROHA_REGS_H
+#define AIROHA_REGS_H
+
+#include <linux/types.h>
+
+/* FE */
+#define PSE_BASE 0x0100
+#define CSR_IFC_BASE 0x0200
+#define CDM1_BASE 0x0400
+#define GDM1_BASE 0x0500
+#define PPE1_BASE 0x0c00
+#define PPE2_BASE 0x1c00
+
+#define CDM2_BASE 0x1400
+#define GDM2_BASE 0x1500
+
+#define GDM3_BASE 0x1100
+#define GDM4_BASE 0x2500
+
+#define GDM_BASE(_n) \
+ ((_n) == 4 ? GDM4_BASE : \
+ (_n) == 3 ? GDM3_BASE : \
+ (_n) == 2 ? GDM2_BASE : GDM1_BASE)
+
+#define REG_FE_DMA_GLO_CFG 0x0000
+#define FE_DMA_GLO_L2_SPACE_MASK GENMASK(7, 4)
+#define FE_DMA_GLO_PG_SZ_MASK BIT(3)
+
+#define REG_FE_RST_GLO_CFG 0x0004
+#define FE_RST_GDM4_MBI_ARB_MASK BIT(3)
+#define FE_RST_GDM3_MBI_ARB_MASK BIT(2)
+#define FE_RST_CORE_MASK BIT(0)
+
+#define REG_FE_FOE_TS 0x0010
+
+#define REG_FE_WAN_PORT 0x0024
+#define WAN1_EN_MASK BIT(16)
+#define WAN1_MASK GENMASK(12, 8)
+#define WAN0_MASK GENMASK(4, 0)
+
+#define REG_FE_WAN_MAC_H 0x0030
+#define REG_FE_LAN_MAC_H 0x0040
+
+#define REG_FE_MAC_LMIN(_n) ((_n) + 0x04)
+#define REG_FE_MAC_LMAX(_n) ((_n) + 0x08)
+
+#define REG_FE_CDM1_OQ_MAP0 0x0050
+#define REG_FE_CDM1_OQ_MAP1 0x0054
+#define REG_FE_CDM1_OQ_MAP2 0x0058
+#define REG_FE_CDM1_OQ_MAP3 0x005c
+
+#define REG_FE_PCE_CFG 0x0070
+#define PCE_DPI_EN_MASK BIT(2)
+#define PCE_KA_EN_MASK BIT(1)
+#define PCE_MC_EN_MASK BIT(0)
+
+#define REG_FE_PSE_QUEUE_CFG_WR 0x0080
+#define PSE_CFG_PORT_ID_MASK GENMASK(27, 24)
+#define PSE_CFG_QUEUE_ID_MASK GENMASK(20, 16)
+#define PSE_CFG_WR_EN_MASK BIT(8)
+#define PSE_CFG_OQRSV_SEL_MASK BIT(0)
+
+#define REG_FE_PSE_QUEUE_CFG_VAL 0x0084
+#define PSE_CFG_OQ_RSV_MASK GENMASK(13, 0)
+
+#define PSE_FQ_CFG 0x008c
+#define PSE_FQ_LIMIT_MASK GENMASK(14, 0)
+
+#define REG_FE_PSE_BUF_SET 0x0090
+#define PSE_SHARE_USED_LTHD_MASK GENMASK(31, 16)
+#define PSE_ALLRSV_MASK GENMASK(14, 0)
+
+#define REG_PSE_SHARE_USED_THD 0x0094
+#define PSE_SHARE_USED_MTHD_MASK GENMASK(31, 16)
+#define PSE_SHARE_USED_HTHD_MASK GENMASK(15, 0)
+
+#define REG_GDM_MISC_CFG 0x0148
+#define GDM2_RDM_ACK_WAIT_PREF_MASK BIT(9)
+#define GDM2_CHN_VLD_MODE_MASK BIT(5)
+
+#define REG_FE_CSR_IFC_CFG CSR_IFC_BASE
+#define FE_IFC_EN_MASK BIT(0)
+
+#define REG_FE_VIP_PORT_EN 0x01f0
+#define REG_FE_IFC_PORT_EN 0x01f4
+
+#define REG_PSE_IQ_REV1 (PSE_BASE + 0x08)
+#define PSE_IQ_RES1_P2_MASK GENMASK(23, 16)
+
+#define REG_PSE_IQ_REV2 (PSE_BASE + 0x0c)
+#define PSE_IQ_RES2_P5_MASK GENMASK(15, 8)
+#define PSE_IQ_RES2_P4_MASK GENMASK(7, 0)
+
+#define REG_FE_VIP_EN(_n) (0x0300 + ((_n) << 3))
+#define PATN_FCPU_EN_MASK BIT(7)
+#define PATN_SWP_EN_MASK BIT(6)
+#define PATN_DP_EN_MASK BIT(5)
+#define PATN_SP_EN_MASK BIT(4)
+#define PATN_TYPE_MASK GENMASK(3, 1)
+#define PATN_EN_MASK BIT(0)
+
+#define REG_FE_VIP_PATN(_n) (0x0304 + ((_n) << 3))
+#define PATN_DP_MASK GENMASK(31, 16)
+#define PATN_SP_MASK GENMASK(15, 0)
+
+#define REG_CDM1_VLAN_CTRL CDM1_BASE
+#define CDM1_VLAN_MASK GENMASK(31, 16)
+
+#define REG_CDM1_FWD_CFG (CDM1_BASE + 0x08)
+#define CDM1_VIP_QSEL_MASK GENMASK(24, 20)
+
+#define REG_CDM1_CRSN_QSEL(_n) (CDM1_BASE + 0x10 + ((_n) << 2))
+#define CDM1_CRSN_QSEL_REASON_MASK(_n) \
+ GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
+
+#define REG_CDM2_FWD_CFG (CDM2_BASE + 0x08)
+#define CDM2_OAM_QSEL_MASK GENMASK(31, 27)
+#define CDM2_VIP_QSEL_MASK GENMASK(24, 20)
+
+#define REG_CDM2_CRSN_QSEL(_n) (CDM2_BASE + 0x10 + ((_n) << 2))
+#define CDM2_CRSN_QSEL_REASON_MASK(_n) \
+ GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
+
+#define REG_GDM_FWD_CFG(_n) GDM_BASE(_n)
+#define GDM_DROP_CRC_ERR BIT(23)
+#define GDM_IP4_CKSUM BIT(22)
+#define GDM_TCP_CKSUM BIT(21)
+#define GDM_UDP_CKSUM BIT(20)
+#define GDM_STRIP_CRC BIT(16)
+#define GDM_UCFQ_MASK GENMASK(15, 12)
+#define GDM_BCFQ_MASK GENMASK(11, 8)
+#define GDM_MCFQ_MASK GENMASK(7, 4)
+#define GDM_OCFQ_MASK GENMASK(3, 0)
+
+#define REG_GDM_INGRESS_CFG(_n) (GDM_BASE(_n) + 0x10)
+#define GDM_INGRESS_FC_EN_MASK BIT(1)
+#define GDM_STAG_EN_MASK BIT(0)
+
+#define REG_GDM_LEN_CFG(_n) (GDM_BASE(_n) + 0x14)
+#define GDM_SHORT_LEN_MASK GENMASK(13, 0)
+#define GDM_LONG_LEN_MASK GENMASK(29, 16)
+
+#define REG_GDM_LPBK_CFG(_n) (GDM_BASE(_n) + 0x1c)
+#define LPBK_GAP_MASK GENMASK(31, 24)
+#define LPBK_LEN_MASK GENMASK(23, 10)
+#define LPBK_CHAN_MASK GENMASK(8, 4)
+#define LPBK_MODE_MASK GENMASK(3, 1)
+#define LPBK_EN_MASK BIT(0)
+
+#define REG_GDM_TXCHN_EN(_n) (GDM_BASE(_n) + 0x24)
+#define REG_GDM_RXCHN_EN(_n) (GDM_BASE(_n) + 0x28)
+
+#define REG_FE_CPORT_CFG (GDM1_BASE + 0x40)
+#define FE_CPORT_PAD BIT(26)
+#define FE_CPORT_PORT_XFC_MASK BIT(25)
+#define FE_CPORT_QUEUE_XFC_MASK BIT(24)
+
+#define REG_FE_GDM_MIB_CLEAR(_n) (GDM_BASE(_n) + 0xf0)
+#define FE_GDM_MIB_RX_CLEAR_MASK BIT(1)
+#define FE_GDM_MIB_TX_CLEAR_MASK BIT(0)
+
+#define REG_FE_GDM1_MIB_CFG (GDM1_BASE + 0xf4)
+#define FE_STRICT_RFC2819_MODE_MASK BIT(31)
+#define FE_GDM1_TX_MIB_SPLIT_EN_MASK BIT(17)
+#define FE_GDM1_RX_MIB_SPLIT_EN_MASK BIT(16)
+#define FE_TX_MIB_ID_MASK GENMASK(15, 8)
+#define FE_RX_MIB_ID_MASK GENMASK(7, 0)
+
+#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x104)
+#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x10c)
+#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x110)
+#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x114)
+#define REG_FE_GDM_TX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x118)
+#define REG_FE_GDM_TX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x11c)
+#define REG_FE_GDM_TX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x120)
+#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x124)
+#define REG_FE_GDM_TX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x128)
+#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x12c)
+#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x130)
+#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x134)
+#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x138)
+#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x13c)
+#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x140)
+
+#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x148)
+#define REG_FE_GDM_RX_FC_DROP_CNT(_n) (GDM_BASE(_n) + 0x14c)
+#define REG_FE_GDM_RX_RC_DROP_CNT(_n) (GDM_BASE(_n) + 0x150)
+#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n) (GDM_BASE(_n) + 0x154)
+#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n) (GDM_BASE(_n) + 0x158)
+#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x15c)
+#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x160)
+#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x164)
+#define REG_FE_GDM_RX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x168)
+#define REG_FE_GDM_RX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x16c)
+#define REG_FE_GDM_RX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x170)
+#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n) (GDM_BASE(_n) + 0x174)
+#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n) (GDM_BASE(_n) + 0x178)
+#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n) (GDM_BASE(_n) + 0x17c)
+#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x180)
+#define REG_FE_GDM_RX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x184)
+#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x188)
+#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x18c)
+#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x190)
+#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x194)
+#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x198)
+#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x19c)
+
+#define REG_PPE_GLO_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x200)
+#define PPE_GLO_CFG_BUSY_MASK BIT(31)
+#define PPE_GLO_CFG_FLOW_DROP_UPDATE_MASK BIT(9)
+#define PPE_GLO_CFG_PSE_HASH_OFS_MASK BIT(6)
+#define PPE_GLO_CFG_PPE_BSWAP_MASK BIT(5)
+#define PPE_GLO_CFG_TTL_DROP_MASK BIT(4)
+#define PPE_GLO_CFG_IP4_CS_DROP_MASK BIT(3)
+#define PPE_GLO_CFG_IP4_L4_CS_DROP_MASK BIT(2)
+#define PPE_GLO_CFG_EN_MASK BIT(0)
+
+#define REG_PPE_PPE_FLOW_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x204)
+#define PPE_FLOW_CFG_IP6_HASH_GRE_KEY_MASK BIT(20)
+#define PPE_FLOW_CFG_IP4_HASH_GRE_KEY_MASK BIT(19)
+#define PPE_FLOW_CFG_IP4_HASH_FLOW_LABEL_MASK BIT(18)
+#define PPE_FLOW_CFG_IP4_NAT_FRAG_MASK BIT(17)
+#define PPE_FLOW_CFG_IP_PROTO_BLACKLIST_MASK BIT(16)
+#define PPE_FLOW_CFG_IP4_DSLITE_MASK BIT(14)
+#define PPE_FLOW_CFG_IP4_NAPT_MASK BIT(13)
+#define PPE_FLOW_CFG_IP4_NAT_MASK BIT(12)
+#define PPE_FLOW_CFG_IP6_6RD_MASK BIT(10)
+#define PPE_FLOW_CFG_IP6_5T_ROUTE_MASK BIT(9)
+#define PPE_FLOW_CFG_IP6_3T_ROUTE_MASK BIT(8)
+#define PPE_FLOW_CFG_IP4_UDP_FRAG_MASK BIT(7)
+#define PPE_FLOW_CFG_IP4_TCP_FRAG_MASK BIT(6)
+
+#define REG_PPE_IP_PROTO_CHK(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x208)
+#define PPE_IP_PROTO_CHK_IPV4_MASK GENMASK(15, 0)
+#define PPE_IP_PROTO_CHK_IPV6_MASK GENMASK(31, 16)
+
+#define REG_PPE_TB_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x21c)
+#define PPE_SRAM_TB_NUM_ENTRY_MASK GENMASK(26, 24)
+#define PPE_TB_CFG_KEEPALIVE_MASK GENMASK(13, 12)
+#define PPE_TB_CFG_AGE_TCP_FIN_MASK BIT(11)
+#define PPE_TB_CFG_AGE_UDP_MASK BIT(10)
+#define PPE_TB_CFG_AGE_TCP_MASK BIT(9)
+#define PPE_TB_CFG_AGE_UNBIND_MASK BIT(8)
+#define PPE_TB_CFG_AGE_NON_L4_MASK BIT(7)
+#define PPE_TB_CFG_AGE_PREBIND_MASK BIT(6)
+#define PPE_TB_CFG_SEARCH_MISS_MASK GENMASK(5, 4)
+#define PPE_TB_ENTRY_SIZE_MASK BIT(3)
+#define PPE_DRAM_TB_NUM_ENTRY_MASK GENMASK(2, 0)
+
+#define REG_PPE_TB_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x220)
+
+#define REG_PPE_BIND_RATE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x228)
+#define PPE_BIND_RATE_L2B_BIND_MASK GENMASK(31, 16)
+#define PPE_BIND_RATE_BIND_MASK GENMASK(15, 0)
+
+#define REG_PPE_BIND_LIMIT0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x22c)
+#define PPE_BIND_LIMIT0_HALF_MASK GENMASK(29, 16)
+#define PPE_BIND_LIMIT0_QUARTER_MASK GENMASK(13, 0)
+
+#define REG_PPE_BIND_LIMIT1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x230)
+#define PPE_BIND_LIMIT1_NON_L4_MASK GENMASK(23, 16)
+#define PPE_BIND_LIMIT1_FULL_MASK GENMASK(13, 0)
+
+#define REG_PPE_BND_AGE0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x23c)
+#define PPE_BIND_AGE0_DELTA_NON_L4 GENMASK(30, 16)
+#define PPE_BIND_AGE0_DELTA_UDP GENMASK(14, 0)
+
+#define REG_PPE_UNBIND_AGE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x238)
+#define PPE_UNBIND_AGE_MIN_PACKETS_MASK GENMASK(31, 16)
+#define PPE_UNBIND_AGE_DELTA_MASK GENMASK(7, 0)
+
+#define REG_PPE_BND_AGE1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x240)
+#define PPE_BIND_AGE1_DELTA_TCP_FIN GENMASK(30, 16)
+#define PPE_BIND_AGE1_DELTA_TCP GENMASK(14, 0)
+
+#define REG_PPE_HASH_SEED(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x244)
+#define PPE_HASH_SEED 0x12345678
+
+#define REG_PPE_DFT_CPORT0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x248)
+
+#define REG_PPE_DFT_CPORT1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x24c)
+
+#define REG_PPE_TB_HASH_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x250)
+#define PPE_DRAM_HASH1_MODE_MASK GENMASK(31, 28)
+#define PPE_DRAM_HASH1_EN_MASK BIT(24)
+#define PPE_DRAM_HASH0_MODE_MASK GENMASK(23, 20)
+#define PPE_DRAM_TABLE_EN_MASK BIT(16)
+#define PPE_SRAM_HASH1_MODE_MASK GENMASK(15, 12)
+#define PPE_SRAM_HASH1_EN_MASK BIT(8)
+#define PPE_SRAM_HASH0_MODE_MASK GENMASK(7, 4)
+#define PPE_SRAM_TABLE_EN_MASK BIT(0)
+
+#define REG_PPE_MTU_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x304)
+#define REG_PPE_MTU(_m, _n) (REG_PPE_MTU_BASE(_m) + ((_n) << 2))
+#define FP1_EGRESS_MTU_MASK GENMASK(29, 16)
+#define FP0_EGRESS_MTU_MASK GENMASK(13, 0)
+
+#define REG_PPE_RAM_CTRL(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x31c)
+#define PPE_SRAM_CTRL_ACK_MASK BIT(31)
+#define PPE_SRAM_CTRL_DUAL_SUCESS_MASK BIT(30)
+#define PPE_SRAM_CTRL_ENTRY_MASK GENMASK(23, 8)
+#define PPE_SRAM_WR_DUAL_DIRECTION_MASK BIT(2)
+#define PPE_SRAM_CTRL_WR_MASK BIT(1)
+#define PPE_SRAM_CTRL_REQ_MASK BIT(0)
+
+#define REG_PPE_RAM_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x320)
+#define REG_PPE_RAM_ENTRY(_m, _n) (REG_PPE_RAM_BASE(_m) + ((_n) << 2))
+
+#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280)
+#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284)
+#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288)
+#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c)
+
+#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290)
+#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294)
+#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298)
+#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c)
+#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2b8)
+#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc)
+#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0)
+#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4)
+#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8)
+#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc)
+#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8)
+#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec)
+#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0)
+#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4)
+#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8)
+#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc)
+
+#define REG_GDM2_CHN_RLS (GDM2_BASE + 0x20)
+#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25)
+#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17)
+
+#define REG_GDM3_FWD_CFG GDM3_BASE
+#define GDM3_PAD_EN_MASK BIT(28)
+
+#define REG_GDM4_FWD_CFG GDM4_BASE
+#define GDM4_PAD_EN_MASK BIT(28)
+#define GDM4_SPORT_OFFSET0_MASK GENMASK(11, 8)
+
+#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x23c)
+#define GDM4_SPORT_OFF2_MASK GENMASK(19, 16)
+#define GDM4_SPORT_OFF1_MASK GENMASK(15, 12)
+#define GDM4_SPORT_OFF0_MASK GENMASK(11, 8)
+
+#define REG_IP_FRAG_FP 0x2010
+#define IP_ASSEMBLE_PORT_MASK GENMASK(24, 21)
+#define IP_ASSEMBLE_NBQ_MASK GENMASK(20, 16)
+#define IP_FRAGMENT_PORT_MASK GENMASK(8, 5)
+#define IP_FRAGMENT_NBQ_MASK GENMASK(4, 0)
+
+#define REG_MC_VLAN_EN 0x2100
+#define MC_VLAN_EN_MASK BIT(0)
+
+#define REG_MC_VLAN_CFG 0x2104
+#define MC_VLAN_CFG_CMD_DONE_MASK BIT(31)
+#define MC_VLAN_CFG_TABLE_ID_MASK GENMASK(21, 16)
+#define MC_VLAN_CFG_PORT_ID_MASK GENMASK(11, 8)
+#define MC_VLAN_CFG_TABLE_SEL_MASK BIT(4)
+#define MC_VLAN_CFG_RW_MASK BIT(0)
+
+#define REG_MC_VLAN_DATA 0x2108
+
+#define REG_SP_DFT_CPORT(_n) (0x20e0 + ((_n) << 2))
+#define SP_CPORT_PCIE1_MASK GENMASK(31, 28)
+#define SP_CPORT_PCIE0_MASK GENMASK(27, 24)
+#define SP_CPORT_USB_MASK GENMASK(7, 4)
+#define SP_CPORT_ETH_MASK GENMASK(7, 4)
+
+#define REG_SRC_PORT_FC_MAP6 0x2298
+#define FC_ID_OF_SRC_PORT27_MASK GENMASK(28, 24)
+#define FC_ID_OF_SRC_PORT26_MASK GENMASK(20, 16)
+#define FC_ID_OF_SRC_PORT25_MASK GENMASK(12, 8)
+#define FC_ID_OF_SRC_PORT24_MASK GENMASK(4, 0)
+
+#define REG_CDM5_RX_OQ1_DROP_CNT 0x29d4
+
+/* QDMA */
+#define REG_QDMA_GLOBAL_CFG 0x0004
+#define GLOBAL_CFG_RX_2B_OFFSET_MASK BIT(31)
+#define GLOBAL_CFG_DMA_PREFERENCE_MASK GENMASK(30, 29)
+#define GLOBAL_CFG_CPU_TXR_RR_MASK BIT(28)
+#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK BIT(27)
+#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK BIT(26)
+#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK BIT(25)
+#define GLOBAL_CFG_OAM_MODIFY_MASK BIT(24)
+#define GLOBAL_CFG_RESET_MASK BIT(23)
+#define GLOBAL_CFG_RESET_DONE_MASK BIT(22)
+#define GLOBAL_CFG_MULTICAST_EN_MASK BIT(21)
+#define GLOBAL_CFG_IRQ1_EN_MASK BIT(20)
+#define GLOBAL_CFG_IRQ0_EN_MASK BIT(19)
+#define GLOBAL_CFG_LOOPCNT_EN_MASK BIT(18)
+#define GLOBAL_CFG_RD_BYPASS_WR_MASK BIT(17)
+#define GLOBAL_CFG_QDMA_LOOPBACK_MASK BIT(16)
+#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK GENMASK(13, 8)
+#define GLOBAL_CFG_CHECK_DONE_MASK BIT(7)
+#define GLOBAL_CFG_TX_WB_DONE_MASK BIT(6)
+#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK GENMASK(5, 4)
+#define GLOBAL_CFG_RX_DMA_BUSY_MASK BIT(3)
+#define GLOBAL_CFG_RX_DMA_EN_MASK BIT(2)
+#define GLOBAL_CFG_TX_DMA_BUSY_MASK BIT(1)
+#define GLOBAL_CFG_TX_DMA_EN_MASK BIT(0)
+
+#define REG_FWD_DSCP_BASE 0x0010
+#define REG_FWD_BUF_BASE 0x0014
+
+#define REG_HW_FWD_DSCP_CFG 0x0018
+#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK GENMASK(29, 28)
+#define HW_FWD_DSCP_SCATTER_LEN_MASK GENMASK(17, 16)
+#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK GENMASK(15, 0)
+
+#define REG_INT_STATUS(_n) \
+ (((_n) == 4) ? 0x0730 : \
+ ((_n) == 3) ? 0x0724 : \
+ ((_n) == 2) ? 0x0720 : \
+ ((_n) == 1) ? 0x0024 : 0x0020)
+
+#define REG_INT_ENABLE(_n) \
+ (((_n) == 4) ? 0x0750 : \
+ ((_n) == 3) ? 0x0744 : \
+ ((_n) == 2) ? 0x0740 : \
+ ((_n) == 1) ? 0x002c : 0x0028)
+
+/* QDMA_CSR_INT_ENABLE1 */
+#define RX15_COHERENT_INT_MASK BIT(31)
+#define RX14_COHERENT_INT_MASK BIT(30)
+#define RX13_COHERENT_INT_MASK BIT(29)
+#define RX12_COHERENT_INT_MASK BIT(28)
+#define RX11_COHERENT_INT_MASK BIT(27)
+#define RX10_COHERENT_INT_MASK BIT(26)
+#define RX9_COHERENT_INT_MASK BIT(25)
+#define RX8_COHERENT_INT_MASK BIT(24)
+#define RX7_COHERENT_INT_MASK BIT(23)
+#define RX6_COHERENT_INT_MASK BIT(22)
+#define RX5_COHERENT_INT_MASK BIT(21)
+#define RX4_COHERENT_INT_MASK BIT(20)
+#define RX3_COHERENT_INT_MASK BIT(19)
+#define RX2_COHERENT_INT_MASK BIT(18)
+#define RX1_COHERENT_INT_MASK BIT(17)
+#define RX0_COHERENT_INT_MASK BIT(16)
+#define TX7_COHERENT_INT_MASK BIT(15)
+#define TX6_COHERENT_INT_MASK BIT(14)
+#define TX5_COHERENT_INT_MASK BIT(13)
+#define TX4_COHERENT_INT_MASK BIT(12)
+#define TX3_COHERENT_INT_MASK BIT(11)
+#define TX2_COHERENT_INT_MASK BIT(10)
+#define TX1_COHERENT_INT_MASK BIT(9)
+#define TX0_COHERENT_INT_MASK BIT(8)
+#define CNT_OVER_FLOW_INT_MASK BIT(7)
+#define IRQ1_FULL_INT_MASK BIT(5)
+#define IRQ1_INT_MASK BIT(4)
+#define HWFWD_DSCP_LOW_INT_MASK BIT(3)
+#define HWFWD_DSCP_EMPTY_INT_MASK BIT(2)
+#define IRQ0_FULL_INT_MASK BIT(1)
+#define IRQ0_INT_MASK BIT(0)
+
+#define TX_DONE_INT_MASK(_n) \
+ ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \
+ : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
+
+#define INT_TX_MASK \
+ (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \
+ IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
+
+#define INT_IDX0_MASK \
+ (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK | \
+ TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK | \
+ TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK | \
+ TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK | \
+ RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK | \
+ RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK | \
+ RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK | \
+ RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK | \
+ RX15_COHERENT_INT_MASK | INT_TX_MASK)
+
+/* QDMA_CSR_INT_ENABLE2 */
+#define RX15_NO_CPU_DSCP_INT_MASK BIT(31)
+#define RX14_NO_CPU_DSCP_INT_MASK BIT(30)
+#define RX13_NO_CPU_DSCP_INT_MASK BIT(29)
+#define RX12_NO_CPU_DSCP_INT_MASK BIT(28)
+#define RX11_NO_CPU_DSCP_INT_MASK BIT(27)
+#define RX10_NO_CPU_DSCP_INT_MASK BIT(26)
+#define RX9_NO_CPU_DSCP_INT_MASK BIT(25)
+#define RX8_NO_CPU_DSCP_INT_MASK BIT(24)
+#define RX7_NO_CPU_DSCP_INT_MASK BIT(23)
+#define RX6_NO_CPU_DSCP_INT_MASK BIT(22)
+#define RX5_NO_CPU_DSCP_INT_MASK BIT(21)
+#define RX4_NO_CPU_DSCP_INT_MASK BIT(20)
+#define RX3_NO_CPU_DSCP_INT_MASK BIT(19)
+#define RX2_NO_CPU_DSCP_INT_MASK BIT(18)
+#define RX1_NO_CPU_DSCP_INT_MASK BIT(17)
+#define RX0_NO_CPU_DSCP_INT_MASK BIT(16)
+#define RX15_DONE_INT_MASK BIT(15)
+#define RX14_DONE_INT_MASK BIT(14)
+#define RX13_DONE_INT_MASK BIT(13)
+#define RX12_DONE_INT_MASK BIT(12)
+#define RX11_DONE_INT_MASK BIT(11)
+#define RX10_DONE_INT_MASK BIT(10)
+#define RX9_DONE_INT_MASK BIT(9)
+#define RX8_DONE_INT_MASK BIT(8)
+#define RX7_DONE_INT_MASK BIT(7)
+#define RX6_DONE_INT_MASK BIT(6)
+#define RX5_DONE_INT_MASK BIT(5)
+#define RX4_DONE_INT_MASK BIT(4)
+#define RX3_DONE_INT_MASK BIT(3)
+#define RX2_DONE_INT_MASK BIT(2)
+#define RX1_DONE_INT_MASK BIT(1)
+#define RX0_DONE_INT_MASK BIT(0)
+
+#define RX_DONE_INT_MASK \
+ (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK | \
+ RX2_DONE_INT_MASK | RX3_DONE_INT_MASK | \
+ RX4_DONE_INT_MASK | RX7_DONE_INT_MASK | \
+ RX8_DONE_INT_MASK | RX9_DONE_INT_MASK | \
+ RX15_DONE_INT_MASK)
+#define INT_IDX1_MASK \
+ (RX_DONE_INT_MASK | \
+ RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK | \
+ RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK | \
+ RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK | \
+ RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK | \
+ RX15_NO_CPU_DSCP_INT_MASK)
+
+/* QDMA_CSR_INT_ENABLE5 */
+#define TX31_COHERENT_INT_MASK BIT(31)
+#define TX30_COHERENT_INT_MASK BIT(30)
+#define TX29_COHERENT_INT_MASK BIT(29)
+#define TX28_COHERENT_INT_MASK BIT(28)
+#define TX27_COHERENT_INT_MASK BIT(27)
+#define TX26_COHERENT_INT_MASK BIT(26)
+#define TX25_COHERENT_INT_MASK BIT(25)
+#define TX24_COHERENT_INT_MASK BIT(24)
+#define TX23_COHERENT_INT_MASK BIT(23)
+#define TX22_COHERENT_INT_MASK BIT(22)
+#define TX21_COHERENT_INT_MASK BIT(21)
+#define TX20_COHERENT_INT_MASK BIT(20)
+#define TX19_COHERENT_INT_MASK BIT(19)
+#define TX18_COHERENT_INT_MASK BIT(18)
+#define TX17_COHERENT_INT_MASK BIT(17)
+#define TX16_COHERENT_INT_MASK BIT(16)
+#define TX15_COHERENT_INT_MASK BIT(15)
+#define TX14_COHERENT_INT_MASK BIT(14)
+#define TX13_COHERENT_INT_MASK BIT(13)
+#define TX12_COHERENT_INT_MASK BIT(12)
+#define TX11_COHERENT_INT_MASK BIT(11)
+#define TX10_COHERENT_INT_MASK BIT(10)
+#define TX9_COHERENT_INT_MASK BIT(9)
+#define TX8_COHERENT_INT_MASK BIT(8)
+
+#define INT_IDX4_MASK \
+ (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK | \
+ TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK | \
+ TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK | \
+ TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK | \
+ TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK | \
+ TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK | \
+ TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK | \
+ TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK | \
+ TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK | \
+ TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK | \
+ TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK | \
+ TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK)
+
+#define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050)
+
+#define REG_TX_IRQ_CFG(_n) ((_n) ? 0x004c : 0x0054)
+#define TX_IRQ_THR_MASK GENMASK(27, 16)
+#define TX_IRQ_DEPTH_MASK GENMASK(11, 0)
+
+#define REG_IRQ_CLEAR_LEN(_n) ((_n) ? 0x0064 : 0x0058)
+#define IRQ_CLEAR_LEN_MASK GENMASK(7, 0)
+
+#define REG_IRQ_STATUS(_n) ((_n) ? 0x0068 : 0x005c)
+#define IRQ_ENTRY_LEN_MASK GENMASK(27, 16)
+#define IRQ_HEAD_IDX_MASK GENMASK(11, 0)
+
+#define REG_TX_RING_BASE(_n) \
+ (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))
+
+#define REG_TX_RING_BLOCKING(_n) \
+ (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))
+
+#define TX_RING_IRQ_BLOCKING_MAP_MASK BIT(6)
+#define TX_RING_IRQ_BLOCKING_CFG_MASK BIT(4)
+#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK BIT(2)
+#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK BIT(1)
+#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK BIT(0)
+
+#define REG_TX_CPU_IDX(_n) \
+ (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))
+
+#define TX_RING_CPU_IDX_MASK GENMASK(15, 0)
+
+#define REG_TX_DMA_IDX(_n) \
+ (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))
+
+#define TX_RING_DMA_IDX_MASK GENMASK(15, 0)
+
+#define IRQ_RING_IDX_MASK GENMASK(20, 16)
+#define IRQ_DESC_IDX_MASK GENMASK(15, 0)
+
+#define REG_RX_RING_BASE(_n) \
+ (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))
+
+#define REG_RX_RING_SIZE(_n) \
+ (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))
+
+#define RX_RING_THR_MASK GENMASK(31, 16)
+#define RX_RING_SIZE_MASK GENMASK(15, 0)
+
+#define REG_RX_CPU_IDX(_n) \
+ (((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))
+
+#define RX_RING_CPU_IDX_MASK GENMASK(15, 0)
+
+#define REG_RX_DMA_IDX(_n) \
+ (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))
+
+#define REG_RX_DELAY_INT_IDX(_n) \
+ (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
+
+#define REG_RX_SCATTER_CFG(_n) \
+ (((_n) < 16) ? 0x0214 + ((_n) << 5) : 0x0e14 + (((_n) - 16) << 5))
+
+#define RX_DELAY_INT_MASK GENMASK(15, 0)
+
+#define RX_RING_DMA_IDX_MASK GENMASK(15, 0)
+
+#define RX_RING_SG_EN_MASK BIT(0)
+
+#define REG_INGRESS_TRTCM_CFG 0x0070
+#define INGRESS_TRTCM_EN_MASK BIT(31)
+#define INGRESS_TRTCM_MODE_MASK BIT(30)
+#define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define INGRESS_FAST_TICK_MASK GENMASK(15, 0)
+
+#define REG_QUEUE_CLOSE_CFG(_n) (0x00a0 + ((_n) & 0xfc))
+#define TXQ_DISABLE_CHAN_QUEUE_MASK(_n, _m) BIT((_m) + (((_n) & 0x3) << 3))
+
+#define REG_TXQ_DIS_CFG_BASE(_n) ((_n) ? 0x20a0 : 0x00a0)
+#define REG_TXQ_DIS_CFG(_n, _m) (REG_TXQ_DIS_CFG_BASE((_n)) + (_m) << 2)
+
+#define REG_CNTR_CFG(_n) (0x0400 + ((_n) << 3))
+#define CNTR_EN_MASK BIT(31)
+#define CNTR_ALL_CHAN_EN_MASK BIT(30)
+#define CNTR_ALL_QUEUE_EN_MASK BIT(29)
+#define CNTR_ALL_DSCP_RING_EN_MASK BIT(28)
+#define CNTR_SRC_MASK GENMASK(27, 24)
+#define CNTR_DSCP_RING_MASK GENMASK(20, 16)
+#define CNTR_CHAN_MASK GENMASK(7, 3)
+#define CNTR_QUEUE_MASK GENMASK(2, 0)
+
+#define REG_CNTR_VAL(_n) (0x0404 + ((_n) << 3))
+
+#define REG_LMGR_INIT_CFG 0x1000
+#define LMGR_INIT_START BIT(31)
+#define LMGR_SRAM_MODE_MASK BIT(30)
+#define HW_FWD_PKTSIZE_OVERHEAD_MASK GENMASK(27, 20)
+#define HW_FWD_DESC_NUM_MASK GENMASK(16, 0)
+
+#define REG_FWD_DSCP_LOW_THR 0x1004
+#define FWD_DSCP_LOW_THR_MASK GENMASK(17, 0)
+
+#define REG_EGRESS_RATE_METER_CFG 0x100c
+#define EGRESS_RATE_METER_EN_MASK BIT(31)
+#define EGRESS_RATE_METER_EQ_RATE_EN_MASK BIT(17)
+#define EGRESS_RATE_METER_WINDOW_SZ_MASK GENMASK(16, 12)
+#define EGRESS_RATE_METER_TIMESLICE_MASK GENMASK(10, 0)
+
+#define REG_EGRESS_TRTCM_CFG 0x1010
+#define EGRESS_TRTCM_EN_MASK BIT(31)
+#define EGRESS_TRTCM_MODE_MASK BIT(30)
+#define EGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define EGRESS_FAST_TICK_MASK GENMASK(15, 0)
+
+#define TRTCM_PARAM_RW_MASK BIT(31)
+#define TRTCM_PARAM_RW_DONE_MASK BIT(30)
+#define TRTCM_PARAM_TYPE_MASK GENMASK(29, 28)
+#define TRTCM_METER_GROUP_MASK GENMASK(27, 26)
+#define TRTCM_PARAM_INDEX_MASK GENMASK(23, 17)
+#define TRTCM_PARAM_RATE_TYPE_MASK BIT(16)
+
+#define REG_TRTCM_CFG_PARAM(_n) ((_n) + 0x4)
+#define REG_TRTCM_DATA_LOW(_n) ((_n) + 0x8)
+#define REG_TRTCM_DATA_HIGH(_n) ((_n) + 0xc)
+
+#define REG_TXWRR_MODE_CFG 0x1020
+#define TWRR_WEIGHT_SCALE_MASK BIT(31)
+#define TWRR_WEIGHT_BASE_MASK BIT(3)
+
+#define REG_TXWRR_WEIGHT_CFG 0x1024
+#define TWRR_RW_CMD_MASK BIT(31)
+#define TWRR_RW_CMD_DONE BIT(30)
+#define TWRR_CHAN_IDX_MASK GENMASK(23, 19)
+#define TWRR_QUEUE_IDX_MASK GENMASK(18, 16)
+#define TWRR_VALUE_MASK GENMASK(15, 0)
+
+#define REG_PSE_BUF_USAGE_CFG 0x1028
+#define PSE_BUF_ESTIMATE_EN_MASK BIT(29)
+
+#define REG_CHAN_QOS_MODE(_n) (0x1040 + ((_n) << 2))
+#define CHAN_QOS_MODE_MASK(_n) GENMASK(2 + ((_n) << 2), (_n) << 2)
+
+#define REG_GLB_TRTCM_CFG 0x1080
+#define GLB_TRTCM_EN_MASK BIT(31)
+#define GLB_TRTCM_MODE_MASK BIT(30)
+#define GLB_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define GLB_FAST_TICK_MASK GENMASK(15, 0)
+
+#define REG_TXQ_CNGST_CFG 0x10a0
+#define TXQ_CNGST_DROP_EN BIT(31)
+#define TXQ_CNGST_DEI_DROP_EN BIT(30)
+
+#define REG_SLA_TRTCM_CFG 0x1150
+#define SLA_TRTCM_EN_MASK BIT(31)
+#define SLA_TRTCM_MODE_MASK BIT(30)
+#define SLA_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define SLA_FAST_TICK_MASK GENMASK(15, 0)
+
+/* CTRL */
+#define QDMA_DESC_DONE_MASK BIT(31)
+#define QDMA_DESC_DROP_MASK BIT(30) /* tx: drop - rx: overflow */
+#define QDMA_DESC_MORE_MASK BIT(29) /* more SG elements */
+#define QDMA_DESC_DEI_MASK BIT(25)
+#define QDMA_DESC_NO_DROP_MASK BIT(24)
+#define QDMA_DESC_LEN_MASK GENMASK(15, 0)
+/* DATA */
+#define QDMA_DESC_NEXT_ID_MASK GENMASK(15, 0)
+/* TX MSG0 */
+#define QDMA_ETH_TXMSG_MIC_IDX_MASK BIT(30)
+#define QDMA_ETH_TXMSG_SP_TAG_MASK GENMASK(29, 14)
+#define QDMA_ETH_TXMSG_ICO_MASK BIT(13)
+#define QDMA_ETH_TXMSG_UCO_MASK BIT(12)
+#define QDMA_ETH_TXMSG_TCO_MASK BIT(11)
+#define QDMA_ETH_TXMSG_TSO_MASK BIT(10)
+#define QDMA_ETH_TXMSG_FAST_MASK BIT(9)
+#define QDMA_ETH_TXMSG_OAM_MASK BIT(8)
+#define QDMA_ETH_TXMSG_CHAN_MASK GENMASK(7, 3)
+#define QDMA_ETH_TXMSG_QUEUE_MASK GENMASK(2, 0)
+/* TX MSG1 */
+#define QDMA_ETH_TXMSG_NO_DROP BIT(31)
+#define QDMA_ETH_TXMSG_METER_MASK GENMASK(30, 24) /* 0x7f no meters */
+#define QDMA_ETH_TXMSG_FPORT_MASK GENMASK(23, 20)
+#define QDMA_ETH_TXMSG_NBOQ_MASK GENMASK(19, 15)
+#define QDMA_ETH_TXMSG_HWF_MASK BIT(14)
+#define QDMA_ETH_TXMSG_HOP_MASK BIT(13)
+#define QDMA_ETH_TXMSG_PTP_MASK BIT(12)
+#define QDMA_ETH_TXMSG_ACNT_G1_MASK GENMASK(10, 6) /* 0x1f do not count */
+#define QDMA_ETH_TXMSG_ACNT_G0_MASK GENMASK(5, 0) /* 0x3f do not count */
+
+/* RX MSG0 */
+#define QDMA_ETH_RXMSG_SPTAG GENMASK(21, 14)
+/* RX MSG1 */
+#define QDMA_ETH_RXMSG_DEI_MASK BIT(31)
+#define QDMA_ETH_RXMSG_IP6_MASK BIT(30)
+#define QDMA_ETH_RXMSG_IP4_MASK BIT(29)
+#define QDMA_ETH_RXMSG_IP4F_MASK BIT(28)
+#define QDMA_ETH_RXMSG_L4_VALID_MASK BIT(27)
+#define QDMA_ETH_RXMSG_L4F_MASK BIT(26)
+#define QDMA_ETH_RXMSG_SPORT_MASK GENMASK(25, 21)
+#define QDMA_ETH_RXMSG_CRSN_MASK GENMASK(20, 16)
+#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0)
+
+struct airoha_qdma_desc {
+ __le32 rsv;
+ __le32 ctrl;
+ __le32 addr;
+ __le32 data;
+ __le32 msg0;
+ __le32 msg1;
+ __le32 msg2;
+ __le32 msg3;
+};
+
+/* CTRL0 */
+#define QDMA_FWD_DESC_CTX_MASK BIT(31)
+#define QDMA_FWD_DESC_RING_MASK GENMASK(30, 28)
+#define QDMA_FWD_DESC_IDX_MASK GENMASK(27, 16)
+#define QDMA_FWD_DESC_LEN_MASK GENMASK(15, 0)
+/* CTRL1 */
+#define QDMA_FWD_DESC_FIRST_IDX_MASK GENMASK(15, 0)
+/* CTRL2 */
+#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK GENMASK(2, 0)
+
+struct airoha_qdma_fwd_desc {
+ __le32 addr;
+ __le32 ctrl0;
+ __le32 ctrl1;
+ __le32 ctrl2;
+ __le32 msg0;
+ __le32 msg1;
+ __le32 rsv0;
+ __le32 rsv1;
+};
+
+#endif /* AIROHA_REGS_H */
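
Note: the GENMASK()/BIT() definitions in this header are meant to be combined with the kernel's FIELD_PREP()/FIELD_GET() helpers, as the PPE code above does with FIELD_GET(). A self-contained sketch of packing and unpacking a QDMA CTRL word that way; the helper macros below are simplified user-space stand-ins, not the <linux/bitfield.h> implementations:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's GENMASK()/BIT()/FIELD_PREP()/FIELD_GET(). */
#define GENMASK(h, l)		((~0u << (l)) & (~0u >> (31 - (h))))
#define BIT(n)			(1u << (n))
#define FIELD_PREP(mask, val)	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

/* A few of the CTRL-word fields defined above. */
#define QDMA_DESC_DONE_MASK	BIT(31)
#define QDMA_DESC_DROP_MASK	BIT(30)
#define QDMA_DESC_LEN_MASK	GENMASK(15, 0)

int main(void)
{
	uint32_t ctrl = FIELD_PREP(QDMA_DESC_LEN_MASK, 1514);

	printf("ctrl=0x%08x len=%u done=%d\n", ctrl,
	       FIELD_GET(QDMA_DESC_LEN_MASK, ctrl),
	       !!(ctrl & QDMA_DESC_DONE_MASK));
	return 0;
}

The kernel helpers additionally perform compile-time checks that a constant value fits the field, which these stand-ins skip.
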
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index c1295dfad0d0..70fa3adb4934 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -5,9 +5,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#ifdef CONFIG_RFS_ACCEL
-#include <linux/cpu_rmap.h>
-#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -162,30 +159,6 @@ int ena_xmit_common(struct ena_adapter *adapter,
return 0;
}
-static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
-{
-#ifdef CONFIG_RFS_ACCEL
- u32 i;
- int rc;
-
- adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
- if (!adapter->netdev->rx_cpu_rmap)
- return -ENOMEM;
- for (i = 0; i < adapter->num_io_queues; i++) {
- int irq_idx = ENA_IO_IRQ_IDX(i);
-
- rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
- pci_irq_vector(adapter->pdev, irq_idx));
- if (rc) {
- free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
- adapter->netdev->rx_cpu_rmap = NULL;
- return rc;
- }
- }
-#endif /* CONFIG_RFS_ACCEL */
- return 0;
-}
-
static void ena_init_io_rings_common(struct ena_adapter *adapter,
struct ena_ring *ring, u16 qid)
{
@@ -1596,7 +1569,7 @@ static int ena_enable_msix(struct ena_adapter *adapter)
adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
}
- if (ena_init_rx_cpu_rmap(adapter))
+ if (netif_enable_cpu_rmap(adapter->netdev, adapter->num_io_queues))
netif_warn(adapter, probe, adapter->netdev,
"Failed to map IRQs to CPUs\n");
@@ -1742,16 +1715,13 @@ static void ena_free_io_irq(struct ena_adapter *adapter)
struct ena_irq *irq;
int i;
-#ifdef CONFIG_RFS_ACCEL
- if (adapter->msix_vecs >= 1) {
- free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
- adapter->netdev->rx_cpu_rmap = NULL;
- }
-#endif /* CONFIG_RFS_ACCEL */
-
for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
+ struct ena_napi *ena_napi;
+
irq = &adapter->irq_tbl[i];
irq_set_affinity_hint(irq->vector, NULL);
+ ena_napi = irq->data;
+ netif_napi_set_irq(&ena_napi->napi, -1);
free_irq(irq->vector, irq->data);
}
}
@@ -4131,13 +4101,6 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
ena_dev = adapter->ena_dev;
netdev = adapter->netdev;
-#ifdef CONFIG_RFS_ACCEL
- if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
- free_irq_cpu_rmap(netdev->rx_cpu_rmap);
- netdev->rx_cpu_rmap = NULL;
- }
-
-#endif /* CONFIG_RFS_ACCEL */
/* Make sure timer and reset routine won't be called after
* freeing device resources.
*/
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 0671a066913b..9d35ac348ebe 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -571,7 +571,7 @@ static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup)
return pDB;
}
-void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
+static void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
{
struct db_dest *pDBfree = aup->pDBfree;
if (pDBfree)
diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c
index 2babea110991..eeb72b1809ea 100644
--- a/drivers/net/ethernet/amd/pds_core/auxbus.c
+++ b/drivers/net/ethernet/amd/pds_core/auxbus.c
@@ -175,34 +175,32 @@ static struct pds_auxiliary_dev *pdsc_auxbus_dev_register(struct pdsc *cf,
return padev;
}
-int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
+void pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf,
+ struct pds_auxiliary_dev **pd_ptr)
{
struct pds_auxiliary_dev *padev;
- int err = 0;
- if (!cf)
- return -ENODEV;
+ if (!*pd_ptr)
+ return;
mutex_lock(&pf->config_lock);
- padev = pf->vfs[cf->vf_id].padev;
- if (padev) {
- pds_client_unregister(pf, padev->client_id);
- auxiliary_device_delete(&padev->aux_dev);
- auxiliary_device_uninit(&padev->aux_dev);
- padev->client_id = 0;
- }
- pf->vfs[cf->vf_id].padev = NULL;
+ padev = *pd_ptr;
+ pds_client_unregister(pf, padev->client_id);
+ auxiliary_device_delete(&padev->aux_dev);
+ auxiliary_device_uninit(&padev->aux_dev);
+ padev->client_id = 0;
+ *pd_ptr = NULL;
mutex_unlock(&pf->config_lock);
- return err;
}
-int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
+int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf,
+ enum pds_core_vif_types vt,
+ struct pds_auxiliary_dev **pd_ptr)
{
struct pds_auxiliary_dev *padev;
char devname[PDS_DEVNAME_LEN];
- enum pds_core_vif_types vt;
unsigned long mask;
u16 vt_support;
int client_id;
@@ -211,6 +209,9 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
if (!cf)
return -ENODEV;
+ if (vt >= PDS_DEV_TYPE_MAX)
+ return -EINVAL;
+
mutex_lock(&pf->config_lock);
mask = BIT_ULL(PDSC_S_FW_DEAD) |
@@ -222,17 +223,10 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
goto out_unlock;
}
- /* We only support vDPA so far, so it is the only one to
- * be verified that it is available in the Core device and
- * enabled in the devlink param. In the future this might
- * become a loop for several VIF types.
- */
-
/* Verify that the type is supported and enabled. It is not
- * an error if there is no auxbus device support for this
- * VF, it just means something else needs to happen with it.
+ * an error if the firmware doesn't support the feature; the
+ * driver just won't set up an auxiliary_device for it.
*/
- vt = PDS_DEV_TYPE_VDPA;
vt_support = !!le16_to_cpu(pf->dev_ident.vif_types[vt]);
if (!(vt_support &&
pf->viftype_status[vt].supported &&
@@ -258,7 +252,7 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
err = PTR_ERR(padev);
goto out_unlock;
}
- pf->vfs[cf->vf_id].padev = padev;
+ *pd_ptr = padev;
out_unlock:
mutex_unlock(&pf->config_lock);
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
index 536635e57727..1eb0d92786f7 100644
--- a/drivers/net/ethernet/amd/pds_core/core.c
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -402,6 +402,9 @@ err_out_uninit:
}
static struct pdsc_viftype pdsc_viftype_defaults[] = {
+ [PDS_DEV_TYPE_FWCTL] = { .name = PDS_DEV_TYPE_FWCTL_STR,
+ .vif_id = PDS_DEV_TYPE_FWCTL,
+ .dl_id = -1 },
[PDS_DEV_TYPE_VDPA] = { .name = PDS_DEV_TYPE_VDPA_STR,
.vif_id = PDS_DEV_TYPE_VDPA,
.dl_id = DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET },
@@ -428,6 +431,10 @@ static int pdsc_viftypes_init(struct pdsc *pdsc)
/* See what the Core device has for support */
vt_support = !!le16_to_cpu(pdsc->dev_ident.vif_types[vt]);
+
+ if (vt == PDS_DEV_TYPE_FWCTL)
+ pdsc->viftype_status[vt].enabled = true;
+
dev_dbg(pdsc->dev, "VIF %s is %ssupported\n",
pdsc->viftype_status[vt].name,
vt_support ? "" : "not ");
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
index 14522d6d5f86..0bf320c43083 100644
--- a/drivers/net/ethernet/amd/pds_core/core.h
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -156,6 +156,7 @@ struct pdsc {
struct dentry *dentry;
struct device *dev;
struct pdsc_dev_bar bars[PDS_CORE_BARS_MAX];
+ struct pds_auxiliary_dev *padev;
struct pdsc_vf *vfs;
int num_vfs;
int vf_id;
@@ -303,8 +304,11 @@ void pdsc_health_thread(struct work_struct *work);
int pdsc_register_notify(struct notifier_block *nb);
void pdsc_unregister_notify(struct notifier_block *nb);
void pdsc_notify(unsigned long event, void *data);
-int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf);
-int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf);
+int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf,
+ enum pds_core_vif_types vt,
+ struct pds_auxiliary_dev **pd_ptr);
+void pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf,
+ struct pds_auxiliary_dev **pd_ptr);
void pdsc_process_adminq(struct pdsc_qcq *qcq);
void pdsc_work_thread(struct work_struct *work);
diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
index 44971e71991f..c5c787df61a4 100644
--- a/drivers/net/ethernet/amd/pds_core/devlink.c
+++ b/drivers/net/ethernet/amd/pds_core/devlink.c
@@ -56,8 +56,11 @@ int pdsc_dl_enable_set(struct devlink *dl, u32 id,
for (vf_id = 0; vf_id < pdsc->num_vfs; vf_id++) {
struct pdsc *vf = pdsc->vfs[vf_id].vf;
- err = ctx->val.vbool ? pdsc_auxbus_dev_add(vf, pdsc) :
- pdsc_auxbus_dev_del(vf, pdsc);
+ if (ctx->val.vbool)
+ err = pdsc_auxbus_dev_add(vf, pdsc, vt_entry->vif_id,
+ &pdsc->vfs[vf_id].padev);
+ else
+ pdsc_auxbus_dev_del(vf, pdsc, &pdsc->vfs[vf_id].padev);
}
return err;
diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
index 660268ff9562..4843f9249a31 100644
--- a/drivers/net/ethernet/amd/pds_core/main.c
+++ b/drivers/net/ethernet/amd/pds_core/main.c
@@ -190,7 +190,8 @@ static int pdsc_init_vf(struct pdsc *vf)
devl_unlock(dl);
pf->vfs[vf->vf_id].vf = vf;
- err = pdsc_auxbus_dev_add(vf, pf);
+ err = pdsc_auxbus_dev_add(vf, pf, PDS_DEV_TYPE_VDPA,
+ &pf->vfs[vf->vf_id].padev);
if (err) {
devl_lock(dl);
devl_unregister(dl);
@@ -264,6 +265,10 @@ static int pdsc_init_pf(struct pdsc *pdsc)
mutex_unlock(&pdsc->config_lock);
+ err = pdsc_auxbus_dev_add(pdsc, pdsc, PDS_DEV_TYPE_FWCTL, &pdsc->padev);
+ if (err)
+ goto err_out_stop;
+
dl = priv_to_devlink(pdsc);
devl_lock(dl);
err = devl_params_register(dl, pdsc_dl_params,
@@ -272,7 +277,7 @@ static int pdsc_init_pf(struct pdsc *pdsc)
devl_unlock(dl);
dev_warn(pdsc->dev, "Failed to register devlink params: %pe\n",
ERR_PTR(err));
- goto err_out_stop;
+ goto err_out_del_dev;
}
hr = devl_health_reporter_create(dl, &pdsc_fw_reporter_ops, 0, pdsc);
@@ -295,6 +300,8 @@ static int pdsc_init_pf(struct pdsc *pdsc)
err_out_unreg_params:
devlink_params_unregister(dl, pdsc_dl_params,
ARRAY_SIZE(pdsc_dl_params));
+err_out_del_dev:
+ pdsc_auxbus_dev_del(pdsc, pdsc, &pdsc->padev);
err_out_stop:
pdsc_stop(pdsc);
err_out_teardown:
@@ -417,7 +424,7 @@ static void pdsc_remove(struct pci_dev *pdev)
pf = pdsc_get_pf_struct(pdsc->pdev);
if (!IS_ERR(pf)) {
- pdsc_auxbus_dev_del(pdsc, pf);
+ pdsc_auxbus_dev_del(pdsc, pf, &pf->vfs[pdsc->vf_id].padev);
pf->vfs[pdsc->vf_id].vf = NULL;
}
} else {
@@ -426,6 +433,7 @@ static void pdsc_remove(struct pci_dev *pdev)
* shut themselves down.
*/
pdsc_sriov_configure(pdev, 0);
+ pdsc_auxbus_dev_del(pdsc, pdsc, &pdsc->padev);
timer_shutdown_sync(&pdsc->wdtimer);
if (pdsc->wq)
@@ -482,7 +490,10 @@ static void pdsc_reset_prepare(struct pci_dev *pdev)
pf = pdsc_get_pf_struct(pdsc->pdev);
if (!IS_ERR(pf))
- pdsc_auxbus_dev_del(pdsc, pf);
+ pdsc_auxbus_dev_del(pdsc, pf,
+ &pf->vfs[pdsc->vf_id].padev);
+ } else {
+ pdsc_auxbus_dev_del(pdsc, pdsc, &pdsc->padev);
}
pdsc_unmap_bars(pdsc);
@@ -527,7 +538,11 @@ static void pdsc_reset_done(struct pci_dev *pdev)
pf = pdsc_get_pf_struct(pdsc->pdev);
if (!IS_ERR(pf))
- pdsc_auxbus_dev_add(pdsc, pf);
+ pdsc_auxbus_dev_add(pdsc, pf, PDS_DEV_TYPE_VDPA,
+ &pf->vfs[pdsc->vf_id].padev);
+ } else {
+ pdsc_auxbus_dev_add(pdsc, pdsc, PDS_DEV_TYPE_FWCTL,
+ &pdsc->padev);
}
}
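
Note: pdsc_auxbus_dev_del() now takes the address of the slot that owns the auxiliary device (pf->vfs[vf_id].padev or pdsc->padev), so a single helper both tears the device down and clears the caller's reference. A generic user-space sketch of that pointer-to-pointer teardown idiom; the struct and function names are illustrative only:

#include <stdio.h>
#include <stdlib.h>

struct client_dev {
	int id;
};

/* Tear down a client and clear the caller's slot in one place, so a
 * repeated call against the same slot is a harmless no-op. */
static void client_del(struct client_dev **slot)
{
	if (!*slot)
		return;

	printf("deleting client %d\n", (*slot)->id);
	free(*slot);
	*slot = NULL;
}

int main(void)
{
	struct client_dev *padev = malloc(sizeof(*padev));

	if (!padev)
		return 1;

	padev->id = 42;
	client_del(&padev);	/* frees the client and clears the slot */
	client_del(&padev);	/* second call is a no-op */
	return 0;
}

Checking and clearing the slot inside the helper is what lets the remove and reset paths above call the teardown unconditionally.
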
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index 2a91c84aebdb..d7ca847d44c7 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -9,8 +9,6 @@
#include "main.h"
-static const struct acpi_device_id xge_acpi_match[];
-
static int xge_get_resources(struct xge_pdata *pdata)
{
struct platform_device *pdev;
@@ -731,7 +729,7 @@ MODULE_DEVICE_TABLE(acpi, xge_acpi_match);
static struct platform_driver xge_driver = {
.driver = {
.name = "xgene-enet-v2",
- .acpi_match_table = ACPI_PTR(xge_acpi_match),
+ .acpi_match_table = xge_acpi_match,
},
.probe = xge_probe,
.remove = xge_remove,
diff --git a/drivers/net/ethernet/apm/xgene-v2/mdio.c b/drivers/net/ethernet/apm/xgene-v2/mdio.c
index eba06831aec2..6a17045a5f62 100644
--- a/drivers/net/ethernet/apm/xgene-v2/mdio.c
+++ b/drivers/net/ethernet/apm/xgene-v2/mdio.c
@@ -97,7 +97,6 @@ void xge_mdio_remove(struct net_device *ndev)
int xge_mdio_config(struct net_device *ndev)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct xge_pdata *pdata = netdev_priv(ndev);
struct device *dev = &pdata->pdev->dev;
struct mii_bus *mdio_bus;
@@ -137,17 +136,12 @@ int xge_mdio_config(struct net_device *ndev)
goto err;
}
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_AUI_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_BNC_BIT, mask);
-
- linkmode_andnot(phydev->supported, phydev->supported, mask);
- linkmode_copy(phydev->advertising, phydev->supported);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+
pdata->phy_speed = SPEED_UNKNOWN;
return 0;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 86607b79c09f..cc3b1631c905 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -6,8 +6,14 @@
* Keyur Chudgar <kchudgar@apm.com>
*/
-#include <linux/of_gpio.h>
-#include <linux/gpio.h>
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_xgmac.h"
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
index 414b2e448d59..787ea91802e7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
@@ -113,19 +113,9 @@ static const struct hwmon_ops aq_hwmon_ops = {
.read_string = aq_hwmon_read_string,
};
-static u32 aq_hwmon_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_LABEL,
- HWMON_T_INPUT | HWMON_T_LABEL,
- 0,
-};
-
-static const struct hwmon_channel_info aq_hwmon_temp = {
- .type = hwmon_temp,
- .config = aq_hwmon_temp_config,
-};
-
static const struct hwmon_channel_info * const aq_hwmon_info[] = {
- &aq_hwmon_temp,
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
NULL,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index f5901f8e3907..f6b990b7f5b4 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -226,7 +226,6 @@ struct __packed offload_info {
struct offload_port_info ports;
struct offload_ka_info kas;
struct offload_rr_info rrs;
- u8 buf[];
};
struct __packed hw_atl_utils_fw_rpc {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 8e04552d2216..02c8213915a5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2593,7 +2593,7 @@ void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
/********************* Multicast verbs: SET, CLEAR ****************************/
static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
{
- return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
+ return (crc32c(0, mac, ETH_ALEN) >> 24) & 0xff;
}
struct bnx2x_mcast_mac_elem {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 7b8b5b39c7bb..934ba9425857 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -54,7 +54,10 @@
#include <net/pkt_cls.h>
#include <net/page_pool/helpers.h>
#include <linux/align.h>
+#include <net/netdev_lock.h>
#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
+#include <linux/pci-tph.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -76,6 +79,7 @@
#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
NETIF_MSG_TX_ERR)
+MODULE_IMPORT_NS("NETDEV_INTERNAL");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
@@ -485,6 +489,17 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
txr = &bp->tx_ring[bp->tx_ring_map[i]];
prod = txr->tx_prod;
+#if (MAX_SKB_FRAGS > TX_MAX_FRAGS)
+ if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS) {
+ netdev_warn_once(dev, "SKB has too many (%d) fragments, max supported is %d. SKB will be linearized.\n",
+ skb_shinfo(skb)->nr_frags, TX_MAX_FRAGS);
+ if (skb_linearize(skb)) {
+ dev_kfree_skb_any(skb);
+ dev_core_stats_tx_dropped_inc(dev);
+ return NETDEV_TX_OK;
+ }
+ }
+#endif
free_size = bnxt_tx_avail(bp, txr);
if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
/* We must have raced with NAPI cleanup */
@@ -564,7 +579,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
TX_BD_FLAGS_LHINT_512_AND_SMALLER |
TX_BD_FLAGS_COAL_NOW |
TX_BD_FLAGS_PACKET_END |
- (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
+ TX_BD_CNT(2));
if (skb->ip_summed == CHECKSUM_PARTIAL)
tx_push1->tx_bd_hsize_lflags =
@@ -639,7 +654,7 @@ normal_tx:
dma_unmap_addr_set(tx_buf, mapping, mapping);
flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
- ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
+ TX_BD_CNT(last_frag + 2);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);
@@ -2038,6 +2053,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct rx_cmp_ext *rxcmp1;
u32 tmp_raw_cons = *raw_cons;
u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
+ struct skb_shared_info *sinfo;
struct bnxt_sw_rx_bd *rx_buf;
unsigned int len;
u8 *data_ptr, agg_bufs, cmp_type;
@@ -2164,6 +2180,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
false);
if (!frag_len)
goto oom_next_rx;
+
}
xdp_active = true;
}
@@ -2173,6 +2190,12 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = 1;
goto next_rx;
}
+ if (xdp_buff_has_frags(&xdp)) {
+ sinfo = xdp_get_shared_info_from_buff(&xdp);
+ agg_bufs = sinfo->nr_frags;
+ } else {
+ agg_bufs = 0;
+ }
}
if (len <= bp->rx_copybreak) {
@@ -2210,7 +2233,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (!skb)
goto oom_next_rx;
} else {
- skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
+ skb = bnxt_xdp_build_skb(bp, skb, agg_bufs,
+ rxr->page_pool, &xdp);
if (!skb) {
/* we should be able to free the old skb here */
bnxt_xdp_buff_frags_free(rxr, &xdp);
@@ -3314,74 +3338,81 @@ poll_done:
return work_done;
}
-static void bnxt_free_tx_skbs(struct bnxt *bp)
+static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr, int idx)
{
int i, max_idx;
struct pci_dev *pdev = bp->pdev;
- if (!bp->tx_ring)
- return;
-
max_idx = bp->tx_nr_pages * TX_DESC_CNT;
- for (i = 0; i < bp->tx_nr_rings; i++) {
- struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
- int j;
- if (!txr->tx_buf_ring)
+ for (i = 0; i < max_idx;) {
+ struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[i];
+ struct sk_buff *skb;
+ int j, last;
+
+ if (idx < bp->tx_nr_rings_xdp &&
+ tx_buf->action == XDP_REDIRECT) {
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ xdp_return_frame(tx_buf->xdpf);
+ tx_buf->action = 0;
+ tx_buf->xdpf = NULL;
+ i++;
continue;
+ }
- for (j = 0; j < max_idx;) {
- struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
- struct sk_buff *skb;
- int k, last;
-
- if (i < bp->tx_nr_rings_xdp &&
- tx_buf->action == XDP_REDIRECT) {
- dma_unmap_single(&pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- xdp_return_frame(tx_buf->xdpf);
- tx_buf->action = 0;
- tx_buf->xdpf = NULL;
- j++;
- continue;
- }
+ skb = tx_buf->skb;
+ if (!skb) {
+ i++;
+ continue;
+ }
- skb = tx_buf->skb;
- if (!skb) {
- j++;
- continue;
- }
+ tx_buf->skb = NULL;
- tx_buf->skb = NULL;
+ if (tx_buf->is_push) {
+ dev_kfree_skb(skb);
+ i += 2;
+ continue;
+ }
- if (tx_buf->is_push) {
- dev_kfree_skb(skb);
- j += 2;
- continue;
- }
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_headlen(skb),
+ DMA_TO_DEVICE);
- dma_unmap_single(&pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- skb_headlen(skb),
- DMA_TO_DEVICE);
+ last = tx_buf->nr_frags;
+ i += 2;
+ for (j = 0; j < last; j++, i++) {
+ int ring_idx = i & bp->tx_ring_mask;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
- last = tx_buf->nr_frags;
- j += 2;
- for (k = 0; k < last; k++, j++) {
- int ring_idx = j & bp->tx_ring_mask;
- skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
-
- tx_buf = &txr->tx_buf_ring[ring_idx];
- dma_unmap_page(
- &pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- skb_frag_size(frag), DMA_TO_DEVICE);
- }
- dev_kfree_skb(skb);
+ tx_buf = &txr->tx_buf_ring[ring_idx];
+ dma_unmap_page(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_frag_size(frag), DMA_TO_DEVICE);
}
- netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
+ dev_kfree_skb(skb);
+ }
+ netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, idx));
+}
+
+static void bnxt_free_tx_skbs(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->tx_ring)
+ return;
+
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+
+ if (!txr->tx_buf_ring)
+ continue;
+
+ bnxt_free_one_tx_ring_skbs(bp, txr, i);
}
}
@@ -5236,8 +5267,10 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
{
int i;
- /* Under rtnl_lock and all our NAPIs have been disabled. It's
- * safe to delete the hash table.
+ netdev_assert_locked(bp->dev);
+
+ /* We hold the netdev instance lock and all our NAPIs have been disabled.
+ * It's safe to delete the hash table.
*/
for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
struct hlist_head *head;
@@ -5565,6 +5598,8 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
+ if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2)
+ flags |= FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT;
req->flags = cpu_to_le32(flags);
req->ver_maj_8b = DRV_VER_MAJ;
req->ver_min_8b = DRV_VER_MIN;
@@ -6935,6 +6970,30 @@ static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
hwrm_req_drop(bp, req);
}
+static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
+ struct hwrm_ring_alloc_input *req,
+ struct bnxt_ring_struct *ring)
+{
+ struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx];
+ u32 enables = RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID |
+ RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID;
+
+ if (ring_type == HWRM_RING_ALLOC_AGG) {
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
+ req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
+ req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
+ enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID;
+ } else {
+ req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
+ if (NET_IP_ALIGN == 2)
+ req->flags =
+ cpu_to_le16(RING_ALLOC_REQ_FLAGS_RX_SOP_PAD);
+ }
+ req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
+ req->enables |= cpu_to_le32(enables);
+}
+
static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
struct bnxt_ring_struct *ring,
u32 ring_type, u32 map_index)
@@ -6986,37 +7045,13 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
break;
}
case HWRM_RING_ALLOC_RX:
- req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
- req->length = cpu_to_le32(bp->rx_ring_mask + 1);
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- u16 flags = 0;
-
- /* Association of rx ring with stats context */
- grp_info = &bp->grp_info[ring->grp_idx];
- req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
- req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
- req->enables |= cpu_to_le32(
- RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
- if (NET_IP_ALIGN == 2)
- flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
- req->flags = cpu_to_le16(flags);
- }
- break;
case HWRM_RING_ALLOC_AGG:
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
- /* Association of agg ring with rx ring */
- grp_info = &bp->grp_info[ring->grp_idx];
- req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
- req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
- req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
- req->enables |= cpu_to_le32(
- RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
- RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
- } else {
- req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
- }
- req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ req->length = (ring_type == HWRM_RING_ALLOC_RX) ?
+ cpu_to_le32(bp->rx_ring_mask + 1) :
+ cpu_to_le32(bp->rx_agg_ring_mask + 1);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ bnxt_set_rx_ring_params_p5(bp, ring_type, req, ring);
break;
case HWRM_RING_ALLOC_CMPL:
req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
@@ -7197,6 +7232,39 @@ static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
return 0;
}
+static int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr)
+{
+ const u32 type = HWRM_RING_ALLOC_CMPL;
+ struct bnxt_napi *bnapi = cpr->bnapi;
+ struct bnxt_ring_struct *ring;
+ u32 map_idx = bnapi->index;
+ int rc;
+
+ ring = &cpr->cp_ring_struct;
+ ring->handle = BNXT_SET_NQ_HDL(cpr);
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ if (rc)
+ return rc;
+ bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
+ bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
+ return 0;
+}
+
+static int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr, u32 tx_idx)
+{
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+ const u32 type = HWRM_RING_ALLOC_TX;
+ int rc;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, tx_idx);
+ if (rc)
+ return rc;
+ bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
+ return 0;
+}
+
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
@@ -7233,33 +7301,17 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
}
}
- type = HWRM_RING_ALLOC_TX;
for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
- struct bnxt_ring_struct *ring;
- u32 map_idx;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- struct bnxt_cp_ring_info *cpr2 = txr->tx_cpr;
- struct bnxt_napi *bnapi = txr->bnapi;
- u32 type2 = HWRM_RING_ALLOC_CMPL;
-
- ring = &cpr2->cp_ring_struct;
- ring->handle = BNXT_SET_NQ_HDL(cpr2);
- map_idx = bnapi->index;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
if (rc)
goto err_out;
- bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
- ring->fw_ring_id);
- bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
}
- ring = &txr->tx_ring_struct;
- map_idx = i;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = bnxt_hwrm_tx_ring_alloc(bp, txr, i);
if (rc)
goto err_out;
- bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
}
for (i = 0; i < bp->rx_nr_rings; i++) {
@@ -7272,20 +7324,9 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
if (!agg_rings)
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr;
- struct bnxt_napi *bnapi = rxr->bnapi;
- u32 type2 = HWRM_RING_ALLOC_CMPL;
- struct bnxt_ring_struct *ring;
- u32 map_idx = bnapi->index;
-
- ring = &cpr2->cp_ring_struct;
- ring->handle = BNXT_SET_NQ_HDL(cpr2);
- rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
if (rc)
goto err_out;
- bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
- ring->fw_ring_id);
- bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
}
}
@@ -7353,6 +7394,23 @@ exit:
return 0;
}
+static void bnxt_hwrm_tx_ring_free(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr,
+ bool close_path)
+{
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+ u32 cmpl_ring_id;
+
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ cmpl_ring_id = close_path ? bnxt_cp_ring_for_tx(bp, txr) :
+ INVALID_HW_RING_ID;
+ hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX,
+ cmpl_ring_id);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr,
bool close_path)
@@ -7397,6 +7455,33 @@ static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp,
bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
}
+static void bnxt_hwrm_cp_ring_free(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr)
+{
+ struct bnxt_ring_struct *ring;
+
+ ring = &cpr->cp_ring_struct;
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL,
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnxt_clear_one_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+{
+ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+ int i, size = ring->ring_mem.page_size;
+
+ cpr->cp_raw_cons = 0;
+ cpr->toggle = 0;
+
+ for (i = 0; i < bp->cp_nr_pages; i++)
+ if (cpr->cp_desc_ring[i])
+ memset(cpr->cp_desc_ring[i], 0, size);
+}
+
static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
{
u32 type;
@@ -7405,20 +7490,8 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
if (!bp->bnapi)
return;
- for (i = 0; i < bp->tx_nr_rings; i++) {
- struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
- struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
-
- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
-
- hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_TX,
- close_path ? cmpl_ring_id :
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- }
- }
+ for (i = 0; i < bp->tx_nr_rings; i++)
+ bnxt_hwrm_tx_ring_free(bp, &bp->tx_ring[i], close_path);
bnxt_cancel_dim(bp);
for (i = 0; i < bp->rx_nr_rings; i++) {
@@ -7442,17 +7515,9 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
struct bnxt_ring_struct *ring;
int j;
- for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) {
- struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
+ for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++)
+ bnxt_hwrm_cp_ring_free(bp, &cpr->cp_ring_arr[j]);
- ring = &cpr2->cp_ring_struct;
- if (ring->fw_ring_id == INVALID_HW_RING_ID)
- continue;
- hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_L2_CMPL,
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- }
ring = &cpr->cp_ring_struct;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
hwrm_ring_free_send_msg(bp, ring, type,
@@ -8365,6 +8430,7 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
switch (resp->port_partition_type) {
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
+ case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2:
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
bp->port_partition_type = resp->port_partition_type;
@@ -9529,6 +9595,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
+ if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_NPAR_1_2;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP;
if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
@@ -11237,6 +11305,155 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
return 0;
}
+static void bnxt_tx_queue_stop(struct bnxt *bp, int idx)
+{
+ struct bnxt_tx_ring_info *txr;
+ struct netdev_queue *txq;
+ struct bnxt_napi *bnapi;
+ int i;
+
+ bnapi = bp->bnapi[idx];
+ bnxt_for_each_napi_tx(i, bnapi, txr) {
+ WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
+ synchronize_net();
+
+ if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) {
+ txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
+ if (txq) {
+ __netif_tx_lock_bh(txq);
+ netif_tx_stop_queue(txq);
+ __netif_tx_unlock_bh(txq);
+ }
+ }
+
+ if (!bp->tph_mode)
+ continue;
+
+ bnxt_hwrm_tx_ring_free(bp, txr, true);
+ bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
+ bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index);
+ bnxt_clear_one_cp_ring(bp, txr->tx_cpr);
+ }
+}
+
+static int bnxt_tx_queue_start(struct bnxt *bp, int idx)
+{
+ struct bnxt_tx_ring_info *txr;
+ struct netdev_queue *txq;
+ struct bnxt_napi *bnapi;
+ int rc, i;
+
+ bnapi = bp->bnapi[idx];
+ /* All rings have been reserved and previously allocated.
+ * Reallocating with the same parameters should never fail.
+ */
+ bnxt_for_each_napi_tx(i, bnapi, txr) {
+ if (!bp->tph_mode)
+ goto start_tx;
+
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
+ if (rc)
+ return rc;
+
+ rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false);
+ if (rc)
+ return rc;
+
+ txr->tx_prod = 0;
+ txr->tx_cons = 0;
+ txr->tx_hw_cons = 0;
+start_tx:
+ WRITE_ONCE(txr->dev_state, 0);
+ synchronize_net();
+
+ if (bnapi->flags & BNXT_NAPI_FLAG_XDP)
+ continue;
+
+ txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
+ if (txq)
+ netif_tx_start_queue(txq);
+ }
+
+ return 0;
+}
+
+static void bnxt_irq_affinity_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct bnxt_irq *irq;
+ u16 tag;
+ int err;
+
+ irq = container_of(notify, struct bnxt_irq, affinity_notify);
+
+ if (!irq->bp->tph_mode)
+ return;
+
+ cpumask_copy(irq->cpu_mask, mask);
+
+ if (irq->ring_nr >= irq->bp->rx_nr_rings)
+ return;
+
+ if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
+ cpumask_first(irq->cpu_mask), &tag))
+ return;
+
+ if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag))
+ return;
+
+ netdev_lock(irq->bp->dev);
+ if (netif_running(irq->bp->dev)) {
+ err = netdev_rx_queue_restart(irq->bp->dev, irq->ring_nr);
+ if (err)
+ netdev_err(irq->bp->dev,
+ "RX queue restart failed: err=%d\n", err);
+ }
+ netdev_unlock(irq->bp->dev);
+}
+
+static void bnxt_irq_affinity_release(struct kref *ref)
+{
+ struct irq_affinity_notify *notify =
+ container_of(ref, struct irq_affinity_notify, kref);
+ struct bnxt_irq *irq;
+
+ irq = container_of(notify, struct bnxt_irq, affinity_notify);
+
+ if (!irq->bp->tph_mode)
+ return;
+
+ if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, 0)) {
+ netdev_err(irq->bp->dev,
+ "Setting ST=0 for MSIX entry %d failed\n",
+ irq->msix_nr);
+ return;
+ }
+}
+
+static void bnxt_release_irq_notifier(struct bnxt_irq *irq)
+{
+ irq_set_affinity_notifier(irq->vector, NULL);
+}
+
+static void bnxt_register_irq_notifier(struct bnxt *bp, struct bnxt_irq *irq)
+{
+ struct irq_affinity_notify *notify;
+
+ irq->bp = bp;
+
+ /* Nothing to do if TPH is not enabled */
+ if (!bp->tph_mode)
+ return;
+
+ /* Register IRQ affinity notifier */
+ notify = &irq->affinity_notify;
+ notify->irq = irq->vector;
+ notify->notify = bnxt_irq_affinity_notify;
+ notify->release = bnxt_irq_affinity_release;
+
+ irq_set_affinity_notifier(irq->vector, notify);
+}
+
static void bnxt_free_irq(struct bnxt *bp)
{
struct bnxt_irq *irq;
@@ -11259,11 +11476,18 @@ static void bnxt_free_irq(struct bnxt *bp)
free_cpumask_var(irq->cpu_mask);
irq->have_cpumask = 0;
}
+
+ bnxt_release_irq_notifier(irq);
+
free_irq(irq->vector, bp->bnapi[i]);
}
irq->requested = 0;
}
+
+ /* Disable TPH support */
+ pcie_disable_tph(bp->pdev);
+ bp->tph_mode = 0;
}
static int bnxt_request_irq(struct bnxt *bp)
@@ -11283,6 +11507,12 @@ static int bnxt_request_irq(struct bnxt *bp)
#ifdef CONFIG_RFS_ACCEL
rmap = bp->dev->rx_cpu_rmap;
#endif
+
+ /* Enable TPH support as part of IRQ request */
+ rc = pcie_enable_tph(bp->pdev, PCI_TPH_ST_IV_MODE);
+ if (!rc)
+ bp->tph_mode = PCI_TPH_ST_IV_MODE;
+
for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
int map_idx = bnxt_cp_num_to_irq_num(bp, i);
struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
@@ -11301,13 +11531,16 @@ static int bnxt_request_irq(struct bnxt *bp)
if (rc)
break;
- netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector);
+ netif_napi_set_irq_locked(&bp->bnapi[i]->napi, irq->vector);
irq->requested = 1;
if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
int numa_node = dev_to_node(&bp->pdev->dev);
+ u16 tag;
irq->have_cpumask = 1;
+ irq->msix_nr = map_idx;
+ irq->ring_nr = i;
cpumask_set_cpu(cpumask_local_spread(i, numa_node),
irq->cpu_mask);
rc = irq_update_affinity_hint(irq->vector, irq->cpu_mask);
@@ -11317,6 +11550,16 @@ static int bnxt_request_irq(struct bnxt *bp)
irq->vector);
break;
}
+
+ bnxt_register_irq_notifier(bp, irq);
+
+ /* Init ST table entry */
+ if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
+ cpumask_first(irq->cpu_mask),
+ &tag))
+ continue;
+
+ pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag);
}
}
return rc;
@@ -11337,9 +11580,9 @@ static void bnxt_del_napi(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
- __netif_napi_del(&bnapi->napi);
+ __netif_napi_del_locked(&bnapi->napi);
}
- /* We called __netif_napi_del(), we need
+ /* We called __netif_napi_del_locked(), we need
* to respect an RCU grace period before freeing napi structures.
*/
synchronize_net();
@@ -11358,12 +11601,12 @@ static void bnxt_init_napi(struct bnxt *bp)
cp_nr_rings--;
for (i = 0; i < cp_nr_rings; i++) {
bnapi = bp->bnapi[i];
- netif_napi_add_config(bp->dev, &bnapi->napi, poll_fn,
- bnapi->index);
+ netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn,
+ bnapi->index);
}
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bnapi = bp->bnapi[cp_nr_rings];
- netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
+ netif_napi_add_locked(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
}
}
@@ -11384,7 +11627,7 @@ static void bnxt_disable_napi(struct bnxt *bp)
cpr->sw_stats->tx.tx_resets++;
if (bnapi->in_reset)
cpr->sw_stats->rx.rx_resets++;
- napi_disable(&bnapi->napi);
+ napi_disable_locked(&bnapi->napi);
}
}
@@ -11406,7 +11649,7 @@ static void bnxt_enable_napi(struct bnxt *bp)
INIT_WORK(&cpr->dim.work, bnxt_dim_work);
cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
- napi_enable(&bnapi->napi);
+ napi_enable_locked(&bnapi->napi);
}
}
@@ -12077,6 +12320,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
struct hwrm_func_drv_if_change_input *req;
bool fw_reset = !bp->irq_tbl;
bool resc_reinit = false;
+ bool caps_change = false;
int rc, retry = 0;
u32 flags = 0;
@@ -12132,8 +12376,11 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
return -ENODEV;
}
- if (resc_reinit || fw_reset) {
- if (fw_reset) {
+ if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE)
+ caps_change = true;
+
+ if (resc_reinit || fw_reset || caps_change) {
+ if (fw_reset || caps_change) {
set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
bnxt_ulp_irq_stop(bp);
@@ -12569,7 +12816,6 @@ open_err_free_mem:
return rc;
}
-/* rtnl_lock held */
int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
@@ -12580,14 +12826,14 @@ int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
if (rc) {
netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
- dev_close(bp->dev);
+ netif_close(bp->dev);
}
return rc;
}
-/* rtnl_lock held, open the NIC half way by allocating all resources, but
- * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
- * self tests.
+/* netdev instance lock held, open the NIC half way by allocating all
+ * resources, but NAPI, IRQ, and TX are not enabled. This is mainly used
+ * for offline self tests.
*/
int bnxt_half_open_nic(struct bnxt *bp)
{
@@ -12618,12 +12864,12 @@ int bnxt_half_open_nic(struct bnxt *bp)
half_open_err:
bnxt_free_skbs(bp);
bnxt_free_mem(bp, true);
- dev_close(bp->dev);
+ netif_close(bp->dev);
return rc;
}
-/* rtnl_lock held, this call can only be made after a previous successful
- * call to bnxt_half_open_nic().
+/* netdev instance lock held, this call can only be made after a previous
+ * successful call to bnxt_half_open_nic().
*/
void bnxt_half_close_nic(struct bnxt *bp)
{
@@ -12732,10 +12978,11 @@ void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
/* If we get here, it means firmware reset is in progress
* while we are trying to close. We can safely proceed with
- * the close because we are holding rtnl_lock(). Some firmware
- * messages may fail as we proceed to close. We set the
- * ABORT_ERR flag here so that the FW reset thread will later
- * abort when it gets the rtnl_lock() and sees the flag.
+ * the close because we are holding netdev instance lock.
+ * Some firmware messages may fail as we proceed to close.
+ * We set the ABORT_ERR flag here so that the FW reset thread
+ * will later abort when it gets the netdev instance lock
+ * and sees the flag.
*/
netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
@@ -12826,7 +13073,7 @@ static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
return hwrm_req_send(bp, req);
}
-/* rtnl_lock held */
+/* netdev instance lock held */
static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mii_ioctl_data *mdio = if_mii(ifr);
@@ -13745,30 +13992,31 @@ bnxt_restart_timer:
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
-static void bnxt_rtnl_lock_sp(struct bnxt *bp)
+static void bnxt_lock_sp(struct bnxt *bp)
{
/* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
* set. If the device is being closed, bnxt_close() may be holding
- * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
- * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
+ * netdev instance lock and waiting for BNXT_STATE_IN_SP_TASK to clear.
+ * So we must clear BNXT_STATE_IN_SP_TASK before holding netdev
+ * instance lock.
*/
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- rtnl_lock();
+ netdev_lock(bp->dev);
}
-static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
+static void bnxt_unlock_sp(struct bnxt *bp)
{
set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- rtnl_unlock();
+ netdev_unlock(bp->dev);
}
/* Only called from bnxt_sp_task() */
static void bnxt_reset(struct bnxt *bp, bool silent)
{
- bnxt_rtnl_lock_sp(bp);
+ bnxt_lock_sp(bp);
if (test_bit(BNXT_STATE_OPEN, &bp->state))
bnxt_reset_task(bp, silent);
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
/* Only called from bnxt_sp_task() */
@@ -13776,9 +14024,9 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
{
int i;
- bnxt_rtnl_lock_sp(bp);
+ bnxt_lock_sp(bp);
if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
return;
}
/* Disable and flush TPA before resetting the RX ring */
@@ -13817,7 +14065,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
}
if (bp->flags & BNXT_FLAG_TPA)
bnxt_set_tpa(bp, true);
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
static void bnxt_fw_fatal_close(struct bnxt *bp)
@@ -13873,7 +14121,7 @@ static bool is_bnxt_fw_ok(struct bnxt *bp)
return false;
}
-/* rtnl_lock is acquired before calling this function */
+/* netdev instance lock is acquired before calling this function */
static void bnxt_force_fw_reset(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -13916,9 +14164,9 @@ void bnxt_fw_exception(struct bnxt *bp)
netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
bnxt_ulp_stop(bp);
- bnxt_rtnl_lock_sp(bp);
+ bnxt_lock_sp(bp);
bnxt_force_fw_reset(bp);
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
/* Returns the number of registered VFs, or 1 if VF configuration is pending, or
@@ -13948,7 +14196,7 @@ static int bnxt_get_registered_vfs(struct bnxt *bp)
void bnxt_fw_reset(struct bnxt *bp)
{
bnxt_ulp_stop(bp);
- bnxt_rtnl_lock_sp(bp);
+ bnxt_lock_sp(bp);
if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
@@ -13971,7 +14219,7 @@ void bnxt_fw_reset(struct bnxt *bp)
netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
n);
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- dev_close(bp->dev);
+ netif_close(bp->dev);
goto fw_reset_exit;
} else if (n > 0) {
u16 vf_tmo_dsecs = n * 10;
@@ -13994,7 +14242,7 @@ void bnxt_fw_reset(struct bnxt *bp)
bnxt_queue_fw_reset_work(bp, tmo);
}
fw_reset_exit:
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
static void bnxt_chk_missed_irq(struct bnxt *bp)
@@ -14193,7 +14441,7 @@ static void bnxt_sp_task(struct work_struct *work)
static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
int *max_cp);
-/* Under rtnl_lock */
+/* Under netdev instance lock */
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp)
{
@@ -14586,7 +14834,7 @@ static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
bnxt_dl_health_fw_status_update(bp, false);
bp->fw_reset_state = 0;
- dev_close(bp->dev);
+ netif_close(bp->dev);
}
static void bnxt_fw_reset_task(struct work_struct *work)
@@ -14621,10 +14869,10 @@ static void bnxt_fw_reset_task(struct work_struct *work)
return;
}
bp->fw_reset_timestamp = jiffies;
- rtnl_lock();
+ netdev_lock(bp->dev);
if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
bnxt_fw_reset_abort(bp, rc);
- rtnl_unlock();
+ netdev_unlock(bp->dev);
goto ulp_start;
}
bnxt_fw_reset_close(bp);
@@ -14635,7 +14883,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
tmo = bp->fw_reset_min_dsecs * HZ / 10;
}
- rtnl_unlock();
+ netdev_unlock(bp->dev);
bnxt_queue_fw_reset_work(bp, tmo);
return;
}
@@ -14709,7 +14957,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
fallthrough;
case BNXT_FW_RESET_STATE_OPENING:
- while (!rtnl_trylock()) {
+ while (!netdev_trylock(bp->dev)) {
bnxt_queue_fw_reset_work(bp, HZ / 10);
return;
}
@@ -14717,7 +14965,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
if (rc) {
netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
bnxt_fw_reset_abort(bp, rc);
- rtnl_unlock();
+ netdev_unlock(bp->dev);
goto ulp_start;
}
@@ -14736,13 +14984,13 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bnxt_dl_health_fw_recovery_done(bp);
bnxt_dl_health_fw_status_update(bp, true);
}
- rtnl_unlock();
+ netdev_unlock(bp->dev);
bnxt_ulp_start(bp, 0);
bnxt_reenable_sriov(bp);
- rtnl_lock();
+ netdev_lock(bp->dev);
bnxt_vf_reps_alloc(bp);
bnxt_vf_reps_open(bp);
- rtnl_unlock();
+ netdev_unlock(bp->dev);
break;
}
return;
@@ -14755,9 +15003,9 @@ fw_reset_abort_status:
netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
}
fw_reset_abort:
- rtnl_lock();
+ netdev_lock(bp->dev);
bnxt_fw_reset_abort(bp, rc);
- rtnl_unlock();
+ netdev_unlock(bp->dev);
ulp_start:
bnxt_ulp_start(bp, rc);
}
@@ -14849,13 +15097,14 @@ init_err:
return rc;
}
-/* rtnl_lock held */
static int bnxt_change_mac_addr(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
+ netdev_assert_locked(dev);
+
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
@@ -14876,11 +15125,12 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
return rc;
}
-/* rtnl_lock held */
static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
{
struct bnxt *bp = netdev_priv(dev);
+ netdev_assert_locked(dev);
+
if (netif_running(dev))
bnxt_close_nic(bp, true, false);
@@ -15375,6 +15625,9 @@ static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
struct bnxt_cp_ring_info *cpr;
u64 *sw;
+ if (!bp->bnapi)
+ return;
+
cpr = &bp->bnapi[i]->cp_ring;
sw = cpr->stats.sw_stats;
@@ -15398,6 +15651,9 @@ static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
struct bnxt_napi *bnapi;
u64 *sw;
+ if (!bp->tx_ring)
+ return;
+
bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
sw = bnapi->cp_ring.stats.sw_stats;
@@ -15439,6 +15695,9 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
struct bnxt_ring_struct *ring;
int rc;
+ if (!bp->rx_ring)
+ return -ENETDOWN;
+
rxr = &bp->rx_ring[idx];
clone = qmem;
memcpy(clone, rxr, sizeof(*rxr));
@@ -15521,6 +15780,7 @@ static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
struct bnxt_ring_struct *ring;
bnxt_free_one_rx_ring_skbs(bp, rxr);
+ bnxt_free_one_tpa_info(bp, rxr);
xdp_rxq_info_unreg(&rxr->xdp_rxq);
@@ -15601,6 +15861,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
struct bnxt_rx_ring_info *rxr, *clone;
struct bnxt_cp_ring_info *cpr;
struct bnxt_vnic_info *vnic;
+ struct bnxt_napi *bnapi;
int i, rc;
rxr = &bp->rx_ring[idx];
@@ -15618,21 +15879,40 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
bnxt_copy_rx_ring(bp, rxr, clone);
+ bnapi = rxr->bnapi;
+ cpr = &bnapi->cp_ring;
+
+ /* All rings have been reserved and previously allocated.
+ * Reallocating with the same parameters should never fail.
+ */
rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
if (rc)
- return rc;
+ goto err_reset;
+
+ if (bp->tph_mode) {
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
+ if (rc)
+ goto err_reset;
+ }
+
rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
if (rc)
- goto err_free_hwrm_rx_ring;
+ goto err_reset;
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
if (bp->flags & BNXT_FLAG_AGG_RINGS)
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
- cpr = &rxr->bnapi->cp_ring;
- cpr->sw_stats->rx.rx_resets++;
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
+ rc = bnxt_tx_queue_start(bp, idx);
+ if (rc)
+ goto err_reset;
+ }
+
+ napi_enable(&bnapi->napi);
+ bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
- for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
+ for (i = 0; i < bp->nr_vnics; i++) {
vnic = &bp->vnic_info[i];
rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
@@ -15648,8 +15928,12 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
return 0;
-err_free_hwrm_rx_ring:
- bnxt_hwrm_rx_ring_free(bp, rxr, false);
+err_reset:
+ netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n",
+ rc);
+ napi_enable(&bnapi->napi);
+ bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
+ bnxt_reset_task(bp, true);
return rc;
}
@@ -15657,10 +15941,12 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_rx_ring_info *rxr;
+ struct bnxt_cp_ring_info *cpr;
struct bnxt_vnic_info *vnic;
+ struct bnxt_napi *bnapi;
int i;
- for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
+ for (i = 0; i < bp->nr_vnics; i++) {
vnic = &bp->vnic_info[i];
vnic->mru = 0;
bnxt_hwrm_vnic_update(bp, vnic,
@@ -15669,14 +15955,30 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
/* Make sure NAPI sees that the VNIC is disabled */
synchronize_net();
rxr = &bp->rx_ring[idx];
- cancel_work_sync(&rxr->bnapi->cp_ring.dim.work);
+ bnapi = rxr->bnapi;
+ cpr = &bnapi->cp_ring;
+ cancel_work_sync(&cpr->dim.work);
bnxt_hwrm_rx_ring_free(bp, rxr, false);
bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
- rxr->rx_next_cons = 0;
page_pool_disable_direct_recycling(rxr->page_pool);
if (bnxt_separate_head_pool())
page_pool_disable_direct_recycling(rxr->head_pool);
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+ bnxt_tx_queue_stop(bp, idx);
+
+ /* Disable NAPI now after freeing the rings because HWRM_RING_FREE
+ * completion is handled in NAPI to guarantee no more DMA on that ring
+ * after seeing the completion.
+ */
+ napi_disable(&bnapi->napi);
+
+ if (bp->tph_mode) {
+ bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
+ bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
+ }
+ bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
+
memcpy(qmem, rxr, sizeof(*rxr));
bnxt_init_rx_ring_struct(bp, qmem);
@@ -15995,7 +16297,7 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
int rc;
- ASSERT_RTNL();
+ netdev_ops_assert_locked(bp->dev);
bnxt_hwrm_func_qcaps(bp);
if (netif_running(bp->dev))
@@ -16008,7 +16310,7 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp)
if (netif_running(bp->dev)) {
if (rc)
- dev_close(bp->dev);
+ netif_close(bp->dev);
else
rc = bnxt_open_nic(bp, true, false);
}
@@ -16345,6 +16647,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
if (BNXT_SUPPORTS_QUEUE_API(bp))
dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
+ dev->request_ops_lock = true;
rc = register_netdev(dev);
if (rc)
@@ -16395,13 +16698,13 @@ static void bnxt_shutdown(struct pci_dev *pdev)
if (!dev)
return;
- rtnl_lock();
+ netdev_lock(dev);
bp = netdev_priv(dev);
if (!bp)
goto shutdown_exit;
if (netif_running(dev))
- dev_close(dev);
+ netif_close(dev);
bnxt_ptp_clear(bp);
bnxt_clear_int_mode(bp);
@@ -16413,7 +16716,7 @@ static void bnxt_shutdown(struct pci_dev *pdev)
}
shutdown_exit:
- rtnl_unlock();
+ netdev_unlock(dev);
}
#ifdef CONFIG_PM_SLEEP
@@ -16425,7 +16728,7 @@ static int bnxt_suspend(struct device *device)
bnxt_ulp_stop(bp);
- rtnl_lock();
+ netdev_lock(dev);
if (netif_running(dev)) {
netif_device_detach(dev);
rc = bnxt_close(dev);
@@ -16434,7 +16737,7 @@ static int bnxt_suspend(struct device *device)
bnxt_ptp_clear(bp);
pci_disable_device(bp->pdev);
bnxt_free_ctx_mem(bp, false);
- rtnl_unlock();
+ netdev_unlock(dev);
return rc;
}
@@ -16444,7 +16747,7 @@ static int bnxt_resume(struct device *device)
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
- rtnl_lock();
+ netdev_lock(dev);
rc = pci_enable_device(bp->pdev);
if (rc) {
netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
@@ -16487,7 +16790,7 @@ static int bnxt_resume(struct device *device)
}
resume_exit:
- rtnl_unlock();
+ netdev_unlock(bp->dev);
bnxt_ulp_start(bp, rc);
if (!rc)
bnxt_reenable_sriov(bp);
@@ -16522,7 +16825,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
bnxt_ulp_stop(bp);
- rtnl_lock();
+ netdev_lock(netdev);
netif_device_detach(netdev);
if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
@@ -16531,7 +16834,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
}
if (abort || state == pci_channel_io_perm_failure) {
- rtnl_unlock();
+ netdev_unlock(netdev);
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -16550,7 +16853,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
if (pci_is_enabled(pdev))
pci_disable_device(pdev);
bnxt_free_ctx_mem(bp, false);
- rtnl_unlock();
+ netdev_unlock(netdev);
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -16580,7 +16883,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
msleep(900);
- rtnl_lock();
+ netdev_lock(netdev);
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev,
@@ -16635,7 +16938,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
reset_exit:
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bnxt_clear_reservations(bp, true);
- rtnl_unlock();
+ netdev_unlock(netdev);
return result;
}
@@ -16654,7 +16957,7 @@ static void bnxt_io_resume(struct pci_dev *pdev)
int err;
netdev_info(bp->dev, "PCI Slot Resume\n");
- rtnl_lock();
+ netdev_lock(netdev);
err = bnxt_hwrm_func_qcaps(bp);
if (!err) {
@@ -16667,7 +16970,7 @@ static void bnxt_io_resume(struct pci_dev *pdev)
if (!err)
netif_device_attach(netdev);
- rtnl_unlock();
+ netdev_unlock(netdev);
bnxt_ulp_start(bp, err);
if (!err)
bnxt_reenable_sriov(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 2373f423a523..21726cf56586 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -82,6 +82,12 @@ struct tx_bd {
#define TX_OPAQUE_PROD(bp, opq) ((TX_OPAQUE_IDX(opq) + TX_OPAQUE_BDS(opq)) &\
(bp)->tx_ring_mask)
+#define TX_BD_CNT(n) (((n) << TX_BD_FLAGS_BD_CNT_SHIFT) & TX_BD_FLAGS_BD_CNT)
+
+#define TX_MAX_BD_CNT 32
+
+#define TX_MAX_FRAGS (TX_MAX_BD_CNT - 2)
+
struct tx_bd_ext {
__le32 tx_bd_hsize_lflags;
#define TX_BD_FLAGS_TCP_UDP_CHKSUM (1 << 0)
@@ -1234,6 +1240,11 @@ struct bnxt_irq {
u8 have_cpumask:1;
char name[IFNAMSIZ + BNXT_IRQ_NAME_EXTRA];
cpumask_var_t cpu_mask;
+
+ struct bnxt *bp;
+ int msix_nr;
+ int ring_nr;
+ struct irq_affinity_notify affinity_notify;
};
#define HWRM_RING_ALLOC_TX 0x1
@@ -2410,6 +2421,8 @@ struct bnxt {
u8 max_q;
u8 num_tc;
+ u8 tph_mode;
+
unsigned int current_interval;
#define BNXT_TIMER_INTERVAL HZ
@@ -2492,6 +2505,7 @@ struct bnxt {
#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 BIT_ULL(39)
#define BNXT_FW_CAP_VNIC_RE_FLUSH BIT_ULL(40)
#define BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS BIT_ULL(41)
+ #define BNXT_FW_CAP_NPAR_1_2 BIT_ULL(42)
u32 fw_dbg_cap;
@@ -2689,6 +2703,7 @@ struct bnxt {
#define BNXT_DUMP_LIVE 0
#define BNXT_DUMP_CRASH 1
#define BNXT_DUMP_DRIVER 2
+#define BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE 3
struct bpf_prog *xdp_prog;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
index 7236d8e548ab..5576e7cf8463 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -159,8 +159,8 @@ static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
return rc;
}
-static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
- u16 segment_id)
+static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 dump_type,
+ u16 component_id, u16 segment_id)
{
struct hwrm_dbg_coredump_initiate_input *req;
int rc;
@@ -172,6 +172,8 @@ static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
req->component_id = cpu_to_le16(component_id);
req->segment_id = cpu_to_le16(segment_id);
+ if (dump_type == BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE)
+ req->seg_flags = DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_COLLECT_CTX_L1_CACHE;
return hwrm_req_send(bp, req);
}
@@ -450,7 +452,8 @@ static int __bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf,
start = jiffies;
- rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
+ rc = bnxt_hwrm_dbg_coredump_initiate(bp, dump_type, comp_id,
+ seg_id);
if (rc) {
netdev_err(bp->dev,
"Failed to initiate coredump for seg = %d\n",
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index ef8288fd68f4..777880594a04 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -11,6 +11,7 @@
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <net/devlink.h>
+#include <net/netdev_lock.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
@@ -439,14 +440,17 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
bnxt_ulp_stop(bp);
rtnl_lock();
+ netdev_lock(bp->dev);
if (bnxt_sriov_cfg(bp)) {
NL_SET_ERR_MSG_MOD(extack,
"reload is unsupported while VFs are allocated or being configured");
+ netdev_unlock(bp->dev);
rtnl_unlock();
bnxt_ulp_start(bp, 0);
return -EOPNOTSUPP;
}
if (bp->dev->reg_state == NETREG_UNREGISTERED) {
+ netdev_unlock(bp->dev);
rtnl_unlock();
bnxt_ulp_start(bp, 0);
return -ENODEV;
@@ -458,7 +462,8 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
if (rc) {
NL_SET_ERR_MSG_MOD(extack, "Failed to deregister");
if (netif_running(bp->dev))
- dev_close(bp->dev);
+ netif_close(bp->dev);
+ netdev_unlock(bp->dev);
rtnl_unlock();
break;
}
@@ -479,7 +484,9 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
return -EPERM;
}
rtnl_lock();
+ netdev_lock(bp->dev);
if (bp->dev->reg_state == NETREG_UNREGISTERED) {
+ netdev_unlock(bp->dev);
rtnl_unlock();
return -ENODEV;
}
@@ -493,6 +500,7 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
if (rc) {
NL_SET_ERR_MSG_MOD(extack, "Failed to activate firmware");
clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
+ netdev_unlock(bp->dev);
rtnl_unlock();
}
break;
@@ -511,6 +519,8 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
int rc = 0;
+ netdev_assert_locked(bp->dev);
+
*actions_performed = 0;
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
@@ -535,6 +545,7 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
if (!netif_running(bp->dev))
NL_SET_ERR_MSG_MOD(extack,
"Device is closed, not waiting for reset notice that will never come");
+ netdev_unlock(bp->dev);
rtnl_unlock();
while (test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) {
if (time_after(jiffies, timeout)) {
@@ -550,6 +561,7 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
msleep(50);
}
rtnl_lock();
+ netdev_lock(bp->dev);
if (!rc)
*actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
@@ -568,8 +580,9 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
}
*actions_performed |= BIT(action);
} else if (netif_running(bp->dev)) {
- dev_close(bp->dev);
+ netif_close(bp->dev);
}
+ netdev_unlock(bp->dev);
rtnl_unlock();
if (action == DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
bnxt_ulp_start(bp, rc);
@@ -666,6 +679,8 @@ static const struct bnxt_dl_nvm_param nvm_params[] = {
NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10, 4},
{DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7, 4},
+ {DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, NVM_OFF_SUPPORT_RDMA,
+ BNXT_NVM_FUNC_CFG, 1, 1},
{BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK,
BNXT_NVM_SHARED_CFG, 1, 1},
};
@@ -1010,37 +1025,19 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
}
-static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
- union devlink_param_value *val)
+static int __bnxt_hwrm_nvm_req(struct bnxt *bp,
+ const struct bnxt_dl_nvm_param *nvm, void *msg,
+ union devlink_param_value *val)
{
struct hwrm_nvm_get_variable_input *req = msg;
- struct bnxt_dl_nvm_param nvm_param;
struct hwrm_err_output *resp;
union bnxt_nvm_data *data;
dma_addr_t data_dma_addr;
- int idx = 0, rc, i;
-
- /* Get/Set NVM CFG parameter is supported only on PFs */
- if (BNXT_VF(bp)) {
- hwrm_req_drop(bp, req);
- return -EPERM;
- }
-
- for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
- if (nvm_params[i].id == param_id) {
- nvm_param = nvm_params[i];
- break;
- }
- }
+ int idx = 0, rc;
- if (i == ARRAY_SIZE(nvm_params)) {
- hwrm_req_drop(bp, req);
- return -EOPNOTSUPP;
- }
-
- if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
+ if (nvm->dir_type == BNXT_NVM_PORT_CFG)
idx = bp->pf.port_id;
- else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
+ else if (nvm->dir_type == BNXT_NVM_FUNC_CFG)
idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr);
@@ -1051,23 +1048,23 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
}
req->dest_data_addr = cpu_to_le64(data_dma_addr);
- req->data_len = cpu_to_le16(nvm_param.nvm_num_bits);
- req->option_num = cpu_to_le16(nvm_param.offset);
+ req->data_len = cpu_to_le16(nvm->nvm_num_bits);
+ req->option_num = cpu_to_le16(nvm->offset);
req->index_0 = cpu_to_le16(idx);
if (idx)
req->dimensions = cpu_to_le16(1);
resp = hwrm_req_hold(bp, req);
if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
- bnxt_copy_to_nvm_data(data, val, nvm_param.nvm_num_bits,
- nvm_param.dl_num_bytes);
+ bnxt_copy_to_nvm_data(data, val, nvm->nvm_num_bits,
+ nvm->dl_num_bytes);
rc = hwrm_req_send(bp, msg);
} else {
rc = hwrm_req_send_silent(bp, msg);
if (!rc) {
bnxt_copy_from_nvm_data(val, data,
- nvm_param.nvm_num_bits,
- nvm_param.dl_num_bytes);
+ nvm->nvm_num_bits,
+ nvm->dl_num_bytes);
} else {
if (resp->cmd_err ==
NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST)
@@ -1080,6 +1077,27 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
return rc;
}
+static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
+ union devlink_param_value *val)
+{
+ struct hwrm_nvm_get_variable_input *req = msg;
+ const struct bnxt_dl_nvm_param *nvm_param;
+ int i;
+
+ /* Get/Set NVM CFG parameter is supported only on PFs */
+ if (BNXT_VF(bp)) {
+ hwrm_req_drop(bp, req);
+ return -EPERM;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
+ nvm_param = &nvm_params[i];
+ if (nvm_param->id == param_id)
+ return __bnxt_hwrm_nvm_req(bp, nvm_param, msg, val);
+ }
+ return -EOPNOTSUPP;
+}
+
static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
@@ -1116,6 +1134,32 @@ static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
return bnxt_hwrm_nvm_req(bp, id, req, &ctx->val);
}
+static int bnxt_dl_roce_validate(struct devlink *dl, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ const struct bnxt_dl_nvm_param nvm_roce_cap = {0, NVM_OFF_RDMA_CAPABLE,
+ BNXT_NVM_SHARED_CFG, 1, 1};
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ struct hwrm_nvm_get_variable_input *req;
+ union devlink_param_value roce_cap;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE);
+ if (rc)
+ return rc;
+
+ if (__bnxt_hwrm_nvm_req(bp, &nvm_roce_cap, req, &roce_cap)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to verify if device is RDMA Capable");
+ return -EINVAL;
+ }
+ if (!roce_cap.vbool) {
+ NL_SET_ERR_MSG_MOD(extack, "Device does not support RDMA");
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
@@ -1180,6 +1224,10 @@ static const struct devlink_param bnxt_dl_params[] = {
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
bnxt_dl_msix_validate),
+ DEVLINK_PARAM_GENERIC(ENABLE_ROCE,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+ bnxt_dl_roce_validate),
DEVLINK_PARAM_DRIVER(BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
"gre_ver_check", DEVLINK_PARAM_TYPE_BOOL,
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index b8105065367b..7f45dcd7b287 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -41,8 +41,10 @@ static inline void bnxt_dl_set_remote_reset(struct devlink *dl, bool value)
#define NVM_OFF_MSIX_VEC_PER_PF_MAX 108
#define NVM_OFF_MSIX_VEC_PER_PF_MIN 114
#define NVM_OFF_IGNORE_ARI 164
+#define NVM_OFF_RDMA_CAPABLE 161
#define NVM_OFF_DIS_GRE_VER_CHECK 171
#define NVM_OFF_ENABLE_SRIOV 401
+#define NVM_OFF_SUPPORT_RDMA 506
#define NVM_OFF_NVM_CFG_VER 602
#define BNXT_NVM_CFG_VER_BITS 8
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 9c5820839514..48dd5922e4dd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -4541,16 +4541,16 @@ static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extac
return -EINVAL;
}
-static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
- const struct ethtool_module_eeprom *page_data,
- struct netlink_ext_ack *extack)
+static int
+bnxt_mod_eeprom_by_page_precheck(struct bnxt *bp,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
{
- struct bnxt *bp = netdev_priv(dev);
int rc;
if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
NL_SET_ERR_MSG_MOD(extack,
- "Module read not permitted on untrusted VF");
+ "Module read/write not permitted on untrusted VF");
return -EPERM;
}
@@ -4567,6 +4567,19 @@ static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
NL_SET_ERR_MSG_MOD(extack, "Firmware not capable for bank selection");
return -EINVAL;
}
+ return 0;
+}
+
+static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+
+ rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack);
+ if (rc)
+ return rc;
rc = bnxt_read_sfp_module_eeprom_info(bp, page_data->i2c_address << 1,
page_data->page, page_data->bank,
@@ -4580,6 +4593,62 @@ static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
return page_data->length;
}
+static int bnxt_write_sfp_module_eeprom_info(struct bnxt *bp,
+ const struct ethtool_module_eeprom *page)
+{
+ struct hwrm_port_phy_i2c_write_input *req;
+ int bytes_written = 0;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_WRITE);
+ if (rc)
+ return rc;
+
+ hwrm_req_hold(bp, req);
+ req->i2c_slave_addr = page->i2c_address << 1;
+ req->page_number = cpu_to_le16(page->page);
+ req->bank_number = page->bank;
+ req->port_id = cpu_to_le16(bp->pf.port_id);
+ req->enables = cpu_to_le32(PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET |
+ PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER);
+
+ while (bytes_written < page->length) {
+ u16 xfer_size;
+
+ xfer_size = min_t(u16, page->length - bytes_written,
+ BNXT_MAX_PHY_I2C_RESP_SIZE);
+ req->page_offset = cpu_to_le16(page->offset + bytes_written);
+ req->data_length = xfer_size;
+ memcpy(req->data, page->data + bytes_written, xfer_size);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ break;
+ bytes_written += xfer_size;
+ }
+
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+static int bnxt_set_module_eeprom_by_page(struct net_device *dev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+
+ rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack);
+ if (rc)
+ return rc;
+
+ rc = bnxt_write_sfp_module_eeprom_info(bp, page_data);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Module's eeprom write failed");
+ return rc;
+ }
+ return page_data->length;
+}
+
static int bnxt_nway_reset(struct net_device *dev)
{
int rc = 0;
@@ -5077,8 +5146,9 @@ static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
{
struct bnxt *bp = netdev_priv(dev);
- if (dump->flag > BNXT_DUMP_DRIVER) {
- netdev_info(dev, "Supports only Live(0), Crash(1), Driver(2) dumps.\n");
+ if (dump->flag > BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE) {
+ netdev_info(dev,
+ "Supports only Live(0), Crash(1), Driver(2), Live with cached context(3) dumps.\n");
return -EINVAL;
}
@@ -5441,6 +5511,7 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_module_info = bnxt_get_module_info,
.get_module_eeprom = bnxt_get_module_eeprom,
.get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page,
+ .set_module_eeprom_by_page = bnxt_set_module_eeprom_by_page,
.nway_reset = bnxt_nway_reset,
.set_phys_id = bnxt_set_phys_id,
.self_test = bnxt_self_test,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 5f8de1634378..549231703bce 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -2,7 +2,7 @@
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2014-2018 Broadcom Limited
- * Copyright (c) 2018-2024 Broadcom Inc.
+ * Copyright (c) 2018-2025 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -438,6 +438,7 @@ struct cmd_nums {
#define HWRM_MFG_PRVSN_EXPORT_CERT 0x219UL
#define HWRM_STAT_DB_ERROR_QSTATS 0x21aUL
#define HWRM_MFG_TESTS 0x21bUL
+ #define HWRM_MFG_WRITE_CERT_NVM 0x21cUL
#define HWRM_PORT_POE_CFG 0x230UL
#define HWRM_PORT_POE_QCFG 0x231UL
#define HWRM_UDCC_QCAPS 0x258UL
@@ -514,6 +515,8 @@ struct cmd_nums {
#define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL
#define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL
#define HWRM_TFC_GLOBAL_ID_FREE 0x39cUL
+ #define HWRM_TFC_TCAM_PRI_UPDATE 0x39dUL
+ #define HWRM_TFC_HOT_UPGRADE_PROCESS 0x3a0UL
#define HWRM_SV 0x400UL
#define HWRM_DBG_SERDES_TEST 0xff0eUL
#define HWRM_DBG_LOG_BUFFER_FLUSH 0xff0fUL
@@ -629,8 +632,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 3
-#define HWRM_VERSION_RSVD 85
-#define HWRM_VERSION_STR "1.10.3.85"
+#define HWRM_VERSION_RSVD 97
+#define HWRM_VERSION_STR "1.10.3.97"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -1905,11 +1908,15 @@ struct hwrm_func_qcaps_output {
__le32 roce_vf_max_srq;
__le32 roce_vf_max_gid;
__le32 flags_ext3;
- #define FUNC_QCAPS_RESP_FLAGS_EXT3_RM_RSV_WHILE_ALLOC_CAP 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT3_REQUIRE_L2_FILTER 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT3_MAX_ROCE_VFS_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_RM_RSV_WHILE_ALLOC_CAP 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_REQUIRE_L2_FILTER 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MAX_ROCE_VFS_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_RX_RATE_PROFILE_SEL_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_BIDI_OPT_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED 0x20UL
__le16 max_roce_vfs;
- u8 unused_3[5];
+ __le16 max_crypto_rx_flow_filters;
+ u8 unused_3[3];
u8 valid;
};
@@ -1924,7 +1931,7 @@ struct hwrm_func_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_func_qcfg_output (size:1280b/160B) */
+/* hwrm_func_qcfg_output (size:1344b/168B) */
struct hwrm_func_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -2087,14 +2094,18 @@ struct hwrm_func_qcfg_output {
__le16 host_mtu;
__le16 flags2;
#define FUNC_QCFG_RESP_FLAGS2_SRIOV_DSCP_INSERT_ENABLED 0x1UL
- u8 unused_4[2];
+ __le16 stag_vid;
u8 port_kdnet_mode;
#define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL
#define FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED 0x1UL
#define FUNC_QCFG_RESP_PORT_KDNET_MODE_LAST FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED
u8 kdnet_pcie_function;
__le16 port_kdnet_fid;
- u8 unused_5[2];
+ u8 unused_5;
+ u8 roce_bidi_opt_mode;
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DISABLED 0x1UL
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DEDICATED 0x2UL
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_SHARED 0x4UL
__le32 num_ktls_tx_key_ctxs;
__le32 num_ktls_rx_key_ctxs;
u8 lag_id;
@@ -2112,7 +2123,8 @@ struct hwrm_func_qcfg_output {
__le16 xid_partition_cfg;
#define FUNC_QCFG_RESP_XID_PARTITION_CFG_TX_CK 0x1UL
#define FUNC_QCFG_RESP_XID_PARTITION_CFG_RX_CK 0x2UL
- u8 unused_7;
+ __le16 mirror_vnic_id;
+ u8 unused_7[7];
u8 valid;
};
@@ -3965,7 +3977,7 @@ struct ts_split_entries {
__le32 region_num_entries;
u8 tsid;
u8 lkup_static_bkt_cnt_exp[2];
- u8 rsvd;
+ u8 locked;
__le32 rsvd2[2];
};
@@ -5483,6 +5495,37 @@ struct hwrm_port_phy_qcaps_output {
u8 valid;
};
+/* hwrm_port_phy_i2c_write_input (size:832b/104B) */
+struct hwrm_port_phy_i2c_write_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET 0x1UL
+ #define PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER 0x2UL
+ __le16 port_id;
+ u8 i2c_slave_addr;
+ u8 bank_number;
+ __le16 page_number;
+ __le16 page_offset;
+ u8 data_length;
+ u8 unused_1[7];
+ __le32 data[16];
+};
+
+/* hwrm_port_phy_i2c_write_output (size:128b/16B) */
+struct hwrm_port_phy_i2c_write_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
struct hwrm_port_phy_i2c_read_input {
__le16 req_type;
@@ -6610,8 +6653,9 @@ struct hwrm_vnic_alloc_input {
__le32 flags;
#define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
#define VNIC_ALLOC_REQ_FLAGS_VIRTIO_NET_FID_VALID 0x2UL
+ #define VNIC_ALLOC_REQ_FLAGS_VNIC_ID_VALID 0x4UL
__le16 virtio_net_fid;
- u8 unused_0[2];
+ __le16 vnic_id;
};
/* hwrm_vnic_alloc_output (size:128b/16B) */
@@ -6710,6 +6754,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL
#define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL
#define VNIC_CFG_REQ_ENABLES_L2_CQE_MODE 0x200UL
+ #define VNIC_CFG_REQ_ENABLES_RAW_QP_ID 0x400UL
__le16 vnic_id;
__le16 dflt_ring_grp;
__le16 rss_rule;
@@ -6729,7 +6774,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_L2_CQE_MODE_COMPRESSED 0x1UL
#define VNIC_CFG_REQ_L2_CQE_MODE_MIXED 0x2UL
#define VNIC_CFG_REQ_L2_CQE_MODE_LAST VNIC_CFG_REQ_L2_CQE_MODE_MIXED
- u8 unused0[4];
+ __le32 raw_qp_id;
};
/* hwrm_vnic_cfg_output (size:128b/16B) */
@@ -7082,6 +7127,15 @@ struct hwrm_vnic_plcmodes_cfg_output {
u8 valid;
};
+/* hwrm_vnic_plcmodes_cfg_cmd_err (size:64b/8B) */
+struct hwrm_vnic_plcmodes_cfg_cmd_err {
+ u8 code;
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_INVALID_HDS_THRESHOLD 0x1UL
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_LAST VNIC_PLCMODES_CFG_CMD_ERR_CODE_INVALID_HDS_THRESHOLD
+ u8 unused_0[7];
+};
+
/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
__le16 req_type;
@@ -7131,15 +7185,16 @@ struct hwrm_ring_alloc_input {
__le16 target_id;
__le64 resp_addr;
__le32 enables;
- #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
- #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
- #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
- #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
- #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
- #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
- #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
- #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL
- #define RING_ALLOC_REQ_ENABLES_STEERING_TAG_VALID 0x800UL
+ #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
+ #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
+ #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
+ #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
+ #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
+ #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
+ #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
+ #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL
+ #define RING_ALLOC_REQ_ENABLES_STEERING_TAG_VALID 0x800UL
+ #define RING_ALLOC_REQ_ENABLES_RX_RATE_PROFILE_VALID 0x1000UL
u8 ring_type;
#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
#define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
@@ -7226,7 +7281,11 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA 0x3UL
#define RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE 0x4UL
#define RING_ALLOC_REQ_MPC_CHNLS_TYPE_LAST RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE
- u8 unused_4[2];
+ u8 rx_rate_profile_sel;
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_DEFAULT 0x0UL
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE 0x1UL
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_LAST RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE
+ u8 unused_4;
__le64 cq_handle;
};
@@ -9122,6 +9181,39 @@ struct pcie_ctx_hw_stats {
__le64 pcie_recovery_histogram;
};
+/* pcie_ctx_hw_stats_v2 (size:4096b/512B) */
+struct pcie_ctx_hw_stats_v2 {
+ __le64 pcie_pl_signal_integrity;
+ __le64 pcie_dl_signal_integrity;
+ __le64 pcie_tl_signal_integrity;
+ __le64 pcie_link_integrity;
+ __le64 pcie_tx_traffic_rate;
+ __le64 pcie_rx_traffic_rate;
+ __le64 pcie_tx_dllp_statistics;
+ __le64 pcie_rx_dllp_statistics;
+ __le64 pcie_equalization_time;
+ __le32 pcie_ltssm_histogram[4];
+ __le64 pcie_recovery_histogram;
+ __le32 pcie_tl_credit_nph_histogram[8];
+ __le32 pcie_tl_credit_ph_histogram[8];
+ __le32 pcie_tl_credit_pd_histogram[8];
+ __le32 pcie_cmpl_latest_times[4];
+ __le32 pcie_cmpl_longest_time;
+ __le32 pcie_cmpl_shortest_time;
+ __le32 unused_0[2];
+ __le32 pcie_cmpl_latest_headers[4][4];
+ __le32 pcie_cmpl_longest_headers[4][4];
+ __le32 pcie_cmpl_shortest_headers[4][4];
+ __le32 pcie_wr_latency_histogram[12];
+ __le32 pcie_wr_latency_all_normal_count;
+ __le32 unused_1;
+ __le64 pcie_posted_packet_count;
+ __le64 pcie_non_posted_packet_count;
+ __le64 pcie_other_packet_count;
+ __le64 pcie_blocked_packet_count;
+ __le64 pcie_cmpl_packet_count;
+};
+
/* hwrm_stat_generic_qstats_input (size:256b/32B) */
struct hwrm_stat_generic_qstats_input {
__le16 req_type;
@@ -9317,6 +9409,9 @@ struct hwrm_struct_hdr {
#define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_BOUND
__le16 len;
u8 version;
+ #define STRUCT_HDR_VERSION_0 0x0UL
+ #define STRUCT_HDR_VERSION_1 0x1UL
+ #define STRUCT_HDR_VERSION_LAST STRUCT_HDR_VERSION_1
u8 count;
__le16 subtype;
__le16 next_offset;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 12b6ed51fd88..5ddddd89052f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -946,7 +946,9 @@ void bnxt_sriov_disable(struct bnxt *bp)
/* Reclaim all resources for the PF. */
rtnl_lock();
+ netdev_lock(bp->dev);
bnxt_restore_pf_fw_resources(bp);
+ netdev_unlock(bp->dev);
rtnl_unlock();
}
@@ -956,17 +958,21 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
struct bnxt *bp = netdev_priv(dev);
rtnl_lock();
+ netdev_lock(dev);
if (!netif_running(dev)) {
netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
+ netdev_unlock(dev);
rtnl_unlock();
return 0;
}
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n");
+ netdev_unlock(dev);
rtnl_unlock();
return 0;
}
bp->sriov_cfg = true;
+ netdev_unlock(dev);
rtnl_unlock();
if (pci_vfs_assigned(bp->pdev)) {
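The locking pattern introduced above is consistent: take rtnl_lock() first, then the per-device instance lock, and release them in reverse order on every exit path, including the early returns. A minimal sketch of that ordering, with a hypothetical wrapper name:

static void bnxt_run_under_both_locks(struct bnxt *bp, void (*fn)(struct bnxt *bp))
{
	rtnl_lock();
	netdev_lock(bp->dev);
	fn(bp);
	netdev_unlock(bp->dev);
	rtnl_unlock();
}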
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index e4a7f37036ed..a8e930d5dbb0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -112,7 +112,7 @@ int bnxt_register_dev(struct bnxt_en_dev *edev,
struct bnxt_ulp *ulp;
int rc = 0;
- rtnl_lock();
+ netdev_lock(dev);
mutex_lock(&edev->en_dev_lock);
if (!bp->irq_tbl) {
rc = -ENODEV;
@@ -138,7 +138,7 @@ int bnxt_register_dev(struct bnxt_en_dev *edev,
edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
exit:
mutex_unlock(&edev->en_dev_lock);
- rtnl_unlock();
+ netdev_unlock(dev);
return rc;
}
EXPORT_SYMBOL(bnxt_register_dev);
@@ -151,7 +151,7 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
int i = 0;
ulp = edev->ulp_tbl;
- rtnl_lock();
+ netdev_lock(dev);
mutex_lock(&edev->en_dev_lock);
if (ulp->msix_requested)
edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
@@ -169,7 +169,7 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
i++;
}
mutex_unlock(&edev->en_dev_lock);
- rtnl_unlock();
+ netdev_unlock(dev);
return;
}
EXPORT_SYMBOL(bnxt_unregister_dev);
@@ -309,12 +309,14 @@ void bnxt_ulp_irq_stop(struct bnxt *bp)
if (!ulp->msix_requested)
return;
- ops = rtnl_dereference(ulp->ulp_ops);
+ netdev_lock(bp->dev);
+ ops = rcu_dereference(ulp->ulp_ops);
if (!ops || !ops->ulp_irq_stop)
return;
if (test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
reset = true;
ops->ulp_irq_stop(ulp->handle, reset);
+ netdev_unlock(bp->dev);
}
}
@@ -333,7 +335,8 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
if (!ulp->msix_requested)
return;
- ops = rtnl_dereference(ulp->ulp_ops);
+ netdev_lock(bp->dev);
+ ops = rcu_dereference(ulp->ulp_ops);
if (!ops || !ops->ulp_irq_restart)
return;
@@ -345,6 +348,7 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
bnxt_fill_msix_vecs(bp, ent);
}
ops->ulp_irq_restart(ulp->handle, ent);
+ netdev_unlock(bp->dev);
kfree(ent);
}
}
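With these hunks the ulp_ops pointer is read under the netdev instance lock instead of RTNL, so rtnl_dereference() becomes rcu_dereference(). A sketch of the publish/read pairing as used here; the helper names are hypothetical and the struct members are assumed to match bnxt_ulp.h:

static void bnxt_ulp_publish_ops(struct bnxt_ulp *ulp, struct bnxt_ulp_ops *ops)
{
	/* Writer side: publish the ops pointer for RCU readers. */
	rcu_assign_pointer(ulp->ulp_ops, ops);
}

static struct bnxt_ulp_ops *bnxt_ulp_read_ops(struct bnxt *bp, struct bnxt_ulp *ulp)
{
	/* Reader side: the instance lock, not RTNL, now protects the read. */
	netdev_assert_locked(bp->dev);
	return rcu_dereference(ulp->ulp_ops);
}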
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 1467b94a6427..619f0844e778 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -257,8 +257,7 @@ bool bnxt_dev_is_vf_rep(struct net_device *dev)
/* Called when the parent PF interface is closed:
* As the mode transition from SWITCHDEV to LEGACY
- * happens under the rtnl_lock() this routine is safe
- * under the rtnl_lock()
+ * happens under the netdev instance lock, this routine is safe
*/
void bnxt_vf_reps_close(struct bnxt *bp)
{
@@ -278,8 +277,7 @@ void bnxt_vf_reps_close(struct bnxt *bp)
/* Called when the parent PF interface is opened (re-opened):
* As the mode transition from SWITCHDEV to LEGACY
- * happen under the rtnl_lock() this routine is safe
- * under the rtnl_lock()
+ * happens under the netdev instance lock, this routine is safe
*/
void bnxt_vf_reps_open(struct bnxt *bp)
{
@@ -348,7 +346,7 @@ void bnxt_vf_reps_destroy(struct bnxt *bp)
/* Ensure that parent PF's and VF-reps' RX/TX has been quiesced
* before proceeding with VF-rep cleanup.
*/
- rtnl_lock();
+ netdev_lock(bp->dev);
if (netif_running(bp->dev)) {
bnxt_close_nic(bp, false, false);
closed = true;
@@ -365,10 +363,10 @@ void bnxt_vf_reps_destroy(struct bnxt *bp)
bnxt_open_nic(bp, false, false);
bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
}
- rtnl_unlock();
+ netdev_unlock(bp->dev);
- /* Need to call vf_reps_destroy() outside of rntl_lock
- * as unregister_netdev takes rtnl_lock
+ /* Need to call vf_reps_destroy() outside of the netdev instance lock
+ * as unregister_netdev takes it
*/
__bnxt_vf_reps_destroy(bp);
}
@@ -376,7 +374,7 @@ void bnxt_vf_reps_destroy(struct bnxt *bp)
/* Free the VF-Reps in firmware, during firmware hot-reset processing.
* Note that the VF-Rep netdevs are still active (not unregistered) during
* this process. As the mode transition from SWITCHDEV to LEGACY happens
- * under the rtnl_lock() this routine is safe under the rtnl_lock().
+ * under the netdev instance lock, this routine is safe.
*/
void bnxt_vf_reps_free(struct bnxt *bp)
{
@@ -413,7 +411,7 @@ static int bnxt_alloc_vf_rep(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
/* Allocate the VF-Reps in firmware, during firmware hot-reset processing.
* Note that the VF-Rep netdevs are still active (not unregistered) during
* this process. As the mode transition from SWITCHDEV to LEGACY happens
- * under the rtnl_lock() this routine is safe under the rtnl_lock().
+ * under the netdev instance lock, this routine is safe.
*/
int bnxt_vf_reps_alloc(struct bnxt *bp)
{
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index e6c64e4bd66c..e675611777b5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -15,6 +15,7 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
+#include <net/netdev_lock.h>
#include <net/page_pool/helpers.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -48,8 +49,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
tx_buf->page = virt_to_head_page(xdp->data);
txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
- flags = (len << TX_BD_LEN_SHIFT) |
- ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) |
+ flags = (len << TX_BD_LEN_SHIFT) | TX_BD_CNT(num_frags + 1) |
bnxt_lhint_arr[len >> 9];
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 1 + num_frags);
@@ -382,13 +382,14 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
return nxmit;
}
-/* Under rtnl_lock */
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
struct net_device *dev = bp->dev;
int tx_xdp = 0, tx_cp, rc, tc;
struct bpf_prog *old;
+ netdev_assert_locked(dev);
+
if (prog && !prog->aux->xdp_has_frags &&
bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
netdev_warn(dev, "MTU %d larger than %d without XDP frag support.\n",
@@ -460,23 +461,16 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
struct sk_buff *
bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
- struct page_pool *pool, struct xdp_buff *xdp,
- struct rx_cmp_ext *rxcmp1)
+ struct page_pool *pool, struct xdp_buff *xdp)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
if (!skb)
return NULL;
- skb_checksum_none_assert(skb);
- if (RX_CMP_L4_CS_OK(rxcmp1)) {
- if (bp->dev->features & NETIF_F_RXCSUM) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = RX_CMP_ENCAP(rxcmp1);
- }
- }
+
xdp_update_skb_shared_info(skb, num_frags,
sinfo->xdp_frags_size,
- BNXT_RX_PAGE_SIZE * sinfo->nr_frags,
+ BNXT_RX_PAGE_SIZE * num_frags,
xdp_buff_is_frag_pfmemalloc(xdp));
return skb;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 0122782400b8..220285e190fc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -33,6 +33,5 @@ void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
struct xdp_buff *xdp);
struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb,
u8 num_frags, struct page_pool *pool,
- struct xdp_buff *xdp,
- struct rx_cmp_ext *rxcmp1);
+ struct xdp_buff *xdp);
#endif
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 3e93f957430b..73d78dcb774d 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) controller driver
*
- * Copyright (c) 2014-2024 Broadcom
+ * Copyright (c) 2014-2025 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet: " fmt
@@ -41,15 +41,13 @@
#include "bcmgenet.h"
-/* Maximum number of hardware queues, downsized if needed */
-#define GENET_MAX_MQ_CNT 4
-
/* Default highest priority queue for multi queue support */
-#define GENET_Q0_PRIORITY 0
+#define GENET_Q1_PRIORITY 0
+#define GENET_Q0_PRIORITY 1
-#define GENET_Q16_RX_BD_CNT \
+#define GENET_Q0_RX_BD_CNT \
(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
-#define GENET_Q16_TX_BD_CNT \
+#define GENET_Q0_TX_BD_CNT \
(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
#define RX_BUF_LENGTH 2048
@@ -104,7 +102,7 @@ static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
* the platform is explicitly configured for 64-bits/LPAE.
*/
#ifdef CONFIG_PHYS_ADDR_T_64BIT
- if (priv->hw_params->flags & GENET_HAS_40BITS)
+ if (bcmgenet_has_40bits(priv))
bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}
@@ -446,33 +444,48 @@ static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
u32 offset;
u32 reg;
- offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
- reg = bcmgenet_hfb_reg_readl(priv, offset);
- reg |= (1 << (f_index % 32));
- bcmgenet_hfb_reg_writel(priv, reg, offset);
- reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- reg |= RBUF_HFB_EN;
- bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) {
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg |= (1 << ((f_index % 32) + RBUF_HFB_FILTER_EN_SHIFT)) |
+ RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ } else {
+ offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ reg |= (1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg, offset);
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg |= RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ }
}
static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index)
{
u32 offset, reg, reg1;
- offset = HFB_FLT_ENABLE_V3PLUS;
- reg = bcmgenet_hfb_reg_readl(priv, offset);
- reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
- if (f_index < 32) {
- reg1 &= ~(1 << (f_index % 32));
- bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
- } else {
- reg &= ~(1 << (f_index % 32));
- bcmgenet_hfb_reg_writel(priv, reg, offset);
- }
- if (!reg && !reg1) {
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) {
reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- reg &= ~RBUF_HFB_EN;
+ reg &= ~(1 << ((f_index % 32) + RBUF_HFB_FILTER_EN_SHIFT));
+ if (!(reg & RBUF_HFB_FILTER_EN_MASK))
+ reg &= ~RBUF_HFB_EN;
bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ } else {
+ offset = HFB_FLT_ENABLE_V3PLUS;
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
+ if (f_index < 32) {
+ reg1 &= ~(1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
+ } else {
+ reg &= ~(1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg, offset);
+ }
+ if (!reg && !reg1) {
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg &= ~RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ }
}
}
@@ -482,6 +495,9 @@ static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
u32 offset;
u32 reg;
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
+ return;
+
offset = f_index / 8;
reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
reg &= ~(0xF << (4 * (f_index % 8)));
@@ -495,9 +511,13 @@ static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
u32 offset;
u32 reg;
- offset = HFB_FLT_LEN_V3PLUS +
- ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
- sizeof(u32);
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
+ offset = HFB_FLT_LEN_V2;
+ else
+ offset = HFB_FLT_LEN_V3PLUS;
+
+ offset += sizeof(u32) *
+ ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4);
reg = bcmgenet_hfb_reg_readl(priv, offset);
reg &= ~(0xFF << (8 * (f_index % 4)));
reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
@@ -579,13 +599,13 @@ static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
struct bcmgenet_rxnfc_rule *rule)
{
struct ethtool_rx_flow_spec *fs = &rule->fs;
- u32 offset = 0, f_length = 0, f;
+ u32 offset = 0, f_length = 0, f, q;
u8 val_8, mask_8;
__be16 val_16;
u16 mask_16;
size_t size;
- f = fs->location;
+ f = fs->location + 1;
if (fs->flow_type & FLOW_MAC_EXT) {
bcmgenet_hfb_insert_data(priv, f, 0,
&fs->h_ext.h_dest, &fs->m_ext.h_dest,
@@ -667,19 +687,16 @@ static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
}
bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length);
- if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) {
- /* Ring 0 flows can be handled by the default Descriptor Ring
- * We'll map them to ring 0, but don't enable the filter
- */
- bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0);
- rule->state = BCMGENET_RXNFC_STATE_DISABLED;
- } else {
+ if (fs->ring_cookie == RX_CLS_FLOW_WAKE)
+ q = 0;
+ else if (fs->ring_cookie == RX_CLS_FLOW_DISC)
+ q = priv->hw_params->rx_queues + 1;
+ else
/* Other Rx rings are direct mapped here */
- bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f,
- fs->ring_cookie);
- bcmgenet_hfb_enable_filter(priv, f);
- rule->state = BCMGENET_RXNFC_STATE_ENABLED;
- }
+ q = fs->ring_cookie;
+ bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, q);
+ bcmgenet_hfb_enable_filter(priv, f);
+ rule->state = BCMGENET_RXNFC_STATE_ENABLED;
}
/* bcmgenet_hfb_clear
@@ -690,6 +707,7 @@ static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index)
{
u32 base, i;
+ bcmgenet_hfb_set_filter_length(priv, f_index, 0);
base = f_index * priv->hw_params->hfb_filter_size;
for (i = 0; i < priv->hw_params->hfb_filter_size; i++)
bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32));
@@ -699,22 +717,23 @@ static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
{
u32 i;
- if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
- return;
-
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
-
- for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
- bcmgenet_rdma_writel(priv, 0x0, i);
+ bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
- for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
- bcmgenet_hfb_reg_writel(priv, 0x0,
- HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
+ if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) {
+ bcmgenet_hfb_reg_writel(priv, 0,
+ HFB_FLT_ENABLE_V3PLUS);
+ bcmgenet_hfb_reg_writel(priv, 0,
+ HFB_FLT_ENABLE_V3PLUS + 4);
+ for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
+ bcmgenet_rdma_writel(priv, 0, i);
+ }
for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++)
bcmgenet_hfb_clear_filter(priv, i);
+
+ /* Enable filter 0 to send default flow to ring 0 */
+ bcmgenet_hfb_set_filter_length(priv, 0, 4);
+ bcmgenet_hfb_enable_filter(priv, 0);
}
static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
@@ -722,9 +741,6 @@ static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
int i;
INIT_LIST_HEAD(&priv->rxnfc_list);
- if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
- return;
-
for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);
priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED;
@@ -819,20 +835,16 @@ static int bcmgenet_get_coalesce(struct net_device *dev,
unsigned int i;
ec->tx_max_coalesced_frames =
- bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
- DMA_MBUF_DONE_THRESH);
+ bcmgenet_tdma_ring_readl(priv, 0, DMA_MBUF_DONE_THRESH);
ec->rx_max_coalesced_frames =
- bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
- DMA_MBUF_DONE_THRESH);
+ bcmgenet_rdma_ring_readl(priv, 0, DMA_MBUF_DONE_THRESH);
ec->rx_coalesce_usecs =
- bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;
+ bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT) * 8192 / 1000;
- for (i = 0; i < priv->hw_params->rx_queues; i++) {
+ for (i = 0; i <= priv->hw_params->rx_queues; i++) {
ring = &priv->rx_rings[i];
ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
}
- ring = &priv->rx_rings[DESC_INDEX];
- ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
return 0;
}
@@ -902,17 +914,13 @@ static int bcmgenet_set_coalesce(struct net_device *dev,
/* Program all TX queues with the same values, as there is no
* ethtool knob to do coalescing on a per-queue basis
*/
- for (i = 0; i < priv->hw_params->tx_queues; i++)
+ for (i = 0; i <= priv->hw_params->tx_queues; i++)
bcmgenet_tdma_ring_writel(priv, i,
ec->tx_max_coalesced_frames,
DMA_MBUF_DONE_THRESH);
- bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
- ec->tx_max_coalesced_frames,
- DMA_MBUF_DONE_THRESH);
- for (i = 0; i < priv->hw_params->rx_queues; i++)
+ for (i = 0; i <= priv->hw_params->rx_queues; i++)
bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec);
- bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec);
return 0;
}
@@ -1120,7 +1128,7 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
STAT_GENET_Q(1),
STAT_GENET_Q(2),
STAT_GENET_Q(3),
- STAT_GENET_Q(16),
+ STAT_GENET_Q(4),
};
#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -1438,7 +1446,8 @@ static int bcmgenet_insert_flow(struct net_device *dev,
}
if (cmd->fs.ring_cookie > priv->hw_params->rx_queues &&
- cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) {
+ cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE &&
+ cmd->fs.ring_cookie != RX_CLS_FLOW_DISC) {
netdev_err(dev, "rxnfc: Unsupported action (%llu)\n",
cmd->fs.ring_cookie);
return -EINVAL;
@@ -1472,10 +1481,10 @@ static int bcmgenet_insert_flow(struct net_device *dev,
loc_rule = &priv->rxnfc_rules[cmd->fs.location];
}
if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
- bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
+ bcmgenet_hfb_disable_filter(priv, cmd->fs.location + 1);
if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
list_del(&loc_rule->list);
- bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
+ bcmgenet_hfb_clear_filter(priv, cmd->fs.location + 1);
}
loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED;
memcpy(&loc_rule->fs, &cmd->fs,
@@ -1505,10 +1514,10 @@ static int bcmgenet_delete_flow(struct net_device *dev,
}
if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
- bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
+ bcmgenet_hfb_disable_filter(priv, cmd->fs.location + 1);
if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
list_del(&rule->list);
- bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
+ bcmgenet_hfb_clear_filter(priv, cmd->fs.location + 1);
}
rule->state = BCMGENET_RXNFC_STATE_UNUSED;
memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec));
@@ -1651,9 +1660,9 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
case GENET_POWER_PASSIVE:
/* Power down LED */
- if (priv->hw_params->flags & GENET_HAS_EXT) {
+ if (bcmgenet_has_ext(priv)) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- if (GENET_IS_V5(priv) && !priv->ephy_16nm)
+ if (GENET_IS_V5(priv) && !bcmgenet_has_ephy_16nm(priv))
reg |= EXT_PWR_DOWN_PHY_EN |
EXT_PWR_DOWN_PHY_RD |
EXT_PWR_DOWN_PHY_SD |
@@ -1676,13 +1685,14 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
return ret;
}
-static void bcmgenet_power_up(struct bcmgenet_priv *priv,
- enum bcmgenet_power_mode mode)
+static int bcmgenet_power_up(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode)
{
+ int ret = 0;
u32 reg;
- if (!(priv->hw_params->flags & GENET_HAS_EXT))
- return;
+ if (!bcmgenet_has_ext(priv))
+ return ret;
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
@@ -1690,7 +1700,7 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
case GENET_POWER_PASSIVE:
reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS |
EXT_ENERGY_DET_MASK);
- if (GENET_IS_V5(priv) && !priv->ephy_16nm) {
+ if (GENET_IS_V5(priv) && !bcmgenet_has_ephy_16nm(priv)) {
reg &= ~(EXT_PWR_DOWN_PHY_EN |
EXT_PWR_DOWN_PHY_RD |
EXT_PWR_DOWN_PHY_SD |
@@ -1718,11 +1728,13 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
}
break;
case GENET_POWER_WOL_MAGIC:
- bcmgenet_wol_power_up_cfg(priv, mode);
- return;
+ ret = bcmgenet_wol_power_up_cfg(priv, mode);
+ break;
default:
break;
}
+
+ return ret;
}
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
@@ -1759,18 +1771,6 @@ static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
return tx_cb_ptr;
}
-static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
-{
- bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
- INTRL2_CPU_MASK_SET);
-}
-
-static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
-{
- bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
- INTRL2_CPU_MASK_CLEAR);
-}
-
static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
{
bcmgenet_intrl2_1_writel(ring->priv,
@@ -1785,18 +1785,6 @@ static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
INTRL2_CPU_MASK_CLEAR);
}
-static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
-{
- bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
- INTRL2_CPU_MASK_SET);
-}
-
-static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
-{
- bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
- INTRL2_CPU_MASK_CLEAR);
-}
-
static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
{
bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
@@ -1877,12 +1865,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
struct sk_buff *skb;
/* Clear status before servicing to reduce spurious interrupts */
- if (ring->index == DESC_INDEX)
- bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
- INTRL2_CPU_CLEAR);
- else
- bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
- INTRL2_CPU_CLEAR);
+ bcmgenet_intrl2_1_writel(priv, (1 << ring->index), INTRL2_CPU_CLEAR);
/* Compute how many buffers are transmitted since last xmit call */
c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
@@ -1916,19 +1899,46 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
ring->packets += pkts_compl;
ring->bytes += bytes_compl;
- netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
+ netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->index),
pkts_compl, bytes_compl);
return txbds_processed;
}
static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
- struct bcmgenet_tx_ring *ring)
+ struct bcmgenet_tx_ring *ring,
+ bool all)
{
- unsigned int released;
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
+ unsigned int released, drop, wr_ptr;
+ struct enet_cb *cb_ptr;
+ struct sk_buff *skb;
spin_lock_bh(&ring->lock);
released = __bcmgenet_tx_reclaim(dev, ring);
+ if (all) {
+ skb = NULL;
+ drop = (ring->prod_index - ring->c_index) & DMA_C_INDEX_MASK;
+ released += drop;
+ ring->prod_index = ring->c_index & DMA_C_INDEX_MASK;
+ while (drop--) {
+ cb_ptr = bcmgenet_put_txcb(priv, ring);
+ skb = cb_ptr->skb;
+ bcmgenet_free_tx_cb(kdev, cb_ptr);
+ if (skb && cb_ptr == GENET_CB(skb)->first_cb) {
+ dev_consume_skb_any(skb);
+ skb = NULL;
+ }
+ }
+ if (skb)
+ dev_consume_skb_any(skb);
+ bcmgenet_tdma_ring_writel(priv, ring->index,
+ ring->prod_index, TDMA_PROD_INDEX);
+ wr_ptr = ring->write_ptr * WORDS_PER_BD(priv);
+ bcmgenet_tdma_ring_writel(priv, ring->index, wr_ptr,
+ TDMA_WRITE_PTR);
+ }
spin_unlock_bh(&ring->lock);
return released;
@@ -1944,14 +1954,14 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
spin_lock(&ring->lock);
work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
- txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
+ txq = netdev_get_tx_queue(ring->priv->dev, ring->index);
netif_tx_wake_queue(txq);
}
spin_unlock(&ring->lock);
if (work_done == 0) {
napi_complete(napi);
- ring->int_enable(ring);
+ bcmgenet_tx_ring_int_enable(ring);
return 0;
}
@@ -1962,14 +1972,11 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- int i;
-
- if (netif_is_multiqueue(dev)) {
- for (i = 0; i < priv->hw_params->tx_queues; i++)
- bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
- }
+ int i = 0;
- bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
+ do {
+ bcmgenet_tx_reclaim(dev, &priv->tx_rings[i++], true);
+ } while (i <= priv->hw_params->tx_queues && netif_is_multiqueue(dev));
}
/* Reallocate the SKB to put enough headroom in front of it and insert
@@ -2057,19 +2064,14 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
index = skb_get_queue_mapping(skb);
/* Mapping strategy:
- * queue_mapping = 0, unclassified, packet xmited through ring16
- * queue_mapping = 1, goes to ring 0. (highest priority queue
- * queue_mapping = 2, goes to ring 1.
- * queue_mapping = 3, goes to ring 2.
- * queue_mapping = 4, goes to ring 3.
+ * queue_mapping = 0, unclassified, packet xmited through ring 0
+ * queue_mapping = 1, goes to ring 1. (highest priority queue)
+ * queue_mapping = 2, goes to ring 2.
+ * queue_mapping = 3, goes to ring 3.
+ * queue_mapping = 4, goes to ring 4.
*/
- if (index == 0)
- index = DESC_INDEX;
- else
- index -= 1;
-
ring = &priv->tx_rings[index];
- txq = netdev_get_tx_queue(dev, ring->queue);
+ txq = netdev_get_tx_queue(dev, index);
nr_frags = skb_shinfo(skb)->nr_frags;
@@ -2242,15 +2244,8 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
unsigned int discards;
/* Clear status before servicing to reduce spurious interrupts */
- if (ring->index == DESC_INDEX) {
- bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
- INTRL2_CPU_CLEAR);
- } else {
- mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
- bcmgenet_intrl2_1_writel(priv,
- mask,
- INTRL2_CPU_CLEAR);
- }
+ mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
+ bcmgenet_intrl2_1_writel(priv, mask, INTRL2_CPU_CLEAR);
p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
@@ -2399,7 +2394,7 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete_done(napi, work_done);
- ring->int_enable(ring);
+ bcmgenet_rx_ring_int_enable(ring);
}
if (ring->dim.use_dim) {
@@ -2523,7 +2518,7 @@ static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
} else if (priv->ext_phy) {
int0_enable |= UMAC_IRQ_LINK_EVENT;
} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
- if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+ if (bcmgenet_has_moca_link_det(priv))
int0_enable |= UMAC_IRQ_LINK_EVENT;
}
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
@@ -2588,8 +2583,8 @@ static void init_umac(struct bcmgenet_priv *priv)
}
/* Enable MDIO interrupts on GENET v3+ */
- if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
- int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
+ if (bcmgenet_has_mdio_intr(priv))
+ int0_enable |= UMAC_IRQ_MDIO_EVENT;
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
@@ -2639,15 +2634,6 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
spin_lock_init(&ring->lock);
ring->priv = priv;
ring->index = index;
- if (index == DESC_INDEX) {
- ring->queue = 0;
- ring->int_enable = bcmgenet_tx_ring16_int_enable;
- ring->int_disable = bcmgenet_tx_ring16_int_disable;
- } else {
- ring->queue = index + 1;
- ring->int_enable = bcmgenet_tx_ring_int_enable;
- ring->int_disable = bcmgenet_tx_ring_int_disable;
- }
ring->cbs = priv->tx_cbs + start_ptr;
ring->size = size;
ring->clean_ptr = start_ptr;
@@ -2658,8 +2644,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
ring->end_ptr = end_ptr - 1;
ring->prod_index = 0;
- /* Set flow period for ring != 16 */
- if (index != DESC_INDEX)
+ /* Set flow period for ring != 0 */
+ if (index)
flow_period_val = ENET_MAX_MTU_SIZE << 16;
bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
@@ -2697,13 +2683,6 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
ring->priv = priv;
ring->index = index;
- if (index == DESC_INDEX) {
- ring->int_enable = bcmgenet_rx_ring16_int_enable;
- ring->int_disable = bcmgenet_rx_ring16_int_disable;
- } else {
- ring->int_enable = bcmgenet_rx_ring_int_enable;
- ring->int_disable = bcmgenet_rx_ring_int_disable;
- }
ring->cbs = priv->rx_cbs + start_ptr;
ring->size = size;
ring->c_index = 0;
@@ -2749,15 +2728,11 @@ static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_tx_ring *ring;
- for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
napi_enable(&ring->napi);
- ring->int_enable(ring);
+ bcmgenet_tx_ring_int_enable(ring);
}
-
- ring = &priv->tx_rings[DESC_INDEX];
- napi_enable(&ring->napi);
- ring->int_enable(ring);
}
static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
@@ -2765,13 +2740,10 @@ static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_tx_ring *ring;
- for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
napi_disable(&ring->napi);
}
-
- ring = &priv->tx_rings[DESC_INDEX];
- napi_disable(&ring->napi);
}
static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
@@ -2779,82 +2751,104 @@ static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_tx_ring *ring;
- for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
netif_napi_del(&ring->napi);
}
+}
- ring = &priv->tx_rings[DESC_INDEX];
- netif_napi_del(&ring->napi);
+static int bcmgenet_tdma_disable(struct bcmgenet_priv *priv)
+{
+ int timeout = 0;
+ u32 reg, mask;
+
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ mask = (1 << (priv->hw_params->tx_queues + 1)) - 1;
+ mask = (mask << DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+ reg &= ~mask;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
+ /* Check DMA status register to confirm DMA is disabled */
+ while (timeout++ < DMA_TIMEOUT_VAL) {
+ reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
+ if ((reg & mask) == mask)
+ return 0;
+
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int bcmgenet_rdma_disable(struct bcmgenet_priv *priv)
+{
+ int timeout = 0;
+ u32 reg, mask;
+
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ mask = (1 << (priv->hw_params->rx_queues + 1)) - 1;
+ mask = (mask << DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+ reg &= ~mask;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ /* Check DMA status register to confirm DMA is disabled */
+ while (timeout++ < DMA_TIMEOUT_VAL) {
+ reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
+ if ((reg & mask) == mask)
+ return 0;
+
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
}
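For the default GENET layout of four priority rings plus ring 0, the mask built above covers five per-ring enable bits plus DMA_EN; a compact restatement of that arithmetic (the queue count is shown only as an example):

static u32 bcmgenet_dma_disable_mask(unsigned int hw_queues)
{
	/* One enable bit per ring, rings 0..hw_queues: 0x1f for 4 + 1 rings. */
	u32 mask = (1 << (hw_queues + 1)) - 1;

	return (mask << DMA_RING_BUF_EN_SHIFT) | DMA_EN;
}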
/* Initialize Tx queues
*
- * Queues 0-3 are priority-based, each one has 32 descriptors,
- * with queue 0 being the highest priority queue.
+ * Queues 1-4 are priority-based, each one has 32 descriptors,
+ * with queue 1 being the highest priority queue.
*
- * Queue 16 is the default Tx queue with
- * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
+ * Queue 0 is the default Tx queue with
+ * GENET_Q0_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
*
* The transmit control block pool is then partitioned as follows:
- * - Tx queue 0 uses tx_cbs[0..31]
- * - Tx queue 1 uses tx_cbs[32..63]
- * - Tx queue 2 uses tx_cbs[64..95]
- * - Tx queue 3 uses tx_cbs[96..127]
- * - Tx queue 16 uses tx_cbs[128..255]
+ * - Tx queue 0 uses tx_cbs[0..127]
+ * - Tx queue 1 uses tx_cbs[128..159]
+ * - Tx queue 2 uses tx_cbs[160..191]
+ * - Tx queue 3 uses tx_cbs[192..223]
+ * - Tx queue 4 uses tx_cbs[224..255]
*/
static void bcmgenet_init_tx_queues(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 i, dma_enable;
- u32 dma_ctrl, ring_cfg;
- u32 dma_priority[3] = {0, 0, 0};
-
- dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
- dma_enable = dma_ctrl & DMA_EN;
- dma_ctrl &= ~DMA_EN;
- bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
-
- dma_ctrl = 0;
- ring_cfg = 0;
+ unsigned int start = 0, end = GENET_Q0_TX_BD_CNT;
+ u32 i, ring_mask, dma_priority[3] = {0, 0, 0};
/* Enable strict priority arbiter mode */
bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
/* Initialize Tx priority queues */
- for (i = 0; i < priv->hw_params->tx_queues; i++) {
- bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
- i * priv->hw_params->tx_bds_per_q,
- (i + 1) * priv->hw_params->tx_bds_per_q);
- ring_cfg |= (1 << i);
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+ for (i = 0; i <= priv->hw_params->tx_queues; i++) {
+ bcmgenet_init_tx_ring(priv, i, end - start, start, end);
+ start = end;
+ end += priv->hw_params->tx_bds_per_q;
dma_priority[DMA_PRIO_REG_INDEX(i)] |=
- ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
+ (i ? GENET_Q1_PRIORITY : GENET_Q0_PRIORITY)
+ << DMA_PRIO_REG_SHIFT(i);
}
- /* Initialize Tx default queue 16 */
- bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
- priv->hw_params->tx_queues *
- priv->hw_params->tx_bds_per_q,
- TOTAL_DESC);
- ring_cfg |= (1 << DESC_INDEX);
- dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
- dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
- ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
- DMA_PRIO_REG_SHIFT(DESC_INDEX));
-
/* Set Tx queue priorities */
bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
- /* Enable Tx queues */
- bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);
+ /* Configure Tx queues as descriptor rings */
+ ring_mask = (1 << (priv->hw_params->tx_queues + 1)) - 1;
+ bcmgenet_tdma_writel(priv, ring_mask, DMA_RING_CFG);
- /* Enable Tx DMA */
- if (dma_enable)
- dma_ctrl |= DMA_EN;
- bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
+ /* Enable Tx rings */
+ ring_mask <<= DMA_RING_BUF_EN_SHIFT;
+ bcmgenet_tdma_writel(priv, ring_mask, DMA_CTRL);
}
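The start/end walk above yields the control-block partitioning documented in the comment; a standalone illustration with the default sizes (TOTAL_DESC = 256, tx_bds_per_q = 32, four priority queues):

#include <stdio.h>

int main(void)
{
	unsigned int total = 256, per_q = 32, queues = 4;
	unsigned int start = 0, end = total - queues * per_q;	/* GENET_Q0_TX_BD_CNT */

	for (unsigned int i = 0; i <= queues; i++) {
		printf("Tx queue %u uses tx_cbs[%u..%u]\n", i, start, end - 1);
		start = end;
		end += per_q;
	}
	return 0;
}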
static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
@@ -2862,15 +2856,11 @@ static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_rx_ring *ring;
- for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_enable(&ring->napi);
- ring->int_enable(ring);
+ bcmgenet_rx_ring_int_enable(ring);
}
-
- ring = &priv->rx_rings[DESC_INDEX];
- napi_enable(&ring->napi);
- ring->int_enable(ring);
}
static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
@@ -2878,15 +2868,11 @@ static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_rx_ring *ring;
- for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_disable(&ring->napi);
cancel_work_sync(&ring->dim.dim.work);
}
-
- ring = &priv->rx_rings[DESC_INDEX];
- napi_disable(&ring->napi);
- cancel_work_sync(&ring->dim.dim.work);
}
static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
@@ -2894,13 +2880,10 @@ static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_rx_ring *ring;
- for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
netif_napi_del(&ring->napi);
}
-
- ring = &priv->rx_rings[DESC_INDEX];
- netif_napi_del(&ring->napi);
}
/* Initialize Rx queues
@@ -2908,57 +2891,32 @@ static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
* Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
* used to direct traffic to these queues.
*
- * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
+ * Queue 0 is also the default Rx queue with GENET_Q0_RX_BD_CNT descriptors.
*/
static int bcmgenet_init_rx_queues(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 i;
- u32 dma_enable;
- u32 dma_ctrl;
- u32 ring_cfg;
+ unsigned int start = 0, end = GENET_Q0_RX_BD_CNT;
+ u32 i, ring_mask;
int ret;
- dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
- dma_enable = dma_ctrl & DMA_EN;
- dma_ctrl &= ~DMA_EN;
- bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
-
- dma_ctrl = 0;
- ring_cfg = 0;
-
/* Initialize Rx priority queues */
- for (i = 0; i < priv->hw_params->rx_queues; i++) {
- ret = bcmgenet_init_rx_ring(priv, i,
- priv->hw_params->rx_bds_per_q,
- i * priv->hw_params->rx_bds_per_q,
- (i + 1) *
- priv->hw_params->rx_bds_per_q);
+ for (i = 0; i <= priv->hw_params->rx_queues; i++) {
+ ret = bcmgenet_init_rx_ring(priv, i, end - start, start, end);
if (ret)
return ret;
- ring_cfg |= (1 << i);
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+ start = end;
+ end += priv->hw_params->rx_bds_per_q;
}
- /* Initialize Rx default queue 16 */
- ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
- priv->hw_params->rx_queues *
- priv->hw_params->rx_bds_per_q,
- TOTAL_DESC);
- if (ret)
- return ret;
-
- ring_cfg |= (1 << DESC_INDEX);
- dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
-
- /* Enable rings */
- bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);
+ /* Configure Rx queues as descriptor rings */
+ ring_mask = (1 << (priv->hw_params->rx_queues + 1)) - 1;
+ bcmgenet_rdma_writel(priv, ring_mask, DMA_RING_CFG);
- /* Configure ring as descriptor ring and re-enable DMA if enabled */
- if (dma_enable)
- dma_ctrl |= DMA_EN;
- bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
+ /* Enable Rx rings */
+ ring_mask <<= DMA_RING_BUF_EN_SHIFT;
+ bcmgenet_rdma_writel(priv, ring_mask, DMA_CTRL);
return 0;
}
@@ -2966,26 +2924,9 @@ static int bcmgenet_init_rx_queues(struct net_device *dev)
static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
{
int ret = 0;
- int timeout = 0;
- u32 reg;
- u32 dma_ctrl;
- int i;
/* Disable TDMA to stop add more frames in TX DMA */
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg &= ~DMA_EN;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
- /* Check TDMA status register to confirm TDMA is disabled */
- while (timeout++ < DMA_TIMEOUT_VAL) {
- reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
- if (reg & DMA_DISABLED)
- break;
-
- udelay(1);
- }
-
- if (timeout == DMA_TIMEOUT_VAL) {
+ if (-ETIMEDOUT == bcmgenet_tdma_disable(priv)) {
netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
ret = -ETIMEDOUT;
}
@@ -2994,39 +2935,11 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
usleep_range(10000, 20000);
/* Disable RDMA */
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg &= ~DMA_EN;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- timeout = 0;
- /* Check RDMA status register to confirm RDMA is disabled */
- while (timeout++ < DMA_TIMEOUT_VAL) {
- reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
- if (reg & DMA_DISABLED)
- break;
-
- udelay(1);
- }
-
- if (timeout == DMA_TIMEOUT_VAL) {
+ if (-ETIMEDOUT == bcmgenet_rdma_disable(priv)) {
netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
ret = -ETIMEDOUT;
}
- dma_ctrl = 0;
- for (i = 0; i < priv->hw_params->rx_queues; i++)
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg &= ~dma_ctrl;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- dma_ctrl = 0;
- for (i = 0; i < priv->hw_params->tx_queues; i++)
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg &= ~dma_ctrl;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
return ret;
}
@@ -3038,32 +2951,53 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
bcmgenet_fini_rx_napi(priv);
bcmgenet_fini_tx_napi(priv);
- for (i = 0; i < priv->num_tx_bds; i++)
- dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev,
- priv->tx_cbs + i));
-
- for (i = 0; i < priv->hw_params->tx_queues; i++) {
- txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
+ for (i = 0; i <= priv->hw_params->tx_queues; i++) {
+ txq = netdev_get_tx_queue(priv->dev, i);
netdev_tx_reset_queue(txq);
}
- txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
- netdev_tx_reset_queue(txq);
-
bcmgenet_free_rx_buffers(priv);
kfree(priv->rx_cbs);
kfree(priv->tx_cbs);
}
/* init_edma: Initialize DMA control register */
-static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
+static int bcmgenet_init_dma(struct bcmgenet_priv *priv, bool flush_rx)
{
- int ret;
- unsigned int i;
struct enet_cb *cb;
+ unsigned int i;
+ int ret;
+ u32 reg;
netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
+ /* Disable TX DMA */
+ ret = bcmgenet_tdma_disable(priv);
+ if (ret) {
+ netdev_err(priv->dev, "failed to halt Tx DMA\n");
+ return ret;
+ }
+
+ /* Disable RX DMA */
+ ret = bcmgenet_rdma_disable(priv);
+ if (ret) {
+ netdev_err(priv->dev, "failed to halt Rx DMA\n");
+ return ret;
+ }
+
+ /* Flush TX queues */
+ bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
+ udelay(10);
+ bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
+
+ if (flush_rx) {
+ reg = bcmgenet_rbuf_ctrl_get(priv);
+ bcmgenet_rbuf_ctrl_set(priv, reg | BIT(0));
+ udelay(10);
+ bcmgenet_rbuf_ctrl_set(priv, reg);
+ udelay(10);
+ }
+
/* Initialize common Rx ring structures */
priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
priv->num_rx_bds = TOTAL_DESC;
@@ -3113,6 +3047,15 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
/* Initialize Tx queues */
bcmgenet_init_tx_queues(priv->dev);
+ /* Enable RX/TX DMA */
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ reg |= DMA_EN;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg |= DMA_EN;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
return 0;
}
@@ -3142,7 +3085,7 @@ static void bcmgenet_irq_task(struct work_struct *work)
}
-/* bcmgenet_isr1: handle Rx and Tx priority queues */
+/* bcmgenet_isr1: handle Rx and Tx queues */
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
{
struct bcmgenet_priv *priv = dev_id;
@@ -3161,7 +3104,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
"%s: IRQ=0x%x\n", __func__, status);
/* Check Rx priority queue interrupts */
- for (index = 0; index < priv->hw_params->rx_queues; index++) {
+ for (index = 0; index <= priv->hw_params->rx_queues; index++) {
if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
continue;
@@ -3169,20 +3112,20 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
rx_ring->dim.event_ctr++;
if (likely(napi_schedule_prep(&rx_ring->napi))) {
- rx_ring->int_disable(rx_ring);
+ bcmgenet_rx_ring_int_disable(rx_ring);
__napi_schedule_irqoff(&rx_ring->napi);
}
}
/* Check Tx priority queue interrupts */
- for (index = 0; index < priv->hw_params->tx_queues; index++) {
+ for (index = 0; index <= priv->hw_params->tx_queues; index++) {
if (!(status & BIT(index)))
continue;
tx_ring = &priv->tx_rings[index];
if (likely(napi_schedule_prep(&tx_ring->napi))) {
- tx_ring->int_disable(tx_ring);
+ bcmgenet_tx_ring_int_disable(tx_ring);
__napi_schedule_irqoff(&tx_ring->napi);
}
}
@@ -3190,12 +3133,10 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */
+/* bcmgenet_isr0: handle other stuff */
static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
{
struct bcmgenet_priv *priv = dev_id;
- struct bcmgenet_rx_ring *rx_ring;
- struct bcmgenet_tx_ring *tx_ring;
unsigned int status;
unsigned long flags;
@@ -3209,29 +3150,8 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
netif_dbg(priv, intr, priv->dev,
"IRQ=0x%x\n", status);
- if (status & UMAC_IRQ_RXDMA_DONE) {
- rx_ring = &priv->rx_rings[DESC_INDEX];
- rx_ring->dim.event_ctr++;
-
- if (likely(napi_schedule_prep(&rx_ring->napi))) {
- rx_ring->int_disable(rx_ring);
- __napi_schedule_irqoff(&rx_ring->napi);
- }
- }
-
- if (status & UMAC_IRQ_TXDMA_DONE) {
- tx_ring = &priv->tx_rings[DESC_INDEX];
-
- if (likely(napi_schedule_prep(&tx_ring->napi))) {
- tx_ring->int_disable(tx_ring);
- __napi_schedule_irqoff(&tx_ring->napi);
- }
- }
-
- if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
- status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
+ if (bcmgenet_has_mdio_intr(priv) && status & UMAC_IRQ_MDIO_EVENT)
wake_up(&priv->wq);
- }
/* all other interested interrupts handled in bottom half */
status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R);
@@ -3285,56 +3205,6 @@ static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
put_unaligned_be16(addr_tmp, &addr[4]);
}
-/* Returns a reusable dma control register value */
-static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv, bool flush_rx)
-{
- unsigned int i;
- u32 reg;
- u32 dma_ctrl;
-
- /* disable DMA */
- dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
- for (i = 0; i < priv->hw_params->tx_queues; i++)
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg &= ~dma_ctrl;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
- dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
- for (i = 0; i < priv->hw_params->rx_queues; i++)
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg &= ~dma_ctrl;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
- udelay(10);
- bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
-
- if (flush_rx) {
- reg = bcmgenet_rbuf_ctrl_get(priv);
- bcmgenet_rbuf_ctrl_set(priv, reg | BIT(0));
- udelay(10);
- bcmgenet_rbuf_ctrl_set(priv, reg);
- udelay(10);
- }
-
- return dma_ctrl;
-}
-
-static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
-{
- u32 reg;
-
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg |= dma_ctrl;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg |= dma_ctrl;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-}
-
static void bcmgenet_netif_start(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -3358,7 +3228,6 @@ static void bcmgenet_netif_start(struct net_device *dev)
static int bcmgenet_open(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- unsigned long dma_ctrl;
int ret;
netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
@@ -3384,22 +3253,16 @@ static int bcmgenet_open(struct net_device *dev)
bcmgenet_set_hw_addr(priv, dev->dev_addr);
- /* Disable RX/TX DMA and flush TX and RX queues */
- dma_ctrl = bcmgenet_dma_disable(priv, true);
+ /* HFB init */
+ bcmgenet_hfb_init(priv);
/* Reinitialize TDMA and RDMA and SW housekeeping */
- ret = bcmgenet_init_dma(priv);
+ ret = bcmgenet_init_dma(priv, true);
if (ret) {
netdev_err(dev, "failed to initialize DMA\n");
goto err_clk_disable;
}
- /* Always enable ring 16 - descriptor ring */
- bcmgenet_enable_dma(priv, dma_ctrl);
-
- /* HFB init */
- bcmgenet_hfb_init(priv);
-
ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
dev->name, priv);
if (ret < 0) {
@@ -3446,19 +3309,21 @@ static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- bcmgenet_disable_tx_napi(priv);
netif_tx_disable(dev);
/* Disable MAC receive */
+ bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
umac_enable_set(priv, CMD_RX_EN, false);
+ if (stop_phy)
+ phy_stop(dev->phydev);
+
bcmgenet_dma_teardown(priv);
/* Disable MAC transmit. TX DMA disabled must be done before this */
umac_enable_set(priv, CMD_TX_EN, false);
- if (stop_phy)
- phy_stop(dev->phydev);
+ bcmgenet_disable_tx_napi(priv);
bcmgenet_disable_rx_napi(priv);
bcmgenet_intr_disable(priv);
@@ -3506,16 +3371,11 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
if (!netif_msg_tx_err(priv))
return;
- txq = netdev_get_tx_queue(priv->dev, ring->queue);
+ txq = netdev_get_tx_queue(priv->dev, ring->index);
spin_lock(&ring->lock);
- if (ring->index == DESC_INDEX) {
- intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
- intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
- } else {
- intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
- intmsk = 1 << ring->index;
- }
+ intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+ intmsk = 1 << ring->index;
c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
txq_stopped = netif_tx_queue_stopped(txq);
@@ -3529,7 +3389,7 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
"(sw)c_index: %d (hw)c_index: %d\n"
"(sw)clean_p: %d (sw)write_p: %d\n"
"(sw)cb_ptr: %d (sw)end_ptr: %d\n",
- ring->index, ring->queue,
+ ring->index, ring->index,
txq_stopped ? "stopped" : "active",
intsts & intmsk ? "enabled" : "disabled",
free_bds, ring->size,
@@ -3542,25 +3402,20 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 int0_enable = 0;
u32 int1_enable = 0;
unsigned int q;
netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
- for (q = 0; q < priv->hw_params->tx_queues; q++)
+ for (q = 0; q <= priv->hw_params->tx_queues; q++)
bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
- bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
bcmgenet_tx_reclaim_all(dev);
- for (q = 0; q < priv->hw_params->tx_queues; q++)
+ for (q = 0; q <= priv->hw_params->tx_queues; q++)
int1_enable |= (1 << q);
- int0_enable = UMAC_IRQ_TXDMA_DONE;
-
/* Re-enable TX interrupts if disabled */
- bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
netif_trans_update(dev);
@@ -3664,16 +3519,13 @@ static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
struct bcmgenet_rx_ring *rx_ring;
unsigned int q;
- for (q = 0; q < priv->hw_params->tx_queues; q++) {
+ for (q = 0; q <= priv->hw_params->tx_queues; q++) {
tx_ring = &priv->tx_rings[q];
tx_bytes += tx_ring->bytes;
tx_packets += tx_ring->packets;
}
- tx_ring = &priv->tx_rings[DESC_INDEX];
- tx_bytes += tx_ring->bytes;
- tx_packets += tx_ring->packets;
- for (q = 0; q < priv->hw_params->rx_queues; q++) {
+ for (q = 0; q <= priv->hw_params->rx_queues; q++) {
rx_ring = &priv->rx_rings[q];
rx_bytes += rx_ring->bytes;
@@ -3681,11 +3533,6 @@ static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
rx_errors += rx_ring->errors;
rx_dropped += rx_ring->dropped;
}
- rx_ring = &priv->rx_rings[DESC_INDEX];
- rx_bytes += rx_ring->bytes;
- rx_packets += rx_ring->packets;
- rx_errors += rx_ring->errors;
- rx_dropped += rx_ring->dropped;
dev->stats.tx_bytes = tx_bytes;
dev->stats.tx_packets = tx_packets;
@@ -3726,128 +3573,109 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
.ndo_change_carrier = bcmgenet_change_carrier,
};
-/* Array of GENET hardware parameters/characteristics */
-static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
- [GENET_V1] = {
- .tx_queues = 0,
- .tx_bds_per_q = 0,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 16,
- .bp_in_mask = 0xffff,
- .hfb_filter_cnt = 16,
- .qtag_mask = 0x1F,
- .hfb_offset = 0x1000,
- .rdma_offset = 0x2000,
- .tdma_offset = 0x3000,
- .words_per_bd = 2,
- },
- [GENET_V2] = {
- .tx_queues = 4,
- .tx_bds_per_q = 32,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 16,
- .bp_in_mask = 0xffff,
- .hfb_filter_cnt = 16,
- .qtag_mask = 0x1F,
- .tbuf_offset = 0x0600,
- .hfb_offset = 0x1000,
- .hfb_reg_offset = 0x2000,
- .rdma_offset = 0x3000,
- .tdma_offset = 0x4000,
- .words_per_bd = 2,
- .flags = GENET_HAS_EXT,
- },
- [GENET_V3] = {
- .tx_queues = 4,
- .tx_bds_per_q = 32,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 17,
- .bp_in_mask = 0x1ffff,
- .hfb_filter_cnt = 48,
- .hfb_filter_size = 128,
- .qtag_mask = 0x3F,
- .tbuf_offset = 0x0600,
- .hfb_offset = 0x8000,
- .hfb_reg_offset = 0xfc00,
- .rdma_offset = 0x10000,
- .tdma_offset = 0x11000,
- .words_per_bd = 2,
- .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
- GENET_HAS_MOCA_LINK_DET,
- },
- [GENET_V4] = {
- .tx_queues = 4,
- .tx_bds_per_q = 32,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 17,
- .bp_in_mask = 0x1ffff,
- .hfb_filter_cnt = 48,
- .hfb_filter_size = 128,
- .qtag_mask = 0x3F,
- .tbuf_offset = 0x0600,
- .hfb_offset = 0x8000,
- .hfb_reg_offset = 0xfc00,
- .rdma_offset = 0x2000,
- .tdma_offset = 0x4000,
- .words_per_bd = 3,
- .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
- GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
- },
- [GENET_V5] = {
- .tx_queues = 4,
- .tx_bds_per_q = 32,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 17,
- .bp_in_mask = 0x1ffff,
- .hfb_filter_cnt = 48,
- .hfb_filter_size = 128,
- .qtag_mask = 0x3F,
- .tbuf_offset = 0x0600,
- .hfb_offset = 0x8000,
- .hfb_reg_offset = 0xfc00,
- .rdma_offset = 0x2000,
- .tdma_offset = 0x4000,
- .words_per_bd = 3,
- .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
- GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
- },
+/* GENET hardware parameters/characteristics */
+static const struct bcmgenet_hw_params bcmgenet_hw_params_v1 = {
+ .tx_queues = 0,
+ .tx_bds_per_q = 0,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 16,
+ .bp_in_mask = 0xffff,
+ .hfb_filter_cnt = 16,
+ .hfb_filter_size = 64,
+ .qtag_mask = 0x1F,
+ .hfb_offset = 0x1000,
+ .hfb_reg_offset = GENET_RBUF_OFF + RBUF_HFB_CTRL_V1,
+ .rdma_offset = 0x2000,
+ .tdma_offset = 0x3000,
+ .words_per_bd = 2,
+};
+
+static const struct bcmgenet_hw_params bcmgenet_hw_params_v2 = {
+ .tx_queues = 4,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 16,
+ .bp_in_mask = 0xffff,
+ .hfb_filter_cnt = 16,
+ .hfb_filter_size = 64,
+ .qtag_mask = 0x1F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x1000,
+ .hfb_reg_offset = 0x2000,
+ .rdma_offset = 0x3000,
+ .tdma_offset = 0x4000,
+ .words_per_bd = 2,
+};
+
+static const struct bcmgenet_hw_params bcmgenet_hw_params_v3 = {
+ .tx_queues = 4,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 17,
+ .bp_in_mask = 0x1ffff,
+ .hfb_filter_cnt = 48,
+ .hfb_filter_size = 128,
+ .qtag_mask = 0x3F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x8000,
+ .hfb_reg_offset = 0xfc00,
+ .rdma_offset = 0x10000,
+ .tdma_offset = 0x11000,
+ .words_per_bd = 2,
+};
+
+static const struct bcmgenet_hw_params bcmgenet_hw_params_v4 = {
+ .tx_queues = 4,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 17,
+ .bp_in_mask = 0x1ffff,
+ .hfb_filter_cnt = 48,
+ .hfb_filter_size = 128,
+ .qtag_mask = 0x3F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x8000,
+ .hfb_reg_offset = 0xfc00,
+ .rdma_offset = 0x2000,
+ .tdma_offset = 0x4000,
+ .words_per_bd = 3,
};
/* Infer hardware parameters from the detected GENET version */
static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
{
- struct bcmgenet_hw_params *params;
+ const struct bcmgenet_hw_params *params;
u32 reg;
u8 major;
u16 gphy_rev;
- if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
- bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
- genet_dma_ring_regs = genet_dma_ring_regs_v4;
- } else if (GENET_IS_V3(priv)) {
+ /* default to latest values */
+ params = &bcmgenet_hw_params_v4;
+ bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
+ genet_dma_ring_regs = genet_dma_ring_regs_v4;
+ if (GENET_IS_V3(priv)) {
+ params = &bcmgenet_hw_params_v3;
bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
} else if (GENET_IS_V2(priv)) {
+ params = &bcmgenet_hw_params_v2;
bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
} else if (GENET_IS_V1(priv)) {
+ params = &bcmgenet_hw_params_v1;
bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
}
-
- /* enum genet_version starts at 1 */
- priv->hw_params = &bcmgenet_hw_params[priv->version];
- params = priv->hw_params;
+ priv->hw_params = params;
/* Read GENET HW version */
reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
major = (reg >> 24 & 0x0f);
- if (major == 6)
+ if (major == 6 || major == 7)
major = 5;
else if (major == 5)
major = 4;
@@ -3898,7 +3726,7 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
}
#ifdef CONFIG_PHYS_ADDR_T_64BIT
- if (!(params->flags & GENET_HAS_40BITS))
+ if (!bcmgenet_has_40bits(priv))
pr_warn("GENET does not support 40-bits PA\n");
#endif
@@ -3923,7 +3751,7 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
struct bcmgenet_plat_data {
enum bcmgenet_version version;
u32 dma_max_burst_length;
- bool ephy_16nm;
+ u32 flags;
};
static const struct bcmgenet_plat_data v1_plat_data = {
@@ -3934,32 +3762,43 @@ static const struct bcmgenet_plat_data v1_plat_data = {
static const struct bcmgenet_plat_data v2_plat_data = {
.version = GENET_V2,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .flags = GENET_HAS_EXT,
};
static const struct bcmgenet_plat_data v3_plat_data = {
.version = GENET_V3,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
+ GENET_HAS_MOCA_LINK_DET,
};
static const struct bcmgenet_plat_data v4_plat_data = {
.version = GENET_V4,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
};
static const struct bcmgenet_plat_data v5_plat_data = {
.version = GENET_V5,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
};
static const struct bcmgenet_plat_data bcm2711_plat_data = {
.version = GENET_V5,
.dma_max_burst_length = 0x08,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
};
static const struct bcmgenet_plat_data bcm7712_plat_data = {
.version = GENET_V5,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
- .ephy_16nm = true,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET |
+ GENET_HAS_EPHY_16NM,
};
static const struct of_device_id bcmgenet_match[] = {
@@ -4057,7 +3896,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (pdata) {
priv->version = pdata->version;
priv->dma_max_burst_length = pdata->dma_max_burst_length;
- priv->ephy_16nm = pdata->ephy_16nm;
+ priv->flags = pdata->flags;
} else {
priv->version = pd->genet_version;
priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH;
@@ -4077,7 +3916,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
bcmgenet_set_hw_params(priv);
err = -EIO;
- if (priv->hw_params->flags & GENET_HAS_40BITS)
+ if (bcmgenet_has_40bits(priv))
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
if (err)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
@@ -4132,16 +3971,13 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (err)
goto err_clk_disable;
- /* setup number of real queues + 1 (GENET_V1 has 0 hardware queues
- * just the ring 16 descriptor based TX
- */
+ /* setup number of real queues + 1 */
netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
/* Set default coalescing parameters */
- for (i = 0; i < priv->hw_params->rx_queues; i++)
+ for (i = 0; i <= priv->hw_params->rx_queues; i++)
priv->rx_rings[i].rx_max_coalesced_frames = 1;
- priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1;
/* libphy will determine the link state */
netif_carrier_off(dev);
@@ -4205,9 +4041,22 @@ static int bcmgenet_resume_noirq(struct device *d)
reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
if (reg & UMAC_IRQ_WAKE_EVENT)
pm_wakeup_event(&priv->pdev->dev, 0);
+
+ /* From WOL-enabled suspend, switch to regular clock */
+ if (!bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC))
+ return 0;
+
+ /* Failed so fall through to reset MAC */
}
- bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT, INTRL2_CPU_CLEAR);
+ /* If this is an internal GPHY, power it back on now, before UniMAC is
+ * brought out of reset as absolutely no UniMAC activity is allowed
+ */
+ if (priv->internal_phy)
+ bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
+ /* take MAC out of reset */
+ bcmgenet_umac_reset(priv);
return 0;
}
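Put differently, the resume_noirq path above now attempts the lightweight WoL exit first and only falls back to a full MAC reset when that exit reports failure. A compilable sketch of the same flow, with hypothetical demo_* stand-ins for the driver's helpers:

#include <stdbool.h>

struct demo_priv { bool internal_phy; };

/* Stubs so the sketch builds; the real driver calls its own helpers. */
static int demo_wol_exit(struct demo_priv *p) { (void)p; return -1; }
static void demo_gphy_power_up(struct demo_priv *p) { (void)p; }
static void demo_umac_reset(struct demo_priv *p) { (void)p; }

static int demo_resume_noirq(struct demo_priv *priv)
{
	if (demo_wol_exit(priv) == 0)
		return 0;			/* WoL exit worked; keep hardware state */

	if (priv->internal_phy)
		demo_gphy_power_up(priv);	/* power the PHY before un-resetting the MAC */
	demo_umac_reset(priv);			/* fall back to a full reset */
	return 0;
}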
@@ -4217,23 +4066,46 @@ static int bcmgenet_resume(struct device *d)
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
struct bcmgenet_rxnfc_rule *rule;
- unsigned long dma_ctrl;
int ret;
+ u32 reg;
if (!netif_running(dev))
return 0;
- /* From WOL-enabled suspend, switch to regular clock */
- if (device_may_wakeup(d) && priv->wolopts)
- bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
-
- /* If this is an internal GPHY, power it back on now, before UniMAC is
- * brought out of reset as absolutely no UniMAC activity is allowed
- */
- if (priv->internal_phy)
- bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
-
- bcmgenet_umac_reset(priv);
+ if (device_may_wakeup(d) && priv->wolopts) {
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ if (reg & CMD_RX_EN) {
+ /* Successfully exited WoL, just resume data flows */
+ list_for_each_entry(rule, &priv->rxnfc_list, list)
+ if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
+ bcmgenet_hfb_enable_filter(priv,
+ rule->fs.location + 1);
+ bcmgenet_hfb_enable_filter(priv, 0);
+ bcmgenet_set_rx_mode(dev);
+ bcmgenet_enable_rx_napi(priv);
+
+ /* Reinitialize Tx flows */
+ bcmgenet_tdma_disable(priv);
+ bcmgenet_init_tx_queues(priv->dev);
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg |= DMA_EN;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+ bcmgenet_enable_tx_napi(priv);
+
+ bcmgenet_link_intr_enable(priv);
+ phy_start_machine(dev->phydev);
+
+ netif_device_attach(dev);
+ enable_irq(priv->irq1);
+ return 0;
+ }
+ /* MAC was reset so complete bcmgenet_netif_stop() */
+ umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, false);
+ bcmgenet_rdma_disable(priv);
+ bcmgenet_intr_disable(priv);
+ bcmgenet_fini_dma(priv);
+ enable_irq(priv->irq1);
+ }
init_umac(priv);
@@ -4254,19 +4126,13 @@ static int bcmgenet_resume(struct device *d)
if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
bcmgenet_hfb_create_rxnfc_filter(priv, rule);
- /* Disable RX/TX DMA and flush TX queues */
- dma_ctrl = bcmgenet_dma_disable(priv, false);
-
/* Reinitialize TDMA and RDMA and SW housekeeping */
- ret = bcmgenet_init_dma(priv);
+ ret = bcmgenet_init_dma(priv, false);
if (ret) {
netdev_err(dev, "failed to initialize DMA\n");
goto out_clk_disable;
}
- /* Always enable ring 16 - descriptor ring */
- bcmgenet_enable_dma(priv, dma_ctrl);
-
if (!device_may_wakeup(d))
phy_resume(dev->phydev);
@@ -4287,19 +4153,52 @@ static int bcmgenet_suspend(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct bcmgenet_rxnfc_rule *rule;
+ u32 reg, hfb_enable = 0;
if (!netif_running(dev))
return 0;
netif_device_detach(dev);
- bcmgenet_netif_stop(dev, true);
+ if (device_may_wakeup(d) && priv->wolopts) {
+ netif_tx_disable(dev);
+
+ /* Suspend non-wake Rx data flows */
+ if (priv->wolopts & WAKE_FILTER)
+ list_for_each_entry(rule, &priv->rxnfc_list, list)
+ if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE &&
+ rule->state == BCMGENET_RXNFC_STATE_ENABLED)
+ hfb_enable |= 1 << rule->fs.location;
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) {
+ reg &= ~RBUF_HFB_FILTER_EN_MASK;
+ reg |= hfb_enable << (RBUF_HFB_FILTER_EN_SHIFT + 1);
+ } else {
+ bcmgenet_hfb_reg_writel(priv, hfb_enable << 1,
+ HFB_FLT_ENABLE_V3PLUS + 4);
+ }
+ if (!hfb_enable)
+ reg &= ~RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
- if (!device_may_wakeup(d))
- phy_suspend(dev->phydev);
+ /* Clear any old filter matches so only new matches wake */
+ bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
+ bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
- /* Disable filtering */
- bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
+ if (-ETIMEDOUT == bcmgenet_tdma_disable(priv))
+ netdev_warn(priv->dev,
+ "Timed out while disabling TX DMA\n");
+
+ bcmgenet_disable_tx_napi(priv);
+ bcmgenet_disable_rx_napi(priv);
+ disable_irq(priv->irq1);
+ bcmgenet_tx_reclaim_all(dev);
+ bcmgenet_fini_tx_napi(priv);
+ } else {
+ /* Tear down the interface */
+ bcmgenet_netif_stop(dev, true);
+ }
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 43b923c48b14..10c631bbe964 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2014-2024 Broadcom
+ * Copyright (c) 2014-2025 Broadcom
*/
#ifndef __BCMGENET_H__
@@ -18,6 +18,9 @@
#include "../unimac.h"
+/* Maximum number of hardware queues, downsized if needed */
+#define GENET_MAX_MQ_CNT 4
+
/* total number of Buffer Descriptors, same for Rx/Tx */
#define TOTAL_DESC 256
@@ -271,6 +274,8 @@ struct bcmgenet_mib_counters {
/* Only valid for GENETv3+ */
#define UMAC_IRQ_MDIO_DONE (1 << 23)
#define UMAC_IRQ_MDIO_ERROR (1 << 24)
+#define UMAC_IRQ_MDIO_EVENT (UMAC_IRQ_MDIO_DONE | \
+ UMAC_IRQ_MDIO_ERROR)
/* INTRL2 instance 1 definitions */
#define UMAC_IRQ1_TX_INTR_MASK 0xFFFF
@@ -476,6 +481,7 @@ enum bcmgenet_version {
#define GENET_HAS_EXT (1 << 1)
#define GENET_HAS_MDIO_INTR (1 << 2)
#define GENET_HAS_MOCA_LINK_DET (1 << 3)
+#define GENET_HAS_EPHY_16NM (1 << 4)
/* BCMGENET hardware parameters, keep this structure nicely aligned
* since it is going to be used in hot paths
@@ -496,7 +502,6 @@ struct bcmgenet_hw_params {
u32 rdma_offset;
u32 tdma_offset;
u32 words_per_bd;
- u32 flags;
};
struct bcmgenet_skb_cb {
@@ -513,7 +518,6 @@ struct bcmgenet_tx_ring {
unsigned long packets;
unsigned long bytes;
unsigned int index; /* ring index */
- unsigned int queue; /* queue index */
struct enet_cb *cbs; /* tx ring buffer control block*/
unsigned int size; /* size of each tx ring */
unsigned int clean_ptr; /* Tx ring clean pointer */
@@ -523,8 +527,6 @@ struct bcmgenet_tx_ring {
unsigned int prod_index; /* Tx ring producer index SW copy */
unsigned int cb_ptr; /* Tx ring initial CB ptr */
unsigned int end_ptr; /* Tx ring end CB ptr */
- void (*int_enable)(struct bcmgenet_tx_ring *);
- void (*int_disable)(struct bcmgenet_tx_ring *);
struct bcmgenet_priv *priv;
};
@@ -553,8 +555,6 @@ struct bcmgenet_rx_ring {
struct bcmgenet_net_dim dim;
u32 rx_max_coalesced_frames;
u32 rx_coalesce_usecs;
- void (*int_enable)(struct bcmgenet_rx_ring *);
- void (*int_disable)(struct bcmgenet_rx_ring *);
struct bcmgenet_priv *priv;
};
@@ -583,7 +583,7 @@ struct bcmgenet_priv {
struct enet_cb *tx_cbs;
unsigned int num_tx_bds;
- struct bcmgenet_tx_ring tx_rings[DESC_INDEX + 1];
+ struct bcmgenet_tx_ring tx_rings[GENET_MAX_MQ_CNT + 1];
/* receive variables */
void __iomem *rx_bds;
@@ -593,10 +593,11 @@ struct bcmgenet_priv {
struct bcmgenet_rxnfc_rule rxnfc_rules[MAX_NUM_OF_FS_RULES];
struct list_head rxnfc_list;
- struct bcmgenet_rx_ring rx_rings[DESC_INDEX + 1];
+ struct bcmgenet_rx_ring rx_rings[GENET_MAX_MQ_CNT + 1];
/* other misc variables */
- struct bcmgenet_hw_params *hw_params;
+ const struct bcmgenet_hw_params *hw_params;
+ u32 flags;
unsigned autoneg_pause:1;
unsigned tx_pause:1;
unsigned rx_pause:1;
@@ -615,7 +616,6 @@ struct bcmgenet_priv {
phy_interface_t phy_interface;
int phy_addr;
int ext_phy;
- bool ephy_16nm;
/* Interrupt variables */
struct work_struct bcmgenet_irq_work;
@@ -643,13 +643,37 @@ struct bcmgenet_priv {
struct clk *clk_wol;
u32 wolopts;
u8 sopass[SOPASS_MAX];
- bool wol_active;
struct bcmgenet_mib_counters mib;
struct ethtool_keee eee;
};
+static inline bool bcmgenet_has_40bits(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_40BITS);
+}
+
+static inline bool bcmgenet_has_ext(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_EXT);
+}
+
+static inline bool bcmgenet_has_mdio_intr(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_MDIO_INTR);
+}
+
+static inline bool bcmgenet_has_moca_link_det(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_MOCA_LINK_DET);
+}
+
+static inline bool bcmgenet_has_ephy_16nm(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_EPHY_16NM);
+}
+
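The inline helpers above replace per-version lookups of hw_params->flags with a capability bitmask carried in the private structure. A minimal, self-contained sketch of the same pattern (DEMO_* names are illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>

#define DEMO_HAS_40BIT_DMA	(1u << 0)	/* example capability bits */
#define DEMO_HAS_EXT_BLOCK	(1u << 1)

struct demo_priv {
	uint32_t flags;		/* filled once from per-SoC platform data */
};

static inline bool demo_has_40bit_dma(const struct demo_priv *priv)
{
	return !!(priv->flags & DEMO_HAS_40BIT_DMA);
}

Keeping the bits in platform data, as the bcmgenet_plat_data changes above do, lets a capability be granted per SoC without growing another copy of the per-version parameter tables.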
#define GENET_IO_MACRO(name, offset) \
static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv, \
u32 off) \
@@ -702,8 +726,8 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode);
-void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
- enum bcmgenet_power_mode mode);
+int bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode);
void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
bool tx_lpi_enabled);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 3b082114f2e5..8fb551288298 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
*
- * Copyright (c) 2014-2024 Broadcom
+ * Copyright (c) 2014-2025 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet_wol: " fmt
@@ -145,8 +145,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode)
{
struct net_device *dev = priv->dev;
- struct bcmgenet_rxnfc_rule *rule;
- u32 reg, hfb_ctrl_reg, hfb_enable = 0;
+ u32 reg, hfb_ctrl_reg;
int retries = 0;
if (mode != GENET_POWER_WOL_MAGIC) {
@@ -154,18 +153,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
return -EINVAL;
}
- /* Can't suspend with WoL if MAC is still in reset */
- spin_lock_bh(&priv->reg_lock);
- reg = bcmgenet_umac_readl(priv, UMAC_CMD);
- if (reg & CMD_SW_RESET)
- reg &= ~CMD_SW_RESET;
-
- /* disable RX */
- reg &= ~CMD_RX_EN;
- bcmgenet_umac_writel(priv, reg, UMAC_CMD);
- spin_unlock_bh(&priv->reg_lock);
- mdelay(10);
-
if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
reg |= MPD_EN;
@@ -177,13 +164,8 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
}
hfb_ctrl_reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- if (priv->wolopts & WAKE_FILTER) {
- list_for_each_entry(rule, &priv->rxnfc_list, list)
- if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE)
- hfb_enable |= (1 << rule->fs.location);
- reg = (hfb_ctrl_reg & ~RBUF_HFB_EN) | RBUF_ACPI_EN;
- bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
- }
+ reg = hfb_ctrl_reg | RBUF_ACPI_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
/* Do not leave UniMAC in MPD mode only */
retries = bcmgenet_poll_wol_status(priv);
@@ -198,15 +180,12 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
netif_dbg(priv, wol, dev, "MPD WOL-ready status set after %d msec\n",
retries);
- clk_prepare_enable(priv->clk_wol);
- priv->wol_active = 1;
+ /* Disable phy status updates while suspending */
+ mutex_lock(&dev->phydev->lock);
+ dev->phydev->state = PHY_READY;
+ mutex_unlock(&dev->phydev->lock);
- if (hfb_enable) {
- bcmgenet_hfb_reg_writel(priv, hfb_enable,
- HFB_FLT_ENABLE_V3PLUS + 4);
- hfb_ctrl_reg = RBUF_HFB_EN | RBUF_ACPI_EN;
- bcmgenet_hfb_reg_writel(priv, hfb_ctrl_reg, HFB_CTRL);
- }
+ clk_prepare_enable(priv->clk_wol);
/* Enable CRC forward */
spin_lock_bh(&priv->reg_lock);
@@ -214,13 +193,17 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
priv->crc_fwd_en = 1;
reg |= CMD_CRC_FWD;
+ /* Can't suspend with WoL if MAC is still in reset */
+ if (reg & CMD_SW_RESET)
+ reg &= ~CMD_SW_RESET;
+
/* Receiver must be enabled for WOL MP detection */
reg |= CMD_RX_EN;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
spin_unlock_bh(&priv->reg_lock);
reg = UMAC_IRQ_MPD_R;
- if (hfb_enable)
+ if (hfb_ctrl_reg & RBUF_HFB_EN)
reg |= UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM;
bcmgenet_intrl2_0_writel(priv, reg, INTRL2_CPU_MASK_CLEAR);
@@ -228,40 +211,42 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
return 0;
}
-void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
- enum bcmgenet_power_mode mode)
+int bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode)
{
+ struct net_device *dev = priv->dev;
u32 reg;
if (mode != GENET_POWER_WOL_MAGIC) {
netif_err(priv, wol, priv->dev, "invalid mode: %d\n", mode);
- return;
+ return -EINVAL;
}
- if (!priv->wol_active)
- return; /* failed to suspend so skip the rest */
-
- priv->wol_active = 0;
clk_disable_unprepare(priv->clk_wol);
priv->crc_fwd_en = 0;
+ bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT,
+ INTRL2_CPU_MASK_SET);
+ if (bcmgenet_has_mdio_intr(priv))
+ bcmgenet_intrl2_0_writel(priv,
+ UMAC_IRQ_MDIO_EVENT,
+ INTRL2_CPU_MASK_CLEAR);
+
/* Disable Magic Packet Detection */
if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
if (!(reg & MPD_EN))
- return; /* already reset so skip the rest */
+ return -EPERM; /* already reset so skip the rest */
reg &= ~(MPD_EN | MPD_PW_EN);
bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
}
- /* Disable WAKE_FILTER Detection */
- if (priv->wolopts & WAKE_FILTER) {
- reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- if (!(reg & RBUF_ACPI_EN))
- return; /* already reset so skip the rest */
- reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN);
- bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
- }
+ /* Disable ACPI mode */
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ if (!(reg & RBUF_ACPI_EN))
+ return -EPERM; /* already reset so skip the rest */
+ reg &= ~RBUF_ACPI_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
/* Disable CRC Forward */
spin_lock_bh(&priv->reg_lock);
@@ -269,4 +254,14 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
reg &= ~CMD_CRC_FWD;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
spin_unlock_bh(&priv->reg_lock);
+
+ /* Resume link status tracking */
+ mutex_lock(&dev->phydev->lock);
+ if (dev->phydev->link)
+ dev->phydev->state = PHY_RUNNING;
+ else
+ dev->phydev->state = PHY_NOLINK;
+ mutex_unlock(&dev->phydev->lock);
+
+ return 0;
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index c4a3698cef66..71c619d2bea5 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET MDIO routines
*
- * Copyright (c) 2014-2024 Broadcom
+ * Copyright (c) 2014-2025 Broadcom
*/
#include <linux/acpi.h>
@@ -154,7 +154,7 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
u32 reg = 0;
/* EXT_GPHY_CTRL is only valid for GENETv4 and onward */
- if (GENET_IS_V4(priv) || priv->ephy_16nm) {
+ if (GENET_IS_V4(priv) || bcmgenet_has_ephy_16nm(priv)) {
reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
if (enable) {
reg &= ~EXT_CK25_DIS;
@@ -184,7 +184,7 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
{
- if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+ if (bcmgenet_has_moca_link_det(priv))
fixed_phy_set_link_update(priv->dev->phydev,
bcmgenet_fixed_phy_link_update);
}
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index ece6f3b48327..3b9107003b00 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -19,6 +19,7 @@
#include <linux/ip.h>
#include <linux/prefetch.h>
#include <linux/module.h>
+#include <net/gro.h>
#include "bnad.h"
#include "bna.h"
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 2847278d9cd4..c9a5c8beb2fa 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -951,75 +951,73 @@ struct macb_tx_skb {
* device stats by a periodic timer.
*/
struct macb_stats {
- u32 rx_pause_frames;
- u32 tx_ok;
- u32 tx_single_cols;
- u32 tx_multiple_cols;
- u32 rx_ok;
- u32 rx_fcs_errors;
- u32 rx_align_errors;
- u32 tx_deferred;
- u32 tx_late_cols;
- u32 tx_excessive_cols;
- u32 tx_underruns;
- u32 tx_carrier_errors;
- u32 rx_resource_errors;
- u32 rx_overruns;
- u32 rx_symbol_errors;
- u32 rx_oversize_pkts;
- u32 rx_jabbers;
- u32 rx_undersize_pkts;
- u32 sqe_test_errors;
- u32 rx_length_mismatch;
- u32 tx_pause_frames;
+ u64 rx_pause_frames;
+ u64 tx_ok;
+ u64 tx_single_cols;
+ u64 tx_multiple_cols;
+ u64 rx_ok;
+ u64 rx_fcs_errors;
+ u64 rx_align_errors;
+ u64 tx_deferred;
+ u64 tx_late_cols;
+ u64 tx_excessive_cols;
+ u64 tx_underruns;
+ u64 tx_carrier_errors;
+ u64 rx_resource_errors;
+ u64 rx_overruns;
+ u64 rx_symbol_errors;
+ u64 rx_oversize_pkts;
+ u64 rx_jabbers;
+ u64 rx_undersize_pkts;
+ u64 sqe_test_errors;
+ u64 rx_length_mismatch;
+ u64 tx_pause_frames;
};
struct gem_stats {
- u32 tx_octets_31_0;
- u32 tx_octets_47_32;
- u32 tx_frames;
- u32 tx_broadcast_frames;
- u32 tx_multicast_frames;
- u32 tx_pause_frames;
- u32 tx_64_byte_frames;
- u32 tx_65_127_byte_frames;
- u32 tx_128_255_byte_frames;
- u32 tx_256_511_byte_frames;
- u32 tx_512_1023_byte_frames;
- u32 tx_1024_1518_byte_frames;
- u32 tx_greater_than_1518_byte_frames;
- u32 tx_underrun;
- u32 tx_single_collision_frames;
- u32 tx_multiple_collision_frames;
- u32 tx_excessive_collisions;
- u32 tx_late_collisions;
- u32 tx_deferred_frames;
- u32 tx_carrier_sense_errors;
- u32 rx_octets_31_0;
- u32 rx_octets_47_32;
- u32 rx_frames;
- u32 rx_broadcast_frames;
- u32 rx_multicast_frames;
- u32 rx_pause_frames;
- u32 rx_64_byte_frames;
- u32 rx_65_127_byte_frames;
- u32 rx_128_255_byte_frames;
- u32 rx_256_511_byte_frames;
- u32 rx_512_1023_byte_frames;
- u32 rx_1024_1518_byte_frames;
- u32 rx_greater_than_1518_byte_frames;
- u32 rx_undersized_frames;
- u32 rx_oversize_frames;
- u32 rx_jabbers;
- u32 rx_frame_check_sequence_errors;
- u32 rx_length_field_frame_errors;
- u32 rx_symbol_errors;
- u32 rx_alignment_errors;
- u32 rx_resource_errors;
- u32 rx_overruns;
- u32 rx_ip_header_checksum_errors;
- u32 rx_tcp_checksum_errors;
- u32 rx_udp_checksum_errors;
+ u64 tx_octets;
+ u64 tx_frames;
+ u64 tx_broadcast_frames;
+ u64 tx_multicast_frames;
+ u64 tx_pause_frames;
+ u64 tx_64_byte_frames;
+ u64 tx_65_127_byte_frames;
+ u64 tx_128_255_byte_frames;
+ u64 tx_256_511_byte_frames;
+ u64 tx_512_1023_byte_frames;
+ u64 tx_1024_1518_byte_frames;
+ u64 tx_greater_than_1518_byte_frames;
+ u64 tx_underrun;
+ u64 tx_single_collision_frames;
+ u64 tx_multiple_collision_frames;
+ u64 tx_excessive_collisions;
+ u64 tx_late_collisions;
+ u64 tx_deferred_frames;
+ u64 tx_carrier_sense_errors;
+ u64 rx_octets;
+ u64 rx_frames;
+ u64 rx_broadcast_frames;
+ u64 rx_multicast_frames;
+ u64 rx_pause_frames;
+ u64 rx_64_byte_frames;
+ u64 rx_65_127_byte_frames;
+ u64 rx_128_255_byte_frames;
+ u64 rx_256_511_byte_frames;
+ u64 rx_512_1023_byte_frames;
+ u64 rx_1024_1518_byte_frames;
+ u64 rx_greater_than_1518_byte_frames;
+ u64 rx_undersized_frames;
+ u64 rx_oversize_frames;
+ u64 rx_jabbers;
+ u64 rx_frame_check_sequence_errors;
+ u64 rx_length_field_frame_errors;
+ u64 rx_symbol_errors;
+ u64 rx_alignment_errors;
+ u64 rx_resource_errors;
+ u64 rx_overruns;
+ u64 rx_ip_header_checksum_errors;
+ u64 rx_tcp_checksum_errors;
+ u64 rx_udp_checksum_errors;
};
/* Describes the name and offset of an individual statistic register, as
@@ -1027,7 +1025,7 @@ struct gem_stats {
* this register should contribute to.
*/
struct gem_statistic {
- char stat_string[ETH_GSTRING_LEN];
+ char stat_string[ETH_GSTRING_LEN] __nonstring;
int offset;
u32 stat_bits;
};
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index c1f57d96e63f..1fe8ec37491b 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -17,8 +17,6 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -26,7 +24,6 @@
#include <linux/platform_device.h>
#include <linux/phylink.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/ip.h>
@@ -853,9 +850,7 @@ static int macb_mii_probe(struct net_device *dev)
struct macb *bp = netdev_priv(dev);
bp->phylink_sgmii_pcs.ops = &macb_phylink_pcs_ops;
- bp->phylink_sgmii_pcs.neg_mode = true;
bp->phylink_usx_pcs.ops = &macb_phylink_usx_pcs_ops;
- bp->phylink_usx_pcs.neg_mode = true;
bp->phylink_config.dev = &dev->dev;
bp->phylink_config.type = PHYLINK_NETDEV;
@@ -990,8 +985,8 @@ err_out:
static void macb_update_stats(struct macb *bp)
{
- u32 *p = &bp->hw_stats.macb.rx_pause_frames;
- u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
+ u64 *p = &bp->hw_stats.macb.rx_pause_frames;
+ u64 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
int offset = MACB_PFR;
WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
@@ -1081,15 +1076,18 @@ static void macb_tx_error_task(struct work_struct *work)
tx_error_task);
bool halt_timeout = false;
struct macb *bp = queue->bp;
+ u32 queue_index;
+ u32 packets = 0;
+ u32 bytes = 0;
struct macb_tx_skb *tx_skb;
struct macb_dma_desc *desc;
struct sk_buff *skb;
unsigned int tail;
unsigned long flags;
+ queue_index = queue - bp->queues;
netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
- (unsigned int)(queue - bp->queues),
- queue->tx_tail, queue->tx_head);
+ queue_index, queue->tx_tail, queue->tx_head);
/* Prevent the queue NAPI TX poll from running, as it calls
* macb_tx_complete(), which in turn may call netif_wake_subqueue().
@@ -1142,8 +1140,10 @@ static void macb_tx_error_task(struct work_struct *work)
skb->data);
bp->dev->stats.tx_packets++;
queue->stats.tx_packets++;
+ packets++;
bp->dev->stats.tx_bytes += skb->len;
queue->stats.tx_bytes += skb->len;
+ bytes += skb->len;
}
} else {
/* "Buffers exhausted mid-frame" errors may only happen
@@ -1160,6 +1160,9 @@ static void macb_tx_error_task(struct work_struct *work)
macb_tx_unmap(bp, tx_skb, 0);
}
+ netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
+ packets, bytes);
+
/* Set end of TX queue */
desc = macb_tx_desc(queue, 0);
macb_set_addr(bp, desc, 0);
@@ -1230,6 +1233,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
unsigned int tail;
unsigned int head;
int packets = 0;
+ u32 bytes = 0;
spin_lock(&queue->tx_ptr_lock);
head = queue->tx_head;
@@ -1271,6 +1275,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
bp->dev->stats.tx_bytes += skb->len;
queue->stats.tx_bytes += skb->len;
packets++;
+ bytes += skb->len;
}
/* Now we can safely release resources */
@@ -1285,6 +1290,9 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
}
}
+ netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
+ packets, bytes);
+
queue->tx_tail = tail;
if (__netif_subqueue_stopped(bp->dev, queue_index) &&
CIRC_CNT(queue->tx_head, queue->tx_tail,
@@ -2388,6 +2396,8 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Make newly initialized descriptor visible to hardware */
wmb();
skb_tx_timestamp(skb);
+ netdev_tx_sent_queue(netdev_get_tx_queue(bp->dev, queue_index),
+ skb->len);
spin_lock_irq(&bp->lock);
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
@@ -3023,6 +3033,7 @@ static int macb_close(struct net_device *dev)
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
napi_disable(&queue->napi_rx);
napi_disable(&queue->napi_tx);
+ netdev_tx_reset_queue(netdev_get_tx_queue(dev, q));
}
phylink_stop(bp->phylink);
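The netdev_tx_sent_queue()/netdev_tx_completed_queue()/netdev_tx_reset_queue() calls added in the hunks above wire the TX path into byte queue limits. A short sketch of how the three calls pair up in a generic driver (the demo_* wrappers are hypothetical):

#include <linux/netdevice.h>

/* Transmit path: account the bytes just handed to hardware. */
static void demo_xmit_accounting(struct net_device *dev, u16 q, unsigned int len)
{
	netdev_tx_sent_queue(netdev_get_tx_queue(dev, q), len);
}

/* Completion path: report what the hardware finished sending. */
static void demo_tx_clean_accounting(struct net_device *dev, u16 q,
				     unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, q), pkts, bytes);
}

/* ifdown/reset path: drop stale accounting before the rings are reused. */
static void demo_queue_reset(struct net_device *dev, u16 q)
{
	netdev_tx_reset_queue(netdev_get_tx_queue(dev, q));
}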
@@ -3073,7 +3084,7 @@ static void gem_update_stats(struct macb *bp)
unsigned int i, q, idx;
unsigned long *stat;
- u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
+ u64 *p = &bp->hw_stats.gem.tx_octets;
for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
u32 offset = gem_statistics[i].offset;
@@ -3086,7 +3097,7 @@ static void gem_update_stats(struct macb *bp)
/* Add GEM_OCTTXH, GEM_OCTRXH */
val = bp->macb_reg_readl(bp, offset + 4);
bp->ethtool_stats[i] += ((u64)val) << 32;
- *(++p) += val;
+ *p += ((u64)val) << 32;
}
}
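For clarity, with the low and high halves now merged into a single u64 field, the register folding done in this loop reduces to the following (sketch only; demo_fold_counter() is not a driver function):

#include <linux/types.h>

/* Fold a 32-bit low/high hardware register pair into a 64-bit counter. */
static void demo_fold_counter(u64 *counter, u32 lo, u32 hi)
{
	*counter += lo;
	*counter += (u64)hi << 32;
}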
@@ -3096,16 +3107,13 @@ static void gem_update_stats(struct macb *bp)
bp->ethtool_stats[idx++] = *stat;
}
-static struct net_device_stats *gem_get_stats(struct macb *bp)
+static void gem_get_stats(struct macb *bp, struct rtnl_link_stats64 *nstat)
{
struct gem_stats *hwstat = &bp->hw_stats.gem;
- struct net_device_stats *nstat = &bp->dev->stats;
-
- if (!netif_running(bp->dev))
- return nstat;
spin_lock_irq(&bp->stats_lock);
- gem_update_stats(bp);
+ if (netif_running(bp->dev))
+ gem_update_stats(bp);
nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
hwstat->rx_alignment_errors +
@@ -3135,8 +3143,6 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
nstat->tx_fifo_errors = hwstat->tx_underrun;
spin_unlock_irq(&bp->stats_lock);
-
- return nstat;
}
static void gem_get_ethtool_stats(struct net_device *dev,
@@ -3188,14 +3194,17 @@ static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
}
}
-static struct net_device_stats *macb_get_stats(struct net_device *dev)
+static void macb_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *nstat)
{
struct macb *bp = netdev_priv(dev);
- struct net_device_stats *nstat = &bp->dev->stats;
struct macb_stats *hwstat = &bp->hw_stats.macb;
- if (macb_is_gem(bp))
- return gem_get_stats(bp);
+ netdev_stats_to_stats64(nstat, &bp->dev->stats);
+ if (macb_is_gem(bp)) {
+ gem_get_stats(bp, nstat);
+ return;
+ }
/* read stats from hardware */
spin_lock_irq(&bp->stats_lock);
@@ -3233,8 +3242,170 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
nstat->tx_fifo_errors = hwstat->tx_underruns;
/* Don't know about heartbeat or window errors... */
spin_unlock_irq(&bp->stats_lock);
+}
+
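The move from .ndo_get_stats to .ndo_get_stats64 above follows the usual shape: seed the 64-bit structure from the generic software counters, then overlay the fields backed by hardware. A hedged sketch (demo_get_stats64() is illustrative, not macb's code):

#include <linux/netdevice.h>

static void demo_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	/* Start from the software counters kept in dev->stats... */
	netdev_stats_to_stats64(stats, &dev->stats);
	/* ...then overwrite whatever the hardware tracks in 64 bits. */
	stats->rx_errors = 0;	/* placeholder: a real driver sums its MIB error counters here */
}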
+static void macb_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+ pause_stats->tx_pause_frames = hwstat->tx_pause_frames;
+ pause_stats->rx_pause_frames = hwstat->rx_pause_frames;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void gem_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
+
+ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ pause_stats->tx_pause_frames = hwstat->tx_pause_frames;
+ pause_stats->rx_pause_frames = hwstat->rx_pause_frames;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void macb_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+ mac_stats->FramesTransmittedOK = hwstat->tx_ok;
+ mac_stats->SingleCollisionFrames = hwstat->tx_single_cols;
+ mac_stats->MultipleCollisionFrames = hwstat->tx_multiple_cols;
+ mac_stats->FramesReceivedOK = hwstat->rx_ok;
+ mac_stats->FrameCheckSequenceErrors = hwstat->rx_fcs_errors;
+ mac_stats->AlignmentErrors = hwstat->rx_align_errors;
+ mac_stats->FramesWithDeferredXmissions = hwstat->tx_deferred;
+ mac_stats->LateCollisions = hwstat->tx_late_cols;
+ mac_stats->FramesAbortedDueToXSColls = hwstat->tx_excessive_cols;
+ mac_stats->FramesLostDueToIntMACXmitError = hwstat->tx_underruns;
+ mac_stats->CarrierSenseErrors = hwstat->tx_carrier_errors;
+ mac_stats->FramesLostDueToIntMACRcvError = hwstat->rx_overruns;
+ mac_stats->InRangeLengthErrors = hwstat->rx_length_mismatch;
+ mac_stats->FrameTooLongErrors = hwstat->rx_oversize_pkts;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void gem_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
- return nstat;
+ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ mac_stats->FramesTransmittedOK = hwstat->tx_frames;
+ mac_stats->SingleCollisionFrames = hwstat->tx_single_collision_frames;
+ mac_stats->MultipleCollisionFrames =
+ hwstat->tx_multiple_collision_frames;
+ mac_stats->FramesReceivedOK = hwstat->rx_frames;
+ mac_stats->FrameCheckSequenceErrors =
+ hwstat->rx_frame_check_sequence_errors;
+ mac_stats->AlignmentErrors = hwstat->rx_alignment_errors;
+ mac_stats->OctetsTransmittedOK = hwstat->tx_octets;
+ mac_stats->FramesWithDeferredXmissions = hwstat->tx_deferred_frames;
+ mac_stats->LateCollisions = hwstat->tx_late_collisions;
+ mac_stats->FramesAbortedDueToXSColls = hwstat->tx_excessive_collisions;
+ mac_stats->FramesLostDueToIntMACXmitError = hwstat->tx_underrun;
+ mac_stats->CarrierSenseErrors = hwstat->tx_carrier_sense_errors;
+ mac_stats->OctetsReceivedOK = hwstat->rx_octets;
+ mac_stats->MulticastFramesXmittedOK = hwstat->tx_multicast_frames;
+ mac_stats->BroadcastFramesXmittedOK = hwstat->tx_broadcast_frames;
+ mac_stats->MulticastFramesReceivedOK = hwstat->rx_multicast_frames;
+ mac_stats->BroadcastFramesReceivedOK = hwstat->rx_broadcast_frames;
+ mac_stats->InRangeLengthErrors = hwstat->rx_length_field_frame_errors;
+ mac_stats->FrameTooLongErrors = hwstat->rx_oversize_frames;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+/* TODO: Report SQE test errors when added to phy_stats */
+static void macb_get_eth_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+ phy_stats->SymbolErrorDuringCarrier = hwstat->rx_symbol_errors;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void gem_get_eth_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
+
+ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ phy_stats->SymbolErrorDuringCarrier = hwstat->rx_symbol_errors;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void macb_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+ rmon_stats->undersize_pkts = hwstat->rx_undersize_pkts;
+ rmon_stats->oversize_pkts = hwstat->rx_oversize_pkts;
+ rmon_stats->jabbers = hwstat->rx_jabbers;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static const struct ethtool_rmon_hist_range gem_rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 16384 },
+ { },
+};
+
+static void gem_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
+
+ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ rmon_stats->undersize_pkts = hwstat->rx_undersized_frames;
+ rmon_stats->oversize_pkts = hwstat->rx_oversize_frames;
+ rmon_stats->jabbers = hwstat->rx_jabbers;
+ rmon_stats->hist[0] = hwstat->rx_64_byte_frames;
+ rmon_stats->hist[1] = hwstat->rx_65_127_byte_frames;
+ rmon_stats->hist[2] = hwstat->rx_128_255_byte_frames;
+ rmon_stats->hist[3] = hwstat->rx_256_511_byte_frames;
+ rmon_stats->hist[4] = hwstat->rx_512_1023_byte_frames;
+ rmon_stats->hist[5] = hwstat->rx_1024_1518_byte_frames;
+ rmon_stats->hist[6] = hwstat->rx_greater_than_1518_byte_frames;
+ rmon_stats->hist_tx[0] = hwstat->tx_64_byte_frames;
+ rmon_stats->hist_tx[1] = hwstat->tx_65_127_byte_frames;
+ rmon_stats->hist_tx[2] = hwstat->tx_128_255_byte_frames;
+ rmon_stats->hist_tx[3] = hwstat->tx_256_511_byte_frames;
+ rmon_stats->hist_tx[4] = hwstat->tx_512_1023_byte_frames;
+ rmon_stats->hist_tx[5] = hwstat->tx_1024_1518_byte_frames;
+ rmon_stats->hist_tx[6] = hwstat->tx_greater_than_1518_byte_frames;
+ spin_unlock_irq(&bp->stats_lock);
+ *ranges = gem_rmon_ranges;
}
static int macb_get_regs_len(struct net_device *netdev)
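One contract worth noting for the RMON callback above: ethtool pairs hist[n]/hist_tx[n] with ranges[n], and the ranges table must end with an all-zero entry. A minimal sketch of that pairing (values illustrative, demo_* names are not macb's):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static const struct ethtool_rmon_hist_range demo_ranges[] = {
	{ 64,  64 },	/* describes hist[0] and hist_tx[0] */
	{ 65, 127 },	/* describes hist[1] and hist_tx[1] */
	{ },		/* all-zero entry terminates the table */
};

static void demo_get_rmon_stats(struct net_device *dev,
				struct ethtool_rmon_stats *rmon_stats,
				const struct ethtool_rmon_hist_range **ranges)
{
	rmon_stats->hist[0] = 0;	/* filled from hardware counters in a real driver */
	rmon_stats->hist[1] = 0;
	*ranges = demo_ranges;		/* hands the bucket layout to ethtool */
}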
@@ -3763,6 +3934,10 @@ static const struct ethtool_ops macb_ethtool_ops = {
.get_regs = macb_get_regs,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_pause_stats = macb_get_pause_stats,
+ .get_eth_mac_stats = macb_get_eth_mac_stats,
+ .get_eth_phy_stats = macb_get_eth_phy_stats,
+ .get_rmon_stats = macb_get_rmon_stats,
.get_wol = macb_get_wol,
.set_wol = macb_set_wol,
.get_link_ksettings = macb_get_link_ksettings,
@@ -3781,6 +3956,10 @@ static const struct ethtool_ops gem_ethtool_ops = {
.get_ethtool_stats = gem_get_ethtool_stats,
.get_strings = gem_get_ethtool_strings,
.get_sset_count = gem_get_sset_count,
+ .get_pause_stats = gem_get_pause_stats,
+ .get_eth_mac_stats = gem_get_eth_mac_stats,
+ .get_eth_phy_stats = gem_get_eth_phy_stats,
+ .get_rmon_stats = gem_get_rmon_stats,
.get_link_ksettings = macb_get_link_ksettings,
.set_link_ksettings = macb_set_link_ksettings,
.get_ringparam = macb_get_ringparam,
@@ -3917,7 +4096,7 @@ static const struct net_device_ops macb_netdev_ops = {
.ndo_stop = macb_close,
.ndo_start_xmit = macb_start_xmit,
.ndo_set_rx_mode = macb_set_rx_mode,
- .ndo_get_stats = macb_get_stats,
+ .ndo_get_stats64 = macb_get_stats,
.ndo_eth_ioctl = macb_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = macb_change_mtu,
@@ -4578,7 +4757,7 @@ static const struct net_device_ops at91ether_netdev_ops = {
.ndo_open = at91ether_open,
.ndo_stop = at91ether_close,
.ndo_start_xmit = at91ether_start_xmit,
- .ndo_get_stats = macb_get_stats,
+ .ndo_get_stats64 = macb_get_stats,
.ndo_set_rx_mode = macb_set_rx_mode,
.ndo_set_mac_address = eth_mac_addr,
.ndo_eth_ioctl = macb_ioctl,
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 9ad49aea2673..ff8f2f9f9cae 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -49,7 +49,7 @@ static int cn23xx_pf_soft_reset(struct octeon_device *oct)
lio_pci_readq(oct, CN23XX_RST_SOFT_RST);
lio_pci_writeq(oct, 1, CN23XX_RST_SOFT_RST);
- /* Wait for 100ms as Octeon resets. */
+ /* Wait for 100ms as Octeon resets */
mdelay(100);
if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)) {
@@ -61,7 +61,7 @@ static int cn23xx_pf_soft_reset(struct octeon_device *oct)
dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Reset completed\n",
oct->octeon_id);
- /* restore the reset value*/
+ /* Restore the reset value */
octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);
return 0;
@@ -121,7 +121,7 @@ u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us)
oqticks_per_us /= 1024;
/* time_intr is in microseconds. The next 2 steps gives the oq ticks
- * corressponding to time_intr.
+ * corresponding to time_intr.
*/
oqticks_per_us *= time_intr_in_us;
oqticks_per_us /= 1000;
@@ -136,11 +136,11 @@ static void cn23xx_setup_global_mac_regs(struct octeon_device *oct)
u64 reg_val;
u64 temp;
- /* programming SRN and TRS for each MAC(0..3) */
+ /* Programming SRN and TRS for each MAC(0..3) */
dev_dbg(&oct->pci_dev->dev, "%s:Using pcie port %d\n",
__func__, mac_no);
- /* By default, mapping all 64 IOQs to a single MACs */
+ /* By default, map all 64 IOQs to a single MAC */
reg_val =
octeon_read_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num));
@@ -164,7 +164,7 @@ static void cn23xx_setup_global_mac_regs(struct octeon_device *oct)
temp = oct->sriov_info.max_vfs & 0xff;
reg_val |= (temp << CN23XX_PKT_MAC_CTL_RINFO_NVFS_BIT_POS);
- /* write these settings to MAC register */
+ /* Write these settings to MAC register */
octeon_write_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num),
reg_val);
@@ -183,10 +183,10 @@ static int cn23xx_reset_io_queues(struct octeon_device *oct)
srn = oct->sriov_info.pf_srn;
ern = srn + oct->sriov_info.num_pf_rings;
- /*As per HRM reg description, s/w cant write 0 to ENB. */
- /*to make the queue off, need to set the RST bit. */
+ /* As per HRM reg description, s/w can't write 0 to ENB. */
+ /* We need to set the RST bit to turn the queue off. */
- /* Reset the Enable bit for all the 64 IQs. */
+ /* Reset the enable bit for all the 64 IQs. */
for (q_no = srn; q_no < ern; q_no++) {
/* set RST bit to 1. This bit applies to both IQ and OQ */
d64 = octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
@@ -194,7 +194,7 @@ static int cn23xx_reset_io_queues(struct octeon_device *oct)
octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64);
}
- /*wait until the RST bit is clear or the RST and quite bits are set*/
+ /* Wait until the RST bit is clear or the RST and quiet bits are set */
for (q_no = srn; q_no < ern; q_no++) {
u64 reg_val = octeon_read_csr64(oct,
CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
@@ -245,15 +245,15 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
if (cn23xx_reset_io_queues(oct))
return -1;
- /** Set the MAC_NUM and PVF_NUM in IQ_PKT_CONTROL reg
- * for all queues.Only PF can set these bits.
+ /* Set the MAC_NUM and PVF_NUM in IQ_PKT_CONTROL reg
+ * for all queues. Only PF can set these bits.
* bits 29:30 indicate the MAC num.
* bits 32:47 indicate the PVF num.
*/
for (q_no = 0; q_no < ern; q_no++) {
reg_val = (u64)oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
- /* for VF assigned queues. */
+ /* For VF assigned queues. */
if (q_no < oct->sriov_info.pf_srn) {
vf_num = q_no / oct->sriov_info.rings_per_vf;
vf_num += 1; /* VF1, VF2,........ */
@@ -268,7 +268,7 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
reg_val);
}
- /* Select ES, RO, NS, RDSIZE,DPTR Fomat#0 for
+ /* Select ES, RO, NS, RDSIZE,DPTR Format#0 for
* pf queues
*/
for (q_no = srn; q_no < ern; q_no++) {
@@ -289,7 +289,7 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
reg_val);
- /* Set WMARK level for triggering PI_INT */
+ /* Set WMARK level to trigger PI_INT */
/* intr_threshold = CN23XX_DEF_IQ_INTR_THRESHOLD & */
intr_threshold = CFG_GET_IQ_INTR_PKT(cn23xx->conf) &
CN23XX_PKT_IN_DONE_WMARK_MASK;
@@ -354,7 +354,7 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
/* set the ES bit */
reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);
- /* write all the selected settings */
+ /* Write all the selected settings */
octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no), reg_val);
/* Enabling these interrupt in oct->fn_list.enable_interrupt()
@@ -373,7 +373,7 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
/** Setting the water mark level for pko back pressure **/
writeq(0x40, (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_WMARK);
- /** Disabling setting OQs in reset when ring has no dorebells
+ /* Disabling setting OQs in reset when ring has no doorbells
* enabling this will cause of head of line blocking
*/
/* Do it only for pass1.1. and pass1.2 */
@@ -383,7 +383,7 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
CN23XX_SLI_GBL_CONTROL) | 0x2,
(u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_GBL_CONTROL);
- /** Enable channel-level backpressure */
+ /** Enable channel-level backpressure **/
if (oct->pf_num)
writeq(0xffffffffffffffffULL,
(u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN2_W1S);
@@ -396,7 +396,7 @@ static int cn23xx_setup_pf_device_regs(struct octeon_device *oct)
{
cn23xx_enable_error_reporting(oct);
- /* program the MAC(0..3)_RINFO before setting up input/output regs */
+ /* Program the MAC(0..3)_RINFO before setting up input/output regs */
cn23xx_setup_global_mac_regs(oct);
if (cn23xx_pf_setup_global_input_regs(oct))
@@ -410,7 +410,7 @@ static int cn23xx_setup_pf_device_regs(struct octeon_device *oct)
octeon_write_csr64(oct, CN23XX_SLI_WINDOW_CTL,
CN23XX_SLI_WINDOW_CTL_DEFAULT);
- /* set SLI_PKT_IN_JABBER to handle large VXLAN packets */
+ /* Set SLI_PKT_IN_JABBER to handle large VXLAN packets */
octeon_write_csr64(oct, CN23XX_SLI_PKT_IN_JABBER, CN23XX_INPUT_JABBER);
return 0;
}
@@ -574,7 +574,7 @@ static int cn23xx_setup_pf_mbox(struct octeon_device *oct)
mbox->mbox_read_reg = (u8 *)oct->mmio[0].hw_addr +
CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q_no, 1);
- /*Mail Box Thread creation*/
+ /* Mail Box Thread creation */
INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work,
cn23xx_pf_mbox_thread);
mbox->mbox_poll_wk.ctxptr = (void *)mbox;
@@ -626,7 +626,7 @@ static int cn23xx_enable_io_queues(struct octeon_device *oct)
ern = srn + oct->num_iqs;
for (q_no = srn; q_no < ern; q_no++) {
- /* set the corresponding IQ IS_64B bit */
+ /* Set the corresponding IQ IS_64B bit */
if (oct->io_qmask.iq64B & BIT_ULL(q_no - srn)) {
reg_val = octeon_read_csr64(
oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
@@ -635,7 +635,7 @@ static int cn23xx_enable_io_queues(struct octeon_device *oct)
oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
}
- /* set the corresponding IQ ENB bit */
+ /* Set the corresponding IQ ENB bit */
if (oct->io_qmask.iq & BIT_ULL(q_no - srn)) {
/* IOQs are in reset by default in PEM2 mode,
* clearing reset bit
@@ -681,7 +681,7 @@ static int cn23xx_enable_io_queues(struct octeon_device *oct)
}
for (q_no = srn; q_no < ern; q_no++) {
u32 reg_val;
- /* set the corresponding OQ ENB bit */
+ /* Set the corresponding OQ ENB bit */
if (oct->io_qmask.oq & BIT_ULL(q_no - srn)) {
reg_val = octeon_read_csr(
oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
@@ -707,7 +707,7 @@ static void cn23xx_disable_io_queues(struct octeon_device *oct)
for (q_no = srn; q_no < ern; q_no++) {
loop = HZ;
- /* start the Reset for a particular ring */
+ /* Start the Reset for a particular ring */
WRITE_ONCE(d64, octeon_read_csr64(
oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
WRITE_ONCE(d64, READ_ONCE(d64) &
@@ -740,7 +740,7 @@ static void cn23xx_disable_io_queues(struct octeon_device *oct)
loop = HZ;
/* Wait until hardware indicates that the particular IQ
- * is out of reset.It given that SLI_PKT_RING_RST is
+ * is out of reset. Note that SLI_PKT_RING_RST is
* common for both IQs and OQs
*/
WRITE_ONCE(d64, octeon_read_csr64(
@@ -760,7 +760,7 @@ static void cn23xx_disable_io_queues(struct octeon_device *oct)
schedule_timeout_uninterruptible(1);
}
- /* clear the SLI_PKT(0..63)_CNTS[CNT] reg value */
+ /* Clear the SLI_PKT(0..63)_CNTS[CNT] reg value */
WRITE_ONCE(d32, octeon_read_csr(
oct, CN23XX_SLI_OQ_PKTS_SENT(q_no)));
octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_SENT(q_no),
@@ -793,7 +793,7 @@ static u64 cn23xx_pf_msix_interrupt_handler(void *dev)
if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL))
return ret;
- /* Write count reg in sli_pkt_cnts to clear these int.*/
+ /* Write count reg in sli_pkt_cnts to clear these int. */
if ((pkts_sent & CN23XX_INTR_PO_INT) ||
(pkts_sent & CN23XX_INTR_PI_INT)) {
if (pkts_sent & CN23XX_INTR_PO_INT)
@@ -908,7 +908,7 @@ static u32 cn23xx_bar1_idx_read(struct octeon_device *oct, u32 idx)
oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
}
-/* always call with lock held */
+/* Always call with lock held */
static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq)
{
u32 new_idx;
@@ -919,7 +919,7 @@ static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq)
iq->pkt_in_done = pkt_in_done;
/* Modulo of the new index with the IQ size will give us
- * the new index. The iq->reset_instr_cnt is always zero for
+ * the new index. The iq->reset_instr_cnt is always zero for
* cn23xx, so no extra adjustments are needed.
*/
new_idx = (iq->octeon_read_index +
@@ -934,8 +934,8 @@ static void cn23xx_enable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
u64 intr_val = 0;
- /* Divide the single write to multiple writes based on the flag. */
- /* Enable Interrupt */
+ /* Divide the single write to multiple writes based on the flag. */
+ /* Enable Interrupts */
if (intr_flag == OCTEON_ALL_INTR) {
writeq(cn23xx->intr_mask64, cn23xx->intr_enb_reg64);
} else if (intr_flag & OCTEON_OUTPUT_INTR) {
@@ -990,7 +990,7 @@ static int cn23xx_get_pf_num(struct octeon_device *oct)
ret = 0;
- /** Read Function Dependency Link reg to get the function number */
+ /* Read Function Dependency Link reg to get the function number */
if (pci_read_config_dword(oct->pci_dev, CN23XX_PCIE_SRIOV_FDL,
&fdl_bit) == 0) {
oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) &
@@ -1003,13 +1003,13 @@ static int cn23xx_get_pf_num(struct octeon_device *oct)
* In this case, read the PF number from the
* SLI_PKT0_INPUT_CONTROL reg (written by f/w)
*/
- pkt0_in_ctl = octeon_read_csr64(oct,
- CN23XX_SLI_IQ_PKT_CONTROL64(0));
+ pkt0_in_ctl =
+ octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(0));
pfnum = (pkt0_in_ctl >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &
CN23XX_PKT_INPUT_CTL_PF_NUM_MASK;
mac = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff;
- /* validate PF num by reading RINFO; f/w writes RINFO.trs == 1*/
+ /* Validate PF num by reading RINFO; f/w writes RINFO.trs == 1 */
d64 = octeon_read_csr64(oct,
CN23XX_SLI_PKT_MAC_RINFO64(mac, pfnum));
trs = (int)(d64 >> CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS) & 0xff;
@@ -1252,9 +1252,9 @@ int cn23xx_fw_loaded(struct octeon_device *oct)
u64 val;
/* If there's more than one active PF on this NIC, then that
- * implies that the NIC firmware is loaded and running. This check
+ * implies that the NIC firmware is loaded and running. This check
* prevents a rare false negative that might occur if we only relied
- * on checking the SCR2_BIT_FW_LOADED flag. The false negative would
+ * on checking the SCR2_BIT_FW_LOADED flag. The false negative would
* happen if the PF driver sees SCR2_BIT_FW_LOADED as cleared even
* though the firmware was already loaded but still booting and has yet
* to set SCR2_BIT_FW_LOADED.
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 6b6cb73482d7..1753bb87dfbd 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -1433,22 +1433,6 @@ int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
}
EXPORT_SYMBOL_GPL(octeon_wait_for_ddr_init);
-/* Get the octeon id assigned to the octeon device passed as argument.
- * This function is exported to other modules.
- * @param dev - octeon device pointer passed as a void *.
- * @return octeon device id
- */
-int lio_get_device_id(void *dev)
-{
- struct octeon_device *octeon_dev = (struct octeon_device *)dev;
- u32 i;
-
- for (i = 0; i < MAX_OCTEON_DEVICES; i++)
- if (octeon_device[i] == octeon_dev)
- return octeon_dev->octeon_id;
- return -1;
-}
-
void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
{
u64 instr_cnt;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index d26364c2ac81..19344b21f8fb 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -705,13 +705,6 @@ octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode,
*/
struct octeon_device *lio_get_device(u32 octeon_id);
-/** Get the octeon id assigned to the octeon device passed as argument.
- * This function is exported to other modules.
- * @param dev - octeon device pointer passed as a void *.
- * @return octeon device id
- */
-int lio_get_device_id(void *dev);
-
/** Read windowed register.
* @param oct - pointer to the Octeon device.
* @param addr - Address of the register to read.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index c7c2c15a1815..95e6f015a6af 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1211,9 +1211,6 @@ struct adapter {
struct timer_list flower_stats_timer;
struct work_struct flower_stats_work;
- /* Ethtool Dump */
- struct ethtool_dump eth_dump;
-
/* HMA */
struct hma_data hma;
@@ -1233,6 +1230,10 @@ struct adapter {
/* Ethtool n-tuple */
struct cxgb4_ethtool_filter *ethtool_filters;
+
+ /* Ethtool Dump */
+ /* Must be last - ends in a flex-array member. */
+ struct ethtool_dump eth_dump;
};
/* Support for "sched-class" command to allow a TX Scheduling Class to be
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 2f0b3e389e62..551c279dc14b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6538,26 +6538,6 @@ out_unlock:
mutex_unlock(&uld_mutex);
}
-static bool cxgb4_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- struct adapter *adap = netdev2adap(x->xso.dev);
- bool ret = false;
-
- if (!mutex_trylock(&uld_mutex)) {
- dev_dbg(adap->pdev_dev,
- "crypto uld critical resource is under use\n");
- return ret;
- }
- if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
- goto out_unlock;
-
- ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_offload_ok(skb, x);
-
-out_unlock:
- mutex_unlock(&uld_mutex);
- return ret;
-}
-
static void cxgb4_advance_esn_state(struct xfrm_state *x)
{
struct adapter *adap = netdev2adap(x->xso.dev);
@@ -6583,7 +6563,6 @@ static const struct xfrmdev_ops cxgb4_xfrmdev_ops = {
.xdo_dev_state_add = cxgb4_xfrm_add_state,
.xdo_dev_state_delete = cxgb4_xfrm_del_state,
.xdo_dev_state_free = cxgb4_xfrm_free_state,
- .xdo_dev_offload_ok = cxgb4_ipsec_offload_ok,
.xdo_dev_state_advance_esn = cxgb4_advance_esn_state,
};
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
index c7338ac6a5bb..baba96883f48 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
@@ -71,7 +71,6 @@
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
-static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state);
static int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop);
@@ -85,7 +84,6 @@ static const struct xfrmdev_ops ch_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = ch_ipsec_xfrm_add_state,
.xdo_dev_state_delete = ch_ipsec_xfrm_del_state,
.xdo_dev_state_free = ch_ipsec_xfrm_free_state,
- .xdo_dev_offload_ok = ch_ipsec_offload_ok,
.xdo_dev_state_advance_esn = ch_ipsec_advance_esn_state,
};
@@ -323,20 +321,6 @@ static void ch_ipsec_xfrm_free_state(struct xfrm_state *x)
module_put(THIS_MODULE);
}
-static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- if (x->props.family == AF_INET) {
- /* Offload with IP options is not supported yet */
- if (ip_hdr(skb)->ihl > 5)
- return false;
- } else {
- /* Offload with IPv6 extension headers is not support yet */
- if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
- return false;
- }
- return true;
-}
-
static void ch_ipsec_advance_esn_state(struct xfrm_state *x)
{
/* do nothing */
diff --git a/drivers/net/ethernet/cisco/enic/Kconfig b/drivers/net/ethernet/cisco/enic/Kconfig
index ad80c0fa96a6..96709875fe4f 100644
--- a/drivers/net/ethernet/cisco/enic/Kconfig
+++ b/drivers/net/ethernet/cisco/enic/Kconfig
@@ -6,5 +6,6 @@
config ENIC
tristate "Cisco VIC Ethernet NIC Support"
depends on PCI
+ select PAGE_POOL
help
This enables the support for the Cisco VIC Ethernet card.
diff --git a/drivers/net/ethernet/cisco/enic/Makefile b/drivers/net/ethernet/cisco/enic/Makefile
index c3b6febfdbe4..a96b8332e6e2 100644
--- a/drivers/net/ethernet/cisco/enic/Makefile
+++ b/drivers/net/ethernet/cisco/enic/Makefile
@@ -3,5 +3,5 @@ obj-$(CONFIG_ENIC) := enic.o
enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \
- enic_ethtool.o enic_api.o enic_clsf.o
+ enic_ethtool.o enic_api.o enic_clsf.o enic_rq.o enic_wq.o
diff --git a/drivers/net/ethernet/cisco/enic/cq_desc.h b/drivers/net/ethernet/cisco/enic/cq_desc.h
index 462c5435a206..bfb3f14e89f5 100644
--- a/drivers/net/ethernet/cisco/enic/cq_desc.h
+++ b/drivers/net/ethernet/cisco/enic/cq_desc.h
@@ -40,28 +40,7 @@ struct cq_desc {
#define CQ_DESC_COMP_NDX_BITS 12
#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
-static inline void cq_desc_dec(const struct cq_desc *desc_arg,
- u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
-{
- const struct cq_desc *desc = desc_arg;
- const u8 type_color = desc->type_color;
-
- *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
-
- /*
- * Make sure color bit is read from desc *before* other fields
- * are read from desc. Hardware guarantees color bit is last
- * bit (byte) written. Adding the rmb() prevents the compiler
- * and/or CPU from reordering the reads which would potentially
- * result in reading stale values.
- */
-
- rmb();
-
- *type = type_color & CQ_DESC_TYPE_MASK;
- *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
- *completed_index = le16_to_cpu(desc->completed_index) &
- CQ_DESC_COMP_NDX_MASK;
-}
+#define CQ_DESC_32_FI_MASK (BIT(0) | BIT(1))
+#define CQ_DESC_64_FI_MASK (BIT(0) | BIT(1))
#endif /* _CQ_DESC_H_ */
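
The generic cq_desc_dec() helper is removed here; its per-entry-size replacements live in the new enic_rq.c/enic_wq.c files added later in this diff. The new CQ_DESC_32_FI_MASK/CQ_DESC_64_FI_MASK pick the two low bits of fetch_index_flags that extend the 12-bit completed_index for the larger rings. A minimal standalone sketch of that decode, with made-up values and the endianness conversion omitted:

    /* Sketch only: 12 base bits plus 2 extension bits give a 14-bit
     * completed index, enough for the new 16384-entry rings. Not driver code.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define CQ_DESC_COMP_NDX_BITS   12
    #define CQ_DESC_COMP_NDX_MASK   ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
    #define CQ_DESC_32_FI_MASK      0x3     /* BIT(0) | BIT(1) */

    int main(void)
    {
            uint16_t completed_index_flags = 0x0abc; /* low 12 bits: 0xabc */
            uint8_t  fetch_index_flags     = 0x02;   /* bits 13:12: 0b10  */
            uint16_t ci;

            ci  = completed_index_flags & CQ_DESC_COMP_NDX_MASK;
            ci |= (fetch_index_flags & CQ_DESC_32_FI_MASK) << CQ_DESC_COMP_NDX_BITS;
            printf("completed_index = 0x%x\n", ci);  /* prints 0x2abc */
            return 0;
    }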
diff --git a/drivers/net/ethernet/cisco/enic/cq_enet_desc.h b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
index d25426470a29..50787cff29db 100644
--- a/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
+++ b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
@@ -17,12 +17,22 @@ struct cq_enet_wq_desc {
u8 type_color;
};
-static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
- u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
-{
- cq_desc_dec((struct cq_desc *)desc, type,
- color, q_number, completed_index);
-}
+/*
+ * Defines and Capabilities for CMD_CQ_ENTRY_SIZE_SET
+ */
+#define VNIC_RQ_ALL (~0ULL)
+
+#define VNIC_RQ_CQ_ENTRY_SIZE_16 0
+#define VNIC_RQ_CQ_ENTRY_SIZE_32 1
+#define VNIC_RQ_CQ_ENTRY_SIZE_64 2
+
+#define VNIC_RQ_CQ_ENTRY_SIZE_16_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_16)
+#define VNIC_RQ_CQ_ENTRY_SIZE_32_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_32)
+#define VNIC_RQ_CQ_ENTRY_SIZE_64_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_64)
+
+#define VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT (VNIC_RQ_CQ_ENTRY_SIZE_16_CAPABLE | \
+ VNIC_RQ_CQ_ENTRY_SIZE_32_CAPABLE | \
+ VNIC_RQ_CQ_ENTRY_SIZE_64_CAPABLE)
/* Completion queue descriptor: Ethernet receive queue, 16B */
struct cq_enet_rq_desc {
@@ -36,6 +46,45 @@ struct cq_enet_rq_desc {
u8 type_color;
};
+/* Completion queue descriptor: Ethernet receive queue, 32B */
+struct cq_enet_rq_desc_32 {
+ __le16 completed_index_flags;
+ __le16 q_number_rss_type_flags;
+ __le32 rss_hash;
+ __le16 bytes_written_flags;
+ __le16 vlan;
+ __le16 checksum_fcoe;
+ u8 flags;
+ u8 fetch_index_flags;
+ __le32 time_stamp;
+ __le16 time_stamp2;
+ __le16 pie_info;
+ __le32 pie_info2;
+ __le16 pie_info3;
+ u8 pie_info4;
+ u8 type_color;
+};
+
+/* Completion queue descriptor: Ethernet receive queue, 64B */
+struct cq_enet_rq_desc_64 {
+ __le16 completed_index_flags;
+ __le16 q_number_rss_type_flags;
+ __le32 rss_hash;
+ __le16 bytes_written_flags;
+ __le16 vlan;
+ __le16 checksum_fcoe;
+ u8 flags;
+ u8 fetch_index_flags;
+ __le32 time_stamp;
+ __le16 time_stamp2;
+ __le16 pie_info;
+ __le32 pie_info2;
+ __le16 pie_info3;
+ u8 pie_info4;
+ u8 reserved[32];
+ u8 type_color;
+};
+
#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12)
#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13)
#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14)
@@ -88,85 +137,4 @@ struct cq_enet_rq_desc {
#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6)
#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7)
-static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
- u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
- u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
- u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
- u8 *vlan_stripped, u16 *vlan_tci, u16 *checksum, u8 *fcoe_sof,
- u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
- u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
- u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
-{
- u16 completed_index_flags;
- u16 q_number_rss_type_flags;
- u16 bytes_written_flags;
-
- cq_desc_dec((struct cq_desc *)desc, type,
- color, q_number, completed_index);
-
- completed_index_flags = le16_to_cpu(desc->completed_index_flags);
- q_number_rss_type_flags =
- le16_to_cpu(desc->q_number_rss_type_flags);
- bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
-
- *ingress_port = (completed_index_flags &
- CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
- *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
- 1 : 0;
- *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
- 1 : 0;
- *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
- 1 : 0;
-
- *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
- CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
- *csum_not_calc = (q_number_rss_type_flags &
- CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
-
- *rss_hash = le32_to_cpu(desc->rss_hash);
-
- *bytes_written = bytes_written_flags &
- CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
- *packet_error = (bytes_written_flags &
- CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
- *vlan_stripped = (bytes_written_flags &
- CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
-
- /*
- * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12)
- */
- *vlan_tci = le16_to_cpu(desc->vlan);
-
- if (*fcoe) {
- *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
- CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
- *fcoe_fc_crc_ok = (desc->flags &
- CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
- *fcoe_enc_error = (desc->flags &
- CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
- *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >>
- CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
- CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
- *checksum = 0;
- } else {
- *fcoe_sof = 0;
- *fcoe_fc_crc_ok = 0;
- *fcoe_enc_error = 0;
- *fcoe_eof = 0;
- *checksum = le16_to_cpu(desc->checksum_fcoe);
- }
-
- *tcp_udp_csum_ok =
- (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
- *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
- *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
- *ipv4_csum_ok =
- (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
- *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
- *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
- *ipv4_fragment =
- (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
- *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
-}
-
#endif /* _CQ_ENET_DESC_H_ */
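
The 32- and 64-byte RQ completion descriptors added above keep the first 15 bytes and the final type_color byte compatible with the 16-byte layout, which is what lets the common decode path in enic_rq.c treat them uniformly. A hedged compile-time check of those layout assumptions (illustrative only, not part of the patch; the members are naturally aligned, so no padding is expected):

    /* Sketch: size/offset expectations for the new descriptors. Compiles only
     * when placed next to the struct definitions above; not in the driver.
     */
    #include <stddef.h>

    _Static_assert(sizeof(struct cq_enet_rq_desc)    == 16, "16B CQ entry");
    _Static_assert(sizeof(struct cq_enet_rq_desc_32) == 32, "32B CQ entry");
    _Static_assert(sizeof(struct cq_enet_rq_desc_64) == 64, "64B CQ entry");
    /* type_color must stay the last byte so the color-bit check is the same
     * for every entry size.
     */
    _Static_assert(offsetof(struct cq_enet_rq_desc_32, type_color) == 31, "");
    _Static_assert(offsetof(struct cq_enet_rq_desc_64, type_color) == 63, "");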
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 10b7e02ba4d0..9c12e967e9f1 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -17,6 +17,7 @@
#include "vnic_nic.h"
#include "vnic_rss.h"
#include <linux/irq.h>
+#include <net/page_pool/helpers.h>
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
@@ -30,6 +31,13 @@
#define ENIC_AIC_LARGE_PKT_DIFF 3
+enum ext_cq {
+ ENIC_RQ_CQ_ENTRY_SIZE_16,
+ ENIC_RQ_CQ_ENTRY_SIZE_32,
+ ENIC_RQ_CQ_ENTRY_SIZE_64,
+ ENIC_RQ_CQ_ENTRY_SIZE_MAX,
+};
+
struct enic_msix_entry {
int requested;
char devname[IFNAMSIZ + 8];
@@ -75,6 +83,10 @@ struct enic_rx_coal {
#define ENIC_SET_INSTANCE (1 << 3)
#define ENIC_SET_HOST (1 << 4)
+#define MAX_TSO BIT(16)
+#define WQ_ENET_MAX_DESC_LEN BIT(WQ_ENET_LEN_BITS)
+#define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
+
struct enic_port_profile {
u32 set;
u8 request;
@@ -158,6 +170,7 @@ struct enic_rq_stats {
u64 pkt_truncated; /* truncated pkts */
u64 no_skb; /* out of skbs */
u64 desc_skip; /* Rx pkt went into later buffer */
+ u64 pp_alloc_fail; /* page pool alloc failure */
};
struct enic_wq {
@@ -169,6 +182,7 @@ struct enic_wq {
struct enic_rq {
struct vnic_rq vrq;
struct enic_rq_stats stats;
+ struct page_pool *pool;
} ____cacheline_aligned;
/* Per-instance private data structure */
@@ -223,9 +237,9 @@ struct enic {
unsigned int cq_avail;
unsigned int cq_count;
struct enic_rfs_flw_tbl rfs_h;
- u32 rx_copybreak;
u8 rss_key[ENIC_RSS_LEN];
struct vnic_gen_stats gen_stats;
+ enum ext_cq ext_cq;
};
static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev)
@@ -347,5 +361,6 @@ int enic_is_valid_vf(struct enic *enic, int vf);
int enic_is_dynamic(struct enic *enic);
void enic_set_ethtool_ops(struct net_device *netdev);
int __enic_set_rsskey(struct enic *enic);
+void enic_ext_cq(struct enic *enic);
#endif /* _ENIC_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index d607b4f0542c..529160926a96 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -222,9 +222,9 @@ static void enic_get_ringparam(struct net_device *netdev,
struct enic *enic = netdev_priv(netdev);
struct vnic_enet_config *c = &enic->config;
- ring->rx_max_pending = ENIC_MAX_RQ_DESCS;
+ ring->rx_max_pending = c->max_rq_ring;
ring->rx_pending = c->rq_desc_count;
- ring->tx_max_pending = ENIC_MAX_WQ_DESCS;
+ ring->tx_max_pending = c->max_wq_ring;
ring->tx_pending = c->wq_desc_count;
}
@@ -252,18 +252,18 @@ static int enic_set_ringparam(struct net_device *netdev,
}
rx_pending = c->rq_desc_count;
tx_pending = c->wq_desc_count;
- if (ring->rx_pending > ENIC_MAX_RQ_DESCS ||
+ if (ring->rx_pending > c->max_rq_ring ||
ring->rx_pending < ENIC_MIN_RQ_DESCS) {
netdev_info(netdev, "rx pending (%u) not in range [%u,%u]",
ring->rx_pending, ENIC_MIN_RQ_DESCS,
- ENIC_MAX_RQ_DESCS);
+ c->max_rq_ring);
return -EINVAL;
}
- if (ring->tx_pending > ENIC_MAX_WQ_DESCS ||
+ if (ring->tx_pending > c->max_wq_ring ||
ring->tx_pending < ENIC_MIN_WQ_DESCS) {
netdev_info(netdev, "tx pending (%u) not in range [%u,%u]",
ring->tx_pending, ENIC_MIN_WQ_DESCS,
- ENIC_MAX_WQ_DESCS);
+ c->max_wq_ring);
return -EINVAL;
}
if (running)
@@ -608,43 +608,6 @@ static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return ret;
}
-static int enic_get_tunable(struct net_device *dev,
- const struct ethtool_tunable *tuna, void *data)
-{
- struct enic *enic = netdev_priv(dev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- *(u32 *)data = enic->rx_copybreak;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static int enic_set_tunable(struct net_device *dev,
- const struct ethtool_tunable *tuna,
- const void *data)
-{
- struct enic *enic = netdev_priv(dev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- enic->rx_copybreak = *(u32 *)data;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
static u32 enic_get_rxfh_key_size(struct net_device *netdev)
{
return ENIC_RSS_LEN;
@@ -727,8 +690,6 @@ static const struct ethtool_ops enic_ethtool_ops = {
.get_coalesce = enic_get_coalesce,
.set_coalesce = enic_set_coalesce,
.get_rxnfc = enic_get_rxnfc,
- .get_tunable = enic_get_tunable,
- .set_tunable = enic_set_tunable,
.get_rxfh_key_size = enic_get_rxfh_key_size,
.get_rxfh = enic_get_rxfh,
.set_rxfh = enic_set_rxfh,
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 49f6cab01ed5..54aa3953bf7b 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -58,18 +58,15 @@
#include "enic_dev.h"
#include "enic_pp.h"
#include "enic_clsf.h"
+#include "enic_rq.h"
+#include "enic_wq.h"
#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ)
-#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS)
-#define MAX_TSO (1 << 16)
-#define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */
-#define RX_COPYBREAK_DEFAULT 256
-
/* Supported devices */
static const struct pci_device_id enic_id_table[] = {
{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
@@ -322,54 +319,6 @@ int enic_is_valid_vf(struct enic *enic, int vf)
#endif
}
-static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
-{
- struct enic *enic = vnic_dev_priv(wq->vdev);
-
- if (buf->sop)
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_TO_DEVICE);
- else
- dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_TO_DEVICE);
-
- if (buf->os_buf)
- dev_kfree_skb_any(buf->os_buf);
-}
-
-static void enic_wq_free_buf(struct vnic_wq *wq,
- struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(wq->vdev);
-
- enic->wq[wq->index].stats.cq_work++;
- enic->wq[wq->index].stats.cq_bytes += buf->len;
- enic_free_wq_buf(wq, buf);
-}
-
-static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- u8 type, u16 q_number, u16 completed_index, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(vdev);
-
- spin_lock(&enic->wq[q_number].lock);
-
- vnic_wq_service(&enic->wq[q_number].vwq, cq_desc,
- completed_index, enic_wq_free_buf,
- opaque);
-
- if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
- vnic_wq_desc_avail(&enic->wq[q_number].vwq) >=
- (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) {
- netif_wake_subqueue(enic->netdev, q_number);
- enic->wq[q_number].stats.wake++;
- }
-
- spin_unlock(&enic->wq[q_number].lock);
-
- return 0;
-}
-
static bool enic_log_q_error(struct enic *enic)
{
unsigned int i;
@@ -1313,243 +1262,6 @@ nla_put_failure:
return -EMSGSIZE;
}
-static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
-{
- struct enic *enic = vnic_dev_priv(rq->vdev);
-
- if (!buf->os_buf)
- return;
-
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(buf->os_buf);
- buf->os_buf = NULL;
-}
-
-static int enic_rq_alloc_buf(struct vnic_rq *rq)
-{
- struct enic *enic = vnic_dev_priv(rq->vdev);
- struct net_device *netdev = enic->netdev;
- struct sk_buff *skb;
- unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
- unsigned int os_buf_index = 0;
- dma_addr_t dma_addr;
- struct vnic_rq_buf *buf = rq->to_use;
-
- if (buf->os_buf) {
- enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
- buf->len);
-
- return 0;
- }
- skb = netdev_alloc_skb_ip_align(netdev, len);
- if (!skb) {
- enic->rq[rq->index].stats.no_skb++;
- return -ENOMEM;
- }
-
- dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
- DMA_FROM_DEVICE);
- if (unlikely(enic_dma_map_check(enic, dma_addr))) {
- dev_kfree_skb(skb);
- return -ENOMEM;
- }
-
- enic_queue_rq_desc(rq, skb, os_buf_index,
- dma_addr, len);
-
- return 0;
-}
-
-static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
- u32 pkt_len)
-{
- if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
- pkt_size->large_pkt_bytes_cnt += pkt_len;
- else
- pkt_size->small_pkt_bytes_cnt += pkt_len;
-}
-
-static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
- struct vnic_rq_buf *buf, u16 len)
-{
- struct enic *enic = netdev_priv(netdev);
- struct sk_buff *new_skb;
-
- if (len > enic->rx_copybreak)
- return false;
- new_skb = netdev_alloc_skb_ip_align(netdev, len);
- if (!new_skb)
- return false;
- dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len,
- DMA_FROM_DEVICE);
- memcpy(new_skb->data, (*skb)->data, len);
- *skb = new_skb;
-
- return true;
-}
-
-static void enic_rq_indicate_buf(struct vnic_rq *rq,
- struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
- int skipped, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(rq->vdev);
- struct net_device *netdev = enic->netdev;
- struct sk_buff *skb;
- struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
- struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats;
-
- u8 type, color, eop, sop, ingress_port, vlan_stripped;
- u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
- u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
- u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
- u8 packet_error;
- u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
- u32 rss_hash;
- bool outer_csum_ok = true, encap = false;
-
- rqstats->packets++;
- if (skipped) {
- rqstats->desc_skip++;
- return;
- }
-
- skb = buf->os_buf;
-
- cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
- &type, &color, &q_number, &completed_index,
- &ingress_port, &fcoe, &eop, &sop, &rss_type,
- &csum_not_calc, &rss_hash, &bytes_written,
- &packet_error, &vlan_stripped, &vlan_tci, &checksum,
- &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
- &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
- &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
- &fcs_ok);
-
- if (packet_error) {
-
- if (!fcs_ok) {
- if (bytes_written > 0)
- rqstats->bad_fcs++;
- else if (bytes_written == 0)
- rqstats->pkt_truncated++;
- }
-
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- buf->os_buf = NULL;
-
- return;
- }
-
- if (eop && bytes_written > 0) {
-
- /* Good receive
- */
- rqstats->bytes += bytes_written;
- if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
- buf->os_buf = NULL;
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr,
- buf->len, DMA_FROM_DEVICE);
- }
- prefetch(skb->data - NET_IP_ALIGN);
-
- skb_put(skb, bytes_written);
- skb->protocol = eth_type_trans(skb, netdev);
- skb_record_rx_queue(skb, q_number);
- if ((netdev->features & NETIF_F_RXHASH) && rss_hash &&
- (type == 3)) {
- switch (rss_type) {
- case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4:
- case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
- case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
- skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
- rqstats->l4_rss_hash++;
- break;
- case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
- case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
- case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
- skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
- rqstats->l3_rss_hash++;
- break;
- }
- }
- if (enic->vxlan.vxlan_udp_port_number) {
- switch (enic->vxlan.patch_level) {
- case 0:
- if (fcoe) {
- encap = true;
- outer_csum_ok = fcoe_fc_crc_ok;
- }
- break;
- case 2:
- if ((type == 7) &&
- (rss_hash & BIT(0))) {
- encap = true;
- outer_csum_ok = (rss_hash & BIT(1)) &&
- (rss_hash & BIT(2));
- }
- break;
- }
- }
-
- /* Hardware does not provide whole packet checksum. It only
- * provides pseudo checksum. Since hw validates the packet
- * checksum but not provide us the checksum value. use
- * CHECSUM_UNNECESSARY.
- *
- * In case of encap pkt tcp_udp_csum_ok/tcp_udp_csum_ok is
- * inner csum_ok. outer_csum_ok is set by hw when outer udp
- * csum is correct or is zero.
- */
- if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
- tcp_udp_csum_ok && outer_csum_ok &&
- (ipv4_csum_ok || ipv6)) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = encap;
- if (encap)
- rqstats->csum_unnecessary_encap++;
- else
- rqstats->csum_unnecessary++;
- }
-
- if (vlan_stripped) {
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
- rqstats->vlan_stripped++;
- }
- skb_mark_napi_id(skb, &enic->napi[rq->index]);
- if (!(netdev->features & NETIF_F_GRO))
- netif_receive_skb(skb);
- else
- napi_gro_receive(&enic->napi[q_number], skb);
- if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
- enic_intr_update_pkt_size(&cq->pkt_size_counter,
- bytes_written);
- } else {
-
- /* Buffer overflow
- */
- rqstats->pkt_truncated++;
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- buf->os_buf = NULL;
- }
-}
-
-static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- u8 type, u16 q_number, u16 completed_index, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(vdev);
-
- vnic_rq_service(&enic->rq[q_number].vrq, cq_desc,
- completed_index, VNIC_RQ_RETURN_DESC,
- enic_rq_indicate_buf, opaque);
-
- return 0;
-}
-
static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
unsigned int intr = enic_msix_rq_intr(enic, rq->index);
@@ -1620,12 +1332,10 @@ static int enic_poll(struct napi_struct *napi, int budget)
unsigned int work_done, rq_work_done = 0, wq_work_done;
int err;
- wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
- enic_wq_service, NULL);
+ wq_work_done = enic_wq_cq_service(enic, cq_wq, wq_work_to_do);
if (budget > 0)
- rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
- rq_work_to_do, enic_rq_service, NULL);
+ rq_work_done = enic_rq_cq_service(enic, cq_rq, rq_work_to_do);
/* Accumulate intr event credits for this polling
* cycle. An intr event is the completion of a
@@ -1724,8 +1434,8 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
wq_irq = wq->index;
cq = enic_cq_wq(enic, wq_irq);
intr = enic_msix_wq_intr(enic, wq_irq);
- wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do,
- enic_wq_service, NULL);
+
+ wq_work_done = enic_wq_cq_service(enic, cq, wq_work_to_do);
vnic_intr_return_credits(&enic->intr[intr], wq_work_done,
0 /* don't unmask intr */,
@@ -1754,8 +1464,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
*/
if (budget > 0)
- work_done = vnic_cq_service(&enic->cq[cq],
- work_to_do, enic_rq_service, NULL);
+ work_done = enic_rq_cq_service(enic, cq, work_to_do);
/* Return intr event credits for this polling
* cycle. An intr event is the completion of a
@@ -1972,6 +1681,17 @@ static int enic_open(struct net_device *netdev)
struct enic *enic = netdev_priv(netdev);
unsigned int i;
int err, ret;
+ unsigned int max_pkt_len = netdev->mtu + VLAN_ETH_HLEN;
+ struct page_pool_params pp_params = {
+ .order = get_order(max_pkt_len),
+ .pool_size = enic->config.rq_desc_count,
+ .nid = dev_to_node(&enic->pdev->dev),
+ .dev = &enic->pdev->dev,
+ .dma_dir = DMA_FROM_DEVICE,
+ .max_len = (max_pkt_len > PAGE_SIZE) ? max_pkt_len : PAGE_SIZE,
+ .netdev = netdev,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ };
err = enic_request_intr(enic);
if (err) {
@@ -1989,6 +1709,16 @@ static int enic_open(struct net_device *netdev)
}
for (i = 0; i < enic->rq_count; i++) {
+ /* create a page pool for each RQ */
+ pp_params.napi = &enic->napi[i];
+ pp_params.queue_idx = i;
+ enic->rq[i].pool = page_pool_create(&pp_params);
+ if (IS_ERR(enic->rq[i].pool)) {
+ err = PTR_ERR(enic->rq[i].pool);
+ enic->rq[i].pool = NULL;
+ goto err_out_free_rq;
+ }
+
/* enable rq before updating rq desc */
vnic_rq_enable(&enic->rq[i].vrq);
vnic_rq_fill(&enic->rq[i].vrq, enic_rq_alloc_buf);
@@ -2029,8 +1759,11 @@ static int enic_open(struct net_device *netdev)
err_out_free_rq:
for (i = 0; i < enic->rq_count; i++) {
ret = vnic_rq_disable(&enic->rq[i].vrq);
- if (!ret)
+ if (!ret) {
vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
+ page_pool_destroy(enic->rq[i].pool);
+ enic->rq[i].pool = NULL;
+ }
}
enic_dev_notify_unset(enic);
err_out_free_intr:
@@ -2088,8 +1821,11 @@ static int enic_stop(struct net_device *netdev)
for (i = 0; i < enic->wq_count; i++)
vnic_wq_clean(&enic->wq[i].vwq, enic_free_wq_buf);
- for (i = 0; i < enic->rq_count; i++)
+ for (i = 0; i < enic->rq_count; i++) {
vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
+ page_pool_destroy(enic->rq[i].pool);
+ enic->rq[i].pool = NULL;
+ }
for (i = 0; i < enic->cq_count; i++)
vnic_cq_clean(&enic->cq[i]);
for (i = 0; i < enic->intr_count; i++)
@@ -2405,6 +2141,7 @@ static void enic_reset(struct work_struct *work)
enic_init_vnic_resources(enic);
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
+ enic_ext_cq(enic);
enic_open(enic->netdev);
/* Allow infiniband to fiddle with the device again */
@@ -2431,6 +2168,7 @@ static void enic_tx_hang_reset(struct work_struct *work)
enic_init_vnic_resources(enic);
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
+ enic_ext_cq(enic);
enic_open(enic->netdev);
/* Allow infiniband to fiddle with the device again */
@@ -2599,6 +2337,7 @@ static void enic_get_queue_stats_rx(struct net_device *dev, int idx,
rxs->hw_drop_overruns = rqstats->pkt_truncated;
rxs->csum_unnecessary = rqstats->csum_unnecessary +
rqstats->csum_unnecessary_encap;
+ rxs->alloc_fail = rqstats->pp_alloc_fail;
}
static void enic_get_queue_stats_tx(struct net_device *dev, int idx,
@@ -2626,6 +2365,7 @@ static void enic_get_base_stats(struct net_device *dev,
rxs->hw_drops = 0;
rxs->hw_drop_overruns = 0;
rxs->csum_unnecessary = 0;
+ rxs->alloc_fail = 0;
txs->bytes = 0;
txs->packets = 0;
txs->csum_none = 0;
@@ -2803,6 +2543,8 @@ static int enic_dev_init(struct enic *enic)
enic_get_res_counts(enic);
+ enic_ext_cq(enic);
+
err = enic_alloc_enic_resources(enic);
if (err) {
dev_err(dev, "Failed to allocate enic resources\n");
@@ -3179,7 +2921,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(dev, "Cannot register net device, aborting\n");
goto err_out_dev_deinit;
}
- enic->rx_copybreak = RX_COPYBREAK_DEFAULT;
return 0;
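
For reference, a rough sizing example for the per-RQ page pool configured in enic_open() above, assuming 4 KiB pages and illustrative MTUs (VLAN_ETH_HLEN is 18):

    /* Worked example of the pp_params sizing above (assumes PAGE_SIZE 4096):
     *   MTU 1500: max_pkt_len = 1518 -> .order = get_order(1518) = 0,
     *             .max_len = PAGE_SIZE = 4096
     *   MTU 9000: max_pkt_len = 9018 -> .order = get_order(9018) = 2
     *             (16 KiB compound pages), .max_len = 9018
     *   .pool_size = enic->config.rq_desc_count, i.e. one buffer per RQ entry,
     *   and PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV lets the pool own the DMA
     *   mapping and the device-side syncs.
     */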
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.c b/drivers/net/ethernet/cisco/enic/enic_res.c
index 126125199833..bbd3143ed73e 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.c
+++ b/drivers/net/ethernet/cisco/enic/enic_res.c
@@ -59,31 +59,38 @@ int enic_get_vnic_config(struct enic *enic)
GET_CONFIG(intr_timer_usec);
GET_CONFIG(loop_tag);
GET_CONFIG(num_arfs);
+ GET_CONFIG(max_rq_ring);
+ GET_CONFIG(max_wq_ring);
+ GET_CONFIG(max_cq_ring);
+
+ if (!c->max_wq_ring)
+ c->max_wq_ring = ENIC_MAX_WQ_DESCS_DEFAULT;
+ if (!c->max_rq_ring)
+ c->max_rq_ring = ENIC_MAX_RQ_DESCS_DEFAULT;
+ if (!c->max_cq_ring)
+ c->max_cq_ring = ENIC_MAX_CQ_DESCS_DEFAULT;
c->wq_desc_count =
- min_t(u32, ENIC_MAX_WQ_DESCS,
- max_t(u32, ENIC_MIN_WQ_DESCS,
- c->wq_desc_count));
+ min_t(u32, c->max_wq_ring,
+ max_t(u32, ENIC_MIN_WQ_DESCS, c->wq_desc_count));
c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
c->rq_desc_count =
- min_t(u32, ENIC_MAX_RQ_DESCS,
- max_t(u32, ENIC_MIN_RQ_DESCS,
- c->rq_desc_count));
+ min_t(u32, c->max_rq_ring,
+ max_t(u32, ENIC_MIN_RQ_DESCS, c->rq_desc_count));
c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
if (c->mtu == 0)
c->mtu = 1500;
- c->mtu = min_t(u16, ENIC_MAX_MTU,
- max_t(u16, ENIC_MIN_MTU,
- c->mtu));
+ c->mtu = min_t(u16, ENIC_MAX_MTU, max_t(u16, ENIC_MIN_MTU, c->mtu));
c->intr_timer_usec = min_t(u32, c->intr_timer_usec,
vnic_dev_get_intr_coal_timer_max(enic->vdev));
dev_info(enic_get_dev(enic),
- "vNIC MAC addr %pM wq/rq %d/%d mtu %d\n",
- enic->mac_addr, c->wq_desc_count, c->rq_desc_count, c->mtu);
+ "vNIC MAC addr %pM wq/rq %d/%d max wq/rq/cq %d/%d/%d mtu %d\n",
+ enic->mac_addr, c->wq_desc_count, c->rq_desc_count,
+ c->max_wq_ring, c->max_rq_ring, c->max_cq_ring, c->mtu);
dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s "
"tso/lro %s/%s rss %s intr mode %s type %s timer %d usec "
@@ -312,6 +319,7 @@ void enic_init_vnic_resources(struct enic *enic)
int enic_alloc_vnic_resources(struct enic *enic)
{
enum vnic_dev_intr_mode intr_mode;
+ int rq_cq_desc_size;
unsigned int i;
int err;
@@ -326,6 +334,24 @@ int enic_alloc_vnic_resources(struct enic *enic)
intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" :
"unknown");
+ switch (enic->ext_cq) {
+ case ENIC_RQ_CQ_ENTRY_SIZE_16:
+ rq_cq_desc_size = 16;
+ break;
+ case ENIC_RQ_CQ_ENTRY_SIZE_32:
+ rq_cq_desc_size = 32;
+ break;
+ case ENIC_RQ_CQ_ENTRY_SIZE_64:
+ rq_cq_desc_size = 64;
+ break;
+ default:
+ dev_err(enic_get_dev(enic),
+ "Unable to determine rq cq desc size: %d",
+ enic->ext_cq);
+ err = -ENODEV;
+ goto err_out;
+ }
+
/* Allocate queue resources
*/
@@ -348,8 +374,8 @@ int enic_alloc_vnic_resources(struct enic *enic)
for (i = 0; i < enic->cq_count; i++) {
if (i < enic->rq_count)
err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
- enic->config.rq_desc_count,
- sizeof(struct cq_enet_rq_desc));
+ enic->config.rq_desc_count,
+ rq_cq_desc_size);
else
err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
enic->config.wq_desc_count,
@@ -380,6 +406,39 @@ int enic_alloc_vnic_resources(struct enic *enic)
err_out_cleanup:
enic_free_vnic_resources(enic);
-
+err_out:
return err;
}
+
+/*
+ * CMD_CQ_ENTRY_SIZE_SET can fail on older hw generations that don't support
+ * that command
+ */
+void enic_ext_cq(struct enic *enic)
+{
+ u64 a0 = CMD_CQ_ENTRY_SIZE_SET, a1 = 0;
+ int wait = 1000;
+ int ret;
+
+ spin_lock_bh(&enic->devcmd_lock);
+ ret = vnic_dev_cmd(enic->vdev, CMD_CAPABILITY, &a0, &a1, wait);
+ if (ret || a0) {
+ dev_info(&enic->pdev->dev,
+ "CMD_CQ_ENTRY_SIZE_SET not supported.");
+ enic->ext_cq = ENIC_RQ_CQ_ENTRY_SIZE_16;
+ goto out;
+ }
+ a1 &= VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT;
+ enic->ext_cq = fls(a1) - 1;
+ a0 = VNIC_RQ_ALL;
+ a1 = enic->ext_cq;
+ ret = vnic_dev_cmd(enic->vdev, CMD_CQ_ENTRY_SIZE_SET, &a0, &a1, wait);
+ if (ret) {
+ dev_info(&enic->pdev->dev, "CMD_CQ_ENTRY_SIZE_SET failed.");
+ enic->ext_cq = ENIC_RQ_CQ_ENTRY_SIZE_16;
+ }
+out:
+ spin_unlock_bh(&enic->devcmd_lock);
+ dev_info(&enic->pdev->dev, "CQ entry size set to %d bytes",
+ 16 << enic->ext_cq);
+}
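
enic_ext_cq() above first asks the firmware whether it understands CMD_CQ_ENTRY_SIZE_SET (CMD_CAPABILITY with a0 set to the command number), then takes the highest bit of the returned capability bitmap and programs that entry size for all RQs, falling back to 16-byte entries on any failure. A small standalone sketch of the bitmap-to-size mapping; the capability value is made up and fls_u32() stands in for the kernel's fls():

    #include <stdio.h>

    /* 1-based index of the highest set bit, like the kernel's fls() */
    static int fls_u32(unsigned int x)
    {
            int n = 0;

            while (x) {
                    n++;
                    x >>= 1;
            }
            return n;
    }

    int main(void)
    {
            unsigned int a1 = 0x7;  /* example: fw reports 16-, 32- and 64-byte support */
            int ext_cq = fls_u32(a1 & 0x7) - 1;  /* VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT */

            printf("ext_cq=%d -> CQ entry size %d bytes\n", ext_cq, 16 << ext_cq);
            /* a1 = 0x1 would give ext_cq = 0 -> 16 bytes */
            return 0;
    }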
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.h b/drivers/net/ethernet/cisco/enic/enic_res.h
index b8ee42d297aa..02dca1ae4a22 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.h
+++ b/drivers/net/ethernet/cisco/enic/enic_res.h
@@ -12,10 +12,13 @@
#include "vnic_wq.h"
#include "vnic_rq.h"
-#define ENIC_MIN_WQ_DESCS 64
-#define ENIC_MAX_WQ_DESCS 4096
-#define ENIC_MIN_RQ_DESCS 64
-#define ENIC_MAX_RQ_DESCS 4096
+#define ENIC_MIN_WQ_DESCS 64
+#define ENIC_MAX_WQ_DESCS_DEFAULT 4096
+#define ENIC_MAX_WQ_DESCS 16384
+#define ENIC_MIN_RQ_DESCS 64
+#define ENIC_MAX_RQ_DESCS 16384
+#define ENIC_MAX_RQ_DESCS_DEFAULT 4096
+#define ENIC_MAX_CQ_DESCS_DEFAULT (64 * 1024)
#define ENIC_MIN_MTU ETH_MIN_MTU
#define ENIC_MAX_MTU 9000
diff --git a/drivers/net/ethernet/cisco/enic/enic_rq.c b/drivers/net/ethernet/cisco/enic/enic_rq.c
new file mode 100644
index 000000000000..ccbf5c9a21d0
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_rq.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2024 Cisco Systems, Inc. All rights reserved.
+
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <net/busy_poll.h>
+#include "enic.h"
+#include "enic_res.h"
+#include "enic_rq.h"
+#include "vnic_rq.h"
+#include "cq_enet_desc.h"
+
+#define ENIC_LARGE_PKT_THRESHOLD 1000
+
+static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
+ u32 pkt_len)
+{
+ if (pkt_len > ENIC_LARGE_PKT_THRESHOLD)
+ pkt_size->large_pkt_bytes_cnt += pkt_len;
+ else
+ pkt_size->small_pkt_bytes_cnt += pkt_len;
+}
+
+static void enic_rq_cq_desc_dec(void *cq_desc, u8 cq_desc_size, u8 *type,
+ u8 *color, u16 *q_number, u16 *completed_index)
+{
+ /* type_color is the last field for all cq structs */
+ u8 type_color;
+
+ switch (cq_desc_size) {
+ case VNIC_RQ_CQ_ENTRY_SIZE_16: {
+ struct cq_enet_rq_desc *desc =
+ (struct cq_enet_rq_desc *)cq_desc;
+ type_color = desc->type_color;
+
+ /* Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
+ CQ_DESC_Q_NUM_MASK;
+ *completed_index = le16_to_cpu(desc->completed_index_flags) &
+ CQ_DESC_COMP_NDX_MASK;
+ break;
+ }
+ case VNIC_RQ_CQ_ENTRY_SIZE_32: {
+ struct cq_enet_rq_desc_32 *desc =
+ (struct cq_enet_rq_desc_32 *)cq_desc;
+ type_color = desc->type_color;
+
+ /* Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
+ CQ_DESC_Q_NUM_MASK;
+ *completed_index = le16_to_cpu(desc->completed_index_flags) &
+ CQ_DESC_COMP_NDX_MASK;
+ *completed_index |= (desc->fetch_index_flags & CQ_DESC_32_FI_MASK) <<
+ CQ_DESC_COMP_NDX_BITS;
+ break;
+ }
+ case VNIC_RQ_CQ_ENTRY_SIZE_64: {
+ struct cq_enet_rq_desc_64 *desc =
+ (struct cq_enet_rq_desc_64 *)cq_desc;
+ type_color = desc->type_color;
+
+ /* Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
+ CQ_DESC_Q_NUM_MASK;
+ *completed_index = le16_to_cpu(desc->completed_index_flags) &
+ CQ_DESC_COMP_NDX_MASK;
+ *completed_index |= (desc->fetch_index_flags & CQ_DESC_64_FI_MASK) <<
+ CQ_DESC_COMP_NDX_BITS;
+ break;
+ }
+ }
+
+ *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+ *type = type_color & CQ_DESC_TYPE_MASK;
+}
+
+static void enic_rq_set_skb_flags(struct vnic_rq *vrq, u8 type, u32 rss_hash,
+ u8 rss_type, u8 fcoe, u8 fcoe_fc_crc_ok,
+ u8 vlan_stripped, u8 csum_not_calc,
+ u8 tcp_udp_csum_ok, u8 ipv6, u8 ipv4_csum_ok,
+ u16 vlan_tci, struct sk_buff *skb)
+{
+ struct enic *enic = vnic_dev_priv(vrq->vdev);
+ struct net_device *netdev = enic->netdev;
+ struct enic_rq_stats *rqstats = &enic->rq[vrq->index].stats;
+ bool outer_csum_ok = true, encap = false;
+
+ if ((netdev->features & NETIF_F_RXHASH) && rss_hash && type == 3) {
+ switch (rss_type) {
+ case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
+ skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
+ rqstats->l4_rss_hash++;
+ break;
+ case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
+ skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
+ rqstats->l3_rss_hash++;
+ break;
+ }
+ }
+ if (enic->vxlan.vxlan_udp_port_number) {
+ switch (enic->vxlan.patch_level) {
+ case 0:
+ if (fcoe) {
+ encap = true;
+ outer_csum_ok = fcoe_fc_crc_ok;
+ }
+ break;
+ case 2:
+ if (type == 7 && (rss_hash & BIT(0))) {
+ encap = true;
+ outer_csum_ok = (rss_hash & BIT(1)) &&
+ (rss_hash & BIT(2));
+ }
+ break;
+ }
+ }
+
+ /* Hardware does not provide the whole packet checksum. It only
+ * provides a pseudo checksum. Since hw validates the packet
+ * checksum but does not give us the checksum value, use
+ * CHECKSUM_UNNECESSARY.
+ *
+ * In case of an encap pkt, tcp_udp_csum_ok is the
+ * inner csum_ok. outer_csum_ok is set by hw when the outer udp
+ * csum is correct or is zero.
+ */
+ if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
+ tcp_udp_csum_ok && outer_csum_ok && (ipv4_csum_ok || ipv6)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = encap;
+ if (encap)
+ rqstats->csum_unnecessary_encap++;
+ else
+ rqstats->csum_unnecessary++;
+ }
+
+ if (vlan_stripped) {
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
+ rqstats->vlan_stripped++;
+ }
+}
+
+/*
+ * The cq_enet_rq_desc accessors below use only the first 15 bytes of the cq
+ * descriptor, which are identical for all cq entry sizes (16, 32 and 64 bytes).
+ */
+static void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, u8 *ingress_port,
+ u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
+ u8 *csum_not_calc, u32 *rss_hash,
+ u16 *bytes_written, u8 *packet_error,
+ u8 *vlan_stripped, u16 *vlan_tci,
+ u16 *checksum, u8 *fcoe_sof,
+ u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error,
+ u8 *fcoe_eof, u8 *tcp_udp_csum_ok, u8 *udp,
+ u8 *tcp, u8 *ipv4_csum_ok, u8 *ipv6, u8 *ipv4,
+ u8 *ipv4_fragment, u8 *fcs_ok)
+{
+ u16 completed_index_flags;
+ u16 q_number_rss_type_flags;
+ u16 bytes_written_flags;
+
+ completed_index_flags = le16_to_cpu(desc->completed_index_flags);
+ q_number_rss_type_flags =
+ le16_to_cpu(desc->q_number_rss_type_flags);
+ bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+
+ *ingress_port = (completed_index_flags &
+ CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
+ *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
+ 1 : 0;
+ *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
+ 1 : 0;
+ *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
+ 1 : 0;
+
+ *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
+ CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
+ *csum_not_calc = (q_number_rss_type_flags &
+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
+
+ *rss_hash = le32_to_cpu(desc->rss_hash);
+
+ *bytes_written = bytes_written_flags &
+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+ *packet_error = (bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
+ *vlan_stripped = (bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
+
+ /*
+ * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12)
+ */
+ *vlan_tci = le16_to_cpu(desc->vlan);
+
+ if (*fcoe) {
+ *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
+ CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
+ *fcoe_fc_crc_ok = (desc->flags &
+ CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
+ *fcoe_enc_error = (desc->flags &
+ CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
+ *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >>
+ CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
+ CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
+ *checksum = 0;
+ } else {
+ *fcoe_sof = 0;
+ *fcoe_fc_crc_ok = 0;
+ *fcoe_enc_error = 0;
+ *fcoe_eof = 0;
+ *checksum = le16_to_cpu(desc->checksum_fcoe);
+ }
+
+ *tcp_udp_csum_ok =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
+ *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
+ *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
+ *ipv4_csum_ok =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
+ *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
+ *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
+ *ipv4_fragment =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
+ *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
+}
+
+static bool enic_rq_pkt_error(struct vnic_rq *vrq, u8 packet_error, u8 fcs_ok,
+ u16 bytes_written)
+{
+ struct enic *enic = vnic_dev_priv(vrq->vdev);
+ struct enic_rq_stats *rqstats = &enic->rq[vrq->index].stats;
+
+ if (packet_error) {
+ if (!fcs_ok) {
+ if (bytes_written > 0)
+ rqstats->bad_fcs++;
+ else if (bytes_written == 0)
+ rqstats->pkt_truncated++;
+ }
+ return true;
+ }
+ return false;
+}
+
+int enic_rq_alloc_buf(struct vnic_rq *rq)
+{
+ struct enic *enic = vnic_dev_priv(rq->vdev);
+ struct net_device *netdev = enic->netdev;
+ struct enic_rq *erq = &enic->rq[rq->index];
+ struct enic_rq_stats *rqstats = &erq->stats;
+ unsigned int offset = 0;
+ unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
+ unsigned int os_buf_index = 0;
+ dma_addr_t dma_addr;
+ struct vnic_rq_buf *buf = rq->to_use;
+ struct page *page;
+ unsigned int truesize = len;
+
+ if (buf->os_buf) {
+ enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
+ buf->len);
+
+ return 0;
+ }
+
+ page = page_pool_dev_alloc(erq->pool, &offset, &truesize);
+ if (unlikely(!page)) {
+ rqstats->pp_alloc_fail++;
+ return -ENOMEM;
+ }
+ buf->offset = offset;
+ buf->truesize = truesize;
+ dma_addr = page_pool_get_dma_addr(page) + offset;
+ enic_queue_rq_desc(rq, (void *)page, os_buf_index, dma_addr, len);
+
+ return 0;
+}
+
+void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
+{
+ struct enic *enic = vnic_dev_priv(rq->vdev);
+ struct enic_rq *erq = &enic->rq[rq->index];
+
+ if (!buf->os_buf)
+ return;
+
+ page_pool_put_full_page(erq->pool, (struct page *)buf->os_buf, true);
+ buf->os_buf = NULL;
+}
+
+static void enic_rq_indicate_buf(struct enic *enic, struct vnic_rq *rq,
+ struct vnic_rq_buf *buf, void *cq_desc,
+ u8 type, u16 q_number, u16 completed_index)
+{
+ struct sk_buff *skb;
+ struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats;
+ struct napi_struct *napi;
+
+ u8 eop, sop, ingress_port, vlan_stripped;
+ u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
+ u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
+ u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
+ u8 packet_error;
+ u16 bytes_written, vlan_tci, checksum;
+ u32 rss_hash;
+
+ rqstats->packets++;
+
+ cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, &ingress_port,
+ &fcoe, &eop, &sop, &rss_type, &csum_not_calc,
+ &rss_hash, &bytes_written, &packet_error,
+ &vlan_stripped, &vlan_tci, &checksum, &fcoe_sof,
+ &fcoe_fc_crc_ok, &fcoe_enc_error, &fcoe_eof,
+ &tcp_udp_csum_ok, &udp, &tcp, &ipv4_csum_ok, &ipv6,
+ &ipv4, &ipv4_fragment, &fcs_ok);
+
+ if (enic_rq_pkt_error(rq, packet_error, fcs_ok, bytes_written))
+ return;
+
+ if (eop && bytes_written > 0) {
+ /* Good receive
+ */
+ rqstats->bytes += bytes_written;
+ napi = &enic->napi[rq->index];
+ skb = napi_get_frags(napi);
+ if (unlikely(!skb)) {
+ net_warn_ratelimited("%s: skb alloc error rq[%d], desc[%d]\n",
+ enic->netdev->name, rq->index,
+ completed_index);
+ rqstats->no_skb++;
+ return;
+ }
+
+ prefetch(skb->data - NET_IP_ALIGN);
+
+ dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr,
+ bytes_written, DMA_FROM_DEVICE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ (struct page *)buf->os_buf, buf->offset,
+ bytes_written, buf->truesize);
+ skb_record_rx_queue(skb, q_number);
+ enic_rq_set_skb_flags(rq, type, rss_hash, rss_type, fcoe,
+ fcoe_fc_crc_ok, vlan_stripped,
+ csum_not_calc, tcp_udp_csum_ok, ipv6,
+ ipv4_csum_ok, vlan_tci, skb);
+ skb_mark_for_recycle(skb);
+ napi_gro_frags(napi);
+ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+ enic_intr_update_pkt_size(&cq->pkt_size_counter,
+ bytes_written);
+ buf->os_buf = NULL;
+ buf->dma_addr = 0;
+ buf = buf->next;
+ } else {
+ /* Buffer overflow
+ */
+ rqstats->pkt_truncated++;
+ }
+}
+
+static void enic_rq_service(struct enic *enic, void *cq_desc, u8 type,
+ u16 q_number, u16 completed_index)
+{
+ struct enic_rq_stats *rqstats = &enic->rq[q_number].stats;
+ struct vnic_rq *vrq = &enic->rq[q_number].vrq;
+ struct vnic_rq_buf *vrq_buf = vrq->to_clean;
+ int skipped;
+
+ while (1) {
+ skipped = (vrq_buf->index != completed_index);
+ if (!skipped)
+ enic_rq_indicate_buf(enic, vrq, vrq_buf, cq_desc, type,
+ q_number, completed_index);
+ else
+ rqstats->desc_skip++;
+
+ vrq->ring.desc_avail++;
+ vrq->to_clean = vrq_buf->next;
+ vrq_buf = vrq_buf->next;
+ if (!skipped)
+ break;
+ }
+}
+
+unsigned int enic_rq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do)
+{
+ struct vnic_cq *cq = &enic->cq[cq_index];
+ void *cq_desc = vnic_cq_to_clean(cq);
+ u16 q_number, completed_index;
+ unsigned int work_done = 0;
+ u8 type, color;
+
+ enic_rq_cq_desc_dec(cq_desc, enic->ext_cq, &type, &color, &q_number,
+ &completed_index);
+
+ while (color != cq->last_color) {
+ enic_rq_service(enic, cq_desc, type, q_number, completed_index);
+ vnic_cq_inc_to_clean(cq);
+
+ if (++work_done >= work_to_do)
+ break;
+
+ cq_desc = vnic_cq_to_clean(cq);
+ enic_rq_cq_desc_dec(cq_desc, enic->ext_cq, &type, &color,
+ &q_number, &completed_index);
+ }
+
+ return work_done;
+}
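
Taken together, the new file replaces the skb-per-descriptor RX path (and the rx_copybreak tunable removed elsewhere in this diff) with page-pool-backed frags delivered through napi_gro_frags(). A condensed outline of the buffer lifecycle implemented above:

    /* RX buffer lifecycle with the per-RQ page pool (summary of the code
     * above, not a standalone program):
     *
     *   refill, enic_rq_alloc_buf():
     *       page = page_pool_dev_alloc(erq->pool, &offset, &truesize);
     *       dma  = page_pool_get_dma_addr(page) + offset;
     *       post page + dma to the RQ ring via enic_queue_rq_desc()
     *
     *   completion, enic_rq_indicate_buf():
     *       dma_sync_single_for_cpu() for just bytes_written;
     *       skb = napi_get_frags(napi);
     *       skb_add_rx_frag(skb, ..., page, offset, bytes_written, truesize);
     *       skb_mark_for_recycle(skb);   -> page returns to the pool
     *       napi_gro_frags(napi);
     *
     *   teardown, enic_free_rq_buf():
     *       page_pool_put_full_page(erq->pool, page, true);
     */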
diff --git a/drivers/net/ethernet/cisco/enic/enic_rq.h b/drivers/net/ethernet/cisco/enic/enic_rq.h
new file mode 100644
index 000000000000..98476a7297af
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_rq.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright 2024 Cisco Systems, Inc. All rights reserved.
+ */
+
+unsigned int enic_rq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do);
+int enic_rq_alloc_buf(struct vnic_rq *rq);
+void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
diff --git a/drivers/net/ethernet/cisco/enic/enic_wq.c b/drivers/net/ethernet/cisco/enic/enic_wq.c
new file mode 100644
index 000000000000..07936f8b4231
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_wq.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2025 Cisco Systems, Inc. All rights reserved.
+
+#include <net/netdev_queues.h>
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_wq.h"
+
+#define ENET_CQ_DESC_COMP_NDX_BITS 14
+#define ENET_CQ_DESC_COMP_NDX_MASK GENMASK(ENET_CQ_DESC_COMP_NDX_BITS - 1, 0)
+
+static void enic_wq_cq_desc_dec(const struct cq_desc *desc_arg, bool ext_wq,
+ u8 *type, u8 *color, u16 *q_number,
+ u16 *completed_index)
+{
+ const struct cq_desc *desc = desc_arg;
+ const u8 type_color = desc->type_color;
+
+ *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+
+ /*
+ * Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *type = type_color & CQ_DESC_TYPE_MASK;
+ *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
+
+ if (ext_wq)
+ *completed_index = le16_to_cpu(desc->completed_index) &
+ ENET_CQ_DESC_COMP_NDX_MASK;
+ else
+ *completed_index = le16_to_cpu(desc->completed_index) &
+ CQ_DESC_COMP_NDX_MASK;
+}
+
+void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
+{
+ struct enic *enic = vnic_dev_priv(wq->vdev);
+
+ if (buf->sop)
+ dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
+
+ if (buf->os_buf)
+ dev_kfree_skb_any(buf->os_buf);
+}
+
+static void enic_wq_free_buf(struct vnic_wq *wq, struct cq_desc *cq_desc,
+ struct vnic_wq_buf *buf, void *opaque)
+{
+ struct enic *enic = vnic_dev_priv(wq->vdev);
+
+ enic->wq[wq->index].stats.cq_work++;
+ enic->wq[wq->index].stats.cq_bytes += buf->len;
+ enic_free_wq_buf(wq, buf);
+}
+
+static void enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+ u8 type, u16 q_number, u16 completed_index)
+{
+ struct enic *enic = vnic_dev_priv(vdev);
+
+ spin_lock(&enic->wq[q_number].lock);
+
+ vnic_wq_service(&enic->wq[q_number].vwq, cq_desc,
+ completed_index, enic_wq_free_buf, NULL);
+
+ if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number))
+ && vnic_wq_desc_avail(&enic->wq[q_number].vwq) >=
+ (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) {
+ netif_wake_subqueue(enic->netdev, q_number);
+ enic->wq[q_number].stats.wake++;
+ }
+
+ spin_unlock(&enic->wq[q_number].lock);
+}
+
+unsigned int enic_wq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do)
+{
+ struct vnic_cq *cq = &enic->cq[cq_index];
+ u16 q_number, completed_index;
+ unsigned int work_done = 0;
+ struct cq_desc *cq_desc;
+ u8 type, color;
+ bool ext_wq;
+
+ ext_wq = cq->ring.size > ENIC_MAX_WQ_DESCS_DEFAULT;
+
+ cq_desc = (struct cq_desc *)vnic_cq_to_clean(cq);
+ enic_wq_cq_desc_dec(cq_desc, ext_wq, &type, &color,
+ &q_number, &completed_index);
+
+ while (color != cq->last_color) {
+ enic_wq_service(cq->vdev, cq_desc, type, q_number,
+ completed_index);
+
+ vnic_cq_inc_to_clean(cq);
+
+ if (++work_done >= work_to_do)
+ break;
+
+ cq_desc = (struct cq_desc *)vnic_cq_to_clean(cq);
+ enic_wq_cq_desc_dec(cq_desc, ext_wq, &type, &color,
+ &q_number, &completed_index);
+ }
+
+ return work_done;
+}
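
enic_wq_cq_service() only widens the completed_index mask when the WQ ring is actually configured beyond the old 4096-descriptor default. For reference, the two masks compare as follows:

    /* Mask comparison (illustrative value):
     *   CQ_DESC_COMP_NDX_MASK      = (1 << 12) - 1  = 0x0fff  (rings <= 4096)
     *   ENET_CQ_DESC_COMP_NDX_MASK = GENMASK(13, 0) = 0x3fff  (rings <= 16384)
     * A completed_index field of 0x2abc decodes to 0x0abc with the 12-bit mask
     * but to 0x2abc with the 14-bit one, which is why ext_wq is keyed off
     * cq->ring.size > ENIC_MAX_WQ_DESCS_DEFAULT.
     */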
diff --git a/drivers/net/ethernet/cisco/enic/enic_wq.h b/drivers/net/ethernet/cisco/enic/enic_wq.h
new file mode 100644
index 000000000000..12acb3f2fbc9
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_wq.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright 2025 Cisco Systems, Inc. All rights reserved.
+ */
+
+void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
+unsigned int enic_wq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.h b/drivers/net/ethernet/cisco/enic/vnic_cq.h
index eed5bf59e5d2..0e37f5d5e527 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_cq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.h
@@ -56,45 +56,18 @@ struct vnic_cq {
ktime_t prev_ts;
};
-static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
- unsigned int work_to_do,
- int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- u8 type, u16 q_number, u16 completed_index, void *opaque),
- void *opaque)
+static inline void *vnic_cq_to_clean(struct vnic_cq *cq)
{
- struct cq_desc *cq_desc;
- unsigned int work_done = 0;
- u16 q_number, completed_index;
- u8 type, color;
-
- cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
- cq->ring.desc_size * cq->to_clean);
- cq_desc_dec(cq_desc, &type, &color,
- &q_number, &completed_index);
-
- while (color != cq->last_color) {
-
- if ((*q_service)(cq->vdev, cq_desc, type,
- q_number, completed_index, opaque))
- break;
-
- cq->to_clean++;
- if (cq->to_clean == cq->ring.desc_count) {
- cq->to_clean = 0;
- cq->last_color = cq->last_color ? 0 : 1;
- }
-
- cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
- cq->ring.desc_size * cq->to_clean);
- cq_desc_dec(cq_desc, &type, &color,
- &q_number, &completed_index);
+ return ((u8 *)cq->ring.descs + cq->ring.desc_size * cq->to_clean);
+}
- work_done++;
- if (work_done >= work_to_do)
- break;
+static inline void vnic_cq_inc_to_clean(struct vnic_cq *cq)
+{
+ cq->to_clean++;
+ if (cq->to_clean == cq->ring.desc_count) {
+ cq->to_clean = 0;
+ cq->last_color = cq->last_color ? 0 : 1;
}
-
- return work_done;
}
void vnic_cq_free(struct vnic_cq *cq);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
index db56d778877a..605ef17f967e 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
@@ -436,6 +436,25 @@ enum vnic_devcmd_cmd {
* in: (u16) a2 = unsigned short int port information
*/
CMD_OVERLAY_OFFLOAD_CFG = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 73),
+
+ /*
+ * Set extended CQ field in MREGS of RQ (or all RQs)
+ * for a given vNIC
+ * in: (u64) a0 = RQ selection (VNIC_RQ_ALL for all RQs)
+ * (u32) a1 = CQ entry size
+ * VNIC_RQ_CQ_ENTRY_SIZE_16 --> 16 bytes
+ * VNIC_RQ_CQ_ENTRY_SIZE_32 --> 32 bytes
+ * VNIC_RQ_CQ_ENTRY_SIZE_64 --> 64 bytes
+ *
+ * Capability query:
+ * out: (u32) a0 = errno, 0:valid cmd
+ * (u32) a1 = value consisting of supported entries
+ * bit 0: 16 bytes
+ * bit 1: 32 bytes
+ * bit 2: 64 bytes
+ */
+ CMD_CQ_ENTRY_SIZE_SET = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 90),
+
};
/* CMD_ENABLE2 flags */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_enet.h b/drivers/net/ethernet/cisco/enic/vnic_enet.h
index 5acc236069de..9e8e86262a3f 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_enet.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_enet.h
@@ -21,6 +21,11 @@ struct vnic_enet_config {
u16 loop_tag;
u16 vf_rq_count;
u16 num_arfs;
+ u8 reserved[66];
+ u32 max_rq_ring; // MAX RQ ring size
+ u32 max_wq_ring; // MAX WQ ring size
+ u32 max_cq_ring; // MAX CQ ring size
+ u32 rdma_rsvd_lkey; // Reserved (privileged) LKey
};
#define VENETF_TSO 0x1 /* TSO enabled */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h
index 0bc595abc03b..a1cdd729caec 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h
@@ -50,7 +50,7 @@ struct vnic_rq_ctrl {
(VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
-#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
+#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(16384)
struct vnic_rq_buf {
struct vnic_rq_buf *next;
@@ -61,6 +61,8 @@ struct vnic_rq_buf {
unsigned int index;
void *desc;
uint64_t wr_id;
+ unsigned int offset;
+ unsigned int truesize;
};
enum enic_poll_state {
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
index 75c526911074..3bb4758100ba 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -62,7 +62,7 @@ struct vnic_wq_buf {
(VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf))
#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries))
-#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
+#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(16384)
struct vnic_wq {
unsigned int index;
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 991e3839858b..517a15904fb0 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -40,6 +40,7 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <net/gro.h>
#include "gemini.h"
@@ -1833,9 +1834,8 @@ static int gmac_open(struct net_device *netdev)
gmac_enable_tx_rx(netdev);
netif_tx_start_all_queues(netdev);
- hrtimer_init(&port->rx_coalesce_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- port->rx_coalesce_timer.function = &gmac_coalesce_delay_expired;
+ hrtimer_setup(&port->rx_coalesce_timer, &gmac_coalesce_delay_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
netdev_dbg(netdev, "opened\n");
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 27e01d780cd0..75eac18ff246 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1177,7 +1177,6 @@ static void set_rx_mode(struct net_device *dev)
iowrite32(csr6, ioaddr + CSR6);
}
-#ifdef CONFIG_TULIP_MWI
static void tulip_mwi_config(struct pci_dev *pdev, struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
@@ -1251,7 +1250,6 @@ out:
netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n",
cache, csr0);
}
-#endif
/*
* Chips that have the MRM/reserved bit quirk and the burst quirk. That
@@ -1463,10 +1461,9 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
-#ifdef CONFIG_TULIP_MWI
- if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
+ if (IS_ENABLED(CONFIG_TULIP_MWI) && !force_csr0 &&
+ (tp->flags & HAS_PCI_MWI))
tulip_mwi_config (pdev, dev);
-#endif
/* Stop the chip's Tx and Rx processes. */
tulip_stop_rxtx(tp);
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 44af1d13d931..67275aa4f65b 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -416,8 +416,7 @@ static int ec_bhf_open(struct net_device *net_dev)
netif_start_queue(net_dev);
- hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- priv->hrtimer.function = ec_bhf_timer_fun;
+ hrtimer_setup(&priv->hrtimer, ec_bhf_timer_fun, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer_start(&priv->hrtimer, polling_frequency, HRTIMER_MODE_REL);
return 0;
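
Several drivers in this range (gemini, ec_bhf, fec_ptp) make the same mechanical conversion from hrtimer_init() plus a separate function-pointer assignment to the combined hrtimer_setup() helper. A sketch of the pattern on a hypothetical driver-private struct; "priv" and "my_timer_fn" are placeholders, not symbols from these drivers:

    static enum hrtimer_restart my_timer_fn(struct hrtimer *t);

    /* before */
    hrtimer_init(&priv->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    priv->timer.function = my_timer_fn;

    /* after */
    hrtimer_setup(&priv->timer, my_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);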
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 0d030cb0b21c..625245b0845c 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -221,20 +221,19 @@ static void tsnep_phy_link_status_change(struct net_device *netdev)
static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
{
- int retval;
-
- retval = phy_loopback(adapter->phydev, enable);
+ int speed;
- /* PHY link state change is not signaled if loopback is enabled, it
- * would delay a working loopback anyway, let's ensure that loopback
- * is working immediately by setting link mode directly
- */
- if (!retval && enable) {
- netif_carrier_on(adapter->netdev);
- tsnep_set_link_mode(adapter);
+ if (enable) {
+ if (adapter->phydev->autoneg == AUTONEG_DISABLE &&
+ adapter->phydev->speed == SPEED_100)
+ speed = SPEED_100;
+ else
+ speed = SPEED_1000;
+ } else {
+ speed = 0;
}
- return retval;
+ return phy_loopback(adapter->phydev, enable, speed);
}
static int tsnep_phy_open(struct tsnep_adapter *adapter)
@@ -852,8 +851,8 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
struct skb_shared_hwtstamps hwtstamps;
u64 timestamp;
- if (skb_shinfo(entry->skb)->tx_flags &
- SKBTX_HW_TSTAMP_USE_CYCLES)
+ if (entry->skb->sk &&
+ READ_ONCE(entry->skb->sk->sk_tsflags) & SOF_TIMESTAMPING_BIND_PHC)
timestamp =
__le64_to_cpu(entry->desc_wb->counter);
else
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index f7c4ce8e9a26..a86cfebedaa8 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1093,6 +1093,29 @@ static void fec_enet_enable_ring(struct net_device *ndev)
}
}
+/* Whack a reset. We should wait for this.
+ * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
+ * instead of reset MAC itself.
+ */
+static void fec_ctrl_reset(struct fec_enet_private *fep, bool allow_wol)
+{
+ u32 val;
+
+ if (!allow_wol || !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
+ if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
+ ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
+ writel(0, fep->hwp + FEC_ECNTRL);
+ } else {
+ writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
+ udelay(10);
+ }
+ } else {
+ val = readl(fep->hwp + FEC_ECNTRL);
+ val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
+ writel(val, fep->hwp + FEC_ECNTRL);
+ }
+}
+
/*
* This function is called to start or restart the FEC during a link
* change, transmit timeout, or to reconfigure the FEC. The network
@@ -1109,17 +1132,7 @@ fec_restart(struct net_device *ndev)
if (fep->bufdesc_ex)
fec_ptp_save_state(fep);
- /* Whack a reset. We should wait for this.
- * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
- * instead of reset MAC itself.
- */
- if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
- ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
- writel(0, fep->hwp + FEC_ECNTRL);
- } else {
- writel(1, fep->hwp + FEC_ECNTRL);
- udelay(10);
- }
+ fec_ctrl_reset(fep, false);
/*
* enet-mac reset will reset mac address registers too,
@@ -1373,22 +1386,7 @@ fec_stop(struct net_device *ndev)
if (fep->bufdesc_ex)
fec_ptp_save_state(fep);
- /* Whack a reset. We should wait for this.
- * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
- * instead of reset MAC itself.
- */
- if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
- if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
- writel(0, fep->hwp + FEC_ECNTRL);
- } else {
- writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
- udelay(10);
- }
- } else {
- val = readl(fep->hwp + FEC_ECNTRL);
- val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
- writel(val, fep->hwp + FEC_ECNTRL);
- }
+ fec_ctrl_reset(fep, true);
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 7f6b57432071..876d90832596 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -30,7 +30,6 @@
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include "fec.h"
@@ -739,8 +738,8 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
- hrtimer_init(&fep->perout_timer, CLOCK_REALTIME, HRTIMER_MODE_REL);
- fep->perout_timer.function = fec_ptp_pps_perout_handler;
+ hrtimer_setup(&fep->perout_timer, fec_ptp_pps_perout_handler, CLOCK_REALTIME,
+ HRTIMER_MODE_REL);
irq = platform_get_irq_byname_optional(pdev, "pps");
if (irq < 0)
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index b3e2a596ad2c..51402dff72c5 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -1446,7 +1446,6 @@ int dtsec_initialization(struct mac_device *mac_dev,
goto _return_fm_mac_free;
}
dtsec->pcs.ops = &dtsec_pcs_ops;
- dtsec->pcs.neg_mode = true;
dtsec->pcs.poll = true;
supported = mac_dev->phylink_config.supported_interfaces;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 435138f4699d..deb35b38c976 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1647,20 +1647,11 @@ static void gfar_configure_serdes(struct net_device *dev)
*/
static int init_phy(struct net_device *dev)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct gfar_private *priv = netdev_priv(dev);
phy_interface_t interface = priv->interface;
struct phy_device *phydev;
struct ethtool_keee edata;
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
-
priv->oldlink = 0;
priv->oldspeed = 0;
priv->oldduplex = -1;
@@ -1675,9 +1666,8 @@ static int init_phy(struct net_device *dev)
if (interface == PHY_INTERFACE_MODE_SGMII)
gfar_configure_serdes(dev);
- /* Remove any features not supported by the controller */
- linkmode_and(phydev->supported, phydev->supported, mask);
- linkmode_copy(phydev->advertising, phydev->supported);
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT))
+ phy_set_max_speed(phydev, SPEED_100);
/* Add support for flow control */
phy_support_asym_pause(phydev);
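
In the gianfar hunk, the hand-built 10/100 linkmode mask is replaced by phylib's phy_set_max_speed(), which caps phydev->supported (and hence the advertised modes) at the requested speed. A hedged sketch of the resulting init_phy() shape, with a hypothetical gigabit-capability flag:

/* Hedged sketch of the phylib helper usage; the flag is illustrative. */
#include <linux/phy.h>

static void demo_limit_phy(struct phy_device *phydev, bool has_gigabit)
{
	/*
	 * Instead of building a linkmode mask of 10/100 modes by hand and
	 * AND-ing it into phydev->supported, let phylib trim everything
	 * above the requested ceiling.
	 */
	if (!has_gigabit)
		phy_set_max_speed(phydev, SPEED_100);

	/* advertise pause frame support as before */
	phy_support_asym_pause(phydev);
}
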
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 88510f822759..affd5a6c44e7 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3408,7 +3408,7 @@ static int ucc_geth_parse_clock(struct device_node *np, const char *which,
return 0;
}
-struct phylink_mac_ops ugeth_mac_ops = {
+static const struct phylink_mac_ops ugeth_mac_ops = {
.mac_link_up = ugeth_mac_link_up,
.mac_link_down = ugeth_mac_link_down,
.mac_config = ugeth_mac_config,
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index 38789faae706..84f92f6384e7 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -890,8 +890,6 @@ struct ucc_geth_hardware_statistics {
addresses */
#define TX_TIMEOUT (1*HZ)
-#define PHY_INIT_TIMEOUT 100000
-#define PHY_CHANGE_TIME 2
/* Fast Ethernet (10/100 Mbps) */
#define UCC_GETH_URFS_INIT 512 /* Rx virtual FIFO size
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 78d2a19593d1..2fab38c8ee78 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -59,6 +59,8 @@
#define GVE_MAX_RX_BUFFER_SIZE 4096
+#define GVE_XDP_RX_BUFFER_SIZE_DQO 4096
+
#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
#define GVE_PAGE_POOL_SIZE_MULTIPLIER 4
@@ -68,6 +70,9 @@
#define GVE_FLOW_RULE_IDS_CACHE_SIZE \
(GVE_ADMINQ_BUFFER_SIZE / sizeof(((struct gve_adminq_queried_flow_rule *)0)->location))
+#define GVE_RSS_KEY_SIZE 40
+#define GVE_RSS_INDIR_SIZE 128
+
#define GVE_XDP_ACTIONS 5
#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
@@ -102,7 +107,13 @@ struct gve_rx_desc_queue {
/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
- struct page *page;
+ /* netmem is used for DQO RDA mode
+ * page is used in all other modes
+ */
+ union {
+ struct page *page;
+ netmem_ref netmem;
+ };
void *page_address;
u32 page_offset; /* offset to write to in page */
unsigned int buf_size;
@@ -218,6 +229,11 @@ struct gve_rx_cnts {
/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
struct gve_priv *gve;
+
+ u16 packet_buffer_size; /* Size of buffer posted to NIC */
+ u16 packet_buffer_truesize; /* Total size of RX buffer */
+ u16 rx_headroom;
+
union {
/* GQI fields */
struct {
@@ -226,7 +242,6 @@ struct gve_rx_ring {
/* threshold for posting new buffs and descs */
u32 db_threshold;
- u16 packet_buffer_size;
u32 qpl_copy_pool_mask;
u32 qpl_copy_pool_head;
@@ -604,8 +619,6 @@ struct gve_tx_ring {
dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
struct u64_stats_sync statss; /* sync stats for 32bit archs */
struct xsk_buff_pool *xsk_pool;
- u32 xdp_xsk_wakeup;
- u32 xdp_xsk_done;
u64 xdp_xsk_sent;
u64 xdp_xmit;
u64 xdp_xmit_errors;
@@ -624,10 +637,18 @@ struct gve_notify_block {
u32 irq;
};
-/* Tracks allowed and current queue settings */
-struct gve_queue_config {
+/* Tracks allowed and current rx queue settings */
+struct gve_rx_queue_config {
+ u16 max_queues;
+ u16 num_queues;
+ u16 packet_buffer_size;
+};
+
+/* Tracks allowed and current tx queue settings */
+struct gve_tx_queue_config {
u16 max_queues;
- u16 num_queues; /* current */
+ u16 num_queues; /* number of TX queues, excluding XDP queues */
+ u16 num_xdp_queues;
};
/* Tracks the available and used qpl IDs */
@@ -651,11 +672,11 @@ struct gve_ptype_lut {
/* Parameters for allocating resources for tx queues */
struct gve_tx_alloc_rings_cfg {
- struct gve_queue_config *qcfg;
+ struct gve_tx_queue_config *qcfg;
+
+ u16 num_xdp_rings;
u16 ring_size;
- u16 start_idx;
- u16 num_rings;
bool raw_addressing;
/* Allocated resources are returned here */
@@ -665,13 +686,15 @@ struct gve_tx_alloc_rings_cfg {
/* Parameters for allocating resources for rx queues */
struct gve_rx_alloc_rings_cfg {
/* tx config is also needed to determine QPL ids */
- struct gve_queue_config *qcfg;
- struct gve_queue_config *qcfg_tx;
+ struct gve_rx_queue_config *qcfg_rx;
+ struct gve_tx_queue_config *qcfg_tx;
u16 ring_size;
u16 packet_buffer_size;
bool raw_addressing;
bool enable_header_split;
+ bool reset_rss;
+ bool xdp;
/* Allocated resources are returned here */
struct gve_rx_ring *rx;
@@ -722,6 +745,11 @@ struct gve_flow_rules_cache {
u32 rule_ids_cache_num;
};
+struct gve_rss_config {
+ u8 *hash_key;
+ u32 *hash_lut;
+};
+
struct gve_priv {
struct net_device *dev;
struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
@@ -751,9 +779,8 @@ struct gve_priv {
u32 rx_copybreak; /* copy packets smaller than this */
u16 default_num_queues; /* default num queues to set up */
- u16 num_xdp_queues;
- struct gve_queue_config tx_cfg;
- struct gve_queue_config rx_cfg;
+ struct gve_tx_queue_config tx_cfg;
+ struct gve_rx_queue_config rx_cfg;
u32 num_ntfy_blks; /* spilt between TX and RX so must be even */
struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
@@ -823,7 +850,6 @@ struct gve_priv {
struct gve_ptype_lut *ptype_lut_dqo;
/* Must be a power of two. */
- u16 data_buffer_size_dqo;
u16 max_rx_buffer_size; /* device limit */
enum gve_queue_format queue_format;
@@ -842,6 +868,8 @@ struct gve_priv {
u16 rss_key_size;
u16 rss_lut_size;
+ bool cache_rss_config;
+ struct gve_rss_config rss_config;
};
enum gve_service_task_flags_bit {
@@ -1024,27 +1052,16 @@ static inline bool gve_is_qpl(struct gve_priv *priv)
}
/* Returns the number of tx queue page lists */
-static inline u32 gve_num_tx_qpls(const struct gve_queue_config *tx_cfg,
- int num_xdp_queues,
+static inline u32 gve_num_tx_qpls(const struct gve_tx_queue_config *tx_cfg,
bool is_qpl)
{
if (!is_qpl)
return 0;
- return tx_cfg->num_queues + num_xdp_queues;
-}
-
-/* Returns the number of XDP tx queue page lists
- */
-static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
-{
- if (priv->queue_format != GVE_GQI_QPL_FORMAT)
- return 0;
-
- return priv->num_xdp_queues;
+ return tx_cfg->num_queues + tx_cfg->num_xdp_queues;
}
/* Returns the number of rx queue page lists */
-static inline u32 gve_num_rx_qpls(const struct gve_queue_config *rx_cfg,
+static inline u32 gve_num_rx_qpls(const struct gve_rx_queue_config *rx_cfg,
bool is_qpl)
{
if (!is_qpl)
@@ -1062,7 +1079,8 @@ static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
return priv->tx_cfg.max_queues + rx_qid;
}
-static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
+static inline u32 gve_get_rx_qpl_id(const struct gve_tx_queue_config *tx_cfg,
+ int rx_qid)
{
return tx_cfg->max_queues + rx_qid;
}
@@ -1072,7 +1090,7 @@ static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
return gve_tx_qpl_id(priv, 0);
}
-static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
+static inline u32 gve_rx_start_qpl_id(const struct gve_tx_queue_config *tx_cfg)
{
return gve_get_rx_qpl_id(tx_cfg, 0);
}
@@ -1103,7 +1121,7 @@ static inline bool gve_is_gqi(struct gve_priv *priv)
static inline u32 gve_num_tx_queues(struct gve_priv *priv)
{
- return priv->tx_cfg.num_queues + priv->num_xdp_queues;
+ return priv->tx_cfg.num_queues + priv->tx_cfg.num_xdp_queues;
}
static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
@@ -1207,7 +1225,8 @@ void gve_free_buffer(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state);
int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc);
struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
- struct gve_rx_ring *rx);
+ struct gve_rx_ring *rx,
+ bool xdp);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
@@ -1219,14 +1238,17 @@ int gve_adjust_config(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_adjust_queues(struct gve_priv *priv,
- struct gve_queue_config new_rx_config,
- struct gve_queue_config new_tx_config);
+ struct gve_rx_queue_config new_rx_config,
+ struct gve_tx_queue_config new_tx_config,
+ bool reset_rss);
/* flow steering rule */
int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs);
int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_flow_rules_reset(struct gve_priv *priv);
+/* RSS config */
+int gve_init_rss_config(struct gve_priv *priv, u16 num_queues);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
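
The gve.h changes split the old shared gve_queue_config into per-direction structs and move the XDP queue count into the TX config, so helpers such as gve_num_tx_qpls() no longer take it as a separate argument. A hedged illustration of how the new TX config is consumed; the struct and helper below are illustrative, not the driver's:

#include <linux/types.h>

struct demo_tx_queue_config {
	u16 max_queues;
	u16 num_queues;		/* regular TX queues */
	u16 num_xdp_queues;	/* dedicated XDP TX queues */
};

/* total TX rings the device must provide, mirroring gve_num_tx_queues() */
static inline u32 demo_total_tx_queues(const struct demo_tx_queue_config *cfg)
{
	/* the XDP count now travels with the TX config instead of being
	 * passed around as an extra parameter
	 */
	return cfg->num_queues + cfg->num_xdp_queues;
}
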
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index aa7d723011d0..3e8fc33cc11f 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -731,6 +731,7 @@ static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
.ntfy_id = cpu_to_be32(rx->ntfy_id),
.queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
.rx_ring_size = cpu_to_be16(priv->rx_desc_cnt),
+ .packet_buffer_size = cpu_to_be16(rx->packet_buffer_size),
};
if (gve_is_gqi(priv)) {
@@ -743,7 +744,6 @@ static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
cpu_to_be64(rx->data.data_bus);
cmd->create_rx_queue.index = cpu_to_be32(queue_index);
cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
- cmd->create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
} else {
u32 qpl_id = 0;
@@ -756,8 +756,6 @@ static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
cpu_to_be64(rx->dqo.complq.bus);
cmd->create_rx_queue.rx_data_ring_addr =
cpu_to_be64(rx->dqo.bufq.bus);
- cmd->create_rx_queue.packet_buffer_size =
- cpu_to_be16(priv->data_buffer_size_dqo);
cmd->create_rx_queue.rx_buff_ring_size =
cpu_to_be16(priv->rx_desc_cnt);
cmd->create_rx_queue.enable_rsc =
@@ -885,6 +883,15 @@ static void gve_set_default_desc_cnt(struct gve_priv *priv,
priv->min_rx_desc_cnt = priv->rx_desc_cnt;
}
+static void gve_set_default_rss_sizes(struct gve_priv *priv)
+{
+ if (!gve_is_gqi(priv)) {
+ priv->rss_key_size = GVE_RSS_KEY_SIZE;
+ priv->rss_lut_size = GVE_RSS_INDIR_SIZE;
+ priv->cache_rss_config = true;
+ }
+}
+
static void gve_enable_supported_features(struct gve_priv *priv,
u32 supported_features_mask,
const struct gve_device_option_jumbo_frames
@@ -968,6 +975,10 @@ static void gve_enable_supported_features(struct gve_priv *priv,
be16_to_cpu(dev_op_rss_config->hash_key_size);
priv->rss_lut_size =
be16_to_cpu(dev_op_rss_config->hash_lut_size);
+ priv->cache_rss_config = false;
+ dev_dbg(&priv->pdev->dev,
+ "RSS device option enabled with key size of %u, lut size of %u.\n",
+ priv->rss_key_size, priv->rss_lut_size);
}
}
@@ -1052,6 +1063,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
/* set default descriptor counts */
gve_set_default_desc_cnt(priv, descriptor);
+ gve_set_default_rss_sizes(priv);
+
/* DQO supports LRO. */
if (!gve_is_gqi(priv))
priv->dev->hw_features |= NETIF_F_LRO;
@@ -1276,8 +1289,9 @@ int gve_adminq_reset_flow_rules(struct gve_priv *priv)
int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh)
{
+ const u32 *hash_lut_to_config = NULL;
+ const u8 *hash_key_to_config = NULL;
dma_addr_t lut_bus = 0, key_bus = 0;
- u16 key_size = 0, lut_size = 0;
union gve_adminq_command cmd;
__be32 *lut = NULL;
u8 hash_alg = 0;
@@ -1287,7 +1301,7 @@ int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *r
switch (rxfh->hfunc) {
case ETH_RSS_HASH_NO_CHANGE:
- break;
+ fallthrough;
case ETH_RSS_HASH_TOP:
hash_alg = ETH_RSS_HASH_TOP;
break;
@@ -1296,27 +1310,46 @@ int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *r
}
if (rxfh->indir) {
- lut_size = priv->rss_lut_size;
+ if (rxfh->indir_size != priv->rss_lut_size)
+ return -EINVAL;
+
+ hash_lut_to_config = rxfh->indir;
+ } else if (priv->cache_rss_config) {
+ hash_lut_to_config = priv->rss_config.hash_lut;
+ }
+
+ if (hash_lut_to_config) {
lut = dma_alloc_coherent(&priv->pdev->dev,
- lut_size * sizeof(*lut),
+ priv->rss_lut_size * sizeof(*lut),
&lut_bus, GFP_KERNEL);
if (!lut)
return -ENOMEM;
for (i = 0; i < priv->rss_lut_size; i++)
- lut[i] = cpu_to_be32(rxfh->indir[i]);
+ lut[i] = cpu_to_be32(hash_lut_to_config[i]);
}
if (rxfh->key) {
- key_size = priv->rss_key_size;
+ if (rxfh->key_size != priv->rss_key_size) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ hash_key_to_config = rxfh->key;
+ } else if (priv->cache_rss_config) {
+ hash_key_to_config = priv->rss_config.hash_key;
+ }
+
+ if (hash_key_to_config) {
key = dma_alloc_coherent(&priv->pdev->dev,
- key_size, &key_bus, GFP_KERNEL);
+ priv->rss_key_size,
+ &key_bus, GFP_KERNEL);
if (!key) {
err = -ENOMEM;
goto out;
}
- memcpy(key, rxfh->key, key_size);
+ memcpy(key, hash_key_to_config, priv->rss_key_size);
}
/* Zero-valued fields in the cmd.configure_rss instruct the device to
@@ -1330,8 +1363,10 @@ int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *r
BIT(GVE_RSS_HASH_TCPV6) |
BIT(GVE_RSS_HASH_UDPV6)),
.hash_alg = hash_alg,
- .hash_key_size = cpu_to_be16(key_size),
- .hash_lut_size = cpu_to_be16(lut_size),
+ .hash_key_size =
+ cpu_to_be16((key_bus) ? priv->rss_key_size : 0),
+ .hash_lut_size =
+ cpu_to_be16((lut_bus) ? priv->rss_lut_size : 0),
.hash_key_addr = cpu_to_be64(key_bus),
.hash_lut_addr = cpu_to_be64(lut_bus),
};
@@ -1341,11 +1376,11 @@ int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *r
out:
if (lut)
dma_free_coherent(&priv->pdev->dev,
- lut_size * sizeof(*lut),
+ priv->rss_lut_size * sizeof(*lut),
lut, lut_bus);
if (key)
dma_free_coherent(&priv->pdev->dev,
- key_size, key, key_bus);
+ priv->rss_key_size, key, key_bus);
return err;
}
@@ -1449,12 +1484,15 @@ static int gve_adminq_process_rss_query(struct gve_priv *priv,
rxfh->hfunc = descriptor->hash_alg;
rss_info_addr = (void *)(descriptor + 1);
- if (rxfh->key)
+ if (rxfh->key) {
+ rxfh->key_size = priv->rss_key_size;
memcpy(rxfh->key, rss_info_addr, priv->rss_key_size);
+ }
rss_info_addr += priv->rss_key_size;
lut = (__be32 *)rss_info_addr;
if (rxfh->indir) {
+ rxfh->indir_size = priv->rss_lut_size;
for (i = 0; i < priv->rss_lut_size; i++)
rxfh->indir[i] = be32_to_cpu(lut[i]);
}
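
The gve_adminq_configure_rss() rework above picks the RSS indirection table and hash key from the ethtool request when present, otherwise from the driver's cache (when cache_rss_config is set), and rejects mismatched sizes. A hedged, condensed sketch of that selection logic with illustrative names (the key follows the same pattern as the table):

#include <linux/types.h>
#include <linux/errno.h>

struct demo_rss_cache {
	const u32 *hash_lut;	/* cached indirection table */
	u16 lut_size;		/* device's table length */
	bool cache_enabled;
};

static int demo_pick_lut(const struct demo_rss_cache *c,
			 const u32 *user_lut, u32 user_lut_size,
			 const u32 **out)
{
	if (user_lut) {
		if (user_lut_size != c->lut_size)
			return -EINVAL;	/* size must match the device */
		*out = user_lut;	/* program the caller's table */
	} else if (c->cache_enabled) {
		*out = c->hash_lut;	/* re-program the cached table */
	} else {
		*out = NULL;		/* leave the device table unchanged */
	}
	return 0;
}
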
diff --git a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
index 403f0f335ba6..a71883e1d920 100644
--- a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
@@ -139,7 +139,8 @@ int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
buf_state->page_info.page_offset = 0;
buf_state->page_info.page_address =
page_address(buf_state->page_info.page);
- buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
+ buf_state->page_info.buf_size = rx->packet_buffer_truesize;
+ buf_state->page_info.pad = rx->rx_headroom;
buf_state->last_single_ref_offset = 0;
/* The page already has 1 ref. */
@@ -162,7 +163,7 @@ void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state)
void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state)
{
- const u16 data_buffer_size = priv->data_buffer_size_dqo;
+ const u16 data_buffer_size = rx->packet_buffer_truesize;
int pagecount;
/* Can't reuse if we only fit one buffer per page */
@@ -205,38 +206,40 @@ void gve_free_to_page_pool(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state,
bool allow_direct)
{
- struct page *page = buf_state->page_info.page;
+ netmem_ref netmem = buf_state->page_info.netmem;
- if (!page)
+ if (!netmem)
return;
- page_pool_put_full_page(page->pp, page, allow_direct);
- buf_state->page_info.page = NULL;
+ page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, allow_direct);
+ buf_state->page_info.netmem = 0;
}
static int gve_alloc_from_page_pool(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state)
{
- struct gve_priv *priv = rx->gve;
- struct page *page;
+ netmem_ref netmem;
- buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
- page = page_pool_alloc(rx->dqo.page_pool,
- &buf_state->page_info.page_offset,
- &buf_state->page_info.buf_size, GFP_ATOMIC);
+ buf_state->page_info.buf_size = rx->packet_buffer_truesize;
+ netmem = page_pool_alloc_netmem(rx->dqo.page_pool,
+ &buf_state->page_info.page_offset,
+ &buf_state->page_info.buf_size,
+ GFP_ATOMIC);
- if (!page)
+ if (!netmem)
return -ENOMEM;
- buf_state->page_info.page = page;
- buf_state->page_info.page_address = page_address(page);
- buf_state->addr = page_pool_get_dma_addr(page);
+ buf_state->page_info.netmem = netmem;
+ buf_state->page_info.page_address = netmem_address(netmem);
+ buf_state->addr = page_pool_get_dma_addr_netmem(netmem);
+ buf_state->page_info.pad = rx->dqo.page_pool->p.offset;
return 0;
}
struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
- struct gve_rx_ring *rx)
+ struct gve_rx_ring *rx,
+ bool xdp)
{
u32 ntfy_id = gve_rx_idx_to_ntfy(priv, rx->q_num);
struct page_pool_params pp = {
@@ -247,7 +250,8 @@ struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
.netdev = priv->dev,
.napi = &priv->ntfy_blocks[ntfy_id].napi,
.max_len = PAGE_SIZE,
- .dma_dir = DMA_FROM_DEVICE,
+ .dma_dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
+ .offset = xdp ? XDP_PACKET_HEADROOM : 0,
};
return page_pool_create(&pp);
@@ -269,7 +273,7 @@ void gve_reuse_buffer(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state)
{
if (rx->dqo.page_pool) {
- buf_state->page_info.page = NULL;
+ buf_state->page_info.netmem = 0;
gve_free_buf_state(rx, buf_state);
} else {
gve_dec_pagecnt_bias(&buf_state->page_info);
@@ -301,7 +305,8 @@ int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc)
}
desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
desc->buf_addr = cpu_to_le64(buf_state->addr +
- buf_state->page_info.page_offset);
+ buf_state->page_info.page_offset +
+ buf_state->page_info.pad);
return 0;
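
The buffer-management hunks convert the DQO page-pool path from struct page to netmem_ref, switching to the netmem flavours of the page_pool helpers seen in the diff. A hedged sketch of the alloc/free pair around a simplified buffer descriptor (not the driver's buf_state layout):

#include <linux/gfp.h>
#include <net/netmem.h>
#include <net/page_pool/helpers.h>

struct demo_buf {
	netmem_ref netmem;
	void *va;
	dma_addr_t dma;
	unsigned int offset;
	unsigned int size;
};

static int demo_alloc(struct page_pool *pool, struct demo_buf *b,
		      unsigned int truesize)
{
	b->size = truesize;
	/* netmem flavour of page_pool_alloc(): fills offset, may shrink size */
	b->netmem = page_pool_alloc_netmem(pool, &b->offset, &b->size,
					   GFP_ATOMIC);
	if (!b->netmem)
		return -ENOMEM;

	b->va = netmem_address(b->netmem);
	b->dma = page_pool_get_dma_addr_netmem(b->netmem);
	return 0;
}

static void demo_free(struct demo_buf *b, bool allow_direct)
{
	if (!b->netmem)
		return;
	page_pool_put_full_netmem(netmem_get_pp(b->netmem), b->netmem,
				  allow_direct);
	b->netmem = 0;
}
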
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index bdfc6e77b2af..31a21ccf4863 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -63,8 +63,8 @@ static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
"tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]",
"tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
- "tx_dma_mapping_error[%u]", "tx_xsk_wakeup[%u]",
- "tx_xsk_done[%u]", "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
+ "tx_dma_mapping_error[%u]",
+ "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
};
static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
@@ -417,9 +417,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = value;
}
}
- /* XDP xsk counters */
- data[i++] = tx->xdp_xsk_wakeup;
- data[i++] = tx->xdp_xsk_done;
+ /* XDP counters */
do {
start = u64_stats_fetch_begin(&priv->tx[ring].statss);
data[i] = tx->xdp_xsk_sent;
@@ -477,11 +475,12 @@ static int gve_set_channels(struct net_device *netdev,
struct ethtool_channels *cmd)
{
struct gve_priv *priv = netdev_priv(netdev);
- struct gve_queue_config new_tx_cfg = priv->tx_cfg;
- struct gve_queue_config new_rx_cfg = priv->rx_cfg;
+ struct gve_tx_queue_config new_tx_cfg = priv->tx_cfg;
+ struct gve_rx_queue_config new_rx_cfg = priv->rx_cfg;
struct ethtool_channels old_settings;
int new_tx = cmd->tx_count;
int new_rx = cmd->rx_count;
+ bool reset_rss = false;
gve_get_channels(netdev, &old_settings);
@@ -492,22 +491,27 @@ static int gve_set_channels(struct net_device *netdev,
if (!new_rx || !new_tx)
return -EINVAL;
- if (priv->num_xdp_queues &&
- (new_tx != new_rx || (2 * new_tx > priv->tx_cfg.max_queues))) {
- dev_err(&priv->pdev->dev, "XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues");
- return -EINVAL;
- }
+ if (priv->xdp_prog) {
+ if (new_tx != new_rx ||
+ (2 * new_tx > priv->tx_cfg.max_queues)) {
+ dev_err(&priv->pdev->dev, "The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues when XDP program is installed");
+ return -EINVAL;
+ }
- if (!netif_running(netdev)) {
- priv->tx_cfg.num_queues = new_tx;
- priv->rx_cfg.num_queues = new_rx;
- return 0;
+ /* One XDP TX queue per RX queue. */
+ new_tx_cfg.num_xdp_queues = new_rx;
+ } else {
+ new_tx_cfg.num_xdp_queues = 0;
}
+ if (new_rx != priv->rx_cfg.num_queues &&
+ priv->cache_rss_config && !netif_is_rxfh_configured(netdev))
+ reset_rss = true;
+
new_tx_cfg.num_queues = new_tx;
new_rx_cfg.num_queues = new_rx;
- return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg);
+ return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg, reset_rss);
}
static void gve_get_ringparam(struct net_device *netdev,
@@ -643,8 +647,7 @@ static int gve_set_tunable(struct net_device *netdev,
switch (etuna->id) {
case ETHTOOL_RX_COPYBREAK:
{
- u32 max_copybreak = gve_is_gqi(priv) ?
- GVE_DEFAULT_RX_BUFFER_SIZE : priv->data_buffer_size_dqo;
+ u32 max_copybreak = priv->rx_cfg.packet_buffer_size;
len = *(u32 *)value;
if (len > max_copybreak)
@@ -855,6 +858,25 @@ static u32 gve_get_rxfh_indir_size(struct net_device *netdev)
return priv->rss_lut_size;
}
+static void gve_get_rss_config_cache(struct gve_priv *priv,
+ struct ethtool_rxfh_param *rxfh)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+
+ if (rxfh->key) {
+ rxfh->key_size = priv->rss_key_size;
+ memcpy(rxfh->key, rss_config->hash_key, priv->rss_key_size);
+ }
+
+ if (rxfh->indir) {
+ rxfh->indir_size = priv->rss_lut_size;
+ memcpy(rxfh->indir, rss_config->hash_lut,
+ priv->rss_lut_size * sizeof(*rxfh->indir));
+ }
+}
+
static int gve_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
struct gve_priv *priv = netdev_priv(netdev);
@@ -862,18 +884,46 @@ static int gve_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rx
if (!priv->rss_key_size || !priv->rss_lut_size)
return -EOPNOTSUPP;
+ if (priv->cache_rss_config) {
+ gve_get_rss_config_cache(priv, rxfh);
+ return 0;
+ }
+
return gve_adminq_query_rss_config(priv, rxfh);
}
+static void gve_set_rss_config_cache(struct gve_priv *priv,
+ struct ethtool_rxfh_param *rxfh)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+
+ if (rxfh->key)
+ memcpy(rss_config->hash_key, rxfh->key, priv->rss_key_size);
+
+ if (rxfh->indir)
+ memcpy(rss_config->hash_lut, rxfh->indir,
+ priv->rss_lut_size * sizeof(*rxfh->indir));
+}
+
static int gve_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct gve_priv *priv = netdev_priv(netdev);
+ int err;
if (!priv->rss_key_size || !priv->rss_lut_size)
return -EOPNOTSUPP;
- return gve_adminq_configure_rss(priv, rxfh);
+ err = gve_adminq_configure_rss(priv, rxfh);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Fail to configure RSS config");
+ return err;
+ }
+
+ if (priv->cache_rss_config)
+ gve_set_rss_config_cache(priv, rxfh);
+
+ return 0;
}
const struct ethtool_ops gve_ethtool_ops = {
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 92237fb0b60c..cb2f9978f45e 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -184,6 +184,43 @@ static void gve_free_flow_rule_caches(struct gve_priv *priv)
flow_rules_cache->rules_cache = NULL;
}
+static int gve_alloc_rss_config_cache(struct gve_priv *priv)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+
+ if (!priv->cache_rss_config)
+ return 0;
+
+ rss_config->hash_key = kcalloc(priv->rss_key_size,
+ sizeof(rss_config->hash_key[0]),
+ GFP_KERNEL);
+ if (!rss_config->hash_key)
+ return -ENOMEM;
+
+ rss_config->hash_lut = kcalloc(priv->rss_lut_size,
+ sizeof(rss_config->hash_lut[0]),
+ GFP_KERNEL);
+ if (!rss_config->hash_lut)
+ goto free_rss_key_cache;
+
+ return 0;
+
+free_rss_key_cache:
+ kfree(rss_config->hash_key);
+ rss_config->hash_key = NULL;
+ return -ENOMEM;
+}
+
+static void gve_free_rss_config_cache(struct gve_priv *priv)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+
+ kfree(rss_config->hash_key);
+ kfree(rss_config->hash_lut);
+
+ memset(rss_config, 0, sizeof(*rss_config));
+}
+
static int gve_alloc_counter_array(struct gve_priv *priv)
{
priv->counter_array =
@@ -575,9 +612,12 @@ static int gve_setup_device_resources(struct gve_priv *priv)
err = gve_alloc_flow_rule_caches(priv);
if (err)
return err;
- err = gve_alloc_counter_array(priv);
+ err = gve_alloc_rss_config_cache(priv);
if (err)
goto abort_with_flow_rule_caches;
+ err = gve_alloc_counter_array(priv);
+ if (err)
+ goto abort_with_rss_config_cache;
err = gve_alloc_notify_blocks(priv);
if (err)
goto abort_with_counter;
@@ -611,6 +651,12 @@ static int gve_setup_device_resources(struct gve_priv *priv)
}
}
+ err = gve_init_rss_config(priv, priv->rx_cfg.num_queues);
+ if (err) {
+ dev_err(&priv->pdev->dev, "Failed to init RSS config");
+ goto abort_with_ptype_lut;
+ }
+
err = gve_adminq_report_stats(priv, priv->stats_report_len,
priv->stats_report_bus,
GVE_STATS_REPORT_TIMER_PERIOD);
@@ -629,6 +675,8 @@ abort_with_ntfy_blocks:
gve_free_notify_blocks(priv);
abort_with_counter:
gve_free_counter_array(priv);
+abort_with_rss_config_cache:
+ gve_free_rss_config_cache(priv);
abort_with_flow_rule_caches:
gve_free_flow_rule_caches(priv);
@@ -669,6 +717,7 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
priv->ptype_lut_dqo = NULL;
gve_free_flow_rule_caches(priv);
+ gve_free_rss_config_cache(priv);
gve_free_counter_array(priv);
gve_free_notify_blocks(priv);
gve_free_stats_report(priv);
@@ -746,30 +795,13 @@ static struct gve_queue_page_list *gve_rx_get_qpl(struct gve_priv *priv, int idx
return rx->dqo.qpl;
}
-static int gve_register_xdp_qpls(struct gve_priv *priv)
-{
- int start_id;
- int err;
- int i;
-
- start_id = gve_xdp_tx_start_queue_id(priv);
- for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
- err = gve_register_qpl(priv, gve_tx_get_qpl(priv, i));
- /* This failure will trigger a reset - no need to clean up */
- if (err)
- return err;
- }
- return 0;
-}
-
static int gve_register_qpls(struct gve_priv *priv)
{
int num_tx_qpls, num_rx_qpls;
int err;
int i;
- num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
- gve_is_qpl(priv));
+ num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_is_qpl(priv));
num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
for (i = 0; i < num_tx_qpls; i++) {
@@ -787,30 +819,13 @@ static int gve_register_qpls(struct gve_priv *priv)
return 0;
}
-static int gve_unregister_xdp_qpls(struct gve_priv *priv)
-{
- int start_id;
- int err;
- int i;
-
- start_id = gve_xdp_tx_start_queue_id(priv);
- for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
- err = gve_unregister_qpl(priv, gve_tx_get_qpl(priv, i));
- /* This failure will trigger a reset - no need to clean */
- if (err)
- return err;
- }
- return 0;
-}
-
static int gve_unregister_qpls(struct gve_priv *priv)
{
int num_tx_qpls, num_rx_qpls;
int err;
int i;
- num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
- gve_is_qpl(priv));
+ num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_is_qpl(priv));
num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
for (i = 0; i < num_tx_qpls; i++) {
@@ -829,27 +844,6 @@ static int gve_unregister_qpls(struct gve_priv *priv)
return 0;
}
-static int gve_create_xdp_rings(struct gve_priv *priv)
-{
- int err;
-
- err = gve_adminq_create_tx_queues(priv,
- gve_xdp_tx_start_queue_id(priv),
- priv->num_xdp_queues);
- if (err) {
- netif_err(priv, drv, priv->dev, "failed to create %d XDP tx queues\n",
- priv->num_xdp_queues);
- /* This failure will trigger a reset - no need to clean
- * up
- */
- return err;
- }
- netif_dbg(priv, drv, priv->dev, "created %d XDP tx queues\n",
- priv->num_xdp_queues);
-
- return 0;
-}
-
static int gve_create_rings(struct gve_priv *priv)
{
int num_tx_queues = gve_num_tx_queues(priv);
@@ -905,7 +899,7 @@ static void init_xdp_sync_stats(struct gve_priv *priv)
int i;
/* Init stats */
- for (i = start_id; i < start_id + priv->num_xdp_queues; i++) {
+ for (i = start_id; i < start_id + priv->tx_cfg.num_xdp_queues; i++) {
int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
u64_stats_init(&priv->tx[i].statss);
@@ -930,24 +924,21 @@ static void gve_init_sync_stats(struct gve_priv *priv)
static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg)
{
- int num_xdp_queues = priv->xdp_prog ? priv->rx_cfg.num_queues : 0;
-
cfg->qcfg = &priv->tx_cfg;
cfg->raw_addressing = !gve_is_qpl(priv);
cfg->ring_size = priv->tx_desc_cnt;
- cfg->start_idx = 0;
- cfg->num_rings = priv->tx_cfg.num_queues + num_xdp_queues;
+ cfg->num_xdp_rings = cfg->qcfg->num_xdp_queues;
cfg->tx = priv->tx;
}
-static void gve_tx_stop_rings(struct gve_priv *priv, int start_id, int num_rings)
+static void gve_tx_stop_rings(struct gve_priv *priv, int num_rings)
{
int i;
if (!priv->tx)
return;
- for (i = start_id; i < start_id + num_rings; i++) {
+ for (i = 0; i < num_rings; i++) {
if (gve_is_gqi(priv))
gve_tx_stop_ring_gqi(priv, i);
else
@@ -955,12 +946,11 @@ static void gve_tx_stop_rings(struct gve_priv *priv, int start_id, int num_rings
}
}
-static void gve_tx_start_rings(struct gve_priv *priv, int start_id,
- int num_rings)
+static void gve_tx_start_rings(struct gve_priv *priv, int num_rings)
{
int i;
- for (i = start_id; i < start_id + num_rings; i++) {
+ for (i = 0; i < num_rings; i++) {
if (gve_is_gqi(priv))
gve_tx_start_ring_gqi(priv, i);
else
@@ -968,28 +958,6 @@ static void gve_tx_start_rings(struct gve_priv *priv, int start_id,
}
}
-static int gve_alloc_xdp_rings(struct gve_priv *priv)
-{
- struct gve_tx_alloc_rings_cfg cfg = {0};
- int err = 0;
-
- if (!priv->num_xdp_queues)
- return 0;
-
- gve_tx_get_curr_alloc_cfg(priv, &cfg);
- cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
- cfg.num_rings = priv->num_xdp_queues;
-
- err = gve_tx_alloc_rings_gqi(priv, &cfg);
- if (err)
- return err;
-
- gve_tx_start_rings(priv, cfg.start_idx, cfg.num_rings);
- init_xdp_sync_stats(priv);
-
- return 0;
-}
-
static int gve_queues_mem_alloc(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
@@ -1020,26 +988,6 @@ free_tx:
return err;
}
-static int gve_destroy_xdp_rings(struct gve_priv *priv)
-{
- int start_id;
- int err;
-
- start_id = gve_xdp_tx_start_queue_id(priv);
- err = gve_adminq_destroy_tx_queues(priv,
- start_id,
- priv->num_xdp_queues);
- if (err) {
- netif_err(priv, drv, priv->dev,
- "failed to destroy XDP queues\n");
- /* This failure will trigger a reset - no need to clean up */
- return err;
- }
- netif_dbg(priv, drv, priv->dev, "destroyed XDP queues\n");
-
- return 0;
-}
-
static int gve_destroy_rings(struct gve_priv *priv)
{
int num_tx_queues = gve_num_tx_queues(priv);
@@ -1064,20 +1012,6 @@ static int gve_destroy_rings(struct gve_priv *priv)
return 0;
}
-static void gve_free_xdp_rings(struct gve_priv *priv)
-{
- struct gve_tx_alloc_rings_cfg cfg = {0};
-
- gve_tx_get_curr_alloc_cfg(priv, &cfg);
- cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
- cfg.num_rings = priv->num_xdp_queues;
-
- if (priv->tx) {
- gve_tx_stop_rings(priv, cfg.start_idx, cfg.num_rings);
- gve_tx_free_rings_gqi(priv, &cfg);
- }
-}
-
static void gve_queues_mem_free(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *tx_cfg,
struct gve_rx_alloc_rings_cfg *rx_cfg)
@@ -1204,7 +1138,7 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
int i, j;
u32 tx_qid;
- if (!priv->num_xdp_queues)
+ if (!priv->tx_cfg.num_xdp_queues)
return 0;
for (i = 0; i < priv->rx_cfg.num_queues; i++) {
@@ -1215,8 +1149,14 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
napi->napi_id);
if (err)
goto err;
- err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
- MEM_TYPE_PAGE_SHARED, NULL);
+ if (gve_is_qpl(priv))
+ err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL);
+ else
+ err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ rx->dqo.page_pool);
if (err)
goto err;
rx->xsk_pool = xsk_get_pool_from_qid(dev, i);
@@ -1234,7 +1174,7 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
}
}
- for (i = 0; i < priv->num_xdp_queues; i++) {
+ for (i = 0; i < priv->tx_cfg.num_xdp_queues; i++) {
tx_qid = gve_xdp_tx_queue_id(priv, i);
priv->tx[tx_qid].xsk_pool = xsk_get_pool_from_qid(dev, i);
}
@@ -1255,7 +1195,7 @@ static void gve_unreg_xdp_info(struct gve_priv *priv)
{
int i, tx_qid;
- if (!priv->num_xdp_queues)
+ if (!priv->tx_cfg.num_xdp_queues || !priv->rx || !priv->tx)
return;
for (i = 0; i < priv->rx_cfg.num_queues; i++) {
@@ -1268,7 +1208,7 @@ static void gve_unreg_xdp_info(struct gve_priv *priv)
}
}
- for (i = 0; i < priv->num_xdp_queues; i++) {
+ for (i = 0; i < priv->tx_cfg.num_xdp_queues; i++) {
tx_qid = gve_xdp_tx_queue_id(priv, i);
priv->tx[tx_qid].xsk_pool = NULL;
}
@@ -1285,15 +1225,14 @@ static void gve_drain_page_cache(struct gve_priv *priv)
static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg)
{
- cfg->qcfg = &priv->rx_cfg;
+ cfg->qcfg_rx = &priv->rx_cfg;
cfg->qcfg_tx = &priv->tx_cfg;
cfg->raw_addressing = !gve_is_qpl(priv);
cfg->enable_header_split = priv->header_split_enabled;
cfg->ring_size = priv->rx_desc_cnt;
- cfg->packet_buffer_size = gve_is_gqi(priv) ?
- GVE_DEFAULT_RX_BUFFER_SIZE :
- priv->data_buffer_size_dqo;
+ cfg->packet_buffer_size = priv->rx_cfg.packet_buffer_size;
cfg->rx = priv->rx;
+ cfg->xdp = !!cfg->qcfg_tx->num_xdp_queues;
}
void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
@@ -1366,17 +1305,13 @@ static int gve_queues_start(struct gve_priv *priv,
/* Record new configs into priv */
priv->tx_cfg = *tx_alloc_cfg->qcfg;
- priv->rx_cfg = *rx_alloc_cfg->qcfg;
+ priv->tx_cfg.num_xdp_queues = tx_alloc_cfg->num_xdp_rings;
+ priv->rx_cfg = *rx_alloc_cfg->qcfg_rx;
priv->tx_desc_cnt = tx_alloc_cfg->ring_size;
priv->rx_desc_cnt = rx_alloc_cfg->ring_size;
- if (priv->xdp_prog)
- priv->num_xdp_queues = priv->rx_cfg.num_queues;
- else
- priv->num_xdp_queues = 0;
-
- gve_tx_start_rings(priv, 0, tx_alloc_cfg->num_rings);
- gve_rx_start_rings(priv, rx_alloc_cfg->qcfg->num_queues);
+ gve_tx_start_rings(priv, gve_num_tx_queues(priv));
+ gve_rx_start_rings(priv, rx_alloc_cfg->qcfg_rx->num_queues);
gve_init_sync_stats(priv);
err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
@@ -1390,12 +1325,18 @@ static int gve_queues_start(struct gve_priv *priv,
if (err)
goto stop_and_free_rings;
+ if (rx_alloc_cfg->reset_rss) {
+ err = gve_init_rss_config(priv, priv->rx_cfg.num_queues);
+ if (err)
+ goto reset;
+ }
+
err = gve_register_qpls(priv);
if (err)
goto reset;
priv->header_split_enabled = rx_alloc_cfg->enable_header_split;
- priv->data_buffer_size_dqo = rx_alloc_cfg->packet_buffer_size;
+ priv->rx_cfg.packet_buffer_size = rx_alloc_cfg->packet_buffer_size;
err = gve_create_rings(priv);
if (err)
@@ -1422,7 +1363,7 @@ reset:
/* return the original error */
return err;
stop_and_free_rings:
- gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
+ gve_tx_stop_rings(priv, gve_num_tx_queues(priv));
gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
gve_queues_mem_remove(priv);
return err;
@@ -1471,7 +1412,7 @@ static int gve_queues_stop(struct gve_priv *priv)
gve_unreg_xdp_info(priv);
- gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
+ gve_tx_stop_rings(priv, gve_num_tx_queues(priv));
gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
priv->interface_down_cnt++;
@@ -1501,56 +1442,6 @@ static int gve_close(struct net_device *dev)
return 0;
}
-static int gve_remove_xdp_queues(struct gve_priv *priv)
-{
- int err;
-
- err = gve_destroy_xdp_rings(priv);
- if (err)
- return err;
-
- err = gve_unregister_xdp_qpls(priv);
- if (err)
- return err;
-
- gve_unreg_xdp_info(priv);
- gve_free_xdp_rings(priv);
-
- priv->num_xdp_queues = 0;
- return 0;
-}
-
-static int gve_add_xdp_queues(struct gve_priv *priv)
-{
- int err;
-
- priv->num_xdp_queues = priv->rx_cfg.num_queues;
-
- err = gve_alloc_xdp_rings(priv);
- if (err)
- goto err;
-
- err = gve_reg_xdp_info(priv, priv->dev);
- if (err)
- goto free_xdp_rings;
-
- err = gve_register_xdp_qpls(priv);
- if (err)
- goto free_xdp_rings;
-
- err = gve_create_xdp_rings(priv);
- if (err)
- goto free_xdp_rings;
-
- return 0;
-
-free_xdp_rings:
- gve_free_xdp_rings(priv);
-err:
- priv->num_xdp_queues = 0;
- return err;
-}
-
static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
{
if (!gve_get_napi_enabled(priv))
@@ -1568,6 +1459,19 @@ static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
}
}
+static int gve_configure_rings_xdp(struct gve_priv *priv,
+ u16 num_xdp_rings)
+{
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+ tx_alloc_cfg.num_xdp_rings = num_xdp_rings;
+
+ rx_alloc_cfg.xdp = !!num_xdp_rings;
+ return gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+}
+
static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
@@ -1580,29 +1484,26 @@ static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog,
WRITE_ONCE(priv->xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
+
+ /* Update priv XDP queue configuration */
+ priv->tx_cfg.num_xdp_queues = priv->xdp_prog ?
+ priv->rx_cfg.num_queues : 0;
return 0;
}
- gve_turndown(priv);
- if (!old_prog && prog) {
- // Allocate XDP TX queues if an XDP program is
- // being installed
- err = gve_add_xdp_queues(priv);
- if (err)
- goto out;
- } else if (old_prog && !prog) {
- // Remove XDP TX queues if an XDP program is
- // being uninstalled
- err = gve_remove_xdp_queues(priv);
- if (err)
- goto out;
- }
+ if (!old_prog && prog)
+ err = gve_configure_rings_xdp(priv, priv->rx_cfg.num_queues);
+ else if (old_prog && !prog)
+ err = gve_configure_rings_xdp(priv, 0);
+
+ if (err)
+ goto out;
+
WRITE_ONCE(priv->xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
out:
- gve_turnup(priv);
status = ioread32be(&priv->reg_bar0->device_status);
gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
return err;
@@ -1736,6 +1637,7 @@ static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
static int verify_xdp_configuration(struct net_device *dev)
{
struct gve_priv *priv = netdev_priv(dev);
+ u16 max_xdp_mtu;
if (dev->features & NETIF_F_LRO) {
netdev_warn(dev, "XDP is not supported when LRO is on.\n");
@@ -1748,7 +1650,11 @@ static int verify_xdp_configuration(struct net_device *dev)
return -EOPNOTSUPP;
}
- if (dev->mtu > GVE_DEFAULT_RX_BUFFER_SIZE - sizeof(struct ethhdr) - GVE_RX_PAD) {
+ max_xdp_mtu = priv->rx_cfg.packet_buffer_size - sizeof(struct ethhdr);
+ if (priv->queue_format == GVE_GQI_QPL_FORMAT)
+ max_xdp_mtu -= GVE_RX_PAD;
+
+ if (dev->mtu > max_xdp_mtu) {
netdev_warn(dev, "XDP is not supported for mtu %d.\n",
dev->mtu);
return -EOPNOTSUPP;
@@ -1786,6 +1692,26 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
+int gve_init_rss_config(struct gve_priv *priv, u16 num_queues)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+ struct ethtool_rxfh_param rxfh = {0};
+ u16 i;
+
+ if (!priv->cache_rss_config)
+ return 0;
+
+ for (i = 0; i < priv->rss_lut_size; i++)
+ rss_config->hash_lut[i] =
+ ethtool_rxfh_indir_default(i, num_queues);
+
+ netdev_rss_key_fill(rss_config->hash_key, priv->rss_key_size);
+
+ rxfh.hfunc = ETH_RSS_HASH_TOP;
+
+ return gve_adminq_configure_rss(priv, &rxfh);
+}
+
int gve_flow_rules_reset(struct gve_priv *priv)
{
if (!priv->max_flow_rules)
@@ -1833,12 +1759,12 @@ int gve_adjust_config(struct gve_priv *priv,
}
int gve_adjust_queues(struct gve_priv *priv,
- struct gve_queue_config new_rx_config,
- struct gve_queue_config new_tx_config)
+ struct gve_rx_queue_config new_rx_config,
+ struct gve_tx_queue_config new_tx_config,
+ bool reset_rss)
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
- int num_xdp_queues;
int err;
gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
@@ -1846,18 +1772,19 @@ int gve_adjust_queues(struct gve_priv *priv,
/* Relay the new config from ethtool */
tx_alloc_cfg.qcfg = &new_tx_config;
rx_alloc_cfg.qcfg_tx = &new_tx_config;
- rx_alloc_cfg.qcfg = &new_rx_config;
- tx_alloc_cfg.num_rings = new_tx_config.num_queues;
-
- /* Add dedicated XDP TX queues if enabled. */
- num_xdp_queues = priv->xdp_prog ? new_rx_config.num_queues : 0;
- tx_alloc_cfg.num_rings += num_xdp_queues;
+ rx_alloc_cfg.qcfg_rx = &new_rx_config;
+ rx_alloc_cfg.reset_rss = reset_rss;
if (netif_running(priv->dev)) {
err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
return err;
}
/* Set the config for the next up. */
+ if (reset_rss) {
+ err = gve_init_rss_config(priv, new_rx_config.num_queues);
+ if (err)
+ return err;
+ }
priv->tx_cfg = new_tx_config;
priv->rx_cfg = new_rx_config;
@@ -1886,7 +1813,7 @@ static void gve_turndown(struct gve_priv *priv)
netif_queue_set_napi(priv->dev, idx,
NETDEV_QUEUE_TYPE_TX, NULL);
- napi_disable(&block->napi);
+ napi_disable_locked(&block->napi);
}
for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -1897,7 +1824,7 @@ static void gve_turndown(struct gve_priv *priv)
netif_queue_set_napi(priv->dev, idx, NETDEV_QUEUE_TYPE_RX,
NULL);
- napi_disable(&block->napi);
+ napi_disable_locked(&block->napi);
}
/* Stop tx queues */
@@ -1927,7 +1854,7 @@ static void gve_turnup(struct gve_priv *priv)
if (!gve_tx_was_added_to_block(priv, idx))
continue;
- napi_enable(&block->napi);
+ napi_enable_locked(&block->napi);
if (idx < priv->tx_cfg.num_queues)
netif_queue_set_napi(priv->dev, idx,
@@ -1955,7 +1882,7 @@ static void gve_turnup(struct gve_priv *priv)
if (!gve_rx_was_added_to_block(priv, idx))
continue;
- napi_enable(&block->napi);
+ napi_enable_locked(&block->napi);
netif_queue_set_napi(priv->dev, idx, NETDEV_QUEUE_TYPE_RX,
&block->napi);
@@ -1974,7 +1901,7 @@ static void gve_turnup(struct gve_priv *priv)
napi_schedule(&block->napi);
}
- if (priv->num_xdp_queues && gve_supports_xdp_xmit(priv))
+ if (priv->tx_cfg.num_xdp_queues && gve_supports_xdp_xmit(priv))
xdp_features_set_redirect_target(priv->dev, false);
gve_set_napi_enabled(priv);
@@ -2330,6 +2257,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
priv->rx_cfg.num_queues);
}
+ priv->tx_cfg.num_xdp_queues = 0;
dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
@@ -2710,7 +2638,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->service_task_flags = 0x0;
priv->state_flags = 0x0;
priv->ethtool_flags = 0x0;
- priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
+ priv->rx_cfg.packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
priv->max_rx_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_set_probe_in_progress(priv);
@@ -2805,6 +2733,7 @@ static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
priv->suspend_cnt++;
rtnl_lock();
+ netdev_lock(netdev);
if (was_up && gve_close(priv->dev)) {
/* If the dev was up, attempt to close, if close fails, reset */
gve_reset_and_teardown(priv, was_up);
@@ -2813,6 +2742,7 @@ static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
gve_teardown_priv_resources(priv);
}
priv->up_before_suspend = was_up;
+ netdev_unlock(netdev);
rtnl_unlock();
return 0;
}
@@ -2825,7 +2755,9 @@ static int gve_resume(struct pci_dev *pdev)
priv->resume_cnt++;
rtnl_lock();
+ netdev_lock(netdev);
err = gve_reset_recovery(priv, priv->up_before_suspend);
+ netdev_unlock(netdev);
rtnl_unlock();
return err;
}
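
The suspend/resume changes wrap the close and reset-recovery paths in the per-netdevice instance lock inside the RTNL, matching the napi_disable_locked()/napi_enable_locked() calls used in gve_turndown()/gve_turnup() above. A hedged sketch of the lock nesting only; the teardown work itself is omitted:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static void demo_pm_transition(struct net_device *netdev)
{
	rtnl_lock();
	netdev_lock(netdev);

	/*
	 * Driver teardown or reset recovery runs here; the _locked NAPI
	 * enable/disable variants expect this instance lock to be held.
	 */

	netdev_unlock(netdev);
	rtnl_unlock();
}
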
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index acb73d4d0de6..90e875c1832f 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -141,12 +141,15 @@ void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}
-static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
- dma_addr_t addr, struct page *page, __be64 *slot_addr)
+static void gve_setup_rx_buffer(struct gve_rx_ring *rx,
+ struct gve_rx_slot_page_info *page_info,
+ dma_addr_t addr, struct page *page,
+ __be64 *slot_addr)
{
page_info->page = page;
page_info->page_offset = 0;
page_info->page_address = page_address(page);
+ page_info->buf_size = rx->packet_buffer_size;
*slot_addr = cpu_to_be64(addr);
/* The page already has 1 ref */
page_ref_add(page, INT_MAX - 1);
@@ -171,7 +174,7 @@ static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
return err;
}
- gve_setup_rx_buffer(page_info, dma, page, &data_slot->addr);
+ gve_setup_rx_buffer(rx, page_info, dma, page, &data_slot->addr);
return 0;
}
@@ -199,7 +202,8 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
struct page *page = rx->data.qpl->pages[i];
dma_addr_t addr = i * PAGE_SIZE;
- gve_setup_rx_buffer(&rx->data.page_info[i], addr, page,
+ gve_setup_rx_buffer(rx, &rx->data.page_info[i], addr,
+ page,
&rx->data.data_ring[i].qpl_offset);
continue;
}
@@ -222,6 +226,7 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
rx->qpl_copy_pool[j].page = page;
rx->qpl_copy_pool[j].page_offset = 0;
rx->qpl_copy_pool[j].page_address = page_address(page);
+ rx->qpl_copy_pool[j].buf_size = rx->packet_buffer_size;
/* The page already has 1 ref. */
page_ref_add(page, INT_MAX - 1);
@@ -283,6 +288,7 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
rx->gve = priv;
rx->q_num = idx;
+ rx->packet_buffer_size = cfg->packet_buffer_size;
rx->mask = slots - 1;
rx->data.raw_addressing = cfg->raw_addressing;
@@ -351,7 +357,6 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
rx->db_threshold = slots / 2;
gve_rx_init_ring_state_gqi(rx);
- rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_rx_ctx_clear(&rx->ctx);
return 0;
@@ -385,12 +390,12 @@ int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
int err = 0;
int i, j;
- rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
+ rx = kvcalloc(cfg->qcfg_rx->max_queues, sizeof(struct gve_rx_ring),
GFP_KERNEL);
if (!rx)
return -ENOMEM;
- for (i = 0; i < cfg->qcfg->num_queues; i++) {
+ for (i = 0; i < cfg->qcfg_rx->num_queues; i++) {
err = gve_rx_alloc_ring_gqi(priv, cfg, &rx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -419,7 +424,7 @@ void gve_rx_free_rings_gqi(struct gve_priv *priv,
if (!rx)
return;
- for (i = 0; i < cfg->qcfg->num_queues; i++)
+ for (i = 0; i < cfg->qcfg_rx->num_queues; i++)
gve_rx_free_ring_gqi(priv, &rx[i], cfg);
kvfree(rx);
@@ -590,7 +595,7 @@ static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx,
copy_page_info->pad = page_info->pad;
skb = gve_rx_add_frags(napi, copy_page_info,
- rx->packet_buffer_size, len, ctx);
+ copy_page_info->buf_size, len, ctx);
if (unlikely(!skb))
return NULL;
@@ -630,7 +635,8 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev,
* device.
*/
if (page_info->can_flip) {
- skb = gve_rx_add_frags(napi, page_info, rx->packet_buffer_size, len, ctx);
+ skb = gve_rx_add_frags(napi, page_info, page_info->buf_size,
+ len, ctx);
/* No point in recycling if we didn't get the skb */
if (skb) {
/* Make sure that the page isn't freed. */
@@ -680,7 +686,7 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
skb = gve_rx_raw_addressing(&priv->pdev->dev, netdev,
page_info, len, napi,
data_slot,
- rx->packet_buffer_size, ctx);
+ page_info->buf_size, ctx);
} else {
skb = gve_rx_qpl(&priv->pdev->dev, netdev, rx,
page_info, len, napi, data_slot);
@@ -855,7 +861,7 @@ static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
void *old_data;
int xdp_act;
- xdp_init_buff(&xdp, rx->packet_buffer_size, &rx->xdp_rxq);
+ xdp_init_buff(&xdp, page_info->buf_size, &rx->xdp_rxq);
xdp_prepare_buff(&xdp, page_info->page_address +
page_info->page_offset, GVE_RX_PAD,
len, false);
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index f0674a443567..dcb0545baa50 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -114,7 +114,8 @@ void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
if (!gve_rx_was_added_to_block(priv, idx))
return;
- page_pool_disable_direct_recycling(rx->dqo.page_pool);
+ if (rx->dqo.page_pool)
+ page_pool_disable_direct_recycling(rx->dqo.page_pool);
gve_remove_napi(priv, ntfy_idx);
gve_rx_remove_from_block(priv, idx);
gve_rx_reset_ring_dqo(priv, idx);
@@ -223,6 +224,15 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
memset(rx, 0, sizeof(*rx));
rx->gve = priv;
rx->q_num = idx;
+ rx->packet_buffer_size = cfg->packet_buffer_size;
+
+ if (cfg->xdp) {
+ rx->packet_buffer_truesize = GVE_XDP_RX_BUFFER_SIZE_DQO;
+ rx->rx_headroom = XDP_PACKET_HEADROOM;
+ } else {
+ rx->packet_buffer_truesize = rx->packet_buffer_size;
+ rx->rx_headroom = 0;
+ }
rx->dqo.num_buf_states = cfg->raw_addressing ? buffer_queue_slots :
gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
@@ -253,7 +263,7 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
goto err;
if (cfg->raw_addressing) {
- pool = gve_rx_create_page_pool(priv, rx);
+ pool = gve_rx_create_page_pool(priv, rx, cfg->xdp);
if (IS_ERR(pool))
goto err;
@@ -299,12 +309,12 @@ int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
int err;
int i;
- rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
+ rx = kvcalloc(cfg->qcfg_rx->max_queues, sizeof(struct gve_rx_ring),
GFP_KERNEL);
if (!rx)
return -ENOMEM;
- for (i = 0; i < cfg->qcfg->num_queues; i++) {
+ for (i = 0; i < cfg->qcfg_rx->num_queues; i++) {
err = gve_rx_alloc_ring_dqo(priv, cfg, &rx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -333,7 +343,7 @@ void gve_rx_free_rings_dqo(struct gve_priv *priv,
if (!rx)
return;
- for (i = 0; i < cfg->qcfg->num_queues; i++)
+ for (i = 0; i < cfg->qcfg_rx->num_queues; i++)
gve_rx_free_ring_dqo(priv, &rx[i], cfg);
kvfree(rx);
@@ -476,6 +486,25 @@ static int gve_rx_copy_ondemand(struct gve_rx_ring *rx,
return 0;
}
+static void gve_skb_add_rx_frag(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state,
+ int num_frags, u16 buf_len)
+{
+ if (rx->dqo.page_pool) {
+ skb_add_rx_frag_netmem(rx->ctx.skb_tail, num_frags,
+ buf_state->page_info.netmem,
+ buf_state->page_info.page_offset +
+ buf_state->page_info.pad, buf_len,
+ buf_state->page_info.buf_size);
+ } else {
+ skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
+ buf_state->page_info.page,
+ buf_state->page_info.page_offset +
+ buf_state->page_info.pad, buf_len,
+ buf_state->page_info.buf_size);
+ }
+}
+
/* Chains multi skbs for single rx packet.
* Returns 0 if buffer is appended, -1 otherwise.
*/
@@ -513,14 +542,34 @@ static int gve_rx_append_frags(struct napi_struct *napi,
if (gve_rx_should_trigger_copy_ondemand(rx))
return gve_rx_copy_ondemand(rx, buf_state, buf_len);
- skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
- buf_state->page_info.page,
- buf_state->page_info.page_offset,
- buf_len, buf_state->page_info.buf_size);
+ gve_skb_add_rx_frag(rx, buf_state, num_frags, buf_len);
gve_reuse_buffer(rx, buf_state);
return 0;
}
+static void gve_xdp_done_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct xdp_buff *xdp, struct bpf_prog *xprog,
+ int xdp_act,
+ struct gve_rx_buf_state_dqo *buf_state)
+{
+ u64_stats_update_begin(&rx->statss);
+ switch (xdp_act) {
+ case XDP_ABORTED:
+ case XDP_DROP:
+ default:
+ rx->xdp_actions[xdp_act]++;
+ break;
+ case XDP_TX:
+ rx->xdp_tx_errors++;
+ break;
+ case XDP_REDIRECT:
+ rx->xdp_redirect_errors++;
+ break;
+ }
+ u64_stats_update_end(&rx->statss);
+ gve_free_buffer(rx, buf_state);
+}
+
/* Returns 0 if descriptor is completed successfully.
* Returns -EINVAL if descriptor is invalid.
* Returns -ENOMEM if data cannot be copied to skb.
@@ -535,6 +584,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
const bool hsplit = compl_desc->split_header;
struct gve_rx_buf_state_dqo *buf_state;
struct gve_priv *priv = rx->gve;
+ struct bpf_prog *xprog;
u16 buf_len;
u16 hdr_len;
@@ -561,7 +611,12 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
/* Page might not have been used for a while and was likely last written
* by a different thread.
*/
- prefetch(buf_state->page_info.page);
+ if (rx->dqo.page_pool) {
+ if (!netmem_is_net_iov(buf_state->page_info.netmem))
+ prefetch(netmem_to_page(buf_state->page_info.netmem));
+ } else {
+ prefetch(buf_state->page_info.page);
+ }
/* Copy the header into the skb in the case of header split */
if (hsplit) {
@@ -590,7 +645,8 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
/* Sync the portion of dma buffer for CPU to read. */
dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
- buf_state->page_info.page_offset,
+ buf_state->page_info.page_offset +
+ buf_state->page_info.pad,
buf_len, DMA_FROM_DEVICE);
/* Append to current skb if one exists. */
@@ -602,6 +658,34 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
return 0;
}
+ xprog = READ_ONCE(priv->xdp_prog);
+ if (xprog) {
+ struct xdp_buff xdp;
+ void *old_data;
+ int xdp_act;
+
+ xdp_init_buff(&xdp, buf_state->page_info.buf_size,
+ &rx->xdp_rxq);
+ xdp_prepare_buff(&xdp,
+ buf_state->page_info.page_address +
+ buf_state->page_info.page_offset,
+ buf_state->page_info.pad,
+ buf_len, false);
+ old_data = xdp.data;
+ xdp_act = bpf_prog_run_xdp(xprog, &xdp);
+ buf_state->page_info.pad += xdp.data - old_data;
+ buf_len = xdp.data_end - xdp.data;
+ if (xdp_act != XDP_PASS) {
+ gve_xdp_done_dqo(priv, rx, &xdp, xprog, xdp_act,
+ buf_state);
+ return 0;
+ }
+
+ u64_stats_update_begin(&rx->statss);
+ rx->xdp_actions[XDP_PASS]++;
+ u64_stats_update_end(&rx->statss);
+ }
+
if (eop && buf_len <= priv->rx_copybreak) {
rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
&buf_state->page_info, buf_len);
@@ -632,9 +716,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
if (rx->dqo.page_pool)
skb_mark_for_recycle(rx->ctx.skb_head);
- skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page,
- buf_state->page_info.page_offset, buf_len,
- buf_state->page_info.buf_size);
+ gve_skb_add_rx_frag(rx, buf_state, 0, buf_len);
gve_reuse_buffer(rx, buf_state);
return 0;
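
The gve_rx_dqo.c hunks above fold XDP into the DQO completion path: build an xdp_buff over the already DMA-synced fragment, run the program, then absorb any head/length adjustment the program made back into the buffer state. A minimal sketch of that pattern follows; the helper name and parameters are hypothetical, not gve's actual code.

#include <linux/filter.h>
#include <net/xdp.h>

/* Minimal sketch, assuming the caller already synced the buffer for CPU
 * access; helper name and signature are hypothetical, not gve's.
 */
static u32 run_xdp_on_rx_buf(struct bpf_prog *xprog, struct xdp_rxq_info *rxq,
			     void *page_addr, u32 offset, u16 *pad, u16 *len,
			     u32 frame_sz)
{
	struct xdp_buff xdp;
	void *old_data;
	u32 act;

	xdp_init_buff(&xdp, frame_sz, rxq);	/* frame_sz = buffer truesize */
	xdp_prepare_buff(&xdp, page_addr + offset, *pad, *len, false);

	old_data = xdp.data;
	act = bpf_prog_run_xdp(xprog, &xdp);

	/* the program may move xdp.data, e.g. via bpf_xdp_adjust_head() */
	*pad += xdp.data - old_data;
	*len = xdp.data_end - xdp.data;
	return act;
}
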
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 4350ebd9c2bd..1b40bf0c811a 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -334,27 +334,23 @@ int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg)
{
struct gve_tx_ring *tx = cfg->tx;
+ int total_queues;
int err = 0;
int i, j;
- if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
+ total_queues = cfg->qcfg->num_queues + cfg->num_xdp_rings;
+ if (total_queues > cfg->qcfg->max_queues) {
netif_err(priv, drv, priv->dev,
"Cannot alloc more than the max num of Tx rings\n");
return -EINVAL;
}
- if (cfg->start_idx == 0) {
- tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
- GFP_KERNEL);
- if (!tx)
- return -ENOMEM;
- } else if (!tx) {
- netif_err(priv, drv, priv->dev,
- "Cannot alloc tx rings from a nonzero start idx without tx array\n");
- return -EINVAL;
- }
+ tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
+ GFP_KERNEL);
+ if (!tx)
+ return -ENOMEM;
- for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) {
+ for (i = 0; i < total_queues; i++) {
err = gve_tx_alloc_ring_gqi(priv, cfg, &tx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -370,8 +366,7 @@ int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
cleanup:
for (j = 0; j < i; j++)
gve_tx_free_ring_gqi(priv, &tx[j], cfg);
- if (cfg->start_idx == 0)
- kvfree(tx);
+ kvfree(tx);
return err;
}
@@ -384,13 +379,11 @@ void gve_tx_free_rings_gqi(struct gve_priv *priv,
if (!tx)
return;
- for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++)
+ for (i = 0; i < cfg->qcfg->num_queues + cfg->qcfg->num_xdp_queues; i++)
gve_tx_free_ring_gqi(priv, &tx[i], cfg);
- if (cfg->start_idx == 0) {
- kvfree(tx);
- cfg->tx = NULL;
- }
+ kvfree(tx);
+ cfg->tx = NULL;
}
/* gve_tx_avail - Calculates the number of slots available in the ring
@@ -844,7 +837,7 @@ int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
return -ENETDOWN;
qid = gve_xdp_tx_queue_id(priv,
- smp_processor_id() % priv->num_xdp_queues);
+ smp_processor_id() % priv->tx_cfg.num_xdp_queues);
tx = &priv->tx[qid];
@@ -959,13 +952,9 @@ static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
spin_lock(&tx->xdp_lock);
while (sent < budget) {
- if (!gve_can_tx(tx, GVE_TX_START_THRESH))
- goto out;
-
- if (!xsk_tx_peek_desc(tx->xsk_pool, &desc)) {
- tx->xdp_xsk_done = tx->xdp_xsk_wakeup;
+ if (!gve_can_tx(tx, GVE_TX_START_THRESH) ||
+ !xsk_tx_peek_desc(tx->xsk_pool, &desc))
goto out;
- }
data = xsk_buff_raw_get_data(tx->xsk_pool, desc.addr);
nsegs = gve_tx_fill_xdp(priv, tx, data, desc.len, NULL, true);
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 394debc62268..2eba868d8037 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -379,27 +379,23 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg)
{
struct gve_tx_ring *tx = cfg->tx;
+ int total_queues;
int err = 0;
int i, j;
- if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
+ total_queues = cfg->qcfg->num_queues + cfg->num_xdp_rings;
+ if (total_queues > cfg->qcfg->max_queues) {
netif_err(priv, drv, priv->dev,
"Cannot alloc more than the max num of Tx rings\n");
return -EINVAL;
}
- if (cfg->start_idx == 0) {
- tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
- GFP_KERNEL);
- if (!tx)
- return -ENOMEM;
- } else if (!tx) {
- netif_err(priv, drv, priv->dev,
- "Cannot alloc tx rings from a nonzero start idx without tx array\n");
- return -EINVAL;
- }
+ tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
+ GFP_KERNEL);
+ if (!tx)
+ return -ENOMEM;
- for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) {
+ for (i = 0; i < total_queues; i++) {
err = gve_tx_alloc_ring_dqo(priv, cfg, &tx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -415,8 +411,7 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
err:
for (j = 0; j < i; j++)
gve_tx_free_ring_dqo(priv, &tx[j], cfg);
- if (cfg->start_idx == 0)
- kvfree(tx);
+ kvfree(tx);
return err;
}
@@ -429,13 +424,11 @@ void gve_tx_free_rings_dqo(struct gve_priv *priv,
if (!tx)
return;
- for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++)
+ for (i = 0; i < cfg->qcfg->num_queues + cfg->qcfg->num_xdp_queues; i++)
gve_tx_free_ring_dqo(priv, &tx[i], cfg);
- if (cfg->start_idx == 0) {
- kvfree(tx);
- cfg->tx = NULL;
- }
+ kvfree(tx);
+ cfg->tx = NULL;
}
/* Returns the number of slots available in the ring */
diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c
index 30fef100257e..ace9b8698021 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.c
+++ b/drivers/net/ethernet/google/gve/gve_utils.c
@@ -110,13 +110,13 @@ void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
{
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
- netif_napi_add(priv->dev, &block->napi, gve_poll);
- netif_napi_set_irq(&block->napi, block->irq);
+ netif_napi_add_locked(priv->dev, &block->napi, gve_poll);
+ netif_napi_set_irq_locked(&block->napi, block->irq);
}
void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
{
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
- netif_napi_del(&block->napi);
+ netif_napi_del_locked(&block->napi);
}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/Makefile b/drivers/net/ethernet/hisilicon/hibmcge/Makefile
index 7ea15f9ef849..1a9da564b306 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/Makefile
+++ b/drivers/net/ethernet/hisilicon/hibmcge/Makefile
@@ -6,4 +6,4 @@
obj-$(CONFIG_HIBMCGE) += hibmcge.o
hibmcge-objs = hbg_main.o hbg_hw.o hbg_mdio.o hbg_irq.o hbg_txrx.o hbg_ethtool.o \
- hbg_debugfs.o hbg_err.o
+ hbg_debugfs.o hbg_err.o hbg_diagnose.o
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
index b4300d8ea4ad..f8cdab62bf85 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
@@ -36,6 +36,8 @@ enum hbg_nic_state {
HBG_NIC_STATE_EVENT_HANDLING = 0,
HBG_NIC_STATE_RESETTING,
HBG_NIC_STATE_RESET_FAIL,
+ HBG_NIC_STATE_NEED_RESET, /* trigger a reset in scheduled task */
+ HBG_NIC_STATE_NP_LINK_FAIL,
};
enum hbg_reset_type {
@@ -81,6 +83,7 @@ enum hbg_hw_event_type {
HBG_HW_EVENT_NONE = 0,
HBG_HW_EVENT_INIT, /* driver is loading */
HBG_HW_EVENT_RESET,
+ HBG_HW_EVENT_CORE_RESET,
};
struct hbg_dev_specs {
@@ -104,6 +107,7 @@ struct hbg_irq_info {
u32 mask;
bool re_enable;
bool need_print;
+ bool need_reset;
u64 count;
void (*irq_handle)(struct hbg_priv *priv, struct hbg_irq_info *info);
@@ -142,6 +146,118 @@ struct hbg_user_def {
struct ethtool_pauseparam pause_param;
};
+struct hbg_stats {
+ u64 rx_desc_drop;
+ u64 rx_desc_l2_err_cnt;
+ u64 rx_desc_pkt_len_err_cnt;
+ u64 rx_desc_l3l4_err_cnt;
+ u64 rx_desc_l3_wrong_head_cnt;
+ u64 rx_desc_l3_csum_err_cnt;
+ u64 rx_desc_l3_len_err_cnt;
+ u64 rx_desc_l3_zero_ttl_cnt;
+ u64 rx_desc_l3_other_cnt;
+ u64 rx_desc_l4_err_cnt;
+ u64 rx_desc_l4_wrong_head_cnt;
+ u64 rx_desc_l4_len_err_cnt;
+ u64 rx_desc_l4_csum_err_cnt;
+ u64 rx_desc_l4_zero_port_num_cnt;
+ u64 rx_desc_l4_other_cnt;
+ u64 rx_desc_frag_cnt;
+ u64 rx_desc_ip_ver_err_cnt;
+ u64 rx_desc_ipv4_pkt_cnt;
+ u64 rx_desc_ipv6_pkt_cnt;
+ u64 rx_desc_no_ip_pkt_cnt;
+ u64 rx_desc_ip_pkt_cnt;
+ u64 rx_desc_tcp_pkt_cnt;
+ u64 rx_desc_udp_pkt_cnt;
+ u64 rx_desc_vlan_pkt_cnt;
+ u64 rx_desc_icmp_pkt_cnt;
+ u64 rx_desc_arp_pkt_cnt;
+ u64 rx_desc_rarp_pkt_cnt;
+ u64 rx_desc_multicast_pkt_cnt;
+ u64 rx_desc_broadcast_pkt_cnt;
+ u64 rx_desc_ipsec_pkt_cnt;
+ u64 rx_desc_ip_opt_pkt_cnt;
+ u64 rx_desc_key_not_match_cnt;
+
+ u64 rx_octets_total_ok_cnt;
+ u64 rx_uc_pkt_cnt;
+ u64 rx_mc_pkt_cnt;
+ u64 rx_bc_pkt_cnt;
+ u64 rx_vlan_pkt_cnt;
+ u64 rx_octets_bad_cnt;
+ u64 rx_octets_total_filt_cnt;
+ u64 rx_filt_pkt_cnt;
+ u64 rx_trans_pkt_cnt;
+ u64 rx_framesize_64;
+ u64 rx_framesize_65_127;
+ u64 rx_framesize_128_255;
+ u64 rx_framesize_256_511;
+ u64 rx_framesize_512_1023;
+ u64 rx_framesize_1024_1518;
+ u64 rx_framesize_bt_1518;
+ u64 rx_fcs_error_cnt;
+ u64 rx_data_error_cnt;
+ u64 rx_align_error_cnt;
+ u64 rx_pause_macctl_frame_cnt;
+ u64 rx_unknown_macctl_frame_cnt;
+ /* crc ok, > max_frm_size, < 2 * max_frm_size */
+ u64 rx_frame_long_err_cnt;
+ /* crc fail, > max_frm_size, < 2 * max_frm_size */
+ u64 rx_jabber_err_cnt;
+ /* > 2 * max_frm_size */
+ u64 rx_frame_very_long_err_cnt;
+ /* < 64 bytes, >= short_runts_thr */
+ u64 rx_frame_runt_err_cnt;
+ /* < short_runts_thr */
+ u64 rx_frame_short_err_cnt;
+ /* PCU: dropped when the RX FIFO is full. */
+ u64 rx_overflow_cnt;
+ /* GMAC: the count of overflows of the RX FIFO */
+ u64 rx_overrun_cnt;
+ /* PCU: the count of buffer alloc errors in RX */
+ u64 rx_bufrq_err_cnt;
+ /* PCU: the count of write descriptor errors in RX */
+ u64 rx_we_err_cnt;
+ /* GMAC: the count of pkts that contain PAD but length is not 64 */
+ u64 rx_lengthfield_err_cnt;
+ u64 rx_fail_comma_cnt;
+
+ u64 rx_dma_err_cnt;
+ u64 rx_fifo_less_empty_thrsld_cnt;
+
+ u64 tx_octets_total_ok_cnt;
+ u64 tx_uc_pkt_cnt;
+ u64 tx_mc_pkt_cnt;
+ u64 tx_bc_pkt_cnt;
+ u64 tx_vlan_pkt_cnt;
+ u64 tx_octets_bad_cnt;
+ u64 tx_trans_pkt_cnt;
+ u64 tx_pause_frame_cnt;
+ u64 tx_framesize_64;
+ u64 tx_framesize_65_127;
+ u64 tx_framesize_128_255;
+ u64 tx_framesize_256_511;
+ u64 tx_framesize_512_1023;
+ u64 tx_framesize_1024_1518;
+ u64 tx_framesize_bt_1518;
+ /* GMAC: the count of times that frames fail to be transmitted
+ * due to internal errors.
+ */
+ u64 tx_underrun_err_cnt;
+ u64 tx_add_cs_fail_cnt;
+ /* PCU: the count of buffer free errors in TX */
+ u64 tx_bufrl_err_cnt;
+ u64 tx_crc_err_cnt;
+ u64 tx_drop_cnt;
+ u64 tx_excessive_length_drop_cnt;
+
+ u64 tx_timeout_cnt;
+ u64 tx_dma_err_cnt;
+
+ u64 np_link_fail_cnt;
+};
+
struct hbg_priv {
struct net_device *netdev;
struct pci_dev *pdev;
@@ -155,6 +271,12 @@ struct hbg_priv {
struct hbg_mac_filter filter;
enum hbg_reset_type reset_type;
struct hbg_user_def user_def;
+ struct hbg_stats stats;
+ unsigned long last_update_stats_time;
+ struct delayed_work service_task;
};
+void hbg_err_reset_task_schedule(struct hbg_priv *priv);
+void hbg_np_link_fail_task_schedule(struct hbg_priv *priv);
+
#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c
index 8473c43d171a..5e0ba4d5b08d 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c
@@ -67,10 +67,11 @@ static int hbg_dbg_irq_info(struct seq_file *s, void *unused)
for (i = 0; i < priv->vectors.info_array_len; i++) {
info = &priv->vectors.info_array[i];
seq_printf(s,
- "%-20s: enabled: %-5s, logged: %-5s, count: %llu\n",
+ "%-20s: enabled: %-5s, reset: %-5s, logged: %-5s, count: %llu\n",
info->name,
str_true_false(hbg_hw_irq_is_enabled(priv,
info->mask)),
+ str_true_false(info->need_reset),
str_true_false(info->need_print),
info->count);
}
@@ -114,6 +115,10 @@ static int hbg_dbg_nic_state(struct seq_file *s, void *unused)
state_str_true_false(priv, HBG_NIC_STATE_RESET_FAIL));
seq_printf(s, "last reset type: %s\n",
reset_type_str[priv->reset_type]);
+ seq_printf(s, "need reset state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_NEED_RESET));
+ seq_printf(s, "np_link fail state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_NP_LINK_FAIL));
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c
new file mode 100644
index 000000000000..d61c03f34ff0
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2025 Hisilicon Limited.
+
+#include <linux/iopoll.h>
+#include <linux/phy.h>
+#include "hbg_common.h"
+#include "hbg_ethtool.h"
+#include "hbg_hw.h"
+#include "hbg_diagnose.h"
+
+#define HBG_MSG_DATA_MAX_NUM 64
+
+struct hbg_diagnose_message {
+ u32 opcode;
+ u32 status;
+ u32 data_num;
+ struct hbg_priv *priv;
+
+ u32 data[HBG_MSG_DATA_MAX_NUM];
+};
+
+#define HBG_HW_PUSH_WAIT_TIMEOUT_US (2 * 1000 * 1000)
+#define HBG_HW_PUSH_WAIT_INTERVAL_US (1 * 1000)
+
+enum hbg_push_cmd {
+ HBG_PUSH_CMD_IRQ = 0,
+ HBG_PUSH_CMD_STATS,
+ HBG_PUSH_CMD_LINK,
+};
+
+struct hbg_push_stats_info {
+ /* id is used to match the name of the current stats item,
+ * and is used for pretty printing on the BMC.
+ */
+ u32 id;
+ u64 offset;
+};
+
+struct hbg_push_irq_info {
+ /* id is used to match the name of the current irq,
+ * and is used for pretty printing on the BMC.
+ */
+ u32 id;
+ u32 mask;
+};
+
+#define HBG_PUSH_IRQ_I(name, id) {id, HBG_INT_MSK_##name##_B}
+static const struct hbg_push_irq_info hbg_push_irq_list[] = {
+ HBG_PUSH_IRQ_I(RX, 0),
+ HBG_PUSH_IRQ_I(TX, 1),
+ HBG_PUSH_IRQ_I(TX_PKT_CPL, 2),
+ HBG_PUSH_IRQ_I(MAC_MII_FIFO_ERR, 3),
+ HBG_PUSH_IRQ_I(MAC_PCS_RX_FIFO_ERR, 4),
+ HBG_PUSH_IRQ_I(MAC_PCS_TX_FIFO_ERR, 5),
+ HBG_PUSH_IRQ_I(MAC_APP_RX_FIFO_ERR, 6),
+ HBG_PUSH_IRQ_I(MAC_APP_TX_FIFO_ERR, 7),
+ HBG_PUSH_IRQ_I(SRAM_PARITY_ERR, 8),
+ HBG_PUSH_IRQ_I(TX_AHB_ERR, 9),
+ HBG_PUSH_IRQ_I(RX_BUF_AVL, 10),
+ HBG_PUSH_IRQ_I(REL_BUF_ERR, 11),
+ HBG_PUSH_IRQ_I(TXCFG_AVL, 12),
+ HBG_PUSH_IRQ_I(TX_DROP, 13),
+ HBG_PUSH_IRQ_I(RX_DROP, 14),
+ HBG_PUSH_IRQ_I(RX_AHB_ERR, 15),
+ HBG_PUSH_IRQ_I(MAC_FIFO_ERR, 16),
+ HBG_PUSH_IRQ_I(RBREQ_ERR, 17),
+ HBG_PUSH_IRQ_I(WE_ERR, 18),
+};
+
+#define HBG_PUSH_STATS_I(name, id) {id, HBG_STATS_FIELD_OFF(name)}
+static const struct hbg_push_stats_info hbg_push_stats_list[] = {
+ HBG_PUSH_STATS_I(rx_desc_drop, 0),
+ HBG_PUSH_STATS_I(rx_desc_l2_err_cnt, 1),
+ HBG_PUSH_STATS_I(rx_desc_pkt_len_err_cnt, 2),
+ HBG_PUSH_STATS_I(rx_desc_l3_wrong_head_cnt, 3),
+ HBG_PUSH_STATS_I(rx_desc_l3_csum_err_cnt, 4),
+ HBG_PUSH_STATS_I(rx_desc_l3_len_err_cnt, 5),
+ HBG_PUSH_STATS_I(rx_desc_l3_zero_ttl_cnt, 6),
+ HBG_PUSH_STATS_I(rx_desc_l3_other_cnt, 7),
+ HBG_PUSH_STATS_I(rx_desc_l4_err_cnt, 8),
+ HBG_PUSH_STATS_I(rx_desc_l4_wrong_head_cnt, 9),
+ HBG_PUSH_STATS_I(rx_desc_l4_len_err_cnt, 10),
+ HBG_PUSH_STATS_I(rx_desc_l4_csum_err_cnt, 11),
+ HBG_PUSH_STATS_I(rx_desc_l4_zero_port_num_cnt, 12),
+ HBG_PUSH_STATS_I(rx_desc_l4_other_cnt, 13),
+ HBG_PUSH_STATS_I(rx_desc_frag_cnt, 14),
+ HBG_PUSH_STATS_I(rx_desc_ip_ver_err_cnt, 15),
+ HBG_PUSH_STATS_I(rx_desc_ipv4_pkt_cnt, 16),
+ HBG_PUSH_STATS_I(rx_desc_ipv6_pkt_cnt, 17),
+ HBG_PUSH_STATS_I(rx_desc_no_ip_pkt_cnt, 18),
+ HBG_PUSH_STATS_I(rx_desc_ip_pkt_cnt, 19),
+ HBG_PUSH_STATS_I(rx_desc_tcp_pkt_cnt, 20),
+ HBG_PUSH_STATS_I(rx_desc_udp_pkt_cnt, 21),
+ HBG_PUSH_STATS_I(rx_desc_vlan_pkt_cnt, 22),
+ HBG_PUSH_STATS_I(rx_desc_icmp_pkt_cnt, 23),
+ HBG_PUSH_STATS_I(rx_desc_arp_pkt_cnt, 24),
+ HBG_PUSH_STATS_I(rx_desc_rarp_pkt_cnt, 25),
+ HBG_PUSH_STATS_I(rx_desc_multicast_pkt_cnt, 26),
+ HBG_PUSH_STATS_I(rx_desc_broadcast_pkt_cnt, 27),
+ HBG_PUSH_STATS_I(rx_desc_ipsec_pkt_cnt, 28),
+ HBG_PUSH_STATS_I(rx_desc_ip_opt_pkt_cnt, 29),
+ HBG_PUSH_STATS_I(rx_desc_key_not_match_cnt, 30),
+ HBG_PUSH_STATS_I(rx_octets_total_ok_cnt, 31),
+ HBG_PUSH_STATS_I(rx_uc_pkt_cnt, 32),
+ HBG_PUSH_STATS_I(rx_mc_pkt_cnt, 33),
+ HBG_PUSH_STATS_I(rx_bc_pkt_cnt, 34),
+ HBG_PUSH_STATS_I(rx_vlan_pkt_cnt, 35),
+ HBG_PUSH_STATS_I(rx_octets_bad_cnt, 36),
+ HBG_PUSH_STATS_I(rx_octets_total_filt_cnt, 37),
+ HBG_PUSH_STATS_I(rx_filt_pkt_cnt, 38),
+ HBG_PUSH_STATS_I(rx_trans_pkt_cnt, 39),
+ HBG_PUSH_STATS_I(rx_framesize_64, 40),
+ HBG_PUSH_STATS_I(rx_framesize_65_127, 41),
+ HBG_PUSH_STATS_I(rx_framesize_128_255, 42),
+ HBG_PUSH_STATS_I(rx_framesize_256_511, 43),
+ HBG_PUSH_STATS_I(rx_framesize_512_1023, 44),
+ HBG_PUSH_STATS_I(rx_framesize_1024_1518, 45),
+ HBG_PUSH_STATS_I(rx_framesize_bt_1518, 46),
+ HBG_PUSH_STATS_I(rx_fcs_error_cnt, 47),
+ HBG_PUSH_STATS_I(rx_data_error_cnt, 48),
+ HBG_PUSH_STATS_I(rx_align_error_cnt, 49),
+ HBG_PUSH_STATS_I(rx_frame_long_err_cnt, 50),
+ HBG_PUSH_STATS_I(rx_jabber_err_cnt, 51),
+ HBG_PUSH_STATS_I(rx_pause_macctl_frame_cnt, 52),
+ HBG_PUSH_STATS_I(rx_unknown_macctl_frame_cnt, 53),
+ HBG_PUSH_STATS_I(rx_frame_very_long_err_cnt, 54),
+ HBG_PUSH_STATS_I(rx_frame_runt_err_cnt, 55),
+ HBG_PUSH_STATS_I(rx_frame_short_err_cnt, 56),
+ HBG_PUSH_STATS_I(rx_overflow_cnt, 57),
+ HBG_PUSH_STATS_I(rx_bufrq_err_cnt, 58),
+ HBG_PUSH_STATS_I(rx_we_err_cnt, 59),
+ HBG_PUSH_STATS_I(rx_overrun_cnt, 60),
+ HBG_PUSH_STATS_I(rx_lengthfield_err_cnt, 61),
+ HBG_PUSH_STATS_I(rx_fail_comma_cnt, 62),
+ HBG_PUSH_STATS_I(rx_dma_err_cnt, 63),
+ HBG_PUSH_STATS_I(rx_fifo_less_empty_thrsld_cnt, 64),
+ HBG_PUSH_STATS_I(tx_octets_total_ok_cnt, 65),
+ HBG_PUSH_STATS_I(tx_uc_pkt_cnt, 66),
+ HBG_PUSH_STATS_I(tx_mc_pkt_cnt, 67),
+ HBG_PUSH_STATS_I(tx_bc_pkt_cnt, 68),
+ HBG_PUSH_STATS_I(tx_vlan_pkt_cnt, 69),
+ HBG_PUSH_STATS_I(tx_octets_bad_cnt, 70),
+ HBG_PUSH_STATS_I(tx_trans_pkt_cnt, 71),
+ HBG_PUSH_STATS_I(tx_pause_frame_cnt, 72),
+ HBG_PUSH_STATS_I(tx_framesize_64, 73),
+ HBG_PUSH_STATS_I(tx_framesize_65_127, 74),
+ HBG_PUSH_STATS_I(tx_framesize_128_255, 75),
+ HBG_PUSH_STATS_I(tx_framesize_256_511, 76),
+ HBG_PUSH_STATS_I(tx_framesize_512_1023, 77),
+ HBG_PUSH_STATS_I(tx_framesize_1024_1518, 78),
+ HBG_PUSH_STATS_I(tx_framesize_bt_1518, 79),
+ HBG_PUSH_STATS_I(tx_underrun_err_cnt, 80),
+ HBG_PUSH_STATS_I(tx_add_cs_fail_cnt, 81),
+ HBG_PUSH_STATS_I(tx_bufrl_err_cnt, 82),
+ HBG_PUSH_STATS_I(tx_crc_err_cnt, 83),
+ HBG_PUSH_STATS_I(tx_drop_cnt, 84),
+ HBG_PUSH_STATS_I(tx_excessive_length_drop_cnt, 85),
+ HBG_PUSH_STATS_I(tx_dma_err_cnt, 86),
+};
+
+static int hbg_push_msg_send(struct hbg_priv *priv,
+ struct hbg_diagnose_message *msg)
+{
+ u32 header = 0;
+ u32 i;
+
+ if (msg->data_num == 0)
+ return 0;
+
+ for (i = 0; i < msg->data_num && i < HBG_MSG_DATA_MAX_NUM; i++)
+ hbg_reg_write(priv,
+ HBG_REG_MSG_DATA_BASE_ADDR + i * sizeof(u32),
+ msg->data[i]);
+
+ hbg_field_modify(header, HBG_REG_MSG_HEADER_OPCODE_M, msg->opcode);
+ hbg_field_modify(header, HBG_REG_MSG_HEADER_DATA_NUM_M, msg->data_num);
+ hbg_field_modify(header, HBG_REG_MSG_HEADER_RESP_CODE_M, ETIMEDOUT);
+
+ /* start status */
+ hbg_field_modify(header, HBG_REG_MSG_HEADER_STATUS_M, 1);
+
+ /* write header msg to start push */
+ hbg_reg_write(priv, HBG_REG_MSG_HEADER_ADDR, header);
+
+ /* wait done */
+ readl_poll_timeout(priv->io_base + HBG_REG_MSG_HEADER_ADDR, header,
+ !FIELD_GET(HBG_REG_MSG_HEADER_STATUS_M, header),
+ HBG_HW_PUSH_WAIT_INTERVAL_US,
+ HBG_HW_PUSH_WAIT_TIMEOUT_US);
+
+ msg->status = FIELD_GET(HBG_REG_MSG_HEADER_STATUS_M, header);
+ return -(int)FIELD_GET(HBG_REG_MSG_HEADER_RESP_CODE_M, header);
+}
+
+static int hbg_push_data(struct hbg_priv *priv,
+ u32 opcode, u32 *data, u32 data_num)
+{
+ struct hbg_diagnose_message msg = {0};
+ u32 data_left_num;
+ u32 i, j;
+ int ret;
+
+ msg.priv = priv;
+ msg.opcode = opcode;
+ for (i = 0; i < data_num / HBG_MSG_DATA_MAX_NUM + 1; i++) {
+ if (i * HBG_MSG_DATA_MAX_NUM >= data_num)
+ break;
+
+ data_left_num = data_num - i * HBG_MSG_DATA_MAX_NUM;
+ for (j = 0; j < data_left_num && j < HBG_MSG_DATA_MAX_NUM; j++)
+ msg.data[j] = data[i * HBG_MSG_DATA_MAX_NUM + j];
+
+ msg.data_num = j;
+ ret = hbg_push_msg_send(priv, &msg);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hbg_push_data_u64(struct hbg_priv *priv, u32 opcode,
+ u64 *data, u32 data_num)
+{
+ /* A u64 is twice the size of a u32, so the number of u32
+ * data words is data_num * 2.
+ */
+ return hbg_push_data(priv, opcode, (u32 *)data, data_num * 2);
+}
+
+static u64 hbg_get_irq_stats(struct hbg_vector *vectors, u32 mask)
+{
+ u32 i = 0;
+
+ for (i = 0; i < vectors->info_array_len; i++)
+ if (vectors->info_array[i].mask == mask)
+ return vectors->info_array[i].count;
+
+ return 0;
+}
+
+static int hbg_push_irq_cnt(struct hbg_priv *priv)
+{
+ /* Each counter is pushed as an (id, value) pair,
+ * so data_num is twice the number of entries.
+ */
+ u32 data_num = ARRAY_SIZE(hbg_push_irq_list) * 2;
+ struct hbg_vector *vectors = &priv->vectors;
+ const struct hbg_push_irq_info *info;
+ u32 i, j = 0;
+ u64 *data;
+ int ret;
+
+ data = kcalloc(data_num, sizeof(u64), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* Each entry is an (id, value) pair,
+ * so i advances by 2 per iteration.
+ */
+ for (i = 0; i < data_num; i += 2) {
+ info = &hbg_push_irq_list[j++];
+ data[i] = info->id;
+ data[i + 1] = hbg_get_irq_stats(vectors, info->mask);
+ }
+
+ ret = hbg_push_data_u64(priv, HBG_PUSH_CMD_IRQ, data, data_num);
+ kfree(data);
+ return ret;
+}
+
+static int hbg_push_link_status(struct hbg_priv *priv)
+{
+ u32 link_status[2];
+
+ /* phy link status */
+ link_status[0] = priv->mac.phydev->link;
+ /* mac link status */
+ link_status[1] = hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR,
+ HBG_REG_AN_NEG_STATE_NP_LINK_OK_B);
+
+ return hbg_push_data(priv, HBG_PUSH_CMD_LINK,
+ link_status, ARRAY_SIZE(link_status));
+}
+
+static int hbg_push_stats(struct hbg_priv *priv)
+{
+ /* Each counter is pushed as an (id, value) pair,
+ * so data_num is twice the number of entries.
+ */
+ u64 data_num = ARRAY_SIZE(hbg_push_stats_list) * 2;
+ struct hbg_stats *stats = &priv->stats;
+ const struct hbg_push_stats_info *info;
+ u32 i, j = 0;
+ u64 *data;
+ int ret;
+
+ data = kcalloc(data_num, sizeof(u64), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* Each entry is an (id, value) pair,
+ * so i advances by 2 per iteration.
+ */
+ for (i = 0; i < data_num; i += 2) {
+ info = &hbg_push_stats_list[j++];
+ data[i] = info->id;
+ data[i + 1] = HBG_STATS_R(stats, info->offset);
+ }
+
+ ret = hbg_push_data_u64(priv, HBG_PUSH_CMD_STATS, data, data_num);
+ kfree(data);
+ return ret;
+}
+
+void hbg_diagnose_message_push(struct hbg_priv *priv)
+{
+ int ret;
+
+ if (test_bit(HBG_NIC_STATE_RESETTING, &priv->state))
+ return;
+
+ /* a value of 1 indicates a pending push request */
+ if (hbg_reg_read(priv, HBG_REG_PUSH_REQ_ADDR) != 1)
+ return;
+
+ ret = hbg_push_irq_cnt(priv);
+ if (ret) {
+ dev_err(&priv->pdev->dev,
+ "failed to push irq cnt, ret = %d\n", ret);
+ goto push_done;
+ }
+
+ ret = hbg_push_link_status(priv);
+ if (ret) {
+ dev_err(&priv->pdev->dev,
+ "failed to push link status, ret = %d\n", ret);
+ goto push_done;
+ }
+
+ ret = hbg_push_stats(priv);
+ if (ret)
+ dev_err(&priv->pdev->dev,
+ "failed to push stats, ret = %d\n", ret);
+
+push_done:
+ hbg_reg_write(priv, HBG_REG_PUSH_REQ_ADDR, 0);
+}
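
hbg_push_irq_cnt() and hbg_push_stats() above flatten the counters into (id, value) pairs, and hbg_push_data() slices the resulting u32 stream into messages of at most HBG_MSG_DATA_MAX_NUM words. A standalone toy example of that arithmetic (made-up values, not driver code):

#include <stdint.h>
#include <stdio.h>

#define MSG_DATA_MAX_NUM 64

int main(void)
{
	uint64_t stats[] = { 3, 7, 42 };	/* example counter values */
	unsigned int n = sizeof(stats) / sizeof(stats[0]);
	uint64_t pairs[2 * sizeof(stats) / sizeof(stats[0])];
	unsigned int i, data_words, msgs;

	for (i = 0; i < n; i++) {
		pairs[2 * i] = i;		/* id */
		pairs[2 * i + 1] = stats[i];	/* value */
	}

	/* each u64 pair element is two u32 data words on the wire */
	data_words = 2 * n * 2;
	msgs = (data_words + MSG_DATA_MAX_NUM - 1) / MSG_DATA_MAX_NUM;

	printf("first pair: id=%llu value=%llu\n",
	       (unsigned long long)pairs[0], (unsigned long long)pairs[1]);
	printf("%u data words -> %u message(s)\n", data_words, msgs);
	return 0;
}
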
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h
new file mode 100644
index 000000000000..ba04c6d8c03d
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2025 Hisilicon Limited. */
+
+#ifndef __HBG_DIAGNOSE_H
+#define __HBG_DIAGNOSE_H
+
+#include "hbg_common.h"
+
+void hbg_diagnose_message_push(struct hbg_priv *priv);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
index 4d1f4a33391a..4e8cb66f601c 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
@@ -105,6 +105,62 @@ int hbg_reset(struct hbg_priv *priv)
return hbg_reset_done(priv, HBG_RESET_TYPE_FUNCTION);
}
+void hbg_err_reset(struct hbg_priv *priv)
+{
+ bool running;
+
+ rtnl_lock();
+ running = netif_running(priv->netdev);
+ if (running)
+ dev_close(priv->netdev);
+
+ hbg_reset(priv);
+
+ /* in hbg_pci_err_detected(), we will detach first,
+ * so we need to attach before open
+ */
+ if (!netif_device_present(priv->netdev))
+ netif_device_attach(priv->netdev);
+
+ if (running)
+ dev_open(priv->netdev, NULL);
+ rtnl_unlock();
+}
+
+static pci_ers_result_t hbg_pci_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ pci_disable_device(pdev);
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t hbg_pci_err_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev,
+ "failed to re-enable PCI device after reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ hbg_err_reset(priv);
+ netif_device_attach(netdev);
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
static void hbg_pci_err_reset_prepare(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -124,6 +180,8 @@ static void hbg_pci_err_reset_done(struct pci_dev *pdev)
}
static const struct pci_error_handlers hbg_pci_err_handler = {
+ .error_detected = hbg_pci_err_detected,
+ .slot_reset = hbg_pci_err_slot_reset,
.reset_prepare = hbg_pci_err_reset_prepare,
.reset_done = hbg_pci_err_reset_done,
};
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h
index d7828e446308..fb9fbe7004e8 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h
@@ -9,5 +9,6 @@
void hbg_set_pci_err_handler(struct pci_driver *pdrv);
int hbg_reset(struct hbg_priv *priv);
int hbg_rebuild(struct hbg_priv *priv);
+void hbg_err_reset(struct hbg_priv *priv);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
index 00364a438ec2..8f1107b85fbb 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
@@ -9,6 +9,136 @@
#include "hbg_ethtool.h"
#include "hbg_hw.h"
+struct hbg_ethtool_stats {
+ char name[ETH_GSTRING_LEN];
+ unsigned long offset;
+ u32 reg; /* set to 0 if the stat is not updated from a hardware register */
+};
+
+#define HBG_STATS_I(stats) { #stats, HBG_STATS_FIELD_OFF(stats), 0}
+#define HBG_STATS_REG_I(stats, reg) { #stats, HBG_STATS_FIELD_OFF(stats), reg}
+
+static const struct hbg_ethtool_stats hbg_ethtool_stats_info[] = {
+ HBG_STATS_I(rx_desc_l2_err_cnt),
+ HBG_STATS_I(rx_desc_pkt_len_err_cnt),
+ HBG_STATS_I(rx_desc_l3_wrong_head_cnt),
+ HBG_STATS_I(rx_desc_l3_csum_err_cnt),
+ HBG_STATS_I(rx_desc_l3_len_err_cnt),
+ HBG_STATS_I(rx_desc_l3_zero_ttl_cnt),
+ HBG_STATS_I(rx_desc_l3_other_cnt),
+ HBG_STATS_I(rx_desc_l4_wrong_head_cnt),
+ HBG_STATS_I(rx_desc_l4_len_err_cnt),
+ HBG_STATS_I(rx_desc_l4_csum_err_cnt),
+ HBG_STATS_I(rx_desc_l4_zero_port_num_cnt),
+ HBG_STATS_I(rx_desc_l4_other_cnt),
+ HBG_STATS_I(rx_desc_ip_ver_err_cnt),
+ HBG_STATS_I(rx_desc_ipv4_pkt_cnt),
+ HBG_STATS_I(rx_desc_ipv6_pkt_cnt),
+ HBG_STATS_I(rx_desc_no_ip_pkt_cnt),
+ HBG_STATS_I(rx_desc_ip_pkt_cnt),
+ HBG_STATS_I(rx_desc_tcp_pkt_cnt),
+ HBG_STATS_I(rx_desc_udp_pkt_cnt),
+ HBG_STATS_I(rx_desc_vlan_pkt_cnt),
+ HBG_STATS_I(rx_desc_icmp_pkt_cnt),
+ HBG_STATS_I(rx_desc_arp_pkt_cnt),
+ HBG_STATS_I(rx_desc_rarp_pkt_cnt),
+ HBG_STATS_I(rx_desc_multicast_pkt_cnt),
+ HBG_STATS_I(rx_desc_broadcast_pkt_cnt),
+ HBG_STATS_I(rx_desc_ipsec_pkt_cnt),
+ HBG_STATS_I(rx_desc_ip_opt_pkt_cnt),
+ HBG_STATS_I(rx_desc_key_not_match_cnt),
+
+ HBG_STATS_REG_I(rx_octets_bad_cnt, HBG_REG_RX_OCTETS_BAD_ADDR),
+ HBG_STATS_REG_I(rx_octets_total_filt_cnt,
+ HBG_REG_RX_OCTETS_TOTAL_FILT_ADDR),
+ HBG_STATS_REG_I(rx_uc_pkt_cnt, HBG_REG_RX_UC_PKTS_ADDR),
+ HBG_STATS_REG_I(rx_vlan_pkt_cnt, HBG_REG_RX_TAGGED_ADDR),
+ HBG_STATS_REG_I(rx_filt_pkt_cnt, HBG_REG_RX_FILT_PKT_CNT_ADDR),
+ HBG_STATS_REG_I(rx_data_error_cnt, HBG_REG_RX_DATA_ERR_ADDR),
+ HBG_STATS_REG_I(rx_frame_long_err_cnt, HBG_REG_RX_LONG_ERRORS_ADDR),
+ HBG_STATS_REG_I(rx_jabber_err_cnt, HBG_REG_RX_JABBER_ERRORS_ADDR),
+ HBG_STATS_REG_I(rx_frame_very_long_err_cnt,
+ HBG_REG_RX_VERY_LONG_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_frame_runt_err_cnt, HBG_REG_RX_RUNT_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_frame_short_err_cnt, HBG_REG_RX_SHORT_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_overflow_cnt, HBG_REG_RX_OVER_FLOW_CNT_ADDR),
+ HBG_STATS_REG_I(rx_bufrq_err_cnt, HBG_REG_RX_BUFRQ_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_we_err_cnt, HBG_REG_RX_WE_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_overrun_cnt, HBG_REG_RX_OVERRUN_CNT_ADDR),
+ HBG_STATS_REG_I(rx_lengthfield_err_cnt,
+ HBG_REG_RX_LENGTHFIELD_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_fail_comma_cnt, HBG_REG_RX_FAIL_COMMA_CNT_ADDR),
+ HBG_STATS_I(rx_dma_err_cnt),
+ HBG_STATS_I(rx_fifo_less_empty_thrsld_cnt),
+
+ HBG_STATS_REG_I(tx_uc_pkt_cnt, HBG_REG_TX_UC_PKTS_ADDR),
+ HBG_STATS_REG_I(tx_vlan_pkt_cnt, HBG_REG_TX_TAGGED_ADDR),
+ HBG_STATS_REG_I(tx_octets_bad_cnt, HBG_REG_OCTETS_TRANSMITTED_BAD_ADDR),
+
+ HBG_STATS_REG_I(tx_underrun_err_cnt, HBG_REG_TX_UNDERRUN_ADDR),
+ HBG_STATS_REG_I(tx_add_cs_fail_cnt, HBG_REG_TX_CS_FAIL_CNT_ADDR),
+ HBG_STATS_REG_I(tx_bufrl_err_cnt, HBG_REG_TX_BUFRL_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(tx_crc_err_cnt, HBG_REG_TX_CRC_ERROR_ADDR),
+ HBG_STATS_REG_I(tx_drop_cnt, HBG_REG_TX_DROP_CNT_ADDR),
+ HBG_STATS_REG_I(tx_excessive_length_drop_cnt,
+ HBG_REG_TX_EXCESSIVE_LENGTH_DROP_ADDR),
+ HBG_STATS_I(tx_dma_err_cnt),
+ HBG_STATS_I(tx_timeout_cnt),
+};
+
+static const struct hbg_ethtool_stats hbg_ethtool_rmon_stats_info[] = {
+ HBG_STATS_I(rx_desc_frag_cnt),
+ HBG_STATS_REG_I(rx_framesize_64, HBG_REG_RX_PKTS_64OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_65_127,
+ HBG_REG_RX_PKTS_65TO127OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_128_255,
+ HBG_REG_RX_PKTS_128TO255OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_256_511,
+ HBG_REG_RX_PKTS_256TO511OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_512_1023,
+ HBG_REG_RX_PKTS_512TO1023OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_1024_1518,
+ HBG_REG_RX_PKTS_1024TO1518OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_bt_1518,
+ HBG_REG_RX_PKTS_1519TOMAXOCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_64, HBG_REG_TX_PKTS_64OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_65_127,
+ HBG_REG_TX_PKTS_65TO127OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_128_255,
+ HBG_REG_TX_PKTS_128TO255OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_256_511,
+ HBG_REG_TX_PKTS_256TO511OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_512_1023,
+ HBG_REG_TX_PKTS_512TO1023OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_1024_1518,
+ HBG_REG_TX_PKTS_1024TO1518OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_bt_1518,
+ HBG_REG_TX_PKTS_1519TOMAXOCTETS_ADDR),
+};
+
+static const struct hbg_ethtool_stats hbg_ethtool_mac_stats_info[] = {
+ HBG_STATS_REG_I(rx_mc_pkt_cnt, HBG_REG_RX_MC_PKTS_ADDR),
+ HBG_STATS_REG_I(rx_bc_pkt_cnt, HBG_REG_RX_BC_PKTS_ADDR),
+ HBG_STATS_REG_I(rx_align_error_cnt, HBG_REG_RX_ALIGN_ERRORS_ADDR),
+ HBG_STATS_REG_I(rx_octets_total_ok_cnt,
+ HBG_REG_RX_OCTETS_TOTAL_OK_ADDR),
+ HBG_STATS_REG_I(rx_trans_pkt_cnt, HBG_REG_RX_TRANS_PKG_CNT_ADDR),
+ HBG_STATS_REG_I(rx_fcs_error_cnt, HBG_REG_RX_FCS_ERRORS_ADDR),
+ HBG_STATS_REG_I(tx_mc_pkt_cnt, HBG_REG_TX_MC_PKTS_ADDR),
+ HBG_STATS_REG_I(tx_bc_pkt_cnt, HBG_REG_TX_BC_PKTS_ADDR),
+ HBG_STATS_REG_I(tx_octets_total_ok_cnt,
+ HBG_REG_OCTETS_TRANSMITTED_OK_ADDR),
+ HBG_STATS_REG_I(tx_trans_pkt_cnt, HBG_REG_TX_TRANS_PKG_CNT_ADDR),
+};
+
+static const struct hbg_ethtool_stats hbg_ethtool_ctrl_stats_info[] = {
+ HBG_STATS_REG_I(rx_pause_macctl_frame_cnt,
+ HBG_REG_RX_PAUSE_MACCTL_FRAMCOUNTER_ADDR),
+ HBG_STATS_REG_I(tx_pause_frame_cnt, HBG_REG_TX_PAUSE_FRAMES_ADDR),
+ HBG_STATS_REG_I(rx_unknown_macctl_frame_cnt,
+ HBG_REG_RX_UNKNOWN_MACCTL_FRAMCOUNTER_ADDR),
+};
+
enum hbg_reg_dump_type {
HBG_DUMP_REG_TYPE_SPEC = 0,
HBG_DUMP_REG_TYPE_MDIO,
@@ -180,6 +310,167 @@ static int hbg_ethtool_reset(struct net_device *netdev, u32 *flags)
return hbg_reset(priv);
}
+static void hbg_update_stats_by_info(struct hbg_priv *priv,
+ const struct hbg_ethtool_stats *info,
+ u32 info_len)
+{
+ const struct hbg_ethtool_stats *stats;
+ u32 i;
+
+ for (i = 0; i < info_len; i++) {
+ stats = &info[i];
+ if (!stats->reg)
+ continue;
+
+ HBG_STATS_U(&priv->stats, stats->offset,
+ hbg_reg_read(priv, stats->reg));
+ }
+}
+
+void hbg_update_stats(struct hbg_priv *priv)
+{
+ hbg_update_stats_by_info(priv, hbg_ethtool_stats_info,
+ ARRAY_SIZE(hbg_ethtool_stats_info));
+ hbg_update_stats_by_info(priv, hbg_ethtool_rmon_stats_info,
+ ARRAY_SIZE(hbg_ethtool_rmon_stats_info));
+ hbg_update_stats_by_info(priv, hbg_ethtool_mac_stats_info,
+ ARRAY_SIZE(hbg_ethtool_mac_stats_info));
+ hbg_update_stats_by_info(priv, hbg_ethtool_ctrl_stats_info,
+ ARRAY_SIZE(hbg_ethtool_ctrl_stats_info));
+}
+
+static int hbg_ethtool_get_sset_count(struct net_device *netdev, int stringset)
+{
+ if (stringset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ return ARRAY_SIZE(hbg_ethtool_stats_info);
+}
+
+static void hbg_ethtool_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ u32 i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(hbg_ethtool_stats_info); i++)
+ ethtool_puts(&data, hbg_ethtool_stats_info[i].name);
+}
+
+static void hbg_ethtool_get_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ u32 i;
+
+ hbg_update_stats(priv);
+ for (i = 0; i < ARRAY_SIZE(hbg_ethtool_stats_info); i++)
+ *data++ = HBG_STATS_R(&priv->stats,
+ hbg_ethtool_stats_info[i].offset);
+}
+
+static void hbg_ethtool_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *epstats)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *stats = &priv->stats;
+
+ hbg_update_stats(priv);
+ epstats->rx_pause_frames = stats->rx_pause_macctl_frame_cnt;
+ epstats->tx_pause_frames = stats->tx_pause_frame_cnt;
+}
+
+static void hbg_ethtool_get_eth_mac_stats(struct net_device *netdev,
+ struct ethtool_eth_mac_stats *emstats)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *stats = &priv->stats;
+
+ hbg_update_stats(priv);
+ emstats->FramesTransmittedOK = stats->tx_trans_pkt_cnt;
+ emstats->FramesReceivedOK = stats->rx_trans_pkt_cnt;
+ emstats->FrameCheckSequenceErrors = stats->rx_fcs_error_cnt;
+ emstats->AlignmentErrors = stats->rx_align_error_cnt;
+ emstats->OctetsTransmittedOK = stats->tx_octets_total_ok_cnt;
+ emstats->OctetsReceivedOK = stats->rx_octets_total_ok_cnt;
+
+ emstats->MulticastFramesXmittedOK = stats->tx_mc_pkt_cnt;
+ emstats->BroadcastFramesXmittedOK = stats->tx_bc_pkt_cnt;
+ emstats->MulticastFramesReceivedOK = stats->rx_mc_pkt_cnt;
+ emstats->BroadcastFramesReceivedOK = stats->rx_bc_pkt_cnt;
+ emstats->InRangeLengthErrors = stats->rx_fcs_error_cnt +
+ stats->rx_jabber_err_cnt +
+ stats->rx_unknown_macctl_frame_cnt +
+ stats->rx_bufrq_err_cnt +
+ stats->rx_we_err_cnt;
+ emstats->OutOfRangeLengthField = stats->rx_frame_short_err_cnt +
+ stats->rx_frame_runt_err_cnt +
+ stats->rx_lengthfield_err_cnt +
+ stats->rx_frame_long_err_cnt +
+ stats->rx_frame_very_long_err_cnt;
+ emstats->FrameTooLongErrors = stats->rx_frame_long_err_cnt +
+ stats->rx_frame_very_long_err_cnt;
+}
+
+static void
+hbg_ethtool_get_eth_ctrl_stats(struct net_device *netdev,
+ struct ethtool_eth_ctrl_stats *ecstats)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *s = &priv->stats;
+
+ hbg_update_stats(priv);
+ ecstats->MACControlFramesTransmitted = s->tx_pause_frame_cnt;
+ ecstats->MACControlFramesReceived = s->rx_pause_macctl_frame_cnt;
+ ecstats->UnsupportedOpcodesReceived = s->rx_unknown_macctl_frame_cnt;
+}
+
+static const struct ethtool_rmon_hist_range hbg_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 4095 },
+};
+
+static void
+hbg_ethtool_get_rmon_stats(struct net_device *netdev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *stats = &priv->stats;
+
+ hbg_update_stats(priv);
+ rmon_stats->undersize_pkts = stats->rx_frame_short_err_cnt +
+ stats->rx_frame_runt_err_cnt +
+ stats->rx_lengthfield_err_cnt;
+ rmon_stats->oversize_pkts = stats->rx_frame_long_err_cnt +
+ stats->rx_frame_very_long_err_cnt;
+ rmon_stats->fragments = stats->rx_desc_frag_cnt;
+ rmon_stats->hist[0] = stats->rx_framesize_64;
+ rmon_stats->hist[1] = stats->rx_framesize_65_127;
+ rmon_stats->hist[2] = stats->rx_framesize_128_255;
+ rmon_stats->hist[3] = stats->rx_framesize_256_511;
+ rmon_stats->hist[4] = stats->rx_framesize_512_1023;
+ rmon_stats->hist[5] = stats->rx_framesize_1024_1518;
+ rmon_stats->hist[6] = stats->rx_framesize_bt_1518;
+
+ rmon_stats->hist_tx[0] = stats->tx_framesize_64;
+ rmon_stats->hist_tx[1] = stats->tx_framesize_65_127;
+ rmon_stats->hist_tx[2] = stats->tx_framesize_128_255;
+ rmon_stats->hist_tx[3] = stats->tx_framesize_256_511;
+ rmon_stats->hist_tx[4] = stats->tx_framesize_512_1023;
+ rmon_stats->hist_tx[5] = stats->tx_framesize_1024_1518;
+ rmon_stats->hist_tx[6] = stats->tx_framesize_bt_1518;
+
+ *ranges = hbg_rmon_ranges;
+}
+
static const struct ethtool_ops hbg_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
@@ -190,6 +481,13 @@ static const struct ethtool_ops hbg_ethtool_ops = {
.set_pauseparam = hbg_ethtool_set_pauseparam,
.reset = hbg_ethtool_reset,
.nway_reset = phy_ethtool_nway_reset,
+ .get_sset_count = hbg_ethtool_get_sset_count,
+ .get_strings = hbg_ethtool_get_strings,
+ .get_ethtool_stats = hbg_ethtool_get_stats,
+ .get_pause_stats = hbg_ethtool_get_pause_stats,
+ .get_eth_mac_stats = hbg_ethtool_get_eth_mac_stats,
+ .get_eth_ctrl_stats = hbg_ethtool_get_eth_ctrl_stats,
+ .get_rmon_stats = hbg_ethtool_get_rmon_stats,
};
void hbg_ethtool_set_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h
index 628707ec2686..e173155b146a 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h
@@ -6,6 +6,11 @@
#include <linux/netdevice.h>
+#define HBG_STATS_FIELD_OFF(f) (offsetof(struct hbg_stats, f))
+#define HBG_STATS_R(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
+#define HBG_STATS_U(p, offset, val) (HBG_STATS_R(p, offset) += (val))
+
void hbg_ethtool_set_ops(struct net_device *netdev);
+void hbg_update_stats(struct hbg_priv *priv);
#endif
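
The HBG_STATS_FIELD_OFF()/HBG_STATS_R()/HBG_STATS_U() macros above let a single table of {name, offset} entries drive both the ethtool string list and the counter updates without per-field code. A standalone illustration of the same offsetof technique (demo names, not the driver's):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_stats {
	uint64_t rx_ok;
	uint64_t tx_ok;
};

struct demo_stat_info {
	const char *name;
	size_t offset;
};

#define DEMO_STAT(f)		{ #f, offsetof(struct demo_stats, f) }
#define DEMO_STAT_R(p, off)	(*(uint64_t *)((uint8_t *)(p) + (off)))

static const struct demo_stat_info demo_info[] = {
	DEMO_STAT(rx_ok),
	DEMO_STAT(tx_ok),
};

int main(void)
{
	struct demo_stats s = { .rx_ok = 10, .tx_ok = 20 };
	size_t i;

	/* one generic loop reads every field through its recorded offset */
	for (i = 0; i < sizeof(demo_info) / sizeof(demo_info[0]); i++)
		printf("%s = %llu\n", demo_info[i].name,
		       (unsigned long long)DEMO_STAT_R(&s, demo_info[i].offset));
	return 0;
}
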
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
index e7798f213645..74a18033b444 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
@@ -213,10 +213,20 @@ void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr)
void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
{
+ hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE);
+
hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR,
HBG_REG_PORT_MODE_M, speed);
hbg_reg_write_field(priv, HBG_REG_DUPLEX_TYPE_ADDR,
HBG_REG_DUPLEX_B, duplex);
+
+ hbg_hw_event_notify(priv, HBG_HW_EVENT_CORE_RESET);
+
+ hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);
+
+ if (!hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR,
+ HBG_REG_AN_NEG_STATE_NP_LINK_OK_B))
+ hbg_np_link_fail_task_schedule(priv);
}
/* only support uc filter */
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
index 25dd25f096fe..e79e9ab3e530 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
@@ -11,6 +11,9 @@ static void hbg_irq_handle_err(struct hbg_priv *priv,
if (irq_info->need_print)
dev_err(&priv->pdev->dev,
"receive error interrupt: %s\n", irq_info->name);
+
+ if (irq_info->need_reset)
+ hbg_err_reset_task_schedule(priv);
}
static void hbg_irq_handle_tx(struct hbg_priv *priv,
@@ -25,30 +28,38 @@ static void hbg_irq_handle_rx(struct hbg_priv *priv,
napi_schedule(&priv->rx_ring.napi);
}
-#define HBG_TXRX_IRQ_I(name, handle) \
- {#name, HBG_INT_MSK_##name##_B, false, false, 0, handle}
-#define HBG_ERR_IRQ_I(name, need_print) \
- {#name, HBG_INT_MSK_##name##_B, true, need_print, 0, hbg_irq_handle_err}
+static void hbg_irq_handle_rx_buf_val(struct hbg_priv *priv,
+ struct hbg_irq_info *irq_info)
+{
+ priv->stats.rx_fifo_less_empty_thrsld_cnt++;
+}
+
+#define HBG_IRQ_I(name, handle) \
+ {#name, HBG_INT_MSK_##name##_B, false, false, false, 0, handle}
+#define HBG_ERR_IRQ_I(name, need_print, need_reset) \
+ {#name, HBG_INT_MSK_##name##_B, true, need_print, \
+ need_reset, 0, hbg_irq_handle_err}
static struct hbg_irq_info hbg_irqs[] = {
- HBG_TXRX_IRQ_I(RX, hbg_irq_handle_rx),
- HBG_TXRX_IRQ_I(TX, hbg_irq_handle_tx),
- HBG_ERR_IRQ_I(MAC_MII_FIFO_ERR, true),
- HBG_ERR_IRQ_I(MAC_PCS_RX_FIFO_ERR, true),
- HBG_ERR_IRQ_I(MAC_PCS_TX_FIFO_ERR, true),
- HBG_ERR_IRQ_I(MAC_APP_RX_FIFO_ERR, true),
- HBG_ERR_IRQ_I(MAC_APP_TX_FIFO_ERR, true),
- HBG_ERR_IRQ_I(SRAM_PARITY_ERR, true),
- HBG_ERR_IRQ_I(TX_AHB_ERR, true),
- HBG_ERR_IRQ_I(RX_BUF_AVL, false),
- HBG_ERR_IRQ_I(REL_BUF_ERR, true),
- HBG_ERR_IRQ_I(TXCFG_AVL, false),
- HBG_ERR_IRQ_I(TX_DROP, false),
- HBG_ERR_IRQ_I(RX_DROP, false),
- HBG_ERR_IRQ_I(RX_AHB_ERR, true),
- HBG_ERR_IRQ_I(MAC_FIFO_ERR, false),
- HBG_ERR_IRQ_I(RBREQ_ERR, false),
- HBG_ERR_IRQ_I(WE_ERR, false),
+ HBG_IRQ_I(RX, hbg_irq_handle_rx),
+ HBG_IRQ_I(TX, hbg_irq_handle_tx),
+ HBG_ERR_IRQ_I(TX_PKT_CPL, true, true),
+ HBG_ERR_IRQ_I(MAC_MII_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(MAC_PCS_RX_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(MAC_PCS_TX_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(MAC_APP_RX_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(MAC_APP_TX_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(SRAM_PARITY_ERR, true, false),
+ HBG_ERR_IRQ_I(TX_AHB_ERR, true, true),
+ HBG_IRQ_I(RX_BUF_AVL, hbg_irq_handle_rx_buf_val),
+ HBG_ERR_IRQ_I(REL_BUF_ERR, true, false),
+ HBG_ERR_IRQ_I(TXCFG_AVL, false, false),
+ HBG_ERR_IRQ_I(TX_DROP, false, false),
+ HBG_ERR_IRQ_I(RX_DROP, false, false),
+ HBG_ERR_IRQ_I(RX_AHB_ERR, true, false),
+ HBG_ERR_IRQ_I(MAC_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(RBREQ_ERR, true, true),
+ HBG_ERR_IRQ_I(WE_ERR, true, true),
};
static irqreturn_t hbg_irq_handle(int irq_num, void *p)
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
index bb0f25ac9760..2ac5454338e4 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
@@ -5,7 +5,9 @@
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
+#include <linux/phy.h>
#include "hbg_common.h"
+#include "hbg_diagnose.h"
#include "hbg_err.h"
#include "hbg_ethtool.h"
#include "hbg_hw.h"
@@ -14,6 +16,9 @@
#include "hbg_txrx.h"
#include "hbg_debugfs.h"
+#define HBG_SUPPORT_FEATURES (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \
+ NETIF_F_RXCSUM)
+
static void hbg_all_irq_enable(struct hbg_priv *priv, bool enabled)
{
struct hbg_irq_info *info;
@@ -214,6 +219,10 @@ static void hbg_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
char *buf = ring->tout_log_buf;
u32 pos = 0;
+ priv->stats.tx_timeout_cnt++;
+
+ pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos,
+ "tx_timeout cnt: %llu\n", priv->stats.tx_timeout_cnt);
pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos,
"ring used num: %u, fifo used num: %u\n",
hbg_get_queue_used_num(ring),
@@ -226,6 +235,39 @@ static void hbg_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
netdev_info(netdev, "%s", buf);
}
+static void hbg_net_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *h_stats = &priv->stats;
+
+ hbg_update_stats(priv);
+ dev_get_tstats64(netdev, stats);
+
+ /* fifo empty */
+ stats->tx_fifo_errors += h_stats->tx_drop_cnt;
+
+ stats->tx_dropped += h_stats->tx_excessive_length_drop_cnt +
+ h_stats->tx_drop_cnt;
+ stats->tx_errors += h_stats->tx_add_cs_fail_cnt +
+ h_stats->tx_bufrl_err_cnt +
+ h_stats->tx_underrun_err_cnt +
+ h_stats->tx_crc_err_cnt;
+ stats->rx_errors += h_stats->rx_data_error_cnt;
+ stats->multicast += h_stats->rx_mc_pkt_cnt;
+ stats->rx_dropped += h_stats->rx_desc_drop;
+ stats->rx_length_errors += h_stats->rx_frame_very_long_err_cnt +
+ h_stats->rx_frame_long_err_cnt +
+ h_stats->rx_frame_runt_err_cnt +
+ h_stats->rx_frame_short_err_cnt +
+ h_stats->rx_lengthfield_err_cnt;
+ stats->rx_frame_errors += h_stats->rx_desc_l2_err_cnt +
+ h_stats->rx_desc_l3l4_err_cnt;
+ stats->rx_fifo_errors += h_stats->rx_overflow_cnt +
+ h_stats->rx_overrun_cnt;
+ stats->rx_crc_errors += h_stats->rx_fcs_error_cnt;
+}
+
static const struct net_device_ops hbg_netdev_ops = {
.ndo_open = hbg_net_open,
.ndo_stop = hbg_net_stop,
@@ -235,8 +277,62 @@ static const struct net_device_ops hbg_netdev_ops = {
.ndo_change_mtu = hbg_net_change_mtu,
.ndo_tx_timeout = hbg_net_tx_timeout,
.ndo_set_rx_mode = hbg_net_set_rx_mode,
+ .ndo_get_stats64 = hbg_net_get_stats,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
};
+static void hbg_service_task(struct work_struct *work)
+{
+ struct hbg_priv *priv = container_of(work, struct hbg_priv,
+ service_task.work);
+
+ if (test_and_clear_bit(HBG_NIC_STATE_NEED_RESET, &priv->state))
+ hbg_err_reset(priv);
+
+ if (test_and_clear_bit(HBG_NIC_STATE_NP_LINK_FAIL, &priv->state))
+ hbg_fix_np_link_fail(priv);
+
+ hbg_diagnose_message_push(priv);
+
+ /* The hardware statistics registers are u32. To prevent them
+ * from overflowing, the driver dumps the statistics every
+ * 30 seconds.
+ */
+ if (time_after(jiffies, priv->last_update_stats_time + 30 * HZ)) {
+ hbg_update_stats(priv);
+ priv->last_update_stats_time = jiffies;
+ }
+
+ schedule_delayed_work(&priv->service_task,
+ msecs_to_jiffies(MSEC_PER_SEC));
+}
+
+void hbg_err_reset_task_schedule(struct hbg_priv *priv)
+{
+ set_bit(HBG_NIC_STATE_NEED_RESET, &priv->state);
+ schedule_delayed_work(&priv->service_task, 0);
+}
+
+void hbg_np_link_fail_task_schedule(struct hbg_priv *priv)
+{
+ set_bit(HBG_NIC_STATE_NP_LINK_FAIL, &priv->state);
+ schedule_delayed_work(&priv->service_task, 0);
+}
+
+static void hbg_cancel_delayed_work_sync(void *data)
+{
+ cancel_delayed_work_sync(data);
+}
+
+static int hbg_delaywork_init(struct hbg_priv *priv)
+{
+ INIT_DELAYED_WORK(&priv->service_task, hbg_service_task);
+ schedule_delayed_work(&priv->service_task, 0);
+ return devm_add_action_or_reset(&priv->pdev->dev,
+ hbg_cancel_delayed_work_sync,
+ &priv->service_task);
+}
+
static int hbg_mac_filter_init(struct hbg_priv *priv)
{
struct hbg_dev_specs *dev_specs = &priv->dev_specs;
@@ -291,6 +387,10 @@ static int hbg_init(struct hbg_priv *priv)
if (ret)
return ret;
+ ret = hbg_delaywork_init(priv);
+ if (ret)
+ return ret;
+
hbg_debugfs_init(priv);
hbg_init_user_def(priv);
return 0;
@@ -349,6 +449,9 @@ static int hbg_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
+ /* set default features */
+ netdev->features |= HBG_SUPPORT_FEATURES;
+ netdev->hw_features |= HBG_SUPPORT_FEATURES;
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
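
hbg_delaywork_init() above ties the self-rearming service task to device lifetime with devm_add_action_or_reset(), so the work is cancelled synchronously on teardown without an explicit remove path. A minimal sketch of that pattern, with hypothetical names:

#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct demo_priv {
	struct device *dev;
	struct delayed_work task;
};

static void demo_task(struct work_struct *work)
{
	struct demo_priv *p = container_of(work, struct demo_priv, task.work);

	/* ... check state bits, refresh counters ... */

	/* re-arm: run again in roughly one second */
	schedule_delayed_work(&p->task, msecs_to_jiffies(1000));
}

static void demo_cancel_work(void *data)
{
	cancel_delayed_work_sync(data);
}

static int demo_task_init(struct demo_priv *p)
{
	INIT_DELAYED_WORK(&p->task, demo_task);
	schedule_delayed_work(&p->task, 0);
	/* cancellation runs automatically when the device is unbound */
	return devm_add_action_or_reset(p->dev, demo_cancel_work, &p->task);
}
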
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
index db6bc4cfb971..f29a937ad087 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
@@ -17,6 +17,8 @@
#define HBG_MDIO_OP_TIMEOUT_US (1 * 1000 * 1000)
#define HBG_MDIO_OP_INTERVAL_US (5 * 1000)
+#define HBG_NP_LINK_FAIL_RETRY_TIMES 5
+
static void hbg_mdio_set_command(struct hbg_mac *mac, u32 cmd)
{
hbg_reg_write(HBG_MAC_GET_PRIV(mac), HBG_REG_MDIO_COMMAND_ADDR, cmd);
@@ -127,6 +129,26 @@ static void hbg_flowctrl_cfg(struct hbg_priv *priv)
hbg_hw_set_pause_enable(priv, tx_pause, rx_pause);
}
+void hbg_fix_np_link_fail(struct hbg_priv *priv)
+{
+ struct device *dev = &priv->pdev->dev;
+
+ if (priv->stats.np_link_fail_cnt >= HBG_NP_LINK_FAIL_RETRY_TIMES) {
+ dev_err(dev, "failed to fix the MAC link status\n");
+ priv->stats.np_link_fail_cnt = 0;
+ return;
+ }
+
+ priv->stats.np_link_fail_cnt++;
+ dev_err(dev, "failed to link between MAC and PHY, try to fix...\n");
+
+ /* Replace phy_reset() with phy_stop() and phy_start(),
+ * as suggested by Andrew.
+ */
+ hbg_phy_stop(priv);
+ hbg_phy_start(priv);
+}
+
static void hbg_phy_adjust_link(struct net_device *netdev)
{
struct hbg_priv *priv = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h
index febd02a309c7..f3771c1bbd34 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h
@@ -9,4 +9,6 @@
int hbg_mdio_init(struct hbg_priv *priv);
void hbg_phy_start(struct hbg_priv *priv);
void hbg_phy_stop(struct hbg_priv *priv);
+void hbg_fix_np_link_fail(struct hbg_priv *priv);
+
#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
index f12efc12f3c5..cc2cc612770d 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
@@ -18,6 +18,13 @@
#define HBG_REG_TX_FIFO_NUM_ADDR 0x0030
#define HBG_REG_RX_FIFO_NUM_ADDR 0x0034
#define HBG_REG_VLAN_LAYERS_ADDR 0x0038
+#define HBG_REG_PUSH_REQ_ADDR 0x00F0
+#define HBG_REG_MSG_HEADER_ADDR 0x00F4
+#define HBG_REG_MSG_HEADER_OPCODE_M GENMASK(7, 0)
+#define HBG_REG_MSG_HEADER_STATUS_M GENMASK(11, 8)
+#define HBG_REG_MSG_HEADER_DATA_NUM_M GENMASK(19, 12)
+#define HBG_REG_MSG_HEADER_RESP_CODE_M GENMASK(27, 20)
+#define HBG_REG_MSG_DATA_BASE_ADDR 0x0100
/* MDIO */
#define HBG_REG_MDIO_BASE 0x8000
@@ -54,12 +61,55 @@
#define HBG_REG_PAUSE_ENABLE_RX_B BIT(0)
#define HBG_REG_PAUSE_ENABLE_TX_B BIT(1)
#define HBG_REG_AN_NEG_STATE_ADDR (HBG_REG_SGMII_BASE + 0x0058)
+#define HBG_REG_AN_NEG_STATE_NP_LINK_OK_B BIT(15)
#define HBG_REG_TRANSMIT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0060)
#define HBG_REG_TRANSMIT_CTRL_PAD_EN_B BIT(7)
#define HBG_REG_TRANSMIT_CTRL_CRC_ADD_B BIT(6)
#define HBG_REG_TRANSMIT_CTRL_AN_EN_B BIT(5)
#define HBG_REG_REC_FILT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0064)
#define HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B BIT(0)
+#define HBG_REG_RX_OCTETS_TOTAL_OK_ADDR (HBG_REG_SGMII_BASE + 0x0080)
+#define HBG_REG_RX_OCTETS_BAD_ADDR (HBG_REG_SGMII_BASE + 0x0084)
+#define HBG_REG_RX_UC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0088)
+#define HBG_REG_RX_MC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x008C)
+#define HBG_REG_RX_BC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0090)
+#define HBG_REG_RX_PKTS_64OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0094)
+#define HBG_REG_RX_PKTS_65TO127OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0098)
+#define HBG_REG_RX_PKTS_128TO255OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x009C)
+#define HBG_REG_RX_PKTS_256TO511OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00A0)
+#define HBG_REG_RX_PKTS_512TO1023OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00A4)
+#define HBG_REG_RX_PKTS_1024TO1518OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00A8)
+#define HBG_REG_RX_PKTS_1519TOMAXOCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00AC)
+#define HBG_REG_RX_FCS_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00B0)
+#define HBG_REG_RX_TAGGED_ADDR (HBG_REG_SGMII_BASE + 0x00B4)
+#define HBG_REG_RX_DATA_ERR_ADDR (HBG_REG_SGMII_BASE + 0x00B8)
+#define HBG_REG_RX_ALIGN_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00BC)
+#define HBG_REG_RX_LONG_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00C0)
+#define HBG_REG_RX_JABBER_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00C4)
+#define HBG_REG_RX_PAUSE_MACCTL_FRAMCOUNTER_ADDR (HBG_REG_SGMII_BASE + 0x00C8)
+#define HBG_REG_RX_UNKNOWN_MACCTL_FRAMCOUNTER_ADDR (HBG_REG_SGMII_BASE + 0x00CC)
+#define HBG_REG_RX_VERY_LONG_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00D0)
+#define HBG_REG_RX_RUNT_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00D4)
+#define HBG_REG_RX_SHORT_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00D8)
+#define HBG_REG_RX_FILT_PKT_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00E8)
+#define HBG_REG_RX_OCTETS_TOTAL_FILT_ADDR (HBG_REG_SGMII_BASE + 0x00EC)
+#define HBG_REG_OCTETS_TRANSMITTED_OK_ADDR (HBG_REG_SGMII_BASE + 0x0100)
+#define HBG_REG_OCTETS_TRANSMITTED_BAD_ADDR (HBG_REG_SGMII_BASE + 0x0104)
+#define HBG_REG_TX_UC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0108)
+#define HBG_REG_TX_MC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x010C)
+#define HBG_REG_TX_BC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0110)
+#define HBG_REG_TX_PKTS_64OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0114)
+#define HBG_REG_TX_PKTS_65TO127OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0118)
+#define HBG_REG_TX_PKTS_128TO255OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x011C)
+#define HBG_REG_TX_PKTS_256TO511OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0120)
+#define HBG_REG_TX_PKTS_512TO1023OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0124)
+#define HBG_REG_TX_PKTS_1024TO1518OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0128)
+#define HBG_REG_TX_PKTS_1519TOMAXOCTETS_ADDR (HBG_REG_SGMII_BASE + 0x012C)
+#define HBG_REG_TX_EXCESSIVE_LENGTH_DROP_ADDR (HBG_REG_SGMII_BASE + 0x014C)
+#define HBG_REG_TX_UNDERRUN_ADDR (HBG_REG_SGMII_BASE + 0x0150)
+#define HBG_REG_TX_TAGGED_ADDR (HBG_REG_SGMII_BASE + 0x0154)
+#define HBG_REG_TX_CRC_ERROR_ADDR (HBG_REG_SGMII_BASE + 0x0158)
+#define HBG_REG_TX_PAUSE_FRAMES_ADDR (HBG_REG_SGMII_BASE + 0x015C)
#define HBG_REG_LINE_LOOP_BACK_ADDR (HBG_REG_SGMII_BASE + 0x01A8)
#define HBG_REG_CF_CRC_STRIP_ADDR (HBG_REG_SGMII_BASE + 0x01B0)
#define HBG_REG_CF_CRC_STRIP_B BIT(0)
@@ -69,6 +119,9 @@
#define HBG_REG_RECV_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x01E0)
#define HBG_REG_RECV_CTRL_STRIP_PAD_EN_B BIT(3)
#define HBG_REG_VLAN_CODE_ADDR (HBG_REG_SGMII_BASE + 0x01E8)
+#define HBG_REG_RX_OVERRUN_CNT_ADDR (HBG_REG_SGMII_BASE + 0x01EC)
+#define HBG_REG_RX_LENGTHFIELD_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x01F4)
+#define HBG_REG_RX_FAIL_COMMA_CNT_ADDR (HBG_REG_SGMII_BASE + 0x01F8)
#define HBG_REG_STATION_ADDR_LOW_0_ADDR (HBG_REG_SGMII_BASE + 0x0200)
#define HBG_REG_STATION_ADDR_HIGH_0_ADDR (HBG_REG_SGMII_BASE + 0x0204)
#define HBG_REG_STATION_ADDR_LOW_1_ADDR (HBG_REG_SGMII_BASE + 0x0208)
@@ -103,6 +156,7 @@
#define HBG_INT_MSK_MAC_PCS_TX_FIFO_ERR_B BIT(17)
#define HBG_INT_MSK_MAC_PCS_RX_FIFO_ERR_B BIT(16)
#define HBG_INT_MSK_MAC_MII_FIFO_ERR_B BIT(15)
+#define HBG_INT_MSK_TX_PKT_CPL_B BIT(14)
#define HBG_INT_MSK_TX_B BIT(1) /* just used in driver */
#define HBG_INT_MSK_RX_B BIT(0) /* just used in driver */
#define HBG_REG_CF_INTRPT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x0434)
@@ -111,12 +165,17 @@
#define HBG_REG_RX_BUS_ERR_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x0440)
#define HBG_REG_MAX_FRAME_LEN_ADDR (HBG_REG_SGMII_BASE + 0x0444)
#define HBG_REG_MAX_FRAME_LEN_M GENMASK(15, 0)
+#define HBG_REG_TX_DROP_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0448)
+#define HBG_REG_RX_OVER_FLOW_CNT_ADDR (HBG_REG_SGMII_BASE + 0x044C)
#define HBG_REG_DEBUG_ST_MCH_ADDR (HBG_REG_SGMII_BASE + 0x0450)
#define HBG_REG_FIFO_CURR_STATUS_ADDR (HBG_REG_SGMII_BASE + 0x0454)
#define HBG_REG_FIFO_HIST_STATUS_ADDR (HBG_REG_SGMII_BASE + 0x0458)
#define HBG_REG_CF_CFF_DATA_NUM_ADDR (HBG_REG_SGMII_BASE + 0x045C)
#define HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M GENMASK(8, 0)
#define HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M GENMASK(24, 16)
+#define HBG_REG_TX_CS_FAIL_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0460)
+#define HBG_REG_RX_TRANS_PKG_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0464)
+#define HBG_REG_TX_TRANS_PKG_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0468)
#define HBG_REG_CF_TX_PAUSE_ADDR (HBG_REG_SGMII_BASE + 0x0470)
#define HBG_REG_TX_CFF_ADDR_0_ADDR (HBG_REG_SGMII_BASE + 0x0488)
#define HBG_REG_TX_CFF_ADDR_1_ADDR (HBG_REG_SGMII_BASE + 0x048C)
@@ -136,6 +195,9 @@
#define HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE2_M GENMASK(3, 0)
#define HBG_REG_RX_PKT_MODE_ADDR (HBG_REG_SGMII_BASE + 0x04F4)
#define HBG_REG_RX_PKT_MODE_PARSE_MODE_M GENMASK(22, 21)
+#define HBG_REG_RX_BUFRQ_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x058C)
+#define HBG_REG_TX_BUFRL_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0590)
+#define HBG_REG_RX_WE_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0594)
#define HBG_REG_DBG_ST0_ADDR (HBG_REG_SGMII_BASE + 0x05E4)
#define HBG_REG_DBG_ST1_ADDR (HBG_REG_SGMII_BASE + 0x05E8)
#define HBG_REG_DBG_ST2_ADDR (HBG_REG_SGMII_BASE + 0x05EC)
@@ -178,5 +240,48 @@ struct hbg_rx_desc {
};
#define HBG_RX_DESC_W2_PKT_LEN_M GENMASK(31, 16)
+#define HBG_RX_DESC_W2_PORT_NUM_M GENMASK(15, 12)
+#define HBG_RX_DESC_W4_IP_TCP_UDP_M GENMASK(31, 30)
+#define HBG_RX_DESC_W4_IPSEC_B BIT(29)
+#define HBG_RX_DESC_W4_IP_VERSION_B BIT(28)
+#define HBG_RX_DESC_W4_L4_ERR_CODE_M GENMASK(26, 23)
+#define HBG_RX_DESC_W4_FRAG_B BIT(22)
+#define HBG_RX_DESC_W4_OPT_B BIT(21)
+#define HBG_RX_DESC_W4_IP_VERSION_ERR_B BIT(20)
+#define HBG_RX_DESC_W4_BRD_CST_B BIT(19)
+#define HBG_RX_DESC_W4_MUL_CST_B BIT(18)
+#define HBG_RX_DESC_W4_ARP_B BIT(17)
+#define HBG_RX_DESC_W4_RARP_B BIT(16)
+#define HBG_RX_DESC_W4_ICMP_B BIT(15)
+#define HBG_RX_DESC_W4_VLAN_FLAG_B BIT(14)
+#define HBG_RX_DESC_W4_DROP_B BIT(13)
+#define HBG_RX_DESC_W4_L3_ERR_CODE_M GENMASK(12, 9)
+#define HBG_RX_DESC_W4_L2_ERR_B BIT(8)
+#define HBG_RX_DESC_W4_IDX_MATCH_B BIT(7)
+
+enum hbg_l3_err_code {
+ HBG_L3_OK = 0,
+ HBG_L3_WRONG_HEAD,
+ HBG_L3_CSUM_ERR,
+ HBG_L3_LEN_ERR,
+ HBG_L3_ZERO_TTL,
+ HBG_L3_RSVD,
+};
+
+enum hbg_l4_err_code {
+ HBG_L4_OK = 0,
+ HBG_L4_WRONG_HEAD,
+ HBG_L4_LEN_ERR,
+ HBG_L4_CSUM_ERR,
+ HBG_L4_ZERO_PORT_NUM,
+ HBG_L4_RSVD,
+};
+
+enum hbg_pkt_type_code {
+ HBG_NO_IP_PKT = 0,
+ HBG_IP_PKT,
+ HBG_TCP_PKT,
+ HBG_UDP_PKT,
+};
#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
index f4f256a0dfea..8d814c8f19ea 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
@@ -38,8 +38,14 @@ static int hbg_dma_map(struct hbg_buffer *buffer)
buffer->skb_dma = dma_map_single(&priv->pdev->dev,
buffer->skb->data, buffer->skb_len,
buffer_to_dma_dir(buffer));
- if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma)))
+ if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma))) {
+ if (buffer->dir == HBG_DIR_RX)
+ priv->stats.rx_dma_err_cnt++;
+ else
+ priv->stats.tx_dma_err_cnt++;
+
return -ENOMEM;
+ }
return 0;
}
@@ -195,6 +201,173 @@ static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget)
return packet_done;
}
+static bool hbg_rx_check_l3l4_error(struct hbg_priv *priv,
+ struct hbg_rx_desc *desc,
+ struct sk_buff *skb)
+{
+ bool rx_checksum_offload = !!(priv->netdev->features & NETIF_F_RXCSUM);
+
+ skb->ip_summed = rx_checksum_offload ?
+ CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
+
+ if (likely(!FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M, desc->word4) &&
+ !FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M, desc->word4)))
+ return true;
+
+ switch (FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M, desc->word4)) {
+ case HBG_L3_OK:
+ break;
+ case HBG_L3_WRONG_HEAD:
+ priv->stats.rx_desc_l3_wrong_head_cnt++;
+ return false;
+ case HBG_L3_CSUM_ERR:
+ skb->ip_summed = CHECKSUM_NONE;
+ priv->stats.rx_desc_l3_csum_err_cnt++;
+
+ /* Don't drop packets on csum validation failure,
+ * as suggested by Jakub
+ */
+ break;
+ case HBG_L3_LEN_ERR:
+ priv->stats.rx_desc_l3_len_err_cnt++;
+ return false;
+ case HBG_L3_ZERO_TTL:
+ priv->stats.rx_desc_l3_zero_ttl_cnt++;
+ return false;
+ default:
+ priv->stats.rx_desc_l3_other_cnt++;
+ return false;
+ }
+
+ switch (FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M, desc->word4)) {
+ case HBG_L4_OK:
+ break;
+ case HBG_L4_WRONG_HEAD:
+ priv->stats.rx_desc_l4_wrong_head_cnt++;
+ return false;
+ case HBG_L4_LEN_ERR:
+ priv->stats.rx_desc_l4_len_err_cnt++;
+ return false;
+ case HBG_L4_CSUM_ERR:
+ skb->ip_summed = CHECKSUM_NONE;
+ priv->stats.rx_desc_l4_csum_err_cnt++;
+
+ /* Don't drop packets on csum validation failure,
+ * as suggested by Jakub
+ */
+ break;
+ case HBG_L4_ZERO_PORT_NUM:
+ priv->stats.rx_desc_l4_zero_port_num_cnt++;
+ return false;
+ default:
+ priv->stats.rx_desc_l4_other_cnt++;
+ return false;
+ }
+
+ return true;
+}
+
+static void hbg_update_rx_ip_protocol_stats(struct hbg_priv *priv,
+ struct hbg_rx_desc *desc)
+{
+ if (unlikely(!FIELD_GET(HBG_RX_DESC_W4_IP_TCP_UDP_M, desc->word4))) {
+ priv->stats.rx_desc_no_ip_pkt_cnt++;
+ return;
+ }
+
+ if (unlikely(FIELD_GET(HBG_RX_DESC_W4_IP_VERSION_ERR_B, desc->word4))) {
+ priv->stats.rx_desc_ip_ver_err_cnt++;
+ return;
+ }
+
+ /* 0:ipv4, 1:ipv6 */
+ if (FIELD_GET(HBG_RX_DESC_W4_IP_VERSION_B, desc->word4))
+ priv->stats.rx_desc_ipv6_pkt_cnt++;
+ else
+ priv->stats.rx_desc_ipv4_pkt_cnt++;
+
+ switch (FIELD_GET(HBG_RX_DESC_W4_IP_TCP_UDP_M, desc->word4)) {
+ case HBG_IP_PKT:
+ priv->stats.rx_desc_ip_pkt_cnt++;
+ if (FIELD_GET(HBG_RX_DESC_W4_OPT_B, desc->word4))
+ priv->stats.rx_desc_ip_opt_pkt_cnt++;
+ if (FIELD_GET(HBG_RX_DESC_W4_FRAG_B, desc->word4))
+ priv->stats.rx_desc_frag_cnt++;
+
+ if (FIELD_GET(HBG_RX_DESC_W4_ICMP_B, desc->word4))
+ priv->stats.rx_desc_icmp_pkt_cnt++;
+ else if (FIELD_GET(HBG_RX_DESC_W4_IPSEC_B, desc->word4))
+ priv->stats.rx_desc_ipsec_pkt_cnt++;
+ break;
+ case HBG_TCP_PKT:
+ priv->stats.rx_desc_tcp_pkt_cnt++;
+ break;
+ case HBG_UDP_PKT:
+ priv->stats.rx_desc_udp_pkt_cnt++;
+ break;
+ default:
+ priv->stats.rx_desc_no_ip_pkt_cnt++;
+ break;
+ }
+}
+
+static void hbg_update_rx_protocol_stats(struct hbg_priv *priv,
+ struct hbg_rx_desc *desc)
+{
+ if (unlikely(!FIELD_GET(HBG_RX_DESC_W4_IDX_MATCH_B, desc->word4))) {
+ priv->stats.rx_desc_key_not_match_cnt++;
+ return;
+ }
+
+ if (FIELD_GET(HBG_RX_DESC_W4_BRD_CST_B, desc->word4))
+ priv->stats.rx_desc_broadcast_pkt_cnt++;
+ else if (FIELD_GET(HBG_RX_DESC_W4_MUL_CST_B, desc->word4))
+ priv->stats.rx_desc_multicast_pkt_cnt++;
+
+ if (FIELD_GET(HBG_RX_DESC_W4_VLAN_FLAG_B, desc->word4))
+ priv->stats.rx_desc_vlan_pkt_cnt++;
+
+ if (FIELD_GET(HBG_RX_DESC_W4_ARP_B, desc->word4)) {
+ priv->stats.rx_desc_arp_pkt_cnt++;
+ return;
+ } else if (FIELD_GET(HBG_RX_DESC_W4_RARP_B, desc->word4)) {
+ priv->stats.rx_desc_rarp_pkt_cnt++;
+ return;
+ }
+
+ hbg_update_rx_ip_protocol_stats(priv, desc);
+}
+
+static bool hbg_rx_pkt_check(struct hbg_priv *priv, struct hbg_rx_desc *desc,
+ struct sk_buff *skb)
+{
+ if (unlikely(FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, desc->word2) >
+ priv->dev_specs.max_frame_len)) {
+ priv->stats.rx_desc_pkt_len_err_cnt++;
+ return false;
+ }
+
+ if (unlikely(FIELD_GET(HBG_RX_DESC_W2_PORT_NUM_M, desc->word2) !=
+ priv->dev_specs.mac_id ||
+ FIELD_GET(HBG_RX_DESC_W4_DROP_B, desc->word4))) {
+ priv->stats.rx_desc_drop++;
+ return false;
+ }
+
+ if (unlikely(FIELD_GET(HBG_RX_DESC_W4_L2_ERR_B, desc->word4))) {
+ priv->stats.rx_desc_l2_err_cnt++;
+ return false;
+ }
+
+ if (unlikely(!hbg_rx_check_l3l4_error(priv, desc, skb))) {
+ priv->stats.rx_desc_l3l4_err_cnt++;
+ return false;
+ }
+
+ hbg_update_rx_protocol_stats(priv, desc);
+ return true;
+}
+
static int hbg_rx_fill_one_buffer(struct hbg_priv *priv)
{
struct hbg_ring *ring = &priv->rx_ring;
@@ -257,8 +430,12 @@ static int hbg_napi_rx_poll(struct napi_struct *napi, int budget)
rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
pkt_len = FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2);
- hbg_dma_unmap(buffer);
+ if (unlikely(!hbg_rx_pkt_check(priv, rx_desc, buffer->skb))) {
+ hbg_buffer_free(buffer);
+ goto next_buffer;
+ }
+ hbg_dma_unmap(buffer);
skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN);
skb_put(buffer->skb, pkt_len);
buffer->skb->protocol = eth_type_trans(buffer->skb,
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index a376d4bdf281..18376bcc718a 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -934,8 +934,6 @@ static int hip04_mac_probe(struct platform_device *pdev)
priv->chan = arg.args[1] * RX_DESC_NUM;
priv->group = arg.args[2];
- hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-
/* BQL will try to keep the TX queue as short as possible, but it can't
* be faster than tx_coalesce_usecs, so we need a fast timeout here,
* but also long enough to gather up enough frames to ensure we don't
@@ -944,7 +942,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
*/
priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
priv->tx_coalesce_usecs = 200;
- priv->tx_coalesce_timer.function = tx_done;
+ hrtimer_setup(&priv->tx_coalesce_timer, tx_done, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
priv->map = syscon_node_to_regmap(arg.np);
of_node_put(arg.np);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 6c458f037262..60a586a951a0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -266,9 +266,9 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
if (err)
goto out;
- err = phy_loopback(phy_dev, true);
+ err = phy_loopback(phy_dev, true, 0);
} else {
- err = phy_loopback(phy_dev, false);
+ err = phy_loopback(phy_dev, false, 0);
if (err)
goto out;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 9bbece25552b..09749e9f7398 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -3,6 +3,7 @@
#include <linux/debugfs.h>
#include <linux/device.h>
+#include <linux/string_choices.h>
#include "hnae3.h"
#include "hns3_debugfs.h"
@@ -661,12 +662,14 @@ static void hns3_dump_rx_queue_info(struct hns3_enet_ring *ring,
HNS3_RING_RX_RING_PKTNUM_RECORD_REG));
sprintf(result[j++], "%u", ring->rx_copybreak);
- sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_EN_REG) ? "on" : "off");
+ sprintf(result[j++], "%s",
+ str_on_off(readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_EN_REG)));
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
- sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_EN_REG) ? "on" : "off");
+ sprintf(result[j++], "%s",
+ str_on_off(readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_EN_REG)));
else
sprintf(result[j++], "%s", "NA");
@@ -764,12 +767,14 @@ static void hns3_dump_tx_queue_info(struct hns3_enet_ring *ring,
sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_PKTNUM_RECORD_REG));
- sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_EN_REG) ? "on" : "off");
+ sprintf(result[j++], "%s",
+ str_on_off(readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_EN_REG)));
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
- sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_EN_REG) ? "on" : "off");
+ sprintf(result[j++], "%s",
+ str_on_off(readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_EN_REG)));
else
sprintf(result[j++], "%s", "NA");
@@ -1030,7 +1035,6 @@ static void
hns3_dbg_dev_caps(struct hnae3_handle *h, char *buf, int len, int *pos)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- const char * const str[] = {"no", "yes"};
unsigned long *caps = ae_dev->caps;
u32 i, state;
@@ -1039,7 +1043,7 @@ hns3_dbg_dev_caps(struct hnae3_handle *h, char *buf, int len, int *pos)
for (i = 0; i < ARRAY_SIZE(hns3_dbg_cap); i++) {
state = test_bit(hns3_dbg_cap[i].cap_bit, caps);
*pos += scnprintf(buf + *pos, len - *pos, "%s: %s\n",
- hns3_dbg_cap[i].name, str[state]);
+ hns3_dbg_cap[i].name, str_yes_no(state));
}
*pos += scnprintf(buf + *pos, len - *pos, "\n");
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index b771a2daba43..6715222aeb66 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -3,6 +3,7 @@
#include <linux/etherdevice.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/phy.h>
#include <linux/sfp.h>
@@ -1198,7 +1199,7 @@ static int hns3_set_tx_push(struct net_device *netdev, u32 tx_push)
return 0;
netdev_dbg(netdev, "Changing tx push from %s to %s\n",
- old_state ? "on" : "off", tx_push ? "on" : "off");
+ str_on_off(old_state), str_on_off(tx_push));
if (tx_push)
set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index debf143e9940..c46490693594 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -3,6 +3,7 @@
#include <linux/device.h>
#include <linux/sched/clock.h>
+#include <linux/string_choices.h>
#include "hclge_debugfs.h"
#include "hclge_err.h"
@@ -11,7 +12,6 @@
#include "hclge_tm.h"
#include "hnae3.h"
-static const char * const state_str[] = { "off", "on" };
static const char * const hclge_mac_state_str[] = {
"TO_ADD", "TO_DEL", "ACTIVE"
};
@@ -2573,7 +2573,7 @@ static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
HCLGE_MAC_APP_LP_B);
pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n",
- state_str[loopback_en]);
+ str_on_off(loopback_en));
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2586,22 +2586,22 @@ static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n",
- state_str[loopback_en]);
+ str_on_off(loopback_en));
loopback_en = req_common->enable &
HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n",
- state_str[loopback_en]);
+ str_on_off(loopback_en));
if (phydev) {
loopback_en = phydev->loopback_enabled;
pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
- state_str[loopback_en]);
+ str_on_off(loopback_en));
} else if (hnae3_dev_phy_imp_supported(hdev)) {
loopback_en = req_common->enable &
HCLGE_CMD_GE_PHY_INNER_LOOP_B;
pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
- state_str[loopback_en]);
+ str_on_off(loopback_en));
}
return 0;
@@ -2894,9 +2894,9 @@ static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
*pos += scnprintf(buf, len, "I_PORT_VLAN_FILTER: %s\n",
- state_str[ingress]);
+ str_on_off(ingress));
*pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n",
- state_str[egress]);
+ str_on_off(egress));
hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items,
NULL, ARRAY_SIZE(vlan_filter_items));
@@ -2915,11 +2915,11 @@ static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
return ret;
j = 0;
result[j++] = hclge_dbg_get_func_id_str(str_id, i);
- result[j++] = state_str[ingress];
- result[j++] = state_str[egress];
- result[j++] =
- test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
- hdev->ae_dev->caps) ? state_str[bypass] : "NA";
+ result[j++] = str_on_off(ingress);
+ result[j++] = str_on_off(egress);
+ result[j++] = test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
+ hdev->ae_dev->caps) ?
+ str_on_off(bypass) : "NA";
hclge_dbg_fill_content(content, sizeof(content),
vlan_filter_items, result,
ARRAY_SIZE(vlan_filter_items));
@@ -2958,19 +2958,19 @@ static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
j = 0;
result[j++] = hclge_dbg_get_func_id_str(str_id, i);
result[j++] = str_pvid;
- result[j++] = state_str[vlan_cfg.accept_tag1];
- result[j++] = state_str[vlan_cfg.accept_tag2];
- result[j++] = state_str[vlan_cfg.accept_untag1];
- result[j++] = state_str[vlan_cfg.accept_untag2];
- result[j++] = state_str[vlan_cfg.insert_tag1];
- result[j++] = state_str[vlan_cfg.insert_tag2];
- result[j++] = state_str[vlan_cfg.shift_tag];
- result[j++] = state_str[vlan_cfg.strip_tag1];
- result[j++] = state_str[vlan_cfg.strip_tag2];
- result[j++] = state_str[vlan_cfg.drop_tag1];
- result[j++] = state_str[vlan_cfg.drop_tag2];
- result[j++] = state_str[vlan_cfg.pri_only1];
- result[j++] = state_str[vlan_cfg.pri_only2];
+ result[j++] = str_on_off(vlan_cfg.accept_tag1);
+ result[j++] = str_on_off(vlan_cfg.accept_tag2);
+ result[j++] = str_on_off(vlan_cfg.accept_untag1);
+ result[j++] = str_on_off(vlan_cfg.accept_untag2);
+ result[j++] = str_on_off(vlan_cfg.insert_tag1);
+ result[j++] = str_on_off(vlan_cfg.insert_tag2);
+ result[j++] = str_on_off(vlan_cfg.shift_tag);
+ result[j++] = str_on_off(vlan_cfg.strip_tag1);
+ result[j++] = str_on_off(vlan_cfg.strip_tag2);
+ result[j++] = str_on_off(vlan_cfg.drop_tag1);
+ result[j++] = str_on_off(vlan_cfg.drop_tag2);
+ result[j++] = str_on_off(vlan_cfg.pri_only1);
+ result[j++] = str_on_off(vlan_cfg.pri_only2);
hclge_dbg_fill_content(content, sizeof(content),
vlan_offload_items, result,
@@ -3007,14 +3007,13 @@ static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len)
pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n",
ptp->info.name);
pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n",
- test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags) ?
- "yes" : "no");
+ str_yes_no(test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags)));
pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n",
- test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ?
- "yes" : "no");
+ str_yes_no(test_bit(HCLGE_PTP_FLAG_TX_EN,
+ &ptp->flags)));
pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n",
- test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags) ?
- "yes" : "no");
+ str_yes_no(test_bit(HCLGE_PTP_FLAG_RX_EN,
+ &ptp->flags)));
last_rx = jiffies_to_msecs(ptp->last_rx);
pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 3f17b3073e50..92f9b8ec76d9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -7875,7 +7875,7 @@ static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
if (ret)
return ret;
- return phy_loopback(phydev, true);
+ return phy_loopback(phydev, true, 0);
}
static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
@@ -7883,7 +7883,7 @@ static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
{
int ret;
- ret = phy_loopback(phydev, false);
+ ret = phy_loopback(phydev, false, 0);
if (ret)
return ret;
@@ -8000,7 +8000,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
ret = hclge_tqp_enable(handle, en);
if (ret)
dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
- en ? "enable" : "disable", ret);
+ str_enable_disable(en), ret);
return ret;
}
@@ -11200,9 +11200,9 @@ static void hclge_info_show(struct hclge_dev *hdev)
dev_info(dev, "This is %s PF\n",
hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
dev_info(dev, "DCB %s\n",
- handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable");
+ str_enable_disable(handle->kinfo.tc_info.dcb_ets_active));
dev_info(dev, "MQPRIO %s\n",
- handle->kinfo.tc_info.mqprio_active ? "enable" : "disable");
+ str_enable_disable(handle->kinfo.tc_info.mqprio_active));
dev_info(dev, "Default tx spare buffer size: %u\n",
hdev->tx_spare_buf_size);
@@ -11976,7 +11976,7 @@ static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
if (ret) {
dev_err(&hdev->pdev->dev,
"Set vf %d mac spoof check %s failed, ret=%d\n",
- vf, enable ? "on" : "off", ret);
+ vf, str_on_off(enable), ret);
return ret;
}
@@ -11984,7 +11984,7 @@ static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
if (ret)
dev_err(&hdev->pdev->dev,
"Set vf %d vlan spoof check %s failed, ret=%d\n",
- vf, enable ? "on" : "off", ret);
+ vf, str_on_off(enable), ret);
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 80079657afeb..9a456ebf9b7c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -258,7 +258,7 @@ void hclge_mac_start_phy(struct hclge_dev *hdev)
if (!phydev)
return;
- phy_loopback(phydev, false);
+ phy_loopback(phydev, false, 0);
phy_start(phydev);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index 181af419b878..59cc9221185f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -2,6 +2,7 @@
// Copyright (c) 2021 Hisilicon Limited.
#include <linux/skbuff.h>
+#include <linux/string_choices.h>
#include "hclge_main.h"
#include "hnae3.h"
@@ -226,7 +227,7 @@ static int hclge_ptp_int_en(struct hclge_dev *hdev, bool en)
if (ret)
dev_err(&hdev->pdev->dev,
"failed to %s ptp interrupt, ret = %d\n",
- en ? "enable" : "disable", ret);
+ str_enable_disable(en), ret);
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index a1aa6c1f966e..6812be8dc64f 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -640,7 +640,7 @@ static struct platform_driver hns_mdio_driver = {
.driver = {
.name = MDIO_DRV_NAME,
.of_match_table = hns_mdio_match,
- .acpi_match_table = ACPI_PTR(hns_mdio_acpi_match),
+ .acpi_match_table = hns_mdio_acpi_match,
},
};
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 25b8a3556004..417dfa18daae 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2554,17 +2554,12 @@ static int emac_dt_mdio_probe(struct emac_instance *dev)
struct mii_bus *bus;
int res;
- mii_np = of_get_child_by_name(dev->ofdev->dev.of_node, "mdio");
+ mii_np = of_get_available_child_by_name(dev->ofdev->dev.of_node, "mdio");
if (!mii_np) {
dev_err(&dev->ofdev->dev, "no mdio definition found.");
return -ENODEV;
}
- if (!of_device_is_available(mii_np)) {
- res = -ENODEV;
- goto put_node;
- }
-
bus = devm_mdiobus_alloc(&dev->ofdev->dev);
if (!bus) {
res = -ENOMEM;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 0676fc547b6f..92647e137cf8 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -234,11 +234,17 @@ static int ibmvnic_set_queue_affinity(struct ibmvnic_sub_crq_queue *queue,
(*stragglers)--;
}
/* atomic write is safer than writing bit by bit directly */
- for (i = 0; i < stride; i++) {
- cpumask_set_cpu(*cpu, mask);
- *cpu = cpumask_next_wrap(*cpu, cpu_online_mask,
- nr_cpu_ids, false);
+ for_each_online_cpu_wrap(i, *cpu) {
+ if (!stride--) {
+ /* For the next queue we start from the first
+ * unused CPU in this queue
+ */
+ *cpu = i;
+ break;
+ }
+ cpumask_set_cpu(i, mask);
}
+
/* set queue affinity mask */
cpumask_copy(queue->affinity_mask, mask);
rc = irq_set_affinity_and_hint(queue->irq, queue->affinity_mask);
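
For reference, a stand-alone sketch (not driver code) of the spreading scheme used in the hunk above: each queue receives roughly num_cpu / total_queues CPUs, leftover CPUs go to the earlier queues, and the walk wraps around the online set the way for_each_online_cpu_wrap() does. The exact straggler placement in the driver may differ; the CPU numbering and counts below are purely illustrative.

/*
 * Illustrative sketch of queue-to-CPU spreading: each queue gets 'stride'
 * CPUs, the first 'stragglers' queues get one extra (an assumption for this
 * example), and the walk wraps around the CPU set.
 */
#include <stdio.h>

int main(void)
{
	int num_cpu = 6, total_queues = 4;
	int stride = (num_cpu / total_queues) ? num_cpu / total_queues : 1;
	int stragglers = num_cpu >= total_queues ? num_cpu % total_queues : 0;
	int cpu = 0;

	for (int q = 0; q < total_queues; q++) {
		int count = stride + (q < stragglers ? 1 : 0);

		printf("queue %d:", q);
		for (int n = 0; n < count; n++) {
			printf(" cpu%d", cpu);
			cpu = (cpu + 1) % num_cpu;	/* wrap like for_each_online_cpu_wrap() */
		}
		printf("\n");
	}
	return 0;
}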
@@ -256,7 +262,7 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
int num_rxqs = adapter->num_active_rx_scrqs, i_rxqs = 0;
int num_txqs = adapter->num_active_tx_scrqs, i_txqs = 0;
int total_queues, stride, stragglers, i;
- unsigned int num_cpu, cpu;
+ unsigned int num_cpu, cpu = 0;
bool is_rx_queue;
int rc = 0;
@@ -274,8 +280,6 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
stride = max_t(int, num_cpu / total_queues, 1);
/* number of leftover cpu's */
stragglers = num_cpu >= total_queues ? num_cpu % total_queues : 0;
- /* next available cpu to assign irq to */
- cpu = cpumask_next(-1, cpu_online_mask);
for (i = 0; i < total_queues; i++) {
is_rx_queue = false;
@@ -4829,6 +4833,18 @@ static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
strscpy(vlcd->name, adapter->netdev->name, len);
}
+static void ibmvnic_print_hex_dump(struct net_device *dev, void *buf,
+ size_t len)
+{
+ unsigned char hex_str[16 * 3];
+
+ for (size_t i = 0; i < len; i += 16) {
+ hex_dump_to_buffer((unsigned char *)buf + i, len - i, 16, 8,
+ hex_str, sizeof(hex_str), false);
+ netdev_dbg(dev, "%s\n", hex_str);
+ }
+}
+
static int send_login(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
@@ -4939,10 +4955,8 @@ static int send_login(struct ibmvnic_adapter *adapter)
vnic_add_client_data(adapter, vlcd);
netdev_dbg(adapter->netdev, "Login Buffer:\n");
- for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
- netdev_dbg(adapter->netdev, "%016lx\n",
- ((unsigned long *)(adapter->login_buf))[i]);
- }
+ ibmvnic_print_hex_dump(adapter->netdev, adapter->login_buf,
+ adapter->login_buf_sz);
memset(&crq, 0, sizeof(crq));
crq.login.first = IBMVNIC_CRQ_CMD;
@@ -5319,15 +5333,13 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
- int i;
dma_unmap_single(dev, adapter->ip_offload_tok,
sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
- for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
- netdev_dbg(adapter->netdev, "%016lx\n",
- ((unsigned long *)(buf))[i]);
+ ibmvnic_print_hex_dump(adapter->netdev, buf,
+ sizeof(adapter->ip_offload_buf));
netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
@@ -5558,10 +5570,8 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
netdev->mtu = adapter->req_mtu - ETH_HLEN;
netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
- for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
- netdev_dbg(adapter->netdev, "%016lx\n",
- ((unsigned long *)(adapter->login_rsp_buf))[i]);
- }
+ ibmvnic_print_hex_dump(netdev, adapter->login_rsp_buf,
+ adapter->login_rsp_buf_sz);
/* Sanity checks */
if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 24ec9a4f1ffa..1640d2f27833 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -264,6 +264,7 @@ config I40EVF
tristate "Intel(R) Ethernet Adaptive Virtual Function support"
select IAVF
depends on PCI_MSI
+ depends on PTP_1588_CLOCK_OPTIONAL
help
This driver supports virtual functions for Intel XL710,
X710, X722, XXV710, and all devices advertising support for
@@ -336,7 +337,7 @@ config ICE_SWITCHDEV
config ICE_HWTS
bool "Support HW cross-timestamp on platforms with PTM support"
default y
- depends on ICE && X86
+ depends on ICE && X86 && PCIE_PTM
help
Say Y to enable hardware supported cross-timestamping on platforms
with PCIe PTM support. The cross-timestamp is available through
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index d7df2a0ed629..44249dd91bd6 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -331,8 +331,21 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
}
/* replace the entire MTA table */
- for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+ for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) {
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
+
+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ /*
+ * Do not queue up too many posted writes to prevent
+ * increased latency for other devices on the
+ * interconnect. Flush after each 8th posted write,
+ * to keep additional execution time low while still
+ * preventing increased latency.
+ */
+ if (!(i % 8) && i)
+ e1e_flush();
+ }
+ }
e1e_flush();
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index e28f1905a4a0..9f47388eaba5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2018 Intel Corporation. */
#include <linux/bpf_trace.h>
+#include <linux/unroll.h>
#include <net/xdp_sock_drv.h>
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"
@@ -529,7 +530,8 @@ static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *des
dma_addr_t dma;
u32 i;
- loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
+ unrolled_count(PKTS_PER_BATCH)
+ for (i = 0; i < PKTS_PER_BATCH; i++) {
u32 cmd = I40E_TX_DESC_CMD_ICRC | xsk_is_eop_desc(&desc[i]);
dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
index ef156fad52f2..dd16351a7af8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -6,7 +6,7 @@
#include <linux/types.h>
-/* This value should match the pragma in the loop_unrolled_for
+/* This value should match the pragma in the unrolled_count()
* macro. Why 4? It is strictly empirical. It seems to be a good
* compromise between the advantage of having simultaneous outstanding
* reads to the DMA array that can hide each others latency and the
@@ -14,14 +14,6 @@
*/
#define PKTS_PER_BATCH 4
-#ifdef __clang__
-#define loop_unrolled_for _Pragma("clang loop unroll_count(4)") for
-#elif __GNUC__ >= 8
-#define loop_unrolled_for _Pragma("GCC unroll 4") for
-#else
-#define loop_unrolled_for for
-#endif
-
struct i40e_ring;
struct i40e_vsi;
struct net_device;
diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile
index 356ac9faa5bf..e13720a728ff 100644
--- a/drivers/net/ethernet/intel/iavf/Makefile
+++ b/drivers/net/ethernet/intel/iavf/Makefile
@@ -13,3 +13,5 @@ obj-$(CONFIG_IAVF) += iavf.o
iavf-y := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \
iavf_adv_rss.o iavf_txrx.o iavf_common.o iavf_adminq.o
+
+iavf-$(CONFIG_PTP_1588_CLOCK) += iavf_ptp.o
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 532a0a595fe8..9de3e0ba3731 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -41,6 +41,7 @@
#include "iavf_txrx.h"
#include "iavf_fdir.h"
#include "iavf_adv_rss.h"
+#include "iavf_types.h"
#include <linux/bitmap.h>
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
@@ -82,7 +83,7 @@ struct iavf_vsi {
#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
-#define IAVF_RX_DESC(R, i) (&(((union iavf_32byte_rx_desc *)((R)->desc))[i]))
+#define IAVF_RX_DESC(R, i) (&(((struct iavf_rx_desc *)((R)->desc))[i]))
#define IAVF_TX_DESC(R, i) (&(((struct iavf_tx_desc *)((R)->desc))[i]))
#define IAVF_TX_CTXTDESC(R, i) \
(&(((struct iavf_tx_context_desc *)((R)->desc))[i]))
@@ -271,6 +272,7 @@ struct iavf_adapter {
/* Lock to protect accesses to MAC and VLAN lists */
spinlock_t mac_vlan_list_lock;
char misc_vector_name[IFNAMSIZ + 9];
+ u8 rxdid;
int num_active_queues;
int num_req_queues;
@@ -343,6 +345,17 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW BIT_ULL(39)
#define IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE BIT_ULL(40)
#define IAVF_FLAG_AQ_GET_QOS_CAPS BIT_ULL(41)
+#define IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS BIT_ULL(42)
+#define IAVF_FLAG_AQ_GET_PTP_CAPS BIT_ULL(43)
+#define IAVF_FLAG_AQ_SEND_PTP_CMD BIT_ULL(44)
+
+ /* AQ messages that must be sent after IAVF_FLAG_AQ_GET_CONFIG, in
+ * order to negotiated extended capabilities.
+ */
+#define IAVF_FLAG_AQ_EXTENDED_CAPS \
+ (IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS | \
+ IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS | \
+ IAVF_FLAG_AQ_GET_PTP_CAPS)
/* flags for processing extended capability messages during
* __IAVF_INIT_EXTENDED_CAPS. Each capability exchange requires
@@ -354,10 +367,18 @@ struct iavf_adapter {
u64 extended_caps;
#define IAVF_EXTENDED_CAP_SEND_VLAN_V2 BIT_ULL(0)
#define IAVF_EXTENDED_CAP_RECV_VLAN_V2 BIT_ULL(1)
+#define IAVF_EXTENDED_CAP_SEND_RXDID BIT_ULL(2)
+#define IAVF_EXTENDED_CAP_RECV_RXDID BIT_ULL(3)
+#define IAVF_EXTENDED_CAP_SEND_PTP BIT_ULL(4)
+#define IAVF_EXTENDED_CAP_RECV_PTP BIT_ULL(5)
#define IAVF_EXTENDED_CAPS \
(IAVF_EXTENDED_CAP_SEND_VLAN_V2 | \
- IAVF_EXTENDED_CAP_RECV_VLAN_V2)
+ IAVF_EXTENDED_CAP_RECV_VLAN_V2 | \
+ IAVF_EXTENDED_CAP_SEND_RXDID | \
+ IAVF_EXTENDED_CAP_RECV_RXDID | \
+ IAVF_EXTENDED_CAP_SEND_PTP | \
+ IAVF_EXTENDED_CAP_RECV_PTP)
/* Lock to prevent possible clobbering of
* current_netdev_promisc_flags
@@ -417,12 +438,18 @@ struct iavf_adapter {
VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
#define QOS_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_QOS)
+#define IAVF_RXDID_ALLOWED(a) \
+ ((a)->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+#define IAVF_PTP_ALLOWED(a) \
+ ((a)->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP)
struct virtchnl_vf_resource *vf_res; /* incl. all VSIs */
struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
struct virtchnl_version_info pf_version;
#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
((_a)->pf_version.minor == 1))
struct virtchnl_vlan_caps vlan_v2_caps;
+ u64 supp_rxdids;
+ struct iavf_ptp ptp;
u16 msg_enable;
struct iavf_eth_stats current_stats;
struct virtchnl_qos_cap_list *qos_caps;
@@ -555,6 +582,10 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter);
int iavf_get_vf_config(struct iavf_adapter *adapter);
int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter);
int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter);
+int iavf_send_vf_supported_rxdids_msg(struct iavf_adapter *adapter);
+int iavf_get_vf_supported_rxdids(struct iavf_adapter *adapter);
+int iavf_send_vf_ptp_caps_msg(struct iavf_adapter *adapter);
+int iavf_get_vf_ptp_caps(struct iavf_adapter *adapter);
void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter);
u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter);
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 74a1e9fe1821..288bb5b2e72e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -1808,7 +1808,7 @@ static int iavf_set_rxfh(struct net_device *netdev,
static const struct ethtool_ops iavf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE,
- .cap_rss_sym_xor_supported = true,
+ .supported_input_xfrm = RXH_XFRM_SYM_XOR,
.get_drvinfo = iavf_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = iavf_get_ringparam,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 6faa62bced3a..6d7ba4d67a19 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -2,8 +2,10 @@
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/net/intel/libie/rx.h>
+#include <net/netdev_lock.h>
#include "iavf.h"
+#include "iavf_ptp.h"
#include "iavf_prototype.h"
/* All iavf tracepoints are defined by the include below, which must
* be included exactly once across the whole kernel with
@@ -710,6 +712,47 @@ static void iavf_configure_tx(struct iavf_adapter *adapter)
}
/**
+ * iavf_select_rx_desc_format - Select Rx descriptor format
+ * @adapter: adapter private structure
+ *
+ * Select which Rx descriptor format to use based on availability and enabled
+ * features.
+ *
+ * Return: the desired RXDID to select for a given Rx queue, as defined by
+ * enum virtchnl_rxdid_format.
+ */
+static u8 iavf_select_rx_desc_format(const struct iavf_adapter *adapter)
+{
+ u64 rxdids = adapter->supp_rxdids;
+
+ /* If we did not negotiate VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC, we must
+ * stick with the default value of the legacy 32 byte format.
+ */
+ if (!IAVF_RXDID_ALLOWED(adapter))
+ return VIRTCHNL_RXDID_1_32B_BASE;
+
+ /* Rx timestamping requires the use of flexible NIC descriptors */
+ if (iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP)) {
+ if (rxdids & BIT(VIRTCHNL_RXDID_2_FLEX_SQ_NIC))
+ return VIRTCHNL_RXDID_2_FLEX_SQ_NIC;
+
+ pci_warn(adapter->pdev,
+ "Unable to negotiate flexible descriptor format\n");
+ }
+
+ /* Warn if the PF does not list support for the default legacy
+ * descriptor format. This shouldn't happen, as this is the format
+ * used if VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is not supported. It is
+ * likely caused by a bug in the PF implementation failing to indicate
+ * support for the format.
+ */
+ if (!(rxdids & VIRTCHNL_RXDID_1_32B_BASE_M))
+ netdev_warn(adapter->netdev, "PF does not list support for default Rx descriptor format\n");
+
+ return VIRTCHNL_RXDID_1_32B_BASE;
+}
+
+/**
* iavf_configure_rx - Configure Receive Unit after Reset
* @adapter: board private structure
*
@@ -719,8 +762,12 @@ static void iavf_configure_rx(struct iavf_adapter *adapter)
{
struct iavf_hw *hw = &adapter->hw;
- for (u32 i = 0; i < adapter->num_active_queues; i++)
+ adapter->rxdid = iavf_select_rx_desc_format(adapter);
+
+ for (u32 i = 0; i < adapter->num_active_queues; i++) {
adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
+ adapter->rx_rings[i].rxdid = adapter->rxdid;
+ }
}
/**
@@ -2075,6 +2122,10 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
return iavf_send_vf_config_msg(adapter);
if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS)
return iavf_send_vf_offload_vlan_v2_msg(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS)
+ return iavf_send_vf_supported_rxdids_msg(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_GET_PTP_CAPS)
+ return iavf_send_vf_ptp_caps_msg(adapter);
if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
iavf_disable_queues(adapter);
return 0;
@@ -2239,7 +2290,10 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD);
return 0;
}
-
+ if (adapter->aq_required & IAVF_FLAG_AQ_SEND_PTP_CMD) {
+ iavf_virtchnl_send_ptp_cmd(adapter);
+ return IAVF_SUCCESS;
+ }
if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
iavf_request_stats(adapter);
return 0;
@@ -2604,6 +2658,112 @@ err:
}
/**
+ * iavf_init_send_supported_rxdids - part of querying for supported RXDID
+ * formats
+ * @adapter: board private structure
+ *
+ * Function processes send of the request for supported RXDIDs to the PF.
+ * Must clear IAVF_EXTENDED_CAP_RECV_RXDID if the message is not sent, e.g.
+ * due to the PF not negotiating VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC.
+ */
+static void iavf_init_send_supported_rxdids(struct iavf_adapter *adapter)
+{
+ int ret;
+
+ ret = iavf_send_vf_supported_rxdids_msg(adapter);
+ if (ret == -EOPNOTSUPP) {
+ /* PF does not support VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC. In this
+ * case, we did not send the capability exchange message and
+ * do not expect a response.
+ */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_RXDID;
+ }
+
+ /* We sent the message, so move on to the next step */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_RXDID;
+}
+
+/**
+ * iavf_init_recv_supported_rxdids - part of querying for supported RXDID
+ * formats
+ * @adapter: board private structure
+ *
+ * Function processes receipt of the supported RXDIDs message from the PF.
+ **/
+static void iavf_init_recv_supported_rxdids(struct iavf_adapter *adapter)
+{
+ int ret;
+
+ memset(&adapter->supp_rxdids, 0, sizeof(adapter->supp_rxdids));
+
+ ret = iavf_get_vf_supported_rxdids(adapter);
+ if (ret)
+ goto err;
+
+ /* We've processed the PF response to the
+ * VIRTCHNL_OP_GET_SUPPORTED_RXDIDS message we sent previously.
+ */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_RXDID;
+ return;
+
+err:
+ /* We didn't receive a reply. Make sure we try sending again when
+ * __IAVF_INIT_FAILED attempts to recover.
+ */
+ adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_RXDID;
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
+}
+
+/**
+ * iavf_init_send_ptp_caps - part of querying for extended PTP capabilities
+ * @adapter: board private structure
+ *
+ * Function processes send of the request for 1588 PTP capabilities to the PF.
+ * Must clear IAVF_EXTENDED_CAP_RECV_PTP if the message is not sent, e.g.
+ * due to the PF not negotiating VIRTCHNL_VF_CAP_PTP.
+ */
+static void iavf_init_send_ptp_caps(struct iavf_adapter *adapter)
+{
+ if (iavf_send_vf_ptp_caps_msg(adapter) == -EOPNOTSUPP) {
+ /* PF does not support VIRTCHNL_VF_CAP_PTP. In this case, we
+ * did not send the capability exchange message and do not
+ * expect a response.
+ */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_PTP;
+ }
+
+ /* We sent the message, so move on to the next step */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_PTP;
+}
+
+/**
+ * iavf_init_recv_ptp_caps - part of querying for supported PTP capabilities
+ * @adapter: board private structure
+ *
+ * Function processes receipt of the PTP capabilities supported on this VF.
+ **/
+static void iavf_init_recv_ptp_caps(struct iavf_adapter *adapter)
+{
+ memset(&adapter->ptp.hw_caps, 0, sizeof(adapter->ptp.hw_caps));
+
+ if (iavf_get_vf_ptp_caps(adapter))
+ goto err;
+
+ /* We've processed the PF response to the VIRTCHNL_OP_1588_PTP_GET_CAPS
+ * message we sent previously.
+ */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_PTP;
+ return;
+
+err:
+ /* We didn't receive a reply. Make sure we try sending again when
+ * __IAVF_INIT_FAILED attempts to recover.
+ */
+ adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_PTP;
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
+}
+
+/**
* iavf_init_process_extended_caps - Part of driver startup
* @adapter: board private structure
*
@@ -2627,6 +2787,24 @@ static void iavf_init_process_extended_caps(struct iavf_adapter *adapter)
return;
}
+ /* Process capability exchange for RXDID formats */
+ if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_RXDID) {
+ iavf_init_send_supported_rxdids(adapter);
+ return;
+ } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_RXDID) {
+ iavf_init_recv_supported_rxdids(adapter);
+ return;
+ }
+
+ /* Process capability exchange for PTP features */
+ if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_PTP) {
+ iavf_init_send_ptp_caps(adapter);
+ return;
+ } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_PTP) {
+ iavf_init_recv_ptp_caps(adapter);
+ return;
+ }
+
/* When we reach here, no further extended capabilities exchanges are
* necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER
*/
@@ -2718,6 +2896,9 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
if (QOS_ALLOWED(adapter))
adapter->aq_required |= IAVF_FLAG_AQ_GET_QOS_CAPS;
+ /* Setup initial PTP configuration */
+ iavf_ptp_init(adapter);
+
iavf_schedule_finish_config(adapter);
return;
@@ -3143,15 +3324,18 @@ continue_reset:
}
adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
- /* always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been
- * sent/received yet, so VLAN_V2_ALLOWED() cannot is not reliable here,
- * however the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS won't be sent until
- * VIRTCHNL_OP_GET_VF_RESOURCES and VIRTCHNL_VF_OFFLOAD_VLAN_V2 have
- * been successfully sent and negotiated
- */
- adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
+ /* Certain capabilities require an extended negotiation process using
+ * extra messages that must be processed after getting the VF
+ * configuration. The related checks such as VLAN_V2_ALLOWED() are not
+ * reliable here, since the configuration has not yet been negotiated.
+ *
+ * Always set these flags, since the related VIRTCHNL messages won't
+ * be sent until after VIRTCHNL_OP_GET_VF_RESOURCES.
+ */
+ adapter->aq_required |= IAVF_FLAG_AQ_EXTENDED_CAPS;
+
spin_lock_bh(&adapter->mac_vlan_list_lock);
/* Delete filter for the current MAC address, it could have
@@ -3711,10 +3895,8 @@ exit:
if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
return 0;
- netdev_lock(netdev);
netif_set_real_num_rx_queues(netdev, total_qps);
netif_set_real_num_tx_queues(netdev, total_qps);
- netdev_unlock(netdev);
return ret;
}
@@ -4379,22 +4561,21 @@ static int iavf_open(struct net_device *netdev)
struct iavf_adapter *adapter = netdev_priv(netdev);
int err;
+ netdev_assert_locked(netdev);
+
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
return -EIO;
}
- netdev_lock(netdev);
while (!mutex_trylock(&adapter->crit_lock)) {
/* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
* is already taken and iavf_open is called from an upper
* device's notifier reacting on NETDEV_REGISTER event.
* We have to leave here to avoid dead lock.
*/
- if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER) {
- netdev_unlock(netdev);
+ if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
return -EBUSY;
- }
usleep_range(500, 1000);
}
@@ -4443,7 +4624,6 @@ static int iavf_open(struct net_device *netdev)
iavf_irq_enable(adapter, true);
mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
return 0;
@@ -4456,7 +4636,6 @@ err_setup_tx:
iavf_free_all_tx_resources(adapter);
err_unlock:
mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
return err;
}
@@ -4478,12 +4657,12 @@ static int iavf_close(struct net_device *netdev)
u64 aq_to_restore;
int status;
- netdev_lock(netdev);
+ netdev_assert_locked(netdev);
+
mutex_lock(&adapter->crit_lock);
if (adapter->state <= __IAVF_DOWN_PENDING) {
mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
return 0;
}
@@ -4536,6 +4715,7 @@ static int iavf_close(struct net_device *netdev)
if (!status)
netdev_warn(netdev, "Device resources not yet released\n");
+ netdev_lock(netdev);
mutex_lock(&adapter->crit_lock);
adapter->aq_required |= aq_to_restore;
mutex_unlock(&adapter->crit_lock);
@@ -5000,6 +5180,25 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev,
return iavf_fix_strip_features(adapter, features);
}
+static int iavf_hwstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+
+ *config = adapter->ptp.hwtstamp_config;
+
+ return 0;
+}
+
+static int iavf_hwstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+
+ return iavf_ptp_set_ts_config(adapter, config, extack);
+}
+
static int
iavf_verify_shaper(struct net_shaper_binding *binding,
const struct net_shaper *shaper,
@@ -5108,6 +5307,8 @@ static const struct net_device_ops iavf_netdev_ops = {
.ndo_set_features = iavf_set_features,
.ndo_setup_tc = iavf_setup_tc,
.net_shaper_ops = &iavf_shaper_ops,
+ .ndo_hwtstamp_get = iavf_hwstamp_get,
+ .ndo_hwtstamp_set = iavf_hwstamp_set,
};
/**
@@ -5362,6 +5563,10 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup the wait queue for indicating virtchannel events */
init_waitqueue_head(&adapter->vc_waitqueue);
+ INIT_LIST_HEAD(&adapter->ptp.aq_cmds);
+ init_waitqueue_head(&adapter->ptp.phc_time_waitqueue);
+ mutex_init(&adapter->ptp.aq_cmd_lock);
+
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
/* Initialization goes on in the work. Do not add more of it below. */
@@ -5518,6 +5723,8 @@ static void iavf_remove(struct pci_dev *pdev)
msleep(50);
}
+ iavf_ptp_release(adapter);
+
iavf_misc_irq_disable(adapter);
/* Shut down all the garbage mashers on the detention level */
cancel_work_sync(&adapter->reset_task);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ptp.c b/drivers/net/ethernet/intel/iavf/iavf_ptp.c
new file mode 100644
index 000000000000..b4d5eda2e84f
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_ptp.c
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Intel Corporation. */
+
+#include "iavf.h"
+#include "iavf_ptp.h"
+
+#define iavf_clock_to_adapter(info) \
+ container_of_const(info, struct iavf_adapter, ptp.info)
+
+/**
+ * iavf_ptp_disable_rx_tstamp - Disable timestamping in Rx rings
+ * @adapter: private adapter structure
+ *
+ * Disable timestamp reporting for all Rx rings.
+ */
+static void iavf_ptp_disable_rx_tstamp(struct iavf_adapter *adapter)
+{
+ for (u32 i = 0; i < adapter->num_active_queues; i++)
+ adapter->rx_rings[i].flags &= ~IAVF_TXRX_FLAGS_HW_TSTAMP;
+}
+
+/**
+ * iavf_ptp_enable_rx_tstamp - Enable timestamping in Rx rings
+ * @adapter: private adapter structure
+ *
+ * Enable timestamp reporting for all Rx rings.
+ */
+static void iavf_ptp_enable_rx_tstamp(struct iavf_adapter *adapter)
+{
+ for (u32 i = 0; i < adapter->num_active_queues; i++)
+ adapter->rx_rings[i].flags |= IAVF_TXRX_FLAGS_HW_TSTAMP;
+}
+
+/**
+ * iavf_ptp_set_timestamp_mode - Set device timestamping mode
+ * @adapter: private adapter structure
+ * @config: pointer to kernel_hwtstamp_config
+ *
+ * Set the timestamping mode requested from the userspace.
+ *
+ * Note: this function always translates Rx timestamp requests for any packet
+ * category into HWTSTAMP_FILTER_ALL.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int iavf_ptp_set_timestamp_mode(struct iavf_adapter *adapter,
+ struct kernel_hwtstamp_config *config)
+{
+ /* Reserved for future extensions. */
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ break;
+ case HWTSTAMP_TX_ON:
+ return -EOPNOTSUPP;
+ default:
+ return -ERANGE;
+ }
+
+ if (config->rx_filter == HWTSTAMP_FILTER_NONE) {
+ iavf_ptp_disable_rx_tstamp(adapter);
+ return 0;
+ } else if (config->rx_filter > HWTSTAMP_FILTER_NTP_ALL) {
+ return -ERANGE;
+ } else if (!(iavf_ptp_cap_supported(adapter,
+ VIRTCHNL_1588_PTP_CAP_RX_TSTAMP))) {
+ return -EOPNOTSUPP;
+ }
+
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ iavf_ptp_enable_rx_tstamp(adapter);
+
+ return 0;
+}
+
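
A user-space sketch of how this mode is requested through the standard SIOCSHWTSTAMP ioctl; the interface name "eth0" is a placeholder. Because the handler above translates any Rx request into HWTSTAMP_FILTER_ALL, the rx_filter echoed back in the config may be broader than the one asked for.

/* User-space sketch: request Rx timestamping via SIOCSHWTSTAMP and print the
 * filter actually granted by the driver. "eth0" is a placeholder name.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_OFF,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("rx_filter granted: %d (HWTSTAMP_FILTER_ALL is %d)\n",
		       cfg.rx_filter, HWTSTAMP_FILTER_ALL);

	close(fd);
	return 0;
}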
+/**
+ * iavf_ptp_set_ts_config - Set timestamping configuration
+ * @adapter: private adapter structure
+ * @config: pointer to kernel_hwtstamp_config structure
+ * @extack: pointer to netlink_ext_ack structure
+ *
+ * Program the requested timestamping configuration to the device.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int iavf_ptp_set_ts_config(struct iavf_adapter *adapter,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = iavf_ptp_set_timestamp_mode(adapter, config);
+ if (err)
+ return err;
+
+ /* Save successful settings for future reference */
+ adapter->ptp.hwtstamp_config = *config;
+
+ return 0;
+}
+
+/**
+ * iavf_ptp_cap_supported - Check if a PTP capability is supported
+ * @adapter: private adapter structure
+ * @cap: the capability bitmask to check
+ *
+ * Return: true if every capability set in cap is also set in the enabled
+ * capabilities reported by the PF, false otherwise.
+ */
+bool iavf_ptp_cap_supported(const struct iavf_adapter *adapter, u32 cap)
+{
+ if (!IAVF_PTP_ALLOWED(adapter))
+ return false;
+
+ /* Only return true if every bit in cap is set in hw_caps.caps */
+ return (adapter->ptp.hw_caps.caps & cap) == cap;
+}
+
+/**
+ * iavf_allocate_ptp_cmd - Allocate a PTP command message structure
+ * @v_opcode: the virtchnl opcode
+ * @msglen: length in bytes of the associated virtchnl structure
+ *
+ * Allocates a PTP command message and pre-fills it with the provided message
+ * length and opcode.
+ *
+ * Return: allocated PTP command.
+ */
+static struct iavf_ptp_aq_cmd *iavf_allocate_ptp_cmd(enum virtchnl_ops v_opcode,
+ u16 msglen)
+{
+ struct iavf_ptp_aq_cmd *cmd;
+
+ cmd = kzalloc(struct_size(cmd, msg, msglen), GFP_KERNEL);
+ if (!cmd)
+ return NULL;
+
+ cmd->v_opcode = v_opcode;
+ cmd->msglen = msglen;
+
+ return cmd;
+}
+
+/**
+ * iavf_queue_ptp_cmd - Queue PTP command for sending over virtchnl
+ * @adapter: private adapter structure
+ * @cmd: the command structure to send
+ *
+ * Queue the given command structure into the PTP virtchnl command queue to
+ * send to the PF.
+ */
+static void iavf_queue_ptp_cmd(struct iavf_adapter *adapter,
+ struct iavf_ptp_aq_cmd *cmd)
+{
+ mutex_lock(&adapter->ptp.aq_cmd_lock);
+ list_add_tail(&cmd->list, &adapter->ptp.aq_cmds);
+ mutex_unlock(&adapter->ptp.aq_cmd_lock);
+
+ adapter->aq_required |= IAVF_FLAG_AQ_SEND_PTP_CMD;
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+}
+
+/**
+ * iavf_send_phc_read - Send request to read PHC time
+ * @adapter: private adapter structure
+ *
+ * Send a request to obtain the PTP hardware clock time. This allocates the
+ * VIRTCHNL_OP_1588_PTP_GET_TIME message and queues it up to send to
+ * indirectly read the PHC time.
+ *
+ * This function does not wait for the reply from the PF.
+ *
+ * Return: 0 if success, error code otherwise.
+ */
+static int iavf_send_phc_read(struct iavf_adapter *adapter)
+{
+ struct iavf_ptp_aq_cmd *cmd;
+
+ if (!adapter->ptp.clock)
+ return -EOPNOTSUPP;
+
+ cmd = iavf_allocate_ptp_cmd(VIRTCHNL_OP_1588_PTP_GET_TIME,
+ sizeof(struct virtchnl_phc_time));
+ if (!cmd)
+ return -ENOMEM;
+
+ iavf_queue_ptp_cmd(adapter, cmd);
+
+ return 0;
+}
+
+/**
+ * iavf_read_phc_indirect - Indirectly read the PHC time via virtchnl
+ * @adapter: private adapter structure
+ * @ts: storage for the timestamp value
+ * @sts: system timestamp values before and after the read
+ *
+ * Used when the device does not have direct register access to the PHC time.
+ * Indirectly reads the time via the VIRTCHNL_OP_1588_PTP_GET_TIME, and waits
+ * for the reply from the PF.
+ *
+ * Based on some simple measurements using ftrace and phc2sys, this clock
+ * access method has a latency of about 110 usec even when the system is not
+ * under load. In order to achieve acceptable results when using phc2sys with
+ * the indirect clock access method, it is recommended to use more
+ * conservative proportional and integral constants with the P/I servo.
+ *
+ * Return: 0 if success, error code otherwise.
+ */
+static int iavf_read_phc_indirect(struct iavf_adapter *adapter,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ long ret;
+ int err;
+
+ adapter->ptp.phc_time_ready = false;
+
+ ptp_read_system_prets(sts);
+
+ err = iavf_send_phc_read(adapter);
+ if (err)
+ return err;
+
+ ret = wait_event_interruptible_timeout(adapter->ptp.phc_time_waitqueue,
+ adapter->ptp.phc_time_ready,
+ HZ);
+
+ ptp_read_system_postts(sts);
+
+ if (ret < 0)
+ return ret;
+ else if (!ret)
+ return -EBUSY;
+
+ *ts = ns_to_timespec64(adapter->ptp.cached_phc_time);
+
+ return 0;
+}
+
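
The latency of this indirect path is also visible from user space when reading the clock that the driver registers. Below is a sketch using the FD_TO_CLOCKID() convention documented for clock_gettime(2); /dev/ptp0 is a placeholder device node. When disciplining the clock with phc2sys, the more conservative servo constants mentioned above would typically be passed through its -P and -I options.

/* User-space sketch: read a PHC exposed as /dev/ptpN (placeholder path) and
 * time the call, illustrating the indirect-access latency discussed above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	struct timespec sys1, phc, sys2;
	int fd = open("/dev/ptp0", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/ptp0");
		return 1;
	}

	clock_gettime(CLOCK_MONOTONIC, &sys1);
	clock_gettime(FD_TO_CLOCKID(fd), &phc);	/* read the PHC itself */
	clock_gettime(CLOCK_MONOTONIC, &sys2);

	printf("PHC time: %lld.%09ld\n", (long long)phc.tv_sec, phc.tv_nsec);
	printf("read took ~%ld ns\n",
	       (sys2.tv_sec - sys1.tv_sec) * 1000000000L +
	       (sys2.tv_nsec - sys1.tv_nsec));

	close(fd);
	return 0;
}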
+static int iavf_ptp_gettimex64(struct ptp_clock_info *info,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct iavf_adapter *adapter = iavf_clock_to_adapter(info);
+
+ if (!adapter->ptp.clock)
+ return -EOPNOTSUPP;
+
+ return iavf_read_phc_indirect(adapter, ts, sts);
+}
+
+/**
+ * iavf_ptp_cache_phc_time - Cache PHC time for performing timestamp extension
+ * @adapter: private adapter structure
+ *
+ * Periodically cache the PHC time in order to allow for timestamp extension.
+ * This is required because the Tx and Rx timestamps only contain 32 bits of
+ * nanoseconds. Timestamp extension allows calculating the corrected 64-bit
+ * timestamp. This algorithm relies on the cached time being within ~1 second
+ * of the timestamp.
+ */
+static void iavf_ptp_cache_phc_time(struct iavf_adapter *adapter)
+{
+ if (!time_is_before_jiffies(adapter->ptp.cached_phc_updated + HZ))
+ return;
+
+ /* The response from virtchnl will store the time into
+ * cached_phc_time.
+ */
+ iavf_send_phc_read(adapter);
+}
+
+/**
+ * iavf_ptp_do_aux_work - Perform periodic work required for PTP support
+ * @info: PTP clock info structure
+ *
+ * Handler to take care of periodic work required for PTP operation. This
+ * includes the following tasks:
+ *
+ * 1) updating cached_phc_time
+ *
+ * cached_phc_time is used by the Tx and Rx timestamp flows in order to
+ * perform timestamp extension, by carefully comparing the timestamp
+ * 32bit nanosecond timestamps and determining the corrected 64bit
+ * timestamp value to report to userspace. This algorithm only works if
+ * the cached_phc_time is within ~1 second of the Tx or Rx timestamp
+ * event. This task periodically reads the PHC time and stores it, to
+ * ensure that timestamp extension operates correctly.
+ *
+ * Return: time in jiffies until the periodic task should be re-scheduled.
+ */
+static long iavf_ptp_do_aux_work(struct ptp_clock_info *info)
+{
+ struct iavf_adapter *adapter = iavf_clock_to_adapter(info);
+
+ iavf_ptp_cache_phc_time(adapter);
+
+ /* Check work about twice a second */
+ return msecs_to_jiffies(500);
+}
+
+/**
+ * iavf_ptp_register_clock - Register a new PTP for userspace
+ * @adapter: private adapter structure
+ *
+ * Allocate and register a new PTP clock device if necessary.
+ *
+ * Return: 0 on success, error otherwise.
+ */
+static int iavf_ptp_register_clock(struct iavf_adapter *adapter)
+{
+ struct ptp_clock_info *ptp_info = &adapter->ptp.info;
+ struct device *dev = &adapter->pdev->dev;
+ struct ptp_clock *clock;
+
+ snprintf(ptp_info->name, sizeof(ptp_info->name), "%s-%s-clk",
+ KBUILD_MODNAME, dev_name(dev));
+ ptp_info->owner = THIS_MODULE;
+ ptp_info->gettimex64 = iavf_ptp_gettimex64;
+ ptp_info->do_aux_work = iavf_ptp_do_aux_work;
+
+ clock = ptp_clock_register(ptp_info, dev);
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
+
+ adapter->ptp.clock = clock;
+
+ dev_dbg(&adapter->pdev->dev, "PTP clock %s registered\n",
+ adapter->ptp.info.name);
+
+ return 0;
+}
+
+/**
+ * iavf_ptp_init - Initialize PTP support if capability was negotiated
+ * @adapter: private adapter structure
+ *
+ * Initialize PTP functionality, based on the capabilities that the PF has
+ * enabled for this VF.
+ */
+void iavf_ptp_init(struct iavf_adapter *adapter)
+{
+ int err;
+
+ if (!iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_READ_PHC)) {
+ pci_notice(adapter->pdev,
+ "Device does not have PTP clock support\n");
+ return;
+ }
+
+ err = iavf_ptp_register_clock(adapter);
+ if (err) {
+ pci_err(adapter->pdev,
+ "Failed to register PTP clock device (%p)\n",
+ ERR_PTR(err));
+ return;
+ }
+
+ for (int i = 0; i < adapter->num_active_queues; i++) {
+ struct iavf_ring *rx_ring = &adapter->rx_rings[i];
+
+ rx_ring->ptp = &adapter->ptp;
+ }
+
+ ptp_schedule_worker(adapter->ptp.clock, 0);
+}
+
+/**
+ * iavf_ptp_release - Disable PTP support
+ * @adapter: private adapter structure
+ *
+ * Release all PTP resources that were previously initialized.
+ */
+void iavf_ptp_release(struct iavf_adapter *adapter)
+{
+ struct iavf_ptp_aq_cmd *cmd, *tmp;
+
+ if (!adapter->ptp.clock)
+ return;
+
+ pci_dbg(adapter->pdev, "removing PTP clock %s\n",
+ adapter->ptp.info.name);
+ ptp_clock_unregister(adapter->ptp.clock);
+ adapter->ptp.clock = NULL;
+
+ /* Cancel any remaining uncompleted PTP clock commands */
+ mutex_lock(&adapter->ptp.aq_cmd_lock);
+ list_for_each_entry_safe(cmd, tmp, &adapter->ptp.aq_cmds, list) {
+ list_del(&cmd->list);
+ kfree(cmd);
+ }
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
+ mutex_unlock(&adapter->ptp.aq_cmd_lock);
+
+ adapter->ptp.hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ iavf_ptp_disable_rx_tstamp(adapter);
+}
+
+/**
+ * iavf_ptp_process_caps - Handle change in PTP capabilities
+ * @adapter: private adapter structure
+ *
+ * Handle any state changes necessary due to change in PTP capabilities, such
+ * as after a device reset or change in configuration from the PF.
+ */
+void iavf_ptp_process_caps(struct iavf_adapter *adapter)
+{
+ bool phc = iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_READ_PHC);
+
+ /* Check if the device gained or lost necessary access to support the
+ * PTP hardware clock. If so, driver must respond appropriately by
+ * creating or destroying the PTP clock device.
+ */
+ if (adapter->ptp.clock && !phc)
+ iavf_ptp_release(adapter);
+ else if (!adapter->ptp.clock && phc)
+ iavf_ptp_init(adapter);
+
+ /* Check if the device lost access to timestamping incoming Rx packets */
+ if (!iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP)) {
+ adapter->ptp.hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ iavf_ptp_disable_rx_tstamp(adapter);
+ }
+}
+
+/**
+ * iavf_ptp_extend_32b_timestamp - Convert a 32b nanoseconds timestamp to 64b
+ * nanoseconds
+ * @cached_phc_time: recently cached copy of PHC time
+ * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
+ *
+ * Hardware captures timestamps which contain only 32 bits of nominal
+ * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
+ *
+ * Extend the 32bit nanosecond timestamp using the following algorithm and
+ * assumptions:
+ *
+ * 1) have a recently cached copy of the PHC time
+ * 2) assume that the in_tstamp was captured within 2^31 nanoseconds (~2.1
+ * seconds) before or after the PHC time was captured.
+ * 3) calculate the delta between the cached time and the timestamp
+ * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
+ * captured after the PHC time. In this case, the full timestamp is just
+ * the cached PHC time plus the delta.
+ * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
+ * timestamp was captured *before* the PHC time, i.e. because the PHC
+ * cache was updated after the timestamp was captured by hardware. In this
+ * case, the full timestamp is the cached time minus the inverse delta.
+ *
+ * This algorithm works even if the PHC time was updated after a Tx timestamp
+ * was requested, but before the Tx timestamp event was reported from
+ * hardware.
+ *
+ * This calculation primarily relies on keeping the cached PHC time up to
+ * date. If the timestamp was captured more than 2^31 nanoseconds after the
+ * PHC time, it is possible that the lower 32bits of PHC time have
+ * overflowed more than once, and we might generate an incorrect timestamp.
+ *
+ * This is prevented by (a) periodically updating the cached PHC time once
+ * a second, and (b) discarding any Tx timestamp packet if it has waited for
+ * a timestamp for more than one second.
+ *
+ * Return: extended timestamp (to 64b).
+ */
+u64 iavf_ptp_extend_32b_timestamp(u64 cached_phc_time, u32 in_tstamp)
+{
+ u32 low = lower_32_bits(cached_phc_time);
+ u32 delta = in_tstamp - low;
+ u64 ns;
+
+ /* Do not assume that the in_tstamp is always more recent than the
+ * cached PHC time. If the delta is large, it indicates that the
+ * in_tstamp was captured before the cached time, so extend it
+ * backwards (subtract) rather than forwards (add).
+ */
+ if (delta > S32_MAX)
+ ns = cached_phc_time - (low - in_tstamp);
+ else
+ ns = cached_phc_time + delta;
+
+ return ns;
+}
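
A worked example makes the two branches of the extension rule concrete. The snippet below re-implements the same arithmetic outside the kernel with made-up values; the numbers are purely illustrative:

#include <stdint.h>
#include <stdio.h>

/* Stand-alone copy of the extension rule, for illustration only. */
static uint64_t extend_32b(uint64_t cached, uint32_t tstamp)
{
	uint32_t low = (uint32_t)cached;
	uint32_t delta = tstamp - low;

	return delta > INT32_MAX ? cached - (low - tstamp) : cached + delta;
}

int main(void)
{
	uint64_t cached = 0x0000000280000000ULL;	/* low 32 bits = 0x80000000 */

	/* Captured ~1 us after the cached time: small forward delta.
	 * Prints 0x280000400.
	 */
	printf("%#llx\n", (unsigned long long)extend_32b(cached, 0x80000400));

	/* Captured ~1 us before the cached time: delta wraps above S32_MAX,
	 * so the result is 0x400 ns earlier than the cached time.
	 * Prints 0x27ffffc00.
	 */
	printf("%#llx\n", (unsigned long long)extend_32b(cached, 0x7ffffc00));
	return 0;
}
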
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ptp.h b/drivers/net/ethernet/intel/iavf/iavf_ptp.h
new file mode 100644
index 000000000000..783b8f287cd9
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_ptp.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IAVF_PTP_H_
+#define _IAVF_PTP_H_
+
+#include "iavf_types.h"
+
+/* bit indicating whether a 40bit timestamp is valid */
+#define IAVF_PTP_40B_TSTAMP_VALID BIT(24)
+
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+void iavf_ptp_init(struct iavf_adapter *adapter);
+void iavf_ptp_release(struct iavf_adapter *adapter);
+void iavf_ptp_process_caps(struct iavf_adapter *adapter);
+bool iavf_ptp_cap_supported(const struct iavf_adapter *adapter, u32 cap);
+void iavf_virtchnl_send_ptp_cmd(struct iavf_adapter *adapter);
+int iavf_ptp_set_ts_config(struct iavf_adapter *adapter,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+u64 iavf_ptp_extend_32b_timestamp(u64 cached_phc_time, u32 in_tstamp);
+#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+static inline void iavf_ptp_init(struct iavf_adapter *adapter) { }
+static inline void iavf_ptp_release(struct iavf_adapter *adapter) { }
+static inline void iavf_ptp_process_caps(struct iavf_adapter *adapter) { }
+static inline bool iavf_ptp_cap_supported(const struct iavf_adapter *adapter,
+ u32 cap)
+{
+ return false;
+}
+
+static inline void iavf_virtchnl_send_ptp_cmd(struct iavf_adapter *adapter) { }
+static inline int iavf_ptp_set_ts_config(struct iavf_adapter *adapter,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline u64 iavf_ptp_extend_32b_timestamp(u64 cached_phc_time,
+ u32 in_tstamp)
+{
+ return 0;
+}
+
+#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+#endif /* _IAVF_PTP_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_trace.h b/drivers/net/ethernet/intel/iavf/iavf_trace.h
index 62212011c807..c5e4d1823886 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_trace.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_trace.h
@@ -112,7 +112,7 @@ DECLARE_EVENT_CLASS(
iavf_rx_template,
TP_PROTO(struct iavf_ring *ring,
- union iavf_32byte_rx_desc *desc,
+ struct iavf_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb),
@@ -140,7 +140,7 @@ DECLARE_EVENT_CLASS(
DEFINE_EVENT(
iavf_rx_template, iavf_clean_rx_irq,
TP_PROTO(struct iavf_ring *ring,
- union iavf_32byte_rx_desc *desc,
+ struct iavf_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb));
@@ -148,7 +148,7 @@ DEFINE_EVENT(
DEFINE_EVENT(
iavf_rx_template, iavf_clean_rx_irq_rx,
TP_PROTO(struct iavf_ring *ring,
- union iavf_32byte_rx_desc *desc,
+ struct iavf_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb));
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 26b424fd6718..422312b8b54a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -8,6 +8,26 @@
#include "iavf.h"
#include "iavf_trace.h"
#include "iavf_prototype.h"
+#include "iavf_ptp.h"
+
+/**
+ * iavf_is_descriptor_done - tests DD bit in Rx descriptor
+ * @qw1: quad word 1 of the descriptor, which contains the Descriptor Done field
+ * @flex: is the descriptor flex or legacy
+ *
+ * This function tests the descriptor done bit in the specified descriptor.
+ * Because there are two descriptor formats (legacy and flex), the @flex
+ * parameter is used to distinguish between them.
+ *
+ * Return: true or false based on the state of DD bit in Rx descriptor.
+ */
+static bool iavf_is_descriptor_done(u64 qw1, bool flex)
+{
+ if (flex)
+ return FIELD_GET(IAVF_RXD_FLEX_DD_M, qw1);
+ else
+ return FIELD_GET(IAVF_RXD_LEGACY_DD_M, qw1);
+}
static __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
u32 td_tag)
@@ -766,7 +786,7 @@ int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
u64_stats_init(&rx_ring->syncp);
/* Round up to nearest 4K */
- rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc);
+ rx_ring->size = rx_ring->count * sizeof(struct iavf_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
rx_ring->desc = dma_alloc_coherent(fq.pp->p.dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
@@ -845,7 +865,7 @@ bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
.count = rx_ring->count,
};
u16 ntu = rx_ring->next_to_use;
- union iavf_rx_desc *rx_desc;
+ struct iavf_rx_desc *rx_desc;
/* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count)
@@ -863,7 +883,7 @@ bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
- rx_desc->read.pkt_addr = cpu_to_le64(addr);
+ rx_desc->qw0 = cpu_to_le64(addr);
rx_desc++;
ntu++;
@@ -873,7 +893,7 @@ bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
}
/* clear the status bits for the next_to_use descriptor */
- rx_desc->wb.qword1.status_error_len = 0;
+ rx_desc->qw1 = 0;
cleaned_count--;
} while (cleaned_count);
@@ -896,60 +916,43 @@ no_buffers:
}
/**
- * iavf_rx_checksum - Indicate in skb if hw indicated a good cksum
+ * iavf_rx_csum - Indicate in skb if hw indicated a good checksum
* @vsi: the VSI we care about
* @skb: skb currently being received and modified
- * @rx_desc: the receive descriptor
+ * @decoded_pt: decoded ptype information
+ * @csum_bits: decoded Rx descriptor information
**/
-static void iavf_rx_checksum(struct iavf_vsi *vsi,
- struct sk_buff *skb,
- union iavf_rx_desc *rx_desc)
+static void iavf_rx_csum(const struct iavf_vsi *vsi, struct sk_buff *skb,
+ struct libeth_rx_pt decoded_pt,
+ struct libeth_rx_csum csum_bits)
{
- struct libeth_rx_pt decoded;
- u32 rx_error, rx_status;
bool ipv4, ipv6;
- u8 ptype;
- u64 qword;
skb->ip_summed = CHECKSUM_NONE;
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
-
- decoded = libie_rx_pt_parse(ptype);
- if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded))
- return;
-
- rx_error = FIELD_GET(IAVF_RXD_QW1_ERROR_MASK, qword);
- rx_status = FIELD_GET(IAVF_RXD_QW1_STATUS_MASK, qword);
-
/* did the hardware decode the packet and checksum? */
- if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT)))
+ if (unlikely(!csum_bits.l3l4p))
return;
- ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
- ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded_pt) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded_pt) == LIBETH_RX_PT_OUTER_IPV6;
- if (ipv4 &&
- (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) |
- BIT(IAVF_RX_DESC_ERROR_EIPE_SHIFT))))
+ if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe)))
goto checksum_fail;
/* likely incorrect csum if alternate IP extension headers found */
- if (ipv6 &&
- rx_status & BIT(IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT))
- /* don't increment checksum err here, non-fatal err */
+ if (unlikely(ipv6 && csum_bits.ipv6exadd))
return;
/* there was some L4 error, count error and punt packet to the stack */
- if (rx_error & BIT(IAVF_RX_DESC_ERROR_L4E_SHIFT))
+ if (unlikely(csum_bits.l4e))
goto checksum_fail;
/* handle packets that were not able to be checksummed due
* to arrival speed, in this case the stack can compute
* the csum.
*/
- if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT))
+ if (unlikely(csum_bits.pprs))
return;
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -960,52 +963,196 @@ checksum_fail:
}
/**
- * iavf_rx_hash - set the hash value in the skb
+ * iavf_legacy_rx_csum - Indicate in skb if hw indicated a good checksum
+ * @vsi: the VSI we care about
+ * @qw1: quad word 1
+ * @decoded_pt: decoded packet type
+ *
+ * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte
+ * descriptor writeback format.
+ *
+ * Return: decoded checksum bits.
+ **/
+static struct libeth_rx_csum
+iavf_legacy_rx_csum(const struct iavf_vsi *vsi, u64 qw1,
+ const struct libeth_rx_pt decoded_pt)
+{
+ struct libeth_rx_csum csum_bits = {};
+
+ if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded_pt))
+ return csum_bits;
+
+ csum_bits.ipe = FIELD_GET(IAVF_RXD_LEGACY_IPE_M, qw1);
+ csum_bits.eipe = FIELD_GET(IAVF_RXD_LEGACY_EIPE_M, qw1);
+ csum_bits.l4e = FIELD_GET(IAVF_RXD_LEGACY_L4E_M, qw1);
+ csum_bits.pprs = FIELD_GET(IAVF_RXD_LEGACY_PPRS_M, qw1);
+ csum_bits.l3l4p = FIELD_GET(IAVF_RXD_LEGACY_L3L4P_M, qw1);
+ csum_bits.ipv6exadd = FIELD_GET(IAVF_RXD_LEGACY_IPV6EXADD_M, qw1);
+
+ return csum_bits;
+}
+
+/**
+ * iavf_flex_rx_csum - Indicate in skb if hw indicated a good checksum
+ * @vsi: the VSI we care about
+ * @qw1: quad word 1
+ * @decoded_pt: decoded packet type
+ *
+ * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
+ *
+ * Return: decoded checksum bits.
+ **/
+static struct libeth_rx_csum
+iavf_flex_rx_csum(const struct iavf_vsi *vsi, u64 qw1,
+ const struct libeth_rx_pt decoded_pt)
+{
+ struct libeth_rx_csum csum_bits = {};
+
+ if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded_pt))
+ return csum_bits;
+
+ csum_bits.ipe = FIELD_GET(IAVF_RXD_FLEX_XSUM_IPE_M, qw1);
+ csum_bits.eipe = FIELD_GET(IAVF_RXD_FLEX_XSUM_EIPE_M, qw1);
+ csum_bits.l4e = FIELD_GET(IAVF_RXD_FLEX_XSUM_L4E_M, qw1);
+ csum_bits.eudpe = FIELD_GET(IAVF_RXD_FLEX_XSUM_EUDPE_M, qw1);
+ csum_bits.l3l4p = FIELD_GET(IAVF_RXD_FLEX_L3L4P_M, qw1);
+ csum_bits.ipv6exadd = FIELD_GET(IAVF_RXD_FLEX_IPV6EXADD_M, qw1);
+ csum_bits.nat = FIELD_GET(IAVF_RXD_FLEX_NAT_M, qw1);
+
+ return csum_bits;
+}
+
+/**
+ * iavf_legacy_rx_hash - set the hash value in the skb
+ * @ring: descriptor ring
+ * @qw0: quad word 0
+ * @qw1: quad word 1
+ * @skb: skb currently being received and modified
+ * @decoded_pt: decoded packet type
+ *
+ * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte
+ * descriptor writeback format.
+ **/
+static void iavf_legacy_rx_hash(const struct iavf_ring *ring, __le64 qw0,
+ __le64 qw1, struct sk_buff *skb,
+ const struct libeth_rx_pt decoded_pt)
+{
+ const __le64 rss_mask = cpu_to_le64(IAVF_RXD_LEGACY_FLTSTAT_M);
+ u32 hash;
+
+ if (!libeth_rx_pt_has_hash(ring->netdev, decoded_pt))
+ return;
+
+ if ((qw1 & rss_mask) == rss_mask) {
+ hash = le64_get_bits(qw0, IAVF_RXD_LEGACY_RSS_M);
+ libeth_rx_pt_set_hash(skb, hash, decoded_pt);
+ }
+}
+
+/**
+ * iavf_flex_rx_hash - set the hash value in the skb
* @ring: descriptor ring
- * @rx_desc: specific descriptor
+ * @qw1: quad word 1
* @skb: skb currently being received and modified
- * @rx_ptype: Rx packet type
+ * @decoded_pt: decoded packet type
+ *
+ * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
**/
-static void iavf_rx_hash(struct iavf_ring *ring,
- union iavf_rx_desc *rx_desc,
- struct sk_buff *skb,
- u8 rx_ptype)
+static void iavf_flex_rx_hash(const struct iavf_ring *ring, __le64 qw1,
+ struct sk_buff *skb,
+ const struct libeth_rx_pt decoded_pt)
{
- struct libeth_rx_pt decoded;
+ bool rss_valid;
u32 hash;
- const __le64 rss_mask =
- cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
- IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);
- decoded = libie_rx_pt_parse(rx_ptype);
- if (!libeth_rx_pt_has_hash(ring->netdev, decoded))
+ if (!libeth_rx_pt_has_hash(ring->netdev, decoded_pt))
return;
- if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
- hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
- libeth_rx_pt_set_hash(skb, hash, decoded);
+ rss_valid = le64_get_bits(qw1, IAVF_RXD_FLEX_RSS_VALID_M);
+ if (rss_valid) {
+ hash = le64_get_bits(qw1, IAVF_RXD_FLEX_RSS_HASH_M);
+ libeth_rx_pt_set_hash(skb, hash, decoded_pt);
}
}
/**
+ * iavf_flex_rx_tstamp - Capture Rx timestamp from the descriptor
+ * @rx_ring: descriptor ring
+ * @qw2: quad word 2 of descriptor
+ * @qw3: quad word 3 of descriptor
+ * @skb: skb currently being received
+ *
+ * Read the Rx timestamp value from the descriptor and pass it to the stack.
+ *
+ * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
+ */
+static void iavf_flex_rx_tstamp(const struct iavf_ring *rx_ring, __le64 qw2,
+ __le64 qw3, struct sk_buff *skb)
+{
+ u32 tstamp;
+ u64 ns;
+
+ /* Skip processing if timestamps aren't enabled */
+ if (!(rx_ring->flags & IAVF_TXRX_FLAGS_HW_TSTAMP))
+ return;
+
+ /* Check if this Rx descriptor has a valid timestamp */
+ if (!le64_get_bits(qw2, IAVF_PTP_40B_TSTAMP_VALID))
+ return;
+
+ /* the ts_low field only contains the valid bit and sub-nanosecond
+ * precision, so we don't need to extract it.
+ */
+ tstamp = le64_get_bits(qw3, IAVF_RXD_FLEX_QW3_TSTAMP_HIGH_M);
+
+ ns = iavf_ptp_extend_32b_timestamp(rx_ring->ptp->cached_phc_time,
+ tstamp);
+
+ *skb_hwtstamps(skb) = (struct skb_shared_hwtstamps) {
+ .hwtstamp = ns_to_ktime(ns),
+ };
+}
+
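These skb hardware timestamps are only produced once the ring has IAVF_TXRX_FLAGS_HW_TSTAMP set, which userspace requests through the standard hwtstamp ioctl. A minimal sketch of that request follows; the interface name and the already-open socket are assumptions, and the resulting timestamps are then delivered to applications via SO_TIMESTAMPING control messages:

#include <string.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Ask the driver to timestamp all received packets on @ifname. */
static int enable_rx_hwtstamp(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_OFF,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr = {};

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}
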
+/**
* iavf_process_skb_fields - Populate skb header fields from Rx descriptor
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being populated
- * @rx_ptype: the packet type decoded by hardware
+ * @ptype: the packet type decoded by hardware
+ * @flex: is the descriptor flex or legacy
*
* This function checks the ring, descriptor, and packet information in
* order to populate the hash, checksum, VLAN, protocol, and
* other fields within the skb.
**/
-static void
-iavf_process_skb_fields(struct iavf_ring *rx_ring,
- union iavf_rx_desc *rx_desc, struct sk_buff *skb,
- u8 rx_ptype)
+static void iavf_process_skb_fields(const struct iavf_ring *rx_ring,
+ const struct iavf_rx_desc *rx_desc,
+ struct sk_buff *skb, u32 ptype,
+ bool flex)
{
- iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
-
- iavf_rx_checksum(rx_ring->vsi, skb, rx_desc);
+ struct libeth_rx_csum csum_bits;
+ struct libeth_rx_pt decoded_pt;
+ __le64 qw0 = rx_desc->qw0;
+ __le64 qw1 = rx_desc->qw1;
+ __le64 qw2 = rx_desc->qw2;
+ __le64 qw3 = rx_desc->qw3;
+
+ decoded_pt = libie_rx_pt_parse(ptype);
+
+ if (flex) {
+ iavf_flex_rx_hash(rx_ring, qw1, skb, decoded_pt);
+ iavf_flex_rx_tstamp(rx_ring, qw2, qw3, skb);
+ csum_bits = iavf_flex_rx_csum(rx_ring->vsi, le64_to_cpu(qw1),
+ decoded_pt);
+ } else {
+ iavf_legacy_rx_hash(rx_ring, qw0, qw1, skb, decoded_pt);
+ csum_bits = iavf_legacy_rx_csum(rx_ring->vsi, le64_to_cpu(qw1),
+ decoded_pt);
+ }
+ iavf_rx_csum(rx_ring->vsi, skb, decoded_pt, csum_bits);
skb_record_rx_queue(skb, rx_ring->queue_index);
@@ -1092,8 +1239,7 @@ static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer,
/**
* iavf_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
- * @rx_desc: Rx descriptor for current buffer
- * @skb: Current socket buffer containing buffer in progress
+ * @fields: Rx descriptor extracted fields
*
* This function updates next to clean. If the buffer is an EOP buffer
* this function exits returning false, otherwise it will place the
@@ -1101,8 +1247,7 @@ static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer,
* that this is in fact a non-EOP buffer.
**/
static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
- union iavf_rx_desc *rx_desc,
- struct sk_buff *skb)
+ struct libeth_rqe_info fields)
{
u32 ntc = rx_ring->next_to_clean + 1;
@@ -1113,8 +1258,7 @@ static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
prefetch(IAVF_RX_DESC(rx_ring, ntc));
/* if we are the last buffer then there is nothing else to do */
-#define IAVF_RXD_EOF BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT)
- if (likely(iavf_test_staterr(rx_desc, IAVF_RXD_EOF)))
+ if (likely(fields.eop))
return false;
rx_ring->rx_stats.non_eop_descs++;
@@ -1123,6 +1267,109 @@ static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
}
/**
+ * iavf_extract_legacy_rx_fields - Extract fields from the Rx descriptor
+ * @rx_ring: rx descriptor ring
+ * @rx_desc: the descriptor to process
+ *
+ * Decode the Rx descriptor and extract relevant information including the
+ * size, VLAN tag, Rx packet type, end of packet field and RXE field value.
+ *
+ * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte
+ * descriptor writeback format.
+ *
+ * Return: fields extracted from the Rx descriptor.
+ */
+static struct libeth_rqe_info
+iavf_extract_legacy_rx_fields(const struct iavf_ring *rx_ring,
+ const struct iavf_rx_desc *rx_desc)
+{
+ u64 qw0 = le64_to_cpu(rx_desc->qw0);
+ u64 qw1 = le64_to_cpu(rx_desc->qw1);
+ u64 qw2 = le64_to_cpu(rx_desc->qw2);
+ struct libeth_rqe_info fields;
+ bool l2tag1p, l2tag2p;
+
+ fields.eop = FIELD_GET(IAVF_RXD_LEGACY_EOP_M, qw1);
+ fields.len = FIELD_GET(IAVF_RXD_LEGACY_LENGTH_M, qw1);
+
+ if (!fields.eop)
+ return fields;
+
+ fields.rxe = FIELD_GET(IAVF_RXD_LEGACY_RXE_M, qw1);
+ fields.ptype = FIELD_GET(IAVF_RXD_LEGACY_PTYPE_M, qw1);
+ fields.vlan = 0;
+
+ if (rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
+ l2tag1p = FIELD_GET(IAVF_RXD_LEGACY_L2TAG1P_M, qw1);
+ if (l2tag1p)
+ fields.vlan = FIELD_GET(IAVF_RXD_LEGACY_L2TAG1_M, qw0);
+ } else if (rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
+ l2tag2p = FIELD_GET(IAVF_RXD_LEGACY_L2TAG2P_M, qw2);
+ if (l2tag2p)
+ fields.vlan = FIELD_GET(IAVF_RXD_LEGACY_L2TAG2_M, qw2);
+ }
+
+ return fields;
+}
+
+/**
+ * iavf_extract_flex_rx_fields - Extract fields from the Rx descriptor
+ * @rx_ring: rx descriptor ring
+ * @rx_desc: the descriptor to process
+ *
+ * Decode the Rx descriptor and extract relevant information including the
+ * size, VLAN tag, Rx packet type, end of packet field and RXE field value.
+ *
+ * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
+ *
+ * Return: fields extracted from the Rx descriptor.
+ */
+static struct libeth_rqe_info
+iavf_extract_flex_rx_fields(const struct iavf_ring *rx_ring,
+ const struct iavf_rx_desc *rx_desc)
+{
+ struct libeth_rqe_info fields = {};
+ u64 qw0 = le64_to_cpu(rx_desc->qw0);
+ u64 qw1 = le64_to_cpu(rx_desc->qw1);
+ u64 qw2 = le64_to_cpu(rx_desc->qw2);
+ bool l2tag1p, l2tag2p;
+
+ fields.eop = FIELD_GET(IAVF_RXD_FLEX_EOP_M, qw1);
+ fields.len = FIELD_GET(IAVF_RXD_FLEX_PKT_LEN_M, qw0);
+
+ if (!fields.eop)
+ return fields;
+
+ fields.rxe = FIELD_GET(IAVF_RXD_FLEX_RXE_M, qw1);
+ fields.ptype = FIELD_GET(IAVF_RXD_FLEX_PTYPE_M, qw0);
+ fields.vlan = 0;
+
+ if (rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
+ l2tag1p = FIELD_GET(IAVF_RXD_FLEX_L2TAG1P_M, qw1);
+ if (l2tag1p)
+ fields.vlan = FIELD_GET(IAVF_RXD_FLEX_L2TAG1_M, qw1);
+ } else if (rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
+ l2tag2p = FIELD_GET(IAVF_RXD_FLEX_L2TAG2P_M, qw2);
+ if (l2tag2p)
+ fields.vlan = FIELD_GET(IAVF_RXD_FLEX_L2TAG2_2_M, qw2);
+ }
+
+ return fields;
+}
+
+static struct libeth_rqe_info
+iavf_extract_rx_fields(const struct iavf_ring *rx_ring,
+ const struct iavf_rx_desc *rx_desc,
+ bool flex)
+{
+ if (flex)
+ return iavf_extract_flex_rx_fields(rx_ring, rx_desc);
+ else
+ return iavf_extract_legacy_rx_fields(rx_ring, rx_desc);
+}
+
+/**
* iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
@@ -1136,18 +1383,17 @@ static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
**/
static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
{
+ bool flex = rx_ring->rxdid == VIRTCHNL_RXDID_2_FLEX_SQ_NIC;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring);
bool failure = false;
while (likely(total_rx_packets < (unsigned int)budget)) {
+ struct libeth_rqe_info fields;
struct libeth_fqe *rx_buffer;
- union iavf_rx_desc *rx_desc;
- unsigned int size;
- u16 vlan_tag = 0;
- u8 rx_ptype;
- u64 qword;
+ struct iavf_rx_desc *rx_desc;
+ u64 qw1;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IAVF_RX_BUFFER_WRITE) {
@@ -1158,35 +1404,32 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
- /* status_error_len will always be zero for unused descriptors
- * because it's cleared in cleanup, and overlaps with hdr_addr
- * which is always zero because packet split isn't used, if the
- * hardware wrote DD then the length will be non-zero
- */
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-
/* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we have
* verified the descriptor has been written back.
*/
dma_rmb();
-#define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)
- if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD))
+
+ qw1 = le64_to_cpu(rx_desc->qw1);
+ /* If the DD (descriptor done) field is unset, the other fields
+ * are not valid.
+ */
+ if (!iavf_is_descriptor_done(qw1, flex))
break;
- size = FIELD_GET(IAVF_RXD_QW1_LENGTH_PBUF_MASK, qword);
+ fields = iavf_extract_rx_fields(rx_ring, rx_desc, flex);
iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
rx_buffer = &rx_ring->rx_fqes[rx_ring->next_to_clean];
- if (!libeth_rx_sync_for_cpu(rx_buffer, size))
+ if (!libeth_rx_sync_for_cpu(rx_buffer, fields.len))
goto skip_data;
/* retrieve a buffer from the ring */
if (skb)
- iavf_add_rx_frag(skb, rx_buffer, size);
+ iavf_add_rx_frag(skb, rx_buffer, fields.len);
else
- skb = iavf_build_skb(rx_buffer, size);
+ skb = iavf_build_skb(rx_buffer, fields.len);
/* exit if we failed to retrieve a buffer */
if (!skb) {
@@ -1197,15 +1440,14 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
skip_data:
cleaned_count++;
- if (iavf_is_non_eop(rx_ring, rx_desc, skb) || unlikely(!skb))
+ if (iavf_is_non_eop(rx_ring, fields) || unlikely(!skb))
continue;
- /* ERR_MASK will only have valid bits if EOP set, and
- * what we are doing here is actually checking
- * IAVF_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
- * the error field
+ /* The RXE field in the descriptor indicates MAC errors (such as
+ * CRC, alignment, oversize, or length errors). If it is set, the
+ * packet must be dropped.
*/
- if (unlikely(iavf_test_staterr(rx_desc, BIT(IAVF_RXD_QW1_ERROR_SHIFT)))) {
+ if (unlikely(fields.rxe)) {
dev_kfree_skb_any(skb);
skb = NULL;
continue;
@@ -1219,22 +1461,11 @@ skip_data:
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
-
/* populate checksum, VLAN, and protocol */
- iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
-
- if (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT) &&
- rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1)
- vlan_tag = le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1);
- if (rx_desc->wb.qword2.ext_status &
- cpu_to_le16(BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) &&
- rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2)
- vlan_tag = le16_to_cpu(rx_desc->wb.qword2.l2tag2_2);
+ iavf_process_skb_fields(rx_ring, rx_desc, skb, fields.ptype, flex);
iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
- iavf_receive_skb(rx_ring, skb, vlan_tag);
+ iavf_receive_skb(rx_ring, skb, fields.vlan);
skb = NULL;
/* update budget accounting */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
index f97c702c0802..79ad554f2d53 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -80,25 +80,6 @@ enum iavf_dyn_idx_t {
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
-#define iavf_rx_desc iavf_32byte_rx_desc
-
-/**
- * iavf_test_staterr - tests bits in Rx descriptor status and error fields
- * @rx_desc: pointer to receive descriptor (in le64 format)
- * @stat_err_bits: value to mask
- *
- * This function does some fast chicanery in order to return the
- * value of the mask which is really only used for boolean tests.
- * The status_error_len doesn't need to be shifted because it begins
- * at offset zero.
- */
-static inline bool iavf_test_staterr(union iavf_rx_desc *rx_desc,
- const u64 stat_err_bits)
-{
- return !!(rx_desc->wb.qword1.status_error_len &
- cpu_to_le64(stat_err_bits));
-}
-
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IAVF_RX_INCREMENT(r, i) \
do { \
@@ -262,6 +243,8 @@ struct iavf_ring {
u16 next_to_use;
u16 next_to_clean;
+ u16 rxdid; /* Rx descriptor format */
+
u16 flags;
#define IAVF_TXR_FLAGS_WB_ON_ITR BIT(0)
#define IAVF_TXR_FLAGS_ARM_WB BIT(1)
@@ -269,6 +252,7 @@ struct iavf_ring {
#define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(3)
#define IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(4)
#define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2 BIT(5)
+#define IAVF_TXRX_FLAGS_HW_TSTAMP BIT(6)
/* stats structs */
struct iavf_queue_stats stats;
@@ -295,6 +279,8 @@ struct iavf_ring {
* for this ring.
*/
+ struct iavf_ptp *ptp;
+
u32 rx_buf_len;
struct net_shaper q_shaper;
bool q_shaper_update;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h
index f6b09e57abce..f9e1319620f4 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_type.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_type.h
@@ -178,110 +178,116 @@ struct iavf_hw {
char err_str[16];
};
-/* RX Descriptors */
-union iavf_16byte_rx_desc {
- struct {
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
- } read;
- struct {
- struct {
- struct {
- union {
- __le16 mirroring_status;
- __le16 fcoe_ctx_id;
- } mirr_fcoe;
- __le16 l2tag1;
- } lo_dword;
- union {
- __le32 rss; /* RSS Hash */
- __le32 fd_id; /* Flow director filter id */
- __le32 fcoe_param; /* FCoE DDP Context id */
- } hi_dword;
- } qword0;
- struct {
- /* ext status/error/pktype/length */
- __le64 status_error_len;
- } qword1;
- } wb; /* writeback */
-};
-
-union iavf_32byte_rx_desc {
- struct {
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
- /* bit 0 of hdr_buffer_addr is DD bit */
- __le64 rsvd1;
- __le64 rsvd2;
- } read;
- struct {
- struct {
- struct {
- union {
- __le16 mirroring_status;
- __le16 fcoe_ctx_id;
- } mirr_fcoe;
- __le16 l2tag1;
- } lo_dword;
- union {
- __le32 rss; /* RSS Hash */
- __le32 fcoe_param; /* FCoE DDP Context id */
- /* Flow director filter id in case of
- * Programming status desc WB
- */
- __le32 fd_id;
- } hi_dword;
- } qword0;
- struct {
- /* status/error/pktype/length */
- __le64 status_error_len;
- } qword1;
- struct {
- __le16 ext_status; /* extended status */
- __le16 rsvd;
- __le16 l2tag2_1;
- __le16 l2tag2_2;
- } qword2;
- struct {
- union {
- __le32 flex_bytes_lo;
- __le32 pe_status;
- } lo_dword;
- union {
- __le32 flex_bytes_hi;
- __le32 fd_id;
- } hi_dword;
- } qword3;
- } wb; /* writeback */
-};
-
-enum iavf_rx_desc_status_bits {
- /* Note: These are predefined bit offsets */
- IAVF_RX_DESC_STATUS_DD_SHIFT = 0,
- IAVF_RX_DESC_STATUS_EOF_SHIFT = 1,
- IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
- IAVF_RX_DESC_STATUS_L3L4P_SHIFT = 3,
- IAVF_RX_DESC_STATUS_CRCP_SHIFT = 4,
- IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
- IAVF_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
- /* Note: Bit 8 is reserved in X710 and XL710 */
- IAVF_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
- IAVF_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
- IAVF_RX_DESC_STATUS_FLM_SHIFT = 11,
- IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
- IAVF_RX_DESC_STATUS_LPBK_SHIFT = 14,
- IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
- IAVF_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
- /* Note: For non-tunnel packets INT_UDP_0 is the right status for
- * UDP header
- */
- IAVF_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
- IAVF_RX_DESC_STATUS_LAST /* this entry must be last!!! */
-};
-
-#define IAVF_RXD_QW1_STATUS_SHIFT 0
-#define IAVF_RXD_QW1_STATUS_MASK ((BIT(IAVF_RX_DESC_STATUS_LAST) - 1) \
- << IAVF_RXD_QW1_STATUS_SHIFT)
+/**
+ * struct iavf_rx_desc - Receive descriptor (both legacy and flexible)
+ * @qw0: quad word 0 fields:
+ * Legacy: Descriptor Type; Mirror ID; L2TAG1P (S-TAG); Filter Status
+ * Flex: Descriptor Type; Mirror ID; UMBCAST; Packet Type; Flexible Flags
+ * Section 0; Packet Length; Header Length; Split Header Flag;
+ * Flexible Flags section 1 / Extended Status
+ * @qw1: quad word 1 fields:
+ * Legacy: Status Field; Error Field; Packet Type; Packet Length (packet,
+ * header, Split Header Flag)
+ * Flex: Status / Error 0 Field; L2TAG1P (S-TAG); Flexible Metadata
+ * Container #0; Flexible Metadata Container #1
+ * @qw2: quad word 2 fields:
+ * Legacy: Extended Status; 1st L2TAG2P (C-TAG); 2nd L2TAG2P (C-TAG)
+ * Flex: Status / Error 1 Field; Flexible Flags section 2; Timestamp Low;
+ * 1st L2TAG2 (C-TAG); 2nd L2TAG2 (C-TAG)
+ * @qw3: quad word 3 fields:
+ * Legacy: FD Filter ID / Flexible Bytes
+ * Flex: Flexible Metadata Container #2; Flexible Metadata Container #3;
+ * Flexible Metadata Container #4 / Timestamp High 0; Flexible
+ * Metadata Container #5 / Timestamp High 1;
+ */
+struct iavf_rx_desc {
+ aligned_le64 qw0;
+/* The hash signature (RSS) */
+#define IAVF_RXD_LEGACY_RSS_M GENMASK_ULL(63, 32)
+/* Stripped C-TAG VLAN from the receive packet */
+#define IAVF_RXD_LEGACY_L2TAG1_M GENMASK_ULL(33, 16)
+/* Packet type */
+#define IAVF_RXD_FLEX_PTYPE_M GENMASK_ULL(25, 16)
+/* Packet length */
+#define IAVF_RXD_FLEX_PKT_LEN_M GENMASK_ULL(45, 32)
+
+ aligned_le64 qw1;
+/* Descriptor done indication flag. */
+#define IAVF_RXD_LEGACY_DD_M BIT(0)
+/* End of packet. Set to 1 if this descriptor is the last one of the packet */
+#define IAVF_RXD_LEGACY_EOP_M BIT(1)
+/* L2 TAG 1 presence indication */
+#define IAVF_RXD_LEGACY_L2TAG1P_M BIT(2)
+/* Detectable L3 and L4 integrity check is processed by the HW */
+#define IAVF_RXD_LEGACY_L3L4P_M BIT(3)
+/* Set when an IPv6 packet contains a Destination Options Header or a Routing
+ * Header.
+ */
+#define IAVF_RXD_LEGACY_IPV6EXADD_M BIT(15)
+/* Receive MAC Errors: CRC; Alignment; Oversize; Undersizes; Length error */
+#define IAVF_RXD_LEGACY_RXE_M BIT(19)
+/* Checksum reports:
+ * - IPE: IP checksum error
+ * - L4E: L4 integrity error
+ * - EIPE: External IP header (tunneled packets)
+ */
+#define IAVF_RXD_LEGACY_IPE_M BIT(22)
+#define IAVF_RXD_LEGACY_L4E_M BIT(23)
+#define IAVF_RXD_LEGACY_EIPE_M BIT(24)
+/* Set for packets that skip checksum calculation in pre-parser */
+#define IAVF_RXD_LEGACY_PPRS_M BIT(26)
+/* Indicates the content in the Filter Status field */
+#define IAVF_RXD_LEGACY_FLTSTAT_M GENMASK_ULL(13, 12)
+/* Packet type */
+#define IAVF_RXD_LEGACY_PTYPE_M GENMASK_ULL(37, 30)
+/* Packet length */
+#define IAVF_RXD_LEGACY_LENGTH_M GENMASK_ULL(51, 38)
+/* Descriptor done indication flag */
+#define IAVF_RXD_FLEX_DD_M BIT(0)
+/* End of packet. Set to 1 if this descriptor is the last one of the packet */
+#define IAVF_RXD_FLEX_EOP_M BIT(1)
+/* Detectable L3 and L4 integrity check is processed by the HW */
+#define IAVF_RXD_FLEX_L3L4P_M BIT(3)
+/* Checksum reports:
+ * - IPE: IP checksum error
+ * - L4E: L4 integrity error
+ * - EIPE: External IP header (tunneled packets)
+ * - EUDPE: External UDP checksum error (tunneled packets)
+ */
+#define IAVF_RXD_FLEX_XSUM_IPE_M BIT(4)
+#define IAVF_RXD_FLEX_XSUM_L4E_M BIT(5)
+#define IAVF_RXD_FLEX_XSUM_EIPE_M BIT(6)
+#define IAVF_RXD_FLEX_XSUM_EUDPE_M BIT(7)
+/* Set when an IPv6 packet contains a Destination Options Header or a Routing
+ * Header.
+ */
+#define IAVF_RXD_FLEX_IPV6EXADD_M BIT(9)
+/* Receive MAC Errors: CRC; Alignment; Oversize; Undersizes; Length error */
+#define IAVF_RXD_FLEX_RXE_M BIT(10)
+/* Indicates that the RSS/HASH result is valid */
+#define IAVF_RXD_FLEX_RSS_VALID_M BIT(12)
+/* L2 TAG 1 presence indication */
+#define IAVF_RXD_FLEX_L2TAG1P_M BIT(13)
+/* Stripped L2 Tag from the receive packet */
+#define IAVF_RXD_FLEX_L2TAG1_M GENMASK_ULL(31, 16)
+/* The hash signature (RSS) */
+#define IAVF_RXD_FLEX_RSS_HASH_M GENMASK_ULL(63, 32)
+
+ aligned_le64 qw2;
+/* L2 Tag 2 Presence */
+#define IAVF_RXD_LEGACY_L2TAG2P_M BIT(0)
+/* Stripped S-TAG VLAN from the receive packet */
+#define IAVF_RXD_LEGACY_L2TAG2_M GENMASK_ULL(63, 32)
+/* Stripped S-TAG VLAN from the receive packet */
+#define IAVF_RXD_FLEX_L2TAG2_2_M GENMASK_ULL(63, 48)
+/* The packet is a UDP tunneled packet */
+#define IAVF_RXD_FLEX_NAT_M BIT(4)
+/* L2 Tag 2 Presence */
+#define IAVF_RXD_FLEX_L2TAG2P_M BIT(11)
+ aligned_le64 qw3;
+#define IAVF_RXD_FLEX_QW3_TSTAMP_HIGH_M GENMASK_ULL(63, 32)
+} __aligned(4 * sizeof(__le64));
+static_assert(sizeof(struct iavf_rx_desc) == 32);
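
Both descriptor formats are overlaid on the same four quad words, so the same memory decodes differently depending on the negotiated RXDID. A small illustration of the FIELD_GET()/GENMASK_ULL() masks above, using a made-up flex descriptor word (the helper name and value are hypothetical; requires <linux/bitfield.h>):

/* Hypothetical helper, illustration only: decode two fields from a made-up
 * flex descriptor word carrying packet length 1514 and ptype 24.
 */
static inline void iavf_example_decode_flex_qw0(void)
{
	u64 qw0 = 0x000005ea00180000ULL;
	u32 len = FIELD_GET(IAVF_RXD_FLEX_PKT_LEN_M, qw0);	/* bits 45:32 -> 1514 */
	u32 ptype = FIELD_GET(IAVF_RXD_FLEX_PTYPE_M, qw0);	/* bits 25:16 -> 24 */

	pr_debug("len=%u ptype=%u\n", len, ptype);
}
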
#define IAVF_RXD_QW1_STATUS_TSYNINDX_SHIFT IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT
#define IAVF_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
@@ -298,22 +304,6 @@ enum iavf_rx_desc_fltstat_values {
IAVF_RX_DESC_FLTSTAT_RSS_HASH = 3,
};
-#define IAVF_RXD_QW1_ERROR_SHIFT 19
-#define IAVF_RXD_QW1_ERROR_MASK (0xFFUL << IAVF_RXD_QW1_ERROR_SHIFT)
-
-enum iavf_rx_desc_error_bits {
- /* Note: These are predefined bit offsets */
- IAVF_RX_DESC_ERROR_RXE_SHIFT = 0,
- IAVF_RX_DESC_ERROR_RECIPE_SHIFT = 1,
- IAVF_RX_DESC_ERROR_HBO_SHIFT = 2,
- IAVF_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
- IAVF_RX_DESC_ERROR_IPE_SHIFT = 3,
- IAVF_RX_DESC_ERROR_L4E_SHIFT = 4,
- IAVF_RX_DESC_ERROR_EIPE_SHIFT = 5,
- IAVF_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
- IAVF_RX_DESC_ERROR_PPRS_SHIFT = 7
-};
-
enum iavf_rx_desc_error_l3l4e_fcoe_masks {
IAVF_RX_DESC_ERROR_L3L4E_NONE = 0,
IAVF_RX_DESC_ERROR_L3L4E_PROT = 1,
@@ -322,13 +312,6 @@ enum iavf_rx_desc_error_l3l4e_fcoe_masks {
IAVF_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
};
-#define IAVF_RXD_QW1_PTYPE_SHIFT 30
-#define IAVF_RXD_QW1_PTYPE_MASK (0xFFULL << IAVF_RXD_QW1_PTYPE_SHIFT)
-
-#define IAVF_RXD_QW1_LENGTH_PBUF_SHIFT 38
-#define IAVF_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
- IAVF_RXD_QW1_LENGTH_PBUF_SHIFT)
-
#define IAVF_RXD_QW1_LENGTH_HBUF_SHIFT 52
#define IAVF_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
IAVF_RXD_QW1_LENGTH_HBUF_SHIFT)
@@ -347,6 +330,8 @@ enum iavf_rx_desc_ext_status_bits {
IAVF_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
};
+#define IAVF_RX_DESC_EXT_STATUS_L2TAG2P_M BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)
+
enum iavf_rx_desc_pe_status_bits {
/* Note: These are predefined bit offsets */
IAVF_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_types.h b/drivers/net/ethernet/intel/iavf/iavf_types.h
new file mode 100644
index 000000000000..a095855122bf
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_types.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IAVF_TYPES_H_
+#define _IAVF_TYPES_H_
+
+#include "iavf_types.h"
+
+#include <linux/avf/virtchnl.h>
+#include <linux/ptp_clock_kernel.h>
+
+/* structure used to queue PTP commands for processing */
+struct iavf_ptp_aq_cmd {
+ struct list_head list;
+ enum virtchnl_ops v_opcode:16;
+ u16 msglen;
+ u8 msg[] __counted_by(msglen);
+};
+
+struct iavf_ptp {
+ wait_queue_head_t phc_time_waitqueue;
+ struct virtchnl_ptp_caps hw_caps;
+ struct ptp_clock_info info;
+ struct ptp_clock *clock;
+ struct list_head aq_cmds;
+ u64 cached_phc_time;
+ unsigned long cached_phc_updated;
+ /* Lock protecting access to the AQ command list */
+ struct mutex aq_cmd_lock;
+ struct kernel_hwtstamp_config hwtstamp_config;
+ bool phc_time_ready:1;
+};
+
+#endif /* _IAVF_TYPES_H_ */
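
The __counted_by(msglen) annotation ties the flexible msg[] array to msglen, which pairs naturally with struct_size() at allocation time. The iavf_allocate_ptp_cmd() helper called earlier in this patch is not visible in this excerpt; the sketch below only suggests the shape such an allocation typically takes:

/* Hypothetical allocator shape; the real iavf_allocate_ptp_cmd() lives in
 * iavf_ptp.c and is not shown in this excerpt. Requires <linux/slab.h> and
 * <linux/overflow.h>.
 */
static struct iavf_ptp_aq_cmd *example_alloc_ptp_cmd(enum virtchnl_ops op,
						     u16 msglen)
{
	struct iavf_ptp_aq_cmd *cmd;

	/* struct_size() accounts for the trailing flexible msg[] array. */
	cmd = kzalloc(struct_size(cmd, msg, msglen), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->v_opcode = op;
	cmd->msglen = msglen;	/* keeps __counted_by(msglen) consistent */

	return cmd;
}
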
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 15d388b431c5..a6f0e5990be2 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -4,6 +4,7 @@
#include <linux/net/intel/libie/rx.h>
#include "iavf.h"
+#include "iavf_ptp.h"
#include "iavf_prototype.h"
/**
@@ -144,9 +145,11 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_ENCAP |
VIRTCHNL_VF_OFFLOAD_TC_U32 |
VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
VIRTCHNL_VF_OFFLOAD_CRC |
VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
+ VIRTCHNL_VF_CAP_PTP |
VIRTCHNL_VF_OFFLOAD_ADQ |
VIRTCHNL_VF_OFFLOAD_USO |
VIRTCHNL_VF_OFFLOAD_FDIR_PF |
@@ -177,6 +180,54 @@ int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter)
NULL, 0);
}
+int iavf_send_vf_supported_rxdids_msg(struct iavf_adapter *adapter)
+{
+ adapter->aq_required &= ~IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS;
+
+ if (!IAVF_RXDID_ALLOWED(adapter))
+ return -EOPNOTSUPP;
+
+ adapter->current_op = VIRTCHNL_OP_GET_SUPPORTED_RXDIDS;
+
+ return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
+ NULL, 0);
+}
+
+/**
+ * iavf_send_vf_ptp_caps_msg - Send request for PTP capabilities
+ * @adapter: private adapter structure
+ *
+ * Send the VIRTCHNL_OP_1588_PTP_GET_CAPS command to the PF to request the PTP
+ * capabilities available to this device. This includes the following
+ * potential access:
+ *
+ * * READ_PHC - access to read the PTP hardware clock time
+ * * RX_TSTAMP - access to request Rx timestamps on all received packets
+ *
+ * The PF will reply with the same opcode and a filled-out copy of the
+ * virtchnl_ptp_caps structure, which defines which features are accessible
+ * to this device.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int iavf_send_vf_ptp_caps_msg(struct iavf_adapter *adapter)
+{
+ struct virtchnl_ptp_caps hw_caps = {
+ .caps = VIRTCHNL_1588_PTP_CAP_READ_PHC |
+ VIRTCHNL_1588_PTP_CAP_RX_TSTAMP
+ };
+
+ adapter->aq_required &= ~IAVF_FLAG_AQ_GET_PTP_CAPS;
+
+ if (!IAVF_PTP_ALLOWED(adapter))
+ return -EOPNOTSUPP;
+
+ adapter->current_op = VIRTCHNL_OP_1588_PTP_GET_CAPS;
+
+ return iavf_send_pf_msg(adapter, VIRTCHNL_OP_1588_PTP_GET_CAPS,
+ (u8 *)&hw_caps, sizeof(hw_caps));
+}
+
/**
* iavf_validate_num_queues
* @adapter: adapter structure
@@ -263,6 +314,40 @@ int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
return err;
}
+int iavf_get_vf_supported_rxdids(struct iavf_adapter *adapter)
+{
+ struct iavf_arq_event_info event;
+ u64 rxdids;
+ int err;
+
+ event.msg_buf = (u8 *)&rxdids;
+ event.buf_len = sizeof(rxdids);
+
+ err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
+ VIRTCHNL_OP_GET_SUPPORTED_RXDIDS);
+ if (!err)
+ adapter->supp_rxdids = rxdids;
+
+ return err;
+}
+
+int iavf_get_vf_ptp_caps(struct iavf_adapter *adapter)
+{
+ struct virtchnl_ptp_caps caps = {};
+ struct iavf_arq_event_info event;
+ int err;
+
+ event.msg_buf = (u8 *)&caps;
+ event.buf_len = sizeof(caps);
+
+ err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
+ VIRTCHNL_OP_1588_PTP_GET_CAPS);
+ if (!err)
+ adapter->ptp.hw_caps = caps;
+
+ return err;
+}
+
/**
* iavf_configure_queues
* @adapter: adapter structure
@@ -275,6 +360,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
int pairs = adapter->num_active_queues;
struct virtchnl_queue_pair_info *vqpi;
u32 i, max_frame;
+ u8 rx_flags = 0;
size_t len;
max_frame = LIBIE_MAX_RX_FRM_LEN(adapter->rx_rings->pp->p.offset);
@@ -292,6 +378,9 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
if (!vqci)
return;
+ if (iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP))
+ rx_flags |= VIRTCHNL_PTP_RX_TSTAMP;
+
vqci->vsi_id = adapter->vsi_res->vsi_id;
vqci->num_queue_pairs = pairs;
vqpi = vqci->qpair;
@@ -309,9 +398,12 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
vqpi->rxq.max_pkt_size = max_frame;
vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
+ if (IAVF_RXDID_ALLOWED(adapter))
+ vqpi->rxq.rxdid = adapter->rxdid;
if (CRC_OFFLOAD_ALLOWED(adapter))
vqpi->rxq.crc_disable = !!(adapter->netdev->features &
NETIF_F_RXFCS);
+ vqpi->rxq.flags = rx_flags;
vqpi++;
}
@@ -1402,6 +1494,67 @@ void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2);
}
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+/**
+ * iavf_virtchnl_send_ptp_cmd - Send one queued PTP command
+ * @adapter: adapter private structure
+ *
+ * De-queue one PTP command request and send the command message to the PF.
+ * Clear IAVF_FLAG_AQ_SEND_PTP_CMD if no more messages are left to send.
+ */
+void iavf_virtchnl_send_ptp_cmd(struct iavf_adapter *adapter)
+{
+ struct iavf_ptp_aq_cmd *cmd;
+ int err;
+
+ if (!adapter->ptp.clock) {
+ /* This shouldn't be possible to hit, since no messages should
+ * be queued if PTP is not initialized.
+ */
+ pci_err(adapter->pdev, "PTP is not initialized\n");
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
+ return;
+ }
+
+ mutex_lock(&adapter->ptp.aq_cmd_lock);
+ cmd = list_first_entry_or_null(&adapter->ptp.aq_cmds,
+ struct iavf_ptp_aq_cmd, list);
+ if (!cmd) {
+ /* no further PTP messages to send */
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
+ goto out_unlock;
+ }
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ pci_err(adapter->pdev,
+ "Cannot send PTP command %d, command %d pending\n",
+ cmd->v_opcode, adapter->current_op);
+ goto out_unlock;
+ }
+
+ err = iavf_send_pf_msg(adapter, cmd->v_opcode, cmd->msg, cmd->msglen);
+ if (!err) {
+ /* Command was sent without errors, so we can remove it from
+ * the list and discard it.
+ */
+ list_del(&cmd->list);
+ kfree(cmd);
+ } else {
+ /* We failed to send the command, try again next cycle */
+ pci_err(adapter->pdev, "Failed to send PTP command %d\n",
+ cmd->v_opcode);
+ }
+
+ if (list_empty(&adapter->ptp.aq_cmds))
+ /* no further PTP messages to send */
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
+
+out_unlock:
+ mutex_unlock(&adapter->ptp.aq_cmd_lock);
+}
+#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+
/**
* iavf_print_link_message - print link up or down
* @adapter: adapter structure
@@ -2098,6 +2251,37 @@ static void iavf_activate_fdir_filters(struct iavf_adapter *adapter)
}
/**
+ * iavf_virtchnl_ptp_get_time - Respond to VIRTCHNL_OP_1588_PTP_GET_TIME
+ * @adapter: private adapter structure
+ * @data: the message from the PF
+ * @len: length of the message from the PF
+ *
+ * Handle the VIRTCHNL_OP_1588_PTP_GET_TIME message from the PF. This message
+ * is sent by the PF in response to the same op as a request from the VF.
+ * Extract the 64bit nanoseconds time from the message and store it in
+ * cached_phc_time. Then, notify any thread that is waiting for the update via
+ * the wait queue.
+ */
+static void iavf_virtchnl_ptp_get_time(struct iavf_adapter *adapter,
+ void *data, u16 len)
+{
+ struct virtchnl_phc_time *msg = data;
+
+ if (len != sizeof(*msg)) {
+ dev_err_once(&adapter->pdev->dev,
+ "Invalid VIRTCHNL_OP_1588_PTP_GET_TIME from PF. Got size %u, expected %zu\n",
+ len, sizeof(*msg));
+ return;
+ }
+
+ adapter->ptp.cached_phc_time = msg->time;
+ adapter->ptp.cached_phc_updated = jiffies;
+ adapter->ptp.phc_time_ready = true;
+
+ wake_up(&adapter->ptp.phc_time_waitqueue);
+}
+
+/**
* iavf_virtchnl_completion
* @adapter: adapter structure
* @v_opcode: opcode sent by PF
@@ -2509,6 +2693,25 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
aq_required;
}
break;
+ case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
+ if (msglen != sizeof(u64))
+ return;
+
+ adapter->supp_rxdids = *(u64 *)msg;
+
+ break;
+ case VIRTCHNL_OP_1588_PTP_GET_CAPS:
+ if (msglen != sizeof(adapter->ptp.hw_caps))
+ return;
+
+ adapter->ptp.hw_caps = *(struct virtchnl_ptp_caps *)msg;
+
+ /* process any state change needed due to new capabilities */
+ iavf_ptp_process_caps(adapter);
+ break;
+ case VIRTCHNL_OP_1588_PTP_GET_TIME:
+ iavf_virtchnl_ptp_get_time(adapter, msg, msglen);
+ break;
case VIRTCHNL_OP_ENABLE_QUEUES:
/* enable transmits */
iavf_irq_enable(adapter, true);
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index dbdb83567364..fcb199efbea5 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -1205,6 +1205,25 @@ static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
return status;
}
+static void ice_set_min_max_msix(struct ice_pf *pf)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ union devlink_param_value val;
+ int err;
+
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
+ &val);
+ if (!err)
+ pf->msix.min = val.vu32;
+
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
+ &val);
+ if (!err)
+ pf->msix.max = val.vu32;
+}
+
/**
* ice_devlink_reinit_up - do reinit of the given PF
* @pf: pointer to the PF struct
@@ -1220,6 +1239,9 @@ static int ice_devlink_reinit_up(struct ice_pf *pf)
return err;
}
+ /* load MSI-X values */
+ ice_set_min_max_msix(pf);
+
err = ice_init_dev(pf);
if (err)
goto unroll_hw_init;
@@ -1533,6 +1555,43 @@ static int ice_devlink_local_fwd_validate(struct devlink *devlink, u32 id,
return 0;
}
+static int
+ice_devlink_msix_max_pf_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ if (val.vu32 > pf->hw.func_caps.common_cap.num_msix_vectors)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+ice_devlink_msix_min_pf_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ if (val.vu32 < ICE_MIN_MSIX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ice_devlink_enable_rdma_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ bool new_state = val.vbool;
+
+ if (new_state && !test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
enum ice_param_id {
ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
@@ -1548,6 +1607,17 @@ static const struct devlink_param ice_dvl_rdma_params[] = {
ice_devlink_enable_iw_get,
ice_devlink_enable_iw_set,
ice_devlink_enable_iw_validate),
+ DEVLINK_PARAM_GENERIC(ENABLE_RDMA, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, ice_devlink_enable_rdma_validate),
+};
+
+static const struct devlink_param ice_dvl_msix_params[] = {
+ DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MAX,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, ice_devlink_msix_max_pf_validate),
+ DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MIN,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, ice_devlink_msix_min_pf_validate),
};
static const struct devlink_param ice_dvl_sched_params[] = {
@@ -1651,6 +1721,7 @@ void ice_devlink_unregister(struct ice_pf *pf)
int ice_devlink_register_params(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
+ union devlink_param_value value;
struct ice_hw *hw = &pf->hw;
int status;
@@ -1659,10 +1730,39 @@ int ice_devlink_register_params(struct ice_pf *pf)
if (status)
return status;
+ status = devl_params_register(devlink, ice_dvl_msix_params,
+ ARRAY_SIZE(ice_dvl_msix_params));
+ if (status)
+ goto unregister_rdma_params;
+
if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
status = devl_params_register(devlink, ice_dvl_sched_params,
ARRAY_SIZE(ice_dvl_sched_params));
+ if (status)
+ goto unregister_msix_params;
+
+ value.vu32 = pf->msix.max;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
+ value);
+ value.vu32 = pf->msix.min;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
+ value);
+
+ value.vbool = test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
+ value);
+
+ return 0;
+unregister_msix_params:
+ devl_params_unregister(devlink, ice_dvl_msix_params,
+ ARRAY_SIZE(ice_dvl_msix_params));
+unregister_rdma_params:
+ devl_params_unregister(devlink, ice_dvl_rdma_params,
+ ARRAY_SIZE(ice_dvl_rdma_params));
return status;
}
@@ -1673,6 +1773,8 @@ void ice_devlink_unregister_params(struct ice_pf *pf)
devl_params_unregister(devlink, ice_dvl_rdma_params,
ARRAY_SIZE(ice_dvl_rdma_params));
+ devl_params_unregister(devlink, ice_dvl_msix_params,
+ ARRAY_SIZE(ice_dvl_msix_params));
if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
devl_params_unregister(devlink, ice_dvl_sched_params,
diff --git a/drivers/net/ethernet/intel/ice/devlink/health.c b/drivers/net/ethernet/intel/ice/devlink/health.c
index ea40f7941259..19c3d37aa768 100644
--- a/drivers/net/ethernet/intel/ice/devlink/health.c
+++ b/drivers/net/ethernet/intel/ice/devlink/health.c
@@ -25,10 +25,10 @@ struct ice_health_status {
* The below lookup requires to be sorted by code.
*/
-static const char *const ice_common_port_solutions =
+static const char ice_common_port_solutions[] =
"Check your cable connection. Change or replace the module or cable. Manually set speed and duplex.";
-static const char *const ice_port_number_label = "Port Number";
-static const char *const ice_update_nvm_solution = "Update to the latest NVM image.";
+static const char ice_port_number_label[] = "Port Number";
+static const char ice_update_nvm_solution[] = "Update to the latest NVM image.";
static const struct ice_health_status ice_health_status_lookup[] = {
{ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT, "An unsupported module was detected.",
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 71e05d30f0fd..fd083647c14a 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -97,9 +97,6 @@
#define ICE_MIN_LAN_OICR_MSIX 1
#define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
#define ICE_FDIR_MSIX 2
-#define ICE_RDMA_NUM_AEQ_MSIX 4
-#define ICE_MIN_RDMA_MSIX 2
-#define ICE_ESWITCH_MSIX 1
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
#define ICE_VSI_MAP_SCATTER 1
@@ -204,6 +201,7 @@ enum ice_feature {
ICE_F_SMA_CTRL,
ICE_F_CGU,
ICE_F_GNSS,
+ ICE_F_GCS,
ICE_F_ROCE_LAG,
ICE_F_SRIOV_LAG,
ICE_F_MBX_LIMIT,
@@ -478,9 +476,6 @@ struct ice_q_vector {
struct ice_ring_container rx;
struct ice_ring_container tx;
- cpumask_t affinity_mask;
- struct irq_affinity_notify affinity_notify;
-
struct ice_channel *ch;
char name[ICE_INT_NAME_STR_LEN];
@@ -542,6 +537,14 @@ struct ice_agg_node {
u8 valid;
};
+struct ice_pf_msix {
+ u32 cur;
+ u32 min;
+ u32 max;
+ u32 total;
+ u32 rest;
+};
+
struct ice_pf {
struct pci_dev *pdev;
struct ice_adapter *adapter;
@@ -556,13 +559,7 @@ struct ice_pf {
/* OS reserved IRQ details */
struct msix_entry *msix_entries;
struct ice_irq_tracker irq_tracker;
- /* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the
- * number of MSIX vectors needed for all SR-IOV VFs from the number of
- * MSIX vectors allowed on this PF.
- */
- u16 sriov_base_vector;
- unsigned long *sriov_irq_bm; /* bitmap to track irq usage */
- u16 sriov_irq_size; /* size of the irq_bm bitmap */
+ struct ice_virt_irq_tracker virt_irq_tracker;
u16 ctrl_vsi_idx; /* control VSI index in pf->vsi array */
@@ -612,7 +609,7 @@ struct ice_pf {
struct msi_map ll_ts_irq; /* LL_TS interrupt MSIX vector */
u16 max_pf_txqs; /* Total Tx queues PF wide */
u16 max_pf_rxqs; /* Total Rx queues PF wide */
- u16 num_lan_msix; /* Total MSIX vectors for base driver */
+ struct ice_pf_msix msix;
u16 num_lan_tx; /* num LAN Tx queues setup */
u16 num_lan_rx; /* num LAN Rx queues setup */
u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */
@@ -1047,10 +1044,5 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}
-static inline enum ice_phy_model ice_get_phy_model(const struct ice_hw *hw)
-{
- return hw->ptp.phy_model;
-}
-
extern const struct xdp_metadata_ops ice_xdp_md_ops;
#endif /* _ICE_H_ */
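The new struct ice_pf_msix collects the PF's MSI-X accounting in one place. The comments below are an interpretation based on how ice_irq.c uses the fields later in this series, not wording taken from the source:

struct ice_pf_msix {
	u32 cur;	/* vectors currently in use (not exercised in the hunks shown) */
	u32 min;	/* lower bound passed to pci_alloc_irq_vectors() */
	u32 max;	/* PF budget, overridable via the devlink driverinit params */
	u32 total;	/* all vectors exposed by the device function */
	u32 rest;	/* total - max, handed to the SR-IOV virt IRQ tracker */
};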
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index 7cee365cc7d1..2bc5c7f59844 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -511,7 +511,7 @@ void ice_init_arfs(struct ice_vsi *vsi)
struct hlist_head *arfs_fltr_list;
unsigned int i;
- if (!vsi || vsi->type != ICE_VSI_PF)
+ if (!vsi || vsi->type != ICE_VSI_PF || ice_is_arfs_active(vsi))
return;
arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list),
@@ -571,25 +571,6 @@ void ice_clear_arfs(struct ice_vsi *vsi)
}
/**
- * ice_free_cpu_rx_rmap - free setup CPU reverse map
- * @vsi: the VSI to be forwarded to
- */
-void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
-{
- struct net_device *netdev;
-
- if (!vsi || vsi->type != ICE_VSI_PF)
- return;
-
- netdev = vsi->netdev;
- if (!netdev || !netdev->rx_cpu_rmap)
- return;
-
- free_irq_cpu_rmap(netdev->rx_cpu_rmap);
- netdev->rx_cpu_rmap = NULL;
-}
-
-/**
* ice_set_cpu_rx_rmap - setup CPU reverse map for each queue
* @vsi: the VSI to be forwarded to
*/
@@ -597,7 +578,6 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
{
struct net_device *netdev;
struct ice_pf *pf;
- int i;
if (!vsi || vsi->type != ICE_VSI_PF)
return 0;
@@ -610,18 +590,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n",
vsi->type, netdev->name, vsi->num_q_vectors);
- netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors);
- if (unlikely(!netdev->rx_cpu_rmap))
- return -EINVAL;
-
- ice_for_each_q_vector(vsi, i)
- if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
- vsi->q_vectors[i]->irq.virq)) {
- ice_free_cpu_rx_rmap(vsi);
- return -EINVAL;
- }
-
- return 0;
+ return netif_enable_cpu_rmap(netdev, vsi->num_q_vectors);
}
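With the open-coded reverse-map handling removed, the core owns allocation and teardown of netdev->rx_cpu_rmap; the driver only states how many queues it has, and the per-IRQ entries are filled in by the core as NAPI IRQs are assigned (this split is inferred from the new call, not spelled out in this hunk). A minimal sketch of the new call-site shape:

/* Sketch: no driver-side free path is needed anymore, the core releases
 * the rmap when the netdev is unregistered.
 */
static int example_setup_rx_rmap(struct net_device *netdev,
				 unsigned int num_q_vectors)
{
	return netif_enable_cpu_rmap(netdev, num_q_vectors);
}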
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.h b/drivers/net/ethernet/intel/ice/ice_arfs.h
index 9669ad9bf7b5..9706293128c3 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.h
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.h
@@ -45,7 +45,6 @@ int
ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
u16 rxq_idx, u32 flow_id);
void ice_clear_arfs(struct ice_vsi *vsi);
-void ice_free_cpu_rx_rmap(struct ice_vsi *vsi);
void ice_init_arfs(struct ice_vsi *vsi);
void ice_sync_arfs_fltrs(struct ice_pf *pf);
int ice_set_cpu_rx_rmap(struct ice_vsi *vsi);
@@ -56,7 +55,6 @@ ice_is_arfs_using_perfect_flow(struct ice_hw *hw,
enum ice_fltr_ptype flow_type);
#else
static inline void ice_clear_arfs(struct ice_vsi *vsi) { }
-static inline void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) { }
static inline void ice_init_arfs(struct ice_vsi *vsi) { }
static inline void ice_sync_arfs_fltrs(struct ice_pf *pf) { }
static inline void ice_remove_arfs(struct ice_pf *pf) { }
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index b2af8e3586f7..6db4ad8fc70b 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -147,10 +147,6 @@ skip_alloc:
q_vector->reg_idx = q_vector->irq.index;
q_vector->vf_reg_idx = q_vector->irq.index;
- /* only set affinity_mask if the CPU is online */
- if (cpu_online(v_idx))
- cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
-
/* This will not be called in the driver load path because the netdev
* will not be created yet. All other cases will register the NAPI
* handler here (i.e. resume, reset/rebuild, etc.)
@@ -276,7 +272,8 @@ static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
if (test_and_set_bit(ICE_TX_XPS_INIT_DONE, ring->xps_state))
return;
- netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask,
+ netif_set_xps_queue(ring->netdev,
+ &ring->q_vector->napi.config->affinity_mask,
ring->q_index);
}
@@ -473,9 +470,6 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
*/
if (vsi->type != ICE_VSI_VF)
ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
- else
- ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3,
- false);
/* Absolute queue number out of 2K needs to be passed */
err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
@@ -801,13 +795,11 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
return 0;
err_out:
- while (v_idx--)
- ice_free_q_vector(vsi, v_idx);
- dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
- vsi->num_q_vectors, vsi->vsi_num, err);
- vsi->num_q_vectors = 0;
- return err;
+ dev_info(dev, "Failed to allocate %d q_vectors for VSI %d, new value %d",
+ vsi->num_q_vectors, vsi->vsi_num, v_idx);
+ vsi->num_q_vectors = v_idx;
+ return v_idx ? 0 : err;
}
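ice_vsi_alloc_q_vectors() now degrades gracefully on partial failure: whatever vectors were successfully set up are kept, the VSI's vector count is trimmed to match, and an error is returned only when nothing could be allocated. The pattern, reduced to a self-contained sketch with hypothetical names:

struct example_vsi {
	int num_vectors;
};

/* stand-in for the real per-vector setup, assumed fallible */
static int example_alloc_one_vector(struct example_vsi *vsi, int idx)
{
	return 0;
}

static int example_alloc_vectors(struct example_vsi *vsi, int wanted)
{
	int err = 0;
	int i;

	for (i = 0; i < wanted; i++) {
		err = example_alloc_one_vector(vsi, i);
		if (err)
			break;
	}

	vsi->num_vectors = i;	/* keep what was allocated */
	return i ? 0 : err;	/* fail only if nothing succeeded */
}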
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 7a2a2e8da8fa..59df31c2c83f 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -186,7 +186,7 @@ static int ice_set_mac_type(struct ice_hw *hw)
* ice_is_generic_mac - check if device's mac_type is generic
* @hw: pointer to the hardware structure
*
- * Return: true if mac_type is generic (with SBQ support), false if not
+ * Return: true if mac_type is ICE_MAC_GENERIC*, false otherwise.
*/
bool ice_is_generic_mac(struct ice_hw *hw)
{
@@ -195,120 +195,6 @@ bool ice_is_generic_mac(struct ice_hw *hw)
}
/**
- * ice_is_e810
- * @hw: pointer to the hardware structure
- *
- * returns true if the device is E810 based, false if not.
- */
-bool ice_is_e810(struct ice_hw *hw)
-{
- return hw->mac_type == ICE_MAC_E810;
-}
-
-/**
- * ice_is_e810t
- * @hw: pointer to the hardware structure
- *
- * returns true if the device is E810T based, false if not.
- */
-bool ice_is_e810t(struct ice_hw *hw)
-{
- switch (hw->device_id) {
- case ICE_DEV_ID_E810C_SFP:
- switch (hw->subsystem_device_id) {
- case ICE_SUBDEV_ID_E810T:
- case ICE_SUBDEV_ID_E810T2:
- case ICE_SUBDEV_ID_E810T3:
- case ICE_SUBDEV_ID_E810T4:
- case ICE_SUBDEV_ID_E810T6:
- case ICE_SUBDEV_ID_E810T7:
- return true;
- }
- break;
- case ICE_DEV_ID_E810C_QSFP:
- switch (hw->subsystem_device_id) {
- case ICE_SUBDEV_ID_E810T2:
- case ICE_SUBDEV_ID_E810T3:
- case ICE_SUBDEV_ID_E810T5:
- return true;
- }
- break;
- default:
- break;
- }
-
- return false;
-}
-
-/**
- * ice_is_e822 - Check if a device is E822 family device
- * @hw: pointer to the hardware structure
- *
- * Return: true if the device is E822 based, false if not.
- */
-bool ice_is_e822(struct ice_hw *hw)
-{
- switch (hw->device_id) {
- case ICE_DEV_ID_E822C_BACKPLANE:
- case ICE_DEV_ID_E822C_QSFP:
- case ICE_DEV_ID_E822C_SFP:
- case ICE_DEV_ID_E822C_10G_BASE_T:
- case ICE_DEV_ID_E822C_SGMII:
- case ICE_DEV_ID_E822L_BACKPLANE:
- case ICE_DEV_ID_E822L_SFP:
- case ICE_DEV_ID_E822L_10G_BASE_T:
- case ICE_DEV_ID_E822L_SGMII:
- return true;
- default:
- return false;
- }
-}
-
-/**
- * ice_is_e823
- * @hw: pointer to the hardware structure
- *
- * returns true if the device is E823-L or E823-C based, false if not.
- */
-bool ice_is_e823(struct ice_hw *hw)
-{
- switch (hw->device_id) {
- case ICE_DEV_ID_E823L_BACKPLANE:
- case ICE_DEV_ID_E823L_SFP:
- case ICE_DEV_ID_E823L_10G_BASE_T:
- case ICE_DEV_ID_E823L_1GBE:
- case ICE_DEV_ID_E823L_QSFP:
- case ICE_DEV_ID_E823C_BACKPLANE:
- case ICE_DEV_ID_E823C_QSFP:
- case ICE_DEV_ID_E823C_SFP:
- case ICE_DEV_ID_E823C_10G_BASE_T:
- case ICE_DEV_ID_E823C_SGMII:
- return true;
- default:
- return false;
- }
-}
-
-/**
- * ice_is_e825c - Check if a device is E825C family device
- * @hw: pointer to the hardware structure
- *
- * Return: true if the device is E825-C based, false if not.
- */
-bool ice_is_e825c(struct ice_hw *hw)
-{
- switch (hw->device_id) {
- case ICE_DEV_ID_E825C_BACKPLANE:
- case ICE_DEV_ID_E825C_QSFP:
- case ICE_DEV_ID_E825C_SFP:
- case ICE_DEV_ID_E825C_SGMII:
- return true;
- default:
- return false;
- }
-}
-
-/**
* ice_is_pf_c827 - check if pf contains c827 phy
* @hw: pointer to the hw struct
*
@@ -2271,7 +2157,8 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
caps->nvm_unified_update);
break;
case ICE_AQC_CAPS_RDMA:
- caps->rdma = (number == 1);
+ if (IS_ENABLED(CONFIG_INFINIBAND_IRDMA))
+ caps->rdma = (number == 1);
ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma);
break;
case ICE_AQC_CAPS_MAX_MTU:
@@ -2408,7 +2295,7 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
- if (!ice_is_e825c(hw)) {
+ if (hw->mac_type != ICE_MAC_GENERIC_3K_E825) {
info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number);
info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
} else {
@@ -5765,6 +5652,96 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
}
/**
+ * ice_get_pca9575_handle - find and return the PCA9575 controller
+ * @hw: pointer to the hw struct
+ * @pca9575_handle: GPIO controller's handle
+ *
+ * Find and return the GPIO controller's handle in the netlist.
+ * Once found, the value is cached in the hw structure and subsequent calls
+ * return the cached value.
+ *
+ * Return: 0 on success, -ENXIO when there's no PCA9575 present.
+ */
+int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
+{
+ struct ice_aqc_get_link_topo *cmd;
+ struct ice_aq_desc desc;
+ int err;
+ u8 idx;
+
+ /* If handle was read previously return cached value */
+ if (hw->io_expander_handle) {
+ *pca9575_handle = hw->io_expander_handle;
+ return 0;
+ }
+
+#define SW_PCA9575_SFP_TOPO_IDX 2
+#define SW_PCA9575_QSFP_TOPO_IDX 1
+
+ /* Check if the SW IO expander controlling SMA exists in the netlist. */
+ if (hw->device_id == ICE_DEV_ID_E810C_SFP)
+ idx = SW_PCA9575_SFP_TOPO_IDX;
+ else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
+ idx = SW_PCA9575_QSFP_TOPO_IDX;
+ else
+ return -ENXIO;
+
+ /* Handle not cached yet, read it from the netlist */
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+ cmd = &desc.params.get_link_topo;
+ cmd->addr.topo_params.node_type_ctx =
+ ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL;
+ cmd->addr.topo_params.index = idx;
+
+ err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (err)
+ return -ENXIO;
+
+ /* Verify if we found the right IO expander type */
+ if (desc.params.get_link_topo.node_part_num !=
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
+ return -ENXIO;
+
+ /* If present save the handle and return it */
+ hw->io_expander_handle =
+ le16_to_cpu(desc.params.get_link_topo.addr.handle);
+ *pca9575_handle = hw->io_expander_handle;
+
+ return 0;
+}
+
+/**
+ * ice_read_pca9575_reg - read the register from the PCA9575 controller
+ * @hw: pointer to the hw struct
+ * @offset: GPIO controller register offset
+ * @data: pointer to data to be read from the GPIO controller
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data)
+{
+ struct ice_aqc_link_topo_addr link_topo;
+ __le16 addr;
+ u16 handle;
+ int err;
+
+ memset(&link_topo, 0, sizeof(link_topo));
+
+ err = ice_get_pca9575_handle(hw, &handle);
+ if (err)
+ return err;
+
+ link_topo.handle = cpu_to_le16(handle);
+ link_topo.topo_params.node_type_ctx =
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
+ ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
+
+ addr = cpu_to_le16((u16)offset);
+
+ return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
+}
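ice_read_pca9575_reg() hides the handle lookup and caching behind a single-byte read, so a caller only names the register it wants. The GNSS hunk later in this patch uses exactly this to probe for the module; condensed here as a usage sketch (register and bit names as they appear in ice_gnss.c):

/* Sketch: probe for the GNSS module through the PCA9575 IO expander. */
static bool example_gnss_module_present(struct ice_hw *hw)
{
	u8 data;

	if (ice_read_pca9575_reg(hw, ICE_PCA9575_P0_IN, &data))
		return false;

	/* the presence pin is active low */
	return !(data & ICE_P0_GNSS_PRSNT_N);
}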
+
+/**
* ice_aq_set_gpio
* @hw: pointer to the hw struct
* @gpio_ctrl_handle: GPIO controller node handle
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 15ba38543738..9b00aa0ddf10 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -131,7 +131,6 @@ int
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
bool ice_is_generic_mac(struct ice_hw *hw);
-bool ice_is_e810(struct ice_hw *hw);
int ice_clear_pf_cfg(struct ice_hw *hw);
int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
@@ -276,10 +275,6 @@ ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
-bool ice_is_e810t(struct ice_hw *hw);
-bool ice_is_e822(struct ice_hw *hw);
-bool ice_is_e823(struct ice_hw *hw);
-bool ice_is_e825c(struct ice_hw *hw);
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
@@ -306,5 +301,7 @@ int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, const u8 *data,
struct ice_sq_cd *cd);
+int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle);
+int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data);
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index 03988be03729..69d5b1a28491 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -2345,14 +2345,14 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM |
ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW;
- if (ice_is_e825c(hw))
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825)
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
} else {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;
}
- if (!ice_is_e825c(hw))
+ if (hw->mac_type != ICE_MAC_GENERIC_3K_E825)
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index 8d806d8ad761..bce3ad6ca2a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -95,7 +95,7 @@ ice_dpll_pin_freq_set(struct ice_pf *pf, struct ice_dpll_pin *pin,
}
if (ret) {
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to set pin freq:%u on pin:%u\n",
+ "err:%d %s failed to set pin freq:%u on pin:%u",
ret,
ice_aq_str(pf->hw.adminq.sq_last_status),
freq, pin->idx);
@@ -322,7 +322,7 @@ ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin,
}
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to enable %s pin:%u\n",
+ "err:%d %s failed to enable %s pin:%u",
ret, ice_aq_str(hw->adminq.sq_last_status),
pin_type_name[pin_type], pin->idx);
@@ -367,7 +367,7 @@ ice_dpll_pin_disable(struct ice_hw *hw, struct ice_dpll_pin *pin,
}
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to disable %s pin:%u\n",
+ "err:%d %s failed to disable %s pin:%u",
ret, ice_aq_str(hw->adminq.sq_last_status),
pin_type_name[pin_type], pin->idx);
@@ -479,7 +479,7 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
err:
if (extack)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to update %s pin:%u\n",
+ "err:%d %s failed to update %s pin:%u",
ret,
ice_aq_str(pf->hw.adminq.sq_last_status),
pin_type_name[pin_type], pin->idx);
@@ -518,7 +518,7 @@ ice_dpll_hw_input_prio_set(struct ice_pf *pf, struct ice_dpll *dpll,
(u8)prio);
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to set pin prio:%u on pin:%u\n",
+ "err:%d %s failed to set pin prio:%u on pin:%u",
ret,
ice_aq_str(pf->hw.adminq.sq_last_status),
prio, pin->idx);
@@ -1004,7 +1004,7 @@ ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
mutex_unlock(&pf->dplls.lock);
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to set pin phase_adjust:%d for pin:%u on dpll:%u\n",
+ "err:%d %s failed to set pin phase_adjust:%d for pin:%u on dpll:%u",
ret,
ice_aq_str(pf->hw.adminq.sq_last_status),
phase_adjust, p->idx, d->dpll_idx);
@@ -1362,7 +1362,7 @@ ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv,
&p->freq);
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to set pin state:%u for pin:%u on parent:%u\n",
+ "err:%d %s failed to set pin state:%u for pin:%u on parent:%u",
ret,
ice_aq_str(pf->hw.adminq.sq_last_status),
state, p->idx, parent->idx);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index d649c197cf67..ed21d7f55ac1 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -49,9 +49,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
if (vlan_ops->dis_rx_filtering(uplink_vsi))
goto err_vlan_filtering;
- if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
- goto err_override_uplink;
-
if (ice_vsi_update_local_lb(uplink_vsi, true))
goto err_override_local_lb;
@@ -63,8 +60,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
err_up:
ice_vsi_update_local_lb(uplink_vsi, false);
err_override_local_lb:
- ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
-err_override_uplink:
vlan_ops->ena_rx_filtering(uplink_vsi);
err_vlan_filtering:
ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
@@ -275,7 +270,6 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
ice_vsi_update_local_lb(uplink_vsi, false);
- ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
vlan_ops->ena_rx_filtering(uplink_vsi);
ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
ICE_FLTR_TX);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index f241493a6ac8..7c2dc347e4e5 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -3788,8 +3788,7 @@ ice_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
*/
static int ice_get_max_txq(struct ice_pf *pf)
{
- return min3(pf->num_lan_msix, (u16)num_online_cpus(),
- (u16)pf->hw.func_caps.common_cap.num_txq);
+ return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_txq);
}
/**
@@ -3798,8 +3797,7 @@ static int ice_get_max_txq(struct ice_pf *pf)
*/
static int ice_get_max_rxq(struct ice_pf *pf)
{
- return min3(pf->num_lan_msix, (u16)num_online_cpus(),
- (u16)pf->hw.func_caps.common_cap.num_rxq);
+ return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_rxq);
}
/**
@@ -3817,8 +3815,7 @@ static u32 ice_get_combined_cnt(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
- if (q_vector->rx.rx_ring && q_vector->tx.tx_ring)
- combined++;
+ combined += min(q_vector->num_ring_tx, q_vector->num_ring_rx);
}
return combined;
@@ -4773,7 +4770,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_RX_USECS_HIGH,
- .cap_rss_sym_xor_supported = true,
+ .supported_input_xfrm = RXH_XFRM_SYM_XOR,
.rxfh_per_ctx_key = true,
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
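The combined-channel count now adds min(num_ring_tx, num_ring_rx) per vector instead of incrementing once whenever both ring types are present. As a worked example, a q_vector carrying 2 Tx and 2 Rx rings now reports 2 combined channels where the old check counted 1, while a vector with only Tx rings still contributes 0.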
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index ee9862ddfe15..1d118171de37 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -1605,22 +1605,19 @@ void ice_fdir_replay_fltrs(struct ice_pf *pf)
*/
int ice_fdir_create_dflt_rules(struct ice_pf *pf)
{
+ const enum ice_fltr_ptype dflt_rules[] = {
+ ICE_FLTR_PTYPE_NONF_IPV4_TCP, ICE_FLTR_PTYPE_NONF_IPV4_UDP,
+ ICE_FLTR_PTYPE_NONF_IPV6_TCP, ICE_FLTR_PTYPE_NONF_IPV6_UDP,
+ };
int err;
/* Create perfect TCP and UDP rules in hardware. */
- err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_TCP);
- if (err)
- return err;
-
- err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_UDP);
- if (err)
- return err;
+ for (int i = 0; i < ARRAY_SIZE(dflt_rules); i++) {
+ err = ice_create_init_fdir_rule(pf, dflt_rules[i]);
- err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_TCP);
- if (err)
- return err;
-
- err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_UDP);
+ if (err)
+ break;
+ }
return err;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
index b2148dbe49b2..6b26290452d4 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.c
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -381,32 +381,23 @@ void ice_gnss_exit(struct ice_pf *pf)
}
/**
- * ice_gnss_is_gps_present - Check if GPS HW is present
+ * ice_gnss_is_module_present - Check if GNSS HW is present
* @hw: pointer to HW struct
+ *
+ * Return: true when GNSS is present, false otherwise.
*/
-bool ice_gnss_is_gps_present(struct ice_hw *hw)
+bool ice_gnss_is_module_present(struct ice_hw *hw)
{
- if (!hw->func_caps.ts_func_info.src_tmr_owned)
- return false;
+ int err;
+ u8 data;
- if (!ice_is_gps_in_netlist(hw))
+ if (!hw->func_caps.ts_func_info.src_tmr_owned ||
+ !ice_is_gps_in_netlist(hw))
return false;
-#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
- if (ice_is_e810t(hw)) {
- int err;
- u8 data;
-
- err = ice_read_pca9575_reg(hw, ICE_PCA9575_P0_IN, &data);
- if (err || !!(data & ICE_P0_GNSS_PRSNT_N))
- return false;
- } else {
- return false;
- }
-#else
- if (!ice_is_e810t(hw))
+ err = ice_read_pca9575_reg(hw, ICE_PCA9575_P0_IN, &data);
+ if (err || !!(data & ICE_P0_GNSS_PRSNT_N))
return false;
-#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
return true;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h
index 75e567ad7059..15daf603ed7b 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.h
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.h
@@ -37,11 +37,11 @@ struct gnss_serial {
#if IS_ENABLED(CONFIG_GNSS)
void ice_gnss_init(struct ice_pf *pf);
void ice_gnss_exit(struct ice_pf *pf);
-bool ice_gnss_is_gps_present(struct ice_hw *hw);
+bool ice_gnss_is_module_present(struct ice_hw *hw);
#else
static inline void ice_gnss_init(struct ice_pf *pf) { }
static inline void ice_gnss_exit(struct ice_pf *pf) { }
-static inline bool ice_gnss_is_gps_present(struct ice_hw *hw)
+static inline bool ice_gnss_is_module_present(struct ice_hw *hw)
{
return false;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index dc88aea9f473..aa4bfbcf85d2 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -541,10 +541,22 @@
#define PFPM_WUS_MAG_M BIT(1)
#define PFPM_WUS_MNG_M BIT(3)
#define PFPM_WUS_FW_RST_WK_M BIT(31)
+#define E830_PRTMAC_TS_TX_MEM_VALID_H 0x001E2020
+#define E830_PRTMAC_TS_TX_MEM_VALID_L 0x001E2000
#define E830_PRTMAC_CL01_PS_QNT 0x001E32A0
#define E830_PRTMAC_CL01_PS_QNT_CL0_M GENMASK(15, 0)
#define E830_PRTMAC_CL01_QNT_THR 0x001E3320
#define E830_PRTMAC_CL01_QNT_THR_CL0_M GENMASK(15, 0)
+#define E830_PRTTSYN_TXTIME_H(_i) (0x001E5800 + ((_i) * 32))
+#define E830_PRTTSYN_TXTIME_L(_i) (0x001E5000 + ((_i) * 32))
+#define E830_GLPTM_ART_CTL 0x00088B50
+#define E830_GLPTM_ART_CTL_ACTIVE_M BIT(0)
+#define E830_GLPTM_ART_TIME_H 0x00088B54
+#define E830_GLPTM_ART_TIME_L 0x00088B58
+#define E830_GLTSYN_PTMTIME_H(_i) (0x00088B48 + ((_i) * 4))
+#define E830_GLTSYN_PTMTIME_L(_i) (0x00088B40 + ((_i) * 4))
+#define E830_PFPTM_SEM 0x00088B00
+#define E830_PFPTM_SEM_BUSY_M BIT(0)
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
#define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH 0x00234000
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index 145b27f2a4ce..bab3e81cad5d 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -228,61 +228,34 @@ void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
}
EXPORT_SYMBOL_GPL(ice_get_qos_params);
-/**
- * ice_alloc_rdma_qvectors - Allocate vector resources for RDMA driver
- * @pf: board private structure to initialize
- */
-static int ice_alloc_rdma_qvectors(struct ice_pf *pf)
+int ice_alloc_rdma_qvector(struct ice_pf *pf, struct msix_entry *entry)
{
- if (ice_is_rdma_ena(pf)) {
- int i;
-
- pf->msix_entries = kcalloc(pf->num_rdma_msix,
- sizeof(*pf->msix_entries),
- GFP_KERNEL);
- if (!pf->msix_entries)
- return -ENOMEM;
+ struct msi_map map = ice_alloc_irq(pf, true);
- /* RDMA is the only user of pf->msix_entries array */
- pf->rdma_base_vector = 0;
-
- for (i = 0; i < pf->num_rdma_msix; i++) {
- struct msix_entry *entry = &pf->msix_entries[i];
- struct msi_map map;
+ if (map.index < 0)
+ return -ENOMEM;
- map = ice_alloc_irq(pf, false);
- if (map.index < 0)
- break;
+ entry->entry = map.index;
+ entry->vector = map.virq;
- entry->entry = map.index;
- entry->vector = map.virq;
- }
- }
return 0;
}
+EXPORT_SYMBOL_GPL(ice_alloc_rdma_qvector);
/**
* ice_free_rdma_qvector - free vector resources reserved for RDMA driver
* @pf: board private structure to initialize
+ * @entry: MSI-X entry to be removed
*/
-static void ice_free_rdma_qvector(struct ice_pf *pf)
+void ice_free_rdma_qvector(struct ice_pf *pf, struct msix_entry *entry)
{
- int i;
-
- if (!pf->msix_entries)
- return;
-
- for (i = 0; i < pf->num_rdma_msix; i++) {
- struct msi_map map;
+ struct msi_map map;
- map.index = pf->msix_entries[i].entry;
- map.virq = pf->msix_entries[i].vector;
- ice_free_irq(pf, map);
- }
-
- kfree(pf->msix_entries);
- pf->msix_entries = NULL;
+ map.index = entry->entry;
+ map.virq = entry->vector;
+ ice_free_irq(pf, map);
}
+EXPORT_SYMBOL_GPL(ice_free_rdma_qvector);
/**
* ice_adev_release - function to be mapped to AUX dev's release op
@@ -382,12 +355,6 @@ int ice_init_rdma(struct ice_pf *pf)
return -ENOMEM;
}
- /* Reserve vector resources */
- ret = ice_alloc_rdma_qvectors(pf);
- if (ret < 0) {
- dev_err(dev, "failed to reserve vectors for RDMA\n");
- goto err_reserve_rdma_qvector;
- }
pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
ret = ice_plug_aux_dev(pf);
if (ret)
@@ -395,8 +362,6 @@ int ice_init_rdma(struct ice_pf *pf)
return 0;
err_plug_aux_dev:
- ice_free_rdma_qvector(pf);
-err_reserve_rdma_qvector:
pf->adev = NULL;
xa_erase(&ice_aux_id, pf->aux_idx);
return ret;
@@ -412,6 +377,5 @@ void ice_deinit_rdma(struct ice_pf *pf)
return;
ice_unplug_aux_dev(pf);
- ice_free_rdma_qvector(pf);
xa_erase(&ice_aux_id, pf->aux_idx);
}
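The RDMA vector helpers are now exported and operate on a single struct msix_entry, so the auxiliary device side can request and release vectors one at a time instead of relying on a block pre-reserved by the PF. The irdma call sites are not part of this diff; a hedged sketch of the consumer-side usage:

/* Hypothetical consumer of the exported per-vector API. */
static int example_rdma_take_vector(struct ice_pf *pf,
				    struct msix_entry *entry)
{
	int err = ice_alloc_rdma_qvector(pf, entry);

	if (err)
		return err;

	/* entry->vector is the Linux IRQ number, entry->entry the index
	 * in the PF's MSI-X table
	 */
	return 0;
}

static void example_rdma_drop_vector(struct ice_pf *pf,
				     struct msix_entry *entry)
{
	ice_free_rdma_qvector(pf, entry);
}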
diff --git a/drivers/net/ethernet/intel/ice/ice_irq.c b/drivers/net/ethernet/intel/ice/ice_irq.c
index ad82ff7d1995..30801fd375f0 100644
--- a/drivers/net/ethernet/intel/ice/ice_irq.c
+++ b/drivers/net/ethernet/intel/ice/ice_irq.c
@@ -20,6 +20,19 @@ ice_init_irq_tracker(struct ice_pf *pf, unsigned int max_vectors,
xa_init_flags(&pf->irq_tracker.entries, XA_FLAGS_ALLOC);
}
+static int
+ice_init_virt_irq_tracker(struct ice_pf *pf, u32 base, u32 num_entries)
+{
+ pf->virt_irq_tracker.bm = bitmap_zalloc(num_entries, GFP_KERNEL);
+ if (!pf->virt_irq_tracker.bm)
+ return -ENOMEM;
+
+ pf->virt_irq_tracker.num_entries = num_entries;
+ pf->virt_irq_tracker.base = base;
+
+ return 0;
+}
+
/**
* ice_deinit_irq_tracker - free xarray tracker
* @pf: board private structure
@@ -29,6 +42,11 @@ static void ice_deinit_irq_tracker(struct ice_pf *pf)
xa_destroy(&pf->irq_tracker.entries);
}
+static void ice_deinit_virt_irq_tracker(struct ice_pf *pf)
+{
+ bitmap_free(pf->virt_irq_tracker.bm);
+}
+
/**
* ice_free_irq_res - free a block of resources
* @pf: board private structure
@@ -45,7 +63,7 @@ static void ice_free_irq_res(struct ice_pf *pf, u16 index)
/**
* ice_get_irq_res - get an interrupt resource
* @pf: board private structure
- * @dyn_only: force entry to be dynamically allocated
+ * @dyn_allowed: allow entry to be dynamically allocated
*
* Allocate new irq entry in the free slot of the tracker. Since xarray
* is used, always allocate new entry at the lowest possible index. Set
@@ -53,11 +71,12 @@ static void ice_free_irq_res(struct ice_pf *pf, u16 index)
*
* Returns allocated irq entry or NULL on failure.
*/
-static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
+static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf,
+ bool dyn_allowed)
{
- struct xa_limit limit = { .max = pf->irq_tracker.num_entries,
+ struct xa_limit limit = { .max = pf->irq_tracker.num_entries - 1,
.min = 0 };
- unsigned int num_static = pf->irq_tracker.num_static;
+ unsigned int num_static = pf->irq_tracker.num_static - 1;
struct ice_irq_entry *entry;
unsigned int index;
int ret;
@@ -66,9 +85,9 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
if (!entry)
return NULL;
- /* skip preallocated entries if the caller says so */
- if (dyn_only)
- limit.min = num_static;
+ /* limit to preallocated entries if dynamic allocation is not allowed */
+ if (!dyn_allowed)
+ limit.max = num_static;
ret = xa_alloc(&pf->irq_tracker.entries, &index, entry, limit,
GFP_KERNEL);
@@ -78,161 +97,18 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
entry = NULL;
} else {
entry->index = index;
- entry->dynamic = index >= num_static;
+ entry->dynamic = index > num_static;
}
return entry;
}
-/**
- * ice_reduce_msix_usage - Reduce usage of MSI-X vectors
- * @pf: board private structure
- * @v_remain: number of remaining MSI-X vectors to be distributed
- *
- * Reduce the usage of MSI-X vectors when entire request cannot be fulfilled.
- * pf->num_lan_msix and pf->num_rdma_msix values are set based on number of
- * remaining vectors.
- */
-static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain)
-{
- int v_rdma;
-
- if (!ice_is_rdma_ena(pf)) {
- pf->num_lan_msix = v_remain;
- return;
- }
-
- /* RDMA needs at least 1 interrupt in addition to AEQ MSIX */
- v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
-
- if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) {
- dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n");
- clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
-
- pf->num_rdma_msix = 0;
- pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
- } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
- (v_remain - v_rdma < v_rdma)) {
- /* Support minimum RDMA and give remaining vectors to LAN MSIX
- */
- pf->num_rdma_msix = ICE_MIN_RDMA_MSIX;
- pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX;
- } else {
- /* Split remaining MSIX with RDMA after accounting for AEQ MSIX
- */
- pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
- ICE_RDMA_NUM_AEQ_MSIX;
- pf->num_lan_msix = v_remain - pf->num_rdma_msix;
- }
-}
-
-/**
- * ice_ena_msix_range - Request a range of MSIX vectors from the OS
- * @pf: board private structure
- *
- * Compute the number of MSIX vectors wanted and request from the OS. Adjust
- * device usage if there are not enough vectors. Return the number of vectors
- * reserved or negative on failure.
- */
-static int ice_ena_msix_range(struct ice_pf *pf)
+#define ICE_RDMA_AEQ_MSIX 1
+static int ice_get_default_msix_amount(struct ice_pf *pf)
{
- int num_cpus, hw_num_msix, v_other, v_wanted, v_actual;
- struct device *dev = ice_pf_to_dev(pf);
- int err;
-
- hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors;
- num_cpus = num_online_cpus();
-
- /* LAN miscellaneous handler */
- v_other = ICE_MIN_LAN_OICR_MSIX;
-
- /* Flow Director */
- if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
- v_other += ICE_FDIR_MSIX;
-
- /* switchdev */
- v_other += ICE_ESWITCH_MSIX;
-
- v_wanted = v_other;
-
- /* LAN traffic */
- pf->num_lan_msix = num_cpus;
- v_wanted += pf->num_lan_msix;
-
- /* RDMA auxiliary driver */
- if (ice_is_rdma_ena(pf)) {
- pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
- v_wanted += pf->num_rdma_msix;
- }
-
- if (v_wanted > hw_num_msix) {
- int v_remain;
-
- dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n",
- v_wanted, hw_num_msix);
-
- if (hw_num_msix < ICE_MIN_MSIX) {
- err = -ERANGE;
- goto exit_err;
- }
-
- v_remain = hw_num_msix - v_other;
- if (v_remain < ICE_MIN_LAN_TXRX_MSIX) {
- v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX;
- v_remain = ICE_MIN_LAN_TXRX_MSIX;
- }
-
- ice_reduce_msix_usage(pf, v_remain);
- v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other;
-
- dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n",
- pf->num_lan_msix);
- if (ice_is_rdma_ena(pf))
- dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n",
- pf->num_rdma_msix);
- }
-
- /* actually reserve the vectors */
- v_actual = pci_alloc_irq_vectors(pf->pdev, ICE_MIN_MSIX, v_wanted,
- PCI_IRQ_MSIX);
- if (v_actual < 0) {
- dev_err(dev, "unable to reserve MSI-X vectors\n");
- err = v_actual;
- goto exit_err;
- }
-
- if (v_actual < v_wanted) {
- dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
- v_wanted, v_actual);
-
- if (v_actual < ICE_MIN_MSIX) {
- /* error if we can't get minimum vectors */
- pci_free_irq_vectors(pf->pdev);
- err = -ERANGE;
- goto exit_err;
- } else {
- int v_remain = v_actual - v_other;
-
- if (v_remain < ICE_MIN_LAN_TXRX_MSIX)
- v_remain = ICE_MIN_LAN_TXRX_MSIX;
-
- ice_reduce_msix_usage(pf, v_remain);
-
- dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
- pf->num_lan_msix);
-
- if (ice_is_rdma_ena(pf))
- dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
- pf->num_rdma_msix);
- }
- }
-
- return v_actual;
-
-exit_err:
- pf->num_rdma_msix = 0;
- pf->num_lan_msix = 0;
- return err;
+ return ICE_MIN_LAN_OICR_MSIX + num_online_cpus() +
+ (test_bit(ICE_FLAG_FD_ENA, pf->flags) ? ICE_FDIR_MSIX : 0) +
+ (ice_is_rdma_ena(pf) ? num_online_cpus() + ICE_RDMA_AEQ_MSIX : 0);
}
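As a worked example of the default: on a 16-CPU host with flow director and RDMA enabled this evaluates to 1 (ICE_MIN_LAN_OICR_MSIX) + 16 + 2 (ICE_FDIR_MSIX) + 16 + 1 (ICE_RDMA_AEQ_MSIX) = 36 vectors, which ice_init_interrupt_scheme() below then caps at the number of vectors the device function actually exposes.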
/**
@@ -243,6 +119,7 @@ void ice_clear_interrupt_scheme(struct ice_pf *pf)
{
pci_free_irq_vectors(pf->pdev);
ice_deinit_irq_tracker(pf);
+ ice_deinit_virt_irq_tracker(pf);
}
/**
@@ -252,27 +129,38 @@ void ice_clear_interrupt_scheme(struct ice_pf *pf)
int ice_init_interrupt_scheme(struct ice_pf *pf)
{
int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
- int vectors, max_vectors;
+ int vectors;
- vectors = ice_ena_msix_range(pf);
+ /* load default PF MSI-X range */
+ if (!pf->msix.min)
+ pf->msix.min = ICE_MIN_MSIX;
- if (vectors < 0)
- return -ENOMEM;
+ if (!pf->msix.max)
+ pf->msix.max = min(total_vectors,
+ ice_get_default_msix_amount(pf));
+
+ pf->msix.total = total_vectors;
+ pf->msix.rest = total_vectors - pf->msix.max;
if (pci_msix_can_alloc_dyn(pf->pdev))
- max_vectors = total_vectors;
+ vectors = pf->msix.min;
else
- max_vectors = vectors;
+ vectors = pf->msix.max;
+
+ vectors = pci_alloc_irq_vectors(pf->pdev, pf->msix.min, vectors,
+ PCI_IRQ_MSIX);
+ if (vectors < 0)
+ return vectors;
- ice_init_irq_tracker(pf, max_vectors, vectors);
+ ice_init_irq_tracker(pf, pf->msix.max, vectors);
- return 0;
+ return ice_init_virt_irq_tracker(pf, pf->msix.max, pf->msix.rest);
}
/**
* ice_alloc_irq - Allocate new interrupt vector
* @pf: board private structure
- * @dyn_only: force dynamic allocation of the interrupt
+ * @dyn_allowed: allow dynamic allocation of the interrupt
*
* Allocate new interrupt vector for a given owner id.
* return struct msi_map with interrupt details and track
@@ -285,27 +173,22 @@ int ice_init_interrupt_scheme(struct ice_pf *pf)
* interrupt will be allocated with pci_msix_alloc_irq_at.
*
* Some callers may only support dynamically allocated interrupts.
- * This is indicated with dyn_only flag.
+ * This is indicated with dyn_allowed flag.
*
* On failure, return map with negative .index. The caller
* is expected to check returned map index.
*
*/
-struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only)
+struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_allowed)
{
- int sriov_base_vector = pf->sriov_base_vector;
struct msi_map map = { .index = -ENOENT };
struct device *dev = ice_pf_to_dev(pf);
struct ice_irq_entry *entry;
- entry = ice_get_irq_res(pf, dyn_only);
+ entry = ice_get_irq_res(pf, dyn_allowed);
if (!entry)
return map;
- /* fail if we're about to violate SRIOV vectors space */
- if (sriov_base_vector && entry->index >= sriov_base_vector)
- goto exit_free_res;
-
if (pci_msix_can_alloc_dyn(pf->pdev) && entry->dynamic) {
map = pci_msix_alloc_irq_at(pf->pdev, entry->index, NULL);
if (map.index < 0)
@@ -353,26 +236,40 @@ void ice_free_irq(struct ice_pf *pf, struct msi_map map)
}
/**
- * ice_get_max_used_msix_vector - Get the max used interrupt vector
- * @pf: board private structure
+ * ice_virt_get_irqs - get irqs for SR-IOV use case
+ * @pf: pointer to PF structure
+ * @needed: number of irqs to get
*
- * Return index of maximum used interrupt vectors with respect to the
- * beginning of the MSIX table. Take into account that some interrupts
- * may have been dynamically allocated after MSIX was initially enabled.
+ * Reserve a contiguous block of @needed entries in the virtualization IRQ
+ * tracker, mark them as used and return the index of the first entry
+ * translated into PF MSI-X vector space. The returned index can then be used
+ * when accessing PF relative registers such as GLINT_VECT2FUNC and
+ * GLINT_DYN_CTL, and is what gets stored in vf->first_vector_idx.
+ *
+ * Return: first PF-relative MSI-X index on success, -ENOENT when no
+ * contiguous block of @needed entries is available.
*/
-int ice_get_max_used_msix_vector(struct ice_pf *pf)
+int ice_virt_get_irqs(struct ice_pf *pf, u32 needed)
{
- unsigned long start, index, max_idx;
- void *entry;
+ int res = bitmap_find_next_zero_area(pf->virt_irq_tracker.bm,
+ pf->virt_irq_tracker.num_entries,
+ 0, needed, 0);
- /* Treat all preallocated interrupts as used */
- start = pf->irq_tracker.num_static;
- max_idx = start - 1;
+ if (res >= pf->virt_irq_tracker.num_entries)
+ return -ENOENT;
- xa_for_each_start(&pf->irq_tracker.entries, index, entry, start) {
- if (index > max_idx)
- max_idx = index;
- }
+ bitmap_set(pf->virt_irq_tracker.bm, res, needed);
+
+ /* conversion from number in bitmap to global irq index */
+ return res + pf->virt_irq_tracker.base;
+}
- return max_idx;
+/**
+ * ice_virt_free_irqs - free irqs used by the VF
+ * @pf: pointer to PF structure
+ * @index: first index to be freed
+ * @irqs: number of irqs to free
+ */
+void ice_virt_free_irqs(struct ice_pf *pf, u32 index, u32 irqs)
+{
+ bitmap_clear(pf->virt_irq_tracker.bm, index - pf->virt_irq_tracker.base,
+ irqs);
}
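SR-IOV vectors are now tracked with a plain bitmap whose base is pf->msix.max, so VF vectors occupy the space above the PF's own range. A sketch of the reserve/release cycle for one VF; struct example_vf is a hypothetical stand-in for the real struct ice_vf:

struct example_vf {
	u32 first_vector_idx;
	u32 num_msix;
};

static int example_vf_reserve_irqs(struct ice_pf *pf, struct example_vf *vf,
				   u32 needed)
{
	int first = ice_virt_get_irqs(pf, needed);

	if (first < 0)
		return first;

	/* already translated into PF MSI-X vector space */
	vf->first_vector_idx = first;
	vf->num_msix = needed;
	return 0;
}

static void example_vf_release_irqs(struct ice_pf *pf, struct example_vf *vf)
{
	ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix);
}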
diff --git a/drivers/net/ethernet/intel/ice/ice_irq.h b/drivers/net/ethernet/intel/ice/ice_irq.h
index f35efc08575e..b2f9dbafd57e 100644
--- a/drivers/net/ethernet/intel/ice/ice_irq.h
+++ b/drivers/net/ethernet/intel/ice/ice_irq.h
@@ -15,11 +15,22 @@ struct ice_irq_tracker {
u16 num_static; /* preallocated entries */
};
+struct ice_virt_irq_tracker {
+ unsigned long *bm; /* bitmap to track irq usage */
+ u32 num_entries;
+ /* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the
+ * number of MSIX vectors needed for all SR-IOV VFs from the number of
+ * MSIX vectors allowed on this PF.
+ */
+ u32 base;
+};
+
int ice_init_interrupt_scheme(struct ice_pf *pf);
void ice_clear_interrupt_scheme(struct ice_pf *pf);
struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only);
void ice_free_irq(struct ice_pf *pf, struct msi_map map);
-int ice_get_max_used_msix_vector(struct ice_pf *pf);
+int ice_virt_get_irqs(struct ice_pf *pf, u32 needed);
+void ice_virt_free_irqs(struct ice_pf *pf, u32 index, u32 irqs);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 1ccb572ce285..22371011c249 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -1001,6 +1001,28 @@ static void ice_lag_link(struct ice_lag *lag)
}
/**
+ * ice_lag_config_eswitch - configure eswitch to work with LAG
+ * @lag: lag info struct
+ * @netdev: active network interface device struct
+ *
+ * Updates all port representors in eswitch to use @netdev for Tx.
+ *
+ * Configures the netdev to keep dst metadata (also used in representor Tx).
+ * This is required for an uplink without switchdev mode configured.
+ */
+static void ice_lag_config_eswitch(struct ice_lag *lag,
+ struct net_device *netdev)
+{
+ struct ice_repr *repr;
+ unsigned long id;
+
+ xa_for_each(&lag->pf->eswitch.reprs, id, repr)
+ repr->dst->u.port_info.lower_dev = netdev;
+
+ netif_keep_dst(netdev);
+}
+
+/**
* ice_lag_unlink - handle unlink event
* @lag: LAG info struct
*/
@@ -1021,6 +1043,9 @@ static void ice_lag_unlink(struct ice_lag *lag)
ice_lag_move_vf_nodes(lag, act_port, pri_port);
lag->primary = false;
lag->active_port = ICE_LAG_INVALID_PORT;
+
+ /* Config primary's eswitch back to normal operation. */
+ ice_lag_config_eswitch(lag, lag->netdev);
} else {
struct ice_lag *primary_lag;
@@ -1419,6 +1444,7 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
ice_lag_move_vf_nodes(lag, prim_port,
event_port);
lag->active_port = event_port;
+ ice_lag_config_eswitch(lag, event_netdev);
return;
}
@@ -1428,6 +1454,7 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
/* new active port */
ice_lag_move_vf_nodes(lag, lag->active_port, event_port);
lag->active_port = event_port;
+ ice_lag_config_eswitch(lag, event_netdev);
} else {
/* port not set as currently active (e.g. new active port
* has already claimed the nodes and filters
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 1479b45738af..77ba26538b07 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -229,7 +229,7 @@ struct ice_32b_rx_flex_desc_nic {
__le16 status_error1;
u8 flexi_flags2;
u8 ts_low;
- __le16 l2tag2_1st;
+ __le16 raw_csum;
__le16 l2tag2_2nd;
/* Qword 3 */
@@ -478,10 +478,15 @@ enum ice_tx_desc_len_fields {
struct ice_tx_ctx_desc {
__le32 tunneling_params;
__le16 l2tag2;
- __le16 rsvd;
+ __le16 gcs;
__le64 qw1;
};
+#define ICE_TX_GCS_DESC_START_M GENMASK(7, 0)
+#define ICE_TX_GCS_DESC_OFFSET_M GENMASK(11, 8)
+#define ICE_TX_GCS_DESC_TYPE_M GENMASK(14, 12)
+#define ICE_TX_GCS_DESC_CSUM_PSH 1
+
#define ICE_TXD_CTX_QW1_CMD_S 4
#define ICE_TXD_CTX_QW1_CMD_M (0x7FUL << ICE_TXD_CTX_QW1_CMD_S)
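The new gcs word in the Tx context descriptor encodes where the generic checksum calculation starts, where the result is inserted, and the checksum type. The Tx-path code that fills it is not in this excerpt; a hypothetical packing with the masks defined above (whether the hardware takes the start/offset in bytes or 2-byte units is not visible from this hunk) might look like:

static __le16 example_build_gcs(u16 csum_start, u16 csum_offset)
{
	u16 gcs;

	gcs = FIELD_PREP(ICE_TX_GCS_DESC_START_M, csum_start);
	gcs |= FIELD_PREP(ICE_TX_GCS_DESC_OFFSET_M, csum_offset);
	gcs |= FIELD_PREP(ICE_TX_GCS_DESC_TYPE_M, ICE_TX_GCS_DESC_CSUM_PSH);

	return cpu_to_le16(gcs);
}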
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 38a1c8372180..0bcf9d127ac9 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -157,6 +157,16 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
}
}
+static u16 ice_get_rxq_count(struct ice_pf *pf)
+{
+ return min(ice_get_avail_rxq_count(pf), num_online_cpus());
+}
+
+static u16 ice_get_txq_count(struct ice_pf *pf)
+{
+ return min(ice_get_avail_txq_count(pf), num_online_cpus());
+}
+
/**
* ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
* @vsi: the VSI being configured
@@ -178,9 +188,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
vsi->alloc_txq = vsi->req_txq;
vsi->num_txq = vsi->req_txq;
} else {
- vsi->alloc_txq = min3(pf->num_lan_msix,
- ice_get_avail_txq_count(pf),
- (u16)num_online_cpus());
+ vsi->alloc_txq = ice_get_txq_count(pf);
}
pf->num_lan_tx = vsi->alloc_txq;
@@ -193,17 +201,13 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
vsi->alloc_rxq = vsi->req_rxq;
vsi->num_rxq = vsi->req_rxq;
} else {
- vsi->alloc_rxq = min3(pf->num_lan_msix,
- ice_get_avail_rxq_count(pf),
- (u16)num_online_cpus());
+ vsi->alloc_rxq = ice_get_rxq_count(pf);
}
}
pf->num_lan_rx = vsi->alloc_rxq;
- vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
- max_t(int, vsi->alloc_rxq,
- vsi->alloc_txq));
+ vsi->num_q_vectors = max(vsi->alloc_rxq, vsi->alloc_txq);
break;
case ICE_VSI_SF:
vsi->alloc_txq = 1;
@@ -567,6 +571,8 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
return -ENOMEM;
}
+ vsi->irq_dyn_alloc = pci_msix_can_alloc_dyn(vsi->back->pdev);
+
switch (vsi->type) {
case ICE_VSI_PF:
case ICE_VSI_SF:
@@ -827,7 +833,13 @@ bool ice_is_safe_mode(struct ice_pf *pf)
*/
bool ice_is_rdma_ena(struct ice_pf *pf)
{
- return test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ union devlink_param_value value;
+ int err;
+
+ err = devl_param_driverinit_value_get(priv_to_devlink(pf),
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
+ &value);
+ return err ? test_bit(ICE_FLAG_RDMA_ENA, pf->flags) : value.vbool;
}
/**
@@ -1173,12 +1185,11 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
static void
ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
- struct ice_pf *pf = vsi->back;
u16 qcount, qmap;
u8 offset = 0;
int pow;
- qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);
+ qcount = vsi->num_rxq;
pow = order_base_2(qcount);
qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
@@ -1420,6 +1431,10 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->dev = dev;
ring->count = vsi->num_rx_desc;
ring->cached_phctime = pf->ptp.cached_phc_time;
+
+ if (ice_is_feature_supported(pf, ICE_F_GCS))
+ ring->flags |= ICE_RX_FLAGS_RING_GCS;
+
WRITE_ONCE(vsi->rx_rings[i], ring);
}
@@ -1764,9 +1779,8 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
* @prio: priority for the RXDID for this queue
* @ena_ts: true to enable timestamp and false to disable timestamp
*/
-void
-ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
- bool ena_ts)
+void ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
+ bool ena_ts)
{
int regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
@@ -2582,7 +2596,6 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
return;
vsi->irqs_ready = false;
- ice_free_cpu_rx_rmap(vsi);
ice_for_each_q_vector(vsi, i) {
int irq_num;
@@ -2595,12 +2608,6 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
vsi->q_vectors[i]->num_ring_rx))
continue;
- /* clear the affinity notifier in the IRQ descriptor */
- if (!IS_ENABLED(CONFIG_RFS_ACCEL))
- irq_set_affinity_notifier(irq_num, NULL);
-
- /* clear the affinity_hint in the IRQ descriptor */
- irq_update_affinity_hint(irq_num, NULL);
synchronize_irq(irq_num);
devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
}
@@ -2755,11 +2762,18 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
void ice_vsi_clear_napi_queues(struct ice_vsi *vsi)
{
struct net_device *netdev = vsi->netdev;
- int q_idx;
+ int q_idx, v_idx;
if (!netdev)
return;
+ /* Clear the NAPI's interrupt number */
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+
+ netif_napi_set_irq(&q_vector->napi, -1);
+ }
+
ice_for_each_txq(vsi, q_idx)
netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, NULL);
@@ -3882,15 +3896,17 @@ void ice_init_feature_support(struct ice_pf *pf)
ice_set_feature_support(pf, ICE_F_CGU);
if (ice_is_clock_mux_in_netlist(&pf->hw))
ice_set_feature_support(pf, ICE_F_SMA_CTRL);
- if (ice_gnss_is_gps_present(&pf->hw))
+ if (ice_gnss_is_module_present(&pf->hw))
ice_set_feature_support(pf, ICE_F_GNSS);
break;
default:
break;
}
- if (pf->hw.mac_type == ICE_MAC_E830)
+ if (pf->hw.mac_type == ICE_MAC_E830) {
ice_set_feature_support(pf, ICE_F_MBX_LIMIT);
+ ice_set_feature_support(pf, ICE_F_GCS);
+ }
}
/**
@@ -3937,24 +3953,6 @@ void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx)
}
/**
- * ice_vsi_ctx_set_allow_override - allow destination override on VSI
- * @ctx: pointer to VSI ctx structure
- */
-void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx)
-{
- ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
-}
-
-/**
- * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI
- * @ctx: pointer to VSI ctx structure
- */
-void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx)
-{
- ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
-}
-
-/**
* ice_vsi_update_local_lb - update sw block in VSI with local loopback bit
* @vsi: pointer to VSI structure
* @set: set or unset the bit
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index eabb35834a24..b4c9cb28a016 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -105,10 +105,6 @@ ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *))
void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx);
void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx);
-
-void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx);
-
-void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx);
int ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set);
int ice_vsi_add_vlan_zero(struct ice_vsi *vsi);
int ice_vsi_del_vlan_zero(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index c3a0fb97c5ee..049edeb60104 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2528,34 +2528,6 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
}
/**
- * ice_irq_affinity_notify - Callback for affinity changes
- * @notify: context as to what irq was changed
- * @mask: the new affinity mask
- *
- * This is a callback function used by the irq_set_affinity_notifier function
- * so that we may register to receive changes to the irq affinity masks.
- */
-static void
-ice_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct ice_q_vector *q_vector =
- container_of(notify, struct ice_q_vector, affinity_notify);
-
- cpumask_copy(&q_vector->affinity_mask, mask);
-}
-
-/**
- * ice_irq_affinity_release - Callback for affinity notifier release
- * @ref: internal core kernel usage
- *
- * This is a callback function used by the irq_set_affinity_notifier function
- * to inform the current notification subscriber that they will no longer
- * receive notifications.
- */
-static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
-
-/**
* ice_vsi_ena_irq - Enable IRQ for the given VSI
* @vsi: the VSI being configured
*/
@@ -2618,19 +2590,6 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
err);
goto free_q_irqs;
}
-
- /* register for affinity change notifications */
- if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
- struct irq_affinity_notify *affinity_notify;
-
- affinity_notify = &q_vector->affinity_notify;
- affinity_notify->notify = ice_irq_affinity_notify;
- affinity_notify->release = ice_irq_affinity_release;
- irq_set_affinity_notifier(irq_num, affinity_notify);
- }
-
- /* assign the mask for this irq */
- irq_update_affinity_hint(irq_num, &q_vector->affinity_mask);
}
err = ice_set_cpu_rx_rmap(vsi);
@@ -2646,9 +2605,6 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
free_q_irqs:
while (vector--) {
irq_num = vsi->q_vectors[vector]->irq.virq;
- if (!IS_ENABLED(CONFIG_RFS_ACCEL))
- irq_set_affinity_notifier(irq_num, NULL);
- irq_update_affinity_hint(irq_num, NULL);
devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
}
return err;
@@ -3304,22 +3260,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
if (oicr & PFINT_OICR_TSYN_TX_M) {
ena_mask &= ~PFINT_OICR_TSYN_TX_M;
- if (ice_pf_state_is_nominal(pf) &&
- pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) {
- struct ice_ptp_tx *tx = &pf->ptp.port.tx;
- unsigned long flags;
- u8 idx;
-
- spin_lock_irqsave(&tx->lock, flags);
- idx = find_next_bit_wrap(tx->in_use, tx->len,
- tx->last_ll_ts_idx_read + 1);
- if (idx != tx->len)
- ice_ptp_req_tx_single_tstamp(tx, idx);
- spin_unlock_irqrestore(&tx->lock, flags);
- } else if (ice_ptp_pf_handles_tx_interrupt(pf)) {
- set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
- ret = IRQ_WAKE_THREAD;
- }
+
+ ret = ice_ptp_ts_irq(pf);
}
if (oicr & PFINT_OICR_TSYN_EVNT_M) {
@@ -3689,6 +3631,15 @@ void ice_set_netdev_features(struct net_device *netdev)
*/
netdev->hw_features |= NETIF_F_RXFCS;
+ /* Allow core to manage IRQs affinity */
+ netif_set_affinity_auto(netdev);
+
+ /* Mutual exclusivity for TSO and GCS is enforced by the set features
+ * ndo callback.
+ */
+ if (ice_is_feature_supported(pf, ICE_F_GCS))
+ netdev->hw_features |= NETIF_F_HW_CSUM;
+
netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
}
@@ -4066,8 +4017,7 @@ static void ice_set_pf_caps(struct ice_pf *pf)
}
clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
- if (func_caps->common_cap.ieee_1588 &&
- !(pf->hw.mac_type == ICE_MAC_E830))
+ if (func_caps->common_cap.ieee_1588)
set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
pf->max_pf_txqs = func_caps->common_cap.num_txq;
@@ -5065,16 +5015,16 @@ static int ice_init_devlink(struct ice_pf *pf)
return err;
ice_devlink_init_regions(pf);
- ice_health_init(pf);
ice_devlink_register(pf);
+ ice_health_init(pf);
return 0;
}
static void ice_deinit_devlink(struct ice_pf *pf)
{
- ice_devlink_unregister(pf);
ice_health_deinit(pf);
+ ice_devlink_unregister(pf);
ice_devlink_destroy_regions(pf);
ice_devlink_unregister_params(pf);
}
@@ -5087,6 +5037,12 @@ static int ice_init(struct ice_pf *pf)
if (err)
return err;
+ if (pf->hw.mac_type == ICE_MAC_E830) {
+ err = pci_enable_ptm(pf->pdev, NULL);
+ if (err)
+ dev_dbg(ice_pf_to_dev(pf), "PCIe PTM not supported by PCIe bus/controller\n");
+ }
+
err = ice_alloc_vsis(pf);
if (err)
goto err_alloc_vsis;
@@ -5186,11 +5142,12 @@ int ice_load(struct ice_pf *pf)
ice_napi_add(vsi);
+ ice_init_features(pf);
+
err = ice_init_rdma(pf);
if (err)
goto err_init_rdma;
- ice_init_features(pf);
ice_service_task_restart(pf);
clear_bit(ICE_DOWN, pf->state);
@@ -5198,6 +5155,7 @@ int ice_load(struct ice_pf *pf)
return 0;
err_init_rdma:
+ ice_deinit_features(pf);
ice_tc_indir_block_unregister(vsi);
err_tc_indir_block_register:
ice_unregister_netdev(vsi);
@@ -5221,8 +5179,8 @@ void ice_unload(struct ice_pf *pf)
devl_assert_locked(priv_to_devlink(pf));
- ice_deinit_features(pf);
ice_deinit_rdma(pf);
+ ice_deinit_features(pf);
ice_tc_indir_block_unregister(vsi);
ice_unregister_netdev(vsi);
ice_devlink_destroy_pf_port(pf);
@@ -6597,6 +6555,18 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
if (changed & NETIF_F_LOOPBACK)
ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
+ /* Due to E830 hardware limitations, TSO (NETIF_F_ALL_TSO) with GCS
+ * (NETIF_F_HW_CSUM) is not supported.
+ */
+ if (ice_is_feature_supported(pf, ICE_F_GCS) &&
+ ((features & NETIF_F_HW_CSUM) && (features & NETIF_F_ALL_TSO))) {
+ if (netdev->features & NETIF_F_HW_CSUM)
+ dev_err(ice_pf_to_dev(pf), "To enable TSO, you must first disable HW checksum.\n");
+ else
+ dev_err(ice_pf_to_dev(pf), "To enable HW checksum, you must first disable TSO.\n");
+ return -EIO;
+ }
+
return ret;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index e26320ce52ca..1fd1ae03eb90 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -298,8 +298,8 @@ void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
* @sts: Optional parameter for holding a pair of system timestamps from
* the system clock. Will be ignored if NULL is given.
*/
-static u64
-ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
+u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
+ struct ptp_system_timestamp *sts)
{
struct ice_hw *hw = &pf->hw;
u32 hi, lo, lo2;
@@ -310,6 +310,15 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
/* Read the system timestamp pre PHC read */
ptp_read_system_prets(sts);
+ if (hw->mac_type == ICE_MAC_E830) {
+ u64 clk_time = rd64(hw, E830_GLTSYN_TIME_L(tmr_idx));
+
+ /* Read the system timestamp post PHC read */
+ ptp_read_system_postts(sts);
+
+ return clk_time;
+ }
+
lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
/* Read the system timestamp post PHC read */
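[Editor's note] The legacy (non-E830) branch continues past this hunk with the usual split-register consistency check, re-reading the low word to detect a rollover between the two 32-bit reads. A minimal sketch of that idiom, assuming only the driver's rd32() accessor; the helper name read_split_counter() is illustrative, not part of the driver:

/* Illustrative sketch only: read a 64-bit counter exposed as two 32-bit
 * registers. The low word is read twice; if it wrapped between reads,
 * both halves are sampled again so hi and lo stay consistent.
 */
static u64 read_split_counter(struct ice_hw *hw, u32 reg_lo, u32 reg_hi)
{
	u32 lo, hi, lo2;

	lo = rd32(hw, reg_lo);
	hi = rd32(hw, reg_hi);
	lo2 = rd32(hw, reg_lo);
	if (lo2 < lo) {
		/* rollover detected: re-read both halves */
		lo = rd32(hw, reg_lo);
		hi = rd32(hw, reg_hi);
	}

	return ((u64)hi << 32) | lo;
}

The E830 branch added above avoids this dance entirely, since rd64() returns the whole 64-bit value in one access.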
@@ -972,28 +981,6 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
}
/**
- * ice_ptp_init_tx_eth56g - Initialize tracking for Tx timestamps
- * @pf: Board private structure
- * @tx: the Tx tracking structure to initialize
- * @port: the port this structure tracks
- *
- * Initialize the Tx timestamp tracker for this port. ETH56G PHYs
- * have independent memory blocks for all ports.
- *
- * Return: 0 for success, -ENOMEM when failed to allocate Tx tracker
- */
-static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx,
- u8 port)
-{
- tx->block = port;
- tx->offset = 0;
- tx->len = INDEX_PER_PORT_ETH56G;
- tx->has_ready_bitmap = 1;
-
- return ice_ptp_alloc_tx_tracker(tx);
-}
-
-/**
* ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
* @pf: Board private structure
* @tx: the Tx tracking structure to initialize
@@ -1003,9 +990,11 @@ static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx,
* the timestamp block is shared for all ports in the same quad. To avoid
* ports using the same timestamp index, logically break the block of
* registers into chunks based on the port number.
+ *
+ * Return: 0 on success, -ENOMEM when out of memory
*/
-static int
-ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
+static int ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx,
+ u8 port)
{
tx->block = ICE_GET_QUAD_NUM(port);
tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
@@ -1016,24 +1005,27 @@ ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
}
/**
- * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
+ * ice_ptp_init_tx - Initialize tracking for Tx timestamps
* @pf: Board private structure
* @tx: the Tx tracking structure to initialize
+ * @port: the port this structure tracks
+ *
+ * Initialize the Tx timestamp tracker for this PF. For all PHYs except E82X,
+ * each port has its own block of timestamps, independent of the other ports.
*
- * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
- * port has its own block of timestamps, independent of the other ports.
+ * Return: 0 on success, -ENOMEM when out of memory
*/
-static int
-ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
+static int ice_ptp_init_tx(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
{
- tx->block = pf->hw.port_info->lport;
+ tx->block = port;
tx->offset = 0;
- tx->len = INDEX_PER_PORT_E810;
+ tx->len = INDEX_PER_PORT;
+
/* The E810 PHY does not provide a timestamp ready bitmap. Instead,
* verify new timestamps against cached copy of the last read
* timestamp.
*/
- tx->has_ready_bitmap = 0;
+ tx->has_ready_bitmap = pf->hw.mac_type != ICE_MAC_E810;
return ice_ptp_alloc_tx_tracker(tx);
}
@@ -1318,20 +1310,21 @@ ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
struct ice_hw *hw = &pf->hw;
int err;
- if (ice_is_e810(hw))
- return 0;
-
mutex_lock(&ptp_port->ps_lock);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_stop_phy_timer_eth56g(hw, port, true);
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ err = 0;
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
err = ice_stop_phy_timer_e82x(hw, port, true);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_stop_phy_timer_eth56g(hw, port, true);
+ break;
default:
err = -ENODEV;
}
@@ -1361,19 +1354,17 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
unsigned long flags;
int err;
- if (ice_is_e810(hw))
- return 0;
-
if (!ptp_port->link_up)
return ice_ptp_port_phy_stop(ptp_port);
mutex_lock(&ptp_port->ps_lock);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_start_phy_timer_eth56g(hw, port);
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ err = 0;
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
/* Start the PHY timer in Vernier mode */
kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
@@ -1398,6 +1389,9 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work,
0);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_start_phy_timer_eth56g(hw, port);
+ break;
default:
err = -ENODEV;
}
@@ -1432,12 +1426,14 @@ void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
/* Skip HW writes if reset is in progress */
if (pf->hw.reset_ongoing)
return;
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_E810:
- /* Do not reconfigure E810 PHY */
+
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ /* Do not reconfigure E810 or E830 PHY */
return;
- case ICE_PHY_ETH56G:
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
ice_ptp_port_phy_restart(ptp_port);
return;
default:
@@ -1465,46 +1461,45 @@ static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
ice_ptp_reset_ts_memory(hw);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G: {
- int port;
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ return 0;
+ case ICE_MAC_GENERIC: {
+ int quad;
- for (port = 0; port < hw->ptp.num_lports; port++) {
+ for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
+ quad++) {
int err;
- err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
+ err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
if (err) {
- dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
- port, err);
+ dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
+ quad, err);
return err;
}
}
return 0;
}
- case ICE_PHY_E82X: {
- int quad;
+ case ICE_MAC_GENERIC_3K_E825: {
+ int port;
- for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
- quad++) {
+ for (port = 0; port < hw->ptp.num_lports; port++) {
int err;
- err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
+ err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
if (err) {
- dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
- quad, err);
+ dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
+ port, err);
return err;
}
}
return 0;
}
- case ICE_PHY_E810:
- return 0;
- case ICE_PHY_UNSUP:
+ case ICE_MAC_UNKNOWN:
default:
- dev_warn(dev, "%s: Unexpected PHY model %d\n", __func__,
- ice_get_phy_model(hw));
return -EOPNOTSUPP;
}
}
@@ -1740,7 +1735,7 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
/* 0. Reset mode & out_en in AUX_OUT */
wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);
- if (ice_is_e825c(hw)) {
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) {
int err;
/* Enable/disable CGU 1PPS output for E825C */
@@ -1783,6 +1778,7 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
8 + chan + (tmr_idx * 4));
wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
+ ice_flush(hw);
return 0;
}
@@ -1824,7 +1820,7 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
return ice_ptp_write_perout(hw, rq->index, gpio_pin, 0, 0);
if (strncmp(pf->ptp.pin_desc[pin_desc_idx].name, "1PPS", 64) == 0 &&
- period != NSEC_PER_SEC && hw->ptp.phy_model == ICE_PHY_E82X) {
+ period != NSEC_PER_SEC && hw->mac_type == ICE_MAC_GENERIC) {
dev_err(ice_pf_to_dev(pf), "1PPS pin supports only 1 s period\n");
return -EOPNOTSUPP;
}
@@ -1843,9 +1839,10 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
div64_u64_rem(start, period, &phase);
/* If we have only phase or start time is in the past, start the timer
- * at the next multiple of period, maintaining phase.
+ * at the next multiple of period, maintaining phase, and at least 0.5
+ * seconds from now so that there is time to write it to the hardware.
*/
- clk = ice_ptp_read_src_clk_reg(pf, NULL);
+ clk = ice_ptp_read_src_clk_reg(pf, NULL) + NSEC_PER_MSEC * 500;
if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns)
start = div64_u64(clk + period - 1, period) * period + phase;
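[Editor's note] A quick worked example of the rounding above, using hypothetical values (the 0.5 s margin is already folded into clk at this point); demo_next_perout_start() is an illustrative name, not driver code:

#include <linux/math64.h>
#include <linux/time64.h>

/* Hypothetical values: 1 s period, 0.25 s phase, PHC reads 11.8 s so clk
 * (with the 0.5 s margin folded in) is 12.3 s.
 */
static u64 demo_next_perout_start(void)
{
	u64 period = NSEC_PER_SEC;
	u64 phase = 250 * NSEC_PER_MSEC;
	u64 clk = 12300000000ULL;

	/* Round clk up to the next period boundary, then add the phase:
	 * ceil(12.3 s / 1 s) * 1 s + 0.25 s == 13.25 s (13250000000 ns).
	 */
	return div64_u64(clk + period - 1, period) * period + phase;
}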
@@ -2078,7 +2075,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
/* For Vernier mode on E82X, we need to recalibrate after new settime.
* Start with marking timestamps as invalid.
*/
- if (ice_get_phy_model(hw) == ICE_PHY_E82X) {
+ if (hw->mac_type == ICE_MAC_GENERIC) {
err = ice_ptp_clear_phy_offset_ready_e82x(hw);
if (err)
dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n");
@@ -2102,7 +2099,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
ice_ptp_enable_all_perout(pf);
/* Recalibrate and re-enable timestamp blocks for E822/E823 */
- if (ice_get_phy_model(hw) == ICE_PHY_E82X)
+ if (hw->mac_type == ICE_MAC_GENERIC)
ice_ptp_restart_all_phy(pf);
exit:
if (err) {
@@ -2180,93 +2177,157 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
return 0;
}
+/**
+ * struct ice_crosststamp_cfg - Device cross timestamp configuration
+ * @lock_reg: The hardware semaphore lock to use
+ * @lock_busy: Bit in the semaphore lock indicating the lock is busy
+ * @ctl_reg: The hardware register to request cross timestamp
+ * @ctl_active: Bit in the control register to request cross timestamp
+ * @art_time_l: Lower 32-bits of ART system time
+ * @art_time_h: Upper 32-bits of ART system time
+ * @dev_time_l: Lower 32-bits of device time (per timer index)
+ * @dev_time_h: Upper 32-bits of device time (per timer index)
+ */
+struct ice_crosststamp_cfg {
+ /* HW semaphore lock register */
+ u32 lock_reg;
+ u32 lock_busy;
+
+ /* Capture control register */
+ u32 ctl_reg;
+ u32 ctl_active;
+
+ /* Time storage */
+ u32 art_time_l;
+ u32 art_time_h;
+ u32 dev_time_l[2];
+ u32 dev_time_h[2];
+};
+
+static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e82x = {
+ .lock_reg = PFHH_SEM,
+ .lock_busy = PFHH_SEM_BUSY_M,
+ .ctl_reg = GLHH_ART_CTL,
+ .ctl_active = GLHH_ART_CTL_ACTIVE_M,
+ .art_time_l = GLHH_ART_TIME_L,
+ .art_time_h = GLHH_ART_TIME_H,
+ .dev_time_l[0] = GLTSYN_HHTIME_L(0),
+ .dev_time_h[0] = GLTSYN_HHTIME_H(0),
+ .dev_time_l[1] = GLTSYN_HHTIME_L(1),
+ .dev_time_h[1] = GLTSYN_HHTIME_H(1),
+};
+
#ifdef CONFIG_ICE_HWTS
+static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e830 = {
+ .lock_reg = E830_PFPTM_SEM,
+ .lock_busy = E830_PFPTM_SEM_BUSY_M,
+ .ctl_reg = E830_GLPTM_ART_CTL,
+ .ctl_active = E830_GLPTM_ART_CTL_ACTIVE_M,
+ .art_time_l = E830_GLPTM_ART_TIME_L,
+ .art_time_h = E830_GLPTM_ART_TIME_H,
+ .dev_time_l[0] = E830_GLTSYN_PTMTIME_L(0),
+ .dev_time_h[0] = E830_GLTSYN_PTMTIME_H(0),
+ .dev_time_l[1] = E830_GLTSYN_PTMTIME_L(1),
+ .dev_time_h[1] = E830_GLTSYN_PTMTIME_H(1),
+};
+
+#endif /* CONFIG_ICE_HWTS */
+/**
+ * struct ice_crosststamp_ctx - Device cross timestamp context
+ * @snapshot: snapshot of system clocks for historic interpolation
+ * @pf: pointer to the PF private structure
+ * @cfg: pointer to hardware configuration for cross timestamp
+ */
+struct ice_crosststamp_ctx {
+ struct system_time_snapshot snapshot;
+ struct ice_pf *pf;
+ const struct ice_crosststamp_cfg *cfg;
+};
+
/**
- * ice_ptp_get_syncdevicetime - Get the cross time stamp info
+ * ice_capture_crosststamp - Capture a device/system cross timestamp
* @device: Current device time
* @system: System counter value read synchronously with device time
- * @ctx: Context provided by timekeeping code
+ * @__ctx: Context passed from ice_ptp_getcrosststamp
*
* Read device and system (ART) clock simultaneously and return the corrected
* clock values in ns.
+ *
+ * Return: zero on success, or a negative error code on failure.
*/
-static int
-ice_ptp_get_syncdevicetime(ktime_t *device,
- struct system_counterval_t *system,
- void *ctx)
+static int ice_capture_crosststamp(ktime_t *device,
+ struct system_counterval_t *system,
+ void *__ctx)
{
- struct ice_pf *pf = (struct ice_pf *)ctx;
- struct ice_hw *hw = &pf->hw;
- u32 hh_lock, hh_art_ctl;
- int i;
+ struct ice_crosststamp_ctx *ctx = __ctx;
+ const struct ice_crosststamp_cfg *cfg;
+ u32 lock, ctl, ts_lo, ts_hi, tmr_idx;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int err;
+ u64 ts;
-#define MAX_HH_HW_LOCK_TRIES 5
-#define MAX_HH_CTL_LOCK_TRIES 100
+ cfg = ctx->cfg;
+ pf = ctx->pf;
+ hw = &pf->hw;
- for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) {
- /* Get the HW lock */
- hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
- if (hh_lock & PFHH_SEM_BUSY_M) {
- usleep_range(10000, 15000);
- continue;
- }
- break;
- }
- if (hh_lock & PFHH_SEM_BUSY_M) {
- dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
+ if (tmr_idx > 1)
+ return -EINVAL;
+
+ /* Poll until we obtain the cross-timestamp hardware semaphore */
+ err = rd32_poll_timeout(hw, cfg->lock_reg, lock,
+ !(lock & cfg->lock_busy),
+ 10 * USEC_PER_MSEC, 50 * USEC_PER_MSEC);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "PTP failed to get cross timestamp lock\n");
return -EBUSY;
}
+ /* Snapshot system time for historic interpolation */
+ ktime_get_snapshot(&ctx->snapshot);
+
/* Program cmd to master timer */
ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
/* Start the ART and device clock sync sequence */
- hh_art_ctl = rd32(hw, GLHH_ART_CTL);
- hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M;
- wr32(hw, GLHH_ART_CTL, hh_art_ctl);
-
- for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) {
- /* Wait for sync to complete */
- hh_art_ctl = rd32(hw, GLHH_ART_CTL);
- if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
- udelay(1);
- continue;
- } else {
- u32 hh_ts_lo, hh_ts_hi, tmr_idx;
- u64 hh_ts;
-
- tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
- /* Read ART time */
- hh_ts_lo = rd32(hw, GLHH_ART_TIME_L);
- hh_ts_hi = rd32(hw, GLHH_ART_TIME_H);
- hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
- system->cycles = hh_ts;
- system->cs_id = CSID_X86_ART;
- /* Read Device source clock time */
- hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
- hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
- hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
- *device = ns_to_ktime(hh_ts);
- break;
- }
- }
+ ctl = rd32(hw, cfg->ctl_reg);
+ ctl |= cfg->ctl_active;
+ wr32(hw, cfg->ctl_reg, ctl);
+ /* Poll until hardware completes the capture */
+ err = rd32_poll_timeout(hw, cfg->ctl_reg, ctl, !(ctl & cfg->ctl_active),
+ 5, 20 * USEC_PER_MSEC);
+ if (err)
+ goto err_timeout;
+
+ /* Read ART system time */
+ ts_lo = rd32(hw, cfg->art_time_l);
+ ts_hi = rd32(hw, cfg->art_time_h);
+ ts = ((u64)ts_hi << 32) | ts_lo;
+ system->cycles = ts;
+ system->cs_id = CSID_X86_ART;
+
+ /* Read Device source clock time */
+ ts_lo = rd32(hw, cfg->dev_time_l[tmr_idx]);
+ ts_hi = rd32(hw, cfg->dev_time_h[tmr_idx]);
+ ts = ((u64)ts_hi << 32) | ts_lo;
+ *device = ns_to_ktime(ts);
+
+err_timeout:
/* Clear the master timer */
ice_ptp_src_cmd(hw, ICE_PTP_NOP);
/* Release HW lock */
- hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
- hh_lock = hh_lock & ~PFHH_SEM_BUSY_M;
- wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);
-
- if (i == MAX_HH_CTL_LOCK_TRIES)
- return -ETIMEDOUT;
+ lock = rd32(hw, cfg->lock_reg);
+ lock &= ~cfg->lock_busy;
+ wr32(hw, cfg->lock_reg, lock);
- return 0;
+ return err;
}
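[Editor's note] The open-coded retry loops removed here are replaced by rd32_poll_timeout(). A minimal sketch of the underlying pattern, assuming rd32_poll_timeout() wraps read_poll_timeout() from <linux/iopoll.h>; the helper name wait_capture_done() is made up for illustration:

#include <linux/iopoll.h>

/* Poll ctl_reg every 5 us for up to 20 ms until the active bit clears.
 * Returns 0 once the condition holds, or -ETIMEDOUT on expiry.
 */
static int wait_capture_done(struct ice_hw *hw, u32 ctl_reg, u32 active_bit)
{
	u32 ctl;

	return read_poll_timeout(rd32, ctl, !(ctl & active_bit),
				 5, 20 * USEC_PER_MSEC, false,
				 hw, ctl_reg);
}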
/**
- * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp
+ * ice_ptp_getcrosststamp - Capture a device cross timestamp
* @info: the driver's PTP info structure
* @cts: The memory to fill the cross timestamp info
*
@@ -2274,22 +2335,36 @@ ice_ptp_get_syncdevicetime(ktime_t *device,
* clock. Fill the cross timestamp information and report it back to the
* caller.
*
- * This is only valid for E822 and E823 devices which have support for
- * generating the cross timestamp via PCIe PTM.
- *
* In order to correctly correlate the ART timestamp back to the TSC time, the
* CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
+ *
+ * Return: zero on success, or a negative error code on failure.
*/
-static int
-ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info,
- struct system_device_crosststamp *cts)
+static int ice_ptp_getcrosststamp(struct ptp_clock_info *info,
+ struct system_device_crosststamp *cts)
{
struct ice_pf *pf = ptp_info_to_pf(info);
+ struct ice_crosststamp_ctx ctx = {
+ .pf = pf,
+ };
+
+ switch (pf->hw.mac_type) {
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
+ ctx.cfg = &ice_crosststamp_cfg_e82x;
+ break;
+#ifdef CONFIG_ICE_HWTS
+ case ICE_MAC_E830:
+ ctx.cfg = &ice_crosststamp_cfg_e830;
+ break;
+#endif /* CONFIG_ICE_HWTS */
+ default:
+ return -EOPNOTSUPP;
+ }
- return get_device_system_crosststamp(ice_ptp_get_syncdevicetime,
- pf, NULL, cts);
+ return get_device_system_crosststamp(ice_capture_crosststamp, &ctx,
+ &ctx.snapshot, cts);
}
-#endif /* CONFIG_ICE_HWTS */
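[Editor's note] The crosststamp callback registered via ptp_clock_info ultimately services the PTP_SYS_OFFSET_PRECISE ioctl. An illustrative userspace consumer, assuming the ice PHC is exposed as /dev/ptp0:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_sys_offset_precise cts = { 0 };
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET_PRECISE, &cts))
		return 1;

	/* Device time and system CLOCK_REALTIME captured against the same
	 * ART instant, suitable for precise PHC<->system correlation.
	 */
	printf("device %lld.%09u realtime %lld.%09u\n",
	       (long long)cts.device.sec, cts.device.nsec,
	       (long long)cts.sys_realtime.sec, cts.sys_realtime.nsec);
	return 0;
}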
/**
* ice_ptp_get_ts_config - ioctl interface to read the timestamping config
@@ -2550,13 +2625,9 @@ static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries,
*/
static void ice_ptp_set_funcs_e82x(struct ice_pf *pf)
{
-#ifdef CONFIG_ICE_HWTS
- if (boot_cpu_has(X86_FEATURE_ART) &&
- boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
- pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp_e82x;
+ pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
-#endif /* CONFIG_ICE_HWTS */
- if (ice_is_e825c(&pf->hw)) {
+ if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825) {
pf->ptp.ice_pin_desc = ice_pin_desc_e825c;
pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e825c);
} else {
@@ -2623,6 +2694,28 @@ err:
}
/**
+ * ice_ptp_set_funcs_e830 - Set specialized functions for E830 support
+ * @pf: Board private structure
+ *
+ * Assign functions to the PTP capabiltiies structure for E830 devices.
+ * Functions which operate across all device families should be set directly
+ * in ice_ptp_set_caps. Only add functions here which are distinct for E830
+ * devices.
+ */
+static void ice_ptp_set_funcs_e830(struct ice_pf *pf)
+{
+#ifdef CONFIG_ICE_HWTS
+ if (pcie_ptm_enabled(pf->pdev) && boot_cpu_has(X86_FEATURE_ART))
+ pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
+
+#endif /* CONFIG_ICE_HWTS */
+ /* Rest of the config is the same as base E810 */
+ pf->ptp.ice_pin_desc = ice_pin_desc_e810;
+ pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810);
+ ice_ptp_setup_pin_cfg(pf);
+}
+
+/**
* ice_ptp_set_caps - Set PTP capabilities
* @pf: Board private structure
*/
@@ -2644,10 +2737,20 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
info->enable = ice_ptp_gpio_enable;
info->verify = ice_verify_pin;
- if (ice_is_e810(&pf->hw))
+ switch (pf->hw.mac_type) {
+ case ICE_MAC_E810:
ice_ptp_set_funcs_e810(pf);
- else
+ return;
+ case ICE_MAC_E830:
+ ice_ptp_set_funcs_e830(pf);
+ return;
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
ice_ptp_set_funcs_e82x(pf);
+ return;
+ default:
+ return;
+ }
}
/**
@@ -2758,6 +2861,65 @@ enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
}
/**
+ * ice_ptp_ts_irq - Process the PTP Tx timestamps in IRQ context
+ * @pf: Board private structure
+ *
+ * Return: IRQ_WAKE_THREAD if Tx timestamp read has to be handled in the bottom
+ * half of the interrupt and IRQ_HANDLED otherwise.
+ */
+irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ /* E810 capable of low latency timestamping with interrupt can
+ * request a single timestamp in the top half and wait for
+ * a second LL TS interrupt from the FW when it's ready.
+ */
+ if (hw->dev_caps.ts_dev_info.ts_ll_int_read) {
+ struct ice_ptp_tx *tx = &pf->ptp.port.tx;
+ u8 idx;
+
+ if (!ice_pf_state_is_nominal(pf))
+ return IRQ_HANDLED;
+
+ spin_lock(&tx->lock);
+ idx = find_next_bit_wrap(tx->in_use, tx->len,
+ tx->last_ll_ts_idx_read + 1);
+ if (idx != tx->len)
+ ice_ptp_req_tx_single_tstamp(tx, idx);
+ spin_unlock(&tx->lock);
+
+ return IRQ_HANDLED;
+ }
+ fallthrough; /* non-LL_TS E810 */
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
+ /* All other devices process timestamps in the bottom half due
+ * to sleeping or polling.
+ */
+ if (!ice_ptp_pf_handles_tx_interrupt(pf))
+ return IRQ_HANDLED;
+
+ set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
+ return IRQ_WAKE_THREAD;
+ case ICE_MAC_E830:
+ /* E830 can read timestamps in the top half using rd32() */
+ if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
+ /* Process outstanding Tx timestamps. If there
+ * is more work, re-arm the interrupt to trigger again.
+ */
+ wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
+ ice_flush(hw);
+ }
+ return IRQ_HANDLED;
+ default:
+ return IRQ_HANDLED;
+ }
+}
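[Editor's note] Returning IRQ_WAKE_THREAD from the hard handler is what defers the slow-path timestamp processing to the interrupt's thread function. A generic sketch of that split, not the ice MSI-X wiring itself; the demo_* names are illustrative:

#include <linux/interrupt.h>

static irqreturn_t demo_hard_handler(int irq, void *data)
{
	/* Atomic context: only cheap register pokes belong here. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_thread_fn(int irq, void *data)
{
	/* Process context: may sleep, take mutexes, poll firmware. */
	return IRQ_HANDLED;
}

/* Registration pairs the two handlers:
 *	request_threaded_irq(irq, demo_hard_handler, demo_thread_fn,
 *			     0, "demo-ts", dev);
 */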
+
+/**
* ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timestamp interrupt
* @pf: Board private structure
*
@@ -2777,7 +2939,7 @@ static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
bool trigger_oicr = false;
unsigned int i;
- if (ice_is_e810(hw))
+ if (!pf->ptp.port.tx.has_ready_bitmap)
return;
if (!ice_pf_src_tmr_owned(pf))
@@ -2912,14 +3074,12 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf)
*/
ice_ptp_flush_all_tx_tracker(pf);
- if (!ice_is_e810(hw)) {
- /* Enable quad interrupts */
- err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
- if (err)
- return err;
+ /* Enable quad interrupts */
+ err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
+ if (err)
+ return err;
- ice_ptp_restart_all_phy(pf);
- }
+ ice_ptp_restart_all_phy(pf);
/* Re-enable all periodic outputs and external timestamp events */
ice_ptp_enable_all_perout(pf);
@@ -2971,8 +3131,9 @@ err:
static bool ice_is_primary(struct ice_hw *hw)
{
- return ice_is_e825c(hw) && ice_is_dual(hw) ?
- !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) : true;
+ return hw->mac_type == ICE_MAC_GENERIC_3K_E825 && ice_is_dual(hw) ?
+ !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) :
+ true;
}
static int ice_ptp_setup_adapter(struct ice_pf *pf)
@@ -2990,7 +3151,7 @@ static int ice_ptp_setup_pf(struct ice_pf *pf)
struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
struct ice_ptp *ptp = &pf->ptp;
- if (WARN_ON(!ctrl_ptp) || ice_get_phy_model(&pf->hw) == ICE_PHY_UNSUP)
+ if (WARN_ON(!ctrl_ptp) || pf->hw.mac_type == ICE_MAC_UNKNOWN)
return -ENODEV;
INIT_LIST_HEAD(&ptp->port.list_node);
@@ -3007,7 +3168,7 @@ static void ice_ptp_cleanup_pf(struct ice_pf *pf)
{
struct ice_ptp *ptp = &pf->ptp;
- if (ice_get_phy_model(&pf->hw) != ICE_PHY_UNSUP) {
+ if (pf->hw.mac_type != ICE_MAC_UNKNOWN) {
mutex_lock(&pf->adapter->ports.lock);
list_del(&ptp->port.list_node);
mutex_unlock(&pf->adapter->ports.lock);
@@ -3127,6 +3288,8 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
* ice_ptp_init_port - Initialize PTP port structure
* @pf: Board private structure
* @ptp_port: PTP port structure
+ *
+ * Return: 0 on success, -ENODEV on invalid MAC type, -ENOMEM on failed alloc.
*/
static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
{
@@ -3134,16 +3297,14 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
mutex_init(&ptp_port->ps_lock);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_ptp_init_tx_eth56g(pf, &ptp_port->tx,
- ptp_port->port_num);
- case ICE_PHY_E810:
- return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
- case ICE_PHY_E82X:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_ptp_init_tx(pf, &ptp_port->tx, ptp_port->port_num);
+ case ICE_MAC_GENERIC:
kthread_init_delayed_work(&ptp_port->ov_work,
ice_ptp_wait_for_offsets);
-
return ice_ptp_init_tx_e82x(pf, &ptp_port->tx,
ptp_port->port_num);
default:
@@ -3162,8 +3323,8 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
*/
static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
{
- switch (ice_get_phy_model(&pf->hw)) {
- case ICE_PHY_E82X:
+ switch (pf->hw.mac_type) {
+ case ICE_MAC_GENERIC:
/* E822 based PHY has the clock owner process the interrupt
* for all ports.
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index a1d0e988c084..3b769a0cad00 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -128,8 +128,7 @@ struct ice_ptp_tx {
/* Quad and port information for initializing timestamp blocks */
#define INDEX_PER_QUAD 64
#define INDEX_PER_PORT_E82X 16
-#define INDEX_PER_PORT_E810 64
-#define INDEX_PER_PORT_ETH56G 64
+#define INDEX_PER_PORT 64
/**
* struct ice_ptp_port - data used to initialize an external port for PTP
@@ -304,6 +303,9 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx);
void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx);
enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf);
+irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf);
+u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
+ struct ptp_system_timestamp *sts);
u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
const struct ice_pkt_ctx *pkt_ctx);
@@ -342,6 +344,17 @@ static inline bool ice_ptp_process_ts(struct ice_pf *pf)
return true;
}
+static inline irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf)
+{
+ return IRQ_HANDLED;
+}
+
+static inline u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
+ struct ptp_system_timestamp *sts)
+{
+ return 0;
+}
+
static inline u64
ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
const struct ice_pkt_ctx *pkt_ctx)
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
index ac46d1183300..003cdfada3ca 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -10,70 +10,25 @@
/* Constants defined for the PTP 1588 clock hardware. */
const struct ice_phy_reg_info_eth56g eth56g_phy_res[NUM_ETH56G_PHY_RES] = {
- /* ETH56G_PHY_REG_PTP */
- {
- /* base_addr */
- {
- 0x092000,
- 0x126000,
- 0x1BA000,
- 0x24E000,
- 0x2E2000,
- },
- /* step */
- 0x98,
+ [ETH56G_PHY_REG_PTP] = {
+ .base_addr = 0x092000,
+ .step = 0x98,
},
- /* ETH56G_PHY_MEM_PTP */
- {
- /* base_addr */
- {
- 0x093000,
- 0x127000,
- 0x1BB000,
- 0x24F000,
- 0x2E3000,
- },
- /* step */
- 0x200,
+ [ETH56G_PHY_MEM_PTP] = {
+ .base_addr = 0x093000,
+ .step = 0x200,
},
- /* ETH56G_PHY_REG_XPCS */
- {
- /* base_addr */
- {
- 0x000000,
- 0x009400,
- 0x128000,
- 0x1BC000,
- 0x250000,
- },
- /* step */
- 0x21000,
+ [ETH56G_PHY_REG_XPCS] = {
+ .base_addr = 0x000000,
+ .step = 0x21000,
},
- /* ETH56G_PHY_REG_MAC */
- {
- /* base_addr */
- {
- 0x085000,
- 0x119000,
- 0x1AD000,
- 0x241000,
- 0x2D5000,
- },
- /* step */
- 0x1000,
+ [ETH56G_PHY_REG_MAC] = {
+ .base_addr = 0x085000,
+ .step = 0x1000,
},
- /* ETH56G_PHY_REG_GPCS */
- {
- /* base_addr */
- {
- 0x084000,
- 0x118000,
- 0x1AC000,
- 0x240000,
- 0x2D4000,
- },
- /* step */
- 0x400,
+ [ETH56G_PHY_REG_GPCS] = {
+ .base_addr = 0x084000,
+ .step = 0x400,
},
};
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index ec91822e9280..89bb8461284a 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -746,7 +746,7 @@ static int ice_init_cgu_e82x(struct ice_hw *hw)
int err;
/* Disable sticky lock detection so lock err reported is accurate */
- if (ice_is_e825c(hw))
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825)
err = ice_cfg_cgu_pll_dis_sticky_bits_e825c(hw);
else
err = ice_cfg_cgu_pll_dis_sticky_bits_e82x(hw);
@@ -756,7 +756,7 @@ static int ice_init_cgu_e82x(struct ice_hw *hw)
/* Configure the CGU PLL using the parameters from the function
* capabilities.
*/
- if (ice_is_e825c(hw))
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825)
err = ice_cfg_cgu_pll_e825c(hw, ts_info->time_ref,
(enum ice_clk_src)ts_info->clk_src);
else
@@ -827,8 +827,9 @@ static u32 ice_ptp_tmr_cmd_to_port_reg(struct ice_hw *hw,
/* Certain hardware families share the same register values for the
* port register and source timer register.
*/
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
return ice_ptp_tmr_cmd_to_src_reg(hw, cmd) & TS_CMD_MASK_E810;
default:
break;
@@ -895,6 +896,17 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
ice_flush(hw);
}
+/**
+ * ice_ptp_cfg_sync_delay - Configure PHC to PHY synchronization delay
+ * @hw: pointer to HW struct
+ * @delay: delay between PHC and PHY SYNC command execution in nanoseconds
+ */
+static void ice_ptp_cfg_sync_delay(const struct ice_hw *hw, u32 delay)
+{
+ wr32(hw, GLTSYN_SYNC_DLAY, delay);
+ ice_flush(hw);
+}
+
/* 56G PHY device functions
*
* The following functions operate on devices with the ETH 56G PHY.
@@ -998,7 +1010,7 @@ static int ice_phy_res_address_eth56g(struct ice_hw *hw, u8 lane,
/* Lanes 4..7 are in fact 0..3 on a second PHY */
lane %= hw->ptp.ports_per_phy;
- *addr = eth56g_phy_res[res_type].base[0] +
+ *addr = eth56g_phy_res[res_type].base_addr +
lane * eth56g_phy_res[res_type].step + offset;
return 0;
@@ -1228,7 +1240,7 @@ static int ice_write_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
if (port >= hw->ptp.num_lports)
return -EIO;
- addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base[0] + offset;
+ addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base_addr + offset;
return ice_write_phy_eth56g(hw, port, addr, val);
}
@@ -1253,7 +1265,7 @@ static int ice_read_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
if (port >= hw->ptp.num_lports)
return -EIO;
- addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base[0] + offset;
+ addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base_addr + offset;
return ice_read_phy_eth56g(hw, port, addr, val);
}
@@ -1576,9 +1588,8 @@ static int ice_read_ptp_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx,
* lower 8 bits in the low register, and the upper 32 bits in the high
* register.
*/
- *tstamp = FIELD_PREP(TS_PHY_HIGH_M, hi) |
- FIELD_PREP(TS_PHY_LOW_M, lo);
-
+ *tstamp = FIELD_PREP(PHY_40B_HIGH_M, hi) |
+ FIELD_PREP(PHY_40B_LOW_M, lo);
return 0;
}
@@ -2639,18 +2650,17 @@ static void ice_sb_access_ena_eth56g(struct ice_hw *hw, bool enable)
}
/**
- * ice_ptp_init_phc_eth56g - Perform E82X specific PHC initialization
+ * ice_ptp_init_phc_e825 - Perform E825 specific PHC initialization
* @hw: pointer to HW struct
*
- * Perform PHC initialization steps specific to E82X devices.
+ * Perform E825-specific PTP hardware clock initialization steps.
*
- * Return:
- * * %0 - success
- * * %other - failed to initialize CGU
+ * Return: 0 on success, negative error code otherwise.
*/
-static int ice_ptp_init_phc_eth56g(struct ice_hw *hw)
+static int ice_ptp_init_phc_e825(struct ice_hw *hw)
{
ice_sb_access_ena_eth56g(hw, true);
+
/* Initialize the Clock Generation Unit */
return ice_init_cgu_e82x(hw);
}
@@ -2729,10 +2739,7 @@ static void ice_ptp_init_phy_e825(struct ice_hw *hw)
{
struct ice_ptp_hw *ptp = &hw->ptp;
struct ice_eth56g_params *params;
- u32 phy_rev;
- int err;
- ptp->phy_model = ICE_PHY_ETH56G;
params = &ptp->phy.eth56g;
params->onestep_ena = false;
params->peer_delay = 0;
@@ -2742,9 +2749,6 @@ static void ice_ptp_init_phy_e825(struct ice_hw *hw)
ptp->num_lports = params->num_phys * ptp->ports_per_phy;
ice_sb_access_ena_eth56g(hw, true);
- err = ice_read_phy_eth56g(hw, hw->pf_id, PHY_REG_REVISION, &phy_rev);
- if (err || phy_rev != PHY_REVISION_ETH56G)
- ptp->phy_model = ICE_PHY_UNSUP;
}
/* E822 family functions
@@ -3219,7 +3223,8 @@ ice_read_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
* lower 8 bits in the low register, and the upper 32 bits in the high
* register.
*/
- *tstamp = FIELD_PREP(TS_PHY_HIGH_M, hi) | FIELD_PREP(TS_PHY_LOW_M, lo);
+ *tstamp = FIELD_PREP(PHY_40B_HIGH_M, hi) |
+ FIELD_PREP(PHY_40B_LOW_M, lo);
return 0;
}
@@ -4792,7 +4797,6 @@ int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold)
*/
static void ice_ptp_init_phy_e82x(struct ice_ptp_hw *ptp)
{
- ptp->phy_model = ICE_PHY_E82X;
ptp->num_lports = 8;
ptp->ports_per_phy = 8;
}
@@ -4986,7 +4990,8 @@ ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
/* For E810 devices, the timestamp is reported with the lower 32 bits
* in the low register, and the upper 8 bits in the high register.
*/
- *tstamp = ((u64)hi) << TS_HIGH_S | ((u64)lo & TS_LOW_M);
+ *tstamp = FIELD_PREP(PHY_EXT_40B_HIGH_M, hi) |
+ FIELD_PREP(PHY_EXT_40B_LOW_M, lo);
return 0;
}
@@ -5049,8 +5054,7 @@ static int ice_ptp_init_phc_e810(struct ice_hw *hw)
u8 tmr_idx;
int err;
- /* Ensure synchronization delay is zero */
- wr32(hw, GLTSYN_SYNC_DLAY, 0);
+ ice_ptp_cfg_sync_delay(hw, ICE_E810_E830_SYNC_DELAY);
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
@@ -5316,68 +5320,6 @@ ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready)
*/
/**
- * ice_get_pca9575_handle
- * @hw: pointer to the hw struct
- * @pca9575_handle: GPIO controller's handle
- *
- * Find and return the GPIO controller's handle in the netlist.
- * When found - the value will be cached in the hw structure and following calls
- * will return cached value
- */
-static int
-ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
-{
- struct ice_aqc_get_link_topo *cmd;
- struct ice_aq_desc desc;
- int status;
- u8 idx;
-
- /* If handle was read previously return cached value */
- if (hw->io_expander_handle) {
- *pca9575_handle = hw->io_expander_handle;
- return 0;
- }
-
- /* If handle was not detected read it from the netlist */
- cmd = &desc.params.get_link_topo;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
-
- /* Set node type to GPIO controller */
- cmd->addr.topo_params.node_type_ctx =
- (ICE_AQC_LINK_TOPO_NODE_TYPE_M &
- ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
-
-#define SW_PCA9575_SFP_TOPO_IDX 2
-#define SW_PCA9575_QSFP_TOPO_IDX 1
-
- /* Check if the SW IO expander controlling SMA exists in the netlist. */
- if (hw->device_id == ICE_DEV_ID_E810C_SFP)
- idx = SW_PCA9575_SFP_TOPO_IDX;
- else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
- idx = SW_PCA9575_QSFP_TOPO_IDX;
- else
- return -EOPNOTSUPP;
-
- cmd->addr.topo_params.index = idx;
-
- status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
- if (status)
- return -EOPNOTSUPP;
-
- /* Verify if we found the right IO expander type */
- if (desc.params.get_link_topo.node_part_num !=
- ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
- return -EOPNOTSUPP;
-
- /* If present save the handle and return it */
- hw->io_expander_handle =
- le16_to_cpu(desc.params.get_link_topo.addr.handle);
- *pca9575_handle = hw->io_expander_handle;
-
- return 0;
-}
-
-/**
* ice_read_sma_ctrl
* @hw: pointer to the hw struct
* @data: pointer to data to be read from the GPIO controller
@@ -5442,37 +5384,6 @@ int ice_write_sma_ctrl(struct ice_hw *hw, u8 data)
}
/**
- * ice_read_pca9575_reg
- * @hw: pointer to the hw struct
- * @offset: GPIO controller register offset
- * @data: pointer to data to be read from the GPIO controller
- *
- * Read the register from the GPIO controller
- */
-int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data)
-{
- struct ice_aqc_link_topo_addr link_topo;
- __le16 addr;
- u16 handle;
- int err;
-
- memset(&link_topo, 0, sizeof(link_topo));
-
- err = ice_get_pca9575_handle(hw, &handle);
- if (err)
- return err;
-
- link_topo.handle = cpu_to_le16(handle);
- link_topo.topo_params.node_type_ctx =
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
- ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
-
- addr = cpu_to_le16((u16)offset);
-
- return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
-}
-
-/**
* ice_ptp_read_sdp_ac - read SDP available connections section from NVM
* @hw: pointer to the HW struct
* @entries: returns the SDP available connections section from NVM
@@ -5538,18 +5449,138 @@ exit:
*/
static void ice_ptp_init_phy_e810(struct ice_ptp_hw *ptp)
{
- ptp->phy_model = ICE_PHY_E810;
ptp->num_lports = 8;
ptp->ports_per_phy = 4;
init_waitqueue_head(&ptp->phy.e810.atqbal_wq);
}
+/* E830 functions
+ *
+ * The following functions operate on the E830 series devices.
+ *
+ */
+
+/**
+ * ice_ptp_init_phc_e830 - Perform E830 specific PHC initialization
+ * @hw: pointer to HW struct
+ *
+ * Perform E830-specific PTP hardware clock initialization steps.
+ */
+static void ice_ptp_init_phc_e830(const struct ice_hw *hw)
+{
+ ice_ptp_cfg_sync_delay(hw, ICE_E810_E830_SYNC_DELAY);
+}
+
+/**
+ * ice_ptp_write_direct_incval_e830 - Prep PHY port increment value change
+ * @hw: pointer to HW struct
+ * @incval: The new 40bit increment value to prepare
+ *
+ * Prepare the PHY port for a new increment value by programming the PHC
+ * GLTSYN_INCVAL_L and GLTSYN_INCVAL_H registers. The actual change is
+ * completed by FW automatically.
+ */
+static void ice_ptp_write_direct_incval_e830(const struct ice_hw *hw,
+ u64 incval)
+{
+ u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+ wr32(hw, GLTSYN_INCVAL_L(tmr_idx), lower_32_bits(incval));
+ wr32(hw, GLTSYN_INCVAL_H(tmr_idx), upper_32_bits(incval));
+}
+
+/**
+ * ice_ptp_write_direct_phc_time_e830 - Prepare PHY port with initial time
+ * @hw: Board private structure
+ * @time: Time to initialize the PHY port clock to
+ *
+ * Program the PHC GLTSYN_TIME registers directly with the initial clock
+ * time. On E830 the write takes effect automatically, so the driver does
+ * not need to issue an ICE_PTP_INIT_TIME shadow command afterwards.
+ *
+ * The time value is the full 64-bit PHY timer value in units of nominal
+ * nanoseconds.
+ */
+static void ice_ptp_write_direct_phc_time_e830(const struct ice_hw *hw,
+ u64 time)
+{
+ u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+ wr32(hw, GLTSYN_TIME_0(tmr_idx), 0);
+ wr32(hw, GLTSYN_TIME_L(tmr_idx), lower_32_bits(time));
+ wr32(hw, GLTSYN_TIME_H(tmr_idx), upper_32_bits(time));
+}
+
+/**
+ * ice_ptp_port_cmd_e830 - Prepare all external PHYs for a timer command
+ * @hw: pointer to HW struct
+ * @cmd: Command to be sent to the port
+ *
+ * Prepare the external PHYs connected to this device for a timer sync
+ * command.
+ *
+ * Return: 0 on success, negative error code when PHY write failed
+ */
+static int ice_ptp_port_cmd_e830(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd);
+
+ return ice_write_phy_reg_e810(hw, E830_ETH_GLTSYN_CMD, val);
+}
+
+/**
+ * ice_read_phy_tstamp_e830 - Read a PHY timestamp out of the external PHY
+ * @hw: pointer to the HW struct
+ * @idx: the timestamp index to read
+ * @tstamp: on return, the 40bit timestamp value
+ *
+ * Read a 40bit timestamp value out of the timestamp block of the external PHY
+ * on the E830 device.
+ */
+static void ice_read_phy_tstamp_e830(const struct ice_hw *hw, u8 idx,
+ u64 *tstamp)
+{
+ u32 hi, lo;
+
+ hi = rd32(hw, E830_PRTTSYN_TXTIME_H(idx));
+ lo = rd32(hw, E830_PRTTSYN_TXTIME_L(idx));
+
+ /* For E830 devices, the timestamp is reported with the lower 32 bits
+ * in the low register, and the upper 8 bits in the high register.
+ */
+ *tstamp = FIELD_PREP(PHY_EXT_40B_HIGH_M, hi) |
+ FIELD_PREP(PHY_EXT_40B_LOW_M, lo);
+}
+
+/**
+ * ice_get_phy_tx_tstamp_ready_e830 - Read Tx memory status register
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to read
+ * @tstamp_ready: contents of the Tx memory status register
+ */
+static void ice_get_phy_tx_tstamp_ready_e830(const struct ice_hw *hw, u8 port,
+ u64 *tstamp_ready)
+{
+ *tstamp_ready = rd32(hw, E830_PRTMAC_TS_TX_MEM_VALID_H);
+ *tstamp_ready <<= 32;
+ *tstamp_ready |= rd32(hw, E830_PRTMAC_TS_TX_MEM_VALID_L);
+}
+
+/**
+ * ice_ptp_init_phy_e830 - initialize PHY parameters
+ * @ptp: pointer to the PTP HW struct
+ */
+static void ice_ptp_init_phy_e830(struct ice_ptp_hw *ptp)
+{
+ ptp->num_lports = 8;
+ ptp->ports_per_phy = 4;
+}
+
/* Device agnostic functions
*
- * The following functions implement shared behavior common to both E822 and
- * E810 devices, possibly calling a device specific implementation where
- * necessary.
+ * The following functions implement shared behavior common to all devices,
+ * possibly calling a device specific implementation where necessary.
*/
/**
@@ -5612,14 +5643,22 @@ void ice_ptp_init_hw(struct ice_hw *hw)
{
struct ice_ptp_hw *ptp = &hw->ptp;
- if (ice_is_e822(hw) || ice_is_e823(hw))
- ice_ptp_init_phy_e82x(ptp);
- else if (ice_is_e810(hw))
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
ice_ptp_init_phy_e810(ptp);
- else if (ice_is_e825c(hw))
+ break;
+ case ICE_MAC_E830:
+ ice_ptp_init_phy_e830(ptp);
+ break;
+ case ICE_MAC_GENERIC:
+ ice_ptp_init_phy_e82x(ptp);
+ break;
+ case ICE_MAC_GENERIC_3K_E825:
ice_ptp_init_phy_e825(hw);
- else
- ptp->phy_model = ICE_PHY_UNSUP;
+ break;
+ default:
+ return;
+ }
}
/**
@@ -5640,11 +5679,11 @@ void ice_ptp_init_hw(struct ice_hw *hw)
static int ice_ptp_write_port_cmd(struct ice_hw *hw, u8 port,
enum ice_ptp_tmr_cmd cmd)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_ptp_write_port_cmd_eth56g(hw, port, cmd);
- case ICE_PHY_E82X:
+ switch (hw->mac_type) {
+ case ICE_MAC_GENERIC:
return ice_ptp_write_port_cmd_e82x(hw, port, cmd);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_ptp_write_port_cmd_eth56g(hw, port, cmd);
default:
return -EOPNOTSUPP;
}
@@ -5705,9 +5744,11 @@ static int ice_ptp_port_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
u32 port;
/* PHY models which can program all ports simultaneously */
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_ptp_port_cmd_e810(hw, cmd);
+ case ICE_MAC_E830:
+ return ice_ptp_port_cmd_e830(hw, cmd);
default:
break;
}
@@ -5778,23 +5819,29 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
/* Source timers */
+ /* For E830 we don't need to use shadow registers, it's automatic */
+ if (hw->mac_type == ICE_MAC_E830) {
+ ice_ptp_write_direct_phc_time_e830(hw, time);
+ return 0;
+ }
+
wr32(hw, GLTSYN_SHTIME_L(tmr_idx), lower_32_bits(time));
wr32(hw, GLTSYN_SHTIME_H(tmr_idx), upper_32_bits(time));
wr32(hw, GLTSYN_SHTIME_0(tmr_idx), 0);
/* PHY timers */
/* Fill Rx and Tx ports and send msg to PHY */
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_ptp_prep_phy_time_eth56g(hw,
- (u32)(time & 0xFFFFFFFF));
- break;
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
err = ice_ptp_prep_phy_time_e82x(hw, time & 0xFFFFFFFF);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_ptp_prep_phy_time_eth56g(hw,
+ (u32)(time & 0xFFFFFFFF));
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -5826,20 +5873,26 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ /* For E830 we don't need to use shadow registers, it's automatic */
+ if (hw->mac_type == ICE_MAC_E830) {
+ ice_ptp_write_direct_incval_e830(hw, incval);
+ return 0;
+ }
+
/* Shadow Adjust */
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_ptp_prep_phy_incval_eth56g(hw, incval);
- break;
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
err = ice_ptp_prep_phy_incval_e810(hw, incval);
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
err = ice_ptp_prep_phy_incval_e82x(hw, incval);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_ptp_prep_phy_incval_eth56g(hw, incval);
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -5899,16 +5952,19 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_ptp_prep_phy_adj_eth56g(hw, adj);
- break;
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
err = ice_ptp_prep_phy_adj_e810(hw, adj);
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_E830:
+ /* E830 syncs PHYs automatically after setting GLTSYN_SHADJ */
+ return 0;
+ case ICE_MAC_GENERIC:
err = ice_ptp_prep_phy_adj_e82x(hw, adj);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_ptp_prep_phy_adj_eth56g(hw, adj);
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -5932,13 +5988,16 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
*/
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
- case ICE_PHY_E82X:
+ case ICE_MAC_E830:
+ ice_read_phy_tstamp_e830(hw, idx, tstamp);
+ return 0;
+ case ICE_MAC_GENERIC:
return ice_read_phy_tstamp_e82x(hw, block, idx, tstamp);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp);
default:
return -EOPNOTSUPP;
}
@@ -5962,13 +6021,13 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
*/
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_clear_ptp_tstamp_eth56g(hw, block, idx);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_clear_phy_tstamp_e810(hw, block, idx);
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
return ice_clear_phy_tstamp_e82x(hw, block, idx);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_clear_ptp_tstamp_eth56g(hw, block, idx);
default:
return -EOPNOTSUPP;
}
@@ -6025,14 +6084,14 @@ static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx)
*/
void ice_ptp_reset_ts_memory(struct ice_hw *hw)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- ice_ptp_reset_ts_memory_eth56g(hw);
- break;
- case ICE_PHY_E82X:
+ switch (hw->mac_type) {
+ case ICE_MAC_GENERIC:
ice_ptp_reset_ts_memory_e82x(hw);
break;
- case ICE_PHY_E810:
+ case ICE_MAC_GENERIC_3K_E825:
+ ice_ptp_reset_ts_memory_eth56g(hw);
+ break;
+ case ICE_MAC_E810:
default:
return;
}
@@ -6054,13 +6113,16 @@ int ice_ptp_init_phc(struct ice_hw *hw)
/* Clear event err indications for auxiliary pins */
(void)rd32(hw, GLTSYN_STAT(src_idx));
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_ptp_init_phc_eth56g(hw);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_ptp_init_phc_e810(hw);
- case ICE_PHY_E82X:
+ case ICE_MAC_E830:
+ ice_ptp_init_phc_e830(hw);
+ return 0;
+ case ICE_MAC_GENERIC:
return ice_ptp_init_phc_e82x(hw);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_ptp_init_phc_e825(hw);
default:
return -EOPNOTSUPP;
}
@@ -6079,17 +6141,19 @@ int ice_ptp_init_phc(struct ice_hw *hw)
*/
int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_get_phy_tx_tstamp_ready_eth56g(hw, block,
- tstamp_ready);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_get_phy_tx_tstamp_ready_e810(hw, block,
tstamp_ready);
- case ICE_PHY_E82X:
+ case ICE_MAC_E830:
+ ice_get_phy_tx_tstamp_ready_e830(hw, block, tstamp_ready);
+ return 0;
+ case ICE_MAC_GENERIC:
return ice_get_phy_tx_tstamp_ready_e82x(hw, block,
tstamp_ready);
- break;
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_get_phy_tx_tstamp_ready_eth56g(hw, block,
+ tstamp_ready);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 6779ce120515..e5925ccc2613 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -65,14 +65,14 @@ enum ice_eth56g_link_spd {
/**
* struct ice_phy_reg_info_eth56g - ETH56G PHY register parameters
- * @base: base address for each PHY block
+ * @base_addr: base address for each PHY block
* @step: step between PHY lanes
*
* Characteristic information for the various PHY register parameters in the
* ETH56G devices
*/
struct ice_phy_reg_info_eth56g {
- u32 base[NUM_ETH56G_PHY_RES];
+ u32 base_addr;
u32 step;
};
@@ -324,6 +324,7 @@ extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD];
*/
#define ICE_E810_PLL_FREQ 812500000
#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL
+#define ICE_E810_E830_SYNC_DELAY 0
/* Device agnostic functions */
u8 ice_get_ptp_src_clock_index(struct ice_hw *hw);
@@ -395,7 +396,6 @@ int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold);
/* E810 family functions */
int ice_read_sma_ctrl(struct ice_hw *hw, u8 *data);
int ice_write_sma_ctrl(struct ice_hw *hw, u8 data);
-int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data);
int ice_ptp_read_sdp_ac(struct ice_hw *hw, __le16 *entries, uint *num_entries);
int ice_cgu_get_num_pins(struct ice_hw *hw, bool input);
enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input);
@@ -431,13 +431,14 @@ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port);
*/
static inline u64 ice_get_base_incval(struct ice_hw *hw)
{
- switch (hw->ptp.phy_model) {
- case ICE_PHY_ETH56G:
- return ICE_ETH56G_NOMINAL_INCVAL;
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
return ICE_PTP_NOMINAL_INCVAL_E810;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
return ice_e82x_nominal_incval(ice_e82x_time_ref(hw));
+ case ICE_MAC_GENERIC_3K_E825:
+ return ICE_ETH56G_NOMINAL_INCVAL;
default:
return 0;
}
@@ -650,18 +651,25 @@ static inline bool ice_is_dual(struct ice_hw *hw)
/* E810 timer command register */
#define E810_ETH_GLTSYN_CMD 0x03000344
+/* E830 timer command register */
+#define E830_ETH_GLTSYN_CMD 0x00088814
+
+/* E830 PHC time register */
+#define E830_GLTSYN_TIME_L(_tmr_idx) (0x0008A000 + 0x1000 * (_tmr_idx))
+
/* Source timer incval macros */
#define INCVAL_HIGH_M 0xFF
-/* Timestamp block macros */
+/* PHY 40b registers macros */
+#define PHY_EXT_40B_LOW_M GENMASK(31, 0)
+#define PHY_EXT_40B_HIGH_M GENMASK_ULL(39, 32)
+#define PHY_40B_LOW_M GENMASK(7, 0)
+#define PHY_40B_HIGH_M GENMASK_ULL(39, 8)
#define TS_VALID BIT(0)
#define TS_LOW_M 0xFFFFFFFF
#define TS_HIGH_M 0xFF
#define TS_HIGH_S 32
-#define TS_PHY_LOW_M GENMASK(7, 0)
-#define TS_PHY_HIGH_M GENMASK_ULL(39, 8)
-
#define BYTES_PER_IDX_ADDR_L_U 8
#define BYTES_PER_IDX_ADDR_L 4
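[Editor's note] The two mask pairs above encode the two 40-bit timestamp layouts: E810/E830 report the lower 32 bits in the low register and the upper 8 bits in the high register, while the E82X/ETH56G internal PHYs do the opposite. A small worked example with made-up register values; the demo_* helper names are illustrative and FIELD_PREP comes from <linux/bitfield.h>:

#include <linux/bitfield.h>

/* Compose a 40-bit timestamp from the E810/E830 register layout:
 * lower 32 bits in the low register, upper 8 bits in the high register.
 */
static inline u64 demo_ts_ext_40b(u32 lo, u32 hi)
{
	return FIELD_PREP(PHY_EXT_40B_HIGH_M, hi) |
	       FIELD_PREP(PHY_EXT_40B_LOW_M, lo);
}

/* Compose a 40-bit timestamp from the E82X/ETH56G internal PHY layout:
 * lower 8 bits in the low register, upper 32 bits in the high register.
 */
static inline u64 demo_ts_int_40b(u32 lo, u32 hi)
{
	return FIELD_PREP(PHY_40B_HIGH_M, hi) |
	       FIELD_PREP(PHY_40B_LOW_M, lo);
}

/* e.g. demo_ts_ext_40b(0x89ABCDEF, 0x12)  == 0x1289ABCDEF
 *      demo_ts_int_40b(0xEF, 0x123456AB)  == 0x123456ABEF
 */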
@@ -772,36 +780,19 @@ static inline bool ice_is_dual(struct ice_hw *hw)
#define PHY_MAC_XIF_TS_SFD_ENA_M ICE_M(0x1, 20)
#define PHY_MAC_XIF_GMII_TS_SEL_M ICE_M(0x1, 21)
-/* GPCS config register */
-#define PHY_GPCS_CONFIG_REG0 0x268
-#define PHY_GPCS_CONFIG_REG0_TX_THR_M ICE_M(0xF, 24)
-#define PHY_GPCS_BITSLIP 0x5C
-
#define PHY_TS_INT_CONFIG_THRESHOLD_M ICE_M(0x3F, 0)
#define PHY_TS_INT_CONFIG_ENA_M BIT(6)
-/* 1-step PTP config */
-#define PHY_PTP_1STEP_CONFIG 0x270
-#define PHY_PTP_1STEP_T1S_UP64_M ICE_M(0xF, 4)
-#define PHY_PTP_1STEP_T1S_DELTA_M ICE_M(0xF, 8)
-#define PHY_PTP_1STEP_PEER_DELAY(_port) (0x274 + 4 * (_port))
-#define PHY_PTP_1STEP_PD_ADD_PD_M ICE_M(0x1, 0)
-#define PHY_PTP_1STEP_PD_DELAY_M ICE_M(0x3fffffff, 1)
-#define PHY_PTP_1STEP_PD_DLY_V_M ICE_M(0x1, 31)
-
/* Macros to derive offsets for TimeStampLow and TimeStampHigh */
#define PHY_TSTAMP_L(x) (((x) * 8) + 0)
#define PHY_TSTAMP_U(x) (((x) * 8) + 4)
-#define PHY_REG_REVISION 0x85000
-
#define PHY_REG_DESKEW_0 0x94
#define PHY_REG_DESKEW_0_RLEVEL GENMASK(6, 0)
#define PHY_REG_DESKEW_0_RLEVEL_FRAC GENMASK(9, 7)
#define PHY_REG_DESKEW_0_RLEVEL_FRAC_W 3
#define PHY_REG_DESKEW_0_VALID GENMASK(10, 10)
-#define PHY_REG_GPCS_BITSLIP 0x5C
#define PHY_REG_SD_BIT_SLIP(_port_offset) (0x29C + 4 * (_port_offset))
#define PHY_REVISION_ETH56G 0x10200
#define PHY_VENDOR_TXLANE_THRESH 0x2000C
@@ -821,7 +812,21 @@ static inline bool ice_is_dual(struct ice_hw *hw)
#define PHY_MAC_BLOCKTIME 0x50
#define PHY_MAC_MARKERTIME 0x54
#define PHY_MAC_TX_OFFSET 0x58
+#define PHY_GPCS_BITSLIP 0x5C
#define PHY_PTP_INT_STATUS 0x7FD140
+/* ETH56G registers shared per quad */
+/* GPCS config register */
+#define PHY_GPCS_CONFIG_REG0 0x268
+#define PHY_GPCS_CONFIG_REG0_TX_THR_M GENMASK(27, 24)
+/* 1-step PTP config */
+#define PHY_PTP_1STEP_CONFIG 0x270
+#define PHY_PTP_1STEP_T1S_UP64_M GENMASK(7, 4)
+#define PHY_PTP_1STEP_T1S_DELTA_M GENMASK(11, 8)
+#define PHY_PTP_1STEP_PEER_DELAY(_quad_lane) (0x274 + 4 * (_quad_lane))
+#define PHY_PTP_1STEP_PD_ADD_PD_M BIT(0)
+#define PHY_PTP_1STEP_PD_DELAY_M GENMASK(30, 1)
+#define PHY_PTP_1STEP_PD_DLY_V_M BIT(31)
+
#endif /* _ICE_PTP_HW_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 8aabf7749aa5..f1648cf103b7 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -124,27 +124,6 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
}
/**
- * ice_sriov_free_msix_res - Reset/free any used MSIX resources
- * @pf: pointer to the PF structure
- *
- * Since no MSIX entries are taken from the pf->irq_tracker then just clear
- * the pf->sriov_base_vector.
- *
- * Returns 0 on success, and -EINVAL on error.
- */
-static int ice_sriov_free_msix_res(struct ice_pf *pf)
-{
- if (!pf)
- return -EINVAL;
-
- bitmap_free(pf->sriov_irq_bm);
- pf->sriov_irq_size = 0;
- pf->sriov_base_vector = 0;
-
- return 0;
-}
-
-/**
* ice_free_vfs - Free all VFs
* @pf: pointer to the PF structure
*/
@@ -178,6 +157,7 @@ void ice_free_vfs(struct ice_pf *pf)
ice_eswitch_detach_vf(pf, vf);
ice_dis_vf_qs(vf);
+ ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix);
if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
/* disable VF qp mappings and set VF disable state */
@@ -197,9 +177,6 @@ void ice_free_vfs(struct ice_pf *pf)
mutex_unlock(&vf->cfg_lock);
}
- if (ice_sriov_free_msix_res(pf))
- dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
-
vfs->num_qps_per = 0;
ice_free_vf_entries(pf);
@@ -369,40 +346,6 @@ void ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
}
/**
- * ice_sriov_set_msix_res - Set any used MSIX resources
- * @pf: pointer to PF structure
- * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
- *
- * This function allows SR-IOV resources to be taken from the end of the PF's
- * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
- * just set the pf->sriov_base_vector and return success.
- *
- * If there are not enough resources available, return an error. This should
- * always be caught by ice_set_per_vf_res().
- *
- * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
- * in the PF's space available for SR-IOV.
- */
-static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
-{
- u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
- int vectors_used = ice_get_max_used_msix_vector(pf);
- int sriov_base_vector;
-
- sriov_base_vector = total_vectors - num_msix_needed;
-
- /* make sure we only grab irq_tracker entries from the list end and
- * that we have enough available MSIX vectors
- */
- if (sriov_base_vector < vectors_used)
- return -EINVAL;
-
- pf->sriov_base_vector = sriov_base_vector;
-
- return 0;
-}
-
-/**
* ice_set_per_vf_res - check if vectors and queues are available
* @pf: pointer to the PF structure
* @num_vfs: the number of SR-IOV VFs being configured
@@ -426,11 +369,9 @@ static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
*/
static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
{
- int vectors_used = ice_get_max_used_msix_vector(pf);
u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
int msix_avail_per_vf, msix_avail_for_sriov;
struct device *dev = ice_pf_to_dev(pf);
- int err;
lockdep_assert_held(&pf->vfs.table_lock);
@@ -438,8 +379,7 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
return -EINVAL;
/* determine MSI-X resources per VF */
- msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
- vectors_used;
+ msix_avail_for_sriov = pf->virt_irq_tracker.num_entries;
msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
@@ -478,13 +418,6 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
return -ENOSPC;
}
- err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs);
- if (err) {
- dev_err(dev, "Unable to set MSI-X resources for %d VFs, err %d\n",
- num_vfs, err);
- return err;
- }
-
/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
pf->vfs.num_msix_per = num_msix_per_vf;
@@ -495,52 +428,6 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
}
/**
- * ice_sriov_get_irqs - get irqs for SR-IOV usacase
- * @pf: pointer to PF structure
- * @needed: number of irqs to get
- *
- * This returns the first MSI-X vector index in PF space that is used by this
- * VF. This index is used when accessing PF relative registers such as
- * GLINT_VECT2FUNC and GLINT_DYN_CTL.
- * This will always be the OICR index in the AVF driver so any functionality
- * using vf->first_vector_idx for queue configuration_id: id of VF which will
- * use this irqs
- *
- * Only SRIOV specific vectors are tracked in sriov_irq_bm. SRIOV vectors are
- * allocated from the end of global irq index. First bit in sriov_irq_bm means
- * last irq index etc. It simplifies extension of SRIOV vectors.
- * They will be always located from sriov_base_vector to the last irq
- * index. While increasing/decreasing sriov_base_vector can be moved.
- */
-static int ice_sriov_get_irqs(struct ice_pf *pf, u16 needed)
-{
- int res = bitmap_find_next_zero_area(pf->sriov_irq_bm,
- pf->sriov_irq_size, 0, needed, 0);
- /* conversion from number in bitmap to global irq index */
- int index = pf->sriov_irq_size - res - needed;
-
- if (res >= pf->sriov_irq_size || index < pf->sriov_base_vector)
- return -ENOENT;
-
- bitmap_set(pf->sriov_irq_bm, res, needed);
- return index;
-}
-
-/**
- * ice_sriov_free_irqs - free irqs used by the VF
- * @pf: pointer to PF structure
- * @vf: pointer to VF structure
- */
-static void ice_sriov_free_irqs(struct ice_pf *pf, struct ice_vf *vf)
-{
- /* Move back from first vector index to first index in bitmap */
- int bm_i = pf->sriov_irq_size - vf->first_vector_idx - vf->num_msix;
-
- bitmap_clear(pf->sriov_irq_bm, bm_i, vf->num_msix);
- vf->first_vector_idx = 0;
-}
-
-/**
* ice_init_vf_vsi_res - initialize/setup VF VSI resources
* @vf: VF to initialize/setup the VSI for
*
@@ -553,7 +440,7 @@ static int ice_init_vf_vsi_res(struct ice_vf *vf)
struct ice_vsi *vsi;
int err;
- vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
+ vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
if (vf->first_vector_idx < 0)
return -ENOMEM;
@@ -853,16 +740,10 @@ err_free_entries:
*/
static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
{
- int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
int ret;
- pf->sriov_irq_bm = bitmap_zalloc(total_vectors, GFP_KERNEL);
- if (!pf->sriov_irq_bm)
- return -ENOMEM;
- pf->sriov_irq_size = total_vectors;
-
/* Disable global interrupt 0 so we don't try to handle the VFLR. */
wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
@@ -915,7 +796,6 @@ err_unroll_intr:
/* rearm interrupts here */
ice_irq_dynamic_ena(hw, NULL, NULL);
clear_bit(ICE_OICR_INTR_DIS, pf->state);
- bitmap_free(pf->sriov_irq_bm);
return ret;
}
@@ -989,16 +869,7 @@ u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev)
{
struct ice_pf *pf = pci_get_drvdata(pdev);
- return pf->sriov_irq_size - ice_get_max_used_msix_vector(pf);
-}
-
-static int ice_sriov_move_base_vector(struct ice_pf *pf, int move)
-{
- if (pf->sriov_base_vector - move < ice_get_max_used_msix_vector(pf))
- return -ENOMEM;
-
- pf->sriov_base_vector -= move;
- return 0;
+ return pf->virt_irq_tracker.num_entries;
}
static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
@@ -1017,7 +888,8 @@ static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
continue;
ice_dis_vf_mappings(tmp_vf);
- ice_sriov_free_irqs(pf, tmp_vf);
+ ice_virt_free_irqs(pf, tmp_vf->first_vector_idx,
+ tmp_vf->num_msix);
vf_ids[to_remap] = tmp_vf->vf_id;
to_remap += 1;
@@ -1029,7 +901,7 @@ static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
continue;
tmp_vf->first_vector_idx =
- ice_sriov_get_irqs(pf, tmp_vf->num_msix);
+ ice_virt_get_irqs(pf, tmp_vf->num_msix);
/* there is no need to rebuild VSI as we are only changing the
* vector indexes not amount of MSI-X or queues
*/
@@ -1102,20 +974,15 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
prev_msix = vf->num_msix;
prev_queues = vf->num_vf_qs;
- if (ice_sriov_move_base_vector(pf, msix_vec_count - prev_msix)) {
- ice_put_vf(vf);
- return -ENOSPC;
- }
-
ice_dis_vf_mappings(vf);
- ice_sriov_free_irqs(pf, vf);
+ ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix);
/* Remap all VFs beside the one is now configured */
ice_sriov_remap_vectors(pf, vf->vf_id);
vf->num_msix = msix_vec_count;
vf->num_vf_qs = queues;
- vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
+ vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
if (vf->first_vector_idx < 0)
goto unroll;
@@ -1144,7 +1011,8 @@ unroll:
vf->num_msix = prev_msix;
vf->num_vf_qs = prev_queues;
- vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
+
+ vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
if (vf->first_vector_idx < 0) {
ice_put_vf(vf);
return -EINVAL;
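
The ice_sriov.c changes above drop the SR-IOV-private bitmap (sriov_irq_bm, sriov_base_vector and their helpers) in favour of the shared pf->virt_irq_tracker, reached through ice_virt_get_irqs()/ice_virt_free_irqs(). The actual implementation of those helpers is not part of this hunk; below is only a minimal sketch of the kind of bitmap-backed index tracker they are assumed to wrap, with illustrative struct and function names.

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/types.h>

struct virt_irq_tracker_sketch {
	unsigned long *bm;	/* one bit per vector reserved for virt users */
	u16 num_entries;	/* as exposed via virt_irq_tracker.num_entries */
};

/* reserve a contiguous block of @needed vectors, return its first index */
static int sketch_get_irqs(struct virt_irq_tracker_sketch *t, u16 needed)
{
	unsigned long first;

	first = bitmap_find_next_zero_area(t->bm, t->num_entries, 0, needed, 0);
	if (first >= t->num_entries)
		return -ENOSPC;

	bitmap_set(t->bm, first, needed);
	return first;
}

/* release a block previously handed out by sketch_get_irqs() */
static void sketch_free_irqs(struct virt_irq_tracker_sketch *t, u16 first, u16 num)
{
	bitmap_clear(t->bm, first, num);
}
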
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 9c9ea4c1b93b..1e4f6f6ee449 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1809,6 +1809,7 @@ dma_error:
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
+ const struct ice_tx_ring *tx_ring = off->tx_ring;
u32 l4_len = 0, l3_len = 0, l2_len = 0;
struct sk_buff *skb = first->skb;
union {
@@ -1958,6 +1959,30 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
l3_len = l4.hdr - ip.hdr;
offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
+ if ((tx_ring->netdev->features & NETIF_F_HW_CSUM) &&
+ !(first->tx_flags & ICE_TX_FLAGS_TSO) &&
+ !skb_csum_is_sctp(skb)) {
+ /* Set GCS */
+ u16 csum_start = (skb->csum_start - skb->mac_header) / 2;
+ u16 csum_offset = skb->csum_offset / 2;
+ u16 gcs_params;
+
+ gcs_params = FIELD_PREP(ICE_TX_GCS_DESC_START_M, csum_start) |
+ FIELD_PREP(ICE_TX_GCS_DESC_OFFSET_M, csum_offset) |
+ FIELD_PREP(ICE_TX_GCS_DESC_TYPE_M,
+ ICE_TX_GCS_DESC_CSUM_PSH);
+
+ /* Unlike legacy HW checksums, GCS requires a context
+ * descriptor.
+ */
+ off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX;
+ off->cd_gcs_params = gcs_params;
+ /* Fill out CSO info in data descriptors */
+ off->td_offset |= offset;
+ off->td_cmd |= cmd;
+ return 1;
+ }
+
/* Enable L4 checksum offloads */
switch (l4_proto) {
case IPPROTO_TCP:
@@ -2424,7 +2449,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
ICE_TXD_CTX_QW1_CMD_S);
ice_tstamp(tx_ring, skb, first, &offload);
- if (ice_is_switchdev_running(vsi->back) && vsi->type != ICE_VSI_SF)
+ if ((ice_is_switchdev_running(vsi->back) ||
+ ice_lag_is_switchdev_running(vsi->back)) &&
+ vsi->type != ICE_VSI_SF)
ice_eswitch_set_target_vsi(skb, &offload);
if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
@@ -2439,7 +2466,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
/* setup context descriptor */
cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
- cdesc->rsvd = cpu_to_le16(0);
+ cdesc->gcs = cpu_to_le16(offload.cd_gcs_params);
cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
}
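
For reference, the GCS start/offset fields programmed in the ice_tx_csum() hunk above are expressed in 16-bit words relative to the MAC header. A small worked sketch for a plain Ethernet/IPv4/TCP frame with CHECKSUM_PARTIAL; the helper names are illustrative and the arithmetic mirrors the hunk.

#include <linux/skbuff.h>

/* offset of the L4 header from the start of the MAC header, in u16 units;
 * e.g. Ethernet (14 B) + IPv4 without options (20 B) => 34 B => 17 words
 */
static u16 sketch_gcs_csum_start(const struct sk_buff *skb)
{
	return (skb->csum_start - skb->mac_header) / 2;
}

/* offset of the checksum field inside the L4 header, in u16 units;
 * for TCP, csum_offset == offsetof(struct tcphdr, check) == 16 => 8 words
 */
static u16 sketch_gcs_csum_offset(const struct sk_buff *skb)
{
	return skb->csum_offset / 2;
}
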
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 806bce701df3..a4b1e9514632 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -193,6 +193,7 @@ struct ice_tx_offload_params {
u32 td_l2tag1;
u32 cd_tunnel_params;
u16 cd_l2tag2;
+ u16 cd_gcs_params;
u8 header_len;
};
@@ -366,6 +367,7 @@ struct ice_rx_ring {
#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
#define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2)
#define ICE_RX_FLAGS_MULTIDEV BIT(3)
+#define ICE_RX_FLAGS_RING_GCS BIT(4)
u8 flags;
/* CL5 - 5th cacheline starts here */
struct xdp_rxq_info xdp_rxq;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 2719f0e20933..45cfaabc41cb 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -81,6 +81,23 @@ ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring,
}
/**
+ * ice_rx_gcs - Set generic checksum in skb
+ * @skb: skb currently being received and modified
+ * @rx_desc: receive descriptor
+ */
+static void ice_rx_gcs(struct sk_buff *skb,
+ const union ice_32b_rx_flex_desc *rx_desc)
+{
+ const struct ice_32b_rx_flex_desc_nic *desc;
+ u16 csum;
+
+ desc = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ csum = (__force u16)desc->raw_csum;
+ skb->csum = csum_unfold((__force __sum16)swab16(csum));
+}
+
+/**
* ice_rx_csum - Indicate in skb if checksum is good
* @ring: the ring we care about
* @skb: skb currently being received and modified
@@ -107,6 +124,15 @@ ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);
+ if ((ring->flags & ICE_RX_FLAGS_RING_GCS) &&
+ rx_desc->wb.rxdid == ICE_RXDID_FLEX_NIC &&
+ (decoded.inner_prot == LIBETH_RX_PT_INNER_TCP ||
+ decoded.inner_prot == LIBETH_RX_PT_INNER_UDP ||
+ decoded.inner_prot == LIBETH_RX_PT_INNER_ICMP)) {
+ ice_rx_gcs(skb, rx_desc);
+ return;
+ }
+
/* check if HW has decoded the packet and checksum */
if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
return;
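
On the receive side, ice_rx_gcs() above reports CHECKSUM_COMPLETE, which means the driver hands the stack a checksum over the received packet and the stack performs the final L4 verification itself; that is why one descriptor field can cover TCP, UDP and ICMP alike. A sketch of the conversion, assuming the descriptor stores the 16-bit sum byte-swapped relative to what the stack expects (hence the swab16() before csum_unfold()):

#include <linux/skbuff.h>
#include <linux/swab.h>
#include <net/checksum.h>

static void sketch_set_checksum_complete(struct sk_buff *skb, u16 raw_csum)
{
	/* the stack verifies L4 checksums itself from skb->csum */
	skb->ip_summed = CHECKSUM_COMPLETE;
	skb->csum = csum_unfold((__force __sum16)swab16(raw_csum));
}
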
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 33a1a5934c0d..0aab21113cc4 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -871,14 +871,6 @@ union ice_phy_params {
struct ice_eth56g_params eth56g;
};
-/* PHY model */
-enum ice_phy_model {
- ICE_PHY_UNSUP = -1,
- ICE_PHY_E810 = 1,
- ICE_PHY_E82X,
- ICE_PHY_ETH56G,
-};
-
/* Global Link Topology */
enum ice_global_link_topo {
ICE_LINK_TOPO_UP_TO_2_LINKS,
@@ -888,7 +880,6 @@ enum ice_global_link_topo {
};
struct ice_ptp_hw {
- enum ice_phy_model phy_model;
union ice_phy_params phy;
u8 num_lports;
u8 ports_per_phy;
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index 4261fe1c2bcd..799b2c1f1184 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -124,6 +124,9 @@ struct ice_vf {
u8 spoofchk:1;
u8 link_forced:1;
u8 link_up:1; /* only valid if VF link is forced */
+
+ u32 ptp_caps;
+
unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */
unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index ff4ad788d96a..7c3006eb68dd 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -498,6 +498,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_QOS)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_QOS;
+ if (vf->driver_caps & VIRTCHNL_VF_CAP_PTP)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_PTP;
+
vfres->num_vsis = 1;
/* Tx and Rx queue are equal for VF */
vfres->num_queue_pairs = vsi->num_txq;
@@ -562,7 +565,7 @@ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
*
* check for the valid queue ID
*/
-static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u8 qid)
+static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u16 qid)
{
/* allocated Tx and Rx queues should be always equal for VF VSI */
return qid < vsi->alloc_txq;
@@ -1862,15 +1865,33 @@ static int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg)
for (i = 0; i < qbw->num_queues; i++) {
if (qbw->cfg[i].shaper.peak != 0 && vf->max_tx_rate != 0 &&
- qbw->cfg[i].shaper.peak > vf->max_tx_rate)
+ qbw->cfg[i].shaper.peak > vf->max_tx_rate) {
dev_warn(ice_pf_to_dev(vf->pf), "The maximum queue %d rate limit configuration may not take effect because the maximum TX rate for VF-%d is %d\n",
qbw->cfg[i].queue_id, vf->vf_id,
vf->max_tx_rate);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
if (qbw->cfg[i].shaper.committed != 0 && vf->min_tx_rate != 0 &&
- qbw->cfg[i].shaper.committed < vf->min_tx_rate)
+ qbw->cfg[i].shaper.committed < vf->min_tx_rate) {
dev_warn(ice_pf_to_dev(vf->pf), "The minimum queue %d rate limit configuration may not take effect because the minimum TX rate for VF-%d is %d\n",
qbw->cfg[i].queue_id, vf->vf_id,
- vf->max_tx_rate);
+ vf->min_tx_rate);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+ if (qbw->cfg[i].queue_id > vf->num_vf_qs) {
+ dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure invalid queue_id\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+ if (qbw->cfg[i].tc >= ICE_MAX_TRAFFIC_CLASS) {
+ dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure a traffic class higher than allowed\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
}
for (i = 0; i < qbw->num_queues; i++) {
@@ -1900,13 +1921,21 @@ err:
*/
static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg)
{
+ u16 quanta_prof_id, quanta_size, start_qid, num_queues, end_qid, i;
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- u16 quanta_prof_id, quanta_size, start_qid, end_qid, i;
struct virtchnl_quanta_cfg *qquanta =
(struct virtchnl_quanta_cfg *)msg;
struct ice_vsi *vsi;
int ret;
+ start_qid = qquanta->queue_select.start_queue_id;
+ num_queues = qquanta->queue_select.num_queues;
+
+ if (check_add_overflow(start_qid, num_queues, &end_qid)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto err;
@@ -1918,8 +1947,6 @@ static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg)
goto err;
}
- end_qid = qquanta->queue_select.start_queue_id +
- qquanta->queue_select.num_queues;
if (end_qid > ICE_MAX_RSS_QS_PER_VF ||
end_qid > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
@@ -1948,7 +1975,6 @@ static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg)
goto err;
}
- start_qid = qquanta->queue_select.start_queue_id;
for (i = start_qid; i < end_qid; i++)
vsi->tx_rings[i]->quanta_prof_id = quanta_prof_id;
@@ -1975,6 +2001,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
struct ice_vsi *vsi;
u8 act_prt, pri_prt;
int i = -1, q_idx;
+ bool ena_ts;
lag = pf->lag;
mutex_lock(&pf->lag_mutex);
@@ -2104,9 +2131,14 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
rxdid = ICE_RXDID_LEGACY_1;
}
+ ena_ts = ((vf->driver_caps &
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) &&
+ (vf->driver_caps & VIRTCHNL_VF_CAP_PTP) &&
+ (qpi->rxq.flags & VIRTCHNL_PTP_RX_TSTAMP));
+
ice_write_qrxflxp_cntxt(&vsi->back->hw,
- vsi->rxq_map[q_idx],
- rxdid, 0x03, false);
+ vsi->rxq_map[q_idx], rxdid,
+ ICE_RXDID_PRIO, ena_ts);
}
}
@@ -3031,8 +3063,8 @@ err:
static int ice_vc_query_rxdid(struct ice_vf *vf)
{
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_supported_rxdids rxdid = {};
struct ice_pf *pf = vf->pf;
+ u64 rxdid;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -3044,7 +3076,7 @@ static int ice_vc_query_rxdid(struct ice_vf *vf)
goto err;
}
- rxdid.supported_rxdids = pf->supported_rxdids;
+ rxdid = pf->supported_rxdids;
err:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
@@ -4092,6 +4124,59 @@ out:
v_ret, NULL, 0);
}
+static int ice_vc_get_ptp_cap(struct ice_vf *vf,
+ const struct virtchnl_ptp_caps *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ u32 caps = VIRTCHNL_1588_PTP_CAP_RX_TSTAMP |
+ VIRTCHNL_1588_PTP_CAP_READ_PHC;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ goto err;
+
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+
+ if (msg->caps & caps)
+ vf->ptp_caps = caps;
+
+err:
+ /* send the response back to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_1588_PTP_GET_CAPS, v_ret,
+ (u8 *)&vf->ptp_caps,
+ sizeof(struct virtchnl_ptp_caps));
+}
+
+static int ice_vc_get_phc_time(struct ice_vf *vf)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ struct virtchnl_phc_time *phc_time = NULL;
+ struct ice_pf *pf = vf->pf;
+ u32 len = 0;
+ int ret;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ goto err;
+
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+
+ phc_time = kzalloc(sizeof(*phc_time), GFP_KERNEL);
+ if (!phc_time) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ goto err;
+ }
+
+ len = sizeof(*phc_time);
+
+ phc_time->time = ice_ptp_read_src_clk_reg(pf, NULL);
+
+err:
+ /* send the response back to the VF */
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_1588_PTP_GET_TIME, v_ret,
+ (u8 *)phc_time, len);
+ kfree(phc_time);
+ return ret;
+}
+
static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.get_ver_msg = ice_vc_get_ver_msg,
.get_vf_res_msg = ice_vc_get_vf_res_msg,
@@ -4128,6 +4213,8 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.get_qos_caps = ice_vc_get_qos_caps,
.cfg_q_bw = ice_vc_cfg_q_bw,
.cfg_q_quanta = ice_vc_cfg_q_quanta,
+ .get_ptp_cap = ice_vc_get_ptp_cap,
+ .get_phc_time = ice_vc_get_phc_time,
/* If you add a new op here please make sure to add it to
* ice_virtchnl_repr_ops as well.
*/
@@ -4264,6 +4351,8 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
.get_qos_caps = ice_vc_get_qos_caps,
.cfg_q_bw = ice_vc_cfg_q_bw,
.cfg_q_quanta = ice_vc_cfg_q_quanta,
+ .get_ptp_cap = ice_vc_get_ptp_cap,
+ .get_phc_time = ice_vc_get_phc_time,
};
/**
@@ -4501,6 +4590,12 @@ error_handler:
case VIRTCHNL_OP_CONFIG_QUANTA:
err = ops->cfg_q_quanta(vf, msg);
break;
+ case VIRTCHNL_OP_1588_PTP_GET_CAPS:
+ err = ops->get_ptp_cap(vf, (const void *)msg);
+ break;
+ case VIRTCHNL_OP_1588_PTP_GET_TIME:
+ err = ops->get_phc_time(vf);
+ break;
case VIRTCHNL_OP_UNKNOWN:
default:
dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
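
The VIRTCHNL_OP_CONFIG_QUANTA handler above now computes the end of the requested queue range with check_add_overflow() before any bounds check, since start_queue_id and num_queues both arrive in an untrusted VF message and their u16 sum can wrap. A minimal sketch of the same validation, with an illustrative max_qs limit standing in for the driver's per-VSI queue counts:

#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/errno.h>

static int sketch_validate_queue_range(u16 start, u16 num, u16 max_qs)
{
	u16 end;

	if (check_add_overflow(start, num, &end))
		return -EINVAL;		/* u16 wrap-around */

	if (end > max_qs)
		return -EINVAL;		/* range exceeds the allocation */

	return 0;
}
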
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
index 0c629aef9baf..222990f229d5 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
@@ -26,6 +26,9 @@
#define ICE_MAX_MACADDR_PER_VF 18
#define ICE_FLEX_DESC_RXDID_MAX_NUM 64
+/* Priority to be compared against previous priority from the pipe */
+#define ICE_RXDID_PRIO 0x03
+
/* VFs only get a single VSI. For ice hardware, the VF does not need to know
* its VSI index. However, the virtchnl interface requires a VSI number,
* mainly due to legacy hardware.
@@ -72,6 +75,9 @@ struct ice_virtchnl_ops {
int (*cfg_q_tc_map)(struct ice_vf *vf, u8 *msg);
int (*cfg_q_bw)(struct ice_vf *vf, u8 *msg);
int (*cfg_q_quanta)(struct ice_vf *vf, u8 *msg);
+ int (*get_ptp_cap)(struct ice_vf *vf,
+ const struct virtchnl_ptp_caps *msg);
+ int (*get_phc_time)(struct ice_vf *vf);
};
#ifdef CONFIG_PCI_IOV
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
index c105a82ee136..a3d1579a619a 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
@@ -84,6 +84,12 @@ static const u32 fdir_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_ADD_FDIR_FILTER, VIRTCHNL_OP_DEL_FDIR_FILTER,
};
+/* VIRTCHNL_VF_CAP_PTP */
+static const u32 ptp_allowlist_opcodes[] = {
+ VIRTCHNL_OP_1588_PTP_GET_CAPS,
+ VIRTCHNL_OP_1588_PTP_GET_TIME,
+};
+
static const u32 tc_allowlist_opcodes[] = {
VIRTCHNL_OP_GET_QOS_CAPS, VIRTCHNL_OP_CONFIG_QUEUE_BW,
VIRTCHNL_OP_CONFIG_QUANTA,
@@ -110,6 +116,7 @@ static const struct allowlist_opcode_info allowlist_opcodes[] = {
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_FDIR_PF, fdir_pf_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_VLAN_V2, vlan_v2_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_QOS, tc_allowlist_opcodes),
+ ALLOW_ITEM(VIRTCHNL_VF_CAP_PTP, ptp_allowlist_opcodes),
};
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index 14e3f0f89c78..9be4bd717512 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -832,21 +832,27 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf,
struct virtchnl_proto_hdrs *proto,
struct virtchnl_fdir_fltr_conf *conf)
{
- u8 *pkt_buf, *msk_buf __free(kfree);
+ u8 *pkt_buf, *msk_buf __free(kfree) = NULL;
struct ice_parser_result rslt;
struct ice_pf *pf = vf->pf;
+ u16 pkt_len, udp_port = 0;
struct ice_parser *psr;
int status = -ENOMEM;
struct ice_hw *hw;
- u16 udp_port = 0;
- pkt_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL);
- msk_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL);
+ pkt_len = proto->raw.pkt_len;
+
+ if (!pkt_len || pkt_len > VIRTCHNL_MAX_SIZE_RAW_PACKET)
+ return -EINVAL;
+
+ pkt_buf = kzalloc(pkt_len, GFP_KERNEL);
+ msk_buf = kzalloc(pkt_len, GFP_KERNEL);
+
if (!pkt_buf || !msk_buf)
goto err_mem_alloc;
- memcpy(pkt_buf, proto->raw.spec, proto->raw.pkt_len);
- memcpy(msk_buf, proto->raw.mask, proto->raw.pkt_len);
+ memcpy(pkt_buf, proto->raw.spec, pkt_len);
+ memcpy(msk_buf, proto->raw.mask, pkt_len);
hw = &pf->hw;
@@ -862,7 +868,7 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf,
if (ice_get_open_tunnel_port(hw, &udp_port, TNL_VXLAN))
ice_parser_vxlan_tunnel_set(psr, udp_port, true);
- status = ice_parser_run(psr, pkt_buf, proto->raw.pkt_len, &rslt);
+ status = ice_parser_run(psr, pkt_buf, pkt_len, &rslt);
if (status)
goto err_parser_destroy;
@@ -876,7 +882,7 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf,
}
status = ice_parser_profile_init(&rslt, pkt_buf, msk_buf,
- proto->raw.pkt_len, ICE_BLK_FD,
+ pkt_len, ICE_BLK_FD,
conf->prof);
if (status)
goto err_parser_profile_init;
@@ -885,7 +891,7 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf,
ice_parser_profile_dump(hw, conf->prof);
/* Store raw flow info into @conf */
- conf->pkt_len = proto->raw.pkt_len;
+ conf->pkt_len = pkt_len;
conf->pkt_buf = pkt_buf;
conf->parser_ena = true;
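
ice_vc_fdir_parse_raw() above gains two related hardening changes: pkt_len is validated against VIRTCHNL_MAX_SIZE_RAW_PACKET before any allocation or memcpy(), and the __free(kfree)-annotated msk_buf is initialized to NULL so the new early return cannot hand an uninitialized pointer to kfree(). A minimal sketch of that cleanup.h rule, with illustrative names and limits:

#include <linux/cleanup.h>
#include <linux/slab.h>
#include <linux/errno.h>

static int sketch_parse_raw(u16 pkt_len, u16 max_len)
{
	u8 *mask __free(kfree) = NULL;	/* runs kfree(mask) on every return */

	if (!pkt_len || pkt_len > max_len)
		return -EINVAL;		/* kfree(NULL) is a harmless no-op */

	mask = kzalloc(pkt_len, GFP_KERNEL);
	if (!mask)
		return -ENOMEM;

	/* ... use mask; no explicit kfree() needed on any return path ... */
	return 0;
}
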
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 8975d2971bc3..a3a4eaa17739 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019, Intel Corporation. */
#include <linux/bpf_trace.h>
+#include <linux/unroll.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "ice.h"
@@ -989,7 +990,8 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring,
struct ice_tx_desc *tx_desc;
u32 i;
- loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
+ unrolled_count(PKTS_PER_BATCH)
+ for (i = 0; i < PKTS_PER_BATCH; i++) {
dma_addr_t dma;
dma = xsk_buff_raw_get_dma(xsk_pool, descs[i].addr);
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 45adeb513253..8dc5d55e26c5 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -7,14 +7,6 @@
#define PKTS_PER_BATCH 8
-#ifdef __clang__
-#define loop_unrolled_for _Pragma("clang loop unroll_count(8)") for
-#elif __GNUC__ >= 8
-#define loop_unrolled_for _Pragma("GCC unroll 8") for
-#else
-#define loop_unrolled_for for
-#endif
-
struct ice_vsi;
#ifdef CONFIG_XDP_SOCKETS
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index a3d6b8f198a8..aa755dedb41d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -814,6 +814,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
netdev->hw_features |= dflt_features | offloads;
netdev->hw_enc_features |= dflt_features | offloads;
idpf_set_ethtool_ops(netdev);
+ netif_set_affinity_auto(netdev);
SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
/* carrier off on init to avoid Tx hangs */
@@ -927,15 +928,19 @@ static int idpf_stop(struct net_device *netdev)
static void idpf_decfg_netdev(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
+ u16 idx = vport->idx;
kfree(vport->rx_ptype_lkup);
vport->rx_ptype_lkup = NULL;
- unregister_netdev(vport->netdev);
- free_netdev(vport->netdev);
+ if (test_and_clear_bit(IDPF_VPORT_REG_NETDEV,
+ adapter->vport_config[idx]->flags)) {
+ unregister_netdev(vport->netdev);
+ free_netdev(vport->netdev);
+ }
vport->netdev = NULL;
- adapter->netdevs[vport->idx] = NULL;
+ adapter->netdevs[idx] = NULL;
}
/**
@@ -1536,13 +1541,22 @@ void idpf_init_task(struct work_struct *work)
}
for (index = 0; index < adapter->max_vports; index++) {
- if (adapter->netdevs[index] &&
- !test_bit(IDPF_VPORT_REG_NETDEV,
- adapter->vport_config[index]->flags)) {
- register_netdev(adapter->netdevs[index]);
- set_bit(IDPF_VPORT_REG_NETDEV,
- adapter->vport_config[index]->flags);
+ struct net_device *netdev = adapter->netdevs[index];
+ struct idpf_vport_config *vport_config;
+
+ vport_config = adapter->vport_config[index];
+
+ if (!netdev ||
+ test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags))
+ continue;
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register netdev for vport %d: %pe\n",
+ index, ERR_PTR(err));
+ continue;
}
+ set_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags);
}
/* As all the required vports are created, clear the reset flag
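
In the idpf_lib.c hunks above, a register_netdev() failure no longer takes down the whole init task; the IDPF_VPORT_REG_NETDEV flag records which netdevs actually registered, and idpf_decfg_netdev() only unregisters and frees those. A minimal sketch of the flag-guarded pattern; the bit name and flags word are placeholders, and a netdev that never registered is assumed to be freed on its own error path, as in the driver.

#include <linux/bitops.h>
#include <linux/netdevice.h>

#define SKETCH_REG_BIT	0	/* placeholder for IDPF_VPORT_REG_NETDEV */

static void sketch_try_register(struct net_device *netdev, unsigned long *flags)
{
	if (test_bit(SKETCH_REG_BIT, flags))
		return;			/* already registered */

	if (register_netdev(netdev))
		return;			/* leave flag clear; caller may retry */

	set_bit(SKETCH_REG_BIT, flags);
}

static void sketch_teardown(struct net_device *netdev, unsigned long *flags)
{
	/* only undo a registration that actually happened */
	if (test_and_clear_bit(SKETCH_REG_BIT, flags)) {
		unregister_netdev(netdev);
		free_netdev(netdev);
	}
}
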
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index dfd7cf1d9aa0..eae1b6f474e6 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -595,7 +595,7 @@ static bool idpf_rx_singleq_is_non_eop(const union virtchnl2_rx_desc *rx_desc)
*/
static void idpf_rx_singleq_csum(struct idpf_rx_queue *rxq,
struct sk_buff *skb,
- struct idpf_rx_csum_decoded csum_bits,
+ struct libeth_rx_csum csum_bits,
struct libeth_rx_pt decoded)
{
bool ipv4, ipv6;
@@ -661,10 +661,10 @@ checksum_fail:
*
* Return: parsed checksum status.
**/
-static struct idpf_rx_csum_decoded
+static struct libeth_rx_csum
idpf_rx_singleq_base_csum(const union virtchnl2_rx_desc *rx_desc)
{
- struct idpf_rx_csum_decoded csum_bits = { };
+ struct libeth_rx_csum csum_bits = { };
u32 rx_error, rx_status;
u64 qword;
@@ -696,10 +696,10 @@ idpf_rx_singleq_base_csum(const union virtchnl2_rx_desc *rx_desc)
*
* Return: parsed checksum status.
**/
-static struct idpf_rx_csum_decoded
+static struct libeth_rx_csum
idpf_rx_singleq_flex_csum(const union virtchnl2_rx_desc *rx_desc)
{
- struct idpf_rx_csum_decoded csum_bits = { };
+ struct libeth_rx_csum csum_bits = { };
u16 rx_status0, rx_status1;
rx_status0 = le16_to_cpu(rx_desc->flex_nic_wb.status_error0);
@@ -798,7 +798,7 @@ idpf_rx_singleq_process_skb_fields(struct idpf_rx_queue *rx_q,
u16 ptype)
{
struct libeth_rx_pt decoded = rx_q->rx_ptype_lkup[ptype];
- struct idpf_rx_csum_decoded csum_bits;
+ struct libeth_rx_csum csum_bits;
/* modifies the skb - consumes the enet header */
skb->protocol = eth_type_trans(skb, rx_q->netdev);
@@ -891,6 +891,7 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q,
* idpf_rx_singleq_extract_base_fields - Extract fields from the Rx descriptor
* @rx_desc: the descriptor to process
* @fields: storage for extracted values
+ * @ptype: pointer that will store packet type
*
* Decode the Rx descriptor and extract relevant information including the
* size and Rx packet type.
@@ -900,20 +901,21 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q,
*/
static void
idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_extracted *fields)
+ struct libeth_rqe_info *fields, u32 *ptype)
{
u64 qword;
qword = le64_to_cpu(rx_desc->base_wb.qword1.status_error_ptype_len);
- fields->size = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M, qword);
- fields->rx_ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qword);
+ fields->len = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M, qword);
+ *ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qword);
}
/**
* idpf_rx_singleq_extract_flex_fields - Extract fields from the Rx descriptor
* @rx_desc: the descriptor to process
* @fields: storage for extracted values
+ * @ptype: pointer that will store packet type
*
* Decode the Rx descriptor and extract relevant information including the
* size and Rx packet type.
@@ -923,12 +925,12 @@ idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc,
*/
static void
idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_extracted *fields)
+ struct libeth_rqe_info *fields, u32 *ptype)
{
- fields->size = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M,
- le16_to_cpu(rx_desc->flex_nic_wb.pkt_len));
- fields->rx_ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M,
- le16_to_cpu(rx_desc->flex_nic_wb.ptype_flex_flags0));
+ fields->len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M,
+ le16_to_cpu(rx_desc->flex_nic_wb.pkt_len));
+ *ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M,
+ le16_to_cpu(rx_desc->flex_nic_wb.ptype_flex_flags0));
}
/**
@@ -936,17 +938,18 @@ idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc,
* @rx_q: Rx descriptor queue
* @rx_desc: the descriptor to process
* @fields: storage for extracted values
+ * @ptype: pointer that will store packet type
*
*/
static void
idpf_rx_singleq_extract_fields(const struct idpf_rx_queue *rx_q,
const union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_extracted *fields)
+ struct libeth_rqe_info *fields, u32 *ptype)
{
if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M)
- idpf_rx_singleq_extract_base_fields(rx_desc, fields);
+ idpf_rx_singleq_extract_base_fields(rx_desc, fields, ptype);
else
- idpf_rx_singleq_extract_flex_fields(rx_desc, fields);
+ idpf_rx_singleq_extract_flex_fields(rx_desc, fields, ptype);
}
/**
@@ -966,9 +969,10 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
/* Process Rx packets bounded by budget */
while (likely(total_rx_pkts < (unsigned int)budget)) {
- struct idpf_rx_extracted fields = { };
+ struct libeth_rqe_info fields = { };
union virtchnl2_rx_desc *rx_desc;
struct idpf_rx_buf *rx_buf;
+ u32 ptype;
/* get the Rx desc from Rx queue based on 'next_to_clean' */
rx_desc = &rx_q->rx[ntc];
@@ -989,16 +993,16 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
*/
dma_rmb();
- idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields);
+ idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields, &ptype);
rx_buf = &rx_q->rx_buf[ntc];
- if (!libeth_rx_sync_for_cpu(rx_buf, fields.size))
+ if (!libeth_rx_sync_for_cpu(rx_buf, fields.len))
goto skip_data;
if (skb)
- idpf_rx_add_frag(rx_buf, skb, fields.size);
+ idpf_rx_add_frag(rx_buf, skb, fields.len);
else
- skb = idpf_rx_build_skb(rx_buf, fields.size);
+ skb = idpf_rx_build_skb(rx_buf, fields.len);
/* exit if we failed to retrieve a buffer */
if (!skb)
@@ -1033,8 +1037,7 @@ skip_data:
total_rx_bytes += skb->len;
/* protocol */
- idpf_rx_singleq_process_skb_fields(rx_q, skb,
- rx_desc, fields.rx_ptype);
+ idpf_rx_singleq_process_skb_fields(rx_q, skb, rx_desc, ptype);
/* send completed skb up the stack */
napi_gro_receive(rx_q->pp->p.napi, skb);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 977741c41498..bdf52cef3891 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -2895,7 +2895,7 @@ idpf_rx_hash(const struct idpf_rx_queue *rxq, struct sk_buff *skb,
* skb->protocol must be set before this function is called
*/
static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb,
- struct idpf_rx_csum_decoded csum_bits,
+ struct libeth_rx_csum csum_bits,
struct libeth_rx_pt decoded)
{
bool ipv4, ipv6;
@@ -2923,7 +2923,7 @@ static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb,
if (unlikely(csum_bits.l4e))
goto checksum_fail;
- if (csum_bits.raw_csum_inv ||
+ if (!csum_bits.raw_csum_valid ||
decoded.inner_prot == LIBETH_RX_PT_INNER_SCTP) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
return;
@@ -2946,10 +2946,10 @@ checksum_fail:
*
* Return: parsed checksum status.
**/
-static struct idpf_rx_csum_decoded
+static struct libeth_rx_csum
idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
{
- struct idpf_rx_csum_decoded csum = { };
+ struct libeth_rx_csum csum = { };
u8 qword0, qword1;
qword0 = rx_desc->status_err0_qw0;
@@ -2965,9 +2965,9 @@ idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 *
qword1);
csum.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
qword0);
- csum.raw_csum_inv =
- le16_get_bits(rx_desc->ptype_err_fflags0,
- VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M);
+ csum.raw_csum_valid =
+ !le16_get_bits(rx_desc->ptype_err_fflags0,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M);
csum.raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);
return csum;
@@ -3059,7 +3059,7 @@ static int
idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
{
- struct idpf_rx_csum_decoded csum_bits;
+ struct libeth_rx_csum csum_bits;
struct libeth_rx_pt decoded;
u16 rx_ptype;
@@ -3552,8 +3552,6 @@ void idpf_vport_intr_rel(struct idpf_vport *vport)
q_vector->tx = NULL;
kfree(q_vector->rx);
q_vector->rx = NULL;
-
- free_cpumask_var(q_vector->affinity_mask);
}
kfree(vport->q_vectors);
@@ -3580,8 +3578,6 @@ static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
vidx = vport->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
- /* clear the affinity_mask in the IRQ descriptor */
- irq_set_affinity_hint(irq_num, NULL);
kfree(free_irq(irq_num, q_vector));
}
}
@@ -3769,8 +3765,6 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
"Request_irq failed, error: %d\n", err);
goto free_q_irqs;
}
- /* assign the mask for this irq */
- irq_set_affinity_hint(irq_num, q_vector->affinity_mask);
}
return 0;
@@ -4182,7 +4176,8 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
{
int (*napi_poll)(struct napi_struct *napi, int budget);
- u16 v_idx;
+ u16 v_idx, qv_idx;
+ int irq_num;
if (idpf_is_queue_model_split(vport->txq_model))
napi_poll = idpf_vport_splitq_napi_poll;
@@ -4191,12 +4186,12 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
+ qv_idx = vport->q_vector_idxs[v_idx];
+ irq_num = vport->adapter->msix_entries[qv_idx].vector;
- netif_napi_add(vport->netdev, &q_vector->napi, napi_poll);
-
- /* only set affinity_mask if the CPU is online */
- if (cpu_online(v_idx))
- cpumask_set_cpu(v_idx, q_vector->affinity_mask);
+ netif_napi_add_config(vport->netdev, &q_vector->napi,
+ napi_poll, v_idx);
+ netif_napi_set_irq(&q_vector->napi, irq_num);
}
}
@@ -4240,9 +4235,6 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC;
q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
- if (!zalloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL))
- goto error;
-
q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx),
GFP_KERNEL);
if (!q_vector->tx)
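
The idpf interrupt rework above removes the driver-managed affinity_mask and the irq_set_affinity_hint() calls; instead netif_set_affinity_auto() is called once on the netdev (see the idpf_cfg_netdev hunk) and each vector is attached to the core's per-queue NAPI config space with netif_napi_add_config()/netif_napi_set_irq(), which is assumed to let the core pick and persist IRQ affinity across reconfiguration. A sketch of that setup with a placeholder poll function and arrays:

#include <linux/netdevice.h>

static void sketch_setup_napi_vectors(struct net_device *netdev,
				      struct napi_struct *napis,
				      const int *irqs, int num_vectors,
				      int (*poll)(struct napi_struct *, int))
{
	int i;

	/* let the core pick and maintain per-queue IRQ affinity */
	netif_set_affinity_auto(netdev);

	for (i = 0; i < num_vectors; i++) {
		/* index into the persistent per-queue NAPI config space */
		netif_napi_add_config(netdev, &napis[i], poll, i);
		/* tell the core which IRQ drives this NAPI instance */
		netif_napi_set_irq(&napis[i], irqs[i]);
	}
}
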
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 0f71a6f5557b..b029f566e57c 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -213,25 +213,6 @@ enum idpf_tx_ctx_desc_eipt_offload {
IDPF_TX_CTX_EXT_IP_IPV4 = 0x3
};
-/* Checksum offload bits decoded from the receive descriptor. */
-struct idpf_rx_csum_decoded {
- u32 l3l4p : 1;
- u32 ipe : 1;
- u32 eipe : 1;
- u32 eudpe : 1;
- u32 ipv6exadd : 1;
- u32 l4e : 1;
- u32 pprs : 1;
- u32 nat : 1;
- u32 raw_csum_inv : 1;
- u32 raw_csum : 16;
-};
-
-struct idpf_rx_extracted {
- unsigned int size;
- u16 rx_ptype;
-};
-
#define IDPF_TX_COMPLQ_CLEAN_BUDGET 256
#define IDPF_TX_MIN_PKT_LEN 17
#define IDPF_TX_DESCS_FOR_SKB_DATA_PTR 1
@@ -401,7 +382,6 @@ struct idpf_intr_reg {
* @rx_intr_mode: Dynamic ITR or not
* @rx_itr_idx: RX ITR index
* @v_idx: Vector index
- * @affinity_mask: CPU affinity mask
*/
struct idpf_q_vector {
__cacheline_group_begin_aligned(read_mostly);
@@ -438,13 +418,12 @@ struct idpf_q_vector {
__cacheline_group_begin_aligned(cold);
u16 v_idx;
- cpumask_var_t affinity_mask;
__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_q_vector, 120,
24 + sizeof(struct napi_struct) +
2 * sizeof(struct dim),
- 8 + sizeof(cpumask_var_t));
+ 8);
struct idpf_rx_queue_stats {
u64_stats_t packets;
@@ -940,7 +919,7 @@ static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector)
if (!q_vector)
return NUMA_NO_NODE;
- cpu = cpumask_first(q_vector->affinity_mask);
+ cpu = cpumask_first(&q_vector->napi.config->affinity_mask);
return cpu < nr_cpu_ids ? cpu_to_mem(cpu) : NUMA_NO_NODE;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index f94570556120..f323e1c1989f 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -509,6 +509,12 @@ static int igb_ptp_feature_enable_82580(struct ptp_clock_info *ptp,
PTP_STRICT_FLAGS))
return -EOPNOTSUPP;
+ /* Both the rising and falling edge are timestamped */
+ if (rq->extts.flags & PTP_STRICT_FLAGS &&
+ (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+ (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
+ return -EOPNOTSUPP;
+
if (on) {
pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS,
rq->extts.index);
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index b8111ad9a9a8..cd1d7b6c1782 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -579,6 +579,7 @@ struct igc_metadata_request {
struct xsk_tx_metadata *meta;
struct igc_ring *tx_ring;
u32 cmd_type;
+ u16 used_desc;
};
struct igc_q_vector {
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 84307bb7313e..491d942cefca 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -1092,7 +1092,8 @@ static int igc_init_empty_frame(struct igc_ring *ring,
dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE);
if (dma_mapping_error(ring->dev, dma)) {
- netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
+ net_err_ratelimited("%s: DMA mapping error for empty frame\n",
+ netdev_name(ring->netdev));
return -ENOMEM;
}
@@ -1108,20 +1109,12 @@ static int igc_init_empty_frame(struct igc_ring *ring,
return 0;
}
-static int igc_init_tx_empty_descriptor(struct igc_ring *ring,
- struct sk_buff *skb,
- struct igc_tx_buffer *first)
+static void igc_init_tx_empty_descriptor(struct igc_ring *ring,
+ struct sk_buff *skb,
+ struct igc_tx_buffer *first)
{
union igc_adv_tx_desc *desc;
u32 cmd_type, olinfo_status;
- int err;
-
- if (!igc_desc_unused(ring))
- return -EBUSY;
-
- err = igc_init_empty_frame(ring, first, skb);
- if (err)
- return err;
cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
@@ -1140,8 +1133,6 @@ static int igc_init_tx_empty_descriptor(struct igc_ring *ring,
ring->next_to_use++;
if (ring->next_to_use == ring->count)
ring->next_to_use = 0;
-
- return 0;
}
#define IGC_EMPTY_FRAME_SIZE 60
@@ -1567,6 +1558,40 @@ static bool igc_request_tx_tstamp(struct igc_adapter *adapter, struct sk_buff *s
return false;
}
+static int igc_insert_empty_frame(struct igc_ring *tx_ring)
+{
+ struct igc_tx_buffer *empty_info;
+ struct sk_buff *empty_skb;
+ void *data;
+ int ret;
+
+ empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ empty_skb = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
+ if (unlikely(!empty_skb)) {
+ net_err_ratelimited("%s: skb alloc error for empty frame\n",
+ netdev_name(tx_ring->netdev));
+ return -ENOMEM;
+ }
+
+ data = skb_put(empty_skb, IGC_EMPTY_FRAME_SIZE);
+ memset(data, 0, IGC_EMPTY_FRAME_SIZE);
+
+ /* Prepare DMA mapping and Tx buffer information */
+ ret = igc_init_empty_frame(tx_ring, empty_info, empty_skb);
+ if (unlikely(ret)) {
+ dev_kfree_skb_any(empty_skb);
+ return ret;
+ }
+
+ /* Prepare advanced context descriptor for empty packet */
+ igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);
+
+ /* Prepare advanced data descriptor for empty packet */
+ igc_init_tx_empty_descriptor(tx_ring, empty_skb, empty_info);
+
+ return 0;
+}
+
static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
struct igc_ring *tx_ring)
{
@@ -1586,6 +1611,7 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
* + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
* + 2 desc gap to keep tail from touching head,
* + 1 desc for context descriptor,
+ * + 2 desc for inserting an empty packet for launch time,
* otherwise try next time
*/
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
@@ -1605,24 +1631,16 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty);
if (insert_empty) {
- struct igc_tx_buffer *empty_info;
- struct sk_buff *empty;
- void *data;
-
- empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
- empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
- if (!empty)
- goto done;
-
- data = skb_put(empty, IGC_EMPTY_FRAME_SIZE);
- memset(data, 0, IGC_EMPTY_FRAME_SIZE);
-
- igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);
-
- if (igc_init_tx_empty_descriptor(tx_ring,
- empty,
- empty_info) < 0)
- dev_kfree_skb_any(empty);
+ /* Reset the launch time if the required empty frame fails to
+ * be inserted. However, this packet is not dropped, so it
+ * "dirties" the current Qbv cycle. This ensures that the
+ * upcoming packet, which is scheduled in the next Qbv cycle,
+ * does not require an empty frame. This way, the launch time
+ * continues to function correctly despite the current failure
+ * to insert the empty frame.
+ */
+ if (igc_insert_empty_frame(tx_ring))
+ launch_time = 0;
}
done:
@@ -1650,7 +1668,8 @@ done:
if (igc_request_tx_tstamp(adapter, skb, &tstamp_flags)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IGC_TX_FLAGS_TSTAMP | tstamp_flags;
- if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_USE_CYCLES)
+ if (skb->sk &&
+ READ_ONCE(skb->sk->sk_tsflags) & SOF_TIMESTAMPING_BIND_PHC)
tx_flags |= IGC_TX_FLAGS_TSTAMP_TIMER_1;
} else {
adapter->tx_hwtstamp_skipped++;
@@ -2953,9 +2972,48 @@ static u64 igc_xsk_fill_timestamp(void *_priv)
return *(u64 *)_priv;
}
+static void igc_xsk_request_launch_time(u64 launch_time, void *_priv)
+{
+ struct igc_metadata_request *meta_req = _priv;
+ struct igc_ring *tx_ring = meta_req->tx_ring;
+ __le32 launch_time_offset;
+ bool insert_empty = false;
+ bool first_flag = false;
+ u16 used_desc = 0;
+
+ if (!tx_ring->launchtime_enable)
+ return;
+
+ launch_time_offset = igc_tx_launchtime(tx_ring,
+ ns_to_ktime(launch_time),
+ &first_flag, &insert_empty);
+ if (insert_empty) {
+ /* Disregard the launch time request if the required empty frame
+ * fails to be inserted.
+ */
+ if (igc_insert_empty_frame(tx_ring))
+ return;
+
+ meta_req->tx_buffer =
+ &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ /* Inserting an empty packet requires two descriptors:
+ * one data descriptor and one context descriptor.
+ */
+ used_desc += 2;
+ }
+
+ /* Use one context descriptor to specify launch time and first flag. */
+ igc_tx_ctxtdesc(tx_ring, launch_time_offset, first_flag, 0, 0, 0);
+ used_desc += 1;
+
+ /* Update the number of used descriptors in this request */
+ meta_req->used_desc += used_desc;
+}
+
const struct xsk_tx_metadata_ops igc_xsk_tx_metadata_ops = {
.tmo_request_timestamp = igc_xsk_request_timestamp,
.tmo_fill_timestamp = igc_xsk_fill_timestamp,
+ .tmo_request_launch_time = igc_xsk_request_launch_time,
};
static void igc_xdp_xmit_zc(struct igc_ring *ring)
@@ -2978,7 +3036,13 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring)
ntu = ring->next_to_use;
budget = igc_desc_unused(ring);
- while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
+ /* Packets with launch time require one data descriptor and one context
+ * descriptor. When the launch time falls into the next Qbv cycle, we
+ * may need to insert an empty packet, which requires two more
+ * descriptors. Therefore, to be safe, we always ensure we have at least
+ * 4 descriptors available.
+ */
+ while (xsk_tx_peek_desc(pool, &xdp_desc) && budget >= 4) {
struct igc_metadata_request meta_req;
struct xsk_tx_metadata *meta = NULL;
struct igc_tx_buffer *bi;
@@ -2999,9 +3063,19 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring)
meta_req.tx_ring = ring;
meta_req.tx_buffer = bi;
meta_req.meta = meta;
+ meta_req.used_desc = 0;
xsk_tx_metadata_request(meta, &igc_xsk_tx_metadata_ops,
&meta_req);
+ /* xsk_tx_metadata_request() may have updated next_to_use */
+ ntu = ring->next_to_use;
+
+ /* xsk_tx_metadata_request() may have updated Tx buffer info */
+ bi = meta_req.tx_buffer;
+
+ /* xsk_tx_metadata_request() may use a few descriptors */
+ budget -= meta_req.used_desc;
+
tx_desc = IGC_TX_DESC(ring, ntu);
tx_desc->read.cmd_type_len = cpu_to_le32(meta_req.cmd_type);
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
@@ -3019,9 +3093,11 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring)
ntu++;
if (ntu == ring->count)
ntu = 0;
+
+ ring->next_to_use = ntu;
+ budget--;
}
- ring->next_to_use = ntu;
if (tx_desc) {
igc_flush_tx_descriptors(ring);
xsk_tx_release(pool);
@@ -7090,8 +7166,8 @@ static int igc_probe(struct pci_dev *pdev,
INIT_WORK(&adapter->reset_task, igc_reset_task);
INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);
- hrtimer_init(&adapter->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- adapter->hrtimer.function = &igc_qbv_scheduling_timer;
+ hrtimer_setup(&adapter->hrtimer, &igc_qbv_scheduling_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/* Initialize link properties that are user-changeable */
adapter->fc_autoneg = true;
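
igc_probe() above switches to hrtimer_setup(), which folds hrtimer_init() and the separate callback assignment into one call; the same conversion appears in the mvpp2 and octeontx2 ptp hunks further down. A before/after sketch with a placeholder callback:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static enum hrtimer_restart sketch_timer_fn(struct hrtimer *t)
{
	return HRTIMER_NORESTART;
}

static void sketch_old_style(struct hrtimer *t)
{
	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	t->function = sketch_timer_fn;	/* callback assigned separately */
}

static void sketch_new_style(struct hrtimer *t)
{
	/* init and callback in one call; the callback cannot be forgotten */
	hrtimer_setup(t, sketch_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
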
diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c
index 13bbd3346e01..c538e6b18aad 100644
--- a/drivers/net/ethernet/intel/igc/igc_xdp.c
+++ b/drivers/net/ethernet/intel/igc/igc_xdp.c
@@ -14,6 +14,7 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
bool if_running = netif_running(dev);
struct bpf_prog *old_prog;
bool need_update;
+ unsigned int i;
if (dev->mtu > ETH_DATA_LEN) {
/* For now, the driver doesn't support XDP functionality with
@@ -24,8 +25,13 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
}
need_update = !!adapter->xdp_prog != !!prog;
- if (if_running && need_update)
- igc_close(dev);
+ if (if_running && need_update) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ igc_disable_rx_ring(adapter->rx_ring[i]);
+ igc_disable_tx_ring(adapter->tx_ring[i]);
+ napi_disable(&adapter->rx_ring[i]->q_vector->napi);
+ }
+ }
old_prog = xchg(&adapter->xdp_prog, prog);
if (old_prog)
@@ -36,8 +42,13 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
else
xdp_features_clear_redirect_target(dev);
- if (if_running && need_update)
- igc_open(dev);
+ if (if_running && need_update) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ napi_enable(&adapter->rx_ring[i]->q_vector->napi);
+ igc_enable_tx_ring(adapter->tx_ring[i]);
+ igc_enable_rx_ring(adapter->rx_ring[i]);
+ }
+ }
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index da91c582d439..f03925c1f521 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -3185,6 +3185,7 @@ static int ixgbe_get_ts_info(struct net_device *dev,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
break;
case ixgbe_mac_X540:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index 866024f2b9ee..07ea1954a276 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -817,30 +817,9 @@ static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
}
}
-/**
- * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload
- * @skb: current data packet
- * @xs: pointer to transformer state struct
- **/
-static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
-{
- if (xs->props.family == AF_INET) {
- /* Offload with IPv4 options is not supported yet */
- if (ip_hdr(skb)->ihl != 5)
- return false;
- } else {
- /* Offload with IPv6 extension headers is not support yet */
- if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
- return false;
- }
-
- return true;
-}
-
static const struct xfrmdev_ops ixgbe_xfrmdev_ops = {
.xdo_dev_state_add = ixgbe_ipsec_add_sa,
.xdo_dev_state_delete = ixgbe_ipsec_del_sa,
- .xdo_dev_offload_ok = ixgbe_ipsec_offload_ok,
};
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 467f81239e12..481f917f7ed2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3185,6 +3185,10 @@ static void ixgbe_handle_fw_event(struct ixgbe_adapter *adapter)
case ixgbe_aci_opc_get_link_status:
ixgbe_handle_link_status_event(adapter, &event);
break;
+ case ixgbe_aci_opc_temp_tca_event:
+ e_crit(drv, "%s\n", ixgbe_overheat_msg);
+ ixgbe_down(adapter);
+ break;
default:
e_warn(hw, "unknown FW async event captured\n");
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 9339edbd9082..eef25e11d938 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -140,6 +140,7 @@
* proper mult and shift to convert the cycles into nanoseconds of time.
*/
#define IXGBE_X550_BASE_PERIOD 0xC80000000ULL
+#define IXGBE_E610_BASE_PERIOD 0x333333333ULL
#define INCVALUE_MASK 0x7FFFFFFF
#define ISGN 0x80000000
@@ -415,6 +416,7 @@ static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/* Upper 32 bits represent billions of cycles, lower 32 bits
* represent cycles. However, we use timespec64_to_ns for the
* correct math even though the units haven't been corrected
@@ -492,11 +494,13 @@ static int ixgbe_ptp_adjfine_X550(struct ptp_clock_info *ptp, long scaled_ppm)
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
struct ixgbe_hw *hw = &adapter->hw;
+ u64 rate, base;
bool neg_adj;
- u64 rate;
u32 inca;
- neg_adj = diff_by_scaled_ppm(IXGBE_X550_BASE_PERIOD, scaled_ppm, &rate);
+ base = hw->mac.type == ixgbe_mac_e610 ? IXGBE_E610_BASE_PERIOD :
+ IXGBE_X550_BASE_PERIOD;
+ neg_adj = diff_by_scaled_ppm(base, scaled_ppm, &rate);
/* warn if rate is too large */
if (rate >= INCVALUE_MASK)
@@ -559,6 +563,7 @@ static int ixgbe_ptp_gettimex(struct ptp_clock_info *ptp,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/* Upper 32 bits represent billions of cycles, lower 32 bits
* represent cycles. However, we use timespec64_to_ns for the
* correct math even though the units haven't been corrected
@@ -1067,6 +1072,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/* enable timestamping all packets only if at least some
* packets were requested. Otherwise, play nice and disable
* timestamping
@@ -1233,6 +1239,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
fallthrough;
case ixgbe_mac_x550em_a:
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
cc.read = ixgbe_ptp_read_X550;
break;
case ixgbe_mac_X540:
@@ -1280,6 +1287,7 @@ static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
/* Reset SYSTIME registers to 0 */
@@ -1407,6 +1415,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 30000000;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
index 8d06ade3c7cd..617e07878e4f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
@@ -171,6 +171,9 @@ enum ixgbe_aci_opc {
ixgbe_aci_opc_done_alt_write = 0x0904,
ixgbe_aci_opc_clear_port_alt_write = 0x0906,
+ /* TCA Events */
+ ixgbe_aci_opc_temp_tca_event = 0x0C94,
+
/* debug commands */
ixgbe_aci_opc_debug_dump_internals = 0xFF08,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index f804b35d79c7..8ba037e3d9c2 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -428,30 +428,9 @@ static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
}
}
-/**
- * ixgbevf_ipsec_offload_ok - can this packet use the xfrm hw offload
- * @skb: current data packet
- * @xs: pointer to transformer state struct
- **/
-static bool ixgbevf_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
-{
- if (xs->props.family == AF_INET) {
- /* Offload with IPv4 options is not supported yet */
- if (ip_hdr(skb)->ihl != 5)
- return false;
- } else {
- /* Offload with IPv6 extension headers is not support yet */
- if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
- return false;
- }
-
- return true;
-}
-
static const struct xfrmdev_ops ixgbevf_xfrmdev_ops = {
.xdo_dev_state_add = ixgbevf_ipsec_add_sa,
.xdo_dev_state_delete = ixgbevf_ipsec_del_sa,
- .xdo_dev_offload_ok = ixgbevf_ipsec_offload_ok,
};
/**
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 4fe121b9f94b..147571fdada3 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2342,7 +2342,7 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
prefetch(data);
xdp_buff_clear_frags_flag(xdp);
xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE,
- data_len, false);
+ data_len, true);
}
static void
@@ -2396,6 +2396,7 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
struct xdp_buff *xdp, u32 desc_status)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ u32 metasize = xdp->data - xdp->data_meta;
struct sk_buff *skb;
u8 num_frags;
@@ -2410,6 +2411,8 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb_put(skb, xdp->data_end - xdp->data);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->ip_summed = mvneta_rx_csum(pp, desc_status);
if (unlikely(xdp_buff_has_frags(xdp)))
@@ -5557,7 +5560,6 @@ static int mvneta_probe(struct platform_device *pdev)
clk_prepare_enable(pp->clk_bus);
pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops;
- pp->phylink_pcs.neg_mode = true;
pp->phylink_config.dev = &dev->dev;
pp->phylink_config.type = PHYLINK_NETDEV;
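
Both Marvell hunks (mvneta above, mvpp2 next) start passing meta_valid = true to xdp_prepare_buff() so an XDP program may build metadata in front of the payload, and then carry that area onto the skb with skb_metadata_set(). A short sketch of the two halves, with placeholder headroom handling:

#include <linux/skbuff.h>
#include <net/xdp.h>

static void sketch_prepare_rx_xdp(struct xdp_buff *xdp, unsigned char *hard_start,
				  int headroom, int data_len)
{
	/* final 'true' marks the area in front of data as usable metadata */
	xdp_prepare_buff(xdp, hard_start, headroom, data_len, true);
}

static void sketch_copy_meta_to_skb(const struct xdp_buff *xdp, struct sk_buff *skb)
{
	/* bytes the XDP program left between data_meta and data */
	u32 metasize = xdp->data - xdp->data_meta;

	if (metasize)
		skb_metadata_set(skb, metasize);
}
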
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index dd76c1b7ed3a..566c12c89520 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3915,13 +3915,13 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
while (rx_done < rx_todo) {
struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
+ u32 rx_status, timestamp, metasize = 0;
struct mvpp2_bm_pool *bm_pool;
struct page_pool *pp = NULL;
struct sk_buff *skb;
unsigned int frag_size;
dma_addr_t dma_addr;
phys_addr_t phys_addr;
- u32 rx_status, timestamp;
int pool, rx_bytes, err, ret;
struct page *page;
void *data;
@@ -3983,7 +3983,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq);
xdp_prepare_buff(&xdp, data,
MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM,
- rx_bytes, false);
+ rx_bytes, true);
ret = mvpp2_run_xdp(port, xdp_prog, &xdp, pp, &ps);
@@ -3999,6 +3999,8 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
ps.rx_bytes += rx_bytes;
continue;
}
+
+ metasize = xdp.data - xdp.data_meta;
}
if (frag_size)
@@ -4038,6 +4040,8 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
skb_put(skb, rx_bytes);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->ip_summed = mvpp2_rx_csum(port, rx_status);
skb->protocol = eth_type_trans(skb, dev);
@@ -6985,9 +6989,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
for (thread = 0; thread < priv->nthreads; thread++) {
port_pcpu = per_cpu_ptr(port->pcpu, thread);
- hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED_SOFT);
- port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
+ hrtimer_setup(&port_pcpu->tx_done_timer, mvpp2_hr_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED_SOFT);
port_pcpu->timer_scheduled = false;
port_pcpu->dev = dev;
}
@@ -7024,9 +7027,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->dev_port = port->id;
port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
- port->pcs_gmac.neg_mode = true;
port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops;
- port->pcs_xlg.neg_mode = true;
if (!mvpp2_use_acpi_compat_mode(port_fwnode)) {
port->phylink_config.dev = &dev->dev;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 8216f843a7cd..0b27a695008b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -66,8 +66,18 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CN10K_A) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF10K_A) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF10K_B) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CN10K_B) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CN20KA) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF20KA) },
{ 0, } /* end of table */
};
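For reference, the switch from PCI_DEVICE() to PCI_DEVICE_SUB() above only changes which match fields of struct pci_device_id get pinned; roughly (paraphrasing the helpers in <linux/pci.h>):

/* PCI_DEVICE(vend, dev) initialises:
 *   .vendor = (vend), .device = (dev),
 *   .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
 *
 * PCI_DEVICE_SUB(vend, dev, subvend, subdev) additionally pins the
 * subsystem IDs:
 *   .vendor = (vend), .device = (dev),
 *   .subvendor = (subvend), .subdevice = (subdev)
 *
 * so the RPM device above is now claimed only on the listed CN10K/CN20K
 * board variants instead of for any subsystem ID.
 */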
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
index d39d86e694cc..655dd4726d36 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
@@ -925,7 +925,6 @@ void rvu_mcs_exit(struct rvu *rvu)
if (!rvu->mcs_intr_wq)
return;
- flush_workqueue(rvu->mcs_intr_wq);
destroy_workqueue(rvu->mcs_intr_wq);
rvu->mcs_intr_wq = NULL;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index bcc96eed2481..66749b3649c1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -545,8 +545,7 @@ static int ptp_probe(struct pci_dev *pdev,
spin_lock_init(&ptp->ptp_lock);
if (cn10k_ptp_errata(ptp)) {
ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec;
- hrtimer_init(&ptp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ptp->hrtimer.function = ptp_reset_thresh;
+ hrtimer_setup(&ptp->hrtimer, ptp_reset_thresh, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
} else {
ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
}
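The hrtimer changes in mvpp2 and ptp.c above are mechanical conversions to the combined-setup API; a minimal before/after sketch with illustrative names:

#include <linux/hrtimer.h>

static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
{
	return HRTIMER_NORESTART;
}

static void example_timer_setup(struct hrtimer *timer)
{
	/* Previously two steps:
	 *   hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	 *   timer->function = example_timer_fn;
	 * now a single call that also assigns the callback:
	 */
	hrtimer_setup(timer, example_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}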
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index a383b5ef5b2d..60f085b00a8c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -30,6 +30,8 @@
#define PCI_SUBSYS_DEVID_CNF10K_A 0xBA00
#define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00
#define PCI_SUBSYS_DEVID_CN10K_B 0xBD00
+#define PCI_SUBSYS_DEVID_CN20KA 0xC220
+#define PCI_SUBSYS_DEVID_CNF20KA 0xC320
/* PCI BAR nos */
#define PCI_AF_REG_BAR_NUM 0
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index cb6513ab35e7..69e0778f9ac1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -9,7 +9,7 @@ obj-$(CONFIG_RVU_ESWITCH) += rvu_rep.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
- otx2_devlink.o qos_sq.o qos.o
+ otx2_devlink.o qos_sq.o qos.o otx2_xsk.o
rvu_nicvf-y := otx2_vf.o
rvu_rep-y := rep.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index a15cc86635d6..c3b6e0f60a79 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -112,9 +112,12 @@ int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
struct otx2_nic *pfvf = dev;
int cnt = cq->pool_ptrs;
u64 ptrs[NPA_MAX_BURST];
+ struct otx2_pool *pool;
dma_addr_t bufptr;
int num_ptrs = 1;
+ pool = &pfvf->qset.pool[cq->cq_idx];
+
/* Refill pool with new buffers */
while (cq->pool_ptrs) {
if (otx2_alloc_buffer(pfvf, cq, &bufptr)) {
@@ -124,7 +127,9 @@ int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
break;
}
cq->pool_ptrs--;
- ptrs[num_ptrs] = (u64)bufptr + OTX2_HEAD_ROOM;
+ ptrs[num_ptrs] = pool->xsk_pool ?
+ (u64)bufptr : (u64)bufptr + OTX2_HEAD_ROOM;
+
num_ptrs++;
if (num_ptrs == NPA_MAX_BURST || cq->pool_ptrs == 0) {
__cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
index 09a5b5268205..fc59e50bafce 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
@@ -744,24 +744,9 @@ static void cn10k_ipsec_del_state(struct xfrm_state *x)
queue_work(pf->ipsec.sa_workq, &pf->ipsec.sa_work);
}
-static bool cn10k_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- if (x->props.family == AF_INET) {
- /* Offload with IPv4 options is not supported yet */
- if (ip_hdr(skb)->ihl > 5)
- return false;
- } else {
- /* Offload with IPv6 extension headers is not support yet */
- if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
- return false;
- }
- return true;
-}
-
static const struct xfrmdev_ops cn10k_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = cn10k_ipsec_add_state,
.xdo_dev_state_delete = cn10k_ipsec_del_state,
- .xdo_dev_offload_ok = cn10k_ipsec_offload_ok,
};
static void cn10k_ipsec_sa_wq_handler(struct work_struct *work)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 2b49bfec7869..84cd029a85aa 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -17,6 +17,7 @@
#include "otx2_common.h"
#include "otx2_struct.h"
#include "cn10k.h"
+#include "otx2_xsk.h"
static bool otx2_is_pfc_enabled(struct otx2_nic *pfvf)
{
@@ -330,6 +331,10 @@ int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id)
rss_ctx = rss->rss_ctx[ctx_id];
/* Get memory to put this msg */
for (idx = 0; idx < rss->rss_size; idx++) {
+ /* Ignore the queue if AF_XDP zero copy is enabled */
+ if (test_bit(rss_ctx->ind_tbl[idx], pfvf->af_xdp_zc_qidx))
+ continue;
+
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
if (!aq) {
/* The shared memory buffer can be full.
@@ -549,10 +554,13 @@ static int otx2_alloc_pool_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
}
static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- dma_addr_t *dma)
+ dma_addr_t *dma, int qidx, int idx)
{
u8 *buf;
+ if (pool->xsk_pool)
+ return otx2_xsk_pool_alloc_buf(pfvf, pool, dma, idx);
+
if (pool->page_pool)
return otx2_alloc_pool_buf(pfvf, pool, dma);
@@ -571,12 +579,12 @@ static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
}
int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- dma_addr_t *dma)
+ dma_addr_t *dma, int qidx, int idx)
{
int ret;
local_bh_disable();
- ret = __otx2_alloc_rbuf(pfvf, pool, dma);
+ ret = __otx2_alloc_rbuf(pfvf, pool, dma, qidx, idx);
local_bh_enable();
return ret;
}
@@ -584,7 +592,8 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
dma_addr_t *dma)
{
- if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma)))
+ if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma,
+ cq->cq_idx, cq->pool_ptrs - 1)))
return -ENOMEM;
return 0;
}
@@ -884,7 +893,7 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */
-static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
+int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
{
struct otx2_qset *qset = &pfvf->qset;
struct nix_aq_enq_req *aq;
@@ -1028,6 +1037,10 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
sq->stats.bytes = 0;
sq->stats.pkts = 0;
+ /* Attach XSK_BUFF_POOL to XDP queue */
+ if (qidx > pfvf->hw.xdp_queues)
+ otx2_attach_xsk_buff(pfvf, sq, (qidx - pfvf->hw.xdp_queues));
+
chan_offset = qidx % pfvf->hw.tx_chan_cnt;
err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, chan_offset, sqb_aura);
@@ -1041,12 +1054,13 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
}
-static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
+int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
{
struct otx2_qset *qset = &pfvf->qset;
int err, pool_id, non_xdp_queues;
struct nix_aq_enq_req *aq;
struct otx2_cq_queue *cq;
+ struct otx2_pool *pool;
cq = &qset->cq[qidx];
cq->cq_idx = qidx;
@@ -1055,8 +1069,20 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
cq->cq_type = CQ_RX;
cq->cint_idx = qidx;
cq->cqe_cnt = qset->rqe_cnt;
- if (pfvf->xdp_prog)
+ if (pfvf->xdp_prog) {
xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0);
+ pool = &qset->pool[qidx];
+ if (pool->xsk_pool) {
+ xdp_rxq_info_reg_mem_model(&cq->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL);
+ xsk_pool_set_rxq_info(pool->xsk_pool, &cq->xdp_rxq);
+ } else if (pool->page_pool) {
+ xdp_rxq_info_reg_mem_model(&cq->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ pool->page_pool);
+ }
+ }
} else if (qidx < non_xdp_queues) {
cq->cq_type = CQ_TX;
cq->cint_idx = qidx - pfvf->hw.rx_queues;
@@ -1275,9 +1301,10 @@ void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
page = virt_to_head_page(phys_to_virt(pa));
-
if (pool->page_pool) {
page_pool_put_full_page(pool->page_pool, page, true);
+ } else if (pool->xsk_pool) {
+ /* Note: No way of identifying xdp_buff */
} else {
dma_unmap_page_attrs(pfvf->dev, iova, size,
DMA_FROM_DEVICE,
@@ -1292,6 +1319,7 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
int pool_id, pool_start = 0, pool_end = 0, size = 0;
struct otx2_pool *pool;
u64 iova;
+ int idx;
if (type == AURA_NIX_SQ) {
pool_start = otx2_get_pool_idx(pfvf, type, 0);
@@ -1306,16 +1334,21 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
/* Free SQB and RQB pointers from the aura pool */
for (pool_id = pool_start; pool_id < pool_end; pool_id++) {
- iova = otx2_aura_allocptr(pfvf, pool_id);
pool = &pfvf->qset.pool[pool_id];
+ iova = otx2_aura_allocptr(pfvf, pool_id);
while (iova) {
if (type == AURA_NIX_RQ)
iova -= OTX2_HEAD_ROOM;
-
otx2_free_bufs(pfvf, pool, iova, size);
-
iova = otx2_aura_allocptr(pfvf, pool_id);
}
+
+ for (idx = 0 ; idx < pool->xdp_cnt; idx++) {
+ if (!pool->xdp[idx])
+ continue;
+
+ xsk_buff_free(pool->xdp[idx]);
+ }
}
}
@@ -1332,7 +1365,8 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf)
qmem_free(pfvf->dev, pool->stack);
qmem_free(pfvf->dev, pool->fc_addr);
page_pool_destroy(pool->page_pool);
- pool->page_pool = NULL;
+ devm_kfree(pfvf->dev, pool->xdp);
+ pool->xsk_pool = NULL;
}
devm_kfree(pfvf->dev, pfvf->qset.pool);
pfvf->qset.pool = NULL;
@@ -1419,6 +1453,7 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
int stack_pages, int numptrs, int buf_size, int type)
{
struct page_pool_params pp_params = { 0 };
+ struct xsk_buff_pool *xsk_pool;
struct npa_aq_enq_req *aq;
struct otx2_pool *pool;
int err;
@@ -1462,21 +1497,35 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
aq->ctype = NPA_AQ_CTYPE_POOL;
aq->op = NPA_AQ_INSTOP_INIT;
- if (type != AURA_NIX_RQ) {
- pool->page_pool = NULL;
+ if (type != AURA_NIX_RQ)
+ return 0;
+
+ if (!test_bit(pool_id, pfvf->af_xdp_zc_qidx)) {
+ pp_params.order = get_order(buf_size);
+ pp_params.flags = PP_FLAG_DMA_MAP;
+ pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
+ pp_params.nid = NUMA_NO_NODE;
+ pp_params.dev = pfvf->dev;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+ pool->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool->page_pool)) {
+ netdev_err(pfvf->netdev, "Creation of page pool failed\n");
+ return PTR_ERR(pool->page_pool);
+ }
return 0;
}
- pp_params.order = get_order(buf_size);
- pp_params.flags = PP_FLAG_DMA_MAP;
- pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
- pp_params.nid = NUMA_NO_NODE;
- pp_params.dev = pfvf->dev;
- pp_params.dma_dir = DMA_FROM_DEVICE;
- pool->page_pool = page_pool_create(&pp_params);
- if (IS_ERR(pool->page_pool)) {
- netdev_err(pfvf->netdev, "Creation of page pool failed\n");
- return PTR_ERR(pool->page_pool);
+ /* Set XSK pool to support AF_XDP zero-copy */
+ xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, pool_id);
+ if (xsk_pool) {
+ pool->xsk_pool = xsk_pool;
+ pool->xdp_cnt = numptrs;
+ pool->xdp = devm_kcalloc(pfvf->dev,
+ numptrs, sizeof(struct xdp_buff *), GFP_KERNEL);
+ if (!pool->xdp) {
+ netdev_err(pfvf->netdev, "Creation of xsk pool failed\n");
+ return -ENOMEM;
+ }
}
return 0;
@@ -1537,9 +1586,18 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
}
for (ptr = 0; ptr < num_sqbs; ptr++) {
- err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
- if (err)
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
+ if (err) {
+ if (pool->xsk_pool) {
+ ptr--;
+ while (ptr >= 0) {
+ xsk_buff_free(pool->xdp[ptr]);
+ ptr--;
+ }
+ }
goto err_mem;
+ }
+
pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
}
@@ -1589,11 +1647,19 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
/* Allocate pointers and free them to aura/pool */
for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
pool = &pfvf->qset.pool[pool_id];
+
for (ptr = 0; ptr < num_ptrs; ptr++) {
- err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
- if (err)
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
+ if (err) {
+ if (pool->xsk_pool) {
+ while (ptr)
+ xsk_buff_free(pool->xdp[--ptr]);
+ }
return -ENOMEM;
+ }
+
pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
+ pool->xsk_pool ? bufptr :
bufptr + OTX2_HEAD_ROOM);
}
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 65814e3dc93f..1e88422825be 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -21,6 +21,7 @@
#include <linux/time64.h>
#include <linux/dim.h>
#include <uapi/linux/if_macsec.h>
+#include <net/page_pool/helpers.h>
#include <mbox.h>
#include <npc.h>
@@ -128,6 +129,12 @@ enum otx2_errcodes_re {
ERRCODE_IL4_CSUM = 0x22,
};
+enum otx2_xdp_action {
+ OTX2_XDP_TX = BIT(0),
+ OTX2_XDP_REDIRECT = BIT(1),
+ OTX2_AF_XDP_FRAME = BIT(2),
+};
+
struct otx2_dev_stats {
u64 rx_bytes;
u64 rx_frames;
@@ -531,6 +538,8 @@ struct otx2_nic {
/* Inline ipsec */
struct cn10k_ipsec ipsec;
+ /* af_xdp zero-copy */
+ unsigned long *af_xdp_zc_qidx;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -1002,7 +1011,7 @@ void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
void otx2_free_pending_sqe(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- dma_addr_t *dma);
+ dma_addr_t *dma, int qidx, int idx);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
@@ -1032,6 +1041,8 @@ void otx2_pfaf_mbox_destroy(struct otx2_nic *pf);
void otx2_disable_mbox_intr(struct otx2_nic *pf);
void otx2_disable_napi(struct otx2_nic *pf);
irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq);
+int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura);
+int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx);
/* RSS configuration APIs*/
int otx2_rss_init(struct otx2_nic *pfvf);
@@ -1094,7 +1105,8 @@ int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
-bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf,
+ u64 iova, int len, u16 qidx, u16 flags);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
int otx2_handle_ntuple_tc_features(struct net_device *netdev,
netdev_features_t features);
@@ -1175,4 +1187,5 @@ static inline int mcam_entry_cmp(const void *a, const void *b)
dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
struct sk_buff *skb, int seg, int *len);
void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg);
+int otx2_read_free_sqe(struct otx2_nic *pfvf, u16 qidx);
#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 2d53dc77ef1e..010385b29988 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -910,8 +910,12 @@ static int otx2_get_rxfh(struct net_device *dev,
return -ENOENT;
if (indir) {
- for (idx = 0; idx < rss->rss_size; idx++)
+ for (idx = 0; idx < rss->rss_size; idx++) {
+ /* Ignore if the rx queue is AF_XDP zero copy enabled */
+ if (test_bit(rss_ctx->ind_tbl[idx], pfvf->af_xdp_zc_qidx))
+ continue;
indir[idx] = rss_ctx->ind_tbl[idx];
+ }
}
if (rxfh->key)
memcpy(rxfh->key, rss->key, sizeof(rss->key));
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index e1dde93e8af8..cfed9ec5b157 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -27,6 +27,7 @@
#include "qos.h"
#include <rvu_trace.h>
#include "cn10k_ipsec.h"
+#include "otx2_xsk.h"
#define DRV_NAME "rvu_nicpf"
#define DRV_STRING "Marvell RVU NIC Physical Function Driver"
@@ -1662,9 +1663,7 @@ void otx2_free_hw_resources(struct otx2_nic *pf)
struct nix_lf_free_req *free_req;
struct mbox *mbox = &pf->mbox;
struct otx2_cq_queue *cq;
- struct otx2_pool *pool;
struct msg_req *req;
- int pool_id;
int qidx;
/* Ensure all SQE are processed */
@@ -1705,13 +1704,6 @@ void otx2_free_hw_resources(struct otx2_nic *pf)
/* Free RQ buffer pointers*/
otx2_free_aura_ptr(pf, AURA_NIX_RQ);
- for (qidx = 0; qidx < pf->hw.rx_queues; qidx++) {
- pool_id = otx2_get_pool_idx(pf, AURA_NIX_RQ, qidx);
- pool = &pf->qset.pool[pool_id];
- page_pool_destroy(pool->page_pool);
- pool->page_pool = NULL;
- }
-
otx2_free_cq_res(pf);
/* Free all ingress bandwidth profiles allocated */
@@ -2691,7 +2683,6 @@ static int otx2_get_vf_config(struct net_device *netdev, int vf,
static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
int qidx)
{
- struct page *page;
u64 dma_addr;
int err = 0;
@@ -2701,11 +2692,11 @@ static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
if (dma_mapping_error(pf->dev, dma_addr))
return -ENOMEM;
- err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx);
+ err = otx2_xdp_sq_append_pkt(pf, xdpf, dma_addr, xdpf->len,
+ qidx, OTX2_XDP_REDIRECT);
if (!err) {
otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
- page = virt_to_page(xdpf->data);
- put_page(page);
+ xdp_return_frame(xdpf);
return -ENOMEM;
}
return 0;
@@ -2789,6 +2780,8 @@ static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return otx2_xdp_setup(pf, xdp->prog);
+ case XDP_SETUP_XSK_POOL:
+ return otx2_xsk_pool_setup(pf, xdp->xsk.pool, xdp->xsk.queue_id);
default:
return -EINVAL;
}
@@ -2866,6 +2859,7 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_set_vf_vlan = otx2_set_vf_vlan,
.ndo_get_vf_config = otx2_get_vf_config,
.ndo_bpf = otx2_xdp,
+ .ndo_xsk_wakeup = otx2_xsk_wakeup,
.ndo_xdp_xmit = otx2_xdp_xmit,
.ndo_setup_tc = otx2_setup_tc,
.ndo_set_vf_trust = otx2_ndo_set_vf_trust,
@@ -3204,16 +3198,28 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Enable link notifications */
otx2_cgx_config_linkevents(pf, true);
+ pf->af_xdp_zc_qidx = bitmap_zalloc(qcount, GFP_KERNEL);
+ if (!pf->af_xdp_zc_qidx) {
+ err = -ENOMEM;
+ goto err_sriov_cleanup;
+ }
+
#ifdef CONFIG_DCB
err = otx2_dcbnl_set_ops(netdev);
if (err)
- goto err_pf_sriov_init;
+ goto err_free_zc_bmap;
#endif
otx2_qos_init(pf, qos_txqs);
return 0;
+#ifdef CONFIG_DCB
+err_free_zc_bmap:
+ bitmap_free(pf->af_xdp_zc_qidx);
+#endif
+err_sriov_cleanup:
+ otx2_sriov_vfcfg_cleanup(pf);
err_pf_sriov_init:
otx2_shutdown_tc(pf);
err_mcam_flow_del:
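The af_xdp_zc_qidx bitmap allocated in the probe path above is the driver's per-RX-queue record of zero-copy state: it is set/cleared by the XSK pool enable/disable paths and tested wherever a zero-copy queue must be treated specially (RSS table, pool init). A generic sketch of that bookkeeping, with illustrative names standing in for the driver fields:

#include <linux/bitmap.h>

static unsigned long *zc_qidx_map;	/* stands in for pf->af_xdp_zc_qidx */

static int zc_map_init(unsigned int qcount)
{
	zc_qidx_map = bitmap_zalloc(qcount, GFP_KERNEL);
	return zc_qidx_map ? 0 : -ENOMEM;
}

static void zc_queue_set(u16 qidx, bool enable)
{
	if (enable)
		set_bit(qidx, zc_qidx_map);	/* pool enable path */
	else
		clear_bit(qidx, zc_qidx_map);	/* pool disable path */
}

static bool zc_queue_enabled(u16 qidx)
{
	return test_bit(qidx, zc_qidx_map);	/* e.g. RSS table setup */
}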
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 224cef938927..af8cabe828d0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -12,6 +12,7 @@
#include <linux/bpf_trace.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
+#include <net/xdp.h>
#include "otx2_reg.h"
#include "otx2_common.h"
@@ -19,6 +20,7 @@
#include "otx2_txrx.h"
#include "otx2_ptp.h"
#include "cn10k.h"
+#include "otx2_xsk.h"
#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
#define PTP_PORT 0x13F
@@ -29,11 +31,17 @@
DEFINE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled);
+static int otx2_get_free_sqe(struct otx2_snd_queue *sq)
+{
+ return (sq->cons_head - sq->head - 1 + sq->sqe_cnt)
+ & (sq->sqe_cnt - 1);
+}
+
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
struct otx2_cq_queue *cq,
- bool *need_xdp_flush);
+ u32 *metasize, bool *need_xdp_flush);
static void otx2_sq_set_sqe_base(struct otx2_snd_queue *sq,
struct sk_buff *skb)
@@ -96,20 +104,22 @@ static unsigned int frag_num(unsigned int i)
static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
struct otx2_snd_queue *sq,
- struct nix_cqe_tx_s *cqe)
+ struct nix_cqe_tx_s *cqe,
+ int *xsk_frames)
{
struct nix_send_comp_s *snd_comp = &cqe->comp;
struct sg_list *sg;
- struct page *page;
- u64 pa;
sg = &sq->sg[snd_comp->sqe_id];
+ if (sg->flags & OTX2_AF_XDP_FRAME) {
+ (*xsk_frames)++;
+ return;
+ }
- pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]);
- otx2_dma_unmap_page(pfvf, sg->dma_addr[0],
- sg->size[0], DMA_TO_DEVICE);
- page = virt_to_page(phys_to_virt(pa));
- put_page(page);
+ if (sg->flags & OTX2_XDP_REDIRECT)
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[0], sg->size[0], DMA_TO_DEVICE);
+ xdp_return_frame((struct xdp_frame *)sg->skb);
+ sg->skb = (u64)NULL;
}
static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
@@ -326,6 +336,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
struct nix_rx_sg_s *sg = &cqe->sg;
struct sk_buff *skb = NULL;
void *end, *start;
+ u32 metasize = 0;
u64 *seg_addr;
u16 *seg_size;
int seg;
@@ -336,7 +347,8 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
}
if (pfvf->xdp_prog)
- if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, need_xdp_flush))
+ if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq,
+ &metasize, need_xdp_flush))
return;
skb = napi_get_frags(napi);
@@ -368,6 +380,8 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
skb->mark = parse->match_id;
skb_mark_for_recycle(skb);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
napi_gro_frags(napi);
}
@@ -431,6 +445,18 @@ int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
return cnt - cq->pool_ptrs;
}
+static void otx2_zc_submit_pkts(struct otx2_nic *pfvf, struct xsk_buff_pool *xsk_pool,
+ int *xsk_frames, int qidx, int budget)
+{
+ if (*xsk_frames)
+ xsk_tx_completed(xsk_pool, *xsk_frames);
+
+ if (xsk_uses_need_wakeup(xsk_pool))
+ xsk_set_tx_need_wakeup(xsk_pool);
+
+ otx2_zc_napi_handler(pfvf, xsk_pool, qidx, budget);
+}
+
static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq, int budget)
{
@@ -439,16 +465,22 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
struct nix_cqe_tx_s *cqe;
struct net_device *ndev;
int processed_cqe = 0;
+ int xsk_frames = 0;
+
+ qidx = cq->cq_idx - pfvf->hw.rx_queues;
+ sq = &pfvf->qset.sq[qidx];
if (cq->pend_cqe >= budget)
goto process_cqe;
- if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) {
+ if (sq->xsk_pool)
+ otx2_zc_submit_pkts(pfvf, sq->xsk_pool, &xsk_frames,
+ qidx, budget);
return 0;
+ }
process_cqe:
- qidx = cq->cq_idx - pfvf->hw.rx_queues;
- sq = &pfvf->qset.sq[qidx];
while (likely(processed_cqe < budget) && cq->pend_cqe) {
cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
@@ -458,10 +490,8 @@ process_cqe:
break;
}
- qidx = cq->cq_idx - pfvf->hw.rx_queues;
-
if (cq->cq_type == CQ_XDP)
- otx2_xdp_snd_pkt_handler(pfvf, sq, cqe);
+ otx2_xdp_snd_pkt_handler(pfvf, sq, cqe, &xsk_frames);
else
otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[qidx],
cqe, budget, &tx_pkts, &tx_bytes);
@@ -502,6 +532,10 @@ process_cqe:
netif_carrier_ok(ndev))
netif_tx_wake_queue(txq);
}
+
+ if (sq->xsk_pool)
+ otx2_zc_submit_pkts(pfvf, sq->xsk_pool, &xsk_frames, qidx, budget);
+
return 0;
}
@@ -527,9 +561,10 @@ static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_p
int otx2_napi_handler(struct napi_struct *napi, int budget)
{
struct otx2_cq_queue *rx_cq = NULL;
+ struct otx2_cq_queue *cq = NULL;
+ struct otx2_pool *pool = NULL;
struct otx2_cq_poll *cq_poll;
int workdone = 0, cq_idx, i;
- struct otx2_cq_queue *cq;
struct otx2_qset *qset;
struct otx2_nic *pfvf;
int filled_cnt = -1;
@@ -554,6 +589,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
if (rx_cq && rx_cq->pool_ptrs)
filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
+
/* Clear the IRQ */
otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
@@ -566,20 +602,31 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
if (pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED)
otx2_adjust_adaptive_coalese(pfvf, cq_poll);
+ if (likely(cq))
+ pool = &pfvf->qset.pool[cq->cq_idx];
+
if (unlikely(!filled_cnt)) {
struct refill_work *work;
struct delayed_work *dwork;
- work = &pfvf->refill_wrk[cq->cq_idx];
- dwork = &work->pool_refill_work;
- /* Schedule a task if no other task is running */
- if (!cq->refill_task_sched) {
- work->napi = napi;
- cq->refill_task_sched = true;
- schedule_delayed_work(dwork,
- msecs_to_jiffies(100));
+ if (likely(cq)) {
+ work = &pfvf->refill_wrk[cq->cq_idx];
+ dwork = &work->pool_refill_work;
+ /* Schedule a task if no other task is running */
+ if (!cq->refill_task_sched) {
+ work->napi = napi;
+ cq->refill_task_sched = true;
+ schedule_delayed_work(dwork,
+ msecs_to_jiffies(100));
+ }
+ /* Request a wake-up since buffers could not be refilled */
+ if (pool->xsk_pool)
+ xsk_set_rx_need_wakeup(pool->xsk_pool);
}
} else {
+ /* Clear wake-up, since buffers are filled successfully */
+ if (pool && pool->xsk_pool)
+ xsk_clear_rx_need_wakeup(pool->xsk_pool);
/* Re-enable interrupts */
otx2_write64(pfvf,
NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
@@ -1147,7 +1194,7 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
/* Check if there is enough room between producer
* and consumer index.
*/
- free_desc = (sq->cons_head - sq->head - 1 + sq->sqe_cnt) & (sq->sqe_cnt - 1);
+ free_desc = otx2_get_free_sqe(sq);
if (free_desc < sq->sqe_thresh)
return false;
@@ -1230,15 +1277,19 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int q
u16 pool_id;
u64 iova;
- if (pfvf->xdp_prog)
+ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);
+ pool = &pfvf->qset.pool[pool_id];
+
+ if (pfvf->xdp_prog) {
+ if (pool->page_pool)
+ xdp_rxq_info_unreg_mem_model(&cq->xdp_rxq);
+
xdp_rxq_info_unreg(&cq->xdp_rxq);
+ }
if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
return;
- pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);
- pool = &pfvf->qset.pool[pool_id];
-
while (cq->pend_cqe) {
cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq);
processed_cqe++;
@@ -1359,8 +1410,9 @@ void otx2_free_pending_sqe(struct otx2_nic *pfvf)
}
}
-static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
- int len, int *offset)
+static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq,
+ struct xdp_frame *xdpf,
+ u64 dma_addr, int len, int *offset, u16 flags)
{
struct nix_sqe_sg_s *sg = NULL;
u64 *iova = NULL;
@@ -1377,16 +1429,34 @@ static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
sq->sg[sq->head].dma_addr[0] = dma_addr;
sq->sg[sq->head].size[0] = len;
sq->sg[sq->head].num_segs = 1;
+ sq->sg[sq->head].flags = flags;
+ sq->sg[sq->head].skb = (u64)xdpf;
}
-bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
+int otx2_read_free_sqe(struct otx2_nic *pfvf, u16 qidx)
+{
+ struct otx2_snd_queue *sq;
+ int free_sqe;
+
+ sq = &pfvf->qset.sq[qidx];
+ free_sqe = otx2_get_free_sqe(sq);
+ if (free_sqe < sq->sqe_thresh) {
+ netdev_warn(pfvf->netdev, "No free sqe for Send queue%d\n", qidx);
+ return 0;
+ }
+
+ return free_sqe - sq->sqe_thresh;
+}
+
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf,
+ u64 iova, int len, u16 qidx, u16 flags)
{
struct nix_sqe_hdr_s *sqe_hdr;
struct otx2_snd_queue *sq;
int offset, free_sqe;
sq = &pfvf->qset.sq[qidx];
- free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
+ free_sqe = otx2_get_free_sqe(sq);
if (free_sqe < sq->sqe_thresh)
return false;
@@ -1405,7 +1475,7 @@ bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
offset = sizeof(*sqe_hdr);
- otx2_xdp_sqe_add_sg(sq, iova, len, &offset);
+ otx2_xdp_sqe_add_sg(sq, xdpf, iova, len, &offset, flags);
sqe_hdr->sizem1 = (offset / 16) - 1;
pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
@@ -1416,16 +1486,30 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
struct otx2_cq_queue *cq,
- bool *need_xdp_flush)
+ u32 *metasize, bool *need_xdp_flush)
{
+ struct xdp_buff xdp, *xsk_buff = NULL;
unsigned char *hard_start;
+ struct otx2_pool *pool;
+ struct xdp_frame *xdpf;
int qidx = cq->cq_idx;
- struct xdp_buff xdp;
struct page *page;
u64 iova, pa;
u32 act;
int err;
+ pool = &pfvf->qset.pool[qidx];
+
+ if (pool->xsk_pool) {
+ xsk_buff = pool->xdp[--cq->rbpool->xdp_top];
+ if (!xsk_buff)
+ return false;
+
+ xsk_buff->data_end = xsk_buff->data + cqe->sg.seg_size;
+ act = bpf_prog_run_xdp(prog, xsk_buff);
+ goto handle_xdp_verdict;
+ }
+
iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
page = virt_to_page(phys_to_virt(pa));
@@ -1434,41 +1518,64 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
hard_start = (unsigned char *)phys_to_virt(pa);
xdp_prepare_buff(&xdp, hard_start, OTX2_HEAD_ROOM,
- cqe->sg.seg_size, false);
+ cqe->sg.seg_size, true);
act = bpf_prog_run_xdp(prog, &xdp);
+handle_xdp_verdict:
switch (act) {
case XDP_PASS:
+ *metasize = xdp.data - xdp.data_meta;
break;
case XDP_TX:
qidx += pfvf->hw.tx_queues;
cq->pool_ptrs++;
- return otx2_xdp_sq_append_pkt(pfvf, iova,
- cqe->sg.seg_size, qidx);
+ xdpf = xdp_convert_buff_to_frame(&xdp);
+ return otx2_xdp_sq_append_pkt(pfvf, xdpf,
+ cqe->sg.seg_addr,
+ cqe->sg.seg_size,
+ qidx, OTX2_XDP_TX);
case XDP_REDIRECT:
cq->pool_ptrs++;
- err = xdp_do_redirect(pfvf->netdev, &xdp, prog);
+ if (xsk_buff) {
+ err = xdp_do_redirect(pfvf->netdev, xsk_buff, prog);
+ if (!err) {
+ *need_xdp_flush = true;
+ return true;
+ }
+ return false;
+ }
- otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
- DMA_FROM_DEVICE);
+ err = xdp_do_redirect(pfvf->netdev, &xdp, prog);
if (!err) {
*need_xdp_flush = true;
return true;
}
- put_page(page);
+
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ xdpf = xdp_convert_buff_to_frame(&xdp);
+ xdp_return_frame(xdpf);
break;
default:
bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act);
break;
case XDP_ABORTED:
+ if (xsk_buff)
+ xsk_buff_free(xsk_buff);
trace_xdp_exception(pfvf->netdev, prog, act);
break;
case XDP_DROP:
- otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
- DMA_FROM_DEVICE);
- put_page(page);
cq->pool_ptrs++;
+ if (xsk_buff) {
+ xsk_buff_free(xsk_buff);
+ } else if (page->pp) {
+ page_pool_recycle_direct(pool->page_pool, page);
+ } else {
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ }
return true;
}
return false;
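The new otx2_get_free_sqe()/otx2_read_free_sqe() helpers above are standard power-of-two ring arithmetic: one slot is deliberately left unused so that head == cons_head always means "empty". A quick worked check with illustrative values:

/* sqe_cnt = 16 (power of two), head = 14 (producer), cons_head = 3 (consumer):
 *   used = (14 - 3) & 15 = 11
 *   free = (3 - 14 - 1 + 16) & 15 = 4    ->  16 - 11 - 1
 * Empty ring, head == cons_head == 0:
 *   free = (0 - 0 - 1 + 16) & 15 = 15    ->  at most sqe_cnt - 1, never sqe_cnt
 */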
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index d23810963fdb..acf259d72008 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -12,6 +12,7 @@
#include <linux/iommu.h>
#include <linux/if_vlan.h>
#include <net/xdp.h>
+#include <net/xdp_sock_drv.h>
#define LBK_CHAN_BASE 0x000
#define SDP_CHAN_BASE 0x700
@@ -76,6 +77,7 @@ struct otx2_rcv_queue {
struct sg_list {
u16 num_segs;
+ u16 flags;
u64 skb;
u64 size[OTX2_MAX_FRAGS_IN_SQE];
u64 dma_addr[OTX2_MAX_FRAGS_IN_SQE];
@@ -104,6 +106,8 @@ struct otx2_snd_queue {
/* SQE ring and CPT response queue for Inline IPSEC */
struct qmem *sqe_ring;
struct qmem *cpt_resp;
+ /* Buffer pool for af_xdp zero-copy */
+ struct xsk_buff_pool *xsk_pool;
} ____cacheline_aligned_in_smp;
enum cq_type {
@@ -127,7 +131,11 @@ struct otx2_pool {
struct qmem *stack;
struct qmem *fc_addr;
struct page_pool *page_pool;
+ struct xsk_buff_pool *xsk_pool;
+ struct xdp_buff **xdp;
+ u16 xdp_cnt;
u16 rbsize;
+ u16 xdp_top;
};
struct otx2_cq_queue {
@@ -144,6 +152,7 @@ struct otx2_cq_queue {
void *cqe_base;
struct qmem *cqe;
struct otx2_pool *rbpool;
+ bool xsk_zc_en;
struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;
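The xdp[]/xdp_top pair added to struct otx2_pool above acts as a simple per-pool stack of in-flight xsk buffers: the refill path pushes each buffer it hands to hardware, and the RX XDP handler pops them back. A sketch of just that stack discipline, assuming the struct otx2_pool layout declared above (the helpers are illustrative; in the driver the push lives in otx2_xsk_pool_alloc_buf() and the pop in otx2_xdp_rcv_pkt_handler(), both running in NAPI context):

static void otx2_pool_push_xsk(struct otx2_pool *pool, struct xdp_buff *xdp)
{
	pool->xdp[pool->xdp_top++] = xdp;
}

static struct xdp_buff *otx2_pool_pop_xsk(struct otx2_pool *pool)
{
	return pool->xdp_top ? pool->xdp[--pool->xdp_top] : NULL;
}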
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index e926c6ce96cf..7ef3ba477d49 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -722,15 +722,27 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_shutdown_tc;
+ vf->af_xdp_zc_qidx = bitmap_zalloc(qcount, GFP_KERNEL);
+ if (!vf->af_xdp_zc_qidx) {
+ err = -ENOMEM;
+ goto err_unreg_devlink;
+ }
+
#ifdef CONFIG_DCB
err = otx2_dcbnl_set_ops(netdev);
if (err)
- goto err_shutdown_tc;
+ goto err_free_zc_bmap;
#endif
otx2_qos_init(vf, qos_txqs);
return 0;
+#ifdef CONFIG_DCB
+err_free_zc_bmap:
+ bitmap_free(vf->af_xdp_zc_qidx);
+#endif
+err_unreg_devlink:
+ otx2_unregister_dl(vf);
err_shutdown_tc:
otx2_shutdown_tc(vf);
err_unreg_netdev:
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c
new file mode 100644
index 000000000000..ce10caea8511
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#include <linux/bpf_trace.h>
+#include <linux/stringify.h>
+#include <net/xdp_sock_drv.h>
+#include <net/xdp.h>
+
+#include "otx2_common.h"
+#include "otx2_xsk.h"
+
+int otx2_xsk_pool_alloc_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma, int idx)
+{
+ struct xdp_buff *xdp;
+ int delta;
+
+ xdp = xsk_buff_alloc(pool->xsk_pool);
+ if (!xdp)
+ return -ENOMEM;
+
+ pool->xdp[pool->xdp_top++] = xdp;
+ *dma = OTX2_DATA_ALIGN(xsk_buff_xdp_get_dma(xdp));
+ /* Adjust xdp->data for unaligned addresses */
+ delta = *dma - xsk_buff_xdp_get_dma(xdp);
+ xdp->data += delta;
+
+ return 0;
+}
+
+static int otx2_xsk_ctx_disable(struct otx2_nic *pfvf, u16 qidx, int aura_id)
+{
+ struct nix_cn10k_aq_enq_req *cn10k_rq_aq;
+ struct npa_aq_enq_req *aura_aq;
+ struct npa_aq_enq_req *pool_aq;
+ struct nix_aq_enq_req *rq_aq;
+
+ if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
+ cn10k_rq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!cn10k_rq_aq)
+ return -ENOMEM;
+ cn10k_rq_aq->qidx = qidx;
+ cn10k_rq_aq->rq.ena = 0;
+ cn10k_rq_aq->rq_mask.ena = 1;
+ cn10k_rq_aq->ctype = NIX_AQ_CTYPE_RQ;
+ cn10k_rq_aq->op = NIX_AQ_INSTOP_WRITE;
+ } else {
+ rq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!rq_aq)
+ return -ENOMEM;
+ rq_aq->qidx = qidx;
+ rq_aq->sq.ena = 0;
+ rq_aq->sq_mask.ena = 1;
+ rq_aq->ctype = NIX_AQ_CTYPE_RQ;
+ rq_aq->op = NIX_AQ_INSTOP_WRITE;
+ }
+
+ aura_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!aura_aq)
+ goto fail;
+
+ aura_aq->aura_id = aura_id;
+ aura_aq->aura.ena = 0;
+ aura_aq->aura_mask.ena = 1;
+ aura_aq->ctype = NPA_AQ_CTYPE_AURA;
+ aura_aq->op = NPA_AQ_INSTOP_WRITE;
+
+ pool_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!pool_aq)
+ goto fail;
+
+ pool_aq->aura_id = aura_id;
+ pool_aq->pool.ena = 0;
+ pool_aq->pool_mask.ena = 1;
+
+ pool_aq->ctype = NPA_AQ_CTYPE_POOL;
+ pool_aq->op = NPA_AQ_INSTOP_WRITE;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+
+fail:
+ otx2_mbox_reset(&pfvf->mbox.mbox, 0);
+ return -ENOMEM;
+}
+
+static void otx2_clean_up_rq(struct otx2_nic *pfvf, int qidx)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct otx2_cq_queue *cq;
+ struct otx2_pool *pool;
+ u64 iova;
+
+ /* If the DOWN flag is set SQs are already freed */
+ if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
+ return;
+
+ cq = &qset->cq[qidx];
+ if (cq)
+ otx2_cleanup_rx_cqes(pfvf, cq, qidx);
+
+ pool = &pfvf->qset.pool[qidx];
+ iova = otx2_aura_allocptr(pfvf, qidx);
+ while (iova) {
+ iova -= OTX2_HEAD_ROOM;
+ otx2_free_bufs(pfvf, pool, iova, pfvf->rbsize);
+ iova = otx2_aura_allocptr(pfvf, qidx);
+ }
+
+ mutex_lock(&pfvf->mbox.lock);
+ otx2_xsk_ctx_disable(pfvf, qidx, qidx);
+ mutex_unlock(&pfvf->mbox.lock);
+}
+
+int otx2_xsk_pool_enable(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qidx)
+{
+ u16 rx_queues = pf->hw.rx_queues;
+ u16 tx_queues = pf->hw.tx_queues;
+ int err;
+
+ if (qidx >= rx_queues || qidx >= tx_queues)
+ return -EINVAL;
+
+ err = xsk_pool_dma_map(pool, pf->dev, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+ if (err)
+ return err;
+
+ set_bit(qidx, pf->af_xdp_zc_qidx);
+ otx2_clean_up_rq(pf, qidx);
+ /* Reconfigure RSS table as 'qidx' cannot be part of RSS now */
+ otx2_set_rss_table(pf, DEFAULT_RSS_CONTEXT_GROUP);
+ /* Kick start the NAPI context so that receiving will start */
+ return otx2_xsk_wakeup(pf->netdev, qidx, XDP_WAKEUP_RX);
+}
+
+int otx2_xsk_pool_disable(struct otx2_nic *pf, u16 qidx)
+{
+ struct net_device *netdev = pf->netdev;
+ struct xsk_buff_pool *pool;
+ struct otx2_snd_queue *sq;
+
+ pool = xsk_get_pool_from_qid(netdev, qidx);
+ if (!pool)
+ return -EINVAL;
+
+ sq = &pf->qset.sq[qidx + pf->hw.tx_queues];
+ sq->xsk_pool = NULL;
+ otx2_clean_up_rq(pf, qidx);
+ clear_bit(qidx, pf->af_xdp_zc_qidx);
+ xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+ /* Reconfigure RSS table as 'qidx' needs to be part of RSS again */
+ otx2_set_rss_table(pf, DEFAULT_RSS_CONTEXT_GROUP);
+
+ return 0;
+}
+
+int otx2_xsk_pool_setup(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qidx)
+{
+ if (pool)
+ return otx2_xsk_pool_enable(pf, pool, qidx);
+
+ return otx2_xsk_pool_disable(pf, qidx);
+}
+
+int otx2_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
+{
+ struct otx2_nic *pf = netdev_priv(dev);
+ struct otx2_cq_poll *cq_poll = NULL;
+ struct otx2_qset *qset = &pf->qset;
+
+ if (pf->flags & OTX2_FLAG_INTF_DOWN)
+ return -ENETDOWN;
+
+ if (queue_id >= pf->hw.rx_queues || queue_id >= pf->hw.tx_queues)
+ return -EINVAL;
+
+ cq_poll = &qset->napi[queue_id];
+ if (!cq_poll)
+ return -EINVAL;
+
+ /* Trigger interrupt */
+ if (!napi_if_scheduled_mark_missed(&cq_poll->napi)) {
+ otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx), BIT_ULL(0));
+ otx2_write64(pf, NIX_LF_CINTX_INT_W1S(cq_poll->cint_idx), BIT_ULL(0));
+ }
+
+ return 0;
+}
+
+void otx2_attach_xsk_buff(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, int qidx)
+{
+ if (test_bit(qidx, pfvf->af_xdp_zc_qidx))
+ sq->xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, qidx);
+}
+
+void otx2_zc_napi_handler(struct otx2_nic *pfvf, struct xsk_buff_pool *pool,
+ int queue, int budget)
+{
+ struct xdp_desc *xdp_desc = pool->tx_descs;
+ int err, i, work_done = 0, batch;
+
+ budget = min(budget, otx2_read_free_sqe(pfvf, queue));
+ batch = xsk_tx_peek_release_desc_batch(pool, budget);
+ if (!batch)
+ return;
+
+ for (i = 0; i < batch; i++) {
+ dma_addr_t dma_addr;
+
+ dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc[i].addr);
+ err = otx2_xdp_sq_append_pkt(pfvf, NULL, dma_addr, xdp_desc[i].len,
+ queue, OTX2_AF_XDP_FRAME);
+ if (!err) {
+ netdev_err(pfvf->netdev, "AF_XDP: Unable to transfer packet err%d\n", err);
+ break;
+ }
+ work_done++;
+ }
+
+ if (work_done)
+ xsk_tx_release(pool);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h
new file mode 100644
index 000000000000..8047fafee8fe
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef OTX2_XSK_H
+#define OTX2_XSK_H
+
+struct otx2_nic;
+struct xsk_buff_pool;
+
+int otx2_xsk_pool_setup(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qid);
+int otx2_xsk_pool_enable(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qid);
+int otx2_xsk_pool_disable(struct otx2_nic *pf, u16 qid);
+int otx2_xsk_pool_alloc_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma, int idx);
+int otx2_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
+void otx2_zc_napi_handler(struct otx2_nic *pfvf, struct xsk_buff_pool *pool,
+ int queue, int budget);
+void otx2_attach_xsk_buff(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, int qidx);
+
+#endif /* OTX2_XSK_H */
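For context on what drives otx2_xsk_pool_setup() above: the kernel invokes .ndo_bpf with XDP_SETUP_XSK_POOL when userspace binds an AF_XDP socket (with a registered UMEM) to a queue. A bare-bones userspace sketch, with UMEM/ring setup and error handling omitted:

#include <sys/socket.h>
#include <linux/if_xdp.h>
#include <net/if.h>

static int bind_xsk_zerocopy(const char *ifname, unsigned int queue_id)
{
	struct sockaddr_xdp sxdp = { 0 };
	int fd = socket(AF_XDP, SOCK_RAW, 0);

	if (fd < 0)
		return -1;

	/* XDP_UMEM_REG, XDP_RX_RING/XDP_TX_RING etc. via setsockopt(SOL_XDP, ...)
	 * must happen before bind() for the bind to succeed. */
	sxdp.sxdp_family = AF_XDP;
	sxdp.sxdp_ifindex = if_nametoindex(ifname);
	sxdp.sxdp_queue_id = queue_id;
	sxdp.sxdp_flags = XDP_ZEROCOPY;	/* request the zero-copy path */

	return bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
}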
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
index 9d887bfc3108..c5dbae0e513b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
@@ -82,7 +82,7 @@ static int otx2_qos_sq_aura_pool_init(struct otx2_nic *pfvf, int qidx)
}
for (ptr = 0; ptr < num_sqbs; ptr++) {
- err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
if (err)
goto sqb_free;
pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 440a4c42b405..71ffb55d1fc4 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -396,7 +396,6 @@ static int prestera_port_sfp_bind(struct prestera_port *port)
continue;
port->phylink_pcs.ops = &prestera_pcs_ops;
- port->phylink_pcs.neg_mode = true;
port->phy_config.dev = &port->dev->dev;
port->phy_config.type = PHYLINK_NETDEV;
@@ -635,7 +634,7 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
goto err_dl_port_register;
dev->features |= NETIF_F_HW_TC;
- dev->netns_local = true;
+ dev->netns_immutable = true;
dev->netdev_ops = &prestera_netdev_ops;
dev->ethtool_ops = &prestera_ethtool_ops;
SET_NETDEV_DEV(dev, sw->dev->dev);
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index 95c4405b7d7b..7bfd3f230ff5 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -7,14 +7,6 @@ config NET_VENDOR_MEDIATEK
if NET_VENDOR_MEDIATEK
-config NET_AIROHA
- tristate "Airoha SoC Gigabit Ethernet support"
- depends on NET_DSA || !NET_DSA
- select PAGE_POOL
- help
- This driver supports the gigabit ethernet MACs in the
- Airoha SoC family.
-
config NET_MEDIATEK_SOC_WED
depends on ARCH_MEDIATEK || COMPILE_TEST
def_bool NET_MEDIATEK_SOC != n
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
index ddbb7f4a516c..03e008fbc859 100644
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -11,4 +11,3 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
endif
obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
-obj-$(CONFIG_NET_AIROHA) += airoha_eth.o
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 53485142938c..43197b28b3e7 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -815,12 +815,60 @@ static void mtk_mac_link_up(struct phylink_config *config,
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
+static void mtk_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+
+ mtk_m32(eth, MAC_MCR_EEE100M | MAC_MCR_EEE1G, 0, MTK_MAC_MCR(mac->id));
+}
+
+static int mtk_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+ u32 val;
+
+ /* Tx idle timer in ms */
+ timer = DIV_ROUND_UP(timer, 1000);
+
+ /* If the timer is zero, then set LPI_MODE, which allows the
+ * system to enter LPI mode immediately rather than waiting for
+ * the LPI threshold.
+ */
+ if (!timer)
+ val = MAC_EEE_LPI_MODE;
+ else if (FIELD_FIT(MAC_EEE_LPI_TXIDLE_THD, timer))
+ val = FIELD_PREP(MAC_EEE_LPI_TXIDLE_THD, timer);
+ else
+ val = MAC_EEE_LPI_TXIDLE_THD;
+
+ if (tx_clk_stop)
+ val |= MAC_EEE_CKG_TXIDLE;
+
+ /* PHY Wake-up time, this field does not have a reset value, so use the
+ * reset value from MT7531 (36us for 100M and 17us for 1000M).
+ */
+ val |= FIELD_PREP(MAC_EEE_WAKEUP_TIME_1000, 17) |
+ FIELD_PREP(MAC_EEE_WAKEUP_TIME_100, 36);
+
+ mtk_w32(eth, val, MTK_MAC_EEECR(mac->id));
+ mtk_m32(eth, 0, MAC_MCR_EEE100M | MAC_MCR_EEE1G, MTK_MAC_MCR(mac->id));
+
+ return 0;
+}
+
static const struct phylink_mac_ops mtk_phylink_ops = {
.mac_select_pcs = mtk_mac_select_pcs,
.mac_config = mtk_mac_config,
.mac_finish = mtk_mac_finish,
.mac_link_down = mtk_mac_link_down,
.mac_link_up = mtk_mac_link_up,
+ .mac_disable_tx_lpi = mtk_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = mtk_mac_enable_tx_lpi,
};
static int mtk_mdio_init(struct mtk_eth *eth)
@@ -830,17 +878,12 @@ static int mtk_mdio_init(struct mtk_eth *eth)
int ret;
u32 val;
- mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
+ mii_np = of_get_available_child_by_name(eth->dev->of_node, "mdio-bus");
if (!mii_np) {
dev_err(eth->dev, "no %s child node found", "mdio-bus");
return -ENODEV;
}
- if (!of_device_is_available(mii_np)) {
- ret = -ENODEV;
- goto err_put_node;
- }
-
eth->mii_bus = devm_mdiobus_alloc(eth->dev);
if (!eth->mii_bus) {
ret = -ENOMEM;
@@ -2079,7 +2122,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
if (ring->page_pool) {
struct page *page = virt_to_head_page(data);
struct xdp_buff xdp;
- u32 ret;
+ u32 ret, metasize;
new_data = mtk_page_pool_get_buff(ring->page_pool,
&dma_addr,
@@ -2095,7 +2138,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
- false);
+ true);
xdp_buff_clear_frags_flag(&xdp);
ret = mtk_xdp_run(eth, ring, &xdp, netdev);
@@ -2115,6 +2158,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb_reserve(skb, xdp.data - xdp.data_hard_start);
skb_put(skb, xdp.data_end - xdp.data);
+ metasize = xdp.data - xdp.data_meta;
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb_mark_for_recycle(skb);
} else {
if (ring->frag_size <= PAGE_SIZE)
@@ -4474,6 +4520,20 @@ static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
return phylink_ethtool_set_pauseparam(mac->phylink, pause);
}
+static int mtk_get_eee(struct net_device *dev, struct ethtool_keee *eee)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ return phylink_ethtool_get_eee(mac->phylink, eee);
+}
+
+static int mtk_set_eee(struct net_device *dev, struct ethtool_keee *eee)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ return phylink_ethtool_set_eee(mac->phylink, eee);
+}
+
static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
@@ -4506,6 +4566,8 @@ static const struct ethtool_ops mtk_ethtool_ops = {
.set_pauseparam = mtk_set_pauseparam,
.get_rxnfc = mtk_get_rxnfc,
.set_rxnfc = mtk_set_rxnfc,
+ .get_eee = mtk_get_eee,
+ .set_eee = mtk_set_eee,
};
static const struct net_device_ops mtk_netdev_ops = {
@@ -4615,6 +4677,9 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->phylink_config.type = PHYLINK_NETDEV;
mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
+ mac->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD |
+ MAC_2500FD;
+ mac->phylink_config.lpi_timer_default = 1000;
/* MT7623 gmac0 is now missing its speed-specific PLL configuration
* in its .mac_config method (since state->speed is not valid there.
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 0d5225f1d3ee..90a377ab4359 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -453,6 +453,8 @@
#define MAC_MCR_RX_FIFO_CLR_DIS BIT(12)
#define MAC_MCR_BACKOFF_EN BIT(9)
#define MAC_MCR_BACKPR_EN BIT(8)
+#define MAC_MCR_EEE1G BIT(7)
+#define MAC_MCR_EEE100M BIT(6)
#define MAC_MCR_FORCE_RX_FC BIT(5)
#define MAC_MCR_FORCE_TX_FC BIT(4)
#define MAC_MCR_SPEED_1000 BIT(3)
@@ -461,6 +463,15 @@
#define MAC_MCR_FORCE_LINK BIT(0)
#define MAC_MCR_FORCE_LINK_DOWN (MAC_MCR_FORCE_MODE)
+/* Mac EEE control registers */
+#define MTK_MAC_EEECR(x) (0x10104 + (x * 0x100))
+#define MAC_EEE_WAKEUP_TIME_1000 GENMASK(31, 24)
+#define MAC_EEE_WAKEUP_TIME_100 GENMASK(23, 16)
+#define MAC_EEE_LPI_TXIDLE_THD GENMASK(15, 8)
+#define MAC_EEE_CKG_TXIDLE BIT(3)
+#define MAC_EEE_CKG_RXLPI BIT(2)
+#define MAC_EEE_LPI_MODE BIT(0)
+
/* Mac status registers */
#define MTK_MAC_MSR(x) (0x10108 + (x * 0x100))
#define MAC_MSR_EEE1G BIT(7)
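The EEE programming added in mtk_mac_enable_tx_lpi() above packs the millisecond idle threshold into the MAC_EEE_LPI_TXIDLE_THD field defined here; a quick worked example of the bitfield helpers involved:

/* With MAC_EEE_LPI_TXIDLE_THD = GENMASK(15, 8) (an 8-bit field):
 *   FIELD_FIT(MAC_EEE_LPI_TXIDLE_THD, 17)  -> true, since 17 <= 0xff
 *   FIELD_PREP(MAC_EEE_LPI_TXIDLE_THD, 17) -> 17 << 8 = 0x1100
 * so a 17 ms TX idle threshold lands in bits 15:8 of MTK_MAC_EEECR(x);
 * values that do not fit are clamped to the field maximum by the driver.
 */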
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index f20bb390df3a..c855fb799ce1 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -34,8 +34,10 @@ struct mtk_flow_data {
u16 vlan_in;
struct {
- u16 id;
- __be16 proto;
+ struct {
+ u16 id;
+ __be16 proto;
+ } vlans[2];
u8 num;
} vlan;
struct {
@@ -349,18 +351,19 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
case FLOW_ACTION_CSUM:
break;
case FLOW_ACTION_VLAN_PUSH:
- if (data.vlan.num == 1 ||
+ if (data.vlan.num + data.pppoe.num == 2 ||
act->vlan.proto != htons(ETH_P_8021Q))
return -EOPNOTSUPP;
- data.vlan.id = act->vlan.vid;
- data.vlan.proto = act->vlan.proto;
+ data.vlan.vlans[data.vlan.num].id = act->vlan.vid;
+ data.vlan.vlans[data.vlan.num].proto = act->vlan.proto;
data.vlan.num++;
break;
case FLOW_ACTION_VLAN_POP:
break;
case FLOW_ACTION_PPPOE_PUSH:
- if (data.pppoe.num == 1)
+ if (data.pppoe.num == 1 ||
+ data.vlan.num == 2)
return -EOPNOTSUPP;
data.pppoe.sid = act->pppoe.sid;
@@ -450,12 +453,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
foe.bridge.vlan = data.vlan_in;
- if (data.vlan.num == 1) {
- if (data.vlan.proto != htons(ETH_P_8021Q))
- return -EOPNOTSUPP;
+ for (i = 0; i < data.vlan.num; i++)
+ mtk_foe_entry_set_vlan(eth, &foe, data.vlan.vlans[i].id);
- mtk_foe_entry_set_vlan(eth, &foe, data.vlan.id);
- }
if (data.pppoe.num == 1)
mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid);
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 25989c79c92e..76f202d7f055 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -1427,15 +1427,10 @@ static int mtk_star_mdio_init(struct net_device *ndev)
of_node = dev->of_node;
- mdio_node = of_get_child_by_name(of_node, "mdio");
+ mdio_node = of_get_available_child_by_name(of_node, "mdio");
if (!mdio_node)
return -ENODEV;
- if (!of_device_is_available(mdio_node)) {
- ret = -ENODEV;
- goto out_put_node;
- }
-
priv->mii = devm_mdiobus_alloc(dev);
if (!priv->mii) {
ret = -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index b330020dc0d6..07b061a97a6e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -526,28 +526,6 @@ out:
return res;
}
-u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones, u32 uid, u32 obj, u32 count)
-{
- struct mlx4_zone_entry *zone;
- int res = 0;
-
- spin_lock(&zones->lock);
-
- zone = __mlx4_find_zone_by_uid(zones, uid);
-
- if (NULL == zone) {
- res = -1;
- goto out;
- }
-
- __mlx4_free_from_zone(zone, obj, count);
-
-out:
- spin_unlock(&zones->lock);
-
- return res;
-}
-
u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count)
{
struct mlx4_zone_entry *zone;
@@ -682,9 +660,9 @@ static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
}
static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
- struct mlx4_db *db, int order)
+ struct mlx4_db *db, unsigned int order)
{
- int o;
+ unsigned int o;
int i;
for (o = order; o <= 1; ++o) {
@@ -712,7 +690,7 @@ found:
return 0;
}
-int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, unsigned int order)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_db_pgdir *pgdir;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 15c57e9517e9..b33285d755b9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -48,60 +48,43 @@
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_checksum.h>
#endif
+#include <net/page_pool/helpers.h>
#include "mlx4_en.h"
-static int mlx4_alloc_page(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_alloc *frag,
- gfp_t gfp)
-{
- struct page *page;
- dma_addr_t dma;
-
- page = alloc_page(gfp);
- if (unlikely(!page))
- return -ENOMEM;
- dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE, priv->dma_dir);
- if (unlikely(dma_mapping_error(priv->ddev, dma))) {
- __free_page(page);
- return -ENOMEM;
- }
- frag->page = page;
- frag->dma = dma;
- frag->page_offset = priv->rx_headroom;
- return 0;
-}
-
static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring,
struct mlx4_en_rx_desc *rx_desc,
struct mlx4_en_rx_alloc *frags,
gfp_t gfp)
{
+ dma_addr_t dma;
int i;
for (i = 0; i < priv->num_frags; i++, frags++) {
if (!frags->page) {
- if (mlx4_alloc_page(priv, frags, gfp)) {
+ frags->page = page_pool_alloc_pages(ring->pp, gfp);
+ if (!frags->page) {
ring->alloc_fail++;
return -ENOMEM;
}
+ page_pool_fragment_page(frags->page, 1);
+ frags->page_offset = priv->rx_headroom;
+
ring->rx_alloc_pages++;
}
- rx_desc->data[i].addr = cpu_to_be64(frags->dma +
- frags->page_offset);
+ dma = page_pool_get_dma_addr(frags->page);
+ rx_desc->data[i].addr = cpu_to_be64(dma + frags->page_offset);
}
return 0;
}
static void mlx4_en_free_frag(const struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring,
struct mlx4_en_rx_alloc *frag)
{
- if (frag->page) {
- dma_unmap_page(priv->ddev, frag->dma,
- PAGE_SIZE, priv->dma_dir);
- __free_page(frag->page);
- }
+ if (frag->page)
+ page_pool_put_full_page(ring->pp, frag->page, false);
/* We need to clear all fields, otherwise a change of priv->log_rx_info
* could lead to see garbage later in frag->page.
*/
@@ -141,18 +124,6 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
(index << ring->log_stride);
struct mlx4_en_rx_alloc *frags = ring->rx_info +
(index << priv->log_rx_info);
- if (likely(ring->page_cache.index > 0)) {
- /* XDP uses a single page per frame */
- if (!frags->page) {
- ring->page_cache.index--;
- frags->page = ring->page_cache.buf[ring->page_cache.index].page;
- frags->dma = ring->page_cache.buf[ring->page_cache.index].dma;
- }
- frags->page_offset = XDP_PACKET_HEADROOM;
- rx_desc->data[0].addr = cpu_to_be64(frags->dma +
- XDP_PACKET_HEADROOM);
- return 0;
- }
return mlx4_en_alloc_frags(priv, ring, rx_desc, frags, gfp);
}
@@ -178,7 +149,7 @@ static void mlx4_en_free_rx_desc(const struct mlx4_en_priv *priv,
frags = ring->rx_info + (index << priv->log_rx_info);
for (nr = 0; nr < priv->num_frags; nr++) {
en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
- mlx4_en_free_frag(priv, frags + nr);
+ mlx4_en_free_frag(priv, ring, frags + nr);
}
}
@@ -268,6 +239,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
u32 size, u16 stride, int node, int queue_index)
{
struct mlx4_en_dev *mdev = priv->mdev;
+ struct page_pool_params pp = {};
struct mlx4_en_rx_ring *ring;
int err = -ENOMEM;
int tmp;
@@ -286,9 +258,26 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
ring->log_stride = ffs(ring->stride) - 1;
ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
- if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0)
+ pp.flags = PP_FLAG_DMA_MAP;
+ pp.pool_size = size * DIV_ROUND_UP(priv->rx_skb_size, PAGE_SIZE);
+ pp.nid = node;
+ pp.napi = &priv->rx_cq[queue_index]->napi;
+ pp.netdev = priv->dev;
+ pp.dev = &mdev->dev->persist->pdev->dev;
+ pp.dma_dir = priv->dma_dir;
+
+ ring->pp = page_pool_create(&pp);
+ if (!ring->pp)
goto err_ring;
+ if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0)
+ goto err_pp;
+
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ ring->pp);
+ if (err)
+ goto err_xdp_info;
+
tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
sizeof(struct mlx4_en_rx_alloc));
ring->rx_info = kvzalloc_node(tmp, GFP_KERNEL, node);
@@ -319,6 +308,8 @@ err_info:
ring->rx_info = NULL;
err_xdp_info:
xdp_rxq_info_unreg(&ring->xdp_rxq);
+err_pp:
+ page_pool_destroy(ring->pp);
err_ring:
kfree(ring);
*pring = NULL;
@@ -409,26 +400,6 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
}
}
-/* When the rx ring is running in page-per-packet mode, a released frame can go
- * directly into a small cache, to avoid unmapping or touching the page
- * allocator. In bpf prog performance scenarios, buffers are either forwarded
- * or dropped, never converted to skbs, so every page can come directly from
- * this cache when it is sized to be a multiple of the napi budget.
- */
-bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
- struct mlx4_en_rx_alloc *frame)
-{
- struct mlx4_en_page_cache *cache = &ring->page_cache;
-
- if (cache->index >= MLX4_EN_CACHE_SIZE)
- return false;
-
- cache->buf[cache->index].page = frame->page;
- cache->buf[cache->index].dma = frame->dma;
- cache->index++;
- return true;
-}
-
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
u32 size, u16 stride)
@@ -445,6 +416,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
xdp_rxq_info_unreg(&ring->xdp_rxq);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
kvfree(ring->rx_info);
+ page_pool_destroy(ring->pp);
ring->rx_info = NULL;
kfree(ring);
*pring = NULL;
@@ -453,14 +425,6 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring)
{
- int i;
-
- for (i = 0; i < ring->page_cache.index; i++) {
- dma_unmap_page(priv->ddev, ring->page_cache.buf[i].dma,
- PAGE_SIZE, priv->dma_dir);
- put_page(ring->page_cache.buf[i].page);
- }
- ring->page_cache.index = 0;
mlx4_en_free_rx_buf(priv, ring);
if (ring->stride <= TXBB_SIZE)
ring->buf -= TXBB_SIZE;
@@ -487,7 +451,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
if (unlikely(!page))
goto fail;
- dma = frags->dma;
+ dma = page_pool_get_dma_addr(page);
dma_sync_single_range_for_cpu(priv->ddev, dma, frags->page_offset,
frag_size, priv->dma_dir);
@@ -498,6 +462,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
if (frag_info->frag_stride == PAGE_SIZE / 2) {
frags->page_offset ^= PAGE_SIZE / 2;
release = page_count(page) != 1 ||
+ atomic_long_read(&page->pp_ref_count) != 1 ||
page_is_pfmemalloc(page) ||
page_to_nid(page) != numa_mem_id();
} else if (!priv->rx_headroom) {
@@ -511,10 +476,9 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
release = frags->page_offset + frag_info->frag_size > PAGE_SIZE;
}
if (release) {
- dma_unmap_page(priv->ddev, dma, PAGE_SIZE, priv->dma_dir);
frags->page = NULL;
} else {
- page_ref_inc(page);
+ page_pool_ref_page(page);
}
nr++;
@@ -784,7 +748,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Get pointer to first fragment since we haven't
* skb yet and cast it to ethhdr struct
*/
- dma = frags[0].dma + frags[0].page_offset;
+ dma = page_pool_get_dma_addr(frags[0].page);
+ dma += frags[0].page_offset;
dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
DMA_FROM_DEVICE);
@@ -823,7 +788,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
void *orig_data;
u32 act;
- dma = frags[0].dma + frags[0].page_offset;
+ dma = page_pool_get_dma_addr(frags[0].page);
+ dma += frags[0].page_offset;
dma_sync_single_for_cpu(priv->ddev, dma,
priv->frag_info[0].frag_size,
DMA_FROM_DEVICE);
@@ -886,6 +852,7 @@ xdp_drop_no_cnt:
skb = napi_get_frags(&cq->napi);
if (unlikely(!skb))
goto next;
+ skb_mark_for_recycle(skb);
if (unlikely(ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL)) {
u64 timestamp = mlx4_en_get_cqe_ts(cqe);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 1ddb11cb25f9..87f35bcbeff8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -44,6 +44,7 @@
#include <linux/ipv6.h>
#include <linux/indirect_call_wrapper.h>
#include <net/ipv6.h>
+#include <net/page_pool/helpers.h>
#include "mlx4_en.h"
@@ -350,16 +351,10 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
int napi_mode)
{
struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
- struct mlx4_en_rx_alloc frame = {
- .page = tx_info->page,
- .dma = tx_info->map0_dma,
- };
-
- if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
- dma_unmap_page(priv->ddev, tx_info->map0_dma,
- PAGE_SIZE, priv->dma_dir);
- put_page(tx_info->page);
- }
+ struct page_pool *pool = ring->recycle_ring->pp;
+
+ /* Note that napi_mode = 0 means ndo_close() path, not budget = 0 */
+ page_pool_put_full_page(pool, tx_info->page, !!napi_mode);
return tx_info->nr_txbb;
}
@@ -450,6 +445,8 @@ int mlx4_en_process_tx_cq(struct net_device *dev,
if (unlikely(!priv->port_up))
return 0;
+ if (unlikely(!napi_budget) && cq->type == TX_XDP)
+ return 0;
netdev_txq_bql_complete_prefetchw(ring->tx_queue);
@@ -1194,7 +1191,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
data = &tx_desc->data;
- dma = frame->dma;
+ dma = page_pool_get_dma_addr(frame->page);
tx_info->page = frame->page;
frame->page = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index d7d856d1758a..b213094ea30f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1478,12 +1478,6 @@ void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc);
u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count,
int align, u32 skip_mask, u32 *puid);
-/* Free <count> objects, start from <obj> of the uid <uid> from zone_allocator
- * <zones>.
- */
-u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones,
- u32 uid, u32 obj, u32 count);
-
/* If <zones> was allocated with MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP, instead of
* specifying the uid when freeing an object, zone allocator could figure it by
* itself. Other parameters are similar to mlx4_zone_free.
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 28b70dcc652e..ad0d91a75184 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -247,20 +247,11 @@ struct mlx4_en_tx_desc {
struct mlx4_en_rx_alloc {
struct page *page;
- dma_addr_t dma;
u32 page_offset;
};
#define MLX4_EN_CACHE_SIZE (2 * NAPI_POLL_WEIGHT)
-struct mlx4_en_page_cache {
- u32 index;
- struct {
- struct page *page;
- dma_addr_t dma;
- } buf[MLX4_EN_CACHE_SIZE];
-};
-
enum {
MLX4_EN_TX_RING_STATE_RECOVERING,
};
@@ -335,14 +326,14 @@ struct mlx4_en_rx_ring {
u16 stride;
u16 log_stride;
u16 cqn; /* index of port CQ associated with this ring */
+ u8 fcs_del;
u32 prod;
u32 cons;
u32 buf_size;
- u8 fcs_del;
+ struct page_pool *pp;
void *buf;
void *rx_info;
struct bpf_prog __rcu *xdp_prog;
- struct mlx4_en_page_cache page_cache;
unsigned long bytes;
unsigned long packets;
unsigned long csum_ok;
@@ -707,8 +698,6 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
struct mlx4_en_priv *priv, unsigned int length,
int tx_ind, bool *doorbell_pending);
void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring);
-bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
- struct mlx4_en_rx_alloc *frame);
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring,
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 4e43f4a7d246..e3d0b13c1610 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -147,26 +147,6 @@ static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
return err;
}
-int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx)
-{
- struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
- struct mlx4_mac_table *table = &info->mac_table;
- int i;
-
- for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
- if (!table->refs[i])
- continue;
-
- if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
- *idx = i;
- return 0;
- }
- }
-
- return -ENOENT;
-}
-EXPORT_SYMBOL_GPL(mlx4_find_cached_mac);
-
static bool mlx4_need_mf_bond(struct mlx4_dev *dev)
{
int i, num_eth_ports = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index ea6070180c96..6ec7d6e0181d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -31,6 +31,7 @@ config MLX5_CORE_EN
bool "Mellanox 5th generation network adapters (ConnectX series) Ethernet support"
depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE
select PAGE_POOL
+ select PAGE_POOL_STATS
select DIMLIB
help
Ethernet support in Mellanox Technologies ConnectX-4 NIC.
@@ -80,8 +81,8 @@ config MLX5_BRIDGE
default y
help
mlx5 ConnectX offloads support for Ethernet Bridging (BRIDGE).
- Enable adding representors of mlx5 uplink and VF ports to Bridge and
- offloading rules for traffic between such ports. Supports VLANs (trunk and
+ Enable offloading FDB rules from a bridge device containing
+ representors of mlx5 uplink and VF ports. Supports VLANs (trunk and
access modes).
config MLX5_CLS_ACT
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index e733b81e18a2..e53dbdc0a7a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -94,6 +94,11 @@ static u16 in_to_opcode(void *in)
return MLX5_GET(mbox_in, in, opcode);
}
+static u16 in_to_uid(void *in)
+{
+ return MLX5_GET(mbox_in, in, uid);
+}
+
/* Returns true for opcodes that might be triggered very frequently and throttle
* the command interface. Limit their command slots usage.
*/
@@ -823,7 +828,7 @@ static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
opcode = in_to_opcode(in);
op_mod = MLX5_GET(mbox_in, in, op_mod);
- uid = MLX5_GET(mbox_in, in, uid);
+ uid = in_to_uid(in);
status = MLX5_GET(mbox_out, out, status);
if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY &&
@@ -1871,6 +1876,17 @@ static int is_manage_pages(void *in)
return in_to_opcode(in) == MLX5_CMD_OP_MANAGE_PAGES;
}
+static bool mlx5_has_privileged_uid(struct mlx5_core_dev *dev)
+{
+ return !xa_empty(&dev->cmd.vars.privileged_uids);
+}
+
+static bool mlx5_cmd_is_privileged_uid(struct mlx5_core_dev *dev,
+ u16 uid)
+{
+ return !!xa_load(&dev->cmd.vars.privileged_uids, uid);
+}
+
/* Notes:
* 1. Callback functions may not sleep
 * 2. Page queue commands do not support asynchronous completion
@@ -1881,7 +1897,9 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
{
struct mlx5_cmd_msg *inb, *outb;
u16 opcode = in_to_opcode(in);
- bool throttle_op;
+ bool throttle_locked = false;
+ bool unpriv_locked = false;
+ u16 uid = in_to_uid(in);
int pages_queue;
gfp_t gfp;
u8 token;
@@ -1890,12 +1908,17 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode))
return -ENXIO;
- throttle_op = mlx5_cmd_is_throttle_opcode(opcode);
- if (throttle_op) {
- if (callback) {
- if (down_trylock(&dev->cmd.vars.throttle_sem))
- return -EBUSY;
- } else {
+ if (!callback) {
+ /* The semaphore is already held for callback commands. It was
+ * acquired in mlx5_cmd_exec_cb()
+ */
+ if (uid && mlx5_has_privileged_uid(dev)) {
+ if (!mlx5_cmd_is_privileged_uid(dev, uid)) {
+ unpriv_locked = true;
+ down(&dev->cmd.vars.unprivileged_sem);
+ }
+ } else if (mlx5_cmd_is_throttle_opcode(opcode)) {
+ throttle_locked = true;
down(&dev->cmd.vars.throttle_sem);
}
}
@@ -1941,8 +1964,11 @@ out_out:
out_in:
free_msg(dev, inb);
out_up:
- if (throttle_op)
+ if (throttle_locked)
up(&dev->cmd.vars.throttle_sem);
+ if (unpriv_locked)
+ up(&dev->cmd.vars.unprivileged_sem);
+
return err;
}
@@ -2104,18 +2130,22 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work)
struct mlx5_async_work *work = _work;
struct mlx5_async_ctx *ctx;
struct mlx5_core_dev *dev;
- u16 opcode;
+ bool throttle_locked;
+ bool unpriv_locked;
ctx = work->ctx;
dev = ctx->dev;
- opcode = work->opcode;
+ throttle_locked = work->throttle_locked;
+ unpriv_locked = work->unpriv_locked;
status = cmd_status_err(dev, status, work->opcode, work->op_mod, work->out);
work->user_callback(status, work);
/* Can't access "work" from this point on. It could have been freed in
* the callback.
*/
- if (mlx5_cmd_is_throttle_opcode(opcode))
+ if (throttle_locked)
up(&dev->cmd.vars.throttle_sem);
+ if (unpriv_locked)
+ up(&dev->cmd.vars.unprivileged_sem);
if (atomic_dec_and_test(&ctx->num_inflight))
complete(&ctx->inflight_done);
}
@@ -2124,6 +2154,8 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
void *out, int out_size, mlx5_async_cbk_t callback,
struct mlx5_async_work *work)
{
+ struct mlx5_core_dev *dev = ctx->dev;
+ u16 uid;
int ret;
work->ctx = ctx;
@@ -2131,11 +2163,43 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
work->opcode = in_to_opcode(in);
work->op_mod = MLX5_GET(mbox_in, in, op_mod);
work->out = out;
+ work->throttle_locked = false;
+ work->unpriv_locked = false;
+ uid = in_to_uid(in);
+
if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
return -EIO;
- ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
+
+ if (uid && mlx5_has_privileged_uid(dev)) {
+ if (!mlx5_cmd_is_privileged_uid(dev, uid)) {
+ if (down_trylock(&dev->cmd.vars.unprivileged_sem)) {
+ ret = -EBUSY;
+ goto dec_num_inflight;
+ }
+ work->unpriv_locked = true;
+ }
+ } else if (mlx5_cmd_is_throttle_opcode(in_to_opcode(in))) {
+ if (down_trylock(&dev->cmd.vars.throttle_sem)) {
+ ret = -EBUSY;
+ goto dec_num_inflight;
+ }
+ work->throttle_locked = true;
+ }
+
+ ret = cmd_exec(dev, in, in_size, out, out_size,
mlx5_cmd_exec_cb_handler, work, false);
- if (ret && atomic_dec_and_test(&ctx->num_inflight))
+ if (ret)
+ goto sem_up;
+
+ return 0;
+
+sem_up:
+ if (work->throttle_locked)
+ up(&dev->cmd.vars.throttle_sem);
+ if (work->unpriv_locked)
+ up(&dev->cmd.vars.unprivileged_sem);
+dec_num_inflight:
+ if (atomic_dec_and_test(&ctx->num_inflight))
complete(&ctx->inflight_done);
return ret;
@@ -2371,10 +2435,16 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev)
sema_init(&cmd->vars.sem, cmd->vars.max_reg_cmds);
sema_init(&cmd->vars.pages_sem, 1);
sema_init(&cmd->vars.throttle_sem, DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2));
+ sema_init(&cmd->vars.unprivileged_sem,
+ DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2));
+
+ xa_init(&cmd->vars.privileged_uids);
cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
- if (!cmd->pool)
- return -ENOMEM;
+ if (!cmd->pool) {
+ err = -ENOMEM;
+ goto err_destroy_xa;
+ }
err = alloc_cmd_page(dev, cmd);
if (err)
@@ -2408,6 +2478,8 @@ err_cmd_page:
free_cmd_page(dev, cmd);
err_free_pool:
dma_pool_destroy(cmd->pool);
+err_destroy_xa:
+ xa_destroy(&dev->cmd.vars.privileged_uids);
return err;
}
@@ -2420,6 +2492,7 @@ void mlx5_cmd_disable(struct mlx5_core_dev *dev)
destroy_msg_cache(dev);
free_cmd_page(dev, cmd);
dma_pool_destroy(cmd->pool);
+ xa_destroy(&dev->cmd.vars.privileged_uids);
}
void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
@@ -2427,3 +2500,18 @@ void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
{
dev->cmd.state = cmdif_state;
}
+
+int mlx5_cmd_add_privileged_uid(struct mlx5_core_dev *dev, u16 uid)
+{
+ return xa_insert(&dev->cmd.vars.privileged_uids, uid,
+ xa_mk_value(uid), GFP_KERNEL);
+}
+EXPORT_SYMBOL(mlx5_cmd_add_privileged_uid);
+
+void mlx5_cmd_remove_privileged_uid(struct mlx5_core_dev *dev, u16 uid)
+{
+ void *data = xa_erase(&dev->cmd.vars.privileged_uids, uid);
+
+ WARN(!data, "Privileged UID %u does not exist\n", uid);
+}
+EXPORT_SYMBOL(mlx5_cmd_remove_privileged_uid);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 9a79674d27f1..891bbbbfbbf1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -228,8 +228,15 @@ enum {
MLX5_INTERFACE_PROTOCOL_VNET,
MLX5_INTERFACE_PROTOCOL_DPLL,
+ MLX5_INTERFACE_PROTOCOL_FWCTL,
};
+static bool is_fwctl_supported(struct mlx5_core_dev *dev)
+{
+	/* fwctl is most useful on PFs; prevent fwctl on SFs for now */
+ return MLX5_CAP_GEN(dev, uctx_cap) && !mlx5_core_is_sf(dev);
+}
+
static const struct mlx5_adev_device {
const char *suffix;
bool (*is_supported)(struct mlx5_core_dev *dev);
@@ -252,6 +259,8 @@ static const struct mlx5_adev_device {
.is_supported = &is_mp_supported },
[MLX5_INTERFACE_PROTOCOL_DPLL] = { .suffix = "dpll",
.is_supported = &is_dpll_supported },
+ [MLX5_INTERFACE_PROTOCOL_FWCTL] = { .suffix = "fwctl",
+ .is_supported = &is_fwctl_supported },
};
int mlx5_adev_idx_alloc(void)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 98d4306929f3..73cd74644378 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -46,6 +46,9 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
u32 running_fw, stored_fw;
int err;
+ if (!mlx5_core_is_pf(dev))
+ return 0;
+
err = devlink_info_version_fixed_put(req, "fw.psid", dev->board_id);
if (err)
return err;
@@ -324,7 +327,8 @@ static const struct devlink_ops mlx5_devlink_ops = {
.rate_node_tx_max_set = mlx5_esw_devlink_rate_node_tx_max_set,
.rate_node_new = mlx5_esw_devlink_rate_node_new,
.rate_node_del = mlx5_esw_devlink_rate_node_del,
- .rate_leaf_parent_set = mlx5_esw_devlink_rate_parent_set,
+ .rate_leaf_parent_set = mlx5_esw_devlink_rate_leaf_parent_set,
+ .rate_node_parent_set = mlx5_esw_devlink_rate_node_parent_set,
#endif
#ifdef CONFIG_MLX5_SF_MANAGER
.port_new = mlx5_devlink_sf_port_new,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
index c7216e84ef8c..86253a89c24c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
@@ -13,6 +13,50 @@ struct mlx5_vnic_diag_stats {
__be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)];
};
+static void mlx5_reporter_vnic_diagnose_counter_icm(struct mlx5_core_dev *dev,
+ struct devlink_fmsg *fmsg,
+ u16 vport_num, bool other_vport)
+{
+ u32 out_icm_reg[MLX5_ST_SZ_DW(vhca_icm_ctrl_reg)] = {};
+ u32 in_icm_reg[MLX5_ST_SZ_DW(vhca_icm_ctrl_reg)] = {};
+ u32 out_reg[MLX5_ST_SZ_DW(nic_cap_reg)] = {};
+ u32 in_reg[MLX5_ST_SZ_DW(nic_cap_reg)] = {};
+ u32 cur_alloc_icm;
+ int vhca_icm_ctrl;
+ u16 vhca_id;
+ int err;
+
+ err = mlx5_core_access_reg(dev, in_reg, sizeof(in_reg), out_reg,
+ sizeof(out_reg), MLX5_REG_NIC_CAP, 0, 0);
+ if (err) {
+ mlx5_core_warn(dev, "Reading nic_cap_reg failed. err = %d\n", err);
+ return;
+ }
+ vhca_icm_ctrl = MLX5_GET(nic_cap_reg, out_reg, vhca_icm_ctrl);
+ if (!vhca_icm_ctrl)
+ return;
+
+ MLX5_SET(vhca_icm_ctrl_reg, in_icm_reg, vhca_id_valid, other_vport);
+ if (other_vport) {
+ err = mlx5_vport_get_vhca_id(dev, vport_num, &vhca_id);
+ if (err) {
+ mlx5_core_warn(dev, "vport to vhca_id failed. vport_num = %d, err = %d\n",
+ vport_num, err);
+ return;
+ }
+ MLX5_SET(vhca_icm_ctrl_reg, in_icm_reg, vhca_id, vhca_id);
+ }
+ err = mlx5_core_access_reg(dev, in_icm_reg, sizeof(in_icm_reg),
+ out_icm_reg, sizeof(out_icm_reg),
+ MLX5_REG_VHCA_ICM_CTRL, 0, 0);
+ if (err) {
+ mlx5_core_warn(dev, "Reading vhca_icm_ctrl failed. err = %d\n", err);
+ return;
+ }
+ cur_alloc_icm = MLX5_GET(vhca_icm_ctrl_reg, out_icm_reg, cur_alloc_icm);
+ devlink_fmsg_u32_pair_put(fmsg, "icm_consumption", cur_alloc_icm);
+}
+
void mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
struct devlink_fmsg *fmsg,
u16 vport_num, bool other_vport)
@@ -59,6 +103,8 @@ void mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
devlink_fmsg_u64_pair_put(fmsg, "handled_pkt_steering_fail",
VNIC_ENV_GET64(&vnic, handled_pkt_steering_fail));
}
+ if (MLX5_CAP_GEN(dev, nic_cap_reg))
+ mlx5_reporter_vnic_diagnose_counter_icm(dev, fmsg, vport_num, other_vport);
devlink_fmsg_obj_nest_end(fmsg);
devlink_fmsg_pair_nest_end(fmsg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
index 31142f6cc372..1e5522a19483 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
@@ -242,7 +242,7 @@ static int mlx5_dpll_clock_quality_level_get(const struct dpll_device *dpll,
return 0;
}
errout:
- NL_SET_ERR_MSG_MOD(extack, "Invalid clock quality level obtained from firmware\n");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid clock quality level obtained from firmware");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 979fc56205e1..32ed4963b8ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -95,8 +95,6 @@ struct page_pool;
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))
-#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18
-
/* Keep in sync with mlx5e_mpwrq_log_wqe_sz.
* These are theoretical maximums, which can be further restricted by
* capabilities. These values are used for static resource allocations and
@@ -232,16 +230,22 @@ struct mlx5e_rx_wqe_cyc {
DECLARE_FLEX_ARRAY(struct mlx5_wqe_data_seg, data);
};
-struct mlx5e_umr_wqe {
+struct mlx5e_umr_wqe_hdr {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_umr_ctrl_seg uctrl;
struct mlx5_mkey_seg mkc;
+};
+
+struct mlx5e_umr_wqe {
+ struct mlx5e_umr_wqe_hdr hdr;
union {
DECLARE_FLEX_ARRAY(struct mlx5_mtt, inline_mtts);
DECLARE_FLEX_ARRAY(struct mlx5_klm, inline_klms);
DECLARE_FLEX_ARRAY(struct mlx5_ksm, inline_ksms);
};
};
+static_assert(offsetof(struct mlx5e_umr_wqe, inline_mtts) == sizeof(struct mlx5e_umr_wqe_hdr),
+ "struct members should be included in struct mlx5e_umr_wqe_hdr, not in struct mlx5e_umr_wqe");
enum mlx5e_priv_flag {
MLX5E_PFLAG_RX_CQE_BASED_MODER,
@@ -386,7 +390,6 @@ enum {
MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
MLX5E_SQ_STATE_PENDING_XSK_TX,
MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
- MLX5E_SQ_STATE_XDP_MULTIBUF,
MLX5E_NUM_SQ_STATES, /* Must be kept last */
};
@@ -395,6 +398,7 @@ struct mlx5e_tx_mpwqe {
struct mlx5e_tx_wqe *wqe;
u32 bytes_count;
u8 ds_count;
+ u8 ds_count_max;
u8 pkt_count;
u8 inline_on;
};
@@ -660,7 +664,7 @@ struct mlx5e_rq {
} wqe;
struct {
struct mlx5_wq_ll wq;
- struct mlx5e_umr_wqe umr_wqe;
+ struct mlx5e_umr_wqe_hdr umr_wqe;
struct mlx5e_mpw_info *info;
mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
__be32 umr_mkey_be;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 1e8b7d330701..b5c3a2a9d2a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -84,9 +84,9 @@ enum {
MLX5E_ARFS_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
- MLX5E_ACCEL_FS_POL_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
- MLX5E_ACCEL_FS_ESP_FT_LEVEL,
+ MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
+ MLX5E_ACCEL_FS_POL_FT_LEVEL,
MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
#endif
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 64b62ed17b07..aa36670d9a36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -10,6 +10,9 @@
#include <net/page_pool/types.h>
#include <net/xdp_sock_drv.h>
+#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18
+#define MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ 17
+
static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev)
{
u8 min_page_shift = MLX5_CAP_GEN_2(mdev, log_min_mkey_entity_size);
@@ -103,18 +106,22 @@ u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
enum mlx5e_mpwrq_umr_mode umr_mode)
{
u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
- u8 max_pages_per_wqe, max_log_mpwqe_size;
+ u8 max_pages_per_wqe, max_log_wqe_size_calc;
+ u8 max_log_wqe_size_cap;
u16 max_wqe_size;
/* Keep in sync with MLX5_MPWRQ_MAX_PAGES_PER_WQE. */
max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB;
max_pages_per_wqe = ALIGN_DOWN(max_wqe_size - sizeof(struct mlx5e_umr_wqe),
MLX5_UMR_FLEX_ALIGNMENT) / umr_entry_size;
- max_log_mpwqe_size = ilog2(max_pages_per_wqe) + page_shift;
+ max_log_wqe_size_calc = ilog2(max_pages_per_wqe) + page_shift;
+
+ WARN_ON_ONCE(max_log_wqe_size_calc < MLX5E_ORDER2_MAX_PACKET_MTU);
- WARN_ON_ONCE(max_log_mpwqe_size < MLX5E_ORDER2_MAX_PACKET_MTU);
+ max_log_wqe_size_cap = mlx5_core_is_ecpf(mdev) ?
+ MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ : MLX5_MPWRQ_MAX_LOG_WQE_SZ;
- return min_t(u8, max_log_mpwqe_size, MLX5_MPWRQ_MAX_LOG_WQE_SZ);
+ return min_t(u8, max_log_wqe_size_calc, max_log_wqe_size_cap);
}
u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
@@ -1240,7 +1247,6 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
- param->is_xdp_mb = !mlx5e_rx_is_linear_skb(mdev, params, xsk);
mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 3f8986f9d862..bd5877acc5b1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -33,7 +33,6 @@ struct mlx5e_sq_param {
struct mlx5_wq_param wq;
bool is_mpw;
bool is_tls;
- bool is_xdp_mb;
u16 stop_room;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 5f6a0605e4ae..6049ccf475bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -80,6 +80,7 @@ int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable,
int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
{
struct mlx5_port_eth_proto eproto;
+ const struct mlx5_link_info *info;
bool force_legacy = false;
bool ext;
int err;
@@ -94,9 +95,13 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
if (err)
goto out;
}
- *speed = mlx5_port_ptys2speed(mdev, eproto.oper, force_legacy);
- if (!(*speed))
+ info = mlx5_port_ptys2info(mdev, eproto.oper, force_legacy);
+ if (!info) {
+ *speed = SPEED_UNKNOWN;
err = -EINVAL;
+ goto out;
+ }
+ *speed = info->speed;
out:
return err;
@@ -296,11 +301,16 @@ enum mlx5e_fec_supported_link_mode {
MLX5E_FEC_SUPPORTED_LINK_MODE_200G_2X,
MLX5E_FEC_SUPPORTED_LINK_MODE_400G_4X,
MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_200G_1X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_400G_2X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_800G_4X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_1600G_8X,
MLX5E_MAX_FEC_SUPPORTED_LINK_MODE,
};
#define MLX5E_FEC_FIRST_50G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X
#define MLX5E_FEC_FIRST_100G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_100G_1X
+#define MLX5E_FEC_FIRST_200G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_200G_1X
#define MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, policy, write, link) \
do { \
@@ -320,8 +330,10 @@ static bool mlx5e_is_fec_supported_link_mode(struct mlx5_core_dev *dev,
return link_mode < MLX5E_FEC_FIRST_50G_PER_LANE_MODE ||
(link_mode < MLX5E_FEC_FIRST_100G_PER_LANE_MODE &&
MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm)) ||
- (link_mode >= MLX5E_FEC_FIRST_100G_PER_LANE_MODE &&
- MLX5_CAP_PCAM_FEATURE(dev, fec_100G_per_lane_in_pplm));
+ (link_mode < MLX5E_FEC_FIRST_200G_PER_LANE_MODE &&
+ MLX5_CAP_PCAM_FEATURE(dev, fec_100G_per_lane_in_pplm)) ||
+ (link_mode >= MLX5E_FEC_FIRST_200G_PER_LANE_MODE &&
+ MLX5_CAP_PCAM_FEATURE(dev, fec_200G_per_lane_in_pplm));
}
/* get/set FEC admin field for a given speed */
@@ -368,6 +380,18 @@ static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X:
MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 800g_8x);
break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_1X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 200g_1x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_2X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 400g_2x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_4X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 800g_4x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_1600G_8X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 1600g_8x);
+ break;
default:
return -EINVAL;
}
@@ -421,6 +445,18 @@ static int mlx5e_get_fec_cap_field(u32 *pplm, u16 *fec_cap,
case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X:
*fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 800g_8x);
break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_1X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 200g_1x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_2X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 400g_2x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_4X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 800g_4x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_1600G_8X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 1600g_8x);
+ break;
default:
return -EINVAL;
}
@@ -494,6 +530,26 @@ out:
return 0;
}
+static u16 mlx5e_remap_fec_conf_mode(enum mlx5e_fec_supported_link_mode link_mode,
+ u16 conf_fec)
+{
+ /* RS fec in ethtool is originally mapped to MLX5E_FEC_RS_528_514.
+ * For link modes up to 25G per lane, the value is kept.
+ * For 50G or 100G per lane, it's remapped to MLX5E_FEC_RS_544_514.
+ * For 200G per lane, remapped to MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD.
+ */
+ if (conf_fec != BIT(MLX5E_FEC_RS_528_514))
+ return conf_fec;
+
+ if (link_mode >= MLX5E_FEC_FIRST_200G_PER_LANE_MODE)
+ return BIT(MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD);
+
+ if (link_mode >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE)
+ return BIT(MLX5E_FEC_RS_544_514);
+
+ return conf_fec;
+}
+
int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u16 fec_policy)
{
bool fec_50g_per_lane = MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm);
@@ -530,14 +586,7 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u16 fec_policy)
if (!mlx5e_is_fec_supported_link_mode(dev, i))
break;
- /* RS fec in ethtool is mapped to MLX5E_FEC_RS_528_514
- * to link modes up to 25G per lane and to
- * MLX5E_FEC_RS_544_514 in the new link modes based on
- * 50G or 100G per lane
- */
- if (conf_fec == (1 << MLX5E_FEC_RS_528_514) &&
- i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE)
- conf_fec = (1 << MLX5E_FEC_RS_544_514);
+ conf_fec = mlx5e_remap_fec_conf_mode(i, conf_fec);
mlx5e_get_fec_cap_field(out, &fec_caps, i);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
index d1da225f35da..fa2283dd383b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
@@ -61,6 +61,7 @@ enum {
MLX5E_FEC_NOFEC,
MLX5E_FEC_FIRECODE,
MLX5E_FEC_RS_528_514,
+ MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD = 4,
MLX5E_FEC_RS_544_514 = 7,
MLX5E_FEC_LLRS_272_257_1 = 9,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index afd654583b6b..131ed97ca997 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -326,7 +326,7 @@ static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix,
int node;
sq->pdev = c->pdev;
- sq->clock = &mdev->clock;
+ sq->clock = mdev->clock;
sq->mkey_be = c->mkey_be;
sq->netdev = c->netdev;
sq->priv = c->priv;
@@ -696,7 +696,7 @@ static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
rq->pdev = c->pdev;
rq->netdev = priv->netdev;
rq->priv = priv;
- rq->clock = &mdev->clock;
+ rq->clock = mdev->clock;
rq->tstamp = &priv->tstamp;
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 5d128c5b4529..0f5d7ea8956f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -48,15 +48,10 @@ mlx5_esw_bridge_lag_rep_get(struct net_device *dev, struct mlx5_eswitch *esw)
struct list_head *iter;
netdev_for_each_lower_dev(dev, lower, iter) {
- struct mlx5_core_dev *mdev;
- struct mlx5e_priv *priv;
-
if (!mlx5e_eswitch_rep(lower))
continue;
- priv = netdev_priv(lower);
- mdev = priv->mdev;
- if (mlx5_lag_is_shared_fdb(mdev) && mlx5_esw_bridge_dev_same_esw(lower, esw))
+ if (mlx5_esw_bridge_dev_same_esw(lower, esw))
return lower;
}
@@ -125,7 +120,7 @@ static bool mlx5_esw_bridge_is_local(struct net_device *dev, struct net_device *
priv = netdev_priv(rep);
mdev = priv->mdev;
if (netif_is_lag_master(dev))
- return mlx5_lag_is_shared_fdb(mdev) && mlx5_lag_is_master(mdev);
+ return mlx5_lag_is_master(mdev);
return true;
}
@@ -455,6 +450,9 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
if (!rep)
return NOTIFY_DONE;
+ if (netif_is_lag_master(dev) && !mlx5_lag_is_shared_fdb(esw->dev))
+ return NOTIFY_DONE;
+
switch (event) {
case SWITCHDEV_FDB_ADD_TO_BRIDGE:
fdb_info = container_of(info,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 25d751eba99b..e75759533ae0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -317,10 +317,8 @@ mlx5e_rx_reporter_diagnose_common_ptp_config(struct mlx5e_priv *priv, struct mlx
}
static void
-mlx5e_rx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg)
+mlx5e_rx_reporter_diagnose_common_config(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg)
{
- struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_rq *generic_rq = &priv->channels.c[0]->rq;
struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
@@ -340,20 +338,100 @@ static void mlx5e_rx_reporter_build_diagnose_output_ptp_rq(struct mlx5e_rq *rq,
devlink_fmsg_obj_nest_end(fmsg);
}
-static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg,
- struct netlink_ext_ack *extack)
+static void mlx5e_rx_reporter_diagnose_rx_res_dir_tirns(struct mlx5e_rx_res *rx_res,
+ struct devlink_fmsg *fmsg)
{
- struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
- struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
+ unsigned int max_nch = mlx5e_rx_res_get_max_nch(rx_res);
int i;
- mutex_lock(&priv->state_lock);
+ devlink_fmsg_arr_pair_nest_start(fmsg, "Direct TIRs");
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- goto unlock;
+ for (i = 0; i < max_nch; i++) {
+ devlink_fmsg_obj_nest_start(fmsg);
+
+ devlink_fmsg_u32_pair_put(fmsg, "ix", i);
+ devlink_fmsg_u32_pair_put(fmsg, "tirn", mlx5e_rx_res_get_tirn_direct(rx_res, i));
+ devlink_fmsg_u32_pair_put(fmsg, "rqtn", mlx5e_rx_res_get_rqtn_direct(rx_res, i));
+
+ devlink_fmsg_obj_nest_end(fmsg);
+ }
+
+ devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rx_res_rss_tirn(struct mlx5e_rss *rss, bool inner,
+ struct devlink_fmsg *fmsg)
+{
+ bool found_valid_tir = false;
+ int tt;
+
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ if (!mlx5e_rss_valid_tir(rss, tt, inner))
+ continue;
+
+ if (!found_valid_tir) {
+ char *tir_msg = inner ? "Inner TIRs Numbers" : "TIRs Numbers";
+
+ found_valid_tir = true;
+ devlink_fmsg_arr_pair_nest_start(fmsg, tir_msg);
+ }
+
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_string_pair_put(fmsg, "tt", mlx5_ttc_get_name(tt));
+ devlink_fmsg_u32_pair_put(fmsg, "tirn", mlx5e_rss_get_tirn(rss, tt, inner));
+ devlink_fmsg_obj_nest_end(fmsg);
+ }
+
+ if (found_valid_tir)
+ devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rx_res_rss_ix(struct mlx5e_rx_res *rx_res, u32 rss_idx,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_rss *rss = mlx5e_rx_res_rss_get(rx_res, rss_idx);
+
+ if (!rss)
+ return;
+
+ devlink_fmsg_obj_nest_start(fmsg);
+
+ devlink_fmsg_u32_pair_put(fmsg, "Index", rss_idx);
+ devlink_fmsg_u32_pair_put(fmsg, "rqtn", mlx5e_rss_get_rqtn(rss));
+ mlx5e_rx_reporter_diagnose_rx_res_rss_tirn(rss, false, fmsg);
+ if (mlx5e_rss_get_inner_ft_support(rss))
+ mlx5e_rx_reporter_diagnose_rx_res_rss_tirn(rss, true, fmsg);
+
+ devlink_fmsg_obj_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rx_res_rss(struct mlx5e_rx_res *rx_res,
+ struct devlink_fmsg *fmsg)
+{
+ int rss_ix;
+
+ devlink_fmsg_arr_pair_nest_start(fmsg, "RSS");
+ for (rss_ix = 0; rss_ix < MLX5E_MAX_NUM_RSS; rss_ix++)
+ mlx5e_rx_reporter_diagnose_rx_res_rss_ix(rx_res, rss_ix, fmsg);
+ devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rx_res(struct mlx5e_priv *priv,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_rx_res *rx_res = priv->rx_res;
+
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX resources");
+ mlx5e_rx_reporter_diagnose_rx_res_dir_tirns(rx_res, fmsg);
+ mlx5e_rx_reporter_diagnose_rx_res_rss(rx_res, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rqs(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
+ int i;
- mlx5e_rx_reporter_diagnose_common_config(reporter, fmsg);
devlink_fmsg_arr_pair_nest_start(fmsg, "RQs");
for (i = 0; i < priv->channels.num; i++) {
@@ -367,7 +445,24 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
}
if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state))
mlx5e_rx_reporter_build_diagnose_output_ptp_rq(&ptp_ch->rq, fmsg);
+
devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+
+ mutex_lock(&priv->state_lock);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto unlock;
+
+ mlx5e_rx_reporter_diagnose_common_config(priv, fmsg);
+ mlx5e_rx_reporter_diagnose_rqs(priv, fmsg);
+ mlx5e_rx_reporter_diagnose_rx_res(priv, fmsg);
unlock:
mutex_unlock(&priv->state_lock);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 09433b91be17..532c7fa94d17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -16,7 +16,6 @@ static const char * const sq_sw_state_type_name[] = {
[MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE] = "vlan_need_l2_inline",
[MLX5E_SQ_STATE_PENDING_XSK_TX] = "pending_xsk_tx",
[MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC] = "pending_tls_rx_resync",
- [MLX5E_SQ_STATE_XDP_MULTIBUF] = "xdp_multibuf",
};
static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
index 5f742f896600..74cd111ee320 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
@@ -81,6 +81,11 @@ struct mlx5e_rss {
refcount_t refcnt;
};
+bool mlx5e_rss_get_inner_ft_support(struct mlx5e_rss *rss)
+{
+ return rss->inner_ft_support;
+}
+
void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_channels)
{
rss->indir.actual_table_size = mlx5e_rqt_size(rss->mdev, num_channels);
@@ -156,6 +161,7 @@ static void mlx5e_rss_params_init(struct mlx5e_rss *rss)
{
enum mlx5_traffic_types tt;
+ rss->hash.symmetric = true;
rss->hash.hfunc = ETH_RSS_HASH_TOP;
netdev_rss_key_fill(rss->hash.toeplitz_hash_key,
sizeof(rss->hash.toeplitz_hash_key));
@@ -449,6 +455,16 @@ u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
return mlx5e_tir_get_tirn(tir);
}
+u32 mlx5e_rss_get_rqtn(struct mlx5e_rss *rss)
+{
+ return mlx5e_rqt_get_rqtn(&rss->rqt);
+}
+
+bool mlx5e_rss_valid_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, bool inner)
+{
+ return !!rss_get_tir(rss, tt, inner);
+}
+
/* Fill the "tirn" output parameter.
* Create the requested TIR if it's its first usage.
*/
@@ -551,7 +567,7 @@ inner_tir:
return final_err;
}
-int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc)
+int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc, bool *symmetric)
{
if (indir)
memcpy(indir, rss->indir.table,
@@ -564,11 +580,14 @@ int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc)
if (hfunc)
*hfunc = rss->hash.hfunc;
+ if (symmetric)
+ *symmetric = rss->hash.symmetric;
+
return 0;
}
int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
- const u8 *key, const u8 *hfunc,
+ const u8 *key, const u8 *hfunc, const bool *symmetric,
u32 *rqns, u32 *vhca_ids, unsigned int num_rqns)
{
bool changed_indir = false;
@@ -608,6 +627,11 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
rss->indir.actual_table_size * sizeof(*rss->indir.table));
}
+ if (symmetric) {
+ rss->hash.symmetric = *symmetric;
+ changed_hash = true;
+ }
+
if (changed_indir && rss->enabled) {
err = mlx5e_rss_apply(rss, rqns, vhca_ids, num_rqns);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
index d0df98963c8d..8ac902190010 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
@@ -32,8 +32,11 @@ void mlx5e_rss_refcnt_inc(struct mlx5e_rss *rss);
void mlx5e_rss_refcnt_dec(struct mlx5e_rss *rss);
unsigned int mlx5e_rss_refcnt_read(struct mlx5e_rss *rss);
+bool mlx5e_rss_get_inner_ft_support(struct mlx5e_rss *rss);
u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
bool inner);
+bool mlx5e_rss_valid_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, bool inner);
+u32 mlx5e_rss_get_rqtn(struct mlx5e_rss *rss);
int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
enum mlx5_traffic_types tt,
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
@@ -44,9 +47,9 @@ void mlx5e_rss_disable(struct mlx5e_rss *rss);
int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
struct mlx5e_packet_merge_param *pkt_merge_param);
-int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc);
+int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc, bool *symmetric);
int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
- const u8 *key, const u8 *hfunc,
+ const u8 *key, const u8 *hfunc, const bool *symmetric,
u32 *rqns, u32 *vhca_ids, unsigned int num_rqns);
struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss);
u8 mlx5e_rss_get_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index a86eade9a9e0..5fcbe47337b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -5,8 +5,6 @@
#include "channels.h"
#include "params.h"
-#define MLX5E_MAX_NUM_RSS 16
-
struct mlx5e_rx_res {
struct mlx5_core_dev *mdev; /* primary */
enum mlx5e_rx_res_features features;
@@ -196,7 +194,7 @@ void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int n
}
int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
- u32 *indir, u8 *key, u8 *hfunc)
+ u32 *indir, u8 *key, u8 *hfunc, bool *symmetric)
{
struct mlx5e_rss *rss;
@@ -207,11 +205,12 @@ int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
if (!rss)
return -ENOENT;
- return mlx5e_rss_get_rxfh(rss, indir, key, hfunc);
+ return mlx5e_rss_get_rxfh(rss, indir, key, hfunc, symmetric);
}
int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
- const u32 *indir, const u8 *key, const u8 *hfunc)
+ const u32 *indir, const u8 *key, const u8 *hfunc,
+ const bool *symmetric)
{
u32 *vhca_ids = get_vhca_ids(res, 0);
struct mlx5e_rss *rss;
@@ -223,8 +222,8 @@ int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
if (!rss)
return -ENOENT;
- return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, res->rss_rqns, vhca_ids,
- res->rss_nch);
+ return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, symmetric,
+ res->rss_rqns, vhca_ids, res->rss_nch);
}
int mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
@@ -497,6 +496,11 @@ void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res)
mlx5e_rx_res_free(res);
}
+unsigned int mlx5e_rx_res_get_max_nch(struct mlx5e_rx_res *res)
+{
+ return res->max_nch;
+}
+
u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
return mlx5e_tir_get_tirn(&res->channels[ix].direct_tir);
@@ -522,7 +526,7 @@ u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res)
return mlx5e_tir_get_tirn(&res->ptp.tir);
}
-static u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
+u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
index 7b1a9f0f1874..3e09d91281af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
@@ -10,6 +10,8 @@
#include "fs.h"
#include "rss.h"
+#define MLX5E_MAX_NUM_RSS 16
+
struct mlx5e_rx_res;
struct mlx5e_channels;
@@ -34,6 +36,9 @@ u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix);
u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res);
+u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix);
+unsigned int mlx5e_rx_res_get_max_nch(struct mlx5e_rx_res *res);
+bool mlx5_rx_res_rss_inner_ft_support(struct mlx5e_rx_res *res);
/* Activate/deactivate API */
void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs);
@@ -44,9 +49,10 @@ void mlx5e_rx_res_xsk_update(struct mlx5e_rx_res *res, struct mlx5e_channels *ch
/* Configuration API */
void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int nch);
int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
- u32 *indir, u8 *key, u8 *hfunc);
+ u32 *indir, u8 *key, u8 *hfunc, bool *symmetric);
int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
- const u32 *indir, const u8 *key, const u8 *hfunc);
+ const u32 *indir, const u8 *key, const u8 *hfunc,
+ const bool *symmetric);
int mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
enum mlx5_traffic_types tt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
index d6c12d0ea55b..2e528b2c34d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
@@ -73,11 +73,6 @@ struct mlx5e_tc_act {
bool is_terminating_action;
};
-struct mlx5e_tc_flow_action {
- unsigned int num_entries;
- struct flow_action_entry **entries;
-};
-
extern struct mlx5e_tc_act mlx5e_tc_act_drop;
extern struct mlx5e_tc_act mlx5e_tc_act_trap;
extern struct mlx5e_tc_act mlx5e_tc_act_accept;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
index feeb41693c17..b6cabe829f19 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
@@ -5,6 +5,16 @@
#include "en/tc_priv.h"
#include "en/tc_ct.h"
+static bool
+tc_act_can_offload_ct(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index,
+ struct mlx5_flow_attr *attr)
+{
+ return !((act->ct.action & TCA_CT_ACT_COMMIT) &&
+ flow_action_is_last_entry(parse_state->flow_action, act));
+}
+
static int
tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
@@ -56,6 +66,7 @@ tc_act_is_missable_ct(const struct flow_action_entry *act)
}
struct mlx5e_tc_act mlx5e_tc_act_ct = {
+ .can_offload = tc_act_can_offload_ct,
.parse_action = tc_act_parse_ct,
.post_parse = tc_act_post_parse_ct,
.is_multi_table_act = tc_act_is_multi_table_act_ct,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
index 8218c892b161..7819fb297280 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
@@ -593,3 +593,8 @@ mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter,
*drops = packets2;
*lastuse = max_t(u64, lastuse1, lastuse2);
}
+
+int mlx5e_flow_meter_get_base_id(struct mlx5e_flow_meter_handle *meter)
+{
+ return meter->meters_obj->base_id;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
index 9b795cd106bb..d6afb6556875 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
@@ -72,4 +72,17 @@ void
mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter,
u64 *bytes, u64 *packets, u64 *drops, u64 *lastuse);
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+
+int mlx5e_flow_meter_get_base_id(struct mlx5e_flow_meter_handle *meter);
+
+#else /* CONFIG_MLX5_CLS_ACT */
+
+static inline int
+mlx5e_flow_meter_get_base_id(struct mlx5e_flow_meter_handle *meter)
+{
+ return 0;
+}
+#endif /* CONFIG_MLX5_CLS_ACT */
+
#endif /* __MLX5_EN_FLOW_METER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index a065e8fafb1d..81332cd4a582 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -1349,6 +1349,32 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
return 0;
}
+static bool
+mlx5_tc_ct_filter_legacy_non_nic_flows(struct mlx5_ct_ft *ft,
+ struct flow_cls_offload *flow)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+ struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
+ struct flow_match_meta match;
+ struct net_device *netdev;
+ bool same_dev = false;
+
+ if (!is_mdev_legacy_mode(ct_priv->dev) ||
+ !flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
+ return true;
+
+ flow_rule_match_meta(rule, &match);
+
+ if (!(match.key->ingress_ifindex & match.mask->ingress_ifindex))
+ return true;
+
+ netdev = dev_get_by_index(&init_net, match.key->ingress_ifindex);
+ same_dev = ct_priv->netdev == netdev;
+ dev_put(netdev);
+
+ return same_dev;
+}
+
static int
mlx5_tc_ct_block_flow_offload(enum tc_setup_type type, void *type_data,
void *cb_priv)
@@ -1361,6 +1387,9 @@ mlx5_tc_ct_block_flow_offload(enum tc_setup_type type, void *type_data,
switch (f->command) {
case FLOW_CLS_REPLACE:
+ if (!mlx5_tc_ct_filter_legacy_non_nic_flows(ft, f))
+ return -EOPNOTSUPP;
+
return mlx5_tc_ct_block_flow_offload_add(ft, f);
case FLOW_CLS_DESTROY:
return mlx5_tc_ct_block_flow_offload_del(ft, f);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 721f35e59757..2162d776fe35 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -31,8 +31,7 @@ static void mlx5e_tc_tun_route_attr_cleanup(struct mlx5e_tc_tun_route_attr *attr
{
if (attr->n)
neigh_release(attr->n);
- if (attr->route_dev)
- dev_put(attr->route_dev);
+ dev_put(attr->route_dev);
}
struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev)
@@ -68,16 +67,14 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
* while holding rcu read lock. Take the net_device for correctness
* sake.
*/
- if (uplink_upper)
- dev_hold(uplink_upper);
+ dev_hold(uplink_upper);
rcu_read_unlock();
dst_is_lag_dev = (uplink_upper &&
netif_is_lag_master(uplink_upper) &&
real_dev == uplink_upper &&
mlx5_lag_is_sriov(priv->mdev));
- if (uplink_upper)
- dev_put(uplink_upper);
+ dev_put(uplink_upper);
/* if the egress device isn't on the same HW e-switch or
* it's a LAG device, use the uplink
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index e7e01f3298ef..a0fc76a1bc08 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -42,8 +42,7 @@ static int mlx5e_set_int_port_tunnel(struct mlx5e_priv *priv,
&attr->action, out_index);
out:
- if (route_dev)
- dev_put(route_dev);
+ dev_put(route_dev);
return err;
}
@@ -753,8 +752,7 @@ static int mlx5e_set_vf_tunnel(struct mlx5_eswitch *esw,
}
out:
- if (route_dev)
- dev_put(route_dev);
+ dev_put(route_dev);
return err;
}
@@ -788,8 +786,7 @@ static int mlx5e_update_vf_tunnel(struct mlx5_eswitch *esw,
mlx5e_tc_match_to_reg_mod_hdr_change(esw->dev, mod_hdr_acts, VPORT_TO_REG, act_id, data);
out:
- if (route_dev)
- dev_put(route_dev);
+ dev_put(route_dev);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
index e4e487c8431b..5c762a71818d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
@@ -140,7 +140,7 @@ static int mlx5e_tc_tun_parse_vxlan_gbp_option(struct mlx5e_priv *priv,
gbp_mask = (u32 *)&enc_opts.mask->data[0];
if (*gbp_mask & ~VXLAN_GBP_MASK) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "Wrong VxLAN GBP mask(0x%08X)\n", *gbp_mask);
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Wrong VxLAN GBP mask(0x%08X)", *gbp_mask);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
index 11f724ad90db..19499072f67f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
@@ -124,7 +124,7 @@ void mlx5e_tir_builder_build_rss(struct mlx5e_tir_builder *builder,
const size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key);
void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
- MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, rss_hash->symmetric);
memcpy(rss_key, rss_hash->toeplitz_hash_key, len);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
index 857a84bcd53a..e8df3aaf6562 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
@@ -9,6 +9,7 @@
struct mlx5e_rss_params_hash {
u8 hfunc;
u8 toeplitz_hash_key[40];
+ bool symmetric;
};
struct mlx5e_rss_params_traffic_type {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index 53ca16cb9c41..140606fcd23b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -46,7 +46,7 @@ static void mlx5e_init_trap_rq(struct mlx5e_trap *t, struct mlx5e_params *params
rq->pdev = t->pdev;
rq->netdev = priv->netdev;
rq->priv = priv;
- rq->clock = &mdev->clock;
+ rq->clock = mdev->clock;
rq->tstamp = &priv->tstamp;
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 5ec468268d1a..e837c21d3d21 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -214,6 +214,19 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
return pi;
}
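+/* Return the current SQ producer index and report via @size how many
+ * contiguous WQEBBs are available at that position, capped at
+ * sq->max_sq_mpw_wqebbs, so callers can size a multi-packet WQE without
+ * wrapping the cyclic work queue.
+ */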
+static inline u16 mlx5e_txqsq_get_next_pi_anysize(struct mlx5e_txqsq *sq,
+ u16 *size)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi, contig_wqebbs;
+
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ *size = min_t(u16, contig_wqebbs, sq->max_sq_mpw_wqebbs);
+
+ return pi;
+}
+
void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq);
static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
@@ -358,9 +371,9 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq);
-static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
+static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session)
{
- return session->ds_count == max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS;
+ return session->ds_count == session->ds_count_max;
}
static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 94b291662087..f803e1c93590 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -289,9 +289,9 @@ static u64 mlx5e_xsk_fill_timestamp(void *_priv)
ts = get_cqe_ts(priv->cqe);
if (mlx5_is_real_time_rq(priv->cq->mdev) || mlx5_is_real_time_sq(priv->cq->mdev))
- return mlx5_real_time_cyc2time(&priv->cq->mdev->clock, ts);
+ return mlx5_real_time_cyc2time(priv->cq->mdev->clock, ts);
- return mlx5_timecounter_cyc2time(&priv->cq->mdev->clock, ts);
+ return mlx5_timecounter_cyc2time(priv->cq->mdev->clock, ts);
}
static void mlx5e_xsk_request_checksum(u16 csum_start, u16 csum_offset, void *priv)
@@ -390,6 +390,7 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
.wqe = wqe,
.bytes_count = 0,
.ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
+ .ds_count_max = sq->max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS,
.pkt_count = 0,
.inline_on = mlx5e_xdp_get_inline_state(sq, session->inline_on),
};
@@ -501,7 +502,7 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx
mlx5e_xdp_mpwqe_add_dseg(sq, p, stats);
- if (unlikely(mlx5e_xdp_mpwqe_is_full(session, sq->max_sq_mpw_wqebbs)))
+ if (unlikely(mlx5e_xdp_mpwqe_is_full(session)))
mlx5e_xdp_mpwqe_complete(sq);
stats->xmit++;
@@ -546,6 +547,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
bool inline_ok;
bool linear;
u16 pi;
+ int i;
struct mlx5e_xdpsq_stats *stats = sq->stats;
@@ -612,41 +614,33 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
- if (test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
- int i;
-
- memset(&cseg->trailer, 0, sizeof(cseg->trailer));
- memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
-
- eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
+ memset(&cseg->trailer, 0, sizeof(cseg->trailer));
+ memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
- for (i = 0; i < num_frags; i++) {
- skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
- dma_addr_t addr;
+ eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
- addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
- page_pool_get_dma_addr(skb_frag_page(frag)) +
- skb_frag_off(frag);
+ for (i = 0; i < num_frags; i++) {
+ skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
+ dma_addr_t addr;
- dseg->addr = cpu_to_be64(addr);
- dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
- dseg->lkey = sq->mkey_be;
- dseg++;
- }
+ addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
+ page_pool_get_dma_addr(skb_frag_page(frag)) +
+ skb_frag_off(frag);
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+ dseg->addr = cpu_to_be64(addr);
+ dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
+ dseg->lkey = sq->mkey_be;
+ dseg++;
+ }
- sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
- .num_wqebbs = num_wqebbs,
- .num_pkts = 1,
- };
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- sq->pc += num_wqebbs;
- } else {
- cseg->fm_ce_se = 0;
+ sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
+ .num_wqebbs = num_wqebbs,
+ .num_pkts = 1,
+ };
- sq->pc++;
- }
+ sq->pc += num_wqebbs;
xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, eseg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index e054db1e10f8..446e492c6bb8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -182,13 +182,13 @@ static inline bool mlx5e_xdp_get_inline_state(struct mlx5e_xdpsq *sq, bool cur)
return cur;
}
-static inline bool mlx5e_xdp_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
+static inline bool mlx5e_xdp_mpwqe_is_full(struct mlx5e_tx_mpwqe *session)
{
if (session->inline_on)
return session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT >
- max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS;
+ session->ds_count_max;
- return mlx5e_tx_mpwqe_is_full(session, max_sq_mpw_wqebbs);
+ return mlx5e_tx_mpwqe_is_full(session);
}
struct mlx5e_xdp_wqe_info {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 1b7132fa70de..2b05536d564a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -123,7 +123,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
bitmap_zero(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
wi->consumed_strides = 0;
- umr_wqe->ctrl.opmod_idx_opcode =
+ umr_wqe->hdr.ctrl.opmod_idx_opcode =
cpu_to_be32((icosq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_UMR);
/* Optimized for speed: keep in sync with mlx5e_mpwrq_umr_entry_size. */
@@ -134,7 +134,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
offset = offset * sizeof(struct mlx5_klm) * 2 / MLX5_OCTWORD;
else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE))
offset = offset * sizeof(struct mlx5_ksm) * 4 / MLX5_OCTWORD;
- umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
+ umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset);
icosq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
@@ -144,7 +144,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
icosq->pc += rq->mpwqe.umr_wqebbs;
- icosq->doorbell_cseg = &umr_wqe->ctrl;
+ icosq->doorbell_cseg = &umr_wqe->hdr.ctrl;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 9240cfe25d10..d743e823362a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -72,7 +72,7 @@ static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
rq->netdev = c->netdev;
rq->priv = c->priv;
rq->tstamp = c->tstamp;
- rq->clock = &mdev->clock;
+ rq->clock = mdev->clock;
rq->icosq = &c->icosq;
rq->ix = c->ix;
rq->channel = c;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 501709ac310f..2dd842aac6fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -277,12 +277,12 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
case XFRM_DEV_OFFLOAD_IN:
src = attrs->dmac;
dst = attrs->smac;
- pkey = &attrs->saddr.a4;
+ pkey = &attrs->addrs.saddr.a4;
break;
case XFRM_DEV_OFFLOAD_OUT:
src = attrs->smac;
dst = attrs->dmac;
- pkey = &attrs->daddr.a4;
+ pkey = &attrs->addrs.daddr.a4;
break;
default:
return;
@@ -303,6 +303,16 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
neigh_release(n);
}
+static void mlx5e_ipsec_state_mask(struct mlx5e_ipsec_addr *addrs)
+{
+ /*
+ * State doesn't have subnet prefixes in outer headers.
+ * The match is performed for exact source/destination addresses.
+ */
+ memset(addrs->smask.m6, 0xFF, sizeof(__be32) * 4);
+ memset(addrs->dmask.m6, 0xFF, sizeof(__be32) * 4);
+}
+
void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_esp_xfrm_attrs *attrs)
{
@@ -374,9 +384,11 @@ skip_replay_window:
attrs->spi = be32_to_cpu(x->id.spi);
/* source , destination ips */
- memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
- memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
- attrs->family = x->props.family;
+ memcpy(&attrs->addrs.saddr, x->props.saddr.a6,
+ sizeof(attrs->addrs.saddr));
+ memcpy(&attrs->addrs.daddr, x->id.daddr.a6, sizeof(attrs->addrs.daddr));
+ attrs->addrs.family = x->props.family;
+ mlx5e_ipsec_state_mask(&attrs->addrs);
attrs->type = x->xso.type;
attrs->reqid = x->props.reqid;
attrs->upspec.dport = ntohs(x->sel.dport);
@@ -428,7 +440,8 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
}
if (x->encap) {
if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESPINUDP)) {
- NL_SET_ERR_MSG_MOD(extack, "Encapsulation is not supported");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Encapsulation is not supported");
return -EINVAL;
}
@@ -853,13 +866,13 @@ static int mlx5e_ipsec_netevent_event(struct notifier_block *nb,
xa_for_each_marked(&ipsec->sadb, idx, sa_entry, MLX5E_IPSEC_TUNNEL_SA) {
attrs = &sa_entry->attrs;
- if (attrs->family == AF_INET) {
- if (!neigh_key_eq32(n, &attrs->saddr.a4) &&
- !neigh_key_eq32(n, &attrs->daddr.a4))
+ if (attrs->addrs.family == AF_INET) {
+ if (!neigh_key_eq32(n, &attrs->addrs.saddr.a4) &&
+ !neigh_key_eq32(n, &attrs->addrs.daddr.a4))
continue;
} else {
- if (!neigh_key_eq128(n, &attrs->saddr.a4) &&
- !neigh_key_eq128(n, &attrs->daddr.a4))
+ if (!neigh_key_eq128(n, &attrs->addrs.saddr.a4) &&
+ !neigh_key_eq128(n, &attrs->addrs.daddr.a4))
continue;
}
@@ -953,21 +966,6 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
priv->ipsec = NULL;
}
-static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- if (x->props.family == AF_INET) {
- /* Offload with IPv4 options is not supported yet */
- if (ip_hdr(skb)->ihl > 5)
- return false;
- } else {
- /* Offload with IPv6 extension headers is not support yet */
- if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
- return false;
- }
-
- return true;
-}
-
static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
@@ -1035,7 +1033,7 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
* by removing always available headers.
*/
headers = sizeof(struct ethhdr);
- if (sa_entry->attrs.family == AF_INET)
+ if (sa_entry->attrs.addrs.family == AF_INET)
headers += sizeof(struct iphdr);
else
headers += sizeof(struct ipv6hdr);
@@ -1044,6 +1042,43 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
x->curlft.bytes += success_bytes - headers * success_packets;
}
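+/* Convert a per-32-bit-word prefix length into a big-endian mask:
+ * negative means no bits, 0 or more than 31 means all bits, otherwise
+ * the top @prefix bits are set.
+ */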
+static __be32 word_to_mask(int prefix)
+{
+ if (prefix < 0)
+ return 0;
+
+ if (!prefix || prefix > 31)
+ return cpu_to_be32(0xFFFFFFFF);
+
+ return cpu_to_be32(((1U << prefix) - 1) << (32 - prefix));
+}
+
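+/* Apply the policy selector prefix lengths to the already-copied
+ * addresses: build source/destination masks and clear the host bits so
+ * flow steering matches at subnet granularity.
+ */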
+static void mlx5e_ipsec_policy_mask(struct mlx5e_ipsec_addr *addrs,
+ struct xfrm_selector *sel)
+{
+ int i;
+
+ if (addrs->family == AF_INET) {
+ addrs->smask.m4 = word_to_mask(sel->prefixlen_s);
+ addrs->saddr.a4 &= addrs->smask.m4;
+ addrs->dmask.m4 = word_to_mask(sel->prefixlen_d);
+ addrs->daddr.a4 &= addrs->dmask.m4;
+ return;
+ }
+
+ for (i = 0; i < 4; i++) {
+ if (sel->prefixlen_s != 32 * i)
+ addrs->smask.m6[i] =
+ word_to_mask(sel->prefixlen_s - 32 * i);
+ addrs->saddr.a6[i] &= addrs->smask.m6[i];
+
+ if (sel->prefixlen_d != 32 * i)
+ addrs->dmask.m6[i] =
+ word_to_mask(sel->prefixlen_d - 32 * i);
+ addrs->daddr.a6[i] &= addrs->dmask.m6[i];
+ }
+}
+
static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
struct xfrm_policy *x,
struct netlink_ext_ack *extack)
@@ -1116,9 +1151,10 @@ mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
sel = &x->selector;
memset(attrs, 0, sizeof(*attrs));
- memcpy(&attrs->saddr, sel->saddr.a6, sizeof(attrs->saddr));
- memcpy(&attrs->daddr, sel->daddr.a6, sizeof(attrs->daddr));
- attrs->family = sel->family;
+ memcpy(&attrs->addrs.saddr, sel->saddr.a6, sizeof(attrs->addrs.saddr));
+ memcpy(&attrs->addrs.daddr, sel->daddr.a6, sizeof(attrs->addrs.daddr));
+ attrs->addrs.family = sel->family;
+ mlx5e_ipsec_policy_mask(&attrs->addrs, sel);
attrs->dir = x->xdo.dir;
attrs->action = x->action;
attrs->type = XFRM_DEV_OFFLOAD_PACKET;
@@ -1196,7 +1232,6 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = mlx5e_xfrm_add_state,
.xdo_dev_state_delete = mlx5e_xfrm_del_state,
.xdo_dev_state_free = mlx5e_xfrm_free_state,
- .xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
.xdo_dev_state_update_stats = mlx5e_xfrm_update_stats,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 7d943e93cf6d..a63c2289f8af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -76,27 +76,36 @@ struct mlx5_replay_esn {
u8 trigger : 1;
};
-struct mlx5_accel_esp_xfrm_attrs {
- u32 spi;
- u32 mode;
- struct aes_gcm_keymat aes_gcm;
-
+struct mlx5e_ipsec_addr {
union {
__be32 a4;
__be32 a6[4];
} saddr;
-
+ union {
+ __be32 m4;
+ __be32 m6[4];
+ } smask;
union {
__be32 a4;
__be32 a6[4];
} daddr;
+ union {
+ __be32 m4;
+ __be32 m6[4];
+ } dmask;
+ u8 family;
+};
+struct mlx5_accel_esp_xfrm_attrs {
+ u32 spi;
+ u32 mode;
+ struct aes_gcm_keymat aes_gcm;
+ struct mlx5e_ipsec_addr addrs;
struct upspec upspec;
u8 dir : 2;
u8 type : 2;
u8 drop : 1;
u8 encap : 1;
- u8 family;
struct mlx5_replay_esn replay_esn;
u32 authsize;
u32 reqid;
@@ -128,6 +137,7 @@ struct mlx5e_ipsec_hw_stats {
u64 ipsec_rx_bytes;
u64 ipsec_rx_drop_pkts;
u64 ipsec_rx_drop_bytes;
+ u64 ipsec_rx_drop_mismatch_sa_sel;
u64 ipsec_tx_pkts;
u64 ipsec_tx_bytes;
u64 ipsec_tx_drop_pkts;
@@ -184,6 +194,7 @@ struct mlx5e_ipsec_ft {
struct mutex mutex; /* Protect changes to this struct */
struct mlx5_flow_table *pol;
struct mlx5_flow_table *sa;
+ struct mlx5_flow_table *sa_sel;
struct mlx5_flow_table *status;
u32 refcnt;
};
@@ -195,6 +206,8 @@ struct mlx5e_ipsec_drop {
struct mlx5e_ipsec_rule {
struct mlx5_flow_handle *rule;
+ struct mlx5_flow_handle *status_pass;
+ struct mlx5_flow_handle *sa_sel;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_pkt_reformat *pkt_reformat;
struct mlx5_fc *fc;
@@ -206,6 +219,7 @@ struct mlx5e_ipsec_rule {
struct mlx5e_ipsec_miss {
struct mlx5_flow_group *group;
struct mlx5_flow_handle *rule;
+ struct mlx5_fc *fc;
};
struct mlx5e_ipsec_tx_create_attr {
@@ -274,18 +288,8 @@ struct mlx5e_ipsec_sa_entry {
};
struct mlx5_accel_pol_xfrm_attrs {
- union {
- __be32 a4;
- __be32 a6[4];
- } saddr;
-
- union {
- __be32 a4;
- __be32 a6[4];
- } daddr;
-
+ struct mlx5e_ipsec_addr addrs;
struct upspec upspec;
- u8 family;
u8 action;
u8 type : 2;
u8 dir : 2;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index e7b64679f121..98b6a3a623f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -16,6 +16,16 @@
#define MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE 16
#define IPSEC_TUNNEL_DEFAULT_TTL 0x40
+#define MLX5_IPSEC_FS_SA_SELECTOR_MAX_NUM_GROUPS 16
+
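+/* IPsec ASO reply values carried in metadata_reg_c_4 and matched by the
+ * RX status table rules; SW_CRYPTO_OFFLOAD is written by the driver for
+ * crypto-offload SAs.
+ */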
+enum {
+ MLX5_IPSEC_ASO_OK,
+ MLX5_IPSEC_ASO_BAD_REPLY,
+
+ /* For crypto offload, set by driver */
+ MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD = 0xAA,
+};
+
struct mlx5e_ipsec_fc {
struct mlx5_fc *cnt;
struct mlx5_fc *drop;
@@ -33,6 +43,9 @@ struct mlx5e_ipsec_tx {
};
struct mlx5e_ipsec_status_checks {
+ struct mlx5_flow_group *pass_group;
+ struct mlx5_flow_handle *packet_offload_pass_rule;
+ struct mlx5_flow_handle *crypto_offload_pass_rule;
struct mlx5_flow_group *drop_all_group;
struct mlx5e_ipsec_drop all;
};
@@ -41,10 +54,12 @@ struct mlx5e_ipsec_rx {
struct mlx5e_ipsec_ft ft;
struct mlx5e_ipsec_miss pol;
struct mlx5e_ipsec_miss sa;
- struct mlx5e_ipsec_rule status;
- struct mlx5e_ipsec_status_checks status_drops;
+ struct mlx5e_ipsec_miss sa_sel;
+ struct mlx5e_ipsec_status_checks status_checks;
struct mlx5e_ipsec_fc *fc;
struct mlx5_fs_chains *chains;
+ struct mlx5_flow_table *pol_miss_ft;
+ struct mlx5_flow_handle *pol_miss_rule;
u8 allow_tunnel_mode : 1;
};
@@ -130,11 +145,12 @@ static void ipsec_chains_put_table(struct mlx5_fs_chains *chains, u32 prio)
static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
int level, int prio,
+ int num_reserved_entries,
int max_num_groups, u32 flags)
{
struct mlx5_flow_table_attr ft_attr = {};
- ft_attr.autogroup.num_reserved_entries = 1;
+ ft_attr.autogroup.num_reserved_entries = num_reserved_entries;
ft_attr.autogroup.max_num_groups = max_num_groups;
ft_attr.max_fte = NUM_IPSEC_FTE;
ft_attr.level = level;
@@ -147,22 +163,35 @@ static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
static void ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
- mlx5_del_flow_rules(rx->status_drops.all.rule);
- mlx5_fc_destroy(ipsec->mdev, rx->status_drops.all.fc);
- mlx5_destroy_flow_group(rx->status_drops.drop_all_group);
+ mlx5_del_flow_rules(rx->status_checks.all.rule);
+ mlx5_fc_destroy(ipsec->mdev, rx->status_checks.all.fc);
+ mlx5_destroy_flow_group(rx->status_checks.drop_all_group);
}
static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
- mlx5_del_flow_rules(rx->status.rule);
+ mlx5_del_flow_rules(rx->status_checks.packet_offload_pass_rule);
+ mlx5_del_flow_rules(rx->status_checks.crypto_offload_pass_rule);
+}
- if (rx != ipsec->rx_esw)
- return;
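+/* Restrict a rule to one SA: the eswitch RX path delegates the match to
+ * mlx5_esw_ipsec_rx_rule_add_match_obj(), otherwise the SA is identified
+ * by metadata_reg_c_2 carrying ipsec_obj_id with BIT(31) set.
+ */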
+static void ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_spec *spec)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
-#ifdef CONFIG_MLX5_ESWITCH
- mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
-#endif
+ if (rx == ipsec->rx_esw) {
+ mlx5_esw_ipsec_rx_rule_add_match_obj(sa_entry, spec);
+ } else {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_2);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_2,
+ sa_entry->ipsec_obj_id | BIT(31));
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+ }
}
static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
@@ -200,11 +229,8 @@ static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 1);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
- MLX5_SET(fte_match_param, spec->match_value,
- misc_parameters_2.metadata_reg_c_2,
- sa_entry->ipsec_obj_id | BIT(31));
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -281,10 +307,8 @@ static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 1);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_2,
- sa_entry->ipsec_obj_id | BIT(31));
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -359,9 +383,9 @@ static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec,
goto err_rule;
}
- rx->status_drops.drop_all_group = g;
- rx->status_drops.all.rule = rule;
- rx->status_drops.all.fc = flow_counter;
+ rx->status_checks.drop_all_group = g;
+ rx->status_checks.all.rule = rule;
+ rx->status_checks.all.fc = flow_counter;
kvfree(flow_group_in);
kvfree(spec);
@@ -377,9 +401,52 @@ err_out:
return err;
}
-static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx,
- struct mlx5_flow_destination *dest)
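+/* Reserve a two-entry group near the end of the RX status table for the
+ * crypto and packet offload "pass" rules added below.
+ */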
+static int ipsec_rx_status_pass_group_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table *ft = rx->ft.status;
+ struct mlx5_flow_group *fg;
+ void *match_criteria;
+ u32 *flow_group_in;
+ int err = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS_2);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
+ match_criteria);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters_2.ipsec_syndrome);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters_2.metadata_reg_c_4);
+
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ start_flow_index, ft->max_fte - 3);
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ end_flow_index, ft->max_fte - 2);
+
+ fg = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(fg)) {
+ err = PTR_ERR(fg);
+ mlx5_core_warn(ipsec->mdev,
+ "Failed to create rx status pass flow group, err=%d\n",
+ err);
+ }
+ rx->status_checks.pass_group = fg;
+
+ kvfree(flow_group_in);
+ return err;
+}
+
+static struct mlx5_flow_handle *
+ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest,
+ u8 aso_ok)
{
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
@@ -388,7 +455,7 @@ static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
misc_parameters_2.ipsec_syndrome);
@@ -397,11 +464,11 @@ static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
MLX5_SET(fte_match_param, spec->match_value,
misc_parameters_2.ipsec_syndrome, 0);
MLX5_SET(fte_match_param, spec->match_value,
- misc_parameters_2.metadata_reg_c_4, 0);
+ misc_parameters_2.metadata_reg_c_4, aso_ok);
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
- flow_act.flags = FLOW_ACT_NO_APPEND;
+ flow_act.flags = FLOW_ACT_NO_APPEND | FLOW_ACT_IGNORE_FLOW_LEVEL;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
@@ -412,19 +479,19 @@ static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
goto err_rule;
}
- rx->status.rule = rule;
kvfree(spec);
- return 0;
+ return rule;
err_rule:
kvfree(spec);
- return err;
+ return ERR_PTR(err);
}
static void mlx5_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
ipsec_rx_status_pass_destroy(ipsec, rx);
+ mlx5_destroy_flow_group(rx->status_checks.pass_group);
ipsec_rx_status_drop_destroy(ipsec, rx);
}
@@ -432,19 +499,44 @@ static int mlx5_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx,
struct mlx5_flow_destination *dest)
{
+ struct mlx5_flow_destination pol_dest[2];
+ struct mlx5_flow_handle *rule;
int err;
err = ipsec_rx_status_drop_all_create(ipsec, rx);
if (err)
return err;
- err = ipsec_rx_status_pass_create(ipsec, rx, dest);
+ err = ipsec_rx_status_pass_group_create(ipsec, rx);
if (err)
- goto err_pass_create;
+ goto err_pass_group_create;
+
+ rule = ipsec_rx_status_pass_create(ipsec, rx, dest,
+ MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_crypto_offload_pass_create;
+ }
+ rx->status_checks.crypto_offload_pass_rule = rule;
+
+ pol_dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ pol_dest[0].ft = rx->ft.pol;
+ pol_dest[1] = dest[1];
+ rule = ipsec_rx_status_pass_create(ipsec, rx, pol_dest,
+ MLX5_IPSEC_ASO_OK);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_packet_offload_pass_create;
+ }
+ rx->status_checks.packet_offload_pass_rule = rule;
return 0;
-err_pass_create:
+err_packet_offload_pass_create:
+ mlx5_del_flow_rules(rx->status_checks.crypto_offload_pass_rule);
+err_crypto_offload_pass_create:
+ mlx5_destroy_flow_group(rx->status_checks.pass_group);
+err_pass_group_create:
ipsec_rx_status_drop_destroy(ipsec, rx);
return err;
}
@@ -493,6 +585,15 @@ out:
return err;
}
+static void ipsec_rx_update_default_dest(struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *old_dest,
+ struct mlx5_flow_destination *new_dest)
+{
+ mlx5_modify_rule_destination(rx->pol_miss_rule, new_dest, old_dest);
+ mlx5_modify_rule_destination(rx->status_checks.crypto_offload_pass_rule,
+ new_dest, old_dest);
+}
+
static void handle_ipsec_rx_bringup(struct mlx5e_ipsec *ipsec, u32 family)
{
struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
@@ -507,8 +608,7 @@ static void handle_ipsec_rx_bringup(struct mlx5e_ipsec *ipsec, u32 family)
new_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest);
- mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest);
+ ipsec_rx_update_default_dest(rx, &old_dest, &new_dest);
}
static void handle_ipsec_rx_cleanup(struct mlx5e_ipsec *ipsec, u32 family)
@@ -520,8 +620,7 @@ static void handle_ipsec_rx_cleanup(struct mlx5e_ipsec *ipsec, u32 family)
old_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
new_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false),
family2tt(family));
- mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest);
- mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest);
+ ipsec_rx_update_default_dest(rx, &old_dest, &new_dest);
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, ipsec->mdev);
}
@@ -577,13 +676,8 @@ static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec, u32 family)
mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
}
-static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx, u32 family)
+static void ipsec_rx_policy_destroy(struct mlx5e_ipsec_rx *rx)
{
- /* disconnect */
- if (rx != ipsec->rx_esw)
- ipsec_rx_ft_disconnect(ipsec, family);
-
if (rx->chains) {
ipsec_chains_destroy(rx->chains);
} else {
@@ -592,6 +686,29 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
mlx5_destroy_flow_table(rx->ft.pol);
}
+ if (rx->pol_miss_rule) {
+ mlx5_del_flow_rules(rx->pol_miss_rule);
+ mlx5_destroy_flow_table(rx->pol_miss_ft);
+ }
+}
+
+static void ipsec_rx_sa_selector_destroy(struct mlx5_core_dev *mdev,
+ struct mlx5e_ipsec_rx *rx)
+{
+ mlx5_del_flow_rules(rx->sa_sel.rule);
+ mlx5_fc_destroy(mdev, rx->sa_sel.fc);
+ rx->sa_sel.fc = NULL;
+ mlx5_destroy_flow_group(rx->sa_sel.group);
+ mlx5_destroy_flow_table(rx->ft.sa_sel);
+}
+
+static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx, u32 family)
+{
+ /* disconnect */
+ if (rx != ipsec->rx_esw)
+ ipsec_rx_ft_disconnect(ipsec, family);
+
mlx5_del_flow_rules(rx->sa.rule);
mlx5_destroy_flow_group(rx->sa.group);
mlx5_destroy_flow_table(rx->ft.sa);
@@ -600,7 +717,17 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
mlx5_ipsec_rx_status_destroy(ipsec, rx);
mlx5_destroy_flow_table(rx->ft.status);
+ ipsec_rx_sa_selector_destroy(mdev, rx);
+
+ ipsec_rx_policy_destroy(rx);
+
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
+
+#ifdef CONFIG_MLX5_ESWITCH
+ if (rx == ipsec->rx_esw)
+ mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch),
+ 0, 1, 0);
+#endif
}
static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
@@ -652,6 +779,28 @@ static int ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
return 0;
}
+static void ipsec_rx_sa_miss_dest_get(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5e_ipsec_rx_create_attr *attr,
+ struct mlx5_flow_destination *dest,
+ struct mlx5_flow_destination *miss_dest)
+{
+ if (rx == ipsec->rx_esw)
+ *miss_dest = *dest;
+ else
+ *miss_dest =
+ mlx5_ttc_get_default_dest(attr->ttc,
+ family2tt(attr->family));
+}
+
+static void ipsec_rx_default_dest_get(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest)
+{
+ dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest->ft = rx->pol_miss_ft;
+}
+
static void ipsec_rx_ft_connect(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx,
struct mlx5e_ipsec_rx_create_attr *attr)
@@ -659,15 +808,219 @@ static void ipsec_rx_ft_connect(struct mlx5e_ipsec *ipsec,
struct mlx5_flow_destination dest = {};
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = rx->ft.pol;
+ dest.ft = rx->ft.sa;
mlx5_ttc_fwd_dest(attr->ttc, family2tt(attr->family), &dest);
}
+static int ipsec_rx_chains_create_miss(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5e_ipsec_rx_create_attr *attr,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ if (rx == ipsec->rx_esw) {
+ /* No need to create a miss table in switchdev mode;
+ * just use the root chain table as the policy miss destination.
+ */
+ rx->pol_miss_ft = dest->ft;
+ return 0;
+ }
+
+ ft_attr.max_fte = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft_attr.level = attr->pol_level;
+ ft_attr.prio = attr->prio;
+
+ ft = mlx5_create_auto_grouped_flow_table(attr->ns, &ft_attr);
+ if (IS_ERR(ft))
+ return PTR_ERR(ft);
+
+ rule = mlx5_add_flow_rules(ft, NULL, &flow_act, dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_rule;
+ }
+
+ rx->pol_miss_ft = ft;
+ rx->pol_miss_rule = rule;
+
+ return 0;
+
+err_rule:
+ mlx5_destroy_flow_table(ft);
+ return err;
+}
+
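+/* Create the RX policy table, either as prio chains or as a plain table
+ * with a miss rule; in both cases unmatched packets are sent to the
+ * policy-miss destination created above.
+ */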
+static int ipsec_rx_policy_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5e_ipsec_rx_create_attr *attr,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_destination default_dest;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ err = ipsec_rx_chains_create_miss(ipsec, rx, attr, dest);
+ if (err)
+ return err;
+
+ ipsec_rx_default_dest_get(ipsec, rx, &default_dest);
+
+ if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
+ rx->chains = ipsec_chains_create(mdev,
+ default_dest.ft,
+ attr->chains_ns,
+ attr->prio,
+ attr->sa_level,
+ &rx->ft.pol);
+ if (IS_ERR(rx->chains))
+ err = PTR_ERR(rx->chains);
+ } else {
+ ft = ipsec_ft_create(attr->ns, attr->pol_level,
+ attr->prio, 1, 2, 0);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_out;
+ }
+ rx->ft.pol = ft;
+
+ err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol,
+ &default_dest);
+ if (err)
+ mlx5_destroy_flow_table(rx->ft.pol);
+ }
+
+ if (!err)
+ return 0;
+
+err_out:
+ if (rx->pol_miss_rule) {
+ mlx5_del_flow_rules(rx->pol_miss_rule);
+ mlx5_destroy_flow_table(rx->pol_miss_ft);
+ }
+ return err;
+}
+
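+/* Create the RX SA-selector table: its last FTE is a catch-all group
+ * whose rule counts and drops packets that reached the table but did not
+ * match any per-SA upper-protocol selector rule.
+ */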
+static int ipsec_rx_sa_selector_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5e_ipsec_rx_create_attr *attr)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_destination dest;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ u32 *flow_group_in;
+ struct mlx5_fc *fc;
+ int err;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ ft = ipsec_ft_create(attr->ns, attr->status_level, attr->prio, 1,
+ MLX5_IPSEC_FS_SA_SELECTOR_MAX_NUM_GROUPS, 0);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "Failed to create RX SA selector flow table, err=%d\n",
+ err);
+ goto err_ft;
+ }
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
+ ft->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ ft->max_fte - 1);
+ fg = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(fg)) {
+ err = PTR_ERR(fg);
+ mlx5_core_err(mdev, "Failed to create RX SA selector miss group, err=%d\n",
+ err);
+ goto err_fg;
+ }
+
+ fc = mlx5_fc_create(mdev, false);
+ if (IS_ERR(fc)) {
+ err = PTR_ERR(fc);
+ mlx5_core_err(mdev,
+ "Failed to create ipsec RX SA selector miss rule counter, err=%d\n",
+ err);
+ goto err_cnt;
+ }
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter = fc;
+ flow_act.action =
+ MLX5_FLOW_CONTEXT_ACTION_COUNT | MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+ rule = mlx5_add_flow_rules(ft, NULL, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Failed to create RX SA selector miss drop rule, err=%d\n",
+ err);
+ goto err_rule;
+ }
+
+ rx->ft.sa_sel = ft;
+ rx->sa_sel.group = fg;
+ rx->sa_sel.fc = fc;
+ rx->sa_sel.rule = rule;
+
+ kvfree(flow_group_in);
+
+ return 0;
+
+err_rule:
+ mlx5_fc_destroy(mdev, fc);
+err_cnt:
+ mlx5_destroy_flow_group(fg);
+err_fg:
+ mlx5_destroy_flow_table(ft);
+err_ft:
+ kvfree(flow_group_in);
+ return err;
+}
+
+/* The decryption processing is as follows:
+ *
+ * +----------+ +-------------+
+ * | | | |
+ * | Kernel <--------------+----------+ policy miss <------------+
+ * | | ^ | | ^
+ * +----^-----+ | +-------------+ |
+ * | crypto |
+ * miss offload ok allow/default
+ * ^ ^ ^
+ * | | packet |
+ * +----+---------+ +----+-------------+ offload ok +------+---+
+ * | | | | (no UPSPEC) | |
+ * | SA (decrypt) +-----> status +--->------->----+ policy |
+ * | | | | | |
+ * +--------------+ ++---------+-------+ +-^----+---+
+ * | | | |
+ * v packet +-->->---+ v
+ * | offload ok match |
+ * fails (with UPSPEC) | block
+ * | | +-------------+-+ |
+ * v v | | miss v
+ * drop +---> SA sel +--------->drop
+ * | |
+ * +---------------+
+ */
+
static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx, u32 family)
{
+ struct mlx5_flow_destination dest[2], miss_dest;
struct mlx5e_ipsec_rx_create_attr attr;
- struct mlx5_flow_destination dest[2];
struct mlx5_flow_table *ft;
u32 flags = 0;
int err;
@@ -678,80 +1031,61 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
if (err)
return err;
- ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 0);
+ ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 4, 0);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft_status;
}
rx->ft.status = ft;
- dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[1].counter = rx->fc->cnt;
- err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
+ err = ipsec_rx_sa_selector_create(ipsec, rx, &attr);
if (err)
- goto err_add;
+ goto err_fs_ft_sa_sel;
/* Create FT */
if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
if (rx->allow_tunnel_mode)
flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 2, flags);
+ ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 1, 2, flags);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft;
}
rx->ft.sa = ft;
- err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, dest);
+ ipsec_rx_sa_miss_dest_get(ipsec, rx, &attr, &dest[0], &miss_dest);
+ err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, &miss_dest);
if (err)
goto err_fs;
- if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
- rx->chains = ipsec_chains_create(mdev, rx->ft.sa,
- attr.chains_ns,
- attr.prio,
- attr.pol_level,
- &rx->ft.pol);
- if (IS_ERR(rx->chains)) {
- err = PTR_ERR(rx->chains);
- goto err_pol_ft;
- }
-
- goto connect;
- }
+ err = ipsec_rx_policy_create(ipsec, rx, &attr, &dest[0]);
+ if (err)
+ goto err_policy;
- ft = ipsec_ft_create(attr.ns, attr.pol_level, attr.prio, 2, 0);
- if (IS_ERR(ft)) {
- err = PTR_ERR(ft);
- goto err_pol_ft;
- }
- rx->ft.pol = ft;
- memset(dest, 0x00, 2 * sizeof(*dest));
- dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[0].ft = rx->ft.sa;
- err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol, dest);
+ dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[1].counter = rx->fc->cnt;
+ err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
if (err)
- goto err_pol_miss;
+ goto err_add;
-connect:
/* connect */
if (rx != ipsec->rx_esw)
ipsec_rx_ft_connect(ipsec, rx, &attr);
return 0;
-err_pol_miss:
- mlx5_destroy_flow_table(rx->ft.pol);
-err_pol_ft:
+err_add:
+ ipsec_rx_policy_destroy(rx);
+err_policy:
mlx5_del_flow_rules(rx->sa.rule);
mlx5_destroy_flow_group(rx->sa.group);
err_fs:
mlx5_destroy_flow_table(rx->ft.sa);
-err_fs_ft:
if (rx->allow_tunnel_mode)
mlx5_eswitch_unblock_encap(mdev);
- mlx5_ipsec_rx_status_destroy(ipsec, rx);
-err_add:
+err_fs_ft:
+ ipsec_rx_sa_selector_destroy(mdev, rx);
+err_fs_ft_sa_sel:
mlx5_destroy_flow_table(rx->ft.status);
err_fs_ft_status:
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
@@ -941,7 +1275,7 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
int err;
ipsec_tx_create_attr_set(ipsec, tx, &attr);
- ft = ipsec_ft_create(tx->ns, attr.cnt_level, attr.prio, 1, 0);
+ ft = ipsec_ft_create(tx->ns, attr.cnt_level, attr.prio, 1, 1, 0);
if (IS_ERR(ft))
return PTR_ERR(ft);
tx->ft.status = ft;
@@ -954,7 +1288,7 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
if (tx->allow_tunnel_mode)
flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 4, flags);
+ ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 1, 4, flags);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_sa_ft;
@@ -982,7 +1316,7 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
goto connect_roce;
}
- ft = ipsec_ft_create(tx->ns, attr.pol_level, attr.prio, 2, 0);
+ ft = ipsec_ft_create(tx->ns, attr.pol_level, attr.prio, 1, 2, 0);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_pol_ft;
@@ -1150,9 +1484,14 @@ static void tx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 prio, int type)
mutex_unlock(&tx->ft.mutex);
}
-static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
- __be32 *daddr)
+static void setup_fte_addr4(struct mlx5_flow_spec *spec,
+ struct mlx5e_ipsec_addr *addrs)
{
+ __be32 *saddr = &addrs->saddr.a4;
+ __be32 *smask = &addrs->smask.m4;
+ __be32 *daddr = &addrs->daddr.a4;
+ __be32 *dmask = &addrs->dmask.m4;
+
if (!*saddr && !*daddr)
return;
@@ -1164,21 +1503,26 @@ static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
if (*saddr) {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
- outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), smask, 4);
}
if (*daddr) {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
- outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), dmask, 4);
}
}
-static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
- __be32 *daddr)
+static void setup_fte_addr6(struct mlx5_flow_spec *spec,
+ struct mlx5e_ipsec_addr *addrs)
{
+ __be32 *saddr = addrs->saddr.a6;
+ __be32 *smask = addrs->smask.m6;
+ __be32 *daddr = addrs->daddr.a6;
+ __be32 *dmask = addrs->dmask.m6;
+
if (addr6_all_zero(saddr) && addr6_all_zero(daddr))
return;
@@ -1190,15 +1534,15 @@ static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
if (!addr6_all_zero(saddr)) {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
- memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), dmask, 16);
}
if (!addr6_all_zero(daddr)) {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
- memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), smask, 16);
}
}
@@ -1340,7 +1684,8 @@ static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8
MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, action[2], field,
MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
- MLX5_SET(set_action_in, action[2], data, 0);
+ MLX5_SET(set_action_in, action[2], data,
+ MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD);
MLX5_SET(set_action_in, action[2], offset, 0);
MLX5_SET(set_action_in, action[2], length, 32);
}
@@ -1387,7 +1732,7 @@ setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) {
bfflen += sizeof(*esp_hdr) + 8;
- switch (attrs->family) {
+ switch (attrs->addrs.family) {
case AF_INET:
bfflen += sizeof(*iphdr);
break;
@@ -1404,7 +1749,7 @@ setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
return -ENOMEM;
eth_hdr = (struct ethhdr *)reformatbf;
- switch (attrs->family) {
+ switch (attrs->addrs.family) {
case AF_INET:
eth_hdr->h_proto = htons(ETH_P_IP);
break;
@@ -1427,11 +1772,11 @@ setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
reformat_params->param_0 = attrs->authsize;
hdr = reformatbf + sizeof(*eth_hdr);
- switch (attrs->family) {
+ switch (attrs->addrs.family) {
case AF_INET:
iphdr = (struct iphdr *)hdr;
- memcpy(&iphdr->saddr, &attrs->saddr.a4, 4);
- memcpy(&iphdr->daddr, &attrs->daddr.a4, 4);
+ memcpy(&iphdr->saddr, &attrs->addrs.saddr.a4, 4);
+ memcpy(&iphdr->daddr, &attrs->addrs.daddr.a4, 4);
iphdr->version = 4;
iphdr->ihl = 5;
iphdr->ttl = IPSEC_TUNNEL_DEFAULT_TTL;
@@ -1440,8 +1785,8 @@ setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
break;
case AF_INET6:
ipv6hdr = (struct ipv6hdr *)hdr;
- memcpy(&ipv6hdr->saddr, &attrs->saddr.a6, 16);
- memcpy(&ipv6hdr->daddr, &attrs->daddr.a6, 16);
+ memcpy(&ipv6hdr->saddr, &attrs->addrs.saddr.a6, 16);
+ memcpy(&ipv6hdr->daddr, &attrs->addrs.daddr.a6, 16);
ipv6hdr->nexthdr = IPPROTO_ESP;
ipv6hdr->version = 6;
ipv6hdr->hop_limit = IPSEC_TUNNEL_DEFAULT_TTL;
@@ -1475,7 +1820,7 @@ static int get_reformat_type(struct mlx5_accel_esp_xfrm_attrs *attrs)
return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP;
return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
case XFRM_DEV_OFFLOAD_OUT:
- if (attrs->family == AF_INET) {
+ if (attrs->addrs.family == AF_INET) {
if (attrs->encap)
return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4;
return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
@@ -1576,6 +1921,85 @@ static int setup_pkt_reformat(struct mlx5e_ipsec *ipsec,
return 0;
}
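+/* For SAs with an upper-protocol selector: add a status-table rule that
+ * sends successfully decrypted packets of this SA to the SA-selector
+ * table, and an SA-selector rule that forwards packets matching the
+ * upper protocol to the policy table. Anything else hits the selector
+ * miss rule and is dropped.
+ */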
+static int rx_add_rule_sa_selector(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5e_ipsec_rx *rx,
+ struct upspec *upspec)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_destination dest[2];
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.ipsec_syndrome);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_4);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.ipsec_syndrome, 0);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_4, 0);
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+
+ ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[0].ft = rx->ft.sa_sel;
+ dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[1].counter = rx->fc->cnt;
+
+ rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx pass rule, err=%d\n",
+ err);
+ goto err_add_status_pass_rule;
+ }
+
+ sa_entry->ipsec_rule.status_pass = rule;
+
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.ipsec_syndrome, 0);
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_4, 0);
+
+ setup_fte_upper_proto_match(spec, upspec);
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[0].ft = rx->ft.pol;
+
+ rule = mlx5_add_flow_rules(rx->ft.sa_sel, spec, &flow_act, &dest[0], 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx sa selector rule, err=%d\n",
+ err);
+ goto err_add_sa_sel_rule;
+ }
+
+ sa_entry->ipsec_rule.sa_sel = rule;
+
+ kvfree(spec);
+ return 0;
+
+err_add_sa_sel_rule:
+ mlx5_del_flow_rules(sa_entry->ipsec_rule.status_pass);
+err_add_status_pass_rule:
+ kvfree(spec);
+ return err;
+}
+
static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
@@ -1589,7 +2013,7 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
struct mlx5_fc *counter;
int err = 0;
- rx = rx_ft_get(mdev, ipsec, attrs->family, attrs->type);
+ rx = rx_ft_get(mdev, ipsec, attrs->addrs.family, attrs->type);
if (IS_ERR(rx))
return PTR_ERR(rx);
@@ -1599,16 +2023,15 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
goto err_alloc;
}
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ if (attrs->addrs.family == AF_INET)
+ setup_fte_addr4(spec, &attrs->addrs);
else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+ setup_fte_addr6(spec, &attrs->addrs);
setup_fte_spi(spec, attrs->spi, attrs->encap);
if (!attrs->encap)
setup_fte_esp(spec);
setup_fte_no_frags(spec);
- setup_fte_upper_proto_match(spec, &attrs->upspec);
if (!attrs->drop) {
if (rx != ipsec->rx_esw)
@@ -1656,6 +2079,13 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
goto err_add_flow;
}
+
+ if (attrs->upspec.proto && attrs->type == XFRM_DEV_OFFLOAD_PACKET) {
+ err = rx_add_rule_sa_selector(sa_entry, rx, &attrs->upspec);
+ if (err)
+ goto err_add_sa_sel;
+ }
+
if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
err = rx_add_rule_drop_replay(sa_entry, rx);
if (err)
@@ -1679,6 +2109,11 @@ err_drop_reason:
mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.replay.fc);
}
err_add_replay:
+ if (sa_entry->ipsec_rule.sa_sel) {
+ mlx5_del_flow_rules(sa_entry->ipsec_rule.sa_sel);
+ mlx5_del_flow_rules(sa_entry->ipsec_rule.status_pass);
+ }
+err_add_sa_sel:
mlx5_del_flow_rules(rule);
err_add_flow:
mlx5_fc_destroy(mdev, counter);
@@ -1691,7 +2126,7 @@ err_pkt_reformat:
err_mod_header:
kvfree(spec);
err_alloc:
- rx_ft_put(ipsec, attrs->family, attrs->type);
+ rx_ft_put(ipsec, attrs->addrs.family, attrs->type);
return err;
}
@@ -1723,10 +2158,10 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
switch (attrs->type) {
case XFRM_DEV_OFFLOAD_CRYPTO:
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ if (attrs->addrs.family == AF_INET)
+ setup_fte_addr4(spec, &attrs->addrs);
else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+ setup_fte_addr6(spec, &attrs->addrs);
setup_fte_spi(spec, attrs->spi, false);
setup_fte_esp(spec);
setup_fte_reg_a(spec);
@@ -1810,10 +2245,10 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
}
tx = ipsec_tx(ipsec, attrs->type);
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ if (attrs->addrs.family == AF_INET)
+ setup_fte_addr4(spec, &attrs->addrs);
else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+ setup_fte_addr6(spec, &attrs->addrs);
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
@@ -1883,12 +2318,12 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
struct mlx5e_ipsec_rx *rx;
int err, dstn = 0;
- ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->family, attrs->prio,
- attrs->type);
+ ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->addrs.family,
+ attrs->prio, attrs->type);
if (IS_ERR(ft))
return PTR_ERR(ft);
- rx = ipsec_rx(pol_entry->ipsec, attrs->family, attrs->type);
+ rx = ipsec_rx(pol_entry->ipsec, attrs->addrs.family, attrs->type);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
@@ -1896,10 +2331,10 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
goto err_alloc;
}
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ if (attrs->addrs.family == AF_INET)
+ setup_fte_addr4(spec, &attrs->addrs);
else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+ setup_fte_addr6(spec, &attrs->addrs);
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
@@ -1923,8 +2358,7 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
flow_act.flags |= FLOW_ACT_NO_APPEND;
if (rx == ipsec->rx_esw && rx->chains)
flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
- dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[dstn].ft = rx->ft.sa;
+ ipsec_rx_default_dest_get(ipsec, rx, &dest[dstn]);
dstn++;
rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
if (IS_ERR(rule)) {
@@ -1940,7 +2374,8 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
err_action:
kvfree(spec);
err_alloc:
- rx_ft_put_policy(pol_entry->ipsec, attrs->family, attrs->prio, attrs->type);
+ rx_ft_put_policy(pol_entry->ipsec, attrs->addrs.family, attrs->prio,
+ attrs->type);
return err;
}
@@ -2061,6 +2496,7 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
stats->ipsec_rx_bytes = 0;
stats->ipsec_rx_drop_pkts = 0;
stats->ipsec_rx_drop_bytes = 0;
+ stats->ipsec_rx_drop_mismatch_sa_sel = 0;
stats->ipsec_tx_pkts = 0;
stats->ipsec_tx_bytes = 0;
stats->ipsec_tx_drop_pkts = 0;
@@ -2070,6 +2506,9 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_rx_pkts, &stats->ipsec_rx_bytes);
mlx5_fc_query(mdev, fc->drop, &stats->ipsec_rx_drop_pkts,
&stats->ipsec_rx_drop_bytes);
+ if (ipsec->rx_ipv4->sa_sel.fc)
+ mlx5_fc_query(mdev, ipsec->rx_ipv4->sa_sel.fc,
+ &stats->ipsec_rx_drop_mismatch_sa_sel, &bytes);
fc = ipsec->tx->fc;
mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_tx_pkts, &stats->ipsec_tx_bytes);
@@ -2098,6 +2537,11 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
stats->ipsec_tx_drop_pkts += packets;
stats->ipsec_tx_drop_bytes += bytes;
}
+
+ if (ipsec->rx_esw->sa_sel.fc &&
+ !mlx5_fc_query(mdev, ipsec->rx_esw->sa_sel.fc,
+ &packets, &bytes))
+ stats->ipsec_rx_drop_mismatch_sa_sel += packets;
}
}
@@ -2195,12 +2639,18 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
mlx5_del_flow_rules(ipsec_rule->auth.rule);
mlx5_fc_destroy(mdev, ipsec_rule->auth.fc);
+ if (ipsec_rule->sa_sel) {
+ mlx5_del_flow_rules(ipsec_rule->sa_sel);
+ mlx5_del_flow_rules(ipsec_rule->status_pass);
+ }
+
if (ipsec_rule->replay.rule) {
mlx5_del_flow_rules(ipsec_rule->replay.rule);
mlx5_fc_destroy(mdev, ipsec_rule->replay.fc);
}
mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
- rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type);
+ rx_ft_put(sa_entry->ipsec, sa_entry->attrs.addrs.family,
+ sa_entry->attrs.type);
}
int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
@@ -2236,7 +2686,8 @@ void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);
if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
- rx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.family,
+ rx_ft_put_policy(pol_entry->ipsec,
+ pol_entry->attrs.addrs.family,
pol_entry->attrs.prio, pol_entry->attrs.type);
return;
}
@@ -2376,7 +2827,7 @@ bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry)
struct mlx5e_ipsec_rx *rx;
struct mlx5e_ipsec_tx *tx;
- rx = ipsec_rx(sa_entry->ipsec, attrs->family, attrs->type);
+ rx = ipsec_rx(sa_entry->ipsec, attrs->addrs.family, attrs->type);
tx = ipsec_tx(sa_entry->ipsec, attrs->type);
if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
return tx->allow_tunnel_mode;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
index 92bf3fa44a3b..93be388068f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -42,6 +42,7 @@ static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_pkts) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_mismatch_sa_sel) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_pkts) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_drop_pkts) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index cae39198b4db..fdf9e9bb99ac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -42,6 +42,9 @@
#include "lib/clock.h"
#include "en/fs_ethtool.h"
+#define LANES_UNKNOWN 0
+#define MAX_LANES 8
+
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo)
{
@@ -237,14 +240,33 @@ void mlx5e_build_ptys2ethtool_map(void)
ETHTOOL_LINK_MODE_800000baseDR8_2_Full_BIT,
ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT,
ETHTOOL_LINK_MODE_800000baseVR8_Full_BIT);
-}
-
-static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_200GAUI_1_200GBASE_CR1_KR1, ext,
+ ETHTOOL_LINK_MODE_200000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseDR_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseDR_2_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseVR_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_400GAUI_2_400GBASE_CR2_KR2, ext,
+ ETHTOOL_LINK_MODE_400000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseDR2_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseDR2_2_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseSR2_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseVR2_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_800GAUI_4_800GBASE_CR4_KR4, ext,
+ ETHTOOL_LINK_MODE_800000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseDR4_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseDR4_2_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseVR4_Full_BIT);
+}
+
+static void mlx5e_ethtool_get_speed_arr(bool ext,
struct ptys2ethtool_config **arr,
u32 *size)
{
- bool ext = mlx5_ptys_ext_supported(mdev);
-
*arr = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
*size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
ARRAY_SIZE(ptys2legacy_ethtool_table);
@@ -891,37 +913,19 @@ int mlx5e_set_per_queue_coalesce(struct net_device *dev, u32 queue,
return mlx5e_ethtool_set_per_queue_coalesce(priv, queue, coal);
}
-static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
- unsigned long *supported_modes,
- u32 eth_proto_cap)
-{
- unsigned long proto_cap = eth_proto_cap;
- struct ptys2ethtool_config *table;
- u32 max_size;
- int proto;
-
- mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size);
- for_each_set_bit(proto, &proto_cap, max_size)
- bitmap_or(supported_modes, supported_modes,
- table[proto].supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
-}
-
-static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
- u32 eth_proto_cap, bool ext)
+static void ptys2ethtool_process_link(u32 eth_eproto, bool ext, bool advertised,
+ unsigned long *modes)
{
- unsigned long proto_cap = eth_proto_cap;
+ unsigned long eproto = eth_eproto;
struct ptys2ethtool_config *table;
u32 max_size;
int proto;
- table = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
- max_size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
- ARRAY_SIZE(ptys2legacy_ethtool_table);
-
- for_each_set_bit(proto, &proto_cap, max_size)
- bitmap_or(advertising_modes, advertising_modes,
- table[proto].advertised,
+ mlx5e_ethtool_get_speed_arr(ext, &table, &max_size);
+ for_each_set_bit(proto, &eproto, max_size)
+ bitmap_or(modes, modes,
+ advertised ?
+ table[proto].advertised : table[proto].supported,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
@@ -931,6 +935,7 @@ static const u32 pplm_fec_2_ethtool[] = {
[MLX5E_FEC_RS_528_514] = ETHTOOL_FEC_RS,
[MLX5E_FEC_RS_544_514] = ETHTOOL_FEC_RS,
[MLX5E_FEC_LLRS_272_257_1] = ETHTOOL_FEC_LLRS,
+ [MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD] = ETHTOOL_FEC_RS,
};
static u32 pplm2ethtool_fec(u_long fec_mode, unsigned long size)
@@ -1074,50 +1079,53 @@ static void ptys2ethtool_supported_advertised_port(struct mlx5_core_dev *mdev,
}
}
-static void get_speed_duplex(struct net_device *netdev,
- u32 eth_proto_oper, bool force_legacy,
- u16 data_rate_oper,
- struct ethtool_link_ksettings *link_ksettings)
+static void get_link_properties(struct net_device *netdev,
+ u32 eth_proto_oper, bool force_legacy,
+ u16 data_rate_oper,
+ struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- u32 speed = SPEED_UNKNOWN;
+ const struct mlx5_link_info *info;
u8 duplex = DUPLEX_UNKNOWN;
+ u32 speed = SPEED_UNKNOWN;
+ u32 lanes = LANES_UNKNOWN;
if (!netif_carrier_ok(netdev))
goto out;
- speed = mlx5_port_ptys2speed(priv->mdev, eth_proto_oper, force_legacy);
- if (!speed) {
- if (data_rate_oper)
- speed = 100 * data_rate_oper;
- else
- speed = SPEED_UNKNOWN;
- goto out;
+ info = mlx5_port_ptys2info(priv->mdev, eth_proto_oper, force_legacy);
+ if (info) {
+ speed = info->speed;
+ lanes = info->lanes;
+ duplex = DUPLEX_FULL;
+ } else if (data_rate_oper) {
+ speed = 100 * data_rate_oper;
+ lanes = MAX_LANES;
}
- duplex = DUPLEX_FULL;
-
out:
- link_ksettings->base.speed = speed;
link_ksettings->base.duplex = duplex;
+ link_ksettings->base.speed = speed;
+ link_ksettings->lanes = lanes;
}
static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
struct ethtool_link_ksettings *link_ksettings)
{
unsigned long *supported = link_ksettings->link_modes.supported;
- ptys2ethtool_supported_link(mdev, supported, eth_proto_cap);
+ bool ext = mlx5_ptys_ext_supported(mdev);
+
+ ptys2ethtool_process_link(eth_proto_cap, ext, false, supported);
ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
}
-static void get_advertising(u32 eth_proto_cap, u8 tx_pause, u8 rx_pause,
+static void get_advertising(u32 eth_proto_admin, u8 tx_pause, u8 rx_pause,
struct ethtool_link_ksettings *link_ksettings,
bool ext)
{
unsigned long *advertising = link_ksettings->link_modes.advertising;
- ptys2ethtool_adver_link(advertising, eth_proto_cap, ext);
-
+ ptys2ethtool_process_link(eth_proto_admin, ext, true, advertising);
if (rx_pause)
ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
if (tx_pause ^ rx_pause)
@@ -1173,7 +1181,7 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
bool ext = mlx5_ptys_ext_supported(mdev);
- ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
+ ptys2ethtool_process_link(eth_proto_lp, ext, true, lp_advertising);
}
static int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
@@ -1235,8 +1243,8 @@ static int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
get_supported(mdev, eth_proto_cap, link_ksettings);
get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
admin_ext);
- get_speed_duplex(priv->netdev, eth_proto_oper, !admin_ext,
- data_rate_oper, link_ksettings);
+ get_link_properties(priv->netdev, eth_proto_oper, !admin_ext,
+ data_rate_oper, link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
connector_type = connector_type < MLX5E_CONNECTOR_TYPE_NUMBER ?
@@ -1341,28 +1349,22 @@ static bool ext_link_mode_requested(const unsigned long *adver)
return bitmap_intersects(modes, adver, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static bool ext_requested(u8 autoneg, const unsigned long *adver, bool ext_supported)
-{
- bool ext_link_mode = ext_link_mode_requested(adver);
-
- return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_supported;
-}
-
static int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
const struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_port_eth_proto eproto;
+ struct mlx5_link_info info = {};
const unsigned long *adver;
bool an_changes = false;
u8 an_disable_admin;
bool ext_supported;
+ bool ext_requested;
u8 an_disable_cap;
bool an_disable;
u32 link_modes;
u8 an_status;
u8 autoneg;
- u32 speed;
bool ext;
int err;
@@ -1370,13 +1372,15 @@ static int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
adver = link_ksettings->link_modes.advertising;
autoneg = link_ksettings->base.autoneg;
- speed = link_ksettings->base.speed;
+ info.speed = link_ksettings->base.speed;
+ info.lanes = link_ksettings->lanes;
ext_supported = mlx5_ptys_ext_supported(mdev);
- ext = ext_requested(autoneg, adver, ext_supported);
- if (!ext_supported && ext)
+ ext_requested = ext_link_mode_requested(adver);
+ if (!ext_supported && ext_requested)
return -EOPNOTSUPP;
+ ext = autoneg == AUTONEG_ENABLE ? ext_requested : ext_supported;
ethtool2ptys_adver_func = ext ? mlx5e_ethtool2ptys_ext_adver_link :
mlx5e_ethtool2ptys_adver_link;
err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
@@ -1386,7 +1390,7 @@ static int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
goto out;
}
link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
- mlx5_port_speed2linkmodes(mdev, speed, !ext);
+ mlx5_port_info2linkmodes(mdev, &info, !ext);
err = mlx5e_speed_validate(priv->netdev, ext, link_modes, autoneg);
if (err)
@@ -1458,18 +1462,27 @@ static int mlx5e_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *
{
struct mlx5e_priv *priv = netdev_priv(netdev);
u32 rss_context = rxfh->rss_context;
+ bool symmetric;
int err;
mutex_lock(&priv->state_lock);
err = mlx5e_rx_res_rss_get_rxfh(priv->rx_res, rss_context,
- rxfh->indir, rxfh->key, &rxfh->hfunc);
+ rxfh->indir, rxfh->key, &rxfh->hfunc, &symmetric);
mutex_unlock(&priv->state_lock);
- return err;
+
+ if (err)
+ return err;
+
+ if (symmetric)
+ rxfh->input_xfrm = RXH_XFRM_SYM_OR_XOR;
+
+ return 0;
}
static int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
+ bool symmetric = rxfh->input_xfrm == RXH_XFRM_SYM_OR_XOR;
struct mlx5e_priv *priv = netdev_priv(dev);
u32 *rss_context = &rxfh->rss_context;
u8 hfunc = rxfh->hfunc;
@@ -1504,7 +1517,8 @@ static int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxf
err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, *rss_context,
rxfh->indir, rxfh->key,
- hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc);
+ hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc,
+ rxfh->input_xfrm == RXH_XFRM_NO_CHANGE ? NULL : &symmetric);
unlock:
mutex_unlock(&priv->state_lock);
@@ -2018,7 +2032,7 @@ static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev,
if (size_read < 0) {
NL_SET_ERR_MSG_FMT_MOD(
extack,
- "Query module eeprom by page failed, read %u bytes, err %d\n",
+ "Query module eeprom by page failed, read %u bytes, err %d",
i, size_read);
return i;
}
@@ -2607,12 +2621,14 @@ static void mlx5e_get_ts_stats(struct net_device *netdev,
}
const struct ethtool_ops mlx5e_ethtool_ops = {
+ .cap_link_lanes_supported = true,
.cap_rss_ctx_supported = true,
.rxfh_per_ctx_key = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_USE_CQE,
+ .supported_input_xfrm = RXH_XFRM_SYM_OR_XOR,
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ext_state = mlx5e_get_link_ext_state,
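/*
 * Sketch only, not part of the patch: the bitmap-merge idiom that the
 * consolidated ptys2ethtool_process_link() above relies on. The table
 * layout, sizes, and names below are invented for illustration.
 */
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define SKETCH_MODE_BITS 128

struct sketch_ptys2ethtool_cfg {
	DECLARE_BITMAP(supported, SKETCH_MODE_BITS);
	DECLARE_BITMAP(advertised, SKETCH_MODE_BITS);
};

static void sketch_process_link(u32 eproto, bool advertised,
				const struct sketch_ptys2ethtool_cfg *table,
				u32 table_size, unsigned long *modes)
{
	unsigned long proto = eproto;	/* widened so for_each_set_bit() can walk it */
	int i;

	/* OR either the supported or the advertised mask of every set protocol bit. */
	for_each_set_bit(i, &proto, table_size)
		bitmap_or(modes, modes,
			  advertised ? table[i].advertised : table[i].supported,
			  SKETCH_MODE_BITS);
}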
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 773624bb2c5d..d68230a7b9f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -884,8 +884,10 @@ static int flow_type_to_traffic_type(u32 flow_type)
case ESP_V6_FLOW:
return MLX5_TT_IPV6_IPSEC_ESP;
case IPV4_FLOW:
+ case IP_USER_FLOW:
return MLX5_TT_IPV4;
case IPV6_FLOW:
+ case IPV6_USER_FLOW:
return MLX5_TT_IPV6;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index a814b63ed97e..3506024c2453 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -311,8 +311,8 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
struct mlx5e_icosq *sq,
struct mlx5e_umr_wqe *wqe)
{
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->hdr.ctrl;
+ struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->hdr.uctrl;
u16 octowords;
u8 ds_cnt;
@@ -359,7 +359,7 @@ static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node)
return 0;
err_nomem:
- kvfree(shampo->bitmap);
+ bitmap_free(shampo->bitmap);
kvfree(shampo->pages);
return -ENOMEM;
@@ -367,7 +367,7 @@ err_nomem:
static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
{
- kvfree(rq->mpwqe.shampo->bitmap);
+ bitmap_free(rq->mpwqe.shampo->bitmap);
kvfree(rq->mpwqe.shampo->pages);
}
@@ -393,7 +393,9 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
}
- mlx5e_build_umr_wqe(rq, rq->icosq, &rq->mpwqe.umr_wqe);
+ mlx5e_build_umr_wqe(rq, rq->icosq,
+ container_of(&rq->mpwqe.umr_wqe,
+ struct mlx5e_umr_wqe, hdr));
return 0;
}
@@ -737,7 +739,7 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
rq->netdev = c->netdev;
rq->priv = c->priv;
rq->tstamp = c->tstamp;
- rq->clock = &mdev->clock;
+ rq->clock = mdev->clock;
rq->icosq = &c->icosq;
rq->ix = c->ix;
rq->channel = c;
@@ -1614,7 +1616,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
int err;
sq->pdev = c->pdev;
- sq->clock = &mdev->clock;
+ sq->clock = mdev->clock;
sq->mkey_be = c->mkey_be;
sq->netdev = c->netdev;
sq->mdev = c->mdev;
@@ -2023,41 +2025,12 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
csp.min_inline_mode = sq->min_inline_mode;
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- if (param->is_xdp_mb)
- set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state);
-
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
if (err)
goto err_free_xdpsq;
mlx5e_set_xmit_fp(sq, param->is_mpw);
- if (!param->is_mpw && !test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
- unsigned int ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1;
- unsigned int inline_hdr_sz = 0;
- int i;
-
- if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
- inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
- ds_cnt++;
- }
-
- /* Pre initialize fixed WQE fields */
- for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
- struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
-
- sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) {
- .num_wqebbs = 1,
- .num_pkts = 1,
- };
-
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
- }
- }
-
return 0;
err_free_xdpsq:
@@ -3816,8 +3789,11 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
/* MQPRIO is another toplevel qdisc that can't be attached
* simultaneously with the offloaded HTB.
*/
- if (WARN_ON(mlx5e_selq_is_htb_enabled(&priv->selq)))
- return -EINVAL;
+ if (mlx5e_selq_is_htb_enabled(&priv->selq)) {
+ NL_SET_ERR_MSG_MOD(mqprio->extack,
+ "MQPRIO cannot be configured when HTB offload is enabled.");
+ return -EOPNOTSUPP;
+ }
switch (mqprio->mode) {
case TC_MQPRIO_MODE_DCB:
@@ -4447,9 +4423,9 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
if (mlx5e_is_uplink_rep(priv)) {
features = mlx5e_fix_uplink_rep_features(netdev, features);
- netdev->netns_local = true;
+ netdev->netns_immutable = true;
} else {
- netdev->netns_local = false;
+ netdev->netns_immutable = false;
}
mutex_unlock(&priv->state_lock);
@@ -5132,11 +5108,9 @@ static int mlx5e_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
u8 mode, setting;
- int err;
- err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting);
- if (err)
- return err;
+ if (mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting))
+ return -EOPNOTSUPP;
mode = setting ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
mode,
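/*
 * Sketch only, not part of the patch: recovering the outer WQE from a
 * pointer to its embedded header, the container_of() shape used for
 * rq->mpwqe.umr_wqe above. The types below are invented.
 */
#include <linux/container_of.h>
#include <linux/types.h>

struct sketch_wqe_hdr {
	u32 ctrl;
	u32 uctrl;
};

struct sketch_umr_wqe {
	struct sketch_wqe_hdr hdr;
	u64 inline_data[4];
};

static struct sketch_umr_wqe *sketch_wqe_from_hdr(struct sketch_wqe_hdr *hdr)
{
	/* Valid only when @hdr really is the hdr member of a sketch_umr_wqe. */
	return container_of(hdr, struct sketch_umr_wqe, hdr);
}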
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index fdff9fd8a89e..2abab241f03b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -65,6 +65,7 @@
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1
+#define MLX5E_REP_PARAMS_DEF_LOG_RQ_SIZE 0x8
static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
@@ -855,6 +856,8 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
/* RQ */
mlx5e_build_rq_params(mdev, params);
+ if (!mlx5e_is_uplink_rep(priv) && mlx5_core_is_ecpf(mdev))
+ params->log_rq_mtu_frames = MLX5E_REP_PARAMS_DEF_LOG_RQ_SIZE;
/* If netdev is already registered (e.g. move from nic profile to uplink,
* RTNL lock must be held before triggering netdev notifiers.
@@ -886,6 +889,8 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev,
netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
netdev->watchdog_timeo = 15 * HZ;
+ if (mlx5_core_is_ecpf(mdev))
+ netdev->tx_queue_len = 1 << MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;
@@ -900,7 +905,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev,
netdev->features |= netdev->hw_features;
- netdev->netns_local = true;
+ netdev->netns_immutable = true;
}
static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 1963bc5adb18..5fd70b4d55be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -631,16 +631,16 @@ static void build_ksm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
__be32 key, u16 offset, u16 ksm_len)
{
memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_ksms));
- umr_wqe->ctrl.opmod_idx_opcode =
+ umr_wqe->hdr.ctrl.opmod_idx_opcode =
cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
MLX5_OPCODE_UMR);
- umr_wqe->ctrl.umr_mkey = key;
- umr_wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT)
+ umr_wqe->hdr.ctrl.umr_mkey = key;
+ umr_wqe->hdr.ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT)
| MLX5E_KSM_UMR_DS_CNT(ksm_len));
- umr_wqe->uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
- umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
- umr_wqe->uctrl.xlt_octowords = cpu_to_be16(ksm_len);
- umr_wqe->uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+ umr_wqe->hdr.uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
+ umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset);
+ umr_wqe->hdr.uctrl.xlt_octowords = cpu_to_be16(ksm_len);
+ umr_wqe->hdr.uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
static struct mlx5e_frag_page *mlx5e_shampo_hd_to_frag_page(struct mlx5e_rq *rq, int header_index)
@@ -704,7 +704,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
shampo->pi = (shampo->pi + ksm_entries) & (shampo->hd_per_wq - 1);
sq->pc += wqe_bbs;
- sq->doorbell_cseg = &umr_wqe->ctrl;
+ sq->doorbell_cseg = &umr_wqe->hdr.ctrl;
return 0;
@@ -814,12 +814,12 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
bitmap_zero(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
wi->consumed_strides = 0;
- umr_wqe->ctrl.opmod_idx_opcode =
+ umr_wqe->hdr.ctrl.opmod_idx_opcode =
cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
MLX5_OPCODE_UMR);
offset = (ix * rq->mpwqe.mtts_per_wqe) * sizeof(struct mlx5_mtt) / MLX5_OCTWORD;
- umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
+ umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset);
sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
@@ -829,7 +829,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
sq->pc += rq->mpwqe.umr_wqebbs;
- sq->doorbell_cseg = &umr_wqe->ctrl;
+ sq->doorbell_cseg = &umr_wqe->hdr.ctrl;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 1d60465cc2ca..2f7a543feca6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -166,6 +166,9 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
struct udphdr *udph;
struct iphdr *iph;
+ if (skb_linearize(skb))
+ goto out;
+
/* We are only going to peek, no need to clone the SKB */
if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb))
goto out;
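/*
 * Sketch only, not part of the patch: linearize before peeking at a
 * fixed-size header from the skb linear area, as the loopback
 * validation above now does. The header length is an example value.
 */
#include <linux/skbuff.h>
#include <linux/string.h>

static bool sketch_peek_header(struct sk_buff *skb, void *hdr,
			       unsigned int hdr_len)
{
	if (skb_linearize(skb))		/* can fail under memory pressure */
		return false;

	if (skb_headlen(skb) < hdr_len)	/* packet shorter than the header */
		return false;

	memcpy(hdr, skb->data, hdr_len);
	return true;
}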
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 611ec4b6f370..1c121b435016 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -37,9 +37,7 @@
#include "en/ptp.h"
#include "en/port.h"
-#ifdef CONFIG_PAGE_POOL_STATS
#include <net/page_pool/helpers.h>
-#endif
void mlx5e_ethtool_put_stat(u64 **data, u64 val)
{
@@ -196,7 +194,6 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
#endif
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
-#ifdef CONFIG_PAGE_POOL_STATS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow_high_order) },
@@ -208,7 +205,6 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) },
-#endif
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
@@ -377,7 +373,6 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_arfs_err += rq_stats->arfs_err;
#endif
s->rx_recover += rq_stats->recover;
-#ifdef CONFIG_PAGE_POOL_STATS
s->rx_pp_alloc_fast += rq_stats->pp_alloc_fast;
s->rx_pp_alloc_slow += rq_stats->pp_alloc_slow;
s->rx_pp_alloc_empty += rq_stats->pp_alloc_empty;
@@ -389,7 +384,6 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_pp_recycle_ring += rq_stats->pp_recycle_ring;
s->rx_pp_recycle_ring_full += rq_stats->pp_recycle_ring_full;
s->rx_pp_recycle_released_ref += rq_stats->pp_recycle_released_ref;
-#endif
#ifdef CONFIG_MLX5_EN_TLS
s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
@@ -496,7 +490,6 @@ static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
}
}
-#ifdef CONFIG_PAGE_POOL_STATS
static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
{
struct mlx5e_rq_stats *rq_stats = c->rq.stats;
@@ -519,11 +512,6 @@ static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full;
rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt;
}
-#else
-static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
-{
-}
-#endif
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
@@ -1227,6 +1215,13 @@ out:
mutex_unlock(&priv->state_lock);
}
+#define PPORT_PHY_LAYER_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.phys_layer_cntrs.c)
+static const struct counter_desc pport_phy_layer_cntrs_stats_desc[] = {
+ { "link_down_events_phy", PPORT_PHY_LAYER_OFF(link_down_events) }
+};
+
#define PPORT_PHY_STATISTICAL_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.phys_layer_statistical_cntrs.c##_high)
@@ -1243,25 +1238,45 @@ pport_phy_statistical_err_lanes_stats_desc[] = {
{ "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
};
+#define PPORT_PHY_RECOVERY_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, counter_set.phys_layer_recovery_cntrs.c)
+static const struct counter_desc
+pport_phy_recovery_cntrs_stats_desc[] = {
+ { "total_success_recovery_phy",
+ PPORT_PHY_RECOVERY_OFF(total_successful_recovery_events) }
+};
+
+#define NUM_PPORT_PHY_LAYER_COUNTERS \
+ ARRAY_SIZE(pport_phy_layer_cntrs_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
ARRAY_SIZE(pport_phy_statistical_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)
+#define NUM_PPORT_PHY_RECOVERY_COUNTERS \
+ ARRAY_SIZE(pport_phy_recovery_cntrs_stats_desc)
+
+#define NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(dev) \
+ (MLX5_CAP_PCAM_FEATURE(dev, ppcnt_statistical_group) ? \
+ NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0)
+#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(dev) \
+ (MLX5_CAP_PCAM_FEATURE(dev, per_lane_error_counters) ? \
+ NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0)
+#define NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(dev) \
+ (MLX5_CAP_PCAM_FEATURE(dev, ppcnt_recovery_counters) ? \
+ NUM_PPORT_PHY_RECOVERY_COUNTERS : 0)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
{
struct mlx5_core_dev *mdev = priv->mdev;
int num_stats;
- /* "1" for link_down_events special counter */
- num_stats = 1;
+ num_stats = NUM_PPORT_PHY_LAYER_COUNTERS;
- num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
- NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;
+ num_stats += NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev);
- num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
- NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;
+ num_stats += NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev);
+ num_stats += NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev);
return num_stats;
}
@@ -1270,18 +1285,22 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
struct mlx5_core_dev *mdev = priv->mdev;
int i;
- ethtool_puts(data, "link_down_events_phy");
-
- if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
- return;
+ for (i = 0; i < NUM_PPORT_PHY_LAYER_COUNTERS; i++)
+ ethtool_puts(data, pport_phy_layer_cntrs_stats_desc[i].format);
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
+ for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev); i++)
ethtool_puts(data, pport_phy_statistical_stats_desc[i].format);
- if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
- ethtool_puts(data,
- pport_phy_statistical_err_lanes_stats_desc[i].format);
+ for (i = 0;
+ i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev);
+ i++)
+ ethtool_puts(data,
+ pport_phy_statistical_err_lanes_stats_desc[i]
+ .format);
+
+ for (i = 0; i < NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev); i++)
+ ethtool_puts(data,
+ pport_phy_recovery_cntrs_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
@@ -1289,30 +1308,35 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
struct mlx5_core_dev *mdev = priv->mdev;
int i;
- /* link_down_events_phy has special handling since it is not stored in __be64 format */
- mlx5e_ethtool_put_stat(
- data, MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
- counter_set.phys_layer_cntrs.link_down_events));
-
- if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
- return;
+ for (i = 0; i < NUM_PPORT_PHY_LAYER_COUNTERS; i++)
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR32_BE(&priv->stats.pport
+ .phy_counters,
+ pport_phy_layer_cntrs_stats_desc, i));
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
+ for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev); i++)
mlx5e_ethtool_put_stat(
data,
MLX5E_READ_CTR64_BE(
&priv->stats.pport.phy_statistical_counters,
pport_phy_statistical_stats_desc, i));
- if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
- mlx5e_ethtool_put_stat(
- data,
- MLX5E_READ_CTR64_BE(
- &priv->stats.pport
- .phy_statistical_counters,
- pport_phy_statistical_err_lanes_stats_desc,
- i));
+ for (i = 0;
+ i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev);
+ i++)
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &priv->stats.pport.phy_statistical_counters,
+ pport_phy_statistical_err_lanes_stats_desc, i));
+
+ for (i = 0; i < NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev); i++)
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR32_BE(
+ &priv->stats.pport.phy_recovery_counters,
+ pport_phy_recovery_cntrs_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
@@ -1328,12 +1352,21 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
- if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
- return;
+ if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
+ out = pstats->phy_statistical_counters;
+ MLX5_SET(ppcnt_reg, in, grp,
+ MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0,
+ 0);
+ }
- out = pstats->phy_statistical_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+ if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_recovery_counters)) {
+ out = pstats->phy_recovery_counters;
+ MLX5_SET(ppcnt_reg, in, grp,
+ MLX5_PHYSICAL_LAYER_RECOVERY_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0,
+ 0);
+ }
}
void mlx5e_get_link_ext_stats(struct net_device *dev,
@@ -2086,7 +2119,6 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
#endif
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
-#ifdef CONFIG_PAGE_POOL_STATS
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_fast) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow_high_order) },
@@ -2098,7 +2130,6 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring_full) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_released_ref) },
-#endif
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
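/*
 * Sketch only, not part of the patch: expressing a counter-group count
 * as a capability-gated macro so the string loop and the value loop
 * share one bound, as the PHY recovery counters above do. All names
 * here are invented.
 */
#include <linux/types.h>

struct sketch_dev {
	bool has_recovery_counters;
};

#define SKETCH_NUM_RECOVERY_CTRS 1
#define SKETCH_NUM_RECOVERY_CTRS_CAP(dev) \
	((dev)->has_recovery_counters ? SKETCH_NUM_RECOVERY_CTRS : 0)

static int sketch_num_phy_stats(const struct sketch_dev *dev)
{
	int num = 0;

	num += SKETCH_NUM_RECOVERY_CTRS_CAP(dev);	/* 0 when unsupported */
	return num;
}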
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 5961c569cfe0..8de6fcbd3a03 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -215,7 +215,6 @@ struct mlx5e_sw_stats {
u64 ch_aff_change;
u64 ch_force_irq;
u64 ch_eq_rearm;
-#ifdef CONFIG_PAGE_POOL_STATS
u64 rx_pp_alloc_fast;
u64 rx_pp_alloc_slow;
u64 rx_pp_alloc_slow_high_order;
@@ -227,7 +226,6 @@ struct mlx5e_sw_stats {
u64 rx_pp_recycle_ring;
u64 rx_pp_recycle_ring_full;
u64 rx_pp_recycle_released_ref;
-#endif
#ifdef CONFIG_MLX5_EN_TLS
u64 tx_tls_encrypted_packets;
u64 tx_tls_encrypted_bytes;
@@ -309,6 +307,9 @@ struct mlx5e_vport_stats {
#define PPORT_PHY_STATISTICAL_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \
counter_set.phys_layer_statistical_cntrs.c##_high)
+#define PPORT_PHY_RECOVERY_GET(pstats, c) \
+ MLX5_GET64(ppcnt_reg, (pstats)->phy_recovery_counters, \
+ counter_set.phys_layer_recovery_cntrs.c)
#define PPORT_PER_PRIO_GET(pstats, prio, c) \
MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \
counter_set.eth_per_prio_grp_data_layout.c##_high)
@@ -324,6 +325,7 @@ struct mlx5e_pport_stats {
__be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+ __be64 phy_recovery_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 per_tc_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 per_tc_congest_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
@@ -381,7 +383,6 @@ struct mlx5e_rq_stats {
u64 arfs_err;
#endif
u64 recover;
-#ifdef CONFIG_PAGE_POOL_STATS
u64 pp_alloc_fast;
u64 pp_alloc_slow;
u64 pp_alloc_slow_high_order;
@@ -393,7 +394,6 @@ struct mlx5e_rq_stats {
u64 pp_recycle_ring;
u64 pp_recycle_ring_full;
u64 pp_recycle_released_ref;
-#endif
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_decrypted_packets;
u64 tls_decrypted_bytes;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index f8c7912abe0e..4fd853d19e31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -525,9 +525,9 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
{
struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
struct mlx5e_tx_wqe *wqe;
- u16 pi;
+ u16 pi, num_wqebbs;
- pi = mlx5e_txqsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs);
+ pi = mlx5e_txqsq_get_next_pi_anysize(sq, &num_wqebbs);
wqe = MLX5E_TX_FETCH_WQE(sq, pi);
net_prefetchw(wqe->data);
@@ -535,6 +535,7 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
.wqe = wqe,
.bytes_count = 0,
.ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
+ .ds_count_max = num_wqebbs * MLX5_SEND_WQEBB_NUM_DS,
.pkt_count = 0,
.inline_on = 0,
};
@@ -626,7 +627,7 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_tx_mpwqe_add_dseg(sq, &txd);
mlx5e_tx_skb_update_hwts_flags(skb);
- if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe, sq->max_sq_mpw_wqebbs))) {
+ if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe))) {
/* Might stop the queue and affect the retval of __netdev_tx_sent_queue. */
cseg = mlx5e_tx_mpwqe_session_complete(sq);
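/*
 * Sketch only, not part of the patch: a per-session descriptor budget
 * fixed at session start and checked on every add, the shape of the
 * ds_count_max change above. Field and parameter names are invented.
 */
#include <linux/types.h>

struct sketch_mpwqe_session {
	u16 ds_count;		/* descriptors consumed so far */
	u16 ds_count_max;	/* budget chosen at session start */
};

static void sketch_session_start(struct sketch_mpwqe_session *s,
				 u16 num_wqebbs, u16 ds_per_wqebb)
{
	s->ds_count = 0;
	s->ds_count_max = num_wqebbs * ds_per_wqebb;
}

static bool sketch_session_is_full(const struct sketch_mpwqe_session *s)
{
	return s->ds_count >= s->ds_count_max;
}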
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 2b229b6226c6..dfb079e59d85 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -871,8 +871,8 @@ static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
{
+ struct mlx5_irq_pool *pool = mlx5_irq_table_get_comp_irq_pool(dev);
struct mlx5_eq_table *table = dev->priv.eq_table;
- struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
struct irq_affinity_desc af_desc = {};
struct mlx5_irq *irq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
index d599e50af346..3ce455c2535c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
@@ -27,7 +27,7 @@ esw_acl_table_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int ns,
esw_debug(dev, "Create vport[%d] %s ACL table\n", vport_num,
ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS ? "ingress" : "egress");
- root_ns = mlx5_get_flow_vport_acl_namespace(dev, ns, vport->index);
+ root_ns = mlx5_get_flow_vport_namespace(dev, ns, vport->index);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch root namespace for vport (%d)\n",
vport_num);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index 5f647358a05c..76e35c827da0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -1863,7 +1863,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_
"Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)\n",
addr, vport_num);
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)\n",
+ "Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)",
addr, vport_num);
return -EINVAL;
}
@@ -1876,7 +1876,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_
"Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n",
addr, vid, vport_num);
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Failed to lookup vlan metadata for MDB (MAC=%pM,vid=%u,vport=%u)\n",
+ "Failed to lookup vlan metadata for MDB (MAC=%pM,vid=%u,vport=%u)",
addr, vid, vport_num);
return -EINVAL;
}
@@ -1884,7 +1884,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_
err = mlx5_esw_bridge_port_mdb_attach(dev, port, addr, vid);
if (err) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to add MDB (MAC=%pM,vid=%u,vport=%u)\n",
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to add MDB (MAC=%pM,vid=%u,vport=%u)",
addr, vid, vport_num);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index 982fe3714683..b7102e14d23d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -32,7 +32,7 @@ static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *
u16 pfnum;
mlx5_esw_get_port_parent_id(dev, &ppid);
- pfnum = mlx5_get_dev_index(dev);
+ pfnum = PCI_FUNC(dev->pdev->devfn);
external = mlx5_core_is_ecpf_esw_manager(dev);
if (external)
controller_num = dev->priv.eswitch->offloads.host_number + 1;
@@ -110,7 +110,7 @@ static void mlx5_esw_offloads_sf_devlink_port_attrs_set(struct mlx5_eswitch *esw
struct netdev_phys_item_id ppid = {};
u16 pfnum;
- pfnum = mlx5_get_dev_index(dev);
+ pfnum = PCI_FUNC(dev->pdev->devfn);
mlx5_esw_get_port_parent_id(dev, &ppid);
memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
index ed977ae75fab..3cfe743610d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
@@ -10,9 +10,9 @@
#endif
enum {
- MLX5_ESW_IPSEC_RX_POL_FT_LEVEL,
MLX5_ESW_IPSEC_RX_ESP_FT_LEVEL,
MLX5_ESW_IPSEC_RX_ESP_FT_CHK_LEVEL,
+ MLX5_ESW_IPSEC_RX_POL_FT_LEVEL,
};
enum {
@@ -85,6 +85,19 @@ err_header_alloc:
return err;
}
+void mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_spec *spec)
+{
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_1,
+ ESW_IPSEC_RX_MAPPED_ID_MATCH_MASK);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_1,
+ sa_entry->rx_mapped_id << ESW_ZONE_ID_BITS);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+}
+
void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
index ac9c65b89166..514c15258b1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
@@ -20,6 +20,8 @@ int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_tx_create_attr *attr);
void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev);
+void mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_spec *spec);
#else
static inline void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx_create_attr *attr) {}
@@ -48,5 +50,8 @@ static inline void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_tx_create_attr *attr) {}
static inline void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev) {}
+static inline void
+mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_spec *spec) {}
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESW_IPSEC_FS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
index 45183de424f3..76382626ad41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
@@ -96,7 +96,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
if (!flow_group_in)
return -ENOMEM;
- ft_attr.max_fte = POOL_NEXT_SIZE;
+ ft_attr.max_fte = MLX5_FS_MAX_POOL_SIZE;
ft_attr.prio = LEGACY_FDB_PRIO;
fdb = mlx5_create_flow_table(root_ns, &ft_attr);
if (IS_ERR(fdb)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 823c1ba456cd..b6ae384396b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -90,15 +90,30 @@ struct mlx5_esw_sched_node {
struct list_head children;
/* Valid only if this node is associated with a vport. */
struct mlx5_vport *vport;
+ /* Level in the hierarchy. The root node level is 1. */
+ u8 level;
};
+static void esw_qos_node_attach_to_parent(struct mlx5_esw_sched_node *node)
+{
+ if (!node->parent) {
+ /* Root children are assigned a depth level of 2. */
+ node->level = 2;
+ list_add_tail(&node->entry, &node->esw->qos.domain->nodes);
+ } else {
+ node->level = node->parent->level + 1;
+ list_add_tail(&node->entry, &node->parent->children);
+ }
+}
+
static void
esw_qos_node_set_parent(struct mlx5_esw_sched_node *node, struct mlx5_esw_sched_node *parent)
{
list_del_init(&node->entry);
node->parent = parent;
- list_add_tail(&node->entry, &parent->children);
- node->esw = parent->esw;
+ if (parent)
+ node->esw = parent->esw;
+ esw_qos_node_attach_to_parent(node);
}
void mlx5_esw_qos_vport_qos_free(struct mlx5_vport *vport)
@@ -305,8 +320,9 @@ static int esw_qos_set_node_min_rate(struct mlx5_esw_sched_node *node,
return 0;
}
-static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_element_id,
- u32 *tsar_ix)
+static int
+esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_element_id,
+ u32 max_rate, u32 bw_share, u32 *tsar_ix)
{
u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
void *attr;
@@ -323,6 +339,8 @@ static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_
SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
MLX5_SET(scheduling_context, tsar_ctx, parent_element_id,
parent_element_id);
+ MLX5_SET(scheduling_context, tsar_ctx, max_average_bw, max_rate);
+ MLX5_SET(scheduling_context, tsar_ctx, bw_share, bw_share);
attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
MLX5_SET(tsar_element, attr, tsar_type, TSAR_ELEMENT_TSAR_TYPE_DWRR);
@@ -358,7 +376,6 @@ static struct mlx5_esw_sched_node *
__esw_qos_alloc_node(struct mlx5_eswitch *esw, u32 tsar_ix, enum sched_node_type type,
struct mlx5_esw_sched_node *parent)
{
- struct list_head *parent_children;
struct mlx5_esw_sched_node *node;
node = kzalloc(sizeof(*node), GFP_KERNEL);
@@ -370,8 +387,7 @@ __esw_qos_alloc_node(struct mlx5_eswitch *esw, u32 tsar_ix, enum sched_node_type
node->type = type;
node->parent = parent;
INIT_LIST_HEAD(&node->children);
- parent_children = parent ? &parent->children : &esw->qos.domain->nodes;
- list_add_tail(&node->entry, parent_children);
+ esw_qos_node_attach_to_parent(node);
return node;
}
@@ -396,7 +412,8 @@ __esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct mlx5_esw_sch
u32 tsar_ix;
int err;
- err = esw_qos_create_node_sched_elem(esw->dev, esw->qos.root_tsar_ix, &tsar_ix);
+ err = esw_qos_create_node_sched_elem(esw->dev, esw->qos.root_tsar_ix, 0,
+ 0, &tsar_ix);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "E-Switch create TSAR for node failed");
return ERR_PTR(err);
@@ -463,7 +480,8 @@ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
return -EOPNOTSUPP;
- err = esw_qos_create_node_sched_elem(esw->dev, 0, &esw->qos.root_tsar_ix);
+ err = esw_qos_create_node_sched_elem(esw->dev, 0, 0, 0,
+ &esw->qos.root_tsar_ix);
if (err) {
esw_warn(dev, "E-Switch create root TSAR failed (%d)\n", err);
return err;
@@ -986,10 +1004,10 @@ int mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_s
return err;
}
-int mlx5_esw_devlink_rate_parent_set(struct devlink_rate *devlink_rate,
- struct devlink_rate *parent,
- void *priv, void *parent_priv,
- struct netlink_ext_ack *extack)
+int mlx5_esw_devlink_rate_leaf_parent_set(struct devlink_rate *devlink_rate,
+ struct devlink_rate *parent,
+ void *priv, void *parent_priv,
+ struct netlink_ext_ack *extack)
{
struct mlx5_esw_sched_node *node;
struct mlx5_vport *vport = priv;
@@ -1000,3 +1018,105 @@ int mlx5_esw_devlink_rate_parent_set(struct devlink_rate *devlink_rate,
node = parent_priv;
return mlx5_esw_qos_vport_update_parent(vport, node, extack);
}
+
+static int
+mlx5_esw_qos_node_validate_set_parent(struct mlx5_esw_sched_node *node,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ u8 new_level, max_level;
+
+ if (parent && parent->esw != node->esw) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot assign node to another E-Switch");
+ return -EOPNOTSUPP;
+ }
+
+ if (!list_empty(&node->children)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot reassign a node that contains rate objects");
+ return -EOPNOTSUPP;
+ }
+
+ new_level = parent ? parent->level + 1 : 2;
+ max_level = 1 << MLX5_CAP_QOS(node->esw->dev, log_esw_max_sched_depth);
+ if (new_level > max_level) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Node hierarchy depth exceeds the maximum supported level");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int esw_qos_vports_node_update_parent(struct mlx5_esw_sched_node *node,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *curr_parent = node->parent;
+ struct mlx5_eswitch *esw = node->esw;
+ u32 parent_ix;
+ int err;
+
+ parent_ix = parent ? parent->ix : node->esw->qos.root_tsar_ix;
+ mlx5_destroy_scheduling_element_cmd(esw->dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ node->ix);
+ err = esw_qos_create_node_sched_elem(esw->dev, parent_ix,
+ node->max_rate, 0, &node->ix);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to create a node under the new hierarchy.");
+ if (esw_qos_create_node_sched_elem(esw->dev, curr_parent->ix,
+ node->max_rate,
+ node->bw_share,
+ &node->ix))
+ esw_warn(esw->dev, "Node restore QoS failed\n");
+
+ return err;
+ }
+ esw_qos_node_set_parent(node, parent);
+
+ return 0;
+}
+
+static int mlx5_esw_qos_node_update_parent(struct mlx5_esw_sched_node *node,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *curr_parent;
+ struct mlx5_eswitch *esw = node->esw;
+ int err;
+
+ err = mlx5_esw_qos_node_validate_set_parent(node, parent, extack);
+ if (err)
+ return err;
+
+ esw_qos_lock(esw);
+ curr_parent = node->parent;
+ err = esw_qos_vports_node_update_parent(node, parent, extack);
+ if (err)
+ goto out;
+
+ esw_qos_normalize_min_rate(esw, curr_parent, extack);
+ esw_qos_normalize_min_rate(esw, parent, extack);
+
+out:
+ esw_qos_unlock(esw);
+
+ return err;
+}
+
+int mlx5_esw_devlink_rate_node_parent_set(struct devlink_rate *devlink_rate,
+ struct devlink_rate *parent,
+ void *priv, void *parent_priv,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *node = priv, *parent_node;
+
+ if (!parent)
+ return mlx5_esw_qos_node_update_parent(node, NULL, extack);
+
+ parent_node = parent_priv;
+ return mlx5_esw_qos_node_update_parent(node, parent_node, extack);
+}
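/*
 * Sketch only, not part of the patch: depth validation against a
 * log2-encoded maximum, matching the node re-parent check above.
 * sketch_log_max_depth stands in for log_esw_max_sched_depth.
 */
#include <linux/errno.h>
#include <linux/types.h>

struct sketch_sched_node {
	struct sketch_sched_node *parent;
	u8 level;	/* children of the root start at level 2 */
};

static int sketch_validate_new_parent(const struct sketch_sched_node *parent,
				      u8 sketch_log_max_depth)
{
	u8 new_level = parent ? parent->level + 1 : 2;
	u8 max_level = 1 << sketch_log_max_depth;

	return new_level > max_level ? -EOPNOTSUPP : 0;
}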
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
index 6eb8f6a648c8..ed40ec8f027e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
@@ -29,10 +29,14 @@ int mlx5_esw_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv,
struct netlink_ext_ack *extack);
int mlx5_esw_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv,
struct netlink_ext_ack *extack);
-int mlx5_esw_devlink_rate_parent_set(struct devlink_rate *devlink_rate,
- struct devlink_rate *parent,
- void *priv, void *parent_priv,
- struct netlink_ext_ack *extack);
+int mlx5_esw_devlink_rate_leaf_parent_set(struct devlink_rate *devlink_rate,
+ struct devlink_rate *parent,
+ void *priv, void *parent_priv,
+ struct netlink_ext_ack *extack);
+int mlx5_esw_devlink_rate_node_parent_set(struct devlink_rate *devlink_rate,
+ struct devlink_rate *parent,
+ void *priv, void *parent_priv,
+ struct netlink_ext_ack *extack);
#endif
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 20cc01ceee8a..a6a8eea5980c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -648,6 +648,7 @@ esw_setup_meter(struct mlx5_flow_attr *attr, struct mlx5_flow_act *flow_act)
meter = attr->meter_attr.meter;
flow_act->exe_aso.type = attr->exe_aso_type;
flow_act->exe_aso.object_id = meter->obj_id;
+ flow_act->exe_aso.base_id = mlx5e_flow_meter_get_base_id(meter);
flow_act->exe_aso.flow_meter.meter_idx = meter->idx;
flow_act->exe_aso.flow_meter.init_color = MLX5_FLOW_METER_COLOR_GREEN;
/* use metadata reg 5 for packet color */
@@ -2828,9 +2829,9 @@ static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
if (IS_ERR(vport))
return PTR_ERR(vport);
- egress_ns = mlx5_get_flow_vport_acl_namespace(master,
- MLX5_FLOW_NAMESPACE_ESW_EGRESS,
- vport->index);
+ egress_ns = mlx5_get_flow_vport_namespace(master,
+ MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+ vport->index);
if (!egress_ns)
return -EINVAL;
@@ -4157,37 +4158,12 @@ u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
-static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
-{
- int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- void *query_ctx;
- void *hca_caps;
- int err;
-
- *vhca_id = 0;
-
- query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
- if (!query_ctx)
- return -ENOMEM;
-
- err = mlx5_vport_get_other_func_general_cap(esw->dev, vport_num, query_ctx);
- if (err)
- goto out_free;
-
- hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
- *vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);
-
-out_free:
- kfree(query_ctx);
- return err;
-}
-
int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
{
u16 *old_entry, *vhca_map_entry, vhca_id;
int err;
- err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
+ err = mlx5_vport_get_vhca_id(esw->dev, vport_num, &vhca_id);
if (err) {
esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
vport_num, err);
@@ -4213,7 +4189,7 @@ void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
u16 *vhca_map_entry, vhca_id;
int err;
- err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
+ err = mlx5_vport_get_vhca_id(esw->dev, vport_num, &vhca_id);
if (err)
esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n",
vport_num, err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index d91ea53eb394..01c5f5990f9a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -6,6 +6,7 @@
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/events.h"
+#include "hwmon.h"
struct mlx5_event_nb {
struct mlx5_nb nb;
@@ -153,21 +154,50 @@ static int any_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
+#if IS_ENABLED(CONFIG_HWMON)
+static void print_sensor_names_in_bit_set(struct mlx5_core_dev *dev, struct mlx5_hwmon *hwmon,
+ u64 bit_set, int bit_set_offset)
+{
+ unsigned long *bit_set_ptr = (unsigned long *)&bit_set;
+ int num_bits = sizeof(bit_set) * BITS_PER_BYTE;
+ int i;
+
+ for_each_set_bit(i, bit_set_ptr, num_bits) {
+ const char *sensor_name = hwmon_get_sensor_name(hwmon, i + bit_set_offset);
+
+ mlx5_core_warn(dev, "Sensor name[%d]: %s\n", i + bit_set_offset, sensor_name);
+ }
+}
+#endif /* CONFIG_HWMON */
+
/* type == MLX5_EVENT_TYPE_TEMP_WARN_EVENT */
static int temp_warn(struct notifier_block *nb, unsigned long type, void *data)
{
struct mlx5_event_nb *event_nb = mlx5_nb_cof(nb, struct mlx5_event_nb, nb);
struct mlx5_events *events = event_nb->ctx;
+ struct mlx5_core_dev *dev = events->dev;
struct mlx5_eqe *eqe = data;
u64 value_lsb;
u64 value_msb;
value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb);
+ /* bit 1-63 are not supported for NICs,
+ * hence read only bit 0 (asic) from lsb.
+ */
+ value_lsb &= 0x1;
value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb);
- mlx5_core_warn(events->dev,
- "High temperature on sensors with bit set %llx %llx",
- value_msb, value_lsb);
+ if (net_ratelimit()) {
+ mlx5_core_warn(dev, "High temperature on sensors with bit set %#llx %#llx.\n",
+ value_msb, value_lsb);
+#if IS_ENABLED(CONFIG_HWMON)
+ if (dev->hwmon) {
+ print_sensor_names_in_bit_set(dev, dev->hwmon, value_lsb, 0);
+ print_sensor_names_in_bit_set(dev, dev->hwmon, value_msb,
+ sizeof(value_lsb) * BITS_PER_BYTE);
+ }
+#endif
+ }
return NOTIFY_OK;
}
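/*
 * Sketch only, not part of the patch: walking the set bits of a 64-bit
 * sensor mask with the bitmap helpers, as temp_warn() above does. The
 * message and offset handling are simplified.
 */
#include <linux/bitops.h>
#include <linux/printk.h>
#include <linux/types.h>

static void sketch_report_sensors(u64 mask, int bit_offset)
{
	unsigned long bits = mask;	/* assumes a 64-bit unsigned long */
	int i;

	for_each_set_bit(i, &bits, BITS_PER_LONG)
		pr_warn("sensor %d reported high temperature\n",
			i + bit_offset);
}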
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index ae20c061e0fb..a47c29571f64 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -1142,6 +1142,8 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ
case FS_FT_RDMA_RX:
case FS_FT_RDMA_TX:
case FS_FT_PORT_SEL:
+ case FS_FT_RDMA_TRANSPORT_RX:
+ case FS_FT_RDMA_TRANSPORT_TX:
return mlx5_fs_cmd_get_fw_cmds();
default:
return mlx5_fs_cmd_get_stub_cmds();
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 22dc23d991d2..6163bc98d94a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1456,7 +1456,7 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table *ft;
int autogroups_max_fte;
- ft = mlx5_create_flow_table(ns, ft_attr);
+ ft = mlx5_create_vport_flow_table(ns, ft_attr, ft_attr->vport);
if (IS_ERR(ft))
return ft;
@@ -2764,9 +2764,9 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL(mlx5_get_flow_namespace);
-struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
- enum mlx5_flow_namespace_type type,
- int vport)
+struct mlx5_flow_namespace *
+mlx5_get_flow_vport_namespace(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type type, int vport_idx)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
@@ -2775,25 +2775,43 @@ struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_d
switch (type) {
case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
- if (vport >= steering->esw_egress_acl_vports)
+ if (vport_idx >= steering->esw_egress_acl_vports)
return NULL;
if (steering->esw_egress_root_ns &&
- steering->esw_egress_root_ns[vport])
- return &steering->esw_egress_root_ns[vport]->ns;
+ steering->esw_egress_root_ns[vport_idx])
+ return &steering->esw_egress_root_ns[vport_idx]->ns;
else
return NULL;
case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
- if (vport >= steering->esw_ingress_acl_vports)
+ if (vport_idx >= steering->esw_ingress_acl_vports)
return NULL;
if (steering->esw_ingress_root_ns &&
- steering->esw_ingress_root_ns[vport])
- return &steering->esw_ingress_root_ns[vport]->ns;
+ steering->esw_ingress_root_ns[vport_idx])
+ return &steering->esw_ingress_root_ns[vport_idx]->ns;
+ else
+ return NULL;
+ case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX:
+ if (vport_idx >= steering->rdma_transport_rx_vports)
+ return NULL;
+ if (steering->rdma_transport_rx_root_ns &&
+ steering->rdma_transport_rx_root_ns[vport_idx])
+ return &steering->rdma_transport_rx_root_ns[vport_idx]->ns;
+ else
+ return NULL;
+ case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX:
+ if (vport_idx >= steering->rdma_transport_tx_vports)
+ return NULL;
+
+ if (steering->rdma_transport_tx_root_ns &&
+ steering->rdma_transport_tx_root_ns[vport_idx])
+ return &steering->rdma_transport_tx_root_ns[vport_idx]->ns;
else
return NULL;
default:
return NULL;
}
}
+EXPORT_SYMBOL(mlx5_get_flow_vport_namespace);
static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
unsigned int prio,
@@ -3199,6 +3217,127 @@ out_err:
return err;
}
+static int
+init_rdma_transport_rx_root_ns_one(struct mlx5_flow_steering *steering,
+ int vport_idx)
+{
+ struct fs_prio *prio;
+
+ steering->rdma_transport_rx_root_ns[vport_idx] =
+ create_root_ns(steering, FS_FT_RDMA_TRANSPORT_RX);
+ if (!steering->rdma_transport_rx_root_ns[vport_idx])
+ return -ENOMEM;
+
+ /* create 1 prio*/
+ prio = fs_create_prio(&steering->rdma_transport_rx_root_ns[vport_idx]->ns,
+ MLX5_RDMA_TRANSPORT_BYPASS_PRIO, 1);
+ return PTR_ERR_OR_ZERO(prio);
+}
+
+static int
+init_rdma_transport_tx_root_ns_one(struct mlx5_flow_steering *steering,
+ int vport_idx)
+{
+ struct fs_prio *prio;
+
+ steering->rdma_transport_tx_root_ns[vport_idx] =
+ create_root_ns(steering, FS_FT_RDMA_TRANSPORT_TX);
+ if (!steering->rdma_transport_tx_root_ns[vport_idx])
+ return -ENOMEM;
+
+ /* create 1 prio*/
+ prio = fs_create_prio(&steering->rdma_transport_tx_root_ns[vport_idx]->ns,
+ MLX5_RDMA_TRANSPORT_BYPASS_PRIO, 1);
+ return PTR_ERR_OR_ZERO(prio);
+}
+
+static int init_rdma_transport_rx_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct mlx5_core_dev *dev = steering->dev;
+ int total_vports;
+ int err;
+ int i;
+
+ /* In case eswitch not supported and working in legacy mode */
+ total_vports = mlx5_eswitch_get_total_vports(dev) ?: 1;
+
+ steering->rdma_transport_rx_root_ns =
+ kcalloc(total_vports,
+ sizeof(*steering->rdma_transport_rx_root_ns),
+ GFP_KERNEL);
+ if (!steering->rdma_transport_rx_root_ns)
+ return -ENOMEM;
+
+ for (i = 0; i < total_vports; i++) {
+ err = init_rdma_transport_rx_root_ns_one(steering, i);
+ if (err)
+ goto cleanup_root_ns;
+ }
+ steering->rdma_transport_rx_vports = total_vports;
+ return 0;
+
+cleanup_root_ns:
+ while (i--)
+ cleanup_root_ns(steering->rdma_transport_rx_root_ns[i]);
+ kfree(steering->rdma_transport_rx_root_ns);
+ steering->rdma_transport_rx_root_ns = NULL;
+ return err;
+}
+
+static int init_rdma_transport_tx_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct mlx5_core_dev *dev = steering->dev;
+ int total_vports;
+ int err;
+ int i;
+
+ /* If the eswitch is not supported, fall back to legacy mode with a single vport */
+ total_vports = mlx5_eswitch_get_total_vports(dev) ?: 1;
+
+ steering->rdma_transport_tx_root_ns =
+ kcalloc(total_vports,
+ sizeof(*steering->rdma_transport_tx_root_ns),
+ GFP_KERNEL);
+ if (!steering->rdma_transport_tx_root_ns)
+ return -ENOMEM;
+
+ for (i = 0; i < total_vports; i++) {
+ err = init_rdma_transport_tx_root_ns_one(steering, i);
+ if (err)
+ goto cleanup_root_ns;
+ }
+ steering->rdma_transport_tx_vports = total_vports;
+ return 0;
+
+cleanup_root_ns:
+ while (i--)
+ cleanup_root_ns(steering->rdma_transport_tx_root_ns[i]);
+ kfree(steering->rdma_transport_tx_root_ns);
+ steering->rdma_transport_tx_root_ns = NULL;
+ return err;
+}
+
+static void cleanup_rdma_transport_roots_ns(struct mlx5_flow_steering *steering)
+{
+ int i;
+
+ if (steering->rdma_transport_rx_root_ns) {
+ for (i = 0; i < steering->rdma_transport_rx_vports; i++)
+ cleanup_root_ns(steering->rdma_transport_rx_root_ns[i]);
+
+ kfree(steering->rdma_transport_rx_root_ns);
+ steering->rdma_transport_rx_root_ns = NULL;
+ }
+
+ if (steering->rdma_transport_tx_root_ns) {
+ for (i = 0; i < steering->rdma_transport_tx_vports; i++)
+ cleanup_root_ns(steering->rdma_transport_tx_root_ns[i]);
+
+ kfree(steering->rdma_transport_tx_root_ns);
+ steering->rdma_transport_tx_root_ns = NULL;
+ }
+}
+
/* FT and tc chains are stored in the same array so we can re-use the
* mlx5_get_fdb_sub_ns() and tc api for FT chains.
* When creating a new ns for each chain store it in the first available slot.
@@ -3631,6 +3770,7 @@ void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
cleanup_root_ns(steering->rdma_rx_root_ns);
cleanup_root_ns(steering->rdma_tx_root_ns);
cleanup_root_ns(steering->egress_root_ns);
+ cleanup_rdma_transport_roots_ns(steering);
devl_params_unregister(priv_to_devlink(dev), mlx5_fs_params,
ARRAY_SIZE(mlx5_fs_params));
@@ -3700,6 +3840,18 @@ int mlx5_fs_core_init(struct mlx5_core_dev *dev)
goto err;
}
+ if (MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_RX(dev, ft_support)) {
+ err = init_rdma_transport_rx_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
+ if (MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_TX(dev, ft_support)) {
+ err = init_rdma_transport_tx_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
return 0;
err:
@@ -3850,8 +4002,10 @@ mlx5_get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type
struct mlx5_flow_namespace *ns;
if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
- ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
- ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
+ ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS ||
+ ns_type == MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX ||
+ ns_type == MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX)
+ ns = mlx5_get_flow_vport_namespace(dev, ns_type, 0);
else
ns = mlx5_get_flow_namespace(dev, ns_type);
if (!ns)
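A minimal usage sketch for the renamed per-vport lookup above; the surrounding caller and the vport_idx variable are hypothetical, while the function name, the enum value and the NULL-on-unsupported behaviour are taken from the hunks in this file:

    struct mlx5_flow_namespace *ns;

    /* Per-vport RDMA transport RX namespace; NULL means the index is out of
     * range or the root namespace was never created on this device.
     */
    ns = mlx5_get_flow_vport_namespace(dev, MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX,
                                       vport_idx);
    if (!ns)
        return -EOPNOTSUPP;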
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 20837e526679..0767239f651c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -115,7 +115,9 @@ enum fs_flow_table_type {
FS_FT_PORT_SEL = 0X9,
FS_FT_FDB_RX = 0xa,
FS_FT_FDB_TX = 0xb,
- FS_FT_MAX_TYPE = FS_FT_FDB_TX,
+ FS_FT_RDMA_TRANSPORT_RX = 0xd,
+ FS_FT_RDMA_TRANSPORT_TX = 0xe,
+ FS_FT_MAX_TYPE = FS_FT_RDMA_TRANSPORT_TX,
};
enum fs_flow_table_op_mod {
@@ -158,6 +160,10 @@ struct mlx5_flow_steering {
struct mlx5_flow_root_namespace *port_sel_root_ns;
int esw_egress_acl_vports;
int esw_ingress_acl_vports;
+ struct mlx5_flow_root_namespace **rdma_transport_rx_root_ns;
+ struct mlx5_flow_root_namespace **rdma_transport_tx_root_ns;
+ int rdma_transport_rx_vports;
+ int rdma_transport_tx_vports;
};
struct fs_node {
@@ -341,16 +347,10 @@ struct mlx5_fc {
u64 lastbytes;
};
-struct mlx5_fc_bulk_hws_data {
- struct mlx5hws_action *hws_action;
- struct mutex lock; /* protects hws_action */
- refcount_t hws_action_refcount;
-};
-
struct mlx5_fc_bulk {
struct mlx5_fs_bulk fs_bulk;
u32 base_id;
- struct mlx5_fc_bulk_hws_data hws_data;
+ struct mlx5_fs_hws_data hws_data;
struct mlx5_fc fcs[];
};
@@ -434,7 +434,9 @@ struct mlx5_flow_root_namespace *find_root(struct fs_node *node);
(type == FS_FT_PORT_SEL) ? MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) : \
(type == FS_FT_FDB_RX) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
(type == FS_FT_FDB_TX) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
- (BUILD_BUG_ON_ZERO(FS_FT_FDB_TX != FS_FT_MAX_TYPE))\
+ (type == FS_FT_RDMA_TRANSPORT_RX) ? MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_RX(mdev, cap) : \
+ (type == FS_FT_RDMA_TRANSPORT_TX) ? MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_TX(mdev, cap) : \
+ (BUILD_BUG_ON_ZERO(FS_FT_RDMA_TRANSPORT_TX != FS_FT_MAX_TYPE))\
)
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
index c14590acc772..f6abfd00d7e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
@@ -50,10 +50,12 @@ mlx5_ft_pool_get_avail_sz(struct mlx5_core_dev *dev, enum fs_flow_table_type tab
int i, found_i = -1;
for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
- if (dev->priv.ft_pool->ft_left[i] && FT_POOLS[i] >= desired_size &&
+ if (dev->priv.ft_pool->ft_left[i] &&
+ (FT_POOLS[i] >= desired_size ||
+ desired_size == MLX5_FS_MAX_POOL_SIZE) &&
FT_POOLS[i] <= max_ft_size) {
found_i = i;
- if (desired_size != POOL_NEXT_SIZE)
+ if (desired_size != MLX5_FS_MAX_POOL_SIZE)
break;
}
}
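A standalone sketch of the selection walk above, with invented pool sizes and availability (the real FT_POOLS values are not part of this diff, and MLX5_FS_MAX_POOL_SIZE is assumed here to be the 0 sentinel that replaced POOL_NEXT_SIZE): an ordinary desired_size keeps the smallest pool that still fits, while MLX5_FS_MAX_POOL_SIZE keeps walking and ends on the largest pool that still has entries within max_ft_size.

    /* Illustration only; sizes, availability and the sentinel value are invented. */
    #include <stdio.h>

    #define MLX5_FS_MAX_POOL_SIZE 0

    int main(void)
    {
        int ft_pools[] = { 1 << 16, 1 << 11, 1 << 7, 1 << 1 }; /* largest to smallest */
        int ft_left[]  = { 1, 0, 4, 4 };                       /* entries left per pool */
        int max_ft_size = 1 << 20, desired_size = 100;
        int i, found_i = -1;

        for (i = 4 - 1; i >= 0; i--) {
            if (ft_left[i] &&
                (ft_pools[i] >= desired_size ||
                 desired_size == MLX5_FS_MAX_POOL_SIZE) &&
                ft_pools[i] <= max_ft_size) {
                found_i = i;
                if (desired_size != MLX5_FS_MAX_POOL_SIZE)
                    break; /* smallest pool that fits */
            }
        }
        /* desired_size = 100 picks 128 entries; MLX5_FS_MAX_POOL_SIZE would pick 65536 */
        printf("picked pool index %d (%d entries)\n", found_i,
               found_i >= 0 ? ft_pools[found_i] : 0);
        return 0;
    }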
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
index 25f4274b372b..173e312db720 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
@@ -7,8 +7,6 @@
#include <linux/mlx5/driver.h>
#include "fs_core.h"
-#define POOL_NEXT_SIZE 0
-
int mlx5_ft_pool_init(struct mlx5_core_dev *dev);
void mlx5_ft_pool_destroy(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index b253d1673398..57476487e31f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -287,6 +287,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN(dev, adv_rdma)) {
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ADV_RDMA,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 566710d34a7b..6830a49fe682 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -345,15 +345,12 @@ static void mlx5_fw_live_patch_event(struct work_struct *work)
}
#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
-static int mlx5_check_hotplug_interrupt(struct mlx5_core_dev *dev)
+static int mlx5_check_hotplug_interrupt(struct mlx5_core_dev *dev,
+ struct pci_dev *bridge)
{
- struct pci_dev *bridge = dev->pdev->bus->self;
u16 reg16;
int err;
- if (!bridge)
- return -EOPNOTSUPP;
-
err = pcie_capability_read_word(bridge, PCI_EXP_SLTCTL, &reg16);
if (err)
return err;
@@ -416,9 +413,15 @@ static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id)
static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev,
u8 reset_method)
{
+ struct pci_dev *bridge = dev->pdev->bus->self;
u16 dev_id;
int err;
+ if (!bridge) {
+ mlx5_core_warn(dev, "PCI bus bridge is not accessible\n");
+ return false;
+ }
+
if (!MLX5_CAP_GEN(dev, fast_teardown)) {
mlx5_core_warn(dev, "fast teardown is not supported by firmware\n");
return false;
@@ -426,7 +429,7 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev,
#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
if (reset_method != MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET) {
- err = mlx5_check_hotplug_interrupt(dev);
+ err = mlx5_check_hotplug_interrupt(dev, bridge);
if (err)
return false;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index a6329ca2d9bf..91613d5a36cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -96,6 +96,11 @@ static int mlx5_health_get_rfr(u8 rfr_severity)
return rfr_severity >> MLX5_RFR_BIT_OFFSET;
}
+static int mlx5_health_get_crr(u8 rfr_severity)
+{
+ return (rfr_severity >> MLX5_CRR_BIT_OFFSET) & 0x01;
+}
+
static bool sensor_fw_synd_rfr(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
@@ -375,6 +380,8 @@ static const char *hsynd_str(u8 synd)
return "High temperature";
case MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PCI_POISONED_ERR:
return "ICM fetch PCI data poisoned error";
+ case MLX5_INITIAL_SEG_HEALTH_SYNDROME_TRUST_LOCKDOWN_ERR:
+ return "Trust lockdown error";
default:
return "unrecognized error";
}
@@ -442,12 +449,15 @@ static void print_health_info(struct mlx5_core_dev *dev)
mlx5_log(dev, severity, "time %u\n", ioread32be(&h->time));
mlx5_log(dev, severity, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
mlx5_log(dev, severity, "rfr %d\n", mlx5_health_get_rfr(rfr_severity));
+ mlx5_log(dev, severity, "crr %d\n", mlx5_health_get_crr(rfr_severity));
mlx5_log(dev, severity, "severity %d (%s)\n", severity, mlx5_loglevel_str(severity));
mlx5_log(dev, severity, "irisc_index %d\n", ioread8(&h->irisc_index));
mlx5_log(dev, severity, "synd 0x%x: %s\n", ioread8(&h->synd),
hsynd_str(ioread8(&h->synd)));
mlx5_log(dev, severity, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
mlx5_log(dev, severity, "raw fw_ver 0x%08x\n", ioread32be(&h->fw_ver));
+ if (mlx5_health_get_crr(rfr_severity))
+ mlx5_core_warn(dev, "Cold reset is required\n");
}
static int
@@ -799,14 +809,17 @@ static void poll_health(struct timer_list *t)
health->prev = count;
if (health->miss_counter == MAX_MISSES) {
mlx5_core_err(dev, "device's health compromised - reached miss count\n");
+ health->synd = ioread8(&h->synd);
print_health_info(dev);
queue_work(health->wq, &health->report_work);
}
prev_synd = health->synd;
health->synd = ioread8(&h->synd);
- if (health->synd && health->synd != prev_synd)
+ if (health->synd && health->synd != prev_synd) {
+ print_health_info(dev);
queue_work(health->wq, &health->report_work);
+ }
out:
mod_timer(&health->timer, get_next_poll_jiffies(dev));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c
index 353f81dccd1c..4ba2636d7fb6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c
@@ -416,3 +416,8 @@ void mlx5_hwmon_dev_unregister(struct mlx5_core_dev *mdev)
mlx5_hwmon_free(hwmon);
mdev->hwmon = NULL;
}
+
+const char *hwmon_get_sensor_name(struct mlx5_hwmon *hwmon, int channel)
+{
+ return hwmon->temp_channel_desc[channel].sensor_name;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h
index 999654a9b9da..f38271c22c10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h
@@ -10,6 +10,7 @@
int mlx5_hwmon_dev_register(struct mlx5_core_dev *mdev);
void mlx5_hwmon_dev_unregister(struct mlx5_core_dev *mdev);
+const char *hwmon_get_sensor_name(struct mlx5_hwmon *hwmon, int channel);
#else
static inline int mlx5_hwmon_dev_register(struct mlx5_core_dev *mdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
index 1477db7f5307..2691d88cdee1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
@@ -175,7 +175,7 @@ unlock:
void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq)
{
- struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
+ struct mlx5_irq_pool *pool = mlx5_irq_get_pool(irq);
int cpu;
cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index cea5aa314f6c..d058cbb4a00c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -523,8 +523,7 @@ static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev
ndev = ldev->pf[last_idx].netdev;
}
- if (ndev)
- dev_hold(ndev);
+ dev_hold(ndev);
unlock:
spin_unlock_irqrestore(&lag_lock, flags);
@@ -584,8 +583,9 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
}
}
-static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
- unsigned long *flags)
+static int mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev,
+ enum mlx5_lag_mode mode,
+ unsigned long *flags)
{
int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_core_dev *dev0;
@@ -593,7 +593,12 @@ static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
if (first_idx < 0)
return -EINVAL;
+ if (mode == MLX5_LAG_MODE_MPESW ||
+ mode == MLX5_LAG_MODE_MULTIPATH)
+ return 0;
+
dev0 = ldev->pf[first_idx].dev;
+
if (!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table)) {
if (ldev->ports > 2)
return -EINVAL;
@@ -608,32 +613,10 @@ static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
return 0;
}
-static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev,
- struct lag_tracker *tracker,
- enum mlx5_lag_mode mode,
- unsigned long *flags)
-{
- int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
- struct lag_func *dev0;
-
- if (first_idx < 0 || mode == MLX5_LAG_MODE_MPESW)
- return;
-
- dev0 = &ldev->pf[first_idx];
- if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) &&
- tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) {
- if (ldev->ports > 2)
- ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;
- set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);
- }
-}
-
static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
struct lag_tracker *tracker, bool shared_fdb,
unsigned long *flags)
{
- bool roce_lag = mode == MLX5_LAG_MODE_ROCE;
-
*flags = 0;
if (shared_fdb) {
set_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, flags);
@@ -643,11 +626,7 @@ static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
if (mode == MLX5_LAG_MODE_MPESW)
set_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, flags);
- if (roce_lag)
- return mlx5_lag_set_port_sel_mode_roce(ldev, flags);
-
- mlx5_lag_set_port_sel_mode_offloads(ldev, tracker, mode, flags);
- return 0;
+ return mlx5_lag_set_port_sel_mode(ldev, mode, flags);
}
char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
@@ -951,7 +930,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
}
-static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
+bool mlx5_lag_shared_fdb_supported(struct mlx5_lag *ldev)
{
int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_core_dev *dev;
@@ -1038,7 +1017,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
}
if (do_bond && !__mlx5_lag_is_active(ldev)) {
- bool shared_fdb = mlx5_shared_fdb_supported(ldev);
+ bool shared_fdb = mlx5_lag_shared_fdb_supported(ldev);
roce_lag = mlx5_lag_is_roce_lag(ldev);
@@ -1052,6 +1031,10 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
if (err) {
if (shared_fdb || roce_lag)
mlx5_lag_add_devices(ldev);
+ if (shared_fdb) {
+ mlx5_ldev_for_each(i, 0, ldev)
+ mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
+ }
return;
} else if (roce_lag) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index 01cf72366947..c2f256bb2bc2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -92,6 +92,7 @@ mlx5_lag_is_ready(struct mlx5_lag *ldev)
return test_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
}
+bool mlx5_lag_shared_fdb_supported(struct mlx5_lag *ldev);
bool mlx5_lag_check_prereq(struct mlx5_lag *ldev);
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index ffac0bd6c895..aad52d3a90e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -65,7 +65,6 @@ err_metadata:
return err;
}
-#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 4
static int enable_mpesw(struct mlx5_lag *ldev)
{
int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
@@ -77,13 +76,11 @@ static int enable_mpesw(struct mlx5_lag *ldev)
return -EINVAL;
dev0 = ldev->pf[idx].dev;
- if (ldev->ports > MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS)
- return -EOPNOTSUPP;
-
if (mlx5_eswitch_mode(dev0) != MLX5_ESWITCH_OFFLOADS ||
!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table) ||
!MLX5_CAP_GEN(dev0, create_lag_when_not_master_up) ||
- !mlx5_lag_check_prereq(ldev))
+ !mlx5_lag_check_prereq(ldev) ||
+ !mlx5_lag_shared_fdb_supported(ldev))
return -EOPNOTSUPP;
err = mlx5_mpesw_metadata_set(ldev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
index bde79cac33a9..d832a12ffec0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -97,7 +97,7 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
mlx5_del_flow_rules(lag_definer->rules[idx]);
}
j = ldev->buckets;
- };
+ }
goto destroy_fg;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index d61a1a9297c9..65a94e46edcf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -43,6 +43,8 @@
#include <linux/cpufeature.h>
#endif /* CONFIG_X86 */
+#define MLX5_RT_CLOCK_IDENTITY_SIZE MLX5_FLD_SZ_BYTES(mrtcq_reg, rt_clock_identity)
+
enum {
MLX5_PIN_MODE_IN = 0x0,
MLX5_PIN_MODE_OUT = 0x1,
@@ -77,6 +79,56 @@ enum {
MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX = 200000,
};
+struct mlx5_clock_dev_state {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_devcom_comp_dev *compdev;
+ struct mlx5_nb pps_nb;
+ struct work_struct out_work;
+};
+
+struct mlx5_clock_priv {
+ struct mlx5_clock clock;
+ struct mlx5_core_dev *mdev;
+ struct mutex lock; /* protect mdev and used in PTP callbacks */
+ struct mlx5_core_dev *event_mdev;
+};
+
+static struct mlx5_clock_priv *clock_priv(struct mlx5_clock *clock)
+{
+ return container_of(clock, struct mlx5_clock_priv, clock);
+}
+
+static void mlx5_clock_lockdep_assert(struct mlx5_clock *clock)
+{
+ if (!clock->shared)
+ return;
+
+ lockdep_assert(lockdep_is_held(&clock_priv(clock)->lock));
+}
+
+static struct mlx5_core_dev *mlx5_clock_mdev_get(struct mlx5_clock *clock)
+{
+ mlx5_clock_lockdep_assert(clock);
+
+ return clock_priv(clock)->mdev;
+}
+
+static void mlx5_clock_lock(struct mlx5_clock *clock)
+{
+ if (!clock->shared)
+ return;
+
+ mutex_lock(&clock_priv(clock)->lock);
+}
+
+static void mlx5_clock_unlock(struct mlx5_clock *clock)
+{
+ if (!clock->shared)
+ return;
+
+ mutex_unlock(&clock_priv(clock)->lock);
+}
+
static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
{
return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
@@ -94,6 +146,22 @@ static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
}
+static int mlx5_clock_identity_get(struct mlx5_core_dev *mdev,
+ u8 identify[MLX5_RT_CLOCK_IDENTITY_SIZE])
+{
+ u32 out[MLX5_ST_SZ_DW(mrtcq_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(mrtcq_reg)] = {};
+ int err;
+
+ err = mlx5_core_access_reg(mdev, in, sizeof(in),
+ out, sizeof(out), MLX5_REG_MRTCQ, 0, 0);
+ if (!err)
+ memcpy(identify, MLX5_ADDR_OF(mrtcq_reg, out, rt_clock_identity),
+ MLX5_RT_CLOCK_IDENTITY_SIZE);
+
+ return err;
+}
+
static u32 mlx5_ptp_shift_constant(u32 dev_freq_khz)
{
/* Optimal shift constant leads to corrections above just 1 scaled ppm.
@@ -119,21 +187,30 @@ static u32 mlx5_ptp_shift_constant(u32 dev_freq_khz)
ilog2((U32_MAX / NSEC_PER_MSEC) * dev_freq_khz));
}
+static s32 mlx5_clock_getmaxphase(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_MCAM_FEATURE(mdev, mtutc_time_adjustment_extended_range) ?
+ MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX :
+ MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX;
+}
+
static s32 mlx5_ptp_getmaxphase(struct ptp_clock_info *ptp)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_core_dev *mdev;
+ s32 ret;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+ ret = mlx5_clock_getmaxphase(mdev);
+ mlx5_clock_unlock(clock);
- return MLX5_CAP_MCAM_FEATURE(mdev, mtutc_time_adjustment_extended_range) ?
- MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX :
- MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX;
+ return ret;
}
static bool mlx5_is_mtutc_time_adj_cap(struct mlx5_core_dev *mdev, s64 delta)
{
- s64 max = mlx5_ptp_getmaxphase(&mdev->clock.ptp_info);
+ s64 max = mlx5_clock_getmaxphase(mdev);
if (delta < -max || delta > max)
return false;
@@ -209,7 +286,7 @@ static int mlx5_mtctr_syncdevicetime(ktime_t *device_time,
if (real_time_mode)
*device_time = ns_to_ktime(REAL_TIME_TO_NS(device >> 32, device & U32_MAX));
else
- *device_time = mlx5_timecounter_cyc2time(&mdev->clock, device);
+ *device_time = mlx5_timecounter_cyc2time(mdev->clock, device);
return 0;
}
@@ -220,16 +297,23 @@ static int mlx5_ptp_getcrosststamp(struct ptp_clock_info *ptp,
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct system_time_snapshot history_begin = {0};
struct mlx5_core_dev *mdev;
+ int err;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
- if (!mlx5_is_ptm_source_time_available(mdev))
- return -EBUSY;
+ if (!mlx5_is_ptm_source_time_available(mdev)) {
+ err = -EBUSY;
+ goto unlock;
+ }
ktime_get_snapshot(&history_begin);
- return get_device_system_crosststamp(mlx5_mtctr_syncdevicetime, mdev,
- &history_begin, cts);
+ err = get_device_system_crosststamp(mlx5_mtctr_syncdevicetime, mdev,
+ &history_begin, cts);
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
}
#endif /* CONFIG_X86 */
@@ -263,8 +347,7 @@ static u64 read_internal_timer(const struct cyclecounter *cc)
{
struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles);
struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer);
- struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
- clock);
+ struct mlx5_core_dev *mdev = mlx5_clock_mdev_get(clock);
return mlx5_read_time(mdev, NULL, false) & cc->mask;
}
@@ -272,7 +355,7 @@ static u64 read_internal_timer(const struct cyclecounter *cc)
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
struct mlx5_timer *timer;
u32 sign;
@@ -295,12 +378,10 @@ static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
static void mlx5_pps_out(struct work_struct *work)
{
- struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
- out_work);
- struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
- pps_info);
- struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
- clock);
+ struct mlx5_clock_dev_state *clock_state = container_of(work, struct mlx5_clock_dev_state,
+ out_work);
+ struct mlx5_core_dev *mdev = clock_state->mdev;
+ struct mlx5_clock *clock = mdev->clock;
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
unsigned long flags;
int i;
@@ -330,7 +411,8 @@ static long mlx5_timestamp_overflow(struct ptp_clock_info *ptp_info)
unsigned long flags;
clock = container_of(ptp_info, struct mlx5_clock, ptp_info);
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
timer = &clock->timer;
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
@@ -342,6 +424,7 @@ static long mlx5_timestamp_overflow(struct ptp_clock_info *ptp_info)
write_sequnlock_irqrestore(&clock->lock, flags);
out:
+ mlx5_clock_unlock(clock);
return timer->overflow_period;
}
@@ -361,15 +444,12 @@ static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
return mlx5_set_mtutc(mdev, in, sizeof(in));
}
-static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
+static int mlx5_clock_settime(struct mlx5_core_dev *mdev, struct mlx5_clock *clock,
+ const struct timespec64 *ts)
{
- struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_timer *timer = &clock->timer;
- struct mlx5_core_dev *mdev;
unsigned long flags;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
-
if (mlx5_modify_mtutc_allowed(mdev)) {
int err = mlx5_ptp_settime_real_time(mdev, ts);
@@ -385,6 +465,20 @@ static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64
return 0;
}
+static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
+{
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_core_dev *mdev;
+ int err;
+
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+ err = mlx5_clock_settime(mdev, clock, ts);
+ mlx5_clock_unlock(clock);
+
+ return err;
+}
+
static
struct timespec64 mlx5_ptp_gettimex_real_time(struct mlx5_core_dev *mdev,
struct ptp_system_timestamp *sts)
@@ -404,7 +498,8 @@ static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
struct mlx5_core_dev *mdev;
u64 cycles, ns;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
if (mlx5_real_time_mode(mdev)) {
*ts = mlx5_ptp_gettimex_real_time(mdev, sts);
goto out;
@@ -414,6 +509,7 @@ static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
ns = mlx5_timecounter_cyc2time(clock, cycles);
*ts = ns_to_timespec64(ns);
out:
+ mlx5_clock_unlock(clock);
return 0;
}
@@ -444,14 +540,16 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
+ int err = 0;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
if (mlx5_modify_mtutc_allowed(mdev)) {
- int err = mlx5_ptp_adjtime_real_time(mdev, delta);
+ err = mlx5_ptp_adjtime_real_time(mdev, delta);
if (err)
- return err;
+ goto unlock;
}
write_seqlock_irqsave(&clock->lock, flags);
@@ -459,17 +557,23 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
- return 0;
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
}
static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_core_dev *mdev;
+ int err;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+ err = mlx5_ptp_adjtime_real_time(mdev, delta);
+ mlx5_clock_unlock(clock);
- return mlx5_ptp_adjtime_real_time(mdev, delta);
+ return err;
}
static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm)
@@ -498,15 +602,17 @@ static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
+ int err = 0;
u32 mult;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
if (mlx5_modify_mtutc_allowed(mdev)) {
- int err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);
+ err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);
if (err)
- return err;
+ goto unlock;
}
mult = (u32)adjust_by_scaled_ppm(timer->nominal_c_mult, scaled_ppm);
@@ -518,7 +624,9 @@ static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
write_sequnlock_irqrestore(&clock->lock, flags);
ptp_schedule_worker(clock->ptp, timer->overflow_period);
- return 0;
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
}
static int mlx5_extts_configure(struct ptp_clock_info *ptp,
@@ -527,18 +635,14 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
{
struct mlx5_clock *clock =
container_of(ptp, struct mlx5_clock, ptp_info);
- struct mlx5_core_dev *mdev =
- container_of(clock, struct mlx5_core_dev, clock);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+ struct mlx5_core_dev *mdev;
u32 field_select = 0;
u8 pin_mode = 0;
u8 pattern = 0;
int pin = -1;
int err = 0;
- if (!MLX5_PPS_CAP(mdev))
- return -EOPNOTSUPP;
-
/* Reject requests with unsupported flags */
if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
PTP_RISING_EDGE |
@@ -569,6 +673,14 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
field_select = MLX5_MTPPS_FS_ENABLE;
}
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+
+ if (!MLX5_PPS_CAP(mdev)) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
MLX5_SET(mtpps_reg, in, pin, pin);
MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
MLX5_SET(mtpps_reg, in, pattern, pattern);
@@ -577,15 +689,23 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
err = mlx5_set_mtpps(mdev, in, sizeof(in));
if (err)
- return err;
+ goto unlock;
+
+ err = mlx5_set_mtppse(mdev, pin, 0, MLX5_EVENT_MODE_REPETETIVE & on);
+ if (err)
+ goto unlock;
+
+ clock->pps_info.pin_armed[pin] = on;
+ clock_priv(clock)->event_mdev = mdev;
- return mlx5_set_mtppse(mdev, pin, 0,
- MLX5_EVENT_MODE_REPETETIVE & on);
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
}
static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
u64 cycles_now, cycles_delta;
u64 nsec_now, nsec_delta;
struct mlx5_timer *timer;
@@ -644,7 +764,7 @@ static int mlx5_perout_conf_out_pulse_duration(struct mlx5_core_dev *mdev,
struct ptp_clock_request *rq,
u32 *out_pulse_duration_ns)
{
- struct mlx5_pps *pps_info = &mdev->clock.pps_info;
+ struct mlx5_pps *pps_info = &mdev->clock->pps_info;
u32 out_pulse_duration;
struct timespec64 ts;
@@ -677,7 +797,7 @@ static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clo
u32 *field_select, u32 *out_pulse_duration_ns,
u64 *period, u64 *time_stamp)
{
- struct mlx5_pps *pps_info = &mdev->clock.pps_info;
+ struct mlx5_pps *pps_info = &mdev->clock->pps_info;
struct ptp_clock_time *time = &rq->perout.start;
struct timespec64 ts;
@@ -712,26 +832,18 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
{
struct mlx5_clock *clock =
container_of(ptp, struct mlx5_clock, ptp_info);
- struct mlx5_core_dev *mdev =
- container_of(clock, struct mlx5_core_dev, clock);
- bool rt_mode = mlx5_real_time_mode(mdev);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
u32 out_pulse_duration_ns = 0;
+ struct mlx5_core_dev *mdev;
u32 field_select = 0;
u64 npps_period = 0;
u64 time_stamp = 0;
u8 pin_mode = 0;
u8 pattern = 0;
+ bool rt_mode;
int pin = -1;
int err = 0;
- if (!MLX5_PPS_CAP(mdev))
- return -EOPNOTSUPP;
-
- /* Reject requests with unsupported flags */
- if (mlx5_perout_verify_flags(mdev, rq->perout.flags))
- return -EOPNOTSUPP;
-
if (rq->perout.index >= clock->ptp_info.n_pins)
return -EINVAL;
@@ -740,14 +852,29 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if (pin < 0)
return -EBUSY;
- if (on) {
- bool rt_mode = mlx5_real_time_mode(mdev);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+ rt_mode = mlx5_real_time_mode(mdev);
+
+ if (!MLX5_PPS_CAP(mdev)) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ /* Reject requests with unsupported flags */
+ if (mlx5_perout_verify_flags(mdev, rq->perout.flags)) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+ if (on) {
pin_mode = MLX5_PIN_MODE_OUT;
pattern = MLX5_OUT_PATTERN_PERIODIC;
- if (rt_mode && rq->perout.start.sec > U32_MAX)
- return -EINVAL;
+ if (rt_mode && rq->perout.start.sec > U32_MAX) {
+ err = -EINVAL;
+ goto unlock;
+ }
field_select |= MLX5_MTPPS_FS_PIN_MODE |
MLX5_MTPPS_FS_PATTERN |
@@ -760,7 +887,7 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
else
err = perout_conf_1pps(mdev, rq, &time_stamp, rt_mode);
if (err)
- return err;
+ goto unlock;
}
MLX5_SET(mtpps_reg, in, pin, pin);
@@ -773,13 +900,16 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
MLX5_SET(mtpps_reg, in, out_pulse_duration_ns, out_pulse_duration_ns);
err = mlx5_set_mtpps(mdev, in, sizeof(in));
if (err)
- return err;
+ goto unlock;
if (rt_mode)
- return 0;
+ goto unlock;
+
+ err = mlx5_set_mtppse(mdev, pin, 0, MLX5_EVENT_MODE_REPETETIVE & on);
- return mlx5_set_mtppse(mdev, pin, 0,
- MLX5_EVENT_MODE_REPETETIVE & on);
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
}
static int mlx5_pps_configure(struct ptp_clock_info *ptp,
@@ -866,10 +996,8 @@ static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
mtpps_size, MLX5_REG_MTPPS, 0, 0);
}
-static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
+static int mlx5_get_pps_pin_mode(struct mlx5_core_dev *mdev, u8 pin)
{
- struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
-
u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
u8 mode;
int err;
@@ -888,8 +1016,9 @@ static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
return PTP_PF_NONE;
}
-static void mlx5_init_pin_config(struct mlx5_clock *clock)
+static void mlx5_init_pin_config(struct mlx5_core_dev *mdev)
{
+ struct mlx5_clock *clock = mdev->clock;
int i;
if (!clock->ptp_info.n_pins)
@@ -910,15 +1039,15 @@ static void mlx5_init_pin_config(struct mlx5_clock *clock)
sizeof(clock->ptp_info.pin_config[i].name),
"mlx5_pps%d", i);
clock->ptp_info.pin_config[i].index = i;
- clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
+ clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(mdev, i);
clock->ptp_info.pin_config[i].chan = 0;
}
}
static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+ struct mlx5_clock *clock = mdev->clock;
mlx5_query_mtpps(mdev, out, sizeof(out));
@@ -968,16 +1097,16 @@ static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev,
static int mlx5_pps_event(struct notifier_block *nb,
unsigned long type, void *data)
{
- struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
+ struct mlx5_clock_dev_state *clock_state = mlx5_nb_cof(nb, struct mlx5_clock_dev_state,
+ pps_nb);
+ struct mlx5_core_dev *mdev = clock_state->mdev;
+ struct mlx5_clock *clock = mdev->clock;
struct ptp_clock_event ptp_event;
struct mlx5_eqe *eqe = data;
int pin = eqe->data.pps.pin;
- struct mlx5_core_dev *mdev;
unsigned long flags;
u64 ns;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
-
switch (clock->ptp_info.pin_config[pin].func) {
case PTP_PF_EXTTS:
ptp_event.index = pin;
@@ -997,11 +1126,15 @@ static int mlx5_pps_event(struct notifier_block *nb,
ptp_clock_event(clock->ptp, &ptp_event);
break;
case PTP_PF_PEROUT:
+ if (clock->shared) {
+ mlx5_core_warn(mdev, " Received unexpected PPS out event\n");
+ break;
+ }
ns = perout_conf_next_event_timer(mdev, clock);
write_seqlock_irqsave(&clock->lock, flags);
clock->pps_info.start[pin] = ns;
write_sequnlock_irqrestore(&clock->lock, flags);
- schedule_work(&clock->pps_info.out_work);
+ schedule_work(&clock_state->out_work);
break;
default:
mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
@@ -1013,7 +1146,7 @@ static int mlx5_pps_event(struct notifier_block *nb,
static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
struct mlx5_timer *timer = &clock->timer;
u32 dev_freq;
@@ -1029,10 +1162,10 @@ static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
ktime_to_ns(ktime_get_real()));
}
-static void mlx5_init_overflow_period(struct mlx5_clock *clock)
+static void mlx5_init_overflow_period(struct mlx5_core_dev *mdev)
{
- struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
+ struct mlx5_clock *clock = mdev->clock;
struct mlx5_timer *timer = &clock->timer;
u64 overflow_cycles;
u64 frac = 0;
@@ -1065,7 +1198,7 @@ static void mlx5_init_overflow_period(struct mlx5_clock *clock)
static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
struct mlx5_ib_clock_info *info;
struct mlx5_timer *timer;
@@ -1088,7 +1221,7 @@ static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
static void mlx5_init_timer_max_freq_adjustment(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};
u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
u8 log_max_freq_adjustment = 0;
@@ -1107,7 +1240,7 @@ static void mlx5_init_timer_max_freq_adjustment(struct mlx5_core_dev *mdev)
static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
/* Configure the PHC */
clock->ptp_info = mlx5_ptp_clock_info;
@@ -1123,38 +1256,30 @@ static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
mlx5_timecounter_init(mdev);
mlx5_init_clock_info(mdev);
- mlx5_init_overflow_period(clock);
+ mlx5_init_overflow_period(mdev);
if (mlx5_real_time_mode(mdev)) {
struct timespec64 ts;
ktime_get_real_ts64(&ts);
- mlx5_ptp_settime(&clock->ptp_info, &ts);
+ mlx5_clock_settime(mdev, clock, &ts);
}
}
static void mlx5_init_pps(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
-
if (!MLX5_PPS_CAP(mdev))
return;
mlx5_get_pps_caps(mdev);
- mlx5_init_pin_config(clock);
+ mlx5_init_pin_config(mdev);
}
-void mlx5_init_clock(struct mlx5_core_dev *mdev)
+static void mlx5_init_clock_dev(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
-
- if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
- mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
- return;
- }
+ struct mlx5_clock *clock = mdev->clock;
seqlock_init(&clock->lock);
- INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
/* Initialize the device clock */
mlx5_init_timer_clock(mdev);
@@ -1163,35 +1288,27 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
mlx5_init_pps(mdev);
clock->ptp = ptp_clock_register(&clock->ptp_info,
- &mdev->pdev->dev);
+ clock->shared ? NULL : &mdev->pdev->dev);
if (IS_ERR(clock->ptp)) {
- mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
+ mlx5_core_warn(mdev, "%sptp_clock_register failed %ld\n",
+ clock->shared ? "shared clock " : "",
PTR_ERR(clock->ptp));
clock->ptp = NULL;
}
- MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
- mlx5_eq_notifier_register(mdev, &clock->pps_nb);
-
if (clock->ptp)
ptp_schedule_worker(clock->ptp, 0);
}
-void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
+static void mlx5_destroy_clock_dev(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
- if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
- return;
-
- mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
if (clock->ptp) {
ptp_clock_unregister(clock->ptp);
clock->ptp = NULL;
}
- cancel_work_sync(&clock->pps_info.out_work);
-
if (mdev->clock_info) {
free_page((unsigned long)mdev->clock_info);
mdev->clock_info = NULL;
@@ -1199,3 +1316,248 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
kfree(clock->ptp_info.pin_config);
}
+
+static void mlx5_clock_free(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock_priv *cpriv = clock_priv(mdev->clock);
+
+ mlx5_destroy_clock_dev(mdev);
+ mutex_destroy(&cpriv->lock);
+ kfree(cpriv);
+ mdev->clock = NULL;
+}
+
+static int mlx5_clock_alloc(struct mlx5_core_dev *mdev, bool shared)
+{
+ struct mlx5_clock_priv *cpriv;
+ struct mlx5_clock *clock;
+
+ cpriv = kzalloc(sizeof(*cpriv), GFP_KERNEL);
+ if (!cpriv)
+ return -ENOMEM;
+
+ mutex_init(&cpriv->lock);
+ cpriv->mdev = mdev;
+ clock = &cpriv->clock;
+ clock->shared = shared;
+ mdev->clock = clock;
+ mlx5_clock_lock(clock);
+ mlx5_init_clock_dev(mdev);
+ mlx5_clock_unlock(clock);
+
+ if (!clock->shared)
+ return 0;
+
+ if (!clock->ptp) {
+ mlx5_core_warn(mdev, "failed to create ptp dev shared by multiple functions");
+ mlx5_clock_free(mdev);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void mlx5_shared_clock_register(struct mlx5_core_dev *mdev, u64 key)
+{
+ struct mlx5_core_dev *peer_dev, *next = NULL;
+ struct mlx5_devcom_comp_dev *pos;
+
+ mdev->clock_state->compdev = mlx5_devcom_register_component(mdev->priv.devc,
+ MLX5_DEVCOM_SHARED_CLOCK,
+ key, NULL, mdev);
+ if (IS_ERR(mdev->clock_state->compdev))
+ return;
+
+ mlx5_devcom_comp_lock(mdev->clock_state->compdev);
+ mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
+ if (peer_dev->clock) {
+ next = peer_dev;
+ break;
+ }
+ }
+
+ if (next) {
+ mdev->clock = next->clock;
+ /* clock info is shared among all the functions using the same clock */
+ mdev->clock_info = next->clock_info;
+ } else {
+ mlx5_clock_alloc(mdev, true);
+ }
+ mlx5_devcom_comp_unlock(mdev->clock_state->compdev);
+
+ if (!mdev->clock) {
+ mlx5_devcom_unregister_component(mdev->clock_state->compdev);
+ mdev->clock_state->compdev = NULL;
+ }
+}
+
+static void mlx5_shared_clock_unregister(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_core_dev *peer_dev, *next = NULL;
+ struct mlx5_clock *clock = mdev->clock;
+ struct mlx5_devcom_comp_dev *pos;
+
+ mlx5_devcom_comp_lock(mdev->clock_state->compdev);
+ mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
+ if (peer_dev->clock && peer_dev != mdev) {
+ next = peer_dev;
+ break;
+ }
+ }
+
+ if (next) {
+ struct mlx5_clock_priv *cpriv = clock_priv(clock);
+
+ mlx5_clock_lock(clock);
+ if (mdev == cpriv->mdev)
+ cpriv->mdev = next;
+ mlx5_clock_unlock(clock);
+ } else {
+ mlx5_clock_free(mdev);
+ }
+
+ mdev->clock = NULL;
+ mdev->clock_info = NULL;
+ mlx5_devcom_comp_unlock(mdev->clock_state->compdev);
+
+ mlx5_devcom_unregister_component(mdev->clock_state->compdev);
+}
+
+static void mlx5_clock_arm_pps_in_event(struct mlx5_clock *clock,
+ struct mlx5_core_dev *new_mdev,
+ struct mlx5_core_dev *old_mdev)
+{
+ struct ptp_clock_info *ptp_info = &clock->ptp_info;
+ struct mlx5_clock_priv *cpriv = clock_priv(clock);
+ int i;
+
+ for (i = 0; i < ptp_info->n_pins; i++) {
+ if (ptp_info->pin_config[i].func != PTP_PF_EXTTS ||
+ !clock->pps_info.pin_armed[i])
+ continue;
+
+ if (new_mdev) {
+ mlx5_set_mtppse(new_mdev, i, 0, MLX5_EVENT_MODE_REPETETIVE);
+ cpriv->event_mdev = new_mdev;
+ } else {
+ cpriv->event_mdev = NULL;
+ }
+
+ if (old_mdev)
+ mlx5_set_mtppse(old_mdev, i, 0, MLX5_EVENT_MODE_DISABLE);
+ }
+}
+
+void mlx5_clock_load(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock *clock = mdev->clock;
+ struct mlx5_clock_priv *cpriv;
+
+ if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
+ return;
+
+ INIT_WORK(&mdev->clock_state->out_work, mlx5_pps_out);
+ MLX5_NB_INIT(&mdev->clock_state->pps_nb, mlx5_pps_event, PPS_EVENT);
+ mlx5_eq_notifier_register(mdev, &mdev->clock_state->pps_nb);
+
+ if (!clock->shared) {
+ mlx5_clock_arm_pps_in_event(clock, mdev, NULL);
+ return;
+ }
+
+ cpriv = clock_priv(clock);
+ mlx5_devcom_comp_lock(mdev->clock_state->compdev);
+ mlx5_clock_lock(clock);
+ if (mdev == cpriv->mdev && mdev != cpriv->event_mdev)
+ mlx5_clock_arm_pps_in_event(clock, mdev, cpriv->event_mdev);
+ mlx5_clock_unlock(clock);
+ mlx5_devcom_comp_unlock(mdev->clock_state->compdev);
+}
+
+void mlx5_clock_unload(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_core_dev *peer_dev, *next = NULL;
+ struct mlx5_clock *clock = mdev->clock;
+ struct mlx5_devcom_comp_dev *pos;
+
+ if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
+ return;
+
+ if (!clock->shared) {
+ mlx5_clock_arm_pps_in_event(clock, NULL, mdev);
+ goto out;
+ }
+
+ mlx5_devcom_comp_lock(mdev->clock_state->compdev);
+ mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
+ if (peer_dev->clock && peer_dev != mdev) {
+ next = peer_dev;
+ break;
+ }
+ }
+
+ mlx5_clock_lock(clock);
+ if (mdev == clock_priv(clock)->event_mdev)
+ mlx5_clock_arm_pps_in_event(clock, next, mdev);
+ mlx5_clock_unlock(clock);
+ mlx5_devcom_comp_unlock(mdev->clock_state->compdev);
+
+out:
+ mlx5_eq_notifier_unregister(mdev, &mdev->clock_state->pps_nb);
+ cancel_work_sync(&mdev->clock_state->out_work);
+}
+
+static struct mlx5_clock null_clock;
+
+int mlx5_init_clock(struct mlx5_core_dev *mdev)
+{
+ u8 identity[MLX5_RT_CLOCK_IDENTITY_SIZE];
+ struct mlx5_clock_dev_state *clock_state;
+ u64 key;
+ int err;
+
+ if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
+ mdev->clock = &null_clock;
+ mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
+ return 0;
+ }
+
+ clock_state = kzalloc(sizeof(*clock_state), GFP_KERNEL);
+ if (!clock_state)
+ return -ENOMEM;
+ clock_state->mdev = mdev;
+ mdev->clock_state = clock_state;
+
+ if (MLX5_CAP_MCAM_REG3(mdev, mrtcq) && mlx5_real_time_mode(mdev)) {
+ if (mlx5_clock_identity_get(mdev, identity)) {
+ mlx5_core_warn(mdev, "failed to get rt clock identity, create ptp dev per function\n");
+ } else {
+ memcpy(&key, &identity, sizeof(key));
+ mlx5_shared_clock_register(mdev, key);
+ }
+ }
+
+ if (!mdev->clock) {
+ err = mlx5_clock_alloc(mdev, false);
+ if (err) {
+ kfree(clock_state);
+ mdev->clock_state = NULL;
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
+{
+ if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
+ return;
+
+ if (mdev->clock->shared)
+ mlx5_shared_clock_unregister(mdev);
+ else
+ mlx5_clock_free(mdev);
+ kfree(mdev->clock_state);
+ mdev->clock_state = NULL;
+}
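All the PTP callbacks reworked in this file follow the same access pattern once the PHC may be shared between functions; a hedged, non-buildable sketch of that pattern (the callback itself is hypothetical, the helpers are the ones introduced above):

    /* mlx5_clock_lock()/unlock() are no-ops for a per-function clock and take
     * the shared-clock mutex otherwise; mlx5_clock_mdev_get() returns the
     * function currently driving the shared PHC.
     */
    static int example_ptp_op(struct ptp_clock_info *ptp)
    {
        struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
        struct mlx5_core_dev *mdev;
        int err;

        mlx5_clock_lock(clock);
        mdev = mlx5_clock_mdev_get(clock);
        err = 0; /* ... issue MTUTC/MTPPS register access through mdev ... */
        mlx5_clock_unlock(clock);
        return err;
    }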
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
index bd95b9f8d143..c18a652c0faa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
@@ -33,6 +33,35 @@
#ifndef __LIB_CLOCK_H__
#define __LIB_CLOCK_H__
+#include <linux/ptp_clock_kernel.h>
+
+#define MAX_PIN_NUM 8
+struct mlx5_pps {
+ u8 pin_caps[MAX_PIN_NUM];
+ u64 start[MAX_PIN_NUM];
+ u8 enabled;
+ u64 min_npps_period;
+ u64 min_out_pulse_duration_ns;
+ bool pin_armed[MAX_PIN_NUM];
+};
+
+struct mlx5_timer {
+ struct cyclecounter cycles;
+ struct timecounter tc;
+ u32 nominal_c_mult;
+ unsigned long overflow_period;
+};
+
+struct mlx5_clock {
+ seqlock_t lock;
+ struct hwtstamp_config hwtstamp_config;
+ struct ptp_clock *ptp;
+ struct ptp_clock_info ptp_info;
+ struct mlx5_pps pps_info;
+ struct mlx5_timer timer;
+ bool shared;
+};
+
static inline bool mlx5_is_real_time_rq(struct mlx5_core_dev *mdev)
{
u8 rq_ts_format_cap = MLX5_CAP_GEN(mdev, rq_ts_format);
@@ -54,12 +83,14 @@ static inline bool mlx5_is_real_time_sq(struct mlx5_core_dev *mdev)
typedef ktime_t (*cqe_ts_to_ns)(struct mlx5_clock *, u64);
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
-void mlx5_init_clock(struct mlx5_core_dev *mdev);
+int mlx5_init_clock(struct mlx5_core_dev *mdev);
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev);
+void mlx5_clock_load(struct mlx5_core_dev *mdev);
+void mlx5_clock_unload(struct mlx5_core_dev *mdev);
static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
{
- return mdev->clock.ptp ? ptp_clock_index(mdev->clock.ptp) : -1;
+ return mdev->clock->ptp ? ptp_clock_index(mdev->clock->ptp) : -1;
}
static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
@@ -87,8 +118,10 @@ static inline ktime_t mlx5_real_time_cyc2time(struct mlx5_clock *clock,
return ns_to_ktime(time);
}
#else
-static inline void mlx5_init_clock(struct mlx5_core_dev *mdev) {}
+static inline int mlx5_init_clock(struct mlx5_core_dev *mdev) { return 0; }
static inline void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) {}
+static inline void mlx5_clock_load(struct mlx5_core_dev *mdev) {}
+static inline void mlx5_clock_unload(struct mlx5_core_dev *mdev) {}
static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
{
return -1;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
index d58032dd0df7..c79699b94a02 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
@@ -11,6 +11,7 @@ enum mlx5_devcom_component {
MLX5_DEVCOM_MPV,
MLX5_DEVCOM_HCA_PORTS,
MLX5_DEVCOM_SD_GROUP,
+ MLX5_DEVCOM_SHARED_CLOCK,
MLX5_DEVCOM_NUM_COMPONENTS,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index a80ecb672f33..0a3c260af377 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -161,7 +161,8 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
- sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ? FT_TBL_SZ : POOL_NEXT_SIZE;
+ sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
+ FT_TBL_SZ : MLX5_FS_MAX_POOL_SIZE;
ft_attr.max_fte = sz;
/* We use chains_default_ft(chains) as the table's next_ft till
@@ -196,6 +197,11 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
ns = mlx5_get_flow_namespace(chains->dev, chains->ns);
}
+ if (!ns) {
+ mlx5_core_warn(chains->dev, "Failed to get flow namespace\n");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
ft_attr.autogroup.num_reserved_entries = 2;
ft_attr.autogroup.max_num_groups = chains->group_num;
ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
@@ -699,7 +705,7 @@ mlx5_chains_create_global_table(struct mlx5_fs_chains *chains)
goto err_ignore;
}
- chain = mlx5_chains_get_chain_range(chains),
+ chain = mlx5_chains_get_chain_range(chains);
prio = mlx5_chains_get_prio_range(chains);
level = mlx5_chains_get_level_range(chains);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
index 9f13cea16446..eb3bd9c7f66e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
@@ -61,6 +61,25 @@ static void mlx5_cleanup_ttc_rules(struct mlx5_ttc_table *ttc)
}
}
+static const char *mlx5_traffic_types_names[MLX5_NUM_TT] = {
+ [MLX5_TT_IPV4_TCP] = "TT_IPV4_TCP",
+ [MLX5_TT_IPV6_TCP] = "TT_IPV6_TCP",
+ [MLX5_TT_IPV4_UDP] = "TT_IPV4_UDP",
+ [MLX5_TT_IPV6_UDP] = "TT_IPV6_UDP",
+ [MLX5_TT_IPV4_IPSEC_AH] = "TT_IPV4_IPSEC_AH",
+ [MLX5_TT_IPV6_IPSEC_AH] = "TT_IPV6_IPSEC_AH",
+ [MLX5_TT_IPV4_IPSEC_ESP] = "TT_IPV4_IPSEC_ESP",
+ [MLX5_TT_IPV6_IPSEC_ESP] = "TT_IPV6_IPSEC_ESP",
+ [MLX5_TT_IPV4] = "TT_IPV4",
+ [MLX5_TT_IPV6] = "TT_IPV6",
+ [MLX5_TT_ANY] = "TT_ANY"
+};
+
+const char *mlx5_ttc_get_name(enum mlx5_traffic_types tt)
+{
+ return mlx5_traffic_types_names[tt];
+}
+
struct mlx5_etype_proto {
u16 etype;
u8 proto;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
index 92eea6bea310..ab9434fe3ae6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
@@ -49,6 +49,7 @@ struct ttc_params {
struct mlx5_flow_destination tunnel_dests[MLX5_NUM_TUNNEL_TT];
};
+const char *mlx5_ttc_get_name(enum mlx5_traffic_types tt);
struct mlx5_flow_table *mlx5_get_ttc_flow_table(struct mlx5_ttc_table *ttc);
struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index ec956c4bcebd..41e8660c819c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1038,7 +1038,11 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
mlx5_init_reserved_gids(dev);
- mlx5_init_clock(dev);
+ err = mlx5_init_clock(dev);
+ if (err) {
+ mlx5_core_err(dev, "failed to initialize hardware clock\n");
+ goto err_tables_cleanup;
+ }
dev->vxlan = mlx5_vxlan_create(dev);
dev->geneve = mlx5_geneve_create(dev);
@@ -1046,7 +1050,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
err = mlx5_init_rl_table(dev);
if (err) {
mlx5_core_err(dev, "Failed to init rate limiting\n");
- goto err_tables_cleanup;
+ goto err_clock_cleanup;
}
err = mlx5_mpfs_init(dev);
@@ -1123,10 +1127,11 @@ err_mpfs_cleanup:
mlx5_mpfs_cleanup(dev);
err_rl_cleanup:
mlx5_cleanup_rl_table(dev);
-err_tables_cleanup:
+err_clock_cleanup:
mlx5_geneve_destroy(dev->geneve);
mlx5_vxlan_destroy(dev->vxlan);
mlx5_cleanup_clock(dev);
+err_tables_cleanup:
mlx5_cleanup_reserved_gids(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_fw_reset_cleanup(dev);
@@ -1205,24 +1210,24 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou
dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);
- mlx5_start_health_poll(dev);
-
err = mlx5_core_enable_hca(dev, 0);
if (err) {
mlx5_core_err(dev, "enable hca failed\n");
- goto stop_health_poll;
+ goto err_cmd_cleanup;
}
+ mlx5_start_health_poll(dev);
+
err = mlx5_core_set_issi(dev);
if (err) {
mlx5_core_err(dev, "failed to set issi\n");
- goto err_disable_hca;
+ goto stop_health_poll;
}
err = mlx5_satisfy_startup_pages(dev, 1);
if (err) {
mlx5_core_err(dev, "failed to allocate boot pages\n");
- goto err_disable_hca;
+ goto stop_health_poll;
}
err = mlx5_tout_query_dtor(dev);
@@ -1235,10 +1240,9 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou
reclaim_boot_pages:
mlx5_reclaim_startup_pages(dev);
-err_disable_hca:
- mlx5_core_disable_hca(dev, 0);
stop_health_poll:
mlx5_stop_health_poll(dev, boot);
+ mlx5_core_disable_hca(dev, 0);
err_cmd_cleanup:
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_disable(dev);
@@ -1249,8 +1253,8 @@ err_cmd_cleanup:
static void mlx5_function_disable(struct mlx5_core_dev *dev, bool boot)
{
mlx5_reclaim_startup_pages(dev);
- mlx5_core_disable_hca(dev, 0);
mlx5_stop_health_poll(dev, boot);
+ mlx5_core_disable_hca(dev, 0);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_disable(dev);
}
@@ -1359,6 +1363,8 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_eq_table;
}
+ mlx5_clock_load(dev);
+
err = mlx5_fw_tracer_init(dev->tracer);
if (err) {
mlx5_core_err(dev, "Failed to init FW tracer %d\n", err);
@@ -1442,6 +1448,7 @@ err_fpga_start:
mlx5_hv_vhca_cleanup(dev->hv_vhca);
mlx5_fw_reset_events_stop(dev);
mlx5_fw_tracer_cleanup(dev->tracer);
+ mlx5_clock_unload(dev);
mlx5_eq_table_destroy(dev);
err_eq_table:
mlx5_irq_table_destroy(dev);
@@ -1468,6 +1475,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
mlx5_hv_vhca_cleanup(dev->hv_vhca);
mlx5_fw_reset_events_stop(dev);
mlx5_fw_tracer_cleanup(dev->tracer);
+ mlx5_clock_unload(dev);
mlx5_eq_table_destroy(dev);
mlx5_irq_table_destroy(dev);
mlx5_pagealloc_stop(dev);
@@ -1795,6 +1803,7 @@ static const int types[] = {
MLX5_CAP_ADV_VIRTUALIZATION,
MLX5_CAP_CRYPTO,
MLX5_CAP_SHAMPO,
+ MLX5_CAP_ADV_RDMA,
};
static void mlx5_hca_caps_free(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 99de67c3aa74..2e02bdea8361 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -114,6 +114,26 @@ struct mlx5_cmd_alias_obj_create_attr {
u8 access_key[ACCESS_KEY_LEN];
};
+struct mlx5_port_eth_proto {
+ u32 cap;
+ u32 admin;
+ u32 oper;
+};
+
+struct mlx5_module_eeprom_query_params {
+ u16 size;
+ u16 offset;
+ u16 i2c_address;
+ u32 page;
+ u32 bank;
+ u32 module_number;
+};
+
+struct mlx5_link_info {
+ u32 speed;
+ u32 lanes;
+};
+
static inline void mlx5_printk(struct mlx5_core_dev *dev, int level, const char *format, ...)
{
struct device *device = dev->device;
@@ -280,6 +300,78 @@ int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev);
void mlx5_dm_cleanup(struct mlx5_core_dev *dev);
+void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
+int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status);
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status *status);
+int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
+
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
+int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+ u32 *rx_pause, u32 *tx_pause);
+
+int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
+int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
+ u8 *pfc_en_rx);
+
+int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
+ u16 stall_critical_watermark,
+ u16 stall_minor_watermark);
+int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev,
+ u16 *stall_critical_watermark,
+ u16 *stall_minor_watermark);
+
+int mlx5_max_tc(struct mlx5_core_dev *mdev);
+int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
+int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
+ u8 prio, u8 *tc);
+int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
+int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
+ u8 tc, u8 *tc_group);
+int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
+int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
+ u8 tc, u8 *bw_pct);
+int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+ u8 *max_bw_value,
+ u8 *max_bw_unit);
+int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+ u8 *max_bw_value,
+ u8 *max_bw_unit);
+int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
+int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
+
+int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen);
+int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen);
+int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
+void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
+ bool *enabled);
+int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
+ u16 offset, u16 size, u8 *data);
+int
+mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
+ struct mlx5_module_eeprom_query_params *params,
+ u8 *data);
+
+int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
+int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
+int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state);
+int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state);
+int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio);
+int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio);
+
+int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
+ struct mlx5_port_eth_proto *eproto);
+bool mlx5_ptys_ext_supported(struct mlx5_core_dev *mdev);
+const struct mlx5_link_info *mlx5_port_ptys2info(struct mlx5_core_dev *mdev,
+ u32 eth_proto_oper,
+ bool force_legacy);
+u32 mlx5_port_info2linkmodes(struct mlx5_core_dev *mdev,
+ struct mlx5_link_info *info,
+ bool force_legacy);
+int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
+
#define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \
MLX5_CAP_GEN((mdev), pps_modify) && \
MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \
@@ -346,6 +438,8 @@ int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap
#define mlx5_vport_get_other_func_general_cap(dev, vport, out) \
mlx5_vport_get_other_func_cap(dev, vport, out, MLX5_CAP_GENERAL)
+int mlx5_vport_get_vhca_id(struct mlx5_core_dev *dev, u16 vport, u16 *vhca_id);
+
static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
index 0881e961d8b1..586688da9940 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
@@ -10,12 +10,15 @@
struct mlx5_irq;
struct cpu_rmap;
+struct mlx5_irq_pool;
int mlx5_irq_table_init(struct mlx5_core_dev *dev);
void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
int mlx5_irq_table_create(struct mlx5_core_dev *dev);
void mlx5_irq_table_destroy(struct mlx5_core_dev *dev);
void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev);
+struct mlx5_irq_pool *
+mlx5_irq_table_get_comp_irq_pool(struct mlx5_core_dev *dev);
int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table);
int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table);
struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev);
@@ -38,7 +41,6 @@ struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq);
int mlx5_irq_get_index(struct mlx5_irq *irq);
int mlx5_irq_get_irq(const struct mlx5_irq *irq);
-struct mlx5_irq_pool;
#ifdef CONFIG_MLX5_SF
struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
struct cpumask *used_cpus, u16 vecidx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index d9362eabc6a1..2c5f850c31f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -378,6 +378,11 @@ int mlx5_irq_get_index(struct mlx5_irq *irq)
return irq->map.index;
}
+struct mlx5_irq_pool *mlx5_irq_get_pool(struct mlx5_irq *irq)
+{
+ return irq->pool;
+}
+
/* irq_pool API */
/* requesting an irq from a given pool according to given index */
@@ -405,18 +410,20 @@ static struct mlx5_irq_pool *sf_ctrl_irq_pool_get(struct mlx5_irq_table *irq_tab
return irq_table->sf_ctrl_pool;
}
-static struct mlx5_irq_pool *sf_irq_pool_get(struct mlx5_irq_table *irq_table)
+static struct mlx5_irq_pool *
+sf_comp_irq_pool_get(struct mlx5_irq_table *irq_table)
{
return irq_table->sf_comp_pool;
}
-struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev)
+struct mlx5_irq_pool *
+mlx5_irq_table_get_comp_irq_pool(struct mlx5_core_dev *dev)
{
struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
struct mlx5_irq_pool *pool = NULL;
if (mlx5_core_is_sf(dev))
- pool = sf_irq_pool_get(irq_table);
+ pool = sf_comp_irq_pool_get(irq_table);
/* In some configs, there won't be a pool of SFs IRQs. Hence, returning
* the PF IRQs pool in case the SF pool doesn't exist.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
index c4d377f8df30..cc064425fe16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
@@ -28,7 +28,6 @@ struct mlx5_irq_pool {
struct mlx5_core_dev *dev;
};
-struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev);
static inline bool mlx5_irq_pool_is_sf_pool(struct mlx5_irq_pool *pool)
{
return !strncmp("mlx5_sf", pool->name, strlen("mlx5_sf"));
@@ -40,5 +39,6 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
int mlx5_irq_get_locked(struct mlx5_irq *irq);
int mlx5_irq_read_locked(struct mlx5_irq *irq);
int mlx5_irq_put(struct mlx5_irq *irq);
+struct mlx5_irq_pool *mlx5_irq_get_pool(struct mlx5_irq *irq);
#endif /* __PCI_IRQ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 50931584132b..549f1066d2a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -196,7 +196,6 @@ void mlx5_toggle_port_link(struct mlx5_core_dev *dev)
if (ps == MLX5_PORT_UP)
mlx5_set_port_admin_status(dev, MLX5_PORT_UP);
}
-EXPORT_SYMBOL_GPL(mlx5_toggle_port_link);
int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status status)
@@ -210,7 +209,6 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PAOS, 0, 1);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_admin_status);
int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status *status)
@@ -227,7 +225,6 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
*status = MLX5_GET(paos_reg, out, admin_status);
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
u16 *max_mtu, u16 *oper_mtu, u8 port)
@@ -257,7 +254,6 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PMTU, 0, 1);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
u8 port)
@@ -447,7 +443,6 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
return mlx5_query_mcia(dev, &query, data);
}
-EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom);
int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
struct mlx5_module_eeprom_query_params *params,
@@ -467,7 +462,6 @@ int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
return mlx5_query_mcia(dev, params, data);
}
-EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom_by_page);
static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
int pvlc_size, u8 local_port)
@@ -518,7 +512,6 @@ int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PFCC, 0, 1);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
int mlx5_query_port_pause(struct mlx5_core_dev *dev,
u32 *rx_pause, u32 *tx_pause)
@@ -538,7 +531,6 @@ int mlx5_query_port_pause(struct mlx5_core_dev *dev,
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_pause);
int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
u16 stall_critical_watermark,
@@ -597,7 +589,6 @@ int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx)
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PFCC, 0, 1);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_pfc);
int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
{
@@ -616,7 +607,6 @@ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_pfc);
int mlx5_max_tc(struct mlx5_core_dev *mdev)
{
@@ -667,7 +657,6 @@ int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc)
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_prio_tc);
int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
u8 prio, u8 *tc)
@@ -689,7 +678,6 @@ int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc);
static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
int inlen)
@@ -728,7 +716,6 @@ int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group)
return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group);
int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
u8 tc, u8 *tc_group)
@@ -749,7 +736,6 @@ int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_tc_group);
int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
{
@@ -763,7 +749,6 @@ int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_tc_bw_alloc);
int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
u8 tc, u8 *bw_pct)
@@ -784,7 +769,6 @@ int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_tc_bw_alloc);
int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
u8 *max_bw_value,
@@ -808,7 +792,6 @@ int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
}
-EXPORT_SYMBOL_GPL(mlx5_modify_port_ets_rate_limit);
int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
u8 *max_bw_value,
@@ -834,7 +817,6 @@ int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_ets_rate_limit);
int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode)
{
@@ -845,7 +827,6 @@ int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode)
MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode);
return mlx5_cmd_exec_in(mdev, set_wol_rol, in);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_wol);
int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
{
@@ -860,7 +841,6 @@ int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_wol);
int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen)
{
@@ -1058,53 +1038,57 @@ out:
}
/* speed in units of 1Mb */
-static const u32 mlx5e_link_speed[MLX5E_LINK_MODES_NUMBER] = {
- [MLX5E_1000BASE_CX_SGMII] = 1000,
- [MLX5E_1000BASE_KX] = 1000,
- [MLX5E_10GBASE_CX4] = 10000,
- [MLX5E_10GBASE_KX4] = 10000,
- [MLX5E_10GBASE_KR] = 10000,
- [MLX5E_20GBASE_KR2] = 20000,
- [MLX5E_40GBASE_CR4] = 40000,
- [MLX5E_40GBASE_KR4] = 40000,
- [MLX5E_56GBASE_R4] = 56000,
- [MLX5E_10GBASE_CR] = 10000,
- [MLX5E_10GBASE_SR] = 10000,
- [MLX5E_10GBASE_ER] = 10000,
- [MLX5E_40GBASE_SR4] = 40000,
- [MLX5E_40GBASE_LR4] = 40000,
- [MLX5E_50GBASE_SR2] = 50000,
- [MLX5E_100GBASE_CR4] = 100000,
- [MLX5E_100GBASE_SR4] = 100000,
- [MLX5E_100GBASE_KR4] = 100000,
- [MLX5E_100GBASE_LR4] = 100000,
- [MLX5E_100BASE_TX] = 100,
- [MLX5E_1000BASE_T] = 1000,
- [MLX5E_10GBASE_T] = 10000,
- [MLX5E_25GBASE_CR] = 25000,
- [MLX5E_25GBASE_KR] = 25000,
- [MLX5E_25GBASE_SR] = 25000,
- [MLX5E_50GBASE_CR2] = 50000,
- [MLX5E_50GBASE_KR2] = 50000,
+static const struct mlx5_link_info mlx5e_link_info[MLX5E_LINK_MODES_NUMBER] = {
+ [MLX5E_1000BASE_CX_SGMII] = {.speed = 1000, .lanes = 1},
+ [MLX5E_1000BASE_KX] = {.speed = 1000, .lanes = 1},
+ [MLX5E_10GBASE_CX4] = {.speed = 10000, .lanes = 4},
+ [MLX5E_10GBASE_KX4] = {.speed = 10000, .lanes = 4},
+ [MLX5E_10GBASE_KR] = {.speed = 10000, .lanes = 1},
+ [MLX5E_20GBASE_KR2] = {.speed = 20000, .lanes = 2},
+ [MLX5E_40GBASE_CR4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_40GBASE_KR4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_56GBASE_R4] = {.speed = 56000, .lanes = 4},
+ [MLX5E_10GBASE_CR] = {.speed = 10000, .lanes = 1},
+ [MLX5E_10GBASE_SR] = {.speed = 10000, .lanes = 1},
+ [MLX5E_10GBASE_ER] = {.speed = 10000, .lanes = 1},
+ [MLX5E_40GBASE_SR4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_40GBASE_LR4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_50GBASE_SR2] = {.speed = 50000, .lanes = 2},
+ [MLX5E_100GBASE_CR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100GBASE_SR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100GBASE_KR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100GBASE_LR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100BASE_TX] = {.speed = 100, .lanes = 1},
+ [MLX5E_1000BASE_T] = {.speed = 1000, .lanes = 1},
+ [MLX5E_10GBASE_T] = {.speed = 10000, .lanes = 1},
+ [MLX5E_25GBASE_CR] = {.speed = 25000, .lanes = 1},
+ [MLX5E_25GBASE_KR] = {.speed = 25000, .lanes = 1},
+ [MLX5E_25GBASE_SR] = {.speed = 25000, .lanes = 1},
+ [MLX5E_50GBASE_CR2] = {.speed = 50000, .lanes = 2},
+ [MLX5E_50GBASE_KR2] = {.speed = 50000, .lanes = 2},
};
-static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
- [MLX5E_SGMII_100M] = 100,
- [MLX5E_1000BASE_X_SGMII] = 1000,
- [MLX5E_5GBASE_R] = 5000,
- [MLX5E_10GBASE_XFI_XAUI_1] = 10000,
- [MLX5E_40GBASE_XLAUI_4_XLPPI_4] = 40000,
- [MLX5E_25GAUI_1_25GBASE_CR_KR] = 25000,
- [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2] = 50000,
- [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR] = 50000,
- [MLX5E_CAUI_4_100GBASE_CR4_KR4] = 100000,
- [MLX5E_100GAUI_2_100GBASE_CR2_KR2] = 100000,
- [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = 200000,
- [MLX5E_400GAUI_8_400GBASE_CR8] = 400000,
- [MLX5E_100GAUI_1_100GBASE_CR_KR] = 100000,
- [MLX5E_200GAUI_2_200GBASE_CR2_KR2] = 200000,
- [MLX5E_400GAUI_4_400GBASE_CR4_KR4] = 400000,
- [MLX5E_800GAUI_8_800GBASE_CR8_KR8] = 800000,
+static const struct mlx5_link_info
+mlx5e_ext_link_info[MLX5E_EXT_LINK_MODES_NUMBER] = {
+ [MLX5E_SGMII_100M] = {.speed = 100, .lanes = 1},
+ [MLX5E_1000BASE_X_SGMII] = {.speed = 1000, .lanes = 1},
+ [MLX5E_5GBASE_R] = {.speed = 5000, .lanes = 1},
+ [MLX5E_10GBASE_XFI_XAUI_1] = {.speed = 10000, .lanes = 1},
+ [MLX5E_40GBASE_XLAUI_4_XLPPI_4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_25GAUI_1_25GBASE_CR_KR] = {.speed = 25000, .lanes = 1},
+ [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2] = {.speed = 50000, .lanes = 2},
+ [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR] = {.speed = 50000, .lanes = 1},
+ [MLX5E_CAUI_4_100GBASE_CR4_KR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100GAUI_2_100GBASE_CR2_KR2] = {.speed = 100000, .lanes = 2},
+ [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = {.speed = 200000, .lanes = 4},
+ [MLX5E_400GAUI_8_400GBASE_CR8] = {.speed = 400000, .lanes = 8},
+ [MLX5E_100GAUI_1_100GBASE_CR_KR] = {.speed = 100000, .lanes = 1},
+ [MLX5E_200GAUI_2_200GBASE_CR2_KR2] = {.speed = 200000, .lanes = 2},
+ [MLX5E_400GAUI_4_400GBASE_CR4_KR4] = {.speed = 400000, .lanes = 4},
+ [MLX5E_800GAUI_8_800GBASE_CR8_KR8] = {.speed = 800000, .lanes = 8},
+ [MLX5E_200GAUI_1_200GBASE_CR1_KR1] = {.speed = 200000, .lanes = 1},
+ [MLX5E_400GAUI_2_400GBASE_CR2_KR2] = {.speed = 400000, .lanes = 2},
+ [MLX5E_800GAUI_4_800GBASE_CR4_KR4] = {.speed = 800000, .lanes = 4},
};
int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
@@ -1142,54 +1126,61 @@ bool mlx5_ptys_ext_supported(struct mlx5_core_dev *mdev)
return !!eproto.cap;
}
-static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev,
- const u32 **arr, u32 *size,
- bool force_legacy)
+static void mlx5e_port_get_link_mode_info_arr(struct mlx5_core_dev *mdev,
+ const struct mlx5_link_info **arr,
+ u32 *size,
+ bool force_legacy)
{
bool ext = force_legacy ? false : mlx5_ptys_ext_supported(mdev);
- *size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) :
- ARRAY_SIZE(mlx5e_link_speed);
- *arr = ext ? mlx5e_ext_link_speed : mlx5e_link_speed;
+ *size = ext ? ARRAY_SIZE(mlx5e_ext_link_info) :
+ ARRAY_SIZE(mlx5e_link_info);
+ *arr = ext ? mlx5e_ext_link_info : mlx5e_link_info;
}
-u32 mlx5_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
- bool force_legacy)
+const struct mlx5_link_info *mlx5_port_ptys2info(struct mlx5_core_dev *mdev,
+ u32 eth_proto_oper,
+ bool force_legacy)
{
unsigned long temp = eth_proto_oper;
- const u32 *table;
- u32 speed = 0;
+ const struct mlx5_link_info *table;
u32 max_size;
int i;
- mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy);
+ mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size,
+ force_legacy);
i = find_first_bit(&temp, max_size);
if (i < max_size)
- speed = table[i];
- return speed;
+ return &table[i];
+
+ return NULL;
}
-u32 mlx5_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
- bool force_legacy)
+u32 mlx5_port_info2linkmodes(struct mlx5_core_dev *mdev,
+ struct mlx5_link_info *info,
+ bool force_legacy)
{
+ const struct mlx5_link_info *table;
u32 link_modes = 0;
- const u32 *table;
u32 max_size;
int i;
- mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy);
+ mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size,
+ force_legacy);
for (i = 0; i < max_size; ++i) {
- if (table[i] == speed)
- link_modes |= MLX5E_PROT_MASK(i);
+ if (table[i].speed == info->speed) {
+ if (!info->lanes || table[i].lanes == info->lanes)
+ link_modes |= MLX5E_PROT_MASK(i);
+ }
}
return link_modes;
}
int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
{
+ const struct mlx5_link_info *table;
struct mlx5_port_eth_proto eproto;
u32 max_speed = 0;
- const u32 *table;
u32 max_size;
bool ext;
int err;
@@ -1200,10 +1191,10 @@ int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
if (err)
return err;
- mlx5e_port_get_speed_arr(mdev, &table, &max_size, false);
+ mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size, false);
for (i = 0; i < max_size; ++i)
if (eproto.cap & MLX5E_PROT_MASK(i))
- max_speed = max(max_speed, table[i]);
+ max_speed = max(max_speed, table[i].speed);
*speed = max_speed;
return 0;
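The port.c hunks replace the plain speed tables with struct mlx5_link_info entries carrying both speed and lane count, and rename the lookups to match. A minimal userspace model of the two directions of that lookup is sketched below; the table contents, bit positions and names are illustrative, not the driver's actual PTYS layout.

#include <stdio.h>

struct link_info { unsigned int speed; unsigned int lanes; };

/* Example table indexed by link-mode bit number (speeds in Mb/s). */
static const struct link_info table[] = {
	[0] = { .speed = 100000, .lanes = 4 },
	[1] = { .speed = 100000, .lanes = 2 },
	[2] = { .speed = 200000, .lanes = 4 },
};

#define TABLE_SIZE (sizeof(table) / sizeof(table[0]))

/* Map an operational protocol mask to the info of its lowest set bit. */
static const struct link_info *ptys2info(unsigned int proto_oper)
{
	for (unsigned int i = 0; i < TABLE_SIZE; i++)
		if (proto_oper & (1u << i))
			return &table[i];
	return NULL;
}

/* Map a requested speed (and optional lane count, 0 = any) to a bitmask
 * of matching link modes -- mirrors the lanes filter added above.
 */
static unsigned int info2linkmodes(unsigned int speed, unsigned int lanes)
{
	unsigned int modes = 0;

	for (unsigned int i = 0; i < TABLE_SIZE; i++)
		if (table[i].speed == speed &&
		    (!lanes || table[i].lanes == lanes))
			modes |= 1u << i;
	return modes;
}

int main(void)
{
	const struct link_info *info = ptys2info(0x2);	/* bit 1 set */

	printf("oper: %u Mb/s over %u lanes\n", info->speed, info->lanes);
	printf("100G modes (any lanes): 0x%x\n", info2linkmodes(100000, 0));
	printf("100G x2 modes:          0x%x\n", info2linkmodes(100000, 2));
	return 0;
}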
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index b96909fbeb12..0864ba625c07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -285,7 +285,7 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_
NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported");
return -EOPNOTSUPP;
}
- if (new_attr->pfnum != mlx5_get_dev_index(dev)) {
+ if (new_attr->pfnum != PCI_FUNC(dev->pdev->devfn)) {
NL_SET_ERR_MSG_MOD(extack, "Invalid pfnum supplied");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
index 3dbd4efa21a2..19dce1ba512d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
@@ -220,7 +220,7 @@ static int hws_bwc_queue_poll(struct mlx5hws_context *ctx,
bool drain)
{
unsigned long timeout = jiffies +
- msecs_to_jiffies(MLX5HWS_BWC_POLLING_TIMEOUT * MSEC_PER_SEC);
+ secs_to_jiffies(MLX5HWS_BWC_POLLING_TIMEOUT);
struct mlx5hws_flow_op_result comp[MLX5HWS_BWC_MATCHER_REHASH_BURST_TH];
u16 burst_th = hws_bwc_get_burst_th(ctx, queue_id);
bool got_comp = *pending_rules >= burst_th;
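The bwc.c hunk is a drop-in conversion: a timeout already expressed in seconds is converted with secs_to_jiffies() instead of being scaled to milliseconds first. A small userspace model of the equivalence, with example HZ and timeout values, follows.

#include <stdio.h>

#define HZ		1000UL	/* example tick rate, not a fixed kernel value */
#define MSEC_PER_SEC	1000UL

static unsigned long msecs_to_jiffies(unsigned long msecs)
{
	return msecs * HZ / MSEC_PER_SEC;
}

static unsigned long secs_to_jiffies(unsigned long secs)
{
	return secs * HZ;
}

int main(void)
{
	unsigned long timeout_secs = 30;	/* arbitrary example */

	/* Both expressions name the same deadline; the second skips the
	 * detour through milliseconds.
	 */
	printf("%lu == %lu\n",
	       msecs_to_jiffies(timeout_secs * MSEC_PER_SEC),
	       secs_to_jiffies(timeout_secs));
	return 0;
}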
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
index f9f569131dde..47f7ed141553 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
@@ -24,8 +24,8 @@ struct mlx5hws_bwc_matcher {
struct mlx5hws_matcher *matcher;
struct mlx5hws_match_template *mt;
struct mlx5hws_action_template *at[MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM];
+ u32 priority;
u8 num_of_at;
- u16 priority;
u8 size_log;
atomic_t num_of_rules;
struct list_head *rules;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
index 487e75476b0a..e8f98c109b99 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
@@ -130,12 +130,6 @@ int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev,
return mlx5_cmd_exec_in(mdev, destroy_flow_table, in);
}
-void mlx5hws_cmd_alias_flow_table_destroy(struct mlx5_core_dev *mdev,
- u32 table_id)
-{
- hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_FT_ALIAS, table_id);
-}
-
static int hws_cmd_flow_group_create(struct mlx5_core_dev *mdev,
struct mlx5hws_cmd_fg_attr *fg_attr,
u32 *group_id)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
index 610c63d81ad9..51d9e0291ac1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
@@ -258,9 +258,6 @@ int mlx5hws_cmd_flow_table_query(struct mlx5_core_dev *mdev,
int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev,
u8 fw_ft_type, u32 table_id);
-void mlx5hws_cmd_alias_flow_table_destroy(struct mlx5_core_dev *mdev,
- u32 table_id);
-
int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev,
struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
u32 *rtc_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
index 10ece7df1cfa..c8cc0c8115f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
@@ -500,7 +500,8 @@ hws_definer_check_match_flags(struct mlx5hws_definer_conv_data *cd)
return 0;
err_conflict:
- mlx5hws_err(cd->ctx, "Invalid definer fields combination\n");
+ mlx5hws_err(cd->ctx, "Invalid definer fields combination: match_flags = 0x%08x\n",
+ cd->match_flags);
return -EINVAL;
}
@@ -1985,8 +1986,7 @@ int mlx5hws_definer_get_obj(struct mlx5hws_context *ctx,
continue;
/* Reuse definer and set LRU (move to be first in the list) */
- list_del_init(&cached_definer->list_node);
- list_add(&cached_definer->list_node, &cache->list_head);
+ list_move(&cached_definer->list_node, &cache->list_head);
cached_definer->refcount++;
return cached_definer->definer.obj_id;
}
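list_move() is simply list_del() followed by list_add(), so the LRU "promote to front" above becomes one call. The self-contained sketch below uses a minimal circular doubly-linked list (not the kernel's <linux/list.h>) to show the idiom.

#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add(struct list_head *e, struct list_head *head)
{
	e->next = head->next;
	e->prev = head;
	head->next->prev = e;
	head->next = e;
}

/* The combined helper: unlink and re-insert right after @head. */
static void list_move(struct list_head *e, struct list_head *head)
{
	list_del(e);
	list_add(e, head);
}

int main(void)
{
	struct list_head cache, a, b;

	list_init(&cache);
	list_add(&a, &cache);		/* cache -> a */
	list_add(&b, &cache);		/* cache -> b -> a */
	list_move(&a, &cache);		/* LRU hit on a: cache -> a -> b */
	printf("front is %s\n", cache.next == &a ? "a" : "b");
	return 0;
}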
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
index f34bbbbba1c2..1b787cd66e6f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
@@ -66,6 +66,8 @@ static int mlx5_fs_init_hws_actions_pool(struct mlx5_core_dev *dev,
xa_init(&hws_pool->table_dests);
xa_init(&hws_pool->vport_dests);
xa_init(&hws_pool->vport_vhca_dests);
+ xa_init(&hws_pool->aso_meters);
+ xa_init(&hws_pool->sample_dests);
return 0;
cleanup_insert_hdr:
@@ -88,10 +90,17 @@ destroy_tag:
static void mlx5_fs_cleanup_hws_actions_pool(struct mlx5_fs_hws_context *fs_ctx)
{
struct mlx5_fs_hws_actions_pool *hws_pool = &fs_ctx->hws_pool;
+ struct mlx5_fs_hws_data *fs_hws_data;
struct mlx5hws_action *action;
struct mlx5_fs_pool *pool;
unsigned long i;
+ xa_for_each(&hws_pool->sample_dests, i, fs_hws_data)
+ kfree(fs_hws_data);
+ xa_destroy(&hws_pool->sample_dests);
+ xa_for_each(&hws_pool->aso_meters, i, fs_hws_data)
+ kfree(fs_hws_data);
+ xa_destroy(&hws_pool->aso_meters);
xa_for_each(&hws_pool->vport_vhca_dests, i, action)
mlx5hws_action_destroy(action);
xa_destroy(&hws_pool->vport_vhca_dests);
@@ -459,6 +468,106 @@ mlx5_fs_create_dest_action_range(struct mlx5hws_context *ctx,
flags);
}
+static struct mlx5_fs_hws_data *
+mlx5_fs_get_cached_hws_data(struct xarray *cache_xa, unsigned long index)
+{
+ struct mlx5_fs_hws_data *fs_hws_data;
+ int err;
+
+ xa_lock(cache_xa);
+ fs_hws_data = xa_load(cache_xa, index);
+ if (!fs_hws_data) {
+ fs_hws_data = kzalloc(sizeof(*fs_hws_data), GFP_ATOMIC);
+ if (!fs_hws_data) {
+ xa_unlock(cache_xa);
+ return NULL;
+ }
+ refcount_set(&fs_hws_data->hws_action_refcount, 0);
+ mutex_init(&fs_hws_data->lock);
+ err = __xa_insert(cache_xa, index, fs_hws_data, GFP_ATOMIC);
+ if (err) {
+ kfree(fs_hws_data);
+ xa_unlock(cache_xa);
+ return NULL;
+ }
+ }
+ xa_unlock(cache_xa);
+
+ return fs_hws_data;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_aso_meter(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_exe_aso *exe_aso)
+{
+ struct mlx5_fs_hws_create_action_ctx create_ctx;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ struct mlx5_fs_hws_data *meter_hws_data;
+ u32 id = exe_aso->base_id;
+ struct xarray *meters_xa;
+
+ meters_xa = &fs_ctx->hws_pool.aso_meters;
+ meter_hws_data = mlx5_fs_get_cached_hws_data(meters_xa, id);
+ if (!meter_hws_data)
+ return NULL;
+
+ create_ctx.hws_ctx = ctx;
+ create_ctx.actions_type = MLX5HWS_ACTION_TYP_ASO_METER;
+ create_ctx.id = id;
+ create_ctx.return_reg_id = exe_aso->return_reg_id;
+
+ return mlx5_fs_get_hws_action(meter_hws_data, &create_ctx);
+}
+
+static void mlx5_fs_put_action_aso_meter(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_exe_aso *exe_aso)
+{
+ struct mlx5_fs_hws_data *meter_hws_data;
+ struct xarray *meters_xa;
+
+ meters_xa = &fs_ctx->hws_pool.aso_meters;
+ meter_hws_data = xa_load(meters_xa, exe_aso->base_id);
+ if (!meter_hws_data)
+ return;
+ return mlx5_fs_put_hws_action(meter_hws_data);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_sampler(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst)
+{
+ struct mlx5_fs_hws_create_action_ctx create_ctx;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ struct mlx5_fs_hws_data *sampler_hws_data;
+ u32 id = dst->dest_attr.sampler_id;
+ struct xarray *sampler_xa;
+
+ sampler_xa = &fs_ctx->hws_pool.sample_dests;
+ sampler_hws_data = mlx5_fs_get_cached_hws_data(sampler_xa, id);
+ if (!sampler_hws_data)
+ return NULL;
+
+ create_ctx.hws_ctx = ctx;
+ create_ctx.actions_type = MLX5HWS_ACTION_TYP_SAMPLER;
+ create_ctx.id = id;
+
+ return mlx5_fs_get_hws_action(sampler_hws_data, &create_ctx);
+}
+
+static void mlx5_fs_put_dest_action_sampler(struct mlx5_fs_hws_context *fs_ctx,
+ u32 sampler_id)
+{
+ struct mlx5_fs_hws_data *sampler_hws_data;
+ struct xarray *sampler_xa;
+
+ sampler_xa = &fs_ctx->hws_pool.sample_dests;
+ sampler_hws_data = xa_load(sampler_xa, sampler_id);
+ if (!sampler_hws_data)
+ return;
+
+ mlx5_fs_put_hws_action(sampler_hws_data);
+}
+
static struct mlx5hws_action *
mlx5_fs_create_action_dest_array(struct mlx5hws_context *ctx,
struct mlx5hws_action_dest_attr *dests,
@@ -519,26 +628,101 @@ mlx5_fs_create_action_last(struct mlx5hws_context *ctx)
return mlx5hws_action_create_last(ctx, flags);
}
-static void mlx5_fs_destroy_fs_action(struct mlx5_fs_hws_rule_action *fs_action)
+static struct mlx5hws_action *
+mlx5_fs_create_hws_action(struct mlx5_fs_hws_create_action_ctx *create_ctx)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+
+ switch (create_ctx->actions_type) {
+ case MLX5HWS_ACTION_TYP_CTR:
+ return mlx5hws_action_create_counter(create_ctx->hws_ctx,
+ create_ctx->id, flags);
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ return mlx5hws_action_create_aso_meter(create_ctx->hws_ctx,
+ create_ctx->id,
+ create_ctx->return_reg_id,
+ flags);
+ case MLX5HWS_ACTION_TYP_SAMPLER:
+ return mlx5hws_action_create_flow_sampler(create_ctx->hws_ctx,
+ create_ctx->id, flags);
+ default:
+ return NULL;
+ }
+}
+
+struct mlx5hws_action *
+mlx5_fs_get_hws_action(struct mlx5_fs_hws_data *fs_hws_data,
+ struct mlx5_fs_hws_create_action_ctx *create_ctx)
+{
+ /* try avoid locking if not necessary */
+ if (refcount_inc_not_zero(&fs_hws_data->hws_action_refcount))
+ return fs_hws_data->hws_action;
+
+ mutex_lock(&fs_hws_data->lock);
+ if (refcount_inc_not_zero(&fs_hws_data->hws_action_refcount)) {
+ mutex_unlock(&fs_hws_data->lock);
+ return fs_hws_data->hws_action;
+ }
+ fs_hws_data->hws_action = mlx5_fs_create_hws_action(create_ctx);
+ if (!fs_hws_data->hws_action) {
+ mutex_unlock(&fs_hws_data->lock);
+ return NULL;
+ }
+ refcount_set(&fs_hws_data->hws_action_refcount, 1);
+ mutex_unlock(&fs_hws_data->lock);
+
+ return fs_hws_data->hws_action;
+}
+
+void mlx5_fs_put_hws_action(struct mlx5_fs_hws_data *fs_hws_data)
+{
+ if (!fs_hws_data)
+ return;
+
+ /* try avoid locking if not necessary */
+ if (refcount_dec_not_one(&fs_hws_data->hws_action_refcount))
+ return;
+
+ mutex_lock(&fs_hws_data->lock);
+ if (!refcount_dec_and_test(&fs_hws_data->hws_action_refcount)) {
+ mutex_unlock(&fs_hws_data->lock);
+ return;
+ }
+ mlx5hws_action_destroy(fs_hws_data->hws_action);
+ fs_hws_data->hws_action = NULL;
+ mutex_unlock(&fs_hws_data->lock);
+}
+
+static void mlx5_fs_destroy_fs_action(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_fs_hws_rule_action *fs_action)
{
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+
switch (mlx5hws_action_get_type(fs_action->action)) {
case MLX5HWS_ACTION_TYP_CTR:
mlx5_fc_put_hws_action(fs_action->counter);
break;
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ mlx5_fs_put_action_aso_meter(fs_ctx, fs_action->exe_aso);
+ break;
+ case MLX5HWS_ACTION_TYP_SAMPLER:
+ mlx5_fs_put_dest_action_sampler(fs_ctx, fs_action->sampler_id);
+ break;
default:
mlx5hws_action_destroy(fs_action->action);
}
}
static void
-mlx5_fs_destroy_fs_actions(struct mlx5_fs_hws_rule_action **fs_actions,
+mlx5_fs_destroy_fs_actions(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_fs_hws_rule_action **fs_actions,
int *num_fs_actions)
{
int i;
/* Free in reverse order to handle action dependencies */
for (i = *num_fs_actions - 1; i >= 0; i--)
- mlx5_fs_destroy_fs_action(*fs_actions + i);
+ mlx5_fs_destroy_fs_action(ns, *fs_actions + i);
*num_fs_actions = 0;
kfree(*fs_actions);
*fs_actions = NULL;
@@ -735,8 +919,25 @@ static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns,
}
if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
- err = -EOPNOTSUPP;
- goto free_actions;
+ if (fte_action->exe_aso.type != MLX5_EXE_ASO_FLOW_METER ||
+ num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+
+ tmp_action = mlx5_fs_get_action_aso_meter(fs_ctx,
+ &fte_action->exe_aso);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions].aso_meter.offset =
+ fte_action->exe_aso.flow_meter.meter_idx;
+ (*ractions)[num_actions].aso_meter.init_color =
+ fte_action->exe_aso.flow_meter.init_color;
+ (*ractions)[num_actions++].action = tmp_action;
+ fs_actions[num_fs_actions].action = tmp_action;
+ fs_actions[num_fs_actions++].exe_aso = &fte_action->exe_aso;
}
if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
@@ -784,6 +985,14 @@ static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns,
dest_action = mlx5_fs_get_dest_action_vport(fs_ctx, dst,
type_uplink);
break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
+ dest_action =
+ mlx5_fs_get_dest_action_sampler(fs_ctx,
+ dst);
+ fs_actions[num_fs_actions].action = dest_action;
+ fs_actions[num_fs_actions++].sampler_id =
+ dst->dest_attr.sampler_id;
+ break;
default:
err = -EOPNOTSUPP;
goto free_actions;
@@ -850,7 +1059,7 @@ static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns,
return 0;
free_actions:
- mlx5_fs_destroy_fs_actions(&fs_actions, &num_fs_actions);
+ mlx5_fs_destroy_fs_actions(ns, &fs_actions, &num_fs_actions);
free_dest_actions_alloc:
kfree(dest_actions);
free_fs_actions_alloc:
@@ -900,7 +1109,7 @@ static int mlx5_cmd_hws_create_fte(struct mlx5_flow_root_namespace *ns,
return 0;
free_actions:
- mlx5_fs_destroy_fs_actions(&fte->fs_hws_rule.hws_fs_actions,
+ mlx5_fs_destroy_fs_actions(ns, &fte->fs_hws_rule.hws_fs_actions,
&fte->fs_hws_rule.num_fs_actions);
out_err:
mlx5_core_err(ns->dev, "Failed to create hws rule err(%d)\n", err);
@@ -920,7 +1129,8 @@ static int mlx5_cmd_hws_delete_fte(struct mlx5_flow_root_namespace *ns,
err = mlx5hws_bwc_rule_destroy(rule->bwc_rule);
rule->bwc_rule = NULL;
- mlx5_fs_destroy_fs_actions(&rule->hws_fs_actions, &rule->num_fs_actions);
+ mlx5_fs_destroy_fs_actions(ns, &rule->hws_fs_actions,
+ &rule->num_fs_actions);
return err;
}
@@ -958,11 +1168,12 @@ static int mlx5_cmd_hws_update_fte(struct mlx5_flow_root_namespace *ns,
if (ret)
goto restore_actions;
- mlx5_fs_destroy_fs_actions(&saved_hws_fs_actions, &saved_num_fs_actions);
+ mlx5_fs_destroy_fs_actions(ns, &saved_hws_fs_actions,
+ &saved_num_fs_actions);
return ret;
restore_actions:
- mlx5_fs_destroy_fs_actions(&fte->fs_hws_rule.hws_fs_actions,
+ mlx5_fs_destroy_fs_actions(ns, &fte->fs_hws_rule.hws_fs_actions,
&fte->fs_hws_rule.num_fs_actions);
fte->fs_hws_rule.hws_fs_actions = saved_hws_fs_actions;
fte->fs_hws_rule.num_fs_actions = saved_num_fs_actions;
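The new mlx5_fs_get_hws_action()/mlx5_fs_put_hws_action() pair generalizes the refcounted, lazily created HWS action that the counter pool already used: a lock-free fast path when the action exists, and a mutex-protected slow path that re-checks before creating. A userspace model of that pattern is sketched below; it uses C11 atomics and pthreads rather than the kernel's refcount_t and mutex, the put side always takes the lock for brevity, and the create/destroy steps are stand-ins.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct cached_action {
	void *action;			/* lazily created resource  */
	atomic_int refcount;		/* 0 means "not created"    */
	pthread_mutex_t lock;		/* serializes create/destroy */
};

/* Increment only if the count is already non-zero (fast path). */
static int refcount_inc_not_zero(atomic_int *r)
{
	int old = atomic_load(r);

	while (old != 0)
		if (atomic_compare_exchange_weak(r, &old, old + 1))
			return 1;
	return 0;
}

static void *get_action(struct cached_action *c)
{
	if (refcount_inc_not_zero(&c->refcount))
		return c->action;

	pthread_mutex_lock(&c->lock);
	if (refcount_inc_not_zero(&c->refcount)) {	/* lost the race */
		pthread_mutex_unlock(&c->lock);
		return c->action;
	}
	c->action = malloc(1);				/* "create" */
	if (c->action)
		atomic_store(&c->refcount, 1);
	pthread_mutex_unlock(&c->lock);
	return c->action;
}

static void put_action(struct cached_action *c)
{
	pthread_mutex_lock(&c->lock);
	if (atomic_fetch_sub(&c->refcount, 1) == 1) {	/* last user */
		free(c->action);
		c->action = NULL;
	}
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct cached_action c = { .lock = PTHREAD_MUTEX_INITIALIZER };
	void *a = get_action(&c);
	void *b = get_action(&c);	/* fast path, same object */

	printf("shared: %s\n", a == b ? "yes" : "no");
	put_action(&c);
	put_action(&c);
	return 0;
}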
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
index cbddb72d4362..8b56298288da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
@@ -22,6 +22,8 @@ struct mlx5_fs_hws_actions_pool {
struct xarray table_dests;
struct xarray vport_vhca_dests;
struct xarray vport_dests;
+ struct xarray aso_meters;
+ struct xarray sample_dests;
};
struct mlx5_fs_hws_context {
@@ -49,6 +51,8 @@ struct mlx5_fs_hws_rule_action {
struct mlx5hws_action *action;
union {
struct mlx5_fc *counter;
+ struct mlx5_exe_aso *exe_aso;
+ u32 sampler_id;
};
};
@@ -58,6 +62,26 @@ struct mlx5_fs_hws_rule {
int num_fs_actions;
};
+struct mlx5_fs_hws_data {
+ struct mlx5hws_action *hws_action;
+ struct mutex lock; /* protects hws_action */
+ refcount_t hws_action_refcount;
+};
+
+struct mlx5_fs_hws_create_action_ctx {
+ enum mlx5hws_action_type actions_type;
+ struct mlx5hws_context *hws_ctx;
+ u32 id;
+ union {
+ u8 return_reg_id;
+ };
+};
+
+struct mlx5hws_action *
+mlx5_fs_get_hws_action(struct mlx5_fs_hws_data *fs_hws_data,
+ struct mlx5_fs_hws_create_action_ctx *create_ctx);
+void mlx5_fs_put_hws_action(struct mlx5_fs_hws_data *fs_hws_data);
+
#ifdef CONFIG_MLX5_HW_STEERING
bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
index 2ae4ac62b0e2..f1ecdba74e1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
@@ -405,46 +405,17 @@ bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool,
struct mlx5hws_action *mlx5_fc_get_hws_action(struct mlx5hws_context *ctx,
struct mlx5_fc *counter)
{
- u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_fs_hws_create_action_ctx create_ctx;
struct mlx5_fc_bulk *fc_bulk = counter->bulk;
- struct mlx5_fc_bulk_hws_data *fc_bulk_hws;
- fc_bulk_hws = &fc_bulk->hws_data;
- /* try avoid locking if not necessary */
- if (refcount_inc_not_zero(&fc_bulk_hws->hws_action_refcount))
- return fc_bulk_hws->hws_action;
+ create_ctx.hws_ctx = ctx;
+ create_ctx.id = fc_bulk->base_id;
+ create_ctx.actions_type = MLX5HWS_ACTION_TYP_CTR;
- mutex_lock(&fc_bulk_hws->lock);
- if (refcount_inc_not_zero(&fc_bulk_hws->hws_action_refcount)) {
- mutex_unlock(&fc_bulk_hws->lock);
- return fc_bulk_hws->hws_action;
- }
- fc_bulk_hws->hws_action =
- mlx5hws_action_create_counter(ctx, fc_bulk->base_id, flags);
- if (!fc_bulk_hws->hws_action) {
- mutex_unlock(&fc_bulk_hws->lock);
- return NULL;
- }
- refcount_set(&fc_bulk_hws->hws_action_refcount, 1);
- mutex_unlock(&fc_bulk_hws->lock);
-
- return fc_bulk_hws->hws_action;
+ return mlx5_fs_get_hws_action(&fc_bulk->hws_data, &create_ctx);
}
void mlx5_fc_put_hws_action(struct mlx5_fc *counter)
{
- struct mlx5_fc_bulk_hws_data *fc_bulk_hws = &counter->bulk->hws_data;
-
- /* try avoid locking if not necessary */
- if (refcount_dec_not_one(&fc_bulk_hws->hws_action_refcount))
- return;
-
- mutex_lock(&fc_bulk_hws->lock);
- if (!refcount_dec_and_test(&fc_bulk_hws->hws_action_refcount)) {
- mutex_unlock(&fc_bulk_hws->lock);
- return;
- }
- mlx5hws_action_destroy(fc_bulk_hws->hws_action);
- fc_bulk_hws->hws_action = NULL;
- mutex_unlock(&fc_bulk_hws->lock);
+ mlx5_fs_put_hws_action(&counter->bulk->hws_data);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
index d9dc4f2d0dc6..f51ed24526b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
@@ -153,8 +153,7 @@ mlx5hws_pat_get_existing_cached_pattern(struct mlx5hws_pattern_cache *cache,
cached_pattern = mlx5hws_pat_find_cached_pattern(cache, num_of_actions, actions);
if (cached_pattern) {
/* LRU: move it to be first in the list */
- list_del_init(&cached_pattern->ptrn_list_node);
- list_add(&cached_pattern->ptrn_list_node, &cache->ptrn_list);
+ list_move(&cached_pattern->ptrn_list_node, &cache->ptrn_list);
cached_pattern->refcount++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
index 60cb4527588a..65740bb68b09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
@@ -516,30 +516,6 @@ def_xa_destroy:
return NULL;
}
-/* Assure synchronization of the device steering tables with updates made by SW
- * insertion.
- */
-int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
-{
- int ret = 0;
-
- if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) {
- mlx5dr_domain_lock(dmn);
- ret = mlx5dr_send_ring_force_drain(dmn);
- mlx5dr_domain_unlock(dmn);
- if (ret) {
- mlx5dr_err(dmn, "Force drain failed flags: %d, ret: %d\n",
- flags, ret);
- return ret;
- }
- }
-
- if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
- ret = mlx5dr_cmd_sync_steering(dmn->mdev);
-
- return ret;
-}
-
int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
{
if (WARN_ON_ONCE(refcount_read(&dmn->refcount) > 1))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
index f57c84e5128b..4fd4e8483382 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
@@ -1331,36 +1331,3 @@ void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
kfree(send_ring->sync_buff);
kfree(send_ring);
}
-
-int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn)
-{
- struct mlx5dr_send_ring *send_ring = dmn->send_ring;
- struct postsend_info send_info = {};
- u8 data[DR_STE_SIZE];
- int num_of_sends_req;
- int ret;
- int i;
-
- /* Sending this amount of requests makes sure we will get drain */
- num_of_sends_req = send_ring->signal_th * TH_NUMS_TO_DRAIN / 2;
-
- /* Send fake requests forcing the last to be signaled */
- send_info.write.addr = (uintptr_t)data;
- send_info.write.length = DR_STE_SIZE;
- send_info.write.lkey = 0;
- /* Using the sync_mr in order to write/read */
- send_info.remote_addr = (uintptr_t)send_ring->sync_mr->addr;
- send_info.rkey = send_ring->sync_mr->mkey;
-
- for (i = 0; i < num_of_sends_req; i++) {
- ret = dr_postsend_icm_data(dmn, &send_info);
- if (ret)
- return ret;
- }
-
- spin_lock(&send_ring->lock);
- ret = dr_handle_pending_wc(dmn, send_ring);
- spin_unlock(&send_ring->lock);
-
- return ret;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
index 5f409dc30aca..3d5afc832fa5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
@@ -210,6 +210,10 @@ struct mlx5dr_ste_ctx {
void (*set_encap_l3)(u8 *hw_ste_p, u8 *frst_s_action,
u8 *scnd_d_action, u32 reformat_id,
int size);
+ void (*set_insert_hdr)(u8 *hw_ste_p, u8 *d_action, u32 reformat_id,
+ u8 anchor, u8 offset, int size);
+ void (*set_remove_hdr)(u8 *hw_ste_p, u8 *s_action, u8 anchor,
+ u8 offset, int size);
/* Send */
void (*prepare_for_postsend)(u8 *hw_ste_p, u32 ste_size);
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
index 7f83d77c43ef..6447efbae00d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
@@ -266,10 +266,10 @@ void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action, u32 reformat_id, int size)
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
- u32 reformat_id,
- u8 anchor, u8 offset,
- int size)
+void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id,
+ u8 anchor, u8 offset,
+ int size)
{
MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action,
action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER);
@@ -286,9 +286,9 @@ static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
- u8 anchor, u8 offset,
- int size)
+void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
+ u8 anchor, u8 offset,
+ int size)
{
MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
@@ -584,11 +584,11 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_insert_hdr(last_ste, action,
- attr->reformat.id,
- attr->reformat.param_0,
- attr->reformat.param_1,
- attr->reformat.size);
+ ste_ctx->set_insert_hdr(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
} else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) {
@@ -597,10 +597,10 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_remove_hdr(last_ste, action,
- attr->reformat.param_0,
- attr->reformat.param_1,
- attr->reformat.size);
+ ste_ctx->set_remove_hdr(last_ste, action,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
}
@@ -792,11 +792,11 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_insert_hdr(last_ste, action,
- attr->reformat.id,
- attr->reformat.param_0,
- attr->reformat.param_1,
- attr->reformat.size);
+ ste_ctx->set_insert_hdr(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
allow_modify_hdr = false;
@@ -808,10 +808,10 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
allow_modify_hdr = true;
allow_ctr = true;
}
- dr_ste_v1_set_remove_hdr(last_ste, action,
- attr->reformat.param_0,
- attr->reformat.param_1,
- attr->reformat.size);
+ ste_ctx->set_remove_hdr(last_ste, action,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
}
@@ -2200,6 +2200,8 @@ static struct mlx5dr_ste_ctx ste_ctx_v1 = {
.set_pop_vlan = &dr_ste_v1_set_pop_vlan,
.set_rx_decap = &dr_ste_v1_set_rx_decap,
.set_encap_l3 = &dr_ste_v1_set_encap_l3,
+ .set_insert_hdr = &dr_ste_v1_set_insert_hdr,
+ .set_remove_hdr = &dr_ste_v1_set_remove_hdr,
/* Send */
.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
index a8d9e308d339..591c20c95a6a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
@@ -156,6 +156,10 @@ void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num);
void dr_ste_v1_set_encap_l3(u8 *hw_ste_p, u8 *frst_s_action, u8 *scnd_d_action,
u32 reformat_id, int size);
void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action);
+void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action, u32 reformat_id,
+ u8 anchor, u8 offset, int size);
+void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action, u8 anchor,
+ u8 offset, int size);
void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_domain *dmn,
u8 *action_type_set, u32 actions_caps, u8 *last_ste,
struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
index 0882dba0f64b..d0ebaf820d42 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
@@ -69,6 +69,8 @@ static struct mlx5dr_ste_ctx ste_ctx_v2 = {
.set_pop_vlan = &dr_ste_v1_set_pop_vlan,
.set_rx_decap = &dr_ste_v1_set_rx_decap,
.set_encap_l3 = &dr_ste_v1_set_encap_l3,
+ .set_insert_hdr = &dr_ste_v1_set_insert_hdr,
+ .set_remove_hdr = &dr_ste_v1_set_remove_hdr,
/* Send */
.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c
index cc60ce1d274e..e468a9ae44e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c
@@ -79,6 +79,46 @@ static void dr_ste_v3_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
dr_ste_v1_set_reparse(hw_ste_p);
}
+static void dr_ste_v3_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id, u8 anchor,
+ u8 offset, int size)
+{
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ start_anchor, anchor);
+
+ /* The hardware expects here size and offset in words (2 byte) */
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ size, size / 2);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ start_offset, offset / 2);
+
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ pointer, reformat_id);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ attributes, DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
+ u8 anchor, u8 offset, int size)
+{
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ start_anchor, anchor);
+
+ /* The hardware expects here size and offset in words (2 byte) */
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ remove_size, size / 2);
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ start_offset, offset / 2);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
static int
dr_ste_v3_set_action_decap_l3_list(void *data, u32 data_sz,
u8 *hw_action, u32 hw_action_sz,
@@ -211,6 +251,8 @@ static struct mlx5dr_ste_ctx ste_ctx_v3 = {
.set_pop_vlan = &dr_ste_v3_set_pop_vlan,
.set_rx_decap = &dr_ste_v3_set_rx_decap,
.set_encap_l3 = &dr_ste_v3_set_encap_l3,
+ .set_insert_hdr = &dr_ste_v3_set_insert_hdr,
+ .set_remove_hdr = &dr_ste_v3_set_remove_hdr,
/* Send */
.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
};
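The STE hunks stop calling the v1 helpers directly and instead go through new set_insert_hdr/set_remove_hdr callbacks in struct mlx5dr_ste_ctx, with v3 supplying its own field layout. The sketch below models that ops-table indirection with made-up types and two trivial implementations; the division by two only mirrors the "size and offset in words (2 byte)" comments in the real setters.

#include <stdio.h>

struct ste_ops {
	void (*set_insert_hdr)(unsigned int offset_bytes, int size_bytes);
};

static void v1_set_insert_hdr(unsigned int offset, int size)
{
	/* v1/v2 layout: size and offset programmed in 2-byte words */
	printf("v1: insert %d words at word %u\n", size / 2, offset / 2);
}

static void v3_set_insert_hdr(unsigned int offset, int size)
{
	/* v3 uses different register fields but the same word units */
	printf("v3: insert %d words at word %u\n", size / 2, offset / 2);
}

static const struct ste_ops ste_v1 = { .set_insert_hdr = v1_set_insert_hdr };
static const struct ste_ops ste_v3 = { .set_insert_hdr = v3_set_insert_hdr };

/* Common action-building code no longer hard-codes the v1 helper. */
static void build_insert_action(const struct ste_ops *ops)
{
	ops->set_insert_hdr(14, 8);	/* example: 8 bytes after the L2 header */
}

int main(void)
{
	build_insert_action(&ste_v1);
	build_insert_action(&ste_v3);
	return 0;
}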
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h
index 7618c6147f86..cc328292bf84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h
@@ -1473,7 +1473,6 @@ struct mlx5dr_send_ring {
int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn);
void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
struct mlx5dr_send_ring *send_ring);
-int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn);
int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn,
struct mlx5dr_ste *ste,
u8 *data,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
index 0bb3724c10c2..fc8a2169d1a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
@@ -45,8 +45,6 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type);
int mlx5dr_domain_destroy(struct mlx5dr_domain *domain);
-int mlx5dr_domain_sync(struct mlx5dr_domain *domain, u32 flags);
-
void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
struct mlx5dr_domain *peer_dmn,
u16 peer_vhca_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 0d5f750faa45..d10d4c396040 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -1199,6 +1199,31 @@ int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 vport, void *ou
}
EXPORT_SYMBOL_GPL(mlx5_vport_get_other_func_cap);
+int mlx5_vport_get_vhca_id(struct mlx5_core_dev *dev, u16 vport, u16 *vhca_id)
+{
+ int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *query_ctx;
+ void *hca_caps;
+ int err;
+
+ *vhca_id = 0;
+
+ query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
+ if (!query_ctx)
+ return -ENOMEM;
+
+ err = mlx5_vport_get_other_func_general_cap(dev, vport, query_ctx);
+ if (err)
+ goto out_free;
+
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
+ *vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);
+
+out_free:
+ kfree(query_ctx);
+ return err;
+}
+
int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap,
u16 vport, u16 opmod)
{
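A caller-side sketch of the helper added above. It assumes a valid mlx5_core_dev and vport number from the surrounding driver context; the wrapping function is hypothetical and only illustrates the expected error handling, it is not part of the patch.

/* Hypothetical caller; 'dev' and 'vport' come from the surrounding
 * driver context, and the function name is made up for the example.
 */
static int example_log_vhca_id(struct mlx5_core_dev *dev, u16 vport)
{
	u16 vhca_id;
	int err;

	err = mlx5_vport_get_vhca_id(dev, vport, &vhca_id);
	if (err) {
		mlx5_core_err(dev, "Failed to query vhca_id of vport %u, err(%d)\n",
			      vport, err);
		return err;
	}

	mlx5_core_dbg(dev, "vport %u maps to vhca_id 0x%x\n", vport, vhca_id);
	return 0;
}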
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 5b44c931b660..058dcabfaa2e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -2214,6 +2214,8 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
mlxsw_pci_wqe_byte_count_set(wqe, i, 0);
+ mlxsw_pci_wqe_ipcs_set(wqe, skb->ip_summed == CHECKSUM_PARTIAL);
+
/* Everything is set up, ring producer doorbell to get HW going */
q->producer_counter++;
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index 6bed495dcf0f..7fa94e5828de 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -90,6 +90,11 @@ MLXSW_ITEM32(pci, wqe, lp, 0x00, 30, 1);
*/
MLXSW_ITEM32(pci, wqe, type, 0x00, 23, 4);
+/* pci_wqe_ipcs
+ * Calculate IPv4 and TCP / UDP checksums.
+ */
+MLXSW_ITEM32(pci, wqe, ipcs, 0x00, 14, 1);
+
/* pci_wqe_byte_count
* Size of i-th scatter/gather entry, 0 if entry is unused.
*/
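Taken together, the pci.c and spectrum.c hunks wire up TX checksum offload: the netdevs advertise NETIF_F_IP_CSUM/NETIF_F_IPV6_CSUM, and for skbs the stack marks CHECKSUM_PARTIAL the driver sets the new ipcs WQE bit so the ASIC fills in the checksum. The userspace model below only illustrates that contract; the structures and the CHECKSUM_* values are stand-ins.

#include <stdbool.h>
#include <stdio.h>

enum { CHECKSUM_NONE = 0, CHECKSUM_PARTIAL = 3 };	/* example values */

struct fake_skb { int ip_summed; };
struct fake_wqe { bool ipcs; };

static void transmit(struct fake_wqe *wqe, const struct fake_skb *skb)
{
	/* Mirrors: mlxsw_pci_wqe_ipcs_set(wqe, skb->ip_summed == CHECKSUM_PARTIAL) */
	wqe->ipcs = (skb->ip_summed == CHECKSUM_PARTIAL);
}

int main(void)
{
	struct fake_skb offloaded = { .ip_summed = CHECKSUM_PARTIAL };
	struct fake_skb precomputed = { .ip_summed = CHECKSUM_NONE };
	struct fake_wqe wqe;

	transmit(&wqe, &offloaded);
	printf("offloaded skb   -> ipcs=%d\n", wqe.ipcs ? 1 : 0);
	transmit(&wqe, &precomputed);
	printf("precomputed skb -> ipcs=%d\n", wqe.ipcs ? 1 : 0);
	return 0;
}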
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index d714311fd884..3080ea032e7f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1574,10 +1574,12 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
netif_carrier_off(dev);
dev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_HW_TC;
- dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;
+ NETIF_F_HW_TC | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
dev->lltx = true;
- dev->netns_local = true;
+ dev->netns_immutable = true;
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = MLXSW_PORT_MAX_MTU - MLXSW_PORT_ETH_FRAME_HDR;
@@ -2407,8 +2409,6 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
/* Multicast Router Traps */
MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
- /* NVE traps */
- MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
@@ -5230,25 +5230,13 @@ static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
return 0;
if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
return -EOPNOTSUPP;
- if (cu_info->linking) {
- if (!netif_running(dev))
- return 0;
- /* When the bridge is VLAN-aware, the VNI of the VxLAN
- * device needs to be mapped to a VLAN, but at this
- * point no VLANs are configured on the VxLAN device
- */
- if (br_vlan_enabled(upper_dev))
- return 0;
+ if (!netif_running(dev))
+ return 0;
+ if (cu_info->linking)
return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
dev, 0, extack);
- } else {
- /* VLANs were already flushed, which triggered the
- * necessary cleanup
- */
- if (br_vlan_enabled(upper_dev))
- return 0;
+ else
mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
- }
break;
case NETDEV_PRE_UP:
upper_dev = netdev_master_upper_dev_get(dev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index b10f80fc651b..37cd1d002b3b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -661,10 +661,10 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *br_dev);
int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
const struct net_device *br_dev,
- const struct net_device *vxlan_dev, u16 vid,
+ struct net_device *vxlan_dev, u16 vid,
struct netlink_ext_ack *extack);
void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *vxlan_dev);
+ struct net_device *vxlan_dev);
extern struct notifier_block mlxsw_sp_switchdev_notifier;
/* spectrum.c */
@@ -754,9 +754,6 @@ void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev);
-bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev);
-u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev);
u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
enum mlxsw_sp_l3proto ul_proto,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
index a54eedb69a3f..067f0055a55a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
@@ -212,7 +212,22 @@ static const u8 mlxsw_sp4_acl_bf_crc6_tab[256] = {
* This array defines key offsets for easy access when copying key blocks from
* entry key to Bloom filter chunk.
*/
-static const u8 chunk_key_offsets[MLXSW_BLOOM_KEY_CHUNKS] = {2, 20, 38};
+static char *
+mlxsw_sp_acl_bf_enc_key_get(struct mlxsw_sp_acl_atcam_entry *aentry,
+ u8 chunk_index)
+{
+ switch (chunk_index) {
+ case 0:
+ return &aentry->ht_key.enc_key[2];
+ case 1:
+ return &aentry->ht_key.enc_key[20];
+ case 2:
+ return &aentry->ht_key.enc_key[38];
+ default:
+ WARN_ON_ONCE(1);
+ return &aentry->ht_key.enc_key[0];
+ }
+}
static u16 mlxsw_sp2_acl_bf_crc16_byte(u16 crc, u8 c)
{
@@ -235,9 +250,10 @@ __mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
u8 key_offset, u8 chunk_key_len, u8 chunk_len)
{
struct mlxsw_afk_key_info *key_info = aregion->region->key_info;
- u8 chunk_index, chunk_count, block_count;
+ u8 chunk_index, chunk_count;
char *chunk = output;
__be16 erp_region_id;
+ u32 block_count;
block_count = mlxsw_afk_key_info_blocks_count_get(key_info);
chunk_count = 1 + ((block_count - 1) >> 2);
@@ -245,12 +261,13 @@ __mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
(aregion->region->id << 4));
for (chunk_index = max_chunks - chunk_count; chunk_index < max_chunks;
chunk_index++) {
+ char *enc_key;
+
memset(chunk, 0, pad_bytes);
memcpy(chunk + pad_bytes, &erp_region_id,
sizeof(erp_region_id));
- memcpy(chunk + key_offset,
- &aentry->ht_key.enc_key[chunk_key_offsets[chunk_index]],
- chunk_key_len);
+ enc_key = mlxsw_sp_acl_bf_enc_key_get(aentry, chunk_index);
+ memcpy(chunk + key_offset, enc_key, chunk_key_len);
chunk += chunk_len;
}
*len = chunk_count * chunk_len;
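The bloom-filter change above swaps the chunk_key_offsets[] array for a switch-based getter, so each chunk index maps to a bounded, compiler-visible offset into the encoded key. A self-contained sketch of the same access pattern, assuming the 2/20/38 byte offsets from the hunk and an illustrative key/chunk size:

#include <stdio.h>
#include <string.h>

#define KEY_CHUNKS	3
#define CHUNK_KEY_LEN	18	/* illustrative chunk payload size */

/* Return the start of the key bytes backing a given chunk, mirroring the
 * switch-based getter that replaced the offsets array above.
 */
static const unsigned char *enc_key_get(const unsigned char *enc_key,
					unsigned int chunk_index)
{
	switch (chunk_index) {
	case 0:
		return &enc_key[2];
	case 1:
		return &enc_key[20];
	case 2:
		return &enc_key[38];
	default:
		return &enc_key[0];	/* defensive fallback */
	}
}

int main(void)
{
	unsigned char enc_key[64];
	unsigned char chunk[CHUNK_KEY_LEN];
	unsigned int i;

	for (i = 0; i < sizeof(enc_key); i++)
		enc_key[i] = (unsigned char)i;

	for (i = 0; i < KEY_CHUNKS; i++) {
		memcpy(chunk, enc_key_get(enc_key, i), sizeof(chunk));
		printf("chunk %u starts with byte %u\n", i, (unsigned)chunk[0]);
	}
	return 0;
}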
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 7d6d859cef3f..464821dd492d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -8184,41 +8184,6 @@ mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
return NULL;
}
-bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev)
-{
- struct mlxsw_sp_rif *rif;
-
- mutex_lock(&mlxsw_sp->router->lock);
- rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
- mutex_unlock(&mlxsw_sp->router->lock);
-
- return rif;
-}
-
-u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
-{
- struct mlxsw_sp_rif *rif;
- u16 vid = 0;
-
- mutex_lock(&mlxsw_sp->router->lock);
- rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
- if (!rif)
- goto out;
-
- /* We only return the VID for VLAN RIFs. Otherwise we return an
- * invalid value (0).
- */
- if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
- goto out;
-
- vid = mlxsw_sp_fid_8021q_vid(rif->fid);
-
-out:
- mutex_unlock(&mlxsw_sp->router->lock);
- return vid;
-}
-
static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
char ritr_pl[MLXSW_REG_RITR_LEN];
@@ -8417,19 +8382,6 @@ u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
return lb_rif->common.rif_index;
}
-u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
-{
- struct net_device *dev = mlxsw_sp_rif_dev(&lb_rif->common);
- u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
- struct mlxsw_sp_vr *ul_vr;
-
- ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
- if (WARN_ON(IS_ERR(ul_vr)))
- return 0;
-
- return ul_vr->id;
-}
-
u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
return lb_rif->ul_rif_id;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 0432c7cc6b07..313efab5c324 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -90,7 +90,6 @@ struct mlxsw_sp_ipip_entry;
struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
u16 rif_index);
u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif);
-u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif);
u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev);
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 6397ff0dc951..a48bf342084d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -2929,23 +2929,8 @@ void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
}
-int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *br_dev,
- const struct net_device *vxlan_dev, u16 vid,
- struct netlink_ext_ack *extack)
-{
- struct mlxsw_sp_bridge_device *bridge_device;
-
- bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
- if (WARN_ON(!bridge_device))
- return -EINVAL;
-
- return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
- extack);
-}
-
-void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *vxlan_dev)
+static void __mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *vxlan_dev)
{
struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
struct mlxsw_sp_fid *fid;
@@ -2963,6 +2948,47 @@ void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_fid_put(fid);
}
+int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev,
+ struct net_device *vxlan_dev, u16 vid,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ int err;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (WARN_ON(!bridge_device))
+ return -EINVAL;
+
+ mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(bridge_device->dev);
+ if (!mlxsw_sp_port)
+ return -EINVAL;
+
+ err = bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
+ extack);
+ if (err)
+ return err;
+
+ err = switchdev_bridge_port_offload(vxlan_dev, mlxsw_sp_port->dev,
+ NULL, NULL, NULL, false, extack);
+ if (err)
+ goto err_bridge_port_offload;
+
+ return 0;
+
+err_bridge_port_offload:
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ return err;
+}
+
+void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *vxlan_dev)
+{
+ switchdev_bridge_port_unoffload(vxlan_dev, NULL, NULL, NULL);
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+}
+
static void
mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
enum mlxsw_sp_l3proto *proto,
@@ -3867,7 +3893,7 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_fid_put(fid);
return -EINVAL;
}
- mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
mlxsw_sp_fid_put(fid);
return 0;
}
@@ -3883,7 +3909,7 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
/* Fourth case: The new VLAN is PVID, which means the VLAN currently
* mapped to the VNI should be unmapped
*/
- mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
mlxsw_sp_fid_put(fid);
/* Fifth case: The new VLAN is also egress untagged, which means the
@@ -3923,7 +3949,7 @@ mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp,
if (mlxsw_sp_fid_8021q_vid(fid) != vid)
goto out;
- mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
out:
mlxsw_sp_fid_put(fid);
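mlxsw_sp_bridge_vxlan_join() above now performs two steps, the FID join and switchdev_bridge_port_offload(), and rolls the first back if the second fails. A generic, self-contained sketch of that unwind-on-error pattern (the step names are invented for illustration):

#include <stdio.h>

static int step_a(int fail) { return fail ? -1 : (printf("A done\n"), 0); }
static void undo_a(void)    { printf("A undone\n"); }
static int step_b(int fail) { return fail ? -1 : (printf("B done\n"), 0); }

/* Perform A then B; if B fails, roll back A so the caller never sees a
 * half-applied state -- the same shape as the join/offload pairing above.
 */
static int do_join(int fail_a, int fail_b)
{
	int err;

	err = step_a(fail_a);
	if (err)
		return err;

	err = step_b(fail_b);
	if (err)
		goto err_step_b;

	return 0;

err_step_b:
	undo_a();
	return err;
}

int main(void)
{
	printf("ok: %d\n", do_join(0, 0));
	printf("b fails: %d\n", do_join(0, 1));
	return 0;
}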
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 1f9c1c86839f..b5c3f789c685 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -959,18 +959,18 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
},
{
.trap = MLXSW_SP_TRAP_CONTROL(ARP_REQUEST, NEIGH_DISCOVERY,
- MIRROR),
+ TRAP),
.listeners_arr = {
- MLXSW_SP_RXL_MARK(ROUTER_ARPBC, NEIGH_DISCOVERY,
- TRAP_TO_CPU, false),
+ MLXSW_SP_RXL_NO_MARK(ARPBC, NEIGH_DISCOVERY,
+ TRAP_TO_CPU, false),
},
},
{
.trap = MLXSW_SP_TRAP_CONTROL(ARP_RESPONSE, NEIGH_DISCOVERY,
- MIRROR),
+ TRAP),
.listeners_arr = {
- MLXSW_SP_RXL_MARK(ROUTER_ARPUC, NEIGH_DISCOVERY,
- TRAP_TO_CPU, false),
+ MLXSW_SP_RXL_NO_MARK(ARPUC, NEIGH_DISCOVERY,
+ TRAP_TO_CPU, false),
},
},
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 83477c8e6971..80ee5c4825dc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -29,6 +29,8 @@ enum {
MLXSW_TRAP_ID_FDB_MISMATCH = 0x3B,
MLXSW_TRAP_ID_FID_MISS = 0x3D,
MLXSW_TRAP_ID_DECAP_ECN0 = 0x40,
+ MLXSW_TRAP_ID_ARPBC = 0x50,
+ MLXSW_TRAP_ID_ARPUC = 0x51,
MLXSW_TRAP_ID_MTUERROR = 0x52,
MLXSW_TRAP_ID_TTLERROR = 0x53,
MLXSW_TRAP_ID_LBERROR = 0x54,
@@ -66,13 +68,10 @@ enum {
MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92,
MLXSW_TRAP_ID_IPIP_DECAP_ERROR = 0xB1,
MLXSW_TRAP_ID_NVE_DECAP_ARP = 0xB8,
- MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD,
MLXSW_TRAP_ID_IPV4_BFD = 0xD0,
MLXSW_TRAP_ID_IPV6_BFD = 0xD1,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
- MLXSW_TRAP_ID_ROUTER_ARPBC = 0xE0,
- MLXSW_TRAP_ID_ROUTER_ARPUC = 0xE1,
MLXSW_TRAP_ID_DISCARD_NON_ROUTABLE = 0x11A,
MLXSW_TRAP_ID_DISCARD_ROUTER2 = 0x130,
MLXSW_TRAP_ID_DISCARD_ROUTER3 = 0x131,
diff --git a/drivers/net/ethernet/meta/fbnic/Makefile b/drivers/net/ethernet/meta/fbnic/Makefile
index 239b2258ec65..0dbc634adb4b 100644
--- a/drivers/net/ethernet/meta/fbnic/Makefile
+++ b/drivers/net/ethernet/meta/fbnic/Makefile
@@ -20,6 +20,6 @@ fbnic-y := fbnic_csr.o \
fbnic_pci.o \
fbnic_phylink.o \
fbnic_rpc.o \
+ fbnic_time.o \
fbnic_tlv.o \
- fbnic_txrx.o \
- fbnic_time.o
+ fbnic_txrx.o
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h
index 14751f16e125..4ca7b99ef131 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic.h
@@ -60,6 +60,12 @@ struct fbnic_dev {
u8 mac_addr_boundary;
u8 tce_tcam_last;
+ /* IP TCAM */
+ struct fbnic_ip_addr ip_src[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES];
+ struct fbnic_ip_addr ip_dst[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES];
+ struct fbnic_ip_addr ipo_src[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES];
+ struct fbnic_ip_addr ipo_dst[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES];
+
/* Number of TCQs/RCQs available on hardware */
u16 max_num_queues;
@@ -180,6 +186,9 @@ void fbnic_dbg_exit(void);
void fbnic_csr_get_regs(struct fbnic_dev *fbd, u32 *data, u32 *regs_version);
int fbnic_csr_regs_len(struct fbnic_dev *fbd);
+void fbnic_config_txrx_usecs(struct fbnic_napi_vector *nv, u32 arm);
+void fbnic_config_rx_frames(struct fbnic_napi_vector *nv);
+
enum fbnic_boards {
fbnic_board_asic
};
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.c b/drivers/net/ethernet/meta/fbnic/fbnic_csr.c
index aeb9f333f4c7..d9c0dc1c2af9 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.c
@@ -30,6 +30,7 @@ static const struct fbnic_csr_bounds fbnic_csr_sects[] = {
FBNIC_BOUNDS(RSFEC),
FBNIC_BOUNDS(MAC_MAC),
FBNIC_BOUNDS(SIG),
+ FBNIC_BOUNDS(PCIE_SS_COMPHY),
FBNIC_BOUNDS(PUL_USER),
FBNIC_BOUNDS(QUEUE),
FBNIC_BOUNDS(RPC_RAM),
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
index 02bb81b3c506..3b12a0ab5906 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
@@ -605,8 +605,11 @@ enum {
FBNIC_RPC_ACT_TBL0_DEST_EI = 4,
};
+#define FBNIC_RPC_ACT_TBL0_Q_SEL CSR_BIT(4)
+#define FBNIC_RPC_ACT_TBL0_Q_ID CSR_GENMASK(15, 8)
#define FBNIC_RPC_ACT_TBL0_DMA_HINT CSR_GENMASK(24, 16)
#define FBNIC_RPC_ACT_TBL0_TS_ENA CSR_BIT(28)
+#define FBNIC_RPC_ACT_TBL0_ACT_TBL_IDX CSR_BIT(29)
#define FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID CSR_BIT(30)
#define FBNIC_RPC_ACT_TBL1_DEFAULT 0x0840b /* 0x2102c */
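The new FBNIC_RPC_ACT_TBL0_Q_SEL and FBNIC_RPC_ACT_TBL0_Q_ID definitions above pack a "steer to explicit queue" flag and a queue index into one action-table word, in the style of the kernel's BIT()/GENMASK()/FIELD_PREP() helpers. A standalone sketch with locally defined macros (only the bit 4 flag and bits 15:8 field mirror the hunk; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

#define BIT32(n)	(UINT32_C(1) << (n))
#define GENMASK32(h, l)	(((UINT32_C(1) << ((h) - (l) + 1)) - 1) << (l))

/* Shift a value into a mask's position / extract it back out. */
#define FIELD_PREP32(mask, val)	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET32(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

#define ACT_TBL0_Q_SEL	BIT32(4)		/* "use explicit queue" flag */
#define ACT_TBL0_Q_ID	GENMASK32(15, 8)	/* queue index field */

int main(void)
{
	uint32_t dest = 0;

	dest |= ACT_TBL0_Q_SEL | FIELD_PREP32(ACT_TBL0_Q_ID, 7);

	printf("dest word: 0x%08x\n", (unsigned)dest);
	printf("queue select: %u, queue id: %u\n",
	       (unsigned)!!(dest & ACT_TBL0_Q_SEL),
	       (unsigned)FIELD_GET32(ACT_TBL0_Q_ID, dest));
	return 0;
}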
@@ -677,6 +680,9 @@ enum {
#define FBNIC_RPC_TCAM_OUTER_IPSRC(m, n)\
(0x08c00 + 0x08 * (n) + (m)) /* 0x023000 + 32*n + 4*m */
+#define FBNIC_RPC_TCAM_IP_ADDR_VALUE CSR_GENMASK(15, 0)
+#define FBNIC_RPC_TCAM_IP_ADDR_MASK CSR_GENMASK(31, 16)
+
#define FBNIC_RPC_TCAM_OUTER_IPDST(m, n)\
(0x08c48 + 0x08 * (n) + (m)) /* 0x023120 + 32*n + 4*m */
#define FBNIC_RPC_TCAM_IPSRC(m, n)\
@@ -782,13 +788,52 @@ enum {
#define FBNIC_MAC_STAT_TX_MULTICAST_H 0x11a4b /* 0x4692c */
#define FBNIC_MAC_STAT_TX_BROADCAST_L 0x11a4c /* 0x46930 */
#define FBNIC_MAC_STAT_TX_BROADCAST_H 0x11a4d /* 0x46934 */
+
+/* PCIE Comphy Registers */
+#define FBNIC_CSR_START_PCIE_SS_COMPHY 0x2442e /* CSR section delimiter */
+#define FBNIC_CSR_END_PCIE_SS_COMPHY 0x279d7 /* CSR section delimiter */
+
/* PUL User Registers */
#define FBNIC_CSR_START_PUL_USER 0x31000 /* CSR section delimiter */
#define FBNIC_PUL_OB_TLP_HDR_AW_CFG 0x3103d /* 0xc40f4 */
#define FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME CSR_BIT(18)
#define FBNIC_PUL_OB_TLP_HDR_AR_CFG 0x3103e /* 0xc40f8 */
#define FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME CSR_BIT(18)
-#define FBNIC_CSR_END_PUL_USER 0x31080 /* CSR section delimiter */
+#define FBNIC_PUL_USER_OB_RD_TLP_CNT_31_0 \
+ 0x3106e /* 0xc41b8 */
+#define FBNIC_PUL_USER_OB_RD_DWORD_CNT_31_0 \
+ 0x31070 /* 0xc41c0 */
+#define FBNIC_PUL_USER_OB_RD_DWORD_CNT_63_32 \
+ 0x31071 /* 0xc41c4 */
+#define FBNIC_PUL_USER_OB_WR_TLP_CNT_31_0 \
+ 0x31072 /* 0xc41c8 */
+#define FBNIC_PUL_USER_OB_WR_TLP_CNT_63_32 \
+ 0x31073 /* 0xc41cc */
+#define FBNIC_PUL_USER_OB_WR_DWORD_CNT_31_0 \
+ 0x31074 /* 0xc41d0 */
+#define FBNIC_PUL_USER_OB_WR_DWORD_CNT_63_32 \
+ 0x31075 /* 0xc41d4 */
+#define FBNIC_PUL_USER_OB_CPL_TLP_CNT_31_0 \
+ 0x31076 /* 0xc41d8 */
+#define FBNIC_PUL_USER_OB_CPL_TLP_CNT_63_32 \
+ 0x31077 /* 0xc41dc */
+#define FBNIC_PUL_USER_OB_CPL_DWORD_CNT_31_0 \
+ 0x31078 /* 0xc41e0 */
+#define FBNIC_PUL_USER_OB_CPL_DWORD_CNT_63_32 \
+ 0x31079 /* 0xc41e4 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_31_0 \
+ 0x3107a /* 0xc41e8 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_63_32 \
+ 0x3107b /* 0xc41ec */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_31_0 \
+ 0x3107c /* 0xc41f0 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_63_32 \
+ 0x3107d /* 0xc41f4 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_31_0 \
+ 0x3107e /* 0xc41f8 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_63_32 \
+ 0x3107f /* 0xc41fc */
+#define FBNIC_CSR_END_PUL_USER 0x310ea /* CSR section delimiter */
/* Queue Registers
*
@@ -928,43 +973,6 @@ enum {
#define FBNIC_MAX_QUEUES 128
#define FBNIC_CSR_END_QUEUE (0x40000 + 0x400 * FBNIC_MAX_QUEUES - 1)
-/* PUL User Registers*/
-#define FBNIC_PUL_USER_OB_RD_TLP_CNT_31_0 \
- 0x3106e /* 0xc41b8 */
-#define FBNIC_PUL_USER_OB_RD_DWORD_CNT_31_0 \
- 0x31070 /* 0xc41c0 */
-#define FBNIC_PUL_USER_OB_RD_DWORD_CNT_63_32 \
- 0x31071 /* 0xc41c4 */
-#define FBNIC_PUL_USER_OB_WR_TLP_CNT_31_0 \
- 0x31072 /* 0xc41c8 */
-#define FBNIC_PUL_USER_OB_WR_TLP_CNT_63_32 \
- 0x31073 /* 0xc41cc */
-#define FBNIC_PUL_USER_OB_WR_DWORD_CNT_31_0 \
- 0x31074 /* 0xc41d0 */
-#define FBNIC_PUL_USER_OB_WR_DWORD_CNT_63_32 \
- 0x31075 /* 0xc41d4 */
-#define FBNIC_PUL_USER_OB_CPL_TLP_CNT_31_0 \
- 0x31076 /* 0xc41d8 */
-#define FBNIC_PUL_USER_OB_CPL_TLP_CNT_63_32 \
- 0x31077 /* 0xc41dc */
-#define FBNIC_PUL_USER_OB_CPL_DWORD_CNT_31_0 \
- 0x31078 /* 0xc41e0 */
-#define FBNIC_PUL_USER_OB_CPL_DWORD_CNT_63_32 \
- 0x31079 /* 0xc41e4 */
-#define FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_31_0 \
- 0x3107a /* 0xc41e8 */
-#define FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_63_32 \
- 0x3107b /* 0xc41ec */
-#define FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_31_0 \
- 0x3107c /* 0xc41f0 */
-#define FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_63_32 \
- 0x3107d /* 0xc41f4 */
-#define FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_31_0 \
- 0x3107e /* 0xc41f8 */
-#define FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_63_32 \
- 0x3107f /* 0xc41fc */
-#define FBNIC_CSR_END_PUL_USER 0x31080 /* CSR section delimiter */
-
/* BAR 4 CSRs */
/* The IPC mailbox consists of 32 mailboxes, with each mailbox consisting
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c b/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
index 59951b5abdb7..e8f2d7f2d962 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
@@ -10,6 +10,166 @@
static struct dentry *fbnic_dbg_root;
+static void fbnic_dbg_desc_break(struct seq_file *s, int i)
+{
+ while (i--)
+ seq_putc(s, '-');
+
+ seq_putc(s, '\n');
+}
+
+static int fbnic_dbg_mac_addr_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+ char hdr[80];
+ int i;
+
+ /* Generate Header */
+ snprintf(hdr, sizeof(hdr), "%3s %s %-17s %s\n",
+ "Idx", "S", "TCAM Bitmap", "Addr/Mask");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ for (i = 0; i < FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES; i++) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ seq_printf(s, "%02d %d %64pb %pm\n",
+ i, mac_addr->state, mac_addr->act_tcam,
+ mac_addr->value.addr8);
+ seq_printf(s, " %pm\n",
+ mac_addr->mask.addr8);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_mac_addr);
+
+static int fbnic_dbg_tce_tcam_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+ int i, tcam_idx = 0;
+ char hdr[80];
+
+ /* Generate Header */
+ snprintf(hdr, sizeof(hdr), "%3s %s %-17s %s\n",
+ "Idx", "S", "TCAM Bitmap", "Addr/Mask");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ for (i = 0; i < ARRAY_SIZE(fbd->mac_addr); i++) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ /* Verify BMC bit is set */
+ if (!test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam))
+ continue;
+
+ if (tcam_idx == FBNIC_TCE_TCAM_NUM_ENTRIES)
+ break;
+
+ seq_printf(s, "%02d %d %64pb %pm\n",
+ tcam_idx, mac_addr->state, mac_addr->act_tcam,
+ mac_addr->value.addr8);
+ seq_printf(s, " %pm\n",
+ mac_addr->mask.addr8);
+ tcam_idx++;
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_tce_tcam);
+
+static int fbnic_dbg_act_tcam_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+ char hdr[80];
+ int i;
+
+ /* Generate Header */
+ snprintf(hdr, sizeof(hdr), "%3s %s %-55s %-4s %s\n",
+ "Idx", "S", "Value/Mask", "RSS", "Dest");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ for (i = 0; i < FBNIC_RPC_TCAM_ACT_NUM_ENTRIES; i++) {
+ struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[i];
+
+ seq_printf(s, "%02d %d %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %08x\n",
+ i, act_tcam->state,
+ act_tcam->value.tcam[10], act_tcam->value.tcam[9],
+ act_tcam->value.tcam[8], act_tcam->value.tcam[7],
+ act_tcam->value.tcam[6], act_tcam->value.tcam[5],
+ act_tcam->value.tcam[4], act_tcam->value.tcam[3],
+ act_tcam->value.tcam[2], act_tcam->value.tcam[1],
+ act_tcam->value.tcam[0], act_tcam->rss_en_mask,
+ act_tcam->dest);
+ seq_printf(s, " %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
+ act_tcam->mask.tcam[10], act_tcam->mask.tcam[9],
+ act_tcam->mask.tcam[8], act_tcam->mask.tcam[7],
+ act_tcam->mask.tcam[6], act_tcam->mask.tcam[5],
+ act_tcam->mask.tcam[4], act_tcam->mask.tcam[3],
+ act_tcam->mask.tcam[2], act_tcam->mask.tcam[1],
+ act_tcam->mask.tcam[0]);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_act_tcam);
+
+static int fbnic_dbg_ip_addr_show(struct seq_file *s,
+ struct fbnic_ip_addr *ip_addr)
+{
+ char hdr[80];
+ int i;
+
+ /* Generate Header */
+ snprintf(hdr, sizeof(hdr), "%3s %s %-17s %s %s\n",
+ "Idx", "S", "TCAM Bitmap", "V", "Addr/Mask");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES; i++, ip_addr++) {
+ seq_printf(s, "%02d %d %64pb %d %pi6\n",
+ i, ip_addr->state, ip_addr->act_tcam,
+ ip_addr->version, &ip_addr->value);
+ seq_printf(s, " %pi6\n",
+ &ip_addr->mask);
+ }
+
+ return 0;
+}
+
+static int fbnic_dbg_ip_src_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ return fbnic_dbg_ip_addr_show(s, fbd->ip_src);
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ip_src);
+
+static int fbnic_dbg_ip_dst_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ return fbnic_dbg_ip_addr_show(s, fbd->ip_dst);
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ip_dst);
+
+static int fbnic_dbg_ipo_src_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ return fbnic_dbg_ip_addr_show(s, fbd->ipo_src);
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ipo_src);
+
+static int fbnic_dbg_ipo_dst_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ return fbnic_dbg_ip_addr_show(s, fbd->ipo_dst);
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ipo_dst);
+
static int fbnic_dbg_pcie_stats_show(struct seq_file *s, void *v)
{
struct fbnic_dev *fbd = s->private;
@@ -48,6 +208,20 @@ void fbnic_dbg_fbd_init(struct fbnic_dev *fbd)
fbd->dbg_fbd = debugfs_create_dir(name, fbnic_dbg_root);
debugfs_create_file("pcie_stats", 0400, fbd->dbg_fbd, fbd,
&fbnic_dbg_pcie_stats_fops);
+ debugfs_create_file("mac_addr", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_mac_addr_fops);
+ debugfs_create_file("tce_tcam", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_tce_tcam_fops);
+ debugfs_create_file("act_tcam", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_act_tcam_fops);
+ debugfs_create_file("ip_src", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_ip_src_fops);
+ debugfs_create_file("ip_dst", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_ip_dst_fops);
+ debugfs_create_file("ipo_src", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_ipo_src_fops);
+ debugfs_create_file("ipo_dst", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_ipo_dst_fops);
}
void fbnic_dbg_fbd_exit(struct fbnic_dev *fbd)
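Each debugfs show handler added above prints a column header, underlines it with fbnic_dbg_desc_break(), and walks a fixed-size table while skipping entries that are not in use. A userspace sketch of that dump shape (the entry layout and valid flag are illustrative, not the driver's structures):

#include <stdio.h>
#include <string.h>

struct entry {
	int valid;
	unsigned int state;
	const char *addr;
};

static void desc_break(size_t n)
{
	while (n--)
		putchar('-');
	putchar('\n');
}

int main(void)
{
	static const struct entry tbl[4] = {
		{ 1, 3, "02:00:00:00:00:01" },
		{ 0, 0, "" },
		{ 1, 1, "ff:ff:ff:ff:ff:ff" },
		{ 0, 0, "" },
	};
	char hdr[80];
	size_t i;

	snprintf(hdr, sizeof(hdr), "%3s %s %s\n", "Idx", "S", "Addr");
	fputs(hdr, stdout);
	desc_break(strnlen(hdr, sizeof(hdr)));

	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++) {
		if (!tbl[i].valid)	/* skip unused slots, like the TCAM walks */
			continue;
		printf("%02zu  %u %s\n", i, tbl[i].state, tbl[i].addr);
	}
	return 0;
}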
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
index 20cd9f5f89e2..0a751a2aaf73 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
@@ -4,6 +4,7 @@
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
+#include <net/ipv6.h>
#include "fbnic.h"
#include "fbnic_netdev.h"
@@ -135,6 +136,168 @@ static void fbnic_clone_free(struct fbnic_net *clone)
kfree(clone);
}
+static int fbnic_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ ec->tx_coalesce_usecs = fbn->tx_usecs;
+ ec->rx_coalesce_usecs = fbn->rx_usecs;
+ ec->rx_max_coalesced_frames = fbn->rx_max_frames;
+
+ return 0;
+}
+
+static int fbnic_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ /* Verify against hardware limits */
+ if (ec->rx_coalesce_usecs > FIELD_MAX(FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT)) {
+ NL_SET_ERR_MSG_MOD(extack, "rx_usecs is above device max");
+ return -EINVAL;
+ }
+ if (ec->tx_coalesce_usecs > FIELD_MAX(FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT)) {
+ NL_SET_ERR_MSG_MOD(extack, "tx_usecs is above device max");
+ return -EINVAL;
+ }
+ if (ec->rx_max_coalesced_frames >
+ FIELD_MAX(FBNIC_QUEUE_RIM_THRESHOLD_RCD_MASK) /
+ FBNIC_MIN_RXD_PER_FRAME) {
+ NL_SET_ERR_MSG_MOD(extack, "rx_frames is above device max");
+ return -EINVAL;
+ }
+
+ fbn->tx_usecs = ec->tx_coalesce_usecs;
+ fbn->rx_usecs = ec->rx_coalesce_usecs;
+ fbn->rx_max_frames = ec->rx_max_coalesced_frames;
+
+ if (netif_running(netdev)) {
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+
+ fbnic_config_txrx_usecs(nv, 0);
+ fbnic_config_rx_frames(nv);
+ }
+ }
+
+ return 0;
+}
+
+static void
+fbnic_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ ring->rx_max_pending = FBNIC_QUEUE_SIZE_MAX;
+ ring->rx_mini_max_pending = FBNIC_QUEUE_SIZE_MAX;
+ ring->rx_jumbo_max_pending = FBNIC_QUEUE_SIZE_MAX;
+ ring->tx_max_pending = FBNIC_QUEUE_SIZE_MAX;
+
+ ring->rx_pending = fbn->rcq_size;
+ ring->rx_mini_pending = fbn->hpq_size;
+ ring->rx_jumbo_pending = fbn->ppq_size;
+ ring->tx_pending = fbn->txq_size;
+}
+
+static void fbnic_set_rings(struct fbnic_net *fbn,
+ struct ethtool_ringparam *ring)
+{
+ fbn->rcq_size = ring->rx_pending;
+ fbn->hpq_size = ring->rx_mini_pending;
+ fbn->ppq_size = ring->rx_jumbo_pending;
+ fbn->txq_size = ring->tx_pending;
+}
+
+static int
+fbnic_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_net *clone;
+ int err;
+
+ ring->rx_pending = roundup_pow_of_two(ring->rx_pending);
+ ring->rx_mini_pending = roundup_pow_of_two(ring->rx_mini_pending);
+ ring->rx_jumbo_pending = roundup_pow_of_two(ring->rx_jumbo_pending);
+ ring->tx_pending = roundup_pow_of_two(ring->tx_pending);
+
+ /* These are absolute minimums allowing the device and driver to operate
+ * but not necessarily guarantee reasonable performance. Settings below
+ * Rx queue size of 128 and BDQs smaller than 64 are likely suboptimal
+ * at best.
+ */
+ if (ring->rx_pending < max(FBNIC_QUEUE_SIZE_MIN, FBNIC_RX_DESC_MIN) ||
+ ring->rx_mini_pending < FBNIC_QUEUE_SIZE_MIN ||
+ ring->rx_jumbo_pending < FBNIC_QUEUE_SIZE_MIN ||
+ ring->tx_pending < max(FBNIC_QUEUE_SIZE_MIN, FBNIC_TX_DESC_MIN)) {
+ NL_SET_ERR_MSG_MOD(extack, "requested ring size too small");
+ return -EINVAL;
+ }
+
+ if (!netif_running(netdev)) {
+ fbnic_set_rings(fbn, ring);
+ return 0;
+ }
+
+ clone = fbnic_clone_create(fbn);
+ if (!clone)
+ return -ENOMEM;
+
+ fbnic_set_rings(clone, ring);
+
+ err = fbnic_alloc_napi_vectors(clone);
+ if (err)
+ goto err_free_clone;
+
+ err = fbnic_alloc_resources(clone);
+ if (err)
+ goto err_free_napis;
+
+ fbnic_down_noidle(fbn);
+ err = fbnic_wait_all_queues_idle(fbn->fbd, true);
+ if (err)
+ goto err_start_stack;
+
+ err = fbnic_set_netif_queues(clone);
+ if (err)
+ goto err_start_stack;
+
+ /* Nothing can fail past this point */
+ fbnic_flush(fbn);
+
+ fbnic_clone_swap(fbn, clone);
+
+ fbnic_up(fbn);
+
+ fbnic_free_resources(clone);
+ fbnic_free_napi_vectors(clone);
+ fbnic_clone_free(clone);
+
+ return 0;
+
+err_start_stack:
+ fbnic_flush(fbn);
+ fbnic_up(fbn);
+ fbnic_free_resources(clone);
+err_free_napis:
+ fbnic_free_napi_vectors(clone);
+err_free_clone:
+ fbnic_clone_free(clone);
+ return err;
+}
+
static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
{
int i;
@@ -218,11 +381,234 @@ fbnic_get_rss_hash_opts(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd)
return 0;
}
+static int fbnic_get_cls_rule_all(struct fbnic_net *fbn,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i, cnt = 0;
+
+ /* Report maximum rule count */
+ cmd->data = FBNIC_RPC_ACT_TBL_NFC_ENTRIES;
+
+ for (i = 0; i < FBNIC_RPC_ACT_TBL_NFC_ENTRIES; i++) {
+ int idx = i + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+ struct fbnic_act_tcam *act_tcam;
+
+ act_tcam = &fbd->act_tcam[idx];
+ if (act_tcam->state != FBNIC_TCAM_S_VALID)
+ continue;
+
+ if (rule_locs) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+
+ rule_locs[cnt] = i;
+ }
+
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static int fbnic_get_cls_rule(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp;
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_act_tcam *act_tcam;
+ int idx;
+
+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (fsp->location >= FBNIC_RPC_ACT_TBL_NFC_ENTRIES)
+ return -EINVAL;
+
+ idx = fsp->location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+ act_tcam = &fbd->act_tcam[idx];
+
+ if (act_tcam->state != FBNIC_TCAM_S_VALID)
+ return -EINVAL;
+
+ /* Report maximum rule count */
+ cmd->data = FBNIC_RPC_ACT_TBL_NFC_ENTRIES;
+
+ /* Set flow type field */
+ if (!(act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_IP_VALID)) {
+ fsp->flow_type = ETHER_FLOW;
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
+ act_tcam->mask.tcam[1])) {
+ struct fbnic_mac_addr *mac_addr;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
+ act_tcam->value.tcam[1]);
+ mac_addr = &fbd->mac_addr[idx];
+
+ ether_addr_copy(fsp->h_u.ether_spec.h_dest,
+ mac_addr->value.addr8);
+ eth_broadcast_addr(fsp->m_u.ether_spec.h_dest);
+ }
+ } else if (act_tcam->value.tcam[1] &
+ FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID) {
+ fsp->flow_type = IPV6_USER_FLOW;
+ fsp->h_u.usr_ip6_spec.l4_proto = IPPROTO_IPV6;
+ fsp->m_u.usr_ip6_spec.l4_proto = 0xff;
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+ int i;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ipo_src[idx];
+
+ for (i = 0; i < 4; i++) {
+ fsp->h_u.usr_ip6_spec.ip6src[i] =
+ ip_addr->value.s6_addr32[i];
+ fsp->m_u.usr_ip6_spec.ip6src[i] =
+ ~ip_addr->mask.s6_addr32[i];
+ }
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+ int i;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ipo_dst[idx];
+
+ for (i = 0; i < 4; i++) {
+ fsp->h_u.usr_ip6_spec.ip6dst[i] =
+ ip_addr->value.s6_addr32[i];
+ fsp->m_u.usr_ip6_spec.ip6dst[i] =
+ ~ip_addr->mask.s6_addr32[i];
+ }
+ }
+ } else if ((act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_IP_IS_V6)) {
+ if (act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L4_VALID) {
+ if (act_tcam->value.tcam[1] &
+ FBNIC_RPC_TCAM_ACT1_L4_IS_UDP)
+ fsp->flow_type = UDP_V6_FLOW;
+ else
+ fsp->flow_type = TCP_V6_FLOW;
+ fsp->h_u.tcp_ip6_spec.psrc =
+ cpu_to_be16(act_tcam->value.tcam[3]);
+ fsp->m_u.tcp_ip6_spec.psrc =
+ cpu_to_be16(~act_tcam->mask.tcam[3]);
+ fsp->h_u.tcp_ip6_spec.pdst =
+ cpu_to_be16(act_tcam->value.tcam[4]);
+ fsp->m_u.tcp_ip6_spec.pdst =
+ cpu_to_be16(~act_tcam->mask.tcam[4]);
+ } else {
+ fsp->flow_type = IPV6_USER_FLOW;
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+ int i;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ip_src[idx];
+
+ for (i = 0; i < 4; i++) {
+ fsp->h_u.usr_ip6_spec.ip6src[i] =
+ ip_addr->value.s6_addr32[i];
+ fsp->m_u.usr_ip6_spec.ip6src[i] =
+ ~ip_addr->mask.s6_addr32[i];
+ }
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+ int i;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ip_dst[idx];
+
+ for (i = 0; i < 4; i++) {
+ fsp->h_u.usr_ip6_spec.ip6dst[i] =
+ ip_addr->value.s6_addr32[i];
+ fsp->m_u.usr_ip6_spec.ip6dst[i] =
+ ~ip_addr->mask.s6_addr32[i];
+ }
+ }
+ } else {
+ if (act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L4_VALID) {
+ if (act_tcam->value.tcam[1] &
+ FBNIC_RPC_TCAM_ACT1_L4_IS_UDP)
+ fsp->flow_type = UDP_V4_FLOW;
+ else
+ fsp->flow_type = TCP_V4_FLOW;
+ fsp->h_u.tcp_ip4_spec.psrc =
+ cpu_to_be16(act_tcam->value.tcam[3]);
+ fsp->m_u.tcp_ip4_spec.psrc =
+ cpu_to_be16(~act_tcam->mask.tcam[3]);
+ fsp->h_u.tcp_ip4_spec.pdst =
+ cpu_to_be16(act_tcam->value.tcam[4]);
+ fsp->m_u.tcp_ip4_spec.pdst =
+ cpu_to_be16(~act_tcam->mask.tcam[4]);
+ } else {
+ fsp->flow_type = IPV4_USER_FLOW;
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ip_src[idx];
+
+ fsp->h_u.usr_ip4_spec.ip4src =
+ ip_addr->value.s6_addr32[3];
+ fsp->m_u.usr_ip4_spec.ip4src =
+ ~ip_addr->mask.s6_addr32[3];
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ip_dst[idx];
+
+ fsp->h_u.usr_ip4_spec.ip4dst =
+ ip_addr->value.s6_addr32[3];
+ fsp->m_u.usr_ip4_spec.ip4dst =
+ ~ip_addr->mask.s6_addr32[3];
+ }
+ }
+
+ /* Record action */
+ if (act_tcam->dest & FBNIC_RPC_ACT_TBL0_DROP)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
+ else if (act_tcam->dest & FBNIC_RPC_ACT_TBL0_Q_SEL)
+ fsp->ring_cookie = FIELD_GET(FBNIC_RPC_ACT_TBL0_Q_ID,
+ act_tcam->dest);
+ else
+ fsp->flow_type |= FLOW_RSS;
+
+ cmd->rss_context = FIELD_GET(FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID,
+ act_tcam->dest);
+
+ return 0;
+}
+
static int fbnic_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
struct fbnic_net *fbn = netdev_priv(netdev);
int ret = -EOPNOTSUPP;
+ u32 special = 0;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
@@ -232,6 +618,22 @@ static int fbnic_get_rxnfc(struct net_device *netdev,
case ETHTOOL_GRXFH:
ret = fbnic_get_rss_hash_opts(fbn, cmd);
break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = fbnic_get_cls_rule(fbn, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ rule_locs = NULL;
+ special = RX_CLS_LOC_SPECIAL;
+ fallthrough;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = fbnic_get_cls_rule_all(fbn, cmd, rule_locs);
+ if (ret < 0)
+ break;
+
+ cmd->data |= special;
+ cmd->rule_cnt = ret;
+ ret = 0;
+ break;
}
return ret;
@@ -272,6 +674,406 @@ fbnic_set_rss_hash_opts(struct fbnic_net *fbn, const struct ethtool_rxnfc *cmd)
return 0;
}
+static int fbnic_cls_rule_any_loc(struct fbnic_dev *fbd)
+{
+ int i;
+
+ for (i = FBNIC_RPC_ACT_TBL_NFC_ENTRIES; i--;) {
+ int idx = i + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+
+ if (fbd->act_tcam[idx].state != FBNIC_TCAM_S_VALID)
+ return i;
+ }
+
+ return -ENOSPC;
+}
+
+static int fbnic_set_cls_rule_ins(struct fbnic_net *fbn,
+ const struct ethtool_rxnfc *cmd)
+{
+ u16 flow_value = 0, flow_mask = 0xffff, ip_value = 0, ip_mask = 0xffff;
+ u16 sport = 0, sport_mask = ~0, dport = 0, dport_mask = ~0;
+ u16 misc = 0, misc_mask = ~0;
+ u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ FBNIC_RPC_ACT_TBL0_DEST_HOST);
+ struct fbnic_ip_addr *ip_src = NULL, *ip_dst = NULL;
+ struct fbnic_mac_addr *mac_addr = NULL;
+ struct ethtool_rx_flow_spec *fsp;
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_act_tcam *act_tcam;
+ struct in6_addr *addr6, *mask6;
+ struct in_addr *addr4, *mask4;
+ int hash_idx, location;
+ u32 flow_type;
+ int idx, j;
+
+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (fsp->location != RX_CLS_LOC_ANY)
+ return -EINVAL;
+ location = fbnic_cls_rule_any_loc(fbd);
+ if (location < 0)
+ return location;
+
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+ dest = FBNIC_RPC_ACT_TBL0_DROP;
+ } else if (fsp->flow_type & FLOW_RSS) {
+ if (cmd->rss_context == 1)
+ dest |= FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID;
+ } else {
+ u32 ring_idx = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+
+ if (ring_idx >= fbn->num_rx_queues)
+ return -EINVAL;
+
+ dest |= FBNIC_RPC_ACT_TBL0_Q_SEL |
+ FIELD_PREP(FBNIC_RPC_ACT_TBL0_Q_ID, ring_idx);
+ }
+
+ idx = location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+ act_tcam = &fbd->act_tcam[idx];
+
+ /* Do not allow overwriting for now.
+ * To support overwriting rules we will need to add logic to free
+ * any IP or MACDA TCAMs that may be associated with the old rule.
+ */
+ if (act_tcam->state != FBNIC_TCAM_S_DISABLED)
+ return -EBUSY;
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_RSS);
+ hash_idx = fbnic_get_rss_hash_idx(flow_type);
+
+ switch (flow_type) {
+ case UDP_V4_FLOW:
+udp4_flow:
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L4_IS_UDP;
+ fallthrough;
+ case TCP_V4_FLOW:
+tcp4_flow:
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L4_VALID;
+ flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_L4_IS_UDP |
+ FBNIC_RPC_TCAM_ACT1_L4_VALID);
+
+ sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
+ sport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
+ dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
+ dport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);
+ goto ip4_flow;
+ case IP_USER_FLOW:
+ if (!fsp->m_u.usr_ip4_spec.proto)
+ goto ip4_flow;
+ if (fsp->m_u.usr_ip4_spec.proto != 0xff)
+ return -EINVAL;
+ if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_UDP)
+ goto udp4_flow;
+ if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_TCP)
+ goto tcp4_flow;
+ return -EINVAL;
+ip4_flow:
+ addr4 = (struct in_addr *)&fsp->h_u.usr_ip4_spec.ip4src;
+ mask4 = (struct in_addr *)&fsp->m_u.usr_ip4_spec.ip4src;
+ if (mask4->s_addr) {
+ ip_src = __fbnic_ip4_sync(fbd, fbd->ip_src,
+ addr4, mask4);
+ if (!ip_src)
+ return -ENOSPC;
+
+ set_bit(idx, ip_src->act_tcam);
+ ip_value |= FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ ip_src - fbd->ip_src);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPSRC_IDX);
+ }
+
+ addr4 = (struct in_addr *)&fsp->h_u.usr_ip4_spec.ip4dst;
+ mask4 = (struct in_addr *)&fsp->m_u.usr_ip4_spec.ip4dst;
+ if (mask4->s_addr) {
+ ip_dst = __fbnic_ip4_sync(fbd, fbd->ip_dst,
+ addr4, mask4);
+ if (!ip_dst) {
+ if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
+ memset(ip_src, 0, sizeof(*ip_src));
+ return -ENOSPC;
+ }
+
+ set_bit(idx, ip_dst->act_tcam);
+ ip_value |= FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ ip_dst - fbd->ip_dst);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPDST_IDX);
+ }
+ flow_value |= FBNIC_RPC_TCAM_ACT1_IP_VALID |
+ FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
+ FBNIC_RPC_TCAM_ACT1_IP_VALID |
+ FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID);
+ break;
+ case UDP_V6_FLOW:
+udp6_flow:
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L4_IS_UDP;
+ fallthrough;
+ case TCP_V6_FLOW:
+tcp6_flow:
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L4_VALID;
+ flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_L4_IS_UDP |
+ FBNIC_RPC_TCAM_ACT1_L4_VALID);
+
+ sport = be16_to_cpu(fsp->h_u.tcp_ip6_spec.psrc);
+ sport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip6_spec.psrc);
+ dport = be16_to_cpu(fsp->h_u.tcp_ip6_spec.pdst);
+ dport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip6_spec.pdst);
+ goto ipv6_flow;
+ case IPV6_USER_FLOW:
+ if (!fsp->m_u.usr_ip6_spec.l4_proto)
+ goto ipv6_flow;
+
+ if (fsp->m_u.usr_ip6_spec.l4_proto != 0xff)
+ return -EINVAL;
+ if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_UDP)
+ goto udp6_flow;
+ if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_TCP)
+ goto tcp6_flow;
+ if (fsp->h_u.usr_ip6_spec.l4_proto != IPPROTO_IPV6)
+ return -EINVAL;
+
+ addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6src;
+ mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6src;
+ if (!ipv6_addr_any(mask6)) {
+ ip_src = __fbnic_ip6_sync(fbd, fbd->ipo_src,
+ addr6, mask6);
+ if (!ip_src)
+ return -ENOSPC;
+
+ set_bit(idx, ip_src->act_tcam);
+ ip_value |=
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
+ ip_src - fbd->ipo_src);
+ ip_mask &=
+ ~(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX);
+ }
+
+ addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6dst;
+ mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6dst;
+ if (!ipv6_addr_any(mask6)) {
+ ip_dst = __fbnic_ip6_sync(fbd, fbd->ipo_dst,
+ addr6, mask6);
+ if (!ip_dst) {
+ if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
+ memset(ip_src, 0, sizeof(*ip_src));
+ return -ENOSPC;
+ }
+
+ set_bit(idx, ip_dst->act_tcam);
+ ip_value |=
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
+ ip_dst - fbd->ipo_dst);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX);
+ }
+
+ flow_value |= FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID;
+ flow_mask &= ~FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID;
+ipv6_flow:
+ addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6src;
+ mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6src;
+ if (!ip_src && !ipv6_addr_any(mask6)) {
+ ip_src = __fbnic_ip6_sync(fbd, fbd->ip_src,
+ addr6, mask6);
+ if (!ip_src)
+ return -ENOSPC;
+
+ set_bit(idx, ip_src->act_tcam);
+ ip_value |= FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ ip_src - fbd->ip_src);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPSRC_IDX);
+ }
+
+ addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6dst;
+ mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6dst;
+ if (!ip_dst && !ipv6_addr_any(mask6)) {
+ ip_dst = __fbnic_ip6_sync(fbd, fbd->ip_dst,
+ addr6, mask6);
+ if (!ip_dst) {
+ if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
+ memset(ip_src, 0, sizeof(*ip_src));
+ return -ENOSPC;
+ }
+
+ set_bit(idx, ip_dst->act_tcam);
+ ip_value |= FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ ip_dst - fbd->ip_dst);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPDST_IDX);
+ }
+
+ flow_value |= FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
+ FBNIC_RPC_TCAM_ACT1_IP_VALID |
+ FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
+ FBNIC_RPC_TCAM_ACT1_IP_VALID |
+ FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID);
+ break;
+ case ETHER_FLOW:
+ if (!is_zero_ether_addr(fsp->m_u.ether_spec.h_dest)) {
+ u8 *addr = fsp->h_u.ether_spec.h_dest;
+ u8 *mask = fsp->m_u.ether_spec.h_dest;
+
+ /* Do not allow MAC addr of 0 */
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* Only support full MAC address to avoid
+ * conflicts with other MAC addresses.
+ */
+ if (!is_broadcast_ether_addr(mask))
+ return -EINVAL;
+
+ if (is_multicast_ether_addr(addr))
+ mac_addr = __fbnic_mc_sync(fbd, addr);
+ else
+ mac_addr = __fbnic_uc_sync(fbd, addr);
+
+ if (!mac_addr)
+ return -ENOSPC;
+
+ set_bit(idx, mac_addr->act_tcam);
+ flow_value |=
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
+ mac_addr - fbd->mac_addr);
+ flow_mask &= ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX;
+ }
+
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ flow_mask &= ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Write action table values */
+ act_tcam->dest = dest;
+ act_tcam->rss_en_mask = fbnic_flow_hash_2_rss_en_mask(fbn, hash_idx);
+
+ /* Write IP Match value/mask to action_tcam[0] */
+ act_tcam->value.tcam[0] = ip_value;
+ act_tcam->mask.tcam[0] = ip_mask;
+
+ /* Write flow type value/mask to action_tcam[1] */
+ act_tcam->value.tcam[1] = flow_value;
+ act_tcam->mask.tcam[1] = flow_mask;
+
+ /* Write error, DSCP, extra L4 matches to action_tcam[2] */
+ act_tcam->value.tcam[2] = misc;
+ act_tcam->mask.tcam[2] = misc_mask;
+
+ /* Write source/destination port values */
+ act_tcam->value.tcam[3] = sport;
+ act_tcam->mask.tcam[3] = sport_mask;
+ act_tcam->value.tcam[4] = dport;
+ act_tcam->mask.tcam[4] = dport_mask;
+
+ for (j = 5; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
+ act_tcam->mask.tcam[j] = 0xffff;
+
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+ fsp->location = location;
+
+ if (netif_running(fbn->netdev)) {
+ fbnic_write_rules(fbd);
+ if (ip_src || ip_dst)
+ fbnic_write_ip_addr(fbd);
+ if (mac_addr)
+ fbnic_write_macda(fbd);
+ }
+
+ return 0;
+}
+
+static void fbnic_clear_nfc_macda(struct fbnic_net *fbn,
+ unsigned int tcam_idx)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;)
+ __fbnic_xc_unsync(&fbd->mac_addr[idx], tcam_idx);
+
+ /* Write updates to hardware */
+ if (netif_running(fbn->netdev))
+ fbnic_write_macda(fbd);
+}
+
+static void fbnic_clear_nfc_ip_addr(struct fbnic_net *fbn,
+ unsigned int tcam_idx)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->ip_src); idx--;)
+ __fbnic_ip_unsync(&fbd->ip_src[idx], tcam_idx);
+ for (idx = ARRAY_SIZE(fbd->ip_dst); idx--;)
+ __fbnic_ip_unsync(&fbd->ip_dst[idx], tcam_idx);
+ for (idx = ARRAY_SIZE(fbd->ipo_src); idx--;)
+ __fbnic_ip_unsync(&fbd->ipo_src[idx], tcam_idx);
+ for (idx = ARRAY_SIZE(fbd->ipo_dst); idx--;)
+ __fbnic_ip_unsync(&fbd->ipo_dst[idx], tcam_idx);
+
+ /* Write updates to hardware */
+ if (netif_running(fbn->netdev))
+ fbnic_write_ip_addr(fbd);
+}
+
+static int fbnic_set_cls_rule_del(struct fbnic_net *fbn,
+ const struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp;
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_act_tcam *act_tcam;
+ int idx;
+
+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (fsp->location >= FBNIC_RPC_ACT_TBL_NFC_ENTRIES)
+ return -EINVAL;
+
+ idx = fsp->location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+ act_tcam = &fbd->act_tcam[idx];
+
+ if (act_tcam->state != FBNIC_TCAM_S_VALID)
+ return -EINVAL;
+
+ act_tcam->state = FBNIC_TCAM_S_DELETE;
+
+ if ((act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID) &&
+ (~act_tcam->mask.tcam[1] & FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX))
+ fbnic_clear_nfc_macda(fbn, idx);
+
+ if ((act_tcam->value.tcam[0] &
+ (FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID)) &&
+ (~act_tcam->mask.tcam[0] &
+ (FBNIC_RPC_TCAM_ACT0_IPSRC_IDX |
+ FBNIC_RPC_TCAM_ACT0_IPDST_IDX |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX)))
+ fbnic_clear_nfc_ip_addr(fbn, idx);
+
+ if (netif_running(fbn->netdev))
+ fbnic_write_rules(fbd);
+
+ return 0;
+}
+
static int fbnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
struct fbnic_net *fbn = netdev_priv(netdev);
@@ -281,6 +1083,12 @@ static int fbnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
case ETHTOOL_SRXFH:
ret = fbnic_set_rss_hash_opts(fbn, cmd);
break;
+ case ETHTOOL_SRXCLSRLINS:
+ ret = fbnic_set_cls_rule_ins(fbn, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = fbnic_set_cls_rule_del(fbn, cmd);
+ break;
}
return ret;
@@ -374,6 +1182,61 @@ fbnic_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
return 0;
}
+static int
+fbnic_modify_rxfh_context(struct net_device *netdev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ const u32 *indir = rxfh->indir;
+ unsigned int changes;
+
+ if (!indir)
+ indir = ethtool_rxfh_context_indir(ctx);
+
+ changes = fbnic_set_indir(fbn, rxfh->rss_context, indir);
+ if (changes && netif_running(netdev))
+ fbnic_rss_reinit_hw(fbn->fbd, fbn);
+
+ return 0;
+}
+
+static int
+fbnic_create_rxfh_context(struct net_device *netdev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) {
+ NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported");
+ return -EOPNOTSUPP;
+ }
+ ctx->hfunc = ETH_RSS_HASH_TOP;
+
+ if (!rxfh->indir) {
+ u32 *indir = ethtool_rxfh_context_indir(ctx);
+ unsigned int num_rx = fbn->num_rx_queues;
+ unsigned int i;
+
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
+ indir[i] = ethtool_rxfh_indir_default(i, num_rx);
+ }
+
+ return fbnic_modify_rxfh_context(netdev, ctx, rxfh, extack);
+}
+
+static int
+fbnic_remove_rxfh_context(struct net_device *netdev,
+ struct ethtool_rxfh_context *ctx, u32 rss_context,
+ struct netlink_ext_ack *extack)
+{
+ /* Nothing to do, contexts are allocated statically */
+ return 0;
+}
+
static void fbnic_get_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
@@ -523,14 +1386,14 @@ static void fbnic_get_ts_stats(struct net_device *netdev,
unsigned int start;
int i;
- ts_stats->pkts = fbn->tx_stats.ts_packets;
- ts_stats->lost = fbn->tx_stats.ts_lost;
+ ts_stats->pkts = fbn->tx_stats.twq.ts_packets;
+ ts_stats->lost = fbn->tx_stats.twq.ts_lost;
for (i = 0; i < fbn->num_tx_queues; i++) {
ring = fbn->tx[i];
do {
start = u64_stats_fetch_begin(&ring->stats.syncp);
- ts_packets = ring->stats.ts_packets;
- ts_lost = ring->stats.ts_lost;
+ ts_packets = ring->stats.twq.ts_packets;
+ ts_lost = ring->stats.twq.ts_lost;
} while (u64_stats_fetch_retry(&ring->stats.syncp, start));
ts_stats->pkts += ts_packets;
ts_stats->lost += ts_lost;
@@ -586,9 +1449,17 @@ fbnic_get_eth_mac_stats(struct net_device *netdev,
}
static const struct ethtool_ops fbnic_ethtool_ops = {
+ .supported_coalesce_params =
+ ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES,
+ .rxfh_max_num_contexts = FBNIC_RPC_RSS_TBL_COUNT,
.get_drvinfo = fbnic_get_drvinfo,
.get_regs_len = fbnic_get_regs_len,
.get_regs = fbnic_get_regs,
+ .get_coalesce = fbnic_get_coalesce,
+ .set_coalesce = fbnic_set_coalesce,
+ .get_ringparam = fbnic_get_ringparam,
+ .set_ringparam = fbnic_set_ringparam,
.get_strings = fbnic_get_strings,
.get_ethtool_stats = fbnic_get_ethtool_stats,
.get_sset_count = fbnic_get_sset_count,
@@ -598,6 +1469,9 @@ static const struct ethtool_ops fbnic_ethtool_ops = {
.get_rxfh_indir_size = fbnic_get_rxfh_indir_size,
.get_rxfh = fbnic_get_rxfh,
.set_rxfh = fbnic_set_rxfh,
+ .create_rxfh_context = fbnic_create_rxfh_context,
+ .modify_rxfh_context = fbnic_modify_rxfh_context,
+ .remove_rxfh_context = fbnic_remove_rxfh_context,
.get_channels = fbnic_get_channels,
.set_channels = fbnic_set_channels,
.get_ts_info = fbnic_get_ts_info,
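fbnic_set_cls_rule_ins() above builds each rule as value/mask word pairs: the mask starts at 0xffff meaning "don't care" and bits are cleared as fields are matched. A self-contained sketch of that match convention, under the assumption that a set mask bit means ignore (field widths are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A lookup hits when every bit we care about (mask bit clear) matches. */
static bool tcam_match(uint16_t key, uint16_t value, uint16_t mask)
{
	return ((key ^ value) & (uint16_t)~mask) == 0;
}

int main(void)
{
	uint16_t value = 0, mask = 0xffff;

	/* Match port 443 in the low 12 bits of this word; leave the rest don't-care. */
	value |= 443;
	mask &= (uint16_t)~0x0fff;

	printf("port 443: %s\n", tcam_match(443, value, mask) ? "hit" : "miss");
	printf("port 80:  %s\n", tcam_match(80, value, mask) ? "hit" : "miss");
	printf("443 with high bits set: %s\n",
	       tcam_match(443 | 0xf000, value, mask) ? "hit" : "miss");
	return 0;
}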
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
index bbc7c1c0c37e..88db3dacb940 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
@@ -494,16 +494,13 @@ static int fbnic_fw_parse_bmc_addrs(u8 bmc_mac_addr[][ETH_ALEN],
static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
{
- u32 active_slot = 0, all_multi = 0;
+ u32 all_multi = 0, version = 0;
struct fbnic_dev *fbd = opaque;
- u32 speed = 0, fec = 0;
- size_t commit_size = 0;
bool bmc_present;
int err;
- get_unsigned_result(FBNIC_FW_CAP_RESP_VERSION,
- fbd->fw_cap.running.mgmt.version);
-
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_VERSION);
+ fbd->fw_cap.running.mgmt.version = version;
if (!fbd->fw_cap.running.mgmt.version)
return -EINVAL;
@@ -524,43 +521,41 @@ static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
return -EINVAL;
}
- get_string_result(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR, commit_size,
- fbd->fw_cap.running.mgmt.commit,
- FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
- if (!commit_size)
+ if (fta_get_str(results, FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR,
+ fbd->fw_cap.running.mgmt.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE) <= 0)
dev_warn(fbd->dev, "Firmware did not send mgmt commit!\n");
- get_unsigned_result(FBNIC_FW_CAP_RESP_STORED_VERSION,
- fbd->fw_cap.stored.mgmt.version);
- get_string_result(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR, commit_size,
- fbd->fw_cap.stored.mgmt.commit,
- FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
-
- get_unsigned_result(FBNIC_FW_CAP_RESP_CMRT_VERSION,
- fbd->fw_cap.running.bootloader.version);
- get_string_result(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR, commit_size,
- fbd->fw_cap.running.bootloader.commit,
- FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
-
- get_unsigned_result(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION,
- fbd->fw_cap.stored.bootloader.version);
- get_string_result(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR, commit_size,
- fbd->fw_cap.stored.bootloader.commit,
- FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
-
- get_unsigned_result(FBNIC_FW_CAP_RESP_UEFI_VERSION,
- fbd->fw_cap.stored.undi.version);
- get_string_result(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR, commit_size,
- fbd->fw_cap.stored.undi.commit,
- FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
-
- get_unsigned_result(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT, active_slot);
- fbd->fw_cap.active_slot = active_slot;
-
- get_unsigned_result(FBNIC_FW_CAP_RESP_FW_LINK_SPEED, speed);
- get_unsigned_result(FBNIC_FW_CAP_RESP_FW_LINK_FEC, fec);
- fbd->fw_cap.link_speed = speed;
- fbd->fw_cap.link_fec = fec;
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_VERSION);
+ fbd->fw_cap.stored.mgmt.version = version;
+ fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_COMMIT_STR,
+ fbd->fw_cap.stored.mgmt.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_CMRT_VERSION);
+ fbd->fw_cap.running.bootloader.version = version;
+ fta_get_str(results, FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR,
+ fbd->fw_cap.running.bootloader.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION);
+ fbd->fw_cap.stored.bootloader.version = version;
+ fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR,
+ fbd->fw_cap.stored.bootloader.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_UEFI_VERSION);
+ fbd->fw_cap.stored.undi.version = version;
+ fta_get_str(results, FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
+ fbd->fw_cap.stored.undi.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ fbd->fw_cap.active_slot =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT);
+ fbd->fw_cap.link_speed =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_SPEED);
+ fbd->fw_cap.link_fec =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_FEC);
bmc_present = !!results[FBNIC_FW_CAP_RESP_BMC_PRESENT];
if (bmc_present) {
@@ -575,7 +570,8 @@ static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
if (err)
return err;
- get_unsigned_result(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI, all_multi);
+ all_multi =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_BMC_ALL_MULTI);
} else {
memset(fbd->fw_cap.bmc_mac_addr, 0,
sizeof(fbd->fw_cap.bmc_mac_addr));
@@ -743,9 +739,9 @@ free_message:
}
static const struct fbnic_tlv_index fbnic_tsene_read_resp_index[] = {
- FBNIC_TLV_ATTR_S32(FBNIC_TSENE_THERM),
- FBNIC_TLV_ATTR_S32(FBNIC_TSENE_VOLT),
- FBNIC_TLV_ATTR_S32(FBNIC_TSENE_ERROR),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_THERM),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_VOLT),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_ERROR),
FBNIC_TLV_ATTR_LAST
};
@@ -754,32 +750,31 @@ static int fbnic_fw_parse_tsene_read_resp(void *opaque,
{
struct fbnic_fw_completion *cmpl_data;
struct fbnic_dev *fbd = opaque;
+ s32 err_resp;
int err = 0;
/* Verify we have a completion pointer to provide with data */
cmpl_data = fbnic_fw_get_cmpl_by_type(fbd,
FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
if (!cmpl_data)
- return -EINVAL;
+ return -ENOSPC;
- if (results[FBNIC_TSENE_ERROR]) {
- err = fbnic_tlv_attr_get_unsigned(results[FBNIC_TSENE_ERROR]);
- if (err)
- goto exit_complete;
- }
+ err_resp = fta_get_sint(results, FBNIC_FW_TSENE_ERROR);
+ if (err_resp)
+ goto msg_err;
- if (!results[FBNIC_TSENE_THERM] || !results[FBNIC_TSENE_VOLT]) {
+ if (!results[FBNIC_FW_TSENE_THERM] || !results[FBNIC_FW_TSENE_VOLT]) {
err = -EINVAL;
- goto exit_complete;
+ goto msg_err;
}
cmpl_data->u.tsene.millidegrees =
- fbnic_tlv_attr_get_signed(results[FBNIC_TSENE_THERM]);
+ fta_get_sint(results, FBNIC_FW_TSENE_THERM);
cmpl_data->u.tsene.millivolts =
- fbnic_tlv_attr_get_signed(results[FBNIC_TSENE_VOLT]);
+ fta_get_sint(results, FBNIC_FW_TSENE_VOLT);
-exit_complete:
- cmpl_data->result = err;
+msg_err:
+ cmpl_data->result = err_resp ? : err;
complete(&cmpl_data->done);
fbnic_fw_put_cmpl(cmpl_data);
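The firmware response parsing above moves from the get_*_result macros to fta_get_uint()/fta_get_sint()-style helpers that look an attribute up in the results table and fall back to 0 when it is absent. A self-contained analogue of that "optional attribute with default" idea (the table layout and helper are written for illustration, not the fbnic TLV API):

#include <stdio.h>

enum { ATTR_VERSION, ATTR_LINK_SPEED, ATTR_MAX };

struct attr {
	int present;
	unsigned int value;
};

/* Return the attribute's value, or 0 if the sender did not include it, so
 * callers can assign unconditionally instead of open-coding NULL checks.
 */
static unsigned int attr_get_uint(const struct attr *results, int id)
{
	if (id < 0 || id >= ATTR_MAX || !results[id].present)
		return 0;
	return results[id].value;
}

int main(void)
{
	struct attr results[ATTR_MAX] = { { 0, 0 }, { 0, 0 } };

	results[ATTR_VERSION].present = 1;
	results[ATTR_VERSION].value = 0x010203;

	printf("version: 0x%x\n", attr_get_uint(results, ATTR_VERSION));
	printf("link speed (absent): %u\n",
	       attr_get_uint(results, ATTR_LINK_SPEED));
	return 0;
}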
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
index fe68333d51b1..a3618e7826c2 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
@@ -139,10 +139,10 @@ enum {
};
enum {
- FBNIC_TSENE_THERM = 0x0,
- FBNIC_TSENE_VOLT = 0x1,
- FBNIC_TSENE_ERROR = 0x2,
- FBNIC_TSENE_MSG_MAX
+ FBNIC_FW_TSENE_THERM = 0x0,
+ FBNIC_FW_TSENE_VOLT = 0x1,
+ FBNIC_FW_TSENE_ERROR = 0x2,
+ FBNIC_FW_TSENE_MSG_MAX
};
enum {
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index 7a96b6ee773f..79a01fdd1dd1 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -487,8 +487,9 @@ static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
struct fbnic_net *fbn = netdev_priv(dev);
struct fbnic_ring *rxr = fbn->rx[idx];
struct fbnic_queue_stats *stats;
+ u64 bytes, packets, alloc_fail;
+ u64 csum_complete, csum_none;
unsigned int start;
- u64 bytes, packets;
if (!rxr)
return;
@@ -498,10 +499,16 @@ static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
start = u64_stats_fetch_begin(&stats->syncp);
bytes = stats->bytes;
packets = stats->packets;
+ alloc_fail = stats->rx.alloc_failed;
+ csum_complete = stats->rx.csum_complete;
+ csum_none = stats->rx.csum_none;
} while (u64_stats_fetch_retry(&stats->syncp, start));
rx->bytes = bytes;
rx->packets = packets;
+ rx->alloc_fail = alloc_fail;
+ rx->csum_complete = csum_complete;
+ rx->csum_none = csum_none;
}
static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
@@ -510,6 +517,7 @@ static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
struct fbnic_net *fbn = netdev_priv(dev);
struct fbnic_ring *txr = fbn->tx[idx];
struct fbnic_queue_stats *stats;
+ u64 stop, wake, csum, lso;
unsigned int start;
u64 bytes, packets;
@@ -521,10 +529,18 @@ static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
start = u64_stats_fetch_begin(&stats->syncp);
bytes = stats->bytes;
packets = stats->packets;
+ csum = stats->twq.csum_partial;
+ lso = stats->twq.lso;
+ stop = stats->twq.stop;
+ wake = stats->twq.wake;
} while (u64_stats_fetch_retry(&stats->syncp, start));
tx->bytes = bytes;
tx->packets = packets;
+ tx->needs_csum = csum + lso;
+ tx->hw_gso_wire_packets = lso;
+ tx->stop = stop;
+ tx->wake = wake;
}
static void fbnic_get_base_stats(struct net_device *dev,
@@ -535,9 +551,16 @@ static void fbnic_get_base_stats(struct net_device *dev,
tx->bytes = fbn->tx_stats.bytes;
tx->packets = fbn->tx_stats.packets;
+ tx->needs_csum = fbn->tx_stats.twq.csum_partial + fbn->tx_stats.twq.lso;
+ tx->hw_gso_wire_packets = fbn->tx_stats.twq.lso;
+ tx->stop = fbn->tx_stats.twq.stop;
+ tx->wake = fbn->tx_stats.twq.wake;
rx->bytes = fbn->rx_stats.bytes;
rx->packets = fbn->rx_stats.packets;
+ rx->alloc_fail = fbn->rx_stats.rx.alloc_failed;
+ rx->csum_complete = fbn->rx_stats.rx.csum_complete;
+ rx->csum_none = fbn->rx_stats.rx.csum_none;
}
static const struct netdev_stat_ops fbnic_stat_ops = {
@@ -588,7 +611,7 @@ void fbnic_netdev_free(struct fbnic_dev *fbd)
* Allocate and initialize the netdev and netdev private structure. Bind
* together the hardware, netdev, and pci data structures.
*
- * Return: 0 on success, negative on failure
+ * Return: Pointer to net_device on success, NULL on failure
**/
struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
{
@@ -618,6 +641,10 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
fbn->ppq_size = FBNIC_PPQ_SIZE_DEFAULT;
fbn->rcq_size = FBNIC_RCQ_SIZE_DEFAULT;
+ fbn->tx_usecs = FBNIC_TX_USECS_DEFAULT;
+ fbn->rx_usecs = FBNIC_RX_USECS_DEFAULT;
+ fbn->rx_max_frames = FBNIC_RX_FRAMES_DEFAULT;
+
default_queues = netif_get_num_default_rss_queues();
if (default_queues > fbd->max_num_queues)
default_queues = fbd->max_num_queues;
@@ -628,15 +655,32 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
fbnic_rss_key_fill(fbn->rss_key);
fbnic_rss_init_en_mask(fbn);
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ netdev->gso_partial_features =
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
netdev->features |=
+ netdev->gso_partial_features |
+ FBNIC_TUN_GSO_FEATURES |
NETIF_F_RXHASH |
NETIF_F_SG |
NETIF_F_HW_CSUM |
- NETIF_F_RXCSUM;
+ NETIF_F_RXCSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_GSO_UDP_L4;
netdev->hw_features |= netdev->features;
netdev->vlan_features |= netdev->features;
netdev->hw_enc_features |= netdev->features;
+ netdev->features |= NETIF_F_NTUPLE;
netdev->min_mtu = IPV6_MIN_MTU;
netdev->max_mtu = FBNIC_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
index a392ac1cc4f2..561837e80ec8 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
@@ -12,6 +12,10 @@
#include "fbnic_txrx.h"
#define FBNIC_MAX_NAPI_VECTORS 128u
+#define FBNIC_MIN_RXD_PER_FRAME 2
+
+/* Natively supported tunnel GSO features (not thru GSO_PARTIAL) */
+#define FBNIC_TUN_GSO_FEATURES NETIF_F_GSO_IPXIP6
struct fbnic_net {
struct fbnic_ring *tx[FBNIC_MAX_TXQS];
@@ -27,6 +31,11 @@ struct fbnic_net {
u32 ppq_size;
u32 rcq_size;
+ u16 rx_usecs;
+ u16 tx_usecs;
+
+ u32 rx_max_frames;
+
u16 num_napi;
struct phylink *phylink;
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
index bb11fc83367d..860b02b22c15 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
@@ -133,7 +133,6 @@ int fbnic_phylink_init(struct net_device *netdev)
struct fbnic_net *fbn = netdev_priv(netdev);
struct phylink *phylink;
- fbn->phylink_pcs.neg_mode = true;
fbn->phylink_pcs.ops = &fbnic_phylink_pcs_ops;
fbn->phylink_config.dev = &netdev->dev;
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
index c25bd300b902..8ff07b5562e3 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
@@ -3,6 +3,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <net/ipv6.h>
#include "fbnic.h"
#include "fbnic_netdev.h"
@@ -60,7 +61,7 @@ void fbnic_rss_disable_hw(struct fbnic_dev *fbd)
#define FBNIC_FH_2_RSSEM_BIT(_fh, _rssem, _val) \
FIELD_PREP(FBNIC_RPC_ACT_TBL1_RSS_ENA_##_rssem, \
FIELD_GET(RXH_##_fh, _val))
-static u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type)
+u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type)
{
u32 flow_hash = fbn->rss_flow_hash[flow_type];
u32 rss_en_mask = 0;
@@ -698,6 +699,359 @@ void fbnic_write_tce_tcam(struct fbnic_dev *fbd)
__fbnic_write_tce_tcam(fbd);
}
+struct fbnic_ip_addr *__fbnic_ip4_sync(struct fbnic_dev *fbd,
+ struct fbnic_ip_addr *ip_addr,
+ const struct in_addr *addr,
+ const struct in_addr *mask)
+{
+ struct fbnic_ip_addr *avail_addr = NULL;
+ unsigned int i;
+
+ /* Scan from top of list to bottom, filling bottom up. */
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES; i++, ip_addr++) {
+ struct in6_addr *m = &ip_addr->mask;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DISABLED) {
+ avail_addr = ip_addr;
+ continue;
+ }
+
+ if (ip_addr->version != 4)
+ continue;
+
+ /* Drop avail_addr if mask is a subset of our current mask.
+ * This prevents us from inserting a longer prefix behind a
+ * shorter one.
+ *
+ * The mask is stored as an inverted value, so as an example:
+ * m    ffff ffff ffff ffff ffff ffff 0000 0000
+ * mask 0000 0000 0000 0000 0000 0000 ffff ffff
+ *
+ * "m" and "mask" represent a typical IPv4 mask as stored in
+ * the TCAM and as provided by the stack. The code below
+ * should return a non-zero result if there is a 0 stored
+ * anywhere in "m" where "mask" also has a 0.
+ */
+ if (~m->s6_addr32[3] & ~mask->s_addr) {
+ avail_addr = NULL;
+ continue;
+ }
+
+ /* Check to see if the mask actually contains fewer bits than
+ * our new mask "m". The XOR below should only result in 0 if
+ * "m" is masking a bit that we are looking for in our new
+ * "mask", we eliminated the 0^0 case with the check above.
+ *
+ * If it contains fewer bits we need to stop here, otherwise
+ * we might be adding an unreachable rule.
+ */
+ if (~(m->s6_addr32[3] ^ mask->s_addr))
+ break;
+
+ if (ip_addr->value.s6_addr32[3] == addr->s_addr) {
+ avail_addr = ip_addr;
+ break;
+ }
+ }
+
+ if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) {
+ ipv6_addr_set(&avail_addr->value, 0, 0, 0, addr->s_addr);
+ ipv6_addr_set(&avail_addr->mask, htonl(~0), htonl(~0),
+ htonl(~0), ~mask->s_addr);
+ avail_addr->version = 4;
+
+ avail_addr->state = FBNIC_TCAM_S_ADD;
+ }
+
+ return avail_addr;
+}
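For reference, a minimal standalone sketch of the inverted-mask tests used in __fbnic_ip4_sync() above, working only on the low 32-bit word (the upper 96 bits of a stored IPv4 entry are forced to ones). This is not part of the patch; the helper names and example prefixes are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* "m" is the stored, inverted mask word; "mask" is the mask from the stack. */
static int stored_prefix_longer(uint32_t m, uint32_t mask)
{
    /* Non-zero when the stored entry matches on a bit that the new
     * mask leaves as a wildcard, i.e. the stored prefix is longer.
     */
    return (~m & ~mask) != 0;
}

static int masks_differ(uint32_t m, uint32_t mask)
{
    /* m == ~mask when the two prefixes are identical, so the XOR of
     * m and mask is all ones only for an exact match.
     */
    return ~(m ^ mask) != 0;
}

int main(void)
{
    uint32_t full = 0xffffffff;    /* /32 from the stack */
    uint32_t m_full = ~full;       /* stored inverted /32 -> 0 */
    uint32_t slash24 = 0xffffff00; /* /24 from the stack */

    printf("/32 vs /32: longer=%d differ=%d\n",
           stored_prefix_longer(m_full, full), masks_differ(m_full, full));
    printf("/32 vs /24: longer=%d differ=%d\n",
           stored_prefix_longer(m_full, slash24), masks_differ(m_full, slash24));
    return 0;
}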
+
+struct fbnic_ip_addr *__fbnic_ip6_sync(struct fbnic_dev *fbd,
+ struct fbnic_ip_addr *ip_addr,
+ const struct in6_addr *addr,
+ const struct in6_addr *mask)
+{
+ struct fbnic_ip_addr *avail_addr = NULL;
+ unsigned int i;
+
+ ip_addr = &ip_addr[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES - 1];
+
+ /* Scan from bottom of list to top, filling top down. */
+ for (i = FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES; i--; ip_addr--) {
+ struct in6_addr *m = &ip_addr->mask;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DISABLED) {
+ avail_addr = ip_addr;
+ continue;
+ }
+
+ if (ip_addr->version != 6)
+ continue;
+
+ /* Drop avail_addr if mask is a superset of our current mask.
+ * This prevents us from inserting a longer prefix behind a
+ * shorter one.
+ *
+ * The mask is stored as an inverted value, so as an example:
+ * m    0000 0000 0000 0000 0000 0000 0000 0000
+ * mask ffff ffff ffff ffff ffff ffff ffff ffff
+ *
+ * "m" and "mask" represent a typical IPv6 mask as stored in
+ * the TCAM and as provided by the stack. The code below
+ * should return a non-zero result if "m" and "mask" overlap,
+ * which will cause us to drop any cached avail_addr value so
+ * that we do not insert a v6 address behind it.
+ */
+ if ((m->s6_addr32[0] & mask->s6_addr32[0]) |
+ (m->s6_addr32[1] & mask->s6_addr32[1]) |
+ (m->s6_addr32[2] & mask->s6_addr32[2]) |
+ (m->s6_addr32[3] & mask->s6_addr32[3])) {
+ avail_addr = NULL;
+ continue;
+ }
+
+ /* The previous test eliminated any overlap between the
+ * two values so now we need to check for gaps.
+ *
+ * If the mask is equal to our current mask, then it should
+ * result in m ^ mask = ffff ffff; if, however, the value
+ * stored in m is bigger, we should see a 0 appear somewhere
+ * in the mask.
+ */
+ if (~(m->s6_addr32[0] ^ mask->s6_addr32[0]) |
+ ~(m->s6_addr32[1] ^ mask->s6_addr32[1]) |
+ ~(m->s6_addr32[2] ^ mask->s6_addr32[2]) |
+ ~(m->s6_addr32[3] ^ mask->s6_addr32[3]))
+ break;
+
+ if (ipv6_addr_cmp(&ip_addr->value, addr))
+ continue;
+
+ avail_addr = ip_addr;
+ break;
+ }
+
+ if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) {
+ memcpy(&avail_addr->value, addr, sizeof(*addr));
+ ipv6_addr_set(&avail_addr->mask,
+ ~mask->s6_addr32[0], ~mask->s6_addr32[1],
+ ~mask->s6_addr32[2], ~mask->s6_addr32[3]);
+ avail_addr->version = 6;
+
+ avail_addr->state = FBNIC_TCAM_S_ADD;
+ }
+
+ return avail_addr;
+}
+
+int __fbnic_ip_unsync(struct fbnic_ip_addr *ip_addr, unsigned int tcam_idx)
+{
+ if (!test_and_clear_bit(tcam_idx, ip_addr->act_tcam))
+ return -ENOENT;
+
+ if (bitmap_empty(ip_addr->act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES))
+ ip_addr->state = FBNIC_TCAM_S_DELETE;
+
+ return 0;
+}
+
+static void fbnic_clear_ip_src_entry(struct fbnic_dev *fbd, unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_IPSRC(idx, i), 0);
+}
+
+static void fbnic_clear_ip_dst_entry(struct fbnic_dev *fbd, unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_IPDST(idx, i), 0);
+}
+
+static void fbnic_clear_ip_outer_src_entry(struct fbnic_dev *fbd,
+ unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(idx, i), 0);
+}
+
+static void fbnic_clear_ip_outer_dst_entry(struct fbnic_dev *fbd,
+ unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(idx, i), 0);
+}
+
+static void fbnic_write_ip_src_entry(struct fbnic_dev *fbd, unsigned int idx,
+ struct fbnic_ip_addr *ip_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+ value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_IPSRC(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
+ wrfl(fbd);
+
+ /* Bit 129 is used to flag for v4/v6 */
+ wr32(fbd, FBNIC_RPC_TCAM_IPSRC(idx, i),
+ (ip_addr->version == 6) | FBNIC_RPC_TCAM_VALIDATE);
+}
+
+static void fbnic_write_ip_dst_entry(struct fbnic_dev *fbd, unsigned int idx,
+ struct fbnic_ip_addr *ip_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+ value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_IPDST(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
+ wrfl(fbd);
+
+ /* Bit 129 is used to flag for v4/v6 */
+ wr32(fbd, FBNIC_RPC_TCAM_IPDST(idx, i),
+ (ip_addr->version == 6) | FBNIC_RPC_TCAM_VALIDATE);
+}
+
+static void fbnic_write_ip_outer_src_entry(struct fbnic_dev *fbd,
+ unsigned int idx,
+ struct fbnic_ip_addr *ip_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+ value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
+ wrfl(fbd);
+
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(idx, i), FBNIC_RPC_TCAM_VALIDATE);
+}
+
+static void fbnic_write_ip_outer_dst_entry(struct fbnic_dev *fbd,
+ unsigned int idx,
+ struct fbnic_ip_addr *ip_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+ value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
+ wrfl(fbd);
+
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(idx, i), FBNIC_RPC_TCAM_VALIDATE);
+}
+
+void fbnic_write_ip_addr(struct fbnic_dev *fbd)
+{
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->ip_src); idx--;) {
+ struct fbnic_ip_addr *ip_addr = &fbd->ip_src[idx];
+
+ /* Check if update flag is set else skip. */
+ if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ /* Clear by writing 0s. */
+ if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
+ /* Invalidate entry and clear addr state info */
+ fbnic_clear_ip_src_entry(fbd, idx);
+ memset(ip_addr, 0, sizeof(*ip_addr));
+
+ continue;
+ }
+
+ fbnic_write_ip_src_entry(fbd, idx, ip_addr);
+
+ ip_addr->state = FBNIC_TCAM_S_VALID;
+ }
+
+ /* Repeat process for other IP TCAMs */
+ for (idx = ARRAY_SIZE(fbd->ip_dst); idx--;) {
+ struct fbnic_ip_addr *ip_addr = &fbd->ip_dst[idx];
+
+ if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
+ fbnic_clear_ip_dst_entry(fbd, idx);
+ memset(ip_addr, 0, sizeof(*ip_addr));
+
+ continue;
+ }
+
+ fbnic_write_ip_dst_entry(fbd, idx, ip_addr);
+
+ ip_addr->state = FBNIC_TCAM_S_VALID;
+ }
+
+ for (idx = ARRAY_SIZE(fbd->ipo_src); idx--;) {
+ struct fbnic_ip_addr *ip_addr = &fbd->ipo_src[idx];
+
+ if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
+ fbnic_clear_ip_outer_src_entry(fbd, idx);
+ memset(ip_addr, 0, sizeof(*ip_addr));
+
+ continue;
+ }
+
+ fbnic_write_ip_outer_src_entry(fbd, idx, ip_addr);
+
+ ip_addr->state = FBNIC_TCAM_S_VALID;
+ }
+
+ for (idx = ARRAY_SIZE(fbd->ipo_dst); idx--;) {
+ struct fbnic_ip_addr *ip_addr = &fbd->ipo_dst[idx];
+
+ if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
+ fbnic_clear_ip_outer_dst_entry(fbd, idx);
+ memset(ip_addr, 0, sizeof(*ip_addr));
+
+ continue;
+ }
+
+ fbnic_write_ip_outer_dst_entry(fbd, idx, ip_addr);
+
+ ip_addr->state = FBNIC_TCAM_S_VALID;
+ }
+}
+
void fbnic_clear_rules(struct fbnic_dev *fbd)
{
u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
index 0d8285fa5b45..6892414195c3 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
@@ -7,6 +7,8 @@
#include <uapi/linux/in6.h>
#include <linux/bitfield.h>
+struct in_addr;
+
/* The TCAM state definitions follow an expected ordering.
* They start out disabled, then move through the following states:
* Disabled 0 -> Add 2
@@ -32,6 +34,12 @@ enum {
#define FBNIC_RPC_TCAM_MACDA_WORD_LEN 3
#define FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES 32
+/* 8 IPSRC and IPDST TCAM entries each,
+ * with 8 address registers plus a Validate register per entry
+ */
+#define FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN 8
+#define FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES 8
+
#define FBNIC_RPC_TCAM_ACT_WORD_LEN 11
#define FBNIC_RPC_TCAM_ACT_NUM_ENTRIES 64
@@ -47,6 +55,13 @@ struct fbnic_mac_addr {
DECLARE_BITMAP(act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES);
};
+struct fbnic_ip_addr {
+ struct in6_addr mask, value;
+ unsigned char version;
+ unsigned char state;
+ DECLARE_BITMAP(act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES);
+};
+
struct fbnic_act_tcam {
struct {
u16 tcam[FBNIC_RPC_TCAM_ACT_WORD_LEN];
@@ -81,6 +96,11 @@ enum {
#define FBNIC_RPC_ACT_TBL_BMC_OFFSET 0
#define FBNIC_RPC_ACT_TBL_BMC_ALL_MULTI_OFFSET 1
+/* This should leave us with 48 total entries in the TCAM that can be used
+ * for NFC after also deducting the 14 needed for RSS table programming.
+ */
+#define FBNIC_RPC_ACT_TBL_NFC_OFFSET 2
+
/* We reserve the last 14 entries for RSS rules on the host. The BMC
* unicast rule will need to be populated above these and is expected to
* use MACDA TCAM entry 23 to store the BMC MAC address.
@@ -88,6 +108,9 @@ enum {
#define FBNIC_RPC_ACT_TBL_RSS_OFFSET \
(FBNIC_RPC_ACT_TBL_NUM_ENTRIES - FBNIC_RSS_EN_NUM_ENTRIES)
+#define FBNIC_RPC_ACT_TBL_NFC_ENTRIES \
+ (FBNIC_RPC_ACT_TBL_RSS_OFFSET - FBNIC_RPC_ACT_TBL_NFC_OFFSET)
+
/* Flags used to identify the owner for this MAC filter. Note that any
* flags set for Broadcast thru Promisc indicate that the rule belongs
* to the RSS filters for the host.
@@ -168,6 +191,7 @@ void fbnic_rss_init_en_mask(struct fbnic_net *fbn);
void fbnic_rss_disable_hw(struct fbnic_dev *fbd);
void fbnic_rss_reinit_hw(struct fbnic_dev *fbd, struct fbnic_net *fbn);
void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn);
+u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type);
int __fbnic_xc_unsync(struct fbnic_mac_addr *mac_addr, unsigned int tcam_idx);
struct fbnic_mac_addr *__fbnic_uc_sync(struct fbnic_dev *fbd,
@@ -177,6 +201,17 @@ struct fbnic_mac_addr *__fbnic_mc_sync(struct fbnic_dev *fbd,
void fbnic_sift_macda(struct fbnic_dev *fbd);
void fbnic_write_macda(struct fbnic_dev *fbd);
+struct fbnic_ip_addr *__fbnic_ip4_sync(struct fbnic_dev *fbd,
+ struct fbnic_ip_addr *ip_addr,
+ const struct in_addr *addr,
+ const struct in_addr *mask);
+struct fbnic_ip_addr *__fbnic_ip6_sync(struct fbnic_dev *fbd,
+ struct fbnic_ip_addr *ip_addr,
+ const struct in6_addr *addr,
+ const struct in6_addr *mask);
+int __fbnic_ip_unsync(struct fbnic_ip_addr *ip_addr, unsigned int tcam_idx);
+void fbnic_write_ip_addr(struct fbnic_dev *fbd);
+
static inline int __fbnic_uc_unsync(struct fbnic_mac_addr *mac_addr)
{
return __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_UNICAST);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
index 2a174ab062a3..517ed8b2f1cb 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
@@ -196,13 +196,17 @@ int fbnic_tlv_attr_put_string(struct fbnic_tlv_msg *msg, u16 attr_id,
/**
* fbnic_tlv_attr_get_unsigned - Retrieve unsigned value from result
* @attr: Attribute to retrieve data from
+ * @def: The default value if attr is NULL
*
* Return: unsigned 64b value containing integer value
**/
-u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr)
+u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr, u64 def)
{
__le64 le64_value = 0;
+ if (!attr)
+ return def;
+
memcpy(&le64_value, &attr->value[0],
le16_to_cpu(attr->hdr.len) - sizeof(*attr));
@@ -212,15 +216,21 @@ u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr)
/**
* fbnic_tlv_attr_get_signed - Retrieve signed value from result
* @attr: Attribute to retrieve data from
+ * @def: The default value if attr is NULL
*
* Return: signed 64b value containing integer value
**/
-s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr)
+s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr, s64 def)
{
- int shift = (8 + sizeof(*attr) - le16_to_cpu(attr->hdr.len)) * 8;
__le64 le64_value = 0;
+ int shift;
s64 value;
+ if (!attr)
+ return def;
+
+ shift = (8 + sizeof(*attr) - le16_to_cpu(attr->hdr.len)) * 8;
+
/* Copy the value and adjust for byte ordering */
memcpy(&le64_value, &attr->value[0],
le16_to_cpu(attr->hdr.len) - sizeof(*attr));
@@ -233,19 +243,40 @@ s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr)
/**
* fbnic_tlv_attr_get_string - Retrieve string value from result
* @attr: Attribute to retrieve data from
- * @str: Pointer to an allocated string to store the data
- * @max_size: The maximum size which can be in str
+ * @dst: Pointer to an allocated string to store the data
+ * @dstsize: The maximum size which can be in dst
*
- * Return: the size of the string read from firmware
+ * Return: the size of the string read from firmware or negative error.
**/
-size_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *str,
- size_t max_size)
+ssize_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *dst,
+ size_t dstsize)
{
- max_size = min_t(size_t, max_size,
- (le16_to_cpu(attr->hdr.len) * 4) - sizeof(*attr));
- memcpy(str, &attr->value, max_size);
+ size_t srclen, len;
+ ssize_t ret;
+
+ if (!attr)
+ return -EINVAL;
+
+ if (dstsize == 0)
+ return -E2BIG;
+
+ srclen = le16_to_cpu(attr->hdr.len) - sizeof(*attr);
+ if (srclen > 0 && ((char *)attr->value)[srclen - 1] == '\0')
+ srclen--;
+
+ if (srclen >= dstsize) {
+ len = dstsize - 1;
+ ret = -E2BIG;
+ } else {
+ len = srclen;
+ ret = len;
+ }
+
+ memcpy(dst, &attr->value, len);
+ /* Zero pad end of dst. */
+ memset(dst + len, 0, dstsize - len);
- return max_size;
+ return ret;
}
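The reworked getter above follows strscpy()-like semantics: a trailing NUL already present in the attribute payload is not counted, the destination is always NUL-terminated and zero-padded, and -E2BIG reports truncation. Below is a standalone user-space sketch of just that copy/truncate step (not part of the patch; it omits the TLV header handling and uses an illustrative helper name).

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/* Copy srclen bytes of src into dst of dstsize bytes, NUL terminating and
 * zero padding dst. Returns the copied length or -E2BIG on truncation.
 */
static ssize_t copy_tlv_string(char *dst, size_t dstsize,
                               const char *src, size_t srclen)
{
    size_t len;
    ssize_t ret;

    if (!dstsize)
        return -E2BIG;

    /* Do not count a terminator already present in the payload. */
    if (srclen && src[srclen - 1] == '\0')
        srclen--;

    if (srclen >= dstsize) {
        len = dstsize - 1;
        ret = -E2BIG;
    } else {
        len = srclen;
        ret = (ssize_t)len;
    }

    memcpy(dst, src, len);
    memset(dst + len, 0, dstsize - len);
    return ret;
}

int main(void)
{
    char buf[8];

    printf("%zd '%s'\n", copy_tlv_string(buf, sizeof(buf), "fw1.2\0", 6), buf);
    printf("%zd '%s'\n", copy_tlv_string(buf, sizeof(buf), "firmware-build", 14), buf);
    return 0;
}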
/**
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h
index 67300ab44353..c34bf87eeec9 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h
@@ -114,34 +114,10 @@ static inline bool fbnic_tlv_attr_get_bool(struct fbnic_tlv_msg *attr)
return !!attr;
}
-u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr);
-s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr);
-size_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *str,
- size_t max_size);
-
-#define get_unsigned_result(id, location) \
-do { \
- struct fbnic_tlv_msg *result = results[id]; \
- if (result) \
- location = fbnic_tlv_attr_get_unsigned(result); \
-} while (0)
-
-#define get_signed_result(id, location) \
-do { \
- struct fbnic_tlv_msg *result = results[id]; \
- if (result) \
- location = fbnic_tlv_attr_get_signed(result); \
-} while (0)
-
-#define get_string_result(id, size, str, max_size) \
-do { \
- struct fbnic_tlv_msg *result = results[id]; \
- if (result) \
- size = fbnic_tlv_attr_get_string(result, str, max_size); \
-} while (0)
-
-#define get_bool(id) (!!(results[id]))
-
+u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr, u64 def);
+s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr, s64 def);
+ssize_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *dst,
+ size_t dstsize);
struct fbnic_tlv_msg *fbnic_tlv_msg_alloc(u16 msg_id);
int fbnic_tlv_attr_put_flag(struct fbnic_tlv_msg *msg, const u16 attr_id);
int fbnic_tlv_attr_put_value(struct fbnic_tlv_msg *msg, const u16 attr_id,
@@ -170,6 +146,13 @@ int fbnic_tlv_msg_parse(void *opaque, struct fbnic_tlv_msg *msg,
const struct fbnic_tlv_parser *parser);
int fbnic_tlv_parser_error(void *opaque, struct fbnic_tlv_msg **results);
+#define fta_get_uint(_results, _id) \
+ fbnic_tlv_attr_get_unsigned(_results[_id], 0)
+#define fta_get_sint(_results, _id) \
+ fbnic_tlv_attr_get_signed(_results[_id], 0)
+#define fta_get_str(_results, _id, _dst, _dstsize) \
+ fbnic_tlv_attr_get_string(_results[_id], _dst, _dstsize)
+
#define FBNIC_TLV_MSG_ERROR \
FBNIC_TLV_PARSER(UNKNOWN, NULL, fbnic_tlv_parser_error)
#endif /* _FBNIC_TLV_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
index d4d7027df9a0..ac11389a764c 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -6,6 +6,7 @@
#include <linux/pci.h>
#include <net/netdev_queues.h>
#include <net/page_pool/helpers.h>
+#include <net/tcp.h>
#include "fbnic.h"
#include "fbnic_csr.h"
@@ -18,6 +19,7 @@ enum {
struct fbnic_xmit_cb {
u32 bytecount;
+ u16 gso_segs;
u8 desc_count;
u8 flags;
int hw_head;
@@ -113,6 +115,11 @@ static int fbnic_maybe_stop_tx(const struct net_device *dev,
res = netif_txq_maybe_stop(txq, fbnic_desc_unused(ring), size,
FBNIC_TX_DESC_WAKEUP);
+ if (!res) {
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.twq.stop++;
+ u64_stats_update_end(&ring->stats.syncp);
+ }
return !res;
}
@@ -174,8 +181,72 @@ static bool fbnic_tx_tstamp(struct sk_buff *skb)
}
static bool
+fbnic_tx_lso(struct fbnic_ring *ring, struct sk_buff *skb,
+ struct skb_shared_info *shinfo, __le64 *meta,
+ unsigned int *l2len, unsigned int *i3len)
+{
+ unsigned int l3_type, l4_type, l4len, hdrlen;
+ unsigned char *l4hdr;
+ __be16 payload_len;
+
+ if (unlikely(skb_cow_head(skb, 0)))
+ return true;
+
+ if (shinfo->gso_type & SKB_GSO_PARTIAL) {
+ l3_type = FBNIC_TWD_L3_TYPE_OTHER;
+ } else if (!skb->encapsulation) {
+ if (ip_hdr(skb)->version == 4)
+ l3_type = FBNIC_TWD_L3_TYPE_IPV4;
+ else
+ l3_type = FBNIC_TWD_L3_TYPE_IPV6;
+ } else {
+ unsigned int o3len;
+
+ o3len = skb_inner_network_header(skb) - skb_network_header(skb);
+ *i3len -= o3len;
+ *meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L3_OHLEN_MASK,
+ o3len / 2));
+ l3_type = FBNIC_TWD_L3_TYPE_V6V6;
+ }
+
+ l4hdr = skb_checksum_start(skb);
+ payload_len = cpu_to_be16(skb->len - (l4hdr - skb->data));
+
+ if (shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+ struct tcphdr *tcph = (struct tcphdr *)l4hdr;
+
+ l4_type = FBNIC_TWD_L4_TYPE_TCP;
+ l4len = __tcp_hdrlen((struct tcphdr *)l4hdr);
+ csum_replace_by_diff(&tcph->check, (__force __wsum)payload_len);
+ } else {
+ struct udphdr *udph = (struct udphdr *)l4hdr;
+
+ l4_type = FBNIC_TWD_L4_TYPE_UDP;
+ l4len = sizeof(struct udphdr);
+ csum_replace_by_diff(&udph->check, (__force __wsum)payload_len);
+ }
+
+ hdrlen = (l4hdr - skb->data) + l4len;
+ *meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L3_TYPE_MASK, l3_type) |
+ FIELD_PREP(FBNIC_TWD_L4_TYPE_MASK, l4_type) |
+ FIELD_PREP(FBNIC_TWD_L4_HLEN_MASK, l4len / 4) |
+ FIELD_PREP(FBNIC_TWD_MSS_MASK, shinfo->gso_size) |
+ FBNIC_TWD_FLAG_REQ_LSO);
+
+ FBNIC_XMIT_CB(skb)->bytecount += (shinfo->gso_segs - 1) * hdrlen;
+ FBNIC_XMIT_CB(skb)->gso_segs = shinfo->gso_segs;
+
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.twq.lso += shinfo->gso_segs;
+ u64_stats_update_end(&ring->stats.syncp);
+
+ return false;
+}
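The bytecount adjustment above accounts for the headers the NIC replicates on every LSO segment: the skb carries the headers once, so the on-wire byte count is skb->len plus (gso_segs - 1) copies of the header. A standalone arithmetic sketch with illustrative sizes follows; note the driver takes gso_segs from skb_shinfo(skb)->gso_segs, and the ceiling division here only reproduces that count for the example.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t skb_len = 64218;   /* headers + payload handed to the driver */
    uint32_t hdrlen = 54;       /* Ethernet 14 + IPv4 20 + TCP 20 */
    uint32_t gso_size = 1448;   /* MSS */
    uint32_t payload = skb_len - hdrlen;
    uint32_t gso_segs = (payload + gso_size - 1) / gso_size;
    uint32_t bytecount = skb_len + (gso_segs - 1) * hdrlen;

    printf("segments=%u wire bytes=%u\n", gso_segs, bytecount);
    return 0;
}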
+
+static bool
fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
unsigned int l2len, i3len;
if (fbnic_tx_tstamp(skb))
@@ -190,7 +261,15 @@ fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
*meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_CSUM_OFFSET_MASK,
skb->csum_offset / 2));
- *meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_CSO);
+ if (shinfo->gso_size) {
+ if (fbnic_tx_lso(ring, skb, shinfo, meta, &l2len, &i3len))
+ return true;
+ } else {
+ *meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_CSO);
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.twq.csum_partial++;
+ u64_stats_update_end(&ring->stats.syncp);
+ }
*meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) |
FIELD_PREP(FBNIC_TWD_L3_IHLEN_MASK, i3len / 2));
@@ -198,12 +277,15 @@ fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
}
static void
-fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq)
+fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq,
+ u64 *csum_cmpl, u64 *csum_none)
{
skb_checksum_none_assert(skb);
- if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
+ if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
+ (*csum_none)++;
return;
+ }
if (FIELD_GET(FBNIC_RCD_META_L4_CSUM_UNNECESSARY, rcd)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -212,6 +294,7 @@ fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq)
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = (__force __wsum)csum;
+ (*csum_cmpl)++;
}
}
@@ -329,7 +412,9 @@ fbnic_xmit_frame_ring(struct sk_buff *skb, struct fbnic_ring *ring)
/* Write all members within DWORD to condense this into 2 4B writes */
FBNIC_XMIT_CB(skb)->bytecount = skb->len;
+ FBNIC_XMIT_CB(skb)->gso_segs = 1;
FBNIC_XMIT_CB(skb)->desc_count = 0;
+ FBNIC_XMIT_CB(skb)->flags = 0;
if (fbnic_tx_offloads(ring, skb, meta))
goto err_free;
@@ -356,6 +441,59 @@ netdev_tx_t fbnic_xmit_frame(struct sk_buff *skb, struct net_device *dev)
return fbnic_xmit_frame_ring(skb, fbn->tx[q_map]);
}
+static netdev_features_t
+fbnic_features_check_encap_gso(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features, unsigned int l3len)
+{
+ netdev_features_t skb_gso_features;
+ struct ipv6hdr *ip6_hdr;
+ unsigned char l4_hdr;
+ unsigned int start;
+ __be16 frag_off;
+
+ /* Require MANGLEID for GSO_PARTIAL of IPv4.
+ * In theory we could support TSO with a single, innermost v4 header
+ * by pretending everything before it is L2, but that needs to be
+ * parsed case by case, so we leave it for when the need arises.
+ */
+ if (!(features & NETIF_F_TSO_MANGLEID))
+ features &= ~NETIF_F_TSO;
+
+ skb_gso_features = skb_shinfo(skb)->gso_type;
+ skb_gso_features <<= NETIF_F_GSO_SHIFT;
+
+ /* We'd only clear the native GSO features, so don't bother validating
+ * if the match can only be on those supported thru GSO_PARTIAL.
+ */
+ if (!(skb_gso_features & FBNIC_TUN_GSO_FEATURES))
+ return features;
+
+ /* We can only do IPv6-in-IPv6, not v4-in-v6. It'd be nice
+ * to fall back to partial for this, or any failure below.
+ * This is just an optimization, UDPv4 will be caught later on.
+ */
+ if (skb_gso_features & NETIF_F_TSO)
+ return features & ~FBNIC_TUN_GSO_FEATURES;
+
+ /* Inner headers multiple of 2 */
+ if ((skb_inner_network_header(skb) - skb_network_header(skb)) % 2)
+ return features & ~FBNIC_TUN_GSO_FEATURES;
+
+ /* Encapsulated GSO packet, make 100% sure it's IPv6-in-IPv6. */
+ ip6_hdr = ipv6_hdr(skb);
+ if (ip6_hdr->version != 6)
+ return features & ~FBNIC_TUN_GSO_FEATURES;
+
+ l4_hdr = ip6_hdr->nexthdr;
+ start = (unsigned char *)ip6_hdr - skb->data + sizeof(struct ipv6hdr);
+ start = ipv6_skip_exthdr(skb, start, &l4_hdr, &frag_off);
+ if (frag_off || l4_hdr != IPPROTO_IPV6 ||
+ skb->data + start != skb_inner_network_header(skb))
+ return features & ~FBNIC_TUN_GSO_FEATURES;
+
+ return features;
+}
+
netdev_features_t
fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
netdev_features_t features)
@@ -376,9 +514,12 @@ fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
!FIELD_FIT(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) ||
!FIELD_FIT(FBNIC_TWD_L3_IHLEN_MASK, l3len / 2) ||
!FIELD_FIT(FBNIC_TWD_CSUM_OFFSET_MASK, skb->csum_offset / 2))
- return features & ~NETIF_F_CSUM_MASK;
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
- return features;
+ if (likely(!skb->encapsulation) || !skb_is_gso(skb))
+ return features;
+
+ return fbnic_features_check_encap_gso(skb, dev, features, l3len);
}
static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
@@ -429,7 +570,7 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
}
total_bytes += FBNIC_XMIT_CB(skb)->bytecount;
- total_packets += 1;
+ total_packets += FBNIC_XMIT_CB(skb)->gso_segs;
napi_consume_skb(skb, napi_budget);
}
@@ -444,7 +585,7 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
if (unlikely(discard)) {
u64_stats_update_begin(&ring->stats.syncp);
ring->stats.dropped += total_packets;
- ring->stats.ts_lost += ts_lost;
+ ring->stats.twq.ts_lost += ts_lost;
u64_stats_update_end(&ring->stats.syncp);
netdev_tx_completed_queue(txq, total_packets, total_bytes);
@@ -456,9 +597,13 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
ring->stats.packets += total_packets;
u64_stats_update_end(&ring->stats.syncp);
- netif_txq_completed_wake(txq, total_packets, total_bytes,
- fbnic_desc_unused(ring),
- FBNIC_TX_DESC_WAKEUP);
+ if (!netif_txq_completed_wake(txq, total_packets, total_bytes,
+ fbnic_desc_unused(ring),
+ FBNIC_TX_DESC_WAKEUP)) {
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.twq.wake++;
+ u64_stats_update_end(&ring->stats.syncp);
+ }
}
static void fbnic_clean_tsq(struct fbnic_napi_vector *nv,
@@ -507,7 +652,7 @@ static void fbnic_clean_tsq(struct fbnic_napi_vector *nv,
skb_tstamp_tx(skb, &hwtstamp);
u64_stats_update_begin(&ring->stats.syncp);
- ring->stats.ts_packets++;
+ ring->stats.twq.ts_packets++;
u64_stats_update_end(&ring->stats.syncp);
}
@@ -661,8 +806,13 @@ static void fbnic_fill_bdq(struct fbnic_napi_vector *nv, struct fbnic_ring *bdq)
struct page *page;
page = page_pool_dev_alloc_pages(nv->page_pool);
- if (!page)
+ if (!page) {
+ u64_stats_update_begin(&bdq->stats.syncp);
+ bdq->stats.rx.alloc_failed++;
+ u64_stats_update_end(&bdq->stats.syncp);
+
break;
+ }
fbnic_page_pool_init(bdq, i, page);
fbnic_bd_prep(bdq, i, page);
@@ -875,12 +1025,13 @@ static void fbnic_rx_tstamp(struct fbnic_napi_vector *nv, u64 rcd,
static void fbnic_populate_skb_fields(struct fbnic_napi_vector *nv,
u64 rcd, struct sk_buff *skb,
- struct fbnic_q_triad *qt)
+ struct fbnic_q_triad *qt,
+ u64 *csum_cmpl, u64 *csum_none)
{
struct net_device *netdev = nv->napi.dev;
struct fbnic_ring *rcq = &qt->cmpl;
- fbnic_rx_csum(rcd, skb, rcq);
+ fbnic_rx_csum(rcd, skb, rcq, csum_cmpl, csum_none);
if (netdev->features & NETIF_F_RXHASH)
skb_set_hash(skb,
@@ -898,7 +1049,8 @@ static bool fbnic_rcd_metadata_err(u64 rcd)
static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
struct fbnic_q_triad *qt, int budget)
{
- unsigned int packets = 0, bytes = 0, dropped = 0;
+ unsigned int packets = 0, bytes = 0, dropped = 0, alloc_failed = 0;
+ u64 csum_complete = 0, csum_none = 0;
struct fbnic_ring *rcq = &qt->cmpl;
struct fbnic_pkt_buff *pkt;
s32 head0 = -1, head1 = -1;
@@ -947,14 +1099,22 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
/* Populate skb and invalidate XDP */
if (!IS_ERR_OR_NULL(skb)) {
- fbnic_populate_skb_fields(nv, rcd, skb, qt);
+ fbnic_populate_skb_fields(nv, rcd, skb, qt,
+ &csum_complete,
+ &csum_none);
packets++;
bytes += skb->len;
napi_gro_receive(&nv->napi, skb);
} else {
- dropped++;
+ if (!skb)
+ alloc_failed++;
+
+ dropped++;
+
fbnic_put_pkt_buff(nv, pkt, 1);
}
@@ -977,6 +1137,9 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
/* Re-add ethernet header length (removed in fbnic_build_skb) */
rcq->stats.bytes += ETH_HLEN * packets;
rcq->stats.dropped += dropped;
+ rcq->stats.rx.alloc_failed += alloc_failed;
+ rcq->stats.rx.csum_complete += csum_complete;
+ rcq->stats.rx.csum_none += csum_none;
u64_stats_update_end(&rcq->stats.syncp);
/* Unmap and free processed buffers */
@@ -1054,6 +1217,11 @@ void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
fbn->rx_stats.bytes += stats->bytes;
fbn->rx_stats.packets += stats->packets;
fbn->rx_stats.dropped += stats->dropped;
+ fbn->rx_stats.rx.alloc_failed += stats->rx.alloc_failed;
+ fbn->rx_stats.rx.csum_complete += stats->rx.csum_complete;
+ fbn->rx_stats.rx.csum_none += stats->rx.csum_none;
+ /* Remember to add new stats here */
+ BUILD_BUG_ON(sizeof(fbn->rx_stats.rx) / 8 != 3);
}
void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
@@ -1065,8 +1233,14 @@ void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
fbn->tx_stats.bytes += stats->bytes;
fbn->tx_stats.packets += stats->packets;
fbn->tx_stats.dropped += stats->dropped;
- fbn->tx_stats.ts_lost += stats->ts_lost;
- fbn->tx_stats.ts_packets += stats->ts_packets;
+ fbn->tx_stats.twq.csum_partial += stats->twq.csum_partial;
+ fbn->tx_stats.twq.lso += stats->twq.lso;
+ fbn->tx_stats.twq.ts_lost += stats->twq.ts_lost;
+ fbn->tx_stats.twq.ts_packets += stats->twq.ts_packets;
+ fbn->tx_stats.twq.stop += stats->twq.stop;
+ fbn->tx_stats.twq.wake += stats->twq.wake;
+ /* Remember to add new stats here */
+ BUILD_BUG_ON(sizeof(fbn->tx_stats.twq) / 8 != 6);
}
static void fbnic_remove_tx_ring(struct fbnic_net *fbn,
@@ -1142,7 +1316,9 @@ static int fbnic_alloc_nv_page_pool(struct fbnic_net *fbn,
.dev = nv->dev,
.dma_dir = DMA_BIDIRECTIONAL,
.offset = 0,
- .max_len = PAGE_SIZE
+ .max_len = PAGE_SIZE,
+ .napi = &nv->napi,
+ .netdev = fbn->netdev,
};
struct page_pool *pp;
@@ -2010,9 +2186,51 @@ static void fbnic_config_drop_mode_rcq(struct fbnic_napi_vector *nv,
fbnic_ring_wr32(rcq, FBNIC_QUEUE_RDE_CTL0, rcq_ctl);
}
+static void fbnic_config_rim_threshold(struct fbnic_ring *rcq, u16 nv_idx, u32 rx_desc)
+{
+ u32 threshold;
+
+ /* Set the threshold to half the ring size if rx_frames
+ * is not configured
+ */
+ threshold = rx_desc ? : rcq->size_mask / 2;
+
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_CTL, nv_idx);
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_THRESHOLD, threshold);
+}
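fbnic_config_rim_threshold() above uses the GNU "?:" shorthand: a non-zero rx_desc is programmed directly, while 0 (rx_max_frames left at its default of 0) falls back to half the ring size. A standalone sketch of that selection follows; the constants mirror FBNIC_RX_FRAMES_DEFAULT = 0 and FBNIC_MIN_RXD_PER_FRAME = 2 from this patch, but the function and macro names are illustrative.

#include <stdint.h>
#include <stdio.h>

#define MIN_RXD_PER_FRAME 2

static uint32_t rim_threshold(uint32_t rx_max_frames, uint32_t size_mask)
{
    uint32_t rx_desc = rx_max_frames * MIN_RXD_PER_FRAME;

    /* 0 means "not configured": arm after half the ring instead. */
    return rx_desc ? rx_desc : size_mask / 2;
}

int main(void)
{
    printf("default: %u descriptors\n", rim_threshold(0, 1023));
    printf("rx-frames 64: %u descriptors\n", rim_threshold(64, 1023));
    return 0;
}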
+
+void fbnic_config_txrx_usecs(struct fbnic_napi_vector *nv, u32 arm)
+{
+ struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
+ struct fbnic_dev *fbd = nv->fbd;
+ u32 val = arm;
+
+ val |= FIELD_PREP(FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT, fbn->rx_usecs) |
+ FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT_UPD_EN;
+ val |= FIELD_PREP(FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT, fbn->tx_usecs) |
+ FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT_UPD_EN;
+
+ fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(nv->v_idx), val);
+}
+
+void fbnic_config_rx_frames(struct fbnic_napi_vector *nv)
+{
+ struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
+ int i;
+
+ for (i = nv->txt_count; i < nv->rxt_count + nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ fbnic_config_rim_threshold(&qt->cmpl, nv->v_idx,
+ fbn->rx_max_frames *
+ FBNIC_MIN_RXD_PER_FRAME);
+ }
+}
+
static void fbnic_enable_rcq(struct fbnic_napi_vector *nv,
struct fbnic_ring *rcq)
{
+ struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
u32 log_size = fls(rcq->size_mask);
u32 rcq_ctl;
@@ -2040,8 +2258,8 @@ static void fbnic_enable_rcq(struct fbnic_napi_vector *nv,
fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_SIZE, log_size & 0xf);
/* Store interrupt information for the completion queue */
- fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_CTL, nv->v_idx);
- fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_THRESHOLD, rcq->size_mask / 2);
+ fbnic_config_rim_threshold(rcq, nv->v_idx, fbn->rx_max_frames *
+ FBNIC_MIN_RXD_PER_FRAME);
fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_MASK, 0);
/* Enable queue */
@@ -2080,12 +2298,7 @@ void fbnic_enable(struct fbnic_net *fbn)
static void fbnic_nv_irq_enable(struct fbnic_napi_vector *nv)
{
- struct fbnic_dev *fbd = nv->fbd;
- u32 val;
-
- val = FBNIC_INTR_CQ_REARM_INTR_UNMASK;
-
- fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(nv->v_idx), val);
+ fbnic_config_txrx_usecs(nv, FBNIC_INTR_CQ_REARM_INTR_UNMASK);
}
void fbnic_napi_enable(struct fbnic_net *fbn)
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
index c2a94f31f71b..f46616af41ea 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
@@ -24,13 +24,29 @@ struct fbnic_net;
#define FBNIC_TX_DESC_WAKEUP (FBNIC_MAX_SKB_DESC * 2)
#define FBNIC_TX_DESC_MIN roundup_pow_of_two(FBNIC_TX_DESC_WAKEUP)
+/* To receive the worst case packet we need:
+ * 1 descriptor for primary metadata
+ * + 1 descriptor for optional metadata
+ * + 1 descriptor for headers
+ * + 4 descriptors for payload
+ */
+#define FBNIC_MAX_RX_PKT_DESC 7
+#define FBNIC_RX_DESC_MIN roundup_pow_of_two(FBNIC_MAX_RX_PKT_DESC * 2)
+
#define FBNIC_MAX_TXQS 128u
#define FBNIC_MAX_RXQS 128u
+/* These apply to TWQs, TCQ, RCQ */
+#define FBNIC_QUEUE_SIZE_MIN 16u
+#define FBNIC_QUEUE_SIZE_MAX SZ_64K
+
#define FBNIC_TXQ_SIZE_DEFAULT 1024
#define FBNIC_HPQ_SIZE_DEFAULT 256
#define FBNIC_PPQ_SIZE_DEFAULT 256
#define FBNIC_RCQ_SIZE_DEFAULT 1024
+#define FBNIC_TX_USECS_DEFAULT 35
+#define FBNIC_RX_USECS_DEFAULT 30
+#define FBNIC_RX_FRAMES_DEFAULT 0
#define FBNIC_RX_TROOM \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
@@ -56,9 +72,22 @@ struct fbnic_pkt_buff {
struct fbnic_queue_stats {
u64 packets;
u64 bytes;
+ union {
+ struct {
+ u64 csum_partial;
+ u64 lso;
+ u64 ts_packets;
+ u64 ts_lost;
+ u64 stop;
+ u64 wake;
+ } twq;
+ struct {
+ u64 alloc_failed;
+ u64 csum_complete;
+ u64 csum_none;
+ } rx;
+ };
u64 dropped;
- u64 ts_packets;
- u64 ts_lost;
struct u64_stats_sync syncp;
};
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 3062cc0f9199..c862b13b447a 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -20,8 +20,6 @@
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include "ks8851.h"
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 4a777b449ecd..0be44dcb3393 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -942,6 +942,12 @@ static int lan743x_ptp_io_extts(struct lan743x_adapter *adapter, int on,
extts = &ptp->extts[index];
+ if (extts_request->flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
if (on) {
extts_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS, index);
if (extts_pin < 0)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 3234a960fcc3..0af143ec0f86 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -828,7 +828,6 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
port->phylink_config.type = PHYLINK_NETDEV;
port->phylink_pcs.poll = true;
port->phylink_pcs.ops = &lan966x_phylink_pcs_ops;
- port->phylink_pcs.neg_mode = true;
port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index 6a0e5b83ecd0..74ad1d73b465 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -338,7 +338,6 @@ static int sparx5_create_port(struct sparx5 *sparx5,
spx5_port->custom_etype = 0x8880; /* Vitesse */
spx5_port->phylink_pcs.poll = true;
spx5_port->phylink_pcs.ops = &sparx5_phylink_pcs_ops;
- spx5_port->phylink_pcs.neg_mode = true;
spx5_port->is_mrouter = false;
INIT_LIST_HEAD(&spx5_port->tc_templates);
sparx5->ports[config->portno] = spx5_port;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index 138ac58fae51..f713656f1fae 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -375,6 +375,6 @@ irqreturn_t sparx5_xtr_handler(int irq, void *_sparx5)
void sparx5_port_inj_timer_setup(struct sparx5_port *port)
{
- hrtimer_init(&port->inj_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->inj_timer.function = sparx5_injection_timeout;
+ hrtimer_setup(&port->inj_timer, sparx5_injection_timeout, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index be95336ce089..4ffaf7588885 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -134,9 +134,10 @@ static int mana_gd_detect_devices(struct pci_dev *pdev)
struct gdma_list_devices_resp resp = {};
struct gdma_general_req req = {};
struct gdma_dev_id dev;
- u32 i, max_num_devs;
+ int found_dev = 0;
u16 dev_type;
int err;
+ u32 i;
mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req),
sizeof(resp));
@@ -148,12 +149,17 @@ static int mana_gd_detect_devices(struct pci_dev *pdev)
return err ? err : -EPROTO;
}
- max_num_devs = min_t(u32, MAX_NUM_GDMA_DEVICES, resp.num_of_devs);
-
- for (i = 0; i < max_num_devs; i++) {
+ for (i = 0; i < GDMA_DEV_LIST_SIZE &&
+ found_dev < resp.num_of_devs; i++) {
dev = resp.devs[i];
dev_type = dev.type;
+ /* Skip empty devices */
+ if (dev.as_uint32 == 0)
+ continue;
+
+ found_dev++;
+
/* HWC is already detected in mana_hwc_create_channel(). */
if (dev_type == GDMA_DEVICE_HWC)
continue;
@@ -331,6 +337,7 @@ void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type,
queue->id, queue->head * GDMA_WQE_BU_SIZE, 0);
}
+EXPORT_SYMBOL_NS(mana_gd_wq_ring_doorbell, "NET_MANA");
void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
{
@@ -343,6 +350,7 @@ void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id,
head, arm_bit);
}
+EXPORT_SYMBOL_NS(mana_gd_ring_cq, "NET_MANA");
static void mana_gd_process_eqe(struct gdma_queue *eq)
{
@@ -666,8 +674,11 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd,
gmi = &queue->mem_info;
err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "GDMA queue type: %d, size: %u, gdma memory allocation err: %d\n",
+ spec->type, spec->queue_size, err);
goto free_q;
+ }
queue->head = 0;
queue->tail = 0;
@@ -688,6 +699,8 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd,
*queue_ptr = queue;
return 0;
out:
+ dev_err(gc->dev, "Failed to create queue type %d of size %u, err: %d\n",
+ spec->type, spec->queue_size, err);
mana_gd_free_memory(gmi);
free_q:
kfree(queue);
@@ -770,7 +783,13 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
}
gmi->dma_region_handle = resp.dma_region_handle;
+ dev_dbg(gc->dev, "Created DMA region handle 0x%llx\n",
+ gmi->dma_region_handle);
out:
+ if (err)
+ dev_dbg(gc->dev,
+ "Failed to create DMA region of length: %u, page_type: %d, status: 0x%x, err: %d\n",
+ length, req->gdma_page_type, resp.hdr.status, err);
kfree(req);
return err;
}
@@ -793,8 +812,11 @@ int mana_gd_create_mana_eq(struct gdma_dev *gd,
gmi = &queue->mem_info;
err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "GDMA queue type: %d, size: %u, gdma memory allocation err: %d\n",
+ spec->type, spec->queue_size, err);
goto free_q;
+ }
err = mana_gd_create_dma_region(gd, gmi);
if (err)
@@ -815,6 +837,8 @@ int mana_gd_create_mana_eq(struct gdma_dev *gd,
*queue_ptr = queue;
return 0;
out:
+ dev_err(gc->dev, "Failed to create queue type %d of size: %u, err: %d\n",
+ spec->type, spec->queue_size, err);
mana_gd_free_memory(gmi);
free_q:
kfree(queue);
@@ -841,8 +865,11 @@ int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
gmi = &queue->mem_info;
err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "GDMA queue type: %d, size: %u, memory allocation err: %d\n",
+ spec->type, spec->queue_size, err);
goto free_q;
+ }
err = mana_gd_create_dma_region(gd, gmi);
if (err)
@@ -862,11 +889,14 @@ int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
*queue_ptr = queue;
return 0;
out:
+ dev_err(gc->dev, "Failed to create queue type %d of size: %u, err: %d\n",
+ spec->type, spec->queue_size, err);
mana_gd_free_memory(gmi);
free_q:
kfree(queue);
return err;
}
+EXPORT_SYMBOL_NS(mana_gd_create_mana_wq_cq, "NET_MANA");
void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
{
@@ -1041,7 +1071,7 @@ static u32 mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req,
header->inline_oob_size_div4 = client_oob_size / sizeof(u32);
if (oob_in_sgl) {
- WARN_ON_ONCE(!pad_data || wqe_req->num_sge < 2);
+ WARN_ON_ONCE(wqe_req->num_sge < 2);
header->client_oob_in_sgl = 1;
@@ -1148,6 +1178,7 @@ int mana_gd_post_work_request(struct gdma_queue *wq,
return 0;
}
+EXPORT_SYMBOL_NS(mana_gd_post_work_request, "NET_MANA");
int mana_gd_post_and_ring(struct gdma_queue *queue,
const struct gdma_wqe_request *wqe_req,
@@ -1157,8 +1188,11 @@ int mana_gd_post_and_ring(struct gdma_queue *queue,
int err;
err = mana_gd_post_work_request(queue, wqe_req, wqe_info);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "Failed to post work req from queue type %d of size %u (err=%d)\n",
+ queue->type, queue->queue_size, err);
return err;
+ }
mana_gd_wq_ring_doorbell(gc, queue);
@@ -1218,6 +1252,7 @@ int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe)
return cqe_idx;
}
+EXPORT_SYMBOL_NS(mana_gd_poll_cq, "NET_MANA");
static irqreturn_t mana_gd_intr(int irq, void *arg)
{
@@ -1435,8 +1470,10 @@ static int mana_gd_setup(struct pci_dev *pdev)
mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);
err = mana_gd_setup_irqs(pdev);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "Failed to setup IRQs: %d\n", err);
return err;
+ }
err = mana_hwc_create_channel(gc);
if (err)
@@ -1454,12 +1491,14 @@ static int mana_gd_setup(struct pci_dev *pdev)
if (err)
goto destroy_hwc;
+ dev_dbg(&pdev->dev, "mana gdma setup successful\n");
return 0;
destroy_hwc:
mana_hwc_destroy_channel(gc);
remove_irq:
mana_gd_remove_irqs(pdev);
+ dev_err(&pdev->dev, "%s failed (error %d)\n", __func__, err);
return err;
}
@@ -1470,6 +1509,7 @@ static void mana_gd_cleanup(struct pci_dev *pdev)
mana_hwc_destroy_channel(gc);
mana_gd_remove_irqs(pdev);
+ dev_dbg(&pdev->dev, "mana gdma cleanup successful\n");
}
static bool mana_is_pf(unsigned short dev_id)
@@ -1488,8 +1528,10 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
BUILD_BUG_ON(2 * MAX_PORTS_IN_MANA_DEV * GDMA_EQE_SIZE > EQ_SIZE);
err = pci_enable_device(pdev);
- if (err)
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable pci device (err=%d)\n", err);
return -ENXIO;
+ }
pci_set_master(pdev);
@@ -1498,9 +1540,10 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto disable_dev;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (err)
+ if (err) {
+ dev_err(&pdev->dev, "DMA set mask failed: %d\n", err);
goto release_region;
-
+ }
dma_set_max_seg_size(&pdev->dev, UINT_MAX);
err = -ENOMEM;
@@ -1547,6 +1590,7 @@ unmap_bar:
* adapter-MTU file and apc->mana_pci_debugfs folder.
*/
debugfs_remove_recursive(gc->mana_pci_debugfs);
+ gc->mana_pci_debugfs = NULL;
pci_iounmap(pdev, bar0_va);
free_gc:
pci_set_drvdata(pdev, NULL);
@@ -1569,12 +1613,16 @@ static void mana_gd_remove(struct pci_dev *pdev)
debugfs_remove_recursive(gc->mana_pci_debugfs);
+ gc->mana_pci_debugfs = NULL;
+
pci_iounmap(pdev, gc->bar0_va);
vfree(gc);
pci_release_regions(pdev);
pci_disable_device(pdev);
+
+ dev_dbg(&pdev->dev, "mana gdma remove successful\n");
}
/* The 'state' parameter is not used. */
@@ -1622,6 +1670,8 @@ static void mana_gd_shutdown(struct pci_dev *pdev)
debugfs_remove_recursive(gc->mana_pci_debugfs);
+ gc->mana_pci_debugfs = NULL;
+
pci_disable_device(pdev);
}
@@ -1648,8 +1698,10 @@ static int __init mana_driver_init(void)
mana_debugfs_root = debugfs_create_dir("mana", NULL);
err = pci_register_driver(&mana_driver);
- if (err)
+ if (err) {
debugfs_remove(mana_debugfs_root);
+ mana_debugfs_root = NULL;
+ }
return err;
}
@@ -1659,6 +1711,8 @@ static void __exit mana_driver_exit(void)
pci_unregister_driver(&mana_driver);
debugfs_remove(mana_debugfs_root);
+
+ mana_debugfs_root = NULL;
}
module_init(mana_driver_init);
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index a00f915c5188..1ba49602089b 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -440,7 +440,8 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
gmi = &dma_buf->mem_info;
err = mana_gd_alloc_memory(gc, buf_size, gmi);
if (err) {
- dev_err(hwc->dev, "Failed to allocate DMA buffer: %d\n", err);
+ dev_err(hwc->dev, "Failed to allocate DMA buffer size: %u, err %d\n",
+ buf_size, err);
goto out;
}
@@ -529,6 +530,9 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc,
out:
if (err)
mana_hwc_destroy_wq(hwc, hwc_wq);
+
+ dev_err(hwc->dev, "Failed to create HWC queue size= %u type= %d err= %d\n",
+ queue_size, q_type, err);
return err;
}
diff --git a/drivers/net/ethernet/microsoft/mana/mana_bpf.c b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
index 23b1521c0df9..d30721d4516f 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_bpf.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
@@ -91,7 +91,7 @@ u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
goto out;
xdp_init_buff(xdp, PAGE_SIZE, &rxq->xdp_rxq);
- xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, false);
+ xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, true);
act = bpf_prog_run_xdp(prog, xdp);
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index aa1e47233fe5..1423df8531f7 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -13,6 +13,7 @@
#include <net/checksum.h>
#include <net/ip6_checksum.h>
+#include <net/netdev_lock.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>
@@ -52,10 +53,12 @@ static int mana_open(struct net_device *ndev)
{
struct mana_port_context *apc = netdev_priv(ndev);
int err;
-
err = mana_alloc_queues(ndev);
- if (err)
+
+ if (err) {
+ netdev_err(ndev, "%s failed to allocate queues: %d\n", __func__, err);
return err;
+ }
apc->port_is_up = true;
@@ -64,7 +67,7 @@ static int mana_open(struct net_device *ndev)
netif_carrier_on(ndev);
netif_tx_wake_all_queues(ndev);
-
+ netdev_dbg(ndev, "%s successful\n", __func__);
return 0;
}
@@ -176,6 +179,9 @@ static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
return 0;
frag_err:
+ if (net_ratelimit())
+ netdev_err(apc->ndev, "Failed to map skb of size %u to DMA\n",
+ skb->len);
for (i = sg_i - 1; i >= hsg; i--)
dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
DMA_TO_DEVICE);
@@ -256,6 +262,9 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (skb_cow_head(skb, MANA_HEADROOM))
goto tx_drop_count;
+ if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
+ goto tx_drop_count;
+
txq = &apc->tx_qp[txq_idx].txq;
gdma_sq = txq->gdma_sq;
cq = &apc->tx_qp[txq_idx].tx_cq;
@@ -687,6 +696,7 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_qu
return 0;
error:
+ netdev_err(mpc->ndev, "Failed to pre-allocate RX buffers for %d queues\n", num_queues);
mana_pre_dealloc_rxbufs(mpc);
return -ENOMEM;
}
@@ -738,12 +748,11 @@ static const struct net_device_ops mana_devops = {
static void mana_cleanup_port_context(struct mana_port_context *apc)
{
/*
- * at this point all dir/files under the vport directory
- * are already cleaned up.
- * We are sure the apc->mana_port_debugfs remove will not
- * cause any freed memory access issues
+ * make sure subsequent cleanup attempts don't end up removing already
+ * cleaned dentry pointer
*/
debugfs_remove(apc->mana_port_debugfs);
+ apc->mana_port_debugfs = NULL;
kfree(apc->rxqs);
apc->rxqs = NULL;
}
@@ -1254,6 +1263,7 @@ static void mana_destroy_eq(struct mana_context *ac)
return;
debugfs_remove_recursive(ac->mana_eqs_debugfs);
+ ac->mana_eqs_debugfs = NULL;
for (i = 0; i < gc->max_num_queues; i++) {
eq = ac->eqs[i].eq;
@@ -1304,8 +1314,10 @@ static int mana_create_eq(struct mana_context *ac)
for (i = 0; i < gc->max_num_queues; i++) {
spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "Failed to create EQ %d : %d\n", i, err);
goto out;
+ }
mana_create_eq_debugfs(ac, i);
}
@@ -1547,8 +1559,12 @@ static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
return NULL;
if (xdp->data_hard_start) {
+ u32 metasize = xdp->data - xdp->data_meta;
+
skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb_put(skb, xdp->data_end - xdp->data);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
return skb;
}
@@ -1914,6 +1930,7 @@ static void mana_destroy_txq(struct mana_port_context *apc)
for (i = 0; i < apc->num_queues; i++) {
debugfs_remove_recursive(apc->tx_qp[i].mana_tx_debugfs);
+ apc->tx_qp[i].mana_tx_debugfs = NULL;
napi = &apc->tx_qp[i].tx_cq.napi;
if (apc->tx_qp[i].txq.napi_initialized) {
@@ -2080,6 +2097,8 @@ static int mana_create_txq(struct mana_port_context *apc,
return 0;
out:
+ netdev_err(net, "Failed to create %d TX queues, %d\n",
+ apc->num_queues, err);
mana_destroy_txq(apc);
return err;
}
@@ -2099,6 +2118,7 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
return;
debugfs_remove_recursive(rxq->mana_rx_debugfs);
+ rxq->mana_rx_debugfs = NULL;
napi = &rxq->rx_cq.napi;
@@ -2415,6 +2435,7 @@ static int mana_add_rx_queues(struct mana_port_context *apc,
rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
if (!rxq) {
err = -ENOMEM;
+ netdev_err(ndev, "Failed to create rxq %d : %d\n", i, err);
goto out;
}
@@ -2661,12 +2682,18 @@ int mana_alloc_queues(struct net_device *ndev)
int err;
err = mana_create_vport(apc, ndev);
- if (err)
+ if (err) {
+ netdev_err(ndev, "Failed to create vPort %u : %d\n", apc->port_idx, err);
return err;
+ }
err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
- if (err)
+ if (err) {
+ netdev_err(ndev,
+			   "netif_set_real_num_tx_queues() failed for ndev with num_queues %u : %d\n",
+ apc->num_queues, err);
goto destroy_vport;
+ }
err = mana_add_rx_queues(apc, ndev);
if (err)
@@ -2675,14 +2702,20 @@ int mana_alloc_queues(struct net_device *ndev)
apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
- if (err)
+ if (err) {
+ netdev_err(ndev,
+			   "netif_set_real_num_rx_queues() failed for ndev with num_queues %u : %d\n",
+ apc->num_queues, err);
goto destroy_vport;
+ }
mana_rss_table_init(apc);
err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
- if (err)
+ if (err) {
+ netdev_err(ndev, "Failed to configure RSS table: %d\n", err);
goto destroy_vport;
+ }
if (gd->gdma_context->is_pf) {
err = mana_pf_register_filter(apc);
@@ -2823,8 +2856,10 @@ int mana_detach(struct net_device *ndev, bool from_close)
if (apc->port_st_save) {
err = mana_dealloc_queues(ndev);
- if (err)
+ if (err) {
+ netdev_err(ndev, "%s failed to deallocate queues: %d\n", __func__, err);
return err;
+ }
}
if (!from_close) {
@@ -2873,6 +2908,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
ndev->dev_port = port_idx;
SET_NETDEV_DEV(ndev, gc->dev);
+ netif_set_tso_max_size(ndev, GSO_MAX_SIZE);
+
netif_carrier_off(ndev);
netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
@@ -2968,6 +3005,8 @@ static int add_adev(struct gdma_dev *gd)
goto add_fail;
gd->adev = adev;
+ dev_dbg(gd->gdma_context->dev,
+ "Auxiliary device added successfully\n");
return 0;
add_fail:
@@ -3009,8 +3048,10 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
}
err = mana_create_eq(ac);
- if (err)
+ if (err) {
+ dev_err(dev, "Failed to create EQs: %d\n", err);
goto out;
+ }
err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
MANA_MICRO_VERSION, &num_ports);
@@ -3066,8 +3107,14 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
err = add_adev(gd);
out:
- if (err)
+ if (err) {
mana_remove(gd, false);
+ } else {
+ dev_dbg(dev, "gd=%p, id=%u, num_ports=%d, type=%u, instance=%u\n",
+ gd, gd->dev_id.as_uint32, ac->num_ports,
+ gd->dev_id.type, gd->dev_id.instance);
+ dev_dbg(dev, "%s succeeded\n", __func__);
+ }
return err;
}
@@ -3129,23 +3176,30 @@ out:
gd->driver_data = NULL;
gd->gdma_context = NULL;
kfree(ac);
+ dev_dbg(dev, "%s succeeded\n", __func__);
}
-struct net_device *mana_get_primary_netdev_rcu(struct mana_context *ac, u32 port_index)
+struct net_device *mana_get_primary_netdev(struct mana_context *ac,
+ u32 port_index,
+ netdevice_tracker *tracker)
{
struct net_device *ndev;
- RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
- "Taking primary netdev without holding the RCU read lock");
if (port_index >= ac->num_ports)
return NULL;
- /* When mana is used in netvsc, the upper netdevice should be returned. */
- if (ac->ports[port_index]->flags & IFF_SLAVE)
- ndev = netdev_master_upper_dev_get_rcu(ac->ports[port_index]);
- else
+ rcu_read_lock();
+
+ /* If mana is used in netvsc, the upper netdevice should be returned. */
+ ndev = netdev_master_upper_dev_get_rcu(ac->ports[port_index]);
+
+ /* If there is no upper device, use the parent Ethernet device */
+ if (!ndev)
ndev = ac->ports[port_index];
+ netdev_hold(ndev, tracker, GFP_ATOMIC);
+ rcu_read_unlock();
+
return ndev;
}
-EXPORT_SYMBOL_NS(mana_get_primary_netdev_rcu, "NET_MANA");
+EXPORT_SYMBOL_NS(mana_get_primary_netdev, "NET_MANA");
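The hunk above changes the accessor's contract: instead of requiring the caller to hold the RCU read lock, it now returns a netdev reference held through a netdevice_tracker, which the caller must drop with netdev_put(). A minimal caller sketch under that assumption (the function name, the include, and the use of port 0 are illustrative, not taken from the patch):

#include <linux/netdevice.h>
#include <net/mana/mana.h>

/* Illustrative sketch only: take the tracked reference returned by the
 * reworked mana_get_primary_netdev() and release it when done.
 */
static int example_use_primary_netdev(struct mana_context *ac)
{
	netdevice_tracker tracker;
	struct net_device *ndev;

	ndev = mana_get_primary_netdev(ac, 0, &tracker);
	if (!ndev)
		return -ENODEV;

	/* The reference is held via the tracker; no RCU read lock is
	 * needed while ndev is used here.
	 */

	netdev_put(ndev, &tracker);
	return 0;
}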
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
index 515069d5637b..671af5d4c5d2 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
@@ -565,20 +565,9 @@ static void nfp_net_xfrm_del_state(struct xfrm_state *x)
xa_erase(&nn->xa_ipsec, x->xso.offload_handle - 1);
}
-static bool nfp_net_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- if (x->props.family == AF_INET)
- /* Offload with IPv4 options is not supported yet */
- return ip_hdr(skb)->ihl == 5;
-
- /* Offload with IPv6 extension headers is not support yet */
- return !(ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr));
-}
-
static const struct xfrmdev_ops nfp_net_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = nfp_net_xfrm_add_state,
.xdo_dev_state_delete = nfp_net_xfrm_del_state,
- .xdo_dev_offload_ok = nfp_net_ipsec_offload_ok,
};
void nfp_net_ipsec_init(struct nfp_net *nn)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
index 0d6c59d6d4ae..ea6a288c0d5e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
@@ -83,42 +83,12 @@ nfp_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
return 0;
}
-static u32 nfp_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0
-};
-
-static const struct hwmon_channel_info nfp_chip = {
- .type = hwmon_chip,
- .config = nfp_chip_config,
-};
-
-static u32 nfp_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT,
- 0
-};
-
-static const struct hwmon_channel_info nfp_temp = {
- .type = hwmon_temp,
- .config = nfp_temp_config,
-};
-
-static u32 nfp_power_config[] = {
- HWMON_P_INPUT | HWMON_P_MAX,
- HWMON_P_INPUT,
- HWMON_P_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info nfp_power = {
- .type = hwmon_power,
- .config = nfp_power_config,
-};
-
static const struct hwmon_channel_info * const nfp_hwmon_info[] = {
- &nfp_chip,
- &nfp_temp,
- &nfp_power,
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT),
+ HWMON_CHANNEL_INFO(power, HWMON_P_INPUT | HWMON_P_MAX,
+ HWMON_P_INPUT,
+ HWMON_P_INPUT),
NULL
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index f915c423fe70..886061d7351a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -454,7 +454,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
static void qed_free_cdev(struct qed_dev *cdev)
{
- kfree((void *)cdev);
+ kfree(cdev);
}
static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index fa167b1aa019..5222a035fd19 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -3033,7 +3033,7 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
u16 length;
int rc;
- /* Valiate PF can send such a request */
+ /* Validate PF can send such a request */
if (!vf->vport_instance) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
@@ -3312,7 +3312,7 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
goto out;
}
- /* Determine if the unicast filtering is acceptible by PF */
+ /* Determine if the unicast filtering is acceptable by PF */
if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
(params.type == QED_FILTER_VLAN ||
params.type == QED_FILTER_MAC_VLAN)) {
@@ -3729,7 +3729,7 @@ qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
if (rc) {
- DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n",
+ DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
vfid);
return rc;
}
@@ -4480,7 +4480,7 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
/* Failure to acquire the ptt in 100g creates an odd error
- * where the first engine has already relased IOV.
+ * where the first engine has already released IOV.
*/
if (!ptt) {
DP_ERR(hwfn, "Failed to acquire ptt\n");
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index f9dd50152b1e..28d24d59efb8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -454,8 +454,10 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
num_vlans = sriov->num_allowed_vlans;
sriov->allowed_vlans = kcalloc(num_vlans, sizeof(u16), GFP_KERNEL);
- if (!sriov->allowed_vlans)
+ if (!sriov->allowed_vlans) {
+ qlcnic_sriov_free_vlans(adapter);
return -ENOMEM;
+ }
vlans = (u16 *)&cmd->rsp.arg[3];
for (i = 0; i < num_vlans; i++)
@@ -2167,8 +2169,10 @@ int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
vf = &sriov->vf_info[i];
vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
sizeof(*vf->sriov_vlans), GFP_KERNEL);
- if (!vf->sriov_vlans)
+ if (!vf->sriov_vlans) {
+ qlcnic_sriov_free_vlans(adapter);
return -ENOMEM;
+ }
}
return 0;
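Both qlcnic hunks above address the same leak pattern: a per-VF allocation that fails partway through a loop must not leave the earlier iterations' allocations behind, which the driver handles by calling its free-all helper on the error path. A generic, open-coded sketch of the same idea (the struct and function names are made up, not from qlcnic):

#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative sketch only: free everything allocated so far when one
 * iteration of the allocation loop fails.
 */
struct example_vf {
	u16 *vlans;
};

static int example_alloc_per_vf(struct example_vf *vfs, int num_vfs,
				int num_vlans)
{
	int i;

	for (i = 0; i < num_vfs; i++) {
		vfs[i].vlans = kcalloc(num_vlans, sizeof(u16), GFP_KERNEL);
		if (!vfs[i].vlans) {
			while (--i >= 0) {
				kfree(vfs[i].vlans);
				vfs[i].vlans = NULL;
			}
			return -ENOMEM;
		}
	}

	return 0;
}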
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index f3bea196a8f9..ba8763cac9d9 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -117,11 +117,14 @@ static void rmnet_unregister_bridge(struct rmnet_port *port)
rmnet_unregister_real_device(bridge_dev);
}
-static int rmnet_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int rmnet_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
u32 data_format = RMNET_FLAGS_INGRESS_DEAGGREGATION;
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct net_device *real_dev;
int mode = RMNET_EPMODE_VND;
struct rmnet_endpoint *ep;
@@ -134,7 +137,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
return -EINVAL;
}
- real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+ real_dev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK]));
if (!real_dev) {
NL_SET_ERR_MSG_MOD(extack, "link does not exist");
return -ENODEV;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index a5e3d1a88305..8b4640c5d61e 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -686,8 +686,8 @@ void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u32 size,
void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
{
- hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->hrtimer.function = rmnet_map_flush_tx_packet_queue;
+ hrtimer_setup(&port->hrtimer, rmnet_map_flush_tx_packet_queue, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
spin_lock_init(&port->agg_lock);
rmnet_map_update_ul_agg_config(port, 4096, 1, 800);
INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work);
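The rmnet hunk above is one instance of the tree-wide move from hrtimer_init() plus a separate assignment of the callback to the combined hrtimer_setup() helper, which takes the callback as an argument. A minimal before/after sketch on a made-up structure (my_state and my_timer_fn are illustrative):

#include <linux/hrtimer.h>

struct my_state {
	struct hrtimer timer;
};

static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
	/* container_of(t, struct my_state, timer) would recover the state */
	return HRTIMER_NORESTART;
}

static void my_state_init(struct my_state *s)
{
	/* Old style:
	 *	hrtimer_init(&s->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	 *	s->timer.function = my_timer_fn;
	 * New style, matching the hunk above:
	 */
	hrtimer_setup(&s->timer, my_timer_fn, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);
}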
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index 8a8ea51c639e..fe136f61586f 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -114,7 +114,8 @@ config R8169
will be called r8169. This is recommended.
config R8169_LEDS
- def_bool R8169 && LEDS_TRIGGER_NETDEV
+ bool "Support for controlling the NIC LEDs"
+ depends on R8169 && LEDS_TRIGGER_NETDEV
depends on !(R8169=y && LEDS_CLASS=m)
help
Optional support for controlling the NIC LED's with the netdev
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 5a5eba49c651..4eebd9cb40a3 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -89,6 +89,7 @@
#define JUMBO_6K (6 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
#define JUMBO_7K (7 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
#define JUMBO_9K (9 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
+#define JUMBO_16K (SZ_16K - VLAN_ETH_HLEN - ETH_FCS_LEN)
static const struct {
const char *name;
@@ -169,6 +170,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_VDEVICE(REALTEK, 0x8125) },
{ PCI_VDEVICE(REALTEK, 0x8126) },
{ PCI_VDEVICE(REALTEK, 0x3000) },
+ { PCI_VDEVICE(REALTEK, 0x5000) },
{}
};
@@ -2850,6 +2852,32 @@ static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
RTL_R32(tp, CSIDR) : ~0;
}
+static void rtl_disable_zrxdc_timeout(struct rtl8169_private *tp)
+{
+ struct pci_dev *pdev = tp->pci_dev;
+ u32 csi;
+ int rc;
+ u8 val;
+
+#define RTL_GEN3_RELATED_OFF 0x0890
+#define RTL_GEN3_ZRXDC_NONCOMPL 0x1
+ if (pdev->cfg_size > RTL_GEN3_RELATED_OFF) {
+ rc = pci_read_config_byte(pdev, RTL_GEN3_RELATED_OFF, &val);
+ if (rc == PCIBIOS_SUCCESSFUL) {
+ val &= ~RTL_GEN3_ZRXDC_NONCOMPL;
+ rc = pci_write_config_byte(pdev, RTL_GEN3_RELATED_OFF,
+ val);
+ if (rc == PCIBIOS_SUCCESSFUL)
+ return;
+ }
+ }
+
+ netdev_notice_once(tp->dev,
+ "No native access to PCI extended config space, falling back to CSI\n");
+ csi = rtl_csi_read(tp, RTL_GEN3_RELATED_OFF);
+ rtl_csi_write(tp, RTL_GEN3_RELATED_OFF, csi & ~RTL_GEN3_ZRXDC_NONCOMPL);
+}
+
static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val)
{
struct pci_dev *pdev = tp->pci_dev;
@@ -3822,6 +3850,7 @@ static void rtl_hw_start_8125d(struct rtl8169_private *tp)
static void rtl_hw_start_8126a(struct rtl8169_private *tp)
{
+ rtl_disable_zrxdc_timeout(tp);
rtl_set_def_aspm_entry_latency(tp);
rtl_hw_start_8125_common(tp);
}
@@ -5199,6 +5228,33 @@ static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr,
return 0;
}
+static int r8169_mdio_read_reg_c45(struct mii_bus *mii_bus, int addr,
+ int devnum, int regnum)
+{
+ struct rtl8169_private *tp = mii_bus->priv;
+
+ if (addr > 0)
+ return -ENODEV;
+
+ if (devnum == MDIO_MMD_VEND2 && regnum > MDIO_STAT2)
+ return r8168_phy_ocp_read(tp, regnum);
+
+ return 0;
+}
+
+static int r8169_mdio_write_reg_c45(struct mii_bus *mii_bus, int addr,
+ int devnum, int regnum, u16 val)
+{
+ struct rtl8169_private *tp = mii_bus->priv;
+
+ if (addr > 0 || devnum != MDIO_MMD_VEND2 || regnum <= MDIO_STAT2)
+ return -ENODEV;
+
+ r8168_phy_ocp_write(tp, regnum, val);
+
+ return 0;
+}
+
static int r8169_mdio_register(struct rtl8169_private *tp)
{
struct pci_dev *pdev = tp->pci_dev;
@@ -5222,12 +5278,18 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
new_bus->priv = tp;
new_bus->parent = &pdev->dev;
new_bus->irq[0] = PHY_MAC_INTERRUPT;
+ new_bus->phy_mask = GENMASK(31, 1);
snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x",
pci_domain_nr(pdev->bus), pci_dev_id(pdev));
new_bus->read = r8169_mdio_read_reg;
new_bus->write = r8169_mdio_write_reg;
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_40) {
+ new_bus->read_c45 = r8169_mdio_read_reg_c45;
+ new_bus->write_c45 = r8169_mdio_write_reg_c45;
+ }
+
ret = devm_mdiobus_register(&pdev->dev, new_bus);
if (ret)
return ret;
@@ -5251,9 +5313,9 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
/* mimic behavior of r8125/r8126 vendor drivers */
if (tp->mac_version == RTL_GIGA_MAC_VER_61)
- phy_set_eee_broken(tp->phydev,
- ETHTOOL_LINK_MODE_2500baseT_Full_BIT);
- phy_set_eee_broken(tp->phydev, ETHTOOL_LINK_MODE_5000baseT_Full_BIT);
+ phy_disable_eee_mode(tp->phydev,
+ ETHTOOL_LINK_MODE_2500baseT_Full_BIT);
+ phy_disable_eee_mode(tp->phydev, ETHTOOL_LINK_MODE_5000baseT_Full_BIT);
/* PHY will be woken up in rtl_open() */
phy_suspend(tp->phydev);
@@ -5326,6 +5388,9 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
/* RTL8168c */
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
return JUMBO_6K;
+ /* RTL8125/8126 */
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
+ return JUMBO_16K;
default:
return JUMBO_9K;
}
@@ -5360,7 +5425,7 @@ done:
/* register is set if system vendor successfully tested ASPM 1.2 */
static bool rtl_aspm_is_safe(struct rtl8169_private *tp)
{
- if (tp->mac_version >= RTL_GIGA_MAC_VER_61 &&
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_46 &&
r8168_mac_ocp_read(tp, 0xc0b2) & 0xf)
return true;
@@ -5409,11 +5474,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (region < 0)
return dev_err_probe(&pdev->dev, -ENODEV, "no MMIO resource found\n");
- rc = pcim_iomap_regions(pdev, BIT(region), KBUILD_MODNAME);
- if (rc < 0)
- return dev_err_probe(&pdev->dev, rc, "cannot remap MMIO, aborting\n");
-
- tp->mmio_addr = pcim_iomap_table(pdev)[region];
+ tp->mmio_addr = pcim_iomap_region(pdev, region, KBUILD_MODNAME);
+ if (IS_ERR(tp->mmio_addr))
+ return dev_err_probe(&pdev->dev, PTR_ERR(tp->mmio_addr),
+ "cannot remap MMIO, aborting\n");
txconfig = RTL_R32(tp, TxConfig);
if (txconfig == ~0U)
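The last r8169 hunk above switches to the single-BAR devres helper, which hands back the mapping (or an ERR_PTR) directly instead of requiring a second call to pcim_iomap_table(). A minimal probe-path sketch under that assumption (the function and the literal region name are illustrative):

#include <linux/err.h>
#include <linux/pci.h>

/* Illustrative sketch only: map one BAR with pcim_iomap_region() and rely
 * on devres to unmap it when the device goes away.
 */
static int example_map_bar(struct pci_dev *pdev, int bar)
{
	void __iomem *regs;

	regs = pcim_iomap_region(pdev, bar, "example-driver");
	if (IS_ERR(regs))
		return dev_err_probe(&pdev->dev, PTR_ERR(regs),
				     "cannot remap MMIO\n");

	/* regs stays valid for the lifetime of the managed device */
	return 0;
}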
diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
index 3bd11cb56294..2aacc1996796 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase_main.c
+++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
@@ -1501,7 +1501,10 @@ static void rtase_wait_for_quiescence(const struct net_device *dev)
static void rtase_sw_reset(struct net_device *dev)
{
struct rtase_private *tp = netdev_priv(dev);
+ struct rtase_ring *ring, *tmp;
+ struct rtase_int_vector *ivec;
int ret;
+ u32 i;
netif_stop_queue(dev);
netif_carrier_off(dev);
@@ -1512,6 +1515,13 @@ static void rtase_sw_reset(struct net_device *dev)
rtase_tx_clear(tp);
rtase_rx_clear(tp);
+ for (i = 0; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ list_for_each_entry_safe(ring, tmp, &ivec->ring_list,
+ ring_entry)
+ list_del(&ring->ring_entry);
+ }
+
ret = rtase_init_ring(dev);
if (ret) {
netdev_err(dev, "unable to init ring\n");
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index 6e4ef7af27bf..b4365906669f 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -179,8 +179,7 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
/* Reject requests with unsupported flags */
if (req->flags & ~(PTP_ENABLE_FEATURE |
PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
+ PTP_FALLING_EDGE))
return -EOPNOTSUPP;
if (req->index)
diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
index 72e7fcc56693..4c3e8cc5046f 100644
--- a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
+++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
@@ -29,8 +29,8 @@ static const struct rcar_gen4_ptp_reg_offset gen4_offs = {
static int rcar_gen4_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp);
- bool neg_adj = scaled_ppm < 0 ? true : false;
s64 addend = ptp_priv->default_addend;
+ bool neg_adj = scaled_ppm < 0;
s64 diff;
if (neg_adj)
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 84d09a8973b7..aba772e14555 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -1287,17 +1287,14 @@ static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev)
if (!ports)
return NULL;
- for_each_child_of_node(ports, port) {
+ for_each_available_child_of_node(ports, port) {
err = of_property_read_u32(port, "reg", &index);
if (err < 0) {
port = NULL;
goto out;
}
- if (index == rdev->etha->index) {
- if (!of_device_is_available(port))
- port = NULL;
+ if (index == rdev->etha->index)
break;
- }
}
out:
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index fe0bf1d3217a..36af94a2e062 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2576,7 +2576,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
rocker_carrier_init(rocker_port);
dev->features |= NETIF_F_SG;
- dev->netns_local = true;
+ dev->netns_immutable = true;
/* MTU range: 68 - 9000 */
dev->min_mtu = ROCKER_PORT_MIN_MTU;
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 3eb55dcfa8a6..c4c43434f314 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -38,8 +38,9 @@ config SFC_MTD
default y
help
This exposes the on-board flash and/or EEPROM as MTD devices
- (e.g. /dev/mtd1). This is required to update the firmware or
- the boot configuration under Linux.
+ (e.g. /dev/mtd1). This is required to update the boot
+	  configuration under Linux, or to use some older userland tools to
+ update the firmware.
config SFC_MCDI_MON
bool "Solarflare SFC9100-family hwmon support"
depends on SFC && HWMON && !(SFC=y && HWMON=m)
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 8f446b9bd5ee..d99039ec468d 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -7,7 +7,7 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \
mcdi_functions.o mcdi_filters.o mcdi_mon.o \
ef100.o ef100_nic.o ef100_netdev.o \
ef100_ethtool.o ef100_rx.o ef100_tx.o \
- efx_devlink.o
+ efx_devlink.o efx_reflash.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o \
mae.o tc.o tc_bindings.o tc_counters.o \
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 452009ed7a43..47349c148c0c 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3501,7 +3501,7 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
const struct efx_ef10_nvram_type_info *info;
- size_t size, erase_size, outlen;
+ size_t size, erase_size, write_size, outlen;
int type_idx = 0;
bool protected;
int rc;
@@ -3516,7 +3516,8 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
if (info->port != efx_port_num(efx))
return -ENODEV;
- rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
+ rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &write_size,
+ &protected);
if (rc)
return rc;
if (protected &&
@@ -3561,6 +3562,8 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
if (!erase_size)
part->common.mtd.flags |= MTD_NO_ERASE;
+ part->common.mtd.writesize = write_size;
+
return 0;
}
@@ -4416,6 +4419,7 @@ const struct efx_nic_type efx_x4_nic_type = {
.can_rx_scatter = true,
.always_rx_scatter = true,
.option_descriptors = true,
+ .flash_auto_partition = true,
.min_interrupt_mode = EFX_INT_MODE_MSIX,
.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
.offload_features = EF10_OFFLOAD_FEATURES,
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c
index 7f7d560cb2b4..d941f073f1eb 100644
--- a/drivers/net/ethernet/sfc/ef100_netdev.c
+++ b/drivers/net/ethernet/sfc/ef100_netdev.c
@@ -452,7 +452,6 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data)
NETIF_F_HIGHDMA | NETIF_F_ALL_TSO;
netif_set_tso_max_segs(net_dev,
ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT);
- efx->mdio.dev = net_dev;
rc = efx_ef100_init_datapath_caps(efx);
if (rc < 0)
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 650136dfc642..112e55b98ed3 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -476,28 +476,6 @@ void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
/**************************************************************************
*
- * ioctls
- *
- *************************************************************************/
-
-/* Net device ioctl
- * Context: process, rtnl_lock() held.
- */
-static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
-{
- struct efx_nic *efx = efx_netdev_priv(net_dev);
- struct mii_ioctl_data *data = if_mii(ifr);
-
- /* Convert phy_id from older PRTAD/DEVAD format */
- if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
- (data->phy_id & 0xfc00) == 0x0400)
- data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
-
- return mdio_mii_ioctl(&efx->mdio, data, cmd);
-}
-
-/**************************************************************************
- *
* Kernel net device interface
*
*************************************************************************/
@@ -593,7 +571,6 @@ static const struct net_device_ops efx_netdev_ops = {
.ndo_tx_timeout = efx_watchdog,
.ndo_start_xmit = efx_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_eth_ioctl = efx_ioctl,
.ndo_change_mtu = efx_change_mtu,
.ndo_set_mac_address = efx_set_mac_address,
.ndo_set_rx_mode = efx_set_rx_mode,
@@ -1201,7 +1178,6 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
rc = efx_init_struct(efx, pci_dev);
if (rc)
goto fail1;
- efx->mdio.dev = net_dev;
pci_info(pci_dev, "Solarflare NIC detected\n");
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index c88ec3e24836..5a14d94163b1 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -1003,6 +1003,7 @@ int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev)
INIT_LIST_HEAD(&efx->vf_reps);
INIT_WORK(&efx->mac_work, efx_mac_work);
init_waitqueue_head(&efx->flush_wq);
+ mutex_init(&efx->reflash_mutex);
efx->tx_queues_per_channel = 1;
efx->rxq_entries = EFX_DEFAULT_DMAQ_SIZE;
diff --git a/drivers/net/ethernet/sfc/efx_devlink.c b/drivers/net/ethernet/sfc/efx_devlink.c
index 3cd750820fdd..d842c60dfc10 100644
--- a/drivers/net/ethernet/sfc/efx_devlink.c
+++ b/drivers/net/ethernet/sfc/efx_devlink.c
@@ -19,6 +19,7 @@
#include "mae.h"
#include "ef100_rep.h"
#endif
+#include "efx_reflash.h"
struct efx_devlink {
struct efx_nic *efx;
@@ -615,7 +616,19 @@ static int efx_devlink_info_get(struct devlink *devlink,
return 0;
}
+static int efx_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_devlink *devlink_private = devlink_priv(devlink);
+ struct efx_nic *efx = devlink_private->efx;
+
+ return efx_reflash_flash_firmware(efx, params->fw, extack);
+}
+
static const struct devlink_ops sfc_devlink_ops = {
+ .supported_flash_update_params = 0,
+ .flash_update = efx_devlink_flash_update,
.info_get = efx_devlink_info_get,
};
diff --git a/drivers/net/ethernet/sfc/efx_reflash.c b/drivers/net/ethernet/sfc/efx_reflash.c
new file mode 100644
index 000000000000..b12e95f1c80a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_reflash.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for AMD network controllers and boards
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/crc32.h>
+#include <net/devlink.h>
+#include "efx_reflash.h"
+#include "net_driver.h"
+#include "fw_formats.h"
+#include "mcdi_pcol.h"
+#include "mcdi.h"
+
+/* Try to parse a Reflash header at the specified offset */
+static bool efx_reflash_parse_reflash_header(const struct firmware *fw,
+ size_t header_offset, u32 *type,
+ u32 *subtype, const u8 **data,
+ size_t *data_size)
+{
+ size_t header_end, trailer_offset, trailer_end;
+ u32 magic, version, payload_size, header_len;
+ const u8 *header, *trailer;
+ u32 expected_crc, crc;
+
+ if (check_add_overflow(header_offset, EFX_REFLASH_HEADER_LENGTH_OFST +
+ EFX_REFLASH_HEADER_LENGTH_LEN,
+ &header_end))
+ return false;
+ if (fw->size < header_end)
+ return false;
+
+ header = fw->data + header_offset;
+ magic = get_unaligned_le32(header + EFX_REFLASH_HEADER_MAGIC_OFST);
+ if (magic != EFX_REFLASH_HEADER_MAGIC_VALUE)
+ return false;
+
+ version = get_unaligned_le32(header + EFX_REFLASH_HEADER_VERSION_OFST);
+ if (version != EFX_REFLASH_HEADER_VERSION_VALUE)
+ return false;
+
+ payload_size = get_unaligned_le32(header + EFX_REFLASH_HEADER_PAYLOAD_SIZE_OFST);
+ header_len = get_unaligned_le32(header + EFX_REFLASH_HEADER_LENGTH_OFST);
+ if (check_add_overflow(header_offset, header_len, &trailer_offset) ||
+ check_add_overflow(trailer_offset, payload_size, &trailer_offset) ||
+ check_add_overflow(trailer_offset, EFX_REFLASH_TRAILER_LEN,
+ &trailer_end))
+ return false;
+ if (fw->size < trailer_end)
+ return false;
+
+ trailer = fw->data + trailer_offset;
+ expected_crc = get_unaligned_le32(trailer + EFX_REFLASH_TRAILER_CRC_OFST);
+ /* Addition could overflow u32, but not size_t since we already
+ * checked trailer_offset didn't overflow. So cast to size_t first.
+ */
+ crc = crc32_le(0, header, (size_t)header_len + payload_size);
+ if (crc != expected_crc)
+ return false;
+
+ *type = get_unaligned_le32(header + EFX_REFLASH_HEADER_FIRMWARE_TYPE_OFST);
+ *subtype = get_unaligned_le32(header + EFX_REFLASH_HEADER_FIRMWARE_SUBTYPE_OFST);
+ if (*type == EFX_REFLASH_FIRMWARE_TYPE_BUNDLE) {
+ /* All the bundle data is written verbatim to NVRAM */
+ *data = fw->data;
+ *data_size = fw->size;
+ } else {
+ /* Other payload types strip the reflash header and trailer
+ * from the data written to NVRAM
+ */
+ *data = header + header_len;
+ *data_size = payload_size;
+ }
+
+ return true;
+}
+
+/* Map from FIRMWARE_TYPE to NVRAM_PARTITION_TYPE */
+static int efx_reflash_partition_type(u32 type, u32 subtype,
+ u32 *partition_type,
+ u32 *partition_subtype)
+{
+ int rc = 0;
+
+ switch (type) {
+ case EFX_REFLASH_FIRMWARE_TYPE_BOOTROM:
+ *partition_type = NVRAM_PARTITION_TYPE_EXPANSION_ROM;
+ *partition_subtype = subtype;
+ break;
+ case EFX_REFLASH_FIRMWARE_TYPE_BUNDLE:
+ *partition_type = NVRAM_PARTITION_TYPE_BUNDLE;
+ *partition_subtype = subtype;
+ break;
+ default:
+ /* Not supported */
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+/* Try to parse a SmartNIC image header at the specified offset */
+static bool efx_reflash_parse_snic_header(const struct firmware *fw,
+ size_t header_offset,
+ u32 *partition_type,
+ u32 *partition_subtype,
+ const u8 **data, size_t *data_size)
+{
+ u32 magic, version, payload_size, header_len, expected_crc, crc;
+ size_t header_end, payload_end;
+ const u8 *header;
+
+ if (check_add_overflow(header_offset, EFX_SNICIMAGE_HEADER_MINLEN,
+ &header_end) ||
+ fw->size < header_end)
+ return false;
+
+ header = fw->data + header_offset;
+ magic = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_MAGIC_OFST);
+ if (magic != EFX_SNICIMAGE_HEADER_MAGIC_VALUE)
+ return false;
+
+ version = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_VERSION_OFST);
+ if (version != EFX_SNICIMAGE_HEADER_VERSION_VALUE)
+ return false;
+
+ header_len = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_LENGTH_OFST);
+ if (check_add_overflow(header_offset, header_len, &header_end))
+ return false;
+ payload_size = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_PAYLOAD_SIZE_OFST);
+ if (check_add_overflow(header_end, payload_size, &payload_end) ||
+ fw->size < payload_end)
+ return false;
+
+ expected_crc = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_CRC_OFST);
+
+ /* Calculate CRC omitting the expected CRC field itself */
+ crc = crc32_le(~0, header, EFX_SNICIMAGE_HEADER_CRC_OFST);
+ crc = ~crc32_le(crc,
+ header + EFX_SNICIMAGE_HEADER_CRC_OFST +
+ EFX_SNICIMAGE_HEADER_CRC_LEN,
+ header_len + payload_size - EFX_SNICIMAGE_HEADER_CRC_OFST -
+ EFX_SNICIMAGE_HEADER_CRC_LEN);
+ if (crc != expected_crc)
+ return false;
+
+ *partition_type =
+ get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_PARTITION_TYPE_OFST);
+ *partition_subtype =
+ get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_PARTITION_SUBTYPE_OFST);
+ *data = fw->data;
+ *data_size = fw->size;
+ return true;
+}
+
+/* Try to parse a SmartNIC bundle header at the specified offset */
+static bool efx_reflash_parse_snic_bundle_header(const struct firmware *fw,
+ size_t header_offset,
+ u32 *partition_type,
+ u32 *partition_subtype,
+ const u8 **data,
+ size_t *data_size)
+{
+ u32 magic, version, bundle_type, header_len, expected_crc, crc;
+ size_t header_end;
+ const u8 *header;
+
+ if (check_add_overflow(header_offset, EFX_SNICBUNDLE_HEADER_LEN,
+ &header_end))
+ return false;
+ if (fw->size < header_end)
+ return false;
+
+ header = fw->data + header_offset;
+ magic = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_MAGIC_OFST);
+ if (magic != EFX_SNICBUNDLE_HEADER_MAGIC_VALUE)
+ return false;
+
+ version = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_VERSION_OFST);
+ if (version != EFX_SNICBUNDLE_HEADER_VERSION_VALUE)
+ return false;
+
+ bundle_type = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_BUNDLE_TYPE_OFST);
+ if (bundle_type != NVRAM_PARTITION_TYPE_BUNDLE)
+ return false;
+
+ header_len = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_LENGTH_OFST);
+ if (header_len != EFX_SNICBUNDLE_HEADER_LEN)
+ return false;
+
+ expected_crc = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_CRC_OFST);
+ crc = ~crc32_le(~0, header, EFX_SNICBUNDLE_HEADER_CRC_OFST);
+ if (crc != expected_crc)
+ return false;
+
+ *partition_type = NVRAM_PARTITION_TYPE_BUNDLE;
+ *partition_subtype = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_BUNDLE_SUBTYPE_OFST);
+ *data = fw->data;
+ *data_size = fw->size;
+ return true;
+}
+
+/* Try to find a valid firmware payload in the firmware data.
+ * When we recognise a valid header, we parse it for the partition type
+ * (so we know where to ask the MC to write it to) and the location of
+ * the data blob to write.
+ */
+static int efx_reflash_parse_firmware_data(const struct firmware *fw,
+ u32 *partition_type,
+ u32 *partition_subtype,
+ const u8 **data, size_t *data_size)
+{
+ size_t header_offset;
+ u32 type, subtype;
+
+ /* Some packaging formats (such as CMS/PKCS#7 signed images)
+ * prepend a header for which finding the size is a non-trivial
+ * task, so step through the firmware data until we find a valid
+ * header.
+ *
+ * The checks are intended to reject firmware data that is clearly not
+ * in the expected format. They do not need to be exhaustive as the
+ * running firmware will perform its own comprehensive validity and
+ * compatibility checks during the update procedure.
+ *
+ * Firmware packages may contain multiple reflash images, e.g. a
+ * bundle containing one or more other images. Only check the
+ * outermost container by stopping after the first candidate image
+	 * found even if it is for an unsupported partition type.
+ */
+ for (header_offset = 0; header_offset < fw->size; header_offset++) {
+ if (efx_reflash_parse_snic_bundle_header(fw, header_offset,
+ partition_type,
+ partition_subtype,
+ data, data_size))
+ return 0;
+
+ if (efx_reflash_parse_snic_header(fw, header_offset,
+ partition_type,
+ partition_subtype, data,
+ data_size))
+ return 0;
+
+ if (efx_reflash_parse_reflash_header(fw, header_offset, &type,
+ &subtype, data, data_size))
+ return efx_reflash_partition_type(type, subtype,
+ partition_type,
+ partition_subtype);
+ }
+
+ return -EINVAL;
+}
+
+/* Limit the number of status updates during the erase or write phases */
+#define EFX_DEVLINK_STATUS_UPDATE_COUNT 50
+
+/* Expected timeout for the efx_mcdi_nvram_update_finish_polled() */
+#define EFX_DEVLINK_UPDATE_FINISH_TIMEOUT 900
+
+/* Ideal erase chunk size. This is a balance between minimising the number of
+ * MCDI requests to erase an entire partition and avoiding tripping the MCDI
+ * RPC timeout.
+ */
+#define EFX_NVRAM_ERASE_IDEAL_CHUNK_SIZE (64 * 1024)
+
+static int efx_reflash_erase_partition(struct efx_nic *efx,
+ struct netlink_ext_ack *extack,
+ struct devlink *devlink, u32 type,
+ size_t partition_size,
+ size_t align)
+{
+ size_t chunk, offset, next_update;
+ int rc;
+
+ /* Partitions that cannot be erased or do not require erase before
+	 * write are advertised with an erase alignment/sector size of zero.
+ */
+ if (align == 0)
+ /* Nothing to do */
+ return 0;
+
+ if (partition_size % align)
+ return -EINVAL;
+
+ /* Erase the entire NVRAM partition a chunk at a time to avoid
+ * potentially tripping the MCDI RPC timeout.
+ */
+ if (align >= EFX_NVRAM_ERASE_IDEAL_CHUNK_SIZE)
+ chunk = align;
+ else
+ chunk = rounddown(EFX_NVRAM_ERASE_IDEAL_CHUNK_SIZE, align);
+
+ for (offset = 0, next_update = 0; offset < partition_size; offset += chunk) {
+ if (offset >= next_update) {
+ devlink_flash_update_status_notify(devlink, "Erasing",
+ NULL, offset,
+ partition_size);
+ next_update += partition_size / EFX_DEVLINK_STATUS_UPDATE_COUNT;
+ }
+
+ chunk = min_t(size_t, partition_size - offset, chunk);
+ rc = efx_mcdi_nvram_erase(efx, type, offset, chunk);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Erase failed for NVRAM partition %#x at %#zx-%#zx",
+ type, offset, offset + chunk - 1);
+ return rc;
+ }
+ }
+
+ devlink_flash_update_status_notify(devlink, "Erasing", NULL,
+ partition_size, partition_size);
+
+ return 0;
+}
+
+static int efx_reflash_write_partition(struct efx_nic *efx,
+ struct netlink_ext_ack *extack,
+ struct devlink *devlink, u32 type,
+ const u8 *data, size_t data_size,
+ size_t align)
+{
+ size_t write_max, chunk, offset, next_update;
+ int rc;
+
+ if (align == 0)
+ return -EINVAL;
+
+ /* Write the NVRAM partition in chunks that are the largest multiple
+ * of the partition's required write alignment that will fit into the
+ * MCDI NVRAM_WRITE RPC payload.
+ */
+ if (efx->type->mcdi_max_ver < 2)
+ write_max = MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN *
+ MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM;
+ else
+ write_max = MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN *
+ MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM_MCDI2;
+ chunk = rounddown(write_max, align);
+
+ for (offset = 0, next_update = 0; offset + chunk <= data_size; offset += chunk) {
+ if (offset >= next_update) {
+ devlink_flash_update_status_notify(devlink, "Writing",
+ NULL, offset,
+ data_size);
+ next_update += data_size / EFX_DEVLINK_STATUS_UPDATE_COUNT;
+ }
+
+ rc = efx_mcdi_nvram_write(efx, type, offset, data + offset, chunk);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Write failed for NVRAM partition %#x at %#zx-%#zx",
+ type, offset, offset + chunk - 1);
+ return rc;
+ }
+ }
+
+ /* Round up left over data to satisfy write alignment */
+ if (offset < data_size) {
+ size_t remaining = data_size - offset;
+ u8 *buf;
+
+ if (offset >= next_update)
+ devlink_flash_update_status_notify(devlink, "Writing",
+ NULL, offset,
+ data_size);
+
+ chunk = roundup(remaining, align);
+ buf = kmalloc(chunk, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf, data + offset, remaining);
+ memset(buf + remaining, 0xFF, chunk - remaining);
+ rc = efx_mcdi_nvram_write(efx, type, offset, buf, chunk);
+ kfree(buf);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Write failed for NVRAM partition %#x at %#zx-%#zx",
+ type, offset, offset + chunk - 1);
+ return rc;
+ }
+ }
+
+ devlink_flash_update_status_notify(devlink, "Writing", NULL, data_size,
+ data_size);
+
+ return 0;
+}
+
+int efx_reflash_flash_firmware(struct efx_nic *efx, const struct firmware *fw,
+ struct netlink_ext_ack *extack)
+{
+ size_t data_size, size, erase_align, write_align;
+ struct devlink *devlink = efx->devlink;
+ u32 type, data_subtype, subtype;
+ const u8 *data;
+ bool protected;
+ int rc;
+
+ if (!efx_has_cap(efx, BUNDLE_UPDATE)) {
+ NL_SET_ERR_MSG_MOD(extack, "NVRAM bundle updates are not supported by the firmware");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&efx->reflash_mutex);
+
+ devlink_flash_update_status_notify(devlink, "Checking update", NULL, 0, 0);
+
+ if (efx->type->flash_auto_partition) {
+ /* NIC wants entire FW file including headers;
+ * FW will validate 'subtype' if there is one
+ */
+ type = NVRAM_PARTITION_TYPE_AUTO;
+ data = fw->data;
+ data_size = fw->size;
+ } else {
+ rc = efx_reflash_parse_firmware_data(fw, &type, &data_subtype, &data,
+ &data_size);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Firmware image validation check failed");
+ goto out_unlock;
+ }
+
+ rc = efx_mcdi_nvram_metadata(efx, type, &subtype, NULL, NULL, 0);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Metadata query for NVRAM partition %#x failed",
+ type);
+ goto out_unlock;
+ }
+
+ if (subtype != data_subtype) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Firmware image is not appropriate for this adapter");
+ rc = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
+ rc = efx_mcdi_nvram_info(efx, type, &size, &erase_align, &write_align,
+ &protected);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Info query for NVRAM partition %#x failed",
+ type);
+ goto out_unlock;
+ }
+
+ if (protected) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "NVRAM partition %#x is protected",
+ type);
+ rc = -EPERM;
+ goto out_unlock;
+ }
+
+ if (write_align == 0) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "NVRAM partition %#x is not writable",
+ type);
+ rc = -EACCES;
+ goto out_unlock;
+ }
+
+ if (erase_align != 0 && size % erase_align) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "NVRAM partition %#x has a bad partition table entry, can't erase it",
+ type);
+ rc = -EACCES;
+ goto out_unlock;
+ }
+
+ if (data_size > size) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Firmware image is too big for NVRAM partition %#x",
+ type);
+ rc = -EFBIG;
+ goto out_unlock;
+ }
+
+ devlink_flash_update_status_notify(devlink, "Starting update", NULL, 0, 0);
+
+ rc = efx_mcdi_nvram_update_start(efx, type);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Update start request for NVRAM partition %#x failed",
+ type);
+ goto out_unlock;
+ }
+
+ rc = efx_reflash_erase_partition(efx, extack, devlink, type, size,
+ erase_align);
+ if (rc)
+ goto out_update_finish;
+
+ rc = efx_reflash_write_partition(efx, extack, devlink, type, data,
+ data_size, write_align);
+ if (rc)
+ goto out_update_finish;
+
+ devlink_flash_update_timeout_notify(devlink, "Finishing update", NULL,
+ EFX_DEVLINK_UPDATE_FINISH_TIMEOUT);
+
+out_update_finish:
+ if (rc)
+ /* Don't obscure the return code from an earlier failure */
+ efx_mcdi_nvram_update_finish(efx, type, EFX_UPDATE_FINISH_ABORT);
+ else
+ rc = efx_mcdi_nvram_update_finish_polled(efx, type);
+out_unlock:
+ mutex_unlock(&efx->reflash_mutex);
+ devlink_flash_update_status_notify(devlink, rc ? "Update failed" :
+ "Update complete",
+ NULL, 0, 0);
+ return rc;
+}
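For reference, the erase loop above works through the partition in chunks chosen as the largest whole number of sectors that stays at or below the 64 KiB ideal, falling back to a single sector when sectors are larger than that. A small sketch restating that choice (the helper name is made up; the constant mirrors EFX_NVRAM_ERASE_IDEAL_CHUNK_SIZE):

#include <linux/math.h>
#include <linux/types.h>

/* Illustrative sketch only: pick an erase chunk that is a whole number of
 * sectors and close to the 64 KiB ideal.
 */
static size_t example_pick_erase_chunk(size_t align)
{
	const size_t ideal = 64 * 1024;

	if (align >= ideal)
		return align;		/* one sector per MCDI erase */

	return rounddown(ideal, align);	/* largest multiple <= 64 KiB */
}

With a 4 KiB sector size this yields 64 KiB chunks; with a 128 KiB sector size it erases one sector per request.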
diff --git a/drivers/net/ethernet/sfc/efx_reflash.h b/drivers/net/ethernet/sfc/efx_reflash.h
new file mode 100644
index 000000000000..3dffac565161
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_reflash.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for AMD network controllers and boards
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef _EFX_REFLASH_H
+#define _EFX_REFLASH_H
+
+#include "net_driver.h"
+#include <linux/firmware.h>
+
+int efx_reflash_flash_firmware(struct efx_nic *efx, const struct firmware *fw,
+ struct netlink_ext_ack *extack);
+
+#endif /* _EFX_REFLASH_H */
diff --git a/drivers/net/ethernet/sfc/fw_formats.h b/drivers/net/ethernet/sfc/fw_formats.h
new file mode 100644
index 000000000000..cbc350c96013
--- /dev/null
+++ b/drivers/net/ethernet/sfc/fw_formats.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for AMD network controllers and boards
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef _EFX_FW_FORMATS_H
+#define _EFX_FW_FORMATS_H
+
+/* Header layouts of firmware update images recognised by Efx NICs.
+ * The sources-of-truth for these layouts are AMD internal documents
+ * and sfregistry headers, neither of which are available externally
+ * nor usable directly by the driver.
+ *
+ * While each format includes a 'magic number', these are at different
+ * offsets in the various formats, and a legal header for one format
+ * could have the right value in whichever field occupies that offset
+ * to match another format's magic.
+ * Besides, some packaging formats (such as CMS/PKCS#7 signed images)
+ * prepend a header for which finding the size is a non-trivial task;
+ * rather than trying to parse those headers, we search byte-by-byte
+ * through the provided firmware image looking for a valid header.
+ * Thus, format recognition has to include validation of the checksum
+ * field, even though the firmware will validate that itself before
+ * applying the image.
+ */
+
+/* EF10 (Medford2, X2) "reflash" header format. Defined in SF-121352-AN */
+#define EFX_REFLASH_HEADER_MAGIC_OFST 0
+#define EFX_REFLASH_HEADER_MAGIC_LEN 4
+#define EFX_REFLASH_HEADER_MAGIC_VALUE 0x106F1A5
+
+#define EFX_REFLASH_HEADER_VERSION_OFST 4
+#define EFX_REFLASH_HEADER_VERSION_LEN 4
+#define EFX_REFLASH_HEADER_VERSION_VALUE 4
+
+#define EFX_REFLASH_HEADER_FIRMWARE_TYPE_OFST 8
+#define EFX_REFLASH_HEADER_FIRMWARE_TYPE_LEN 4
+#define EFX_REFLASH_FIRMWARE_TYPE_BOOTROM 0x2
+#define EFX_REFLASH_FIRMWARE_TYPE_BUNDLE 0xd
+
+#define EFX_REFLASH_HEADER_FIRMWARE_SUBTYPE_OFST 12
+#define EFX_REFLASH_HEADER_FIRMWARE_SUBTYPE_LEN 4
+
+#define EFX_REFLASH_HEADER_PAYLOAD_SIZE_OFST 16
+#define EFX_REFLASH_HEADER_PAYLOAD_SIZE_LEN 4
+
+#define EFX_REFLASH_HEADER_LENGTH_OFST 20
+#define EFX_REFLASH_HEADER_LENGTH_LEN 4
+
+/* Reflash trailer */
+#define EFX_REFLASH_TRAILER_CRC_OFST 0
+#define EFX_REFLASH_TRAILER_CRC_LEN 4
+
+#define EFX_REFLASH_TRAILER_LEN \
+ (EFX_REFLASH_TRAILER_CRC_OFST + EFX_REFLASH_TRAILER_CRC_LEN)
+
+/* EF100 "SmartNIC image" header format.
+ * Defined in sfregistry "src/layout/snic_image_hdr.h".
+ */
+#define EFX_SNICIMAGE_HEADER_MAGIC_OFST 16
+#define EFX_SNICIMAGE_HEADER_MAGIC_LEN 4
+#define EFX_SNICIMAGE_HEADER_MAGIC_VALUE 0x541C057A
+
+#define EFX_SNICIMAGE_HEADER_VERSION_OFST 20
+#define EFX_SNICIMAGE_HEADER_VERSION_LEN 4
+#define EFX_SNICIMAGE_HEADER_VERSION_VALUE 1
+
+#define EFX_SNICIMAGE_HEADER_LENGTH_OFST 24
+#define EFX_SNICIMAGE_HEADER_LENGTH_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_PARTITION_TYPE_OFST 36
+#define EFX_SNICIMAGE_HEADER_PARTITION_TYPE_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_PARTITION_SUBTYPE_OFST 40
+#define EFX_SNICIMAGE_HEADER_PARTITION_SUBTYPE_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_PAYLOAD_SIZE_OFST 60
+#define EFX_SNICIMAGE_HEADER_PAYLOAD_SIZE_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_CRC_OFST 64
+#define EFX_SNICIMAGE_HEADER_CRC_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_MINLEN 256
+
+/* EF100 "SmartNIC bundle" header format. Defined in SF-122606-TC */
+#define EFX_SNICBUNDLE_HEADER_MAGIC_OFST 0
+#define EFX_SNICBUNDLE_HEADER_MAGIC_LEN 4
+#define EFX_SNICBUNDLE_HEADER_MAGIC_VALUE 0xB1001001
+
+#define EFX_SNICBUNDLE_HEADER_VERSION_OFST 4
+#define EFX_SNICBUNDLE_HEADER_VERSION_LEN 4
+#define EFX_SNICBUNDLE_HEADER_VERSION_VALUE 1
+
+#define EFX_SNICBUNDLE_HEADER_BUNDLE_TYPE_OFST 8
+#define EFX_SNICBUNDLE_HEADER_BUNDLE_TYPE_LEN 4
+
+#define EFX_SNICBUNDLE_HEADER_BUNDLE_SUBTYPE_OFST 12
+#define EFX_SNICBUNDLE_HEADER_BUNDLE_SUBTYPE_LEN 4
+
+#define EFX_SNICBUNDLE_HEADER_LENGTH_OFST 20
+#define EFX_SNICBUNDLE_HEADER_LENGTH_LEN 4
+
+#define EFX_SNICBUNDLE_HEADER_CRC_OFST 224
+#define EFX_SNICBUNDLE_HEADER_CRC_LEN 4
+
+#define EFX_SNICBUNDLE_HEADER_LEN \
+ (EFX_SNICBUNDLE_HEADER_CRC_OFST + EFX_SNICBUNDLE_HEADER_CRC_LEN)
+
+#endif /* _EFX_FW_FORMATS_H */
diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c
index 50f097487b14..6fd0c1e9a7d5 100644
--- a/drivers/net/ethernet/sfc/mae.c
+++ b/drivers/net/ethernet/sfc/mae.c
@@ -755,7 +755,7 @@ int efx_mae_match_check_caps_lhs(struct efx_nic *efx,
rc = efx_mae_match_check_cap_typ(supported_fields[MAE_FIELD_INGRESS_PORT],
ingress_port_mask_type);
if (rc) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "No support for %s mask in field %s\n",
+ NL_SET_ERR_MSG_FMT_MOD(extack, "No support for %s mask in field %s",
mask_type_name(ingress_port_mask_type),
"ingress_port");
return rc;
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index d461b1a6ce81..dbd2ee915838 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1625,12 +1625,15 @@ fail:
return rc;
}
+#define EFX_MCDI_NVRAM_DEFAULT_WRITE_LEN 128
+
int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
size_t *size_out, size_t *erase_size_out,
- bool *protected_out)
+ size_t *write_size_out, bool *protected_out)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_V2_OUT_LEN);
+ size_t write_size = 0;
size_t outlen;
int rc;
@@ -1645,6 +1648,12 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
goto fail;
}
+ if (outlen >= MC_CMD_NVRAM_INFO_V2_OUT_LEN)
+ write_size = MCDI_DWORD(outbuf, NVRAM_INFO_V2_OUT_WRITESIZE);
+ else
+ write_size = EFX_MCDI_NVRAM_DEFAULT_WRITE_LEN;
+
+ *write_size_out = write_size;
*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
*protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
@@ -2163,11 +2172,9 @@ out_free:
return rc;
}
-#ifdef CONFIG_SFC_MTD
-
#define EFX_MCDI_NVRAM_LEN_MAX 128
-static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
+int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN);
int rc;
@@ -2185,6 +2192,8 @@ static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
return rc;
}
+#ifdef CONFIG_SFC_MTD
+
static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
loff_t offset, u8 *buffer, size_t length)
{
@@ -2209,13 +2218,20 @@ static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
return 0;
}
-static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
- loff_t offset, const u8 *buffer, size_t length)
+#endif /* CONFIG_SFC_MTD */
+
+int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
+ loff_t offset, const u8 *buffer, size_t length)
{
- MCDI_DECLARE_BUF(inbuf,
- MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
+ efx_dword_t *inbuf;
+ size_t inlen;
int rc;
+ inlen = ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4);
+ inbuf = kzalloc(inlen, GFP_KERNEL);
+ if (!inbuf)
+ return -ENOMEM;
+
MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
@@ -2223,14 +2239,14 @@ static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
- ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
- NULL, 0, NULL);
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, inlen, NULL, 0, NULL);
+ kfree(inbuf);
+
return rc;
}
-static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
- loff_t offset, size_t length)
+int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, loff_t offset,
+ size_t length)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
int rc;
@@ -2246,7 +2262,8 @@ static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
return rc;
}
-static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
+int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type,
+ enum efx_update_finish_mode mode)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN);
@@ -2254,22 +2271,41 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
int rc, rc2;
MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
- /* Always set this flag. Old firmware ignores it */
- MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS,
+
+ /* Old firmware doesn't support background update finish and abort
+	 * operations. Fall back to waiting if the requested mode is not
+ * supported.
+ */
+ if (!efx_has_cap(efx, NVRAM_UPDATE_POLL_VERIFY_RESULT) ||
+ (!efx_has_cap(efx, NVRAM_UPDATE_ABORT_SUPPORTED) &&
+ mode == EFX_UPDATE_FINISH_ABORT))
+ mode = EFX_UPDATE_FINISH_WAIT;
+
+ MCDI_POPULATE_DWORD_4(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS,
NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT,
- 1);
+ (mode != EFX_UPDATE_FINISH_ABORT),
+ NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND,
+ (mode == EFX_UPDATE_FINISH_BACKGROUND),
+ NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT,
+ (mode == EFX_UPDATE_FINISH_POLL),
+ NVRAM_UPDATE_FINISH_V2_IN_FLAG_ABORT,
+ (mode == EFX_UPDATE_FINISH_ABORT));
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (!rc && outlen >= MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) {
rc2 = MCDI_DWORD(outbuf, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE);
- if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS)
+ if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS &&
+ rc2 != MC_CMD_NVRAM_VERIFY_RC_PENDING)
netif_err(efx, drv, efx->net_dev,
"NVRAM update failed verification with code 0x%x\n",
rc2);
switch (rc2) {
case MC_CMD_NVRAM_VERIFY_RC_SUCCESS:
break;
+ case MC_CMD_NVRAM_VERIFY_RC_PENDING:
+ rc = -EAGAIN;
+ break;
case MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED:
case MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED:
case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED:
@@ -2284,6 +2320,8 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
case MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES:
case MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS:
case MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH:
+ case MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED:
+ case MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE:
rc = -EPERM;
break;
default:
@@ -2296,6 +2334,42 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
return rc;
}
+#define EFX_MCDI_NVRAM_UPDATE_FINISH_INITIAL_POLL_DELAY_MS 5
+#define EFX_MCDI_NVRAM_UPDATE_FINISH_MAX_POLL_DELAY_MS 5000
+#define EFX_MCDI_NVRAM_UPDATE_FINISH_RETRIES 185
+
+int efx_mcdi_nvram_update_finish_polled(struct efx_nic *efx, unsigned int type)
+{
+ unsigned int delay = EFX_MCDI_NVRAM_UPDATE_FINISH_INITIAL_POLL_DELAY_MS;
+ unsigned int retry = 0;
+ int rc;
+
+ /* NVRAM updates can take a long time (e.g. up to 1 minute for bundle
+ * images). Polling for NVRAM update completion ensures that other MCDI
+ * commands can be issued before the background NVRAM update completes.
+ *
+ * The initial call either completes the update synchronously, or
+ * returns -EAGAIN to indicate processing is continuing. In the latter
+ * case, we poll for at least 900 seconds, at increasing intervals
+ * (5ms, 50ms, 500ms, 5s).
+ */
+ rc = efx_mcdi_nvram_update_finish(efx, type, EFX_UPDATE_FINISH_BACKGROUND);
+ while (rc == -EAGAIN) {
+ if (retry > EFX_MCDI_NVRAM_UPDATE_FINISH_RETRIES)
+ return -ETIMEDOUT;
+ retry++;
+
+ msleep(delay);
+ if (delay < EFX_MCDI_NVRAM_UPDATE_FINISH_MAX_POLL_DELAY_MS)
+ delay *= 10;
+
+ rc = efx_mcdi_nvram_update_finish(efx, type, EFX_UPDATE_FINISH_POLL);
+ }
+ return rc;
+}
+
+#ifdef CONFIG_SFC_MTD
+
int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
size_t len, size_t *retlen, u8 *buffer)
{
@@ -2389,7 +2463,8 @@ int efx_mcdi_mtd_sync(struct mtd_info *mtd)
if (part->updating) {
part->updating = false;
- rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
+ rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type,
+ EFX_UPDATE_FINISH_WAIT);
}
return rc;
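The efx_mcdi_nvram_update_finish_polled() hunk above polls with a delay that grows by a factor of ten per retry and is capped at five seconds. A small standalone sketch of that schedule, useful for seeing that the 185-retry bound comfortably covers the roughly 900-second budget quoted in the comment (plain user-space C, not driver code; the numbers mirror the macros in the hunk):

#include <stdio.h>

/* Illustrative sketch only: sum the sleep schedule used by the polled
 * NVRAM update finish (5 ms, 50 ms, 500 ms, then 5 s per retry).
 */
int main(void)
{
	unsigned int delay_ms = 5;	/* initial poll delay */
	unsigned long total_ms = 0;
	unsigned int retry;

	for (retry = 0; retry < 185; retry++) {	/* retry limit */
		total_ms += delay_ms;
		if (delay_ms < 5000)		/* max poll delay */
			delay_ms *= 10;
	}

	printf("total sleep before timeout: ~%lu s\n", total_ms / 1000);
	return 0;
}

This prints roughly 910 seconds, consistent with the 900-second completion timeout the driver advertises to devlink.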
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index cdb17d7c147f..3755cd3fe1e6 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -392,7 +392,7 @@ int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq);
int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
size_t *size_out, size_t *erase_size_out,
- bool *protected_out);
+ size_t *write_size_out, bool *protected_out);
int efx_new_mcdi_nvram_test_all(struct efx_nic *efx);
int efx_mcdi_nvram_metadata(struct efx_nic *efx, unsigned int type,
u32 *subtype, u16 version[4], char *desc,
@@ -424,6 +424,26 @@ static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
#endif
+int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type);
+int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
+ loff_t offset, const u8 *buffer, size_t length);
+int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
+ loff_t offset, size_t length);
+int efx_mcdi_nvram_metadata(struct efx_nic *efx, unsigned int type,
+ u32 *subtype, u16 version[4], char *desc,
+ size_t descsize);
+
+enum efx_update_finish_mode {
+ EFX_UPDATE_FINISH_WAIT,
+ EFX_UPDATE_FINISH_BACKGROUND,
+ EFX_UPDATE_FINISH_POLL,
+ EFX_UPDATE_FINISH_ABORT,
+};
+
+int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type,
+ enum efx_update_finish_mode mode);
+int efx_mcdi_nvram_update_finish_polled(struct efx_nic *efx, unsigned int type);
+
#ifdef CONFIG_SFC_MTD
int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len,
size_t *retlen, u8 *buffer);
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index cd297e19cddc..9cb339c461fb 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -72,19 +72,19 @@
* | \------- Error
* \------------------------------ Resync (always set)
*
- * The client writes it's request into MC shared memory, and rings the
- * doorbell. Each request is completed by either by the MC writing
+ * The client writes its request into MC shared memory, and rings the
+ * doorbell. Each request is completed either by the MC writing
* back into shared memory, or by writing out an event.
*
* All MCDI commands support completion by shared memory response. Each
* request may also contain additional data (accounted for by HEADER.LEN),
- * and some response's may also contain additional data (again, accounted
+ * and some responses may also contain additional data (again, accounted
* for by HEADER.LEN).
*
* Some MCDI commands support completion by event, in which any associated
* response data is included in the event.
*
- * The protocol requires one response to be delivered for every request, a
+ * The protocol requires one response to be delivered for every request; a
* request should not be sent unless the response for the previous request
* has been received (either by polling shared memory, or by receiving
* an event).
@@ -165,6 +165,7 @@
#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
+
#define MC_CMD_ERR_CODE_OFST 0
#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4
@@ -321,7 +322,7 @@
/* enum: The requesting client is not a function */
#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c
/* enum: The requested operation might require the command to be passed between
- * MCs, and thetransport doesn't support that. Should only ever been seen over
+ * MCs, and the transport doesn't support that. Should only ever been seen over
* the UART.
*/
#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
@@ -358,7 +359,7 @@
* sub-variant switching.
*/
#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
-/* enum: The clock whose frequency you've attempted to set set doesn't exist on
+/* enum: The clock whose frequency you've attempted to set doesn't exist on
* this NIC
*/
#define MC_CMD_ERR_NO_CLOCK 0x1015
@@ -387,25 +388,6 @@
*/
#define MC_CMD_ERR_PIOBUFS_PRESENT 0x101b
-/* MC_CMD_RESOURCE_SPECIFIER enum */
-/* enum: Any */
-#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
-#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe /* enum */
-
-/* MC_CMD_FPGA_FLASH_INDEX enum */
-#define MC_CMD_FPGA_FLASH_PRIMARY 0x0 /* enum */
-#define MC_CMD_FPGA_FLASH_SECONDARY 0x1 /* enum */
-
-/* MC_CMD_EXTERNAL_MAE_LINK_MODE enum */
-/* enum: Legacy mode as described in XN-200039-TC. */
-#define MC_CMD_EXTERNAL_MAE_LINK_MODE_LEGACY 0x0
-/* enum: Switchdev mode as described in XN-200039-TC. */
-#define MC_CMD_EXTERNAL_MAE_LINK_MODE_SWITCHDEV 0x1
-/* enum: Bootstrap mode as described in XN-200039-TC. */
-#define MC_CMD_EXTERNAL_MAE_LINK_MODE_BOOTSTRAP 0x2
-/* enum: Link-mode change is in-progress as described in XN-200039-TC. */
-#define MC_CMD_EXTERNAL_MAE_LINK_MODE_PENDING 0xf
-
/* PCIE_INTERFACE enum: From EF100 onwards, SFC products can have multiple PCIe
* interfaces. There is a need to refer to interfaces explicitly from drivers
* (for example, a management driver on one interface administering a function
@@ -424,6 +406,14 @@
* an on-NIC ARM module is expected to be connected.
*/
#define PCIE_INTERFACE_NIC_EMBEDDED 0x1
+/* enum: The PCIe logical interface 0. It is an alias for HOST_PRIMARY. */
+#define PCIE_INTERFACE_PCIE_HOST_INTF_0 0x0
+/* enum: The PCIe logical interface 1. */
+#define PCIE_INTERFACE_PCIE_HOST_INTF_1 0x2
+/* enum: The PCIe logical interface 2. */
+#define PCIE_INTERFACE_PCIE_HOST_INTF_2 0x3
+/* enum: The PCIe logical interface 3. */
+#define PCIE_INTERFACE_PCIE_HOST_INTF_3 0x4
/* enum: For MCDI commands issued over a PCIe interface, this value is
* translated into the interface over which the command was issued. Not
* meaningful for other MCDI transports.
@@ -640,7 +630,11 @@
* be allocated by different counter blocks, so e.g. AR counter 42 is different
* from CT counter 42. Generation counts are also type-specific. This value is
* also present in the header of streaming counter packets, in the IDENTIFIER
- * field (see packetiser packet format definitions).
+ * field (see packetiser packet format definitions). Also note that LACP
+ * counter IDs are not allocated individually, instead the counter IDs are
+ * directly tied to the LACP balance table indices. These in turn are allocated
+ * in large contiguous blocks as a LAG config. Calling MAE_COUNTER_ALLOC/FREE
+ * with an LACP counter type will return EPERM.
*/
/* enum: Action Rule counters - can be referenced in AR response. */
#define MAE_COUNTER_TYPE_AR 0x0
@@ -648,6 +642,14 @@
#define MAE_COUNTER_TYPE_CT 0x1
/* enum: Outer Rule counters - can be referenced in OR response. */
#define MAE_COUNTER_TYPE_OR 0x2
+/* enum: LACP counters - linked to LACP balance table entries. */
+#define MAE_COUNTER_TYPE_LACP 0x3
+
+/* MAE_COUNTER_ID enum: ID of allocated counter or counter list. */
+/* enum: A counter ID that is guaranteed never to represent a real counter or
+ * counter list.
+ */
+#define MAE_COUNTER_ID_NULL 0xffffffff
/* TABLE_ID enum: Unique IDs for tables. The 32-bit ID values have been
* structured with bits [31:24] reserved (0), [23:16] indicating which major
@@ -656,7 +658,9 @@
* variations of the same table. (All of the tables currently defined within
* the streaming engines are listed here, but this does not imply that they are
* all supported - MC_CMD_TABLE_LIST returns the list of actually supported
- * tables.)
+ * tables.) The DPU offload engines' enumerators follow a deliberate pattern:
+ * 0x01010000 + is_dpu_net * 0x10000 + is_wr_or_tx * 0x8000 + is_lite_pipe *
+ * 0x1000 + oe_engine_type * 0x100 + oe_instance_within_pipe * 0x10
*/
/* enum: Outer_Rule_Table in the MAE - refer to SF-123102-TC. */
#define TABLE_ID_OUTER_RULE_TABLE 0x10000
@@ -694,45 +698,70 @@
#define TABLE_ID_RSS_CONTEXT_TABLE 0x20200
/* enum: Indirection_Table in VNIC Rx - refer to SF-123102-TC. */
#define TABLE_ID_INDIRECTION_TABLE 0x20300
-
-/* TABLE_COMPRESSED_VLAN enum: Compressed VLAN TPID as used by some field
- * types; can be calculated by (((ether_type_msb >> 2) & 0x4) ^ 0x4) |
- * (ether_type_msb & 0x3);
- */
-#define TABLE_COMPRESSED_VLAN_TPID_8100 0x5 /* enum */
-#define TABLE_COMPRESSED_VLAN_TPID_88A8 0x4 /* enum */
-#define TABLE_COMPRESSED_VLAN_TPID_9100 0x1 /* enum */
-#define TABLE_COMPRESSED_VLAN_TPID_9200 0x2 /* enum */
-#define TABLE_COMPRESSED_VLAN_TPID_9300 0x3 /* enum */
-
-/* TABLE_NAT_DIR enum: NAT direction. */
-#define TABLE_NAT_DIR_SOURCE 0x0 /* enum */
-#define TABLE_NAT_DIR_DEST 0x1 /* enum */
-
-/* TABLE_RSS_KEY_MODE enum: Defines how the value for Toeplitz hashing for RSS
- * is constructed as a concatenation (indicated here by "++") of packet header
- * fields.
- */
-/* enum: IP src addr ++ IP dst addr */
-#define TABLE_RSS_KEY_MODE_SA_DA 0x0
-/* enum: IP src addr ++ IP dst addr ++ TCP/UDP src port ++ TCP/UDP dst port */
-#define TABLE_RSS_KEY_MODE_SA_DA_SP_DP 0x1
-/* enum: IP src addr */
-#define TABLE_RSS_KEY_MODE_SA 0x2
-/* enum: IP dst addr */
-#define TABLE_RSS_KEY_MODE_DA 0x3
-/* enum: IP src addr ++ TCP/UDP src port */
-#define TABLE_RSS_KEY_MODE_SA_SP 0x4
-/* enum: IP dest addr ++ TCP dest port */
-#define TABLE_RSS_KEY_MODE_DA_DP 0x5
-/* enum: Nothing (produces input of 0, resulting in output hash of 0) */
-#define TABLE_RSS_KEY_MODE_NONE 0x7
-
-/* TABLE_RSS_SPREAD_MODE enum: RSS spreading mode. */
-/* enum: RSS uses Indirection_Table lookup. */
-#define TABLE_RSS_SPREAD_MODE_INDIRECTION 0x0
-/* enum: RSS uses even spreading calculation. */
-#define TABLE_RSS_SPREAD_MODE_EVEN 0x1
+/* enum: DPU.host read pipe first CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_HOST_RD_CRC0_OE_PROFILE 0x1010000
+/* enum: DPU.host read pipe second CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_HOST_RD_CRC1_OE_PROFILE 0x1010010
+/* enum: DPU.host write pipe first CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_HOST_WR_CRC0_OE_PROFILE 0x1018000
+/* enum: DPU.host write pipe second CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_HOST_WR_CRC1_OE_PROFILE 0x1018010
+/* enum: DPU.net 'full' receive pipe CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RX_CRC0_OE_PROFILE 0x1020000
+/* enum: DPU.net 'full' receive pipe first checksum offload engine profiles -
+ * refer to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RX_CSUM0_OE_PROFILE 0x1020100
+/* enum: DPU.net 'full' receive pipe second checksum offload engine profiles -
+ * refer to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RX_CSUM1_OE_PROFILE 0x1020110
+/* enum: DPU.net 'full' receive pipe AES-GCM offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RX_AES_GCM0_OE_PROFILE 0x1020200
+/* enum: DPU.net 'lite' receive pipe CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RXLITE_CRC0_OE_PROFILE 0x1021000
+/* enum: DPU.net 'lite' receive pipe checksum offload engine profiles - refer
+ * to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RXLITE_CSUM0_OE_PROFILE 0x1021100
+/* enum: DPU.net 'full' transmit pipe CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TX_CRC0_OE_PROFILE 0x1028000
+/* enum: DPU.net 'full' transmit pipe first checksum offload engine profiles -
+ * refer to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TX_CSUM0_OE_PROFILE 0x1028100
+/* enum: DPU.net 'full' transmit pipe second checksum offload engine profiles -
+ * refer to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TX_CSUM1_OE_PROFILE 0x1028110
+/* enum: DPU.net 'full' transmit pipe AES-GCM offload engine profiles - refer
+ * to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TX_AES_GCM0_OE_PROFILE 0x1028200
+/* enum: DPU.net 'lite' transmit pipe CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TXLITE_CRC0_OE_PROFILE 0x1029000
+/* enum: DPU.net 'lite' transmit pipe checksum offload engine profiles - refer
+ * to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TXLITE_CSUM0_OE_PROFILE 0x1029100
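
To make the stated enumerator pattern concrete, the following illustrative macro (not part of this patch) reproduces the formula; the checksum engine-type value (1) and instance index (1) used in the example are inferred from the listed values rather than stated explicitly.

/* Hypothetical helper reproducing the documented DPU offload engine pattern. */
#define EXAMPLE_DPU_OE_TABLE_ID(is_dpu_net, is_wr_or_tx, is_lite_pipe,	\
				oe_engine_type, oe_instance)		\
	(0x01010000 + (is_dpu_net) * 0x10000 + (is_wr_or_tx) * 0x8000 +	\
	 (is_lite_pipe) * 0x1000 + (oe_engine_type) * 0x100 +		\
	 (oe_instance) * 0x10)

/* e.g. EXAMPLE_DPU_OE_TABLE_ID(1, 1, 0, 1, 1) == 0x1028110, which matches
 * TABLE_ID_DPU_NET_TX_CSUM1_OE_PROFILE above.
 */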
/* TABLE_FIELD_ID enum: Unique IDs for fields. Related concepts have been
* loosely grouped together into blocks with gaps for expansion, but the values
@@ -1026,6 +1055,16 @@
#define TABLE_FIELD_ID_BAL_TBL_BASE_DIV64 0xde
/* enum: Length of balance table region: 0=>64, 1=>128, 2=>256. */
#define TABLE_FIELD_ID_BAL_TBL_LEN_ID 0xdf
+/* enum: LACP LAG ID (i.e. the low 3 bits of LACP LAG mport ID), indexing
+ * LACP_LAG_Config_Table. Refer to SF-123102-TC.
+ */
+#define TABLE_FIELD_ID_LACP_LAG_ID 0xe0
+/* enum: Address in LACP_Balance_Table. The balance table is partitioned
+ * between LAGs according to the settings in LACP_LAG_Config_Table and then
+ * indexed by the LACP hash, providing the mapping to destination mports. Refer
+ * to SF-123102-TC.
+ */
+#define TABLE_FIELD_ID_BAL_TBL_ADDR 0xe1
/* enum: UDP port to match for UDP-based encapsulations; required to be 0 for
* other encapsulation types.
*/
@@ -1082,6 +1121,55 @@
#define TABLE_FIELD_ID_INDIR_TBL_LEN_ID 0x105
/* enum: An offset to be applied to the base destination queue ID. */
#define TABLE_FIELD_ID_INDIR_OFFSET 0x106
+/* enum: DPU offload engine profile ID to address. */
+#define TABLE_FIELD_ID_OE_PROFILE 0x3e8
+/* enum: Width of the CRC to calculate - see CRC_VARIANT enum. */
+#define TABLE_FIELD_ID_CRC_VARIANT 0x3f2
+/* enum: If set, reflect the bits of each input byte, bit 7 is LSB, bit 0 is
+ * MSB. If clear, bit 7 is MSB, bit 0 is LSB.
+ */
+#define TABLE_FIELD_ID_CRC_REFIN 0x3f3
+/* enum: If set, reflect the bits of each output byte, bit 7 is LSB, bit 0 is
+ * MSB. If clear, bit 7 is MSB, bit 0 is LSB.
+ */
+#define TABLE_FIELD_ID_CRC_REFOUT 0x3f4
+/* enum: If set, invert every bit of the output value. */
+#define TABLE_FIELD_ID_CRC_INVOUT 0x3f5
+/* enum: The CRC polynomial to use for checksumming, in normal form. */
+#define TABLE_FIELD_ID_CRC_POLY 0x3f6
+/* enum: Operation for the checksum engine to perform - see DPU_CSUM_OP enum.
+ */
+#define TABLE_FIELD_ID_CSUM_OP 0x410
+/* enum: Byte offset of checksum relative to region_start (for VALIDATE_*
+ * operations only).
+ */
+#define TABLE_FIELD_ID_CSUM_OFFSET 0x411
+/* enum: Indicates there is additional data on OPR bus that needs to be
+ * incorporated into the payload checksum.
+ */
+#define TABLE_FIELD_ID_CSUM_OPR_ADDITIONAL_DATA 0x412
+/* enum: Log2 data size of additional data on OPR bus. */
+#define TABLE_FIELD_ID_CSUM_OPR_DATA_SIZE_LOG2 0x413
+/* enum: 4 byte offset of where to find the additional data on the OPR bus. */
+#define TABLE_FIELD_ID_CSUM_OPR_4B_OFF 0x414
+/* enum: Operation type for the AES-GCM core - see GCM_OP_CODE enum. */
+#define TABLE_FIELD_ID_GCM_OP_CODE 0x41a
+/* enum: Key length - AES_KEY_LEN enum. */
+#define TABLE_FIELD_ID_GCM_KEY_LEN 0x41b
+/* enum: OPR 4 byte offset for ICV or GHASH output (only in BULK_* mode) or
+ * IPSEC decrypt output.
+ */
+#define TABLE_FIELD_ID_GCM_OPR_4B_OFFSET 0x41c
+/* enum: If OP_CODE is BULK_*, indicates Emit GHASH (Fragment mode). Else,
+ * indicates IPSEC-ESN mode.
+ */
+#define TABLE_FIELD_ID_GCM_EMIT_GHASH_ISESN 0x41d
+/* enum: Replay Protection Enable. */
+#define TABLE_FIELD_ID_GCM_REPLAY_PROTECT_EN 0x41e
+/* enum: IPSEC Encrypt ESP trailer NEXT_HEADER byte. */
+#define TABLE_FIELD_ID_GCM_NEXT_HDR 0x41f
+/* enum: Replay Window Size. */
+#define TABLE_FIELD_ID_GCM_REPLAY_WIN_SIZE 0x420
/* MCDI_EVENT structuredef: The structure of an MCDI_EVENT on Siena/EF10/EF100
* platforms
@@ -1138,6 +1226,24 @@
#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_OFST 0
#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8
+#define MCDI_EVENT_PORT_LINKCHANGE_PORT_HANDLE_OFST 0
+#define MCDI_EVENT_PORT_LINKCHANGE_PORT_HANDLE_LBN 0
+#define MCDI_EVENT_PORT_LINKCHANGE_PORT_HANDLE_WIDTH 24
+#define MCDI_EVENT_PORT_LINKCHANGE_SEQ_NUM_OFST 0
+#define MCDI_EVENT_PORT_LINKCHANGE_SEQ_NUM_LBN 24
+#define MCDI_EVENT_PORT_LINKCHANGE_SEQ_NUM_WIDTH 7
+#define MCDI_EVENT_PORT_LINKCHANGE_LINK_UP_OFST 0
+#define MCDI_EVENT_PORT_LINKCHANGE_LINK_UP_LBN 31
+#define MCDI_EVENT_PORT_LINKCHANGE_LINK_UP_WIDTH 1
+#define MCDI_EVENT_PORT_MODULECHANGE_PORT_HANDLE_OFST 0
+#define MCDI_EVENT_PORT_MODULECHANGE_PORT_HANDLE_LBN 0
+#define MCDI_EVENT_PORT_MODULECHANGE_PORT_HANDLE_WIDTH 24
+#define MCDI_EVENT_PORT_MODULECHANGE_SEQ_NUM_OFST 0
+#define MCDI_EVENT_PORT_MODULECHANGE_SEQ_NUM_LBN 24
+#define MCDI_EVENT_PORT_MODULECHANGE_SEQ_NUM_WIDTH 7
+#define MCDI_EVENT_PORT_MODULECHANGE_MDI_CONNECTED_OFST 0
+#define MCDI_EVENT_PORT_MODULECHANGE_MDI_CONNECTED_LBN 31
+#define MCDI_EVENT_PORT_MODULECHANGE_MDI_CONNECTED_WIDTH 1
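
For illustration only (not part of this patch), the new PORT_LINKCHANGE fields could be decoded from an MCDI event using the driver's existing MCDI_EVENT_FIELD() helper, roughly as in the hypothetical function below.

static void example_handle_port_linkchange(struct efx_nic *efx,
					   efx_qword_t *event)
{
	u32 port_handle = MCDI_EVENT_FIELD(*event, PORT_LINKCHANGE_PORT_HANDLE);
	bool link_up = MCDI_EVENT_FIELD(*event, PORT_LINKCHANGE_LINK_UP);

	netif_dbg(efx, link, efx->net_dev, "port %#x link %s\n",
		  port_handle, link_up ? "up" : "down");
}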
#define MCDI_EVENT_SENSOREVT_MONITOR_OFST 0
#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0
#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8
@@ -1237,7 +1343,7 @@
#define MCDI_EVENT_AOE_FPGA_LOAD_FAILED 0xe
/* enum: Notify that invalid flash type detected */
#define MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf
-/* enum: Notify that the attempt to run FPGA Controller firmware timedout */
+/* enum: Notify that the attempt to run FPGA Controller firmware timed out */
#define MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10
/* enum: Failure to probe one or more FPGA boot flash chips */
#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_INVALID 0x11
@@ -1255,7 +1361,7 @@
#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8
/* enum: FC Assert happened, but the register information is not available */
#define MCDI_EVENT_AOE_ERR_FC_ASSERT_SEEN 0x0
-/* enum: The register information for FC Assert is ready for readinng by driver
+/* enum: The register information for FC Assert is ready for reading by driver
*/
#define MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1
#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_OFST 0
@@ -1364,6 +1470,12 @@
#define MCDI_EVENT_MODULECHANGE_SEQ_OFST 0
#define MCDI_EVENT_MODULECHANGE_SEQ_LBN 30
#define MCDI_EVENT_MODULECHANGE_SEQ_WIDTH 2
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_VI_ID_OFST 0
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_VI_ID_LBN 0
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_VI_ID_WIDTH 16
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_ID_OFST 0
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_ID_LBN 16
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_ID_WIDTH 16
#define MCDI_EVENT_DATA_LBN 0
#define MCDI_EVENT_DATA_WIDTH 32
/* Alias for PTP_DATA. */
@@ -1500,6 +1612,31 @@
* change to the journal.
*/
#define MCDI_EVENT_CODE_MPORT_JOURNAL_CHANGE 0x27
+/* enum: Notification that a source queue is enabled and attached to its proxy
+ * sink queue. SRC field contains the handle of the affected descriptor proxy
+ * function. DATA field contains the relative source queue number and absolute
+ * VI ID.
+ */
+#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_QUEUE_START 0x28
+/* enum: Notification of a change in link state and/or link speed of a network
+ * port link. This event applies to a network port identified by a handle,
+ * PORT_HANDLE, which is discovered by the driver using the MC_CMD_ENUM_PORTS
+ * command.
+ */
+#define MCDI_EVENT_CODE_PORT_LINKCHANGE 0x29
+/* enum: Notification of a change in the state of an MDI (external connector)
+ * of a network port. This typically corresponds to module plug/unplug for
+ * modular interfaces (e.g., SFP/QSFP and similar) or cable connect/disconnect.
+ * This event applies to a network port identified by a handle, PORT_HANDLE,
+ * which is discovered by the driver using the MC_CMD_ENUM_PORTS command.
+ */
+#define MCDI_EVENT_CODE_PORT_MODULECHANGE 0x2a
+/* enum: Notification that the port enumeration journal has changed since it
+ * was last read and updates can be read using the MC_CMD_ENUM_PORTS command.
+ * The firmware may moderate the events so that an event is not sent for every
+ * change to the journal.
+ */
+#define MCDI_EVENT_CODE_ENUM_PORTS_CHANGE 0x2b
/* enum: Artificial event generated by host and posted via MC for test
* purposes.
*/
@@ -1512,6 +1649,14 @@
#define MCDI_EVENT_LINKCHANGE_DATA_LEN 4
#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0
#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32
+#define MCDI_EVENT_PORT_LINKCHANGE_DATA_OFST 0
+#define MCDI_EVENT_PORT_LINKCHANGE_DATA_LEN 4
+#define MCDI_EVENT_PORT_LINKCHANGE_DATA_LBN 0
+#define MCDI_EVENT_PORT_LINKCHANGE_DATA_WIDTH 32
+#define MCDI_EVENT_PORT_MODULECHANGE_DATA_OFST 0
+#define MCDI_EVENT_PORT_MODULECHANGE_DATA_LEN 4
+#define MCDI_EVENT_PORT_MODULECHANGE_DATA_LBN 0
+#define MCDI_EVENT_PORT_MODULECHANGE_DATA_WIDTH 32
#define MCDI_EVENT_SENSOREVT_DATA_OFST 0
#define MCDI_EVENT_SENSOREVT_DATA_LEN 4
#define MCDI_EVENT_SENSOREVT_DATA_LBN 0
@@ -1668,247 +1813,6 @@
#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_LBN 0
#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_WIDTH 32
-/* FCDI_EVENT structuredef */
-#define FCDI_EVENT_LEN 8
-#define FCDI_EVENT_CONT_LBN 32
-#define FCDI_EVENT_CONT_WIDTH 1
-#define FCDI_EVENT_LEVEL_LBN 33
-#define FCDI_EVENT_LEVEL_WIDTH 3
-/* enum: Info. */
-#define FCDI_EVENT_LEVEL_INFO 0x0
-/* enum: Warning. */
-#define FCDI_EVENT_LEVEL_WARN 0x1
-/* enum: Error. */
-#define FCDI_EVENT_LEVEL_ERR 0x2
-/* enum: Fatal. */
-#define FCDI_EVENT_LEVEL_FATAL 0x3
-#define FCDI_EVENT_DATA_OFST 0
-#define FCDI_EVENT_DATA_LEN 4
-#define FCDI_EVENT_LINK_STATE_STATUS_OFST 0
-#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0
-#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
-#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */
-#define FCDI_EVENT_LINK_UP 0x1 /* enum */
-#define FCDI_EVENT_DATA_LBN 0
-#define FCDI_EVENT_DATA_WIDTH 32
-#define FCDI_EVENT_SRC_LBN 36
-#define FCDI_EVENT_SRC_WIDTH 8
-#define FCDI_EVENT_EV_CODE_LBN 60
-#define FCDI_EVENT_EV_CODE_WIDTH 4
-#define FCDI_EVENT_CODE_LBN 44
-#define FCDI_EVENT_CODE_WIDTH 8
-/* enum: The FC was rebooted. */
-#define FCDI_EVENT_CODE_REBOOT 0x1
-/* enum: Bad assert. */
-#define FCDI_EVENT_CODE_ASSERT 0x2
-/* enum: DDR3 test result. */
-#define FCDI_EVENT_CODE_DDR_TEST_RESULT 0x3
-/* enum: Link status. */
-#define FCDI_EVENT_CODE_LINK_STATE 0x4
-/* enum: A timed read is ready to be serviced. */
-#define FCDI_EVENT_CODE_TIMED_READ 0x5
-/* enum: One or more PPS IN events */
-#define FCDI_EVENT_CODE_PPS_IN 0x6
-/* enum: Tick event from PTP clock */
-#define FCDI_EVENT_CODE_PTP_TICK 0x7
-/* enum: ECC error counters */
-#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
-/* enum: Current status of PTP */
-#define FCDI_EVENT_CODE_PTP_STATUS 0x9
-/* enum: Port id config to map MC-FC port idx */
-#define FCDI_EVENT_CODE_PORT_CONFIG 0xa
-/* enum: Boot result or error code */
-#define FCDI_EVENT_CODE_BOOT_RESULT 0xb
-#define FCDI_EVENT_REBOOT_SRC_LBN 36
-#define FCDI_EVENT_REBOOT_SRC_WIDTH 8
-#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
-#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
-#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
-#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LEN 4
-#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
-#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
-#define FCDI_EVENT_ASSERT_TYPE_LBN 36
-#define FCDI_EVENT_ASSERT_TYPE_WIDTH 8
-#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
-#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
-#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
-#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LEN 4
-#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
-#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
-#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
-#define FCDI_EVENT_LINK_STATE_DATA_LEN 4
-#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
-#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
-#define FCDI_EVENT_PTP_STATE_OFST 0
-#define FCDI_EVENT_PTP_STATE_LEN 4
-#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
-#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
-#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
-#define FCDI_EVENT_PTP_STATE_LBN 0
-#define FCDI_EVENT_PTP_STATE_WIDTH 32
-#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
-#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
-#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
-#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LEN 4
-#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
-#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
-/* Index of MC port being referred to */
-#define FCDI_EVENT_PORT_CONFIG_SRC_LBN 36
-#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
-/* FC Port index that matches the MC port index in SRC */
-#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
-#define FCDI_EVENT_PORT_CONFIG_DATA_LEN 4
-#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
-#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
-#define FCDI_EVENT_BOOT_RESULT_OFST 0
-#define FCDI_EVENT_BOOT_RESULT_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
-#define FCDI_EVENT_BOOT_RESULT_LBN 0
-#define FCDI_EVENT_BOOT_RESULT_WIDTH 32
-
-/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
- * to the MC. Note that this structure | is overlayed over a normal FCDI event
- * such that bits 32-63 containing | event code, level, source etc remain the
- * same. In this case the data | field of the header is defined to be the
- * number of timestamps
- */
-#define FCDI_EXTENDED_EVENT_PPS_LENMIN 16
-#define FCDI_EXTENDED_EVENT_PPS_LENMAX 248
-#define FCDI_EXTENDED_EVENT_PPS_LENMAX_MCDI2 1016
-#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_NUM(len) (((len)-8)/8)
-/* Number of timestamps following */
-#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
-#define FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
-#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
-/* Seconds field of a timestamp record */
-#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
-#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
-#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
-/* Nanoseconds field of a timestamp record */
-#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
-#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
-#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
-/* Timestamp records comprising the event */
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_LBN 64
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_WIDTH 32
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_LBN 96
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_WIDTH 32
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM_MCDI2 126
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
-
-/* MUM_EVENT structuredef */
-#define MUM_EVENT_LEN 8
-#define MUM_EVENT_CONT_LBN 32
-#define MUM_EVENT_CONT_WIDTH 1
-#define MUM_EVENT_LEVEL_LBN 33
-#define MUM_EVENT_LEVEL_WIDTH 3
-/* enum: Info. */
-#define MUM_EVENT_LEVEL_INFO 0x0
-/* enum: Warning. */
-#define MUM_EVENT_LEVEL_WARN 0x1
-/* enum: Error. */
-#define MUM_EVENT_LEVEL_ERR 0x2
-/* enum: Fatal. */
-#define MUM_EVENT_LEVEL_FATAL 0x3
-#define MUM_EVENT_DATA_OFST 0
-#define MUM_EVENT_DATA_LEN 4
-#define MUM_EVENT_SENSOR_ID_OFST 0
-#define MUM_EVENT_SENSOR_ID_LBN 0
-#define MUM_EVENT_SENSOR_ID_WIDTH 8
-/* Enum values, see field(s): */
-/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
-#define MUM_EVENT_SENSOR_STATE_OFST 0
-#define MUM_EVENT_SENSOR_STATE_LBN 8
-#define MUM_EVENT_SENSOR_STATE_WIDTH 8
-#define MUM_EVENT_PORT_PHY_READY_OFST 0
-#define MUM_EVENT_PORT_PHY_READY_LBN 0
-#define MUM_EVENT_PORT_PHY_READY_WIDTH 1
-#define MUM_EVENT_PORT_PHY_LINK_UP_OFST 0
-#define MUM_EVENT_PORT_PHY_LINK_UP_LBN 1
-#define MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1
-#define MUM_EVENT_PORT_PHY_TX_LOL_OFST 0
-#define MUM_EVENT_PORT_PHY_TX_LOL_LBN 2
-#define MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1
-#define MUM_EVENT_PORT_PHY_RX_LOL_OFST 0
-#define MUM_EVENT_PORT_PHY_RX_LOL_LBN 3
-#define MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1
-#define MUM_EVENT_PORT_PHY_TX_LOS_OFST 0
-#define MUM_EVENT_PORT_PHY_TX_LOS_LBN 4
-#define MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1
-#define MUM_EVENT_PORT_PHY_RX_LOS_OFST 0
-#define MUM_EVENT_PORT_PHY_RX_LOS_LBN 5
-#define MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1
-#define MUM_EVENT_PORT_PHY_TX_FAULT_OFST 0
-#define MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6
-#define MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1
-#define MUM_EVENT_DATA_LBN 0
-#define MUM_EVENT_DATA_WIDTH 32
-#define MUM_EVENT_SRC_LBN 36
-#define MUM_EVENT_SRC_WIDTH 8
-#define MUM_EVENT_EV_CODE_LBN 60
-#define MUM_EVENT_EV_CODE_WIDTH 4
-#define MUM_EVENT_CODE_LBN 44
-#define MUM_EVENT_CODE_WIDTH 8
-/* enum: The MUM was rebooted. */
-#define MUM_EVENT_CODE_REBOOT 0x1
-/* enum: Bad assert. */
-#define MUM_EVENT_CODE_ASSERT 0x2
-/* enum: Sensor failure. */
-#define MUM_EVENT_CODE_SENSOR 0x3
-/* enum: Link fault has been asserted, or has cleared. */
-#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
-#define MUM_EVENT_SENSOR_DATA_OFST 0
-#define MUM_EVENT_SENSOR_DATA_LEN 4
-#define MUM_EVENT_SENSOR_DATA_LBN 0
-#define MUM_EVENT_SENSOR_DATA_WIDTH 32
-#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0
-#define MUM_EVENT_PORT_PHY_FLAGS_LEN 4
-#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0
-#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
-#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
-#define MUM_EVENT_PORT_PHY_COPPER_LEN_LEN 4
-#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
-#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
-#define MUM_EVENT_PORT_PHY_CAPS_OFST 0
-#define MUM_EVENT_PORT_PHY_CAPS_LEN 4
-#define MUM_EVENT_PORT_PHY_CAPS_LBN 0
-#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
-#define MUM_EVENT_PORT_PHY_TECH_OFST 0
-#define MUM_EVENT_PORT_PHY_TECH_LEN 4
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */
-#define MUM_EVENT_PORT_PHY_TECH_LBN 0
-#define MUM_EVENT_PORT_PHY_TECH_WIDTH 32
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40
-#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4
-
/***********************************/
/* MC_CMD_READ32
@@ -1969,90 +1873,6 @@
/***********************************/
-/* MC_CMD_COPYCODE
- * Copy MC code between two locations and jump. Note - this command really
- * belongs to INSECURE category but is required by shmboot. The command handler
- * has additional checks to reject insecure calls.
- */
-#define MC_CMD_COPYCODE 0x3
-#undef MC_CMD_0x3_PRIVILEGE_CTG
-
-#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_COPYCODE_IN msgrequest */
-#define MC_CMD_COPYCODE_IN_LEN 16
-/* Source address
- *
- * The main image should be entered via a copy of a single word from and to a
- * magic address, which controls various aspects of the boot. The magic address
- * is a bitfield, with each bit as documented below.
- */
-#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
-#define MC_CMD_COPYCODE_IN_SRC_ADDR_LEN 4
-/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
-#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
-/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
- * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below)
- */
-#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0
-/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT,
- * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see
- * below)
- */
-#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_LBN 6
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1
-/* Destination address */
-#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
-#define MC_CMD_COPYCODE_IN_DEST_ADDR_LEN 4
-#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
-#define MC_CMD_COPYCODE_IN_NUMWORDS_LEN 4
-/* Address of where to jump after copy. */
-#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
-#define MC_CMD_COPYCODE_IN_JUMP_LEN 4
-/* enum: Control should return to the caller rather than jumping */
-#define MC_CMD_COPYCODE_JUMP_NONE 0x1
-
-/* MC_CMD_COPYCODE_OUT msgresponse */
-#define MC_CMD_COPYCODE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_SET_FUNC
- * Select function for function-specific commands.
- */
-#define MC_CMD_SET_FUNC 0x4
-#undef MC_CMD_0x4_PRIVILEGE_CTG
-
-#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SET_FUNC_IN msgrequest */
-#define MC_CMD_SET_FUNC_IN_LEN 4
-/* Set function */
-#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
-#define MC_CMD_SET_FUNC_IN_FUNC_LEN 4
-
-/* MC_CMD_SET_FUNC_OUT msgresponse */
-#define MC_CMD_SET_FUNC_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_BOOT_STATUS
* Get the instruction address from which the MC booted.
*/
@@ -2259,6 +2079,7 @@
/* Log destination */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_LEN 4
+/* enum property: bitmask */
/* enum: UART. */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
/* enum: Event queue. */
@@ -2304,6 +2125,9 @@
/* MC_CMD_GET_VERSION_OUT msgresponse */
#define MC_CMD_GET_VERSION_OUT_LEN 32
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2326,6 +2150,9 @@
/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2356,6 +2183,9 @@
* (depending on which components exist on a particular adapter)
*/
#define MC_CMD_GET_VERSION_V2_OUT_LEN 304
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2495,6 +2325,9 @@
* (depending on which components exist on a particular adapter)
*/
#define MC_CMD_GET_VERSION_V3_OUT_LEN 328
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2641,6 +2474,9 @@
* version information
*/
#define MC_CMD_GET_VERSION_V4_OUT_LEN 392
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2803,6 +2639,9 @@
* and board version information
*/
#define MC_CMD_GET_VERSION_V5_OUT_LEN 424
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -3065,8 +2904,18 @@
* subscribers.
*/
#define MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b
-/* enum: Above this for future use. */
-#define MC_CMD_PTP_OP_MAX 0x1c
+/* enum: X4 and later adapters should use this instead of
+ * PTP_OP_TIME_EVENT_SUBSCRIBE. Subscribe to receive periodic time events
+ * indicating the current NIC time.
+ */
+#define MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE_V2 0x1c
+/* enum: For X4 and later NICs. Packet timestamps and time sync events have
+ * IS_SET and IN_SYNC flags that indicate whether the time has been updated
+ * and whether it is in sync with the host. Once set, the IN_SYNC flag is
+ * cleared by hardware after a software-configurable timeout. The host driver
+ * needs to query the currently configured timeout and the maximum supported
+ * interval; this MCDI operation can be used to query both.
+ */
+#define MC_CMD_PTP_OP_GET_SYNC_TIMEOUT 0x1d
/* MC_CMD_PTP_IN_ENABLE msgrequest */
#define MC_CMD_PTP_IN_ENABLE_LEN 16
@@ -3507,6 +3356,22 @@
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_LEN 4
+/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2 msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Event queue ID */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_QUEUE_ID_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_QUEUE_ID_LEN 4
+/* Space for flags. */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_FLAGS_OFST 12
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_FLAGS_LEN 4
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_REPORT_SYNC_STATUS_OFST 12
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_REPORT_SYNC_STATUS_LBN 31
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_REPORT_SYNC_STATUS_WIDTH 1
+
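
As an illustrative sketch (not part of this patch), a driver could issue the new subscribe operation with the existing MCDI helpers roughly as follows; example_ptp_subscribe_v2 and its minimal error handling are hypothetical.

static int example_ptp_subscribe_v2(struct efx_nic *efx, u32 evq_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_LEN);

	MCDI_SET_DWORD(inbuf, PTP_IN_CMD, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE_V2);
	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_V2_QUEUE_ID, evq_id);
	/* Request sync status reporting in the periodic time events. */
	MCDI_POPULATE_DWORD_1(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_V2_FLAGS,
			      PTP_IN_TIME_EVENT_SUBSCRIBE_V2_REPORT_SYNC_STATUS, 1);

	return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}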
/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
@@ -3540,6 +3405,13 @@
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_LEN 4
+/* MC_CMD_PTP_IN_GET_SYNC_TIMEOUT msgrequest */
+#define MC_CMD_PTP_IN_GET_SYNC_TIMEOUT_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
/* MC_CMD_PTP_OUT msgresponse */
#define MC_CMD_PTP_OUT_LEN 0
@@ -3939,416 +3811,14 @@
/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */
#define MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0
-
-/***********************************/
-/* MC_CMD_CSR_READ32
- * Read 32bit words from the indirect memory map.
- */
-#define MC_CMD_CSR_READ32 0xc
-#undef MC_CMD_0xc_PRIVILEGE_CTG
-
-#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_CSR_READ32_IN msgrequest */
-#define MC_CMD_CSR_READ32_IN_LEN 12
-/* Address */
-#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
-#define MC_CMD_CSR_READ32_IN_ADDR_LEN 4
-#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
-#define MC_CMD_CSR_READ32_IN_STEP_LEN 4
-#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
-#define MC_CMD_CSR_READ32_IN_NUMWORDS_LEN 4
-
-/* MC_CMD_CSR_READ32_OUT msgresponse */
-#define MC_CMD_CSR_READ32_OUT_LENMIN 4
-#define MC_CMD_CSR_READ32_OUT_LENMAX 252
-#define MC_CMD_CSR_READ32_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_CSR_READ32_OUT_BUFFER_NUM(len) (((len)-0)/4)
-/* The last dword is the status, not a value read */
-#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
-#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4
-#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1
-#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM 63
-#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM_MCDI2 255
-
-
-/***********************************/
-/* MC_CMD_CSR_WRITE32
- * Write 32bit dwords to the indirect memory map.
- */
-#define MC_CMD_CSR_WRITE32 0xd
-#undef MC_CMD_0xd_PRIVILEGE_CTG
-
-#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_CSR_WRITE32_IN msgrequest */
-#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
-#define MC_CMD_CSR_WRITE32_IN_LENMAX 252
-#define MC_CMD_CSR_WRITE32_IN_LENMAX_MCDI2 1020
-#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_NUM(len) (((len)-8)/4)
-/* Address */
-#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
-#define MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4
-#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
-#define MC_CMD_CSR_WRITE32_IN_STEP_LEN 4
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM 61
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM_MCDI2 253
-
-/* MC_CMD_CSR_WRITE32_OUT msgresponse */
-#define MC_CMD_CSR_WRITE32_OUT_LEN 4
-#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
-#define MC_CMD_CSR_WRITE32_OUT_STATUS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_HP
- * These commands are used for HP related features. They are grouped under one
- * MCDI command to avoid creating too many MCDI commands.
- */
-#define MC_CMD_HP 0x54
-#undef MC_CMD_0x54_PRIVILEGE_CTG
-
-#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_HP_IN msgrequest */
-#define MC_CMD_HP_IN_LEN 16
-/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at
- * the specified address with the specified interval.When address is NULL,
- * INTERVAL is interpreted as a command: 0: stop OCSD / 1: Report OCSD current
- * state / 2: (debug) Show temperature reported by one of the supported
- * sensors.
- */
-#define MC_CMD_HP_IN_SUBCMD_OFST 0
-#define MC_CMD_HP_IN_SUBCMD_LEN 4
-/* enum: OCSD (Option Card Sensor Data) sub-command. */
-#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0
-/* enum: Last known valid HP sub-command. */
-#define MC_CMD_HP_IN_LAST_SUBCMD 0x0
-/* The address to the array of sensor fields. (Or NULL to use a sub-command.)
- */
-#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4
-#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8
-#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4
-#define MC_CMD_HP_IN_OCSD_ADDR_LO_LEN 4
-#define MC_CMD_HP_IN_OCSD_ADDR_LO_LBN 32
-#define MC_CMD_HP_IN_OCSD_ADDR_LO_WIDTH 32
-#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8
-#define MC_CMD_HP_IN_OCSD_ADDR_HI_LEN 4
-#define MC_CMD_HP_IN_OCSD_ADDR_HI_LBN 64
-#define MC_CMD_HP_IN_OCSD_ADDR_HI_WIDTH 32
-/* The requested update interval, in seconds. (Or the sub-command if ADDR is
- * NULL.)
- */
-#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
-#define MC_CMD_HP_IN_OCSD_INTERVAL_LEN 4
-
-/* MC_CMD_HP_OUT msgresponse */
-#define MC_CMD_HP_OUT_LEN 4
-#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
-#define MC_CMD_HP_OUT_OCSD_STATUS_LEN 4
-/* enum: OCSD stopped for this card. */
-#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1
-/* enum: OCSD was successfully started with the address provided. */
-#define MC_CMD_HP_OUT_OCSD_STARTED 0x2
-/* enum: OCSD was already started for this card. */
-#define MC_CMD_HP_OUT_OCSD_ALREADY_STARTED 0x3
-
-
-/***********************************/
-/* MC_CMD_STACKINFO
- * Get stack information.
- */
-#define MC_CMD_STACKINFO 0xf
-#undef MC_CMD_0xf_PRIVILEGE_CTG
-
-#define MC_CMD_0xf_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_STACKINFO_IN msgrequest */
-#define MC_CMD_STACKINFO_IN_LEN 0
-
-/* MC_CMD_STACKINFO_OUT msgresponse */
-#define MC_CMD_STACKINFO_OUT_LENMIN 12
-#define MC_CMD_STACKINFO_OUT_LENMAX 252
-#define MC_CMD_STACKINFO_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num))
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_NUM(len) (((len)-0)/12)
-/* (thread ptr, stack size, free space) for each thread in system */
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM 21
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM_MCDI2 85
-
-
-/***********************************/
-/* MC_CMD_MDIO_READ
- * MDIO register read.
- */
-#define MC_CMD_MDIO_READ 0x10
-#undef MC_CMD_0x10_PRIVILEGE_CTG
-
-#define MC_CMD_0x10_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_MDIO_READ_IN msgrequest */
-#define MC_CMD_MDIO_READ_IN_LEN 16
-/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
- * external devices.
- */
-#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
-#define MC_CMD_MDIO_READ_IN_BUS_LEN 4
-/* enum: Internal. */
-#define MC_CMD_MDIO_BUS_INTERNAL 0x0
-/* enum: External. */
-#define MC_CMD_MDIO_BUS_EXTERNAL 0x1
-/* Port address */
-#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
-#define MC_CMD_MDIO_READ_IN_PRTAD_LEN 4
-/* Device Address or clause 22. */
-#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
-#define MC_CMD_MDIO_READ_IN_DEVAD_LEN 4
-/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
- * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
- */
-#define MC_CMD_MDIO_CLAUSE22 0x20
-/* Address */
-#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
-#define MC_CMD_MDIO_READ_IN_ADDR_LEN 4
-
-/* MC_CMD_MDIO_READ_OUT msgresponse */
-#define MC_CMD_MDIO_READ_OUT_LEN 8
-/* Value */
-#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
-#define MC_CMD_MDIO_READ_OUT_VALUE_LEN 4
-/* Status the MDIO commands return the raw status bits from the MDIO block. A
- * "good" transaction should have the DONE bit set and all other bits clear.
- */
-#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
-#define MC_CMD_MDIO_READ_OUT_STATUS_LEN 4
-/* enum: Good. */
-#define MC_CMD_MDIO_STATUS_GOOD 0x8
-
-
-/***********************************/
-/* MC_CMD_MDIO_WRITE
- * MDIO register write.
- */
-#define MC_CMD_MDIO_WRITE 0x11
-#undef MC_CMD_0x11_PRIVILEGE_CTG
-
-#define MC_CMD_0x11_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_MDIO_WRITE_IN msgrequest */
-#define MC_CMD_MDIO_WRITE_IN_LEN 20
-/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
- * external devices.
- */
-#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
-#define MC_CMD_MDIO_WRITE_IN_BUS_LEN 4
-/* enum: Internal. */
-/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
-/* enum: External. */
-/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
-/* Port address */
-#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
-#define MC_CMD_MDIO_WRITE_IN_PRTAD_LEN 4
-/* Device Address or clause 22. */
-#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
-#define MC_CMD_MDIO_WRITE_IN_DEVAD_LEN 4
-/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
- * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
- */
-/* MC_CMD_MDIO_CLAUSE22 0x20 */
-/* Address */
-#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
-#define MC_CMD_MDIO_WRITE_IN_ADDR_LEN 4
-/* Value */
-#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
-#define MC_CMD_MDIO_WRITE_IN_VALUE_LEN 4
-
-/* MC_CMD_MDIO_WRITE_OUT msgresponse */
-#define MC_CMD_MDIO_WRITE_OUT_LEN 4
-/* Status; the MDIO commands return the raw status bits from the MDIO block. A
- * "good" transaction should have the DONE bit set and all other bits clear.
- */
-#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
-#define MC_CMD_MDIO_WRITE_OUT_STATUS_LEN 4
-/* enum: Good. */
-/* MC_CMD_MDIO_STATUS_GOOD 0x8 */
-
-
-/***********************************/
-/* MC_CMD_DBI_WRITE
- * Write DBI register(s).
- */
-#define MC_CMD_DBI_WRITE 0x12
-#undef MC_CMD_0x12_PRIVILEGE_CTG
-
-#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_DBI_WRITE_IN msgrequest */
-#define MC_CMD_DBI_WRITE_IN_LENMIN 12
-#define MC_CMD_DBI_WRITE_IN_LENMAX 252
-#define MC_CMD_DBI_WRITE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num))
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_NUM(len) (((len)-0)/12)
-/* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset
- * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF.
- */
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM 21
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM_MCDI2 85
-
-/* MC_CMD_DBI_WRITE_OUT msgresponse */
-#define MC_CMD_DBI_WRITE_OUT_LEN 0
-
-/* MC_CMD_DBIWROP_TYPEDEF structuredef */
-#define MC_CMD_DBIWROP_TYPEDEF_LEN 12
-#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
-#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LEN 4
-#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
-#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
-#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4
-#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
-#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
-#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
-#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1
-#define MC_CMD_DBIWROP_TYPEDEF_CS2_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14
-#define MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1
-#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
-#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
-#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
-#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LEN 4
-#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
-#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_PORT_READ32
- * Read a 32-bit register from the indirect port register map. The port to
- * access is implied by the Shared memory channel used.
- */
-#define MC_CMD_PORT_READ32 0x14
-
-/* MC_CMD_PORT_READ32_IN msgrequest */
-#define MC_CMD_PORT_READ32_IN_LEN 4
-/* Address */
-#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
-#define MC_CMD_PORT_READ32_IN_ADDR_LEN 4
-
-/* MC_CMD_PORT_READ32_OUT msgresponse */
-#define MC_CMD_PORT_READ32_OUT_LEN 8
-/* Value */
-#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
-#define MC_CMD_PORT_READ32_OUT_VALUE_LEN 4
-/* Status */
-#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
-#define MC_CMD_PORT_READ32_OUT_STATUS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PORT_WRITE32
- * Write a 32-bit register to the indirect port register map. The port to
- * access is implied by the Shared memory channel used.
- */
-#define MC_CMD_PORT_WRITE32 0x15
-
-/* MC_CMD_PORT_WRITE32_IN msgrequest */
-#define MC_CMD_PORT_WRITE32_IN_LEN 8
-/* Address */
-#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
-#define MC_CMD_PORT_WRITE32_IN_ADDR_LEN 4
-/* Value */
-#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
-#define MC_CMD_PORT_WRITE32_IN_VALUE_LEN 4
-
-/* MC_CMD_PORT_WRITE32_OUT msgresponse */
-#define MC_CMD_PORT_WRITE32_OUT_LEN 4
-/* Status */
-#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
-#define MC_CMD_PORT_WRITE32_OUT_STATUS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PORT_READ128
- * Read a 128-bit register from the indirect port register map. The port to
- * access is implied by the Shared memory channel used.
- */
-#define MC_CMD_PORT_READ128 0x16
-
-/* MC_CMD_PORT_READ128_IN msgrequest */
-#define MC_CMD_PORT_READ128_IN_LEN 4
-/* Address */
-#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
-#define MC_CMD_PORT_READ128_IN_ADDR_LEN 4
-
-/* MC_CMD_PORT_READ128_OUT msgresponse */
-#define MC_CMD_PORT_READ128_OUT_LEN 20
-/* Value */
-#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
-#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
-/* Status */
-#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
-#define MC_CMD_PORT_READ128_OUT_STATUS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PORT_WRITE128
- * Write a 128-bit register to the indirect port register map. The port to
- * access is implied by the Shared memory channel used.
- */
-#define MC_CMD_PORT_WRITE128 0x17
-
-/* MC_CMD_PORT_WRITE128_IN msgrequest */
-#define MC_CMD_PORT_WRITE128_IN_LEN 20
-/* Address */
-#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
-#define MC_CMD_PORT_WRITE128_IN_ADDR_LEN 4
-/* Value */
-#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
-#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
-
-/* MC_CMD_PORT_WRITE128_OUT msgresponse */
-#define MC_CMD_PORT_WRITE128_OUT_LEN 4
-/* Status */
-#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
-#define MC_CMD_PORT_WRITE128_OUT_STATUS_LEN 4
-
-/* MC_CMD_CAPABILITIES structuredef */
-#define MC_CMD_CAPABILITIES_LEN 4
-/* Small buf table. */
-#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0
-#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 1
-/* Turbo mode (for Maranello). */
-#define MC_CMD_CAPABILITIES_TURBO_LBN 1
-#define MC_CMD_CAPABILITIES_TURBO_WIDTH 1
-/* Turbo mode active (for Maranello). */
-#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 2
-#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 1
-/* PTP offload. */
-#define MC_CMD_CAPABILITIES_PTP_LBN 3
-#define MC_CMD_CAPABILITIES_PTP_WIDTH 1
-/* AOE mode. */
-#define MC_CMD_CAPABILITIES_AOE_LBN 4
-#define MC_CMD_CAPABILITIES_AOE_WIDTH 1
-/* AOE mode active. */
-#define MC_CMD_CAPABILITIES_AOE_ACTIVE_LBN 5
-#define MC_CMD_CAPABILITIES_AOE_ACTIVE_WIDTH 1
-/* AOE mode active. */
-#define MC_CMD_CAPABILITIES_FC_ACTIVE_LBN 6
-#define MC_CMD_CAPABILITIES_FC_ACTIVE_WIDTH 1
-#define MC_CMD_CAPABILITIES_RESERVED_LBN 7
-#define MC_CMD_CAPABILITIES_RESERVED_WIDTH 25
+/* MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT msgresponse */
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_LEN 8
+/* Current value set in NIC, in seconds */
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_CURRENT_OFST 0
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_CURRENT_LEN 4
+/* Maximum supported by NIC, in seconds */
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_MAXIMUM_OFST 4
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_MAXIMUM_LEN 4
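
Likewise, an illustrative query of the new sync-timeout operation (not part of this patch) might look like the sketch below; the function name and error handling are hypothetical.

static int example_ptp_get_sync_timeout(struct efx_nic *efx,
					u32 *current_s, u32 *maximum_s)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_SYNC_TIMEOUT_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, PTP_IN_CMD, MC_CMD_PTP_OP_GET_SYNC_TIMEOUT);
	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_LEN)
		return -EIO;

	*current_s = MCDI_DWORD(outbuf, PTP_OUT_GET_SYNC_TIMEOUT_CURRENT);
	*maximum_s = MCDI_DWORD(outbuf, PTP_OUT_GET_SYNC_TIMEOUT_MAXIMUM);
	return 0;
}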
/***********************************/
@@ -4427,112 +3897,6 @@
/***********************************/
-/* MC_CMD_DBI_READX
- * Read DBI register(s) -- extended functionality
- */
-#define MC_CMD_DBI_READX 0x19
-#undef MC_CMD_0x19_PRIVILEGE_CTG
-
-#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_DBI_READX_IN msgrequest */
-#define MC_CMD_DBI_READX_IN_LENMIN 8
-#define MC_CMD_DBI_READX_IN_LENMAX 248
-#define MC_CMD_DBI_READX_IN_LENMAX_MCDI2 1016
-#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num))
-#define MC_CMD_DBI_READX_IN_DBIRDOP_NUM(len) (((len)-0)/8)
-/* Each Read op consists of an address (offset 0), VF/CS2) */
-#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_LEN 4
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_LBN 0
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_WIDTH 32
-#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4
-#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_LEN 4
-#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_LBN 32
-#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_WIDTH 32
-#define MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1
-#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31
-#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM_MCDI2 127
-
-/* MC_CMD_DBI_READX_OUT msgresponse */
-#define MC_CMD_DBI_READX_OUT_LENMIN 4
-#define MC_CMD_DBI_READX_OUT_LENMAX 252
-#define MC_CMD_DBI_READX_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_DBI_READX_OUT_VALUE_NUM(len) (((len)-0)/4)
-/* Value */
-#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0
-#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4
-#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1
-#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63
-#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM_MCDI2 255
-
-/* MC_CMD_DBIRDOP_TYPEDEF structuredef */
-#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8
-#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
-#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LEN 4
-#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
-#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
-#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
-#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_OFST 4
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_OFST 4
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1
-#define MC_CMD_DBIRDOP_TYPEDEF_CS2_OFST 4
-#define MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14
-#define MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1
-#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32
-#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_SET_RAND_SEED
- * Set the 16byte seed for the MC pseudo-random generator.
- */
-#define MC_CMD_SET_RAND_SEED 0x1a
-#undef MC_CMD_0x1a_PRIVILEGE_CTG
-
-#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SET_RAND_SEED_IN msgrequest */
-#define MC_CMD_SET_RAND_SEED_IN_LEN 16
-/* Seed value. */
-#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
-#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16
-
-/* MC_CMD_SET_RAND_SEED_OUT msgresponse */
-#define MC_CMD_SET_RAND_SEED_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_LTSSM_HIST
- * Retrieve the history of the LTSSM, if the build supports it.
- */
-#define MC_CMD_LTSSM_HIST 0x1b
-
-/* MC_CMD_LTSSM_HIST_IN msgrequest */
-#define MC_CMD_LTSSM_HIST_IN_LEN 0
-
-/* MC_CMD_LTSSM_HIST_OUT msgresponse */
-#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0
-#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252
-#define MC_CMD_LTSSM_HIST_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_LTSSM_HIST_OUT_DATA_NUM(len) (((len)-0)/4)
-/* variable number of LTSSM values, as bytes. The history is read-to-clear. */
-#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0
-#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4
-#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0
-#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM 63
-#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM_MCDI2 255
-
-
-/***********************************/
/* MC_CMD_DRV_ATTACH
* Inform MCPU that this port is managed on the host (i.e. driver active). For
* Huntington, also request the preferred datapath firmware to use if possible
@@ -4705,6 +4069,7 @@
/* Flags associated with this function */
#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_LEN 4
+/* enum property: bitshift */
/* enum: Labels the lowest-numbered function visible to the OS */
#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
/* enum: The function can control the link state of the physical port it is
@@ -4732,22 +4097,6 @@
/***********************************/
-/* MC_CMD_SHMUART
- * Route UART output to circular buffer in shared memory instead.
- */
-#define MC_CMD_SHMUART 0x1f
-
-/* MC_CMD_SHMUART_IN msgrequest */
-#define MC_CMD_SHMUART_IN_LEN 4
-/* ??? */
-#define MC_CMD_SHMUART_IN_FLAG_OFST 0
-#define MC_CMD_SHMUART_IN_FLAG_LEN 4
-
-/* MC_CMD_SHMUART_OUT msgresponse */
-#define MC_CMD_SHMUART_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_PORT_RESET
* Generic per-port reset. There is no equivalent for per-board reset. Locks
* required: None; Return code: 0, ETIME. NOTE: This command is deprecated -
@@ -4790,100 +4139,6 @@
/***********************************/
-/* MC_CMD_PCIE_CREDITS
- * Read instantaneous and minimum flow control thresholds.
- */
-#define MC_CMD_PCIE_CREDITS 0x21
-
-/* MC_CMD_PCIE_CREDITS_IN msgrequest */
-#define MC_CMD_PCIE_CREDITS_IN_LEN 8
-/* poll period. 0 is disabled */
-#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
-#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_LEN 4
-/* wipe statistics */
-#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
-#define MC_CMD_PCIE_CREDITS_IN_WIPE_LEN 4
-
-/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
-#define MC_CMD_PCIE_CREDITS_OUT_LEN 16
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_OFST 0
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_OFST 2
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_OFST 4
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_OFST 6
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_OFST 8
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_OFST 10
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_OFST 12
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_OFST 14
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_LEN 2
-
-
-/***********************************/
-/* MC_CMD_RXD_MONITOR
- * Get histogram of RX queue fill level.
- */
-#define MC_CMD_RXD_MONITOR 0x22
-
-/* MC_CMD_RXD_MONITOR_IN msgrequest */
-#define MC_CMD_RXD_MONITOR_IN_LEN 12
-#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0
-#define MC_CMD_RXD_MONITOR_IN_QID_LEN 4
-#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4
-#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_LEN 4
-#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8
-#define MC_CMD_RXD_MONITOR_IN_WIPE_LEN 4
-
-/* MC_CMD_RXD_MONITOR_OUT msgresponse */
-#define MC_CMD_RXD_MONITOR_OUT_LEN 80
-#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0
-#define MC_CMD_RXD_MONITOR_OUT_QID_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48
-#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_LEN 4
-
-
-/***********************************/
/* MC_CMD_PUTS
* Copy the given ASCII string out onto UART and/or out of the network port.
*/
@@ -4931,6 +4186,54 @@
/* MC_CMD_GET_PHY_CFG_IN msgrequest */
#define MC_CMD_GET_PHY_CFG_IN_LEN 0
+/* MC_CMD_GET_PHY_CFG_IN_V2 msgrequest */
+#define MC_CMD_GET_PHY_CFG_IN_V2_LEN 8
+/* Target port to request PHY state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details
+ */
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_LBN 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_OFST 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_LBN 32
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LINK_END_OFST 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_LBN 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_OFST 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_LBN 32
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
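
A minimal usage sketch, not part of the generated header (and equally applicable to MC_CMD_GET_LOOPBACK_MODES_IN_V2 below, which has the same TARGET layout): packing the 8-byte MAE_LINK_ENDPOINT_SELECTOR into a GET_PHY_CFG_IN_V2 request buffer using the byte offsets defined above. MCDI payloads are little-endian; the hypothetical put_le32() and build_get_phy_cfg_v2() helpers stand in for whatever buffer macros a consuming driver already has.

#include <stdint.h>
#include <string.h>

/* Sketch only: assumes the MC_CMD_GET_PHY_CFG_IN_V2_* macros above are in scope. */
static void put_le32(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	buf[ofst + 0] = val & 0xff;
	buf[ofst + 1] = (val >> 8) & 0xff;
	buf[ofst + 2] = (val >> 16) & 0xff;
	buf[ofst + 3] = (val >> 24) & 0xff;
}

static void build_get_phy_cfg_v2(uint8_t inbuf[MC_CMD_GET_PHY_CFG_IN_V2_LEN],
				 uint32_t mport_selector, uint32_t link_end)
{
	memset(inbuf, 0, MC_CMD_GET_PHY_CFG_IN_V2_LEN);
	/* MPORT_SELECTOR occupies bytes 0..3 of TARGET, LINK_END bytes 4..7 */
	put_le32(inbuf, MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_OFST,
		 mport_selector);
	put_le32(inbuf, MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LINK_END_OFST,
		 link_end);
}
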
/* MC_CMD_GET_PHY_CFG_OUT msgresponse */
#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
/* flags */
@@ -5026,6 +4329,9 @@
#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_OFST 8
#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21
#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1
+#define MC_CMD_PHY_CAP_200000FDX_OFST 8
+#define MC_CMD_PHY_CAP_200000FDX_LBN 22
+#define MC_CMD_PHY_CAP_200000FDX_WIDTH 1
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_LEN 4
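
An illustrative check, not part of the header, for the new 200G capability bit: _OFST gives the byte offset of the 32-bit capability word in the GET_PHY_CFG response and _LBN the bit position inside it. The helper names below are hypothetical.

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: assumes the MC_CMD_PHY_CAP_* macros above are in scope. */
static uint32_t get_le32(const uint8_t *buf, unsigned int ofst)
{
	return (uint32_t)buf[ofst] |
	       ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) |
	       ((uint32_t)buf[ofst + 3] << 24);
}

static bool phy_supports_200g_fdx(const uint8_t *get_phy_cfg_outbuf)
{
	uint32_t cap = get_le32(get_phy_cfg_outbuf, MC_CMD_PHY_CAP_200000FDX_OFST);

	return (cap >> MC_CMD_PHY_CAP_200000FDX_LBN) & 1;
}
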
@@ -5059,6 +4365,7 @@
#define MC_CMD_MEDIA_DSFP 0x8
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4
+/* enum property: bitshift */
/* enum: Native clause 22 */
#define MC_CMD_MMD_CLAUSE22 0x0
#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
@@ -5084,7 +4391,7 @@
#define MC_CMD_START_BIST 0x25
#undef MC_CMD_0x25_PRIVILEGE_CTG
-#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_START_BIST_IN msgrequest */
#define MC_CMD_START_BIST_IN_LEN 4
@@ -5124,7 +4431,7 @@
#define MC_CMD_POLL_BIST 0x26
#undef MC_CMD_0x26_PRIVILEGE_CTG
-#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_POLL_BIST_IN msgrequest */
#define MC_CMD_POLL_BIST_IN_LEN 0
@@ -5282,33 +4589,6 @@
/***********************************/
-/* MC_CMD_FLUSH_RX_QUEUES
- * Flush receive queue(s). If SRIOV is enabled (via MC_CMD_SRIOV), then RXQ
- * flushes should be initiated via this MCDI operation, rather than via
- * directly writing FLUSH_CMD.
- *
- * The flush is completed (either done/fail) asynchronously (after this command
- * returns). The driver must still wait for flush done/failure events as usual.
- */
-#define MC_CMD_FLUSH_RX_QUEUES 0x27
-
-/* MC_CMD_FLUSH_RX_QUEUES_IN msgrequest */
-#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMIN 4
-#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX 252
-#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX_MCDI2 1020
-#define MC_CMD_FLUSH_RX_QUEUES_IN_LEN(num) (0+4*(num))
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_NUM(len) (((len)-0)/4)
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST 0
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN 4
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM 1
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM 63
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM_MCDI2 255
-
-/* MC_CMD_FLUSH_RX_QUEUES_OUT msgresponse */
-#define MC_CMD_FLUSH_RX_QUEUES_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_LOOPBACK_MODES
* Returns a bitmask of loopback modes available at each speed.
*/
@@ -5320,6 +4600,54 @@
/* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */
#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
+/* MC_CMD_GET_LOOPBACK_MODES_IN_V2 msgrequest */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_LEN 8
+/* Target port to request loopback modes for. Uses MAE_LINK_ENDPOINT_SELECTOR
+ * which identifies a real or virtual network port by MAE port and link end.
+ * See the structure definition for more details
+ */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_LBN 32
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LINK_END_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_LBN 32
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 40
/* Supported loopbacks. */
@@ -5333,6 +4661,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_LBN 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_WIDTH 32
+/* enum property: bitshift */
/* enum: None. */
#define MC_CMD_LOOPBACK_NONE 0x0
/* enum: Data. */
@@ -5422,6 +4751,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_LBN 96
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5435,6 +4765,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_LBN 160
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5448,6 +4779,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_LBN 224
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5461,6 +4793,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_LBN 288
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
@@ -5479,6 +4812,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_LBN 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_WIDTH 32
+/* enum property: bitshift */
/* enum: None. */
/* MC_CMD_LOOPBACK_NONE 0x0 */
/* enum: Data. */
@@ -5568,6 +4902,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_LBN 96
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5581,6 +4916,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_LBN 160
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5594,6 +4930,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_LBN 224
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5607,6 +4944,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_LBN 288
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported 25G loopbacks. */
@@ -5620,6 +4958,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_LBN 352
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported 50G loopbacks. */
@@ -5633,6 +4972,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_LBN 416
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported 100G loopbacks. */
@@ -5646,6 +4986,214 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_LBN 480
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+
+/* MC_CMD_GET_LOOPBACK_MODES_OUT_V3 msgresponse: Supported loopback modes for
+ * newer NICs with 200G support
+ */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_LEN 72
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_LBN 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_WIDTH 32
+/* enum property: bitshift */
+/* enum: None. */
+/* MC_CMD_LOOPBACK_NONE 0x0 */
+/* enum: Data. */
+/* MC_CMD_LOOPBACK_DATA 0x1 */
+/* enum: GMAC. */
+/* MC_CMD_LOOPBACK_GMAC 0x2 */
+/* enum: XGMII. */
+/* MC_CMD_LOOPBACK_XGMII 0x3 */
+/* enum: XGXS. */
+/* MC_CMD_LOOPBACK_XGXS 0x4 */
+/* enum: XAUI. */
+/* MC_CMD_LOOPBACK_XAUI 0x5 */
+/* enum: GMII. */
+/* MC_CMD_LOOPBACK_GMII 0x6 */
+/* enum: SGMII. */
+/* MC_CMD_LOOPBACK_SGMII 0x7 */
+/* enum: XGBR. */
+/* MC_CMD_LOOPBACK_XGBR 0x8 */
+/* enum: XFI. */
+/* MC_CMD_LOOPBACK_XFI 0x9 */
+/* enum: XAUI Far. */
+/* MC_CMD_LOOPBACK_XAUI_FAR 0xa */
+/* enum: GMII Far. */
+/* MC_CMD_LOOPBACK_GMII_FAR 0xb */
+/* enum: SGMII Far. */
+/* MC_CMD_LOOPBACK_SGMII_FAR 0xc */
+/* enum: XFI Far. */
+/* MC_CMD_LOOPBACK_XFI_FAR 0xd */
+/* enum: GPhy. */
+/* MC_CMD_LOOPBACK_GPHY 0xe */
+/* enum: PhyXS. */
+/* MC_CMD_LOOPBACK_PHYXS 0xf */
+/* enum: PCS. */
+/* MC_CMD_LOOPBACK_PCS 0x10 */
+/* enum: PMA-PMD. */
+/* MC_CMD_LOOPBACK_PMAPMD 0x11 */
+/* enum: Cross-Port. */
+/* MC_CMD_LOOPBACK_XPORT 0x12 */
+/* enum: XGMII-Wireside. */
+/* MC_CMD_LOOPBACK_XGMII_WS 0x13 */
+/* enum: XAUI Wireside. */
+/* MC_CMD_LOOPBACK_XAUI_WS 0x14 */
+/* enum: XAUI Wireside Far. */
+/* MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 */
+/* enum: XAUI Wireside near. */
+/* MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 */
+/* enum: GMII Wireside. */
+/* MC_CMD_LOOPBACK_GMII_WS 0x17 */
+/* enum: XFI Wireside. */
+/* MC_CMD_LOOPBACK_XFI_WS 0x18 */
+/* enum: XFI Wireside Far. */
+/* MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 */
+/* enum: PhyXS Wireside. */
+/* MC_CMD_LOOPBACK_PHYXS_WS 0x1a */
+/* enum: PMA lanes MAC-Serdes. */
+/* MC_CMD_LOOPBACK_PMA_INT 0x1b */
+/* enum: KR Serdes Parallel (Encoder). */
+/* MC_CMD_LOOPBACK_SD_NEAR 0x1c */
+/* enum: KR Serdes Serial. */
+/* MC_CMD_LOOPBACK_SD_FAR 0x1d */
+/* enum: PMA lanes MAC-Serdes Wireside. */
+/* MC_CMD_LOOPBACK_PMA_INT_WS 0x1e */
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+/* MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f */
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+/* MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 */
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+/* MC_CMD_LOOPBACK_SD_FEP_WS 0x21 */
+/* enum: KR Serdes Serial Wireside. */
+/* MC_CMD_LOOPBACK_SD_FES_WS 0x22 */
+/* enum: Near side of AOE Siena side port */
+/* MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 */
+/* enum: Medford Wireside datapath loopback */
+/* MC_CMD_LOOPBACK_DATA_WS 0x24 */
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+/* MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_LBN 64
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_OFST 12
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_LBN 96
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_LBN 128
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_OFST 20
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_LBN 160
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_LBN 192
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_OFST 28
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_LBN 224
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_LBN 256
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_OFST 36
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_LBN 288
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 25G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_LBN 320
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_OFST 44
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_LBN 352
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 50G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_LBN 384
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_OFST 52
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_LBN 416
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 100G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_LBN 448
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_OFST 60
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_LBN 480
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 200G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_OFST 64
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_OFST 64
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_LBN 512
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_OFST 68
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_LBN 544
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
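
A decoding sketch, not part of the header, for the new 64-bit V3 loopback masks: each mask is split into _LO/_HI dwords at the byte offsets above, and the loopback enums are bit numbers ("enum property: bitshift"), so testing for example MC_CMD_LOOPBACK_DATA support at 200G might look like the following. The helper names are hypothetical and MCDI little-endian layout is assumed.

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: assumes the MC_CMD_GET_LOOPBACK_MODES_OUT_V3_* and
 * MC_CMD_LOOPBACK_* values above are in scope.
 */
static uint64_t get_le64(const uint8_t *buf, unsigned int ofst)
{
	uint64_t v = 0;
	unsigned int i;

	for (i = 0; i < 8; i++)
		v |= (uint64_t)buf[ofst + i] << (8 * i);
	return v;
}

static bool loopback_200g_supports(const uint8_t *outbuf, unsigned int mode)
{
	uint64_t mask = get_le64(outbuf,
				 MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_OFST);

	return (mask >> mode) & 1;	/* e.g. mode == MC_CMD_LOOPBACK_DATA */
}
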
@@ -5673,13 +5221,835 @@
#define FEC_TYPE_TYPE_LEN 4
/* enum: No FEC */
#define MC_CMD_FEC_NONE 0x0
-/* enum: Clause 74 BASE-R FEC (a.k.a Firecode) */
+/* enum: IEEE 802.3, Clause 74 BASE-R FEC (a.k.a Firecode) */
#define MC_CMD_FEC_BASER 0x1
-/* enum: Clause 91/Clause 108 Reed-Solomon FEC */
+/* enum: IEEE 802.3, Clause 91/Clause 108 Reed-Solomon FEC */
#define MC_CMD_FEC_RS 0x2
+/* enum: IEEE 802.3, Clause 161, interleaved RS-FEC sublayer for 100GBASE-R
+ * PHYs
+ */
+#define MC_CMD_FEC_IEEE_RS_INT 0x3
+/* enum: Ethernet Consortium, Low Latency RS-FEC. RS(272, 258). Replaces FEC
+ * specified in Clause 119 for 100/200G PHY. Replaces FEC specified in Clause
+ * 134 for 50G PHY.
+ */
+#define MC_CMD_FEC_ETCS_RS_LL 0x4
+/* enum: FEC mode selected automatically */
+#define MC_CMD_FEC_AUTO 0x5
#define FEC_TYPE_TYPE_LBN 0
#define FEC_TYPE_TYPE_WIDTH 32
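
For reference, a small illustrative mapping of the extended FEC_TYPE enum to printable names, e.g. for logging a negotiated FEC mode. This is a sketch, not part of the generated header; the function name is hypothetical.

/* Sketch only: assumes the MC_CMD_FEC_* enum values above are in scope. */
static const char *fec_type_name(unsigned int fec)
{
	switch (fec) {
	case MC_CMD_FEC_NONE:        return "none";
	case MC_CMD_FEC_BASER:       return "BASE-R (Firecode)";
	case MC_CMD_FEC_RS:          return "RS-FEC";
	case MC_CMD_FEC_IEEE_RS_INT: return "interleaved RS-FEC (Clause 161)";
	case MC_CMD_FEC_ETCS_RS_LL:  return "low-latency RS-FEC (ETC)";
	case MC_CMD_FEC_AUTO:        return "auto";
	default:                     return "unknown";
	}
}
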
+/* MC_CMD_ETH_TECH structuredef: Ethernet technology as defined by IEEE802.3,
+ * Ethernet Technology Consortium, proprietary technologies. The driver must
+ * not use technologies labelled NONE and AUTO.
+ */
+#define MC_CMD_ETH_TECH_LEN 16
+/* The enums in this field can be used either as bitwise indices into a tech
+ * mask (e.g. see MC_CMD_ETH_AN_FIELDS/TECH_MASK) or as regular
+ * enums (e.g. see MC_CMD_LINK_CTRL_IN/ADVERTISED_TECH_ABILITIES_MASK). This
+ * structure must be updated to add new technologies when projects that need
+ * them arise. An incomplete list of possible future expansions includes:
+ * 100GBASE_KP4, 800GBASE_CR8, 800GBASE_KR8, 800GBASE_DR8, 800GBASE_SR8
+ * 800GBASE_VR8
+ */
+#define MC_CMD_ETH_TECH_TECH_OFST 0
+#define MC_CMD_ETH_TECH_TECH_LEN 16
+/* enum: 1000BASE-KX - 1000BASE-X PCS/PMA over an electrical backplane PMD. See
+ * IEEE 802.3 Clause 70
+ */
+#define MC_CMD_ETH_TECH_1000BASEKX 0x0
+/* enum: 10GBASE-R - PCS/PMA over an electrical backplane PMD. Refer to IEEE
+ * 802.3 Clause 72
+ */
+#define MC_CMD_ETH_TECH_10GBASE_KR 0x1
+/* enum: 40GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 84.
+ */
+#define MC_CMD_ETH_TECH_40GBASE_KR4 0x2
+/* enum: 40GBASE-R PCS/PMA over 4 lane shielded copper balanced cable PMD. See
+ * IEEE 802.3 Clause 85
+ */
+#define MC_CMD_ETH_TECH_40GBASE_CR4 0x3
+/* enum: 40GBASE-R PCS/PMA over 4 lane multimode fiber PMD as specified in
+ * Clause 86
+ */
+#define MC_CMD_ETH_TECH_40GBASE_SR4 0x4
+/* enum: 40GBASE-R PCS/PMA over 4 WDM lane single mode fiber PMD with long
+ * reach. See IEEE 802.3 Clause 87
+ */
+#define MC_CMD_ETH_TECH_40GBASE_LR4 0x5
+/* enum: 25GBASE-R PCS/PMA over shielded balanced copper cable PMD. See IEEE
+ * 802.3 Clause 110
+ */
+#define MC_CMD_ETH_TECH_25GBASE_CR 0x6
+/* enum: 25GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 111
+ */
+#define MC_CMD_ETH_TECH_25GBASE_KR 0x7
+/* enum: 25GBASE-R PCS/PMA over multimode fiber PMD. Refer to IEEE 802.3 Clause
+ * 112
+ */
+#define MC_CMD_ETH_TECH_25GBASE_SR 0x8
+/* enum: An Ethernet Physical layer operating at 50 Gb/s on twin-axial copper
+ * cable. Refer to Ethernet Technology Consortium 25/50G Ethernet Spec.
+ */
+#define MC_CMD_ETH_TECH_50GBASE_CR2 0x9
+/* enum: An Ethernet Physical layer operating at 50 Gb/s on copper backplane.
+ * Refer to Ethernet Technology Consortium 25/50G Ethernet Spec.
+ */
+#define MC_CMD_ETH_TECH_50GBASE_KR2 0xa
+/* enum: 100GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 93
+ */
+#define MC_CMD_ETH_TECH_100GBASE_KR4 0xb
+/* enum: 100GBASE-R PCS/PMA over 4 lane multimode fiber PMD. See IEEE 802.3
+ * Clause 95
+ */
+#define MC_CMD_ETH_TECH_100GBASE_SR4 0xc
+/* enum: 100GBASE-R PCS/PMA over 4 lane shielded copper balanced cable PMD. See
+ * IEEE 802.3 Clause 92
+ */
+#define MC_CMD_ETH_TECH_100GBASE_CR4 0xd
+/* enum: 100GBASE-R PCS/PMA over 4 WDM lane single mode fiber PMD, with
+ * long/extended reach. See IEEE 802.3 Clause 88
+ */
+#define MC_CMD_ETH_TECH_100GBASE_LR4_ER4 0xe
+/* enum: An Ethernet Physical layer operating at 50 Gb/s on short reach fiber.
+ * Refer to Ethernet Technology Consortium 25/50G Ethernet Spec.
+ */
+#define MC_CMD_ETH_TECH_50GBASE_SR2 0xf
+/* enum: 1000BASEX PCS/PMA. See IEEE 802.3 Clause 36 over undefined PMD, duplex
+ * mode unknown
+ */
+#define MC_CMD_ETH_TECH_1000BASEX 0x10
+/* enum: Non-standardised. 10G direct attach */
+#define MC_CMD_ETH_TECH_10GBASE_CR 0x11
+/* enum: 10GBASE-SR fiber over 850nm optics. See IEEE 802.3 Clause 52 */
+#define MC_CMD_ETH_TECH_10GBASE_SR 0x12
+/* enum: 10GBASE-LR fiber over 1310nm optics. See IEEE 802.3 Clause 52 */
+#define MC_CMD_ETH_TECH_10GBASE_LR 0x13
+/* enum: 10GBASE-LRM fiber over 1310 nm optics. See IEEE 802.3 Clause 68 */
+#define MC_CMD_ETH_TECH_10GBASE_LRM 0x14
+/* enum: 10GBASE-ER fiber over 1550nm optics. See IEEE 802.3 Clause 52 */
+#define MC_CMD_ETH_TECH_10GBASE_ER 0x15
+/* enum: 50GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 137
+ */
+#define MC_CMD_ETH_TECH_50GBASE_KR 0x16
+/* enum: 50GBASE-SR PCS/PMA over multimode fiber PMD as specified in Clause 138
+ */
+#define MC_CMD_ETH_TECH_50GBASE_SR 0x17
+/* enum: 50GBASE-CR PCS/PMA over shielded copper balanced cable PMD. See IEEE
+ * 802.3 Clause 136
+ */
+#define MC_CMD_ETH_TECH_50GBASE_CR 0x18
+/* enum: 50GBASE-R PCS/PMA over single mode fiber PMD as specified in Clause
+ * 139.
+ */
+#define MC_CMD_ETH_TECH_50GBASE_LR_ER_FR 0x19
+/* enum: 100 Gb/s PHY using 100GBASE-R encoding over single-mode fiber with
+ * reach up to at least 500 m (see IEEE 802.3 Clause 140)
+ */
+#define MC_CMD_ETH_TECH_50GBASE_DR 0x1a
+/* enum: 100GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 137
+ */
+#define MC_CMD_ETH_TECH_100GBASE_KR2 0x1b
+/* enum: 100GBASE-R PCS/PMA over 2 lane multimode fiber PMD. See IEEE 802.3
+ * Clause 138
+ */
+#define MC_CMD_ETH_TECH_100GBASE_SR2 0x1c
+/* enum: 100GBASE-R PCS/PMA over 2 lane shielded copper balanced cable PMD. See
+ * IEEE 802.3 Clause 136
+ */
+#define MC_CMD_ETH_TECH_100GBASE_CR2 0x1d
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_100GBASE_LR2_ER2_FR2 0x1e
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_100GBASE_DR2 0x1f
+/* enum: 200GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 137
+ */
+#define MC_CMD_ETH_TECH_200GBASE_KR4 0x20
+/* enum: 200GBASE-R PCS/PMA over 4 lane multimode fiber PMD. See IEEE 802.3
+ * Clause 138
+ */
+#define MC_CMD_ETH_TECH_200GBASE_SR4 0x21
+/* enum: 200GBASE-R PCS/PMA over 4 WDM lane single-mode fiber PMD as specified
+ * in Clause 122
+ */
+#define MC_CMD_ETH_TECH_200GBASE_LR4_ER4_FR4 0x22
+/* enum: 200GBASE-R PCS/PMA over 4-lane single-mode fiber PMD. See IEEE 802.3
+ * Clause 121
+ */
+#define MC_CMD_ETH_TECH_200GBASE_DR4 0x23
+/* enum: 200GBASE-R PCS/PMA over 4 lane shielded copper balanced cable PMD as
+ * specified in Clause 136
+ */
+#define MC_CMD_ETH_TECH_200GBASE_CR4 0x24
+/* enum: Ethernet Technology Consortium 400G AN Spec. 400GBASE-KR8 PMD uses
+ * 802.3 Clause 137, but the number of PMD lanes is 8.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_KR8 0x25
+/* enum: 400GBASE-R PCS/PMA over 8-lane multimode fiber PMD. See IEEE 802.3
+ * Clause 138
+ */
+#define MC_CMD_ETH_TECH_400GBASE_SR8 0x26
+/* enum: 400GBASE-R PCS/PMA over 8 WDM lane single-mode fiber PMD. See IEEE
+ * 802.3 Clause 122
+ */
+#define MC_CMD_ETH_TECH_400GBASE_LR8_ER8_FR8 0x27
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_400GBASE_DR8 0x28
+/* enum: Ethernet Technology Consortium 400G AN Spec. 400GBASE-CR8 PMD uses
+ * IEEE 802.3 Clause 136, but the number of PMD lanes is 8.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_CR8 0x29
+/* enum: 100GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3ck
+ * Clause 163.
+ */
+#define MC_CMD_ETH_TECH_100GBASE_KR 0x2a
+/* enum: IEEE 802.3ck. 100G PHY with PMD as specified in Clause 167 over short
+ * reach fiber
+ */
+#define MC_CMD_ETH_TECH_100GBASE_SR 0x2b
+/* enum: 100G PMD together with single-mode fiber medium. See IEEE 802.3 Clause
+ * 140
+ */
+#define MC_CMD_ETH_TECH_100GBASE_LR_ER_FR 0x2c
+/* enum: 100GBASE-R PCS/PMA over shielded balanced copper cable PMD. See IEEE
+ * 802.3 in Clause 162 IEEE 802.3ck.
+ */
+#define MC_CMD_ETH_TECH_100GBASE_CR 0x2d
+/* enum: 100G PMD together with single-mode fiber medium. See IEEE 802.3 Clause
+ * 140
+ */
+#define MC_CMD_ETH_TECH_100GBASE_DR 0x2e
+/* enum: 200GBASE-R PCS/PMA over an electrical backplane PMD as specified in
+ * Clause 163 IEEE 802.3ck
+ */
+#define MC_CMD_ETH_TECH_200GBASE_KR2 0x2f
+/* enum: 200G PHY with PMD as specified in Clause 167 over short reach fiber
+ * IEEE 802.3ck
+ */
+#define MC_CMD_ETH_TECH_200GBASE_SR2 0x30
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_200GBASE_LR2_ER2_FR2 0x31
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_200GBASE_DR2 0x32
+/* enum: 200GBASE-R PCS/PMA over 2 lane shielded balanced copper cable PMD as
+ * specified in Clause 162 IEEE 802.3ck.
+ */
+#define MC_CMD_ETH_TECH_200GBASE_CR2 0x33
+/* enum: 400GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 163 IEEE 802.3ck.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_KR4 0x34
+/* enum: 400G PHY with PMD over short reach fiber. See Clause 167 of IEEE
+ * 802.3ck.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_SR4 0x35
+/* enum: 400GBASE-R PCS/PMA over 4 WDM lane single-mode fiber PMD. See IEEE
+ * 802.3 Clause 151
+ */
+#define MC_CMD_ETH_TECH_400GBASE_LR4_ER4_FR4 0x36
+/* enum: 400GBASE-R PCS/PMA over 4-lane single-mode fiber PMD as specified in
+ * Clause 124
+ */
+#define MC_CMD_ETH_TECH_400GBASE_DR4 0x37
+/* enum: 400GBASE-R PCS/PMA over 4 lane shielded balanced copper cable PMD as
+ * specified in Clause 162 of IEEE 802.3ck.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_CR4 0x38
+/* enum: Automatic tech mode. The driver must not use this. */
+#define MC_CMD_ETH_TECH_AUTO 0x39
+/* enum: See IEEE 802.3cc-2017 Clause 114 */
+#define MC_CMD_ETH_TECH_25GBASE_LR_ER 0x3a
+/* enum: Up to 7 m over twinaxial copper cable assembly (10 lanes, 10 Gbit/s
+ * each) See IEEE 802.3ba-2010 Clause 85
+ */
+#define MC_CMD_ETH_TECH_100GBASE_CR10 0x3b
+/* enum: Invalid tech mode. The driver must not use this. */
+#define MC_CMD_ETH_TECH_NONE 0x7f
+#define MC_CMD_ETH_TECH_TECH_LBN 0
+#define MC_CMD_ETH_TECH_TECH_WIDTH 128
+
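
Because the MC_CMD_ETH_TECH values double as bit indices into 16-byte (128-bit) technology masks such as MC_CMD_ETH_AN_FIELDS/TECH_MASK or the ADVERTISED_TECH_ABILITIES_MASK of MC_CMD_LINK_CTRL_IN below, a driver needs small set/test helpers over a byte array. A sketch, assuming the usual MCDI little-endian bit numbering (bit N lives in byte N/8, bit N%8); the helper names are hypothetical.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define ETH_TECH_MASK_LEN 16	/* matches MC_CMD_ETH_TECH_TECH_LEN above */

static void tech_mask_set(uint8_t mask[ETH_TECH_MASK_LEN], unsigned int tech)
{
	mask[tech / 8] |= 1u << (tech % 8);
}

static bool tech_mask_test(const uint8_t mask[ETH_TECH_MASK_LEN],
			   unsigned int tech)
{
	return (mask[tech / 8] >> (tech % 8)) & 1;
}

/* Example: advertise 100GBASE-CR4 only. */
static void advertise_100gbase_cr4(uint8_t mask[ETH_TECH_MASK_LEN])
{
	memset(mask, 0, ETH_TECH_MASK_LEN);
	tech_mask_set(mask, MC_CMD_ETH_TECH_100GBASE_CR4);
}
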
+/* MC_CMD_LINK_STATUS_FLAGS structuredef */
+#define MC_CMD_LINK_STATUS_FLAGS_LEN 8
+/* Flags used to report the current configuration/state of the link. */
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_OFST 0
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LEN 8
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LO_OFST 0
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LO_LEN 4
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LO_LBN 0
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LO_WIDTH 32
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_HI_OFST 4
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_HI_LEN 4
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_HI_LBN 32
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_HI_WIDTH 32
+/* enum property: bitshift */
+/* enum: Whether we have overall link up */
+#define MC_CMD_LINK_STATUS_FLAGS_LINK_UP 0x0
+/* enum: If set, the PHY has no external RX link synchronisation */
+#define MC_CMD_LINK_STATUS_FLAGS_NO_PHY_LINK 0x1
+/* enum: If set, PMD/MDI is not connected (e.g. cable disconnected, module cage
+ * empty)
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_MDI_DISCONNECTED 0x2
+/* enum: Set on error while decoding module data (e.g. module EEPROM does not
+ * contain valid values, has checksum errors, etc.)
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_BAD 0x3
+/* enum: Set when module unsupported (e.g. unsupported link rate or link
+ * technology)
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_UNSUPPORTED 0x4
+/* enum: Set on error while communicating with the module (e.g. I2C errors
+ * while reading EEPROM)
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_COMMS_FAULT 0x5
+/* enum: Set on module overcurrent/overvoltage condition */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_POWER_FAULT 0x6
+/* enum: Set on module overtemperature condition */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_THERMAL_FAULT 0x7
+/* enum: If set, the module is indicating Loss of Signal */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_LOS 0x8
+/* enum: If set, PMA is indicating loss of CDR lock (clock sync) */
+#define MC_CMD_LINK_STATUS_FLAGS_PMA_NO_CDR_LOCK 0x9
+/* enum: If set, PMA is indicating loss of analog signal */
+#define MC_CMD_LINK_STATUS_FLAGS_PMA_LOS 0xa
+/* enum: If set, PCS is indicating loss of block lock */
+#define MC_CMD_LINK_STATUS_FLAGS_PCS_NO_BLOCK_LOCK 0xb
+/* enum: If set, PCS is indicating loss of alignment marker lock on one or more
+ * lanes
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PCS_NO_AM_LOCK 0xc
+/* enum: If set, PCS is indicating loss of overall alignment lock */
+#define MC_CMD_LINK_STATUS_FLAGS_PCS_NO_ALIGN_LOCK 0xd
+/* enum: If set, PCS is indicating high bit error rate condition. */
+#define MC_CMD_LINK_STATUS_FLAGS_PCS_HI_BER 0xe
+/* enum: If set, FEC is indicating loss of FEC lock */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_NO_LOCK 0xf
+/* enum: If set, indicates that the number of symbol errors in a 8192-codeword
+ * window has exceeded the threshold K (417).
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_HI_SER 0x10
+/* enum: If set, the receiver has detected the local FEC has degraded. */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_LOCAL_DEGRADED 0x11
+/* enum: If set, the receiver has detected the remote FEC has degraded. */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_RM_DEGRADED 0x12
+/* enum: If set, the number of symbol errors is over an internal threshold. */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_DEGRADED_SER 0x13
+/* enum: If set, autonegotiation has detected an auto-negotiation capable link
+ * partner
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_AN_ABLE 0x14
+/* enum: If set, autonegotiation base page exchange has failed */
+#define MC_CMD_LINK_STATUS_FLAGS_AN_BP_FAILED 0x15
+/* enum: If set, autonegotiation next page exchange has failed */
+#define MC_CMD_LINK_STATUS_FLAGS_AN_NP_FAILED 0x16
+/* enum: If set, autonegotiation has failed to negotiate a common set of
+ * capabilities
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_AN_NO_HCD 0x17
+/* enum: If set, local end link training has failed to establish link training
+ * frame lock on one or more lanes
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_LT_NO_LOCAL_FRAME_LOCK 0x18
+/* enum: If set, remote end link training has failed to establish link training
+ * frame lock on one or more lanes
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_LT_NO_RM_FRAME_LOCK 0x19
+/* enum: If set, remote end has failed to assert Receiver Ready (link training
+ * success) within the designated timeout
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_LT_NO_RX_READY 0x1a
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LBN 0
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_WIDTH 64
+
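
The link status flags are reported as a 64-bit bitmask (again "enum property: bitshift", so each value above is a bit number). An illustrative decode helper, not part of the header, operating on a STATUS_FLAGS value already read from a response such as MC_CMD_LINK_STATE_OUT below:

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: assumes the MC_CMD_LINK_STATUS_FLAGS_* values above are in
 * scope.
 */
static bool link_status_flag_set(uint64_t flags, unsigned int flag)
{
	return (flags >> flag) & 1;
}

static bool link_is_up(uint64_t flags)
{
	return link_status_flag_set(flags, MC_CMD_LINK_STATUS_FLAGS_LINK_UP);
}

static bool module_absent(uint64_t flags)
{
	return link_status_flag_set(flags,
			MC_CMD_LINK_STATUS_FLAGS_PMD_MDI_DISCONNECTED);
}
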
+/* MC_CMD_PAUSE_MODE structuredef */
+#define MC_CMD_PAUSE_MODE_LEN 1
+#define MC_CMD_PAUSE_MODE_TYPE_OFST 0
+#define MC_CMD_PAUSE_MODE_TYPE_LEN 1
+/* enum: See IEEE 802.3 Clause 73.6.6 */
+#define MC_CMD_PAUSE_MODE_AN_PAUSE 0x0
+/* enum: See IEEE 802.3 Clause 73.6.6 */
+#define MC_CMD_PAUSE_MODE_AN_ASYM_DIR 0x1
+#define MC_CMD_PAUSE_MODE_TYPE_LBN 0
+#define MC_CMD_PAUSE_MODE_TYPE_WIDTH 8
+
+/* MC_CMD_ETH_AN_FIELDS structuredef: Fields used for IEEE 802.3 Clause 73
+ * Auto-Negotiation. Warning - This is fixed size and cannot be extended. This
+ * structure is used to define autonegotiable abilities (advertised, link
+ * partner and supported abilities).
+ */
+#define MC_CMD_ETH_AN_FIELDS_LEN 25
+/* Mask of Ethernet technologies. The bit indices in this mask are taken from
+ * the TECH field in the MC_CMD_ETH_TECH structure.
+ */
+#define MC_CMD_ETH_AN_FIELDS_TECH_MASK_OFST 0
+#define MC_CMD_ETH_AN_FIELDS_TECH_MASK_LEN 16
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+#define MC_CMD_ETH_AN_FIELDS_TECH_MASK_LBN 0
+#define MC_CMD_ETH_AN_FIELDS_TECH_MASK_WIDTH 128
+/* Mask of supported FEC modes */
+#define MC_CMD_ETH_AN_FIELDS_FEC_MASK_OFST 16
+#define MC_CMD_ETH_AN_FIELDS_FEC_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+#define MC_CMD_ETH_AN_FIELDS_FEC_MASK_LBN 128
+#define MC_CMD_ETH_AN_FIELDS_FEC_MASK_WIDTH 32
+/* Mask of requested FEC modes */
+#define MC_CMD_ETH_AN_FIELDS_FEC_REQ_OFST 20
+#define MC_CMD_ETH_AN_FIELDS_FEC_REQ_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+#define MC_CMD_ETH_AN_FIELDS_FEC_REQ_LBN 160
+#define MC_CMD_ETH_AN_FIELDS_FEC_REQ_WIDTH 32
+/* Bitmask of negotiated pause modes */
+#define MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_OFST 24
+#define MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+#define MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_LBN 192
+#define MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_WIDTH 8
+
+/* MC_CMD_LOOPBACK_V2 structuredef: Loopback modes for use with the new
+ * MC_CMD_LINK_CTRL and MC_CMD_LINK_STATE. These loopback modes are not
+ * supported in other getlink/setlink commands.
+ */
+#define MC_CMD_LOOPBACK_V2_LEN 4
+#define MC_CMD_LOOPBACK_V2_MODE_OFST 0
+#define MC_CMD_LOOPBACK_V2_MODE_LEN 4
+/* enum: No loopback */
+#define MC_CMD_LOOPBACK_V2_NONE 0x0
+/* enum: Let firmware choose a supported loopback mode */
+#define MC_CMD_LOOPBACK_V2_AUTO 0x1
+/* enum: Loopback after the MAC */
+#define MC_CMD_LOOPBACK_V2_POST_MAC 0x2
+/* enum: Loopback after the PCS */
+#define MC_CMD_LOOPBACK_V2_POST_PCS 0x3
+/* enum: Loopback after the PMA */
+#define MC_CMD_LOOPBACK_V2_POST_PMA 0x4
+/* enum: Loopback after the MDI Wireside */
+#define MC_CMD_LOOPBACK_V2_POST_MDI_WS 0x5
+/* enum: Loopback after the PMA Wireside */
+#define MC_CMD_LOOPBACK_V2_POST_PMA_WS 0x6
+/* enum: Loopback after the PCS Wireside */
+#define MC_CMD_LOOPBACK_V2_POST_PCS_WS 0x7
+/* enum: Loopback after the MAC Wireside */
+#define MC_CMD_LOOPBACK_V2_POST_MAC_WS 0x8
+/* enum: Loopback after the MAC FIFOs (before the MAC) */
+#define MC_CMD_LOOPBACK_V2_PRE_MAC 0x9
+#define MC_CMD_LOOPBACK_V2_MODE_LBN 0
+#define MC_CMD_LOOPBACK_V2_MODE_WIDTH 32
+
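
As with FEC_TYPE above, a small illustrative name table for the V2 loopback positions can make LINK_CTRL/LINK_STATE debug output readable. Sketch only, not part of the header; the function name is hypothetical.

/* Sketch only: assumes the MC_CMD_LOOPBACK_V2_* values above are in scope. */
static const char *loopback_v2_name(unsigned int mode)
{
	switch (mode) {
	case MC_CMD_LOOPBACK_V2_NONE:        return "none";
	case MC_CMD_LOOPBACK_V2_AUTO:        return "auto";
	case MC_CMD_LOOPBACK_V2_POST_MAC:    return "post-MAC";
	case MC_CMD_LOOPBACK_V2_POST_PCS:    return "post-PCS";
	case MC_CMD_LOOPBACK_V2_POST_PMA:    return "post-PMA";
	case MC_CMD_LOOPBACK_V2_POST_MDI_WS: return "post-MDI wireside";
	case MC_CMD_LOOPBACK_V2_POST_PMA_WS: return "post-PMA wireside";
	case MC_CMD_LOOPBACK_V2_POST_PCS_WS: return "post-PCS wireside";
	case MC_CMD_LOOPBACK_V2_POST_MAC_WS: return "post-MAC wireside";
	case MC_CMD_LOOPBACK_V2_PRE_MAC:     return "pre-MAC (MAC FIFOs)";
	default:                             return "unknown";
	}
}
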
+/* MC_CMD_FCNTL structuredef */
+#define MC_CMD_FCNTL_LEN 4
+#define MC_CMD_FCNTL_MASK_OFST 0
+#define MC_CMD_FCNTL_MASK_LEN 4
+/* enum: Flow control is off. */
+#define MC_CMD_FCNTL_OFF 0x0
+/* enum: Respond to flow control. */
+#define MC_CMD_FCNTL_RESPOND 0x1
+/* enum: Respond to and Issue flow control. */
+#define MC_CMD_FCNTL_BIDIR 0x2
+/* enum: Auto negotiate flow control. */
+#define MC_CMD_FCNTL_AUTO 0x3
+/* enum: Priority flow control. This is only supported on KSB. */
+#define MC_CMD_FCNTL_QBB 0x4
+/* enum: Issue flow control. */
+#define MC_CMD_FCNTL_GENERATE 0x5
+#define MC_CMD_FCNTL_MASK_LBN 0
+#define MC_CMD_FCNTL_MASK_WIDTH 32
+
+/* MC_CMD_LINK_FLAGS structuredef */
+#define MC_CMD_LINK_FLAGS_LEN 4
+/* The enums defined in this field are used as indices into the
+ * MC_CMD_LINK_FLAGS bitmask.
+ */
+#define MC_CMD_LINK_FLAGS_MASK_OFST 0
+#define MC_CMD_LINK_FLAGS_MASK_LEN 4
+/* enum property: bitshift */
+/* enum: Enable auto-negotiation. If AN is enabled, link technology and FEC
+ * mode are determined by advertised capabilities and requested FEC modes,
+ * combined with link partner capabilities. If AN is disabled, link technology
+ * is forced to LINK_TECHNOLOGY and FEC mode is forced to FEC_MODE. Not valid
+ * if loopback is enabled
+ */
+#define MC_CMD_LINK_FLAGS_AUTONEG_EN 0x0
+/* enum: Enable parallel detect. In addition to AN, try to sense partner forced
+ * speed/FEC mode (when partner AN disabled). Only valid if AN is enabled.
+ */
+#define MC_CMD_LINK_FLAGS_PARALLEL_DETECT_EN 0x1
+/* enum: Force link down, in electrical idle. */
+#define MC_CMD_LINK_FLAGS_LINK_DISABLE 0x2
+/* enum: Ignore the sequence number and always apply. */
+#define MC_CMD_LINK_FLAGS_IGNORE_MODULE_SEQ 0x3
+#define MC_CMD_LINK_FLAGS_MASK_LBN 0
+#define MC_CMD_LINK_FLAGS_MASK_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_LINK_CTRL
+ * Write the unified MAC/PHY link configuration. Locks required: None. Return
+ * code: 0, EINVAL, ETIME, EAGAIN
+ */
+#define MC_CMD_LINK_CTRL 0x6b
+#undef MC_CMD_0x6b_PRIVILEGE_CTG
+
+#define MC_CMD_0x6b_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_LINK_CTRL_IN msgrequest */
+#define MC_CMD_LINK_CTRL_IN_LEN 40
+/* Handle to the port to set link state for. */
+#define MC_CMD_LINK_CTRL_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_LINK_CTRL_IN_PORT_HANDLE_LEN 4
+/* Control flags */
+#define MC_CMD_LINK_CTRL_IN_CONTROL_FLAGS_OFST 4
+#define MC_CMD_LINK_CTRL_IN_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_FLAGS/MASK */
+/* Reserved for future expansion, and included to provide padding for alignment
+ * purposes.
+ */
+#define MC_CMD_LINK_CTRL_IN_RESERVED_OFST 8
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LEN 8
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LO_OFST 8
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LO_LEN 4
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LO_LBN 64
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LO_WIDTH 32
+#define MC_CMD_LINK_CTRL_IN_RESERVED_HI_OFST 12
+#define MC_CMD_LINK_CTRL_IN_RESERVED_HI_LEN 4
+#define MC_CMD_LINK_CTRL_IN_RESERVED_HI_LBN 96
+#define MC_CMD_LINK_CTRL_IN_RESERVED_HI_WIDTH 32
+/* Technology abilities to advertise during auto-negotiation */
+#define MC_CMD_LINK_CTRL_IN_ADVERTISED_TECH_ABILITIES_MASK_OFST 16
+#define MC_CMD_LINK_CTRL_IN_ADVERTISED_TECH_ABILITIES_MASK_LEN 16
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Pause abilities to advertise during auto-negotiation. Valid when auto-
+ * negotiation is enabled and MC_CMD_SET_MAC_IN/FCTL is set to
+ * MC_CMD_FCNTL_AUTO. If auto-negotiation is disabled the driver must
+ * explicitly configure pause mode with MC_CMD_SET_MAC.
+ */
+#define MC_CMD_LINK_CTRL_IN_ADVERTISED_PAUSE_ABILITIES_MASK_OFST 32
+#define MC_CMD_LINK_CTRL_IN_ADVERTISED_PAUSE_ABILITIES_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+/* When auto-negotiation is enabled, this is the FEC mode to request. Note that
+ * a weaker FEC mode may get negotiated, depending on what the link partner
+ * supports. The driver should subsequently use MC_CMD_GET_LINK to check the
+ * actual negotiated FEC mode. When auto-negotiation is disabled, this is the
+ * forced FEC mode.
+ */
+#define MC_CMD_LINK_CTRL_IN_FEC_MODE_OFST 33
+#define MC_CMD_LINK_CTRL_IN_FEC_MODE_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* This is only to be used when auto-negotiation is disabled (forced speed or
+ * loopback mode). If the specified value does not align with the values
+ * defined in the enum MC_CMD_ETH_TECH/TECH, it is considered invalid.
+ */
+#define MC_CMD_LINK_CTRL_IN_LINK_TECHNOLOGY_OFST 36
+#define MC_CMD_LINK_CTRL_IN_LINK_TECHNOLOGY_LEN 2
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* The sequence number of the last MODULECHANGE event. If this doesn't match,
+ * fail with EAGAIN.
+ */
+#define MC_CMD_LINK_CTRL_IN_MODULE_SEQ_OFST 38
+#define MC_CMD_LINK_CTRL_IN_MODULE_SEQ_LEN 1
+/* Loopback Mode. Only valid when auto-negotiation is disabled. */
+#define MC_CMD_LINK_CTRL_IN_LOOPBACK_OFST 39
+#define MC_CMD_LINK_CTRL_IN_LOOPBACK_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+
+/* MC_CMD_LINK_CTRL_OUT msgresponse */
+#define MC_CMD_LINK_CTRL_OUT_LEN 0
+
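
Putting the pieces together, an illustrative sketch (not the driver's actual implementation) of filling the 40-byte MC_CMD_LINK_CTRL_IN request for an auto-negotiated link, using the byte offsets defined above and assuming MCDI's little-endian layout. Per the MODULE_SEQ description, the caller should be prepared to retry on EAGAIN if the module sequence number is stale. The helper names are hypothetical.

#include <stdint.h>
#include <string.h>

/* Sketch only: assumes the MC_CMD_LINK_CTRL_IN_* and MC_CMD_LINK_FLAGS_*
 * macros above are in scope.
 */
static void put_le32(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	buf[ofst + 0] = val & 0xff;
	buf[ofst + 1] = (val >> 8) & 0xff;
	buf[ofst + 2] = (val >> 16) & 0xff;
	buf[ofst + 3] = (val >> 24) & 0xff;
}

static void build_link_ctrl_autoneg(uint8_t inbuf[MC_CMD_LINK_CTRL_IN_LEN],
				    uint32_t port_handle,
				    const uint8_t tech_mask[16],
				    uint8_t pause_mask, uint8_t fec_req,
				    uint8_t module_seq)
{
	memset(inbuf, 0, MC_CMD_LINK_CTRL_IN_LEN);
	put_le32(inbuf, MC_CMD_LINK_CTRL_IN_PORT_HANDLE_OFST, port_handle);
	/* CONTROL_FLAGS is a bitshift-style mask of MC_CMD_LINK_FLAGS values */
	put_le32(inbuf, MC_CMD_LINK_CTRL_IN_CONTROL_FLAGS_OFST,
		 1u << MC_CMD_LINK_FLAGS_AUTONEG_EN);
	memcpy(inbuf + MC_CMD_LINK_CTRL_IN_ADVERTISED_TECH_ABILITIES_MASK_OFST,
	       tech_mask,
	       MC_CMD_LINK_CTRL_IN_ADVERTISED_TECH_ABILITIES_MASK_LEN);
	inbuf[MC_CMD_LINK_CTRL_IN_ADVERTISED_PAUSE_ABILITIES_MASK_OFST] = pause_mask;
	inbuf[MC_CMD_LINK_CTRL_IN_FEC_MODE_OFST] = fec_req;
	inbuf[MC_CMD_LINK_CTRL_IN_MODULE_SEQ_OFST] = module_seq;
	/* LINK_TECHNOLOGY and LOOPBACK are left at 0 here; per the field
	 * descriptions they only apply when auto-negotiation is disabled.
	 */
}
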
+
+/***********************************/
+/* MC_CMD_LINK_STATE
+ */
+#define MC_CMD_LINK_STATE 0x6c
+#undef MC_CMD_0x6c_PRIVILEGE_CTG
+
+#define MC_CMD_0x6c_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_LINK_STATE_IN msgrequest */
+#define MC_CMD_LINK_STATE_IN_LEN 4
+/* Handle to the port to get link state for. */
+#define MC_CMD_LINK_STATE_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_LINK_STATE_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_LINK_STATE_OUT msgresponse */
+#define MC_CMD_LINK_STATE_OUT_LEN 114
+/* Flags used to report the current configuration/state of the link. */
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_OFST 0
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LEN 8
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_OFST 0
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_LEN 4
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_LBN 0
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_WIDTH 32
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_OFST 4
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_LEN 4
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_LBN 32
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_WIDTH 32
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_STATUS_FLAGS/STATUS_FLAGS */
+/* Configured technology. If the specified value does not align with the values
+ * defined in the enum MC_CMD_ETH_TECH/TECH, it is considered invalid.
+ */
+#define MC_CMD_LINK_STATE_OUT_LINK_TECHNOLOGY_OFST 8
+#define MC_CMD_LINK_STATE_OUT_LINK_TECHNOLOGY_LEN 2
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Configured FEC mode */
+#define MC_CMD_LINK_STATE_OUT_FEC_MODE_OFST 10
+#define MC_CMD_LINK_STATE_OUT_FEC_MODE_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* Bitmask of auto-negotiated pause modes */
+#define MC_CMD_LINK_STATE_OUT_PAUSE_MASK_OFST 11
+#define MC_CMD_LINK_STATE_OUT_PAUSE_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+/* Configured loopback mode */
+#define MC_CMD_LINK_STATE_OUT_LOOPBACK_OFST 12
+#define MC_CMD_LINK_STATE_OUT_LOOPBACK_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* Abilities requested by the driver to advertise during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_OFST 16
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_LEN 32
+/* See structuredef: MC_CMD_ETH_AN_FIELDS */
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_TECH_MASK_OFST 16
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_TECH_MASK_LEN 16
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_FEC_MASK_OFST 32
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_FEC_MASK_LEN 4
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_FEC_REQ_OFST 36
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_FEC_REQ_LEN 4
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_PAUSE_MASK_OFST 40
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_PAUSE_MASK_LEN 1
+/* Abilities advertised by the link partner during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_OFST 48
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_LEN 32
+/* See structuredef: MC_CMD_ETH_AN_FIELDS */
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_TECH_MASK_OFST 48
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_TECH_MASK_LEN 16
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_FEC_MASK_OFST 64
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_FEC_MASK_LEN 4
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_FEC_REQ_OFST 68
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_FEC_REQ_LEN 4
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_PAUSE_MASK_OFST 72
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_PAUSE_MASK_LEN 1
+/* Abilities supported by the local device (including cable abilities). For
+ * fixed local device capabilities see MC_CMD_GET_LOCAL_DEVICE_INFO
+ */
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_OFST 80
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_LEN 28
+/* See structuredef: MC_CMD_ETH_AN_FIELDS */
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_TECH_MASK_OFST 80
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_TECH_MASK_LEN 16
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_FEC_MASK_OFST 96
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_FEC_MASK_LEN 4
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_FEC_REQ_OFST 100
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_FEC_REQ_LEN 4
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_PAUSE_MASK_OFST 104
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_PAUSE_MASK_LEN 1
+/* Control flags */
+#define MC_CMD_LINK_STATE_OUT_CONTROL_FLAGS_OFST 108
+#define MC_CMD_LINK_STATE_OUT_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_FLAGS/MASK */
+/* Sequence number to synchronize link change events */
+#define MC_CMD_LINK_STATE_OUT_PORT_LINKCHANGE_SEQ_NUM_OFST 112
+#define MC_CMD_LINK_STATE_OUT_PORT_LINKCHANGE_SEQ_NUM_LEN 1
+/* Sequence number to synchronize module change events */
+#define MC_CMD_LINK_STATE_OUT_PORT_MODULECHANGE_SEQ_NUM_OFST 113
+#define MC_CMD_LINK_STATE_OUT_PORT_MODULECHANGE_SEQ_NUM_LEN 1
+
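
And the reverse direction: an illustrative parse (again a sketch under the layout above, assuming MCDI's little-endian encoding, not the driver's actual code) of the fixed scalar fields at the head of MC_CMD_LINK_STATE_OUT. The struct and function names are hypothetical.

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: assumes the MC_CMD_LINK_STATE_OUT_* and
 * MC_CMD_LINK_STATUS_FLAGS_* macros above are in scope.
 */
struct link_state_summary {
	bool link_up;
	uint16_t technology;	/* MC_CMD_ETH_TECH/TECH value */
	uint8_t fec_mode;	/* FEC_TYPE/TYPE value */
	uint8_t pause_mask;	/* bitmask of MC_CMD_PAUSE_MODE/TYPE */
	uint8_t loopback;	/* MC_CMD_LOOPBACK_V2/MODE value */
	uint8_t linkchange_seq;
	uint8_t modulechange_seq;
};

static void parse_link_state_out(const uint8_t *outbuf,
				 struct link_state_summary *s)
{
	uint64_t flags = 0;
	unsigned int i;

	for (i = 0; i < MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LEN; i++)
		flags |= (uint64_t)outbuf[MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_OFST + i]
			 << (8 * i);
	s->link_up = (flags >> MC_CMD_LINK_STATUS_FLAGS_LINK_UP) & 1;
	s->technology = outbuf[MC_CMD_LINK_STATE_OUT_LINK_TECHNOLOGY_OFST] |
			(outbuf[MC_CMD_LINK_STATE_OUT_LINK_TECHNOLOGY_OFST + 1] << 8);
	s->fec_mode = outbuf[MC_CMD_LINK_STATE_OUT_FEC_MODE_OFST];
	s->pause_mask = outbuf[MC_CMD_LINK_STATE_OUT_PAUSE_MASK_OFST];
	s->loopback = outbuf[MC_CMD_LINK_STATE_OUT_LOOPBACK_OFST];
	s->linkchange_seq =
		outbuf[MC_CMD_LINK_STATE_OUT_PORT_LINKCHANGE_SEQ_NUM_OFST];
	s->modulechange_seq =
		outbuf[MC_CMD_LINK_STATE_OUT_PORT_MODULECHANGE_SEQ_NUM_OFST];
}
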
+/* MC_CMD_LINK_STATE_OUT_V2 msgresponse: Updated LINK_STATE_OUT with
+ * LOCAL_AN_SUPPORT
+ */
+#define MC_CMD_LINK_STATE_OUT_V2_LEN 120
+/* Flags used to report the current configuration/state of the link. */
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_OFST 0
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LEN 8
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LO_OFST 0
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LO_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LO_LBN 0
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LO_WIDTH 32
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_HI_OFST 4
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_HI_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_HI_LBN 32
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_HI_WIDTH 32
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_STATUS_FLAGS/STATUS_FLAGS */
+/* Configured technology. If the specified value does not align with the values
+ * defined in the enum MC_CMD_ETH_TECH/TECH, it is considered invalid.
+ */
+#define MC_CMD_LINK_STATE_OUT_V2_LINK_TECHNOLOGY_OFST 8
+#define MC_CMD_LINK_STATE_OUT_V2_LINK_TECHNOLOGY_LEN 2
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Configured FEC mode */
+#define MC_CMD_LINK_STATE_OUT_V2_FEC_MODE_OFST 10
+#define MC_CMD_LINK_STATE_OUT_V2_FEC_MODE_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* Bitmask of auto-negotiated pause modes */
+#define MC_CMD_LINK_STATE_OUT_V2_PAUSE_MASK_OFST 11
+#define MC_CMD_LINK_STATE_OUT_V2_PAUSE_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+/* Configured loopback mode */
+#define MC_CMD_LINK_STATE_OUT_V2_LOOPBACK_OFST 12
+#define MC_CMD_LINK_STATE_OUT_V2_LOOPBACK_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* Abilities requested by the driver to advertise during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_V2_ADVERTISED_ABILITIES_OFST 16
+#define MC_CMD_LINK_STATE_OUT_V2_ADVERTISED_ABILITIES_LEN 32
+/* Abilities advertised by the link partner during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_V2_LINK_PARTNER_ABILITIES_OFST 48
+#define MC_CMD_LINK_STATE_OUT_V2_LINK_PARTNER_ABILITIES_LEN 32
+/* Abilities supported by the local device (including cable abilities). For
+ * fixed local device capabilities see MC_CMD_GET_LOCAL_DEVICE_INFO.
+ */
+#define MC_CMD_LINK_STATE_OUT_V2_SUPPORTED_ABILITIES_OFST 80
+#define MC_CMD_LINK_STATE_OUT_V2_SUPPORTED_ABILITIES_LEN 28
+/* Control flags */
+#define MC_CMD_LINK_STATE_OUT_V2_CONTROL_FLAGS_OFST 108
+#define MC_CMD_LINK_STATE_OUT_V2_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_FLAGS/MASK */
+/* Sequence number to synchronize link change events */
+#define MC_CMD_LINK_STATE_OUT_V2_PORT_LINKCHANGE_SEQ_NUM_OFST 112
+#define MC_CMD_LINK_STATE_OUT_V2_PORT_LINKCHANGE_SEQ_NUM_LEN 1
+/* Sequence number to synchronize module change events */
+#define MC_CMD_LINK_STATE_OUT_V2_PORT_MODULECHANGE_SEQ_NUM_OFST 113
+#define MC_CMD_LINK_STATE_OUT_V2_PORT_MODULECHANGE_SEQ_NUM_LEN 1
+/* Reports the auto-negotiation supported by the local device. This depends on
+ * the port and module properties.
+ */
+#define MC_CMD_LINK_STATE_OUT_V2_LOCAL_AN_SUPPORT_OFST 116
+#define MC_CMD_LINK_STATE_OUT_V2_LOCAL_AN_SUPPORT_LEN 4
+/* Enum values, see field(s): */
+/* AN_TYPE/TYPE */
+
+/* MC_CMD_LINK_STATE_OUT_V3 msgresponse: Updated LINK_STATE_OUT_V2 for explicit
+ * reporting of the link speed and duplex mode.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_LEN 128
+/* Flags used to report the current configuration/state of the link. */
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_OFST 0
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LEN 8
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LO_OFST 0
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LO_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LO_LBN 0
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LO_WIDTH 32
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_HI_OFST 4
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_HI_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_HI_LBN 32
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_HI_WIDTH 32
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_STATUS_FLAGS/STATUS_FLAGS */
+/* Configured technology. If the specified value does not align with the values
+ * defined in the enum MC_CMD_ETH_TECH/TECH, it is considered invalid.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_TECHNOLOGY_OFST 8
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_TECHNOLOGY_LEN 2
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Configured FEC mode */
+#define MC_CMD_LINK_STATE_OUT_V3_FEC_MODE_OFST 10
+#define MC_CMD_LINK_STATE_OUT_V3_FEC_MODE_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* Bitmask of auto-negotiated pause modes */
+#define MC_CMD_LINK_STATE_OUT_V3_PAUSE_MASK_OFST 11
+#define MC_CMD_LINK_STATE_OUT_V3_PAUSE_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+/* Configured loopback mode */
+#define MC_CMD_LINK_STATE_OUT_V3_LOOPBACK_OFST 12
+#define MC_CMD_LINK_STATE_OUT_V3_LOOPBACK_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* Abilities requested by the driver to advertise during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_V3_ADVERTISED_ABILITIES_OFST 16
+#define MC_CMD_LINK_STATE_OUT_V3_ADVERTISED_ABILITIES_LEN 32
+/* Abilities advertised by the link partner during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_PARTNER_ABILITIES_OFST 48
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_PARTNER_ABILITIES_LEN 32
+/* Abilities supported by the local device (including cable abilities). For
+ * fixed local device capabilities see MC_CMD_GET_LOCAL_DEVICE_INFO.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_SUPPORTED_ABILITIES_OFST 80
+#define MC_CMD_LINK_STATE_OUT_V3_SUPPORTED_ABILITIES_LEN 28
+/* Control flags */
+#define MC_CMD_LINK_STATE_OUT_V3_CONTROL_FLAGS_OFST 108
+#define MC_CMD_LINK_STATE_OUT_V3_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_FLAGS/MASK */
+/* Sequence number to synchronize link change events */
+#define MC_CMD_LINK_STATE_OUT_V3_PORT_LINKCHANGE_SEQ_NUM_OFST 112
+#define MC_CMD_LINK_STATE_OUT_V3_PORT_LINKCHANGE_SEQ_NUM_LEN 1
+/* Sequence number to synchronize module change events */
+#define MC_CMD_LINK_STATE_OUT_V3_PORT_MODULECHANGE_SEQ_NUM_OFST 113
+#define MC_CMD_LINK_STATE_OUT_V3_PORT_MODULECHANGE_SEQ_NUM_LEN 1
+/* Reports the auto-negotiation supported by the local device. This depends on
+ * the port and module properties.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_LOCAL_AN_SUPPORT_OFST 116
+#define MC_CMD_LINK_STATE_OUT_V3_LOCAL_AN_SUPPORT_LEN 4
+/* Enum values, see field(s): */
+/* AN_TYPE/TYPE */
+/* Auto-negotiated speed in Mbit/s. The link may still be down even if this
+ * reads non-zero. The LINK_SPEED field is intended for drivers without the
+ * most up-to-date MCDI definitions, which cannot deduce the link speed from
+ * the reported LINK_TECHNOLOGY field.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_SPEED_OFST 120
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_SPEED_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V3_FLAGS_OFST 124
+#define MC_CMD_LINK_STATE_OUT_V3_FLAGS_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V3_FULL_DUPLEX_OFST 124
+#define MC_CMD_LINK_STATE_OUT_V3_FULL_DUPLEX_LBN 0
+#define MC_CMD_LINK_STATE_OUT_V3_FULL_DUPLEX_WIDTH 1
+
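Not part of the MCDI definitions above: a minimal consumer-side sketch showing how the new V3 fields might be read from a raw little-endian response buffer, using only the offsets and bit positions defined in this block. A real driver would normally go through its own MCDI accessor helpers; the byte-level helper below is purely illustrative.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Read a little-endian 32-bit MCDI field at the given byte offset. */
static uint32_t mcdi_le32(const uint8_t *buf, unsigned int ofst)
{
	return (uint32_t)buf[ofst] |
	       ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) |
	       ((uint32_t)buf[ofst + 3] << 24);
}

/* Extract the explicitly reported speed and duplex from a V3 response. */
static int decode_link_state_v3(const uint8_t *resp, size_t resp_len,
				uint32_t *speed_mbps, bool *full_duplex)
{
	uint32_t flags;

	if (resp_len < MC_CMD_LINK_STATE_OUT_V3_LEN)
		return -1;	/* response too short for the V3 layout */

	*speed_mbps = mcdi_le32(resp, MC_CMD_LINK_STATE_OUT_V3_LINK_SPEED_OFST);
	flags = mcdi_le32(resp, MC_CMD_LINK_STATE_OUT_V3_FLAGS_OFST);
	*full_duplex = (flags >> MC_CMD_LINK_STATE_OUT_V3_FULL_DUPLEX_LBN) & 1u;
	return 0;
}

Drivers that predate the MC_CMD_ETH_TECH enum can rely on LINK_SPEED and FULL_DUPLEX alone, which is exactly why the V3 response adds them.
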
/***********************************/
/* MC_CMD_GET_LINK
@@ -5694,6 +6064,54 @@
/* MC_CMD_GET_LINK_IN msgrequest */
#define MC_CMD_GET_LINK_IN_LEN 0
+/* MC_CMD_GET_LINK_IN_V2 msgrequest */
+#define MC_CMD_GET_LINK_IN_V2_LEN 8
+/* Target port to request link state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
+ */
+#define MC_CMD_GET_LINK_IN_V2_TARGET_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_LBN 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_OFST 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_LBN 32
+#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LINK_END_OFST 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_LBN 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_OFST 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_LBN 32
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
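For illustration only, a sketch of writing the 64-bit TARGET selector into a GET_LINK_IN_V2 request as two little-endian dwords (LO then HI). It assumes the caller has already built a MAE_LINK_ENDPOINT_SELECTOR value and zero-initialised a request buffer of MC_CMD_GET_LINK_IN_V2_LEN bytes; neither of those steps is shown here.

#include <stdint.h>

/* Store a little-endian 32-bit MCDI field at the given byte offset. */
static void mcdi_put_le32(uint8_t *buf, unsigned int ofst, uint32_t v)
{
	buf[ofst + 0] = (uint8_t)v;
	buf[ofst + 1] = (uint8_t)(v >> 8);
	buf[ofst + 2] = (uint8_t)(v >> 16);
	buf[ofst + 3] = (uint8_t)(v >> 24);
}

/* Write a pre-built MAE_LINK_ENDPOINT_SELECTOR value into TARGET. */
static void get_link_in_v2_set_target(uint8_t *req, uint64_t target)
{
	mcdi_put_le32(req, MC_CMD_GET_LINK_IN_V2_TARGET_LO_OFST,
		      (uint32_t)target);
	mcdi_put_le32(req, MC_CMD_GET_LINK_IN_V2_TARGET_HI_OFST,
		      (uint32_t)(target >> 32));
}
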
/* MC_CMD_GET_LINK_OUT msgresponse */
#define MC_CMD_GET_LINK_OUT_LEN 28
/* Near-side advertised capabilities. Refer to
@@ -5745,6 +6163,7 @@
/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
#define MC_CMD_GET_LINK_OUT_FCNTL_LEN 4
+/* enum property: value */
/* Enum values, see field(s): */
/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
@@ -5813,6 +6232,7 @@
/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_V2_FCNTL_OFST 20
#define MC_CMD_GET_LINK_OUT_V2_FCNTL_LEN 4
+/* enum property: value */
/* Enum values, see field(s): */
/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_OFST 24
@@ -5969,6 +6389,95 @@
#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_LBN 7
#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_WIDTH 1
+/* MC_CMD_SET_LINK_IN_V3 msgrequest */
+#define MC_CMD_SET_LINK_IN_V3_LEN 28
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_SET_LINK_IN_V3_CAP_OFST 0
+#define MC_CMD_SET_LINK_IN_V3_CAP_LEN 4
+/* Flags */
+#define MC_CMD_SET_LINK_IN_V3_FLAGS_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_FLAGS_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_LOWPOWER_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_LOWPOWER_LBN 0
+#define MC_CMD_SET_LINK_IN_V3_LOWPOWER_WIDTH 1
+#define MC_CMD_SET_LINK_IN_V3_POWEROFF_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_POWEROFF_LBN 1
+#define MC_CMD_SET_LINK_IN_V3_POWEROFF_WIDTH 1
+#define MC_CMD_SET_LINK_IN_V3_TXDIS_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_TXDIS_LBN 2
+#define MC_CMD_SET_LINK_IN_V3_TXDIS_WIDTH 1
+#define MC_CMD_SET_LINK_IN_V3_LINKDOWN_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_LINKDOWN_LBN 3
+#define MC_CMD_SET_LINK_IN_V3_LINKDOWN_WIDTH 1
+/* Loopback mode. */
+#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_MODE_OFST 8
+#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_MODE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+/* A loopback speed of "0" is supported, and means (choose any available
+ * speed).
+ */
+#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_SPEED_OFST 12
+#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_SPEED_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_OFST 16
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_LEN 1
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_NUMBER_OFST 16
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_NUMBER_LBN 0
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_NUMBER_WIDTH 7
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_IGNORE_OFST 16
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_IGNORE_LBN 7
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_IGNORE_WIDTH 1
+/* Padding */
+#define MC_CMD_SET_LINK_IN_V3_RESERVED_OFST 17
+#define MC_CMD_SET_LINK_IN_V3_RESERVED_LEN 3
+/* Target port to set link state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
+ */
+#define MC_CMD_SET_LINK_IN_V3_TARGET_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LEN 8
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_LBN 160
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_WIDTH 32
+#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_OFST 24
+#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_LBN 192
+#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FLAT_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_TYPE_OFST 23
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 160
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 180
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 176
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 22
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LINK_END_OFST 24
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LINK_END_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LEN 8
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_LBN 160
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_OFST 24
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_LBN 192
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_WIDTH 32
+
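As a small illustration of the MODULE_SEQ sub-fields above, the sketch below packs the 7-bit sequence number and the IGNORE flag into the single MODULE_SEQ byte of a SET_LINK_IN_V3 request; the rest of the request is assumed to be filled in elsewhere.

#include <stdbool.h>
#include <stdint.h>

/* Pack MODULE_SEQ: a 7-bit sequence number plus the IGNORE flag in bit 7. */
static void set_link_in_v3_module_seq(uint8_t *req, uint8_t seq, bool ignore)
{
	uint8_t val = seq &
		      ((1u << MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_NUMBER_WIDTH) - 1);

	if (ignore)
		val |= 1u << MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_IGNORE_LBN;
	req[MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_OFST] = val;
}
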
/* MC_CMD_SET_LINK_OUT msgresponse */
#define MC_CMD_SET_LINK_OUT_LEN 0
@@ -6034,17 +6543,17 @@
#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
#define MC_CMD_SET_MAC_IN_FCNTL_LEN 4
/* enum: Flow control is off. */
-#define MC_CMD_FCNTL_OFF 0x0
+/* MC_CMD_FCNTL_OFF 0x0 */
/* enum: Respond to flow control. */
-#define MC_CMD_FCNTL_RESPOND 0x1
+/* MC_CMD_FCNTL_RESPOND 0x1 */
/* enum: Respond to and Issue flow control. */
-#define MC_CMD_FCNTL_BIDIR 0x2
-/* enum: Auto neg flow control. */
-#define MC_CMD_FCNTL_AUTO 0x3
-/* enum: Priority flow control (eftest builds only). */
-#define MC_CMD_FCNTL_QBB 0x4
+/* MC_CMD_FCNTL_BIDIR 0x2 */
+/* enum: Auto negotiate flow control. */
+/* MC_CMD_FCNTL_AUTO 0x3 */
+/* enum: Priority flow control. This is only supported on KSB. */
+/* MC_CMD_FCNTL_QBB 0x4 */
/* enum: Issue flow control. */
-#define MC_CMD_FCNTL_GENERATE 0x5
+/* MC_CMD_FCNTL_GENERATE 0x5 */
#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24
#define MC_CMD_SET_MAC_IN_FLAGS_LEN 4
#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_OFST 24
@@ -6086,9 +6595,9 @@
/* MC_CMD_FCNTL_RESPOND 0x1 */
/* enum: Respond to and Issue flow control. */
/* MC_CMD_FCNTL_BIDIR 0x2 */
-/* enum: Auto neg flow control. */
+/* enum: Auto negotiate flow control. */
/* MC_CMD_FCNTL_AUTO 0x3 */
-/* enum: Priority flow control (eftest builds only). */
+/* enum: Priority flow control. This is only supported on KSB. */
/* MC_CMD_FCNTL_QBB 0x4 */
/* enum: Issue flow control. */
/* MC_CMD_FCNTL_GENERATE 0x5 */
@@ -6155,9 +6664,9 @@
/* MC_CMD_FCNTL_RESPOND 0x1 */
/* enum: Respond to and Issue flow control. */
/* MC_CMD_FCNTL_BIDIR 0x2 */
-/* enum: Auto neg flow control. */
+/* enum: Auto negotiate flow control. */
/* MC_CMD_FCNTL_AUTO 0x3 */
-/* enum: Priority flow control (eftest builds only). */
+/* enum: Priority flow control. This is only supported on KSB. */
/* MC_CMD_FCNTL_QBB 0x4 */
/* enum: Issue flow control. */
/* MC_CMD_FCNTL_GENERATE 0x5 */
@@ -6188,19 +6697,9 @@
#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_OFST 28
#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_LBN 4
#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_WIDTH 1
-/* Identifies the MAC to update by the specifying the end of a logical MAE
- * link. Setting TARGET to MAE_LINK_ENDPOINT_COMPAT is equivalent to using the
- * previous version of the command (MC_CMD_SET_MAC_EXT). Not all possible
- * combinations of MPORT_END and MPORT_SELECTOR in TARGET will work in all
- * circumstances. 1. Some will always work (e.g. a VF can always address its
- * logical MAC using MPORT_SELECTOR=ASSIGNED,LINK_END=VNIC), 2. Some are not
- * meaningful and will always fail with EINVAL (e.g. attempting to address the
- * VNIC end of a link to a physical port), 3. Some are meaningful but require
- * the MCDI client to have the required permission and fail with EPERM
- * otherwise (e.g. trying to set the MAC on a VF the caller cannot administer),
- * and 4. Some could be implementation-specific and fail with ENOTSUP if not
- * available (no examples exist right now). See SF-123581-TC section 4.3 for
- * more details.
+/* Target port to set MAC state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
*/
#define MC_CMD_SET_MAC_V3_IN_TARGET_OFST 32
#define MC_CMD_SET_MAC_V3_IN_TARGET_LEN 8
@@ -6212,6 +6711,7 @@
#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_LEN 4
#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_LBN 288
#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_OFST 32
#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_LEN 4
#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FLAT_OFST 32
@@ -6405,6 +6905,98 @@
#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
#define MC_CMD_MAC_STATS_IN_PORT_ID_LEN 4
+/* MC_CMD_MAC_STATS_V2_IN msgrequest */
+#define MC_CMD_MAC_STATS_V2_IN_LEN 28
+/* Address of the host DMA buffer that statistics are written to. */
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_OFST 0
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LEN 8
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_LBN 0
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_LBN 32
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_WIDTH 32
+#define MC_CMD_MAC_STATS_V2_IN_CMD_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_CMD_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_DMA_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_DMA_LBN 0
+#define MC_CMD_MAC_STATS_V2_IN_DMA_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_CLEAR_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_CLEAR_LBN 1
+#define MC_CMD_MAC_STATS_V2_IN_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CHANGE_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_ENABLE_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CLEAR_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CLEAR_LBN 4
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_NOEVENT_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIOD_MS_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIOD_MS_LBN 16
+#define MC_CMD_MAC_STATS_V2_IN_PERIOD_MS_WIDTH 16
+/* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as
+ * returned by MC_CMD_GET_CAPABILITIES_V4_OUT. For legacy firmware not
+ * supporting MC_CMD_GET_CAPABILITIES_V4_OUT, DMA_LEN should be set to
+ * MC_CMD_MAC_NSTATS * sizeof(uint64_t).
+ */
+#define MC_CMD_MAC_STATS_V2_IN_DMA_LEN_OFST 12
+#define MC_CMD_MAC_STATS_V2_IN_DMA_LEN_LEN 4
+/* Port ID, so that vadapter statistics can be provided. */
+#define MC_CMD_MAC_STATS_V2_IN_PORT_ID_OFST 16
+#define MC_CMD_MAC_STATS_V2_IN_PORT_ID_LEN 4
+/* Target port to request statistics for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
+ */
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LEN 8
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_LBN 160
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_WIDTH 32
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_OFST 24
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_LBN 192
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FLAT_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_TYPE_OFST 23
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 160
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 180
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 176
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 22
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LINK_END_OFST 24
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LINK_END_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LEN 8
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_LBN 160
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_OFST 24
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_LBN 192
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_WIDTH 32
+
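The DMA_LEN rule above is easy to get wrong, so here is a hedged sketch of the length calculation. It assumes the caller passes the MAC_STATS_NUM_STATS count reported by MC_CMD_GET_CAPABILITIES_V4_OUT and treats zero as "not reported" (legacy firmware); that zero convention is an assumption of this sketch, not something the MCDI definition states.

#include <stddef.h>
#include <stdint.h>

/* Choose the statistics DMA buffer length as described for DMA_LEN above. */
static size_t mac_stats_dma_len(unsigned int mac_stats_num_stats)
{
	/* Assumption: 0 means the firmware did not report a count, so fall
	 * back to the fixed MC_CMD_MAC_NSTATS defined earlier in this header.
	 */
	if (mac_stats_num_stats == 0)
		return MC_CMD_MAC_NSTATS * sizeof(uint64_t);
	return (size_t)mac_stats_num_stats * sizeof(uint64_t);
}
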
/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
@@ -6421,6 +7013,7 @@
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_LBN 32
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
+/* enum property: index */
#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */
#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */
@@ -6583,6 +7176,7 @@
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_LBN 32
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2
+/* enum property: index */
/* enum: Start of FEC stats buffer space, Medford2 and up */
#define MC_CMD_MAC_FEC_DMABUF_START 0x61
/* enum: Number of uncorrected FEC codewords on link (RS-FEC only for Medford2)
@@ -6622,6 +7216,7 @@
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_LBN 32
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3
+/* enum property: index */
/* enum: Start of CTPIO stats buffer space, Medford2 and up */
#define MC_CMD_MAC_CTPIO_DMABUF_START 0x68
/* enum: Number of CTPIO fallbacks because a DMA packet was in progress on the
@@ -6702,6 +7297,7 @@
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_LBN 32
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V4
+/* enum property: index */
/* enum: Start of V4 stats buffer space */
#define MC_CMD_MAC_V4_DMABUF_START 0x79
/* enum: RXDP counter: Number of packets truncated because scattering was
@@ -6723,112 +7319,35 @@
/* Other enum values, see field(s): */
/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA/STATISTICS */
-
-/***********************************/
-/* MC_CMD_SRIOV
- * to be documented
- */
-#define MC_CMD_SRIOV 0x30
-
-/* MC_CMD_SRIOV_IN msgrequest */
-#define MC_CMD_SRIOV_IN_LEN 12
-#define MC_CMD_SRIOV_IN_ENABLE_OFST 0
-#define MC_CMD_SRIOV_IN_ENABLE_LEN 4
-#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4
-#define MC_CMD_SRIOV_IN_VI_BASE_LEN 4
-#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
-#define MC_CMD_SRIOV_IN_VF_COUNT_LEN 4
-
-/* MC_CMD_SRIOV_OUT msgresponse */
-#define MC_CMD_SRIOV_OUT_LEN 8
-#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
-#define MC_CMD_SRIOV_OUT_VI_SCALE_LEN 4
-#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
-#define MC_CMD_SRIOV_OUT_VF_TOTAL_LEN 4
-
-/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
-/* this is only used for the first record */
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LEN 8
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST 8
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_LBN 64
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST 12
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_LBN 96
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST 20
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LEN 8
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST 20
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_LBN 160
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST 24
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_LBN 192
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_MEMCPY
- * DMA write data into (Rid,Addr), either by dma reading (Rid,Addr), or by data
- * embedded directly in the command.
- *
- * A common pattern is for a client to use generation counts to signal a dma
- * update of a datastructure. To facilitate this, this MCDI operation can
- * contain multiple requests which are executed in strict order. Requests take
- * the form of duplicating the entire MCDI request continuously (including the
- * requests record, which is ignored in all but the first structure)
- *
- * The source data can either come from a DMA from the host, or it can be
- * embedded within the request directly, thereby eliminating a DMA read. To
- * indicate this, the client sets FROM_RID=%RID_INLINE, ADDR_HI=0, and
- * ADDR_LO=offset, and inserts the data at %offset from the start of the
- * payload. It's the callers responsibility to ensure that the embedded data
- * doesn't overlap the records.
- *
- * Returns: 0, EINVAL (invalid RID)
- */
-#define MC_CMD_MEMCPY 0x31
-
-/* MC_CMD_MEMCPY_IN msgrequest */
-#define MC_CMD_MEMCPY_IN_LENMIN 32
-#define MC_CMD_MEMCPY_IN_LENMAX 224
-#define MC_CMD_MEMCPY_IN_LENMAX_MCDI2 992
-#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num))
-#define MC_CMD_MEMCPY_IN_RECORD_NUM(len) (((len)-0)/32)
-/* see MC_CMD_MEMCPY_RECORD_TYPEDEF */
-#define MC_CMD_MEMCPY_IN_RECORD_OFST 0
-#define MC_CMD_MEMCPY_IN_RECORD_LEN 32
-#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1
-#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM 7
-#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM_MCDI2 31
-
-/* MC_CMD_MEMCPY_OUT msgresponse */
-#define MC_CMD_MEMCPY_OUT_LEN 0
+/* MC_CMD_MAC_STATS_V5_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V5_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V5_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V5*64))>>3)
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LO_LEN 4
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LO_LBN 0
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LO_WIDTH 32
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_HI_LEN 4
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_HI_LBN 32
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V5
+/* enum property: index */
+/* enum: Start of V5 stats buffer space */
+#define MC_CMD_MAC_V5_DMABUF_START 0x7c
+/* enum: Link toggle counter: Number of times the link has toggled between
+ * up/down and down/up.
+ */
+#define MC_CMD_MAC_LINK_TOGGLES 0x7c
+/* enum: This includes the space at offset 125, which holds the final
+ * GENERATION_END in a MAC_STATS_V5 response and is otherwise unused.
+ */
+#define MC_CMD_MAC_NSTATS_V5 0x7e
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_V4_OUT_NO_DMA/STATISTICS */
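
As an informal illustration of the STATISTICS layout above (8 bytes per counter, LO dword first), the sketch below reads one 64-bit counter from a raw MAC_STATS_V5_OUT_NO_DMA response buffer; for example, mac_stats_v5_stat(resp, MC_CMD_MAC_LINK_TOGGLES) would return the new link toggle count.

#include <stdint.h>

/* Read one 64-bit little-endian statistic from the NO_DMA response. */
static uint64_t mac_stats_v5_stat(const uint8_t *resp, unsigned int idx)
{
	const uint8_t *p = resp +
			   MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_OFST +
			   idx * MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LEN;
	uint64_t val = 0;
	unsigned int i;

	for (i = 0; i < MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LEN; i++)
		val |= (uint64_t)p[i] << (8 * i);
	return val;
}
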
/***********************************/
@@ -6984,6 +7503,7 @@
#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4
#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
#define MC_CMD_WOL_FILTER_RESET_IN_MASK_LEN 4
+/* enum property: bitmask */
#define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */
#define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */
@@ -6992,23 +7512,6 @@
/***********************************/
-/* MC_CMD_SET_MCAST_HASH
- * Set the MCAST hash value without otherwise reconfiguring the MAC
- */
-#define MC_CMD_SET_MCAST_HASH 0x35
-
-/* MC_CMD_SET_MCAST_HASH_IN msgrequest */
-#define MC_CMD_SET_MCAST_HASH_IN_LEN 32
-#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0
-#define MC_CMD_SET_MCAST_HASH_IN_HASH0_LEN 16
-#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16
-#define MC_CMD_SET_MCAST_HASH_IN_HASH1_LEN 16
-
-/* MC_CMD_SET_MCAST_HASH_OUT msgresponse */
-#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_NVRAM_TYPES
* Return bitfield indicating available types of virtual NVRAM partitions.
* Locks required: none. Returns: 0
@@ -7026,6 +7529,7 @@
/* Bit mask of supported types. */
#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
#define MC_CMD_NVRAM_TYPES_OUT_TYPES_LEN 4
+/* enum property: bitshift */
/* enum: Disabled callisto. */
#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
/* enum: MC firmware. */
@@ -7152,6 +7656,12 @@
#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_OFST 12
#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7
#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITE_ONLY_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITE_ONLY_LBN 8
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITE_ONLY_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_SEQUENTIAL_WRITE_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_SEQUENTIAL_WRITE_LBN 9
+#define MC_CMD_NVRAM_INFO_V2_OUT_SEQUENTIAL_WRITE_WIDTH 1
#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16
#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_LEN 4
#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20
@@ -7499,6 +8009,128 @@
#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_COPY_FAILED 0x19
/* enum: The update operation is in-progress. */
#define MC_CMD_NVRAM_VERIFY_RC_PENDING 0x1a
+/* enum: The update was an invalid user configuration file. */
+#define MC_CMD_NVRAM_VERIFY_RC_BAD_CONFIG 0x1b
+/* enum: The write was to the AUTO partition but the data was not recognised as
+ * a valid partition.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_UNKNOWN_TYPE 0x1c
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT msgresponse */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_LEN 88
+/* Result of NVRAM update completion processing. Result codes that indicate an
+ * internal build failure, and are therefore not expected to be seen by
+ * customers in the field, are marked with the prefix 'Internal-error'.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_RESULT_CODE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_RESULT_CODE_LEN 4
+/* enum: Invalid return code; only non-zero values are defined. Defined as
+ * unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_UNKNOWN 0x0 */
+/* enum: Verify succeeded without any errors. */
+/* MC_CMD_NVRAM_VERIFY_RC_SUCCESS 0x1 */
+/* enum: CMS format verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED 0x2 */
+/* enum: Invalid CMS format in image metadata. */
+/* MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT 0x3 */
+/* enum: Message digest verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED 0x4 */
+/* enum: Error in message digest calculated over the reflash-header, payload
+ * and reflash-trailer.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST 0x5 */
+/* enum: Signature verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED 0x6 */
+/* enum: There are no valid signatures in the image. */
+/* MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES 0x7 */
+/* enum: Trusted approvers verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED 0x8 */
+/* enum: The Trusted approver's list is empty. */
+/* MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS 0x9 */
+/* enum: Signature chain verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED 0xa */
+/* enum: The signers of the signatures in the image are not listed in the
+ * Trusted approver's list.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH 0xb */
+/* enum: The image contains a test-signed certificate, but the adapter accepts
+ * only production signed images.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc */
+/* enum: The image has a lower security level than the current firmware. */
+/* MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd */
+/* enum: Internal-error. The signed image is missing the 'contents' section,
+ * where the 'contents' section holds the actual image payload to be applied.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_CONTENT_NOT_FOUND 0xe */
+/* enum: Internal-error. The bundle header is invalid. */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_CONTENT_HEADER_INVALID 0xf */
+/* enum: Internal-error. The bundle does not have a valid reflash image layout.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_REFLASH_IMAGE_INVALID 0x10 */
+/* enum: Internal-error. The bundle has an inconsistent layout of components or
+ * incorrect checksum.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_IMAGE_LAYOUT_INVALID 0x11 */
+/* enum: Internal-error. The bundle manifest is inconsistent with components in
+ * the bundle.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_INVALID 0x12 */
+/* enum: Internal-error. The number of components in a bundle does not match
+ * the number of components advertised by the bundle manifest.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_NUM_COMPONENTS_MISMATCH 0x13 */
+/* enum: Internal-error. The bundle contains too many components for the MC
+ * firmware to process.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_TOO_MANY_COMPONENTS 0x14 */
+/* enum: Internal-error. The bundle manifest has an invalid/inconsistent
+ * component.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_INVALID 0x15 */
+/* enum: Internal-error. The hash of a component does not match the hash stored
+ * in the bundle manifest.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_MISMATCH 0x16 */
+/* enum: Internal-error. Component hash calculation failed. */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_FAILED 0x17 */
+/* enum: Internal-error. The component does not have a valid reflash image
+ * layout.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_REFLASH_IMAGE_INVALID 0x18 */
+/* enum: The bundle processing code failed to copy a component to its target
+ * partition.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_COPY_FAILED 0x19 */
+/* enum: The update operation is in-progress. */
+/* MC_CMD_NVRAM_VERIFY_RC_PENDING 0x1a */
+/* enum: The update was an invalid user configuration file. */
+/* MC_CMD_NVRAM_VERIFY_RC_BAD_CONFIG 0x1b */
+/* enum: The write was to the AUTO partition but the data was not recognised as
+ * a valid partition.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_UNKNOWN_TYPE 0x1c */
+/* If the update was a user configuration, what action(s) the user must take to
+ * apply the new configuration.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ACTIONS_REQUIRED_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ACTIONS_REQUIRED_LEN 4
+/* enum: No action required. */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_NONE 0x0
+/* enum: The MC firmware must be rebooted (eg with MC_CMD_REBOOT). */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_FIRMWARE_REBOOT 0x1
+/* enum: The host must be rebooted. */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_HOST_REBOOT 0x2
+/* enum: The firmware and host must be rebooted (in either order). */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_FIRMWARE_AND_HOST_REBOOT 0x3
+/* enum: The host must be fully powered off. */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_HOST_POWERCYCLE 0x4
+/* If the update failed with MC_CMD_NVRAM_VERIFY_RC_BAD_CONFIG, a null-
+ * terminated US-ASCII string suitable for showing to the user.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ERROR_STRING_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ERROR_STRING_LEN 80
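
To make the ACTIONS_REQUIRED values above concrete, a small sketch mapping an already-extracted ACTIONS_REQUIRED dword (read from MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ACTIONS_REQUIRED_OFST of the response) to a user-facing hint; the strings are illustrative, not part of MCDI.

#include <stdint.h>

/* Map an ACTIONS_REQUIRED value to a human-readable hint. */
static const char *nvram_update_action_str(uint32_t actions)
{
	switch (actions) {
	case MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_NONE:
		return "no action required";
	case MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_FIRMWARE_REBOOT:
		return "reboot the MC firmware (e.g. with MC_CMD_REBOOT)";
	case MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_HOST_REBOOT:
		return "reboot the host";
	case MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_FIRMWARE_AND_HOST_REBOOT:
		return "reboot the firmware and the host (in either order)";
	case MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_HOST_POWERCYCLE:
		return "fully power off the host";
	default:
		return "unknown action";
	}
}
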
/***********************************/
@@ -7522,7 +8154,7 @@
#define MC_CMD_REBOOT 0x3d
#undef MC_CMD_0x3d_PRIVILEGE_CTG
-#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_REBOOT_IN msgrequest */
#define MC_CMD_REBOOT_IN_LEN 4
@@ -7535,65 +8167,6 @@
/***********************************/
-/* MC_CMD_SCHEDINFO
- * Request scheduler info. Locks required: NONE. Returns: An array of
- * (timeslice,maximum overrun), one for each thread, in ascending order of
- * thread address.
- */
-#define MC_CMD_SCHEDINFO 0x3e
-#undef MC_CMD_0x3e_PRIVILEGE_CTG
-
-#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SCHEDINFO_IN msgrequest */
-#define MC_CMD_SCHEDINFO_IN_LEN 0
-
-/* MC_CMD_SCHEDINFO_OUT msgresponse */
-#define MC_CMD_SCHEDINFO_OUT_LENMIN 4
-#define MC_CMD_SCHEDINFO_OUT_LENMAX 252
-#define MC_CMD_SCHEDINFO_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_SCHEDINFO_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_SCHEDINFO_OUT_DATA_NUM(len) (((len)-0)/4)
-#define MC_CMD_SCHEDINFO_OUT_DATA_OFST 0
-#define MC_CMD_SCHEDINFO_OUT_DATA_LEN 4
-#define MC_CMD_SCHEDINFO_OUT_DATA_MINNUM 1
-#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM 63
-#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM_MCDI2 255
-
-
-/***********************************/
-/* MC_CMD_REBOOT_MODE
- * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot
- * mode to the specified value. Returns the old mode.
- */
-#define MC_CMD_REBOOT_MODE 0x3f
-#undef MC_CMD_0x3f_PRIVILEGE_CTG
-
-#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_REBOOT_MODE_IN msgrequest */
-#define MC_CMD_REBOOT_MODE_IN_LEN 4
-#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
-#define MC_CMD_REBOOT_MODE_IN_VALUE_LEN 4
-/* enum: Normal. */
-#define MC_CMD_REBOOT_MODE_NORMAL 0x0
-/* enum: Power-on Reset. */
-#define MC_CMD_REBOOT_MODE_POR 0x2
-/* enum: Snapper. */
-#define MC_CMD_REBOOT_MODE_SNAPPER 0x3
-/* enum: snapper fake POR */
-#define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4
-#define MC_CMD_REBOOT_MODE_IN_FAKE_OFST 0
-#define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7
-#define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1
-
-/* MC_CMD_REBOOT_MODE_OUT msgresponse */
-#define MC_CMD_REBOOT_MODE_OUT_LEN 4
-#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
-#define MC_CMD_REBOOT_MODE_OUT_VALUE_LEN 4
-
-
-/***********************************/
/* MC_CMD_SENSOR_INFO
* Returns information about every available sensor.
*
@@ -8061,6 +8634,54 @@
/* MC_CMD_GET_PHY_STATE_IN msgrequest */
#define MC_CMD_GET_PHY_STATE_IN_LEN 0
+/* MC_CMD_GET_PHY_STATE_IN_V2 msgrequest */
+#define MC_CMD_GET_PHY_STATE_IN_V2_LEN 8
+/* Target port to request PHY state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
+ */
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_LBN 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_OFST 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_LBN 32
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LINK_END_OFST 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_LBN 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_OFST 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_LBN 32
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
@@ -8072,22 +8693,6 @@
/***********************************/
-/* MC_CMD_SETUP_8021QBB
- * 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to
- * disable 802.Qbb for a given priority.
- */
-#define MC_CMD_SETUP_8021QBB 0x44
-
-/* MC_CMD_SETUP_8021QBB_IN msgrequest */
-#define MC_CMD_SETUP_8021QBB_IN_LEN 32
-#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFST 0
-#define MC_CMD_SETUP_8021QBB_IN_TXQS_LEN 32
-
-/* MC_CMD_SETUP_8021QBB_OUT msgresponse */
-#define MC_CMD_SETUP_8021QBB_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_WOL_FILTER_GET
* Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS
*/
@@ -8106,133 +8711,6 @@
/***********************************/
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD
- * Add a protocol offload to NIC for lights-out state. Locks required: None.
- * Returns: 0, ENOSYS
- */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
-#undef MC_CMD_0x46_PRIVILEGE_CTG
-
-#define MC_CMD_0x46_PRIVILEGE_CTG SRIOV_CTG_LINK
-
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX_MCDI2 1020
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_NUM(len) (((len)-4)/4)
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
-#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
-#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM 62
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM_MCDI2 254
-
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_LEN 4
-
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_LEN 16
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_OFST 26
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_LEN 16
-
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_LEN 4
-
-
-/***********************************/
-/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD
- * Remove a protocol offload from NIC for lights-out state. Locks required:
- * None. Returns: 0, ENOSYS
- */
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
-#undef MC_CMD_0x47_PRIVILEGE_CTG
-
-#define MC_CMD_0x47_PRIVILEGE_CTG SRIOV_CTG_LINK
-
-/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_LEN 4
-
-/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_MAC_RESET_RESTORE
- * Restore MAC after block reset. Locks required: None. Returns: 0.
- */
-#define MC_CMD_MAC_RESET_RESTORE 0x48
-
-/* MC_CMD_MAC_RESET_RESTORE_IN msgrequest */
-#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
-
-/* MC_CMD_MAC_RESET_RESTORE_OUT msgresponse */
-#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_TESTASSERT
- * Deliberately trigger an assert-detonation in the firmware for testing
- * purposes (i.e. to allow tests that the driver copes gracefully). Locks
- * required: None Returns: 0
- */
-#define MC_CMD_TESTASSERT 0x49
-#undef MC_CMD_0x49_PRIVILEGE_CTG
-
-#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_TESTASSERT_IN msgrequest */
-#define MC_CMD_TESTASSERT_IN_LEN 0
-
-/* MC_CMD_TESTASSERT_OUT msgresponse */
-#define MC_CMD_TESTASSERT_OUT_LEN 0
-
-/* MC_CMD_TESTASSERT_V2_IN msgrequest */
-#define MC_CMD_TESTASSERT_V2_IN_LEN 4
-/* How to provoke the assertion */
-#define MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0
-#define MC_CMD_TESTASSERT_V2_IN_TYPE_LEN 4
-/* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless
- * you're testing firmware, this is what you want.
- */
-#define MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0
-/* enum: Assert using assert(0); */
-#define MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1
-/* enum: Deliberately trigger a watchdog */
-#define MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2
-/* enum: Deliberately trigger a trap by loading from an invalid address */
-#define MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3
-/* enum: Deliberately trigger a trap by storing to an invalid address */
-#define MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4
-/* enum: Jump to an invalid address */
-#define MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5
-
-/* MC_CMD_TESTASSERT_V2_OUT msgresponse */
-#define MC_CMD_TESTASSERT_V2_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_WORKAROUND
* Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't
* understand the given workaround number - which should not be treated as a
@@ -8324,6 +8802,62 @@
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_LBN 16
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_WIDTH 16
+/* MC_CMD_GET_PHY_MEDIA_INFO_IN_V2 msgrequest */
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_LEN 12
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_PAGE_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_PAGE_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_PAGE_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_PAGE_LBN 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_PAGE_WIDTH 16
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_BANK_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_BANK_LBN 16
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_BANK_WIDTH 16
+/* Target port to request PHY state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
+ */
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_LBN 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_OFST 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_LBN 64
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 7
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 52
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 48
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 6
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LINK_END_OFST 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_LBN 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_OFST 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_LBN 64
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
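A brief sketch of packing the DSFP_PAGE and DSFP_BANK selectors into the PAGE dword of a GET_PHY_MEDIA_INFO_IN_V2 request; the caller would store the returned value little-endian at MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_PAGE_OFST.

#include <stdint.h>

/* Pack the 16-bit page and bank selectors into the PAGE dword. */
static uint32_t phy_media_info_in_v2_page(uint16_t page, uint16_t bank)
{
	return ((uint32_t)page << MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_PAGE_LBN) |
	       ((uint32_t)bank << MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_BANK_LBN);
}
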
/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252
@@ -8348,7 +8882,7 @@
#define MC_CMD_NVRAM_TEST 0x4c
#undef MC_CMD_0x4c_PRIVILEGE_CTG
-#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_NVRAM_TEST_IN msgrequest */
#define MC_CMD_NVRAM_TEST_IN_LEN 4
@@ -8370,103 +8904,6 @@
/***********************************/
-/* MC_CMD_MRSFP_TWEAK
- * Read status and/or set parameters for the 'mrsfp' driver in mr_rusty builds.
- * I2C I/O expander bits are always read; if equaliser parameters are supplied,
- * they are configured first. Locks required: None. Return code: 0, EINVAL.
- */
-#define MC_CMD_MRSFP_TWEAK 0x4d
-
-/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
-/* 0-6 low->high de-emph. */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_LEN 4
-/* 0-8 low->high ref.V */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_LEN 4
-/* 0-8 0-8 low->high boost */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_LEN 4
-/* 0-8 low->high ref.V */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_LEN 4
-
-/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
-#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
-
-/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */
-#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
-/* input bits */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_LEN 4
-/* output bits */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_LEN 4
-/* direction */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_LEN 4
-/* enum: Out. */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
-/* enum: In. */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1
-
-
-/***********************************/
-/* MC_CMD_SENSOR_SET_LIMS
- * Adjusts the sensor limits. This is a warranty-voiding operation. Returns:
- * ENOENT if the sensor specified does not exist, EINVAL if the limits are out
- * of range.
- */
-#define MC_CMD_SENSOR_SET_LIMS 0x4e
-#undef MC_CMD_0x4e_PRIVILEGE_CTG
-
-#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
-#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
-#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
-#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
-/* interpretation is is sensor-specific. */
-#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
-#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4
-/* interpretation is is sensor-specific. */
-#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
-#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4
-/* interpretation is is sensor-specific. */
-#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
-#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4
-/* interpretation is is sensor-specific. */
-#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
-#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4
-
-/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
-#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_RESOURCE_LIMITS
- */
-#define MC_CMD_GET_RESOURCE_LIMITS 0x4f
-
-/* MC_CMD_GET_RESOURCE_LIMITS_IN msgrequest */
-#define MC_CMD_GET_RESOURCE_LIMITS_IN_LEN 0
-
-/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_LEN 4
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_LEN 4
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_LEN 4
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_LEN 4
-
-
-/***********************************/
/* MC_CMD_NVRAM_PARTITIONS
* Reads the list of available virtual NVRAM partition types. Locks required:
* none. Returns: 0, EINVAL (bad type).
@@ -8582,806 +9019,6 @@
#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_LEN 4
-
-/***********************************/
-/* MC_CMD_CLP
- * Perform a CLP related operation, see SF-110495-PS for details of CLP
- * processing. This command has been extended to accomodate the requirements of
- * different manufacturers which are to be found in SF-119187-TC, SF-119186-TC,
- * SF-120509-TC and SF-117282-PS.
- */
-#define MC_CMD_CLP 0x56
-#undef MC_CMD_0x56_PRIVILEGE_CTG
-
-#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_CLP_IN msgrequest */
-#define MC_CMD_CLP_IN_LEN 4
-/* Sub operation */
-#define MC_CMD_CLP_IN_OP_OFST 0
-#define MC_CMD_CLP_IN_OP_LEN 4
-/* enum: Return to factory default settings */
-#define MC_CMD_CLP_OP_DEFAULT 0x1
-/* enum: Set MAC address */
-#define MC_CMD_CLP_OP_SET_MAC 0x2
-/* enum: Get MAC address */
-#define MC_CMD_CLP_OP_GET_MAC 0x3
-/* enum: Set UEFI/GPXE boot mode */
-#define MC_CMD_CLP_OP_SET_BOOT 0x4
-/* enum: Get UEFI/GPXE boot mode */
-#define MC_CMD_CLP_OP_GET_BOOT 0x5
-
-/* MC_CMD_CLP_OUT msgresponse */
-#define MC_CMD_CLP_OUT_LEN 0
-
-/* MC_CMD_CLP_IN_DEFAULT msgrequest */
-#define MC_CMD_CLP_IN_DEFAULT_LEN 4
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-
-/* MC_CMD_CLP_OUT_DEFAULT msgresponse */
-#define MC_CMD_CLP_OUT_DEFAULT_LEN 0
-
-/* MC_CMD_CLP_IN_SET_MAC msgrequest */
-#define MC_CMD_CLP_IN_SET_MAC_LEN 12
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00
- * restores the permanent (factory-programmed) MAC address associated with the
- * port. A non-zero MAC address persists until a PCIe reset or a power cycle.
- */
-#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
-#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
-/* Padding */
-#define MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10
-#define MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2
-
-/* MC_CMD_CLP_OUT_SET_MAC msgresponse */
-#define MC_CMD_CLP_OUT_SET_MAC_LEN 0
-
-/* MC_CMD_CLP_IN_SET_MAC_V2 msgrequest */
-#define MC_CMD_CLP_IN_SET_MAC_V2_LEN 16
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00
- * restores the permanent (factory-programmed) MAC address associated with the
- * port. A non-zero MAC address persists until a PCIe reset or a power cycle.
- */
-#define MC_CMD_CLP_IN_SET_MAC_V2_ADDR_OFST 4
-#define MC_CMD_CLP_IN_SET_MAC_V2_ADDR_LEN 6
-/* Padding */
-#define MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_OFST 10
-#define MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_LEN 2
-#define MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_OFST 12
-#define MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_LEN 4
-#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_OFST 12
-#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_LBN 0
-#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_WIDTH 1
-
-/* MC_CMD_CLP_IN_GET_MAC msgrequest */
-#define MC_CMD_CLP_IN_GET_MAC_LEN 4
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-
-/* MC_CMD_CLP_IN_GET_MAC_V2 msgrequest */
-#define MC_CMD_CLP_IN_GET_MAC_V2_LEN 8
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-#define MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_OFST 4
-#define MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_LEN 4
-#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_OFST 4
-#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_LBN 0
-#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_WIDTH 1
-
-/* MC_CMD_CLP_OUT_GET_MAC msgresponse */
-#define MC_CMD_CLP_OUT_GET_MAC_LEN 8
-/* MAC address assigned to port */
-#define MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0
-#define MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6
-/* Padding */
-#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6
-#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2
-
-/* MC_CMD_CLP_IN_SET_BOOT msgrequest */
-#define MC_CMD_CLP_IN_SET_BOOT_LEN 5
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-/* Boot flag */
-#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
-#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
-
-/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */
-#define MC_CMD_CLP_OUT_SET_BOOT_LEN 0
-
-/* MC_CMD_CLP_IN_GET_BOOT msgrequest */
-#define MC_CMD_CLP_IN_GET_BOOT_LEN 4
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-
-/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
-#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4
-/* Boot flag */
-#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0
-#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1
-/* Padding */
-#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1
-#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3
-
-
-/***********************************/
-/* MC_CMD_MUM
- * Perform a MUM operation
- */
-#define MC_CMD_MUM 0x57
-#undef MC_CMD_0x57_PRIVILEGE_CTG
-
-#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_MUM_IN msgrequest */
-#define MC_CMD_MUM_IN_LEN 4
-#define MC_CMD_MUM_IN_OP_HDR_OFST 0
-#define MC_CMD_MUM_IN_OP_HDR_LEN 4
-#define MC_CMD_MUM_IN_OP_OFST 0
-#define MC_CMD_MUM_IN_OP_LBN 0
-#define MC_CMD_MUM_IN_OP_WIDTH 8
-/* enum: NULL MCDI command to MUM */
-#define MC_CMD_MUM_OP_NULL 0x1
-/* enum: Get MUM version */
-#define MC_CMD_MUM_OP_GET_VERSION 0x2
-/* enum: Issue raw I2C command to MUM */
-#define MC_CMD_MUM_OP_RAW_CMD 0x3
-/* enum: Read from registers on devices connected to MUM. */
-#define MC_CMD_MUM_OP_READ 0x4
-/* enum: Write to registers on devices connected to MUM. */
-#define MC_CMD_MUM_OP_WRITE 0x5
-/* enum: Control UART logging. */
-#define MC_CMD_MUM_OP_LOG 0x6
-/* enum: Operations on MUM GPIO lines */
-#define MC_CMD_MUM_OP_GPIO 0x7
-/* enum: Get sensor readings from MUM */
-#define MC_CMD_MUM_OP_READ_SENSORS 0x8
-/* enum: Initiate clock programming on the MUM */
-#define MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9
-/* enum: Initiate FPGA load from flash on the MUM */
-#define MC_CMD_MUM_OP_FPGA_LOAD 0xa
-/* enum: Request sensor reading from MUM ADC resulting from earlier request via
- * MUM ATB
- */
-#define MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb
-/* enum: Send commands relating to the QSFP ports via the MUM for PHY
- * operations
- */
-#define MC_CMD_MUM_OP_QSFP 0xc
-/* enum: Request discrete and SODIMM DDR info (type, size, speed grade, voltage
- * level) from MUM
- */
-#define MC_CMD_MUM_OP_READ_DDR_INFO 0xd
-
-/* MC_CMD_MUM_IN_NULL msgrequest */
-#define MC_CMD_MUM_IN_NULL_LEN 4
-/* MUM cmd header */
-#define MC_CMD_MUM_IN_CMD_OFST 0
-#define MC_CMD_MUM_IN_CMD_LEN 4
-
-/* MC_CMD_MUM_IN_GET_VERSION msgrequest */
-#define MC_CMD_MUM_IN_GET_VERSION_LEN 4
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-
-/* MC_CMD_MUM_IN_READ msgrequest */
-#define MC_CMD_MUM_IN_READ_LEN 16
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* ID of (device connected to MUM) to read from registers of */
-#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4
-#define MC_CMD_MUM_IN_READ_DEVICE_LEN 4
-/* enum: Hittite HMC1035 clock generator on Sorrento board */
-#define MC_CMD_MUM_DEV_HITTITE 0x1
-/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
-#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2
-/* 32-bit address to read from */
-#define MC_CMD_MUM_IN_READ_ADDR_OFST 8
-#define MC_CMD_MUM_IN_READ_ADDR_LEN 4
-/* Number of words to read. */
-#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
-#define MC_CMD_MUM_IN_READ_NUMWORDS_LEN 4
-
-/* MC_CMD_MUM_IN_WRITE msgrequest */
-#define MC_CMD_MUM_IN_WRITE_LENMIN 16
-#define MC_CMD_MUM_IN_WRITE_LENMAX 252
-#define MC_CMD_MUM_IN_WRITE_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
-#define MC_CMD_MUM_IN_WRITE_BUFFER_NUM(len) (((len)-12)/4)
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* ID of (device connected to MUM) to write to registers of */
-#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
-#define MC_CMD_MUM_IN_WRITE_DEVICE_LEN 4
-/* enum: Hittite HMC1035 clock generator on Sorrento board */
-/* MC_CMD_MUM_DEV_HITTITE 0x1 */
-/* 32-bit address to write to */
-#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
-#define MC_CMD_MUM_IN_WRITE_ADDR_LEN 4
-/* Words to write */
-#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
-#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
-#define MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1
-#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60
-#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM_MCDI2 252
-
-/* MC_CMD_MUM_IN_RAW_CMD msgrequest */
-#define MC_CMD_MUM_IN_RAW_CMD_LENMIN 17
-#define MC_CMD_MUM_IN_RAW_CMD_LENMAX 252
-#define MC_CMD_MUM_IN_RAW_CMD_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_NUM(len) (((len)-16)/1)
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* MUM I2C cmd code */
-#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
-#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_LEN 4
-/* Number of bytes to write */
-#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
-#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_LEN 4
-/* Number of bytes to read */
-#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
-#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_LEN 4
-/* Bytes to write */
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM_MCDI2 1004
-
-/* MC_CMD_MUM_IN_LOG msgrequest */
-#define MC_CMD_MUM_IN_LOG_LEN 8
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_LOG_OP_OFST 4
-#define MC_CMD_MUM_IN_LOG_OP_LEN 4
-#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */
-
-/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
-#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */
-/* MC_CMD_MUM_IN_LOG_OP_LEN 4 */
-/* Enable/disable debug output to UART */
-#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
-#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO msgrequest */
-#define MC_CMD_MUM_IN_GPIO_LEN 8
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OPCODE_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
-#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
-#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */
-
-/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
-#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_LEN 4
-/* The first 32-bit word to be written to the GPIO OUT register. */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_LEN 4
-/* The second 32-bit word to be written to the GPIO OUT register. */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_LEN 4
-/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_LEN 4
-/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OP msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
-#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16
-#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8
-
-/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
-
-/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
-
-/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
-
-/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */
-#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
-#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4
-#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_OFST 4
-#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
-#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
-#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_OFST 4
-#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
-#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8
-
-/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* Bit-mask of clocks to be programmed */
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_LEN 4
-#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
-#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
-#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
-/* Control flags for clock programming */
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_OFST 8
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_OFST 8
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_WIDTH 1
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_OFST 8
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_LBN 2
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_WIDTH 1
-
-/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */
-#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* Enable/Disable FPGA config from flash */
-#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
-#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_LEN 4
-
-/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
-#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-
-/* MC_CMD_MUM_IN_QSFP msgrequest */
-#define MC_CMD_MUM_IN_QSFP_LEN 12
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_OPCODE_OFST 4
-#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
-#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
-#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
-#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_IDX_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
-#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_INIT_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_INIT_IDX_LEN 4
-#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
-#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_LEN 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_LEN 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_LEN 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_LEN 4
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_LEN 4
-
-/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */
-#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-
-/* MC_CMD_MUM_OUT msgresponse */
-#define MC_CMD_MUM_OUT_LEN 0
-
-/* MC_CMD_MUM_OUT_NULL msgresponse */
-#define MC_CMD_MUM_OUT_NULL_LEN 0
-
-/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
-#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12
-#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
-#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_LEN 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_LEN 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_LBN 32
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_WIDTH 32
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_LEN 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_LBN 64
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_WIDTH 32
-
-/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */
-#define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1
-#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252
-#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num))
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_NUM(len) (((len)-0)/1)
-/* returned data */
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM_MCDI2 1020
-
-/* MC_CMD_MUM_OUT_READ msgresponse */
-#define MC_CMD_MUM_OUT_READ_LENMIN 4
-#define MC_CMD_MUM_OUT_READ_LENMAX 252
-#define MC_CMD_MUM_OUT_READ_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num))
-#define MC_CMD_MUM_OUT_READ_BUFFER_NUM(len) (((len)-0)/4)
-#define MC_CMD_MUM_OUT_READ_BUFFER_OFST 0
-#define MC_CMD_MUM_OUT_READ_BUFFER_LEN 4
-#define MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1
-#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63
-#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM_MCDI2 255
-
-/* MC_CMD_MUM_OUT_WRITE msgresponse */
-#define MC_CMD_MUM_OUT_WRITE_LEN 0
-
-/* MC_CMD_MUM_OUT_LOG msgresponse */
-#define MC_CMD_MUM_OUT_LOG_LEN 0
-
-/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */
-#define MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
-/* The first 32-bit word read from the GPIO IN register. */
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_LEN 4
-/* The second 32-bit word read from the GPIO IN register. */
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
-/* The first 32-bit word read from the GPIO OUT register. */
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_LEN 4
-/* The second 32-bit word read from the GPIO OUT register. */
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_LEN 4
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_LEN 4
-
-/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0
-
-/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */
-#define MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4
-#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252
-#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num))
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_NUM(len) (((len)-0)/4)
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM_MCDI2 255
-#define MC_CMD_MUM_OUT_READ_SENSORS_READING_OFST 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16
-#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_OFST 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16
-#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8
-#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_OFST 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24
-#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8
-
-/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
-#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
-#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
-#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_LEN 4
-
-/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
-#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
-
-/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
-#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
-#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
-#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_LEN 4
-
-/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
-
-/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1
-
-/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_LEN 4
-
-/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_NUM(len) (((len)-4)/1)
-/* in bytes */
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM_MCDI2 1016
-
-/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_LEN 4
-
-/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_LEN 4
-
-/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX 248
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX_MCDI2 1016
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_NUM(len) (((len)-8)/8)
-/* Discrete (soldered) DDR resistor strap info */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_OFST 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_OFST 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
-/* Number of SODIMM info records */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_LEN 4
-/* Array of SODIMM info records */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_LEN 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_LBN 64
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_WIDTH 32
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_LEN 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_LBN 96
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_WIDTH 32
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM_MCDI2 126
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_LBN 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_WIDTH 8
-/* enum: SODIMM bank 1 (Top SODIMM for Sorrento) */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK1 0x0
-/* enum: SODIMM bank 2 (Bottom SODDIMM for Sorrento) */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK2 0x1
-/* enum: Total number of SODIMM banks */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_BANKS 0x2
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_LBN 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_WIDTH 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_LBN 16
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_WIDTH 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_LBN 20
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_WIDTH 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_POWERED 0x0 /* enum */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V25 0x1 /* enum */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V35 0x2 /* enum */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V5 0x3 /* enum */
-/* enum: Values 5-15 are reserved for future usage */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V8 0x4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_LBN 24
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_WIDTH 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_LBN 32
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_WIDTH 16
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN 48
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH 4
-/* enum: No module present */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_ABSENT 0x0
-/* enum: Module present supported and powered on */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_POWERED 0x1
-/* enum: Module present but bad type */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_TYPE 0x2
-/* enum: Module present but incompatible voltage */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_VOLTAGE 0x3
-/* enum: Module present but unknown SPD */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SPD 0x4
-/* enum: Module present but slot cannot support it */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SLOT 0x5
-/* enum: Modules may or may not be present, but cannot establish contact by I2C
- */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_REACHABLE 0x6
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_LBN 52
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_WIDTH 12
-
/* MC_CMD_DYNAMIC_SENSORS_LIMITS structuredef: Set of sensor limits. This
* should match the equivalent structure in the sensor_query SPHINX service.
*/
@@ -9500,27 +9137,22 @@
* and a generation count for this version of the sensor table. On systems
* advertising the DYNAMIC_SENSORS capability bit, this replaces the
* MC_CMD_READ_SENSORS command. On multi-MC systems this may include sensors
- * added by the NMC.
- *
- * Sensor handles are persistent for the lifetime of the sensor and are used to
- * identify sensors in MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS and
- * MC_CMD_DYNAMIC_SENSORS_GET_VALUES.
- *
- * The generation count is maintained by the MC, is persistent across reboots
- * and will be incremented each time the sensor table is modified. When the
- * table is modified, a CODE_DYNAMIC_SENSORS_CHANGE event will be generated
- * containing the new generation count. The driver should compare this against
- * the current generation count, and if it is different, call
- * MC_CMD_DYNAMIC_SENSORS_LIST again to update it's copy of the sensor table.
- *
- * The sensor count is provided to allow a future path to supporting more than
+ * added by the NMC. Sensor handles are persistent for the lifetime of the
+ * sensor and are used to identify sensors in
+ * MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS and
+ * MC_CMD_DYNAMIC_SENSORS_GET_VALUES. The generation count is maintained by the
+ * MC, is persistent across reboots and will be incremented each time the
+ * sensor table is modified. When the table is modified, a
+ * CODE_DYNAMIC_SENSORS_CHANGE event will be generated containing the new
+ * generation count. The driver should compare this against the current
+ * generation count, and if it is different, call MC_CMD_DYNAMIC_SENSORS_LIST
+ * again to update its copy of the sensor table. The sensor count is provided
+ * to allow a future path to supporting more than
* MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM_MCDI2 sensors, i.e.
* the maximum number that will fit in a single response. As this is a fairly
* large number (253) it is not anticipated that this will be needed in the
- * near future, so can currently be ignored.
- *
- * On Riverhead this command is implemented as a wrapper for `list` in the
- * sensor_query SPHINX service.
+ * near future, so can currently be ignored. On Riverhead this command is
+ * implemented as a wrapper for `list` in the sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_LIST 0x66
#undef MC_CMD_0x66_PRIVILEGE_CTG
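Editor's note: a minimal sketch, not part of this patch, of the generation-count handling described in the comment above. On a CODE_DYNAMIC_SENSORS_CHANGE event the driver compares the generation count carried by the event with its cached value and, if they differ, re-issues MC_CMD_DYNAMIC_SENSORS_LIST. The struct and function names below are hypothetical.

#include <stdbool.h>
#include <stdint.h>

struct sensor_table {
	uint32_t generation;	/* generation returned by the last LIST call */
	/* cached sensor handles would live here */
};

/* Hypothetical event handler: returns true when MC_CMD_DYNAMIC_SENSORS_LIST
 * must be re-issued to refresh the cached table. The cached generation is
 * then updated from the response of that new LIST call.
 */
static bool dynamic_sensors_change_event(const struct sensor_table *tbl,
					 uint32_t event_generation)
{
	return event_generation != tbl->generation;
}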
@@ -9557,15 +9189,13 @@
/***********************************/
/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS
* Get descriptions for a set of sensors, specified as an array of sensor
- * handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST
- *
- * Any handles which do not correspond to a sensor currently managed by the MC
- * will be dropped from from the response. This may happen when a sensor table
- * update is in progress, and effectively means the set of usable sensors is
- * the intersection between the sets of sensors known to the driver and the MC.
- *
- * On Riverhead this command is implemented as a wrapper for
- * `get_descriptions` in the sensor_query SPHINX service.
+ * handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST. Any handles which do not
+ * correspond to a sensor currently managed by the MC will be dropped from
+ * the response. This may happen when a sensor table update is in progress, and
+ * effectively means the set of usable sensors is the intersection between the
+ * sets of sensors known to the driver and the MC. On Riverhead this command is
+ * implemented as a wrapper for `get_descriptions` in the sensor_query SPHINX
+ * service.
*/
#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS 0x67
#undef MC_CMD_0x67_PRIVILEGE_CTG
@@ -9602,19 +9232,15 @@
/***********************************/
/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS
* Read the state and value for a set of sensors, specified as an array of
- * sensor handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST.
- *
- * In the case of a broken sensor, then the state of the response's
- * MC_CMD_DYNAMIC_SENSORS_VALUE entry will be set to BROKEN, and any value
- * provided should be treated as erroneous.
- *
- * Any handles which do not correspond to a sensor currently managed by the MC
- * will be dropped from from the response. This may happen when a sensor table
- * update is in progress, and effectively means the set of usable sensors is
- * the intersection between the sets of sensors known to the driver and the MC.
- *
- * On Riverhead this command is implemented as a wrapper for `get_readings`
- * in the sensor_query SPHINX service.
+ * sensor handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST. In the case of a
+ * broken sensor, then the state of the response's MC_CMD_DYNAMIC_SENSORS_VALUE
+ * entry will be set to BROKEN, and any value provided should be treated as
+ * erroneous. Any handles which do not correspond to a sensor currently managed
+ * by the MC will be dropped from the response. This may happen when a
+ * sensor table update is in progress, and effectively means the set of usable
+ * sensors is the intersection between the sets of sensors known to the driver
+ * and the MC. On Riverhead this command is implemented as a wrapper for
+ * `get_readings` in the sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS 0x68
#undef MC_CMD_0x68_PRIVILEGE_CTG
@@ -9647,45 +9273,1286 @@
#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM 21
#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM_MCDI2 85
+/* MC_CMD_MAC_FLAGS structuredef */
+#define MC_CMD_MAC_FLAGS_LEN 4
+/* The enums defined in this field are used as indices into the
+ * MC_CMD_MAC_FLAGS bitmask.
+ */
+#define MC_CMD_MAC_FLAGS_MASK_OFST 0
+#define MC_CMD_MAC_FLAGS_MASK_LEN 4
+/* enum property: bitshift */
+/* enum: Include the FCS in the packet data delivered to the host. Ignored if
+ * RX_INCLUDE_FCS not set in capabilities.
+ */
+#define MC_CMD_MAC_FLAGS_FLAG_INCLUDE_FCS 0x0
+#define MC_CMD_MAC_FLAGS_MASK_LBN 0
+#define MC_CMD_MAC_FLAGS_MASK_WIDTH 32
+
+/* MC_CMD_TRANSMISSION_MODE structuredef */
+#define MC_CMD_TRANSMISSION_MODE_LEN 4
+#define MC_CMD_TRANSMISSION_MODE_MASK_OFST 0
+#define MC_CMD_TRANSMISSION_MODE_MASK_LEN 4
+/* enum property: value */
+#define MC_CMD_TRANSMISSION_MODE_PROMSC_MODE 0x0 /* enum */
+#define MC_CMD_TRANSMISSION_MODE_UNCST_MODE 0x1 /* enum */
+#define MC_CMD_TRANSMISSION_MODE_BRDCST_MODE 0x2 /* enum */
+#define MC_CMD_TRANSMISSION_MODE_MASK_LBN 0
+#define MC_CMD_TRANSMISSION_MODE_MASK_WIDTH 32
+
+/* MC_CMD_MAC_CONFIG_OPTIONS structuredef */
+#define MC_CMD_MAC_CONFIG_OPTIONS_LEN 4
+#define MC_CMD_MAC_CONFIG_OPTIONS_MASK_OFST 0
+#define MC_CMD_MAC_CONFIG_OPTIONS_MASK_LEN 4
+/* enum property: bitmask */
+/* enum: Configure the MAC address. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_ADDR 0x0
+/* enum: Configure the maximum frame length. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_MAX_FRAME_LEN 0x1
+/* enum: Configure flow control. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_FCNTL 0x2
+/* enum: Configure the transmission mode. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_TRANSMISSION_MODE 0x3
+/* enum: Configure FCS. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_INCLUDE_FCS 0x4
+#define MC_CMD_MAC_CONFIG_OPTIONS_MASK_LBN 0
+#define MC_CMD_MAC_CONFIG_OPTIONS_MASK_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_MAC_CTRL
+ * Set MAC configuration. Return code: 0, EINVAL, ENOTSUP
+ */
+#define MC_CMD_MAC_CTRL 0x1df
+#undef MC_CMD_0x1df_PRIVILEGE_CTG
+
+#define MC_CMD_0x1df_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_MAC_CTRL_IN msgrequest */
+#define MC_CMD_MAC_CTRL_IN_LEN 32
+/* Handle for selected network port. */
+#define MC_CMD_MAC_CTRL_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_CTRL_IN_PORT_HANDLE_LEN 4
+/* Select which parameters to configure. A parameter will only be modified if
+ * the corresponding control flag is set.
+ */
+#define MC_CMD_MAC_CTRL_IN_CONTROL_FLAGS_OFST 4
+#define MC_CMD_MAC_CTRL_IN_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_CONFIG_OPTIONS/MASK */
+/* MAC address of the device. */
+#define MC_CMD_MAC_CTRL_IN_ADDR_OFST 8
+#define MC_CMD_MAC_CTRL_IN_ADDR_LEN 8
+#define MC_CMD_MAC_CTRL_IN_ADDR_LO_OFST 8
+#define MC_CMD_MAC_CTRL_IN_ADDR_LO_LEN 4
+#define MC_CMD_MAC_CTRL_IN_ADDR_LO_LBN 64
+#define MC_CMD_MAC_CTRL_IN_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_CTRL_IN_ADDR_HI_OFST 12
+#define MC_CMD_MAC_CTRL_IN_ADDR_HI_LEN 4
+#define MC_CMD_MAC_CTRL_IN_ADDR_HI_LBN 96
+#define MC_CMD_MAC_CTRL_IN_ADDR_HI_WIDTH 32
+/* Includes the ethernet header, optional VLAN tags, payload and FCS. */
+#define MC_CMD_MAC_CTRL_IN_MAX_FRAME_LEN_OFST 16
+#define MC_CMD_MAC_CTRL_IN_MAX_FRAME_LEN_LEN 4
+/* Settings for flow control. */
+#define MC_CMD_MAC_CTRL_IN_FCNTL_OFST 20
+#define MC_CMD_MAC_CTRL_IN_FCNTL_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_FCNTL/MASK */
+/* Configure the MAC to transmit in one of promiscuous, unicast or broadcast
+ * mode.
+ */
+#define MC_CMD_MAC_CTRL_IN_TRANSMISSION_MODE_OFST 24
+#define MC_CMD_MAC_CTRL_IN_TRANSMISSION_MODE_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_TRANSMISSION_MODE/MASK */
+/* Flags to control and expand the configuration of the MAC. */
+#define MC_CMD_MAC_CTRL_IN_FLAGS_OFST 28
+#define MC_CMD_MAC_CTRL_IN_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_FLAGS/MASK */
+
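Editor's note: the CONTROL_FLAGS field above is tagged "enum property: bitshift", so the MC_CMD_MAC_CONFIG_OPTIONS values appear to be bit positions rather than ready-made masks. Below is a minimal sketch, not part of this patch, of building a flags word that asks the MC to modify only the maximum frame length and the flow-control setting; the helper name is hypothetical and the two constants are copied from the definitions above.

#include <stdint.h>

#define CFG_MAX_FRAME_LEN 0x1	/* MC_CMD_MAC_CONFIG_OPTIONS_CFG_MAX_FRAME_LEN */
#define CFG_FCNTL 0x2		/* MC_CMD_MAC_CONFIG_OPTIONS_CFG_FCNTL */

static uint32_t mac_ctrl_control_flags_sketch(void)
{
	uint32_t control_flags = 0;

	/* Each enum value is treated as a bit position: setting a bit selects
	 * that parameter for modification, a clear bit leaves it unchanged.
	 */
	control_flags |= 1u << CFG_MAX_FRAME_LEN;
	control_flags |= 1u << CFG_FCNTL;

	return control_flags;
}

The resulting word would go in MC_CMD_MAC_CTRL_IN_CONTROL_FLAGS, with the corresponding MAX_FRAME_LEN and FCNTL fields filled in.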
+/* MC_CMD_MAC_CTRL_IN_V2 msgrequest: Updated MAC_CTRL with QBB mask */
+#define MC_CMD_MAC_CTRL_IN_V2_LEN 33
+/* Handle for selected network port. */
+#define MC_CMD_MAC_CTRL_IN_V2_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_CTRL_IN_V2_PORT_HANDLE_LEN 4
+/* Select which parameters to configure. A parameter will only be modified if
+ * the corresponding control flag is set.
+ */
+#define MC_CMD_MAC_CTRL_IN_V2_CONTROL_FLAGS_OFST 4
+#define MC_CMD_MAC_CTRL_IN_V2_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_CONFIG_OPTIONS/MASK */
+/* MAC address of the device. */
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_OFST 8
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LEN 8
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LO_OFST 8
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LO_LEN 4
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LO_LBN 64
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_HI_OFST 12
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_HI_LEN 4
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_HI_LBN 96
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_HI_WIDTH 32
+/* Includes the ethernet header, optional VLAN tags, payload and FCS. */
+#define MC_CMD_MAC_CTRL_IN_V2_MAX_FRAME_LEN_OFST 16
+#define MC_CMD_MAC_CTRL_IN_V2_MAX_FRAME_LEN_LEN 4
+/* Settings for flow control. */
+#define MC_CMD_MAC_CTRL_IN_V2_FCNTL_OFST 20
+#define MC_CMD_MAC_CTRL_IN_V2_FCNTL_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_FCNTL/MASK */
+/* Configure the MAC to transmit in one of promiscuous, unicast or broadcast
+ * mode.
+ */
+#define MC_CMD_MAC_CTRL_IN_V2_TRANSMISSION_MODE_OFST 24
+#define MC_CMD_MAC_CTRL_IN_V2_TRANSMISSION_MODE_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_TRANSMISSION_MODE/MASK */
+/* Flags to control and expand the configuration of the MAC. */
+#define MC_CMD_MAC_CTRL_IN_V2_FLAGS_OFST 28
+#define MC_CMD_MAC_CTRL_IN_V2_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_FLAGS/MASK */
+/* Priority-based flow control mask (QBB). PRIO7 corresponds to the highest
+ * priority, and PRIO0 to the lowest. This field is only used when CFG_FCNTL is
+ * set and FCNTL is QBB.
+ */
+#define MC_CMD_MAC_CTRL_IN_V2_PRIO_FCNTL_MASK_OFST 32
+#define MC_CMD_MAC_CTRL_IN_V2_PRIO_FCNTL_MASK_LEN 1
+/* enum property: bitmask */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO0 0x0 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO1 0x1 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO2 0x2 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO3 0x3 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO4 0x4 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO5 0x5 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO6 0x6 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO7 0x7 /* enum */
+
+/* MC_CMD_MAC_CTRL_OUT msgresponse */
+#define MC_CMD_MAC_CTRL_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_MAC_STATE
+ * Read the MAC state. Return code: 0, ETIME.
+ */
+#define MC_CMD_MAC_STATE 0x1e0
+#undef MC_CMD_0x1e0_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e0_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_MAC_STATE_IN msgrequest */
+#define MC_CMD_MAC_STATE_IN_LEN 4
+/* Handle for selected network port. */
+#define MC_CMD_MAC_STATE_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_STATE_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_MAC_STATE_OUT msgresponse */
+#define MC_CMD_MAC_STATE_OUT_LEN 32
+/* The configured maximum frame length of the MAC. */
+#define MC_CMD_MAC_STATE_OUT_MAX_FRAME_LEN_OFST 0
+#define MC_CMD_MAC_STATE_OUT_MAX_FRAME_LEN_LEN 4
+/* This returns the negotiated flow control value. */
+#define MC_CMD_MAC_STATE_OUT_FCNTL_OFST 4
+#define MC_CMD_MAC_STATE_OUT_FCNTL_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_FCNTL/MASK */
+/* MAC address of the device. */
+#define MC_CMD_MAC_STATE_OUT_ADDR_OFST 8
+#define MC_CMD_MAC_STATE_OUT_ADDR_LEN 8
+#define MC_CMD_MAC_STATE_OUT_ADDR_LO_OFST 8
+#define MC_CMD_MAC_STATE_OUT_ADDR_LO_LEN 4
+#define MC_CMD_MAC_STATE_OUT_ADDR_LO_LBN 64
+#define MC_CMD_MAC_STATE_OUT_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_STATE_OUT_ADDR_HI_OFST 12
+#define MC_CMD_MAC_STATE_OUT_ADDR_HI_LEN 4
+#define MC_CMD_MAC_STATE_OUT_ADDR_HI_LBN 96
+#define MC_CMD_MAC_STATE_OUT_ADDR_HI_WIDTH 32
+/* Flags indicating MAC faults. */
+#define MC_CMD_MAC_STATE_OUT_MAC_FAULT_FLAGS_OFST 16
+#define MC_CMD_MAC_STATE_OUT_MAC_FAULT_FLAGS_LEN 4
+/* enum property: bitshift */
+/* enum: Indicates a local MAC fault. */
+#define MC_CMD_MAC_STATE_OUT_LOCAL 0x0
+/* enum: Indicates a remote MAC fault. */
+#define MC_CMD_MAC_STATE_OUT_REMOTE 0x1
+/* enum: Indicates a pending reconfiguration of the MAC. */
+#define MC_CMD_MAC_STATE_OUT_PENDING_RECONFIG 0x2
+/* The flags that were used to configure the MAC. This is a copy of the FLAGS
+ * field in the MC_CMD_MAC_CTRL_IN command.
+ */
+#define MC_CMD_MAC_STATE_OUT_FLAGS_OFST 20
+#define MC_CMD_MAC_STATE_OUT_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_FLAGS/MASK */
+/* The transmission mode that was used to configure the MAC. This is a copy of
+ * the TRANSMISSION_MODE field in the MC_CMD_MAC_CTRL_IN command.
+ */
+#define MC_CMD_MAC_STATE_OUT_TRANSMISSION_MODE_OFST 24
+#define MC_CMD_MAC_STATE_OUT_TRANSMISSION_MODE_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_TRANSMISSION_MODE/MASK */
+/* The control flags that were used to configure the MAC. This is a copy of the
+ * CONTROL field in the MC_CMD_MAC_CTRL_IN command.
+ */
+#define MC_CMD_MAC_STATE_OUT_CONTROL_FLAGS_OFST 28
+#define MC_CMD_MAC_STATE_OUT_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_CONFIG_OPTIONS/MASK */
+
+
+/***********************************/
+/* MC_CMD_GET_ASSIGNED_PORT_HANDLE
+ * Obtain a handle that can be operated on to configure and query the status of
+ * the link. ENOENT is returned when no port is assigned to the client. Return
+ * code: 0, ENOENT
+ */
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE 0x1e2
+#undef MC_CMD_0x1e2_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_ASSIGNED_PORT_HANDLE_IN msgrequest */
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE_IN_LEN 0
+
+/* MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT msgresponse */
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT_LEN 4
+/* Handle for assigned port. */
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT_PORT_HANDLE_LEN 4
+
+/* MC_CMD_STAT_ID structuredef */
+#define MC_CMD_STAT_ID_LEN 4
+#define MC_CMD_STAT_ID_SOURCE_ID_OFST 0
+#define MC_CMD_STAT_ID_SOURCE_ID_LEN 2
+/* enum property: index */
+/* enum: Internal markers (generation start and end markers) */
+#define MC_CMD_STAT_ID_MARKER 0x1
+/* enum: Network port MAC statistics. */
+#define MC_CMD_STAT_ID_MAC 0x2
+/* enum: Network port PHY statistics. */
+#define MC_CMD_STAT_ID_PHY 0x3
+#define MC_CMD_STAT_ID_SOURCE_ID_LBN 0
+#define MC_CMD_STAT_ID_SOURCE_ID_WIDTH 16
+#define MC_CMD_STAT_ID_MARKER_STAT_ID_OFST 2
+#define MC_CMD_STAT_ID_MARKER_STAT_ID_LEN 2
+/* enum property: index */
+/* enum: This value is used to mark the start of a generation of statistics for
+ * DMA synchronization. It is incremented whenever a new set of statistics is
+ * transferred. Always the first entry in the DMA buffer.
+ */
+#define MC_CMD_STAT_ID_GENERATION_START 0x1
+/* enum: This value is used to mark the end of a generation of statistics for
+ * DMA synchronization. Always the last entry in the DMA buffer and set to the
+ * same value as GENERATION_START. The host driver must compare the
+ * GENERATION_START and GENERATION_END values to verify that the DMA buffer is
+ * consistent upon copying the DMA buffer. If they do not match, it means
+ * that a new DMA transfer has started while the host driver was copying the DMA
+ * buffer. In this case, the host driver must repeat the copy operation.
+ */
+#define MC_CMD_STAT_ID_GENERATION_END 0x2
+#define MC_CMD_STAT_ID_MARKER_STAT_ID_LBN 16
+#define MC_CMD_STAT_ID_MARKER_STAT_ID_WIDTH 16
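Editor's note: a minimal sketch, not part of this patch, of the consistency check described above. The stats DMA buffer starts with GENERATION_START and ends with GENERATION_END; the host copies the buffer, compares the two markers in its copy, and repeats the copy if they disagree. Function and parameter names are hypothetical, and entries are assumed to be 64-bit for the sketch.

#include <stddef.h>
#include <stdint.h>

/* Copy n_words entries out of the stats DMA buffer into out[]. The first
 * entry is GENERATION_START and the last is GENERATION_END; if they differ
 * in the copy, a new DMA transfer raced the copy and it is retried.
 */
static int stats_dma_snapshot(const volatile uint64_t *dma_buf,
			      uint64_t *out, size_t n_words)
{
	int retries = 10;	/* arbitrary bound, just for the sketch */

	while (retries--) {
		size_t i;

		for (i = 0; i < n_words; i++)
			out[i] = dma_buf[i];

		if (out[0] == out[n_words - 1])
			return 0;	/* markers match: snapshot is consistent */
	}
	return -1;	/* buffer kept changing during the copies */
}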
+#define MC_CMD_STAT_ID_MAC_STAT_ID_OFST 2
+#define MC_CMD_STAT_ID_MAC_STAT_ID_LEN 2
+/* enum property: index */
+/* enum: Total number of packets transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_PKTS 0x1
+/* enum: Pause frames transmitted. */
+#define MC_CMD_STAT_ID_TX_PAUSE_PKTS 0x2
+/* enum: Control frames transmitted. */
+#define MC_CMD_STAT_ID_TX_CONTROL_PKTS 0x3
+/* enum: Unicast packets transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_UNICAST_PKTS 0x4
+/* enum: Multicast packets transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_MULTICAST_PKTS 0x5
+/* enum: Broadcast packets transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_BROADCAST_PKTS 0x6
+/* enum: Bytes transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_BYTES 0x7
+/* enum: Bytes transmitted with bad CRC. */
+#define MC_CMD_STAT_ID_TX_BAD_BYTES 0x8
+/* enum: Bytes transmitted with good CRC. */
+#define MC_CMD_STAT_ID_TX_GOOD_BYTES 0x9
+/* enum: Packets transmitted with length less than 64 bytes. */
+#define MC_CMD_STAT_ID_TX_LT64_PKTS 0xa
+/* enum: Packets transmitted with length equal to 64 bytes. */
+#define MC_CMD_STAT_ID_TX_64_PKTS 0xb
+/* enum: Packets transmitted with length between 65 and 127 bytes. */
+#define MC_CMD_STAT_ID_TX_65_TO_127_PKTS 0xc
+/* enum: Packets transmitted with length between 128 and 255 bytes. */
+#define MC_CMD_STAT_ID_TX_128_TO_255_PKTS 0xd
+/* enum: Packets transmitted with length between 256 and 511 bytes. */
+#define MC_CMD_STAT_ID_TX_256_TO_511_PKTS 0xe
+/* enum: Packets transmitted with length between 512 and 1023 bytes. */
+#define MC_CMD_STAT_ID_TX_512_TO_1023_PKTS 0xf
+/* enum: Packets transmitted with length between 1024 and 1518 bytes. */
+#define MC_CMD_STAT_ID_TX_1024_TO_15XX_PKTS 0x10
+/* enum: Packets transmitted with length between 1519 and 9216 bytes. */
+#define MC_CMD_STAT_ID_TX_15XX_TO_JUMBO_PKTS 0x11
+/* enum: Packets transmitted with length greater than 9216 bytes. */
+#define MC_CMD_STAT_ID_TX_GTJUMBO_PKTS 0x12
+/* enum: Packets transmitted with bad FCS. */
+#define MC_CMD_STAT_ID_TX_BAD_FCS_PKTS 0x13
+/* enum: Packets transmitted with good FCS. */
+#define MC_CMD_STAT_ID_TX_GOOD_FCS_PKTS 0x14
+/* enum: Packets received. */
+#define MC_CMD_STAT_ID_RX_PKTS 0x15
+/* enum: Pause frames received. */
+#define MC_CMD_STAT_ID_RX_PAUSE_PKTS 0x16
+/* enum: Total number of good packets received. */
+#define MC_CMD_STAT_ID_RX_GOOD_PKTS 0x17
+/* enum: Total number of BAD packets received. */
+#define MC_CMD_STAT_ID_RX_BAD_PKTS 0x18
+/* enum: Total number of control frames received. */
+#define MC_CMD_STAT_ID_RX_CONTROL_PKTS 0x19
+/* enum: Total number of unicast packets received. */
+#define MC_CMD_STAT_ID_RX_UNICAST_PKTS 0x1a
+/* enum: Total number of multicast packets received. */
+#define MC_CMD_STAT_ID_RX_MULTICAST_PKTS 0x1b
+/* enum: Total number of broadcast packets received. */
+#define MC_CMD_STAT_ID_RX_BROADCAST_PKTS 0x1c
+/* enum: Total number of bytes received. */
+#define MC_CMD_STAT_ID_RX_BYTES 0x1d
+/* enum: Total number of bytes received with bad CRC. */
+#define MC_CMD_STAT_ID_RX_BAD_BYTES 0x1e
+/* enum: Total number of bytes received with GOOD CRC. */
+#define MC_CMD_STAT_ID_RX_GOOD_BYTES 0x1f
+/* enum: Packets received with length equal to 64 bytes. */
+#define MC_CMD_STAT_ID_RX_64_PKTS 0x20
+/* enum: Packets received with length between 65 and 127 bytes. */
+#define MC_CMD_STAT_ID_RX_65_TO_127_PKTS 0x21
+/* enum: Packets received with length between 128 and 255 bytes. */
+#define MC_CMD_STAT_ID_RX_128_TO_255_PKTS 0x22
+/* enum: Packets received with length between 256 and 511 bytes. */
+#define MC_CMD_STAT_ID_RX_256_TO_511_PKTS 0x23
+/* enum: Packets received with length between 512 and 1023 bytes. */
+#define MC_CMD_STAT_ID_RX_512_TO_1023_PKTS 0x24
+/* enum: Packets received with length between 1024 and 1518 bytes. */
+#define MC_CMD_STAT_ID_RX_1024_TO_15XX_PKTS 0x25
+/* enum: Packets received with length between 1519 and 9216 bytes. */
+#define MC_CMD_STAT_ID_RX_15XX_TO_JUMBO_PKTS 0x26
+/* enum: Packets received with length greater than 9216 bytes. */
+#define MC_CMD_STAT_ID_RX_GTJUMBO_PKTS 0x27
+/* enum: Packets received with length less than 64 bytes. */
+#define MC_CMD_STAT_ID_RX_UNDERSIZE_PKTS 0x28
+/* enum: Packets received with bad FCS. */
+#define MC_CMD_STAT_ID_RX_BAD_FCS_PKTS 0x29
+/* enum: Packets received with GOOD FCS. */
+#define MC_CMD_STAT_ID_RX_GOOD_FCS_PKTS 0x2a
+/* enum: Packets received with overflow. */
+#define MC_CMD_STAT_ID_RX_OVERFLOW_PKTS 0x2b
+/* enum: Packets received with symbol error. */
+#define MC_CMD_STAT_ID_RX_SYMBOL_ERROR_PKTS 0x2c
+/* enum: Packets received with alignment error. */
+#define MC_CMD_STAT_ID_RX_ALIGN_ERROR_PKTS 0x2d
+/* enum: Packets received with length error. */
+#define MC_CMD_STAT_ID_RX_LENGTH_ERROR_PKTS 0x2e
+/* enum: Packets received with internal error. */
+#define MC_CMD_STAT_ID_RX_INTERNAL_ERROR_PKTS 0x2f
+/* enum: Packets received with jabber. These packets are larger than the
+ * allowed maximum receive unit (MRU). This indicates that a packet either has
+ * a bad CRC or has an RX error.
+ */
+#define MC_CMD_STAT_ID_RX_JABBER_PKTS 0x30
+/* enum: Packets dropped due to having no descriptor. This is a datapath stat.
+ */
+#define MC_CMD_STAT_ID_RX_NODESC_DROPS 0x31
+/* enum: Packets received with lanes 0 and 1 character error. */
+#define MC_CMD_STAT_ID_RX_LANES01_CHAR_ERR 0x32
+/* enum: Packets received with lanes 2 and 3 character error. */
+#define MC_CMD_STAT_ID_RX_LANES23_CHAR_ERR 0x33
+/* enum: Packets received with lanes 0 and 1 disparity error. */
+#define MC_CMD_STAT_ID_RX_LANES01_DISP_ERR 0x34
+/* enum: Packets received with lanes 2 and 3 disparity error. */
+#define MC_CMD_STAT_ID_RX_LANES23_DISP_ERR 0x35
+/* enum: Packets received with match fault. */
+#define MC_CMD_STAT_ID_RX_MATCH_FAULT 0x36
+#define MC_CMD_STAT_ID_MAC_STAT_ID_LBN 16
+#define MC_CMD_STAT_ID_MAC_STAT_ID_WIDTH 16
+/* Include FEC stats. */
+#define MC_CMD_STAT_ID_PHY_STAT_ID_OFST 2
+#define MC_CMD_STAT_ID_PHY_STAT_ID_LEN 2
+/* enum property: index */
+/* enum: Number of uncorrected FEC codewords on link (RS-FEC only from Medford2
+ * onwards)
+ */
+#define MC_CMD_STAT_ID_FEC_UNCORRECTED_ERRORS 0x1
+/* enum: Number of corrected FEC codewords on link (RS-FEC only from Medford2
+ * onwards)
+ */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_ERRORS 0x2
+/* enum: Number of corrected 10-bit symbol errors, lane 0 (RS-FEC only) */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_SYMBOLS_LANE0 0x3
+/* enum: Number of corrected 10-bit symbol errors, lane 1 (RS-FEC only) */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_SYMBOLS_LANE1 0x4
+/* enum: Number of corrected 10-bit symbol errors, lane 2 (RS-FEC only) */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_SYMBOLS_LANE2 0x5
+/* enum: Number of corrected 10-bit symbol errors, lane 3 (RS-FEC only) */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_SYMBOLS_LANE3 0x6
+#define MC_CMD_STAT_ID_PHY_STAT_ID_LBN 16
+#define MC_CMD_STAT_ID_PHY_STAT_ID_WIDTH 16
+
+/* MC_CMD_STAT_DESC structuredef: Structure describing the layout and size of
+ * the stats DMA buffer descriptor.
+ */
+#define MC_CMD_STAT_DESC_LEN 8
+/* Unique identifier of the statistic. Formatted as MC_CMD_STAT_ID */
+#define MC_CMD_STAT_DESC_STAT_ID_OFST 0
+#define MC_CMD_STAT_DESC_STAT_ID_LEN 4
+#define MC_CMD_STAT_DESC_STAT_ID_LBN 0
+#define MC_CMD_STAT_DESC_STAT_ID_WIDTH 32
+/* See structuredef: MC_CMD_STAT_ID */
+#define MC_CMD_STAT_DESC_STAT_ID_SOURCE_ID_OFST 0
+#define MC_CMD_STAT_DESC_STAT_ID_SOURCE_ID_LEN 2
+#define MC_CMD_STAT_DESC_STAT_ID_SOURCE_ID_LBN 0
+#define MC_CMD_STAT_DESC_STAT_ID_SOURCE_ID_WIDTH 16
+#define MC_CMD_STAT_DESC_STAT_ID_MARKER_STAT_ID_OFST 2
+#define MC_CMD_STAT_DESC_STAT_ID_MARKER_STAT_ID_LEN 2
+#define MC_CMD_STAT_DESC_STAT_ID_MARKER_STAT_ID_LBN 16
+#define MC_CMD_STAT_DESC_STAT_ID_MARKER_STAT_ID_WIDTH 16
+#define MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_OFST 2
+#define MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_LEN 2
+#define MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_LBN 16
+#define MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_WIDTH 16
+#define MC_CMD_STAT_DESC_STAT_ID_PHY_STAT_ID_OFST 2
+#define MC_CMD_STAT_DESC_STAT_ID_PHY_STAT_ID_LEN 2
+#define MC_CMD_STAT_DESC_STAT_ID_PHY_STAT_ID_LBN 16
+#define MC_CMD_STAT_DESC_STAT_ID_PHY_STAT_ID_WIDTH 16
+/* Index of the statistic in the DMA buffer. */
+#define MC_CMD_STAT_DESC_STAT_INDEX_OFST 4
+#define MC_CMD_STAT_DESC_STAT_INDEX_LEN 2
+#define MC_CMD_STAT_DESC_STAT_INDEX_LBN 32
+#define MC_CMD_STAT_DESC_STAT_INDEX_WIDTH 16
+/* Reserved for future extension (e.g. flags field) - currently always 0. */
+#define MC_CMD_STAT_DESC_RESERVED_OFST 6
+#define MC_CMD_STAT_DESC_RESERVED_LEN 2
+#define MC_CMD_STAT_DESC_RESERVED_LBN 48
+#define MC_CMD_STAT_DESC_RESERVED_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_MAC_STATISTICS_DESCRIPTOR
+ * Get a list of descriptors that describe the layout and size of the stats
+ * buffer required for retrieving statistics for a given port. Each entry in
+ * the list is formatted as MC_CMD_STAT_DESC and provides the ID of each stat
+ * and its location and size in the buffer. It also gives the overall minimum
+ * size of the DMA buffer required when DMA mode is used. Note that the first
+ * and last entries in the list are reserved for the generation start
+ * (MC_CMD_MARKER_STAT_GENERATION_START) and end
+ * (MC_CMD_MARKER_STAT_GENERATION_END) markers respectively, to be used for DMA
+ * synchronisation as described in the documentation for the relevant enum
+ * entries. The entries are present in the buffer even if DMA mode is not used.
+ * Provisions are made (but currently unused) for extending the size of the
+ * descriptors, extending the size of the list beyond the maximum MCDI response
+ * size, as well as the dynamic runtime updates of the list. Returns: 0 on
+ * success, ENOENT on non-existent port handle
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR 0x1e3
+#undef MC_CMD_0x1e3_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN msgrequest */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_LEN 8
+/* Handle of port to get MAC statistics descriptors for. */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_PORT_HANDLE_LEN 4
+/* Offset of the first entry to return, for cases where not all entries fit in
+ * the MCDI response. Should be set to 0 on initial request, and on subsequent
+ * requests updated by the number of entries already returned, as long as the
+ * MORE_ENTRIES flag is set.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_OFFSET_OFST 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_OFFSET_LEN 4
+
+/* MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT msgresponse */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LENMIN 28
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LENMAX 252
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LEN(num) (20+8*(num))
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_NUM(len) (((len)-20)/8)
+/* Generation number of the stats buffer. This is incremented each time the
+ * buffer is updated, and is used to verify the consistency of the buffer
+ * contents. Reserved for future extension (dynamic list updates). Currently
+ * always set to 0.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_GENERATION_OFST 0
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_GENERATION_LEN 4
+/* Minimum size in bytes of the DMA buffer required to retrieve all statistics
+ * for the port. This is the sum of the sizes of all the statistics plus the
+ * size of the generation markers. Drivers will typically round this value up
+ * to the granularity of the host DMA allocation units.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_DMA_BUFFER_SIZE_OFST 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_DMA_BUFFER_SIZE_LEN 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_FLAGS_OFST 8
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_FLAGS_LEN 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_MORE_ENTRIES_OFST 8
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_MORE_ENTRIES_LBN 0
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_MORE_ENTRIES_WIDTH 1
+/* Size of the individual descriptor entry in the list. Determines the entry
+ * stride in the list. Currently always set to size of MC_CMD_STAT_DESC, larger
+ * values can be used in the future for extending the descriptor, by appending
+ * new data to the end of the existing structure.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_SIZE_OFST 12
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_SIZE_LEN 4
+/* Number of entries returned in the descriptor list. */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_COUNT_OFST 16
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_COUNT_LEN 4
+/* List of descriptors. Each entry is formatted as MC_CMD_STAT_DESC and
+ * provides the ID of each stat and its location and size in the buffer. The
+ * first and last entries are reserved for the generation start and end markers
+ * respectively.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_OFST 20
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LEN 8
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LO_OFST 20
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LO_LEN 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LO_LBN 160
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LO_WIDTH 32
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_HI_OFST 24
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_HI_LEN 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_HI_LBN 192
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_HI_WIDTH 32
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_MINNUM 1
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_MAXNUM 29
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_MAXNUM_MCDI2 125
+
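
As a rough illustration of how a consumer might walk the descriptor list defined above (a sketch only: outbuf/outlen stand for a raw little-endian MCDI response obtained through whatever MCDI transport the driver uses, and mcdi_le16/mcdi_le32 are local stand-ins for the driver's own buffer accessors, not part of the MCDI API):

#include <stddef.h>
#include <stdint.h>

static uint16_t mcdi_le16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

static uint32_t mcdi_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Walk the entries of one MC_CMD_MAC_STATISTICS_DESCRIPTOR response, using
 * ENTRY_SIZE as the stride so that future, larger descriptors are stepped
 * over correctly. Returns the number of entries consumed, which the caller
 * can add to OFFSET and re-issue the command while MORE_ENTRIES is set.
 */
static unsigned int walk_stat_descriptors(const uint8_t *outbuf, size_t outlen)
{
	uint32_t stride = mcdi_le32(outbuf + MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_SIZE_OFST);
	uint32_t count = mcdi_le32(outbuf + MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_COUNT_OFST);
	const uint8_t *entry = outbuf + MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_OFST;
	unsigned int i;

	for (i = 0; i < count && entry + stride <= outbuf + outlen;
	     i++, entry += stride) {
		uint16_t stat_id = mcdi_le16(entry + MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_OFST);
		uint16_t index = mcdi_le16(entry + MC_CMD_STAT_DESC_STAT_INDEX_OFST);

		/* stat_id is an MC_CMD_STAT_ID_* value (its namespace is
		 * given by SOURCE_ID); index is the counter's slot in the
		 * 8-byte-per-stat buffer. */
		(void)stat_id;
		(void)index;
	}
	return i;
}
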
+
+/***********************************/
+/* MC_CMD_MAC_STATISTICS
+ * Get generic MAC statistics. This call retrieves unified statistics managed
+ * by the MC. The MC will populate and provide all supported statistics in the
+ * format as returned by MC_CMD_MAC_STATISTICS_DESCRIPTOR. Refer to the
+ * aforementioned command for the format and contents of the stats DMA buffer.
+ * To ensure consistent and accurate results, it is essential for the driver to
+ * initialize the DMA buffer with zeros when DMA mode is used. Returns: 0 on
+ * success, ETIME if the DMA buffer is not ready, ENOENT on non-existent port
+ * handle, and EINVAL on invalid parameters (DMA buffer too small)
+ */
+#define MC_CMD_MAC_STATISTICS 0x1e4
+#undef MC_CMD_0x1e4_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAC_STATISTICS_IN msgrequest */
+#define MC_CMD_MAC_STATISTICS_IN_LEN 20
+/* Handle of port to get MAC statistics for. */
+#define MC_CMD_MAC_STATISTICS_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_STATISTICS_IN_PORT_HANDLE_LEN 4
+/* Contains options for querying the MAC statistics. */
+#define MC_CMD_MAC_STATISTICS_IN_CMD_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_CMD_LEN 4
+#define MC_CMD_MAC_STATISTICS_IN_DMA_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_DMA_LBN 0
+#define MC_CMD_MAC_STATISTICS_IN_DMA_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_CLEAR_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_CLEAR_LBN 1
+#define MC_CMD_MAC_STATISTICS_IN_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_CHANGE_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_ENABLE_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_NOEVENT_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_NOEVENT_LBN 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_PERIOD_MS_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIOD_MS_LBN 16
+#define MC_CMD_MAC_STATISTICS_IN_PERIOD_MS_WIDTH 16
+/* This is the address of the DMA buffer to use for transfer of the statistics.
+ * Only valid if the DMA flag is set to 1.
+ */
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_OFST 8
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_OFST 8
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_LBN 64
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_OFST 12
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_LBN 96
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_WIDTH 32
+/* This is the length of the DMA buffer to use for the transfer of the
+ * statistics. The buffer should be at least DMA_BUFFER_SIZE long, as returned
+ * by MC_CMD_MAC_STATISTICS_DESCRIPTOR. If the supplied buffer is too small,
+ * the command will fail with EINVAL. Only valid if the DMA flag is set to 1.
+ */
+#define MC_CMD_MAC_STATISTICS_IN_DMA_LEN_OFST 16
+#define MC_CMD_MAC_STATISTICS_IN_DMA_LEN_LEN 4
+
+/* MC_CMD_MAC_STATISTICS_OUT msgresponse */
+#define MC_CMD_MAC_STATISTICS_OUT_LENMIN 5
+#define MC_CMD_MAC_STATISTICS_OUT_LENMAX 252
+#define MC_CMD_MAC_STATISTICS_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAC_STATISTICS_OUT_LEN(num) (4+1*(num))
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_NUM(len) (((len)-4)/1)
+/* length of the data in bytes */
+#define MC_CMD_MAC_STATISTICS_OUT_DATALEN_OFST 0
+#define MC_CMD_MAC_STATISTICS_OUT_DATALEN_LEN 4
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_OFST 4
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_LEN 1
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_MINNUM 1
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_MAXNUM 248
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_MAXNUM_MCDI2 1016
+
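
The generation markers described for MC_CMD_MAC_STATISTICS_DESCRIPTOR lend themselves to the usual snapshot-and-retry pattern when DMA mode is used. A hedged sketch follows: the marker indices and counter count come from the descriptor list, the driver is assumed to have zero-initialised the buffer as required above, and memory barriers, DMA-coherency details and the retry bound are illustrative choices rather than firmware requirements.

#include <stdint.h>
#include <string.h>

/* dma_buf is viewed as an array of n_stats little-endian 64-bit counters
 * (n_stats == DMA_BUFFER_SIZE / 8). gen_start_idx/gen_end_idx are the
 * STAT_INDEX values of the generation start and end marker entries.
 */
static int snapshot_dma_stats(const uint64_t *dma_buf, unsigned int n_stats,
			      unsigned int gen_start_idx,
			      unsigned int gen_end_idx, uint64_t *snapshot)
{
	int retries = 5;	/* arbitrary bound for the sketch */

	while (retries--) {
		memcpy(snapshot, dma_buf, n_stats * sizeof(*snapshot));

		/* A consistent snapshot has matching, non-zero generation
		 * markers; otherwise firmware updated the buffer while we
		 * were copying, so try again. */
		if (snapshot[gen_start_idx] != 0 &&
		    snapshot[gen_start_idx] == snapshot[gen_end_idx])
			return 0;
	}
	return -1;
}
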
+/* NET_PORT_HANDLE_DESC structuredef: Network port descriptor containing a port
+ * handle and attributes used, for example, in MC_CMD_ENUM_PORTS.
+ */
+#define NET_PORT_HANDLE_DESC_LEN 53
+/* The handle to identify the port */
+#define NET_PORT_HANDLE_DESC_PORT_HANDLE_OFST 0
+#define NET_PORT_HANDLE_DESC_PORT_HANDLE_LEN 4
+#define NET_PORT_HANDLE_DESC_PORT_HANDLE_LBN 0
+#define NET_PORT_HANDLE_DESC_PORT_HANDLE_WIDTH 32
+/* Includes the type of port e.g. physical, virtual or MAE MPORT and other
+ * properties relevant to the port.
+ */
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_OFST 4
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LEN 8
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LO_OFST 4
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LO_LEN 4
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LO_LBN 32
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LO_WIDTH 32
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_HI_OFST 8
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_HI_LEN 4
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_HI_LBN 64
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_HI_WIDTH 32
+#define NET_PORT_HANDLE_DESC_PORT_TYPE_OFST 4
+#define NET_PORT_HANDLE_DESC_PORT_TYPE_LBN 0
+#define NET_PORT_HANDLE_DESC_PORT_TYPE_WIDTH 3
+#define NET_PORT_HANDLE_DESC_PHYSICAL 0x0 /* enum */
+#define NET_PORT_HANDLE_DESC_VIRTUAL 0x1 /* enum */
+#define NET_PORT_HANDLE_DESC_MPORT 0x2 /* enum */
+#define NET_PORT_HANDLE_DESC_IS_ZOMBIE_OFST 4
+#define NET_PORT_HANDLE_DESC_IS_ZOMBIE_LBN 8
+#define NET_PORT_HANDLE_DESC_IS_ZOMBIE_WIDTH 1
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LBN 32
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_WIDTH 64
+/* The dynamic change that led to the port enumeration */
+#define NET_PORT_HANDLE_DESC_ENTRY_SRC_OFST 12
+#define NET_PORT_HANDLE_DESC_ENTRY_SRC_LEN 1
+/* enum: Indicates that the ENTRY_SRC field has not been initialized. */
+#define NET_PORT_HANDLE_DESC_UNKNOWN 0x0
+/* enum: The port was enumerated at start of day. */
+#define NET_PORT_HANDLE_DESC_PRESENT 0x1
+/* enum: The port was dynamically added. */
+#define NET_PORT_HANDLE_DESC_ADDED 0x2
+/* enum: The port was dynamically deleted. */
+#define NET_PORT_HANDLE_DESC_DELETED 0x3
+#define NET_PORT_HANDLE_DESC_ENTRY_SRC_LBN 96
+#define NET_PORT_HANDLE_DESC_ENTRY_SRC_WIDTH 8
+/* This is an opaque 40 byte label exposed to users as a unique identifier of
+ * the port. It is represented as a zero-terminated ASCII string and assigned
+ * by the port administrator which is typically either the firmware for a
+ * physical port or the host software responsible for creating the virtual
+ * port. The label is conveyed to the driver after assignment, which, unlike
+ * the port administrator, does not need to know how to interpret the label.
+ */
+#define NET_PORT_HANDLE_DESC_PORT_LABEL_OFST 13
+#define NET_PORT_HANDLE_DESC_PORT_LABEL_LEN 40
+#define NET_PORT_HANDLE_DESC_PORT_LABEL_LBN 104
+#define NET_PORT_HANDLE_DESC_PORT_LABEL_WIDTH 320
+
+
+/***********************************/
+/* MC_CMD_ENUM_PORTS
+ * This command returns handles for all ports present in the system. The PCIe
+ * function type of each port (either physical or virtual) is also reported.
+ * After a start-of-day port enumeration, firmware tracks the creation and
+ * deletion of ports and updates the list whenever it changes. The tracked
+ * state is cleared after a control interface reset (e.g. FLR, ENTITY_RESET),
+ * in which case this command must be called again to re-enumerate the ports.
+ * The command is also clear-on-read and repeated calls will drain the buffer.
+ */
+#define MC_CMD_ENUM_PORTS 0x1e5
+#undef MC_CMD_0x1e5_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e5_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_ENUM_PORTS_IN msgrequest */
+#define MC_CMD_ENUM_PORTS_IN_LEN 0
+
+/* MC_CMD_ENUM_PORTS_OUT msgresponse */
+#define MC_CMD_ENUM_PORTS_OUT_LENMIN 12
+#define MC_CMD_ENUM_PORTS_OUT_LENMAX 252
+#define MC_CMD_ENUM_PORTS_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_ENUM_PORTS_OUT_LEN(num) (12+1*(num))
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_NUM(len) (((len)-12)/1)
+/* Any unused flags are reserved and must be ignored. */
+#define MC_CMD_ENUM_PORTS_OUT_FLAGS_OFST 0
+#define MC_CMD_ENUM_PORTS_OUT_FLAGS_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_MORE_OFST 0
+#define MC_CMD_ENUM_PORTS_OUT_MORE_LBN 0
+#define MC_CMD_ENUM_PORTS_OUT_MORE_WIDTH 1
+/* The number of NET_PORT_HANDLE_DESC structures in PORT_HANDLES. */
+#define MC_CMD_ENUM_PORTS_OUT_PORT_COUNT_OFST 4
+#define MC_CMD_ENUM_PORTS_OUT_PORT_COUNT_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_SIZEOF_NET_PORT_HANDLE_DESC_OFST 8
+#define MC_CMD_ENUM_PORTS_OUT_SIZEOF_NET_PORT_HANDLE_DESC_LEN 4
+/* Array of NET_PORT_HANDLE_DESC structures. Callers must use the
+ * SIZEOF_NET_PORT_HANDLE_DESC field as the array stride between entries. This
+ * may also allow for tail padding for alignment. Fields beyond
+ * SIZEOF_NET_PORT_HANDLE_DESC are not present.
+ */
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_OFST 12
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_LEN 1
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_MINNUM 0
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_MAXNUM 240
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_MAXNUM_MCDI2 1008
+/* See structuredef: NET_PORT_HANDLE_DESC */
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_HANDLE_OFST 12
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_HANDLE_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_OFST 16
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LEN 8
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LO_OFST 16
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LO_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LO_LBN 128
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LO_WIDTH 32
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_HI_OFST 20
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_HI_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_HI_LBN 160
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_HI_WIDTH 32
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_TYPE_LBN 128
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_TYPE_WIDTH 3
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_IS_ZOMBIE_LBN 136
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_IS_ZOMBIE_WIDTH 1
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_ENTRY_SRC_OFST 24
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_ENTRY_SRC_LEN 1
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_LABEL_OFST 25
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_LABEL_LEN 40
+
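
To make the stride rule above concrete, a sketch of iterating the returned descriptors (reusing the mcdi_le32 helper from the earlier sketch; outbuf/outlen again stand for a raw MCDI response and are assumptions of the sketch, not part of the interface):

static void list_ports(const uint8_t *outbuf, size_t outlen)
{
	uint32_t count = mcdi_le32(outbuf + MC_CMD_ENUM_PORTS_OUT_PORT_COUNT_OFST);
	uint32_t stride = mcdi_le32(outbuf + MC_CMD_ENUM_PORTS_OUT_SIZEOF_NET_PORT_HANDLE_DESC_OFST);
	const uint8_t *desc = outbuf + MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_OFST;
	uint32_t i;

	for (i = 0; i < count && desc + stride <= outbuf + outlen;
	     i++, desc += stride) {
		uint32_t handle = mcdi_le32(desc + NET_PORT_HANDLE_DESC_PORT_HANDLE_OFST);
		const char *label = (const char *)(desc + NET_PORT_HANDLE_DESC_PORT_LABEL_OFST);

		/* label is a zero-terminated ASCII string of at most
		 * NET_PORT_HANDLE_DESC_PORT_LABEL_LEN bytes; handle feeds the
		 * per-port commands (statistics, properties, events). */
		(void)handle;
		(void)label;
	}
	/* If the MORE flag in FLAGS is set, issue MC_CMD_ENUM_PORTS again:
	 * the command is clear-on-read, so repeated calls drain the list. */
}
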
+
+/***********************************/
+/* MC_CMD_GET_TRANSCEIVER_PROPERTIES
+ * Read properties of the transceiver associated with the port. Can be either
+ * for a fixed onboard transceiver or an inserted module. The returned data is
+ * interpreted from the transceiver hardware and may be fixed up by the
+ * firmware. Use MC_CMD_GET_MODULE_DATA to get raw undecoded data.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES 0x1e6
+#undef MC_CMD_0x1e6_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e6_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_TRANSCEIVER_PROPERTIES_IN msgrequest */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_IN_LEN 4
+/* Handle to port to get transceiver properties from. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT msgresponse */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_LEN 89
+/* Supported technology abilities. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_TECH_ABILITIES_MASK_OFST 0
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_TECH_ABILITIES_MASK_LEN 16
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Reserved for future expansion to accommodate future Ethernet technology
+ * expansion.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_RESERVED_OFST 16
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_RESERVED_LEN 16
+/* Preferred FEC modes. This is a function of the cable type and length. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_PREFERRED_FEC_MASK_OFST 32
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_PREFERRED_FEC_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* SFF-8042 code reported by the module. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_CODE_OFST 36
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_CODE_LEN 2
+/* Medium. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIUM_OFST 38
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIUM_LEN 1
+/* enum property: value */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_UNKNOWN 0x0 /* enum */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_COPPER 0x1 /* enum */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_OPTICAL 0x2 /* enum */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_BACKPLANE 0x3 /* enum */
+/* Identifies the technology (media subtype). */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIA_SUBTYPE_OFST 39
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIA_SUBTYPE_LEN 1
+/* enum property: value */
+/* MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_UNKNOWN 0x0 */
+/* enum: Ethernet over twisted-pair copper cables for distances up to 100
+ * meters.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_BASET 0x1
+/* enum: Ethernet over twin-axial, balanced copper cable. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_CR 0x2
+/* enum: Ethernet over backplane for connections on the same board. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_KX 0x3
+/* enum: Ethernet over a single backplane lane for connections between
+ * different boards.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_KR 0x4
+/* enum: Ethernet over copper backplane. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_KP 0x5
+/* enum: Ethernet over fiber optic. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_BASEX 0x6
+/* enum: Short range ethernet over multimode fiber optic (See IEEE 802.3 Clause
+ * 49 and 52).
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_SR 0x7
+/* enum: Long range, extended range or far reach ethernet used with single mode
+ * fiber optics.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_LR_ER_FR 0x8
+/* enum: Long reach multimode ethernet over multimode optical fiber. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_LRM 0x9
+/* enum: Very short reach PAM4 ethernet over multimode optical fiber (see IEEE
+ * 802.3db).
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VR 0xa
+/* enum: BASE-R encoding and PAM4 over single-mode fiber with reach up to at
+ * least 500 meters (IEEE 802.3 Clauses 121 and 124).
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_DR 0xb
+/* String of the vendor name as interpreted by NMC firmware. NMC firmware
+ * applies workarounds for known buggy transceivers. The vendor name is
+ * presented as 16 bytes of ASCII characters padded with spaces. It can also be
+ * represented as 16 bytes of zeros if the field is unspecified for the
+ * connected module. See SFF-8472/CMIS specifications for details.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_OFST 40
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_LEN 1
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_NUM 16
+/* The vendor part number as interpreted by NMC firmware. The field is presented
+ * as 16 bytes of ASCII chars padded with spaces. It can also be 16 bytes of
+ * zeros if the field is unspecified for the connected module.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_PN_OFST 56
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_PN_LEN 1
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_PN_NUM 16
+/* Serial number of the module presented as 16 bytes of ASCII characters padded
+ * with spaces. It can also be 16 bytes of zeros if the field is unspecified
+ * for the connected module. See SFF-8472/CMIS specifications for details.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_SERIAL_NUMBER_OFST 72
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_SERIAL_NUMBER_LEN 1
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_SERIAL_NUMBER_NUM 16
+/* This reports the number of module changes detected by the NMC firmware. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_PORT_MODULECHANGE_SEQ_NUM_OFST 88
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_PORT_MODULECHANGE_SEQ_NUM_LEN 1
+
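
Since VENDOR_NAME, VENDOR_PN and SERIAL_NUMBER are fixed-width, space-padded ASCII fields rather than C strings, a small helper of the following shape is typically needed to turn them into NUL-terminated strings. This is an illustrative sketch, not part of the interface.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* dst must have room for field_len + 1 bytes. */
static void copy_xcvr_string(const uint8_t *outbuf, size_t field_ofst,
			     size_t field_len, char *dst)
{
	size_t n = field_len;

	memcpy(dst, outbuf + field_ofst, field_len);
	dst[field_len] = '\0';
	/* Strip trailing space padding (an all-zero field stays empty). */
	while (n > 0 && (dst[n - 1] == ' ' || dst[n - 1] == '\0'))
		dst[--n] = '\0';
}

/* e.g.:
 *   char name[MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_NUM + 1];
 *   copy_xcvr_string(outbuf,
 *                    MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_OFST,
 *                    MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_NUM,
 *                    name);
 */
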
+
+/***********************************/
+/* MC_CMD_GET_FIXED_PORT_PROPERTIES
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES 0x1e7
+#undef MC_CMD_0x1e7_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e7_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_FIXED_PORT_PROPERTIES_IN msgrequest: In this context, the port
+ * consists of the MAC and the PHY, and excludes any modules inserted into the
+ * cage. This information is fixed for a given board but not for a given ASIC.
+ * This command reports properties for the port as it is currently configured,
+ * rather than its hardware capabilities, which may exceed the current
+ * configuration.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_IN_LEN 4
+/* Handle of the port from which to retrieve properties. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT msgresponse */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_LEN 36
+/* Supported capabilities of the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_OFST 0
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_LEN 25
+/* See structuredef: MC_CMD_ETH_AN_FIELDS */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_TECH_MASK_OFST 0
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_TECH_MASK_LEN 16
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_FEC_MASK_OFST 16
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_FEC_MASK_LEN 4
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_FEC_REQ_OFST 20
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_FEC_REQ_LEN 4
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_PAUSE_MASK_OFST 24
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_PAUSE_MASK_LEN 1
+/* Number of lanes supported by the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_NUM_LANES_OFST 25
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_NUM_LANES_LEN 1
+/* Bitmask of supported loopback modes. Where the response to this command
+ * includes the LOOPBACK_MODES_MASK_V2 field, that field should be used in
+ * preference to ensure that all available loopback modes are seen.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_LOOPBACK_MODES_MASK_OFST 26
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_LOOPBACK_MODES_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* This field serves as a cage index that uniquely identifies the cage to which
+ * the module is connected. This is useful when splitter cables that have
+ * multiple ports on a single cage are used.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MDI_INDEX_OFST 27
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MDI_INDEX_LEN 1
+/* This bitmask is used to specify the lanes within the cage identified by
+ * MDI_INDEX that are allocated to the port.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MDI_LANE_MASK_OFST 28
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MDI_LANE_MASK_LEN 1
+/* Maximum frame length supported by the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MAX_FRAME_LEN_OFST 32
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MAX_FRAME_LEN_LEN 4
+
+/* MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2 msgresponse */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LEN 48
+/* Supported capabilities of the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_ABILITIES_OFST 0
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_ABILITIES_LEN 25
+/* Number of lanes supported by the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_NUM_LANES_OFST 25
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_NUM_LANES_LEN 1
+/* Bitmask of supported loopback modes. Where the response to this command
+ * includes the LOOPBACK_MODES_MASK_V2 field, that field should be used in
+ * preference to ensure that all available loopback modes are seen.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_OFST 26
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* This field serves as a cage index that uniquely identifies the cage to which
+ * the module is connected. This is useful when splitter cables that have
+ * multiple ports on a single cage are used.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MDI_INDEX_OFST 27
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MDI_INDEX_LEN 1
+/* This bitmask is used to specify the lanes within the cage identified by
+ * MDI_INDEX that are allocated to the port.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MDI_LANE_MASK_OFST 28
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MDI_LANE_MASK_LEN 1
+/* Maximum frame length supported by the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MAX_FRAME_LEN_OFST 32
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MAX_FRAME_LEN_LEN 4
+/* Bitmask of supported loopback modes. This field replaces the
+ * LOOPBACK_MODES_MASK field which is defined under version 1 of this command.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_OFST 40
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LEN 8
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_OFST 40
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_LEN 4
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_LBN 320
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_WIDTH 32
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_OFST 44
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_LEN 4
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_LBN 352
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+
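
A sketch of the V1/V2 selection that the loopback-mask comments above describe, keyed off the response length (reusing the mcdi_le32 helper from the earlier sketch; outbuf/outlen are assumptions of the sketch):

static uint64_t get_loopback_modes(const uint8_t *outbuf, size_t outlen)
{
	if (outlen >= MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LEN) {
		/* V2 response: prefer the 64-bit mask so no modes are missed. */
		const uint8_t *p = outbuf +
			MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_OFST;

		return (uint64_t)mcdi_le32(p) | ((uint64_t)mcdi_le32(p + 4) << 32);
	}
	/* V1 response: only the single-byte mask is present. */
	return outbuf[MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_LOOPBACK_MODES_MASK_OFST];
}
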
+
+/***********************************/
+/* MC_CMD_GET_MODULE_DATA
+ * Read media-specific data from the PHY (e.g. SFP/SFP+ module ID information
+ * for SFP+ PHYs). This command returns raw data from the module's EEPROM and
+ * it is not interpreted by the MC. Use MC_CMD_GET_TRANSCEIVER_PROPERTIES to
+ * get interpreted data. Return code: 0, ENOENT
+ */
+#define MC_CMD_GET_MODULE_DATA 0x1e8
+#undef MC_CMD_0x1e8_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e8_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_MODULE_DATA_IN msgrequest */
+#define MC_CMD_GET_MODULE_DATA_IN_LEN 16
+/* Handle to identify the port from which to request module properties. */
+#define MC_CMD_GET_MODULE_DATA_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_MODULE_DATA_IN_PORT_HANDLE_LEN 4
+/* 7 bit I2C address of the device. DEPRECATED: This field is replaced by
+ * MODULE_ADDR in V2. Use V2 of this command for proper alignment and easier
+ * access.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_DEVADDR_LBN 32
+#define MC_CMD_GET_MODULE_DATA_IN_DEVADDR_WIDTH 7
+/* 0 if the page does not support banked access, non-zero otherwise. Non-zero
+ * BANK is valid if OFFSET is in the range 80h - ffh, i.e. in the Upper Memory
+ * region.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_BANK_OFST 6
+#define MC_CMD_GET_MODULE_DATA_IN_BANK_LEN 2
+/* 0 if paged access is not supported, non-zero otherwise. Non-zero PAGE is
+ * valid if OFFSET is in the range 80h - ffh.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_PAGE_OFST 8
+#define MC_CMD_GET_MODULE_DATA_IN_PAGE_LEN 2
+/* Offset in the range 00h - 7fh to access lower memory. Offset in the range
+ * 80h - ffh to access upper memory
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_OFFSET_OFST 10
+#define MC_CMD_GET_MODULE_DATA_IN_OFFSET_LEN 1
+#define MC_CMD_GET_MODULE_DATA_IN_LENGTH_OFST 12
+#define MC_CMD_GET_MODULE_DATA_IN_LENGTH_LEN 4
+
+/* MC_CMD_GET_MODULE_DATA_IN_V2 msgrequest: Updated MC_CMD_GET_MODULE_DATA with
+ * an 8-bit wide ADDRESSING field. This new field provides a correctly aligned
+ * container for the 7-bit DEVADDR field from V1, now renamed MODULE_ADDR.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_LEN 16
+/* Handle to identify the port from which to request module properties. */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_MODULE_DATA_IN_V2_PORT_HANDLE_LEN 4
+/* 7 bit I2C address of the device. DEPRECATED: This field is replaced by
+ * MODULE_ADDR in V2. Use V2 of this command for proper alignment and easier
+ * access.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_DEVADDR_LBN 32
+#define MC_CMD_GET_MODULE_DATA_IN_V2_DEVADDR_WIDTH 7
+/* 0 if the page does not support banked access, non-zero otherwise. Non-zero
+ * BANK is valid if OFFSET is in the range 80h - ffh, i.e. in the Upper Memory
+ * region.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_BANK_OFST 6
+#define MC_CMD_GET_MODULE_DATA_IN_V2_BANK_LEN 2
+/* 0 if paged access is not supported, non-zero otherwise. Non-zero PAGE is
+ * valid if OFFSET is in the range 80h - ffh.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_PAGE_OFST 8
+#define MC_CMD_GET_MODULE_DATA_IN_V2_PAGE_LEN 2
+/* Offset in the range 00h - 7fh to access lower memory. Offset in the range
+ * 80h - ffh to access upper memory
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_OFFSET_OFST 10
+#define MC_CMD_GET_MODULE_DATA_IN_V2_OFFSET_LEN 1
+#define MC_CMD_GET_MODULE_DATA_IN_V2_LENGTH_OFST 12
+#define MC_CMD_GET_MODULE_DATA_IN_V2_LENGTH_LEN 4
+/* Container for 7 bit I2C addresses. */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_ADDRESSING_OFST 4
+#define MC_CMD_GET_MODULE_DATA_IN_V2_ADDRESSING_LEN 1
+#define MC_CMD_GET_MODULE_DATA_IN_V2_MODULE_ADDR_OFST 4
+#define MC_CMD_GET_MODULE_DATA_IN_V2_MODULE_ADDR_LBN 0
+#define MC_CMD_GET_MODULE_DATA_IN_V2_MODULE_ADDR_WIDTH 7
+
+/* MC_CMD_GET_MODULE_DATA_OUT msgresponse */
+#define MC_CMD_GET_MODULE_DATA_OUT_LENMIN 5
+#define MC_CMD_GET_MODULE_DATA_OUT_LENMAX 252
+#define MC_CMD_GET_MODULE_DATA_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_GET_MODULE_DATA_OUT_LEN(num) (4+1*(num))
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_NUM(len) (((len)-4)/1)
+/* length of the data in bytes */
+#define MC_CMD_GET_MODULE_DATA_OUT_DATALEN_OFST 0
+#define MC_CMD_GET_MODULE_DATA_OUT_DATALEN_LEN 4
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_OFST 4
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_LEN 1
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_MINNUM 1
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_MAXNUM 248
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_MAXNUM_MCDI2 1016
+
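
A sketch of building the V2 request for a paged read of module upper memory, simply following the BANK/PAGE/OFFSET rules stated above. The put_le16/put_le32 store helpers are illustrative stand-ins for the driver's own buffer accessors.

#include <stdint.h>
#include <string.h>

static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
}

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = v >> 24;
}

/* req must be MC_CMD_GET_MODULE_DATA_IN_V2_LEN bytes. An offset of 0x80 or
 * above selects upper memory, where non-zero BANK/PAGE values are meaningful.
 */
static void build_module_data_req(uint8_t *req, uint32_t port_handle,
				  uint8_t i2c_addr, uint16_t bank,
				  uint16_t page, uint8_t offset, uint32_t len)
{
	memset(req, 0, MC_CMD_GET_MODULE_DATA_IN_V2_LEN);
	put_le32(req + MC_CMD_GET_MODULE_DATA_IN_V2_PORT_HANDLE_OFST, port_handle);
	req[MC_CMD_GET_MODULE_DATA_IN_V2_ADDRESSING_OFST] = i2c_addr & 0x7f; /* MODULE_ADDR */
	put_le16(req + MC_CMD_GET_MODULE_DATA_IN_V2_BANK_OFST, bank);
	put_le16(req + MC_CMD_GET_MODULE_DATA_IN_V2_PAGE_OFST, page);
	req[MC_CMD_GET_MODULE_DATA_IN_V2_OFFSET_OFST] = offset;
	put_le32(req + MC_CMD_GET_MODULE_DATA_IN_V2_LENGTH_OFST, len);
}
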
+/* EVENT_MASK structuredef */
+#define EVENT_MASK_LEN 4
+#define EVENT_MASK_TYPE_OFST 0
+#define EVENT_MASK_TYPE_LEN 4
+/* enum: PORT_LINKCHANGE event is enabled */
+#define EVENT_MASK_PORT_LINKCHANGE 0x0
+/* enum: PORT_MODULECHANGE event is enabled */
+#define EVENT_MASK_PORT_MODULECHANGE 0x1
+#define EVENT_MASK_TYPE_LBN 0
+#define EVENT_MASK_TYPE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_SET_NETPORT_EVENTS_MASK
+ */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK 0x1e9
+#undef MC_CMD_0x1e9_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e9_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_SET_NETPORT_EVENTS_MASK_IN msgrequest: Enable or disable delivery of
+ * specified network port events for a given port identified by PORT_HANDLE. At
+ * start of day, or after any control interface reset (FLR, ENTITY_RESET,
+ * etc.), all event delivery is disabled for all ports associated with the
+ * control interface.
+ */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_LEN 8
+/* Handle of the port to set the event delivery mask for. */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_LEN 4
+/* Bitmask of events to enable. Event delivery is enabled when corresponding
+ * bit is 1, disabled when 0.
+ */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_EVENT_MASK_OFST 4
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_EVENT_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* EVENT_MASK/TYPE */
+
+/* MC_CMD_SET_NETPORT_EVENTS_MASK_OUT msgresponse */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_OUT_LEN 0
+
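
Since EVENT_MASK/TYPE values are bit positions ("enum property: bitshift"), the mask written at EVENT_MASK_OFST is built by shifting. A minimal sketch:

#include <stdint.h>

/* Enable both currently defined network port events for a port. The result
 * is written at MC_CMD_SET_NETPORT_EVENTS_MASK_IN_EVENT_MASK_OFST, after the
 * 4-byte port handle, in the 8-byte request. */
static uint32_t default_netport_event_mask(void)
{
	return (1u << EVENT_MASK_PORT_LINKCHANGE) |
	       (1u << EVENT_MASK_PORT_MODULECHANGE);
}
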
/***********************************/
-/* MC_CMD_EVENT_CTRL
- * Configure which categories of unsolicited events the driver expects to
- * receive (Riverhead).
- */
-#define MC_CMD_EVENT_CTRL 0x69
-#undef MC_CMD_0x69_PRIVILEGE_CTG
-
-#define MC_CMD_0x69_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_EVENT_CTRL_IN msgrequest */
-#define MC_CMD_EVENT_CTRL_IN_LENMIN 0
-#define MC_CMD_EVENT_CTRL_IN_LENMAX 252
-#define MC_CMD_EVENT_CTRL_IN_LENMAX_MCDI2 1020
-#define MC_CMD_EVENT_CTRL_IN_LEN(num) (0+4*(num))
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_NUM(len) (((len)-0)/4)
-/* Array of event categories for which the driver wishes to receive events. */
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_OFST 0
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_LEN 4
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MINNUM 0
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM 63
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM_MCDI2 255
-/* enum: Driver wishes to receive LINKCHANGE events. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_LINKCHANGE 0x0
-/* enum: Driver wishes to receive SENSOR_CHANGE and SENSOR_STATE_CHANGE events.
- */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_SENSOREVT 0x1
-/* enum: Driver wishes to receive receive errors. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_RX_ERR 0x2
-/* enum: Driver wishes to receive transmit errors. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_TX_ERR 0x3
-/* enum: Driver wishes to receive firmware alerts. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_FWALERT 0x4
-/* enum: Driver wishes to receive reboot events. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_MC_REBOOT 0x5
-
-/* MC_CMD_EVENT_CTRL_OUT msgrequest */
-#define MC_CMD_EVENT_CTRL_OUT_LEN 0
+/* MC_CMD_GET_NETPORT_EVENTS_MASK
+ */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK 0x1ea
+#undef MC_CMD_0x1ea_PRIVILEGE_CTG
+
+#define MC_CMD_0x1ea_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_NETPORT_EVENTS_MASK_IN msgrequest: Get the event delivery mask
+ * for a given port identified by PORT_HANDLE.
+ */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_IN_LEN 4
+/* Handle of the port to get the event delivery mask for. */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_GET_NETPORT_EVENTS_MASK_OUT msgresponse */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_OUT_LEN 4
+/* Bitmask of events enabled. Event delivery is enabled when corresponding bit
+ * is 1, disabled when 0.
+ */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_OUT_EVENT_MASK_OFST 0
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_OUT_EVENT_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* EVENT_MASK/TYPE */
+
+
+/***********************************/
+/* MC_CMD_GET_SUPPORTED_NETPORT_EVENTS
+ */
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS 0x1eb
+#undef MC_CMD_0x1eb_PRIVILEGE_CTG
+
+#define MC_CMD_0x1eb_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_IN msgrequest: Get network port events
+ * supported by the platform. Information returned is fixed for a given NIC
+ * platform.
+ */
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_IN_LEN 0
+
+/* MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_OUT msgresponse */
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_OUT_LEN 4
+/* Bitmask of event types supported by the platform. A bit set to 1 indicates
+ * that the corresponding event type is supported.
+ */
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_OUT_EVENT_MASK_OFST 0
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_OUT_EVENT_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* EVENT_MASK/TYPE */
+
+
+/***********************************/
+/* MC_CMD_GET_NETPORT_STATISTICS
+ * Get generic MAC statistics. This call retrieves unified statistics managed
+ * by the MC. The MC will populate and provide all supported statistics in the
+ * format as returned by MC_CMD_MAC_STATISTICS_DESCRIPTOR. Refer to the
+ * aforementioned command for the format and contents of the stats DMA buffer.
+ * To ensure consistent and accurate results, it is essential for the driver to
+ * initialize the DMA buffer with zeros when DMA mode is used. Returns: 0 on
+ * success, ETIME if the DMA buffer is not ready, ENOENT on non-existent port
+ * handle, and EINVAL on invalid parameters (DMA buffer too small)
+ */
+#define MC_CMD_GET_NETPORT_STATISTICS 0x1fa
+#undef MC_CMD_0x1fa_PRIVILEGE_CTG
+
+#define MC_CMD_0x1fa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_NETPORT_STATISTICS_IN msgrequest */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_LEN 20
+/* Handle of port to get MAC statistics for. */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PORT_HANDLE_LEN 4
+/* Contains options for querying the MAC statistics. */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CMD_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CMD_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_LBN 0
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CLEAR_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CLEAR_LBN 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CLEAR_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_CHANGE_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_ENABLE_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_NOEVENT_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_NOEVENT_LBN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIOD_MS_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIOD_MS_LBN 15
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIOD_MS_WIDTH 17
+/* Specifies the physical address of the DMA buffer to use for statistics
+ * transfer. This field must contain a valid address under either of these
+ * conditions: 1. DMA flag is set (immediate DMA requested) 2. Both
+ * PERIODIC_CHANGE and PERIODIC_ENABLE are set (periodic DMA configured)
+ */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_OFST 8
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LO_OFST 8
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LO_LBN 64
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LO_WIDTH 32
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_HI_OFST 12
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_HI_LBN 96
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_HI_WIDTH 32
+/* Specifies the length of the DMA buffer in bytes for statistics transfer. The
+ * buffer size must be at least DMA_BUFFER_SIZE bytes (as returned by
+ * MC_CMD_MAC_STATISTICS_DESCRIPTOR). Providing an insufficient buffer size
+ * will result in an EINVAL error. This field must contain a valid length under
+ * either of these conditions: 1. DMA flag is set (immediate DMA requested) 2.
+ * Both PERIODIC_CHANGE and PERIODIC_ENABLE are set (periodic DMA configured)
+ */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_LEN_OFST 16
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_LEN_LEN 4
+
+/* MC_CMD_GET_NETPORT_STATISTICS_OUT msgresponse */
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_LENMIN 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_LENMAX 248
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_LENMAX_MCDI2 1016
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_LEN(num) (0+8*(num))
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_NUM(len) (((len)-0)/8)
+/* Statistics buffer. Zero-length if DMA mode is used. The statistics buffer is
+ * an array of 8-byte counter values, containing the generation start marker,
+ * stats counters, and generation end marker. The index of each counter in the
+ * array is reported by the MAC_STATISTICS_DESCRIPTOR command. The same layout
+ * is used for the DMA buffer for DMA mode stats.
+ */
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_OFST 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LEN 8
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LO_OFST 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LO_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LO_LBN 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LO_WIDTH 32
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_HI_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_HI_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_HI_LBN 32
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_HI_WIDTH 32
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_MINNUM 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_MAXNUM 31
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_MAXNUM_MCDI2 127
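
Tying the two commands together: given a counter's STAT_INDEX from the descriptor list, a non-DMA MC_CMD_GET_NETPORT_STATISTICS response can be indexed directly; the same layout applies to the DMA buffer in DMA mode. A sketch, reusing the mcdi_le32 helper from the earlier sketch:

static uint64_t read_netport_stat(const uint8_t *outbuf, size_t outlen,
				  unsigned int stat_index)
{
	const uint8_t *p = outbuf + MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_OFST +
			   stat_index * MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LEN;

	if (p + MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LEN > outbuf + outlen)
		return 0; /* index not present in this response */
	return (uint64_t)mcdi_le32(p) | ((uint64_t)mcdi_le32(p + 4) << 32);
}
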
/* EVB_PORT_ID structuredef */
#define EVB_PORT_ID_LEN 4
@@ -9706,44 +10573,6 @@
#define EVB_PORT_ID_PORT_ID_LBN 0
#define EVB_PORT_ID_PORT_ID_WIDTH 32
-/* EVB_VLAN_TAG structuredef */
-#define EVB_VLAN_TAG_LEN 2
-/* The VLAN tag value */
-#define EVB_VLAN_TAG_VLAN_ID_LBN 0
-#define EVB_VLAN_TAG_VLAN_ID_WIDTH 12
-#define EVB_VLAN_TAG_MODE_LBN 12
-#define EVB_VLAN_TAG_MODE_WIDTH 4
-/* enum: Insert the VLAN. */
-#define EVB_VLAN_TAG_INSERT 0x0
-/* enum: Replace the VLAN if already present. */
-#define EVB_VLAN_TAG_REPLACE 0x1
-
-/* BUFTBL_ENTRY structuredef */
-#define BUFTBL_ENTRY_LEN 12
-/* the owner ID */
-#define BUFTBL_ENTRY_OID_OFST 0
-#define BUFTBL_ENTRY_OID_LEN 2
-#define BUFTBL_ENTRY_OID_LBN 0
-#define BUFTBL_ENTRY_OID_WIDTH 16
-/* the page parameter as one of ESE_DZ_SMC_PAGE_SIZE_ */
-#define BUFTBL_ENTRY_PGSZ_OFST 2
-#define BUFTBL_ENTRY_PGSZ_LEN 2
-#define BUFTBL_ENTRY_PGSZ_LBN 16
-#define BUFTBL_ENTRY_PGSZ_WIDTH 16
-/* the raw 64-bit address field from the SMC, not adjusted for page size */
-#define BUFTBL_ENTRY_RAWADDR_OFST 4
-#define BUFTBL_ENTRY_RAWADDR_LEN 8
-#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4
-#define BUFTBL_ENTRY_RAWADDR_LO_LEN 4
-#define BUFTBL_ENTRY_RAWADDR_LO_LBN 32
-#define BUFTBL_ENTRY_RAWADDR_LO_WIDTH 32
-#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8
-#define BUFTBL_ENTRY_RAWADDR_HI_LEN 4
-#define BUFTBL_ENTRY_RAWADDR_HI_LBN 64
-#define BUFTBL_ENTRY_RAWADDR_HI_WIDTH 32
-#define BUFTBL_ENTRY_RAWADDR_LBN 32
-#define BUFTBL_ENTRY_RAWADDR_WIDTH 64
-
/* NVRAM_PARTITION_TYPE structuredef */
#define NVRAM_PARTITION_TYPE_LEN 2
#define NVRAM_PARTITION_TYPE_ID_OFST 0
@@ -9787,6 +10616,8 @@
#define NVRAM_PARTITION_TYPE_NMC_LOG 0x700
/* enum: Non-volatile log output of second core on dual-core device */
#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701
+/* enum: RAM (volatile) log output partition */
+#define NVRAM_PARTITION_TYPE_RAM_LOG 0x702
/* enum: Device state dump output partition */
#define NVRAM_PARTITION_TYPE_DUMP 0x800
/* enum: Crash log partition for NMC firmware */
@@ -9923,6 +10754,16 @@
#define NVRAM_PARTITION_TYPE_SUC_SOC_CONFIG 0x1f07
/* enum: System-on-Chip update information. */
#define NVRAM_PARTITION_TYPE_SOC_UPDATE 0x2003
+/* enum: Virtual partition. Write-only. Writes will actually be sent to an
+ * appropriate partition (for instance BUNDLE if the data starts with the magic
+ * number for a bundle update), or discarded with an error if not recognised as
+ * a supported type.
+ */
+#define NVRAM_PARTITION_TYPE_AUTO 0x2100
+/* enum: MC/NMC (first stage) bootloader firmware. (For X4, see XN-202072-PS
+ * and XN-202084-SW section 3.1).
+ */
+#define NVRAM_PARTITION_TYPE_BOOTLOADER 0x2200
/* enum: Start of reserved value range (firmware may use for any purpose) */
#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
/* enum: End of reserved value range (firmware may use for any purpose) */
@@ -9981,116 +10822,6 @@
#define LICENSED_APP_ID_ID_LBN 0
#define LICENSED_APP_ID_ID_WIDTH 32
-/* LICENSED_FEATURES structuredef */
-#define LICENSED_FEATURES_LEN 8
-/* Bitmask of licensed firmware features */
-#define LICENSED_FEATURES_MASK_OFST 0
-#define LICENSED_FEATURES_MASK_LEN 8
-#define LICENSED_FEATURES_MASK_LO_OFST 0
-#define LICENSED_FEATURES_MASK_LO_LEN 4
-#define LICENSED_FEATURES_MASK_LO_LBN 0
-#define LICENSED_FEATURES_MASK_LO_WIDTH 32
-#define LICENSED_FEATURES_MASK_HI_OFST 4
-#define LICENSED_FEATURES_MASK_HI_LEN 4
-#define LICENSED_FEATURES_MASK_HI_LBN 32
-#define LICENSED_FEATURES_MASK_HI_WIDTH 32
-#define LICENSED_FEATURES_RX_CUT_THROUGH_OFST 0
-#define LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0
-#define LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1
-#define LICENSED_FEATURES_PIO_OFST 0
-#define LICENSED_FEATURES_PIO_LBN 1
-#define LICENSED_FEATURES_PIO_WIDTH 1
-#define LICENSED_FEATURES_EVQ_TIMER_OFST 0
-#define LICENSED_FEATURES_EVQ_TIMER_LBN 2
-#define LICENSED_FEATURES_EVQ_TIMER_WIDTH 1
-#define LICENSED_FEATURES_CLOCK_OFST 0
-#define LICENSED_FEATURES_CLOCK_LBN 3
-#define LICENSED_FEATURES_CLOCK_WIDTH 1
-#define LICENSED_FEATURES_RX_TIMESTAMPS_OFST 0
-#define LICENSED_FEATURES_RX_TIMESTAMPS_LBN 4
-#define LICENSED_FEATURES_RX_TIMESTAMPS_WIDTH 1
-#define LICENSED_FEATURES_TX_TIMESTAMPS_OFST 0
-#define LICENSED_FEATURES_TX_TIMESTAMPS_LBN 5
-#define LICENSED_FEATURES_TX_TIMESTAMPS_WIDTH 1
-#define LICENSED_FEATURES_RX_SNIFF_OFST 0
-#define LICENSED_FEATURES_RX_SNIFF_LBN 6
-#define LICENSED_FEATURES_RX_SNIFF_WIDTH 1
-#define LICENSED_FEATURES_TX_SNIFF_OFST 0
-#define LICENSED_FEATURES_TX_SNIFF_LBN 7
-#define LICENSED_FEATURES_TX_SNIFF_WIDTH 1
-#define LICENSED_FEATURES_PROXY_FILTER_OPS_OFST 0
-#define LICENSED_FEATURES_PROXY_FILTER_OPS_LBN 8
-#define LICENSED_FEATURES_PROXY_FILTER_OPS_WIDTH 1
-#define LICENSED_FEATURES_EVENT_CUT_THROUGH_OFST 0
-#define LICENSED_FEATURES_EVENT_CUT_THROUGH_LBN 9
-#define LICENSED_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
-#define LICENSED_FEATURES_MASK_LBN 0
-#define LICENSED_FEATURES_MASK_WIDTH 64
-
-/* LICENSED_V3_APPS structuredef */
-#define LICENSED_V3_APPS_LEN 8
-/* Bitmask of licensed applications */
-#define LICENSED_V3_APPS_MASK_OFST 0
-#define LICENSED_V3_APPS_MASK_LEN 8
-#define LICENSED_V3_APPS_MASK_LO_OFST 0
-#define LICENSED_V3_APPS_MASK_LO_LEN 4
-#define LICENSED_V3_APPS_MASK_LO_LBN 0
-#define LICENSED_V3_APPS_MASK_LO_WIDTH 32
-#define LICENSED_V3_APPS_MASK_HI_OFST 4
-#define LICENSED_V3_APPS_MASK_HI_LEN 4
-#define LICENSED_V3_APPS_MASK_HI_LBN 32
-#define LICENSED_V3_APPS_MASK_HI_WIDTH 32
-#define LICENSED_V3_APPS_ONLOAD_OFST 0
-#define LICENSED_V3_APPS_ONLOAD_LBN 0
-#define LICENSED_V3_APPS_ONLOAD_WIDTH 1
-#define LICENSED_V3_APPS_PTP_OFST 0
-#define LICENSED_V3_APPS_PTP_LBN 1
-#define LICENSED_V3_APPS_PTP_WIDTH 1
-#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_OFST 0
-#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_LBN 2
-#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_WIDTH 1
-#define LICENSED_V3_APPS_SOLARSECURE_OFST 0
-#define LICENSED_V3_APPS_SOLARSECURE_LBN 3
-#define LICENSED_V3_APPS_SOLARSECURE_WIDTH 1
-#define LICENSED_V3_APPS_PERF_MONITOR_OFST 0
-#define LICENSED_V3_APPS_PERF_MONITOR_LBN 4
-#define LICENSED_V3_APPS_PERF_MONITOR_WIDTH 1
-#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_OFST 0
-#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_LBN 5
-#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_WIDTH 1
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_OFST 0
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_LBN 6
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_WIDTH 1
-#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_OFST 0
-#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_LBN 7
-#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_WIDTH 1
-#define LICENSED_V3_APPS_TCP_DIRECT_OFST 0
-#define LICENSED_V3_APPS_TCP_DIRECT_LBN 8
-#define LICENSED_V3_APPS_TCP_DIRECT_WIDTH 1
-#define LICENSED_V3_APPS_LOW_LATENCY_OFST 0
-#define LICENSED_V3_APPS_LOW_LATENCY_LBN 9
-#define LICENSED_V3_APPS_LOW_LATENCY_WIDTH 1
-#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_OFST 0
-#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_LBN 10
-#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_OFST 0
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_OFST 0
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1
-#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_OFST 0
-#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13
-#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1
-#define LICENSED_V3_APPS_DSHBRD_OFST 0
-#define LICENSED_V3_APPS_DSHBRD_LBN 14
-#define LICENSED_V3_APPS_DSHBRD_WIDTH 1
-#define LICENSED_V3_APPS_SCATRD_OFST 0
-#define LICENSED_V3_APPS_SCATRD_LBN 15
-#define LICENSED_V3_APPS_SCATRD_WIDTH 1
-#define LICENSED_V3_APPS_MASK_LBN 0
-#define LICENSED_V3_APPS_MASK_WIDTH 64
-
/* LICENSED_V3_FEATURES structuredef */
#define LICENSED_V3_FEATURES_LEN 8
/* Bitmask of licensed firmware features */
@@ -10199,44 +10930,6 @@
#define RSS_MODE_HASH_SELECTOR_LBN 0
#define RSS_MODE_HASH_SELECTOR_WIDTH 8
-/* CTPIO_STATS_MAP structuredef */
-#define CTPIO_STATS_MAP_LEN 4
-/* The (function relative) VI number */
-#define CTPIO_STATS_MAP_VI_OFST 0
-#define CTPIO_STATS_MAP_VI_LEN 2
-#define CTPIO_STATS_MAP_VI_LBN 0
-#define CTPIO_STATS_MAP_VI_WIDTH 16
-/* The target bucket for the VI */
-#define CTPIO_STATS_MAP_BUCKET_OFST 2
-#define CTPIO_STATS_MAP_BUCKET_LEN 2
-#define CTPIO_STATS_MAP_BUCKET_LBN 16
-#define CTPIO_STATS_MAP_BUCKET_WIDTH 16
-
-
-/***********************************/
-/* MC_CMD_READ_REGS
- * Get a dump of the MCPU registers
- */
-#define MC_CMD_READ_REGS 0x50
-#undef MC_CMD_0x50_PRIVILEGE_CTG
-
-#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_READ_REGS_IN msgrequest */
-#define MC_CMD_READ_REGS_IN_LEN 0
-
-/* MC_CMD_READ_REGS_OUT msgresponse */
-#define MC_CMD_READ_REGS_OUT_LEN 308
-/* Whether the corresponding register entry contains a valid value */
-#define MC_CMD_READ_REGS_OUT_MASK_OFST 0
-#define MC_CMD_READ_REGS_OUT_MASK_LEN 16
-/* Same order as MIPS GDB (r0-r31, sr, lo, hi, bad, cause, 32 x float, fsr,
- * fir, fp)
- */
-#define MC_CMD_READ_REGS_OUT_REGS_OFST 16
-#define MC_CMD_READ_REGS_OUT_REGS_LEN 4
-#define MC_CMD_READ_REGS_OUT_REGS_NUM 73
-
/***********************************/
/* MC_CMD_INIT_EVQ
@@ -10640,25 +11333,6 @@
#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3
#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1
-/* QUEUE_CRC_MODE structuredef */
-#define QUEUE_CRC_MODE_LEN 1
-#define QUEUE_CRC_MODE_MODE_LBN 0
-#define QUEUE_CRC_MODE_MODE_WIDTH 4
-/* enum: No CRC. */
-#define QUEUE_CRC_MODE_NONE 0x0
-/* enum: CRC Fiber channel over ethernet. */
-#define QUEUE_CRC_MODE_FCOE 0x1
-/* enum: CRC (digest) iSCSI header only. */
-#define QUEUE_CRC_MODE_ISCSI_HDR 0x2
-/* enum: CRC (digest) iSCSI header and payload. */
-#define QUEUE_CRC_MODE_ISCSI 0x3
-/* enum: CRC Fiber channel over IP over ethernet. */
-#define QUEUE_CRC_MODE_FCOIPOE 0x4
-/* enum: CRC MPA. */
-#define QUEUE_CRC_MODE_MPA 0x5
-#define QUEUE_CRC_MODE_SPARE_LBN 4
-#define QUEUE_CRC_MODE_SPARE_WIDTH 4
-
/***********************************/
/* MC_CMD_INIT_RXQ
@@ -10827,6 +11501,9 @@
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_OFST 16
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_LBN 20
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SUPPRESS_RX_EVENTS_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SUPPRESS_RX_EVENTS_LBN 21
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SUPPRESS_RX_EVENTS_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4
@@ -10933,6 +11610,9 @@
#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_OFST 16
#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_LBN 20
#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SUPPRESS_RX_EVENTS_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SUPPRESS_RX_EVENTS_LBN 21
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SUPPRESS_RX_EVENTS_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_OFST 20
#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_LEN 4
@@ -11068,6 +11748,9 @@
#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_OFST 16
#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_LBN 20
#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SUPPRESS_RX_EVENTS_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SUPPRESS_RX_EVENTS_LBN 21
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SUPPRESS_RX_EVENTS_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_OFST 20
#define MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_LEN 4
@@ -11216,6 +11899,9 @@
#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_OFST 16
#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_LBN 20
#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SUPPRESS_RX_EVENTS_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SUPPRESS_RX_EVENTS_LBN 21
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SUPPRESS_RX_EVENTS_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_OFST 20
#define MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_LEN 4
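The new SUPPRESS_RX_EVENTS flag sits at bit 21 of the FLAGS dword (byte offset 16) in each INIT_RXQ request variant (EXT, V3, V4 and V5), one bit above the existing NO_CONT_EV flag. A minimal sketch of assembling that dword from the constants above, using the V5 variant; the helper name and its parameters are illustrative assumptions, not driver code:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative helper: build the INIT_RXQ_V5 FLAGS dword (request byte
 * offset 16).  Only the two flags shown in this hunk are handled; all
 * other FLAGS bits are left clear for brevity.
 */
static uint32_t init_rxq_v5_flags(bool no_cont_ev, bool suppress_rx_events)
{
	uint32_t flags = 0;

	if (no_cont_ev)
		flags |= 1u << MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_LBN;
	if (suppress_rx_events)
		flags |= 1u << MC_CMD_INIT_RXQ_V5_IN_FLAG_SUPPRESS_RX_EVENTS_LBN;

	return flags;
}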
@@ -11610,320 +12296,6 @@
/* MC_CMD_PROXY_CMD_OUT msgresponse */
#define MC_CMD_PROXY_CMD_OUT_LEN 0
-/* MC_PROXY_STATUS_BUFFER structuredef: Host memory status buffer used to
- * manage proxied requests
- */
-#define MC_PROXY_STATUS_BUFFER_LEN 16
-/* Handle allocated by the firmware for this proxy transaction */
-#define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
-#define MC_PROXY_STATUS_BUFFER_HANDLE_LEN 4
-/* enum: An invalid handle. */
-#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0
-#define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
-#define MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32
-/* The requesting physical function number */
-#define MC_PROXY_STATUS_BUFFER_PF_OFST 4
-#define MC_PROXY_STATUS_BUFFER_PF_LEN 2
-#define MC_PROXY_STATUS_BUFFER_PF_LBN 32
-#define MC_PROXY_STATUS_BUFFER_PF_WIDTH 16
-/* The requesting virtual function number. Set to VF_NULL if the target is a
- * PF.
- */
-#define MC_PROXY_STATUS_BUFFER_VF_OFST 6
-#define MC_PROXY_STATUS_BUFFER_VF_LEN 2
-#define MC_PROXY_STATUS_BUFFER_VF_LBN 48
-#define MC_PROXY_STATUS_BUFFER_VF_WIDTH 16
-/* The target function RID. */
-#define MC_PROXY_STATUS_BUFFER_RID_OFST 8
-#define MC_PROXY_STATUS_BUFFER_RID_LEN 2
-#define MC_PROXY_STATUS_BUFFER_RID_LBN 64
-#define MC_PROXY_STATUS_BUFFER_RID_WIDTH 16
-/* The status of the proxy as described in MC_CMD_PROXY_COMPLETE. */
-#define MC_PROXY_STATUS_BUFFER_STATUS_OFST 10
-#define MC_PROXY_STATUS_BUFFER_STATUS_LEN 2
-#define MC_PROXY_STATUS_BUFFER_STATUS_LBN 80
-#define MC_PROXY_STATUS_BUFFER_STATUS_WIDTH 16
-/* If a request is authorized rather than carried out by the host, this is the
- * elevated privilege mask granted to the requesting function.
- */
-#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
-#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LEN 4
-#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
-#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_PROXY_CONFIGURE
- * Enable/disable authorization of MCDI requests from unprivileged functions by
- * a designated admin function
- */
-#define MC_CMD_PROXY_CONFIGURE 0x58
-#undef MC_CMD_0x58_PRIVILEGE_CTG
-
-#define MC_CMD_0x58_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
-#define MC_CMD_PROXY_CONFIGURE_IN_LEN 108
-#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
-#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_OFST 0
-#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
-#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size REQUEST_BLOCK_SIZE.
- */
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_LBN 32
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_LBN 64
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2 */
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_LEN 4
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size REPLY_BLOCK_SIZE.
- */
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_LBN 128
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_LBN 160
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2 */
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_LEN 4
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
- * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
- */
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_LBN 224
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_LBN 256
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2, or zero if this buffer is not provided */
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_LEN 4
-/* Applies to all three buffers */
-#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
-#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_LEN 4
-/* A bit mask defining which MCDI operations may be proxied */
-#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
-#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
-
-/* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_OFST 0
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size REQUEST_BLOCK_SIZE.
- */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_OFST 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_OFST 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_LBN 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_LBN 64
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2 */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_LEN 4
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size REPLY_BLOCK_SIZE.
- */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_OFST 16
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_OFST 16
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_LBN 128
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_LBN 160
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2 */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_LEN 4
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
- * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
- */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_OFST 28
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_OFST 28
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_LBN 224
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_LBN 256
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2, or zero if this buffer is not provided */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_LEN 4
-/* Applies to all three buffers */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_LEN 4
-/* A bit mask defining which MCDI operations may be proxied */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_LEN 4
-
-/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
-#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_PROXY_COMPLETE
- * Tells FW that a requested proxy operation has either been completed (by
- * using MC_CMD_PROXY_CMD) or authorized/declined. May only be sent by the
- * function that enabled proxying/authorization (by using
- * MC_CMD_PROXY_CONFIGURE).
- */
-#define MC_CMD_PROXY_COMPLETE 0x5f
-#undef MC_CMD_0x5f_PRIVILEGE_CTG
-
-#define MC_CMD_0x5f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PROXY_COMPLETE_IN msgrequest */
-#define MC_CMD_PROXY_COMPLETE_IN_LEN 12
-#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0
-#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_LEN 4
-#define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4
-#define MC_CMD_PROXY_COMPLETE_IN_STATUS_LEN 4
-/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply
- * is stored in the REPLY_BUFF.
- */
-#define MC_CMD_PROXY_COMPLETE_IN_COMPLETE 0x0
-/* enum: The operation has been authorized. The originating function may now
- * try again.
- */
-#define MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED 0x1
-/* enum: The operation has been declined. */
-#define MC_CMD_PROXY_COMPLETE_IN_DECLINED 0x2
-/* enum: The authorization failed because the relevant application did not
- * respond in time.
- */
-#define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3
-#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8
-#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_LEN 4
-
-/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */
-#define MC_CMD_PROXY_COMPLETE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_ALLOC_BUFTBL_CHUNK
- * Allocate a set of buffer table entries using the specified owner ID. This
- * operation allocates the required buffer table entries (and fails if it
- * cannot do so). The buffer table entries will initially be zeroed.
- */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
-#undef MC_CMD_0x87_PRIVILEGE_CTG
-
-#define MC_CMD_0x87_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
-/* Owner ID to use */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_LEN 4
-/* Size of buffer table pages to use, in bytes (note that only a few values are
- * legal on any specific hardware).
- */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_LEN 4
-
-/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_LEN 4
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_LEN 4
-/* Buffer table IDs for use in DMA descriptors. */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PROGRAM_BUFTBL_ENTRIES
- * Reprogram a set of buffer table entries in the specified chunk.
- */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88
-#undef MC_CMD_0x88_PRIVILEGE_CTG
-
-#define MC_CMD_0x88_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX_MCDI2 268
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_NUM(len) (((len)-12)/8)
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4
-/* ID */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
-/* Num entries */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
-/* Buffer table entry address */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_LEN 4
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_LBN 96
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_WIDTH 32
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_LEN 4
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_LBN 128
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_WIDTH 32
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM_MCDI2 32
-
-/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_FREE_BUFTBL_CHUNK
- */
-#define MC_CMD_FREE_BUFTBL_CHUNK 0x89
-#undef MC_CMD_0x89_PRIVILEGE_CTG
-
-#define MC_CMD_0x89_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
-#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
-#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
-#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_LEN 4
-
-/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
-#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
-
/***********************************/
/* MC_CMD_FILTER_OP
@@ -12822,6 +13194,10 @@
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES 0x5
/* enum: read the supported encapsulation types for the VNIC */
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_TYPES 0x6
+/* enum: read the supported RX filter matches for low-latency queues (as
+ * allocated by MC_CMD_ALLOC_LL_QUEUES)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_LL_RX_MATCHES 0x7
/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
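The OP enum for MC_CMD_GET_PARSER_DISP_INFO now extends to 0x7 for low-latency queues. As a purely illustrative sketch (the function is an assumption, not part of the driver), the ops visible in this hunk can be mapped to printable names like so:

#include <stdint.h>

/* Illustrative only: name the GET_PARSER_DISP_INFO ops shown above,
 * including the new low-latency RX matches query (0x7).
 */
static const char *parser_disp_info_op_name(uint32_t op)
{
	switch (op) {
	case MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES:
		return "supported VNIC encapsulation matches";
	case MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_TYPES:
		return "supported VNIC encapsulation types";
	case MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_LL_RX_MATCHES:
		return "supported RX matches for low-latency queues";
	default:
		return "other op";
	}
}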
@@ -12860,6 +13236,48 @@
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
+/* MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT msgresponse:
+ * GET_PARSER_DISP_INFO response format for OP_GET_SECURITY_RULE_INFO.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_LEN 36
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* a version number representing the set of rule lookups that are implemented
+ * by the currently running firmware
+ */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_LEN 4
+/* enum: implements lookup sequences described in SF-114946-SW draft C */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_SF_114946_SW_C 0x0
+/* the number of nodes in the subnet map */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_OFST 8
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_LEN 4
+/* the number of entries in one subnet map node */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_OFST 12
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_LEN 4
+/* minimum valid value for a subnet ID in a subnet map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_OFST 16
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_LEN 4
+/* maximum valid value for a subnet ID in a subnet map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_OFST 20
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_LEN 4
+/* the number of entries in the local and remote port range maps */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_OFST 24
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_LEN 4
+/* minimum valid value for a portrange ID in a port range map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_OFST 28
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_LEN 4
+/* maximum valid value for a portrange ID in a port range map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_OFST 32
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_LEN 4
+
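The 36-byte MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT response is nine consecutive dwords, so it can be decoded directly from the _OFST constants. A hedged sketch follows, decoding a subset of the fields from a raw little-endian response buffer; the struct, helper and function names are assumptions for illustration only:

#include <stddef.h>
#include <stdint.h>

struct sec_rule_info {
	uint32_t rules_version;
	uint32_t subnet_map_num_nodes;
	uint32_t portrange_tree_num_entries;
};

/* Read one little-endian dword out of a raw MCDI response buffer. */
static uint32_t mcdi_dword(const uint8_t *buf, size_t ofst)
{
	return (uint32_t)buf[ofst] |
	       ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) |
	       ((uint32_t)buf[ofst + 3] << 24);
}

/* Decode a subset of GET_PARSER_DISP_SECURITY_RULE_INFO_OUT fields. */
static void decode_sec_rule_info(const uint8_t *rsp, struct sec_rule_info *out)
{
	out->rules_version = mcdi_dword(rsp,
		MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_OFST);
	out->subnet_map_num_nodes = mcdi_dword(rsp,
		MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_OFST);
	out->portrange_tree_num_entries = mcdi_dword(rsp,
		MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_OFST);
}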
/* MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT msgresponse: This response is
* returned if a MC_CMD_GET_PARSER_DISP_INFO_IN request is sent with OP value
* OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES. It contains information about the
@@ -12914,136 +13332,6 @@
/***********************************/
-/* MC_CMD_PARSER_DISP_RW
- * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging.
- * Please note that this interface is only of use to debug tools which have
- * knowledge of firmware and hardware data structures; nothing here is intended
- * for use by normal driver code. Note that although this command is in the
- * Admin privilege group, in tamperproof adapters, only read operations are
- * permitted.
- */
-#define MC_CMD_PARSER_DISP_RW 0xe5
-#undef MC_CMD_0xe5_PRIVILEGE_CTG
-
-#define MC_CMD_0xe5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PARSER_DISP_RW_IN msgrequest */
-#define MC_CMD_PARSER_DISP_RW_IN_LEN 32
-/* identifies the target of the operation */
-#define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
-#define MC_CMD_PARSER_DISP_RW_IN_TARGET_LEN 4
-/* enum: RX dispatcher CPU */
-#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
-/* enum: TX dispatcher CPU */
-#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
-/* enum: Lookup engine (with original metadata format). Deprecated; used only
- * by cmdclient as a fallback for very old Huntington firmware, and not
- * supported in firmware beyond v6.4.0.1005. Use LUE_VERSIONED_METADATA
- * instead.
- */
-#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
-/* enum: Lookup engine (with requested metadata format) */
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3
-/* enum: RX0 dispatcher CPU (alias for RX_DICPU; Medford has 2 RX DICPUs) */
-#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0
-/* enum: RX1 dispatcher CPU (only valid for Medford) */
-#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4
-/* enum: Miscellaneous other state (only valid for Medford) */
-#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5
-/* identifies the type of operation requested */
-#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
-#define MC_CMD_PARSER_DISP_RW_IN_OP_LEN 4
-/* enum: Read a word of DICPU DMEM or a LUE entry */
-#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0
-/* enum: Write a word of DICPU DMEM or a LUE entry. Not permitted on
- * tamperproof adapters.
- */
-#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
-/* enum: Read-modify-write a word of DICPU DMEM (not valid for LUE). Not
- * permitted on tamperproof adapters.
- */
-#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
-/* data memory address (DICPU targets) or LUE index (LUE targets) */
-#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
-#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_LEN 4
-/* selector (for MISC_STATE target) */
-#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8
-#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_LEN 4
-/* enum: Port to datapath mapping */
-#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1
-/* value to write (for DMEM writes) */
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_LEN 4
-/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_LEN 4
-/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_LEN 4
-/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_LEN 4
-/* value to write (for LUE writes) */
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
-
-/* MC_CMD_PARSER_DISP_RW_OUT msgresponse */
-#define MC_CMD_PARSER_DISP_RW_OUT_LEN 52
-/* value read (for DMEM reads) */
-#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
-#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_LEN 4
-/* value read (for LUE reads) */
-#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
-#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
-/* up to 8 32-bit words of additional soft state from the LUE manager (the
- * exact content is firmware-dependent and intended only for debug use)
- */
-#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20
-#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32
-/* datapath(s) used for each port (for MISC_STATE PORT_DP_MAPPING selector) */
-#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_OFST 0
-#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_LEN 4
-#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_NUM 4
-#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */
-#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */
-
-
-/***********************************/
-/* MC_CMD_GET_PF_COUNT
- * Get number of PFs on the device.
- */
-#define MC_CMD_GET_PF_COUNT 0xb6
-#undef MC_CMD_0xb6_PRIVILEGE_CTG
-
-#define MC_CMD_0xb6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PF_COUNT_IN msgrequest */
-#define MC_CMD_GET_PF_COUNT_IN_LEN 0
-
-/* MC_CMD_GET_PF_COUNT_OUT msgresponse */
-#define MC_CMD_GET_PF_COUNT_OUT_LEN 1
-/* Identifies the number of PFs on the device. */
-#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST 0
-#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_LEN 1
-
-
-/***********************************/
-/* MC_CMD_SET_PF_COUNT
- * Set number of PFs on the device.
- */
-#define MC_CMD_SET_PF_COUNT 0xb7
-
-/* MC_CMD_SET_PF_COUNT_IN msgrequest */
-#define MC_CMD_SET_PF_COUNT_IN_LEN 4
-/* New number of PFs on the device. */
-#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
-#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_LEN 4
-
-/* MC_CMD_SET_PF_COUNT_OUT msgresponse */
-#define MC_CMD_SET_PF_COUNT_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_PORT_ASSIGNMENT
* Get port assignment for current PCI function.
*/
@@ -13069,25 +13357,6 @@
/***********************************/
-/* MC_CMD_SET_PORT_ASSIGNMENT
- * Set port assignment for current PCI function.
- */
-#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9
-#undef MC_CMD_0xb9_PRIVILEGE_CTG
-
-#define MC_CMD_0xb9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
-#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
-/* Identifies the port assignment for this function. */
-#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
-#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_LEN 4
-
-/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
-#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_ALLOC_VIS
* Allocate VIs for current PCI function.
*/
@@ -13184,263 +13453,6 @@
/***********************************/
-/* MC_CMD_SET_SRIOV_CFG
- * Set SRIOV config for this PF.
- */
-#define MC_CMD_SET_SRIOV_CFG 0xbb
-#undef MC_CMD_0xbb_PRIVILEGE_CTG
-
-#define MC_CMD_0xbb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
-#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
-/* Number of VFs currently enabled. */
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_LEN 4
-/* Max number of VFs before sriov stride and offset may need to be changed. */
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4
-#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
-#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_OFST 8
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
-/* RID offset of first VF from PF, or 0 for no change, or
- * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
- */
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_LEN 4
-/* RID offset of each subsequent VF from the previous, 0 for no change, or
- * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
- */
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_LEN 4
-
-/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
-#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_VI_ALLOC_INFO
- * Get information about number of VI's and base VI number allocated to this
- * function. This message is not available to dynamic clients created by
- * MC_CMD_CLIENT_ALLOC.
- */
-#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
-#undef MC_CMD_0x8d_PRIVILEGE_CTG
-
-#define MC_CMD_0x8d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
-#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
-
-/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
-/* The number of VIs allocated on this function */
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_LEN 4
-/* The base absolute VI number allocated to this function. Required to
- * correctly interpret wakeup events.
- */
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_LEN 4
-/* Function's port vi_shift value (always 0 on Huntington) */
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_LEN 4
-
-
-/***********************************/
-/* MC_CMD_DUMP_VI_STATE
- * For CmdClient use. Dump pertinent information on a specific absolute VI. The
- * VI must be owned by the calling client or one of its ancestors; usership of
- * the VI (as set by MC_CMD_SET_VI_USER) is not sufficient.
- */
-#define MC_CMD_DUMP_VI_STATE 0x8e
-#undef MC_CMD_0x8e_PRIVILEGE_CTG
-
-#define MC_CMD_0x8e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_DUMP_VI_STATE_IN msgrequest */
-#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
-/* The VI number to query. */
-#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
-#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4
-
-/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
-#define MC_CMD_DUMP_VI_STATE_OUT_LEN 100
-/* The PF part of the function owning this VI. */
-#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0
-#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2
-/* The VF part of the function owning this VI. */
-#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_OFST 2
-#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_LEN 2
-/* Base of VIs allocated to this function. */
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_OFST 4
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_LEN 2
-/* Count of VIs allocated to the owner function. */
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_OFST 6
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_LEN 2
-/* Base interrupt vector allocated to this function. */
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_OFST 8
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_LEN 2
-/* Number of interrupt vectors allocated to this function. */
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_OFST 10
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_LEN 2
-/* Raw evq ptr table data. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_LBN 96
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_LBN 128
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_WIDTH 32
-/* Raw evq timer table data. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_LBN 160
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_LBN 192
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_WIDTH 32
-/* Combined metadata field. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_OFST 28
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_OFST 28
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_OFST 28
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8
-/* TXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_LBN 256
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_LBN 288
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_WIDTH 32
-/* TXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_LBN 320
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_LBN 352
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_WIDTH 32
-/* TXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_LBN 384
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_LBN 416
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_WIDTH 32
-/* Combined metadata field. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_LBN 448
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_LBN 480
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24
-/* RXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_LBN 512
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_LBN 544
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_WIDTH 32
-/* RXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_LBN 576
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_LBN 608
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_WIDTH 32
-/* Reserved, currently 0. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_LBN 640
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_LBN 672
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_WIDTH 32
-/* Combined metadata field. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_LBN 704
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_LBN 736
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8
-/* Current user, as assigned by MC_CMD_SET_VI_USER. */
-#define MC_CMD_DUMP_VI_STATE_OUT_USER_CLIENT_ID_OFST 96
-#define MC_CMD_DUMP_VI_STATE_OUT_USER_CLIENT_ID_LEN 4
-
-
-/***********************************/
/* MC_CMD_ALLOC_PIOBUF
* Allocate a push I/O buffer for later use with a tx queue.
*/
@@ -13491,354 +13503,102 @@
/* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
-/* VI number to get information for. */
+/* Queue handle, encoding the queue type and VI number to get information for. */
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
-/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
-/* Transaction processing steering hint 1 for use with the Rx Queue. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_OFST 0
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_LEN 1
-/* Transaction processing steering hint 2 for use with the Ev Queue. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_OFST 1
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_LEN 1
-/* Use Relaxed ordering model for TLPs on this VI. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN 16
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_WIDTH 1
-/* Use ID based ordering for TLPs on this VI. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_LBN 17
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_WIDTH 1
-/* Set no snoop bit for TLPs on this VI. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_LBN 18
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_WIDTH 1
-/* Enable TPH for TLPs on this VI. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_LEN 4
+/* MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT msgresponse: This message has the same
+ * layout as GET_VI_TLP_PROCESSING_OUT, but with corrected field ordering to
+ * simplify use in drivers
+ */
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_LEN 4
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_DATA_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_DATA_LEN 4
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_LBN 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_WIDTH 8
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG2_EV_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG2_EV_LBN 8
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG2_EV_WIDTH 8
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_LBN 16
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_PACKET_DATA_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_PACKET_DATA_LBN 16
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_PACKET_DATA_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_ID_BASED_ORDERING_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_ID_BASED_ORDERING_LBN 17
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_ID_BASED_ORDERING_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_NO_SNOOP_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_NO_SNOOP_LBN 18
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_NO_SNOOP_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_ON_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_ON_LBN 19
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_ON_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_SYNC_DATA_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_SYNC_DATA_LBN 20
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_SYNC_DATA_WIDTH 1
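Every field of the V2 response is expressed as an LBN/WIDTH pair within the single DATA dword at offset 0, so a caller can extract them uniformly. A minimal sketch, assuming a generic bitfield helper (not a driver API):

#include <stdbool.h>
#include <stdint.h>

/* Assumed generic helper: extract an LBN/WIDTH field from one dword. */
static uint32_t mcdi_field(uint32_t dword, unsigned int lbn, unsigned int width)
{
	uint32_t mask = (width < 32) ? ((1u << width) - 1u) : ~0u;

	return (dword >> lbn) & mask;
}

/* Example: read the RX steering tag and the relaxed-ordering flag from the
 * DATA dword of MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT.
 */
static void read_tlp_v2(uint32_t data, uint8_t *tph_tag1_rx, bool *relaxed)
{
	*tph_tag1_rx = mcdi_field(data,
		MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_LBN,
		MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_WIDTH);
	*relaxed = mcdi_field(data,
		MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_LBN,
		MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_WIDTH);
}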
/***********************************/
/* MC_CMD_SET_VI_TLP_PROCESSING
* Set TLP steering and ordering information for a VI. The caller must have the
* GRP_FUNC_DMA privilege and must be the currently-assigned user of this VI or
- * an ancestor of the current user (see MC_CMD_SET_VI_USER).
+ * an ancestor of the current user (see MC_CMD_SET_VI_USER). Note that LL
+ * queues require this to be called after allocation but before initialisation
+ * of the queue. The TLP options of a queue are fixed once the queue is
+ * initialised; they take the current global values unless overridden using
+ * this command beforehand. At LL queue allocation, all overrides are cleared.
*/
#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1
#undef MC_CMD_0xb1_PRIVILEGE_CTG
#define MC_CMD_0xb1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-/* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
-/* VI number to set information for. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
-/* Transaction processing steering hint 1 for use with the Rx Queue. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
-/* Transaction processing steering hint 2 for use with the Ev Queue. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_OFST 5
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_LEN 1
-/* Use Relaxed ordering model for TLPs on this VI. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_LBN 48
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_WIDTH 1
-/* Use ID based ordering for TLPs on this VI. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_LBN 49
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_WIDTH 1
-/* Set the no snoop bit for TLPs on this VI. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_LBN 50
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_WIDTH 1
-/* Enable TPH for TLPs on this VI. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_LEN 4
+/* MC_CMD_SET_VI_TLP_PROCESSING_V2_IN msgrequest: This message has the same
+ * layout as SET_VI_TLP_PROCESSING_IN, but with corrected field ordering to
+ * simplify use in drivers.
+ */
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_LEN 8
+/* Queue handle, encoding the queue type and VI number to set information for. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_INSTANCE_OFST 0
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_INSTANCE_LEN 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_DATA_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_DATA_LEN 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG1_RX_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG1_RX_LBN 0
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG1_RX_WIDTH 8
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG2_EV_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG2_EV_LBN 8
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG2_EV_WIDTH 8
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_LBN 16
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_PACKET_DATA_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_PACKET_DATA_LBN 16
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_PACKET_DATA_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_ID_BASED_ORDERING_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_ID_BASED_ORDERING_LBN 17
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_ID_BASED_ORDERING_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_NO_SNOOP_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_NO_SNOOP_LBN 18
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_NO_SNOOP_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_ON_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_ON_LBN 19
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_ON_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_SYNC_DATA_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_SYNC_DATA_LBN 20
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_SYNC_DATA_WIDTH 1
/* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
#define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0
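Because LL queue TLP overrides must be set between allocation and initialisation of the queue, a caller builds the 8-byte V2 request from the queue handle plus a DATA dword of overrides. A hedged sketch, filling only the fields shown in this hunk; the function name and arguments are assumptions, not driver code:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative: fill an MC_CMD_SET_VI_TLP_PROCESSING_V2_IN request.  The
 * "req" buffer must hold at least MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_LEN
 * bytes and is written little-endian.
 */
static void fill_set_tlp_v2(uint8_t *req, uint32_t queue_handle,
			    uint8_t tph_tag1_rx, bool relaxed_ordering,
			    bool tph_on)
{
	uint32_t data = 0;

	data |= (uint32_t)tph_tag1_rx <<
		MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG1_RX_LBN;
	if (relaxed_ordering)
		data |= 1u << MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_LBN;
	if (tph_on)
		data |= 1u << MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_ON_LBN;

	/* INSTANCE dword at offset 0, DATA dword at offset 4. */
	for (int i = 0; i < 4; i++) {
		req[MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_INSTANCE_OFST + i] =
			(uint8_t)(queue_handle >> (8 * i));
		req[MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_DATA_OFST + i] =
			(uint8_t)(data >> (8 * i));
	}
}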
/***********************************/
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS
- * Get global PCIe steering and transaction processing configuration.
- */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc
-#undef MC_CMD_0xbc_PRIVILEGE_CTG
-
-#define MC_CMD_0xbc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
-/* enum: MISC. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0
-/* enum: IDO. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1
-/* enum: RO. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2
-/* enum: TPH Type. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3
-
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
-/* Amalgamated TLP info word. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_LEN 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_WIDTH 31
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_LBN 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_LBN 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_LBN 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_LBN 3
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_LBN 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_WIDTH 28
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_LBN 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_LBN 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_LBN 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_LBN 3
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_WIDTH 29
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_LBN 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_LBN 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_LBN 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_LBN 6
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_LBN 8
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_LBN 9
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_WIDTH 23
-
-
-/***********************************/
-/* MC_CMD_SET_TLP_PROCESSING_GLOBALS
- * Set global PCIe steering and transaction processing configuration.
- */
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd
-#undef MC_CMD_0xbd_PRIVILEGE_CTG
-
-#define MC_CMD_0xbd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
-/* Amalgamated TLP info word. */
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_LEN 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_LBN 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_LBN 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_LBN 3
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_LBN 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_LBN 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_LBN 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_LBN 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_LBN 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_LBN 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_LBN 6
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_LBN 8
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_LBN 10
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_WIDTH 22
-
-/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT_LEN 0
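/* A minimal sketch (not driver code) of how the amalgamated TLP info word can
 * be assembled from the LBN/WIDTH values defined above; mcdi_field_set() and
 * example_tlp_info_word() are hypothetical helpers, not an existing API.
 */
#include <stdint.h>

static inline uint32_t mcdi_field_set(uint32_t word, unsigned int lbn,
				      unsigned int width, uint32_t value)
{
	/* Place 'value' into 'word' at bit position 'lbn', 'width' bits wide. */
	uint32_t mask = (width >= 32 ? 0xffffffffu : (1u << width) - 1) << lbn;

	return (word & ~mask) | ((value << lbn) & mask);
}

static inline uint32_t example_tlp_info_word(void)
{
	uint32_t word = 0;

	/* Enable ID-based ordering on the TX and event paths only. */
	word = mcdi_field_set(word, MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_LBN,
			      MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_WIDTH, 1);
	word = mcdi_field_set(word, MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_LBN,
			      MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_WIDTH, 1);
	return word;
}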
-
-
-/***********************************/
-/* MC_CMD_SATELLITE_DOWNLOAD
- * Download a new set of images to the satellite CPUs from the host.
- */
-#define MC_CMD_SATELLITE_DOWNLOAD 0x91
-#undef MC_CMD_0x91_PRIVILEGE_CTG
-
-#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs
- * are subtle, and so downloads must proceed in a number of phases.
- *
- * 1) PHASE_RESET with a target of TARGET_ALL and chunk ID/length of 0.
- *
- * 2) PHASE_IMEMS for each of the IMEM targets (target IDs 0-11). Each download
- * may consist of multiple chunks. The final chunk (with CHUNK_ID_LAST) should
- * be a checksum (a simple 32-bit sum) of the transferred data. An individual
- * download may be aborted using CHUNK_ID_ABORT.
- *
- * 3) PHASE_VECTORS for each of the vector table targets (target IDs 12-15),
- * similar to PHASE_IMEMS.
- *
- * 4) PHASE_READY with a target of TARGET_ALL and chunk ID/length of 0.
- *
- * After any error (a requested abort is not considered to be an error) the
- * sequence must be restarted from PHASE_RESET.
- */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMIN 20
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX 252
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX_MCDI2 1020
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(num) (16+4*(num))
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_NUM(len) (((len)-16)/4)
-/* Download phase. (Note: the IDLE phase is used internally and is never valid
- * in a command from the host.)
- */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_LEN 4
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */
-/* Target for download. (These match the blob numbers defined in
- * mc_flash_layout.h.)
- */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_LEN 4
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb
-/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc
-/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd
-/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe
-/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf
-/* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff
-/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LEN 4
-/* enum: Last chunk, containing checksum rather than data */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff
-/* enum: Abort download of this item */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe
-/* Length of this chunk in bytes */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_LEN 4
-/* Data for this chunk */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MINNUM 1
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM 59
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM_MCDI2 251
-
-/* MC_CMD_SATELLITE_DOWNLOAD_OUT msgresponse */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
-/* Same as MC_CMD_ERR field, but included as 0 in success cases */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_LEN 4
-/* Extra status information */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_LEN 4
-/* enum: Code download OK, completed. */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0
-/* enum: Code download aborted as requested. */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1
-/* enum: Code download OK so far, send next chunk. */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2
-/* enum: Download phases out of sequence */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100
-/* enum: Bad target for this phase */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101
-/* enum: Chunk ID out of sequence */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200
-/* enum: Chunk length zero or too large */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201
-/* enum: Checksum was incorrect */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300
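/* A minimal sketch (not driver code) of one plausible reading of the "simple
 * 32-bit sum" carried by the final CHUNK_ID_LAST chunk in the phased download
 * described above: a wrapping 32-bit sum over every data word transferred for
 * the target. satellite_download_checksum() is a hypothetical helper.
 */
#include <stddef.h>
#include <stdint.h>

static inline uint32_t satellite_download_checksum(const uint32_t *words,
						   size_t n_words)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < n_words; i++)
		sum += words[i];	/* wraps modulo 2^32 */
	return sum;
}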
-
-
-/***********************************/
/* MC_CMD_GET_CAPABILITIES
- * Get device capabilities.
- *
- * This is supplementary to the MC_CMD_GET_BOARD_CFG command, and intended to
- * reference inherent device capabilities as opposed to current NVRAM config.
+ * Get device capabilities. This is supplementary to the MC_CMD_GET_BOARD_CFG
+ * command, and intended to reference inherent device capabilities as opposed
+ * to current NVRAM config.
*/
#define MC_CMD_GET_CAPABILITIES 0xbe
#undef MC_CMD_0xbe_PRIVILEGE_CTG
@@ -14490,7 +14250,10 @@
/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_NUM 4
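/* A minimal sketch (not driver code): once NUM_VIS_PER_PORT (ports 0-3) and,
 * on V12+ responses, NUM_VIS_PER_PORT2 have been parsed into host arrays, the
 * per-port VI count can be looked up as below. vis_for_port() and both array
 * parameters are hypothetical, including the port - 4 indexing.
 */
#include <stdint.h>

static inline uint16_t vis_for_port(const uint16_t vis_per_port[4],
				    const uint16_t *vis_per_port2,
				    unsigned int port)
{
	/* Ports 0-3 come from NUM_VIS_PER_PORT, the rest from NUM_VIS_PER_PORT2. */
	return port < 4 ? vis_per_port[port] : vis_per_port2[port - 4];
}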
@@ -14900,7 +14663,10 @@
/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -15335,7 +15101,10 @@
/* MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -15778,7 +15547,10 @@
/* MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -16226,7 +15998,10 @@
/* MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -16685,7 +16460,10 @@
/* MC_CMD_GET_CAPABILITIES_V7_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -16796,9 +16574,21 @@
#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CXL_CONFIG_ENABLE_WIDTH 1
/* MC_CMD_GET_CAPABILITIES_V8_OUT msgresponse */
#define MC_CMD_GET_CAPABILITIES_V8_OUT_LEN 160
@@ -17189,7 +16979,10 @@
/* MC_CMD_GET_CAPABILITIES_V8_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -17300,9 +17093,21 @@
#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CXL_CONFIG_ENABLE_WIDTH 1
/* These bits are reserved for communicating test-specific capabilities to
* host-side test software. All production drivers should treat this field as
* opaque.
@@ -17707,7 +17512,10 @@
/* MC_CMD_GET_CAPABILITIES_V9_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -17818,9 +17626,21 @@
#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CXL_CONFIG_ENABLE_WIDTH 1
/* These bits are reserved for communicating test-specific capabilities to
* host-side test software. All production drivers should treat this field as
* opaque.
@@ -18260,7 +18080,10 @@
/* MC_CMD_GET_CAPABILITIES_V10_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V10_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -18371,9 +18194,21 @@
#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CXL_CONFIG_ENABLE_WIDTH 1
/* These bits are reserved for communicating test-specific capabilities to
* host-side test software. All production drivers should treat this field as
* opaque.
@@ -18438,6 +18273,1182 @@
#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_QUEUE_SIZES_OFST 188
#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_QUEUE_SIZES_LEN 4
+/* MC_CMD_GET_CAPABILITIES_V11_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LEN 196
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
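/* A minimal sketch (not driver code): the RXPD firmware version word splits
 * into a 12-bit REV and a 4-bit TYPE, per the LBN/WIDTH values above. The
 * helper names are hypothetical.
 */
#include <stdint.h>

static inline unsigned int rxpd_fw_rev(uint16_t ver)
{
	return ver & ((1u << MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_REV_WIDTH) - 1);
}

static inline unsigned int rxpd_fw_type(uint16_t ver)
{
	return ver >> MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_TYPE_LBN;
}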
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex PF-to-port mapping scheme is in use. A
+ * future driver should look for a new field supporting the new scheme; the
+ * current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
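/* A minimal sketch (not driver code) of interpreting one byte of the
 * PFS_TO_PORTS_ASSIGNMENT array, treating INCOMPATIBLE_ASSIGNMENT like
 * PF_NOT_ASSIGNED as the comment above requires. pf_has_port() is a
 * hypothetical helper.
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool pf_has_port(uint8_t assignment, unsigned int *port)
{
	switch (assignment) {
	case MC_CMD_GET_CAPABILITIES_V11_OUT_ACCESS_NOT_PERMITTED:
	case MC_CMD_GET_CAPABILITIES_V11_OUT_PF_NOT_PRESENT:
	case MC_CMD_GET_CAPABILITIES_V11_OUT_PF_NOT_ASSIGNED:
	case MC_CMD_GET_CAPABILITIES_V11_OUT_INCOMPATIBLE_ASSIGNMENT:
		return false;		/* no usable external port for this PF */
	default:
		*port = assignment;	/* ordinary external port number */
		return true;
	}
}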
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V11_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V11_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache, expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache, expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DESC_CACHE_SIZE_LEN 1
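/* A minimal sketch: the descriptor cache sizes are reported as binary
 * logarithms, so the entry counts are recovered by shifting (e.g. a reported
 * value of 6 means a 64-entry cache). desc_cache_entries() is a hypothetical
 * helper; reported values are assumed to be well below 32.
 */
#include <stdint.h>

static inline uint32_t desc_cache_entries(uint8_t log2_size)
{
	return 1u << log2_size;
}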
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford, the amount of address space assigned to each
+ * VI is configurable. This is a global setting that the driver must query to
+ * discover the VI-to-address mapping. Cut-through PIO (CTPIO) is not
+ * available with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_64K 0x2
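/* A minimal sketch (not driver code): translate VI_WINDOW_MODE into a per-VI
 * stride, so VI n can be located at n * stride within the function's VI
 * aperture. vi_window_stride() is a hypothetical helper.
 */
#include <stddef.h>
#include <stdint.h>

static inline size_t vi_window_stride(uint8_t mode)
{
	switch (mode) {
	case MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_8K:
		return 8192;	/* PIO at +4k, CTPIO not mapped */
	case MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_16K:
		return 16384;	/* PIO at +4k, CTPIO at +12k */
	case MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_64K:
		return 65536;	/* PIO at +4k, CTPIO at +12k */
	default:
		return 0;	/* unknown mode */
	}
}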
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_NUM_STATS_LEN 2
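/* A minimal sketch: sizing a MAC stats DMA buffer from MAC_STATS_NUM_STATS.
 * Each stat is a 64-bit value and the count already includes the final
 * GENERATION_END entry, so the full set needs MAC_STATS_NUM_STATS * 8 bytes;
 * a shorter buffer receives a truncated array. mac_stats_dma_len() is a
 * hypothetical helper.
 */
#include <stddef.h>
#include <stdint.h>

static inline size_t mac_stats_dma_len(uint16_t num_stats)
{
	return (size_t)num_stats * sizeof(uint64_t);
}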
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
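/* A minimal sketch (not driver code): check a requested RX buffer size against
 * the GUARANTEED_RX_BUFFER_SIZES array. Zero entries are unused slots; an
 * entirely empty list means there is no restriction on concurrent sizes.
 * rx_buf_size_guaranteed() is a hypothetical helper.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static inline bool rx_buf_size_guaranteed(const uint32_t *sizes, size_t n,
					  uint32_t want)
{
	bool any = false;
	size_t i;

	for (i = 0; i < n; i++) {
		if (!sizes[i])
			continue;
		any = true;
		if (sizes[i] == want)
			return true;
	}
	return !any;	/* empty list: any size may be used */
}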
+/* Third word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS3_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS3_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_WOL_ETHERWAKE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_WOL_ETHERWAKE_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_WOL_ETHERWAKE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_EVEN_SPREADING_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_EVEN_SPREADING_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_SUPPORTED_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VDPA_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VDPA_SUPPORTED_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VDPA_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CXL_CONFIG_ENABLE_WIDTH 1
+/* These bits are reserved for communicating test-specific capabilities to
+ * host-side test software. All production drivers should treat this field as
+ * opaque.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LEN 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LO_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LO_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LO_LBN 1216
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LO_WIDTH 32
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_HI_OFST 156
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_HI_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_HI_LBN 1248
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_HI_WIDTH 32
+/* The minimum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4
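/* A minimal sketch (not driver code): round a desired RSS indirection table
 * size up to a power of two within the advertised [MIN, MAX] range, since the
 * comments above require the table size used to be a power of 2. Assumes MIN
 * and MAX are themselves powers of two; rss_table_size() is a hypothetical
 * helper.
 */
#include <stdint.h>

static inline uint32_t rss_table_size(uint32_t want, uint32_t min, uint32_t max)
{
	uint32_t size = min;

	while (size < want && size < max)
		size <<= 1;	/* stays a power of two, capped at max */
	return size;
}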
+/* The maximum number of queues that can be used by an RSS context in exclusive
+ * mode. In exclusive mode the context has a configurable indirection table and
+ * a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4
+/* The maximum number of queues that can be used by an RSS context in even-
+ * spreading mode. In even-spreading mode the context has no indirection table
+ * but it does have a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4
+/* The total number of RSS contexts supported. Note that the number of
+ * available contexts using indirection tables is also limited by the
+ * availability of indirection table space allocated from a common pool.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_NUM_CONTEXTS_OFST 176
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_NUM_CONTEXTS_LEN 4
+/* The total amount of indirection table space that can be shared between RSS
+ * contexts.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_TABLE_POOL_SIZE_OFST 180
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_TABLE_POOL_SIZE_LEN 4
+/* A bitmap of the queue sizes the device can provide, where bit N being set
+ * indicates that 2**N is a valid size. The device may be limited in the number
+ * of different queue sizes that can exist simultaneously, so a bit being set
+ * here does not guarantee that an attempt to create a queue of that size will
+ * succeed.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SUPPORTED_QUEUE_SIZES_OFST 184
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SUPPORTED_QUEUE_SIZES_LEN 4
+/* A bitmap of queue sizes that are always available, in the same format as
+ * SUPPORTED_QUEUE_SIZES. Attempting to create a queue with one of these sizes
+ * will never fail due to unavailability of the requested size.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_QUEUE_SIZES_OFST 188
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_QUEUE_SIZES_LEN 4
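/* A minimal sketch: test a proposed queue size against the
 * SUPPORTED_QUEUE_SIZES / GUARANTEED_QUEUE_SIZES bitmaps, where bit N set
 * means 2**N entries is a valid size. A bit set in the supported map does not
 * promise the allocation will succeed; one set in the guaranteed map does.
 * queue_size_ok() is a hypothetical helper.
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool queue_size_ok(uint32_t bitmap, unsigned int log2_entries)
{
	return log2_entries < 32 && (bitmap & (1u << log2_entries)) != 0;
}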
+/* Number of available indirect memory maps. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INDIRECT_MAP_INDEX_COUNT_OFST 192
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INDIRECT_MAP_INDEX_COUNT_LEN 4
+
+/* MC_CMD_GET_CAPABILITIES_V12_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LEN 204
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
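
/*
 * Editor's note: illustrative sketch only, not part of the generated MCDI
 * definitions. It walks the 16 per-PF bytes at offset 26 of the V12 response
 * (PFS_TO_PORTS_ASSIGNMENT) and handles the special values defined above;
 * the function name is hypothetical and 'resp' is assumed to be a complete
 * response buffer.
 */
#include <stdint.h>
#include <stdio.h>

static void print_pf_port_assignment(const uint8_t *resp)
{
	/* PFS_TO_PORTS_ASSIGNMENT: OFST 26, LEN 1, NUM 16 */
	const uint8_t *map = resp + 26;
	unsigned int pf;

	for (pf = 0; pf < 16; pf++) {
		switch (map[pf]) {
		case 0xff:	/* ACCESS_NOT_PERMITTED */
			printf("PF %u: no access\n", pf);
			break;
		case 0xfe:	/* PF_NOT_PRESENT */
			printf("PF %u: not present\n", pf);
			break;
		case 0xfd:	/* PF_NOT_ASSIGNED */
		case 0xfc:	/* INCOMPATIBLE_ASSIGNMENT: treat as not assigned */
			printf("PF %u: not assigned\n", pf);
			break;
		default:
			printf("PF %u: external port %u\n", pf, map[pf]);
			break;
		}
	}
}
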
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V12_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V12_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_64K 0x2
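
/*
 * Editor's note: illustrative sketch only, not part of the generated MCDI
 * definitions. It maps the VI_WINDOW_MODE values above to the per-VI window
 * stride and the PIO/CTPIO offsets given in their descriptions (CTPIO is not
 * mapped in 8k mode). The struct and function names are hypothetical.
 */
#include <stdint.h>

struct vi_window {
	uint32_t stride;	/* bytes of address space per VI */
	uint32_t pio_offset;	/* offset of the PIO region in the window */
	int32_t ctpio_offset;	/* offset of the CTPIO region, -1 if unmapped */
};

static int vi_window_from_mode(uint8_t mode, struct vi_window *w)
{
	switch (mode) {
	case 0x0:	/* VI_WINDOW_MODE_8K */
		*w = (struct vi_window){ 8192, 4096, -1 };
		return 0;
	case 0x1:	/* VI_WINDOW_MODE_16K */
		*w = (struct vi_window){ 16384, 4096, 12288 };
		return 0;
	case 0x2:	/* VI_WINDOW_MODE_64K */
		*w = (struct vi_window){ 65536, 4096, 12288 };
		return 0;
	default:
		return -1;	/* unknown window mode */
	}
}
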
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_NUM_STATS_LEN 2
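
/*
 * Editor's note: illustrative sketch only, not part of the generated MCDI
 * definitions. MAC_STATS_NUM_STATS above is the entry count of the MAC stats
 * array (including the final GENERATION_END entry), so a DMA buffer needs at
 * least that many 64-bit values to receive every stat. The helper name is
 * hypothetical.
 */
#include <stddef.h>
#include <stdint.h>

static size_t mac_stats_dma_len(uint16_t mac_stats_num_stats)
{
	return (size_t)mac_stats_num_stats * sizeof(uint64_t);
}
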
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
+/* Third word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS3_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS3_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_WOL_ETHERWAKE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_WOL_ETHERWAKE_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_WOL_ETHERWAKE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_EVEN_SPREADING_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_EVEN_SPREADING_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_SUPPORTED_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VDPA_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VDPA_SUPPORTED_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VDPA_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CXL_CONFIG_ENABLE_WIDTH 1
+/* These bits are reserved for communicating test-specific capabilities to
+ * host-side test software. All production drivers should treat this field as
+ * opaque.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LEN 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LO_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LO_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LO_LBN 1216
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LO_WIDTH 32
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_HI_OFST 156
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_HI_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_HI_LBN 1248
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_HI_WIDTH 32
+/* The minimum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum number of queues that can be used by an RSS context in exclusive
+ * mode. In exclusive mode the context has a configurable indirection table and
+ * a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4
+/* The maximum number of queues that can be used by an RSS context in even-
+ * spreading mode. In even-spreading mode the context has no indirection table
+ * but it does have a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4
+/* The total number of RSS contexts supported. Note that the number of
+ * available contexts using indirection tables is also limited by the
+ * availability of indirection table space allocated from a common pool.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_NUM_CONTEXTS_OFST 176
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_NUM_CONTEXTS_LEN 4
+/* The total amount of indirection table space that can be shared between RSS
+ * contexts.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_TABLE_POOL_SIZE_OFST 180
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_TABLE_POOL_SIZE_LEN 4
+/* A bitmap of the queue sizes the device can provide, where bit N being set
+ * indicates that 2**N is a valid size. The device may be limited in the number
+ * of different queue sizes that can exist simultaneously, so a bit being set
+ * here does not guarantee that an attempt to create a queue of that size will
+ * succeed.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SUPPORTED_QUEUE_SIZES_OFST 184
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SUPPORTED_QUEUE_SIZES_LEN 4
+/* A bitmap of queue sizes that are always available, in the same format as
+ * SUPPORTED_QUEUE_SIZES. Attempting to create a queue with one of these sizes
+ * will never fail due to unavailability of the requested size.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_QUEUE_SIZES_OFST 188
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_QUEUE_SIZES_LEN 4
+/* Number of available indirect memory maps. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INDIRECT_MAP_INDEX_COUNT_OFST 192
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INDIRECT_MAP_INDEX_COUNT_LEN 4
+/* Number of VIs available for external ports 4-7. Information for ports 0-3 is
+ * in NUM_VIS_PER_PORT in GET_CAPABILITIES_V2_OUT.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT2_OFST 196
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT2_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT2_NUM 4
+
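/*
 * Editor's note: illustrative sketch only, not part of the generated MCDI
 * definitions. It shows the general pattern for reading the capability words
 * above from a GET_CAPABILITIES response buffer: each single-bit flag is
 * described by an _OFST (byte offset of its 32-bit flags word), an _LBN (bit
 * position within that word) and a _WIDTH. The helper names are hypothetical,
 * and for brevity the 32-bit words are assumed little-endian on a
 * little-endian host.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Read one 32-bit word at byte offset 'ofst', returning 0 if the response is
 * too short (older firmware may omit trailing fields, as the "check the
 * length" notes above say). */
static uint32_t caps_dword(const uint8_t *resp, size_t resp_len, size_t ofst)
{
	uint32_t v = 0;

	if (ofst + 4 <= resp_len)
		memcpy(&v, resp + ofst, 4);
	return v;
}

/* Extract a flag or small field given its _OFST/_LBN/_WIDTH triple. */
static uint32_t caps_field(const uint8_t *resp, size_t resp_len,
			   size_t ofst, unsigned int lbn, unsigned int width)
{
	uint32_t mask = width < 32 ? (1u << width) - 1u : 0xffffffffu;

	return (caps_dword(resp, resp_len, ofst) >> lbn) & mask;
}

/* Example: FLAGS3 bit RSS_SELECTABLE_TABLE_SIZE is OFST 148, LBN 2, WIDTH 1:
 *	if (caps_field(resp, resp_len, 148, 2, 1)) ...
 */
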
/***********************************/
/* MC_CMD_V2_EXTN
@@ -18468,168 +19479,13 @@
* are not defined.
*/
#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_TSA 0x1
-
-
-/***********************************/
-/* MC_CMD_TCM_BUCKET_ALLOC
- * Allocate a pacer bucket (for qau rp or a snapper test)
+/* enum: MCDI command used for platform management. Typically, these commands
+ * are used for low-level operations directed at the platform as a whole (e.g.
+ * MMIO device enumeration) rather than individual functions and use a
+ * dedicated comms channel (e.g. RPmsg/IPI). May be handled by the same or
+ * different CPU as MCDI_MESSAGE_TYPE_MC.
*/
-#define MC_CMD_TCM_BUCKET_ALLOC 0xb2
-#undef MC_CMD_0xb2_PRIVILEGE_CTG
-
-#define MC_CMD_0xb2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */
-#define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0
-
-/* MC_CMD_TCM_BUCKET_ALLOC_OUT msgresponse */
-#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
-/* the bucket id */
-#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0
-#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_LEN 4
-
-
-/***********************************/
-/* MC_CMD_TCM_BUCKET_FREE
- * Free a pacer bucket
- */
-#define MC_CMD_TCM_BUCKET_FREE 0xb3
-#undef MC_CMD_0xb3_PRIVILEGE_CTG
-
-#define MC_CMD_0xb3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */
-#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
-/* the bucket id */
-#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0
-#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_LEN 4
-
-/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
-#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_TCM_BUCKET_INIT
- * Initialise pacer bucket with a given rate
- */
-#define MC_CMD_TCM_BUCKET_INIT 0xb4
-#undef MC_CMD_0xb4_PRIVILEGE_CTG
-
-#define MC_CMD_0xb4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */
-#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
-/* the bucket id */
-#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0
-#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_LEN 4
-/* the rate in mbps */
-#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
-#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_LEN 4
-
-/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
-/* the bucket id */
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_LEN 4
-/* the rate in mbps */
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_LEN 4
-/* the desired maximum fill level */
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_LEN 4
-
-/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
-#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_TCM_TXQ_INIT
- * Initialise txq in pacer with given options or set options
- */
-#define MC_CMD_TCM_TXQ_INIT 0xb5
-#undef MC_CMD_0xb5_PRIVILEGE_CTG
-
-#define MC_CMD_0xb5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */
-#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28
-/* the txq id */
-#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
-#define MC_CMD_TCM_TXQ_INIT_IN_QID_LEN 4
-/* the static priority associated with the txq */
-#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
-#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_LEN 4
-/* bitmask of the priority queues this txq is inserted into when inserted. */
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_LEN 4
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_WIDTH 1
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_LBN 2
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
-/* the reaction point (RP) bucket */
-#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
-#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_LEN 4
-/* an already reserved bucket (typically set to bucket associated with outer
- * vswitch)
- */
-#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
-#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_LEN 4
-/* an already reserved bucket (typically set to bucket associated with inner
- * vswitch)
- */
-#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
-#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_LEN 4
-/* the min bucket (typically for ETS/minimum bandwidth) */
-#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
-#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_LEN 4
-
-/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
-/* the txq id */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_LEN 4
-/* the static priority associated with the txq */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_LEN 4
-/* bitmask of the priority queues this txq is inserted into when inserted. */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_LEN 4
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_WIDTH 1
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_LBN 2
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
-/* the reaction point (RP) bucket */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_LEN 4
-/* an already reserved bucket (typically set to bucket associated with outer
- * vswitch)
- */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_LEN 4
-/* an already reserved bucket (typically set to bucket associated with inner
- * vswitch)
- */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_LEN 4
-/* the min bucket (typically for ETS/minimum bandwidth) */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_LEN 4
-/* the static priority associated with the txq */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_LEN 4
-
-/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
-#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
+#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_PLATFORM 0x2
/***********************************/
@@ -18740,27 +19596,6 @@
/***********************************/
-/* MC_CMD_VSWITCH_QUERY
- * read some config of v-switch. For now this command is an empty placeholder.
- * It may be used to check if a v-switch is connected to a given EVB port (if
- * not, then the command returns ENOENT).
- */
-#define MC_CMD_VSWITCH_QUERY 0x63
-#undef MC_CMD_0x63_PRIVILEGE_CTG
-
-#define MC_CMD_0x63_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VSWITCH_QUERY_IN msgrequest */
-#define MC_CMD_VSWITCH_QUERY_IN_LEN 4
-/* The port to which the v-switch is connected. */
-#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
-#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
-
-/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
-#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_VPORT_ALLOC
* allocate a v-port.
*/
@@ -18936,28 +19771,6 @@
/***********************************/
-/* MC_CMD_VADAPTOR_GET_MAC
- * read the MAC address assigned to a v-adaptor.
- */
-#define MC_CMD_VADAPTOR_GET_MAC 0x5e
-#undef MC_CMD_0x5e_PRIVILEGE_CTG
-
-#define MC_CMD_0x5e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VADAPTOR_GET_MAC_IN msgrequest */
-#define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
-/* The port to which the v-adaptor is connected. */
-#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
-#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
-
-/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
-#define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
-/* The MAC address assigned to this v-adaptor */
-#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_OFST 0
-#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_LEN 6
-
-
-/***********************************/
/* MC_CMD_VADAPTOR_QUERY
* read some config of v-adaptor.
*/
@@ -19014,86 +19827,6 @@
/***********************************/
-/* MC_CMD_RDWR_A64_REGIONS
- * Assign the 64 bit region addresses.
- */
-#define MC_CMD_RDWR_A64_REGIONS 0x9b
-#undef MC_CMD_0x9b_PRIVILEGE_CTG
-
-#define MC_CMD_0x9b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
-#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_LEN 4
-/* Write enable bits 0-3, set to write, clear to read. */
-#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
-#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST 16
-#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_LEN 1
-
-/* MC_CMD_RDWR_A64_REGIONS_OUT msgresponse: This data always included
- * regardless of state of write bits in the request.
- */
-#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_LEN 4
-
-
-/***********************************/
-/* MC_CMD_ONLOAD_STACK_ALLOC
- * Allocate an Onload stack ID.
- */
-#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
-#undef MC_CMD_0x9c_PRIVILEGE_CTG
-
-#define MC_CMD_0x9c_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
-#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
-/* The handle of the owning upstream port */
-#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
-#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
-
-/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
-#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
-/* The handle of the new Onload stack */
-#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
-#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_LEN 4
-
-
-/***********************************/
-/* MC_CMD_ONLOAD_STACK_FREE
- * Free an Onload stack ID.
- */
-#define MC_CMD_ONLOAD_STACK_FREE 0x9d
-#undef MC_CMD_0x9d_PRIVILEGE_CTG
-
-#define MC_CMD_0x9d_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
-#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
-/* The handle of the Onload stack */
-#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
-#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_LEN 4
-
-/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
-#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_RSS_CONTEXT_ALLOC
* Allocate an RSS context.
*/
@@ -19305,93 +20038,6 @@
/***********************************/
-/* MC_CMD_RSS_CONTEXT_WRITE_TABLE
- * Write a portion of a selectable-size indirection table for an RSS context.
- * This command must be used instead of MC_CMD_RSS_CONTEXT_SET_TABLE if the
- * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES.
- */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE 0x13e
-#undef MC_CMD_0x13e_PRIVILEGE_CTG
-
-#define MC_CMD_0x13e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN msgrequest */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMIN 8
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX 252
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LEN(num) (4+4*(num))
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_NUM(len) (((len)-4)/4)
-/* The handle of the RSS context */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_OFST 0
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_LEN 4
-/* An array of index-value pairs to be written to the table. Structure is
- * MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY.
- */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_OFST 4
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_LEN 4
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MINNUM 1
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM 62
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM_MCDI2 254
-
-/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT msgresponse */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT_LEN 0
-
-/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY structuredef */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_LEN 4
-/* The index of the table entry to be written. */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_OFST 0
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LEN 2
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LBN 0
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_WIDTH 16
-/* The value to write into the table entry. */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_OFST 2
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LEN 2
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LBN 16
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_WIDTH 16
-
-
-/***********************************/
-/* MC_CMD_RSS_CONTEXT_READ_TABLE
- * Read a portion of a selectable-size indirection table for an RSS context.
- * This command must be used instead of MC_CMD_RSS_CONTEXT_GET_TABLE if the
- * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES.
- */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE 0x13f
-#undef MC_CMD_0x13f_PRIVILEGE_CTG
-
-#define MC_CMD_0x13f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_RSS_CONTEXT_READ_TABLE_IN msgrequest */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMIN 6
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX 252
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LEN(num) (4+2*(num))
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_NUM(len) (((len)-4)/2)
-/* The handle of the RSS context */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_OFST 0
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_LEN 4
-/* An array containing the indices of the entries to be read. */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_OFST 4
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_LEN 2
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MINNUM 1
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM 124
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM_MCDI2 508
-
-/* MC_CMD_RSS_CONTEXT_READ_TABLE_OUT msgresponse */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMIN 2
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX 252
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LEN(num) (0+2*(num))
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_NUM(len) (((len)-0)/2)
-/* A buffer containing the requested entries read from the table. */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_OFST 0
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_LEN 2
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MINNUM 1
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM 126
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM_MCDI2 510
-
-
-/***********************************/
/* MC_CMD_RSS_CONTEXT_SET_FLAGS
* Set various control flags for an RSS context.
*/
@@ -19525,158 +20171,6 @@
/***********************************/
-/* MC_CMD_DOT1P_MAPPING_ALLOC
- * Allocate a .1p mapping.
- */
-#define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4
-#undef MC_CMD_0xa4_PRIVILEGE_CTG
-
-#define MC_CMD_0xa4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
-/* The handle of the owning upstream port */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
-/* Number of queues spanned by this mapping, in the range 1-64; valid fixed
- * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
- * referenced RSS contexts must span no more than this number.
- */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_LEN 4
-
-/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
-/* The handle of the new .1p mapping. This should be considered opaque to the
- * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
- * handle.
- */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_LEN 4
-/* enum: guaranteed invalid .1p mapping handle value */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff
-
-
-/***********************************/
-/* MC_CMD_DOT1P_MAPPING_FREE
- * Free a .1p mapping.
- */
-#define MC_CMD_DOT1P_MAPPING_FREE 0xa5
-#undef MC_CMD_0xa5_PRIVILEGE_CTG
-
-#define MC_CMD_0xa5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */
-#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
-/* The handle of the .1p mapping */
-#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_LEN 4
-
-/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
-#define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DOT1P_MAPPING_SET_TABLE
- * Set the mapping table for a .1p mapping.
- */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6
-#undef MC_CMD_0xa6_PRIVILEGE_CTG
-
-#define MC_CMD_0xa6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
-/* The handle of the .1p mapping */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
-/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
- * handle)
- */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_OFST 4
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_LEN 32
-
-/* MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT msgresponse */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DOT1P_MAPPING_GET_TABLE
- * Get the mapping table for a .1p mapping.
- */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7
-#undef MC_CMD_0xa7_PRIVILEGE_CTG
-
-#define MC_CMD_0xa7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
-/* The handle of the .1p mapping */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
-
-/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
-/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
- * handle)
- */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_OFST 4
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_LEN 32
-
-
-/***********************************/
-/* MC_CMD_GET_VECTOR_CFG
- * Get Interrupt Vector config for this PF.
- */
-#define MC_CMD_GET_VECTOR_CFG 0xbf
-#undef MC_CMD_0xbf_PRIVILEGE_CTG
-
-#define MC_CMD_0xbf_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */
-#define MC_CMD_GET_VECTOR_CFG_IN_LEN 0
-
-/* MC_CMD_GET_VECTOR_CFG_OUT msgresponse */
-#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
-/* Base absolute interrupt vector number. */
-#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
-#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_LEN 4
-/* Number of interrupt vectors allocate to this PF. */
-#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
-#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_LEN 4
-/* Number of interrupt vectors to allocate per VF. */
-#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
-#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_LEN 4
-
-
-/***********************************/
-/* MC_CMD_SET_VECTOR_CFG
- * Set Interrupt Vector config for this PF.
- */
-#define MC_CMD_SET_VECTOR_CFG 0xc0
-#undef MC_CMD_0xc0_PRIVILEGE_CTG
-
-#define MC_CMD_0xc0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */
-#define MC_CMD_SET_VECTOR_CFG_IN_LEN 12
-/* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to
- * let the system find a suitable base.
- */
-#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
-#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_LEN 4
-/* Number of interrupt vectors allocate to this PF. */
-#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
-#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_LEN 4
-/* Number of interrupt vectors to allocate per VF. */
-#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
-#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_LEN 4
-
-/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
-#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_VPORT_ADD_MAC_ADDRESS
* Add a MAC address to a v-port
*/
@@ -19810,124 +20304,6 @@
/***********************************/
-/* MC_CMD_EVB_PORT_QUERY
- * read some config of v-port.
- */
-#define MC_CMD_EVB_PORT_QUERY 0x62
-#undef MC_CMD_0x62_PRIVILEGE_CTG
-
-#define MC_CMD_0x62_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_EVB_PORT_QUERY_IN msgrequest */
-#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4
-/* The handle of the v-port */
-#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
-#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_LEN 4
-
-/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
-#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
-/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
-#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
-#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_LEN 4
-/* The number of VLAN tags that may be used on a v-adaptor connected to this
- * EVB port.
- */
-#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
-#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_DUMP_BUFTBL_ENTRIES
- * Dump buffer table entries, mainly for command client debug use. Dumps
- * absolute entries, and does not use chunk handles. All entries must be in
- * range, and used for q page mapping, although the latter restriction may be
- * lifted in future.
- */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
-#undef MC_CMD_0xab_PRIVILEGE_CTG
-
-#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
-/* Index of the first buffer table entry. */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
-/* Number of buffer table entries to dump. */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
-
-/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_NUM(len) (((len)-0)/12)
-/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM 21
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM_MCDI2 85
-
-
-/***********************************/
-/* MC_CMD_SET_RXDP_CONFIG
- * Set global RXDP configuration settings
- */
-#define MC_CMD_SET_RXDP_CONFIG 0xc1
-#undef MC_CMD_0xc1_PRIVILEGE_CTG
-
-#define MC_CMD_0xc1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
-#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
-#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
-#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_LEN 4
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_OFST 0
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_OFST 0
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_WIDTH 2
-/* enum: pad to 64 bytes */
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0
-/* enum: pad to 128 bytes (Medford only) */
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1
-/* enum: pad to 256 bytes (Medford only) */
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2
-
-/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */
-#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_RXDP_CONFIG
- * Get global RXDP configuration settings
- */
-#define MC_CMD_GET_RXDP_CONFIG 0xc2
-#undef MC_CMD_0xc2_PRIVILEGE_CTG
-
-#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
-#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0
-
-/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
-#define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
-#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
-#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_LEN 4
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_OFST 0
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_OFST 0
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_WIDTH 2
-/* Enum values, see field(s): */
-/* MC_CMD_SET_RXDP_CONFIG/MC_CMD_SET_RXDP_CONFIG_IN/PAD_HOST_LEN */
-
-
-/***********************************/
/* MC_CMD_GET_CLOCK
 * Return the system and DPCPU clock frequencies.
*/
@@ -19950,210 +20326,6 @@
/***********************************/
-/* MC_CMD_SET_CLOCK
- * Control the system and DPCPU clock frequencies. Changes are lost across a
- * reboot.
- */
-#define MC_CMD_SET_CLOCK 0xad
-#undef MC_CMD_0xad_PRIVILEGE_CTG
-
-#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SET_CLOCK_IN msgrequest */
-#define MC_CMD_SET_CLOCK_IN_LEN 28
-/* Requested frequency in MHz for system clock domain */
-#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
-#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_LEN 4
-/* enum: Leave the system clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for inter-core clock domain */
-#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
-#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_LEN 4
-/* enum: Leave the inter-core clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for DPCPU clock domain */
-#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
-#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_LEN 4
-/* enum: Leave the DPCPU clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for PCS clock domain */
-#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12
-#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_LEN 4
-/* enum: Leave the PCS clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for MC clock domain */
-#define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16
-#define MC_CMD_SET_CLOCK_IN_MC_FREQ_LEN 4
-/* enum: Leave the MC clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for rmon clock domain */
-#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20
-#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_LEN 4
-/* enum: Leave the rmon clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for vswitch clock domain */
-#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24
-#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_LEN 4
-/* enum: Leave the vswitch clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0
-
-/* MC_CMD_SET_CLOCK_OUT msgresponse */
-#define MC_CMD_SET_CLOCK_OUT_LEN 28
-/* Resulting system frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
-#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_LEN 4
-/* enum: The system clock domain doesn't exist */
-#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0
-/* Resulting inter-core frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
-#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_LEN 4
-/* enum: The inter-core clock domain doesn't exist / isn't used */
-#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0
-/* Resulting DPCPU frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
-#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_LEN 4
-/* enum: The dpcpu clock domain doesn't exist */
-#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0
-/* Resulting PCS frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12
-#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_LEN 4
-/* enum: The PCS clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0
-/* Resulting MC frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16
-#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_LEN 4
-/* enum: The MC clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0
-/* Resulting rmon frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20
-#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_LEN 4
-/* enum: The rmon clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0
-/* Resulting vswitch frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24
-#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_LEN 4
-/* enum: The vswitch clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0
-
-
-/***********************************/
-/* MC_CMD_DPCPU_RPC
- * Send an arbitrary DPCPU message.
- */
-#define MC_CMD_DPCPU_RPC 0xae
-#undef MC_CMD_0xae_PRIVILEGE_CTG
-
-#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_DPCPU_RPC_IN msgrequest */
-#define MC_CMD_DPCPU_RPC_IN_LEN 36
-#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
-#define MC_CMD_DPCPU_RPC_IN_CPU_LEN 4
-/* enum: RxDPCPU0 */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0
-/* enum: TxDPCPU0 */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1
-/* enum: TxDPCPU1 */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2
-/* enum: RxDPCPU1 (Medford only) */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3
-/* enum: RxDPCPU (will be for the calling function; for now, just an alias of
- * DPCPU_RX0)
- */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80
-/* enum: TxDPCPU (will be for the calling function; for now, just an alias of
- * DPCPU_TX0)
- */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81
-/* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be
- * initialised to zero
- */
-#define MC_CMD_DPCPU_RPC_IN_DATA_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_LBN 48
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_LBN 80
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12
-#define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24
-/* Register data to write. Only valid in write/write-read. */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_LEN 4
-/* Register address. */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_LEN 4
-
-/* MC_CMD_DPCPU_RPC_OUT msgresponse */
-#define MC_CMD_DPCPU_RPC_OUT_LEN 36
-#define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0
-#define MC_CMD_DPCPU_RPC_OUT_RC_LEN 4
-/* DATA */
-#define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
-#define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
-#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_OFST 4
-#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_LBN 32
-#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_WIDTH 16
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_OFST 4
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_LBN 48
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_WIDTH 16
-#define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
-#define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_LEN 4
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_LEN 4
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_LEN 4
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_LEN 4
-
-
-/***********************************/
/* MC_CMD_TRIGGER_INTERRUPT
* Trigger an interrupt by prodding the BIU.
*/
@@ -20173,66 +20345,6 @@
/***********************************/
-/* MC_CMD_SHMBOOT_OP
- * Special operations to support (for now) shmboot.
- */
-#define MC_CMD_SHMBOOT_OP 0xe6
-#undef MC_CMD_0xe6_PRIVILEGE_CTG
-
-#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SHMBOOT_OP_IN msgrequest */
-#define MC_CMD_SHMBOOT_OP_IN_LEN 4
-/* Identifies the operation to perform */
-#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
-#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_LEN 4
-/* enum: Copy slave_data section to the slave core. (Greenport only) */
-#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0
-
-/* MC_CMD_SHMBOOT_OP_OUT msgresponse */
-#define MC_CMD_SHMBOOT_OP_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_CAP_BLK_READ
- * Read multiple 64bit words from capture block memory
- */
-#define MC_CMD_CAP_BLK_READ 0xe7
-#undef MC_CMD_0xe7_PRIVILEGE_CTG
-
-#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_CAP_BLK_READ_IN msgrequest */
-#define MC_CMD_CAP_BLK_READ_IN_LEN 12
-#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
-#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_LEN 4
-#define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4
-#define MC_CMD_CAP_BLK_READ_IN_ADDR_LEN 4
-#define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8
-#define MC_CMD_CAP_BLK_READ_IN_COUNT_LEN 4
-
-/* MC_CMD_CAP_BLK_READ_OUT msgresponse */
-#define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
-#define MC_CMD_CAP_BLK_READ_OUT_LENMAX 248
-#define MC_CMD_CAP_BLK_READ_OUT_LENMAX_MCDI2 1016
-#define MC_CMD_CAP_BLK_READ_OUT_LEN(num) (0+8*(num))
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_NUM(len) (((len)-0)/8)
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_LEN 4
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_LBN 0
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_WIDTH 32
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_LEN 4
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_LBN 32
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_WIDTH 32
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM_MCDI2 127
-
-
-/***********************************/
/* MC_CMD_DUMP_DO
* Take a dump of the DUT state
*/
@@ -20380,34 +20492,6 @@
/***********************************/
-/* MC_CMD_SET_PSU
- * Adjusts power supply parameters. This is a warranty-voiding operation.
- * Returns: ENOENT if the parameter or rail specified does not exist, EINVAL if
- * the parameter is out of range.
- */
-#define MC_CMD_SET_PSU 0xea
-#undef MC_CMD_0xea_PRIVILEGE_CTG
-
-#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SET_PSU_IN msgrequest */
-#define MC_CMD_SET_PSU_IN_LEN 12
-#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
-#define MC_CMD_SET_PSU_IN_PARAM_LEN 4
-#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
-#define MC_CMD_SET_PSU_IN_RAIL_OFST 4
-#define MC_CMD_SET_PSU_IN_RAIL_LEN 4
-#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
-#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
-/* desired value, eg voltage in mV */
-#define MC_CMD_SET_PSU_IN_VALUE_OFST 8
-#define MC_CMD_SET_PSU_IN_VALUE_LEN 4
-
-/* MC_CMD_SET_PSU_OUT msgresponse */
-#define MC_CMD_SET_PSU_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_FUNCTION_INFO
* Get function information. PF and VF number.
*/
@@ -20448,7 +20532,7 @@
#define MC_CMD_ENABLE_OFFLINE_BIST 0xed
#undef MC_CMD_0xed_PRIVILEGE_CTG
-#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
@@ -20458,137 +20542,13 @@
/***********************************/
-/* MC_CMD_UART_SEND_DATA
- * Send checksummed[sic] block of data over the uart. Response is a placeholder
- * should we wish to make this reliable; currently requests are fire-and-
- * forget.
- */
-#define MC_CMD_UART_SEND_DATA 0xee
-#undef MC_CMD_0xee_PRIVILEGE_CTG
-
-#define MC_CMD_0xee_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_UART_SEND_DATA_OUT msgrequest */
-#define MC_CMD_UART_SEND_DATA_OUT_LENMIN 16
-#define MC_CMD_UART_SEND_DATA_OUT_LENMAX 252
-#define MC_CMD_UART_SEND_DATA_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_NUM(len) (((len)-16)/1)
-/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
-#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
-#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_LEN 4
-/* Offset at which to write the data */
-#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4
-#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_LEN 4
-/* Length of data */
-#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8
-#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_LEN 4
-/* Reserved for future use */
-#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12
-#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_LEN 4
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM 236
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM_MCDI2 1004
-
-/* MC_CMD_UART_SEND_DATA_IN msgresponse */
-#define MC_CMD_UART_SEND_DATA_IN_LEN 0
-
-
-/***********************************/
-/* MC_CMD_UART_RECV_DATA
- * Request checksummed[sic] block of data over the uart. Only a placeholder,
- * subject to change and not currently implemented.
- */
-#define MC_CMD_UART_RECV_DATA 0xef
-#undef MC_CMD_0xef_PRIVILEGE_CTG
-
-#define MC_CMD_0xef_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_UART_RECV_DATA_OUT msgrequest */
-#define MC_CMD_UART_RECV_DATA_OUT_LEN 16
-/* CRC32 over OFFSET, LENGTH, RESERVED */
-#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
-#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_LEN 4
-/* Offset from which to read the data */
-#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
-#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_LEN 4
-/* Length of data */
-#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
-#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_LEN 4
-/* Reserved for future use */
-#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
-#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_LEN 4
-
-/* MC_CMD_UART_RECV_DATA_IN msgresponse */
-#define MC_CMD_UART_RECV_DATA_IN_LENMIN 16
-#define MC_CMD_UART_RECV_DATA_IN_LENMAX 252
-#define MC_CMD_UART_RECV_DATA_IN_LENMAX_MCDI2 1020
-#define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
-#define MC_CMD_UART_RECV_DATA_IN_DATA_NUM(len) (((len)-16)/1)
-/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
-#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
-#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_LEN 4
-/* Offset at which to write the data */
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_LEN 4
-/* Length of data */
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_LEN 4
-/* Reserved for future use */
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_LEN 4
-#define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
-#define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
-#define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
-#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM 236
-#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM_MCDI2 1004
-
-
-/***********************************/
-/* MC_CMD_READ_FUSES
- * Read data programmed into the device One-Time-Programmable (OTP) Fuses
- */
-#define MC_CMD_READ_FUSES 0xf0
-#undef MC_CMD_0xf0_PRIVILEGE_CTG
-
-#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_READ_FUSES_IN msgrequest */
-#define MC_CMD_READ_FUSES_IN_LEN 8
-/* Offset in OTP to read */
-#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
-#define MC_CMD_READ_FUSES_IN_OFFSET_LEN 4
-/* Length of data to read in bytes */
-#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
-#define MC_CMD_READ_FUSES_IN_LENGTH_LEN 4
-
-/* MC_CMD_READ_FUSES_OUT msgresponse */
-#define MC_CMD_READ_FUSES_OUT_LENMIN 4
-#define MC_CMD_READ_FUSES_OUT_LENMAX 252
-#define MC_CMD_READ_FUSES_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
-#define MC_CMD_READ_FUSES_OUT_DATA_NUM(len) (((len)-4)/1)
-/* Length of returned OTP data in bytes */
-#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
-#define MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4
-/* Returned data */
-#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4
-#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1
-#define MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0
-#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248
-#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM_MCDI2 1016
-
-
-/***********************************/
/* MC_CMD_KR_TUNE
* Get or set KR Serdes RXEQ and TX Driver settings
*/
#define MC_CMD_KR_TUNE 0xf1
#undef MC_CMD_0xf1_PRIVILEGE_CTG
-#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_KR_TUNE_IN msgrequest */
#define MC_CMD_KR_TUNE_IN_LENMIN 4
@@ -21138,262 +21098,6 @@
/***********************************/
-/* MC_CMD_PCIE_TUNE
- * Get or set PCIE Serdes RXEQ and TX Driver settings
- */
-#define MC_CMD_PCIE_TUNE 0xf2
-#undef MC_CMD_0xf2_PRIVILEGE_CTG
-
-#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PCIE_TUNE_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_IN_LENMIN 4
-#define MC_CMD_PCIE_TUNE_IN_LENMAX 252
-#define MC_CMD_PCIE_TUNE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_IN_LEN(num) (4+4*(num))
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_NUM(len) (((len)-4)/4)
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1
-/* enum: Get current RXEQ settings */
-#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0
-/* enum: Override RXEQ settings */
-#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1
-/* enum: Get current TX Driver settings */
-#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
-/* enum: Override TX Driver settings */
-#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
-/* enum: Start PCIe Serdes Eye diagram plot on a given lane. */
-#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5
-/* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. The
- * caller should call this command repeatedly after starting eye plot, until no
- * more data is returned.
- */
-#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6
-/* enum: Enable the SERDES BIST and set it to generate a 200MHz square wave */
-#define MC_CMD_PCIE_TUNE_IN_BIST_SQUARE_WAVE 0x7
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3
-/* Arguments specific to the operation */
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_OFST 4
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_LEN 4
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MINNUM 0
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM 62
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM_MCDI2 254
-
-/* MC_CMD_PCIE_TUNE_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_OUT_LEN 0
-
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_LEN 4
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
-
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMIN 4
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX 252
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4)
-/* RXEQ Parameter */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: Attenuation (0-15) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0
-/* enum: CTLE Boost (0-15) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1
-/* enum: DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2
-/* enum: DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3
-/* enum: DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4
-/* enum: DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5
-/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6
-/* enum: DFE DLev */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7
-/* enum: Figure of Merit */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8
-/* enum: CTLE EQ Capacitor (HF Gain) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
-/* enum: CTLE EQ Resistor (DC Gain) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 5
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 13
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 14
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 10
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
-
-/* MC_CMD_PCIE_TUNE_RXEQ_SET_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMIN 8
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX 252
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_NUM(len) (((len)-4)/4)
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_LEN 3
-/* RXEQ Parameter */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LEN 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM_MCDI2 254
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
-/* Enum values, see field(s): */
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_ID */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 5
-/* Enum values, see field(s): */
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 13
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_LBN 14
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 2
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
-
-/* MC_CMD_PCIE_TUNE_RXEQ_SET_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_OUT_LEN 0
-
-/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
-
-/* MC_CMD_PCIE_TUNE_TXEQ_GET_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMIN 4
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX 252
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4)
-/* RXEQ Parameter */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: TxMargin (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0
-/* enum: TxSwing (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1
-/* enum: De-emphasis coefficient C(-1) (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2
-/* enum: De-emphasis coefficient C(0) (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3
-/* enum: De-emphasis coefficient C(+1) (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4
-/* Enum values, see field(s): */
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_LBN 12
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 12
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
-
-/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LEN 8
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
-
-/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
-
-/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_LEN 4
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
-
-/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_NUM(len) (((len)-0)/2)
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM_MCDI2 510
-
-/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN_LEN 0
-
-/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT msgrequest */
-#define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_LICENSING
* Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
* - not used for V3 licensing
@@ -21532,56 +21236,6 @@
/***********************************/
-/* MC_CMD_LICENSING_GET_ID_V3
- * Get ID and type from the NVRAM_PARTITION_TYPE_LICENSE application license
- * partition - V3 licensing (Medford)
- */
-#define MC_CMD_LICENSING_GET_ID_V3 0xd1
-#undef MC_CMD_0xd1_PRIVILEGE_CTG
-
-#define MC_CMD_0xd1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_LICENSING_GET_ID_V3_IN msgrequest */
-#define MC_CMD_LICENSING_GET_ID_V3_IN_LEN 0
-
-/* MC_CMD_LICENSING_GET_ID_V3_OUT msgresponse */
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN 8
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX 252
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_NUM(len) (((len)-8)/1)
-/* type of license (eg 3) */
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4
-/* length of the license ID (in bytes) */
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_LEN 4
-/* the unique license ID of the adapter */
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MINNUM 0
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM 244
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM_MCDI2 1012
-
-
-/***********************************/
-/* MC_CMD_MC2MC_PROXY
- * Execute an arbitrary MCDI command on the slave MC of a dual-core device.
- * This will fail on a single-core system.
- */
-#define MC_CMD_MC2MC_PROXY 0xf4
-#undef MC_CMD_0xf4_PRIVILEGE_CTG
-
-#define MC_CMD_0xf4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_MC2MC_PROXY_IN msgrequest */
-#define MC_CMD_MC2MC_PROXY_IN_LEN 0
-
-/* MC_CMD_MC2MC_PROXY_OUT msgresponse */
-#define MC_CMD_MC2MC_PROXY_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_LICENSED_APP_STATE
* Query the state of an individual licensed application. (Note that the actual
* state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
@@ -21610,424 +21264,6 @@
/***********************************/
-/* MC_CMD_GET_LICENSED_V3_APP_STATE
- * Query the state of an individual licensed application. (Note that the actual
- * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
- * operation or a reboot of the MC.) Used for V3 licensing (Medford)
- */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE 0xd2
-#undef MC_CMD_0xd2_PRIVILEGE_CTG
-
-#define MC_CMD_0xd2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_LICENSED_V3_APP_STATE_IN msgrequest */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN 8
-/* application ID to query (LICENSED_V3_APPS_xxx) expressed as a single bit
- * mask
- */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_OFST 0
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LEN 8
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST 0
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_LEN 4
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_LBN 0
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_WIDTH 32
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST 4
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_LEN 4
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_LBN 32
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_WIDTH 32
-
-/* MC_CMD_GET_LICENSED_V3_APP_STATE_OUT msgresponse */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
-/* state of this application */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_LEN 4
-/* enum: no (or invalid) license is present for the application */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
-/* enum: a valid license is present for the application */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1
-
-
-/***********************************/
-/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES
- * Query the state of one or more licensed features. (Note that the actual
- * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
- * operation or a reboot of the MC.) Used for V3 licensing (Medford)
- */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES 0xd3
-#undef MC_CMD_0xd3_PRIVILEGE_CTG
-
-#define MC_CMD_0xd3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN msgrequest */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN 8
-/* features to query (LICENSED_V3_FEATURES_xxx) expressed as a mask with one or
- * more bits set
- */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_OFST 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LEN 8
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_OFST 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_LEN 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_LBN 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_WIDTH 32
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_OFST 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_LEN 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_LBN 32
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_WIDTH 32
-
-/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT msgresponse */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN 8
-/* states of these features - bit set for licensed, clear for not licensed */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_OFST 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LEN 8
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_OFST 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_LEN 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_LBN 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_WIDTH 32
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_OFST 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_LEN 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_LBN 32
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_LICENSED_APP_OP
- * Perform an action for an individual licensed application - not used for V3
- * licensing.
- */
-#define MC_CMD_LICENSED_APP_OP 0xf6
-#undef MC_CMD_0xf6_PRIVILEGE_CTG
-
-#define MC_CMD_0xf6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_LICENSED_APP_OP_IN msgrequest */
-#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
-#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
-#define MC_CMD_LICENSED_APP_OP_IN_LENMAX_MCDI2 1020
-#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_NUM(len) (((len)-8)/4)
-/* application ID */
-#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
-#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4
-/* the type of operation requested */
-#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
-#define MC_CMD_LICENSED_APP_OP_IN_OP_LEN 4
-/* enum: validate application */
-#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
-/* enum: mask application */
-#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1
-/* arguments specific to this particular operation */
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM_MCDI2 253
-
-/* MC_CMD_LICENSED_APP_OP_OUT msgresponse */
-#define MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0
-#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252
-#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_NUM(len) (((len)-0)/4)
-/* result specific to this particular operation */
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM_MCDI2 255
-
-/* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
-/* application ID */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_LEN 4
-/* the type of operation requested */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_LEN 4
-/* validation challenge */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
-
-/* MC_CMD_LICENSED_APP_OP_VALIDATE_OUT msgresponse */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
-/* feature expiry (time_t) */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_LEN 4
-/* validation response */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
-
-/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
-/* application ID */
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_LEN 4
-/* the type of operation requested */
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_LEN 4
-/* flag */
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_LEN 4
-
-/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
-#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_LICENSED_V3_VALIDATE_APP
- * Perform validation for an individual licensed application - V3 licensing
- * (Medford)
- */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP 0xd4
-#undef MC_CMD_0xd4_PRIVILEGE_CTG
-
-#define MC_CMD_0xd4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_LICENSED_V3_VALIDATE_APP_IN msgrequest */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN 56
-/* challenge for validation (384 bits) */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_OFST 0
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN 48
-/* application ID expressed as a single bit mask */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 48
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 48
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_LEN 4
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_LBN 384
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_WIDTH 32
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 52
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_LEN 4
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_LBN 416
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_WIDTH 32
-
-/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 116
-/* validation response to challenge in the form of ECDSA signature consisting
- * of two 384-bit integers, r and s, in big-endian order. The signature signs a
- * SHA-384 digest of a message constructed from the concatenation of the input
- * message and the remaining fields of this output message, e.g. challenge[48
- * bytes] ... expiry_time[4 bytes] ...
- */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST 0
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96
-/* application expiry time */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_LEN 4
-/* application expiry units */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_LEN 4
-/* enum: expiry units are accounting units */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
-/* enum: expiry units are calendar days */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1
-/* base MAC address of the NIC stored in NVRAM (note that this is a constant
- * value for a given NIC regardless of which function is calling; effectively
- * this
- * is PF0 base MAC address)
- */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_OFST 104
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_LEN 6
-/* MAC address of v-adaptor associated with the client. If no such v-adaptor
- * exists, then the field is filled with 0xFF.
- */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_OFST 110
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_LEN 6
-
-
-/***********************************/
-/* MC_CMD_LICENSED_V3_MASK_FEATURES
- * Mask features - V3 licensing (Medford)
- */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
-#undef MC_CMD_0xd5_PRIVILEGE_CTG
-
-#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
-/* mask to be applied to features to be changed */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_OFST 0
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LEN 8
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_OFST 0
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_LEN 4
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_LBN 0
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_WIDTH 32
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_LEN 4
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_LBN 32
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_WIDTH 32
-/* whether to turn on or turn off the masked features */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4
-/* enum: turn the features off */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0
-/* enum: turn the features back on */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1
-
-/* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_LICENSING_V3_TEMPORARY
- * Perform operations to support installation of a single temporary license in
- * the adapter, in addition to those found in the licensing partition. See
- * SF-116124-SW for an overview of how this could be used. The license is
- * stored in MC persistent data and so will survive an MC reboot, but will be
- * erased when the adapter is power cycled.
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY 0xd6
-#undef MC_CMD_0xd6_PRIVILEGE_CTG
-
-#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4
-/* operation code */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_LEN 4
-/* enum: install a new license, overwriting any existing temporary license.
- * This is an asynchronous operation owing to the time taken to validate an
- * ECDSA license
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0
-/* enum: clear the license immediately rather than waiting for the next power
- * cycle
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1
-/* enum: get the status of the asynchronous MC_CMD_LICENSING_V3_TEMPORARY_SET
- * operation
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_LEN 4
-/* ECDSA license and signature */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_LEN 4
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_LEN 4
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12
-/* status code */
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_LEN 4
-/* enum: finished validating and installing license */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0
-/* enum: license validation and installation in progress */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1
-/* enum: licensing error. More specific error messages are not provided to
- * avoid exposing details of the licensing system to the client
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2
-/* bitmask of licensed features */
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_OFST 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LEN 8
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_OFST 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_LEN 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_LBN 32
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_WIDTH 32
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_OFST 8
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_LEN 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_LBN 64
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_SET_PORT_SNIFF_CONFIG
- * Configure RX port sniffing for the physical port associated with the calling
- * function. Only a privileged function may change the port sniffing
- * configuration. A copy of all traffic delivered to the host (non-promiscuous
- * mode) or all traffic arriving at the port (promiscuous mode) may be
- * delivered to a specific queue, or a set of queues with RSS.
- */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7
-#undef MC_CMD_0xf7_PRIVILEGE_CTG
-
-#define MC_CMD_0xf7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
-/* configuration flags */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_OFST 0
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_OFST 0
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
-/* receive queue handle (for RSS mode, this is the base queue) */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
-/* receive mode */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
-/* enum: receive to just the specified queue */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
-/* enum: receive to multiple queues using RSS context */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
-/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
- * that these handles should be considered opaque to the host, although a value
- * of 0xFFFFFFFF is guaranteed never to be a valid handle.
- */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
-
-/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_PORT_SNIFF_CONFIG
- * Obtain the current RX port sniffing configuration for the physical port
- * associated with the calling function. Only a privileged function may read
- * the configuration.
- */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
-#undef MC_CMD_0xf8_PRIVILEGE_CTG
-
-#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0
-
-/* MC_CMD_GET_PORT_SNIFF_CONFIG_OUT msgresponse */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
-/* configuration flags */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_OFST 0
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_OFST 0
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
-/* receiving queue handle (for RSS mode, this is the base queue) */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
-/* receive mode */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
-/* enum: receiving to just the specified queue */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
-/* enum: receiving to multiple queues using RSS context */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
-/* RSS context (for RX_MODE_RSS) */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
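For the corresponding GET response, each _LBN/_WIDTH pair locates a bit-field inside the dword at the matching _OFST, so decoding is a shift and a mask. A hedged sketch; get_le32() and mcdi_field() are assumptions mirroring the helper above, and the widths here are all well below 32 bits.

/* Hypothetical helpers: load a little-endian dword and extract a bit-field. */
static uint32_t get_le32(const uint8_t *buf, unsigned int ofst)
{
        return (uint32_t)buf[ofst] |
               ((uint32_t)buf[ofst + 1] << 8) |
               ((uint32_t)buf[ofst + 2] << 16) |
               ((uint32_t)buf[ofst + 3] << 24);
}

static uint32_t mcdi_field(uint32_t dword, unsigned int lbn, unsigned int width)
{
        return (dword >> lbn) & ((1u << width) - 1);    /* assumes width < 32 */
}

static void parse_get_port_sniff(const uint8_t *outbuf, int *enabled, int *promisc)
{
        uint32_t flags = get_le32(outbuf, MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST);

        *enabled = mcdi_field(flags, MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN,
                              MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH);
        *promisc = mcdi_field(flags, MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN,
                              MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH);
}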
-
-
-/***********************************/
/* MC_CMD_SET_PARSER_DISP_CONFIG
* Change configuration related to the parser-dispatcher subsystem.
*/
@@ -22073,305 +21309,6 @@
/***********************************/
-/* MC_CMD_GET_PARSER_DISP_CONFIG
- * Read configuration related to the parser-dispatcher subsystem.
- */
-#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa
-#undef MC_CMD_0xfa_PRIVILEGE_CTG
-
-#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
-/* the type of configuration setting to read */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
-/* handle for the entity to query: queue handle, EVB port ID, etc. depending on
- * the type of configuration setting being read
- */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
-
-/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_NUM(len) (((len)-0)/4)
-/* current value: the details depend on the type of configuration setting being
- * read
- */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM_MCDI2 255
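The LENMIN/LENMAX/_LEN(num)/_VALUE_NUM(len) macros above follow the variable-length array convention used throughout this header: _LEN(num) gives the payload length for num elements, and _VALUE_NUM(len) recovers the element count from the actual response length. A small sketch, reusing get_le32() from the example above; outlen stands in for whatever length the MCDI transport reported.

static void parse_parser_disp_values(const uint8_t *outbuf, size_t outlen)
{
        size_t i, nvalues = MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_NUM(outlen);

        for (i = 0; i < nvalues; i++) {
                uint32_t value = get_le32(outbuf,
                        MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST +
                        i * MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN);
                (void)value;    /* interpretation depends on the TYPE requested */
        }
}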
-
-
-/***********************************/
-/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG
- * Configure TX port sniffing for the physical port associated with the calling
- * function. Only a privileged function may change the port sniffing
- * configuration. A copy of all traffic transmitted through the port may be
- * delivered to a specific queue, or a set of queues with RSS. Note that these
- * packets are delivered with transmit timestamps in the packet prefix, not
- * receive timestamps, so it is likely that the queue(s) will need to be
- * dedicated as TX sniff receivers.
- */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG 0xfb
-#undef MC_CMD_0xfb_PRIVILEGE_CTG
-
-#define MC_CMD_0xfb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
-/* configuration flags */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_OFST 0
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
-/* receive queue handle (for RSS mode, this is the base queue) */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
-/* receive mode */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
-/* enum: receive to just the specified queue */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
-/* enum: receive to multiple queues using RSS context */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
-/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
- * that these handles should be considered opaque to the host, although a value
- * of 0xFFFFFFFF is guaranteed never to be a valid handle.
- */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
-
-/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG
- * Obtain the current TX port sniffing configuration for the physical port
- * associated with the calling function. Only a privileged function may read
- * the configuration.
- */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc
-#undef MC_CMD_0xfc_PRIVILEGE_CTG
-
-#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0
-
-/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
-/* configuration flags */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_OFST 0
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
-/* receiving queue handle (for RSS mode, this is the base queue) */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
-/* receive mode */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
-/* enum: receiving to just the specified queue */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
-/* enum: receiving to multiple queues using RSS context */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
-/* RSS context (for RX_MODE_RSS) */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
-
-
-/***********************************/
-/* MC_CMD_RMON_STATS_RX_ERRORS
- * Per queue rx error stats.
- */
-#define MC_CMD_RMON_STATS_RX_ERRORS 0xfe
-#undef MC_CMD_0xfe_PRIVILEGE_CTG
-
-#define MC_CMD_0xfe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_RMON_STATS_RX_ERRORS_IN msgrequest */
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
-/* The rx queue to get stats for. */
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_OFST 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_LEN 4
-
-
-/***********************************/
-/* MC_CMD_GET_PCIE_RESOURCE_INFO
- * Find out about available PCIE resources
- */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
-#undef MC_CMD_0xfd_PRIVILEGE_CTG
-
-#define MC_CMD_0xfd_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
-
-/* MC_CMD_GET_PCIE_RESOURCE_INFO_OUT msgresponse */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
-/* The maximum number of PFs the device can expose */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_LEN 4
-/* The maximum number of VFs the device can expose in total */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_LEN 4
-/* The maximum number of MSI-X vectors the device can provide in total */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_LEN 4
-/* the number of MSI-X vectors the device will allocate by default to each PF
- */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_LEN 4
-/* the number of MSI-X vectors the device will allocate by default to each VF
- */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_LEN 4
-/* the maximum number of MSI-X vectors the device can allocate to any one PF */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_LEN 4
-/* the maximum number of MSI-X vectors the device can allocate to any one VF */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_GET_PORT_MODES
- * Find out about available port modes
- */
-#define MC_CMD_GET_PORT_MODES 0xff
-#undef MC_CMD_0xff_PRIVILEGE_CTG
-
-#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PORT_MODES_IN msgrequest */
-#define MC_CMD_GET_PORT_MODES_IN_LEN 0
-
-/* MC_CMD_GET_PORT_MODES_OUT msgresponse */
-#define MC_CMD_GET_PORT_MODES_OUT_LEN 12
-/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*)
- * that are supported for customer use in production firmware.
- */
-#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
-#define MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4
-/* Default (canonical) board mode */
-#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
-#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_LEN 4
-/* Current board mode */
-#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
-#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4
-
-/* MC_CMD_GET_PORT_MODES_OUT_V2 msgresponse */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_LEN 16
-/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*)
- * that are supported for customer use in production firmware.
- */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_MODES_OFST 0
-#define MC_CMD_GET_PORT_MODES_OUT_V2_MODES_LEN 4
-/* Default (canonical) board mode */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_OFST 4
-#define MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_LEN 4
-/* Current board mode */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_OFST 8
-#define MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_LEN 4
-/* Bitmask of engineering port modes available on the board (indexed by
- * TLV_PORT_MODE_*). A superset of MC_CMD_GET_PORT_MODES_OUT/MODES that
- * contains all modes implemented in firmware for a particular board. Modes
- * listed in MODES are considered production modes and should be exposed in
- * userland tools. Modes listed in ENGINEERING_MODES, but not in MODES,
- * should be considered hidden (not to be exposed in userland tools) and for
- * engineering use only. There are no other semantic differences and any mode
- * listed in either MODES or ENGINEERING_MODES can be set on the board.
- */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_OFST 12
-#define MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_LEN 4
-
-
-/***********************************/
-/* MC_CMD_OVERRIDE_PORT_MODE
- * Override flash config port mode for subsequent MC reboot(s). Override data
- * is stored in the persistent data section of DMEM and activated on next MC
- * warm reboot. A cold reboot resets the override. It is assumed that a
- * sufficient number of PFs are available and that port mapping is valid for
- * the new port mode, as the override does not affect PF configuration.
- */
-#define MC_CMD_OVERRIDE_PORT_MODE 0x137
-#undef MC_CMD_0x137_PRIVILEGE_CTG
-
-#define MC_CMD_0x137_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_OVERRIDE_PORT_MODE_IN msgrequest */
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_LEN 8
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_OFST 0
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_LEN 4
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_OFST 0
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_LBN 0
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_WIDTH 1
-/* New mode (TLV_PORT_MODE_*) to set, if override enabled */
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_OFST 4
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_LEN 4
-
-/* MC_CMD_OVERRIDE_PORT_MODE_OUT msgresponse */
-#define MC_CMD_OVERRIDE_PORT_MODE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_READ_ATB
- * Sample voltages on the ATB
- */
-#define MC_CMD_READ_ATB 0x100
-#undef MC_CMD_0x100_PRIVILEGE_CTG
-
-#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_READ_ATB_IN msgrequest */
-#define MC_CMD_READ_ATB_IN_LEN 16
-#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
-#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_LEN 4
-#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */
-#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */
-#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */
-#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
-#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_LEN 4
-#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
-#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_LEN 4
-#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
-#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_LEN 4
-
-/* MC_CMD_READ_ATB_OUT msgresponse */
-#define MC_CMD_READ_ATB_OUT_LEN 4
-#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
-#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_LEN 4
-
-
-/***********************************/
/* MC_CMD_GET_WORKAROUNDS
* Read the list of all implemented and all currently enabled workarounds. The
* enums here must correspond with those in MC_CMD_WORKAROUND.
@@ -22538,447 +21475,6 @@
#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_LEN 4
-
-/***********************************/
-/* MC_CMD_GET_SNAPSHOT_LENGTH
- * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
- * parameter to MC_CMD_INIT_RXQ.
- */
-#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
-#undef MC_CMD_0x101_PRIVILEGE_CTG
-
-#define MC_CMD_0x101_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_SNAPSHOT_LENGTH_IN msgrequest */
-#define MC_CMD_GET_SNAPSHOT_LENGTH_IN_LEN 0
-
-/* MC_CMD_GET_SNAPSHOT_LENGTH_OUT msgresponse */
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
-/* Minimum acceptable snapshot length. */
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_LEN 4
-/* Maximum acceptable snapshot length. */
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_LEN 4
-
-
-/***********************************/
-/* MC_CMD_FUSE_DIAGS
- * Additional fuse diagnostics
- */
-#define MC_CMD_FUSE_DIAGS 0x102
-#undef MC_CMD_0x102_PRIVILEGE_CTG
-
-#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_FUSE_DIAGS_IN msgrequest */
-#define MC_CMD_FUSE_DIAGS_IN_LEN 0
-
-/* MC_CMD_FUSE_DIAGS_OUT msgresponse */
-#define MC_CMD_FUSE_DIAGS_OUT_LEN 48
-/* Total number of mismatched bits between pairs in area 0 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_LEN 4
-/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_LEN 4
-/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_LEN 4
-/* Checksum of data after logical OR of pairs in area 0 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_LEN 4
-/* Total number of mismatched bits between pairs in area 1 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_LEN 4
-/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_LEN 4
-/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_LEN 4
-/* Checksum of data after logical OR of pairs in area 1 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_LEN 4
-/* Total number of mismatched bits between pairs in area 2 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_LEN 4
-/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_LEN 4
-/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_LEN 4
-/* Checksum of data after logical OR of pairs in area 2 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PRIVILEGE_MODIFY
- * Modify the privileges of a set of PCIe functions. Note that this operation
- * only affects non-admin functions unless the admin privilege itself is
- * included in one of the masks provided.
- */
-#define MC_CMD_PRIVILEGE_MODIFY 0x60
-#undef MC_CMD_0x60_PRIVILEGE_CTG
-
-#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
-/* The groups of functions to have their privilege masks modified. */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_LEN 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */
-/* For VFS_OF_PF specify the PF, for ONE specify the target function */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_OFST 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_OFST 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16
-/* Privileges to be added to the target functions. For privilege definitions
- * refer to the command MC_CMD_PRIVILEGE_MASK
- */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_LEN 4
-/* Privileges to be removed from the target functions. For privilege
- * definitions refer to the command MC_CMD_PRIVILEGE_MASK
- */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
-#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_LEN 4
-
-/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
-#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
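FUNCTION above is a single dword carrying two 16-bit sub-fields (PF at bits 0..15, VF at bits 16..31), as the _LBN/_WIDTH pairs indicate. A hedged sketch of packing it for the ONE group, reusing put_le32() from the first example; the mask arguments are placeholders supplied by the caller.

static void build_privilege_modify_one(uint8_t *buf, uint16_t pf, uint16_t vf,
                                       uint32_t add_mask, uint32_t remove_mask)
{
        uint32_t function =
                ((uint32_t)pf << MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN) |
                ((uint32_t)vf << MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN);

        memset(buf, 0, MC_CMD_PRIVILEGE_MODIFY_IN_LEN);
        put_le32(buf, MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST,
                 MC_CMD_PRIVILEGE_MODIFY_IN_ONE);
        put_le32(buf, MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST, function);
        put_le32(buf, MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST, add_mask);
        put_le32(buf, MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST, remove_mask);
}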
-
-
-/***********************************/
-/* MC_CMD_XPM_READ_BYTES
- * Read XPM memory
- */
-#define MC_CMD_XPM_READ_BYTES 0x103
-#undef MC_CMD_0x103_PRIVILEGE_CTG
-
-#define MC_CMD_0x103_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_XPM_READ_BYTES_IN msgrequest */
-#define MC_CMD_XPM_READ_BYTES_IN_LEN 8
-/* Start address (byte) */
-#define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
-#define MC_CMD_XPM_READ_BYTES_IN_ADDR_LEN 4
-/* Count (bytes) */
-#define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
-#define MC_CMD_XPM_READ_BYTES_IN_COUNT_LEN 4
-
-/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
-#define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
-#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX 252
-#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_XPM_READ_BYTES_OUT_LEN(num) (0+1*(num))
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_NUM(len) (((len)-0)/1)
-/* Data */
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST 0
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_LEN 1
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MINNUM 0
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM 252
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM_MCDI2 1020
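Because the response DATA array is capped at OUT_LENMAX (252 bytes for MCDIv1), a larger read has to be split into chunks. A sketch under the assumption of a generic rpc() callback standing in for whatever MCDI transport the caller uses; put_le32() is the helper from the first example.

/* Hypothetical transport hook: issue one MCDI command, return the response
 * length in bytes or a negative error code. Not part of this header.
 */
typedef int (*mcdi_rpc_fn)(unsigned int cmd, const uint8_t *inbuf, size_t inlen,
                           uint8_t *outbuf, size_t outlen_max);

static int xpm_read(mcdi_rpc_fn rpc, uint32_t addr, uint8_t *dst, uint32_t count)
{
        uint8_t inbuf[MC_CMD_XPM_READ_BYTES_IN_LEN];
        uint8_t outbuf[MC_CMD_XPM_READ_BYTES_OUT_LENMAX];

        while (count) {
                uint32_t chunk = count < sizeof(outbuf) ? count : sizeof(outbuf);
                int rc;

                put_le32(inbuf, MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST, addr);
                put_le32(inbuf, MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST, chunk);
                rc = rpc(MC_CMD_XPM_READ_BYTES, inbuf, sizeof(inbuf),
                         outbuf, sizeof(outbuf));
                if (rc < 0)
                        return rc;
                if ((uint32_t)rc < chunk)
                        return -1;      /* short response: treated as an error here */
                memcpy(dst, outbuf + MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST, chunk);
                dst += chunk;
                addr += chunk;
                count -= chunk;
        }
        return 0;
}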
-
-
-/***********************************/
-/* MC_CMD_XPM_WRITE_BYTES
- * Write XPM memory
- */
-#define MC_CMD_XPM_WRITE_BYTES 0x104
-#undef MC_CMD_0x104_PRIVILEGE_CTG
-
-#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
-#define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
-#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX 252
-#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX_MCDI2 1020
-#define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_NUM(len) (((len)-8)/1)
-/* Start address (byte) */
-#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
-#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_LEN 4
-/* Count (bytes) */
-#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
-#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_LEN 4
-/* Data */
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MINNUM 0
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM 244
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM_MCDI2 1012
-
-/* MC_CMD_XPM_WRITE_BYTES_OUT msgresponse */
-#define MC_CMD_XPM_WRITE_BYTES_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_READ_SECTOR
- * Read XPM sector
- */
-#define MC_CMD_XPM_READ_SECTOR 0x105
-#undef MC_CMD_0x105_PRIVILEGE_CTG
-
-#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
-#define MC_CMD_XPM_READ_SECTOR_IN_LEN 8
-/* Sector index */
-#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
-#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_LEN 4
-/* Sector size */
-#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
-#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_LEN 4
-
-/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
-#define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
-#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX 36
-#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX_MCDI2 36
-#define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_NUM(len) (((len)-4)/1)
-/* Sector type */
-#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
-#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_LEN 4
-#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_DATA 0x3 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */
-/* Sector data */
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MINNUM 0
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM 32
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM_MCDI2 32
-
-
-/***********************************/
-/* MC_CMD_XPM_WRITE_SECTOR
- * Write XPM sector
- */
-#define MC_CMD_XPM_WRITE_SECTOR 0x106
-#undef MC_CMD_0x106_PRIVILEGE_CTG
-
-#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
-#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX 44
-#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX_MCDI2 44
-#define MC_CMD_XPM_WRITE_SECTOR_IN_LEN(num) (12+1*(num))
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_NUM(len) (((len)-12)/1)
-/* If writing fails due to an uncorrectable error, try up to RETRIES following
- * sectors (or until no more space available). If 0, only one write attempt is
- * made. Note that uncorrectable errors are unlikely, thanks to the XPM
- * self-repair mechanism.
- */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_OFST 0
-#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_LEN 1
-#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_OFST 1
-#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
-/* Sector type */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
-#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
-/* Sector size */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
-#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_LEN 4
-/* Sector data */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MINNUM 0
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM 32
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM_MCDI2 32
-
-/* MC_CMD_XPM_WRITE_SECTOR_OUT msgresponse */
-#define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
-/* New sector index */
-#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
-#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_LEN 4
-
-
-/***********************************/
-/* MC_CMD_XPM_INVALIDATE_SECTOR
- * Invalidate XPM sector
- */
-#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
-#undef MC_CMD_0x107_PRIVILEGE_CTG
-
-#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
-#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
-/* Sector index */
-#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
-#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_LEN 4
-
-/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
-#define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_BLANK_CHECK
- * Blank-check XPM memory and report bad locations
- */
-#define MC_CMD_XPM_BLANK_CHECK 0x108
-#undef MC_CMD_0x108_PRIVILEGE_CTG
-
-#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
-#define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
-/* Start address (byte) */
-#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
-#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_LEN 4
-/* Count (bytes) */
-#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
-#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_LEN 4
-
-/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
-#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
-#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX 252
-#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_NUM(len) (((len)-4)/2)
-/* Total number of bad (non-blank) locations */
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_LEN 4
- * Addresses of bad locations (may be fewer than BAD_COUNT, if they cannot all
- * fit into the MCDI response)
- */
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST 4
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN 2
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MINNUM 0
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM 124
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM_MCDI2 508
-
-
-/***********************************/
-/* MC_CMD_XPM_REPAIR
- * Blank-check and repair XPM memory
- */
-#define MC_CMD_XPM_REPAIR 0x109
-#undef MC_CMD_0x109_PRIVILEGE_CTG
-
-#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_REPAIR_IN msgrequest */
-#define MC_CMD_XPM_REPAIR_IN_LEN 8
-/* Start address (byte) */
-#define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
-#define MC_CMD_XPM_REPAIR_IN_ADDR_LEN 4
-/* Count (bytes) */
-#define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
-#define MC_CMD_XPM_REPAIR_IN_COUNT_LEN 4
-
-/* MC_CMD_XPM_REPAIR_OUT msgresponse */
-#define MC_CMD_XPM_REPAIR_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_DECODER_TEST
- * Test XPM memory address decoders for gross manufacturing defects. Can only
- * be performed on an unprogrammed part.
- */
-#define MC_CMD_XPM_DECODER_TEST 0x10a
-#undef MC_CMD_0x10a_PRIVILEGE_CTG
-
-#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
-#define MC_CMD_XPM_DECODER_TEST_IN_LEN 0
-
-/* MC_CMD_XPM_DECODER_TEST_OUT msgresponse */
-#define MC_CMD_XPM_DECODER_TEST_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_WRITE_TEST
- * XPM memory write test. Test XPM write logic for gross manufacturing defects
- * by writing to a dedicated test row. There are 16 locations in the test row
- * and the test can only be performed on locations that have not been
- * previously used (i.e. can be run at most 16 times). The test will pick the
- * first available location to use, or fail with ENOSPC if none left.
- */
-#define MC_CMD_XPM_WRITE_TEST 0x10b
-#undef MC_CMD_0x10b_PRIVILEGE_CTG
-
-#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
-#define MC_CMD_XPM_WRITE_TEST_IN_LEN 0
-
-/* MC_CMD_XPM_WRITE_TEST_OUT msgresponse */
-#define MC_CMD_XPM_WRITE_TEST_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_EXEC_SIGNED
- * Check the CMAC of the contents of IMEM and DMEM against the value supplied
- * and if correct begin execution from the start of IMEM. The caller supplies a
- * key ID, the length of IMEM and DMEM to validate and the expected CMAC. CMAC
- * computation runs from the start of IMEM, and from the start of DMEM + 16k,
- * to match flash booting. The command will respond with EINVAL if the CMAC
- * does not match, otherwise it will respond with success before it jumps to
- * IMEM.
- */
-#define MC_CMD_EXEC_SIGNED 0x10c
-#undef MC_CMD_0x10c_PRIVILEGE_CTG
-
-#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_EXEC_SIGNED_IN msgrequest */
-#define MC_CMD_EXEC_SIGNED_IN_LEN 28
-/* the length of code to include in the CMAC */
-#define MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0
-#define MC_CMD_EXEC_SIGNED_IN_CODELEN_LEN 4
-/* the length of data to include in the CMAC */
-#define MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4
-#define MC_CMD_EXEC_SIGNED_IN_DATALEN_LEN 4
-/* the XPM sector containing the key to use */
-#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8
-#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_LEN 4
-/* the expected CMAC value */
-#define MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12
-#define MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16
-
-/* MC_CMD_EXEC_SIGNED_OUT msgresponse */
-#define MC_CMD_EXEC_SIGNED_OUT_LEN 0
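A hedged sketch of packing the fixed 28-byte request: the first three fields are dwords, while CMAC occupies 16 bytes at offset 12 and is copied in directly. put_le32() is the helper from the first example, and the caller is assumed to already hold the expected CMAC.

static void build_exec_signed(uint8_t *buf, uint32_t codelen, uint32_t datalen,
                              uint32_t keysector, const uint8_t cmac[16])
{
        memset(buf, 0, MC_CMD_EXEC_SIGNED_IN_LEN);
        put_le32(buf, MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST, codelen);
        put_le32(buf, MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST, datalen);
        put_le32(buf, MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST, keysector);
        memcpy(buf + MC_CMD_EXEC_SIGNED_IN_CMAC_OFST, cmac,
               MC_CMD_EXEC_SIGNED_IN_CMAC_LEN);
}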
-
-
-/***********************************/
-/* MC_CMD_PREPARE_SIGNED
- * Prepare to upload a signed image. This will scrub the specified length of
- * the data region, which must be at least as large as the DATALEN supplied to
- * MC_CMD_EXEC_SIGNED.
- */
-#define MC_CMD_PREPARE_SIGNED 0x10d
-#undef MC_CMD_0x10d_PRIVILEGE_CTG
-
-#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PREPARE_SIGNED_IN msgrequest */
-#define MC_CMD_PREPARE_SIGNED_IN_LEN 4
-/* the length of data area to clear */
-#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0
-#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_LEN 4
-
-/* MC_CMD_PREPARE_SIGNED_OUT msgresponse */
-#define MC_CMD_PREPARE_SIGNED_OUT_LEN 0
-
-
/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
/* UDP port (the standard ports are named below but any port may be used) */
@@ -23049,110 +21545,6 @@
/***********************************/
-/* MC_CMD_RX_BALANCING
- * Configure a port upconverter to distribute the packets on both RX engines.
- * Packets are distributed based on a table with the destination vFIFO. The
- * index of the table is a hash of source and destination of IPV4 and VLAN
- * priority.
- */
-#define MC_CMD_RX_BALANCING 0x118
-#undef MC_CMD_0x118_PRIVILEGE_CTG
-
-#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_RX_BALANCING_IN msgrequest */
-#define MC_CMD_RX_BALANCING_IN_LEN 16
-/* The RX port whose upconverter table will be modified */
-#define MC_CMD_RX_BALANCING_IN_PORT_OFST 0
-#define MC_CMD_RX_BALANCING_IN_PORT_LEN 4
-/* The VLAN priority associated to the table index and vFIFO */
-#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4
-#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 4
-/* The resulting bit of SRC^DST for indexing the table */
-#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8
-#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 4
-/* The RX engine to which the vFIFO in the table entry will point */
-#define MC_CMD_RX_BALANCING_IN_ENG_OFST 12
-#define MC_CMD_RX_BALANCING_IN_ENG_LEN 4
-
-/* MC_CMD_RX_BALANCING_OUT msgresponse */
-#define MC_CMD_RX_BALANCING_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_NVRAM_PRIVATE_APPEND
- * Append a single TLV to the MC_USAGE_TLV partition. Returns MC_CMD_ERR_EEXIST
- * if the tag is already present.
- */
-#define MC_CMD_NVRAM_PRIVATE_APPEND 0x11c
-#undef MC_CMD_0x11c_PRIVILEGE_CTG
-
-#define MC_CMD_0x11c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_NVRAM_PRIVATE_APPEND_IN msgrequest */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMIN 9
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX 252
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX_MCDI2 1020
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num))
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_NUM(len) (((len)-8)/1)
-/* The tag to be appended */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_LEN 4
-/* The length of the data */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_OFST 4
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_LEN 4
-/* The data to be contained in the TLV structure */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_OFST 8
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MINNUM 1
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM 244
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM_MCDI2 1012
-
-/* MC_CMD_NVRAM_PRIVATE_APPEND_OUT msgresponse */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_VERIFY_CONTENTS
- * Verify that the contents of the XPM memory are correct (Medford only). This
- * is used during manufacture to check that the XPM memory has been programmed
- * correctly at ATE.
- */
-#define MC_CMD_XPM_VERIFY_CONTENTS 0x11b
-#undef MC_CMD_0x11b_PRIVILEGE_CTG
-
-#define MC_CMD_0x11b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_XPM_VERIFY_CONTENTS_IN msgrequest */
-#define MC_CMD_XPM_VERIFY_CONTENTS_IN_LEN 4
-/* Data type to be checked */
-#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_OFST 0
-#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_LEN 4
-
-/* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX 252
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num))
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_NUM(len) (((len)-12)/1)
-/* Number of sectors found (test builds only) */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_LEN 4
-/* Number of bytes found (test builds only) */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_OFST 4
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_LEN 4
-/* Length of signature */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_OFST 8
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_LEN 4
-/* Signature */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_OFST 12
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MINNUM 0
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM 240
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM_MCDI2 1008
-
-
-/***********************************/
/* MC_CMD_SET_EVQ_TMR
* Update the timer load, timer reload and timer mode values for a given EVQ.
* The requested timer values (in TMR_LOAD_REQ_NS and TMR_RELOAD_REQ_NS) will
@@ -23262,576 +21654,6 @@
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_LEN 4
-
-/***********************************/
-/* MC_CMD_ALLOCATE_TX_VFIFO_CP
- * When we use the TX_vFIFO_ULL mode, we can allocate common pools using the
- * non used switch buffers.
- */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP 0x11d
-#undef MC_CMD_0x11d_PRIVILEGE_CTG
-
-#define MC_CMD_0x11d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN msgrequest */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_LEN 20
-/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index. The calling client must be the currently-assigned user of
- * this VI (see MC_CMD_SET_VI_USER).
- */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_LEN 4
-/* Will the common pool be used as TX_vFIFO_ULL (1) */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST 4
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_LEN 4
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */
-/* enum: Using this interface without TX_vFIFO_ULL is not supported for now */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0
-/* Number of buffers to reserve for the common pool */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST 8
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_LEN 4
-/* TX datapath to which the Common Pool is connected. */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST 12
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_LEN 4
-/* enum: Extracts information from function */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1
-/* Network port or RX Engine to which the common pool connects. */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST 16
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_LEN 4
-/* enum: Extracts information from function */
-/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT1 0x1 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT2 0x2 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT3 0x3 /* enum */
-/* enum: To enable Switch loopback with Rx engine 0 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE0 0x4
-/* enum: To enable Switch loopback with Rx engine 1 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE1 0x5
-
-/* MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT msgresponse */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN 4
-/* ID of the common pool allocated */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_OFST 0
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_LEN 4
-
-
-/***********************************/
-/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO
- * When we use the TX_vFIFO_ULL mode, we can allocate vFIFOs using the
- * previously allocated common pools.
- */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO 0x11e
-#undef MC_CMD_0x11e_PRIVILEGE_CTG
-
-#define MC_CMD_0x11e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN msgrequest */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LEN 20
-/* Previously allocated common pool with which the new vFIFO will be
- * associated.
- */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST 0
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_LEN 4
-/* Port or RX engine to associate the vFIFO egress */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST 4
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_LEN 4
-/* enum: Extracts information from common pool */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT1 0x1 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT2 0x2 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT3 0x3 /* enum */
-/* enum: To enable Switch loopback with Rx engine 0 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE0 0x4
-/* enum: To enable Switch loopback with Rx engine 1 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5
-/* Minimum number of buffers that the pool must have */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST 8
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_LEN 4
-/* enum: Do not check the space available */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0
-/* Will the vFIFO be used as TX_vFIFO_ULL */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST 12
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_LEN 4
-/* Network priority of the vFIFO, if applicable */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST 16
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_LEN 4
-/* enum: Search for the lowest unused priority */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1
-
-/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT msgresponse */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN 8
-/* Short vFIFO ID */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST 0
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_LEN 4
-/* Network priority of the vFIFO */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_OFST 4
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_LEN 4
-
-
-/***********************************/
-/* MC_CMD_TEARDOWN_TX_VFIFO_VF
- * This interface clears the configuration of the given vFIFO and leaves it
- * ready to be re-used.
- */
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF 0x11f
-#undef MC_CMD_0x11f_PRIVILEGE_CTG
-
-#define MC_CMD_0x11f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TEARDOWN_TX_VFIFO_VF_IN msgrequest */
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_LEN 4
-/* Short vFIFO ID */
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_OFST 0
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_LEN 4
-
-/* MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT msgresponse */
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DEALLOCATE_TX_VFIFO_CP
- * This interface clears the configuration of the given common pool and leaves
- * it ready to be re-used.
- */
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP 0x121
-#undef MC_CMD_0x121_PRIVILEGE_CTG
-
-#define MC_CMD_0x121_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN msgrequest */
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN 4
-/* Common pool ID given when pool allocated */
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_OFST 0
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_LEN 4
-
-/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT msgresponse */
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS
- * This interface allows the host to find out how many common pool buffers are
- * not yet assigned.
- */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS 0x124
-#undef MC_CMD_0x124_PRIVILEGE_CTG
-
-#define MC_CMD_0x124_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN msgrequest */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN_LEN 0
-
-/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT msgresponse */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_LEN 8
-/* Available buffers for the ENG to NET vFIFOs. */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_OFST 0
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_LEN 4
-/* Available buffers for the ENG to ENG and NET to ENG vFIFOs. */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_OFST 4
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_LEN 4
-
-
-/***********************************/
-/* MC_CMD_SUC_VERSION
- * Get the version of the SUC
- */
-#define MC_CMD_SUC_VERSION 0x134
-#undef MC_CMD_0x134_PRIVILEGE_CTG
-
-#define MC_CMD_0x134_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_SUC_VERSION_IN msgrequest */
-#define MC_CMD_SUC_VERSION_IN_LEN 0
-
-/* MC_CMD_SUC_VERSION_OUT msgresponse */
-#define MC_CMD_SUC_VERSION_OUT_LEN 24
-/* The SUC firmware version as four numbers - a.b.c.d */
-#define MC_CMD_SUC_VERSION_OUT_VERSION_OFST 0
-#define MC_CMD_SUC_VERSION_OUT_VERSION_LEN 4
-#define MC_CMD_SUC_VERSION_OUT_VERSION_NUM 4
-/* The date, in seconds since the Unix epoch, when the firmware image was
- * built.
- */
-#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_OFST 16
-#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_LEN 4
-/* The ID of the SUC chip. This is specific to the platform but typically
- * indicates family, memory sizes etc. See SF-116728-SW for further details.
- */
-#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_OFST 20
-#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_LEN 4
-
-/* MC_CMD_SUC_BOOT_VERSION_IN msgrequest: Get the version of the SUC boot
- * loader.
- */
-#define MC_CMD_SUC_BOOT_VERSION_IN_LEN 4
-#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_OFST 0
-#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_LEN 4
-/* enum: Requests the SUC boot version. */
-#define MC_CMD_SUC_VERSION_GET_BOOT_VERSION 0xb007700b
-
-/* MC_CMD_SUC_BOOT_VERSION_OUT msgresponse */
-#define MC_CMD_SUC_BOOT_VERSION_OUT_LEN 4
-/* The SUC boot version */
-#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_OFST 0
-#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_LEN 4
-
-
-/***********************************/
-/* MC_CMD_GET_RX_PREFIX_ID
- * This command is part of the mechanism for configuring the format of the RX
- * packet prefix. It takes as input a bitmask of the fields the host would like
- * to be in the prefix. If the hardware supports RX prefixes with that
- * combination of fields, then this command returns a list of prefix-ids,
- * opaque identifiers suitable for use in the RX_PREFIX_ID field of a
- * MC_CMD_INIT_RXQ_V5_IN message. If the combination of fields is not
- * supported, returns ENOTSUP. If the firmware can't create any new prefix-ids
- * due to resource constraints, returns ENOSPC.
- */
-#define MC_CMD_GET_RX_PREFIX_ID 0x13b
-#undef MC_CMD_0x13b_PRIVILEGE_CTG
-
-#define MC_CMD_0x13b_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_RX_PREFIX_ID_IN msgrequest */
-#define MC_CMD_GET_RX_PREFIX_ID_IN_LEN 8
-/* Field bitmask. */
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LEN 8
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_LEN 4
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_LBN 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_WIDTH 32
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_OFST 4
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_LEN 4
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_LBN 32
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_WIDTH 32
-#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_LBN 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_LBN 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_LBN 2
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_LBN 3
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_LBN 4
-#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_LBN 5
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_LBN 6
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_LBN 7
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_LBN 7
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_LBN 8
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_LBN 9
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_LBN 10
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_LBN 11
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_WIDTH 1
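FIELDS is a 64-bit mask split into LO/HI dwords, and the individual _LBN values above give each field's bit position within that mask. A sketch of requesting a prefix containing LENGTH, RSS_HASH and USER_MARK, reusing put_le32() from the first example; all of these bits happen to sit in the LO dword.

static void build_get_rx_prefix_id(uint8_t *buf)
{
        uint32_t fields_lo = (1u << MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_LBN) |
                             (1u << MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_LBN) |
                             (1u << MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_LBN);

        memset(buf, 0, MC_CMD_GET_RX_PREFIX_ID_IN_LEN);
        put_le32(buf, MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_OFST, fields_lo);
        put_le32(buf, MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_OFST, 0);
}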
-
-/* MC_CMD_GET_RX_PREFIX_ID_OUT msgresponse */
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMIN 8
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMAX 252
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_LEN(num) (4+4*(num))
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_NUM(len) (((len)-4)/4)
-/* Number of prefix-ids returned */
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_NUM_RX_PREFIX_IDS_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_NUM_RX_PREFIX_IDS_LEN 4
-/* Opaque prefix identifiers which can be passed into MC_CMD_INIT_RXQ_V5 or
- * MC_CMD_QUERY_PREFIX_ID
- */
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_OFST 4
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_LEN 4
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MINNUM 1
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MAXNUM 62
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MAXNUM_MCDI2 254
-
-/* RX_PREFIX_FIELD_INFO structuredef: Information about a single RX prefix
- * field
- */
-#define RX_PREFIX_FIELD_INFO_LEN 4
-/* The offset of the field from the start of the prefix, in bits */
-#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_OFST 0
-#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_LEN 2
-#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_LBN 0
-#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_WIDTH 16
-/* The width of the field, in bits */
-#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_OFST 2
-#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_LEN 1
-#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_LBN 16
-#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_WIDTH 8
-/* The type of the field. These enum values are in the same order as the fields
- * in the MC_CMD_GET_RX_PREFIX_ID_IN bitmask
- */
-#define RX_PREFIX_FIELD_INFO_TYPE_OFST 3
-#define RX_PREFIX_FIELD_INFO_TYPE_LEN 1
-#define RX_PREFIX_FIELD_INFO_LENGTH 0x0 /* enum */
-#define RX_PREFIX_FIELD_INFO_RSS_HASH_VALID 0x1 /* enum */
-#define RX_PREFIX_FIELD_INFO_USER_FLAG 0x2 /* enum */
-#define RX_PREFIX_FIELD_INFO_CLASS 0x3 /* enum */
-#define RX_PREFIX_FIELD_INFO_PARTIAL_TSTAMP 0x4 /* enum */
-#define RX_PREFIX_FIELD_INFO_RSS_HASH 0x5 /* enum */
-#define RX_PREFIX_FIELD_INFO_USER_MARK 0x6 /* enum */
-#define RX_PREFIX_FIELD_INFO_INGRESS_MPORT 0x7 /* enum */
-#define RX_PREFIX_FIELD_INFO_INGRESS_VPORT 0x7 /* enum */
-#define RX_PREFIX_FIELD_INFO_CSUM_FRAME 0x8 /* enum */
-#define RX_PREFIX_FIELD_INFO_VLAN_STRIP_TCI 0x9 /* enum */
-#define RX_PREFIX_FIELD_INFO_VLAN_STRIPPED 0xa /* enum */
-#define RX_PREFIX_FIELD_INFO_VSWITCH_STATUS 0xb /* enum */
-#define RX_PREFIX_FIELD_INFO_TYPE_LBN 24
-#define RX_PREFIX_FIELD_INFO_TYPE_WIDTH 8
-
-/* RX_PREFIX_FIXED_RESPONSE structuredef: Information about an RX prefix in
- * which every field has a fixed offset and width
- */
-#define RX_PREFIX_FIXED_RESPONSE_LENMIN 4
-#define RX_PREFIX_FIXED_RESPONSE_LENMAX 252
-#define RX_PREFIX_FIXED_RESPONSE_LENMAX_MCDI2 1020
-#define RX_PREFIX_FIXED_RESPONSE_LEN(num) (4+4*(num))
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_NUM(len) (((len)-4)/4)
-/* Length of the RX prefix in bytes */
-#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_OFST 0
-#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_LEN 1
-#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_LBN 0
-#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_WIDTH 8
-/* Number of fields present in the prefix */
-#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_OFST 1
-#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_LEN 1
-#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_LBN 8
-#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_WIDTH 8
-#define RX_PREFIX_FIXED_RESPONSE_RESERVED_OFST 2
-#define RX_PREFIX_FIXED_RESPONSE_RESERVED_LEN 2
-#define RX_PREFIX_FIXED_RESPONSE_RESERVED_LBN 16
-#define RX_PREFIX_FIXED_RESPONSE_RESERVED_WIDTH 16
-/* Array of RX_PREFIX_FIELD_INFO structures, of length FIELD_COUNT */
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_OFST 4
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_LEN 4
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MINNUM 0
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MAXNUM 62
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MAXNUM_MCDI2 254
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_LBN 32
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_WIDTH 32
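RX_PREFIX_FIELD_INFO packs three sub-byte fields into one dword (a 16-bit bit-offset, an 8-bit width and an 8-bit type), and RX_PREFIX_FIXED_RESPONSE is a one-byte prefix length, a one-byte field count and then FIELD_COUNT such entries. A sketch of walking the structure, reusing get_le32() and mcdi_field() from the earlier example; resp is assumed to point at the start of the structure.

static void walk_rx_prefix_fixed_response(const uint8_t *resp)
{
        unsigned int nfields = resp[RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_OFST];
        unsigned int i;

        for (i = 0; i < nfields; i++) {
                uint32_t info = get_le32(resp, RX_PREFIX_FIXED_RESPONSE_FIELDS_OFST +
                                         i * RX_PREFIX_FIXED_RESPONSE_FIELDS_LEN);
                unsigned int bit_ofst = mcdi_field(info,
                                RX_PREFIX_FIELD_INFO_OFFSET_BITS_LBN,
                                RX_PREFIX_FIELD_INFO_OFFSET_BITS_WIDTH);
                unsigned int bit_width = mcdi_field(info,
                                RX_PREFIX_FIELD_INFO_WIDTH_BITS_LBN,
                                RX_PREFIX_FIELD_INFO_WIDTH_BITS_WIDTH);
                unsigned int type = mcdi_field(info,
                                RX_PREFIX_FIELD_INFO_TYPE_LBN,
                                RX_PREFIX_FIELD_INFO_TYPE_WIDTH);

                (void)bit_ofst; (void)bit_width; (void)type;
        }
}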
-
-
-/***********************************/
-/* MC_CMD_QUERY_RX_PREFIX_ID
- * This command takes an RX prefix id (obtained from MC_CMD_GET_RX_PREFIX_ID)
- * and returns a description of the RX prefix of packets delivered to an RXQ
- * created with that prefix id
- */
-#define MC_CMD_QUERY_RX_PREFIX_ID 0x13c
-#undef MC_CMD_0x13c_PRIVILEGE_CTG
-
-#define MC_CMD_0x13c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_QUERY_RX_PREFIX_ID_IN msgrequest */
-#define MC_CMD_QUERY_RX_PREFIX_ID_IN_LEN 4
-/* Prefix id to query */
-#define MC_CMD_QUERY_RX_PREFIX_ID_IN_RX_PREFIX_ID_OFST 0
-#define MC_CMD_QUERY_RX_PREFIX_ID_IN_RX_PREFIX_ID_LEN 4
-
-/* MC_CMD_QUERY_RX_PREFIX_ID_OUT msgresponse */
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMIN 4
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMAX 252
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LEN(num) (4+1*(num))
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_NUM(len) (((len)-4)/1)
-/* An enum describing the structure of this response. */
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_OFST 0
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_LEN 1
-/* enum: The response is of format RX_PREFIX_FIXED_RESPONSE */
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_FIXED 0x0
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESERVED_OFST 1
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESERVED_LEN 3
-/* The response. Its format is as defined by the RESPONSE_TYPE value */
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_OFST 4
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_LEN 1
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MINNUM 0
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MAXNUM 248
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MAXNUM_MCDI2 1016
-
-
-/***********************************/
-/* MC_CMD_BUNDLE
- * A command to perform various bundle-related operations on insecure cards.
- */
-#define MC_CMD_BUNDLE 0x13d
-#undef MC_CMD_0x13d_PRIVILEGE_CTG
-
-#define MC_CMD_0x13d_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_BUNDLE_IN msgrequest */
-#define MC_CMD_BUNDLE_IN_LEN 4
-/* Sub-command code */
-#define MC_CMD_BUNDLE_IN_OP_OFST 0
-#define MC_CMD_BUNDLE_IN_OP_LEN 4
-/* enum: Get the current host access mode set on component partitions. */
-#define MC_CMD_BUNDLE_IN_OP_COMPONENT_ACCESS_GET 0x0
-/* enum: Set the host access mode set on component partitions. */
-#define MC_CMD_BUNDLE_IN_OP_COMPONENT_ACCESS_SET 0x1
-
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN msgrequest: Retrieve the current
- * access mode on component partitions such as MC_FIRMWARE, SUC_FIRMWARE and
- * EXPANSION_UEFI. This command only works on engineering (insecure) cards. On
- * secure adapters, this command returns MC_CMD_ERR_EPERM.
- */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_LEN 4
-/* Sub-command code. Must be OP_COMPONENT_ACCESS_GET. */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_OP_OFST 0
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_OP_LEN 4
-
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT msgresponse: Returns the access
- * control mode.
- */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_LEN 4
-/* Access mode of component partitions. */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_ACCESS_MODE_OFST 0
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_ACCESS_MODE_LEN 4
-/* enum: Component partitions are read-only from the host. */
-#define MC_CMD_BUNDLE_COMPONENTS_READ_ONLY 0x0
-/* enum: Component partitions can be read from and written to by the host. */
-#define MC_CMD_BUNDLE_COMPONENTS_READ_WRITE 0x1
-
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN msgrequest: The component
- * partitions such as MC_FIRMWARE, SUC_FIRMWARE, EXPANSION_UEFI are set as
- * read-only on firmware built with bundle support. This command marks these
- * partitions as read/writeable. The access status set by this command does not
- * persist across MC reboots. This command only works on engineering (insecure)
- * cards. On secure adapters, this command returns MC_CMD_ERR_EPERM.
- */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_LEN 8
-/* Sub-command code. Must be OP_COMPONENT_ACCESS_SET. */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_OP_OFST 0
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_OP_LEN 4
-/* Access mode of component partitions. */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_ACCESS_MODE_OFST 4
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_ACCESS_MODE_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT/ACCESS_MODE */
-
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_OUT msgresponse */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_OUT_LEN 0
-
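A minimal sketch of issuing the GET sub-command, assuming the sfc driver's MCDI helpers (MCDI_DECLARE_BUF, MCDI_SET_DWORD, MCDI_GET_DWORD, efx_mcdi_rpc from mcdi.h); those helpers are not part of this header and the function below is illustrative only:

static int bundle_get_access_mode(struct efx_nic *efx, u32 *mode)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, BUNDLE_OP_COMPONENT_ACCESS_GET_IN_OP,
		       MC_CMD_BUNDLE_IN_OP_COMPONENT_ACCESS_GET);
	rc = efx_mcdi_rpc(efx, MC_CMD_BUNDLE, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;	/* EPERM is expected on secure adapters */
	if (outlen < MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_LEN)
		return -EIO;
	*mode = MCDI_GET_DWORD(outbuf,
			       BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_ACCESS_MODE);
	return 0;	/* *mode is READ_ONLY (0x0) or READ_WRITE (0x1) */
}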
-
-/***********************************/
-/* MC_CMD_GET_VPD
- * Read all VPD starting from a given address
- */
-#define MC_CMD_GET_VPD 0x165
-#undef MC_CMD_0x165_PRIVILEGE_CTG
-
-#define MC_CMD_0x165_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_VPD_IN msgrequest */
-#define MC_CMD_GET_VPD_IN_LEN 4
-/* VPD address to start from. If the VPD is longer than the MCDI buffer
- * (unlikely), the caller can make multiple calls with different starting
- * addresses.
- */
-#define MC_CMD_GET_VPD_IN_ADDR_OFST 0
-#define MC_CMD_GET_VPD_IN_ADDR_LEN 4
-
-/* MC_CMD_GET_VPD_OUT msgresponse */
-#define MC_CMD_GET_VPD_OUT_LENMIN 0
-#define MC_CMD_GET_VPD_OUT_LENMAX 252
-#define MC_CMD_GET_VPD_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_GET_VPD_OUT_LEN(num) (0+1*(num))
-#define MC_CMD_GET_VPD_OUT_DATA_NUM(len) (((len)-0)/1)
-/* VPD data returned. */
-#define MC_CMD_GET_VPD_OUT_DATA_OFST 0
-#define MC_CMD_GET_VPD_OUT_DATA_LEN 1
-#define MC_CMD_GET_VPD_OUT_DATA_MINNUM 0
-#define MC_CMD_GET_VPD_OUT_DATA_MAXNUM 252
-#define MC_CMD_GET_VPD_OUT_DATA_MAXNUM_MCDI2 1020
-
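The ADDR field above lets the VPD be read in chunks when it exceeds one MCDI buffer. A sketch of such a loop, again assuming the sfc MCDI helpers (hypothetical function, not part of this patch); the MC is assumed to return an empty payload once the end of the VPD is reached:

static ssize_t read_all_vpd(struct efx_nic *efx, u8 *dest, size_t dest_len)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_VPD_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VPD_OUT_LENMAX);
	size_t copied = 0, outlen;
	int rc;

	while (copied < dest_len) {
		MCDI_SET_DWORD(inbuf, GET_VPD_IN_ADDR, copied);
		rc = efx_mcdi_rpc(efx, MC_CMD_GET_VPD, inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		if (!outlen)
			break;		/* end of VPD */
		outlen = min(outlen, dest_len - copied);
		memcpy(dest + copied, MCDI_PTR(outbuf, GET_VPD_OUT_DATA),
		       outlen);
		copied += outlen;
	}
	return copied;
}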
-
-/***********************************/
-/* MC_CMD_GET_NCSI_INFO
- * Provide information about the NC-SI stack
- */
-#define MC_CMD_GET_NCSI_INFO 0x167
-#undef MC_CMD_0x167_PRIVILEGE_CTG
-
-#define MC_CMD_0x167_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_NCSI_INFO_IN msgrequest */
-#define MC_CMD_GET_NCSI_INFO_IN_LEN 8
-/* Operation to be performed */
-#define MC_CMD_GET_NCSI_INFO_IN_OP_OFST 0
-#define MC_CMD_GET_NCSI_INFO_IN_OP_LEN 4
-/* enum: Information on the link settings. */
-#define MC_CMD_GET_NCSI_INFO_IN_OP_LINK 0x0
-/* enum: Statistics associated with the channel */
-#define MC_CMD_GET_NCSI_INFO_IN_OP_STATISTICS 0x1
-/* The NC-SI channel on which the operation is to be performed */
-#define MC_CMD_GET_NCSI_INFO_IN_CHANNEL_OFST 4
-#define MC_CMD_GET_NCSI_INFO_IN_CHANNEL_LEN 4
-
-/* MC_CMD_GET_NCSI_INFO_LINK_OUT msgresponse */
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_LEN 12
-/* Settings as received from BMC. */
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_SETTINGS_OFST 0
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_SETTINGS_LEN 4
-/* Advertised capabilities applied to channel. */
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ADV_CAP_OFST 4
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ADV_CAP_LEN 4
-/* General status */
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATUS_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATUS_LEN 4
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_LBN 0
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_WIDTH 2
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_LBN 2
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_WIDTH 1
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_LBN 3
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_WIDTH 1
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_LBN 4
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_WIDTH 1
-
-/* MC_CMD_GET_NCSI_INFO_STATISTICS_OUT msgresponse */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_LEN 28
-/* The number of NC-SI commands received. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMDS_RX_OFST 0
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMDS_RX_LEN 4
-/* The number of NC-SI commands dropped. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_PKTS_DROPPED_OFST 4
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_PKTS_DROPPED_LEN 4
-/* The number of invalid NC-SI commands received. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_TYPE_ERRS_OFST 8
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_TYPE_ERRS_LEN 4
-/* The number of checksum errors seen. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_CSUM_ERRS_OFST 12
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_CSUM_ERRS_LEN 4
-/* The number of NC-SI requests received. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_RX_PKTS_OFST 16
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_RX_PKTS_LEN 4
-/* The number of NC-SI responses sent (includes AENs) */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_TX_PKTS_OFST 20
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_TX_PKTS_LEN 4
-/* The number of NC-SI AENs sent */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_OFST 24
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_LEN 4
-
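The STATUS word above is broken into sub-fields using the _LBN/_WIDTH convention: LBN is the least-significant bit of the field within the dword at the named _OFST, and WIDTH is its size in bits. A generic extraction helper (hypothetical names, valid for widths below 32):

static inline u32 mcdi_bitfield(u32 dword, unsigned int lbn, unsigned int width)
{
	return (dword >> lbn) & ((1U << width) - 1);
}

/* e.g. the 2-bit channel STATE, given the 32-bit word read at STATUS_OFST */
static u32 ncsi_link_state(u32 status)
{
	return mcdi_bitfield(status, MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_LBN,
			     MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_WIDTH);
}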
/* CLIENT_HANDLE structuredef: A client is an abstract entity that can make
* requests of the device and that can own resources managed by the device.
* Examples of clients include PCIe functions and dynamic clients. A client
@@ -23848,59 +21670,10 @@
#define CLIENT_HANDLE_OPAQUE_LBN 0
#define CLIENT_HANDLE_OPAQUE_WIDTH 32
-/* CLOCK_INFO structuredef: Information about a single hardware clock */
-#define CLOCK_INFO_LEN 28
-/* Enumeration that uniquely identifies the clock */
-#define CLOCK_INFO_CLOCK_ID_OFST 0
-#define CLOCK_INFO_CLOCK_ID_LEN 2
-/* enum: The Riverhead CMC (card MC) */
-#define CLOCK_INFO_CLOCK_CMC 0x0
-/* enum: The Riverhead NMC (network MC) */
-#define CLOCK_INFO_CLOCK_NMC 0x1
-/* enum: The Riverhead SDNET slice main logic */
-#define CLOCK_INFO_CLOCK_SDNET 0x2
-/* enum: The Riverhead SDNET LUT */
-#define CLOCK_INFO_CLOCK_SDNET_LUT 0x3
-/* enum: The Riverhead SDNET control logic */
-#define CLOCK_INFO_CLOCK_SDNET_CTRL 0x4
-/* enum: The Riverhead Streaming SubSystem */
-#define CLOCK_INFO_CLOCK_SSS 0x5
-/* enum: The Riverhead network MAC and associated CSR registers */
-#define CLOCK_INFO_CLOCK_MAC 0x6
-#define CLOCK_INFO_CLOCK_ID_LBN 0
-#define CLOCK_INFO_CLOCK_ID_WIDTH 16
-/* Assorted flags */
-#define CLOCK_INFO_FLAGS_OFST 2
-#define CLOCK_INFO_FLAGS_LEN 2
-#define CLOCK_INFO_SETTABLE_OFST 2
-#define CLOCK_INFO_SETTABLE_LBN 0
-#define CLOCK_INFO_SETTABLE_WIDTH 1
-#define CLOCK_INFO_FLAGS_LBN 16
-#define CLOCK_INFO_FLAGS_WIDTH 16
-/* The frequency in HZ */
-#define CLOCK_INFO_FREQUENCY_OFST 4
-#define CLOCK_INFO_FREQUENCY_LEN 8
-#define CLOCK_INFO_FREQUENCY_LO_OFST 4
-#define CLOCK_INFO_FREQUENCY_LO_LEN 4
-#define CLOCK_INFO_FREQUENCY_LO_LBN 32
-#define CLOCK_INFO_FREQUENCY_LO_WIDTH 32
-#define CLOCK_INFO_FREQUENCY_HI_OFST 8
-#define CLOCK_INFO_FREQUENCY_HI_LEN 4
-#define CLOCK_INFO_FREQUENCY_HI_LBN 64
-#define CLOCK_INFO_FREQUENCY_HI_WIDTH 32
-#define CLOCK_INFO_FREQUENCY_LBN 32
-#define CLOCK_INFO_FREQUENCY_WIDTH 64
-/* Human-readable ASCII name for clock, with NUL termination */
-#define CLOCK_INFO_NAME_OFST 12
-#define CLOCK_INFO_NAME_LEN 1
-#define CLOCK_INFO_NAME_NUM 16
-#define CLOCK_INFO_NAME_LBN 96
-#define CLOCK_INFO_NAME_WIDTH 8
-
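64-bit fields such as FREQUENCY are split into _LO/_HI dwords, and MCDI payloads are little-endian. A sketch of reassembling the value from one CLOCK_INFO entry, assuming the kernel's get_unaligned_le32() helper and <linux/types.h>:

static u64 clock_info_frequency(const u8 *info)
{
	u32 lo = get_unaligned_le32(info + CLOCK_INFO_FREQUENCY_LO_OFST);
	u32 hi = get_unaligned_le32(info + CLOCK_INFO_FREQUENCY_HI_OFST);

	return ((u64)hi << 32) | lo;	/* frequency in Hz */
}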
/* SCHED_CREDIT_CHECK_RESULT structuredef */
#define SCHED_CREDIT_CHECK_RESULT_LEN 16
-/* The instance of the scheduler. Refer to XN-200389-AW for the location of
- * these schedulers in the hardware.
+/* The instance of the scheduler. Refer to XN-200389-AW (snic/hnic) and
+ * XN-200425-TC (cdx) for the location of these schedulers in the hardware.
*/
#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_OFST 0
#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_LEN 1
@@ -23914,6 +21687,16 @@
#define SCHED_CREDIT_CHECK_RESULT_DMAC_H2C 0x7 /* enum */
#define SCHED_CREDIT_CHECK_RESULT_HUB_NET_B 0x8 /* enum */
#define SCHED_CREDIT_CHECK_RESULT_HUB_NET_REPLAY 0x9 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_ADAPTER_C2H_C 0xa /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_A2_H2C_C 0xb /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_A3_SOFT_ADAPTOR_C 0xc /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_A4_DPU_WRITE_C 0xd /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_JRC_RRU 0xe /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_CDM_SINK 0xf /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_PCIE_SINK 0x10 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_UPORT_SINK 0x11 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_PSX_SINK 0x12 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_A5_DPU_READ_C 0x13 /* enum */
#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_LBN 0
#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_WIDTH 8
/* The type of node that this result refers to. */
@@ -23923,6 +21706,10 @@
#define SCHED_CREDIT_CHECK_RESULT_DEST 0x0
/* enum: Source node */
#define SCHED_CREDIT_CHECK_RESULT_SOURCE 0x1
+/* enum: Destination node credit type 1 (new to the Keystone schedulers, see
+ * SF-120268-TC)
+ */
+#define SCHED_CREDIT_CHECK_RESULT_DEST_CREDIT1 0x2
#define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_LBN 8
#define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_WIDTH 8
/* Level of node in scheduler hierarchy (level 0 is the bottom of the
@@ -23950,592 +21737,6 @@
/***********************************/
-/* MC_CMD_GET_CLOCKS_INFO
- * Get information about the device clocks
- */
-#define MC_CMD_GET_CLOCKS_INFO 0x166
-#undef MC_CMD_0x166_PRIVILEGE_CTG
-
-#define MC_CMD_0x166_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_CLOCKS_INFO_IN msgrequest */
-#define MC_CMD_GET_CLOCKS_INFO_IN_LEN 0
-
-/* MC_CMD_GET_CLOCKS_INFO_OUT msgresponse */
-#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMIN 0
-#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMAX 252
-#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMAX_MCDI2 1008
-#define MC_CMD_GET_CLOCKS_INFO_OUT_LEN(num) (0+28*(num))
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_NUM(len) (((len)-0)/28)
-/* An array of CLOCK_INFO structures. */
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_OFST 0
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_LEN 28
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MINNUM 0
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MAXNUM 9
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MAXNUM_MCDI2 36
-
-
-/***********************************/
-/* MC_CMD_VNIC_ENCAP_RULE_ADD
- * Add a rule for detecting encapsulations in the VNIC stage. Currently this
- * only affects checksum validation in VNIC RX - on TX the send descriptor
- * explicitly specifies encapsulation. These rules are per-VNIC, i.e. only
- * apply to the current driver. If a rule matches, then the packet is
- * considered to have the corresponding encapsulation type, and the inner
- * packet is parsed. It is up to the driver to ensure that overlapping rules
- * are not inserted. (If a packet would match multiple rules, a random one of
- * them will be used.) A rule with the exact same match criteria may not be
- * inserted twice (EALREADY). Only a limited number of MATCH_FLAGS values are
- * supported, use MC_CMD_GET_PARSER_DISP_INFO with OP
- * OP_GET_SUPPORTED_VNIC_ENCAP_RULE_MATCHES to get a list of supported
- * combinations. Each driver may only have a limited set of active rules -
- * returns ENOSPC if the caller's table is full.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD 0x16d
-#undef MC_CMD_0x16d_PRIVILEGE_CTG
-
-#define MC_CMD_0x16d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VNIC_ENCAP_RULE_ADD_IN msgrequest */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_LEN 36
-/* Set to MAE_MPORT_SELECTOR_ASSIGNED. In the future this may be relaxed. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_OFST 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_LEN 4
-/* Any non-zero bits other than the ones named below or an unsupported
- * combination will cause the NIC to return EOPNOTSUPP. In the future more
- * flags may be added.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_LEN 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_LBN 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_LBN 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_LBN 2
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_LBN 3
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_LBN 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_WIDTH 1
-/* Only if MATCH_ETHER_TYPE is set. Ethertype value as bytes in network order.
- * Currently only IPv4 (0x0800) and IPv6 (0x86DD) ethertypes may be used.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_OFST 8
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_LEN 2
-/* Only if MATCH_OUTER_VLAN is set. VID value as bytes in network order.
- * (Deprecated)
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_LBN 80
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WIDTH 12
-/* Only if MATCH_OUTER_VLAN is set. Aligned wrapper for OUTER_VLAN_VID. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_OFST 10
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_LEN 2
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_OFST 10
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_LBN 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_WIDTH 12
-/* Only if MATCH_DST_IP is set. IP address as bytes in network order. In the
- * case of IPv4, the IP should be in the first 4 bytes and all other bytes
- * should be zero.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_OFST 12
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_LEN 16
-/* Only if MATCH_IP_PROTO is set. Currently only UDP proto (17) may be used. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_OFST 28
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_LEN 1
-/* Actions that should be applied to packets that match the rule. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_OFST 29
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_LEN 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_OFST 29
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_LBN 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_OFST 29
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_LBN 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_OFST 29
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_LBN 2
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_WIDTH 1
-/* Only if MATCH_DST_PORT is set. Port number as bytes in network order. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_OFST 30
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_LEN 2
-/* Resulting encapsulation type, as per MAE_MCDI_ENCAP_TYPE enumeration. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_OFST 32
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_LEN 4
-
-/* MC_CMD_VNIC_ENCAP_RULE_ADD_OUT msgresponse */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_LEN 4
-/* Handle to inserted rule. Used for removing the rule. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_OFST 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_LEN 4
-
-
-/***********************************/
-/* MC_CMD_VNIC_ENCAP_RULE_REMOVE
- * Remove a VNIC encapsulation rule. Packets which would have previously
- * matched the rule will then be considered as unencapsulated. Returns EALREADY
- * if the input HANDLE doesn't correspond to an existing rule.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE 0x16e
-#undef MC_CMD_0x16e_PRIVILEGE_CTG
-
-#define MC_CMD_0x16e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN msgrequest */
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_LEN 4
-/* Handle which was returned by MC_CMD_VNIC_ENCAP_RULE_ADD. */
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_OFST 0
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_LEN 4
-
-/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT msgresponse */
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT_LEN 0
-
-/* UUID structuredef: An RFC4122 standard UUID. The values here are stored in
- * the endianness specified by the RFC; users should ignore the broken-out
- * fields and instead do straight memory copies to ensure correct ordering.
- */
-#define UUID_LEN 16
-#define UUID_TIME_LOW_OFST 0
-#define UUID_TIME_LOW_LEN 4
-#define UUID_TIME_LOW_LBN 0
-#define UUID_TIME_LOW_WIDTH 32
-#define UUID_TIME_MID_OFST 4
-#define UUID_TIME_MID_LEN 2
-#define UUID_TIME_MID_LBN 32
-#define UUID_TIME_MID_WIDTH 16
-#define UUID_TIME_HI_LBN 52
-#define UUID_TIME_HI_WIDTH 12
-#define UUID_VERSION_LBN 48
-#define UUID_VERSION_WIDTH 4
-#define UUID_RESERVED_LBN 64
-#define UUID_RESERVED_WIDTH 2
-#define UUID_CLK_SEQ_LBN 66
-#define UUID_CLK_SEQ_WIDTH 14
-#define UUID_NODE_OFST 10
-#define UUID_NODE_LEN 6
-#define UUID_NODE_LBN 80
-#define UUID_NODE_WIDTH 48
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_ALLOC
- * Create a handle to a datapath plugin's extension. This involves finding a
- * currently-loaded plugin offering the given functionality (as identified by
- * the UUID) and allocating a handle to track the usage of it. Plugin
- * functionality is identified by 'extension' rather than any other identifier
- * so that a single plugin bitfile may offer more than one piece of independent
- * functionality. If two bitfiles are loaded which both offer the same
- * extension, then the metadata is interrogated further to determine which is
- * the newest and that is the one opened. See SF-123625-SW for architectural
- * detail on datapath plugins.
- */
-#define MC_CMD_PLUGIN_ALLOC 0x1ad
-#undef MC_CMD_0x1ad_PRIVILEGE_CTG
-
-#define MC_CMD_0x1ad_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_ALLOC_IN msgrequest */
-#define MC_CMD_PLUGIN_ALLOC_IN_LEN 24
-/* The functionality requested of the plugin, as a UUID structure */
-#define MC_CMD_PLUGIN_ALLOC_IN_UUID_OFST 0
-#define MC_CMD_PLUGIN_ALLOC_IN_UUID_LEN 16
-/* Additional options for opening the handle */
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAGS_OFST 16
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAGS_LEN 4
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_OFST 16
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_LBN 0
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_WIDTH 1
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_OFST 16
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_LBN 1
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_WIDTH 1
-/* Load the extension only if it is in the specified administrative group.
- * Specify ANY to load the extension wherever it is found (if there are
- * multiple choices then the extension with the highest MINOR_VER/PATCH_VER
- * will be loaded). See MC_CMD_PLUGIN_GET_META_GLOBAL for a description of
- * administrative groups.
- */
-#define MC_CMD_PLUGIN_ALLOC_IN_ADMIN_GROUP_OFST 20
-#define MC_CMD_PLUGIN_ALLOC_IN_ADMIN_GROUP_LEN 2
-/* enum: Load the extension from any ADMIN_GROUP. */
-#define MC_CMD_PLUGIN_ALLOC_IN_ANY 0xffff
-/* Reserved */
-#define MC_CMD_PLUGIN_ALLOC_IN_RESERVED_OFST 22
-#define MC_CMD_PLUGIN_ALLOC_IN_RESERVED_LEN 2
-
-/* MC_CMD_PLUGIN_ALLOC_OUT msgresponse */
-#define MC_CMD_PLUGIN_ALLOC_OUT_LEN 4
-/* Unique identifier of this usage */
-#define MC_CMD_PLUGIN_ALLOC_OUT_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_ALLOC_OUT_HANDLE_LEN 4
-
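A sketch of allocating a plugin handle, following the UUID guidance above (a straight 16-byte copy rather than per-field writes). It again assumes the sfc MCDI helpers; the function is illustrative, not part of this patch:

static int plugin_alloc(struct efx_nic *efx, const u8 *uuid, u32 *handle)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_PLUGIN_ALLOC_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_PLUGIN_ALLOC_OUT_LEN);
	size_t outlen;
	int rc;

	/* Straight copy of the 16-byte UUID, per the guidance above */
	memcpy(MCDI_PTR(inbuf, PLUGIN_ALLOC_IN_UUID), uuid,
	       MC_CMD_PLUGIN_ALLOC_IN_UUID_LEN);
	MCDI_SET_DWORD(inbuf, PLUGIN_ALLOC_IN_FLAGS, 0);
	/* ADMIN_GROUP is left at zero for brevity; a real caller would
	 * probably want MC_CMD_PLUGIN_ALLOC_IN_ANY here.
	 */
	rc = efx_mcdi_rpc(efx, MC_CMD_PLUGIN_ALLOC, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_PLUGIN_ALLOC_OUT_LEN)
		return -EIO;
	*handle = MCDI_GET_DWORD(outbuf, PLUGIN_ALLOC_OUT_HANDLE);
	return 0;
}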
-
-/***********************************/
-/* MC_CMD_PLUGIN_FREE
- * Delete a handle to a plugin's extension.
- */
-#define MC_CMD_PLUGIN_FREE 0x1ae
-#undef MC_CMD_0x1ae_PRIVILEGE_CTG
-
-#define MC_CMD_0x1ae_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_FREE_IN msgrequest */
-#define MC_CMD_PLUGIN_FREE_IN_LEN 4
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_FREE_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_FREE_IN_HANDLE_LEN 4
-
-/* MC_CMD_PLUGIN_FREE_OUT msgresponse */
-#define MC_CMD_PLUGIN_FREE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_GET_META_GLOBAL
- * Returns the global metadata applying to the whole plugin extension. See the
- * other metadata calls for subtypes of data.
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL 0x1af
-#undef MC_CMD_0x1af_PRIVILEGE_CTG
-
-#define MC_CMD_0x1af_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_GET_META_GLOBAL_IN msgrequest */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_LEN 4
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_HANDLE_LEN 4
-
-/* MC_CMD_PLUGIN_GET_META_GLOBAL_OUT msgresponse */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_LEN 36
-/* Unique identifier of this plugin extension. This is identical to the value
- * which was requested when the handle was allocated.
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_UUID_OFST 0
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_UUID_LEN 16
-/* semver sub-version of this plugin extension */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MINOR_VER_OFST 16
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MINOR_VER_LEN 2
-/* semver micro-version of this plugin extension */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PATCH_VER_OFST 18
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PATCH_VER_LEN 2
-/* Number of different messages which can be sent to this extension */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_NUM_MSGS_OFST 20
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_NUM_MSGS_LEN 4
-/* Byte offset within the VI window of the plugin's mapped CSR window. */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_OFFSET_OFST 24
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_OFFSET_LEN 2
-/* Number of bytes mapped through to the plugin's CSRs. 0 if that feature was
- * not requested by the plugin (in which case MAPPED_CSR_OFFSET and
- * MAPPED_CSR_FLAGS are ignored).
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_SIZE_OFST 26
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_SIZE_LEN 2
-/* Flags indicating how to perform the CSR window mapping. */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAGS_OFST 28
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAGS_LEN 4
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_OFST 28
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_LBN 0
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_WIDTH 1
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_OFST 28
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_LBN 1
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_WIDTH 1
-/* Identifier of the set of extensions which all change state together.
- * Extensions having the same ADMIN_GROUP will always load and unload at the
- * same time. ADMIN_GROUP values themselves are arbitrary (but they contain a
- * generation number as an implementation detail to ensure that they're not
- * reused rapidly).
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_ADMIN_GROUP_OFST 32
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_ADMIN_GROUP_LEN 1
-/* Bitshift in MC_CMD_DEVEL_CLIENT_PRIVILEGE_MODIFY's MASK parameters
- * corresponding to this extension, i.e. set the bit 1<<PRIVILEGE_BIT to permit
- * access to this extension.
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PRIVILEGE_BIT_OFST 33
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PRIVILEGE_BIT_LEN 1
-/* Reserved */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_RESERVED_OFST 34
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_RESERVED_LEN 2
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_GET_META_PUBLISHER
- * Returns metadata supplied by the plugin author which describes this
- * extension in a human-readable way. Contrast with
- * MC_CMD_PLUGIN_GET_META_GLOBAL, which returns information needed for software
- * to operate.
- */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER 0x1b0
-#undef MC_CMD_0x1b0_PRIVILEGE_CTG
-
-#define MC_CMD_0x1b0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_GET_META_PUBLISHER_IN msgrequest */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_LEN 12
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_HANDLE_LEN 4
-/* Category of data to return */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_SUBTYPE_OFST 4
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_SUBTYPE_LEN 4
-/* enum: Top-level information about the extension. The returned data is an
- * array of key/value pairs using the keys in RFC5013 (Dublin Core) to describe
- * the extension. The data is a back-to-back list of zero-terminated strings;
- * the even-numbered fields (0,2,4,...) are keys and their following odd-
- * numbered fields are the corresponding values. Both keys and values are
- * nominally UTF-8. Per RFC5013, the same key may be repeated any number of
- * times. Note that all information (including the key/value structure itself
- * and the UTF-8 encoding) may have been provided by the plugin author, so
- * callers must be cautious about parsing it. Callers should parse only the
- * top-level structure to separate out the keys and values; the contents of the
- * values is not expected to be machine-readable.
- */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_EXTENSION_KVS 0x0
-/* Byte position of the data to be returned within the full data block of the
- * given SUBTYPE.
- */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_OFFSET_OFST 8
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_OFFSET_LEN 4
-
-/* MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT msgresponse */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LENMIN 4
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LENMAX 252
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LEN(num) (4+1*(num))
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_NUM(len) (((len)-4)/1)
-/* Full length of the data block of the requested SUBTYPE, in bytes. */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_TOTAL_SIZE_OFST 0
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_TOTAL_SIZE_LEN 4
-/* The information requested by SUBTYPE. */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_OFST 4
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_LEN 1
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_MINNUM 0
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_MAXNUM 248
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_MAXNUM_MCDI2 1016
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_GET_META_MSG
- * Returns the simple metadata for a specific plugin request message. This
- * supplies information necessary for the host to know how to build an
- * MC_CMD_PLUGIN_REQ request.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG 0x1b1
-#undef MC_CMD_0x1b1_PRIVILEGE_CTG
-
-#define MC_CMD_0x1b1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_GET_META_MSG_IN msgrequest */
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_LEN 8
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_HANDLE_LEN 4
-/* Unique message ID to obtain */
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_ID_OFST 4
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_ID_LEN 4
-
-/* MC_CMD_PLUGIN_GET_META_MSG_OUT msgresponse */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_LEN 44
-/* Unique message ID. This is the same value as the input parameter; it exists
- * to allow future MCDI extensions which enumerate all messages.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_ID_OFST 0
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_ID_LEN 4
-/* Packed index number of this message, assigned by the MC to give each message
- * a unique ID in an array to allow for more efficient storage/management.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_INDEX_OFST 4
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_INDEX_LEN 4
-/* Short human-readable codename for this message. This is conventionally
- * formatted as a C identifier in the basic ASCII character set with any spare
- * bytes at the end set to 0, however this convention is not enforced by the MC
- * so consumers must check for all potential malformations before using it for
- * a trusted purpose.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_NAME_OFST 8
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_NAME_LEN 32
-/* Number of bytes of data which must be passed from the host kernel to the MC
- * for this message's payload, and which are passed back again in the response.
- * The MC's plugin metadata loader will have validated that the number of bytes
- * specified here will fit in to MC_CMD_PLUGIN_REQ_IN_DATA in a single MCDI
- * message.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_DATA_SIZE_OFST 40
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_DATA_SIZE_LEN 4
-
-/* PLUGIN_EXTENSION structuredef: Used within MC_CMD_PLUGIN_GET_ALL to describe
- * an individual extension.
- */
-#define PLUGIN_EXTENSION_LEN 20
-#define PLUGIN_EXTENSION_UUID_OFST 0
-#define PLUGIN_EXTENSION_UUID_LEN 16
-#define PLUGIN_EXTENSION_UUID_LBN 0
-#define PLUGIN_EXTENSION_UUID_WIDTH 128
-#define PLUGIN_EXTENSION_ADMIN_GROUP_OFST 16
-#define PLUGIN_EXTENSION_ADMIN_GROUP_LEN 1
-#define PLUGIN_EXTENSION_ADMIN_GROUP_LBN 128
-#define PLUGIN_EXTENSION_ADMIN_GROUP_WIDTH 8
-#define PLUGIN_EXTENSION_FLAG_ENABLED_LBN 136
-#define PLUGIN_EXTENSION_FLAG_ENABLED_WIDTH 1
-#define PLUGIN_EXTENSION_RESERVED_LBN 137
-#define PLUGIN_EXTENSION_RESERVED_WIDTH 23
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_GET_ALL
- * Returns a list of all plugin extensions currently loaded and available. The
- * UUIDs returned can be passed to MC_CMD_PLUGIN_ALLOC in order to obtain more
- * detailed metadata via the MC_CMD_PLUGIN_GET_META_* family of requests. The
- * ADMIN_GROUP field collects how extensions are grouped in to units which are
- * loaded/unloaded together; extensions with the same value are in the same
- * group.
- */
-#define MC_CMD_PLUGIN_GET_ALL 0x1b2
-#undef MC_CMD_0x1b2_PRIVILEGE_CTG
-
-#define MC_CMD_0x1b2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_GET_ALL_IN msgrequest */
-#define MC_CMD_PLUGIN_GET_ALL_IN_LEN 4
-/* Additional options for querying. Note that if neither FLAG_INCLUDE_ENABLED
- * nor FLAG_INCLUDE_DISABLED are specified then the result set will be empty.
- */
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAGS_OFST 0
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAGS_LEN 4
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_ENABLED_OFST 0
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_ENABLED_LBN 0
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_ENABLED_WIDTH 1
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_DISABLED_OFST 0
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_DISABLED_LBN 1
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_DISABLED_WIDTH 1
-
-/* MC_CMD_PLUGIN_GET_ALL_OUT msgresponse */
-#define MC_CMD_PLUGIN_GET_ALL_OUT_LENMIN 0
-#define MC_CMD_PLUGIN_GET_ALL_OUT_LENMAX 240
-#define MC_CMD_PLUGIN_GET_ALL_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PLUGIN_GET_ALL_OUT_LEN(num) (0+20*(num))
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_NUM(len) (((len)-0)/20)
-/* The list of available plugin extensions, as an array of PLUGIN_EXTENSION
- * structs.
- */
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_OFST 0
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_LEN 20
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_MINNUM 0
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_MAXNUM 12
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_MAXNUM_MCDI2 51
-
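The EXTENSIONS array is another variable-length payload: _NUM(len) gives the element count and _LEN the stride. A sketch of copying out the UUIDs (hypothetical helper, plain C over the response buffer):

static unsigned int plugin_copy_uuids(const u8 *outbuf, size_t outlen,
				      u8 (*uuids)[PLUGIN_EXTENSION_UUID_LEN],
				      unsigned int max)
{
	unsigned int n = MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_NUM(outlen);
	unsigned int i;

	for (i = 0; i < n && i < max; i++) {
		const u8 *ext = outbuf +
			MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_OFST +
			i * MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_LEN;

		memcpy(uuids[i], ext + PLUGIN_EXTENSION_UUID_OFST,
		       PLUGIN_EXTENSION_UUID_LEN);
	}
	return i;
}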
-
-/***********************************/
-/* MC_CMD_PLUGIN_REQ
- * Send a command to a plugin. A plugin may define an arbitrary number of
- * 'messages' which it allows applications on the host system to send, each
- * identified by a 32-bit ID.
- */
-#define MC_CMD_PLUGIN_REQ 0x1b3
-#undef MC_CMD_0x1b3_PRIVILEGE_CTG
-
-#define MC_CMD_0x1b3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_REQ_IN msgrequest */
-#define MC_CMD_PLUGIN_REQ_IN_LENMIN 8
-#define MC_CMD_PLUGIN_REQ_IN_LENMAX 252
-#define MC_CMD_PLUGIN_REQ_IN_LENMAX_MCDI2 1020
-#define MC_CMD_PLUGIN_REQ_IN_LEN(num) (8+1*(num))
-#define MC_CMD_PLUGIN_REQ_IN_DATA_NUM(len) (((len)-8)/1)
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_REQ_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_REQ_IN_HANDLE_LEN 4
-/* Message ID defined by the plugin author */
-#define MC_CMD_PLUGIN_REQ_IN_ID_OFST 4
-#define MC_CMD_PLUGIN_REQ_IN_ID_LEN 4
-/* Data blob being the parameter to the message. This must be of the length
- * specified by MC_CMD_PLUGIN_GET_META_MSG_IN_MCDI_PARAM_SIZE.
- */
-#define MC_CMD_PLUGIN_REQ_IN_DATA_OFST 8
-#define MC_CMD_PLUGIN_REQ_IN_DATA_LEN 1
-#define MC_CMD_PLUGIN_REQ_IN_DATA_MINNUM 0
-#define MC_CMD_PLUGIN_REQ_IN_DATA_MAXNUM 244
-#define MC_CMD_PLUGIN_REQ_IN_DATA_MAXNUM_MCDI2 1012
-
-/* MC_CMD_PLUGIN_REQ_OUT msgresponse */
-#define MC_CMD_PLUGIN_REQ_OUT_LENMIN 0
-#define MC_CMD_PLUGIN_REQ_OUT_LENMAX 252
-#define MC_CMD_PLUGIN_REQ_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PLUGIN_REQ_OUT_LEN(num) (0+1*(num))
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_NUM(len) (((len)-0)/1)
-/* The input data, as transformed and/or updated by the plugin's eBPF. Will be
- * the same size as the input DATA parameter.
- */
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_OFST 0
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_LEN 1
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_MINNUM 0
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_MAXNUM 252
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_MAXNUM_MCDI2 1020
-
-/* DESC_ADDR_REGION structuredef: Describes a contiguous region of DESC_ADDR
- * space that maps to a contiguous region of TRGT_ADDR space. Addresses
- * DESC_ADDR in the range [DESC_ADDR_BASE:DESC_ADDR_BASE + 1 <<
- * WINDOW_SIZE_LOG2) map to TRGT_ADDR = DESC_ADDR - DESC_ADDR_BASE +
- * TRGT_ADDR_BASE.
- */
-#define DESC_ADDR_REGION_LEN 32
-/* The start of the region in DESC_ADDR space. */
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_OFST 0
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LEN 8
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_OFST 0
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_LEN 4
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_LBN 0
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_WIDTH 32
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_OFST 4
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_LEN 4
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_LBN 32
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_WIDTH 32
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LBN 0
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_WIDTH 64
-/* The start of the region in TRGT_ADDR space. Drivers can set this via
- * MC_CMD_SET_DESC_ADDR_REGIONS.
- */
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_OFST 8
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LEN 8
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_OFST 8
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_LEN 4
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_LBN 64
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_WIDTH 32
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_OFST 12
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_LEN 4
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_LBN 96
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_WIDTH 32
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LBN 64
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_WIDTH 64
-/* The size of the region. */
-#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_OFST 16
-#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_LEN 4
-#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_LBN 128
-#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_WIDTH 32
-/* The alignment restriction on TRGT_ADDR. TRGT_ADDR values set by the driver
- * must be a multiple of 1 << TRGT_ADDR_ALIGN_LOG2.
- */
-#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_OFST 20
-#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_LEN 4
-#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_LBN 160
-#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_WIDTH 32
-#define DESC_ADDR_REGION_RSVD_OFST 24
-#define DESC_ADDR_REGION_RSVD_LEN 8
-#define DESC_ADDR_REGION_RSVD_LO_OFST 24
-#define DESC_ADDR_REGION_RSVD_LO_LEN 4
-#define DESC_ADDR_REGION_RSVD_LO_LBN 192
-#define DESC_ADDR_REGION_RSVD_LO_WIDTH 32
-#define DESC_ADDR_REGION_RSVD_HI_OFST 28
-#define DESC_ADDR_REGION_RSVD_HI_LEN 4
-#define DESC_ADDR_REGION_RSVD_HI_LBN 224
-#define DESC_ADDR_REGION_RSVD_HI_WIDTH 32
-#define DESC_ADDR_REGION_RSVD_LBN 192
-#define DESC_ADDR_REGION_RSVD_WIDTH 64
-
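A small helper spelling out the mapping described in the structure comment above; the struct and function are hypothetical, holding fields already decoded from a DESC_ADDR_REGION:

struct desc_addr_region {
	u64 desc_addr_base;
	u64 trgt_addr_base;
	u32 window_size_log2;
};

static bool desc_addr_to_trgt_addr(const struct desc_addr_region *r,
				   u64 desc_addr, u64 *trgt_addr)
{
	u64 window = 1ULL << r->window_size_log2;

	if (desc_addr < r->desc_addr_base ||
	    desc_addr - r->desc_addr_base >= window)
		return false;	/* not covered by this region */

	*trgt_addr = desc_addr - r->desc_addr_base + r->trgt_addr_base;
	return true;
}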
-
-/***********************************/
/* MC_CMD_GET_DESC_ADDR_INFO
* Returns a description of the mapping from DESC_ADDR to TRGT_ADDR for the calling function's address space.
*/
@@ -24836,122 +22037,6 @@
/***********************************/
-/* MC_CMD_GET_BOARD_ATTR
- * Retrieve physical build-level board attributes as configured at
- * manufacturing stage. Fields originate from EEPROM and per-platform constants
- * in firmware. Fields are used in development to identify/differentiate
- * boards based on build levels/parameters, and also in manufacturing to
- * cross-check that "what was programmed in manufacturing" is the same as
- * "what firmware thinks has been programmed", since there are two layers of
- * translation within firmware before the attributes reach this MCDI handler.
- * Some parameters are retrieved as part of other commands and therefore not
- * replicated here. See GET_VERSION_OUT.
- */
-#define MC_CMD_GET_BOARD_ATTR 0x1c6
-#undef MC_CMD_0x1c6_PRIVILEGE_CTG
-
-#define MC_CMD_0x1c6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_BOARD_ATTR_IN msgrequest */
-#define MC_CMD_GET_BOARD_ATTR_IN_LEN 0
-
-/* MC_CMD_GET_BOARD_ATTR_OUT msgresponse */
-#define MC_CMD_GET_BOARD_ATTR_OUT_LEN 16
-/* Defines board capabilities and validity of attributes returned in this
- * response-message.
- */
-#define MC_CMD_GET_BOARD_ATTR_OUT_FLAGS_OFST 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_FLAGS_LEN 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_FAN_OFST 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_FAN_LBN 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_FAN_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_OFST 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_LBN 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_AUX_POWER_OFST 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_AUX_POWER_LBN 2
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_AUX_POWER_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_ATTRIBUTES_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_ATTRIBUTES_LEN 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_SOC_EE_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_SOC_EE_LBN 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_SOC_EE_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_SUC_EE_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_SUC_EE_LBN 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_SUC_EE_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_FPGA_VOLTAGES_SUPPORTED_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_FPGA_VOLTAGES_SUPPORTED_LBN 16
-#define MC_CMD_GET_BOARD_ATTR_OUT_FPGA_VOLTAGES_SUPPORTED_WIDTH 8
-/* enum: The FPGA voltage on the adapter can be set to low */
-#define MC_CMD_FPGA_VOLTAGE_LOW 0x0
-/* enum: The FPGA voltage on the adapter can be set to regular */
-#define MC_CMD_FPGA_VOLTAGE_REG 0x1
-/* enum: The FPGA voltage on the adapter can be set to high */
-#define MC_CMD_FPGA_VOLTAGE_HIGH 0x2
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_LBN 24
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_WIDTH 8
-/* An array of cage types on the board */
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_OFST 8
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_LEN 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_NUM 8
-/* enum: The cages are not known */
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_UNKNOWN 0x0
-/* enum: The cages are SFP/SFP+ */
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_SFP 0x1
-/* enum: The cages are QSFP/QSFP+ */
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_QSFP 0x2
-
-
-/***********************************/
-/* MC_CMD_GET_SOC_STATE
- * Retrieve current state of the System-on-Chip. This command is valid when
- * MC_CMD_GET_BOARD_ATTR:HAS_SOC is set.
- */
-#define MC_CMD_GET_SOC_STATE 0x1c7
-#undef MC_CMD_0x1c7_PRIVILEGE_CTG
-
-#define MC_CMD_0x1c7_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_SOC_STATE_IN msgrequest */
-#define MC_CMD_GET_SOC_STATE_IN_LEN 0
-
-/* MC_CMD_GET_SOC_STATE_OUT msgresponse */
-#define MC_CMD_GET_SOC_STATE_OUT_LEN 12
-/* Status flags for the SoC */
-#define MC_CMD_GET_SOC_STATE_OUT_FLAGS_OFST 0
-#define MC_CMD_GET_SOC_STATE_OUT_FLAGS_LEN 4
-#define MC_CMD_GET_SOC_STATE_OUT_SHOULD_THROTTLE_OFST 0
-#define MC_CMD_GET_SOC_STATE_OUT_SHOULD_THROTTLE_LBN 0
-#define MC_CMD_GET_SOC_STATE_OUT_SHOULD_THROTTLE_WIDTH 1
-#define MC_CMD_GET_SOC_STATE_OUT_OS_RECOVERY_REQUIRED_OFST 0
-#define MC_CMD_GET_SOC_STATE_OUT_OS_RECOVERY_REQUIRED_LBN 1
-#define MC_CMD_GET_SOC_STATE_OUT_OS_RECOVERY_REQUIRED_WIDTH 1
-#define MC_CMD_GET_SOC_STATE_OUT_WDT_FIRED_OFST 0
-#define MC_CMD_GET_SOC_STATE_OUT_WDT_FIRED_LBN 2
-#define MC_CMD_GET_SOC_STATE_OUT_WDT_FIRED_WIDTH 1
-/* Status fields for the SoC */
-#define MC_CMD_GET_SOC_STATE_OUT_ATTRIBUTES_OFST 4
-#define MC_CMD_GET_SOC_STATE_OUT_ATTRIBUTES_LEN 4
-#define MC_CMD_GET_SOC_STATE_OUT_RUN_STATE_OFST 4
-#define MC_CMD_GET_SOC_STATE_OUT_RUN_STATE_LBN 0
-#define MC_CMD_GET_SOC_STATE_OUT_RUN_STATE_WIDTH 8
-/* enum: Power on (set by SUC on power up) */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_BOOT 0x0
-/* enum: Running bootloader */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_BOOTLOADER 0x1
-/* enum: Bootloader has started OS. OS is booting */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_OS_START 0x2
-/* enum: OS is running */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_OS_RUNNING 0x3
-/* enum: Maintenance OS is running */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_OS_MAINTENANCE 0x4
-/* Number of SoC resets since power on */
-#define MC_CMD_GET_SOC_STATE_OUT_RESET_COUNT_OFST 8
-#define MC_CMD_GET_SOC_STATE_OUT_RESET_COUNT_LEN 4
-
-
-/***********************************/
/* MC_CMD_CHECK_SCHEDULER_CREDITS
* For debugging purposes. For each source and destination node in the hardware
* schedulers, check whether the number of credits is as it should be. This
@@ -25010,76 +22095,6 @@
/***********************************/
-/* MC_CMD_TXQ_STATS
- * Query per-TXQ statistics.
- */
-#define MC_CMD_TXQ_STATS 0x1d5
-#undef MC_CMD_0x1d5_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TXQ_STATS_IN msgrequest */
-#define MC_CMD_TXQ_STATS_IN_LEN 8
-/* Instance of TXQ to retrieve statistics for */
-#define MC_CMD_TXQ_STATS_IN_INSTANCE_OFST 0
-#define MC_CMD_TXQ_STATS_IN_INSTANCE_LEN 4
-/* Flags for the request */
-#define MC_CMD_TXQ_STATS_IN_FLAGS_OFST 4
-#define MC_CMD_TXQ_STATS_IN_FLAGS_LEN 4
-#define MC_CMD_TXQ_STATS_IN_CLEAR_OFST 4
-#define MC_CMD_TXQ_STATS_IN_CLEAR_LBN 0
-#define MC_CMD_TXQ_STATS_IN_CLEAR_WIDTH 1
-
-/* MC_CMD_TXQ_STATS_OUT msgresponse */
-#define MC_CMD_TXQ_STATS_OUT_LENMIN 0
-#define MC_CMD_TXQ_STATS_OUT_LENMAX 248
-#define MC_CMD_TXQ_STATS_OUT_LENMAX_MCDI2 1016
-#define MC_CMD_TXQ_STATS_OUT_LEN(num) (0+8*(num))
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_NUM(len) (((len)-0)/8)
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_OFST 0
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LEN 8
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_OFST 0
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_LEN 4
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_LBN 0
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_WIDTH 32
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_OFST 4
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_LEN 4
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_LBN 32
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_WIDTH 32
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MINNUM 0
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MAXNUM 31
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MAXNUM_MCDI2 127
-#define MC_CMD_TXQ_STATS_CTPIO_MAX_FILL 0x0 /* enum */
-
-/* FUNCTION_PERSONALITY structuredef: The meanings of the personalities are
- * defined in SF-120734-TC with more information in SF-122717-TC.
- */
-#define FUNCTION_PERSONALITY_LEN 4
-#define FUNCTION_PERSONALITY_ID_OFST 0
-#define FUNCTION_PERSONALITY_ID_LEN 4
-/* enum: Function has no assigned personality */
-#define FUNCTION_PERSONALITY_NULL 0x0
-/* enum: Function has an EF100-style function control window and VI windows
- * with both EF100 and vDPA doorbells.
- */
-#define FUNCTION_PERSONALITY_EF100 0x1
-/* enum: Function has virtio net device configuration registers and doorbells
- * for virtio queue pairs.
- */
-#define FUNCTION_PERSONALITY_VIRTIO_NET 0x2
-/* enum: Function has virtio block device configuration registers and a
- * doorbell for a single virtqueue.
- */
-#define FUNCTION_PERSONALITY_VIRTIO_BLK 0x3
-/* enum: Function is a Xilinx acceleration device - management function */
-#define FUNCTION_PERSONALITY_ACCEL_MGMT 0x4
-/* enum: Function is a Xilinx acceleration device - user function */
-#define FUNCTION_PERSONALITY_ACCEL_USR 0x5
-#define FUNCTION_PERSONALITY_ID_LBN 0
-#define FUNCTION_PERSONALITY_ID_WIDTH 32
-
-
-/***********************************/
/* MC_CMD_VIRTIO_GET_FEATURES
* Get a list of the virtio features supported by the device.
*/
@@ -25162,37 +22177,6 @@
/***********************************/
-/* MC_CMD_VIRTIO_GET_CAPABILITIES
- * Get virtio capabilities supported by the device. Returns general virtio
- * capabilities and limitations of the hardware / firmware implementation
- * (hardware device as a whole), rather than that of individual configured
- * virtio devices. At present, only the absolute maximum number of queues
- * allowed on multi-queue devices is returned. Response is expected to be
- * extended as necessary in the future.
- */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES 0x1d3
-#undef MC_CMD_0x1d3_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VIRTIO_GET_CAPABILITIES_IN msgrequest */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_LEN 4
-/* Type of device to get capabilities for. Matches the device id as defined by
- * the virtio spec.
- */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_DEVICE_ID_OFST 0
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_DEVICE_ID_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_VIRTIO_GET_FEATURES/MC_CMD_VIRTIO_GET_FEATURES_IN/DEVICE_ID */
-
-/* MC_CMD_VIRTIO_GET_CAPABILITIES_OUT msgresponse */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_LEN 4
-/* Maximum number of queues supported for a single device instance */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_MAX_QUEUES_OFST 0
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_MAX_QUEUES_LEN 4
-
-
-/***********************************/
/* MC_CMD_VIRTIO_INIT_QUEUE
* Create a virtio virtqueue. Fails with EALREADY if the queue already exists.
* Fails with ENOSUP if a feature is requested that isn't supported. Fails with
@@ -25474,866 +22458,6 @@
#define PCIE_FUNCTION_INTF_LBN 32
#define PCIE_FUNCTION_INTF_WIDTH 32
-/* QUEUE_ID structuredef: Structure representing an absolute queue identifier
- * (absolute VI number + VI relative queue number). On Keystone, a VI can
- * contain multiple queues (at present, up to 2), each with separate controls
- * for direction. This structure is required to uniquely identify the absolute
- * source queue for descriptor proxy functions.
- */
-#define QUEUE_ID_LEN 4
-/* Absolute VI number */
-#define QUEUE_ID_ABS_VI_OFST 0
-#define QUEUE_ID_ABS_VI_LEN 2
-#define QUEUE_ID_ABS_VI_LBN 0
-#define QUEUE_ID_ABS_VI_WIDTH 16
-/* Relative queue number within the VI */
-#define QUEUE_ID_REL_QUEUE_LBN 16
-#define QUEUE_ID_REL_QUEUE_WIDTH 1
-#define QUEUE_ID_RESERVED_LBN 17
-#define QUEUE_ID_RESERVED_WIDTH 15
-
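Packing an absolute queue identifier from its parts follows directly from the LBN/WIDTH values above; a hypothetical helper:

static u32 queue_id_pack(u16 abs_vi, unsigned int rel_queue)
{
	return ((u32)abs_vi << QUEUE_ID_ABS_VI_LBN) |
	       ((rel_queue & ((1U << QUEUE_ID_REL_QUEUE_WIDTH) - 1)) <<
		QUEUE_ID_REL_QUEUE_LBN);
}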
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_CREATE
- * Descriptor proxy functions are abstract devices that forward all requests
- * submitted to the host PCIe function (descriptors submitted to Virtio or
- * EF100 queues) to be handled on another function (most commonly on the
- * embedded Application Processor), via EF100 descriptor proxy, memory-to-
- * memory and descriptor-to-completion mechanisms. Primary user is Virtio-blk
- * subsystem, see SF-122927-TC. This function allocates a new descriptor proxy
- * function on the host and assigns a user-defined label. The actual function
- * configuration is not persisted until the caller configures it with
- * MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN and commits with
- * MC_CMD_DESC_PROXY_FUNC_COMMIT_IN.
- */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE 0x172
-#undef MC_CMD_0x172_PRIVILEGE_CTG
-
-#define MC_CMD_0x172_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_CREATE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LEN 52
-/* PCIe Function ID to allocate (as struct PCIE_FUNCTION). Set to
- * {PF_ANY,VF_ANY,interface} for "any available function". Set to
- * {PF_ANY,VF_NULL,interface} for "any available PF".
- */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LEN 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_LBN 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_LBN 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_PF_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_PF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_VF_OFST 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_VF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_INTF_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_INTF_LEN 4
-/* The personality to set. The meanings of the personalities are defined in
- * SF-120734-TC with more information in SF-122717-TC. At present, we only
- * support proxying for VIRTIO_BLK
- */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_PERSONALITY_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_PERSONALITY_LEN 4
-/* Enum values, see field(s): */
-/* FUNCTION_PERSONALITY/ID */
-/* User-defined label (zero-terminated ASCII string) to uniquely identify the
- * function
- */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LABEL_OFST 12
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LABEL_LEN 40
-
-/* MC_CMD_DESC_PROXY_FUNC_CREATE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_LEN 12
-/* Handle to the descriptor proxy function */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_HANDLE_LEN 4
-/* Allocated function ID (as struct PCIE_FUNCTION) */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LEN 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_LBN 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_LBN 64
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_PF_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_PF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_VF_OFST 6
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_VF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_INTF_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_INTF_LEN 4
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_DESTROY
- * Remove an existing descriptor proxy function. Underlying function
- * personality and configuration reverts back to factory default. Function
- * configuration is committed immediately to specified store and any function
- * ownership is released.
- */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY 0x173
-#undef MC_CMD_0x173_PRIVILEGE_CTG
-
-#define MC_CMD_0x173_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_DESTROY_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LEN 44
-/* User-defined label (zero-terminated ASCII string) to uniquely identify the
- * function
- */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LABEL_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LABEL_LEN 40
-/* Store from which to remove function configuration */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_STORE_OFST 40
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_STORE_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_DESC_PROXY_FUNC_COMMIT/MC_CMD_DESC_PROXY_FUNC_COMMIT_IN/STORE */
-
-/* MC_CMD_DESC_PROXY_FUNC_DESTROY_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_OUT_LEN 0
-
-/* VIRTIO_BLK_CONFIG structuredef: Virtio block device configuration. See
- * Virtio specification v1.1, Sections 5.2.3 and 6 for definition of feature
- * bits. See Virtio specification v1.1, Section 5.2.4 (struct
- * virtio_blk_config) for definition of remaining configuration fields
- */
-#define VIRTIO_BLK_CONFIG_LEN 68
-/* Virtio block device features to advertise, per Virtio 1.1, 5.2.3 and 6 */
-#define VIRTIO_BLK_CONFIG_FEATURES_OFST 0
-#define VIRTIO_BLK_CONFIG_FEATURES_LEN 8
-#define VIRTIO_BLK_CONFIG_FEATURES_LO_OFST 0
-#define VIRTIO_BLK_CONFIG_FEATURES_LO_LEN 4
-#define VIRTIO_BLK_CONFIG_FEATURES_LO_LBN 0
-#define VIRTIO_BLK_CONFIG_FEATURES_LO_WIDTH 32
-#define VIRTIO_BLK_CONFIG_FEATURES_HI_OFST 4
-#define VIRTIO_BLK_CONFIG_FEATURES_HI_LEN 4
-#define VIRTIO_BLK_CONFIG_FEATURES_HI_LBN 32
-#define VIRTIO_BLK_CONFIG_FEATURES_HI_WIDTH 32
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_LBN 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_LBN 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_LBN 2
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_LBN 4
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_LBN 5
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_LBN 6
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_LBN 7
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_LBN 9
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_LBN 10
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_LBN 11
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_LBN 12
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_LBN 13
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_LBN 14
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_LBN 28
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_LBN 29
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_LBN 32
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_LBN 33
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_LBN 34
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_LBN 35
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_LBN 36
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_LBN 37
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_LBN 38
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_WIDTH 1
-#define VIRTIO_BLK_CONFIG_FEATURES_LBN 0
-#define VIRTIO_BLK_CONFIG_FEATURES_WIDTH 64
-/* The capacity of the device (expressed in 512-byte sectors) */
-#define VIRTIO_BLK_CONFIG_CAPACITY_OFST 8
-#define VIRTIO_BLK_CONFIG_CAPACITY_LEN 8
-#define VIRTIO_BLK_CONFIG_CAPACITY_LO_OFST 8
-#define VIRTIO_BLK_CONFIG_CAPACITY_LO_LEN 4
-#define VIRTIO_BLK_CONFIG_CAPACITY_LO_LBN 64
-#define VIRTIO_BLK_CONFIG_CAPACITY_LO_WIDTH 32
-#define VIRTIO_BLK_CONFIG_CAPACITY_HI_OFST 12
-#define VIRTIO_BLK_CONFIG_CAPACITY_HI_LEN 4
-#define VIRTIO_BLK_CONFIG_CAPACITY_HI_LBN 96
-#define VIRTIO_BLK_CONFIG_CAPACITY_HI_WIDTH 32
-#define VIRTIO_BLK_CONFIG_CAPACITY_LBN 64
-#define VIRTIO_BLK_CONFIG_CAPACITY_WIDTH 64
-/* Maximum size of any single segment. Only valid when VIRTIO_BLK_F_SIZE_MAX is
- * set.
- */
-#define VIRTIO_BLK_CONFIG_SIZE_MAX_OFST 16
-#define VIRTIO_BLK_CONFIG_SIZE_MAX_LEN 4
-#define VIRTIO_BLK_CONFIG_SIZE_MAX_LBN 128
-#define VIRTIO_BLK_CONFIG_SIZE_MAX_WIDTH 32
-/* Maximum number of segments in a request. Only valid when
- * VIRTIO_BLK_F_SEG_MAX is set.
- */
-#define VIRTIO_BLK_CONFIG_SEG_MAX_OFST 20
-#define VIRTIO_BLK_CONFIG_SEG_MAX_LEN 4
-#define VIRTIO_BLK_CONFIG_SEG_MAX_LBN 160
-#define VIRTIO_BLK_CONFIG_SEG_MAX_WIDTH 32
-/* Disk-style geometry - cylinders. Only valid when VIRTIO_BLK_F_GEOMETRY is
- * set.
- */
-#define VIRTIO_BLK_CONFIG_CYLINDERS_OFST 24
-#define VIRTIO_BLK_CONFIG_CYLINDERS_LEN 2
-#define VIRTIO_BLK_CONFIG_CYLINDERS_LBN 192
-#define VIRTIO_BLK_CONFIG_CYLINDERS_WIDTH 16
-/* Disk-style geometry - heads. Only valid when VIRTIO_BLK_F_GEOMETRY is set.
- */
-#define VIRTIO_BLK_CONFIG_HEADS_OFST 26
-#define VIRTIO_BLK_CONFIG_HEADS_LEN 1
-#define VIRTIO_BLK_CONFIG_HEADS_LBN 208
-#define VIRTIO_BLK_CONFIG_HEADS_WIDTH 8
-/* Disk-style geometry - sectors. Only valid when VIRTIO_BLK_F_GEOMETRY is set.
- */
-#define VIRTIO_BLK_CONFIG_SECTORS_OFST 27
-#define VIRTIO_BLK_CONFIG_SECTORS_LEN 1
-#define VIRTIO_BLK_CONFIG_SECTORS_LBN 216
-#define VIRTIO_BLK_CONFIG_SECTORS_WIDTH 8
-/* Block size of disk. Only valid when VIRTIO_BLK_F_BLK_SIZE is set. */
-#define VIRTIO_BLK_CONFIG_BLK_SIZE_OFST 28
-#define VIRTIO_BLK_CONFIG_BLK_SIZE_LEN 4
-#define VIRTIO_BLK_CONFIG_BLK_SIZE_LBN 224
-#define VIRTIO_BLK_CONFIG_BLK_SIZE_WIDTH 32
-/* Block topology - number of logical blocks per physical block (log2). Only
- * valid when VIRTIO_BLK_F_TOPOLOGY is set.
- */
-#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_OFST 32
-#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_LEN 1
-#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_LBN 256
-#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_WIDTH 8
-/* Block topology - offset of first aligned logical block. Only valid when
- * VIRTIO_BLK_F_TOPOLOGY is set.
- */
-#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_OFST 33
-#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_LEN 1
-#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_LBN 264
-#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_WIDTH 8
-/* Block topology - suggested minimum I/O size in blocks. Only valid when
- * VIRTIO_BLK_F_TOPOLOGY is set.
- */
-#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_OFST 34
-#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_LEN 2
-#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_LBN 272
-#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_WIDTH 16
-/* Block topology - optimal (suggested maximum) I/O size in blocks. Only valid
- * when VIRTIO_BLK_F_TOPOLOGY is set.
- */
-#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_OFST 36
-#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_LEN 4
-#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_LBN 288
-#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_WIDTH 32
-/* Unused, set to zero. Note that virtio_blk_config.writeback is volatile and
- * not carried in config data.
- */
-#define VIRTIO_BLK_CONFIG_UNUSED0_OFST 40
-#define VIRTIO_BLK_CONFIG_UNUSED0_LEN 2
-#define VIRTIO_BLK_CONFIG_UNUSED0_LBN 320
-#define VIRTIO_BLK_CONFIG_UNUSED0_WIDTH 16
-/* Number of queues. Only valid if the VIRTIO_BLK_F_MQ feature is negotiated.
- */
-#define VIRTIO_BLK_CONFIG_NUM_QUEUES_OFST 42
-#define VIRTIO_BLK_CONFIG_NUM_QUEUES_LEN 2
-#define VIRTIO_BLK_CONFIG_NUM_QUEUES_LBN 336
-#define VIRTIO_BLK_CONFIG_NUM_QUEUES_WIDTH 16
-/* Maximum discard sectors size, in 512-byte units. Only valid if
- * VIRTIO_BLK_F_DISCARD is set.
- */
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_OFST 44
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_LEN 4
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_LBN 352
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_WIDTH 32
-/* Maximum discard segment number. Only valid if VIRTIO_BLK_F_DISCARD is set.
- */
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_OFST 48
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_LEN 4
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_LBN 384
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_WIDTH 32
-/* Discard sector alignment, in 512-byte units. Only valid if
- * VIRTIO_BLK_F_DISCARD is set.
- */
-#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_OFST 52
-#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_LEN 4
-#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_LBN 416
-#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_WIDTH 32
-/* Maximum write zeroes sectors size, in 512-byte units. Only valid if
- * VIRTIO_BLK_F_WRITE_ZEROES is set.
- */
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_OFST 56
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_LEN 4
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_LBN 448
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_WIDTH 32
-/* Maximum write zeroes segment number. Only valid if VIRTIO_BLK_F_WRITE_ZEROES
- * is set.
- */
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_OFST 60
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_LEN 4
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_LBN 480
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_WIDTH 32
-/* Write zeroes request can result in deallocating one or more sectors. Only
- * valid if VIRTIO_BLK_F_WRITE_ZEROES is set.
- */
-#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_OFST 64
-#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_LEN 1
-#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_LBN 512
-#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_WIDTH 8
-/* Unused, set to zero. */
-#define VIRTIO_BLK_CONFIG_UNUSED1_OFST 65
-#define VIRTIO_BLK_CONFIG_UNUSED1_LEN 3
-#define VIRTIO_BLK_CONFIG_UNUSED1_LBN 520
-#define VIRTIO_BLK_CONFIG_UNUSED1_WIDTH 24
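For orientation, here is a minimal sketch of how a caller could pack this layout before handing it to MC_CMD_DESC_PROXY_FUNC_CONFIG_SET below. It assumes the kernel's unaligned little-endian helpers (put_unaligned_le16/32/64) and <linux/types.h>; the example_* function name and its parameters are invented for illustration and only a few of the fields are shown.

/*
 * Sketch only: pack selected VIRTIO_BLK_CONFIG fields at the byte offsets
 * documented above. The caller must also advertise the matching feature
 * bits (e.g. VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_MQ)
 * in the FEATURES field for these values to be meaningful.
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/unaligned.h>	/* <asm/unaligned.h> on older kernels */

static void example_pack_virtio_blk_config(u8 *cfg, u64 capacity_sectors,
					   u32 seg_max, u32 blk_size,
					   u16 num_queues)
{
	/* UNUSED1 is the last field, so 65 + 3 = 68 bytes in total */
	memset(cfg, 0, VIRTIO_BLK_CONFIG_UNUSED1_OFST +
		       VIRTIO_BLK_CONFIG_UNUSED1_LEN);
	put_unaligned_le64(capacity_sectors,
			   cfg + VIRTIO_BLK_CONFIG_CAPACITY_OFST);
	put_unaligned_le32(seg_max, cfg + VIRTIO_BLK_CONFIG_SEG_MAX_OFST);
	put_unaligned_le32(blk_size, cfg + VIRTIO_BLK_CONFIG_BLK_SIZE_OFST);
	put_unaligned_le16(num_queues, cfg + VIRTIO_BLK_CONFIG_NUM_QUEUES_OFST);
}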
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET
- * Set configuration for an existing descriptor proxy function. Configuration
- * data must match function personality. The actual function configuration is
- * not persisted until the caller commits with MC_CMD_DESC_PROXY_FUNC_COMMIT_IN
- */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET 0x174
-#undef MC_CMD_0x174_PRIVILEGE_CTG
-
-#define MC_CMD_0x174_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMIN 20
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMAX 252
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMAX_MCDI2 1020
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LEN(num) (20+1*(num))
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_NUM(len) (((len)-20)/1)
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_HANDLE_LEN 4
-/* Reserved for future extension, set to zero. */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_RESERVED_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_RESERVED_LEN 16
-/* Configuration data. The format of the configuration data is determined
- * implicitly from the function personality referred to by HANDLE. Currently,
- * the only supported format is VIRTIO_BLK_CONFIG.
- */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_OFST 20
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_LEN 1
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MINNUM 0
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MAXNUM 232
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MAXNUM_MCDI2 1000
-
-/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_OUT_LEN 0
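A hedged sketch of issuing the command above: it assumes the sfc driver's MCDI helpers (MCDI_DECLARE_BUF, MCDI_SET_DWORD, MCDI_PTR, efx_mcdi_rpc) and struct efx_nic, none of which are defined in this header, and the example_* name is hypothetical.

/*
 * Sketch: push a cfg_len-byte VIRTIO_BLK_CONFIG blob to an open descriptor
 * proxy function. MCDI_DECLARE_BUF() zero-fills the buffer, so the 16
 * RESERVED bytes at offset 4 are already zero.
 */
static int example_desc_proxy_config_set(struct efx_nic *efx, u32 handle,
					 const u8 *cfg, size_t cfg_len)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMAX);

	/* 232 bytes of config fit in an MCDI1 request (up to 1000 for MCDI2) */
	if (cfg_len > MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MAXNUM)
		return -EINVAL;

	MCDI_SET_DWORD(inbuf, DESC_PROXY_FUNC_CONFIG_SET_IN_HANDLE, handle);
	memcpy(MCDI_PTR(inbuf, DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG),
	       cfg, cfg_len);

	return efx_mcdi_rpc(efx, MC_CMD_DESC_PROXY_FUNC_CONFIG_SET, inbuf,
			    MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LEN(cfg_len),
			    NULL, 0, NULL);
}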
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_COMMIT
- * Commit function configuration to non-volatile or volatile store. Once
- * configuration is applied to hardware (which may happen immediately or on
- * next function/device reset) a DESC_PROXY_FUNC_CONFIG_SET MCDI event will be
- * delivered to the caller's MCDI event queue.
- */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT 0x175
-#undef MC_CMD_0x175_PRIVILEGE_CTG
-
-#define MC_CMD_0x175_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_COMMIT_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_LEN 8
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_HANDLE_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_STORE_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_STORE_LEN 4
-/* enum: Store into non-volatile (dynamic) config */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_NON_VOLATILE 0x0
-/* enum: Store into volatile (ephemeral) config */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_VOLATILE 0x1
-
-/* MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_LEN 4
-/* Generation count to be delivered in an event once configuration becomes live
- */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_CONFIG_GENERATION_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_CONFIG_GENERATION_LEN 4
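Continuing the sketch under the same assumptions (sfc MCDI helpers, hypothetical example_* names): committing to the volatile store and capturing the generation count that the later DESC_PROXY_FUNC_CONFIG_SET event should carry.

/* Sketch: commit previously-set configuration to the volatile store. */
static int example_desc_proxy_commit(struct efx_nic *efx, u32 handle,
				     u32 *generation_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, DESC_PROXY_FUNC_COMMIT_IN_HANDLE, handle);
	MCDI_SET_DWORD(inbuf, DESC_PROXY_FUNC_COMMIT_IN_STORE,
		       MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_VOLATILE);
	rc = efx_mcdi_rpc(efx, MC_CMD_DESC_PROXY_FUNC_COMMIT, inbuf,
			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	*generation_out = MCDI_DWORD(outbuf,
				     DESC_PROXY_FUNC_COMMIT_OUT_CONFIG_GENERATION);
	return 0;
}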
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_OPEN
- * Retrieve a handle for an existing descriptor proxy function. Returns an
- * integer handle, valid until the function is deallocated, the MC is rebooted
- * or a power-cycle occurs. Returns ENODEV if no function with the given label
- * exists.
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN 0x176
-#undef MC_CMD_0x176_PRIVILEGE_CTG
-
-#define MC_CMD_0x176_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_OPEN_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LEN 40
-/* User-defined label (zero-terminated ASCII string) to uniquely identify the
- * function
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LABEL_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LABEL_LEN 40
-
-/* MC_CMD_DESC_PROXY_FUNC_OPEN_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMIN 40
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMAX 252
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LEN(num) (40+1*(num))
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_NUM(len) (((len)-40)/1)
-/* Handle to the descriptor proxy function */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_HANDLE_LEN 4
-/* PCIe Function ID (as struct PCIE_FUNCTION) */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LEN 8
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_LBN 32
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_LBN 64
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_PF_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_PF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_VF_OFST 6
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_VF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_INTF_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_INTF_LEN 4
-/* Function personality */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PERSONALITY_OFST 12
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PERSONALITY_LEN 4
-/* Enum values, see field(s): */
-/* FUNCTION_PERSONALITY/ID */
-/* Function configuration state */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_STATUS_OFST 16
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_STATUS_LEN 4
-/* enum: Function configuration is visible to the host (live) */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LIVE 0x0
-/* enum: Function configuration is pending reset */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PENDING 0x1
-/* enum: Function configuration is missing (created, but no configuration
- * committed)
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_UNCONFIGURED 0x2
-/* Generation count to be delivered in an event once the configuration becomes
- * live (if status is "pending")
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_GENERATION_OFST 20
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_GENERATION_LEN 4
-/* Reserved for future extension, set to zero. */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_RESERVED_OFST 24
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_RESERVED_LEN 16
-/* Configuration data corresponding to the function personality. Currently,
- * the only supported format is VIRTIO_BLK_CONFIG. Not valid if status is
- * UNCONFIGURED.
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_OFST 40
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_LEN 1
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MINNUM 0
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MAXNUM 212
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MAXNUM_MCDI2 980
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_CLOSE
- * Releases a handle for an open descriptor proxy function. If proxying was
- * enabled on the device, the caller is expected to gracefully stop it using
- * MC_CMD_DESC_PROXY_FUNC_DISABLE prior to calling this function. Closing an
- * active device without disabling proxying will result in a forced close,
- * which will put the device into a failed state and signal the error to the
- * host driver (for virtio, the DEVICE_NEEDS_RESET flag would be set on the
- * host side)
- */
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE 0x1a1
-#undef MC_CMD_0x1a1_PRIVILEGE_CTG
-
-#define MC_CMD_0x1a1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_CLOSE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_LEN 4
-/* Handle to the descriptor proxy function */
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_HANDLE_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_CLOSE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE_OUT_LEN 0
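A sketch of the handle lifecycle described by OPEN and CLOSE, again assuming the sfc MCDI helpers and struct efx_nic; the example_* names are invented. The label buffer is fixed at 40 bytes and must be NUL-terminated.

static int example_desc_proxy_open(struct efx_nic *efx, const char *label,
				   u32 *handle_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMAX);
	size_t outlen;
	int rc;

	strscpy((char *)MCDI_PTR(inbuf, DESC_PROXY_FUNC_OPEN_IN_LABEL), label,
		MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LABEL_LEN);
	rc = efx_mcdi_rpc(efx, MC_CMD_DESC_PROXY_FUNC_OPEN, inbuf,
			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;	/* ENODEV if no function has this label */
	if (outlen < MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMIN)
		return -EIO;
	*handle_out = MCDI_DWORD(outbuf, DESC_PROXY_FUNC_OPEN_OUT_HANDLE);
	return 0;
}

/* Release the handle; proxying should be disabled first (see above). */
static int example_desc_proxy_close(struct efx_nic *efx, u32 handle)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_LEN);

	MCDI_SET_DWORD(inbuf, DESC_PROXY_FUNC_CLOSE_IN_HANDLE, handle);
	return efx_mcdi_rpc(efx, MC_CMD_DESC_PROXY_FUNC_CLOSE, inbuf,
			    sizeof(inbuf), NULL, 0, NULL);
}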
-
-/* DESC_PROXY_FUNC_MAP structuredef */
-#define DESC_PROXY_FUNC_MAP_LEN 52
-/* PCIe function ID (as struct PCIE_FUNCTION) */
-#define DESC_PROXY_FUNC_MAP_FUNC_OFST 0
-#define DESC_PROXY_FUNC_MAP_FUNC_LEN 8
-#define DESC_PROXY_FUNC_MAP_FUNC_LO_OFST 0
-#define DESC_PROXY_FUNC_MAP_FUNC_LO_LEN 4
-#define DESC_PROXY_FUNC_MAP_FUNC_LO_LBN 0
-#define DESC_PROXY_FUNC_MAP_FUNC_LO_WIDTH 32
-#define DESC_PROXY_FUNC_MAP_FUNC_HI_OFST 4
-#define DESC_PROXY_FUNC_MAP_FUNC_HI_LEN 4
-#define DESC_PROXY_FUNC_MAP_FUNC_HI_LBN 32
-#define DESC_PROXY_FUNC_MAP_FUNC_HI_WIDTH 32
-#define DESC_PROXY_FUNC_MAP_FUNC_LBN 0
-#define DESC_PROXY_FUNC_MAP_FUNC_WIDTH 64
-#define DESC_PROXY_FUNC_MAP_FUNC_PF_OFST 0
-#define DESC_PROXY_FUNC_MAP_FUNC_PF_LEN 2
-#define DESC_PROXY_FUNC_MAP_FUNC_PF_LBN 0
-#define DESC_PROXY_FUNC_MAP_FUNC_PF_WIDTH 16
-#define DESC_PROXY_FUNC_MAP_FUNC_VF_OFST 2
-#define DESC_PROXY_FUNC_MAP_FUNC_VF_LEN 2
-#define DESC_PROXY_FUNC_MAP_FUNC_VF_LBN 16
-#define DESC_PROXY_FUNC_MAP_FUNC_VF_WIDTH 16
-#define DESC_PROXY_FUNC_MAP_FUNC_INTF_OFST 4
-#define DESC_PROXY_FUNC_MAP_FUNC_INTF_LEN 4
-#define DESC_PROXY_FUNC_MAP_FUNC_INTF_LBN 32
-#define DESC_PROXY_FUNC_MAP_FUNC_INTF_WIDTH 32
-/* Function personality */
-#define DESC_PROXY_FUNC_MAP_PERSONALITY_OFST 8
-#define DESC_PROXY_FUNC_MAP_PERSONALITY_LEN 4
-/* Enum values, see field(s): */
-/* FUNCTION_PERSONALITY/ID */
-#define DESC_PROXY_FUNC_MAP_PERSONALITY_LBN 64
-#define DESC_PROXY_FUNC_MAP_PERSONALITY_WIDTH 32
-/* User-defined label (zero-terminated ASCII string) to uniquely identify the
- * function
- */
-#define DESC_PROXY_FUNC_MAP_LABEL_OFST 12
-#define DESC_PROXY_FUNC_MAP_LABEL_LEN 40
-#define DESC_PROXY_FUNC_MAP_LABEL_LBN 96
-#define DESC_PROXY_FUNC_MAP_LABEL_WIDTH 320
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_ENUM
- * Enumerate existing descriptor proxy functions
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM 0x177
-#undef MC_CMD_0x177_PRIVILEGE_CTG
-
-#define MC_CMD_0x177_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_ENUM_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_IN_LEN 4
-/* Starting index, set to 0 on first request. See
- * MC_CMD_DESC_PROXY_FUNC_ENUM_OUT/FLAGS.
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_IN_START_IDX_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_IN_START_IDX_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_ENUM_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMIN 4
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMAX 212
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMAX_MCDI2 992
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LEN(num) (4+52*(num))
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_NUM(len) (((len)-4)/52)
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FLAGS_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FLAGS_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_LBN 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_WIDTH 1
-/* Function map, as array of DESC_PROXY_FUNC_MAP */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_LEN 52
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MINNUM 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MAXNUM 4
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MAXNUM_MCDI2 19
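A paging sketch for the enumeration above, assuming the sfc MCDI helpers; advancing START_IDX by the number of entries already received is an assumption about the continuation convention implied by the MORE_DATA flag.

static int example_desc_proxy_enum(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DESC_PROXY_FUNC_ENUM_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMAX);
	u32 start_idx = 0;
	size_t outlen;
	int rc;

	do {
		unsigned int count;

		MCDI_SET_DWORD(inbuf, DESC_PROXY_FUNC_ENUM_IN_START_IDX,
			       start_idx);
		rc = efx_mcdi_rpc(efx, MC_CMD_DESC_PROXY_FUNC_ENUM, inbuf,
				  sizeof(inbuf), outbuf, sizeof(outbuf),
				  &outlen);
		if (rc)
			return rc;
		count = MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_NUM(outlen);
		if (!count)
			break;
		/* decode count DESC_PROXY_FUNC_MAP entries here */
		start_idx += count;
	} while (MCDI_DWORD(outbuf, DESC_PROXY_FUNC_ENUM_OUT_FLAGS) &
		 BIT(MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_LBN));

	return 0;
}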
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE
- * Enable descriptor proxying for function into target event queue. Returns VI
- * allocation info for the proxy source function, so that the caller can map
- * absolute VI IDs from descriptor proxy events back to the originating
- * function. This is a legacy function that only supports single queue proxy
- * devices. It is also limited in that it can only be called after host driver
- * attach (once VI allocation is known) and will return MC_CMD_ERR_ENOTCONN
- * otherwise. For new code, see MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE which
- * supports multi-queue devices and has no dependency on host driver attach.
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE 0x178
-#undef MC_CMD_0x178_PRIVILEGE_CTG
-
-#define MC_CMD_0x178_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_LEN 8
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_HANDLE_LEN 4
-/* Descriptor proxy sink queue (caller function relative). Must be an
- * extended-width event queue.
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_TARGET_EVQ_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_TARGET_EVQ_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_LEN 8
-/* The number of VIs allocated on the function */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_COUNT_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_COUNT_LEN 4
-/* The base absolute VI number allocated to the function. */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_BASE_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_BASE_LEN 4
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE
- * Enable descriptor proxying for a source queue on a host function into target
- * event queue. Source queue number is a relative virtqueue number on the
- * source function (0 to max_virtqueues-1). For a multi-queue device, the
- * caller must enable all source queues individually. To retrieve absolute VI
- * information for the source function (so that VI IDs from descriptor proxy
- * events can be mapped back to source function / queue) see
- * MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE 0x1d0
-#undef MC_CMD_0x1d0_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_LEN 12
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_HANDLE_LEN 4
-/* Source relative queue number to enable proxying on */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_SOURCE_QUEUE_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_SOURCE_QUEUE_LEN 4
-/* Descriptor proxy sink queue (caller function relative). Must be an
- * extended-width event queue.
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_TARGET_EVQ_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_TARGET_EVQ_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_OUT_LEN 0
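Since a multi-queue device must have each source queue enabled individually, the expected usage pattern is a loop like the following sketch (same assumptions about the sfc MCDI helpers; example_* names are hypothetical).

static int example_desc_proxy_enable_all(struct efx_nic *efx, u32 handle,
					 u32 target_evq, u32 num_virtqueues)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_LEN);
	u32 q;
	int rc;

	for (q = 0; q < num_virtqueues; q++) {
		MCDI_SET_DWORD(inbuf, DESC_PROXY_FUNC_ENABLE_QUEUE_IN_HANDLE,
			       handle);
		MCDI_SET_DWORD(inbuf,
			       DESC_PROXY_FUNC_ENABLE_QUEUE_IN_SOURCE_QUEUE, q);
		MCDI_SET_DWORD(inbuf,
			       DESC_PROXY_FUNC_ENABLE_QUEUE_IN_TARGET_EVQ,
			       target_evq);
		rc = efx_mcdi_rpc(efx, MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE,
				  inbuf, sizeof(inbuf), NULL, 0, NULL);
		if (rc)
			return rc;
	}
	return 0;
}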
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE
- * Disable descriptor proxying for function. For multi-queue functions,
- * disables all queues.
- */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE 0x179
-#undef MC_CMD_0x179_PRIVILEGE_CTG
-
-#define MC_CMD_0x179_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_LEN 4
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_HANDLE_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE
- * Disable descriptor proxying for a specific source queue on a function.
- */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE 0x1d1
-#undef MC_CMD_0x1d1_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_LEN 8
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_HANDLE_LEN 4
-/* Source relative queue number to disable proxying on */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_SOURCE_QUEUE_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_SOURCE_QUEUE_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_GET_VI_INFO
- * Returns absolute VI allocation information for the descriptor proxy source
- * function referenced by HANDLE, so that the caller can map absolute VI IDs
- * from descriptor proxy events back to the originating function and queue. The
- * call is only valid after the host driver for the source function has
- * attached (after receiving a driver attach event for the descriptor proxy
- * function) and will fail with ENOTCONN otherwise.
- */
-#define MC_CMD_DESC_PROXY_GET_VI_INFO 0x1d2
-#undef MC_CMD_0x1d2_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_GET_VI_INFO_IN msgrequest */
-#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_LEN 4
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_HANDLE_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMIN 0
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMAX 252
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_NUM(len) (((len)-0)/4)
-/* VI information (VI ID + VI relative queue number) for each of the source
- * queues (in order from 0 to max_virtqueues-1), as an array of QUEUE_ID
- * structures.
- */
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MINNUM 0
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MAXNUM 63
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MAXNUM_MCDI2 255
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_ABS_VI_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_ABS_VI_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_REL_QUEUE_LBN 16
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_REL_QUEUE_WIDTH 1
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_RESERVED_LBN 17
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_RESERVED_WIDTH 15
-
-
-/***********************************/
-/* MC_CMD_GET_ADDR_SPC_ID
- * Get Address space identifier for use in mem2mem descriptors for a given
- * target. See SF-120734-TC for details on ADDR_SPC_IDs and mem2mem
- * descriptors.
- */
-#define MC_CMD_GET_ADDR_SPC_ID 0x1a0
-#undef MC_CMD_0x1a0_PRIVILEGE_CTG
-
-#define MC_CMD_0x1a0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_GET_ADDR_SPC_ID_IN msgrequest */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_LEN 16
-/* Resource type to get ADDR_SPC_ID for */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_TYPE_OFST 0
-#define MC_CMD_GET_ADDR_SPC_ID_IN_TYPE_LEN 4
-/* enum: Address space ID for host/AP memory DMA over the same interface this
- * MCDI was called on
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_SELF 0x0
-/* enum: Address space ID for host/AP memory DMA via PCI interface and function
- * specified by FUNC
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_PCI_FUNC 0x1
-/* enum: Address space ID for host/AP memory DMA via PCI interface and function
- * specified by FUNC with PASID value specified by PASID
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_PCI_FUNC_PASID 0x2
-/* enum: Address space ID for host/AP memory DMA via PCI interface and function
- * specified by FUNC with PASID value of relative VI specified by VI
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_REL_VI 0x3
-/* enum: Address space ID for host/AP memory DMA via PCI interface, function
- * and PASID value of absolute VI specified by VI
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_ABS_VI 0x4
-/* enum: Address space ID for host memory DMA via PCI interface and function of
- * descriptor proxy function specified by HANDLE
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_DESC_PROXY_HANDLE 0x5
-/* enum: Address space ID for DMA to/from MC memory */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_MC_MEM 0x6
-/* enum: Address space ID for DMA to/from other SmartNIC memory (on-chip, DDR)
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_NIC_MEM 0x7
-/* PCIe Function ID (as struct PCIE_FUNCTION). Only valid if TYPE is PCI_FUNC,
- * PCI_FUNC_PASID or REL_VI.
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LEN 8
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_LEN 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_LBN 32
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_WIDTH 32
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_OFST 8
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_LEN 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_LBN 64
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_WIDTH 32
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_PF_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_PF_LEN 2
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_VF_OFST 6
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_VF_LEN 2
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_INTF_OFST 8
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_INTF_LEN 4
-/* PASID value. Only valid if TYPE is PCI_FUNC_PASID. */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_PASID_OFST 12
-#define MC_CMD_GET_ADDR_SPC_ID_IN_PASID_LEN 4
-/* Relative or absolute VI number. Only valid if TYPE is REL_VI or ABS_VI */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_VI_OFST 12
-#define MC_CMD_GET_ADDR_SPC_ID_IN_VI_LEN 4
-/* Descriptor proxy function handle. Only valid if TYPE is DESC_PROXY_HANDLE.
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_HANDLE_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_HANDLE_LEN 4
-
-/* MC_CMD_GET_ADDR_SPC_ID_OUT msgresponse */
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_LEN 8
-/* Address Space ID for the requested target. Only the lower 36 bits are valid
- * in the current SmartNIC implementation.
- */
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_OFST 0
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LEN 8
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_OFST 0
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_LEN 4
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_LBN 0
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_WIDTH 32
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_LEN 4
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_LBN 32
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_WIDTH 32
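A sketch of the simplest request, TYPE=SELF, in which the FUNC/PASID/VI fields can stay zero; it assumes the sfc MCDI helpers, including MCDI_QWORD to combine the LO/HI halves, and the example_* name is invented.

static int example_get_self_addr_spc_id(struct efx_nic *efx, u64 *id_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ADDR_SPC_ID_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ADDR_SPC_ID_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, GET_ADDR_SPC_ID_IN_TYPE,
		       MC_CMD_GET_ADDR_SPC_ID_IN_SELF);
	rc = efx_mcdi_rpc(efx, MC_CMD_GET_ADDR_SPC_ID, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_ADDR_SPC_ID_OUT_LEN)
		return -EIO;
	/* Only the lower 36 bits are meaningful on current hardware */
	*id_out = MCDI_QWORD(outbuf, GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID);
	return 0;
}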
-
/***********************************/
/* MC_CMD_GET_CLIENT_HANDLE
@@ -26359,7 +22483,8 @@
* INTF=CALLER, PF=PF_NULL, VF=... to refer to a VF child of the calling PF or
* a sibling VF of the calling VF. - INTF=CALLER, PF=..., VF=VF_NULL to refer
* to a PF on the calling interface - INTF=CALLER, PF=..., VF=... to refer to a
- * VF on the calling interface - INTF=..., PF=..., VF=VF_NULL to refer to a PF
+ * VF on the calling interface - INTF=..., PF=PF_NULL, VF=VF_NULL to refer to
+ * the named interface itself - INTF=..., PF=..., VF=VF_NULL to refer to a PF
* on a named interface - INTF=..., PF=..., VF=... to refer to a VF on a named
* interface where ... refers to a small integer for the VF/PF fields, and to
 * values from the PCIE_INTERFACE enum for the INTF field. It's only
@@ -26380,6 +22505,7 @@
* backwards compatibility only, callers should use PCIE_INTERFACE_CALLER.
*/
#define MC_CMD_GET_CLIENT_HANDLE_IN_PCIE_FUNCTION_INTF_NULL 0xffffffff
+/* See structuredef: PCIE_FUNCTION */
#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_PF_OFST 4
#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_PF_LEN 2
#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_VF_OFST 6
@@ -27350,7 +23476,7 @@
/* MAE_MPORT_SELECTOR structuredef: MPORTS are identified by an opaque unsigned
* integer value (mport_id) that is guaranteed to be representable within
 * 32-bits or within any NIC interface field that needs to store the value
- * (whichever is narrowers). This selector structure provides a stable way to
+ * (whichever is narrower). This selector structure provides a stable way to
* refer to m-ports.
*/
#define MAE_MPORT_SELECTOR_LEN 4
@@ -27425,10 +23551,22 @@
#define MAE_MPORT_SELECTOR_FLAT_WIDTH 32
/* MAE_LINK_ENDPOINT_SELECTOR structuredef: Structure that identifies a real or
- * virtual network port by MAE port and link end
+ * virtual network port by MAE port and link end. Intended to be used by
+ * network port MCDI commands. Setting FLAT to MAE_LINK_ENDPOINT_COMPAT is
+ * equivalent to using the previous version of the command. Not all possible
+ * combinations of MPORT_END and MPORT_SELECTOR in MAE_LINK_ENDPOINT_SELECTOR
+ * will work in all circumstances. 1. Some will always work (e.g. a VF can
+ * always address its logical MAC using MPORT_SELECTOR=ASSIGNED,LINK_END=VNIC),
+ * 2. Some are not meaningful and will always fail with EINVAL (e.g. attempting
+ * to address the VNIC end of a link to a physical port), 3. Some are
+ * meaningful but require the MCDI client to have the required permission and
+ * fail with EPERM otherwise (e.g. trying to set the MAC on a VF the caller
+ * cannot administer), and 4. Some could be implementation-specific and fail
+ * with ENOTSUP if not available (no examples exist right now). See
+ * SF-123581-TC section 4.3 for more details.
*/
#define MAE_LINK_ENDPOINT_SELECTOR_LEN 8
-/* The MAE MPORT of interest */
+/* Identifier for the MAE MPORT of interest */
#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_OFST 0
#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_LEN 4
#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_LBN 0
@@ -27829,6 +23967,8 @@
#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_MAXNUM_MCDI2 253
/* enum: A counter ID that is guaranteed never to represent a real counter */
#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL 0xffffffff
+/* Other enum values, see field(s): */
+/* MAE_COUNTER_ID */
/***********************************/
@@ -28266,6 +24406,24 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_OFST 0
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_LBN 14
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_C_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_C_PL_LBN 15
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_C_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_D_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_D_PL_LBN 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_D_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_LBN 17
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_NET_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_NET_CHAN_LBN 18
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_NET_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_PLUGIN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_PLUGIN_LBN 19
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_PLUGIN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_INC_L4_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_INC_L4_LBN 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_INC_L4_WIDTH 1
/* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. */
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_TCI_BE_OFST 4
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_TCI_BE_LEN 2
@@ -28291,19 +24449,23 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DELIVER_OFST 20
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DELIVER_LEN 4
/* Allows an action set to trigger several counter updates. Set to
- * COUNTER_LIST_ID_NULL to request no counter action.
+ * MAE_COUNTER_ID_NULL to request no counter action.
*/
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID_OFST 24
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
/* If a driver only wished to update one counter within this action set, then
* it can supply a COUNTER_ID instead of allocating a single-element counter
* list. The ID must have been allocated with COUNTER_TYPE=AR. This field
- * should be set to COUNTER_ID_NULL if this behaviour is not required. It is
- * not valid to supply a non-NULL value for both COUNTER_LIST_ID and
+ * should be set to MAE_COUNTER_ID_NULL if this behaviour is not required. It
+ * is not valid to supply a non-NULL value for both COUNTER_LIST_ID and
* COUNTER_ID.
*/
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_ID_OFST 28
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_VALUE_OFST 32
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_VALUE_LEN 4
/* Set to MAC_ID_NULL to request no source MAC replacement. */
@@ -28347,6 +24509,24 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_OFST 0
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_LBN 14
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_C_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_C_PL_LBN 15
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_C_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_D_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_D_PL_LBN 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_D_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_LBN 17
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_NET_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_NET_CHAN_LBN 18
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_NET_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_PLUGIN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_PLUGIN_LBN 19
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_PLUGIN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_INC_L4_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_INC_L4_LBN 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_INC_L4_WIDTH 1
/* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. */
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_TCI_BE_OFST 4
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_TCI_BE_LEN 2
@@ -28372,19 +24552,23 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DELIVER_OFST 20
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DELIVER_LEN 4
/* Allows an action set to trigger several counter updates. Set to
- * COUNTER_LIST_ID_NULL to request no counter action.
+ * MAE_COUNTER_ID_NULL to request no counter action.
*/
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_LIST_ID_OFST 24
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_LIST_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
/* If a driver only wished to update one counter within this action set, then
* it can supply a COUNTER_ID instead of allocating a single-element counter
* list. The ID must have been allocated with COUNTER_TYPE=AR. This field
- * should be set to COUNTER_ID_NULL if this behaviour is not required. It is
- * not valid to supply a non-NULL value for both COUNTER_LIST_ID and
+ * should be set to MAE_COUNTER_ID_NULL if this behaviour is not required. It
+ * is not valid to supply a non-NULL value for both COUNTER_LIST_ID and
* COUNTER_ID.
*/
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_ID_OFST 28
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_VALUE_OFST 32
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_VALUE_LEN 4
/* Set to MAC_ID_NULL to request no source MAC replacement. */
@@ -28437,6 +24621,172 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_1_TO_CE_LBN 6
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_1_TO_CE_WIDTH 1
+/* MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN msgrequest: Only supported if
+ * MAE_ACTION_SET_ALLOC_V3_SUPPORTED is advertised in
+ * MC_CMD_GET_CAPABILITIES_V10_OUT.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LEN 53
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAGS_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAGS_LEN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_PUSH_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_PUSH_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_PUSH_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_POP_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_POP_LBN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_POP_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DECAP_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DECAP_LBN 8
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DECAP_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_LBN 9
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAG_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAG_LBN 10
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAG_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_NAT_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_NAT_LBN 11
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_NAT_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DECR_IP_TTL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DECR_IP_TTL_LBN 12
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DECR_IP_TTL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_SRC_MPORT_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_SRC_MPORT_LBN 13
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_SRC_MPORT_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SUPPRESS_SELF_DELIVERY_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SUPPRESS_SELF_DELIVERY_LBN 14
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_C_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_C_PL_LBN 15
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_C_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_D_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_D_PL_LBN 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_D_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_LBN 17
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_NET_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_NET_CHAN_LBN 18
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_NET_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_PLUGIN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_PLUGIN_LBN 19
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_PLUGIN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_INC_L4_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_INC_L4_LBN 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_INC_L4_WIDTH 1
+/* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_TCI_BE_OFST 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_TCI_BE_LEN 2
+/* If VLAN_PUSH >= 1, TPID value to be inserted as outermost VLAN. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_PROTO_BE_OFST 6
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_PROTO_BE_LEN 2
+/* If VLAN_PUSH == 2, inner TCI value to be inserted. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_TCI_BE_OFST 8
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_TCI_BE_LEN 2
+/* If VLAN_PUSH == 2, inner TPID value to be inserted. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_PROTO_BE_OFST 10
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_PROTO_BE_LEN 2
+/* Reserved. Ignored by firmware. Should be set to zero or 0xffffffff. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RSVD_OFST 12
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RSVD_LEN 4
+/* Set to ENCAP_HEADER_ID_NULL to request no encap action */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ENCAP_HEADER_ID_OFST 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ENCAP_HEADER_ID_LEN 4
+/* An m-port selector identifying the m-port that the modified packet should be
+ * delivered to. Set to MPORT_SELECTOR_NULL to request no delivery of the
+ * packet.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DELIVER_OFST 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DELIVER_LEN 4
+/* Allows an action set to trigger several counter updates. Set to
+ * MAE_COUNTER_ID_NULL to request no counter action.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_LIST_ID_OFST 24
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_LIST_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
+/* If a driver only wished to update one counter within this action set, then
+ * it can supply a COUNTER_ID instead of allocating a single-element counter
+ * list. The ID must have been allocated with COUNTER_TYPE=AR. This field
+ * should be set to MAE_COUNTER_ID_NULL if this behaviour is not required. It
+ * is not valid to supply a non-NULL value for both COUNTER_LIST_ID and
+ * COUNTER_ID.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_ID_OFST 28
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_VALUE_OFST 32
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_VALUE_LEN 4
+/* Set to MAC_ID_NULL to request no source MAC replacement. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SRC_MAC_ID_OFST 36
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SRC_MAC_ID_LEN 4
+/* Set to MAC_ID_NULL to request no destination MAC replacement. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DST_MAC_ID_OFST 40
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DST_MAC_ID_LEN 4
+/* Source m-port ID to be reported for DO_SET_SRC_MPORT action. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_REPORTED_SRC_MPORT_OFST 44
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_REPORTED_SRC_MPORT_LEN 4
+/* Actions for modifying the Differentiated Services Code-Point (DSCP) bits
+ * within IPv4 and IPv6 headers.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_CONTROL_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_CONTROL_LEN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_ENCAP_COPY_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_ENCAP_COPY_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_ENCAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_DECAP_COPY_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_DECAP_COPY_LBN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_DECAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_DSCP_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_DSCP_LBN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_DSCP_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_VALUE_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_VALUE_LBN 3
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_VALUE_WIDTH 6
+/* Actions for modifying the Explicit Congestion Notification (ECN) bits within
+ * IPv4 and IPv6 headers.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_CONTROL_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_CONTROL_LEN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_ENCAP_COPY_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_ENCAP_COPY_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_ENCAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_DECAP_COPY_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_DECAP_COPY_LBN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_DECAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_ECN_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_ECN_LBN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_ECN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_VALUE_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_VALUE_LBN 3
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_VALUE_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_0_TO_CE_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_0_TO_CE_LBN 5
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_0_TO_CE_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_1_TO_CE_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_1_TO_CE_LBN 6
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_1_TO_CE_WIDTH 1
+/* Actions for overwriting CH_ROUTE subfields. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OVERWRITE_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OVERWRITE_LEN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_C_PL_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_C_PL_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_C_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_D_PL_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_D_PL_LBN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_D_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_PL_CHAN_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_PL_CHAN_LBN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_PL_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OUT_HOST_CHAN_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OUT_HOST_CHAN_LBN 3
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OUT_HOST_CHAN_WIDTH 1
+/* Override outgoing CH_VC to network port for DO_SET_NET_CHAN action. Cannot
+ * be used in conjunction with DO_SET_SRC_MPORT action.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_NET_CHAN_OFST 52
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_NET_CHAN_LEN 1
+
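A hedged sketch of allocating a V3 action set that only delivers to a single m-port, leaving every optional edit disabled. It assumes the sfc MCDI helpers and struct efx_nic; the 0xffffffff values stand in for the relevant *_NULL IDs (counter, encap header and MAC IDs), which is an assumption based on the NULL enums used elsewhere in this file, and the AS_ID response field name is taken from the response definition that follows.

static int example_mae_action_set_alloc_v3(struct efx_nic *efx,
					   u32 deliver_mport_selector,
					   u32 *as_id_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN);
	size_t outlen;
	int rc;

	/* FLAGS = 0: no VLAN push/pop, decap, NAT, RDP or LACP actions */
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_V3_IN_FLAGS, 0);
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_V3_IN_ENCAP_HEADER_ID,
		       0xffffffff);
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_V3_IN_DELIVER,
		       deliver_mport_selector);
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_LIST_ID,
		       0xffffffff);
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_ID,
		       0xffffffff);
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_V3_IN_SRC_MAC_ID,
		       0xffffffff);
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_V3_IN_DST_MAC_ID,
		       0xffffffff);

	rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_ALLOC, inbuf,
			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	*as_id_out = MCDI_DWORD(outbuf, MAE_ACTION_SET_ALLOC_OUT_AS_ID);
	return 0;
}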
/* MC_CMD_MAE_ACTION_SET_ALLOC_OUT msgresponse */
#define MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN 4
/* The MSB of the AS_ID is guaranteed to be clear if the ID is not
@@ -28680,58 +25030,6 @@
#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_MAXNUM 32
#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_MAXNUM_MCDI2 32
-
-/***********************************/
-/* MC_CMD_MAE_OUTER_RULE_UPDATE
- * Atomically change the response of an Outer Rule.
- */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE 0x17d
-#undef MC_CMD_0x17d_PRIVILEGE_CTG
-
-#define MC_CMD_0x17d_PRIVILEGE_CTG SRIOV_CTG_MAE
-
-/* MC_CMD_MAE_OUTER_RULE_UPDATE_IN msgrequest */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_LEN 16
-/* ID of outer rule to update */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_OR_ID_OFST 0
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_OR_ID_LEN 4
-/* Packets matching the rule will be parsed with this encapsulation. */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ENCAP_TYPE_OFST 4
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ENCAP_TYPE_LEN 4
-/* Enum values, see field(s): */
-/* MAE_MCDI_ENCAP_TYPE */
-/* This field controls the actions that are performed when a rule is hit. */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ACTION_CONTROL_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ACTION_CONTROL_LEN 4
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_LBN 0
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_WIDTH 1
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_LBN 1
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_WIDTH 2
-/* Enum values, see field(s): */
-/* MAE_CT_VNI_MODE */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_LBN 3
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_WIDTH 1
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_LBN 4
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_WIDTH 1
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_LBN 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_WIDTH 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_LBN 16
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_WIDTH 16
-/* ID of counter to increment when the rule is hit. Only used if the DO_COUNT
- * flag is set. The ID must have been allocated with COUNTER_TYPE=OR.
- */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_COUNTER_ID_OFST 12
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_COUNTER_ID_LEN 4
-
-/* MC_CMD_MAE_OUTER_RULE_UPDATE_OUT msgresponse */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_OUT_LEN 0
-
/* MAE_ACTION_RULE_RESPONSE structuredef */
#define MAE_ACTION_RULE_RESPONSE_LEN 16
#define MAE_ACTION_RULE_RESPONSE_ASL_ID_OFST 0
@@ -29122,142 +25420,6 @@
#define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_LBN 352
#define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_WIDTH 32
-/* MAE_MPORT_DESC_V2 structuredef */
-#define MAE_MPORT_DESC_V2_LEN 56
-#define MAE_MPORT_DESC_V2_MPORT_ID_OFST 0
-#define MAE_MPORT_DESC_V2_MPORT_ID_LEN 4
-#define MAE_MPORT_DESC_V2_MPORT_ID_LBN 0
-#define MAE_MPORT_DESC_V2_MPORT_ID_WIDTH 32
-/* Reserved for future purposes, contains information independent of caller */
-#define MAE_MPORT_DESC_V2_FLAGS_OFST 4
-#define MAE_MPORT_DESC_V2_FLAGS_LEN 4
-#define MAE_MPORT_DESC_V2_FLAGS_LBN 32
-#define MAE_MPORT_DESC_V2_FLAGS_WIDTH 32
-#define MAE_MPORT_DESC_V2_CALLER_FLAGS_OFST 8
-#define MAE_MPORT_DESC_V2_CALLER_FLAGS_LEN 4
-#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_OFST 8
-#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_LBN 0
-#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_WIDTH 1
-#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_OFST 8
-#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_LBN 1
-#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_WIDTH 1
-#define MAE_MPORT_DESC_V2_CAN_DELETE_OFST 8
-#define MAE_MPORT_DESC_V2_CAN_DELETE_LBN 2
-#define MAE_MPORT_DESC_V2_CAN_DELETE_WIDTH 1
-#define MAE_MPORT_DESC_V2_IS_ZOMBIE_OFST 8
-#define MAE_MPORT_DESC_V2_IS_ZOMBIE_LBN 3
-#define MAE_MPORT_DESC_V2_IS_ZOMBIE_WIDTH 1
-#define MAE_MPORT_DESC_V2_CALLER_FLAGS_LBN 64
-#define MAE_MPORT_DESC_V2_CALLER_FLAGS_WIDTH 32
-/* Not the ideal name; it's really the type of thing connected to the m-port */
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_OFST 12
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_LEN 4
-/* enum: Connected to a MAC... */
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_NET_PORT 0x0
-/* enum: Adds metadata and delivers to another m-port */
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_ALIAS 0x1
-/* enum: Connected to a VNIC. */
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_VNIC 0x2
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_LBN 96
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_WIDTH 32
-/* 128-bit value available to drivers for m-port identification. */
-#define MAE_MPORT_DESC_V2_UUID_OFST 16
-#define MAE_MPORT_DESC_V2_UUID_LEN 16
-#define MAE_MPORT_DESC_V2_UUID_LBN 128
-#define MAE_MPORT_DESC_V2_UUID_WIDTH 128
-/* Big wadge of space reserved for other common properties */
-#define MAE_MPORT_DESC_V2_RESERVED_OFST 32
-#define MAE_MPORT_DESC_V2_RESERVED_LEN 8
-#define MAE_MPORT_DESC_V2_RESERVED_LO_OFST 32
-#define MAE_MPORT_DESC_V2_RESERVED_LO_LEN 4
-#define MAE_MPORT_DESC_V2_RESERVED_LO_LBN 256
-#define MAE_MPORT_DESC_V2_RESERVED_LO_WIDTH 32
-#define MAE_MPORT_DESC_V2_RESERVED_HI_OFST 36
-#define MAE_MPORT_DESC_V2_RESERVED_HI_LEN 4
-#define MAE_MPORT_DESC_V2_RESERVED_HI_LBN 288
-#define MAE_MPORT_DESC_V2_RESERVED_HI_WIDTH 32
-#define MAE_MPORT_DESC_V2_RESERVED_LBN 256
-#define MAE_MPORT_DESC_V2_RESERVED_WIDTH 64
-/* Logical port index. Only valid when the type is NET_PORT. */
-#define MAE_MPORT_DESC_V2_NET_PORT_IDX_OFST 40
-#define MAE_MPORT_DESC_V2_NET_PORT_IDX_LEN 4
-#define MAE_MPORT_DESC_V2_NET_PORT_IDX_LBN 320
-#define MAE_MPORT_DESC_V2_NET_PORT_IDX_WIDTH 32
-/* The m-port delivered to */
-#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_OFST 40
-#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_LEN 4
-#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_LBN 320
-#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_WIDTH 32
-/* The type of thing that owns the VNIC */
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_OFST 40
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_LEN 4
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_FUNCTION 0x1 /* enum */
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_PLUGIN 0x2 /* enum */
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_LBN 320
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_WIDTH 32
-/* The PCIe interface on which the function lives. CJK: We need an enumeration
- * of interfaces that we extend as new interface (types) appear. This belongs
- * elsewhere and should be referenced from here
- */
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_OFST 44
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_LEN 4
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_LBN 352
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_WIDTH 32
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_OFST 48
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_LEN 2
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_LBN 384
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_WIDTH 16
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_OFST 50
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_LEN 2
-/* enum: Indicates that the function is a PF */
-#define MAE_MPORT_DESC_V2_VF_IDX_NULL 0xffff
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_LBN 400
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_WIDTH 16
-/* Reserved. Should be ignored for now. */
-#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_OFST 44
-#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_LEN 4
-#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_LBN 352
-#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_WIDTH 32
-/* A client handle for the VNIC's owner. Only valid for type VNIC. */
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_OFST 52
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_LEN 4
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_LBN 416
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_MAE_MPORT_ENUMERATE
- * Deprecated in favour of MAE_MPORT_READ_JOURNAL. Support for this command
- * will be removed at some future point.
- */
-#define MC_CMD_MAE_MPORT_ENUMERATE 0x17c
-#undef MC_CMD_0x17c_PRIVILEGE_CTG
-
-#define MC_CMD_0x17c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_MAE_MPORT_ENUMERATE_IN msgrequest */
-#define MC_CMD_MAE_MPORT_ENUMERATE_IN_LEN 0
-
-/* MC_CMD_MAE_MPORT_ENUMERATE_OUT msgresponse */
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMIN 8
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMAX 252
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LEN(num) (8+1*(num))
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_NUM(len) (((len)-8)/1)
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_COUNT_OFST 0
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_COUNT_LEN 4
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_SIZEOF_MPORT_DESC_OFST 4
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_SIZEOF_MPORT_DESC_LEN 4
-/* An array of MAE_MPORT_DESC structures. The MAE_MPORT_DESC structure may
- * grow in future versions of this command. Drivers should use a stride of
- * SIZEOF_MPORT_DESC. Fields beyond SIZEOF_MPORT_DESC are not present.
- */
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_OFST 8
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_LEN 1
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MINNUM 0
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MAXNUM 244
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MAXNUM_MCDI2 1012
-
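(Illustration only, not part of the patch: a minimal sketch of the stride rule above. "outbuf" is an assumed pointer to the response payload, u32/u8 are the usual kernel types, and a little-endian host is assumed for brevity; real driver code would normally go through its MCDI accessor helpers instead.)

	u32 count, stride;

	memcpy(&count, outbuf + 0, 4);	/* MPORT_DESC_COUNT */
	memcpy(&stride, outbuf + 4, 4);	/* SIZEOF_MPORT_DESC */
	for (u32 i = 0; i < count; i++) {
		const u8 *desc = outbuf + 8 + (size_t)i * stride;
		/* Parse only the fields this driver version knows about;
		 * bytes beyond the advertised stride are not present.
		 */
	}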
/***********************************/
/* MC_CMD_MAE_MPORT_READ_JOURNAL
@@ -29570,73 +25732,6 @@
/***********************************/
-/* MC_CMD_TABLE_UPDATE
- * Update an existing entry in a table with a new response value. May return
- * EINVAL for unknown table ID or other bad request parameters, ENOENT if the
- * entry does not already exist, or EPERM if the operation is not permitted. In
- * case of an error, the additional MCDI error argument field returns the raw
- * error code from the underlying CAM driver.
- */
-#define MC_CMD_TABLE_UPDATE 0x1ce
-#undef MC_CMD_0x1ce_PRIVILEGE_CTG
-
-#define MC_CMD_0x1ce_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TABLE_UPDATE_IN msgrequest */
-#define MC_CMD_TABLE_UPDATE_IN_LENMIN 16
-#define MC_CMD_TABLE_UPDATE_IN_LENMAX 252
-#define MC_CMD_TABLE_UPDATE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_TABLE_UPDATE_IN_LEN(num) (12+4*(num))
-#define MC_CMD_TABLE_UPDATE_IN_DATA_NUM(len) (((len)-12)/4)
-/* Table identifier. */
-#define MC_CMD_TABLE_UPDATE_IN_TABLE_ID_OFST 0
-#define MC_CMD_TABLE_UPDATE_IN_TABLE_ID_LEN 4
-/* Enum values, see field(s): */
-/* TABLE_ID */
-/* Width in bits of supplied key data (must match table properties). */
-#define MC_CMD_TABLE_UPDATE_IN_KEY_WIDTH_OFST 4
-#define MC_CMD_TABLE_UPDATE_IN_KEY_WIDTH_LEN 2
-/* Width in bits of supplied mask data (0 for direct/BCAM tables, or for STCAM
- * when allocated MASK_ID is used instead).
- */
-#define MC_CMD_TABLE_UPDATE_IN_MASK_WIDTH_OFST 6
-#define MC_CMD_TABLE_UPDATE_IN_MASK_WIDTH_LEN 2
-/* Width in bits of supplied response data (for INSERT and UPDATE operations
- * this must match the table properties; for DELETE operations, no response
- * data is required and this must be 0).
- */
-#define MC_CMD_TABLE_UPDATE_IN_RESP_WIDTH_OFST 8
-#define MC_CMD_TABLE_UPDATE_IN_RESP_WIDTH_LEN 2
-/* Mask ID for STCAM table - used instead of mask data if the table descriptor
- * reports ALLOC_MASKS==1. Otherwise set to 0.
- */
-#define MC_CMD_TABLE_UPDATE_IN_MASK_ID_OFST 6
-#define MC_CMD_TABLE_UPDATE_IN_MASK_ID_LEN 2
-/* Priority for TCAM or STCAM, in range 0..N_PRIORITIES-1, otherwise 0. */
-#define MC_CMD_TABLE_UPDATE_IN_PRIORITY_OFST 8
-#define MC_CMD_TABLE_UPDATE_IN_PRIORITY_LEN 2
-/* (32-bit alignment padding - set to 0) */
-#define MC_CMD_TABLE_UPDATE_IN_RESERVED_OFST 10
-#define MC_CMD_TABLE_UPDATE_IN_RESERVED_LEN 2
-/* Sequence of key, mask (if MASK_WIDTH > 0), and response (if RESP_WIDTH > 0)
- * data values. Each of these items is logically treated as a single wide N-bit
- * value, in which the individual fields have been placed within that value per
- * the LBN and WIDTH information from the table field descriptors. The wide
- * N-bit value is padded with 0 bits at the MSB end if necessary to make a
- * multiple of 32 bits. The value is then packed into this command as a
- * sequence of 32-bit words, bits [31:0] first, then bits [63:32], etc.
- */
-#define MC_CMD_TABLE_UPDATE_IN_DATA_OFST 12
-#define MC_CMD_TABLE_UPDATE_IN_DATA_LEN 4
-#define MC_CMD_TABLE_UPDATE_IN_DATA_MINNUM 1
-#define MC_CMD_TABLE_UPDATE_IN_DATA_MAXNUM 60
-#define MC_CMD_TABLE_UPDATE_IN_DATA_MAXNUM_MCDI2 252
-
-/* MC_CMD_TABLE_UPDATE_OUT msgresponse */
-#define MC_CMD_TABLE_UPDATE_OUT_LEN 0
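(Worked example, not part of the patch: the DATA packing described above, using a hypothetical 48-bit key. The same wide-value-to-32-bit-word layout applies to the mask and response items.)

	u64 key = 0x123456789abcULL;	/* KEY_WIDTH = 48, zero-padded at the MSB end to 64 bits */
	u32 data[2];

	data[0] = (u32)(key & 0xffffffff);	/* bits [31:0]  -> 0x56789abc */
	data[1] = (u32)(key >> 32);		/* bits [63:32] -> 0x00001234 */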
-
-
-/***********************************/
/* MC_CMD_TABLE_DELETE
* Delete an existing entry in a table. May return EINVAL for unknown table ID
* or other bad request parameters, ENOENT if the entry does not exist, or
@@ -29702,5 +25797,124 @@
/* MC_CMD_TABLE_DELETE_OUT msgresponse */
#define MC_CMD_TABLE_DELETE_OUT_LEN 0
+/* MC_CMD_QUEUE_HANDLE structuredef: On X4, to distinguish between full-
+ * featured (X2-style) VIs and low-latency (X3-style) queues, we use the top
+ * bits of the queue handle to specify the queue type in all MCDI calls which
+ * refer to VIs/queues. These bits should be masked off when indexing into a
+ * queue in the BAR.
+ */
+#define MC_CMD_QUEUE_HANDLE_LEN 4
+/* Combined queue number and type. This is the ID returned by and passed into
+ * MCDI calls that use queues.
+ */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_HANDLE_OFST 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_HANDLE_LEN 4
+#define MC_CMD_QUEUE_HANDLE_QUEUE_NUM_OFST 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_NUM_LBN 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_NUM_WIDTH 24
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_OFST 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LBN 24
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_WIDTH 8
+/* enum: Indicates that the queue instance is a full-featured VI */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_FF_VI 0x0
+/* enum: Indicates that the queue instance is a LL TXQ */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LL_TXQ 0x1
+/* enum: Indicates that the queue instance is a LL RXQ */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LL_RXQ 0x2
+/* enum: Indicates that the queue instance is a LL EVQ */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LL_EVQ 0x3
+#define MC_CMD_QUEUE_HANDLE_QUEUE_HANDLE_LBN 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_HANDLE_WIDTH 32
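(Illustration only, not part of the patch: how the QUEUE_NUM/QUEUE_TYPE split above could be unpacked. The helper and macro names are made up; u32 is the usual kernel type.)

	#define QH_NUM_MASK	((1U << 24) - 1)	/* QUEUE_NUM: LBN 0, WIDTH 24 */
	#define QH_TYPE_SHIFT	24			/* QUEUE_TYPE: LBN 24, WIDTH 8 */

	static inline u32 qh_num(u32 handle)
	{
		return handle & QH_NUM_MASK;	/* mask off the type before indexing into the BAR */
	}

	static inline u32 qh_type(u32 handle)
	{
		return handle >> QH_TYPE_SHIFT;	/* 0x0 FF_VI, 0x1 LL_TXQ, 0x2 LL_RXQ, 0x3 LL_EVQ */
	}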
+
+
+/***********************************/
+/* MC_CMD_ALLOC_LL_QUEUES
+ * Allocate low latency (X3-style) queues for the current PCI function. Can be
+ * called more than once to allocate additional queues.
+ */
+#define MC_CMD_ALLOC_LL_QUEUES 0x1dd
+#undef MC_CMD_0x1dd_PRIVILEGE_CTG
+
+#define MC_CMD_0x1dd_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_ALLOC_LL_QUEUES_IN msgrequest */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_LEN 24
+/* The minimum number of TXQs that is acceptable */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_TXQ_COUNT_OFST 0
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_TXQ_COUNT_LEN 4
+/* The maximum number of TXQs that would be useful */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_TXQ_COUNT_OFST 4
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_TXQ_COUNT_LEN 4
+/* The minimum number of RXQs that is acceptable */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_RXQ_COUNT_OFST 8
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_RXQ_COUNT_LEN 4
+/* The maximum number of RXQs that would be useful */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_RXQ_COUNT_OFST 12
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_RXQ_COUNT_LEN 4
+/* The minimum number of EVQs that is acceptable */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_EVQ_COUNT_OFST 16
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_EVQ_COUNT_LEN 4
+/* The maximum number of EVQs that would be useful */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_EVQ_COUNT_OFST 20
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_EVQ_COUNT_LEN 4
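(Example request, not part of the patch: ask for up to 8 of each queue type while accepting as few as 1; the firmware may grant any count in between. A little-endian host is assumed so the u32 array matches the MCDI wire layout directly.)

	u32 inbuf[6] = {
		1, 8,	/* MIN_TXQ_COUNT, MAX_TXQ_COUNT */
		1, 8,	/* MIN_RXQ_COUNT, MAX_RXQ_COUNT */
		1, 8,	/* MIN_EVQ_COUNT, MAX_EVQ_COUNT */
	};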
+
+/* MC_CMD_ALLOC_LL_QUEUES_OUT msgresponse */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_LENMIN 16
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_LENMAX 252
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_LEN(num) (12+4*(num))
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_NUM(len) (((len)-12)/4)
+/* The number of TXQs allocated in this request */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_TXQ_COUNT_OFST 0
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_TXQ_COUNT_LEN 4
+/* The number of RXQs allocated in this request */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_RXQ_COUNT_OFST 4
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_RXQ_COUNT_LEN 4
+/* The number of EVQs allocated in this request */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_EVQ_COUNT_OFST 8
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_EVQ_COUNT_LEN 4
+/* A list of allocated queues, returned as MC_CMD_QUEUE_HANDLEs, not
+ * necessarily contiguous. TXQs are first in the list, followed by RXQs then
+ * EVQs. The type of each queue is indicated by the top bits (see the
+ * QUEUE_TYPE enum)
+ */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_OFST 12
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_LEN 4
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_MINNUM 1
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_MAXNUM 60
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_MAXNUM_MCDI2 252
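(Sketch, not part of the patch: splitting the returned handle list per the layout above. "outbuf" is an assumed pointer to the little-endian response; a real driver would use its MCDI helpers.)

	u32 ntxq, nrxq, nevq;

	memcpy(&ntxq, outbuf + 0, 4);	/* TXQ_COUNT */
	memcpy(&nrxq, outbuf + 4, 4);	/* RXQ_COUNT */
	memcpy(&nevq, outbuf + 8, 4);	/* EVQ_COUNT */
	/* QUEUES starts at offset 12: ntxq TXQ handles first, then nrxq RXQ
	 * handles, then nevq EVQ handles; the top 8 bits of each handle give
	 * its QUEUE_TYPE.
	 */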
+
+
+/***********************************/
+/* MC_CMD_FREE_LL_QUEUES
+ * Free low latency (X3-style) queues for the current PCI function.
+ */
+#define MC_CMD_FREE_LL_QUEUES 0x1de
+#undef MC_CMD_0x1de_PRIVILEGE_CTG
+
+#define MC_CMD_0x1de_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FREE_LL_QUEUES_IN msgrequest */
+#define MC_CMD_FREE_LL_QUEUES_IN_LENMIN 8
+#define MC_CMD_FREE_LL_QUEUES_IN_LENMAX 252
+#define MC_CMD_FREE_LL_QUEUES_IN_LENMAX_MCDI2 1020
+#define MC_CMD_FREE_LL_QUEUES_IN_LEN(num) (4+4*(num))
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_NUM(len) (((len)-4)/4)
+/* The number of queues to free. */
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUE_COUNT_OFST 0
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUE_COUNT_LEN 4
+/* A list of queues to free, as a list of MC_CMD_QUEUE_HANDLEs. They must have
+ * all been previously allocated by MC_CMD_ALLOC_LL_QUEUES. The type of each
+ * queue should be indicated by the top bits.
+ */
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_OFST 4
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_LEN 4
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_MINNUM 1
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_MAXNUM 62
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_MAXNUM_MCDI2 254
+
+/* MC_CMD_FREE_LL_QUEUES_OUT msgresponse */
+#define MC_CMD_FREE_LL_QUEUES_OUT_LEN 0
+
#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index ad4694fa3dda..7b236d291d8c 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -17,58 +17,6 @@
#include "selftest.h"
#include "mcdi_port_common.h"
-static int efx_mcdi_mdio_read(struct net_device *net_dev,
- int prtad, int devad, u16 addr)
-{
- struct efx_nic *efx = efx_netdev_priv(net_dev);
- MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN);
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, efx->mdio_bus);
- MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad);
- MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad);
- MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- return rc;
-
- if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) !=
- MC_CMD_MDIO_STATUS_GOOD)
- return -EIO;
-
- return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
-}
-
-static int efx_mcdi_mdio_write(struct net_device *net_dev,
- int prtad, int devad, u16 addr, u16 value)
-{
- struct efx_nic *efx = efx_netdev_priv(net_dev);
- MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN);
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, efx->mdio_bus);
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad);
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad);
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr);
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_VALUE, value);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- return rc;
-
- if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) !=
- MC_CMD_MDIO_STATUS_GOOD)
- return -EIO;
-
- return 0;
-}
u32 efx_mcdi_phy_get_caps(struct efx_nic *efx)
{
@@ -97,12 +45,7 @@ int efx_mcdi_port_probe(struct efx_nic *efx)
{
int rc;
- /* Set up MDIO structure for PHY */
- efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
- efx->mdio.mdio_read = efx_mcdi_mdio_read;
- efx->mdio.mdio_write = efx_mcdi_mdio_write;
-
- /* Fill out MDIO structure, loopback modes, and initial link state */
+ /* Fill out loopback modes and initial link state */
rc = efx_mcdi_phy_probe(efx);
if (rc != 0)
return rc;
diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c
index 76ea26722ca4..dae684194ac8 100644
--- a/drivers/net/ethernet/sfc/mcdi_port_common.c
+++ b/drivers/net/ethernet/sfc/mcdi_port_common.c
@@ -448,15 +448,6 @@ int efx_mcdi_phy_probe(struct efx_nic *efx)
efx->phy_data = phy_data;
efx->phy_type = phy_data->type;
- efx->mdio_bus = phy_data->channel;
- efx->mdio.prtad = phy_data->port;
- efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
- efx->mdio.mode_support = 0;
- if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
- efx->mdio.mode_support |= MDIO_SUPPORTS_C22;
- if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
- efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
-
caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
mcdi_to_ethtool_linkset(phy_data->media, caps,
@@ -546,8 +537,6 @@ void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx, struct ethtool_link_ks
cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media);
cmd->base.phy_address = phy_cfg->port;
cmd->base.autoneg = !!(efx->link_advertising[0] & ADVERTISED_Autoneg);
- cmd->base.mdio_support = (efx->mdio.mode_support &
- (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
mcdi_to_ethtool_linkset(phy_cfg->media, phy_cfg->supported_cap,
cmd->link_modes.supported);
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index f70a7b7d6345..5c0f306fb019 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -15,7 +15,7 @@
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/timer.h>
-#include <linux/mdio.h>
+#include <linux/mii.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/device.h>
@@ -956,8 +956,6 @@ struct efx_mae;
* @stats_buffer: DMA buffer for statistics
* @phy_type: PHY type
* @phy_data: PHY private data (including PHY-specific stats)
- * @mdio: PHY MDIO interface
- * @mdio_bus: PHY MDIO bus ID (only used by Siena)
* @phy_mode: PHY operating mode. Serialised by @mac_lock.
* @link_advertising: Autonegotiation advertising flags
* @fec_config: Forward Error Correction configuration flags. For bit positions
@@ -1006,6 +1004,7 @@ struct efx_mae;
* @dl_port: devlink port associated with the PF
* @mem_bar: The BAR that is mapped into membase.
* @reg_base: Offset from the start of the bar to the function control window.
+ * @reflash_mutex: Mutex for serialising firmware reflash operations.
* @monitor_work: Hardware monitor workitem
* @biu_lock: BIU (bus interface unit) lock
* @last_irq_cpu: Last CPU to handle a possible test interrupt. This
@@ -1131,8 +1130,6 @@ struct efx_nic {
unsigned int phy_type;
void *phy_data;
- struct mdio_if_info mdio;
- unsigned int mdio_bus;
enum efx_phy_mode phy_mode;
__ETHTOOL_DECLARE_LINK_MODE_MASK(link_advertising);
@@ -1191,6 +1188,7 @@ struct efx_nic {
struct devlink_port *dl_port;
unsigned int mem_bar;
u32 reg_base;
+ struct mutex reflash_mutex;
/* The following fields may be written more often */
@@ -1383,6 +1381,8 @@ struct efx_udp_tunnel {
* @can_rx_scatter: NIC is able to scatter packets to multiple buffers
* @always_rx_scatter: NIC will always scatter packets to multiple buffers
* @option_descriptors: NIC supports TX option descriptors
+ * @flash_auto_partition: firmware flash uses the AUTO partition, so the
+ *	driver does not need to perform image parsing
* @min_interrupt_mode: Lowest capability interrupt mode supported
* from &enum efx_int_mode.
* @timer_period_max: Maximum period of interrupt timer (in ticks)
@@ -1559,6 +1559,7 @@ struct efx_nic_type {
bool can_rx_scatter;
bool always_rx_scatter;
bool option_descriptors;
+ bool flash_auto_partition;
unsigned int min_interrupt_mode;
unsigned int timer_period_max;
netdev_features_t offload_features;
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
index 0d93164988fc..fa94aa3cd5fe 100644
--- a/drivers/net/ethernet/sfc/tc.c
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -1043,7 +1043,7 @@ static int efx_tc_flower_handle_lhs_actions(struct efx_nic *efx,
return -EOPNOTSUPP;
}
if (fa->ct.action) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled ct.action %u for LHS rule\n",
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled ct.action %u for LHS rule",
fa->ct.action);
return -EOPNOTSUPP;
}
@@ -1056,7 +1056,7 @@ static int efx_tc_flower_handle_lhs_actions(struct efx_nic *efx,
act->zone = ct_zone;
break;
default:
- NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u for LHS rule\n",
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u for LHS rule",
fa->id);
return -EOPNOTSUPP;
}
@@ -1581,7 +1581,7 @@ static int efx_tc_flower_replace_foreign_lhs(struct efx_nic *efx,
type = efx_tc_indr_netdev_type(net_dev);
if (type == EFX_ENCAP_TYPE_NONE) {
- NL_SET_ERR_MSG_MOD(extack, "Egress encap match on unsupported tunnel device\n");
+ NL_SET_ERR_MSG_MOD(extack, "Egress encap match on unsupported tunnel device");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index f539813878f5..2e1106097965 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -43,7 +43,6 @@
#include <linux/smsc911x.h>
#include <linux/device.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include <linux/acpi.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index dc99821c6226..ee890de69ffe 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -970,7 +970,7 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
struct netsec_desc *desc = &dring->desc[idx];
struct page *page = virt_to_page(desc->addr);
- u32 xdp_result = NETSEC_XDP_PASS;
+ u32 metasize, xdp_result = NETSEC_XDP_PASS;
struct sk_buff *skb = NULL;
u16 pkt_len, desc_len;
dma_addr_t dma_handle;
@@ -1019,7 +1019,7 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
prefetch(desc->addr);
xdp_prepare_buff(&xdp, desc->addr, NETSEC_RXBUF_HEADROOM,
- pkt_len, false);
+ pkt_len, true);
if (xdp_prog) {
xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
@@ -1048,6 +1048,9 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
skb_reserve(skb, xdp.data - xdp.data_hard_start);
skb_put(skb, xdp.data_end - xdp.data);
+ metasize = xdp.data - xdp.data_meta;
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->protocol = eth_type_trans(skb, priv->ndev);
if (priv->rx_cksum_offload_flag &&
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 4cc85a36a1ab..3c820ef56775 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -181,6 +181,17 @@ config DWMAC_SOCFPGA
for the stmmac device driver. This driver is used for
arria5 and cyclone5 FPGA SoCs.
+config DWMAC_SOPHGO
+ tristate "Sophgo dwmac support"
+ depends on OF && (ARCH_SOPHGO || COMPILE_TEST)
+ default m if ARCH_SOPHGO
+ help
+ Support for ethernet controllers on Sophgo RISC-V SoCs
+
+ This selects the Sophgo SoC specific glue layer support
+ for the stmmac device driver. This driver is used for the
+ ethernet controllers on various Sophgo SoCs.
+
config DWMAC_STARFIVE
tristate "StarFive dwmac support"
depends on OF && (ARCH_STARFIVE || COMPILE_TEST)
@@ -307,6 +318,7 @@ config DWMAC_INTEL
default X86
depends on X86 && STMMAC_ETH && PCI
depends on COMMON_CLK
+ depends on ACPI
help
This selects the Intel platform specific bus support for the
stmmac driver. This driver is used for Intel Quark/EHL/TGL.
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index b26f0e79c2b3..594883fb4164 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
obj-$(CONFIG_DWMAC_RZN1) += dwmac-rzn1.o
obj-$(CONFIG_DWMAC_S32) += dwmac-s32.o
obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o
+obj-$(CONFIG_DWMAC_SOPHGO) += dwmac-sophgo.o
obj-$(CONFIG_DWMAC_STARFIVE) += dwmac-starfive.o
obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index e25db747a81a..412b07e77945 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -101,8 +101,8 @@ struct stmmac_rxq_stats {
/* Updates on each CPU protected by not allowing nested irqs. */
struct stmmac_pcpu_stats {
struct u64_stats_sync syncp;
- u64_stats_t rx_normal_irq_n[MTL_MAX_TX_QUEUES];
- u64_stats_t tx_normal_irq_n[MTL_MAX_RX_QUEUES];
+ u64_stats_t rx_normal_irq_n[MTL_MAX_RX_QUEUES];
+ u64_stats_t tx_normal_irq_n[MTL_MAX_TX_QUEUES];
};
/* Extra statistic and debug information exposed by ethtool */
@@ -530,6 +530,20 @@ struct dma_features {
#define STMMAC_DEFAULT_TWT_LS 0x1E
#define STMMAC_ET_MAX 0xFFFFF
+/* Common LPI register bits */
+#define LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable, gmac4, xgmac2 only */
+#define LPI_CTRL_STATUS_LPIATE BIT(20) /* LPI Timer Enable, gmac4 only */
+#define LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */
+#define LPI_CTRL_STATUS_PLSEN BIT(18) /* Enable PHY Link Status */
+#define LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */
+#define LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */
+#define LPI_CTRL_STATUS_RLPIST BIT(9) /* Receive LPI state, gmac1000 only? */
+#define LPI_CTRL_STATUS_TLPIST BIT(8) /* Transmit LPI state, gmac1000 only? */
+#define LPI_CTRL_STATUS_RLPIEX BIT(3) /* Receive LPI Exit */
+#define LPI_CTRL_STATUS_RLPIEN BIT(2) /* Receive LPI Entry */
+#define LPI_CTRL_STATUS_TLPIEX BIT(1) /* Transmit LPI Exit */
+#define LPI_CTRL_STATUS_TLPIEN BIT(0) /* Transmit LPI Entry */
+
#define STMMAC_CHAIN_MODE 0x1
#define STMMAC_RING_MODE 0x2
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
index ef99ef3f1ab4..37fe7c288878 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
@@ -59,10 +59,11 @@ static void anarion_gmac_exit(struct platform_device *pdev, void *priv)
gmac_write_reg(gmac, GMAC_RESET_CONTROL_REG, 1);
}
-static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
+static struct anarion_gmac *
+anarion_config_dt(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat)
{
struct anarion_gmac *gmac;
- phy_interface_t phy_mode;
void __iomem *ctl_block;
int err;
@@ -79,11 +80,7 @@ static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
gmac->ctl_block = ctl_block;
- err = of_get_phy_mode(pdev->dev.of_node, &phy_mode);
- if (err)
- return ERR_PTR(err);
-
- switch (phy_mode) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_RGMII:
fallthrough;
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -93,7 +90,7 @@ static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
break;
default:
dev_err(&pdev->dev, "Unsupported phy-mode (%d)\n",
- phy_mode);
+ plat_dat->phy_interface);
return ERR_PTR(-ENOTSUPP);
}
@@ -111,14 +108,14 @@ static int anarion_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- gmac = anarion_config_dt(pdev);
- if (IS_ERR(gmac))
- return PTR_ERR(gmac);
-
plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
+ gmac = anarion_config_dt(pdev, plat_dat);
+ if (IS_ERR(gmac))
+ return PTR_ERR(gmac);
+
plat_dat->init = anarion_gmac_init;
plat_dat->exit = anarion_gmac_exit;
anarion_gmac_init(pdev, gmac);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index bd4eb187f8c6..cd431f84f34f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -29,14 +29,21 @@ struct tegra_eqos {
void __iomem *regs;
struct reset_control *rst;
- struct clk *clk_master;
struct clk *clk_slave;
- struct clk *clk_tx;
- struct clk *clk_rx;
struct gpio_desc *reset;
};
+static struct clk *dwc_eth_find_clk(struct plat_stmmacenet_data *plat_dat,
+ const char *name)
+{
+ for (int i = 0; i < plat_dat->num_clks; i++)
+ if (strcmp(plat_dat->clks[i].id, name) == 0)
+ return plat_dat->clks[i].clk;
+
+ return NULL;
+}
+
static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
struct plat_stmmacenet_data *plat_dat)
{
@@ -46,7 +53,9 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
u32 a_index = 0;
if (!plat_dat->axi) {
- plat_dat->axi = kzalloc(sizeof(struct stmmac_axi), GFP_KERNEL);
+ plat_dat->axi = devm_kzalloc(&pdev->dev,
+ sizeof(struct stmmac_axi),
+ GFP_KERNEL);
if (!plat_dat->axi)
return -ENOMEM;
@@ -123,49 +132,9 @@ static int dwc_qos_probe(struct platform_device *pdev,
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *stmmac_res)
{
- int err;
-
- plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
- if (IS_ERR(plat_dat->stmmac_clk)) {
- dev_err(&pdev->dev, "apb_pclk clock not found.\n");
- return PTR_ERR(plat_dat->stmmac_clk);
- }
-
- err = clk_prepare_enable(plat_dat->stmmac_clk);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to enable apb_pclk clock: %d\n",
- err);
- return err;
- }
-
- plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
- if (IS_ERR(plat_dat->pclk)) {
- dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
- err = PTR_ERR(plat_dat->pclk);
- goto disable;
- }
-
- err = clk_prepare_enable(plat_dat->pclk);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to enable phy_ref clock: %d\n",
- err);
- goto disable;
- }
+ plat_dat->pclk = dwc_eth_find_clk(plat_dat, "phy_ref_clk");
return 0;
-
-disable:
- clk_disable_unprepare(plat_dat->stmmac_clk);
- return err;
-}
-
-static void dwc_qos_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct stmmac_priv *priv = netdev_priv(ndev);
-
- clk_disable_unprepare(priv->plat->pclk);
- clk_disable_unprepare(priv->plat->stmmac_clk);
}
#define SDMEMCOMPPADCTRL 0x8800
@@ -178,11 +147,10 @@ static void dwc_qos_remove(struct platform_device *pdev)
#define AUTO_CAL_STATUS 0x880c
#define AUTO_CAL_STATUS_ACTIVE BIT(31)
-static void tegra_eqos_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static void tegra_eqos_fix_speed(void *priv, int speed, unsigned int mode)
{
struct tegra_eqos *eqos = priv;
bool needs_calibration = false;
- long rate = 125000000;
u32 value;
int err;
@@ -193,11 +161,10 @@ static void tegra_eqos_fix_speed(void *priv, unsigned int speed, unsigned int mo
fallthrough;
case SPEED_10:
- rate = rgmii_clock(speed);
break;
default:
- dev_err(eqos->dev, "invalid speed %u\n", speed);
+ dev_err(eqos->dev, "invalid speed %d\n", speed);
break;
}
@@ -240,10 +207,6 @@ static void tegra_eqos_fix_speed(void *priv, unsigned int speed, unsigned int mo
value &= ~AUTO_CAL_CONFIG_ENABLE;
writel(value, eqos->regs + AUTO_CAL_CONFIG);
}
-
- err = clk_set_rate(eqos->clk_tx, rate);
- if (err < 0)
- dev_err(eqos->dev, "failed to set TX rate: %d\n", err);
}
static int tegra_eqos_init(struct platform_device *pdev, void *priv)
@@ -261,7 +224,7 @@ static int tegra_eqos_init(struct platform_device *pdev, void *priv)
}
static int tegra_eqos_probe(struct platform_device *pdev,
- struct plat_stmmacenet_data *data,
+ struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res)
{
struct device *dev = &pdev->dev;
@@ -274,63 +237,24 @@ static int tegra_eqos_probe(struct platform_device *pdev,
eqos->dev = &pdev->dev;
eqos->regs = res->addr;
+ eqos->clk_slave = plat_dat->stmmac_clk;
if (!is_of_node(dev->fwnode))
goto bypass_clk_reset_gpio;
- eqos->clk_master = devm_clk_get(&pdev->dev, "master_bus");
- if (IS_ERR(eqos->clk_master)) {
- err = PTR_ERR(eqos->clk_master);
- goto error;
- }
-
- err = clk_prepare_enable(eqos->clk_master);
- if (err < 0)
- goto error;
-
- eqos->clk_slave = devm_clk_get(&pdev->dev, "slave_bus");
- if (IS_ERR(eqos->clk_slave)) {
- err = PTR_ERR(eqos->clk_slave);
- goto disable_master;
- }
-
- data->stmmac_clk = eqos->clk_slave;
-
- err = clk_prepare_enable(eqos->clk_slave);
- if (err < 0)
- goto disable_master;
-
- eqos->clk_rx = devm_clk_get(&pdev->dev, "rx");
- if (IS_ERR(eqos->clk_rx)) {
- err = PTR_ERR(eqos->clk_rx);
- goto disable_slave;
- }
-
- err = clk_prepare_enable(eqos->clk_rx);
- if (err < 0)
- goto disable_slave;
-
- eqos->clk_tx = devm_clk_get(&pdev->dev, "tx");
- if (IS_ERR(eqos->clk_tx)) {
- err = PTR_ERR(eqos->clk_tx);
- goto disable_rx;
- }
-
- err = clk_prepare_enable(eqos->clk_tx);
- if (err < 0)
- goto disable_rx;
+ plat_dat->clk_tx_i = dwc_eth_find_clk(plat_dat, "tx");
eqos->reset = devm_gpiod_get(&pdev->dev, "phy-reset", GPIOD_OUT_HIGH);
if (IS_ERR(eqos->reset)) {
err = PTR_ERR(eqos->reset);
- goto disable_tx;
+ return err;
}
usleep_range(2000, 4000);
gpiod_set_value(eqos->reset, 0);
/* MDIO bus was already reset just above */
- data->mdio_bus_data->needs_reset = false;
+ plat_dat->mdio_bus_data->needs_reset = false;
eqos->rst = devm_reset_control_get(&pdev->dev, "eqos");
if (IS_ERR(eqos->rst)) {
@@ -351,10 +275,11 @@ static int tegra_eqos_probe(struct platform_device *pdev,
usleep_range(2000, 4000);
bypass_clk_reset_gpio:
- data->fix_mac_speed = tegra_eqos_fix_speed;
- data->init = tegra_eqos_init;
- data->bsp_priv = eqos;
- data->flags |= STMMAC_FLAG_SPH_DISABLE;
+ plat_dat->fix_mac_speed = tegra_eqos_fix_speed;
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
+ plat_dat->init = tegra_eqos_init;
+ plat_dat->bsp_priv = eqos;
+ plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE;
err = tegra_eqos_init(pdev, eqos);
if (err < 0)
@@ -365,15 +290,7 @@ reset:
reset_control_assert(eqos->rst);
reset_phy:
gpiod_set_value(eqos->reset, 1);
-disable_tx:
- clk_disable_unprepare(eqos->clk_tx);
-disable_rx:
- clk_disable_unprepare(eqos->clk_rx);
-disable_slave:
- clk_disable_unprepare(eqos->clk_slave);
-disable_master:
- clk_disable_unprepare(eqos->clk_master);
-error:
+
return err;
}
@@ -383,27 +300,29 @@ static void tegra_eqos_remove(struct platform_device *pdev)
reset_control_assert(eqos->rst);
gpiod_set_value(eqos->reset, 1);
- clk_disable_unprepare(eqos->clk_tx);
- clk_disable_unprepare(eqos->clk_rx);
- clk_disable_unprepare(eqos->clk_slave);
- clk_disable_unprepare(eqos->clk_master);
}
struct dwc_eth_dwmac_data {
int (*probe)(struct platform_device *pdev,
- struct plat_stmmacenet_data *data,
+ struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res);
void (*remove)(struct platform_device *pdev);
+ const char *stmmac_clk_name;
};
static const struct dwc_eth_dwmac_data dwc_qos_data = {
.probe = dwc_qos_probe,
- .remove = dwc_qos_remove,
+ .stmmac_clk_name = "apb_pclk",
};
static const struct dwc_eth_dwmac_data tegra_eqos_data = {
.probe = tegra_eqos_probe,
.remove = tegra_eqos_remove,
+ .stmmac_clk_name = "slave_bus",
+};
+
+static const struct dwc_eth_dwmac_data fsd_eqos_data = {
+ .stmmac_clk_name = "slave_bus",
};
static int dwc_eth_dwmac_probe(struct platform_device *pdev)
@@ -434,9 +353,23 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
- ret = data->probe(pdev, plat_dat, &stmmac_res);
+ ret = devm_clk_bulk_get_all(&pdev->dev, &plat_dat->clks);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Failed to retrieve all required clocks\n");
+ plat_dat->num_clks = ret;
+
+ ret = clk_bulk_prepare_enable(plat_dat->num_clks, plat_dat->clks);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to enable clocks\n");
+
+ plat_dat->stmmac_clk = dwc_eth_find_clk(plat_dat,
+ data->stmmac_clk_name);
+
+ if (data->probe)
+ ret = data->probe(pdev, plat_dat, &stmmac_res);
if (ret < 0) {
dev_err_probe(&pdev->dev, ret, "failed to probe subdriver\n");
+ clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks);
return ret;
}
@@ -451,7 +384,8 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
return ret;
remove:
- data->remove(pdev);
+ if (data->remove)
+ data->remove(pdev);
return ret;
}
@@ -459,15 +393,21 @@ remove:
static void dwc_eth_dwmac_remove(struct platform_device *pdev)
{
const struct dwc_eth_dwmac_data *data = device_get_match_data(&pdev->dev);
+ struct plat_stmmacenet_data *plat_dat = dev_get_platdata(&pdev->dev);
stmmac_dvr_remove(&pdev->dev);
- data->remove(pdev);
+ if (data->remove)
+ data->remove(pdev);
+
+ if (plat_dat)
+ clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks);
}
static const struct of_device_id dwc_eth_dwmac_match[] = {
{ .compatible = "snps,dwc-qos-ethernet-4.10", .data = &dwc_qos_data },
{ .compatible = "nvidia,tegra186-eqos", .data = &tegra_eqos_data },
+ { .compatible = "tesla,fsd-ethqos", .data = &fsd_eqos_data },
{ }
};
MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 20d3a202bb8d..5d279fa54b3e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -51,7 +51,7 @@ struct imx_dwmac_ops {
int (*fix_soc_reset)(void *priv, void __iomem *ioaddr);
int (*set_intf_mode)(struct plat_stmmacenet_data *plat_dat);
- void (*fix_mac_speed)(void *priv, unsigned int speed, unsigned int mode);
+ void (*fix_mac_speed)(void *priv, int speed, unsigned int mode);
};
struct imx_priv_data {
@@ -192,7 +192,20 @@ static void imx_dwmac_exit(struct platform_device *pdev, void *priv)
/* nothing to do now */
}
-static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static int imx_dwmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
+{
+ struct imx_priv_data *dwmac = bsp_priv;
+
+ interface = dwmac->plat_dat->mac_interface;
+ if (interface == PHY_INTERFACE_MODE_RMII ||
+ interface == PHY_INTERFACE_MODE_MII)
+ return 0;
+
+ return stmmac_set_clk_tx_rate(bsp_priv, clk_tx_i, interface, speed);
+}
+
+static void imx_dwmac_fix_speed(void *priv, int speed, unsigned int mode)
{
struct plat_stmmacenet_data *plat_dat;
struct imx_priv_data *dwmac = priv;
@@ -208,7 +221,7 @@ static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mod
rate = rgmii_clock(speed);
if (rate < 0) {
- dev_err(dwmac->dev, "invalid speed %u\n", speed);
+ dev_err(dwmac->dev, "invalid speed %d\n", speed);
return;
}
@@ -217,7 +230,7 @@ static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mod
dev_err(dwmac->dev, "failed to set tx rate %lu\n", rate);
}
-static void imx93_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static void imx93_dwmac_fix_speed(void *priv, int speed, unsigned int mode)
{
struct imx_priv_data *dwmac = priv;
unsigned int iface;
@@ -358,7 +371,6 @@ static int imx_dwmac_probe(struct platform_device *pdev)
plat_dat->init = imx_dwmac_init;
plat_dat->exit = imx_dwmac_exit;
plat_dat->clks_config = imx_dwmac_clks_config;
- plat_dat->fix_mac_speed = imx_dwmac_fix_speed;
plat_dat->bsp_priv = dwmac;
dwmac->plat_dat = plat_dat;
dwmac->base_addr = stmmac_res.addr;
@@ -371,8 +383,13 @@ static int imx_dwmac_probe(struct platform_device *pdev)
if (ret)
goto err_dwmac_init;
- if (dwmac->ops->fix_mac_speed)
+ if (dwmac->ops->fix_mac_speed) {
plat_dat->fix_mac_speed = dwmac->ops->fix_mac_speed;
+ } else if (!dwmac->ops->mac_rgmii_txclk_auto_adj) {
+ plat_dat->clk_tx_i = dwmac->clk_tx;
+ plat_dat->set_clk_tx_rate = imx_dwmac_set_clk_tx_rate;
+ }
+
dwmac->plat_dat->fix_soc_reset = dwmac->ops->fix_soc_reset;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
index ddee6154d40b..599def7b3a64 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
@@ -22,31 +22,12 @@ struct intel_dwmac {
};
struct intel_dwmac_data {
- void (*fix_mac_speed)(void *priv, unsigned int speed, unsigned int mode);
unsigned long ptp_ref_clk_rate;
unsigned long tx_clk_rate;
bool tx_clk_en;
};
-static void kmb_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
-{
- struct intel_dwmac *dwmac = priv;
- long rate;
- int ret;
-
- rate = rgmii_clock(speed);
- if (rate < 0) {
- dev_err(dwmac->dev, "Invalid speed\n");
- return;
- }
-
- ret = clk_set_rate(dwmac->tx_clk, rate);
- if (ret)
- dev_err(dwmac->dev, "Failed to configure tx clock rate\n");
-}
-
static const struct intel_dwmac_data kmb_data = {
- .fix_mac_speed = kmb_eth_fix_mac_speed,
.ptp_ref_clk_rate = 200000000,
.tx_clk_rate = 125000000,
.tx_clk_en = true,
@@ -89,8 +70,6 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
* platform_match().
*/
dwmac->data = device_get_match_data(&pdev->dev);
- if (dwmac->data->fix_mac_speed)
- plat_dat->fix_mac_speed = dwmac->data->fix_mac_speed;
/* Enable TX clock */
if (dwmac->data->tx_clk_en) {
@@ -132,6 +111,9 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
}
}
+ plat_dat->clk_tx_i = dwmac->tx_clk;
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
+
plat_dat->bsp_priv = dwmac;
plat_dat->eee_usecs_rate = plat_dat->clk_ptp_rate;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 48acba5eb178..c8bb9265bbb4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -5,15 +5,30 @@
#include <linux/clk-provider.h>
#include <linux/pci.h>
#include <linux/dmi.h>
+#include <linux/platform_data/x86/intel_pmc_ipc.h>
#include "dwmac-intel.h"
#include "dwmac4.h"
#include "stmmac.h"
#include "stmmac_ptp.h"
+struct pmc_serdes_regs {
+ u8 index;
+ u32 val;
+};
+
+struct pmc_serdes_reg_info {
+ const struct pmc_serdes_regs *regs;
+ u8 num_regs;
+};
+
struct intel_priv_data {
int mdio_adhoc_addr; /* mdio address for serdes & etc */
unsigned long crossts_adj;
bool is_pse;
+ const int *tsn_lane_regs;
+ int max_tsn_lane_regs;
+ struct pmc_serdes_reg_info pid_1g;
+ struct pmc_serdes_reg_info pid_2p5g;
};
/* This struct is used to associate PCI Function of MAC controller on a board,
@@ -35,6 +50,45 @@ struct stmmac_pci_info {
int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
};
+static const struct pmc_serdes_regs pid_modphy3_1g_regs[] = {
+ { PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_1G },
+ { PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_1G },
+ { PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_1G },
+ { PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_1G },
+ { PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_1G },
+ {}
+};
+
+static const struct pmc_serdes_regs pid_modphy3_2p5g_regs[] = {
+ { PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_2P5G },
+ { PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_2P5G },
+ { PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_2P5G },
+ { PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_2P5G },
+ { PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G },
+ {}
+};
+
+static const struct pmc_serdes_regs pid_modphy1_1g_regs[] = {
+ { PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_1G },
+ { PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_1G },
+ { PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_1G },
+ { PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_1G },
+ { PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_1G },
+ {}
+};
+
+static const struct pmc_serdes_regs pid_modphy1_2p5g_regs[] = {
+ { PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_2P5G },
+ { PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_2P5G },
+ { PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_2P5G },
+ { PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_2P5G },
+ { PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G },
+ {}
+};
+
+static const int ehl_tsn_lane_regs[] = {7, 8, 9, 10, 11};
+static const int adln_tsn_lane_regs[] = {6};
+
static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
const struct dmi_system_id *dmi_list)
{
@@ -93,7 +147,7 @@ static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
data &= ~SERDES_RATE_MASK;
data &= ~SERDES_PCLK_MASK;
- if (priv->plat->max_speed == 2500)
+ if (priv->plat->phy_interface == PHY_INTERFACE_MODE_2500BASEX)
data |= SERDES_RATE_PCIE_GEN2 << SERDES_RATE_PCIE_SHIFT |
SERDES_PCLK_37p5MHZ << SERDES_PCLK_SHIFT;
else
@@ -415,6 +469,95 @@ static void intel_mgbe_pse_crossts_adj(struct intel_priv_data *intel_priv,
}
}
+static int intel_tsn_lane_is_available(struct net_device *ndev,
+ struct intel_priv_data *intel_priv)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct pmc_ipc_cmd tmp = {};
+ struct pmc_ipc_rbuf rbuf = {};
+ int ret = 0, i, j;
+ const int max_fia_regs = 5;
+
+ tmp.cmd = IPC_SOC_REGISTER_ACCESS;
+ tmp.sub_cmd = IPC_SOC_SUB_CMD_READ;
+
+ for (i = 0; i < max_fia_regs; i++) {
+ tmp.wbuf[0] = R_PCH_FIA_15_PCR_LOS1_REG_BASE + i;
+
+ ret = intel_pmc_ipc(&tmp, &rbuf);
+ if (ret < 0) {
+ netdev_info(priv->dev, "Failed to read from PMC.\n");
+ return ret;
+ }
+
+		for (j = 0; j < intel_priv->max_tsn_lane_regs; j++)
+ if ((rbuf.buf[0] >>
+ (4 * (intel_priv->tsn_lane_regs[j] % 8)) &
+ B_PCH_FIA_PCR_L0O) == 0xB)
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int intel_set_reg_access(const struct pmc_serdes_regs *regs, int max_regs)
+{
+ int ret = 0, i;
+
+ for (i = 0; i < max_regs; i++) {
+ struct pmc_ipc_cmd tmp = {};
+ struct pmc_ipc_rbuf rbuf = {};
+
+ tmp.cmd = IPC_SOC_REGISTER_ACCESS;
+ tmp.sub_cmd = IPC_SOC_SUB_CMD_WRITE;
+ tmp.wbuf[0] = (u32)regs[i].index;
+ tmp.wbuf[1] = regs[i].val;
+
+ ret = intel_pmc_ipc(&tmp, &rbuf);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int intel_mac_finish(struct net_device *ndev,
+ void *intel_data,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct intel_priv_data *intel_priv = intel_data;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ const struct pmc_serdes_regs *regs;
+ int max_regs = 0;
+ int ret = 0;
+
+ ret = intel_tsn_lane_is_available(ndev, intel_priv);
+ if (ret < 0) {
+ netdev_info(priv->dev, "No TSN lane available to set the registers.\n");
+ return ret;
+ }
+
+ if (interface == PHY_INTERFACE_MODE_2500BASEX) {
+ regs = intel_priv->pid_2p5g.regs;
+ max_regs = intel_priv->pid_2p5g.num_regs;
+ } else {
+ regs = intel_priv->pid_1g.regs;
+ max_regs = intel_priv->pid_1g.num_regs;
+ }
+
+ ret = intel_set_reg_access(regs, max_regs);
+ if (ret < 0)
+ return ret;
+
+ priv->plat->phy_interface = interface;
+
+ intel_serdes_powerdown(ndev, intel_priv);
+ intel_serdes_powerup(ndev, intel_priv);
+
+ return ret;
+}
+
static void common_default_data(struct plat_stmmacenet_data *plat)
{
plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
@@ -624,6 +767,8 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
static int ehl_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
plat->rx_queues_to_use = 8;
plat->tx_queues_to_use = 8;
plat->flags |= STMMAC_FLAG_USE_PHY_WOL;
@@ -639,20 +784,29 @@ static int ehl_common_data(struct pci_dev *pdev,
plat->safety_feat_cfg->prtyen = 0;
plat->safety_feat_cfg->tmouten = 0;
+ intel_priv->tsn_lane_regs = ehl_tsn_lane_regs;
+ intel_priv->max_tsn_lane_regs = ARRAY_SIZE(ehl_tsn_lane_regs);
+
return intel_mgbe_common_data(pdev, plat);
}
static int ehl_sgmii_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
plat->bus_id = 1;
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
- plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
-
+ plat->mac_finish = intel_mac_finish;
plat->clk_ptp_rate = 204800000;
+ intel_priv->pid_1g.regs = pid_modphy3_1g_regs;
+ intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy3_1g_regs);
+ intel_priv->pid_2p5g.regs = pid_modphy3_2p5g_regs;
+ intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy3_2p5g_regs);
+
return ehl_common_data(pdev, plat);
}
@@ -705,10 +859,18 @@ static struct stmmac_pci_info ehl_pse0_rgmii1g_info = {
static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
- plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
+ plat->mac_finish = intel_mac_finish;
+
+ intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
+ intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
+ intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
+ intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
+
return ehl_pse0_common_data(pdev, plat);
}
@@ -746,10 +908,18 @@ static struct stmmac_pci_info ehl_pse1_rgmii1g_info = {
static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
- plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
+ plat->mac_finish = intel_mac_finish;
+
+ intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
+ intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
+ intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
+ intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
+
return ehl_pse1_common_data(pdev, plat);
}
@@ -835,6 +1005,55 @@ static int adls_sgmii_phy1_data(struct pci_dev *pdev,
static struct stmmac_pci_info adls_sgmii1g_phy1_info = {
.setup = adls_sgmii_phy1_data,
};
+
+static int adln_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
+ plat->rx_queues_to_use = 6;
+ plat->tx_queues_to_use = 4;
+ plat->clk_ptp_rate = 204800000;
+
+ plat->safety_feat_cfg->tsoee = 1;
+ plat->safety_feat_cfg->mrxpee = 0;
+ plat->safety_feat_cfg->mestee = 1;
+ plat->safety_feat_cfg->mrxee = 1;
+ plat->safety_feat_cfg->mtxee = 1;
+ plat->safety_feat_cfg->epsi = 0;
+ plat->safety_feat_cfg->edpp = 0;
+ plat->safety_feat_cfg->prtyen = 0;
+ plat->safety_feat_cfg->tmouten = 0;
+
+ intel_priv->tsn_lane_regs = adln_tsn_lane_regs;
+ intel_priv->max_tsn_lane_regs = ARRAY_SIZE(adln_tsn_lane_regs);
+
+ return intel_mgbe_common_data(pdev, plat);
+}
+
+static int adln_sgmii_phy0_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
+ plat->bus_id = 1;
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ plat->serdes_powerup = intel_serdes_powerup;
+ plat->serdes_powerdown = intel_serdes_powerdown;
+ plat->mac_finish = intel_mac_finish;
+
+ intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
+ intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
+ intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
+ intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
+
+ return adln_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info adln_sgmii1g_phy0_info = {
+ .setup = adln_sgmii_phy0_data,
+};
+
static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
{
.func = 6,
@@ -1217,8 +1436,8 @@ static const struct pci_device_id intel_eth_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) },
{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) },
{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) },
- { PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &tgl_sgmii1g_phy0_info) },
- { PCI_DEVICE_DATA(INTEL, RPLP_SGMII1G, &tgl_sgmii1g_phy0_info) },
+ { PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &adln_sgmii1g_phy0_info) },
+ { PCI_DEVICE_DATA(INTEL, RPLP_SGMII1G, &adln_sgmii1g_phy0_info) },
{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
index 0a37987478c1..a12f8e65f89f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
@@ -50,4 +50,33 @@
#define PCH_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0)
#define PCH_PTP_CLK_FREQ_200MHZ (0)
+/* Modphy Register index */
+#define R_PCH_FIA_15_PCR_LOS1_REG_BASE 8
+#define R_PCH_FIA_15_PCR_LOS2_REG_BASE 9
+#define R_PCH_FIA_15_PCR_LOS3_REG_BASE 10
+#define R_PCH_FIA_15_PCR_LOS4_REG_BASE 11
+#define R_PCH_FIA_15_PCR_LOS5_REG_BASE 12
+#define B_PCH_FIA_PCR_L0O GENMASK(3, 0)
+#define PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0 13
+#define PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2 14
+#define PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7 15
+#define PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10 16
+#define PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30 17
+#define PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0 18
+#define PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2 19
+#define PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7 20
+#define PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10 21
+#define PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30 22
+
+#define B_MODPHY_PCR_LCPLL_DWORD0_1G 0x46AAAA41
+#define N_MODPHY_PCR_LCPLL_DWORD2_1G 0x00000139
+#define N_MODPHY_PCR_LCPLL_DWORD7_1G 0x002A0003
+#define N_MODPHY_PCR_LPPLL_DWORD10_1G 0x00170008
+#define N_MODPHY_PCR_CMN_ANA_DWORD30_1G 0x0000D4AC
+#define B_MODPHY_PCR_LCPLL_DWORD0_2P5G 0x58555551
+#define N_MODPHY_PCR_LCPLL_DWORD2_2P5G 0x0000012D
+#define N_MODPHY_PCR_LCPLL_DWORD7_2P5G 0x001F0003
+#define N_MODPHY_PCR_LPPLL_DWORD10_2P5G 0x00170008
+#define N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G 0x8200ACAC
+
#endif /* __DWMAC_INTEL_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 61227dcf56dc..ca4035cbb55b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -112,7 +112,7 @@ struct ipq806x_gmac {
phy_interface_t phy_mode;
};
-static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, unsigned int speed)
+static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, int speed)
{
struct device *dev = &gmac->pdev->dev;
int div;
@@ -138,7 +138,7 @@ static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, unsigned int speed)
return div;
}
-static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, unsigned int speed)
+static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, int speed)
{
struct device *dev = &gmac->pdev->dev;
int div;
@@ -164,7 +164,7 @@ static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, unsigned int speed)
return div;
}
-static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed)
+static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, int speed)
{
uint32_t clk_bits, val;
int div;
@@ -211,16 +211,12 @@ static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed)
return 0;
}
-static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
+static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac,
+ struct plat_stmmacenet_data *plat_dat)
{
struct device *dev = &gmac->pdev->dev;
- int ret;
- ret = of_get_phy_mode(dev->of_node, &gmac->phy_mode);
- if (ret) {
- dev_err(dev, "missing phy mode property\n");
- return -EINVAL;
- }
+ gmac->phy_mode = plat_dat->phy_interface;
if (of_property_read_u32(dev->of_node, "qcom,id", &gmac->id) < 0) {
dev_err(dev, "missing qcom id property\n");
@@ -260,11 +256,12 @@ static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
return PTR_ERR_OR_ZERO(gmac->qsgmii_csr);
}
-static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static int ipq806x_gmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- struct ipq806x_gmac *gmac = priv;
+ struct ipq806x_gmac *gmac = bsp_priv;
- ipq806x_gmac_set_speed(gmac, speed);
+ return ipq806x_gmac_set_speed(gmac, speed);
}
static int
@@ -397,7 +394,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
gmac->pdev = pdev;
- err = ipq806x_gmac_of_parse(gmac);
+ err = ipq806x_gmac_of_parse(gmac, plat_dat);
if (err) {
dev_err(dev, "device tree parsing error\n");
return err;
@@ -478,7 +475,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
plat_dat->has_gmac = true;
plat_dat->bsp_priv = gmac;
- plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
+ plat_dat->set_clk_tx_rate = ipq806x_gmac_set_clk_tx_rate;
plat_dat->multicast_filter_bins = 0;
plat_dat->tx_fifo_size = 8192;
plat_dat->rx_fifo_size = 8192;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index ab7c2750c104..1a93787056a7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -151,8 +151,7 @@ static struct stmmac_pci_info loongson_gmac_pci_info = {
.setup = loongson_gmac_data,
};
-static void loongson_gnet_fix_speed(void *priv, unsigned int speed,
- unsigned int mode)
+static void loongson_gnet_fix_speed(void *priv, int speed, unsigned int mode)
{
struct loongson_data *ld = (struct loongson_data *)priv;
struct net_device *ndev = dev_get_drvdata(ld->dev);
@@ -534,10 +533,10 @@ static int loongson_dwmac_fix_reset(void *priv, void __iomem *ioaddr)
static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct plat_stmmacenet_data *plat;
+ struct stmmac_resources res = {};
struct stmmac_pci_info *info;
- struct stmmac_resources res;
struct loongson_data *ld;
- int ret, i;
+ int ret;
plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
if (!plat)
@@ -567,17 +566,10 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
pci_set_master(pdev);
/* Get the base address of device */
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (pci_resource_len(pdev, i) == 0)
- continue;
- ret = pcim_iomap_regions(pdev, BIT(0), DRIVER_NAME);
- if (ret)
- goto err_disable_device;
- break;
- }
-
- memset(&res, 0, sizeof(res));
- res.addr = pcim_iomap_table(pdev)[0];
+ res.addr = pcim_iomap_region(pdev, 0, DRIVER_NAME);
+ ret = PTR_ERR_OR_ZERO(res.addr);
+ if (ret)
+ goto err_disable_device;
plat->bsp_priv = ld;
plat->setup = loongson_dwmac_setup;
@@ -590,6 +582,9 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
if (ret)
goto err_disable_device;
+ plat->tx_fifo_size = SZ_16K * plat->tx_queues_to_use;
+ plat->rx_fifo_size = SZ_16K * plat->rx_queues_to_use;
+
if (dev_of_node(&pdev->dev))
ret = loongson_dwmac_dt_config(pdev, plat, &res);
else
@@ -622,7 +617,6 @@ static void loongson_dwmac_remove(struct pci_dev *pdev)
struct net_device *ndev = dev_get_drvdata(&pdev->dev);
struct stmmac_priv *priv = netdev_priv(ndev);
struct loongson_data *ld;
- int i;
ld = priv->plat->bsp_priv;
stmmac_dvr_remove(&pdev->dev);
@@ -633,13 +627,6 @@ static void loongson_dwmac_remove(struct pci_dev *pdev)
if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
loongson_dwmac_msi_clear(pdev);
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (pci_resource_len(pdev, i) == 0)
- continue;
- pcim_iounmap_regions(pdev, BIT(i));
- break;
- }
-
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index c9636832a570..d178d5ddc7c7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -456,7 +456,6 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
{
struct mac_delay_struct *mac_delay = &plat->mac_delay;
u32 tx_delay_ps, rx_delay_ps;
- int err;
plat->peri_regmap = syscon_regmap_lookup_by_phandle(plat->np, "mediatek,pericfg");
if (IS_ERR(plat->peri_regmap)) {
@@ -464,12 +463,6 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
return PTR_ERR(plat->peri_regmap);
}
- err = of_get_phy_mode(plat->np, &plat->phy_mode);
- if (err) {
- dev_err(plat->dev, "not find phy-mode\n");
- return err;
- }
-
if (!of_property_read_u32(plat->np, "mediatek,tx-delay-ps", &tx_delay_ps)) {
if (tx_delay_ps < plat->variant->tx_delay_max) {
mac_delay->tx_delay = tx_delay_ps;
@@ -587,6 +580,7 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev,
{
int i;
+ priv_plat->phy_mode = plat->phy_interface;
plat->mac_interface = priv_plat->phy_mode;
if (priv_plat->mac_wol)
plat->flags &= ~STMMAC_FLAG_USE_PHY_WOL;
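Note: dwmac-mediatek, like several of the glue drivers below, stops calling of_get_phy_mode() itself and instead reuses the interface mode that the stmmac platform code has already parsed into plat_dat->phy_interface. The gist of the simplification, with a hypothetical priv/np used purely for illustration:

	/* before: each glue driver parsed the DT property on its own */
	err = of_get_phy_mode(np, &priv->phy_mode);
	if (err)
		return err;

	/* after: reuse what the stmmac platform core already parsed */
	priv->phy_mode = plat_dat->phy_interface;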
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index 5469fa1b429e..07c504d07604 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -22,9 +22,10 @@ struct meson_dwmac {
void __iomem *reg;
};
-static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static int meson6_dwmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- struct meson_dwmac *dwmac = priv;
+ struct meson_dwmac *dwmac = bsp_priv;
unsigned int val;
val = readl(dwmac->reg);
@@ -39,6 +40,8 @@ static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned
}
writel(val, dwmac->reg);
+
+ return 0;
}
static int meson6_dwmac_probe(struct platform_device *pdev)
@@ -65,7 +68,7 @@ static int meson6_dwmac_probe(struct platform_device *pdev)
return PTR_ERR(dwmac->reg);
plat_dat->bsp_priv = dwmac;
- plat_dat->fix_mac_speed = meson6_dwmac_fix_mac_speed;
+ plat_dat->set_clk_tx_rate = meson6_dwmac_set_clk_tx_rate;
return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 9c2d62d133ad..a50782994b97 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -417,11 +417,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
return PTR_ERR(dwmac->regs);
dwmac->dev = &pdev->dev;
- ret = of_get_phy_mode(pdev->dev.of_node, &dwmac->phy_mode);
- if (ret) {
- dev_err(&pdev->dev, "missing phy-mode property\n");
- return ret;
- }
+ dwmac->phy_mode = plat_dat->phy_interface;
/* use 2ns as fallback since this value was previously hardcoded */
if (of_property_read_u32(pdev->dev.of_node, "amlogic,tx-delay-ns",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 2a5b38723635..0e4da216f942 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -111,7 +111,7 @@ struct qcom_ethqos {
unsigned int link_clk_rate;
struct clk *link_clk;
struct phy *serdes_phy;
- unsigned int speed;
+ int speed;
int serdes_speed;
phy_interface_t phy_mode;
@@ -169,30 +169,17 @@ static void rgmii_dump(void *priv)
rgmii_readl(ethqos, EMAC_SYSTEM_LOW_POWER_DEBUG));
}
-/* Clock rates */
-#define RGMII_1000_NOM_CLK_FREQ (250 * 1000 * 1000UL)
-#define RGMII_ID_MODE_100_LOW_SVS_CLK_FREQ (50 * 1000 * 1000UL)
-#define RGMII_ID_MODE_10_LOW_SVS_CLK_FREQ (5 * 1000 * 1000UL)
-
static void
-ethqos_update_link_clk(struct qcom_ethqos *ethqos, unsigned int speed)
+ethqos_update_link_clk(struct qcom_ethqos *ethqos, int speed)
{
+ long rate;
+
if (!phy_interface_mode_is_rgmii(ethqos->phy_mode))
return;
- switch (speed) {
- case SPEED_1000:
- ethqos->link_clk_rate = RGMII_1000_NOM_CLK_FREQ;
- break;
-
- case SPEED_100:
- ethqos->link_clk_rate = RGMII_ID_MODE_100_LOW_SVS_CLK_FREQ;
- break;
-
- case SPEED_10:
- ethqos->link_clk_rate = RGMII_ID_MODE_10_LOW_SVS_CLK_FREQ;
- break;
- }
+ rate = rgmii_clock(speed);
+ if (rate > 0)
+ ethqos->link_clk_rate = rate * 2;
clk_set_rate(ethqos->link_clk, ethqos->link_clk_rate);
}
@@ -699,7 +686,7 @@ static int ethqos_configure(struct qcom_ethqos *ethqos)
return ethqos->configure_func(ethqos);
}
-static void ethqos_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static void ethqos_fix_mac_speed(void *priv, int speed, unsigned int mode)
{
struct qcom_ethqos *ethqos = priv;
@@ -807,9 +794,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
if (!ethqos)
return -ENOMEM;
- ret = of_get_phy_mode(np, &ethqos->phy_mode);
- if (ret)
- return dev_err_probe(dev, ret, "Failed to get phy mode\n");
+ ethqos->phy_mode = plat_dat->phy_interface;
switch (ethqos->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
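Note: the per-speed RGMII clock-rate #defines are replaced by the shared rgmii_clock() helper; since the ethqos link clock runs at twice the RGMII TX rate, the driver doubles the result, which preserves the previous 250/50/5 MHz rates for 1000/100/10 Mb/s. A small worked example of that mapping, assuming link_clk is the driver's struct clk pointer:

	long rate = rgmii_clock(SPEED_1000);		/* 125000000 */

	if (rate > 0)
		clk_set_rate(link_clk, rate * 2);	/* 250 MHz, same as the old #define */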
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index a4dc89e23a68..700858ff6f7c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -33,6 +33,8 @@ struct rk_gmac_ops {
void (*set_clock_selection)(struct rk_priv_data *bsp_priv, bool input,
bool enable);
void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv);
+ void (*integrated_phy_powerdown)(struct rk_priv_data *bsp_priv);
+ bool php_grf_required;
bool regs_valid;
u32 regs[];
};
@@ -91,6 +93,76 @@ struct rk_priv_data {
(((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
+#define RK_GRF_MACPHY_CON0 0xb00
+#define RK_GRF_MACPHY_CON1 0xb04
+#define RK_GRF_MACPHY_CON2 0xb08
+#define RK_GRF_MACPHY_CON3 0xb0c
+
+#define RK_MACPHY_ENABLE GRF_BIT(0)
+#define RK_MACPHY_DISABLE GRF_CLR_BIT(0)
+#define RK_MACPHY_CFG_CLK_50M GRF_BIT(14)
+#define RK_GMAC2PHY_RMII_MODE (GRF_BIT(6) | GRF_CLR_BIT(7))
+#define RK_GRF_CON2_MACPHY_ID HIWORD_UPDATE(0x1234, 0xffff, 0)
+#define RK_GRF_CON3_MACPHY_ID HIWORD_UPDATE(0x35, 0x3f, 0)
+
+static void rk_gmac_integrated_ephy_powerup(struct rk_priv_data *priv)
+{
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_CFG_CLK_50M);
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_GMAC2PHY_RMII_MODE);
+
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON2, RK_GRF_CON2_MACPHY_ID);
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON3, RK_GRF_CON3_MACPHY_ID);
+
+ if (priv->phy_reset) {
+ /* PHY needs to be disabled before trying to reset it */
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE);
+ if (priv->phy_reset)
+ reset_control_assert(priv->phy_reset);
+ usleep_range(10, 20);
+ if (priv->phy_reset)
+ reset_control_deassert(priv->phy_reset);
+ usleep_range(10, 20);
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_ENABLE);
+ msleep(30);
+ }
+}
+
+static void rk_gmac_integrated_ephy_powerdown(struct rk_priv_data *priv)
+{
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE);
+ if (priv->phy_reset)
+ reset_control_assert(priv->phy_reset);
+}
+
+#define RK_FEPHY_SHUTDOWN GRF_BIT(1)
+#define RK_FEPHY_POWERUP GRF_CLR_BIT(1)
+#define RK_FEPHY_INTERNAL_RMII_SEL GRF_BIT(6)
+#define RK_FEPHY_24M_CLK_SEL (GRF_BIT(8) | GRF_BIT(9))
+#define RK_FEPHY_PHY_ID GRF_BIT(11)
+
+static void rk_gmac_integrated_fephy_powerup(struct rk_priv_data *priv,
+ unsigned int reg)
+{
+ reset_control_assert(priv->phy_reset);
+ usleep_range(20, 30);
+
+ regmap_write(priv->grf, reg,
+ RK_FEPHY_POWERUP |
+ RK_FEPHY_INTERNAL_RMII_SEL |
+ RK_FEPHY_24M_CLK_SEL |
+ RK_FEPHY_PHY_ID);
+ usleep_range(10000, 12000);
+
+ reset_control_deassert(priv->phy_reset);
+ usleep_range(50000, 60000);
+}
+
+static void rk_gmac_integrated_fephy_powerdown(struct rk_priv_data *priv,
+ unsigned int reg)
+{
+ regmap_write(priv->grf, reg, RK_FEPHY_SHUTDOWN);
+}
+
#define PX30_GRF_GMAC_CON1 0x0904
/* PX30_GRF_GMAC_CON1 */
@@ -101,13 +173,6 @@ struct rk_priv_data {
static void px30_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
PX30_GMAC_PHY_INTF_SEL_RMII);
}
@@ -181,13 +246,6 @@ static const struct rk_gmac_ops px30_ops = {
static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
RK3128_GMAC_PHY_INTF_SEL_RGMII |
RK3128_GMAC_RMII_MODE_CLR);
@@ -199,13 +257,6 @@ static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3128_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
RK3128_GMAC_PHY_INTF_SEL_RMII | RK3128_GMAC_RMII_MODE);
}
@@ -214,11 +265,6 @@ static void rk3128_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
RK3128_GMAC_CLK_2_5M);
@@ -236,11 +282,6 @@ static void rk3128_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10) {
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
RK3128_GMAC_RMII_CLK_2_5M |
@@ -297,13 +338,6 @@ static const struct rk_gmac_ops rk3128_ops = {
static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
RK3228_GMAC_PHY_INTF_SEL_RGMII |
RK3228_GMAC_RMII_MODE_CLR |
@@ -316,13 +350,6 @@ static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3228_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
RK3228_GMAC_PHY_INTF_SEL_RMII |
RK3228_GMAC_RMII_MODE);
@@ -335,11 +362,6 @@ static void rk3228_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
RK3228_GMAC_CLK_2_5M);
@@ -357,11 +379,6 @@ static void rk3228_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
RK3228_GMAC_RMII_CLK_2_5M |
@@ -378,6 +395,8 @@ static void rk3228_integrated_phy_powerup(struct rk_priv_data *priv)
{
regmap_write(priv->grf, RK3228_GRF_CON_MUX,
RK3228_GRF_CON_MUX_GMAC_INTEGRATED_PHY);
+
+ rk_gmac_integrated_ephy_powerup(priv);
}
static const struct rk_gmac_ops rk3228_ops = {
@@ -385,7 +404,8 @@ static const struct rk_gmac_ops rk3228_ops = {
.set_to_rmii = rk3228_set_to_rmii,
.set_rgmii_speed = rk3228_set_rgmii_speed,
.set_rmii_speed = rk3228_set_rmii_speed,
- .integrated_phy_powerup = rk3228_integrated_phy_powerup,
+ .integrated_phy_powerup = rk3228_integrated_phy_powerup,
+ .integrated_phy_powerdown = rk_gmac_integrated_ephy_powerdown,
};
#define RK3288_GRF_SOC_CON1 0x0248
@@ -419,13 +439,6 @@ static const struct rk_gmac_ops rk3228_ops = {
static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
RK3288_GMAC_PHY_INTF_SEL_RGMII |
RK3288_GMAC_RMII_MODE_CLR);
@@ -437,13 +450,6 @@ static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3288_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
RK3288_GMAC_PHY_INTF_SEL_RMII | RK3288_GMAC_RMII_MODE);
}
@@ -452,11 +458,6 @@ static void rk3288_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
RK3288_GMAC_CLK_2_5M);
@@ -474,11 +475,6 @@ static void rk3288_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10) {
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
RK3288_GMAC_RMII_CLK_2_5M |
@@ -511,13 +507,6 @@ static const struct rk_gmac_ops rk3288_ops = {
static void rk3308_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
RK3308_GMAC_PHY_INTF_SEL_RMII);
}
@@ -526,11 +515,6 @@ static void rk3308_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10) {
regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
RK3308_GMAC_SPEED_10M);
@@ -583,13 +567,6 @@ static const struct rk_gmac_ops rk3308_ops = {
static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
RK3328_GMAC_PHY_INTF_SEL_RGMII |
RK3328_GMAC_RMII_MODE_CLR |
@@ -603,14 +580,8 @@ static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3328_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
unsigned int reg;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
reg = bsp_priv->integrated_phy ? RK3328_GRF_MAC_CON2 :
RK3328_GRF_MAC_CON1;
@@ -623,11 +594,6 @@ static void rk3328_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
RK3328_GMAC_CLK_2_5M);
@@ -646,11 +612,6 @@ static void rk3328_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
struct device *dev = &bsp_priv->pdev->dev;
unsigned int reg;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
reg = bsp_priv->integrated_phy ? RK3328_GRF_MAC_CON2 :
RK3328_GRF_MAC_CON1;
@@ -670,6 +631,8 @@ static void rk3328_integrated_phy_powerup(struct rk_priv_data *priv)
{
regmap_write(priv->grf, RK3328_GRF_MACPHY_CON1,
RK3328_MACPHY_RMII_MODE);
+
+ rk_gmac_integrated_ephy_powerup(priv);
}
static const struct rk_gmac_ops rk3328_ops = {
@@ -677,7 +640,8 @@ static const struct rk_gmac_ops rk3328_ops = {
.set_to_rmii = rk3328_set_to_rmii,
.set_rgmii_speed = rk3328_set_rgmii_speed,
.set_rmii_speed = rk3328_set_rmii_speed,
- .integrated_phy_powerup = rk3328_integrated_phy_powerup,
+ .integrated_phy_powerup = rk3328_integrated_phy_powerup,
+ .integrated_phy_powerdown = rk_gmac_integrated_ephy_powerdown,
};
#define RK3366_GRF_SOC_CON6 0x0418
@@ -711,13 +675,6 @@ static const struct rk_gmac_ops rk3328_ops = {
static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
RK3366_GMAC_PHY_INTF_SEL_RGMII |
RK3366_GMAC_RMII_MODE_CLR);
@@ -729,13 +686,6 @@ static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3366_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
RK3366_GMAC_PHY_INTF_SEL_RMII | RK3366_GMAC_RMII_MODE);
}
@@ -744,11 +694,6 @@ static void rk3366_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
RK3366_GMAC_CLK_2_5M);
@@ -766,11 +711,6 @@ static void rk3366_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
if (speed == 10) {
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
RK3366_GMAC_RMII_CLK_2_5M |
@@ -822,13 +762,6 @@ static const struct rk_gmac_ops rk3366_ops = {
static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
RK3368_GMAC_PHY_INTF_SEL_RGMII |
RK3368_GMAC_RMII_MODE_CLR);
@@ -840,13 +773,6 @@ static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3368_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
RK3368_GMAC_PHY_INTF_SEL_RMII | RK3368_GMAC_RMII_MODE);
}
@@ -855,11 +781,6 @@ static void rk3368_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
RK3368_GMAC_CLK_2_5M);
@@ -877,11 +798,6 @@ static void rk3368_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
if (speed == 10) {
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
RK3368_GMAC_RMII_CLK_2_5M |
@@ -933,13 +849,6 @@ static const struct rk_gmac_ops rk3368_ops = {
static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
RK3399_GMAC_PHY_INTF_SEL_RGMII |
RK3399_GMAC_RMII_MODE_CLR);
@@ -951,13 +860,6 @@ static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3399_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
RK3399_GMAC_PHY_INTF_SEL_RMII | RK3399_GMAC_RMII_MODE);
}
@@ -966,11 +868,6 @@ static void rk3399_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
RK3399_GMAC_CLK_2_5M);
@@ -988,11 +885,6 @@ static void rk3399_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
if (speed == 10) {
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
RK3399_GMAC_RMII_CLK_2_5M |
@@ -1013,6 +905,149 @@ static const struct rk_gmac_ops rk3399_ops = {
.set_rmii_speed = rk3399_set_rmii_speed,
};
+#define RK3528_VO_GRF_GMAC_CON 0x0018
+#define RK3528_VO_GRF_MACPHY_CON0 0x001c
+#define RK3528_VO_GRF_MACPHY_CON1 0x0020
+#define RK3528_VPU_GRF_GMAC_CON5 0x0018
+#define RK3528_VPU_GRF_GMAC_CON6 0x001c
+
+#define RK3528_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
+#define RK3528_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
+#define RK3528_GMAC_TXCLK_DLY_ENABLE GRF_BIT(14)
+#define RK3528_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
+
+#define RK3528_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 8)
+#define RK3528_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 0)
+
+#define RK3528_GMAC0_PHY_INTF_SEL_RMII GRF_BIT(1)
+#define RK3528_GMAC1_PHY_INTF_SEL_RGMII GRF_CLR_BIT(8)
+#define RK3528_GMAC1_PHY_INTF_SEL_RMII GRF_BIT(8)
+
+#define RK3528_GMAC1_CLK_SELECT_CRU GRF_CLR_BIT(12)
+#define RK3528_GMAC1_CLK_SELECT_IO GRF_BIT(12)
+
+#define RK3528_GMAC0_CLK_RMII_DIV2 GRF_BIT(3)
+#define RK3528_GMAC0_CLK_RMII_DIV20 GRF_CLR_BIT(3)
+#define RK3528_GMAC1_CLK_RMII_DIV2 GRF_BIT(10)
+#define RK3528_GMAC1_CLK_RMII_DIV20 GRF_CLR_BIT(10)
+
+#define RK3528_GMAC1_CLK_RGMII_DIV1 (GRF_CLR_BIT(11) | GRF_CLR_BIT(10))
+#define RK3528_GMAC1_CLK_RGMII_DIV5 (GRF_BIT(11) | GRF_BIT(10))
+#define RK3528_GMAC1_CLK_RGMII_DIV50 (GRF_BIT(11) | GRF_CLR_BIT(10))
+
+#define RK3528_GMAC0_CLK_RMII_GATE GRF_BIT(2)
+#define RK3528_GMAC0_CLK_RMII_NOGATE GRF_CLR_BIT(2)
+#define RK3528_GMAC1_CLK_RMII_GATE GRF_BIT(9)
+#define RK3528_GMAC1_CLK_RMII_NOGATE GRF_CLR_BIT(9)
+
+static void rk3528_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ RK3528_GMAC1_PHY_INTF_SEL_RGMII);
+
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ DELAY_ENABLE(RK3528, tx_delay, rx_delay));
+
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON6,
+ RK3528_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3528_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3528_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ if (bsp_priv->id == 1)
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ RK3528_GMAC1_PHY_INTF_SEL_RMII);
+ else
+ regmap_write(bsp_priv->grf, RK3528_VO_GRF_GMAC_CON,
+ RK3528_GMAC0_PHY_INTF_SEL_RMII |
+ RK3528_GMAC0_CLK_RMII_DIV2);
+}
+
+static void rk3528_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ RK3528_GMAC1_CLK_RGMII_DIV50);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ RK3528_GMAC1_CLK_RGMII_DIV5);
+ else if (speed == 1000)
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ RK3528_GMAC1_CLK_RGMII_DIV1);
+ else
+ dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+}
+
+static void rk3528_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned int reg, val;
+
+ if (speed == 10)
+ val = bsp_priv->id == 1 ? RK3528_GMAC1_CLK_RMII_DIV20 :
+ RK3528_GMAC0_CLK_RMII_DIV20;
+ else if (speed == 100)
+ val = bsp_priv->id == 1 ? RK3528_GMAC1_CLK_RMII_DIV2 :
+ RK3528_GMAC0_CLK_RMII_DIV2;
+ else {
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ return;
+ }
+
+ reg = bsp_priv->id == 1 ? RK3528_VPU_GRF_GMAC_CON5 :
+ RK3528_VO_GRF_GMAC_CON;
+
+ regmap_write(bsp_priv->grf, reg, val);
+}
+
+static void rk3528_set_clock_selection(struct rk_priv_data *bsp_priv,
+ bool input, bool enable)
+{
+ unsigned int val;
+
+ if (bsp_priv->id == 1) {
+ val = input ? RK3528_GMAC1_CLK_SELECT_IO :
+ RK3528_GMAC1_CLK_SELECT_CRU;
+ val |= enable ? RK3528_GMAC1_CLK_RMII_NOGATE :
+ RK3528_GMAC1_CLK_RMII_GATE;
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5, val);
+ } else {
+ val = enable ? RK3528_GMAC0_CLK_RMII_NOGATE :
+ RK3528_GMAC0_CLK_RMII_GATE;
+ regmap_write(bsp_priv->grf, RK3528_VO_GRF_GMAC_CON, val);
+ }
+}
+
+static void rk3528_integrated_phy_powerup(struct rk_priv_data *bsp_priv)
+{
+ rk_gmac_integrated_fephy_powerup(bsp_priv, RK3528_VO_GRF_MACPHY_CON0);
+}
+
+static void rk3528_integrated_phy_powerdown(struct rk_priv_data *bsp_priv)
+{
+ rk_gmac_integrated_fephy_powerdown(bsp_priv, RK3528_VO_GRF_MACPHY_CON0);
+}
+
+static const struct rk_gmac_ops rk3528_ops = {
+ .set_to_rgmii = rk3528_set_to_rgmii,
+ .set_to_rmii = rk3528_set_to_rmii,
+ .set_rgmii_speed = rk3528_set_rgmii_speed,
+ .set_rmii_speed = rk3528_set_rmii_speed,
+ .set_clock_selection = rk3528_set_clock_selection,
+ .integrated_phy_powerup = rk3528_integrated_phy_powerup,
+ .integrated_phy_powerdown = rk3528_integrated_phy_powerdown,
+ .regs_valid = true,
+ .regs = {
+ 0xffbd0000, /* gmac0 */
+ 0xffbe0000, /* gmac1 */
+ 0x0, /* sentinel */
+ },
+};
+
#define RK3568_GRF_GMAC0_CON0 0x0380
#define RK3568_GRF_GMAC0_CON1 0x0384
#define RK3568_GRF_GMAC1_CON0 0x0388
@@ -1037,14 +1072,8 @@ static const struct rk_gmac_ops rk3399_ops = {
static void rk3568_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
u32 con0, con1;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
con0 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON0 :
RK3568_GRF_GMAC0_CON0;
con1 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON1 :
@@ -1062,14 +1091,8 @@ static void rk3568_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3568_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
u32 con1;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
con1 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON1 :
RK3568_GRF_GMAC0_CON1;
regmap_write(bsp_priv->grf, con1, RK3568_GMAC_PHY_INTF_SEL_RMII);
@@ -1147,14 +1170,8 @@ static const struct rk_gmac_ops rk3568_ops = {
static void rk3576_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
unsigned int offset_con;
- if (IS_ERR(bsp_priv->grf) || IS_ERR(bsp_priv->php_grf)) {
- dev_err(dev, "Missing rockchip,grf or rockchip,php-grf property\n");
- return;
- }
-
offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
RK3576_GRF_GMAC_CON0;
@@ -1180,14 +1197,8 @@ static void rk3576_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3576_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
unsigned int offset_con;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
RK3576_GRF_GMAC_CON0;
@@ -1254,6 +1265,7 @@ static const struct rk_gmac_ops rk3576_ops = {
.set_rgmii_speed = rk3576_set_gmac_speed,
.set_rmii_speed = rk3576_set_gmac_speed,
.set_clock_selection = rk3576_set_clock_selection,
+ .php_grf_required = true,
.regs_valid = true,
.regs = {
0x2a220000, /* gmac0 */
@@ -1306,14 +1318,8 @@ static const struct rk_gmac_ops rk3576_ops = {
static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
u32 offset_con, id = bsp_priv->id;
- if (IS_ERR(bsp_priv->grf) || IS_ERR(bsp_priv->php_grf)) {
- dev_err(dev, "Missing rockchip,grf or rockchip,php_grf property\n");
- return;
- }
-
offset_con = bsp_priv->id == 1 ? RK3588_GRF_GMAC_CON9 :
RK3588_GRF_GMAC_CON8;
@@ -1334,13 +1340,6 @@ static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3588_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->php_grf)) {
- dev_err(dev, "%s: Missing rockchip,php_grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0,
RK3588_GMAC_PHY_INTF_SEL_RMII(bsp_priv->id));
@@ -1401,6 +1400,7 @@ static const struct rk_gmac_ops rk3588_ops = {
.set_rgmii_speed = rk3588_set_gmac_speed,
.set_rmii_speed = rk3588_set_gmac_speed,
.set_clock_selection = rk3588_set_clock_selection,
+ .php_grf_required = true,
.regs_valid = true,
.regs = {
0xfe1b0000, /* gmac0 */
@@ -1423,13 +1423,6 @@ static const struct rk_gmac_ops rk3588_ops = {
static void rv1108_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0,
RV1108_GMAC_PHY_INTF_SEL_RMII);
}
@@ -1438,11 +1431,6 @@ static void rv1108_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
if (speed == 10) {
regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0,
RV1108_GMAC_RMII_CLK_2_5M |
@@ -1491,13 +1479,6 @@ static const struct rk_gmac_ops rv1108_ops = {
static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
RV1126_GMAC_PHY_INTF_SEL_RGMII |
RV1126_GMAC_M0_RXCLK_DLY_ENABLE |
@@ -1516,13 +1497,6 @@ static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rv1126_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
RV1126_GMAC_PHY_INTF_SEL_RMII);
}
@@ -1578,50 +1552,6 @@ static const struct rk_gmac_ops rv1126_ops = {
.set_rmii_speed = rv1126_set_rmii_speed,
};
-#define RK_GRF_MACPHY_CON0 0xb00
-#define RK_GRF_MACPHY_CON1 0xb04
-#define RK_GRF_MACPHY_CON2 0xb08
-#define RK_GRF_MACPHY_CON3 0xb0c
-
-#define RK_MACPHY_ENABLE GRF_BIT(0)
-#define RK_MACPHY_DISABLE GRF_CLR_BIT(0)
-#define RK_MACPHY_CFG_CLK_50M GRF_BIT(14)
-#define RK_GMAC2PHY_RMII_MODE (GRF_BIT(6) | GRF_CLR_BIT(7))
-#define RK_GRF_CON2_MACPHY_ID HIWORD_UPDATE(0x1234, 0xffff, 0)
-#define RK_GRF_CON3_MACPHY_ID HIWORD_UPDATE(0x35, 0x3f, 0)
-
-static void rk_gmac_integrated_phy_powerup(struct rk_priv_data *priv)
-{
- if (priv->ops->integrated_phy_powerup)
- priv->ops->integrated_phy_powerup(priv);
-
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_CFG_CLK_50M);
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_GMAC2PHY_RMII_MODE);
-
- regmap_write(priv->grf, RK_GRF_MACPHY_CON2, RK_GRF_CON2_MACPHY_ID);
- regmap_write(priv->grf, RK_GRF_MACPHY_CON3, RK_GRF_CON3_MACPHY_ID);
-
- if (priv->phy_reset) {
- /* PHY needs to be disabled before trying to reset it */
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE);
- if (priv->phy_reset)
- reset_control_assert(priv->phy_reset);
- usleep_range(10, 20);
- if (priv->phy_reset)
- reset_control_deassert(priv->phy_reset);
- usleep_range(10, 20);
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_ENABLE);
- msleep(30);
- }
-}
-
-static void rk_gmac_integrated_phy_powerdown(struct rk_priv_data *priv)
-{
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE);
- if (priv->phy_reset)
- reset_control_assert(priv->phy_reset);
-}
-
static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
{
struct rk_priv_data *bsp_priv = plat->bsp_priv;
@@ -1749,7 +1679,7 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
if (!bsp_priv)
return ERR_PTR(-ENOMEM);
- of_get_phy_mode(dev->of_node, &bsp_priv->phy_iface);
+ bsp_priv->phy_iface = plat->phy_interface;
bsp_priv->ops = ops;
/* Some SoCs have multiple MAC controllers, which need
@@ -1812,8 +1742,22 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
bsp_priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
"rockchip,grf");
- bsp_priv->php_grf = syscon_regmap_lookup_by_phandle(dev->of_node,
- "rockchip,php-grf");
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err_probe(dev, PTR_ERR(bsp_priv->grf),
+ "failed to lookup rockchip,grf\n");
+ return ERR_CAST(bsp_priv->grf);
+ }
+
+ if (ops->php_grf_required) {
+ bsp_priv->php_grf =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,php-grf");
+ if (IS_ERR(bsp_priv->php_grf)) {
+ dev_err_probe(dev, PTR_ERR(bsp_priv->php_grf),
+ "failed to lookup rockchip,php-grf\n");
+ return ERR_CAST(bsp_priv->php_grf);
+ }
+ }
if (plat->phy_node) {
bsp_priv->integrated_phy = of_property_read_bool(plat->phy_node,
@@ -1903,16 +1847,16 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
pm_runtime_get_sync(dev);
- if (bsp_priv->integrated_phy)
- rk_gmac_integrated_phy_powerup(bsp_priv);
+ if (bsp_priv->integrated_phy && bsp_priv->ops->integrated_phy_powerup)
+ bsp_priv->ops->integrated_phy_powerup(bsp_priv);
return 0;
}
static void rk_gmac_powerdown(struct rk_priv_data *gmac)
{
- if (gmac->integrated_phy)
- rk_gmac_integrated_phy_powerdown(gmac);
+ if (gmac->integrated_phy && gmac->ops->integrated_phy_powerdown)
+ gmac->ops->integrated_phy_powerdown(gmac);
pm_runtime_put_sync(&gmac->pdev->dev);
@@ -1920,9 +1864,10 @@ static void rk_gmac_powerdown(struct rk_priv_data *gmac)
gmac_clk_enable(gmac, false);
}
-static void rk_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static int rk_set_clk_tx_rate(void *bsp_priv_, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- struct rk_priv_data *bsp_priv = priv;
+ struct rk_priv_data *bsp_priv = bsp_priv_;
struct device *dev = &bsp_priv->pdev->dev;
switch (bsp_priv->phy_iface) {
@@ -1940,6 +1885,8 @@ static void rk_fix_speed(void *priv, unsigned int speed, unsigned int mode)
default:
dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
}
+
+ return 0;
}
static int rk_gmac_probe(struct platform_device *pdev)
@@ -1966,9 +1913,13 @@ static int rk_gmac_probe(struct platform_device *pdev)
/* If the stmmac is not already selected as gmac4,
* then make sure we fallback to gmac.
*/
- if (!plat_dat->has_gmac4)
+ if (!plat_dat->has_gmac4) {
plat_dat->has_gmac = true;
- plat_dat->fix_mac_speed = rk_fix_speed;
+ plat_dat->rx_fifo_size = 4096;
+ plat_dat->tx_fifo_size = 2048;
+ }
+
+ plat_dat->set_clk_tx_rate = rk_set_clk_tx_rate;
plat_dat->bsp_priv = rk_gmac_setup(pdev, plat_dat, data);
if (IS_ERR(plat_dat->bsp_priv))
@@ -2044,6 +1995,7 @@ static const struct of_device_id rk_gmac_dwmac_match[] = {
{ .compatible = "rockchip,rk3366-gmac", .data = &rk3366_ops },
{ .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
{ .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops },
+ { .compatible = "rockchip,rk3528-gmac", .data = &rk3528_ops },
{ .compatible = "rockchip,rk3568-gmac", .data = &rk3568_ops },
{ .compatible = "rockchip,rk3576-gmac", .data = &rk3576_ops },
{ .compatible = "rockchip,rk3588-gmac", .data = &rk3588_ops },
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
index 9cc0e5817416..221539d760bc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
@@ -100,24 +100,6 @@ static void s32_gmac_exit(struct platform_device *pdev, void *priv)
clk_disable_unprepare(gmac->rx_clk);
}
-static void s32_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
-{
- struct s32_priv_data *gmac = priv;
- long tx_clk_rate;
- int ret;
-
- tx_clk_rate = rgmii_clock(speed);
- if (tx_clk_rate < 0) {
- dev_err(gmac->dev, "Unsupported/Invalid speed: %d\n", speed);
- return;
- }
-
- dev_dbg(gmac->dev, "Set tx clock to %ld Hz\n", tx_clk_rate);
- ret = clk_set_rate(gmac->tx_clk, tx_clk_rate);
- if (ret)
- dev_err(gmac->dev, "Can't set tx clock\n");
-}
-
static int s32_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat;
@@ -172,7 +154,9 @@ static int s32_dwmac_probe(struct platform_device *pdev)
plat->init = s32_gmac_init;
plat->exit = s32_gmac_exit;
- plat->fix_mac_speed = s32_fix_mac_speed;
+
+ plat->clk_tx_i = gmac->tx_clk;
+ plat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
plat->bsp_priv = gmac;
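Note: for glue drivers whose TX clock is an ordinary clk that only needs clk_set_rate() at the RGMII rate, the driver-local speed callback disappears entirely; the clock is handed to the core through plat->clk_tx_i and set_clk_tx_rate points at the generic stmmac_set_clk_tx_rate() helper (the Sophgo and StarFive glue below follow the same pattern). The gist, assuming a "tx" clock described in DT:

	plat->clk_tx_i = devm_clk_get_enabled(&pdev->dev, "tx");
	if (IS_ERR(plat->clk_tx_i))
		return PTR_ERR(plat->clk_tx_i);

	/* generic helper: expected to map speed via rgmii_clock() and clk_set_rate() */
	plat->set_clk_tx_rate = stmmac_set_clk_tx_rate;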
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 16020b72dec8..116855658559 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -61,7 +61,7 @@ struct socfpga_dwmac {
struct mdio_device *pcs_mdiodev;
};
-static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static void socfpga_dwmac_fix_mac_speed(void *priv, int speed, unsigned int mode)
{
struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv;
void __iomem *splitter_base = dwmac->splitter_base;
@@ -523,24 +523,6 @@ static int socfpga_dwmac_resume(struct device *dev)
dwmac_priv->ops->set_phy_mode(priv->plat->bsp_priv);
- /* Before the enet controller is suspended, the phy is suspended.
- * This causes the phy clock to be gated. The enet controller is
- * resumed before the phy, so the clock is still gated "off" when
- * the enet controller is resumed. This code makes sure the phy
- * is "resumed" before reinitializing the enet controller since
- * the enet controller depends on an active phy clock to complete
- * a DMA reset. A DMA reset will "time out" if executed
- * with no phy clock input on the Synopsys enet controller.
- * Verified through Synopsys Case #8000711656.
- *
- * Note that the phy clock is also gated when the phy is isolated.
- * Phy "suspend" and "isolate" controls are located in phy basic
- * control register 0, and can be modified by the phy driver
- * framework.
- */
- if (ndev->phydev)
- phy_resume(ndev->phydev);
-
return stmmac_resume(dev);
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c
new file mode 100644
index 000000000000..3303784cbbf8
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Sophgo DWMAC platform driver
+ *
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+
+#include "stmmac_platform.h"
+
+static int sophgo_sg2044_dwmac_init(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *stmmac_res)
+{
+ plat_dat->clk_tx_i = devm_clk_get_enabled(&pdev->dev, "tx");
+ if (IS_ERR(plat_dat->clk_tx_i))
+ return dev_err_probe(&pdev->dev, PTR_ERR(plat_dat->clk_tx_i),
+ "failed to get tx clock\n");
+
+ plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE;
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
+ plat_dat->multicast_filter_bins = 0;
+ plat_dat->unicast_filter_entries = 1;
+
+ return 0;
+}
+
+static int sophgo_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get platform resources\n");
+
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return dev_err_probe(dev, PTR_ERR(plat_dat),
+ "failed to parse DT parameters\n");
+
+ ret = sophgo_sg2044_dwmac_init(pdev, plat_dat, &stmmac_res);
+ if (ret)
+ return ret;
+
+ return stmmac_dvr_probe(dev, plat_dat, &stmmac_res);
+}
+
+static const struct of_device_id sophgo_dwmac_match[] = {
+ { .compatible = "sophgo,sg2044-dwmac" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sophgo_dwmac_match);
+
+static struct platform_driver sophgo_dwmac_driver = {
+ .probe = sophgo_dwmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "sophgo-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = sophgo_dwmac_match,
+ },
+};
+module_platform_driver(sophgo_dwmac_driver);
+
+MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>");
+MODULE_DESCRIPTION("Sophgo DWMAC platform driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
index 0a0a363d3730..2013d7477eb7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
@@ -27,27 +27,9 @@ struct starfive_dwmac_data {
struct starfive_dwmac {
struct device *dev;
- struct clk *clk_tx;
const struct starfive_dwmac_data *data;
};
-static void starfive_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
-{
- struct starfive_dwmac *dwmac = priv;
- long rate;
- int err;
-
- rate = rgmii_clock(speed);
- if (rate < 0) {
- dev_err(dwmac->dev, "invalid speed %u\n", speed);
- return;
- }
-
- err = clk_set_rate(dwmac->clk_tx, rate);
- if (err)
- dev_err(dwmac->dev, "failed to set tx rate %lu\n", rate);
-}
-
static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
{
struct starfive_dwmac *dwmac = plat_dat->bsp_priv;
@@ -122,9 +104,9 @@ static int starfive_dwmac_probe(struct platform_device *pdev)
dwmac->data = device_get_match_data(&pdev->dev);
- dwmac->clk_tx = devm_clk_get_enabled(&pdev->dev, "tx");
- if (IS_ERR(dwmac->clk_tx))
- return dev_err_probe(&pdev->dev, PTR_ERR(dwmac->clk_tx),
+ plat_dat->clk_tx_i = devm_clk_get_enabled(&pdev->dev, "tx");
+ if (IS_ERR(plat_dat->clk_tx_i))
+ return dev_err_probe(&pdev->dev, PTR_ERR(plat_dat->clk_tx_i),
"error getting tx clock\n");
clk_gtx = devm_clk_get_enabled(&pdev->dev, "gtx");
@@ -139,9 +121,10 @@ static int starfive_dwmac_probe(struct platform_device *pdev)
* internally, because rgmii_rxin will be adaptively adjusted.
*/
if (!device_property_read_bool(&pdev->dev, "starfive,tx-use-rgmii-clk"))
- plat_dat->fix_mac_speed = starfive_dwmac_fix_mac_speed;
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
dwmac->dev = &pdev->dev;
+ plat_dat->flags |= STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP;
plat_dat->bsp_priv = dwmac;
plat_dat->dma_cfg->dche = true;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index f25461c292fe..be57c6c12c1c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -99,12 +99,12 @@ struct sti_dwmac {
int clk_sel_reg; /* GMAC ext clk selection register */
struct regmap *regmap;
bool gmac_en;
- u32 speed;
- void (*fix_retime_src)(void *priv, unsigned int speed, unsigned int mode);
+ int speed;
+ void (*fix_retime_src)(void *priv, int speed, unsigned int mode);
};
struct sti_dwmac_of_data {
- void (*fix_retime_src)(void *priv, unsigned int speed, unsigned int mode);
+ void (*fix_retime_src)(void *priv, int speed, unsigned int mode);
};
static u32 phy_intf_sels[] = {
@@ -132,7 +132,7 @@ static u32 stih4xx_tx_retime_val[] = {
| STIH4XX_ETH_SEL_INTERNAL_NOTEXT_PHYCLK,
};
-static void stih4xx_fix_retime_src(void *priv, u32 spd, unsigned int mode)
+static void stih4xx_fix_retime_src(void *priv, int spd, unsigned int mode)
{
struct sti_dwmac *dwmac = priv;
u32 src = dwmac->tx_retime_src;
@@ -185,7 +185,8 @@ static int sti_dwmac_set_mode(struct sti_dwmac *dwmac)
}
static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
- struct platform_device *pdev)
+ struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat)
{
struct resource *res;
struct device *dev = &pdev->dev;
@@ -204,12 +205,7 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- err = of_get_phy_mode(np, &dwmac->interface);
- if (err && err != -ENODEV) {
- dev_err(dev, "Can't get phy-mode\n");
- return err;
- }
-
+ dwmac->interface = plat_dat->phy_interface;
dwmac->regmap = regmap;
dwmac->gmac_en = of_property_read_bool(np, "st,gmac_en");
dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
@@ -268,7 +264,7 @@ static int sti_dwmac_probe(struct platform_device *pdev)
if (!dwmac)
return -ENOMEM;
- ret = sti_dwmac_parse_data(dwmac, pdev);
+ ret = sti_dwmac_parse_data(dwmac, pdev, plat_dat);
if (ret) {
dev_err(&pdev->dev, "Unable to parse OF data\n");
return ret;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 1fcb74e9e3ff..c3d321192581 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -538,6 +538,7 @@ static int stm32_dwmac_probe(struct platform_device *pdev)
return ret;
}
+ plat_dat->flags |= STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP;
plat_dat->bsp_priv = dwmac;
ret = stm32_dwmac_init(plat_dat, false);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 4b7b2582a120..85723a78793a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -1155,11 +1155,10 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
struct stmmac_resources stmmac_res;
struct sunxi_priv_data *gmac;
struct device *dev = &pdev->dev;
- phy_interface_t interface;
- int ret;
struct stmmac_priv *priv;
struct net_device *ndev;
struct regmap *regmap;
+ int ret;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
if (ret)
@@ -1219,10 +1218,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
return ret;
}
- ret = of_get_phy_mode(dev->of_node, &interface);
- if (ret)
- return -EINVAL;
-
plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
@@ -1230,7 +1225,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
/* platform data specifying hardware features and callbacks.
* hardware features were copied from Allwinner drivers.
*/
- plat_dat->mac_interface = interface;
plat_dat->rx_coe = STMMAC_RX_COE_TYPE2;
plat_dat->tx_coe = 1;
plat_dat->flags |= STMMAC_FLAG_HAS_SUN8I;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 9ae318436c4a..9f098ff0ff05 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -72,7 +72,7 @@ static void sun7i_gmac_exit(struct platform_device *pdev, void *priv)
regulator_disable(gmac->regulator);
}
-static void sun7i_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static void sun7i_fix_speed(void *priv, int speed, unsigned int mode)
{
struct sunxi_priv_data *gmac = priv;
@@ -116,11 +116,7 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
if (!gmac)
return -ENOMEM;
- ret = of_get_phy_mode(dev->of_node, &gmac->interface);
- if (ret && ret != -ENODEV) {
- dev_err(dev, "Can't get phy-mode\n");
- return ret;
- }
+ gmac->interface = plat_dat->phy_interface;
gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
if (IS_ERR(gmac->tx_clk)) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
index dce84ed184e9..c72ee759aae5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
@@ -45,9 +45,6 @@
#define TXCLK_DIR_OUTPUT FIELD_PREP(TXCLK_DIR_MASK, 0)
#define TXCLK_DIR_INPUT FIELD_PREP(TXCLK_DIR_MASK, 1)
-#define GMAC_GMII_RGMII_RATE 125000000
-#define GMAC_MII_RATE 25000000
-
struct thead_dwmac {
struct plat_stmmacenet_data *plat;
void __iomem *apb_base;
@@ -104,11 +101,13 @@ static int thead_dwmac_set_txclk_dir(struct plat_stmmacenet_data *plat)
return 0;
}
-static void thead_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static int thead_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
+ struct thead_dwmac *dwmac = bsp_priv;
struct plat_stmmacenet_data *plat;
- struct thead_dwmac *dwmac = priv;
unsigned long rate;
+ long tx_rate;
u32 div, reg;
plat = dwmac->plat;
@@ -116,44 +115,37 @@ static void thead_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int m
switch (plat->mac_interface) {
/* For MII, rxc/txc is provided by phy */
case PHY_INTERFACE_MODE_MII:
- return;
+ return 0;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
rate = clk_get_rate(plat->stmmac_clk);
- if (!rate || rate % GMAC_GMII_RGMII_RATE != 0 ||
- rate % GMAC_MII_RATE != 0) {
- dev_err(dwmac->dev, "invalid gmac rate %ld\n", rate);
- return;
- }
writel(0, dwmac->apb_base + GMAC_PLLCLK_DIV);
- switch (speed) {
- case SPEED_1000:
- div = rate / GMAC_GMII_RGMII_RATE;
- break;
- case SPEED_100:
- div = rate / GMAC_MII_RATE;
- break;
- case SPEED_10:
- div = rate * 10 / GMAC_MII_RATE;
- break;
- default:
- dev_err(dwmac->dev, "invalid speed %u\n", speed);
- return;
+ tx_rate = rgmii_clock(speed);
+ if (tx_rate < 0) {
+ dev_err(dwmac->dev, "invalid speed %d\n", speed);
+ return tx_rate;
+ }
+
+ div = rate / tx_rate;
+ if (rate != tx_rate * div) {
+ dev_err(dwmac->dev, "invalid gmac rate %lu\n", rate);
+ return -EINVAL;
}
reg = FIELD_PREP(GMAC_PLLCLK_DIV_EN, 1) |
FIELD_PREP(GMAC_PLLCLK_DIV_NUM, div);
writel(reg, dwmac->apb_base + GMAC_PLLCLK_DIV);
- break;
+ return 0;
+
default:
dev_err(dwmac->dev, "unsupported phy interface %d\n",
plat->mac_interface);
- return;
+ return -EINVAL;
}
}
@@ -245,7 +237,7 @@ static int thead_dwmac_probe(struct platform_device *pdev)
dwmac->plat = plat;
dwmac->apb_base = apb;
plat->bsp_priv = dwmac;
- plat->fix_mac_speed = thead_dwmac_fix_speed;
+ plat->set_clk_tx_rate = thead_set_clk_tx_rate;
plat->init = thead_dwmac_init;
return devm_stmmac_pltfr_probe(pdev, plat, &stmmac_res);
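Note: the T-HEAD glue now derives its PLL divider from rgmii_clock() instead of hardcoded 125 MHz / 25 MHz rates, and rejects parent rates that do not divide evenly. A worked example mirroring the new logic, assuming a hypothetical 500 MHz stmmac_clk:

	long tx_rate = rgmii_clock(SPEED_1000);	/* 125000000 */
	unsigned long rate = 500000000;		/* hypothetical parent clock rate */
	u32 div = rate / tx_rate;		/* 4 */

	if (rate != tx_rate * div)		/* a 300 MHz parent would fail here */
		return -EINVAL;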
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
index eccf7f537467..33cf99797df5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -54,7 +54,7 @@ struct visconti_eth {
spinlock_t lock; /* lock to protect register update */
};
-static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static void visconti_eth_fix_mac_speed(void *priv, int speed, unsigned int mode)
{
struct visconti_eth *dwmac = priv;
struct net_device *netdev = dev_get_drvdata(dwmac->dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 600fea8f712f..967a16212faf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -59,22 +59,11 @@ enum power_event {
/* Energy Efficient Ethernet (EEE)
*
* LPI status, timer and control register offset
+ * For LPI control and status bit definitions, see common.h.
*/
#define LPI_CTRL_STATUS 0x0030
#define LPI_TIMER_CTRL 0x0034
-/* LPI control and status defines */
-#define LPI_CTRL_STATUS_LPITXA 0x00080000 /* Enable LPI TX Automate */
-#define LPI_CTRL_STATUS_PLSEN 0x00040000 /* Enable PHY Link Status */
-#define LPI_CTRL_STATUS_PLS 0x00020000 /* PHY Link Status */
-#define LPI_CTRL_STATUS_LPIEN 0x00010000 /* LPI Enable */
-#define LPI_CTRL_STATUS_RLPIST 0x00000200 /* Receive LPI state */
-#define LPI_CTRL_STATUS_TLPIST 0x00000100 /* Transmit LPI state */
-#define LPI_CTRL_STATUS_RLPIEX 0x00000008 /* Receive LPI Exit */
-#define LPI_CTRL_STATUS_RLPIEN 0x00000004 /* Receive LPI Entry */
-#define LPI_CTRL_STATUS_TLPIEX 0x00000002 /* Transmit LPI Exit */
-#define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */
-
/* GMAC HW ADDR regs */
#define GMAC_ADDR_HIGH(reg) ((reg > 15) ? 0x00000800 + (reg - 16) * 8 : \
0x00000040 + (reg * 8))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 96bcda0856ec..a8b901cdf5cb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
+#include <linux/string_choices.h>
#include "stmmac.h"
#include "stmmac_pcs.h"
#include "stmmac_ptp.h"
@@ -342,31 +343,24 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
return ret;
}
-static void dwmac1000_set_eee_mode(struct mac_device_info *hw,
- bool en_tx_lpi_clockgating)
+static int dwmac1000_set_lpi_mode(struct mac_device_info *hw,
+ enum stmmac_lpi_mode mode,
+ bool en_tx_lpi_clockgating, u32 et)
{
void __iomem *ioaddr = hw->pcsr;
u32 value;
- /*TODO - en_tx_lpi_clockgating treatment */
+ if (mode == STMMAC_LPI_TIMER)
+ return -EOPNOTSUPP;
- /* Enable the link status receive on RGMII, SGMII ore SMII
- * receive path and instruct the transmit to enter in LPI
- * state.
- */
value = readl(ioaddr + LPI_CTRL_STATUS);
- value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
+ if (mode == STMMAC_LPI_FORCED)
+ value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
+ else
+ value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA);
writel(value, ioaddr + LPI_CTRL_STATUS);
-}
-
-static void dwmac1000_reset_eee_mode(struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
- value = readl(ioaddr + LPI_CTRL_STATUS);
- value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA);
- writel(value, ioaddr + LPI_CTRL_STATUS);
+ return 0;
}
static void dwmac1000_set_eee_pls(struct mac_device_info *hw, int link)
@@ -509,8 +503,7 @@ const struct stmmac_ops dwmac1000_ops = {
.pmt = dwmac1000_pmt,
.set_umac_addr = dwmac1000_set_umac_addr,
.get_umac_addr = dwmac1000_get_umac_addr,
- .set_eee_mode = dwmac1000_set_eee_mode,
- .reset_eee_mode = dwmac1000_reset_eee_mode,
+ .set_lpi_mode = dwmac1000_set_lpi_mode,
.set_eee_timer = dwmac1000_set_eee_timer,
.set_eee_pls = dwmac1000_set_eee_pls,
.debug = dwmac1000_debug,
@@ -633,7 +626,7 @@ int dwmac1000_ptp_enable(struct ptp_clock_info *ptp,
}
netdev_dbg(priv->dev, "Auxiliary Snapshot %s.\n",
- on ? "enabled" : "disabled");
+ str_enabled_disabled(on));
writel(tcr_val, ptpaddr + PTP_TCR);
/* wait for auxts fifo clear to finish */
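Note: the dwmac1000 and dwmac4 EEE changes replace the separate set_eee_mode/reset_eee_mode/set_eee_lpi_entry_timer hooks with a single set_lpi_mode() operation that takes an enum (disable, forced, or timer-driven LPI entry) plus the TX clock-gating flag and entry-timer value, with the LPI_CTRL_STATUS bit definitions now shared via common.h. dwmac1000 only implements forced mode and returns -EOPNOTSUPP for the timer; dwmac4 (further below) programs GMAC4_LPI_ENTRY_TIMER, bounds-checks it against STMMAC_ET_MAX, and sets LPIATE when the timer is non-zero. A sketch of driving the new hook directly through the ops table; the entry-timer value is an arbitrary example and the real call sites live in the stmmac core, not here:

	int ret;

	ret = hw->mac->set_lpi_mode(hw, STMMAC_LPI_TIMER, true, 100);
	if (ret == -EOPNOTSUPP)			/* e.g. dwmac1000: no HW entry timer */
		ret = hw->mac->set_lpi_mode(hw, STMMAC_LPI_FORCED, true, 0);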
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 184d41a306af..42fe29a4e300 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -177,23 +177,13 @@ enum power_event {
/* Energy Efficient Ethernet (EEE) for GMAC4
*
* LPI status, timer and control register offset
+ * For LPI control and status bit definitions, see common.h.
*/
#define GMAC4_LPI_CTRL_STATUS 0xd0
#define GMAC4_LPI_TIMER_CTRL 0xd4
#define GMAC4_LPI_ENTRY_TIMER 0xd8
#define GMAC4_MAC_ONEUS_TIC_COUNTER 0xdc
-/* LPI control and status defines */
-#define GMAC4_LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable */
-#define GMAC4_LPI_CTRL_STATUS_LPIATE BIT(20) /* LPI Timer Enable */
-#define GMAC4_LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */
-#define GMAC4_LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */
-#define GMAC4_LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */
-#define GMAC4_LPI_CTRL_STATUS_RLPIEX BIT(3) /* Receive LPI Exit */
-#define GMAC4_LPI_CTRL_STATUS_RLPIEN BIT(2) /* Receive LPI Entry */
-#define GMAC4_LPI_CTRL_STATUS_TLPIEX BIT(1) /* Transmit LPI Exit */
-#define GMAC4_LPI_CTRL_STATUS_TLPIEN BIT(0) /* Transmit LPI Entry */
-
/* MAC Debug bitmap */
#define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17)
#define GMAC_DEBUG_TFCSTS_SHIFT 17
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 9ed8620580a8..cc4ddf608652 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -376,33 +376,46 @@ static void dwmac4_get_umac_addr(struct mac_device_info *hw,
GMAC_ADDR_LOW(reg_n));
}
-static void dwmac4_set_eee_mode(struct mac_device_info *hw,
- bool en_tx_lpi_clockgating)
+static int dwmac4_set_lpi_mode(struct mac_device_info *hw,
+ enum stmmac_lpi_mode mode,
+ bool en_tx_lpi_clockgating, u32 et)
{
void __iomem *ioaddr = hw->pcsr;
- u32 value;
+ u32 value, mask;
- /* Enable the link status receive on RGMII, SGMII ore SMII
- * receive path and instruct the transmit to enter in LPI
- * state.
- */
- value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
- value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
+ if (mode == STMMAC_LPI_DISABLE) {
+ value = 0;
+ } else {
+ value = LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
- if (en_tx_lpi_clockgating)
- value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;
+ if (mode == STMMAC_LPI_TIMER) {
+ /* Return ERANGE if the timer is larger than the
+ * register field.
+ */
+ if (et > STMMAC_ET_MAX)
+ return -ERANGE;
- writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
-}
+ /* Set the hardware LPI entry timer */
+ writel(et, ioaddr + GMAC4_LPI_ENTRY_TIMER);
-static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
+ /* Interpret a zero LPI entry timer to mean
+ * immediate entry into LPI mode.
+ */
+ if (et)
+ value |= LPI_CTRL_STATUS_LPIATE;
+ }
- value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
- value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
+ if (en_tx_lpi_clockgating)
+ value |= LPI_CTRL_STATUS_LPITCSE;
+ }
+
+ mask = LPI_CTRL_STATUS_LPIATE | LPI_CTRL_STATUS_LPIEN |
+ LPI_CTRL_STATUS_LPITXA | LPI_CTRL_STATUS_LPITCSE;
+
+ value |= readl(ioaddr + GMAC4_LPI_CTRL_STATUS) & ~mask;
writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
+
+ return 0;
}
static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
@@ -413,34 +426,13 @@ static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
if (link)
- value |= GMAC4_LPI_CTRL_STATUS_PLS;
+ value |= LPI_CTRL_STATUS_PLS;
else
- value &= ~GMAC4_LPI_CTRL_STATUS_PLS;
+ value &= ~LPI_CTRL_STATUS_PLS;
writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}
-static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, u32 et)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value = et & STMMAC_ET_MAX;
- int regval;
-
- /* Program LPI entry timer value into register */
- writel(value, ioaddr + GMAC4_LPI_ENTRY_TIMER);
-
- /* Enable/disable LPI entry timer */
- regval = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
- regval |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
-
- if (et)
- regval |= GMAC4_LPI_CTRL_STATUS_LPIATE;
- else
- regval &= ~GMAC4_LPI_CTRL_STATUS_LPIATE;
-
- writel(regval, ioaddr + GMAC4_LPI_CTRL_STATUS);
-}
-
static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
{
void __iomem *ioaddr = hw->pcsr;
@@ -849,17 +841,17 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
/* Clear LPI interrupt by reading MAC_LPI_Control_Status */
u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
- if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) {
+ if (status & LPI_CTRL_STATUS_TLPIEN) {
ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
x->irq_tx_path_in_lpi_mode_n++;
}
- if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) {
+ if (status & LPI_CTRL_STATUS_TLPIEX) {
ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
x->irq_tx_path_exit_lpi_mode_n++;
}
- if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN)
+ if (status & LPI_CTRL_STATUS_RLPIEN)
x->irq_rx_path_in_lpi_mode_n++;
- if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX)
+ if (status & LPI_CTRL_STATUS_RLPIEX)
x->irq_rx_path_exit_lpi_mode_n++;
}
@@ -1201,9 +1193,7 @@ const struct stmmac_ops dwmac4_ops = {
.pmt = dwmac4_pmt,
.set_umac_addr = dwmac4_set_umac_addr,
.get_umac_addr = dwmac4_get_umac_addr,
- .set_eee_mode = dwmac4_set_eee_mode,
- .reset_eee_mode = dwmac4_reset_eee_mode,
- .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
+ .set_lpi_mode = dwmac4_set_lpi_mode,
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
@@ -1245,9 +1235,7 @@ const struct stmmac_ops dwmac410_ops = {
.pmt = dwmac4_pmt,
.set_umac_addr = dwmac4_set_umac_addr,
.get_umac_addr = dwmac4_get_umac_addr,
- .set_eee_mode = dwmac4_set_eee_mode,
- .reset_eee_mode = dwmac4_reset_eee_mode,
- .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
+ .set_lpi_mode = dwmac4_set_lpi_mode,
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
@@ -1291,9 +1279,7 @@ const struct stmmac_ops dwmac510_ops = {
.pmt = dwmac4_pmt,
.set_umac_addr = dwmac4_set_umac_addr,
.get_umac_addr = dwmac4_get_umac_addr,
- .set_eee_mode = dwmac4_set_eee_mode,
- .reset_eee_mode = dwmac4_reset_eee_mode,
- .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
+ .set_lpi_mode = dwmac4_set_lpi_mode,
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 20027d3c25a7..a03f5d771566 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -112,14 +112,7 @@
#define XGMAC_MGKPKTEN BIT(1)
#define XGMAC_PWRDWN BIT(0)
#define XGMAC_LPI_CTRL 0x000000d0
-#define XGMAC_TXCGE BIT(21)
-#define XGMAC_LPITXA BIT(19)
-#define XGMAC_PLS BIT(17)
-#define XGMAC_LPITXEN BIT(16)
-#define XGMAC_RLPIEX BIT(3)
-#define XGMAC_RLPIEN BIT(2)
-#define XGMAC_TLPIEX BIT(1)
-#define XGMAC_TLPIEN BIT(0)
+/* For definitions, see LPI_CTRL_STATUS_xxx in common.h */
#define XGMAC_LPI_TIMER_CTRL 0x000000d4
#define XGMAC_HW_FEATURE0 0x0000011c
#define XGMAC_HWFEAT_EDMA BIT(31)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 9a60a6e8f633..a6d395c6bacd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -316,17 +316,17 @@ static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
if (stat & XGMAC_LPIIS) {
u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL);
- if (lpi & XGMAC_TLPIEN) {
+ if (lpi & LPI_CTRL_STATUS_TLPIEN) {
ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
x->irq_tx_path_in_lpi_mode_n++;
}
- if (lpi & XGMAC_TLPIEX) {
+ if (lpi & LPI_CTRL_STATUS_TLPIEX) {
ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
x->irq_tx_path_exit_lpi_mode_n++;
}
- if (lpi & XGMAC_RLPIEN)
+ if (lpi & LPI_CTRL_STATUS_RLPIEN)
x->irq_rx_path_in_lpi_mode_n++;
- if (lpi & XGMAC_RLPIEX)
+ if (lpi & LPI_CTRL_STATUS_RLPIEX)
x->irq_rx_path_exit_lpi_mode_n++;
}
@@ -425,29 +425,28 @@ static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
addr[5] = (hi_addr >> 8) & 0xff;
}
-static void dwxgmac2_set_eee_mode(struct mac_device_info *hw,
- bool en_tx_lpi_clockgating)
+static int dwxgmac2_set_lpi_mode(struct mac_device_info *hw,
+ enum stmmac_lpi_mode mode,
+ bool en_tx_lpi_clockgating, u32 et)
{
void __iomem *ioaddr = hw->pcsr;
u32 value;
- value = readl(ioaddr + XGMAC_LPI_CTRL);
-
- value |= XGMAC_LPITXEN | XGMAC_LPITXA;
- if (en_tx_lpi_clockgating)
- value |= XGMAC_TXCGE;
-
- writel(value, ioaddr + XGMAC_LPI_CTRL);
-}
-
-static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
+ if (mode == STMMAC_LPI_TIMER)
+ return -EOPNOTSUPP;
value = readl(ioaddr + XGMAC_LPI_CTRL);
- value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE);
+ if (mode == STMMAC_LPI_FORCED) {
+ value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
+ if (en_tx_lpi_clockgating)
+ value |= LPI_CTRL_STATUS_LPITCSE;
+ } else {
+ value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA |
+ LPI_CTRL_STATUS_LPITCSE);
+ }
writel(value, ioaddr + XGMAC_LPI_CTRL);
+
+ return 0;
}
static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
@@ -457,9 +456,9 @@ static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
value = readl(ioaddr + XGMAC_LPI_CTRL);
if (link)
- value |= XGMAC_PLS;
+ value |= LPI_CTRL_STATUS_PLS;
else
- value &= ~XGMAC_PLS;
+ value &= ~LPI_CTRL_STATUS_PLS;
writel(value, ioaddr + XGMAC_LPI_CTRL);
}
@@ -1525,8 +1524,7 @@ const struct stmmac_ops dwxgmac210_ops = {
.pmt = dwxgmac2_pmt,
.set_umac_addr = dwxgmac2_set_umac_addr,
.get_umac_addr = dwxgmac2_get_umac_addr,
- .set_eee_mode = dwxgmac2_set_eee_mode,
- .reset_eee_mode = dwxgmac2_reset_eee_mode,
+ .set_lpi_mode = dwxgmac2_set_lpi_mode,
.set_eee_timer = dwxgmac2_set_eee_timer,
.set_eee_pls = dwxgmac2_set_eee_pls,
.debug = NULL,
@@ -1582,8 +1580,7 @@ const struct stmmac_ops dwxlgmac2_ops = {
.pmt = dwxgmac2_pmt,
.set_umac_addr = dwxgmac2_set_umac_addr,
.get_umac_addr = dwxgmac2_get_umac_addr,
- .set_eee_mode = dwxgmac2_set_eee_mode,
- .reset_eee_mode = dwxgmac2_reset_eee_mode,
+ .set_lpi_mode = dwxgmac2_set_lpi_mode,
.set_eee_timer = dwxgmac2_set_eee_timer,
.set_eee_pls = dwxgmac2_set_eee_pls,
.debug = NULL,
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 0f200b72c225..27c63a9fc163 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -306,6 +306,12 @@ struct stmmac_pps_cfg;
struct stmmac_rss;
struct stmmac_est;
+enum stmmac_lpi_mode {
+ STMMAC_LPI_DISABLE,
+ STMMAC_LPI_FORCED,
+ STMMAC_LPI_TIMER,
+};
+
/* Helpers to program the MAC core */
struct stmmac_ops {
/* MAC core initialization */
@@ -360,10 +366,9 @@ struct stmmac_ops {
unsigned int reg_n);
void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr,
unsigned int reg_n);
- void (*set_eee_mode)(struct mac_device_info *hw,
- bool en_tx_lpi_clockgating);
- void (*reset_eee_mode)(struct mac_device_info *hw);
- void (*set_eee_lpi_entry_timer)(struct mac_device_info *hw, u32 et);
+ int (*set_lpi_mode)(struct mac_device_info *hw,
+ enum stmmac_lpi_mode mode,
+ bool en_tx_lpi_clockgating, u32 et);
void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
void (*set_eee_pls)(struct mac_device_info *hw, int link);
void (*debug)(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -467,12 +472,8 @@ struct stmmac_ops {
stmmac_do_void_callback(__priv, mac, set_umac_addr, __args)
#define stmmac_get_umac_addr(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, get_umac_addr, __args)
-#define stmmac_set_eee_mode(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, set_eee_mode, __args)
-#define stmmac_reset_eee_mode(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, reset_eee_mode, __args)
-#define stmmac_set_eee_lpi_timer(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, set_eee_lpi_entry_timer, __args)
+#define stmmac_set_lpi_mode(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, set_lpi_mode, __args)
#define stmmac_set_eee_timer(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_eee_timer, __args)
#define stmmac_set_eee_pls(__priv, __args...) \
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index f05cae103d83..bddfa0f4aa21 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -106,6 +106,8 @@ struct stmmac_metadata_request {
struct stmmac_priv *priv;
struct dma_desc *tx_desc;
bool *set_ic;
+ struct dma_edesc *edesc;
+ int tbs;
};
struct stmmac_xsk_tx_complete {
@@ -257,7 +259,7 @@ struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
u32 tx_coal_frames[MTL_MAX_TX_QUEUES];
u32 tx_coal_timer[MTL_MAX_TX_QUEUES];
- u32 rx_coal_frames[MTL_MAX_TX_QUEUES];
+ u32 rx_coal_frames[MTL_MAX_RX_QUEUES];
int hwts_tx_en;
bool tx_path_in_lpi_mode;
@@ -265,8 +267,7 @@ struct stmmac_priv {
int sph;
int sph_cap;
u32 sarc_type;
-
- u32 rx_riwt[MTL_MAX_TX_QUEUES];
+ u32 rx_riwt[MTL_MAX_RX_QUEUES];
int hwts_rx_en;
void __iomem *ioaddr;
@@ -281,9 +282,7 @@ struct stmmac_priv {
/* Generic channel for NAPI */
struct stmmac_channel channel[STMMAC_CH_MAX];
- int speed;
- unsigned int flow_ctrl;
- unsigned int pause;
+ unsigned int pause_time;
struct mii_bus *mii;
struct phylink_config phylink_config;
@@ -307,6 +306,7 @@ struct stmmac_priv {
struct timer_list eee_ctrl_timer;
int lpi_irq;
u32 tx_lpi_timer;
+ bool tx_lpi_clk_stop;
bool eee_enabled;
bool eee_active;
bool eee_sw_timer_en;
@@ -343,7 +343,7 @@ struct stmmac_priv {
char int_name_sfty[IFNAMSIZ + 10];
char int_name_sfty_ce[IFNAMSIZ + 10];
char int_name_sfty_ue[IFNAMSIZ + 10];
- char int_name_rx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 14];
+ char int_name_rx_irq[MTL_MAX_RX_QUEUES][IFNAMSIZ + 14];
char int_name_tx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 18];
#ifdef CONFIG_DEBUG_FS
@@ -407,6 +407,8 @@ int stmmac_dvr_probe(struct device *device,
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt);
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size);
int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled);
+int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed);
static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c0ae7db96f46..279532609707 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -88,19 +88,20 @@ MODULE_PARM_DESC(phyaddr, "Physical device address");
#define STMMAC_XDP_TX BIT(1)
#define STMMAC_XDP_REDIRECT BIT(2)
-static int flow_ctrl = FLOW_AUTO;
+static int flow_ctrl = 0xdead;
module_param(flow_ctrl, int, 0644);
-MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
+MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off] (obsolete)");
static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
-MODULE_PARM_DESC(pause, "Flow Control Pause Time");
+MODULE_PARM_DESC(pause, "Flow Control Pause Time (units of 512 bit times)");
#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");
+/* This is unused */
#define DEFAULT_BUFSIZE 1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
@@ -178,6 +179,38 @@ int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
/**
+ * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
+ * @bsp_priv: BSP private data structure (unused)
+ * @clk_tx_i: the transmit clock
+ * @interface: the selected interface mode
+ * @speed: the speed that the MAC will be operating at
+ *
+ * Set the transmit clock rate for the MAC, normally 2.5MHz for 10Mbps,
+ * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least
+ * MII, GMII, RGMII and RMII interface modes. Platforms can hook this into
+ * the plat_data->set_clk_tx_rate method directly, call it via their own
+ * implementation, or implement their own method should they have more
+ * complex requirements. It is intended to only be used in this method.
+ *
+ * plat_data->clk_tx_i must be filled in.
+ */
+int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
+{
+ long rate = rgmii_clock(speed);
+
+ /* Silently ignore unsupported speeds as rgmii_clock() only
+ * supports 10, 100 and 1000Mbps. We do not want to spit
+ * errors for 2500 and higher speeds here.
+ */
+ if (rate < 0)
+ return 0;
+
+ return clk_set_rate(clk_tx_i, rate);
+}
+EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate);
+
+/**
* stmmac_verify_args - verify the driver parameters.
* Description: it checks the driver parameters and set a default in case of
* errors.
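
For platform glue drivers, the new helper above is wired up through plat_stmmacenet_data. The fragment below is a minimal sketch and not taken from this patch: the "dwmac_foo" name and its probe function are hypothetical, and the "tx" clock name is an assumption. Only the two assignments near the end reflect what the kernel-doc requires (clk_tx_i filled in, set_clk_tx_rate pointing at the helper).

/* Hypothetical glue-driver fragment showing how a platform might hook the
 * generic helper into its platform data.
 */
static int dwmac_foo_probe(struct platform_device *pdev)
{
	struct plat_stmmacenet_data *plat_dat;
	struct stmmac_resources stmmac_res;
	int ret;

	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
	if (ret)
		return ret;

	plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
	if (IS_ERR(plat_dat))
		return PTR_ERR(plat_dat);

	/* Transmit clock feeding the MAC ("tx" is an assumed clock name) */
	plat_dat->clk_tx_i = devm_clk_get(&pdev->dev, "tx");
	if (IS_ERR(plat_dat->clk_tx_i))
		return PTR_ERR(plat_dat->clk_tx_i);

	/* Let the stmmac core retune the clock on every link-up */
	plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;

	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
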
@@ -186,14 +219,11 @@ static void stmmac_verify_args(void)
{
if (unlikely(watchdog < 0))
watchdog = TX_TIMEO;
- if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
- buf_sz = DEFAULT_BUFSIZE;
- if (unlikely(flow_ctrl > 1))
- flow_ctrl = FLOW_AUTO;
- else if (likely(flow_ctrl < 0))
- flow_ctrl = FLOW_OFF;
if (unlikely((pause < 0) || (pause > 0xffff)))
pause = PAUSE_TIME;
+
+ if (flow_ctrl != 0xdead)
+ pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n");
}
static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
@@ -390,16 +420,6 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
return dirty;
}
-static void stmmac_disable_hw_lpi_timer(struct stmmac_priv *priv)
-{
- stmmac_set_eee_lpi_timer(priv, priv->hw, 0);
-}
-
-static void stmmac_enable_hw_lpi_timer(struct stmmac_priv *priv)
-{
- stmmac_set_eee_lpi_timer(priv, priv->hw, priv->tx_lpi_timer);
-}
-
static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
@@ -436,8 +456,8 @@ static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
/* Check and enter in LPI mode */
if (!priv->tx_path_in_lpi_mode)
- stmmac_set_eee_mode(priv, priv->hw,
- priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
+ stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED,
+ priv->tx_lpi_clk_stop, 0);
}
/**
@@ -447,8 +467,8 @@ static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
*/
static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
{
- stmmac_reset_eee_mode(priv, priv->hw);
del_timer_sync(&priv->eee_ctrl_timer);
+ stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
priv->tx_path_in_lpi_mode = false;
}
@@ -466,74 +486,6 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)
stmmac_try_to_start_sw_lpi(priv);
}
-/**
- * stmmac_eee_init - init EEE
- * @priv: driver private structure
- * @active: indicates whether EEE should be enabled.
- * Description:
- * if the GMAC supports the EEE (from the HW cap reg) and the phy device
- * can also manage EEE, this function enable the LPI state and start related
- * timer.
- */
-static void stmmac_eee_init(struct stmmac_priv *priv, bool active)
-{
- priv->eee_active = active;
-
- /* Check if MAC core supports the EEE feature. */
- if (!priv->dma_cap.eee) {
- priv->eee_enabled = false;
- return;
- }
-
- mutex_lock(&priv->lock);
-
- /* Check if it needs to be deactivated */
- if (!priv->eee_active) {
- if (priv->eee_enabled) {
- netdev_dbg(priv->dev, "disable EEE\n");
- priv->eee_sw_timer_en = false;
- stmmac_disable_hw_lpi_timer(priv);
- del_timer_sync(&priv->eee_ctrl_timer);
- stmmac_set_eee_timer(priv, priv->hw, 0,
- STMMAC_DEFAULT_TWT_LS);
- if (priv->hw->xpcs)
- xpcs_config_eee(priv->hw->xpcs,
- priv->plat->mult_fact_100ns,
- false);
- }
- priv->eee_enabled = false;
- mutex_unlock(&priv->lock);
- return;
- }
-
- if (priv->eee_active && !priv->eee_enabled) {
- stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
- STMMAC_DEFAULT_TWT_LS);
- if (priv->hw->xpcs)
- xpcs_config_eee(priv->hw->xpcs,
- priv->plat->mult_fact_100ns,
- true);
- }
-
- if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
- /* Use hardware LPI mode */
- del_timer_sync(&priv->eee_ctrl_timer);
- priv->tx_path_in_lpi_mode = false;
- priv->eee_sw_timer_en = false;
- stmmac_enable_hw_lpi_timer(priv);
- } else {
- /* Use software LPI mode */
- priv->eee_sw_timer_en = true;
- stmmac_disable_hw_lpi_timer(priv);
- stmmac_restart_sw_lpi_timer(priv);
- }
-
- priv->eee_enabled = true;
-
- mutex_unlock(&priv->lock);
- netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
-}
-
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
* @priv: driver private structure
* @p : descriptor pointer
@@ -935,14 +887,16 @@ static void stmmac_release_ptp(struct stmmac_priv *priv)
* stmmac_mac_flow_ctrl - Configure flow control in all queues
* @priv: driver private structure
* @duplex: duplex passed to the next function
+ * @flow_ctrl: desired flow control modes
* Description: It is used for configuring the flow control in all queues
*/
-static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
+static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex,
+ unsigned int flow_ctrl)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
- stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
- priv->pause, tx_cnt);
+ stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time,
+ tx_cnt);
}
static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
@@ -1002,7 +956,9 @@ static void stmmac_mac_link_up(struct phylink_config *config,
bool tx_pause, bool rx_pause)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+ unsigned int flow_ctrl;
u32 old_ctrl, ctrl;
+ int ret;
if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
priv->plat->serdes_powerup)
@@ -1070,8 +1026,6 @@ static void stmmac_mac_link_up(struct phylink_config *config,
}
}
- priv->speed = speed;
-
if (priv->plat->fix_mac_speed)
priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
@@ -1082,19 +1036,29 @@ static void stmmac_mac_link_up(struct phylink_config *config,
/* Flow Control operation */
if (rx_pause && tx_pause)
- priv->flow_ctrl = FLOW_AUTO;
+ flow_ctrl = FLOW_AUTO;
else if (rx_pause && !tx_pause)
- priv->flow_ctrl = FLOW_RX;
+ flow_ctrl = FLOW_RX;
else if (!rx_pause && tx_pause)
- priv->flow_ctrl = FLOW_TX;
+ flow_ctrl = FLOW_TX;
else
- priv->flow_ctrl = FLOW_OFF;
+ flow_ctrl = FLOW_OFF;
- stmmac_mac_flow_ctrl(priv, duplex);
+ stmmac_mac_flow_ctrl(priv, duplex, flow_ctrl);
if (ctrl != old_ctrl)
writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+ if (priv->plat->set_clk_tx_rate) {
+ ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv,
+ priv->plat->clk_tx_i,
+ interface, speed);
+ if (ret < 0)
+ netdev_err(priv->dev,
+ "failed to configure transmit clock for %dMbps: %pe\n",
+ speed, ERR_PTR(ret));
+ }
+
stmmac_mac_set(priv, priv->ioaddr, true);
if (priv->dma_cap.eee)
stmmac_set_eee_pls(priv, priv->hw, true);
@@ -1110,16 +1074,70 @@ static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
- stmmac_eee_init(priv, false);
+ priv->eee_active = false;
+
+ mutex_lock(&priv->lock);
+
+ priv->eee_enabled = false;
+
+ netdev_dbg(priv->dev, "disable EEE\n");
+ priv->eee_sw_timer_en = false;
+ del_timer_sync(&priv->eee_ctrl_timer);
+ stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
+ priv->tx_path_in_lpi_mode = false;
+
+ stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS);
+ mutex_unlock(&priv->lock);
}
static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
bool tx_clk_stop)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+ int ret;
priv->tx_lpi_timer = timer;
- stmmac_eee_init(priv, true);
+ priv->eee_active = true;
+
+ mutex_lock(&priv->lock);
+
+ priv->eee_enabled = true;
+
+ /* Update the transmit clock stop according to PHY capability if
+ * the platform allows
+ */
+ if (priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP)
+ priv->tx_lpi_clk_stop = tx_clk_stop;
+
+ stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
+ STMMAC_DEFAULT_TWT_LS);
+
+ /* Try to configure the hardware timer. */
+ ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
+ priv->tx_lpi_clk_stop, priv->tx_lpi_timer);
+
+ if (ret) {
+ /* Hardware timer mode not supported, or value out of range.
+ * Fall back to using software LPI mode
+ */
+ priv->eee_sw_timer_en = true;
+ stmmac_restart_sw_lpi_timer(priv);
+ }
+
+ mutex_unlock(&priv->lock);
+ netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
+
+ return 0;
+}
+
+static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ if (priv->plat->mac_finish)
+ priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface);
return 0;
}
@@ -1132,6 +1150,7 @@ static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
.mac_link_up = stmmac_mac_link_up,
.mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
.mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
+ .mac_finish = stmmac_mac_finish,
};
/**
@@ -1254,6 +1273,10 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI))
priv->phylink_config.eee_rx_clk_stop_enable = true;
+ /* Set the default transmit clock stop bit based on the platform glue */
+ priv->tx_lpi_clk_stop = priv->plat->flags &
+ STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
+
mdio_bus_data = priv->plat->mdio_bus_data;
if (mdio_bus_data)
priv->phylink_config.default_an_inband =
@@ -2524,9 +2547,20 @@ static u64 stmmac_xsk_fill_timestamp(void *_priv)
return 0;
}
+static void stmmac_xsk_request_launch_time(u64 launch_time, void *_priv)
+{
+ struct timespec64 ts = ns_to_timespec64(launch_time);
+ struct stmmac_metadata_request *meta_req = _priv;
+
+ if (meta_req->tbs & STMMAC_TBS_EN)
+ stmmac_set_desc_tbs(meta_req->priv, meta_req->edesc, ts.tv_sec,
+ ts.tv_nsec);
+}
+
static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
.tmo_request_timestamp = stmmac_xsk_request_timestamp,
.tmo_fill_timestamp = stmmac_xsk_fill_timestamp,
+ .tmo_request_launch_time = stmmac_xsk_request_launch_time,
};
static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
@@ -2610,6 +2644,8 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
meta_req.priv = priv;
meta_req.tx_desc = tx_desc;
meta_req.set_ic = &set_ic;
+ meta_req.tbs = tx_q->tbs;
+ meta_req.edesc = &tx_q->dma_entx[entry];
xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
&meta_req);
if (set_ic) {
@@ -3072,7 +3108,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
int ret = 0;
if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
- dev_err(priv->device, "Invalid DMA configuration\n");
+ netdev_err(priv->dev, "Invalid DMA configuration\n");
return -EINVAL;
}
@@ -3081,7 +3117,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
ret = stmmac_reset(priv, priv->ioaddr);
if (ret) {
- dev_err(priv->device, "Failed to reset the dma\n");
+ netdev_err(priv->dev, "Failed to reset the dma\n");
return ret;
}
@@ -3199,8 +3235,7 @@ static void stmmac_init_coalesce(struct stmmac_priv *priv)
priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
- hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- tx_q->txtimer.function = stmmac_tx_timer;
+ hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
for (chan = 0; chan < rx_channel_count; chan++)
@@ -3448,9 +3483,18 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
if (priv->hw->phylink_pcs)
phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
+ /* Note that clk_rx_i must be running for reset to complete. This
+ * clock may also be required when setting the MAC address.
+ *
+ * Block the receive clock stop for LPI mode at the PHY in case
+ * the link is established with EEE mode active.
+ */
+ phylink_rx_clk_stop_block(priv->phylink);
+
/* DMA initialization and SW reset */
ret = stmmac_init_dma_engine(priv);
if (ret < 0) {
+ phylink_rx_clk_stop_unblock(priv->phylink);
netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
__func__);
return ret;
@@ -3458,6 +3502,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
/* Copy the MAC addr into the HW */
stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
+ phylink_rx_clk_stop_unblock(priv->phylink);
/* PS and related bits will be programmed according to the speed */
if (priv->hw->pcs) {
@@ -3568,7 +3613,9 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
/* Start the ball rolling... */
stmmac_start_all_dma(priv);
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_hw_vlan_mode(priv, priv->hw);
+ phylink_rx_clk_stop_unblock(priv->phylink);
return 0;
}
@@ -3640,7 +3687,6 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
enum request_irq_err irq_err;
- cpumask_t cpu_mask;
int irq_idx = 0;
char *int_name;
int ret;
@@ -3769,9 +3815,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
irq_idx = i;
goto irq_error;
}
- cpumask_clear(&cpu_mask);
- cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
- irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
+ irq_set_affinity_hint(priv->rx_irq[i],
+ cpumask_of(i % num_online_cpus()));
}
/* Request Tx MSI irq */
@@ -3794,9 +3839,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
irq_idx = i;
goto irq_error;
}
- cpumask_clear(&cpu_mask);
- cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
- irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
+ irq_set_affinity_hint(priv->tx_irq[i],
+ cpumask_of(i % num_online_cpus()));
}
return 0;
@@ -3997,7 +4041,6 @@ static int __stmmac_open(struct net_device *dev,
}
}
- buf_sz = dma_conf->dma_buf_sz;
for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
@@ -4102,9 +4145,6 @@ static int stmmac_release(struct net_device *dev)
/* Release and free the Rx/Tx resources */
free_dma_desc_resources(priv, &priv->dma_conf);
- /* Disable the MAC Rx/Tx */
- stmmac_mac_set(priv, priv->ioaddr, false);
-
/* Powerdown Serdes if there is */
if (priv->plat->serdes_powerdown)
priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
@@ -5454,10 +5494,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
struct sk_buff *skb = NULL;
struct stmmac_xdp_buff ctx;
int xdp_status = 0;
- int buf_sz;
+ int bufsz;
dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
- buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
+ bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
if (netif_msg_rx_status(priv)) {
@@ -5570,7 +5610,7 @@ read_again:
net_prefetch(page_address(buf->page) +
buf->page_offset);
- xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
+ xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq);
xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
buf->page_offset, buf1_len, true);
@@ -5856,6 +5896,9 @@ static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
* whenever multicast addresses must be enabled/disabled.
* Return value:
* void.
+ *
+ * FIXME: This may need RXC to be running, but it may be called with BH
+ * disabled, which means we can't call phylink_rx_clk_stop*().
*/
static void stmmac_set_rx_mode(struct net_device *dev)
{
@@ -5988,7 +6031,9 @@ static int stmmac_set_features(struct net_device *netdev,
else
priv->hw->hw_vlan_en = false;
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_hw_vlan_mode(priv, priv->hw);
+ phylink_rx_clk_stop_unblock(priv->phylink);
return 0;
}
@@ -6272,7 +6317,9 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
if (ret)
goto set_mac_error;
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
+ phylink_rx_clk_stop_unblock(priv->phylink);
set_mac_error:
pm_runtime_put(priv->device);
@@ -6628,6 +6675,9 @@ static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}
+/* FIXME: This may need RXC to be running, but it may be called with BH
+ * disabled, which means we can't call phylink_rx_clk_stop*().
+ */
static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -6659,6 +6709,9 @@ err_pm_put:
return ret;
}
+/* FIXME: This may need RXC to be running, but it may be called with BH
+ * disabled, which means we can't call phylink_rx_clk_stop*().
+ */
static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -6970,8 +7023,7 @@ int stmmac_xdp_open(struct net_device *dev)
stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
tx_q->tx_tail_addr, chan);
- hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- tx_q->txtimer.function = stmmac_tx_timer;
+ hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
/* Enable the MAC Rx/Tx */
@@ -7444,7 +7496,7 @@ int stmmac_dvr_probe(struct device *device,
return -ENOMEM;
stmmac_set_ethtool_ops(ndev);
- priv->pause = pause;
+ priv->pause_time = pause;
priv->plat = plat_dat;
priv->ioaddr = res->addr;
priv->dev->base_addr = (unsigned long)res->addr;
@@ -7640,9 +7692,6 @@ int stmmac_dvr_probe(struct device *device,
"%s: warning: maxmtu having invalid value (%d)\n",
__func__, priv->plat->maxmtu);
- if (flow_ctrl)
- priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
-
ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
/* Setup channels NAPI */
@@ -7744,8 +7793,6 @@ void stmmac_dvr_remove(struct device *dev)
pm_runtime_get_sync(dev);
- stmmac_stop_all_dma(priv);
- stmmac_mac_set(priv, priv->ioaddr, false);
unregister_netdev(ndev);
#ifdef CONFIG_DEBUG_FS
@@ -7816,19 +7863,16 @@ int stmmac_suspend(struct device *dev)
mutex_unlock(&priv->lock);
rtnl_lock();
- if (device_may_wakeup(priv->device) && priv->plat->pmt) {
- phylink_suspend(priv->phylink, true);
- } else {
- if (device_may_wakeup(priv->device))
- phylink_speed_down(priv->phylink, false);
- phylink_suspend(priv->phylink, false);
- }
+ if (device_may_wakeup(priv->device) && !priv->plat->pmt)
+ phylink_speed_down(priv->phylink, false);
+
+ phylink_suspend(priv->phylink,
+ device_may_wakeup(priv->device) && priv->plat->pmt);
rtnl_unlock();
if (stmmac_fpe_supported(priv))
timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
- priv->speed = SPEED_UNKNOWN;
return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
@@ -7912,16 +7956,12 @@ int stmmac_resume(struct device *dev)
}
rtnl_lock();
- if (device_may_wakeup(priv->device) && priv->plat->pmt) {
- phylink_resume(priv->phylink);
- } else {
- phylink_resume(priv->phylink);
- if (device_may_wakeup(priv->device))
- phylink_speed_up(priv->phylink);
- }
- rtnl_unlock();
- rtnl_lock();
+ /* Prepare the PHY to resume, ensuring that its clocks which are
+ * necessary for the MAC DMA reset to complete are running
+ */
+ phylink_prepare_resume(priv->phylink);
+
mutex_lock(&priv->lock);
stmmac_reset_queues_param(priv);
@@ -7931,14 +7971,25 @@ int stmmac_resume(struct device *dev)
stmmac_hw_setup(ndev, false);
stmmac_init_coalesce(priv);
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_rx_mode(ndev);
stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
+ phylink_rx_clk_stop_unblock(priv->phylink);
stmmac_enable_all_queues(priv);
stmmac_enable_all_dma_irq(priv);
mutex_unlock(&priv->lock);
+
+ /* phylink_resume() must be called after the hardware has been
+ * initialised because it may bring the link up immediately in a
+ * workqueue thread, which will race with initialisation.
+ */
+ phylink_resume(priv->phylink);
+ if (device_may_wakeup(priv->device) && !priv->plat->pmt)
+ phylink_speed_up(priv->phylink);
+
rtnl_unlock();
netif_device_attach(ndev);
@@ -7961,9 +8012,6 @@ static int __init stmmac_cmdline_opt(char *str)
} else if (!strncmp(opt, "phyaddr:", 8)) {
if (kstrtoint(opt + 8, 0, &phyaddr))
goto err;
- } else if (!strncmp(opt, "buf_sz:", 7)) {
- if (kstrtoint(opt + 7, 0, &buf_sz))
- goto err;
} else if (!strncmp(opt, "tc:", 3)) {
if (kstrtoint(opt + 3, 0, &tc))
goto err;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 0c7d81ddd440..836f2848dfeb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -524,6 +524,9 @@ int stmmac_pcs_setup(struct net_device *ndev)
if (ret)
return dev_err_probe(priv->device, ret, "No xPCS found\n");
+ if (xpcs)
+ xpcs_config_eee_mult_fact(xpcs, priv->plat->mult_fact_100ns);
+
priv->hw->xpcs = xpcs;
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 352b01678c22..9c1b54b701f7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -155,9 +155,9 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
{
struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
struct plat_stmmacenet_data *plat;
- struct stmmac_resources res;
- int i;
+ struct stmmac_resources res = {};
int ret;
+ int i;
plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
if (!plat)
@@ -192,9 +192,9 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
- ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev));
- if (ret)
- return ret;
+ res.addr = pcim_iomap_region(pdev, i, STMMAC_RESOURCE_NAME);
+ if (IS_ERR(res.addr))
+ return PTR_ERR(res.addr);
break;
}
@@ -204,8 +204,6 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- memset(&res, 0, sizeof(res));
- res.addr = pcim_iomap_table(pdev)[i];
res.wol_irq = pdev->irq;
res.irq = pdev->irq;
@@ -226,21 +224,11 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
* stmmac_pci_remove
*
* @pdev: platform device pointer
- * Description: this function calls the main to free the net resources
- * and releases the PCI resources.
+ * Description: this function calls the main to free the net resources.
*/
static void stmmac_pci_remove(struct pci_dev *pdev)
{
- int i;
-
stmmac_dvr_remove(&pdev->dev);
-
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (pci_resource_len(pdev, i) == 0)
- continue;
- pcim_iounmap_regions(pdev, BIT(i));
- break;
- }
}
static int __maybe_unused stmmac_pci_suspend(struct device *dev)
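
The PCI hunk above switches from pcim_iomap_regions()/pcim_iomap_table() to pcim_iomap_region(), which returns the mapping (or an ERR_PTR) directly and ties its lifetime to the device via devres; that is why stmmac_pci_remove() no longer unmaps anything by hand. A minimal sketch of the pattern, with a hypothetical driver name:

/* Hypothetical probe fragment: the mapping returned by pcim_iomap_region()
 * is devres-managed, so no matching unmap is needed in remove().
 */
static int foo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *base;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	base = pcim_iomap_region(pdev, 0, "foo");
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... use base ... */
	return 0;
}
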
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index d0e61aa1a495..c73eff6a56b8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -405,6 +405,17 @@ static int stmmac_of_get_mac_mode(struct device_node *np)
return -ENODEV;
}
+/* Compatible string array for all gmac4 devices */
+static const char * const stmmac_gmac4_compats[] = {
+ "snps,dwmac-4.00",
+ "snps,dwmac-4.10a",
+ "snps,dwmac-4.20a",
+ "snps,dwmac-5.10a",
+ "snps,dwmac-5.20",
+ "snps,dwmac-5.30a",
+ NULL
+};
+
/**
* stmmac_probe_config_dt - parse device-tree driver parameters
* @pdev: platform_device structure
@@ -486,8 +497,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
plat->force_sf_dma_mode =
of_property_read_bool(np, "snps,force_sf_dma_mode");
- if (of_property_read_bool(np, "snps,en-tx-lpi-clockgating"))
+ if (of_property_read_bool(np, "snps,en-tx-lpi-clockgating")) {
+ dev_warn(&pdev->dev,
+ "OF property snps,en-tx-lpi-clockgating is deprecated, please convert driver to use STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP\n");
plat->flags |= STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
+ }
/* Set the maxmtu to a default of JUMBO_LEN in case the
* parameter is not present in the device tree.
@@ -538,11 +552,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
plat->pmt = 1;
}
- if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
- of_device_is_compatible(np, "snps,dwmac-4.10a") ||
- of_device_is_compatible(np, "snps,dwmac-4.20a") ||
- of_device_is_compatible(np, "snps,dwmac-5.10a") ||
- of_device_is_compatible(np, "snps,dwmac-5.20")) {
+ if (of_device_compatible_match(np, stmmac_gmac4_compats)) {
plat->has_gmac4 = 1;
plat->has_gmac = 0;
plat->pmt = 1;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index 3ca1c2a816ff..a01bc394d1ac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -382,14 +382,14 @@ static int stmmac_test_phy_loopback(struct stmmac_priv *priv)
if (!priv->dev->phydev)
return -EOPNOTSUPP;
- ret = phy_loopback(priv->dev->phydev, true);
+ ret = phy_loopback(priv->dev->phydev, true, 0);
if (ret)
return ret;
attr.dst = priv->dev->dev_addr;
ret = __stmmac_test_loopback(priv, &attr);
- phy_loopback(priv->dev->phydev, false);
+ phy_loopback(priv->dev->phydev, false, 0);
return ret;
}
@@ -1985,7 +1985,7 @@ void stmmac_selftest_run(struct net_device *dev,
case STMMAC_LOOPBACK_PHY:
ret = -EOPNOTSUPP;
if (dev->phydev)
- ret = phy_loopback(dev->phydev, true);
+ ret = phy_loopback(dev->phydev, true, 0);
if (!ret)
break;
fallthrough;
@@ -2018,7 +2018,7 @@ void stmmac_selftest_run(struct net_device *dev,
case STMMAC_LOOPBACK_PHY:
ret = -EOPNOTSUPP;
if (dev->phydev)
- ret = phy_loopback(dev->phydev, false);
+ ret = phy_loopback(dev->phydev, false, 0);
if (!ret)
break;
fallthrough;
diff --git a/drivers/net/ethernet/tehuti/tn40.c b/drivers/net/ethernet/tehuti/tn40.c
index 259bdac24cf2..558b791a97ed 100644
--- a/drivers/net/ethernet/tehuti/tn40.c
+++ b/drivers/net/ethernet/tehuti/tn40.c
@@ -1778,7 +1778,7 @@ static int tn40_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = tn40_phy_register(priv);
if (ret) {
dev_err(&pdev->dev, "failed to set up PHY.\n");
- goto err_free_irq;
+ goto err_cleanup_swnodes;
}
ret = tn40_priv_init(priv);
@@ -1795,6 +1795,8 @@ static int tn40_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_unregister_phydev:
tn40_phy_unregister(priv);
+err_cleanup_swnodes:
+ tn40_swnodes_cleanup(priv);
err_free_irq:
pci_free_irq_vectors(pdev);
err_unset_drvdata:
@@ -1816,6 +1818,7 @@ static void tn40_remove(struct pci_dev *pdev)
unregister_netdev(ndev);
tn40_phy_unregister(priv);
+ tn40_swnodes_cleanup(priv);
pci_free_irq_vectors(priv->pdev);
pci_set_drvdata(pdev, NULL);
iounmap(priv->regs);
@@ -1832,6 +1835,10 @@ static const struct pci_device_id tn40_id_table[] = {
PCI_VENDOR_ID_ASUSTEK, 0x8709) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022,
PCI_VENDOR_ID_EDIMAX, 0x8103) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, PCI_DEVICE_ID_TEHUTI_TN9510,
+ PCI_VENDOR_ID_TEHUTI, 0x3015) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, PCI_DEVICE_ID_TEHUTI_TN9510,
+ PCI_VENDOR_ID_EDIMAX, 0x8102) },
{ }
};
diff --git a/drivers/net/ethernet/tehuti/tn40.h b/drivers/net/ethernet/tehuti/tn40.h
index 490781fe5120..25da8686d469 100644
--- a/drivers/net/ethernet/tehuti/tn40.h
+++ b/drivers/net/ethernet/tehuti/tn40.h
@@ -4,10 +4,13 @@
#ifndef _TN40_H_
#define _TN40_H_
+#include <linux/property.h>
#include "tn40_regs.h"
#define TN40_DRV_NAME "tn40xx"
+#define PCI_DEVICE_ID_TEHUTI_TN9510 0x4025
+
#define TN40_MDIO_SPEED_1MHZ (1)
#define TN40_MDIO_SPEED_6MHZ (6)
@@ -102,10 +105,39 @@ struct tn40_txdb {
int size; /* Number of elements in the db */
};
+#define NODE_PROP(_NAME, _PROP) ( \
+ (const struct software_node) { \
+ .name = _NAME, \
+ .properties = _PROP, \
+ })
+
+#define NODE_PAR_PROP(_NAME, _PAR, _PROP) ( \
+ (const struct software_node) { \
+ .name = _NAME, \
+ .parent = _PAR, \
+ .properties = _PROP, \
+ })
+
+enum tn40_swnodes {
+ SWNODE_MDIO,
+ SWNODE_PHY,
+ SWNODE_MAX
+};
+
+struct tn40_nodes {
+ char phy_name[32];
+ char mdio_name[32];
+ struct property_entry phy_props[3];
+ struct software_node swnodes[SWNODE_MAX];
+ const struct software_node *group[SWNODE_MAX + 1];
+};
+
struct tn40_priv {
struct net_device *ndev;
struct pci_dev *pdev;
+ struct tn40_nodes nodes;
+
struct napi_struct napi;
/* RX FIFOs: 1 for data (full) descs, and 2 for free descs */
struct tn40_rxd_fifo rxd_fifo0;
@@ -225,6 +257,7 @@ static inline void tn40_write_reg(struct tn40_priv *priv, u32 reg, u32 val)
int tn40_set_link_speed(struct tn40_priv *priv, u32 speed);
+void tn40_swnodes_cleanup(struct tn40_priv *priv);
int tn40_mdiobus_init(struct tn40_priv *priv);
int tn40_phy_register(struct tn40_priv *priv);
diff --git a/drivers/net/ethernet/tehuti/tn40_mdio.c b/drivers/net/ethernet/tehuti/tn40_mdio.c
index af18615d64a8..fb1a4a2e4dbc 100644
--- a/drivers/net/ethernet/tehuti/tn40_mdio.c
+++ b/drivers/net/ethernet/tehuti/tn40_mdio.c
@@ -14,6 +14,8 @@
(FIELD_PREP(TN40_MDIO_PRTAD_MASK, (port))))
#define TN40_MDIO_CMD_READ BIT(15)
+#define AQR105_FIRMWARE "tehuti/aqr105-tn40xx.cld"
+
static void tn40_mdio_set_speed(struct tn40_priv *priv, u32 speed)
{
void __iomem *regs = priv->regs;
@@ -111,6 +113,56 @@ static int tn40_mdio_write_c45(struct mii_bus *mii_bus, int addr, int devnum,
return tn40_mdio_write(mii_bus->priv, addr, devnum, regnum, val);
}
+/* registers an mdio node and an aqr105 PHY at address 1
+ * tn40_mdio-%id {
+ * ethernet-phy@1 {
+ * compatible = "ethernet-phy-id03a1.b4a3";
+ * reg = <1>;
+ * firmware-name = AQR105_FIRMWARE;
+ * };
+ * };
+ */
+static int tn40_swnodes_register(struct tn40_priv *priv)
+{
+ struct tn40_nodes *nodes = &priv->nodes;
+ struct pci_dev *pdev = priv->pdev;
+ struct software_node *swnodes;
+ u32 id;
+
+ id = pci_dev_id(pdev);
+
+ snprintf(nodes->phy_name, sizeof(nodes->phy_name), "ethernet-phy@1");
+ snprintf(nodes->mdio_name, sizeof(nodes->mdio_name), "tn40_mdio-%x",
+ id);
+
+ swnodes = nodes->swnodes;
+
+ swnodes[SWNODE_MDIO] = NODE_PROP(nodes->mdio_name, NULL);
+
+ nodes->phy_props[0] = PROPERTY_ENTRY_STRING("compatible",
+ "ethernet-phy-id03a1.b4a3");
+ nodes->phy_props[1] = PROPERTY_ENTRY_U32("reg", 1);
+ nodes->phy_props[2] = PROPERTY_ENTRY_STRING("firmware-name",
+ AQR105_FIRMWARE);
+ swnodes[SWNODE_PHY] = NODE_PAR_PROP(nodes->phy_name,
+ &swnodes[SWNODE_MDIO],
+ nodes->phy_props);
+
+ nodes->group[SWNODE_PHY] = &swnodes[SWNODE_PHY];
+ nodes->group[SWNODE_MDIO] = &swnodes[SWNODE_MDIO];
+ return software_node_register_node_group(nodes->group);
+}
+
+void tn40_swnodes_cleanup(struct tn40_priv *priv)
+{
+ /* cleanup of swnodes is only needed for AQR105-based cards */
+ if (priv->pdev->device == PCI_DEVICE_ID_TEHUTI_TN9510) {
+ fwnode_handle_put(dev_fwnode(&priv->mdio->dev));
+ device_remove_software_node(&priv->mdio->dev);
+ software_node_unregister_node_group(priv->nodes.group);
+ }
+}
+
int tn40_mdiobus_init(struct tn40_priv *priv)
{
struct pci_dev *pdev = priv->pdev;
@@ -129,14 +181,40 @@ int tn40_mdiobus_init(struct tn40_priv *priv)
bus->read_c45 = tn40_mdio_read_c45;
bus->write_c45 = tn40_mdio_write_c45;
+ priv->mdio = bus;
+ /* provide swnodes for AQR105-based cards only */
+ if (pdev->device == PCI_DEVICE_ID_TEHUTI_TN9510) {
+ ret = tn40_swnodes_register(priv);
+ if (ret) {
+ pr_err("swnodes failed\n");
+ return ret;
+ }
+
+ ret = device_add_software_node(&bus->dev,
+ priv->nodes.group[SWNODE_MDIO]);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "device_add_software_node failed: %d\n", ret);
+ goto err_swnodes_unregister;
+ }
+ }
+
+ tn40_mdio_set_speed(priv, TN40_MDIO_SPEED_6MHZ);
ret = devm_mdiobus_register(&pdev->dev, bus);
if (ret) {
dev_err(&pdev->dev, "failed to register mdiobus %d %u %u\n",
ret, bus->state, MDIOBUS_UNREGISTERED);
- return ret;
+ goto err_swnodes_cleanup;
}
- tn40_mdio_set_speed(priv, TN40_MDIO_SPEED_6MHZ);
- priv->mdio = bus;
return 0;
+
+err_swnodes_unregister:
+ software_node_unregister_node_group(priv->nodes.group);
+ return ret;
+err_swnodes_cleanup:
+ tn40_swnodes_cleanup(priv);
+ return ret;
}
+
+MODULE_FIRMWARE(AQR105_FIRMWARE);
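
The software nodes registered above let an AQR105-based TN9510 card publish an "ethernet-phy@1" node carrying a firmware-name property on systems without firmware description. As a hedged illustration of the consumer side (this is not code from the patch, and the function name is made up), a driver holding such a device could read the property like this:

/* Hypothetical consumer: read the firmware-name property published via the
 * software node attached to the MDIO/PHY device.
 */
static int foo_get_phy_firmware_name(struct device *dev, const char **fw_name)
{
	struct fwnode_handle *fwnode = dev_fwnode(dev);

	if (!fwnode)
		return -ENODEV;

	/* On TN9510 boards this yields AQR105_FIRMWARE per the swnode above */
	return fwnode_property_read_string(fwnode, "firmware-name", fw_name);
}
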
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 3a13d60a947a..a07c910c497a 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -205,6 +205,7 @@ config TI_ICSSG_PRUETH_SR1
select PHYLIB
select TI_ICSS_IEP
select TI_K3_CPPI_DESC_POOL
+ select PAGE_POOL
depends on PRU_REMOTEPROC
depends on NET_SWITCHDEV
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
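
The am65-cpsw changes that follow replace the bare void ** TX swdata with a small structure carrying the owning net_device alongside either the skb or the XDP frame, so the completion path no longer needs a separate helper per buffer type. The structure itself lives in am65-cpsw-nuss.h, which is not part of the hunks shown here; the sketch below is a reconstruction inferred from the accesses in the diff (swdata->ndev, swdata->skb, swdata->xdpf), not a quote of the header.

/* Reconstructed from usage in the diff below -- see am65-cpsw-nuss.h for the
 * authoritative definition. skb and xdpf are mutually exclusive, selected by
 * the AM65_CPSW_TX_BUF_TYPE_* tag kept per descriptor.
 */
struct am65_cpsw_tx_swdata {
	struct net_device *ndev;
	union {
		struct sk_buff *skb;
		struct xdp_frame *xdpf;
	};
};
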
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 2806238629f8..b3118bf0757e 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -164,6 +164,7 @@
#define AM65_CPSW_CPPI_TX_PKT_TYPE 0x7
/* XDP */
+#define AM65_CPSW_XDP_TX BIT(2)
#define AM65_CPSW_XDP_CONSUMED BIT(1)
#define AM65_CPSW_XDP_REDIRECT BIT(0)
#define AM65_CPSW_XDP_PASS 0
@@ -829,19 +830,19 @@ static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
{
struct am65_cpsw_tx_chn *tx_chn = data;
enum am65_cpsw_tx_buf_type buf_type;
+ struct am65_cpsw_tx_swdata *swdata;
struct cppi5_host_desc_t *desc_tx;
struct xdp_frame *xdpf;
struct sk_buff *skb;
- void **swdata;
desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx);
buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
- skb = *(swdata);
+ skb = swdata->skb;
dev_kfree_skb_any(skb);
} else {
- xdpf = *(swdata);
+ xdpf = swdata->xdpf;
xdp_return_frame(xdpf);
}
@@ -1098,10 +1099,10 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
struct cppi5_host_desc_t *host_desc;
+ struct am65_cpsw_tx_swdata *swdata;
struct netdev_queue *netif_txq;
dma_addr_t dma_desc, dma_buf;
u32 pkt_len = xdpf->len;
- void **swdata;
int ret;
host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
@@ -1131,7 +1132,8 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf, pkt_len);
swdata = cppi5_hdesc_get_swdata(host_desc);
- *(swdata) = xdpf;
+ swdata->ndev = ndev;
+ swdata->xdpf = xdpf;
/* Report BQL before sending the packet */
netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
@@ -1167,17 +1169,16 @@ pool_free:
static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
struct am65_cpsw_port *port,
- struct xdp_buff *xdp,
- int cpu, int *len)
+ struct xdp_buff *xdp, int *len)
{
struct am65_cpsw_common *common = flow->common;
struct net_device *ndev = port->ndev;
int ret = AM65_CPSW_XDP_CONSUMED;
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
+ int cpu = smp_processor_id();
struct xdp_frame *xdpf;
struct bpf_prog *prog;
- struct page *page;
int pkt_len;
u32 act;
int err;
@@ -1193,8 +1194,7 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
switch (act) {
case XDP_PASS:
- ret = AM65_CPSW_XDP_PASS;
- goto out;
+ return AM65_CPSW_XDP_PASS;
case XDP_TX:
tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_QUEUES];
netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
@@ -1213,15 +1213,13 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
goto drop;
dev_sw_netstats_rx_add(ndev, pkt_len);
- ret = AM65_CPSW_XDP_CONSUMED;
- goto out;
+ return AM65_CPSW_XDP_TX;
case XDP_REDIRECT:
if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
goto drop;
dev_sw_netstats_rx_add(ndev, pkt_len);
- ret = AM65_CPSW_XDP_REDIRECT;
- goto out;
+ return AM65_CPSW_XDP_REDIRECT;
default:
bpf_warn_invalid_xdp_action(ndev, prog, act);
fallthrough;
@@ -1233,10 +1231,6 @@ drop:
ndev->stats.rx_dropped++;
}
- page = virt_to_head_page(xdp->data);
- am65_cpsw_put_page(flow, page, true);
-
-out:
return ret;
}
@@ -1274,7 +1268,7 @@ static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
}
static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
- int cpu, int *xdp_state)
+ int *xdp_state)
{
struct am65_cpsw_rx_chn *rx_chn = &flow->common->rx_chns;
u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
@@ -1334,8 +1328,13 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
pkt_len, false);
- *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp,
- cpu, &pkt_len);
+ *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp, &pkt_len);
+ if (*xdp_state == AM65_CPSW_XDP_CONSUMED) {
+ page = virt_to_head_page(xdp.data);
+ am65_cpsw_put_page(flow, page, true);
+ goto allocate;
+ }
+
if (*xdp_state != AM65_CPSW_XDP_PASS)
goto allocate;
@@ -1401,7 +1400,6 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
{
struct am65_cpsw_rx_flow *flow = am65_cpsw_napi_to_rx_flow(napi_rx);
struct am65_cpsw_common *common = flow->common;
- int cpu = smp_processor_id();
int xdp_state_or = 0;
int cur_budget, ret;
int xdp_state;
@@ -1410,7 +1408,7 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
/* process only this flow */
cur_budget = budget;
while (cur_budget--) {
- ret = am65_cpsw_nuss_rx_packets(flow, cpu, &xdp_state);
+ ret = am65_cpsw_nuss_rx_packets(flow, &xdp_state);
xdp_state_or |= xdp_state;
if (ret)
break;
@@ -1438,52 +1436,6 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
return num_rx;
}
-static struct sk_buff *
-am65_cpsw_nuss_tx_compl_packet_skb(struct am65_cpsw_tx_chn *tx_chn,
- dma_addr_t desc_dma)
-{
- struct cppi5_host_desc_t *desc_tx;
- struct sk_buff *skb;
- void **swdata;
-
- desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
- desc_dma);
- swdata = cppi5_hdesc_get_swdata(desc_tx);
- skb = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
-
- am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
-
- dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
-
- return skb;
-}
-
-static struct xdp_frame *
-am65_cpsw_nuss_tx_compl_packet_xdp(struct am65_cpsw_common *common,
- struct am65_cpsw_tx_chn *tx_chn,
- dma_addr_t desc_dma,
- struct net_device **ndev)
-{
- struct cppi5_host_desc_t *desc_tx;
- struct am65_cpsw_port *port;
- struct xdp_frame *xdpf;
- u32 port_id = 0;
- void **swdata;
-
- desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
- cppi5_desc_get_tags_ids(&desc_tx->hdr, NULL, &port_id);
- swdata = cppi5_hdesc_get_swdata(desc_tx);
- xdpf = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
-
- port = am65_common_get_port(common, port_id);
- dev_sw_netstats_tx_add(port->ndev, 1, xdpf->len);
- *ndev = port->ndev;
-
- return xdpf;
-}
-
static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev,
struct netdev_queue *netif_txq)
{
@@ -1504,13 +1456,17 @@ static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_d
static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
int chn, unsigned int budget, bool *tdown)
{
+ bool single_port = AM65_CPSW_IS_CPSW2G(common);
enum am65_cpsw_tx_buf_type buf_type;
+ struct am65_cpsw_tx_swdata *swdata;
+ struct cppi5_host_desc_t *desc_tx;
struct device *dev = common->dev;
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
unsigned int total_bytes = 0;
struct net_device *ndev;
struct xdp_frame *xdpf;
+ unsigned int pkt_len;
struct sk_buff *skb;
dma_addr_t desc_dma;
int res, num_tx = 0;
@@ -1518,9 +1474,12 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
tx_chn = &common->tx_chns[chn];
while (true) {
- spin_lock(&tx_chn->lock);
+ if (!single_port)
+ spin_lock(&tx_chn->lock);
res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
- spin_unlock(&tx_chn->lock);
+ if (!single_port)
+ spin_unlock(&tx_chn->lock);
+
if (res == -ENODATA)
break;
@@ -1531,27 +1490,43 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
break;
}
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+ ndev = swdata->ndev;
buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
- skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
- ndev = skb->dev;
- total_bytes = skb->len;
+ skb = swdata->skb;
+ am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
+ pkt_len = skb->len;
napi_consume_skb(skb, budget);
} else {
- xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
- desc_dma, &ndev);
- total_bytes = xdpf->len;
+ xdpf = swdata->xdpf;
+ pkt_len = xdpf->len;
if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
xdp_return_frame_rx_napi(xdpf);
else
xdp_return_frame(xdpf);
}
+
+ total_bytes += pkt_len;
num_tx++;
+ am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
+ dev_sw_netstats_tx_add(ndev, 1, pkt_len);
+ if (!single_port) {
+ /* as packets from multi ports can be interleaved
+ * on the same channel, we have to figure out the
+ * port/queue at every packet and report it/wake queue.
+ */
+ netif_txq = netdev_get_tx_queue(ndev, chn);
+ netdev_tx_completed_queue(netif_txq, 1, pkt_len);
+ am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
+ }
+ }
+ if (single_port) {
netif_txq = netdev_get_tx_queue(ndev, chn);
-
netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
-
am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
}
@@ -1560,66 +1535,6 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
return num_tx;
}
-static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
- int chn, unsigned int budget, bool *tdown)
-{
- enum am65_cpsw_tx_buf_type buf_type;
- struct device *dev = common->dev;
- struct am65_cpsw_tx_chn *tx_chn;
- struct netdev_queue *netif_txq;
- unsigned int total_bytes = 0;
- struct net_device *ndev;
- struct xdp_frame *xdpf;
- struct sk_buff *skb;
- dma_addr_t desc_dma;
- int res, num_tx = 0;
-
- tx_chn = &common->tx_chns[chn];
-
- while (true) {
- res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
- if (res == -ENODATA)
- break;
-
- if (cppi5_desc_is_tdcm(desc_dma)) {
- if (atomic_dec_and_test(&common->tdown_cnt))
- complete(&common->tdown_complete);
- *tdown = true;
- break;
- }
-
- buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
- if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
- skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
- ndev = skb->dev;
- total_bytes += skb->len;
- napi_consume_skb(skb, budget);
- } else {
- xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
- desc_dma, &ndev);
- total_bytes += xdpf->len;
- if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
- xdp_return_frame_rx_napi(xdpf);
- else
- xdp_return_frame(xdpf);
- }
- num_tx++;
- }
-
- if (!num_tx)
- return 0;
-
- netif_txq = netdev_get_tx_queue(ndev, chn);
-
- netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
-
- am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
-
- dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
-
- return num_tx;
-}
-
static enum hrtimer_restart am65_cpsw_nuss_tx_timer_callback(struct hrtimer *timer)
{
struct am65_cpsw_tx_chn *tx_chns =
@@ -1635,13 +1550,8 @@ static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
bool tdown = false;
int num_tx;
- if (AM65_CPSW_IS_CPSW2G(tx_chn->common))
- num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, tx_chn->id,
- budget, &tdown);
- else
- num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common,
- tx_chn->id, budget, &tdown);
-
+ num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common,
+ tx_chn->id, budget, &tdown);
if (num_tx >= budget)
return budget;
@@ -1685,12 +1595,12 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_tx_swdata *swdata;
struct device *dev = common->dev;
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
dma_addr_t desc_dma, buf_dma;
int ret, q_idx, i;
- void **swdata;
u32 *psdata;
u32 pkt_len;
@@ -1736,7 +1646,8 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
swdata = cppi5_hdesc_get_swdata(first_desc);
- *(swdata) = skb;
+ swdata->ndev = ndev;
+ swdata->skb = skb;
psdata = cppi5_hdesc_get_psdata(first_desc);
/* HW csum offload if enabled */
@@ -2306,13 +2217,17 @@ static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
{
struct device *dev = common->dev;
+ struct am65_cpsw_tx_chn *tx_chn;
int i, ret = 0;
for (i = 0; i < common->tx_ch_num; i++) {
- struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+ tx_chn = &common->tx_chns[i];
+
+ hrtimer_setup(&tx_chn->tx_hrtimer, &am65_cpsw_nuss_tx_timer_callback,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
- hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
- tx_chn->tx_hrtimer.function = &am65_cpsw_nuss_tx_timer_callback;
+ netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
+ am65_cpsw_nuss_tx_poll);
ret = devm_request_irq(dev, tx_chn->irq,
am65_cpsw_nuss_tx_irq,
@@ -2323,19 +2238,16 @@ static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
tx_chn->id, tx_chn->irq, ret);
goto err;
}
-
- netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
- am65_cpsw_nuss_tx_poll);
}
return 0;
err:
- for (--i ; i >= 0 ; i--) {
- struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
-
- netif_napi_del(&tx_chn->napi_tx);
+ netif_napi_del(&tx_chn->napi_tx);
+ for (--i; i >= 0; i--) {
+ tx_chn = &common->tx_chns[i];
devm_free_irq(dev, tx_chn->irq, tx_chn);
+ netif_napi_del(&tx_chn->napi_tx);
}
return ret;
@@ -2565,9 +2477,11 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
snprintf(flow->name,
sizeof(flow->name), "%s-rx%d",
dev_name(dev), i);
- hrtimer_init(&flow->rx_hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- flow->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback;
+ hrtimer_setup(&flow->rx_hrtimer, &am65_cpsw_nuss_rx_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+
+ netif_napi_add(common->dma_ndev, &flow->napi_rx,
+ am65_cpsw_nuss_rx_poll);
ret = devm_request_irq(dev, flow->irq,
am65_cpsw_nuss_rx_irq,
@@ -2577,11 +2491,8 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
dev_err(dev, "failure requesting rx %d irq %u, %d\n",
i, flow->irq, ret);
flow->irq = -EINVAL;
- goto err_flow;
+ goto err_request_irq;
}
-
- netif_napi_add(common->dma_ndev, &flow->napi_rx,
- am65_cpsw_nuss_rx_poll);
}
/* setup classifier to route priorities to flows */
@@ -2589,11 +2500,14 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
return 0;
+err_request_irq:
+ netif_napi_del(&flow->napi_rx);
+
err_flow:
- for (--i; i >= 0 ; i--) {
+ for (--i; i >= 0; i--) {
flow = &rx_chn->flows[i];
- netif_napi_del(&flow->napi_rx);
devm_free_irq(dev, flow->irq, flow);
+ netif_napi_del(&flow->napi_rx);
}
err:
@@ -3578,6 +3492,10 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
__be64 id_temp;
int ret, i;
+ BUILD_BUG_ON_MSG(sizeof(struct am65_cpsw_tx_swdata) > AM65_CPSW_NAV_SW_DATA_SIZE,
+ "TX SW_DATA size exceeds AM65_CPSW_NAV_SW_DATA_SIZE");
+ BUILD_BUG_ON_MSG(sizeof(struct am65_cpsw_swdata) > AM65_CPSW_NAV_SW_DATA_SIZE,
+ "SW_DATA size exceeds AM65_CPSW_NAV_SW_DATA_SIZE");
common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
if (!common)
return -ENOMEM;
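
The hunks above collapse the separate CPSW2G completion routine into one loop: the descriptor's software data now carries the owning net_device next to an skb/xdp_frame union, so the port is known without dereferencing the buffer, and the per-channel spinlock is taken only on multi-port hardware. A minimal sketch of the per-descriptor step (the helper name my_tx_complete_one is invented for illustration; it reuses only symbols visible in this diff):

	static unsigned int my_tx_complete_one(struct am65_cpsw_tx_chn *tx_chn,
					       struct cppi5_host_desc_t *desc_tx,
					       enum am65_cpsw_tx_buf_type buf_type,
					       int budget, struct net_device **ndev)
	{
		struct am65_cpsw_tx_swdata *swdata = cppi5_hdesc_get_swdata(desc_tx);
		unsigned int pkt_len;

		*ndev = swdata->ndev;	/* recorded at xmit time, valid for skb and XDP */
		if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
			pkt_len = swdata->skb->len;
			napi_consume_skb(swdata->skb, budget);
		} else {
			pkt_len = swdata->xdpf->len;
			if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
				xdp_return_frame_rx_napi(swdata->xdpf);
			else
				xdp_return_frame(swdata->xdpf);
		}
		am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
		return pkt_len;
	}

The BUILD_BUG_ON_MSG added to probe guards this layout: the new TX swdata struct must still fit in the descriptor's AM65_CPSW_NAV_SW_DATA_SIZE area.
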
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index e7832a5cf3cc..917c37e4e89b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -104,6 +104,14 @@ struct am65_cpsw_rx_flow {
char name[32];
};
+struct am65_cpsw_tx_swdata {
+ struct net_device *ndev;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
+};
+
struct am65_cpsw_swdata {
u32 flow_id;
struct page *page;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 0cb6fa6e5b7d..a984b7d84e5e 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -351,6 +351,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
int ret = 0, port, ch = xmeta->ch;
int headroom = CPSW_HEADROOM_NA;
struct net_device *ndev = xmeta->ndev;
+ u32 metasize = 0;
struct cpsw_priv *priv;
struct page_pool *pool;
struct sk_buff *skb;
@@ -400,7 +401,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
}
- xdp_prepare_buff(&xdp, pa, headroom, size, false);
+ xdp_prepare_buff(&xdp, pa, headroom, size, true);
port = priv->emac_port + cpsw->data.dual_emac;
ret = cpsw_run_xdp(priv, ch, &xdp, page, port, &len);
@@ -408,6 +409,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
goto requeue;
headroom = xdp.data - xdp.data_hard_start;
+ metasize = xdp.data - xdp.data_meta;
/* XDP prog can modify vlan tag, so can't use encap header */
status &= ~CPDMA_RX_VLAN_ENCAP;
@@ -423,6 +425,8 @@ static void cpsw_rx_handler(void *token, int len, int status)
skb_reserve(skb, headroom);
skb_put(skb, len);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->dev = ndev;
if (status & CPDMA_RX_VLAN_ENCAP)
cpsw_rx_vlan_encap(skb);
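
The cpsw_rx_handler() change above enables XDP metadata: xdp_prepare_buff() is now called with its last argument true so a program may use bpf_xdp_adjust_meta(), and whatever metadata the program left in front of the packet is reported to the stack with skb_metadata_set(). A minimal sketch of that hand-off, assuming the skb's data pointer starts at xdp->data_hard_start (the helper name is invented):

	static void my_finish_xdp_pass(struct sk_buff *skb, struct xdp_buff *xdp,
				       unsigned int len)
	{
		int headroom = xdp->data - xdp->data_hard_start;
		u32 metasize = xdp->data - xdp->data_meta;

		skb_reserve(skb, headroom);	/* account for any bpf_xdp_adjust_head() */
		skb_put(skb, len);
		if (metasize)
			skb_metadata_set(skb, metasize);
	}
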
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index cec0a90659d9..5b5b52e4e7a7 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -293,6 +293,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
struct page_pool *pool;
struct sk_buff *skb;
struct xdp_buff xdp;
+ u32 metasize = 0;
int ret = 0;
dma_addr_t dma;
@@ -345,13 +346,14 @@ static void cpsw_rx_handler(void *token, int len, int status)
size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
}
- xdp_prepare_buff(&xdp, pa, headroom, size, false);
+ xdp_prepare_buff(&xdp, pa, headroom, size, true);
ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port, &len);
if (ret != CPSW_XDP_PASS)
goto requeue;
headroom = xdp.data - xdp.data_hard_start;
+ metasize = xdp.data - xdp.data_meta;
/* XDP prog can modify vlan tag, so can't use encap header */
status &= ~CPDMA_RX_VLAN_ENCAP;
@@ -368,6 +370,8 @@ static void cpsw_rx_handler(void *token, int len, int status)
skb->offload_fwd_mark = priv->offload_fwd_mark;
skb_reserve(skb, headroom);
skb_put(skb, len);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->dev = ndev;
if (status & CPDMA_RX_VLAN_ENCAP)
cpsw_rx_vlan_encap(skb);
@@ -1409,7 +1413,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_TC;
- ndev->netns_local = true;
+ ndev->netns_immutable = true;
ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
NETDEV_XDP_ACT_REDIRECT |
@@ -1418,6 +1422,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
SET_NETDEV_DEV(ndev, dev);
+ ndev->dev.of_node = slave_data->slave_node;
if (!napi_ndev) {
/* CPSW Host port CPDMA interface is shared between
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
index d59c1744840a..b4a34c57b7b4 100644
--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
@@ -406,9 +406,25 @@ static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns)
static int icss_iep_perout_enable_hw(struct icss_iep *iep,
struct ptp_perout_request *req, int on)
{
+ struct timespec64 ts;
+ u64 ns_start;
+ u64 ns_width;
int ret;
u64 cmp;
+ /* Calculate width of the signal for PPS/PEROUT handling */
+ ts.tv_sec = req->on.sec;
+ ts.tv_nsec = req->on.nsec;
+ ns_width = timespec64_to_ns(&ts);
+
+ if (req->flags & PTP_PEROUT_PHASE) {
+ ts.tv_sec = req->phase.sec;
+ ts.tv_nsec = req->phase.nsec;
+ ns_start = timespec64_to_ns(&ts);
+ } else {
+ ns_start = 0;
+ }
+
if (iep->ops && iep->ops->perout_enable) {
ret = iep->ops->perout_enable(iep->clockops_data, req, on, &cmp);
if (ret)
@@ -419,10 +435,12 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep,
regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp));
if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp));
- /* Configure SYNC, 1ms pulse width */
- regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, 1000000);
+ /* Configure SYNC, pulse width based on the requested "on" time */
+ regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
+ div_u64(ns_width, iep->def_inc));
regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
- regmap_write(iep->map, ICSS_IEP_SYNC_START_REG, 0);
+ regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
+ div_u64(ns_start, iep->def_inc));
regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */
/* Enable CMP 1 */
regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
@@ -447,6 +465,10 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep,
+ req->period.nsec;
icss_iep_update_to_next_boundary(iep, start_ns);
+ regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
+ div_u64(ns_width, iep->def_inc));
+ regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
+ div_u64(ns_start, iep->def_inc));
/* Enable Sync in single shot mode */
regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG,
IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
@@ -474,7 +496,37 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep,
static int icss_iep_perout_enable(struct icss_iep *iep,
struct ptp_perout_request *req, int on)
{
- return -EOPNOTSUPP;
+ int ret = 0;
+
+ /* Reject requests with unsupported flags */
+ if (req->flags & ~(PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+
+ if (iep->pps_enabled) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ if (iep->perout_enabled == !!on)
+ goto exit;
+
+ /* Set default "on" time (1ms) for the signal if not passed by the app */
+ if (!(req->flags & PTP_PEROUT_DUTY_CYCLE)) {
+ req->on.sec = 0;
+ req->on.nsec = NSEC_PER_MSEC;
+ }
+
+ ret = icss_iep_perout_enable_hw(iep, req, on);
+ if (!ret)
+ iep->perout_enabled = !!on;
+
+exit:
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return ret;
}
static void icss_iep_cap_cmp_work(struct work_struct *work)
@@ -549,10 +601,13 @@ static int icss_iep_pps_enable(struct icss_iep *iep, int on)
if (on) {
ns = icss_iep_gettime(iep, NULL);
ts = ns_to_timespec64(ns);
+ rq.perout.flags = 0;
rq.perout.period.sec = 1;
rq.perout.period.nsec = 0;
rq.perout.start.sec = ts.tv_sec + 2;
rq.perout.start.nsec = 0;
+ rq.perout.on.sec = 0;
+ rq.perout.on.nsec = NSEC_PER_MSEC;
ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
} else {
ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
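
With the icss_iep.c changes above, the SYNC pulse width and start offset are no longer hardwired to 1 ms / 0 but derived from the PEROUT request: the requested "on" time and optional phase are converted to nanoseconds and divided by the IEP increment before being written to the SYNC registers. A small sketch of the width programming, assuming iep->def_inc is the number of nanoseconds the counter advances per cycle (which is what the division implies; the helper name is invented):

	static void my_program_pulse_width(struct icss_iep *iep,
					   struct ptp_perout_request *req)
	{
		struct timespec64 ts = { .tv_sec = req->on.sec, .tv_nsec = req->on.nsec };
		u64 ns_width = timespec64_to_ns(&ts);	/* 1 ms default when DUTY_CYCLE is unset */

		/* e.g. def_inc == 4 ns: a 1 ms pulse is programmed as 250000 cycles */
		regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
			     div_u64(ns_width, iep->def_inc));
	}
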
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index 74f0f200a89d..46f500b90b17 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -45,6 +45,11 @@ void prueth_cleanup_rx_chns(struct prueth_emac *emac,
struct prueth_rx_chn *rx_chn,
int max_rflows)
{
+ if (rx_chn->pg_pool) {
+ page_pool_destroy(rx_chn->pg_pool);
+ rx_chn->pg_pool = NULL;
+ }
+
if (rx_chn->desc_pool)
k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
@@ -93,11 +98,20 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
{
struct cppi5_host_desc_t *first_desc, *next_desc;
dma_addr_t buf_dma, next_desc_dma;
+ struct prueth_swdata *swdata;
+ struct page *page;
u32 buf_dma_len;
first_desc = desc;
next_desc = first_desc;
+ swdata = cppi5_hdesc_get_swdata(desc);
+ if (swdata->type == PRUETH_SWDATA_PAGE) {
+ page = swdata->data.page;
+ page_pool_recycle_direct(page->pp, swdata->data.page);
+ goto free_desc;
+ }
+
cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
@@ -121,6 +135,7 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
}
+free_desc:
k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
}
EXPORT_SYMBOL_GPL(prueth_xmit_free);
@@ -131,12 +146,13 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
struct net_device *ndev = emac->ndev;
struct cppi5_host_desc_t *desc_tx;
struct netdev_queue *netif_txq;
+ struct prueth_swdata *swdata;
struct prueth_tx_chn *tx_chn;
unsigned int total_bytes = 0;
+ struct xdp_frame *xdpf;
struct sk_buff *skb;
dma_addr_t desc_dma;
int res, num_tx = 0;
- void **swdata;
tx_chn = &emac->tx_chns[chn];
@@ -157,20 +173,27 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx);
- /* was this command's TX complete? */
- if (emac->is_sr1 && *(swdata) == emac->cmd_data) {
+ switch (swdata->type) {
+ case PRUETH_SWDATA_SKB:
+ skb = swdata->data.skb;
+ dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
+ total_bytes += skb->len;
+ napi_consume_skb(skb, budget);
+ break;
+ case PRUETH_SWDATA_XDPF:
+ xdpf = swdata->data.xdpf;
+ dev_sw_netstats_tx_add(ndev, 1, xdpf->len);
+ total_bytes += xdpf->len;
+ xdp_return_frame(xdpf);
+ break;
+ default:
+ netdev_err(ndev, "tx_complete: invalid swdata type %d\n", swdata->type);
prueth_xmit_free(tx_chn, desc_tx);
+ ndev->stats.tx_dropped++;
continue;
}
- skb = *(swdata);
prueth_xmit_free(tx_chn, desc_tx);
-
- ndev = skb->dev;
- ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
- total_bytes += skb->len;
- napi_consume_skb(skb, budget);
num_tx++;
}
@@ -249,9 +272,8 @@ int prueth_ndev_add_tx_napi(struct prueth_emac *emac)
struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
netif_napi_add_tx(emac->ndev, &tx_chn->napi_tx, emac_napi_tx_poll);
- hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- tx_chn->tx_hrtimer.function = &emac_tx_timer_callback;
+ hrtimer_setup(&tx_chn->tx_hrtimer, &emac_tx_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
ret = request_irq(tx_chn->irq, prueth_tx_irq,
IRQF_TRIGGER_HIGH, tx_chn->name,
tx_chn);
@@ -461,17 +483,17 @@ fail:
}
EXPORT_SYMBOL_GPL(prueth_init_rx_chns);
-int prueth_dma_rx_push(struct prueth_emac *emac,
- struct sk_buff *skb,
- struct prueth_rx_chn *rx_chn)
+int prueth_dma_rx_push_mapped(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ struct page *page, u32 buf_len)
{
struct net_device *ndev = emac->ndev;
struct cppi5_host_desc_t *desc_rx;
- u32 pkt_len = skb_tailroom(skb);
+ struct prueth_swdata *swdata;
dma_addr_t desc_dma;
dma_addr_t buf_dma;
- void **swdata;
+ buf_dma = page_pool_get_dma_addr(page) + PRUETH_HEADROOM;
desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
if (!desc_rx) {
netdev_err(ndev, "rx push: failed to allocate descriptor\n");
@@ -479,25 +501,19 @@ int prueth_dma_rx_push(struct prueth_emac *emac,
}
desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
- buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
- k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- netdev_err(ndev, "rx push: failed to map rx pkt buffer\n");
- return -EINVAL;
- }
-
cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
PRUETH_NAV_PS_DATA_SIZE);
k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
- cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
+ cppi5_hdesc_attach_buf(desc_rx, buf_dma, buf_len, buf_dma, buf_len);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- *swdata = skb;
+ swdata->type = PRUETH_SWDATA_PAGE;
+ swdata->data.page = page;
- return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0,
+ return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, PRUETH_RX_FLOW_DATA,
desc_rx, desc_dma);
}
-EXPORT_SYMBOL_GPL(prueth_dma_rx_push);
+EXPORT_SYMBOL_GPL(prueth_dma_rx_push_mapped);
u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns)
{
@@ -535,18 +551,170 @@ void emac_rx_timestamp(struct prueth_emac *emac,
ssh->hwtstamp = ns_to_ktime(ns);
}
-static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
+/**
+ * emac_xmit_xdp_frame - transmits an XDP frame
+ * @emac: emac device
+ * @xdpf: data to transmit
+ * @page: page from page pool if already DMA mapped
+ * @q_idx: queue id
+ *
+ * Return: XDP state
+ */
+u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
+ struct xdp_frame *xdpf,
+ struct page *page,
+ unsigned int q_idx)
+{
+ struct cppi5_host_desc_t *first_desc;
+ struct net_device *ndev = emac->ndev;
+ struct prueth_tx_chn *tx_chn;
+ dma_addr_t desc_dma, buf_dma;
+ struct prueth_swdata *swdata;
+ u32 *epib;
+ int ret;
+
+ if (q_idx >= PRUETH_MAX_TX_QUEUES) {
+ netdev_err(ndev, "xdp tx: invalid q_id %d\n", q_idx);
+ return ICSSG_XDP_CONSUMED; /* drop */
+ }
+
+ tx_chn = &emac->tx_chns[q_idx];
+
+ first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (!first_desc) {
+ netdev_dbg(ndev, "xdp tx: failed to allocate descriptor\n");
+ goto drop_free_descs; /* drop */
+ }
+
+ if (page) { /* already DMA mapped by page_pool */
+ buf_dma = page_pool_get_dma_addr(page);
+ buf_dma += xdpf->headroom + sizeof(struct xdp_frame);
+ } else { /* Map the linear buffer */
+ buf_dma = dma_map_single(tx_chn->dma_dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
+ netdev_err(ndev, "xdp tx: failed to map data buffer\n");
+ goto drop_free_descs; /* drop */
+ }
+ }
+
+ cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ cppi5_hdesc_set_pkttype(first_desc, 0);
+ epib = first_desc->epib;
+ epib[0] = 0;
+ epib[1] = 0;
+
+ /* set dst tag to indicate internal qid at the firmware which is at
+ * bit8..bit15. bit0..bit7 indicates port num for directed
+ * packets in case of switch mode operation
+ */
+ cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8)));
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
+ cppi5_hdesc_attach_buf(first_desc, buf_dma, xdpf->len, buf_dma, xdpf->len);
+ swdata = cppi5_hdesc_get_swdata(first_desc);
+ if (page) {
+ swdata->type = PRUETH_SWDATA_PAGE;
+ swdata->data.page = page;
+ } else {
+ swdata->type = PRUETH_SWDATA_XDPF;
+ swdata->data.xdpf = xdpf;
+ }
+
+ cppi5_hdesc_set_pktlen(first_desc, xdpf->len);
+ desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
+
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ if (ret) {
+ netdev_err(ndev, "xdp tx: push failed: %d\n", ret);
+ goto drop_free_descs;
+ }
+
+ return ICSSG_XDP_TX;
+
+drop_free_descs:
+ prueth_xmit_free(tx_chn, first_desc);
+ return ICSSG_XDP_CONSUMED;
+}
+EXPORT_SYMBOL_GPL(emac_xmit_xdp_frame);
+
+/**
+ * emac_run_xdp - run an XDP program
+ * @emac: emac device
+ * @xdp: XDP buffer containing the frame
+ * @page: page with RX data if already DMA mapped
+ * @len: Rx descriptor packet length
+ *
+ * Return: XDP state
+ */
+static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp,
+ struct page *page, u32 *len)
+{
+ struct net_device *ndev = emac->ndev;
+ struct bpf_prog *xdp_prog;
+ struct xdp_frame *xdpf;
+ u32 pkt_len = *len;
+ u32 act, result;
+ int q_idx, err;
+
+ xdp_prog = READ_ONCE(emac->xdp_prog);
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ return ICSSG_XDP_PASS;
+ case XDP_TX:
+ /* Send packet to TX ring for immediate transmission */
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf)) {
+ ndev->stats.tx_dropped++;
+ goto drop;
+ }
+
+ q_idx = smp_processor_id() % emac->tx_ch_num;
+ result = emac_xmit_xdp_frame(emac, xdpf, page, q_idx);
+ if (result == ICSSG_XDP_CONSUMED)
+ goto drop;
+
+ dev_sw_netstats_rx_add(ndev, xdpf->len);
+ return result;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(emac->ndev, xdp, xdp_prog);
+ if (err)
+ goto drop;
+
+ dev_sw_netstats_rx_add(ndev, pkt_len);
+ return ICSSG_XDP_REDIR;
+ default:
+ bpf_warn_invalid_xdp_action(emac->ndev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+drop:
+ trace_xdp_exception(emac->ndev, xdp_prog, act);
+ fallthrough; /* handle aborts by dropping packet */
+ case XDP_DROP:
+ ndev->stats.rx_dropped++;
+ page_pool_recycle_direct(emac->rx_chns.pg_pool, page);
+ return ICSSG_XDP_CONSUMED;
+ }
+}
+
+static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
{
struct prueth_rx_chn *rx_chn = &emac->rx_chns;
u32 buf_dma_len, pkt_len, port_id = 0;
struct net_device *ndev = emac->ndev;
struct cppi5_host_desc_t *desc_rx;
- struct sk_buff *skb, *new_skb;
+ struct prueth_swdata *swdata;
dma_addr_t desc_dma, buf_dma;
- void **swdata;
+ struct page *page, *new_page;
+ struct page_pool *pool;
+ struct sk_buff *skb;
+ struct xdp_buff xdp;
u32 *psdata;
+ void *pa;
int ret;
+ *xdp_state = 0;
+ pool = rx_chn->pg_pool;
ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
if (ret) {
if (ret != -ENODATA)
@@ -558,15 +726,15 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
return 0;
desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
-
swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
-
- psdata = cppi5_hdesc_get_psdata(desc_rx);
- /* RX HW timestamp */
- if (emac->rx_ts_enabled)
- emac_rx_timestamp(emac, skb, psdata);
+ if (swdata->type != PRUETH_SWDATA_PAGE) {
+ netdev_err(ndev, "rx_pkt: invalid swdata->type %d\n", swdata->type);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+ return 0;
+ }
+ page = swdata->data.page;
+ page_pool_dma_sync_for_cpu(pool, page, 0, PAGE_SIZE);
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
@@ -574,32 +742,63 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
pkt_len -= 4;
cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
- dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- skb->dev = ndev;
- new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE);
/* if allocation fails we drop the packet but push the
- * descriptor back to the ring with old skb to prevent a stall
+ * descriptor back to the ring with old page to prevent a stall
*/
- if (!new_skb) {
+ new_page = page_pool_dev_alloc_pages(pool);
+ if (unlikely(!new_page)) {
+ new_page = page;
ndev->stats.rx_dropped++;
- new_skb = skb;
+ goto requeue;
+ }
+
+ pa = page_address(page);
+ if (emac->xdp_prog) {
+ xdp_init_buff(&xdp, PAGE_SIZE, &rx_chn->xdp_rxq);
+ xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false);
+
+ *xdp_state = emac_run_xdp(emac, &xdp, page, &pkt_len);
+ if (*xdp_state == ICSSG_XDP_PASS)
+ skb = xdp_build_skb_from_buff(&xdp);
+ else
+ goto requeue;
} else {
- /* send the filled skb up the n/w stack */
- skb_put(skb, pkt_len);
- if (emac->prueth->is_switch_mode)
- skb->offload_fwd_mark = emac->offload_fwd_mark;
- skb->protocol = eth_type_trans(skb, ndev);
- napi_gro_receive(&emac->napi_rx, skb);
- ndev->stats.rx_bytes += pkt_len;
- ndev->stats.rx_packets++;
+ /* prepare skb and send to n/w stack */
+ skb = napi_build_skb(pa, PAGE_SIZE);
+ }
+
+ if (!skb) {
+ ndev->stats.rx_dropped++;
+ page_pool_recycle_direct(pool, page);
+ goto requeue;
}
+ skb_reserve(skb, PRUETH_HEADROOM);
+ skb_put(skb, pkt_len);
+ skb->dev = ndev;
+
+ psdata = cppi5_hdesc_get_psdata(desc_rx);
+ /* RX HW timestamp */
+ if (emac->rx_ts_enabled)
+ emac_rx_timestamp(emac, skb, psdata);
+
+ if (emac->prueth->is_switch_mode)
+ skb->offload_fwd_mark = emac->offload_fwd_mark;
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ skb_mark_for_recycle(skb);
+ napi_gro_receive(&emac->napi_rx, skb);
+ ndev->stats.rx_bytes += pkt_len;
+ ndev->stats.rx_packets++;
+
+requeue:
/* queue another RX DMA */
- ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_chns);
+ ret = prueth_dma_rx_push_mapped(emac, &emac->rx_chns, new_page,
+ PRUETH_MAX_PKT_SIZE);
if (WARN_ON(ret < 0)) {
- dev_kfree_skb_any(new_skb);
+ page_pool_recycle_direct(pool, new_page);
ndev->stats.rx_errors++;
ndev->stats.rx_dropped++;
}
@@ -611,22 +810,19 @@ static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
{
struct prueth_rx_chn *rx_chn = data;
struct cppi5_host_desc_t *desc_rx;
- struct sk_buff *skb;
- dma_addr_t buf_dma;
- u32 buf_dma_len;
- void **swdata;
+ struct prueth_swdata *swdata;
+ struct page_pool *pool;
+ struct page *page;
+ pool = rx_chn->pg_pool;
desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
- cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
- k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
+ if (swdata->type == PRUETH_SWDATA_PAGE) {
+ page = swdata->data.page;
+ page_pool_recycle_direct(pool, page);
+ }
- dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len,
- DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
-
- dev_kfree_skb_any(skb);
}
static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
@@ -662,13 +858,13 @@ enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev
struct prueth_emac *emac = netdev_priv(ndev);
struct prueth *prueth = emac->prueth;
struct netdev_queue *netif_txq;
+ struct prueth_swdata *swdata;
struct prueth_tx_chn *tx_chn;
dma_addr_t desc_dma, buf_dma;
u32 pkt_len, dst_tag_id;
int i, ret = 0, q_idx;
bool in_tx_ts = 0;
int tx_ts_cookie;
- void **swdata;
u32 *epib;
pkt_len = skb_headlen(skb);
@@ -730,7 +926,8 @@ enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev
k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
swdata = cppi5_hdesc_get_swdata(first_desc);
- *swdata = skb;
+ swdata->type = PRUETH_SWDATA_SKB;
+ swdata->data.skb = skb;
/* Handle the case where skb is fragmented in pages */
cur_desc = first_desc;
@@ -833,15 +1030,27 @@ static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
{
struct prueth_tx_chn *tx_chn = data;
struct cppi5_host_desc_t *desc_tx;
+ struct prueth_swdata *swdata;
+ struct xdp_frame *xdpf;
struct sk_buff *skb;
- void **swdata;
desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx);
- skb = *(swdata);
- prueth_xmit_free(tx_chn, desc_tx);
- dev_kfree_skb_any(skb);
+ switch (swdata->type) {
+ case PRUETH_SWDATA_SKB:
+ skb = swdata->data.skb;
+ dev_kfree_skb_any(skb);
+ break;
+ case PRUETH_SWDATA_XDPF:
+ xdpf = swdata->data.xdpf;
+ xdp_return_frame(xdpf);
+ break;
+ default:
+ break;
+ }
+
+ prueth_xmit_free(tx_chn, desc_tx);
}
irqreturn_t prueth_rx_irq(int irq, void *dev_id)
@@ -875,15 +1084,18 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
PRUETH_RX_FLOW_DATA_SR1 : PRUETH_RX_FLOW_DATA;
int flow = emac->is_sr1 ?
PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS;
+ int xdp_state_or = 0;
int num_rx = 0;
int cur_budget;
+ u32 xdp_state;
int ret;
while (flow--) {
cur_budget = budget - num_rx;
while (cur_budget--) {
- ret = emac_rx_packet(emac, flow);
+ ret = emac_rx_packet(emac, flow, &xdp_state);
+ xdp_state_or |= xdp_state;
if (ret)
break;
num_rx++;
@@ -893,6 +1105,9 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
break;
}
+ if (xdp_state_or & ICSSG_XDP_REDIR)
+ xdp_do_flush();
+
if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
if (unlikely(emac->rx_pace_timeout_ns)) {
hrtimer_start(&emac->rx_hrtimer,
@@ -907,29 +1122,71 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
}
EXPORT_SYMBOL_GPL(icssg_napi_rx_poll);
+static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac,
+ struct device *dma_dev,
+ int size)
+{
+ struct page_pool_params pp_params = { 0 };
+ struct page_pool *pool;
+
+ pp_params.order = 0;
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.pool_size = size;
+ pp_params.nid = dev_to_node(emac->prueth->dev);
+ pp_params.dma_dir = DMA_BIDIRECTIONAL;
+ pp_params.dev = dma_dev;
+ pp_params.napi = &emac->napi_rx;
+ pp_params.max_len = PAGE_SIZE;
+
+ pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool))
+ netdev_err(emac->ndev, "cannot create rx page pool\n");
+
+ return pool;
+}
+
int prueth_prepare_rx_chan(struct prueth_emac *emac,
struct prueth_rx_chn *chn,
int buf_size)
{
- struct sk_buff *skb;
+ struct page_pool *pool;
+ struct page *page;
int i, ret;
+ pool = prueth_create_page_pool(emac, chn->dma_dev, chn->descs_num);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ chn->pg_pool = pool;
+
for (i = 0; i < chn->descs_num; i++) {
- skb = __netdev_alloc_skb_ip_align(NULL, buf_size, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
+ /* NOTE: we're not using memory efficiently here.
+ * 1 full page (typically 4KB) is used here instead of
+ * PRUETH_MAX_PKT_SIZE (~1.5KB)
+ */
+ page = page_pool_dev_alloc_pages(pool);
+ if (!page) {
+ netdev_err(emac->ndev, "couldn't allocate rx page\n");
+ ret = -ENOMEM;
+ goto recycle_alloc_pg;
+ }
- ret = prueth_dma_rx_push(emac, skb, chn);
+ ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size);
if (ret < 0) {
netdev_err(emac->ndev,
- "cannot submit skb for rx chan %s ret %d\n",
+ "cannot submit page for rx chan %s ret %d\n",
chn->name, ret);
- kfree_skb(skb);
- return ret;
+ page_pool_recycle_direct(pool, page);
+ goto recycle_alloc_pg;
}
}
return 0;
+
+recycle_alloc_pg:
+ prueth_reset_rx_chan(&emac->rx_chns, PRUETH_MAX_RX_FLOWS, false);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(prueth_prepare_rx_chan);
@@ -958,6 +1215,9 @@ void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
prueth_rx_cleanup, !!i);
if (disable)
k3_udma_glue_disable_rx_chn(chn->rx_chn);
+
+ page_pool_destroy(chn->pg_pool);
+ chn->pg_pool = NULL;
}
EXPORT_SYMBOL_GPL(prueth_reset_rx_chan);
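
The icssg_common.c changes above move RX buffers from one-off skbs to a page_pool and thread an XDP verdict bitmask back through emac_rx_packet(), so the NAPI loop can call xdp_do_flush() once per poll when any packet was redirected. A minimal sketch of the per-packet build decision (the helper name my_rx_build_skb is invented; the caller finishes the skb and requeues or recycles the page as emac_rx_packet() does):

	static struct sk_buff *my_rx_build_skb(struct prueth_emac *emac,
					       struct page *page, u32 pkt_len,
					       u32 *xdp_state)
	{
		void *pa = page_address(page);
		struct xdp_buff xdp;

		*xdp_state = 0;
		if (emac->xdp_prog) {
			xdp_init_buff(&xdp, PAGE_SIZE, &emac->rx_chns.xdp_rxq);
			xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false);
			*xdp_state = emac_run_xdp(emac, &xdp, page, &pkt_len);
			if (*xdp_state != ICSSG_XDP_PASS)
				return NULL;	/* consumed, queued for TX or redirected */
			return xdp_build_skb_from_buff(&xdp);
		}

		/* no program attached: wrap the pool page directly */
		return napi_build_skb(pa, PAGE_SIZE);
	}
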
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 00ed97860547..443f90fa6557 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -559,6 +559,33 @@ const struct icss_iep_clockops prueth_iep_clockops = {
.perout_enable = prueth_perout_enable,
};
+static int prueth_create_xdp_rxqs(struct prueth_emac *emac)
+{
+ struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
+ struct page_pool *pool = emac->rx_chns.pg_pool;
+ int ret;
+
+ ret = xdp_rxq_info_reg(rxq, emac->ndev, 0, emac->napi_rx.napi_id);
+ if (ret)
+ return ret;
+
+ ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
+ if (ret)
+ xdp_rxq_info_unreg(rxq);
+
+ return ret;
+}
+
+static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac)
+{
+ struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
+
+ if (!xdp_rxq_info_is_reg(rxq))
+ return;
+
+ xdp_rxq_info_unreg(rxq);
+}
+
static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
{
struct net_device *real_dev;
@@ -780,10 +807,14 @@ static int emac_ndo_open(struct net_device *ndev)
if (ret)
goto free_tx_ts_irq;
- ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
+ ret = prueth_create_xdp_rxqs(emac);
if (ret)
goto reset_rx_chn;
+ ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
+ if (ret)
+ goto destroy_xdp_rxqs;
+
for (i = 0; i < emac->tx_ch_num; i++) {
ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
if (ret)
@@ -809,6 +840,8 @@ reset_tx_chan:
* any SKB for completion. So set false to free_skb
*/
prueth_reset_tx_chan(emac, i, false);
+destroy_xdp_rxqs:
+ prueth_destroy_xdp_rxqs(emac);
reset_rx_chn:
prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
free_tx_ts_irq:
@@ -879,7 +912,7 @@ static int emac_ndo_stop(struct net_device *ndev)
k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
-
+ prueth_destroy_xdp_rxqs(emac);
napi_disable(&emac->napi_rx);
hrtimer_cancel(&emac->rx_hrtimer);
@@ -1024,6 +1057,93 @@ static int emac_ndo_vlan_rx_del_vid(struct net_device *ndev,
return 0;
}
+/**
+ * emac_xdp_xmit - Implements ndo_xdp_xmit
+ * @dev: netdev
+ * @n: number of frames
+ * @frames: array of XDP buffer pointers
+ * @flags: XDP extra info
+ *
+ * Return: number of frames successfully sent. Failed frames
+ * will be freed by the XDP core.
+ *
+ * For error cases, a negative errno code is returned and no frames
+ * are transmitted (the caller must handle freeing the frames).
+ **/
+static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ struct prueth_emac *emac = netdev_priv(dev);
+ struct net_device *ndev = emac->ndev;
+ struct xdp_frame *xdpf;
+ unsigned int q_idx;
+ int nxmit = 0;
+ u32 err;
+ int i;
+
+ q_idx = smp_processor_id() % emac->tx_ch_num;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (i = 0; i < n; i++) {
+ xdpf = frames[i];
+ err = emac_xmit_xdp_frame(emac, xdpf, NULL, q_idx);
+ if (err != ICSSG_XDP_TX) {
+ ndev->stats.tx_dropped++;
+ break;
+ }
+ nxmit++;
+ }
+
+ return nxmit;
+}
+
+/**
+ * emac_xdp_setup - add/remove an XDP program
+ * @emac: emac device
+ * @bpf: XDP program
+ *
+ * Return: Always 0 (Success)
+ **/
+static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf)
+{
+ struct bpf_prog *prog = bpf->prog;
+ xdp_features_t val;
+
+ val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+ xdp_set_features_flag(emac->ndev, val);
+
+ if (!emac->xdpi.prog && !prog)
+ return 0;
+
+ WRITE_ONCE(emac->xdp_prog, prog);
+
+ xdp_attachment_setup(&emac->xdpi, bpf);
+
+ return 0;
+}
+
+/**
+ * emac_ndo_bpf - implements ndo_bpf for icssg_prueth
+ * @ndev: network adapter device
+ * @bpf: XDP program
+ *
+ * Return: 0 on success, error code on failure.
+ **/
+static int emac_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return emac_xdp_setup(emac, bpf);
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops emac_netdev_ops = {
.ndo_open = emac_ndo_open,
.ndo_stop = emac_ndo_stop,
@@ -1038,6 +1158,8 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_fix_features = emac_ndo_fix_features,
.ndo_vlan_rx_add_vid = emac_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_del_vid,
+ .ndo_bpf = emac_ndo_bpf,
+ .ndo_xdp_xmit = emac_xdp_xmit,
};
static int prueth_netdev_init(struct prueth *prueth,
@@ -1066,6 +1188,8 @@ static int prueth_netdev_init(struct prueth *prueth,
emac->prueth = prueth;
emac->ndev = ndev;
emac->port_id = port;
+ emac->xdp_prog = NULL;
+ emac->ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
if (!emac->cmd_wq) {
ret = -ENOMEM;
@@ -1169,9 +1293,8 @@ static int prueth_netdev_init(struct prueth *prueth,
ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES;
netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
- hrtimer_init(&emac->rx_hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- emac->rx_hrtimer.function = &emac_rx_timer_callback;
+ hrtimer_setup(&emac->rx_hrtimer, &emac_rx_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
prueth->emac[mac] = emac;
return 0;
@@ -1522,6 +1645,9 @@ static int prueth_probe(struct platform_device *pdev)
np = dev->of_node;
+ BUILD_BUG_ON_MSG((sizeof(struct prueth_swdata) > PRUETH_NAV_SW_DATA_SIZE),
+ "insufficient SW_DATA size");
+
prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
if (!prueth)
return -ENOMEM;
@@ -1679,6 +1805,7 @@ static int prueth_probe(struct platform_device *pdev)
}
spin_lock_init(&prueth->vtbl_lock);
+ spin_lock_init(&prueth->stats_lock);
/* setup netdev interfaces */
if (eth0_node) {
ret = prueth_netdev_init(prueth, eth0_node);
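
In emac_ndo_open() above, the XDP RX queue info is registered only after the RX channel preparation has created the page pool, and it is unregistered again both on the error path and in emac_ndo_stop() before the pool is destroyed. A small sketch of that ordering (the function name my_open_rx is invented; it only reorders calls that appear in the hunks):

	static int my_open_rx(struct prueth_emac *emac)
	{
		int ret;

		/* rx_chns.pg_pool already exists at this point */
		ret = prueth_create_xdp_rxqs(emac);	/* rxq memory model points at the pool */
		if (ret)
			return ret;

		ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
		if (ret)
			prueth_destroy_xdp_rxqs(emac);	/* unwind in reverse order */

		return ret;
	}
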
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index 329b46e9ee53..b6be4aa57a61 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -8,6 +8,8 @@
#ifndef __NET_TI_ICSSG_PRUETH_H
#define __NET_TI_ICSSG_PRUETH_H
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <linux/etherdevice.h>
#include <linux/genalloc.h>
#include <linux/if_vlan.h>
@@ -33,6 +35,8 @@
#include <linux/dma/k3-udma-glue.h>
#include <net/devlink.h>
+#include <net/xdp.h>
+#include <net/page_pool/helpers.h>
#include "icssg_config.h"
#include "icss_iep.h"
@@ -131,6 +135,26 @@ struct prueth_rx_chn {
u32 descs_num;
unsigned int irq[ICSSG_MAX_RFLOWS]; /* separate irq per flow */
char name[32];
+ struct page_pool *pg_pool;
+ struct xdp_rxq_info xdp_rxq;
+};
+
+enum prueth_swdata_type {
+ PRUETH_SWDATA_INVALID = 0,
+ PRUETH_SWDATA_SKB,
+ PRUETH_SWDATA_PAGE,
+ PRUETH_SWDATA_CMD,
+ PRUETH_SWDATA_XDPF,
+};
+
+struct prueth_swdata {
+ enum prueth_swdata_type type;
+ union prueth_data {
+ struct sk_buff *skb;
+ struct page *page;
+ u32 cmd;
+ struct xdp_frame *xdpf;
+ } data;
};
/* There are 4 Tx DMA channels, but the highest priority is CH3 (thread 3)
@@ -140,6 +164,12 @@ struct prueth_rx_chn {
#define PRUETH_MAX_TX_TS_REQUESTS 50 /* Max simultaneous TX_TS requests */
+/* XDP BPF state */
+#define ICSSG_XDP_PASS 0
+#define ICSSG_XDP_CONSUMED BIT(0)
+#define ICSSG_XDP_TX BIT(1)
+#define ICSSG_XDP_REDIR BIT(2)
+
/* Minimum coalesce time in usecs for both Tx and Rx */
#define ICSSG_MIN_COALESCE_USECS 20
@@ -208,8 +238,14 @@ struct prueth_emac {
unsigned long rx_pace_timeout_ns;
struct netdev_hw_addr_list vlan_mcast_list[MAX_VLAN_ID];
+ struct bpf_prog *xdp_prog;
+ struct xdp_attachment_info xdpi;
};
+/* The buffer includes headroom compatible with both skb and xdpf */
+#define PRUETH_HEADROOM_NA (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN)
+#define PRUETH_HEADROOM ALIGN(PRUETH_HEADROOM_NA, sizeof(long))
+
/**
* struct prueth_pdata - PRUeth platform data
* @fdqring_mode: Free desc queue mode
@@ -305,6 +341,8 @@ struct prueth {
int default_vlan;
/** @vtbl_lock: Lock for vtbl in shared memory */
spinlock_t vtbl_lock;
+ /** @stats_lock: Lock for reading icssg stats */
+ spinlock_t stats_lock;
};
struct emac_tx_ts_response {
@@ -410,9 +448,10 @@ int prueth_init_rx_chns(struct prueth_emac *emac,
struct prueth_rx_chn *rx_chn,
char *name, u32 max_rflows,
u32 max_desc_num);
-int prueth_dma_rx_push(struct prueth_emac *emac,
- struct sk_buff *skb,
- struct prueth_rx_chn *rx_chn);
+int prueth_dma_rx_push_mapped(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ struct page *page, u32 buf_len);
+unsigned int prueth_rxbuf_total_len(unsigned int len);
void emac_rx_timestamp(struct prueth_emac *emac,
struct sk_buff *skb, u32 *psdata);
enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev);
@@ -441,5 +480,9 @@ void prueth_put_cores(struct prueth *prueth, int slice);
/* Revision specific helper */
u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns);
+u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
+ struct xdp_frame *xdpf,
+ struct page *page,
+ unsigned int q_idx);
#endif /* __NET_TI_ICSSG_PRUETH_H */
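
PRUETH_HEADROOM above sizes the reserved space at the start of every pool page so the same buffer can back either an skb or an XDP program that calls bpf_xdp_adjust_head(). A worked example with the usual defaults (these values can differ per architecture and config):

	/*
	 * Assuming XDP_PACKET_HEADROOM = 256, NET_SKB_PAD = 64, NET_IP_ALIGN = 2:
	 *   PRUETH_HEADROOM_NA = max(256, 64) + 2 = 258
	 *   PRUETH_HEADROOM    = ALIGN(258, sizeof(long)) = 264 on 64-bit
	 */
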
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
index 64a19ff39562..ff5f41bf499e 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
@@ -84,7 +84,7 @@ static int emac_send_command_sr1(struct prueth_emac *emac, u32 cmd)
__le32 *data = emac->cmd_data;
dma_addr_t desc_dma, buf_dma;
struct prueth_tx_chn *tx_chn;
- void **swdata;
+ struct prueth_swdata *swdata;
int ret = 0;
u32 *epib;
@@ -122,7 +122,8 @@ static int emac_send_command_sr1(struct prueth_emac *emac, u32 cmd)
cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
swdata = cppi5_hdesc_get_swdata(first_desc);
- *swdata = data;
+ swdata->type = PRUETH_SWDATA_CMD;
+ swdata->data.cmd = le32_to_cpu(data[0]);
cppi5_hdesc_set_pktlen(first_desc, pkt_len);
desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
@@ -268,16 +269,16 @@ static int emac_phy_connect(struct prueth_emac *emac)
* Returns skb pointer if packet found else NULL
* Caller must free the returned skb.
*/
-static struct sk_buff *prueth_process_rx_mgm(struct prueth_emac *emac,
- u32 flow_id)
+static struct page *prueth_process_rx_mgm(struct prueth_emac *emac,
+ u32 flow_id)
{
struct prueth_rx_chn *rx_chn = &emac->rx_mgm_chn;
struct net_device *ndev = emac->ndev;
struct cppi5_host_desc_t *desc_rx;
- struct sk_buff *skb, *new_skb;
+ struct page *page, *new_page;
+ struct prueth_swdata *swdata;
dma_addr_t desc_dma, buf_dma;
- u32 buf_dma_len, pkt_len;
- void **swdata;
+ u32 buf_dma_len;
int ret;
ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
@@ -299,34 +300,31 @@ static struct sk_buff *prueth_process_rx_mgm(struct prueth_emac *emac,
}
swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
+ page = swdata->data.page;
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
- pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE);
+ new_page = page_pool_dev_alloc_pages(rx_chn->pg_pool);
/* if allocation fails we drop the packet but push the
* descriptor back to the ring with old skb to prevent a stall
*/
- if (!new_skb) {
+ if (!new_page) {
netdev_err(ndev,
- "skb alloc failed, dropped mgm pkt from flow %d\n",
+ "page alloc failed, dropped mgm pkt from flow %d\n",
flow_id);
- new_skb = skb;
- skb = NULL; /* return NULL */
- } else {
- /* return the filled skb */
- skb_put(skb, pkt_len);
+ new_page = page;
+ page = NULL; /* return NULL */
}
/* queue another DMA */
- ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_mgm_chn);
+ ret = prueth_dma_rx_push_mapped(emac, &emac->rx_chns, new_page,
+ PRUETH_MAX_PKT_SIZE);
if (WARN_ON(ret < 0))
- dev_kfree_skb_any(new_skb);
+ page_pool_recycle_direct(rx_chn->pg_pool, new_page);
- return skb;
+ return page;
}
static void prueth_tx_ts_sr1(struct prueth_emac *emac,
@@ -362,14 +360,14 @@ static void prueth_tx_ts_sr1(struct prueth_emac *emac,
static irqreturn_t prueth_rx_mgm_ts_thread_sr1(int irq, void *dev_id)
{
struct prueth_emac *emac = dev_id;
- struct sk_buff *skb;
+ struct page *page;
- skb = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1);
- if (!skb)
+ page = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1);
+ if (!page)
return IRQ_NONE;
- prueth_tx_ts_sr1(emac, (void *)skb->data);
- dev_kfree_skb_any(skb);
+ prueth_tx_ts_sr1(emac, (void *)page_address(page));
+ page_pool_recycle_direct(page->pp, page);
return IRQ_HANDLED;
}
@@ -377,15 +375,15 @@ static irqreturn_t prueth_rx_mgm_ts_thread_sr1(int irq, void *dev_id)
static irqreturn_t prueth_rx_mgm_rsp_thread(int irq, void *dev_id)
{
struct prueth_emac *emac = dev_id;
- struct sk_buff *skb;
+ struct page *page;
u32 rsp;
- skb = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_RESPONSE_SR1);
- if (!skb)
+ page = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_RESPONSE_SR1);
+ if (!page)
return IRQ_NONE;
/* Process command response */
- rsp = le32_to_cpu(*(__le32 *)skb->data) & 0xffff0000;
+ rsp = le32_to_cpu(*(__le32 *)page_address(page)) & 0xffff0000;
if (rsp == ICSSG_SHUTDOWN_CMD_SR1) {
netdev_dbg(emac->ndev, "f/w Shutdown cmd resp %x\n", rsp);
complete(&emac->cmd_complete);
@@ -394,7 +392,7 @@ static irqreturn_t prueth_rx_mgm_rsp_thread(int irq, void *dev_id)
complete(&emac->cmd_complete);
}
- dev_kfree_skb_any(skb);
+ page_pool_recycle_direct(page->pp, page);
return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c
index 8800bd3a8d07..6f0edae38ea2 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_stats.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c
@@ -26,6 +26,8 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
u32 val, reg;
int i;
+ spin_lock(&prueth->stats_lock);
+
for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++) {
regmap_read(prueth->miig_rt,
base + icssg_all_miig_stats[i].offset,
@@ -51,6 +53,8 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
emac->pa_stats[i] += val;
}
}
+
+ spin_unlock(&prueth->stats_lock);
}
void icssg_stats_work_handler(struct work_struct *work)
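
The stats_lock taken above serializes concurrent calls to emac_update_hardware_stats(): the accumulators are updated with a plain read-modify-write ("+="), so unserialized concurrent updates could lose increments. A minimal illustration of the step that must not run concurrently (names are invented; the driver takes the lock once around the whole update loop):

	static void my_accumulate(struct prueth *prueth, u64 *acc, u32 hw_val)
	{
		spin_lock(&prueth->stats_lock);
		*acc += hw_val;		/* read-modify-write, now serialized */
		spin_unlock(&prueth->stats_lock);
	}
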
diff --git a/drivers/net/ethernet/toshiba/Kconfig b/drivers/net/ethernet/toshiba/Kconfig
index 701e9b7c1c3b..b1e27e3b99eb 100644
--- a/drivers/net/ethernet/toshiba/Kconfig
+++ b/drivers/net/ethernet/toshiba/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_TOSHIBA
bool "Toshiba devices"
default y
- depends on PCI && (PPC_IBM_CELL_BLADE || MIPS) || PPC_PS3
+ depends on PCI && MIPS || PPC_PS3
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -39,15 +39,6 @@ config GELIC_WIRELESS
the driver automatically distinguishes the models, you can
safely enable this option even if you have a wireless-less model.
-config SPIDER_NET
- tristate "Spider Gigabit Ethernet driver"
- depends on PCI && PPC_IBM_CELL_BLADE
- select FW_LOADER
- select SUNGEM_PHY
- help
- This driver supports the Gigabit Ethernet chips present on the
- Cell Processor-Based Blades from IBM.
-
config TC35815
tristate "TOSHIBA TC35815 Ethernet support"
depends on PCI && MIPS
diff --git a/drivers/net/ethernet/toshiba/Makefile b/drivers/net/ethernet/toshiba/Makefile
index f434fd0f429e..27e2164cf7e9 100644
--- a/drivers/net/ethernet/toshiba/Makefile
+++ b/drivers/net/ethernet/toshiba/Makefile
@@ -6,6 +6,4 @@
obj-$(CONFIG_GELIC_NET) += ps3_gelic.o
gelic_wireless-$(CONFIG_GELIC_WIRELESS) += ps3_gelic_wireless.o
ps3_gelic-objs += ps3_gelic_net.o $(gelic_wireless-y)
-spidernet-y += spider_net.o spider_net_ethtool.o
-obj-$(CONFIG_SPIDER_NET) += spidernet.o
obj-$(CONFIG_TC35815) += tc35815.o
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
deleted file mode 100644
index a4937c18d7cb..000000000000
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ /dev/null
@@ -1,2556 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Network device driver for Cell Processor-Based Blade and Celleb platform
- *
- * (C) Copyright IBM Corp. 2005
- * (C) Copyright 2006 TOSHIBA CORPORATION
- *
- * Authors : Utz Bacher <utz.bacher@de.ibm.com>
- * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
- */
-
-#include <linux/compiler.h>
-#include <linux/crc32.h>
-#include <linux/delay.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/firmware.h>
-#include <linux/if_vlan.h>
-#include <linux/in.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/gfp.h>
-#include <linux/ioport.h>
-#include <linux/ip.h>
-#include <linux/kernel.h>
-#include <linux/mii.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/device.h>
-#include <linux/pci.h>
-#include <linux/skbuff.h>
-#include <linux/tcp.h>
-#include <linux/types.h>
-#include <linux/vmalloc.h>
-#include <linux/wait.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
-#include <linux/of.h>
-#include <net/checksum.h>
-
-#include "spider_net.h"
-
-MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \
- "<Jens.Osterkamp@de.ibm.com>");
-MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(VERSION);
-MODULE_FIRMWARE(SPIDER_NET_FIRMWARE_NAME);
-
-static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT;
-static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT;
-
-module_param(rx_descriptors, int, 0444);
-module_param(tx_descriptors, int, 0444);
-
-MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \
- "in rx chains");
-MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \
- "in tx chain");
-
-char spider_net_driver_name[] = "spidernet";
-
-static const struct pci_device_id spider_net_pci_tbl[] = {
- { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl);
-
-/**
- * spider_net_read_reg - reads an SMMIO register of a card
- * @card: device structure
- * @reg: register to read from
- *
- * returns the content of the specified SMMIO register.
- */
-static inline u32
-spider_net_read_reg(struct spider_net_card *card, u32 reg)
-{
- /* We use the powerpc specific variants instead of readl_be() because
- * we know spidernet is not a real PCI device and we can thus avoid the
- * performance hit caused by the PCI workarounds.
- */
- return in_be32(card->regs + reg);
-}
-
-/**
- * spider_net_write_reg - writes to an SMMIO register of a card
- * @card: device structure
- * @reg: register to write to
- * @value: value to write into the specified SMMIO register
- */
-static inline void
-spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
-{
- /* We use the powerpc specific variants instead of writel_be() because
- * we know spidernet is not a real PCI device and we can thus avoid the
- * performance hit caused by the PCI workarounds.
- */
- out_be32(card->regs + reg, value);
-}
-
-/**
- * spider_net_write_phy - write to phy register
- * @netdev: adapter to be written to
- * @mii_id: id of MII
- * @reg: PHY register
- * @val: value to be written to phy register
- *
- * spider_net_write_phy_register writes to an arbitrary PHY
- * register via the spider GPCWOPCMD register. We assume the queue does
- * not run full (not more than 15 commands outstanding).
- **/
-static void
-spider_net_write_phy(struct net_device *netdev, int mii_id,
- int reg, int val)
-{
- struct spider_net_card *card = netdev_priv(netdev);
- u32 writevalue;
-
- writevalue = ((u32)mii_id << 21) |
- ((u32)reg << 16) | ((u32)val);
-
- spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue);
-}
-
-/**
- * spider_net_read_phy - read from phy register
- * @netdev: network device to be read from
- * @mii_id: id of MII
- * @reg: PHY register
- *
- * Returns value read from PHY register
- *
- * spider_net_write_phy reads from an arbitrary PHY
- * register via the spider GPCROPCMD register
- **/
-static int
-spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
-{
- struct spider_net_card *card = netdev_priv(netdev);
- u32 readvalue;
-
- readvalue = ((u32)mii_id << 21) | ((u32)reg << 16);
- spider_net_write_reg(card, SPIDER_NET_GPCROPCMD, readvalue);
-
- /* we don't use semaphores to wait for an SPIDER_NET_GPROPCMPINT
- * interrupt, as we poll for the completion of the read operation
- * in spider_net_read_phy. Should take about 50 us
- */
- do {
- readvalue = spider_net_read_reg(card, SPIDER_NET_GPCROPCMD);
- } while (readvalue & SPIDER_NET_GPREXEC);
-
- readvalue &= SPIDER_NET_GPRDAT_MASK;
-
- return readvalue;
-}
-
-/**
- * spider_net_setup_aneg - initial auto-negotiation setup
- * @card: device structure
- **/
-static void
-spider_net_setup_aneg(struct spider_net_card *card)
-{
- struct mii_phy *phy = &card->phy;
- u32 advertise = 0;
- u16 bmsr, estat;
-
- bmsr = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
- estat = spider_net_read_phy(card->netdev, phy->mii_id, MII_ESTATUS);
-
- if (bmsr & BMSR_10HALF)
- advertise |= ADVERTISED_10baseT_Half;
- if (bmsr & BMSR_10FULL)
- advertise |= ADVERTISED_10baseT_Full;
- if (bmsr & BMSR_100HALF)
- advertise |= ADVERTISED_100baseT_Half;
- if (bmsr & BMSR_100FULL)
- advertise |= ADVERTISED_100baseT_Full;
-
- if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_TFULL))
- advertise |= SUPPORTED_1000baseT_Full;
- if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_THALF))
- advertise |= SUPPORTED_1000baseT_Half;
-
- sungem_phy_probe(phy, phy->mii_id);
- phy->def->ops->setup_aneg(phy, advertise);
-
-}
-
-/**
- * spider_net_rx_irq_off - switch off rx irq on this spider card
- * @card: device structure
- *
- * switches off rx irq by masking them out in the GHIINTnMSK register
- */
-static void
-spider_net_rx_irq_off(struct spider_net_card *card)
-{
- u32 regvalue;
-
- regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT);
- spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
-}
-
-/**
- * spider_net_rx_irq_on - switch on rx irq on this spider card
- * @card: device structure
- *
- * switches on rx irq by enabling them in the GHIINTnMSK register
- */
-static void
-spider_net_rx_irq_on(struct spider_net_card *card)
-{
- u32 regvalue;
-
- regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT;
- spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
-}
-
-/**
- * spider_net_set_promisc - sets the unicast address or the promiscuous mode
- * @card: card structure
- *
- * spider_net_set_promisc sets the unicast destination address filter and
- * thus either allows for non-promisc mode or promisc mode
- */
-static void
-spider_net_set_promisc(struct spider_net_card *card)
-{
- u32 macu, macl;
- struct net_device *netdev = card->netdev;
-
- if (netdev->flags & IFF_PROMISC) {
- /* clear destination entry 0 */
- spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, 0);
- spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, 0);
- spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
- SPIDER_NET_PROMISC_VALUE);
- } else {
- macu = netdev->dev_addr[0];
- macu <<= 8;
- macu |= netdev->dev_addr[1];
- memcpy(&macl, &netdev->dev_addr[2], sizeof(macl));
-
- macu |= SPIDER_NET_UA_DESCR_VALUE;
- spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, macu);
- spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, macl);
- spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
- SPIDER_NET_NONPROMISC_VALUE);
- }
-}
-
-/**
- * spider_net_get_descr_status -- returns the status of a descriptor
- * @hwdescr: descriptor to look at
- *
- * returns the status as in the dmac_cmd_status field of the descriptor
- */
-static inline int
-spider_net_get_descr_status(struct spider_net_hw_descr *hwdescr)
-{
- return hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
-}
-
-/**
- * spider_net_free_chain - free descriptor chain
- * @card: card structure
- * @chain: address of chain
- *
- */
-static void
-spider_net_free_chain(struct spider_net_card *card,
- struct spider_net_descr_chain *chain)
-{
- struct spider_net_descr *descr;
-
- descr = chain->ring;
- do {
- descr->bus_addr = 0;
- descr->hwdescr->next_descr_addr = 0;
- descr = descr->next;
- } while (descr != chain->ring);
-
- dma_free_coherent(&card->pdev->dev, chain->num_desc * sizeof(struct spider_net_hw_descr),
- chain->hwring, chain->dma_addr);
-}
-
-/**
- * spider_net_init_chain - alloc and link descriptor chain
- * @card: card structure
- * @chain: address of chain
- *
- * We manage a circular list that mirrors the hardware structure,
- * except that the hardware uses bus addresses.
- *
- * Returns 0 on success, <0 on failure
- */
-static int
-spider_net_init_chain(struct spider_net_card *card,
- struct spider_net_descr_chain *chain)
-{
- int i;
- struct spider_net_descr *descr;
- struct spider_net_hw_descr *hwdescr;
- dma_addr_t buf;
- size_t alloc_size;
-
- alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr);
-
- chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
- &chain->dma_addr, GFP_KERNEL);
- if (!chain->hwring)
- return -ENOMEM;
-
- /* Set up the hardware pointers in each descriptor */
- descr = chain->ring;
- hwdescr = chain->hwring;
- buf = chain->dma_addr;
- for (i=0; i < chain->num_desc; i++, descr++, hwdescr++) {
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
- hwdescr->next_descr_addr = 0;
-
- descr->hwdescr = hwdescr;
- descr->bus_addr = buf;
- descr->next = descr + 1;
- descr->prev = descr - 1;
-
- buf += sizeof(struct spider_net_hw_descr);
- }
- /* do actual circular list */
- (descr-1)->next = chain->ring;
- chain->ring->prev = descr-1;
-
- spin_lock_init(&chain->lock);
- chain->head = chain->ring;
- chain->tail = chain->ring;
- return 0;
-}
-
-/**
- * spider_net_free_rx_chain_contents - frees descr contents in rx chain
- * @card: card structure
- *
- * Unmaps and frees the skbs attached to the rx chain descriptors.
- */
-static void
-spider_net_free_rx_chain_contents(struct spider_net_card *card)
-{
- struct spider_net_descr *descr;
-
- descr = card->rx_chain.head;
- do {
- if (descr->skb) {
- dma_unmap_single(&card->pdev->dev,
- descr->hwdescr->buf_addr,
- SPIDER_NET_MAX_FRAME,
- DMA_BIDIRECTIONAL);
- dev_kfree_skb(descr->skb);
- descr->skb = NULL;
- }
- descr = descr->next;
- } while (descr != card->rx_chain.head);
-}
-
-/**
- * spider_net_prepare_rx_descr - Reinitialize RX descriptor
- * @card: card structure
- * @descr: descriptor to re-init
- *
- * Return 0 on success, <0 on failure.
- *
- * Allocates a new rx skb, iommu-maps it and attaches it to the
- * descriptor. Mark the descriptor as activated, ready-to-use.
- */
-static int
-spider_net_prepare_rx_descr(struct spider_net_card *card,
- struct spider_net_descr *descr)
-{
- struct spider_net_hw_descr *hwdescr = descr->hwdescr;
- dma_addr_t buf;
- int offset;
- int bufsize;
-
- /* we need to round up the buffer size to a multiple of 128 */
- bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) &
- (~(SPIDER_NET_RXBUF_ALIGN - 1));
-
- /* and we need to have it 128 byte aligned, therefore we allocate a
- * bit more
- */
- /* allocate an skb */
- descr->skb = netdev_alloc_skb(card->netdev,
- bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
- if (!descr->skb) {
- if (netif_msg_rx_err(card) && net_ratelimit())
- dev_err(&card->netdev->dev,
- "Not enough memory to allocate rx buffer\n");
- card->spider_stats.alloc_rx_skb_error++;
- return -ENOMEM;
- }
- hwdescr->buf_size = bufsize;
- hwdescr->result_size = 0;
- hwdescr->valid_size = 0;
- hwdescr->data_status = 0;
- hwdescr->data_error = 0;
-
- offset = ((unsigned long)descr->skb->data) &
- (SPIDER_NET_RXBUF_ALIGN - 1);
- if (offset)
- skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
- /* iommu-map the skb */
- buf = dma_map_single(&card->pdev->dev, descr->skb->data,
- SPIDER_NET_MAX_FRAME, DMA_FROM_DEVICE);
- if (dma_mapping_error(&card->pdev->dev, buf)) {
- dev_kfree_skb_any(descr->skb);
- descr->skb = NULL;
- if (netif_msg_rx_err(card) && net_ratelimit())
- dev_err(&card->netdev->dev, "Could not iommu-map rx buffer\n");
- card->spider_stats.rx_iommu_map_error++;
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
- } else {
- hwdescr->buf_addr = buf;
- wmb();
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
- SPIDER_NET_DMAC_NOINTR_COMPLETE;
- }
-
- return 0;
-}
-
-/**
- * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
- * @card: card structure
- *
- * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the
- * chip by writing to the appropriate register. DMA is enabled in
- * spider_net_enable_rxdmac.
- */
-static inline void
-spider_net_enable_rxchtails(struct spider_net_card *card)
-{
- /* assume chain is aligned correctly */
- spider_net_write_reg(card, SPIDER_NET_GDADCHA ,
- card->rx_chain.tail->bus_addr);
-}
-
-/**
- * spider_net_enable_rxdmac - enables a receive DMA controller
- * @card: card structure
- *
- * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
- * in the GDADMACCNTR register
- */
-static inline void
-spider_net_enable_rxdmac(struct spider_net_card *card)
-{
- wmb();
- spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
- SPIDER_NET_DMA_RX_VALUE);
-}
-
-/**
- * spider_net_disable_rxdmac - disables the receive DMA controller
- * @card: card structure
- *
- * spider_net_disable_rxdmac terminates processing on the DMA controller
- * by turning off the DMA controller, with the force-end flag set.
- */
-static inline void
-spider_net_disable_rxdmac(struct spider_net_card *card)
-{
- spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
- SPIDER_NET_DMA_RX_FEND_VALUE);
-}
-
-/**
- * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
- * @card: card structure
- *
- * refills descriptors in the rx chain: allocates skbs and iommu-maps them.
- */
-static void
-spider_net_refill_rx_chain(struct spider_net_card *card)
-{
- struct spider_net_descr_chain *chain = &card->rx_chain;
- unsigned long flags;
-
- /* one context doing the refill (and a second context seeing that
- * and omitting it) is ok. If called by NAPI, we'll be called again
- * as spider_net_decode_one_descr is called several times. If some
- * interrupt calls us, the NAPI is about to clean up anyway.
- */
- if (!spin_trylock_irqsave(&chain->lock, flags))
- return;
-
- while (spider_net_get_descr_status(chain->head->hwdescr) ==
- SPIDER_NET_DESCR_NOT_IN_USE) {
- if (spider_net_prepare_rx_descr(card, chain->head))
- break;
- chain->head = chain->head->next;
- }
-
- spin_unlock_irqrestore(&chain->lock, flags);
-}
-
-/**
- * spider_net_alloc_rx_skbs - Allocates rx skbs in rx descriptor chains
- * @card: card structure
- *
- * Returns 0 on success, <0 on failure.
- */
-static int
-spider_net_alloc_rx_skbs(struct spider_net_card *card)
-{
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *start = chain->tail;
- struct spider_net_descr *descr = start;
-
- /* Link up the hardware chain pointers */
- do {
- descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
- descr = descr->next;
- } while (descr != start);
-
- /* Put at least one buffer into the chain. If this fails,
- * we've got a problem. If not, spider_net_refill_rx_chain
- * will do the rest at the end of this function.
- */
- if (spider_net_prepare_rx_descr(card, chain->head))
- goto error;
- else
- chain->head = chain->head->next;
-
- /* This will allocate the rest of the rx buffers;
- * if it cannot, the chain is refilled later on as usual.
- */
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
- return 0;
-
-error:
- spider_net_free_rx_chain_contents(card);
- return -ENOMEM;
-}
-
-/**
- * spider_net_get_multicast_hash - generates hash for multicast filter table
- * @netdev: interface device structure
- * @addr: multicast address
- *
- * returns the hash value.
- *
- * spider_net_get_multicast_hash calculates a hash value for a given multicast
- * address; the hash is used to index the multicast filter tables
- */
-static u8
-spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
-{
- u32 crc;
- u8 hash;
- char addr_for_crc[ETH_ALEN] = { 0, };
- int i, bit;
-
- for (i = 0; i < ETH_ALEN * 8; i++) {
- bit = (addr[i / 8] >> (i % 8)) & 1;
- addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
- }
-
- crc = crc32_be(~0, addr_for_crc, netdev->addr_len);
-
- hash = (crc >> 27);
- hash <<= 3;
- hash |= crc & 7;
- hash &= 0xff;
-
- return hash;
-}
-
-/**
- * spider_net_set_multi - sets multicast addresses and promisc flags
- * @netdev: interface device structure
- *
- * spider_net_set_multi configures multicast addresses as needed for the
- * netdev interface. It also sets up multicast, allmulti and promisc
- * flags appropriately
- */
-static void
-spider_net_set_multi(struct net_device *netdev)
-{
- struct netdev_hw_addr *ha;
- u8 hash;
- int i;
- u32 reg;
- struct spider_net_card *card = netdev_priv(netdev);
- DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES);
-
- spider_net_set_promisc(card);
-
- if (netdev->flags & IFF_ALLMULTI) {
- bitmap_fill(bitmask, SPIDER_NET_MULTICAST_HASHES);
- goto write_hash;
- }
-
- bitmap_zero(bitmask, SPIDER_NET_MULTICAST_HASHES);
-
- /* well, we know, what the broadcast hash value is: it's xfd
- hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
- __set_bit(0xfd, bitmask);
-
- netdev_for_each_mc_addr(ha, netdev) {
- hash = spider_net_get_multicast_hash(netdev, ha->addr);
- __set_bit(hash, bitmask);
- }
-
-write_hash:
- for (i = 0; i < SPIDER_NET_MULTICAST_HASHES / 4; i++) {
- reg = 0;
- if (test_bit(i * 4, bitmask))
- reg += 0x08;
- reg <<= 8;
- if (test_bit(i * 4 + 1, bitmask))
- reg += 0x08;
- reg <<= 8;
- if (test_bit(i * 4 + 2, bitmask))
- reg += 0x08;
- reg <<= 8;
- if (test_bit(i * 4 + 3, bitmask))
- reg += 0x08;
-
- spider_net_write_reg(card, SPIDER_NET_GMRMHFILnR + i * 4, reg);
- }
-}
-
-/**
- * spider_net_prepare_tx_descr - fill tx descriptor with skb data
- * @card: card structure
- * @skb: packet to use
- *
- * returns 0 on success, <0 on failure.
- *
- * fills out the descriptor structure with skb data and len. Copies data,
- * if needed (32bit DMA!)
- */
-static int
-spider_net_prepare_tx_descr(struct spider_net_card *card,
- struct sk_buff *skb)
-{
- struct spider_net_descr_chain *chain = &card->tx_chain;
- struct spider_net_descr *descr;
- struct spider_net_hw_descr *hwdescr;
- dma_addr_t buf;
- unsigned long flags;
-
- buf = dma_map_single(&card->pdev->dev, skb->data, skb->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&card->pdev->dev, buf)) {
- if (netif_msg_tx_err(card) && net_ratelimit())
- dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). "
- "Dropping packet\n", skb->data, skb->len);
- card->spider_stats.tx_iommu_map_error++;
- return -ENOMEM;
- }
-
- spin_lock_irqsave(&chain->lock, flags);
- descr = card->tx_chain.head;
- if (descr->next == chain->tail->prev) {
- spin_unlock_irqrestore(&chain->lock, flags);
- dma_unmap_single(&card->pdev->dev, buf, skb->len,
- DMA_TO_DEVICE);
- return -ENOMEM;
- }
- hwdescr = descr->hwdescr;
- chain->head = descr->next;
-
- descr->skb = skb;
- hwdescr->buf_addr = buf;
- hwdescr->buf_size = skb->len;
- hwdescr->next_descr_addr = 0;
- hwdescr->data_status = 0;
-
- hwdescr->dmac_cmd_status =
- SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_TXFRMTL;
- spin_unlock_irqrestore(&chain->lock, flags);
-
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- switch (ip_hdr(skb)->protocol) {
- case IPPROTO_TCP:
- hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
- break;
- case IPPROTO_UDP:
- hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
- break;
- }
-
- /* Chain the bus address, so that the DMA engine finds this descr. */
- wmb();
- descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
-
- netif_trans_update(card->netdev); /* set netdev watchdog timer */
- return 0;
-}
-
-static int
-spider_net_set_low_watermark(struct spider_net_card *card)
-{
- struct spider_net_descr *descr = card->tx_chain.tail;
- struct spider_net_hw_descr *hwdescr;
- unsigned long flags;
- int status;
- int cnt=0;
- int i;
-
- /* Measure the length of the queue. Measurement does not
- * need to be precise -- does not need a lock.
- */
- while (descr != card->tx_chain.head) {
- status = descr->hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
- if (status == SPIDER_NET_DESCR_NOT_IN_USE)
- break;
- descr = descr->next;
- cnt++;
- }
-
- /* If TX queue is short, don't even bother with interrupts */
- if (cnt < card->tx_chain.num_desc/4)
- return cnt;
-
- /* Set the low watermark three-quarters of the way into the queue. */
- descr = card->tx_chain.tail;
- cnt = (cnt*3)/4;
- for (i=0;i<cnt; i++)
- descr = descr->next;
-
- /* Set the new watermark, clear the old watermark */
- spin_lock_irqsave(&card->tx_chain.lock, flags);
- descr->hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
- if (card->low_watermark && card->low_watermark != descr) {
- hwdescr = card->low_watermark->hwdescr;
- hwdescr->dmac_cmd_status =
- hwdescr->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
- }
- card->low_watermark = descr;
- spin_unlock_irqrestore(&card->tx_chain.lock, flags);
- return cnt;
-}
-
-/**
- * spider_net_release_tx_chain - processes sent tx descriptors
- * @card: adapter structure
- * @brutal: if set, don't care about whether descriptor seems to be in use
- *
- * returns 0 if the tx ring is empty, otherwise 1.
- *
- * spider_net_release_tx_chain releases the tx descriptors that spider has
- * finished with (if non-brutal) or simply releases the tx descriptors (if brutal).
- * If some other context is calling this function, we return 1 so that we're
- * scheduled again (if we were scheduled) and will not lose initiative.
- */
-static int
-spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
-{
- struct net_device *dev = card->netdev;
- struct spider_net_descr_chain *chain = &card->tx_chain;
- struct spider_net_descr *descr;
- struct spider_net_hw_descr *hwdescr;
- struct sk_buff *skb;
- u32 buf_addr;
- unsigned long flags;
- int status;
-
- while (1) {
- spin_lock_irqsave(&chain->lock, flags);
- if (chain->tail == chain->head) {
- spin_unlock_irqrestore(&chain->lock, flags);
- return 0;
- }
- descr = chain->tail;
- hwdescr = descr->hwdescr;
-
- status = spider_net_get_descr_status(hwdescr);
- switch (status) {
- case SPIDER_NET_DESCR_COMPLETE:
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += descr->skb->len;
- break;
-
- case SPIDER_NET_DESCR_CARDOWNED:
- if (!brutal) {
- spin_unlock_irqrestore(&chain->lock, flags);
- return 1;
- }
-
- /* fallthrough, if we release the descriptors
- * brutally (then we don't care about
- * SPIDER_NET_DESCR_CARDOWNED)
- */
- fallthrough;
-
- case SPIDER_NET_DESCR_RESPONSE_ERROR:
- case SPIDER_NET_DESCR_PROTECTION_ERROR:
- case SPIDER_NET_DESCR_FORCE_END:
- if (netif_msg_tx_err(card))
- dev_err(&card->netdev->dev, "forcing end of tx descriptor "
- "with status x%02x\n", status);
- dev->stats.tx_errors++;
- break;
-
- default:
- dev->stats.tx_dropped++;
- if (!brutal) {
- spin_unlock_irqrestore(&chain->lock, flags);
- return 1;
- }
- }
-
- chain->tail = descr->next;
- hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
- skb = descr->skb;
- descr->skb = NULL;
- buf_addr = hwdescr->buf_addr;
- spin_unlock_irqrestore(&chain->lock, flags);
-
- /* unmap the skb */
- if (skb) {
- dma_unmap_single(&card->pdev->dev, buf_addr, skb->len,
- DMA_TO_DEVICE);
- dev_consume_skb_any(skb);
- }
- }
- return 0;
-}
-
-/**
- * spider_net_kick_tx_dma - enables TX DMA processing
- * @card: card structure
- *
- * This routine will start the transmit DMA running if
- * it is not already running. This routine need only be
- * called when queueing a new packet to an empty tx queue.
- * Writes the current tx chain head as start address
- * of the tx descriptor chain and enables the transmission
- * DMA engine.
- */
-static inline void
-spider_net_kick_tx_dma(struct spider_net_card *card)
-{
- struct spider_net_descr *descr;
-
- if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) &
- SPIDER_NET_TX_DMA_EN)
- goto out;
-
- descr = card->tx_chain.tail;
- for (;;) {
- if (spider_net_get_descr_status(descr->hwdescr) ==
- SPIDER_NET_DESCR_CARDOWNED) {
- spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
- descr->bus_addr);
- spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
- SPIDER_NET_DMA_TX_VALUE);
- break;
- }
- if (descr == card->tx_chain.head)
- break;
- descr = descr->next;
- }
-
-out:
- mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
-}
-
-/**
- * spider_net_xmit - transmits a frame over the device
- * @skb: packet to send out
- * @netdev: interface device structure
- *
- * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on failure
- */
-static netdev_tx_t
-spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
-{
- int cnt;
- struct spider_net_card *card = netdev_priv(netdev);
-
- spider_net_release_tx_chain(card, 0);
-
- if (spider_net_prepare_tx_descr(card, skb) != 0) {
- netdev->stats.tx_dropped++;
- netif_stop_queue(netdev);
- return NETDEV_TX_BUSY;
- }
-
- cnt = spider_net_set_low_watermark(card);
- if (cnt < 5)
- spider_net_kick_tx_dma(card);
- return NETDEV_TX_OK;
-}
-
-/**
- * spider_net_cleanup_tx_ring - cleans up the TX ring
- * @t: timer context used to obtain the pointer to net card data structure
- *
- * spider_net_cleanup_tx_ring is called by either the tx_timer
- * or from the NAPI polling routine.
- * This routine releases resources associated with transmitted
- * packets, including updating the queue tail pointer.
- */
-static void
-spider_net_cleanup_tx_ring(struct timer_list *t)
-{
- struct spider_net_card *card = from_timer(card, t, tx_timer);
- if ((spider_net_release_tx_chain(card, 0) != 0) &&
- (card->netdev->flags & IFF_UP)) {
- spider_net_kick_tx_dma(card);
- netif_wake_queue(card->netdev);
- }
-}
-
-/**
- * spider_net_do_ioctl - called for device ioctls
- * @netdev: interface device structure
- * @ifr: request parameter structure for ioctl
- * @cmd: command code for ioctl
- *
- * returns 0 on success, <0 on failure. Currently, we have no special ioctls.
- * -EOPNOTSUPP is returned if an unknown ioctl is requested
- */
-static int
-spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- default:
- return -EOPNOTSUPP;
- }
-}
-
-/**
- * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
- * @descr: descriptor to process
- * @card: card structure
- *
- * Fills out skb structure and passes the data to the stack.
- * The descriptor state is not changed.
- */
-static void
-spider_net_pass_skb_up(struct spider_net_descr *descr,
- struct spider_net_card *card)
-{
- struct spider_net_hw_descr *hwdescr = descr->hwdescr;
- struct sk_buff *skb = descr->skb;
- struct net_device *netdev = card->netdev;
- u32 data_status = hwdescr->data_status;
- u32 data_error = hwdescr->data_error;
-
- skb_put(skb, hwdescr->valid_size);
-
- /* the card seems to add 2 bytes of junk in front
- * of the ethernet frame
- */
-#define SPIDER_MISALIGN 2
- skb_pull(skb, SPIDER_MISALIGN);
- skb->protocol = eth_type_trans(skb, netdev);
-
- /* checksum offload */
- skb_checksum_none_assert(skb);
- if (netdev->features & NETIF_F_RXCSUM) {
- if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
- SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
- !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- }
-
- if (data_status & SPIDER_NET_VLAN_PACKET) {
- /* further enhancements: HW-accel VLAN */
- }
-
- /* update netdevice statistics */
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += skb->len;
-
- /* pass skb up to stack */
- netif_receive_skb(skb);
-}
-
-static void show_rx_chain(struct spider_net_card *card)
-{
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *start= chain->tail;
- struct spider_net_descr *descr= start;
- struct spider_net_hw_descr *hwd = start->hwdescr;
- struct device *dev = &card->netdev->dev;
- u32 curr_desc, next_desc;
- int status;
-
- int tot = 0;
- int cnt = 0;
- int off = start - chain->ring;
- int cstat = hwd->dmac_cmd_status;
-
- dev_info(dev, "Total number of descrs=%d\n",
- chain->num_desc);
- dev_info(dev, "Chain tail located at descr=%d, status=0x%x\n",
- off, cstat);
-
- curr_desc = spider_net_read_reg(card, SPIDER_NET_GDACTDPA);
- next_desc = spider_net_read_reg(card, SPIDER_NET_GDACNEXTDA);
-
- status = cstat;
- do
- {
- hwd = descr->hwdescr;
- off = descr - chain->ring;
- status = hwd->dmac_cmd_status;
-
- if (descr == chain->head)
- dev_info(dev, "Chain head is at %d, head status=0x%x\n",
- off, status);
-
- if (curr_desc == descr->bus_addr)
- dev_info(dev, "HW curr desc (GDACTDPA) is at %d, status=0x%x\n",
- off, status);
-
- if (next_desc == descr->bus_addr)
- dev_info(dev, "HW next desc (GDACNEXTDA) is at %d, status=0x%x\n",
- off, status);
-
- if (hwd->next_descr_addr == 0)
- dev_info(dev, "chain is cut at %d\n", off);
-
- if (cstat != status) {
- int from = (chain->num_desc + off - cnt) % chain->num_desc;
- int to = (chain->num_desc + off - 1) % chain->num_desc;
- dev_info(dev, "Have %d (from %d to %d) descrs "
- "with stat=0x%08x\n", cnt, from, to, cstat);
- cstat = status;
- cnt = 0;
- }
-
- cnt ++;
- tot ++;
- descr = descr->next;
- } while (descr != start);
-
- dev_info(dev, "Last %d descrs with stat=0x%08x "
- "for a total of %d descrs\n", cnt, cstat, tot);
-
-#ifdef DEBUG
- /* Now dump the whole ring */
- descr = start;
- do
- {
- struct spider_net_hw_descr *hwd = descr->hwdescr;
- status = spider_net_get_descr_status(hwd);
- cnt = descr - chain->ring;
- dev_info(dev, "Descr %d stat=0x%08x skb=%p\n",
- cnt, status, descr->skb);
- dev_info(dev, "bus addr=%08x buf addr=%08x sz=%d\n",
- descr->bus_addr, hwd->buf_addr, hwd->buf_size);
- dev_info(dev, "next=%08x result sz=%d valid sz=%d\n",
- hwd->next_descr_addr, hwd->result_size,
- hwd->valid_size);
- dev_info(dev, "dmac=%08x data stat=%08x data err=%08x\n",
- hwd->dmac_cmd_status, hwd->data_status,
- hwd->data_error);
- dev_info(dev, "\n");
-
- descr = descr->next;
- } while (descr != start);
-#endif
-
-}
-
-/**
- * spider_net_resync_head_ptr - Advance head ptr past empty descrs
- * @card: card structure
- *
- * If the driver fails to keep up and empty the queue, then the
- * hardware will run out of room to put incoming packets. This
- * will cause the hardware to skip descrs that are full (instead
- * of halting/retrying). Thus, once the driver runs, it will need
- * to "catch up" to where the hardware chain pointer is at.
- */
-static void spider_net_resync_head_ptr(struct spider_net_card *card)
-{
- unsigned long flags;
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *descr;
- int i, status;
-
- /* Advance head pointer past any empty descrs */
- descr = chain->head;
- status = spider_net_get_descr_status(descr->hwdescr);
-
- if (status == SPIDER_NET_DESCR_NOT_IN_USE)
- return;
-
- spin_lock_irqsave(&chain->lock, flags);
-
- descr = chain->head;
- status = spider_net_get_descr_status(descr->hwdescr);
- for (i=0; i<chain->num_desc; i++) {
- if (status != SPIDER_NET_DESCR_CARDOWNED) break;
- descr = descr->next;
- status = spider_net_get_descr_status(descr->hwdescr);
- }
- chain->head = descr;
-
- spin_unlock_irqrestore(&chain->lock, flags);
-}
-
-static int spider_net_resync_tail_ptr(struct spider_net_card *card)
-{
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *descr;
- int i, status;
-
- /* Advance tail pointer past any empty and reaped descrs */
- descr = chain->tail;
- status = spider_net_get_descr_status(descr->hwdescr);
-
- for (i=0; i<chain->num_desc; i++) {
- if ((status != SPIDER_NET_DESCR_CARDOWNED) &&
- (status != SPIDER_NET_DESCR_NOT_IN_USE)) break;
- descr = descr->next;
- status = spider_net_get_descr_status(descr->hwdescr);
- }
- chain->tail = descr;
-
- if ((i == chain->num_desc) || (i == 0))
- return 1;
- return 0;
-}
-
-/**
- * spider_net_decode_one_descr - processes an RX descriptor
- * @card: card structure
- *
- * Returns 1 if a packet has been sent to the stack, otherwise 0.
- *
- * Processes an RX descriptor by iommu-unmapping the data buffer
- * and passing the packet up to the stack. This function is called
- * in softirq context, e.g. either bottom half from interrupt or
- * NAPI polling context.
- */
-static int
-spider_net_decode_one_descr(struct spider_net_card *card)
-{
- struct net_device *dev = card->netdev;
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *descr = chain->tail;
- struct spider_net_hw_descr *hwdescr = descr->hwdescr;
- u32 hw_buf_addr;
- int status;
-
- status = spider_net_get_descr_status(hwdescr);
-
- /* Nothing in the descriptor, or ring must be empty */
- if ((status == SPIDER_NET_DESCR_CARDOWNED) ||
- (status == SPIDER_NET_DESCR_NOT_IN_USE))
- return 0;
-
- /* descriptor definitively used -- move on tail */
- chain->tail = descr->next;
-
- /* unmap descriptor */
- hw_buf_addr = hwdescr->buf_addr;
- hwdescr->buf_addr = 0xffffffff;
- dma_unmap_single(&card->pdev->dev, hw_buf_addr, SPIDER_NET_MAX_FRAME,
- DMA_FROM_DEVICE);
-
- if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
- (status == SPIDER_NET_DESCR_PROTECTION_ERROR) ||
- (status == SPIDER_NET_DESCR_FORCE_END) ) {
- if (netif_msg_rx_err(card))
- dev_err(&dev->dev,
- "dropping RX descriptor with state %d\n", status);
- dev->stats.rx_dropped++;
- goto bad_desc;
- }
-
- if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
- (status != SPIDER_NET_DESCR_FRAME_END) ) {
- if (netif_msg_rx_err(card))
- dev_err(&card->netdev->dev,
- "RX descriptor with unknown state %d\n", status);
- card->spider_stats.rx_desc_unk_state++;
- goto bad_desc;
- }
-
- /* The cases we'll throw away the packet immediately */
- if (hwdescr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
- if (netif_msg_rx_err(card))
- dev_err(&card->netdev->dev,
- "error in received descriptor found, "
- "data_status=x%08x, data_error=x%08x\n",
- hwdescr->data_status, hwdescr->data_error);
- goto bad_desc;
- }
-
- if (hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_BAD_STATUS) {
- dev_err(&card->netdev->dev, "bad status, cmd_status=x%08x\n",
- hwdescr->dmac_cmd_status);
- pr_err("buf_addr=x%08x\n", hw_buf_addr);
- pr_err("buf_size=x%08x\n", hwdescr->buf_size);
- pr_err("next_descr_addr=x%08x\n", hwdescr->next_descr_addr);
- pr_err("result_size=x%08x\n", hwdescr->result_size);
- pr_err("valid_size=x%08x\n", hwdescr->valid_size);
- pr_err("data_status=x%08x\n", hwdescr->data_status);
- pr_err("data_error=x%08x\n", hwdescr->data_error);
- pr_err("which=%ld\n", descr - card->rx_chain.ring);
-
- card->spider_stats.rx_desc_error++;
- goto bad_desc;
- }
-
- /* Ok, we've got a packet in descr */
- spider_net_pass_skb_up(descr, card);
- descr->skb = NULL;
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
- return 1;
-
-bad_desc:
- if (netif_msg_rx_err(card))
- show_rx_chain(card);
- dev_kfree_skb_irq(descr->skb);
- descr->skb = NULL;
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
- return 0;
-}
-
-/**
- * spider_net_poll - NAPI poll function called by the stack to return packets
- * @napi: napi device structure
- * @budget: number of packets we can pass to the stack at most
- *
- * Returns the amount of the @budget that was consumed, at most @budget.
- *
- * spider_net_poll passes received packets from the rx descriptors to the
- * stack (using netif_receive_skb). If less than @budget was consumed, the
- * driver completes the NAPI poll and re-enables the rx interrupt.
- */
-static int spider_net_poll(struct napi_struct *napi, int budget)
-{
- struct spider_net_card *card = container_of(napi, struct spider_net_card, napi);
- int packets_done = 0;
-
- while (packets_done < budget) {
- if (!spider_net_decode_one_descr(card))
- break;
-
- packets_done++;
- }
-
- if ((packets_done == 0) && (card->num_rx_ints != 0)) {
- if (!spider_net_resync_tail_ptr(card))
- packets_done = budget;
- spider_net_resync_head_ptr(card);
- }
- card->num_rx_ints = 0;
-
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
-
- spider_net_cleanup_tx_ring(&card->tx_timer);
-
- /* if all packets are in the stack, enable interrupts and return 0 */
- /* if not, return 1 */
- if (packets_done < budget) {
- napi_complete_done(napi, packets_done);
- spider_net_rx_irq_on(card);
- card->ignore_rx_ramfull = 0;
- }
-
- return packets_done;
-}
-
-/**
- * spider_net_set_mac - sets the MAC of an interface
- * @netdev: interface device structure
- * @p: pointer to new MAC address
- *
- * Returns 0 on success, -EADDRNOTAVAIL if the new address is not valid.
- * The new address is written to the hardware unicast address registers.
- */
-static int
-spider_net_set_mac(struct net_device *netdev, void *p)
-{
- struct spider_net_card *card = netdev_priv(netdev);
- u32 macl, macu, regvalue;
- struct sockaddr *addr = p;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- eth_hw_addr_set(netdev, addr->sa_data);
-
- /* switch off GMACTPE and GMACRPE */
- regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
- regvalue &= ~((1 << 5) | (1 << 6));
- spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
-
- /* write mac */
- macu = (netdev->dev_addr[0]<<24) + (netdev->dev_addr[1]<<16) +
- (netdev->dev_addr[2]<<8) + (netdev->dev_addr[3]);
- macl = (netdev->dev_addr[4]<<8) + (netdev->dev_addr[5]);
- spider_net_write_reg(card, SPIDER_NET_GMACUNIMACU, macu);
- spider_net_write_reg(card, SPIDER_NET_GMACUNIMACL, macl);
-
- /* switch GMACTPE and GMACRPE back on */
- regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
- regvalue |= ((1 << 5) | (1 << 6));
- spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
-
- spider_net_set_promisc(card);
-
- return 0;
-}
-
-/**
- * spider_net_link_reset - reset the PHY and restart autonegotiation
- * @netdev: net device structure
- *
- * This is called when the PHY_LINK signal is asserted. For the blade this is
- * not connected so we should never get here.
- *
- */
-static void
-spider_net_link_reset(struct net_device *netdev)
-{
-
- struct spider_net_card *card = netdev_priv(netdev);
-
- del_timer_sync(&card->aneg_timer);
-
- /* clear interrupt, block further interrupts */
- spider_net_write_reg(card, SPIDER_NET_GMACST,
- spider_net_read_reg(card, SPIDER_NET_GMACST));
- spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
-
- /* reset phy and setup aneg */
- card->aneg_count = 0;
- card->medium = BCM54XX_COPPER;
- spider_net_setup_aneg(card);
- mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
-
-}
-
-/**
- * spider_net_handle_error_irq - handles errors raised by an interrupt
- * @card: card structure
- * @status_reg: interrupt status register 0 (GHIINT0STS)
- * @error_reg1: interrupt status register 1 (GHIINT1STS)
- * @error_reg2: interrupt status register 2 (GHIINT2STS)
- *
- * spider_net_handle_error_irq treats or ignores all error conditions
- * found when an interrupt is presented
- */
-static void
-spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
- u32 error_reg1, u32 error_reg2)
-{
- u32 i;
- int show_error = 1;
-
- /* check GHIINT0STS ************************************/
- if (status_reg)
- for (i = 0; i < 32; i++)
- if (status_reg & (1<<i))
- switch (i)
- {
- /* let error_reg1 and error_reg2 evaluation decide, what to do
- case SPIDER_NET_PHYINT:
- case SPIDER_NET_GMAC2INT:
- case SPIDER_NET_GMAC1INT:
- case SPIDER_NET_GFIFOINT:
- case SPIDER_NET_DMACINT:
- case SPIDER_NET_GSYSINT:
- break; */
-
- case SPIDER_NET_GIPSINT:
- show_error = 0;
- break;
-
- case SPIDER_NET_GPWOPCMPINT:
- /* PHY write operation completed */
- show_error = 0;
- break;
- case SPIDER_NET_GPROPCMPINT:
- /* PHY read operation completed */
- /* we don't use semaphores, as we poll for the completion
- * of the read operation in spider_net_read_phy. Should take
- * about 50 us
- */
- show_error = 0;
- break;
- case SPIDER_NET_GPWFFINT:
- /* PHY command queue full */
- if (netif_msg_intr(card))
- dev_err(&card->netdev->dev, "PHY write queue full\n");
- show_error = 0;
- break;
-
- /* case SPIDER_NET_GRMDADRINT: not used. print a message */
- /* case SPIDER_NET_GRMARPINT: not used. print a message */
- /* case SPIDER_NET_GRMMPINT: not used. print a message */
-
- case SPIDER_NET_GDTDEN0INT:
- /* someone has set TX_DMA_EN to 0 */
- show_error = 0;
- break;
-
- case SPIDER_NET_GDDDEN0INT:
- case SPIDER_NET_GDCDEN0INT:
- case SPIDER_NET_GDBDEN0INT:
- case SPIDER_NET_GDADEN0INT:
- /* someone has set RX_DMA_EN to 0 */
- show_error = 0;
- break;
-
- /* RX interrupts */
- case SPIDER_NET_GDDFDCINT:
- case SPIDER_NET_GDCFDCINT:
- case SPIDER_NET_GDBFDCINT:
- case SPIDER_NET_GDAFDCINT:
- /* case SPIDER_NET_GDNMINT: not used. print a message */
- /* case SPIDER_NET_GCNMINT: not used. print a message */
- /* case SPIDER_NET_GBNMINT: not used. print a message */
- /* case SPIDER_NET_GANMINT: not used. print a message */
- /* case SPIDER_NET_GRFNMINT: not used. print a message */
- show_error = 0;
- break;
-
- /* TX interrupts */
- case SPIDER_NET_GDTFDCINT:
- show_error = 0;
- break;
- case SPIDER_NET_GTTEDINT:
- show_error = 0;
- break;
- case SPIDER_NET_GDTDCEINT:
- /* chain end. If a descriptor should be sent, kick off
- * tx dma
- if (card->tx_chain.tail != card->tx_chain.head)
- spider_net_kick_tx_dma(card);
- */
- show_error = 0;
- break;
-
- /* case SPIDER_NET_G1TMCNTINT: not used. print a message */
- /* case SPIDER_NET_GFREECNTINT: not used. print a message */
- }
-
- /* check GHIINT1STS ************************************/
- if (error_reg1)
- for (i = 0; i < 32; i++)
- if (error_reg1 & (1<<i))
- switch (i)
- {
- case SPIDER_NET_GTMFLLINT:
- /* TX RAM full may happen in normal operation.
- * Logging is not needed.
- */
- show_error = 0;
- break;
- case SPIDER_NET_GRFDFLLINT:
- case SPIDER_NET_GRFCFLLINT:
- case SPIDER_NET_GRFBFLLINT:
- case SPIDER_NET_GRFAFLLINT:
- case SPIDER_NET_GRMFLLINT:
- /* Could happen when rx chain is full */
- if (card->ignore_rx_ramfull == 0) {
- card->ignore_rx_ramfull = 1;
- spider_net_resync_head_ptr(card);
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
- card->num_rx_ints ++;
- napi_schedule(&card->napi);
- }
- show_error = 0;
- break;
-
- /* case SPIDER_NET_GTMSHTINT: problem, print a message */
- case SPIDER_NET_GDTINVDINT:
- /* allrighty. tx from previous descr ok */
- show_error = 0;
- break;
-
- /* chain end */
- case SPIDER_NET_GDDDCEINT:
- case SPIDER_NET_GDCDCEINT:
- case SPIDER_NET_GDBDCEINT:
- case SPIDER_NET_GDADCEINT:
- spider_net_resync_head_ptr(card);
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
- card->num_rx_ints ++;
- napi_schedule(&card->napi);
- show_error = 0;
- break;
-
- /* invalid descriptor */
- case SPIDER_NET_GDDINVDINT:
- case SPIDER_NET_GDCINVDINT:
- case SPIDER_NET_GDBINVDINT:
- case SPIDER_NET_GDAINVDINT:
- /* Could happen when rx chain is full */
- spider_net_resync_head_ptr(card);
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
- card->num_rx_ints ++;
- napi_schedule(&card->napi);
- show_error = 0;
- break;
-
- /* case SPIDER_NET_GDTRSERINT: problem, print a message */
- /* case SPIDER_NET_GDDRSERINT: problem, print a message */
- /* case SPIDER_NET_GDCRSERINT: problem, print a message */
- /* case SPIDER_NET_GDBRSERINT: problem, print a message */
- /* case SPIDER_NET_GDARSERINT: problem, print a message */
- /* case SPIDER_NET_GDSERINT: problem, print a message */
- /* case SPIDER_NET_GDTPTERINT: problem, print a message */
- /* case SPIDER_NET_GDDPTERINT: problem, print a message */
- /* case SPIDER_NET_GDCPTERINT: problem, print a message */
- /* case SPIDER_NET_GDBPTERINT: problem, print a message */
- /* case SPIDER_NET_GDAPTERINT: problem, print a message */
- default:
- show_error = 1;
- break;
- }
-
- /* check GHIINT2STS ************************************/
- if (error_reg2)
- for (i = 0; i < 32; i++)
- if (error_reg2 & (1<<i))
- switch (i)
- {
- /* there is nothing we can (want to) do at this time. Log a
- * message, we can switch on and off the specific values later on
- case SPIDER_NET_GPROPERINT:
- case SPIDER_NET_GMCTCRSNGINT:
- case SPIDER_NET_GMCTLCOLINT:
- case SPIDER_NET_GMCTTMOTINT:
- case SPIDER_NET_GMCRCAERINT:
- case SPIDER_NET_GMCRCALERINT:
- case SPIDER_NET_GMCRALNERINT:
- case SPIDER_NET_GMCROVRINT:
- case SPIDER_NET_GMCRRNTINT:
- case SPIDER_NET_GMCRRXERINT:
- case SPIDER_NET_GTITCSERINT:
- case SPIDER_NET_GTIFMTERINT:
- case SPIDER_NET_GTIPKTRVKINT:
- case SPIDER_NET_GTISPINGINT:
- case SPIDER_NET_GTISADNGINT:
- case SPIDER_NET_GTISPDNGINT:
- case SPIDER_NET_GRIFMTERINT:
- case SPIDER_NET_GRIPKTRVKINT:
- case SPIDER_NET_GRISPINGINT:
- case SPIDER_NET_GRISADNGINT:
- case SPIDER_NET_GRISPDNGINT:
- break;
- */
- default:
- break;
- }
-
- if ((show_error) && (netif_msg_intr(card)) && net_ratelimit())
- dev_err(&card->netdev->dev, "Error interrupt, GHIINT0STS = 0x%08x, "
- "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
- status_reg, error_reg1, error_reg2);
-
- /* clear interrupt sources */
- spider_net_write_reg(card, SPIDER_NET_GHIINT1STS, error_reg1);
- spider_net_write_reg(card, SPIDER_NET_GHIINT2STS, error_reg2);
-}
-
-/**
- * spider_net_interrupt - interrupt handler for spider_net
- * @irq: interrupt number
- * @ptr: pointer to net_device
- *
- * returns IRQ_HANDLED, if interrupt was for driver, or IRQ_NONE, if no
- * interrupt found raised by card.
- *
- * This is the interrupt handler that turns off
- * interrupts for this device and makes the stack poll the driver
- */
-static irqreturn_t
-spider_net_interrupt(int irq, void *ptr)
-{
- struct net_device *netdev = ptr;
- struct spider_net_card *card = netdev_priv(netdev);
- u32 status_reg, error_reg1, error_reg2;
-
- status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS);
- error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS);
- error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS);
-
- if (!(status_reg & SPIDER_NET_INT0_MASK_VALUE) &&
- !(error_reg1 & SPIDER_NET_INT1_MASK_VALUE) &&
- !(error_reg2 & SPIDER_NET_INT2_MASK_VALUE))
- return IRQ_NONE;
-
- if (status_reg & SPIDER_NET_RXINT ) {
- spider_net_rx_irq_off(card);
- napi_schedule(&card->napi);
- card->num_rx_ints ++;
- }
- if (status_reg & SPIDER_NET_TXINT)
- napi_schedule(&card->napi);
-
- if (status_reg & SPIDER_NET_LINKINT)
- spider_net_link_reset(netdev);
-
- if (status_reg & SPIDER_NET_ERRINT )
- spider_net_handle_error_irq(card, status_reg,
- error_reg1, error_reg2);
-
- /* clear interrupt sources */
- spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);
-
- return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * spider_net_poll_controller - artificial interrupt for netconsole etc.
- * @netdev: interface device structure
- *
- * see Documentation/networking/netconsole.rst
- */
-static void
-spider_net_poll_controller(struct net_device *netdev)
-{
- disable_irq(netdev->irq);
- spider_net_interrupt(netdev->irq, netdev);
- enable_irq(netdev->irq);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
-/**
- * spider_net_enable_interrupts - enable interrupts
- * @card: card structure
- *
- * spider_net_enable_interrupts enables several interrupts
- */
-static void
-spider_net_enable_interrupts(struct spider_net_card *card)
-{
- spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK,
- SPIDER_NET_INT0_MASK_VALUE);
- spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK,
- SPIDER_NET_INT1_MASK_VALUE);
- spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK,
- SPIDER_NET_INT2_MASK_VALUE);
-}
-
-/**
- * spider_net_disable_interrupts - disable interrupts
- * @card: card structure
- *
- * spider_net_disable_interrupts disables all the interrupts
- */
-static void
-spider_net_disable_interrupts(struct spider_net_card *card)
-{
- spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
- spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
- spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
- spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
-}
-
-/**
- * spider_net_init_card - initializes the card
- * @card: card structure
- *
- * spider_net_init_card initializes the card so that other registers can
- * be used
- */
-static void
-spider_net_init_card(struct spider_net_card *card)
-{
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_STOP_VALUE);
-
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_RUN_VALUE);
-
- /* trigger ETOMOD signal */
- spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
- spider_net_read_reg(card, SPIDER_NET_GMACOPEMD) | 0x4);
-
- spider_net_disable_interrupts(card);
-}
-
-/**
- * spider_net_enable_card - enables the card by setting all kinds of regs
- * @card: card structure
- *
- * spider_net_enable_card sets a lot of SMMIO registers to enable the device
- */
-static void
-spider_net_enable_card(struct spider_net_card *card)
-{
- int i;
- /* the following array consists of (register),(value) pairs
- * that are set in this function. A register of 0 ends the list
- */
- u32 regs[][2] = {
- { SPIDER_NET_GRESUMINTNUM, 0 },
- { SPIDER_NET_GREINTNUM, 0 },
-
- /* set interrupt frame number registers */
- /* clear the single DMA engine registers first */
- { SPIDER_NET_GFAFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
- { SPIDER_NET_GFBFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
- { SPIDER_NET_GFCFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
- { SPIDER_NET_GFDFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
- /* then set, what we really need */
- { SPIDER_NET_GFFRMNUM, SPIDER_NET_FRAMENUM_VALUE },
-
- /* timer counter registers and stuff */
- { SPIDER_NET_GFREECNNUM, 0 },
- { SPIDER_NET_GONETIMENUM, 0 },
- { SPIDER_NET_GTOUTFRMNUM, 0 },
-
- /* RX mode setting */
- { SPIDER_NET_GRXMDSET, SPIDER_NET_RXMODE_VALUE },
- /* TX mode setting */
- { SPIDER_NET_GTXMDSET, SPIDER_NET_TXMODE_VALUE },
- /* IPSEC mode setting */
- { SPIDER_NET_GIPSECINIT, SPIDER_NET_IPSECINIT_VALUE },
-
- { SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE },
-
- { SPIDER_NET_GMRWOLCTRL, 0 },
- { SPIDER_NET_GTESTMD, 0x10000000 },
- { SPIDER_NET_GTTQMSK, 0x00400040 },
-
- { SPIDER_NET_GMACINTEN, 0 },
-
- /* flow control stuff */
- { SPIDER_NET_GMACAPAUSE, SPIDER_NET_MACAPAUSE_VALUE },
- { SPIDER_NET_GMACTXPAUSE, SPIDER_NET_TXPAUSE_VALUE },
-
- { SPIDER_NET_GMACBSTLMT, SPIDER_NET_BURSTLMT_VALUE },
- { 0, 0}
- };
-
- i = 0;
- while (regs[i][0]) {
- spider_net_write_reg(card, regs[i][0], regs[i][1]);
- i++;
- }
-
- /* clear unicast filter table entries 1 to 14 */
- for (i = 1; i <= 14; i++) {
- spider_net_write_reg(card,
- SPIDER_NET_GMRUAFILnR + i * 8,
- 0x00080000);
- spider_net_write_reg(card,
- SPIDER_NET_GMRUAFILnR + i * 8 + 4,
- 0x00000000);
- }
-
- spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, 0x08080000);
-
- spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE);
-
- /* set chain tail address for RX chains and
- * enable DMA
- */
- spider_net_enable_rxchtails(card);
- spider_net_enable_rxdmac(card);
-
- spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE);
-
- spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
- SPIDER_NET_LENLMT_VALUE);
- spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
- SPIDER_NET_OPMODE_VALUE);
-
- spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
- SPIDER_NET_GDTBSTA);
-}
-
-/**
- * spider_net_download_firmware - loads firmware into the adapter
- * @card: card structure
- * @firmware_ptr: pointer to firmware data
- *
- * spider_net_download_firmware loads the firmware data into the
- * adapter. It assumes the length etc. to be correct.
- */
-static int
-spider_net_download_firmware(struct spider_net_card *card,
- const void *firmware_ptr)
-{
- int sequencer, i;
- const u32 *fw_ptr = firmware_ptr;
-
- /* stop sequencers */
- spider_net_write_reg(card, SPIDER_NET_GSINIT,
- SPIDER_NET_STOP_SEQ_VALUE);
-
- for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
- sequencer++) {
- spider_net_write_reg(card,
- SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
- for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
- spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
- sequencer * 8, *fw_ptr);
- fw_ptr++;
- }
- }
-
- if (spider_net_read_reg(card, SPIDER_NET_GSINIT))
- return -EIO;
-
- spider_net_write_reg(card, SPIDER_NET_GSINIT,
- SPIDER_NET_RUN_SEQ_VALUE);
-
- return 0;
-}
-
-/**
- * spider_net_init_firmware - reads in firmware parts
- * @card: card structure
- *
- * Returns 0 on success, <0 on failure
- *
- * spider_net_init_firmware opens the sequencer firmware and does some basic
- * checks. This function opens and releases the firmware structure. A call
- * to download the firmware is performed before the release.
- *
- * Firmware format
- * ===============
- * spider_fw.bin is expected to be a file containing 6*1024*4 bytes, 4k being
- * the program for each sequencer. Use the command
- * tail -q -n +2 Seq_code1_0x088.txt Seq_code2_0x090.txt \
- * Seq_code3_0x098.txt Seq_code4_0x0A0.txt Seq_code5_0x0A8.txt \
- * Seq_code6_0x0B0.txt | xxd -r -p -c4 > spider_fw.bin
- *
- * to generate spider_fw.bin, if you have sequencer programs with something
- * like the following contents for each sequencer:
- * <ONE LINE COMMENT>
- * <FIRST 4-BYTES-WORD FOR SEQUENCER>
- * <SECOND 4-BYTES-WORD FOR SEQUENCER>
- * ...
- * <1024th 4-BYTES-WORD FOR SEQUENCER>
- */
-static int
-spider_net_init_firmware(struct spider_net_card *card)
-{
- struct firmware *firmware = NULL;
- struct device_node *dn;
- const u8 *fw_prop = NULL;
- int err = -ENOENT;
- int fw_size;
-
- if (request_firmware((const struct firmware **)&firmware,
- SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
- if ( (firmware->size != SPIDER_NET_FIRMWARE_LEN) &&
- netif_msg_probe(card) ) {
- dev_err(&card->netdev->dev,
- "Incorrect size of spidernet firmware in " \
- "filesystem. Looking in host firmware...\n");
- goto try_host_fw;
- }
- err = spider_net_download_firmware(card, firmware->data);
-
- release_firmware(firmware);
- if (err)
- goto try_host_fw;
-
- goto done;
- }
-
-try_host_fw:
- dn = pci_device_to_OF_node(card->pdev);
- if (!dn)
- goto out_err;
-
- fw_prop = of_get_property(dn, "firmware", &fw_size);
- if (!fw_prop)
- goto out_err;
-
- if ( (fw_size != SPIDER_NET_FIRMWARE_LEN) &&
- netif_msg_probe(card) ) {
- dev_err(&card->netdev->dev,
- "Incorrect size of spidernet firmware in host firmware\n");
- goto done;
- }
-
- err = spider_net_download_firmware(card, fw_prop);
-
-done:
- return err;
-out_err:
- if (netif_msg_probe(card))
- dev_err(&card->netdev->dev,
- "Couldn't find spidernet firmware in filesystem " \
- "or host firmware\n");
- return err;
-}
-
-/**
- * spider_net_open - called upon ifconfig up
- * @netdev: interface device structure
- *
- * returns 0 on success, <0 on failure
- *
- * spider_net_open allocates all the descriptors and memory needed for
- * operation, sets up multicast list and enables interrupts
- */
-int
-spider_net_open(struct net_device *netdev)
-{
- struct spider_net_card *card = netdev_priv(netdev);
- int result;
-
- result = spider_net_init_firmware(card);
- if (result)
- goto init_firmware_failed;
-
- /* start probing with copper */
- card->aneg_count = 0;
- card->medium = BCM54XX_COPPER;
- spider_net_setup_aneg(card);
- if (card->phy.def->phy_id)
- mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
-
- result = spider_net_init_chain(card, &card->tx_chain);
- if (result)
- goto alloc_tx_failed;
- card->low_watermark = NULL;
-
- result = spider_net_init_chain(card, &card->rx_chain);
- if (result)
- goto alloc_rx_failed;
-
- /* Allocate rx skbs */
- result = spider_net_alloc_rx_skbs(card);
- if (result)
- goto alloc_skbs_failed;
-
- spider_net_set_multi(netdev);
-
- /* further enhancement: setup hw vlan, if needed */
-
- result = -EBUSY;
- if (request_irq(netdev->irq, spider_net_interrupt,
- IRQF_SHARED, netdev->name, netdev))
- goto register_int_failed;
-
- spider_net_enable_card(card);
-
- netif_start_queue(netdev);
- netif_carrier_on(netdev);
- napi_enable(&card->napi);
-
- spider_net_enable_interrupts(card);
-
- return 0;
-
-register_int_failed:
- spider_net_free_rx_chain_contents(card);
-alloc_skbs_failed:
- spider_net_free_chain(card, &card->rx_chain);
-alloc_rx_failed:
- spider_net_free_chain(card, &card->tx_chain);
-alloc_tx_failed:
- del_timer_sync(&card->aneg_timer);
-init_firmware_failed:
- return result;
-}
-
-/**
- * spider_net_link_phy - PHY link state/autonegotiation timer handler
- * @t: timer context used to obtain the pointer to net card data structure
- */
-static void spider_net_link_phy(struct timer_list *t)
-{
- struct spider_net_card *card = from_timer(card, t, aneg_timer);
- struct mii_phy *phy = &card->phy;
-
- /* if link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, setup phy again */
- if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) {
-
- pr_debug("%s: link is down trying to bring it up\n",
- card->netdev->name);
-
- switch (card->medium) {
- case BCM54XX_COPPER:
- /* enable fiber with autonegotiation first */
- if (phy->def->ops->enable_fiber)
- phy->def->ops->enable_fiber(phy, 1);
- card->medium = BCM54XX_FIBER;
- break;
-
- case BCM54XX_FIBER:
- /* fiber didn't come up, try to disable fiber autoneg */
- if (phy->def->ops->enable_fiber)
- phy->def->ops->enable_fiber(phy, 0);
- card->medium = BCM54XX_UNKNOWN;
- break;
-
- case BCM54XX_UNKNOWN:
- /* copper and fiber (with and without autoneg) failed,
- * retry from the beginning
- */
- spider_net_setup_aneg(card);
- card->medium = BCM54XX_COPPER;
- break;
- }
-
- card->aneg_count = 0;
- mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
- return;
- }
-
- /* link still not up, try again later */
- if (!(phy->def->ops->poll_link(phy))) {
- card->aneg_count++;
- mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
- return;
- }
-
- /* link came up, get abilities */
- phy->def->ops->read_link(phy);
-
- spider_net_write_reg(card, SPIDER_NET_GMACST,
- spider_net_read_reg(card, SPIDER_NET_GMACST));
- spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0x4);
-
- if (phy->speed == 1000)
- spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0x00000001);
- else
- spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0);
-
- card->aneg_count = 0;
-
- pr_info("%s: link up, %i Mbps, %s-duplex %sautoneg.\n",
- card->netdev->name, phy->speed,
- phy->duplex == 1 ? "Full" : "Half",
- phy->autoneg == 1 ? "" : "no ");
-}
-
-/**
- * spider_net_setup_phy - setup PHY
- * @card: card structure
- *
- * returns 0 on success, <0 on failure
- *
- * spider_net_setup_phy is used as part of spider_net_probe.
- **/
-static int
-spider_net_setup_phy(struct spider_net_card *card)
-{
- struct mii_phy *phy = &card->phy;
-
- spider_net_write_reg(card, SPIDER_NET_GDTDMASEL,
- SPIDER_NET_DMASEL_VALUE);
- spider_net_write_reg(card, SPIDER_NET_GPCCTRL,
- SPIDER_NET_PHY_CTRL_VALUE);
-
- phy->dev = card->netdev;
- phy->mdio_read = spider_net_read_phy;
- phy->mdio_write = spider_net_write_phy;
-
- for (phy->mii_id = 1; phy->mii_id <= 31; phy->mii_id++) {
- unsigned short id;
- id = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
- if (id != 0x0000 && id != 0xffff) {
- if (!sungem_phy_probe(phy, phy->mii_id)) {
- pr_info("Found %s.\n", phy->def->name);
- break;
- }
- }
- }
-
- return 0;
-}
-
-/**
- * spider_net_workaround_rxramfull - work around firmware bug
- * @card: card structure
- *
- * no return value
- **/
-static void
-spider_net_workaround_rxramfull(struct spider_net_card *card)
-{
- int i, sequencer = 0;
-
- /* cancel reset */
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_RUN_VALUE);
-
- /* empty sequencer data */
- for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
- sequencer++) {
- spider_net_write_reg(card, SPIDER_NET_GSnPRGADR +
- sequencer * 8, 0x0);
- for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
- spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
- sequencer * 8, 0x0);
- }
- }
-
- /* set sequencer operation */
- spider_net_write_reg(card, SPIDER_NET_GSINIT, 0x000000fe);
-
- /* reset */
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_STOP_VALUE);
-}
-
-/**
- * spider_net_stop - called upon ifconfig down
- * @netdev: interface device structure
- *
- * always returns 0
- */
-int
-spider_net_stop(struct net_device *netdev)
-{
- struct spider_net_card *card = netdev_priv(netdev);
-
- napi_disable(&card->napi);
- netif_carrier_off(netdev);
- netif_stop_queue(netdev);
- del_timer_sync(&card->tx_timer);
- del_timer_sync(&card->aneg_timer);
-
- spider_net_disable_interrupts(card);
-
- free_irq(netdev->irq, netdev);
-
- spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
- SPIDER_NET_DMA_TX_FEND_VALUE);
-
- /* turn off DMA, force end */
- spider_net_disable_rxdmac(card);
-
- /* release chains */
- spider_net_release_tx_chain(card, 1);
- spider_net_free_rx_chain_contents(card);
-
- spider_net_free_chain(card, &card->tx_chain);
- spider_net_free_chain(card, &card->rx_chain);
-
- return 0;
-}
-
-/**
- * spider_net_tx_timeout_task - task scheduled by the watchdog timeout
- * function (not to be called in interrupt context)
- * @work: work context used to obtain the pointer to net card data structure
- *
- * called as task when tx hangs, resets interface (if interface is up)
- */
-static void
-spider_net_tx_timeout_task(struct work_struct *work)
-{
- struct spider_net_card *card =
- container_of(work, struct spider_net_card, tx_timeout_task);
- struct net_device *netdev = card->netdev;
-
- if (!(netdev->flags & IFF_UP))
- goto out;
-
- netif_device_detach(netdev);
- spider_net_stop(netdev);
-
- spider_net_workaround_rxramfull(card);
- spider_net_init_card(card);
-
- if (spider_net_setup_phy(card))
- goto out;
-
- spider_net_open(netdev);
- spider_net_kick_tx_dma(card);
- netif_device_attach(netdev);
-
-out:
- atomic_dec(&card->tx_timeout_task_counter);
-}
-
-/**
- * spider_net_tx_timeout - called when the tx timeout watchdog kicks in.
- * @netdev: interface device structure
- * @txqueue: unused
- *
- * called if tx hangs. Schedules a task that resets the interface
- */
-static void
-spider_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
-{
- struct spider_net_card *card;
-
- card = netdev_priv(netdev);
- atomic_inc(&card->tx_timeout_task_counter);
- if (netdev->flags & IFF_UP)
- schedule_work(&card->tx_timeout_task);
- else
- atomic_dec(&card->tx_timeout_task_counter);
- card->spider_stats.tx_timeouts++;
-}
-
-static const struct net_device_ops spider_net_ops = {
- .ndo_open = spider_net_open,
- .ndo_stop = spider_net_stop,
- .ndo_start_xmit = spider_net_xmit,
- .ndo_set_rx_mode = spider_net_set_multi,
- .ndo_set_mac_address = spider_net_set_mac,
- .ndo_eth_ioctl = spider_net_do_ioctl,
- .ndo_tx_timeout = spider_net_tx_timeout,
- .ndo_validate_addr = eth_validate_addr,
- /* HW VLAN */
-#ifdef CONFIG_NET_POLL_CONTROLLER
- /* poll controller */
- .ndo_poll_controller = spider_net_poll_controller,
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-};
-
-/**
- * spider_net_setup_netdev_ops - initialization of net_device operations
- * @netdev: net_device structure
- *
- * fills out function pointers in the net_device structure
- */
-static void
-spider_net_setup_netdev_ops(struct net_device *netdev)
-{
- netdev->netdev_ops = &spider_net_ops;
- netdev->watchdog_timeo = SPIDER_NET_WATCHDOG_TIMEOUT;
- /* ethtool ops */
- netdev->ethtool_ops = &spider_net_ethtool_ops;
-}
-
-/**
- * spider_net_setup_netdev - initialization of net_device
- * @card: card structure
- *
- * Returns 0 on success or <0 on failure
- *
- * spider_net_setup_netdev initializes the net_device structure
- **/
-static int
-spider_net_setup_netdev(struct spider_net_card *card)
-{
- int result;
- struct net_device *netdev = card->netdev;
- struct device_node *dn;
- struct sockaddr addr;
- const u8 *mac;
-
- SET_NETDEV_DEV(netdev, &card->pdev->dev);
-
- pci_set_drvdata(card->pdev, netdev);
-
- timer_setup(&card->tx_timer, spider_net_cleanup_tx_ring, 0);
- netdev->irq = card->pdev->irq;
-
- card->aneg_count = 0;
- timer_setup(&card->aneg_timer, spider_net_link_phy, 0);
-
- netif_napi_add(netdev, &card->napi, spider_net_poll);
-
- spider_net_setup_netdev_ops(netdev);
-
- netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
- if (SPIDER_NET_RX_CSUM_DEFAULT)
- netdev->features |= NETIF_F_RXCSUM;
- netdev->features |= NETIF_F_IP_CSUM;
- /* some time: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- * NETIF_F_HW_VLAN_CTAG_FILTER
- */
- netdev->lltx = true;
-
- /* MTU range: 64 - 2294 */
- netdev->min_mtu = SPIDER_NET_MIN_MTU;
- netdev->max_mtu = SPIDER_NET_MAX_MTU;
-
- netdev->irq = card->pdev->irq;
- card->num_rx_ints = 0;
- card->ignore_rx_ramfull = 0;
-
- dn = pci_device_to_OF_node(card->pdev);
- if (!dn)
- return -EIO;
-
- mac = of_get_property(dn, "local-mac-address", NULL);
- if (!mac)
- return -EIO;
- memcpy(addr.sa_data, mac, ETH_ALEN);
-
- result = spider_net_set_mac(netdev, &addr);
- if ((result) && (netif_msg_probe(card)))
- dev_err(&card->netdev->dev,
- "Failed to set MAC address: %i\n", result);
-
- result = register_netdev(netdev);
- if (result) {
- if (netif_msg_probe(card))
- dev_err(&card->netdev->dev,
- "Couldn't register net_device: %i\n", result);
- return result;
- }
-
- if (netif_msg_probe(card))
- pr_info("Initialized device %s.\n", netdev->name);
-
- return 0;
-}
-
-/**
- * spider_net_alloc_card - allocates net_device and card structure
- *
- * returns the card structure or NULL in case of errors
- *
- * the card and net_device structures are linked to each other
- */
-static struct spider_net_card *
-spider_net_alloc_card(void)
-{
- struct net_device *netdev;
- struct spider_net_card *card;
-
- netdev = alloc_etherdev(struct_size(card, darray,
- size_add(tx_descriptors, rx_descriptors)));
- if (!netdev)
- return NULL;
-
- card = netdev_priv(netdev);
- card->netdev = netdev;
- card->msg_enable = SPIDER_NET_DEFAULT_MSG;
- INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task);
- init_waitqueue_head(&card->waitq);
- atomic_set(&card->tx_timeout_task_counter, 0);
-
- card->rx_chain.num_desc = rx_descriptors;
- card->rx_chain.ring = card->darray;
- card->tx_chain.num_desc = tx_descriptors;
- card->tx_chain.ring = card->darray + rx_descriptors;
-
- return card;
-}
-
-/**
- * spider_net_undo_pci_setup - releases PCI resources
- * @card: card structure
- *
- * spider_net_undo_pci_setup releases the mapped regions
- */
-static void
-spider_net_undo_pci_setup(struct spider_net_card *card)
-{
- iounmap(card->regs);
- pci_release_regions(card->pdev);
-}
-
-/**
- * spider_net_setup_pci_dev - sets up the device in terms of PCI operations
- * @pdev: PCI device
- *
- * Returns the card structure or NULL if any errors occur
- *
- * spider_net_setup_pci_dev initializes pdev and together with the
- * functions called in spider_net_open configures the device so that
- * data can be transferred over it
- * The net_device structure is attached to the card structure, if the
- * function returns without error.
- **/
-static struct spider_net_card *
-spider_net_setup_pci_dev(struct pci_dev *pdev)
-{
- struct spider_net_card *card;
- unsigned long mmio_start, mmio_len;
-
- if (pci_enable_device(pdev)) {
- dev_err(&pdev->dev, "Couldn't enable PCI device\n");
- return NULL;
- }
-
- if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- dev_err(&pdev->dev,
- "Couldn't find proper PCI device base address.\n");
- goto out_disable_dev;
- }
-
- if (pci_request_regions(pdev, spider_net_driver_name)) {
- dev_err(&pdev->dev,
- "Couldn't obtain PCI resources, aborting.\n");
- goto out_disable_dev;
- }
-
- pci_set_master(pdev);
-
- card = spider_net_alloc_card();
- if (!card) {
- dev_err(&pdev->dev,
- "Couldn't allocate net_device structure, aborting.\n");
- goto out_release_regions;
- }
- card->pdev = pdev;
-
- /* fetch base address and length of first resource */
- mmio_start = pci_resource_start(pdev, 0);
- mmio_len = pci_resource_len(pdev, 0);
-
- card->netdev->mem_start = mmio_start;
- card->netdev->mem_end = mmio_start + mmio_len;
- card->regs = ioremap(mmio_start, mmio_len);
-
- if (!card->regs) {
- dev_err(&pdev->dev,
- "Couldn't obtain PCI resources, aborting.\n");
- goto out_release_regions;
- }
-
- return card;
-
-out_release_regions:
- pci_release_regions(pdev);
-out_disable_dev:
- pci_disable_device(pdev);
- return NULL;
-}
-
-/**
- * spider_net_probe - initialization of a device
- * @pdev: PCI device
- * @ent: entry in the device id list
- *
- * Returns 0 on success, <0 on failure
- *
- * spider_net_probe initializes pdev and registers a net_device
- * structure for it. After that, the device can be ifconfig'ed up
- **/
-static int
-spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int err = -EIO;
- struct spider_net_card *card;
-
- card = spider_net_setup_pci_dev(pdev);
- if (!card)
- goto out;
-
- spider_net_workaround_rxramfull(card);
- spider_net_init_card(card);
-
- err = spider_net_setup_phy(card);
- if (err)
- goto out_undo_pci;
-
- err = spider_net_setup_netdev(card);
- if (err)
- goto out_undo_pci;
-
- return 0;
-
-out_undo_pci:
- spider_net_undo_pci_setup(card);
- free_netdev(card->netdev);
-out:
- return err;
-}
-
-/**
- * spider_net_remove - removal of a device
- * @pdev: PCI device
- *
- * spider_net_remove is called to remove the device and unregisters the
- * net_device
- **/
-static void
-spider_net_remove(struct pci_dev *pdev)
-{
- struct net_device *netdev;
- struct spider_net_card *card;
-
- netdev = pci_get_drvdata(pdev);
- card = netdev_priv(netdev);
-
- wait_event(card->waitq,
- atomic_read(&card->tx_timeout_task_counter) == 0);
-
- unregister_netdev(netdev);
-
- /* switch off card */
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_STOP_VALUE);
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_RUN_VALUE);
-
- spider_net_undo_pci_setup(card);
- free_netdev(netdev);
-}
-
-static struct pci_driver spider_net_driver = {
- .name = spider_net_driver_name,
- .id_table = spider_net_pci_tbl,
- .probe = spider_net_probe,
- .remove = spider_net_remove
-};
-
-/**
- * spider_net_init - init function when the driver is loaded
- *
- * spider_net_init registers the device driver
- */
-static int __init spider_net_init(void)
-{
- printk(KERN_INFO "Spidernet version %s.\n", VERSION);
-
- if (rx_descriptors < SPIDER_NET_RX_DESCRIPTORS_MIN) {
- rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MIN;
- pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
- }
- if (rx_descriptors > SPIDER_NET_RX_DESCRIPTORS_MAX) {
- rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MAX;
- pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
- }
- if (tx_descriptors < SPIDER_NET_TX_DESCRIPTORS_MIN) {
- tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MIN;
- pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
- }
- if (tx_descriptors > SPIDER_NET_TX_DESCRIPTORS_MAX) {
- tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MAX;
- pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
- }
-
- return pci_register_driver(&spider_net_driver);
-}
-
-/**
- * spider_net_cleanup - exit function when driver is unloaded
- *
- * spider_net_cleanup unregisters the device driver
- */
-static void __exit spider_net_cleanup(void)
-{
- pci_unregister_driver(&spider_net_driver);
-}
-
-module_init(spider_net_init);
-module_exit(spider_net_cleanup);
diff --git a/drivers/net/ethernet/toshiba/spider_net.h b/drivers/net/ethernet/toshiba/spider_net.h
deleted file mode 100644
index 51948e2b3a34..000000000000
--- a/drivers/net/ethernet/toshiba/spider_net.h
+++ /dev/null
@@ -1,475 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Network device driver for Cell Processor-Based Blade and Celleb platform
- *
- * (C) Copyright IBM Corp. 2005
- * (C) Copyright 2006 TOSHIBA CORPORATION
- *
- * Authors : Utz Bacher <utz.bacher@de.ibm.com>
- * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
- */
-
-#ifndef _SPIDER_NET_H
-#define _SPIDER_NET_H
-
-#define VERSION "2.0 B"
-
-#include <linux/sungem_phy.h>
-
-int spider_net_stop(struct net_device *netdev);
-int spider_net_open(struct net_device *netdev);
-
-extern const struct ethtool_ops spider_net_ethtool_ops;
-
-extern char spider_net_driver_name[];
-
-#define SPIDER_NET_MAX_FRAME 2312
-#define SPIDER_NET_MAX_MTU 2294
-#define SPIDER_NET_MIN_MTU 64
-
-#define SPIDER_NET_RXBUF_ALIGN 128
-
-#define SPIDER_NET_RX_DESCRIPTORS_DEFAULT 256
-#define SPIDER_NET_RX_DESCRIPTORS_MIN 16
-#define SPIDER_NET_RX_DESCRIPTORS_MAX 512
-
-#define SPIDER_NET_TX_DESCRIPTORS_DEFAULT 256
-#define SPIDER_NET_TX_DESCRIPTORS_MIN 16
-#define SPIDER_NET_TX_DESCRIPTORS_MAX 512
-
-#define SPIDER_NET_TX_TIMER (HZ/5)
-#define SPIDER_NET_ANEG_TIMER (HZ)
-#define SPIDER_NET_ANEG_TIMEOUT 5
-
-#define SPIDER_NET_RX_CSUM_DEFAULT 1
-
-#define SPIDER_NET_WATCHDOG_TIMEOUT 50*HZ
-
-#define SPIDER_NET_FIRMWARE_SEQS 6
-#define SPIDER_NET_FIRMWARE_SEQWORDS 1024
-#define SPIDER_NET_FIRMWARE_LEN (SPIDER_NET_FIRMWARE_SEQS * \
- SPIDER_NET_FIRMWARE_SEQWORDS * \
- sizeof(u32))
-#define SPIDER_NET_FIRMWARE_NAME "spider_fw.bin"
-
-/** spider_net SMMIO registers */
-#define SPIDER_NET_GHIINT0STS 0x00000000
-#define SPIDER_NET_GHIINT1STS 0x00000004
-#define SPIDER_NET_GHIINT2STS 0x00000008
-#define SPIDER_NET_GHIINT0MSK 0x00000010
-#define SPIDER_NET_GHIINT1MSK 0x00000014
-#define SPIDER_NET_GHIINT2MSK 0x00000018
-
-#define SPIDER_NET_GRESUMINTNUM 0x00000020
-#define SPIDER_NET_GREINTNUM 0x00000024
-
-#define SPIDER_NET_GFFRMNUM 0x00000028
-#define SPIDER_NET_GFAFRMNUM 0x0000002c
-#define SPIDER_NET_GFBFRMNUM 0x00000030
-#define SPIDER_NET_GFCFRMNUM 0x00000034
-#define SPIDER_NET_GFDFRMNUM 0x00000038
-
-/* clear them (don't use them) */
-#define SPIDER_NET_GFREECNNUM 0x0000003c
-#define SPIDER_NET_GONETIMENUM 0x00000040
-
-#define SPIDER_NET_GTOUTFRMNUM 0x00000044
-
-#define SPIDER_NET_GTXMDSET 0x00000050
-#define SPIDER_NET_GPCCTRL 0x00000054
-#define SPIDER_NET_GRXMDSET 0x00000058
-#define SPIDER_NET_GIPSECINIT 0x0000005c
-#define SPIDER_NET_GFTRESTRT 0x00000060
-#define SPIDER_NET_GRXDMAEN 0x00000064
-#define SPIDER_NET_GMRWOLCTRL 0x00000068
-#define SPIDER_NET_GPCWOPCMD 0x0000006c
-#define SPIDER_NET_GPCROPCMD 0x00000070
-#define SPIDER_NET_GTTFRMCNT 0x00000078
-#define SPIDER_NET_GTESTMD 0x0000007c
-
-#define SPIDER_NET_GSINIT 0x00000080
-#define SPIDER_NET_GSnPRGADR 0x00000084
-#define SPIDER_NET_GSnPRGDAT 0x00000088
-
-#define SPIDER_NET_GMACOPEMD 0x00000100
-#define SPIDER_NET_GMACLENLMT 0x00000108
-#define SPIDER_NET_GMACST 0x00000110
-#define SPIDER_NET_GMACINTEN 0x00000118
-#define SPIDER_NET_GMACPHYCTRL 0x00000120
-
-#define SPIDER_NET_GMACAPAUSE 0x00000154
-#define SPIDER_NET_GMACTXPAUSE 0x00000164
-
-#define SPIDER_NET_GMACMODE 0x000001b0
-#define SPIDER_NET_GMACBSTLMT 0x000001b4
-
-#define SPIDER_NET_GMACUNIMACU 0x000001c0
-#define SPIDER_NET_GMACUNIMACL 0x000001c8
-
-#define SPIDER_NET_GMRMHFILnR 0x00000400
-#define SPIDER_NET_MULTICAST_HASHES 256
-
-#define SPIDER_NET_GMRUAFILnR 0x00000500
-#define SPIDER_NET_GMRUA0FIL15R 0x00000578
-
-#define SPIDER_NET_GTTQMSK 0x00000934
-
-/* RX DMA controller registers, all 0x00000a.. are for DMA controller A,
- * 0x00000b.. for DMA controller B, etc. */
-#define SPIDER_NET_GDADCHA 0x00000a00
-#define SPIDER_NET_GDADMACCNTR 0x00000a04
-#define SPIDER_NET_GDACTDPA 0x00000a08
-#define SPIDER_NET_GDACTDCNT 0x00000a0c
-#define SPIDER_NET_GDACDBADDR 0x00000a20
-#define SPIDER_NET_GDACDBSIZE 0x00000a24
-#define SPIDER_NET_GDACNEXTDA 0x00000a28
-#define SPIDER_NET_GDACCOMST 0x00000a2c
-#define SPIDER_NET_GDAWBCOMST 0x00000a30
-#define SPIDER_NET_GDAWBRSIZE 0x00000a34
-#define SPIDER_NET_GDAWBVSIZE 0x00000a38
-#define SPIDER_NET_GDAWBTRST 0x00000a3c
-#define SPIDER_NET_GDAWBTRERR 0x00000a40
-
-/* TX DMA controller registers */
-#define SPIDER_NET_GDTDCHA 0x00000e00
-#define SPIDER_NET_GDTDMACCNTR 0x00000e04
-#define SPIDER_NET_GDTCDPA 0x00000e08
-#define SPIDER_NET_GDTDMASEL 0x00000e14
-
-#define SPIDER_NET_ECMODE 0x00000f00
-/* clock and reset control register */
-#define SPIDER_NET_CKRCTRL 0x00000ff0
-
-/** SCONFIG registers */
-#define SPIDER_NET_SCONFIG_IOACTE 0x00002810
-
-/** interrupt mask registers */
-#define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe2c7
-#define SPIDER_NET_INT1_MASK_VALUE 0x0000fff2
-#define SPIDER_NET_INT2_MASK_VALUE 0x000003f1
-
-/* we rely on flagged descriptor interrupts */
-#define SPIDER_NET_FRAMENUM_VALUE 0x00000000
-/* set this first, then the FRAMENUM_VALUE */
-#define SPIDER_NET_GFXFRAMES_VALUE 0x00000000
-
-#define SPIDER_NET_STOP_SEQ_VALUE 0x00000000
-#define SPIDER_NET_RUN_SEQ_VALUE 0x0000007e
-
-#define SPIDER_NET_PHY_CTRL_VALUE 0x00040040
-/* #define SPIDER_NET_PHY_CTRL_VALUE 0x01070080*/
-#define SPIDER_NET_RXMODE_VALUE 0x00000011
-/* auto retransmission in case of MAC aborts */
-#define SPIDER_NET_TXMODE_VALUE 0x00010000
-#define SPIDER_NET_RESTART_VALUE 0x00000000
-#define SPIDER_NET_WOL_VALUE 0x00001111
-#if 0
-#define SPIDER_NET_WOL_VALUE 0x00000000
-#endif
-#define SPIDER_NET_IPSECINIT_VALUE 0x6f716f71
-
-/* pause frames: automatic, no upper retransmission count */
-/* outside loopback mode: ETOMOD signal doesn't matter, not connected */
-/* ETOMOD signal is brought to PHY reset. Bit 2 must be 1 in Celleb */
-#define SPIDER_NET_OPMODE_VALUE 0x00000067
-/*#define SPIDER_NET_OPMODE_VALUE 0x001b0062*/
-#define SPIDER_NET_LENLMT_VALUE 0x00000908
-
-#define SPIDER_NET_MACAPAUSE_VALUE 0x00000800 /* about 1 ms */
-#define SPIDER_NET_TXPAUSE_VALUE 0x00000000
-
-#define SPIDER_NET_MACMODE_VALUE 0x00000001
-#define SPIDER_NET_BURSTLMT_VALUE 0x00000200 /* about 16 us */
-
-/* DMAC control register GDMACCNTR
- *
- * 1(0) enable r/tx dma
- * 0000000 fixed to 0
- *
- * 000000 fixed to 0
- * 0(1) en/disable descr writeback on force end
- * 0(1) force end
- *
- * 000000 fixed to 0
- * 00 burst alignment: 128 bytes
- * 11 burst alignment: 1024 bytes
- *
- * 00000 fixed to 0
- * 0 descr writeback size 32 bytes
- * 0(1) descr chain end interrupt enable
- * 0(1) descr status writeback enable */
-
-/* to set RX_DMA_EN */
-#define SPIDER_NET_DMA_RX_VALUE 0x80000000
-#define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003
-/* to set TX_DMA_EN */
-#define SPIDER_NET_TX_DMA_EN 0x80000000
-#define SPIDER_NET_GDTBSTA 0x00000300
-#define SPIDER_NET_GDTDCEIDIS 0x00000002
-#define SPIDER_NET_DMA_TX_VALUE SPIDER_NET_TX_DMA_EN | \
- SPIDER_NET_GDTDCEIDIS | \
- SPIDER_NET_GDTBSTA
-
-#define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003
-
-/* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */
-#define SPIDER_NET_UA_DESCR_VALUE 0x00080000
-#define SPIDER_NET_PROMISC_VALUE 0x00080000
-#define SPIDER_NET_NONPROMISC_VALUE 0x00000000
-
-#define SPIDER_NET_DMASEL_VALUE 0x00000001
-
-#define SPIDER_NET_ECMODE_VALUE 0x00000000
-
-#define SPIDER_NET_CKRCTRL_RUN_VALUE 0x1fff010f
-#define SPIDER_NET_CKRCTRL_STOP_VALUE 0x0000010f
-
-#define SPIDER_NET_SBIMSTATE_VALUE 0x00000000
-#define SPIDER_NET_SBTMSTATE_VALUE 0x00000000
-
-/* SPIDER_NET_GHIINT0STS bits, in reverse order so that they can be used
- * with 1 << SPIDER_NET_... */
-enum spider_net_int0_status {
- SPIDER_NET_GPHYINT = 0,
- SPIDER_NET_GMAC2INT,
- SPIDER_NET_GMAC1INT,
- SPIDER_NET_GIPSINT,
- SPIDER_NET_GFIFOINT,
- SPIDER_NET_GDMACINT,
- SPIDER_NET_GSYSINT,
- SPIDER_NET_GPWOPCMPINT,
- SPIDER_NET_GPROPCMPINT,
- SPIDER_NET_GPWFFINT,
- SPIDER_NET_GRMDADRINT,
- SPIDER_NET_GRMARPINT,
- SPIDER_NET_GRMMPINT,
- SPIDER_NET_GDTDEN0INT,
- SPIDER_NET_GDDDEN0INT,
- SPIDER_NET_GDCDEN0INT,
- SPIDER_NET_GDBDEN0INT,
- SPIDER_NET_GDADEN0INT,
- SPIDER_NET_GDTFDCINT,
- SPIDER_NET_GDDFDCINT,
- SPIDER_NET_GDCFDCINT,
- SPIDER_NET_GDBFDCINT,
- SPIDER_NET_GDAFDCINT,
- SPIDER_NET_GTTEDINT,
- SPIDER_NET_GDTDCEINT,
- SPIDER_NET_GRFDNMINT,
- SPIDER_NET_GRFCNMINT,
- SPIDER_NET_GRFBNMINT,
- SPIDER_NET_GRFANMINT,
- SPIDER_NET_GRFNMINT,
- SPIDER_NET_G1TMCNTINT,
- SPIDER_NET_GFREECNTINT
-};
-/* GHIINT1STS bits */
-enum spider_net_int1_status {
- SPIDER_NET_GTMFLLINT = 0,
- SPIDER_NET_GRMFLLINT,
- SPIDER_NET_GTMSHTINT,
- SPIDER_NET_GDTINVDINT,
- SPIDER_NET_GRFDFLLINT,
- SPIDER_NET_GDDDCEINT,
- SPIDER_NET_GDDINVDINT,
- SPIDER_NET_GRFCFLLINT,
- SPIDER_NET_GDCDCEINT,
- SPIDER_NET_GDCINVDINT,
- SPIDER_NET_GRFBFLLINT,
- SPIDER_NET_GDBDCEINT,
- SPIDER_NET_GDBINVDINT,
- SPIDER_NET_GRFAFLLINT,
- SPIDER_NET_GDADCEINT,
- SPIDER_NET_GDAINVDINT,
- SPIDER_NET_GDTRSERINT,
- SPIDER_NET_GDDRSERINT,
- SPIDER_NET_GDCRSERINT,
- SPIDER_NET_GDBRSERINT,
- SPIDER_NET_GDARSERINT,
- SPIDER_NET_GDSERINT,
- SPIDER_NET_GDTPTERINT,
- SPIDER_NET_GDDPTERINT,
- SPIDER_NET_GDCPTERINT,
- SPIDER_NET_GDBPTERINT,
- SPIDER_NET_GDAPTERINT
-};
-/* GHIINT2STS bits */
-enum spider_net_int2_status {
- SPIDER_NET_GPROPERINT = 0,
- SPIDER_NET_GMCTCRSNGINT,
- SPIDER_NET_GMCTLCOLINT,
- SPIDER_NET_GMCTTMOTINT,
- SPIDER_NET_GMCRCAERINT,
- SPIDER_NET_GMCRCALERINT,
- SPIDER_NET_GMCRALNERINT,
- SPIDER_NET_GMCROVRINT,
- SPIDER_NET_GMCRRNTINT,
- SPIDER_NET_GMCRRXERINT,
- SPIDER_NET_GTITCSERINT,
- SPIDER_NET_GTIFMTERINT,
- SPIDER_NET_GTIPKTRVKINT,
- SPIDER_NET_GTISPINGINT,
- SPIDER_NET_GTISADNGINT,
- SPIDER_NET_GTISPDNGINT,
- SPIDER_NET_GRIFMTERINT,
- SPIDER_NET_GRIPKTRVKINT,
- SPIDER_NET_GRISPINGINT,
- SPIDER_NET_GRISADNGINT,
- SPIDER_NET_GRISPDNGINT
-};
-
-#define SPIDER_NET_TXINT (1 << SPIDER_NET_GDTFDCINT)
-
-/* We rely on flagged descriptor interrupts */
-#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) )
-
-#define SPIDER_NET_LINKINT ( 1 << SPIDER_NET_GMAC2INT )
-
-#define SPIDER_NET_ERRINT ( 0xffffffff & \
- (~SPIDER_NET_TXINT) & \
- (~SPIDER_NET_RXINT) & \
- (~SPIDER_NET_LINKINT) )
-
-#define SPIDER_NET_GPREXEC 0x80000000
-#define SPIDER_NET_GPRDAT_MASK 0x0000ffff
-
-#define SPIDER_NET_DMAC_NOINTR_COMPLETE 0x00800000
-#define SPIDER_NET_DMAC_TXFRMTL 0x00040000
-#define SPIDER_NET_DMAC_TCP 0x00020000
-#define SPIDER_NET_DMAC_UDP 0x00030000
-#define SPIDER_NET_TXDCEST 0x08000000
-
-#define SPIDER_NET_DESCR_RXFDIS 0x00000001
-#define SPIDER_NET_DESCR_RXDCEIS 0x00000002
-#define SPIDER_NET_DESCR_RXDEN0IS 0x00000004
-#define SPIDER_NET_DESCR_RXINVDIS 0x00000008
-#define SPIDER_NET_DESCR_RXRERRIS 0x00000010
-#define SPIDER_NET_DESCR_RXFDCIMS 0x00000100
-#define SPIDER_NET_DESCR_RXDCEIMS 0x00000200
-#define SPIDER_NET_DESCR_RXDEN0IMS 0x00000400
-#define SPIDER_NET_DESCR_RXINVDIMS 0x00000800
-#define SPIDER_NET_DESCR_RXRERRMIS 0x00001000
-#define SPIDER_NET_DESCR_UNUSED 0x077fe0e0
-
-#define SPIDER_NET_DESCR_IND_PROC_MASK 0xF0000000
-#define SPIDER_NET_DESCR_COMPLETE 0x00000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_RESPONSE_ERROR 0x10000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_PROTECTION_ERROR 0x20000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_FRAME_END 0x40000000 /* used in rx */
-#define SPIDER_NET_DESCR_FORCE_END 0x50000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_CARDOWNED 0xA0000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000
-#define SPIDER_NET_DESCR_TXDESFLG 0x00800000
-
-#define SPIDER_NET_DESCR_BAD_STATUS (SPIDER_NET_DESCR_RXDEN0IS | \
- SPIDER_NET_DESCR_RXRERRIS | \
- SPIDER_NET_DESCR_RXDEN0IMS | \
- SPIDER_NET_DESCR_RXINVDIMS | \
- SPIDER_NET_DESCR_RXRERRMIS | \
- SPIDER_NET_DESCR_UNUSED)
-
-/* Descriptor, as defined by the hardware */
-struct spider_net_hw_descr {
- u32 buf_addr;
- u32 buf_size;
- u32 next_descr_addr;
- u32 dmac_cmd_status;
- u32 result_size;
- u32 valid_size; /* all zeroes for tx */
- u32 data_status;
- u32 data_error; /* all zeroes for tx */
-} __attribute__((aligned(32)));
-
-struct spider_net_descr {
- struct spider_net_hw_descr *hwdescr;
- struct sk_buff *skb;
- u32 bus_addr;
- struct spider_net_descr *next;
- struct spider_net_descr *prev;
-};
-
-struct spider_net_descr_chain {
- spinlock_t lock;
- struct spider_net_descr *head;
- struct spider_net_descr *tail;
- struct spider_net_descr *ring;
- int num_desc;
- struct spider_net_hw_descr *hwring;
- dma_addr_t dma_addr;
-};
-
-/* descriptor data_status bits */
-#define SPIDER_NET_RX_IPCHK 29
-#define SPIDER_NET_RX_TCPCHK 28
-#define SPIDER_NET_VLAN_PACKET 21
-#define SPIDER_NET_DATA_STATUS_CKSUM_MASK ( (1 << SPIDER_NET_RX_IPCHK) | \
- (1 << SPIDER_NET_RX_TCPCHK) )
-
-/* descriptor data_error bits */
-#define SPIDER_NET_RX_IPCHKERR 27
-#define SPIDER_NET_RX_RXTCPCHKERR 28
-
-#define SPIDER_NET_DATA_ERR_CKSUM_MASK (1 << SPIDER_NET_RX_IPCHKERR)
-
-/* the cases we don't pass the packet to the stack.
- * 701b8000 would be correct, but every packet gets that flag */
-#define SPIDER_NET_DESTROY_RX_FLAGS 0x700b8000
-
-#define SPIDER_NET_DEFAULT_MSG ( NETIF_MSG_DRV | \
- NETIF_MSG_PROBE | \
- NETIF_MSG_LINK | \
- NETIF_MSG_TIMER | \
- NETIF_MSG_IFDOWN | \
- NETIF_MSG_IFUP | \
- NETIF_MSG_RX_ERR | \
- NETIF_MSG_TX_ERR | \
- NETIF_MSG_TX_QUEUED | \
- NETIF_MSG_INTR | \
- NETIF_MSG_TX_DONE | \
- NETIF_MSG_RX_STATUS | \
- NETIF_MSG_PKTDATA | \
- NETIF_MSG_HW | \
- NETIF_MSG_WOL )
-
-struct spider_net_extra_stats {
- unsigned long rx_desc_error;
- unsigned long tx_timeouts;
- unsigned long alloc_rx_skb_error;
- unsigned long rx_iommu_map_error;
- unsigned long tx_iommu_map_error;
- unsigned long rx_desc_unk_state;
-};
-
-struct spider_net_card {
- struct net_device *netdev;
- struct pci_dev *pdev;
- struct mii_phy phy;
-
- struct napi_struct napi;
-
- int medium;
-
- void __iomem *regs;
-
- struct spider_net_descr_chain tx_chain;
- struct spider_net_descr_chain rx_chain;
- struct spider_net_descr *low_watermark;
-
- int aneg_count;
- struct timer_list aneg_timer;
- struct timer_list tx_timer;
- struct work_struct tx_timeout_task;
- atomic_t tx_timeout_task_counter;
- wait_queue_head_t waitq;
- int num_rx_ints;
- int ignore_rx_ramfull;
-
- /* for ethtool */
- int msg_enable;
- struct spider_net_extra_stats spider_stats;
-
- /* Must be last item in struct */
- struct spider_net_descr darray[];
-};
-
-#endif
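
The removed header packs all per-descriptor state into the dmac_cmd_status word: the top nibble (SPIDER_NET_DESCR_IND_PROC_MASK) carries the ownership/completion code, the low bits the per-frame error flags. A minimal sketch of how that nibble is decoded, using only the definitions above; this is an illustration for the reader, not part of the patch:

/* Illustrative only: classify a descriptor by its status nibble, using
 * the SPIDER_NET_DESCR_* codes from the removed spider_net.h above. */
static bool spider_net_descr_is_done(const struct spider_net_hw_descr *hwdescr)
{
	u32 status = hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;

	/* CARDOWNED and NOT_IN_USE mean the hardware still owns the slot */
	if (status == SPIDER_NET_DESCR_CARDOWNED ||
	    status == SPIDER_NET_DESCR_NOT_IN_USE)
		return false;

	/* COMPLETE and FRAME_END are finished frames; RESPONSE_ERROR,
	 * PROTECTION_ERROR and FORCE_END are terminal error states. */
	return true;
}
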
diff --git a/drivers/net/ethernet/toshiba/spider_net_ethtool.c b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
deleted file mode 100644
index fef9fd127b5e..000000000000
--- a/drivers/net/ethernet/toshiba/spider_net_ethtool.c
+++ /dev/null
@@ -1,174 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Network device driver for Cell Processor-Based Blade
- *
- * (C) Copyright IBM Corp. 2005
- *
- * Authors : Utz Bacher <utz.bacher@de.ibm.com>
- * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
- */
-
-#include <linux/netdevice.h>
-#include <linux/ethtool.h>
-#include <linux/pci.h>
-
-#include "spider_net.h"
-
-
-static struct {
- const char str[ETH_GSTRING_LEN];
-} ethtool_stats_keys[] = {
- { "tx_packets" },
- { "tx_bytes" },
- { "rx_packets" },
- { "rx_bytes" },
- { "tx_errors" },
- { "tx_dropped" },
- { "rx_dropped" },
- { "rx_descriptor_error" },
- { "tx_timeouts" },
- { "alloc_rx_skb_error" },
- { "rx_iommu_map_error" },
- { "tx_iommu_map_error" },
- { "rx_desc_unk_state" },
-};
-
-static int
-spider_net_ethtool_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *cmd)
-{
- struct spider_net_card *card;
- card = netdev_priv(netdev);
-
- ethtool_link_ksettings_zero_link_mode(cmd, supported);
- ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
-
- ethtool_link_ksettings_zero_link_mode(cmd, advertising);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
-
- cmd->base.port = PORT_FIBRE;
- cmd->base.speed = card->phy.speed;
- cmd->base.duplex = DUPLEX_FULL;
-
- return 0;
-}
-
-static void
-spider_net_ethtool_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct spider_net_card *card;
- card = netdev_priv(netdev);
-
- /* clear and fill out info */
- strscpy(drvinfo->driver, spider_net_driver_name,
- sizeof(drvinfo->driver));
- strscpy(drvinfo->version, VERSION, sizeof(drvinfo->version));
- strscpy(drvinfo->fw_version, "no information",
- sizeof(drvinfo->fw_version));
- strscpy(drvinfo->bus_info, pci_name(card->pdev),
- sizeof(drvinfo->bus_info));
-}
-
-static void
-spider_net_ethtool_get_wol(struct net_device *netdev,
- struct ethtool_wolinfo *wolinfo)
-{
- /* no support for wol */
- wolinfo->supported = 0;
- wolinfo->wolopts = 0;
-}
-
-static u32
-spider_net_ethtool_get_msglevel(struct net_device *netdev)
-{
- struct spider_net_card *card;
- card = netdev_priv(netdev);
- return card->msg_enable;
-}
-
-static void
-spider_net_ethtool_set_msglevel(struct net_device *netdev,
- u32 level)
-{
- struct spider_net_card *card;
- card = netdev_priv(netdev);
- card->msg_enable = level;
-}
-
-static int
-spider_net_ethtool_nway_reset(struct net_device *netdev)
-{
- if (netif_running(netdev)) {
- spider_net_stop(netdev);
- spider_net_open(netdev);
- }
- return 0;
-}
-
-static void
-spider_net_ethtool_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering,
- struct kernel_ethtool_ringparam *kernel_ering,
- struct netlink_ext_ack *extack)
-{
- struct spider_net_card *card = netdev_priv(netdev);
-
- ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX;
- ering->tx_pending = card->tx_chain.num_desc;
- ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX;
- ering->rx_pending = card->rx_chain.num_desc;
-}
-
-static int spider_net_get_sset_count(struct net_device *netdev, int sset)
-{
- switch (sset) {
- case ETH_SS_STATS:
- return ARRAY_SIZE(ethtool_stats_keys);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void spider_net_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct spider_net_card *card = netdev_priv(netdev);
-
- data[0] = netdev->stats.tx_packets;
- data[1] = netdev->stats.tx_bytes;
- data[2] = netdev->stats.rx_packets;
- data[3] = netdev->stats.rx_bytes;
- data[4] = netdev->stats.tx_errors;
- data[5] = netdev->stats.tx_dropped;
- data[6] = netdev->stats.rx_dropped;
- data[7] = card->spider_stats.rx_desc_error;
- data[8] = card->spider_stats.tx_timeouts;
- data[9] = card->spider_stats.alloc_rx_skb_error;
- data[10] = card->spider_stats.rx_iommu_map_error;
- data[11] = card->spider_stats.tx_iommu_map_error;
- data[12] = card->spider_stats.rx_desc_unk_state;
-}
-
-static void spider_net_get_strings(struct net_device *netdev, u32 stringset,
- u8 *data)
-{
- memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys));
-}
-
-const struct ethtool_ops spider_net_ethtool_ops = {
- .get_drvinfo = spider_net_ethtool_get_drvinfo,
- .get_wol = spider_net_ethtool_get_wol,
- .get_msglevel = spider_net_ethtool_get_msglevel,
- .set_msglevel = spider_net_ethtool_set_msglevel,
- .get_link = ethtool_op_get_link,
- .nway_reset = spider_net_ethtool_nway_reset,
- .get_ringparam = spider_net_ethtool_get_ringparam,
- .get_strings = spider_net_get_strings,
- .get_sset_count = spider_net_get_sset_count,
- .get_ethtool_stats = spider_net_get_ethtool_stats,
- .get_link_ksettings = spider_net_ethtool_get_link_ksettings,
-};
-
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index e46ccebcfd22..47e3e8434b9e 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -18,6 +18,7 @@ if NET_VENDOR_WANGXUN
config LIBWX
tristate
+ depends on PTP_1588_CLOCK_OPTIONAL
select PAGE_POOL
help
Common library for Wangxun(R) Ethernet drivers.
@@ -25,6 +26,7 @@ config LIBWX
config NGBE
tristate "Wangxun(R) GbE PCI Express adapters support"
depends on PCI
+ depends on PTP_1588_CLOCK_OPTIONAL
select LIBWX
select PHYLINK
help
@@ -42,6 +44,7 @@ config TXGBE
depends on PCI
depends on COMMON_CLK
depends on I2C_DESIGNWARE_PLATFORM
+ depends on PTP_1588_CLOCK_OPTIONAL
select MARVELL_10G_PHY
select REGMAP
select PHYLINK
diff --git a/drivers/net/ethernet/wangxun/libwx/Makefile b/drivers/net/ethernet/wangxun/libwx/Makefile
index 42ccd6e4052e..e9f0f1f2309b 100644
--- a/drivers/net/ethernet/wangxun/libwx/Makefile
+++ b/drivers/net/ethernet/wangxun/libwx/Makefile
@@ -4,4 +4,4 @@
obj-$(CONFIG_LIBWX) += libwx.o
-libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o
+libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o wx_ptp.o
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
index abe5921dde02..43019ec9329c 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
@@ -41,6 +41,9 @@ static const struct wx_stats wx_gstrings_stats[] = {
WX_STAT("rx_csum_offload_good_count", hw_csum_rx_good),
WX_STAT("rx_csum_offload_errors", hw_csum_rx_error),
WX_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
+ WX_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+ WX_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
+ WX_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
};
static const struct wx_stats wx_gstrings_fdir_stats[] = {
@@ -69,7 +72,7 @@ int wx_get_sset_count(struct net_device *netdev, int sset)
switch (sset) {
case ETH_SS_STATS:
- return (wx->mac.type == wx_mac_sp) ?
+ return (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) ?
WX_STATS_LEN + WX_FDIR_STATS_LEN : WX_STATS_LEN;
default:
return -EOPNOTSUPP;
@@ -87,7 +90,7 @@ void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
case ETH_SS_STATS:
for (i = 0; i < WX_GLOBAL_STATS_LEN; i++)
ethtool_puts(&p, wx_gstrings_stats[i].stat_string);
- if (wx->mac.type == wx_mac_sp) {
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
for (i = 0; i < WX_FDIR_STATS_LEN; i++)
ethtool_puts(&p, wx_gstrings_fdir_stats[i].stat_string);
}
@@ -121,7 +124,7 @@ void wx_get_ethtool_stats(struct net_device *netdev,
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- if (wx->mac.type == wx_mac_sp) {
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
for (k = 0; k < WX_FDIR_STATS_LEN; k++) {
p = (char *)wx + wx_gstrings_fdir_stats[k].stat_offset;
data[i++] = *(u64 *)p;
@@ -196,7 +199,7 @@ void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
unsigned int stats_len = WX_STATS_LEN;
struct wx *wx = netdev_priv(netdev);
- if (wx->mac.type == wx_mac_sp)
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
stats_len += WX_FDIR_STATS_LEN;
strscpy(info->driver, wx->driver_name, sizeof(info->driver));
@@ -216,6 +219,9 @@ int wx_nway_reset(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
+ if (wx->mac.type == wx_mac_aml)
+ return -EOPNOTSUPP;
+
return phylink_ethtool_nway_reset(wx->phylink);
}
EXPORT_SYMBOL(wx_nway_reset);
@@ -225,6 +231,9 @@ int wx_get_link_ksettings(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
+ if (wx->mac.type == wx_mac_aml)
+ return -EOPNOTSUPP;
+
return phylink_ethtool_ksettings_get(wx->phylink, cmd);
}
EXPORT_SYMBOL(wx_get_link_ksettings);
@@ -234,6 +243,9 @@ int wx_set_link_ksettings(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
+ if (wx->mac.type == wx_mac_aml)
+ return -EOPNOTSUPP;
+
return phylink_ethtool_ksettings_set(wx->phylink, cmd);
}
EXPORT_SYMBOL(wx_set_link_ksettings);
@@ -243,6 +255,9 @@ void wx_get_pauseparam(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
+ if (wx->mac.type == wx_mac_aml)
+ return;
+
phylink_ethtool_get_pauseparam(wx->phylink, pause);
}
EXPORT_SYMBOL(wx_get_pauseparam);
@@ -252,6 +267,9 @@ int wx_set_pauseparam(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
+ if (wx->mac.type == wx_mac_aml)
+ return -EOPNOTSUPP;
+
return phylink_ethtool_set_pauseparam(wx->phylink, pause);
}
EXPORT_SYMBOL(wx_set_pauseparam);
@@ -322,10 +340,17 @@ int wx_set_coalesce(struct net_device *netdev,
if (ec->tx_max_coalesced_frames_irq)
wx->tx_work_limit = ec->tx_max_coalesced_frames_irq;
- if (wx->mac.type == wx_mac_sp)
+ switch (wx->mac.type) {
+ case wx_mac_sp:
max_eitr = WX_SP_MAX_EITR;
- else
+ break;
+ case wx_mac_aml:
+ max_eitr = WX_AML_MAX_EITR;
+ break;
+ default:
max_eitr = WX_EM_MAX_EITR;
+ break;
+ }
if ((ec->rx_coalesce_usecs > (max_eitr >> 2)) ||
(ec->tx_coalesce_usecs > (max_eitr >> 2)))
@@ -347,10 +372,15 @@ int wx_set_coalesce(struct net_device *netdev,
wx->tx_itr_setting = ec->tx_coalesce_usecs;
if (wx->tx_itr_setting == 1) {
- if (wx->mac.type == wx_mac_sp)
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
tx_itr_param = WX_12K_ITR;
- else
+ break;
+ default:
tx_itr_param = WX_20K_ITR;
+ break;
+ }
} else {
tx_itr_param = wx->tx_itr_setting;
}
@@ -383,10 +413,15 @@ static unsigned int wx_max_channels(struct wx *wx)
max_combined = 1;
} else {
/* support up to max allowed queues with RSS */
- if (wx->mac.type == wx_mac_sp)
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
max_combined = 63;
- else
+ break;
+ default:
max_combined = 8;
+ break;
+ }
}
return max_combined;
@@ -452,3 +487,53 @@ void wx_set_msglevel(struct net_device *netdev, u32 data)
wx->msg_enable = data;
}
EXPORT_SYMBOL(wx_set_msglevel);
+
+int wx_get_ts_info(struct net_device *dev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct wx *wx = netdev_priv(dev);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (wx->ptp_clock)
+ info->phc_index = ptp_clock_index(wx->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_get_ts_info);
+
+void wx_get_ptp_stats(struct net_device *dev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct wx *wx = netdev_priv(dev);
+
+ if (wx->ptp_clock) {
+ ts_stats->pkts = wx->tx_hwtstamp_pkts;
+ ts_stats->lost = wx->tx_hwtstamp_timeouts +
+ wx->tx_hwtstamp_skipped +
+ wx->rx_hwtstamp_cleared;
+ ts_stats->err = wx->tx_hwtstamp_errors;
+ }
+}
+EXPORT_SYMBOL(wx_get_ptp_stats);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
index 600c3b597d1a..9e002e699eca 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
@@ -40,4 +40,8 @@ int wx_set_channels(struct net_device *dev,
struct ethtool_channels *ch);
u32 wx_get_msglevel(struct net_device *netdev);
void wx_set_msglevel(struct net_device *netdev, u32 data);
+int wx_get_ts_info(struct net_device *dev,
+ struct kernel_ethtool_ts_info *info);
+void wx_get_ptp_stats(struct net_device *dev,
+ struct ethtool_ts_stats *ts_stats);
#endif /* _WX_ETHTOOL_H_ */
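
wx_get_ts_info() and wx_get_ptp_stats() are library helpers; the consumer drivers are expected to reference them from their ethtool_ops. A hedged sketch of that wiring: the struct name below is a placeholder and the exact ethtool_ops member names depend on the kernel version, so treat this as illustrative rather than part of the patch.

/* Illustrative only: how a consumer driver might hook up the new libwx
 * timestamping helpers. "example_ethtool_ops" is a made-up name. */
static const struct ethtool_ops example_ethtool_ops = {
	.get_drvinfo	= wx_get_drvinfo,
	.nway_reset	= wx_nway_reset,
	.get_ts_info	= wx_get_ts_info,	/* advertise HW timestamping caps */
	.get_ts_stats	= wx_get_ptp_stats,	/* expose Tx timestamp counters */
};
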
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index deaf670c160e..aed45abafb1b 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -112,10 +112,15 @@ static void wx_intr_disable(struct wx *wx, u64 qmask)
if (mask)
wr32(wx, WX_PX_IMS(0), mask);
- if (wx->mac.type == wx_mac_sp) {
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
mask = (qmask >> 32);
if (mask)
wr32(wx, WX_PX_IMS(1), mask);
+ break;
+ default:
+ break;
}
}
@@ -126,10 +131,16 @@ void wx_intr_enable(struct wx *wx, u64 qmask)
mask = (qmask & U32_MAX);
if (mask)
wr32(wx, WX_PX_IMC(0), mask);
- if (wx->mac.type == wx_mac_sp) {
+
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
mask = (qmask >> 32);
if (mask)
wr32(wx, WX_PX_IMC(1), mask);
+ break;
+ default:
+ break;
}
}
EXPORT_SYMBOL(wx_intr_enable);
@@ -278,22 +289,8 @@ static int wx_acquire_sw_sync(struct wx *wx, u32 mask)
return ret;
}
-/**
- * wx_host_interface_command - Issue command to manageability block
- * @wx: pointer to the HW structure
- * @buffer: contains the command to write and where the return status will
- * be placed
- * @length: length of buffer, must be multiple of 4 bytes
- * @timeout: time in ms to wait for command completion
- * @return_data: read and return data from the buffer (true) or not (false)
- * Needed because FW structures are big endian and decoding of
- * these fields can be 8 bit or 16 bit based on command. Decoding
- * is not easily understood without making a table of commands.
- * So we will leave this up to the caller to read back the data
- * in these cases.
- **/
-int wx_host_interface_command(struct wx *wx, u32 *buffer,
- u32 length, u32 timeout, bool return_data)
+static int wx_host_interface_command_s(struct wx *wx, u32 *buffer,
+ u32 length, u32 timeout, bool return_data)
{
u32 hdr_size = sizeof(struct wx_hic_hdr);
u32 hicr, i, bi, buf[64] = {};
@@ -301,22 +298,10 @@ int wx_host_interface_command(struct wx *wx, u32 *buffer,
u32 dword_len;
u16 buf_len;
- if (length == 0 || length > WX_HI_MAX_BLOCK_BYTE_LENGTH) {
- wx_err(wx, "Buffer length failure buffersize=%d.\n", length);
- return -EINVAL;
- }
-
status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB);
if (status != 0)
return status;
- /* Calculate length in DWORDs. We must be DWORD aligned */
- if ((length % (sizeof(u32))) != 0) {
- wx_err(wx, "Buffer length failure, not aligned to dword");
- status = -EINVAL;
- goto rel_out;
- }
-
dword_len = length >> 2;
/* The device driver writes the relevant command block
@@ -391,8 +376,160 @@ rel_out:
wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB);
return status;
}
+
+static bool wx_poll_fw_reply(struct wx *wx, u32 *buffer, u8 send_cmd)
+{
+ u32 dword_len = sizeof(struct wx_hic_hdr) >> 2;
+ struct wx_hic_hdr *recv_hdr;
+ u32 i;
+
+ /* read hdr */
+ for (i = 0; i < dword_len; i++) {
+ buffer[i] = rd32a(wx, WX_FW2SW_MBOX, i);
+ le32_to_cpus(&buffer[i]);
+ }
+
+ /* check hdr */
+ recv_hdr = (struct wx_hic_hdr *)buffer;
+ if (recv_hdr->cmd == send_cmd &&
+ recv_hdr->index == wx->swfw_index)
+ return true;
+
+ return false;
+}
+
+static int wx_host_interface_command_r(struct wx *wx, u32 *buffer,
+ u32 length, u32 timeout, bool return_data)
+{
+ struct wx_hic_hdr *hdr = (struct wx_hic_hdr *)buffer;
+ u32 hdr_size = sizeof(struct wx_hic_hdr);
+ bool busy, reply;
+ u32 dword_len;
+ u16 buf_len;
+ int err = 0;
+ u8 send_cmd;
+ u32 i;
+
+ /* wait to get lock */
+ might_sleep();
+ err = read_poll_timeout(test_and_set_bit, busy, !busy, 1000, timeout * 1000,
+ false, WX_STATE_SWFW_BUSY, wx->state);
+ if (err)
+ return err;
+
+ /* index to unique seq id for each mbox message */
+ hdr->index = wx->swfw_index;
+ send_cmd = hdr->cmd;
+
+ dword_len = length >> 2;
+ /* write data to SW-FW mbox array */
+ for (i = 0; i < dword_len; i++) {
+ wr32a(wx, WX_SW2FW_MBOX, i, (__force u32)cpu_to_le32(buffer[i]));
+ /* write flush */
+ rd32a(wx, WX_SW2FW_MBOX, i);
+ }
+
+ /* generate interrupt to notify FW */
+ wr32m(wx, WX_SW2FW_MBOX_CMD, WX_SW2FW_MBOX_CMD_VLD, 0);
+ wr32m(wx, WX_SW2FW_MBOX_CMD, WX_SW2FW_MBOX_CMD_VLD, WX_SW2FW_MBOX_CMD_VLD);
+
+ /* polling reply from FW */
+ err = read_poll_timeout(wx_poll_fw_reply, reply, reply, 1000, 50000,
+ true, wx, buffer, send_cmd);
+ if (err) {
+ wx_err(wx, "Polling from FW messages timeout, cmd: 0x%x, index: %d\n",
+ send_cmd, wx->swfw_index);
+ goto rel_out;
+ }
+
+	/* if the caller expects no reply data from FW, we are done */
+ if (!return_data)
+ goto rel_out;
+
+	/* If there is anything in the data position, pull it in */
+ buf_len = hdr->buf_len;
+ if (buf_len == 0)
+ goto rel_out;
+
+ if (length < buf_len + hdr_size) {
+ wx_err(wx, "Buffer not large enough for reply message.\n");
+ err = -EFAULT;
+ goto rel_out;
+ }
+
+ /* Calculate length in DWORDs, add 3 for odd lengths */
+ dword_len = (buf_len + 3) >> 2;
+ for (i = hdr_size >> 2; i <= dword_len; i++) {
+ buffer[i] = rd32a(wx, WX_FW2SW_MBOX, i);
+ le32_to_cpus(&buffer[i]);
+ }
+
+rel_out:
+	/* advance index; it takes the place of wx_hic_hdr.checksum */
+ if (wx->swfw_index == WX_HIC_HDR_INDEX_MAX)
+ wx->swfw_index = 0;
+ else
+ wx->swfw_index++;
+
+ clear_bit(WX_STATE_SWFW_BUSY, wx->state);
+ return err;
+}
+
+/**
+ * wx_host_interface_command - Issue command to manageability block
+ * @wx: pointer to the HW structure
+ * @buffer: contains the command to write and where the return status will
+ * be placed
+ * @length: length of buffer, must be multiple of 4 bytes
+ * @timeout: time in ms to wait for command completion
+ * @return_data: read and return data from the buffer (true) or not (false)
+ * Needed because FW structures are big endian and decoding of
+ * these fields can be 8 bit or 16 bit based on command. Decoding
+ * is not easily understood without making a table of commands.
+ * So we will leave this up to the caller to read back the data
+ * in these cases.
+ **/
+int wx_host_interface_command(struct wx *wx, u32 *buffer,
+ u32 length, u32 timeout, bool return_data)
+{
+ if (length == 0 || length > WX_HI_MAX_BLOCK_BYTE_LENGTH) {
+ wx_err(wx, "Buffer length failure buffersize=%d.\n", length);
+ return -EINVAL;
+ }
+
+ /* Calculate length in DWORDs. We must be DWORD aligned */
+ if ((length % (sizeof(u32))) != 0) {
+ wx_err(wx, "Buffer length failure, not aligned to dword");
+ return -EINVAL;
+ }
+
+ if (test_bit(WX_FLAG_SWFW_RING, wx->flags))
+ return wx_host_interface_command_r(wx, buffer, length,
+ timeout, return_data);
+
+ return wx_host_interface_command_s(wx, buffer, length, timeout, return_data);
+}
EXPORT_SYMBOL(wx_host_interface_command);
+int wx_set_pps(struct wx *wx, bool enable, u64 nsec, u64 cycles)
+{
+ struct wx_hic_set_pps pps_cmd;
+
+ pps_cmd.hdr.cmd = FW_PPS_SET_CMD;
+ pps_cmd.hdr.buf_len = FW_PPS_SET_LEN;
+ pps_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+ pps_cmd.lan_id = wx->bus.func;
+ pps_cmd.enable = (u8)enable;
+ pps_cmd.nsec = nsec;
+ pps_cmd.cycles = cycles;
+ pps_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+
+ return wx_host_interface_command(wx, (u32 *)&pps_cmd,
+ sizeof(pps_cmd),
+ WX_HI_COMMAND_TIMEOUT,
+ false);
+}
+
/**
* wx_read_ee_hostif_data - Read EEPROM word using a host interface cmd
* assuming that the semaphore is already obtained.
@@ -423,7 +560,10 @@ static int wx_read_ee_hostif_data(struct wx *wx, u16 offset, u16 *data)
if (status != 0)
return status;
- *data = (u16)rd32a(wx, WX_MNG_MBOX, FW_NVM_DATA_OFFSET);
+ if (!test_bit(WX_FLAG_SWFW_RING, wx->flags))
+ *data = (u16)rd32a(wx, WX_MNG_MBOX, FW_NVM_DATA_OFFSET);
+ else
+ *data = (u16)rd32a(wx, WX_FW2SW_MBOX, FW_NVM_DATA_OFFSET);
return status;
}
@@ -467,6 +607,7 @@ int wx_read_ee_hostif_buffer(struct wx *wx,
u16 words_to_read;
u32 value = 0;
int status;
+ u32 mbox;
u32 i;
/* Take semaphore for the entire operation. */
@@ -499,8 +640,12 @@ int wx_read_ee_hostif_buffer(struct wx *wx,
goto out;
}
+ if (!test_bit(WX_FLAG_SWFW_RING, wx->flags))
+ mbox = WX_MNG_MBOX;
+ else
+ mbox = WX_FW2SW_MBOX;
for (i = 0; i < words_to_read; i++) {
- u32 reg = WX_MNG_MBOX + (FW_NVM_DATA_OFFSET << 2) + 2 * i;
+ u32 reg = mbox + (FW_NVM_DATA_OFFSET << 2) + 2 * i;
value = rd32(wx, reg);
data[current_word] = (u16)(value & 0xffff);
@@ -550,12 +695,17 @@ void wx_init_eeprom_params(struct wx *wx)
}
}
- if (wx->mac.type == wx_mac_sp) {
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
if (wx_read_ee_hostif(wx, WX_SW_REGION_PTR, &data)) {
wx_err(wx, "NVM Read Error\n");
return;
}
data = data >> 1;
+ break;
+ default:
+ break;
}
eeprom->sw_region_offset = data;
@@ -616,8 +766,15 @@ static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools,
/* setup VMDq pool mapping */
wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF);
- if (wx->mac.type == wx_mac_sp)
+
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32);
+ break;
+ default:
+ break;
+ }
/* HW expects these in little endian so we reverse the byte
* order from network order (big endian) to little endian
@@ -755,9 +912,14 @@ void wx_init_rx_addrs(struct wx *wx)
wx_set_rar(wx, 0, wx->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV);
- if (wx->mac.type == wx_mac_sp) {
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
/* clear VMDq pool/queue selection for RAR 0 */
wx_clear_vmdq(wx, 0, WX_CLEAR_VMDQ_ALL);
+ break;
+ default:
+ break;
}
}
@@ -1699,7 +1861,7 @@ void wx_configure_rx(struct wx *wx)
/* enable hw crc stripping */
wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_CRC_STRIP, WX_RSC_CTL_CRC_STRIP);
- if (wx->mac.type == wx_mac_sp) {
+ if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) {
u32 psrctl;
/* RSC Setup */
@@ -2351,7 +2513,7 @@ void wx_update_stats(struct wx *wx)
hwstats->b2ogprc += rd32(wx, WX_RDM_BMC2OS_CNT);
hwstats->rdmdrop += rd32(wx, WX_RDM_DRP_PKT);
- if (wx->mac.type == wx_mac_sp) {
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
hwstats->fdirmatch += rd32(wx, WX_RDB_FDIR_MATCH);
hwstats->fdirmiss += rd32(wx, WX_RDB_FDIR_MISS);
}
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
index 11fb33349482..b883342bb576 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
@@ -18,6 +18,7 @@ void wx_control_hw(struct wx *wx, bool drv);
int wx_mng_present(struct wx *wx);
int wx_host_interface_command(struct wx *wx, u32 *buffer,
u32 length, u32 timeout, bool return_data);
+int wx_set_pps(struct wx *wx, bool enable, u64 nsec, u64 cycles);
int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data);
int wx_read_ee_hostif_buffer(struct wx *wx,
u16 offset, u16 words, u16 *data);
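
wx_set_pps(), declared above and defined in the wx_hw.c hunk, is a thin wrapper that sends FW_PPS_SET_CMD through the host interface mailbox; callers pass the enable flag plus a nanosecond target and the matching cycle count. A minimal, hypothetical caller, only to show the calling convention (the wrapper name below is made up):

/* Illustrative only: toggling the pulse-per-second output through the
 * new firmware helper. Only wx_set_pps() comes from this patch. */
static int example_toggle_pps(struct wx *wx, bool on, u64 nsec, u64 cycles)
{
	/* forward the request to the firmware over the mailbox */
	return wx_set_pps(wx, on, nsec, cycles);
}
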
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 2b3d6586f44a..00b0b318df27 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -13,6 +13,7 @@
#include "wx_type.h"
#include "wx_lib.h"
+#include "wx_ptp.h"
#include "wx_hw.h"
/* Lookup table mapping the HW PTYPE to the bit field for decoding */
@@ -597,8 +598,17 @@ static void wx_process_skb_fields(struct wx_ring *rx_ring,
union wx_rx_desc *rx_desc,
struct sk_buff *skb)
{
+ struct wx *wx = netdev_priv(rx_ring->netdev);
+
wx_rx_hash(rx_ring, rx_desc, skb);
wx_rx_checksum(rx_ring, rx_desc, skb);
+
+ if (unlikely(test_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, wx->flags)) &&
+ unlikely(wx_test_staterr(rx_desc, WX_RXD_STAT_TS))) {
+ wx_ptp_rx_hwtstamp(rx_ring->q_vector->wx, skb);
+ rx_ring->last_rx_timestamp = jiffies;
+ }
+
wx_rx_vlan(rx_ring, rx_desc, skb);
skb_record_rx_queue(skb, rx_ring->queue_index);
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
@@ -705,6 +715,7 @@ static bool wx_clean_tx_irq(struct wx_q_vector *q_vector,
{
unsigned int budget = q_vector->wx->tx_work_limit;
unsigned int total_bytes = 0, total_packets = 0;
+ struct wx *wx = netdev_priv(tx_ring->netdev);
unsigned int i = tx_ring->next_to_clean;
struct wx_tx_buffer *tx_buffer;
union wx_tx_desc *tx_desc;
@@ -737,6 +748,11 @@ static bool wx_clean_tx_irq(struct wx_q_vector *q_vector,
total_bytes += tx_buffer->bytecount;
total_packets += tx_buffer->gso_segs;
+ /* schedule check for Tx timestamp */
+ if (unlikely(test_bit(WX_STATE_PTP_TX_IN_PROGRESS, wx->state)) &&
+ skb_shinfo(tx_buffer->skb)->tx_flags & SKBTX_IN_PROGRESS)
+ ptp_schedule_worker(wx->ptp_clock, 0);
+
/* free the skb */
napi_consume_skb(tx_buffer->skb, napi_budget);
@@ -932,9 +948,9 @@ static void wx_tx_olinfo_status(union wx_tx_desc *tx_desc,
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
-static void wx_tx_map(struct wx_ring *tx_ring,
- struct wx_tx_buffer *first,
- const u8 hdr_len)
+static int wx_tx_map(struct wx_ring *tx_ring,
+ struct wx_tx_buffer *first,
+ const u8 hdr_len)
{
struct sk_buff *skb = first->skb;
struct wx_tx_buffer *tx_buffer;
@@ -1013,6 +1029,8 @@ static void wx_tx_map(struct wx_ring *tx_ring,
netdev_tx_sent_queue(wx_txring_txq(tx_ring), first->bytecount);
+ /* set the timestamp */
+ first->time_stamp = jiffies;
skb_tx_timestamp(skb);
/* Force memory writes to complete before letting h/w know there
@@ -1038,7 +1056,7 @@ static void wx_tx_map(struct wx_ring *tx_ring,
if (netif_xmit_stopped(wx_txring_txq(tx_ring)) || !netdev_xmit_more())
writel(i, tx_ring->tail);
- return;
+ return 0;
dma_error:
dev_err(tx_ring->dev, "TX DMA map failed\n");
@@ -1062,6 +1080,8 @@ dma_error:
first->skb = NULL;
tx_ring->next_to_use = i;
+
+ return -ENOMEM;
}
static void wx_tx_ctxtdesc(struct wx_ring *tx_ring, u32 vlan_macip_lens,
@@ -1082,26 +1102,6 @@ static void wx_tx_ctxtdesc(struct wx_ring *tx_ring, u32 vlan_macip_lens,
context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}
-static void wx_get_ipv6_proto(struct sk_buff *skb, int offset, u8 *nexthdr)
-{
- struct ipv6hdr *hdr = (struct ipv6hdr *)(skb->data + offset);
-
- *nexthdr = hdr->nexthdr;
- offset += sizeof(struct ipv6hdr);
- while (ipv6_ext_hdr(*nexthdr)) {
- struct ipv6_opt_hdr _hdr, *hp;
-
- if (*nexthdr == NEXTHDR_NONE)
- return;
- hp = skb_header_pointer(skb, offset, sizeof(_hdr), &_hdr);
- if (!hp)
- return;
- if (*nexthdr == NEXTHDR_FRAGMENT)
- break;
- *nexthdr = hp->nexthdr;
- }
-}
-
union network_header {
struct iphdr *ipv4;
struct ipv6hdr *ipv6;
@@ -1112,6 +1112,8 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first)
{
u8 tun_prot = 0, l4_prot = 0, ptype = 0;
struct sk_buff *skb = first->skb;
+ unsigned char *exthdr, *l4_hdr;
+ __be16 frag_off;
if (skb->encapsulation) {
union network_header hdr;
@@ -1122,14 +1124,18 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first)
ptype = WX_PTYPE_TUN_IPV4;
break;
case htons(ETH_P_IPV6):
- wx_get_ipv6_proto(skb, skb_network_offset(skb), &tun_prot);
+ l4_hdr = skb_transport_header(skb);
+ exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
+ tun_prot = ipv6_hdr(skb)->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data, &tun_prot, &frag_off);
ptype = WX_PTYPE_TUN_IPV6;
break;
default:
return ptype;
}
- if (tun_prot == IPPROTO_IPIP) {
+ if (tun_prot == IPPROTO_IPIP || tun_prot == IPPROTO_IPV6) {
hdr.raw = (void *)inner_ip_hdr(skb);
ptype |= WX_PTYPE_PKT_IPIP;
} else if (tun_prot == IPPROTO_UDP) {
@@ -1166,7 +1172,11 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first)
l4_prot = hdr.ipv4->protocol;
break;
case 6:
- wx_get_ipv6_proto(skb, skb_inner_network_offset(skb), &l4_prot);
+ l4_hdr = skb_inner_transport_header(skb);
+ exthdr = skb_inner_network_header(skb) + sizeof(struct ipv6hdr);
+ l4_prot = inner_ipv6_hdr(skb)->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_prot, &frag_off);
ptype |= WX_PTYPE_PKT_IPV6;
break;
default:
@@ -1179,7 +1189,11 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first)
ptype = WX_PTYPE_PKT_IP;
break;
case htons(ETH_P_IPV6):
- wx_get_ipv6_proto(skb, skb_network_offset(skb), &l4_prot);
+ l4_hdr = skb_transport_header(skb);
+ exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
+ l4_prot = ipv6_hdr(skb)->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_prot, &frag_off);
ptype = WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6;
break;
default:
@@ -1269,13 +1283,20 @@ static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
if (enc) {
+ unsigned char *exthdr, *l4_hdr;
+ __be16 frag_off;
+
switch (first->protocol) {
case htons(ETH_P_IP):
tun_prot = ip_hdr(skb)->protocol;
first->tx_flags |= WX_TX_FLAGS_OUTER_IPV4;
break;
case htons(ETH_P_IPV6):
+ l4_hdr = skb_transport_header(skb);
+ exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
tun_prot = ipv6_hdr(skb)->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data, &tun_prot, &frag_off);
break;
default:
break;
@@ -1298,6 +1319,7 @@ static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
WX_TXD_TUNNEL_LEN_SHIFT);
break;
case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) -
(char *)ip_hdr(skb)) >> 2) <<
WX_TXD_OUTER_IPLEN_SHIFT;
@@ -1335,12 +1357,15 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
u8 tun_prot = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
+csum_failed:
if (!(first->tx_flags & WX_TX_FLAGS_HW_VLAN) &&
!(first->tx_flags & WX_TX_FLAGS_CC))
return;
vlan_macip_lens = skb_network_offset(skb) <<
WX_TXD_MACLEN_SHIFT;
} else {
+ unsigned char *exthdr, *l4_hdr;
+ __be16 frag_off;
u8 l4_prot = 0;
union {
struct iphdr *ipv4;
@@ -1362,7 +1387,12 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
tun_prot = ip_hdr(skb)->protocol;
break;
case htons(ETH_P_IPV6):
+ l4_hdr = skb_transport_header(skb);
+ exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
tun_prot = ipv6_hdr(skb)->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &tun_prot, &frag_off);
break;
default:
return;
@@ -1386,6 +1416,7 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
WX_TXD_TUNNEL_LEN_SHIFT);
break;
case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) -
(char *)ip_hdr(skb)) >> 2) <<
WX_TXD_OUTER_IPLEN_SHIFT;
@@ -1408,7 +1439,10 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
break;
case 6:
vlan_macip_lens |= (transport_hdr.raw - network_hdr.raw) >> 1;
+ exthdr = network_hdr.raw + sizeof(struct ipv6hdr);
l4_prot = network_hdr.ipv6->nexthdr;
+ if (transport_hdr.raw != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_prot, &frag_off);
break;
default:
break;
@@ -1428,7 +1462,8 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
WX_TXD_L4LEN_SHIFT;
break;
default:
- break;
+ skb_checksum_help(skb);
+ goto csum_failed;
}
/* update TX checksum flag */
@@ -1486,6 +1521,20 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb,
tx_flags |= WX_TX_FLAGS_HW_VLAN;
}
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ wx->ptp_clock) {
+ if (wx->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
+ !test_and_set_bit_lock(WX_STATE_PTP_TX_IN_PROGRESS,
+ wx->state)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ tx_flags |= WX_TX_FLAGS_TSTAMP;
+ wx->ptp_tx_skb = skb_get(skb);
+ wx->ptp_tx_start = jiffies;
+ } else {
+ wx->tx_hwtstamp_skipped++;
+ }
+ }
+
/* record initial flags and protocol */
first->tx_flags = tx_flags;
first->protocol = vlan_get_protocol(skb);
@@ -1501,12 +1550,20 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb,
if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags) && tx_ring->atr_sample_rate)
wx->atr(tx_ring, first, ptype);
- wx_tx_map(tx_ring, first, hdr_len);
+ if (wx_tx_map(tx_ring, first, hdr_len))
+ goto cleanup_tx_tstamp;
return NETDEV_TX_OK;
out_drop:
dev_kfree_skb_any(first->skb);
first->skb = NULL;
+cleanup_tx_tstamp:
+ if (unlikely(tx_flags & WX_TX_FLAGS_TSTAMP)) {
+ dev_kfree_skb_any(wx->ptp_tx_skb);
+ wx->ptp_tx_skb = NULL;
+ wx->tx_hwtstamp_errors++;
+ clear_bit_unlock(WX_STATE_PTP_TX_IN_PROGRESS, wx->state);
+ }
return NETDEV_TX_OK;
}
@@ -1781,10 +1838,16 @@ static int wx_alloc_q_vector(struct wx *wx,
/* initialize pointer to rings */
ring = q_vector->ring;
- if (wx->mac.type == wx_mac_sp)
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
default_itr = WX_12K_ITR;
- else
+ break;
+ default:
default_itr = WX_7K_ITR;
+ break;
+ }
+
/* initialize ITR */
if (txr_count && !rxr_count)
/* tx only vector */
@@ -2140,10 +2203,17 @@ void wx_write_eitr(struct wx_q_vector *q_vector)
int v_idx = q_vector->v_idx;
u32 itr_reg;
- if (wx->mac.type == wx_mac_sp)
+ switch (wx->mac.type) {
+ case wx_mac_sp:
itr_reg = q_vector->itr & WX_SP_MAX_EITR;
- else
+ break;
+ case wx_mac_aml:
+ itr_reg = (q_vector->itr >> 3) & WX_AML_MAX_EITR;
+ break;
+ default:
itr_reg = q_vector->itr & WX_EM_MAX_EITR;
+ break;
+ }
itr_reg |= WX_PX_ITR_CNT_WDIS;
@@ -2719,7 +2789,7 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
netdev->features = features;
- if (wx->mac.type == wx_mac_sp && changed & NETIF_F_HW_VLAN_CTAG_RX)
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX && wx->do_reset)
wx->do_reset(netdev);
else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER))
wx_set_rx_mode(netdev);
@@ -2751,7 +2821,7 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
break;
}
- if (need_reset)
+ if (need_reset && wx->do_reset)
wx->do_reset(netdev);
return 0;
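
The wx_lib.c hunks above replace the open-coded wx_get_ipv6_proto() walk with the in-kernel ipv6_skip_exthdr() helper; the recurring idiom is to start from the fixed IPv6 header and only walk extension headers when the transport header does not immediately follow it. A condensed, standalone sketch of that idiom (not a copy of any one hunk):

/* Illustrative only: resolve the final L4 protocol of an outer IPv6
 * header, skipping any extension headers that precede the transport
 * header. Mirrors the pattern used repeatedly in the hunks above. */
static u8 example_ipv6_l4_proto(struct sk_buff *skb)
{
	unsigned char *exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
	unsigned char *l4_hdr = skb_transport_header(skb);
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	__be16 frag_off;

	/* only walk extension headers if any are actually present */
	if (l4_hdr != exthdr)
		ipv6_skip_exthdr(skb, exthdr - skb->data, &nexthdr, &frag_off);

	return nexthdr;
}
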
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ptp.c b/drivers/net/ethernet/wangxun/libwx/wx_ptp.c
new file mode 100644
index 000000000000..07c015ba338f
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ptp.c
@@ -0,0 +1,883 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+/* Copyright (c) 1999 - 2025 Intel Corporation. */
+
+#include <linux/ptp_classify.h>
+#include <linux/clocksource.h>
+#include <linux/pci.h>
+
+#include "wx_type.h"
+#include "wx_ptp.h"
+#include "wx_hw.h"
+
+#define WX_INCVAL_10GB 0xCCCCCC
+#define WX_INCVAL_1GB 0x800000
+#define WX_INCVAL_100 0xA00000
+#define WX_INCVAL_10 0xC7F380
+#define WX_INCVAL_EM 0x2000000
+
+#define WX_INCVAL_SHIFT_10GB 20
+#define WX_INCVAL_SHIFT_1GB 18
+#define WX_INCVAL_SHIFT_100 15
+#define WX_INCVAL_SHIFT_10 12
+#define WX_INCVAL_SHIFT_EM 22
+
+#define WX_OVERFLOW_PERIOD (HZ * 30)
+#define WX_PTP_TX_TIMEOUT (HZ)
+
+#define WX_1588_PPS_WIDTH_EM 120
+
+#define WX_NS_PER_SEC 1000000000ULL
+
+static u64 wx_ptp_timecounter_cyc2time(struct wx *wx, u64 timestamp)
+{
+ unsigned int seq;
+ u64 ns;
+
+ do {
+ seq = read_seqbegin(&wx->hw_tc_lock);
+ ns = timecounter_cyc2time(&wx->hw_tc, timestamp);
+ } while (read_seqretry(&wx->hw_tc_lock, seq));
+
+ return ns;
+}
+
+static u64 wx_ptp_readtime(struct wx *wx, struct ptp_system_timestamp *sts)
+{
+ u32 timeh1, timeh2, timel;
+
+ timeh1 = rd32ptp(wx, WX_TSC_1588_SYSTIMH);
+ ptp_read_system_prets(sts);
+ timel = rd32ptp(wx, WX_TSC_1588_SYSTIML);
+ ptp_read_system_postts(sts);
+ timeh2 = rd32ptp(wx, WX_TSC_1588_SYSTIMH);
+
+ if (timeh1 != timeh2) {
+ ptp_read_system_prets(sts);
+ timel = rd32ptp(wx, WX_TSC_1588_SYSTIML);
+		ptp_read_system_postts(sts);
+ }
+ return (u64)timel | (u64)timeh2 << 32;
+}
+
+static int wx_ptp_adjfine(struct ptp_clock_info *ptp, long ppb)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+ u64 incval, mask;
+
+ smp_mb(); /* Force any pending update before accessing. */
+ incval = READ_ONCE(wx->base_incval);
+ incval = adjust_by_scaled_ppm(incval, ppb);
+
+ mask = (wx->mac.type == wx_mac_em) ? 0x7FFFFFF : 0xFFFFFF;
+ incval &= mask;
+ if (wx->mac.type != wx_mac_em)
+ incval |= 2 << 24;
+
+ wr32ptp(wx, WX_TSC_1588_INC, incval);
+
+ return 0;
+}
+
+static int wx_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+ unsigned long flags;
+
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ timecounter_adjtime(&wx->hw_tc, delta);
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+
+ if (wx->ptp_setup_sdp)
+ wx->ptp_setup_sdp(wx);
+
+ return 0;
+}
+
+static int wx_ptp_gettimex64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+ u64 ns, stamp;
+
+ stamp = wx_ptp_readtime(wx, sts);
+ ns = wx_ptp_timecounter_cyc2time(wx, stamp);
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int wx_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+ unsigned long flags;
+ u64 ns;
+
+ ns = timespec64_to_ns(ts);
+ /* reset the timecounter */
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ timecounter_init(&wx->hw_tc, &wx->hw_cc, ns);
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+
+ if (wx->ptp_setup_sdp)
+ wx->ptp_setup_sdp(wx);
+
+ return 0;
+}
+
+/**
+ * wx_ptp_clear_tx_timestamp - utility function to clear Tx timestamp state
+ * @wx: the private board structure
+ *
+ * This function should be called whenever the state related to a Tx timestamp
+ * needs to be cleared. This helps ensure that all related bits are reset for
+ * the next Tx timestamp event.
+ */
+static void wx_ptp_clear_tx_timestamp(struct wx *wx)
+{
+ rd32ptp(wx, WX_TSC_1588_STMPH);
+ if (wx->ptp_tx_skb) {
+ dev_kfree_skb_any(wx->ptp_tx_skb);
+ wx->ptp_tx_skb = NULL;
+ }
+ clear_bit_unlock(WX_STATE_PTP_TX_IN_PROGRESS, wx->state);
+}
+
+/**
+ * wx_ptp_convert_to_hwtstamp - convert register value to hw timestamp
+ * @wx: private board structure
+ * @hwtstamp: stack timestamp structure
+ * @timestamp: unsigned 64bit system time value
+ *
+ * We need to convert the adapter's RX/TXSTMP registers into a hwtstamp value
+ * which can be used by the stack's ptp functions.
+ *
+ * The lock is used to protect consistency of the cyclecounter and the SYSTIME
+ * registers. However, it does not need to protect against the Rx or Tx
+ * timestamp registers, as there can't be a new timestamp until the old one is
+ * unlatched by reading.
+ *
+ * In addition to the timestamp in hardware, some controllers need a software
+ * overflow cyclecounter, and this function takes this into account as well.
+ **/
+static void wx_ptp_convert_to_hwtstamp(struct wx *wx,
+ struct skb_shared_hwtstamps *hwtstamp,
+ u64 timestamp)
+{
+ u64 ns;
+
+ ns = wx_ptp_timecounter_cyc2time(wx, timestamp);
+ hwtstamp->hwtstamp = ns_to_ktime(ns);
+}
+
+/**
+ * wx_ptp_tx_hwtstamp - utility function which checks for TX time stamp
+ * @wx: the private board struct
+ *
+ * if the timestamp is valid, we convert it into the timecounter ns
+ * value, then store that result into the shhwtstamps structure which
+ * is passed up the network stack
+ */
+static void wx_ptp_tx_hwtstamp(struct wx *wx)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct sk_buff *skb = wx->ptp_tx_skb;
+ u64 regval = 0;
+
+ regval |= (u64)rd32ptp(wx, WX_TSC_1588_STMPL);
+ regval |= (u64)rd32ptp(wx, WX_TSC_1588_STMPH) << 32;
+
+ wx_ptp_convert_to_hwtstamp(wx, &shhwtstamps, regval);
+
+ wx->ptp_tx_skb = NULL;
+ clear_bit_unlock(WX_STATE_PTP_TX_IN_PROGRESS, wx->state);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
+ wx->tx_hwtstamp_pkts++;
+}
+
+static int wx_ptp_tx_hwtstamp_work(struct wx *wx)
+{
+ u32 tsynctxctl;
+
+ /* we have to have a valid skb to poll for a timestamp */
+ if (!wx->ptp_tx_skb) {
+ wx_ptp_clear_tx_timestamp(wx);
+ return 0;
+ }
+
+ /* stop polling once we have a valid timestamp */
+ tsynctxctl = rd32ptp(wx, WX_TSC_1588_CTL);
+ if (tsynctxctl & WX_TSC_1588_CTL_VALID) {
+ wx_ptp_tx_hwtstamp(wx);
+ return 0;
+ }
+
+ return -1;
+}
+
+/**
+ * wx_ptp_overflow_check - watchdog task to detect SYSTIME overflow
+ * @wx: pointer to wx struct
+ *
+ * This watchdog task periodically reads the timecounter so that a wrap of
+ * the system time registers is not missed. It needs to run approximately
+ * twice a minute for the fastest-overflowing hardware; we run it for all
+ * hardware since the overhead is negligible.
+ */
+static void wx_ptp_overflow_check(struct wx *wx)
+{
+ bool timeout = time_is_before_jiffies(wx->last_overflow_check +
+ WX_OVERFLOW_PERIOD);
+ unsigned long flags;
+
+ if (timeout) {
+ /* Update the timecounter */
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ timecounter_read(&wx->hw_tc);
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+
+ wx->last_overflow_check = jiffies;
+ }
+}
+
+/**
+ * wx_ptp_rx_hang - detect error case when Rx timestamp registers latched
+ * @wx: pointer to wx struct
+ *
+ * This watchdog task is scheduled to detect the error case where the
+ * hardware has dropped a timestamped Rx packet because the ring was full.
+ * The error is rare, but it leaves the device unable to timestamp any
+ * future packets.
+ */
+static void wx_ptp_rx_hang(struct wx *wx)
+{
+ struct wx_ring *rx_ring;
+ unsigned long rx_event;
+ u32 tsyncrxctl;
+ int n;
+
+ tsyncrxctl = rd32(wx, WX_PSR_1588_CTL);
+
+ /* if we don't have a valid timestamp in the registers, just update the
+ * timeout counter and exit
+ */
+ if (!(tsyncrxctl & WX_PSR_1588_CTL_VALID)) {
+ wx->last_rx_ptp_check = jiffies;
+ return;
+ }
+
+ /* determine the most recent watchdog or rx_timestamp event */
+ rx_event = wx->last_rx_ptp_check;
+ for (n = 0; n < wx->num_rx_queues; n++) {
+ rx_ring = wx->rx_ring[n];
+ if (time_after(rx_ring->last_rx_timestamp, rx_event))
+ rx_event = rx_ring->last_rx_timestamp;
+ }
+
+ /* only need to read the high RXSTMP register to clear the lock */
+ if (time_is_before_jiffies(rx_event + 5 * HZ)) {
+ rd32(wx, WX_PSR_1588_STMPH);
+ wx->last_rx_ptp_check = jiffies;
+
+ wx->rx_hwtstamp_cleared++;
+ dev_warn(&wx->pdev->dev, "clearing RX Timestamp hang");
+ }
+}
+
+/**
+ * wx_ptp_tx_hang - detect error case where Tx timestamp never finishes
+ * @wx: private network wx structure
+ */
+static void wx_ptp_tx_hang(struct wx *wx)
+{
+ bool timeout = time_is_before_jiffies(wx->ptp_tx_start +
+ WX_PTP_TX_TIMEOUT);
+
+ if (!wx->ptp_tx_skb)
+ return;
+
+ if (!test_bit(WX_STATE_PTP_TX_IN_PROGRESS, wx->state))
+ return;
+
+ /* If we haven't received a timestamp within the timeout, it is
+ * reasonable to assume it never will, so we can unlock the
+ * timestamp bit.
+ */
+ if (timeout) {
+ wx_ptp_clear_tx_timestamp(wx);
+ wx->tx_hwtstamp_timeouts++;
+ dev_warn(&wx->pdev->dev, "clearing Tx timestamp hang\n");
+ }
+}
+
+static long wx_ptp_do_aux_work(struct ptp_clock_info *ptp)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+ int ts_done;
+
+ ts_done = wx_ptp_tx_hwtstamp_work(wx);
+
+ wx_ptp_overflow_check(wx);
+ if (unlikely(test_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER,
+ wx->flags)))
+ wx_ptp_rx_hang(wx);
+ wx_ptp_tx_hang(wx);
+
+ return ts_done ? 1 : HZ;
+}
+
+static u64 wx_ptp_trigger_calc(struct wx *wx)
+{
+ struct cyclecounter *cc = &wx->hw_cc;
+ unsigned long flags;
+ u64 ns = 0;
+ u32 rem;
+
+ /* Read the current clock time, and save the cycle counter value */
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ ns = timecounter_read(&wx->hw_tc);
+ wx->pps_edge_start = wx->hw_tc.cycle_last;
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+ wx->pps_edge_end = wx->pps_edge_start;
+
+ /* Figure out how far past the next second we are */
+ div_u64_rem(ns, WX_NS_PER_SEC, &rem);
+
+ /* Figure out how many nanoseconds to add to round the clock edge up
+ * to the next full second
+ */
+ rem = (WX_NS_PER_SEC - rem);
+
+ /* Adjust the clock edge to align with the next full second. */
+ wx->pps_edge_start += div_u64(((u64)rem << cc->shift), cc->mult);
+ wx->pps_edge_end += div_u64(((u64)(rem + wx->pps_width) <<
+ cc->shift), cc->mult);
+
+ return (ns + rem);
+}
+
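
wx_ptp_trigger_calc() rounds the current time up to the next full second and converts the remaining nanoseconds into cycle-counter units using the same shift/mult as the cyclecounter (mult stays 1 in this driver, see wx_ptp_reset_cyclecounter() later in the file). A self-contained sketch of that arithmetic with hypothetical numbers:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	/* Hypothetical snapshot: the clock reads 12.7s, and counter units
	 * are ns << shift with mult == 1 (the 10G case uses shift == 20).
	 */
	uint64_t ns = 12700000000ULL;
	uint32_t shift = 20, mult = 1;
	uint64_t cycle_last = ns << shift;		 /* stand-in for hw_tc.cycle_last */
	uint64_t rem = NSEC_PER_SEC - ns % NSEC_PER_SEC; /* 0.3s to the next edge */
	uint64_t edge = cycle_last + ((rem << shift) / mult);

	printf("next 1s edge at cycle %llu (ns %llu)\n",
	       (unsigned long long)edge,
	       (unsigned long long)((edge * mult) >> shift));
	return 0;
}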
+static int wx_ptp_setup_sdp(struct wx *wx)
+{
+ struct cyclecounter *cc = &wx->hw_cc;
+ u32 tsauxc;
+ u64 nsec;
+
+ if (wx->pps_width >= WX_NS_PER_SEC) {
+ wx_err(wx, "PTP pps width cannot be longer than 1s!\n");
+ return -EINVAL;
+ }
+
+ /* disable the pin first */
+ wr32ptp(wx, WX_TSC_1588_AUX_CTL, 0);
+ WX_WRITE_FLUSH(wx);
+
+ if (!test_bit(WX_FLAG_PTP_PPS_ENABLED, wx->flags)) {
+ if (wx->pps_enabled) {
+ wx->pps_enabled = false;
+ wx_set_pps(wx, false, 0, 0);
+ }
+ return 0;
+ }
+
+ wx->pps_enabled = true;
+ nsec = wx_ptp_trigger_calc(wx);
+ wx_set_pps(wx, wx->pps_enabled, nsec, wx->pps_edge_start);
+
+ tsauxc = WX_TSC_1588_AUX_CTL_PLSG | WX_TSC_1588_AUX_CTL_EN_TT0 |
+ WX_TSC_1588_AUX_CTL_EN_TT1 | WX_TSC_1588_AUX_CTL_EN_TS0;
+ wr32ptp(wx, WX_TSC_1588_TRGT_L(0), (u32)wx->pps_edge_start);
+ wr32ptp(wx, WX_TSC_1588_TRGT_H(0), (u32)(wx->pps_edge_start >> 32));
+ wr32ptp(wx, WX_TSC_1588_TRGT_L(1), (u32)wx->pps_edge_end);
+ wr32ptp(wx, WX_TSC_1588_TRGT_H(1), (u32)(wx->pps_edge_end >> 32));
+ wr32ptp(wx, WX_TSC_1588_SDP(0),
+ WX_TSC_1588_SDP_FUN_SEL_TT0 | WX_TSC_1588_SDP_OUT_LEVEL_H);
+ wr32ptp(wx, WX_TSC_1588_SDP(1), WX_TSC_1588_SDP_FUN_SEL_TS0);
+ wr32ptp(wx, WX_TSC_1588_AUX_CTL, tsauxc);
+ wr32ptp(wx, WX_TSC_1588_INT_EN, WX_TSC_1588_INT_EN_TT1);
+ WX_WRITE_FLUSH(wx);
+
+ /* Adjust the clock edge to align with the next full second. */
+ wx->sec_to_cc = div_u64(((u64)WX_NS_PER_SEC << cc->shift), cc->mult);
+
+ return 0;
+}
+
+static int wx_ptp_feature_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+
+ /* When PPS is enabled, unmask the interrupt for the ClockOut
+ * feature so that the interrupt handler can send the PPS
+ * event when the clock SDP triggers. Clear the mask when PPS
+ * is disabled.
+ */
+ if (rq->type != PTP_CLK_REQ_PEROUT || !wx->ptp_setup_sdp)
+ return -EOPNOTSUPP;
+
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE))
+ return -EOPNOTSUPP;
+
+ if (rq->perout.phase.sec || rq->perout.phase.nsec) {
+ wx_err(wx, "Absolute start time not supported.\n");
+ return -EINVAL;
+ }
+
+ if (rq->perout.period.sec != 1 || rq->perout.period.nsec) {
+ wx_err(wx, "Only 1pps is supported.\n");
+ return -EINVAL;
+ }
+
+ if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
+ struct timespec64 ts_on;
+
+ ts_on.tv_sec = rq->perout.on.sec;
+ ts_on.tv_nsec = rq->perout.on.nsec;
+ wx->pps_width = timespec64_to_ns(&ts_on);
+ } else {
+ wx->pps_width = 120000000;
+ }
+
+ if (on)
+ set_bit(WX_FLAG_PTP_PPS_ENABLED, wx->flags);
+ else
+ clear_bit(WX_FLAG_PTP_PPS_ENABLED, wx->flags);
+
+ return wx->ptp_setup_sdp(wx);
+}
+
+void wx_ptp_check_pps_event(struct wx *wx)
+{
+ u32 tsauxc, int_status;
+
+ /* This check is necessary in case the interrupt was enabled via some
+ * alternative means (e.g. debugfs). Better to check here than in
+ * every caller of this function.
+ */
+ if (!wx->ptp_clock)
+ return;
+
+ int_status = rd32ptp(wx, WX_TSC_1588_INT_ST);
+ if (int_status & WX_TSC_1588_INT_ST_TT1) {
+ /* disable the pin first */
+ wr32ptp(wx, WX_TSC_1588_AUX_CTL, 0);
+ WX_WRITE_FLUSH(wx);
+
+ wx_ptp_trigger_calc(wx);
+
+ tsauxc = WX_TSC_1588_AUX_CTL_PLSG | WX_TSC_1588_AUX_CTL_EN_TT0 |
+ WX_TSC_1588_AUX_CTL_EN_TT1 | WX_TSC_1588_AUX_CTL_EN_TS0;
+ wr32ptp(wx, WX_TSC_1588_TRGT_L(0), (u32)wx->pps_edge_start);
+ wr32ptp(wx, WX_TSC_1588_TRGT_H(0), (u32)(wx->pps_edge_start >> 32));
+ wr32ptp(wx, WX_TSC_1588_TRGT_L(1), (u32)wx->pps_edge_end);
+ wr32ptp(wx, WX_TSC_1588_TRGT_H(1), (u32)(wx->pps_edge_end >> 32));
+ wr32ptp(wx, WX_TSC_1588_AUX_CTL, tsauxc);
+ WX_WRITE_FLUSH(wx);
+ }
+}
+EXPORT_SYMBOL(wx_ptp_check_pps_event);
+
+static long wx_ptp_create_clock(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ long err;
+
+ /* do nothing if we already have a clock device */
+ if (!IS_ERR_OR_NULL(wx->ptp_clock))
+ return 0;
+
+ snprintf(wx->ptp_caps.name, sizeof(wx->ptp_caps.name),
+ "%s", netdev->name);
+ wx->ptp_caps.owner = THIS_MODULE;
+ wx->ptp_caps.n_alarm = 0;
+ wx->ptp_caps.n_ext_ts = 0;
+ wx->ptp_caps.pps = 0;
+ wx->ptp_caps.adjfine = wx_ptp_adjfine;
+ wx->ptp_caps.adjtime = wx_ptp_adjtime;
+ wx->ptp_caps.gettimex64 = wx_ptp_gettimex64;
+ wx->ptp_caps.settime64 = wx_ptp_settime64;
+ wx->ptp_caps.do_aux_work = wx_ptp_do_aux_work;
+ if (wx->mac.type == wx_mac_em) {
+ wx->ptp_caps.max_adj = 500000000;
+ wx->ptp_caps.n_per_out = 1;
+ wx->ptp_setup_sdp = wx_ptp_setup_sdp;
+ wx->ptp_caps.enable = wx_ptp_feature_enable;
+ } else {
+ wx->ptp_caps.max_adj = 250000000;
+ wx->ptp_caps.n_per_out = 0;
+ wx->ptp_setup_sdp = NULL;
+ }
+
+ wx->ptp_clock = ptp_clock_register(&wx->ptp_caps, &wx->pdev->dev);
+ if (IS_ERR(wx->ptp_clock)) {
+ err = PTR_ERR(wx->ptp_clock);
+ wx->ptp_clock = NULL;
+ wx_err(wx, "ptp clock register failed\n");
+ return err;
+ } else if (wx->ptp_clock) {
+ dev_info(&wx->pdev->dev, "registered PHC device on %s\n",
+ netdev->name);
+ }
+
+ /* Set the default timestamp mode to disabled here. We do this in
+ * create_clock instead of initialization, because we don't want to
+ * override the previous settings during a suspend/resume cycle.
+ */
+ wx->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ wx->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+ return 0;
+}
+
+static int wx_ptp_set_timestamp_mode(struct wx *wx,
+ struct kernel_hwtstamp_config *config)
+{
+ u32 tsync_tx_ctl = WX_TSC_1588_CTL_ENABLED;
+ u32 tsync_rx_ctl = WX_PSR_1588_CTL_ENABLED;
+ DECLARE_BITMAP(flags, WX_PF_FLAGS_NBITS);
+ u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
+ bool is_l2 = false;
+ u32 regval;
+
+ memcpy(flags, wx->flags, sizeof(wx->flags));
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ tsync_tx_ctl = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tsync_rx_ctl = 0;
+ tsync_rx_mtrl = 0;
+ clear_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, flags);
+ clear_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, flags);
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ tsync_rx_ctl |= WX_PSR_1588_CTL_TYPE_L4_V1;
+ tsync_rx_mtrl |= WX_PSR_1588_MSG_V1_SYNC;
+ set_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, flags);
+ set_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, flags);
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ tsync_rx_ctl |= WX_PSR_1588_CTL_TYPE_L4_V1;
+ tsync_rx_mtrl |= WX_PSR_1588_MSG_V1_DELAY_REQ;
+ set_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, flags);
+ set_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, flags);
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ tsync_rx_ctl |= WX_PSR_1588_CTL_TYPE_EVENT_V2;
+ is_l2 = true;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ set_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, flags);
+ set_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, flags);
+ break;
+ default:
+ /* register PSR_1588_MSG must be set in order to do V1 packets,
+ * therefore it is not possible to time stamp both V1 Sync and
+ * Delay_Req messages unless hardware supports timestamping all
+ * packets => return error
+ */
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
+ return -ERANGE;
+ }
+
+ /* define ethertype filter for timestamping L2 packets */
+ if (is_l2)
+ wr32(wx, WX_PSR_ETYPE_SWC(WX_PSR_ETYPE_SWC_FILTER_1588),
+ (WX_PSR_ETYPE_SWC_FILTER_EN | /* enable filter */
+ WX_PSR_ETYPE_SWC_1588 | /* enable timestamping */
+ ETH_P_1588)); /* 1588 eth protocol type */
+ else
+ wr32(wx, WX_PSR_ETYPE_SWC(WX_PSR_ETYPE_SWC_FILTER_1588), 0);
+
+ /* enable/disable TX */
+ regval = rd32ptp(wx, WX_TSC_1588_CTL);
+ regval &= ~WX_TSC_1588_CTL_ENABLED;
+ regval |= tsync_tx_ctl;
+ wr32ptp(wx, WX_TSC_1588_CTL, regval);
+
+ /* enable/disable RX */
+ regval = rd32(wx, WX_PSR_1588_CTL);
+ regval &= ~(WX_PSR_1588_CTL_ENABLED | WX_PSR_1588_CTL_TYPE_MASK);
+ regval |= tsync_rx_ctl;
+ wr32(wx, WX_PSR_1588_CTL, regval);
+
+ /* define which PTP packets are time stamped */
+ wr32(wx, WX_PSR_1588_MSG, tsync_rx_mtrl);
+
+ WX_WRITE_FLUSH(wx);
+
+ /* configure adapter flags only when HW is actually configured */
+ memcpy(wx->flags, flags, sizeof(wx->flags));
+
+ /* clear TX/RX timestamp state, just to be sure */
+ wx_ptp_clear_tx_timestamp(wx);
+ rd32(wx, WX_PSR_1588_STMPH);
+
+ return 0;
+}
+
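
For context, the mode programmed above is what user space requests through the standard SIOCSHWTSTAMP path that wx_hwtstamp_set() ultimately services. A minimal sketch of such a request; the interface name is an assumption:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed interface */
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr))
		perror("SIOCSHWTSTAMP");
	else	/* the driver may adjust rx_filter to what it actually granted */
		printf("granted rx_filter: %d\n", cfg.rx_filter);

	close(fd);
	return 0;
}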
+static u64 wx_ptp_read(const struct cyclecounter *hw_cc)
+{
+ struct wx *wx = container_of(hw_cc, struct wx, hw_cc);
+
+ return wx_ptp_readtime(wx, NULL);
+}
+
+static void wx_ptp_link_speed_adjust(struct wx *wx, u32 *shift, u32 *incval)
+{
+ if (wx->mac.type == wx_mac_em) {
+ *shift = WX_INCVAL_SHIFT_EM;
+ *incval = WX_INCVAL_EM;
+ return;
+ }
+
+ switch (wx->speed) {
+ case SPEED_10:
+ *shift = WX_INCVAL_SHIFT_10;
+ *incval = WX_INCVAL_10;
+ break;
+ case SPEED_100:
+ *shift = WX_INCVAL_SHIFT_100;
+ *incval = WX_INCVAL_100;
+ break;
+ case SPEED_1000:
+ *shift = WX_INCVAL_SHIFT_1GB;
+ *incval = WX_INCVAL_1GB;
+ break;
+ case SPEED_10000:
+ default:
+ *shift = WX_INCVAL_SHIFT_10GB;
+ *incval = WX_INCVAL_10GB;
+ break;
+ }
+}
+
+/**
+ * wx_ptp_reset_cyclecounter - create the cycle counter from hw
+ * @wx: pointer to the wx structure
+ *
+ * This function should be called to set the proper values for the TSC_1588_INC
+ * register and tell the cyclecounter structure what the tick rate of SYSTIME
+ * is. It does not directly modify SYSTIME registers or the timecounter
+ * structure. It should be called whenever a new TSC_1588_INC value is
+ * necessary, such as during initialization or when the link speed changes.
+ */
+void wx_ptp_reset_cyclecounter(struct wx *wx)
+{
+ u32 incval = 0, mask = 0;
+ struct cyclecounter cc;
+ unsigned long flags;
+
+ /* For some of the boards below this mask is technically incorrect:
+ * the timestamp mask overflows at approximately 61 bits. However, the
+ * particular hardware does not overflow on an even bitmask value.
+ * Instead, it overflows due to the upper 32 bits being converted as
+ * billions of cycles. Timecounters are not really intended for this
+ * purpose, so they do not function properly if the overflow point
+ * isn't 2^N-1. However, the actual SYSTIME values in question take
+ * ~138 years to overflow, which in practice means they never will. A
+ * proper fix would require modifying the timecounter delta
+ * calculations.
+ */
+ cc.mask = CLOCKSOURCE_MASK(64);
+ cc.mult = 1;
+ cc.shift = 0;
+
+ cc.read = wx_ptp_read;
+ wx_ptp_link_speed_adjust(wx, &cc.shift, &incval);
+
+ /* update the base incval used to calculate frequency adjustment */
+ WRITE_ONCE(wx->base_incval, incval);
+
+ mask = (wx->mac.type == wx_mac_em) ? 0x7FFFFFF : 0xFFFFFF;
+ incval &= mask;
+ if (wx->mac.type != wx_mac_em)
+ incval |= 2 << 24;
+ wr32ptp(wx, WX_TSC_1588_INC, incval);
+
+ smp_mb(); /* Force the above update. */
+
+ /* need lock to prevent incorrect read while modifying cyclecounter */
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ memcpy(&wx->hw_cc, &cc, sizeof(wx->hw_cc));
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+}
+EXPORT_SYMBOL(wx_ptp_reset_cyclecounter);
+
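
The increment/shift pairs chosen by wx_ptp_link_speed_adjust() determine how many nanoseconds each SYSTIME tick represents: the hardware adds the increment per tick and the cyclecounter divides by 2^shift with mult left at 1. A quick enumeration of the constants from the top of the file (whether the implied tick period matches the real hardware clock is an inference, not something stated in the code):

#include <stdio.h>

/* increment / 2^shift gives the nanoseconds represented by one SYSTIME
 * tick for each link speed (constants from wx_ptp.c).
 */
int main(void)
{
	static const struct { const char *link; unsigned int incval, shift; } tbl[] = {
		{ "10G",  0xCCCCCC, 20 },
		{ "1G",   0x800000, 18 },
		{ "100M", 0xA00000, 15 },
		{ "10M",  0xC7F380, 12 },
	};

	for (unsigned int i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		printf("%-4s: %.2f ns per tick\n", tbl[i].link,
		       (double)tbl[i].incval / (1u << tbl[i].shift));
	return 0;
}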
+void wx_ptp_reset(struct wx *wx)
+{
+ unsigned long flags;
+
+ /* reset the hardware timestamping mode */
+ wx_ptp_set_timestamp_mode(wx, &wx->tstamp_config);
+ wx_ptp_reset_cyclecounter(wx);
+
+ wr32ptp(wx, WX_TSC_1588_SYSTIML, 0);
+ wr32ptp(wx, WX_TSC_1588_SYSTIMH, 0);
+ WX_WRITE_FLUSH(wx);
+
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ timecounter_init(&wx->hw_tc, &wx->hw_cc,
+ ktime_to_ns(ktime_get_real()));
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+
+ wx->last_overflow_check = jiffies;
+ ptp_schedule_worker(wx->ptp_clock, HZ);
+
+ /* Now that the shift has been calculated and the systime
+ * registers reset, (re-)enable the Clock out feature
+ */
+ if (wx->ptp_setup_sdp)
+ wx->ptp_setup_sdp(wx);
+}
+EXPORT_SYMBOL(wx_ptp_reset);
+
+void wx_ptp_init(struct wx *wx)
+{
+ /* Initialize the seqlock_t first, since the user might call the clock
+ * functions any time after we've initialized the ptp clock device.
+ */
+ seqlock_init(&wx->hw_tc_lock);
+
+ /* obtain a ptp clock device, or re-use an existing device */
+ if (wx_ptp_create_clock(wx))
+ return;
+
+ wx->tx_hwtstamp_pkts = 0;
+ wx->tx_hwtstamp_timeouts = 0;
+ wx->tx_hwtstamp_skipped = 0;
+ wx->tx_hwtstamp_errors = 0;
+ wx->rx_hwtstamp_cleared = 0;
+ /* reset the ptp related hardware bits */
+ wx_ptp_reset(wx);
+
+ /* enter the WX_STATE_PTP_RUNNING state */
+ set_bit(WX_STATE_PTP_RUNNING, wx->state);
+}
+EXPORT_SYMBOL(wx_ptp_init);
+
+/**
+ * wx_ptp_suspend - stop ptp work items
+ * @wx: pointer to wx struct
+ *
+ * This function suspends ptp activity, and prevents more work from being
+ * generated, but does not destroy the clock device.
+ */
+void wx_ptp_suspend(struct wx *wx)
+{
+ /* leave the WX_STATE_PTP_RUNNING STATE */
+ if (!test_and_clear_bit(WX_STATE_PTP_RUNNING, wx->state))
+ return;
+
+ clear_bit(WX_FLAG_PTP_PPS_ENABLED, wx->flags);
+ if (wx->ptp_setup_sdp)
+ wx->ptp_setup_sdp(wx);
+
+ wx_ptp_clear_tx_timestamp(wx);
+}
+EXPORT_SYMBOL(wx_ptp_suspend);
+
+/**
+ * wx_ptp_stop - destroy the ptp_clock device
+ * @wx: pointer to wx struct
+ *
+ * Completely destroy the ptp_clock device, and disable all PTP related
+ * features. Intended to be run when the device is being closed.
+ */
+void wx_ptp_stop(struct wx *wx)
+{
+ /* first, suspend ptp activity */
+ wx_ptp_suspend(wx);
+
+ /* now destroy the ptp clock device */
+ if (wx->ptp_clock) {
+ ptp_clock_unregister(wx->ptp_clock);
+ wx->ptp_clock = NULL;
+ dev_info(&wx->pdev->dev, "removed PHC on %s\n", wx->netdev->name);
+ }
+}
+EXPORT_SYMBOL(wx_ptp_stop);
+
+/**
+ * wx_ptp_rx_hwtstamp - utility function which checks for RX time stamp
+ * @wx: pointer to wx struct
+ * @skb: particular skb to send timestamp with
+ *
+ * if the timestamp is valid, we convert it into the timecounter ns
+ * value, then store that result into the shhwtstamps structure which
+ * is passed up the network stack
+ */
+void wx_ptp_rx_hwtstamp(struct wx *wx, struct sk_buff *skb)
+{
+ u64 regval = 0;
+ u32 tsyncrxctl;
+
+ /* Check the tsyncrxctl VALID bit first so that we do not take an I/O
+ * hit on the timestamp registers for every packet.
+ */
+ tsyncrxctl = rd32(wx, WX_PSR_1588_CTL);
+ if (!(tsyncrxctl & WX_PSR_1588_CTL_VALID))
+ return;
+
+ regval |= (u64)rd32(wx, WX_PSR_1588_STMPL);
+ regval |= (u64)rd32(wx, WX_PSR_1588_STMPH) << 32;
+
+ wx_ptp_convert_to_hwtstamp(wx, skb_hwtstamps(skb), regval);
+}
+
+int wx_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct wx *wx = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ *cfg = wx->tstamp_config;
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_hwtstamp_get);
+
+int wx_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct wx *wx = netdev_priv(dev);
+ int err;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ err = wx_ptp_set_timestamp_mode(wx, cfg);
+ if (err)
+ return err;
+
+ /* save these settings for future reference */
+ memcpy(&wx->tstamp_config, cfg, sizeof(wx->tstamp_config));
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_hwtstamp_set);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ptp.h b/drivers/net/ethernet/wangxun/libwx/wx_ptp.h
new file mode 100644
index 000000000000..50db90a6e3ee
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ptp.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _WX_PTP_H_
+#define _WX_PTP_H_
+
+void wx_ptp_check_pps_event(struct wx *wx);
+void wx_ptp_reset_cyclecounter(struct wx *wx);
+void wx_ptp_reset(struct wx *wx);
+void wx_ptp_init(struct wx *wx);
+void wx_ptp_suspend(struct wx *wx);
+void wx_ptp_stop(struct wx *wx);
+void wx_ptp_rx_hwtstamp(struct wx *wx, struct sk_buff *skb);
+int wx_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg);
+int wx_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
+
+#endif /* _WX_PTP_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index b54bffda027b..5b230ecbbabb 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -4,6 +4,8 @@
#ifndef _WX_TYPE_H_
#define _WX_TYPE_H_
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
#include <linux/bitfield.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
@@ -180,6 +182,23 @@
#define WX_PSR_VLAN_CTL 0x15088
#define WX_PSR_VLAN_CTL_CFIEN BIT(29) /* bit 29 */
#define WX_PSR_VLAN_CTL_VFE BIT(30) /* bit 30 */
+/* EType Queue Filter */
+#define WX_PSR_ETYPE_SWC(_i) (0x15128 + ((_i) * 4))
+#define WX_PSR_ETYPE_SWC_FILTER_1588 3
+#define WX_PSR_ETYPE_SWC_FILTER_EN BIT(31)
+#define WX_PSR_ETYPE_SWC_1588 BIT(30)
+/* 1588 */
+#define WX_PSR_1588_MSG 0x15120
+#define WX_PSR_1588_MSG_V1_SYNC FIELD_PREP(GENMASK(7, 0), 0)
+#define WX_PSR_1588_MSG_V1_DELAY_REQ FIELD_PREP(GENMASK(7, 0), 1)
+#define WX_PSR_1588_STMPL 0x151E8
+#define WX_PSR_1588_STMPH 0x151A4
+#define WX_PSR_1588_CTL 0x15188
+#define WX_PSR_1588_CTL_ENABLED BIT(4)
+#define WX_PSR_1588_CTL_TYPE_MASK GENMASK(3, 1)
+#define WX_PSR_1588_CTL_TYPE_L4_V1 FIELD_PREP(GENMASK(3, 1), 1)
+#define WX_PSR_1588_CTL_TYPE_EVENT_V2 FIELD_PREP(GENMASK(3, 1), 5)
+#define WX_PSR_1588_CTL_VALID BIT(0)
/* mcasst/ucast overflow tbl */
#define WX_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4))
#define WX_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4))
@@ -253,6 +272,32 @@
#define WX_TSC_ST_SECTX_RDY BIT(0)
#define WX_TSC_BUF_AE 0x1D00C
#define WX_TSC_BUF_AE_THR GENMASK(9, 0)
+/* 1588 */
+#define WX_TSC_1588_CTL 0x11F00
+#define WX_TSC_1588_CTL_ENABLED BIT(4)
+#define WX_TSC_1588_CTL_VALID BIT(0)
+#define WX_TSC_1588_STMPL 0x11F04
+#define WX_TSC_1588_STMPH 0x11F08
+#define WX_TSC_1588_SYSTIML 0x11F0C
+#define WX_TSC_1588_SYSTIMH 0x11F10
+#define WX_TSC_1588_INC 0x11F14
+#define WX_TSC_1588_INT_ST 0x11F20
+#define WX_TSC_1588_INT_ST_TT1 BIT(5)
+#define WX_TSC_1588_INT_EN 0x11F24
+#define WX_TSC_1588_INT_EN_TT1 BIT(5)
+#define WX_TSC_1588_AUX_CTL 0x11F28
+#define WX_TSC_1588_AUX_CTL_EN_TS0 BIT(8)
+#define WX_TSC_1588_AUX_CTL_EN_TT1 BIT(2)
+#define WX_TSC_1588_AUX_CTL_PLSG BIT(1)
+#define WX_TSC_1588_AUX_CTL_EN_TT0 BIT(0)
+#define WX_TSC_1588_TRGT_L(i) (0x11F2C + ((i) * 8)) /* [0,1] */
+#define WX_TSC_1588_TRGT_H(i) (0x11F30 + ((i) * 8)) /* [0,1] */
+#define WX_TSC_1588_SDP(i) (0x11F5C + ((i) * 4)) /* [0,3] */
+#define WX_TSC_1588_SDP_OUT_LEVEL_H FIELD_PREP(BIT(4), 0)
+#define WX_TSC_1588_SDP_OUT_LEVEL_L FIELD_PREP(BIT(4), 1)
+#define WX_TSC_1588_SDP_FUN_SEL_MASK GENMASK(2, 0)
+#define WX_TSC_1588_SDP_FUN_SEL_TT0 FIELD_PREP(WX_TSC_1588_SDP_FUN_SEL_MASK, 1)
+#define WX_TSC_1588_SDP_FUN_SEL_TS0 FIELD_PREP(WX_TSC_1588_SDP_FUN_SEL_MASK, 5)
/************************************** MNG ********************************/
#define WX_MNG_SWFW_SYNC 0x1E008
@@ -264,6 +309,10 @@
#define WX_MNG_MBOX_CTL_FWRDY BIT(2)
#define WX_MNG_BMC2OS_CNT 0x1E090
#define WX_MNG_OS2BMC_CNT 0x1E094
+#define WX_SW2FW_MBOX_CMD 0x1E0A0
+#define WX_SW2FW_MBOX_CMD_VLD BIT(31)
+#define WX_SW2FW_MBOX 0x1E200
+#define WX_FW2SW_MBOX 0x1E300
/************************************* ETH MAC *****************************/
#define WX_MAC_TX_CFG 0x11000
@@ -327,6 +376,7 @@ enum WX_MSCA_CMD_value {
#define WX_12K_ITR 336
#define WX_20K_ITR 200
#define WX_SP_MAX_EITR 0x00000FF8U
+#define WX_AML_MAX_EITR 0x00000FFFU
#define WX_EM_MAX_EITR 0x00007FFCU
/* transmit DMA Registers */
@@ -370,6 +420,7 @@ enum WX_MSCA_CMD_value {
/****************** Manageablility Host Interface defines ********************/
#define WX_HI_MAX_BLOCK_BYTE_LENGTH 256 /* Num of bytes in range */
#define WX_HI_COMMAND_TIMEOUT 1000 /* Process HI command limit */
+#define WX_HIC_HDR_INDEX_MAX 255
#define FW_READ_SHADOW_RAM_CMD 0x31
#define FW_READ_SHADOW_RAM_LEN 0x6
@@ -382,6 +433,8 @@ enum WX_MSCA_CMD_value {
#define FW_CEM_CMD_RESERVED 0X0
#define FW_CEM_MAX_RETRIES 3
#define FW_CEM_RESP_STATUS_SUCCESS 0x1
+#define FW_PPS_SET_CMD 0xF6
+#define FW_PPS_SET_LEN 0x14
#define WX_SW_REGION_PTR 0x1C
@@ -460,6 +513,7 @@ enum WX_MSCA_CMD_value {
#define WX_RXD_STAT_L4CS BIT(7) /* L4 xsum calculated */
#define WX_RXD_STAT_IPCS BIT(8) /* IP xsum calculated */
#define WX_RXD_STAT_OUTERIPCS BIT(10) /* Cloud IP xsum calculated*/
+#define WX_RXD_STAT_TS BIT(14) /* IEEE1588 Time Stamp */
#define WX_RXD_ERR_OUTERIPER BIT(26) /* CRC IP Header error */
#define WX_RXD_ERR_RXE BIT(29) /* Any MAC Error */
@@ -663,21 +717,30 @@ struct wx_hic_hdr {
u8 cmd_resv;
u8 ret_status;
} cmd_or_resp;
- u8 checksum;
+ union {
+ u8 checksum;
+ u8 index;
+ };
};
struct wx_hic_hdr2_req {
u8 cmd;
u8 buf_lenh;
u8 buf_lenl;
- u8 checksum;
+ union {
+ u8 checksum;
+ u8 index;
+ };
};
struct wx_hic_hdr2_rsp {
u8 cmd;
u8 buf_lenl;
u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */
- u8 checksum;
+ union {
+ u8 checksum;
+ u8 index;
+ };
};
union wx_hic_hdr2 {
@@ -701,6 +764,15 @@ struct wx_hic_reset {
u16 reset_type;
};
+struct wx_hic_set_pps {
+ struct wx_hic_hdr hdr;
+ u8 lan_id;
+ u8 enable;
+ u16 pad2;
+ u64 nsec;
+ u64 cycles;
+};
+
/* Bus parameters */
struct wx_bus_info {
u8 func;
@@ -716,7 +788,8 @@ struct wx_thermal_sensor_data {
enum wx_mac_type {
wx_mac_unknown = 0,
wx_mac_sp,
- wx_mac_em
+ wx_mac_em,
+ wx_mac_aml,
};
enum sp_media_type {
@@ -863,6 +936,7 @@ struct wx_tx_context_desc {
*/
struct wx_tx_buffer {
union wx_tx_desc *next_to_watch;
+ unsigned long time_stamp;
struct sk_buff *skb;
unsigned int bytecount;
unsigned short gso_segs;
@@ -924,6 +998,7 @@ struct wx_ring {
unsigned int size; /* length in bytes */
u16 count; /* amount of descriptors */
+ unsigned long last_rx_timestamp;
u8 queue_index; /* needed for multiqueue queue management */
u8 reg_idx; /* holds the special value that gets
@@ -1026,13 +1101,21 @@ struct wx_hw_stats {
enum wx_state {
WX_STATE_RESETTING,
- WX_STATE_NBITS, /* must be last */
+ WX_STATE_SWFW_BUSY,
+ WX_STATE_PTP_RUNNING,
+ WX_STATE_PTP_TX_IN_PROGRESS,
+ WX_STATE_NBITS /* must be last */
};
enum wx_pf_flags {
+ WX_FLAG_SWFW_RING,
WX_FLAG_FDIR_CAPABLE,
WX_FLAG_FDIR_HASH,
WX_FLAG_FDIR_PERFECT,
+ WX_FLAG_RSC_CAPABLE,
+ WX_FLAG_RX_HWTSTAMP_ENABLED,
+ WX_FLAG_RX_HWTSTAMP_IN_REGISTER,
+ WX_FLAG_PTP_PPS_ENABLED,
WX_PF_FLAGS_NBITS /* must be last */
};
@@ -1066,6 +1149,7 @@ struct wx {
char eeprom_id[32];
char *driver_name;
enum wx_reset_type reset_type;
+ u8 swfw_index;
/* PHY stuff */
unsigned int link;
@@ -1133,6 +1217,29 @@ struct wx {
void (*atr)(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype);
void (*configure_fdir)(struct wx *wx);
void (*do_reset)(struct net_device *netdev);
+ int (*ptp_setup_sdp)(struct wx *wx);
+
+ bool pps_enabled;
+ u64 pps_width;
+ u64 pps_edge_start;
+ u64 pps_edge_end;
+ u64 sec_to_cc;
+ u32 base_incval;
+ u32 tx_hwtstamp_pkts;
+ u32 tx_hwtstamp_timeouts;
+ u32 tx_hwtstamp_skipped;
+ u32 tx_hwtstamp_errors;
+ u32 rx_hwtstamp_cleared;
+ unsigned long last_overflow_check;
+ unsigned long last_rx_ptp_check;
+ unsigned long ptp_tx_start;
+ seqlock_t hw_tc_lock; /* seqlock for ptp */
+ struct cyclecounter hw_cc;
+ struct timecounter hw_tc;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_caps;
+ struct kernel_hwtstamp_config tstamp_config;
+ struct sk_buff *ptp_tx_skb;
};
#define WX_INTR_ALL (~0ULL)
@@ -1177,6 +1284,24 @@ rd64(struct wx *wx, u32 reg)
return (lsb | msb << 32);
}
+static inline u32
+rd32ptp(struct wx *wx, u32 reg)
+{
+ if (wx->mac.type == wx_mac_em)
+ return rd32(wx, reg);
+
+ return rd32(wx, reg + 0xB500);
+}
+
+static inline void
+wr32ptp(struct wx *wx, u32 reg, u32 value)
+{
+ if (wx->mac.type == wx_mac_em)
+ return wr32(wx, reg, value);
+
+ return wr32(wx, reg + 0xB500, value);
+}
+
/* On some domestic CPU platforms, sometimes IO is not synchronized with
* flushing memory, here use readl() to flush PCI read and write.
*/
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
index e868f7ef4920..7e2d9ec38a30 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
@@ -138,6 +138,8 @@ static const struct ethtool_ops ngbe_ethtool_ops = {
.set_channels = ngbe_set_channels,
.get_msglevel = wx_get_msglevel,
.set_msglevel = wx_set_msglevel,
+ .get_ts_info = wx_get_ts_info,
+ .get_ts_stats = wx_get_ptp_stats,
};
void ngbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index 53aeae2f884b..a6159214ec0a 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -14,6 +14,7 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_hw.h"
#include "../libwx/wx_lib.h"
+#include "../libwx/wx_ptp.h"
#include "ngbe_type.h"
#include "ngbe_mdio.h"
#include "ngbe_hw.h"
@@ -167,7 +168,7 @@ static irqreturn_t ngbe_intr(int __always_unused irq, void *data)
struct wx_q_vector *q_vector;
struct wx *wx = data;
struct pci_dev *pdev;
- u32 eicr;
+ u32 eicr, eicr_misc;
q_vector = wx->q_vector[0];
pdev = wx->pdev;
@@ -185,6 +186,10 @@ static irqreturn_t ngbe_intr(int __always_unused irq, void *data)
if (!(pdev->msi_enabled))
wr32(wx, WX_PX_INTA, 1);
+ eicr_misc = wx_misc_isb(wx, WX_ISB_MISC);
+ if (unlikely(eicr_misc & NGBE_PX_MISC_IC_TIMESYNC))
+ wx_ptp_check_pps_event(wx);
+
wx->isb_mem[WX_ISB_MISC] = 0;
/* would disable interrupts here but it is auto disabled */
napi_schedule_irqoff(&q_vector->napi);
@@ -198,6 +203,12 @@ static irqreturn_t ngbe_intr(int __always_unused irq, void *data)
static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data)
{
struct wx *wx = data;
+ u32 eicr;
+
+ eicr = wx_misc_isb(wx, WX_ISB_MISC);
+
+ if (unlikely(eicr & NGBE_PX_MISC_IC_TIMESYNC))
+ wx_ptp_check_pps_event(wx);
/* re-enable the original interrupt state, no lsc, no queues */
if (netif_running(wx->netdev))
@@ -317,6 +328,8 @@ void ngbe_down(struct wx *wx)
{
phylink_stop(wx->phylink);
ngbe_disable_device(wx);
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset(wx);
wx_clean_all_tx_rings(wx);
wx_clean_all_rx_rings(wx);
}
@@ -379,6 +392,8 @@ static int ngbe_open(struct net_device *netdev)
if (err)
goto err_dis_phy;
+ wx_ptp_init(wx);
+
ngbe_up(wx);
return 0;
@@ -407,6 +422,7 @@ static int ngbe_close(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
+ wx_ptp_stop(wx);
ngbe_down(wx);
wx_free_irq(wx);
wx_free_isb_resources(wx);
@@ -507,6 +523,8 @@ static const struct net_device_ops ngbe_netdev_ops = {
.ndo_get_stats64 = wx_get_stats64,
.ndo_vlan_rx_add_vid = wx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = wx_vlan_rx_kill_vid,
+ .ndo_hwtstamp_set = wx_hwtstamp_set,
+ .ndo_hwtstamp_get = wx_hwtstamp_get,
};
/**
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
index a5e9b779c44d..ea1d7e9a91f3 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
@@ -7,6 +7,7 @@
#include <linux/phy.h>
#include "../libwx/wx_type.h"
+#include "../libwx/wx_ptp.h"
#include "../libwx/wx_hw.h"
#include "ngbe_type.h"
#include "ngbe_mdio.h"
@@ -64,6 +65,11 @@ static void ngbe_mac_config(struct phylink_config *config, unsigned int mode,
static void ngbe_mac_link_down(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
+ struct wx *wx = phylink_to_wx(config);
+
+ wx->speed = SPEED_UNKNOWN;
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset_cyclecounter(wx);
}
static void ngbe_mac_link_up(struct phylink_config *config,
@@ -103,6 +109,11 @@ static void ngbe_mac_link_up(struct phylink_config *config,
wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
reg = rd32(wx, WX_MAC_WDG_TIMEOUT);
wr32(wx, WX_MAC_WDG_TIMEOUT, reg);
+
+ wx->speed = speed;
+ wx->last_rx_ptp_check = jiffies;
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset_cyclecounter(wx);
}
static const struct phylink_mac_ops ngbe_mac_ops = {
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
index f48ed7fc1805..992adbb98c7d 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
@@ -70,15 +70,20 @@
/* Extended Interrupt Enable Set */
#define NGBE_PX_MISC_IEN_DEV_RST BIT(10)
+#define NGBE_PX_MISC_IEN_TIMESYNC BIT(11)
#define NGBE_PX_MISC_IEN_ETH_LK BIT(18)
#define NGBE_PX_MISC_IEN_INT_ERR BIT(20)
#define NGBE_PX_MISC_IEN_GPIO BIT(26)
#define NGBE_PX_MISC_IEN_MASK ( \
NGBE_PX_MISC_IEN_DEV_RST | \
+ NGBE_PX_MISC_IEN_TIMESYNC | \
NGBE_PX_MISC_IEN_ETH_LK | \
NGBE_PX_MISC_IEN_INT_ERR | \
NGBE_PX_MISC_IEN_GPIO)
+/* Extended Interrupt Cause Read */
+#define NGBE_PX_MISC_IC_TIMESYNC BIT(11) /* time sync */
+
#define NGBE_INTR_ALL 0x1FF
#define NGBE_INTR_MISC BIT(0)
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
index d98314b26c19..78999d484f18 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
@@ -529,6 +529,8 @@ static const struct ethtool_ops txgbe_ethtool_ops = {
.set_rxnfc = txgbe_set_rxnfc,
.get_msglevel = wx_get_msglevel,
.set_msglevel = wx_set_msglevel,
+ .get_ts_info = wx_get_ts_info,
+ .get_ts_stats = wx_get_ptp_stats,
};
void txgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
index cd1372da92a9..4b9921b7bb11 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
@@ -197,6 +197,12 @@ int txgbe_reset_hw(struct wx *wx)
txgbe_reset_misc(wx);
+ if (wx->mac.type != wx_mac_sp) {
+ wr32(wx, TXGBE_PX_PF_BME, 0x1);
+ wr32m(wx, TXGBE_RDM_RSC_CTL, TXGBE_RDM_RSC_CTL_FREE_CTL,
+ TXGBE_RDM_RSC_CTL_FREE_CTL);
+ }
+
wx_clear_hw_cntrs(wx);
/* Store the permanent mac address */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
index 0ee73a265545..8658a51ee810 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
@@ -166,6 +166,9 @@ static void txgbe_del_irq_domain(struct txgbe *txgbe)
void txgbe_free_misc_irq(struct txgbe *txgbe)
{
+ if (txgbe->wx->mac.type == wx_mac_aml)
+ return;
+
free_irq(txgbe->link_irq, txgbe);
free_irq(txgbe->misc.irq, txgbe);
txgbe_del_irq_domain(txgbe);
@@ -177,6 +180,9 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
struct wx *wx = txgbe->wx;
int hwirq, err;
+ if (wx->mac.type == wx_mac_aml)
+ goto skip_sp_irq;
+
txgbe->misc.nirqs = 1;
txgbe->misc.domain = irq_domain_add_simple(NULL, txgbe->misc.nirqs, 0,
&txgbe_misc_irq_domain_ops, txgbe);
@@ -206,6 +212,7 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
if (err)
goto free_msic_irq;
+skip_sp_irq:
wx->misc_irq_domain = true;
return 0;
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index f77450268036..a2e245e3b016 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -13,6 +13,7 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
+#include "../libwx/wx_ptp.h"
#include "../libwx/wx_hw.h"
#include "txgbe_type.h"
#include "txgbe_hw.h"
@@ -34,6 +35,12 @@ char txgbe_driver_name[] = "txgbe";
static const struct pci_device_id txgbe_pci_tbl[] = {
{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_SP1000), 0},
{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_WX1820), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5010), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5110), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5025), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5125), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5040), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5140), 0},
/* required last entry */
{ .device = 0 }
};
@@ -89,7 +96,18 @@ static void txgbe_up_complete(struct wx *wx)
smp_mb__before_atomic();
wx_napi_enable_all(wx);
- phylink_start(wx->phylink);
+ if (wx->mac.type == wx_mac_aml) {
+ u32 reg;
+
+ reg = rd32(wx, TXGBE_AML_MAC_TX_CFG);
+ reg &= ~TXGBE_AML_MAC_TX_CFG_SPEED_MASK;
+ reg |= TXGBE_AML_MAC_TX_CFG_SPEED_25G;
+ wr32(wx, WX_MAC_TX_CFG, reg);
+ txgbe_enable_sec_tx_path(wx);
+ netif_carrier_on(wx->netdev);
+ } else {
+ phylink_start(wx->phylink);
+ }
/* clear any pending interrupts, may auto mask */
rd32(wx, WX_PX_IC(0));
@@ -116,6 +134,9 @@ static void txgbe_reset(struct wx *wx)
memcpy(old_addr, &wx->mac_table[0].addr, netdev->addr_len);
wx_flush_sw_mac_table(wx);
wx_mac_set_default_filter(wx, old_addr);
+
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset(wx);
}
static void txgbe_disable_device(struct wx *wx)
@@ -167,7 +188,10 @@ void txgbe_down(struct wx *wx)
{
txgbe_disable_device(wx);
txgbe_reset(wx);
- phylink_stop(wx->phylink);
+ if (wx->mac.type == wx_mac_aml)
+ netif_carrier_off(wx->netdev);
+ else
+ phylink_stop(wx->phylink);
wx_clean_all_tx_rings(wx);
wx_clean_all_rx_rings(wx);
@@ -176,6 +200,7 @@ void txgbe_down(struct wx *wx)
void txgbe_up(struct wx *wx)
{
wx_configure(wx);
+ wx_ptp_init(wx);
txgbe_up_complete(wx);
}
@@ -192,6 +217,14 @@ static void txgbe_init_type_code(struct wx *wx)
case TXGBE_DEV_ID_WX1820:
wx->mac.type = wx_mac_sp;
break;
+ case TXGBE_DEV_ID_AML5010:
+ case TXGBE_DEV_ID_AML5110:
+ case TXGBE_DEV_ID_AML5025:
+ case TXGBE_DEV_ID_AML5125:
+ case TXGBE_DEV_ID_AML5040:
+ case TXGBE_DEV_ID_AML5140:
+ wx->mac.type = wx_mac_aml;
+ break;
default:
wx->mac.type = wx_mac_unknown;
break;
@@ -265,6 +298,8 @@ static int txgbe_sw_init(struct wx *wx)
wx->atr = txgbe_atr;
wx->configure_fdir = txgbe_configure_fdir;
+ set_bit(WX_FLAG_RSC_CAPABLE, wx->flags);
+
/* enable itr by default in dynamic mode */
wx->rx_itr_setting = 1;
wx->tx_itr_setting = 1;
@@ -279,6 +314,17 @@ static int txgbe_sw_init(struct wx *wx)
wx->do_reset = txgbe_do_reset;
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ break;
+ case wx_mac_aml:
+ set_bit(WX_FLAG_SWFW_RING, wx->flags);
+ wx->swfw_index = 0;
+ break;
+ default:
+ break;
+ }
+
return 0;
}
@@ -321,6 +367,8 @@ static int txgbe_open(struct net_device *netdev)
if (err)
goto err_free_irq;
+ wx_ptp_init(wx);
+
txgbe_up_complete(wx);
return 0;
@@ -344,6 +392,7 @@ err_reset:
*/
static void txgbe_close_suspend(struct wx *wx)
{
+ wx_ptp_suspend(wx);
txgbe_disable_device(wx);
wx_free_resources(wx);
}
@@ -363,6 +412,7 @@ static int txgbe_close(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
+ wx_ptp_stop(wx);
txgbe_down(wx);
wx_free_irq(wx);
wx_free_resources(wx);
@@ -479,6 +529,8 @@ static const struct net_device_ops txgbe_netdev_ops = {
.ndo_get_stats64 = wx_get_stats64,
.ndo_vlan_rx_add_vid = wx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = wx_vlan_rx_kill_vid,
+ .ndo_hwtstamp_set = wx_hwtstamp_set,
+ .ndo_hwtstamp_get = wx_hwtstamp_get,
};
/**
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 1ae68f94dd49..85f022ceef4f 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -15,6 +15,7 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
+#include "../libwx/wx_ptp.h"
#include "../libwx/wx_hw.h"
#include "txgbe_type.h"
#include "txgbe_phy.h"
@@ -179,6 +180,10 @@ static void txgbe_mac_link_down(struct phylink_config *config,
struct wx *wx = phylink_to_wx(config);
wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0);
+
+ wx->speed = SPEED_UNKNOWN;
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset_cyclecounter(wx);
}
static void txgbe_mac_link_up(struct phylink_config *config,
@@ -215,6 +220,11 @@ static void txgbe_mac_link_up(struct phylink_config *config,
wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
wdg = rd32(wx, WX_MAC_WDG_TIMEOUT);
wr32(wx, WX_MAC_WDG_TIMEOUT, wdg);
+
+ wx->speed = speed;
+ wx->last_rx_ptp_check = jiffies;
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset_cyclecounter(wx);
}
static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode,
@@ -557,6 +567,9 @@ int txgbe_init_phy(struct txgbe *txgbe)
struct wx *wx = txgbe->wx;
int ret;
+ if (wx->mac.type == wx_mac_aml)
+ return 0;
+
if (txgbe->wx->media_type == sp_media_copper)
return txgbe_ext_phy_init(txgbe);
@@ -621,6 +634,9 @@ err_unregister_swnode:
void txgbe_remove_phy(struct txgbe *txgbe)
{
+ if (txgbe->wx->mac.type == wx_mac_aml)
+ return;
+
if (txgbe->wx->media_type == sp_media_copper) {
phylink_disconnect_phy(txgbe->wx->phylink);
phylink_destroy(txgbe->wx->phylink);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 629a13e96b85..9c1c26234cad 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -10,6 +10,12 @@
/* Device IDs */
#define TXGBE_DEV_ID_SP1000 0x1001
#define TXGBE_DEV_ID_WX1820 0x2001
+#define TXGBE_DEV_ID_AML5010 0x5010
+#define TXGBE_DEV_ID_AML5110 0x5110
+#define TXGBE_DEV_ID_AML5025 0x5025
+#define TXGBE_DEV_ID_AML5125 0x5125
+#define TXGBE_DEV_ID_AML5040 0x5040
+#define TXGBE_DEV_ID_AML5140 0x5140
/* Subsystem IDs */
/* SFP */
@@ -137,6 +143,14 @@
#define TXGBE_RDB_FDIR_FLEX_CFG_MSK BIT(2)
#define TXGBE_RDB_FDIR_FLEX_CFG_OFST(v) FIELD_PREP(GENMASK(7, 3), v)
+/*************************** Amber Lite Registers ****************************/
+#define TXGBE_PX_PF_BME 0x4B8
+#define TXGBE_AML_MAC_TX_CFG 0x11000
+#define TXGBE_AML_MAC_TX_CFG_SPEED_MASK GENMASK(30, 27)
+#define TXGBE_AML_MAC_TX_CFG_SPEED_25G BIT(28)
+#define TXGBE_RDM_RSC_CTL 0x1200C
+#define TXGBE_RDM_RSC_CTL_FREE_CTL BIT(7)
+
/* Checksum and EEPROM pointers */
#define TXGBE_EEPROM_LAST_WORD 0x800
#define TXGBE_EEPROM_CHECKSUM 0x2F
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 35d96c633a33..7502214cc7d5 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -28,6 +28,7 @@ config XILINX_AXI_EMAC
depends on HAS_IOMEM
depends on XILINX_DMA
select PHYLINK
+ select DIMLIB
help
This driver supports the 10/100/1000 Ethernet from Xilinx for the
AXI bus interface used in Xilinx Virtex FPGAs and Soc's.
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index a3f4f3e42587..5ff742103beb 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -9,6 +9,7 @@
#ifndef XILINX_AXIENET_H
#define XILINX_AXIENET_H
+#include <linux/dim.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
@@ -112,9 +113,6 @@
#define XAXIDMA_DELAY_MASK 0xFF000000 /* Delay timeout counter */
#define XAXIDMA_COALESCE_MASK 0x00FF0000 /* Coalesce counter */
-#define XAXIDMA_DELAY_SHIFT 24
-#define XAXIDMA_COALESCE_SHIFT 16
-
#define XAXIDMA_IRQ_IOC_MASK 0x00001000 /* Completion intr */
#define XAXIDMA_IRQ_DELAY_MASK 0x00002000 /* Delay interrupt */
#define XAXIDMA_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */
@@ -126,8 +124,7 @@
/* Default TX/RX Threshold and delay timer values for SGDMA mode */
#define XAXIDMA_DFT_TX_THRESHOLD 24
#define XAXIDMA_DFT_TX_USEC 50
-#define XAXIDMA_DFT_RX_THRESHOLD 1
-#define XAXIDMA_DFT_RX_USEC 50
+#define XAXIDMA_DFT_RX_USEC 16
#define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */
#define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */
@@ -487,7 +484,12 @@ struct skbuf_dma_descriptor {
* @regs: Base address for the axienet_local device address space
* @dma_regs: Base address for the axidma device address space
* @napi_rx: NAPI RX control structure
+ * @rx_dim: DIM state for the receive queue
+ * @rx_dim_enabled: Whether DIM is enabled or not
+ * @rx_irqs: Number of RX interrupts received (used as the DIM event counter)
+ * @rx_cr_lock: Lock protecting @rx_dma_cr, its register, and @rx_dma_started
* @rx_dma_cr: Nominal content of RX DMA control register
+ * @rx_dma_started: Set when RX DMA is started
* @rx_bd_v: Virtual address of the RX buffer descriptor ring
* @rx_bd_p: Physical address(start address) of the RX buffer descr. ring
* @rx_bd_num: Size of RX buffer descriptor ring
@@ -497,7 +499,9 @@ struct skbuf_dma_descriptor {
* @rx_bytes: RX byte count for statistics
* @rx_stat_sync: Synchronization object for RX stats
* @napi_tx: NAPI TX control structure
+ * @tx_cr_lock: Lock protecting @tx_dma_cr, its register, and @tx_dma_started
* @tx_dma_cr: Nominal content of TX DMA control register
+ * @tx_dma_started: Set when TX DMA is started
* @tx_bd_v: Virtual address of the TX buffer descriptor ring
* @tx_bd_p: Physical address(start address) of the TX buffer descr. ring
* @tx_bd_num: Size of TX buffer descriptor ring
@@ -532,10 +536,6 @@ struct skbuf_dma_descriptor {
* supported, the maximum frame size would be 9k. Else it is
* 1522 bytes (assuming support for basic VLAN)
* @rxmem: Stores rx memory size for jumbo frame handling.
- * @coalesce_count_rx: Store the irq coalesce on RX side.
- * @coalesce_usec_rx: IRQ coalesce delay for RX
- * @coalesce_count_tx: Store the irq coalesce on TX side.
- * @coalesce_usec_tx: IRQ coalesce delay for TX
* @use_dmaengine: flag to check dmaengine framework usage.
* @tx_chan: TX DMA channel.
* @rx_chan: RX DMA channel.
@@ -569,7 +569,12 @@ struct axienet_local {
void __iomem *dma_regs;
struct napi_struct napi_rx;
+ struct dim rx_dim;
+ bool rx_dim_enabled;
+ u16 rx_irqs;
+ spinlock_t rx_cr_lock;
u32 rx_dma_cr;
+ bool rx_dma_started;
struct axidma_bd *rx_bd_v;
dma_addr_t rx_bd_p;
u32 rx_bd_num;
@@ -579,7 +584,9 @@ struct axienet_local {
struct u64_stats_sync rx_stat_sync;
struct napi_struct napi_tx;
+ spinlock_t tx_cr_lock;
u32 tx_dma_cr;
+ bool tx_dma_started;
struct axidma_bd *tx_bd_v;
dma_addr_t tx_bd_p;
u32 tx_bd_num;
@@ -610,10 +617,6 @@ struct axienet_local {
u32 max_frm_size;
u32 rxmem;
- u32 coalesce_count_rx;
- u32 coalesce_usec_rx;
- u32 coalesce_count_tx;
- u32 coalesce_usec_tx;
u8 use_dmaengine;
struct dma_chan *tx_chan;
struct dma_chan *rx_chan;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index f33178f90c42..054abf283ab3 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -223,23 +223,62 @@ static void axienet_dma_bd_release(struct net_device *ndev)
lp->rx_bd_p);
}
+static u64 axienet_dma_rate(struct axienet_local *lp)
+{
+ if (lp->axi_clk)
+ return clk_get_rate(lp->axi_clk);
+ return 125000000; /* arbitrary guess if no clock rate set */
+}
+
/**
- * axienet_usec_to_timer - Calculate IRQ delay timer value
- * @lp: Pointer to the axienet_local structure
- * @coalesce_usec: Microseconds to convert into timer value
+ * axienet_calc_cr() - Calculate control register value
+ * @lp: Device private data
+ * @count: Number of completions before an interrupt
+ * @usec: Microseconds after the last completion before an interrupt
+ *
+ * Calculate a control register value based on the coalescing settings. The
+ * run/stop bit is not set.
*/
-static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
+static u32 axienet_calc_cr(struct axienet_local *lp, u32 count, u32 usec)
{
- u32 result;
- u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
+ u32 cr;
- if (lp->axi_clk)
- clk_rate = clk_get_rate(lp->axi_clk);
+ cr = FIELD_PREP(XAXIDMA_COALESCE_MASK, count) | XAXIDMA_IRQ_IOC_MASK |
+ XAXIDMA_IRQ_ERROR_MASK;
+ /* Only set interrupt delay timer if not generating an interrupt on
+ * the first packet. Otherwise leave at 0 to disable delay interrupt.
+ */
+ if (count > 1) {
+ u64 clk_rate = axienet_dma_rate(lp);
+ u32 timer;
+
+ /* 1 Timeout Interval = 125 * (clock period of SG clock) */
+ timer = DIV64_U64_ROUND_CLOSEST((u64)usec * clk_rate,
+ XAXIDMA_DELAY_SCALE);
- /* 1 Timeout Interval = 125 * (clock period of SG clock) */
- result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
- XAXIDMA_DELAY_SCALE);
- return min(result, FIELD_MAX(XAXIDMA_DELAY_MASK));
+ timer = min(timer, FIELD_MAX(XAXIDMA_DELAY_MASK));
+ cr |= FIELD_PREP(XAXIDMA_DELAY_MASK, timer) |
+ XAXIDMA_IRQ_DELAY_MASK;
+ }
+
+ return cr;
+}
+
+/**
+ * axienet_coalesce_params() - Extract coalesce parameters from the CR
+ * @lp: Device private data
+ * @cr: The control register to parse
+ * @count: Number of packets before an interrupt
+ * @usec: Idle time (in usec) before an interrupt
+ */
+static void axienet_coalesce_params(struct axienet_local *lp, u32 cr,
+ u32 *count, u32 *usec)
+{
+ u64 clk_rate = axienet_dma_rate(lp);
+ u64 timer = FIELD_GET(XAXIDMA_DELAY_MASK, cr);
+
+ *count = FIELD_GET(XAXIDMA_COALESCE_MASK, cr);
+ *usec = DIV64_U64_ROUND_CLOSEST(timer * XAXIDMA_DELAY_SCALE, clk_rate);
}
/**
@@ -248,30 +287,12 @@ static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
*/
static void axienet_dma_start(struct axienet_local *lp)
{
+ spin_lock_irq(&lp->rx_cr_lock);
+
/* Start updating the Rx channel control register */
- lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
- XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
- /* Only set interrupt delay timer if not generating an interrupt on
- * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
- */
- if (lp->coalesce_count_rx > 1)
- lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
- << XAXIDMA_DELAY_SHIFT) |
- XAXIDMA_IRQ_DELAY_MASK;
+ lp->rx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
- /* Start updating the Tx channel control register */
- lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
- XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
- /* Only set interrupt delay timer if not generating an interrupt on
- * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
- */
- if (lp->coalesce_count_tx > 1)
- lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
- << XAXIDMA_DELAY_SHIFT) |
- XAXIDMA_IRQ_DELAY_MASK;
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
-
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
* halted state. This will make the Rx side ready for reception.
*/
@@ -280,6 +301,14 @@ static void axienet_dma_start(struct axienet_local *lp)
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
(sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
+ lp->rx_dma_started = true;
+
+ spin_unlock_irq(&lp->rx_cr_lock);
+ spin_lock_irq(&lp->tx_cr_lock);
+
+ /* Start updating the Tx channel control register */
+ lp->tx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
@@ -288,6 +317,9 @@ static void axienet_dma_start(struct axienet_local *lp)
axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
+ lp->tx_dma_started = true;
+
+ spin_unlock_irq(&lp->tx_cr_lock);
}
/**
@@ -623,14 +655,22 @@ static void axienet_dma_stop(struct axienet_local *lp)
int count;
u32 cr, sr;
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ spin_lock_irq(&lp->rx_cr_lock);
+
+ cr = lp->rx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ lp->rx_dma_started = false;
+
+ spin_unlock_irq(&lp->rx_cr_lock);
synchronize_irq(lp->rx_irq);
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ spin_lock_irq(&lp->tx_cr_lock);
+
+ cr = lp->tx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ lp->tx_dma_started = false;
+
+ spin_unlock_irq(&lp->tx_cr_lock);
synchronize_irq(lp->tx_irq);
/* Give DMAs a chance to halt gracefully */
@@ -962,6 +1002,7 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget)
&size, budget);
if (packets) {
+ netdev_completed_queue(ndev, packets, size);
u64_stats_update_begin(&lp->tx_stat_sync);
u64_stats_add(&lp->tx_packets, packets);
u64_stats_add(&lp->tx_bytes, size);
@@ -979,7 +1020,9 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget)
* cause an immediate interrupt if any TX packets are
* already pending.
*/
+ spin_lock_irq(&lp->tx_cr_lock);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
+ spin_unlock_irq(&lp->tx_cr_lock);
}
return packets;
}
@@ -1083,6 +1126,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (++new_tail_ptr >= lp->tx_bd_num)
new_tail_ptr = 0;
WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
+ netdev_sent_queue(ndev, skb->len);
/* Start the transfer */
axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
@@ -1241,11 +1285,25 @@ static int axienet_rx_poll(struct napi_struct *napi, int budget)
axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
if (packets < budget && napi_complete_done(napi, packets)) {
+ if (READ_ONCE(lp->rx_dim_enabled)) {
+ struct dim_sample sample = {
+ .time = ktime_get(),
+ /* Safe because we are the only writer */
+ .pkt_ctr = u64_stats_read(&lp->rx_packets),
+ .byte_ctr = u64_stats_read(&lp->rx_bytes),
+ .event_ctr = READ_ONCE(lp->rx_irqs),
+ };
+
+ net_dim(&lp->rx_dim, &sample);
+ }
+
/* Re-enable RX completion interrupts. This should
* cause an immediate interrupt if any RX packets are
* already pending.
*/
+ spin_lock_irq(&lp->rx_cr_lock);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
+ spin_unlock_irq(&lp->rx_cr_lock);
}
return packets;
}
@@ -1283,11 +1341,14 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
/* Disable further TX completion interrupts and schedule
* NAPI to handle the completions.
*/
- u32 cr = lp->tx_dma_cr;
-
- cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
if (napi_schedule_prep(&lp->napi_tx)) {
+ u32 cr;
+
+ spin_lock(&lp->tx_cr_lock);
+ cr = lp->tx_dma_cr;
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ spin_unlock(&lp->tx_cr_lock);
__napi_schedule(&lp->napi_tx);
}
}
@@ -1328,11 +1389,16 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
/* Disable further RX completion interrupts and schedule
* NAPI receive.
*/
- u32 cr = lp->rx_dma_cr;
-
- cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+ WRITE_ONCE(lp->rx_irqs, READ_ONCE(lp->rx_irqs) + 1);
if (napi_schedule_prep(&lp->napi_rx)) {
+ u32 cr;
+
+ spin_lock(&lp->rx_cr_lock);
+ cr = lp->rx_dma_cr;
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ spin_unlock(&lp->rx_cr_lock);
+
__napi_schedule(&lp->napi_rx);
}
}
@@ -1625,6 +1691,7 @@ err_free_eth_irq:
if (lp->eth_irq > 0)
free_irq(lp->eth_irq, ndev);
err_phy:
+ cancel_work_sync(&lp->rx_dim.work);
cancel_delayed_work_sync(&lp->stats_work);
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
@@ -1654,6 +1721,7 @@ static int axienet_stop(struct net_device *ndev)
napi_disable(&lp->napi_rx);
}
+ cancel_work_sync(&lp->rx_dim.work);
cancel_delayed_work_sync(&lp->stats_work);
phylink_stop(lp->phylink);
@@ -1685,6 +1753,7 @@ static int axienet_stop(struct net_device *ndev)
dma_release_channel(lp->tx_chan);
}
+ netdev_reset_queue(ndev);
axienet_iow(lp, XAE_IE_OFFSET, 0);
if (lp->eth_irq > 0)
@@ -1999,6 +2068,87 @@ axienet_ethtools_set_pauseparam(struct net_device *ndev,
}
/**
+ * axienet_update_coalesce_rx() - Set RX CR
+ * @lp: Device private data
+ * @cr: Value to write to the RX CR
+ * @mask: Bits to set from @cr
+ */
+static void axienet_update_coalesce_rx(struct axienet_local *lp, u32 cr,
+ u32 mask)
+{
+ spin_lock_irq(&lp->rx_cr_lock);
+ lp->rx_dma_cr &= ~mask;
+ lp->rx_dma_cr |= cr;
+ /* If DMA isn't started, then the settings will be applied the next
+ * time dma_start() is called.
+ */
+ if (lp->rx_dma_started) {
+ u32 reg = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+
+ /* Don't enable IRQs if they are disabled by NAPI */
+ if (reg & XAXIDMA_IRQ_ALL_MASK)
+ cr = lp->rx_dma_cr;
+ else
+ cr = lp->rx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ }
+ spin_unlock_irq(&lp->rx_cr_lock);
+}
+
+/**
+ * axienet_dim_coalesce_count_rx() - RX coalesce count for DIM
+ * @lp: Device private data
+ */
+static u32 axienet_dim_coalesce_count_rx(struct axienet_local *lp)
+{
+ return min(1 << (lp->rx_dim.profile_ix << 1), 255);
+}
+
+/**
+ * axienet_rx_dim_work() - Adjust RX DIM settings
+ * @work: The work struct
+ */
+static void axienet_rx_dim_work(struct work_struct *work)
+{
+ struct axienet_local *lp =
+ container_of(work, struct axienet_local, rx_dim.work);
+ u32 cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp), 0);
+ u32 mask = XAXIDMA_COALESCE_MASK | XAXIDMA_IRQ_IOC_MASK |
+ XAXIDMA_IRQ_ERROR_MASK;
+
+ axienet_update_coalesce_rx(lp, cr, mask);
+ lp->rx_dim.state = DIM_START_MEASURE;
+}
+
+/**
+ * axienet_update_coalesce_tx() - Set TX CR
+ * @lp: Device private data
+ * @cr: Value to write to the TX CR
+ * @mask: Bits to set from @cr
+ */
+static void axienet_update_coalesce_tx(struct axienet_local *lp, u32 cr,
+ u32 mask)
+{
+ spin_lock_irq(&lp->tx_cr_lock);
+ lp->tx_dma_cr &= ~mask;
+ lp->tx_dma_cr |= cr;
+ /* If DMA isn't started, then the settings will be applied the next
+ * time dma_start() is called.
+ */
+ if (lp->tx_dma_started) {
+ u32 reg = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+
+ /* Don't enable IRQs if they are disabled by NAPI */
+ if (reg & XAXIDMA_IRQ_ALL_MASK)
+ cr = lp->tx_dma_cr;
+ else
+ cr = lp->tx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ }
+ spin_unlock_irq(&lp->tx_cr_lock);
+}
+
+/**
* axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
* @ndev: Pointer to net_device structure
* @ecoalesce: Pointer to ethtool_coalesce structure
@@ -2018,11 +2168,23 @@ axienet_ethtools_get_coalesce(struct net_device *ndev,
struct netlink_ext_ack *extack)
{
struct axienet_local *lp = netdev_priv(ndev);
-
- ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
- ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
- ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
- ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
+ u32 cr;
+
+ ecoalesce->use_adaptive_rx_coalesce = lp->rx_dim_enabled;
+
+ spin_lock_irq(&lp->rx_cr_lock);
+ cr = lp->rx_dma_cr;
+ spin_unlock_irq(&lp->rx_cr_lock);
+ axienet_coalesce_params(lp, cr,
+ &ecoalesce->rx_max_coalesced_frames,
+ &ecoalesce->rx_coalesce_usecs);
+
+ spin_lock_irq(&lp->tx_cr_lock);
+ cr = lp->tx_dma_cr;
+ spin_unlock_irq(&lp->tx_cr_lock);
+ axienet_coalesce_params(lp, cr,
+ &ecoalesce->tx_max_coalesced_frames,
+ &ecoalesce->tx_coalesce_usecs);
return 0;
}
@@ -2046,12 +2208,9 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
struct netlink_ext_ack *extack)
{
struct axienet_local *lp = netdev_priv(ndev);
-
- if (netif_running(ndev)) {
- NL_SET_ERR_MSG(extack,
- "Please stop netif before applying configuration");
- return -EBUSY;
- }
+ bool new_dim = ecoalesce->use_adaptive_rx_coalesce;
+ bool old_dim = lp->rx_dim_enabled;
+ u32 cr, mask = ~XAXIDMA_CR_RUNSTOP_MASK;
if (ecoalesce->rx_max_coalesced_frames > 255 ||
ecoalesce->tx_max_coalesced_frames > 255) {
@@ -2065,7 +2224,7 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
return -EINVAL;
}
- if ((ecoalesce->rx_max_coalesced_frames > 1 &&
+ if (((ecoalesce->rx_max_coalesced_frames > 1 || new_dim) &&
!ecoalesce->rx_coalesce_usecs) ||
(ecoalesce->tx_max_coalesced_frames > 1 &&
!ecoalesce->tx_coalesce_usecs)) {
@@ -2074,11 +2233,31 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
return -EINVAL;
}
- lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
- lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
- lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
- lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
+ if (new_dim && !old_dim) {
+ cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
+ ecoalesce->rx_coalesce_usecs);
+ } else if (!new_dim) {
+ if (old_dim) {
+ WRITE_ONCE(lp->rx_dim_enabled, false);
+ napi_synchronize(&lp->napi_rx);
+ flush_work(&lp->rx_dim.work);
+ }
+
+ cr = axienet_calc_cr(lp, ecoalesce->rx_max_coalesced_frames,
+ ecoalesce->rx_coalesce_usecs);
+ } else {
+ /* Dummy value for count just to calculate timer */
+ cr = axienet_calc_cr(lp, 2, ecoalesce->rx_coalesce_usecs);
+ mask = XAXIDMA_DELAY_MASK | XAXIDMA_IRQ_DELAY_MASK;
+ }
+
+ axienet_update_coalesce_rx(lp, cr, mask);
+ if (new_dim && !old_dim)
+ WRITE_ONCE(lp->rx_dim_enabled, true);
+ cr = axienet_calc_cr(lp, ecoalesce->tx_max_coalesced_frames,
+ ecoalesce->tx_coalesce_usecs);
+ axienet_update_coalesce_tx(lp, cr, ~XAXIDMA_CR_RUNSTOP_MASK);
return 0;
}
@@ -2316,7 +2495,8 @@ axienet_ethtool_get_rmon_stats(struct net_device *dev,
static const struct ethtool_ops axienet_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
- ETHTOOL_COALESCE_USECS,
+ ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = axienet_ethtools_get_drvinfo,
.get_regs_len = axienet_ethtools_get_regs_len,
.get_regs = axienet_ethtools_get_regs,
@@ -2499,6 +2679,7 @@ static void axienet_dma_err_handler(struct work_struct *work)
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
axienet_dma_stop(lp);
+ netdev_reset_queue(ndev);
for (i = 0; i < lp->tx_bd_num; i++) {
cur_p = &lp->tx_bd_v[i];
@@ -2858,10 +3039,15 @@ static int axienet_probe(struct platform_device *pdev)
axienet_set_mac_address(ndev, NULL);
}
- lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
- lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
- lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
- lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
+ spin_lock_init(&lp->rx_cr_lock);
+ spin_lock_init(&lp->tx_cr_lock);
+ INIT_WORK(&lp->rx_dim.work, axienet_rx_dim_work);
+ lp->rx_dim_enabled = true;
+ lp->rx_dim.profile_ix = 1;
+ lp->rx_dma_cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
+ XAXIDMA_DFT_RX_USEC);
+ lp->tx_dma_cr = axienet_calc_cr(lp, XAXIDMA_DFT_TX_THRESHOLD,
+ XAXIDMA_DFT_TX_USEC);
ret = axienet_mdio_setup(lp);
if (ret)
@@ -2891,7 +3077,6 @@ static int axienet_probe(struct platform_device *pdev)
}
of_node_put(np);
lp->pcs.ops = &axienet_pcs_ops;
- lp->pcs.neg_mode = true;
lp->pcs.poll = true;
}
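The DIM work added above derives the RX frame-coalesce count from the profile index as min(1 << (profile_ix << 1), 255). A minimal standalone sketch of that mapping (plain C, not driver code; the helper name is only for illustration):

#include <stdio.h>

/* Same mapping as axienet_dim_coalesce_count_rx(): 1, 4, 16, 64, then
 * saturating at 255 frames.
 */
static unsigned int dim_coalesce_count_rx(unsigned int profile_ix)
{
	unsigned int count = 1u << (profile_ix << 1);

	return count < 255 ? count : 255;
}

int main(void)
{
	unsigned int ix;

	for (ix = 0; ix < 6; ix++)
		printf("profile_ix=%u -> frames=%u\n", ix, dim_coalesce_count_rx(ix));
	return 0;
}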
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index dbb3960126ee..66e38ce9cd1d 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -18,6 +18,7 @@
#include <net/rtnetlink.h>
#include <net/geneve.h>
#include <net/gro.h>
+#include <net/netdev_lock.h>
#include <net/protocol.h>
#define GENEVE_NETDEV_VER "0.6"
@@ -57,6 +58,8 @@ struct geneve_config {
bool ttl_inherit;
enum ifla_geneve_df df;
bool inner_proto_inherit;
+ u16 port_min;
+ u16 port_max;
};
/* Pseudo network device */
@@ -835,7 +838,9 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
use_cache = ip_tunnel_dst_cache_usable(skb, info);
tos = geneve_get_dsfield(skb, dev, info, &use_cache);
- sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+ sport = udp_flow_src_port(geneve->net, skb,
+ geneve->cfg.port_min,
+ geneve->cfg.port_max, true);
rt = udp_tunnel_dst_lookup(skb, dev, geneve->net, 0, &saddr,
&info->key,
@@ -945,7 +950,9 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
use_cache = ip_tunnel_dst_cache_usable(skb, info);
prio = geneve_get_dsfield(skb, dev, info, &use_cache);
- sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+ sport = udp_flow_src_port(geneve->net, skb,
+ geneve->cfg.port_min,
+ geneve->cfg.port_max, true);
dst = udp_tunnel6_dst_lookup(skb, dev, geneve->net, gs6->sock, 0,
&saddr, key, sport,
@@ -1084,7 +1091,8 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
use_cache = ip_tunnel_dst_cache_usable(skb, info);
tos = geneve_get_dsfield(skb, dev, info, &use_cache);
sport = udp_flow_src_port(geneve->net, skb,
- 1, USHRT_MAX, true);
+ geneve->cfg.port_min,
+ geneve->cfg.port_max, true);
rt = udp_tunnel_dst_lookup(skb, dev, geneve->net, 0, &saddr,
&info->key,
@@ -1110,7 +1118,8 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
use_cache = ip_tunnel_dst_cache_usable(skb, info);
prio = geneve_get_dsfield(skb, dev, info, &use_cache);
sport = udp_flow_src_port(geneve->net, skb,
- 1, USHRT_MAX, true);
+ geneve->cfg.port_min,
+ geneve->cfg.port_max, true);
dst = udp_tunnel6_dst_lookup(skb, dev, geneve->net, gs6->sock, 0,
&saddr, &info->key, sport,
@@ -1234,6 +1243,7 @@ static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
[IFLA_GENEVE_TTL_INHERIT] = { .type = NLA_U8 },
[IFLA_GENEVE_DF] = { .type = NLA_U8 },
[IFLA_GENEVE_INNER_PROTO_INHERIT] = { .type = NLA_FLAG },
+ [IFLA_GENEVE_PORT_RANGE] = NLA_POLICY_EXACT_LEN(sizeof(struct ifla_geneve_port_range)),
};
static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -1279,6 +1289,17 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
}
}
+ if (data[IFLA_GENEVE_PORT_RANGE]) {
+ const struct ifla_geneve_port_range *p;
+
+ p = nla_data(data[IFLA_GENEVE_PORT_RANGE]);
+ if (ntohs(p->high) < ntohs(p->low)) {
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_PORT_RANGE],
+ "Invalid source port range");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
@@ -1506,6 +1527,18 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
info->key.tp_dst = nla_get_be16(data[IFLA_GENEVE_PORT]);
}
+ if (data[IFLA_GENEVE_PORT_RANGE]) {
+ const struct ifla_geneve_port_range *p;
+
+ if (changelink) {
+ attrtype = IFLA_GENEVE_PORT_RANGE;
+ goto change_notsup;
+ }
+ p = nla_data(data[IFLA_GENEVE_PORT_RANGE]);
+ cfg->port_min = ntohs(p->low);
+ cfg->port_max = ntohs(p->high);
+ }
+
if (data[IFLA_GENEVE_COLLECT_METADATA]) {
if (changelink) {
attrtype = IFLA_GENEVE_COLLECT_METADATA;
@@ -1614,15 +1647,20 @@ static void geneve_link_config(struct net_device *dev,
geneve_change_mtu(dev, ldev_mtu - info->options_len);
}
-static int geneve_newlink(struct net *net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int geneve_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct geneve_config cfg = {
.df = GENEVE_DF_UNSET,
.use_udp6_rx_checksums = false,
.ttl_inherit = false,
.collect_md = false,
+ .port_min = 1,
+ .port_max = USHRT_MAX,
};
int err;
@@ -1631,7 +1669,7 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
if (err)
return err;
- err = geneve_configure(net, dev, extack, &cfg);
+ err = geneve_configure(link_net, dev, extack, &cfg);
if (err)
return err;
@@ -1741,6 +1779,7 @@ static size_t geneve_get_size(const struct net_device *dev)
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_RX */
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL_INHERIT */
nla_total_size(0) + /* IFLA_GENEVE_INNER_PROTO_INHERIT */
+ nla_total_size(sizeof(struct ifla_geneve_port_range)) + /* IFLA_GENEVE_PORT_RANGE */
0;
}
@@ -1750,6 +1789,10 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
struct ip_tunnel_info *info = &geneve->cfg.info;
bool ttl_inherit = geneve->cfg.ttl_inherit;
bool metadata = geneve->cfg.collect_md;
+ struct ifla_geneve_port_range ports = {
+ .low = htons(geneve->cfg.port_min),
+ .high = htons(geneve->cfg.port_max),
+ };
__u8 tmp_vni[3];
__u32 vni;
@@ -1806,6 +1849,9 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_flag(skb, IFLA_GENEVE_INNER_PROTO_INHERIT))
goto nla_put_failure;
+ if (nla_put(skb, IFLA_GENEVE_PORT_RANGE, sizeof(ports), &ports))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
@@ -1838,6 +1884,8 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
.use_udp6_rx_checksums = true,
.ttl_inherit = false,
.collect_md = true,
+ .port_min = 1,
+ .port_max = USHRT_MAX,
};
memset(tb, 0, sizeof(tb));
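With IFLA_GENEVE_PORT_RANGE configured, udp_flow_src_port() is now bounded by cfg.port_min/cfg.port_max instead of the fixed 1..USHRT_MAX window. A rough standalone model of that selection (an assumption for illustration; the kernel's exact flow hash and scaling differ):

#include <stdint.h>
#include <stdio.h>

/* Scale a 32-bit flow hash into the configured source-port window. */
static uint16_t pick_src_port(uint32_t flow_hash, uint16_t port_min,
			      uint16_t port_max)
{
	uint32_t span = (uint32_t)port_max - port_min + 1;

	return (uint16_t)((((uint64_t)flow_hash * span) >> 32) + port_min);
}

int main(void)
{
	/* e.g. a range restricted via IFLA_GENEVE_PORT_RANGE */
	printf("src port: %u\n", pick_src_port(0xdeadbeefu, 49152, 65535));
	return 0;
}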
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index b7b46c5e6399..ef793607890d 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1462,10 +1462,12 @@ static int gtp_create_sockets(struct gtp_dev *gtp, const struct nlattr *nla,
#define GTP_TH_MAXLEN (sizeof(struct udphdr) + sizeof(struct gtp0_header))
#define GTP_IPV6_MAXLEN (sizeof(struct ipv6hdr) + GTP_TH_MAXLEN)
-static int gtp_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int gtp_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
+ struct nlattr **data = params->data;
unsigned int role = GTP_ROLE_GGSN;
struct gtp_dev *gtp;
struct gtp_net *gn;
@@ -1496,7 +1498,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
gtp->restart_count = nla_get_u8_default(data[IFLA_GTP_RESTART_COUNT],
0);
- gtp->net = src_net;
+ gtp->net = link_net;
err = gtp_hashtable_new(gtp, hashsize);
if (err < 0)
@@ -1526,7 +1528,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
goto out_encap;
}
- gn = net_generic(src_net, gtp_net_id);
+ gn = net_generic(link_net, gtp_net_id);
list_add(&gtp->list, &gn->gtp_dev_list);
dev->priv_destructor = gtp_destructor;
diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c
index 00ebc25d0b22..f03797103c6a 100644
--- a/drivers/net/hamradio/baycom_par.c
+++ b/drivers/net/hamradio/baycom_par.c
@@ -427,7 +427,7 @@ static int baycom_ioctl(struct net_device *dev, void __user *data,
break;
case HDLCDRVCTL_GETMODE:
- strcpy(hi->data.modename, bc->options ? "par96" : "picpar");
+ strscpy(hi->data.modename, bc->options ? "par96" : "picpar");
if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
@@ -439,7 +439,7 @@ static int baycom_ioctl(struct net_device *dev, void __user *data,
return baycom_setmode(bc, hi->data.modename);
case HDLCDRVCTL_MODELIST:
- strcpy(hi->data.modename, "par96,picpar");
+ strscpy(hi->data.modename, "par96,picpar");
if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index 799f8ece7824..ee5bd3c12040 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -531,7 +531,7 @@ static int baycom_ioctl(struct net_device *dev, void __user *data,
return baycom_setmode(bc, hi->data.modename);
case HDLCDRVCTL_MODELIST:
- strcpy(hi->data.modename, "ser12,ser3,ser24");
+ strscpy(hi->data.modename, "ser12,ser3,ser24");
if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c
index 5d1ab4840753..05bdad214799 100644
--- a/drivers/net/hamradio/baycom_ser_hdx.c
+++ b/drivers/net/hamradio/baycom_ser_hdx.c
@@ -570,7 +570,7 @@ static int baycom_ioctl(struct net_device *dev, void __user *data,
break;
case HDLCDRVCTL_GETMODE:
- strcpy(hi->data.modename, "ser12");
+ strscpy(hi->data.modename, "ser12");
if (bc->opt_dcd <= 0)
strcat(hi->data.modename, (!bc->opt_dcd) ? "*" : (bc->opt_dcd == -2) ? "@" : "+");
if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
@@ -584,7 +584,7 @@ static int baycom_ioctl(struct net_device *dev, void __user *data,
return baycom_setmode(bc, hi->data.modename);
case HDLCDRVCTL_MODELIST:
- strcpy(hi->data.modename, "ser12");
+ strscpy(hi->data.modename, "ser12");
if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index bac1bb69d63a..0e0fe32d2da4 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -77,6 +77,7 @@
#include <net/ip.h>
#include <net/arp.h>
+#include <net/netdev_lock.h>
#include <net/net_namespace.h>
#include <linux/bpqether.h>
@@ -107,27 +108,6 @@ struct bpqdev {
static LIST_HEAD(bpq_devices);
-/*
- * bpqether network devices are paired with ethernet devices below them, so
- * form a special "super class" of normal ethernet devices; split their locks
- * off into a separate class since they always nest.
- */
-static struct lock_class_key bpq_netdev_xmit_lock_key;
-static struct lock_class_key bpq_netdev_addr_lock_key;
-
-static void bpq_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock, &bpq_netdev_xmit_lock_key);
-}
-
-static void bpq_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock, &bpq_netdev_addr_lock_key);
- netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL);
-}
-
/* ------------------------------------------------------------------------ */
@@ -454,6 +434,8 @@ static const struct net_device_ops bpq_netdev_ops = {
static void bpq_setup(struct net_device *dev)
{
+ netdev_lockdep_set_classes(dev);
+
dev->netdev_ops = &bpq_netdev_ops;
dev->needs_free_netdev = true;
@@ -499,7 +481,6 @@ static int bpq_new_device(struct net_device *edev)
err = register_netdevice(ndev);
if (err)
goto error;
- bpq_set_lockdep_class(ndev);
/* List protected by RTNL */
list_add_rcu(&bpq->bpq_list, &bpq_devices);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 234db693cefa..70f7cb383228 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -1166,6 +1166,8 @@ struct netvsc_device {
u32 max_chn;
u32 num_chn;
+ u32 netvsc_gso_max_size;
+
atomic_t open_chn;
struct work_struct subchan_work;
wait_queue_head_t subchan_open;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index d6c4abfc3a28..c51b318b8a72 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -29,6 +29,7 @@
#include <linux/bpf.h>
#include <net/arp.h>
+#include <net/netdev_lock.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
@@ -2461,6 +2462,21 @@ static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event)
} else {
netdev_info(ndev, "Data path switched %s VF: %s\n",
vf_is_up ? "to" : "from", vf_netdev->name);
+
+ /* In Azure, when accelerated networking is enabled, other NICs
+ * like MANA and MLX are configured as a bonded NIC with the
+ * Netvsc (failover) NIC. For bonded NICs, the minimum of the
+ * members' maximum packet aggregate sizes is propagated in the
+ * stack. In order to allow these NICs (MANA/MLX) to use up to
+ * GSO_MAX_SIZE gso packet size, we need to allow the Netvsc NIC
+ * to also support this in the guest.
+ * This value is only increased for the netvsc NIC when the
+ * datapath is switched over to the VF.
+ */
+ if (vf_is_up)
+ netif_set_tso_max_size(ndev, vf_netdev->tso_max_size);
+ else
+ netif_set_tso_max_size(ndev, netvsc_dev->netvsc_gso_max_size);
}
return NOTIFY_OK;
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index c0ceeef4fcd8..82747dfacd70 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1356,9 +1356,10 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
struct net_device_context *net_device_ctx = netdev_priv(net);
struct ndis_offload hwcaps;
struct ndis_offload_params offloads;
- unsigned int gso_max_size = GSO_LEGACY_MAX_SIZE;
int ret;
+ nvdev->netvsc_gso_max_size = GSO_LEGACY_MAX_SIZE;
+
/* Find HW offload capabilities */
ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps);
if (ret != 0)
@@ -1390,8 +1391,8 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
net->hw_features |= NETIF_F_TSO;
- if (hwcaps.lsov2.ip4_maxsz < gso_max_size)
- gso_max_size = hwcaps.lsov2.ip4_maxsz;
+ if (hwcaps.lsov2.ip4_maxsz < nvdev->netvsc_gso_max_size)
+ nvdev->netvsc_gso_max_size = hwcaps.lsov2.ip4_maxsz;
}
if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) {
@@ -1411,8 +1412,8 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
net->hw_features |= NETIF_F_TSO6;
- if (hwcaps.lsov2.ip6_maxsz < gso_max_size)
- gso_max_size = hwcaps.lsov2.ip6_maxsz;
+ if (hwcaps.lsov2.ip6_maxsz < nvdev->netvsc_gso_max_size)
+ nvdev->netvsc_gso_max_size = hwcaps.lsov2.ip6_maxsz;
}
if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) {
@@ -1438,7 +1439,7 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
*/
net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features;
- netif_set_tso_max_size(net, gso_max_size);
+ netif_set_tso_max_size(net, nvdev->netvsc_gso_max_size);
ret = rndis_filter_set_offload_params(net, nvdev, &offloads);
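A standalone sketch of the size selection this change introduces (not driver code; the numbers are examples): the netvsc NIC caches its own GSO limit, clamped by the RNDIS LSOv2 capabilities, and the TSO max size switches between that value and the VF's limit as the datapath moves.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GSO_LEGACY_MAX_SIZE 65536u

/* Clamp the netvsc GSO limit by the reported LSOv2 IPv4/IPv6 maximums. */
static uint32_t netvsc_gso_max(uint32_t ip4_maxsz, uint32_t ip6_maxsz)
{
	uint32_t max = GSO_LEGACY_MAX_SIZE;

	if (ip4_maxsz < max)
		max = ip4_maxsz;
	if (ip6_maxsz < max)
		max = ip6_maxsz;
	return max;
}

/* Pick the TSO max size depending on which datapath is active. */
static uint32_t effective_tso_max(bool vf_is_up, uint32_t vf_tso_max,
				  uint32_t netvsc_gso_max_size)
{
	return vf_is_up ? vf_tso_max : netvsc_gso_max_size;
}

int main(void)
{
	uint32_t nv = netvsc_gso_max(62780, 62780);

	printf("datapath on VF:     %u\n", effective_tso_max(true, 511 * 1024, nv));
	printf("datapath on netvsc: %u\n", effective_tso_max(false, 511 * 1024, nv));
	return 0;
}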
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index f632b0cfd5ae..fd91f8a45bce 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -776,8 +776,8 @@ at86rf230_setup_spi_messages(struct at86rf230_local *lp,
state->trx.tx_buf = state->buf;
state->trx.rx_buf = state->buf;
spi_message_add_tail(&state->trx, &state->msg);
- hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- state->timer.function = at86rf230_async_state_timer;
+ hrtimer_setup(&state->timer, at86rf230_async_state_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
static irqreturn_t at86rf230_isr(int irq, void *data)
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 753215ebc67c..ebc4f1b18e7b 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -52,12 +52,10 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
-#include <linux/gpio.h>
#include <linux/ieee802154.h>
#include <linux/io.h>
#include <linux/kfifo.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
@@ -350,8 +348,8 @@ struct work_priv_container {
* @extclockenable: true if the external clock is to be enabled
* @extclockfreq: frequency of the external clock
* @extclockgpio: ca8210 output gpio of the external clock
- * @gpio_reset: gpio number of ca8210 reset line
- * @gpio_irq: gpio number of ca8210 interrupt line
+ * @reset_gpio: ca8210 reset GPIO descriptor
+ * @irq_gpio: ca8210 interrupt GPIO descriptor
* @irq_id: identifier for the ca8210 irq
*
*/
@@ -359,8 +357,8 @@ struct ca8210_platform_data {
bool extclockenable;
unsigned int extclockfreq;
unsigned int extclockgpio;
- int gpio_reset;
- int gpio_irq;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *irq_gpio;
int irq_id;
};
@@ -627,14 +625,15 @@ static int ca8210_spi_transfer(
*/
static void ca8210_reset_send(struct spi_device *spi, unsigned int ms)
{
- struct ca8210_platform_data *pdata = spi->dev.platform_data;
+ struct device *dev = &spi->dev;
+ struct ca8210_platform_data *pdata = dev_get_platdata(dev);
struct ca8210_priv *priv = spi_get_drvdata(spi);
long status;
- gpio_set_value(pdata->gpio_reset, 0);
+ gpiod_set_value(pdata->reset_gpio, 1);
reinit_completion(&priv->ca8210_is_awake);
msleep(ms);
- gpio_set_value(pdata->gpio_reset, 1);
+ gpiod_set_value(pdata->reset_gpio, 0);
priv->promiscuous = false;
/* Wait until wakeup indication seen */
@@ -1446,8 +1445,7 @@ static u8 mcps_data_request(
command.pdata.data_req.src_addr_mode = src_addr_mode;
command.pdata.data_req.dst.mode = dst_address_mode;
if (dst_address_mode != MAC_MODE_NO_ADDR) {
- command.pdata.data_req.dst.pan_id[0] = LS_BYTE(dst_pan_id);
- command.pdata.data_req.dst.pan_id[1] = MS_BYTE(dst_pan_id);
+ put_unaligned_le16(dst_pan_id, command.pdata.data_req.dst.pan_id);
if (dst_address_mode == MAC_MODE_SHORT_ADDR) {
command.pdata.data_req.dst.address[0] = LS_BYTE(
dst_addr->short_address
@@ -1795,12 +1793,12 @@ static int ca8210_skb_rx(
}
hdr.source.mode = data_ind[0];
dev_dbg(&priv->spi->dev, "srcAddrMode: %#03x\n", hdr.source.mode);
- hdr.source.pan_id = *(u16 *)&data_ind[1];
+ hdr.source.pan_id = cpu_to_le16(get_unaligned_le16(&data_ind[1]));
dev_dbg(&priv->spi->dev, "srcPanId: %#06x\n", hdr.source.pan_id);
memcpy(&hdr.source.extended_addr, &data_ind[3], 8);
hdr.dest.mode = data_ind[11];
dev_dbg(&priv->spi->dev, "dstAddrMode: %#03x\n", hdr.dest.mode);
- hdr.dest.pan_id = *(u16 *)&data_ind[12];
+ hdr.dest.pan_id = cpu_to_le16(get_unaligned_le16(&data_ind[12]));
dev_dbg(&priv->spi->dev, "dstPanId: %#06x\n", hdr.dest.pan_id);
memcpy(&hdr.dest.extended_addr, &data_ind[14], 8);
@@ -1927,7 +1925,7 @@ static int ca8210_skb_tx(
status = mcps_data_request(
header.source.mode,
header.dest.mode,
- header.dest.pan_id,
+ le16_to_cpu(header.dest.pan_id),
(union macaddr *)&header.dest.extended_addr,
skb->len - mac_len,
&skb->data[mac_len],
@@ -2737,9 +2735,10 @@ static int ca8210_config_extern_clk(
*/
static int ca8210_register_ext_clock(struct spi_device *spi)
{
+ struct device *dev = &spi->dev;
+ struct ca8210_platform_data *pdata = dev_get_platdata(dev);
struct device_node *np = spi->dev.of_node;
struct ca8210_priv *priv = spi_get_drvdata(spi);
- struct ca8210_platform_data *pdata = spi->dev.platform_data;
if (!np)
return -EFAULT;
@@ -2785,25 +2784,16 @@ static void ca8210_unregister_ext_clock(struct spi_device *spi)
*/
static int ca8210_reset_init(struct spi_device *spi)
{
- int ret;
- struct ca8210_platform_data *pdata = spi->dev.platform_data;
-
- pdata->gpio_reset = of_get_named_gpio(
- spi->dev.of_node,
- "reset-gpio",
- 0
- );
+ struct device *dev = &spi->dev;
+ struct ca8210_platform_data *pdata = dev_get_platdata(dev);
- ret = gpio_direction_output(pdata->gpio_reset, 1);
- if (ret < 0) {
- dev_crit(
- &spi->dev,
- "Reset GPIO %d did not set to output mode\n",
- pdata->gpio_reset
- );
+ pdata->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(pdata->reset_gpio)) {
+ dev_crit(dev, "Reset GPIO did not set to output mode\n");
+ return PTR_ERR(pdata->reset_gpio);
}
- return ret;
+ return 0;
}
/**
@@ -2814,23 +2804,19 @@ static int ca8210_reset_init(struct spi_device *spi)
*/
static int ca8210_interrupt_init(struct spi_device *spi)
{
+ struct device *dev = &spi->dev;
+ struct ca8210_platform_data *pdata = dev_get_platdata(dev);
int ret;
- struct ca8210_platform_data *pdata = spi->dev.platform_data;
- pdata->gpio_irq = of_get_named_gpio(
- spi->dev.of_node,
- "irq-gpio",
- 0
- );
+ pdata->irq_gpio = devm_gpiod_get(dev, "irq", GPIOD_IN);
+ if (IS_ERR(pdata->irq_gpio)) {
+ dev_crit(dev, "Could not retrieve IRQ GPIO\n");
+ return PTR_ERR(pdata->irq_gpio);
+ }
- pdata->irq_id = gpio_to_irq(pdata->gpio_irq);
+ pdata->irq_id = gpiod_to_irq(pdata->irq_gpio);
if (pdata->irq_id < 0) {
- dev_crit(
- &spi->dev,
- "Could not get irq for gpio pin %d\n",
- pdata->gpio_irq
- );
- gpio_free(pdata->gpio_irq);
+ dev_crit(dev, "Could not get irq for IRQ GPIO\n");
return pdata->irq_id;
}
@@ -2841,10 +2827,8 @@ static int ca8210_interrupt_init(struct spi_device *spi)
"ca8210-irq",
spi_get_drvdata(spi)
);
- if (ret) {
+ if (ret)
dev_crit(&spi->dev, "request_irq %d failed\n", pdata->irq_id);
- gpio_free(pdata->gpio_irq);
- }
return ret;
}
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 025e0c19ec25..50de3ee204db 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -166,8 +166,7 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type);
void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
unsigned int len, bool success, bool mcast);
-int ipvlan_link_new(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+int ipvlan_link_new(struct net_device *dev, struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack);
void ipvlan_link_delete(struct net_device *dev, struct list_head *head);
void ipvlan_link_setup(struct net_device *dev);
diff --git a/drivers/net/ipvlan/ipvlan_l3s.c b/drivers/net/ipvlan/ipvlan_l3s.c
index b4ef386bdb1b..7c017fe35522 100644
--- a/drivers/net/ipvlan/ipvlan_l3s.c
+++ b/drivers/net/ipvlan/ipvlan_l3s.c
@@ -226,5 +226,4 @@ void ipvlan_l3s_unregister(struct ipvl_port *port)
dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
- dev->l3mdev_ops = NULL;
}
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index da3a97a65507..0ed2fd833a5d 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -3,6 +3,7 @@
*/
#include <linux/ethtool.h>
+#include <net/netdev_lock.h>
#include "ipvlan.h"
@@ -532,11 +533,13 @@ err:
return ret;
}
-int ipvlan_link_new(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+int ipvlan_link_new(struct net_device *dev, struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct ipvl_port *port;
struct net_device *phy_dev;
int err;
@@ -545,7 +548,7 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
if (!tb[IFLA_LINK])
return -EINVAL;
- phy_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+ phy_dev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK]));
if (!phy_dev)
return -ENODEV;
diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
index 1afc4c47be73..edd13916831a 100644
--- a/drivers/net/ipvlan/ipvtap.c
+++ b/drivers/net/ipvlan/ipvtap.c
@@ -73,8 +73,8 @@ static void ipvtap_update_features(struct tap_dev *tap,
netdev_update_features(vlan->dev);
}
-static int ipvtap_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int ipvtap_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
struct ipvtap_dev *vlantap = netdev_priv(dev);
@@ -97,7 +97,7 @@ static int ipvtap_newlink(struct net *src_net, struct net_device *dev,
/* Don't put anything that may fail after macvlan_common_newlink
* because we can't undo what it does.
*/
- err = ipvlan_link_new(src_net, dev, tb, data, extack);
+ err = ipvlan_link_new(dev, params, extack);
if (err) {
netdev_rx_handler_unregister(dev);
return err;
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index f1d68153987e..1fb6ce6843ad 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -54,6 +54,7 @@
#include <linux/percpu.h>
#include <linux/net_tstamp.h>
#include <net/net_namespace.h>
+#include <net/netdev_lock.h>
#include <linux/u64_stats_sync.h>
/* blackhole_netdev - a device used for dsts that are marked expired!
@@ -172,7 +173,7 @@ static void gen_lo_setup(struct net_device *dev,
dev->flags = IFF_LOOPBACK;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
dev->lltx = true;
- dev->netns_local = true;
+ dev->netns_immutable = true;
netif_keep_dst(dev);
dev->hw_features = NETIF_F_GSO_SOFTWARE;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 1bc1e5993f56..3d315e30ee47 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -19,6 +19,7 @@
#include <net/gro_cells.h>
#include <net/macsec.h>
#include <net/dst_metadata.h>
+#include <net/netdev_lock.h>
#include <linux/phy.h>
#include <linux/byteorder/generic.h>
#include <linux/if_arp.h>
@@ -4141,11 +4142,14 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
static struct lock_class_key macsec_netdev_addr_lock_key;
-static int macsec_newlink(struct net *net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int macsec_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
struct macsec_dev *macsec = macsec_priv(dev);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
rx_handler_func_t *rx_handler;
u8 icv_len = MACSEC_DEFAULT_ICV_LEN;
struct net_device *real_dev;
@@ -4154,7 +4158,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
if (!tb[IFLA_LINK])
return -EINVAL;
- real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
+ real_dev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK]));
if (!real_dev)
return -ENODEV;
if (real_dev->type != ARPHRD_ETHER)
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index fed4fe2a4748..d0dfa6bca6cc 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -28,6 +28,7 @@
#include <linux/if_macvlan.h>
#include <linux/hash.h>
#include <linux/workqueue.h>
+#include <net/netdev_lock.h>
#include <net/rtnetlink.h>
#include <net/xfrm.h>
#include <linux/netpoll.h>
@@ -1440,21 +1441,24 @@ static int macvlan_changelink_sources(struct macvlan_dev *vlan, u32 mode,
return 0;
}
-int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+int macvlan_common_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
struct macvlan_dev *vlan = netdev_priv(dev);
- struct macvlan_port *port;
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct net_device *lowerdev;
- int err;
- int macmode;
+ struct macvlan_port *port;
bool create = false;
+ int macmode;
+ int err;
if (!tb[IFLA_LINK])
return -EINVAL;
- lowerdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+ lowerdev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK]));
if (lowerdev == NULL)
return -ENODEV;
@@ -1565,11 +1569,11 @@ destroy_macvlan_port:
}
EXPORT_SYMBOL_GPL(macvlan_common_newlink);
-static int macvlan_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int macvlan_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
- return macvlan_common_newlink(src_net, dev, tb, data, extack);
+ return macvlan_common_newlink(dev, params, extack);
}
void macvlan_dellink(struct net_device *dev, struct list_head *head)
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 29a5929d48e5..b391a0f740a3 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -77,8 +77,8 @@ static void macvtap_update_features(struct tap_dev *tap,
netdev_update_features(vlan->dev);
}
-static int macvtap_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int macvtap_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
struct macvtap_dev *vlantap = netdev_priv(dev);
@@ -105,7 +105,7 @@ static int macvtap_newlink(struct net *src_net, struct net_device *dev,
/* Don't put anything that may fail after macvlan_common_newlink
* because we can't undo what it does.
*/
- err = macvlan_common_newlink(src_net, dev, tb, data, extack);
+ err = macvlan_common_newlink(dev, params, extack);
if (err) {
netdev_rx_handler_unregister(dev);
return err;
diff --git a/drivers/net/mctp/Kconfig b/drivers/net/mctp/Kconfig
index 15860d6ac39f..cf325ab0b1ef 100644
--- a/drivers/net/mctp/Kconfig
+++ b/drivers/net/mctp/Kconfig
@@ -47,6 +47,16 @@ config MCTP_TRANSPORT_I3C
A MCTP protocol network device is created for each I3C bus
having a "mctp-controller" devicetree property.
+config MCTP_TRANSPORT_USB
+ tristate "MCTP USB transport"
+ depends on USB
+ help
+ Provides a driver to access MCTP devices over USB transport,
+ defined by DMTF specification DSP0283.
+
+ MCTP-over-USB interfaces are peer-to-peer, so each interface
+ represents a physical connection to one remote MCTP endpoint.
+
endmenu
endif
diff --git a/drivers/net/mctp/Makefile b/drivers/net/mctp/Makefile
index e1cb99ced54a..c36006849a1e 100644
--- a/drivers/net/mctp/Makefile
+++ b/drivers/net/mctp/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_MCTP_SERIAL) += mctp-serial.o
obj-$(CONFIG_MCTP_TRANSPORT_I2C) += mctp-i2c.o
obj-$(CONFIG_MCTP_TRANSPORT_I3C) += mctp-i3c.o
+obj-$(CONFIG_MCTP_TRANSPORT_USB) += mctp-usb.o
diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c
index e3dcdeacc12c..f782d93f826e 100644
--- a/drivers/net/mctp/mctp-i2c.c
+++ b/drivers/net/mctp/mctp-i2c.c
@@ -537,7 +537,7 @@ static void mctp_i2c_xmit(struct mctp_i2c_dev *midev, struct sk_buff *skb)
rc = __i2c_transfer(midev->adapter, &msg, 1);
/* on tx errors, the flow can no longer be considered valid */
- if (rc)
+ if (rc < 0)
mctp_i2c_invalidate_tx_flow(midev, skb);
break;
@@ -583,6 +583,7 @@ static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev,
struct mctp_i2c_hdr *hdr;
struct mctp_hdr *mhdr;
u8 lldst, llsrc;
+ int rc;
if (len > MCTP_I2C_MAXMTU)
return -EMSGSIZE;
@@ -593,6 +594,10 @@ static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev,
lldst = *((u8 *)daddr);
llsrc = *((u8 *)saddr);
+ rc = skb_cow_head(skb, sizeof(struct mctp_i2c_hdr));
+ if (rc)
+ return rc;
+
skb_push(skb, sizeof(struct mctp_i2c_hdr));
skb_reset_mac_header(skb);
hdr = (void *)skb_mac_header(skb);
diff --git a/drivers/net/mctp/mctp-i3c.c b/drivers/net/mctp/mctp-i3c.c
index c1e72253063b..c678f79aa356 100644
--- a/drivers/net/mctp/mctp-i3c.c
+++ b/drivers/net/mctp/mctp-i3c.c
@@ -506,10 +506,15 @@ static int mctp_i3c_header_create(struct sk_buff *skb, struct net_device *dev,
const void *saddr, unsigned int len)
{
struct mctp_i3c_internal_hdr *ihdr;
+ int rc;
if (!daddr || !saddr)
return -EINVAL;
+ rc = skb_cow_head(skb, sizeof(struct mctp_i3c_internal_hdr));
+ if (rc)
+ return rc;
+
skb_push(skb, sizeof(struct mctp_i3c_internal_hdr));
skb_reset_mac_header(skb);
ihdr = (void *)skb_mac_header(skb);
diff --git a/drivers/net/mctp/mctp-usb.c b/drivers/net/mctp/mctp-usb.c
new file mode 100644
index 000000000000..e8d4b01c3f34
--- /dev/null
+++ b/drivers/net/mctp/mctp-usb.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * mctp-usb.c - MCTP-over-USB (DMTF DSP0283) transport binding driver.
+ *
+ * DSP0283 is available at:
+ * https://www.dmtf.org/sites/default/files/standards/documents/DSP0283_1.0.1.pdf
+ *
+ * Copyright (C) 2024-2025 Code Construct Pty Ltd
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/usb.h>
+#include <linux/usb/mctp-usb.h>
+
+#include <net/mctp.h>
+#include <net/mctpdevice.h>
+#include <net/pkt_sched.h>
+
+#include <uapi/linux/if_arp.h>
+
+struct mctp_usb {
+ struct usb_device *usbdev;
+ struct usb_interface *intf;
+ bool stopped;
+
+ struct net_device *netdev;
+
+ u8 ep_in;
+ u8 ep_out;
+
+ struct urb *tx_urb;
+ struct urb *rx_urb;
+
+ struct delayed_work rx_retry_work;
+};
+
+static void mctp_usb_out_complete(struct urb *urb)
+{
+ struct sk_buff *skb = urb->context;
+ struct net_device *netdev = skb->dev;
+ int status;
+
+ status = urb->status;
+
+ switch (status) {
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ case -EPROTO:
+ dev_dstats_tx_dropped(netdev);
+ break;
+ case 0:
+ dev_dstats_tx_add(netdev, skb->len);
+ netif_wake_queue(netdev);
+ consume_skb(skb);
+ return;
+ default:
+ netdev_dbg(netdev, "unexpected tx urb status: %d\n", status);
+ dev_dstats_tx_dropped(netdev);
+ }
+
+ kfree_skb(skb);
+}
+
+static netdev_tx_t mctp_usb_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct mctp_usb *mctp_usb = netdev_priv(dev);
+ struct mctp_usb_hdr *hdr;
+ unsigned int plen;
+ struct urb *urb;
+ int rc;
+
+ plen = skb->len;
+
+ if (plen + sizeof(*hdr) > MCTP_USB_XFER_SIZE)
+ goto err_drop;
+
+ rc = skb_cow_head(skb, sizeof(*hdr));
+ if (rc)
+ goto err_drop;
+
+ hdr = skb_push(skb, sizeof(*hdr));
+ if (!hdr)
+ goto err_drop;
+
+ hdr->id = cpu_to_be16(MCTP_USB_DMTF_ID);
+ hdr->rsvd = 0;
+ hdr->len = plen + sizeof(*hdr);
+
+ urb = mctp_usb->tx_urb;
+
+ usb_fill_bulk_urb(urb, mctp_usb->usbdev,
+ usb_sndbulkpipe(mctp_usb->usbdev, mctp_usb->ep_out),
+ skb->data, skb->len,
+ mctp_usb_out_complete, skb);
+
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+ if (rc)
+ goto err_drop;
+ else
+ netif_stop_queue(dev);
+
+ return NETDEV_TX_OK;
+
+err_drop:
+ dev_dstats_tx_dropped(dev);
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static void mctp_usb_in_complete(struct urb *urb);
+
+/* If we fail to queue an IN urb atomically (either due to skb allocation or
+ * urb submission), we will retry queueing from non-atomic context after a
+ * delay, specified in jiffies.
+ */
+static const unsigned long RX_RETRY_DELAY = HZ / 4;
+
+static int mctp_usb_rx_queue(struct mctp_usb *mctp_usb, gfp_t gfp)
+{
+ struct sk_buff *skb;
+ int rc;
+
+ skb = __netdev_alloc_skb(mctp_usb->netdev, MCTP_USB_XFER_SIZE, gfp);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto err_retry;
+ }
+
+ usb_fill_bulk_urb(mctp_usb->rx_urb, mctp_usb->usbdev,
+ usb_rcvbulkpipe(mctp_usb->usbdev, mctp_usb->ep_in),
+ skb->data, MCTP_USB_XFER_SIZE,
+ mctp_usb_in_complete, skb);
+
+ rc = usb_submit_urb(mctp_usb->rx_urb, gfp);
+ if (rc) {
+ netdev_dbg(mctp_usb->netdev, "rx urb submit failure: %d\n", rc);
+ kfree_skb(skb);
+ if (rc == -ENOMEM)
+ goto err_retry;
+ }
+
+ return rc;
+
+err_retry:
+ schedule_delayed_work(&mctp_usb->rx_retry_work, RX_RETRY_DELAY);
+ return rc;
+}
+
+static void mctp_usb_in_complete(struct urb *urb)
+{
+ struct sk_buff *skb = urb->context;
+ struct net_device *netdev = skb->dev;
+ struct mctp_usb *mctp_usb = netdev_priv(netdev);
+ struct mctp_skb_cb *cb;
+ unsigned int len;
+ int status;
+
+ status = urb->status;
+
+ switch (status) {
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ case -EPROTO:
+ kfree_skb(skb);
+ return;
+ case 0:
+ break;
+ default:
+ netdev_dbg(netdev, "unexpected rx urb status: %d\n", status);
+ kfree_skb(skb);
+ return;
+ }
+
+ len = urb->actual_length;
+ __skb_put(skb, len);
+
+ while (skb) {
+ struct sk_buff *skb2 = NULL;
+ struct mctp_usb_hdr *hdr;
+ u8 pkt_len; /* length of MCTP packet, no USB header */
+
+ hdr = skb_pull_data(skb, sizeof(*hdr));
+ if (!hdr)
+ break;
+
+ if (be16_to_cpu(hdr->id) != MCTP_USB_DMTF_ID) {
+ netdev_dbg(netdev, "rx: invalid id %04x\n",
+ be16_to_cpu(hdr->id));
+ break;
+ }
+
+ if (hdr->len <
+ sizeof(struct mctp_hdr) + sizeof(struct mctp_usb_hdr)) {
+ netdev_dbg(netdev, "rx: short packet (hdr) %d\n",
+ hdr->len);
+ break;
+ }
+
+ /* we know we have at least sizeof(struct mctp_usb_hdr) here */
+ pkt_len = hdr->len - sizeof(struct mctp_usb_hdr);
+ if (pkt_len > skb->len) {
+ netdev_dbg(netdev,
+ "rx: short packet (xfer) %d, actual %d\n",
+ hdr->len, skb->len);
+ break;
+ }
+
+ if (pkt_len < skb->len) {
+ /* more packets may follow - clone to a new
+ * skb to use on the next iteration
+ */
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (skb2) {
+ if (!skb_pull(skb2, pkt_len)) {
+ kfree_skb(skb2);
+ skb2 = NULL;
+ }
+ }
+ skb_trim(skb, pkt_len);
+ }
+
+ dev_dstats_rx_add(netdev, skb->len);
+
+ skb->protocol = htons(ETH_P_MCTP);
+ skb_reset_network_header(skb);
+ cb = __mctp_cb(skb);
+ cb->halen = 0;
+ netif_rx(skb);
+
+ skb = skb2;
+ }
+
+ if (skb)
+ kfree_skb(skb);
+
+ mctp_usb_rx_queue(mctp_usb, GFP_ATOMIC);
+}
+
+static void mctp_usb_rx_retry_work(struct work_struct *work)
+{
+ struct mctp_usb *mctp_usb = container_of(work, struct mctp_usb,
+ rx_retry_work.work);
+
+ if (READ_ONCE(mctp_usb->stopped))
+ return;
+
+ mctp_usb_rx_queue(mctp_usb, GFP_KERNEL);
+}
+
+static int mctp_usb_open(struct net_device *dev)
+{
+ struct mctp_usb *mctp_usb = netdev_priv(dev);
+
+ WRITE_ONCE(mctp_usb->stopped, false);
+
+ return mctp_usb_rx_queue(mctp_usb, GFP_KERNEL);
+}
+
+static int mctp_usb_stop(struct net_device *dev)
+{
+ struct mctp_usb *mctp_usb = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ /* prevent RX submission retry */
+ WRITE_ONCE(mctp_usb->stopped, true);
+
+ usb_kill_urb(mctp_usb->rx_urb);
+ usb_kill_urb(mctp_usb->tx_urb);
+
+ cancel_delayed_work_sync(&mctp_usb->rx_retry_work);
+
+ return 0;
+}
+
+static const struct net_device_ops mctp_usb_netdev_ops = {
+ .ndo_start_xmit = mctp_usb_start_xmit,
+ .ndo_open = mctp_usb_open,
+ .ndo_stop = mctp_usb_stop,
+};
+
+static void mctp_usb_netdev_setup(struct net_device *dev)
+{
+ dev->type = ARPHRD_MCTP;
+
+ dev->mtu = MCTP_USB_MTU_MIN;
+ dev->min_mtu = MCTP_USB_MTU_MIN;
+ dev->max_mtu = MCTP_USB_MTU_MAX;
+
+ dev->hard_header_len = sizeof(struct mctp_usb_hdr);
+ dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
+ dev->flags = IFF_NOARP;
+ dev->netdev_ops = &mctp_usb_netdev_ops;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
+}
+
+static int mctp_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_endpoint_descriptor *ep_in, *ep_out;
+ struct usb_host_interface *iface_desc;
+ struct net_device *netdev;
+ struct mctp_usb *dev;
+ int rc;
+
+ /* only one alternate */
+ iface_desc = intf->cur_altsetting;
+
+ rc = usb_find_common_endpoints(iface_desc, &ep_in, &ep_out, NULL, NULL);
+ if (rc) {
+ dev_err(&intf->dev, "invalid endpoints on device?\n");
+ return rc;
+ }
+
+ netdev = alloc_netdev(sizeof(*dev), "mctpusb%d", NET_NAME_ENUM,
+ mctp_usb_netdev_setup);
+ if (!netdev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(netdev, &intf->dev);
+ dev = netdev_priv(netdev);
+ dev->netdev = netdev;
+ dev->usbdev = usb_get_dev(interface_to_usbdev(intf));
+ dev->intf = intf;
+ usb_set_intfdata(intf, dev);
+
+ dev->ep_in = ep_in->bEndpointAddress;
+ dev->ep_out = ep_out->bEndpointAddress;
+
+ dev->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ dev->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!dev->tx_urb || !dev->rx_urb) {
+ rc = -ENOMEM;
+ goto err_free_urbs;
+ }
+
+ INIT_DELAYED_WORK(&dev->rx_retry_work, mctp_usb_rx_retry_work);
+
+ rc = mctp_register_netdev(netdev, NULL, MCTP_PHYS_BINDING_USB);
+ if (rc)
+ goto err_free_urbs;
+
+ return 0;
+
+err_free_urbs:
+ usb_free_urb(dev->tx_urb);
+ usb_free_urb(dev->rx_urb);
+ free_netdev(netdev);
+ return rc;
+}
+
+static void mctp_usb_disconnect(struct usb_interface *intf)
+{
+ struct mctp_usb *dev = usb_get_intfdata(intf);
+
+ mctp_unregister_netdev(dev->netdev);
+ usb_free_urb(dev->tx_urb);
+ usb_free_urb(dev->rx_urb);
+ usb_put_dev(dev->usbdev);
+ free_netdev(dev->netdev);
+}
+
+static const struct usb_device_id mctp_usb_devices[] = {
+ { USB_INTERFACE_INFO(USB_CLASS_MCTP, 0x0, 0x1) },
+ { 0 },
+};
+
+MODULE_DEVICE_TABLE(usb, mctp_usb_devices);
+
+static struct usb_driver mctp_usb_driver = {
+ .name = "mctp-usb",
+ .id_table = mctp_usb_devices,
+ .probe = mctp_usb_probe,
+ .disconnect = mctp_usb_disconnect,
+};
+
+module_usb_driver(mctp_usb_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jeremy Kerr <jk@codeconstruct.com.au>");
+MODULE_DESCRIPTION("MCTP USB transport");
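For reference, each bulk transfer above carries one or more MCTP packets, each prefixed by the 4-byte DSP0283 binding header: a 2-byte big-endian DMTF ID, a reserved byte, and a length byte covering header plus packet. A standalone framing sketch (the ID value here is a placeholder assumption; the driver uses the MCTP_USB_DMTF_ID constant from its header):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EXAMPLE_DMTF_ID 0x1ab4 /* placeholder; not taken from the driver */

struct usb_binding_hdr {
	uint8_t id_hi, id_lo; /* big-endian DMTF ID */
	uint8_t rsvd;
	uint8_t len;          /* header + MCTP packet */
};

/* Prepend the binding header to one MCTP packet inside a transfer buffer. */
static size_t frame_packet(uint8_t *out, const uint8_t *pkt, uint8_t pkt_len)
{
	struct usb_binding_hdr hdr = {
		.id_hi = EXAMPLE_DMTF_ID >> 8,
		.id_lo = EXAMPLE_DMTF_ID & 0xff,
		.rsvd = 0,
		.len = (uint8_t)(sizeof(hdr) + pkt_len),
	};

	memcpy(out, &hdr, sizeof(hdr));
	memcpy(out + sizeof(hdr), pkt, pkt_len);
	return sizeof(hdr) + pkt_len;
}

int main(void)
{
	uint8_t buf[64];
	uint8_t pkt[4] = { 0x01, 0x00, 0x08, 0xc8 }; /* example MCTP header bytes */
	size_t n = frame_packet(buf, pkt, sizeof(pkt));

	printf("framed %zu bytes, hdr.len=%u\n", n, buf[3]);
	return 0;
}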
diff --git a/drivers/net/mdio/mdio-i2c.c b/drivers/net/mdio/mdio-i2c.c
index da2001ea1f99..53e96bfab542 100644
--- a/drivers/net/mdio/mdio-i2c.c
+++ b/drivers/net/mdio/mdio-i2c.c
@@ -106,6 +106,62 @@ static int i2c_mii_write_default_c22(struct mii_bus *bus, int phy_id, int reg,
return i2c_mii_write_default_c45(bus, phy_id, -1, reg, val);
}
+static int smbus_byte_mii_read_default_c22(struct mii_bus *bus, int phy_id,
+ int reg)
+{
+ struct i2c_adapter *i2c = bus->priv;
+ union i2c_smbus_data smbus_data;
+ int val = 0, ret;
+
+ if (!i2c_mii_valid_phy_id(phy_id))
+ return 0;
+
+ ret = i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0,
+ I2C_SMBUS_READ, reg,
+ I2C_SMBUS_BYTE_DATA, &smbus_data);
+ if (ret < 0)
+ return ret;
+
+ val = (smbus_data.byte & 0xff) << 8;
+
+ ret = i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0,
+ I2C_SMBUS_READ, reg,
+ I2C_SMBUS_BYTE_DATA, &smbus_data);
+ if (ret < 0)
+ return ret;
+
+ val |= smbus_data.byte & 0xff;
+
+ return val;
+}
+
+static int smbus_byte_mii_write_default_c22(struct mii_bus *bus, int phy_id,
+ int reg, u16 val)
+{
+ struct i2c_adapter *i2c = bus->priv;
+ union i2c_smbus_data smbus_data;
+ int ret;
+
+ if (!i2c_mii_valid_phy_id(phy_id))
+ return 0;
+
+ smbus_data.byte = (val & 0xff00) >> 8;
+
+ ret = i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0,
+ I2C_SMBUS_WRITE, reg,
+ I2C_SMBUS_BYTE_DATA, &smbus_data);
+ if (ret < 0)
+ return ret;
+
+ smbus_data.byte = val & 0xff;
+
+ ret = i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0,
+ I2C_SMBUS_WRITE, reg,
+ I2C_SMBUS_BYTE_DATA, &smbus_data);
+
+ return ret < 0 ? ret : 0;
+}
+
/* RollBall SFPs do not access internal PHY via I2C address 0x56, but
* instead via address 0x51, when SFP page is set to 0x03 and password to
* 0xffffffff.
@@ -378,13 +434,26 @@ static int i2c_mii_init_rollball(struct i2c_adapter *i2c)
return 0;
}
+static bool mdio_i2c_check_functionality(struct i2c_adapter *i2c,
+ enum mdio_i2c_proto protocol)
+{
+ if (i2c_check_functionality(i2c, I2C_FUNC_I2C))
+ return true;
+
+ if (i2c_check_functionality(i2c, I2C_FUNC_SMBUS_BYTE_DATA) &&
+ protocol == MDIO_I2C_MARVELL_C22)
+ return true;
+
+ return false;
+}
+
struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c,
enum mdio_i2c_proto protocol)
{
struct mii_bus *mii;
int ret;
- if (!i2c_check_functionality(i2c, I2C_FUNC_I2C))
+ if (!mdio_i2c_check_functionality(i2c, protocol))
return ERR_PTR(-EINVAL);
mii = mdiobus_alloc();
@@ -395,6 +464,14 @@ struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c,
mii->parent = parent;
mii->priv = i2c;
+ /* Only use SMBus if we have no other choice */
+ if (i2c_check_functionality(i2c, I2C_FUNC_SMBUS_BYTE_DATA) &&
+ !i2c_check_functionality(i2c, I2C_FUNC_I2C)) {
+ mii->read = smbus_byte_mii_read_default_c22;
+ mii->write = smbus_byte_mii_write_default_c22;
+ return mii;
+ }
+
switch (protocol) {
case MDIO_I2C_ROLLBALL:
ret = i2c_mii_init_rollball(i2c);
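The SMBus byte-data fallback above assembles each 16-bit MII register value from two consecutive byte reads of the same register, high byte first. A userspace-style sketch of that assembly (the read_byte callback is a stand-in assumption for i2c_smbus_xfer):

#include <stdint.h>
#include <stdio.h>

typedef int (*read_byte_fn)(uint8_t reg, uint8_t *out);

/* Two byte-data reads of the same register: device returns high, then low. */
static int mii_read_via_smbus(read_byte_fn read_byte, uint8_t reg,
			      uint16_t *val)
{
	uint8_t hi, lo;
	int ret;

	ret = read_byte(reg, &hi);
	if (ret < 0)
		return ret;

	ret = read_byte(reg, &lo);
	if (ret < 0)
		return ret;

	*val = (uint16_t)(hi << 8) | lo;
	return 0;
}

/* Fake device for demonstration: alternates high/low bytes of 0x0141. */
static int fake_read_byte(uint8_t reg, uint8_t *out)
{
	static int second;

	(void)reg;
	*out = (second++ & 1) ? 0x41 : 0x01;
	return 0;
}

int main(void)
{
	uint16_t val;

	if (!mii_read_via_smbus(fake_read_byte, 0x02, &val))
		printf("MII reg 0x02 = 0x%04x\n", val);
	return 0;
}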
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index 54c8b9d5b5fc..5b50d9186f12 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -734,7 +734,7 @@ struct failover *net_failover_create(struct net_device *standby_dev)
failover_dev->lltx = true;
/* Don't allow failover devices to change network namespaces. */
- failover_dev->netns_local = true;
+ failover_dev->netns_immutable = true;
failover_dev->hw_features = FAILOVER_VLAN_FEATURES |
NETIF_F_HW_VLAN_CTAG_TX |
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 86ab4a42769a..4289ccd3e41b 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -45,12 +45,12 @@ MODULE_DESCRIPTION("Console driver for network interfaces");
MODULE_LICENSE("GPL");
#define MAX_PARAM_LENGTH 256
-#define MAX_USERDATA_ENTRY_LENGTH 256
-#define MAX_USERDATA_VALUE_LENGTH 200
+#define MAX_EXTRADATA_ENTRY_LEN 256
+#define MAX_EXTRADATA_VALUE_LEN 200
/* The number 3 comes from userdata entry format characters (' ', '=', '\n') */
-#define MAX_USERDATA_NAME_LENGTH (MAX_USERDATA_ENTRY_LENGTH - \
- MAX_USERDATA_VALUE_LENGTH - 3)
-#define MAX_USERDATA_ITEMS 16
+#define MAX_EXTRADATA_NAME_LEN (MAX_EXTRADATA_ENTRY_LEN - \
+ MAX_EXTRADATA_VALUE_LEN - 3)
+#define MAX_EXTRADATA_ITEMS 16
#define MAX_PRINT_CHUNK 1000
static char config[MAX_PARAM_LENGTH];
@@ -97,13 +97,27 @@ struct netconsole_target_stats {
struct u64_stats_sync syncp;
};
+/* Features enabled in sysdata. Contrary to userdata, this data is populated by
+ * the kernel. The fields are designed as bitwise flags, allowing multiple
+ * features to be set in sysdata_fields.
+ */
+enum sysdata_feature {
+ /* Populate the CPU that sends the message */
+ SYSDATA_CPU_NR = BIT(0),
+ /* Populate the task name (as in current->comm) in sysdata */
+ SYSDATA_TASKNAME = BIT(1),
+ /* Kernel release/version as part of sysdata */
+ SYSDATA_RELEASE = BIT(2),
+};
+
/**
* struct netconsole_target - Represents a configured netconsole target.
* @list: Links this target into the target_list.
* @group: Links us into the configfs subsystem hierarchy.
* @userdata_group: Links to the userdata configfs hierarchy
- * @userdata_complete: Cached, formatted string of append
- * @userdata_length: String length of userdata_complete
+ * @extradata_complete: Cached, formatted string of append
+ * @userdata_length: String length of userdata in extradata_complete.
+ * @sysdata_fields: Sysdata features enabled.
* @stats: Packet send stats for the target. Used for debugging.
* @enabled: On / off knob to enable / disable target.
* Visible from userspace (read-write).
@@ -123,20 +137,25 @@ struct netconsole_target_stats {
* remote_ip (read-write)
* local_mac (read-only)
* remote_mac (read-write)
+ * @buf: The buffer used to send the full msg to the network stack
*/
struct netconsole_target {
struct list_head list;
#ifdef CONFIG_NETCONSOLE_DYNAMIC
struct config_group group;
struct config_group userdata_group;
- char userdata_complete[MAX_USERDATA_ENTRY_LENGTH * MAX_USERDATA_ITEMS];
+ char extradata_complete[MAX_EXTRADATA_ENTRY_LEN * MAX_EXTRADATA_ITEMS];
size_t userdata_length;
+ /* bit-wise with sysdata_feature bits */
+ u32 sysdata_fields;
#endif
struct netconsole_target_stats stats;
bool enabled;
bool extended;
bool release;
struct netpoll np;
+ /* protected by target_list_lock */
+ char buf[MAX_PRINT_CHUNK];
};
#ifdef CONFIG_NETCONSOLE_DYNAMIC
@@ -396,6 +415,46 @@ static ssize_t transmit_errors_show(struct config_item *item, char *buf)
return sysfs_emit(buf, "%llu\n", xmit_drop_count + enomem_count);
}
+/* configfs helper to display if cpu_nr sysdata feature is enabled */
+static ssize_t sysdata_cpu_nr_enabled_show(struct config_item *item, char *buf)
+{
+ struct netconsole_target *nt = to_target(item->ci_parent);
+ bool cpu_nr_enabled;
+
+ mutex_lock(&dynamic_netconsole_mutex);
+ cpu_nr_enabled = !!(nt->sysdata_fields & SYSDATA_CPU_NR);
+ mutex_unlock(&dynamic_netconsole_mutex);
+
+ return sysfs_emit(buf, "%d\n", cpu_nr_enabled);
+}
+
+/* configfs helper to display if taskname sysdata feature is enabled */
+static ssize_t sysdata_taskname_enabled_show(struct config_item *item,
+ char *buf)
+{
+ struct netconsole_target *nt = to_target(item->ci_parent);
+ bool taskname_enabled;
+
+ mutex_lock(&dynamic_netconsole_mutex);
+ taskname_enabled = !!(nt->sysdata_fields & SYSDATA_TASKNAME);
+ mutex_unlock(&dynamic_netconsole_mutex);
+
+ return sysfs_emit(buf, "%d\n", taskname_enabled);
+}
+
+static ssize_t sysdata_release_enabled_show(struct config_item *item,
+ char *buf)
+{
+ struct netconsole_target *nt = to_target(item->ci_parent);
+ bool release_enabled;
+
+ mutex_lock(&dynamic_netconsole_mutex);
+ release_enabled = !!(nt->sysdata_fields & SYSDATA_RELEASE);
+ mutex_unlock(&dynamic_netconsole_mutex);
+
+ return sysfs_emit(buf, "%d\n", release_enabled);
+}
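The three show helpers above differ only in the flag they test; they could be factored through a common helper like the sketch below (name hypothetical, not part of the patch), with each *_enabled_show() becoming a one-line wrapper that passes its flag:

static ssize_t sysdata_flag_show(struct config_item *item, char *buf,
				 enum sysdata_feature flag)
{
	struct netconsole_target *nt = to_target(item->ci_parent);
	bool enabled;

	mutex_lock(&dynamic_netconsole_mutex);
	enabled = !!(nt->sysdata_fields & flag);
	mutex_unlock(&dynamic_netconsole_mutex);

	return sysfs_emit(buf, "%d\n", enabled);
}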
+
/*
* This one is special -- targets created through the configfs interface
* are not enabled (and the corresponding netpoll activated) by default.
@@ -659,6 +718,28 @@ out_unlock:
return ret;
}
+/* Count the number of entries we have in extradata.
+ * This is important because extradata_complete only supports
+ * MAX_EXTRADATA_ITEMS entries. Before enabling any new {user,sys}data
+ * feature, the number of entries needs to be checked for available space.
+ */
+static size_t count_extradata_entries(struct netconsole_target *nt)
+{
+ size_t entries;
+
+ /* Userdata entries */
+ entries = list_count_nodes(&nt->userdata_group.cg_children);
+ /* Plus sysdata entries */
+ if (nt->sysdata_fields & SYSDATA_CPU_NR)
+ entries += 1;
+ if (nt->sysdata_fields & SYSDATA_TASKNAME)
+ entries += 1;
+ if (nt->sysdata_fields & SYSDATA_RELEASE)
+ entries += 1;
+
+ return entries;
+}
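Since each enabled sysdata bit currently maps to exactly one extradata entry, the per-flag increments could also be expressed as a population count. A sketch under that assumption (helper name hypothetical, not part of the patch):

static size_t count_extradata_entries_alt(struct netconsole_target *nt)
{
	return list_count_nodes(&nt->userdata_group.cg_children) +
	       hweight32(nt->sysdata_fields);
}

The open-coded version in the patch stays obvious if a future feature ever stops mapping 1:1 to an appended line.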
+
static ssize_t remote_mac_store(struct config_item *item, const char *buf,
size_t count)
{
@@ -675,7 +756,7 @@ static ssize_t remote_mac_store(struct config_item *item, const char *buf,
if (!mac_pton(buf, remote_mac))
goto out_unlock;
- if (buf[3 * ETH_ALEN - 1] && buf[3 * ETH_ALEN - 1] != '\n')
+ if (buf[MAC_ADDR_STR_LEN] && buf[MAC_ADDR_STR_LEN] != '\n')
goto out_unlock;
memcpy(nt->np.remote_mac, remote_mac, ETH_ALEN);
@@ -687,7 +768,7 @@ out_unlock:
struct userdatum {
struct config_item item;
- char value[MAX_USERDATA_VALUE_LENGTH];
+ char value[MAX_EXTRADATA_VALUE_LEN];
};
static struct userdatum *to_userdatum(struct config_item *item)
@@ -724,13 +805,13 @@ static void update_userdata(struct netconsole_target *nt)
/* Clear the current string in case the last userdatum was deleted */
nt->userdata_length = 0;
- nt->userdata_complete[0] = 0;
+ nt->extradata_complete[0] = 0;
list_for_each(entry, &nt->userdata_group.cg_children) {
struct userdatum *udm_item;
struct config_item *item;
- if (WARN_ON_ONCE(child_count >= MAX_USERDATA_ITEMS))
+ if (WARN_ON_ONCE(child_count >= MAX_EXTRADATA_ITEMS))
break;
child_count++;
@@ -738,19 +819,19 @@ static void update_userdata(struct netconsole_target *nt)
udm_item = to_userdatum(item);
/* Skip userdata with no value set */
- if (strnlen(udm_item->value, MAX_USERDATA_VALUE_LENGTH) == 0)
+ if (strnlen(udm_item->value, MAX_EXTRADATA_VALUE_LEN) == 0)
continue;
- /* This doesn't overflow userdata_complete since it will write
- * one entry length (1/MAX_USERDATA_ITEMS long), entry count is
+ /* This doesn't overflow extradata_complete, since each write is at
+ * most one entry long (MAX_EXTRADATA_ENTRY_LEN) and the entry count is
* checked to not exceed MAX items with child_count above
*/
- complete_idx += scnprintf(&nt->userdata_complete[complete_idx],
- MAX_USERDATA_ENTRY_LENGTH, " %s=%s\n",
+ complete_idx += scnprintf(&nt->extradata_complete[complete_idx],
+ MAX_EXTRADATA_ENTRY_LEN, " %s=%s\n",
item->ci_name, udm_item->value);
}
- nt->userdata_length = strnlen(nt->userdata_complete,
- sizeof(nt->userdata_complete));
+ nt->userdata_length = strnlen(nt->extradata_complete,
+ sizeof(nt->extradata_complete));
}
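The overflow reasoning above relies on extradata_complete holding exactly MAX_EXTRADATA_ITEMS entries of at most MAX_EXTRADATA_ENTRY_LEN bytes each; that invariant could be made explicit with a build-time check (a sketch only, placed inside the same CONFIG_NETCONSOLE_DYNAMIC block as the struct field):

static_assert(sizeof_field(struct netconsole_target, extradata_complete) ==
	      MAX_EXTRADATA_ENTRY_LEN * MAX_EXTRADATA_ITEMS);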
static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
@@ -761,7 +842,7 @@ static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
struct userdata *ud;
ssize_t ret;
- if (count > MAX_USERDATA_VALUE_LENGTH)
+ if (count > MAX_EXTRADATA_VALUE_LEN)
return -EMSGSIZE;
mutex_lock(&dynamic_netconsole_mutex);
@@ -780,7 +861,132 @@ out_unlock:
return ret;
}
+/* disable_sysdata_feature - Disable a sysdata feature and truncate the cached sysdata
+ * @nt: target that is disabling the feature
+ * @feature: feature being disabled
+ */
+static void disable_sysdata_feature(struct netconsole_target *nt,
+ enum sysdata_feature feature)
+{
+ nt->sysdata_fields &= ~feature;
+ nt->extradata_complete[nt->userdata_length] = 0;
+}
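The single NUL write is enough because extradata_complete is laid out as userdata first, with sysdata appended after it at send time. A sketch of the layout (values illustrative):

/*
 *  extradata_complete:
 *  [ " key1=val1\n key2=val2\n" ][ " cpu=3\n taskname=bash\n" ]
 *  0                  userdata_length                        '\0'
 *
 * Writing a NUL at userdata_length drops only the sysdata tail that
 * prepare_extradata() appends before each send.
 */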
+
+static ssize_t sysdata_release_enabled_store(struct config_item *item,
+ const char *buf, size_t count)
+{
+ struct netconsole_target *nt = to_target(item->ci_parent);
+ bool release_enabled, curr;
+ ssize_t ret;
+
+ ret = kstrtobool(buf, &release_enabled);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dynamic_netconsole_mutex);
+ curr = !!(nt->sysdata_fields & SYSDATA_RELEASE);
+ if (release_enabled == curr)
+ goto unlock_ok;
+
+ if (release_enabled &&
+ count_extradata_entries(nt) >= MAX_EXTRADATA_ITEMS) {
+ ret = -ENOSPC;
+ goto unlock;
+ }
+
+ if (release_enabled)
+ nt->sysdata_fields |= SYSDATA_RELEASE;
+ else
+ disable_sysdata_feature(nt, SYSDATA_RELEASE);
+
+unlock_ok:
+ ret = strnlen(buf, count);
+unlock:
+ mutex_unlock(&dynamic_netconsole_mutex);
+ return ret;
+}
+
+static ssize_t sysdata_taskname_enabled_store(struct config_item *item,
+ const char *buf, size_t count)
+{
+ struct netconsole_target *nt = to_target(item->ci_parent);
+ bool taskname_enabled, curr;
+ ssize_t ret;
+
+ ret = kstrtobool(buf, &taskname_enabled);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dynamic_netconsole_mutex);
+ curr = !!(nt->sysdata_fields & SYSDATA_TASKNAME);
+ if (taskname_enabled == curr)
+ goto unlock_ok;
+
+ if (taskname_enabled &&
+ count_extradata_entries(nt) >= MAX_EXTRADATA_ITEMS) {
+ ret = -ENOSPC;
+ goto unlock;
+ }
+
+ if (taskname_enabled)
+ nt->sysdata_fields |= SYSDATA_TASKNAME;
+ else
+ disable_sysdata_feature(nt, SYSDATA_TASKNAME);
+
+unlock_ok:
+ ret = strnlen(buf, count);
+unlock:
+ mutex_unlock(&dynamic_netconsole_mutex);
+ return ret;
+}
+
+/* configfs helper to enable/disable the sysdata cpu_nr feature */
+static ssize_t sysdata_cpu_nr_enabled_store(struct config_item *item,
+ const char *buf, size_t count)
+{
+ struct netconsole_target *nt = to_target(item->ci_parent);
+ bool cpu_nr_enabled, curr;
+ ssize_t ret;
+
+ ret = kstrtobool(buf, &cpu_nr_enabled);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dynamic_netconsole_mutex);
+ curr = !!(nt->sysdata_fields & SYSDATA_CPU_NR);
+ if (cpu_nr_enabled == curr)
+ /* no change requested */
+ goto unlock_ok;
+
+ if (cpu_nr_enabled &&
+ count_extradata_entries(nt) >= MAX_EXTRADATA_ITEMS) {
+ /* user wants the new feature, but there is no space in the
+ * buffer.
+ */
+ ret = -ENOSPC;
+ goto unlock;
+ }
+
+ if (cpu_nr_enabled)
+ nt->sysdata_fields |= SYSDATA_CPU_NR;
+ else
+ /* This is special because extradata_complete might have
+ * remaining data from previous sysdata, and it needs to be
+ * cleaned.
+ */
+ disable_sysdata_feature(nt, SYSDATA_CPU_NR);
+
+unlock_ok:
+ ret = strnlen(buf, count);
+unlock:
+ mutex_unlock(&dynamic_netconsole_mutex);
+ return ret;
+}
+
CONFIGFS_ATTR(userdatum_, value);
+CONFIGFS_ATTR(sysdata_, cpu_nr_enabled);
+CONFIGFS_ATTR(sysdata_, taskname_enabled);
+CONFIGFS_ATTR(sysdata_, release_enabled);
static struct configfs_attribute *userdatum_attrs[] = {
&userdatum_attr_value,
@@ -808,15 +1014,13 @@ static struct config_item *userdatum_make_item(struct config_group *group,
struct netconsole_target *nt;
struct userdatum *udm;
struct userdata *ud;
- size_t child_count;
- if (strlen(name) > MAX_USERDATA_NAME_LENGTH)
+ if (strlen(name) > MAX_EXTRADATA_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
ud = to_userdata(&group->cg_item);
nt = userdata_to_target(ud);
- child_count = list_count_nodes(&nt->userdata_group.cg_children);
- if (child_count >= MAX_USERDATA_ITEMS)
+ if (count_extradata_entries(nt) >= MAX_EXTRADATA_ITEMS)
return ERR_PTR(-ENOSPC);
udm = kzalloc(sizeof(*udm), GFP_KERNEL);
@@ -842,6 +1046,9 @@ static void userdatum_drop(struct config_group *group, struct config_item *item)
}
static struct configfs_attribute *userdata_attrs[] = {
+ &sysdata_attr_cpu_nr_enabled,
+ &sysdata_attr_taskname_enabled,
+ &sysdata_attr_release_enabled,
NULL,
};
@@ -1017,6 +1224,63 @@ static void populate_configfs_item(struct netconsole_target *nt,
init_target_config_group(nt, target_name);
}
+static int sysdata_append_cpu_nr(struct netconsole_target *nt, int offset)
+{
+ /* Append " cpu=%u" to extradata_complete, after the userdata string */
+ return scnprintf(&nt->extradata_complete[offset],
+ MAX_EXTRADATA_ENTRY_LEN, " cpu=%u\n",
+ raw_smp_processor_id());
+}
+
+static int sysdata_append_taskname(struct netconsole_target *nt, int offset)
+{
+ return scnprintf(&nt->extradata_complete[offset],
+ MAX_EXTRADATA_ENTRY_LEN, " taskname=%s\n",
+ current->comm);
+}
+
+static int sysdata_append_release(struct netconsole_target *nt, int offset)
+{
+ return scnprintf(&nt->extradata_complete[offset],
+ MAX_EXTRADATA_ENTRY_LEN, " release=%s\n",
+ init_utsname()->release);
+}
+
+/*
+ * prepare_extradata - append sysdata to extradata_complete at runtime
+ * @nt: target to send message to
+ */
+static int prepare_extradata(struct netconsole_target *nt)
+{
+ u32 fields = SYSDATA_CPU_NR | SYSDATA_TASKNAME | SYSDATA_RELEASE;
+ int extradata_len;
+
+ /* userdata was appended when configfs write helper was called
+ * by update_userdata().
+ */
+ extradata_len = nt->userdata_length;
+
+ if (!(nt->sysdata_fields & fields))
+ goto out;
+
+ if (nt->sysdata_fields & SYSDATA_CPU_NR)
+ extradata_len += sysdata_append_cpu_nr(nt, extradata_len);
+ if (nt->sysdata_fields & SYSDATA_TASKNAME)
+ extradata_len += sysdata_append_taskname(nt, extradata_len);
+ if (nt->sysdata_fields & SYSDATA_RELEASE)
+ extradata_len += sysdata_append_release(nt, extradata_len);
+
+ WARN_ON_ONCE(extradata_len >
+ MAX_EXTRADATA_ENTRY_LEN * MAX_EXTRADATA_ITEMS);
+
+out:
+ return extradata_len;
+}
+#else /* CONFIG_NETCONSOLE_DYNAMIC not set */
+static int prepare_extradata(struct netconsole_target *nt)
+{
+ return 0;
+}
#endif /* CONFIG_NETCONSOLE_DYNAMIC */
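For reference, with one userdata entry and all three sysdata features enabled, prepare_extradata() leaves the buffer and returns a length covering a string like the following (values illustrative):

/* " key=val\n cpu=2\n taskname=kworker/u8:1\n release=6.14.0-rc1\n" */

The send helpers below then treat userdata and sysdata as one extradata blob of extradata_len bytes.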
/* Handle network interface device notifications */
@@ -1117,29 +1381,28 @@ static void send_msg_no_fragmentation(struct netconsole_target *nt,
int msg_len,
int release_len)
{
- static char buf[MAX_PRINT_CHUNK]; /* protected by target_list_lock */
- const char *userdata = NULL;
+ const char *extradata = NULL;
const char *release;
#ifdef CONFIG_NETCONSOLE_DYNAMIC
- userdata = nt->userdata_complete;
+ extradata = nt->extradata_complete;
#endif
if (release_len) {
release = init_utsname()->release;
- scnprintf(buf, MAX_PRINT_CHUNK, "%s,%s", release, msg);
+ scnprintf(nt->buf, MAX_PRINT_CHUNK, "%s,%s", release, msg);
msg_len += release_len;
} else {
- memcpy(buf, msg, msg_len);
+ memcpy(nt->buf, msg, msg_len);
}
- if (userdata)
- msg_len += scnprintf(&buf[msg_len],
+ if (extradata)
+ msg_len += scnprintf(&nt->buf[msg_len],
MAX_PRINT_CHUNK - msg_len,
- "%s", userdata);
+ "%s", extradata);
- send_udp(nt, buf, msg_len);
+ send_udp(nt, nt->buf, msg_len);
}
static void append_release(char *buf)
@@ -1150,28 +1413,27 @@ static void append_release(char *buf)
scnprintf(buf, MAX_PRINT_CHUNK, "%s,", release);
}
-static void send_fragmented_body(struct netconsole_target *nt, char *buf,
+static void send_fragmented_body(struct netconsole_target *nt,
const char *msgbody, int header_len,
- int msgbody_len)
+ int msgbody_len, int extradata_len)
{
- const char *userdata = NULL;
+ int sent_extradata, preceding_bytes;
+ const char *extradata = NULL;
int body_len, offset = 0;
- int userdata_len = 0;
#ifdef CONFIG_NETCONSOLE_DYNAMIC
- userdata = nt->userdata_complete;
- userdata_len = nt->userdata_length;
+ extradata = nt->extradata_complete;
#endif
/* body_len represents the number of bytes that will be sent. This is
* bigger than MAX_PRINT_CHUNK, thus, it will be split in multiple
* packets
*/
- body_len = msgbody_len + userdata_len;
+ body_len = msgbody_len + extradata_len;
/* In each iteration of the while loop below, we send a packet
* containing the header and a portion of the body. The body is
- * composed of two parts: msgbody and userdata. We keep track of how
+ * composed of two parts: msgbody and extradata. We keep track of how
* many bytes have been sent so far using the offset variable, which
* ranges from 0 to the total length of the body.
*/
@@ -1181,7 +1443,7 @@ static void send_fragmented_body(struct netconsole_target *nt, char *buf,
int this_offset = 0;
int this_chunk = 0;
- this_header += scnprintf(buf + this_header,
+ this_header += scnprintf(nt->buf + this_header,
MAX_PRINT_CHUNK - this_header,
",ncfrag=%d/%d;", offset,
body_len);
@@ -1192,47 +1454,48 @@ static void send_fragmented_body(struct netconsole_target *nt, char *buf,
MAX_PRINT_CHUNK - this_header);
if (WARN_ON_ONCE(this_chunk <= 0))
return;
- memcpy(buf + this_header, msgbody + offset, this_chunk);
+ memcpy(nt->buf + this_header, msgbody + offset,
+ this_chunk);
this_offset += this_chunk;
}
/* msgbody was finally written, either in the previous
* messages and/or in the current buf. Time to write
- * the userdata.
+ * the extradata.
*/
msgbody_written |= offset + this_offset >= msgbody_len;
- /* Msg body is fully written and there is pending userdata to
- * write, append userdata in this chunk
+ /* Msg body is fully written and there is pending extradata to
+ * write, append extradata in this chunk
*/
if (msgbody_written && offset + this_offset < body_len) {
/* Track how much user data was already sent. First
* time here, sent_userdata is zero
*/
- int sent_userdata = (offset + this_offset) - msgbody_len;
+ sent_extradata = (offset + this_offset) - msgbody_len;
/* offset of bytes used in current buf */
- int preceding_bytes = this_chunk + this_header;
+ preceding_bytes = this_chunk + this_header;
- if (WARN_ON_ONCE(sent_userdata < 0))
+ if (WARN_ON_ONCE(sent_extradata < 0))
return;
- this_chunk = min(userdata_len - sent_userdata,
+ this_chunk = min(extradata_len - sent_extradata,
MAX_PRINT_CHUNK - preceding_bytes);
if (WARN_ON_ONCE(this_chunk < 0))
/* this_chunk could be zero if all the previous
* message used all the buffer. This is not a
- * problem, userdata will be sent in the next
+ * problem, extradata will be sent in the next
* iteration
*/
return;
- memcpy(buf + this_header + this_offset,
- userdata + sent_userdata,
+ memcpy(nt->buf + this_header + this_offset,
+ extradata + sent_extradata,
this_chunk);
this_offset += this_chunk;
}
- send_udp(nt, buf, this_header + this_offset);
+ send_udp(nt, nt->buf, this_header + this_offset);
offset += this_offset;
}
}
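A numeric walk-through of the loop above, assuming MAX_PRINT_CHUNK of 1000 bytes, a 40-byte header, a 1500-byte msgbody and 300 bytes of extradata (all values illustrative):

/*
 * body_len = 1500 + 300 = 1800
 * packet 1: header + ",ncfrag=0/1800;"   + 945 bytes of msgbody
 * packet 2: header + ",ncfrag=945/1800;" + the remaining 555 bytes of
 *           msgbody + all 300 bytes of extradata
 */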
@@ -1240,9 +1503,9 @@ static void send_fragmented_body(struct netconsole_target *nt, char *buf,
static void send_msg_fragmented(struct netconsole_target *nt,
const char *msg,
int msg_len,
- int release_len)
+ int release_len,
+ int extradata_len)
{
- static char buf[MAX_PRINT_CHUNK]; /* protected by target_list_lock */
int header_len, msgbody_len;
const char *msgbody;
@@ -1260,16 +1523,17 @@ static void send_msg_fragmented(struct netconsole_target *nt,
* "ncfrag=<byte-offset>/<total-bytes>"
*/
if (release_len)
- append_release(buf);
+ append_release(nt->buf);
/* Copy the header into the buffer */
- memcpy(buf + release_len, msg, header_len);
+ memcpy(nt->buf + release_len, msg, header_len);
header_len += release_len;
/* from now on, the header will be persisted, and the msgbody
* will be replaced
*/
- send_fragmented_body(nt, buf, msgbody, header_len, msgbody_len);
+ send_fragmented_body(nt, msgbody, header_len, msgbody_len,
+ extradata_len);
}
/**
@@ -1285,20 +1549,19 @@ static void send_msg_fragmented(struct netconsole_target *nt,
static void send_ext_msg_udp(struct netconsole_target *nt, const char *msg,
int msg_len)
{
- int userdata_len = 0;
int release_len = 0;
+ int extradata_len;
-#ifdef CONFIG_NETCONSOLE_DYNAMIC
- userdata_len = nt->userdata_length;
-#endif
+ extradata_len = prepare_extradata(nt);
if (nt->release)
release_len = strlen(init_utsname()->release) + 1;
- if (msg_len + release_len + userdata_len <= MAX_PRINT_CHUNK)
+ if (msg_len + release_len + extradata_len <= MAX_PRINT_CHUNK)
return send_msg_no_fragmentation(nt, msg, msg_len, release_len);
- return send_msg_fragmented(nt, msg, msg_len, release_len);
+ return send_msg_fragmented(nt, msg, msg_len, release_len,
+ extradata_len);
}
static void write_ext_msg(struct console *con, const char *msg,
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index 608953d4f98d..49537d3c4120 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -296,7 +296,8 @@ static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
NSIM_EA(bpf->extack, "attempt to load offloaded prog to drv");
return -EINVAL;
}
- if (ns->netdev->mtu > NSIM_XDP_MAX_MTU) {
+ if (bpf->prog && !bpf->prog->aux->xdp_has_frags &&
+ ns->netdev->mtu > NSIM_XDP_MAX_MTU) {
NSIM_EA(bpf->extack, "MTU too large w/ XDP enabled");
return -EINVAL;
}
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
index 7ab358616e03..4d191a3293c7 100644
--- a/drivers/net/netdevsim/ethtool.c
+++ b/drivers/net/netdevsim/ethtool.c
@@ -107,10 +107,8 @@ nsim_set_channels(struct net_device *dev, struct ethtool_channels *ch)
struct netdevsim *ns = netdev_priv(dev);
int err;
- netdev_lock(dev);
err = netif_set_real_num_queues(dev, ch->combined_count,
ch->combined_count);
- netdev_unlock(dev);
if (err)
return err;
diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c
index 88187dd4eb2d..d88bdb9a1717 100644
--- a/drivers/net/netdevsim/ipsec.c
+++ b/drivers/net/netdevsim/ipsec.c
@@ -217,20 +217,9 @@ static void nsim_ipsec_del_sa(struct xfrm_state *xs)
ipsec->count--;
}
-static bool nsim_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
-{
- struct netdevsim *ns = netdev_priv(xs->xso.real_dev);
- struct nsim_ipsec *ipsec = &ns->ipsec;
-
- ipsec->ok++;
-
- return true;
-}
-
static const struct xfrmdev_ops nsim_xfrmdev_ops = {
.xdo_dev_state_add = nsim_ipsec_add_sa,
.xdo_dev_state_delete = nsim_ipsec_del_sa,
- .xdo_dev_offload_ok = nsim_ipsec_offload_ok,
};
bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb)
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 42f247cbdcee..b67af4651185 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -25,6 +25,7 @@
#include <net/page_pool/helpers.h>
#include <net/netlink.h>
#include <net/net_shaper.h>
+#include <net/netdev_lock.h>
#include <net/pkt_cls.h>
#include <net/rtnetlink.h>
#include <net/udp_tunnel.h>
@@ -87,7 +88,8 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(nsim_forward_skb(peer_dev, skb, rq) == NET_RX_DROP))
goto out_drop_cnt;
- napi_schedule(&rq->napi);
+ if (!hrtimer_active(&rq->napi_timer))
+ hrtimer_start(&rq->napi_timer, us_to_ktime(5), HRTIMER_MODE_REL);
rcu_read_unlock();
u64_stats_update_begin(&ns->syncp);
@@ -114,7 +116,8 @@ static int nsim_change_mtu(struct net_device *dev, int new_mtu)
{
struct netdevsim *ns = netdev_priv(dev);
- if (ns->xdp.prog && new_mtu > NSIM_XDP_MAX_MTU)
+ if (ns->xdp.prog && !ns->xdp.prog->aux->xdp_has_frags &&
+ new_mtu > NSIM_XDP_MAX_MTU)
return -EBUSY;
WRITE_ONCE(dev->mtu, new_mtu);
@@ -401,7 +404,7 @@ static int nsim_init_napi(struct netdevsim *ns)
for (i = 0; i < dev->num_rx_queues; i++) {
rq = ns->rq[i];
- netif_napi_add_config(dev, &rq->napi, nsim_poll, i);
+ netif_napi_add_config_locked(dev, &rq->napi, nsim_poll, i);
}
for (i = 0; i < dev->num_rx_queues; i++) {
@@ -421,11 +424,27 @@ err_pp_destroy:
}
for (i = 0; i < dev->num_rx_queues; i++)
- __netif_napi_del(&ns->rq[i]->napi);
+ __netif_napi_del_locked(&ns->rq[i]->napi);
return err;
}
+static enum hrtimer_restart nsim_napi_schedule(struct hrtimer *timer)
+{
+ struct nsim_rq *rq;
+
+ rq = container_of(timer, struct nsim_rq, napi_timer);
+ napi_schedule(&rq->napi);
+
+ return HRTIMER_NORESTART;
+}
+
+static void nsim_rq_timer_init(struct nsim_rq *rq)
+{
+ hrtimer_init(&rq->napi_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ rq->napi_timer.function = nsim_napi_schedule;
+}
+
static void nsim_enable_napi(struct netdevsim *ns)
{
struct net_device *dev = ns->netdev;
@@ -435,7 +454,7 @@ static void nsim_enable_napi(struct netdevsim *ns)
struct nsim_rq *rq = ns->rq[i];
netif_queue_set_napi(dev, i, NETDEV_QUEUE_TYPE_RX, &rq->napi);
- napi_enable(&rq->napi);
+ napi_enable_locked(&rq->napi);
}
}
@@ -444,6 +463,8 @@ static int nsim_open(struct net_device *dev)
struct netdevsim *ns = netdev_priv(dev);
int err;
+ netdev_assert_locked(dev);
+
err = nsim_init_napi(ns);
if (err)
return err;
@@ -461,8 +482,8 @@ static void nsim_del_napi(struct netdevsim *ns)
for (i = 0; i < dev->num_rx_queues; i++) {
struct nsim_rq *rq = ns->rq[i];
- napi_disable(&rq->napi);
- __netif_napi_del(&rq->napi);
+ napi_disable_locked(&rq->napi);
+ __netif_napi_del_locked(&rq->napi);
}
synchronize_net();
@@ -477,6 +498,8 @@ static int nsim_stop(struct net_device *dev)
struct netdevsim *ns = netdev_priv(dev);
struct netdevsim *peer;
+ netdev_assert_locked(dev);
+
netif_carrier_off(dev);
peer = rtnl_dereference(ns->peer);
if (peer)
@@ -615,11 +638,13 @@ static struct nsim_rq *nsim_queue_alloc(void)
return NULL;
skb_queue_head_init(&rq->skb_queue);
+ nsim_rq_timer_init(rq);
return rq;
}
static void nsim_queue_free(struct nsim_rq *rq)
{
+ hrtimer_cancel(&rq->napi_timer);
skb_queue_purge_reason(&rq->skb_queue, SKB_DROP_REASON_QUEUE_PURGE);
kfree(rq);
}
@@ -645,8 +670,11 @@ nsim_queue_mem_alloc(struct net_device *dev, void *per_queue_mem, int idx)
if (ns->rq_reset_mode > 3)
return -EINVAL;
- if (ns->rq_reset_mode == 1)
+ if (ns->rq_reset_mode == 1) {
+ if (!netif_running(ns->netdev))
+ return -ENETDOWN;
return nsim_create_page_pool(&qmem->pp, &ns->rq[idx]->napi);
+ }
qmem->rq = nsim_queue_alloc();
if (!qmem->rq)
@@ -657,7 +685,8 @@ nsim_queue_mem_alloc(struct net_device *dev, void *per_queue_mem, int idx)
goto err_free;
if (!ns->rq_reset_mode)
- netif_napi_add_config(dev, &qmem->rq->napi, nsim_poll, idx);
+ netif_napi_add_config_locked(dev, &qmem->rq->napi, nsim_poll,
+ idx);
return 0;
@@ -674,7 +703,7 @@ static void nsim_queue_mem_free(struct net_device *dev, void *per_queue_mem)
page_pool_destroy(qmem->pp);
if (qmem->rq) {
if (!ns->rq_reset_mode)
- netif_napi_del(&qmem->rq->napi);
+ netif_napi_del_locked(&qmem->rq->napi);
page_pool_destroy(qmem->rq->page_pool);
nsim_queue_free(qmem->rq);
}
@@ -686,9 +715,11 @@ nsim_queue_start(struct net_device *dev, void *per_queue_mem, int idx)
struct nsim_queue_mem *qmem = per_queue_mem;
struct netdevsim *ns = netdev_priv(dev);
+ netdev_assert_locked(dev);
+
if (ns->rq_reset_mode == 1) {
ns->rq[idx]->page_pool = qmem->pp;
- napi_enable(&ns->rq[idx]->napi);
+ napi_enable_locked(&ns->rq[idx]->napi);
return 0;
}
@@ -696,15 +727,17 @@ nsim_queue_start(struct net_device *dev, void *per_queue_mem, int idx)
* here we want to test various call orders.
*/
if (ns->rq_reset_mode == 2) {
- netif_napi_del(&ns->rq[idx]->napi);
- netif_napi_add_config(dev, &qmem->rq->napi, nsim_poll, idx);
+ netif_napi_del_locked(&ns->rq[idx]->napi);
+ netif_napi_add_config_locked(dev, &qmem->rq->napi, nsim_poll,
+ idx);
} else if (ns->rq_reset_mode == 3) {
- netif_napi_add_config(dev, &qmem->rq->napi, nsim_poll, idx);
- netif_napi_del(&ns->rq[idx]->napi);
+ netif_napi_add_config_locked(dev, &qmem->rq->napi, nsim_poll,
+ idx);
+ netif_napi_del_locked(&ns->rq[idx]->napi);
}
ns->rq[idx] = qmem->rq;
- napi_enable(&ns->rq[idx]->napi);
+ napi_enable_locked(&ns->rq[idx]->napi);
return 0;
}
@@ -714,7 +747,9 @@ static int nsim_queue_stop(struct net_device *dev, void *per_queue_mem, int idx)
struct nsim_queue_mem *qmem = per_queue_mem;
struct netdevsim *ns = netdev_priv(dev);
- napi_disable(&ns->rq[idx]->napi);
+ netdev_assert_locked(dev);
+
+ napi_disable_locked(&ns->rq[idx]->napi);
if (ns->rq_reset_mode == 1) {
qmem->pp = ns->rq[idx]->page_pool;
@@ -753,12 +788,7 @@ nsim_qreset_write(struct file *file, const char __user *data,
if (ret != 2)
return -EINVAL;
- rtnl_lock();
- if (!netif_running(ns->netdev)) {
- ret = -ENETDOWN;
- goto exit_unlock;
- }
-
+ netdev_lock(ns->netdev);
if (queue >= ns->netdev->real_num_rx_queues) {
ret = -EINVAL;
goto exit_unlock;
@@ -772,7 +802,7 @@ nsim_qreset_write(struct file *file, const char __user *data,
ret = count;
exit_unlock:
- rtnl_unlock();
+ netdev_unlock(ns->netdev);
return ret;
}
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 96d54c08043d..665020d18f29 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -54,7 +54,6 @@ struct nsim_ipsec {
struct dentry *pfile;
u32 count;
u32 tx;
- u32 ok;
};
#define NSIM_MACSEC_MAX_SECY_COUNT 3
@@ -97,6 +96,7 @@ struct nsim_rq {
struct napi_struct napi;
struct sk_buff_head skb_queue;
struct page_pool *page_pool;
+ struct hrtimer napi_timer;
};
struct netdevsim {
diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c
index 1e1b00756be7..d072a7968f56 100644
--- a/drivers/net/netkit.c
+++ b/drivers/net/netkit.c
@@ -65,7 +65,6 @@ static void netkit_prep_forward(struct sk_buff *skb,
skb_reset_mac_header(skb);
if (!xnet)
return;
- ipvs_reset(skb);
skb_clear_tstamp(skb);
if (xnet_scrub)
netkit_xnet(skb);
@@ -327,17 +326,20 @@ static int netkit_validate(struct nlattr *tb[], struct nlattr *data[],
static struct rtnl_link_ops netkit_link_ops;
-static int netkit_new_link(struct net *peer_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int netkit_new_link(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
- struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb, *attr;
- enum netkit_action policy_prim = NETKIT_PASS;
- enum netkit_action policy_peer = NETKIT_PASS;
+ struct net *peer_net = rtnl_newlink_peer_net(params);
enum netkit_scrub scrub_prim = NETKIT_SCRUB_DEFAULT;
enum netkit_scrub scrub_peer = NETKIT_SCRUB_DEFAULT;
+ struct nlattr *peer_tb[IFLA_MAX + 1], **tbp, *attr;
+ enum netkit_action policy_prim = NETKIT_PASS;
+ enum netkit_action policy_peer = NETKIT_PASS;
+ struct nlattr **data = params->data;
enum netkit_mode mode = NETKIT_L3;
unsigned char ifname_assign_type;
+ struct nlattr **tb = params->tb;
u16 headroom = 0, tailroom = 0;
struct ifinfomsg *ifmp = NULL;
struct net_device *peer;
@@ -345,6 +347,7 @@ static int netkit_new_link(struct net *peer_net, struct net_device *dev,
struct netkit *nk;
int err;
+ tbp = tb;
if (data) {
if (data[IFLA_NETKIT_MODE])
mode = nla_get_u32(data[IFLA_NETKIT_MODE]);
diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c
index e46f588cae7d..23b40e9eacbb 100644
--- a/drivers/net/pcs/pcs-lynx.c
+++ b/drivers/net/pcs/pcs-lynx.c
@@ -355,7 +355,6 @@ static struct phylink_pcs *lynx_pcs_create(struct mdio_device *mdio)
mdio_device_get(mdio);
lynx->mdio = mdio;
lynx->pcs.ops = &lynx_pcs_phylink_ops;
- lynx->pcs.neg_mode = true;
lynx->pcs.poll = true;
for (i = 0; i < ARRAY_SIZE(lynx_interfaces); i++)
diff --git a/drivers/net/pcs/pcs-mtk-lynxi.c b/drivers/net/pcs/pcs-mtk-lynxi.c
index 7d6261dee534..149ddf51d785 100644
--- a/drivers/net/pcs/pcs-mtk-lynxi.c
+++ b/drivers/net/pcs/pcs-mtk-lynxi.c
@@ -305,7 +305,6 @@ struct phylink_pcs *mtk_pcs_lynxi_create(struct device *dev,
mpcs->regmap = regmap;
mpcs->flags = flags;
mpcs->pcs.ops = &mtk_pcs_lynxi_ops;
- mpcs->pcs.neg_mode = true;
mpcs->pcs.poll = true;
mpcs->interface = PHY_INTERFACE_MODE_NA;
diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c
index 61944574d087..d79bb9b06cd2 100644
--- a/drivers/net/pcs/pcs-rzn1-miic.c
+++ b/drivers/net/pcs/pcs-rzn1-miic.c
@@ -268,17 +268,6 @@ static void miic_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
(MIIC_CONVCTRL_CONV_SPEED | MIIC_CONVCTRL_FULLD), val);
}
-static int miic_validate(struct phylink_pcs *pcs, unsigned long *supported,
- const struct phylink_link_state *state)
-{
- if (phy_interface_mode_is_rgmii(state->interface) ||
- state->interface == PHY_INTERFACE_MODE_RMII ||
- state->interface == PHY_INTERFACE_MODE_MII)
- return 1;
-
- return -EINVAL;
-}
-
static int miic_pre_init(struct phylink_pcs *pcs)
{
struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs);
@@ -307,7 +296,6 @@ static int miic_pre_init(struct phylink_pcs *pcs)
}
static const struct phylink_pcs_ops miic_phylink_ops = {
- .pcs_validate = miic_validate,
.pcs_config = miic_config,
.pcs_link_up = miic_link_up,
.pcs_pre_init = miic_pre_init,
@@ -361,7 +349,10 @@ struct phylink_pcs *miic_create(struct device *dev, struct device_node *np)
miic_port->miic = miic;
miic_port->port = port - 1;
miic_port->pcs.ops = &miic_phylink_ops;
- miic_port->pcs.neg_mode = true;
+
+ phy_interface_set_rgmii(miic_port->pcs.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII, miic_port->pcs.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII, miic_port->pcs.supported_interfaces);
return &miic_port->pcs;
}
@@ -472,13 +463,10 @@ static int miic_parse_dt(struct device *dev, u32 *mode_cfg)
if (of_property_read_u32(np, "renesas,miic-switch-portin", &conf) == 0)
dt_val[0] = conf;
- for_each_child_of_node(np, conv) {
+ for_each_available_child_of_node(np, conv) {
if (of_property_read_u32(conv, "reg", &port))
continue;
- if (!of_device_is_available(conv))
- continue;
-
if (of_property_read_u32(conv, "renesas,miic-input", &conf) == 0)
dt_val[port] = conf;
}
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 1faa37f0e7b9..3d1bd5aac093 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -602,35 +602,21 @@ static void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces)
__set_bit(compat->interface, interfaces);
}
-int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable)
+static int xpcs_switch_interface_mode(struct dw_xpcs *xpcs,
+ phy_interface_t interface)
{
- u16 mask, val;
- int ret;
-
- mask = DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN |
- DW_VR_MII_EEE_TX_QUIET_EN | DW_VR_MII_EEE_RX_QUIET_EN |
- DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL |
- DW_VR_MII_EEE_MULT_FACT_100NS;
+ int ret = 0;
- if (enable)
- val = DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN |
- DW_VR_MII_EEE_TX_QUIET_EN | DW_VR_MII_EEE_RX_QUIET_EN |
- DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL |
- FIELD_PREP(DW_VR_MII_EEE_MULT_FACT_100NS,
- mult_fact_100ns);
- else
- val = 0;
-
- ret = xpcs_modify(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL0, mask,
- val);
- if (ret < 0)
- return ret;
+ if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID) {
+ ret = txgbe_xpcs_switch_mode(xpcs, interface);
+ } else if (xpcs->interface != interface) {
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ xpcs->need_reset = true;
+ xpcs->interface = interface;
+ }
- return xpcs_modify(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL1,
- DW_VR_MII_EEE_TRN_LPI,
- enable ? DW_VR_MII_EEE_TRN_LPI : 0);
+ return ret;
}
-EXPORT_SYMBOL_GPL(xpcs_config_eee);
static void xpcs_pre_config(struct phylink_pcs *pcs, phy_interface_t interface)
{
@@ -638,6 +624,11 @@ static void xpcs_pre_config(struct phylink_pcs *pcs, phy_interface_t interface)
const struct dw_xpcs_compat *compat;
int ret;
+ ret = xpcs_switch_interface_mode(xpcs, interface);
+ if (ret)
+ dev_err(&xpcs->mdiodev->dev, "switch interface failed: %pe\n",
+ ERR_PTR(ret));
+
if (!xpcs->need_reset)
return;
@@ -829,10 +820,6 @@ static int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
return -ENODEV;
if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID) {
- ret = txgbe_xpcs_switch_mode(xpcs, interface);
- if (ret)
- return ret;
-
/* Wangxun devices need backplane CL37 AN enabled for
* SGMII and 1000base-X
*/
@@ -1193,6 +1180,63 @@ static void xpcs_an_restart(struct phylink_pcs *pcs)
BMCR_ANRESTART);
}
+static int xpcs_config_eee(struct dw_xpcs *xpcs, bool enable)
+{
+ u16 mask, val;
+ int ret;
+
+ mask = DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN |
+ DW_VR_MII_EEE_TX_QUIET_EN | DW_VR_MII_EEE_RX_QUIET_EN |
+ DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL |
+ DW_VR_MII_EEE_MULT_FACT_100NS;
+
+ if (enable)
+ val = DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN |
+ DW_VR_MII_EEE_TX_QUIET_EN | DW_VR_MII_EEE_RX_QUIET_EN |
+ DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL |
+ FIELD_PREP(DW_VR_MII_EEE_MULT_FACT_100NS,
+ xpcs->eee_mult_fact);
+ else
+ val = 0;
+
+ ret = xpcs_modify(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL0, mask,
+ val);
+ if (ret < 0)
+ return ret;
+
+ return xpcs_modify(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL1,
+ DW_VR_MII_EEE_TRN_LPI,
+ enable ? DW_VR_MII_EEE_TRN_LPI : 0);
+}
+
+static void xpcs_disable_eee(struct phylink_pcs *pcs)
+{
+ struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs);
+
+ xpcs_config_eee(xpcs, false);
+}
+
+static void xpcs_enable_eee(struct phylink_pcs *pcs)
+{
+ struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs);
+
+ xpcs_config_eee(xpcs, true);
+}
+
+/**
+ * xpcs_config_eee_mult_fact() - set the EEE clock multiplying factor
+ * @xpcs: pointer to a &struct dw_xpcs instance
+ * @mult_fact: the multiplying factor
+ *
+ * Configure the EEE clock multiplying factor. This value should be such that
+ * clk_eee_time_period * (mult_fact + 1) is within the range 80 to 120ns.
+ */
+void xpcs_config_eee_mult_fact(struct dw_xpcs *xpcs, u8 mult_fact)
+{
+ xpcs->eee_mult_fact = mult_fact;
+}
+EXPORT_SYMBOL_GPL(xpcs_config_eee_mult_fact);
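As a worked example of the constraint above: with a 20 ns EEE clock, mult_fact 4 gives 20 ns * (4 + 1) = 100 ns, inside the 80-120 ns window. A hypothetical helper deriving the factor from a clock period (a sketch, not part of this patch):

static u8 xpcs_eee_mult_fact_for_period(unsigned int clk_period_ns)
{
	/* aim for the middle of the 80-120ns window */
	return DIV_ROUND_UP(100, clk_period_ns) - 1;
}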
+
static int xpcs_read_ids(struct dw_xpcs *xpcs)
{
int ret;
@@ -1341,6 +1385,8 @@ static const struct phylink_pcs_ops xpcs_phylink_ops = {
.pcs_get_state = xpcs_get_state,
.pcs_an_restart = xpcs_an_restart,
.pcs_link_up = xpcs_link_up,
+ .pcs_disable_eee = xpcs_disable_eee,
+ .pcs_enable_eee = xpcs_enable_eee,
};
static int xpcs_identify(struct dw_xpcs *xpcs)
@@ -1374,7 +1420,6 @@ static struct dw_xpcs *xpcs_create_data(struct mdio_device *mdiodev)
mdio_device_get(mdiodev);
xpcs->mdiodev = mdiodev;
xpcs->pcs.ops = &xpcs_phylink_ops;
- xpcs->pcs.neg_mode = true;
xpcs->pcs.poll = true;
return xpcs;
diff --git a/drivers/net/pcs/pcs-xpcs.h b/drivers/net/pcs/pcs-xpcs.h
index adc5a0b3c883..929fa238445e 100644
--- a/drivers/net/pcs/pcs-xpcs.h
+++ b/drivers/net/pcs/pcs-xpcs.h
@@ -55,23 +55,11 @@
/* Clause 37 Defines */
/* VR MII MMD registers offsets */
#define DW_VR_MII_DIG_CTRL1 0x8000
-#define DW_VR_MII_AN_CTRL 0x8001
-#define DW_VR_MII_AN_INTR_STS 0x8002
-/* EEE Mode Control Register */
-#define DW_VR_MII_EEE_MCTRL0 0x8006
-#define DW_VR_MII_EEE_MCTRL1 0x800b
-#define DW_VR_MII_DIG_CTRL2 0x80e1
-
-/* VR_MII_DIG_CTRL1 */
#define DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW BIT(9)
#define DW_VR_MII_DIG_CTRL1_2G5_EN BIT(2)
#define DW_VR_MII_DIG_CTRL1_PHY_MODE_CTRL BIT(0)
-/* VR_MII_DIG_CTRL2 */
-#define DW_VR_MII_DIG_CTRL2_TX_POL_INV BIT(4)
-#define DW_VR_MII_DIG_CTRL2_RX_POL_INV BIT(0)
-
-/* VR_MII_AN_CTRL */
+#define DW_VR_MII_AN_CTRL 0x8001
#define DW_VR_MII_AN_CTRL_8BIT BIT(8)
#define DW_VR_MII_TX_CONFIG_MASK BIT(3)
#define DW_VR_MII_TX_CONFIG_PHY_SIDE_SGMII 0x1
@@ -81,7 +69,7 @@
#define DW_VR_MII_PCS_MODE_C37_SGMII 0x2
#define DW_VR_MII_AN_INTR_EN BIT(0)
-/* VR_MII_AN_INTR_STS */
+#define DW_VR_MII_AN_INTR_STS 0x8002
#define DW_VR_MII_AN_STS_C37_ANCMPLT_INTR BIT(0)
#define DW_VR_MII_AN_STS_C37_ANSGM_FD BIT(1)
#define DW_VR_MII_AN_STS_C37_ANSGM_SP GENMASK(3, 2)
@@ -90,19 +78,22 @@
#define DW_VR_MII_C37_ANSGM_SP_1000 0x2
#define DW_VR_MII_C37_ANSGM_SP_LNKSTS BIT(4)
-/* VR MII EEE Control 0 defines */
+#define DW_VR_MII_EEE_MCTRL0 0x8006
#define DW_VR_MII_EEE_LTX_EN BIT(0) /* LPI Tx Enable */
#define DW_VR_MII_EEE_LRX_EN BIT(1) /* LPI Rx Enable */
#define DW_VR_MII_EEE_TX_QUIET_EN BIT(2) /* Tx Quiet Enable */
#define DW_VR_MII_EEE_RX_QUIET_EN BIT(3) /* Rx Quiet Enable */
#define DW_VR_MII_EEE_TX_EN_CTRL BIT(4) /* Tx Control Enable */
#define DW_VR_MII_EEE_RX_EN_CTRL BIT(7) /* Rx Control Enable */
-
#define DW_VR_MII_EEE_MULT_FACT_100NS GENMASK(11, 8)
-/* VR MII EEE Control 1 defines */
+#define DW_VR_MII_EEE_MCTRL1 0x800b
#define DW_VR_MII_EEE_TRN_LPI BIT(0) /* Transparent Mode Enable */
+#define DW_VR_MII_DIG_CTRL2 0x80e1
+#define DW_VR_MII_DIG_CTRL2_TX_POL_INV BIT(4)
+#define DW_VR_MII_DIG_CTRL2_RX_POL_INV BIT(0)
+
#define DW_XPCS_INFO_DECLARE(_name, _pcs, _pma) \
static const struct dw_xpcs_info _name = { .pcs = _pcs, .pma = _pma }
@@ -122,6 +113,7 @@ struct dw_xpcs {
struct phylink_pcs pcs;
phy_interface_t interface;
bool need_reset;
+ u8 eee_mult_fact;
};
int xpcs_read(struct dw_xpcs *xpcs, int dev, u32 reg);
diff --git a/drivers/net/pfcp.c b/drivers/net/pfcp.c
index 68d0d9e92a22..f873a92d2445 100644
--- a/drivers/net/pfcp.c
+++ b/drivers/net/pfcp.c
@@ -184,15 +184,16 @@ static int pfcp_add_sock(struct pfcp_dev *pfcp)
return PTR_ERR_OR_ZERO(pfcp->sock);
}
-static int pfcp_newlink(struct net *net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int pfcp_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
struct pfcp_dev *pfcp = netdev_priv(dev);
struct pfcp_net *pn;
int err;
- pfcp->net = net;
+ pfcp->net = link_net;
err = pfcp_add_sock(pfcp);
if (err) {
@@ -206,7 +207,7 @@ static int pfcp_newlink(struct net *net, struct net_device *dev,
goto exit_del_pfcp_sock;
}
- pn = net_generic(net, pfcp_net_id);
+ pn = net_generic(link_net, pfcp_net_id);
list_add(&pfcp->list, &pn->pfcp_dev_list);
netdev_dbg(dev, "registered new PFCP interface\n");
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 41c15a2c2037..d29f9f7fd2e1 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -328,7 +328,7 @@ config NXP_C45_TJA11XX_PHY
depends on MACSEC || !MACSEC
help
Enable support for NXP C45 TJA11XX PHYs.
- Currently supports the TJA1103, TJA1104 and TJA1120 PHYs.
+ Currently supports the TJA1103, TJA1104, TJA1120 and TJA1121 PHYs.
config NXP_TJA11XX_PHY
tristate "NXP TJA11xx PHYs support"
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index c8dac6e92278..23ce205ae91d 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -2,7 +2,8 @@
# Makefile for Linux PHY drivers
libphy-y := phy.o phy-c45.o phy-core.o phy_device.o \
- linkmode.o phy_link_topology.o
+ linkmode.o phy_link_topology.o \
+ phy_package.o phy_caps.o
mdio-bus-y += mdio_bus.o mdio_device.o
ifdef CONFIG_MDIO_DEVICE
diff --git a/drivers/net/phy/adin1100.c b/drivers/net/phy/adin1100.c
index 6bb469429b9d..bd7a47a903ac 100644
--- a/drivers/net/phy/adin1100.c
+++ b/drivers/net/phy/adin1100.c
@@ -215,8 +215,11 @@ static int adin_resume(struct phy_device *phydev)
return adin_set_powerdown_mode(phydev, false);
}
-static int adin_set_loopback(struct phy_device *phydev, bool enable)
+static int adin_set_loopback(struct phy_device *phydev, bool enable, int speed)
{
+ if (enable && speed)
+ return -EOPNOTSUPP;
+
if (enable)
return phy_set_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_10T1L_CTRL,
BMCR_LOOPBACK);
diff --git a/drivers/net/phy/aquantia/aquantia_firmware.c b/drivers/net/phy/aquantia/aquantia_firmware.c
index dab3af80593f..bbbcc9736b00 100644
--- a/drivers/net/phy/aquantia/aquantia_firmware.c
+++ b/drivers/net/phy/aquantia/aquantia_firmware.c
@@ -328,10 +328,11 @@ static int aqr_firmware_load_fs(struct phy_device *phydev)
const char *fw_name;
int ret;
- ret = of_property_read_string(dev->of_node, "firmware-name",
- &fw_name);
- if (ret)
+ ret = device_property_read_string(dev, "firmware-name", &fw_name);
+ if (ret) {
+ phydev_err(phydev, "failed to read firmware-name: %d\n", ret);
return ret;
+ }
ret = request_firmware(&fw, fw_name, dev);
if (ret) {
diff --git a/drivers/net/phy/aquantia/aquantia_hwmon.c b/drivers/net/phy/aquantia/aquantia_hwmon.c
index 7b3c49c3bf49..1a714b56b765 100644
--- a/drivers/net/phy/aquantia/aquantia_hwmon.c
+++ b/drivers/net/phy/aquantia/aquantia_hwmon.c
@@ -172,33 +172,13 @@ static const struct hwmon_ops aqr_hwmon_ops = {
.write = aqr_hwmon_write,
};
-static u32 aqr_hwmon_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0,
-};
-
-static const struct hwmon_channel_info aqr_hwmon_chip = {
- .type = hwmon_chip,
- .config = aqr_hwmon_chip_config,
-};
-
-static u32 aqr_hwmon_temp_config[] = {
- HWMON_T_INPUT |
- HWMON_T_MAX | HWMON_T_MIN |
- HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM |
- HWMON_T_CRIT | HWMON_T_LCRIT |
- HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM,
- 0,
-};
-
-static const struct hwmon_channel_info aqr_hwmon_temp = {
- .type = hwmon_temp,
- .config = aqr_hwmon_temp_config,
-};
-
static const struct hwmon_channel_info * const aqr_hwmon_info[] = {
- &aqr_hwmon_chip,
- &aqr_hwmon_temp,
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT |
+ HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM |
+ HWMON_T_CRIT | HWMON_T_LCRIT |
+ HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM),
NULL,
};
diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c
index e42ace4e682a..08b1c9cc902b 100644
--- a/drivers/net/phy/aquantia/aquantia_main.c
+++ b/drivers/net/phy/aquantia/aquantia_main.c
@@ -50,6 +50,7 @@
#define MDIO_AN_VEND_PROV_1000BASET_HALF BIT(14)
#define MDIO_AN_VEND_PROV_5000BASET_FULL BIT(11)
#define MDIO_AN_VEND_PROV_2500BASET_FULL BIT(10)
+#define MDIO_AN_VEND_PROV_EXC_PHYID_INFO BIT(6)
#define MDIO_AN_VEND_PROV_DOWNSHIFT_EN BIT(4)
#define MDIO_AN_VEND_PROV_DOWNSHIFT_MASK GENMASK(3, 0)
#define MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT 4
@@ -333,6 +334,238 @@ static int aqr_read_status(struct phy_device *phydev)
return genphy_c45_read_status(phydev);
}
+static int aqr105_get_features(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Normal feature discovery */
+ ret = genphy_c45_pma_read_abilities(phydev);
+ if (ret)
+ return ret;
+
+ /* The AQR105 PHY fails to report the 2.5G and 5G modes, so add them
+ * here
+ */
+ linkmode_set_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ phydev->supported);
+
+ /* The AQR105 PHY supports both RJ45 and SFP+ interfaces */
+ linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
+
+ return 0;
+}
+
+static int aqr105_setup_forced(struct phy_device *phydev)
+{
+ int vend = MDIO_AN_VEND_PROV_EXC_PHYID_INFO;
+ int ctrl10 = 0;
+ int adv = ADVERTISE_CSMA;
+ int ret;
+
+ switch (phydev->speed) {
+ case SPEED_100:
+ adv |= ADVERTISE_100FULL;
+ break;
+ case SPEED_1000:
+ adv |= ADVERTISE_NPAGE;
+ if (phydev->duplex == DUPLEX_FULL)
+ vend |= MDIO_AN_VEND_PROV_1000BASET_FULL;
+ else
+ vend |= MDIO_AN_VEND_PROV_1000BASET_HALF;
+ break;
+ case SPEED_2500:
+ adv |= (ADVERTISE_NPAGE | ADVERTISE_RESV);
+ vend |= MDIO_AN_VEND_PROV_2500BASET_FULL;
+ break;
+ case SPEED_5000:
+ adv |= (ADVERTISE_NPAGE | ADVERTISE_RESV);
+ vend |= MDIO_AN_VEND_PROV_5000BASET_FULL;
+ break;
+ case SPEED_10000:
+ adv |= (ADVERTISE_NPAGE | ADVERTISE_RESV);
+ ctrl10 |= MDIO_AN_10GBT_CTRL_ADV10G;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, adv);
+ if (ret < 0)
+ return ret;
+ ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV, vend);
+ if (ret < 0)
+ return ret;
+ ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL, ctrl10);
+ if (ret < 0)
+ return ret;
+
+ /* set by vendor driver, but should be on by default */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1,
+ MDIO_AN_CTRL1_XNP);
+ if (ret < 0)
+ return ret;
+
+ return genphy_c45_an_disable_aneg(phydev);
+}
+
+static int aqr105_config_aneg(struct phy_device *phydev)
+{
+ bool changed = false;
+ u16 reg;
+ int ret;
+
+ ret = aqr_set_mdix(phydev, phydev->mdix_ctrl);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ if (phydev->autoneg == AUTONEG_DISABLE)
+ return aqr105_setup_forced(phydev);
+
+ ret = genphy_c45_an_config_aneg(phydev);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ /* Clause 45 has no standardized support for 1000BaseT, therefore
+ * use vendor registers for this mode.
+ */
+ reg = 0;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->advertising))
+ reg |= MDIO_AN_VEND_PROV_1000BASET_FULL;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phydev->advertising))
+ reg |= MDIO_AN_VEND_PROV_1000BASET_HALF;
+
+ /* Handle the case when the 2.5G and 5G speeds are not advertised */
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ phydev->advertising))
+ reg |= MDIO_AN_VEND_PROV_2500BASET_FULL;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ phydev->advertising))
+ reg |= MDIO_AN_VEND_PROV_5000BASET_FULL;
+
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV,
+ MDIO_AN_VEND_PROV_1000BASET_HALF |
+ MDIO_AN_VEND_PROV_1000BASET_FULL |
+ MDIO_AN_VEND_PROV_2500BASET_FULL |
+ MDIO_AN_VEND_PROV_5000BASET_FULL, reg);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ return genphy_c45_check_and_restart_aneg(phydev, changed);
+}
+
+static int aqr105_read_rate(struct phy_device *phydev)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_STATUS1);
+ if (val < 0)
+ return val;
+
+ if (val & MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ switch (FIELD_GET(MDIO_AN_TX_VEND_STATUS1_RATE_MASK, val)) {
+ case MDIO_AN_TX_VEND_STATUS1_10BASET:
+ phydev->speed = SPEED_10;
+ break;
+ case MDIO_AN_TX_VEND_STATUS1_100BASETX:
+ phydev->speed = SPEED_100;
+ break;
+ case MDIO_AN_TX_VEND_STATUS1_1000BASET:
+ phydev->speed = SPEED_1000;
+ break;
+ case MDIO_AN_TX_VEND_STATUS1_2500BASET:
+ phydev->speed = SPEED_2500;
+ break;
+ case MDIO_AN_TX_VEND_STATUS1_5000BASET:
+ phydev->speed = SPEED_5000;
+ break;
+ case MDIO_AN_TX_VEND_STATUS1_10GBASET:
+ phydev->speed = SPEED_10000;
+ break;
+ default:
+ phydev->speed = SPEED_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static int aqr105_read_status(struct phy_device *phydev)
+{
+ int ret;
+ int val;
+
+ ret = aqr_read_status(phydev);
+ if (ret)
+ return ret;
+
+ if (!phydev->link || phydev->autoneg == AUTONEG_DISABLE)
+ return 0;
+
+ /* The status register is not immediately correct on line side link up.
+ * Poll periodically until it reflects the correct ON state.
+ * Only fail on a read error; on timeout, fall through with the default
+ * OFF state.
+ */
+ ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_PHYXS,
+ MDIO_PHYXS_VEND_IF_STATUS, val,
+ (FIELD_GET(MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK, val) !=
+ MDIO_PHYXS_VEND_IF_STATUS_TYPE_OFF),
+ AQR107_OP_IN_PROG_SLEEP,
+ AQR107_OP_IN_PROG_TIMEOUT, false);
+ if (ret && ret != -ETIMEDOUT)
+ return ret;
+
+ switch (FIELD_GET(MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK, val)) {
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR:
+ phydev->interface = PHY_INTERFACE_MODE_10GKR;
+ break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KX:
+ phydev->interface = PHY_INTERFACE_MODE_1000BASEKX;
+ break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI:
+ phydev->interface = PHY_INTERFACE_MODE_10GBASER;
+ break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII:
+ phydev->interface = PHY_INTERFACE_MODE_USXGMII;
+ break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XAUI:
+ phydev->interface = PHY_INTERFACE_MODE_XAUI;
+ break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII:
+ phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_RXAUI:
+ phydev->interface = PHY_INTERFACE_MODE_RXAUI;
+ break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII:
+ phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OFF:
+ default:
+ phydev->link = false;
+ phydev->interface = PHY_INTERFACE_MODE_NA;
+ break;
+ }
+
+ /* Read rate from vendor register */
+ return aqr105_read_rate(phydev);
+}
+
static int aqr107_read_rate(struct phy_device *phydev)
{
u32 config_reg;
@@ -911,10 +1144,13 @@ static struct phy_driver aqr_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR105),
.name = "Aquantia AQR105",
- .config_aneg = aqr_config_aneg,
+ .get_features = aqr105_get_features,
+ .probe = aqr107_probe,
+ .config_init = aqr107_config_init,
+ .config_aneg = aqr105_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr_read_status,
+ .read_status = aqr105_read_status,
.suspend = aqr107_suspend,
.resume = aqr107_resume,
},
diff --git a/drivers/net/phy/ax88796b_rust.rs b/drivers/net/phy/ax88796b_rust.rs
index 8c7eb009d9fc..bc73ebccc2aa 100644
--- a/drivers/net/phy/ax88796b_rust.rs
+++ b/drivers/net/phy/ax88796b_rust.rs
@@ -19,7 +19,7 @@ kernel::module_phy_driver! {
DeviceId::new_with_driver::<PhyAX88796B>()
],
name: "rust_asix_phy",
- author: "FUJITA Tomonori <fujita.tomonori@gmail.com>",
+ authors: ["FUJITA Tomonori <fujita.tomonori@gmail.com>"],
description: "Rust Asix PHYs driver",
license: "GPL",
}
diff --git a/drivers/net/phy/bcm-phy-ptp.c b/drivers/net/phy/bcm-phy-ptp.c
index 208e8f561e06..eba8b5fb1365 100644
--- a/drivers/net/phy/bcm-phy-ptp.c
+++ b/drivers/net/phy/bcm-phy-ptp.c
@@ -597,7 +597,8 @@ static int bcm_ptp_perout_locked(struct bcm_ptp_private *priv,
period = BCM_MAX_PERIOD_8NS; /* write nonzero value */
- if (req->flags & PTP_PEROUT_PHASE)
+ /* Reject unsupported flags */
+ if (req->flags & ~PTP_PEROUT_DUTY_CYCLE)
return -EOPNOTSUPP;
if (req->flags & PTP_PEROUT_DUTY_CYCLE)
diff --git a/drivers/net/phy/bcm54140.c b/drivers/net/phy/bcm54140.c
index 7969345f6b35..a8edf45fd733 100644
--- a/drivers/net/phy/bcm54140.c
+++ b/drivers/net/phy/bcm54140.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/phy.h>
+#include "phylib.h"
#include "bcm-phy-lib.h"
/* RDB per-port registers
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 22edb7e4c1a1..13e43fee1906 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -16,7 +16,7 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/phy.h>
-#include <linux/pm_wakeup.h>
+#include <linux/device.h>
#include <linux/brcmphy.h>
#include <linux/of.h>
#include <linux/interrupt.h>
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 6599feca1967..14f361549638 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -31,6 +31,7 @@
#define MII_DP83822_RCSR 0x17
#define MII_DP83822_RESET_CTRL 0x1f
#define MII_DP83822_MLEDCR 0x25
+#define MII_DP83822_LDCTRL 0x403
#define MII_DP83822_LEDCFG1 0x460
#define MII_DP83822_IOCTRL1 0x462
#define MII_DP83822_IOCTRL2 0x463
@@ -123,6 +124,9 @@
#define DP83822_IOCTRL1_GPIO1_CTRL GENMASK(2, 0)
#define DP83822_IOCTRL1_GPIO1_CTRL_LED_1 BIT(0)
+/* LDCTRL bits */
+#define DP83822_100BASE_TX_LINE_DRIVER_SWING GENMASK(7, 4)
+
/* IOCTRL2 bits */
#define DP83822_IOCTRL2_GPIO2_CLK_SRC GENMASK(6, 4)
#define DP83822_IOCTRL2_GPIO2_CTRL GENMASK(2, 0)
@@ -197,6 +201,7 @@ struct dp83822_private {
bool set_gpio2_clk_out;
u32 gpio2_clk_out;
bool led_pin_enable[DP83822_MAX_LED_PINS];
+ int tx_amplitude_100base_tx_index;
};
static int dp83822_config_wol(struct phy_device *phydev,
@@ -522,6 +527,12 @@ static int dp83822_config_init(struct phy_device *phydev)
FIELD_PREP(DP83822_IOCTRL2_GPIO2_CLK_SRC,
dp83822->gpio2_clk_out));
+ if (dp83822->tx_amplitude_100base_tx_index >= 0)
+ phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_LDCTRL,
+ DP83822_100BASE_TX_LINE_DRIVER_SWING,
+ FIELD_PREP(DP83822_100BASE_TX_LINE_DRIVER_SWING,
+ dp83822->tx_amplitude_100base_tx_index));
+
err = dp83822_config_init_leds(phydev);
if (err)
return err;
@@ -720,6 +731,11 @@ static int dp83822_phy_reset(struct phy_device *phydev)
}
#ifdef CONFIG_OF_MDIO
+static const u32 tx_amplitude_100base_tx_gain[] = {
+ 80, 82, 83, 85, 87, 88, 90, 92,
+ 93, 95, 97, 98, 100, 102, 103, 105,
+};
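As a worked example of the mapping: a devicetree value of tx-amplitude-100base-tx-percent = 93 matches index 8 in the table above, and dp83822_config_init() then programs that index into the DP83822_100BASE_TX_LINE_DRIVER_SWING field of MII_DP83822_LDCTRL (values grounded in the gain table and error message in this patch).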
+
static int dp83822_of_init_leds(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
@@ -780,6 +796,8 @@ static int dp83822_of_init(struct phy_device *phydev)
struct dp83822_private *dp83822 = phydev->priv;
struct device *dev = &phydev->mdio.dev;
const char *of_val;
+ int i, ret;
+ u32 val;
/* Signal detection for the PHY is only enabled if the FX_EN and the
* SD_EN pins are strapped. Signal detection can only enabled if FX_EN
@@ -815,6 +833,25 @@ static int dp83822_of_init(struct phy_device *phydev)
dp83822->set_gpio2_clk_out = true;
}
+ ret = phy_get_tx_amplitude_gain(phydev, dev,
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ &val);
+ if (!ret) {
+ for (i = 0; i < ARRAY_SIZE(tx_amplitude_100base_tx_gain); i++) {
+ if (tx_amplitude_100base_tx_gain[i] == val) {
+ dp83822->tx_amplitude_100base_tx_index = i;
+ break;
+ }
+ }
+
+ if (dp83822->tx_amplitude_100base_tx_index < 0) {
+ phydev_err(phydev,
+ "Invalid value for tx-amplitude-100base-tx-percent property (%u)\n",
+ val);
+ return -EINVAL;
+ }
+ }
+
return dp83822_of_init_leds(phydev);
}
@@ -893,6 +930,7 @@ static int dp8382x_probe(struct phy_device *phydev)
if (!dp83822)
return -ENOMEM;
+ dp83822->tx_amplitude_100base_tx_index = -1;
phydev->priv = dp83822;
return 0;
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index c1451df430ac..063266cafe9c 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -1009,8 +1009,11 @@ static void dp83867_link_change_notify(struct phy_device *phydev)
}
}
-static int dp83867_loopback(struct phy_device *phydev, bool enable)
+static int dp83867_loopback(struct phy_device *phydev, bool enable, int speed)
{
+ if (enable && speed)
+ return -EOPNOTSUPP;
+
return phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK,
enable ? BMCR_LOOPBACK : 0);
}
diff --git a/drivers/net/phy/dp83td510.c b/drivers/net/phy/dp83td510.c
index a42af9c168ec..23af1ac194fa 100644
--- a/drivers/net/phy/dp83td510.c
+++ b/drivers/net/phy/dp83td510.c
@@ -204,10 +204,191 @@ struct dp83td510_priv {
#define DP83TD510E_UNKN_030E 0x30e
#define DP83TD510E_030E_VAL 0x2520
+#define DP83TD510E_LEDS_CFG_1 0x460
+#define DP83TD510E_LED_FN(idx, val) (((val) & 0xf) << ((idx) * 4))
+#define DP83TD510E_LED_FN_MASK(idx) (0xf << ((idx) * 4))
+/* link OK */
+#define DP83TD510E_LED_MODE_LINK_OK 0x0
+/* TX/RX activity */
+#define DP83TD510E_LED_MODE_TX_RX_ACTIVITY 0x1
+/* TX activity */
+#define DP83TD510E_LED_MODE_TX_ACTIVITY 0x2
+/* RX activity */
+#define DP83TD510E_LED_MODE_RX_ACTIVITY 0x3
+/* LR */
+#define DP83TD510E_LED_MODE_LR 0x4
+/* SR */
+#define DP83TD510E_LED_MODE_SR 0x5
+/* LED SPEED: High for 10Base-T */
+#define DP83TD510E_LED_MODE_LED_SPEED 0x6
+/* Duplex mode */
+#define DP83TD510E_LED_MODE_DUPLEX 0x7
+/* link + blink on activity with stretch option */
+#define DP83TD510E_LED_MODE_LINK_BLINK 0x8
+/* blink on activity with stretch option */
+#define DP83TD510E_LED_MODE_BLINK_ACTIVITY 0x9
+/* blink on tx activity with stretch option */
+#define DP83TD510E_LED_MODE_BLINK_TX 0xa
+/* blink on rx activity with stretch option */
+#define DP83TD510E_LED_MODE_BLINK_RX 0xb
+/* link_lost */
+#define DP83TD510E_LED_MODE_LINK_LOST 0xc
+/* PRBS error: toggles on error */
+#define DP83TD510E_LED_MODE_PRBS_ERROR 0xd
+/* XMII TX/RX Error with stretch option */
+#define DP83TD510E_LED_MODE_XMII_ERR 0xe
+
+#define DP83TD510E_LED_COUNT 4
+
+#define DP83TD510E_LEDS_CFG_2 0x469
+#define DP83TD510E_LED_POLARITY(idx) BIT((idx) * 4 + 2)
+#define DP83TD510E_LED_DRV_VAL(idx) BIT((idx) * 4 + 1)
+#define DP83TD510E_LED_DRV_EN(idx) BIT((idx) * 4)
+
#define DP83TD510E_ALCD_STAT 0xa9f
#define DP83TD510E_ALCD_COMPLETE BIT(15)
#define DP83TD510E_ALCD_CABLE_LENGTH GENMASK(10, 0)
+static int dp83td510_led_brightness_set(struct phy_device *phydev, u8 index,
+ enum led_brightness brightness)
+{
+ u32 val;
+
+ if (index >= DP83TD510E_LED_COUNT)
+ return -EINVAL;
+
+ val = DP83TD510E_LED_DRV_EN(index);
+
+ if (brightness)
+ val |= DP83TD510E_LED_DRV_VAL(index);
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_LEDS_CFG_2,
+ DP83TD510E_LED_DRV_VAL(index) |
+ DP83TD510E_LED_DRV_EN(index), val);
+}
+
+static int dp83td510_led_mode(u8 index, unsigned long rules)
+{
+ if (index >= DP83TD510E_LED_COUNT)
+ return -EINVAL;
+
+ switch (rules) {
+ case BIT(TRIGGER_NETDEV_LINK):
+ return DP83TD510E_LED_MODE_LINK_OK;
+ case BIT(TRIGGER_NETDEV_LINK_10):
+ return DP83TD510E_LED_MODE_LED_SPEED;
+ case BIT(TRIGGER_NETDEV_FULL_DUPLEX):
+ return DP83TD510E_LED_MODE_DUPLEX;
+ case BIT(TRIGGER_NETDEV_TX):
+ return DP83TD510E_LED_MODE_TX_ACTIVITY;
+ case BIT(TRIGGER_NETDEV_RX):
+ return DP83TD510E_LED_MODE_RX_ACTIVITY;
+ case BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX):
+ return DP83TD510E_LED_MODE_TX_RX_ACTIVITY;
+ case BIT(TRIGGER_NETDEV_LINK) | BIT(TRIGGER_NETDEV_TX) |
+ BIT(TRIGGER_NETDEV_RX):
+ return DP83TD510E_LED_MODE_LINK_BLINK;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int dp83td510_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ int ret;
+
+ ret = dp83td510_led_mode(index, rules);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int dp83td510_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ int mode, ret;
+
+ mode = dp83td510_led_mode(index, rules);
+ if (mode < 0)
+ return mode;
+
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_LEDS_CFG_1,
+ DP83TD510E_LED_FN_MASK(index),
+ DP83TD510E_LED_FN(index, mode));
+ if (ret)
+ return ret;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_LEDS_CFG_2,
+ DP83TD510E_LED_DRV_EN(index), 0);
+}
+
+static int dp83td510_led_hw_control_get(struct phy_device *phydev,
+ u8 index, unsigned long *rules)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_LEDS_CFG_1);
+ if (val < 0)
+ return val;
+
+ val &= DP83TD510E_LED_FN_MASK(index);
+ val >>= index * 4;
+
+ switch (val) {
+ case DP83TD510E_LED_MODE_LINK_OK:
+ *rules = BIT(TRIGGER_NETDEV_LINK);
+ break;
+ /* LED mode: LED SPEED (10BaseT1L indicator) */
+ case DP83TD510E_LED_MODE_LED_SPEED:
+ *rules = BIT(TRIGGER_NETDEV_LINK_10);
+ break;
+ case DP83TD510E_LED_MODE_DUPLEX:
+ *rules = BIT(TRIGGER_NETDEV_FULL_DUPLEX);
+ break;
+ case DP83TD510E_LED_MODE_TX_ACTIVITY:
+ *rules = BIT(TRIGGER_NETDEV_TX);
+ break;
+ case DP83TD510E_LED_MODE_RX_ACTIVITY:
+ *rules = BIT(TRIGGER_NETDEV_RX);
+ break;
+ case DP83TD510E_LED_MODE_TX_RX_ACTIVITY:
+ *rules = BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX);
+ break;
+ case DP83TD510E_LED_MODE_LINK_BLINK:
+ *rules = BIT(TRIGGER_NETDEV_LINK) |
+ BIT(TRIGGER_NETDEV_TX) |
+ BIT(TRIGGER_NETDEV_RX);
+ break;
+ default:
+ *rules = 0;
+ break;
+ }
+
+ return 0;
+}
+
+static int dp83td510_led_polarity_set(struct phy_device *phydev, int index,
+ unsigned long modes)
+{
+ u16 polarity = DP83TD510E_LED_POLARITY(index);
+ u32 mode;
+
+ for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) {
+ switch (mode) {
+ case PHY_LED_ACTIVE_LOW:
+ polarity = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_LEDS_CFG_2,
+ DP83TD510E_LED_POLARITY(index), polarity);
+}
+
/**
* dp83td510_update_stats - Update the PHY statistics for the DP83TD510 PHY.
* @phydev: Pointer to the phy_device structure.
@@ -712,6 +893,12 @@ static struct phy_driver dp83td510_driver[] = {
.get_phy_stats = dp83td510_get_phy_stats,
.update_stats = dp83td510_update_stats,
+ .led_brightness_set = dp83td510_led_brightness_set,
+ .led_hw_is_supported = dp83td510_led_hw_is_supported,
+ .led_hw_control_set = dp83td510_led_hw_control_set,
+ .led_hw_control_get = dp83td510_led_hw_control_get,
+ .led_polarity_set = dp83td510_led_polarity_set,
+
.suspend = genphy_suspend,
.resume = genphy_resume,
} };
diff --git a/drivers/net/phy/dp83tg720.c b/drivers/net/phy/dp83tg720.c
index 050f4537d140..7e76323409c4 100644
--- a/drivers/net/phy/dp83tg720.c
+++ b/drivers/net/phy/dp83tg720.c
@@ -4,12 +4,31 @@
*/
#include <linux/bitfield.h>
#include <linux/ethtool_netlink.h>
+#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/phy.h>
+#include <linux/random.h>
#include "open_alliance_helpers.h"
+/*
+ * DP83TG720S_POLL_ACTIVE_LINK - Polling interval in milliseconds when the link
+ * is active.
+ * DP83TG720S_POLL_NO_LINK_MIN - Minimum polling interval in milliseconds when
+ * the link is down.
+ * DP83TG720S_POLL_NO_LINK_MAX - Maximum polling interval in milliseconds when
+ * the link is down.
+ *
+ * These values are not documented or officially recommended by the vendor but
+ * were determined through empirical testing. They achieve a good balance in
+ * minimizing the number of reset retries while ensuring reliable link recovery
+ * within a reasonable timeframe.
+ */
+#define DP83TG720S_POLL_ACTIVE_LINK 1000
+#define DP83TG720S_POLL_NO_LINK_MIN 100
+#define DP83TG720S_POLL_NO_LINK_MAX 1000
+
#define DP83TG720S_PHY_ID 0x2000a284
/* MDIO_MMD_VEND2 registers */
@@ -371,6 +390,13 @@ static int dp83tg720_read_status(struct phy_device *phydev)
if (ret)
return ret;
+ /* Sleep for 600ms to let the PHY stabilize after the reset.
+ * The value was chosen empirically (it is not documented) and
+ * helps to reduce reset bounces when the link partner suffers
+ * from the same issue.
+ */
+ msleep(600);
+
/* After HW reset we need to restore master/slave configuration.
* genphy_c45_pma_baset1_read_master_slave() call will be done
* by the dp83tg720_config_aneg() function.
@@ -498,6 +524,57 @@ static int dp83tg720_probe(struct phy_device *phydev)
return 0;
}
+/**
+ * dp83tg720_get_next_update_time - Determine the next update time for PHY
+ * state
+ * @phydev: Pointer to the phy_device structure
+ *
+ * This function addresses a limitation of the DP83TG720 PHY, which cannot
+ * reliably detect or report a stable link state. To recover from such
+ * scenarios, the PHY must be periodically reset when the link is down. However,
+ * if the link partner also runs Linux with the same driver, synchronized reset
+ * intervals can lead to a deadlock where the link never establishes due to
+ * simultaneous resets on both sides.
+ *
+ * To avoid this, the function implements randomized polling intervals when the
+ * link is down. It ensures that reset intervals are desynchronized by
+ * introducing a random delay between a configured minimum and maximum range.
+ * When the link is up, a fixed polling interval is used to minimize overhead.
+ *
+ * This mechanism guarantees that the link will reestablish within 10 seconds
+ * in the worst-case scenario.
+ *
+ * Return: Time (in jiffies) until the next update event for the PHY state
+ * machine.
+ */
+static unsigned int dp83tg720_get_next_update_time(struct phy_device *phydev)
+{
+ unsigned int next_time_jiffies;
+
+ if (phydev->link) {
+ /* When the link is up, use a fixed 1000ms interval
+ * (in jiffies)
+ */
+ next_time_jiffies =
+ msecs_to_jiffies(DP83TG720S_POLL_ACTIVE_LINK);
+ } else {
+ unsigned int min_jiffies, max_jiffies, rand_jiffies;
+
+ /* When the link is down, randomize interval between min/max
+ * (in jiffies)
+ */
+ min_jiffies = msecs_to_jiffies(DP83TG720S_POLL_NO_LINK_MIN);
+ max_jiffies = msecs_to_jiffies(DP83TG720S_POLL_NO_LINK_MAX);
+
+ rand_jiffies = min_jiffies +
+ get_random_u32_below(max_jiffies - min_jiffies + 1);
+ next_time_jiffies = rand_jiffies;
+ }
+
+ /* Ensure the polling time is at least one jiffy */
+ return max(next_time_jiffies, 1U);
+}
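
   Editorial aside: to make the interval math above concrete, here is a worked example
   assuming HZ=1000, i.e. one jiffy per millisecond (the actual numbers scale with HZ):

	/* With HZ = 1000:
	 *   link up:   msecs_to_jiffies(DP83TG720S_POLL_ACTIVE_LINK) = 1000 jiffies (fixed)
	 *   link down: min = msecs_to_jiffies(DP83TG720S_POLL_NO_LINK_MIN) = 100
	 *              max = msecs_to_jiffies(DP83TG720S_POLL_NO_LINK_MAX) = 1000
	 *              next = 100 + get_random_u32_below(901), i.e. 100..1000 jiffies
	 */
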
+
static struct phy_driver dp83tg720_driver[] = {
{
PHY_ID_MATCH_MODEL(DP83TG720S_PHY_ID),
@@ -516,6 +593,7 @@ static struct phy_driver dp83tg720_driver[] = {
.get_link_stats = dp83tg720_get_link_stats,
.get_phy_stats = dp83tg720_get_phy_stats,
.update_stats = dp83tg720_update_stats,
+ .get_next_update_time = dp83tg720_get_next_update_time,
.suspend = genphy_suspend,
.resume = genphy_resume,
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index aef739c20ac4..ee7831a9849b 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -10,7 +10,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
+#include <linux/device/faux.h>
#include <linux/list.h>
#include <linux/mii.h>
#include <linux/phy.h>
@@ -40,7 +40,7 @@ struct fixed_phy {
struct gpio_desc *link_gpiod;
};
-static struct platform_device *pdev;
+static struct faux_device *fdev;
static struct fixed_mdio_bus platform_fmb = {
.phys = LIST_HEAD_INIT(platform_fmb.phys),
};
@@ -337,9 +337,9 @@ static int __init fixed_mdio_bus_init(void)
struct fixed_mdio_bus *fmb = &platform_fmb;
int ret;
- pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
+ fdev = faux_device_create("Fixed MDIO bus", NULL, NULL);
+ if (!fdev)
+ return -ENODEV;
fmb->mii_bus = mdiobus_alloc();
if (fmb->mii_bus == NULL) {
@@ -350,7 +350,7 @@ static int __init fixed_mdio_bus_init(void)
snprintf(fmb->mii_bus->id, MII_BUS_ID_SIZE, "fixed-0");
fmb->mii_bus->name = "Fixed MDIO Bus";
fmb->mii_bus->priv = fmb;
- fmb->mii_bus->parent = &pdev->dev;
+ fmb->mii_bus->parent = &fdev->dev;
fmb->mii_bus->read = &fixed_mdio_read;
fmb->mii_bus->write = &fixed_mdio_write;
fmb->mii_bus->phy_mask = ~0;
@@ -364,7 +364,7 @@ static int __init fixed_mdio_bus_init(void)
err_mdiobus_alloc:
mdiobus_free(fmb->mii_bus);
err_mdiobus_reg:
- platform_device_unregister(pdev);
+ faux_device_destroy(fdev);
return ret;
}
module_init(fixed_mdio_bus_init);
@@ -376,7 +376,7 @@ static void __exit fixed_mdio_bus_exit(void)
mdiobus_unregister(fmb->mii_bus);
mdiobus_free(fmb->mii_bus);
- platform_device_unregister(pdev);
+ faux_device_destroy(fdev);
list_for_each_entry_safe(fp, tmp, &fmb->phys, node) {
list_del(&fp->node);
diff --git a/drivers/net/phy/marvell-88q2xxx.c b/drivers/net/phy/marvell-88q2xxx.c
index a3996471a1c9..23e1f0521f54 100644
--- a/drivers/net/phy/marvell-88q2xxx.c
+++ b/drivers/net/phy/marvell-88q2xxx.c
@@ -7,30 +7,34 @@
* Copyright (C) 2024 Liebherr-Electronics and Drives GmbH
*/
#include <linux/ethtool_netlink.h>
+#include <linux/hwmon.h>
#include <linux/marvell_phy.h>
+#include <linux/of.h>
#include <linux/phy.h>
-#include <linux/hwmon.h>
-#define PHY_ID_88Q2220_REVB0 (MARVELL_PHY_ID_88Q2220 | 0x1)
-#define PHY_ID_88Q2220_REVB1 (MARVELL_PHY_ID_88Q2220 | 0x2)
-#define PHY_ID_88Q2220_REVB2 (MARVELL_PHY_ID_88Q2220 | 0x3)
+#define PHY_ID_88Q2220_REVB0 (MARVELL_PHY_ID_88Q2220 | 0x1)
+#define PHY_ID_88Q2220_REVB1 (MARVELL_PHY_ID_88Q2220 | 0x2)
+#define PHY_ID_88Q2220_REVB2 (MARVELL_PHY_ID_88Q2220 | 0x3)
-#define MDIO_MMD_AN_MV_STAT 32769
-#define MDIO_MMD_AN_MV_STAT_ANEG 0x0100
-#define MDIO_MMD_AN_MV_STAT_LOCAL_RX 0x1000
-#define MDIO_MMD_AN_MV_STAT_REMOTE_RX 0x2000
-#define MDIO_MMD_AN_MV_STAT_LOCAL_MASTER 0x4000
-#define MDIO_MMD_AN_MV_STAT_MS_CONF_FAULT 0x8000
+#define MDIO_MMD_AN_MV_STAT 32769
+#define MDIO_MMD_AN_MV_STAT_ANEG 0x0100
+#define MDIO_MMD_AN_MV_STAT_LOCAL_RX 0x1000
+#define MDIO_MMD_AN_MV_STAT_REMOTE_RX 0x2000
+#define MDIO_MMD_AN_MV_STAT_LOCAL_MASTER 0x4000
+#define MDIO_MMD_AN_MV_STAT_MS_CONF_FAULT 0x8000
-#define MDIO_MMD_AN_MV_STAT2 32794
-#define MDIO_MMD_AN_MV_STAT2_AN_RESOLVED 0x0800
-#define MDIO_MMD_AN_MV_STAT2_100BT1 0x2000
-#define MDIO_MMD_AN_MV_STAT2_1000BT1 0x4000
+#define MDIO_MMD_AN_MV_STAT2 32794
+#define MDIO_MMD_AN_MV_STAT2_AN_RESOLVED 0x0800
+#define MDIO_MMD_AN_MV_STAT2_100BT1 0x2000
+#define MDIO_MMD_AN_MV_STAT2_1000BT1 0x4000
-#define MDIO_MMD_PCS_MV_INT_EN 32784
-#define MDIO_MMD_PCS_MV_INT_EN_LINK_UP 0x0040
-#define MDIO_MMD_PCS_MV_INT_EN_LINK_DOWN 0x0080
-#define MDIO_MMD_PCS_MV_INT_EN_100BT1 0x1000
+#define MDIO_MMD_PCS_MV_RESET_CTRL 32768
+#define MDIO_MMD_PCS_MV_RESET_CTRL_TX_DISABLE 0x8
+
+#define MDIO_MMD_PCS_MV_INT_EN 32784
+#define MDIO_MMD_PCS_MV_INT_EN_LINK_UP 0x0040
+#define MDIO_MMD_PCS_MV_INT_EN_LINK_DOWN 0x0080
+#define MDIO_MMD_PCS_MV_INT_EN_100BT1 0x1000
#define MDIO_MMD_PCS_MV_GPIO_INT_STAT 32785
#define MDIO_MMD_PCS_MV_GPIO_INT_STAT_LINK_UP 0x0040
@@ -40,6 +44,22 @@
#define MDIO_MMD_PCS_MV_GPIO_INT_CTRL 32787
#define MDIO_MMD_PCS_MV_GPIO_INT_CTRL_TRI_DIS 0x0800
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL 32790
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_1_MASK GENMASK(7, 4)
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_0_MASK GENMASK(3, 0)
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK 0x0 /* Link established */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_RX_TX 0x1 /* Link established, blink for rx or tx activity */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_1000BT1 0x2 /* Blink 3x for 1000BT1 link established */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_RX_TX_ON 0x3 /* Receive or transmit activity */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_RX_TX 0x4 /* Blink on receive or transmit activity */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_TX 0x5 /* Transmit activity */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_COPPER 0x6 /* Copper Link established */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_1000BT1_ON 0x7 /* 1000BT1 link established */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_FORCE_OFF 0x8 /* Force off */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_FORCE_ON 0x9 /* Force on */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_FORCE_HIGHZ 0xa /* Force Hi-Z */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_FORCE_BLINK 0xb /* Force blink */
+
#define MDIO_MMD_PCS_MV_TEMP_SENSOR1 32833
#define MDIO_MMD_PCS_MV_TEMP_SENSOR1_RAW_INT 0x0001
#define MDIO_MMD_PCS_MV_TEMP_SENSOR1_INT 0x0040
@@ -60,11 +80,11 @@
#define MDIO_MMD_PCS_MV_100BT1_STAT1_REMOTE_RX 0x2000
#define MDIO_MMD_PCS_MV_100BT1_STAT1_LOCAL_MASTER 0x4000
-#define MDIO_MMD_PCS_MV_100BT1_STAT2 33033
-#define MDIO_MMD_PCS_MV_100BT1_STAT2_JABBER 0x0001
-#define MDIO_MMD_PCS_MV_100BT1_STAT2_POL 0x0002
-#define MDIO_MMD_PCS_MV_100BT1_STAT2_LINK 0x0004
-#define MDIO_MMD_PCS_MV_100BT1_STAT2_ANGE 0x0008
+#define MDIO_MMD_PCS_MV_100BT1_STAT2 33033
+#define MDIO_MMD_PCS_MV_100BT1_STAT2_JABBER 0x0001
+#define MDIO_MMD_PCS_MV_100BT1_STAT2_POL 0x0002
+#define MDIO_MMD_PCS_MV_100BT1_STAT2_LINK 0x0004
+#define MDIO_MMD_PCS_MV_100BT1_STAT2_ANGE 0x0008
#define MDIO_MMD_PCS_MV_100BT1_INT_EN 33042
#define MDIO_MMD_PCS_MV_100BT1_INT_EN_LINKEVENT 0x0400
@@ -72,7 +92,7 @@
#define MDIO_MMD_PCS_MV_COPPER_INT_STAT 33043
#define MDIO_MMD_PCS_MV_COPPER_INT_STAT_LINKEVENT 0x0400
-#define MDIO_MMD_PCS_MV_RX_STAT 33328
+#define MDIO_MMD_PCS_MV_RX_STAT 33328
#define MDIO_MMD_PCS_MV_TDR_RESET 65226
#define MDIO_MMD_PCS_MV_TDR_RESET_TDR_RST 0x1000
@@ -95,8 +115,12 @@
#define MDIO_MMD_PCS_MV_TDR_OFF_CUTOFF 65246
+#define MV88Q2XXX_LED_INDEX_TX_ENABLE 0
+#define MV88Q2XXX_LED_INDEX_GPIO 1
+
struct mv88q2xxx_priv {
bool enable_temp;
+ bool enable_led0;
};
struct mmd_val {
@@ -460,6 +484,9 @@ static int mv88q2xxx_config_aneg(struct phy_device *phydev)
static int mv88q2xxx_config_init(struct phy_device *phydev)
{
+ struct mv88q2xxx_priv *priv = phydev->priv;
+ int ret;
+
/* The 88Q2XXX PHYs do have the extended ability register available, but
* register MDIO_PMA_EXTABLE where they should signalize it does not
* work according to specification. Therefore, we force it here.
@@ -469,10 +496,31 @@ static int mv88q2xxx_config_init(struct phy_device *phydev)
/* Configure interrupt with default settings, output is driven low for
* active interrupt and high for inactive.
*/
- if (phy_interrupt_is_valid(phydev))
- return phy_set_bits_mmd(phydev, MDIO_MMD_PCS,
- MDIO_MMD_PCS_MV_GPIO_INT_CTRL,
- MDIO_MMD_PCS_MV_GPIO_INT_CTRL_TRI_DIS);
+ if (phy_interrupt_is_valid(phydev)) {
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_GPIO_INT_CTRL,
+ MDIO_MMD_PCS_MV_GPIO_INT_CTRL_TRI_DIS);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Enable LED function and disable TX disable feature on LED/TX_ENABLE */
+ if (priv->enable_led0) {
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_RESET_CTRL,
+ MDIO_MMD_PCS_MV_RESET_CTRL_TX_DISABLE);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Enable temperature sense */
+ if (priv->enable_temp) {
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR2,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK, 0);
+ if (ret < 0)
+ return ret;
+ }
return 0;
}
@@ -717,16 +765,10 @@ static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
struct mv88q2xxx_priv *priv = phydev->priv;
struct device *dev = &phydev->mdio.dev;
struct device *hwmon;
- char *hwmon_name;
priv->enable_temp = true;
- hwmon_name = devm_hwmon_sanitize_name(dev, dev_name(dev));
- if (IS_ERR(hwmon_name))
- return PTR_ERR(hwmon_name);
- hwmon = devm_hwmon_device_register_with_info(dev,
- hwmon_name,
- phydev,
+ hwmon = devm_hwmon_device_register_with_info(dev, NULL, phydev,
&mv88q2xxx_hwmon_chip_info,
NULL);
@@ -740,6 +782,49 @@ static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
}
#endif
+#if IS_ENABLED(CONFIG_OF_MDIO)
+static int mv88q2xxx_leds_probe(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct mv88q2xxx_priv *priv = phydev->priv;
+ struct device_node *leds;
+ int ret = 0;
+ u32 index;
+
+ if (!node)
+ return 0;
+
+ leds = of_get_child_by_name(node, "leds");
+ if (!leds)
+ return 0;
+
+ for_each_available_child_of_node_scoped(leds, led) {
+ ret = of_property_read_u32(led, "reg", &index);
+ if (ret)
+ goto exit;
+
+ if (index > MV88Q2XXX_LED_INDEX_GPIO) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (index == MV88Q2XXX_LED_INDEX_TX_ENABLE)
+ priv->enable_led0 = true;
+ }
+
+exit:
+ of_node_put(leds);
+
+ return ret;
+}
+
+#else
+static int mv88q2xxx_leds_probe(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif
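
   Editorial aside: the probe above only reads the "reg" value of each child of a "leds"
   node. An illustrative device tree fragment (trimmed to the properties this code
   actually inspects; not a complete binding example) that marks LED 0 as in use, so
   config_init() keeps the shared LED/TX_ENABLE pin in LED mode, would look like:

	/* ethernet-phy@0 {
	 *         leds {
	 *                 led@0 { reg = <0>; };   // MV88Q2XXX_LED_INDEX_TX_ENABLE
	 *                 led@1 { reg = <1>; };   // MV88Q2XXX_LED_INDEX_GPIO
	 *         };
	 * };
	 */
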
+
static int mv88q2xxx_probe(struct phy_device *phydev)
{
struct mv88q2xxx_priv *priv;
@@ -750,6 +835,21 @@ static int mv88q2xxx_probe(struct phy_device *phydev)
phydev->priv = priv;
+ return 0;
+}
+
+static int mv88q222x_probe(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = mv88q2xxx_probe(phydev);
+ if (ret)
+ return ret;
+
+ ret = mv88q2xxx_leds_probe(phydev);
+ if (ret)
+ return ret;
+
return mv88q2xxx_hwmon_probe(phydev);
}
@@ -817,18 +917,6 @@ static int mv88q222x_revb1_revb2_config_init(struct phy_device *phydev)
static int mv88q222x_config_init(struct phy_device *phydev)
{
- struct mv88q2xxx_priv *priv = phydev->priv;
- int ret;
-
- /* Enable temperature sense */
- if (priv->enable_temp) {
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
- MDIO_MMD_PCS_MV_TEMP_SENSOR2,
- MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK, 0);
- if (ret < 0)
- return ret;
- }
-
if (phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] == PHY_ID_88Q2220_REVB0)
return mv88q222x_revb0_config_init(phydev);
else
@@ -918,11 +1006,104 @@ static int mv88q222x_cable_test_get_status(struct phy_device *phydev,
return 0;
}
+static int mv88q2xxx_led_mode(u8 index, unsigned long rules)
+{
+ switch (rules) {
+ case BIT(TRIGGER_NETDEV_LINK):
+ return MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK;
+ case BIT(TRIGGER_NETDEV_LINK_1000):
+ return MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_1000BT1_ON;
+ case BIT(TRIGGER_NETDEV_TX):
+ return MDIO_MMD_PCS_MV_LED_FUNC_CTRL_TX;
+ case BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX):
+ return MDIO_MMD_PCS_MV_LED_FUNC_CTRL_RX_TX;
+ case BIT(TRIGGER_NETDEV_LINK) | BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX):
+ return MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_RX_TX;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mv88q2xxx_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ int mode;
+
+ mode = mv88q2xxx_led_mode(index, rules);
+ if (mode < 0)
+ return mode;
+
+ return 0;
+}
+
+static int mv88q2xxx_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ int mode;
+
+ mode = mv88q2xxx_led_mode(index, rules);
+ if (mode < 0)
+ return mode;
+
+ if (index == MV88Q2XXX_LED_INDEX_TX_ENABLE)
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_LED_FUNC_CTRL,
+ MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_0_MASK,
+ FIELD_PREP(MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_0_MASK,
+ mode));
+ else
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_LED_FUNC_CTRL,
+ MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_1_MASK,
+ FIELD_PREP(MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_1_MASK,
+ mode));
+}
+
+static int mv88q2xxx_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_LED_FUNC_CTRL);
+ if (val < 0)
+ return val;
+
+ if (index == MV88Q2XXX_LED_INDEX_TX_ENABLE)
+ val = FIELD_GET(MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_0_MASK, val);
+ else
+ val = FIELD_GET(MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_1_MASK, val);
+
+ switch (val) {
+ case MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK:
+ *rules = BIT(TRIGGER_NETDEV_LINK);
+ break;
+ case MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_1000BT1_ON:
+ *rules = BIT(TRIGGER_NETDEV_LINK_1000);
+ break;
+ case MDIO_MMD_PCS_MV_LED_FUNC_CTRL_TX:
+ *rules = BIT(TRIGGER_NETDEV_TX);
+ break;
+ case MDIO_MMD_PCS_MV_LED_FUNC_CTRL_RX_TX:
+ *rules = BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX);
+ break;
+ case MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_RX_TX:
+ *rules = BIT(TRIGGER_NETDEV_LINK) | BIT(TRIGGER_NETDEV_TX) |
+ BIT(TRIGGER_NETDEV_RX);
+ break;
+ default:
+ *rules = 0;
+ break;
+ }
+
+ return 0;
+}
+
static struct phy_driver mv88q2xxx_driver[] = {
{
.phy_id = MARVELL_PHY_ID_88Q2110,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "mv88q2110",
+ .probe = mv88q2xxx_probe,
.get_features = mv88q2xxx_get_features,
.config_aneg = mv88q2xxx_config_aneg,
.config_init = mv88q2110_config_init,
@@ -937,7 +1118,7 @@ static struct phy_driver mv88q2xxx_driver[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "mv88q2220",
.flags = PHY_POLL_CABLE_TEST,
- .probe = mv88q2xxx_probe,
+ .probe = mv88q222x_probe,
.get_features = mv88q2xxx_get_features,
.config_aneg = mv88q2xxx_config_aneg,
.aneg_done = genphy_c45_aneg_done,
@@ -953,6 +1134,9 @@ static struct phy_driver mv88q2xxx_driver[] = {
.get_sqi_max = mv88q2xxx_get_sqi_max,
.suspend = mv88q2xxx_suspend,
.resume = mv88q2xxx_resume,
+ .led_hw_is_supported = mv88q2xxx_led_hw_is_supported,
+ .led_hw_control_set = mv88q2xxx_led_hw_control_set,
+ .led_hw_control_get = mv88q2xxx_led_hw_control_get,
},
};
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 44e1927de499..623292948fa7 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -2131,52 +2131,52 @@ static void marvell_get_stats_simple(struct phy_device *phydev,
data[i] = marvell_get_stat_simple(phydev, i);
}
-static int m88e1510_loopback(struct phy_device *phydev, bool enable)
+static int m88e1510_loopback(struct phy_device *phydev, bool enable, int speed)
{
+ u16 bmcr_ctl, mscr2_ctl = 0;
int err;
- if (enable) {
- u16 bmcr_ctl, mscr2_ctl = 0;
+ if (!enable)
+ return genphy_loopback(phydev, enable, 0);
- bmcr_ctl = mii_bmcr_encode_fixed(phydev->speed, phydev->duplex);
-
- err = phy_write(phydev, MII_BMCR, bmcr_ctl);
- if (err < 0)
- return err;
+ if (speed == SPEED_10 || speed == SPEED_100 || speed == SPEED_1000)
+ phydev->speed = speed;
+ else if (speed)
+ return -EINVAL;
- if (phydev->speed == SPEED_1000)
- mscr2_ctl = BMCR_SPEED1000;
- else if (phydev->speed == SPEED_100)
- mscr2_ctl = BMCR_SPEED100;
+ bmcr_ctl = mii_bmcr_encode_fixed(phydev->speed, phydev->duplex);
- err = phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE,
- MII_88E1510_MSCR_2, BMCR_SPEED1000 |
- BMCR_SPEED100, mscr2_ctl);
- if (err < 0)
- return err;
+ err = phy_write(phydev, MII_BMCR, bmcr_ctl);
+ if (err < 0)
+ return err;
- /* Need soft reset to have speed configuration takes effect */
- err = genphy_soft_reset(phydev);
- if (err < 0)
- return err;
+ if (phydev->speed == SPEED_1000)
+ mscr2_ctl = BMCR_SPEED1000;
+ else if (phydev->speed == SPEED_100)
+ mscr2_ctl = BMCR_SPEED100;
- err = phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK,
- BMCR_LOOPBACK);
+ err = phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE,
+ MII_88E1510_MSCR_2, BMCR_SPEED1000 |
+ BMCR_SPEED100, mscr2_ctl);
+ if (err < 0)
+ return err;
- if (!err) {
- /* It takes some time for PHY device to switch
- * into/out-of loopback mode.
- */
- msleep(1000);
- }
+ /* Need a soft reset for the speed configuration to take effect */
+ err = genphy_soft_reset(phydev);
+ if (err < 0)
return err;
- } else {
- err = phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK, 0);
- if (err < 0)
- return err;
- return phy_config_aneg(phydev);
+ err = phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK,
+ BMCR_LOOPBACK);
+
+ if (!err) {
+ /*
+ * It takes some time for the PHY device to switch into loopback
+ * mode.
+ */
+ msleep(1000);
}
+ return err;
}
static int marvell_vct5_wait_complete(struct phy_device *phydev)
@@ -3124,33 +3124,13 @@ static umode_t marvell_hwmon_is_visible(const void *data,
}
}
-static u32 marvell_hwmon_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0
-};
-
-static const struct hwmon_channel_info marvell_hwmon_chip = {
- .type = hwmon_chip,
- .config = marvell_hwmon_chip_config,
-};
-
/* we can define HWMON_T_CRIT and HWMON_T_MAX_ALARM even though these are not
* defined for all PHYs, because the hwmon code checks whether the attributes
* exists via the .is_visible method
*/
-static u32 marvell_hwmon_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_MAX_ALARM,
- 0
-};
-
-static const struct hwmon_channel_info marvell_hwmon_temp = {
- .type = hwmon_temp,
- .config = marvell_hwmon_temp_config,
-};
-
static const struct hwmon_channel_info * const marvell_hwmon_info[] = {
- &marvell_hwmon_chip,
- &marvell_hwmon_temp,
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_MAX_ALARM),
NULL
};
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 623bdb8466b8..5354c8895163 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -230,29 +230,9 @@ static const struct hwmon_ops mv3310_hwmon_ops = {
.read = mv3310_hwmon_read,
};
-static u32 mv3310_hwmon_chip_config[] = {
- HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL,
- 0,
-};
-
-static const struct hwmon_channel_info mv3310_hwmon_chip = {
- .type = hwmon_chip,
- .config = mv3310_hwmon_chip_config,
-};
-
-static u32 mv3310_hwmon_temp_config[] = {
- HWMON_T_INPUT,
- 0,
-};
-
-static const struct hwmon_channel_info mv3310_hwmon_temp = {
- .type = hwmon_temp,
- .config = mv3310_hwmon_temp_config,
-};
-
static const struct hwmon_channel_info * const mv3310_hwmon_info[] = {
- &mv3310_hwmon_chip,
- &mv3310_hwmon_temp,
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
NULL,
};
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 7e2f10182c0c..ede596c1a69d 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -551,6 +551,8 @@ static int mdiobus_create_device(struct mii_bus *bus,
static struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr, bool c45)
{
struct phy_device *phydev = ERR_PTR(-ENODEV);
+ struct fwnode_handle *fwnode;
+ char node_name[16];
int err;
phydev = get_phy_device(bus, addr, c45);
@@ -562,6 +564,18 @@ static struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr, bool c45)
*/
of_mdiobus_link_mdiodev(bus, &phydev->mdio);
+ /* Search for a swnode for the PHY in the swnode hierarchy of the bus.
+ * If no swnode is provided for the PHY, simply ignore it.
+ */
+ if (dev_fwnode(&bus->dev) && !dev_fwnode(&phydev->mdio.dev)) {
+ snprintf(node_name, sizeof(node_name), "ethernet-phy@%d",
+ addr);
+ fwnode = fwnode_get_named_child_node(dev_fwnode(&bus->dev),
+ node_name);
+ if (fwnode)
+ device_set_node(&phydev->mdio.dev, fwnode);
+ }
+
err = phy_device_register(phydev);
if (err) {
phy_device_free(phydev);
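
   Editorial aside: the lookup added above expects the bus device to carry a firmware
   node whose children are named "ethernet-phy@<addr>". A minimal sketch of one possible
   way for a host driver to provide that via software nodes follows; all names and the
   exact call site are illustrative assumptions, not part of this change (requires
   <linux/property.h>):

	static const struct software_node example_mdio_node = {
		.name = "example-mdio-bus",
	};

	static const struct property_entry example_phy_props[] = {
		PROPERTY_ENTRY_U32("reg", 1),
		{ }
	};

	static const struct software_node example_phy_node = {
		.name = "ethernet-phy@1",	/* must match the "ethernet-phy@%d" lookup */
		.parent = &example_mdio_node,
		.properties = example_phy_props,
	};

	static const struct software_node *example_nodes[] = {
		&example_mdio_node, &example_phy_node, NULL
	};

	static int example_attach_swnodes(struct mii_bus *bus)
	{
		int ret;

		ret = software_node_register_node_group(example_nodes);
		if (ret)
			return ret;

		/* Associate the parent node with the bus device before
		 * mdiobus_register(), so dev_fwnode(&bus->dev) is set when
		 * mdiobus_scan() runs.
		 */
		return device_add_software_node(&bus->dev, &example_mdio_node);
	}
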
diff --git a/drivers/net/phy/mediatek/mtk-ge-soc.c b/drivers/net/phy/mediatek/mtk-ge-soc.c
index bdf99b327029..175cf5239bba 100644
--- a/drivers/net/phy/mediatek/mtk-ge-soc.c
+++ b/drivers/net/phy/mediatek/mtk-ge-soc.c
@@ -8,6 +8,7 @@
#include <linux/phy.h>
#include <linux/regmap.h>
+#include "../phylib.h"
#include "mtk.h"
#define MTK_GPHY_ID_MT7981 0x03a29461
@@ -24,7 +25,107 @@
#define MTK_PHY_SMI_DET_ON_THRESH_MASK GENMASK(13, 8)
#define MTK_PHY_PAGE_EXTENDED_2A30 0x2a30
-#define MTK_PHY_PAGE_EXTENDED_52B5 0x52b5
+
+/* Registers on Token Ring debug nodes */
+/* ch_addr = 0x0, node_addr = 0x7, data_addr = 0x15 */
+/* NormMseLoThresh */
+#define NORMAL_MSE_LO_THRESH_MASK GENMASK(15, 8)
+
+/* ch_addr = 0x0, node_addr = 0xf, data_addr = 0x3c */
+/* RemAckCntLimitCtrl */
+#define REMOTE_ACK_COUNT_LIMIT_CTRL_MASK GENMASK(2, 1)
+
+/* ch_addr = 0x1, node_addr = 0xd, data_addr = 0x20 */
+/* VcoSlicerThreshBitsHigh */
+#define VCO_SLICER_THRESH_HIGH_MASK GENMASK(23, 0)
+
+/* ch_addr = 0x1, node_addr = 0xf, data_addr = 0x0 */
+/* DfeTailEnableVgaThresh1000 */
+#define DFE_TAIL_EANBLE_VGA_TRHESH_1000 GENMASK(5, 1)
+
+/* ch_addr = 0x1, node_addr = 0xf, data_addr = 0x1 */
+/* MrvlTrFix100Kp */
+#define MRVL_TR_FIX_100KP_MASK GENMASK(22, 20)
+/* MrvlTrFix100Kf */
+#define MRVL_TR_FIX_100KF_MASK GENMASK(19, 17)
+/* MrvlTrFix1000Kp */
+#define MRVL_TR_FIX_1000KP_MASK GENMASK(16, 14)
+/* MrvlTrFix1000Kf */
+#define MRVL_TR_FIX_1000KF_MASK GENMASK(13, 11)
+
+/* ch_addr = 0x1, node_addr = 0xf, data_addr = 0x12 */
+/* VgaDecRate */
+#define VGA_DECIMATION_RATE_MASK GENMASK(8, 5)
+
+/* ch_addr = 0x1, node_addr = 0xf, data_addr = 0x17 */
+/* SlvDSPreadyTime */
+#define SLAVE_DSP_READY_TIME_MASK GENMASK(22, 15)
+/* MasDSPreadyTime */
+#define MASTER_DSP_READY_TIME_MASK GENMASK(14, 7)
+
+/* ch_addr = 0x1, node_addr = 0xf, data_addr = 0x18 */
+/* EnabRandUpdTrig */
+#define ENABLE_RANDOM_UPDOWN_COUNTER_TRIGGER BIT(8)
+
+/* ch_addr = 0x1, node_addr = 0xf, data_addr = 0x20 */
+/* ResetSyncOffset */
+#define RESET_SYNC_OFFSET_MASK GENMASK(11, 8)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x0 */
+/* FfeUpdGainForceVal */
+#define FFE_UPDATE_GAIN_FORCE_VAL_MASK GENMASK(9, 7)
+/* FfeUpdGainForce */
+#define FFE_UPDATE_GAIN_FORCE BIT(6)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x3 */
+/* TrFreeze */
+#define TR_FREEZE_MASK GENMASK(11, 0)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x6 */
+/* SS: Steady-state, KP: Proportional Gain */
+/* SSTrKp100 */
+#define SS_TR_KP100_MASK GENMASK(21, 19)
+/* SSTrKf100 */
+#define SS_TR_KF100_MASK GENMASK(18, 16)
+/* SSTrKp1000Mas */
+#define SS_TR_KP1000_MASTER_MASK GENMASK(15, 13)
+/* SSTrKf1000Mas */
+#define SS_TR_KF1000_MASTER_MASK GENMASK(12, 10)
+/* SSTrKp1000Slv */
+#define SS_TR_KP1000_SLAVE_MASK GENMASK(9, 7)
+/* SSTrKf1000Slv */
+#define SS_TR_KF1000_SLAVE_MASK GENMASK(6, 4)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x8 */
+/* Clear this bit to select signal detection from the AFE */
+/* Regsigdet_sel_1000 */
+#define EEE1000_SELECT_SIGNAL_DETECTION_FROM_DFE BIT(4)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0xd */
+/* RegEEE_st2TrKf1000 */
+#define EEE1000_STAGE2_TR_KF_MASK GENMASK(13, 11)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0xf */
+/* RegEEE_slv_waketr_timer_tar */
+#define SLAVE_WAKETR_TIMER_MASK GENMASK(20, 11)
+/* RegEEE_slv_remtx_timer_tar */
+#define SLAVE_REMTX_TIMER_MASK GENMASK(10, 1)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x10 */
+/* RegEEE_slv_wake_int_timer_tar */
+#define SLAVE_WAKEINT_TIMER_MASK GENMASK(10, 1)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x14 */
+/* RegEEE_trfreeze_timer2 */
+#define TR_FREEZE_TIMER2_MASK GENMASK(9, 0)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x1c */
+/* RegEEE100Stg1_tar */
+#define EEE100_LPSYNC_STAGE1_UPDATE_TIMER_MASK GENMASK(8, 0)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x25 */
+/* REGEEE_wake_slv_tr_wait_dfesigdet_en */
+#define WAKE_SLAVE_TR_WAIT_DFE_DETECTION_EN BIT(11)
#define ANALOG_INTERNAL_OPERATION_MAX_US 20
#define TXRESERVE_MIN 0
@@ -701,40 +802,36 @@ restore:
static void mt798x_phy_common_finetune(struct phy_device *phydev)
{
phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
- /* SlvDSPreadyTime = 24, MasDSPreadyTime = 24 */
- __phy_write(phydev, 0x11, 0xc71);
- __phy_write(phydev, 0x12, 0xc);
- __phy_write(phydev, 0x10, 0x8fae);
-
- /* EnabRandUpdTrig = 1 */
- __phy_write(phydev, 0x11, 0x2f00);
- __phy_write(phydev, 0x12, 0xe);
- __phy_write(phydev, 0x10, 0x8fb0);
-
- /* NormMseLoThresh = 85 */
- __phy_write(phydev, 0x11, 0x55a0);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x83aa);
-
- /* FfeUpdGainForce = 1(Enable), FfeUpdGainForceVal = 4 */
- __phy_write(phydev, 0x11, 0x240);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x9680);
-
- /* TrFreeze = 0 (mt7988 default) */
- __phy_write(phydev, 0x11, 0x0);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x9686);
-
- /* SSTrKp100 = 5 */
- /* SSTrKf100 = 6 */
- /* SSTrKp1000Mas = 5 */
- /* SSTrKf1000Mas = 6 */
- /* SSTrKp1000Slv = 5 */
- /* SSTrKf1000Slv = 6 */
- __phy_write(phydev, 0x11, 0xbaef);
- __phy_write(phydev, 0x12, 0x2e);
- __phy_write(phydev, 0x10, 0x968c);
+ __mtk_tr_modify(phydev, 0x1, 0xf, 0x17,
+ SLAVE_DSP_READY_TIME_MASK | MASTER_DSP_READY_TIME_MASK,
+ FIELD_PREP(SLAVE_DSP_READY_TIME_MASK, 0x18) |
+ FIELD_PREP(MASTER_DSP_READY_TIME_MASK, 0x18));
+
+ __mtk_tr_set_bits(phydev, 0x1, 0xf, 0x18,
+ ENABLE_RANDOM_UPDOWN_COUNTER_TRIGGER);
+
+ __mtk_tr_modify(phydev, 0x0, 0x7, 0x15,
+ NORMAL_MSE_LO_THRESH_MASK,
+ FIELD_PREP(NORMAL_MSE_LO_THRESH_MASK, 0x55));
+
+ __mtk_tr_modify(phydev, 0x2, 0xd, 0x0,
+ FFE_UPDATE_GAIN_FORCE_VAL_MASK,
+ FIELD_PREP(FFE_UPDATE_GAIN_FORCE_VAL_MASK, 0x4) |
+ FFE_UPDATE_GAIN_FORCE);
+
+ __mtk_tr_clr_bits(phydev, 0x2, 0xd, 0x3, TR_FREEZE_MASK);
+
+ __mtk_tr_modify(phydev, 0x2, 0xd, 0x6,
+ SS_TR_KP100_MASK | SS_TR_KF100_MASK |
+ SS_TR_KP1000_MASTER_MASK | SS_TR_KF1000_MASTER_MASK |
+ SS_TR_KP1000_SLAVE_MASK | SS_TR_KF1000_SLAVE_MASK,
+ FIELD_PREP(SS_TR_KP100_MASK, 0x5) |
+ FIELD_PREP(SS_TR_KF100_MASK, 0x6) |
+ FIELD_PREP(SS_TR_KP1000_MASTER_MASK, 0x5) |
+ FIELD_PREP(SS_TR_KF1000_MASTER_MASK, 0x6) |
+ FIELD_PREP(SS_TR_KP1000_SLAVE_MASK, 0x5) |
+ FIELD_PREP(SS_TR_KF1000_SLAVE_MASK, 0x6));
+
phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
}
@@ -757,27 +854,29 @@ static void mt7981_phy_finetune(struct phy_device *phydev)
}
phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
- /* ResetSyncOffset = 6 */
- __phy_write(phydev, 0x11, 0x600);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x8fc0);
+ __mtk_tr_modify(phydev, 0x1, 0xf, 0x20,
+ RESET_SYNC_OFFSET_MASK,
+ FIELD_PREP(RESET_SYNC_OFFSET_MASK, 0x6));
- /* VgaDecRate = 1 */
- __phy_write(phydev, 0x11, 0x4c2a);
- __phy_write(phydev, 0x12, 0x3e);
- __phy_write(phydev, 0x10, 0x8fa4);
+ __mtk_tr_modify(phydev, 0x1, 0xf, 0x12,
+ VGA_DECIMATION_RATE_MASK,
+ FIELD_PREP(VGA_DECIMATION_RATE_MASK, 0x1));
/* MrvlTrFix100Kp = 3, MrvlTrFix100Kf = 2,
* MrvlTrFix1000Kp = 3, MrvlTrFix1000Kf = 2
*/
- __phy_write(phydev, 0x11, 0xd10a);
- __phy_write(phydev, 0x12, 0x34);
- __phy_write(phydev, 0x10, 0x8f82);
+ __mtk_tr_modify(phydev, 0x1, 0xf, 0x1,
+ MRVL_TR_FIX_100KP_MASK | MRVL_TR_FIX_100KF_MASK |
+ MRVL_TR_FIX_1000KP_MASK | MRVL_TR_FIX_1000KF_MASK,
+ FIELD_PREP(MRVL_TR_FIX_100KP_MASK, 0x3) |
+ FIELD_PREP(MRVL_TR_FIX_100KF_MASK, 0x2) |
+ FIELD_PREP(MRVL_TR_FIX_1000KP_MASK, 0x3) |
+ FIELD_PREP(MRVL_TR_FIX_1000KF_MASK, 0x2));
/* VcoSlicerThreshBitsHigh */
- __phy_write(phydev, 0x11, 0x5555);
- __phy_write(phydev, 0x12, 0x55);
- __phy_write(phydev, 0x10, 0x8ec0);
+ __mtk_tr_modify(phydev, 0x1, 0xd, 0x20,
+ VCO_SLICER_THRESH_HIGH_MASK,
+ FIELD_PREP(VCO_SLICER_THRESH_HIGH_MASK, 0x555555));
phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
/* TR_OPEN_LOOP_EN = 1, lpf_x_average = 9 */
@@ -829,25 +928,23 @@ static void mt7988_phy_finetune(struct phy_device *phydev)
phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_TX_FILTER, 0x5);
phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
- /* ResetSyncOffset = 5 */
- __phy_write(phydev, 0x11, 0x500);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x8fc0);
+ __mtk_tr_modify(phydev, 0x1, 0xf, 0x20,
+ RESET_SYNC_OFFSET_MASK,
+ FIELD_PREP(RESET_SYNC_OFFSET_MASK, 0x5));
/* VgaDecRate is 1 at default on mt7988 */
- /* MrvlTrFix100Kp = 6, MrvlTrFix100Kf = 7,
- * MrvlTrFix1000Kp = 6, MrvlTrFix1000Kf = 7
- */
- __phy_write(phydev, 0x11, 0xb90a);
- __phy_write(phydev, 0x12, 0x6f);
- __phy_write(phydev, 0x10, 0x8f82);
-
- /* RemAckCntLimitCtrl = 1 */
- __phy_write(phydev, 0x11, 0xfbba);
- __phy_write(phydev, 0x12, 0xc3);
- __phy_write(phydev, 0x10, 0x87f8);
-
+ __mtk_tr_modify(phydev, 0x1, 0xf, 0x1,
+ MRVL_TR_FIX_100KP_MASK | MRVL_TR_FIX_100KF_MASK |
+ MRVL_TR_FIX_1000KP_MASK | MRVL_TR_FIX_1000KF_MASK,
+ FIELD_PREP(MRVL_TR_FIX_100KP_MASK, 0x6) |
+ FIELD_PREP(MRVL_TR_FIX_100KF_MASK, 0x7) |
+ FIELD_PREP(MRVL_TR_FIX_1000KP_MASK, 0x6) |
+ FIELD_PREP(MRVL_TR_FIX_1000KF_MASK, 0x7));
+
+ __mtk_tr_modify(phydev, 0x0, 0xf, 0x3c,
+ REMOTE_ACK_COUNT_LIMIT_CTRL_MASK,
+ FIELD_PREP(REMOTE_ACK_COUNT_LIMIT_CTRL_MASK, 0x1));
phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
/* TR_OPEN_LOOP_EN = 1, lpf_x_average = 10 */
@@ -923,45 +1020,37 @@ static void mt798x_phy_eee(struct phy_device *phydev)
MTK_PHY_TR_READY_SKIP_AFE_WAKEUP);
phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
- /* Regsigdet_sel_1000 = 0 */
- __phy_write(phydev, 0x11, 0xb);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x9690);
-
- /* REG_EEE_st2TrKf1000 = 2 */
- __phy_write(phydev, 0x11, 0x114f);
- __phy_write(phydev, 0x12, 0x2);
- __phy_write(phydev, 0x10, 0x969a);
-
- /* RegEEE_slv_wake_tr_timer_tar = 6, RegEEE_slv_remtx_timer_tar = 20 */
- __phy_write(phydev, 0x11, 0x3028);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x969e);
-
- /* RegEEE_slv_wake_int_timer_tar = 8 */
- __phy_write(phydev, 0x11, 0x5010);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x96a0);
-
- /* RegEEE_trfreeze_timer2 = 586 */
- __phy_write(phydev, 0x11, 0x24a);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x96a8);
-
- /* RegEEE100Stg1_tar = 16 */
- __phy_write(phydev, 0x11, 0x3210);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x96b8);
-
- /* REGEEE_wake_slv_tr_wait_dfesigdet_en = 0 */
- __phy_write(phydev, 0x11, 0x1463);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x96ca);
-
- /* DfeTailEnableVgaThresh1000 = 27 */
- __phy_write(phydev, 0x11, 0x36);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x8f80);
+ __mtk_tr_clr_bits(phydev, 0x2, 0xd, 0x8,
+ EEE1000_SELECT_SIGNAL_DETECTION_FROM_DFE);
+
+ __mtk_tr_modify(phydev, 0x2, 0xd, 0xd,
+ EEE1000_STAGE2_TR_KF_MASK,
+ FIELD_PREP(EEE1000_STAGE2_TR_KF_MASK, 0x2));
+
+ __mtk_tr_modify(phydev, 0x2, 0xd, 0xf,
+ SLAVE_WAKETR_TIMER_MASK | SLAVE_REMTX_TIMER_MASK,
+ FIELD_PREP(SLAVE_WAKETR_TIMER_MASK, 0x6) |
+ FIELD_PREP(SLAVE_REMTX_TIMER_MASK, 0x14));
+
+ __mtk_tr_modify(phydev, 0x2, 0xd, 0x10,
+ SLAVE_WAKEINT_TIMER_MASK,
+ FIELD_PREP(SLAVE_WAKEINT_TIMER_MASK, 0x8));
+
+ __mtk_tr_modify(phydev, 0x2, 0xd, 0x14,
+ TR_FREEZE_TIMER2_MASK,
+ FIELD_PREP(TR_FREEZE_TIMER2_MASK, 0x24a));
+
+ __mtk_tr_modify(phydev, 0x2, 0xd, 0x1c,
+ EEE100_LPSYNC_STAGE1_UPDATE_TIMER_MASK,
+ FIELD_PREP(EEE100_LPSYNC_STAGE1_UPDATE_TIMER_MASK,
+ 0x10));
+
+ __mtk_tr_clr_bits(phydev, 0x2, 0xd, 0x25,
+ WAKE_SLAVE_TR_WAIT_DFE_DETECTION_EN);
+
+ __mtk_tr_modify(phydev, 0x1, 0xf, 0x0,
+ DFE_TAIL_EANBLE_VGA_TRHESH_1000,
+ FIELD_PREP(DFE_TAIL_EANBLE_VGA_TRHESH_1000, 0x1b));
phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_3);
@@ -1190,7 +1279,7 @@ static int mt798x_phy_led_hw_control_set(struct phy_device *phydev, u8 index,
static bool mt7988_phy_led_get_polarity(struct phy_device *phydev, int led_num)
{
- struct mtk_socphy_shared *priv = phydev->shared->priv;
+ struct mtk_socphy_shared *priv = phy_package_get_priv(phydev);
u32 polarities;
if (led_num == 0)
@@ -1229,7 +1318,7 @@ static int mt7988_phy_fix_leds_polarities(struct phy_device *phydev)
static int mt7988_phy_probe_shared(struct phy_device *phydev)
{
struct device_node *np = dev_of_node(&phydev->mdio.bus->dev);
- struct mtk_socphy_shared *shared = phydev->shared->priv;
+ struct mtk_socphy_shared *shared = phy_package_get_priv(phydev);
struct regmap *regmap;
u32 reg;
int ret;
@@ -1280,7 +1369,7 @@ static int mt7988_phy_probe(struct phy_device *phydev)
return err;
}
- shared = phydev->shared->priv;
+ shared = phy_package_get_priv(phydev);
priv = &shared->priv[phydev->mdio.addr];
phydev->priv = priv;
diff --git a/drivers/net/phy/mediatek/mtk-ge.c b/drivers/net/phy/mediatek/mtk-ge.c
index b517ca8573e7..73d9b72f9d9e 100644
--- a/drivers/net/phy/mediatek/mtk-ge.c
+++ b/drivers/net/phy/mediatek/mtk-ge.c
@@ -8,31 +8,58 @@
#define MTK_GPHY_ID_MT7530 0x03a29412
#define MTK_GPHY_ID_MT7531 0x03a29441
-#define MTK_EXT_PAGE_ACCESS 0x1f
-#define MTK_PHY_PAGE_STANDARD 0x0000
-#define MTK_PHY_PAGE_EXTENDED 0x0001
-#define MTK_PHY_PAGE_EXTENDED_2 0x0002
-#define MTK_PHY_PAGE_EXTENDED_3 0x0003
-#define MTK_PHY_PAGE_EXTENDED_2A30 0x2a30
-#define MTK_PHY_PAGE_EXTENDED_52B5 0x52b5
+#define MTK_PHY_PAGE_EXTENDED_2 0x0002
+#define MTK_PHY_PAGE_EXTENDED_3 0x0003
+#define MTK_PHY_RG_LPI_PCS_DSP_CTRL_REG11 0x11
+
+#define MTK_PHY_PAGE_EXTENDED_2A30 0x2a30
+
+/* Registers on Token Ring debug nodes */
+/* ch_addr = 0x1, node_addr = 0xf, data_addr = 0x17 */
+#define SLAVE_DSP_READY_TIME_MASK GENMASK(22, 15)
+
+/* Registers on MDIO_MMD_VEND1 */
+#define MTK_PHY_GBE_MODE_TX_DELAY_SEL 0x13
+#define MTK_PHY_TEST_MODE_TX_DELAY_SEL 0x14
+#define MTK_TX_DELAY_PAIR_B_MASK GENMASK(10, 8)
+#define MTK_TX_DELAY_PAIR_D_MASK GENMASK(2, 0)
+
+#define MTK_PHY_MCC_CTRL_AND_TX_POWER_CTRL 0xa6
+#define MTK_MCC_NEARECHO_OFFSET_MASK GENMASK(15, 8)
+
+#define MTK_PHY_RXADC_CTRL_RG7 0xc6
+#define MTK_PHY_DA_AD_BUF_BIAS_LP_MASK GENMASK(9, 8)
+
+#define MTK_PHY_RG_LPI_PCS_DSP_CTRL_REG123 0x123
+#define MTK_PHY_LPI_NORM_MSE_LO_THRESH100_MASK GENMASK(15, 8)
+#define MTK_PHY_LPI_NORM_MSE_HI_THRESH100_MASK GENMASK(7, 0)
static void mtk_gephy_config_init(struct phy_device *phydev)
{
/* Enable HW auto downshift */
- phy_modify_paged(phydev, MTK_PHY_PAGE_EXTENDED, 0x14, 0, BIT(4));
+ phy_modify_paged(phydev, MTK_PHY_PAGE_EXTENDED_1,
+ MTK_PHY_AUX_CTRL_AND_STATUS,
+ 0, MTK_PHY_ENABLE_DOWNSHIFT);
/* Increase SlvDPSready time */
- phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
- __phy_write(phydev, 0x10, 0xafae);
- __phy_write(phydev, 0x12, 0x2f);
- __phy_write(phydev, 0x10, 0x8fae);
- phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
+ mtk_tr_modify(phydev, 0x1, 0xf, 0x17, SLAVE_DSP_READY_TIME_MASK,
+ FIELD_PREP(SLAVE_DSP_READY_TIME_MASK, 0x5e));
/* Adjust 100_mse_threshold */
- phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x123, 0xffff);
-
- /* Disable mcc */
- phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xa6, 0x300);
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ MTK_PHY_RG_LPI_PCS_DSP_CTRL_REG123,
+ MTK_PHY_LPI_NORM_MSE_LO_THRESH100_MASK |
+ MTK_PHY_LPI_NORM_MSE_HI_THRESH100_MASK,
+ FIELD_PREP(MTK_PHY_LPI_NORM_MSE_LO_THRESH100_MASK,
+ 0xff) |
+ FIELD_PREP(MTK_PHY_LPI_NORM_MSE_HI_THRESH100_MASK,
+ 0xff));
+
+ /* If echo time is narrower than 0x3, it will be regarded as noise */
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ MTK_PHY_MCC_CTRL_AND_TX_POWER_CTRL,
+ MTK_MCC_NEARECHO_OFFSET_MASK,
+ FIELD_PREP(MTK_MCC_NEARECHO_OFFSET_MASK, 0x3));
}
static int mt7530_phy_config_init(struct phy_device *phydev)
@@ -40,7 +67,8 @@ static int mt7530_phy_config_init(struct phy_device *phydev)
mtk_gephy_config_init(phydev);
/* Increase post_update_timer */
- phy_write_paged(phydev, MTK_PHY_PAGE_EXTENDED_3, 0x11, 0x4b);
+ phy_write_paged(phydev, MTK_PHY_PAGE_EXTENDED_3,
+ MTK_PHY_RG_LPI_PCS_DSP_CTRL_REG11, 0x4b);
return 0;
}
@@ -51,11 +79,19 @@ static int mt7531_phy_config_init(struct phy_device *phydev)
/* PHY link down power saving enable */
phy_set_bits(phydev, 0x17, BIT(4));
- phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, 0xc6, 0x300);
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RXADC_CTRL_RG7,
+ MTK_PHY_DA_AD_BUF_BIAS_LP_MASK,
+ FIELD_PREP(MTK_PHY_DA_AD_BUF_BIAS_LP_MASK, 0x3));
/* Set TX Pair delay selection */
- phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x13, 0x404);
- phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x14, 0x404);
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_GBE_MODE_TX_DELAY_SEL,
+ MTK_TX_DELAY_PAIR_B_MASK | MTK_TX_DELAY_PAIR_D_MASK,
+ FIELD_PREP(MTK_TX_DELAY_PAIR_B_MASK, 0x4) |
+ FIELD_PREP(MTK_TX_DELAY_PAIR_D_MASK, 0x4));
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_TEST_MODE_TX_DELAY_SEL,
+ MTK_TX_DELAY_PAIR_B_MASK | MTK_TX_DELAY_PAIR_D_MASK,
+ FIELD_PREP(MTK_TX_DELAY_PAIR_B_MASK, 0x4) |
+ FIELD_PREP(MTK_TX_DELAY_PAIR_D_MASK, 0x4));
return 0;
}
diff --git a/drivers/net/phy/mediatek/mtk-phy-lib.c b/drivers/net/phy/mediatek/mtk-phy-lib.c
index 98a09d670e9c..dfd0f4e439a2 100644
--- a/drivers/net/phy/mediatek/mtk-phy-lib.c
+++ b/drivers/net/phy/mediatek/mtk-phy-lib.c
@@ -6,6 +6,83 @@
#include "mtk.h"
+/* The difference between the mtk_tr* and __mtk_tr* helpers:
+ * mtk_tr* functions: wrap the access in page switching operations
+ * __mtk_tr* functions: no page switching; the caller must already have
+ * selected the Token Ring page
+ */
+
+static void __mtk_tr_access(struct phy_device *phydev, bool read, u8 ch_addr,
+ u8 node_addr, u8 data_addr)
+{
+ u16 tr_cmd = BIT(15); /* bit 14 & 0 are reserved */
+
+ if (read)
+ tr_cmd |= BIT(13);
+
+ tr_cmd |= (((ch_addr & 0x3) << 11) |
+ ((node_addr & 0xf) << 7) |
+ ((data_addr & 0x3f) << 1));
+ dev_dbg(&phydev->mdio.dev, "tr_cmd: 0x%x\n", tr_cmd);
+ __phy_write(phydev, 0x10, tr_cmd);
+}
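
   Editorial aside: the command word built above reproduces the magic constants the old
   open-coded sequences wrote to register 0x10. A worked example for the SlvDSPreadyTime
   access converted elsewhere in this patch:

	/* Write access to ch_addr = 0x1, node_addr = 0xf, data_addr = 0x17:
	 *   BIT(15) | (0x1 << 11) | (0xf << 7) | (0x17 << 1) = 0x8fae
	 * which is exactly the value the previous code wrote by hand via
	 * __phy_write(phydev, 0x10, 0x8fae); the read variant adds BIT(13),
	 * giving the 0xafae seen in the removed mtk-ge.c sequence.
	 */
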
+
+static void __mtk_tr_read(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u16 *tr_high, u16 *tr_low)
+{
+ __mtk_tr_access(phydev, true, ch_addr, node_addr, data_addr);
+ *tr_low = __phy_read(phydev, 0x11);
+ *tr_high = __phy_read(phydev, 0x12);
+ dev_dbg(&phydev->mdio.dev, "tr_high read: 0x%x, tr_low read: 0x%x\n",
+ *tr_high, *tr_low);
+}
+
+static void __mtk_tr_write(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u32 tr_data)
+{
+ __phy_write(phydev, 0x11, tr_data & 0xffff);
+ __phy_write(phydev, 0x12, tr_data >> 16);
+ dev_dbg(&phydev->mdio.dev, "tr_high write: 0x%x, tr_low write: 0x%x\n",
+ tr_data >> 16, tr_data & 0xffff);
+ __mtk_tr_access(phydev, false, ch_addr, node_addr, data_addr);
+}
+
+void __mtk_tr_modify(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u32 mask, u32 set)
+{
+ u32 tr_data;
+ u16 tr_high;
+ u16 tr_low;
+
+ __mtk_tr_read(phydev, ch_addr, node_addr, data_addr, &tr_high, &tr_low);
+ tr_data = (tr_high << 16) | tr_low;
+ tr_data = (tr_data & ~mask) | set;
+ __mtk_tr_write(phydev, ch_addr, node_addr, data_addr, tr_data);
+}
+EXPORT_SYMBOL_GPL(__mtk_tr_modify);
+
+void mtk_tr_modify(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u32 mask, u32 set)
+{
+ phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
+ __mtk_tr_modify(phydev, ch_addr, node_addr, data_addr, mask, set);
+ phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
+}
+EXPORT_SYMBOL_GPL(mtk_tr_modify);
+
+void __mtk_tr_set_bits(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u32 set)
+{
+ __mtk_tr_modify(phydev, ch_addr, node_addr, data_addr, 0, set);
+}
+EXPORT_SYMBOL_GPL(__mtk_tr_set_bits);
+
+void __mtk_tr_clr_bits(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u32 clr)
+{
+ __mtk_tr_modify(phydev, ch_addr, node_addr, data_addr, clr, 0);
+}
+EXPORT_SYMBOL_GPL(__mtk_tr_clr_bits);
+
int mtk_phy_read_page(struct phy_device *phydev)
{
return __phy_read(phydev, MTK_EXT_PAGE_ACCESS);
diff --git a/drivers/net/phy/mediatek/mtk.h b/drivers/net/phy/mediatek/mtk.h
index 63d9fe179b8f..320f76ffa81f 100644
--- a/drivers/net/phy/mediatek/mtk.h
+++ b/drivers/net/phy/mediatek/mtk.h
@@ -8,7 +8,13 @@
#ifndef _MTK_EPHY_H_
#define _MTK_EPHY_H_
+#define MTK_PHY_AUX_CTRL_AND_STATUS 0x14
+#define MTK_PHY_ENABLE_DOWNSHIFT BIT(4)
+
#define MTK_EXT_PAGE_ACCESS 0x1f
+#define MTK_PHY_PAGE_EXTENDED_1 0x0001
+#define MTK_PHY_PAGE_STANDARD 0x0000
+#define MTK_PHY_PAGE_EXTENDED_52B5 0x52b5
/* Registers on MDIO_MMD_VEND2 */
#define MTK_PHY_LED0_ON_CTRL 0x24
@@ -66,6 +72,15 @@ struct mtk_socphy_priv {
unsigned long led_state;
};
+void __mtk_tr_modify(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u32 mask, u32 set);
+void mtk_tr_modify(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u32 mask, u32 set);
+void __mtk_tr_set_bits(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u32 set);
+void __mtk_tr_clr_bits(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u32 clr);
+
int mtk_phy_read_page(struct phy_device *phydev);
int mtk_phy_write_page(struct phy_device *phydev, int page);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 9c0b1c229af6..24882d30f685 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -34,6 +34,8 @@
#include <linux/net_tstamp.h>
#include <linux/gpio/consumer.h>
+#include "phylib.h"
+
/* Operation Mode Strap Override */
#define MII_KSZPHY_OMSO 0x16
#define KSZPHY_OMSO_FACTORY_TEST BIT(15)
@@ -1030,6 +1032,29 @@ static int ksz9021_config_init(struct phy_device *phydev)
#define MII_KSZ9031RN_EDPD 0x23
#define MII_KSZ9031RN_EDPD_ENABLE BIT(0)
+static int ksz9031_set_loopback(struct phy_device *phydev, bool enable,
+ int speed)
+{
+ u16 ctl = BMCR_LOOPBACK;
+ int val;
+
+ if (!enable)
+ return genphy_loopback(phydev, enable, 0);
+
+ if (speed == SPEED_10 || speed == SPEED_100 || speed == SPEED_1000)
+ phydev->speed = speed;
+ else if (speed)
+ return -EINVAL;
+ phydev->duplex = DUPLEX_FULL;
+
+ ctl |= mii_bmcr_encode_fixed(phydev->speed, phydev->duplex);
+
+ phy_write(phydev, MII_BMCR, ctl);
+
+ return phy_read_poll_timeout(phydev, MII_BMSR, val, val & BMSR_LSTATUS,
+ 5000, 500000, true);
+}
+
static int ksz9031_of_load_skew_values(struct phy_device *phydev,
const struct device_node *of_node,
u16 reg, size_t field_sz,
@@ -2631,8 +2656,7 @@ static void lan8814_ptp_tx_ts_get(struct phy_device *phydev,
static int lan8814_ts_info(struct mii_timestamper *mii_ts, struct kernel_ethtool_ts_info *info)
{
struct kszphy_ptp_priv *ptp_priv = container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
- struct phy_device *phydev = ptp_priv->phydev;
- struct lan8814_shared_priv *shared = phydev->shared->priv;
+ struct lan8814_shared_priv *shared = phy_package_get_priv(ptp_priv->phydev);
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
@@ -3653,7 +3677,7 @@ static int lan8814_gpio_process_cap(struct lan8814_shared_priv *shared)
static int lan8814_handle_gpio_interrupt(struct phy_device *phydev, u16 status)
{
- struct lan8814_shared_priv *shared = phydev->shared->priv;
+ struct lan8814_shared_priv *shared = phy_package_get_priv(phydev);
int ret;
mutex_lock(&shared->shared_lock);
@@ -3864,7 +3888,7 @@ static void lan8814_ptp_init(struct phy_device *phydev)
static int lan8814_ptp_probe_once(struct phy_device *phydev)
{
- struct lan8814_shared_priv *shared = phydev->shared->priv;
+ struct lan8814_shared_priv *shared = phy_package_get_priv(phydev);
/* Initialise shared lock for clock*/
mutex_init(&shared->shared_lock);
@@ -5564,6 +5588,7 @@ static struct phy_driver ksphy_driver[] = {
.resume = kszphy_resume,
.cable_test_start = ksz9x31_cable_test_start,
.cable_test_get_status = ksz9x31_cable_test_get_status,
+ .set_loopback = ksz9031_set_loopback,
}, {
.phy_id = PHY_ID_LAN8814,
.phy_id_mask = MICREL_PHY_ID_MASK,
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index 19cf12ee8990..7ff975efd8e7 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -17,6 +17,8 @@
#include <linux/of.h>
#include <linux/netdevice.h>
#include <dt-bindings/net/mscc-phy-vsc8531.h>
+
+#include "../phylib.h"
#include "mscc_serdes.h"
#include "mscc.h"
diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
index 738a8822fcf0..ed8fb14a7f21 100644
--- a/drivers/net/phy/mscc/mscc_ptp.c
+++ b/drivers/net/phy/mscc/mscc_ptp.c
@@ -17,6 +17,7 @@
#include <linux/udp.h>
#include <linux/unaligned.h>
+#include "../phylib.h"
#include "mscc.h"
#include "mscc_ptp.h"
@@ -645,11 +646,12 @@ static int __vsc85xx_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
struct phy_device *phydev = ptp->phydev;
- struct vsc85xx_shared_private *shared =
- (struct vsc85xx_shared_private *)phydev->shared->priv;
struct vsc8531_private *priv = phydev->priv;
+ struct vsc85xx_shared_private *shared;
u32 val;
+ shared = phy_package_get_priv(phydev);
+
val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
val |= PTP_LTC_CTRL_SAVE_ENA;
vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);
@@ -696,11 +698,12 @@ static int __vsc85xx_settime(struct ptp_clock_info *info,
{
struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
struct phy_device *phydev = ptp->phydev;
- struct vsc85xx_shared_private *shared =
- (struct vsc85xx_shared_private *)phydev->shared->priv;
struct vsc8531_private *priv = phydev->priv;
+ struct vsc85xx_shared_private *shared;
u32 val;
+ shared = phy_package_get_priv(phydev);
+
vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_SEC_MSB,
PTP_LTC_LOAD_SEC_MSB(ts->tv_sec));
vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_SEC_LSB,
@@ -1580,8 +1583,7 @@ int vsc8584_ptp_probe(struct phy_device *phydev)
int vsc8584_ptp_probe_once(struct phy_device *phydev)
{
- struct vsc85xx_shared_private *shared =
- (struct vsc85xx_shared_private *)phydev->shared->priv;
+ struct vsc85xx_shared_private *shared = phy_package_get_priv(phydev);
/* Initialize shared GPIO lock */
mutex_init(&shared->gpio_lock);
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index 94d9cb727121..0c8dc16ee7bd 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -225,14 +225,8 @@ static int gpy_hwmon_register(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
struct device *hwmon_dev;
- char *hwmon_name;
- hwmon_name = devm_hwmon_sanitize_name(dev, dev_name(dev));
- if (IS_ERR(hwmon_name))
- return PTR_ERR(hwmon_name);
-
- hwmon_dev = devm_hwmon_device_register_with_info(dev, hwmon_name,
- phydev,
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, NULL, phydev,
&gpy_hwmon_chip_info,
NULL);
@@ -813,7 +807,7 @@ static void gpy_get_wol(struct phy_device *phydev,
wol->wolopts = priv->wolopts;
}
-static int gpy_loopback(struct phy_device *phydev, bool enable)
+static int gpy_loopback(struct phy_device *phydev, bool enable, int speed)
{
struct gpy_priv *priv = phydev->priv;
u16 set = 0;
@@ -822,6 +816,9 @@ static int gpy_loopback(struct phy_device *phydev, bool enable)
if (enable) {
u64 now = get_jiffies_64();
+ if (speed)
+ return -EOPNOTSUPP;
+
/* wait until 3 seconds from last disable */
if (time_before64(now, priv->lb_dis_to))
msleep(jiffies64_to_msecs(priv->lb_dis_to - now));
@@ -845,15 +842,15 @@ static int gpy_loopback(struct phy_device *phydev, bool enable)
return 0;
}
-static int gpy115_loopback(struct phy_device *phydev, bool enable)
+static int gpy115_loopback(struct phy_device *phydev, bool enable, int speed)
{
struct gpy_priv *priv = phydev->priv;
if (enable)
- return gpy_loopback(phydev, enable);
+ return gpy_loopback(phydev, enable, speed);
if (priv->fw_minor > 0x76)
- return gpy_loopback(phydev, 0);
+ return gpy_loopback(phydev, 0, 0);
return genphy_soft_reset(phydev);
}
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
index 34231b5b9175..250a018d5546 100644
--- a/drivers/net/phy/nxp-c45-tja11xx.c
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* NXP C45 PHY driver
- * Copyright 2021-2023 NXP
+ * Copyright 2021-2025 NXP
* Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
*/
@@ -19,9 +19,17 @@
#include "nxp-c45-tja11xx.h"
+#define PHY_ID_MASK GENMASK(31, 4)
+/* Same id: TJA1103, TJA1104 */
#define PHY_ID_TJA_1103 0x001BB010
+/* Same id: TJA1120, TJA1121 */
#define PHY_ID_TJA_1120 0x001BB031
+#define VEND1_DEVICE_ID3 0x0004
+#define TJA1120_DEV_ID3_SILICON_VERSION GENMASK(15, 12)
+#define TJA1120_DEV_ID3_SAMPLE_TYPE GENMASK(11, 8)
+#define DEVICE_ID3_SAMPLE_TYPE_R 0x9
+
#define VEND1_DEVICE_CONTROL 0x0040
#define DEVICE_CONTROL_RESET BIT(15)
#define DEVICE_CONTROL_CONFIG_GLOBAL_EN BIT(14)
@@ -109,6 +117,9 @@
#define MII_BASIC_CONFIG_RMII 0x5
#define MII_BASIC_CONFIG_MII 0x4
+#define VEND1_SGMII_BASIC_CONTROL 0xB000
+#define SGMII_LPM BIT(11)
+
#define VEND1_SYMBOL_ERROR_CNT_XTD 0x8351
#define EXTENDED_CNT_EN BIT(15)
#define VEND1_MONITOR_STATUS 0xAC80
@@ -1593,6 +1604,63 @@ static int nxp_c45_set_phy_mode(struct phy_device *phydev)
return 0;
}
+/* Errata: ES_TJA1120 and ES_TJA1121 Rev. 1.0 — 28 November 2024 Section 3.1 & 3.2 */
+static void nxp_c45_tja1120_errata(struct phy_device *phydev)
+{
+ bool macsec_ability, sgmii_ability;
+ int silicon_version, sample_type;
+ int phy_abilities;
+ int ret = 0;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_ID3);
+ if (ret < 0)
+ return;
+
+ sample_type = FIELD_GET(TJA1120_DEV_ID3_SAMPLE_TYPE, ret);
+ if (sample_type != DEVICE_ID3_SAMPLE_TYPE_R)
+ return;
+
+ silicon_version = FIELD_GET(TJA1120_DEV_ID3_SILICON_VERSION, ret);
+
+ phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_PORT_ABILITIES);
+ macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
+ sgmii_ability = !!(phy_abilities & SGMII_ABILITY);
+ if ((!macsec_ability && silicon_version == 2) ||
+ (macsec_ability && silicon_version == 1)) {
+ /* TJA1120/TJA1121 PHY configuration errata workaround.
+ * Apply the PHY register write sequence before link up.
+ */
+ if (!macsec_ability) {
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x4b95);
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0xf3cd);
+ } else {
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x89c7);
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0x0893);
+ }
+
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x0476, 0x58a0);
+
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x8921, 0xa3a);
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x89F1, 0x16c1);
+
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x0);
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0x0);
+
+ if (sgmii_ability) {
+ /* TJA1120B/TJA1121B SGMII PCS restart errata workaround.
+ * Put SGMII PCS into power down mode and back up.
+ */
+ phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_SGMII_BASIC_CONTROL,
+ SGMII_LPM);
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_SGMII_BASIC_CONTROL,
+ SGMII_LPM);
+ }
+ }
+}
+
static int nxp_c45_config_init(struct phy_device *phydev)
{
int ret;
@@ -1609,6 +1677,9 @@ static int nxp_c45_config_init(struct phy_device *phydev)
phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);
+ if (phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, GENMASK(31, 4)))
+ nxp_c45_tja1120_errata(phydev);
+
phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
PHY_CONFIG_AUTO);
@@ -1888,6 +1959,42 @@ static void tja1120_nmi_handler(struct phy_device *phydev,
}
}
+static int nxp_c45_macsec_ability(struct phy_device *phydev)
+{
+ bool macsec_ability;
+ int phy_abilities;
+
+ phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_PORT_ABILITIES);
+ macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
+
+ return macsec_ability;
+}
+
+static int tja1103_match_phy_device(struct phy_device *phydev)
+{
+ return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1103, PHY_ID_MASK) &&
+ !nxp_c45_macsec_ability(phydev);
+}
+
+static int tja1104_match_phy_device(struct phy_device *phydev)
+{
+ return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1103, PHY_ID_MASK) &&
+ nxp_c45_macsec_ability(phydev);
+}
+
+static int tja1120_match_phy_device(struct phy_device *phydev)
+{
+ return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, PHY_ID_MASK) &&
+ !nxp_c45_macsec_ability(phydev);
+}
+
+static int tja1121_match_phy_device(struct phy_device *phydev)
+{
+ return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, PHY_ID_MASK) &&
+ nxp_c45_macsec_ability(phydev);
+}
+
static const struct nxp_c45_regmap tja1120_regmap = {
.vend1_ptp_clk_period = 0x1020,
.vend1_event_msg_filt = 0x9010,
@@ -1958,7 +2065,6 @@ static const struct nxp_c45_phy_data tja1120_phy_data = {
static struct phy_driver nxp_c45_driver[] = {
{
- PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
.name = "NXP C45 TJA1103",
.get_features = nxp_c45_get_features,
.driver_data = &tja1103_phy_data,
@@ -1980,9 +2086,33 @@ static struct phy_driver nxp_c45_driver[] = {
.get_sqi = nxp_c45_get_sqi,
.get_sqi_max = nxp_c45_get_sqi_max,
.remove = nxp_c45_remove,
+ .match_phy_device = tja1103_match_phy_device,
+ },
+ {
+ .name = "NXP C45 TJA1104",
+ .get_features = nxp_c45_get_features,
+ .driver_data = &tja1103_phy_data,
+ .probe = nxp_c45_probe,
+ .soft_reset = nxp_c45_soft_reset,
+ .config_aneg = genphy_c45_config_aneg,
+ .config_init = nxp_c45_config_init,
+ .config_intr = tja1103_config_intr,
+ .handle_interrupt = nxp_c45_handle_interrupt,
+ .read_status = genphy_c45_read_status,
+ .suspend = genphy_c45_pma_suspend,
+ .resume = genphy_c45_pma_resume,
+ .get_sset_count = nxp_c45_get_sset_count,
+ .get_strings = nxp_c45_get_strings,
+ .get_stats = nxp_c45_get_stats,
+ .cable_test_start = nxp_c45_cable_test_start,
+ .cable_test_get_status = nxp_c45_cable_test_get_status,
+ .set_loopback = genphy_c45_loopback,
+ .get_sqi = nxp_c45_get_sqi,
+ .get_sqi_max = nxp_c45_get_sqi_max,
+ .remove = nxp_c45_remove,
+ .match_phy_device = tja1104_match_phy_device,
},
{
- PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120),
.name = "NXP C45 TJA1120",
.get_features = nxp_c45_get_features,
.driver_data = &tja1120_phy_data,
@@ -2005,6 +2135,32 @@ static struct phy_driver nxp_c45_driver[] = {
.get_sqi = nxp_c45_get_sqi,
.get_sqi_max = nxp_c45_get_sqi_max,
.remove = nxp_c45_remove,
+ .match_phy_device = tja1120_match_phy_device,
+ },
+ {
+ .name = "NXP C45 TJA1121",
+ .get_features = nxp_c45_get_features,
+ .driver_data = &tja1120_phy_data,
+ .probe = nxp_c45_probe,
+ .soft_reset = nxp_c45_soft_reset,
+ .config_aneg = genphy_c45_config_aneg,
+ .config_init = nxp_c45_config_init,
+ .config_intr = tja1120_config_intr,
+ .handle_interrupt = nxp_c45_handle_interrupt,
+ .read_status = genphy_c45_read_status,
+ .link_change_notify = tja1120_link_change_notify,
+ .suspend = genphy_c45_pma_suspend,
+ .resume = genphy_c45_pma_resume,
+ .get_sset_count = nxp_c45_get_sset_count,
+ .get_strings = nxp_c45_get_strings,
+ .get_stats = nxp_c45_get_stats,
+ .cable_test_start = nxp_c45_cable_test_start,
+ .cable_test_get_status = nxp_c45_cable_test_get_status,
+ .set_loopback = genphy_c45_loopback,
+ .get_sqi = nxp_c45_get_sqi,
+ .get_sqi_max = nxp_c45_get_sqi_max,
+ .remove = nxp_c45_remove,
+ .match_phy_device = tja1121_match_phy_device,
},
};
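
Because TJA1103/TJA1104 (and likewise TJA1120/TJA1121) share one PHY ID, the entries above drop PHY_ID_MATCH_MODEL and rely on .match_phy_device, which combines the masked ID compare with the MACsec ability bit from VEND1_PORT_ABILITIES. A minimal sketch of that decision, reusing the driver's existing defines; the helper name is made up and not part of the patch:

static bool example_is_tja1104(struct phy_device *phydev)
{
	int abilities;

	/* Ignore the revision nibble, exactly as PHY_ID_MASK = GENMASK(31, 4) does */
	if (!phy_id_compare(phydev->phy_id, PHY_ID_TJA_1103, PHY_ID_MASK))
		return false;

	abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_ABILITIES);
	if (abilities < 0)
		return false;

	/* MACsec-capable parts bind as TJA1104, the others as TJA1103 */
	return !!(abilities & MACSEC_ABILITY);
}
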
diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c
index ed7fa26bac8e..07e94a2478ac 100644
--- a/drivers/net/phy/nxp-tja11xx.c
+++ b/drivers/net/phy/nxp-tja11xx.c
@@ -21,12 +21,14 @@
#define PHY_ID_TJA1100 0x0180dc40
#define PHY_ID_TJA1101 0x0180dd00
#define PHY_ID_TJA1102 0x0180dc80
+#define PHY_ID_TJA1102S 0x0180dc90
#define MII_ECTRL 17
#define MII_ECTRL_LINK_CONTROL BIT(15)
#define MII_ECTRL_POWER_MODE_MASK GENMASK(14, 11)
#define MII_ECTRL_POWER_MODE_NO_CHANGE (0x0 << 11)
#define MII_ECTRL_POWER_MODE_NORMAL (0x3 << 11)
+#define MII_ECTRL_POWER_MODE_SLEEP (0xa << 11)
#define MII_ECTRL_POWER_MODE_STANDBY (0xc << 11)
#define MII_ECTRL_CABLE_TEST BIT(5)
#define MII_ECTRL_CONFIG_EN BIT(2)
@@ -78,12 +80,13 @@
#define MII_COMMCFG 27
#define MII_COMMCFG_AUTO_OP BIT(15)
+#define MII_CFG3 28
+#define MII_CFG3_PHY_EN BIT(0)
+
/* Configure REF_CLK as input in RMII mode */
#define TJA110X_RMII_MODE_REFCLK_IN BIT(0)
struct tja11xx_priv {
- char *hwmon_name;
- struct device *hwmon_dev;
struct phy_device *phydev;
struct work_struct phy_register_work;
u32 flags;
@@ -179,6 +182,14 @@ static int tja11xx_wakeup(struct phy_device *phydev)
return ret;
return tja11xx_enable_link_control(phydev);
+ case MII_ECTRL_POWER_MODE_SLEEP:
+ switch (phydev->phy_id & PHY_ID_MASK) {
+ case PHY_ID_TJA1102S:
+ /* Enable PHY, maybe it is disabled due to pin strapping */
+ return phy_set_bits(phydev, MII_CFG3, MII_CFG3_PHY_EN);
+ default:
+ return 0;
+ }
default:
break;
}
@@ -316,6 +327,7 @@ static int tja11xx_config_init(struct phy_device *phydev)
if (ret)
return ret;
break;
+ case PHY_ID_TJA1102S:
case PHY_ID_TJA1101:
reg_mask = MII_CFG1_INTERFACE_MODE_MASK;
ret = tja11xx_get_interface_mode(phydev);
@@ -494,19 +506,12 @@ static const struct hwmon_chip_info tja11xx_hwmon_chip_info = {
static int tja11xx_hwmon_register(struct phy_device *phydev,
struct tja11xx_priv *priv)
{
- struct device *dev = &phydev->mdio.dev;
-
- priv->hwmon_name = devm_hwmon_sanitize_name(dev, dev_name(dev));
- if (IS_ERR(priv->hwmon_name))
- return PTR_ERR(priv->hwmon_name);
+ struct device *hdev, *dev = &phydev->mdio.dev;
- priv->hwmon_dev =
- devm_hwmon_device_register_with_info(dev, priv->hwmon_name,
- phydev,
- &tja11xx_hwmon_chip_info,
- NULL);
-
- return PTR_ERR_OR_ZERO(priv->hwmon_dev);
+ hdev = devm_hwmon_device_register_with_info(dev, NULL, phydev,
+ &tja11xx_hwmon_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hdev);
}
static int tja11xx_parse_dt(struct phy_device *phydev)
@@ -883,6 +888,29 @@ static struct phy_driver tja11xx_driver[] = {
.handle_interrupt = tja11xx_handle_interrupt,
.cable_test_start = tja11xx_cable_test_start,
.cable_test_get_status = tja11xx_cable_test_get_status,
+ }, {
+ PHY_ID_MATCH_MODEL(PHY_ID_TJA1102S),
+ .name = "NXP TJA1102S",
+ .features = PHY_BASIC_T1_FEATURES,
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = tja11xx_probe,
+ .soft_reset = tja11xx_soft_reset,
+ .config_aneg = tja11xx_config_aneg,
+ .config_init = tja11xx_config_init,
+ .read_status = tja11xx_read_status,
+ .get_sqi = tja11xx_get_sqi,
+ .get_sqi_max = tja11xx_get_sqi_max,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .set_loopback = genphy_loopback,
+ /* Statistics */
+ .get_sset_count = tja11xx_get_sset_count,
+ .get_strings = tja11xx_get_strings,
+ .get_stats = tja11xx_get_stats,
+ .config_intr = tja11xx_config_intr,
+ .handle_interrupt = tja11xx_handle_interrupt,
+ .cable_test_start = tja11xx_cable_test_start,
+ .cable_test_get_status = tja11xx_cable_test_get_status,
}
};
@@ -892,6 +920,7 @@ static const struct mdio_device_id __maybe_unused tja11xx_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA1100) },
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA1101) },
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA1102) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_TJA1102S) },
{ }
};
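
On the TJA1102S addition: the wakeup path now also handles MII_ECTRL_POWER_MODE_SLEEP, and only the TJA1102S needs the extra MII_CFG3_PHY_EN write because that part can leave sleep with the PHY block disabled by pin strapping. A small illustrative helper for decoding the current power mode, built only on the register defines above; the function itself is not part of the driver:

static int example_tja11xx_power_mode(struct phy_device *phydev)
{
	int ret = phy_read(phydev, MII_ECTRL);

	if (ret < 0)
		return ret;

	/* One of the MII_ECTRL_POWER_MODE_* values, still shifted into
	 * bits 14:11 as covered by MII_ECTRL_POWER_MODE_MASK.
	 */
	return ret & MII_ECTRL_POWER_MODE_MASK;
}
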
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index 0dac08e85304..bdd70d424491 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -9,6 +9,7 @@
#include <linux/phy.h>
#include "mdio-open-alliance.h"
+#include "phylib-internal.h"
/**
* genphy_c45_baset1_able - checks if the PMA has BASE-T1 extended abilities
@@ -683,13 +684,10 @@ EXPORT_SYMBOL_GPL(genphy_c45_read_mdix);
static int genphy_c45_write_eee_adv(struct phy_device *phydev,
unsigned long *adv)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
int val, changed = 0;
- linkmode_andnot(tmp, adv, phydev->eee_broken_modes);
-
if (linkmode_intersects(phydev->supported_eee, PHY_EEE_CAP1_FEATURES)) {
- val = linkmode_to_mii_eee_cap1_t(tmp);
+ val = linkmode_to_mii_eee_cap1_t(adv);
/* IEEE 802.3-2018 45.2.7.13 EEE advertisement 1
* (Register 7.60)
@@ -707,7 +705,7 @@ static int genphy_c45_write_eee_adv(struct phy_device *phydev,
}
if (linkmode_intersects(phydev->supported_eee, PHY_EEE_CAP2_FEATURES)) {
- val = linkmode_to_mii_eee_cap2_t(tmp);
+ val = linkmode_to_mii_eee_cap2_t(adv);
/* IEEE 802.3-2022 45.2.7.16 EEE advertisement 2
* (Register 7.62)
@@ -1230,8 +1228,11 @@ int gen10g_config_aneg(struct phy_device *phydev)
}
EXPORT_SYMBOL_GPL(gen10g_config_aneg);
-int genphy_c45_loopback(struct phy_device *phydev, bool enable)
+int genphy_c45_loopback(struct phy_device *phydev, bool enable, int speed)
{
+ if (enable && speed)
+ return -EOPNOTSUPP;
+
return phy_modify_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1,
MDIO_PCS_CTRL1_LOOPBACK,
enable ? MDIO_PCS_CTRL1_LOOPBACK : 0);
@@ -1467,42 +1468,32 @@ EXPORT_SYMBOL_GPL(genphy_c45_plca_get_status);
/**
* genphy_c45_eee_is_active - get EEE status
* @phydev: target phy_device struct
- * @adv: variable to store advertised linkmodes
* @lp: variable to store LP advertised linkmodes
*
- * Description: this function will read local and link partner PHY
- * advertisements. Compare them return current EEE state.
+ * Description: this function will read the link partner PHY advertisement
+ * and compare it with the local advertisement to report the current EEE state.
*/
-int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *adv,
- unsigned long *lp)
+int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *lp)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp_adv) = {};
__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp_lp) = {};
__ETHTOOL_DECLARE_LINK_MODE_MASK(common);
- bool eee_active;
int ret;
- ret = genphy_c45_read_eee_adv(phydev, tmp_adv);
- if (ret)
- return ret;
+ if (!phydev->eee_cfg.eee_enabled)
+ return 0;
ret = genphy_c45_read_eee_lpa(phydev, tmp_lp);
if (ret)
return ret;
- linkmode_and(common, tmp_adv, tmp_lp);
- if (!linkmode_empty(tmp_adv) && !linkmode_empty(common))
- eee_active = phy_check_valid(phydev->speed, phydev->duplex,
- common);
- else
- eee_active = false;
-
- if (adv)
- linkmode_copy(adv, tmp_adv);
if (lp)
linkmode_copy(lp, tmp_lp);
- return eee_active;
+ linkmode_and(common, phydev->advertising_eee, tmp_lp);
+ if (linkmode_empty(common))
+ return 0;
+
+ return phy_check_valid(phydev->speed, phydev->duplex, common);
}
EXPORT_SYMBOL(genphy_c45_eee_is_active);
@@ -1519,14 +1510,14 @@ int genphy_c45_ethtool_get_eee(struct phy_device *phydev,
{
int ret;
- ret = genphy_c45_eee_is_active(phydev, data->advertised,
- data->lp_advertised);
+ ret = genphy_c45_eee_is_active(phydev, data->lp_advertised);
if (ret < 0)
return ret;
data->eee_active = phydev->eee_active;
- linkmode_copy(data->supported, phydev->supported_eee);
-
+ linkmode_andnot(data->supported, phydev->supported_eee,
+ phydev->eee_disabled_modes);
+ linkmode_copy(data->advertised, phydev->advertising_eee);
return 0;
}
EXPORT_SYMBOL(genphy_c45_ethtool_get_eee);
@@ -1559,7 +1550,9 @@ int genphy_c45_ethtool_set_eee(struct phy_device *phydev,
phydev_warn(phydev, "At least some EEE link modes are not supported.\n");
return -EINVAL;
}
- linkmode_copy(phydev->advertising_eee, adv);
+
+ linkmode_andnot(phydev->advertising_eee, adv,
+ phydev->eee_disabled_modes);
} else if (linkmode_empty(phydev->advertising_eee)) {
phy_advertise_eee_all(phydev);
}
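
Two signature changes in this file affect callers: genphy_c45_eee_is_active() no longer takes a local-advertisement buffer (pass only the LP mask, or NULL), and genphy_c45_loopback() gains a speed argument and rejects forced-speed loopback. A hedged sketch of a driver-side .set_loopback that simply forwards to the generic helper under the new prototype; the function name is invented:

static int example_set_loopback(struct phy_device *phydev, bool enable,
				int speed)
{
	/* This hypothetical PHY cannot force a loopback speed, so let the
	 * generic C45 helper handle it; it returns -EOPNOTSUPP whenever
	 * enable is true and a non-zero speed is requested.
	 */
	return genphy_c45_loopback(phydev, enable, speed);
}
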
diff --git a/drivers/net/phy/phy-caps.h b/drivers/net/phy/phy-caps.h
new file mode 100644
index 000000000000..157759966650
--- /dev/null
+++ b/drivers/net/phy/phy-caps.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * link caps internal header, for link modes <-> capabilities <-> interfaces
+ * conversions.
+ */
+
+#ifndef __PHY_CAPS_H
+#define __PHY_CAPS_H
+
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+
+enum {
+ LINK_CAPA_10HD = 0,
+ LINK_CAPA_10FD,
+ LINK_CAPA_100HD,
+ LINK_CAPA_100FD,
+ LINK_CAPA_1000HD,
+ LINK_CAPA_1000FD,
+ LINK_CAPA_2500FD,
+ LINK_CAPA_5000FD,
+ LINK_CAPA_10000FD,
+ LINK_CAPA_20000FD,
+ LINK_CAPA_25000FD,
+ LINK_CAPA_40000FD,
+ LINK_CAPA_50000FD,
+ LINK_CAPA_56000FD,
+ LINK_CAPA_100000FD,
+ LINK_CAPA_200000FD,
+ LINK_CAPA_400000FD,
+ LINK_CAPA_800000FD,
+
+ __LINK_CAPA_MAX,
+};
+
+#define LINK_CAPA_ALL GENMASK((__LINK_CAPA_MAX - 1), 0)
+
+struct link_capabilities {
+ int speed;
+ unsigned int duplex;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(linkmodes);
+};
+
+int phy_caps_init(void);
+
+size_t phy_caps_speeds(unsigned int *speeds, size_t size,
+ unsigned long *linkmodes);
+void phy_caps_linkmode_max_speed(u32 max_speed, unsigned long *linkmodes);
+bool phy_caps_valid(int speed, int duplex, const unsigned long *linkmodes);
+void phy_caps_linkmodes(unsigned long caps, unsigned long *linkmodes);
+unsigned long phy_caps_from_interface(phy_interface_t interface);
+
+const struct link_capabilities *
+phy_caps_lookup_by_linkmode(const unsigned long *linkmodes);
+
+const struct link_capabilities *
+phy_caps_lookup_by_linkmode_rev(const unsigned long *linkmodes, bool fdx_only);
+
+const struct link_capabilities *
+phy_caps_lookup(int speed, unsigned int duplex, const unsigned long *supported,
+ bool exact);
+
+#endif /* __PHY_CAPS_H */
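
phy-caps.h replaces the old struct phy_setting table with per-speed/duplex link_capabilities. The header is internal to phylib, so the following lookup is only illustrative and the helper name is invented:

#include "phy-caps.h"

/* Resolve the best speed/duplex that @supported allows at or below
 * 1G full duplex; a NULL result means no known capability intersects it.
 */
static void example_resolve(const unsigned long *supported,
			    int *speed, int *duplex)
{
	const struct link_capabilities *c;

	c = phy_caps_lookup(SPEED_1000, DUPLEX_FULL, supported, false);
	if (c) {
		*speed = c->speed;
		*duplex = c->duplex;
	} else {
		*speed = SPEED_UNKNOWN;
		*duplex = DUPLEX_UNKNOWN;
	}
}
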
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 6bf3ec985f3d..e177037f9110 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -6,6 +6,10 @@
#include <linux/phy.h>
#include <linux/of.h>
+#include "phylib.h"
+#include "phylib-internal.h"
+#include "phy-caps.h"
+
/**
* phy_speed_to_str - Return a string representing the PHY link speed
*
@@ -13,7 +17,7 @@
*/
const char *phy_speed_to_str(int speed)
{
- BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 103,
+ BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 121,
"Enum ethtool_link_mode_bit_indices and phylib are out of sync. "
"If a speed or mode has been added please update phy_speed_to_str "
"and the PHY settings array.\n");
@@ -153,203 +157,9 @@ int phy_interface_num_ports(phy_interface_t interface)
}
EXPORT_SYMBOL_GPL(phy_interface_num_ports);
-/* A mapping of all SUPPORTED settings to speed/duplex. This table
- * must be grouped by speed and sorted in descending match priority
- * - iow, descending speed.
- */
-
-#define PHY_SETTING(s, d, b) { .speed = SPEED_ ## s, .duplex = DUPLEX_ ## d, \
- .bit = ETHTOOL_LINK_MODE_ ## b ## _BIT}
-
-static const struct phy_setting settings[] = {
- /* 800G */
- PHY_SETTING( 800000, FULL, 800000baseCR8_Full ),
- PHY_SETTING( 800000, FULL, 800000baseKR8_Full ),
- PHY_SETTING( 800000, FULL, 800000baseDR8_Full ),
- PHY_SETTING( 800000, FULL, 800000baseDR8_2_Full ),
- PHY_SETTING( 800000, FULL, 800000baseSR8_Full ),
- PHY_SETTING( 800000, FULL, 800000baseVR8_Full ),
- /* 400G */
- PHY_SETTING( 400000, FULL, 400000baseCR8_Full ),
- PHY_SETTING( 400000, FULL, 400000baseKR8_Full ),
- PHY_SETTING( 400000, FULL, 400000baseLR8_ER8_FR8_Full ),
- PHY_SETTING( 400000, FULL, 400000baseDR8_Full ),
- PHY_SETTING( 400000, FULL, 400000baseSR8_Full ),
- PHY_SETTING( 400000, FULL, 400000baseCR4_Full ),
- PHY_SETTING( 400000, FULL, 400000baseKR4_Full ),
- PHY_SETTING( 400000, FULL, 400000baseLR4_ER4_FR4_Full ),
- PHY_SETTING( 400000, FULL, 400000baseDR4_Full ),
- PHY_SETTING( 400000, FULL, 400000baseSR4_Full ),
- /* 200G */
- PHY_SETTING( 200000, FULL, 200000baseCR4_Full ),
- PHY_SETTING( 200000, FULL, 200000baseKR4_Full ),
- PHY_SETTING( 200000, FULL, 200000baseLR4_ER4_FR4_Full ),
- PHY_SETTING( 200000, FULL, 200000baseDR4_Full ),
- PHY_SETTING( 200000, FULL, 200000baseSR4_Full ),
- PHY_SETTING( 200000, FULL, 200000baseCR2_Full ),
- PHY_SETTING( 200000, FULL, 200000baseKR2_Full ),
- PHY_SETTING( 200000, FULL, 200000baseLR2_ER2_FR2_Full ),
- PHY_SETTING( 200000, FULL, 200000baseDR2_Full ),
- PHY_SETTING( 200000, FULL, 200000baseSR2_Full ),
- /* 100G */
- PHY_SETTING( 100000, FULL, 100000baseCR4_Full ),
- PHY_SETTING( 100000, FULL, 100000baseKR4_Full ),
- PHY_SETTING( 100000, FULL, 100000baseLR4_ER4_Full ),
- PHY_SETTING( 100000, FULL, 100000baseSR4_Full ),
- PHY_SETTING( 100000, FULL, 100000baseCR2_Full ),
- PHY_SETTING( 100000, FULL, 100000baseKR2_Full ),
- PHY_SETTING( 100000, FULL, 100000baseLR2_ER2_FR2_Full ),
- PHY_SETTING( 100000, FULL, 100000baseDR2_Full ),
- PHY_SETTING( 100000, FULL, 100000baseSR2_Full ),
- PHY_SETTING( 100000, FULL, 100000baseCR_Full ),
- PHY_SETTING( 100000, FULL, 100000baseKR_Full ),
- PHY_SETTING( 100000, FULL, 100000baseLR_ER_FR_Full ),
- PHY_SETTING( 100000, FULL, 100000baseDR_Full ),
- PHY_SETTING( 100000, FULL, 100000baseSR_Full ),
- /* 56G */
- PHY_SETTING( 56000, FULL, 56000baseCR4_Full ),
- PHY_SETTING( 56000, FULL, 56000baseKR4_Full ),
- PHY_SETTING( 56000, FULL, 56000baseLR4_Full ),
- PHY_SETTING( 56000, FULL, 56000baseSR4_Full ),
- /* 50G */
- PHY_SETTING( 50000, FULL, 50000baseCR2_Full ),
- PHY_SETTING( 50000, FULL, 50000baseKR2_Full ),
- PHY_SETTING( 50000, FULL, 50000baseSR2_Full ),
- PHY_SETTING( 50000, FULL, 50000baseCR_Full ),
- PHY_SETTING( 50000, FULL, 50000baseKR_Full ),
- PHY_SETTING( 50000, FULL, 50000baseLR_ER_FR_Full ),
- PHY_SETTING( 50000, FULL, 50000baseDR_Full ),
- PHY_SETTING( 50000, FULL, 50000baseSR_Full ),
- /* 40G */
- PHY_SETTING( 40000, FULL, 40000baseCR4_Full ),
- PHY_SETTING( 40000, FULL, 40000baseKR4_Full ),
- PHY_SETTING( 40000, FULL, 40000baseLR4_Full ),
- PHY_SETTING( 40000, FULL, 40000baseSR4_Full ),
- /* 25G */
- PHY_SETTING( 25000, FULL, 25000baseCR_Full ),
- PHY_SETTING( 25000, FULL, 25000baseKR_Full ),
- PHY_SETTING( 25000, FULL, 25000baseSR_Full ),
- /* 20G */
- PHY_SETTING( 20000, FULL, 20000baseKR2_Full ),
- PHY_SETTING( 20000, FULL, 20000baseMLD2_Full ),
- /* 10G */
- PHY_SETTING( 10000, FULL, 10000baseCR_Full ),
- PHY_SETTING( 10000, FULL, 10000baseER_Full ),
- PHY_SETTING( 10000, FULL, 10000baseKR_Full ),
- PHY_SETTING( 10000, FULL, 10000baseKX4_Full ),
- PHY_SETTING( 10000, FULL, 10000baseLR_Full ),
- PHY_SETTING( 10000, FULL, 10000baseLRM_Full ),
- PHY_SETTING( 10000, FULL, 10000baseR_FEC ),
- PHY_SETTING( 10000, FULL, 10000baseSR_Full ),
- PHY_SETTING( 10000, FULL, 10000baseT_Full ),
- /* 5G */
- PHY_SETTING( 5000, FULL, 5000baseT_Full ),
- /* 2.5G */
- PHY_SETTING( 2500, FULL, 2500baseT_Full ),
- PHY_SETTING( 2500, FULL, 2500baseX_Full ),
- /* 1G */
- PHY_SETTING( 1000, FULL, 1000baseT_Full ),
- PHY_SETTING( 1000, HALF, 1000baseT_Half ),
- PHY_SETTING( 1000, FULL, 1000baseT1_Full ),
- PHY_SETTING( 1000, FULL, 1000baseX_Full ),
- PHY_SETTING( 1000, FULL, 1000baseKX_Full ),
- /* 100M */
- PHY_SETTING( 100, FULL, 100baseT_Full ),
- PHY_SETTING( 100, FULL, 100baseT1_Full ),
- PHY_SETTING( 100, HALF, 100baseT_Half ),
- PHY_SETTING( 100, HALF, 100baseFX_Half ),
- PHY_SETTING( 100, FULL, 100baseFX_Full ),
- /* 10M */
- PHY_SETTING( 10, FULL, 10baseT_Full ),
- PHY_SETTING( 10, HALF, 10baseT_Half ),
- PHY_SETTING( 10, FULL, 10baseT1L_Full ),
- PHY_SETTING( 10, FULL, 10baseT1S_Full ),
- PHY_SETTING( 10, HALF, 10baseT1S_Half ),
- PHY_SETTING( 10, HALF, 10baseT1S_P2MP_Half ),
- PHY_SETTING( 10, FULL, 10baseT1BRR_Full ),
-};
-#undef PHY_SETTING
-
-/**
- * phy_lookup_setting - lookup a PHY setting
- * @speed: speed to match
- * @duplex: duplex to match
- * @mask: allowed link modes
- * @exact: an exact match is required
- *
- * Search the settings array for a setting that matches the speed and
- * duplex, and which is supported.
- *
- * If @exact is unset, either an exact match or %NULL for no match will
- * be returned.
- *
- * If @exact is set, an exact match, the fastest supported setting at
- * or below the specified speed, the slowest supported setting, or if
- * they all fail, %NULL will be returned.
- */
-const struct phy_setting *
-phy_lookup_setting(int speed, int duplex, const unsigned long *mask, bool exact)
-{
- const struct phy_setting *p, *match = NULL, *last = NULL;
- int i;
-
- for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) {
- if (p->bit < __ETHTOOL_LINK_MODE_MASK_NBITS &&
- test_bit(p->bit, mask)) {
- last = p;
- if (p->speed == speed && p->duplex == duplex) {
- /* Exact match for speed and duplex */
- match = p;
- break;
- } else if (!exact) {
- if (!match && p->speed <= speed)
- /* Candidate */
- match = p;
-
- if (p->speed < speed)
- break;
- }
- }
- }
-
- if (!match && !exact)
- match = last;
-
- return match;
-}
-EXPORT_SYMBOL_GPL(phy_lookup_setting);
-
-size_t phy_speeds(unsigned int *speeds, size_t size,
- unsigned long *mask)
-{
- size_t count;
- int i;
-
- for (i = 0, count = 0; i < ARRAY_SIZE(settings) && count < size; i++)
- if (settings[i].bit < __ETHTOOL_LINK_MODE_MASK_NBITS &&
- test_bit(settings[i].bit, mask) &&
- (count == 0 || speeds[count - 1] != settings[i].speed))
- speeds[count++] = settings[i].speed;
-
- return count;
-}
-
-static void __set_linkmode_max_speed(u32 max_speed, unsigned long *addr)
-{
- const struct phy_setting *p;
- int i;
-
- for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) {
- if (p->speed > max_speed)
- linkmode_clear_bit(p->bit, addr);
- else
- break;
- }
-}
-
static void __set_phy_supported(struct phy_device *phydev, u32 max_speed)
{
- __set_linkmode_max_speed(max_speed, phydev->supported);
+ phy_caps_linkmode_max_speed(max_speed, phydev->supported);
}
/**
@@ -388,7 +198,7 @@ void of_set_phy_supported(struct phy_device *phydev)
void of_set_phy_eee_broken(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
- unsigned long *modes = phydev->eee_broken_modes;
+ unsigned long *modes = phydev->eee_disabled_modes;
if (!IS_ENABLED(CONFIG_OF_MDIO) || !node)
return;
@@ -475,16 +285,15 @@ EXPORT_SYMBOL_GPL(phy_resolve_aneg_pause);
void phy_resolve_aneg_linkmode(struct phy_device *phydev)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(common);
- int i;
+ const struct link_capabilities *c;
linkmode_and(common, phydev->lp_advertising, phydev->advertising);
- for (i = 0; i < ARRAY_SIZE(settings); i++)
- if (test_bit(settings[i].bit, common)) {
- phydev->speed = settings[i].speed;
- phydev->duplex = settings[i].duplex;
- break;
- }
+ c = phy_caps_lookup_by_linkmode(common);
+ if (c) {
+ phydev->speed = c->speed;
+ phydev->duplex = c->duplex;
+ }
phy_resolve_aneg_pause(phydev);
}
@@ -502,7 +311,8 @@ EXPORT_SYMBOL_GPL(phy_resolve_aneg_linkmode);
void phy_check_downshift(struct phy_device *phydev)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(common);
- int i, speed = SPEED_UNKNOWN;
+ const struct link_capabilities *c;
+ int speed = SPEED_UNKNOWN;
phydev->downshifted_rate = 0;
@@ -512,11 +322,9 @@ void phy_check_downshift(struct phy_device *phydev)
linkmode_and(common, phydev->lp_advertising, phydev->advertising);
- for (i = 0; i < ARRAY_SIZE(settings); i++)
- if (test_bit(settings[i].bit, common)) {
- speed = settings[i].speed;
- break;
- }
+ c = phy_caps_lookup_by_linkmode(common);
+ if (c)
+ speed = c->speed;
if (speed == SPEED_UNKNOWN || phydev->speed >= speed)
return;
@@ -526,22 +334,17 @@ void phy_check_downshift(struct phy_device *phydev)
phydev->downshifted_rate = 1;
}
-EXPORT_SYMBOL_GPL(phy_check_downshift);
static int phy_resolve_min_speed(struct phy_device *phydev, bool fdx_only)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(common);
- int i = ARRAY_SIZE(settings);
+ const struct link_capabilities *c;
linkmode_and(common, phydev->lp_advertising, phydev->advertising);
- while (--i >= 0) {
- if (test_bit(settings[i].bit, common)) {
- if (fdx_only && settings[i].duplex != DUPLEX_FULL)
- continue;
- return settings[i].speed;
- }
- }
+ c = phy_caps_lookup_by_linkmode_rev(common, fdx_only);
+ if (c)
+ return c->speed;
return SPEED_UNKNOWN;
}
@@ -553,7 +356,7 @@ int phy_speed_down_core(struct phy_device *phydev)
if (min_common_speed == SPEED_UNKNOWN)
return -EINVAL;
- __set_linkmode_max_speed(min_common_speed, phydev->advertising);
+ phy_caps_linkmode_max_speed(min_common_speed, phydev->advertising);
return 0;
}
@@ -715,43 +518,6 @@ int __phy_package_read_mmd(struct phy_device *phydev,
EXPORT_SYMBOL(__phy_package_read_mmd);
/**
- * phy_package_read_mmd - read MMD reg relative to PHY package base addr
- * @phydev: The phy_device struct
- * @addr_offset: The offset to be added to PHY package base_addr
- * @devad: The MMD to read from
- * @regnum: The register on the MMD to read
- *
- * Convenience helper for reading a register of an MMD on a given PHY
- * using the PHY package base address. The base address is added to
- * the addr_offset value.
- *
- * Same calling rules as for phy_read();
- *
- * NOTE: It's assumed that the entire PHY package is either C22 or C45.
- */
-int phy_package_read_mmd(struct phy_device *phydev,
- unsigned int addr_offset, int devad,
- u32 regnum)
-{
- int addr = phy_package_address(phydev, addr_offset);
- int val;
-
- if (addr < 0)
- return addr;
-
- if (regnum > (u16)~0 || devad > 32)
- return -EINVAL;
-
- phy_lock_mdio_bus(phydev);
- val = mmd_phy_read(phydev->mdio.bus, addr, phydev->is_c45, devad,
- regnum);
- phy_unlock_mdio_bus(phydev);
-
- return val;
-}
-EXPORT_SYMBOL(phy_package_read_mmd);
-
-/**
* __phy_package_write_mmd - write MMD reg relative to PHY package base addr
* @phydev: The phy_device struct
* @addr_offset: The offset to be added to PHY package base_addr
@@ -785,44 +551,6 @@ int __phy_package_write_mmd(struct phy_device *phydev,
EXPORT_SYMBOL(__phy_package_write_mmd);
/**
- * phy_package_write_mmd - write MMD reg relative to PHY package base addr
- * @phydev: The phy_device struct
- * @addr_offset: The offset to be added to PHY package base_addr
- * @devad: The MMD to write to
- * @regnum: The register on the MMD to write
- * @val: value to write to @regnum
- *
- * Convenience helper for writing a register of an MMD on a given PHY
- * using the PHY package base address. The base address is added to
- * the addr_offset value.
- *
- * Same calling rules as for phy_write();
- *
- * NOTE: It's assumed that the entire PHY package is either C22 or C45.
- */
-int phy_package_write_mmd(struct phy_device *phydev,
- unsigned int addr_offset, int devad,
- u32 regnum, u16 val)
-{
- int addr = phy_package_address(phydev, addr_offset);
- int ret;
-
- if (addr < 0)
- return addr;
-
- if (regnum > (u16)~0 || devad > 32)
- return -EINVAL;
-
- phy_lock_mdio_bus(phydev);
- ret = mmd_phy_write(phydev->mdio.bus, addr, phydev->is_c45, devad,
- regnum, val);
- phy_unlock_mdio_bus(phydev);
-
- return ret;
-}
-EXPORT_SYMBOL(phy_package_write_mmd);
-
-/**
* phy_modify_changed - Function for modifying a PHY register
* @phydev: the phy_device struct
* @regnum: register number to modify
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index d0c1718e2b16..13df28445f02 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -36,6 +36,9 @@
#include <net/genetlink.h>
#include <net/sock.h>
+#include "phylib-internal.h"
+#include "phy-caps.h"
+
#define PHY_STATE_TIME HZ
#define PHY_STATE_STR(_state) \
@@ -211,25 +214,6 @@ int phy_aneg_done(struct phy_device *phydev)
EXPORT_SYMBOL(phy_aneg_done);
/**
- * phy_find_valid - find a PHY setting that matches the requested parameters
- * @speed: desired speed
- * @duplex: desired duplex
- * @supported: mask of supported link modes
- *
- * Locate a supported phy setting that is, in priority order:
- * - an exact match for the specified speed and duplex mode
- * - a match for the specified speed, or slower speed
- * - the slowest supported speed
- * Returns the matched phy_setting entry, or %NULL if no supported phy
- * settings were found.
- */
-static const struct phy_setting *
-phy_find_valid(int speed, int duplex, unsigned long *supported)
-{
- return phy_lookup_setting(speed, duplex, supported, false);
-}
-
-/**
* phy_supported_speeds - return all speeds currently supported by a phy device
* @phy: The phy device to return supported speeds of.
* @speeds: buffer to store supported speeds in.
@@ -243,7 +227,7 @@ unsigned int phy_supported_speeds(struct phy_device *phy,
unsigned int *speeds,
unsigned int size)
{
- return phy_speeds(speeds, size, phy->supported);
+ return phy_caps_speeds(speeds, size, phy->supported);
}
/**
@@ -257,7 +241,7 @@ unsigned int phy_supported_speeds(struct phy_device *phy,
*/
bool phy_check_valid(int speed, int duplex, unsigned long *features)
{
- return !!phy_lookup_setting(speed, duplex, features, true);
+ return phy_caps_valid(speed, duplex, features);
}
EXPORT_SYMBOL(phy_check_valid);
@@ -271,13 +255,14 @@ EXPORT_SYMBOL(phy_check_valid);
*/
static void phy_sanitize_settings(struct phy_device *phydev)
{
- const struct phy_setting *setting;
+ const struct link_capabilities *c;
+
+ c = phy_caps_lookup(phydev->speed, phydev->duplex, phydev->supported,
+ false);
- setting = phy_find_valid(phydev->speed, phydev->duplex,
- phydev->supported);
- if (setting) {
- phydev->speed = setting->speed;
- phydev->duplex = setting->duplex;
+ if (c) {
+ phydev->speed = c->speed;
+ phydev->duplex = c->duplex;
} else {
/* We failed to find anything (no supported speeds?) */
phydev->speed = SPEED_UNKNOWN;
@@ -302,7 +287,7 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev,
cmd->base.port = PORT_BNC;
else
cmd->base.port = phydev->port;
- cmd->base.transceiver = phy_is_internal(phydev) ?
+ cmd->base.transceiver = phydev->is_internal ?
XCVR_INTERNAL : XCVR_EXTERNAL;
cmd->base.phy_address = phydev->mdio.addr;
cmd->base.autoneg = phydev->autoneg;
@@ -520,12 +505,12 @@ int __phy_hwtstamp_set(struct phy_device *phydev,
* @phydev: the phy_device struct
* @jiffies: Run the state machine after these jiffies
*/
-void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies)
+static void phy_queue_state_machine(struct phy_device *phydev,
+ unsigned long jiffies)
{
mod_delayed_work(system_power_efficient_wq, &phydev->state_queue,
jiffies);
}
-EXPORT_SYMBOL(phy_queue_state_machine);
/**
* phy_trigger_machine - Trigger the state machine to run now
@@ -1031,7 +1016,7 @@ static int phy_check_link_status(struct phy_device *phydev)
if (phydev->link && phydev->state != PHY_RUNNING) {
phy_check_downshift(phydev);
phydev->state = PHY_RUNNING;
- err = genphy_c45_eee_is_active(phydev, NULL, NULL);
+ err = genphy_c45_eee_is_active(phydev, NULL);
phydev->eee_active = err > 0;
phydev->enable_tx_lpi = phydev->eee_cfg.tx_lpi_enabled &&
phydev->eee_active;
@@ -1501,6 +1486,24 @@ void phy_free_interrupt(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_free_interrupt);
+/**
+ * phy_get_next_update_time - Determine the next PHY update time
+ * @phydev: Pointer to the phy_device structure
+ *
+ * This function queries the PHY driver to get the time for the next polling
+ * event. If the driver does not implement the callback, a default value is
+ * used.
+ *
+ * Return: The time for the next polling event in jiffies
+ */
+static unsigned int phy_get_next_update_time(struct phy_device *phydev)
+{
+ if (phydev->drv && phydev->drv->get_next_update_time)
+ return phydev->drv->get_next_update_time(phydev);
+
+ return PHY_STATE_TIME;
+}
+
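
The new phydev->drv->get_next_update_time hook lets a driver choose its own polling interval instead of the fixed PHY_STATE_TIME. A hedged sketch of such a hook; the interval policy and function name are illustrative only:

static unsigned int example_get_next_update_time(struct phy_device *phydev)
{
	/* Poll an established link less often than one still negotiating. */
	return phydev->link ? msecs_to_jiffies(2000) : msecs_to_jiffies(1000);
}

/* Wired up in the driver's struct phy_driver as:
 *	.get_next_update_time = example_get_next_update_time,
 */
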
enum phy_state_work {
PHY_STATE_WORK_NONE,
PHY_STATE_WORK_ANEG,
@@ -1580,7 +1583,8 @@ static enum phy_state_work _phy_state_machine(struct phy_device *phydev)
* called from phy_disconnect() synchronously.
*/
if (phy_polling_mode(phydev) && phy_is_started(phydev))
- phy_queue_state_machine(phydev, PHY_STATE_TIME);
+ phy_queue_state_machine(phydev,
+ phy_get_next_update_time(phydev));
return state_work;
}
@@ -1704,6 +1708,93 @@ void phy_mac_interrupt(struct phy_device *phydev)
EXPORT_SYMBOL(phy_mac_interrupt);
/**
+ * phy_loopback - Configure loopback mode of PHY
+ * @phydev: target phy_device struct
+ * @enable: enable or disable loopback mode
+ * @speed: enable loopback mode with speed
+ *
+ * Configure loopback mode of PHY and signal link down and link up if speed is
+ * changing.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int phy_loopback(struct phy_device *phydev, bool enable, int speed)
+{
+ bool link_up = false;
+ int ret = 0;
+
+ if (!phydev->drv)
+ return -EIO;
+
+ mutex_lock(&phydev->lock);
+
+ if (enable && phydev->loopback_enabled) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (!enable && !phydev->loopback_enabled) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (enable) {
+ /*
+		 * Link up is signaled with a defined speed. If the speed
+		 * changes, link down must be signaled first and link up
+		 * signaled again with the new speed.
+ */
+ if (phydev->link && phydev->state == PHY_RUNNING) {
+ /* link is up and signaled */
+ if (speed && phydev->speed != speed) {
+ /* signal link down and up for new speed */
+ phydev->link = false;
+ phydev->state = PHY_NOLINK;
+ phy_link_down(phydev);
+
+ link_up = true;
+ }
+ } else {
+ /* link is not signaled */
+ if (speed) {
+ /* signal link up for new speed */
+ link_up = true;
+ }
+ }
+ }
+
+ if (phydev->drv->set_loopback)
+ ret = phydev->drv->set_loopback(phydev, enable, speed);
+ else
+ ret = genphy_loopback(phydev, enable, speed);
+
+ if (ret) {
+ if (enable) {
+ /* try to restore link if enabling loopback fails */
+ if (phydev->drv->set_loopback)
+ phydev->drv->set_loopback(phydev, false, 0);
+ else
+ genphy_loopback(phydev, false, 0);
+ }
+
+ goto out;
+ }
+
+ if (link_up) {
+ phydev->link = true;
+ phydev->state = PHY_RUNNING;
+ phy_link_up(phydev);
+ }
+
+ phydev->loopback_enabled = enable;
+
+out:
+ mutex_unlock(&phydev->lock);
+ return ret;
+}
+EXPORT_SYMBOL(phy_loopback);
+
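
With the speed argument, a MAC driver or selftest can request loopback at a specific rate and phy_loopback() takes care of signalling link down/up around the change. A minimal usage sketch, with the test body elided:

static int example_run_loopback_test(struct phy_device *phydev)
{
	int ret;

	/* Ask for 1G loopback; PHYs that cannot force a speed reject it,
	 * e.g. genphy_c45_loopback() returns -EOPNOTSUPP in that case.
	 */
	ret = phy_loopback(phydev, true, SPEED_1000);
	if (ret)
		return ret;

	/* ... send and verify looped-back test frames here ... */

	return phy_loopback(phydev, false, 0);
}
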
+/**
* phy_eee_tx_clock_stop_capable() - indicate whether the MAC can stop tx clock
* @phydev: target phy_device struct
*
@@ -1761,7 +1852,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
if (!phydev->drv)
return -EIO;
- ret = genphy_c45_eee_is_active(phydev, NULL, NULL);
+ ret = genphy_c45_eee_is_active(phydev, NULL);
if (ret < 0)
return ret;
if (!ret)
diff --git a/drivers/net/phy/phy_caps.c b/drivers/net/phy/phy_caps.c
new file mode 100644
index 000000000000..703321689726
--- /dev/null
+++ b/drivers/net/phy/phy_caps.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/ethtool.h>
+#include <linux/linkmode.h>
+#include <linux/phy.h>
+
+#include "phy-caps.h"
+
+static struct link_capabilities link_caps[__LINK_CAPA_MAX] __ro_after_init = {
+ { SPEED_10, DUPLEX_HALF, {0} }, /* LINK_CAPA_10HD */
+ { SPEED_10, DUPLEX_FULL, {0} }, /* LINK_CAPA_10FD */
+ { SPEED_100, DUPLEX_HALF, {0} }, /* LINK_CAPA_100HD */
+ { SPEED_100, DUPLEX_FULL, {0} }, /* LINK_CAPA_100FD */
+ { SPEED_1000, DUPLEX_HALF, {0} }, /* LINK_CAPA_1000HD */
+ { SPEED_1000, DUPLEX_FULL, {0} }, /* LINK_CAPA_1000FD */
+ { SPEED_2500, DUPLEX_FULL, {0} }, /* LINK_CAPA_2500FD */
+ { SPEED_5000, DUPLEX_FULL, {0} }, /* LINK_CAPA_5000FD */
+ { SPEED_10000, DUPLEX_FULL, {0} }, /* LINK_CAPA_10000FD */
+ { SPEED_20000, DUPLEX_FULL, {0} }, /* LINK_CAPA_20000FD */
+ { SPEED_25000, DUPLEX_FULL, {0} }, /* LINK_CAPA_25000FD */
+ { SPEED_40000, DUPLEX_FULL, {0} }, /* LINK_CAPA_40000FD */
+ { SPEED_50000, DUPLEX_FULL, {0} }, /* LINK_CAPA_50000FD */
+ { SPEED_56000, DUPLEX_FULL, {0} }, /* LINK_CAPA_56000FD */
+ { SPEED_100000, DUPLEX_FULL, {0} }, /* LINK_CAPA_100000FD */
+ { SPEED_200000, DUPLEX_FULL, {0} }, /* LINK_CAPA_200000FD */
+ { SPEED_400000, DUPLEX_FULL, {0} }, /* LINK_CAPA_400000FD */
+ { SPEED_800000, DUPLEX_FULL, {0} }, /* LINK_CAPA_800000FD */
+};
+
+static int speed_duplex_to_capa(int speed, unsigned int duplex)
+{
+ if (duplex == DUPLEX_UNKNOWN ||
+ (speed > SPEED_1000 && duplex != DUPLEX_FULL))
+ return -EINVAL;
+
+ switch (speed) {
+ case SPEED_10: return duplex == DUPLEX_FULL ?
+ LINK_CAPA_10FD : LINK_CAPA_10HD;
+ case SPEED_100: return duplex == DUPLEX_FULL ?
+ LINK_CAPA_100FD : LINK_CAPA_100HD;
+ case SPEED_1000: return duplex == DUPLEX_FULL ?
+ LINK_CAPA_1000FD : LINK_CAPA_1000HD;
+ case SPEED_2500: return LINK_CAPA_2500FD;
+ case SPEED_5000: return LINK_CAPA_5000FD;
+ case SPEED_10000: return LINK_CAPA_10000FD;
+ case SPEED_20000: return LINK_CAPA_20000FD;
+ case SPEED_25000: return LINK_CAPA_25000FD;
+ case SPEED_40000: return LINK_CAPA_40000FD;
+ case SPEED_50000: return LINK_CAPA_50000FD;
+ case SPEED_56000: return LINK_CAPA_56000FD;
+ case SPEED_100000: return LINK_CAPA_100000FD;
+ case SPEED_200000: return LINK_CAPA_200000FD;
+ case SPEED_400000: return LINK_CAPA_400000FD;
+ case SPEED_800000: return LINK_CAPA_800000FD;
+ }
+
+ return -EINVAL;
+}
+
+#define for_each_link_caps_asc_speed(cap) \
+ for (cap = link_caps; cap < &link_caps[__LINK_CAPA_MAX]; cap++)
+
+#define for_each_link_caps_desc_speed(cap) \
+ for (cap = &link_caps[__LINK_CAPA_MAX - 1]; cap >= link_caps; cap--)
+
+/**
+ * phy_caps_init() - Initializes the link_caps array from the link_mode_params.
+ *
+ * Returns: 0 if phy caps init was successful, -EINVAL if we found an
+ * unexpected linkmode setting that requires LINK_CAPS update.
+ *
+ */
+int phy_caps_init(void)
+{
+ const struct link_mode_info *linkmode;
+ int i, capa;
+
+ /* Fill the caps array from net/ethtool/common.c */
+ for (i = 0; i < __ETHTOOL_LINK_MODE_MASK_NBITS; i++) {
+ linkmode = &link_mode_params[i];
+ capa = speed_duplex_to_capa(linkmode->speed, linkmode->duplex);
+
+ if (capa < 0) {
+ if (linkmode->speed != SPEED_UNKNOWN) {
+ pr_err("Unknown speed %d, please update LINK_CAPS\n",
+ linkmode->speed);
+ return -EINVAL;
+ }
+ continue;
+ }
+
+ __set_bit(i, link_caps[capa].linkmodes);
+ }
+
+ return 0;
+}
+
+/**
+ * phy_caps_speeds() - Fill an array of supported SPEED_* values for given modes
+ * @speeds: Output array to store the speeds list into
+ * @size: Size of the output array
+ * @linkmodes: Linkmodes to get the speeds from
+ *
+ * Fills the speeds array with all possible speeds that can be achieved with
+ * the specified linkmodes.
+ *
+ * Returns: The number of speeds filled into the array. If the input array isn't
+ * big enough to store all speeds, it is filled with as many speeds as fit.
+ */
+size_t phy_caps_speeds(unsigned int *speeds, size_t size,
+ unsigned long *linkmodes)
+{
+ struct link_capabilities *lcap;
+ size_t count = 0;
+
+ for_each_link_caps_asc_speed(lcap) {
+ if (linkmode_intersects(lcap->linkmodes, linkmodes) &&
+ (count == 0 || speeds[count - 1] != lcap->speed)) {
+ speeds[count++] = lcap->speed;
+ if (count >= size)
+ break;
+ }
+ }
+
+ return count;
+}
+
+/**
+ * phy_caps_lookup_by_linkmode() - Lookup the fastest matching link_capabilities
+ * @linkmodes: Linkmodes to match against
+ *
+ * Returns: The highest-speed link_capabilities that intersects the given
+ * linkmodes. In case several DUPLEX_ options exist at that speed,
+ * DUPLEX_FULL is matched first. NULL is returned if there is no match.
+ */
+const struct link_capabilities *
+phy_caps_lookup_by_linkmode(const unsigned long *linkmodes)
+{
+ struct link_capabilities *lcap;
+
+ for_each_link_caps_desc_speed(lcap)
+ if (linkmode_intersects(lcap->linkmodes, linkmodes))
+ return lcap;
+
+ return NULL;
+}
+
+/**
+ * phy_caps_lookup_by_linkmode_rev() - Lookup the slowest matching link_capabilities
+ * @linkmodes: Linkmodes to match against
+ * @fdx_only: Full duplex match only when set
+ *
+ * Returns: The lowest-speed link_capabilities that intersects the given
+ * linkmodes. When set, fdx_only will ignore half-duplex matches.
+ * NULL is returned if there is no match.
+ */
+const struct link_capabilities *
+phy_caps_lookup_by_linkmode_rev(const unsigned long *linkmodes, bool fdx_only)
+{
+ struct link_capabilities *lcap;
+
+ for_each_link_caps_asc_speed(lcap) {
+ if (fdx_only && lcap->duplex != DUPLEX_FULL)
+ continue;
+
+ if (linkmode_intersects(lcap->linkmodes, linkmodes))
+ return lcap;
+ }
+
+ return NULL;
+}
+
+/**
+ * phy_caps_lookup() - Lookup capabilities by speed/duplex that matches a mask
+ * @speed: Speed to match
+ * @duplex: Duplex to match
+ * @supported: Mask of linkmodes to match
+ * @exact: Perform an exact match or not.
+ *
+ * Lookup a link_capabilities entry that intersects the supported linkmodes mask,
+ * and that matches the passed speed and duplex.
+ *
+ * When @exact is set, an exact match is performed on speed and duplex, meaning
+ * that if the linkmodes for the given speed and duplex intersect the supported
+ * mask, this capability is returned, otherwise we don't have a match and return
+ * NULL.
+ *
+ * When @exact is not set, we return either an exact match, matching capabilities
+ * at a lower speed, the lowest matching speed, or NULL.
+ *
+ * Returns: a matched link_capabilities according to the above process, NULL
+ * otherwise.
+ */
+const struct link_capabilities *
+phy_caps_lookup(int speed, unsigned int duplex, const unsigned long *supported,
+ bool exact)
+{
+ const struct link_capabilities *lcap, *last = NULL;
+
+ for_each_link_caps_desc_speed(lcap) {
+ if (linkmode_intersects(lcap->linkmodes, supported)) {
+ last = lcap;
+			/* exact match on speed and duplex */
+ if (lcap->speed == speed && lcap->duplex == duplex) {
+ return lcap;
+ } else if (!exact) {
+ if (lcap->speed <= speed)
+ return lcap;
+ }
+ }
+ }
+
+ if (!exact)
+ return last;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(phy_caps_lookup);
+
+/**
+ * phy_caps_linkmode_max_speed() - Clamp a linkmodes set to a max speed
+ * @max_speed: Speed limit for the linkmode set
+ * @linkmodes: Linkmodes to limit
+ */
+void phy_caps_linkmode_max_speed(u32 max_speed, unsigned long *linkmodes)
+{
+ struct link_capabilities *lcap;
+
+ for_each_link_caps_desc_speed(lcap)
+ if (lcap->speed > max_speed)
+ linkmode_andnot(linkmodes, linkmodes, lcap->linkmodes);
+ else
+ break;
+}
+
+/**
+ * phy_caps_valid() - Validate a linkmodes set against a given speed and duplex
+ * @speed: input speed to validate
+ * @duplex: input duplex to validate. Passing DUPLEX_UNKNOWN is never valid
+ * @linkmodes: The linkmodes to validate
+ *
+ * Returns: True if at least one of the linkmodes in @linkmodes can function at
+ * the given speed and duplex, false otherwise.
+ */
+bool phy_caps_valid(int speed, int duplex, const unsigned long *linkmodes)
+{
+ int capa = speed_duplex_to_capa(speed, duplex);
+
+ if (capa < 0)
+ return false;
+
+ return linkmode_intersects(link_caps[capa].linkmodes, linkmodes);
+}
+
+/**
+ * phy_caps_linkmodes() - Convert a bitfield of capabilities into linkmodes
+ * @caps: The list of caps, each bit corresponding to a LINK_CAPA value
+ * @linkmodes: The set of linkmodes to fill. Must be previously initialized.
+ */
+void phy_caps_linkmodes(unsigned long caps, unsigned long *linkmodes)
+{
+ unsigned long capa;
+
+ for_each_set_bit(capa, &caps, __LINK_CAPA_MAX)
+ linkmode_or(linkmodes, linkmodes, link_caps[capa].linkmodes);
+}
+EXPORT_SYMBOL_GPL(phy_caps_linkmodes);
+
+/**
+ * phy_caps_from_interface() - Get the link capa from a given PHY interface
+ * @interface: The PHY interface we want to get the possible Speed/Duplex from
+ *
+ * Returns: A bitmask of LINK_CAPA_xxx values that can be achieved with the
+ * provided interface.
+ */
+unsigned long phy_caps_from_interface(phy_interface_t interface)
+{
+ unsigned long link_caps = 0;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_USXGMII:
+ link_caps |= BIT(LINK_CAPA_10000FD) | BIT(LINK_CAPA_5000FD);
+ fallthrough;
+
+ case PHY_INTERFACE_MODE_10G_QXGMII:
+ link_caps |= BIT(LINK_CAPA_2500FD);
+ fallthrough;
+
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_PSGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_QUSGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_GMII:
+ link_caps |= BIT(LINK_CAPA_1000HD) | BIT(LINK_CAPA_1000FD);
+ fallthrough;
+
+ case PHY_INTERFACE_MODE_REVRMII:
+ case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_SMII:
+ case PHY_INTERFACE_MODE_REVMII:
+ case PHY_INTERFACE_MODE_MII:
+ link_caps |= BIT(LINK_CAPA_10HD) | BIT(LINK_CAPA_10FD);
+ fallthrough;
+
+ case PHY_INTERFACE_MODE_100BASEX:
+ link_caps |= BIT(LINK_CAPA_100HD) | BIT(LINK_CAPA_100FD);
+ break;
+
+ case PHY_INTERFACE_MODE_TBI:
+ case PHY_INTERFACE_MODE_MOCA:
+ case PHY_INTERFACE_MODE_RTBI:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ link_caps |= BIT(LINK_CAPA_1000HD);
+ fallthrough;
+ case PHY_INTERFACE_MODE_1000BASEKX:
+ case PHY_INTERFACE_MODE_TRGMII:
+ link_caps |= BIT(LINK_CAPA_1000FD);
+ break;
+
+ case PHY_INTERFACE_MODE_2500BASEX:
+ link_caps |= BIT(LINK_CAPA_2500FD);
+ break;
+
+ case PHY_INTERFACE_MODE_5GBASER:
+ link_caps |= BIT(LINK_CAPA_5000FD);
+ break;
+
+ case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_10GKR:
+ link_caps |= BIT(LINK_CAPA_10000FD);
+ break;
+
+ case PHY_INTERFACE_MODE_25GBASER:
+ link_caps |= BIT(LINK_CAPA_25000FD);
+ break;
+
+ case PHY_INTERFACE_MODE_XLGMII:
+ link_caps |= BIT(LINK_CAPA_40000FD);
+ break;
+
+ case PHY_INTERFACE_MODE_INTERNAL:
+ link_caps |= LINK_CAPA_ALL;
+ break;
+
+ case PHY_INTERFACE_MODE_NA:
+ case PHY_INTERFACE_MODE_MAX:
+ break;
+ }
+
+ return link_caps;
+}
+EXPORT_SYMBOL_GPL(phy_caps_from_interface);
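
phy_caps_from_interface() and phy_caps_linkmodes() are exported so MAC-facing code can turn an interface mode into the set of linkmodes it can carry without an open-coded table. An illustrative caller, with the function name invented:

static void example_interface_to_linkmodes(phy_interface_t interface,
					   unsigned long *linkmodes)
{
	unsigned long caps = phy_caps_from_interface(interface);

	/* phy_caps_linkmodes() only ORs bits in, so clear the mask first. */
	linkmode_zero(linkmodes);
	phy_caps_linkmodes(caps, linkmodes);
}
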
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 46713d27412b..675fbd225378 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -41,10 +41,24 @@
#include <linux/uaccess.h>
#include <linux/unistd.h>
+#include "phylib-internal.h"
+#include "phy-caps.h"
+
MODULE_DESCRIPTION("PHY library");
MODULE_AUTHOR("Andy Fleming");
MODULE_LICENSE("GPL");
+#define PHY_ANY_ID "MATCH ANY PHY"
+#define PHY_ANY_UID 0xffffffff
+
+struct phy_fixup {
+ struct list_head list;
+ char bus_id[MII_BUS_ID_SIZE + 3];
+ u32 phy_uid;
+ u32 phy_uid_mask;
+ int (*run)(struct phy_device *phydev);
+};
+
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_basic_features);
@@ -80,37 +94,28 @@ static const int phy_all_ports_features_array[7] = {
ETHTOOL_LINK_MODE_Backplane_BIT,
};
-const int phy_10_100_features_array[4] = {
+static const int phy_10_100_features_array[4] = {
ETHTOOL_LINK_MODE_10baseT_Half_BIT,
ETHTOOL_LINK_MODE_10baseT_Full_BIT,
ETHTOOL_LINK_MODE_100baseT_Half_BIT,
ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};
-EXPORT_SYMBOL_GPL(phy_10_100_features_array);
-const int phy_basic_t1_features_array[3] = {
+static const int phy_basic_t1_features_array[3] = {
ETHTOOL_LINK_MODE_TP_BIT,
ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
ETHTOOL_LINK_MODE_100baseT1_Full_BIT,
};
-EXPORT_SYMBOL_GPL(phy_basic_t1_features_array);
-const int phy_basic_t1s_p2mp_features_array[2] = {
+static const int phy_basic_t1s_p2mp_features_array[2] = {
ETHTOOL_LINK_MODE_TP_BIT,
ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT,
};
-EXPORT_SYMBOL_GPL(phy_basic_t1s_p2mp_features_array);
-const int phy_gbit_features_array[2] = {
+static const int phy_gbit_features_array[2] = {
ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
};
-EXPORT_SYMBOL_GPL(phy_gbit_features_array);
-
-const int phy_10gbit_features_array[1] = {
- ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
-};
-EXPORT_SYMBOL_GPL(phy_10gbit_features_array);
static const int phy_eee_cap1_features_array[] = {
ETHTOOL_LINK_MODE_100baseT_Full_BIT,
@@ -185,9 +190,8 @@ static void features_init(void)
linkmode_set_bit_array(phy_gbit_features_array,
ARRAY_SIZE(phy_gbit_features_array),
phy_10gbit_features);
- linkmode_set_bit_array(phy_10gbit_features_array,
- ARRAY_SIZE(phy_10gbit_features_array),
- phy_10gbit_features);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ phy_10gbit_features);
linkmode_set_bit_array(phy_eee_cap1_features_array,
ARRAY_SIZE(phy_eee_cap1_features_array),
@@ -378,8 +382,8 @@ static SIMPLE_DEV_PM_OPS(mdio_bus_phy_pm_ops, mdio_bus_phy_suspend,
* comparison
* @run: The actual code to be run when a matching PHY is found
*/
-int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
- int (*run)(struct phy_device *))
+static int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
+ int (*run)(struct phy_device *))
{
struct phy_fixup *fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
@@ -397,7 +401,6 @@ int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
return 0;
}
-EXPORT_SYMBOL(phy_register_fixup);
/* Registers a fixup to be run on any PHY with the UID in phy_uid */
int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask,
@@ -544,7 +547,7 @@ phy_interface_show(struct device *dev, struct device_attribute *attr, char *buf)
struct phy_device *phydev = to_phy_device(dev);
const char *mode = NULL;
- if (phy_is_internal(phydev))
+ if (phydev->is_internal)
mode = "internal";
else
mode = phy_modes(phydev->interface);
@@ -1685,243 +1688,6 @@ bool phy_driver_is_genphy_10g(struct phy_device *phydev)
EXPORT_SYMBOL_GPL(phy_driver_is_genphy_10g);
/**
- * phy_package_join - join a common PHY group
- * @phydev: target phy_device struct
- * @base_addr: cookie and base PHY address of PHY package for offset
- * calculation of global register access
- * @priv_size: if non-zero allocate this amount of bytes for private data
- *
- * This joins a PHY group and provides a shared storage for all phydevs in
- * this group. This is intended to be used for packages which contain
- * more than one PHY, for example a quad PHY transceiver.
- *
- * The base_addr parameter serves as cookie which has to have the same values
- * for all members of one group and as the base PHY address of the PHY package
- * for offset calculation to access generic registers of a PHY package.
- * Usually, one of the PHY addresses of the different PHYs in the package
- * provides access to these global registers.
- * The address which is given here, will be used in the phy_package_read()
- * and phy_package_write() convenience functions as base and added to the
- * passed offset in those functions.
- *
- * This will set the shared pointer of the phydev to the shared storage.
- * If this is the first call for a this cookie the shared storage will be
- * allocated. If priv_size is non-zero, the given amount of bytes are
- * allocated for the priv member.
- *
- * Returns < 1 on error, 0 on success. Esp. calling phy_package_join()
- * with the same cookie but a different priv_size is an error.
- */
-int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size)
-{
- struct mii_bus *bus = phydev->mdio.bus;
- struct phy_package_shared *shared;
- int ret;
-
- if (base_addr < 0 || base_addr >= PHY_MAX_ADDR)
- return -EINVAL;
-
- mutex_lock(&bus->shared_lock);
- shared = bus->shared[base_addr];
- if (!shared) {
- ret = -ENOMEM;
- shared = kzalloc(sizeof(*shared), GFP_KERNEL);
- if (!shared)
- goto err_unlock;
- if (priv_size) {
- shared->priv = kzalloc(priv_size, GFP_KERNEL);
- if (!shared->priv)
- goto err_free;
- shared->priv_size = priv_size;
- }
- shared->base_addr = base_addr;
- shared->np = NULL;
- refcount_set(&shared->refcnt, 1);
- bus->shared[base_addr] = shared;
- } else {
- ret = -EINVAL;
- if (priv_size && priv_size != shared->priv_size)
- goto err_unlock;
- refcount_inc(&shared->refcnt);
- }
- mutex_unlock(&bus->shared_lock);
-
- phydev->shared = shared;
-
- return 0;
-
-err_free:
- kfree(shared);
-err_unlock:
- mutex_unlock(&bus->shared_lock);
- return ret;
-}
-EXPORT_SYMBOL_GPL(phy_package_join);
-
-/**
- * of_phy_package_join - join a common PHY group in PHY package
- * @phydev: target phy_device struct
- * @priv_size: if non-zero allocate this amount of bytes for private data
- *
- * This is a variant of phy_package_join for PHY package defined in DT.
- *
- * The parent node of the @phydev is checked as a valid PHY package node
- * structure (by matching the node name "ethernet-phy-package") and the
- * base_addr for the PHY package is passed to phy_package_join.
- *
- * With this configuration the shared struct will also have the np value
- * filled to use additional DT defined properties in PHY specific
- * probe_once and config_init_once PHY package OPs.
- *
- * Returns < 0 on error, 0 on success. Esp. calling phy_package_join()
- * with the same cookie but a different priv_size is an error. Or a parent
- * node is not detected or is not valid or doesn't match the expected node
- * name for PHY package.
- */
-int of_phy_package_join(struct phy_device *phydev, size_t priv_size)
-{
- struct device_node *node = phydev->mdio.dev.of_node;
- struct device_node *package_node;
- u32 base_addr;
- int ret;
-
- if (!node)
- return -EINVAL;
-
- package_node = of_get_parent(node);
- if (!package_node)
- return -EINVAL;
-
- if (!of_node_name_eq(package_node, "ethernet-phy-package")) {
- ret = -EINVAL;
- goto exit;
- }
-
- if (of_property_read_u32(package_node, "reg", &base_addr)) {
- ret = -EINVAL;
- goto exit;
- }
-
- ret = phy_package_join(phydev, base_addr, priv_size);
- if (ret)
- goto exit;
-
- phydev->shared->np = package_node;
-
- return 0;
-exit:
- of_node_put(package_node);
- return ret;
-}
-EXPORT_SYMBOL_GPL(of_phy_package_join);
-
-/**
- * phy_package_leave - leave a common PHY group
- * @phydev: target phy_device struct
- *
- * This leaves a PHY group created by phy_package_join(). If this phydev
- * was the last user of the shared data between the group, this data is
- * freed. Resets the phydev->shared pointer to NULL.
- */
-void phy_package_leave(struct phy_device *phydev)
-{
- struct phy_package_shared *shared = phydev->shared;
- struct mii_bus *bus = phydev->mdio.bus;
-
- if (!shared)
- return;
-
- /* Decrease the node refcount on leave if present */
- if (shared->np)
- of_node_put(shared->np);
-
- if (refcount_dec_and_mutex_lock(&shared->refcnt, &bus->shared_lock)) {
- bus->shared[shared->base_addr] = NULL;
- mutex_unlock(&bus->shared_lock);
- kfree(shared->priv);
- kfree(shared);
- }
-
- phydev->shared = NULL;
-}
-EXPORT_SYMBOL_GPL(phy_package_leave);
-
-static void devm_phy_package_leave(struct device *dev, void *res)
-{
- phy_package_leave(*(struct phy_device **)res);
-}
-
-/**
- * devm_phy_package_join - resource managed phy_package_join()
- * @dev: device that is registering this PHY package
- * @phydev: target phy_device struct
- * @base_addr: cookie and base PHY address of PHY package for offset
- * calculation of global register access
- * @priv_size: if non-zero allocate this amount of bytes for private data
- *
- * Managed phy_package_join(). Shared storage fetched by this function,
- * phy_package_leave() is automatically called on driver detach. See
- * phy_package_join() for more information.
- */
-int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
- int base_addr, size_t priv_size)
-{
- struct phy_device **ptr;
- int ret;
-
- ptr = devres_alloc(devm_phy_package_leave, sizeof(*ptr),
- GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
-
- ret = phy_package_join(phydev, base_addr, priv_size);
-
- if (!ret) {
- *ptr = phydev;
- devres_add(dev, ptr);
- } else {
- devres_free(ptr);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(devm_phy_package_join);
-
-/**
- * devm_of_phy_package_join - resource managed of_phy_package_join()
- * @dev: device that is registering this PHY package
- * @phydev: target phy_device struct
- * @priv_size: if non-zero allocate this amount of bytes for private data
- *
- * Managed of_phy_package_join(). Shared storage fetched by this function,
- * phy_package_leave() is automatically called on driver detach. See
- * of_phy_package_join() for more information.
- */
-int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev,
- size_t priv_size)
-{
- struct phy_device **ptr;
- int ret;
-
- ptr = devres_alloc(devm_phy_package_leave, sizeof(*ptr),
- GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
-
- ret = of_phy_package_join(phydev, priv_size);
-
- if (!ret) {
- *ptr = phydev;
- devres_add(dev, ptr);
- } else {
- devres_free(ptr);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(devm_of_phy_package_join);
-
-/**
* phy_detach - detach a PHY device from its network device
* @phydev: target phy_device struct
*
@@ -2052,41 +1818,6 @@ int phy_resume(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_resume);
-int phy_loopback(struct phy_device *phydev, bool enable)
-{
- int ret = 0;
-
- if (!phydev->drv)
- return -EIO;
-
- mutex_lock(&phydev->lock);
-
- if (enable && phydev->loopback_enabled) {
- ret = -EBUSY;
- goto out;
- }
-
- if (!enable && !phydev->loopback_enabled) {
- ret = -EINVAL;
- goto out;
- }
-
- if (phydev->drv->set_loopback)
- ret = phydev->drv->set_loopback(phydev, enable);
- else
- ret = genphy_loopback(phydev, enable);
-
- if (ret)
- goto out;
-
- phydev->loopback_enabled = enable;
-
-out:
- mutex_unlock(&phydev->lock);
- return ret;
-}
-EXPORT_SYMBOL(phy_loopback);
-
/**
* phy_reset_after_clk_enable - perform a PHY reset if needed
* @phydev: target phy_device struct
@@ -2354,7 +2085,7 @@ EXPORT_SYMBOL(genphy_check_and_restart_aneg);
int __genphy_config_aneg(struct phy_device *phydev, bool changed)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(fixed_advert);
- const struct phy_setting *set;
+ const struct link_capabilities *c;
unsigned long *advert;
int err;
@@ -2380,10 +2111,11 @@ int __genphy_config_aneg(struct phy_device *phydev, bool changed)
} else {
linkmode_zero(fixed_advert);
- set = phy_lookup_setting(phydev->speed, phydev->duplex,
- phydev->supported, true);
- if (set)
- linkmode_set_bit(set->bit, fixed_advert);
+ c = phy_caps_lookup(phydev->speed, phydev->duplex,
+ phydev->supported, true);
+ if (c)
+ linkmode_and(fixed_advert, phydev->supported,
+ c->linkmodes);
advert = fixed_advert;
}
@@ -2843,12 +2575,18 @@ int genphy_resume(struct phy_device *phydev)
}
EXPORT_SYMBOL(genphy_resume);
-int genphy_loopback(struct phy_device *phydev, bool enable)
+int genphy_loopback(struct phy_device *phydev, bool enable, int speed)
{
if (enable) {
u16 ctl = BMCR_LOOPBACK;
int ret, val;
+ if (speed == SPEED_10 || speed == SPEED_100 ||
+ speed == SPEED_1000)
+ phydev->speed = speed;
+ else if (speed)
+ return -EINVAL;
+
ctl |= mii_bmcr_encode_fixed(phydev->speed, phydev->duplex);
phy_modify(phydev, MII_BMCR, ~0, ctl);
@@ -2966,7 +2704,7 @@ void phy_disable_eee(struct phy_device *phydev)
phydev->eee_cfg.tx_lpi_enabled = false;
phydev->eee_cfg.eee_enabled = false;
/* don't let userspace re-enable EEE advertisement */
- linkmode_fill(phydev->eee_broken_modes);
+ linkmode_fill(phydev->eee_disabled_modes);
}
EXPORT_SYMBOL_GPL(phy_disable_eee);
@@ -3096,19 +2834,12 @@ void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause)
EXPORT_SYMBOL(phy_get_pause);
#if IS_ENABLED(CONFIG_OF_MDIO)
-static int phy_get_int_delay_property(struct device *dev, const char *name)
+static int phy_get_u32_property(struct device *dev, const char *name, u32 *val)
{
- s32 int_delay;
- int ret;
-
- ret = device_property_read_u32(dev, name, &int_delay);
- if (ret)
- return ret;
-
- return int_delay;
+ return device_property_read_u32(dev, name, val);
}
#else
-static int phy_get_int_delay_property(struct device *dev, const char *name)
+static int phy_get_u32_property(struct device *dev, const char *name, u32 *val)
{
return -EINVAL;
}
@@ -3133,12 +2864,12 @@ static int phy_get_int_delay_property(struct device *dev, const char *name)
s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
const int *delay_values, int size, bool is_rx)
{
- s32 delay;
- int i;
+ int i, ret;
+ u32 delay;
if (is_rx) {
- delay = phy_get_int_delay_property(dev, "rx-internal-delay-ps");
- if (delay < 0 && size == 0) {
+ ret = phy_get_u32_property(dev, "rx-internal-delay-ps", &delay);
+ if (ret < 0 && size == 0) {
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
return 1;
@@ -3147,8 +2878,8 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
}
} else {
- delay = phy_get_int_delay_property(dev, "tx-internal-delay-ps");
- if (delay < 0 && size == 0) {
+ ret = phy_get_u32_property(dev, "tx-internal-delay-ps", &delay);
+ if (ret < 0 && size == 0) {
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
return 1;
@@ -3157,8 +2888,8 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
}
}
- if (delay < 0)
- return delay;
+ if (ret < 0)
+ return ret;
if (size == 0)
return delay;
@@ -3193,6 +2924,30 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
}
EXPORT_SYMBOL(phy_get_internal_delay);
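As a usage sketch (driver name and delay table are hypothetical, not part of this patch): with a non-empty delay table the helper returns an index into that table, so a PHY driver can map the DT property straight to a hardware delay step.

    /* A minimal sketch, assuming a hypothetical table of supported RGMII
     * delay steps in picoseconds.
     */
    static const int foo_phy_delays_ps[] = { 250, 500, 1000, 2000 };

    static int foo_phy_get_rx_delay_index(struct phy_device *phydev)
    {
            /* returns an index into foo_phy_delays_ps, or a negative errno */
            return phy_get_internal_delay(phydev, &phydev->mdio.dev,
                                          foo_phy_delays_ps,
                                          ARRAY_SIZE(foo_phy_delays_ps),
                                          true);
    }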
+/**
+ * phy_get_tx_amplitude_gain - stores tx amplitude gain in @val
+ * @phydev: phy_device struct
+ * @dev: pointer to the PHY's device struct used for property lookup
+ * @linkmode: linkmode for which the tx amplitude gain should be retrieved
+ * @val: tx amplitude gain
+ *
+ * Returns: 0 on success, < 0 on failure
+ */
+int phy_get_tx_amplitude_gain(struct phy_device *phydev, struct device *dev,
+ enum ethtool_link_mode_bit_indices linkmode,
+ u32 *val)
+{
+ switch (linkmode) {
+ case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
+ return phy_get_u32_property(dev,
+ "tx-amplitude-100base-tx-percent",
+ val);
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(phy_get_tx_amplitude_gain);
+
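A usage sketch for the new helper (driver name and fallback policy are hypothetical): fetch the optional DT property during config_init and keep the nominal amplitude when it is absent.

    static int foo_phy_set_tx_amplitude(struct phy_device *phydev)
    {
            u32 gain;
            int ret;

            ret = phy_get_tx_amplitude_gain(phydev, &phydev->mdio.dev,
                                            ETHTOOL_LINK_MODE_100baseT_Full_BIT,
                                            &gain);
            if (ret < 0)
                    gain = 100;     /* property absent: keep nominal amplitude */

            /* translate gain (in percent) into a vendor amplitude register here */
            return 0;
    }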
static int phy_led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness value)
{
@@ -3563,22 +3318,21 @@ static int phy_probe(struct device *dev)
if (err)
goto out;
- /* There is no "enabled" flag. If PHY is advertising, assume it is
- * kind of enabled.
- */
- phydev->eee_cfg.eee_enabled = !linkmode_empty(phydev->advertising_eee);
+ /* Get the EEE modes we want to prohibit. */
+ of_set_phy_eee_broken(phydev);
/* Some PHYs may, by default, advertise EEE modes they do not support. So,
- * we need to clean them.
+ * we need to clean them. In addition remove all disabled EEE modes.
*/
- if (phydev->eee_cfg.eee_enabled)
- linkmode_and(phydev->advertising_eee, phydev->supported_eee,
- phydev->advertising_eee);
+ linkmode_and(phydev->advertising_eee, phydev->supported_eee,
+ phydev->advertising_eee);
+ linkmode_andnot(phydev->advertising_eee, phydev->advertising_eee,
+ phydev->eee_disabled_modes);
- /* Get the EEE modes we want to prohibit. We will ask
- * the PHY stop advertising these mode later on
+ /* There is no "enabled" flag. If PHY is advertising, assume it is
+ * kind of enabled.
*/
- of_set_phy_eee_broken(phydev);
+ phydev->eee_cfg.eee_enabled = !linkmode_empty(phydev->advertising_eee);
/* Get master/slave strap overrides */
of_set_phy_timing_role(phydev);
@@ -3777,6 +3531,10 @@ static int __init phy_init(void)
if (rc)
goto err_ethtool_phy_ops;
+ rc = phy_caps_init();
+ if (rc)
+ goto err_mdio_bus;
+
features_init();
rc = phy_driver_register(&genphy_c45_driver, THIS_MODULE);
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
index f550576eb9da..bd3c9554f6ac 100644
--- a/drivers/net/phy/phy_led_triggers.c
+++ b/drivers/net/phy/phy_led_triggers.c
@@ -5,6 +5,8 @@
#include <linux/phy_led_triggers.h>
#include <linux/netdevice.h>
+#include "phylib-internal.h"
+
static struct phy_led_trigger *phy_speed_to_led_trigger(struct phy_device *phy,
unsigned int speed)
{
diff --git a/drivers/net/phy/phy_link_topology.c b/drivers/net/phy/phy_link_topology.c
index 4a5d73002a1a..0e9e987f37dd 100644
--- a/drivers/net/phy/phy_link_topology.c
+++ b/drivers/net/phy/phy_link_topology.c
@@ -73,7 +73,7 @@ int phy_link_topo_add_phy(struct net_device *dev,
xa_limit_32b, &topo->next_phy_index,
GFP_KERNEL);
- if (ret)
+ if (ret < 0)
goto err;
return 0;
diff --git a/drivers/net/phy/phy_package.c b/drivers/net/phy/phy_package.c
new file mode 100644
index 000000000000..c738f76e8664
--- /dev/null
+++ b/drivers/net/phy/phy_package.c
@@ -0,0 +1,350 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PHY package support
+ */
+
+#include <linux/of.h>
+#include <linux/phy.h>
+
+#include "phylib.h"
+#include "phylib-internal.h"
+
+/**
+ * struct phy_package_shared - Shared information in PHY packages
+ * @base_addr: Base PHY address of PHY package used to combine PHYs
+ * in one package and for offset calculation of phy_package_read/write
+ * @np: Pointer to the Device Node if PHY package defined in DT
+ * @refcnt: Number of PHYs connected to this shared data
+ * @flags: Initialization state flags of the PHY package
+ * @priv_size: Size of the shared private data @priv
+ * @priv: Driver private data shared across a PHY package
+ *
+ * Represents a shared structure between different phydev's in the same
+ * package, for example a quad PHY. See phy_package_join() and
+ * phy_package_leave().
+ */
+struct phy_package_shared {
+ u8 base_addr;
+ /* With PHY package defined in DT this points to the PHY package node */
+ struct device_node *np;
+ refcount_t refcnt;
+ unsigned long flags;
+ size_t priv_size;
+
+ /* private data pointer */
+ /* note that this pointer is shared between different phydevs and
+ * the user has to take care of appropriate locking. It is allocated
+ * and freed automatically by phy_package_join() and
+ * phy_package_leave().
+ */
+ void *priv;
+};
+
+struct device_node *phy_package_get_node(struct phy_device *phydev)
+{
+ return phydev->shared->np;
+}
+EXPORT_SYMBOL_GPL(phy_package_get_node);
+
+void *phy_package_get_priv(struct phy_device *phydev)
+{
+ return phydev->shared->priv;
+}
+EXPORT_SYMBOL_GPL(phy_package_get_priv);
+
+int phy_package_address(struct phy_device *phydev, unsigned int addr_offset)
+{
+ struct phy_package_shared *shared = phydev->shared;
+ u8 base_addr = shared->base_addr;
+
+ if (addr_offset >= PHY_MAX_ADDR - base_addr)
+ return -EIO;
+
+ /* we know that addr will be in the range 0..31 and thus the
+ * implicit cast to a signed int is not a problem.
+ */
+ return base_addr + addr_offset;
+}
+
+int __phy_package_read(struct phy_device *phydev, unsigned int addr_offset,
+ u32 regnum)
+{
+ int addr = phy_package_address(phydev, addr_offset);
+
+ if (addr < 0)
+ return addr;
+
+ return __mdiobus_read(phydev->mdio.bus, addr, regnum);
+}
+EXPORT_SYMBOL_GPL(__phy_package_read);
+
+int __phy_package_write(struct phy_device *phydev, unsigned int addr_offset,
+ u32 regnum, u16 val)
+{
+ int addr = phy_package_address(phydev, addr_offset);
+
+ if (addr < 0)
+ return addr;
+
+ return __mdiobus_write(phydev->mdio.bus, addr, regnum, val);
+}
+EXPORT_SYMBOL_GPL(__phy_package_write);
+
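A usage sketch (quad PHY, register number and driver name hypothetical): the __ accessors expect the caller to already hold the MDIO bus lock; the locked phy_package_read()/phy_package_write() convenience wrappers mentioned in the phy_package_join() kernel-doc below can be used for single accesses instead.

    static int foo_quad_phy_read_global_cfg(struct phy_device *phydev)
    {
            int val;

            phy_lock_mdio_bus(phydev);
            val = __phy_package_read(phydev, 0, 0x1f); /* hypothetical register */
            phy_unlock_mdio_bus(phydev);

            return val;
    }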
+static bool __phy_package_set_once(struct phy_device *phydev, unsigned int b)
+{
+ struct phy_package_shared *shared = phydev->shared;
+
+ if (!shared)
+ return false;
+
+ return !test_and_set_bit(b, &shared->flags);
+}
+
+bool phy_package_init_once(struct phy_device *phydev)
+{
+ return __phy_package_set_once(phydev, 0);
+}
+EXPORT_SYMBOL_GPL(phy_package_init_once);
+
+bool phy_package_probe_once(struct phy_device *phydev)
+{
+ return __phy_package_set_once(phydev, 1);
+}
+EXPORT_SYMBOL_GPL(phy_package_probe_once);
+
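A usage sketch (hypothetical registers): package-wide setup guarded by phy_package_init_once() runs exactly once, from whichever PHY of the package reaches config_init first; phy_package_probe_once() works the same way for probe.

    static int foo_quad_phy_config_init(struct phy_device *phydev)
    {
            int ret;

            if (phy_package_init_once(phydev)) {
                    /* hypothetical package-global setup via the base PHY */
                    ret = phy_package_write(phydev, 0, 0x1e, 0x0001);
                    if (ret < 0)
                            return ret;
            }

            /* per-PHY initialisation continues here */
            return 0;
    }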
+/**
+ * phy_package_join - join a common PHY group
+ * @phydev: target phy_device struct
+ * @base_addr: cookie and base PHY address of PHY package for offset
+ * calculation of global register access
+ * @priv_size: if non-zero allocate this amount of bytes for private data
+ *
+ * This joins a PHY group and provides a shared storage for all phydevs in
+ * this group. This is intended to be used for packages which contain
+ * more than one PHY, for example a quad PHY transceiver.
+ *
+ * The base_addr parameter serves as a cookie which has to have the same value
+ * for all members of one group and as the base PHY address of the PHY package
+ * for offset calculation to access generic registers of a PHY package.
+ * Usually, one of the PHY addresses of the different PHYs in the package
+ * provides access to these global registers.
+ * The address which is given here, will be used in the phy_package_read()
+ * and phy_package_write() convenience functions as base and added to the
+ * passed offset in those functions.
+ *
+ * This will set the shared pointer of the phydev to the shared storage.
+ * If this is the first call for this cookie, the shared storage will be
+ * allocated. If priv_size is non-zero, the given amount of bytes are
+ * allocated for the priv member.
+ *
+ * Returns < 0 on error, 0 on success. In particular, calling phy_package_join()
+ * with the same cookie but a different priv_size is an error.
+ */
+int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size)
+{
+ struct mii_bus *bus = phydev->mdio.bus;
+ struct phy_package_shared *shared;
+ int ret;
+
+ if (base_addr < 0 || base_addr >= PHY_MAX_ADDR)
+ return -EINVAL;
+
+ mutex_lock(&bus->shared_lock);
+ shared = bus->shared[base_addr];
+ if (!shared) {
+ ret = -ENOMEM;
+ shared = kzalloc(sizeof(*shared), GFP_KERNEL);
+ if (!shared)
+ goto err_unlock;
+ if (priv_size) {
+ shared->priv = kzalloc(priv_size, GFP_KERNEL);
+ if (!shared->priv)
+ goto err_free;
+ shared->priv_size = priv_size;
+ }
+ shared->base_addr = base_addr;
+ shared->np = NULL;
+ refcount_set(&shared->refcnt, 1);
+ bus->shared[base_addr] = shared;
+ } else {
+ ret = -EINVAL;
+ if (priv_size && priv_size != shared->priv_size)
+ goto err_unlock;
+ refcount_inc(&shared->refcnt);
+ }
+ mutex_unlock(&bus->shared_lock);
+
+ phydev->shared = shared;
+
+ return 0;
+
+err_free:
+ kfree(shared);
+err_unlock:
+ mutex_unlock(&bus->shared_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_package_join);
+
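A minimal sketch of the manual join/leave pairing (shared struct, driver names and base-address derivation are hypothetical):

    struct foo_shared {
            int package_mode;
    };

    static int foo_phy_probe(struct phy_device *phydev)
    {
            /* every PHY of the package must pass the same cookie/base address */
            return phy_package_join(phydev, phydev->mdio.addr & ~0x3,
                                    sizeof(struct foo_shared));
    }

    static void foo_phy_remove(struct phy_device *phydev)
    {
            phy_package_leave(phydev);
    }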
+/**
+ * of_phy_package_join - join a common PHY group in PHY package
+ * @phydev: target phy_device struct
+ * @priv_size: if non-zero allocate this amount of bytes for private data
+ *
+ * This is a variant of phy_package_join for PHY package defined in DT.
+ *
+ * The parent node of the @phydev is checked as a valid PHY package node
+ * structure (by matching the node name "ethernet-phy-package") and the
+ * base_addr for the PHY package is passed to phy_package_join.
+ *
+ * With this configuration the shared struct will also have @np set, so
+ * additional DT-defined properties can be used in the PHY-specific
+ * probe_once and config_init_once PHY package ops.
+ *
+ * Returns < 0 on error, 0 on success. In particular, calling
+ * phy_package_join() with the same cookie but a different priv_size is an
+ * error, as is a parent node that is missing, invalid or doesn't match the
+ * expected PHY package node name.
+ */
+int of_phy_package_join(struct phy_device *phydev, size_t priv_size)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct device_node *package_node;
+ u32 base_addr;
+ int ret;
+
+ if (!node)
+ return -EINVAL;
+
+ package_node = of_get_parent(node);
+ if (!package_node)
+ return -EINVAL;
+
+ if (!of_node_name_eq(package_node, "ethernet-phy-package")) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (of_property_read_u32(package_node, "reg", &base_addr)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = phy_package_join(phydev, base_addr, priv_size);
+ if (ret)
+ goto exit;
+
+ phydev->shared->np = package_node;
+
+ return 0;
+exit:
+ of_node_put(package_node);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_phy_package_join);
+
+/**
+ * phy_package_leave - leave a common PHY group
+ * @phydev: target phy_device struct
+ *
+ * This leaves a PHY group created by phy_package_join(). If this phydev
+ * was the last user of the shared data between the group, this data is
+ * freed. Resets the phydev->shared pointer to NULL.
+ */
+void phy_package_leave(struct phy_device *phydev)
+{
+ struct phy_package_shared *shared = phydev->shared;
+ struct mii_bus *bus = phydev->mdio.bus;
+
+ if (!shared)
+ return;
+
+ /* Decrease the node refcount on leave if present */
+ if (shared->np)
+ of_node_put(shared->np);
+
+ if (refcount_dec_and_mutex_lock(&shared->refcnt, &bus->shared_lock)) {
+ bus->shared[shared->base_addr] = NULL;
+ mutex_unlock(&bus->shared_lock);
+ kfree(shared->priv);
+ kfree(shared);
+ }
+
+ phydev->shared = NULL;
+}
+EXPORT_SYMBOL_GPL(phy_package_leave);
+
+static void devm_phy_package_leave(struct device *dev, void *res)
+{
+ phy_package_leave(*(struct phy_device **)res);
+}
+
+/**
+ * devm_phy_package_join - resource managed phy_package_join()
+ * @dev: device that is registering this PHY package
+ * @phydev: target phy_device struct
+ * @base_addr: cookie and base PHY address of PHY package for offset
+ * calculation of global register access
+ * @priv_size: if non-zero allocate this amount of bytes for private data
+ *
+ * Managed phy_package_join(). The shared storage is fetched by this function
+ * and phy_package_leave() is automatically called on driver detach. See
+ * phy_package_join() for more information.
+ */
+int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
+ int base_addr, size_t priv_size)
+{
+ struct phy_device **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_phy_package_leave, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = phy_package_join(phydev, base_addr, priv_size);
+
+ if (!ret) {
+ *ptr = phydev;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_phy_package_join);
+
+/**
+ * devm_of_phy_package_join - resource managed of_phy_package_join()
+ * @dev: device that is registering this PHY package
+ * @phydev: target phy_device struct
+ * @priv_size: if non-zero allocate this amount of bytes for private data
+ *
+ * Managed of_phy_package_join(). The shared storage is fetched by this
+ * function and phy_package_leave() is automatically called on driver detach.
+ * See of_phy_package_join() for more information.
+ */
+int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev,
+ size_t priv_size)
+{
+ struct phy_device **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_phy_package_leave, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = of_phy_package_join(phydev, priv_size);
+
+ if (!ret) {
+ *ptr = phydev;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_of_phy_package_join);
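A minimal sketch of the DT-described, resource-managed variant (names and the DT property are hypothetical; the qca807x changes later in this patch are a real user of this pattern):

    struct foo_shared {
            u32 package_mode;
    };

    static int foo_phy_probe(struct phy_device *phydev)
    {
            struct device *dev = &phydev->mdio.dev;
            struct device_node *package_np;
            struct foo_shared *shared;
            int ret;

            ret = devm_of_phy_package_join(dev, phydev, sizeof(*shared));
            if (ret)
                    return ret;

            shared = phy_package_get_priv(phydev);
            package_np = phy_package_get_node(phydev);

            /* hypothetical package-level DT property */
            of_property_read_u32(package_np, "foo,package-mode",
                                 &shared->package_mode);

            return 0;
    }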
diff --git a/drivers/net/phy/phylib-internal.h b/drivers/net/phy/phylib-internal.h
new file mode 100644
index 000000000000..afac2bd15b50
--- /dev/null
+++ b/drivers/net/phy/phylib-internal.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * phylib-internal header
+ */
+
+#ifndef __PHYLIB_INTERNAL_H
+#define __PHYLIB_INTERNAL_H
+
+struct phy_device;
+
+/*
+ * phy_supported_speeds - return all speeds currently supported by a PHY device
+ */
+unsigned int phy_supported_speeds(struct phy_device *phy,
+ unsigned int *speeds,
+ unsigned int size);
+void of_set_phy_supported(struct phy_device *phydev);
+void of_set_phy_eee_broken(struct phy_device *phydev);
+void of_set_phy_timing_role(struct phy_device *phydev);
+int phy_speed_down_core(struct phy_device *phydev);
+void phy_check_downshift(struct phy_device *phydev);
+
+int phy_package_address(struct phy_device *phydev, unsigned int addr_offset);
+
+int genphy_c45_read_eee_adv(struct phy_device *phydev, unsigned long *adv);
+
+#endif /* __PHYLIB_INTERNAL_H */
diff --git a/drivers/net/phy/phylib.h b/drivers/net/phy/phylib.h
new file mode 100644
index 000000000000..c15484a805b3
--- /dev/null
+++ b/drivers/net/phy/phylib.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * phylib header
+ */
+
+#ifndef __PHYLIB_H
+#define __PHYLIB_H
+
+struct device_node;
+struct phy_device;
+
+struct device_node *phy_package_get_node(struct phy_device *phydev);
+void *phy_package_get_priv(struct phy_device *phydev);
+int __phy_package_read(struct phy_device *phydev, unsigned int addr_offset,
+ u32 regnum);
+int __phy_package_write(struct phy_device *phydev, unsigned int addr_offset,
+ u32 regnum, u16 val);
+int __phy_package_read_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum);
+int __phy_package_write_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum, u16 val);
+bool phy_package_init_once(struct phy_device *phydev);
+bool phy_package_probe_once(struct phy_device *phydev);
+int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size);
+int of_phy_package_join(struct phy_device *phydev, size_t priv_size);
+void phy_package_leave(struct phy_device *phydev);
+int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
+ int base_addr, size_t priv_size);
+int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev,
+ size_t priv_size);
+
+#endif /* __PHYLIB_H */
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index b00a315de060..69ca765485db 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -20,6 +20,7 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
+#include "phy-caps.h"
#include "sfp.h"
#include "swphy.h"
@@ -71,8 +72,6 @@ struct phylink {
struct gpio_desc *link_gpio;
unsigned int link_irq;
struct timer_list link_poll;
- void (*get_fixed_state)(struct net_device *dev,
- struct phylink_link_state *s);
struct mutex state_mutex;
struct phylink_link_state phy_state;
@@ -82,12 +81,14 @@ struct phylink {
unsigned int pcs_state;
bool link_failed;
+ bool major_config_failed;
bool mac_supports_eee_ops;
bool mac_supports_eee;
bool phy_enable_tx_lpi;
bool mac_enable_tx_lpi;
bool mac_tx_clk_stop;
u32 mac_tx_lpi_timer;
+ u8 mac_rx_clk_stop_blocked;
struct sfp_bus *sfp_bus;
bool sfp_may_have_phy;
@@ -291,6 +292,61 @@ static int phylink_interface_max_speed(phy_interface_t interface)
return SPEED_UNKNOWN;
}
+static struct {
+ unsigned long mask;
+ int speed;
+ unsigned int duplex;
+ unsigned int caps_bit;
+} phylink_caps_params[] = {
+ { MAC_400000FD, SPEED_400000, DUPLEX_FULL, BIT(LINK_CAPA_400000FD) },
+ { MAC_200000FD, SPEED_200000, DUPLEX_FULL, BIT(LINK_CAPA_200000FD) },
+ { MAC_100000FD, SPEED_100000, DUPLEX_FULL, BIT(LINK_CAPA_100000FD) },
+ { MAC_56000FD, SPEED_56000, DUPLEX_FULL, BIT(LINK_CAPA_56000FD) },
+ { MAC_50000FD, SPEED_50000, DUPLEX_FULL, BIT(LINK_CAPA_50000FD) },
+ { MAC_40000FD, SPEED_40000, DUPLEX_FULL, BIT(LINK_CAPA_40000FD) },
+ { MAC_25000FD, SPEED_25000, DUPLEX_FULL, BIT(LINK_CAPA_25000FD) },
+ { MAC_20000FD, SPEED_20000, DUPLEX_FULL, BIT(LINK_CAPA_20000FD) },
+ { MAC_10000FD, SPEED_10000, DUPLEX_FULL, BIT(LINK_CAPA_10000FD) },
+ { MAC_5000FD, SPEED_5000, DUPLEX_FULL, BIT(LINK_CAPA_5000FD) },
+ { MAC_2500FD, SPEED_2500, DUPLEX_FULL, BIT(LINK_CAPA_2500FD) },
+ { MAC_1000FD, SPEED_1000, DUPLEX_FULL, BIT(LINK_CAPA_1000FD) },
+ { MAC_1000HD, SPEED_1000, DUPLEX_HALF, BIT(LINK_CAPA_1000HD) },
+ { MAC_100FD, SPEED_100, DUPLEX_FULL, BIT(LINK_CAPA_100FD) },
+ { MAC_100HD, SPEED_100, DUPLEX_HALF, BIT(LINK_CAPA_100HD) },
+ { MAC_10FD, SPEED_10, DUPLEX_FULL, BIT(LINK_CAPA_10FD) },
+ { MAC_10HD, SPEED_10, DUPLEX_HALF, BIT(LINK_CAPA_10HD) },
+};
+
+/**
+ * phylink_caps_to_link_caps() - Convert a set of MAC capabilities to LINK caps
+ * @caps: A set of MAC capabilities
+ *
+ * Returns: The corresponding set of LINK_CAPA as defined in phy-caps.h
+ */
+static unsigned long phylink_caps_to_link_caps(unsigned long caps)
+{
+ unsigned long link_caps = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(phylink_caps_params); i++)
+ if (caps & phylink_caps_params[i].mask)
+ link_caps |= phylink_caps_params[i].caps_bit;
+
+ return link_caps;
+}
+
+static unsigned long phylink_link_caps_to_mac_caps(unsigned long link_caps)
+{
+ unsigned long caps = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(phylink_caps_params); i++)
+ if (link_caps & phylink_caps_params[i].caps_bit)
+ caps |= phylink_caps_params[i].mask;
+
+ return caps;
+}
+
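For capabilities that appear in the table above, the two helpers are inverses of each other; the pause bits are deliberately not part of the table and are handled separately. A small illustrative sketch:

    static void foo_check_caps_round_trip(void)
    {
            unsigned long caps = MAC_1000FD | MAC_100FD | MAC_10FD;
            unsigned long link_caps = phylink_caps_to_link_caps(caps);

            WARN_ON(phylink_link_caps_to_mac_caps(link_caps) != caps);
    }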
/**
* phylink_caps_to_linkmodes() - Convert capabilities to ethtool link modes
* @linkmodes: ethtool linkmode mask (must be already initialised)
@@ -302,172 +358,17 @@ static int phylink_interface_max_speed(phy_interface_t interface)
static void phylink_caps_to_linkmodes(unsigned long *linkmodes,
unsigned long caps)
{
+ unsigned long link_caps = phylink_caps_to_link_caps(caps);
+
if (caps & MAC_SYM_PAUSE)
__set_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes);
if (caps & MAC_ASYM_PAUSE)
__set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, linkmodes);
- if (caps & MAC_10HD) {
- __set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10baseT1S_Half_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT, linkmodes);
- }
-
- if (caps & MAC_10FD) {
- __set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10baseT1S_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_100HD) {
- __set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100baseFX_Half_BIT, linkmodes);
- }
-
- if (caps & MAC_100FD) {
- __set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100baseFX_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_1000HD)
- __set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, linkmodes);
-
- if (caps & MAC_1000FD) {
- __set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_1000baseT1_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_2500FD) {
- __set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_5000FD)
- __set_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, linkmodes);
-
- if (caps & MAC_10000FD) {
- __set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10000baseER_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_25000FD) {
- __set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_40000FD) {
- __set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_50000FD) {
- __set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
- linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_56000FD) {
- __set_bit(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_100000FD) {
- __set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
- linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
- linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseKR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseSR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT,
- linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseCR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseDR_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_200000FD) {
- __set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
- linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT,
- linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_400000FD) {
- __set_bit(ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
- linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
- linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT, linkmodes);
- }
+ phy_caps_linkmodes(link_caps, linkmodes);
}
-static struct {
- unsigned long mask;
- int speed;
- unsigned int duplex;
-} phylink_caps_params[] = {
- { MAC_400000FD, SPEED_400000, DUPLEX_FULL },
- { MAC_200000FD, SPEED_200000, DUPLEX_FULL },
- { MAC_100000FD, SPEED_100000, DUPLEX_FULL },
- { MAC_56000FD, SPEED_56000, DUPLEX_FULL },
- { MAC_50000FD, SPEED_50000, DUPLEX_FULL },
- { MAC_40000FD, SPEED_40000, DUPLEX_FULL },
- { MAC_25000FD, SPEED_25000, DUPLEX_FULL },
- { MAC_20000FD, SPEED_20000, DUPLEX_FULL },
- { MAC_10000FD, SPEED_10000, DUPLEX_FULL },
- { MAC_5000FD, SPEED_5000, DUPLEX_FULL },
- { MAC_2500FD, SPEED_2500, DUPLEX_FULL },
- { MAC_1000FD, SPEED_1000, DUPLEX_FULL },
- { MAC_1000HD, SPEED_1000, DUPLEX_HALF },
- { MAC_100FD, SPEED_100, DUPLEX_FULL },
- { MAC_100HD, SPEED_100, DUPLEX_HALF },
- { MAC_10FD, SPEED_10, DUPLEX_FULL },
- { MAC_10HD, SPEED_10, DUPLEX_HALF },
-};
-
/**
* phylink_limit_mac_speed - limit the phylink_config to a maximum speed
* @config: pointer to a &struct phylink_config
@@ -523,86 +424,12 @@ static unsigned long phylink_get_capabilities(phy_interface_t interface,
unsigned long mac_capabilities,
int rate_matching)
{
+ unsigned long link_caps = phy_caps_from_interface(interface);
int max_speed = phylink_interface_max_speed(interface);
unsigned long caps = MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
unsigned long matched_caps = 0;
- switch (interface) {
- case PHY_INTERFACE_MODE_USXGMII:
- caps |= MAC_10000FD | MAC_5000FD;
- fallthrough;
-
- case PHY_INTERFACE_MODE_10G_QXGMII:
- caps |= MAC_2500FD;
- fallthrough;
-
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_PSGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_QUSGMII:
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_GMII:
- caps |= MAC_1000HD | MAC_1000FD;
- fallthrough;
-
- case PHY_INTERFACE_MODE_REVRMII:
- case PHY_INTERFACE_MODE_RMII:
- case PHY_INTERFACE_MODE_SMII:
- case PHY_INTERFACE_MODE_REVMII:
- case PHY_INTERFACE_MODE_MII:
- caps |= MAC_10HD | MAC_10FD;
- fallthrough;
-
- case PHY_INTERFACE_MODE_100BASEX:
- caps |= MAC_100HD | MAC_100FD;
- break;
-
- case PHY_INTERFACE_MODE_TBI:
- case PHY_INTERFACE_MODE_MOCA:
- case PHY_INTERFACE_MODE_RTBI:
- case PHY_INTERFACE_MODE_1000BASEX:
- caps |= MAC_1000HD;
- fallthrough;
- case PHY_INTERFACE_MODE_1000BASEKX:
- case PHY_INTERFACE_MODE_TRGMII:
- caps |= MAC_1000FD;
- break;
-
- case PHY_INTERFACE_MODE_2500BASEX:
- caps |= MAC_2500FD;
- break;
-
- case PHY_INTERFACE_MODE_5GBASER:
- caps |= MAC_5000FD;
- break;
-
- case PHY_INTERFACE_MODE_XGMII:
- case PHY_INTERFACE_MODE_RXAUI:
- case PHY_INTERFACE_MODE_XAUI:
- case PHY_INTERFACE_MODE_10GBASER:
- case PHY_INTERFACE_MODE_10GKR:
- caps |= MAC_10000FD;
- break;
-
- case PHY_INTERFACE_MODE_25GBASER:
- caps |= MAC_25000FD;
- break;
-
- case PHY_INTERFACE_MODE_XLGMII:
- caps |= MAC_40000FD;
- break;
-
- case PHY_INTERFACE_MODE_INTERNAL:
- caps |= ~0;
- break;
-
- case PHY_INTERFACE_MODE_NA:
- case PHY_INTERFACE_MODE_MAX:
- break;
- }
+ caps |= phylink_link_caps_to_mac_caps(link_caps);
switch (rate_matching) {
case RATE_MATCH_OPEN_LOOP:
@@ -801,12 +628,26 @@ static int phylink_validate(struct phylink *pl, unsigned long *supported,
return phylink_validate_mac_and_pcs(pl, supported, state);
}
+static void phylink_fill_fixedlink_supported(unsigned long *supported)
+{
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, supported);
+}
+
static int phylink_parse_fixedlink(struct phylink *pl,
const struct fwnode_handle *fwnode)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(match) = { 0, };
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ const struct link_capabilities *c;
struct fwnode_handle *fixed_node;
- const struct phy_setting *s;
struct gpio_desc *desc;
u32 speed;
int ret;
@@ -874,12 +715,16 @@ static int phylink_parse_fixedlink(struct phylink *pl,
phylink_warn(pl, "fixed link specifies half duplex for %dMbps link?\n",
pl->link_config.speed);
- linkmode_fill(pl->supported);
+ linkmode_zero(pl->supported);
+ phylink_fill_fixedlink_supported(pl->supported);
+
linkmode_copy(pl->link_config.advertising, pl->supported);
phylink_validate(pl, pl->supported, &pl->link_config);
- s = phy_lookup_setting(pl->link_config.speed, pl->link_config.duplex,
- pl->supported, true);
+ c = phy_caps_lookup(pl->link_config.speed, pl->link_config.duplex,
+ pl->supported, true);
+ if (c)
+ linkmode_and(match, pl->supported, c->linkmodes);
linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mask);
linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, mask);
@@ -888,9 +733,10 @@ static int phylink_parse_fixedlink(struct phylink *pl,
phylink_set(pl->supported, MII);
- if (s) {
- __set_bit(s->bit, pl->supported);
- __set_bit(s->bit, pl->link_config.lp_advertising);
+ if (c) {
+ linkmode_or(pl->supported, pl->supported, match);
+ linkmode_or(pl->link_config.lp_advertising,
+ pl->link_config.lp_advertising, match);
} else {
phylink_warn(pl, "fixed link %s duplex %dMbps not recognised\n",
pl->link_config.duplex == DUPLEX_FULL ? "full" : "half",
@@ -1073,6 +919,18 @@ static void phylink_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
pcs->ops->pcs_link_up(pcs, neg_mode, interface, speed, duplex);
}
+static void phylink_pcs_disable_eee(struct phylink_pcs *pcs)
+{
+ if (pcs && pcs->ops->pcs_disable_eee)
+ pcs->ops->pcs_disable_eee(pcs);
+}
+
+static void phylink_pcs_enable_eee(struct phylink_pcs *pcs)
+{
+ if (pcs && pcs->ops->pcs_enable_eee)
+ pcs->ops->pcs_enable_eee(pcs);
+}
+
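These wrappers only call into a PCS that provides the optional EEE ops. A minimal sketch of a PCS driver hooking them up (driver names hypothetical, mandatory ops omitted for brevity):

    static void foo_pcs_enable_eee(struct phylink_pcs *pcs)
    {
            /* enable LPI pass-through / clock gating in the PCS here */
    }

    static void foo_pcs_disable_eee(struct phylink_pcs *pcs)
    {
            /* return the PCS to normal, non-LPI operation here */
    }

    static const struct phylink_pcs_ops foo_pcs_ops = {
            /* .pcs_get_state, .pcs_config, ... */
            .pcs_enable_eee  = foo_pcs_enable_eee,
            .pcs_disable_eee = foo_pcs_disable_eee,
    };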
/* Query inband for a specific interface mode, asking the MAC for the
* PCS which will be used to handle the interface mode.
*/
@@ -1353,19 +1211,22 @@ static void phylink_major_config(struct phylink *pl, bool restart,
struct phylink_pcs *pcs = NULL;
bool pcs_changed = false;
unsigned int rate_kbd;
- unsigned int neg_mode;
int err;
phylink_dbg(pl, "major config, requested %s/%s\n",
phylink_an_mode_str(pl->req_link_an_mode),
phy_modes(state->interface));
+ pl->major_config_failed = false;
+
if (pl->mac_ops->mac_select_pcs) {
pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface);
if (IS_ERR(pcs)) {
phylink_err(pl,
"mac_select_pcs unexpectedly failed: %pe\n",
pcs);
+
+ pl->major_config_failed = true;
return;
}
@@ -1387,6 +1248,7 @@ static void phylink_major_config(struct phylink *pl, bool restart,
if (err < 0) {
phylink_err(pl, "mac_prepare failed: %pe\n",
ERR_PTR(err));
+ pl->major_config_failed = true;
return;
}
}
@@ -1410,23 +1272,27 @@ static void phylink_major_config(struct phylink *pl, bool restart,
phylink_mac_config(pl, state);
- if (pl->pcs)
- phylink_pcs_post_config(pl->pcs, state->interface);
+ if (pl->pcs) {
+ err = phylink_pcs_post_config(pl->pcs, state->interface);
+ if (err < 0) {
+ phylink_err(pl, "pcs_post_config failed: %pe\n",
+ ERR_PTR(err));
+
+ pl->major_config_failed = true;
+ }
+ }
if (pl->pcs_state == PCS_STATE_STARTING || pcs_changed)
phylink_pcs_enable(pl->pcs);
- neg_mode = pl->act_link_an_mode;
- if (pl->pcs && pl->pcs->neg_mode)
- neg_mode = pl->pcs_neg_mode;
-
- err = phylink_pcs_config(pl->pcs, neg_mode, state,
+ err = phylink_pcs_config(pl->pcs, pl->pcs_neg_mode, state,
!!(pl->link_config.pause & MLO_PAUSE_AN));
- if (err < 0)
- phylink_err(pl, "pcs_config failed: %pe\n",
- ERR_PTR(err));
- else if (err > 0)
+ if (err < 0) {
+ phylink_err(pl, "pcs_config failed: %pe\n", ERR_PTR(err));
+ pl->major_config_failed = true;
+ } else if (err > 0) {
restart = true;
+ }
if (restart)
phylink_pcs_an_restart(pl);
@@ -1434,16 +1300,22 @@ static void phylink_major_config(struct phylink *pl, bool restart,
if (pl->mac_ops->mac_finish) {
err = pl->mac_ops->mac_finish(pl->config, pl->act_link_an_mode,
state->interface);
- if (err < 0)
+ if (err < 0) {
phylink_err(pl, "mac_finish failed: %pe\n",
ERR_PTR(err));
+
+ pl->major_config_failed = true;
+ }
}
if (pl->phydev && pl->phy_ib_mode) {
err = phy_config_inband(pl->phydev, pl->phy_ib_mode);
- if (err < 0)
+ if (err < 0) {
phylink_err(pl, "phy_config_inband: %pe\n",
ERR_PTR(err));
+
+ pl->major_config_failed = true;
+ }
}
if (pl->sfp_bus) {
@@ -1463,7 +1335,6 @@ static void phylink_major_config(struct phylink *pl, bool restart,
*/
static int phylink_change_inband_advert(struct phylink *pl)
{
- unsigned int neg_mode;
int ret;
if (test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state))
@@ -1479,15 +1350,11 @@ static int phylink_change_inband_advert(struct phylink *pl)
phylink_pcs_neg_mode(pl, pl->pcs, pl->link_config.interface,
pl->link_config.advertising);
- neg_mode = pl->act_link_an_mode;
- if (pl->pcs->neg_mode)
- neg_mode = pl->pcs_neg_mode;
-
/* Modern PCS-based method; update the advert at the PCS, and
* restart negotiation if the pcs_config() helper indicates that
* the programmed advertisement has changed.
*/
- ret = phylink_pcs_config(pl->pcs, neg_mode, &pl->link_config,
+ ret = phylink_pcs_config(pl->pcs, pl->pcs_neg_mode, &pl->link_config,
!!(pl->link_config.pause & MLO_PAUSE_AN));
if (ret < 0)
return ret;
@@ -1511,13 +1378,7 @@ static void phylink_mac_pcs_get_state(struct phylink *pl,
state->an_complete = 0;
state->link = 1;
- pcs = pl->pcs;
- if (!pcs || pcs->neg_mode)
- autoneg = pl->pcs_neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED;
- else
- autoneg = linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- state->advertising);
-
+ autoneg = pl->pcs_neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED;
if (autoneg) {
state->speed = SPEED_UNKNOWN;
state->duplex = DUPLEX_UNKNOWN;
@@ -1528,6 +1389,7 @@ static void phylink_mac_pcs_get_state(struct phylink *pl,
state->pause = pl->link_config.pause;
}
+ pcs = pl->pcs;
if (pcs)
pcs->ops->pcs_get_state(pcs, pl->pcs_neg_mode, state);
else
@@ -1601,6 +1463,8 @@ static void phylink_deactivate_lpi(struct phylink *pl)
phylink_dbg(pl, "disabling LPI\n");
pl->mac_ops->mac_disable_tx_lpi(pl->config);
+
+ phylink_pcs_disable_eee(pl->pcs);
}
}
@@ -1617,20 +1481,24 @@ static void phylink_activate_lpi(struct phylink *pl)
phylink_dbg(pl, "LPI timer %uus, tx clock stop %u\n",
pl->mac_tx_lpi_timer, pl->mac_tx_clk_stop);
+ phylink_pcs_enable_eee(pl->pcs);
+
err = pl->mac_ops->mac_enable_tx_lpi(pl->config, pl->mac_tx_lpi_timer,
pl->mac_tx_clk_stop);
- if (!err)
- pl->mac_enable_tx_lpi = true;
- else
+ if (err) {
+ phylink_pcs_disable_eee(pl->pcs);
phylink_err(pl, "%ps() failed: %pe\n",
pl->mac_ops->mac_enable_tx_lpi, ERR_PTR(err));
+ return;
+ }
+
+ pl->mac_enable_tx_lpi = true;
}
static void phylink_link_up(struct phylink *pl,
struct phylink_link_state link_state)
{
struct net_device *ndev = pl->netdev;
- unsigned int neg_mode;
int speed, duplex;
bool rx_pause;
@@ -1661,11 +1529,7 @@ static void phylink_link_up(struct phylink *pl,
pl->cur_interface = link_state.interface;
- neg_mode = pl->act_link_an_mode;
- if (pl->pcs && pl->pcs->neg_mode)
- neg_mode = pl->pcs_neg_mode;
-
- phylink_pcs_link_up(pl->pcs, neg_mode, pl->cur_interface, speed,
+ phylink_pcs_link_up(pl->pcs, pl->pcs_neg_mode, pl->cur_interface, speed,
duplex);
pl->mac_ops->mac_link_up(pl->config, pl->phydev, pl->act_link_an_mode,
@@ -1795,6 +1659,12 @@ static void phylink_resolve(struct work_struct *w)
}
}
+ /* If configuration of the interface failed, force the link down
+ * until we get a successful configuration.
+ */
+ if (pl->major_config_failed)
+ link_state.link = false;
+
if (link_state.link != cur_link_state) {
pl->old_link_state = link_state.link;
if (!link_state.link)
@@ -1879,21 +1749,20 @@ static int phylink_register_sfp(struct phylink *pl,
int phylink_set_fixed_link(struct phylink *pl,
const struct phylink_link_state *state)
{
- const struct phy_setting *s;
+ const struct link_capabilities *c;
unsigned long *adv;
if (pl->cfg_link_an_mode != MLO_AN_PHY || !state ||
!test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state))
return -EINVAL;
- s = phy_lookup_setting(state->speed, state->duplex,
- pl->supported, true);
- if (!s)
+ c = phy_caps_lookup(state->speed, state->duplex,
+ pl->supported, true);
+ if (!c)
return -EINVAL;
adv = pl->link_config.advertising;
- linkmode_zero(adv);
- linkmode_set_bit(s->bit, adv);
+ linkmode_and(adv, pl->supported, c->linkmodes);
linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, adv);
pl->link_config.speed = state->speed;
@@ -1957,8 +1826,7 @@ struct phylink *phylink_create(struct phylink_config *config,
return ERR_PTR(-EINVAL);
}
- pl->mac_supports_eee_ops = mac_ops->mac_disable_tx_lpi &&
- mac_ops->mac_enable_tx_lpi;
+ pl->mac_supports_eee_ops = phylink_mac_implements_lpi(mac_ops);
pl->mac_supports_eee = pl->mac_supports_eee_ops &&
pl->config->lpi_capabilities &&
!phy_interface_empty(pl->config->lpi_interfaces);
@@ -2046,7 +1914,7 @@ bool phylink_expects_phy(struct phylink *pl)
{
if (pl->cfg_link_an_mode == MLO_AN_FIXED ||
(pl->cfg_link_an_mode == MLO_AN_INBAND &&
- phy_interface_mode_is_8023z(pl->link_config.interface)))
+ phy_interface_mode_is_8023z(pl->link_interface)))
return false;
return true;
}
@@ -2595,6 +2463,64 @@ void phylink_stop(struct phylink *pl)
EXPORT_SYMBOL_GPL(phylink_stop);
/**
+ * phylink_rx_clk_stop_block() - block PHY ability to stop receive clock in LPI
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Disable the PHY's ability to stop the receive clock while the receive path
+ * is in EEE LPI state, until the number of calls to phylink_rx_clk_stop_block()
+ * is balanced by calls to phylink_rx_clk_stop_unblock().
+ */
+void phylink_rx_clk_stop_block(struct phylink *pl)
+{
+ ASSERT_RTNL();
+
+ if (pl->mac_rx_clk_stop_blocked == U8_MAX) {
+ phylink_warn(pl, "%s called too many times - ignoring\n",
+ __func__);
+ dump_stack();
+ return;
+ }
+
+ /* Disable PHY receive clock stop if this is the first time this
+ * function has been called and clock-stop was previously enabled.
+ */
+ if (pl->mac_rx_clk_stop_blocked++ == 0 &&
+ pl->mac_supports_eee_ops && pl->phydev &&
+ pl->config->eee_rx_clk_stop_enable)
+ phy_eee_rx_clock_stop(pl->phydev, false);
+}
+EXPORT_SYMBOL_GPL(phylink_rx_clk_stop_block);
+
+/**
+ * phylink_rx_clk_stop_unblock() - unblock PHY ability to stop receive clock
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * All calls to phylink_rx_clk_stop_block() must be balanced with a
+ * corresponding call to phylink_rx_clk_stop_unblock() to restore the PHY's
+ * ability to stop the receive clock when the receive path is in EEE LPI mode.
+ */
+void phylink_rx_clk_stop_unblock(struct phylink *pl)
+{
+ ASSERT_RTNL();
+
+ if (pl->mac_rx_clk_stop_blocked == 0) {
+ phylink_warn(pl, "%s called too many times - ignoring\n",
+ __func__);
+ dump_stack();
+ return;
+ }
+
+ /* Re-enable PHY receive clock stop if the number of unblocks matches
+ * the number of calls to the block function above.
+ */
+ if (--pl->mac_rx_clk_stop_blocked == 0 &&
+ pl->mac_supports_eee_ops && pl->phydev &&
+ pl->config->eee_rx_clk_stop_enable)
+ phy_eee_rx_clock_stop(pl->phydev, true);
+}
+EXPORT_SYMBOL_GPL(phylink_rx_clk_stop_unblock);
+
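A usage sketch (hypothetical MAC driver): wrap an operation that needs the receive clock from the PHY to keep ticking, with calls balanced and made under RTNL.

    struct foo_mac {
            struct phylink *phylink;
    };

    static void foo_mac_reinit_rx_path(struct foo_mac *priv)
    {
            phylink_rx_clk_stop_block(priv->phylink);

            /* reprogram hardware that requires RXC to be running */

            phylink_rx_clk_stop_unblock(priv->phylink);
    }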
+/**
* phylink_suspend() - handle a network device suspend event
* @pl: a pointer to a &struct phylink returned from phylink_create()
* @mac_wol: true if the MAC needs to receive packets for Wake-on-Lan
@@ -2639,6 +2565,31 @@ void phylink_suspend(struct phylink *pl, bool mac_wol)
EXPORT_SYMBOL_GPL(phylink_suspend);
/**
+ * phylink_prepare_resume() - prepare to resume a network device
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Optional, but if called must be called prior to phylink_resume().
+ *
+ * Prepare to resume a network device, preparing the PHY as necessary.
+ */
+void phylink_prepare_resume(struct phylink *pl)
+{
+ struct phy_device *phydev = pl->phydev;
+
+ ASSERT_RTNL();
+
+ /* IEEE 802.3 22.2.4.1.5 allows PHYs to stop their receive clock
+ * when PDOWN is set. However, some MACs require RXC to be running
+ * in order to resume. If the MAC requires RXC, and we have a PHY,
+ * then resume the PHY. Note that 802.3 allows PHYs 500ms before
+ * the clock meets requirements. We do not implement this delay.
+ */
+ if (pl->config->mac_requires_rxc && phydev && phydev->suspended)
+ phy_resume(phydev);
+}
+EXPORT_SYMBOL_GPL(phylink_prepare_resume);
+
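A sketch of a driver resume path using the new helper (hypothetical MAC driver, running under RTNL): the PHY may be resumed here so that RXC is ticking before the MAC is brought back up, after which the normal phylink_resume() follows.

    struct foo_mac {
            struct phylink *phylink;
    };

    static int foo_mac_resume(struct foo_mac *priv)
    {
            phylink_prepare_resume(priv->phylink);

            /* re-initialise MAC hardware that needs RXC here */

            phylink_resume(priv->phylink);

            return 0;
    }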
+/**
* phylink_resume() - handle a network device resume event
* @pl: a pointer to a &struct phylink returned from phylink_create()
*
@@ -2854,8 +2805,8 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
const struct ethtool_link_ksettings *kset)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(support);
+ const struct link_capabilities *c;
struct phylink_link_state config;
- const struct phy_setting *s;
ASSERT_RTNL();
@@ -2898,23 +2849,23 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
/* Autonegotiation disabled, select a suitable speed and
* duplex.
*/
- s = phy_lookup_setting(kset->base.speed, kset->base.duplex,
- pl->supported, false);
- if (!s)
+ c = phy_caps_lookup(kset->base.speed, kset->base.duplex,
+ pl->supported, false);
+ if (!c)
return -EINVAL;
/* If we have a fixed link, refuse to change link parameters.
* If the link parameters match, accept them but do nothing.
*/
if (pl->req_link_an_mode == MLO_AN_FIXED) {
- if (s->speed != pl->link_config.speed ||
- s->duplex != pl->link_config.duplex)
+ if (c->speed != pl->link_config.speed ||
+ c->duplex != pl->link_config.duplex)
return -EINVAL;
return 0;
}
- config.speed = s->speed;
- config.duplex = s->duplex;
+ config.speed = c->speed;
+ config.duplex = c->duplex;
break;
case AUTONEG_ENABLE:
@@ -3160,24 +3111,6 @@ int phylink_get_eee_err(struct phylink *pl)
EXPORT_SYMBOL_GPL(phylink_get_eee_err);
/**
- * phylink_init_eee() - init and check the EEE features
- * @pl: a pointer to a &struct phylink returned from phylink_create()
- * @clk_stop_enable: allow PHY to stop receive clock
- *
- * Must be called either with RTNL held or within mac_link_up()
- */
-int phylink_init_eee(struct phylink *pl, bool clk_stop_enable)
-{
- int ret = -EOPNOTSUPP;
-
- if (pl->phydev)
- ret = phy_init_eee(pl->phydev, clk_stop_enable);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(phylink_init_eee);
-
-/**
* phylink_ethtool_get_eee() - read the energy efficient ethernet parameters
* @pl: a pointer to a &struct phylink returned from phylink_create()
* @eee: a pointer to a &struct ethtool_keee for the read parameters
diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c
index 2ad8c2586d64..1af6b5ead74b 100644
--- a/drivers/net/phy/qcom/qca807x.c
+++ b/drivers/net/phy/qcom/qca807x.c
@@ -15,6 +15,7 @@
#include <linux/gpio/driver.h>
#include <linux/sfp.h>
+#include "../phylib.h"
#include "qcom.h"
#define QCA807X_CHIP_CONFIGURATION 0x1f
@@ -486,13 +487,13 @@ static int qca807x_read_status(struct phy_device *phydev)
static int qca807x_phy_package_probe_once(struct phy_device *phydev)
{
- struct phy_package_shared *shared = phydev->shared;
- struct qca807x_shared_priv *priv = shared->priv;
+ struct qca807x_shared_priv *priv = phy_package_get_priv(phydev);
+ struct device_node *np = phy_package_get_node(phydev);
unsigned int tx_drive_strength;
const char *package_mode_name;
/* Default to 600mw if not defined */
- if (of_property_read_u32(shared->np, "qcom,tx-drive-strength-milliwatt",
+ if (of_property_read_u32(np, "qcom,tx-drive-strength-milliwatt",
&tx_drive_strength))
tx_drive_strength = 600;
@@ -541,7 +542,7 @@ static int qca807x_phy_package_probe_once(struct phy_device *phydev)
}
priv->package_mode = PHY_INTERFACE_MODE_NA;
- if (!of_property_read_string(shared->np, "qcom,package-mode",
+ if (!of_property_read_string(np, "qcom,package-mode",
&package_mode_name)) {
if (!strcasecmp(package_mode_name,
phy_modes(PHY_INTERFACE_MODE_PSGMII)))
@@ -558,8 +559,7 @@ static int qca807x_phy_package_probe_once(struct phy_device *phydev)
static int qca807x_phy_package_config_init_once(struct phy_device *phydev)
{
- struct phy_package_shared *shared = phydev->shared;
- struct qca807x_shared_priv *priv = shared->priv;
+ struct qca807x_shared_priv *priv = phy_package_get_priv(phydev);
int val, ret;
/* Make sure PHY follow PHY package mode if enforced */
@@ -708,7 +708,6 @@ static int qca807x_probe(struct phy_device *phydev)
struct device_node *node = phydev->mdio.dev.of_node;
struct qca807x_shared_priv *shared_priv;
struct device *dev = &phydev->mdio.dev;
- struct phy_package_shared *shared;
struct qca807x_priv *priv;
int ret;
@@ -722,8 +721,7 @@ static int qca807x_probe(struct phy_device *phydev)
return ret;
}
- shared = phydev->shared;
- shared_priv = shared->priv;
+ shared_priv = phy_package_get_priv(phydev);
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
diff --git a/drivers/net/phy/qt2025.rs b/drivers/net/phy/qt2025.rs
index 1ab065798175..0b9400dcb4c1 100644
--- a/drivers/net/phy/qt2025.rs
+++ b/drivers/net/phy/qt2025.rs
@@ -26,7 +26,7 @@ kernel::module_phy_driver! {
phy::DeviceId::new_with_driver::<PhyQT2025>(),
],
name: "qt2025_phy",
- author: "FUJITA Tomonori <fujita.tomonori@gmail.com>",
+ authors: ["FUJITA Tomonori <fujita.tomonori@gmail.com>"],
description: "AMCC QT2025 PHY driver",
license: "GPL",
firmware: ["qt2025-2.0.3.3.fw"],
@@ -41,7 +41,7 @@ impl Driver for PhyQT2025 {
fn probe(dev: &mut phy::Device) -> Result<()> {
// Check the hardware revision code.
- // Only 0x3b works with this driver and firmware.
+ // Only 0xb3 works with this driver and firmware.
let hw_rev = dev.read(C45::new(Mmd::PMAPMD, 0xd001))?;
if (hw_rev >> 8) != 0xb3 {
return Err(code::ENODEV);
diff --git a/drivers/net/phy/realtek/Kconfig b/drivers/net/phy/realtek/Kconfig
index 31935f147d87..b05c2a1e9024 100644
--- a/drivers/net/phy/realtek/Kconfig
+++ b/drivers/net/phy/realtek/Kconfig
@@ -4,8 +4,12 @@ config REALTEK_PHY
help
Currently supports RTL821x/RTL822x and fast ethernet PHYs
+if REALTEK_PHY
+
config REALTEK_PHY_HWMON
- def_bool REALTEK_PHY && HWMON
- depends on !(REALTEK_PHY=y && HWMON=m)
+ bool "HWMON support for Realtek PHYs"
+ depends on HWMON && !(REALTEK_PHY=y && HWMON=m)
help
Optional hwmon support for the temperature sensor
+
+endif # REALTEK_PHY
diff --git a/drivers/net/phy/realtek/realtek_hwmon.c b/drivers/net/phy/realtek/realtek_hwmon.c
index 1ecb410bb941..ac96e2d1ebe8 100644
--- a/drivers/net/phy/realtek/realtek_hwmon.c
+++ b/drivers/net/phy/realtek/realtek_hwmon.c
@@ -63,16 +63,11 @@ static const struct hwmon_chip_info rtl822x_hwmon_chip_info = {
int rtl822x_hwmon_init(struct phy_device *phydev)
{
struct device *hwdev, *dev = &phydev->mdio.dev;
- const char *name;
/* Ensure over-temp alarm is reset. */
phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, RTL822X_VND2_TSALRM, 3);
- name = devm_hwmon_sanitize_name(dev, dev_name(dev));
- if (IS_ERR(name))
- return PTR_ERR(name);
-
- hwdev = devm_hwmon_device_register_with_info(dev, name, phydev,
+ hwdev = devm_hwmon_device_register_with_info(dev, NULL, phydev,
&rtl822x_hwmon_chip_info,
NULL);
return PTR_ERR_OR_ZERO(hwdev);
diff --git a/drivers/net/phy/realtek/realtek_main.c b/drivers/net/phy/realtek/realtek_main.c
index 572a933636b0..893c82479671 100644
--- a/drivers/net/phy/realtek/realtek_main.c
+++ b/drivers/net/phy/realtek/realtek_main.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/clk.h>
+#include <linux/string_choices.h>
#include "realtek.h"
@@ -32,6 +33,9 @@
#define RTL8211F_PHYCR1 0x18
#define RTL8211F_PHYCR2 0x19
+#define RTL8211F_CLKOUT_EN BIT(0)
+#define RTL8211F_PHYCR2_PHY_EEE_ENABLE BIT(5)
+
#define RTL8211F_INSR 0x1d
#define RTL8211F_LEDCR 0x10
@@ -54,8 +58,6 @@
#define RTL8211E_TX_DELAY BIT(12)
#define RTL8211E_RX_DELAY BIT(11)
-#define RTL8211F_CLKOUT_EN BIT(0)
-
#define RTL8201F_ISR 0x1e
#define RTL8201F_ISR_ANERR BIT(15)
#define RTL8201F_ISR_DUPLEX BIT(13)
@@ -78,9 +80,7 @@
/* RTL822X_VND2_XXXXX registers are only accessible when phydev->is_c45
* is set, they cannot be accessed by C45-over-C22.
*/
-#define RTL822X_VND2_GBCR 0xa412
-
-#define RTL822X_VND2_GANLPAR 0xa414
+#define RTL822X_VND2_C22_REG(reg) (0xa400 + 2 * (reg))
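The macro maps a Clause 22 register number into Realtek's VEND2 mirror window; a quick sanity check against the two constants it replaces (MII_CTRL1000 is 0x09, MII_STAT1000 is 0x0a), written as a sketch:

    static_assert(RTL822X_VND2_C22_REG(MII_CTRL1000) == 0xa412); /* was RTL822X_VND2_GBCR */
    static_assert(RTL822X_VND2_C22_REG(MII_STAT1000) == 0xa414); /* was RTL822X_VND2_GANLPAR */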
#define RTL8366RB_POWER_SAVE 0x15
#define RTL8366RB_POWER_SAVE_ON BIT(12)
@@ -95,6 +95,16 @@
#define RTL_VND2_PHYSR_MASTER BIT(11)
#define RTL_VND2_PHYSR_SPEED_MASK (RTL_VND2_PHYSR_SPEEDL | RTL_VND2_PHYSR_SPEEDH)
+#define RTL_MDIO_PCS_EEE_ABLE 0xa5c4
+#define RTL_MDIO_AN_EEE_ADV 0xa5d0
+#define RTL_MDIO_AN_EEE_LPABLE 0xa5d2
+#define RTL_MDIO_AN_10GBT_CTRL 0xa5d4
+#define RTL_MDIO_AN_10GBT_STAT 0xa5d6
+#define RTL_MDIO_PMA_SPEED 0xa616
+#define RTL_MDIO_AN_EEE_LPABLE2 0xa6d0
+#define RTL_MDIO_AN_EEE_ADV2 0xa6d4
+#define RTL_MDIO_PCS_EEE_ABLE2 0xa6ec
+
#define RTL_GENERIC_PHYID 0x001cc800
#define RTL_8211FVD_PHYID 0x001cc878
#define RTL_8221B 0x001cc840
@@ -422,11 +432,11 @@ static int rtl8211f_config_init(struct phy_device *phydev)
} else if (ret) {
dev_dbg(dev,
"%s 2ns TX delay (and changing the value from pin-strapping RXD1 or the bootloader)\n",
- val_txdly ? "Enabling" : "Disabling");
+ str_enable_disable(val_txdly));
} else {
dev_dbg(dev,
"2ns TX delay was already %s (by pin-strapping RXD1 or bootloader configuration)\n",
- val_txdly ? "enabled" : "disabled");
+ str_enabled_disabled(val_txdly));
}
ret = phy_modify_paged_changed(phydev, 0xd08, 0x15, RTL8211F_RX_DELAY,
@@ -437,13 +447,19 @@ static int rtl8211f_config_init(struct phy_device *phydev)
} else if (ret) {
dev_dbg(dev,
"%s 2ns RX delay (and changing the value from pin-strapping RXD0 or the bootloader)\n",
- val_rxdly ? "Enabling" : "Disabling");
+ str_enable_disable(val_rxdly));
} else {
dev_dbg(dev,
"2ns RX delay was already %s (by pin-strapping RXD0 or bootloader configuration)\n",
- val_rxdly ? "enabled" : "disabled");
+ str_enabled_disabled(val_rxdly));
}
+ /* Disable PHY-mode EEE so LPI is passed to the MAC */
+ ret = phy_modify_paged(phydev, 0xa43, RTL8211F_PHYCR2,
+ RTL8211F_PHYCR2_PHY_EEE_ENABLE, 0);
+ if (ret)
+ return ret;
+
if (priv->has_phycr2) {
ret = phy_modify_paged(phydev, 0xa43, RTL8211F_PHYCR2,
RTL8211F_CLKOUT_EN, priv->phycr2);
@@ -734,29 +750,31 @@ static int rtlgen_read_status(struct phy_device *phydev)
return 0;
}
+static int rtlgen_read_vend2(struct phy_device *phydev, int regnum)
+{
+ return __mdiobus_c45_read(phydev->mdio.bus, 0, MDIO_MMD_VEND2, regnum);
+}
+
+static int rtlgen_write_vend2(struct phy_device *phydev, int regnum, u16 val)
+{
+ return __mdiobus_c45_write(phydev->mdio.bus, 0, MDIO_MMD_VEND2, regnum,
+ val);
+}
+
static int rtlgen_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
{
int ret;
- if (devnum == MDIO_MMD_VEND2) {
- rtl821x_write_page(phydev, regnum >> 4);
- ret = __phy_read(phydev, 0x10 + ((regnum & 0xf) >> 1));
- rtl821x_write_page(phydev, 0);
- } else if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE) {
- rtl821x_write_page(phydev, 0xa5c);
- ret = __phy_read(phydev, 0x12);
- rtl821x_write_page(phydev, 0);
- } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV) {
- rtl821x_write_page(phydev, 0xa5d);
- ret = __phy_read(phydev, 0x10);
- rtl821x_write_page(phydev, 0);
- } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_LPABLE) {
- rtl821x_write_page(phydev, 0xa5d);
- ret = __phy_read(phydev, 0x11);
- rtl821x_write_page(phydev, 0);
- } else {
+ if (devnum == MDIO_MMD_VEND2)
+ ret = rtlgen_read_vend2(phydev, regnum);
+ else if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE)
+ ret = rtlgen_read_vend2(phydev, RTL_MDIO_PCS_EEE_ABLE);
+ else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV)
+ ret = rtlgen_read_vend2(phydev, RTL_MDIO_AN_EEE_ADV);
+ else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_LPABLE)
+ ret = rtlgen_read_vend2(phydev, RTL_MDIO_AN_EEE_LPABLE);
+ else
ret = -EOPNOTSUPP;
- }
return ret;
}
@@ -766,17 +784,12 @@ static int rtlgen_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
{
int ret;
- if (devnum == MDIO_MMD_VEND2) {
- rtl821x_write_page(phydev, regnum >> 4);
- ret = __phy_write(phydev, 0x10 + ((regnum & 0xf) >> 1), val);
- rtl821x_write_page(phydev, 0);
- } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV) {
- rtl821x_write_page(phydev, 0xa5d);
- ret = __phy_write(phydev, 0x10, val);
- rtl821x_write_page(phydev, 0);
- } else {
+ if (devnum == MDIO_MMD_VEND2)
+ ret = rtlgen_write_vend2(phydev, regnum, val);
+ else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV)
+ ret = rtlgen_write_vend2(phydev, RTL_MDIO_AN_EEE_ADV, val);
+ else
ret = -EOPNOTSUPP;
- }
return ret;
}
@@ -788,19 +801,12 @@ static int rtl822x_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
if (ret != -EOPNOTSUPP)
return ret;
- if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE2) {
- rtl821x_write_page(phydev, 0xa6e);
- ret = __phy_read(phydev, 0x16);
- rtl821x_write_page(phydev, 0);
- } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV2) {
- rtl821x_write_page(phydev, 0xa6d);
- ret = __phy_read(phydev, 0x12);
- rtl821x_write_page(phydev, 0);
- } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_LPABLE2) {
- rtl821x_write_page(phydev, 0xa6d);
- ret = __phy_read(phydev, 0x10);
- rtl821x_write_page(phydev, 0);
- }
+ if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE2)
+ ret = rtlgen_read_vend2(phydev, RTL_MDIO_PCS_EEE_ABLE2);
+ else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV2)
+ ret = rtlgen_read_vend2(phydev, RTL_MDIO_AN_EEE_ADV2);
+ else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_LPABLE2)
+ ret = rtlgen_read_vend2(phydev, RTL_MDIO_AN_EEE_LPABLE2);
return ret;
}
@@ -813,11 +819,8 @@ static int rtl822x_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
if (ret != -EOPNOTSUPP)
return ret;
- if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV2) {
- rtl821x_write_page(phydev, 0xa6d);
- ret = __phy_write(phydev, 0x12, val);
- rtl821x_write_page(phydev, 0);
- }
+ if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV2)
+ ret = rtlgen_write_vend2(phydev, RTL_MDIO_AN_EEE_ADV2, val);
return ret;
}
@@ -913,7 +916,7 @@ static int rtl822x_get_features(struct phy_device *phydev)
{
int val;
- val = phy_read_paged(phydev, 0xa61, 0x13);
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL_MDIO_PMA_SPEED);
if (val < 0)
return val;
@@ -934,10 +937,10 @@ static int rtl822x_config_aneg(struct phy_device *phydev)
if (phydev->autoneg == AUTONEG_ENABLE) {
u16 adv = linkmode_adv_to_mii_10gbt_adv_t(phydev->advertising);
- ret = phy_modify_paged_changed(phydev, 0xa5d, 0x12,
- MDIO_AN_10GBT_CTRL_ADV2_5G |
- MDIO_AN_10GBT_CTRL_ADV5G,
- adv);
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_VEND2,
+ RTL_MDIO_AN_10GBT_CTRL,
+ MDIO_AN_10GBT_CTRL_ADV2_5G |
+ MDIO_AN_10GBT_CTRL_ADV5G, adv);
if (ret < 0)
return ret;
}
@@ -981,7 +984,7 @@ static int rtl822x_read_status(struct phy_device *phydev)
!phydev->autoneg_complete)
return 0;
- lpadv = phy_read_paged(phydev, 0xa5d, 0x13);
+ lpadv = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL_MDIO_AN_10GBT_STAT);
if (lpadv < 0)
return lpadv;
@@ -1028,7 +1031,8 @@ static int rtl822x_c45_config_aneg(struct phy_device *phydev)
val = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising);
/* Vendor register as C45 has no standardized support for 1000BaseT */
- ret = phy_modify_mmd_changed(phydev, MDIO_MMD_VEND2, RTL822X_VND2_GBCR,
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_VEND2,
+ RTL822X_VND2_C22_REG(MII_CTRL1000),
ADVERTISE_1000FULL, val);
if (ret < 0)
return ret;
@@ -1045,7 +1049,7 @@ static int rtl822x_c45_read_status(struct phy_device *phydev)
/* Vendor register as C45 has no standardized support for 1000BaseT */
if (phydev->autoneg == AUTONEG_ENABLE && genphy_c45_aneg_done(phydev)) {
val = phy_read_mmd(phydev, MDIO_MMD_VEND2,
- RTL822X_VND2_GANLPAR);
+ RTL822X_VND2_C22_REG(MII_STAT1000));
if (val < 0)
return val;
} else {
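For readers unfamiliar with the Realtek register layout: the refactor above works because every paged C22 access the driver used to open-code corresponds to a flat MMD_VEND2 register number, which the new rtlgen_read_vend2()/rtlgen_write_vend2() helpers reach over C45. A minimal sketch of that mapping, derived from the removed accessors (illustration only, not part of the patch; rtl_vend2_regnum is a hypothetical name):

	/* page 0xa6e, reg 0x16  ->  0xa6ec == RTL_MDIO_PCS_EEE_ABLE2 */
	static u16 rtl_vend2_regnum(u16 page, u8 c22_reg)
	{
		return (page << 4) | ((c22_reg - 0x10) << 1);
	}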
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 7dbcbf0a4ee2..347c1e0e94d9 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -234,6 +234,7 @@ struct sfp {
enum mdio_i2c_proto mdio_protocol;
struct phy_device *mod_phy;
const struct sff_data *type;
+ size_t i2c_max_block_size;
size_t i2c_block_size;
u32 max_power_mW;
@@ -385,7 +386,7 @@ static void sfp_fixup_rollball(struct sfp *sfp)
sfp->phy_t_retry = msecs_to_jiffies(1000);
}
-static void sfp_fixup_fs_2_5gt(struct sfp *sfp)
+static void sfp_fixup_rollball_wait4s(struct sfp *sfp)
{
sfp_fixup_rollball(sfp);
@@ -399,7 +400,7 @@ static void sfp_fixup_fs_2_5gt(struct sfp *sfp)
static void sfp_fixup_fs_10gt(struct sfp *sfp)
{
sfp_fixup_10gbaset_30m(sfp);
- sfp_fixup_fs_2_5gt(sfp);
+ sfp_fixup_rollball_wait4s(sfp);
}
static void sfp_fixup_halny_gsfp(struct sfp *sfp)
@@ -479,9 +480,10 @@ static const struct sfp_quirk sfp_quirks[] = {
// PHY.
SFP_QUIRK_F("FS", "SFP-10G-T", sfp_fixup_fs_10gt),
- // Fiberstore SFP-2.5G-T uses Rollball protocol to talk to the PHY and
- // needs 4 sec wait before probing the PHY.
- SFP_QUIRK_F("FS", "SFP-2.5G-T", sfp_fixup_fs_2_5gt),
+ // Fiberstore SFP-2.5G-T and SFP-10GM-T use the Rollball protocol to talk
+ // to the PHY and need a 4 sec wait before probing the PHY.
+ SFP_QUIRK_F("FS", "SFP-2.5G-T", sfp_fixup_rollball_wait4s),
+ SFP_QUIRK_F("FS", "SFP-10GM-T", sfp_fixup_rollball_wait4s),
// Fiberstore GPON-ONU-34-20BI can operate at 2500base-X, but report 1.2GBd
// NRZ in their EEPROM
@@ -515,6 +517,8 @@ static const struct sfp_quirk sfp_quirks[] = {
SFP_QUIRK_F("OEM", "SFP-10G-T", sfp_fixup_rollball_cc),
SFP_QUIRK_M("OEM", "SFP-2.5G-T", sfp_quirk_oem_2_5g),
+ SFP_QUIRK_M("OEM", "SFP-2.5G-BX10-D", sfp_quirk_2500basex),
+ SFP_QUIRK_M("OEM", "SFP-2.5G-BX10-U", sfp_quirk_2500basex),
SFP_QUIRK_F("OEM", "RTSFP-10", sfp_fixup_rollball_cc),
SFP_QUIRK_F("OEM", "RTSFP-10G", sfp_fixup_rollball_cc),
SFP_QUIRK_F("Turris", "RTSFP-2.5G", sfp_fixup_rollball),
@@ -688,14 +692,71 @@ static int sfp_i2c_write(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
return ret == ARRAY_SIZE(msgs) ? len : 0;
}
-static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c)
+static int sfp_smbus_byte_read(struct sfp *sfp, bool a2, u8 dev_addr,
+ void *buf, size_t len)
{
- if (!i2c_check_functionality(i2c, I2C_FUNC_I2C))
- return -EINVAL;
+ union i2c_smbus_data smbus_data;
+ u8 bus_addr = a2 ? 0x51 : 0x50;
+ u8 *data = buf;
+ int ret;
+
+ while (len) {
+ ret = i2c_smbus_xfer(sfp->i2c, bus_addr, 0,
+ I2C_SMBUS_READ, dev_addr,
+ I2C_SMBUS_BYTE_DATA, &smbus_data);
+ if (ret < 0)
+ return ret;
+
+ *data = smbus_data.byte;
+
+ len--;
+ data++;
+ dev_addr++;
+ }
+
+ return data - (u8 *)buf;
+}
+
+static int sfp_smbus_byte_write(struct sfp *sfp, bool a2, u8 dev_addr,
+ void *buf, size_t len)
+{
+ union i2c_smbus_data smbus_data;
+ u8 bus_addr = a2 ? 0x51 : 0x50;
+ u8 *data = buf;
+ int ret;
+
+ while (len) {
+ smbus_data.byte = *data;
+ ret = i2c_smbus_xfer(sfp->i2c, bus_addr, 0,
+ I2C_SMBUS_WRITE, dev_addr,
+ I2C_SMBUS_BYTE_DATA, &smbus_data);
+ if (ret)
+ return ret;
+
+ len--;
+ data++;
+ dev_addr++;
+ }
+
+ return 0;
+}
+
+static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c)
+{
sfp->i2c = i2c;
- sfp->read = sfp_i2c_read;
- sfp->write = sfp_i2c_write;
+
+ if (i2c_check_functionality(i2c, I2C_FUNC_I2C)) {
+ sfp->read = sfp_i2c_read;
+ sfp->write = sfp_i2c_write;
+ sfp->i2c_max_block_size = SFP_EEPROM_BLOCK_SIZE;
+ } else if (i2c_check_functionality(i2c, I2C_FUNC_SMBUS_BYTE_DATA)) {
+ sfp->read = sfp_smbus_byte_read;
+ sfp->write = sfp_smbus_byte_write;
+ sfp->i2c_max_block_size = 1;
+ } else {
+ sfp->i2c = NULL;
+ return -EINVAL;
+ }
return 0;
}
@@ -1591,7 +1652,7 @@ static void sfp_hwmon_probe(struct work_struct *work)
*/
if (sfp->i2c_block_size < 2) {
dev_info(sfp->dev,
- "skipping hwmon device registration due to broken EEPROM\n");
+ "skipping hwmon device registration\n");
dev_info(sfp->dev,
"diagnostic EEPROM area cannot be read atomically to guarantee data coherency\n");
return;
@@ -2198,7 +2259,7 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
u8 check;
int ret;
- sfp->i2c_block_size = SFP_EEPROM_BLOCK_SIZE;
+ sfp->i2c_block_size = sfp->i2c_max_block_size;
ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base));
if (ret < 0) {
@@ -2938,7 +2999,6 @@ static struct sfp *sfp_alloc(struct device *dev)
return ERR_PTR(-ENOMEM);
sfp->dev = dev;
- sfp->i2c_block_size = SFP_EEPROM_BLOCK_SIZE;
mutex_init(&sfp->sm_mutex);
mutex_init(&sfp->st_mutex);
@@ -3112,6 +3172,15 @@ static int sfp_probe(struct platform_device *pdev)
if (!sfp->sfp_bus)
return -ENOMEM;
+ if (sfp->i2c_max_block_size < 2)
+ dev_warn(sfp->dev,
+ "Please note:\n"
+ "This SFP cage is accessed via an SMBus only capable of single byte\n"
+ "transactions. Some features are disabled, other may be unreliable or\n"
+ "sporadically fail. Use with caution. There is nothing that the kernel\n"
+ "or community can do to fix it, the kernel will try best efforts. Please\n"
+ "verify any problems on hardware that supports multi-byte I2C transactions.\n");
+
sfp_debugfs_init(sfp);
return 0;
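The hwmon opt-out above (i2c_block_size < 2) exists because the A2h diagnostics are 16-bit values that must be fetched in a single transfer; with the byte-at-a-time SMBus path a read can return the MSB of one sample and the LSB of the next. A rough sketch of the constraint, assuming sfp.c's internal sfp_read() helper (the function name and register argument here are illustrative only):

	/* Only coherent when the access path can do a 2-byte transfer */
	static int sfp_read_diag_word(struct sfp *sfp, u8 reg, u16 *val)
	{
		__be16 raw;
		int ret;

		ret = sfp_read(sfp, true, reg, &raw, sizeof(raw));
		if (ret < 0)
			return ret;

		*val = be16_to_cpu(raw);
		return 0;
	}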
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
index 7c51daecf18e..2024d8ef36d9 100644
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -64,15 +64,16 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
return 0;
}
-static int xgmiitorgmii_set_loopback(struct phy_device *phydev, bool enable)
+static int xgmiitorgmii_set_loopback(struct phy_device *phydev, bool enable,
+ int speed)
{
struct gmii2rgmii *priv = mdiodev_get_drvdata(&phydev->mdio);
int err;
if (priv->phy_drv->set_loopback)
- err = priv->phy_drv->set_loopback(phydev, enable);
+ err = priv->phy_drv->set_loopback(phydev, enable, speed);
else
- err = genphy_loopback(phydev, enable);
+ err = genphy_loopback(phydev, enable, speed);
if (err < 0)
return err;
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 1420c4efa48e..53463767cc43 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -45,6 +45,7 @@
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/unaligned.h>
+#include <net/netdev_lock.h>
#include <net/slhc_vj.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
@@ -1314,10 +1315,13 @@ static int ppp_nl_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
-static int ppp_nl_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int ppp_nl_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct ppp_config conf = {
.unit = -1,
.ifname_is_set = true,
@@ -1354,7 +1358,7 @@ static int ppp_nl_newlink(struct net *src_net, struct net_device *dev,
if (!tb[IFLA_IFNAME] || !nla_len(tb[IFLA_IFNAME]) || !*(char *)nla_data(tb[IFLA_IFNAME]))
conf.ifname_is_set = false;
- err = ppp_dev_configure(src_net, dev, &conf);
+ err = ppp_dev_configure(link_net, dev, &conf);
out_unlock:
mutex_unlock(&ppp_mutex);
@@ -3500,6 +3504,10 @@ ppp_connect_channel(struct channel *pch, int unit)
ret = -ENOTCONN;
goto outl;
}
+ if (pch->chan->direct_xmit)
+ ppp->dev->priv_flags |= IFF_NO_QUEUE;
+ else
+ ppp->dev->priv_flags &= ~IFF_NO_QUEUE;
spin_unlock_bh(&pch->downl);
if (pch->file.hdrlen > ppp->file.hdrlen)
ppp->file.hdrlen = pch->file.hdrlen;
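The ppp_nl_newlink() change above follows the tree-wide conversion of ->newlink() to struct rtnl_newlink_params. A minimal sketch of the converted callback shape (foo_newlink and foo_dev_configure are hypothetical; the accessors are the ones used in this diff):

	static int foo_newlink(struct net_device *dev,
			       struct rtnl_newlink_params *params,
			       struct netlink_ext_ack *extack)
	{
		struct net *link_net = rtnl_newlink_link_net(params);
		struct nlattr **tb = params->tb;

		if (!tb[IFLA_ADDRESS])
			eth_hw_addr_random(dev);

		/* driver attributes live in params->data */
		return foo_dev_configure(link_net, dev, params->data);
	}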
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 2ea4f4890d23..68e631718ab0 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -693,6 +693,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
po->chan.private = sk;
po->chan.ops = &pppoe_chan_ops;
+ po->chan.direct_xmit = true;
error = ppp_register_net_channel(dev_net(dev), &po->chan);
if (error) {
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 689687bd2574..5feaa70b5f47 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -465,6 +465,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
po->chan.mtu -= PPTP_HEADER_OVERHEAD;
po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
+ po->chan.direct_xmit = true;
error = ppp_register_channel(&po->chan);
if (error) {
pr_err("PPTP: failed to register PPP channel (%d)\n", error);
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
deleted file mode 100644
index c3f8020571ad..000000000000
--- a/drivers/net/sb1000.c
+++ /dev/null
@@ -1,1179 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* sb1000.c: A General Instruments SB1000 driver for linux. */
-/*
- Written 1998 by Franco Venturi.
-
- Copyright 1998 by Franco Venturi.
- Copyright 1994,1995 by Donald Becker.
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency.
-
- This driver is for the General Instruments SB1000 (internal SURFboard)
-
- The author may be reached as fventuri@mediaone.net
-
-
- Changes:
-
- 981115 Steven Hirsch <shirsch@adelphia.net>
-
- Linus changed the timer interface. Should work on all recent
- development kernels.
-
- 980608 Steven Hirsch <shirsch@adelphia.net>
-
- Small changes to make it work with 2.1.x kernels. Hopefully,
- nothing major will change before official release of Linux 2.2.
-
- Merged with 2.2 - Alan Cox
-*/
-
-static char version[] = "sb1000.c:v1.1.2 6/01/98 (fventuri@mediaone.net)\n";
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/if_cablemodem.h> /* for SIOGCM/SIOSCM stuff */
-#include <linux/in.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h> /* for udelay() */
-#include <linux/etherdevice.h>
-#include <linux/pnp.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/gfp.h>
-
-#include <asm/io.h>
-#include <asm/processor.h>
-#include <linux/uaccess.h>
-
-#ifdef SB1000_DEBUG
-static int sb1000_debug = SB1000_DEBUG;
-#else
-static const int sb1000_debug = 1;
-#endif
-
-static const int SB1000_IO_EXTENT = 8;
-/* SB1000 Maximum Receive Unit */
-static const int SB1000_MRU = 1500; /* octects */
-
-#define NPIDS 4
-struct sb1000_private {
- struct sk_buff *rx_skb[NPIDS];
- short rx_dlen[NPIDS];
- unsigned int rx_frames;
- short rx_error_count;
- short rx_error_dpc_count;
- unsigned char rx_session_id[NPIDS];
- unsigned char rx_frame_id[NPIDS];
- unsigned char rx_pkt_type[NPIDS];
-};
-
-/* prototypes for Linux interface */
-extern int sb1000_probe(struct net_device *dev);
-static int sb1000_open(struct net_device *dev);
-static int sb1000_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
- void __user *data, int cmd);
-static netdev_tx_t sb1000_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t sb1000_interrupt(int irq, void *dev_id);
-static int sb1000_close(struct net_device *dev);
-
-
-/* SB1000 hardware routines to be used during open/configuration phases */
-static int card_wait_for_busy_clear(const int ioaddr[],
- const char* name);
-static int card_wait_for_ready(const int ioaddr[], const char* name,
- unsigned char in[]);
-static int card_send_command(const int ioaddr[], const char* name,
- const unsigned char out[], unsigned char in[]);
-
-/* SB1000 hardware routines to be used during frame rx interrupt */
-static int sb1000_wait_for_ready(const int ioaddr[], const char* name);
-static int sb1000_wait_for_ready_clear(const int ioaddr[],
- const char* name);
-static void sb1000_send_command(const int ioaddr[], const char* name,
- const unsigned char out[]);
-static void sb1000_read_status(const int ioaddr[], unsigned char in[]);
-static void sb1000_issue_read_command(const int ioaddr[],
- const char* name);
-
-/* SB1000 commands for open/configuration */
-static int sb1000_reset(const int ioaddr[], const char* name);
-static int sb1000_check_CRC(const int ioaddr[], const char* name);
-static inline int sb1000_start_get_set_command(const int ioaddr[],
- const char* name);
-static int sb1000_end_get_set_command(const int ioaddr[],
- const char* name);
-static int sb1000_activate(const int ioaddr[], const char* name);
-static int sb1000_get_firmware_version(const int ioaddr[],
- const char* name, unsigned char version[], int do_end);
-static int sb1000_get_frequency(const int ioaddr[], const char* name,
- int* frequency);
-static int sb1000_set_frequency(const int ioaddr[], const char* name,
- int frequency);
-static int sb1000_get_PIDs(const int ioaddr[], const char* name,
- short PID[]);
-static int sb1000_set_PIDs(const int ioaddr[], const char* name,
- const short PID[]);
-
-/* SB1000 commands for frame rx interrupt */
-static int sb1000_rx(struct net_device *dev);
-static void sb1000_error_dpc(struct net_device *dev);
-
-static const struct pnp_device_id sb1000_pnp_ids[] = {
- { "GIC1000", 0 },
- { "", 0 }
-};
-MODULE_DEVICE_TABLE(pnp, sb1000_pnp_ids);
-
-static const struct net_device_ops sb1000_netdev_ops = {
- .ndo_open = sb1000_open,
- .ndo_start_xmit = sb1000_start_xmit,
- .ndo_siocdevprivate = sb1000_siocdevprivate,
- .ndo_stop = sb1000_close,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int
-sb1000_probe_one(struct pnp_dev *pdev, const struct pnp_device_id *id)
-{
- struct net_device *dev;
- unsigned short ioaddr[2], irq;
- unsigned int serial_number;
- int error = -ENODEV;
- u8 addr[ETH_ALEN];
-
- if (pnp_device_attach(pdev) < 0)
- return -ENODEV;
- if (pnp_activate_dev(pdev) < 0)
- goto out_detach;
-
- if (!pnp_port_valid(pdev, 0) || !pnp_port_valid(pdev, 1))
- goto out_disable;
- if (!pnp_irq_valid(pdev, 0))
- goto out_disable;
-
- serial_number = pdev->card->serial;
-
- ioaddr[0] = pnp_port_start(pdev, 0);
- ioaddr[1] = pnp_port_start(pdev, 0);
-
- irq = pnp_irq(pdev, 0);
-
- if (!request_region(ioaddr[0], 16, "sb1000"))
- goto out_disable;
- if (!request_region(ioaddr[1], 16, "sb1000"))
- goto out_release_region0;
-
- dev = alloc_etherdev(sizeof(struct sb1000_private));
- if (!dev) {
- error = -ENOMEM;
- goto out_release_regions;
- }
-
-
- dev->base_addr = ioaddr[0];
- /* mem_start holds the second I/O address */
- dev->mem_start = ioaddr[1];
- dev->irq = irq;
-
- if (sb1000_debug > 0)
- printk(KERN_NOTICE "%s: sb1000 at (%#3.3lx,%#3.3lx), "
- "S/N %#8.8x, IRQ %d.\n", dev->name, dev->base_addr,
- dev->mem_start, serial_number, dev->irq);
-
- /*
- * The SB1000 is an rx-only cable modem device. The uplink is a modem
- * and we do not want to arp on it.
- */
- dev->flags = IFF_POINTOPOINT|IFF_NOARP;
-
- SET_NETDEV_DEV(dev, &pdev->dev);
-
- if (sb1000_debug > 0)
- printk(KERN_NOTICE "%s", version);
-
- dev->netdev_ops = &sb1000_netdev_ops;
-
- /* hardware address is 0:0:serial_number */
- addr[0] = 0;
- addr[1] = 0;
- addr[2] = serial_number >> 24 & 0xff;
- addr[3] = serial_number >> 16 & 0xff;
- addr[4] = serial_number >> 8 & 0xff;
- addr[5] = serial_number >> 0 & 0xff;
- eth_hw_addr_set(dev, addr);
-
- pnp_set_drvdata(pdev, dev);
-
- error = register_netdev(dev);
- if (error)
- goto out_free_netdev;
- return 0;
-
- out_free_netdev:
- free_netdev(dev);
- out_release_regions:
- release_region(ioaddr[1], 16);
- out_release_region0:
- release_region(ioaddr[0], 16);
- out_disable:
- pnp_disable_dev(pdev);
- out_detach:
- pnp_device_detach(pdev);
- return error;
-}
-
-static void
-sb1000_remove_one(struct pnp_dev *pdev)
-{
- struct net_device *dev = pnp_get_drvdata(pdev);
-
- unregister_netdev(dev);
- release_region(dev->base_addr, 16);
- release_region(dev->mem_start, 16);
- free_netdev(dev);
-}
-
-static struct pnp_driver sb1000_driver = {
- .name = "sb1000",
- .id_table = sb1000_pnp_ids,
- .probe = sb1000_probe_one,
- .remove = sb1000_remove_one,
-};
-
-
-/*
- * SB1000 hardware routines to be used during open/configuration phases
- */
-
-static const int TimeOutJiffies = (875 * HZ) / 100;
-
-/* Card Wait For Busy Clear (cannot be used during an interrupt) */
-static int
-card_wait_for_busy_clear(const int ioaddr[], const char* name)
-{
- unsigned char a;
- unsigned long timeout;
-
- a = inb(ioaddr[0] + 7);
- timeout = jiffies + TimeOutJiffies;
- while (a & 0x80 || a & 0x40) {
- /* a little sleep */
- yield();
-
- a = inb(ioaddr[0] + 7);
- if (time_after_eq(jiffies, timeout)) {
- printk(KERN_WARNING "%s: card_wait_for_busy_clear timeout\n",
- name);
- return -ETIME;
- }
- }
-
- return 0;
-}
-
-/* Card Wait For Ready (cannot be used during an interrupt) */
-static int
-card_wait_for_ready(const int ioaddr[], const char* name, unsigned char in[])
-{
- unsigned char a;
- unsigned long timeout;
-
- a = inb(ioaddr[1] + 6);
- timeout = jiffies + TimeOutJiffies;
- while (a & 0x80 || !(a & 0x40)) {
- /* a little sleep */
- yield();
-
- a = inb(ioaddr[1] + 6);
- if (time_after_eq(jiffies, timeout)) {
- printk(KERN_WARNING "%s: card_wait_for_ready timeout\n",
- name);
- return -ETIME;
- }
- }
-
- in[1] = inb(ioaddr[0] + 1);
- in[2] = inb(ioaddr[0] + 2);
- in[3] = inb(ioaddr[0] + 3);
- in[4] = inb(ioaddr[0] + 4);
- in[0] = inb(ioaddr[0] + 5);
- in[6] = inb(ioaddr[0] + 6);
- in[5] = inb(ioaddr[1] + 6);
- return 0;
-}
-
-/* Card Send Command (cannot be used during an interrupt) */
-static int
-card_send_command(const int ioaddr[], const char* name,
- const unsigned char out[], unsigned char in[])
-{
- int status;
-
- if ((status = card_wait_for_busy_clear(ioaddr, name)))
- return status;
- outb(0xa0, ioaddr[0] + 6);
- outb(out[2], ioaddr[0] + 1);
- outb(out[3], ioaddr[0] + 2);
- outb(out[4], ioaddr[0] + 3);
- outb(out[5], ioaddr[0] + 4);
- outb(out[1], ioaddr[0] + 5);
- outb(0xa0, ioaddr[0] + 6);
- outb(out[0], ioaddr[0] + 7);
- if (out[0] != 0x20 && out[0] != 0x30) {
- if ((status = card_wait_for_ready(ioaddr, name, in)))
- return status;
- inb(ioaddr[0] + 7);
- if (sb1000_debug > 3)
- printk(KERN_DEBUG "%s: card_send_command "
- "out: %02x%02x%02x%02x%02x%02x "
- "in: %02x%02x%02x%02x%02x%02x%02x\n", name,
- out[0], out[1], out[2], out[3], out[4], out[5],
- in[0], in[1], in[2], in[3], in[4], in[5], in[6]);
- } else {
- if (sb1000_debug > 3)
- printk(KERN_DEBUG "%s: card_send_command "
- "out: %02x%02x%02x%02x%02x%02x\n", name,
- out[0], out[1], out[2], out[3], out[4], out[5]);
- }
-
- if (out[1] != 0x1b) {
- if (out[0] >= 0x80 && in[0] != (out[1] | 0x80))
- return -EIO;
- }
- return 0;
-}
-
-
-/*
- * SB1000 hardware routines to be used during frame rx interrupt
- */
-static const int Sb1000TimeOutJiffies = 7 * HZ;
-
-/* Card Wait For Ready (to be used during frame rx) */
-static int
-sb1000_wait_for_ready(const int ioaddr[], const char* name)
-{
- unsigned long timeout;
-
- timeout = jiffies + Sb1000TimeOutJiffies;
- while (inb(ioaddr[1] + 6) & 0x80) {
- if (time_after_eq(jiffies, timeout)) {
- printk(KERN_WARNING "%s: sb1000_wait_for_ready timeout\n",
- name);
- return -ETIME;
- }
- }
- timeout = jiffies + Sb1000TimeOutJiffies;
- while (!(inb(ioaddr[1] + 6) & 0x40)) {
- if (time_after_eq(jiffies, timeout)) {
- printk(KERN_WARNING "%s: sb1000_wait_for_ready timeout\n",
- name);
- return -ETIME;
- }
- }
- inb(ioaddr[0] + 7);
- return 0;
-}
-
-/* Card Wait For Ready Clear (to be used during frame rx) */
-static int
-sb1000_wait_for_ready_clear(const int ioaddr[], const char* name)
-{
- unsigned long timeout;
-
- timeout = jiffies + Sb1000TimeOutJiffies;
- while (inb(ioaddr[1] + 6) & 0x80) {
- if (time_after_eq(jiffies, timeout)) {
- printk(KERN_WARNING "%s: sb1000_wait_for_ready_clear timeout\n",
- name);
- return -ETIME;
- }
- }
- timeout = jiffies + Sb1000TimeOutJiffies;
- while (inb(ioaddr[1] + 6) & 0x40) {
- if (time_after_eq(jiffies, timeout)) {
- printk(KERN_WARNING "%s: sb1000_wait_for_ready_clear timeout\n",
- name);
- return -ETIME;
- }
- }
- return 0;
-}
-
-/* Card Send Command (to be used during frame rx) */
-static void
-sb1000_send_command(const int ioaddr[], const char* name,
- const unsigned char out[])
-{
- outb(out[2], ioaddr[0] + 1);
- outb(out[3], ioaddr[0] + 2);
- outb(out[4], ioaddr[0] + 3);
- outb(out[5], ioaddr[0] + 4);
- outb(out[1], ioaddr[0] + 5);
- outb(out[0], ioaddr[0] + 7);
- if (sb1000_debug > 3)
- printk(KERN_DEBUG "%s: sb1000_send_command out: %02x%02x%02x%02x"
- "%02x%02x\n", name, out[0], out[1], out[2], out[3], out[4], out[5]);
-}
-
-/* Card Read Status (to be used during frame rx) */
-static void
-sb1000_read_status(const int ioaddr[], unsigned char in[])
-{
- in[1] = inb(ioaddr[0] + 1);
- in[2] = inb(ioaddr[0] + 2);
- in[3] = inb(ioaddr[0] + 3);
- in[4] = inb(ioaddr[0] + 4);
- in[0] = inb(ioaddr[0] + 5);
-}
-
-/* Issue Read Command (to be used during frame rx) */
-static void
-sb1000_issue_read_command(const int ioaddr[], const char* name)
-{
- static const unsigned char Command0[6] = {0x20, 0x00, 0x00, 0x01, 0x00, 0x00};
-
- sb1000_wait_for_ready_clear(ioaddr, name);
- outb(0xa0, ioaddr[0] + 6);
- sb1000_send_command(ioaddr, name, Command0);
-}
-
-
-/*
- * SB1000 commands for open/configuration
- */
-/* reset SB1000 card */
-static int
-sb1000_reset(const int ioaddr[], const char* name)
-{
- static const unsigned char Command0[6] = {0x80, 0x16, 0x00, 0x00, 0x00, 0x00};
-
- unsigned char st[7];
- int port, status;
-
- port = ioaddr[1] + 6;
- outb(0x4, port);
- inb(port);
- udelay(1000);
- outb(0x0, port);
- inb(port);
- ssleep(1);
- outb(0x4, port);
- inb(port);
- udelay(1000);
- outb(0x0, port);
- inb(port);
- udelay(0);
-
- if ((status = card_send_command(ioaddr, name, Command0, st)))
- return status;
- if (st[3] != 0xf0)
- return -EIO;
- return 0;
-}
-
-/* check SB1000 firmware CRC */
-static int
-sb1000_check_CRC(const int ioaddr[], const char* name)
-{
- static const unsigned char Command0[6] = {0x80, 0x1f, 0x00, 0x00, 0x00, 0x00};
-
- unsigned char st[7];
- int status;
-
- /* check CRC */
- if ((status = card_send_command(ioaddr, name, Command0, st)))
- return status;
- if (st[1] != st[3] || st[2] != st[4])
- return -EIO;
- return 0;
-}
-
-static inline int
-sb1000_start_get_set_command(const int ioaddr[], const char* name)
-{
- static const unsigned char Command0[6] = {0x80, 0x1b, 0x00, 0x00, 0x00, 0x00};
-
- unsigned char st[7];
-
- return card_send_command(ioaddr, name, Command0, st);
-}
-
-static int
-sb1000_end_get_set_command(const int ioaddr[], const char* name)
-{
- static const unsigned char Command0[6] = {0x80, 0x1b, 0x02, 0x00, 0x00, 0x00};
- static const unsigned char Command1[6] = {0x20, 0x00, 0x00, 0x00, 0x00, 0x00};
-
- unsigned char st[7];
- int status;
-
- if ((status = card_send_command(ioaddr, name, Command0, st)))
- return status;
- return card_send_command(ioaddr, name, Command1, st);
-}
-
-static int
-sb1000_activate(const int ioaddr[], const char* name)
-{
- static const unsigned char Command0[6] = {0x80, 0x11, 0x00, 0x00, 0x00, 0x00};
- static const unsigned char Command1[6] = {0x80, 0x16, 0x00, 0x00, 0x00, 0x00};
-
- unsigned char st[7];
- int status;
-
- ssleep(1);
- status = card_send_command(ioaddr, name, Command0, st);
- if (status)
- return status;
- status = card_send_command(ioaddr, name, Command1, st);
- if (status)
- return status;
- if (st[3] != 0xf1) {
- status = sb1000_start_get_set_command(ioaddr, name);
- if (status)
- return status;
- return -EIO;
- }
- udelay(1000);
- return sb1000_start_get_set_command(ioaddr, name);
-}
-
-/* get SB1000 firmware version */
-static int
-sb1000_get_firmware_version(const int ioaddr[], const char* name,
- unsigned char version[], int do_end)
-{
- static const unsigned char Command0[6] = {0x80, 0x23, 0x00, 0x00, 0x00, 0x00};
-
- unsigned char st[7];
- int status;
-
- if ((status = sb1000_start_get_set_command(ioaddr, name)))
- return status;
- if ((status = card_send_command(ioaddr, name, Command0, st)))
- return status;
- if (st[0] != 0xa3)
- return -EIO;
- version[0] = st[1];
- version[1] = st[2];
- if (do_end)
- return sb1000_end_get_set_command(ioaddr, name);
- else
- return 0;
-}
-
-/* get SB1000 frequency */
-static int
-sb1000_get_frequency(const int ioaddr[], const char* name, int* frequency)
-{
- static const unsigned char Command0[6] = {0x80, 0x44, 0x00, 0x00, 0x00, 0x00};
-
- unsigned char st[7];
- int status;
-
- udelay(1000);
- if ((status = sb1000_start_get_set_command(ioaddr, name)))
- return status;
- if ((status = card_send_command(ioaddr, name, Command0, st)))
- return status;
- *frequency = ((st[1] << 8 | st[2]) << 8 | st[3]) << 8 | st[4];
- return sb1000_end_get_set_command(ioaddr, name);
-}
-
-/* set SB1000 frequency */
-static int
-sb1000_set_frequency(const int ioaddr[], const char* name, int frequency)
-{
- unsigned char st[7];
- int status;
- unsigned char Command0[6] = {0x80, 0x29, 0x00, 0x00, 0x00, 0x00};
-
- const int FrequencyLowerLimit = 57000;
- const int FrequencyUpperLimit = 804000;
-
- if (frequency < FrequencyLowerLimit || frequency > FrequencyUpperLimit) {
- printk(KERN_ERR "%s: frequency chosen (%d kHz) is not in the range "
- "[%d,%d] kHz\n", name, frequency, FrequencyLowerLimit,
- FrequencyUpperLimit);
- return -EINVAL;
- }
- udelay(1000);
- if ((status = sb1000_start_get_set_command(ioaddr, name)))
- return status;
- Command0[5] = frequency & 0xff;
- frequency >>= 8;
- Command0[4] = frequency & 0xff;
- frequency >>= 8;
- Command0[3] = frequency & 0xff;
- frequency >>= 8;
- Command0[2] = frequency & 0xff;
- return card_send_command(ioaddr, name, Command0, st);
-}
-
-/* get SB1000 PIDs */
-static int
-sb1000_get_PIDs(const int ioaddr[], const char* name, short PID[])
-{
- static const unsigned char Command0[6] = {0x80, 0x40, 0x00, 0x00, 0x00, 0x00};
- static const unsigned char Command1[6] = {0x80, 0x41, 0x00, 0x00, 0x00, 0x00};
- static const unsigned char Command2[6] = {0x80, 0x42, 0x00, 0x00, 0x00, 0x00};
- static const unsigned char Command3[6] = {0x80, 0x43, 0x00, 0x00, 0x00, 0x00};
-
- unsigned char st[7];
- int status;
-
- udelay(1000);
- if ((status = sb1000_start_get_set_command(ioaddr, name)))
- return status;
-
- if ((status = card_send_command(ioaddr, name, Command0, st)))
- return status;
- PID[0] = st[1] << 8 | st[2];
-
- if ((status = card_send_command(ioaddr, name, Command1, st)))
- return status;
- PID[1] = st[1] << 8 | st[2];
-
- if ((status = card_send_command(ioaddr, name, Command2, st)))
- return status;
- PID[2] = st[1] << 8 | st[2];
-
- if ((status = card_send_command(ioaddr, name, Command3, st)))
- return status;
- PID[3] = st[1] << 8 | st[2];
-
- return sb1000_end_get_set_command(ioaddr, name);
-}
-
-/* set SB1000 PIDs */
-static int
-sb1000_set_PIDs(const int ioaddr[], const char* name, const short PID[])
-{
- static const unsigned char Command4[6] = {0x80, 0x2e, 0x00, 0x00, 0x00, 0x00};
-
- unsigned char st[7];
- short p;
- int status;
- unsigned char Command0[6] = {0x80, 0x31, 0x00, 0x00, 0x00, 0x00};
- unsigned char Command1[6] = {0x80, 0x32, 0x00, 0x00, 0x00, 0x00};
- unsigned char Command2[6] = {0x80, 0x33, 0x00, 0x00, 0x00, 0x00};
- unsigned char Command3[6] = {0x80, 0x34, 0x00, 0x00, 0x00, 0x00};
-
- udelay(1000);
- if ((status = sb1000_start_get_set_command(ioaddr, name)))
- return status;
-
- p = PID[0];
- Command0[3] = p & 0xff;
- p >>= 8;
- Command0[2] = p & 0xff;
- if ((status = card_send_command(ioaddr, name, Command0, st)))
- return status;
-
- p = PID[1];
- Command1[3] = p & 0xff;
- p >>= 8;
- Command1[2] = p & 0xff;
- if ((status = card_send_command(ioaddr, name, Command1, st)))
- return status;
-
- p = PID[2];
- Command2[3] = p & 0xff;
- p >>= 8;
- Command2[2] = p & 0xff;
- if ((status = card_send_command(ioaddr, name, Command2, st)))
- return status;
-
- p = PID[3];
- Command3[3] = p & 0xff;
- p >>= 8;
- Command3[2] = p & 0xff;
- if ((status = card_send_command(ioaddr, name, Command3, st)))
- return status;
-
- if ((status = card_send_command(ioaddr, name, Command4, st)))
- return status;
- return sb1000_end_get_set_command(ioaddr, name);
-}
-
-
-static void
-sb1000_print_status_buffer(const char* name, unsigned char st[],
- unsigned char buffer[], int size)
-{
- int i, j, k;
-
- printk(KERN_DEBUG "%s: status: %02x %02x\n", name, st[0], st[1]);
- if (buffer[24] == 0x08 && buffer[25] == 0x00 && buffer[26] == 0x45) {
- printk(KERN_DEBUG "%s: length: %d protocol: %d from: %d.%d.%d.%d:%d "
- "to %d.%d.%d.%d:%d\n", name, buffer[28] << 8 | buffer[29],
- buffer[35], buffer[38], buffer[39], buffer[40], buffer[41],
- buffer[46] << 8 | buffer[47],
- buffer[42], buffer[43], buffer[44], buffer[45],
- buffer[48] << 8 | buffer[49]);
- } else {
- for (i = 0, k = 0; i < (size + 7) / 8; i++) {
- printk(KERN_DEBUG "%s: %s", name, i ? " " : "buffer:");
- for (j = 0; j < 8 && k < size; j++, k++)
- printk(" %02x", buffer[k]);
- printk("\n");
- }
- }
-}
-
-/*
- * SB1000 commands for frame rx interrupt
- */
-/* receive a single frame and assemble datagram
- * (this is the heart of the interrupt routine)
- */
-static int
-sb1000_rx(struct net_device *dev)
-{
-
-#define FRAMESIZE 184
- unsigned char st[2], buffer[FRAMESIZE], session_id, frame_id;
- short dlen;
- int ioaddr, ns;
- unsigned int skbsize;
- struct sk_buff *skb;
- struct sb1000_private *lp = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
-
- /* SB1000 frame constants */
- const int FrameSize = FRAMESIZE;
- const int NewDatagramHeaderSkip = 8;
- const int NewDatagramHeaderSize = NewDatagramHeaderSkip + 18;
- const int NewDatagramDataSize = FrameSize - NewDatagramHeaderSize;
- const int ContDatagramHeaderSkip = 7;
- const int ContDatagramHeaderSize = ContDatagramHeaderSkip + 1;
- const int ContDatagramDataSize = FrameSize - ContDatagramHeaderSize;
- const int TrailerSize = 4;
-
- ioaddr = dev->base_addr;
-
- insw(ioaddr, (unsigned short*) st, 1);
-#ifdef XXXDEBUG
-printk("cm0: received: %02x %02x\n", st[0], st[1]);
-#endif /* XXXDEBUG */
- lp->rx_frames++;
-
- /* decide if it is a good or bad frame */
- for (ns = 0; ns < NPIDS; ns++) {
- session_id = lp->rx_session_id[ns];
- frame_id = lp->rx_frame_id[ns];
- if (st[0] == session_id) {
- if (st[1] == frame_id || (!frame_id && (st[1] & 0xf0) == 0x30)) {
- goto good_frame;
- } else if ((st[1] & 0xf0) == 0x30 && (st[0] & 0x40)) {
- goto skipped_frame;
- } else {
- goto bad_frame;
- }
- } else if (st[0] == (session_id | 0x40)) {
- if ((st[1] & 0xf0) == 0x30) {
- goto skipped_frame;
- } else {
- goto bad_frame;
- }
- }
- }
- goto bad_frame;
-
-skipped_frame:
- stats->rx_frame_errors++;
- skb = lp->rx_skb[ns];
- if (sb1000_debug > 1)
- printk(KERN_WARNING "%s: missing frame(s): got %02x %02x "
- "expecting %02x %02x\n", dev->name, st[0], st[1],
- skb ? session_id : session_id | 0x40, frame_id);
- if (skb) {
- dev_kfree_skb(skb);
- skb = NULL;
- }
-
-good_frame:
- lp->rx_frame_id[ns] = 0x30 | ((st[1] + 1) & 0x0f);
- /* new datagram */
- if (st[0] & 0x40) {
- /* get data length */
- insw(ioaddr, buffer, NewDatagramHeaderSize / 2);
-#ifdef XXXDEBUG
-printk("cm0: IP identification: %02x%02x fragment offset: %02x%02x\n", buffer[30], buffer[31], buffer[32], buffer[33]);
-#endif /* XXXDEBUG */
- if (buffer[0] != NewDatagramHeaderSkip) {
- if (sb1000_debug > 1)
- printk(KERN_WARNING "%s: new datagram header skip error: "
- "got %02x expecting %02x\n", dev->name, buffer[0],
- NewDatagramHeaderSkip);
- stats->rx_length_errors++;
- insw(ioaddr, buffer, NewDatagramDataSize / 2);
- goto bad_frame_next;
- }
- dlen = ((buffer[NewDatagramHeaderSkip + 3] & 0x0f) << 8 |
- buffer[NewDatagramHeaderSkip + 4]) - 17;
- if (dlen > SB1000_MRU) {
- if (sb1000_debug > 1)
- printk(KERN_WARNING "%s: datagram length (%d) greater "
- "than MRU (%d)\n", dev->name, dlen, SB1000_MRU);
- stats->rx_length_errors++;
- insw(ioaddr, buffer, NewDatagramDataSize / 2);
- goto bad_frame_next;
- }
- lp->rx_dlen[ns] = dlen;
- /* compute size to allocate for datagram */
- skbsize = dlen + FrameSize;
- if ((skb = alloc_skb(skbsize, GFP_ATOMIC)) == NULL) {
- if (sb1000_debug > 1)
- printk(KERN_WARNING "%s: can't allocate %d bytes long "
- "skbuff\n", dev->name, skbsize);
- stats->rx_dropped++;
- insw(ioaddr, buffer, NewDatagramDataSize / 2);
- goto dropped_frame;
- }
- skb->dev = dev;
- skb_reset_mac_header(skb);
- skb->protocol = (unsigned short) buffer[NewDatagramHeaderSkip + 16];
- insw(ioaddr, skb_put(skb, NewDatagramDataSize),
- NewDatagramDataSize / 2);
- lp->rx_skb[ns] = skb;
- } else {
- /* continuation of previous datagram */
- insw(ioaddr, buffer, ContDatagramHeaderSize / 2);
- if (buffer[0] != ContDatagramHeaderSkip) {
- if (sb1000_debug > 1)
- printk(KERN_WARNING "%s: cont datagram header skip error: "
- "got %02x expecting %02x\n", dev->name, buffer[0],
- ContDatagramHeaderSkip);
- stats->rx_length_errors++;
- insw(ioaddr, buffer, ContDatagramDataSize / 2);
- goto bad_frame_next;
- }
- skb = lp->rx_skb[ns];
- insw(ioaddr, skb_put(skb, ContDatagramDataSize),
- ContDatagramDataSize / 2);
- dlen = lp->rx_dlen[ns];
- }
- if (skb->len < dlen + TrailerSize) {
- lp->rx_session_id[ns] &= ~0x40;
- return 0;
- }
-
- /* datagram completed: send to upper level */
- skb_trim(skb, dlen);
- __netif_rx(skb);
- stats->rx_bytes+=dlen;
- stats->rx_packets++;
- lp->rx_skb[ns] = NULL;
- lp->rx_session_id[ns] |= 0x40;
- return 0;
-
-bad_frame:
- insw(ioaddr, buffer, FrameSize / 2);
- if (sb1000_debug > 1)
- printk(KERN_WARNING "%s: frame error: got %02x %02x\n",
- dev->name, st[0], st[1]);
- stats->rx_frame_errors++;
-bad_frame_next:
- if (sb1000_debug > 2)
- sb1000_print_status_buffer(dev->name, st, buffer, FrameSize);
-dropped_frame:
- stats->rx_errors++;
- if (ns < NPIDS) {
- if ((skb = lp->rx_skb[ns])) {
- dev_kfree_skb(skb);
- lp->rx_skb[ns] = NULL;
- }
- lp->rx_session_id[ns] |= 0x40;
- }
- return -1;
-}
-
-static void
-sb1000_error_dpc(struct net_device *dev)
-{
- static const unsigned char Command0[6] = {0x80, 0x26, 0x00, 0x00, 0x00, 0x00};
-
- char *name;
- unsigned char st[5];
- int ioaddr[2];
- struct sb1000_private *lp = netdev_priv(dev);
- const int ErrorDpcCounterInitialize = 200;
-
- ioaddr[0] = dev->base_addr;
- /* mem_start holds the second I/O address */
- ioaddr[1] = dev->mem_start;
- name = dev->name;
-
- sb1000_wait_for_ready_clear(ioaddr, name);
- sb1000_send_command(ioaddr, name, Command0);
- sb1000_wait_for_ready(ioaddr, name);
- sb1000_read_status(ioaddr, st);
- if (st[1] & 0x10)
- lp->rx_error_dpc_count = ErrorDpcCounterInitialize;
-}
-
-
-/*
- * Linux interface functions
- */
-static int
-sb1000_open(struct net_device *dev)
-{
- char *name;
- int ioaddr[2], status;
- struct sb1000_private *lp = netdev_priv(dev);
- const unsigned short FirmwareVersion[] = {0x01, 0x01};
-
- ioaddr[0] = dev->base_addr;
- /* mem_start holds the second I/O address */
- ioaddr[1] = dev->mem_start;
- name = dev->name;
-
- /* initialize sb1000 */
- if ((status = sb1000_reset(ioaddr, name)))
- return status;
- ssleep(1);
- if ((status = sb1000_check_CRC(ioaddr, name)))
- return status;
-
- /* initialize private data before board can catch interrupts */
- lp->rx_skb[0] = NULL;
- lp->rx_skb[1] = NULL;
- lp->rx_skb[2] = NULL;
- lp->rx_skb[3] = NULL;
- lp->rx_dlen[0] = 0;
- lp->rx_dlen[1] = 0;
- lp->rx_dlen[2] = 0;
- lp->rx_dlen[3] = 0;
- lp->rx_frames = 0;
- lp->rx_error_count = 0;
- lp->rx_error_dpc_count = 0;
- lp->rx_session_id[0] = 0x50;
- lp->rx_session_id[1] = 0x48;
- lp->rx_session_id[2] = 0x44;
- lp->rx_session_id[3] = 0x42;
- lp->rx_frame_id[0] = 0;
- lp->rx_frame_id[1] = 0;
- lp->rx_frame_id[2] = 0;
- lp->rx_frame_id[3] = 0;
- if (request_irq(dev->irq, sb1000_interrupt, 0, "sb1000", dev)) {
- return -EAGAIN;
- }
-
- if (sb1000_debug > 2)
- printk(KERN_DEBUG "%s: Opening, IRQ %d\n", name, dev->irq);
-
- /* Activate board and check firmware version */
- udelay(1000);
- if ((status = sb1000_activate(ioaddr, name)))
- return status;
- udelay(0);
- if ((status = sb1000_get_firmware_version(ioaddr, name, version, 0)))
- return status;
- if (version[0] != FirmwareVersion[0] || version[1] != FirmwareVersion[1])
- printk(KERN_WARNING "%s: found firmware version %x.%02x "
- "(should be %x.%02x)\n", name, version[0], version[1],
- FirmwareVersion[0], FirmwareVersion[1]);
-
-
- netif_start_queue(dev);
- return 0; /* Always succeed */
-}
-
-static int sb1000_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
- void __user *data, int cmd)
-{
- char* name;
- unsigned char version[2];
- short PID[4];
- int ioaddr[2], status, frequency;
- unsigned int stats[5];
- struct sb1000_private *lp = netdev_priv(dev);
-
- if (!(dev && dev->flags & IFF_UP))
- return -ENODEV;
-
- ioaddr[0] = dev->base_addr;
- /* mem_start holds the second I/O address */
- ioaddr[1] = dev->mem_start;
- name = dev->name;
-
- switch (cmd) {
- case SIOCGCMSTATS: /* get statistics */
- stats[0] = dev->stats.rx_bytes;
- stats[1] = lp->rx_frames;
- stats[2] = dev->stats.rx_packets;
- stats[3] = dev->stats.rx_errors;
- stats[4] = dev->stats.rx_dropped;
- if (copy_to_user(data, stats, sizeof(stats)))
- return -EFAULT;
- status = 0;
- break;
-
- case SIOCGCMFIRMWARE: /* get firmware version */
- if ((status = sb1000_get_firmware_version(ioaddr, name, version, 1)))
- return status;
- if (copy_to_user(data, version, sizeof(version)))
- return -EFAULT;
- break;
-
- case SIOCGCMFREQUENCY: /* get frequency */
- if ((status = sb1000_get_frequency(ioaddr, name, &frequency)))
- return status;
- if (put_user(frequency, (int __user *)data))
- return -EFAULT;
- break;
-
- case SIOCSCMFREQUENCY: /* set frequency */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (get_user(frequency, (int __user *)data))
- return -EFAULT;
- if ((status = sb1000_set_frequency(ioaddr, name, frequency)))
- return status;
- break;
-
- case SIOCGCMPIDS: /* get PIDs */
- if ((status = sb1000_get_PIDs(ioaddr, name, PID)))
- return status;
- if (copy_to_user(data, PID, sizeof(PID)))
- return -EFAULT;
- break;
-
- case SIOCSCMPIDS: /* set PIDs */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (copy_from_user(PID, data, sizeof(PID)))
- return -EFAULT;
- if ((status = sb1000_set_PIDs(ioaddr, name, PID)))
- return status;
- /* set session_id, frame_id and pkt_type too */
- lp->rx_session_id[0] = 0x50 | (PID[0] & 0x0f);
- lp->rx_session_id[1] = 0x48;
- lp->rx_session_id[2] = 0x44;
- lp->rx_session_id[3] = 0x42;
- lp->rx_frame_id[0] = 0;
- lp->rx_frame_id[1] = 0;
- lp->rx_frame_id[2] = 0;
- lp->rx_frame_id[3] = 0;
- break;
-
- default:
- status = -EINVAL;
- break;
- }
- return status;
-}
-
-/* transmit function: do nothing since SB1000 can't send anything out */
-static netdev_tx_t
-sb1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- printk(KERN_WARNING "%s: trying to transmit!!!\n", dev->name);
- /* sb1000 can't xmit datagrams */
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-/* SB1000 interrupt handler. */
-static irqreturn_t sb1000_interrupt(int irq, void *dev_id)
-{
- static const unsigned char Command0[6] = {0x80, 0x2c, 0x00, 0x00, 0x00, 0x00};
- static const unsigned char Command1[6] = {0x80, 0x2e, 0x00, 0x00, 0x00, 0x00};
-
- char *name;
- unsigned char st;
- int ioaddr[2];
- struct net_device *dev = dev_id;
- struct sb1000_private *lp = netdev_priv(dev);
-
- const int MaxRxErrorCount = 6;
-
- ioaddr[0] = dev->base_addr;
- /* mem_start holds the second I/O address */
- ioaddr[1] = dev->mem_start;
- name = dev->name;
-
- /* is it a good interrupt? */
- st = inb(ioaddr[1] + 6);
- if (!(st & 0x08 && st & 0x20)) {
- return IRQ_NONE;
- }
-
- if (sb1000_debug > 3)
- printk(KERN_DEBUG "%s: entering interrupt\n", dev->name);
-
- st = inb(ioaddr[0] + 7);
- if (sb1000_rx(dev))
- lp->rx_error_count++;
-#ifdef SB1000_DELAY
- udelay(SB1000_DELAY);
-#endif /* SB1000_DELAY */
- sb1000_issue_read_command(ioaddr, name);
- if (st & 0x01) {
- sb1000_error_dpc(dev);
- sb1000_issue_read_command(ioaddr, name);
- }
- if (lp->rx_error_dpc_count && !(--lp->rx_error_dpc_count)) {
- sb1000_wait_for_ready_clear(ioaddr, name);
- sb1000_send_command(ioaddr, name, Command0);
- sb1000_wait_for_ready(ioaddr, name);
- sb1000_issue_read_command(ioaddr, name);
- }
- if (lp->rx_error_count >= MaxRxErrorCount) {
- sb1000_wait_for_ready_clear(ioaddr, name);
- sb1000_send_command(ioaddr, name, Command1);
- sb1000_wait_for_ready(ioaddr, name);
- sb1000_issue_read_command(ioaddr, name);
- lp->rx_error_count = 0;
- }
-
- return IRQ_HANDLED;
-}
-
-static int sb1000_close(struct net_device *dev)
-{
- int i;
- int ioaddr[2];
- struct sb1000_private *lp = netdev_priv(dev);
-
- if (sb1000_debug > 2)
- printk(KERN_DEBUG "%s: Shutting down sb1000.\n", dev->name);
-
- netif_stop_queue(dev);
-
- ioaddr[0] = dev->base_addr;
- /* mem_start holds the second I/O address */
- ioaddr[1] = dev->mem_start;
-
- free_irq(dev->irq, dev);
- /* If we don't do this, we can't re-insmod it later. */
- release_region(ioaddr[1], SB1000_IO_EXTENT);
- release_region(ioaddr[0], SB1000_IO_EXTENT);
-
- /* free rx_skb's if needed */
- for (i=0; i<4; i++) {
- if (lp->rx_skb[i]) {
- dev_kfree_skb(lp->rx_skb[i]);
- }
- }
- return 0;
-}
-
-MODULE_AUTHOR("Franco Venturi <fventuri@mediaone.net>");
-MODULE_DESCRIPTION("General Instruments SB1000 driver");
-MODULE_LICENSE("GPL");
-
-module_pnp_driver(sb1000_driver);
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 5ca6ecf0ce5f..d4ece538f1b2 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -26,74 +26,9 @@
#include <linux/virtio_net.h>
#include <linux/skb_array.h>
-#define TAP_IFFEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)
-
-#define TAP_VNET_LE 0x80000000
-#define TAP_VNET_BE 0x40000000
-
-#ifdef CONFIG_TUN_VNET_CROSS_LE
-static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
-{
- return q->flags & TAP_VNET_BE ? false :
- virtio_legacy_is_little_endian();
-}
-
-static long tap_get_vnet_be(struct tap_queue *q, int __user *sp)
-{
- int s = !!(q->flags & TAP_VNET_BE);
-
- if (put_user(s, sp))
- return -EFAULT;
-
- return 0;
-}
-
-static long tap_set_vnet_be(struct tap_queue *q, int __user *sp)
-{
- int s;
-
- if (get_user(s, sp))
- return -EFAULT;
-
- if (s)
- q->flags |= TAP_VNET_BE;
- else
- q->flags &= ~TAP_VNET_BE;
-
- return 0;
-}
-#else
-static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
-{
- return virtio_legacy_is_little_endian();
-}
-
-static long tap_get_vnet_be(struct tap_queue *q, int __user *argp)
-{
- return -EINVAL;
-}
-
-static long tap_set_vnet_be(struct tap_queue *q, int __user *argp)
-{
- return -EINVAL;
-}
-#endif /* CONFIG_TUN_VNET_CROSS_LE */
-
-static inline bool tap_is_little_endian(struct tap_queue *q)
-{
- return q->flags & TAP_VNET_LE ||
- tap_legacy_is_little_endian(q);
-}
-
-static inline u16 tap16_to_cpu(struct tap_queue *q, __virtio16 val)
-{
- return __virtio16_to_cpu(tap_is_little_endian(q), val);
-}
+#include "tun_vnet.h"
-static inline __virtio16 cpu_to_tap16(struct tap_queue *q, u16 val)
-{
- return __cpu_to_virtio16(tap_is_little_endian(q), val);
-}
+#define TAP_IFFEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)
static struct proto tap_proto = {
.name = "tap",
@@ -645,6 +580,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
int err;
struct virtio_net_hdr vnet_hdr = { 0 };
int vnet_hdr_len = 0;
+ int hdr_len = 0;
int copylen = 0;
int depth;
bool zerocopy = false;
@@ -654,25 +590,13 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
if (q->flags & IFF_VNET_HDR) {
vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
- err = -EINVAL;
- if (len < vnet_hdr_len)
+ hdr_len = tun_vnet_hdr_get(vnet_hdr_len, q->flags, from, &vnet_hdr);
+ if (hdr_len < 0) {
+ err = hdr_len;
goto err;
- len -= vnet_hdr_len;
+ }
- err = -EFAULT;
- if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from))
- goto err;
- iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
- if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
- tap16_to_cpu(q, vnet_hdr.csum_start) +
- tap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
- tap16_to_cpu(q, vnet_hdr.hdr_len))
- vnet_hdr.hdr_len = cpu_to_tap16(q,
- tap16_to_cpu(q, vnet_hdr.csum_start) +
- tap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
- err = -EINVAL;
- if (tap16_to_cpu(q, vnet_hdr.hdr_len) > len)
- goto err;
+ len -= vnet_hdr_len;
}
err = -EINVAL;
@@ -682,12 +606,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
if (msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
struct iov_iter i;
- copylen = vnet_hdr.hdr_len ?
- tap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
- if (copylen > good_linear)
- copylen = good_linear;
- else if (copylen < ETH_HLEN)
- copylen = ETH_HLEN;
+ copylen = clamp(hdr_len ?: GOODCOPY_LEN, ETH_HLEN, good_linear);
linear = copylen;
i = *from;
iov_iter_advance(&i, copylen);
@@ -697,11 +616,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
if (!zerocopy) {
copylen = len;
- linear = tap16_to_cpu(q, vnet_hdr.hdr_len);
- if (linear > good_linear)
- linear = good_linear;
- else if (linear < ETH_HLEN)
- linear = ETH_HLEN;
+ linear = clamp(hdr_len, ETH_HLEN, good_linear);
}
skb = tap_alloc_skb(&q->sk, TAP_RESERVE, copylen,
@@ -733,8 +648,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
skb->dev = tap->dev;
if (vnet_hdr_len) {
- err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
- tap_is_little_endian(q));
+ err = tun_vnet_hdr_to_skb(q->flags, skb, &vnet_hdr);
if (err) {
rcu_read_unlock();
drop_reason = SKB_DROP_REASON_DEV_HDR;
@@ -797,23 +711,17 @@ static ssize_t tap_put_user(struct tap_queue *q,
int total;
if (q->flags & IFF_VNET_HDR) {
- int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;
struct virtio_net_hdr vnet_hdr;
vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
- if (iov_iter_count(iter) < vnet_hdr_len)
- return -EINVAL;
- if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
- tap_is_little_endian(q), true,
- vlan_hlen))
- BUG();
-
- if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
- sizeof(vnet_hdr))
- return -EFAULT;
+ ret = tun_vnet_hdr_from_skb(q->flags, NULL, skb, &vnet_hdr);
+ if (ret)
+ return ret;
- iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
+ ret = tun_vnet_hdr_put(vnet_hdr_len, iter, &vnet_hdr);
+ if (ret)
+ return ret;
}
total = vnet_hdr_len;
total += skb->len;
@@ -1072,42 +980,6 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
q->sk.sk_sndbuf = s;
return 0;
- case TUNGETVNETHDRSZ:
- s = q->vnet_hdr_sz;
- if (put_user(s, sp))
- return -EFAULT;
- return 0;
-
- case TUNSETVNETHDRSZ:
- if (get_user(s, sp))
- return -EFAULT;
- if (s < (int)sizeof(struct virtio_net_hdr))
- return -EINVAL;
-
- q->vnet_hdr_sz = s;
- return 0;
-
- case TUNGETVNETLE:
- s = !!(q->flags & TAP_VNET_LE);
- if (put_user(s, sp))
- return -EFAULT;
- return 0;
-
- case TUNSETVNETLE:
- if (get_user(s, sp))
- return -EFAULT;
- if (s)
- q->flags |= TAP_VNET_LE;
- else
- q->flags &= ~TAP_VNET_LE;
- return 0;
-
- case TUNGETVNETBE:
- return tap_get_vnet_be(q, sp);
-
- case TUNSETVNETBE:
- return tap_set_vnet_be(q, sp);
-
case TUNSETOFFLOAD:
/* let the user check for future flags */
if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
@@ -1151,7 +1023,7 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
return ret;
default:
- return -EINVAL;
+ return tun_vnet_ioctl(&q->vnet_hdr_sz, &q->flags, cmd, sp);
}
}
@@ -1198,7 +1070,7 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
skb->protocol = eth_hdr(skb)->h_proto;
if (vnet_hdr_len) {
- err = virtio_net_hdr_to_skb(skb, gso, tap_is_little_endian(q));
+ err = tun_vnet_hdr_to_skb(q->flags, skb, gso);
if (err)
goto err_kfree;
}
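The copylen/linear rewrites above are straight substitutions: with hdr_len standing in for the endian-converted vnet_hdr.hdr_len, the removed if/else chains and the new clamp() expression compute the same value as long as ETH_HLEN <= good_linear, which always holds here (good_linear is derived from the maximum linear skb size). Side by side:

	/* before */
	copylen = hdr_len ?: GOODCOPY_LEN;
	if (copylen > good_linear)
		copylen = good_linear;
	else if (copylen < ETH_HLEN)
		copylen = ETH_HLEN;

	/* after */
	copylen = clamp(hdr_len ?: GOODCOPY_LEN, ETH_HLEN, good_linear);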
diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c
index f4019815f473..d8fc0c79745d 100644
--- a/drivers/net/team/team_core.c
+++ b/drivers/net/team/team_core.c
@@ -23,6 +23,7 @@
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
+#include <net/netdev_lock.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <linux/if_team.h>
@@ -2203,7 +2204,7 @@ static void team_setup(struct net_device *dev)
dev->lltx = true;
/* Don't allow team devices to change network namespaces. */
- dev->netns_local = true;
+ dev->netns_immutable = true;
dev->features |= NETIF_F_GRO;
@@ -2218,10 +2219,12 @@ static void team_setup(struct net_device *dev)
dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
}
-static int team_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int team_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct nlattr **tb = params->tb;
+
if (tb[IFLA_ADDRESS] == NULL)
eth_hw_addr_random(dev);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index acf96f262488..f75f912a0225 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -83,6 +83,8 @@
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
+#include "tun_vnet.h"
+
static void tun_default_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd);
@@ -94,9 +96,6 @@ static void tun_default_link_ksettings(struct net_device *dev,
* overload it to mean fasync when stored there.
*/
#define TUN_FASYNC IFF_ATTACH_QUEUE
-/* High bits in flags field are unused. */
-#define TUN_VNET_LE 0x80000000
-#define TUN_VNET_BE 0x40000000
#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)
@@ -298,70 +297,6 @@ static bool tun_napi_frags_enabled(const struct tun_file *tfile)
return tfile->napi_frags_enabled;
}
-#ifdef CONFIG_TUN_VNET_CROSS_LE
-static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
-{
- return tun->flags & TUN_VNET_BE ? false :
- virtio_legacy_is_little_endian();
-}
-
-static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
-{
- int be = !!(tun->flags & TUN_VNET_BE);
-
- if (put_user(be, argp))
- return -EFAULT;
-
- return 0;
-}
-
-static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
-{
- int be;
-
- if (get_user(be, argp))
- return -EFAULT;
-
- if (be)
- tun->flags |= TUN_VNET_BE;
- else
- tun->flags &= ~TUN_VNET_BE;
-
- return 0;
-}
-#else
-static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
-{
- return virtio_legacy_is_little_endian();
-}
-
-static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
-{
- return -EINVAL;
-}
-
-static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
-{
- return -EINVAL;
-}
-#endif /* CONFIG_TUN_VNET_CROSS_LE */
-
-static inline bool tun_is_little_endian(struct tun_struct *tun)
-{
- return tun->flags & TUN_VNET_LE ||
- tun_legacy_is_little_endian(tun);
-}
-
-static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
-{
- return __virtio16_to_cpu(tun_is_little_endian(tun), val);
-}
-
-static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
-{
- return __cpu_to_virtio16(tun_is_little_endian(tun), val);
-}
-
static inline u32 tun_hashfn(u32 rxhash)
{
return rxhash & TUN_MASK_FLOW_ENTRIES;
@@ -1600,7 +1535,8 @@ static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
static struct sk_buff *__tun_build_skb(struct tun_file *tfile,
struct page_frag *alloc_frag, char *buf,
- int buflen, int len, int pad)
+ int buflen, int len, int pad,
+ int metasize)
{
struct sk_buff *skb = build_skb(buf, buflen);
@@ -1609,6 +1545,8 @@ static struct sk_buff *__tun_build_skb(struct tun_file *tfile,
skb_reserve(skb, pad);
skb_put(skb, len);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb_set_owner_w(skb, tfile->socket.sk);
get_page(alloc_frag->page);
@@ -1668,6 +1606,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
char *buf;
size_t copied;
int pad = TUN_RX_PAD;
+ int metasize = 0;
int err = 0;
rcu_read_lock();
@@ -1695,7 +1634,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
if (hdr->gso_type || !xdp_prog) {
*skb_xdp = 1;
return __tun_build_skb(tfile, alloc_frag, buf, buflen, len,
- pad);
+ pad, metasize);
}
*skb_xdp = 0;
@@ -1709,7 +1648,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
u32 act;
xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq);
- xdp_prepare_buff(&xdp, buf, pad, len, false);
+ xdp_prepare_buff(&xdp, buf, pad, len, true);
act = bpf_prog_run_xdp(xdp_prog, &xdp);
if (act == XDP_REDIRECT || act == XDP_TX) {
@@ -1730,12 +1669,18 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
pad = xdp.data - xdp.data_hard_start;
len = xdp.data_end - xdp.data;
+
+ /* It is known that the xdp_buff was prepared with metadata
+ * support, so the metasize will never be negative.
+ */
+ metasize = xdp.data - xdp.data_meta;
}
bpf_net_ctx_clear(bpf_net_ctx);
rcu_read_unlock();
local_bh_enable();
- return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
+ return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad,
+ metasize);
out:
bpf_net_ctx_clear(bpf_net_ctx);
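With xdp_prepare_buff() now allowing metadata and tun copying it into the skb via skb_metadata_set(), an XDP program attached to the tun device can hand per-packet metadata up the stack. A small illustrative BPF program (not part of the patch) that reserves four bytes of metadata:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("xdp")
	int xdp_mark(struct xdp_md *ctx)
	{
		__u32 *meta;

		/* grow the metadata area by 4 bytes in front of the data */
		if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
			return XDP_PASS;

		meta = (void *)(long)ctx->data_meta;
		if ((void *)(meta + 1) > (void *)(long)ctx->data)
			return XDP_PASS;

		*meta = 0xcafe;	/* visible later as skb metadata */
		return XDP_PASS;
	}

	char _license[] SEC("license") = "GPL";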
@@ -1756,6 +1701,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
struct virtio_net_hdr gso = { 0 };
int good_linear;
int copylen;
+ int hdr_len = 0;
bool zerocopy = false;
int err;
u32 rxhash = 0;
@@ -1775,26 +1721,16 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (tun->flags & IFF_VNET_HDR) {
int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
- if (len < vnet_hdr_sz)
- return -EINVAL;
- len -= vnet_hdr_sz;
+ hdr_len = tun_vnet_hdr_get(vnet_hdr_sz, tun->flags, from, &gso);
+ if (hdr_len < 0)
+ return hdr_len;
- if (!copy_from_iter_full(&gso, sizeof(gso), from))
- return -EFAULT;
-
- if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
- tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
- gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
-
- if (tun16_to_cpu(tun, gso.hdr_len) > len)
- return -EINVAL;
- iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
+ len -= vnet_hdr_sz;
}
if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
align += NET_IP_ALIGN;
- if (unlikely(len < ETH_HLEN ||
- (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
+ if (unlikely(len < ETH_HLEN || (hdr_len && hdr_len < ETH_HLEN)))
return -EINVAL;
}
@@ -1807,9 +1743,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
* enough room for skb expand head in case it is used.
* The rest of the buffer is mapped from userspace.
*/
- copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
- if (copylen > good_linear)
- copylen = good_linear;
+ copylen = min(hdr_len ? hdr_len : GOODCOPY_LEN, good_linear);
linear = copylen;
iov_iter_advance(&i, copylen);
if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
@@ -1830,10 +1764,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
} else {
if (!zerocopy) {
copylen = len;
- if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
- linear = good_linear;
- else
- linear = tun16_to_cpu(tun, gso.hdr_len);
+ linear = min(hdr_len, good_linear);
}
if (frags) {
@@ -1868,7 +1799,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
}
}
- if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
+ if (tun_vnet_hdr_to_skb(tun->flags, skb, &gso)) {
atomic_long_inc(&tun->rx_frame_errors);
err = -EINVAL;
goto free_skb;
@@ -2063,18 +1994,15 @@ static ssize_t tun_put_user_xdp(struct tun_struct *tun,
{
int vnet_hdr_sz = 0;
size_t size = xdp_frame->len;
- size_t ret;
+ ssize_t ret;
if (tun->flags & IFF_VNET_HDR) {
struct virtio_net_hdr gso = { 0 };
vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
- if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
- return -EINVAL;
- if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
- sizeof(gso)))
- return -EFAULT;
- iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
+ ret = tun_vnet_hdr_put(vnet_hdr_sz, iter, &gso);
+ if (ret)
+ return ret;
}
ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
@@ -2097,6 +2025,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
int vlan_offset = 0;
int vlan_hlen = 0;
int vnet_hdr_sz = 0;
+ int ret;
if (skb_vlan_tag_present(skb))
vlan_hlen = VLAN_HLEN;
@@ -2123,31 +2052,13 @@ static ssize_t tun_put_user(struct tun_struct *tun,
if (vnet_hdr_sz) {
struct virtio_net_hdr gso;
- if (iov_iter_count(iter) < vnet_hdr_sz)
- return -EINVAL;
-
- if (virtio_net_hdr_from_skb(skb, &gso,
- tun_is_little_endian(tun), true,
- vlan_hlen)) {
- struct skb_shared_info *sinfo = skb_shinfo(skb);
-
- if (net_ratelimit()) {
- netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
- sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
- tun16_to_cpu(tun, gso.hdr_len));
- print_hex_dump(KERN_ERR, "tun: ",
- DUMP_PREFIX_NONE,
- 16, 1, skb->head,
- min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
- }
- WARN_ON_ONCE(1);
- return -EINVAL;
- }
-
- if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
- return -EFAULT;
+ ret = tun_vnet_hdr_from_skb(tun->flags, tun->dev, skb, &gso);
+ if (ret)
+ return ret;
- iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
+ ret = tun_vnet_hdr_put(vnet_hdr_sz, iter, &gso);
+ if (ret)
+ return ret;
}
if (vlan_hlen) {
@@ -2452,6 +2363,7 @@ static int tun_xdp_one(struct tun_struct *tun,
struct sk_buff_head *queue;
u32 rxhash = 0, act;
int buflen = hdr->buflen;
+ int metasize = 0;
int ret = 0;
bool skb_xdp = false;
struct page *page;
@@ -2467,7 +2379,6 @@ static int tun_xdp_one(struct tun_struct *tun,
}
xdp_init_buff(xdp, buflen, &tfile->xdp_rxq);
- xdp_set_data_meta_invalid(xdp);
act = bpf_prog_run_xdp(xdp_prog, xdp);
ret = tun_xdp_act(tun, xdp_prog, xdp, act);
@@ -2507,7 +2418,15 @@ build:
skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb_put(skb, xdp->data_end - xdp->data);
- if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
+ /* The externally provided xdp_buff may have no metadata support, which
+ * is marked by xdp->data_meta being xdp->data + 1. This will lead to a
+ * metasize of -1 and is the reason why the condition checks for > 0.
+ */
+ metasize = xdp->data - xdp->data_meta;
+ if (metasize > 0)
+ skb_metadata_set(skb, metasize);
+
+ if (tun_vnet_hdr_to_skb(tun->flags, skb, gso)) {
atomic_long_inc(&tun->rx_frame_errors);
kfree_skb(skb);
ret = -EINVAL;
@@ -3091,8 +3010,6 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
kgid_t group;
int ifindex;
int sndbuf;
- int vnet_hdr_sz;
- int le;
int ret;
bool do_notify = false;
@@ -3299,50 +3216,6 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
tun_set_sndbuf(tun);
break;
- case TUNGETVNETHDRSZ:
- vnet_hdr_sz = tun->vnet_hdr_sz;
- if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
- ret = -EFAULT;
- break;
-
- case TUNSETVNETHDRSZ:
- if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
- ret = -EFAULT;
- break;
- }
- if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
- ret = -EINVAL;
- break;
- }
-
- tun->vnet_hdr_sz = vnet_hdr_sz;
- break;
-
- case TUNGETVNETLE:
- le = !!(tun->flags & TUN_VNET_LE);
- if (put_user(le, (int __user *)argp))
- ret = -EFAULT;
- break;
-
- case TUNSETVNETLE:
- if (get_user(le, (int __user *)argp)) {
- ret = -EFAULT;
- break;
- }
- if (le)
- tun->flags |= TUN_VNET_LE;
- else
- tun->flags &= ~TUN_VNET_LE;
- break;
-
- case TUNGETVNETBE:
- ret = tun_get_vnet_be(tun, argp);
- break;
-
- case TUNSETVNETBE:
- ret = tun_set_vnet_be(tun, argp);
- break;
-
case TUNATTACHFILTER:
/* Can be set only for TAPs */
ret = -EINVAL;
@@ -3398,7 +3271,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
break;
default:
- ret = -EINVAL;
+ ret = tun_vnet_ioctl(&tun->vnet_hdr_sz, &tun->flags, cmd, argp);
break;
}
diff --git a/drivers/net/tun_vnet.h b/drivers/net/tun_vnet.h
new file mode 100644
index 000000000000..58b9ac7a5fc4
--- /dev/null
+++ b/drivers/net/tun_vnet.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef TUN_VNET_H
+#define TUN_VNET_H
+
+/* High bits in flags field are unused. */
+#define TUN_VNET_LE 0x80000000
+#define TUN_VNET_BE 0x40000000
+
+static inline bool tun_vnet_legacy_is_little_endian(unsigned int flags)
+{
+ bool be = IS_ENABLED(CONFIG_TUN_VNET_CROSS_LE) &&
+ (flags & TUN_VNET_BE);
+
+ return !be && virtio_legacy_is_little_endian();
+}
+
+static inline long tun_get_vnet_be(unsigned int flags, int __user *argp)
+{
+ int be = !!(flags & TUN_VNET_BE);
+
+ if (!IS_ENABLED(CONFIG_TUN_VNET_CROSS_LE))
+ return -EINVAL;
+
+ if (put_user(be, argp))
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline long tun_set_vnet_be(unsigned int *flags, int __user *argp)
+{
+ int be;
+
+ if (!IS_ENABLED(CONFIG_TUN_VNET_CROSS_LE))
+ return -EINVAL;
+
+ if (get_user(be, argp))
+ return -EFAULT;
+
+ if (be)
+ *flags |= TUN_VNET_BE;
+ else
+ *flags &= ~TUN_VNET_BE;
+
+ return 0;
+}
+
+static inline bool tun_vnet_is_little_endian(unsigned int flags)
+{
+ return flags & TUN_VNET_LE || tun_vnet_legacy_is_little_endian(flags);
+}
+
+static inline u16 tun_vnet16_to_cpu(unsigned int flags, __virtio16 val)
+{
+ return __virtio16_to_cpu(tun_vnet_is_little_endian(flags), val);
+}
+
+static inline __virtio16 cpu_to_tun_vnet16(unsigned int flags, u16 val)
+{
+ return __cpu_to_virtio16(tun_vnet_is_little_endian(flags), val);
+}
+
+static inline long tun_vnet_ioctl(int *vnet_hdr_sz, unsigned int *flags,
+ unsigned int cmd, int __user *sp)
+{
+ int s;
+
+ switch (cmd) {
+ case TUNGETVNETHDRSZ:
+ s = *vnet_hdr_sz;
+ if (put_user(s, sp))
+ return -EFAULT;
+ return 0;
+
+ case TUNSETVNETHDRSZ:
+ if (get_user(s, sp))
+ return -EFAULT;
+ if (s < (int)sizeof(struct virtio_net_hdr))
+ return -EINVAL;
+
+ *vnet_hdr_sz = s;
+ return 0;
+
+ case TUNGETVNETLE:
+ s = !!(*flags & TUN_VNET_LE);
+ if (put_user(s, sp))
+ return -EFAULT;
+ return 0;
+
+ case TUNSETVNETLE:
+ if (get_user(s, sp))
+ return -EFAULT;
+ if (s)
+ *flags |= TUN_VNET_LE;
+ else
+ *flags &= ~TUN_VNET_LE;
+ return 0;
+
+ case TUNGETVNETBE:
+ return tun_get_vnet_be(*flags, sp);
+
+ case TUNSETVNETBE:
+ return tun_set_vnet_be(flags, sp);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static inline int tun_vnet_hdr_get(int sz, unsigned int flags,
+ struct iov_iter *from,
+ struct virtio_net_hdr *hdr)
+{
+ u16 hdr_len;
+
+ if (iov_iter_count(from) < sz)
+ return -EINVAL;
+
+ if (!copy_from_iter_full(hdr, sizeof(*hdr), from))
+ return -EFAULT;
+
+ hdr_len = tun_vnet16_to_cpu(flags, hdr->hdr_len);
+
+ if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+ hdr_len = max(tun_vnet16_to_cpu(flags, hdr->csum_start) + tun_vnet16_to_cpu(flags, hdr->csum_offset) + 2, hdr_len);
+ hdr->hdr_len = cpu_to_tun_vnet16(flags, hdr_len);
+ }
+
+ if (hdr_len > iov_iter_count(from))
+ return -EINVAL;
+
+ iov_iter_advance(from, sz - sizeof(*hdr));
+
+ return hdr_len;
+}
+
+static inline int tun_vnet_hdr_put(int sz, struct iov_iter *iter,
+ const struct virtio_net_hdr *hdr)
+{
+ if (unlikely(iov_iter_count(iter) < sz))
+ return -EINVAL;
+
+ if (unlikely(copy_to_iter(hdr, sizeof(*hdr), iter) != sizeof(*hdr)))
+ return -EFAULT;
+
+ if (iov_iter_zero(sz - sizeof(*hdr), iter) != sz - sizeof(*hdr))
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline int tun_vnet_hdr_to_skb(unsigned int flags, struct sk_buff *skb,
+ const struct virtio_net_hdr *hdr)
+{
+ return virtio_net_hdr_to_skb(skb, hdr, tun_vnet_is_little_endian(flags));
+}
+
+static inline int tun_vnet_hdr_from_skb(unsigned int flags,
+ const struct net_device *dev,
+ const struct sk_buff *skb,
+ struct virtio_net_hdr *hdr)
+{
+ int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;
+
+ if (virtio_net_hdr_from_skb(skb, hdr,
+ tun_vnet_is_little_endian(flags), true,
+ vlan_hlen)) {
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+
+ if (net_ratelimit()) {
+ netdev_err(dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
+ sinfo->gso_type, tun_vnet16_to_cpu(flags, hdr->gso_size),
+ tun_vnet16_to_cpu(flags, hdr->hdr_len));
+ print_hex_dump(KERN_ERR, "tun: ",
+ DUMP_PREFIX_NONE,
+ 16, 1, skb->head,
+ min(tun_vnet16_to_cpu(flags, hdr->hdr_len), 64), true);
+ }
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#endif /* TUN_VNET_H */
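To make the intended use of the new header concrete, here is a condensed sketch of a receive-path consumer, paraphrasing the tun.c hunks above; the function name and locals are illustrative, not part of the header:

    /* Illustrative consumer of tun_vnet.h -- a condensed restatement of tun.c. */
    static int example_vnet_rx(unsigned int flags, int vnet_hdr_sz,
                               struct iov_iter *from, struct sk_buff *skb)
    {
            struct virtio_net_hdr gso = { 0 };
            int hdr_len;

            /* parse and sanitize the header, advance the iterator past it */
            hdr_len = tun_vnet_hdr_get(vnet_hdr_sz, flags, from, &gso);
            if (hdr_len < 0)
                    return hdr_len;         /* -EINVAL or -EFAULT */

            /* ... copy the remaining payload into skb ... */

            /* apply checksum/GSO metadata with the negotiated endianness */
            return tun_vnet_hdr_to_skb(flags, skb, &gso);
    }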
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 57d6e5abc30e..da24941a6e44 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -1421,6 +1421,19 @@ static const struct driver_info hg20f9_info = {
.data = FLAG_EEPROM_MAC,
};
+static const struct driver_info lyconsys_fibergecko100_info = {
+ .description = "LyconSys FiberGecko 100 USB 2.0 to SFP Adapter",
+ .bind = ax88178_bind,
+ .status = asix_status,
+ .link_reset = ax88178_link_reset,
+ .reset = ax88178_link_reset,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
+ FLAG_MULTI_PACKET,
+ .rx_fixup = asix_rx_fixup_common,
+ .tx_fixup = asix_tx_fixup,
+ .data = 0x20061201,
+};
+
static const struct usb_device_id products [] = {
{
// Linksys USB200M
@@ -1578,6 +1591,10 @@ static const struct usb_device_id products [] = {
// Linux Automation GmbH USB 10Base-T1L
USB_DEVICE(0x33f7, 0x0004),
.driver_info = (unsigned long) &lxausb_t1l_info,
+}, {
+ /* LyconSys FiberGecko 100 */
+ USB_DEVICE(0x1d2a, 0x0801),
+ .driver_info = (unsigned long) &lyconsys_fibergecko100_info,
},
{ }, // END
};
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index e47bb125048d..f613e4bc68c8 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -18,8 +18,8 @@
struct ax88172a_private {
struct mii_bus *mdio;
struct phy_device *phydev;
- char phy_name[20];
- u16 phy_addr;
+ char phy_name[PHY_ID_SIZE];
+ u8 phy_addr;
u16 oldmode;
int use_embdphy;
struct asix_rx_fixup_info rx_fixup_info;
@@ -210,7 +210,11 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
ret = asix_read_phy_addr(dev, priv->use_embdphy);
if (ret < 0)
goto free;
-
+ if (ret >= PHY_MAX_ADDR) {
+ netdev_err(dev->net, "Invalid PHY address %#x\n", ret);
+ ret = -ENODEV;
+ goto free;
+ }
priv->phy_addr = ret;
ax88172a_reset_phy(dev, priv->use_embdphy);
@@ -308,7 +312,7 @@ static int ax88172a_reset(struct usbnet *dev)
rx_ctl);
/* Connect to PHY */
- snprintf(priv->phy_name, 20, PHY_ID_FMT,
+ snprintf(priv->phy_name, sizeof(priv->phy_name), PHY_ID_FMT,
priv->mdio->id, priv->phy_addr);
priv->phydev = phy_connect(dev->net, priv->phy_name,
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index a6469235d904..a032c1ded406 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -783,6 +783,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* Lenovo ThinkPad Hybrid USB-C with USB-A Dock (40af0135eu, based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa359, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* Aquantia AQtion USB to 5GbE Controller (based on AQC111U) */
{
USB_DEVICE_AND_INTERFACE_INFO(AQUANTIA_VENDOR_ID, 0xc101,
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index e13e4920ee9b..dbf01210b0e7 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -660,12 +660,12 @@ static const struct usb_device_id mbim_devs[] = {
.driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
},
- /* Telit FN990 */
+ /* Telit FN990A */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1071, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
},
- /* Telit FE990 */
+ /* Telit FE990A */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1081, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
},
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index d5c47a2a62dc..34e82f1e37d9 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -833,8 +833,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
ctx->dev = dev;
- hrtimer_init(&ctx->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ctx->tx_timer.function = &cdc_ncm_tx_timer_cb;
+ hrtimer_setup(&ctx->tx_timer, &cdc_ncm_tx_timer_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
tasklet_setup(&ctx->bh, cdc_ncm_txpath_bh);
atomic_set(&ctx->stop, 0);
spin_lock_init(&ctx->mtx);
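The cdc_ncm change above is part of the tree-wide move to hrtimer_setup(), which folds hrtimer_init() and the callback assignment into a single call so the handler is set at initialization time. A minimal sketch of the pattern, with hypothetical demo_* names:

    #include <linux/hrtimer.h>

    struct demo_ctx {
            struct hrtimer tx_timer;
    };

    static enum hrtimer_restart demo_timer_cb(struct hrtimer *timer)
    {
            /* struct demo_ctx *ctx = container_of(timer, struct demo_ctx, tx_timer); */
            return HRTIMER_NORESTART;
    }

    static void demo_init(struct demo_ctx *ctx)
    {
            /* old style: hrtimer_init(&ctx->tx_timer, ...); ctx->tx_timer.function = demo_timer_cb; */
            hrtimer_setup(&ctx->tx_timer, demo_timer_cb, CLOCK_MONOTONIC,
                          HRTIMER_MODE_REL);
    }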
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index a91bf9c7e31d..137adf6d5b08 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -627,7 +627,7 @@ static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
kfree(buf);
- return ret;
+ return ret < 0 ? ret : 0;
}
static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
@@ -658,7 +658,7 @@ static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
kfree(buf);
- return ret;
+ return ret < 0 ? ret : 0;
}
static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index e9208a8d2bfa..b586b1c13a47 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1360,14 +1360,16 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1057, 2)}, /* Telit FN980 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
- {QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */
- {QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990A */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990A */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a0, 0)}, /* Telit FN920C04 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a4, 0)}, /* Telit FN920C04 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a9, 0)}, /* Telit FN920C04 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10b0, 0)}, /* Telit FE990B */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x10c0, 0)}, /* Telit FE910C04 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x10c4, 0)}, /* Telit FE910C04 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x10c8, 0)}, /* Telit FE910C04 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10d0, 0)}, /* Telit FN990B */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 468c73974046..2cab046749a9 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -785,6 +785,7 @@ enum rtl8152_flags {
#define DEVICE_ID_THINKPAD_USB_C_DONGLE 0x720c
#define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2 0xa387
#define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3 0x3062
+#define DEVICE_ID_THINKPAD_HYBRID_USB_C_DOCK 0xa359
struct tally_counter {
__le64 tx_packets;
@@ -9787,6 +9788,7 @@ static bool rtl8152_supports_lenovo_macpassthru(struct usb_device *udev)
case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3:
case DEVICE_ID_THINKPAD_USB_C_DONGLE:
+ case DEVICE_ID_THINKPAD_HYBRID_USB_C_DOCK:
return 1;
}
} else if (vendor_id == VENDOR_ID_REALTEK && parent_vendor_id == VENDOR_ID_LENOVO) {
@@ -10064,6 +10066,8 @@ static const struct usb_device_id rtl8152_table[] = {
{ USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927) },
{ USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0c5e) },
{ USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101) },
+
+ /* Lenovo */
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x304f) },
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x3054) },
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x3062) },
@@ -10074,11 +10078,14 @@ static const struct usb_device_id rtl8152_table[] = {
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x720c) },
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x7214) },
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x721e) },
+ { USB_DEVICE(VENDOR_ID_LENOVO, 0xa359) },
{ USB_DEVICE(VENDOR_ID_LENOVO, 0xa387) },
+
{ USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041) },
{ USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff) },
{ USB_DEVICE(VENDOR_ID_TPLINK, 0x0601) },
{ USB_DEVICE(VENDOR_ID_DLINK, 0xb301) },
+ { USB_DEVICE(VENDOR_ID_DELL, 0xb097) },
{ USB_DEVICE(VENDOR_ID_ASUS, 0x1976) },
{}
};
diff --git a/drivers/net/usb/r8153_ecm.c b/drivers/net/usb/r8153_ecm.c
index 20b2df8d74ae..8d860dacdf49 100644
--- a/drivers/net/usb/r8153_ecm.c
+++ b/drivers/net/usb/r8153_ecm.c
@@ -135,6 +135,12 @@ static const struct usb_device_id products[] = {
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&r8153_info,
},
+/* Lenovo ThinkPad Hybrid USB-C with USB-A Dock (40af0135eu, based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID_LENOVO, 0xa359, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&r8153_info,
+},
{ }, /* END */
};
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 01251868a9c2..7bb53961c0ea 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -17,6 +17,7 @@
#include <net/rtnetlink.h>
#include <net/dst.h>
+#include <net/netdev_lock.h>
#include <net/xfrm.h>
#include <net/xdp.h>
#include <linux/veth.h>
@@ -684,8 +685,7 @@ static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames,
void *skbs[VETH_XDP_BATCH];
int i;
- if (xdp_alloc_skb_bulk(skbs, n_xdpf,
- GFP_ATOMIC | __GFP_ZERO) < 0) {
+ if (unlikely(!napi_skb_cache_get_bulk(skbs, n_xdpf))) {
for (i = 0; i < n_xdpf; i++)
xdp_return_frame(frames[i]);
stats->rx_drops += n_xdpf;
@@ -1765,10 +1765,13 @@ static int veth_init_queues(struct net_device *dev, struct nlattr *tb[])
return 0;
}
-static int veth_newlink(struct net *peer_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int veth_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *peer_net = rtnl_newlink_peer_net(params);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
int err;
struct net_device *peer;
struct veth_priv *priv;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7646ddd9bef7..7e4617216a4b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -360,24 +360,7 @@ struct receive_queue {
struct xdp_buff **xsk_buffs;
};
-/* This structure can contain rss message with maximum settings for indirection table and keysize
- * Note, that default structure that describes RSS configuration virtio_net_rss_config
- * contains same info but can't handle table values.
- * In any case, structure would be passed to virtio hw through sg_buf split by parts
- * because table sizes may be differ according to the device configuration.
- */
#define VIRTIO_NET_RSS_MAX_KEY_SIZE 40
-struct virtio_net_ctrl_rss {
- u32 hash_types;
- u16 indirection_table_mask;
- u16 unclassified_queue;
- u16 hash_cfg_reserved; /* for HASH_CONFIG (see virtio_net_hash_config for details) */
- u16 max_tx_vq;
- u8 hash_key_length;
- u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
-
- u16 *indirection_table;
-};
/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
@@ -421,7 +404,9 @@ struct virtnet_info {
u16 rss_indir_table_size;
u32 rss_hash_types_supported;
u32 rss_hash_types_saved;
- struct virtio_net_ctrl_rss rss;
+ struct virtio_net_rss_config_hdr *rss_hdr;
+ struct virtio_net_rss_config_trailer rss_trailer;
+ u8 rss_hash_key_data[VIRTIO_NET_RSS_MAX_KEY_SIZE];
/* Has control virtqueue */
bool has_cvq;
@@ -523,23 +508,16 @@ enum virtnet_xmit_type {
VIRTNET_XMIT_TYPE_XSK,
};
-static int rss_indirection_table_alloc(struct virtio_net_ctrl_rss *rss, u16 indir_table_size)
+static size_t virtnet_rss_hdr_size(const struct virtnet_info *vi)
{
- if (!indir_table_size) {
- rss->indirection_table = NULL;
- return 0;
- }
-
- rss->indirection_table = kmalloc_array(indir_table_size, sizeof(u16), GFP_KERNEL);
- if (!rss->indirection_table)
- return -ENOMEM;
+ u16 indir_table_size = vi->has_rss ? vi->rss_indir_table_size : 1;
- return 0;
+ return struct_size(vi->rss_hdr, indirection_table, indir_table_size);
}
-static void rss_indirection_table_free(struct virtio_net_ctrl_rss *rss)
+static size_t virtnet_rss_trailer_size(const struct virtnet_info *vi)
{
- kfree(rss->indirection_table);
+ return struct_size(&vi->rss_trailer, hash_key_data, vi->rss_key_size);
}
/* We use the last two bits of the pointer to distinguish the xmit type. */
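The driver-private struct virtio_net_ctrl_rss is replaced here by the wire-format header (with its flexible indirection table) plus a trailer carrying max_tx_vq and the hash key, each sized with struct_size() and later handed to the control virtqueue as two scatterlist entries (see the virtnet_commit_rss_command() hunk further down). A minimal sketch of the struct_size() idiom, using a hypothetical demo_hdr type:

    #include <linux/device.h>
    #include <linux/overflow.h>     /* struct_size() */

    struct demo_hdr {
            __le32 hash_types;
            __le16 indirection_table_mask;
            __le16 unclassified_queue;
            __le16 indirection_table[];     /* flexible array, device-sized */
    };

    static struct demo_hdr *demo_hdr_alloc(struct device *dev, u16 entries)
    {
            struct demo_hdr *hdr;

            /* struct_size() computes sizeof(*hdr) + entries * sizeof(__le16)
             * with overflow checking; hdr is only used for its type here.
             */
            hdr = devm_kzalloc(dev, struct_size(hdr, indirection_table, entries),
                               GFP_KERNEL);
            return hdr;
    }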
@@ -1088,11 +1066,10 @@ static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
return false;
}
-static void check_sq_full_and_disable(struct virtnet_info *vi,
- struct net_device *dev,
- struct send_queue *sq)
+static bool tx_may_stop(struct virtnet_info *vi,
+ struct net_device *dev,
+ struct send_queue *sq)
{
- bool use_napi = sq->napi.weight;
int qnum;
qnum = sq - vi->sq;
@@ -1114,6 +1091,25 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
u64_stats_update_begin(&sq->stats.syncp);
u64_stats_inc(&sq->stats.stop);
u64_stats_update_end(&sq->stats.syncp);
+
+ return true;
+ }
+
+ return false;
+}
+
+static void check_sq_full_and_disable(struct virtnet_info *vi,
+ struct net_device *dev,
+ struct send_queue *sq)
+{
+ bool use_napi = sq->napi.weight;
+ int qnum;
+
+ qnum = sq - vi->sq;
+
+ if (tx_may_stop(vi, dev, sq)) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
+
if (use_napi) {
if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
virtqueue_napi_schedule(&sq->napi, sq->vq);
@@ -2789,7 +2785,8 @@ static void skb_recv_done(struct virtqueue *rvq)
virtqueue_napi_schedule(&rq->napi, rvq);
}
-static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
+static void virtnet_napi_do_enable(struct virtqueue *vq,
+ struct napi_struct *napi)
{
napi_enable(napi);
@@ -2802,10 +2799,21 @@ static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
local_bh_enable();
}
-static void virtnet_napi_tx_enable(struct virtnet_info *vi,
- struct virtqueue *vq,
- struct napi_struct *napi)
+static void virtnet_napi_enable(struct receive_queue *rq)
{
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ int qidx = vq2rxq(rq->vq);
+
+ virtnet_napi_do_enable(rq->vq, &rq->napi);
+ netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, &rq->napi);
+}
+
+static void virtnet_napi_tx_enable(struct send_queue *sq)
+{
+ struct virtnet_info *vi = sq->vq->vdev->priv;
+ struct napi_struct *napi = &sq->napi;
+ int qidx = vq2txq(sq->vq);
+
if (!napi->weight)
return;
@@ -2817,13 +2825,30 @@ static void virtnet_napi_tx_enable(struct virtnet_info *vi,
return;
}
- return virtnet_napi_enable(vq, napi);
+ virtnet_napi_do_enable(sq->vq, napi);
+ netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, napi);
}
-static void virtnet_napi_tx_disable(struct napi_struct *napi)
+static void virtnet_napi_tx_disable(struct send_queue *sq)
{
- if (napi->weight)
+ struct virtnet_info *vi = sq->vq->vdev->priv;
+ struct napi_struct *napi = &sq->napi;
+ int qidx = vq2txq(sq->vq);
+
+ if (napi->weight) {
+ netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, NULL);
napi_disable(napi);
+ }
+}
+
+static void virtnet_napi_disable(struct receive_queue *rq)
+{
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ struct napi_struct *napi = &rq->napi;
+ int qidx = vq2rxq(rq->vq);
+
+ netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, NULL);
+ napi_disable(napi);
}
static void refill_work(struct work_struct *work)
@@ -2836,9 +2861,23 @@ static void refill_work(struct work_struct *work)
for (i = 0; i < vi->curr_queue_pairs; i++) {
struct receive_queue *rq = &vi->rq[i];
+ /*
+ * When queue API support is added in the future and the call
+ * below becomes napi_disable_locked, this driver will need to
+ * be refactored.
+ *
+ * One possible solution would be to:
+ * - cancel refill_work with cancel_delayed_work (note:
+ * non-sync)
+ * - cancel refill_work with cancel_delayed_work_sync in
+ * virtnet_remove after the netdev is unregistered
+ * - wrap all of the work in a lock (perhaps the netdev
+ * instance lock)
+ * - check netif_running() and return early to avoid a race
+ */
napi_disable(&rq->napi);
still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
- virtnet_napi_enable(rq->vq, &rq->napi);
+ virtnet_napi_do_enable(rq->vq, &rq->napi);
/* In theory, this can happen: if we don't get any buffers in
* we will *never* try to fill again.
@@ -3035,8 +3074,8 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
{
- virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
- napi_disable(&vi->rq[qp_index].napi);
+ virtnet_napi_tx_disable(&vi->sq[qp_index]);
+ virtnet_napi_disable(&vi->rq[qp_index]);
xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
}
@@ -3055,8 +3094,8 @@ static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
if (err < 0)
goto err_xdp_reg_mem_model;
- virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
- virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
+ virtnet_napi_enable(&vi->rq[qp_index]);
+ virtnet_napi_tx_enable(&vi->sq[qp_index]);
return 0;
@@ -3253,15 +3292,10 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
bool use_napi = sq->napi.weight;
bool kick;
- /* Free up any pending old buffers before queueing new ones. */
- do {
- if (use_napi)
- virtqueue_disable_cb(sq->vq);
-
+ if (!use_napi)
free_old_xmit(sq, txq, false);
-
- } while (use_napi && !xmit_more &&
- unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
+ else
+ virtqueue_disable_cb(sq->vq);
/* timestamp packet in software */
skb_tx_timestamp(skb);
@@ -3287,7 +3321,10 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
nf_reset_ct(skb);
}
- check_sq_full_and_disable(vi, dev, sq);
+ if (use_napi)
+ tx_may_stop(vi, dev, sq);
+ else
+ check_sq_full_and_disable(vi, dev, sq);
kick = use_napi ? __netdev_tx_sent_queue(txq, skb->len, xmit_more) :
!xmit_more || netif_xmit_stopped(txq);
@@ -3299,6 +3336,9 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
+ if (use_napi && kick && unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
+ virtqueue_napi_schedule(&sq->napi, sq->vq);
+
return NETDEV_TX_OK;
}
@@ -3307,7 +3347,7 @@ static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
bool running = netif_running(vi->dev);
if (running) {
- napi_disable(&rq->napi);
+ virtnet_napi_disable(rq);
virtnet_cancel_dim(vi, &rq->dim);
}
}
@@ -3320,7 +3360,7 @@ static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
schedule_delayed_work(&vi->refill, 0);
if (running)
- virtnet_napi_enable(rq->vq, &rq->napi);
+ virtnet_napi_enable(rq);
}
static int virtnet_rx_resize(struct virtnet_info *vi,
@@ -3349,7 +3389,7 @@ static void virtnet_tx_pause(struct virtnet_info *vi, struct send_queue *sq)
qindex = sq - vi->sq;
if (running)
- virtnet_napi_tx_disable(&sq->napi);
+ virtnet_napi_tx_disable(sq);
txq = netdev_get_tx_queue(vi->dev, qindex);
@@ -3383,7 +3423,7 @@ static void virtnet_tx_resume(struct virtnet_info *vi, struct send_queue *sq)
__netif_tx_unlock_bh(txq);
if (running)
- virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
+ virtnet_napi_tx_enable(sq);
}
static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq,
@@ -3576,15 +3616,16 @@ static void virtnet_rss_update_by_qpairs(struct virtnet_info *vi, u16 queue_pair
for (; i < vi->rss_indir_table_size; ++i) {
indir_val = ethtool_rxfh_indir_default(i, queue_pairs);
- vi->rss.indirection_table[i] = indir_val;
+ vi->rss_hdr->indirection_table[i] = cpu_to_le16(indir_val);
}
- vi->rss.max_tx_vq = queue_pairs;
+ vi->rss_trailer.max_tx_vq = cpu_to_le16(queue_pairs);
}
static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;
- struct virtio_net_ctrl_rss old_rss;
+ struct virtio_net_rss_config_hdr *old_rss_hdr;
+ struct virtio_net_rss_config_trailer old_rss_trailer;
struct net_device *dev = vi->dev;
struct scatterlist sg;
@@ -3599,24 +3640,28 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
* update (VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET below) and return directly.
*/
if (vi->has_rss && !netif_is_rxfh_configured(dev)) {
- memcpy(&old_rss, &vi->rss, sizeof(old_rss));
- if (rss_indirection_table_alloc(&vi->rss, vi->rss_indir_table_size)) {
- vi->rss.indirection_table = old_rss.indirection_table;
+ old_rss_hdr = vi->rss_hdr;
+ old_rss_trailer = vi->rss_trailer;
+ vi->rss_hdr = devm_kzalloc(&dev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL);
+ if (!vi->rss_hdr) {
+ vi->rss_hdr = old_rss_hdr;
return -ENOMEM;
}
+ *vi->rss_hdr = *old_rss_hdr;
virtnet_rss_update_by_qpairs(vi, queue_pairs);
if (!virtnet_commit_rss_command(vi)) {
/* restore ctrl_rss if commit_rss_command failed */
- rss_indirection_table_free(&vi->rss);
- memcpy(&vi->rss, &old_rss, sizeof(old_rss));
+ devm_kfree(&dev->dev, vi->rss_hdr);
+ vi->rss_hdr = old_rss_hdr;
+ vi->rss_trailer = old_rss_trailer;
dev_warn(&dev->dev, "Fail to set num of queue pairs to %d, because committing RSS failed\n",
queue_pairs);
return -EINVAL;
}
- rss_indirection_table_free(&old_rss);
+ devm_kfree(&dev->dev, old_rss_hdr);
goto succ;
}
@@ -3826,7 +3871,7 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
cpumask_var_t mask;
int stragglers;
int group_size;
- int i, j, cpu;
+ int i, start = 0, cpu;
int num_cpu;
int stride;
@@ -3840,16 +3885,18 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
stragglers = num_cpu >= vi->curr_queue_pairs ?
num_cpu % vi->curr_queue_pairs :
0;
- cpu = cpumask_first(cpu_online_mask);
for (i = 0; i < vi->curr_queue_pairs; i++) {
group_size = stride + (i < stragglers ? 1 : 0);
- for (j = 0; j < group_size; j++) {
+ for_each_online_cpu_wrap(cpu, start) {
+ if (!group_size--) {
+ start = cpu;
+ break;
+ }
cpumask_set_cpu(cpu, mask);
- cpu = cpumask_next_wrap(cpu, cpu_online_mask,
- nr_cpu_ids, false);
}
+
virtqueue_set_affinity(vi->rq[i].vq, mask);
virtqueue_set_affinity(vi->sq[i].vq, mask);
__netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
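The affinity loop now walks the online CPUs with for_each_online_cpu_wrap(), resuming from where the previous queue pair stopped, instead of stepping manually with cpumask_next_wrap(). A condensed sketch of the iteration pattern (demo_* names hypothetical):

    static void demo_fill_group(struct cpumask *mask, unsigned int group_size,
                                unsigned int *start)
    {
            unsigned int cpu;

            cpumask_clear(mask);
            /* walk online CPUs beginning at *start, wrapping around the mask */
            for_each_online_cpu_wrap(cpu, *start) {
                    if (!group_size--) {
                            *start = cpu;   /* the next group resumes here */
                            break;
                    }
                    cpumask_set_cpu(cpu, mask);
            }
    }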
@@ -4059,28 +4106,12 @@ static int virtnet_set_ringparam(struct net_device *dev,
static bool virtnet_commit_rss_command(struct virtnet_info *vi)
{
struct net_device *dev = vi->dev;
- struct scatterlist sgs[4];
- unsigned int sg_buf_size;
+ struct scatterlist sgs[2];
/* prepare sgs */
- sg_init_table(sgs, 4);
-
- sg_buf_size = offsetof(struct virtio_net_ctrl_rss, hash_cfg_reserved);
- sg_set_buf(&sgs[0], &vi->rss, sg_buf_size);
-
- if (vi->has_rss) {
- sg_buf_size = sizeof(uint16_t) * vi->rss_indir_table_size;
- sg_set_buf(&sgs[1], vi->rss.indirection_table, sg_buf_size);
- } else {
- sg_set_buf(&sgs[1], &vi->rss.hash_cfg_reserved, sizeof(uint16_t));
- }
-
- sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
- - offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
- sg_set_buf(&sgs[2], &vi->rss.max_tx_vq, sg_buf_size);
-
- sg_buf_size = vi->rss_key_size;
- sg_set_buf(&sgs[3], vi->rss.key, sg_buf_size);
+ sg_init_table(sgs, 2);
+ sg_set_buf(&sgs[0], vi->rss_hdr, virtnet_rss_hdr_size(vi));
+ sg_set_buf(&sgs[1], &vi->rss_trailer, virtnet_rss_trailer_size(vi));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
@@ -4097,17 +4128,17 @@ err:
static void virtnet_init_default_rss(struct virtnet_info *vi)
{
- vi->rss.hash_types = vi->rss_hash_types_supported;
+ vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_supported);
vi->rss_hash_types_saved = vi->rss_hash_types_supported;
- vi->rss.indirection_table_mask = vi->rss_indir_table_size
- ? vi->rss_indir_table_size - 1 : 0;
- vi->rss.unclassified_queue = 0;
+ vi->rss_hdr->indirection_table_mask = vi->rss_indir_table_size
+ ? cpu_to_le16(vi->rss_indir_table_size - 1) : 0;
+ vi->rss_hdr->unclassified_queue = 0;
virtnet_rss_update_by_qpairs(vi, vi->curr_queue_pairs);
- vi->rss.hash_key_length = vi->rss_key_size;
+ vi->rss_trailer.hash_key_length = vi->rss_key_size;
- netdev_rss_key_fill(vi->rss.key, vi->rss_key_size);
+ netdev_rss_key_fill(vi->rss_hash_key_data, vi->rss_key_size);
}
static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
@@ -4218,7 +4249,7 @@ static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *
if (new_hashtypes != vi->rss_hash_types_saved) {
vi->rss_hash_types_saved = new_hashtypes;
- vi->rss.hash_types = vi->rss_hash_types_saved;
+ vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved);
if (vi->dev->features & NETIF_F_RXHASH)
return virtnet_commit_rss_command(vi);
}
@@ -5398,11 +5429,11 @@ static int virtnet_get_rxfh(struct net_device *dev,
if (rxfh->indir) {
for (i = 0; i < vi->rss_indir_table_size; ++i)
- rxfh->indir[i] = vi->rss.indirection_table[i];
+ rxfh->indir[i] = le16_to_cpu(vi->rss_hdr->indirection_table[i]);
}
if (rxfh->key)
- memcpy(rxfh->key, vi->rss.key, vi->rss_key_size);
+ memcpy(rxfh->key, vi->rss_hash_key_data, vi->rss_key_size);
rxfh->hfunc = ETH_RSS_HASH_TOP;
@@ -5426,7 +5457,7 @@ static int virtnet_set_rxfh(struct net_device *dev,
return -EOPNOTSUPP;
for (i = 0; i < vi->rss_indir_table_size; ++i)
- vi->rss.indirection_table[i] = rxfh->indir[i];
+ vi->rss_hdr->indirection_table[i] = cpu_to_le16(rxfh->indir[i]);
update = true;
}
@@ -5438,7 +5469,7 @@ static int virtnet_set_rxfh(struct net_device *dev,
if (!vi->has_rss && !vi->has_rss_hash_report)
return -EOPNOTSUPP;
- memcpy(vi->rss.key, rxfh->key, vi->rss_key_size);
+ memcpy(vi->rss_hash_key_data, rxfh->key, vi->rss_key_size);
update = true;
}
@@ -5615,8 +5646,11 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
netif_tx_lock_bh(vi->dev);
netif_device_detach(vi->dev);
netif_tx_unlock_bh(vi->dev);
- if (netif_running(vi->dev))
+ if (netif_running(vi->dev)) {
+ rtnl_lock();
virtnet_close(vi->dev);
+ rtnl_unlock();
+ }
}
static int init_vqs(struct virtnet_info *vi);
@@ -5636,7 +5670,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
enable_rx_mode_work(vi);
if (netif_running(vi->dev)) {
+ rtnl_lock();
err = virtnet_open(vi->dev);
+ rtnl_unlock();
if (err)
return err;
}
@@ -5926,8 +5962,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
/* Make sure NAPI is not using any XDP TX queues for RX. */
if (netif_running(dev)) {
for (i = 0; i < vi->max_queue_pairs; i++) {
- napi_disable(&vi->rq[i].napi);
- virtnet_napi_tx_disable(&vi->sq[i].napi);
+ virtnet_napi_disable(&vi->rq[i]);
+ virtnet_napi_tx_disable(&vi->sq[i]);
}
}
@@ -5964,9 +6000,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
if (old_prog)
bpf_prog_put(old_prog);
if (netif_running(dev)) {
- virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
- virtnet_napi_tx_enable(vi, vi->sq[i].vq,
- &vi->sq[i].napi);
+ virtnet_napi_enable(&vi->rq[i]);
+ virtnet_napi_tx_enable(&vi->sq[i]);
}
}
@@ -5981,9 +6016,8 @@ err:
if (netif_running(dev)) {
for (i = 0; i < vi->max_queue_pairs; i++) {
- virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
- virtnet_napi_tx_enable(vi, vi->sq[i].vq,
- &vi->sq[i].napi);
+ virtnet_napi_enable(&vi->rq[i]);
+ virtnet_napi_tx_enable(&vi->sq[i]);
}
}
if (prog)
@@ -6044,9 +6078,9 @@ static int virtnet_set_features(struct net_device *dev,
if ((dev->features ^ features) & NETIF_F_RXHASH) {
if (features & NETIF_F_RXHASH)
- vi->rss.hash_types = vi->rss_hash_types_saved;
+ vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved);
else
- vi->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
+ vi->rss_hdr->hash_types = cpu_to_le32(VIRTIO_NET_HASH_REPORT_NONE);
if (!virtnet_commit_rss_command(vi))
return -EINVAL;
@@ -6390,8 +6424,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
INIT_DELAYED_WORK(&vi->refill, refill_work);
for (i = 0; i < vi->max_queue_pairs; i++) {
vi->rq[i].pages = NULL;
- netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll,
- napi_weight);
+ netif_napi_add_config(vi->dev, &vi->rq[i].napi, virtnet_poll,
+ i);
+ vi->rq[i].napi.weight = napi_weight;
netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi,
virtnet_poll_tx,
napi_tx ? napi_weight : 0);
@@ -6735,9 +6770,11 @@ static int virtnet_probe(struct virtio_device *vdev)
virtio_cread16(vdev, offsetof(struct virtio_net_config,
rss_max_indirection_table_length));
}
- err = rss_indirection_table_alloc(&vi->rss, vi->rss_indir_table_size);
- if (err)
+ vi->rss_hdr = devm_kzalloc(&vdev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL);
+ if (!vi->rss_hdr) {
+ err = -ENOMEM;
goto free;
+ }
if (vi->has_rss || vi->has_rss_hash_report) {
vi->rss_key_size =
@@ -7016,8 +7053,6 @@ static void virtnet_remove(struct virtio_device *vdev)
remove_vq_common(vi);
- rss_indirection_table_free(&vi->rss);
-
free_netdev(vi->dev);
}
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 6793fa09f9d1..3df6aabc7e33 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2033,6 +2033,11 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
rq->comp_ring.gen = VMXNET3_INIT_GEN;
rq->comp_ring.next2proc = 0;
+
+ if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
+ page_pool_destroy(rq->page_pool);
+ rq->page_pool = NULL;
}
@@ -2073,11 +2078,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
}
}
- if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
- xdp_rxq_info_unreg(&rq->xdp_rxq);
- page_pool_destroy(rq->page_pool);
- rq->page_pool = NULL;
-
if (rq->data_ring.base) {
dma_free_coherent(&adapter->pdev->dev,
rq->rx_ring[0].size * rq->data_ring.desc_size,
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index ca81b212a246..7168b33adadb 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -34,6 +34,7 @@
#include <net/addrconf.h>
#include <net/l3mdev.h>
#include <net/fib_rules.h>
+#include <net/netdev_lock.h>
#include <net/sch_generic.h>
#include <net/netns/generic.h>
#include <net/netfilter/nf_conntrack.h>
@@ -1537,14 +1538,12 @@ static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
nlmsg_end(skb, nlh);
- /* fib_nl_{new,del}rule handling looks for net from skb->sk */
- skb->sk = dev_net(dev)->rtnl;
if (add_it) {
- err = fib_nl_newrule(skb, nlh, NULL);
+ err = fib_newrule(dev_net(dev), skb, nlh, NULL, true);
if (err == -EEXIST)
err = 0;
} else {
- err = fib_nl_delrule(skb, nlh, NULL);
+ err = fib_delrule(dev_net(dev), skb, nlh, NULL, true);
if (err == -ENOENT)
err = 0;
}
@@ -1619,7 +1618,7 @@ static void vrf_setup(struct net_device *dev)
dev->lltx = true;
/* don't allow vrf devices to change network namespaces. */
- dev->netns_local = true;
+ dev->netns_immutable = true;
/* does not make sense for a VLAN to be added to a vrf device */
dev->features |= NETIF_F_VLAN_CHALLENGED;
@@ -1677,11 +1676,12 @@ static void vrf_dellink(struct net_device *dev, struct list_head *head)
unregister_netdevice_queue(dev, head);
}
-static int vrf_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int vrf_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
struct net_vrf *vrf = netdev_priv(dev);
+ struct nlattr **data = params->data;
struct netns_vrf *nn_vrf;
bool *add_fib_rules;
struct net *net;
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 92516189e792..8c49e903cb3a 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -25,6 +25,7 @@
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <net/netdev_lock.h>
#include <net/tun_proto.h>
#include <net/vxlan.h>
#include <net/nexthop.h>
@@ -227,9 +228,9 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
be32_to_cpu(fdb->vni)))
goto nla_put_failure;
- ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
+ ci.ndm_used = jiffies_to_clock_t(now - READ_ONCE(fdb->used));
ci.ndm_confirmed = 0;
- ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
+ ci.ndm_updated = jiffies_to_clock_t(now - READ_ONCE(fdb->updated));
ci.ndm_refcnt = 0;
if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
@@ -434,8 +435,12 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
struct vxlan_fdb *f;
f = __vxlan_find_mac(vxlan, mac, vni);
- if (f && f->used != jiffies)
- f->used = jiffies;
+ if (f) {
+ unsigned long now = jiffies;
+
+ if (READ_ONCE(f->used) != now)
+ WRITE_ONCE(f->used, now);
+ }
return f;
}
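The fdb timestamps are now touched outside the hash lock, so the accesses above are annotated with READ_ONCE()/WRITE_ONCE() and a store is only issued when the value actually changes, avoiding needless cacheline dirtying on the fast path. A minimal sketch of the pattern (demo_* names hypothetical):

    struct demo_entry {
            unsigned long used;     /* updated locklessly on the fast path */
    };

    static void demo_touch(struct demo_entry *e)
    {
            unsigned long now = jiffies;

            /* annotated accesses; skip the store if nothing would change */
            if (READ_ONCE(e->used) != now)
                    WRITE_ONCE(e->used, now);
    }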
@@ -1009,12 +1014,10 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan,
!(f->flags & NTF_VXLAN_ADDED_BY_USER)) {
if (f->state != state) {
f->state = state;
- f->updated = jiffies;
notify = 1;
}
if (f->flags != fdb_flags) {
f->flags = fdb_flags;
- f->updated = jiffies;
notify = 1;
}
}
@@ -1048,12 +1051,13 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan,
}
if (ndm_flags & NTF_USE)
- f->used = jiffies;
+ WRITE_ONCE(f->updated, jiffies);
if (notify) {
if (rd == NULL)
rd = first_remote_rtnl(f);
+ WRITE_ONCE(f->updated, jiffies);
err = vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH,
swdev_notify, extack);
if (err)
@@ -1292,7 +1296,7 @@ int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
struct vxlan_fdb *f;
int err = -ENOENT;
- f = vxlan_find_mac(vxlan, addr, src_vni);
+ f = __vxlan_find_mac(vxlan, addr, src_vni);
if (!f)
return err;
@@ -1459,9 +1463,13 @@ static enum skb_drop_reason vxlan_snoop(struct net_device *dev,
ifindex = src_ifindex;
#endif
- f = vxlan_find_mac(vxlan, src_mac, vni);
+ f = __vxlan_find_mac(vxlan, src_mac, vni);
if (likely(f)) {
struct vxlan_rdst *rdst = first_remote_rcu(f);
+ unsigned long now = jiffies;
+
+ if (READ_ONCE(f->updated) != now)
+ WRITE_ONCE(f->updated, now);
if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip) &&
rdst->remote_ifindex == ifindex))
@@ -1481,7 +1489,6 @@ static enum skb_drop_reason vxlan_snoop(struct net_device *dev,
src_mac, &rdst->remote_ip.sa, &src_ip->sa);
rdst->remote_ip = *src_ip;
- f->updated = jiffies;
vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true, NULL);
} else {
u32 hash_index = fdb_head_index(vxlan, src_mac, vni);
@@ -1664,7 +1671,6 @@ static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
return err <= 1;
}
-/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
{
struct vxlan_vni_node *vninode = NULL;
@@ -1834,7 +1840,6 @@ drop:
return 0;
}
-/* Callback from net/ipv{4,6}/udp.c to check that we have a VNI for errors */
static int vxlan_err_lookup(struct sock *sk, struct sk_buff *skb)
{
struct vxlan_dev *vxlan;
@@ -2852,7 +2857,7 @@ static void vxlan_cleanup(struct timer_list *t)
if (f->flags & NTF_EXT_LEARNED)
continue;
- timeout = f->used + vxlan->cfg.age_interval * HZ;
+ timeout = READ_ONCE(f->updated) + vxlan->cfg.age_interval * HZ;
if (time_before_eq(timeout, jiffies)) {
netdev_dbg(vxlan->dev,
"garbage collect %pM\n",
@@ -3932,7 +3937,7 @@ static void vxlan_config_apply(struct net_device *dev,
}
static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
- struct vxlan_config *conf, bool changelink,
+ struct vxlan_config *conf,
struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
@@ -3943,7 +3948,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
if (ret)
return ret;
- vxlan_config_apply(dev, conf, lowerdev, src_net, changelink);
+ vxlan_config_apply(dev, conf, lowerdev, src_net, false);
return 0;
}
@@ -3961,7 +3966,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
int err;
dst = &vxlan->default_dst;
- err = vxlan_dev_configure(net, dev, conf, false, extack);
+ err = vxlan_dev_configure(net, dev, conf, extack);
if (err)
return err;
@@ -4396,10 +4401,13 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
-static int vxlan_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int vxlan_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct vxlan_config conf;
int err;
@@ -4407,7 +4415,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
if (err)
return err;
- return __vxlan_dev_create(src_net, dev, &conf, extack);
+ return __vxlan_dev_create(link_net, dev, &conf, extack);
}
static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
@@ -4415,6 +4423,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
+ bool rem_ip_changed, change_igmp;
struct net_device *lowerdev;
struct vxlan_config conf;
struct vxlan_rdst *dst;
@@ -4438,8 +4447,13 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
if (err)
return err;
+ rem_ip_changed = !vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip);
+ change_igmp = vxlan->dev->flags & IFF_UP &&
+ (rem_ip_changed ||
+ dst->remote_ifindex != conf.remote_ifindex);
+
/* handle default dst entry */
- if (!vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip)) {
+ if (rem_ip_changed) {
u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, conf.vni);
spin_lock_bh(&vxlan->hash_lock[hash_index]);
@@ -4483,6 +4497,9 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
}
}
+ if (change_igmp && vxlan_addr_multicast(&dst->remote_ip))
+ err = vxlan_multicast_leave(vxlan);
+
if (conf.age_interval != vxlan->cfg.age_interval)
mod_timer(&vxlan->age_timer, jiffies);
@@ -4490,7 +4507,12 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
if (lowerdev && lowerdev != dst->remote_dev)
dst->remote_dev = lowerdev;
vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true);
- return 0;
+
+ if (!err && change_igmp &&
+ vxlan_addr_multicast(&dst->remote_ip))
+ err = vxlan_multicast_join(vxlan);
+
+ return err;
}
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
@@ -4768,7 +4790,7 @@ vxlan_fdb_offloaded_set(struct net_device *dev,
spin_lock_bh(&vxlan->hash_lock[hash_index]);
- f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
+ f = __vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
if (!f)
goto out;
@@ -4824,7 +4846,7 @@ vxlan_fdb_external_learn_del(struct net_device *dev,
hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni);
spin_lock_bh(&vxlan->hash_lock[hash_index]);
- f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
+ f = __vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
if (!f)
err = -ENOENT;
else if (f->flags & NTF_EXT_LEARNED)
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index 6cf173a008e7..c496d35b266d 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -307,14 +307,15 @@ static void wg_setup(struct net_device *dev)
wg->dev = dev;
}
-static int wg_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int wg_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
struct wg_device *wg = netdev_priv(dev);
int ret = -ENOMEM;
- rcu_assign_pointer(wg->creating_net, src_net);
+ rcu_assign_pointer(wg->creating_net, link_net);
init_rwsem(&wg->static_identity.lock);
mutex_init(&wg->socket_update_lock);
mutex_init(&wg->device_update_lock);
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index b3294287bce1..6d336e39d673 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -1163,9 +1163,12 @@ int ath10k_core_check_dt(struct ath10k *ar)
if (!node)
return -ENOENT;
- of_property_read_string(node, "qcom,ath10k-calibration-variant",
+ of_property_read_string(node, "qcom,calibration-variant",
&variant);
if (!variant)
+ of_property_read_string(node, "qcom,ath10k-calibration-variant",
+ &variant);
+ if (!variant)
return -ENODATA;
if (strscpy(ar->id.bdf_ext, variant, sizeof(ar->id.bdf_ext)) < 0)
@@ -2259,7 +2262,9 @@ static int ath10k_core_pre_cal_download(struct ath10k *ar)
"boot did not find a pre calibration file, try DT next: %d\n",
ret);
- ret = ath10k_download_cal_dt(ar, "qcom,ath10k-pre-calibration-data");
+ ret = ath10k_download_cal_dt(ar, "qcom,pre-calibration-data");
+ if (ret == -ENOENT)
+ ret = ath10k_download_cal_dt(ar, "qcom,ath10k-pre-calibration-data");
if (ret) {
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"unable to load pre cal data from DT: %d\n", ret);
@@ -2337,7 +2342,9 @@ static int ath10k_download_cal_data(struct ath10k *ar)
"boot did not find a calibration file, try DT next: %d\n",
ret);
- ret = ath10k_download_cal_dt(ar, "qcom,ath10k-calibration-data");
+ ret = ath10k_download_cal_dt(ar, "qcom,calibration-data");
+ if (ret == -ENOENT)
+ ret = ath10k_download_cal_dt(ar, "qcom,ath10k-calibration-data");
if (ret == 0) {
ar->cal_mode = ATH10K_CAL_MODE_DT;
goto done;
diff --git a/drivers/net/wireless/ath/ath11k/Makefile b/drivers/net/wireless/ath/ath11k/Makefile
index 43d2d8ddcdc0..d9092414b362 100644
--- a/drivers/net/wireless/ath/ath11k/Makefile
+++ b/drivers/net/wireless/ath/ath11k/Makefile
@@ -27,6 +27,7 @@ ath11k-$(CONFIG_ATH11K_TRACING) += trace.o
ath11k-$(CONFIG_THERMAL) += thermal.o
ath11k-$(CONFIG_ATH11K_SPECTRAL) += spectral.o
ath11k-$(CONFIG_PM) += wow.o
+ath11k-$(CONFIG_DEV_COREDUMP) += coredump.o
obj-$(CONFIG_ATH11K_AHB) += ath11k_ahb.o
ath11k_ahb-y += ahb.o
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index f2fc04596d48..eedba3766ba2 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -1290,6 +1290,7 @@ static void ath11k_ahb_remove(struct platform_device *pdev)
ath11k_core_deinit(ab);
qmi_fail:
+ ath11k_fw_destroy(ab);
ath11k_ahb_free_resources(ab);
}
@@ -1309,6 +1310,7 @@ static void ath11k_ahb_shutdown(struct platform_device *pdev)
ath11k_core_deinit(ab);
free_resources:
+ ath11k_fw_destroy(ab);
ath11k_ahb_free_resources(ab);
}
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index c576bbba52bf..3d39ff85ba94 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -1175,9 +1175,12 @@ int ath11k_core_check_dt(struct ath11k_base *ab)
if (!node)
return -ENOENT;
- of_property_read_string(node, "qcom,ath11k-calibration-variant",
+ of_property_read_string(node, "qcom,calibration-variant",
&variant);
if (!variant)
+ of_property_read_string(node, "qcom,ath11k-calibration-variant",
+ &variant);
+ if (!variant)
return -ENODATA;
if (strscpy(ab->qmi.target.bdf_ext, variant, max_len) < 0)
@@ -2056,6 +2059,7 @@ void ath11k_core_halt(struct ath11k *ar)
ath11k_mac_scan_finish(ar);
ath11k_mac_peer_cleanup_all(ar);
cancel_delayed_work_sync(&ar->scan.timeout);
+ cancel_work_sync(&ar->channel_update_work);
cancel_work_sync(&ar->regd_update_work);
cancel_work_sync(&ab->update_11d_work);
@@ -2257,6 +2261,7 @@ static void ath11k_core_reset(struct work_struct *work)
reinit_completion(&ab->recovery_start);
atomic_set(&ab->recovery_start_count, 0);
+ ath11k_coredump_collect(ab);
ath11k_core_pre_reconfigure_recovery(ab);
reinit_completion(&ab->reconfigure_complete);
@@ -2346,7 +2351,6 @@ void ath11k_core_deinit(struct ath11k_base *ab)
ath11k_hif_power_down(ab);
ath11k_mac_destroy(ab);
ath11k_core_soc_destroy(ab);
- ath11k_fw_destroy(ab);
}
EXPORT_SYMBOL(ath11k_core_deinit);
@@ -2393,6 +2397,7 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
INIT_WORK(&ab->restart_work, ath11k_core_restart);
INIT_WORK(&ab->update_11d_work, ath11k_update_11d);
INIT_WORK(&ab->reset_work, ath11k_core_reset);
+ INIT_WORK(&ab->dump_work, ath11k_coredump_upload);
timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0);
init_completion(&ab->htc_suspend);
init_completion(&ab->wow.wakeup_completed);
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index a9dc7fe7765a..1a3d0de4afde 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_CORE_H
@@ -32,6 +32,7 @@
#include "spectral.h"
#include "wow.h"
#include "fw.h"
+#include "coredump.h"
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -370,6 +371,7 @@ struct ath11k_vif {
struct ieee80211_vif *vif;
struct wmi_wmm_params_all_arg wmm_params;
+ struct wmi_wmm_params_all_arg muedca_params;
struct list_head list;
union {
struct {
@@ -685,7 +687,7 @@ struct ath11k {
struct mutex conf_mutex;
/* protects the radio specific data like debug stats, ppdu_stats_info stats,
* vdev_stop_status info, scan data, ath11k_sta info, ath11k_vif info,
- * channel context data, survey info, test mode data.
+ * channel context data, survey info, test mode data, channel_update_queue.
*/
spinlock_t data_lock;
@@ -743,6 +745,9 @@ struct ath11k {
struct completion bss_survey_done;
struct work_struct regd_update_work;
+ struct work_struct channel_update_work;
+ /* protected with data_lock */
+ struct list_head channel_update_queue;
struct work_struct wmi_mgmt_tx_work;
struct sk_buff_head wmi_mgmt_tx_queue;
@@ -900,6 +905,10 @@ struct ath11k_base {
/* HW channel counters frequency value in hertz common to all MACs */
u32 cc_freq_hz;
+ struct ath11k_dump_file_data *dump_data;
+ size_t ath11k_coredump_len;
+ struct work_struct dump_work;
+
struct ath11k_htc htc;
struct ath11k_dp dp;
diff --git a/drivers/net/wireless/ath/ath11k/coredump.c b/drivers/net/wireless/ath/ath11k/coredump.c
new file mode 100644
index 000000000000..b8bad358cebe
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/coredump.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/devcoredump.h>
+#include "hif.h"
+#include "coredump.h"
+#include "debug.h"
+
+enum
+ath11k_fw_crash_dump_type ath11k_coredump_get_dump_type(int type)
+{
+ enum ath11k_fw_crash_dump_type dump_type;
+
+ switch (type) {
+ case HOST_DDR_REGION_TYPE:
+ dump_type = FW_CRASH_DUMP_REMOTE_MEM_DATA;
+ break;
+ case M3_DUMP_REGION_TYPE:
+ dump_type = FW_CRASH_DUMP_M3_DUMP;
+ break;
+ case PAGEABLE_MEM_REGION_TYPE:
+ dump_type = FW_CRASH_DUMP_PAGEABLE_DATA;
+ break;
+ case BDF_MEM_REGION_TYPE:
+ case CALDB_MEM_REGION_TYPE:
+ dump_type = FW_CRASH_DUMP_NONE;
+ break;
+ default:
+ dump_type = FW_CRASH_DUMP_TYPE_MAX;
+ break;
+ }
+
+ return dump_type;
+}
+EXPORT_SYMBOL(ath11k_coredump_get_dump_type);
+
+void ath11k_coredump_upload(struct work_struct *work)
+{
+ struct ath11k_base *ab = container_of(work, struct ath11k_base, dump_work);
+
+ ath11k_info(ab, "Uploading coredump\n");
+ /* dev_coredumpv() takes ownership of the buffer */
+ dev_coredumpv(ab->dev, ab->dump_data, ab->ath11k_coredump_len, GFP_KERNEL);
+ ab->dump_data = NULL;
+}
+
+void ath11k_coredump_collect(struct ath11k_base *ab)
+{
+ ath11k_hif_coredump_download(ab);
+}
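Editor's note, not part of the patch: ath11k_coredump_upload() relies on the devcoredump ownership contract — the buffer handed to dev_coredumpv() must come from vmalloc and is freed by the devcoredump core, which is why ab->dump_data is simply cleared afterwards. A minimal sketch of that pattern follows; example_report_crash() and its parameters are invented for illustration.

/* Illustrative sketch only: the dev_coredumpv() ownership pattern used above. */
#include <linux/device.h>
#include <linux/devcoredump.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

static void example_report_crash(struct device *dev, const void *snapshot, size_t len)
{
	void *buf;

	/* devcoredump expects vmalloc memory and takes ownership of it */
	buf = vzalloc(len);
	if (!buf)
		return;

	memcpy(buf, snapshot, len);

	/* after this call the devcoredump core frees buf; never touch it again */
	dev_coredumpv(dev, buf, len, GFP_KERNEL);
}
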
diff --git a/drivers/net/wireless/ath/ath11k/coredump.h b/drivers/net/wireless/ath/ath11k/coredump.h
new file mode 100644
index 000000000000..3960d9385261
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/coredump.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef _ATH11K_COREDUMP_H_
+#define _ATH11K_COREDUMP_H_
+
+#define ATH11K_FW_CRASH_DUMP_V2 2
+
+enum ath11k_fw_crash_dump_type {
+ FW_CRASH_DUMP_PAGING_DATA,
+ FW_CRASH_DUMP_RDDM_DATA,
+ FW_CRASH_DUMP_REMOTE_MEM_DATA,
+ FW_CRASH_DUMP_PAGEABLE_DATA,
+ FW_CRASH_DUMP_M3_DUMP,
+ FW_CRASH_DUMP_NONE,
+
+ /* keep last */
+ FW_CRASH_DUMP_TYPE_MAX,
+};
+
+#define COREDUMP_TLV_HDR_SIZE 8
+
+struct ath11k_tlv_dump_data {
+ /* see ath11k_fw_crash_dump_type above */
+ __le32 type;
+
+ /* in bytes */
+ __le32 tlv_len;
+
+ /* pad to 32-bit boundaries as needed */
+ u8 tlv_data[];
+} __packed;
+
+struct ath11k_dump_file_data {
+ /* "ATH11K-FW-DUMP" */
+ char df_magic[16];
+ /* total dump len in bytes */
+ __le32 len;
+ /* file dump version */
+ __le32 version;
+ /* pci device id */
+ __le32 chip_id;
+ /* qrtr instance id */
+ __le32 qrtr_id;
+ /* pci domain id */
+ __le32 bus_id;
+ guid_t guid;
+ /* time-of-day stamp */
+ __le64 tv_sec;
+ /* time-of-day stamp, nano-seconds */
+ __le64 tv_nsec;
+ /* room for growth w/out changing binary format */
+ u8 unused[128];
+ u8 data[];
+} __packed;
+
+#ifdef CONFIG_DEV_COREDUMP
+enum ath11k_fw_crash_dump_type ath11k_coredump_get_dump_type(int type);
+void ath11k_coredump_upload(struct work_struct *work);
+void ath11k_coredump_collect(struct ath11k_base *ab);
+#else
+static inline enum
+ath11k_fw_crash_dump_type ath11k_coredump_get_dump_type(int type)
+{
+ return FW_CRASH_DUMP_TYPE_MAX;
+}
+
+static inline void ath11k_coredump_upload(struct work_struct *work)
+{
+}
+
+static inline void ath11k_coredump_collect(struct ath11k_base *ab)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
index fbf666d0ecf1..f124b7329e1a 100644
--- a/drivers/net/wireless/ath/ath11k/dp.c
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <crypto/hash.h>
@@ -104,14 +104,12 @@ void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
if (!ring->vaddr_unaligned)
return;
- if (ring->cached) {
- dma_unmap_single(ab->dev, ring->paddr_unaligned, ring->size,
- DMA_FROM_DEVICE);
- kfree(ring->vaddr_unaligned);
- } else {
+ if (ring->cached)
+ dma_free_noncoherent(ab->dev, ring->size, ring->vaddr_unaligned,
+ ring->paddr_unaligned, DMA_FROM_DEVICE);
+ else
dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
ring->paddr_unaligned);
- }
ring->vaddr_unaligned = NULL;
}
@@ -249,25 +247,14 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
default:
cached = false;
}
-
- if (cached) {
- ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
- if (!ring->vaddr_unaligned)
- return -ENOMEM;
-
- ring->paddr_unaligned = dma_map_single(ab->dev,
- ring->vaddr_unaligned,
- ring->size,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(ab->dev, ring->paddr_unaligned)) {
- kfree(ring->vaddr_unaligned);
- ring->vaddr_unaligned = NULL;
- return -ENOMEM;
- }
- }
}
- if (!cached)
+ if (cached)
+ ring->vaddr_unaligned = dma_alloc_noncoherent(ab->dev, ring->size,
+ &ring->paddr_unaligned,
+ DMA_FROM_DEVICE,
+ GFP_KERNEL);
+ else
ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
&ring->paddr_unaligned,
GFP_KERNEL);
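Editor's note, not part of the patch: the dp.c change collapses the kzalloc() + dma_map_single() pair into a single dma_alloc_noncoherent() call, so allocation and mapping can no longer fail independently and teardown becomes one dma_free_noncoherent(). A minimal sketch of the adopted pattern, assuming the same DMA_FROM_DEVICE/GFP_KERNEL parameters; struct example_ring and the helper names are invented.

/* Illustrative sketch of the allocation pattern used in ath11k_dp_srng_setup(). */
#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct example_ring {
	void *vaddr_unaligned;
	dma_addr_t paddr_unaligned;
	u32 size;
};

static int example_ring_alloc(struct device *dev, struct example_ring *ring)
{
	/* one call allocates CPU-cached memory and sets up the streaming mapping */
	ring->vaddr_unaligned = dma_alloc_noncoherent(dev, ring->size,
						      &ring->paddr_unaligned,
						      DMA_FROM_DEVICE, GFP_KERNEL);
	return ring->vaddr_unaligned ? 0 : -ENOMEM;
}

static void example_ring_free(struct device *dev, struct example_ring *ring)
{
	dma_free_noncoherent(dev, ring->size, ring->vaddr_unaligned,
			     ring->paddr_unaligned, DMA_FROM_DEVICE);
	ring->vaddr_unaligned = NULL;
}
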
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
index f777314db8b3..7a55afd33be8 100644
--- a/drivers/net/wireless/ath/ath11k/dp.h
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023, 2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_DP_H
@@ -20,7 +20,6 @@ struct ath11k_ext_irq_grp;
struct dp_rx_tid {
u8 tid;
- u32 *vaddr;
dma_addr_t paddr;
u32 size;
u32 ba_win_sz;
@@ -37,6 +36,9 @@ struct dp_rx_tid {
/* Timer info related to fragments */
struct timer_list frag_timer;
struct ath11k_base *ab;
+ u32 *vaddr_unaligned;
+ dma_addr_t paddr_unaligned;
+ u32 unaligned_size;
};
#define DP_REO_DESC_FREE_THRESHOLD 64
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 029ecf51c9ef..f2bdbac2a0b7 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/ieee80211.h>
@@ -675,11 +675,11 @@ void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
list_del(&cmd->list);
rx_tid = &cmd->data;
- if (rx_tid->vaddr) {
- dma_unmap_single(ab->dev, rx_tid->paddr,
- rx_tid->size, DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ if (rx_tid->vaddr_unaligned) {
+ dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
}
kfree(cmd);
}
@@ -689,11 +689,11 @@ void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
list_del(&cmd_cache->list);
dp->reo_cmd_cache_flush_count--;
rx_tid = &cmd_cache->data;
- if (rx_tid->vaddr) {
- dma_unmap_single(ab->dev, rx_tid->paddr,
- rx_tid->size, DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ if (rx_tid->vaddr_unaligned) {
+ dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
}
kfree(cmd_cache);
}
@@ -708,11 +708,11 @@ static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
if (status != HAL_REO_CMD_SUCCESS)
ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
rx_tid->tid, status);
- if (rx_tid->vaddr) {
- dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ if (rx_tid->vaddr_unaligned) {
+ dma_free_noncoherent(dp->ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
}
}
@@ -749,10 +749,10 @@ static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
if (ret) {
ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
rx_tid->tid, ret);
- dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
}
}
@@ -802,10 +802,10 @@ static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
return;
free_desc:
- dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
}
void ath11k_peer_rx_tid_delete(struct ath11k *ar,
@@ -831,14 +831,16 @@ void ath11k_peer_rx_tid_delete(struct ath11k *ar,
if (ret != -ESHUTDOWN)
ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
tid, ret);
- dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ dma_free_noncoherent(ar->ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
}
rx_tid->paddr = 0;
+ rx_tid->paddr_unaligned = 0;
rx_tid->size = 0;
+ rx_tid->unaligned_size = 0;
}
static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
@@ -982,10 +984,9 @@ static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
if (!rx_tid->active)
goto unlock_exit;
- dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
rx_tid->active = false;
@@ -1000,9 +1001,8 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
struct ath11k_base *ab = ar->ab;
struct ath11k_peer *peer;
struct dp_rx_tid *rx_tid;
- u32 hw_desc_sz;
- u32 *addr_aligned;
- void *vaddr;
+ u32 hw_desc_sz, *vaddr;
+ void *vaddr_unaligned;
dma_addr_t paddr;
int ret;
@@ -1050,37 +1050,34 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
else
hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
- vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
- if (!vaddr) {
+ rx_tid->unaligned_size = hw_desc_sz + HAL_LINK_DESC_ALIGN - 1;
+ vaddr_unaligned = dma_alloc_noncoherent(ab->dev, rx_tid->unaligned_size, &paddr,
+ DMA_BIDIRECTIONAL, GFP_ATOMIC);
+ if (!vaddr_unaligned) {
spin_unlock_bh(&ab->base_lock);
return -ENOMEM;
}
- addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
-
- ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
- ssn, pn_type);
-
- paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
- DMA_BIDIRECTIONAL);
-
- ret = dma_mapping_error(ab->dev, paddr);
- if (ret) {
- spin_unlock_bh(&ab->base_lock);
- ath11k_warn(ab, "failed to setup dma map for peer %pM rx tid %d: %d\n",
- peer_mac, tid, ret);
- goto err_mem_free;
- }
-
- rx_tid->vaddr = vaddr;
- rx_tid->paddr = paddr;
+ rx_tid->vaddr_unaligned = vaddr_unaligned;
+ vaddr = PTR_ALIGN(vaddr_unaligned, HAL_LINK_DESC_ALIGN);
+ rx_tid->paddr_unaligned = paddr;
+ rx_tid->paddr = rx_tid->paddr_unaligned + ((unsigned long)vaddr -
+ (unsigned long)rx_tid->vaddr_unaligned);
+ ath11k_hal_reo_qdesc_setup(vaddr, tid, ba_win_sz, ssn, pn_type);
rx_tid->size = hw_desc_sz;
rx_tid->active = true;
+ /* The CPU has just written the REO queue descriptor into this noncoherent
+ * buffer, so the driver must explicitly call dma_sync_single_for_device()
+ * before the device reads it.
+ */
+ dma_sync_single_for_device(ab->dev, rx_tid->paddr,
+ rx_tid->size,
+ DMA_TO_DEVICE);
spin_unlock_bh(&ab->base_lock);
- ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
- paddr, tid, 1, ba_win_sz);
+ ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, rx_tid->paddr,
+ tid, 1, ba_win_sz);
if (ret) {
ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n",
peer_mac, tid, ret);
@@ -1088,12 +1085,6 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
}
return ret;
-
-err_mem_free:
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
-
- return ret;
}
int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
@@ -2831,8 +2822,6 @@ static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
rx_stats->dcm_count += ppdu_info->dcm;
rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;
- arsta->rssi_comb = ppdu_info->rssi_comb;
-
BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
ARRAY_SIZE(ppdu_info->rssi_chain_pri20));
@@ -4783,7 +4772,7 @@ u32 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
if (!msdu) {
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
"msdu_pop: invalid buf_id %d\n", buf_id);
- break;
+ goto next_msdu;
}
rxcb = ATH11K_SKB_RXCB(msdu);
if (!rxcb->unmapped) {
@@ -5148,7 +5137,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
const struct ath11k_hw_hal_params *hal_params;
void *ring_entry;
- void *mon_dst_srng;
+ struct hal_srng *mon_dst_srng;
u32 ppdu_id;
u32 rx_bufs_used;
u32 ring_id;
@@ -5165,6 +5154,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
spin_lock_bh(&pmon->mon_lock);
+ spin_lock_bh(&mon_dst_srng->lock);
ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
ppdu_id = pmon->mon_ppdu_info.ppdu_id;
@@ -5223,6 +5213,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
mon_dst_srng);
}
ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
+ spin_unlock_bh(&mon_dst_srng->lock);
spin_unlock_bh(&pmon->mon_lock);
@@ -5410,7 +5401,7 @@ ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar,
"full mon msdu_pop: invalid buf_id %d\n",
buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
- break;
+ goto next_msdu;
}
idr_remove(&rx_ring->bufs_idr, buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
@@ -5612,7 +5603,7 @@ static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id,
struct hal_sw_mon_ring_entries *sw_mon_entries;
struct ath11k_pdev_mon_stats *rx_mon_stats;
struct sk_buff *head_msdu, *tail_msdu;
- void *mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
+ struct hal_srng *mon_dst_srng;
void *ring_entry;
u32 rx_bufs_used = 0, mpdu_rx_bufs_used;
int quota = 0, ret;
@@ -5628,6 +5619,9 @@ static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id,
goto reap_status_ring;
}
+ mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
+ spin_lock_bh(&mon_dst_srng->lock);
+
ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
head_msdu = NULL;
@@ -5671,6 +5665,7 @@ next_entry:
}
ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
+ spin_unlock_bh(&mon_dst_srng->lock);
spin_unlock_bh(&pmon->mon_lock);
if (rx_bufs_used) {
diff --git a/drivers/net/wireless/ath/ath11k/fw.c b/drivers/net/wireless/ath/ath11k/fw.c
index 4e36292a79db..cbbd8e57119f 100644
--- a/drivers/net/wireless/ath/ath11k/fw.c
+++ b/drivers/net/wireless/ath/ath11k/fw.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
- * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -166,3 +166,4 @@ void ath11k_fw_destroy(struct ath11k_base *ab)
{
release_firmware(ab->fw.fw);
}
+EXPORT_SYMBOL(ath11k_fw_destroy);
diff --git a/drivers/net/wireless/ath/ath11k/hif.h b/drivers/net/wireless/ath/ath11k/hif.h
index 674ff772b181..770c39ff99b4 100644
--- a/drivers/net/wireless/ath/ath11k/hif.h
+++ b/drivers/net/wireless/ath/ath11k/hif.h
@@ -31,6 +31,7 @@ struct ath11k_hif_ops {
void (*ce_irq_enable)(struct ath11k_base *ab);
void (*ce_irq_disable)(struct ath11k_base *ab);
void (*get_ce_msi_idx)(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx);
+ void (*coredump_download)(struct ath11k_base *ab);
};
static inline void ath11k_hif_ce_irq_enable(struct ath11k_base *ab)
@@ -146,4 +147,10 @@ static inline void ath11k_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id,
*msi_data_idx = ce_id;
}
+static inline void ath11k_hif_coredump_download(struct ath11k_base *ab)
+{
+ if (ab->hif.ops->coredump_download)
+ ab->hif.ops->coredump_download(ab);
+}
+
#endif /* _HIF_H_ */
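Editor's note, not part of the patch: coredump_download is an optional HIF op, so the inline wrapper dispatches only when the bus driver provides it. A tiny sketch of that optional-op dispatch; the struct and names are invented.

/* Illustrative sketch of optional-op dispatch as in ath11k_hif_coredump_download(). */
struct example_ops {
	void (*coredump_download)(void *ctx);	/* optional, may be NULL */
};

static inline void example_coredump_download(const struct example_ops *ops, void *ctx)
{
	if (ops->coredump_download)
		ops->coredump_download(ctx);
}
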
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 1556392f7ad4..97816916abac 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <net/mac80211.h>
@@ -1529,17 +1529,23 @@ static int ath11k_mac_set_vif_params(struct ath11k_vif *arvif,
return ret;
}
-static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif)
+static struct ath11k_vif *ath11k_mac_get_tx_arvif(struct ath11k_vif *arvif)
+{
+ if (arvif->vif->mbssid_tx_vif)
+ return ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif);
+
+ return NULL;
+}
+
+static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif,
+ struct ath11k_vif *tx_arvif)
{
- struct ath11k_vif *tx_arvif;
struct ieee80211_ema_beacons *beacons;
int ret = 0;
bool nontx_vif_params_set = false;
u32 params = 0;
u8 i = 0;
- tx_arvif = ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif);
-
beacons = ieee80211_beacon_get_template_ema_list(tx_arvif->ar->hw,
tx_arvif->vif, 0);
if (!beacons || !beacons->cnt) {
@@ -1585,25 +1591,22 @@ static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif)
return ret;
}
-static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif)
+static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif,
+ struct ath11k_vif *tx_arvif)
{
struct ath11k *ar = arvif->ar;
struct ath11k_base *ab = ar->ab;
- struct ath11k_vif *tx_arvif = arvif;
struct ieee80211_hw *hw = ar->hw;
struct ieee80211_vif *vif = arvif->vif;
struct ieee80211_mutable_offsets offs = {};
struct sk_buff *bcn;
int ret;
- if (vif->mbssid_tx_vif) {
- tx_arvif = ath11k_vif_to_arvif(vif->mbssid_tx_vif);
- if (tx_arvif != arvif) {
- ar = tx_arvif->ar;
- ab = ar->ab;
- hw = ar->hw;
- vif = tx_arvif->vif;
- }
+ if (tx_arvif != arvif) {
+ ar = tx_arvif->ar;
+ ab = ar->ab;
+ hw = ar->hw;
+ vif = tx_arvif->vif;
}
bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0);
@@ -1632,6 +1635,7 @@ static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif)
static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
{
struct ieee80211_vif *vif = arvif->vif;
+ struct ath11k_vif *tx_arvif;
if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
return 0;
@@ -1639,14 +1643,18 @@ static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
/* The target does not expect beacon templates for non-transmitting
* interfaces that are already up, and crashes if one is sent.
*/
- if (vif->mbssid_tx_vif &&
- arvif != ath11k_vif_to_arvif(vif->mbssid_tx_vif) && arvif->is_up)
- return 0;
+ tx_arvif = ath11k_mac_get_tx_arvif(arvif);
+ if (tx_arvif) {
+ if (arvif != tx_arvif && arvif->is_up)
+ return 0;
- if (vif->bss_conf.ema_ap && vif->mbssid_tx_vif)
- return ath11k_mac_setup_bcn_tmpl_ema(arvif);
+ if (vif->bss_conf.ema_ap)
+ return ath11k_mac_setup_bcn_tmpl_ema(arvif, tx_arvif);
+ } else {
+ tx_arvif = arvif;
+ }
- return ath11k_mac_setup_bcn_tmpl_mbssid(arvif);
+ return ath11k_mac_setup_bcn_tmpl_mbssid(arvif, tx_arvif);
}
void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif)
@@ -1674,7 +1682,7 @@ static void ath11k_control_beaconing(struct ath11k_vif *arvif,
struct ieee80211_bss_conf *info)
{
struct ath11k *ar = arvif->ar;
- struct ath11k_vif *tx_arvif = NULL;
+ struct ath11k_vif *tx_arvif;
int ret = 0;
lockdep_assert_held(&arvif->ar->conf_mutex);
@@ -1701,9 +1709,7 @@ static void ath11k_control_beaconing(struct ath11k_vif *arvif,
ether_addr_copy(arvif->bssid, info->bssid);
- if (arvif->vif->mbssid_tx_vif)
- tx_arvif = ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif);
-
+ tx_arvif = ath11k_mac_get_tx_arvif(arvif);
ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
arvif->bssid,
tx_arvif ? tx_arvif->bssid : NULL,
@@ -5204,6 +5210,45 @@ exit:
return ret;
}
+static int ath11k_mac_op_conf_tx_mu_edca(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k *ar = hw->priv;
+ struct wmi_wmm_params_arg *p;
+ int ret;
+
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ p = &arvif->muedca_params.ac_vo;
+ break;
+ case IEEE80211_AC_VI:
+ p = &arvif->muedca_params.ac_vi;
+ break;
+ case IEEE80211_AC_BE:
+ p = &arvif->muedca_params.ac_be;
+ break;
+ case IEEE80211_AC_BK:
+ p = &arvif->muedca_params.ac_bk;
+ break;
+ default:
+ ath11k_warn(ar->ab, "error ac: %d", ac);
+ return -EINVAL;
+ }
+
+ p->cwmin = u8_get_bits(params->mu_edca_param_rec.ecw_min_max, GENMASK(3, 0));
+ p->cwmax = u8_get_bits(params->mu_edca_param_rec.ecw_min_max, GENMASK(7, 4));
+ p->aifs = u8_get_bits(params->mu_edca_param_rec.aifsn, GENMASK(3, 0));
+ p->txop = params->mu_edca_param_rec.mu_edca_timer;
+
+ ret = ath11k_wmi_send_wmm_update_cmd_tlv(ar, arvif->vdev_id,
+ &arvif->muedca_params,
+ WMI_WMM_PARAM_TYPE_11AX_MU_EDCA);
+ return ret;
+}
+
static int ath11k_mac_op_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
unsigned int link_id, u16 ac,
@@ -5242,12 +5287,22 @@ static int ath11k_mac_op_conf_tx(struct ieee80211_hw *hw,
p->txop = params->txop;
ret = ath11k_wmi_send_wmm_update_cmd_tlv(ar, arvif->vdev_id,
- &arvif->wmm_params);
+ &arvif->wmm_params,
+ WMI_WMM_PARAM_TYPE_LEGACY);
if (ret) {
ath11k_warn(ar->ab, "failed to set wmm params: %d\n", ret);
goto exit;
}
+ if (params->mu_edca) {
+ ret = ath11k_mac_op_conf_tx_mu_edca(hw, vif, link_id, ac,
+ params);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set mu_edca params: %d\n", ret);
+ goto exit;
+ }
+ }
+
ret = ath11k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
if (ret)
@@ -5336,8 +5391,6 @@ static int ath11k_mac_set_txbf_conf(struct ath11k_vif *arvif)
if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) {
nsts = vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
- if (nsts > (ar->num_rx_chains - 1))
- nsts = ar->num_rx_chains - 1;
value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
}
@@ -5421,9 +5474,6 @@ static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap)
/* Enable Beamformee STS Field only if SU BF is enabled */
if (subfee) {
- if (nsts > (ar->num_rx_chains - 1))
- nsts = ar->num_rx_chains - 1;
-
nsts <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
*vht_cap |= nsts;
@@ -6288,6 +6338,7 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath11k *ar = hw->priv;
struct htt_ppdu_stats_info *ppdu_stats, *tmp;
+ struct scan_chan_list_params *params;
int ret;
ath11k_mac_drain_tx(ar);
@@ -6303,6 +6354,7 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw, bool suspend)
mutex_unlock(&ar->conf_mutex);
cancel_delayed_work_sync(&ar->scan.timeout);
+ cancel_work_sync(&ar->channel_update_work);
cancel_work_sync(&ar->regd_update_work);
cancel_work_sync(&ar->ab->update_11d_work);
@@ -6312,10 +6364,19 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw, bool suspend)
}
spin_lock_bh(&ar->data_lock);
+
list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
list_del(&ppdu_stats->list);
kfree(ppdu_stats);
}
+
+ while ((params = list_first_entry_or_null(&ar->channel_update_queue,
+ struct scan_chan_list_params,
+ list))) {
+ list_del(&params->list);
+ kfree(params);
+ }
+
spin_unlock_bh(&ar->data_lock);
rcu_assign_pointer(ar->ab->pdevs_active[ar->pdev_idx], NULL);
@@ -6330,23 +6391,20 @@ static int ath11k_mac_setup_vdev_params_mbssid(struct ath11k_vif *arvif,
{
struct ath11k *ar = arvif->ar;
struct ath11k_vif *tx_arvif;
- struct ieee80211_vif *tx_vif;
*tx_vdev_id = 0;
- tx_vif = arvif->vif->mbssid_tx_vif;
- if (!tx_vif) {
+ tx_arvif = ath11k_mac_get_tx_arvif(arvif);
+ if (!tx_arvif) {
*flags = WMI_HOST_VDEV_FLAGS_NON_MBSSID_AP;
return 0;
}
- tx_arvif = ath11k_vif_to_arvif(tx_vif);
-
if (arvif->vif->bss_conf.nontransmitted) {
- if (ar->hw->wiphy != ieee80211_vif_to_wdev(tx_vif)->wiphy)
+ if (ar->hw->wiphy != tx_arvif->ar->hw->wiphy)
return -EINVAL;
*flags = WMI_HOST_VDEV_FLAGS_NON_TRANSMIT_AP;
- *tx_vdev_id = ath11k_vif_to_arvif(tx_vif)->vdev_id;
+ *tx_vdev_id = tx_arvif->vdev_id;
} else if (tx_arvif == arvif) {
*flags = WMI_HOST_VDEV_FLAGS_TRANSMIT_AP;
} else {
@@ -7306,8 +7364,7 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
int n_vifs)
{
struct ath11k_base *ab = ar->ab;
- struct ath11k_vif *arvif, *tx_arvif = NULL;
- struct ieee80211_vif *mbssid_tx_vif;
+ struct ath11k_vif *arvif, *tx_arvif;
int ret;
int i;
bool monitor_vif = false;
@@ -7361,10 +7418,7 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
ath11k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
ret);
- mbssid_tx_vif = arvif->vif->mbssid_tx_vif;
- if (mbssid_tx_vif)
- tx_arvif = ath11k_vif_to_arvif(mbssid_tx_vif);
-
+ tx_arvif = ath11k_mac_get_tx_arvif(arvif);
ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
arvif->bssid,
tx_arvif ? tx_arvif->bssid : NULL,
@@ -10019,6 +10073,7 @@ static const struct wiphy_iftype_ext_capab ath11k_iftypes_ext_capa[] = {
static void __ath11k_mac_unregister(struct ath11k *ar)
{
+ cancel_work_sync(&ar->channel_update_work);
cancel_work_sync(&ar->regd_update_work);
ieee80211_unregister_hw(ar->hw);
@@ -10418,6 +10473,8 @@ int ath11k_mac_allocate(struct ath11k_base *ab)
init_completion(&ar->thermal.wmi_sync);
INIT_DELAYED_WORK(&ar->scan.timeout, ath11k_scan_timeout_work);
+ INIT_WORK(&ar->channel_update_work, ath11k_regd_update_chan_list_work);
+ INIT_LIST_HEAD(&ar->channel_update_queue);
INIT_WORK(&ar->regd_update_work, ath11k_regd_update_work);
INIT_WORK(&ar->wmi_mgmt_tx_work, ath11k_mgmt_over_wmi_tx_work);
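Editor's note, not part of the patch: ath11k_mac_op_conf_tx_mu_edca() unpacks the MU EDCA parameter record, where the low nibble of ecw_min_max carries ECWmin and the high nibble ECWmax, and the low nibble of aifsn carries the AIFSN. A small sketch of that bitfield extraction with u8_get_bits()/GENMASK(); struct example_mu_edca and example_unpack() are invented.

/* Illustrative sketch of the nibble extraction done in ath11k_mac_op_conf_tx_mu_edca(). */
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

struct example_mu_edca {
	u8 ecw_min_max;	/* bits 3:0 = ECWmin, bits 7:4 = ECWmax */
	u8 aifsn;	/* bits 3:0 = AIFSN */
};

static void example_unpack(const struct example_mu_edca *rec,
			   u8 *cwmin, u8 *cwmax, u8 *aifs)
{
	*cwmin = u8_get_bits(rec->ecw_min_max, GENMASK(3, 0));
	*cwmax = u8_get_bits(rec->ecw_min_max, GENMASK(7, 4));
	*aifs  = u8_get_bits(rec->aifsn, GENMASK(3, 0));
}
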
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index 6e45f464a429..fc77eac83e95 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -491,3 +491,8 @@ int ath11k_mhi_resume(struct ath11k_pci *ab_pci)
return 0;
}
+
+void ath11k_mhi_coredump(struct mhi_controller *mhi_ctrl, bool in_panic)
+{
+ mhi_download_rddm_image(mhi_ctrl, in_panic);
+}
diff --git a/drivers/net/wireless/ath/ath11k/mhi.h b/drivers/net/wireless/ath/ath11k/mhi.h
index a682aad52fc5..651470091bd5 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.h
+++ b/drivers/net/wireless/ath/ath11k/mhi.h
@@ -26,5 +26,6 @@ void ath11k_mhi_clear_vector(struct ath11k_base *ab);
int ath11k_mhi_suspend(struct ath11k_pci *ar_pci);
int ath11k_mhi_resume(struct ath11k_pci *ar_pci);
+void ath11k_mhi_coredump(struct mhi_controller *mhi_ctrl, bool in_panic);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index b93f04973ad7..412f4a134e4a 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -1,13 +1,15 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/of.h>
+#include <linux/time.h>
+#include <linux/vmalloc.h>
#include "pci.h"
#include "core.h"
@@ -610,6 +612,187 @@ static void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci)
PCI_EXP_LNKCTL_ASPMC);
}
+#ifdef CONFIG_DEV_COREDUMP
+static int ath11k_pci_coredump_calculate_size(struct ath11k_base *ab, u32 *dump_seg_sz)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
+ struct image_info *rddm_img, *fw_img;
+ struct ath11k_tlv_dump_data *dump_tlv;
+ enum ath11k_fw_crash_dump_type mem_type;
+ u32 len = 0, rddm_tlv_sz = 0, paging_tlv_sz = 0;
+ struct ath11k_dump_file_data *file_data;
+ int i;
+
+ rddm_img = mhi_ctrl->rddm_image;
+ if (!rddm_img) {
+ ath11k_err(ab, "No RDDM dump found\n");
+ return 0;
+ }
+
+ fw_img = mhi_ctrl->fbc_image;
+
+ for (i = 0; i < fw_img->entries ; i++) {
+ if (!fw_img->mhi_buf[i].buf)
+ continue;
+
+ paging_tlv_sz += fw_img->mhi_buf[i].len;
+ }
+ dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA] = paging_tlv_sz;
+
+ for (i = 0; i < rddm_img->entries; i++) {
+ if (!rddm_img->mhi_buf[i].buf)
+ continue;
+
+ rddm_tlv_sz += rddm_img->mhi_buf[i].len;
+ }
+ dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA] = rddm_tlv_sz;
+
+ for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ mem_type = ath11k_coredump_get_dump_type(ab->qmi.target_mem[i].type);
+
+ if (mem_type == FW_CRASH_DUMP_NONE)
+ continue;
+
+ if (mem_type == FW_CRASH_DUMP_TYPE_MAX) {
+ ath11k_dbg(ab, ATH11K_DBG_PCI,
+ "target mem region type %d not supported",
+ ab->qmi.target_mem[i].type);
+ continue;
+ }
+
+ if (!ab->qmi.target_mem[i].anyaddr)
+ continue;
+
+ dump_seg_sz[mem_type] += ab->qmi.target_mem[i].size;
+ }
+
+ for (i = 0; i < FW_CRASH_DUMP_TYPE_MAX; i++) {
+ if (!dump_seg_sz[i])
+ continue;
+
+ len += sizeof(*dump_tlv) + dump_seg_sz[i];
+ }
+
+ if (len)
+ len += sizeof(*file_data);
+
+ return len;
+}
+
+static void ath11k_pci_coredump_download(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
+ struct image_info *rddm_img, *fw_img;
+ struct timespec64 timestamp;
+ int i, len, mem_idx;
+ enum ath11k_fw_crash_dump_type mem_type;
+ struct ath11k_dump_file_data *file_data;
+ struct ath11k_tlv_dump_data *dump_tlv;
+ size_t hdr_len = sizeof(*file_data);
+ void *buf;
+ u32 dump_seg_sz[FW_CRASH_DUMP_TYPE_MAX] = { 0 };
+
+ ath11k_mhi_coredump(mhi_ctrl, false);
+
+ len = ath11k_pci_coredump_calculate_size(ab, dump_seg_sz);
+ if (!len) {
+ ath11k_warn(ab, "No crash dump data found for devcoredump");
+ return;
+ }
+
+ rddm_img = mhi_ctrl->rddm_image;
+ fw_img = mhi_ctrl->fbc_image;
+
+ /* dev_coredumpv() requires vmalloc data */
+ buf = vzalloc(len);
+ if (!buf)
+ return;
+
+ ab->dump_data = buf;
+ ab->ath11k_coredump_len = len;
+ file_data = ab->dump_data;
+ strscpy(file_data->df_magic, "ATH11K-FW-DUMP", sizeof(file_data->df_magic));
+ file_data->len = cpu_to_le32(len);
+ file_data->version = cpu_to_le32(ATH11K_FW_CRASH_DUMP_V2);
+ file_data->chip_id = cpu_to_le32(ab_pci->dev_id);
+ file_data->qrtr_id = cpu_to_le32(ab_pci->ab->qmi.service_ins_id);
+ file_data->bus_id = cpu_to_le32(pci_domain_nr(ab_pci->pdev->bus));
+ guid_gen(&file_data->guid);
+ ktime_get_real_ts64(&timestamp);
+ file_data->tv_sec = cpu_to_le64(timestamp.tv_sec);
+ file_data->tv_nsec = cpu_to_le64(timestamp.tv_nsec);
+ buf += hdr_len;
+ dump_tlv = buf;
+ dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_PAGING_DATA);
+ dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA]);
+ buf += COREDUMP_TLV_HDR_SIZE;
+
+ /* append all segments together as they are all part of a single contiguous
+ * block of memory
+ */
+ for (i = 0; i < fw_img->entries ; i++) {
+ if (!fw_img->mhi_buf[i].buf)
+ continue;
+
+ memcpy_fromio(buf, (void const __iomem *)fw_img->mhi_buf[i].buf,
+ fw_img->mhi_buf[i].len);
+ buf += fw_img->mhi_buf[i].len;
+ }
+
+ dump_tlv = buf;
+ dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_RDDM_DATA);
+ dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA]);
+ buf += COREDUMP_TLV_HDR_SIZE;
+
+ /* append all segments together as they are all part of a single contiguous
+ * block of memory
+ */
+ for (i = 0; i < rddm_img->entries; i++) {
+ if (!rddm_img->mhi_buf[i].buf)
+ continue;
+
+ memcpy_fromio(buf, (void const __iomem *)rddm_img->mhi_buf[i].buf,
+ rddm_img->mhi_buf[i].len);
+ buf += rddm_img->mhi_buf[i].len;
+ }
+
+ mem_idx = FW_CRASH_DUMP_REMOTE_MEM_DATA;
+ for (; mem_idx < FW_CRASH_DUMP_TYPE_MAX; mem_idx++) {
+ if (mem_idx == FW_CRASH_DUMP_NONE)
+ continue;
+
+ for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ mem_type = ath11k_coredump_get_dump_type
+ (ab->qmi.target_mem[i].type);
+
+ if (mem_type != mem_idx)
+ continue;
+
+ if (!ab->qmi.target_mem[i].anyaddr) {
+ ath11k_dbg(ab, ATH11K_DBG_PCI,
+ "Skipping mem region type %d",
+ ab->qmi.target_mem[i].type);
+ continue;
+ }
+
+ dump_tlv = buf;
+ dump_tlv->type = cpu_to_le32(mem_idx);
+ dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[mem_idx]);
+ buf += COREDUMP_TLV_HDR_SIZE;
+
+ memcpy_fromio(buf, ab->qmi.target_mem[i].iaddr,
+ ab->qmi.target_mem[i].size);
+
+ buf += ab->qmi.target_mem[i].size;
+ }
+ }
+
+ queue_work(ab->workqueue, &ab->dump_work);
+}
+#endif
+
static int ath11k_pci_power_up(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
@@ -713,6 +896,9 @@ static const struct ath11k_hif_ops ath11k_pci_hif_ops = {
.ce_irq_enable = ath11k_pci_hif_ce_irq_enable,
.ce_irq_disable = ath11k_pci_hif_ce_irq_disable,
.get_ce_msi_idx = ath11k_pcic_get_ce_msi_idx,
+#ifdef CONFIG_DEV_COREDUMP
+ .coredump_download = ath11k_pci_coredump_download,
+#endif
};
static void ath11k_pci_read_hw_version(struct ath11k_base *ab, u32 *major, u32 *minor)
@@ -735,7 +921,7 @@ static int ath11k_pci_set_irq_affinity_hint(struct ath11k_pci *ab_pci,
if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab_pci->ab->dev_flags))
return 0;
- return irq_set_affinity_hint(ab_pci->pdev->irq, m);
+ return irq_set_affinity_and_hint(ab_pci->pdev->irq, m);
}
static int ath11k_pci_probe(struct pci_dev *pdev,
@@ -939,6 +1125,8 @@ unsupported_wcn6855_soc:
return 0;
err_free_irq:
+ /* __free_irq() expects the caller to have cleared the affinity hint */
+ ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
ath11k_pcic_free_irq(ab);
err_ce_free:
@@ -981,9 +1169,12 @@ static void ath11k_pci_remove(struct pci_dev *pdev)
set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
+ cancel_work_sync(&ab->reset_work);
+ cancel_work_sync(&ab->dump_work);
ath11k_core_deinit(ab);
qmi_fail:
+ ath11k_fw_destroy(ab);
ath11k_mhi_unregister(ab_pci);
ath11k_pcic_free_irq(ab);
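Editor's note, not part of the patch: ath11k_pci_coredump_download() builds the dump as a fixed file header followed by one TLV record per segment type, sizing everything in a first pass and filling the buffer in a second. A reduced sketch of that two-pass TLV assembly follows; it reuses the struct names from the new coredump.h, while struct example_segment, example_build_dump() and the omitted header fields are stand-ins.

/* Illustrative sketch of the header + TLV layout produced above.
 * Assumes the struct definitions added in coredump.h are in scope.
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include "coredump.h"	/* struct ath11k_dump_file_data, ath11k_tlv_dump_data */

struct example_segment {
	u32 type;		/* maps to enum ath11k_fw_crash_dump_type */
	const void *data;
	u32 len;
};

static void *example_build_dump(const struct example_segment *segs, int nsegs,
				size_t *out_len)
{
	size_t len = sizeof(struct ath11k_dump_file_data);
	struct ath11k_tlv_dump_data *tlv;
	u8 *buf, *p;
	int i;

	/* pass 1: size the buffer (header + one TLV header per segment + payloads) */
	for (i = 0; i < nsegs; i++)
		len += sizeof(*tlv) + segs[i].len;

	buf = vzalloc(len);
	if (!buf)
		return NULL;

	/* pass 2: header first (magic, ids, timestamps filled as in the patch,
	 * omitted here), then one TLV per segment
	 */
	p = buf + sizeof(struct ath11k_dump_file_data);
	for (i = 0; i < nsegs; i++) {
		tlv = (struct ath11k_tlv_dump_data *)p;
		tlv->type = cpu_to_le32(segs[i].type);
		tlv->tlv_len = cpu_to_le32(segs[i].len);
		memcpy(tlv->tlv_data, segs[i].data, segs[i].len);
		p += sizeof(*tlv) + segs[i].len;
	}

	*out_len = len;
	return buf;
}
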
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index 5759fc521316..4f8b08ed1bbc 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -1957,13 +1957,15 @@ static void ath11k_qmi_free_target_mem_chunk(struct ath11k_base *ab)
int i;
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
- if ((ab->hw_params.fixed_mem_region ||
- test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) &&
- ab->qmi.target_mem[i].iaddr)
- iounmap(ab->qmi.target_mem[i].iaddr);
+ if (!ab->qmi.target_mem[i].anyaddr)
+ continue;
- if (!ab->qmi.target_mem[i].vaddr)
+ if (ab->hw_params.fixed_mem_region ||
+ test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) {
+ iounmap(ab->qmi.target_mem[i].iaddr);
+ ab->qmi.target_mem[i].iaddr = NULL;
continue;
+ }
dma_free_coherent(ab->dev,
ab->qmi.target_mem[i].prev_size,
@@ -2070,7 +2072,7 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
break;
case BDF_MEM_REGION_TYPE:
ab->qmi.target_mem[idx].paddr = ab->hw_params.bdf_addr;
- ab->qmi.target_mem[idx].vaddr = NULL;
+ ab->qmi.target_mem[idx].iaddr = NULL;
ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
idx++;
@@ -2093,10 +2095,11 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
} else {
ab->qmi.target_mem[idx].paddr =
ATH11K_QMI_CALDB_ADDRESS;
+ ab->qmi.target_mem[idx].iaddr = NULL;
}
} else {
ab->qmi.target_mem[idx].paddr = 0;
- ab->qmi.target_mem[idx].vaddr = NULL;
+ ab->qmi.target_mem[idx].iaddr = NULL;
}
ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
index 7e06d100af57..7968ab122b65 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.h
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_QMI_H
@@ -102,8 +102,11 @@ struct target_mem_chunk {
u32 prev_size;
u32 prev_type;
dma_addr_t paddr;
- u32 *vaddr;
- void __iomem *iaddr;
+ union {
+ u32 *vaddr;
+ void __iomem *iaddr;
+ void *anyaddr;
+ };
};
struct target_info {
@@ -154,6 +157,7 @@ struct ath11k_qmi {
#define BDF_MEM_REGION_TYPE 0x2
#define M3_DUMP_REGION_TYPE 0x3
#define CALDB_MEM_REGION_TYPE 0x4
+#define PAGEABLE_MEM_REGION_TYPE 0x9
struct qmi_wlanfw_host_cap_req_msg_v01 {
u8 num_clients_valid;
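Editor's note, not part of the patch: the target_mem_chunk change wraps vaddr/iaddr in an anonymous union with an anyaddr alias so callers can test "was any address recorded" without caring whether the region is DMA-allocated or ioremap()ed. A two-function sketch of the idea; struct example_chunk is invented.

/* Illustrative sketch of the "is any pointer set" union alias used in
 * struct target_mem_chunk.
 */
#include <linux/compiler.h>
#include <linux/types.h>

struct example_chunk {
	union {
		u32 *vaddr;		/* DMA-coherent allocation */
		void __iomem *iaddr;	/* ioremap()ed fixed region */
		void *anyaddr;		/* generic "is it set?" view */
	};
};

static bool example_chunk_mapped(const struct example_chunk *c)
{
	return c->anyaddr != NULL;
}
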
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
index b0f289784dd3..d62a2014315a 100644
--- a/drivers/net/wireless/ath/ath11k/reg.c
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/rtnetlink.h>
@@ -55,6 +55,19 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"Regulatory Notification received for %s\n", wiphy_name(wiphy));
+ if (request->initiator == NL80211_REGDOM_SET_BY_DRIVER) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "driver initiated regd update\n");
+ if (ar->state != ATH11K_STATE_ON)
+ return;
+
+ ret = ath11k_reg_update_chan_list(ar, true);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to update channel list: %d\n", ret);
+
+ return;
+ }
+
/* Currently supporting only General User Hints. Cell base user
* hints to be handled later.
* Hints from other sources like Core, Beacons are not expected for
@@ -111,32 +124,7 @@ int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait)
struct channel_param *ch;
enum nl80211_band band;
int num_channels = 0;
- int i, ret, left;
-
- if (wait && ar->state_11d != ATH11K_11D_IDLE) {
- left = wait_for_completion_timeout(&ar->completed_11d_scan,
- ATH11K_SCAN_TIMEOUT_HZ);
- if (!left) {
- ath11k_dbg(ar->ab, ATH11K_DBG_REG,
- "failed to receive 11d scan complete: timed out\n");
- ar->state_11d = ATH11K_11D_IDLE;
- }
- ath11k_dbg(ar->ab, ATH11K_DBG_REG,
- "11d scan wait left time %d\n", left);
- }
-
- if (wait &&
- (ar->scan.state == ATH11K_SCAN_STARTING ||
- ar->scan.state == ATH11K_SCAN_RUNNING)) {
- left = wait_for_completion_timeout(&ar->scan.completed,
- ATH11K_SCAN_TIMEOUT_HZ);
- if (!left)
- ath11k_dbg(ar->ab, ATH11K_DBG_REG,
- "failed to receive hw scan complete: timed out\n");
-
- ath11k_dbg(ar->ab, ATH11K_DBG_REG,
- "hw scan wait left time %d\n", left);
- }
+ int i, ret = 0;
if (ar->state == ATH11K_STATE_RESTARTING)
return 0;
@@ -218,6 +206,16 @@ int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait)
}
}
+ if (wait) {
+ spin_lock_bh(&ar->data_lock);
+ list_add_tail(&params->list, &ar->channel_update_queue);
+ spin_unlock_bh(&ar->data_lock);
+
+ queue_work(ar->ab->workqueue, &ar->channel_update_work);
+
+ return 0;
+ }
+
ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params);
kfree(params);
@@ -293,12 +291,6 @@ int ath11k_regd_update(struct ath11k *ar)
if (ret)
goto err;
- if (ar->state == ATH11K_STATE_ON) {
- ret = ath11k_reg_update_chan_list(ar, true);
- if (ret)
- goto err;
- }
-
return 0;
err:
ath11k_warn(ab, "failed to perform regd update : %d\n", ret);
@@ -804,6 +796,54 @@ ret:
return new_regd;
}
+void ath11k_regd_update_chan_list_work(struct work_struct *work)
+{
+ struct ath11k *ar = container_of(work, struct ath11k,
+ channel_update_work);
+ struct scan_chan_list_params *params;
+ struct list_head local_update_list;
+ int left;
+
+ INIT_LIST_HEAD(&local_update_list);
+
+ spin_lock_bh(&ar->data_lock);
+ list_splice_tail_init(&ar->channel_update_queue, &local_update_list);
+ spin_unlock_bh(&ar->data_lock);
+
+ while ((params = list_first_entry_or_null(&local_update_list,
+ struct scan_chan_list_params,
+ list))) {
+ if (ar->state_11d != ATH11K_11D_IDLE) {
+ left = wait_for_completion_timeout(&ar->completed_11d_scan,
+ ATH11K_SCAN_TIMEOUT_HZ);
+ if (!left) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "failed to receive 11d scan complete: timed out\n");
+ ar->state_11d = ATH11K_11D_IDLE;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "reg 11d scan wait left time %d\n", left);
+ }
+
+ if ((ar->scan.state == ATH11K_SCAN_STARTING ||
+ ar->scan.state == ATH11K_SCAN_RUNNING)) {
+ left = wait_for_completion_timeout(&ar->scan.completed,
+ ATH11K_SCAN_TIMEOUT_HZ);
+ if (!left)
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "failed to receive hw scan complete: timed out\n");
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "reg hw scan wait left time %d\n", left);
+ }
+
+ ath11k_wmi_send_scan_chan_list_cmd(ar, params);
+ list_del(&params->list);
+ kfree(params);
+ }
+}
+
static bool ath11k_reg_is_world_alpha(char *alpha)
{
if (alpha[0] == '0' && alpha[1] == '0')
@@ -977,6 +1017,7 @@ void ath11k_regd_update_work(struct work_struct *work)
void ath11k_reg_init(struct ath11k *ar)
{
ar->hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER;
ar->hw->wiphy->reg_notifier = ath11k_reg_notifier;
}
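Editor's note, not part of the patch: the reg.c rework removes the blocking scan waits from ath11k_reg_update_chan_list() and instead queues the channel-list request on a spinlock-protected list that channel_update_work later drains, splicing the whole queue out under the lock and then sending (and possibly sleeping) outside it. A minimal sketch of that producer/worker pattern with generic names; ctx->lock, ctx->queue and ctx->work are assumed to have been initialised with spin_lock_init()/INIT_LIST_HEAD()/INIT_WORK().

/* Illustrative sketch of the queue-and-drain pattern behind
 * channel_update_queue/channel_update_work.
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct example_req {
	struct list_head list;
	/* payload */
};

struct example_ctx {
	spinlock_t lock;
	struct list_head queue;
	struct work_struct work;
};

static void example_enqueue(struct example_ctx *ctx, struct example_req *req)
{
	spin_lock_bh(&ctx->lock);
	list_add_tail(&req->list, &ctx->queue);
	spin_unlock_bh(&ctx->lock);

	schedule_work(&ctx->work);	/* the driver queues on its own workqueue */
}

static void example_worker(struct work_struct *work)
{
	struct example_ctx *ctx = container_of(work, struct example_ctx, work);
	struct example_req *req;
	LIST_HEAD(local);

	/* move the whole queue out under the lock, then process without it */
	spin_lock_bh(&ctx->lock);
	list_splice_tail_init(&ctx->queue, &local);
	spin_unlock_bh(&ctx->lock);

	while ((req = list_first_entry_or_null(&local, struct example_req, list))) {
		list_del(&req->list);
		/* ... send the request, possibly sleeping ... */
		kfree(req);
	}
}
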
diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h
index 263ea9061948..72b483594015 100644
--- a/drivers/net/wireless/ath/ath11k/reg.h
+++ b/drivers/net/wireless/ath/ath11k/reg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_REG_H
@@ -33,6 +33,7 @@ void ath11k_reg_init(struct ath11k *ar);
void ath11k_reg_reset_info(struct cur_regulatory_info *reg_info);
void ath11k_reg_free(struct ath11k_base *ab);
void ath11k_regd_update_work(struct work_struct *work);
+void ath11k_regd_update_chan_list_work(struct work_struct *work);
struct ieee80211_regdomain *
ath11k_reg_build_regd(struct ath11k_base *ab,
struct cur_regulatory_info *reg_info, bool intersect,
diff --git a/drivers/net/wireless/ath/ath11k/testmode.c b/drivers/net/wireless/ath/ath11k/testmode.c
index 302d66092b97..9be1cd742339 100644
--- a/drivers/net/wireless/ath/ath11k/testmode.c
+++ b/drivers/net/wireless/ath/ath11k/testmode.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "testmode.h"
@@ -10,18 +10,18 @@
#include "wmi.h"
#include "hw.h"
#include "core.h"
-#include "testmode_i.h"
+#include "../testmode_i.h"
#define ATH11K_FTM_SEGHDR_CURRENT_SEQ GENMASK(3, 0)
#define ATH11K_FTM_SEGHDR_TOTAL_SEGMENTS GENMASK(7, 4)
-static const struct nla_policy ath11k_tm_policy[ATH11K_TM_ATTR_MAX + 1] = {
- [ATH11K_TM_ATTR_CMD] = { .type = NLA_U32 },
- [ATH11K_TM_ATTR_DATA] = { .type = NLA_BINARY,
- .len = ATH11K_TM_DATA_MAX_LEN },
- [ATH11K_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 },
- [ATH11K_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 },
- [ATH11K_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 },
+static const struct nla_policy ath11k_tm_policy[ATH_TM_ATTR_MAX + 1] = {
+ [ATH_TM_ATTR_CMD] = { .type = NLA_U32 },
+ [ATH_TM_ATTR_DATA] = { .type = NLA_BINARY,
+ .len = ATH_TM_DATA_MAX_LEN },
+ [ATH_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 },
+ [ATH_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 },
+ [ATH_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 },
};
static struct ath11k *ath11k_tm_get_ar(struct ath11k_base *ab)
@@ -73,9 +73,9 @@ static void ath11k_tm_wmi_event_unsegmented(struct ath11k_base *ab, u32 cmd_id,
goto out;
}
- if (nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD, ATH11K_TM_CMD_WMI) ||
- nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id) ||
- nla_put(nl_skb, ATH11K_TM_ATTR_DATA, skb->len, skb->data)) {
+ if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD, ATH_TM_CMD_WMI) ||
+ nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) ||
+ nla_put(nl_skb, ATH_TM_ATTR_DATA, skb->len, skb->data)) {
ath11k_warn(ab, "failed to populate testmode unsegmented event\n");
kfree_skb(nl_skb);
goto out;
@@ -140,7 +140,7 @@ static int ath11k_tm_process_event(struct ath11k_base *ab, u32 cmd_id,
data_pos = ab->testmode.data_pos;
- if ((data_pos + datalen) > ATH11K_FTM_EVENT_MAX_BUF_LENGTH) {
+ if ((data_pos + datalen) > ATH_FTM_EVENT_MAX_BUF_LENGTH) {
ath11k_warn(ab, "Invalid ftm event length at %d: %d\n",
data_pos, datalen);
ret = -EINVAL;
@@ -172,10 +172,10 @@ static int ath11k_tm_process_event(struct ath11k_base *ab, u32 cmd_id,
goto out;
}
- if (nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD,
- ATH11K_TM_CMD_WMI_FTM) ||
- nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id) ||
- nla_put(nl_skb, ATH11K_TM_ATTR_DATA, data_pos,
+ if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD,
+ ATH_TM_CMD_WMI_FTM) ||
+ nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) ||
+ nla_put(nl_skb, ATH_TM_ATTR_DATA, data_pos,
&ab->testmode.eventdata[0])) {
ath11k_warn(ab, "failed to populate segmented testmode event");
kfree_skb(nl_skb);
@@ -235,23 +235,23 @@ static int ath11k_tm_cmd_get_version(struct ath11k *ar, struct nlattr *tb[])
ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
"cmd get version_major %d version_minor %d\n",
- ATH11K_TESTMODE_VERSION_MAJOR,
- ATH11K_TESTMODE_VERSION_MINOR);
+ ATH_TESTMODE_VERSION_MAJOR,
+ ATH_TESTMODE_VERSION_MINOR);
skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy,
nla_total_size(sizeof(u32)));
if (!skb)
return -ENOMEM;
- ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MAJOR,
- ATH11K_TESTMODE_VERSION_MAJOR);
+ ret = nla_put_u32(skb, ATH_TM_ATTR_VERSION_MAJOR,
+ ATH_TESTMODE_VERSION_MAJOR);
if (ret) {
kfree_skb(skb);
return ret;
}
- ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MINOR,
- ATH11K_TESTMODE_VERSION_MINOR);
+ ret = nla_put_u32(skb, ATH_TM_ATTR_VERSION_MINOR,
+ ATH_TESTMODE_VERSION_MINOR);
if (ret) {
kfree_skb(skb);
return ret;
@@ -277,7 +277,7 @@ static int ath11k_tm_cmd_testmode_start(struct ath11k *ar, struct nlattr *tb[])
goto err;
}
- ar->ab->testmode.eventdata = kzalloc(ATH11K_FTM_EVENT_MAX_BUF_LENGTH,
+ ar->ab->testmode.eventdata = kzalloc(ATH_FTM_EVENT_MAX_BUF_LENGTH,
GFP_KERNEL);
if (!ar->ab->testmode.eventdata) {
ret = -ENOMEM;
@@ -310,25 +310,25 @@ static int ath11k_tm_cmd_wmi(struct ath11k *ar, struct nlattr *tb[],
mutex_lock(&ar->conf_mutex);
- if (!tb[ATH11K_TM_ATTR_DATA]) {
+ if (!tb[ATH_TM_ATTR_DATA]) {
ret = -EINVAL;
goto out;
}
- if (!tb[ATH11K_TM_ATTR_WMI_CMDID]) {
+ if (!tb[ATH_TM_ATTR_WMI_CMDID]) {
ret = -EINVAL;
goto out;
}
- buf = nla_data(tb[ATH11K_TM_ATTR_DATA]);
- buf_len = nla_len(tb[ATH11K_TM_ATTR_DATA]);
+ buf = nla_data(tb[ATH_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[ATH_TM_ATTR_DATA]);
if (!buf_len) {
ath11k_warn(ar->ab, "No data present in testmode wmi command\n");
ret = -EINVAL;
goto out;
}
- cmd_id = nla_get_u32(tb[ATH11K_TM_ATTR_WMI_CMDID]);
+ cmd_id = nla_get_u32(tb[ATH_TM_ATTR_WMI_CMDID]);
/* Make sure that the buffer length is long enough to
* hold TLV and pdev/vdev id.
@@ -409,13 +409,13 @@ static int ath11k_tm_cmd_wmi_ftm(struct ath11k *ar, struct nlattr *tb[])
goto out;
}
- if (!tb[ATH11K_TM_ATTR_DATA]) {
+ if (!tb[ATH_TM_ATTR_DATA]) {
ret = -EINVAL;
goto out;
}
- buf = nla_data(tb[ATH11K_TM_ATTR_DATA]);
- buf_len = nla_len(tb[ATH11K_TM_ATTR_DATA]);
+ buf = nla_data(tb[ATH_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[ATH_TM_ATTR_DATA]);
cmd_id = WMI_PDEV_UTF_CMDID;
ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
@@ -476,25 +476,25 @@ int ath11k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len)
{
struct ath11k *ar = hw->priv;
- struct nlattr *tb[ATH11K_TM_ATTR_MAX + 1];
+ struct nlattr *tb[ATH_TM_ATTR_MAX + 1];
int ret;
- ret = nla_parse(tb, ATH11K_TM_ATTR_MAX, data, len, ath11k_tm_policy,
+ ret = nla_parse(tb, ATH_TM_ATTR_MAX, data, len, ath11k_tm_policy,
NULL);
if (ret)
return ret;
- if (!tb[ATH11K_TM_ATTR_CMD])
+ if (!tb[ATH_TM_ATTR_CMD])
return -EINVAL;
- switch (nla_get_u32(tb[ATH11K_TM_ATTR_CMD])) {
- case ATH11K_TM_CMD_GET_VERSION:
+ switch (nla_get_u32(tb[ATH_TM_ATTR_CMD])) {
+ case ATH_TM_CMD_GET_VERSION:
return ath11k_tm_cmd_get_version(ar, tb);
- case ATH11K_TM_CMD_WMI:
+ case ATH_TM_CMD_WMI:
return ath11k_tm_cmd_wmi(ar, tb, vif);
- case ATH11K_TM_CMD_TESTMODE_START:
+ case ATH_TM_CMD_TESTMODE_START:
return ath11k_tm_cmd_testmode_start(ar, tb);
- case ATH11K_TM_CMD_WMI_FTM:
+ case ATH_TM_CMD_WMI_FTM:
return ath11k_tm_cmd_wmi_ftm(ar, tb);
default:
return -EOPNOTSUPP;
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 87abfa547529..d7f852bebf4a 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
@@ -2662,7 +2662,8 @@ int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
}
int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
- struct wmi_wmm_params_all_arg *param)
+ struct wmi_wmm_params_all_arg *param,
+ enum wmi_wmm_params_type wmm_param_type)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_vdev_set_wmm_params_cmd *cmd;
@@ -2681,7 +2682,7 @@ int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
- cmd->wmm_param_type = 0;
+ cmd->wmm_param_type = wmm_param_type;
for (ac = 0; ac < WME_NUM_AC; ac++) {
switch (ac) {
@@ -2714,8 +2715,8 @@ int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
wmm_param->no_ack = wmi_wmm_arg->no_ack;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
- "wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
- ac, wmm_param->aifs, wmm_param->cwmin,
+ "wmm set type %d ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
+ wmm_param_type, ac, wmm_param->aifs, wmm_param->cwmin,
wmm_param->cwmax, wmm_param->txoplimit,
wmm_param->acm, wmm_param->no_ack);
}
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index 8982b909c821..9fcffaa2f383 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_WMI_H
@@ -3817,6 +3817,7 @@ struct wmi_stop_scan_cmd {
};
struct scan_chan_list_params {
+ struct list_head list;
u32 pdev_id;
u16 nallchans;
struct channel_param ch_param[];
@@ -6346,6 +6347,11 @@ enum wmi_sta_keepalive_method {
#define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT 30
#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0
+enum wmi_wmm_params_type {
+ WMI_WMM_PARAM_TYPE_LEGACY = 0,
+ WMI_WMM_PARAM_TYPE_11AX_MU_EDCA = 1,
+};
+
const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab,
struct sk_buff *skb, gfp_t gfp);
int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
@@ -6402,7 +6408,8 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar,
struct scan_cancel_param *param);
int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
- struct wmi_wmm_params_all_arg *param);
+ struct wmi_wmm_params_all_arg *param,
+ enum wmi_wmm_params_type wmm_param_type);
int ath11k_wmi_pdev_suspend(struct ath11k *ar, u32 suspend_opt,
u32 pdev_id);
int ath11k_wmi_pdev_resume(struct ath11k *ar, u32 pdev_id);
diff --git a/drivers/net/wireless/ath/ath12k/Makefile b/drivers/net/wireless/ath/ath12k/Makefile
index b5bb3e2599cd..60644cb42c76 100644
--- a/drivers/net/wireless/ath/ath12k/Makefile
+++ b/drivers/net/wireless/ath/ath12k/Makefile
@@ -23,11 +23,12 @@ ath12k-y += core.o \
fw.o \
p2p.o
-ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o debugfs_htt_stats.o
+ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o
ath12k-$(CONFIG_ACPI) += acpi.o
ath12k-$(CONFIG_ATH12K_TRACING) += trace.o
ath12k-$(CONFIG_PM) += wow.o
ath12k-$(CONFIG_ATH12K_COREDUMP) += coredump.o
+ath12k-$(CONFIG_NL80211_TESTMODE) += testmode.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath12k/acpi.c b/drivers/net/wireless/ath/ath12k/acpi.c
index 0555d35aab47..d81367ce6929 100644
--- a/drivers/net/wireless/ath/ath12k/acpi.c
+++ b/drivers/net/wireless/ath/ath12k/acpi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -12,7 +12,7 @@ static int ath12k_acpi_dsm_get_data(struct ath12k_base *ab, int func)
{
union acpi_object *obj;
acpi_handle root_handle;
- int ret;
+ int ret, i;
root_handle = ACPI_HANDLE(ab->dev);
if (!root_handle) {
@@ -29,9 +29,48 @@ static int ath12k_acpi_dsm_get_data(struct ath12k_base *ab, int func)
}
if (obj->type == ACPI_TYPE_INTEGER) {
- ab->acpi.func_bit = obj->integer.value;
+ switch (func) {
+ case ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS:
+ ab->acpi.func_bit = obj->integer.value;
+ break;
+ case ATH12K_ACPI_DSM_FUNC_DISABLE_FLAG:
+ ab->acpi.bit_flag = obj->integer.value;
+ break;
+ }
+ } else if (obj->type == ACPI_TYPE_STRING) {
+ switch (func) {
+ case ATH12K_ACPI_DSM_FUNC_BDF_EXT:
+ if (obj->string.length <= ATH12K_ACPI_BDF_ANCHOR_STRING_LEN ||
+ obj->string.length > ATH12K_ACPI_BDF_MAX_LEN ||
+ memcmp(obj->string.pointer, ATH12K_ACPI_BDF_ANCHOR_STRING,
+ ATH12K_ACPI_BDF_ANCHOR_STRING_LEN)) {
+ ath12k_warn(ab, "invalid ACPI DSM BDF size: %d\n",
+ obj->string.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(ab->acpi.bdf_string, obj->string.pointer,
+ obj->buffer.length);
+
+ break;
+ }
} else if (obj->type == ACPI_TYPE_BUFFER) {
switch (func) {
+ case ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS:
+ if (obj->buffer.length < ATH12K_ACPI_DSM_FUNC_MIN_BITMAP_SIZE ||
+ obj->buffer.length > ATH12K_ACPI_DSM_FUNC_MAX_BITMAP_SIZE) {
+ ath12k_warn(ab, "invalid ACPI DSM func size: %d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ab->acpi.func_bit = 0;
+ for (i = 0; i < obj->buffer.length; i++)
+ ab->acpi.func_bit += obj->buffer.pointer[i] << (i * 8);
+
+ break;
case ATH12K_ACPI_DSM_FUNC_TAS_CFG:
if (obj->buffer.length != ATH12K_ACPI_DSM_TAS_CFG_SIZE) {
ath12k_warn(ab, "invalid ACPI DSM TAS config size: %d\n",
@@ -247,24 +286,118 @@ static int ath12k_acpi_set_tas_params(struct ath12k_base *ab)
return 0;
}
+bool ath12k_acpi_get_disable_rfkill(struct ath12k_base *ab)
+{
+ return ab->acpi.acpi_disable_rfkill;
+}
+
+bool ath12k_acpi_get_disable_11be(struct ath12k_base *ab)
+{
+ return ab->acpi.acpi_disable_11be;
+}
+
+void ath12k_acpi_set_dsm_func(struct ath12k_base *ab)
+{
+ int ret;
+ u8 *buf;
+
+ if (!ab->hw_params->acpi_guid)
+ /* not supported with this hardware */
+ return;
+
+ if (ab->acpi.acpi_tas_enable) {
+ ret = ath12k_acpi_set_tas_params(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to send ACPI TAS parameters: %d\n", ret);
+ return;
+ }
+ }
+
+ if (ab->acpi.acpi_bios_sar_enable) {
+ ret = ath12k_acpi_set_bios_sar_params(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to send ACPI BIOS SAR: %d\n", ret);
+ return;
+ }
+ }
+
+ if (ab->acpi.acpi_cca_enable) {
+ buf = ab->acpi.cca_data + ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET;
+ ret = ath12k_wmi_set_bios_cmd(ab,
+ WMI_BIOS_PARAM_CCA_THRESHOLD_TYPE,
+ buf,
+ ATH12K_ACPI_CCA_THR_OFFSET_LEN);
+ if (ret) {
+ ath12k_warn(ab, "failed to set ACPI DSM CCA threshold: %d\n",
+ ret);
+ return;
+ }
+ }
+
+ if (ab->acpi.acpi_band_edge_enable) {
+ ret = ath12k_wmi_set_bios_cmd(ab,
+ WMI_BIOS_PARAM_TYPE_BANDEDGE,
+ ab->acpi.band_edge_power,
+ sizeof(ab->acpi.band_edge_power));
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to set ACPI DSM band edge channel power: %d\n",
+ ret);
+ return;
+ }
+ }
+}
+
int ath12k_acpi_start(struct ath12k_base *ab)
{
acpi_status status;
- u8 *buf;
int ret;
+ ab->acpi.acpi_tas_enable = false;
+ ab->acpi.acpi_disable_11be = false;
+ ab->acpi.acpi_disable_rfkill = false;
+ ab->acpi.acpi_bios_sar_enable = false;
+ ab->acpi.acpi_cca_enable = false;
+ ab->acpi.acpi_band_edge_enable = false;
+ ab->acpi.acpi_enable_bdf = false;
+ ab->acpi.bdf_string[0] = '\0';
+
if (!ab->hw_params->acpi_guid)
/* not supported with this hardware */
return 0;
- ab->acpi.acpi_tas_enable = false;
-
ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS);
if (ret) {
ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to get ACPI DSM data: %d\n", ret);
return ret;
}
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_DISABLE_FLAG)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_DISABLE_FLAG);
+ if (ret) {
+ ath12k_warn(ab, "failed to get ACPI DISABLE FLAG: %d\n", ret);
+ return ret;
+ }
+
+ if (ATH12K_ACPI_CHEK_BIT_VALID(ab->acpi,
+ ATH12K_ACPI_DSM_DISABLE_11BE_BIT))
+ ab->acpi.acpi_disable_11be = true;
+
+ if (!ATH12K_ACPI_CHEK_BIT_VALID(ab->acpi,
+ ATH12K_ACPI_DSM_DISABLE_RFKILL_BIT))
+ ab->acpi.acpi_disable_rfkill = true;
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_BDF_EXT)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_BDF_EXT);
+ if (ret || ab->acpi.bdf_string[0] == '\0') {
+ ath12k_warn(ab, "failed to get ACPI BDF EXT: %d\n", ret);
+ return ret;
+ }
+
+ ab->acpi.acpi_enable_bdf = true;
+ }
+
if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_TAS_CFG)) {
ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_TAS_CFG);
if (ret) {
@@ -308,20 +441,6 @@ int ath12k_acpi_start(struct ath12k_base *ab)
ab->acpi.acpi_bios_sar_enable = true;
}
- if (ab->acpi.acpi_tas_enable) {
- ret = ath12k_acpi_set_tas_params(ab);
- if (ret) {
- ath12k_warn(ab, "failed to send ACPI parameters: %d\n", ret);
- return ret;
- }
- }
-
- if (ab->acpi.acpi_bios_sar_enable) {
- ret = ath12k_acpi_set_bios_sar_params(ab);
- if (ret)
- return ret;
- }
-
if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_CCA)) {
ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_INDEX_CCA);
if (ret) {
@@ -332,18 +451,8 @@ int ath12k_acpi_start(struct ath12k_base *ab)
if (ab->acpi.cca_data[0] == ATH12K_ACPI_CCA_THR_VERSION &&
ab->acpi.cca_data[ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET] ==
- ATH12K_ACPI_CCA_THR_ENABLE_FLAG) {
- buf = ab->acpi.cca_data + ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET;
- ret = ath12k_wmi_set_bios_cmd(ab,
- WMI_BIOS_PARAM_CCA_THRESHOLD_TYPE,
- buf,
- ATH12K_ACPI_CCA_THR_OFFSET_LEN);
- if (ret) {
- ath12k_warn(ab, "failed to set ACPI DSM CCA threshold: %d\n",
- ret);
- return ret;
- }
- }
+ ATH12K_ACPI_CCA_THR_ENABLE_FLAG)
+ ab->acpi.acpi_cca_enable = true;
}
if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi,
@@ -356,18 +465,8 @@ int ath12k_acpi_start(struct ath12k_base *ab)
}
if (ab->acpi.band_edge_power[0] == ATH12K_ACPI_BAND_EDGE_VERSION &&
- ab->acpi.band_edge_power[1] == ATH12K_ACPI_BAND_EDGE_ENABLE_FLAG) {
- ret = ath12k_wmi_set_bios_cmd(ab,
- WMI_BIOS_PARAM_TYPE_BANDEDGE,
- ab->acpi.band_edge_power,
- sizeof(ab->acpi.band_edge_power));
- if (ret) {
- ath12k_warn(ab,
- "failed to set ACPI DSM band edge channel power: %d\n",
- ret);
- return ret;
- }
- }
+ ab->acpi.band_edge_power[1] == ATH12K_ACPI_BAND_EDGE_ENABLE_FLAG)
+ ab->acpi.acpi_band_edge_enable = true;
}
status = acpi_install_notify_handler(ACPI_HANDLE(ab->dev),
@@ -383,6 +482,21 @@ int ath12k_acpi_start(struct ath12k_base *ab)
return 0;
}
+int ath12k_acpi_check_bdf_variant_name(struct ath12k_base *ab)
+{
+ size_t max_len = sizeof(ab->qmi.target.bdf_ext);
+
+ if (!ab->acpi.acpi_enable_bdf)
+ return -ENODATA;
+
+ if (strscpy(ab->qmi.target.bdf_ext, ab->acpi.bdf_string + 4, max_len) < 0)
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "acpi bdf variant longer than the buffer (variant: %s)\n",
+ ab->acpi.bdf_string);
+
+ return 0;
+}
+
void ath12k_acpi_stop(struct ath12k_base *ab)
{
if (!ab->acpi.started)
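
A rough standalone sketch of what the buffer-to-bitmap handling in the first acpi.c hunk amounts to (the helper name and stdint types are illustrative, not part of the patch; the driver does the same assembly inline, with the byte count already validated against ATH12K_ACPI_DSM_FUNC_MIN/MAX_BITMAP_SIZE):

#include <stdint.h>
#include <stddef.h>

/* Assemble a little-endian function bitmap from the raw ACPI buffer
 * payload, byte 0 being the least significant byte. Equivalent to the
 * loop added to ath12k_acpi_dsm_get_data() for the buffer case.
 */
static uint32_t acpi_buf_to_func_bit(const uint8_t *buf, size_t len)
{
	uint32_t func_bit = 0;
	size_t i;

	for (i = 0; i < len; i++)
		func_bit |= (uint32_t)buf[i] << (i * 8);

	return func_bit;
}

The driver uses += rather than |=; the result is the same because each byte lands in its own 8-bit slot.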
diff --git a/drivers/net/wireless/ath/ath12k/acpi.h b/drivers/net/wireless/ath/ath12k/acpi.h
index 39e003d86a48..3a26fea6af1a 100644
--- a/drivers/net/wireless/ath/ath12k/acpi.h
+++ b/drivers/net/wireless/ath/ath12k/acpi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_ACPI_H
#define ATH12K_ACPI_H
@@ -9,6 +9,8 @@
#include <linux/acpi.h>
#define ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS 0
+#define ATH12K_ACPI_DSM_FUNC_DISABLE_FLAG 2
+#define ATH12K_ACPI_DSM_FUNC_BDF_EXT 3
#define ATH12K_ACPI_DSM_FUNC_BIOS_SAR 4
#define ATH12K_ACPI_DSM_FUNC_GEO_OFFSET 5
#define ATH12K_ACPI_DSM_FUNC_INDEX_CCA 6
@@ -16,6 +18,8 @@
#define ATH12K_ACPI_DSM_FUNC_TAS_DATA 9
#define ATH12K_ACPI_DSM_FUNC_INDEX_BAND_EDGE 10
+#define ATH12K_ACPI_FUNC_BIT_DISABLE_FLAG BIT(1)
+#define ATH12K_ACPI_FUNC_BIT_BDF_EXT BIT(2)
#define ATH12K_ACPI_FUNC_BIT_BIOS_SAR BIT(3)
#define ATH12K_ACPI_FUNC_BIT_GEO_OFFSET BIT(4)
#define ATH12K_ACPI_FUNC_BIT_CCA BIT(5)
@@ -25,6 +29,7 @@
#define ATH12K_ACPI_NOTIFY_EVENT 0x86
#define ATH12K_ACPI_FUNC_BIT_VALID(_acdata, _func) (((_acdata).func_bit) & (_func))
+#define ATH12K_ACPI_CHEK_BIT_VALID(_acdata, _func) (((_acdata).bit_flag) & (_func))
#define ATH12K_ACPI_TAS_DATA_VERSION 0x1
#define ATH12K_ACPI_TAS_DATA_ENABLE 0x1
@@ -48,6 +53,16 @@
#define ATH12K_ACPI_DSM_BAND_EDGE_DATA_SIZE 100
#define ATH12K_ACPI_DSM_TAS_CFG_SIZE 108
+#define ATH12K_ACPI_DSM_FUNC_MIN_BITMAP_SIZE 1
+#define ATH12K_ACPI_DSM_FUNC_MAX_BITMAP_SIZE 4
+
+#define ATH12K_ACPI_DSM_DISABLE_11BE_BIT BIT(0)
+#define ATH12K_ACPI_DSM_DISABLE_RFKILL_BIT BIT(2)
+
+#define ATH12K_ACPI_BDF_ANCHOR_STRING_LEN 3
+#define ATH12K_ACPI_BDF_ANCHOR_STRING "BDF"
+#define ATH12K_ACPI_BDF_MAX_LEN 100
+
#define ATH12K_ACPI_DSM_GEO_OFFSET_DATA_SIZE (ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET + \
ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN)
#define ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE (ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET + \
@@ -59,6 +74,10 @@
int ath12k_acpi_start(struct ath12k_base *ab);
void ath12k_acpi_stop(struct ath12k_base *ab);
+bool ath12k_acpi_get_disable_rfkill(struct ath12k_base *ab);
+bool ath12k_acpi_get_disable_11be(struct ath12k_base *ab);
+void ath12k_acpi_set_dsm_func(struct ath12k_base *ab);
+int ath12k_acpi_check_bdf_variant_name(struct ath12k_base *ab);
#else
@@ -71,6 +90,25 @@ static inline void ath12k_acpi_stop(struct ath12k_base *ab)
{
}
+static inline bool ath12k_acpi_get_disable_rfkill(struct ath12k_base *ab)
+{
+ return false;
+}
+
+static inline bool ath12k_acpi_get_disable_11be(struct ath12k_base *ab)
+{
+ return false;
+}
+
+static inline void ath12k_acpi_set_dsm_func(struct ath12k_base *ab)
+{
+}
+
+static inline int ath12k_acpi_check_bdf_variant_name(struct ath12k_base *ab)
+{
+ return 0;
+}
+
#endif /* CONFIG_ACPI */
#endif /* ATH12K_ACPI_H */
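
A hedged sketch of how the new flag plumbing composes, using only the macros defined above (the struct and helper below are illustrative, not driver code; note that ath12k_acpi_start() treats the rfkill bit as active-low, i.e. rfkill handling is disabled when the bit is clear):

/* Assumes acpi.h is included so the ATH12K_ACPI_* macros are visible. */
struct example_acpi_flags {
	u32 func_bit;	/* from DSM function 0 (supported functions) */
	u32 bit_flag;	/* from DSM function 2 (disable flags) */
};

static bool example_acpi_11be_disabled(const struct example_acpi_flags *f)
{
	/* Function 2 must be advertised before its bits are meaningful */
	if (!(f->func_bit & ATH12K_ACPI_FUNC_BIT_DISABLE_FLAG))
		return false;

	return !!(f->bit_flag & ATH12K_ACPI_DSM_DISABLE_11BE_BIT);
}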
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
index 0606116d6b9c..0b2dec081c6e 100644
--- a/drivers/net/wireless/ath/ath12k/core.c
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -23,6 +23,10 @@ unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
+bool ath12k_ftm_mode;
+module_param_named(ftm_mode, ath12k_ftm_mode, bool, 0444);
+MODULE_PARM_DESC(ftm_mode, "Boots up in factory test mode");
+
/* protected with ath12k_hw_group_mutex */
static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list);
@@ -36,6 +40,9 @@ static int ath12k_core_rfkill_config(struct ath12k_base *ab)
if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL))
return 0;
+ if (ath12k_acpi_get_disable_rfkill(ab))
+ return 0;
+
for (i = 0; i < ab->num_radios; i++) {
ar = ab->pdevs[i].ar;
@@ -173,7 +180,7 @@ EXPORT_SYMBOL(ath12k_core_resume);
static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
size_t name_len, bool with_variant,
- bool bus_type_mode)
+ bool bus_type_mode, bool with_default)
{
/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
@@ -204,7 +211,9 @@ static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
"bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
ath12k_bus_str(ab->hif.bus),
ab->qmi.target.chip_id,
- ab->qmi.target.board_id, variant);
+ with_default ?
+ ATH12K_BOARD_ID_DEFAULT : ab->qmi.target.board_id,
+ variant);
break;
}
@@ -216,19 +225,19 @@ static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
size_t name_len)
{
- return __ath12k_core_create_board_name(ab, name, name_len, true, false);
+ return __ath12k_core_create_board_name(ab, name, name_len, true, false, false);
}
static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name,
size_t name_len)
{
- return __ath12k_core_create_board_name(ab, name, name_len, false, false);
+ return __ath12k_core_create_board_name(ab, name, name_len, false, false, true);
}
static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name,
size_t name_len)
{
- return __ath12k_core_create_board_name(ab, name, name_len, false, true);
+ return __ath12k_core_create_board_name(ab, name, name_len, false, true, true);
}
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
@@ -693,6 +702,11 @@ static int ath12k_core_soc_create(struct ath12k_base *ab)
{
int ret;
+ if (ath12k_ftm_mode) {
+ ab->fw_mode = ATH12K_FIRMWARE_MODE_FTM;
+ ath12k_info(ab, "Booting in ftm mode\n");
+ }
+
ret = ath12k_qmi_init_service(ab);
if (ret) {
ath12k_err(ab, "failed to initialize qmi :%d\n", ret);
@@ -741,8 +755,7 @@ static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
ath12k_dp_pdev_free(ab);
}
-static int ath12k_core_start(struct ath12k_base *ab,
- enum ath12k_firmware_mode mode)
+static int ath12k_core_start(struct ath12k_base *ab)
{
int ret;
@@ -836,10 +849,7 @@ static int ath12k_core_start(struct ath12k_base *ab,
goto err_reo_cleanup;
}
- ret = ath12k_acpi_start(ab);
- if (ret)
- /* ACPI is optional so continue in case of an error */
- ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi failed: %d\n", ret);
+ ath12k_acpi_set_dsm_func(ab);
if (!test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags))
/* Indicate the core start in the appropriate group */
@@ -887,10 +897,41 @@ static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
ath12k_mac_destroy(ag);
}
+u8 ath12k_get_num_partner_link(struct ath12k *ar)
+{
+ struct ath12k_base *partner_ab, *ab = ar->ab;
+ struct ath12k_hw_group *ag = ab->ag;
+ struct ath12k_pdev *pdev;
+ u8 num_link = 0;
+ int i, j;
+
+ lockdep_assert_held(&ag->mutex);
+
+ for (i = 0; i < ag->num_devices; i++) {
+ partner_ab = ag->ab[i];
+
+ for (j = 0; j < partner_ab->num_radios; j++) {
+ pdev = &partner_ab->pdevs[j];
+
+ /* Avoid the self link */
+ if (ar == pdev->ar)
+ continue;
+
+ num_link++;
+ }
+ }
+
+ return num_link;
+}
+
static int __ath12k_mac_mlo_ready(struct ath12k *ar)
{
+ u8 num_link = ath12k_get_num_partner_link(ar);
int ret;
+ if (num_link == 0)
+ return 0;
+
ret = ath12k_wmi_mlo_ready(ar);
if (ret) {
ath12k_err(ar->ab, "MLO ready failed for pdev %d: %d\n",
@@ -920,19 +961,18 @@ int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag)
ar = &ah->radio[j];
ret = __ath12k_mac_mlo_ready(ar);
if (ret)
- goto out;
+ return ret;
}
}
-out:
- return ret;
+ return 0;
}
static int ath12k_core_mlo_setup(struct ath12k_hw_group *ag)
{
int ret, i;
- if (!ag->mlo_capable || ag->num_devices == 1)
+ if (!ag->mlo_capable)
return 0;
ret = ath12k_mac_mlo_setup(ag);
@@ -1068,7 +1108,7 @@ int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
int ret, i;
- ret = ath12k_core_start_firmware(ab, ATH12K_FIRMWARE_MODE_NORMAL);
+ ret = ath12k_core_start_firmware(ab, ab->fw_mode);
if (ret) {
ath12k_err(ab, "failed to start firmware: %d\n", ret);
return ret;
@@ -1089,7 +1129,7 @@ int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
mutex_lock(&ag->mutex);
mutex_lock(&ab->core_lock);
- ret = ath12k_core_start(ab, ATH12K_FIRMWARE_MODE_NORMAL);
+ ret = ath12k_core_start(ab);
if (ret) {
ath12k_err(ab, "failed to start core: %d\n", ret);
goto err_dp_free;
@@ -1122,16 +1162,18 @@ err_core_stop:
ath12k_core_stop(ab);
mutex_unlock(&ab->core_lock);
}
+ mutex_unlock(&ag->mutex);
goto exit;
err_dp_free:
ath12k_dp_free(ab);
mutex_unlock(&ab->core_lock);
+ mutex_unlock(&ag->mutex);
+
err_firmware_stop:
ath12k_qmi_firmware_stop(ab);
exit:
- mutex_unlock(&ag->mutex);
return ret;
}
@@ -1239,7 +1281,8 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
for (i = 0; i < ag->num_hw; i++) {
ah = ath12k_ag_to_ah(ag, i);
- if (!ah || ah->state == ATH12K_HW_STATE_OFF)
+ if (!ah || ah->state == ATH12K_HW_STATE_OFF ||
+ ah->state == ATH12K_HW_STATE_TM)
continue;
ieee80211_stop_queues(ah->hw);
@@ -1309,6 +1352,9 @@ static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
ath12k_warn(ab,
"device is wedged, will not restart hw %d\n", i);
break;
+ case ATH12K_HW_STATE_TM:
+ ath12k_warn(ab, "fw mode reset done radio %d\n", i);
+ break;
}
mutex_unlock(&ah->hw_mutex);
@@ -1602,6 +1648,9 @@ static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *a
lockdep_assert_held(&ath12k_hw_group_mutex);
+ if (ath12k_ftm_mode)
+ goto invalid_group;
+
/* The grouping of multiple devices will be done based on device tree file.
* The platforms that do not have any valid group information would have
* each device to be part of its own invalid group.
@@ -1789,19 +1838,19 @@ void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
struct ath12k_base *ab;
int i;
+ if (ath12k_ftm_mode)
+ return;
+
lockdep_assert_held(&ag->mutex);
/* If more than one devices are grouped, then inter MLO
* functionality can work still independent of whether internally
* each device supports single_chip_mlo or not.
- * Only when there is one device, then it depends whether the
- * device can support intra chip MLO or not
+ * When there is only one device, MLO is disabled for WCN chipsets
+ * until the required driver implementation is in place.
*/
- if (ag->num_devices > 1) {
- ag->mlo_capable = true;
- } else {
+ if (ag->num_devices == 1) {
ab = ag->ab[0];
- ag->mlo_capable = ab->single_chip_mlo_supp;
/* WCN chipsets does not advertise in firmware features
* hence skip checking
@@ -1810,8 +1859,7 @@ void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
return;
}
- if (!ag->mlo_capable)
- return;
+ ag->mlo_capable = true;
for (i = 0; i < ag->num_devices; i++) {
ab = ag->ab[i];
@@ -1927,7 +1975,6 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
ab->dev = dev;
ab->hif.bus = bus;
ab->qmi.num_radios = U8_MAX;
- ab->single_chip_mlo_supp = false;
/* Device index used to identify the devices in a group.
*
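
For context on the with_default change above, a hypothetical illustration of the two lookup names the helpers now produce for a PCI device; the format string is the one used in __ath12k_core_create_board_name(), and ATH12K_BOARD_ID_DEFAULT is defined in the driver headers (its value is not shown in this diff):

/* Illustration only: primary vs. fallback board file lookup names.
 * Assumes the kernel's snprintf() and the ath12k headers are available.
 */
static void example_board_names(char *primary, char *fallback, size_t len,
				int chip_id, int board_id, const char *variant)
{
	/* primary lookup: QMI-reported board id plus optional variant */
	snprintf(primary, len, "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
		 "pci", chip_id, board_id, variant);

	/* fallback lookup: default board id and no variant suffix */
	snprintf(fallback, len, "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
		 "pci", chip_id, ATH12K_BOARD_ID_DEFAULT, "");
}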
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index ee595794a7ae..3fac4f00d383 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -15,6 +15,7 @@
#include <linux/ctype.h>
#include <linux/firmware.h>
#include <linux/panic_notifier.h>
+#include <linux/average.h>
#include "qmi.h"
#include "htc.h"
#include "wmi.h"
@@ -52,8 +53,6 @@
#define ATH12K_INVALID_HW_MAC_ID 0xFF
#define ATH12K_CONNECTION_LOSS_HZ (3 * HZ)
-#define ATH12K_RX_RATE_TABLE_NUM 320
-#define ATH12K_RX_RATE_TABLE_11AX_NUM 576
#define ATH12K_MON_TIMER_INTERVAL 10
#define ATH12K_RESET_TIMEOUT_HZ (20 * HZ)
@@ -87,6 +86,7 @@ enum wme_ac {
#define ATH12K_HT_MCS_MAX 7
#define ATH12K_VHT_MCS_MAX 9
#define ATH12K_HE_MCS_MAX 11
+#define ATH12K_EHT_MCS_MAX 15
enum ath12k_crypt_mode {
/* Only use hardware crypto engine */
@@ -141,6 +141,7 @@ struct ath12k_skb_rxcb {
u8 is_frag;
u8 tid;
u16 peer_id;
+ bool is_end_of_ppdu;
};
enum ath12k_hw_rev {
@@ -166,6 +167,7 @@ struct ath12k_ext_irq_grp {
u32 num_irq;
u32 grp_id;
u64 timestamp;
+ bool napi_enabled;
struct napi_struct napi;
struct net_device *napi_ndev;
};
@@ -235,6 +237,7 @@ enum ath12k_dev_flags {
ATH12K_FLAG_CE_IRQ_ENABLED,
ATH12K_FLAG_EXT_IRQ_ENABLED,
ATH12K_FLAG_QMI_FW_READY_COMPLETE,
+ ATH12K_FLAG_FTM_SEGMENTED,
};
struct ath12k_tx_conf {
@@ -298,6 +301,8 @@ struct ath12k_link_vif {
u8 link_id;
struct ath12k_vif *ahvif;
struct ath12k_rekey_data rekey_data;
+
+ u8 current_cntdown_counter;
};
struct ath12k_vif {
@@ -327,6 +332,7 @@ struct ath12k_vif {
u32 key_cipher;
u8 tx_encap_type;
bool ps;
+ atomic_t mcbc_gsn;
struct ath12k_link_vif deflink;
struct ath12k_link_vif __rcu *link[ATH12K_NUM_MAX_LINKS];
@@ -354,20 +360,20 @@ struct ath12k_vif_iter {
#define HAL_RX_MAX_MCS_HT 31
#define HAL_RX_MAX_MCS_VHT 9
#define HAL_RX_MAX_MCS_HE 11
+#define HAL_RX_MAX_MCS_BE 15
#define HAL_RX_MAX_NSS 8
#define HAL_RX_MAX_NUM_LEGACY_RATES 12
-#define ATH12K_RX_RATE_TABLE_11AX_NUM 576
-#define ATH12K_RX_RATE_TABLE_NUM 320
struct ath12k_rx_peer_rate_stats {
u64 ht_mcs_count[HAL_RX_MAX_MCS_HT + 1];
u64 vht_mcs_count[HAL_RX_MAX_MCS_VHT + 1];
u64 he_mcs_count[HAL_RX_MAX_MCS_HE + 1];
+ u64 be_mcs_count[HAL_RX_MAX_MCS_BE + 1];
u64 nss_count[HAL_RX_MAX_NSS];
u64 bw_count[HAL_RX_BW_MAX];
u64 gi_count[HAL_RX_GI_MAX];
u64 legacy_count[HAL_RX_MAX_NUM_LEGACY_RATES];
- u64 rx_rate[ATH12K_RX_RATE_TABLE_11AX_NUM];
+ u64 rx_rate[HAL_RX_BW_MAX][HAL_RX_GI_MAX][HAL_RX_MAX_NSS][HAL_RX_MAX_MCS_HT + 1];
};
struct ath12k_rx_peer_stats {
@@ -477,6 +483,8 @@ struct ath12k_wbm_tx_stats {
u64 wbm_tx_comp_stats[HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX];
};
+DECLARE_EWMA(avg_rssi, 10, 8)
+
struct ath12k_link_sta {
struct ath12k_link_vif *arvif;
struct ath12k_sta *ahsta;
@@ -496,10 +504,13 @@ struct ath12k_link_sta {
u64 rx_duration;
u64 tx_duration;
u8 rssi_comb;
+ struct ewma_avg_rssi avg_rssi;
u8 link_id;
struct ath12k_rx_peer_stats *rx_stats;
struct ath12k_wbm_tx_stats *wbm_tx_stats;
u32 bw_prev;
+ u32 peer_nss;
+ s8 rssi_beacon;
/* For now the assoc link will be considered primary */
bool is_assoc_link;
@@ -534,18 +545,26 @@ enum ath12k_hw_state {
ATH12K_HW_STATE_RESTARTING,
ATH12K_HW_STATE_RESTARTED,
ATH12K_HW_STATE_WEDGED,
+ ATH12K_HW_STATE_TM,
/* Add other states as required */
};
/* Antenna noise floor */
#define ATH12K_DEFAULT_NOISE_FLOOR -95
+struct ath12k_ftm_event_obj {
+ u32 data_pos;
+ u32 expected_seq;
+ u8 *eventdata;
+};
+
struct ath12k_fw_stats {
u32 pdev_id;
u32 stats_id;
struct list_head pdevs;
struct list_head vdevs;
struct list_head bcn;
+ bool fw_stats_done;
};
struct ath12k_dbg_htt_stats {
@@ -559,6 +578,12 @@ struct ath12k_debug {
struct dentry *debugfs_pdev;
struct dentry *debugfs_pdev_symlink;
struct ath12k_dbg_htt_stats htt_stats;
+ enum wmi_halphy_ctrl_path_stats_id tpc_stats_type;
+ bool tpc_request;
+ struct completion tpc_complete;
+ struct wmi_tpc_stats_arg *tpc_stats;
+ u32 rx_filter;
+ bool extd_rx_stats;
};
struct ath12k_per_peer_tx_stats {
@@ -712,8 +737,12 @@ struct ath12k {
bool nlo_enabled;
+ struct completion fw_stats_complete;
+
struct completion mlo_setup_done;
u32 mlo_setup_status;
+ u8 ftm_msgref;
+ struct ath12k_fw_stats fw_stats;
};
struct ath12k_hw {
@@ -1026,9 +1055,6 @@ struct ath12k_base {
const struct hal_rx_ops *hal_rx_ops;
- /* Denotes the whether MLO is possible within the chip */
- bool single_chip_mlo_supp;
-
struct completion restart_completed;
#ifdef CONFIG_ACPI
@@ -1038,6 +1064,13 @@ struct ath12k_base {
u32 func_bit;
bool acpi_tas_enable;
bool acpi_bios_sar_enable;
+ bool acpi_disable_11be;
+ bool acpi_disable_rfkill;
+ bool acpi_cca_enable;
+ bool acpi_band_edge_enable;
+ bool acpi_enable_bdf;
+ u32 bit_flag;
+ char bdf_string[ATH12K_ACPI_BDF_MAX_LEN];
u8 tas_cfg[ATH12K_ACPI_DSM_TAS_CFG_SIZE];
u8 tas_sar_power_table[ATH12K_ACPI_DSM_TAS_DATA_SIZE];
u8 bios_sar_data[ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE];
@@ -1052,6 +1085,8 @@ struct ath12k_base {
struct ath12k_hw_group *ag;
struct ath12k_wsi_info wsi_info;
+ enum ath12k_firmware_mode fw_mode;
+ struct ath12k_ftm_event_obj ftm_event_obj;
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
@@ -1062,6 +1097,93 @@ struct ath12k_pdev_map {
u8 pdev_idx;
};
+struct ath12k_fw_stats_vdev {
+ struct list_head list;
+
+ u32 vdev_id;
+ u32 beacon_snr;
+ u32 data_snr;
+ u32 num_tx_frames[WLAN_MAX_AC];
+ u32 num_rx_frames;
+ u32 num_tx_frames_retries[WLAN_MAX_AC];
+ u32 num_tx_frames_failures[WLAN_MAX_AC];
+ u32 num_rts_fail;
+ u32 num_rts_success;
+ u32 num_rx_err;
+ u32 num_rx_discard;
+ u32 num_tx_not_acked;
+ u32 tx_rate_history[MAX_TX_RATE_VALUES];
+ u32 beacon_rssi_history[MAX_TX_RATE_VALUES];
+};
+
+struct ath12k_fw_stats_bcn {
+ struct list_head list;
+
+ u32 vdev_id;
+ u32 tx_bcn_succ_cnt;
+ u32 tx_bcn_outage_cnt;
+};
+
+struct ath12k_fw_stats_pdev {
+ struct list_head list;
+
+ /* PDEV stats */
+ s32 ch_noise_floor;
+ u32 tx_frame_count;
+ u32 rx_frame_count;
+ u32 rx_clear_count;
+ u32 cycle_count;
+ u32 phy_err_count;
+ u32 chan_tx_power;
+ u32 ack_rx_bad;
+ u32 rts_bad;
+ u32 rts_good;
+ u32 fcs_bad;
+ u32 no_beacons;
+ u32 mib_int_count;
+
+ /* PDEV TX stats */
+ s32 comp_queued;
+ s32 comp_delivered;
+ s32 msdu_enqued;
+ s32 mpdu_enqued;
+ s32 wmm_drop;
+ s32 local_enqued;
+ s32 local_freed;
+ s32 hw_queued;
+ s32 hw_reaped;
+ s32 underrun;
+ s32 tx_abort;
+ s32 mpdus_requed;
+ u32 tx_ko;
+ u32 data_rc;
+ u32 self_triggers;
+ u32 sw_retry_failure;
+ u32 illgl_rate_phy_err;
+ u32 pdev_cont_xretry;
+ u32 pdev_tx_timeout;
+ u32 pdev_resets;
+ u32 stateless_tid_alloc_failure;
+ u32 phy_underrun;
+ u32 txop_ovf;
+
+ /* PDEV RX stats */
+ s32 mid_ppdu_route_change;
+ s32 status_rcvd;
+ s32 r0_frags;
+ s32 r1_frags;
+ s32 r2_frags;
+ s32 r3_frags;
+ s32 htt_msdus;
+ s32 htt_mpdus;
+ s32 loc_msdus;
+ s32 loc_mpdus;
+ s32 oversize_amsdu;
+ s32 phy_errs;
+ s32 phy_err_drop;
+ s32 mpdu_errs;
+};
+
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab);
int ath12k_core_pre_init(struct ath12k_base *ab);
int ath12k_core_init(struct ath12k_base *ath12k);
@@ -1084,6 +1206,7 @@ int ath12k_core_resume(struct ath12k_base *ab);
int ath12k_core_suspend(struct ath12k_base *ab);
int ath12k_core_suspend_late(struct ath12k_base *ab);
void ath12k_core_hw_group_unassign(struct ath12k_base *ab);
+u8 ath12k_get_num_partner_link(struct ath12k *ar);
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
const char *filename);
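
A brief sketch of what the rx_rate rework in core.h above implies for the accounting code elsewhere in the driver (the real update site is in the rx/monitor path, which is not part of this hunk; the helper name and the bounds check are illustrative):

/* Assumes the core.h and HAL definitions are in scope. nss is 1-based
 * in the driver, so it is shifted down by one for the array index.
 */
static void example_count_rx_rate(struct ath12k_rx_peer_rate_stats *stats,
				  u8 bw, u8 gi, u8 nss, u8 mcs)
{
	if (bw >= HAL_RX_BW_MAX || gi >= HAL_RX_GI_MAX ||
	    nss == 0 || nss > HAL_RX_MAX_NSS || mcs > HAL_RX_MAX_MCS_HT)
		return;

	stats->rx_rate[bw][gi][nss - 1][mcs]++;
}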
diff --git a/drivers/net/wireless/ath/ath12k/debug.c b/drivers/net/wireless/ath/ath12k/debug.c
index ff6eaeafa092..5ce100cd9a9d 100644
--- a/drivers/net/wireless/ath/ath12k/debug.c
+++ b/drivers/net/wireless/ath/ath12k/debug.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
@@ -63,8 +63,10 @@ void __ath12k_dbg(struct ath12k_base *ab, enum ath12k_debug_mask mask,
vaf.fmt = fmt;
vaf.va = &args;
- if (ath12k_debug_mask & mask)
+ if (likely(ab))
dev_printk(KERN_DEBUG, ab->dev, "%pV", &vaf);
+ else
+ printk(KERN_DEBUG "ath12k: %pV", &vaf);
/* TODO: trace log */
diff --git a/drivers/net/wireless/ath/ath12k/debug.h b/drivers/net/wireless/ath/ath12k/debug.h
index 90e801136bc6..48916e4e1f60 100644
--- a/drivers/net/wireless/ath/ath12k/debug.h
+++ b/drivers/net/wireless/ath/ath12k/debug.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH12K_DEBUG_H_
@@ -37,6 +37,7 @@ __printf(2, 3) void __ath12k_warn(struct device *dev, const char *fmt, ...);
#define ath12k_hw_warn(ah, fmt, ...) __ath12k_warn((ah)->dev, fmt, ##__VA_ARGS__)
extern unsigned int ath12k_debug_mask;
+extern bool ath12k_ftm_mode;
#ifdef CONFIG_ATH12K_DEBUG
__printf(3, 4) void __ath12k_dbg(struct ath12k_base *ab,
@@ -61,11 +62,14 @@ static inline void ath12k_dbg_dump(struct ath12k_base *ab,
}
#endif /* CONFIG_ATH12K_DEBUG */
-#define ath12k_dbg(ar, dbg_mask, fmt, ...) \
+#define ath12k_dbg(ab, dbg_mask, fmt, ...) \
do { \
typeof(dbg_mask) mask = (dbg_mask); \
if (ath12k_debug_mask & mask) \
- __ath12k_dbg(ar, mask, fmt, ##__VA_ARGS__); \
+ __ath12k_dbg(ab, mask, fmt, ##__VA_ARGS__); \
} while (0)
+#define ath12k_generic_dbg(dbg_mask, fmt, ...) \
+ ath12k_dbg(NULL, dbg_mask, fmt, ##__VA_ARGS__)
+
#endif /* _ATH12K_DEBUG_H_ */
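
The new ath12k_generic_dbg() helper simply calls ath12k_dbg() with a NULL ab, which the debug.c hunk above now handles by printing with a plain "ath12k:" prefix. A small usage sketch (the message and variables are made up; the mask is one the driver already uses):

/* Early code path that has no struct ath12k_base yet. The debug_mask
 * module parameter still gates the print.
 */
ath12k_generic_dbg(ATH12K_DBG_BOOT, "group %u created, num devices %u\n",
		   group_id, num_devices);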
diff --git a/drivers/net/wireless/ath/ath12k/debugfs.c b/drivers/net/wireless/ath/ath12k/debugfs.c
index d4b32d1a431c..57002215ddf1 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs.c
+++ b/drivers/net/wireless/ath/ath12k/debugfs.c
@@ -1,10 +1,12 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
+#include "dp_tx.h"
+#include "debug.h"
#include "debugfs.h"
#include "debugfs_htt_stats.h"
@@ -31,6 +33,806 @@ static const struct file_operations fops_simulate_radar = {
.open = simple_open
};
+static ssize_t ath12k_write_tpc_stats_type(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath12k *ar = file->private_data;
+ u8 type;
+ int ret;
+
+ ret = kstrtou8_from_user(user_buf, count, 0, &type);
+ if (ret)
+ return ret;
+
+ if (type >= WMI_HALPHY_PDEV_TX_STATS_MAX)
+ return -EINVAL;
+
+ spin_lock_bh(&ar->data_lock);
+ ar->debug.tpc_stats_type = type;
+ spin_unlock_bh(&ar->data_lock);
+
+ return count;
+}
+
+static int ath12k_debug_tpc_stats_request(struct ath12k *ar)
+{
+ enum wmi_halphy_ctrl_path_stats_id tpc_stats_sub_id;
+ struct ath12k_base *ab = ar->ab;
+ int ret;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ reinit_completion(&ar->debug.tpc_complete);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->debug.tpc_request = true;
+ tpc_stats_sub_id = ar->debug.tpc_stats_type;
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath12k_wmi_send_tpc_stats_request(ar, tpc_stats_sub_id);
+ if (ret) {
+ ath12k_warn(ab, "failed to request pdev tpc stats: %d\n", ret);
+ spin_lock_bh(&ar->data_lock);
+ ar->debug.tpc_request = false;
+ spin_unlock_bh(&ar->data_lock);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath12k_get_tpc_ctl_mode_idx(struct wmi_tpc_stats_arg *tpc_stats,
+ enum wmi_tpc_pream_bw pream_bw, int *mode_idx)
+{
+ u32 chan_freq = le32_to_cpu(tpc_stats->tpc_config.chan_freq);
+ u8 band;
+
+ band = ((chan_freq > ATH12K_MIN_6G_FREQ) ? NL80211_BAND_6GHZ :
+ ((chan_freq > ATH12K_MIN_5G_FREQ) ? NL80211_BAND_5GHZ :
+ NL80211_BAND_2GHZ));
+
+ if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) {
+ switch (pream_bw) {
+ case WMI_TPC_PREAM_HT20:
+ case WMI_TPC_PREAM_VHT20:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HT_VHT20_5GHZ_6GHZ;
+ break;
+ case WMI_TPC_PREAM_HE20:
+ case WMI_TPC_PREAM_EHT20:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT20_5GHZ_6GHZ;
+ break;
+ case WMI_TPC_PREAM_HT40:
+ case WMI_TPC_PREAM_VHT40:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HT_VHT40_5GHZ_6GHZ;
+ break;
+ case WMI_TPC_PREAM_HE40:
+ case WMI_TPC_PREAM_EHT40:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT40_5GHZ_6GHZ;
+ break;
+ case WMI_TPC_PREAM_VHT80:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_VHT80_5GHZ_6GHZ;
+ break;
+ case WMI_TPC_PREAM_EHT60:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT80_SU_PUNC20;
+ break;
+ case WMI_TPC_PREAM_HE80:
+ case WMI_TPC_PREAM_EHT80:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT80_5GHZ_6GHZ;
+ break;
+ case WMI_TPC_PREAM_VHT160:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_VHT160_5GHZ_6GHZ;
+ break;
+ case WMI_TPC_PREAM_EHT120:
+ case WMI_TPC_PREAM_EHT140:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT160_SU_PUNC20;
+ break;
+ case WMI_TPC_PREAM_HE160:
+ case WMI_TPC_PREAM_EHT160:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT160_5GHZ_6GHZ;
+ break;
+ case WMI_TPC_PREAM_EHT200:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC120;
+ break;
+ case WMI_TPC_PREAM_EHT240:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC80;
+ break;
+ case WMI_TPC_PREAM_EHT280:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC40;
+ break;
+ case WMI_TPC_PREAM_EHT320:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT320_5GHZ_6GHZ;
+ break;
+ default:
+ /* for 5GHZ and 6GHZ, default case will be for OFDM */
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_LEGACY_5GHZ_6GHZ;
+ break;
+ }
+ } else {
+ switch (pream_bw) {
+ case WMI_TPC_PREAM_OFDM:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_LEGACY_2GHZ;
+ break;
+ case WMI_TPC_PREAM_HT20:
+ case WMI_TPC_PREAM_VHT20:
+ case WMI_TPC_PREAM_HE20:
+ case WMI_TPC_PREAM_EHT20:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HT20_2GHZ;
+ break;
+ case WMI_TPC_PREAM_HT40:
+ case WMI_TPC_PREAM_VHT40:
+ case WMI_TPC_PREAM_HE40:
+ case WMI_TPC_PREAM_EHT40:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HT40_2GHZ;
+ break;
+ default:
+ /* for 2GHZ, default case will be CCK */
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_CCK_2GHZ;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static s16 ath12k_tpc_get_rate(struct ath12k *ar,
+ struct wmi_tpc_stats_arg *tpc_stats,
+ u32 rate_idx, u32 num_chains, u32 rate_code,
+ enum wmi_tpc_pream_bw pream_bw,
+ enum wmi_halphy_ctrl_path_stats_id type,
+ u32 eht_rate_idx)
+{
+ u32 tot_nss, tot_modes, txbf_on_off, index_offset1, index_offset2, index_offset3;
+ u8 chain_idx, stm_idx, num_streams;
+ bool is_mu, txbf_enabled = 0;
+ s8 rates_ctl_min, tpc_ctl;
+ s16 rates, tpc, reg_pwr;
+ u16 rate1, rate2;
+ int mode, ret;
+
+ num_streams = 1 + ATH12K_HW_NSS(rate_code);
+ chain_idx = num_chains - 1;
+ stm_idx = num_streams - 1;
+ mode = -1;
+
+ ret = ath12k_get_tpc_ctl_mode_idx(tpc_stats, pream_bw, &mode);
+ if (ret) {
+ ath12k_warn(ar->ab, "Invalid mode index received\n");
+ tpc = TPC_INVAL;
+ goto out;
+ }
+
+ if (num_chains < num_streams) {
+ tpc = TPC_INVAL;
+ goto out;
+ }
+
+ if (le32_to_cpu(tpc_stats->tpc_config.num_tx_chain) <= 1) {
+ tpc = TPC_INVAL;
+ goto out;
+ }
+
+ if (type == WMI_HALPHY_PDEV_TX_SUTXBF_STATS ||
+ type == WMI_HALPHY_PDEV_TX_MUTXBF_STATS)
+ txbf_enabled = 1;
+
+ if (type == WMI_HALPHY_PDEV_TX_MU_STATS ||
+ type == WMI_HALPHY_PDEV_TX_MUTXBF_STATS) {
+ is_mu = true;
+ } else {
+ is_mu = false;
+ }
+
+ /* Below is the minimum calculation across the ctl array, the rates
+ * array and the regulatory power table; tpc is the minimum of all 3
+ */
+ if (pream_bw >= WMI_TPC_PREAM_EHT20 && pream_bw <= WMI_TPC_PREAM_EHT320) {
+ rate2 = tpc_stats->rates_array2.rate_array[eht_rate_idx];
+ if (is_mu)
+ rates = u32_get_bits(rate2, ATH12K_TPC_RATE_ARRAY_MU);
+ else
+ rates = u32_get_bits(rate2, ATH12K_TPC_RATE_ARRAY_SU);
+ } else {
+ rate1 = tpc_stats->rates_array1.rate_array[rate_idx];
+ if (is_mu)
+ rates = u32_get_bits(rate1, ATH12K_TPC_RATE_ARRAY_MU);
+ else
+ rates = u32_get_bits(rate1, ATH12K_TPC_RATE_ARRAY_SU);
+ }
+
+ if (tpc_stats->tlvs_rcvd & WMI_TPC_CTL_PWR_ARRAY) {
+ tot_nss = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.d1);
+ tot_modes = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.d2);
+ txbf_on_off = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.d3);
+ index_offset1 = txbf_on_off * tot_modes * tot_nss;
+ index_offset2 = tot_modes * tot_nss;
+ index_offset3 = tot_nss;
+
+ tpc_ctl = *(tpc_stats->ctl_array.ctl_pwr_table +
+ chain_idx * index_offset1 + txbf_enabled * index_offset2
+ + mode * index_offset3 + stm_idx);
+ } else {
+ tpc_ctl = TPC_MAX;
+ ath12k_warn(ar->ab,
+ "ctl array for tpc stats not received from fw\n");
+ }
+
+ rates_ctl_min = min_t(s16, rates, tpc_ctl);
+
+ reg_pwr = tpc_stats->max_reg_allowed_power.reg_pwr_array[chain_idx];
+
+ if (reg_pwr < 0)
+ reg_pwr = TPC_INVAL;
+
+ tpc = min_t(s16, rates_ctl_min, reg_pwr);
+
+ /* MODULATION_LIMIT is the maximum power limit; tpc should not exceed
+ * the modulation limit even if the minimum tpc across all three arrays
+ * is greater than it
+ */
+ tpc = min_t(s16, tpc, MODULATION_LIMIT);
+
+out:
+ return tpc;
+}
+
+static u16 ath12k_get_ratecode(u16 pream_idx, u16 nss, u16 mcs_rate)
+{
+ u16 mode_type = ~0;
+
+ /* Below assignments are for printing purposes only */
+ switch (pream_idx) {
+ case WMI_TPC_PREAM_CCK:
+ mode_type = WMI_RATE_PREAMBLE_CCK;
+ break;
+ case WMI_TPC_PREAM_OFDM:
+ mode_type = WMI_RATE_PREAMBLE_OFDM;
+ break;
+ case WMI_TPC_PREAM_HT20:
+ case WMI_TPC_PREAM_HT40:
+ mode_type = WMI_RATE_PREAMBLE_HT;
+ break;
+ case WMI_TPC_PREAM_VHT20:
+ case WMI_TPC_PREAM_VHT40:
+ case WMI_TPC_PREAM_VHT80:
+ case WMI_TPC_PREAM_VHT160:
+ mode_type = WMI_RATE_PREAMBLE_VHT;
+ break;
+ case WMI_TPC_PREAM_HE20:
+ case WMI_TPC_PREAM_HE40:
+ case WMI_TPC_PREAM_HE80:
+ case WMI_TPC_PREAM_HE160:
+ mode_type = WMI_RATE_PREAMBLE_HE;
+ break;
+ case WMI_TPC_PREAM_EHT20:
+ case WMI_TPC_PREAM_EHT40:
+ case WMI_TPC_PREAM_EHT60:
+ case WMI_TPC_PREAM_EHT80:
+ case WMI_TPC_PREAM_EHT120:
+ case WMI_TPC_PREAM_EHT140:
+ case WMI_TPC_PREAM_EHT160:
+ case WMI_TPC_PREAM_EHT200:
+ case WMI_TPC_PREAM_EHT240:
+ case WMI_TPC_PREAM_EHT280:
+ case WMI_TPC_PREAM_EHT320:
+ mode_type = WMI_RATE_PREAMBLE_EHT;
+ if (mcs_rate == 0 || mcs_rate == 1)
+ mcs_rate += 14;
+ else
+ mcs_rate -= 2;
+ break;
+ default:
+ return mode_type;
+ }
+ return ((mode_type << 8) | ((nss & 0x7) << 5) | (mcs_rate & 0x1F));
+}
+
+static bool ath12k_he_supports_extra_mcs(struct ath12k *ar, int freq)
+{
+ struct ath12k_pdev_cap *cap = &ar->pdev->cap;
+ struct ath12k_band_cap *cap_band;
+ bool extra_mcs_supported;
+
+ if (freq <= ATH12K_2GHZ_MAX_FREQUENCY)
+ cap_band = &cap->band[NL80211_BAND_2GHZ];
+ else if (freq <= ATH12K_5GHZ_MAX_FREQUENCY)
+ cap_band = &cap->band[NL80211_BAND_5GHZ];
+ else
+ cap_band = &cap->band[NL80211_BAND_6GHZ];
+
+ extra_mcs_supported = u32_get_bits(cap_band->he_cap_info[1],
+ HE_EXTRA_MCS_SUPPORT);
+ return extra_mcs_supported;
+}
+
+static int ath12k_tpc_fill_pream(struct ath12k *ar, char *buf, int buf_len, int len,
+ enum wmi_tpc_pream_bw pream_bw, u32 max_rix,
+ int max_nss, int max_rates, int pream_type,
+ enum wmi_halphy_ctrl_path_stats_id tpc_type,
+ int rate_idx, int eht_rate_idx)
+{
+ struct wmi_tpc_stats_arg *tpc_stats = ar->debug.tpc_stats;
+ int nss, rates, chains;
+ u8 active_tx_chains;
+ u16 rate_code;
+ s16 tpc;
+
+ static const char *const pream_str[] = {
+ [WMI_TPC_PREAM_CCK] = "CCK",
+ [WMI_TPC_PREAM_OFDM] = "OFDM",
+ [WMI_TPC_PREAM_HT20] = "HT20",
+ [WMI_TPC_PREAM_HT40] = "HT40",
+ [WMI_TPC_PREAM_VHT20] = "VHT20",
+ [WMI_TPC_PREAM_VHT40] = "VHT40",
+ [WMI_TPC_PREAM_VHT80] = "VHT80",
+ [WMI_TPC_PREAM_VHT160] = "VHT160",
+ [WMI_TPC_PREAM_HE20] = "HE20",
+ [WMI_TPC_PREAM_HE40] = "HE40",
+ [WMI_TPC_PREAM_HE80] = "HE80",
+ [WMI_TPC_PREAM_HE160] = "HE160",
+ [WMI_TPC_PREAM_EHT20] = "EHT20",
+ [WMI_TPC_PREAM_EHT40] = "EHT40",
+ [WMI_TPC_PREAM_EHT60] = "EHT60",
+ [WMI_TPC_PREAM_EHT80] = "EHT80",
+ [WMI_TPC_PREAM_EHT120] = "EHT120",
+ [WMI_TPC_PREAM_EHT140] = "EHT140",
+ [WMI_TPC_PREAM_EHT160] = "EHT160",
+ [WMI_TPC_PREAM_EHT200] = "EHT200",
+ [WMI_TPC_PREAM_EHT240] = "EHT240",
+ [WMI_TPC_PREAM_EHT280] = "EHT280",
+ [WMI_TPC_PREAM_EHT320] = "EHT320"};
+
+ active_tx_chains = ar->num_tx_chains;
+
+ for (nss = 0; nss < max_nss; nss++) {
+ for (rates = 0; rates < max_rates; rates++, rate_idx++, max_rix++) {
+ /* FW sends extra MCS (10 & 11) for VHT and HE rates; these are
+ * not used, hence skip them here
+ */
+ if (pream_type == WMI_RATE_PREAMBLE_VHT &&
+ rates > ATH12K_VHT_MCS_MAX)
+ continue;
+
+ if (pream_type == WMI_RATE_PREAMBLE_HE &&
+ rates > ATH12K_HE_MCS_MAX)
+ continue;
+
+ if (pream_type == WMI_RATE_PREAMBLE_EHT &&
+ rates > ATH12K_EHT_MCS_MAX)
+ continue;
+
+ rate_code = ath12k_get_ratecode(pream_bw, nss, rates);
+ len += scnprintf(buf + len, buf_len - len,
+ "%d\t %s\t 0x%03x\t", max_rix,
+ pream_str[pream_bw], rate_code);
+
+ for (chains = 0; chains < active_tx_chains; chains++) {
+ if (nss > chains) {
+ len += scnprintf(buf + len,
+ buf_len - len,
+ "\t%s", "NA");
+ } else {
+ tpc = ath12k_tpc_get_rate(ar, tpc_stats,
+ rate_idx, chains + 1,
+ rate_code, pream_bw,
+ tpc_type,
+ eht_rate_idx);
+
+ if (tpc == TPC_INVAL) {
+ len += scnprintf(buf + len,
+ buf_len - len, "\tNA");
+ } else {
+ len += scnprintf(buf + len,
+ buf_len - len, "\t%d",
+ tpc);
+ }
+ }
+ }
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ if (pream_type == WMI_RATE_PREAMBLE_EHT)
+ /* For fetching the next EHT rate power from rates array2 */
+ ++eht_rate_idx;
+ }
+ }
+
+ return len;
+}
+
+static int ath12k_tpc_stats_print(struct ath12k *ar,
+ struct wmi_tpc_stats_arg *tpc_stats,
+ char *buf, size_t len,
+ enum wmi_halphy_ctrl_path_stats_id type)
+{
+ u32 eht_idx = 0, pream_idx = 0, rate_pream_idx = 0, total_rates = 0, max_rix = 0;
+ u32 chan_freq, num_tx_chain, caps, i, j = 1;
+ size_t buf_len = ATH12K_TPC_STATS_BUF_SIZE;
+ u8 nss, active_tx_chains;
+ bool he_ext_mcs;
+ static const char *const type_str[WMI_HALPHY_PDEV_TX_STATS_MAX] = {
+ [WMI_HALPHY_PDEV_TX_SU_STATS] = "SU",
+ [WMI_HALPHY_PDEV_TX_SUTXBF_STATS] = "SU WITH TXBF",
+ [WMI_HALPHY_PDEV_TX_MU_STATS] = "MU",
+ [WMI_HALPHY_PDEV_TX_MUTXBF_STATS] = "MU WITH TXBF"};
+
+ u8 max_rates[WMI_TPC_PREAM_MAX] = {
+ [WMI_TPC_PREAM_CCK] = ATH12K_CCK_RATES,
+ [WMI_TPC_PREAM_OFDM] = ATH12K_OFDM_RATES,
+ [WMI_TPC_PREAM_HT20] = ATH12K_HT_RATES,
+ [WMI_TPC_PREAM_HT40] = ATH12K_HT_RATES,
+ [WMI_TPC_PREAM_VHT20] = ATH12K_VHT_RATES,
+ [WMI_TPC_PREAM_VHT40] = ATH12K_VHT_RATES,
+ [WMI_TPC_PREAM_VHT80] = ATH12K_VHT_RATES,
+ [WMI_TPC_PREAM_VHT160] = ATH12K_VHT_RATES,
+ [WMI_TPC_PREAM_HE20] = ATH12K_HE_RATES,
+ [WMI_TPC_PREAM_HE40] = ATH12K_HE_RATES,
+ [WMI_TPC_PREAM_HE80] = ATH12K_HE_RATES,
+ [WMI_TPC_PREAM_HE160] = ATH12K_HE_RATES,
+ [WMI_TPC_PREAM_EHT20] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT40] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT60] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT80] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT120] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT140] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT160] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT200] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT240] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT280] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT320] = ATH12K_EHT_RATES};
+ static const u8 max_nss[WMI_TPC_PREAM_MAX] = {
+ [WMI_TPC_PREAM_CCK] = ATH12K_NSS_1,
+ [WMI_TPC_PREAM_OFDM] = ATH12K_NSS_1,
+ [WMI_TPC_PREAM_HT20] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_HT40] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_VHT20] = ATH12K_NSS_8,
+ [WMI_TPC_PREAM_VHT40] = ATH12K_NSS_8,
+ [WMI_TPC_PREAM_VHT80] = ATH12K_NSS_8,
+ [WMI_TPC_PREAM_VHT160] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_HE20] = ATH12K_NSS_8,
+ [WMI_TPC_PREAM_HE40] = ATH12K_NSS_8,
+ [WMI_TPC_PREAM_HE80] = ATH12K_NSS_8,
+ [WMI_TPC_PREAM_HE160] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT20] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT40] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT60] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT80] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT120] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT140] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT160] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT200] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT240] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT280] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT320] = ATH12K_NSS_4};
+
+ u16 rate_idx[WMI_TPC_PREAM_MAX] = {}, eht_rate_idx[WMI_TPC_PREAM_MAX] = {};
+ static const u8 pream_type[WMI_TPC_PREAM_MAX] = {
+ [WMI_TPC_PREAM_CCK] = WMI_RATE_PREAMBLE_CCK,
+ [WMI_TPC_PREAM_OFDM] = WMI_RATE_PREAMBLE_OFDM,
+ [WMI_TPC_PREAM_HT20] = WMI_RATE_PREAMBLE_HT,
+ [WMI_TPC_PREAM_HT40] = WMI_RATE_PREAMBLE_HT,
+ [WMI_TPC_PREAM_VHT20] = WMI_RATE_PREAMBLE_VHT,
+ [WMI_TPC_PREAM_VHT40] = WMI_RATE_PREAMBLE_VHT,
+ [WMI_TPC_PREAM_VHT80] = WMI_RATE_PREAMBLE_VHT,
+ [WMI_TPC_PREAM_VHT160] = WMI_RATE_PREAMBLE_VHT,
+ [WMI_TPC_PREAM_HE20] = WMI_RATE_PREAMBLE_HE,
+ [WMI_TPC_PREAM_HE40] = WMI_RATE_PREAMBLE_HE,
+ [WMI_TPC_PREAM_HE80] = WMI_RATE_PREAMBLE_HE,
+ [WMI_TPC_PREAM_HE160] = WMI_RATE_PREAMBLE_HE,
+ [WMI_TPC_PREAM_EHT20] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT40] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT60] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT80] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT120] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT140] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT160] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT200] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT240] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT280] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT320] = WMI_RATE_PREAMBLE_EHT};
+
+ chan_freq = le32_to_cpu(tpc_stats->tpc_config.chan_freq);
+ num_tx_chain = le32_to_cpu(tpc_stats->tpc_config.num_tx_chain);
+ caps = le32_to_cpu(tpc_stats->tpc_config.caps);
+
+ active_tx_chains = ar->num_tx_chains;
+ he_ext_mcs = ath12k_he_supports_extra_mcs(ar, chan_freq);
+
+ /* MCS 12 & 13 are sent by FW for certain HWs in the rate array;
+ * skip them as they are not supported
+ */
+ if (he_ext_mcs) {
+ for (i = WMI_TPC_PREAM_HE20; i <= WMI_TPC_PREAM_HE160; ++i)
+ max_rates[i] = ATH12K_HE_RATES;
+ }
+
+ if (type == WMI_HALPHY_PDEV_TX_MU_STATS ||
+ type == WMI_HALPHY_PDEV_TX_MUTXBF_STATS) {
+ pream_idx = WMI_TPC_PREAM_VHT20;
+
+ for (i = WMI_TPC_PREAM_CCK; i <= WMI_TPC_PREAM_HT40; ++i)
+ max_rix += max_nss[i] * max_rates[i];
+ }
+ /* Enumerate all the rate indices */
+ for (i = rate_pream_idx + 1; i < WMI_TPC_PREAM_MAX; i++) {
+ nss = (max_nss[i - 1] < num_tx_chain ?
+ max_nss[i - 1] : num_tx_chain);
+
+ rate_idx[i] = rate_idx[i - 1] + max_rates[i - 1] * nss;
+
+ if (pream_type[i] == WMI_RATE_PREAMBLE_EHT) {
+ eht_rate_idx[j] = eht_rate_idx[j - 1] + max_rates[i] * nss;
+ ++j;
+ }
+ }
+
+ for (i = 0; i < WMI_TPC_PREAM_MAX; i++) {
+ nss = (max_nss[i] < num_tx_chain ?
+ max_nss[i] : num_tx_chain);
+ total_rates += max_rates[i] * nss;
+ }
+
+ len += scnprintf(buf + len, buf_len - len,
+ "No.of rates-%d\n", total_rates);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "**************** %s ****************\n",
+ type_str[type]);
+ len += scnprintf(buf + len, buf_len - len,
+ "\t\t\t\tTPC values for Active chains\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "Rate idx Preamble Rate code");
+
+ for (i = 1; i <= active_tx_chains; ++i) {
+ len += scnprintf(buf + len, buf_len - len,
+ "\t%d-Chain", i);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ for (i = pream_idx; i < WMI_TPC_PREAM_MAX; i++) {
+ if (chan_freq <= 2483) {
+ if (i == WMI_TPC_PREAM_VHT80 ||
+ i == WMI_TPC_PREAM_VHT160 ||
+ i == WMI_TPC_PREAM_HE80 ||
+ i == WMI_TPC_PREAM_HE160 ||
+ (i >= WMI_TPC_PREAM_EHT60 &&
+ i <= WMI_TPC_PREAM_EHT320)) {
+ max_rix += max_nss[i] * max_rates[i];
+ continue;
+ }
+ } else {
+ if (i == WMI_TPC_PREAM_CCK) {
+ max_rix += max_rates[i];
+ continue;
+ }
+ }
+
+ nss = (max_nss[i] < ar->num_tx_chains ? max_nss[i] : ar->num_tx_chains);
+
+ if (!(caps &
+ (1 << ATH12K_TPC_STATS_SUPPORT_BE_PUNC))) {
+ if (i == WMI_TPC_PREAM_EHT60 || i == WMI_TPC_PREAM_EHT120 ||
+ i == WMI_TPC_PREAM_EHT140 || i == WMI_TPC_PREAM_EHT200 ||
+ i == WMI_TPC_PREAM_EHT240 || i == WMI_TPC_PREAM_EHT280) {
+ max_rix += max_nss[i] * max_rates[i];
+ continue;
+ }
+ }
+
+ len = ath12k_tpc_fill_pream(ar, buf, buf_len, len, i, max_rix, nss,
+ max_rates[i], pream_type[i],
+ type, rate_idx[i], eht_rate_idx[eht_idx]);
+
+ if (pream_type[i] == WMI_RATE_PREAMBLE_EHT)
+ /* For fetching the next EHT rate index from rates array2 */
+ ++eht_idx;
+
+ max_rix += max_nss[i] * max_rates[i];
+ }
+ return len;
+}
+
+static void ath12k_tpc_stats_fill(struct ath12k *ar,
+ struct wmi_tpc_stats_arg *tpc_stats,
+ char *buf)
+{
+ size_t buf_len = ATH12K_TPC_STATS_BUF_SIZE;
+ struct wmi_tpc_config_params *tpc;
+ size_t len = 0;
+
+ if (!tpc_stats) {
+ ath12k_warn(ar->ab, "failed to find tpc stats\n");
+ return;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ tpc = &tpc_stats->tpc_config;
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "*************** TPC config **************\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "* powers are in 0.25 dBm steps\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "reg domain-%d\t\tchan freq-%d\n",
+ tpc->reg_domain, tpc->chan_freq);
+ len += scnprintf(buf + len, buf_len - len,
+ "power limit-%d\t\tmax reg-domain Power-%d\n",
+ le32_to_cpu(tpc->twice_max_reg_power) / 2, tpc->power_limit);
+ len += scnprintf(buf + len, buf_len - len,
+ "No.of tx chain-%d\t",
+ ar->num_tx_chains);
+
+ ath12k_tpc_stats_print(ar, tpc_stats, buf, len,
+ ar->debug.tpc_stats_type);
+
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath12k_open_tpc_stats(struct inode *inode, struct file *file)
+{
+ struct ath12k *ar = inode->i_private;
+ struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+ int ret;
+
+ guard(wiphy)(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (ah->state != ATH12K_HW_STATE_ON) {
+ ath12k_warn(ar->ab, "Interface not up\n");
+ return -ENETDOWN;
+ }
+
+ void *buf __free(kfree) = kzalloc(ATH12K_TPC_STATS_BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = ath12k_debug_tpc_stats_request(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to request tpc stats: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (!wait_for_completion_timeout(&ar->debug.tpc_complete, TPC_STATS_WAIT_TIME)) {
+ spin_lock_bh(&ar->data_lock);
+ ath12k_wmi_free_tpc_stats_mem(ar);
+ ar->debug.tpc_request = false;
+ spin_unlock_bh(&ar->data_lock);
+ return -ETIMEDOUT;
+ }
+
+ ath12k_tpc_stats_fill(ar, ar->debug.tpc_stats, buf);
+ file->private_data = no_free_ptr(buf);
+
+ spin_lock_bh(&ar->data_lock);
+ ath12k_wmi_free_tpc_stats_mem(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ return 0;
+}
+
+static ssize_t ath12k_read_tpc_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static int ath12k_release_tpc_stats(struct inode *inode,
+ struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+static const struct file_operations fops_tpc_stats = {
+ .open = ath12k_open_tpc_stats,
+ .release = ath12k_release_tpc_stats,
+ .read = ath12k_read_tpc_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static const struct file_operations fops_tpc_stats_type = {
+ .write = ath12k_write_tpc_stats_type,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath12k_write_extd_rx_stats(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath12k *ar = file->private_data;
+ struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ u32 ring_id, rx_filter = 0;
+ bool enable;
+ int ret, i;
+
+ if (kstrtobool_from_user(ubuf, count, &enable))
+ return -EINVAL;
+
+ wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (!ar->ab->hw_params->rxdma1_enable) {
+ ret = count;
+ goto exit;
+ }
+
+ if (ar->ah->state != ATH12K_HW_STATE_ON) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (enable == ar->debug.extd_rx_stats) {
+ ret = count;
+ goto exit;
+ }
+
+ if (enable) {
+ rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO;
+
+ tlv_filter.rx_filter = rx_filter;
+ tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0;
+ tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1;
+ tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2;
+ tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 |
+ HTT_RX_FP_DATA_FILTER_FLASG3;
+ } else {
+ tlv_filter = ath12k_mac_mon_status_filter_default;
+ }
+
+ ar->debug.rx_filter = tlv_filter.rx_filter;
+
+ for (i = 0; i < ar->ab->hw_params->num_rxdma_per_pdev; i++) {
+ ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id;
+ ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id + i,
+ HAL_RXDMA_MONITOR_DST,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set rx filter for monitor status ring\n");
+ goto exit;
+ }
+ }
+
+ ar->debug.extd_rx_stats = !!enable;
+ ret = count;
+exit:
+ wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
+ return ret;
+}
+
+static ssize_t ath12k_read_extd_rx_stats(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath12k *ar = file->private_data;
+ char buf[32];
+ int len = 0;
+
+ wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
+ len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+ ar->debug.extd_rx_stats);
+ wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_extd_rx_stats = {
+ .read = ath12k_read_extd_rx_stats,
+ .write = ath12k_write_extd_rx_stats,
+ .open = simple_open,
+};
+
void ath12k_debugfs_soc_create(struct ath12k_base *ab)
{
bool dput_needed;
@@ -68,6 +870,382 @@ void ath12k_debugfs_soc_destroy(struct ath12k_base *ab)
*/
}
+static void ath12k_fw_stats_pdevs_free(struct list_head *head)
+{
+ struct ath12k_fw_stats_pdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath12k_fw_stats_bcn_free(struct list_head *head)
+{
+ struct ath12k_fw_stats_bcn *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath12k_fw_stats_vdevs_free(struct list_head *head)
+{
+ struct ath12k_fw_stats_vdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+void ath12k_debugfs_fw_stats_reset(struct ath12k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ ar->fw_stats.fw_stats_done = false;
+ ath12k_fw_stats_vdevs_free(&ar->fw_stats.vdevs);
+ ath12k_fw_stats_bcn_free(&ar->fw_stats.bcn);
+ ath12k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath12k_debugfs_fw_stats_request(struct ath12k *ar,
+ struct ath12k_fw_stats_req_params *param)
+{
+ struct ath12k_base *ab = ar->ab;
+ unsigned long timeout, time_left;
+ int ret;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ /* FW stats can get split when exceeding the stats data buffer limit.
+ * In that case, since there is no end marking for the back-to-back
+ * received 'update stats' events, keep a 3 second timeout in case
+ * fw_stats_done is not marked yet
+ */
+ timeout = jiffies + msecs_to_jiffies(3 * 1000);
+
+ ath12k_debugfs_fw_stats_reset(ar);
+
+ reinit_completion(&ar->fw_stats_complete);
+
+ ret = ath12k_wmi_send_stats_request_cmd(ar, param->stats_id,
+ param->vdev_id, param->pdev_id);
+
+ if (ret) {
+ ath12k_warn(ab, "could not request fw stats (%d)\n",
+ ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->fw_stats_complete,
+ 1 * HZ);
+ /* If the wait timed out, return -ETIMEDOUT */
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ /* Firmware sends WMI_UPDATE_STATS_EVENTID back-to-back
+ * when the stats data buffer limit is reached. fw_stats_complete
+ * is completed once the host receives the first event from firmware,
+ * but the end might still not be marked in the TLV.
+ * The loop below confirms that firmware has finished sending all the
+ * events; fw_stats_done is marked true once the end is marked in the TLV
+ */
+ for (;;) {
+ if (time_after(jiffies, timeout))
+ break;
+
+ spin_lock_bh(&ar->data_lock);
+ if (ar->fw_stats.fw_stats_done) {
+ spin_unlock_bh(&ar->data_lock);
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
+ return 0;
+}
+
+void
+ath12k_debugfs_fw_stats_process(struct ath12k *ar,
+ struct ath12k_fw_stats *stats)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_pdev *pdev;
+ bool is_end;
+ static unsigned int num_vdev, num_bcn;
+ size_t total_vdevs_started = 0;
+ int i;
+
+ if (stats->stats_id == WMI_REQUEST_VDEV_STAT) {
+ if (list_empty(&stats->vdevs)) {
+ ath12k_warn(ab, "empty vdev stats");
+ return;
+ }
+ /* FW sends all the active VDEV stats irrespective of PDEV,
+ * hence limit until the count of all VDEVs started
+ */
+ rcu_read_lock();
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar)
+ total_vdevs_started += pdev->ar->num_started_vdevs;
+ }
+ rcu_read_unlock();
+
+ is_end = ((++num_vdev) == total_vdevs_started);
+
+ list_splice_tail_init(&stats->vdevs,
+ &ar->fw_stats.vdevs);
+
+ if (is_end) {
+ ar->fw_stats.fw_stats_done = true;
+ num_vdev = 0;
+ }
+ return;
+ }
+ if (stats->stats_id == WMI_REQUEST_BCN_STAT) {
+ if (list_empty(&stats->bcn)) {
+ ath12k_warn(ab, "empty beacon stats");
+ return;
+ }
+ /* Mark the end once we have reached the count of all started VDEVs
+ * within the PDEV
+ */
+ is_end = ((++num_bcn) == ar->num_started_vdevs);
+
+ list_splice_tail_init(&stats->bcn,
+ &ar->fw_stats.bcn);
+
+ if (is_end) {
+ ar->fw_stats.fw_stats_done = true;
+ num_bcn = 0;
+ }
+ }
+ if (stats->stats_id == WMI_REQUEST_PDEV_STAT) {
+ list_splice_tail_init(&stats->pdevs, &ar->fw_stats.pdevs);
+ ar->fw_stats.fw_stats_done = true;
+ }
+}
+
+static int ath12k_open_vdev_stats(struct inode *inode, struct file *file)
+{
+ struct ath12k *ar = inode->i_private;
+ struct ath12k_fw_stats_req_params param;
+ struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+ int ret;
+
+ guard(wiphy)(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (!ah)
+ return -ENETDOWN;
+
+ if (ah->state != ATH12K_HW_STATE_ON)
+ return -ENETDOWN;
+
+ void *buf __free(kfree) = kzalloc(ATH12K_FW_STATS_BUF_SIZE, GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+
+ param.pdev_id = ath12k_mac_get_target_pdev_id(ar);
+ /* VDEV stats are always sent for all active VDEVs from FW */
+ param.vdev_id = 0;
+ param.stats_id = WMI_REQUEST_VDEV_STAT;
+
+ ret = ath12k_debugfs_fw_stats_request(ar, &param);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to request fw vdev stats: %d\n", ret);
+ return ret;
+ }
+
+ ath12k_wmi_fw_stats_dump(ar, &ar->fw_stats, param.stats_id,
+ buf);
+
+ file->private_data = no_free_ptr(buf);
+
+ return 0;
+}
+
+static int ath12k_release_vdev_stats(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath12k_read_vdev_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_vdev_stats = {
+ .open = ath12k_open_vdev_stats,
+ .release = ath12k_release_vdev_stats,
+ .read = ath12k_read_vdev_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath12k_open_bcn_stats(struct inode *inode, struct file *file)
+{
+ struct ath12k *ar = inode->i_private;
+ struct ath12k_link_vif *arvif;
+ struct ath12k_fw_stats_req_params param;
+ struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+ int ret;
+
+ guard(wiphy)(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (ah && ah->state != ATH12K_HW_STATE_ON)
+ return -ENETDOWN;
+
+ void *buf __free(kfree) = kzalloc(ATH12K_FW_STATS_BUF_SIZE, GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+
+ param.pdev_id = ath12k_mac_get_target_pdev_id(ar);
+ param.stats_id = WMI_REQUEST_BCN_STAT;
+
+ /* loop all active VDEVs for bcn stats */
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (!arvif->is_up)
+ continue;
+
+ param.vdev_id = arvif->vdev_id;
+ ret = ath12k_debugfs_fw_stats_request(ar, &param);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to request fw bcn stats: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ath12k_wmi_fw_stats_dump(ar, &ar->fw_stats, param.stats_id,
+ buf);
+ /* since the beacon stats request is looped over all active VDEVs,
+ * the saved fw stats are not freed per request until done for all
+ * active VDEVs
+ */
+ spin_lock_bh(&ar->data_lock);
+ ath12k_fw_stats_bcn_free(&ar->fw_stats.bcn);
+ spin_unlock_bh(&ar->data_lock);
+
+ file->private_data = no_free_ptr(buf);
+
+ return 0;
+}
+
+static int ath12k_release_bcn_stats(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath12k_read_bcn_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_bcn_stats = {
+ .open = ath12k_open_bcn_stats,
+ .release = ath12k_release_bcn_stats,
+ .read = ath12k_read_bcn_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath12k_open_pdev_stats(struct inode *inode, struct file *file)
+{
+ struct ath12k *ar = inode->i_private;
+ struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_fw_stats_req_params param;
+ int ret;
+
+ guard(wiphy)(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (ah && ah->state != ATH12K_HW_STATE_ON)
+ return -ENETDOWN;
+
+ void *buf __free(kfree) = kzalloc(ATH12K_FW_STATS_BUF_SIZE, GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+
+ param.pdev_id = ath12k_mac_get_target_pdev_id(ar);
+ param.vdev_id = 0;
+ param.stats_id = WMI_REQUEST_PDEV_STAT;
+
+ ret = ath12k_debugfs_fw_stats_request(ar, &param);
+ if (ret) {
+ ath12k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
+ return ret;
+ }
+
+ ath12k_wmi_fw_stats_dump(ar, &ar->fw_stats, param.stats_id,
+ buf);
+
+ file->private_data = no_free_ptr(buf);
+
+ return 0;
+}
+
+static int ath12k_release_pdev_stats(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath12k_read_pdev_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_pdev_stats = {
+ .open = ath12k_open_pdev_stats,
+ .release = ath12k_release_pdev_stats,
+ .read = ath12k_read_pdev_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static void ath12k_debugfs_fw_stats_register(struct ath12k *ar)
+{
+ struct dentry *fwstats_dir = debugfs_create_dir("fw_stats",
+ ar->debug.debugfs_pdev);
+
+ /* all fw stats debugfs files are created under the per-PDEV
+ * "fw_stats" directory
+ */
+ debugfs_create_file("vdev_stats", 0600, fwstats_dir, ar,
+ &fops_vdev_stats);
+ debugfs_create_file("beacon_stats", 0600, fwstats_dir, ar,
+ &fops_bcn_stats);
+ debugfs_create_file("pdev_stats", 0600, fwstats_dir, ar,
+ &fops_pdev_stats);
+
+ INIT_LIST_HEAD(&ar->fw_stats.vdevs);
+ INIT_LIST_HEAD(&ar->fw_stats.bcn);
+ INIT_LIST_HEAD(&ar->fw_stats.pdevs);
+
+ init_completion(&ar->fw_stats_complete);
+}
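+
+/* Example usage from userspace (illustrative; the exact debugfs path depends
+ * on how the ath12k hierarchy is laid out for the device):
+ *   cat /sys/kernel/debug/ath12k/<device>/mac<N>/fw_stats/pdev_stats
+ */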
+
void ath12k_debugfs_register(struct ath12k *ar)
{
struct ath12k_base *ab = ar->ab;
@@ -91,7 +1269,18 @@ void ath12k_debugfs_register(struct ath12k *ar)
&fops_simulate_radar);
}
+ debugfs_create_file("tpc_stats", 0400, ar->debug.debugfs_pdev, ar,
+ &fops_tpc_stats);
+ debugfs_create_file("tpc_stats_type", 0200, ar->debug.debugfs_pdev,
+ ar, &fops_tpc_stats_type);
+ init_completion(&ar->debug.tpc_complete);
+
ath12k_debugfs_htt_stats_register(ar);
+ ath12k_debugfs_fw_stats_register(ar);
+
+ debugfs_create_file("ext_rx_stats", 0644,
+ ar->debug.debugfs_pdev, ar,
+ &fops_extd_rx_stats);
}
void ath12k_debugfs_unregister(struct ath12k *ar)
diff --git a/drivers/net/wireless/ath/ath12k/debugfs.h b/drivers/net/wireless/ath/ath12k/debugfs.h
index 8d64ba03aa9a..d7041297d5d8 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs.h
+++ b/drivers/net/wireless/ath/ath12k/debugfs.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH12K_DEBUGFS_H_
@@ -12,6 +12,101 @@ void ath12k_debugfs_soc_create(struct ath12k_base *ab);
void ath12k_debugfs_soc_destroy(struct ath12k_base *ab);
void ath12k_debugfs_register(struct ath12k *ar);
void ath12k_debugfs_unregister(struct ath12k *ar);
+void ath12k_debugfs_fw_stats_process(struct ath12k *ar,
+ struct ath12k_fw_stats *stats);
+void ath12k_debugfs_fw_stats_reset(struct ath12k *ar);
+
+static inline bool ath12k_debugfs_is_extd_rx_stats_enabled(struct ath12k *ar)
+{
+ return ar->debug.extd_rx_stats;
+}
+
+static inline int ath12k_debugfs_rx_filter(struct ath12k *ar)
+{
+ return ar->debug.rx_filter;
+}
+
+#define ATH12K_CCK_RATES 4
+#define ATH12K_OFDM_RATES 8
+#define ATH12K_HT_RATES 8
+#define ATH12K_VHT_RATES 12
+#define ATH12K_HE_RATES 12
+#define ATH12K_HE_RATES_WITH_EXTRA_MCS 14
+#define ATH12K_EHT_RATES 16
+#define HE_EXTRA_MCS_SUPPORT GENMASK(31, 16)
+#define ATH12K_NSS_1 1
+#define ATH12K_NSS_4 4
+#define ATH12K_NSS_8 8
+#define ATH12K_HW_NSS(_rcode) (((_rcode) >> 5) & 0x7)
+#define TPC_STATS_WAIT_TIME (1 * HZ)
+#define MAX_TPC_PREAM_STR_LEN 7
+#define TPC_INVAL -128
+#define TPC_MAX 127
+#define TPC_STATS_TOT_ROW 700
+#define TPC_STATS_TOT_COLUMN 100
+#define MODULATION_LIMIT 126
+
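+/* size of the text buffer used for the formatted TPC stats dump */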
+#define ATH12K_TPC_STATS_BUF_SIZE (TPC_STATS_TOT_ROW * TPC_STATS_TOT_COLUMN)
+
+enum wmi_tpc_pream_bw {
+ WMI_TPC_PREAM_CCK,
+ WMI_TPC_PREAM_OFDM,
+ WMI_TPC_PREAM_HT20,
+ WMI_TPC_PREAM_HT40,
+ WMI_TPC_PREAM_VHT20,
+ WMI_TPC_PREAM_VHT40,
+ WMI_TPC_PREAM_VHT80,
+ WMI_TPC_PREAM_VHT160,
+ WMI_TPC_PREAM_HE20,
+ WMI_TPC_PREAM_HE40,
+ WMI_TPC_PREAM_HE80,
+ WMI_TPC_PREAM_HE160,
+ WMI_TPC_PREAM_EHT20,
+ WMI_TPC_PREAM_EHT40,
+ WMI_TPC_PREAM_EHT60,
+ WMI_TPC_PREAM_EHT80,
+ WMI_TPC_PREAM_EHT120,
+ WMI_TPC_PREAM_EHT140,
+ WMI_TPC_PREAM_EHT160,
+ WMI_TPC_PREAM_EHT200,
+ WMI_TPC_PREAM_EHT240,
+ WMI_TPC_PREAM_EHT280,
+ WMI_TPC_PREAM_EHT320,
+ WMI_TPC_PREAM_MAX
+};
+
+enum ath12k_debug_tpc_stats_ctl_mode {
+ ATH12K_TPC_STATS_CTL_MODE_LEGACY_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_HT_VHT20_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_HE_EHT20_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_HT_VHT40_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_HE_EHT40_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_VHT80_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_HE_EHT80_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_VHT160_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_HE_EHT160_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_HE_EHT320_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_CCK_2GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_LEGACY_2GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_HT20_2GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_HT40_2GHZ,
+
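+ /* punctured EHT CTL modes start at a fixed offset of 23 */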
+ ATH12K_TPC_STATS_CTL_MODE_EHT80_SU_PUNC20 = 23,
+ ATH12K_TPC_STATS_CTL_MODE_EHT160_SU_PUNC20,
+ ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC40,
+ ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC80,
+ ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC120
+};
+
+enum ath12k_debug_tpc_stats_support_modes {
+ ATH12K_TPC_STATS_SUPPORT_160 = 0,
+ ATH12K_TPC_STATS_SUPPORT_320,
+ ATH12K_TPC_STATS_SUPPORT_AX,
+ ATH12K_TPC_STATS_SUPPORT_AX_EXTRA_MCS,
+ ATH12K_TPC_STATS_SUPPORT_BE,
+ ATH12K_TPC_STATS_SUPPORT_BE_PUNC,
+};
#else
static inline void ath12k_debugfs_soc_create(struct ath12k_base *ab)
{
@@ -29,6 +124,24 @@ static inline void ath12k_debugfs_unregister(struct ath12k *ar)
{
}
+static inline void ath12k_debugfs_fw_stats_process(struct ath12k *ar,
+ struct ath12k_fw_stats *stats)
+{
+}
+
+static inline void ath12k_debugfs_fw_stats_reset(struct ath12k *ar)
+{
+}
+
+static inline bool ath12k_debugfs_is_extd_rx_stats_enabled(struct ath12k *ar)
+{
+ return false;
+}
+
+static inline int ath12k_debugfs_rx_filter(struct ath12k *ar)
+{
+ return 0;
+}
#endif /* CONFIG_ATH12K_DEBUGFS */
#endif /* _ATH12K_DEBUGFS_H_ */
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
index 41e4ef2ef3af..1c0d5fa39a8d 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
@@ -48,6 +48,34 @@ print_array_to_buf(u8 *buf, u32 offset, const char *header,
footer);
}
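+
+/* Like print_array_to_buf(), but for arrays of signed 8-bit values (e.g.
+ * RSSI/EVM in dB), printing each element as "index:value" starting from
+ * stats_index.
+ */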
+static u32
+print_array_to_buf_s8(u8 *buf, u32 offset, const char *header, u32 stats_index,
+ const s8 *array, u32 array_len, const char *footer)
+{
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ int index = 0;
+ u8 i;
+
+ if (header)
+ index += scnprintf(buf + offset, buf_len - offset, "%s = ", header);
+
+ for (i = 0; i < array_len; i++) {
+ index += scnprintf(buf + offset + index, (buf_len - offset) - index,
+ " %u:%d,", stats_index++, array[i]);
+ }
+
+ index--;
+ if ((offset + index) < buf_len)
+ buf[offset + index] = '\0';
+
+ if (footer) {
+ index += scnprintf(buf + offset + index, (buf_len - offset) - index,
+ "%s", footer);
+ }
+
+ return index;
+}
+
static const char *ath12k_htt_ax_tx_rx_ru_size_to_str(u8 ru_size)
{
switch (ru_size) {
@@ -2512,6 +2540,268 @@ ath12k_htt_print_pdev_stats_cca_counters_tlv(const void *tag_buf, u16 tag_len,
}
static void
+ath12k_htt_print_tx_sounding_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_sounding_stats_tlv *htt_stats_buf = tag_buf;
+ const __le32 *cbf_20, *cbf_40, *cbf_80, *cbf_160, *cbf_320;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 tx_sounding_mode;
+ u8 i, u;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ cbf_20 = htt_stats_buf->cbf_20;
+ cbf_40 = htt_stats_buf->cbf_40;
+ cbf_80 = htt_stats_buf->cbf_80;
+ cbf_160 = htt_stats_buf->cbf_160;
+ cbf_320 = htt_stats_buf->cbf_320;
+ tx_sounding_mode = le32_to_cpu(htt_stats_buf->tx_sounding_mode);
+
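+ /* a single TLV reports exactly one mode: 11ac, 11ax or 11be sounding
+ * stats, or the common CV upload/query counters
+ */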
+ if (tx_sounding_mode == ATH12K_HTT_TX_AC_SOUNDING_MODE) {
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_AC_SOUNDING_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_20 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_20[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_40 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_40[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_80 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_80[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_160 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_160[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+
+ for (u = 0, i = 0; u < ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS; u++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "Sounding User_%u = 20MHz: %u, ", u,
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ len += scnprintf(buf + len, buf_len - len, "40MHz: %u, ",
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ len += scnprintf(buf + len, buf_len - len, "80MHz: %u, ",
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ len += scnprintf(buf + len, buf_len - len, "160MHz: %u\n",
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ }
+ } else if (tx_sounding_mode == ATH12K_HTT_TX_AX_SOUNDING_MODE) {
+ len += scnprintf(buf + len, buf_len - len,
+ "\nHTT_TX_AX_SOUNDING_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_20 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_20[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_40 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_40[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_80 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_80[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_160 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_160[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+
+ for (u = 0, i = 0; u < ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS; u++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "Sounding User_%u = 20MHz: %u, ", u,
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ len += scnprintf(buf + len, buf_len - len, "40MHz: %u, ",
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ len += scnprintf(buf + len, buf_len - len, "80MHz: %u, ",
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ len += scnprintf(buf + len, buf_len - len, "160MHz: %u\n",
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ }
+ } else if (tx_sounding_mode == ATH12K_HTT_TX_BE_SOUNDING_MODE) {
+ len += scnprintf(buf + len, buf_len - len,
+ "\nHTT_TX_BE_SOUNDING_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "be_cbf_20 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_20[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "be_cbf_40 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_40[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "be_cbf_80 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_80[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "be_cbf_160 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_160[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "be_cbf_320 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_320[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_320[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_320[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_320[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_320[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ for (u = 0, i = 0; u < ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS; u++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "Sounding User_%u = 20MHz: %u, ", u,
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ len += scnprintf(buf + len, buf_len - len, "40MHz: %u, ",
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ len += scnprintf(buf + len, buf_len - len, "80MHz: %u, ",
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ len += scnprintf(buf + len, buf_len - len,
+ "160MHz: %u, 320MHz: %u\n",
+ le32_to_cpu(htt_stats_buf->sounding[i++]),
+ le32_to_cpu(htt_stats_buf->sounding_320[u]));
+ }
+ } else if (tx_sounding_mode == ATH12K_HTT_TX_CMN_SOUNDING_MODE) {
+ len += scnprintf(buf + len, buf_len - len,
+ "\nCV UPLOAD HANDLER STATS:\n");
+ len += scnprintf(buf + len, buf_len - len, "cv_nc_mismatch_err = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_nc_mismatch_err));
+ len += scnprintf(buf + len, buf_len - len, "cv_fcs_err = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_fcs_err));
+ len += scnprintf(buf + len, buf_len - len, "cv_frag_idx_mismatch = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_frag_idx_mismatch));
+ len += scnprintf(buf + len, buf_len - len, "cv_invalid_peer_id = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_invalid_peer_id));
+ len += scnprintf(buf + len, buf_len - len, "cv_no_txbf_setup = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_no_txbf_setup));
+ len += scnprintf(buf + len, buf_len - len, "cv_expiry_in_update = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_expiry_in_update));
+ len += scnprintf(buf + len, buf_len - len, "cv_pkt_bw_exceed = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_pkt_bw_exceed));
+ len += scnprintf(buf + len, buf_len - len, "cv_dma_not_done_err = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_dma_not_done_err));
+ len += scnprintf(buf + len, buf_len - len, "cv_update_failed = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_update_failed));
+ len += scnprintf(buf + len, buf_len - len, "cv_dma_timeout_error = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_dma_timeout_error));
+ len += scnprintf(buf + len, buf_len - len, "cv_buf_ibf_uploads = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_buf_ibf_uploads));
+ len += scnprintf(buf + len, buf_len - len, "cv_buf_ebf_uploads = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_buf_ebf_uploads));
+ len += scnprintf(buf + len, buf_len - len, "cv_buf_received = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_buf_received));
+ len += scnprintf(buf + len, buf_len - len, "cv_buf_fed_back = %u\n\n",
+ le32_to_cpu(htt_stats_buf->cv_buf_fed_back));
+
+ len += scnprintf(buf + len, buf_len - len, "CV QUERY STATS:\n");
+ len += scnprintf(buf + len, buf_len - len, "cv_total_query = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_total_query));
+ len += scnprintf(buf + len, buf_len - len,
+ "cv_total_pattern_query = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_total_pattern_query));
+ len += scnprintf(buf + len, buf_len - len, "cv_total_bw_query = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_total_bw_query));
+ len += scnprintf(buf + len, buf_len - len, "cv_invalid_bw_coding = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_invalid_bw_coding));
+ len += scnprintf(buf + len, buf_len - len, "cv_forced_sounding = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_forced_sounding));
+ len += scnprintf(buf + len, buf_len - len,
+ "cv_standalone_sounding = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_standalone_sounding));
+ len += scnprintf(buf + len, buf_len - len, "cv_nc_mismatch = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_nc_mismatch));
+ len += scnprintf(buf + len, buf_len - len, "cv_fb_type_mismatch = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_fb_type_mismatch));
+ len += scnprintf(buf + len, buf_len - len, "cv_ofdma_bw_mismatch = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_ofdma_bw_mismatch));
+ len += scnprintf(buf + len, buf_len - len, "cv_bw_mismatch = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_bw_mismatch));
+ len += scnprintf(buf + len, buf_len - len, "cv_pattern_mismatch = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_pattern_mismatch));
+ len += scnprintf(buf + len, buf_len - len, "cv_preamble_mismatch = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_preamble_mismatch));
+ len += scnprintf(buf + len, buf_len - len, "cv_nr_mismatch = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_nr_mismatch));
+ len += scnprintf(buf + len, buf_len - len,
+ "cv_in_use_cnt_exceeded = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_in_use_cnt_exceeded));
+ len += scnprintf(buf + len, buf_len - len, "cv_ntbr_sounding = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_ntbr_sounding));
+ len += scnprintf(buf + len, buf_len - len,
+ "cv_found_upload_in_progress = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_found_upload_in_progress));
+ len += scnprintf(buf + len, buf_len - len,
+ "cv_expired_during_query = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_expired_during_query));
+ len += scnprintf(buf + len, buf_len - len, "cv_found = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_found));
+ len += scnprintf(buf + len, buf_len - len, "cv_not_found = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_not_found));
+ len += scnprintf(buf + len, buf_len - len, "cv_total_query_ibf = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_total_query_ibf));
+ len += scnprintf(buf + len, buf_len - len, "cv_found_ibf = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_found_ibf));
+ len += scnprintf(buf + len, buf_len - len, "cv_not_found_ibf = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_not_found_ibf));
+ len += scnprintf(buf + len, buf_len - len,
+ "cv_expired_during_query_ibf = %u\n\n",
+ le32_to_cpu(htt_stats_buf->cv_expired_during_query_ibf));
+ }
+
+ stats_req->buf_len = len;
+}
+
+static void
ath12k_htt_print_pdev_obss_pd_stats_tlv(const void *tag_buf, u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
@@ -2577,6 +2867,428 @@ ath12k_htt_print_pdev_obss_pd_stats_tlv(const void *tag_buf, u16 tag_len,
}
static void
+ath12k_htt_print_latency_prof_ctx_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_latency_prof_ctx_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_STATS_LATENCY_CTX_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "duration = %u\n",
+ le32_to_cpu(htt_stats_buf->duration));
+ len += scnprintf(buf + len, buf_len - len, "tx_msdu_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_msdu_cnt));
+ len += scnprintf(buf + len, buf_len - len, "tx_mpdu_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_mpdu_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_msdu_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_msdu_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_mpdu_cnt = %u\n\n",
+ le32_to_cpu(htt_stats_buf->rx_mpdu_cnt));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_latency_prof_cnt(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_latency_prof_cnt_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_STATS_LATENCY_CNT_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "prof_enable_cnt = %u\n\n",
+ le32_to_cpu(htt_stats_buf->prof_enable_cnt));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_latency_prof_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_latency_prof_stats_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
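+ /* only the TLV with print_header set carries the section banner */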
+ if (le32_to_cpu(htt_stats_buf->print_header) == 1) {
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_STATS_LATENCY_PROF_TLV:\n");
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "Latency name = %s\n",
+ htt_stats_buf->latency_prof_name);
+ len += scnprintf(buf + len, buf_len - len, "count = %u\n",
+ le32_to_cpu(htt_stats_buf->cnt));
+ len += scnprintf(buf + len, buf_len - len, "minimum = %u\n",
+ le32_to_cpu(htt_stats_buf->min));
+ len += scnprintf(buf + len, buf_len - len, "maximum = %u\n",
+ le32_to_cpu(htt_stats_buf->max));
+ len += scnprintf(buf + len, buf_len - len, "last = %u\n",
+ le32_to_cpu(htt_stats_buf->last));
+ len += scnprintf(buf + len, buf_len - len, "total = %u\n",
+ le32_to_cpu(htt_stats_buf->tot));
+ len += scnprintf(buf + len, buf_len - len, "average = %u\n",
+ le32_to_cpu(htt_stats_buf->avg));
+ len += scnprintf(buf + len, buf_len - len, "histogram interval = %u\n",
+ le32_to_cpu(htt_stats_buf->hist_intvl));
+ len += print_array_to_buf(buf, len, "histogram", htt_stats_buf->hist,
+ ATH12K_HTT_LATENCY_PROFILE_NUM_MAX_HIST, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_ul_ofdma_trigger_stats(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_rx_pdev_ul_trigger_stats_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 mac_id;
+ u8 j;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_PDEV_UL_TRIGGER_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_ul_ofdma = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_11ax_ul_ofdma));
+ len += print_array_to_buf(buf, len, "ul_ofdma_rx_mcs",
+ htt_stats_buf->ul_ofdma_rx_mcs,
+ ATH12K_HTT_RX_NUM_MCS_CNTRS, "\n");
+ for (j = 0; j < ATH12K_HTT_RX_NUM_GI_CNTRS; j++) {
+ len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_gi[%u]", j);
+ len += print_array_to_buf(buf, len, "",
+ htt_stats_buf->ul_ofdma_rx_gi[j],
+ ATH12K_HTT_RX_NUM_MCS_CNTRS, "\n");
+ }
+
+ len += print_array_to_buf_index(buf, len, "ul_ofdma_rx_nss", 1,
+ htt_stats_buf->ul_ofdma_rx_nss,
+ ATH12K_HTT_RX_NUM_SPATIAL_STREAMS, "\n");
+ len += print_array_to_buf(buf, len, "ul_ofdma_rx_bw",
+ htt_stats_buf->ul_ofdma_rx_bw,
+ ATH12K_HTT_RX_NUM_BW_CNTRS, "\n");
+
+ for (j = 0; j < ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES; j++) {
+ len += scnprintf(buf + len, buf_len - len, j == 0 ?
+ "half_ul_ofdma_rx_bw" :
+ "quarter_ul_ofdma_rx_bw");
+ len += print_array_to_buf(buf, len, "", htt_stats_buf->red_bw[j],
+ ATH12K_HTT_RX_NUM_BW_CNTRS, "\n");
+ }
+ len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_stbc = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_ofdma_rx_stbc));
+ len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_ldpc = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_ofdma_rx_ldpc));
+
+ len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_data_ru_size_ppdu = ");
+ for (j = 0; j < ATH12K_HTT_RX_NUM_RU_SIZE_CNTRS; j++)
+ len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+ ath12k_htt_ax_tx_rx_ru_size_to_str(j),
+ le32_to_cpu(htt_stats_buf->data_ru_size_ppdu[j]));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ulofdma_non_data_ru_size_ppdu = ");
+ for (j = 0; j < ATH12K_HTT_RX_NUM_RU_SIZE_CNTRS; j++)
+ len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+ ath12k_htt_ax_tx_rx_ru_size_to_str(j),
+ le32_to_cpu(htt_stats_buf->non_data_ru_size_ppdu[j]));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += print_array_to_buf(buf, len, "rx_rssi_track_sta_aid",
+ htt_stats_buf->uplink_sta_aid,
+ ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n");
+ len += print_array_to_buf(buf, len, "rx_sta_target_rssi",
+ htt_stats_buf->uplink_sta_target_rssi,
+ ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n");
+ len += print_array_to_buf(buf, len, "rx_sta_fd_rssi",
+ htt_stats_buf->uplink_sta_fd_rssi,
+ ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n");
+ len += print_array_to_buf(buf, len, "rx_sta_power_headroom",
+ htt_stats_buf->uplink_sta_power_headroom,
+ ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "ul_ofdma_basic_trigger_rx_qos_null_only = %u\n\n",
+ le32_to_cpu(htt_stats_buf->ul_ofdma_bsc_trig_rx_qos_null_only));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_ul_ofdma_user_stats(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_rx_pdev_ul_ofdma_user_stats_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 user_index;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ user_index = __le32_to_cpu(htt_stats_buf->user_index);
+
+ if (!user_index)
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_PDEV_UL_OFDMA_USER_STAS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_non_data_ppdu_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->rx_ulofdma_non_data_ppdu));
+ len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_data_ppdu_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->rx_ulofdma_data_ppdu));
+ len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_mpdu_ok_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->rx_ulofdma_mpdu_ok));
+ len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_mpdu_fail_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->rx_ulofdma_mpdu_fail));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ulofdma_non_data_nusers_%u = %u\n", user_index,
+ le32_to_cpu(htt_stats_buf->rx_ulofdma_non_data_nusers));
+ len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_data_nusers_%u = %u\n\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->rx_ulofdma_data_nusers));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_ul_mumimo_trig_stats(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_rx_ul_mumimo_trig_stats_tlv *htt_stats_buf = tag_buf;
+ char str_buf[ATH12K_HTT_MAX_STRING_LEN] = {0};
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 mac_id;
+ u16 index;
+ u8 i, j;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_PDEV_UL_MUMIMO_TRIG_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_ul_mumimo = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_11ax_ul_mumimo));
+ index = 0;
+ memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
+ for (i = 0; i < ATH12K_HTT_RX_NUM_MCS_CNTRS; i++)
+ index += scnprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index,
+ " %u:%u,", i,
+ le32_to_cpu(htt_stats_buf->ul_mumimo_rx_mcs[i]));
+
+ for (i = 0; i < ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS; i++)
+ index += scnprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index,
+ " %u:%u,", i + ATH12K_HTT_RX_NUM_MCS_CNTRS,
+ le32_to_cpu(htt_stats_buf->ul_mumimo_rx_mcs_ext[i]));
+ str_buf[--index] = '\0';
+ len += scnprintf(buf + len, buf_len - len, "ul_mumimo_rx_mcs = %s\n", str_buf);
+
+ for (j = 0; j < ATH12K_HTT_RX_NUM_GI_CNTRS; j++) {
+ index = 0;
+ memset(&str_buf[index], 0x0, ATH12K_HTT_MAX_STRING_LEN);
+ for (i = 0; i < ATH12K_HTT_RX_NUM_MCS_CNTRS; i++)
+ index += scnprintf(&str_buf[index],
+ ATH12K_HTT_MAX_STRING_LEN - index,
+ " %u:%u,", i,
+ le32_to_cpu(htt_stats_buf->ul_rx_gi[j][i]));
+
+ for (i = 0; i < ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS; i++)
+ index += scnprintf(&str_buf[index],
+ ATH12K_HTT_MAX_STRING_LEN - index,
+ " %u:%u,", i + ATH12K_HTT_RX_NUM_MCS_CNTRS,
+ le32_to_cpu(htt_stats_buf->ul_gi_ext[j][i]));
+ str_buf[--index] = '\0';
+ len += scnprintf(buf + len, buf_len - len,
+ "ul_mumimo_rx_gi_%u = %s\n", j, str_buf);
+ }
+
+ index = 0;
+ memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
+ len += print_array_to_buf_index(buf, len, "ul_mumimo_rx_nss", 1,
+ htt_stats_buf->ul_mumimo_rx_nss,
+ ATH12K_HTT_RX_NUM_SPATIAL_STREAMS, "\n");
+
+ len += print_array_to_buf(buf, len, "ul_mumimo_rx_bw",
+ htt_stats_buf->ul_mumimo_rx_bw,
+ ATH12K_HTT_RX_NUM_BW_CNTRS, "\n");
+ for (i = 0; i < ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES; i++) {
+ index = 0;
+ memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
+ for (j = 0; j < ATH12K_HTT_RX_NUM_BW_CNTRS; j++)
+ index += scnprintf(&str_buf[index],
+ ATH12K_HTT_MAX_STRING_LEN - index,
+ " %u:%u,", j,
+ le32_to_cpu(htt_stats_buf->red_bw[i][j]));
+ str_buf[--index] = '\0';
+ len += scnprintf(buf + len, buf_len - len, "%s = %s\n",
+ i == 0 ? "half_ul_mumimo_rx_bw" :
+ "quarter_ul_mumimo_rx_bw", str_buf);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "ul_mumimo_rx_stbc = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_mumimo_rx_stbc));
+ len += scnprintf(buf + len, buf_len - len, "ul_mumimo_rx_ldpc = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_mumimo_rx_ldpc));
+
+ for (j = 0; j < ATH12K_HTT_RX_NUM_SPATIAL_STREAMS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ul_mumimo_rssi_in_dbm: chain%u ", j);
+ len += print_array_to_buf_s8(buf, len, "", 0,
+ htt_stats_buf->ul_rssi[j],
+ ATH12K_HTT_RX_NUM_BW_CNTRS, "\n");
+ }
+
+ for (j = 0; j < ATH12K_HTT_TX_UL_MUMIMO_USER_STATS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ul_mumimo_target_rssi: user_%u ", j);
+ len += print_array_to_buf_s8(buf, len, "", 0,
+ htt_stats_buf->tgt_rssi[j],
+ ATH12K_HTT_RX_NUM_BW_CNTRS, "\n");
+ }
+
+ for (j = 0; j < ATH12K_HTT_TX_UL_MUMIMO_USER_STATS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ul_mumimo_fd_rssi: user_%u ", j);
+ len += print_array_to_buf_s8(buf, len, "", 0,
+ htt_stats_buf->fd[j],
+ ATH12K_HTT_RX_NUM_SPATIAL_STREAMS, "\n");
+ }
+
+ for (j = 0; j < ATH12K_HTT_TX_UL_MUMIMO_USER_STATS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ulmumimo_pilot_evm_db_mean: user_%u ", j);
+ len += print_array_to_buf_s8(buf, len, "", 0,
+ htt_stats_buf->db[j],
+ ATH12K_HTT_RX_NUM_SPATIAL_STREAMS, "\n");
+ }
+
+ len += scnprintf(buf + len, buf_len - len,
+ "ul_mumimo_basic_trigger_rx_qos_null_only = %u\n\n",
+ le32_to_cpu(htt_stats_buf->mumimo_bsc_trig_rx_qos_null_only));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_rx_fse_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_rx_fse_stats_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_STATS_RX_FSE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "=== Software RX FSE STATS ===\n");
+ len += scnprintf(buf + len, buf_len - len, "Enable count = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_enable_cnt));
+ len += scnprintf(buf + len, buf_len - len, "Disable count = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_disable_cnt));
+ len += scnprintf(buf + len, buf_len - len, "Cache invalidate entry count = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_cache_invalidate_entry_cnt));
+ len += scnprintf(buf + len, buf_len - len, "Full cache invalidate count = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_full_cache_invalidate_cnt));
+
+ len += scnprintf(buf + len, buf_len - len, "\n=== Hardware RX FSE STATS ===\n");
+ len += scnprintf(buf + len, buf_len - len, "Cache hits count = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_num_cache_hits_cnt));
+ len += scnprintf(buf + len, buf_len - len, "Cache no. of searches = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_num_searches_cnt));
+ len += scnprintf(buf + len, buf_len - len, "Cache occupancy peak count:\n");
+ len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-16] = %u [17-32] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[0]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[1]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[2]));
+ len += scnprintf(buf + len, buf_len - len, "[33-48] = %u [49-64] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[3]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[4]));
+ len += scnprintf(buf + len, buf_len - len, "[65-80] = %u [81-96] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[5]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[6]));
+ len += scnprintf(buf + len, buf_len - len, "[97-112] = %u [113-127] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[7]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[8]));
+ len += scnprintf(buf + len, buf_len - len, "[128] = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[9]));
+ len += scnprintf(buf + len, buf_len - len, "Cache occupancy current count:\n");
+ len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-16] = %u [17-32] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[0]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[1]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[2]));
+ len += scnprintf(buf + len, buf_len - len, "[33-48] = %u [49-64] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[3]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[4]));
+ len += scnprintf(buf + len, buf_len - len, "[65-80] = %u [81-96] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[5]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[6]));
+ len += scnprintf(buf + len, buf_len - len, "[97-112] = %u [113-127] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[7]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[8]));
+ len += scnprintf(buf + len, buf_len - len, "[128] = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[9]));
+ len += scnprintf(buf + len, buf_len - len, "Cache search square count:\n");
+ len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-50] = %u [51-100] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[0]),
+ le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[1]),
+ le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[2]));
+ len += scnprintf(buf + len, buf_len - len, "[101-200] = %u [201-255] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[3]),
+ le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[4]));
+ len += scnprintf(buf + len, buf_len - len, "[256] = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[5]));
+ len += scnprintf(buf + len, buf_len - len, "Cache search peak pending count:\n");
+ len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-2] = %u [3-4] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_search_stat_peak_cnt[0]),
+ le32_to_cpu(htt_stats_buf->fse_search_stat_peak_cnt[1]),
+ le32_to_cpu(htt_stats_buf->fse_search_stat_peak_cnt[2]));
+ len += scnprintf(buf + len, buf_len - len, "[Greater/Equal to 5] = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_search_stat_peak_cnt[3]));
+ len += scnprintf(buf + len, buf_len - len, "Cache search tot pending count:\n");
+ len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-2] = %u [3-4] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_search_stat_pending_cnt[0]),
+ le32_to_cpu(htt_stats_buf->fse_search_stat_pending_cnt[1]),
+ le32_to_cpu(htt_stats_buf->fse_search_stat_pending_cnt[2]));
+ len += scnprintf(buf + len, buf_len - len, "[Greater/Equal to 5] = %u\n\n",
+ le32_to_cpu(htt_stats_buf->fse_search_stat_pending_cnt[3]));
+
+ stats_req->buf_len = len;
+}
+
+static void
ath12k_htt_print_pdev_tx_rate_txbf_stats_tlv(const void *tag_buf, u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
@@ -3812,6 +4524,497 @@ ath12k_htt_print_pdev_mbssid_ctrl_frame_stats_tlv(const void *tag_buf, u16 tag_l
stats_req->buf_len = len;
}
+static inline void
+ath12k_htt_print_tx_pdev_rate_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 i, j;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = le32_to_cpu(htt_stats_buf->mac_id_word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_RATE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "tx_ldpc = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_ldpc));
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_tx_ldpc = %u\n",
+ le32_to_cpu(htt_stats_buf->ac_mu_mimo_tx_ldpc));
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_tx_ldpc = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_mu_mimo_tx_ldpc));
+ len += scnprintf(buf + len, buf_len - len, "ofdma_tx_ldpc = %u\n",
+ le32_to_cpu(htt_stats_buf->ofdma_tx_ldpc));
+ len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rts_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rts_success = %u\n",
+ le32_to_cpu(htt_stats_buf->rts_success));
+ len += scnprintf(buf + len, buf_len - len, "ack_rssi = %u\n",
+ le32_to_cpu(htt_stats_buf->ack_rssi));
+ len += scnprintf(buf + len, buf_len - len,
+ "Legacy CCK Rates: 1 Mbps: %u, 2 Mbps: %u, 5.5 Mbps: %u, 12 Mbps: %u\n",
+ le32_to_cpu(htt_stats_buf->tx_legacy_cck_rate[0]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_cck_rate[1]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_cck_rate[2]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_cck_rate[3]));
+ len += scnprintf(buf + len, buf_len - len,
+ "Legacy OFDM Rates: 6 Mbps: %u, 9 Mbps: %u, 12 Mbps: %u, 18 Mbps: %u\n"
+ " 24 Mbps: %u, 36 Mbps: %u, 48 Mbps: %u, 54 Mbps: %u\n",
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[0]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[1]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[2]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[3]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[4]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[5]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[6]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[7]));
+ len += scnprintf(buf + len, buf_len - len, "HE LTF: 1x: %u, 2x: %u, 4x: %u\n",
+ le32_to_cpu(htt_stats_buf->tx_he_ltf[1]),
+ le32_to_cpu(htt_stats_buf->tx_he_ltf[2]),
+ le32_to_cpu(htt_stats_buf->tx_he_ltf[3]));
+
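+ /* MCS histograms are split across base and extended counter arrays;
+ * print them as one continuous "index:count" list
+ */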
+ len += print_array_to_buf(buf, len, "tx_mcs", htt_stats_buf->tx_mcs,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, NULL);
+ for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; j++)
+ len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+ j + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+ le32_to_cpu(htt_stats_buf->tx_mcs_ext[j]));
+ for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS; j++)
+ len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+ j + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS +
+ ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS,
+ le32_to_cpu(htt_stats_buf->tx_mcs_ext_2[j]));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += print_array_to_buf(buf, len, "ax_mu_mimo_tx_mcs",
+ htt_stats_buf->ax_mu_mimo_tx_mcs,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, NULL);
+ for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; j++)
+ len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+ j + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+ le32_to_cpu(htt_stats_buf->ax_mu_mimo_tx_mcs_ext[j]));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += print_array_to_buf(buf, len, "ofdma_tx_mcs",
+ htt_stats_buf->ofdma_tx_mcs,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, NULL);
+ for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; j++)
+ len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+ j + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+ le32_to_cpu(htt_stats_buf->ofdma_tx_mcs_ext[j]));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "tx_nss =");
+ for (j = 1; j <= ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,",
+ j, le32_to_cpu(htt_stats_buf->tx_nss[j - 1]));
+ len--;
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_tx_nss =");
+ for (j = 1; j <= ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,",
+ j, le32_to_cpu(htt_stats_buf->ac_mu_mimo_tx_nss[j - 1]));
+ len--;
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_tx_nss =");
+ for (j = 1; j <= ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,",
+ j, le32_to_cpu(htt_stats_buf->ax_mu_mimo_tx_nss[j - 1]));
+ len--;
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "ofdma_tx_nss =");
+ for (j = 1; j <= ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,",
+ j, le32_to_cpu(htt_stats_buf->ofdma_tx_nss[j - 1]));
+ len--;
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += print_array_to_buf(buf, len, "tx_bw", htt_stats_buf->tx_bw,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, NULL);
+ len += scnprintf(buf + len, buf_len - len, ", %u:%u\n",
+ ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS,
+ le32_to_cpu(htt_stats_buf->tx_bw_320mhz));
+
+ len += print_array_to_buf(buf, len, "tx_stbc",
+ htt_stats_buf->tx_stbc,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, NULL);
+ for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; j++)
+ len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+ j + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+ le32_to_cpu(htt_stats_buf->tx_stbc_ext[j]));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, (buf_len - len),
+ "tx_gi[%u] =", j);
+ len += print_array_to_buf(buf, len, NULL, htt_stats_buf->tx_gi[j],
+ ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+ NULL);
+ for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+ i + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+ le32_to_cpu(htt_stats_buf->tx_gi_ext[j][i]));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, (buf_len - len),
+ "ac_mu_mimo_tx_gi[%u] =", j);
+ len += print_array_to_buf(buf, len, NULL,
+ htt_stats_buf->ac_mu_mimo_tx_gi[j],
+ ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+ "\n");
+ }
+
+ for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, (buf_len - len),
+ "ax_mu_mimo_tx_gi[%u] =", j);
+ len += print_array_to_buf(buf, len, NULL, htt_stats_buf->ax_mimo_tx_gi[j],
+ ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+ NULL);
+ for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+ i + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+ le32_to_cpu(htt_stats_buf->ax_tx_gi_ext[j][i]));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, (buf_len - len),
+ "ofdma_tx_gi[%u] = ", j);
+ len += print_array_to_buf(buf, len, NULL, htt_stats_buf->ofdma_tx_gi[j],
+ ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+ NULL);
+ for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+ i + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+ le32_to_cpu(htt_stats_buf->ofd_tx_gi_ext[j][i]));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ len += print_array_to_buf(buf, len, "tx_su_mcs", htt_stats_buf->tx_su_mcs,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "tx_mu_mcs", htt_stats_buf->tx_mu_mcs,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "ac_mu_mimo_tx_mcs",
+ htt_stats_buf->ac_mu_mimo_tx_mcs,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "ac_mu_mimo_tx_bw",
+ htt_stats_buf->ac_mu_mimo_tx_bw,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "ax_mu_mimo_tx_bw",
+ htt_stats_buf->ax_mu_mimo_tx_bw,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "ofdma_tx_bw",
+ htt_stats_buf->ofdma_tx_bw,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "tx_pream", htt_stats_buf->tx_pream,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
+ len += print_array_to_buf(buf, len, "tx_dcm", htt_stats_buf->tx_dcm,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS, "\n");
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+ath12k_htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_rx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 i, j;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = le32_to_cpu(htt_stats_buf->mac_id_word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_RATE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "nsts = %u\n",
+ le32_to_cpu(htt_stats_buf->nsts));
+ len += scnprintf(buf + len, buf_len - len, "rx_ldpc = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_ldpc));
+ len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rts_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rssi_mgmt = %u\n",
+ le32_to_cpu(htt_stats_buf->rssi_mgmt));
+ len += scnprintf(buf + len, buf_len - len, "rssi_data = %u\n",
+ le32_to_cpu(htt_stats_buf->rssi_data));
+ len += scnprintf(buf + len, buf_len - len, "rssi_comb = %u\n",
+ le32_to_cpu(htt_stats_buf->rssi_comb));
+ len += scnprintf(buf + len, buf_len - len, "rssi_in_dbm = %d\n",
+ le32_to_cpu(htt_stats_buf->rssi_in_dbm));
+ len += scnprintf(buf + len, buf_len - len, "rx_evm_nss_count = %u\n",
+ le32_to_cpu(htt_stats_buf->nss_count));
+ len += scnprintf(buf + len, buf_len - len, "rx_evm_pilot_count = %u\n",
+ le32_to_cpu(htt_stats_buf->pilot_count));
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_su_ext = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_11ax_su_ext));
+ len += scnprintf(buf + len, buf_len - len, "rx_11ac_mumimo = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_11ac_mumimo));
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_mumimo = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_11ax_mumimo));
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_ofdma = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_11ax_ofdma));
+ len += scnprintf(buf + len, buf_len - len, "txbf = %u\n",
+ le32_to_cpu(htt_stats_buf->txbf));
+ len += scnprintf(buf + len, buf_len - len, "rx_su_ndpa = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_su_ndpa));
+ len += scnprintf(buf + len, buf_len - len, "rx_mu_ndpa = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_mu_ndpa));
+ len += scnprintf(buf + len, buf_len - len, "rx_br_poll = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_br_poll));
+ len += scnprintf(buf + len, buf_len - len, "rx_active_dur_us_low = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_active_dur_us_low));
+ len += scnprintf(buf + len, buf_len - len, "rx_active_dur_us_high = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_active_dur_us_high));
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_ul_ofdma = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_11ax_ul_ofdma));
+ len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_stbc = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_ofdma_rx_stbc));
+ len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_ldpc = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_ofdma_rx_ldpc));
+ len += scnprintf(buf + len, buf_len - len, "per_chain_rssi_pkt_type = %#x\n",
+ le32_to_cpu(htt_stats_buf->per_chain_rssi_pkt_type));
+
+ len += print_array_to_buf(buf, len, "rx_nss", htt_stats_buf->rx_nss,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ len += print_array_to_buf(buf, len, "rx_dcm", htt_stats_buf->rx_dcm,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "rx_stbc", htt_stats_buf->rx_stbc,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "rx_bw", htt_stats_buf->rx_bw,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "rx_pream", htt_stats_buf->rx_pream,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
+ len += print_array_to_buf(buf, len, "rx_11ax_su_txbf_mcs",
+ htt_stats_buf->rx_11ax_su_txbf_mcs,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "rx_11ax_mu_txbf_mcs",
+ htt_stats_buf->rx_11ax_mu_txbf_mcs,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "rx_legacy_cck_rate",
+ htt_stats_buf->rx_legacy_cck_rate,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS, "\n");
+ len += print_array_to_buf(buf, len, "rx_legacy_ofdm_rate",
+ htt_stats_buf->rx_legacy_ofdm_rate,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS, "\n");
+ len += print_array_to_buf(buf, len, "ul_ofdma_rx_mcs",
+ htt_stats_buf->ul_ofdma_rx_mcs,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "ul_ofdma_rx_nss",
+ htt_stats_buf->ul_ofdma_rx_nss,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ len += print_array_to_buf(buf, len, "ul_ofdma_rx_bw",
+ htt_stats_buf->ul_ofdma_rx_bw,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "rx_ulofdma_non_data_ppdu",
+ htt_stats_buf->rx_ulofdma_non_data_ppdu,
+ ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+ len += print_array_to_buf(buf, len, "rx_ulofdma_data_ppdu",
+ htt_stats_buf->rx_ulofdma_data_ppdu,
+ ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+ len += print_array_to_buf(buf, len, "rx_ulofdma_mpdu_ok",
+ htt_stats_buf->rx_ulofdma_mpdu_ok,
+ ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+ len += print_array_to_buf(buf, len, "rx_ulofdma_mpdu_fail",
+ htt_stats_buf->rx_ulofdma_mpdu_fail,
+ ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+ len += print_array_to_buf(buf, len, "rx_ulofdma_non_data_nusers",
+ htt_stats_buf->rx_ulofdma_non_data_nusers,
+ ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+ len += print_array_to_buf(buf, len, "rx_ulofdma_data_nusers",
+ htt_stats_buf->rx_ulofdma_data_nusers,
+ ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+ len += print_array_to_buf(buf, len, "rx_11ax_dl_ofdma_mcs",
+ htt_stats_buf->rx_11ax_dl_ofdma_mcs,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "rx_11ax_dl_ofdma_ru",
+ htt_stats_buf->rx_11ax_dl_ofdma_ru,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "rx_ulmumimo_non_data_ppdu",
+ htt_stats_buf->rx_ulmumimo_non_data_ppdu,
+ ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER, "\n");
+ len += print_array_to_buf(buf, len, "rx_ulmumimo_data_ppdu",
+ htt_stats_buf->rx_ulmumimo_data_ppdu,
+ ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER, "\n");
+ len += print_array_to_buf(buf, len, "rx_ulmumimo_mpdu_ok",
+ htt_stats_buf->rx_ulmumimo_mpdu_ok,
+ ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER, "\n");
+ len += print_array_to_buf(buf, len, "rx_ulmumimo_mpdu_fail",
+ htt_stats_buf->rx_ulmumimo_mpdu_fail,
+ ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER, "\n");
+
+ len += print_array_to_buf(buf, len, "rx_mcs",
+ htt_stats_buf->rx_mcs,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, NULL);
+ for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; j++)
+ len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+ j + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
+ le32_to_cpu(htt_stats_buf->rx_mcs_ext[j]));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "pilot_evm_db[%u] =", j);
+ len += print_array_to_buf(buf, len, NULL,
+ htt_stats_buf->rx_pil_evm_db[j],
+ ATH12K_HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_NSS,
+ "\n");
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "pilot_evm_db_mean =");
+ for (i = 0; i < ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len,
+ buf_len - len,
+ " %u:%d,", i,
+ le32_to_cpu(htt_stats_buf->rx_pilot_evm_db_mean[i]));
+ len--;
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rssi_chain_in_db[%u] = ", j);
+ for (i = 0; i < ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS; i++)
+ len += scnprintf(buf + len,
+ buf_len - len,
+ " %u: %d,", i,
+ htt_stats_buf->rssi_chain_in_db[j][i]);
+ len--;
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_gi[%u] = ", j);
+ len += print_array_to_buf(buf, len, NULL,
+ htt_stats_buf->rx_gi[j],
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
+ "\n");
+ }
+
+ for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ul_ofdma_rx_gi[%u] = ", j);
+ len += print_array_to_buf(buf, len, NULL,
+ htt_stats_buf->ul_ofdma_rx_gi[j],
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
+ "\n");
+ }
+
+ for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ul_fd_rssi: nss[%u] = ", j);
+ for (i = 0; i < ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER; i++)
+ len += scnprintf(buf + len,
+ buf_len - len,
+ " %u:%d,",
+ i, htt_stats_buf->rx_ul_fd_rssi[j][i]);
+ len--;
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_per_chain_rssi_in_dbm[%u] =", j);
+ for (i = 0; i < ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS; i++)
+ len += scnprintf(buf + len,
+ buf_len - len,
+ " %u:%d,",
+ i,
+ htt_stats_buf->rx_per_chain_rssi_in_dbm[j][i]);
+ len--;
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
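The per-chain loops above build each line by appending " idx:val," pairs and then backing up one character (the len--) so the trailing comma is dropped before the newline is written. Below is a minimal userspace sketch of that accumulate-and-trim pattern, not driver code: dump_indexed_vals() is an invented name, snprintf() stands in for the kernel's scnprintf() (they differ only in how truncation is reported), and the trim step assumes at least one value was printed, which holds in the driver because the arrays have fixed non-zero sizes.

#include <stdio.h>

/* Append "name = 0:v0, 1:v1, ...\n" to buf, mimicking the pattern above. */
static size_t dump_indexed_vals(char *buf, size_t len, size_t size,
                                const char *name, const int *vals,
                                unsigned int n)
{
        len += snprintf(buf + len, size - len, "%s =", name);
        for (unsigned int i = 0; i < n; i++)
                len += snprintf(buf + len, size - len, " %u:%d,", i, vals[i]);
        len--;                          /* drop the trailing comma */
        len += snprintf(buf + len, size - len, "\n");
        return len;
}

int main(void)
{
        char buf[256];
        int rssi[4] = { -60, -61, -63, -65 };
        size_t len = dump_indexed_vals(buf, 0, sizeof(buf),
                                       "rssi_chain_in_db[0]", rssi, 4);

        fwrite(buf, 1, len, stdout);    /* rssi_chain_in_db[0] = 0:-60, ... */
        return 0;
}
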
+static inline void
+ath12k_htt_print_rx_pdev_rate_ext_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_rx_pdev_rate_ext_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 j;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_RATE_EXT_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "rssi_mgmt_in_dbm = %d\n",
+ le32_to_cpu(htt_stats_buf->rssi_mgmt_in_dbm));
+
+ len += print_array_to_buf(buf, len, "rx_stbc_ext",
+ htt_stats_buf->rx_stbc_ext,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n");
+ len += print_array_to_buf(buf, len, "ul_ofdma_rx_mcs_ext",
+ htt_stats_buf->ul_ofdma_rx_mcs_ext,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n");
+ len += print_array_to_buf(buf, len, "rx_11ax_su_txbf_mcs_ext",
+ htt_stats_buf->rx_11ax_su_txbf_mcs_ext,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n");
+ len += print_array_to_buf(buf, len, "rx_11ax_mu_txbf_mcs_ext",
+ htt_stats_buf->rx_11ax_mu_txbf_mcs_ext,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n");
+ len += print_array_to_buf(buf, len, "rx_11ax_dl_ofdma_mcs_ext",
+ htt_stats_buf->rx_11ax_dl_ofdma_mcs_ext,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n");
+ len += print_array_to_buf(buf, len, "rx_bw_ext",
+ htt_stats_buf->rx_bw_ext,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT2_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "rx_su_punctured_mode",
+ htt_stats_buf->rx_su_punctured_mode,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_PUNCTURED_MODE_COUNTERS,
+ "\n");
+
+ len += print_array_to_buf(buf, len, "rx_mcs_ext",
+ htt_stats_buf->rx_mcs_ext,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT,
+ NULL);
+ for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS; j++)
+ len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+ j + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT,
+ le32_to_cpu(htt_stats_buf->rx_mcs_ext_2[j]));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_gi_ext[%u] = ", j);
+ len += print_array_to_buf(buf, len, NULL,
+ htt_stats_buf->rx_gi_ext[j],
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT,
+ "\n");
+ }
+
+ for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ul_ofdma_rx_gi_ext[%u] = ", j);
+ len += print_array_to_buf(buf, len, NULL,
+ htt_stats_buf->ul_ofdma_rx_gi_ext[j],
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT,
+ "\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
u16 tag, u16 len, const void *tag_buf,
void *user_data)
@@ -3982,9 +5185,33 @@ static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
case HTT_STATS_PDEV_CCA_COUNTERS_TAG:
ath12k_htt_print_pdev_stats_cca_counters_tlv(tag_buf, len, stats_req);
break;
+ case HTT_STATS_TX_SOUNDING_STATS_TAG:
+ ath12k_htt_print_tx_sounding_stats_tlv(tag_buf, len, stats_req);
+ break;
case HTT_STATS_PDEV_OBSS_PD_TAG:
ath12k_htt_print_pdev_obss_pd_stats_tlv(tag_buf, len, stats_req);
break;
+ case HTT_STATS_LATENCY_CTX_TAG:
+ ath12k_htt_print_latency_prof_ctx_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_LATENCY_CNT_TAG:
+ ath12k_htt_print_latency_prof_cnt(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_LATENCY_PROF_STATS_TAG:
+ ath12k_htt_print_latency_prof_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_RX_PDEV_UL_TRIG_STATS_TAG:
+ ath12k_htt_print_ul_ofdma_trigger_stats(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_RX_PDEV_UL_OFDMA_USER_STATS_TAG:
+ ath12k_htt_print_ul_ofdma_user_stats(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_RX_PDEV_UL_MUMIMO_TRIG_STATS_TAG:
+ ath12k_htt_print_ul_mumimo_trig_stats(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_RX_FSE_STATS_TAG:
+ ath12k_htt_print_rx_fse_stats_tlv(tag_buf, len, stats_req);
+ break;
case HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG:
ath12k_htt_print_pdev_tx_rate_txbf_stats_tlv(tag_buf, len, stats_req);
break;
@@ -4047,6 +5274,15 @@ static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
ath12k_htt_print_pdev_mbssid_ctrl_frame_stats_tlv(tag_buf, len,
stats_req);
break;
+ case HTT_STATS_TX_PDEV_RATE_STATS_TAG:
+ ath12k_htt_print_tx_pdev_rate_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_RX_PDEV_RATE_STATS_TAG:
+ ath12k_htt_print_rx_pdev_rate_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_RX_PDEV_RATE_EXT_STATS_TAG:
+ ath12k_htt_print_rx_pdev_rate_ext_stats_tlv(tag_buf, len, stats_req);
+ break;
default:
break;
}
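The handler registration above is a flat tag switch: each new TLV tag maps to exactly one print routine, and every routine bails out when tag_len is shorter than the structure it expects, so truncated or malformed TLVs are silently skipped. A hedged, self-contained model of that dispatch-plus-length-guard shape follows; the payload layout and function names are invented, host-endian uint32_t replaces the driver's __le32, and only the tag value 34 is taken from HTT_STATS_TX_PDEV_RATE_STATS_TAG in the header below.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Invented two-word payload standing in for a real rate-stats TLV. */
struct demo_rate_tlv {
        uint32_t mac_id;
        uint32_t tx_ldpc;
};

static void demo_print_rate(const void *payload, uint16_t len)
{
        const struct demo_rate_tlv *t = payload;

        if (len < sizeof(*t))   /* same guard as the print handlers above */
                return;
        printf("tx_ldpc = %" PRIu32 "\n", t->tx_ldpc);
}

static void dispatch_tlv(uint16_t tag, uint16_t len, const void *payload)
{
        switch (tag) {
        case 34:                /* HTT_STATS_TX_PDEV_RATE_STATS_TAG */
                demo_print_rate(payload, len);
                break;
        default:                /* unknown tags are skipped, not errors */
                break;
        }
}

int main(void)
{
        struct demo_rate_tlv t = { .mac_id = 0, .tx_ldpc = 42 };

        dispatch_tlv(34, sizeof(t), &t);
        return 0;
}
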
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
index 4b59976fbc35..c2a02cf8a38b 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef DEBUG_HTT_STATS_H
@@ -123,30 +123,38 @@ struct ath12k_htt_extd_stats_msg {
/* htt_dbg_ext_stats_type */
enum ath12k_dbg_htt_ext_stats_type {
- ATH12K_DBG_HTT_EXT_STATS_RESET = 0,
- ATH12K_DBG_HTT_EXT_STATS_PDEV_TX = 1,
- ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4,
- ATH12K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5,
- ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM = 6,
- ATH12K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8,
- ATH12K_DBG_HTT_EXT_STATS_TX_SELFGEN_INFO = 12,
- ATH12K_DBG_HTT_EXT_STATS_SRNG_INFO = 15,
- ATH12K_DBG_HTT_EXT_STATS_SFM_INFO = 16,
- ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17,
- ATH12K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19,
- ATH12K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23,
- ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_RATE_TXBF = 31,
- ATH12K_DBG_HTT_EXT_STATS_TXBF_OFDMA = 32,
- ATH12K_DBG_HTT_EXT_STATS_DLPAGER_STATS = 36,
- ATH12K_DBG_HTT_EXT_PHY_COUNTERS_AND_PHY_STATS = 37,
- ATH12K_DBG_HTT_EXT_VDEVS_TXRX_STATS = 38,
- ATH12K_DBG_HTT_EXT_PDEV_PER_STATS = 40,
- ATH12K_DBG_HTT_EXT_AST_ENTRIES = 41,
- ATH12K_DBG_HTT_EXT_STATS_SOC_ERROR = 45,
- ATH12K_DBG_HTT_DBG_PDEV_PUNCTURE_STATS = 46,
- ATH12K_DBG_HTT_EXT_STATS_PDEV_SCHED_ALGO = 49,
- ATH12K_DBG_HTT_EXT_STATS_MANDATORY_MUOFDMA = 51,
- ATH12K_DGB_HTT_EXT_STATS_PDEV_MBSSID_CTRL_FRAME = 54,
+ ATH12K_DBG_HTT_EXT_STATS_RESET = 0,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TX = 1,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM = 6,
+ ATH12K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_RATE = 9,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_RX_RATE = 10,
+ ATH12K_DBG_HTT_EXT_STATS_TX_SELFGEN_INFO = 12,
+ ATH12K_DBG_HTT_EXT_STATS_SRNG_INFO = 15,
+ ATH12K_DBG_HTT_EXT_STATS_SFM_INFO = 16,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19,
+ ATH12K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO = 22,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23,
+ ATH12K_DBG_HTT_EXT_STATS_LATENCY_PROF_STATS = 25,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_UL_TRIG_STATS = 26,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_UL_MUMIMO_TRIG_STATS = 27,
+ ATH12K_DBG_HTT_EXT_STATS_FSE_RX = 28,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_RX_RATE_EXT = 30,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_RATE_TXBF = 31,
+ ATH12K_DBG_HTT_EXT_STATS_TXBF_OFDMA = 32,
+ ATH12K_DBG_HTT_EXT_STATS_DLPAGER_STATS = 36,
+ ATH12K_DBG_HTT_EXT_PHY_COUNTERS_AND_PHY_STATS = 37,
+ ATH12K_DBG_HTT_EXT_VDEVS_TXRX_STATS = 38,
+ ATH12K_DBG_HTT_EXT_PDEV_PER_STATS = 40,
+ ATH12K_DBG_HTT_EXT_AST_ENTRIES = 41,
+ ATH12K_DBG_HTT_EXT_STATS_SOC_ERROR = 45,
+ ATH12K_DBG_HTT_DBG_PDEV_PUNCTURE_STATS = 46,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_SCHED_ALGO = 49,
+ ATH12K_DBG_HTT_EXT_STATS_MANDATORY_MUOFDMA = 51,
+ ATH12K_DGB_HTT_EXT_STATS_PDEV_MBSSID_CTRL_FRAME = 54,
/* keep this last */
ATH12K_DBG_HTT_NUM_EXT_STATS,
@@ -173,6 +181,8 @@ enum ath12k_dbg_htt_tlv_tag {
HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG = 25,
HTT_STATS_SFM_CMN_TAG = 26,
HTT_STATS_SRING_STATS_TAG = 27,
+ HTT_STATS_TX_PDEV_RATE_STATS_TAG = 34,
+ HTT_STATS_RX_PDEV_RATE_STATS_TAG = 35,
HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG = 36,
HTT_STATS_TX_SCHED_CMN_TAG = 37,
HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG = 39,
@@ -195,12 +205,21 @@ enum ath12k_dbg_htt_tlv_tag {
HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG = 72,
HTT_STATS_PDEV_CCA_COUNTERS_TAG = 73,
HTT_STATS_TX_PDEV_MPDU_STATS_TAG = 74,
+ HTT_STATS_TX_SOUNDING_STATS_TAG = 80,
HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG = 86,
HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG = 87,
HTT_STATS_PDEV_OBSS_PD_TAG = 88,
HTT_STATS_HW_WAR_TAG = 89,
+ HTT_STATS_LATENCY_PROF_STATS_TAG = 91,
+ HTT_STATS_LATENCY_CTX_TAG = 92,
+ HTT_STATS_LATENCY_CNT_TAG = 93,
+ HTT_STATS_RX_PDEV_UL_TRIG_STATS_TAG = 94,
+ HTT_STATS_RX_PDEV_UL_OFDMA_USER_STATS_TAG = 95,
+ HTT_STATS_RX_PDEV_UL_MUMIMO_TRIG_STATS_TAG = 97,
+ HTT_STATS_RX_FSE_STATS_TAG = 98,
HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG = 100,
HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG = 102,
+ HTT_STATS_RX_PDEV_RATE_EXT_STATS_TAG = 103,
HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG = 108,
HTT_STATS_TX_SELFGEN_AC_SCHED_STATUS_STATS_TAG = 111,
HTT_STATS_TX_SELFGEN_AX_SCHED_STATUS_STATS_TAG = 112,
@@ -387,6 +406,182 @@ struct ath12k_htt_tx_pdev_mu_ppdu_dist_stats_tlv {
__le32 num_ppdu_posted_per_burst[ATH12K_HTT_STATS_MU_PPDU_PER_BURST_WORDS];
} __packed;
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS 12
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS 4
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS 5
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS 4
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES 7
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS 4
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_LTF 4
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS 2
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS 2
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_11AX_TRIGGER_TYPES 6
+
+struct ath12k_htt_tx_pdev_rate_stats_tlv {
+ __le32 mac_id_word;
+ __le32 tx_ldpc;
+ __le32 rts_cnt;
+ __le32 ack_rssi;
+ __le32 tx_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 tx_su_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 tx_mu_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ __le32 tx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ __le32 tx_stbc[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 tx_pream[ATH12K_HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES];
+ __le32 tx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 tx_dcm[ATH12K_HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS];
+ __le32 rts_success;
+ __le32 tx_legacy_cck_rate[ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS];
+ __le32 tx_legacy_ofdm_rate[ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS];
+ __le32 ac_mu_mimo_tx_ldpc;
+ __le32 ax_mu_mimo_tx_ldpc;
+ __le32 ofdma_tx_ldpc;
+ __le32 tx_he_ltf[ATH12K_HTT_TX_PDEV_STATS_NUM_LTF];
+ __le32 ac_mu_mimo_tx_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 ax_mu_mimo_tx_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 ofdma_tx_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 ac_mu_mimo_tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ __le32 ax_mu_mimo_tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ __le32 ofdma_tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ __le32 ac_mu_mimo_tx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ __le32 ax_mu_mimo_tx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ __le32 ofdma_tx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ __le32 ac_mu_mimo_tx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 ax_mimo_tx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 ofdma_tx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 trigger_type_11ax[ATH12K_HTT_TX_PDEV_STATS_NUM_11AX_TRIGGER_TYPES];
+ __le32 tx_11ax_su_ext;
+ __le32 tx_mcs_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+ __le32 tx_stbc_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+ __le32 tx_gi_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+ __le32 ax_mu_mimo_tx_mcs_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+ __le32 ofdma_tx_mcs_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+ __le32 ax_tx_gi_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+ __le32 ofd_tx_gi_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+ __le32 tx_mcs_ext_2[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS];
+ __le32 tx_bw_320mhz;
+};
+
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS 4
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS 12
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS 4
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS 5
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS 4
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES 7
+#define ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER 8
+#define ATH12K_HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_NSS 16
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS 6
+#define ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER 8
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS 2
+
+struct ath12k_htt_rx_pdev_rate_stats_tlv {
+ __le32 mac_id_word;
+ __le32 nsts;
+ __le32 rx_ldpc;
+ __le32 rts_cnt;
+ __le32 rssi_mgmt;
+ __le32 rssi_data;
+ __le32 rssi_comb;
+ __le32 rx_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 rx_nss[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ __le32 rx_dcm[ATH12K_HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS];
+ __le32 rx_stbc[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 rx_bw[ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+ __le32 rx_pream[ATH12K_HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES];
+ u8 rssi_chain_in_db[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+ __le32 rx_gi[ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS]
+ [ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 rssi_in_dbm;
+ __le32 rx_11ax_su_ext;
+ __le32 rx_11ac_mumimo;
+ __le32 rx_11ax_mumimo;
+ __le32 rx_11ax_ofdma;
+ __le32 txbf;
+ __le32 rx_legacy_cck_rate[ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS];
+ __le32 rx_legacy_ofdm_rate[ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS];
+ __le32 rx_active_dur_us_low;
+ __le32 rx_active_dur_us_high;
+ __le32 rx_11ax_ul_ofdma;
+ __le32 ul_ofdma_rx_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 ul_ofdma_rx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 ul_ofdma_rx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ __le32 ul_ofdma_rx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ __le32 ul_ofdma_rx_stbc;
+ __le32 ul_ofdma_rx_ldpc;
+ __le32 rx_ulofdma_non_data_ppdu[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+ __le32 rx_ulofdma_data_ppdu[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+ __le32 rx_ulofdma_mpdu_ok[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+ __le32 rx_ulofdma_mpdu_fail[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+ __le32 nss_count;
+ __le32 pilot_count;
+ __le32 rx_pil_evm_db[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [ATH12K_HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_NSS];
+ __le32 rx_pilot_evm_db_mean[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ s8 rx_ul_fd_rssi[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+ __le32 per_chain_rssi_pkt_type;
+ s8 rx_per_chain_rssi_in_dbm[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+ __le32 rx_su_ndpa;
+ __le32 rx_11ax_su_txbf_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 rx_mu_ndpa;
+ __le32 rx_11ax_mu_txbf_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 rx_br_poll;
+ __le32 rx_11ax_dl_ofdma_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 rx_11ax_dl_ofdma_ru[ATH12K_HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS];
+ __le32 rx_ulmumimo_non_data_ppdu[ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+ __le32 rx_ulmumimo_data_ppdu[ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+ __le32 rx_ulmumimo_mpdu_ok[ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+ __le32 rx_ulmumimo_mpdu_fail[ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+ __le32 rx_ulofdma_non_data_nusers[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+ __le32 rx_ulofdma_data_nusers[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+ __le32 rx_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+};
+
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS 4
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT 14
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS 2
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT2_COUNTERS 5
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_PUNCTURED_MODE_COUNTERS 5
+
+struct ath12k_htt_rx_pdev_rate_ext_stats_tlv {
+ u8 rssi_chain_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS];
+ s8 rx_per_chain_rssi_ext_in_dbm[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS];
+ __le32 rssi_mcast_in_dbm;
+ __le32 rssi_mgmt_in_dbm;
+ __le32 rx_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+ __le32 rx_stbc_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+ __le32 rx_gi_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS]
+ [ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+ __le32 ul_ofdma_rx_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+ __le32 ul_ofdma_rx_gi_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+ __le32 rx_11ax_su_txbf_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+ __le32 rx_11ax_mu_txbf_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+ __le32 rx_11ax_dl_ofdma_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+ __le32 rx_mcs_ext_2[ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS];
+ __le32 rx_bw_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT2_COUNTERS];
+ __le32 rx_gi_ext_2[ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS]
+ [ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS];
+ __le32 rx_su_punctured_mode[ATH12K_HTT_RX_PDEV_STATS_NUM_PUNCTURED_MODE_COUNTERS];
+};
+
#define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID GENMASK(7, 0)
#define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID GENMASK(15, 8)
@@ -1058,6 +1253,82 @@ struct ath12k_htt_pdev_cca_stats_hist_v1_tlv {
__le32 collection_interval;
} __packed;
+#define ATH12K_HTT_TX_CV_CORR_MAX_NUM_COLUMNS 8
+#define ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS 4
+#define ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS 8
+#define ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS 8
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS 4
+#define ATH12K_HTT_TX_NUM_MCS_CNTRS 12
+#define ATH12K_HTT_TX_NUM_EXTRA_MCS_CNTRS 2
+
+#define ATH12K_HTT_TX_NUM_OF_SOUNDING_STATS_WORDS \
+ (ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS * \
+ ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS)
+
+enum ath12k_htt_txbf_sound_steer_modes {
+ ATH12K_HTT_IMPL_STEER_STATS = 0,
+ ATH12K_HTT_EXPL_SUSIFS_STEER_STATS = 1,
+ ATH12K_HTT_EXPL_SURBO_STEER_STATS = 2,
+ ATH12K_HTT_EXPL_MUSIFS_STEER_STATS = 3,
+ ATH12K_HTT_EXPL_MURBO_STEER_STATS = 4,
+ ATH12K_HTT_TXBF_MAX_NUM_OF_MODES = 5
+};
+
+enum ath12k_htt_stats_sounding_tx_mode {
+ ATH12K_HTT_TX_AC_SOUNDING_MODE = 0,
+ ATH12K_HTT_TX_AX_SOUNDING_MODE = 1,
+ ATH12K_HTT_TX_BE_SOUNDING_MODE = 2,
+ ATH12K_HTT_TX_CMN_SOUNDING_MODE = 3,
+};
+
+struct ath12k_htt_tx_sounding_stats_tlv {
+ __le32 tx_sounding_mode;
+ __le32 cbf_20[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES];
+ __le32 cbf_40[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES];
+ __le32 cbf_80[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES];
+ __le32 cbf_160[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES];
+ __le32 sounding[ATH12K_HTT_TX_NUM_OF_SOUNDING_STATS_WORDS];
+ __le32 cv_nc_mismatch_err;
+ __le32 cv_fcs_err;
+ __le32 cv_frag_idx_mismatch;
+ __le32 cv_invalid_peer_id;
+ __le32 cv_no_txbf_setup;
+ __le32 cv_expiry_in_update;
+ __le32 cv_pkt_bw_exceed;
+ __le32 cv_dma_not_done_err;
+ __le32 cv_update_failed;
+ __le32 cv_total_query;
+ __le32 cv_total_pattern_query;
+ __le32 cv_total_bw_query;
+ __le32 cv_invalid_bw_coding;
+ __le32 cv_forced_sounding;
+ __le32 cv_standalone_sounding;
+ __le32 cv_nc_mismatch;
+ __le32 cv_fb_type_mismatch;
+ __le32 cv_ofdma_bw_mismatch;
+ __le32 cv_bw_mismatch;
+ __le32 cv_pattern_mismatch;
+ __le32 cv_preamble_mismatch;
+ __le32 cv_nr_mismatch;
+ __le32 cv_in_use_cnt_exceeded;
+ __le32 cv_found;
+ __le32 cv_not_found;
+ __le32 sounding_320[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS];
+ __le32 cbf_320[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES];
+ __le32 cv_ntbr_sounding;
+ __le32 cv_found_upload_in_progress;
+ __le32 cv_expired_during_query;
+ __le32 cv_dma_timeout_error;
+ __le32 cv_buf_ibf_uploads;
+ __le32 cv_buf_ebf_uploads;
+ __le32 cv_buf_received;
+ __le32 cv_buf_fed_back;
+ __le32 cv_total_query_ibf;
+ __le32 cv_found_ibf;
+ __le32 cv_not_found_ibf;
+ __le32 cv_expired_during_query_ibf;
+} __packed;
+
struct ath12k_htt_pdev_obss_pd_stats_tlv {
__le32 num_obss_tx_ppdu_success;
__le32 num_obss_tx_ppdu_failure;
@@ -1080,6 +1351,127 @@ struct ath12k_htt_pdev_obss_pd_stats_tlv {
__le32 num_sr_ppdu_abort_flush_cnt;
} __packed;
+#define ATH12K_HTT_STATS_MAX_PROF_STATS_NAME_LEN 32
+#define ATH12K_HTT_LATENCY_PROFILE_NUM_MAX_HIST 3
+#define ATH12K_HTT_INTERRUPTS_LATENCY_PROFILE_MAX_HIST 3
+
+struct ath12k_htt_latency_prof_stats_tlv {
+ __le32 print_header;
+ s8 latency_prof_name[ATH12K_HTT_STATS_MAX_PROF_STATS_NAME_LEN];
+ __le32 cnt;
+ __le32 min;
+ __le32 max;
+ __le32 last;
+ __le32 tot;
+ __le32 avg;
+ __le32 hist_intvl;
+ __le32 hist[ATH12K_HTT_LATENCY_PROFILE_NUM_MAX_HIST];
+} __packed;
+
+struct ath12k_htt_latency_prof_ctx_tlv {
+ __le32 duration;
+ __le32 tx_msdu_cnt;
+ __le32 tx_mpdu_cnt;
+ __le32 tx_ppdu_cnt;
+ __le32 rx_msdu_cnt;
+ __le32 rx_mpdu_cnt;
+} __packed;
+
+struct ath12k_htt_latency_prof_cnt_tlv {
+ __le32 prof_enable_cnt;
+} __packed;
+
+#define ATH12K_HTT_RX_NUM_MCS_CNTRS 12
+#define ATH12K_HTT_RX_NUM_GI_CNTRS 4
+#define ATH12K_HTT_RX_NUM_SPATIAL_STREAMS 8
+#define ATH12K_HTT_RX_NUM_BW_CNTRS 4
+#define ATH12K_HTT_RX_NUM_RU_SIZE_CNTRS 6
+#define ATH12K_HTT_RX_NUM_RU_SIZE_160MHZ_CNTRS 7
+#define ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK 5
+#define ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES 2
+#define ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS 2
+
+enum ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE {
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_26,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_52,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_106,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_242,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_484,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996x2,
+ ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS,
+};
+
+struct ath12k_htt_rx_pdev_ul_ofdma_user_stats_tlv {
+ __le32 user_index;
+ __le32 rx_ulofdma_non_data_ppdu;
+ __le32 rx_ulofdma_data_ppdu;
+ __le32 rx_ulofdma_mpdu_ok;
+ __le32 rx_ulofdma_mpdu_fail;
+ __le32 rx_ulofdma_non_data_nusers;
+ __le32 rx_ulofdma_data_nusers;
+} __packed;
+
+struct ath12k_htt_rx_pdev_ul_trigger_stats_tlv {
+ __le32 mac_id__word;
+ __le32 rx_11ax_ul_ofdma;
+ __le32 ul_ofdma_rx_mcs[ATH12K_HTT_RX_NUM_MCS_CNTRS];
+ __le32 ul_ofdma_rx_gi[ATH12K_HTT_RX_NUM_GI_CNTRS][ATH12K_HTT_RX_NUM_MCS_CNTRS];
+ __le32 ul_ofdma_rx_nss[ATH12K_HTT_RX_NUM_SPATIAL_STREAMS];
+ __le32 ul_ofdma_rx_bw[ATH12K_HTT_RX_NUM_BW_CNTRS];
+ __le32 ul_ofdma_rx_stbc;
+ __le32 ul_ofdma_rx_ldpc;
+ __le32 data_ru_size_ppdu[ATH12K_HTT_RX_NUM_RU_SIZE_160MHZ_CNTRS];
+ __le32 non_data_ru_size_ppdu[ATH12K_HTT_RX_NUM_RU_SIZE_160MHZ_CNTRS];
+ __le32 uplink_sta_aid[ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK];
+ __le32 uplink_sta_target_rssi[ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK];
+ __le32 uplink_sta_fd_rssi[ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK];
+ __le32 uplink_sta_power_headroom[ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK];
+ __le32 red_bw[ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_RX_NUM_BW_CNTRS];
+ __le32 ul_ofdma_bsc_trig_rx_qos_null_only;
+} __packed;
+
+#define ATH12K_HTT_TX_UL_MUMIMO_USER_STATS 8
+
+struct ath12k_htt_rx_ul_mumimo_trig_stats_tlv {
+ __le32 mac_id__word;
+ __le32 rx_11ax_ul_mumimo;
+ __le32 ul_mumimo_rx_mcs[ATH12K_HTT_RX_NUM_MCS_CNTRS];
+ __le32 ul_rx_gi[ATH12K_HTT_RX_NUM_GI_CNTRS][ATH12K_HTT_RX_NUM_MCS_CNTRS];
+ __le32 ul_mumimo_rx_nss[ATH12K_HTT_RX_NUM_SPATIAL_STREAMS];
+ __le32 ul_mumimo_rx_bw[ATH12K_HTT_RX_NUM_BW_CNTRS];
+ __le32 ul_mumimo_rx_stbc;
+ __le32 ul_mumimo_rx_ldpc;
+ __le32 ul_mumimo_rx_mcs_ext[ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS];
+ __le32 ul_gi_ext[ATH12K_HTT_RX_NUM_GI_CNTRS][ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS];
+ s8 ul_rssi[ATH12K_HTT_RX_NUM_SPATIAL_STREAMS][ATH12K_HTT_RX_NUM_BW_CNTRS];
+ s8 tgt_rssi[ATH12K_HTT_TX_UL_MUMIMO_USER_STATS][ATH12K_HTT_RX_NUM_BW_CNTRS];
+ s8 fd[ATH12K_HTT_TX_UL_MUMIMO_USER_STATS][ATH12K_HTT_RX_NUM_SPATIAL_STREAMS];
+ s8 db[ATH12K_HTT_TX_UL_MUMIMO_USER_STATS][ATH12K_HTT_RX_NUM_SPATIAL_STREAMS];
+ __le32 red_bw[ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_RX_NUM_BW_CNTRS];
+ __le32 mumimo_bsc_trig_rx_qos_null_only;
+} __packed;
+
+#define ATH12K_HTT_RX_NUM_MAX_PEAK_OCCUPANCY_INDEX 10
+#define ATH12K_HTT_RX_NUM_MAX_CURR_OCCUPANCY_INDEX 10
+#define ATH12K_HTT_RX_NUM_SQUARE_INDEX 6
+#define ATH12K_HTT_RX_NUM_MAX_PEAK_SEARCH_INDEX 4
+#define ATH12K_HTT_RX_NUM_MAX_PENDING_SEARCH_INDEX 4
+
+struct ath12k_htt_rx_fse_stats_tlv {
+ __le32 fse_enable_cnt;
+ __le32 fse_disable_cnt;
+ __le32 fse_cache_invalidate_entry_cnt;
+ __le32 fse_full_cache_invalidate_cnt;
+ __le32 fse_num_cache_hits_cnt;
+ __le32 fse_num_searches_cnt;
+ __le32 fse_cache_occupancy_peak_cnt[ATH12K_HTT_RX_NUM_MAX_PEAK_OCCUPANCY_INDEX];
+ __le32 fse_cache_occupancy_curr_cnt[ATH12K_HTT_RX_NUM_MAX_CURR_OCCUPANCY_INDEX];
+ __le32 fse_search_stat_square_cnt[ATH12K_HTT_RX_NUM_SQUARE_INDEX];
+ __le32 fse_search_stat_peak_cnt[ATH12K_HTT_RX_NUM_MAX_PEAK_SEARCH_INDEX];
+ __le32 fse_search_stat_pending_cnt[ATH12K_HTT_RX_NUM_MAX_PENDING_SEARCH_INDEX];
+} __packed;
+
#define ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS 14
#define ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8
#define ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
@@ -1417,17 +1809,6 @@ enum ATH12K_HTT_RC_MODE {
ATH12K_HTT_RC_MODE_2D_COUNT
};
-enum ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE {
- ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_26,
- ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_52,
- ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_106,
- ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_242,
- ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_484,
- ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996,
- ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996x2,
- ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS
-};
-
enum ath12k_htt_stats_rc_mode {
ATH12K_HTT_STATS_RC_MODE_DLSU = 0,
ATH12K_HTT_STATS_RC_MODE_DLMUMIMO = 1,
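Every counter in the TLV structures above is stored exactly as the firmware emits it, as a little-endian __le32 (words such as mac_id_word additionally pack subfields), and the printers convert with le32_to_cpu()/le32_get_bits() before formatting. The sketch below models that decode step in plain userspace C, assuming an 8-bit mac_id in the low bits purely for illustration; le32_to_cpu_model() and get_bits() are stand-ins for the kernel's byteorder and bitfield helpers.

#include <stdint.h>
#include <stdio.h>

/* Decode a little-endian 32-bit word from raw bytes, independent of the
 * host's own byte order (what le32_to_cpu() does for __le32 fields). */
static uint32_t le32_to_cpu_model(const uint8_t *p)
{
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Extract a contiguous bit field of 'width' bits starting at 'lsb'. */
static uint32_t get_bits(uint32_t word, unsigned int lsb, unsigned int width)
{
        return (word >> lsb) & ((1u << width) - 1u);
}

int main(void)
{
        /* 0x00000102 stored little-endian; an 8-bit mac_id = 2 is assumed
         * to live in bits 7..0 for this example only. */
        uint8_t raw[4] = { 0x02, 0x01, 0x00, 0x00 };
        uint32_t word = le32_to_cpu_model(raw);

        printf("mac_id = %u\n", (unsigned int)get_bits(word, 0, 8));
        return 0;
}
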
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_sta.c b/drivers/net/wireless/ath/ath12k/debugfs_sta.c
new file mode 100644
index 000000000000..5bd2bf4c9dac
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/debugfs_sta.c
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/vmalloc.h>
+
+#include "debugfs_sta.h"
+#include "core.h"
+#include "peer.h"
+#include "debug.h"
+#include "debugfs_htt_stats.h"
+#include "debugfs.h"
+
+static
+u32 ath12k_dbg_sta_dump_rate_stats(u8 *buf, u32 offset, const int size,
+ bool he_rates_avail,
+ const struct ath12k_rx_peer_rate_stats *stats)
+{
+ static const char *legacy_rate_str[HAL_RX_MAX_NUM_LEGACY_RATES] = {
+ "1 Mbps", "2 Mbps", "5.5 Mbps", "6 Mbps",
+ "9 Mbps", "11 Mbps", "12 Mbps", "18 Mbps",
+ "24 Mbps", "36 Mbps", "48 Mbps", "54 Mbps"};
+ u8 max_bw = HAL_RX_BW_MAX, max_gi = HAL_RX_GI_MAX, max_mcs = HAL_RX_MAX_NSS;
+ int mcs = 0, bw = 0, nss = 0, gi = 0, bw_num = 0;
+ u32 i, len = offset, max = max_bw * max_gi * max_mcs;
+ bool found;
+
+ len += scnprintf(buf + len, size - len, "\nEHT stats:\n");
+ for (i = 0; i <= HAL_RX_MAX_MCS_BE; i++)
+ len += scnprintf(buf + len, size - len,
+ "MCS %d: %llu%s", i, stats->be_mcs_count[i],
+ (i + 1) % 8 ? "\t" : "\n");
+
+ len += scnprintf(buf + len, size - len, "\nHE stats:\n");
+ for (i = 0; i <= HAL_RX_MAX_MCS_HE; i++)
+ len += scnprintf(buf + len, size - len,
+ "MCS %d: %llu%s", i, stats->he_mcs_count[i],
+ (i + 1) % 6 ? "\t" : "\n");
+
+ len += scnprintf(buf + len, size - len, "\nVHT stats:\n");
+ for (i = 0; i <= HAL_RX_MAX_MCS_VHT; i++)
+ len += scnprintf(buf + len, size - len,
+ "MCS %d: %llu%s", i, stats->vht_mcs_count[i],
+ (i + 1) % 5 ? "\t" : "\n");
+
+ len += scnprintf(buf + len, size - len, "\nHT stats:\n");
+ for (i = 0; i <= HAL_RX_MAX_MCS_HT; i++)
+ len += scnprintf(buf + len, size - len,
+ "MCS %d: %llu%s", i, stats->ht_mcs_count[i],
+ (i + 1) % 8 ? "\t" : "\n");
+
+ len += scnprintf(buf + len, size - len, "\nLegacy stats:\n");
+ for (i = 0; i < HAL_RX_MAX_NUM_LEGACY_RATES; i++)
+ len += scnprintf(buf + len, size - len,
+ "%s: %llu%s", legacy_rate_str[i],
+ stats->legacy_count[i],
+ (i + 1) % 4 ? "\t" : "\n");
+
+ len += scnprintf(buf + len, size - len, "\nNSS stats:\n");
+ for (i = 0; i < HAL_RX_MAX_NSS; i++)
+ len += scnprintf(buf + len, size - len,
+ "%dx%d: %llu ", i + 1, i + 1,
+ stats->nss_count[i]);
+
+ len += scnprintf(buf + len, size - len,
+ "\n\nGI: 0.8 us %llu 0.4 us %llu 1.6 us %llu 3.2 us %llu\n",
+ stats->gi_count[0],
+ stats->gi_count[1],
+ stats->gi_count[2],
+ stats->gi_count[3]);
+
+ len += scnprintf(buf + len, size - len,
+ "BW: 20 MHz %llu 40 MHz %llu 80 MHz %llu 160 MHz %llu 320 MHz %llu\n",
+ stats->bw_count[0],
+ stats->bw_count[1],
+ stats->bw_count[2],
+ stats->bw_count[3],
+ stats->bw_count[4]);
+
+ for (i = 0; i < max; i++) {
+ found = false;
+
+ for (mcs = 0; mcs <= HAL_RX_MAX_MCS_HT; mcs++) {
+ if (stats->rx_rate[bw][gi][nss][mcs]) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ goto skip_report;
+
+ switch (bw) {
+ case HAL_RX_BW_20MHZ:
+ bw_num = 20;
+ break;
+ case HAL_RX_BW_40MHZ:
+ bw_num = 40;
+ break;
+ case HAL_RX_BW_80MHZ:
+ bw_num = 80;
+ break;
+ case HAL_RX_BW_160MHZ:
+ bw_num = 160;
+ break;
+ case HAL_RX_BW_320MHZ:
+ bw_num = 320;
+ break;
+ }
+
+ len += scnprintf(buf + len, size - len, "\n%d Mhz gi %d us %dx%d : ",
+ bw_num, gi, nss + 1, nss + 1);
+
+ for (mcs = 0; mcs <= HAL_RX_MAX_MCS_HT; mcs++) {
+ if (stats->rx_rate[bw][gi][nss][mcs])
+ len += scnprintf(buf + len, size - len,
+ " %d:%llu", mcs,
+ stats->rx_rate[bw][gi][nss][mcs]);
+ }
+
+skip_report:
+ if (nss++ >= max_mcs - 1) {
+ nss = 0;
+ if (gi++ >= max_gi - 1) {
+ gi = 0;
+ if (bw < max_bw - 1)
+ bw++;
+ }
+ }
+ }
+
+ len += scnprintf(buf + len, size - len, "\n");
+
+ return len - offset;
+}
+
+static ssize_t ath12k_dbg_sta_dump_rx_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_link_sta *link_sta = file->private_data;
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(link_sta->sta);
+ const int size = ATH12K_STA_RX_STATS_BUF_SIZE;
+ struct ath12k_hw *ah = ahsta->ahvif->ah;
+ struct ath12k_rx_peer_stats *rx_stats;
+ struct ath12k_link_sta *arsta;
+ u8 link_id = link_sta->link_id;
+ int len = 0, i, ret = 0;
+ bool he_rates_avail;
+ struct ath12k *ar;
+
+ wiphy_lock(ah->hw->wiphy);
+
+ if (!(BIT(link_id) & ahsta->links_map)) {
+ wiphy_unlock(ah->hw->wiphy);
+ return -ENOENT;
+ }
+
+ arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+ if (!arsta || !arsta->arvif->ar) {
+ wiphy_unlock(ah->hw->wiphy);
+ return -ENOENT;
+ }
+
+ ar = arsta->arvif->ar;
+
+ u8 *buf __free(kfree) = kzalloc(size, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ rx_stats = arsta->rx_stats;
+ if (!rx_stats) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ len += scnprintf(buf + len, size - len, "RX peer stats:\n\n");
+ len += scnprintf(buf + len, size - len, "Num of MSDUs: %llu\n",
+ rx_stats->num_msdu);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs with TCP L4: %llu\n",
+ rx_stats->tcp_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs with UDP L4: %llu\n",
+ rx_stats->udp_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of other MSDUs: %llu\n",
+ rx_stats->other_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs part of AMPDU: %llu\n",
+ rx_stats->ampdu_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs not part of AMPDU: %llu\n",
+ rx_stats->non_ampdu_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs using STBC: %llu\n",
+ rx_stats->stbc_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs beamformed: %llu\n",
+ rx_stats->beamformed_count);
+ len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS ok: %llu\n",
+ rx_stats->num_mpdu_fcs_ok);
+ len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS error: %llu\n",
+ rx_stats->num_mpdu_fcs_err);
+
+ he_rates_avail = rx_stats->pream_cnt[HAL_RX_PREAMBLE_11AX] > 1;
+
+ len += scnprintf(buf + len, size - len,
+ "preamble: 11A %llu 11B %llu 11N %llu 11AC %llu 11AX %llu 11BE %llu\n",
+ rx_stats->pream_cnt[0], rx_stats->pream_cnt[1],
+ rx_stats->pream_cnt[2], rx_stats->pream_cnt[3],
+ rx_stats->pream_cnt[4], rx_stats->pream_cnt[6]);
+ len += scnprintf(buf + len, size - len,
+ "reception type: SU %llu MU_MIMO %llu MU_OFDMA %llu MU_OFDMA_MIMO %llu\n",
+ rx_stats->reception_type[0], rx_stats->reception_type[1],
+ rx_stats->reception_type[2], rx_stats->reception_type[3]);
+
+ len += scnprintf(buf + len, size - len, "TID(0-15) Legacy TID(16):");
+ for (i = 0; i <= IEEE80211_NUM_TIDS; i++)
+ len += scnprintf(buf + len, size - len, "%llu ", rx_stats->tid_count[i]);
+
+ len += scnprintf(buf + len, size - len, "\nRX Duration:%llu\n",
+ rx_stats->rx_duration);
+
+ len += scnprintf(buf + len, size - len,
+ "\nDCM: %llu\nRU26: %llu\nRU52: %llu\nRU106: %llu\nRU242: %llu\nRU484: %llu\nRU996: %llu\nRU996x2: %llu\n",
+ rx_stats->dcm_count, rx_stats->ru_alloc_cnt[0],
+ rx_stats->ru_alloc_cnt[1], rx_stats->ru_alloc_cnt[2],
+ rx_stats->ru_alloc_cnt[3], rx_stats->ru_alloc_cnt[4],
+ rx_stats->ru_alloc_cnt[5], rx_stats->ru_alloc_cnt[6]);
+
+ len += scnprintf(buf + len, size - len, "\nRX success packet stats:\n");
+ len += ath12k_dbg_sta_dump_rate_stats(buf, len, size, he_rates_avail,
+ &rx_stats->pkt_stats);
+
+ len += scnprintf(buf + len, size - len, "\n");
+
+ len += scnprintf(buf + len, size - len, "\nRX success byte stats:\n");
+ len += ath12k_dbg_sta_dump_rate_stats(buf, len, size, he_rates_avail,
+ &rx_stats->byte_stats);
+
+unlock:
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ if (len)
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+out:
+ wiphy_unlock(ah->hw->wiphy);
+ return ret;
+}
+
+static const struct file_operations fops_rx_stats = {
+ .read = ath12k_dbg_sta_dump_rx_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath12k_dbg_sta_reset_rx_stats(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_link_sta *link_sta = file->private_data;
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(link_sta->sta);
+ struct ath12k_hw *ah = ahsta->ahvif->ah;
+ struct ath12k_rx_peer_stats *rx_stats;
+ struct ath12k_link_sta *arsta;
+ u8 link_id = link_sta->link_id;
+ struct ath12k *ar;
+ bool reset;
+ int ret;
+
+ ret = kstrtobool_from_user(buf, count, &reset);
+ if (ret)
+ return ret;
+
+ if (!reset)
+ return -EINVAL;
+
+ wiphy_lock(ah->hw->wiphy);
+
+ if (!(BIT(link_id) & ahsta->links_map)) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+ if (!arsta || !arsta->arvif->ar) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ ar = arsta->arvif->ar;
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ rx_stats = arsta->rx_stats;
+ if (!rx_stats) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ ret = -ENOENT;
+ goto out;
+ }
+
+ memset(rx_stats, 0, sizeof(*rx_stats));
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ ret = count;
+out:
+ wiphy_unlock(ah->hw->wiphy);
+ return ret;
+}
+
+static const struct file_operations fops_reset_rx_stats = {
+ .write = ath12k_dbg_sta_reset_rx_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath12k_debugfs_link_sta_op_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ struct dentry *dir)
+{
+ struct ath12k *ar;
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+ ar = ath12k_get_ar_by_vif(hw, vif, link_sta->link_id);
+ if (!ar)
+ return;
+
+ if (ath12k_debugfs_is_extd_rx_stats_enabled(ar)) {
+ debugfs_create_file("rx_stats", 0400, dir, link_sta,
+ &fops_rx_stats);
+ debugfs_create_file("reset_rx_stats", 0200, dir, link_sta,
+ &fops_reset_rx_stats);
+ }
+}
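The rx_stats read handler above formats the entire snapshot into a kzalloc'd buffer while holding base_lock, drops the locks, and hands the result to simple_read_from_buffer(), which honours *ppos so user space can read the debugfs file in arbitrary chunk sizes. Below is a hedged userspace model of that offset-honouring copy; read_from_snapshot() is an invented name and a plain memcpy() replaces copy_to_user().

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/* Copy from an in-memory snapshot, honouring a file position and the
 * caller's count, returning 0 at EOF (the shape of
 * simple_read_from_buffer(), minus the user-space copy). */
static ssize_t read_from_snapshot(char *dst, size_t count, long *ppos,
                                  const char *src, size_t available)
{
        size_t pos = (size_t)*ppos;
        size_t n;

        if (pos >= available)
                return 0;               /* EOF */
        n = available - pos;
        if (n > count)
                n = count;
        memcpy(dst, src + pos, n);
        *ppos += (long)n;
        return (ssize_t)n;
}

int main(void)
{
        const char snapshot[] = "RX peer stats:\nNum of MSDUs: 10\n";
        char chunk[8];
        long pos = 0;
        ssize_t n;

        /* Read 8 bytes at a time, like repeated read() calls on the file. */
        while ((n = read_from_snapshot(chunk, sizeof(chunk), &pos,
                                       snapshot, strlen(snapshot))) > 0)
                fwrite(chunk, 1, (size_t)n, stdout);
        return 0;
}

The formatted buffer itself needs no explicit kfree() in the handler above: the __free(kfree) annotation releases it automatically when it goes out of scope.
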
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_sta.h b/drivers/net/wireless/ath/ath12k/debugfs_sta.h
new file mode 100644
index 000000000000..8de924f4d7d5
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/debugfs_sta.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _ATH12K_DEBUGFS_STA_H_
+#define _ATH12K_DEBUGFS_STA_H_
+
+#include <net/mac80211.h>
+
+#include "core.h"
+
+#define ATH12K_STA_RX_STATS_BUF_SIZE (1024 * 16)
+
+#ifdef CONFIG_ATH12K_DEBUGFS
+
+void ath12k_debugfs_link_sta_op_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ struct dentry *dir);
+
+#endif /* CONFIG_ATH12K_DEBUGFS */
+
+#endif /* _ATH12K_DEBUGFS_STA_H_ */
diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
index 9e5a4e75f2f6..b1f27c3ac723 100644
--- a/drivers/net/wireless/ath/ath12k/dp.c
+++ b/drivers/net/wireless/ath/ath12k/dp.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <crypto/hash.h>
@@ -1315,6 +1315,9 @@ void ath12k_dp_cc_config(struct ath12k_base *ab)
u32 wbm_base = HAL_SEQ_WCSS_UMAC_WBM_REG;
u32 val = 0;
+ if (ath12k_ftm_mode)
+ return;
+
ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG0(ab), cmem_base);
val |= u32_encode_bits(ATH12K_CMEM_ADDR_MSB,
diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h
index 7ac3143de016..75435a931548 100644
--- a/drivers/net/wireless/ath/ath12k/dp.h
+++ b/drivers/net/wireless/ath/ath12k/dp.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_H
@@ -125,7 +125,6 @@ struct ath12k_mon_data {
struct sk_buff_head rx_status_q;
struct dp_mon_mpdu *mon_mpdu;
struct list_head dp_rx_mon_mpdu_list;
- struct sk_buff *dest_skb_q[DP_MON_MAX_STATUS_BUF];
struct dp_mon_tx_ppdu_info *tx_prot_ppdu_info;
struct dp_mon_tx_ppdu_info *tx_data_ppdu_info;
};
@@ -176,7 +175,7 @@ struct ath12k_pdev_dp {
#define DP_RXDMA_ERR_DST_RING_SIZE 1024
#define DP_RXDMA_MON_STATUS_RING_SIZE 1024
#define DP_RXDMA_MONITOR_BUF_RING_SIZE 4096
-#define DP_RXDMA_MONITOR_DST_RING_SIZE 2048
+#define DP_RXDMA_MONITOR_DST_RING_SIZE 8092
#define DP_RXDMA_MONITOR_DESC_RING_SIZE 4096
#define DP_TX_MONITOR_BUF_RING_SIZE 4096
#define DP_TX_MONITOR_DEST_RING_SIZE 2048
@@ -373,17 +372,24 @@ struct ath12k_dp {
};
/* HTT definitions */
+#define HTT_TAG_TCL_METADATA_VERSION 5
-#define HTT_TCL_META_DATA_TYPE BIT(0)
-#define HTT_TCL_META_DATA_VALID_HTT BIT(1)
+#define HTT_TCL_META_DATA_TYPE GENMASK(1, 0)
+#define HTT_TCL_META_DATA_VALID_HTT BIT(2)
/* vdev meta data */
-#define HTT_TCL_META_DATA_VDEV_ID GENMASK(9, 2)
-#define HTT_TCL_META_DATA_PDEV_ID GENMASK(11, 10)
-#define HTT_TCL_META_DATA_HOST_INSPECTED BIT(12)
+#define HTT_TCL_META_DATA_VDEV_ID GENMASK(10, 3)
+#define HTT_TCL_META_DATA_PDEV_ID GENMASK(12, 11)
+#define HTT_TCL_META_DATA_HOST_INSPECTED_MISSION BIT(13)
/* peer meta data */
-#define HTT_TCL_META_DATA_PEER_ID GENMASK(15, 2)
+#define HTT_TCL_META_DATA_PEER_ID GENMASK(15, 3)
+
+/* Global sequence number */
+#define HTT_TCL_META_DATA_TYPE_GLOBAL_SEQ_NUM 3
+#define HTT_TCL_META_DATA_GLOBAL_SEQ_HOST_INSPECTED BIT(2)
+#define HTT_TCL_META_DATA_GLOBAL_SEQ_NUM GENMASK(14, 3)
+#define HTT_TX_MLO_MCAST_HOST_REINJECT_BASE_VDEV_ID 128
/* HTT tx completion is overlaid in wbm_release_ring */
#define HTT_TX_WBM_COMP_INFO0_STATUS GENMASK(16, 13)
@@ -414,9 +420,15 @@ enum htt_h2t_msg_type {
};
#define HTT_VER_REQ_INFO_MSG_ID GENMASK(7, 0)
+#define HTT_OPTION_TCL_METADATA_VER_V2 2
+#define HTT_OPTION_TAG GENMASK(7, 0)
+#define HTT_OPTION_LEN GENMASK(15, 8)
+#define HTT_OPTION_VALUE GENMASK(31, 16)
+#define HTT_TCL_METADATA_VER_SZ 4
struct htt_ver_req_cmd {
__le32 ver_reg_info;
+ __le32 tcl_metadata_version;
} __packed;
enum htt_srng_ring_type {
@@ -434,8 +446,11 @@ enum htt_srng_ring_id {
HTT_HOST1_TO_FW_RXBUF_RING,
HTT_HOST2_TO_FW_RXBUF_RING,
HTT_RXDMA_NON_MONITOR_DEST_RING,
+ HTT_RXDMA_HOST_BUF_RING2,
HTT_TX_MON_HOST2MON_BUF_RING,
HTT_TX_MON_MON2HOST_DEST_RING,
+ HTT_RX_MON_HOST2MON_BUF_RING,
+ HTT_RX_MON_MON2HOST_DEST_RING,
};
/* host -> target HTT_SRING_SETUP message
@@ -767,8 +782,22 @@ enum htt_stats_internal_ppdu_frametype {
#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID GENMASK(23, 16)
#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS BIT(24)
#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS BIT(25)
-#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE GENMASK(15, 0)
-#define HTT_RX_RING_SELECTION_CFG_CMD_OFFSET_VALID BIT(26)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_OFFSET_VALID BIT(26)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_DROP_THRES_VAL BIT(27)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_EN_RXMON BIT(28)
+
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT GENMASK(18, 16)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL GENMASK(21, 19)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA GENMASK(24, 22)
+
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_DROP_THRESHOLD GENMASK(9, 0)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_MGMT_TYPE BIT(17)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_CTRL_TYPE BIT(18)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_DATA_TYPE BIT(19)
+
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO3_EN_TLV_PKT_OFFSET BIT(0)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO3_PKT_TLV_OFFSET GENMASK(14, 1)
#define HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET GENMASK(15, 0)
#define HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET GENMASK(31, 16)
@@ -797,6 +826,7 @@ enum htt_rx_filter_tlv_flags {
HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS = BIT(10),
HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT = BIT(11),
HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE = BIT(12),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO = BIT(13),
};
enum htt_rx_mgmt_pkt_filter_tlv_flags0 {
@@ -1085,6 +1115,21 @@ enum htt_rx_data_pkt_filter_tlv_flasg3 {
HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \
HTT_RX_FILTER_TLV_FLAGS_ATTENTION)
+#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_DEST_RING \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_RX_PACKET | \
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER | \
+ HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO)
+
/* msdu start. mpdu end, attention, rx hdr tlv's are not subscribed */
#define HTT_RX_TLV_FLAGS_RXDMA_RING \
(HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
@@ -1113,6 +1158,10 @@ struct htt_rx_ring_selection_cfg_cmd {
__le32 info3;
} __packed;
+#define HTT_RX_RING_TLV_DROP_THRESHOLD_VALUE 32
+#define HTT_RX_RING_DEFAULT_DMA_LENGTH 0x7
+#define HTT_RX_RING_PKT_TLV_OFFSET 0x1
+
struct htt_rx_ring_tlv_filter {
u32 rx_filter; /* see htt_rx_filter_tlv_flags */
u32 pkt_filter_flags0; /* MGMT */
@@ -1130,6 +1179,17 @@ struct htt_rx_ring_tlv_filter {
u16 rx_mpdu_start_wmask;
u16 rx_mpdu_end_wmask;
u32 rx_msdu_end_wmask;
+ u32 conf_len_ctrl;
+ u32 conf_len_mgmt;
+ u32 conf_len_data;
+ u16 rx_drop_threshold;
+ bool enable_log_mgmt_type;
+ bool enable_log_ctrl_type;
+ bool enable_log_data_type;
+ bool enable_rx_tlv_offset;
+ u16 rx_tlv_offset;
+ bool drop_threshold_valid;
+ bool rxmon_disable;
};
#define HTT_STATS_FRAME_CTRL_TYPE_MGMT 0x0
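The reworked HTT_TCL_META_DATA_* layout above widens the type field to bits 1:0, moves the HTT-valid flag to bit 2, and places the vdev and pdev IDs at bits 10:3 and 12:11; the driver assembles the word with GENMASK()/u32_encode_bits(). The sketch below redoes that packing with plain C stand-ins (GENMASK_U32(), field_prep() and mask_shift() are invented here), and the field values are made up for the example.

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for GENMASK() and u32_encode_bits()/FIELD_PREP(). */
#define GENMASK_U32(h, l)       (((~0u) << (l)) & (~0u >> (31 - (h))))

static unsigned int mask_shift(uint32_t mask)
{
        unsigned int s = 0;

        while (!(mask & 1u)) {          /* position of the lowest set bit */
                mask >>= 1;
                s++;
        }
        return s;
}

static uint32_t field_prep(uint32_t mask, uint32_t val)
{
        return (val << mask_shift(mask)) & mask;
}

/* Bit layout mirroring the v2 TCL metadata definitions above. */
#define META_TYPE               GENMASK_U32(1, 0)
#define META_VALID_HTT          (1u << 2)
#define META_VDEV_ID            GENMASK_U32(10, 3)
#define META_PDEV_ID            GENMASK_U32(12, 11)

int main(void)
{
        /* Example field values, invented for this sketch. */
        uint32_t meta = field_prep(META_TYPE, 0) |
                        META_VALID_HTT |
                        field_prep(META_VDEV_ID, 5) |
                        field_prep(META_PDEV_ID, 1);

        printf("tcl metadata = 0x%08x\n", (unsigned int)meta); /* 0x0000082c */
        return 0;
}
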
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
index 5a21961cfd46..d22800e89485 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "dp_mon.h"
@@ -10,6 +10,12 @@
#include "dp_tx.h"
#include "peer.h"
+#define ATH12K_LE32_DEC_ENC(value, dec_bits, enc_bits) \
+ u32_encode_bits(le32_get_bits(value, dec_bits), enc_bits)
+
+#define ATH12K_LE64_DEC_ENC(value, dec_bits, enc_bits) \
+ u32_encode_bits(le64_get_bits(value, dec_bits), enc_bits)
+
static void
ath12k_dp_mon_rx_handle_ofdma_info(const struct hal_rx_ppdu_end_user_stats *ppdu_end_user,
struct hal_rx_user_status *rx_user_status)
@@ -75,7 +81,7 @@ ath12k_dp_mon_rx_populate_mu_user_info(const struct hal_rx_ppdu_end_user_stats *
static void ath12k_dp_mon_parse_vht_sig_a(const struct hal_rx_vht_sig_a_info *vht_sig,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
- u32 nsts, group_id, info0, info1;
+ u32 nsts, info0, info1;
u8 gi_setting;
info0 = __le32_to_cpu(vht_sig->info0);
@@ -103,12 +109,8 @@ static void ath12k_dp_mon_parse_vht_sig_a(const struct hal_rx_vht_sig_a_info *vh
ppdu_info->bw = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_BW);
ppdu_info->beamformed = u32_get_bits(info1,
HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED);
- group_id = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID);
- if (group_id == 0 || group_id == 63)
- ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
- else
- ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
- ppdu_info->vht_flag_values5 = group_id;
+ ppdu_info->vht_flag_values5 = u32_get_bits(info0,
+ HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID);
ppdu_info->vht_flag_values3[0] = (((ppdu_info->mcs) << 4) |
ppdu_info->nss);
ppdu_info->vht_flag_values2 = ppdu_info->bw;
@@ -128,7 +130,6 @@ static void ath12k_dp_mon_parse_ht_sig(const struct hal_rx_ht_sig_info *ht_sig,
ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING);
ppdu_info->gi = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_GI);
ppdu_info->nss = (ppdu_info->mcs >> 3);
- ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
}
static void ath12k_dp_mon_parse_l_sig_b(const struct hal_rx_lsig_b_info *lsigb,
@@ -160,7 +161,6 @@ static void ath12k_dp_mon_parse_l_sig_b(const struct hal_rx_lsig_b_info *lsigb,
ppdu_info->rate = rate;
ppdu_info->cck_flag = 1;
- ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
}
static void ath12k_dp_mon_parse_l_sig_a(const struct hal_rx_lsig_a_info *lsiga,
@@ -200,7 +200,6 @@ static void ath12k_dp_mon_parse_l_sig_a(const struct hal_rx_lsig_a_info *lsiga,
}
ppdu_info->rate = rate;
- ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
}
static void
@@ -237,7 +236,6 @@ ath12k_dp_mon_parse_he_sig_b2_ofdma(const struct hal_rx_he_sig_b2_ofdma_info *of
ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS);
ppdu_info->beamformed = u32_get_bits(info0,
HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF);
- ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
}
static void
@@ -277,7 +275,6 @@ ath12k_dp_mon_parse_he_sig_b1_mu(const struct hal_rx_he_sig_b1_mu_info *he_sig_b
HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION);
ppdu_info->ru_alloc = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
ppdu_info->he_RU[0] = ru_tones;
- ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
}
static void
@@ -411,7 +408,6 @@ ath12k_dp_mon_parse_he_sig_mu(const struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_
ppdu_info->is_stbc = info1 &
HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC;
- ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
}
static void ath12k_dp_mon_parse_he_sig_su(const struct hal_rx_he_sig_a_su_info *he_sig_a,
@@ -559,17 +555,887 @@ static void ath12k_dp_mon_parse_he_sig_su(const struct hal_rx_he_sig_a_su_info *
dcm = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM);
ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS);
ppdu_info->dcm = dcm;
- ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_u_sig_cmn(const struct hal_mon_usig_cmn *cmn,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ u32 common;
+
+ ppdu_info->u_sig_info.bw = le32_get_bits(cmn->info0,
+ HAL_RX_USIG_CMN_INFO0_BW);
+ ppdu_info->u_sig_info.ul_dl = le32_get_bits(cmn->info0,
+ HAL_RX_USIG_CMN_INFO0_UL_DL);
+
+ common = __le32_to_cpu(ppdu_info->u_sig_info.usig.common);
+ common |= IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP_KNOWN |
+ ATH12K_LE32_DEC_ENC(cmn->info0,
+ HAL_RX_USIG_CMN_INFO0_PHY_VERSION,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER) |
+ u32_encode_bits(ppdu_info->u_sig_info.bw,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW) |
+ u32_encode_bits(ppdu_info->u_sig_info.ul_dl,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL) |
+ ATH12K_LE32_DEC_ENC(cmn->info0,
+ HAL_RX_USIG_CMN_INFO0_BSS_COLOR,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR) |
+ ATH12K_LE32_DEC_ENC(cmn->info0,
+ HAL_RX_USIG_CMN_INFO0_TXOP,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP);
+ ppdu_info->u_sig_info.usig.common = cpu_to_le32(common);
+
+ switch (ppdu_info->u_sig_info.bw) {
+ default:
+ fallthrough;
+ case HAL_EHT_BW_20:
+ ppdu_info->bw = HAL_RX_BW_20MHZ;
+ break;
+ case HAL_EHT_BW_40:
+ ppdu_info->bw = HAL_RX_BW_40MHZ;
+ break;
+ case HAL_EHT_BW_80:
+ ppdu_info->bw = HAL_RX_BW_80MHZ;
+ break;
+ case HAL_EHT_BW_160:
+ ppdu_info->bw = HAL_RX_BW_160MHZ;
+ break;
+ case HAL_EHT_BW_320_1:
+ case HAL_EHT_BW_320_2:
+ ppdu_info->bw = HAL_RX_BW_320MHZ;
+ break;
+ }
+}
+
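ATH12K_LE32_DEC_ENC, used throughout the U-SIG parsing above, is le32_get_bits() followed by u32_encode_bits(): pull one field out of a firmware word and drop the same value at the bit position the radiotap header expects. A self-contained userspace model of that decode-then-re-encode step follows; the mask positions are invented for the example rather than taken from the real HAL or radiotap definitions.

#include <stdint.h>
#include <stdio.h>

static unsigned int mask_shift(uint32_t mask)
{
        unsigned int s = 0;

        while (!(mask & 1u)) {
                mask >>= 1;
                s++;
        }
        return s;
}

/* Stand-ins for le32_get_bits()/u32_get_bits() and u32_encode_bits(). */
static uint32_t get_bits(uint32_t word, uint32_t mask)
{
        return (word & mask) >> mask_shift(mask);
}

static uint32_t encode_bits(uint32_t val, uint32_t mask)
{
        return (val << mask_shift(mask)) & mask;
}

/* Invented field positions: a 6-bit value at bits 8..3 of the hardware
 * word that must land at bits 22..17 of the radiotap "common" word. */
#define HW_FIELD                0x000001f8u
#define RADIOTAP_FIELD          0x007e0000u

int main(void)
{
        uint32_t hw_word = 0x000000a8;  /* carries the value 21 in bits 8..3 */
        uint32_t common = 0;

        common |= encode_bits(get_bits(hw_word, HW_FIELD), RADIOTAP_FIELD);
        printf("radiotap common = 0x%08x\n", (unsigned int)common); /* 0x002a0000 */
        return 0;
}
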
+static void
+ath12k_dp_mon_hal_rx_parse_u_sig_tb(const struct hal_mon_usig_tb *usig_tb,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct ieee80211_radiotap_eht_usig *usig = &ppdu_info->u_sig_info.usig;
+ enum ieee80211_radiotap_eht_usig_tb spatial_reuse1, spatial_reuse2;
+ u32 common, value, mask;
+
+ spatial_reuse1 = IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1;
+ spatial_reuse2 = IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2;
+
+ common = __le32_to_cpu(usig->common);
+ value = __le32_to_cpu(usig->value);
+ mask = __le32_to_cpu(usig->mask);
+
+ ppdu_info->u_sig_info.ppdu_type_comp_mode =
+ le32_get_bits(usig_tb->info0,
+ HAL_RX_USIG_TB_INFO0_PPDU_TYPE_COMP_MODE);
+
+ common |= ATH12K_LE32_DEC_ENC(usig_tb->info0,
+ HAL_RX_USIG_TB_INFO0_RX_INTEG_CHECK_PASS,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC);
+
+ value |= IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD |
+ u32_encode_bits(ppdu_info->u_sig_info.ppdu_type_comp_mode,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE) |
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE |
+ ATH12K_LE32_DEC_ENC(usig_tb->info0,
+ HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_1,
+ spatial_reuse1) |
+ ATH12K_LE32_DEC_ENC(usig_tb->info0,
+ HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_2,
+ spatial_reuse2) |
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD |
+ ATH12K_LE32_DEC_ENC(usig_tb->info0,
+ HAL_RX_USIG_TB_INFO0_CRC,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC) |
+ ATH12K_LE32_DEC_ENC(usig_tb->info0,
+ HAL_RX_USIG_TB_INFO0_TAIL,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B20_B25_TAIL);
+
+ mask |= IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD |
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE |
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE |
+ spatial_reuse1 | spatial_reuse2 |
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD |
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC |
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B20_B25_TAIL;
+
+ usig->common = cpu_to_le32(common);
+ usig->value = cpu_to_le32(value);
+ usig->mask = cpu_to_le32(mask);
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_u_sig_mu(const struct hal_mon_usig_mu *usig_mu,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct ieee80211_radiotap_eht_usig *usig = &ppdu_info->u_sig_info.usig;
+ enum ieee80211_radiotap_eht_usig_mu sig_symb, punc;
+ u32 common, value, mask;
+
+ sig_symb = IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS;
+ punc = IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO;
+
+ common = __le32_to_cpu(usig->common);
+ value = __le32_to_cpu(usig->value);
+ mask = __le32_to_cpu(usig->mask);
+
+ ppdu_info->u_sig_info.ppdu_type_comp_mode =
+ le32_get_bits(usig_mu->info0,
+ HAL_RX_USIG_MU_INFO0_PPDU_TYPE_COMP_MODE);
+ ppdu_info->u_sig_info.eht_sig_mcs =
+ le32_get_bits(usig_mu->info0,
+ HAL_RX_USIG_MU_INFO0_EHT_SIG_MCS);
+ ppdu_info->u_sig_info.num_eht_sig_sym =
+ le32_get_bits(usig_mu->info0,
+ HAL_RX_USIG_MU_INFO0_NUM_EHT_SIG_SYM);
+
+ common |= ATH12K_LE32_DEC_ENC(usig_mu->info0,
+ HAL_RX_USIG_MU_INFO0_RX_INTEG_CHECK_PASS,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC);
+
+ value |= IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD |
+ IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE |
+ u32_encode_bits(ppdu_info->u_sig_info.ppdu_type_comp_mode,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE) |
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE |
+ ATH12K_LE32_DEC_ENC(usig_mu->info0,
+ HAL_RX_USIG_MU_INFO0_PUNC_CH_INFO,
+ punc) |
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE |
+ u32_encode_bits(ppdu_info->u_sig_info.eht_sig_mcs,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS) |
+ u32_encode_bits(ppdu_info->u_sig_info.num_eht_sig_sym,
+ sig_symb) |
+ ATH12K_LE32_DEC_ENC(usig_mu->info0,
+ HAL_RX_USIG_MU_INFO0_CRC,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC) |
+ ATH12K_LE32_DEC_ENC(usig_mu->info0,
+ HAL_RX_USIG_MU_INFO0_TAIL,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B20_B25_TAIL);
+
+ mask |= IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD |
+ IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE |
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE |
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE |
+ punc |
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE |
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS |
+ sig_symb |
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC |
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B20_B25_TAIL;
+
+ usig->common = cpu_to_le32(common);
+ usig->value = cpu_to_le32(value);
+ usig->mask = cpu_to_le32(mask);
+}
+
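+/* Entry point for the PHYRX_GENERIC_U_SIG TLV: parse the common part, then
+ * dispatch to the TB or MU specific parser based on the compression mode and
+ * UL/DL flag.
+ */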
+static void
+ath12k_dp_mon_hal_rx_parse_u_sig_hdr(const struct hal_mon_usig_hdr *usig,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ u8 comp_mode;
+
+ ppdu_info->eht_usig = true;
+
+ ath12k_dp_mon_hal_rx_parse_u_sig_cmn(&usig->cmn, ppdu_info);
+
+ comp_mode = le32_get_bits(usig->non_cmn.mu.info0,
+ HAL_RX_USIG_MU_INFO0_PPDU_TYPE_COMP_MODE);
+
+ if (comp_mode == 0 && ppdu_info->u_sig_info.ul_dl)
+ ath12k_dp_mon_hal_rx_parse_u_sig_tb(&usig->non_cmn.tb, ppdu_info);
+ else
+ ath12k_dp_mon_hal_rx_parse_u_sig_mu(&usig->non_cmn.mu, ppdu_info);
+}
+
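+/* Append a TLV payload to the aggregation buffer; fragments that would
+ * overflow HAL_RX_MON_MAX_AGGR_SIZE are silently dropped.
+ */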
+static void
+ath12k_dp_mon_hal_aggr_tlv(struct hal_rx_mon_ppdu_info *ppdu_info,
+ u16 tlv_len, const void *tlv_data)
+{
+ if (tlv_len <= HAL_RX_MON_MAX_AGGR_SIZE - ppdu_info->tlv_aggr.cur_len) {
+ memcpy(ppdu_info->tlv_aggr.buf + ppdu_info->tlv_aggr.cur_len,
+ tlv_data, tlv_len);
+ ppdu_info->tlv_aggr.cur_len += tlv_len;
+ }
+}
+
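+/* An EHT sounding NDP is signalled by compression mode 1 together with an
+ * EHT-SIG MCS of 0 and zero EHT-SIG symbols.
+ */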
+static inline bool
+ath12k_dp_mon_hal_rx_is_frame_type_ndp(const struct hal_rx_u_sig_info *usig_info)
+{
+ if (usig_info->ppdu_type_comp_mode == 1 &&
+ usig_info->eht_sig_mcs == 0 &&
+ usig_info->num_eht_sig_sym == 0)
+ return true;
+
+ return false;
+}
+
+static inline bool
+ath12k_dp_mon_hal_rx_is_non_ofdma(const struct hal_rx_u_sig_info *usig_info)
+{
+ u32 ppdu_type_comp_mode = usig_info->ppdu_type_comp_mode;
+ u32 ul_dl = usig_info->ul_dl;
+
+ if ((ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_MIMO && ul_dl == 0) ||
+ (ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_OFDMA && ul_dl == 0) ||
+ (ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_MIMO && ul_dl == 1))
+ return true;
+
+ return false;
+}
+
+static inline bool
+ath12k_dp_mon_hal_rx_is_ofdma(const struct hal_rx_u_sig_info *usig_info)
+{
+ if (usig_info->ppdu_type_comp_mode == 0 && usig_info->ul_dl == 0)
+ return true;
+
+ return false;
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_eht_sig_ndp(const struct hal_eht_sig_ndp_cmn_eb *eht_sig_ndp,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
+ u32 known, data;
+
+ known = __le32_to_cpu(eht->known);
+ known |= IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE |
+ IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF |
+ IEEE80211_RADIOTAP_EHT_KNOWN_NSS_S |
+ IEEE80211_RADIOTAP_EHT_KNOWN_BEAMFORMED_S |
+ IEEE80211_RADIOTAP_EHT_KNOWN_DISREGARD_S |
+ IEEE80211_RADIOTAP_EHT_KNOWN_CRC1 |
+ IEEE80211_RADIOTAP_EHT_KNOWN_TAIL1;
+ eht->known = cpu_to_le32(known);
+
+ data = __le32_to_cpu(eht->data[0]);
+ data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+ HAL_RX_EHT_SIG_NDP_CMN_INFO0_SPATIAL_REUSE,
+ IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE);
+ /* GI and LTF size are indicated separately in the radiotap header
+ * and hence will be parsed from another TLV
+ */
+ data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+ HAL_RX_EHT_SIG_NDP_CMN_INFO0_NUM_LTF_SYM,
+ IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF);
+
+ data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+ HAL_RX_EHT_SIG_NDP_CMN_INFO0_CRC,
+ IEEE80211_RADIOTAP_EHT_DATA0_CRC1_O);
+
+ data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+ HAL_RX_EHT_SIG_NDP_CMN_INFO0_DISREGARD,
+ IEEE80211_RADIOTAP_EHT_DATA0_DISREGARD_S);
+ eht->data[0] = cpu_to_le32(data);
+
+ data = __le32_to_cpu(eht->data[7]);
+ data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+ HAL_RX_EHT_SIG_NDP_CMN_INFO0_NSS,
+ IEEE80211_RADIOTAP_EHT_DATA7_NSS_S);
+
+ data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+ HAL_RX_EHT_SIG_NDP_CMN_INFO0_BEAMFORMED,
+ IEEE80211_RADIOTAP_EHT_DATA7_BEAMFORMED_S);
+ eht->data[7] = cpu_to_le32(data);
+}
+
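+/* The U-SIG overflow portion of EHT-SIG carries common parameters that did
+ * not fit in U-SIG (spatial reuse, LTF symbol count, LDPC extra symbol,
+ * pre-FEC padding factor, PE disambiguity); map them into radiotap data[0].
+ */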
+static void
+ath12k_dp_mon_hal_rx_parse_usig_overflow(const struct hal_eht_sig_usig_overflow *ovflow,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
+ u32 known, data;
+
+ known = __le32_to_cpu(eht->known);
+ known |= IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE |
+ IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF |
+ IEEE80211_RADIOTAP_EHT_KNOWN_LDPC_EXTRA_SYM_OM |
+ IEEE80211_RADIOTAP_EHT_KNOWN_PRE_PADD_FACOR_OM |
+ IEEE80211_RADIOTAP_EHT_KNOWN_PE_DISAMBIGUITY_OM |
+ IEEE80211_RADIOTAP_EHT_KNOWN_DISREGARD_O;
+ eht->known = cpu_to_le32(known);
+
+ data = __le32_to_cpu(eht->data[0]);
+ data |= ATH12K_LE32_DEC_ENC(ovflow->info0,
+ HAL_RX_EHT_SIG_OVERFLOW_INFO0_SPATIAL_REUSE,
+ IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE);
+
+ /* GI and LTF size are indicated separately in the radiotap header
+ * and hence will be parsed from another TLV
+ */
+ data |= ATH12K_LE32_DEC_ENC(ovflow->info0,
+ HAL_RX_EHT_SIG_OVERFLOW_INFO0_NUM_LTF_SYM,
+ IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF);
+
+ data |= ATH12K_LE32_DEC_ENC(ovflow->info0,
+ HAL_RX_EHT_SIG_OVERFLOW_INFO0_LDPC_EXTA_SYM,
+ IEEE80211_RADIOTAP_EHT_DATA0_LDPC_EXTRA_SYM_OM);
+
+ data |= ATH12K_LE32_DEC_ENC(ovflow->info0,
+ HAL_RX_EHT_SIG_OVERFLOW_INFO0_PRE_FEC_PAD_FACTOR,
+ IEEE80211_RADIOTAP_EHT_DATA0_PRE_PADD_FACOR_OM);
+
+ data |= ATH12K_LE32_DEC_ENC(ovflow->info0,
+ HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISAMBIGUITY,
+ IEEE80211_RADIOTAP_EHT_DATA0_PE_DISAMBIGUITY_OM);
+
+ data |= ATH12K_LE32_DEC_ENC(ovflow->info0,
+ HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISREGARD,
+ IEEE80211_RADIOTAP_EHT_DATA0_DISREGARD_O);
+ eht->data[0] = cpu_to_le32(data);
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_non_ofdma_users(const struct hal_eht_sig_non_ofdma_cmn_eb *eb,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
+ u32 known, data;
+
+ known = __le32_to_cpu(eht->known);
+ known |= IEEE80211_RADIOTAP_EHT_KNOWN_NR_NON_OFDMA_USERS_M;
+ eht->known = cpu_to_le32(known);
+
+ data = __le32_to_cpu(eht->data[7]);
+ data |= ATH12K_LE32_DEC_ENC(eb->info0,
+ HAL_RX_EHT_SIG_NON_OFDMA_INFO0_NUM_USERS,
+ IEEE80211_RADIOTAP_EHT_DATA7_NUM_OF_NON_OFDMA_USERS);
+ eht->data[7] = cpu_to_le32(data);
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_eht_mumimo_user(const struct hal_eht_sig_mu_mimo *user,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_eht_info *eht_info = &ppdu_info->eht_info;
+ u32 user_idx;
+
+ if (eht_info->num_user_info >= ARRAY_SIZE(eht_info->user_info))
+ return;
+
+ user_idx = eht_info->num_user_info++;
+
+ eht_info->user_info[user_idx] |=
+ IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_SPATIAL_CONFIG_KNOWN_M |
+ ATH12K_LE32_DEC_ENC(user->info0,
+ HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_STA_ID,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID) |
+ ATH12K_LE32_DEC_ENC(user->info0,
+ HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_CODING,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_CODING) |
+ ATH12K_LE32_DEC_ENC(user->info0,
+ HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_MCS,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) |
+ ATH12K_LE32_DEC_ENC(user->info0,
+ HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_SPATIAL_CODING,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_SPATIAL_CONFIG_M);
+
+ ppdu_info->mcs = le32_get_bits(user->info0,
+ HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_MCS);
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_eht_non_mumimo_user(const struct hal_eht_sig_non_mu_mimo *user,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_eht_info *eht_info = &ppdu_info->eht_info;
+ u32 user_idx;
+
+ if (eht_info->num_user_info >= ARRAY_SIZE(eht_info->user_info))
+ return;
+
+ user_idx = eht_info->num_user_info++;
+
+ eht_info->user_info[user_idx] |=
+ IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_KNOWN_O |
+ ATH12K_LE32_DEC_ENC(user->info0,
+ HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_STA_ID,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID) |
+ ATH12K_LE32_DEC_ENC(user->info0,
+ HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_CODING,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_CODING) |
+ ATH12K_LE32_DEC_ENC(user->info0,
+ HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_MCS,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) |
+ ATH12K_LE32_DEC_ENC(user->info0,
+ HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_NSS,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O) |
+ ATH12K_LE32_DEC_ENC(user->info0,
+ HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_BEAMFORMED,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_O);
+
+ ppdu_info->mcs = le32_get_bits(user->info0,
+ HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_MCS);
+
+ ppdu_info->nss = le32_get_bits(user->info0,
+ HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_NSS) + 1;
+}
+
+static inline bool
+ath12k_dp_mon_hal_rx_is_mu_mimo_user(const struct hal_rx_u_sig_info *usig_info)
+{
+ if (usig_info->ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_SU &&
+ usig_info->ul_dl == 1)
+ return true;
+
+ return false;
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_eht_sig_non_ofdma(const void *tlv,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ const struct hal_eht_sig_non_ofdma_cmn_eb *eb = tlv;
+
+ ath12k_dp_mon_hal_rx_parse_usig_overflow(tlv, ppdu_info);
+ ath12k_dp_mon_hal_rx_parse_non_ofdma_users(eb, ppdu_info);
+
+ if (ath12k_dp_mon_hal_rx_is_mu_mimo_user(&ppdu_info->u_sig_info))
+ ath12k_dp_mon_hal_rx_parse_eht_mumimo_user(&eb->user_field.mu_mimo,
+ ppdu_info);
+ else
+ ath12k_dp_mon_hal_rx_parse_eht_non_mumimo_user(&eb->user_field.n_mu_mimo,
+ ppdu_info);
+}
+
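+/* RU allocation subfields live in the EHT-SIG common eb1/eb2 words. Wider
+ * bandwidths expose additional content channel entries, so the switch falls
+ * through from 320 MHz down to 20 MHz while filling radiotap data[1]..data[6].
+ */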
+static void
+ath12k_dp_mon_hal_rx_parse_ru_allocation(const struct hal_eht_sig_ofdma_cmn_eb *eb,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ const struct hal_eht_sig_ofdma_cmn_eb1 *ofdma_cmn_eb1 = &eb->eb1;
+ const struct hal_eht_sig_ofdma_cmn_eb2 *ofdma_cmn_eb2 = &eb->eb2;
+ struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
+ enum ieee80211_radiotap_eht_data ru_123, ru_124, ru_125, ru_126;
+ enum ieee80211_radiotap_eht_data ru_121, ru_122, ru_112, ru_111;
+ u32 data;
+
+ ru_123 = IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_1_2_3;
+ ru_124 = IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_4;
+ ru_125 = IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_5;
+ ru_126 = IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_1_2_6;
+ ru_121 = IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_1;
+ ru_122 = IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_2;
+ ru_112 = IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_1_1_2;
+ ru_111 = IEEE80211_RADIOTAP_EHT_DATA1_RU_ALLOC_CC_1_1_1;
+
+ switch (ppdu_info->u_sig_info.bw) {
+ case HAL_EHT_BW_320_2:
+ case HAL_EHT_BW_320_1:
+ data = __le32_to_cpu(eht->data[4]);
+ /* CC1 2::3 */
+ data |= IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_1_2_3_KNOWN |
+ ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+ HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_3,
+ ru_123);
+ eht->data[4] = cpu_to_le32(data);
+
+ data = __le32_to_cpu(eht->data[5]);
+ /* CC1 2::4 */
+ data |= IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_4_KNOWN |
+ ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+ HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_4,
+ ru_124);
+
+ /* CC1 2::5 */
+ data |= IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_5_KNOWN |
+ ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+ HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_5,
+ ru_125);
+ eht->data[5] = cpu_to_le32(data);
+
+ data = __le32_to_cpu(eht->data[6]);
+ /* CC1 2::6 */
+ data |= IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_1_2_6_KNOWN |
+ ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+ HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_6,
+ ru_126);
+ eht->data[6] = cpu_to_le32(data);
+
+ fallthrough;
+ case HAL_EHT_BW_160:
+ data = __le32_to_cpu(eht->data[3]);
+ /* CC1 2::1 */
+ data |= IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_1_KNOWN |
+ ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+ HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_1,
+ ru_121);
+ /* CC1 2::2 */
+ data |= IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_2_KNOWN |
+ ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+ HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_2,
+ ru_122);
+ eht->data[3] = cpu_to_le32(data);
+
+ fallthrough;
+ case HAL_EHT_BW_80:
+ data = __le32_to_cpu(eht->data[2]);
+ /* CC1 1::2 */
+ data |= IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_1_1_2_KNOWN |
+ ATH12K_LE64_DEC_ENC(ofdma_cmn_eb1->info0,
+ HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_2,
+ ru_112);
+ eht->data[2] = cpu_to_le32(data);
+
+ fallthrough;
+ case HAL_EHT_BW_40:
+ fallthrough;
+ case HAL_EHT_BW_20:
+ data = __le32_to_cpu(eht->data[1]);
+ /* CC1 1::1 */
+ data |= IEEE80211_RADIOTAP_EHT_DATA1_RU_ALLOC_CC_1_1_1_KNOWN |
+ ATH12K_LE64_DEC_ENC(ofdma_cmn_eb1->info0,
+ HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_1,
+ ru_111);
+ eht->data[1] = cpu_to_le32(data);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_eht_sig_ofdma(const void *tlv,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ const struct hal_eht_sig_ofdma_cmn_eb *ofdma = tlv;
+
+ ath12k_dp_mon_hal_rx_parse_usig_overflow(tlv, ppdu_info);
+ ath12k_dp_mon_hal_rx_parse_ru_allocation(ofdma, ppdu_info);
+
+ ath12k_dp_mon_hal_rx_parse_eht_non_mumimo_user(&ofdma->user_field.n_mu_mimo,
+ ppdu_info);
+}
+
+static void
+ath12k_dp_mon_parse_eht_sig_hdr(struct hal_rx_mon_ppdu_info *ppdu_info,
+ const void *tlv_data)
+{
+ ppdu_info->is_eht = true;
+
+ if (ath12k_dp_mon_hal_rx_is_frame_type_ndp(&ppdu_info->u_sig_info))
+ ath12k_dp_mon_hal_rx_parse_eht_sig_ndp(tlv_data, ppdu_info);
+ else if (ath12k_dp_mon_hal_rx_is_non_ofdma(&ppdu_info->u_sig_info))
+ ath12k_dp_mon_hal_rx_parse_eht_sig_non_ofdma(tlv_data, ppdu_info);
+ else if (ath12k_dp_mon_hal_rx_is_ofdma(&ppdu_info->u_sig_info))
+ ath12k_dp_mon_hal_rx_parse_eht_sig_ofdma(tlv_data, ppdu_info);
+}
+
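+/* Map the HAL RU/MRU size encoding onto the ath12k_eht_ru_size values used
+ * for radiotap reporting and UL OFDMA statistics.
+ */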
+static inline enum ath12k_eht_ru_size
+hal_rx_mon_hal_ru_size_to_ath12k_ru_size(u32 hal_ru_size)
+{
+ switch (hal_ru_size) {
+ case HAL_EHT_RU_26:
+ return ATH12K_EHT_RU_26;
+ case HAL_EHT_RU_52:
+ return ATH12K_EHT_RU_52;
+ case HAL_EHT_RU_78:
+ return ATH12K_EHT_RU_52_26;
+ case HAL_EHT_RU_106:
+ return ATH12K_EHT_RU_106;
+ case HAL_EHT_RU_132:
+ return ATH12K_EHT_RU_106_26;
+ case HAL_EHT_RU_242:
+ return ATH12K_EHT_RU_242;
+ case HAL_EHT_RU_484:
+ return ATH12K_EHT_RU_484;
+ case HAL_EHT_RU_726:
+ return ATH12K_EHT_RU_484_242;
+ case HAL_EHT_RU_996:
+ return ATH12K_EHT_RU_996;
+ case HAL_EHT_RU_996x2:
+ return ATH12K_EHT_RU_996x2;
+ case HAL_EHT_RU_996x3:
+ return ATH12K_EHT_RU_996x3;
+ case HAL_EHT_RU_996x4:
+ return ATH12K_EHT_RU_996x4;
+ case HAL_EHT_RU_NONE:
+ return ATH12K_EHT_RU_INVALID;
+ case HAL_EHT_RU_996_484:
+ return ATH12K_EHT_RU_996_484;
+ case HAL_EHT_RU_996x2_484:
+ return ATH12K_EHT_RU_996x2_484;
+ case HAL_EHT_RU_996x3_484:
+ return ATH12K_EHT_RU_996x3_484;
+ case HAL_EHT_RU_996_484_242:
+ return ATH12K_EHT_RU_996_484_242;
+ default:
+ return ATH12K_EHT_RU_INVALID;
+ }
+}
+
+static inline u32
+hal_rx_ul_ofdma_ru_size_to_width(enum ath12k_eht_ru_size ru_size)
+{
+ switch (ru_size) {
+ case ATH12K_EHT_RU_26:
+ return RU_26;
+ case ATH12K_EHT_RU_52:
+ return RU_52;
+ case ATH12K_EHT_RU_52_26:
+ return RU_52_26;
+ case ATH12K_EHT_RU_106:
+ return RU_106;
+ case ATH12K_EHT_RU_106_26:
+ return RU_106_26;
+ case ATH12K_EHT_RU_242:
+ return RU_242;
+ case ATH12K_EHT_RU_484:
+ return RU_484;
+ case ATH12K_EHT_RU_484_242:
+ return RU_484_242;
+ case ATH12K_EHT_RU_996:
+ return RU_996;
+ case ATH12K_EHT_RU_996_484:
+ return RU_996_484;
+ case ATH12K_EHT_RU_996_484_242:
+ return RU_996_484_242;
+ case ATH12K_EHT_RU_996x2:
+ return RU_2X996;
+ case ATH12K_EHT_RU_996x2_484:
+ return RU_2X996_484;
+ case ATH12K_EHT_RU_996x3:
+ return RU_3X996;
+ case ATH12K_EHT_RU_996x3_484:
+ return RU_3X996_484;
+ case ATH12K_EHT_RU_996x4:
+ return RU_4X996;
+ default:
+ return RU_INVALID;
+ }
+}
+
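+/* Parse RX_PPDU_START_USER_INFO: per-user rate parameters (MCS, NSS, BW,
+ * STBC, LDPC) and, for MU receptions, the per-80 MHz RU/MRU allocation used
+ * to fill radiotap and UL OFDMA statistics.
+ */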
+static void
+ath12k_dp_mon_hal_rx_parse_user_info(const struct hal_receive_user_info *rx_usr_info,
+ u16 user_id,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_user_status *mon_rx_user_status = NULL;
+ struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
+ enum ath12k_eht_ru_size rtap_ru_size = ATH12K_EHT_RU_INVALID;
+ u32 ru_width, reception_type, ru_index = HAL_EHT_RU_INVALID;
+ u32 ru_type_80_0, ru_start_index_80_0;
+ u32 ru_type_80_1, ru_start_index_80_1;
+ u32 ru_type_80_2, ru_start_index_80_2;
+ u32 ru_type_80_3, ru_start_index_80_3;
+ u32 ru_size = 0, num_80mhz_with_ru = 0;
+ u64 ru_index_320mhz = 0;
+ u32 ru_index_per80mhz;
+
+ reception_type = le32_get_bits(rx_usr_info->info0,
+ HAL_RX_USR_INFO0_RECEPTION_TYPE);
+
+ switch (reception_type) {
+ case HAL_RECEPTION_TYPE_SU:
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ break;
+ case HAL_RECEPTION_TYPE_DL_MU_MIMO:
+ case HAL_RECEPTION_TYPE_UL_MU_MIMO:
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
+ break;
+ case HAL_RECEPTION_TYPE_DL_MU_OFMA:
+ case HAL_RECEPTION_TYPE_UL_MU_OFDMA:
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
+ break;
+ case HAL_RECEPTION_TYPE_DL_MU_OFDMA_MIMO:
+ case HAL_RECEPTION_TYPE_UL_MU_OFDMA_MIMO:
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO;
+ }
+
+ ppdu_info->is_stbc = le32_get_bits(rx_usr_info->info0, HAL_RX_USR_INFO0_STBC);
+ ppdu_info->ldpc = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_LDPC);
+ ppdu_info->dcm = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_STA_DCM);
+ ppdu_info->bw = le32_get_bits(rx_usr_info->info1, HAL_RX_USR_INFO1_RX_BW);
+ ppdu_info->mcs = le32_get_bits(rx_usr_info->info1, HAL_RX_USR_INFO1_MCS);
+ ppdu_info->nss = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_NSS) + 1;
+
+ if (user_id < HAL_MAX_UL_MU_USERS) {
+ mon_rx_user_status = &ppdu_info->userstats[user_id];
+ mon_rx_user_status->mcs = ppdu_info->mcs;
+ mon_rx_user_status->nss = ppdu_info->nss;
+ }
+
+ if (!(ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_MIMO ||
+ ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA ||
+ ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO))
+ return;
+
+ /* RU allocation is present only for MU (OFDMA or MU-MIMO) receptions */
+ ru_type_80_0 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_0);
+ ru_start_index_80_0 = le32_get_bits(rx_usr_info->info3,
+ HAL_RX_USR_INFO3_RU_START_IDX_80_0);
+ if (ru_type_80_0 != HAL_EHT_RU_NONE) {
+ ru_size += ru_type_80_0;
+ ru_index_per80mhz = ru_start_index_80_0;
+ ru_index = ru_index_per80mhz;
+ ru_index_320mhz |= HAL_RU_PER80(ru_type_80_0, 0, ru_index_per80mhz);
+ num_80mhz_with_ru++;
+ }
+
+ ru_type_80_1 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_1);
+ ru_start_index_80_1 = le32_get_bits(rx_usr_info->info3,
+ HAL_RX_USR_INFO3_RU_START_IDX_80_1);
+ if (ru_type_80_1 != HAL_EHT_RU_NONE) {
+ ru_size += ru_type_80_1;
+ ru_index_per80mhz = ru_start_index_80_1;
+ ru_index = ru_index_per80mhz;
+ ru_index_320mhz |= HAL_RU_PER80(ru_type_80_1, 1, ru_index_per80mhz);
+ num_80mhz_with_ru++;
+ }
+
+ ru_type_80_2 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_2);
+ ru_start_index_80_2 = le32_get_bits(rx_usr_info->info3,
+ HAL_RX_USR_INFO3_RU_START_IDX_80_2);
+ if (ru_type_80_2 != HAL_EHT_RU_NONE) {
+ ru_size += ru_type_80_2;
+ ru_index_per80mhz = ru_start_index_80_2;
+ ru_index = ru_index_per80mhz;
+ ru_index_320mhz |= HAL_RU_PER80(ru_type_80_2, 2, ru_index_per80mhz);
+ num_80mhz_with_ru++;
+ }
+
+ ru_type_80_3 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_3);
+ ru_start_index_80_3 = le32_get_bits(rx_usr_info->info3,
+ HAL_RX_USR_INFO3_RU_START_IDX_80_3);
+ if (ru_type_80_3 != HAL_EHT_RU_NONE) {
+ ru_size += ru_type_80_3;
+ ru_index_per80mhz = ru_start_index_80_3;
+ ru_index = ru_index_per80mhz;
+ ru_index_320mhz |= HAL_RU_PER80(ru_type_80_3, 3, ru_index_per80mhz);
+ num_80mhz_with_ru++;
+ }
+
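+ /* When more than one 80 MHz segment carries an RU, the user was assigned
+ * a multi-RU (MRU) combination: derive the MRU index from the combined
+ * per-80 MHz bitmap and bias the size code onto the HAL MRU size values.
+ */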
+ if (num_80mhz_with_ru > 1) {
+ /* Calculate the MRU index */
+ switch (ru_index_320mhz) {
+ case HAL_EHT_RU_996_484_0:
+ case HAL_EHT_RU_996x2_484_0:
+ case HAL_EHT_RU_996x3_484_0:
+ ru_index = 0;
+ break;
+ case HAL_EHT_RU_996_484_1:
+ case HAL_EHT_RU_996x2_484_1:
+ case HAL_EHT_RU_996x3_484_1:
+ ru_index = 1;
+ break;
+ case HAL_EHT_RU_996_484_2:
+ case HAL_EHT_RU_996x2_484_2:
+ case HAL_EHT_RU_996x3_484_2:
+ ru_index = 2;
+ break;
+ case HAL_EHT_RU_996_484_3:
+ case HAL_EHT_RU_996x2_484_3:
+ case HAL_EHT_RU_996x3_484_3:
+ ru_index = 3;
+ break;
+ case HAL_EHT_RU_996_484_4:
+ case HAL_EHT_RU_996x2_484_4:
+ case HAL_EHT_RU_996x3_484_4:
+ ru_index = 4;
+ break;
+ case HAL_EHT_RU_996_484_5:
+ case HAL_EHT_RU_996x2_484_5:
+ case HAL_EHT_RU_996x3_484_5:
+ ru_index = 5;
+ break;
+ case HAL_EHT_RU_996_484_6:
+ case HAL_EHT_RU_996x2_484_6:
+ case HAL_EHT_RU_996x3_484_6:
+ ru_index = 6;
+ break;
+ case HAL_EHT_RU_996_484_7:
+ case HAL_EHT_RU_996x2_484_7:
+ case HAL_EHT_RU_996x3_484_7:
+ ru_index = 7;
+ break;
+ case HAL_EHT_RU_996x2_484_8:
+ ru_index = 8;
+ break;
+ case HAL_EHT_RU_996x2_484_9:
+ ru_index = 9;
+ break;
+ case HAL_EHT_RU_996x2_484_10:
+ ru_index = 10;
+ break;
+ case HAL_EHT_RU_996x2_484_11:
+ ru_index = 11;
+ break;
+ default:
+ ru_index = HAL_EHT_RU_INVALID;
+ break;
+ }
+
+ ru_size += 4;
+ }
+
+ rtap_ru_size = hal_rx_mon_hal_ru_size_to_ath12k_ru_size(ru_size);
+ if (rtap_ru_size != ATH12K_EHT_RU_INVALID) {
+ u32 known, data;
+
+ known = __le32_to_cpu(eht->known);
+ known |= IEEE80211_RADIOTAP_EHT_KNOWN_RU_MRU_SIZE_OM;
+ eht->known = cpu_to_le32(known);
+
+ data = __le32_to_cpu(eht->data[1]);
+ data |= u32_encode_bits(rtap_ru_size,
+ IEEE80211_RADIOTAP_EHT_DATA1_RU_SIZE);
+ eht->data[1] = cpu_to_le32(data);
+ }
+
+ if (ru_index != HAL_EHT_RU_INVALID) {
+ u32 known, data;
+
+ known = __le32_to_cpu(eht->known);
+ known |= IEEE80211_RADIOTAP_EHT_KNOWN_RU_MRU_INDEX_OM;
+ eht->known = cpu_to_le32(known);
+
+ data = __le32_to_cpu(eht->data[1]);
+ data |= u32_encode_bits(ru_index,
+ IEEE80211_RADIOTAP_EHT_DATA1_RU_INDEX);
+ eht->data[1] = cpu_to_le32(data);
+ }
+
+ if (mon_rx_user_status && ru_index != HAL_EHT_RU_INVALID &&
+ rtap_ru_size != ATH12K_EHT_RU_INVALID) {
+ mon_rx_user_status->ul_ofdma_ru_start_index = ru_index;
+ mon_rx_user_status->ul_ofdma_ru_size = rtap_ru_size;
+
+ ru_width = hal_rx_ul_ofdma_ru_size_to_width(rtap_ru_size);
+
+ mon_rx_user_status->ul_ofdma_ru_width = ru_width;
+ mon_rx_user_status->ofdma_info_valid = 1;
+ }
}
static enum hal_rx_mon_status
-ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
+ath12k_dp_mon_rx_parse_status_tlv(struct ath12k *ar,
struct ath12k_mon_data *pmon,
- u32 tlv_tag, const void *tlv_data,
- u32 userid)
+ const struct hal_tlv_64_hdr *tlv)
{
struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
- u32 info[7];
+ const void *tlv_data = tlv->value;
+ u32 info[7], userid;
+ u16 tlv_tag, tlv_len;
+
+ tlv_tag = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_TAG);
+ tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN);
+ userid = le64_get_bits(tlv->tl, HAL_TLV_64_USR_ID);
+
+ if (ppdu_info->tlv_aggr.in_progress && ppdu_info->tlv_aggr.tlv_tag != tlv_tag) {
+ ath12k_dp_mon_parse_eht_sig_hdr(ppdu_info, ppdu_info->tlv_aggr.buf);
+
+ ppdu_info->tlv_aggr.in_progress = false;
+ ppdu_info->tlv_aggr.cur_len = 0;
+ }
switch (tlv_tag) {
case HAL_RX_PPDU_START: {
@@ -638,6 +1504,9 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
ppdu_info->num_mpdu_fcs_err =
u32_get_bits(info[0],
HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR);
+ ppdu_info->peer_id =
+ u32_get_bits(info[0], HAL_RX_PPDU_END_USER_STATS_INFO0_PEER_ID);
+
switch (ppdu_info->preamble_type) {
case HAL_RX_PREAMBLE_11N:
ppdu_info->ht_flags = 1;
@@ -648,6 +1517,9 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
case HAL_RX_PREAMBLE_11AX:
ppdu_info->he_flags = 1;
break;
+ case HAL_RX_PREAMBLE_11BE:
+ ppdu_info->is_eht = true;
+ break;
default:
break;
}
@@ -655,6 +1527,11 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
if (userid < HAL_MAX_UL_MU_USERS) {
struct hal_rx_user_status *rxuser_stats =
&ppdu_info->userstats[userid];
+
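+ /* More than one MPDU (with or without FCS errors) in this PPDU means
+ * an A-MPDU was received; record it so radiotap can carry A-MPDU
+ * details.
+ */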
+ if (ppdu_info->num_mpdu_fcs_ok > 1 ||
+ ppdu_info->num_mpdu_fcs_err > 1)
+ ppdu_info->userstats[userid].ampdu_present = true;
+
ppdu_info->num_users += 1;
ath12k_dp_mon_rx_handle_ofdma_info(eu_stats, rxuser_stats);
@@ -730,6 +1607,17 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RX_BW);
break;
}
+ case HAL_PHYRX_OTHER_RECEIVE_INFO: {
+ const struct hal_phyrx_common_user_info *cmn_usr_info = tlv_data;
+
+ ppdu_info->gi = le32_get_bits(cmn_usr_info->info0,
+ HAL_RX_PHY_CMN_USER_INFO0_GI);
+ break;
+ }
+ case HAL_RX_PPDU_START_USER_INFO:
+ ath12k_dp_mon_hal_rx_parse_user_info(tlv_data, userid, ppdu_info);
+ break;
+
case HAL_RXPCU_PPDU_END_INFO: {
const struct hal_rx_ppdu_end_duration *ppdu_rx_duration = tlv_data;
@@ -743,7 +1631,6 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
}
case HAL_RX_MPDU_START: {
const struct hal_rx_mpdu_start *mpdu_start = tlv_data;
- struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
u16 peer_id;
info[1] = __le32_to_cpu(mpdu_start->info1);
@@ -756,68 +1643,38 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
if (userid < HAL_MAX_UL_MU_USERS) {
info[0] = __le32_to_cpu(mpdu_start->info0);
ppdu_info->userid = userid;
- ppdu_info->ampdu_id[userid] =
- u32_get_bits(info[0], HAL_RX_MPDU_START_INFO1_PEERID);
+ ppdu_info->userstats[userid].ampdu_id =
+ u32_get_bits(info[0], HAL_RX_MPDU_START_INFO0_PPDU_ID);
}
- mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
- if (!mon_mpdu)
- return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
-
break;
}
case HAL_RX_MSDU_START:
/* TODO: add msdu start parsing logic */
break;
- case HAL_MON_BUF_ADDR: {
- struct dp_rxdma_mon_ring *buf_ring = &ab->dp.rxdma_mon_buf_ring;
- const struct dp_mon_packet_info *packet_info = tlv_data;
- int buf_id = u32_get_bits(packet_info->cookie,
- DP_RXDMA_BUF_COOKIE_BUF_ID);
- struct sk_buff *msdu;
- struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
- struct ath12k_skb_rxcb *rxcb;
-
- spin_lock_bh(&buf_ring->idr_lock);
- msdu = idr_remove(&buf_ring->bufs_idr, buf_id);
- spin_unlock_bh(&buf_ring->idr_lock);
-
- if (unlikely(!msdu)) {
- ath12k_warn(ab, "monitor destination with invalid buf_id %d\n",
- buf_id);
- return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
+ case HAL_MON_BUF_ADDR:
+ return HAL_RX_MON_STATUS_BUF_ADDR;
+ case HAL_RX_MSDU_END:
+ return HAL_RX_MON_STATUS_MSDU_END;
+ case HAL_RX_MPDU_END:
+ return HAL_RX_MON_STATUS_MPDU_END;
+ case HAL_PHYRX_GENERIC_U_SIG:
+ ath12k_dp_mon_hal_rx_parse_u_sig_hdr(tlv_data, ppdu_info);
+ break;
+ case HAL_PHYRX_GENERIC_EHT_SIG:
+ /* EHT_SIG content may be split across several TLVs: start a new
+ * aggregation if one is not already in progress and let the
+ * fragments accumulate in the aggregation buffer
+ */
+ if (!ppdu_info->tlv_aggr.in_progress) {
+ ppdu_info->tlv_aggr.in_progress = true;
+ ppdu_info->tlv_aggr.tlv_tag = tlv_tag;
+ ppdu_info->tlv_aggr.cur_len = 0;
}
- rxcb = ATH12K_SKB_RXCB(msdu);
- dma_unmap_single(ab->dev, rxcb->paddr,
- msdu->len + skb_tailroom(msdu),
- DMA_FROM_DEVICE);
-
- if (mon_mpdu->tail)
- mon_mpdu->tail->next = msdu;
- else
- mon_mpdu->tail = msdu;
-
- ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
+ ppdu_info->is_eht = true;
- break;
- }
- case HAL_RX_MSDU_END: {
- const struct rx_msdu_end_qcn9274 *msdu_end = tlv_data;
- bool is_first_msdu_in_mpdu;
- u16 msdu_end_info;
-
- msdu_end_info = __le16_to_cpu(msdu_end->info5);
- is_first_msdu_in_mpdu = u32_get_bits(msdu_end_info,
- RX_MSDU_END_INFO5_FIRST_MSDU);
- if (is_first_msdu_in_mpdu) {
- pmon->mon_mpdu->head = pmon->mon_mpdu->tail;
- pmon->mon_mpdu->tail = NULL;
- }
- break;
- }
- case HAL_RX_MPDU_END:
- list_add_tail(&pmon->mon_mpdu->list, &pmon->dp_rx_mon_mpdu_list);
+ ath12k_dp_mon_hal_aggr_tlv(ppdu_info, tlv_len, tlv_data);
break;
case HAL_DUMMY:
return HAL_RX_MON_STATUS_BUF_DONE;
@@ -844,7 +1701,7 @@ static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar,
}
static struct sk_buff *
-ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, u32 mac_id,
+ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
struct sk_buff *head_msdu, struct sk_buff *tail_msdu,
struct ieee80211_rx_status *rxs, bool *fcs_err)
{
@@ -1005,18 +1862,70 @@ static void ath12k_dp_mon_update_radiotap(struct ath12k *ar,
{
struct ieee80211_supported_band *sband;
u8 *ptr = NULL;
- u16 ampdu_id = ppduinfo->ampdu_id[ppduinfo->userid];
rxs->flag |= RX_FLAG_MACTIME_START;
rxs->signal = ppduinfo->rssi_comb + ATH12K_DEFAULT_NOISE_FLOOR;
rxs->nss = ppduinfo->nss + 1;
- if (ampdu_id) {
+ if (ppduinfo->userstats[ppduinfo->userid].ampdu_present) {
rxs->flag |= RX_FLAG_AMPDU_DETAILS;
- rxs->ampdu_reference = ampdu_id;
+ rxs->ampdu_reference = ppduinfo->userstats[ppduinfo->userid].ampdu_id;
}
- if (ppduinfo->he_mu_flags) {
+ if (ppduinfo->is_eht || ppduinfo->eht_usig) {
+ struct ieee80211_radiotap_tlv *tlv;
+ struct ieee80211_radiotap_eht *eht;
+ struct ieee80211_radiotap_eht_usig *usig;
+ u16 len = 0, i, eht_len, usig_len;
+ u8 user;
+
+ if (ppduinfo->is_eht) {
+ eht_len = struct_size(eht,
+ user_info,
+ ppduinfo->eht_info.num_user_info);
+ len += sizeof(*tlv) + eht_len;
+ }
+
+ if (ppduinfo->eht_usig) {
+ usig_len = sizeof(*usig);
+ len += sizeof(*tlv) + usig_len;
+ }
+
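+ /* EHT and U-SIG data are emitted as radiotap TLVs appended after the
+ * fixed radiotap header; len above reserves one TLV header per present
+ * field.
+ */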
+ rxs->flag |= RX_FLAG_RADIOTAP_TLV_AT_END;
+ rxs->encoding = RX_ENC_EHT;
+
+ skb_reset_mac_header(mon_skb);
+
+ tlv = skb_push(mon_skb, len);
+
+ if (ppduinfo->is_eht) {
+ tlv->type = cpu_to_le16(IEEE80211_RADIOTAP_EHT);
+ tlv->len = cpu_to_le16(eht_len);
+
+ eht = (struct ieee80211_radiotap_eht *)tlv->data;
+ eht->known = ppduinfo->eht_info.eht.known;
+
+ for (i = 0;
+ i < ARRAY_SIZE(eht->data) &&
+ i < ARRAY_SIZE(ppduinfo->eht_info.eht.data);
+ i++)
+ eht->data[i] = ppduinfo->eht_info.eht.data[i];
+
+ for (user = 0; user < ppduinfo->eht_info.num_user_info; user++)
+ put_unaligned_le32(ppduinfo->eht_info.user_info[user],
+ &eht->user_info[user]);
+
+ tlv = (struct ieee80211_radiotap_tlv *)&tlv->data[eht_len];
+ }
+
+ if (ppduinfo->eht_usig) {
+ tlv->type = cpu_to_le16(IEEE80211_RADIOTAP_EHT_USIG);
+ tlv->len = cpu_to_le16(usig_len);
+
+ usig = (struct ieee80211_radiotap_eht_usig *)tlv->data;
+ *usig = ppduinfo->u_sig_info.usig;
+ }
+ } else if (ppduinfo->he_mu_flags) {
rxs->flag |= RX_FLAG_RADIOTAP_HE_MU;
rxs->encoding = RX_ENC_HE;
ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));
@@ -1125,7 +2034,7 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi);
}
-static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id,
+static int ath12k_dp_mon_rx_deliver(struct ath12k *ar,
struct sk_buff *head_msdu, struct sk_buff *tail_msdu,
struct hal_rx_mon_ppdu_info *ppduinfo,
struct napi_struct *napi)
@@ -1135,7 +2044,7 @@ static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id,
struct ieee80211_rx_status *rxs = &dp->rx_status;
bool fcs_err = false;
- mon_skb = ath12k_dp_mon_rx_merg_msdus(ar, mac_id,
+ mon_skb = ath12k_dp_mon_rx_merg_msdus(ar,
head_msdu, tail_msdu,
rxs, &fcs_err);
if (!mon_skb)
@@ -1180,24 +2089,18 @@ mon_deliver_fail:
}
static enum hal_rx_mon_status
-ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon,
+ath12k_dp_mon_parse_rx_dest(struct ath12k *ar, struct ath12k_mon_data *pmon,
struct sk_buff *skb)
{
- struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
struct hal_tlv_64_hdr *tlv;
+ struct ath12k_skb_rxcb *rxcb;
enum hal_rx_mon_status hal_status;
- u32 tlv_userid;
u16 tlv_tag, tlv_len;
u8 *ptr = skb->data;
- memset(ppdu_info, 0, sizeof(struct hal_rx_mon_ppdu_info));
-
do {
tlv = (struct hal_tlv_64_hdr *)ptr;
tlv_tag = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_TAG);
- tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN);
- tlv_userid = le64_get_bits(tlv->tl, HAL_TLV_64_USR_ID);
- ptr += sizeof(*tlv);
/* The actual length of PPDU_END is the combined length of many PHY
* TLVs that follow. Skip the TLV header and
@@ -1207,16 +2110,24 @@ ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon
if (tlv_tag == HAL_RX_PPDU_END)
tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview);
+ else
+ tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN);
- hal_status = ath12k_dp_mon_rx_parse_status_tlv(ab, pmon,
- tlv_tag, ptr, tlv_userid);
- ptr += tlv_len;
+ hal_status = ath12k_dp_mon_rx_parse_status_tlv(ar, pmon, tlv);
+ ptr += sizeof(*tlv) + tlv_len;
ptr = PTR_ALIGN(ptr, HAL_TLV_64_ALIGN);
if ((ptr - skb->data) >= DP_RX_BUFFER_SIZE)
break;
- } while (hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE);
+ } while ((hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE) ||
+ (hal_status == HAL_RX_MON_STATUS_BUF_ADDR) ||
+ (hal_status == HAL_RX_MON_STATUS_MPDU_END) ||
+ (hal_status == HAL_RX_MON_STATUS_MSDU_END));
+
+ rxcb = ATH12K_SKB_RXCB(skb);
+ if (rxcb->is_end_of_ppdu)
+ hal_status = HAL_RX_MON_STATUS_PPDU_DONE;
return hal_status;
}
@@ -1224,18 +2135,16 @@ ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon
enum hal_rx_mon_status
ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
struct ath12k_mon_data *pmon,
- int mac_id,
struct sk_buff *skb,
struct napi_struct *napi)
{
- struct ath12k_base *ab = ar->ab;
struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
struct dp_mon_mpdu *tmp;
struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
struct sk_buff *head_msdu, *tail_msdu;
enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE;
- ath12k_dp_mon_parse_rx_dest(ab, pmon, skb);
+ ath12k_dp_mon_parse_rx_dest(ar, pmon, skb);
list_for_each_entry_safe(mon_mpdu, tmp, &pmon->dp_rx_mon_mpdu_list, list) {
list_del(&mon_mpdu->list);
@@ -1243,7 +2152,7 @@ ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
tail_msdu = mon_mpdu->tail;
if (head_msdu && tail_msdu) {
- ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu,
+ ath12k_dp_mon_rx_deliver(ar, head_msdu,
tail_msdu, ppdu_info, napi);
}
@@ -1924,7 +2833,7 @@ ath12k_dp_mon_tx_status_get_num_user(u16 tlv_tag,
}
static void
-ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar, int mac_id,
+ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar,
struct napi_struct *napi,
struct dp_mon_tx_ppdu_info *tx_ppdu_info)
{
@@ -1938,7 +2847,7 @@ ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar, int mac_id,
tail_msdu = mon_mpdu->tail;
if (head_msdu)
- ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu, tail_msdu,
+ ath12k_dp_mon_rx_deliver(ar, head_msdu, tail_msdu,
&tx_ppdu_info->rx_status, napi);
kfree(mon_mpdu);
@@ -1948,7 +2857,6 @@ ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar, int mac_id,
enum hal_rx_mon_status
ath12k_dp_mon_tx_parse_mon_status(struct ath12k *ar,
struct ath12k_mon_data *pmon,
- int mac_id,
struct sk_buff *skb,
struct napi_struct *napi,
u32 ppdu_id)
@@ -1995,155 +2903,43 @@ ath12k_dp_mon_tx_parse_mon_status(struct ath12k *ar,
break;
} while (tlv_status != DP_MON_TX_FES_STATUS_END);
- ath12k_dp_mon_tx_process_ppdu_info(ar, mac_id, napi, tx_data_ppdu_info);
- ath12k_dp_mon_tx_process_ppdu_info(ar, mac_id, napi, tx_prot_ppdu_info);
+ ath12k_dp_mon_tx_process_ppdu_info(ar, napi, tx_data_ppdu_info);
+ ath12k_dp_mon_tx_process_ppdu_info(ar, napi, tx_prot_ppdu_info);
return tlv_status;
}
-int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id, int *budget,
- enum dp_monitor_mode monitor_mode,
- struct napi_struct *napi)
-{
- struct hal_mon_dest_desc *mon_dst_desc;
- struct ath12k_pdev_dp *pdev_dp = &ar->dp;
- struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&pdev_dp->mon_data;
- struct ath12k_base *ab = ar->ab;
- struct ath12k_dp *dp = &ab->dp;
- struct sk_buff *skb;
- struct ath12k_skb_rxcb *rxcb;
- struct dp_srng *mon_dst_ring;
- struct hal_srng *srng;
- struct dp_rxdma_mon_ring *buf_ring;
- u64 cookie;
- u32 ppdu_id;
- int num_buffs_reaped = 0, srng_id, buf_id;
- u8 dest_idx = 0, i;
- bool end_of_ppdu;
- struct hal_rx_mon_ppdu_info *ppdu_info;
- struct ath12k_peer *peer = NULL;
-
- ppdu_info = &pmon->mon_ppdu_info;
- memset(ppdu_info, 0, sizeof(*ppdu_info));
- ppdu_info->peer_id = HAL_INVALID_PEERID;
-
- srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id);
-
- if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE) {
- mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id];
- buf_ring = &dp->rxdma_mon_buf_ring;
- } else {
- return 0;
- }
-
- srng = &ab->hal.srng_list[mon_dst_ring->ring_id];
-
- spin_lock_bh(&srng->lock);
- ath12k_hal_srng_access_begin(ab, srng);
-
- while (likely(*budget)) {
- *budget -= 1;
- mon_dst_desc = ath12k_hal_srng_dst_peek(ab, srng);
- if (unlikely(!mon_dst_desc))
- break;
-
- cookie = le32_to_cpu(mon_dst_desc->cookie);
- buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
-
- spin_lock_bh(&buf_ring->idr_lock);
- skb = idr_remove(&buf_ring->bufs_idr, buf_id);
- spin_unlock_bh(&buf_ring->idr_lock);
-
- if (unlikely(!skb)) {
- ath12k_warn(ab, "monitor destination with invalid buf_id %d\n",
- buf_id);
- goto move_next;
- }
-
- rxcb = ATH12K_SKB_RXCB(skb);
- dma_unmap_single(ab->dev, rxcb->paddr,
- skb->len + skb_tailroom(skb),
- DMA_FROM_DEVICE);
-
- pmon->dest_skb_q[dest_idx] = skb;
- dest_idx++;
- ppdu_id = le32_to_cpu(mon_dst_desc->ppdu_id);
- end_of_ppdu = le32_get_bits(mon_dst_desc->info0,
- HAL_MON_DEST_INFO0_END_OF_PPDU);
- if (!end_of_ppdu)
- continue;
-
- for (i = 0; i < dest_idx; i++) {
- skb = pmon->dest_skb_q[i];
-
- if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE)
- ath12k_dp_mon_rx_parse_mon_status(ar, pmon, mac_id,
- skb, napi);
- else
- ath12k_dp_mon_tx_parse_mon_status(ar, pmon, mac_id,
- skb, napi, ppdu_id);
-
- peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id);
-
- if (!peer || !peer->sta) {
- ath12k_dbg(ab, ATH12K_DBG_DATA,
- "failed to find the peer with peer_id %d\n",
- ppdu_info->peer_id);
- dev_kfree_skb_any(skb);
- continue;
- }
-
- dev_kfree_skb_any(skb);
- pmon->dest_skb_q[i] = NULL;
- }
-
- dest_idx = 0;
-move_next:
- ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
- ath12k_hal_srng_src_get_next_entry(ab, srng);
- num_buffs_reaped++;
- }
-
- ath12k_hal_srng_access_end(ab, srng);
- spin_unlock_bh(&srng->lock);
-
- return num_buffs_reaped;
-}
-
static void
ath12k_dp_mon_rx_update_peer_rate_table_stats(struct ath12k_rx_peer_stats *rx_stats,
struct hal_rx_mon_ppdu_info *ppdu_info,
struct hal_rx_user_status *user_stats,
u32 num_msdu)
{
- u32 rate_idx = 0;
+ struct ath12k_rx_peer_rate_stats *stats;
u32 mcs_idx = (user_stats) ? user_stats->mcs : ppdu_info->mcs;
u32 nss_idx = (user_stats) ? user_stats->nss - 1 : ppdu_info->nss - 1;
u32 bw_idx = ppdu_info->bw;
u32 gi_idx = ppdu_info->gi;
+ u32 len;
- if ((mcs_idx > HAL_RX_MAX_MCS_HE) || (nss_idx >= HAL_RX_MAX_NSS) ||
- (bw_idx >= HAL_RX_BW_MAX) || (gi_idx >= HAL_RX_GI_MAX)) {
+ if (mcs_idx > HAL_RX_MAX_MCS_HT || nss_idx >= HAL_RX_MAX_NSS ||
+ bw_idx >= HAL_RX_BW_MAX || gi_idx >= HAL_RX_GI_MAX) {
return;
}
- if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11N ||
- ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AC) {
- rate_idx = mcs_idx * 8 + 8 * 10 * nss_idx;
- rate_idx += bw_idx * 2 + gi_idx;
- } else if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AX) {
+ if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AX ||
+ ppdu_info->preamble_type == HAL_RX_PREAMBLE_11BE)
gi_idx = ath12k_he_gi_to_nl80211_he_gi(ppdu_info->gi);
- rate_idx = mcs_idx * 12 + 12 * 12 * nss_idx;
- rate_idx += bw_idx * 3 + gi_idx;
- } else {
- return;
- }
- rx_stats->pkt_stats.rx_rate[rate_idx] += num_msdu;
+ rx_stats->pkt_stats.rx_rate[bw_idx][gi_idx][nss_idx][mcs_idx] += num_msdu;
+ stats = &rx_stats->byte_stats;
+
if (user_stats)
- rx_stats->byte_stats.rx_rate[rate_idx] += user_stats->mpdu_ok_byte_count;
+ len = user_stats->mpdu_ok_byte_count;
else
- rx_stats->byte_stats.rx_rate[rate_idx] += ppdu_info->mpdu_len;
+ len = ppdu_info->mpdu_len;
+
+ stats->rx_rate[bw_idx][gi_idx][nss_idx][mcs_idx] += len;
}
static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar,
@@ -2157,6 +2953,7 @@ static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar,
return;
arsta->rssi_comb = ppdu_info->rssi_comb;
+ ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
@@ -2229,6 +3026,12 @@ static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar,
rx_stats->byte_stats.he_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len;
}
+ if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11BE &&
+ ppdu_info->mcs <= HAL_RX_MAX_MCS_BE) {
+ rx_stats->pkt_stats.be_mcs_count[ppdu_info->mcs] += num_msdu;
+ rx_stats->byte_stats.be_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len;
+ }
+
if ((ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) &&
ppdu_info->rate < HAL_RX_LEGACY_RATE_INVALID) {
@@ -2329,6 +3132,7 @@ ath12k_dp_mon_rx_update_user_stats(struct ath12k *ar,
return;
arsta->rssi_comb = ppdu_info->rssi_comb;
+ ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
num_msdu = user_stats->tcp_msdu_count + user_stats->tcp_ack_msdu_count +
user_stats->udp_msdu_count + user_stats->other_msdu_count;
@@ -2415,8 +3219,15 @@ ath12k_dp_mon_rx_update_peer_mu_stats(struct ath12k *ar,
ath12k_dp_mon_rx_update_user_stats(ar, ppdu_info, i);
}
-int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
- struct napi_struct *napi, int *budget)
+static void
+ath12k_dp_mon_rx_memset_ppdu_info(struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
+}
+
+int ath12k_dp_mon_srng_process(struct ath12k *ar, int *budget,
+ struct napi_struct *napi)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_pdev_dp *pdev_dp = &ar->dp;
@@ -2432,13 +3243,14 @@ int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
struct ath12k_sta *ahsta = NULL;
struct ath12k_link_sta *arsta;
struct ath12k_peer *peer;
+ struct sk_buff_head skb_list;
u64 cookie;
int num_buffs_reaped = 0, srng_id, buf_id;
- u8 dest_idx = 0, i;
- bool end_of_ppdu;
- u32 hal_status;
+ u32 hal_status, end_offset, info0, end_reason;
+ u8 pdev_idx = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, ar->pdev_idx);
- srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id);
+ __skb_queue_head_init(&skb_list);
+ srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, pdev_idx);
mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id];
buf_ring = &dp->rxdma_mon_buf_ring;
@@ -2451,6 +3263,15 @@ int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
mon_dst_desc = ath12k_hal_srng_dst_peek(ab, srng);
if (unlikely(!mon_dst_desc))
break;
+
+ /* In case of an empty descriptor, the cookie in the ring descriptor
+ * is invalid. Therefore, this entry is skipped, and ring processing
+ * continues.
+ */
+ info0 = le32_to_cpu(mon_dst_desc->info0);
+ if (u32_get_bits(info0, HAL_MON_DEST_INFO0_EMPTY_DESC))
+ goto move_next;
+
cookie = le32_to_cpu(mon_dst_desc->cookie);
buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
@@ -2468,63 +3289,102 @@ int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
dma_unmap_single(ab->dev, rxcb->paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
- pmon->dest_skb_q[dest_idx] = skb;
- dest_idx++;
- end_of_ppdu = le32_get_bits(mon_dst_desc->info0,
- HAL_MON_DEST_INFO0_END_OF_PPDU);
- if (!end_of_ppdu)
- continue;
- for (i = 0; i < dest_idx; i++) {
- skb = pmon->dest_skb_q[i];
- hal_status = ath12k_dp_mon_parse_rx_dest(ab, pmon, skb);
+ end_reason = u32_get_bits(info0, HAL_MON_DEST_INFO0_END_REASON);
- if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
- hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
- dev_kfree_skb_any(skb);
- continue;
- }
-
- rcu_read_lock();
- spin_lock_bh(&ab->base_lock);
- peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id);
- if (!peer || !peer->sta) {
- ath12k_dbg(ab, ATH12K_DBG_DATA,
- "failed to find the peer with peer_id %d\n",
- ppdu_info->peer_id);
- spin_unlock_bh(&ab->base_lock);
- rcu_read_unlock();
- dev_kfree_skb_any(skb);
- continue;
- }
+ /* HAL_MON_FLUSH_DETECTED implies that an rx flush was received at the
+ * end of the rx PPDU and HAL_MON_PPDU_TRUNCATED implies that the PPDU
+ * got truncated due to a system level error. In both cases, the buffer
+ * data can be discarded
+ */
+ if ((end_reason == HAL_MON_FLUSH_DETECTED) ||
+ (end_reason == HAL_MON_PPDU_TRUNCATED)) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "Monitor dest descriptor end reason %d", end_reason);
+ dev_kfree_skb_any(skb);
+ goto move_next;
+ }
- if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_SU) {
- ahsta = ath12k_sta_to_ahsta(peer->sta);
- arsta = &ahsta->deflink;
- ath12k_dp_mon_rx_update_peer_su_stats(ar, arsta,
- ppdu_info);
- } else if ((ppdu_info->fc_valid) &&
- (ppdu_info->ast_index != HAL_AST_IDX_INVALID)) {
- ath12k_dp_mon_rx_process_ulofdma(ppdu_info);
- ath12k_dp_mon_rx_update_peer_mu_stats(ar, ppdu_info);
- }
+ /* Decrement the budget only when the ring descriptor carries the
+ * HAL_MON_END_OF_PPDU end reason, to ensure that one PPDU worth of data
+ * is always reaped. This helps to efficiently utilize the NAPI budget.
+ */
+ if (end_reason == HAL_MON_END_OF_PPDU) {
+ *budget -= 1;
+ rxcb->is_end_of_ppdu = true;
+ }
- spin_unlock_bh(&ab->base_lock);
- rcu_read_unlock();
- dev_kfree_skb_any(skb);
- memset(ppdu_info, 0, sizeof(*ppdu_info));
- ppdu_info->peer_id = HAL_INVALID_PEERID;
+ end_offset = u32_get_bits(info0, HAL_MON_DEST_INFO0_END_OFFSET);
+ if (likely(end_offset <= DP_RX_BUFFER_SIZE)) {
+ skb_put(skb, end_offset);
+ } else {
+ ath12k_warn(ab,
+ "invalid offset on mon stats destination %u\n",
+ end_offset);
+ skb_put(skb, DP_RX_BUFFER_SIZE);
}
- dest_idx = 0;
+ __skb_queue_tail(&skb_list, skb);
+
move_next:
ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
- ath12k_hal_srng_src_get_next_entry(ab, srng);
+ ath12k_hal_srng_dst_get_next_entry(ab, srng);
num_buffs_reaped++;
}
ath12k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
+
+ if (!num_buffs_reaped)
+ return 0;
+
+ /* In some cases, one PPDU worth of data can be spread across multiple NAPI
+ * schedules. To avoid losing the already parsed ppdu_info information, skip
+ * the memset of the ppdu_info structure and continue processing it.
+ */
+ if (!ppdu_info->ppdu_continuation)
+ ath12k_dp_mon_rx_memset_ppdu_info(ppdu_info);
+
+ while ((skb = __skb_dequeue(&skb_list))) {
+ hal_status = ath12k_dp_mon_parse_rx_dest(ar, pmon, skb);
+ if (hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
+ ppdu_info->ppdu_continuation = true;
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ if (ppdu_info->peer_id == HAL_INVALID_PEERID)
+ goto free_skb;
+
+ rcu_read_lock();
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id);
+ if (!peer || !peer->sta) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "failed to find the peer with monitor peer_id %d\n",
+ ppdu_info->peer_id);
+ goto next_skb;
+ }
+
+ if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_SU) {
+ ahsta = ath12k_sta_to_ahsta(peer->sta);
+ arsta = &ahsta->deflink;
+ ath12k_dp_mon_rx_update_peer_su_stats(ar, arsta,
+ ppdu_info);
+ } else if ((ppdu_info->fc_valid) &&
+ (ppdu_info->ast_index != HAL_AST_IDX_INVALID)) {
+ ath12k_dp_mon_rx_process_ulofdma(ppdu_info);
+ ath12k_dp_mon_rx_update_peer_mu_stats(ar, ppdu_info);
+ }
+
+next_skb:
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+free_skb:
+ dev_kfree_skb_any(skb);
+ ath12k_dp_mon_rx_memset_ppdu_info(ppdu_info);
+ }
+
return num_buffs_reaped;
}
@@ -2535,11 +3395,10 @@ int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
struct ath12k *ar = ath12k_ab_to_ar(ab, mac_id);
int num_buffs_reaped = 0;
- if (!ar->monitor_started)
- ath12k_dp_mon_rx_process_stats(ar, mac_id, napi, &budget);
- else
- num_buffs_reaped = ath12k_dp_mon_srng_process(ar, mac_id, &budget,
- monitor_mode, napi);
+ if (ab->hw_params->rxdma1_enable) {
+ if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE)
+ num_buffs_reaped = ath12k_dp_mon_srng_process(ar, &budget, napi);
+ }
return num_buffs_reaped;
}
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.h b/drivers/net/wireless/ath/ath12k/dp_mon.h
index fb9e9c176ce5..e4368eb42aca 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.h
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_MON_H
@@ -77,14 +77,11 @@ struct dp_mon_tx_ppdu_info {
enum hal_rx_mon_status
ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
struct ath12k_mon_data *pmon,
- int mac_id, struct sk_buff *skb,
+ struct sk_buff *skb,
struct napi_struct *napi);
int ath12k_dp_mon_buf_replenish(struct ath12k_base *ab,
struct dp_rxdma_mon_ring *buf_ring,
int req_entries);
-int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id,
- int *budget, enum dp_monitor_mode monitor_mode,
- struct napi_struct *napi);
int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
struct napi_struct *napi, int budget,
enum dp_monitor_mode monitor_mode);
@@ -96,11 +93,9 @@ ath12k_dp_mon_tx_status_get_num_user(u16 tlv_tag,
enum hal_rx_mon_status
ath12k_dp_mon_tx_parse_mon_status(struct ath12k *ar,
struct ath12k_mon_data *pmon,
- int mac_id,
struct sk_buff *skb,
struct napi_struct *napi,
u32 ppdu_id);
void ath12k_dp_mon_rx_process_ulofdma(struct hal_rx_mon_ppdu_info *ppdu_info);
-int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
- struct napi_struct *napi, int *budget);
+int ath12k_dp_mon_srng_process(struct ath12k *ar, int *budget, struct napi_struct *napi);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index dad35bfd83f6..ff6a709b5042 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/ieee80211.h>
@@ -2392,6 +2392,23 @@ static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct hal_rx_desc *rx_desc,
rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
break;
+ case RX_MSDU_START_PKT_TYPE_11BE:
+ rx_status->rate_idx = rate_mcs;
+
+ if (rate_mcs > ATH12K_EHT_MCS_MAX) {
+ ath12k_warn(ar->ab,
+ "Received with invalid mcs in EHT mode %d\n",
+ rate_mcs);
+ break;
+ }
+
+ rx_status->encoding = RX_ENC_EHT;
+ rx_status->nss = nss;
+ rx_status->eht.gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(sgi);
+ rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
+ break;
+ default:
+ break;
}
}
@@ -2486,7 +2503,7 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
spin_unlock_bh(&ab->base_lock);
ath12k_dbg(ab, ATH12K_DBG_DATA,
- "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
msdu,
msdu->len,
peer ? peer->addr : NULL,
@@ -2497,6 +2514,7 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
(status->encoding == RX_ENC_HT) ? "ht" : "",
(status->encoding == RX_ENC_VHT) ? "vht" : "",
(status->encoding == RX_ENC_HE) ? "he" : "",
+ (status->encoding == RX_ENC_EHT) ? "eht" : "",
(status->bw == RATE_INFO_BW_40) ? "40" : "",
(status->bw == RATE_INFO_BW_80) ? "80" : "",
(status->bw == RATE_INFO_BW_160) ? "160" : "",
@@ -2530,6 +2548,29 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi);
}
+static bool ath12k_dp_rx_check_nwifi_hdr_len_valid(struct ath12k_base *ab,
+ struct hal_rx_desc *rx_desc,
+ struct sk_buff *msdu)
+{
+ struct ieee80211_hdr *hdr;
+ u8 decap_type;
+ u32 hdr_len;
+
+ decap_type = ath12k_dp_rx_h_decap_type(ab, rx_desc);
+ if (decap_type != DP_RX_DECAP_TYPE_NATIVE_WIFI)
+ return true;
+
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (likely(hdr_len <= DP_MAX_NWIFI_HDR_LEN))
+ return true;
+
+ ab->soc_stats.invalid_rbm++;
+ WARN_ON_ONCE(1);
+ return false;
+}
+
static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
struct sk_buff *msdu,
struct sk_buff_head *msdu_list,
@@ -2588,6 +2629,11 @@ static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
}
}
+ if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu))) {
+ ret = -EINVAL;
+ goto free_out;
+ }
+
ath12k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
@@ -2978,6 +3024,9 @@ mic_fail:
RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
skb_pull(msdu, hal_rx_desc_sz);
+ if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu)))
+ return -EINVAL;
+
ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs);
ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
@@ -3720,6 +3769,9 @@ static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
}
+ if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu)))
+ return -EINVAL;
+
ath12k_dp_rx_h_ppdu(ar, desc, status);
ath12k_dp_rx_h_mpdu(ar, msdu, desc, status);
@@ -3764,7 +3816,7 @@ static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
return drop;
}
-static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
+static bool ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
struct ieee80211_rx_status *status)
{
struct ath12k_base *ab = ar->ab;
@@ -3782,6 +3834,9 @@ static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
+ if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu)))
+ return true;
+
ath12k_dp_rx_h_ppdu(ar, desc, status);
status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
@@ -3789,6 +3844,7 @@ static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
ath12k_dp_rx_h_undecap(ar, msdu, desc,
HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
+ return false;
}
static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu,
@@ -3807,7 +3863,7 @@ static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu,
case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) {
- ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status);
+ drop = ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status);
break;
}
fallthrough;
@@ -4032,7 +4088,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
hw_links[hw_link_id].pdev_idx);
ar = partner_ab->pdevs[pdev_id].ar;
- if (!ar || !rcu_dereference(ar->ab->pdevs_active[hw_link_id])) {
+ if (!ar || !rcu_dereference(ar->ab->pdevs_active[pdev_id])) {
dev_kfree_skb_any(msdu);
continue;
}
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h
index 1ce82088c954..88e42365a9d8 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.h
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_RX_H
#define ATH12K_DP_RX_H
@@ -79,6 +79,9 @@ static inline u32 ath12k_he_gi_to_nl80211_he_gi(u8 sgi)
case RX_MSDU_START_SGI_3_2_US:
ret = NL80211_RATE_INFO_HE_GI_3_2;
break;
+ default:
+ ret = NL80211_RATE_INFO_HE_GI_0_8;
+ break;
}
return ret;
@@ -135,9 +138,6 @@ u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
struct hal_rx_desc *desc);
void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
struct ieee80211_rx_status *rx_status);
-struct ath12k_peer *
-ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu);
-
int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab);
int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab);
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
index a8d341a6df01..ced232bf4aed 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
@@ -1,13 +1,15 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
#include "dp_tx.h"
#include "debug.h"
#include "hw.h"
+#include "peer.h"
+#include "mac.h"
static enum hal_tcl_encap_type
ath12k_dp_tx_get_encap_type(struct ath12k_link_vif *arvif, struct sk_buff *skb)
@@ -117,7 +119,7 @@ static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab,
le32_encode_bits(ti->data_len,
HAL_TX_MSDU_EXT_INFO1_BUF_LEN);
- tcl_ext_cmd->info1 = le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) |
+ tcl_ext_cmd->info1 |= le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) |
le32_encode_bits(ti->encap_type,
HAL_TX_MSDU_EXT_INFO1_ENCAP_TYPE) |
le32_encode_bits(ti->encrypt_type,
@@ -217,7 +219,7 @@ out:
}
int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
- struct sk_buff *skb)
+ struct sk_buff *skb, bool gsn_valid, int mcbc_gsn)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_dp *dp = &ab->dp;
@@ -290,13 +292,27 @@ tcl_ring_sel:
msdu_ext_desc = true;
}
+ if (gsn_valid) {
+ /* Reset and initialize meta_data_flags with the Global Sequence
+ * Number (GSN) info.
+ */
+ ti.meta_data_flags =
+ u32_encode_bits(HTT_TCL_META_DATA_TYPE_GLOBAL_SEQ_NUM,
+ HTT_TCL_META_DATA_TYPE) |
+ u32_encode_bits(mcbc_gsn, HTT_TCL_META_DATA_GLOBAL_SEQ_NUM);
+ }
+
ti.encap_type = ath12k_dp_tx_get_encap_type(arvif, skb);
ti.addr_search_flags = arvif->hal_addr_search_flags;
ti.search_type = arvif->search_type;
ti.type = HAL_TCL_DESC_TYPE_BUFFER;
ti.pkt_offset = 0;
ti.lmac_id = ar->lmac_id;
+
ti.vdev_id = arvif->vdev_id;
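+ /* gsn_valid marks a host-reinjected MLO multicast copy; offsetting
+ * vdev_id by HTT_TX_MLO_MCAST_HOST_REINJECT_BASE_VDEV_ID appears
+ * intended to let firmware tell these reinjected frames apart from
+ * normal per-vdev traffic.
+ */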
+ if (gsn_valid)
+ ti.vdev_id += HTT_TX_MLO_MCAST_HOST_REINJECT_BASE_VDEV_ID;
+
ti.bss_ast_hash = arvif->ast_hash;
ti.bss_ast_idx = arvif->ast_idx;
ti.dscp_tid_tbl_idx = 0;
@@ -368,6 +384,7 @@ map:
add_htt_metadata = true;
msdu_ext_desc = true;
ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TO_FW);
+ ti.meta_data_flags |= HTT_TCL_META_DATA_VALID_HTT;
ti.encap_type = HAL_TCL_ENCAP_TYPE_RAW;
ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
}
@@ -398,6 +415,7 @@ map:
if (ret < 0) {
ath12k_dbg(ab, ATH12K_DBG_DP_TX,
"Failed to add HTT meta data, dropping packet\n");
+ kfree_skb(skb_ext_desc);
goto fail_unmap_dma;
}
}
@@ -558,13 +576,13 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
switch (wbm_status) {
case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
- case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
- case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
ts.ack_rssi = le32_get_bits(status_desc->info2,
HTT_TX_WBM_COMP_INFO2_ACK_RSSI);
ath12k_dp_tx_htt_tx_complete_buf(ab, msdu, tx_ring, &ts);
break;
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
ath12k_dp_tx_free_txbuf(ab, msdu, mac_id, tx_ring);
@@ -580,6 +598,124 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
}
}
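+/* Translate the rate info parsed from the WBM tx completion descriptor
+ * into a struct rate_info and cache it on the station's default link,
+ * presumably for later reporting through station statistics.
+ */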
+static void ath12k_dp_tx_update_txcompl(struct ath12k *ar, struct hal_tx_status *ts)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_peer *peer;
+ struct ieee80211_sta *sta;
+ struct ath12k_sta *ahsta;
+ struct ath12k_link_sta *arsta;
+ struct rate_info txrate = {0};
+ u16 rate, ru_tones;
+ u8 rate_idx = 0;
+ int ret;
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find_by_id(ab, ts->peer_id);
+ if (!peer || !peer->sta) {
+ ath12k_dbg(ab, ATH12K_DBG_DP_TX,
+ "failed to find the peer by id %u\n", ts->peer_id);
+ spin_unlock_bh(&ab->base_lock);
+ return;
+ }
+ sta = peer->sta;
+ ahsta = ath12k_sta_to_ahsta(sta);
+ arsta = &ahsta->deflink;
+
+ /* Prefer the real NSS value in arsta->last_txrate.nss; if it is
+ * invalid, fall back to the NSS value negotiated at association.
+ */
+ if (arsta->last_txrate.nss)
+ txrate.nss = arsta->last_txrate.nss;
+ else
+ txrate.nss = arsta->peer_nss;
+ spin_unlock_bh(&ab->base_lock);
+
+ switch (ts->pkt_type) {
+ case HAL_TX_RATE_STATS_PKT_TYPE_11A:
+ case HAL_TX_RATE_STATS_PKT_TYPE_11B:
+ ret = ath12k_mac_hw_ratecode_to_legacy_rate(ts->mcs,
+ ts->pkt_type,
+ &rate_idx,
+ &rate);
+ if (ret < 0) {
+ ath12k_warn(ab, "Invalid tx legacy rate %d\n", ret);
+ return;
+ }
+
+ txrate.legacy = rate;
+ break;
+ case HAL_TX_RATE_STATS_PKT_TYPE_11N:
+ if (ts->mcs > ATH12K_HT_MCS_MAX) {
+ ath12k_warn(ab, "Invalid HT mcs index %d\n", ts->mcs);
+ return;
+ }
+
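+ /* HT MCS indices repeat every 8 per spatial stream, so e.g. a
+ * per-stream MCS of 5 at nss 2 is reported as HT MCS 13.
+ */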
+ if (txrate.nss != 0)
+ txrate.mcs = ts->mcs + 8 * (txrate.nss - 1);
+
+ txrate.flags = RATE_INFO_FLAGS_MCS;
+
+ if (ts->sgi)
+ txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case HAL_TX_RATE_STATS_PKT_TYPE_11AC:
+ if (ts->mcs > ATH12K_VHT_MCS_MAX) {
+ ath12k_warn(ab, "Invalid VHT mcs index %d\n", ts->mcs);
+ return;
+ }
+
+ txrate.mcs = ts->mcs;
+ txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
+
+ if (ts->sgi)
+ txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case HAL_TX_RATE_STATS_PKT_TYPE_11AX:
+ if (ts->mcs > ATH12K_HE_MCS_MAX) {
+ ath12k_warn(ab, "Invalid HE mcs index %d\n", ts->mcs);
+ return;
+ }
+
+ txrate.mcs = ts->mcs;
+ txrate.flags = RATE_INFO_FLAGS_HE_MCS;
+ txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(ts->sgi);
+ break;
+ case HAL_TX_RATE_STATS_PKT_TYPE_11BE:
+ if (ts->mcs > ATH12K_EHT_MCS_MAX) {
+ ath12k_warn(ab, "Invalid EHT mcs index %d\n", ts->mcs);
+ return;
+ }
+
+ txrate.mcs = ts->mcs;
+ txrate.flags = RATE_INFO_FLAGS_EHT_MCS;
+ txrate.eht_gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(ts->sgi);
+ break;
+ default:
+ ath12k_warn(ab, "Invalid tx pkt type: %d\n", ts->pkt_type);
+ return;
+ }
+
+ txrate.bw = ath12k_mac_bw_to_mac80211_bw(ts->bw);
+
+ if (ts->ofdma && ts->pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
+ txrate.bw = RATE_INFO_BW_HE_RU;
+ ru_tones = ath12k_mac_he_convert_tones_to_ru_tones(ts->tones);
+ txrate.he_ru_alloc =
+ ath12k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
+ }
+
+ if (ts->ofdma && ts->pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11BE) {
+ txrate.bw = RATE_INFO_BW_EHT_RU;
+ txrate.eht_ru_alloc =
+ ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(ts->tones);
+ }
+
+ spin_lock_bh(&ab->base_lock);
+ arsta->txrate = txrate;
+ spin_unlock_bh(&ab->base_lock);
+}
+
static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
struct sk_buff *msdu,
struct hal_tx_status *ts)
@@ -658,6 +794,8 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
* Might end up reporting it out-of-band from HTT stats.
*/
+ ath12k_dp_tx_update_txcompl(ar, ts);
+
ieee80211_tx_status_skb(ath12k_ar_to_hw(ar), msdu);
exit:
@@ -668,6 +806,8 @@ static void ath12k_dp_tx_status_parse(struct ath12k_base *ab,
struct hal_wbm_completion_ring_tx *desc,
struct hal_tx_status *ts)
{
+ u32 info0 = le32_to_cpu(desc->rate_stats.info0);
+
ts->buf_rel_source =
le32_get_bits(desc->info0, HAL_WBM_COMPL_TX_INFO0_REL_SRC_MODULE);
if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
@@ -682,10 +822,17 @@ static void ath12k_dp_tx_status_parse(struct ath12k_base *ab,
ts->ppdu_id = le32_get_bits(desc->info1,
HAL_WBM_COMPL_TX_INFO1_TQM_STATUS_NUMBER);
- if (le32_to_cpu(desc->rate_stats.info0) & HAL_TX_RATE_STATS_INFO0_VALID)
- ts->rate_stats = le32_to_cpu(desc->rate_stats.info0);
- else
- ts->rate_stats = 0;
+
+ ts->peer_id = le32_get_bits(desc->info3, HAL_WBM_COMPL_TX_INFO3_PEER_ID);
+
+ if (info0 & HAL_TX_RATE_STATS_INFO0_VALID) {
+ ts->pkt_type = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_PKT_TYPE);
+ ts->mcs = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_MCS);
+ ts->sgi = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_SGI);
+ ts->bw = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_BW);
+ ts->tones = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_TONES_IN_RU);
+ ts->ofdma = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_OFDMA_TX);
+ }
}
void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
@@ -814,7 +961,7 @@ ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab,
*htt_ring_type = HTT_HW_TO_SW_RING;
break;
case HAL_RXDMA_MONITOR_BUF:
- *htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
+ *htt_ring_id = HTT_RX_MON_HOST2MON_BUF_RING;
*htt_ring_type = HTT_SW_TO_HW_RING;
break;
case HAL_RXDMA_MONITOR_STATUS:
@@ -822,7 +969,7 @@ ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab,
*htt_ring_type = HTT_SW_TO_HW_RING;
break;
case HAL_RXDMA_MONITOR_DST:
- *htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
+ *htt_ring_id = HTT_RX_MON_MON2HOST_DEST_RING;
*htt_ring_type = HTT_HW_TO_SW_RING;
break;
case HAL_RXDMA_MONITOR_DESC:
@@ -971,7 +1118,14 @@ int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab)
skb_put(skb, len);
cmd = (struct htt_ver_req_cmd *)skb->data;
cmd->ver_reg_info = le32_encode_bits(HTT_H2T_MSG_TYPE_VERSION_REQ,
- HTT_VER_REQ_INFO_MSG_ID);
+ HTT_OPTION_TAG);
+
+ cmd->tcl_metadata_version = le32_encode_bits(HTT_TAG_TCL_METADATA_VERSION,
+ HTT_OPTION_TAG) |
+ le32_encode_bits(HTT_TCL_METADATA_VER_SZ,
+ HTT_OPTION_LEN) |
+ le32_encode_bits(HTT_OPTION_TCL_METADATA_VER_V2,
+ HTT_OPTION_VALUE);
ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
if (ret) {
@@ -1077,15 +1231,46 @@ int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id,
cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS);
cmd->info0 |= le32_encode_bits(tlv_filter->offset_valid,
- HTT_RX_RING_SELECTION_CFG_CMD_OFFSET_VALID);
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_OFFSET_VALID);
+ cmd->info0 |=
+ le32_encode_bits(tlv_filter->drop_threshold_valid,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_DROP_THRES_VAL);
+ cmd->info0 |= le32_encode_bits(!tlv_filter->rxmon_disable,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_EN_RXMON);
+
cmd->info1 = le32_encode_bits(rx_buf_size,
HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE);
+ cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_mgmt,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT);
+ cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_ctrl,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL);
+ cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_data,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA);
cmd->pkt_type_en_flags0 = cpu_to_le32(tlv_filter->pkt_filter_flags0);
cmd->pkt_type_en_flags1 = cpu_to_le32(tlv_filter->pkt_filter_flags1);
cmd->pkt_type_en_flags2 = cpu_to_le32(tlv_filter->pkt_filter_flags2);
cmd->pkt_type_en_flags3 = cpu_to_le32(tlv_filter->pkt_filter_flags3);
cmd->rx_filter_tlv = cpu_to_le32(tlv_filter->rx_filter);
+ cmd->info2 = le32_encode_bits(tlv_filter->rx_drop_threshold,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO2_DROP_THRESHOLD);
+ cmd->info2 |=
+ le32_encode_bits(tlv_filter->enable_log_mgmt_type,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_MGMT_TYPE);
+ cmd->info2 |=
+ le32_encode_bits(tlv_filter->enable_log_ctrl_type,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_CTRL_TYPE);
+ cmd->info2 |=
+ le32_encode_bits(tlv_filter->enable_log_data_type,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_DATA_TYPE);
+
+ cmd->info3 =
+ le32_encode_bits(tlv_filter->enable_rx_tlv_offset,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO3_EN_TLV_PKT_OFFSET);
+ cmd->info3 |=
+ le32_encode_bits(tlv_filter->rx_tlv_offset,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO3_PKT_TLV_OFFSET);
+
if (tlv_filter->offset_valid) {
cmd->rx_packet_offset =
le32_encode_bits(tlv_filter->rx_packet_offset,
@@ -1210,15 +1395,28 @@ int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset)
int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
{
struct ath12k_base *ab = ar->ab;
- struct ath12k_dp *dp = &ab->dp;
struct htt_rx_ring_tlv_filter tlv_filter = {0};
- int ret, ring_id;
+ int ret, ring_id, i;
- ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
tlv_filter.offset_valid = false;
if (!reset) {
- tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING;
+ tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_DEST_RING;
+
+ tlv_filter.drop_threshold_valid = true;
+ tlv_filter.rx_drop_threshold = HTT_RX_RING_TLV_DROP_THRESHOLD_VALUE;
+
+ tlv_filter.enable_log_mgmt_type = true;
+ tlv_filter.enable_log_ctrl_type = true;
+ tlv_filter.enable_log_data_type = true;
+
+ tlv_filter.conf_len_ctrl = HTT_RX_RING_DEFAULT_DMA_LENGTH;
+ tlv_filter.conf_len_mgmt = HTT_RX_RING_DEFAULT_DMA_LENGTH;
+ tlv_filter.conf_len_data = HTT_RX_RING_DEFAULT_DMA_LENGTH;
+
+ tlv_filter.enable_rx_tlv_offset = true;
+ tlv_filter.rx_tlv_offset = HTT_RX_RING_PKT_TLV_OFFSET;
+
tlv_filter.pkt_filter_flags0 =
HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
@@ -1236,14 +1434,19 @@ int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
}
if (ab->hw_params->rxdma1_enable) {
- ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, 0,
- HAL_RXDMA_MONITOR_BUF,
- DP_RXDMA_REFILL_RING_SIZE,
- &tlv_filter);
- if (ret) {
- ath12k_err(ab,
- "failed to setup filter for monitor buf %d\n", ret);
- return ret;
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id;
+ ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
+ ar->dp.mac_id + i,
+ HAL_RXDMA_MONITOR_DST,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ if (ret) {
+ ath12k_err(ab,
+ "failed to setup filter for monitor buf %d\n",
+ ret);
+ return ret;
+ }
}
}
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.h b/drivers/net/wireless/ath/ath12k/dp_tx.h
index 46dce23501f3..a5904097dc34 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.h
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_TX_H
@@ -17,7 +17,7 @@ struct ath12k_dp_htt_wbm_tx_status {
int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab);
int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
- struct sk_buff *skb);
+ struct sk_buff *skb, bool gsn_valid, int mcbc_gsn);
void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id);
int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask);
diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h
index 7b0403d245e5..3e8983b85de8 100644
--- a/drivers/net/wireless/ath/ath12k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath12k/hal_desc.h
@@ -2968,9 +2968,8 @@ struct hal_mon_buf_ring {
#define HAL_MON_DEST_COOKIE_BUF_ID GENMASK(17, 0)
-#define HAL_MON_DEST_INFO0_END_OFFSET GENMASK(15, 0)
-#define HAL_MON_DEST_INFO0_FLUSH_DETECTED BIT(16)
-#define HAL_MON_DEST_INFO0_END_OF_PPDU BIT(17)
+#define HAL_MON_DEST_INFO0_END_OFFSET GENMASK(11, 0)
+#define HAL_MON_DEST_INFO0_END_REASON GENMASK(17, 16)
#define HAL_MON_DEST_INFO0_INITIATOR BIT(18)
#define HAL_MON_DEST_INFO0_EMPTY_DESC BIT(19)
#define HAL_MON_DEST_INFO0_RING_ID GENMASK(27, 20)
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.h b/drivers/net/wireless/ath/ath12k/hal_rx.h
index b08aa2e79f41..6bdcd0867d86 100644
--- a/drivers/net/wireless/ath/ath12k/hal_rx.h
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HAL_RX_H
@@ -22,9 +22,6 @@ struct hal_rx_wbm_rel_info {
#define HAL_INVALID_PEERID 0x3fff
#define VHT_SIG_SU_NSS_MASK 0x7
-#define HAL_RX_MAX_MCS 12
-#define HAL_RX_MAX_NSS 8
-
#define HAL_RX_MPDU_INFO_PN_GET_BYTE1(__val) \
le32_get_bits((__val), GENMASK(7, 0))
@@ -71,6 +68,8 @@ enum hal_rx_preamble {
HAL_RX_PREAMBLE_11N,
HAL_RX_PREAMBLE_11AC,
HAL_RX_PREAMBLE_11AX,
+ HAL_RX_PREAMBLE_11BA,
+ HAL_RX_PREAMBLE_11BE,
HAL_RX_PREAMBLE_MAX,
};
@@ -108,6 +107,9 @@ enum hal_rx_mon_status {
HAL_RX_MON_STATUS_PPDU_NOT_DONE,
HAL_RX_MON_STATUS_PPDU_DONE,
HAL_RX_MON_STATUS_BUF_DONE,
+ HAL_RX_MON_STATUS_BUF_ADDR,
+ HAL_RX_MON_STATUS_MPDU_END,
+ HAL_RX_MON_STATUS_MSDU_END,
};
#define HAL_RX_MAX_MPDU 256
@@ -143,10 +145,43 @@ struct hal_rx_user_status {
u32 mpdu_fcs_ok_bitmap[HAL_RX_NUM_WORDS_PER_PPDU_BITMAP];
u32 mpdu_ok_byte_count;
u32 mpdu_err_byte_count;
+ bool ampdu_present;
+ u16 ampdu_id;
};
#define HAL_MAX_UL_MU_USERS 37
+struct hal_rx_u_sig_info {
+ bool ul_dl;
+ u8 bw;
+ u8 ppdu_type_comp_mode;
+ u8 eht_sig_mcs;
+ u8 num_eht_sig_sym;
+ struct ieee80211_radiotap_eht_usig usig;
+};
+
+#define HAL_RX_MON_MAX_AGGR_SIZE 128
+
+struct hal_rx_tlv_aggr_info {
+ bool in_progress;
+ u16 cur_len;
+ u16 tlv_tag;
+ u8 buf[HAL_RX_MON_MAX_AGGR_SIZE];
+};
+
+struct hal_rx_radiotap_eht {
+ __le32 known;
+ __le32 data[9];
+};
+
+#define EHT_MAX_USER_INFO 4
+
+struct hal_rx_eht_info {
+ u8 num_user_info;
+ struct hal_rx_radiotap_eht eht;
+ u32 user_info[EHT_MAX_USER_INFO];
+};
+
struct hal_rx_mon_ppdu_info {
u32 ppdu_id;
u32 last_ppdu_id;
@@ -227,10 +262,15 @@ struct hal_rx_mon_ppdu_info {
u8 addr4[ETH_ALEN];
struct hal_rx_user_status userstats[HAL_MAX_UL_MU_USERS];
u8 userid;
- u16 ampdu_id[HAL_MAX_UL_MU_USERS];
bool first_msdu_in_mpdu;
bool is_ampdu;
u8 medium_prot_type;
+ bool ppdu_continuation;
+ bool eht_usig;
+ struct hal_rx_u_sig_info u_sig_info;
+ bool is_eht;
+ struct hal_rx_eht_info eht_info;
+ struct hal_rx_tlv_aggr_info tlv_aggr;
};
#define HAL_RX_PPDU_START_INFO0_PPDU_ID GENMASK(15, 0)
@@ -641,6 +681,395 @@ struct hal_rx_resp_req_info {
#define HAL_RX_MPDU_ERR_MPDU_LEN BIT(6)
#define HAL_RX_MPDU_ERR_UNENCRYPTED_FRAME BIT(7)
+#define HAL_RX_PHY_CMN_USER_INFO0_GI GENMASK(17, 16)
+
+struct hal_phyrx_common_user_info {
+ __le32 rsvd[2];
+ __le32 info0;
+ __le32 rsvd1;
+} __packed;
+
+#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_SPATIAL_REUSE GENMASK(3, 0)
+#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_GI_LTF GENMASK(5, 4)
+#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_NUM_LTF_SYM GENMASK(8, 6)
+#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_NSS GENMASK(10, 7)
+#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_BEAMFORMED BIT(11)
+#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_DISREGARD GENMASK(13, 12)
+#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_CRC GENMASK(17, 14)
+
+struct hal_eht_sig_ndp_cmn_eb {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_SPATIAL_REUSE GENMASK(3, 0)
+#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_GI_LTF GENMASK(5, 4)
+#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_NUM_LTF_SYM GENMASK(8, 6)
+#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_LDPC_EXTA_SYM BIT(9)
+#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_PRE_FEC_PAD_FACTOR GENMASK(11, 10)
+#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISAMBIGUITY BIT(12)
+#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISREGARD GENMASK(16, 13)
+
+struct hal_eht_sig_usig_overflow {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_STA_ID GENMASK(10, 0)
+#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_MCS GENMASK(14, 11)
+#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_VALIDATE BIT(15)
+#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_NSS GENMASK(19, 16)
+#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_BEAMFORMED BIT(20)
+#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_CODING BIT(21)
+#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_CRC GENMASK(25, 22)
+
+struct hal_eht_sig_non_mu_mimo {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_STA_ID GENMASK(10, 0)
+#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_MCS GENMASK(14, 11)
+#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_CODING BIT(15)
+#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_SPATIAL_CODING GENMASK(22, 16)
+#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_CRC GENMASK(26, 23)
+
+struct hal_eht_sig_mu_mimo {
+ __le32 info0;
+} __packed;
+
+union hal_eht_sig_user_field {
+ struct hal_eht_sig_mu_mimo mu_mimo;
+ struct hal_eht_sig_non_mu_mimo n_mu_mimo;
+};
+
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_SPATIAL_REUSE GENMASK(3, 0)
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_GI_LTF GENMASK(5, 4)
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_NUM_LTF_SYM GENMASK(8, 6)
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_LDPC_EXTA_SYM BIT(9)
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_PRE_FEC_PAD_FACTOR GENMASK(11, 10)
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_DISAMBIGUITY BIT(12)
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_DISREGARD GENMASK(16, 13)
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_NUM_USERS GENMASK(19, 17)
+
+struct hal_eht_sig_non_ofdma_cmn_eb {
+ __le32 info0;
+ union hal_eht_sig_user_field user_field;
+} __packed;
+
+#define HAL_RX_EHT_SIG_OFDMA_EB1_SPATIAL_REUSE GENMASK_ULL(3, 0)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_GI_LTF GENMASK_ULL(5, 4)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_NUM_LFT_SYM GENMASK_ULL(8, 6)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_LDPC_EXTRA_SYM BIT(9)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_PRE_FEC_PAD_FACTOR GENMASK_ULL(11, 10)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_PRE_DISAMBIGUITY BIT(12)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_DISREGARD GENMASK_ULL(16, 13)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_1 GENMASK_ULL(25, 17)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_2 GENMASK_ULL(34, 26)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_CRC GENMASK_ULL(30, 27)
+
+struct hal_eht_sig_ofdma_cmn_eb1 {
+ __le64 info0;
+} __packed;
+
+#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_1 GENMASK_ULL(8, 0)
+#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_2 GENMASK_ULL(17, 9)
+#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_3 GENMASK_ULL(26, 18)
+#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_4 GENMASK_ULL(35, 27)
+#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_5 GENMASK_ULL(44, 36)
+#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_6 GENMASK_ULL(53, 45)
+#define HAL_RX_EHT_SIG_OFDMA_EB2_MCS GENMASK_ULL(57, 54)
+
+struct hal_eht_sig_ofdma_cmn_eb2 {
+ __le64 info0;
+} __packed;
+
+struct hal_eht_sig_ofdma_cmn_eb {
+ struct hal_eht_sig_ofdma_cmn_eb1 eb1;
+ struct hal_eht_sig_ofdma_cmn_eb2 eb2;
+ union hal_eht_sig_user_field user_field;
+} __packed;
+
+enum hal_eht_bw {
+ HAL_EHT_BW_20,
+ HAL_EHT_BW_40,
+ HAL_EHT_BW_80,
+ HAL_EHT_BW_160,
+ HAL_EHT_BW_320_1,
+ HAL_EHT_BW_320_2,
+};
+
+#define HAL_RX_USIG_CMN_INFO0_PHY_VERSION GENMASK(2, 0)
+#define HAL_RX_USIG_CMN_INFO0_BW GENMASK(5, 3)
+#define HAL_RX_USIG_CMN_INFO0_UL_DL BIT(6)
+#define HAL_RX_USIG_CMN_INFO0_BSS_COLOR GENMASK(12, 7)
+#define HAL_RX_USIG_CMN_INFO0_TXOP GENMASK(19, 13)
+#define HAL_RX_USIG_CMN_INFO0_DISREGARD GENMASK(25, 20)
+#define HAL_RX_USIG_CMN_INFO0_VALIDATE BIT(26)
+
+struct hal_mon_usig_cmn {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_USIG_TB_INFO0_PPDU_TYPE_COMP_MODE GENMASK(1, 0)
+#define HAL_RX_USIG_TB_INFO0_VALIDATE BIT(2)
+#define HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_1 GENMASK(6, 3)
+#define HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_2 GENMASK(10, 7)
+#define HAL_RX_USIG_TB_INFO0_DISREGARD_1 GENMASK(15, 11)
+#define HAL_RX_USIG_TB_INFO0_CRC GENMASK(19, 16)
+#define HAL_RX_USIG_TB_INFO0_TAIL GENMASK(25, 20)
+#define HAL_RX_USIG_TB_INFO0_RX_INTEG_CHECK_PASS BIT(31)
+
+struct hal_mon_usig_tb {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_USIG_MU_INFO0_PPDU_TYPE_COMP_MODE GENMASK(1, 0)
+#define HAL_RX_USIG_MU_INFO0_VALIDATE_1 BIT(2)
+#define HAL_RX_USIG_MU_INFO0_PUNC_CH_INFO GENMASK(7, 3)
+#define HAL_RX_USIG_MU_INFO0_VALIDATE_2 BIT(8)
+#define HAL_RX_USIG_MU_INFO0_EHT_SIG_MCS GENMASK(10, 9)
+#define HAL_RX_USIG_MU_INFO0_NUM_EHT_SIG_SYM GENMASK(15, 11)
+#define HAL_RX_USIG_MU_INFO0_CRC GENMASK(20, 16)
+#define HAL_RX_USIG_MU_INFO0_TAIL GENMASK(26, 21)
+#define HAL_RX_USIG_MU_INFO0_RX_INTEG_CHECK_PASS BIT(31)
+
+struct hal_mon_usig_mu {
+ __le32 info0;
+} __packed;
+
+union hal_mon_usig_non_cmn {
+ struct hal_mon_usig_tb tb;
+ struct hal_mon_usig_mu mu;
+};
+
+struct hal_mon_usig_hdr {
+ struct hal_mon_usig_cmn cmn;
+ union hal_mon_usig_non_cmn non_cmn;
+} __packed;
+
+#define HAL_RX_USR_INFO0_PHY_PPDU_ID GENMASK(15, 0)
+#define HAL_RX_USR_INFO0_USR_RSSI GENMASK(23, 16)
+#define HAL_RX_USR_INFO0_PKT_TYPE GENMASK(27, 24)
+#define HAL_RX_USR_INFO0_STBC BIT(28)
+#define HAL_RX_USR_INFO0_RECEPTION_TYPE GENMASK(31, 29)
+
+#define HAL_RX_USR_INFO1_MCS GENMASK(3, 0)
+#define HAL_RX_USR_INFO1_SGI GENMASK(5, 4)
+#define HAL_RX_USR_INFO1_HE_RANGING_NDP BIT(6)
+#define HAL_RX_USR_INFO1_MIMO_SS_BITMAP GENMASK(15, 8)
+#define HAL_RX_USR_INFO1_RX_BW GENMASK(18, 16)
+#define HAL_RX_USR_INFO1_DL_OFMDA_USR_IDX GENMASK(31, 24)
+
+#define HAL_RX_USR_INFO2_DL_OFDMA_CONTENT_CHAN BIT(0)
+#define HAL_RX_USR_INFO2_NSS GENMASK(10, 8)
+#define HAL_RX_USR_INFO2_STREAM_OFFSET GENMASK(13, 11)
+#define HAL_RX_USR_INFO2_STA_DCM BIT(14)
+#define HAL_RX_USR_INFO2_LDPC BIT(15)
+#define HAL_RX_USR_INFO2_RU_TYPE_80_0 GENMASK(19, 16)
+#define HAL_RX_USR_INFO2_RU_TYPE_80_1 GENMASK(23, 20)
+#define HAL_RX_USR_INFO2_RU_TYPE_80_2 GENMASK(27, 24)
+#define HAL_RX_USR_INFO2_RU_TYPE_80_3 GENMASK(31, 28)
+
+#define HAL_RX_USR_INFO3_RU_START_IDX_80_0 GENMASK(5, 0)
+#define HAL_RX_USR_INFO3_RU_START_IDX_80_1 GENMASK(13, 8)
+#define HAL_RX_USR_INFO3_RU_START_IDX_80_2 GENMASK(21, 16)
+#define HAL_RX_USR_INFO3_RU_START_IDX_80_3 GENMASK(29, 24)
+
+struct hal_receive_user_info {
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+ __le32 user_fd_rssi_seg0;
+ __le32 user_fd_rssi_seg1;
+ __le32 user_fd_rssi_seg2;
+ __le32 user_fd_rssi_seg3;
+} __packed;
+
+enum hal_mon_reception_type {
+ HAL_RECEPTION_TYPE_SU,
+ HAL_RECEPTION_TYPE_DL_MU_MIMO,
+ HAL_RECEPTION_TYPE_DL_MU_OFMA,
+ HAL_RECEPTION_TYPE_DL_MU_OFDMA_MIMO,
+ HAL_RECEPTION_TYPE_UL_MU_MIMO,
+ HAL_RECEPTION_TYPE_UL_MU_OFDMA,
+ HAL_RECEPTION_TYPE_UL_MU_OFDMA_MIMO,
+};
+
+/* Different RU sizes allowed in 11BE */
+#define HAL_EHT_RU_26 0ULL
+#define HAL_EHT_RU_52 1ULL
+#define HAL_EHT_RU_78 2ULL
+#define HAL_EHT_RU_106 3ULL
+#define HAL_EHT_RU_132 4ULL
+#define HAL_EHT_RU_242 5ULL
+#define HAL_EHT_RU_484 6ULL
+#define HAL_EHT_RU_726 7ULL
+#define HAL_EHT_RU_996 8ULL
+#define HAL_EHT_RU_996x2 9ULL
+#define HAL_EHT_RU_996x3 10ULL
+#define HAL_EHT_RU_996x4 11ULL
+#define HAL_EHT_RU_NONE 15ULL
+#define HAL_EHT_RU_INVALID 31ULL
+/* MRUs spanning above 80 MHz
+ * HAL_EHT_RU_996_484 = HAL_EHT_RU_484 + HAL_EHT_RU_996 + 4 (reserved)
+ */
+#define HAL_EHT_RU_996_484 18ULL
+#define HAL_EHT_RU_996x2_484 28ULL
+#define HAL_EHT_RU_996x3_484 40ULL
+#define HAL_EHT_RU_996_484_242 23ULL
+
+#define NUM_RU_BITS_PER80 16
+#define NUM_RU_BITS_PER20 4
+
+/* Different per-80 MHz bands in a 320 MHz bandwidth */
+#define HAL_80_0 0
+#define HAL_80_1 1
+#define HAL_80_2 2
+#define HAL_80_3 3
+
+#define HAL_RU_80MHZ(num_band) ((num_band) * NUM_RU_BITS_PER80)
+#define HAL_RU_20MHZ(idx_per_80) ((idx_per_80) * NUM_RU_BITS_PER20)
+
+#define HAL_RU_SHIFT(num_band, idx_per_80) \
+ (HAL_RU_80MHZ(num_band) + HAL_RU_20MHZ(idx_per_80))
+
+#define HAL_RU(ru, num_band, idx_per_80) \
+ ((u64)(ru) << HAL_RU_SHIFT(num_band, idx_per_80))
+
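+/* Worked example of the packing above, using the index values defined in
+ * this file: HAL_EHT_RU_996_484_0 places RU_484 (index 6) in the second
+ * 20 MHz slot of the first 80 MHz band and RU_996 (index 8) in the first
+ * slot of the second 80 MHz band, i.e.
+ * (6ULL << (0 * 16 + 1 * 4)) | (8ULL << (1 * 16 + 0 * 4)) = 0x80060
+ */
+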
+/* MRU-996+484 */
+#define HAL_EHT_RU_996_484_0 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 1) | \
+ HAL_RU(HAL_EHT_RU_996, HAL_80_1, 0))
+#define HAL_EHT_RU_996_484_1 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_996, HAL_80_1, 0))
+#define HAL_EHT_RU_996_484_2 (HAL_RU(HAL_EHT_RU_996, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_1, 1))
+#define HAL_EHT_RU_996_484_3 (HAL_RU(HAL_EHT_RU_996, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_1, 0))
+#define HAL_EHT_RU_996_484_4 (HAL_RU(HAL_EHT_RU_484, HAL_80_2, 1) | \
+ HAL_RU(HAL_EHT_RU_996, HAL_80_3, 0))
+#define HAL_EHT_RU_996_484_5 (HAL_RU(HAL_EHT_RU_484, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_996, HAL_80_3, 0))
+#define HAL_EHT_RU_996_484_6 (HAL_RU(HAL_EHT_RU_996, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_3, 1))
+#define HAL_EHT_RU_996_484_7 (HAL_RU(HAL_EHT_RU_996, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_3, 0))
+
+/* MRU-996x2+484 */
+#define HAL_EHT_RU_996x2_484_0 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 1) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0))
+#define HAL_EHT_RU_996x2_484_1 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0))
+#define HAL_EHT_RU_996x2_484_2 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_1, 1) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0))
+#define HAL_EHT_RU_996x2_484_3 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0))
+#define HAL_EHT_RU_996x2_484_4 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_2, 1))
+#define HAL_EHT_RU_996x2_484_5 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_2, 0))
+#define HAL_EHT_RU_996x2_484_6 (HAL_RU(HAL_EHT_RU_484, HAL_80_1, 1) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_3, 0))
+#define HAL_EHT_RU_996x2_484_7 (HAL_RU(HAL_EHT_RU_484, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_3, 0))
+#define HAL_EHT_RU_996x2_484_8 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_2, 1) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_3, 0))
+#define HAL_EHT_RU_996x2_484_9 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_3, 0))
+#define HAL_EHT_RU_996x2_484_10 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_3, 1))
+#define HAL_EHT_RU_996x2_484_11 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_3, 0))
+
+/* MRU-996x3+484 */
+#define HAL_EHT_RU_996x3_484_0 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 1) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0))
+#define HAL_EHT_RU_996x3_484_1 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0))
+#define HAL_EHT_RU_996x3_484_2 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_1, 1) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0))
+#define HAL_EHT_RU_996x3_484_3 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0))
+#define HAL_EHT_RU_996x3_484_4 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_2, 1) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0))
+#define HAL_EHT_RU_996x3_484_5 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0))
+#define HAL_EHT_RU_996x3_484_6 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_3, 1))
+#define HAL_EHT_RU_996x3_484_7 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_3, 0))
+
+#define HAL_RU_PER80(ru_per80, num_80mhz, ru_idx_per80mhz) \
+ (HAL_RU(ru_per80, num_80mhz, ru_idx_per80mhz))
+
+#define RU_INVALID 0
+#define RU_26 1
+#define RU_52 2
+#define RU_106 4
+#define RU_242 9
+#define RU_484 18
+#define RU_996 37
+#define RU_2X996 74
+#define RU_3X996 111
+#define RU_4X996 148
+#define RU_52_26 (RU_52 + RU_26)
+#define RU_106_26 (RU_106 + RU_26)
+#define RU_484_242 (RU_484 + RU_242)
+#define RU_996_484 (RU_996 + RU_484)
+#define RU_996_484_242 (RU_996 + RU_484_242)
+#define RU_2X996_484 (RU_2X996 + RU_484)
+#define RU_3X996_484 (RU_3X996 + RU_484)
+
+enum ath12k_eht_ru_size {
+ ATH12K_EHT_RU_26,
+ ATH12K_EHT_RU_52,
+ ATH12K_EHT_RU_106,
+ ATH12K_EHT_RU_242,
+ ATH12K_EHT_RU_484,
+ ATH12K_EHT_RU_996,
+ ATH12K_EHT_RU_996x2,
+ ATH12K_EHT_RU_996x4,
+ ATH12K_EHT_RU_52_26,
+ ATH12K_EHT_RU_106_26,
+ ATH12K_EHT_RU_484_242,
+ ATH12K_EHT_RU_996_484,
+ ATH12K_EHT_RU_996_484_242,
+ ATH12K_EHT_RU_996x2_484,
+ ATH12K_EHT_RU_996x3,
+ ATH12K_EHT_RU_996x3_484,
+
+ /* Keep last */
+ ATH12K_EHT_RU_INVALID,
+};
+
+#define HAL_RX_RU_ALLOC_TYPE_MAX ATH12K_EHT_RU_INVALID
+
static inline
enum nl80211_he_ru_alloc ath12k_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones)
{
@@ -662,6 +1091,9 @@ enum nl80211_he_ru_alloc ath12k_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones)
case RU_996:
ret = NL80211_RATE_INFO_HE_RU_ALLOC_996;
break;
+ case RU_2X996:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+ break;
case RU_26:
fallthrough;
default:
diff --git a/drivers/net/wireless/ath/ath12k/hal_tx.h b/drivers/net/wireless/ath/ath12k/hal_tx.h
index 3cf5973771d7..eb065a79f6c6 100644
--- a/drivers/net/wireless/ath/ath12k/hal_tx.h
+++ b/drivers/net/wireless/ath/ath12k/hal_tx.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc.
+ * All rights reserved.
*/
#ifndef ATH12K_HAL_TX_H
@@ -63,7 +64,12 @@ struct hal_tx_status {
u8 try_cnt;
u8 tid;
u16 peer_id;
- u32 rate_stats;
+ enum hal_tx_rate_stats_pkt_type pkt_type;
+ enum hal_tx_rate_stats_sgi sgi;
+ enum ath12k_supported_bw bw;
+ u8 mcs;
+ u16 tones;
+ u8 ofdma;
};
#define HAL_TX_PHY_DESC_INFO0_BF_TYPE GENMASK(17, 16)
diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c
index b7b583fadb5a..a106ebed7870 100644
--- a/drivers/net/wireless/ath/ath12k/hw.c
+++ b/drivers/net/wireless/ath/ath12k/hw.c
@@ -543,7 +543,11 @@ static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = {
ATH12K_TX_RING_MASK_3,
},
.rx_mon_dest = {
- 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ ATH12K_RX_MON_RING_MASK_0,
+ ATH12K_RX_MON_RING_MASK_1,
+ ATH12K_RX_MON_RING_MASK_2,
},
.rx = {
0, 0, 0, 0,
@@ -1035,7 +1039,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.hal_params = &ath12k_hw_hal_params_qcn9274,
- .rxdma1_enable = false,
+ .rxdma1_enable = true,
.num_rxdma_per_pdev = 1,
.num_rxdma_dst_ring = 0,
.rx_mac_buf_ring = false,
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index 2d062b5904a8..dfa05f0ee6c9 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -15,10 +15,12 @@
#include "hw.h"
#include "dp_tx.h"
#include "dp_rx.h"
+#include "testmode.h"
#include "peer.h"
#include "debugfs.h"
#include "hif.h"
#include "wow.h"
+#include "debugfs_sta.h"
#define CHAN2G(_channel, _freq, _flags) { \
.band = NL80211_BAND_2GHZ, \
@@ -337,6 +339,82 @@ static const char *ath12k_mac_phymode_str(enum wmi_phy_mode mode)
return "<unknown>";
}
+u16 ath12k_mac_he_convert_tones_to_ru_tones(u16 tones)
+{
+ switch (tones) {
+ case 26:
+ return RU_26;
+ case 52:
+ return RU_52;
+ case 106:
+ return RU_106;
+ case 242:
+ return RU_242;
+ case 484:
+ return RU_484;
+ case 996:
+ return RU_996;
+ case (996 * 2):
+ return RU_2X996;
+ default:
+ return RU_26;
+ }
+}
+
+enum nl80211_eht_gi ath12k_mac_eht_gi_to_nl80211_eht_gi(u8 sgi)
+{
+ switch (sgi) {
+ case RX_MSDU_START_SGI_0_8_US:
+ return NL80211_RATE_INFO_EHT_GI_0_8;
+ case RX_MSDU_START_SGI_1_6_US:
+ return NL80211_RATE_INFO_EHT_GI_1_6;
+ case RX_MSDU_START_SGI_3_2_US:
+ return NL80211_RATE_INFO_EHT_GI_3_2;
+ default:
+ return NL80211_RATE_INFO_EHT_GI_0_8;
+ }
+}
+
+enum nl80211_eht_ru_alloc ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(u16 ru_tones)
+{
+ switch (ru_tones) {
+ case 26:
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_26;
+ case 52:
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_52;
+ case (52 + 26):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_52P26;
+ case 106:
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_106;
+ case (106 + 26):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_106P26;
+ case 242:
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_242;
+ case 484:
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_484;
+ case (484 + 242):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_484P242;
+ case 996:
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_996;
+ case (996 + 484):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_996P484;
+ case (996 + 484 + 242):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242;
+ case (2 * 996):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_2x996;
+ case (2 * 996 + 484):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484;
+ case (3 * 996):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_3x996;
+ case (3 * 996 + 484):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484;
+ case (4 * 996):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_4x996;
+ default:
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_26;
+ }
+}
+
enum rate_info_bw
ath12k_mac_bw_to_mac80211_bw(enum ath12k_supported_bw bw)
{
@@ -502,7 +580,20 @@ static int ath12k_mac_vif_link_chan(struct ieee80211_vif *vif, u8 link_id,
return 0;
}
-static struct ieee80211_bss_conf *
+static struct ath12k_link_vif *ath12k_mac_get_tx_arvif(struct ath12k_link_vif *arvif)
+{
+ struct ath12k_vif *tx_ahvif;
+
+ if (arvif->ahvif->vif->mbssid_tx_vif) {
+ tx_ahvif = ath12k_vif_to_ahvif(arvif->ahvif->vif->mbssid_tx_vif);
+ if (tx_ahvif)
+ return &tx_ahvif->deflink;
+ }
+
+ return NULL;
+}
+
+struct ieee80211_bss_conf *
ath12k_mac_get_link_bss_conf(struct ath12k_link_vif *arvif)
{
struct ieee80211_vif *vif = arvif->ahvif->vif;
@@ -675,7 +766,10 @@ struct ath12k *ath12k_mac_get_ar_by_pdev_id(struct ath12k_base *ab, u32 pdev_id)
return NULL;
for (i = 0; i < ab->num_radios; i++) {
- pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (ab->fw_mode == ATH12K_FIRMWARE_MODE_FTM)
+ pdev = &ab->pdevs[i];
+ else
+ pdev = rcu_dereference(ab->pdevs_active[i]);
if (pdev && pdev->pdev_id == pdev_id)
return (pdev->ar ? pdev->ar : NULL);
@@ -725,9 +819,9 @@ static struct ath12k *ath12k_get_ar_by_ctx(struct ieee80211_hw *hw,
return ath12k_mac_get_ar_by_chan(hw, ctx->def.chan);
}
-static struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- u8 link_id)
+struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u8 link_id)
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
@@ -1549,30 +1643,18 @@ static void ath12k_mac_set_arvif_ies(struct ath12k_link_vif *arvif, struct sk_bu
}
}
-static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_link_vif *arvif)
+static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_link_vif *arvif,
+ struct ath12k_link_vif *tx_arvif,
+ u8 bssid_index)
{
- struct ath12k_vif *ahvif = arvif->ahvif;
- struct ieee80211_bss_conf *bss_conf;
struct ath12k_wmi_bcn_tmpl_ema_arg ema_args;
struct ieee80211_ema_beacons *beacons;
- struct ath12k_link_vif *tx_arvif;
bool nontx_profile_found = false;
- struct ath12k_vif *tx_ahvif;
int ret = 0;
u8 i;
- bss_conf = ath12k_mac_get_link_bss_conf(arvif);
- if (!bss_conf) {
- ath12k_warn(arvif->ar->ab,
- "failed to get link bss conf to update bcn tmpl for vif %pM link %u\n",
- ahvif->vif->addr, arvif->link_id);
- return -ENOLINK;
- }
-
- tx_ahvif = ath12k_vif_to_ahvif(ahvif->vif->mbssid_tx_vif);
- tx_arvif = &tx_ahvif->deflink;
beacons = ieee80211_beacon_get_template_ema_list(ath12k_ar_to_hw(tx_arvif->ar),
- tx_ahvif->vif,
+ tx_arvif->ahvif->vif,
tx_arvif->link_id);
if (!beacons || !beacons->cnt) {
ath12k_warn(arvif->ar->ab,
@@ -1586,13 +1668,12 @@ static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_link_vif *arvif)
for (i = 0; i < beacons->cnt; i++) {
if (tx_arvif != arvif && !nontx_profile_found)
ath12k_mac_set_arvif_ies(arvif, beacons->bcn[i].skb,
- bss_conf->bssid_index,
+ bssid_index,
&nontx_profile_found);
ema_args.bcn_cnt = beacons->cnt;
ema_args.bcn_index = i;
- ret = ath12k_wmi_bcn_tmpl(tx_arvif->ar, tx_arvif->vdev_id,
- &beacons->bcn[i].offs,
+ ret = ath12k_wmi_bcn_tmpl(tx_arvif, &beacons->bcn[i].offs,
beacons->bcn[i].skb, &ema_args);
if (ret) {
ath12k_warn(tx_arvif->ar->ab,
@@ -1605,7 +1686,7 @@ static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_link_vif *arvif)
if (tx_arvif != arvif && !nontx_profile_found)
ath12k_warn(arvif->ar->ab,
"nontransmitted bssid index %u not found in beacon template\n",
- bss_conf->bssid_index);
+ bssid_index);
ieee80211_beacon_free_ema_list(beacons);
return ret;
@@ -1616,11 +1697,10 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_link_vif *arvif)
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
struct ieee80211_bss_conf *link_conf;
- struct ath12k_link_vif *tx_arvif = arvif;
+ struct ath12k_link_vif *tx_arvif;
struct ath12k *ar = arvif->ar;
struct ath12k_base *ab = ar->ab;
struct ieee80211_mutable_offsets offs = {};
- struct ath12k_vif *tx_ahvif = ahvif;
bool nontx_profile_found = false;
struct sk_buff *bcn;
int ret;
@@ -1635,17 +1715,20 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_link_vif *arvif)
return -ENOLINK;
}
- if (vif->mbssid_tx_vif) {
- tx_ahvif = ath12k_vif_to_ahvif(vif->mbssid_tx_vif);
- tx_arvif = &tx_ahvif->deflink;
+ tx_arvif = ath12k_mac_get_tx_arvif(arvif);
+ if (tx_arvif) {
if (tx_arvif != arvif && arvif->is_up)
return 0;
if (link_conf->ema_ap)
- return ath12k_mac_setup_bcn_tmpl_ema(arvif);
+ return ath12k_mac_setup_bcn_tmpl_ema(arvif, tx_arvif,
+ link_conf->bssid_index);
+ } else {
+ tx_arvif = arvif;
}
- bcn = ieee80211_beacon_get_template(ath12k_ar_to_hw(tx_arvif->ar), tx_ahvif->vif,
+ bcn = ieee80211_beacon_get_template(ath12k_ar_to_hw(tx_arvif->ar),
+ tx_arvif->ahvif->vif,
&offs, tx_arvif->link_id);
if (!bcn) {
ath12k_warn(ab, "failed to get beacon template from mac80211\n");
@@ -1686,7 +1769,7 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_link_vif *arvif)
}
}
- ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn, NULL);
+ ret = ath12k_wmi_bcn_tmpl(arvif, &offs, bcn, NULL);
if (ret)
ath12k_warn(ab, "failed to submit beacon template command: %d\n",
@@ -1702,6 +1785,7 @@ static void ath12k_control_beaconing(struct ath12k_link_vif *arvif,
{
struct ath12k_wmi_vdev_up_params params = {};
struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ath12k_link_vif *tx_arvif;
struct ath12k *ar = arvif->ar;
int ret;
@@ -1732,11 +1816,9 @@ static void ath12k_control_beaconing(struct ath12k_link_vif *arvif,
params.vdev_id = arvif->vdev_id;
params.aid = ahvif->aid;
params.bssid = arvif->bssid;
- if (ahvif->vif->mbssid_tx_vif) {
- struct ath12k_vif *tx_ahvif =
- ath12k_vif_to_ahvif(ahvif->vif->mbssid_tx_vif);
- struct ath12k_link_vif *tx_arvif = &tx_ahvif->deflink;
+ tx_arvif = ath12k_mac_get_tx_arvif(arvif);
+ if (tx_arvif) {
params.tx_bssid = tx_arvif->bssid;
params.nontx_profile_idx = info->bssid_index;
params.nontx_profile_cnt = 1 << info->bssid_indicator;
@@ -3116,6 +3198,7 @@ static void ath12k_peer_assoc_prepare(struct ath12k *ar,
ath12k_peer_assoc_h_smps(arsta, arg);
ath12k_peer_assoc_h_mlo(arsta, arg);
+ arsta->peer_nss = arg->peer_nss;
/* TODO: amsdu_disable req? */
}
@@ -3138,6 +3221,37 @@ static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_link_vif *arv
ath12k_smps_map[smps]);
}
+static u32 ath12k_mac_ieee80211_sta_bw_to_wmi(struct ath12k *ar,
+ struct ieee80211_link_sta *link_sta)
+{
+ u32 bw;
+
+ switch (link_sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_20:
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ bw = WMI_PEER_CHWIDTH_40MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ bw = WMI_PEER_CHWIDTH_80MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_160:
+ bw = WMI_PEER_CHWIDTH_160MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_320:
+ bw = WMI_PEER_CHWIDTH_320MHZ;
+ break;
+ default:
+ ath12k_warn(ar->ab, "Invalid bandwidth %d for link station %pM\n",
+ link_sta->bandwidth, link_sta->addr);
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ }
+
+ return bw;
+}
+
static void ath12k_bss_assoc(struct ath12k *ar,
struct ath12k_link_vif *arvif,
struct ieee80211_bss_conf *bss_conf)
@@ -3358,12 +3472,178 @@ static void ath12k_recalculate_mgmt_rate(struct ath12k *ar,
ath12k_warn(ar->ab, "failed to set beacon tx rate %d\n", ret);
}
+static void ath12k_mac_init_arvif(struct ath12k_vif *ahvif,
+ struct ath12k_link_vif *arvif, int link_id)
+{
+ struct ath12k_hw *ah = ahvif->ah;
+ u8 _link_id;
+ int i;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ if (WARN_ON(!arvif))
+ return;
+
+ if (WARN_ON(link_id >= ATH12K_NUM_MAX_LINKS))
+ return;
+
+ if (link_id < 0)
+ _link_id = 0;
+ else
+ _link_id = link_id;
+
+ arvif->ahvif = ahvif;
+ arvif->link_id = _link_id;
+
+ INIT_LIST_HEAD(&arvif->list);
+ INIT_DELAYED_WORK(&arvif->connection_loss_work,
+ ath12k_mac_vif_sta_connection_loss_work);
+
+ for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
+ arvif->bitrate_mask.control[i].legacy = 0xffffffff;
+ memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].ht_mcs));
+ memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+ }
+
+ /* Handle MLO related assignments */
+ if (link_id >= 0) {
+ rcu_assign_pointer(ahvif->link[arvif->link_id], arvif);
+ ahvif->links_map |= BIT(_link_id);
+ }
+
+ ath12k_generic_dbg(ATH12K_DBG_MAC,
+ "mac init link arvif (link_id %d%s) for vif %pM. links_map 0x%x",
+ _link_id, (link_id < 0) ? " deflink" : "", ahvif->vif->addr,
+ ahvif->links_map);
+}
+
+static void ath12k_mac_remove_link_interface(struct ieee80211_hw *hw,
+ struct ath12k_link_vif *arvif)
+{
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ath12k_hw *ah = hw->priv;
+ struct ath12k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ cancel_delayed_work_sync(&arvif->connection_loss_work);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac remove link interface (vdev %d link id %d)",
+ arvif->vdev_id, arvif->link_id);
+
+ if (ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ ret = ath12k_peer_delete(ar, arvif->vdev_id, arvif->bssid);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to submit AP self-peer removal on vdev %d link id %d: %d",
+ arvif->vdev_id, arvif->link_id, ret);
+ }
+ ath12k_mac_vdev_delete(ar, arvif);
+}
+
+static struct ath12k_link_vif *ath12k_mac_assign_link_vif(struct ath12k_hw *ah,
+ struct ieee80211_vif *vif,
+ u8 link_id)
+{
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_link_vif *arvif;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
+ if (arvif)
+ return arvif;
+
+ if (!vif->valid_links) {
+ /* Use deflink for Non-ML VIFs and mark the link id as 0
+ */
+ link_id = 0;
+ arvif = &ahvif->deflink;
+ } else {
+ /* If this is the first link arvif being created for an ML VIF,
+ * use the preallocated deflink memory, except for scan arvifs.
+ */
+ if (!ahvif->links_map && link_id != ATH12K_DEFAULT_SCAN_LINK) {
+ arvif = &ahvif->deflink;
+ } else {
+ arvif = (struct ath12k_link_vif *)
+ kzalloc(sizeof(struct ath12k_link_vif), GFP_KERNEL);
+ if (!arvif)
+ return NULL;
+ }
+ }
+
+ ath12k_mac_init_arvif(ahvif, arvif, link_id);
+
+ return arvif;
+}
+
+static void ath12k_mac_unassign_link_vif(struct ath12k_link_vif *arvif)
+{
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ath12k_hw *ah = ahvif->ah;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ rcu_assign_pointer(ahvif->link[arvif->link_id], NULL);
+ synchronize_rcu();
+ ahvif->links_map &= ~BIT(arvif->link_id);
+
+ if (arvif != &ahvif->deflink)
+ kfree(arvif);
+ else
+ memset(arvif, 0, sizeof(*arvif));
+}
+
static int
ath12k_mac_op_change_vif_links(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u16 old_links, u16 new_links,
struct ieee80211_bss_conf *ol[IEEE80211_MLD_MAX_NUM_LINKS])
{
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ unsigned long to_remove = old_links & ~new_links;
+ unsigned long to_add = ~old_links & new_links;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k_link_vif *arvif;
+ u8 link_id;
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+ ath12k_generic_dbg(ATH12K_DBG_MAC,
+ "mac vif link changed for MLD %pM old_links 0x%x new_links 0x%x\n",
+ vif->addr, old_links, new_links);
+
+ for_each_set_bit(link_id, &to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ /* mac80211 wants to add a link that the driver already has.
+ * This should ideally not happen.
+ */
+ if (WARN_ON(arvif))
+ return -EINVAL;
+
+ arvif = ath12k_mac_assign_link_vif(ah, vif, link_id);
+ if (WARN_ON(!arvif))
+ return -EINVAL;
+ }
+
+ for_each_set_bit(link_id, &to_remove, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ if (WARN_ON(!arvif))
+ return -EINVAL;
+
+ if (!arvif->is_created)
+ continue;
+
+ if (WARN_ON(!arvif->ar))
+ return -EINVAL;
+
+ ath12k_mac_remove_link_interface(hw, arvif);
+ ath12k_mac_unassign_link_vif(arvif);
+ }
+
return 0;
}
@@ -3862,109 +4142,6 @@ static void ath12k_mac_op_link_info_changed(struct ieee80211_hw *hw,
ath12k_mac_bss_info_changed(ar, arvif, info, changed);
}
-static struct ath12k_link_vif *ath12k_mac_assign_link_vif(struct ath12k_hw *ah,
- struct ieee80211_vif *vif,
- u8 link_id)
-{
- struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
- struct ath12k_link_vif *arvif;
- int i;
-
- lockdep_assert_wiphy(ah->hw->wiphy);
-
- arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
- if (arvif)
- return arvif;
-
- if (!vif->valid_links) {
- /* Use deflink for Non-ML VIFs and mark the link id as 0
- */
- link_id = 0;
- arvif = &ahvif->deflink;
- } else {
- /* If this is the first link arvif being created for an ML VIF
- * use the preallocated deflink memory except for scan arvifs
- */
- if (!ahvif->links_map && link_id != ATH12K_DEFAULT_SCAN_LINK) {
- arvif = &ahvif->deflink;
- } else {
- arvif = (struct ath12k_link_vif *)
- kzalloc(sizeof(struct ath12k_link_vif), GFP_KERNEL);
- if (!arvif)
- return NULL;
- }
- }
-
- arvif->ahvif = ahvif;
- arvif->link_id = link_id;
- ahvif->links_map |= BIT(link_id);
-
- INIT_LIST_HEAD(&arvif->list);
- INIT_DELAYED_WORK(&arvif->connection_loss_work,
- ath12k_mac_vif_sta_connection_loss_work);
-
- for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
- arvif->bitrate_mask.control[i].legacy = 0xffffffff;
- memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
- sizeof(arvif->bitrate_mask.control[i].ht_mcs));
- memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
- sizeof(arvif->bitrate_mask.control[i].vht_mcs));
- }
-
- /* Allocate Default Queue now and reassign during actual vdev create */
- vif->cab_queue = ATH12K_HW_DEFAULT_QUEUE;
- for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
- vif->hw_queue[i] = ATH12K_HW_DEFAULT_QUEUE;
-
- vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
-
- rcu_assign_pointer(ahvif->link[arvif->link_id], arvif);
- ahvif->links_map |= BIT(link_id);
- synchronize_rcu();
- return arvif;
-}
-
-static void ath12k_mac_unassign_link_vif(struct ath12k_link_vif *arvif)
-{
- struct ath12k_vif *ahvif = arvif->ahvif;
- struct ath12k_hw *ah = ahvif->ah;
-
- lockdep_assert_wiphy(ah->hw->wiphy);
-
- rcu_assign_pointer(ahvif->link[arvif->link_id], NULL);
- synchronize_rcu();
- ahvif->links_map &= ~BIT(arvif->link_id);
-
- if (arvif != &ahvif->deflink)
- kfree(arvif);
- else
- memset(arvif, 0, sizeof(*arvif));
-}
-
-static void ath12k_mac_remove_link_interface(struct ieee80211_hw *hw,
- struct ath12k_link_vif *arvif)
-{
- struct ath12k_vif *ahvif = arvif->ahvif;
- struct ath12k_hw *ah = hw->priv;
- struct ath12k *ar = arvif->ar;
- int ret;
-
- lockdep_assert_wiphy(ah->hw->wiphy);
-
- cancel_delayed_work_sync(&arvif->connection_loss_work);
-
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac remove link interface (vdev %d link id %d)",
- arvif->vdev_id, arvif->link_id);
-
- if (ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
- ret = ath12k_peer_delete(ar, arvif->vdev_id, arvif->bssid);
- if (ret)
- ath12k_warn(ar->ab, "failed to submit AP self-peer removal on vdev %d link id %d: %d",
- arvif->vdev_id, arvif->link_id, ret);
- }
- ath12k_mac_vdev_delete(ar, arvif);
-}
-
static struct ath12k*
ath12k_mac_select_scan_device(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -4534,9 +4711,6 @@ static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd,
struct ath12k_link_sta *arsta,
struct ieee80211_key_conf *key)
{
- struct ath12k_vif *ahvif = arvif->ahvif;
- struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
- struct ieee80211_bss_conf *link_conf;
struct ieee80211_sta *sta = NULL;
struct ath12k_base *ab = ar->ab;
struct ath12k_peer *peer;
@@ -4553,19 +4727,10 @@ static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd,
if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags))
return 1;
- link_conf = ath12k_mac_get_link_bss_conf(arvif);
- if (!link_conf) {
- ath12k_warn(ab, "unable to access bss link conf in set key for vif %pM link %u\n",
- vif->addr, arvif->link_id);
- return -ENOLINK;
- }
-
if (sta)
peer_addr = arsta->addr;
- else if (ahvif->vdev_type == WMI_VDEV_TYPE_STA)
- peer_addr = link_conf->bssid;
else
- peer_addr = link_conf->addr;
+ peer_addr = arvif->bssid;
key->hw_key_idx = key->keyidx;
@@ -4909,6 +5074,11 @@ static int ath12k_mac_station_assoc(struct ath12k *ar,
return -EINVAL;
}
+ spin_lock_bh(&ar->data_lock);
+ arsta->bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, link_sta);
+ arsta->bw_prev = link_sta->bandwidth;
+ spin_unlock_bh(&ar->data_lock);
+
if (link_sta->vht_cap.vht_supported && num_vht_rates == 1) {
ret = ath12k_mac_set_peer_vht_fixed_rate(arvif, arsta, mask,
band);
@@ -5403,6 +5573,7 @@ static int ath12k_mac_station_add(struct ath12k *ar,
}
}
+ ewma_avg_rssi_init(&arsta->avg_rssi);
return 0;
free_peer:
@@ -5415,37 +5586,6 @@ exit:
return ret;
}
-static u32 ath12k_mac_ieee80211_sta_bw_to_wmi(struct ath12k *ar,
- struct ieee80211_sta *sta)
-{
- u32 bw = WMI_PEER_CHWIDTH_20MHZ;
-
- switch (sta->deflink.bandwidth) {
- case IEEE80211_STA_RX_BW_20:
- bw = WMI_PEER_CHWIDTH_20MHZ;
- break;
- case IEEE80211_STA_RX_BW_40:
- bw = WMI_PEER_CHWIDTH_40MHZ;
- break;
- case IEEE80211_STA_RX_BW_80:
- bw = WMI_PEER_CHWIDTH_80MHZ;
- break;
- case IEEE80211_STA_RX_BW_160:
- bw = WMI_PEER_CHWIDTH_160MHZ;
- break;
- case IEEE80211_STA_RX_BW_320:
- bw = WMI_PEER_CHWIDTH_320MHZ;
- break;
- default:
- ath12k_warn(ar->ab, "Invalid bandwidth %d in rc update for %pM\n",
- sta->deflink.bandwidth, sta->addr);
- bw = WMI_PEER_CHWIDTH_20MHZ;
- break;
- }
-
- return bw;
-}
-
static int ath12k_mac_assign_link_sta(struct ath12k_hw *ah,
struct ath12k_sta *ahsta,
struct ath12k_link_sta *arsta,
@@ -5529,7 +5669,6 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
enum ieee80211_sta_state new_state)
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
- struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
struct ath12k *ar = arvif->ar;
int ret = 0;
@@ -5572,13 +5711,6 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
ath12k_warn(ar->ab, "Failed to associate station: %pM\n",
arsta->addr);
- spin_lock_bh(&ar->data_lock);
-
- arsta->bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
- arsta->bw_prev = sta->deflink.bandwidth;
-
- spin_unlock_bh(&ar->data_lock);
-
/* IEEE80211_STA_ASSOC -> IEEE80211_STA_AUTHORIZED: set peer status as
* authorized
*/
@@ -5846,7 +5978,7 @@ static void ath12k_mac_op_link_sta_rc_update(struct ieee80211_hw *hw,
spin_lock_bh(&ar->data_lock);
if (changed & IEEE80211_RC_BW_CHANGED) {
- bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
+ bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, link_sta);
arsta->bw_prev = arsta->bw;
arsta->bw = bw;
}
@@ -6674,7 +6806,8 @@ static void ath12k_mac_copy_eht_cap(struct ath12k *ar,
memset(eht_cap, 0, sizeof(struct ieee80211_sta_eht_cap));
- if (!(test_bit(WMI_TLV_SERVICE_11BE, ar->ab->wmi_ab.svc_map)))
+ if (!(test_bit(WMI_TLV_SERVICE_11BE, ar->ab->wmi_ab.svc_map)) ||
+ ath12k_acpi_get_disable_11be(ar->ab))
return;
eht_cap->has_eht = true;
@@ -7071,6 +7204,22 @@ static void ath12k_mac_add_p2p_noa_ie(struct ath12k *ar,
}
/* Note: called under rcu_read_lock() */
+static void ath12k_mlo_mcast_update_tx_link_address(struct ieee80211_vif *vif,
+ u8 link_id, struct sk_buff *skb,
+ u32 info_flags)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_bss_conf *bss_conf;
+
+ if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
+ return;
+
+ bss_conf = rcu_dereference(vif->link_conf[link_id]);
+ if (bss_conf)
+ ether_addr_copy(hdr->addr2, bss_conf->addr);
+}
+
+/* Note: called under rcu_read_lock() */
static u8 ath12k_mac_get_tx_link(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
u8 link, struct sk_buff *skb, u32 info_flags)
{
@@ -7181,9 +7330,16 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_key_conf *key = info->control.hw_key;
struct ieee80211_sta *sta = control->sta;
+ struct ath12k_link_vif *tmp_arvif;
u32 info_flags = info->flags;
- struct ath12k *ar;
+ struct sk_buff *msdu_copied;
+ struct ath12k *ar, *tmp_ar;
+ struct ath12k_peer *peer;
+ unsigned long links_map;
+ bool is_mcast = false;
+ struct ethhdr *eth;
bool is_prb_rsp;
+ u16 mcbc_gsn;
u8 link_id;
int ret;
@@ -7220,6 +7376,9 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
+ eth = (struct ethhdr *)skb->data;
+ is_mcast = is_multicast_ether_addr(eth->h_dest);
+
skb_cb->flags |= ATH12K_SKB_HW_80211_ENCAP;
} else if (ieee80211_is_mgmt(hdr->frame_control)) {
ret = ath12k_mac_mgmt_tx(ar, skb, is_prb_rsp);
@@ -7231,14 +7390,99 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
return;
}
+ if (!(info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP))
+ is_mcast = is_multicast_ether_addr(hdr->addr1);
+
/* This is case only for P2P_GO */
if (vif->type == NL80211_IFTYPE_AP && vif->p2p)
ath12k_mac_add_p2p_noa_ie(ar, vif, skb, is_prb_rsp);
- ret = ath12k_dp_tx(ar, arvif, skb);
- if (ret) {
- ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret);
- ieee80211_free_txskb(hw, skb);
+ if (!vif->valid_links || !is_mcast ||
+ test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags)) {
+ ret = ath12k_dp_tx(ar, arvif, skb, false, 0);
+ if (unlikely(ret)) {
+ ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret);
+ ieee80211_free_txskb(ar->ah->hw, skb);
+ return;
+ }
+ } else {
+ mcbc_gsn = atomic_inc_return(&ahvif->mcbc_gsn) & 0xfff;
+
+ links_map = ahvif->links_map;
+ for_each_set_bit(link_id, &links_map,
+ IEEE80211_MLD_MAX_NUM_LINKS) {
+ tmp_arvif = rcu_dereference(ahvif->link[link_id]);
+ if (!tmp_arvif || !tmp_arvif->is_up)
+ continue;
+
+ tmp_ar = tmp_arvif->ar;
+ msdu_copied = skb_copy(skb, GFP_ATOMIC);
+ if (!msdu_copied) {
+ ath12k_err(ar->ab,
+ "skb copy failure link_id 0x%X vdevid 0x%X\n",
+ link_id, tmp_arvif->vdev_id);
+ continue;
+ }
+
+ ath12k_mlo_mcast_update_tx_link_address(vif, link_id,
+ msdu_copied,
+ info_flags);
+
+ skb_cb = ATH12K_SKB_CB(msdu_copied);
+ info = IEEE80211_SKB_CB(msdu_copied);
+ skb_cb->link_id = link_id;
+
+ /* For open mode, skip peer find logic */
+ if (unlikely(ahvif->key_cipher == WMI_CIPHER_NONE))
+ goto skip_peer_find;
+
+ spin_lock_bh(&tmp_ar->ab->base_lock);
+ peer = ath12k_peer_find_by_addr(tmp_ar->ab, tmp_arvif->bssid);
+ if (!peer) {
+ spin_unlock_bh(&tmp_ar->ab->base_lock);
+ ath12k_warn(tmp_ar->ab,
+ "failed to find peer for vdev_id 0x%X addr %pM link_map 0x%X\n",
+ tmp_arvif->vdev_id, tmp_arvif->bssid,
+ ahvif->links_map);
+ dev_kfree_skb_any(msdu_copied);
+ continue;
+ }
+
+ key = peer->keys[peer->mcast_keyidx];
+ if (key) {
+ skb_cb->cipher = key->cipher;
+ skb_cb->flags |= ATH12K_SKB_CIPHER_SET;
+ info->control.hw_key = key;
+
+ hdr = (struct ieee80211_hdr *)msdu_copied->data;
+ if (!ieee80211_has_protected(hdr->frame_control))
+ hdr->frame_control |=
+ cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ }
+ spin_unlock_bh(&tmp_ar->ab->base_lock);
+
+skip_peer_find:
+ ret = ath12k_dp_tx(tmp_ar, tmp_arvif,
+ msdu_copied, true, mcbc_gsn);
+ if (unlikely(ret)) {
+ if (ret == -ENOMEM) {
+ /* Drops are expected during heavy multicast
+ * frame flood. Print at debug log
+ * level to avoid flooding the console.
+ */
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "failed to transmit frame %d\n",
+ ret);
+ } else {
+ ath12k_warn(ar->ab,
+ "failed to transmit frame %d\n",
+ ret);
+ }
+
+ dev_kfree_skb_any(msdu_copied);
+ }
+ }
+ ieee80211_free_txskb(ar->ah->hw, skb);
}
}
@@ -7255,8 +7499,40 @@ void ath12k_mac_drain_tx(struct ath12k *ar)
static int ath12k_mac_config_mon_status_default(struct ath12k *ar, bool enable)
{
- return -EOPNOTSUPP;
- /* TODO: Need to support new monitor mode */
+ struct htt_rx_ring_tlv_filter tlv_filter = {};
+ struct ath12k_base *ab = ar->ab;
+ u32 ring_id, i;
+ int ret = 0;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (!ab->hw_params->rxdma1_enable)
+ return ret;
+
+ if (enable) {
+ tlv_filter = ath12k_mac_mon_status_filter_default;
+
+ if (ath12k_debugfs_rx_filter(ar))
+ tlv_filter.rx_filter = ath12k_debugfs_rx_filter(ar);
+ } else {
+ tlv_filter.rxmon_disable = true;
+ }
+
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id;
+ ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id,
+ ar->dp.mac_id + i,
+ HAL_RXDMA_MONITOR_DST,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ if (ret) {
+ ath12k_err(ab,
+ "failed to setup filter for monitor buf %d\n",
+ ret);
+ }
+ }
+
+ return ret;
}
static int ath12k_mac_start(struct ath12k *ar)
@@ -7363,9 +7639,14 @@ err:
static void ath12k_drain_tx(struct ath12k_hw *ah)
{
- struct ath12k *ar;
+ struct ath12k *ar = ah->radio;
int i;
+ if (ath12k_ftm_mode) {
+ ath12k_err(ar->ab, "failed to start mac operations in ftm mode\n");
+ return;
+ }
+
lockdep_assert_wiphy(ah->hw->wiphy);
for_each_ar(ah, ar, i)
@@ -7394,6 +7675,7 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw)
case ATH12K_HW_STATE_RESTARTED:
case ATH12K_HW_STATE_WEDGED:
case ATH12K_HW_STATE_ON:
+ case ATH12K_HW_STATE_TM:
ah->state = ATH12K_HW_STATE_OFF;
WARN_ON(1);
@@ -7561,14 +7843,9 @@ static int ath12k_mac_setup_vdev_params_mbssid(struct ath12k_link_vif *arvif,
u32 *flags, u32 *tx_vdev_id)
{
struct ath12k_vif *ahvif = arvif->ahvif;
- struct ieee80211_vif *tx_vif = ahvif->vif->mbssid_tx_vif;
struct ieee80211_bss_conf *link_conf;
struct ath12k *ar = arvif->ar;
struct ath12k_link_vif *tx_arvif;
- struct ath12k_vif *tx_ahvif;
-
- if (!tx_vif)
- return 0;
link_conf = ath12k_mac_get_link_bss_conf(arvif);
if (!link_conf) {
@@ -7577,11 +7854,13 @@ static int ath12k_mac_setup_vdev_params_mbssid(struct ath12k_link_vif *arvif,
return -ENOLINK;
}
- tx_ahvif = ath12k_vif_to_ahvif(tx_vif);
- tx_arvif = &tx_ahvif->deflink;
+ tx_arvif = ath12k_mac_get_tx_arvif(arvif);
+ if (!tx_arvif)
+ return 0;
if (link_conf->nontransmitted) {
- if (ar->ah->hw->wiphy != ieee80211_vif_to_wdev(tx_vif)->wiphy)
+ if (ath12k_ar_to_hw(ar)->wiphy !=
+ ath12k_ar_to_hw(tx_arvif->ar)->wiphy)
return -EINVAL;
*flags = WMI_VDEV_MBSSID_FLAGS_NON_TRANSMIT_AP;
@@ -8066,6 +8345,7 @@ static void ath12k_mac_vif_cache_flush(struct ath12k *ar, struct ath12k_link_vif
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
struct ath12k_vif_cache *cache = ahvif->cache[arvif->link_id];
struct ath12k_base *ab = ar->ab;
+ struct ieee80211_bss_conf *link_conf;
int ret;
@@ -8084,7 +8364,13 @@ static void ath12k_mac_vif_cache_flush(struct ath12k *ar, struct ath12k_link_vif
}
if (cache->bss_conf_changed) {
- ath12k_mac_bss_info_changed(ar, arvif, &vif->bss_conf,
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in cache flush for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return;
+ }
+ ath12k_mac_bss_info_changed(ar, arvif, link_conf,
cache->bss_conf_changed);
}
@@ -8207,19 +8493,8 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
ahvif->ah = ah;
ahvif->vif = vif;
arvif = &ahvif->deflink;
- arvif->ahvif = ahvif;
- INIT_LIST_HEAD(&arvif->list);
- INIT_DELAYED_WORK(&arvif->connection_loss_work,
- ath12k_mac_vif_sta_connection_loss_work);
-
- for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
- arvif->bitrate_mask.control[i].legacy = 0xffffffff;
- memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
- sizeof(arvif->bitrate_mask.control[i].ht_mcs));
- memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
- sizeof(arvif->bitrate_mask.control[i].vht_mcs));
- }
+ ath12k_mac_init_arvif(ahvif, arvif, -1);
/* Allocate Default Queue now and reassign during actual vdev create */
vif->cab_queue = ATH12K_HW_DEFAULT_QUEUE;
@@ -8381,29 +8656,6 @@ static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
FIF_PROBE_REQ | \
FIF_FCSFAIL)
-static void ath12k_mac_configure_filter(struct ath12k *ar,
- unsigned int total_flags)
-{
- bool reset_flag;
- int ret;
-
- lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
-
- ar->filter_flags = total_flags;
-
- /* For monitor mode */
- reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC);
-
- ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, reset_flag);
- if (ret)
- ath12k_warn(ar->ab,
- "fail to set monitor filter: %d\n", ret);
-
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
- "total_flags:0x%x, reset_flag:%d\n",
- total_flags, reset_flag);
-}
-
static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags,
@@ -8417,7 +8669,7 @@ static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
ar = ath12k_ah_to_ar(ah, 0);
*total_flags &= SUPPORTED_FILTERS;
- ath12k_mac_configure_filter(ar, *total_flags);
+ ar->filter_flags = *total_flags;
}
static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
@@ -8677,6 +8929,9 @@ ath12k_mac_mlo_get_vdev_args(struct ath12k_link_vif *arvif,
if (arvif == arvif_p)
continue;
+ if (!arvif_p->is_created)
+ continue;
+
link_conf = wiphy_dereference(ahvif->ah->hw->wiphy,
ahvif->vif->link_conf[arvif_p->link_id]);
@@ -8982,9 +9237,9 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
int n_vifs)
{
struct ath12k_wmi_vdev_up_params params = {};
+ struct ath12k_link_vif *arvif, *tx_arvif;
struct ieee80211_bss_conf *link_conf;
struct ath12k_base *ab = ar->ab;
- struct ath12k_link_vif *arvif;
struct ieee80211_vif *vif;
struct ath12k_vif *ahvif;
u8 link_id;
@@ -9052,11 +9307,9 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
params.vdev_id = arvif->vdev_id;
params.aid = ahvif->aid;
params.bssid = arvif->bssid;
- if (vif->mbssid_tx_vif) {
- struct ath12k_vif *tx_ahvif =
- ath12k_vif_to_ahvif(vif->mbssid_tx_vif);
- struct ath12k_link_vif *tx_arvif = &tx_ahvif->deflink;
+ tx_arvif = ath12k_mac_get_tx_arvif(arvif);
+ if (tx_arvif) {
params.tx_bssid = tx_arvif->bssid;
params.nontx_profile_idx = link_conf->bssid_index;
params.nontx_profile_cnt = 1 << link_conf->bssid_indicator;
@@ -9322,9 +9575,6 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
if (ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
ar->num_started_vdevs == 1 && ar->monitor_vdev_created)
ath12k_mac_monitor_stop(ar);
-
- ath12k_mac_remove_link_interface(hw, arvif);
- ath12k_mac_unassign_link_vif(arvif);
}
static int
@@ -10017,6 +10267,40 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
return 0;
}
+static int ath12k_mac_get_fw_stats(struct ath12k *ar, u32 pdev_id,
+ u32 vdev_id, u32 stats_id)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+ unsigned long time_left;
+ int ret;
+
+ guard(mutex)(&ah->hw_mutex);
+
+ if (ah->state != ATH12K_HW_STATE_ON)
+ return -ENETDOWN;
+
+ reinit_completion(&ar->fw_stats_complete);
+
+ ret = ath12k_wmi_send_stats_request_cmd(ar, stats_id, vdev_id, pdev_id);
+
+ if (ret) {
+ ath12k_warn(ab, "failed to request fw stats: %d\n", ret);
+ return ret;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "get fw stat pdev id %d vdev id %d stats id 0x%x\n",
+ pdev_id, vdev_id, stats_id);
+
+ time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ);
+
+ if (!time_left)
+ ath12k_warn(ab, "time out while waiting for get fw stats\n");
+
+ return ret;
+}
+
static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -10024,10 +10308,19 @@ static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
{
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
struct ath12k_link_sta *arsta;
+ struct ath12k *ar;
+ s8 signal;
+ bool db2dbm;
lockdep_assert_wiphy(hw->wiphy);
arsta = &ahsta->deflink;
+ ar = ath12k_get_ar_by_vif(hw, vif, arsta->link_id);
+ if (!ar)
+ return;
+
+ db2dbm = test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ar->ab->wmi_ab.svc_map);
sinfo->rx_duration = arsta->rx_duration;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
@@ -10035,25 +10328,43 @@ static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
sinfo->tx_duration = arsta->tx_duration;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION);
- if (!arsta->txrate.legacy && !arsta->txrate.nss)
- return;
-
- if (arsta->txrate.legacy) {
- sinfo->txrate.legacy = arsta->txrate.legacy;
- } else {
- sinfo->txrate.mcs = arsta->txrate.mcs;
- sinfo->txrate.nss = arsta->txrate.nss;
- sinfo->txrate.bw = arsta->txrate.bw;
- sinfo->txrate.he_gi = arsta->txrate.he_gi;
- sinfo->txrate.he_dcm = arsta->txrate.he_dcm;
- sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc;
+ if (arsta->txrate.legacy || arsta->txrate.nss) {
+ if (arsta->txrate.legacy) {
+ sinfo->txrate.legacy = arsta->txrate.legacy;
+ } else {
+ sinfo->txrate.mcs = arsta->txrate.mcs;
+ sinfo->txrate.nss = arsta->txrate.nss;
+ sinfo->txrate.bw = arsta->txrate.bw;
+ sinfo->txrate.he_gi = arsta->txrate.he_gi;
+ sinfo->txrate.he_dcm = arsta->txrate.he_dcm;
+ sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc;
+ sinfo->txrate.eht_gi = arsta->txrate.eht_gi;
+ sinfo->txrate.eht_ru_alloc = arsta->txrate.eht_ru_alloc;
+ }
+ sinfo->txrate.flags = arsta->txrate.flags;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
}
- sinfo->txrate.flags = arsta->txrate.flags;
- sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
/* TODO: Use real NF instead of default one. */
- sinfo->signal = arsta->rssi_comb + ATH12K_DEFAULT_NOISE_FLOOR;
- sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
+ signal = arsta->rssi_comb;
+
+ if (!signal &&
+ ahsta->ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ !(ath12k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0,
+ WMI_REQUEST_VDEV_STAT)))
+ signal = arsta->rssi_beacon;
+
+ if (signal) {
+ sinfo->signal = db2dbm ? signal : signal + ATH12K_DEFAULT_NOISE_FLOOR;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
+ }
+
+ sinfo->signal_avg = ewma_avg_rssi_read(&arsta->avg_rssi);
+
+ if (!db2dbm)
+ sinfo->signal_avg += ATH12K_DEFAULT_NOISE_FLOOR;
+
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
}
static int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
@@ -10299,6 +10610,10 @@ static const struct ieee80211_ops ath12k_ops = {
.resume = ath12k_wow_op_resume,
.set_wakeup = ath12k_wow_op_set_wakeup,
#endif
+ CFG80211_TESTMODE_CMD(ath12k_tm_cmd)
+#ifdef CONFIG_ATH12K_DEBUGFS
+ .link_sta_add_debugfs = ath12k_debugfs_link_sta_op_add,
+#endif
};
static void ath12k_mac_update_ch_list(struct ath12k *ar,
@@ -10950,6 +11265,8 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
ath12k_iftypes_ext_capa[2].eml_capabilities = cap->eml_cap;
ath12k_iftypes_ext_capa[2].mld_capa_and_ops = cap->mld_cap;
wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+
+ ieee80211_hw_set(hw, MLO_MCAST_MULTI_LINK_TX);
}
hw->queues = ATH12K_HW_MAX_QUEUES;
@@ -11033,6 +11350,8 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
ath12k_debugfs_register(ar);
}
+ init_completion(&ar->fw_stats_complete);
+
return 0;
err_unregister_hw:
@@ -11133,6 +11452,9 @@ static int __ath12k_mac_mlo_setup(struct ath12k *ar)
}
}
+ if (num_link == 0)
+ return 0;
+
mlo.group_id = cpu_to_le32(ag->id);
mlo.partner_link_id = partner_link_id;
mlo.num_partner_links = num_link;
@@ -11162,10 +11484,16 @@ static int __ath12k_mac_mlo_teardown(struct ath12k *ar)
{
struct ath12k_base *ab = ar->ab;
int ret;
+ u8 num_link;
if (test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags))
return 0;
+ num_link = ath12k_get_num_partner_link(ar);
+
+ if (num_link == 0)
+ return 0;
+
ret = ath12k_wmi_mlo_teardown(ar);
if (ret) {
ath12k_warn(ab, "failed to send MLO teardown WMI command for pdev %d: %d\n",
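[Editor's sketch] The ath12k_mac_op_tx() hunk above fans a multicast frame out to every active MLO link: it walks ahvif->links_map, makes one skb_copy() per link that is up, rewrites the transmitter address for that link, and hands each copy to ath12k_dp_tx() with a shared 12-bit group sequence number. The standalone C program below only illustrates that fan-out shape; the frame type and the frame_copy()/link_tx() helpers are hypothetical stand-ins for skb/skb_copy()/ath12k_dp_tx(), not the driver's API.

/*
 * Simplified, userspace-only sketch of the per-link multicast fan-out in the
 * ath12k_mac_op_tx() hunk above. frame_copy() and link_tx() are made-up
 * stand-ins for skb_copy() and ath12k_dp_tx().
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_LINKS 15 /* mirrors IEEE80211_MLD_MAX_NUM_LINKS */

struct frame {
	size_t len;
	unsigned char data[256];
};

/* Stand-in for skb_copy(): one private copy of the frame per link. */
static struct frame *frame_copy(const struct frame *src)
{
	struct frame *dup = malloc(sizeof(*dup));

	if (dup)
		memcpy(dup, src, sizeof(*dup));
	return dup;
}

/* Stand-in for the per-link ath12k_dp_tx() call. */
static void link_tx(unsigned int link_id, struct frame *f, unsigned int gsn)
{
	printf("link %u: tx %zu bytes, mcast gsn %u\n", link_id, f->len, gsn);
	free(f);
}

int main(void)
{
	struct frame mcast = { .len = 64 };
	unsigned long links_map = 0x5;         /* links 0 and 2 are up */
	static unsigned int mcbc_gsn;          /* shared group sequence counter */
	unsigned int gsn = ++mcbc_gsn & 0xfff; /* 12-bit wrap, as in the hunk */
	unsigned int link_id;

	for (link_id = 0; link_id < MAX_LINKS; link_id++) {
		struct frame *dup;

		if (!(links_map & (1UL << link_id)))
			continue;              /* link not active, skip it */

		dup = frame_copy(&mcast);
		if (!dup)
			continue;              /* drop only this link's copy on OOM */

		link_tx(link_id, dup, gsn);
	}

	return 0;
}

As in the hunk, a copy failure on one link only skips that link's transmit instead of aborting the whole multicast send.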
diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h
index 3594729b6397..ae35b73312bf 100644
--- a/drivers/net/wireless/ath/ath12k/mac.h
+++ b/drivers/net/wireless/ath/ath12k/mac.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_MAC_H
@@ -108,5 +108,11 @@ int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif);
void ath12k_mac_get_any_chanctx_conf_iter(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *conf,
void *data);
-
+u16 ath12k_mac_he_convert_tones_to_ru_tones(u16 tones);
+enum nl80211_eht_ru_alloc ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(u16 ru_tones);
+enum nl80211_eht_gi ath12k_mac_eht_gi_to_nl80211_eht_gi(u8 sgi);
+struct ieee80211_bss_conf *ath12k_mac_get_link_bss_conf(struct ath12k_link_vif *arvif);
+struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u8 link_id);
#endif
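[Editor's sketch] ath12k_mac_get_fw_stats() in the mac.c hunk pairs a WMI stats request with a completion: it reinitializes ar->fw_stats_complete, sends WMI_REQUEST_STATS_CMDID, and waits up to one second for the stats event parser in wmi.c to call complete(). The pthread program below mimics that request/complete/timeout flow in userspace, assuming a condition variable is an acceptable stand-in for a kernel completion; every name in it is illustrative.

/*
 * Userspace pthread sketch of the request/complete/timeout flow used by
 * ath12k_mac_get_fw_stats() and the WMI stats event parser. A condition
 * variable stands in for the kernel completion; all names are illustrative.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool stats_done;

/* Plays the role of the stats event handler calling complete(). */
static void *stats_event_handler(void *arg)
{
	(void)arg;
	usleep(100 * 1000);            /* pretend the firmware takes 100 ms */
	pthread_mutex_lock(&lock);
	stats_done = true;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Plays the role of wait_for_completion_timeout(&ar->fw_stats_complete, HZ). */
static int wait_for_stats(int timeout_sec)
{
	struct timespec deadline;
	int err = 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_sec;

	pthread_mutex_lock(&lock);
	while (!stats_done && !err)
		err = pthread_cond_timedwait(&cond, &lock, &deadline);
	pthread_mutex_unlock(&lock);

	return stats_done ? 0 : -1;    /* -1 mirrors the "timed out" warning path */
}

int main(void)
{
	pthread_t handler;

	stats_done = false;            /* reinit_completion() equivalent */
	pthread_create(&handler, NULL, stats_event_handler, NULL);
	/* ...the WMI stats request would be sent here... */

	if (wait_for_stats(1))
		printf("timed out waiting for fw stats\n");
	else
		printf("fw stats received\n");

	pthread_join(handler, NULL);
	return 0;
}

Note that on timeout the driver only warns and still returns the send result, so callers such as the sta_statistics path treat missing stats as non-fatal.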
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
index 06cff3849ab8..b474696ac6d8 100644
--- a/drivers/net/wireless/ath/ath12k/pci.c
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -483,8 +483,11 @@ static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
ath12k_pci_ext_grp_disable(irq_grp);
- napi_synchronize(&irq_grp->napi);
- napi_disable(&irq_grp->napi);
+ if (irq_grp->napi_enabled) {
+ napi_synchronize(&irq_grp->napi);
+ napi_disable(&irq_grp->napi);
+ irq_grp->napi_enabled = false;
+ }
}
}
@@ -646,7 +649,7 @@ static int ath12k_pci_set_irq_affinity_hint(struct ath12k_pci *ab_pci,
if (test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
return 0;
- return irq_set_affinity_hint(ab_pci->pdev->irq, m);
+ return irq_set_affinity_and_hint(ab_pci->pdev->irq, m);
}
static int ath12k_pci_config_irq(struct ath12k_base *ab)
@@ -1114,7 +1117,11 @@ void ath12k_pci_ext_irq_enable(struct ath12k_base *ab)
for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
- napi_enable(&irq_grp->napi);
+ if (!irq_grp->napi_enabled) {
+ napi_enable(&irq_grp->napi);
+ irq_grp->napi_enabled = true;
+ }
+
ath12k_pci_ext_grp_enable(irq_grp);
}
@@ -1561,6 +1568,7 @@ static int ath12k_pci_probe(struct pci_dev *pdev,
ab_pci->ab = ab;
ab_pci->pdev = pdev;
ab->hif.ops = &ath12k_pci_hif_ops;
+ ab->fw_mode = ATH12K_FIRMWARE_MODE_NORMAL;
pci_set_drvdata(pdev, ab);
spin_lock_init(&ab_pci->window_lock);
@@ -1689,6 +1697,8 @@ static int ath12k_pci_probe(struct pci_dev *pdev,
return 0;
err_free_irq:
+ /* __free_irq() expects the caller to have cleared the affinity hint */
+ ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
ath12k_pci_free_irq(ab);
err_ce_free:
@@ -1734,9 +1744,9 @@ static void ath12k_pci_remove(struct pci_dev *pdev)
cancel_work_sync(&ab->reset_work);
cancel_work_sync(&ab->dump_work);
ath12k_core_deinit(ab);
- ath12k_fw_unmap(ab);
qmi_fail:
+ ath12k_fw_unmap(ab);
ath12k_mhi_unregister(ab_pci);
ath12k_pci_free_irq(ab);
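[Editor's sketch] The pci.c hunks guard napi_enable()/napi_disable() with a new irq_grp->napi_enabled flag so that a second enable or disable on the same group becomes a no-op. A minimal userspace sketch of that guard, with stub functions standing in for the NAPI calls:

/*
 * Minimal sketch, not kernel code, of the napi_enabled guard added in the
 * pci.c hunks above. The stub functions stand in for napi_enable() and
 * napi_synchronize()/napi_disable().
 */
#include <stdbool.h>
#include <stdio.h>

struct irq_group {
	bool napi_enabled; /* mirrors the new irq_grp->napi_enabled flag */
};

static void napi_enable_stub(struct irq_group *grp)
{
	(void)grp;
	printf("napi enabled\n");
}

static void napi_disable_stub(struct irq_group *grp)
{
	(void)grp;
	printf("napi disabled\n");
}

static void group_irq_enable(struct irq_group *grp)
{
	if (!grp->napi_enabled) {
		napi_enable_stub(grp);
		grp->napi_enabled = true;
	}
}

static void group_irq_disable(struct irq_group *grp)
{
	if (grp->napi_enabled) {
		napi_disable_stub(grp);
		grp->napi_enabled = false;
	}
}

int main(void)
{
	struct irq_group grp = { .napi_enabled = false };

	group_irq_enable(&grp);
	group_irq_enable(&grp);  /* no-op: already enabled */
	group_irq_disable(&grp);
	group_irq_disable(&grp); /* no-op: already disabled */
	return 0;
}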
diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
index 5c3563383fab..348dbc81bad8 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.c
+++ b/drivers/net/wireless/ath/ath12k/qmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/elf.h>
@@ -2056,8 +2056,7 @@ static int ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
}
if (!ab->qmi.num_radios || ab->qmi.num_radios == U8_MAX) {
- ab->single_chip_mlo_supp = false;
-
+ ag->mlo_capable = false;
ath12k_dbg(ab, ATH12K_DBG_QMI,
"skip QMI MLO cap due to invalid num_radio %d\n",
ab->qmi.num_radios);
@@ -2265,10 +2264,6 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
goto out;
}
- if (resp.single_chip_mlo_support_valid &&
- resp.single_chip_mlo_support)
- ab->single_chip_mlo_supp = true;
-
if (!resp.num_phy_valid) {
ret = -ENODATA;
goto out;
@@ -2277,10 +2272,9 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
ab->qmi.num_radios = resp.num_phy;
ath12k_dbg(ab, ATH12K_DBG_QMI,
- "phy capability resp valid %d num_phy %d valid %d board_id %d valid %d single_chip_mlo_support %d\n",
+ "phy capability resp valid %d num_phy %d valid %d board_id %d\n",
resp.num_phy_valid, resp.num_phy,
- resp.board_id_valid, resp.board_id,
- resp.single_chip_mlo_support_valid, resp.single_chip_mlo_support);
+ resp.board_id_valid, resp.board_id);
return;
@@ -2740,6 +2734,15 @@ int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
if (r)
ath12k_dbg(ab, ATH12K_DBG_QMI, "SMBIOS bdf variant name not set.\n");
+ r = ath12k_acpi_start(ab);
+ if (r)
+ /* ACPI is optional so continue in case of an error */
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi failed: %d\n", r);
+
+ r = ath12k_acpi_check_bdf_variant_name(ab);
+ if (r)
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "ACPI bdf variant name not set.\n");
+
out:
return ret;
}
diff --git a/drivers/net/wireless/ath/ath12k/reg.h b/drivers/net/wireless/ath/ath12k/reg.h
index 29c7ec3260da..75f80df2aa0c 100644
--- a/drivers/net/wireless/ath/ath12k/reg.h
+++ b/drivers/net/wireless/ath/ath12k/reg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_REG_H
@@ -13,6 +13,9 @@
struct ath12k_base;
struct ath12k;
+#define ATH12K_2GHZ_MAX_FREQUENCY 2495
+#define ATH12K_5GHZ_MAX_FREQUENCY 5920
+
/* DFS regdomains supported by Firmware */
enum ath12k_dfs_region {
ATH12K_DFS_REG_UNSET,
diff --git a/drivers/net/wireless/ath/ath12k/rx_desc.h b/drivers/net/wireless/ath/ath12k/rx_desc.h
index 10366bbe9999..6c600473b402 100644
--- a/drivers/net/wireless/ath/ath12k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath12k/rx_desc.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_RX_DESC_H
#define ATH12K_RX_DESC_H
@@ -637,6 +637,8 @@ enum rx_msdu_start_pkt_type {
RX_MSDU_START_PKT_TYPE_11N,
RX_MSDU_START_PKT_TYPE_11AC,
RX_MSDU_START_PKT_TYPE_11AX,
+ RX_MSDU_START_PKT_TYPE_11BA,
+ RX_MSDU_START_PKT_TYPE_11BE,
};
enum rx_msdu_start_sgi {
@@ -1539,12 +1541,4 @@ struct hal_rx_desc {
#define MAX_MU_GROUP_SHOW 16
#define MAX_MU_GROUP_LENGTH (6 * MAX_MU_GROUP_SHOW)
-#define HAL_RX_RU_ALLOC_TYPE_MAX 6
-#define RU_26 1
-#define RU_52 2
-#define RU_106 4
-#define RU_242 9
-#define RU_484 18
-#define RU_996 37
-
#endif /* ATH12K_RX_DESC_H */
diff --git a/drivers/net/wireless/ath/ath12k/testmode.c b/drivers/net/wireless/ath/ath12k/testmode.c
new file mode 100644
index 000000000000..18d56a976dc7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/testmode.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "testmode.h"
+#include <net/netlink.h>
+#include "debug.h"
+#include "wmi.h"
+#include "hw.h"
+#include "core.h"
+#include "hif.h"
+#include "../testmode_i.h"
+
+#define ATH12K_FTM_SEGHDR_CURRENT_SEQ GENMASK(3, 0)
+#define ATH12K_FTM_SEGHDR_TOTAL_SEGMENTS GENMASK(7, 4)
+
+static const struct nla_policy ath12k_tm_policy[ATH_TM_ATTR_MAX + 1] = {
+ [ATH_TM_ATTR_CMD] = { .type = NLA_U32 },
+ [ATH_TM_ATTR_DATA] = { .type = NLA_BINARY,
+ .len = ATH_TM_DATA_MAX_LEN },
+ [ATH_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 },
+ [ATH_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 },
+ [ATH_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 },
+};
+
+static struct ath12k *ath12k_tm_get_ar(struct ath12k_base *ab)
+{
+ struct ath12k_pdev *pdev;
+ struct ath12k *ar;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+
+ if (ar && ar->ah->state == ATH12K_HW_STATE_TM)
+ return ar;
+ }
+
+ return NULL;
+}
+
+void ath12k_tm_wmi_event_unsegmented(struct ath12k_base *ab, u32 cmd_id,
+ struct sk_buff *skb)
+{
+ struct sk_buff *nl_skb;
+ struct ath12k *ar;
+
+ ath12k_dbg(ab, ATH12K_DBG_TESTMODE,
+ "testmode event wmi cmd_id %d skb length %d\n",
+ cmd_id, skb->len);
+
+ ath12k_dbg_dump(ab, ATH12K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
+
+ ar = ath12k_tm_get_ar(ab);
+ if (!ar) {
+ ath12k_warn(ab, "testmode event not handled due to invalid pdev\n");
+ return;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ nl_skb = cfg80211_testmode_alloc_event_skb(ar->ah->hw->wiphy,
+ 2 * nla_total_size(sizeof(u32)) +
+ nla_total_size(skb->len),
+ GFP_ATOMIC);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!nl_skb) {
+ ath12k_warn(ab,
+ "failed to allocate skb for unsegmented testmode wmi event\n");
+ return;
+ }
+
+ if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD, ATH_TM_CMD_WMI) ||
+ nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) ||
+ nla_put(nl_skb, ATH_TM_ATTR_DATA, skb->len, skb->data)) {
+ ath12k_warn(ab, "failed to populate testmode unsegmented event\n");
+ kfree_skb(nl_skb);
+ return;
+ }
+
+ cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+}
+
+void ath12k_tm_process_event(struct ath12k_base *ab, u32 cmd_id,
+ const struct ath12k_wmi_ftm_event *ftm_msg,
+ u16 length)
+{
+ struct sk_buff *nl_skb;
+ struct ath12k *ar;
+ u32 data_pos, pdev_id;
+ u16 datalen;
+ u8 total_segments, current_seq;
+ u8 const *buf_pos;
+
+ ath12k_dbg(ab, ATH12K_DBG_TESTMODE,
+ "testmode event wmi cmd_id %d ftm event msg %pK datalen %d\n",
+ cmd_id, ftm_msg, length);
+ ath12k_dbg_dump(ab, ATH12K_DBG_TESTMODE, NULL, "", ftm_msg, length);
+ pdev_id = DP_HW2SW_MACID(le32_to_cpu(ftm_msg->seg_hdr.pdev_id));
+
+ if (pdev_id >= ab->num_radios) {
+ ath12k_warn(ab, "testmode event not handled due to invalid pdev id\n");
+ return;
+ }
+
+ ar = ab->pdevs[pdev_id].ar;
+
+ if (!ar) {
+ ath12k_warn(ab, "testmode event not handled due to absence of pdev\n");
+ return;
+ }
+
+ current_seq = le32_get_bits(ftm_msg->seg_hdr.segmentinfo,
+ ATH12K_FTM_SEGHDR_CURRENT_SEQ);
+ total_segments = le32_get_bits(ftm_msg->seg_hdr.segmentinfo,
+ ATH12K_FTM_SEGHDR_TOTAL_SEGMENTS);
+ datalen = length - (sizeof(struct ath12k_wmi_ftm_seg_hdr_params));
+ buf_pos = ftm_msg->data;
+
+ if (current_seq == 0) {
+ ab->ftm_event_obj.expected_seq = 0;
+ ab->ftm_event_obj.data_pos = 0;
+ }
+
+ data_pos = ab->ftm_event_obj.data_pos;
+
+ if ((data_pos + datalen) > ATH_FTM_EVENT_MAX_BUF_LENGTH) {
+ ath12k_warn(ab,
+ "Invalid event length date_pos[%d] datalen[%d]\n",
+ data_pos, datalen);
+ return;
+ }
+
+ memcpy(&ab->ftm_event_obj.eventdata[data_pos], buf_pos, datalen);
+ data_pos += datalen;
+
+ if (++ab->ftm_event_obj.expected_seq != total_segments) {
+ ab->ftm_event_obj.data_pos = data_pos;
+ ath12k_dbg(ab, ATH12K_DBG_TESTMODE,
+ "partial data received current_seq[%d], total_seg[%d]\n",
+ current_seq, total_segments);
+ return;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_TESTMODE,
+ "total data length[%d] = [%d]\n",
+ data_pos, ftm_msg->seg_hdr.len);
+
+ spin_lock_bh(&ar->data_lock);
+ nl_skb = cfg80211_testmode_alloc_event_skb(ar->ah->hw->wiphy,
+ 2 * nla_total_size(sizeof(u32)) +
+ nla_total_size(data_pos),
+ GFP_ATOMIC);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!nl_skb) {
+ ath12k_warn(ab,
+ "failed to allocate skb for testmode wmi event\n");
+ return;
+ }
+
+ if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD,
+ ATH_TM_CMD_WMI_FTM) ||
+ nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) ||
+ nla_put(nl_skb, ATH_TM_ATTR_DATA, data_pos,
+ &ab->ftm_event_obj.eventdata[0])) {
+ ath12k_warn(ab, "failed to populate testmode event");
+ kfree_skb(nl_skb);
+ return;
+ }
+
+ cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+}
+
+static int ath12k_tm_cmd_get_version(struct ath12k *ar, struct nlattr *tb[])
+{
+ struct sk_buff *skb;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE,
+ "testmode cmd get version_major %d version_minor %d\n",
+ ATH_TESTMODE_VERSION_MAJOR,
+ ATH_TESTMODE_VERSION_MINOR);
+
+ spin_lock_bh(&ar->data_lock);
+ skb = cfg80211_testmode_alloc_reply_skb(ar->ah->hw->wiphy,
+ 2 * nla_total_size(sizeof(u32)));
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!skb)
+ return -ENOMEM;
+
+ if (nla_put_u32(skb, ATH_TM_ATTR_VERSION_MAJOR,
+ ATH_TESTMODE_VERSION_MAJOR) ||
+ nla_put_u32(skb, ATH_TM_ATTR_VERSION_MINOR,
+ ATH_TESTMODE_VERSION_MINOR)) {
+ kfree_skb(skb);
+ return -ENOBUFS;
+ }
+
+ return cfg80211_testmode_reply(skb);
+}
+
+static int ath12k_tm_cmd_process_ftm(struct ath12k *ar, struct nlattr *tb[])
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct sk_buff *skb;
+ struct ath12k_wmi_ftm_cmd *ftm_cmd;
+ int ret = 0;
+ void *buf;
+ size_t aligned_len;
+ u32 cmd_id, buf_len;
+ u16 chunk_len, total_bytes, num_segments;
+ u8 segnumber = 0, *bufpos;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE, "ah->state %d\n", ar->ah->state);
+ if (ar->ah->state != ATH12K_HW_STATE_TM)
+ return -ENETDOWN;
+
+ if (!tb[ATH_TM_ATTR_DATA])
+ return -EINVAL;
+
+ buf = nla_data(tb[ATH_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[ATH_TM_ATTR_DATA]);
+ cmd_id = WMI_PDEV_UTF_CMDID;
+ ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE,
+ "testmode cmd wmi cmd_id %d buf %pK buf_len %d\n",
+ cmd_id, buf, buf_len);
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_TESTMODE, NULL, "", buf, buf_len);
+ bufpos = buf;
+ total_bytes = buf_len;
+ num_segments = total_bytes / MAX_WMI_UTF_LEN;
+
+ if (buf_len - (num_segments * MAX_WMI_UTF_LEN))
+ num_segments++;
+
+ while (buf_len) {
+ if (buf_len > MAX_WMI_UTF_LEN)
+ chunk_len = MAX_WMI_UTF_LEN; /* MAX message */
+ else
+ chunk_len = buf_len;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, (chunk_len +
+ sizeof(struct ath12k_wmi_ftm_cmd)));
+
+ if (!skb)
+ return -ENOMEM;
+
+ ftm_cmd = (struct ath12k_wmi_ftm_cmd *)skb->data;
+ aligned_len = chunk_len + sizeof(struct ath12k_wmi_ftm_seg_hdr_params);
+ ftm_cmd->tlv_header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
+ ftm_cmd->seg_hdr.len = cpu_to_le32(total_bytes);
+ ftm_cmd->seg_hdr.msgref = cpu_to_le32(ar->ftm_msgref);
+ ftm_cmd->seg_hdr.segmentinfo =
+ le32_encode_bits(num_segments,
+ ATH12K_FTM_SEGHDR_TOTAL_SEGMENTS) |
+ le32_encode_bits(segnumber,
+ ATH12K_FTM_SEGHDR_CURRENT_SEQ);
+ ftm_cmd->seg_hdr.pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+ segnumber++;
+ memcpy(&ftm_cmd->data, bufpos, chunk_len);
+ ret = ath12k_wmi_cmd_send(wmi, skb, cmd_id);
+
+ if (ret) {
+ ath12k_warn(ar->ab, "ftm wmi command fail: %d\n", ret);
+ kfree_skb(skb);
+ return ret;
+ }
+
+ buf_len -= chunk_len;
+ bufpos += chunk_len;
+ }
+
+ ++ar->ftm_msgref;
+ return ret;
+}
+
+static int ath12k_tm_cmd_testmode_start(struct ath12k *ar, struct nlattr *tb[])
+{
+ if (ar->ah->state == ATH12K_HW_STATE_TM)
+ return -EALREADY;
+
+ if (ar->ah->state != ATH12K_HW_STATE_OFF)
+ return -EBUSY;
+
+ ar->ab->ftm_event_obj.eventdata = kzalloc(ATH_FTM_EVENT_MAX_BUF_LENGTH,
+ GFP_KERNEL);
+
+ if (!ar->ab->ftm_event_obj.eventdata)
+ return -ENOMEM;
+
+ ar->ah->state = ATH12K_HW_STATE_TM;
+ ar->ftm_msgref = 0;
+ return 0;
+}
+
+static int ath12k_tm_cmd_wmi(struct ath12k *ar, struct nlattr *tb[])
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct sk_buff *skb;
+ struct wmi_pdev_set_param_cmd *cmd;
+ int ret = 0, tag;
+ void *buf;
+ u32 cmd_id, buf_len;
+
+ if (!tb[ATH_TM_ATTR_DATA])
+ return -EINVAL;
+
+ if (!tb[ATH_TM_ATTR_WMI_CMDID])
+ return -EINVAL;
+
+ buf = nla_data(tb[ATH_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[ATH_TM_ATTR_DATA]);
+
+ if (!buf_len) {
+ ath12k_warn(ar->ab, "No data present in testmode command\n");
+ return -EINVAL;
+ }
+
+ cmd_id = nla_get_u32(tb[ATH_TM_ATTR_WMI_CMDID]);
+
+ cmd = buf;
+ tag = le32_get_bits(cmd->tlv_header, WMI_TLV_TAG);
+
+ if (tag == WMI_TAG_PDEV_SET_PARAM_CMD)
+ cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE,
+ "testmode cmd wmi cmd_id %d buf length %d\n",
+ cmd_id, buf_len);
+
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_TESTMODE, NULL, "", buf, buf_len);
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
+
+ if (!skb)
+ return -ENOMEM;
+
+ memcpy(skb->data, buf, buf_len);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, cmd_id);
+ if (ret) {
+ dev_kfree_skb(skb);
+ ath12k_warn(ar->ab, "failed to transmit wmi command (testmode): %d\n",
+ ret);
+ }
+
+ return ret;
+}
+
+int ath12k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len)
+{
+ struct ath12k_hw *ah = hw->priv;
+ struct ath12k *ar = NULL;
+ struct nlattr *tb[ATH_TM_ATTR_MAX + 1];
+ struct ath12k_base *ab;
+ struct wiphy *wiphy = hw->wiphy;
+ int ret;
+
+ lockdep_assert_held(&wiphy->mtx);
+
+ ret = nla_parse(tb, ATH_TM_ATTR_MAX, data, len, ath12k_tm_policy,
+ NULL);
+ if (ret)
+ return ret;
+
+ if (!tb[ATH_TM_ATTR_CMD])
+ return -EINVAL;
+
+ /* TODO: have to handle ar for MLO case */
+ if (ah->num_radio)
+ ar = ah->radio;
+
+ if (!ar)
+ return -EINVAL;
+
+ ab = ar->ab;
+ switch (nla_get_u32(tb[ATH_TM_ATTR_CMD])) {
+ case ATH_TM_CMD_WMI:
+ return ath12k_tm_cmd_wmi(ar, tb);
+ case ATH_TM_CMD_TESTMODE_START:
+ return ath12k_tm_cmd_testmode_start(ar, tb);
+ case ATH_TM_CMD_GET_VERSION:
+ return ath12k_tm_cmd_get_version(ar, tb);
+ case ATH_TM_CMD_WMI_FTM:
+ set_bit(ATH12K_FLAG_FTM_SEGMENTED, &ab->dev_flags);
+ return ath12k_tm_cmd_process_ftm(ar, tb);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath12k/testmode.h b/drivers/net/wireless/ath/ath12k/testmode.h
new file mode 100644
index 000000000000..ef6ab21d19b8
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/testmode.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+#include "hif.h"
+
+#ifdef CONFIG_NL80211_TESTMODE
+
+void ath12k_tm_wmi_event_unsegmented(struct ath12k_base *ab, u32 cmd_id,
+ struct sk_buff *skb);
+void ath12k_tm_process_event(struct ath12k_base *ab, u32 cmd_id,
+ const struct ath12k_wmi_ftm_event *ftm_msg,
+ u16 length);
+int ath12k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len);
+
+#else
+
+static inline void ath12k_tm_wmi_event_unsegmented(struct ath12k_base *ab, u32 cmd_id,
+ struct sk_buff *skb)
+{
+}
+
+static inline void ath12k_tm_process_event(struct ath12k_base *ab, u32 cmd_id,
+ const struct ath12k_wmi_ftm_event *msg,
+ u16 length)
+{
+}
+
+static inline int ath12k_tm_cmd(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ void *data, int len)
+{
+ return 0;
+}
+
+#endif
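[Editor's sketch] ath12k_tm_cmd_process_ftm() in the new testmode.c splits a factory-test blob into MAX_WMI_UTF_LEN sized WMI chunks and stores the total segment count in bits 7:4 and the running sequence number in bits 3:0 of each segment header (the ATH12K_FTM_SEGHDR_* masks). The short program below reproduces just that chunking and bit-packing arithmetic; the 252-byte chunk size and the helper names are assumptions local to the sketch.

/*
 * Standalone sketch of the chunking and bit-packing arithmetic in
 * ath12k_tm_cmd_process_ftm(). The 252-byte chunk size and helper names are
 * assumptions local to this sketch.
 */
#include <stdio.h>

#define MAX_WMI_UTF_LEN 252u          /* assumed maximum per-chunk payload */
#define SEGHDR_CURRENT_SEQ_MASK 0x0fu /* GENMASK(3, 0) */
#define SEGHDR_TOTAL_SEG_SHIFT 4      /* GENMASK(7, 4) */

/* Pack "total segments" into bits 7:4 and "current sequence" into bits 3:0. */
static unsigned int pack_seginfo(unsigned int total, unsigned int current_seq)
{
	return (total << SEGHDR_TOTAL_SEG_SHIFT) |
	       (current_seq & SEGHDR_CURRENT_SEQ_MASK);
}

int main(void)
{
	unsigned int buf_len = 700;   /* example factory-test payload length */
	unsigned int remaining = buf_len;
	unsigned int num_segments = buf_len / MAX_WMI_UTF_LEN;
	unsigned int seq = 0;

	if (buf_len % MAX_WMI_UTF_LEN)
		num_segments++;       /* round up for the tail chunk */

	while (remaining) {
		unsigned int chunk = remaining > MAX_WMI_UTF_LEN ?
				     MAX_WMI_UTF_LEN : remaining;

		printf("seg %u of %u: %u bytes, seginfo 0x%02x\n",
		       seq, num_segments, chunk,
		       pack_seginfo(num_segments, seq));

		remaining -= chunk;
		seq++;
	}

	return 0;
}

On the receive side, ath12k_tm_process_event() uses the same two fields to reassemble segmented events into ftm_event_obj.eventdata before handing them to cfg80211.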
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index abb510d235a5..6d1ea5f3a791 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
@@ -15,16 +15,22 @@
#include <linux/time.h>
#include <linux/of.h>
#include "core.h"
+#include "debugfs.h"
#include "debug.h"
#include "mac.h"
#include "hw.h"
#include "peer.h"
#include "p2p.h"
+#include "testmode.h"
struct ath12k_wmi_svc_ready_parse {
bool wmi_svc_bitmap_done;
};
+struct wmi_tlv_fw_stats_parse {
+ const struct wmi_stats_event *ev;
+};
+
struct ath12k_wmi_dma_ring_caps_parse {
struct ath12k_wmi_dma_ring_caps_params *dma_ring_caps;
u32 n_dma_ring_caps;
@@ -173,7 +179,7 @@ static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
.min_len = sizeof(struct wmi_p2p_noa_event) },
};
-static __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
+__le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
{
return le32_encode_bits(cmd, WMI_TLV_TAG) |
le32_encode_bits(len, WMI_TLV_LEN);
@@ -814,6 +820,39 @@ int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
return ret;
}
+int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id,
+ u32 vdev_id, u32 pdev_id)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_request_stats_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_request_stats_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REQUEST_STATS_CMD,
+ sizeof(*cmd));
+
+ cmd->stats_id = cpu_to_le32(stats_id);
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->pdev_id = cpu_to_le32(pdev_id);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_STATS_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI request stats 0x%x vdev id %d pdev id %d\n",
+ stats_id, vdev_id, pdev_id);
+
+ return ret;
+}
+
int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
struct ath12k_wmi_vdev_create_arg *args)
{
@@ -1888,14 +1927,19 @@ int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
return ret;
}
-int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
+int ath12k_wmi_bcn_tmpl(struct ath12k_link_vif *arvif,
struct ieee80211_mutable_offsets *offs,
struct sk_buff *bcn,
struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args)
{
+ struct ath12k *ar = arvif->ar;
struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct ath12k_base *ab = ar->ab;
struct wmi_bcn_tmpl_cmd *cmd;
struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ieee80211_bss_conf *conf;
+ u32 vdev_id = arvif->vdev_id;
struct wmi_tlv *tlv;
struct sk_buff *skb;
u32 ema_params = 0;
@@ -1903,6 +1947,14 @@ int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
int ret, len;
size_t aligned_len = roundup(bcn->len, 4);
+ conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!conf) {
+ ath12k_warn(ab,
+ "unable to access bss link conf in beacon template command for vif %pM link %u\n",
+ ahvif->vif->addr, arvif->link_id);
+ return -EINVAL;
+ }
+
len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
@@ -1914,8 +1966,16 @@ int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(vdev_id);
cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset);
- cmd->csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[0]);
- cmd->ext_csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[1]);
+
+ if (conf->csa_active) {
+ cmd->csa_switch_count_offset =
+ cpu_to_le32(offs->cntdwn_counter_offs[0]);
+ cmd->ext_csa_switch_count_offset =
+ cpu_to_le32(offs->cntdwn_counter_offs[1]);
+ cmd->csa_event_bitmap = cpu_to_le32(0xFFFFFFFF);
+ arvif->current_cntdown_counter = bcn->data[offs->cntdwn_counter_offs[0]];
+ }
+
cmd->buf_len = cpu_to_le32(bcn->len);
cmd->mbssid_ie_offset = cpu_to_le32(offs->mbssid_off);
if (ema_args) {
@@ -1945,7 +2005,7 @@ int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
if (ret) {
- ath12k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n");
+ ath12k_warn(ab, "failed to send WMI_BCN_TMPL_CMDID\n");
dev_kfree_skb(skb);
}
@@ -2373,8 +2433,8 @@ void ath12k_wmi_start_scan_init(struct ath12k *ar,
arg->dwell_time_active = 50;
arg->dwell_time_active_2g = 0;
arg->dwell_time_passive = 150;
- arg->dwell_time_active_6g = 40;
- arg->dwell_time_passive_6g = 30;
+ arg->dwell_time_active_6g = 70;
+ arg->dwell_time_passive_6g = 70;
arg->min_rest_time = 50;
arg->max_rest_time = 500;
arg->repeat_probe_time = 0;
@@ -2794,6 +2854,8 @@ int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar,
WMI_CHAN_REG_INFO1_REG_CLS);
*reg2 |= le32_encode_bits(channel_arg->antennamax,
WMI_CHAN_REG_INFO2_ANT_MAX);
+ *reg2 |= le32_encode_bits(channel_arg->maxregpower,
+ WMI_CHAN_REG_INFO2_MAX_TX_PWR);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
@@ -6842,8 +6904,584 @@ static void ath12k_peer_assoc_conf_event(struct ath12k_base *ab, struct sk_buff
rcu_read_unlock();
}
+static void
+ath12k_wmi_fw_vdev_stats_dump(struct ath12k *ar,
+ struct ath12k_fw_stats *fw_stats,
+ char *buf, u32 *length)
+{
+ const struct ath12k_fw_stats_vdev *vdev;
+ u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
+ struct ath12k_link_vif *arvif;
+ u32 len = *length;
+ u8 *vif_macaddr;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n",
+ "ath12k VDEV stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ list_for_each_entry(vdev, &fw_stats->vdevs, list) {
+ arvif = ath12k_mac_get_arvif(ar, vdev->vdev_id);
+ if (!arvif)
+ continue;
+ vif_macaddr = arvif->ahvif->vif->addr;
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "VDEV ID", vdev->vdev_id);
+ len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+ "VDEV MAC address", vif_macaddr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "beacon snr", vdev->beacon_snr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "data snr", vdev->data_snr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx frames", vdev->num_rx_frames);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rts fail", vdev->num_rts_fail);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rts success", vdev->num_rts_success);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx err", vdev->num_rx_err);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx discard", vdev->num_rx_discard);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num tx not acked", vdev->num_tx_not_acked);
+
+ for (i = 0 ; i < WLAN_MAX_AC; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames", i,
+ vdev->num_tx_frames[i]);
+
+ for (i = 0 ; i < WLAN_MAX_AC; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames retries", i,
+ vdev->num_tx_frames_retries[i]);
+
+ for (i = 0 ; i < WLAN_MAX_AC; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames failures", i,
+ vdev->num_tx_frames_failures[i]);
+
+ for (i = 0 ; i < MAX_TX_RATE_VALUES; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] 0x%08x\n",
+ "tx rate history", i,
+ vdev->tx_rate_history[i]);
+ for (i = 0 ; i < MAX_TX_RATE_VALUES; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "beacon rssi history", i,
+ vdev->beacon_rssi_history[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ *length = len;
+ }
+}
+
+static void
+ath12k_wmi_fw_bcn_stats_dump(struct ath12k *ar,
+ struct ath12k_fw_stats *fw_stats,
+ char *buf, u32 *length)
+{
+ const struct ath12k_fw_stats_bcn *bcn;
+ u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
+ struct ath12k_link_vif *arvif;
+ u32 len = *length;
+ size_t num_bcn;
+
+ num_bcn = list_count_nodes(&fw_stats->bcn);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+ "ath12k Beacon stats", num_bcn);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "===================");
+
+ list_for_each_entry(bcn, &fw_stats->bcn, list) {
+ arvif = ath12k_mac_get_arvif(ar, bcn->vdev_id);
+ if (!arvif)
+ continue;
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "VDEV ID", bcn->vdev_id);
+ len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+ "VDEV MAC address", arvif->ahvif->vif->addr);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "================");
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Num of beacon tx success", bcn->tx_bcn_succ_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Num of beacon tx failures", bcn->tx_bcn_outage_cnt);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ *length = len;
+ }
+}
+
+static void
+ath12k_wmi_fw_pdev_base_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
+ char *buf, u32 *length, u64 fw_soc_drop_cnt)
+{
+ u32 len = *length;
+ u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n",
+ "ath12k PDEV stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Channel noise floor", pdev->ch_noise_floor);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Channel TX power", pdev->chan_tx_power);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "TX frame count", pdev->tx_frame_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RX frame count", pdev->rx_frame_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RX clear count", pdev->rx_clear_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Cycle count", pdev->cycle_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PHY error count", pdev->phy_err_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10llu\n",
+ "soc drop count", fw_soc_drop_cnt);
+
+ *length = len;
+}
+
+static void
+ath12k_wmi_fw_pdev_tx_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+ "ath12k PDEV TX stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "====================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HTT cookies queued", pdev->comp_queued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HTT cookies disp.", pdev->comp_delivered);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDU queued", pdev->msdu_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU queued", pdev->mpdu_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs dropped", pdev->wmm_drop);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Local enqued", pdev->local_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Local freed", pdev->local_freed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HW queued", pdev->hw_queued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PPDUs reaped", pdev->hw_reaped);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Num underruns", pdev->underrun);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PPDUs cleaned", pdev->tx_abort);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs requeued", pdev->mpdus_requed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Excessive retries", pdev->tx_ko);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "HW rate", pdev->data_rc);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Sched self triggers", pdev->self_triggers);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Dropped due to SW retries",
+ pdev->sw_retry_failure);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Illegal rate phy errors",
+ pdev->illgl_rate_phy_err);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PDEV continuous xretry", pdev->pdev_cont_xretry);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "TX timeout", pdev->pdev_tx_timeout);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PDEV resets", pdev->pdev_resets);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Stateless TIDs alloc failures",
+ pdev->stateless_tid_alloc_failure);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PHY underrun", pdev->phy_underrun);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "MPDU is more than txop limit", pdev->txop_ovf);
+ *length = len;
+}
+
+static void
+ath12k_wmi_fw_pdev_rx_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+ "ath12k PDEV RX stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "====================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Mid PPDU route change",
+ pdev->mid_ppdu_route_change);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Tot. number of statuses", pdev->status_rcvd);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 0", pdev->r0_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 1", pdev->r1_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 2", pdev->r2_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 3", pdev->r3_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs delivered to HTT", pdev->htt_msdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs delivered to HTT", pdev->htt_mpdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs delivered to stack", pdev->loc_msdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs delivered to stack", pdev->loc_mpdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Oversized AMSUs", pdev->oversize_amsdu);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY errors", pdev->phy_errs);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY errors drops", pdev->phy_err_drop);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
+ *length = len;
+}
+
+static void
+ath12k_wmi_fw_pdev_stats_dump(struct ath12k *ar,
+ struct ath12k_fw_stats *fw_stats,
+ char *buf, u32 *length)
+{
+ const struct ath12k_fw_stats_pdev *pdev;
+ u32 len = *length;
+
+ pdev = list_first_entry_or_null(&fw_stats->pdevs,
+ struct ath12k_fw_stats_pdev, list);
+ if (!pdev) {
+ ath12k_warn(ar->ab, "failed to get pdev stats\n");
+ return;
+ }
+
+ ath12k_wmi_fw_pdev_base_stats_dump(pdev, buf, &len,
+ ar->ab->fw_soc_drop_count);
+ ath12k_wmi_fw_pdev_tx_stats_dump(pdev, buf, &len);
+ ath12k_wmi_fw_pdev_rx_stats_dump(pdev, buf, &len);
+
+ *length = len;
+}
+
+void ath12k_wmi_fw_stats_dump(struct ath12k *ar,
+ struct ath12k_fw_stats *fw_stats,
+ u32 stats_id, char *buf)
+{
+ u32 len = 0;
+ u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
+
+ spin_lock_bh(&ar->data_lock);
+
+ switch (stats_id) {
+ case WMI_REQUEST_VDEV_STAT:
+ ath12k_wmi_fw_vdev_stats_dump(ar, fw_stats, buf, &len);
+ break;
+ case WMI_REQUEST_BCN_STAT:
+ ath12k_wmi_fw_bcn_stats_dump(ar, fw_stats, buf, &len);
+ break;
+ case WMI_REQUEST_PDEV_STAT:
+ ath12k_wmi_fw_pdev_stats_dump(ar, fw_stats, buf, &len);
+ break;
+ default:
+ break;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+
+ if (len >= buf_len)
+ buf[len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ ath12k_debugfs_fw_stats_reset(ar);
+}
+
+static void
+ath12k_wmi_pull_vdev_stats(const struct wmi_vdev_stats_params *src,
+ struct ath12k_fw_stats_vdev *dst)
+{
+ int i;
+
+ dst->vdev_id = le32_to_cpu(src->vdev_id);
+ dst->beacon_snr = le32_to_cpu(src->beacon_snr);
+ dst->data_snr = le32_to_cpu(src->data_snr);
+ dst->num_rx_frames = le32_to_cpu(src->num_rx_frames);
+ dst->num_rts_fail = le32_to_cpu(src->num_rts_fail);
+ dst->num_rts_success = le32_to_cpu(src->num_rts_success);
+ dst->num_rx_err = le32_to_cpu(src->num_rx_err);
+ dst->num_rx_discard = le32_to_cpu(src->num_rx_discard);
+ dst->num_tx_not_acked = le32_to_cpu(src->num_tx_not_acked);
+
+ for (i = 0; i < WLAN_MAX_AC; i++)
+ dst->num_tx_frames[i] =
+ le32_to_cpu(src->num_tx_frames[i]);
+
+ for (i = 0; i < WLAN_MAX_AC; i++)
+ dst->num_tx_frames_retries[i] =
+ le32_to_cpu(src->num_tx_frames_retries[i]);
+
+ for (i = 0; i < WLAN_MAX_AC; i++)
+ dst->num_tx_frames_failures[i] =
+ le32_to_cpu(src->num_tx_frames_failures[i]);
+
+ for (i = 0; i < MAX_TX_RATE_VALUES; i++)
+ dst->tx_rate_history[i] =
+ le32_to_cpu(src->tx_rate_history[i]);
+
+ for (i = 0; i < MAX_TX_RATE_VALUES; i++)
+ dst->beacon_rssi_history[i] =
+ le32_to_cpu(src->beacon_rssi_history[i]);
+}
+
+static void
+ath12k_wmi_pull_bcn_stats(const struct ath12k_wmi_bcn_stats_params *src,
+ struct ath12k_fw_stats_bcn *dst)
+{
+ dst->vdev_id = le32_to_cpu(src->vdev_id);
+ dst->tx_bcn_succ_cnt = le32_to_cpu(src->tx_bcn_succ_cnt);
+ dst->tx_bcn_outage_cnt = le32_to_cpu(src->tx_bcn_outage_cnt);
+}
+
+static void
+ath12k_wmi_pull_pdev_stats_base(const struct ath12k_wmi_pdev_base_stats_params *src,
+ struct ath12k_fw_stats_pdev *dst)
+{
+ dst->ch_noise_floor = a_sle32_to_cpu(src->chan_nf);
+ dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
+ dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
+ dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
+ dst->cycle_count = __le32_to_cpu(src->cycle_count);
+ dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
+ dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
+}
+
+static void
+ath12k_wmi_pull_pdev_stats_tx(const struct ath12k_wmi_pdev_tx_stats_params *src,
+ struct ath12k_fw_stats_pdev *dst)
+{
+ dst->comp_queued = a_sle32_to_cpu(src->comp_queued);
+ dst->comp_delivered = a_sle32_to_cpu(src->comp_delivered);
+ dst->msdu_enqued = a_sle32_to_cpu(src->msdu_enqued);
+ dst->mpdu_enqued = a_sle32_to_cpu(src->mpdu_enqued);
+ dst->wmm_drop = a_sle32_to_cpu(src->wmm_drop);
+ dst->local_enqued = a_sle32_to_cpu(src->local_enqued);
+ dst->local_freed = a_sle32_to_cpu(src->local_freed);
+ dst->hw_queued = a_sle32_to_cpu(src->hw_queued);
+ dst->hw_reaped = a_sle32_to_cpu(src->hw_reaped);
+ dst->underrun = a_sle32_to_cpu(src->underrun);
+ dst->tx_abort = a_sle32_to_cpu(src->tx_abort);
+ dst->mpdus_requed = a_sle32_to_cpu(src->mpdus_requed);
+ dst->tx_ko = __le32_to_cpu(src->tx_ko);
+ dst->data_rc = __le32_to_cpu(src->data_rc);
+ dst->self_triggers = __le32_to_cpu(src->self_triggers);
+ dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
+ dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
+ dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
+ dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
+ dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
+ dst->stateless_tid_alloc_failure =
+ __le32_to_cpu(src->stateless_tid_alloc_failure);
+ dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
+ dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
+}
+
+static void
+ath12k_wmi_pull_pdev_stats_rx(const struct ath12k_wmi_pdev_rx_stats_params *src,
+ struct ath12k_fw_stats_pdev *dst)
+{
+ dst->mid_ppdu_route_change =
+ a_sle32_to_cpu(src->mid_ppdu_route_change);
+ dst->status_rcvd = a_sle32_to_cpu(src->status_rcvd);
+ dst->r0_frags = a_sle32_to_cpu(src->r0_frags);
+ dst->r1_frags = a_sle32_to_cpu(src->r1_frags);
+ dst->r2_frags = a_sle32_to_cpu(src->r2_frags);
+ dst->r3_frags = a_sle32_to_cpu(src->r3_frags);
+ dst->htt_msdus = a_sle32_to_cpu(src->htt_msdus);
+ dst->htt_mpdus = a_sle32_to_cpu(src->htt_mpdus);
+ dst->loc_msdus = a_sle32_to_cpu(src->loc_msdus);
+ dst->loc_mpdus = a_sle32_to_cpu(src->loc_mpdus);
+ dst->oversize_amsdu = a_sle32_to_cpu(src->oversize_amsdu);
+ dst->phy_errs = a_sle32_to_cpu(src->phy_errs);
+ dst->phy_err_drop = a_sle32_to_cpu(src->phy_err_drop);
+ dst->mpdu_errs = a_sle32_to_cpu(src->mpdu_errs);
+}
+
+static int ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab,
+ struct wmi_tlv_fw_stats_parse *parse,
+ const void *ptr,
+ u16 len)
+{
+ const struct wmi_stats_event *ev = parse->ev;
+ struct ath12k_fw_stats stats = {0};
+ struct ath12k *ar;
+ struct ath12k_link_vif *arvif;
+ struct ieee80211_sta *sta;
+ struct ath12k_sta *ahsta;
+ struct ath12k_link_sta *arsta;
+ int i, ret = 0;
+ const void *data = ptr;
+
+ INIT_LIST_HEAD(&stats.vdevs);
+ INIT_LIST_HEAD(&stats.bcn);
+ INIT_LIST_HEAD(&stats.pdevs);
+
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch update stats ev");
+ return -EPROTO;
+ }
+
+ rcu_read_lock();
+
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));
+ if (!ar) {
+ ath12k_warn(ab, "invalid pdev id %d in update stats event\n",
+ le32_to_cpu(ev->pdev_id));
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ for (i = 0; i < le32_to_cpu(ev->num_vdev_stats); i++) {
+ const struct wmi_vdev_stats_params *src;
+ struct ath12k_fw_stats_vdev *dst;
+
+ src = data;
+ if (len < sizeof(*src)) {
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ arvif = ath12k_mac_get_arvif(ar, le32_to_cpu(src->vdev_id));
+ if (arvif) {
+ sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
+ arvif->bssid,
+ NULL);
+ if (sta) {
+ ahsta = ath12k_sta_to_ahsta(sta);
+ arsta = &ahsta->deflink;
+ arsta->rssi_beacon = le32_to_cpu(src->beacon_snr);
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "wmi stats vdev id %d snr %d\n",
+ le32_to_cpu(src->vdev_id), le32_to_cpu(src->beacon_snr));
+ } else {
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "station with bssid %pM not found for vdev stat\n",
+ arvif->bssid);
+ }
+ }
+
+ data += sizeof(*src);
+ len -= sizeof(*src);
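+ /* GFP_ATOMIC: this runs in WMI event context with rcu_read_lock() held */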
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+ ath12k_wmi_pull_vdev_stats(src, dst);
+ stats.stats_id = WMI_REQUEST_VDEV_STAT;
+ list_add_tail(&dst->list, &stats.vdevs);
+ }
+ for (i = 0; i < le32_to_cpu(ev->num_bcn_stats); i++) {
+ const struct ath12k_wmi_bcn_stats_params *src;
+ struct ath12k_fw_stats_bcn *dst;
+
+ src = data;
+ if (len < sizeof(*src)) {
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ data += sizeof(*src);
+ len -= sizeof(*src);
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+ ath12k_wmi_pull_bcn_stats(src, dst);
+ stats.stats_id = WMI_REQUEST_BCN_STAT;
+ list_add_tail(&dst->list, &stats.bcn);
+ }
+ for (i = 0; i < le32_to_cpu(ev->num_pdev_stats); i++) {
+ const struct ath12k_wmi_pdev_stats_params *src;
+ struct ath12k_fw_stats_pdev *dst;
+
+ src = data;
+ if (len < sizeof(*src)) {
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ stats.stats_id = WMI_REQUEST_PDEV_STAT;
+
+ data += sizeof(*src);
+ len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath12k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath12k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+ ath12k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+ list_add_tail(&dst->list, &stats.pdevs);
+ }
+
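+ /* Wake any waiter on fw_stats_complete and hand the parsed lists to debugfs */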
+ complete(&ar->fw_stats_complete);
+ ath12k_debugfs_fw_stats_process(ar, &stats);
+exit:
+ rcu_read_unlock();
+ return ret;
+}
+
+static int ath12k_wmi_tlv_fw_stats_parse(struct ath12k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_fw_stats_parse *parse = data;
+ int ret = 0;
+
+ switch (tag) {
+ case WMI_TAG_STATS_EVENT:
+ parse->ev = ptr;
+ break;
+ case WMI_TAG_ARRAY_BYTE:
+ ret = ath12k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb)
{
+ int ret;
+ struct wmi_tlv_fw_stats_parse parse = {};
+
+ ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath12k_wmi_tlv_fw_stats_parse,
+ &parse);
+ if (ret)
+ ath12k_warn(ab, "failed to parse fw stats %d\n", ret);
}
/* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
@@ -6889,17 +7527,15 @@ ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
const struct ath12k_wmi_pdev_csa_event *ev,
const u32 *vdev_ids)
{
- int i;
+ u32 current_switch_count = le32_to_cpu(ev->current_switch_count);
+ u32 num_vdevs = le32_to_cpu(ev->num_vdevs);
struct ieee80211_bss_conf *conf;
struct ath12k_link_vif *arvif;
struct ath12k_vif *ahvif;
-
- /* Finish CSA once the switch count becomes NULL */
- if (ev->current_switch_count)
- return;
+ int i;
rcu_read_lock();
- for (i = 0; i < le32_to_cpu(ev->num_vdevs); i++) {
+ for (i = 0; i < num_vdevs; i++) {
arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
if (!arvif) {
@@ -6922,8 +7558,26 @@ ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
continue;
}
- if (arvif->is_up && conf->csa_active)
- ieee80211_csa_finish(ahvif->vif, 0);
+ if (!arvif->is_up || !conf->csa_active)
+ continue;
+
+ /* Finish CSA when counter reaches zero */
+ if (!current_switch_count) {
+ ieee80211_csa_finish(ahvif->vif, arvif->link_id);
+ arvif->current_cntdown_counter = 0;
+ } else if (current_switch_count > 1) {
+ /* If the count in the event is not what we expect, don't
+ * update the mac80211 count. During beacon Tx failure the
+ * count in the firmware does not decrement, so this event
+ * can arrive again with the previous count value.
+ */
+ if (current_switch_count != arvif->current_cntdown_counter)
+ continue;
+
+ arvif->current_cntdown_counter =
+ ieee80211_beacon_update_cntdwn(ahvif->vif,
+ arvif->link_id);
+ }
}
rcu_read_unlock();
}
@@ -7026,6 +7680,35 @@ exit:
kfree(tb);
}
+static void ath12k_tm_wmi_event_segmented(struct ath12k_base *ab, u32 cmd_id,
+ struct sk_buff *skb)
+{
+ const struct ath12k_wmi_ftm_event *ev;
+ const void **tb;
+ int ret;
+ u16 length;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse ftm event tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_ARRAY_BYTE];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch ftm msg\n");
+ kfree(tb);
+ return;
+ }
+
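+ /* Payload length is the skb length minus the single TLV header preceding the FTM data */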
+ length = skb->len - TLV_HDR_SIZE;
+ ath12k_tm_process_event(ab, cmd_id, ev, length);
+ kfree(tb);
+}
+
static void
ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
struct sk_buff *skb)
@@ -7446,6 +8129,389 @@ static void ath12k_wmi_event_teardown_complete(struct ath12k_base *ab,
kfree(tb);
}
+#ifdef CONFIG_ATH12K_DEBUGFS
+static int ath12k_wmi_tpc_stats_copy_buffer(struct ath12k_base *ab,
+ const void *ptr, u16 tag, u16 len,
+ struct wmi_tpc_stats_arg *tpc_stats)
+{
+ u32 len1, len2, len3, len4;
+ s16 *dst_ptr;
+ s8 *dst_ptr_ctl;
+
+ len1 = le32_to_cpu(tpc_stats->max_reg_allowed_power.tpc_reg_pwr.reg_array_len);
+ len2 = le32_to_cpu(tpc_stats->rates_array1.tpc_rates_array.rate_array_len);
+ len3 = le32_to_cpu(tpc_stats->rates_array2.tpc_rates_array.rate_array_len);
+ len4 = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.ctl_array_len);
+
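+ /* event_count identifies which segment of the multi-event TPC report this buffer carries */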
+ switch (tpc_stats->event_count) {
+ case ATH12K_TPC_STATS_CONFIG_REG_PWR_EVENT:
+ if (len1 > len)
+ return -ENOBUFS;
+
+ if (tpc_stats->tlvs_rcvd & WMI_TPC_REG_PWR_ALLOWED) {
+ dst_ptr = tpc_stats->max_reg_allowed_power.reg_pwr_array;
+ memcpy(dst_ptr, ptr, len1);
+ }
+ break;
+ case ATH12K_TPC_STATS_RATES_EVENT1:
+ if (len2 > len)
+ return -ENOBUFS;
+
+ if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY1) {
+ dst_ptr = tpc_stats->rates_array1.rate_array;
+ memcpy(dst_ptr, ptr, len2);
+ }
+ break;
+ case ATH12K_TPC_STATS_RATES_EVENT2:
+ if (len3 > len)
+ return -ENOBUFS;
+
+ if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY2) {
+ dst_ptr = tpc_stats->rates_array2.rate_array;
+ memcpy(dst_ptr, ptr, len3);
+ }
+ break;
+ case ATH12K_TPC_STATS_CTL_TABLE_EVENT:
+ if (len4 > len)
+ return -ENOBUFS;
+
+ if (tpc_stats->tlvs_rcvd & WMI_TPC_CTL_PWR_ARRAY) {
+ dst_ptr_ctl = tpc_stats->ctl_array.ctl_pwr_table;
+ memcpy(dst_ptr_ctl, ptr, len4);
+ }
+ break;
+ }
+ return 0;
+}
+
+static int ath12k_tpc_get_reg_pwr(struct ath12k_base *ab,
+ struct wmi_tpc_stats_arg *tpc_stats,
+ struct wmi_max_reg_power_fixed_params *ev)
+{
+ struct wmi_max_reg_power_allowed_arg *reg_pwr;
+ u32 total_size;
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "Received reg power array type %d length %d for tpc stats\n",
+ le32_to_cpu(ev->reg_power_type), le32_to_cpu(ev->reg_array_len));
+
+ switch (le32_to_cpu(ev->reg_power_type)) {
+ case TPC_STATS_REG_PWR_ALLOWED_TYPE:
+ reg_pwr = &tpc_stats->max_reg_allowed_power;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Each entry is 2 bytes wide, hence multiply the product of the dimensions by 2 */
+ total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) *
+ le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4) * 2;
+ if (le32_to_cpu(ev->reg_array_len) != total_size) {
+ ath12k_warn(ab,
+ "Total size and reg_array_len don't match for tpc stats\n");
+ return -EINVAL;
+ }
+
+ memcpy(&reg_pwr->tpc_reg_pwr, ev, sizeof(struct wmi_max_reg_power_fixed_params));
+
+ reg_pwr->reg_pwr_array = kzalloc(le32_to_cpu(reg_pwr->tpc_reg_pwr.reg_array_len),
+ GFP_ATOMIC);
+ if (!reg_pwr->reg_pwr_array)
+ return -ENOMEM;
+
+ tpc_stats->tlvs_rcvd |= WMI_TPC_REG_PWR_ALLOWED;
+
+ return 0;
+}
+
+static int ath12k_tpc_get_rate_array(struct ath12k_base *ab,
+ struct wmi_tpc_stats_arg *tpc_stats,
+ struct wmi_tpc_rates_array_fixed_params *ev)
+{
+ struct wmi_tpc_rates_array_arg *rates_array;
+ u32 flag = 0, rate_array_len;
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "Received rates array type %d length %d for tpc stats\n",
+ le32_to_cpu(ev->rate_array_type), le32_to_cpu(ev->rate_array_len));
+
+ switch (le32_to_cpu(ev->rate_array_type)) {
+ case ATH12K_TPC_STATS_RATES_ARRAY1:
+ rates_array = &tpc_stats->rates_array1;
+ flag = WMI_TPC_RATES_ARRAY1;
+ break;
+ case ATH12K_TPC_STATS_RATES_ARRAY2:
+ rates_array = &tpc_stats->rates_array2;
+ flag = WMI_TPC_RATES_ARRAY2;
+ break;
+ default:
+ ath12k_warn(ab,
+ "Received invalid type of rates array for tpc stats\n");
+ return -EINVAL;
+ }
+ memcpy(&rates_array->tpc_rates_array, ev,
+ sizeof(struct wmi_tpc_rates_array_fixed_params));
+ rate_array_len = le32_to_cpu(rates_array->tpc_rates_array.rate_array_len);
+ rates_array->rate_array = kzalloc(rate_array_len, GFP_ATOMIC);
+ if (!rates_array->rate_array)
+ return -ENOMEM;
+
+ tpc_stats->tlvs_rcvd |= flag;
+ return 0;
+}
+
+static int ath12k_tpc_get_ctl_pwr_tbl(struct ath12k_base *ab,
+ struct wmi_tpc_stats_arg *tpc_stats,
+ struct wmi_tpc_ctl_pwr_fixed_params *ev)
+{
+ struct wmi_tpc_ctl_pwr_table_arg *ctl_array;
+ u32 total_size, ctl_array_len, flag = 0;
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "Received ctl array type %d length %d for tpc stats\n",
+ le32_to_cpu(ev->ctl_array_type), le32_to_cpu(ev->ctl_array_len));
+
+ switch (le32_to_cpu(ev->ctl_array_type)) {
+ case ATH12K_TPC_STATS_CTL_ARRAY:
+ ctl_array = &tpc_stats->ctl_array;
+ flag = WMI_TPC_CTL_PWR_ARRAY;
+ break;
+ default:
+ ath12k_warn(ab,
+ "Received invalid type of ctl pwr table for tpc stats\n");
+ return -EINVAL;
+ }
+
+ total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) *
+ le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4);
+ if (le32_to_cpu(ev->ctl_array_len) != total_size) {
+ ath12k_warn(ab,
+ "Total size and ctl_array_len don't match for tpc stats\n");
+ return -EINVAL;
+ }
+
+ memcpy(&ctl_array->tpc_ctl_pwr, ev, sizeof(struct wmi_tpc_ctl_pwr_fixed_params));
+ ctl_array_len = le32_to_cpu(ctl_array->tpc_ctl_pwr.ctl_array_len);
+ ctl_array->ctl_pwr_table = kzalloc(ctl_array_len, GFP_ATOMIC);
+ if (!ctl_array->ctl_pwr_table)
+ return -ENOMEM;
+
+ tpc_stats->tlvs_rcvd |= flag;
+ return 0;
+}
+
+static int ath12k_wmi_tpc_stats_subtlv_parser(struct ath12k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tpc_rates_array_fixed_params *tpc_rates_array;
+ struct wmi_max_reg_power_fixed_params *tpc_reg_pwr;
+ struct wmi_tpc_ctl_pwr_fixed_params *tpc_ctl_pwr;
+ struct wmi_tpc_stats_arg *tpc_stats = data;
+ struct wmi_tpc_config_params *tpc_config;
+ int ret = 0;
+
+ if (!tpc_stats) {
+ ath12k_warn(ab, "tpc stats memory unavailable\n");
+ return -EINVAL;
+ }
+
+ switch (tag) {
+ case WMI_TAG_TPC_STATS_CONFIG_EVENT:
+ tpc_config = (struct wmi_tpc_config_params *)ptr;
+ memcpy(&tpc_stats->tpc_config, tpc_config,
+ sizeof(struct wmi_tpc_config_params));
+ break;
+ case WMI_TAG_TPC_STATS_REG_PWR_ALLOWED:
+ tpc_reg_pwr = (struct wmi_max_reg_power_fixed_params *)ptr;
+ ret = ath12k_tpc_get_reg_pwr(ab, tpc_stats, tpc_reg_pwr);
+ break;
+ case WMI_TAG_TPC_STATS_RATES_ARRAY:
+ tpc_rates_array = (struct wmi_tpc_rates_array_fixed_params *)ptr;
+ ret = ath12k_tpc_get_rate_array(ab, tpc_stats, tpc_rates_array);
+ break;
+ case WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT:
+ tpc_ctl_pwr = (struct wmi_tpc_ctl_pwr_fixed_params *)ptr;
+ ret = ath12k_tpc_get_ctl_pwr_tbl(ab, tpc_stats, tpc_ctl_pwr);
+ break;
+ default:
+ ath12k_warn(ab,
+ "Received invalid tag for tpc stats in subtlvs\n");
+ return -EINVAL;
+ }
+ return ret;
+}
+
+static int ath12k_wmi_tpc_stats_event_parser(struct ath12k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tpc_stats_arg *tpc_stats = (struct wmi_tpc_stats_arg *)data;
+ int ret;
+
+ switch (tag) {
+ case WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM:
+ ret = 0;
+ /* Fixed param is already processed */
+ break;
+ case WMI_TAG_ARRAY_STRUCT:
+ /* A zero length is expected for an array of structs when
+ * there is no content of that type to pack inside the TLV
+ */
+ if (len == 0)
+ return 0;
+ ret = ath12k_wmi_tlv_iter(ab, ptr, len,
+ ath12k_wmi_tpc_stats_subtlv_parser,
+ tpc_stats);
+ break;
+ case WMI_TAG_ARRAY_INT16:
+ if (len == 0)
+ return 0;
+ ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr,
+ WMI_TAG_ARRAY_INT16,
+ len, tpc_stats);
+ break;
+ case WMI_TAG_ARRAY_BYTE:
+ if (len == 0)
+ return 0;
+ ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr,
+ WMI_TAG_ARRAY_BYTE,
+ len, tpc_stats);
+ break;
+ default:
+ ath12k_warn(ab, "Received invalid tag for tpc stats\n");
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+void ath12k_wmi_free_tpc_stats_mem(struct ath12k *ar)
+{
+ struct wmi_tpc_stats_arg *tpc_stats = ar->debug.tpc_stats;
+
+ lockdep_assert_held(&ar->data_lock);
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc stats mem free\n");
+ if (tpc_stats) {
+ kfree(tpc_stats->max_reg_allowed_power.reg_pwr_array);
+ kfree(tpc_stats->rates_array1.rate_array);
+ kfree(tpc_stats->rates_array2.rate_array);
+ kfree(tpc_stats->ctl_array.ctl_pwr_table);
+ kfree(tpc_stats);
+ ar->debug.tpc_stats = NULL;
+ }
+}
+
+static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *fixed_param;
+ struct wmi_tpc_stats_arg *tpc_stats;
+ const struct wmi_tlv *tlv;
+ void *ptr = skb->data;
+ struct ath12k *ar;
+ u16 tlv_tag;
+ u32 event_count;
+ int ret;
+
+ if (!skb->data) {
+ ath12k_warn(ab, "No data present in tpc stats event\n");
+ return;
+ }
+
+ if (skb->len < (sizeof(*fixed_param) + TLV_HDR_SIZE)) {
+ ath12k_warn(ab, "TPC stats event size invalid\n");
+ return;
+ }
+
+ tlv = (struct wmi_tlv *)ptr;
+ tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
+ ptr += sizeof(*tlv);
+
+ if (tlv_tag != WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM) {
+ ath12k_warn(ab, "TPC stats without fixed param tlv at start\n");
+ return;
+ }
+
+ fixed_param = (struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *)ptr;
+ rcu_read_lock();
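+ /* This event reports a 0-based pdev id, hence the +1 to map it to the host pdev id */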
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(fixed_param->pdev_id) + 1);
+ if (!ar) {
+ ath12k_warn(ab, "Failed to get ar for tpc stats\n");
+ rcu_read_unlock();
+ return;
+ }
+ spin_lock_bh(&ar->data_lock);
+ if (!ar->debug.tpc_request) {
+ /* The event arrived either without a request or after the
+ * request timed out; if memory is already allocated, free it
+ */
+ if (ar->debug.tpc_stats) {
+ ath12k_warn(ab, "Freeing memory for tpc_stats\n");
+ ath12k_wmi_free_tpc_stats_mem(ar);
+ }
+ goto unlock;
+ }
+
+ event_count = le32_to_cpu(fixed_param->event_count);
+ if (event_count == 0) {
+ if (ar->debug.tpc_stats) {
+ ath12k_warn(ab,
+ "stale tpc stats memory already allocated\n");
+ goto unlock;
+ }
+ ar->debug.tpc_stats = kzalloc(sizeof(*ar->debug.tpc_stats),
+ GFP_ATOMIC);
+ if (!ar->debug.tpc_stats) {
+ ath12k_warn(ab,
+ "Failed to allocate memory for tpc stats\n");
+ goto unlock;
+ }
+ }
+
+ tpc_stats = ar->debug.tpc_stats;
+ if (!tpc_stats) {
+ ath12k_warn(ab, "tpc stats memory unavailable\n");
+ goto unlock;
+ }
+
+ if (event_count != 0) {
+ if (event_count != tpc_stats->event_count + 1) {
+ ath12k_warn(ab,
+ "unexpected tpc stats event count %d (expected %d)\n",
+ event_count, tpc_stats->event_count + 1);
+ goto unlock;
+ }
+ }
+ tpc_stats->pdev_id = le32_to_cpu(fixed_param->pdev_id);
+ tpc_stats->end_of_event = le32_to_cpu(fixed_param->end_of_event);
+ tpc_stats->event_count = le32_to_cpu(fixed_param->event_count);
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "tpc stats event_count %d\n",
+ tpc_stats->event_count);
+ ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath12k_wmi_tpc_stats_event_parser,
+ tpc_stats);
+ if (ret) {
+ ath12k_wmi_free_tpc_stats_mem(ar);
+ ath12k_warn(ab, "failed to parse tpc_stats tlv: %d\n", ret);
+ goto unlock;
+ }
+
+ if (tpc_stats->end_of_event)
+ complete(&ar->debug.tpc_complete);
+
+unlock:
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+}
+#else
+static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+}
+#endif
+
static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -7571,6 +8637,9 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
case WMI_MLO_TEARDOWN_COMPLETE_EVENTID:
ath12k_wmi_event_teardown_complete(ab, skb);
break;
+ case WMI_HALPHY_STATS_CTRL_PATH_EVENTID:
+ ath12k_wmi_process_tpc_stats(ab, skb);
+ break;
/* add Unsupported events (rare) here */
case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
@@ -7584,7 +8653,12 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
/* debug might flood hence silently ignore (no-op) */
break;
- /* TODO: Add remaining events */
+ case WMI_PDEV_UTF_EVENTID:
+ if (test_bit(ATH12K_FLAG_FTM_SEGMENTED, &ab->dev_flags))
+ ath12k_tm_wmi_event_segmented(ab, id, skb);
+ else
+ ath12k_tm_wmi_event_unsegmented(ab, id, skb);
+ break;
default:
ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
break;
@@ -7721,6 +8795,74 @@ int ath12k_wmi_simulate_radar(struct ath12k *ar)
return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
}
+int ath12k_wmi_send_tpc_stats_request(struct ath12k *ar,
+ enum wmi_halphy_ctrl_path_stats_id tpc_stats_type)
+{
+ struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *cmd;
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ __le32 *pdev_id;
+ u32 buf_len;
+ void *ptr;
+ int ret;
+
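+ /* Room for the fixed params, a one-entry pdev id array and two empty arrays (vdev ids, peer MACs) */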
+ buf_len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(u32) + TLV_HDR_SIZE + TLV_HDR_SIZE;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
+ if (!skb)
+ return -ENOMEM;
+ cmd = (struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HALPHY_CTRL_PATH_CMD_FIXED_PARAM,
+ sizeof(*cmd));
+
+ cmd->stats_id_mask = cpu_to_le32(WMI_REQ_CTRL_PATH_PDEV_TX_STAT);
+ cmd->action = cpu_to_le32(WMI_REQUEST_CTRL_PATH_STAT_GET);
+ cmd->subid = cpu_to_le32(tpc_stats_type);
+
+ ptr = skb->data + sizeof(*cmd);
+
+ /* The TLV arrays below optionally follow this fixed param TLV structure:
+ * 1. ARRAY_UINT32 pdev_ids[]
+ * If this array is present and non-zero length, stats should only
+ * be provided from the pdevs identified in the array.
+ * 2. ARRAY_UINT32 vdev_ids[]
+ * If this array is present and non-zero length, stats should only
+ * be provided from the vdevs identified in the array.
+ * 3. ath12k_wmi_mac_addr_params peer_macaddr[];
+ * If this array is present and non-zero length, stats should only
+ * be provided from the peers with the MAC addresses specified
+ * in the array
+ */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));
+ ptr += TLV_HDR_SIZE;
+
+ pdev_id = ptr;
+ *pdev_id = cpu_to_le32(ath12k_mac_get_target_pdev_id(ar));
+ ptr += sizeof(*pdev_id);
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
+ ptr += TLV_HDR_SIZE;
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, 0);
+ ptr += TLV_HDR_SIZE;
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to submit WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID: %d\n", ret);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI get TPC STATS sent on pdev %d\n",
+ ar->pdev->pdev_id);
+
+ return ret;
+}
+
int ath12k_wmi_connect(struct ath12k_base *ab)
{
u32 i;
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
index 45fe699ce8a5..1ba33e30ddd2 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.h
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_WMI_H
@@ -25,6 +25,7 @@
struct ath12k_base;
struct ath12k;
struct ath12k_link_vif;
+struct ath12k_fw_stats;
/* There is no signed version of __le32, so for a temporary solution come
* up with our own version. The idea is from fs/ntfs/endian.h.
@@ -516,6 +517,9 @@ enum wmi_tlv_cmd_id {
WMI_REQUEST_RCPI_CMDID,
WMI_REQUEST_PEER_STATS_INFO_CMDID,
WMI_REQUEST_RADIO_CHAN_STATS_CMDID,
+ WMI_REQUEST_WLM_STATS_CMDID,
+ WMI_REQUEST_CTRL_PATH_STATS_CMDID,
+ WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID = WMI_REQUEST_CTRL_PATH_STATS_CMDID + 3,
WMI_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_ARP_NS_OFL),
WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID,
WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID,
@@ -785,6 +789,9 @@ enum wmi_tlv_event_id {
WMI_UPDATE_RCPI_EVENTID,
WMI_PEER_STATS_INFO_EVENTID,
WMI_RADIO_CHAN_STATS_EVENTID,
+ WMI_WLM_STATS_EVENTID,
+ WMI_CTRL_PATH_STATS_EVENTID,
+ WMI_HALPHY_STATS_CTRL_PATH_EVENTID,
WMI_NLO_MATCH_EVENTID = WMI_TLV_CMD(WMI_GRP_NLO_OFL),
WMI_NLO_SCAN_COMPLETE_EVENTID,
WMI_APFIND_EVENTID,
@@ -1191,6 +1198,7 @@ enum wmi_tlv_tag {
WMI_TAG_ARRAY_BYTE,
WMI_TAG_ARRAY_STRUCT,
WMI_TAG_ARRAY_FIXED_STRUCT,
+ WMI_TAG_ARRAY_INT16,
WMI_TAG_LAST_ARRAY_ENUM = 31,
WMI_TAG_SERVICE_READY_EVENT,
WMI_TAG_HAL_REG_CAPABILITIES,
@@ -1941,6 +1949,12 @@ enum wmi_tlv_tag {
WMI_TAG_MAC_PHY_CAPABILITIES_EXT = 0x36F,
WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9,
WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT,
+ WMI_TAG_TPC_STATS_GET_CMD = 0x38B,
+ WMI_TAG_TPC_STATS_EVENT_FIXED_PARAM,
+ WMI_TAG_TPC_STATS_CONFIG_EVENT,
+ WMI_TAG_TPC_STATS_REG_PWR_ALLOWED,
+ WMI_TAG_TPC_STATS_RATES_ARRAY,
+ WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT,
WMI_TAG_EHT_RATE_SET = 0x3C4,
WMI_TAG_DCS_AWGN_INT_TYPE = 0x3C5,
WMI_TAG_MLO_TX_SEND_PARAMS,
@@ -1958,6 +1972,8 @@ enum wmi_tlv_tag {
WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD = 0x3D8,
WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD = 0x3D9,
WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD = 0x3FB,
+ WMI_TAG_HALPHY_CTRL_PATH_CMD_FIXED_PARAM = 0x442,
+ WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM,
WMI_TAG_MAX
};
@@ -3624,6 +3640,26 @@ struct ath12k_wmi_p2p_noa_info {
struct ath12k_wmi_p2p_noa_descriptor descriptors[WMI_P2P_MAX_NOA_DESCRIPTORS];
} __packed;
+#define MAX_WMI_UTF_LEN 252
+
+struct ath12k_wmi_ftm_seg_hdr_params {
+ __le32 len;
+ __le32 msgref;
+ __le32 segmentinfo;
+ __le32 pdev_id;
+} __packed;
+
+struct ath12k_wmi_ftm_cmd {
+ __le32 tlv_header;
+ struct ath12k_wmi_ftm_seg_hdr_params seg_hdr;
+ u8 data[];
+} __packed;
+
+struct ath12k_wmi_ftm_event {
+ struct ath12k_wmi_ftm_seg_hdr_params seg_hdr;
+ u8 data[];
+} __packed;
+
#define WMI_BEACON_TX_BUFFER_SIZE 512
#define WMI_EMA_BEACON_CNT GENMASK(7, 0)
@@ -4604,6 +4640,7 @@ enum wmi_rate_preamble {
WMI_RATE_PREAMBLE_HT,
WMI_RATE_PREAMBLE_VHT,
WMI_RATE_PREAMBLE_HE,
+ WMI_RATE_PREAMBLE_EHT,
};
/**
@@ -5628,6 +5665,245 @@ enum wmi_sta_keepalive_method {
#define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT 30
#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0
+struct wmi_stats_event {
+ __le32 stats_id;
+ __le32 num_pdev_stats;
+ __le32 num_vdev_stats;
+ __le32 num_peer_stats;
+ __le32 num_bcnflt_stats;
+ __le32 num_chan_stats;
+ __le32 num_mib_stats;
+ __le32 pdev_id;
+ __le32 num_bcn_stats;
+ __le32 num_peer_extd_stats;
+ __le32 num_peer_extd2_stats;
+} __packed;
+
+enum wmi_stats_id {
+ WMI_REQUEST_PDEV_STAT = BIT(2),
+ WMI_REQUEST_VDEV_STAT = BIT(3),
+ WMI_REQUEST_BCN_STAT = BIT(11),
+};
+
+struct wmi_request_stats_cmd {
+ __le32 tlv_header;
+ __le32 stats_id;
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+ __le32 pdev_id;
+} __packed;
+
+#define WLAN_MAX_AC 4
+#define MAX_TX_RATE_VALUES 10
+
+struct wmi_vdev_stats_params {
+ __le32 vdev_id;
+ __le32 beacon_snr;
+ __le32 data_snr;
+ __le32 num_tx_frames[WLAN_MAX_AC];
+ __le32 num_rx_frames;
+ __le32 num_tx_frames_retries[WLAN_MAX_AC];
+ __le32 num_tx_frames_failures[WLAN_MAX_AC];
+ __le32 num_rts_fail;
+ __le32 num_rts_success;
+ __le32 num_rx_err;
+ __le32 num_rx_discard;
+ __le32 num_tx_not_acked;
+ __le32 tx_rate_history[MAX_TX_RATE_VALUES];
+ __le32 beacon_rssi_history[MAX_TX_RATE_VALUES];
+} __packed;
+
+struct ath12k_wmi_bcn_stats_params {
+ __le32 vdev_id;
+ __le32 tx_bcn_succ_cnt;
+ __le32 tx_bcn_outage_cnt;
+} __packed;
+
+struct ath12k_wmi_pdev_base_stats_params {
+ a_sle32 chan_nf;
+ __le32 tx_frame_count; /* Cycles spent transmitting frames */
+ __le32 rx_frame_count; /* Cycles spent receiving frames */
+ __le32 rx_clear_count; /* Total channel busy time, evidently */
+ __le32 cycle_count; /* Total on-channel time */
+ __le32 phy_err_count;
+ __le32 chan_tx_pwr;
+} __packed;
+
+struct ath12k_wmi_pdev_tx_stats_params {
+ a_sle32 comp_queued;
+ a_sle32 comp_delivered;
+ a_sle32 msdu_enqued;
+ a_sle32 mpdu_enqued;
+ a_sle32 wmm_drop;
+ a_sle32 local_enqued;
+ a_sle32 local_freed;
+ a_sle32 hw_queued;
+ a_sle32 hw_reaped;
+ a_sle32 underrun;
+ a_sle32 tx_abort;
+ a_sle32 mpdus_requed;
+ __le32 tx_ko;
+ __le32 data_rc;
+ __le32 self_triggers;
+ __le32 sw_retry_failure;
+ __le32 illgl_rate_phy_err;
+ __le32 pdev_cont_xretry;
+ __le32 pdev_tx_timeout;
+ __le32 pdev_resets;
+ __le32 stateless_tid_alloc_failure;
+ __le32 phy_underrun;
+ __le32 txop_ovf;
+} __packed;
+
+struct ath12k_wmi_pdev_rx_stats_params {
+ a_sle32 mid_ppdu_route_change;
+ a_sle32 status_rcvd;
+ a_sle32 r0_frags;
+ a_sle32 r1_frags;
+ a_sle32 r2_frags;
+ a_sle32 r3_frags;
+ a_sle32 htt_msdus;
+ a_sle32 htt_mpdus;
+ a_sle32 loc_msdus;
+ a_sle32 loc_mpdus;
+ a_sle32 oversize_amsdu;
+ a_sle32 phy_errs;
+ a_sle32 phy_err_drop;
+ a_sle32 mpdu_errs;
+} __packed;
+
+struct ath12k_wmi_pdev_stats_params {
+ struct ath12k_wmi_pdev_base_stats_params base;
+ struct ath12k_wmi_pdev_tx_stats_params tx;
+ struct ath12k_wmi_pdev_rx_stats_params rx;
+} __packed;
+
+struct ath12k_fw_stats_req_params {
+ u32 stats_id;
+ u32 vdev_id;
+ u32 pdev_id;
+};
+
+#define WMI_REQ_CTRL_PATH_PDEV_TX_STAT 1
+#define WMI_REQUEST_CTRL_PATH_STAT_GET 1
+
+#define WMI_TPC_CONFIG BIT(1)
+#define WMI_TPC_REG_PWR_ALLOWED BIT(2)
+#define WMI_TPC_RATES_ARRAY1 BIT(3)
+#define WMI_TPC_RATES_ARRAY2 BIT(4)
+#define WMI_TPC_RATES_DL_OFDMA_ARRAY BIT(5)
+#define WMI_TPC_CTL_PWR_ARRAY BIT(6)
+#define WMI_TPC_CONFIG_PARAM 0x1
+#define ATH12K_TPC_RATE_ARRAY_MU GENMASK(15, 8)
+#define ATH12K_TPC_RATE_ARRAY_SU GENMASK(7, 0)
+#define TPC_STATS_REG_PWR_ALLOWED_TYPE 0
+
+enum wmi_halphy_ctrl_path_stats_id {
+ WMI_HALPHY_PDEV_TX_SU_STATS = 0,
+ WMI_HALPHY_PDEV_TX_SUTXBF_STATS,
+ WMI_HALPHY_PDEV_TX_MU_STATS,
+ WMI_HALPHY_PDEV_TX_MUTXBF_STATS,
+ WMI_HALPHY_PDEV_TX_STATS_MAX,
+};
+
+enum ath12k_wmi_tpc_stats_rates_array {
+ ATH12K_TPC_STATS_RATES_ARRAY1,
+ ATH12K_TPC_STATS_RATES_ARRAY2,
+};
+
+enum ath12k_wmi_tpc_stats_ctl_array {
+ ATH12K_TPC_STATS_CTL_ARRAY,
+ ATH12K_TPC_STATS_CTL_160ARRAY,
+};
+
+enum ath12k_wmi_tpc_stats_events {
+ ATH12K_TPC_STATS_CONFIG_REG_PWR_EVENT,
+ ATH12K_TPC_STATS_RATES_EVENT1,
+ ATH12K_TPC_STATS_RATES_EVENT2,
+ ATH12K_TPC_STATS_CTL_TABLE_EVENT
+};
+
+struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params {
+ __le32 tlv_header;
+ __le32 stats_id_mask;
+ __le32 request_id;
+ __le32 action;
+ __le32 subid;
+} __packed;
+
+struct ath12k_wmi_pdev_tpc_stats_event_fixed_params {
+ __le32 pdev_id;
+ __le32 end_of_event;
+ __le32 event_count;
+} __packed;
+
+struct wmi_tpc_config_params {
+ __le32 reg_domain;
+ __le32 chan_freq;
+ __le32 phy_mode;
+ __le32 twice_antenna_reduction;
+ __le32 twice_max_reg_power;
+ __le32 twice_antenna_gain;
+ __le32 power_limit;
+ __le32 rate_max;
+ __le32 num_tx_chain;
+ __le32 ctl;
+ __le32 flags;
+ __le32 caps;
+} __packed;
+
+struct wmi_max_reg_power_fixed_params {
+ __le32 reg_power_type;
+ __le32 reg_array_len;
+ __le32 d1;
+ __le32 d2;
+ __le32 d3;
+ __le32 d4;
+} __packed;
+
+struct wmi_max_reg_power_allowed_arg {
+ struct wmi_max_reg_power_fixed_params tpc_reg_pwr;
+ s16 *reg_pwr_array;
+};
+
+struct wmi_tpc_rates_array_fixed_params {
+ __le32 rate_array_type;
+ __le32 rate_array_len;
+} __packed;
+
+struct wmi_tpc_rates_array_arg {
+ struct wmi_tpc_rates_array_fixed_params tpc_rates_array;
+ s16 *rate_array;
+};
+
+struct wmi_tpc_ctl_pwr_fixed_params {
+ __le32 ctl_array_type;
+ __le32 ctl_array_len;
+ __le32 end_of_ctl_pwr;
+ __le32 ctl_pwr_count;
+ __le32 d1;
+ __le32 d2;
+ __le32 d3;
+ __le32 d4;
+} __packed;
+
+struct wmi_tpc_ctl_pwr_table_arg {
+ struct wmi_tpc_ctl_pwr_fixed_params tpc_ctl_pwr;
+ s8 *ctl_pwr_table;
+};
+
+struct wmi_tpc_stats_arg {
+ u32 pdev_id;
+ u32 event_count;
+ u32 end_of_event;
+ u32 tlvs_rcvd;
+ struct wmi_max_reg_power_allowed_arg max_reg_allowed_power;
+ struct wmi_tpc_rates_array_arg rates_array1;
+ struct wmi_tpc_rates_array_arg rates_array2;
+ struct wmi_tpc_config_params tpc_config;
+ struct wmi_tpc_ctl_pwr_table_arg ctl_array;
+};
+
void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
struct ath12k_wmi_resource_config_arg *config);
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@@ -5639,7 +5915,7 @@ int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
struct sk_buff *frame);
int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
const u8 *p2p_ie);
-int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
+int ath12k_wmi_bcn_tmpl(struct ath12k_link_vif *arvif,
struct ieee80211_mutable_offsets *offs,
struct sk_buff *bcn,
struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args);
@@ -5753,6 +6029,13 @@ int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id,
const u8 *buf, size_t buf_len);
int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table);
int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table);
+int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id,
+ u32 vdev_id, u32 pdev_id);
+__le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len);
+
+int ath12k_wmi_send_tpc_stats_request(struct ath12k *ar,
+ enum wmi_halphy_ctrl_path_stats_id tpc_stats_type);
+void ath12k_wmi_free_tpc_stats_mem(struct ath12k *ar);
static inline u32
ath12k_wmi_caps_ext_get_pdev_id(const struct ath12k_wmi_caps_ext_params *param)
@@ -5806,5 +6089,8 @@ int ath12k_wmi_sta_keepalive(struct ath12k *ar,
int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params);
int ath12k_wmi_mlo_ready(struct ath12k *ar);
int ath12k_wmi_mlo_teardown(struct ath12k *ar);
+void ath12k_wmi_fw_stats_dump(struct ath12k *ar,
+ struct ath12k_fw_stats *fw_stats, u32 stats_id,
+ char *buf);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/wow.c b/drivers/net/wireless/ath/ath12k/wow.c
index 9e1c0bfd212f..dce9bd0bcaef 100644
--- a/drivers/net/wireless/ath/ath12k/wow.c
+++ b/drivers/net/wireless/ath/ath12k/wow.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/delay.h>
@@ -990,6 +990,7 @@ exit:
case ATH12K_HW_STATE_RESTARTING:
case ATH12K_HW_STATE_RESTARTED:
case ATH12K_HW_STATE_WEDGED:
+ case ATH12K_HW_STATE_TM:
ath12k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n",
ah->state);
ret = -EIO;
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index a728cc0387df..6e38aa7351e3 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -274,7 +274,6 @@ struct ath_node {
struct ath_tx_control {
struct ath_txq *txq;
- struct ath_node *an;
struct ieee80211_sta *sta;
u8 paprd;
};
@@ -1018,7 +1017,7 @@ struct ath_softc {
u8 gtt_cnt;
u32 intrstatus;
- u32 rx_active_check_time;
+ unsigned long rx_active_check_time;
u32 rx_active_count;
u16 ps_flags; /* PS_* */
bool ps_enabled;
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index 628eeec4b82f..300d178830ad 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -628,12 +628,12 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
else
RX_STAT_INC(sc, rx_spectral_sample_err);
- memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN);
-
/* Mix the received bins to the /dev/random
* pool
*/
add_device_randomness(sample_buf, num_bins);
+
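+ /* Clear the sample buffer only after its contents have been mixed into the entropy pool */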
+ memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN);
}
/* Process a normal frame */
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index f9e77c4624d9..01e0dffbf57e 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -647,7 +647,9 @@ static int ath9k_of_init(struct ath_softc *sc)
ah->ah_flags |= AH_NO_EEP_SWAP;
}
- of_get_mac_address(np, common->macaddr);
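+ /* The MAC address may come from an NVMEM cell that is not ready yet; propagate the deferral */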
+ ret = of_get_mac_address(np, common->macaddr);
+ if (ret == -EPROBE_DEFER)
+ return ret;
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index db07ce6dbc08..0ac9212e42f7 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -2291,19 +2291,10 @@ static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = txctl->sta;
struct ieee80211_vif *vif = info->control.vif;
- struct ath_vif *avp;
struct ath_softc *sc = hw->priv;
int frmlen = skb->len + FCS_LEN;
int padpos, padsize;
- /* NOTE: sta can be NULL according to net/mac80211.h */
- if (sta)
- txctl->an = (struct ath_node *)sta->drv_priv;
- else if (vif && ieee80211_is_data(hdr->frame_control)) {
- avp = (void *)vif->drv_priv;
- txctl->an = &avp->mcast_node;
- }
-
if (info->control.hw_key)
frmlen += info->control.hw_key->icv_len;
diff --git a/drivers/net/wireless/ath/ath11k/testmode_i.h b/drivers/net/wireless/ath/testmode_i.h
index 91b83873d660..980ef2f3f05f 100644
--- a/drivers/net/wireless/ath/ath11k/testmode_i.h
+++ b/drivers/net/wireless/ath/testmode_i.h
@@ -1,59 +1,59 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
-/* "API" level of the ath11k testmode interface. Bump it after every
+/* "API" level of the ath testmode interface. Bump it after every
* incompatible interface change.
*/
-#define ATH11K_TESTMODE_VERSION_MAJOR 1
+#define ATH_TESTMODE_VERSION_MAJOR 1
/* Bump this after every _compatible_ interface change, for example
* addition of a new command or an attribute.
*/
-#define ATH11K_TESTMODE_VERSION_MINOR 1
+#define ATH_TESTMODE_VERSION_MINOR 1
-#define ATH11K_TM_DATA_MAX_LEN 5000
-#define ATH11K_FTM_EVENT_MAX_BUF_LENGTH 2048
+#define ATH_TM_DATA_MAX_LEN 5000
+#define ATH_FTM_EVENT_MAX_BUF_LENGTH 2048
-enum ath11k_tm_attr {
- __ATH11K_TM_ATTR_INVALID = 0,
- ATH11K_TM_ATTR_CMD = 1,
- ATH11K_TM_ATTR_DATA = 2,
- ATH11K_TM_ATTR_WMI_CMDID = 3,
- ATH11K_TM_ATTR_VERSION_MAJOR = 4,
- ATH11K_TM_ATTR_VERSION_MINOR = 5,
- ATH11K_TM_ATTR_WMI_OP_VERSION = 6,
+enum ath_tm_attr {
+ __ATH_TM_ATTR_INVALID = 0,
+ ATH_TM_ATTR_CMD = 1,
+ ATH_TM_ATTR_DATA = 2,
+ ATH_TM_ATTR_WMI_CMDID = 3,
+ ATH_TM_ATTR_VERSION_MAJOR = 4,
+ ATH_TM_ATTR_VERSION_MINOR = 5,
+ ATH_TM_ATTR_WMI_OP_VERSION = 6,
/* keep last */
- __ATH11K_TM_ATTR_AFTER_LAST,
- ATH11K_TM_ATTR_MAX = __ATH11K_TM_ATTR_AFTER_LAST - 1,
+ __ATH_TM_ATTR_AFTER_LAST,
+ ATH_TM_ATTR_MAX = __ATH_TM_ATTR_AFTER_LAST - 1,
};
-/* All ath11k testmode interface commands specified in
- * ATH11K_TM_ATTR_CMD
+/* All ath testmode interface commands specified in
+ * ATH_TM_ATTR_CMD
*/
-enum ath11k_tm_cmd {
- /* Returns the supported ath11k testmode interface version in
- * ATH11K_TM_ATTR_VERSION. Always guaranteed to work. User space
+enum ath_tm_cmd {
+ /* Returns the supported ath testmode interface version in
+ * ATH_TM_ATTR_VERSION. Always guaranteed to work. User space
* uses this to verify it's using the correct version of the
* testmode interface
*/
- ATH11K_TM_CMD_GET_VERSION = 0,
+ ATH_TM_CMD_GET_VERSION = 0,
/* The command used to transmit a WMI command to the firmware and
* the event to receive WMI events from the firmware. Without
* struct wmi_cmd_hdr header, only the WMI payload. Command id is
- * provided with ATH11K_TM_ATTR_WMI_CMDID and payload in
- * ATH11K_TM_ATTR_DATA.
+ * provided with ATH_TM_ATTR_WMI_CMDID and payload in
+ * ATH_TM_ATTR_DATA.
*/
- ATH11K_TM_CMD_WMI = 1,
+ ATH_TM_CMD_WMI = 1,
/* Boots the UTF firmware, the netdev interface must be down at the
* time.
*/
- ATH11K_TM_CMD_TESTMODE_START = 2,
+ ATH_TM_CMD_TESTMODE_START = 2,
/* The command used to transmit a FTM WMI command to the firmware
* and the event to receive WMI events from the firmware. The data
@@ -62,5 +62,5 @@ enum ath11k_tm_cmd {
* The data payload size could be large and the driver needs to
* send segmented data to firmware.
*/
- ATH11K_TM_CMD_WMI_FTM = 3,
+ ATH_TM_CMD_WMI_FTM = 3,
};
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 25b4ef9d3c9a..7529afd24aed 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -2166,7 +2166,7 @@ static void b43_print_fw_helptext(struct b43_wl *wl, bool error)
{
const char text[] =
"You must go to " \
- "https://wireless.wiki.kernel.org/en/users/Drivers/b43#devicefirmware " \
+ "https://wireless.docs.kernel.org/en/latest/en/users/drivers/b43/developers.html#list-of-firmware " \
"and download the correct firmware for this driver version. " \
"Please carefully read all instructions on this website.\n";
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index be1d971b3d32..24a5624ef207 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -3295,7 +3295,7 @@ static int ipw_init_nic(struct ipw_priv *priv)
rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
if (rc < 0)
- IPW_DEBUG_INFO("FAILED wait for clock stablization\n");
+ IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
/* assert SW reset */
ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw.h b/drivers/net/wireless/intel/ipw2x00/libipw.h
index 3c20353e5a41..e031e8692ca6 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw.h
+++ b/drivers/net/wireless/intel/ipw2x00/libipw.h
@@ -1011,8 +1011,6 @@ netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev);
void libipw_txb_free(struct libipw_txb *);
/* libipw_rx.c */
-void libipw_rx_any(struct libipw_device *ieee, struct sk_buff *skb,
- struct libipw_rx_stats *stats);
int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
struct libipw_rx_stats *rx_stats);
/* make sure to set stats->len */
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
index dc4e91f58bb4..b7bc94f7abd8 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
@@ -823,96 +823,6 @@ int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
return 0;
}
-/* Filter out unrelated packets, call libipw_rx[_mgt]
- * This function takes over the skb, it should not be used again after calling
- * this function. */
-void libipw_rx_any(struct libipw_device *ieee,
- struct sk_buff *skb, struct libipw_rx_stats *stats)
-{
- struct libipw_hdr_4addr *hdr;
- int is_packet_for_us;
- u16 fc;
-
- if (ieee->iw_mode == IW_MODE_MONITOR) {
- if (!libipw_rx(ieee, skb, stats))
- dev_kfree_skb_irq(skb);
- return;
- }
-
- if (skb->len < sizeof(struct ieee80211_hdr))
- goto drop_free;
-
- hdr = (struct libipw_hdr_4addr *)skb->data;
- fc = le16_to_cpu(hdr->frame_ctl);
-
- if ((fc & IEEE80211_FCTL_VERS) != 0)
- goto drop_free;
-
- switch (fc & IEEE80211_FCTL_FTYPE) {
- case IEEE80211_FTYPE_MGMT:
- if (skb->len < sizeof(struct libipw_hdr_3addr))
- goto drop_free;
- libipw_rx_mgt(ieee, hdr, stats);
- dev_kfree_skb_irq(skb);
- return;
- case IEEE80211_FTYPE_DATA:
- break;
- case IEEE80211_FTYPE_CTL:
- return;
- default:
- return;
- }
-
- is_packet_for_us = 0;
- switch (ieee->iw_mode) {
- case IW_MODE_ADHOC:
- /* our BSS and not from/to DS */
- if (ether_addr_equal(hdr->addr3, ieee->bssid) &&
- ((fc & (IEEE80211_FCTL_TODS + IEEE80211_FCTL_FROMDS)) == 0)) {
- /* promisc: get all */
- if (ieee->dev->flags & IFF_PROMISC)
- is_packet_for_us = 1;
- /* to us */
- else if (ether_addr_equal(hdr->addr1, ieee->dev->dev_addr))
- is_packet_for_us = 1;
- /* mcast */
- else if (is_multicast_ether_addr(hdr->addr1))
- is_packet_for_us = 1;
- }
- break;
- case IW_MODE_INFRA:
- /* our BSS (== from our AP) and from DS */
- if (ether_addr_equal(hdr->addr2, ieee->bssid) &&
- ((fc & (IEEE80211_FCTL_TODS + IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS)) {
- /* promisc: get all */
- if (ieee->dev->flags & IFF_PROMISC)
- is_packet_for_us = 1;
- /* to us */
- else if (ether_addr_equal(hdr->addr1, ieee->dev->dev_addr))
- is_packet_for_us = 1;
- /* mcast */
- else if (is_multicast_ether_addr(hdr->addr1)) {
- /* not our own packet bcasted from AP */
- if (!ether_addr_equal(hdr->addr3, ieee->dev->dev_addr))
- is_packet_for_us = 1;
- }
- }
- break;
- default:
- /* ? */
- break;
- }
-
- if (is_packet_for_us)
- if (!libipw_rx(ieee, skb, stats))
- dev_kfree_skb_irq(skb);
- return;
-
-drop_free:
- dev_kfree_skb_irq(skb);
- ieee->dev->stats.rx_dropped++;
-}
-
#define MGMT_FRAME_FIXED_PART_LENGTH 0x24
static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
@@ -1729,6 +1639,5 @@ void libipw_rx_mgt(struct libipw_device *ieee,
}
}
-EXPORT_SYMBOL_GPL(libipw_rx_any);
EXPORT_SYMBOL(libipw_rx_mgt);
EXPORT_SYMBOL(libipw_rx);
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
index 718efb1aa1b0..0e5130d1fccd 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
@@ -132,15 +132,8 @@ static void il4965_rs_fill_link_cmd(struct il_priv *il,
static void il4965_rs_stay_in_table(struct il_lq_sta *lq_sta,
bool force_search);
-#ifdef CONFIG_MAC80211_DEBUGFS
static void il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta,
u32 *rate_n_flags, int idx);
-#else
-static void
-il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx)
-{
-}
-#endif
/*
* The following tables contain the expected throughput metrics for all rates
@@ -2495,8 +2488,6 @@ il4965_rs_free_sta(void *il_r, struct ieee80211_sta *sta, void *il_sta)
D_RATE("leave\n");
}
-#ifdef CONFIG_MAC80211_DEBUGFS
-
static void
il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx)
{
@@ -2504,6 +2495,9 @@ il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx)
u8 valid_tx_ant;
u8 ant_sel_tx;
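+ /* Compile-time no-op when debugfs support is disabled; replaces the old #ifdef stub */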
+ if (!IS_ENABLED(CONFIG_MAC80211_DEBUGFS))
+ return;
+
il = lq_sta->drv;
valid_tx_ant = il->hw_params.valid_tx_ant;
if (lq_sta->dbg_fixed_rate) {
@@ -2758,7 +2752,6 @@ il4965_rs_add_debugfs(void *il, void *il_sta, struct dentry *dir)
debugfs_create_u8("tx_agg_tid_enable", 0600, dir,
&lq_sta->tx_agg_tid_en);
}
-#endif
/*
* Initialization of rate scaling information is done by driver after
@@ -2781,9 +2774,8 @@ static const struct rate_control_ops rs_4965_ops = {
.free = il4965_rs_free,
.alloc_sta = il4965_rs_alloc_sta,
.free_sta = il4965_rs_free_sta,
-#ifdef CONFIG_MAC80211_DEBUGFS
- .add_sta_debugfs = il4965_rs_add_debugfs,
-#endif
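+ /* PTR_IF() yields NULL when CONFIG_MAC80211_DEBUGFS is disabled, avoiding an #ifdef here */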
+ .add_sta_debugfs = PTR_IF(IS_ENABLED(CONFIG_MAC80211_DEBUGFS),
+ il4965_rs_add_debugfs),
};
int
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index 92285412ab10..52610f5e57a3 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -2815,9 +2815,7 @@ struct il_lq_sta {
struct il_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
struct il_traffic_load load[TID_MAX_LOAD_COUNT];
u8 tx_agg_tid_en;
-#ifdef CONFIG_MAC80211_DEBUGFS
u32 dbg_fixed_rate;
-#endif
struct il_priv *drv;
/* used to be in sta_info */
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index 4b04865fc2c9..82f577da1a8b 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -81,14 +81,25 @@ config IWLMVM
of the devices that use this firmware is available here:
https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware
+config IWLMLD
+ tristate "Intel Wireless WiFi MLD Firmware support"
+ select WANT_DEV_COREDUMP
+ depends on MAC80211
+ depends on PTP_1588_CLOCK_OPTIONAL
+ help
+ This is the driver that supports the firmware of MLD capable devices.
+ The list of the devices that use this firmware is available here:
+ https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware
+
# don't call it _MODULE -- will confuse Kconfig/fixdep/...
config IWLWIFI_OPMODE_MODULAR
bool
default y if IWLDVM=m
default y if IWLMVM=m
+ default y if IWLMLD=m
-comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM"
- depends on IWLDVM=n && IWLMVM=n
+comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM or IWLMLD"
+ depends on IWLDVM=n && IWLMVM=n && IWLMLD=n
menu "Debugging Options"
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index 19c4ce6f2465..9546ceeaf5e3 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -12,7 +12,8 @@ iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-gen3.o
iwlwifi-objs += pcie/trans-gen2.o pcie/tx-gen2.o
iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
-iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o cfg/bz.o cfg/sc.o cfg/dr.o
+iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o
+iwlwifi-$(CONFIG_IWLMLD) += cfg/bz.o cfg/sc.o cfg/dr.o
iwlwifi-objs += iwl-dbg-tlv.o
iwlwifi-objs += iwl-trans.o
@@ -20,6 +21,7 @@ iwlwifi-objs += fw/img.o fw/notif-wait.o fw/rs.o
iwlwifi-objs += fw/dbg.o fw/pnvm.o fw/dump.o
iwlwifi-objs += fw/regulatory.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o
+iwlwifi-$(CONFIG_IWLMLD) += fw/smem.o fw/init.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
iwlwifi-$(CONFIG_EFI) += fw/uefi.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o
@@ -33,6 +35,7 @@ ccflags-y += -I$(src)
obj-$(CONFIG_IWLDVM) += dvm/
obj-$(CONFIG_IWLMVM) += mvm/
obj-$(CONFIG_IWLMEI) += mei/
+obj-$(CONFIG_IWLMLD) += mld/
obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += tests/
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 2e2fcb3807ef..130b9a8aa7eb 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -205,7 +205,6 @@ const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
-const char iwl_ax204_name[] = "Intel(R) Wi-Fi 6 AX204 160MHz";
const char iwl_ax200_killer_1650w_name[] =
"Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)";
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
index dcba1a5d793b..e87b57b9e2c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
@@ -180,7 +180,6 @@ const struct iwl_cfg_trans_params iwl_ma_trans_cfg = {
};
const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
-const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz";
const char iwl_ax231_name[] = "Intel(R) Wi-Fi 6E AX231 160MHz";
const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6E AX411 160MHz";
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
index efa3e0e35f79..f055255a7c93 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -10,10 +10,10 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_BZ_UCODE_API_MAX 96
+#define IWL_BZ_UCODE_API_MAX 98
/* Lowest firmware API version supported */
-#define IWL_BZ_UCODE_API_MIN 92
+#define IWL_BZ_UCODE_API_MIN 93
/* NVM versions */
#define IWL_BZ_NVM_VERSION 0x0a1d
@@ -38,6 +38,11 @@
#define IWL_BZ_A_HR_B_MODULE_FIRMWARE(api) \
IWL_BZ_A_HR_B_FW_PRE "-" __stringify(api) ".ucode"
+#if !IS_ENABLED(CONFIG_IWLMVM)
+const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
+const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
+#endif
+
static const struct iwl_base_params iwl_bz_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
.num_of_queues = 512,
@@ -50,6 +55,13 @@ static const struct iwl_base_params iwl_bz_base_params = {
.pcie_l1_allowed = true,
};
+const struct iwl_ht_params iwl_bz_ht_params = {
+ .stbc = true,
+ .ldpc = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ) |
+ BIT(NL80211_BAND_6GHZ),
+};
+
#define IWL_DEVICE_BZ_COMMON \
.ucode_api_max = IWL_BZ_UCODE_API_MAX, \
.ucode_api_min = IWL_BZ_UCODE_API_MIN, \
@@ -113,7 +125,7 @@ static const struct iwl_base_params iwl_bz_base_params = {
#define IWL_DEVICE_BZ \
IWL_DEVICE_BZ_COMMON, \
- .ht_params = &iwl_22000_ht_params
+ .ht_params = &iwl_bz_ht_params
/*
* This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
@@ -145,7 +157,6 @@ const struct iwl_cfg_trans_params iwl_gl_trans_cfg = {
.low_latency_xtal = true,
};
-const char iwl_bz_name[] = "Intel(R) TBD Bz device";
const char iwl_fm_name[] = "Intel(R) Wi-Fi 7 BE201 320MHz";
const char iwl_wh_name[] = "Intel(R) Wi-Fi 7 BE211 320MHz";
const char iwl_gl_name[] = "Intel(R) Wi-Fi 7 BE200 320MHz";
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
index ab7c0f8d54f4..282b9b846c3a 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2024 Intel Corporation
+ * Copyright (C) 2024-2025 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -9,7 +9,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_DR_UCODE_API_MAX 96
+#define IWL_DR_UCODE_API_MAX 98
/* Lowest firmware API version supported */
#define IWL_DR_UCODE_API_MIN 96
@@ -114,7 +114,7 @@ static const struct iwl_base_params iwl_dr_base_params = {
.uhb_supported = true, \
.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
.num_rbds = IWL_NUM_RBDS_DR_EHT, \
- .ht_params = &iwl_22000_ht_params
+ .ht_params = &iwl_bz_ht_params
/*
* This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
@@ -148,11 +148,9 @@ const struct iwl_cfg_trans_params iwl_br_trans_cfg = {
.mq_rx_supported = true,
.rf_id = true,
.gen2 = true,
- .integrated = true,
.umac_prph_offset = 0x300000,
.xtal_latency = 12000,
.low_latency_xtal = true,
- .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
const char iwl_br_name[] = "Intel(R) TBD Br device";
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
index c9eeb3f6704e..670031fd60dc 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -10,10 +10,10 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_SC_UCODE_API_MAX 96
+#define IWL_SC_UCODE_API_MAX 98
/* Lowest firmware API version supported */
-#define IWL_SC_UCODE_API_MIN 92
+#define IWL_SC_UCODE_API_MIN 93
/* NVM versions */
#define IWL_SC_NVM_VERSION 0x0a1d
@@ -121,7 +121,7 @@ static const struct iwl_base_params iwl_sc_base_params = {
.uhb_supported = true, \
.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
.num_rbds = IWL_NUM_RBDS_SC_EHT, \
- .ht_params = &iwl_22000_ht_params
+ .ht_params = &iwl_bz_ht_params
/*
* This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
@@ -142,22 +142,18 @@ const struct iwl_cfg_trans_params iwl_sc_trans_cfg = {
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
-const char iwl_sc_name[] = "Intel(R) TBD Sc device";
+const char iwl_sp_name[] = "Intel(R) Wi-Fi 7 BE213 160MHz";
const struct iwl_cfg iwl_cfg_sc = {
.fw_name_mac = "sc",
IWL_DEVICE_SC,
};
-const char iwl_sc2_name[] = "Intel(R) TBD Sc2 device";
-
const struct iwl_cfg iwl_cfg_sc2 = {
.fw_name_mac = "sc2",
IWL_DEVICE_SC,
};
-const char iwl_sc2f_name[] = "Intel(R) TBD Sc2f device";
-
const struct iwl_cfg iwl_cfg_sc2f = {
.fw_name_mac = "sc2f",
IWL_DEVICE_SC,
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h
index 3f49c0bccb28..96ea6c8dfc89 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h
@@ -1180,85 +1180,87 @@ struct iwl_dram_scratch {
} __packed;
struct iwl_tx_cmd {
- /*
- * MPDU byte count:
- * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
- * + 8 byte IV for CCM or TKIP (not used for WEP)
- * + Data payload
- * + 8-byte MIC (not used for CCM/WEP)
- * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
- * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.i
- * Range: 14-2342 bytes.
- */
- __le16 len;
-
- /*
- * MPDU or MSDU byte count for next frame.
- * Used for fragmentation and bursting, but not 11n aggregation.
- * Same as "len", but for next frame. Set to 0 if not applicable.
- */
- __le16 next_frame_len;
-
- __le32 tx_flags; /* TX_CMD_FLG_* */
-
- /* uCode may modify this field of the Tx command (in host DRAM!).
- * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
- struct iwl_dram_scratch scratch;
-
- /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
- __le32 rate_n_flags; /* RATE_MCS_* */
-
- /* Index of destination station in uCode's station table */
- u8 sta_id;
-
- /* Type of security encryption: CCM or TKIP */
- u8 sec_ctl; /* TX_CMD_SEC_* */
-
- /*
- * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial
- * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for
- * data frames, this field may be used to selectively reduce initial
- * rate (via non-0 value) for special frames (e.g. management), while
- * still supporting rate scaling for all frames.
- */
- u8 initial_rate_index;
- u8 reserved;
- u8 key[16];
- __le16 next_frame_flags;
- __le16 reserved2;
- union {
- __le32 life_time;
- __le32 attempt;
- } stop_time;
-
- /* Host DRAM physical address pointer to "scratch" in this command.
- * Must be dword aligned. "0" in dram_lsb_ptr disables usage. */
- __le32 dram_lsb_ptr;
- u8 dram_msb_ptr;
-
- u8 rts_retry_limit; /*byte 50 */
- u8 data_retry_limit; /*byte 51 */
- u8 tid_tspec;
- union {
- __le16 pm_frame_timeout;
- __le16 attempt_duration;
- } timeout;
-
- /*
- * Duration of EDCA burst Tx Opportunity, in 32-usec units.
- * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
- */
- __le16 driver_txop;
-
+ /* New members MUST be added within the __struct_group() macro below. */
+ __struct_group(iwl_tx_cmd_hdr, __hdr, __packed,
+ /*
+ * MPDU byte count:
+ * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
+ * + 8 byte IV for CCM or TKIP (not used for WEP)
+ * + Data payload
+ * + 8-byte MIC (not used for CCM/WEP)
+ * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
+ * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
+ * Range: 14-2342 bytes.
+ */
+ __le16 len;
+
+ /*
+ * MPDU or MSDU byte count for next frame.
+ * Used for fragmentation and bursting, but not 11n aggregation.
+ * Same as "len", but for next frame. Set to 0 if not applicable.
+ */
+ __le16 next_frame_len;
+
+ __le32 tx_flags; /* TX_CMD_FLG_* */
+
+ /* uCode may modify this field of the Tx command (in host DRAM!).
+ * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
+ struct iwl_dram_scratch scratch;
+
+ /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
+ __le32 rate_n_flags; /* RATE_MCS_* */
+
+ /* Index of destination station in uCode's station table */
+ u8 sta_id;
+
+ /* Type of security encryption: CCM or TKIP */
+ u8 sec_ctl; /* TX_CMD_SEC_* */
+
+ /*
+ * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial
+ * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for
+ * data frames, this field may be used to selectively reduce initial
+ * rate (via non-0 value) for special frames (e.g. management), while
+ * still supporting rate scaling for all frames.
+ */
+ u8 initial_rate_index;
+ u8 reserved;
+ u8 key[16];
+ __le16 next_frame_flags;
+ __le16 reserved2;
+ union {
+ __le32 life_time;
+ __le32 attempt;
+ } stop_time;
+
+ /* Host DRAM physical address pointer to "scratch" in this command.
+ * Must be dword aligned. "0" in dram_lsb_ptr disables usage. */
+ __le32 dram_lsb_ptr;
+ u8 dram_msb_ptr;
+
+ u8 rts_retry_limit; /*byte 50 */
+ u8 data_retry_limit; /*byte 51 */
+ u8 tid_tspec;
+ union {
+ __le16 pm_frame_timeout;
+ __le16 attempt_duration;
+ } timeout;
+
+ /*
+ * Duration of EDCA burst Tx Opportunity, in 32-usec units.
+ * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
+ */
+ __le16 driver_txop;
+
+ );
/*
* MAC header goes here, followed by 2 bytes padding if MAC header
* length is 26 or 30 bytes, followed by payload data
*/
- union {
- DECLARE_FLEX_ARRAY(u8, payload);
- DECLARE_FLEX_ARRAY(struct ieee80211_hdr, hdr);
- };
+ struct ieee80211_hdr hdr[];
} __packed;
+static_assert(offsetof(struct iwl_tx_cmd, hdr) == sizeof(struct iwl_tx_cmd_hdr),
+ "struct member likely outside of __struct_group()");
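
For context, __struct_group() (from include/linux/stddef.h) emits both the standalone struct iwl_tx_cmd_hdr and a tagged copy of the same members inside struct iwl_tx_cmd, so commands that only need the fixed part (scan and beacon Tx below) can embed the smaller type while the full Tx command keeps the trailing MAC-header flexible array. A minimal sketch of that relationship, assuming the dvm command definitions above are in scope; the helper itself is hypothetical:

static inline void iwl_scan_copy_tx_hdr(struct iwl_scan_cmd *scan,
					const struct iwl_tx_cmd *tx)
{
	/* The tagged view (__hdr) aliases the leading members of the full
	 * command, so the fixed part can be copied without the trailing
	 * MAC-header flexible array. */
	scan->tx_cmd = tx->__hdr;

	/* Mirrors the static_assert above: the header-only struct must end
	 * exactly where the flexible array begins. */
	BUILD_BUG_ON(sizeof(scan->tx_cmd) != offsetof(struct iwl_tx_cmd, hdr));
}
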
/*
* TX command response is sent after *agn* transmission attempts.
@@ -2312,7 +2314,7 @@ struct iwl_scan_cmd {
/* For active scans (set to all-0s for passive scans).
* Does not include payload. Must specify Tx rate; no rate scaling. */
- struct iwl_tx_cmd tx_cmd;
+ struct iwl_tx_cmd_hdr tx_cmd;
/* For directed active scans (set to all-0s otherwise) */
struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
@@ -2423,7 +2425,7 @@ struct iwlagn_beacon_notif {
*/
struct iwl_tx_beacon_cmd {
- struct iwl_tx_cmd tx;
+ struct iwl_tx_cmd_hdr tx;
__le16 tim_idx;
u8 tim_size;
u8 reserved1;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
index 43e8d04d5a8b..e1d78550e443 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
@@ -124,17 +124,6 @@ enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv)
return restriction->tx_stream;
}
-enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv)
-{
- struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
- struct iwl_tt_restriction *restriction;
-
- if (!priv->thermal_throttle.advanced_tt)
- return IWL_ANT_OK_MULTI;
- restriction = tt->restriction + tt->state;
- return restriction->rx_stream;
-}
-
#define CT_KILL_EXIT_DURATION (5) /* 5 seconds duration */
#define CT_KILL_WAITING_DURATION (300) /* 300ms duration */
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h
index 4af792d41dce..198f934a0d16 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h
@@ -100,7 +100,6 @@ u8 iwl_tt_current_power_mode(struct iwl_priv *priv);
bool iwl_tt_is_low_power_state(struct iwl_priv *priv);
bool iwl_ht_enabled(struct iwl_priv *priv);
enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv);
-enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv);
void iwl_tt_enter_ct_kill(struct iwl_priv *priv);
void iwl_tt_exit_ct_kill(struct iwl_priv *priv);
void iwl_tt_handler(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
index ebe85fdf08d3..42360a8f23aa 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018, 2020-2021, 2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2020-2021, 2024-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -117,7 +117,7 @@ struct iwl_alive_ntf_v6 {
* finishing init flow
* @IWL_INIT_DEBUG_CFG: driver is going to send debug config command
* @IWL_INIT_NVM: driver is going to send NVM_ACCESS commands
- * @IWL_INIT_PHY: driver is going to send PHY_DB commands
+ * @IWL_INIT_PHY: driver is going to send the PHY_CONFIGURATION_CMD
*/
enum iwl_extended_cfg_flags {
IWL_INIT_DEBUG_CFG,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index 34a1f97653c0..d43adb6f9220 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -502,11 +502,16 @@ enum iwl_legacy_cmds {
/**
* @DTS_MEASUREMENT_NOTIFICATION:
* &struct iwl_dts_measurement_notif_v1 or
- * &struct iwl_dts_measurement_notif_v2
+ * &struct iwl_dts_measurement_notif
*/
DTS_MEASUREMENT_NOTIFICATION = 0xdd,
/**
+ * @DEBUG_HOST_COMMAND: &struct iwl_dhc_cmd
+ */
+ DEBUG_HOST_COMMAND = 0xf1,
+
+ /**
* @LDBG_CONFIG_CMD: configure continuous trace recording
*/
LDBG_CONFIG_CMD = 0xf6,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/context.h b/drivers/net/wireless/intel/iwlwifi/fw/api/context.h
index a9fa5f054ce0..464eed9b5e71 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/context.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/context.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2022, 2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -14,6 +14,9 @@
* @FW_CTXT_COLOR_POS: position of the color
* @FW_CTXT_COLOR_MSK: mask of the color
* @FW_CTXT_INVALID: value used to indicate unused/invalid
+ * @FW_CTXT_ID_INVALID: value used to indicate unused/invalid. This can be
+ *	used with newer firmware which no longer uses the color. Typically,
+ * firmware versions supported by iwlmld can use this value.
*/
enum iwl_ctxt_id_and_color {
FW_CTXT_ID_POS = 0,
@@ -21,6 +24,7 @@ enum iwl_ctxt_id_and_color {
FW_CTXT_COLOR_POS = 8,
FW_CTXT_COLOR_MSK = 0xff << FW_CTXT_COLOR_POS,
FW_CTXT_INVALID = 0xffffffff,
+ FW_CTXT_ID_INVALID = 0xff,
};
#define FW_CMD_ID_AND_COLOR(_id, _color) (((_id) << FW_CTXT_ID_POS) |\
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index c2362bc786b2..9c271ea67155 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -1006,7 +1006,7 @@ struct iwl_wowlan_wake_pkt_notif {
* struct iwl_mvm_d3_end_notif - d3 end notification
* @flags: See &enum iwl_d0i3_flags
*/
-struct iwl_mvm_d3_end_notif {
+struct iwl_d3_end_notif {
__le32 flags;
} __packed;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index 570a3f722510..c139b965980d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2024 Intel Corporation
+ * Copyright (C) 2024-2025 Intel Corporation
* Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
@@ -91,8 +91,14 @@ enum iwl_data_path_subcmd_ids {
SEC_KEY_CMD = 0x18,
/**
+ * @OMI_SEND_STATUS_NOTIF: notification after OMI was sent
+ * uses &struct iwl_omi_send_status_notif
+ */
+ OMI_SEND_STATUS_NOTIF = 0xF2,
+
+ /**
* @ESR_MODE_NOTIF: notification to recommend/force a wanted esr mode,
- * uses &struct iwl_mvm_esr_mode_notif
+ * uses &struct iwl_esr_mode_notif
*/
ESR_MODE_NOTIF = 0xF3,
@@ -688,4 +694,13 @@ struct iwl_sec_key_cmd {
} __packed u; /* SEC_KEY_OPERATION_API_U_VER_1 */
} __packed; /* SEC_KEY_CMD_API_S_VER_1 */
+/**
+ * struct iwl_omi_send_status_notif - OMI status notification
+ * @success: indicates that the OMI was sent successfully
+ * (currently always set)
+ */
+struct iwl_omi_send_status_notif {
+ __le32 success;
+} __packed; /* OMI_SEND_STATUS_NTFY_API_S_VER_1 */
+
#endif /* __iwl_fw_api_datapath_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index 550de6db1834..4fab6c66994e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#ifndef __iwl_fw_dbg_tlv_h__
#define __iwl_fw_dbg_tlv_h__
@@ -527,6 +527,8 @@ enum iwl_fw_ini_time_point {
* @IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA: override trigger data.
* Append otherwise
* @IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD: send cmd once dump collected
+ * @IWL_FW_INI_APPLY_POLICY_RESET_HANDSHAKE: perform reset handshake and
+ * split dump to before/after with region marking
*/
enum iwl_fw_ini_trigger_apply_policy {
IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT = BIT(0),
@@ -535,6 +537,7 @@ enum iwl_fw_ini_trigger_apply_policy {
IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG = BIT(9),
IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA = BIT(10),
IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD = BIT(16),
+ IWL_FW_INI_APPLY_POLICY_RESET_HANDSHAKE = BIT(17),
};
/**
@@ -556,12 +559,14 @@ enum iwl_fw_ini_trigger_reset_fw_policy {
* @IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT: OS has no limit of dump size
* @IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB: mini dump only 600KB region dump
* @IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB: mini dump 5MB size dump
+ * @IWL_FW_IWL_DEBUG_DUMP_POLICY_BEFORE_RESET: dump this region before reset
+ * handshake (if requested by %IWL_FW_INI_APPLY_POLICY_RESET_HANDSHAKE)
*/
enum iwl_fw_ini_dump_policy {
IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT = BIT(0),
IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB = BIT(1),
IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB = BIT(2),
-
+ IWL_FW_IWL_DEBUG_DUMP_POLICY_BEFORE_RESET = BIT(3),
};
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
index aa88e91d117e..0cf1e5124fba 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -51,7 +51,7 @@ enum iwl_debug_cmds {
/**
* @GET_TAS_STATUS:
* sends command to fw to get TAS status
- * the response is &struct iwl_mvm_tas_status_resp
+ * the response is &struct iwl_tas_status_resp
*/
GET_TAS_STATUS = 0xA,
/**
@@ -439,25 +439,20 @@ struct iwl_dbg_dump_complete_cmd {
__le32 tp_data;
} __packed; /* FW_DUMP_COMPLETE_CMD_API_S_VER_1 */
-#define TAS_LMAC_BAND_HB 0
-#define TAS_LMAC_BAND_LB 1
-#define TAS_LMAC_BAND_UHB 2
-#define TAS_LMAC_BAND_INVALID 3
-
/**
- * struct iwl_mvm_tas_status_per_mac - tas status per lmac
+ * struct iwl_tas_status_per_mac - tas status per lmac
* @static_status: tas statically enabled or disabled per lmac - TRUE/FALSE
* @static_dis_reason: TAS static disable reason, uses
- * &enum iwl_mvm_tas_statically_disabled_reason
+ * &enum iwl_tas_statically_disabled_reason
* @dynamic_status: Current TAS status. uses
- * &enum iwl_mvm_tas_dyna_status
+ * &enum iwl_tas_dyna_status
* @near_disconnection: is TAS currently near disconnection per lmac? - TRUE/FALSE
* @max_reg_pwr_limit: Regulatory power limits in dBm
* @sar_limit: SAR limits per lmac in dBm
* @band: Band per lmac
* @reserved: reserved
*/
-struct iwl_mvm_tas_status_per_mac {
+struct iwl_tas_status_per_mac {
u8 static_status;
u8 static_dis_reason;
u8 dynamic_status;
@@ -466,35 +461,35 @@ struct iwl_mvm_tas_status_per_mac {
__le16 sar_limit;
u8 band;
u8 reserved[3];
-} __packed; /*DEBUG_GET_TAS_STATUS_PER_MAC_S_VER_1*/
+} __packed; /* DEBUG_GET_TAS_STATUS_PER_MAC_S_VER_1 */
/**
- * struct iwl_mvm_tas_status_resp - Response to GET_TAS_STATUS
+ * struct iwl_tas_status_resp - Response to GET_TAS_STATUS
* @tas_fw_version: TAS FW version
* @is_uhb_for_usa_enable: is UHB enabled in USA? - TRUE/FALSE
* @curr_mcc: current mcc
* @block_list: country block list
* @tas_status_mac: TAS status per lmac, uses
- * &struct iwl_mvm_tas_status_per_mac
+ * &struct iwl_tas_status_per_mac
* @in_dual_radio: is TAS in dual radio? - TRUE/FALSE
* @uhb_allowed_flags: see &enum iwl_tas_uhb_allowed_flags.
* This member is valid only when fw has
* %IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT capability.
* @reserved: reserved
*/
-struct iwl_mvm_tas_status_resp {
+struct iwl_tas_status_resp {
u8 tas_fw_version;
u8 is_uhb_for_usa_enable;
__le16 curr_mcc;
__le16 block_list[16];
- struct iwl_mvm_tas_status_per_mac tas_status_mac[2];
+ struct iwl_tas_status_per_mac tas_status_mac[2];
u8 in_dual_radio;
u8 uhb_allowed_flags;
u8 reserved[2];
-} __packed; /*DEBUG_GET_TAS_STATUS_RSP_API_S_VER_3*/
+} __packed; /* DEBUG_GET_TAS_STATUS_RSP_API_S_VER_3 */
/**
- * enum iwl_mvm_tas_dyna_status - TAS current running status
+ * enum iwl_tas_dyna_status - TAS current running status
* @TAS_DYNA_INACTIVE: TAS status is inactive
 * @TAS_DYNA_INACTIVE_MVM_MODE: TAS is disabled because FW is in MVM mode
* or is in softap mode.
@@ -507,7 +502,7 @@ struct iwl_mvm_tas_status_resp {
* @TAS_DYNA_ACTIVE: TAS is currently active
* @TAS_DYNA_STATUS_MAX: TAS status max value
*/
-enum iwl_mvm_tas_dyna_status {
+enum iwl_tas_dyna_status {
TAS_DYNA_INACTIVE,
TAS_DYNA_INACTIVE_MVM_MODE,
TAS_DYNA_INACTIVE_TRIGGER_MODE,
@@ -516,19 +511,22 @@ enum iwl_mvm_tas_dyna_status {
TAS_DYNA_ACTIVE,
TAS_DYNA_STATUS_MAX,
-}; /*_TAS_DYNA_STATUS_E*/
+};
/**
- * enum iwl_mvm_tas_statically_disabled_reason - TAS statically disabled reason
+ * enum iwl_tas_statically_disabled_reason - TAS statically disabled reason
* @TAS_DISABLED_DUE_TO_BIOS: TAS is disabled because TAS is disabled in BIOS
 * @TAS_DISABLED_DUE_TO_SAR_6DBM: TAS is disabled because SAR limit is less than 6 dBm
* @TAS_DISABLED_REASON_INVALID: TAS disable reason is invalid
+ * @TAS_DISABLED_DUE_TO_TABLE_SOURCE_INVALID: TAS is disabled due to
+ * table source invalid
* @TAS_DISABLED_REASON_MAX: TAS disable reason max value
*/
-enum iwl_mvm_tas_statically_disabled_reason {
+enum iwl_tas_statically_disabled_reason {
TAS_DISABLED_DUE_TO_BIOS,
TAS_DISABLED_DUE_TO_SAR_6DBM,
TAS_DISABLED_REASON_INVALID,
+ TAS_DISABLED_DUE_TO_TABLE_SOURCE_INVALID,
TAS_DISABLED_REASON_MAX,
}; /*_TAS_STATICALLY_DISABLED_REASON_E*/
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dhc.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dhc.h
new file mode 100644
index 000000000000..b6d79c678cd8
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dhc.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+#ifndef __iwl_fw_api_dhc_h__
+#define __iwl_fw_api_dhc_h__
+
+#define DHC_TABLE_MASK_POS (28)
+
+/**
+ * enum iwl_dhc_table_id - DHC table operations index
+ */
+enum iwl_dhc_table_id {
+ /**
+ * @DHC_TABLE_INTEGRATION: select the integration table
+ */
+ DHC_TABLE_INTEGRATION = 2 << DHC_TABLE_MASK_POS,
+ /**
+ * @DHC_TABLE_TOOLS: select the tools table
+ */
+ DHC_TABLE_TOOLS = 0,
+};
+
+/**
+ * enum iwl_dhc_umac_tools_table - tools operations
+ * @DHC_TOOLS_UMAC_GET_TAS_STATUS: Get TAS status.
+ *	See &struct iwl_dhc_tas_status_resp
+ */
+enum iwl_dhc_umac_tools_table {
+ DHC_TOOLS_UMAC_GET_TAS_STATUS = 0,
+};
+
+/**
+ * enum iwl_dhc_umac_integration_table - integration operations
+ */
+enum iwl_dhc_umac_integration_table {
+ /**
+ * @DHC_INT_UMAC_TWT_OPERATION: trigger a TWT operation
+ */
+ DHC_INT_UMAC_TWT_OPERATION = 4,
+ /**
+ * @DHC_INTEGRATION_TLC_DEBUG_CONFIG: TLC debug
+ */
+ DHC_INTEGRATION_TLC_DEBUG_CONFIG = 1,
+ /**
+ * @DHC_INTEGRATION_MAX: Maximum UMAC integration table entries
+ */
+ DHC_INTEGRATION_MAX
+};
+
+#define DHC_TARGET_UMAC BIT(27)
+
+/**
+ * struct iwl_dhc_cmd - debug host command
+ * @length: length in DWs of the data structure that is concatenated to the end
+ * of this struct
+ * @index_and_mask: bit 31 is 1 for a data set operation, else it's 0;
+ *	bits 28-30 are the index of the table for the operation -
+ *	see &enum iwl_dhc_table_id;
+ *	bit 27 is 0 if the cmd is targeted to the LMAC and 1 if targeted
+ *	to the UMAC (LMAC is 0 for backward compatibility);
+ *	bit 26 is 0 if the cmd is targeted to LMAC0 and 1 if targeted to
+ *	LMAC1, relevant only if bit 27 is set to 0;
+ *	bits 0-25 are a specific entry index in the table specified in
+ *	bits 28-30
+ *
+ * @data: the concatenated data.
+ */
+struct iwl_dhc_cmd {
+ __le32 length;
+ __le32 index_and_mask;
+ __le32 data[];
+} __packed; /* DHC_CMD_API_S */
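
To make the bit layout described above concrete, here is a hedged sketch of composing an index_and_mask value for a UMAC entry; the helper is illustrative only and relies just on the constants defined in this header plus the usual little-endian accessors:

static inline __le32 iwl_dhc_index_and_mask_example(enum iwl_dhc_table_id table,
						    u32 entry, bool umac)
{
	/* the table selector already carries its shift (bits 28-30) */
	u32 val = table | entry;

	if (umac)
		val |= DHC_TARGET_UMAC;	/* bit 27: target the UMAC */

	return cpu_to_le32(val);
}

/* e.g. the UMAC TLC debug entry of the integration table:
 *	iwl_dhc_index_and_mask_example(DHC_TABLE_INTEGRATION,
 *				       DHC_INTEGRATION_TLC_DEBUG_CONFIG, true);
 */
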
+
+/**
+ * struct iwl_dhc_payload_hdr - DHC payload header
+ * @version: a version of a payload
+ * @reserved: reserved for alignment
+ */
+struct iwl_dhc_payload_hdr {
+ u8 version;
+ u8 reserved[3];
+} __packed; /* DHC_PAYLOAD_HDR_API_S_VER_1 */
+
+/**
+ * struct iwl_dhc_tas_status_per_radio - TAS status per radio
+ * @band: &PHY_BAND_5 for high band, &PHY_BAND_24 for low band and
+ * &PHY_BAND_6 for ultra high band.
+ * @static_status: TAS statically enabled or disabled
+ * @static_disable_reason: TAS static disable reason, uses
+ * &enum iwl_tas_statically_disabled_reason
+ * @near_disconnection: is TAS currently near disconnection per radio
+ * @dynamic_status_ant_a: Antenna A current TAS status.
+ * uses &enum iwl_tas_dyna_status
+ * @dynamic_status_ant_b: Antenna B current TAS status.
+ * uses &enum iwl_tas_dyna_status
+ * @max_reg_pwr_limit_ant_a: Antenna A regulatory power limits in dBm
+ * @max_reg_pwr_limit_ant_b: Antenna B regulatory power limits in dBm
+ * @sar_limit_ant_a: Antenna A SAR limit per radio in dBm
+ * @sar_limit_ant_b: Antenna B SAR limit per radio in dBm
+ * @reserved: reserved for alignment
+ */
+struct iwl_dhc_tas_status_per_radio {
+ u8 band;
+ u8 static_status;
+ u8 static_disable_reason;
+ u8 near_disconnection;
+ u8 dynamic_status_ant_a;
+ u8 dynamic_status_ant_b;
+ __le16 max_reg_pwr_limit_ant_a;
+ __le16 max_reg_pwr_limit_ant_b;
+ __le16 sar_limit_ant_a;
+ __le16 sar_limit_ant_b;
+ u8 reserved[2];
+} __packed; /* DHC_TAS_STATUS_PER_RADIO_S_VER_1 */
+
+/**
+ * struct iwl_dhc_tas_status_resp - Response to DHC_TOOLS_UMAC_GET_TAS_STATUS
+ * @header: DHC payload header, uses &struct iwl_dhc_payload_hdr
+ * @tas_config_info: see &struct bios_value_u32
+ * @mcc_block_list: block listed country codes
+ * @tas_status_radio: TAS status, uses &struct iwl_dhc_tas_status_per_radio
+ * @curr_mcc: current mcc
+ * @valid_radio_mask: bitmask indicating which entries in @tas_status_radio are valid.
+ * @reserved: reserved for alignment
+ */
+struct iwl_dhc_tas_status_resp {
+ struct iwl_dhc_payload_hdr header;
+ struct bios_value_u32 tas_config_info;
+ __le16 mcc_block_list[IWL_WTAS_BLACK_LIST_MAX];
+ struct iwl_dhc_tas_status_per_radio tas_status_radio[2];
+ __le16 curr_mcc;
+ u8 valid_radio_mask;
+ u8 reserved;
+} __packed; /* DHC_TAS_STATUS_RSP_API_S_VER_1 */
+
+/**
+ * struct iwl_dhc_cmd_resp_v1 - debug host command response
+ * @status: status of the command
+ * @data: the response data
+ */
+struct iwl_dhc_cmd_resp_v1 {
+ __le32 status;
+ __le32 data[];
+} __packed; /* DHC_RESP_API_S_VER_1 */
+
+/**
+ * struct iwl_dhc_cmd_resp - debug host command response
+ * @status: status of the command
+ * @descriptor: command descriptor (index_and_mask) returned
+ * @data: the response data
+ */
+struct iwl_dhc_cmd_resp {
+ __le32 status;
+ __le32 descriptor;
+ __le32 data[];
+} __packed; /* DHC_RESP_API_S_VER_2 and DHC_RESP_API_S_VER_3 */
+
+/**
+ * enum iwl_dhc_twt_operation_type - describes the TWT operation type
+ *
+ * @DHC_TWT_REQUEST: Send a Request TWT command
+ * @DHC_TWT_SUGGEST: Send a Suggest TWT command
+ * @DHC_TWT_DEMAND: Send a Demand TWT command
+ * @DHC_TWT_GROUPING: Send a Grouping TWT command
+ * @DHC_TWT_ACCEPT: Send a Accept TWT command
+ * @DHC_TWT_ALTERNATE: Send a Alternate TWT command
+ * @DHC_TWT_DICTATE: Send a Dictate TWT command
+ * @DHC_TWT_REJECT: Send a Reject TWT command
+ * @DHC_TWT_TEARDOWN: Send a TearDown TWT command
+ */
+enum iwl_dhc_twt_operation_type {
+ DHC_TWT_REQUEST,
+ DHC_TWT_SUGGEST,
+ DHC_TWT_DEMAND,
+ DHC_TWT_GROUPING,
+ DHC_TWT_ACCEPT,
+ DHC_TWT_ALTERNATE,
+ DHC_TWT_DICTATE,
+ DHC_TWT_REJECT,
+ DHC_TWT_TEARDOWN,
+}; /* DHC_TWT_OPERATION_TYPE_E */
+
+/**
+ * struct iwl_dhc_twt_operation - trigger a TWT operation
+ *
+ * @mac_id: the mac Id on which to trigger TWT operation
+ * @twt_operation: see &enum iwl_dhc_twt_operation_type
+ * @target_wake_time: when should we be on channel
+ * @interval_exp: the exponent for the interval
+ * @interval_mantissa: the mantissa for the interval
+ * @min_wake_duration: the minimum duration for the wake period
+ * @trigger: is the TWT triggered or not
+ * @flow_type: is the TWT announced or not
+ * @flow_id: the TWT flow identifier from 0 to 7
+ * @protection: is the TWT protected
+ * @ndo_paging_indicator: is ndo_paging_indicator set
+ * @responder_pm_mode: is responder_pm_mode set
+ * @negotiation_type: if the responder wants to doze outside the TWT SP
+ * @twt_request: 1 for TWT request, 0 otherwise
+ * @implicit: is TWT implicit
+ * @twt_group_assignment: the TWT group assignment
+ * @twt_channel: the TWT channel
+ * @reserved: reserved
+ */
+struct iwl_dhc_twt_operation {
+ __le32 mac_id;
+ __le32 twt_operation;
+ __le64 target_wake_time;
+ __le32 interval_exp;
+ __le32 interval_mantissa;
+ __le32 min_wake_duration;
+ u8 trigger;
+ u8 flow_type;
+ u8 flow_id;
+ u8 protection;
+ u8 ndo_paging_indicator;
+ u8 responder_pm_mode;
+ u8 negotiation_type;
+ u8 twt_request;
+ u8 implicit;
+ u8 twt_group_assignment;
+ u8 twt_channel;
+ u8 reserved;
+}; /* DHC_TWT_OPERATION_API_S */
+
+#endif /* __iwl_fw_api_dhc_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
index b8dff139aa05..e1952fc6d149 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
* Copyright (C) 2018-2022 Intel Corporation
- * Copyright (C) 2024 Intel Corporation
+ * Copyright (C) 2024-2025 Intel Corporation
*/
#ifndef __iwl_fw_api_location_h__
#define __iwl_fw_api_location_h__
@@ -1015,7 +1015,7 @@ struct iwl_tof_range_req_ap_entry_v9 {
} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_9 */
/**
- * struct iwl_tof_range_req_ap_entry_v10 - AP configuration parameters
+ * struct iwl_tof_range_req_ap_entry - AP configuration parameters
* @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
* @band: 0 for 5.2 GHz, 1 for 2.4 GHz, 2 for 6GHz
* @channel_num: AP Channel number
@@ -1063,7 +1063,7 @@ struct iwl_tof_range_req_ap_entry_v9 {
* @min_time_between_msr: For non trigger based NDP ranging, the minimum time
* between measurements in units of milliseconds
*/
-struct iwl_tof_range_req_ap_entry_v10 {
+struct iwl_tof_range_req_ap_entry {
__le32 initiator_ap_flags;
u8 band;
u8 channel_num;
@@ -1134,7 +1134,7 @@ enum iwl_tof_initiator_flags {
IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT = BIT(20),
}; /* LOCATION_RANGE_REQ_CMD_API_S_VER_5 */
-#define IWL_MVM_TOF_MAX_APS 5
+#define IWL_TOF_MAX_APS 5
#define IWL_MVM_TOF_MAX_TWO_SIDED_APS 5
/**
@@ -1153,7 +1153,7 @@ enum iwl_tof_initiator_flags {
* when the session is done (successfully / partially).
* one of iwl_tof_response_mode.
* @reserved0: reserved
- * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS)
* @macaddr_random: '0' Use default source MAC address (i.e. p2_p),
* '1' Use MAC Address randomization according to the below
* @range_req_bssid: ranging request BSSID
@@ -1183,7 +1183,7 @@ struct iwl_tof_range_req_cmd_v5 {
u8 ftm_tx_chains;
__le16 common_calib;
__le16 specific_calib;
- struct iwl_tof_range_req_ap_entry_v2 ap[IWL_MVM_TOF_MAX_APS];
+ struct iwl_tof_range_req_ap_entry_v2 ap[IWL_TOF_MAX_APS];
} __packed;
/* LOCATION_RANGE_REQ_CMD_API_S_VER_5 */
@@ -1192,7 +1192,7 @@ struct iwl_tof_range_req_cmd_v5 {
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
- * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS)
* @range_req_bssid: ranging request BSSID
* @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
* Bits set to 1 shall be randomized by the UMAC
@@ -1216,7 +1216,7 @@ struct iwl_tof_range_req_cmd_v7 {
__le32 tsf_mac_id;
__le16 common_calib;
__le16 specific_calib;
- struct iwl_tof_range_req_ap_entry_v3 ap[IWL_MVM_TOF_MAX_APS];
+ struct iwl_tof_range_req_ap_entry_v3 ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_7 */
/**
@@ -1224,7 +1224,7 @@ struct iwl_tof_range_req_cmd_v7 {
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
- * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS)
* @range_req_bssid: ranging request BSSID
* @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
* Bits set to 1 shall be randomized by the UMAC
@@ -1248,7 +1248,7 @@ struct iwl_tof_range_req_cmd_v8 {
__le32 tsf_mac_id;
__le16 common_calib;
__le16 specific_calib;
- struct iwl_tof_range_req_ap_entry_v4 ap[IWL_MVM_TOF_MAX_APS];
+ struct iwl_tof_range_req_ap_entry_v4 ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_8 */
/**
@@ -1256,7 +1256,7 @@ struct iwl_tof_range_req_cmd_v8 {
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
- * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS)
* @range_req_bssid: ranging request BSSID
* @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
* Bits set to 1 shall be randomized by the UMAC
@@ -1276,7 +1276,7 @@ struct iwl_tof_range_req_cmd_v9 {
u8 macaddr_template[ETH_ALEN];
__le32 req_timeout_ms;
__le32 tsf_mac_id;
- struct iwl_tof_range_req_ap_entry_v6 ap[IWL_MVM_TOF_MAX_APS];
+ struct iwl_tof_range_req_ap_entry_v6 ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_9 */
/**
@@ -1284,7 +1284,7 @@ struct iwl_tof_range_req_cmd_v9 {
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
- * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS)
* @range_req_bssid: ranging request BSSID
* @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
* Bits set to 1 shall be randomized by the UMAC
@@ -1304,7 +1304,7 @@ struct iwl_tof_range_req_cmd_v11 {
u8 macaddr_template[ETH_ALEN];
__le32 req_timeout_ms;
__le32 tsf_mac_id;
- struct iwl_tof_range_req_ap_entry_v7 ap[IWL_MVM_TOF_MAX_APS];
+ struct iwl_tof_range_req_ap_entry_v7 ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_11 */
/**
@@ -1312,7 +1312,7 @@ struct iwl_tof_range_req_cmd_v11 {
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
- * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS)
* @range_req_bssid: ranging request BSSID
* @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
* Bits set to 1 shall be randomized by the UMAC
@@ -1332,7 +1332,7 @@ struct iwl_tof_range_req_cmd_v12 {
u8 macaddr_template[ETH_ALEN];
__le32 req_timeout_ms;
__le32 tsf_mac_id;
- struct iwl_tof_range_req_ap_entry_v8 ap[IWL_MVM_TOF_MAX_APS];
+ struct iwl_tof_range_req_ap_entry_v8 ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_12 */
/**
@@ -1340,7 +1340,7 @@ struct iwl_tof_range_req_cmd_v12 {
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
- * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS)
* @range_req_bssid: ranging request BSSID
* @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
* Bits set to 1 shall be randomized by the UMAC
@@ -1360,15 +1360,15 @@ struct iwl_tof_range_req_cmd_v13 {
u8 macaddr_template[ETH_ALEN];
__le32 req_timeout_ms;
__le32 tsf_mac_id;
- struct iwl_tof_range_req_ap_entry_v9 ap[IWL_MVM_TOF_MAX_APS];
+ struct iwl_tof_range_req_ap_entry_v9 ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_13 */
/**
- * struct iwl_tof_range_req_cmd_v14 - start measurement cmd
+ * struct iwl_tof_range_req_cmd - start measurement cmd
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
- * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS)
* @range_req_bssid: ranging request BSSID
* @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
* Bits set to 1 shall be randomized by the UMAC
@@ -1377,9 +1377,9 @@ struct iwl_tof_range_req_cmd_v13 {
* This is the session time for completing the measurement.
* @tsf_mac_id: report the measurement start time for each ap in terms of the
* TSF of this mac id. 0xff to disable TSF reporting.
- * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v10.
+ * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry.
*/
-struct iwl_tof_range_req_cmd_v14 {
+struct iwl_tof_range_req_cmd {
__le32 initiator_flags;
u8 request_id;
u8 num_of_ap;
@@ -1388,8 +1388,8 @@ struct iwl_tof_range_req_cmd_v14 {
u8 macaddr_template[ETH_ALEN];
__le32 req_timeout_ms;
__le32 tsf_mac_id;
- struct iwl_tof_range_req_ap_entry_v10 ap[IWL_MVM_TOF_MAX_APS];
-} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_13 */
+ struct iwl_tof_range_req_ap_entry ap[IWL_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_15 */
/*
* enum iwl_tof_range_request_status - status of the sent request
@@ -1609,7 +1609,7 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v5 {
} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_5 */
/**
- * struct iwl_tof_range_rsp_ap_entry_ntfy_v6 - AP parameters (response)
+ * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
* @bssid: BSSID of the AP
* @measure_status: current APs measurement status, one of
* &enum iwl_tof_entry_status.
@@ -1645,7 +1645,7 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v5 {
* @tx_pn: the last PN used for this responder Tx in case PMF is configured in
* LE byte order.
*/
-struct iwl_tof_range_rsp_ap_entry_ntfy_v6 {
+struct iwl_tof_range_rsp_ap_entry_ntfy {
u8 bssid[ETH_ALEN];
u8 measure_status;
u8 measure_bw;
@@ -1695,7 +1695,7 @@ enum iwl_tof_response_status {
* @request_status: status of current measurement session, one of
* &enum iwl_tof_response_status.
 * @last_in_batch: report policy (when not all responses are uploaded at once)
- * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @num_of_aps: Number of APs to measure (error if > IWL_TOF_MAX_APS)
* @ap: per-AP data
*/
struct iwl_tof_range_rsp_ntfy_v5 {
@@ -1703,7 +1703,7 @@ struct iwl_tof_range_rsp_ntfy_v5 {
u8 request_status;
u8 last_in_batch;
u8 num_of_aps;
- struct iwl_tof_range_rsp_ap_entry_ntfy_v3 ap[IWL_MVM_TOF_MAX_APS];
+ struct iwl_tof_range_rsp_ap_entry_ntfy_v3 ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_5 */
/**
@@ -1719,7 +1719,7 @@ struct iwl_tof_range_rsp_ntfy_v6 {
u8 num_of_aps;
u8 last_report;
u8 reserved;
- struct iwl_tof_range_rsp_ap_entry_ntfy_v4 ap[IWL_MVM_TOF_MAX_APS];
+ struct iwl_tof_range_rsp_ap_entry_ntfy_v4 ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_6 */
/**
@@ -1735,23 +1735,23 @@ struct iwl_tof_range_rsp_ntfy_v7 {
u8 num_of_aps;
u8 last_report;
u8 reserved;
- struct iwl_tof_range_rsp_ap_entry_ntfy_v5 ap[IWL_MVM_TOF_MAX_APS];
+ struct iwl_tof_range_rsp_ap_entry_ntfy_v5 ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_7 */
/**
- * struct iwl_tof_range_rsp_ntfy_v8 - ranging response notification
+ * struct iwl_tof_range_rsp_ntfy - ranging response notification
* @request_id: A Token ID of the corresponding Range request
* @num_of_aps: Number of APs results
* @last_report: 1 if no more FTM sessions are scheduled, 0 otherwise.
* @reserved: reserved
* @ap: per-AP data
*/
-struct iwl_tof_range_rsp_ntfy_v8 {
+struct iwl_tof_range_rsp_ntfy {
u8 request_id;
u8 num_of_aps;
u8 last_report;
u8 reserved;
- struct iwl_tof_range_rsp_ap_entry_ntfy_v6 ap[IWL_MVM_TOF_MAX_APS];
+ struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_8,
LOCATION_RANGE_RSP_NTFY_API_S_VER_9 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index 37bb7002c1c9..b511e3aa6bb2 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2019, 2021-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2019, 2021-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -46,7 +46,7 @@ enum iwl_mac_conf_subcmd_ids {
*/
STA_CONFIG_CMD = 0xA,
/**
- * @AUX_STA_CMD: &struct iwl_mvm_aux_sta_cmd
+ * @AUX_STA_CMD: &struct iwl_aux_sta_cmd
*/
AUX_STA_CMD = 0xB,
/**
@@ -62,6 +62,10 @@ enum iwl_mac_conf_subcmd_ids {
*/
ROC_CMD = 0xE,
/**
+ * @TWT_OPERATION_CMD: &struct iwl_twt_operation_cmd
+ */
+ TWT_OPERATION_CMD = 0x10,
+ /**
* @MISSED_BEACONS_NOTIF: &struct iwl_missed_beacons_notif
*/
MISSED_BEACONS_NOTIF = 0xF6,
@@ -641,7 +645,7 @@ struct iwl_sta_cfg_cmd {
} __packed; /* STA_CMD_API_S_VER_1 */
/**
- * struct iwl_mvm_aux_sta_cmd - command for AUX STA configuration
+ * struct iwl_aux_sta_cmd - command for AUX STA configuration
* ( AUX_STA_CMD = 0xB )
*
* @sta_id: index of aux sta to configure
@@ -649,7 +653,7 @@ struct iwl_sta_cfg_cmd {
 * @mac_addr: mac addr of the auxiliary sta
* @reserved_for_mac_addr: reserved
*/
-struct iwl_mvm_aux_sta_cmd {
+struct iwl_aux_sta_cmd {
__le32 sta_id;
__le32 lmac_id;
u8 mac_addr[ETH_ALEN];
@@ -693,11 +697,11 @@ enum iwl_mvm_fw_esr_recommendation {
}; /* ESR_MODE_RECOMMENDATION_CODE_API_E_VER_1 */
/**
- * struct iwl_mvm_esr_mode_notif - FWs recommendation/force for esr mode
+ * struct iwl_esr_mode_notif - FWs recommendation/force for esr mode
*
* @action: the action to apply on esr state. See &iwl_mvm_fw_esr_recommendation
*/
-struct iwl_mvm_esr_mode_notif {
+struct iwl_esr_mode_notif {
__le32 action;
} __packed; /* ESR_MODE_RECOMMENDATION_NTFY_API_S_VER_1 */
@@ -748,4 +752,83 @@ struct iwl_esr_trans_fail_notif {
__le32 err_code;
} __packed; /* ESR_TRANSITION_FAILED_NTFY_API_S_VER_1 */
+/*
+ * enum iwl_twt_operation_type: TWT operation in a TWT action frame
+ *
+ * @TWT_OPERATION_REQUEST: TWT Request
+ * @TWT_OPERATION_SUGGEST: TWT Suggest
+ * @TWT_OPERATION_DEMAND: TWT Demand
+ * @TWT_OPERATION_GROUPING: TWT Grouping
+ * @TWT_OPERATION_ACCEPT: TWT Accept
+ * @TWT_OPERATION_ALTERNATE: TWT Alternate
+ * @TWT_OPERATION_DICTATE: TWT Dictate
+ * @TWT_OPERATION_REJECT: TWT Reject
+ * @TWT_OPERATION_TEARDOWN: TWT Teardown
+ * @TWT_OPERATION_UNAVAILABILITY: TWT Unavailability
+ */
+enum iwl_twt_operation_type {
+ TWT_OPERATION_REQUEST,
+ TWT_OPERATION_SUGGEST,
+ TWT_OPERATION_DEMAND,
+ TWT_OPERATION_GROUPING,
+ TWT_OPERATION_ACCEPT,
+ TWT_OPERATION_ALTERNATE,
+ TWT_OPERATION_DICTATE,
+ TWT_OPERATION_REJECT,
+ TWT_OPERATION_TEARDOWN,
+ TWT_OPERATION_UNAVAILABILITY,
+ TWT_OPERATION_MAX,
+}; /* TWT_OPERATION_TYPE_E_VER_1 */
+
+/**
+ * struct iwl_twt_operation_cmd - initiate a TWT session from driver
+ *
+ * @link_id: FW link id to initiate the TWT
+ * @twt_operation: &enum iwl_twt_operation_type
+ * @target_wake_time: TSF time to start the TWT
+ * @interval_exponent: the exponent for the interval
+ * @interval_mantissa: the mantissa for the interval
+ * @minimum_wake_duration: the minimum duration for the wake period
+ * @trigger: is the TWT triggered or not
+ * @flow_type: is the TWT announced (0) or not (1)
+ * @flow_id: the TWT flow identifier 0 - 7
+ * @twt_protection: is the TWT protected
+ * @ndp_paging_indicator: is ndp paging indicator set
+ * @responder_pm_mode: is responder pm mode set
+ * @negotiation_type: if the responder wants to doze outside the TWT SP
+ * @twt_request: 1 for TWT request (STA), 0 for TWT response (AP)
+ * @implicit: is TWT implicit
+ * @twt_group_assignment: the TWT group assignment
+ * @twt_channel: the TWT channel
+ * @restricted_info_present: is this a restricted TWT
+ * @dl_bitmap_valid: is DL (downlink) bitmap valid (restricted TWT)
+ * @ul_bitmap_valid: is UL (uplink) bitmap valid (restricted TWT)
+ * @dl_tid_bitmap: DL TID bitmap (restricted TWT)
+ * @ul_tid_bitmap: UL TID bitmap (restricted TWT)
+ */
+struct iwl_twt_operation_cmd {
+ __le32 link_id;
+ __le32 twt_operation;
+ __le64 target_wake_time;
+ __le32 interval_exponent;
+ __le32 interval_mantissa;
+ __le32 minimum_wake_duration;
+ u8 trigger;
+ u8 flow_type;
+ u8 flow_id;
+ u8 twt_protection;
+ u8 ndp_paging_indicator;
+ u8 responder_pm_mode;
+ u8 negotiation_type;
+ u8 twt_request;
+ u8 implicit;
+ u8 twt_group_assignment;
+ u8 twt_channel;
+ u8 restricted_info_present;
+ u8 dl_bitmap_valid;
+ u8 ul_bitmap_valid;
+ u8 dl_tid_bitmap;
+ u8 ul_tid_bitmap;
+} __packed; /* TWT_OPERATION_API_S_VER_1 */
+
#endif /* __iwl_fw_api_mac_cfg_h__ */
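
As a rough illustration of the simplest use of this command, a teardown of an existing flow, grounded only in the field descriptions above (the function and its caller are hypothetical; real users live in the driver's MLO code):

static void iwl_twt_fill_teardown_example(struct iwl_twt_operation_cmd *cmd,
					  u32 fw_link_id, u8 flow_id)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->link_id = cpu_to_le32(fw_link_id);
	cmd->twt_operation = cpu_to_le32(TWT_OPERATION_TEARDOWN);
	cmd->flow_id = flow_id;	/* 0 - 7, per the kernel-doc above */
}
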
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
index c73d4d597857..f63b25b03b7e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
@@ -1,11 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2019-2022, 2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2019-2022, 2024-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_phy_h__
#define __iwl_fw_api_phy_h__
+#include <linux/types.h>
+#include <linux/bits.h>
/**
* enum iwl_phy_ops_subcmd_ids - PHY group commands
@@ -19,7 +21,7 @@ enum iwl_phy_ops_subcmd_ids {
CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
/**
- * @CTDP_CONFIG_CMD: &struct iwl_mvm_ctdp_cmd
+ * @CTDP_CONFIG_CMD: &struct iwl_ctdp_cmd
*/
CTDP_CONFIG_CMD = 0x03,
@@ -55,7 +57,7 @@ enum iwl_phy_ops_subcmd_ids {
/**
* @DTS_MEASUREMENT_NOTIF_WIDE:
* &struct iwl_dts_measurement_notif_v1 or
- * &struct iwl_dts_measurement_notif_v2
+ * &struct iwl_dts_measurement_notif
*/
DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
};
@@ -152,13 +154,13 @@ struct iwl_dts_measurement_notif_v1 {
} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_1*/
/**
- * struct iwl_dts_measurement_notif_v2 - measurements notification
+ * struct iwl_dts_measurement_notif - measurements notification
*
* @temp: the measured temperature
* @voltage: the measured voltage
* @threshold_idx: the trip index that was crossed
*/
-struct iwl_dts_measurement_notif_v2 {
+struct iwl_dts_measurement_notif {
__le32 temp;
__le32 voltage;
__le32 threshold_idx;
@@ -195,25 +197,25 @@ struct ct_kill_notif {
} __packed; /* CT_KILL_NOTIFICATION_API_S_VER_1, CT_KILL_NOTIFICATION_API_S_VER_2 */
/**
-* enum iwl_mvm_ctdp_cmd_operation - CTDP command operations
+* enum iwl_ctdp_cmd_operation - CTDP command operations
* @CTDP_CMD_OPERATION_START: update the current budget
* @CTDP_CMD_OPERATION_STOP: stop ctdp
* @CTDP_CMD_OPERATION_REPORT: get the average budget
*/
-enum iwl_mvm_ctdp_cmd_operation {
+enum iwl_ctdp_cmd_operation {
CTDP_CMD_OPERATION_START = 0x1,
CTDP_CMD_OPERATION_STOP = 0x2,
CTDP_CMD_OPERATION_REPORT = 0x4,
};/* CTDP_CMD_OPERATION_TYPE_E */
/**
- * struct iwl_mvm_ctdp_cmd - track and manage the FW power consumption budget
+ * struct iwl_ctdp_cmd - track and manage the FW power consumption budget
*
- * @operation: see &enum iwl_mvm_ctdp_cmd_operation
+ * @operation: see &enum iwl_ctdp_cmd_operation
* @budget: the budget in milliwatt
* @window_size: defined in API but not used
*/
-struct iwl_mvm_ctdp_cmd {
+struct iwl_ctdp_cmd {
__le32 operation;
__le32 budget;
__le32 window_size;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index 1a60f0cdf972..c2f806cbab59 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -226,6 +226,58 @@ struct iwl_tlc_update_notif {
__le32 amsdu_enabled;
} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_2 */
+/**
+ * enum iwl_tlc_debug_types - debug options
+ */
+enum iwl_tlc_debug_types {
+ /**
+ * @IWL_TLC_DEBUG_FIXED_RATE: set fixed rate for rate scaling
+ */
+ IWL_TLC_DEBUG_FIXED_RATE,
+ /**
+ * @IWL_TLC_DEBUG_AGG_DURATION_LIM: time limit for a BA
+ * session, in usec
+ */
+ IWL_TLC_DEBUG_AGG_DURATION_LIM,
+ /**
+ * @IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM: set max number of frames
+ * in an aggregation
+ */
+ IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM,
+ /**
+ * @IWL_TLC_DEBUG_TPC_ENABLED: enable or disable tpc
+ */
+ IWL_TLC_DEBUG_TPC_ENABLED,
+ /**
+ * @IWL_TLC_DEBUG_TPC_STATS: get number of frames Tx'ed in each
+ * tpc step
+ */
+ IWL_TLC_DEBUG_TPC_STATS,
+ /**
+ * @IWL_TLC_DEBUG_RTS_DISABLE: disable RTS (bool true/false).
+ */
+ IWL_TLC_DEBUG_RTS_DISABLE,
+ /**
+ * @IWL_TLC_DEBUG_PARTIAL_FIXED_RATE: set partial fixed rate to fw
+ */
+ IWL_TLC_DEBUG_PARTIAL_FIXED_RATE,
+}; /* TLC_MNG_DEBUG_TYPES_API_E */
+
+#define MAX_DATA_IN_DHC_TLC_CMD 10
+
+/**
+ * struct iwl_dhc_tlc_cmd - fixed debug config
+ * @sta_id: bit 0 - enable/disable, bits 1 - 7 hold station id
+ * @reserved1: reserved
+ * @type: type id of %enum iwl_tlc_debug_types
+ * @data: data to send
+ */
+struct iwl_dhc_tlc_cmd {
+ u8 sta_id;
+ u8 reserved1[3];
+ __le32 type;
+ __le32 data[MAX_DATA_IN_DHC_TLC_CMD];
+} __packed; /* TLC_MNG_DEBUG_CMD_S */
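
This payload is carried inside the DEBUG_HOST_COMMAND wrapper from api/dhc.h; a hedged sketch of combining the two structures for a fixed-rate request (the allocation, the enable bit in sta_id and the call site are illustrative, not the driver's actual code):

static struct iwl_dhc_cmd *iwl_dhc_tlc_fixed_rate_example(u8 sta_id, u32 rate)
{
	struct iwl_dhc_cmd *dhc;
	struct iwl_dhc_tlc_cmd *tlc;

	dhc = kzalloc(sizeof(*dhc) + sizeof(*tlc), GFP_KERNEL);
	if (!dhc)
		return NULL;

	tlc = (void *)dhc->data;
	tlc->sta_id = 1 | (sta_id << 1);	/* bit 0: enable, bits 1-7: station id */
	tlc->type = cpu_to_le32(IWL_TLC_DEBUG_FIXED_RATE);
	tlc->data[0] = cpu_to_le32(rate);

	dhc->length = cpu_to_le32(sizeof(*tlc) >> 2);	/* length is in DWs */
	dhc->index_and_mask =
		cpu_to_le32(DHC_TABLE_INTEGRATION | DHC_TARGET_UMAC |
			    DHC_INTEGRATION_TLC_DEBUG_CONFIG);

	return dhc;
}
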
#define IWL_MAX_MCS_DISPLAY_SIZE 12
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
index d7f8a276b683..ecbcd5084cd8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
@@ -191,6 +191,7 @@ enum iwl_sta_sleep_flag {
#define STA_KEY_IDX_INVALID (0xff)
#define STA_KEY_MAX_DATA_KEY_NUM (4)
#define IWL_MAX_GLOBAL_KEYS (4)
+#define IWL_MAX_NUM_IGTKS 2
#define STA_KEY_LEN_WEP40 (5)
#define STA_KEY_LEN_WEP104 (13)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
index 18d030334a6a..f586379d66dd 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2020, 2022-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2020, 2022-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -351,7 +351,7 @@ enum iwl_roc_activity {
}; /* ROC_ACTIVITY_API_E_VER_1 */
/*
- * ROC command
+ * ROC command v5
*
* Command requests the firmware to remain on a channel for a certain duration.
*
@@ -366,7 +366,7 @@ enum iwl_roc_activity {
* @max_delay: max delay the ROC can start in TU
* @duration: remain on channel duration in TU
*/
-struct iwl_roc_req {
+struct iwl_roc_req_v5 {
__le32 action;
__le32 activity;
__le32 sta_id;
@@ -375,7 +375,41 @@ struct iwl_roc_req {
__le16 reserved;
__le32 max_delay;
__le32 duration;
-} __packed; /* ROC_CMD_API_S_VER_3 */
+} __packed; /* ROC_CMD_API_S_VER_5 */
+
+/*
+ * ROC command
+ *
+ * Command requests the firmware to remain on a channel for a certain duration.
+ *
+ * ( MAC_CONF_GROUP 0x3, ROC_CMD 0xE )
+ *
+ * @action: action to perform, see &enum iwl_ctxt_action
+ * @activity: type of activity, see &enum iwl_roc_activity
+ * @sta_id: station id, resumed during "Remain On Channel" activity.
+ * @channel_info: &struct iwl_fw_channel_info
+ * @node_addr: node MAC address for Rx filtering
+ * @reserved1: align to a dword
+ * @max_delay: max delay the ROC can start in TU
+ * @duration: remain on channel duration in TU
+ * @interval: interval between repetitions (when repetitions > 1).
+ * @repetitions: number of repetitions
+ * 0xFF: infinite repetitions. 0 or 1: single repetition.
+ * @reserved2: align to a dword
+ */
+struct iwl_roc_req {
+ __le32 action;
+ __le32 activity;
+ __le32 sta_id;
+ struct iwl_fw_channel_info channel_info;
+ u8 node_addr[ETH_ALEN];
+ __le16 reserved1;
+ __le32 max_delay;
+ __le32 duration;
+ __le32 interval;
+ u8 repetitions;
+ u8 reserved2[3];
+} __packed; /* ROC_CMD_API_S_VER_6 */
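
Because v6 only appends fields to v5, a driver can fill one struct iwl_roc_req and send only as many bytes as the firmware's command version understands; a hedged sketch of that pattern (struct iwl_mvm, iwl_fw_lookup_cmd_ver() and iwl_mvm_send_cmd_pdu() are assumed from the existing driver code, the function itself is illustrative):

static int iwl_send_roc_cmd_example(struct iwl_mvm *mvm, struct iwl_roc_req *roc)
{
	u8 ver = iwl_fw_lookup_cmd_ver(mvm->fw,
				       WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
	/* v6 appends interval/repetitions; older firmware gets the v5 size */
	u16 len = ver >= 6 ? sizeof(*roc) : sizeof(struct iwl_roc_req_v5);

	return iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
				    0, len, roc);
}
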
/*
* ROC notification
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 6594216f873c..03f639fbf9b6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -2618,29 +2618,28 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
},
};
-static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
- struct iwl_fwrt_dump_data *dump_data,
- struct list_head *list)
+enum iwl_dump_ini_region_selector {
+ IWL_INI_DUMP_ALL_REGIONS,
+ IWL_INI_DUMP_EARLY_REGIONS,
+ IWL_INI_DUMP_LATE_REGIONS,
+};
+
+static u32
+iwl_dump_ini_dump_regions(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_dump_data *dump_data,
+ struct list_head *list,
+ enum iwl_fw_ini_time_point tp_id,
+ u64 regions_mask,
+ struct iwl_dump_ini_region_data *imr_reg_data,
+ enum iwl_dump_ini_region_selector which)
{
- struct iwl_fw_ini_trigger_tlv *trigger = dump_data->trig;
- enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trigger->time_point);
- struct iwl_dump_ini_region_data reg_data = {
- .dump_data = dump_data,
- };
- struct iwl_dump_ini_region_data imr_reg_data = {
- .dump_data = dump_data,
- };
- int i;
u32 size = 0;
- u64 regions_mask = le64_to_cpu(trigger->regions_mask) &
- ~(fwrt->trans->dbg.unsupported_region_msk);
- BUILD_BUG_ON(sizeof(trigger->regions_mask) != sizeof(regions_mask));
- BUILD_BUG_ON((sizeof(trigger->regions_mask) * BITS_PER_BYTE) <
- ARRAY_SIZE(fwrt->trans->dbg.active_regions));
-
- for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.active_regions); i++) {
- u32 reg_type;
+ for (int i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.active_regions); i++) {
+ struct iwl_dump_ini_region_data reg_data = {
+ .dump_data = dump_data,
+ };
+ u32 reg_type, dp;
struct iwl_fw_ini_region_tlv *reg;
if (!(BIT_ULL(i) & regions_mask))
@@ -2658,6 +2657,8 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
if (reg_type >= ARRAY_SIZE(iwl_dump_ini_region_ops))
continue;
+ dp = le32_get_bits(reg->id, IWL_FW_INI_REGION_DUMP_POLICY_MASK);
+
if ((reg_type == IWL_FW_INI_REGION_PERIPHERY_PHY ||
reg_type == IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE ||
reg_type == IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP) &&
@@ -2667,6 +2668,20 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
tp_id);
continue;
}
+
+ switch (which) {
+ case IWL_INI_DUMP_ALL_REGIONS:
+ break;
+ case IWL_INI_DUMP_EARLY_REGIONS:
+ if (!(dp & IWL_FW_IWL_DEBUG_DUMP_POLICY_BEFORE_RESET))
+ continue;
+ break;
+ case IWL_INI_DUMP_LATE_REGIONS:
+ if (dp & IWL_FW_IWL_DEBUG_DUMP_POLICY_BEFORE_RESET)
+ continue;
+ break;
+ }
+
/*
* DRAM_IMR can be collected only for FW/HW error timepoint
* when fw is not alive. In addition, it must be collected
@@ -2676,7 +2691,8 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
if (reg_type == IWL_FW_INI_REGION_DRAM_IMR) {
if (tp_id == IWL_FW_INI_TIME_POINT_FW_ASSERT ||
tp_id == IWL_FW_INI_TIME_POINT_FW_HW_ERROR)
- imr_reg_data.reg_tlv = fwrt->trans->dbg.active_regions[i];
+ imr_reg_data->reg_tlv =
+ fwrt->trans->dbg.active_regions[i];
else
IWL_INFO(fwrt,
"WRT: trying to collect DRAM_IMR at time point: %d, skipping\n",
@@ -2689,9 +2705,44 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
size += iwl_dump_ini_mem(fwrt, list, &reg_data,
&iwl_dump_ini_region_ops[reg_type]);
}
+
+ return size;
+}
+
+static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_dump_data *dump_data,
+ struct list_head *list)
+{
+ struct iwl_fw_ini_trigger_tlv *trigger = dump_data->trig;
+ enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trigger->time_point);
+ struct iwl_dump_ini_region_data imr_reg_data = {
+ .dump_data = dump_data,
+ };
+ u32 size = 0;
+ u64 regions_mask = le64_to_cpu(trigger->regions_mask) &
+ ~(fwrt->trans->dbg.unsupported_region_msk);
+
+ BUILD_BUG_ON(sizeof(trigger->regions_mask) != sizeof(regions_mask));
+ BUILD_BUG_ON((sizeof(trigger->regions_mask) * BITS_PER_BYTE) <
+ ARRAY_SIZE(fwrt->trans->dbg.active_regions));
+
+ if (trigger->time_point &
+ cpu_to_le32(IWL_FW_INI_APPLY_POLICY_RESET_HANDSHAKE)) {
+ size += iwl_dump_ini_dump_regions(fwrt, dump_data, list, tp_id,
+ regions_mask, &imr_reg_data,
+ IWL_INI_DUMP_EARLY_REGIONS);
+ iwl_trans_pcie_fw_reset_handshake(fwrt->trans);
+ size += iwl_dump_ini_dump_regions(fwrt, dump_data, list, tp_id,
+ regions_mask, &imr_reg_data,
+ IWL_INI_DUMP_LATE_REGIONS);
+ } else {
+ size += iwl_dump_ini_dump_regions(fwrt, dump_data, list, tp_id,
+ regions_mask, &imr_reg_data,
+ IWL_INI_DUMP_ALL_REGIONS);
+ }
/* collect DRAM_IMR region in the last */
if (imr_reg_data.reg_tlv)
- size += iwl_dump_ini_mem(fwrt, list, &reg_data,
+ size += iwl_dump_ini_mem(fwrt, list, &imr_reg_data,
&iwl_dump_ini_region_ops[IWL_FW_INI_REGION_DRAM_IMR]);
if (size) {
@@ -3072,9 +3123,8 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
}
IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
-void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
- u32 timepoint,
- u32 timepoint_data)
+static void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
+ u32 timepoint, u32 timepoint_data)
{
struct iwl_dbg_dump_complete_cmd hcmd_data;
struct iwl_host_cmd hcmd = {
@@ -3102,6 +3152,7 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
struct iwl_fw_dbg_params params = {0};
struct iwl_fwrt_dump_data *dump_data =
&fwrt->dump.wks[wk_idx].dump_data;
+
if (!test_bit(wk_idx, &fwrt->dump.active_wks))
return;
@@ -3126,9 +3177,9 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection start\n");
if (iwl_trans_dbg_ini_valid(fwrt->trans))
- iwl_fw_error_ini_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data);
+ iwl_fw_error_ini_dump(fwrt, dump_data);
else
- iwl_fw_error_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data);
+ iwl_fw_error_dump(fwrt, dump_data);
IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection done\n");
iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);
@@ -3145,7 +3196,6 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
if (fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY)
iwl_force_nmi(fwrt->trans);
-
out:
if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
iwl_fw_error_dump_data_free(dump_data);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index 87998374f459..35e30e5db462 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -324,9 +324,6 @@ static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt,
}
void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt);
-void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
- u32 timepoint,
- u32 timepoint_data);
bool iwl_fwrt_read_err_table(struct iwl_trans *trans, u32 base, u32 *err_id);
void iwl_fw_disable_dbg_asserts(struct iwl_fw_runtime *fwrt);
void iwl_fw_dbg_clear_monitor_buf(struct iwl_fw_runtime *fwrt);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dhc-utils.h b/drivers/net/wireless/intel/iwlwifi/fw/dhc-utils.h
new file mode 100644
index 000000000000..983acee5cd7d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dhc-utils.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2021, 2025 Intel Corporation
+ */
+#ifndef __iwl_fw_dhc_utils_h__
+#define __iwl_fw_dhc_utils_h__
+
+#include <linux/types.h>
+#include "fw/img.h"
+#include "api/commands.h"
+#include "api/dhc.h"
+
+/**
+ * iwl_dhc_resp_status - return status of DHC response
+ * @fw: firmware image information
+ * @pkt: response packet, must not be %NULL
+ *
+ * Returns: the status value of the DHC command or (u32)-1 if the
+ * response was too short.
+ */
+static inline u32 iwl_dhc_resp_status(const struct iwl_fw *fw,
+ struct iwl_rx_packet *pkt)
+{
+ if (iwl_fw_lookup_notif_ver(fw, IWL_ALWAYS_LONG_GROUP,
+ DEBUG_HOST_COMMAND, 1) >= 2) {
+ struct iwl_dhc_cmd_resp *resp = (void *)pkt->data;
+
+ if (iwl_rx_packet_payload_len(pkt) < sizeof(*resp))
+ return (u32)-1;
+
+ return le32_to_cpu(resp->status);
+ } else {
+ struct iwl_dhc_cmd_resp_v1 *resp = (void *)pkt->data;
+
+ if (iwl_rx_packet_payload_len(pkt) < sizeof(*resp))
+ return (u32)-1;
+
+ return le32_to_cpu(resp->status);
+ }
+}
+
+/**
+ * iwl_dhc_resp_data - return data pointer of DHC response
+ * @fw: firmware image information
+ * @pkt: response packet, must not be %NULL
+ * @len: where to store the length
+ *
+ * Returns: The data pointer, or an ERR_PTR() if the data was
+ * not valid (too short).
+ */
+static inline void *iwl_dhc_resp_data(const struct iwl_fw *fw,
+ struct iwl_rx_packet *pkt,
+ unsigned int *len)
+{
+ if (iwl_fw_lookup_notif_ver(fw, IWL_ALWAYS_LONG_GROUP,
+ DEBUG_HOST_COMMAND, 1) >= 2) {
+ struct iwl_dhc_cmd_resp *resp = (void *)pkt->data;
+
+ if (iwl_rx_packet_payload_len(pkt) < sizeof(*resp))
+ return ERR_PTR(-EINVAL);
+
+ *len = iwl_rx_packet_payload_len(pkt) - sizeof(*resp);
+ return (void *)&resp->data;
+ } else {
+ struct iwl_dhc_cmd_resp_v1 *resp = (void *)pkt->data;
+
+ if (iwl_rx_packet_payload_len(pkt) < sizeof(*resp))
+ return ERR_PTR(-EINVAL);
+
+ *len = iwl_rx_packet_payload_len(pkt) - sizeof(*resp);
+ return (void *)&resp->data;
+ }
+}
+
+#endif /* __iwl_fw_dhc_utils_h__ */
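
A hedged sketch of how a notification handler might consume a DHC response with these helpers (the handler is illustrative; the meaning of the status word is command-specific and still has to be checked by the caller):

static void iwl_dhc_handle_resp_example(const struct iwl_fw *fw,
					struct iwl_rx_packet *pkt)
{
	unsigned int len;
	u32 status = iwl_dhc_resp_status(fw, pkt);
	void *data = iwl_dhc_resp_data(fw, pkt, &len);

	if (status == (u32)-1 || IS_ERR(data))
		return;	/* response too short for either layout */

	/* 'data' now points at 'len' bytes of command-specific payload,
	 * e.g. a struct iwl_dhc_tas_status_resp for
	 * DHC_TOOLS_UMAC_GET_TAS_STATUS. */
}
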
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
index ea435ee94312..6adcfa6e214a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
@@ -456,7 +456,25 @@ iwl_parse_tas_selection(const u32 tas_selection_in, const u8 tbl_rev)
}
IWL_EXPORT_SYMBOL(iwl_parse_tas_selection);
-static __le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
+bool iwl_add_mcc_to_tas_block_list(u16 *list, u8 *size, u16 mcc)
+{
+ for (int i = 0; i < *size; i++) {
+ if (list[i] == mcc)
+ return true;
+ }
+
+ /* Verify that there is room for another country
+ * If *size == IWL_WTAS_BLACK_LIST_MAX, then the table is full.
+ */
+ if (*size >= IWL_WTAS_BLACK_LIST_MAX)
+ return false;
+
+	list[(*size)++] = mcc;
+ return true;
+}
+IWL_EXPORT_SYMBOL(iwl_add_mcc_to_tas_block_list);
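
For illustration, the helper both deduplicates and bounds the list; a minimal usage sketch, assuming the MCC here is the usual two-ASCII-character country code packed into a u16:

static void iwl_tas_block_list_example(void)
{
	u16 list[IWL_WTAS_BLACK_LIST_MAX] = {};
	u8 size = 0;

	/* the first call appends, the second finds the entry and is a no-op */
	iwl_add_mcc_to_tas_block_list(list, &size, ('U' << 8) | 'S');
	iwl_add_mcc_to_tas_block_list(list, &size, ('U' << 8) | 'S');

	/* returns false once 'size' reaches IWL_WTAS_BLACK_LIST_MAX */
}
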
+
+__le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
{
int ret;
u32 val;
@@ -503,6 +521,7 @@ static __le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
return config_bitmap;
}
+IWL_EXPORT_SYMBOL(iwl_get_lari_config_bitmap);
static size_t iwl_get_lari_config_cmd_size(u8 cmd_ver)
{
@@ -553,6 +572,10 @@ int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt,
WIDE_ID(REGULATORY_AND_NVM_GROUP,
LARI_CONFIG_CHANGE), 1);
+ if (WARN_ONCE(cmd_ver > 12,
+ "Don't add newer versions to this function\n"))
+ return -EINVAL;
+
memset(cmd, 0, sizeof(*cmd));
*cmd_size = iwl_get_lari_config_cmd_size(cmd_ver);
@@ -674,3 +697,34 @@ bool iwl_puncturing_is_allowed_in_bios(u32 puncturing, u16 mcc)
}
}
IWL_EXPORT_SYMBOL(iwl_puncturing_is_allowed_in_bios);
+
+bool iwl_rfi_is_enabled_in_bios(struct iwl_fw_runtime *fwrt)
+{
+ /* default behaviour is disabled */
+ u32 value = 0;
+ int ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_RFI_CONFIG, &value);
+
+ if (ret < 0) {
+ IWL_DEBUG_RADIO(fwrt, "Failed to get DSM RFI, ret=%d\n", ret);
+ return false;
+ }
+
+ value &= DSM_VALUE_RFI_DISABLE;
+ /* RFI BIOS CONFIG value can be 0 or 3 only.
+	 * i.e. 0 means DDR and DLVR enabled, 3 means DDR and DLVR disabled.
+	 * 1 and 2 are invalid BIOS configurations, so it's not possible to
+	 * disable DDR/DLVR separately.
+ */
+ if (!value) {
+ IWL_DEBUG_RADIO(fwrt, "DSM RFI is evaluated to enable\n");
+ return true;
+ } else if (value == DSM_VALUE_RFI_DISABLE) {
+ IWL_DEBUG_RADIO(fwrt, "DSM RFI is evaluated to disable\n");
+ } else {
+ IWL_DEBUG_RADIO(fwrt,
+ "DSM RFI got invalid value, value=%d\n", value);
+ }
+
+ return false;
+}
+IWL_EXPORT_SYMBOL(iwl_rfi_is_enabled_in_bios);
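
To illustrate the newly exported iwl_add_mcc_to_tas_block_list() above, a hypothetical caller could build a TAS block list as below. The hex values follow the driver's two-ASCII-character MCC packing ('U'<<8 | 'S', 'C'<<8 | 'A') and are examples only.

static void example_build_tas_block_list(void)
{
	u16 list[IWL_WTAS_BLACK_LIST_MAX] = {};
	u8 size = 0;

	iwl_add_mcc_to_tas_block_list(list, &size, 0x5553); /* "US" */
	iwl_add_mcc_to_tas_block_list(list, &size, 0x5553); /* duplicate, ignored */
	iwl_add_mcc_to_tas_block_list(list, &size, 0x4341); /* "CA" */

	/* size == 2; a new MCC is rejected once *size reaches
	 * IWL_WTAS_BLACK_LIST_MAX
	 */
}
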
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
index b355d7bef14c..53693314d505 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
@@ -167,6 +167,8 @@ enum iwl_dsm_values_rfi {
#define DSM_VALUE_RFI_DISABLE (DSM_VALUE_RFI_DLVR_DISABLE |\
DSM_VALUE_RFI_DDR_DISABLE)
+bool iwl_rfi_is_enabled_in_bios(struct iwl_fw_runtime *fwrt);
+
enum iwl_dsm_masks_reg {
DSM_MASK_CHINA_22_REG = BIT(2)
};
@@ -190,6 +192,7 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
bool iwl_is_ppag_approved(struct iwl_fw_runtime *fwrt);
bool iwl_is_tas_approved(void);
+bool iwl_add_mcc_to_tas_block_list(u16 *list, u8 *size, u16 mcc);
struct iwl_tas_selection_data
iwl_parse_tas_selection(const u32 tas_selection, const u8 tbl_rev);
@@ -212,6 +215,7 @@ int iwl_bios_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc);
int iwl_bios_get_eckv(struct iwl_fw_runtime *fwrt, u32 *ext_clk);
int iwl_bios_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value);
+__le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt);
int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt,
struct iwl_lari_config_change_cmd *cmd,
size_t *cmd_size);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index 048877fa7c71..a9e6bba2419e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -104,6 +104,7 @@ struct iwl_txf_iter_data {
* the driver by calling &iwl_fw_set_current_image()
* @dump: debug dump data
* @uats_table: AP type table
+ * @uats_valid: whether the AP type table is valid
* @uefi_tables_lock_status: The status of the WIFI GUID UEFI variables lock:
* 0: Unlocked, 1 and 2: Locked.
* Only read the UEFI variables if locked.
@@ -181,6 +182,7 @@ struct iwl_fw_runtime {
struct iwl_sar_offset_mapping_cmd sgom_table;
bool sgom_enabled;
struct iwl_mcc_allowed_ap_type_cmd uats_table;
+ bool uats_valid;
u8 uefi_tables_lock_status;
};
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index 434eed4130b9..386aadbce2a2 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright(c) 2021-2024 Intel Corporation
+ * Copyright(c) 2021-2025 Intel Corporation
*/
#include "iwl-drv.h"
@@ -402,6 +402,9 @@ static int iwl_uefi_uats_parse(struct uefi_cnv_wlan_uats_data *uats_data,
memcpy(fwrt->uats_table.offset_map, uats_data->offset_map,
sizeof(fwrt->uats_table.offset_map));
+
+ fwrt->uats_valid = true;
+
return 0;
}
@@ -678,14 +681,16 @@ int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk)
struct uefi_cnv_var_eckv *data;
int ret = 0;
- data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_ECKV_NAME,
- "ECKV", sizeof(*data), NULL);
+ data = iwl_uefi_get_verified_variable_guid(fwrt->trans,
+ &IWL_EFI_WIFI_BT_GUID,
+ IWL_UEFI_ECKV_NAME,
+ "ECKV", sizeof(*data), NULL);
if (IS_ERR(data))
return -EINVAL;
if (data->revision != IWL_UEFI_ECKV_REVISION) {
ret = -EINVAL;
- IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WRDD revision:%d\n",
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI ECKV revision:%d\n",
data->revision);
goto out;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
index 0c8943a8bd01..eb3c05417da3 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright(c) 2021-2024 Intel Corporation
+ * Copyright(c) 2021-2025 Intel Corporation
*/
#ifndef __iwl_fw_uefi__
#define __iwl_fw_uefi__
@@ -19,7 +19,7 @@
#define IWL_UEFI_WTAS_NAME L"UefiCnvWlanWTAS"
#define IWL_UEFI_SPLC_NAME L"UefiCnvWlanSPLC"
#define IWL_UEFI_WRDD_NAME L"UefiCnvWlanWRDD"
-#define IWL_UEFI_ECKV_NAME L"UefiCnvWlanECKV"
+#define IWL_UEFI_ECKV_NAME L"UefiCnvCommonECKV"
#define IWL_UEFI_DSM_NAME L"UefiCnvWlanGeneralCfg"
#define IWL_UEFI_WBEM_NAME L"UefiCnvWlanWBEM"
#define IWL_UEFI_PUNCTURING_NAME L"UefiCnvWlanPuncturing"
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 2b6a80142aba..b9bd89bfdd74 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#ifndef __IWL_CONFIG_H__
#define __IWL_CONFIG_H__
@@ -451,11 +451,8 @@ struct iwl_cfg {
#define IWL_CFG_RF_ID_HR 0x7
#define IWL_CFG_RF_ID_HR1 0x4
-#define IWL_CFG_NO_160 0x1
-#define IWL_CFG_160 0x0
-
-#define IWL_CFG_NO_320 0x1
-#define IWL_CFG_320 0x0
+#define IWL_CFG_BW_NO_LIM (U16_MAX - 1)
+#define IWL_CFG_BW_ANY U16_MAX
#define IWL_CFG_CORES_BT 0x0
#define IWL_CFG_CORES_BT_GNSS 0x5
@@ -467,7 +464,7 @@ struct iwl_cfg {
#define IWL_CFG_IS_JACKET 0x1
#define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4)
-#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0200) >> 9)
+#define IWL_SUBDEVICE_BW_LIM(subdevice) ((u16)((subdevice) & 0x0200) >> 9)
#define IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10)
struct iwl_dev_info {
@@ -475,10 +472,10 @@ struct iwl_dev_info {
u16 subdevice;
u16 mac_type;
u16 rf_type;
+ u16 bw_limit;
u8 mac_step;
u8 rf_step;
u8 rf_id;
- u8 no_160;
u8 cores;
u8 cdb;
u8 jacket;
@@ -492,7 +489,7 @@ extern const unsigned int iwl_dev_info_table_size;
const struct iwl_dev_info *
iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb,
- u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step);
+ u8 jacket, u8 rf_id, u8 bw_limit, u8 cores, u8 rf_step);
extern const struct pci_device_id iwl_hw_card_ids[];
#endif
@@ -534,7 +531,6 @@ extern const char iwl9560_killer_1550i_name[];
extern const char iwl9560_killer_1550s_name[];
extern const char iwl_ax200_name[];
extern const char iwl_ax203_name[];
-extern const char iwl_ax204_name[];
extern const char iwl_ax201_name[];
extern const char iwl_ax101_name[];
extern const char iwl_ax200_killer_1650w_name[];
@@ -550,17 +546,13 @@ extern const char iwl_ax211_killer_1675i_name[];
extern const char iwl_ax411_killer_1690s_name[];
extern const char iwl_ax411_killer_1690i_name[];
extern const char iwl_ax211_name[];
-extern const char iwl_ax221_name[];
extern const char iwl_ax231_name[];
extern const char iwl_ax411_name[];
-extern const char iwl_bz_name[];
extern const char iwl_fm_name[];
extern const char iwl_wh_name[];
+extern const char iwl_sp_name[];
extern const char iwl_gl_name[];
extern const char iwl_mtp_name[];
-extern const char iwl_sc_name[];
-extern const char iwl_sc2_name[];
-extern const char iwl_sc2f_name[];
extern const char iwl_dr_name[];
extern const char iwl_br_name[];
#if IS_ENABLED(CONFIG_IWLDVM)
@@ -662,6 +654,12 @@ extern const struct iwl_cfg iwl_cfg_ma;
extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0;
extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0;
+#endif /* CONFIG_IWLMVM */
+
+#if IS_ENABLED(CONFIG_IWLMLD)
+extern const struct iwl_ht_params iwl_bz_ht_params;
+
extern const struct iwl_cfg iwl_cfg_bz;
extern const struct iwl_cfg iwl_cfg_gl;
@@ -671,6 +669,6 @@ extern const struct iwl_cfg iwl_cfg_sc2;
extern const struct iwl_cfg iwl_cfg_sc2f;
extern const struct iwl_cfg iwl_cfg_dr;
extern const struct iwl_cfg iwl_cfg_br;
-#endif /* CONFIG_IWLMVM */
+#endif /* CONFIG_IWLMLD */
#endif /* __IWL_CONFIG_H__ */
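
A small illustrative decode of the subdevice bits using the macros above; the subsystem ID 0x02F0 is made up for the example, and how the matching code consumes the extracted bw_limit bit is not shown in this hunk.

static void example_subdevice_decode(void)
{
	u16 subdevice = 0x02F0;
	u16 rf_id = IWL_SUBDEVICE_RF_ID(subdevice);   /* bits 4-7   -> 0xF */
	u16 bw_lim = IWL_SUBDEVICE_BW_LIM(subdevice); /* bit 9      -> 1   */
	u16 cores = IWL_SUBDEVICE_CORES(subdevice);   /* bits 10-12 -> 0   */

	(void)rf_id;
	(void)bw_lim;
	(void)cores;
}
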
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
index cd25a1b9f2ff..20563a32a21a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018, 2020-2024 Intel Corporation
+ * Copyright (C) 2018, 2020-2025 Intel Corporation
*/
#ifndef __iwl_context_info_file_gen3_h__
#define __iwl_context_info_file_gen3_h__
@@ -80,10 +80,12 @@ enum iwl_prph_scratch_flags {
* enum iwl_prph_scratch_ext_flags - PRPH scratch control ext flags
* @IWL_PRPH_SCRATCH_EXT_URM_FW: switch to URM mode based on fw setting
* @IWL_PRPH_SCRATCH_EXT_URM_PERM: switch to permanent URM mode
+ * @IWL_PRPH_SCRATCH_EXT_32KHZ_CLK_VALID: use external 32 KHz clock
*/
enum iwl_prph_scratch_ext_flags {
- IWL_PRPH_SCRATCH_EXT_URM_FW = BIT(4),
- IWL_PRPH_SCRATCH_EXT_URM_PERM = BIT(5),
+ IWL_PRPH_SCRATCH_EXT_URM_FW = BIT(4),
+ IWL_PRPH_SCRATCH_EXT_URM_PERM = BIT(5),
+ IWL_PRPH_SCRATCH_EXT_32KHZ_CLK_VALID = BIT(8),
};
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 08d990ba8a79..ce787326aa69 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#include <linux/firmware.h>
#include "iwl-drv.h"
@@ -1372,15 +1372,15 @@ void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
switch (tp_id) {
case IWL_FW_INI_TIME_POINT_EARLY:
iwl_dbg_tlv_init_cfg(fwrt);
- iwl_dbg_tlv_apply_config(fwrt, conf_list);
iwl_dbg_tlv_update_drams(fwrt);
iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
+ iwl_dbg_tlv_apply_config(fwrt, conf_list);
break;
case IWL_FW_INI_TIME_POINT_AFTER_ALIVE:
iwl_dbg_tlv_apply_buffers(fwrt);
iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
- iwl_dbg_tlv_apply_config(fwrt, conf_list);
iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
+ iwl_dbg_tlv_apply_config(fwrt, conf_list);
break;
case IWL_FW_INI_TIME_POINT_PERIODIC:
iwl_dbg_tlv_set_periodic_trigs(fwrt);
@@ -1390,14 +1390,14 @@ void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
case IWL_FW_INI_TIME_POINT_MISSED_BEACONS:
case IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION:
iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
- iwl_dbg_tlv_apply_config(fwrt, conf_list);
iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data,
iwl_dbg_tlv_check_fw_pkt);
+ iwl_dbg_tlv_apply_config(fwrt, conf_list);
break;
default:
iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
- iwl_dbg_tlv_apply_config(fwrt, conf_list);
iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
+ iwl_dbg_tlv_apply_config(fwrt, conf_list);
break;
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
index bf52c2edaad1..ea32dc88584f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2018 - 2021, 2024 Intel Corporation
+ * Copyright(c) 2018 - 2021, 2024-2025 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project.
*****************************************************************************/
@@ -156,6 +156,7 @@ do { \
#define IWL_DL_FW 0x00010000
#define IWL_DL_RF_KILL 0x00020000
#define IWL_DL_TPT 0x00040000
+#define IWL_DL_PTP 0x00080000
/* 0x00F00000 - 0x00100000 */
#define IWL_DL_RATE 0x00100000
#define IWL_DL_CALIB 0x00200000
@@ -165,7 +166,7 @@ do { \
#define IWL_DL_RX 0x01000000
#define IWL_DL_ISR 0x02000000
#define IWL_DL_HT 0x04000000
-#define IWL_DL_EXTERNAL 0x08000000
+#define IWL_DL_EHT 0x08000000
/* 0xF0000000 - 0x10000000 */
#define IWL_DL_11H 0x10000000
#define IWL_DL_STATS 0x20000000
@@ -175,7 +176,6 @@ do { \
#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
#define IWL_DEBUG_TDLS(p, f, a...) IWL_DEBUG(p, IWL_DL_TDLS, f, ## a)
#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
-#define IWL_DEBUG_EXTERNAL(p, f, a...) IWL_DEBUG(p, IWL_DL_EXTERNAL, f, ## a)
#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
@@ -216,5 +216,6 @@ do { \
#define IWL_DEBUG_LAR(p, f, a...) IWL_DEBUG(p, IWL_DL_LAR, f, ## a)
#define IWL_DEBUG_FW_INFO(p, f, a...) \
IWL_DEBUG(p, IWL_DL_INFO | IWL_DL_FW, f, ## a)
-
+#define IWL_DEBUG_PTP(p, f, a...) IWL_DEBUG(p, IWL_DL_PTP, f, ## a)
+#define IWL_DEBUG_EHT(p, f, a...) IWL_DEBUG(p, IWL_DL_EHT, f, ## a)
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 352b6e73e08f..d36837501e08 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -75,6 +75,9 @@ struct iwl_drv {
enum {
DVM_OP_MODE,
MVM_OP_MODE,
+#if IS_ENABLED(CONFIG_IWLMLD)
+ MLD_OP_MODE,
+#endif
};
/* Protects the table contents, i.e. the ops pointer & drv list */
@@ -86,6 +89,9 @@ static struct iwlwifi_opmode_table {
} iwlwifi_opmode_table[] = { /* ops set when driver is initialized */
[DVM_OP_MODE] = { .name = "iwldvm", .ops = NULL },
[MVM_OP_MODE] = { .name = "iwlmvm", .ops = NULL },
+#if IS_ENABLED(CONFIG_IWLMLD)
+ [MLD_OP_MODE] = { .name = "iwlmld", .ops = NULL },
+#endif
};
#define IWL_DEFAULT_SCAN_CHANNELS 40
@@ -168,6 +174,11 @@ static inline char iwl_drv_get_step(int step)
return 'a' + step;
}
+static bool iwl_drv_is_wifi7_supported(struct iwl_trans *trans)
+{
+ return CSR_HW_RFID_TYPE(trans->hw_rf_id) >= IWL_CFG_RF_TYPE_FM;
+}
+
const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf)
{
char mac_step, rf_step;
@@ -316,6 +327,7 @@ struct iwl_firmware_pieces {
size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
size_t n_mem_tlv;
+ u32 major;
};
static void alloc_sec_data(struct iwl_firmware_pieces *pieces,
@@ -957,19 +969,19 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
case IWL_UCODE_TLV_FW_VERSION: {
const __le32 *ptr = (const void *)tlv_data;
- u32 major, minor;
+ u32 minor;
u8 local_comp;
if (tlv_len != sizeof(u32) * 3)
goto invalid_tlv_len;
- major = le32_to_cpup(ptr++);
+ pieces->major = le32_to_cpup(ptr++);
minor = le32_to_cpup(ptr++);
local_comp = le32_to_cpup(ptr);
snprintf(drv->fw.fw_version,
sizeof(drv->fw.fw_version),
- "%u.%08x.%u %s", major, minor,
+ "%u.%08x.%u %s", pieces->major, minor,
local_comp, iwl_reduced_fw_name(drv));
break;
}
@@ -1468,6 +1480,8 @@ static void _iwl_op_mode_stop(struct iwl_drv *drv)
}
}
+#define IWL_MLD_SUPPORTED_FW_VERSION 97
+
/*
* iwl_req_fw_callback - callback when firmware was loaded
*
@@ -1720,6 +1734,20 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
break;
}
+#if IS_ENABLED(CONFIG_IWLMLD)
+ if (pieces->major >= IWL_MLD_SUPPORTED_FW_VERSION &&
+ iwl_drv_is_wifi7_supported(drv->trans))
+ op = &iwlwifi_opmode_table[MLD_OP_MODE];
+#else
+ if (pieces->major >= IWL_MLD_SUPPORTED_FW_VERSION &&
+ iwl_drv_is_wifi7_supported(drv->trans)) {
+ IWL_ERR(drv,
+ "IWLMLD needs to be compiled to support this firmware\n");
+ mutex_unlock(&iwlwifi_opmode_table_mtx);
+ goto out_unbind;
+ }
+#endif
+
IWL_INFO(drv, "loaded firmware version %s op_mode %s\n",
drv->fw.fw_version, op->name);
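
A compact sketch (not part of the patch) of the op-mode decision added above: the MLD op-mode is chosen only when the firmware's major API version is new enough and the RF is WiFi 7 capable, mirroring iwl_drv_is_wifi7_supported() and the IWL_MLD_SUPPORTED_FW_VERSION check.

static bool example_wants_mld_op_mode(u32 fw_major, u32 rf_type)
{
	return fw_major >= IWL_MLD_SUPPORTED_FW_VERSION &&
	       rf_type >= IWL_CFG_RF_TYPE_FM;
}
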
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 9f7e013252fe..cd1b0048bb6d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2023, 2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -684,10 +684,13 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
.has_eht = true,
.eht_cap_elem = {
.mac_cap_info[0] =
+ IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2 |
IEEE80211_EHT_MAC_CAP0_SCS_TRAFFIC_DESC,
+ .mac_cap_info[1] =
+ IEEE80211_EHT_MAC_CAP1_UNSOL_EPCS_PRIO_ACCESS,
.phy_cap_info[0] =
IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
@@ -913,11 +916,8 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
{
bool is_ap = iftype_data->types_mask & (BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_GO));
- bool no_320;
-
- no_320 = (!trans->trans_cfg->integrated &&
- trans->pcie_link_speed < PCI_EXP_LNKSTA_CLS_8_0GB) ||
- trans->reduced_cap_sku;
+ bool slow_pcie = (!trans->trans_cfg->integrated &&
+ trans->pcie_link_speed < PCI_EXP_LNKSTA_CLS_8_0GB);
if (!data->sku_cap_11be_enable || iwlwifi_mod_params.disable_11be)
iftype_data->eht_cap.has_eht = false;
@@ -944,7 +944,8 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK);
break;
case NL80211_BAND_6GHZ:
- if (!no_320) {
+ if (!trans->reduced_cap_sku &&
+ trans->bw_limit >= 320) {
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[0] |=
IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[1] |=
@@ -986,6 +987,14 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[4] |= 0x10;
}
}
+
+ if (slow_pcie) {
+ struct ieee80211_eht_mcs_nss_supp *mcs_nss =
+ &iftype_data->eht_cap.eht_mcs_nss_supp;
+
+ mcs_nss->bw._320.rx_tx_mcs11_max_nss = 0;
+ mcs_nss->bw._320.rx_tx_mcs13_max_nss = 0;
+ }
} else {
struct ieee80211_he_mcs_nss_supp *he_mcs_nss_supp =
&iftype_data->he_cap.he_mcs_nss_supp;
@@ -1086,19 +1095,22 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
iftype_data->eht_cap.eht_mcs_nss_supp.bw._320.rx_tx_mcs13_max_nss = 0;
}
- if (trans->no_160)
+ if (trans->bw_limit < 160)
iftype_data->he_cap.he_cap_elem.phy_cap_info[0] &=
~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
- if (trans->reduced_cap_sku) {
+ if (trans->bw_limit < 320 || trans->reduced_cap_sku) {
memset(&iftype_data->eht_cap.eht_mcs_nss_supp.bw._320, 0,
sizeof(iftype_data->eht_cap.eht_mcs_nss_supp.bw._320));
+ iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] &=
+ ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK;
+ }
+
+ if (trans->reduced_cap_sku) {
iftype_data->eht_cap.eht_mcs_nss_supp.bw._80.rx_tx_mcs13_max_nss = 0;
iftype_data->eht_cap.eht_mcs_nss_supp.bw._160.rx_tx_mcs13_max_nss = 0;
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[8] &=
~IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA;
- iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] &=
- ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK;
}
}
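
The reshuffled capability logic above can be summarized by two illustrative predicates (sketch only, not part of the patch): 320 MHz in 6 GHz is advertised only when the SKU is neither reduced-capability nor bandwidth-limited below 320 MHz, while a slow discrete PCIe link merely zeroes the 320 MHz MCS 11/13 NSS instead of dropping 320 MHz support entirely.

static bool example_advertises_320mhz(const struct iwl_trans *trans)
{
	return !trans->reduced_cap_sku && trans->bw_limit >= 320;
}

static bool example_limits_320mhz_nss(const struct iwl_trans *trans)
{
	return !trans->trans_cfg->integrated &&
	       trans->pcie_link_speed < PCI_EXP_LNKSTA_CLS_8_0GB;
}
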
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index 49c8507d1a6b..c1607b6d0759 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2019-2021, 2023-2024 Intel Corporation
+ * Copyright (C) 2019-2021, 2023-2025 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/bsearch.h>
@@ -97,7 +97,7 @@ static void iwl_trans_reprobe_wk(struct work_struct *wk)
module_put(THIS_MODULE);
}
-#define IWL_TRANS_RESET_OK_TIME 180 /* seconds */
+#define IWL_TRANS_RESET_OK_TIME 7 /* seconds */
static enum iwl_reset_mode
iwl_trans_determine_restart_mode(struct iwl_trans *trans)
@@ -403,6 +403,8 @@ void iwl_trans_op_mode_leave(struct iwl_trans *trans)
iwl_trans_pcie_op_mode_leave(trans);
+ cancel_work_sync(&trans->restart.wk);
+
trans->op_mode = NULL;
trans->state = IWL_TRANS_NO_FW;
@@ -639,6 +641,9 @@ IWL_EXPORT_SYMBOL(iwl_trans_tx);
void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn,
struct sk_buff_head *skbs, bool is_flush)
{
+ if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
+ return;
+
if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
"bad state = %d\n", trans->state))
return;
@@ -671,6 +676,9 @@ IWL_EXPORT_SYMBOL(iwl_trans_txq_enable_cfg);
int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
+ if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
+ return -EIO;
+
if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
"bad state = %d\n", trans->state))
return -EIO;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index f6234065dbdd..25fb4c50e38b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2023, 2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -876,7 +876,7 @@ struct iwl_txq {
* only valid for discrete (not integrated) NICs
* @invalid_tx_cmd: invalid TX command buffer
* @reduced_cap_sku: reduced capability supported SKU
- * @no_160: device not supporting 160 MHz
+ * @bw_limit: the maximum bandwidth supported by the device, in MHz
* @step_urm: STEP is in URM, no support for MCS>9 in 320 MHz
* @restart: restart worker data
* @restart.wk: restart worker
@@ -888,6 +888,7 @@ struct iwl_txq {
* @trans_specific: data for the specific transport this is allocated for/with
* @dsbr_urm_fw_dependent: switch to URM based on fw settings
* @dsbr_urm_permanent: switch to URM permanently
+ * @ext_32khz_clock_valid: if true, the external 32 KHz clock can be used
*/
struct iwl_trans {
bool csme_own;
@@ -910,11 +911,14 @@ struct iwl_trans {
char hw_id_str[52];
u32 sku_id[3];
bool reduced_cap_sku;
- u8 no_160:1, step_urm:1;
+ u16 bw_limit;
+ bool step_urm;
u8 dsbr_urm_fw_dependent:1,
dsbr_urm_permanent:1;
+ bool ext_32khz_clock_valid;
+
u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
bool pm_support;
@@ -1261,6 +1265,7 @@ enum iwl_reset_mode {
};
void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode);
+void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/Makefile b/drivers/net/wireless/intel/iwlwifi/mld/Makefile
new file mode 100644
index 000000000000..ece66e7a9be4
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+obj-$(CONFIG_IWLMLD) += iwlmld.o
+obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += tests/
+
+iwlmld-y += mld.o notif.o mac80211.o fw.o power.o iface.o link.o rx.o mcc.o session-protect.o phy.o
+iwlmld-y += scan.o sta.o tx.o coex.o tlc.o agg.o key.o regulatory.o ap.o thermal.o roc.o stats.o
+iwlmld-y += low_latency.o mlo.o ptp.o time_sync.o ftm-initiator.o
+iwlmld-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
+iwlmld-$(CONFIG_IWLWIFI_LEDS) += led.o
+iwlmld-$(CONFIG_PM_SLEEP) += d3.o
+
+# non-upstream things
+iwlmld-$(CONFIG_IWL_VENDOR_CMDS) += vendor-cmd.o
+iwlmld-$(CONFIG_IWLMVM_AX_SOFTAP_TESTMODE) += ax-softap-testmode.o
+
+subdir-ccflags-y += -I$(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/agg.c b/drivers/net/wireless/intel/iwlwifi/mld/agg.c
new file mode 100644
index 000000000000..db9e0f04f4b7
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/agg.c
@@ -0,0 +1,670 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include "agg.h"
+#include "sta.h"
+#include "hcmd.h"
+
+static void
+iwl_mld_reorder_release_frames(struct iwl_mld *mld, struct ieee80211_sta *sta,
+ struct napi_struct *napi,
+ struct iwl_mld_baid_data *baid_data,
+ struct iwl_mld_reorder_buffer *reorder_buf,
+ u16 nssn)
+{
+ struct iwl_mld_reorder_buf_entry *entries =
+ &baid_data->entries[reorder_buf->queue *
+ baid_data->entries_per_queue];
+ u16 ssn = reorder_buf->head_sn;
+
+ while (ieee80211_sn_less(ssn, nssn)) {
+ int index = ssn % baid_data->buf_size;
+ struct sk_buff_head *skb_list = &entries[index].frames;
+ struct sk_buff *skb;
+
+ ssn = ieee80211_sn_inc(ssn);
+
+		/* Empty the list. It will have more than one frame for an
+		 * A-MSDU. An empty list is valid as well, since the NSSN
+		 * indicates that frames were received.
+ */
+ while ((skb = __skb_dequeue(skb_list))) {
+ iwl_mld_pass_packet_to_mac80211(mld, napi, skb,
+ reorder_buf->queue,
+ sta);
+ reorder_buf->num_stored--;
+ }
+ }
+ reorder_buf->head_sn = nssn;
+}
+
+static void iwl_mld_release_frames_from_notif(struct iwl_mld *mld,
+ struct napi_struct *napi,
+ u8 baid, u16 nssn, int queue)
+{
+ struct iwl_mld_reorder_buffer *reorder_buf;
+ struct iwl_mld_baid_data *ba_data;
+ struct ieee80211_link_sta *link_sta;
+ u32 sta_id;
+
+ IWL_DEBUG_HT(mld, "Frame release notification for BAID %u, NSSN %d\n",
+ baid, nssn);
+
+ if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
+ baid >= ARRAY_SIZE(mld->fw_id_to_ba)))
+ return;
+
+ rcu_read_lock();
+
+ ba_data = rcu_dereference(mld->fw_id_to_ba[baid]);
+ if (!ba_data) {
+ IWL_DEBUG_HT(mld, "BAID %d not found in map\n", baid);
+ goto out_unlock;
+ }
+
+ /* pick any STA ID to find the pointer */
+ sta_id = ffs(ba_data->sta_mask) - 1;
+ link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(link_sta) || !link_sta->sta))
+ goto out_unlock;
+
+ reorder_buf = &ba_data->reorder_buf[queue];
+
+ iwl_mld_reorder_release_frames(mld, link_sta->sta, napi, ba_data,
+ reorder_buf, nssn);
+out_unlock:
+ rcu_read_unlock();
+}
+
+void iwl_mld_handle_frame_release_notif(struct iwl_mld *mld,
+ struct napi_struct *napi,
+ struct iwl_rx_packet *pkt, int queue)
+{
+ struct iwl_frame_release *release = (void *)pkt->data;
+ u32 pkt_len = iwl_rx_packet_payload_len(pkt);
+
+ if (IWL_FW_CHECK(mld, pkt_len < sizeof(*release),
+ "Unexpected frame release notif size %u (expected %zu)\n",
+ pkt_len, sizeof(*release)))
+ return;
+
+ iwl_mld_release_frames_from_notif(mld, napi, release->baid,
+ le16_to_cpu(release->nssn),
+ queue);
+}
+
+void iwl_mld_handle_bar_frame_release_notif(struct iwl_mld *mld,
+ struct napi_struct *napi,
+ struct iwl_rx_packet *pkt,
+ int queue)
+{
+ struct iwl_bar_frame_release *release = (void *)pkt->data;
+ struct iwl_mld_baid_data *baid_data;
+ unsigned int baid, nssn, sta_id, tid;
+ u32 pkt_len = iwl_rx_packet_payload_len(pkt);
+
+ if (IWL_FW_CHECK(mld, pkt_len < sizeof(*release),
+ "Unexpected frame release notif size %u (expected %zu)\n",
+ pkt_len, sizeof(*release)))
+ return;
+
+ baid = le32_get_bits(release->ba_info,
+ IWL_BAR_FRAME_RELEASE_BAID_MASK);
+ nssn = le32_get_bits(release->ba_info,
+ IWL_BAR_FRAME_RELEASE_NSSN_MASK);
+ sta_id = le32_get_bits(release->sta_tid,
+ IWL_BAR_FRAME_RELEASE_STA_MASK);
+ tid = le32_get_bits(release->sta_tid,
+ IWL_BAR_FRAME_RELEASE_TID_MASK);
+
+ if (IWL_FW_CHECK(mld, baid >= ARRAY_SIZE(mld->fw_id_to_ba),
+ "BAR release: invalid BAID (%x)\n", baid))
+ return;
+
+ rcu_read_lock();
+ baid_data = rcu_dereference(mld->fw_id_to_ba[baid]);
+ if (!IWL_FW_CHECK(mld, !baid_data,
+ "Got valid BAID %d but not allocated, invalid BAR release!\n",
+ baid))
+ goto out_unlock;
+
+ if (IWL_FW_CHECK(mld, tid != baid_data->tid ||
+ sta_id > mld->fw->ucode_capa.num_stations ||
+ !(baid_data->sta_mask & BIT(sta_id)),
+ "BAID 0x%x is mapped to sta_mask:0x%x tid:%d, but BAR release received for sta:%d tid:%d\n",
+ baid, baid_data->sta_mask, baid_data->tid, sta_id,
+ tid))
+ goto out_unlock;
+
+ IWL_DEBUG_DROP(mld, "Received a BAR, expect packet loss: nssn %d\n",
+ nssn);
+
+ iwl_mld_release_frames_from_notif(mld, napi, baid, nssn, queue);
+out_unlock:
+ rcu_read_unlock();
+}
+
+void iwl_mld_del_ba(struct iwl_mld *mld, int queue,
+ struct iwl_mld_delba_data *data)
+{
+ struct iwl_mld_baid_data *ba_data;
+ struct iwl_mld_reorder_buffer *reorder_buf;
+ struct ieee80211_link_sta *link_sta;
+ u8 baid = data->baid;
+ u32 sta_id;
+
+ if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid))
+ return;
+
+ rcu_read_lock();
+
+ ba_data = rcu_dereference(mld->fw_id_to_ba[baid]);
+ if (WARN_ON_ONCE(!ba_data))
+ goto out_unlock;
+
+ /* pick any STA ID to find the pointer */
+ sta_id = ffs(ba_data->sta_mask) - 1;
+ link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(link_sta) || !link_sta->sta))
+ goto out_unlock;
+
+ reorder_buf = &ba_data->reorder_buf[queue];
+
+ /* release all frames that are in the reorder buffer to the stack */
+ iwl_mld_reorder_release_frames(mld, link_sta->sta, NULL,
+ ba_data, reorder_buf,
+ ieee80211_sn_add(reorder_buf->head_sn,
+ ba_data->buf_size));
+out_unlock:
+ rcu_read_unlock();
+}
+
+/* Returns an enum iwl_mld_reorder_result value: whether the MPDU was
+ * buffered or dropped, or whether it should be passed to the upper layer.
+ */
+enum iwl_mld_reorder_result
+iwl_mld_reorder(struct iwl_mld *mld, struct napi_struct *napi,
+ int queue, struct ieee80211_sta *sta,
+ struct sk_buff *skb, struct iwl_rx_mpdu_desc *desc)
+{
+ struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);
+ struct iwl_mld_baid_data *baid_data;
+ struct iwl_mld_reorder_buffer *buffer;
+ struct iwl_mld_reorder_buf_entry *entries;
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ struct iwl_mld_link_sta *mld_link_sta;
+ u32 reorder = le32_to_cpu(desc->reorder_data);
+ bool amsdu, last_subframe, is_old_sn, is_dup;
+ u8 tid = ieee80211_get_tid(hdr);
+ u8 baid;
+ u16 nssn, sn;
+ u32 sta_mask = 0;
+ int index;
+ u8 link_id;
+
+ baid = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_BAID_MASK);
+
+ /* This also covers the case of receiving a Block Ack Request
+ * outside a BA session; we'll pass it to mac80211 and that
+ * then sends a delBA action frame.
+ * This also covers pure monitor mode, in which case we won't
+ * have any BA sessions.
+ */
+ if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
+ return IWL_MLD_PASS_SKB;
+
+ /* no sta yet */
+ if (WARN_ONCE(!sta,
+ "Got valid BAID without a valid station assigned\n"))
+ return IWL_MLD_PASS_SKB;
+
+ /* not a data packet */
+ if (!ieee80211_is_data_qos(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1))
+ return IWL_MLD_PASS_SKB;
+
+ if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
+ return IWL_MLD_PASS_SKB;
+
+ baid_data = rcu_dereference(mld->fw_id_to_ba[baid]);
+ if (!baid_data) {
+ IWL_DEBUG_HT(mld,
+ "Got valid BAID but no baid allocated, bypass re-ordering (BAID=%d reorder=0x%x)\n",
+ baid, reorder);
+ return IWL_MLD_PASS_SKB;
+ }
+
+ for_each_mld_link_sta(mld_sta, mld_link_sta, link_id)
+ sta_mask |= BIT(mld_link_sta->fw_id);
+
+ /* verify the BAID is correctly mapped to the sta and tid */
+ if (IWL_FW_CHECK(mld,
+ tid != baid_data->tid ||
+ !(sta_mask & baid_data->sta_mask),
+ "BAID 0x%x is mapped to sta_mask:0x%x tid:%d, but was received for sta_mask:0x%x tid:%d\n",
+ baid, baid_data->sta_mask, baid_data->tid,
+ sta_mask, tid))
+ return IWL_MLD_PASS_SKB;
+
+ buffer = &baid_data->reorder_buf[queue];
+ entries = &baid_data->entries[queue * baid_data->entries_per_queue];
+
+ is_old_sn = !!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN);
+
+ if (!buffer->valid && is_old_sn)
+ return IWL_MLD_PASS_SKB;
+
+ buffer->valid = true;
+
+ is_dup = !!(desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_DUPLICATE));
+
+ /* drop any duplicated or outdated packets */
+ if (is_dup || is_old_sn)
+ return IWL_MLD_DROP_SKB;
+
+ sn = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_SN_MASK);
+ nssn = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_NSSN_MASK);
+ amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
+ last_subframe = desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
+
+ /* release immediately if allowed by nssn and no stored frames */
+ if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
+ if (!amsdu || last_subframe)
+ buffer->head_sn = nssn;
+ return IWL_MLD_PASS_SKB;
+ }
+
+ /* release immediately if there are no stored frames, and the sn is
+ * equal to the head.
+	 * This can happen due to the reorder timer, where the NSSN is behind
+	 * head_sn. After everything was released and the next frame in the
+	 * sequence arrives, the NSSN alone does not allow an immediate
+	 * release, even though technically there is no hole and we can move
+	 * forward.
+ */
+ if (!buffer->num_stored && sn == buffer->head_sn) {
+ if (!amsdu || last_subframe)
+ buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);
+ return IWL_MLD_PASS_SKB;
+ }
+
+ /* put in reorder buffer */
+ index = sn % baid_data->buf_size;
+ __skb_queue_tail(&entries[index].frames, skb);
+ buffer->num_stored++;
+
+ /* We cannot trust NSSN for AMSDU sub-frames that are not the last. The
+ * reason is that NSSN advances on the first sub-frame, and may cause
+ * the reorder buffer to advance before all the sub-frames arrive.
+ *
+ * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
+	 * SN 1. The NSSN for the first sub-frame will be 3, causing the driver
+	 * to release SN 0, 1 and 2. When the next sub-frame of SN 1 arrives,
+	 * the reorder buffer is already ahead and it will be dropped.
+	 * If the last sub-frame is not on this queue, we will get a frame
+	 * release notification with an up-to-date NSSN.
+ */
+ if (!amsdu || last_subframe)
+ iwl_mld_reorder_release_frames(mld, sta, napi, baid_data,
+ buffer, nssn);
+
+ return IWL_MLD_BUFFERED_SKB;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_reorder);
+
+static void iwl_mld_rx_agg_session_expired(struct timer_list *t)
+{
+ struct iwl_mld_baid_data *data =
+ from_timer(data, t, session_timer);
+ struct iwl_mld_baid_data __rcu **rcu_ptr = data->rcu_ptr;
+ struct iwl_mld_baid_data *ba_data;
+ struct ieee80211_link_sta *link_sta;
+ struct iwl_mld_sta *mld_sta;
+ unsigned long timeout;
+ unsigned int sta_id;
+
+ rcu_read_lock();
+
+ ba_data = rcu_dereference(*rcu_ptr);
+ if (WARN_ON(!ba_data))
+ goto unlock;
+
+ if (WARN_ON(!ba_data->timeout))
+ goto unlock;
+
+ timeout = ba_data->last_rx_timestamp +
+ TU_TO_JIFFIES(ba_data->timeout * 2);
+ if (time_is_after_jiffies(timeout)) {
+ mod_timer(&ba_data->session_timer, timeout);
+ goto unlock;
+ }
+
+ /* timer expired, pick any STA ID to find the pointer */
+ sta_id = ffs(ba_data->sta_mask) - 1;
+ link_sta = rcu_dereference(ba_data->mld->fw_id_to_link_sta[sta_id]);
+
+ /* sta should be valid unless the following happens:
+	 * The firmware asserts, which triggers a reconfig flow, but
+ * the reconfig fails before we set the pointer to sta into
+ * the fw_id_to_link_sta pointer table. mac80211 can't stop
+ * A-MPDU and hence the timer continues to run. Then, the
+ * timer expires and sta is NULL.
+ */
+ if (IS_ERR_OR_NULL(link_sta) || WARN_ON(!link_sta->sta))
+ goto unlock;
+
+ mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+ ieee80211_rx_ba_timer_expired(mld_sta->vif, link_sta->sta->addr,
+ ba_data->tid);
+unlock:
+ rcu_read_unlock();
+}
+
+static int
+iwl_mld_stop_ba_in_fw(struct iwl_mld *mld, struct ieee80211_sta *sta, int tid)
+{
+ struct iwl_rx_baid_cfg_cmd cmd = {
+ .action = cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
+ .remove.sta_id_mask =
+ cpu_to_le32(iwl_mld_fw_sta_id_mask(mld, sta)),
+ .remove.tid = cpu_to_le32(tid),
+ };
+ int ret;
+
+ ret = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(DATA_PATH_GROUP,
+ RX_BAID_ALLOCATION_CONFIG_CMD),
+ &cmd);
+ if (ret)
+ return ret;
+
+ IWL_DEBUG_HT(mld, "RX BA Session stopped in fw\n");
+
+ return ret;
+}
+
+static int
+iwl_mld_start_ba_in_fw(struct iwl_mld *mld, struct ieee80211_sta *sta,
+ int tid, u16 ssn, u16 buf_size)
+{
+ struct iwl_rx_baid_cfg_cmd cmd = {
+ .action = cpu_to_le32(IWL_RX_BAID_ACTION_ADD),
+ .alloc.sta_id_mask =
+ cpu_to_le32(iwl_mld_fw_sta_id_mask(mld, sta)),
+ .alloc.tid = tid,
+ .alloc.ssn = cpu_to_le16(ssn),
+ .alloc.win_size = cpu_to_le16(buf_size),
+ };
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD),
+ .flags = CMD_WANT_SKB,
+ .len[0] = sizeof(cmd),
+ .data[0] = &cmd,
+ };
+ struct iwl_rx_baid_cfg_resp *resp;
+ struct iwl_rx_packet *pkt;
+ u32 resp_len;
+ int ret, baid;
+
+ BUILD_BUG_ON(sizeof(*resp) != sizeof(baid));
+
+ ret = iwl_mld_send_cmd(mld, &hcmd);
+ if (ret)
+ return ret;
+
+ pkt = hcmd.resp_pkt;
+
+ resp_len = iwl_rx_packet_payload_len(pkt);
+ if (IWL_FW_CHECK(mld, resp_len != sizeof(*resp),
+ "BAID_ALLOC_CMD: unexpected response length %d\n",
+ resp_len)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ IWL_DEBUG_HT(mld, "RX BA Session started in fw\n");
+
+ resp = (void *)pkt->data;
+ baid = le32_to_cpu(resp->baid);
+
+ if (IWL_FW_CHECK(mld, baid < 0 || baid >= ARRAY_SIZE(mld->fw_id_to_ba),
+ "BAID_ALLOC_CMD: invalid BAID response %d\n", baid)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = baid;
+out:
+ iwl_free_resp(&hcmd);
+ return ret;
+}
+
+static void iwl_mld_init_reorder_buffer(struct iwl_mld *mld,
+ struct iwl_mld_baid_data *data,
+ u16 ssn)
+{
+ for (int i = 0; i < mld->trans->num_rx_queues; i++) {
+ struct iwl_mld_reorder_buffer *reorder_buf =
+ &data->reorder_buf[i];
+ struct iwl_mld_reorder_buf_entry *entries =
+ &data->entries[i * data->entries_per_queue];
+
+ reorder_buf->head_sn = ssn;
+ reorder_buf->queue = i;
+
+ for (int j = 0; j < data->buf_size; j++)
+ __skb_queue_head_init(&entries[j].frames);
+ }
+}
+
+static void iwl_mld_free_reorder_buffer(struct iwl_mld *mld,
+ struct iwl_mld_baid_data *data)
+{
+ struct iwl_mld_delba_data delba_data = {
+ .baid = data->baid,
+ };
+
+ iwl_mld_sync_rx_queues(mld, IWL_MLD_RXQ_NOTIF_DEL_BA,
+ &delba_data, sizeof(delba_data));
+
+ for (int i = 0; i < mld->trans->num_rx_queues; i++) {
+ struct iwl_mld_reorder_buffer *reorder_buf =
+ &data->reorder_buf[i];
+ struct iwl_mld_reorder_buf_entry *entries =
+ &data->entries[i * data->entries_per_queue];
+
+ if (likely(!reorder_buf->num_stored))
+ continue;
+
+ /* This shouldn't happen in regular DELBA since the RX queues
+ * sync internal DELBA notification should trigger a release
+ * of all frames in the reorder buffer.
+ */
+ WARN_ON(1);
+
+ for (int j = 0; j < data->buf_size; j++)
+ __skb_queue_purge(&entries[j].frames);
+ }
+}
+
+int iwl_mld_ampdu_rx_start(struct iwl_mld *mld, struct ieee80211_sta *sta,
+ int tid, u16 ssn, u16 buf_size, u16 timeout)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ struct iwl_mld_baid_data *baid_data = NULL;
+ u32 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
+ int ret, baid;
+ u32 sta_mask;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (mld->num_rx_ba_sessions >= IWL_MAX_BAID) {
+ IWL_DEBUG_HT(mld,
+ "Max num of RX BA sessions reached; blocking new session\n");
+ return -ENOSPC;
+ }
+
+ sta_mask = iwl_mld_fw_sta_id_mask(mld, sta);
+ if (WARN_ON(!sta_mask))
+ return -EINVAL;
+
+ /* sparse doesn't like the __align() so don't check */
+#ifndef __CHECKER__
+ /* The division below will be OK if either the cache line size
+ * can be divided by the entry size (ALIGN will round up) or if
+ * the entry size can be divided by the cache line size, in which
+ * case the ALIGN() will do nothing.
+ */
+ BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
+ sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
+#endif
+
+ /* Upward align the reorder buffer size to fill an entire cache
+ * line for each queue, to avoid sharing cache lines between
+ * different queues.
+ */
+ reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
+
+ /* Allocate here so if allocation fails we can bail out early
+ * before starting the BA session in the firmware
+ */
+ baid_data = kzalloc(sizeof(*baid_data) +
+ mld->trans->num_rx_queues * reorder_buf_size,
+ GFP_KERNEL);
+ if (!baid_data)
+ return -ENOMEM;
+
+ /* This division is why we need the above BUILD_BUG_ON(),
+ * if that doesn't hold then this will not be right.
+ */
+ baid_data->entries_per_queue =
+ reorder_buf_size / sizeof(baid_data->entries[0]);
+
+ baid = iwl_mld_start_ba_in_fw(mld, sta, tid, ssn, buf_size);
+ if (baid < 0) {
+ ret = baid;
+ goto out_free;
+ }
+
+ mld->num_rx_ba_sessions++;
+ mld_sta->tid_to_baid[tid] = baid;
+
+ baid_data->baid = baid;
+ baid_data->mld = mld;
+ baid_data->tid = tid;
+ baid_data->buf_size = buf_size;
+ baid_data->sta_mask = sta_mask;
+ baid_data->timeout = timeout;
+ baid_data->last_rx_timestamp = jiffies;
+ baid_data->rcu_ptr = &mld->fw_id_to_ba[baid];
+
+ iwl_mld_init_reorder_buffer(mld, baid_data, ssn);
+
+ timer_setup(&baid_data->session_timer, iwl_mld_rx_agg_session_expired,
+ 0);
+ if (timeout)
+ mod_timer(&baid_data->session_timer,
+ TU_TO_EXP_TIME(timeout * 2));
+
+ IWL_DEBUG_HT(mld, "STA mask=0x%x (tid=%d) is assigned to BAID %d\n",
+ baid_data->sta_mask, tid, baid);
+
+ /* protect the BA data with RCU to cover a case where our
+ * internal RX sync mechanism will timeout (not that it's
+ * supposed to happen) and we will free the session data while
+ * RX is being processed in parallel
+ */
+ WARN_ON(rcu_access_pointer(mld->fw_id_to_ba[baid]));
+ rcu_assign_pointer(mld->fw_id_to_ba[baid], baid_data);
+
+ return 0;
+
+out_free:
+ kfree(baid_data);
+ return ret;
+}
+
+int iwl_mld_ampdu_rx_stop(struct iwl_mld *mld, struct ieee80211_sta *sta,
+ int tid)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ int baid = mld_sta->tid_to_baid[tid];
+ struct iwl_mld_baid_data *baid_data;
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+	/* During firmware restart, do not send the command as the firmware no
+	 * longer recognizes the session. Instead, only clear the driver BA
+	 * session data.
+ */
+ if (!mld->fw_status.in_hw_restart) {
+ ret = iwl_mld_stop_ba_in_fw(mld, sta, tid);
+ if (ret)
+ return ret;
+ }
+
+ if (!WARN_ON(mld->num_rx_ba_sessions == 0))
+ mld->num_rx_ba_sessions--;
+
+ baid_data = wiphy_dereference(mld->wiphy, mld->fw_id_to_ba[baid]);
+ if (WARN_ON(!baid_data))
+ return -EINVAL;
+
+ if (timer_pending(&baid_data->session_timer))
+ timer_shutdown_sync(&baid_data->session_timer);
+
+ iwl_mld_free_reorder_buffer(mld, baid_data);
+
+ RCU_INIT_POINTER(mld->fw_id_to_ba[baid], NULL);
+ kfree_rcu(baid_data, rcu_head);
+
+ IWL_DEBUG_HT(mld, "BAID %d is free\n", baid);
+
+ return 0;
+}
+
+int iwl_mld_update_sta_baids(struct iwl_mld *mld,
+ u32 old_sta_mask,
+ u32 new_sta_mask)
+{
+ struct iwl_rx_baid_cfg_cmd cmd = {
+ .action = cpu_to_le32(IWL_RX_BAID_ACTION_MODIFY),
+ .modify.old_sta_id_mask = cpu_to_le32(old_sta_mask),
+ .modify.new_sta_id_mask = cpu_to_le32(new_sta_mask),
+ };
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
+ int baid;
+
+ /* mac80211 will remove sessions later, but we ignore all that */
+ if (mld->fw_status.in_hw_restart)
+ return 0;
+
+ BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
+
+ for (baid = 0; baid < ARRAY_SIZE(mld->fw_id_to_ba); baid++) {
+ struct iwl_mld_baid_data *data;
+ int ret;
+
+ data = wiphy_dereference(mld->wiphy, mld->fw_id_to_ba[baid]);
+ if (!data)
+ continue;
+
+ if (!(data->sta_mask & old_sta_mask))
+ continue;
+
+ WARN_ONCE(data->sta_mask != old_sta_mask,
+ "BAID data for %d corrupted - expected 0x%x found 0x%x\n",
+ baid, old_sta_mask, data->sta_mask);
+
+ cmd.modify.tid = cpu_to_le32(data->tid);
+
+ ret = iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd);
+ if (ret)
+ return ret;
+ data->sta_mask = new_sta_mask;
+ }
+
+ return 0;
+}
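
The reorder path above relies on mac80211's modulo-4096 sequence-number helpers. The sketch below (hypothetical, with made-up numbers) walks the same head_sn/NSSN arithmetic that iwl_mld_reorder_release_frames() performs, including the wrap-around case.

static void example_reorder_window_walk(void)
{
	const u16 buf_size = 64;
	u16 head_sn = 4093;
	u16 nssn = 4095;

	/* releases SN 4093 and 4094; SN 4095 stays buffered (head == NSSN) */
	while (ieee80211_sn_less(head_sn, nssn)) {
		int slot = head_sn % buf_size;

		/* in the driver: flush entries[slot].frames to mac80211 */
		(void)slot;
		head_sn = ieee80211_sn_inc(head_sn);
	}

	/* a later NSSN of 1 then releases SN 4095 and SN 0, since both the
	 * comparison and the increment work modulo 4096
	 */
}
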
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/agg.h b/drivers/net/wireless/intel/iwlwifi/mld/agg.h
new file mode 100644
index 000000000000..651c80d1c7cd
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/agg.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_agg_h__
+#define __iwl_agg_h__
+
+#include "mld.h"
+#include "fw/api/rx.h"
+
+/**
+ * struct iwl_mld_reorder_buffer - per ra/tid/queue reorder buffer
+ * @head_sn: reorder window head sequence number
+ * @num_stored: number of MPDUs stored in the buffer
+ * @queue: queue of this reorder buffer
+ * @valid: true if reordering is valid for this queue
+ */
+struct iwl_mld_reorder_buffer {
+ u16 head_sn;
+ u16 num_stored;
+ int queue;
+ bool valid;
+} ____cacheline_aligned_in_smp;
+
+/**
+ * struct iwl_mld_reorder_buf_entry - reorder buffer entry per-queue/per-seqno
+ * @frames: list of skbs stored. A list is necessary because in an A-MSDU,
+ * all sub-frames share the same sequence number, so they are stored
+ * together in the same list.
+ */
+struct iwl_mld_reorder_buf_entry {
+ struct sk_buff_head frames;
+}
+#ifndef __CHECKER__
+/* sparse doesn't like this construct: "bad integer constant expression" */
+__aligned(roundup_pow_of_two(sizeof(struct sk_buff_head)))
+#endif
+;
+
+/**
+ * struct iwl_mld_baid_data - Block Ack session data
+ * @rcu_head: RCU head for freeing this data
+ * @sta_mask: station mask for the BAID
+ * @tid: tid of the session
+ * @baid: baid of the session
+ * @buf_size: the reorder buffer size as set by the last ADDBA request
+ * @entries_per_queue: number of buffers per queue; this actually gets
+ * aligned up to avoid cache line sharing between queues
+ * @timeout: the timeout value specified in the ADDBA request.
+ * @last_rx_timestamp: timestamp of the last received packet (in jiffies). This
+ * value is updated only when the configured @timeout has passed since
+ * the last update to minimize cache bouncing between RX queues.
+ * @session_timer: timer is set to expire after 2 * @timeout (since we want
+ * to minimize the cache bouncing by updating @last_rx_timestamp only once
+ * after @timeout has passed). If no packets are received within this
+ * period, it informs mac80211 to initiate delBA flow, terminating the
+ * BA session.
+ * @rcu_ptr: BA data RCU protected access
+ * @mld: mld pointer, needed for timer context
+ * @reorder_buf: reorder buffer, allocated per queue
+ * @entries: the per-queue reorder buffer entries (see @entries_per_queue)
+ */
+struct iwl_mld_baid_data {
+ struct rcu_head rcu_head;
+ u32 sta_mask;
+ u8 tid;
+ u8 baid;
+ u16 buf_size;
+ u16 entries_per_queue;
+ u16 timeout;
+ struct timer_list session_timer;
+ unsigned long last_rx_timestamp;
+ struct iwl_mld_baid_data __rcu **rcu_ptr;
+ struct iwl_mld *mld;
+ struct iwl_mld_reorder_buffer reorder_buf[IWL_MAX_RX_HW_QUEUES];
+ struct iwl_mld_reorder_buf_entry entries[] ____cacheline_aligned_in_smp;
+};
+
+/**
+ * struct iwl_mld_delba_data - RX queue sync data for %IWL_MLD_RXQ_NOTIF_DEL_BA
+ *
+ * @baid: Block Ack id, used to identify the BA session to be removed
+ */
+struct iwl_mld_delba_data {
+ u32 baid;
+} __packed;
+
+/**
+ * enum iwl_mld_reorder_result - Possible return values for iwl_mld_reorder()
+ * indicating how the caller should handle the skb based on the result.
+ *
+ * @IWL_MLD_PASS_SKB: skb should be passed to upper layer.
+ * @IWL_MLD_BUFFERED_SKB: skb has been buffered, don't pass it to upper layer.
+ * @IWL_MLD_DROP_SKB: skb should be dropped and freed by the caller.
+ */
+enum iwl_mld_reorder_result {
+ IWL_MLD_PASS_SKB,
+ IWL_MLD_BUFFERED_SKB,
+ IWL_MLD_DROP_SKB
+};
+
+int iwl_mld_ampdu_rx_start(struct iwl_mld *mld, struct ieee80211_sta *sta,
+ int tid, u16 ssn, u16 buf_size, u16 timeout);
+int iwl_mld_ampdu_rx_stop(struct iwl_mld *mld, struct ieee80211_sta *sta,
+ int tid);
+
+enum iwl_mld_reorder_result
+iwl_mld_reorder(struct iwl_mld *mld, struct napi_struct *napi,
+ int queue, struct ieee80211_sta *sta,
+ struct sk_buff *skb, struct iwl_rx_mpdu_desc *desc);
+
+void iwl_mld_handle_frame_release_notif(struct iwl_mld *mld,
+ struct napi_struct *napi,
+ struct iwl_rx_packet *pkt, int queue);
+void iwl_mld_handle_bar_frame_release_notif(struct iwl_mld *mld,
+ struct napi_struct *napi,
+ struct iwl_rx_packet *pkt,
+ int queue);
+
+void iwl_mld_del_ba(struct iwl_mld *mld, int queue,
+ struct iwl_mld_delba_data *data);
+
+int iwl_mld_update_sta_baids(struct iwl_mld *mld,
+ u32 old_sta_mask,
+ u32 new_sta_mask);
+
+#endif /* __iwl_agg_h__ */
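
A small arithmetic sketch (not in the patch) of the @entries_per_queue padding documented above and computed in iwl_mld_ampdu_rx_start(): the raw per-queue array is rounded up to whole cache lines, and the entry count is derived back from the padded size.

static u16 example_entries_per_queue(u16 buf_size)
{
	size_t raw = buf_size * sizeof(struct iwl_mld_reorder_buf_entry);
	size_t padded = ALIGN(raw, SMP_CACHE_BYTES);

	return padded / sizeof(struct iwl_mld_reorder_buf_entry);
}
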
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/ap.c b/drivers/net/wireless/intel/iwlwifi/mld/ap.c
new file mode 100644
index 000000000000..571eabd0b511
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/ap.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <linux/crc32.h>
+
+#include <net/mac80211.h>
+
+#include "ap.h"
+#include "hcmd.h"
+#include "tx.h"
+#include "power.h"
+#include "key.h"
+#include "iwl-utils.h"
+
+#include "fw/api/sta.h"
+
+void iwl_mld_set_tim_idx(struct iwl_mld *mld, __le32 *tim_index,
+ u8 *beacon, u32 frame_size)
+{
+ u32 tim_idx;
+ struct ieee80211_mgmt *mgmt = (void *)beacon;
+
+ /* The index is relative to frame start but we start looking at the
+ * variable-length part of the beacon.
+ */
+ tim_idx = mgmt->u.beacon.variable - beacon;
+
+ /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
+ while ((tim_idx < (frame_size - 2)) &&
+ (beacon[tim_idx] != WLAN_EID_TIM))
+ tim_idx += beacon[tim_idx + 1] + 2;
+
+ /* If TIM field was found, set variables */
+ if ((tim_idx < (frame_size - 1)) && beacon[tim_idx] == WLAN_EID_TIM)
+ *tim_index = cpu_to_le32(tim_idx);
+ else
+ IWL_WARN(mld, "Unable to find TIM Element in beacon\n");
+}
+
+u8 iwl_mld_get_rate_flags(struct iwl_mld *mld,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link,
+ enum nl80211_band band)
+{
+ u32 legacy = link->beacon_tx_rate.control[band].legacy;
+ u32 rate_idx, rate_flags = 0, fw_rate;
+
+ /* if beacon rate was configured try using it */
+ if (hweight32(legacy) == 1) {
+ u32 rate = ffs(legacy) - 1;
+ struct ieee80211_supported_band *sband =
+ mld->hw->wiphy->bands[band];
+
+ rate_idx = sband->bitrates[rate].hw_value;
+ } else {
+ rate_idx = iwl_mld_get_lowest_rate(mld, info, vif);
+ }
+
+ if (rate_idx <= IWL_LAST_CCK_RATE)
+ rate_flags = IWL_MAC_BEACON_CCK;
+
+ /* Legacy rates are indexed as follows:
+ * 0 - 3 for CCK and 0 - 7 for OFDM.
+ */
+ fw_rate = (rate_idx >= IWL_FIRST_OFDM_RATE ?
+ rate_idx - IWL_FIRST_OFDM_RATE : rate_idx);
+
+ return fw_rate | rate_flags;
+}
+
+int iwl_mld_send_beacon_template_cmd(struct iwl_mld *mld,
+ struct sk_buff *beacon,
+ struct iwl_mac_beacon_cmd *cmd)
+{
+ struct iwl_host_cmd hcmd = {
+ .id = BEACON_TEMPLATE_CMD,
+ };
+
+ hcmd.len[0] = sizeof(*cmd);
+ hcmd.data[0] = cmd;
+
+ hcmd.len[1] = beacon->len;
+ hcmd.data[1] = beacon->data;
+ hcmd.dataflags[1] = IWL_HCMD_DFL_DUP;
+
+ return iwl_mld_send_cmd(mld, &hcmd);
+}
+
+static int iwl_mld_fill_beacon_template_cmd(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct sk_buff *beacon,
+ struct iwl_mac_beacon_cmd *cmd,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(beacon);
+ struct ieee80211_chanctx_conf *ctx;
+ bool enable_fils;
+ u16 flags = 0;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (WARN_ON(!mld_link))
+ return -EINVAL;
+
+ cmd->link_id = cpu_to_le32(mld_link->fw_id);
+
+ ctx = wiphy_dereference(mld->wiphy, link->chanctx_conf);
+ if (WARN_ON(!ctx || !ctx->def.chan))
+ return -EINVAL;
+
+ enable_fils = cfg80211_channel_is_psc(ctx->def.chan) ||
+ (ctx->def.chan->band == NL80211_BAND_6GHZ &&
+ ctx->def.width >= NL80211_CHAN_WIDTH_80);
+
+ if (enable_fils) {
+ flags |= IWL_MAC_BEACON_FILS;
+ cmd->short_ssid = cpu_to_le32(~crc32_le(~0, vif->cfg.ssid,
+ vif->cfg.ssid_len));
+ }
+
+ cmd->byte_cnt = cpu_to_le16((u16)beacon->len);
+
+ flags |= iwl_mld_get_rate_flags(mld, info, vif, link,
+ ctx->def.chan->band);
+
+ cmd->flags = cpu_to_le16(flags);
+
+ if (vif->type == NL80211_IFTYPE_AP) {
+ iwl_mld_set_tim_idx(mld, &cmd->tim_idx,
+ beacon->data, beacon->len);
+
+ cmd->btwt_offset =
+ cpu_to_le32(iwl_find_ie_offset(beacon->data,
+ WLAN_EID_S1G_TWT,
+ beacon->len));
+ }
+
+ cmd->csa_offset =
+ cpu_to_le32(iwl_find_ie_offset(beacon->data,
+ WLAN_EID_CHANNEL_SWITCH,
+ beacon->len));
+ cmd->ecsa_offset =
+ cpu_to_le32(iwl_find_ie_offset(beacon->data,
+ WLAN_EID_EXT_CHANSWITCH_ANN,
+ beacon->len));
+
+ return 0;
+}
+
+/* The beacon template for the AP/GO/IBSS has changed and needs update */
+int iwl_mld_update_beacon_template(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mac_beacon_cmd cmd = {};
+ struct sk_buff *beacon;
+ int ret;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+#endif
+
+ WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_ADHOC);
+
+ if (IWL_MLD_NON_TRANSMITTING_AP)
+ return 0;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mld_vif->beacon_inject_active) {
+ IWL_DEBUG_INFO(mld,
+ "Can't update template, beacon injection's active\n");
+ return -EBUSY;
+ }
+
+#endif
+ beacon = ieee80211_beacon_get_template(mld->hw, vif, NULL,
+ link_conf->link_id);
+ if (!beacon)
+ return -ENOMEM;
+
+ ret = iwl_mld_fill_beacon_template_cmd(mld, vif, beacon, &cmd,
+ link_conf);
+
+ if (!ret)
+ ret = iwl_mld_send_beacon_template_cmd(mld, beacon, &cmd);
+
+ dev_kfree_skb(beacon);
+
+ return ret;
+}
+
+void iwl_mld_free_ap_early_key(struct iwl_mld *mld,
+ struct ieee80211_key_conf *key,
+ struct iwl_mld_vif *mld_vif)
+{
+ struct iwl_mld_link *link;
+
+ if (WARN_ON(key->link_id < 0))
+ return;
+
+ link = iwl_mld_link_dereference_check(mld_vif, key->link_id);
+ if (WARN_ON(!link))
+ return;
+
+ for (int i = 0; i < ARRAY_SIZE(link->ap_early_keys); i++) {
+ if (link->ap_early_keys[i] != key)
+ continue;
+ /* Those weren't sent to FW, so should be marked as INVALID */
+ if (WARN_ON(key->hw_key_idx != STA_KEY_IDX_INVALID))
+ key->hw_key_idx = STA_KEY_IDX_INVALID;
+ link->ap_early_keys[i] = NULL;
+ }
+}
+
+int iwl_mld_store_ap_early_key(struct iwl_mld *mld,
+ struct ieee80211_key_conf *key,
+ struct iwl_mld_vif *mld_vif)
+{
+ struct iwl_mld_link *link;
+
+ if (WARN_ON(key->link_id < 0))
+ return -EINVAL;
+
+ link = iwl_mld_link_dereference_check(mld_vif, key->link_id);
+ if (WARN_ON(!link))
+ return -EINVAL;
+
+ for (int i = 0; i < ARRAY_SIZE(link->ap_early_keys); i++) {
+ if (!link->ap_early_keys[i]) {
+ link->ap_early_keys[i] = key;
+ return 0;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+static int iwl_mld_send_ap_early_keys(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+ int ret = 0;
+
+ if (WARN_ON(!link))
+ return -EINVAL;
+
+ for (int i = 0; i < ARRAY_SIZE(mld_link->ap_early_keys); i++) {
+ struct ieee80211_key_conf *key = mld_link->ap_early_keys[i];
+
+ if (!key)
+ continue;
+
+ mld_link->ap_early_keys[i] = NULL;
+
+ ret = iwl_mld_add_key(mld, vif, NULL, key);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+int iwl_mld_start_ap_ibss(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int ret;
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ iwl_mld_send_ap_tx_power_constraint_cmd(mld, vif, link);
+
+ ret = iwl_mld_update_beacon_template(mld, vif, link);
+ if (ret)
+ return ret;
+
+ /* the link should be already activated when assigning chan context,
+ * and LINK_CONTEXT_MODIFY_EHT_PARAMS is deprecated
+ */
+ ret = iwl_mld_change_link_in_fw(mld, link,
+ LINK_CONTEXT_MODIFY_ALL &
+ ~(LINK_CONTEXT_MODIFY_ACTIVE |
+ LINK_CONTEXT_MODIFY_EHT_PARAMS));
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_add_mcast_sta(mld, vif, link);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_add_bcast_sta(mld, vif, link);
+ if (ret)
+ goto rm_mcast;
+
+ /* Those keys were configured by the upper layers before starting the
+ * AP. Now that it is started and the bcast and mcast sta were added to
+ * the FW, we can add the keys too.
+ */
+ ret = iwl_mld_send_ap_early_keys(mld, vif, link);
+ if (ret)
+ goto rm_bcast;
+
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP)
+ iwl_mld_vif_update_low_latency(mld, vif, true,
+ LOW_LATENCY_VIF_TYPE);
+
+ mld_vif->ap_ibss_active = true;
+
+ if (vif->p2p && mld->p2p_device_vif)
+ return iwl_mld_mac_fw_action(mld, mld->p2p_device_vif,
+ FW_CTXT_ACTION_MODIFY);
+
+ return 0;
+rm_bcast:
+ iwl_mld_remove_bcast_sta(mld, vif, link);
+rm_mcast:
+ iwl_mld_remove_mcast_sta(mld, vif, link);
+ return ret;
+}
+
+void iwl_mld_stop_ap_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ mld_vif->ap_ibss_active = false;
+
+ if (vif->p2p && mld->p2p_device_vif)
+ iwl_mld_mac_fw_action(mld, mld->p2p_device_vif,
+ FW_CTXT_ACTION_MODIFY);
+
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP)
+ iwl_mld_vif_update_low_latency(mld, vif, false,
+ LOW_LATENCY_VIF_TYPE);
+
+ iwl_mld_remove_bcast_sta(mld, vif, link);
+
+ iwl_mld_remove_mcast_sta(mld, vif, link);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/ap.h b/drivers/net/wireless/intel/iwlwifi/mld/ap.h
new file mode 100644
index 000000000000..4a6f52b9552d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/ap.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_ap_h__
+#define __iwl_ap_h__
+
+#include "mld.h"
+#include "iface.h"
+
+#include "fw/api/tx.h"
+
+int iwl_mld_update_beacon_template(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
+
+int iwl_mld_start_ap_ibss(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link);
+
+void iwl_mld_stop_ap_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link);
+
+int iwl_mld_store_ap_early_key(struct iwl_mld *mld,
+ struct ieee80211_key_conf *key,
+ struct iwl_mld_vif *mld_vif);
+
+void iwl_mld_free_ap_early_key(struct iwl_mld *mld,
+ struct ieee80211_key_conf *key,
+ struct iwl_mld_vif *mld_vif);
+
+u8 iwl_mld_get_rate_flags(struct iwl_mld *mld,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link,
+ enum nl80211_band band);
+
+void iwl_mld_set_tim_idx(struct iwl_mld *mld, __le32 *tim_index,
+ u8 *beacon, u32 frame_size);
+
+int iwl_mld_send_beacon_template_cmd(struct iwl_mld *mld,
+ struct sk_buff *beacon,
+ struct iwl_mac_beacon_cmd *cmd);
+
+#endif /* __iwl_ap_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/coex.c b/drivers/net/wireless/intel/iwlwifi/mld/coex.c
new file mode 100644
index 000000000000..5f262bd43f21
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/coex.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include "fw/api/coex.h"
+
+#include "coex.h"
+#include "mld.h"
+#include "hcmd.h"
+#include "mlo.h"
+
+int iwl_mld_send_bt_init_conf(struct iwl_mld *mld)
+{
+ struct iwl_bt_coex_cmd cmd = {
+ .mode = cpu_to_le32(BT_COEX_NW),
+ .enabled_modules = cpu_to_le32(BT_COEX_MPLUT_ENABLED |
+ BT_COEX_HIGH_BAND_RET),
+ };
+
+ return iwl_mld_send_cmd_pdu(mld, BT_CONFIG, &cmd);
+}
+
+void iwl_mld_handle_bt_coex_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ const struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
+ const struct iwl_bt_coex_profile_notif zero_notif = {};
+ /* zeroed structure means that BT is OFF */
+ bool bt_is_active = memcmp(notif, &zero_notif, sizeof(*notif));
+
+ if (bt_is_active == mld->bt_is_active)
+ return;
+
+ IWL_DEBUG_INFO(mld, "BT was turned %s\n", bt_is_active ? "ON" : "OFF");
+
+ mld->bt_is_active = bt_is_active;
+
+ iwl_mld_emlsr_check_bt(mld);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/coex.h b/drivers/net/wireless/intel/iwlwifi/mld/coex.h
new file mode 100644
index 000000000000..a77c5dc9613c
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/coex.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_coex_h__
+#define __iwl_mld_coex_h__
+
+#include "mld.h"
+
+int iwl_mld_send_bt_init_conf(struct iwl_mld *mld);
+
+void iwl_mld_handle_bt_coex_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+
+#endif /* __iwl_mld_coex_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/constants.h b/drivers/net/wireless/intel/iwlwifi/mld/constants.h
new file mode 100644
index 000000000000..2a59b29b75cb
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/constants.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_constants_h__
+#define __iwl_mld_constants_h__
+
+#define IWL_MLD_MISSED_BEACONS_SINCE_RX_THOLD 4
+#define IWL_MLD_MISSED_BEACONS_THRESHOLD 8
+#define IWL_MLD_MISSED_BEACONS_THRESHOLD_LONG 19
+#define IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS 5
+#define IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH 15
+#define IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED 11
+#define IWL_MLD_LOW_RSSI_MLO_SCAN_THRESH -72
+
+#define IWL_MLD_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
+#define IWL_MLD_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
+#define IWL_MLD_WOWLAN_PS_TX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
+#define IWL_MLD_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
+#define IWL_MLD_SHORT_PS_TX_DATA_TIMEOUT (2 * 1024) /* defined in TU */
+#define IWL_MLD_SHORT_PS_RX_DATA_TIMEOUT (40 * 1024) /* defined in TU */
+
+#define IWL_MLD_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
+#define IWL_MLD_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
+
+#define IWL_MLD_PS_SNOOZE_INTERVAL 25
+#define IWL_MLD_PS_SNOOZE_WINDOW 50
+
+#define IWL_MLD_PS_SNOOZE_HEAVY_TX_THLD_PACKETS 30
+#define IWL_MLD_PS_SNOOZE_HEAVY_RX_THLD_PACKETS 20
+
+#define IWL_MLD_PS_HEAVY_TX_THLD_PERCENT 50
+#define IWL_MLD_PS_HEAVY_RX_THLD_PERCENT 50
+#define IWL_MLD_PS_HEAVY_TX_THLD_PACKETS 20
+#define IWL_MLD_PS_HEAVY_RX_THLD_PACKETS 8
+
+#define IWL_MLD_TRIGGER_LINK_SEL_TIME_SEC 30
+#define IWL_MLD_SCAN_EXPIRE_TIME_SEC 20
+
+#define IWL_MLD_TPT_COUNT_WINDOW (5 * HZ)
+
+/* OMI reduced BW thresholds (channel load percentage) */
+#define IWL_MLD_OMI_ENTER_CHAN_LOAD 10
+#define IWL_MLD_OMI_EXIT_CHAN_LOAD_160 20
+#define IWL_MLD_OMI_EXIT_CHAN_LOAD_320 30
+/* time (in milliseconds) to let AP "settle" the OMI */
+#define IWL_MLD_OMI_AP_SETTLE_DELAY 27
+/* time (in milliseconds) to not enter OMI reduced BW after leaving */
+#define IWL_MLD_OMI_EXIT_PROTECTION 5000
+
+#define IWL_MLD_DIS_RANDOM_FW_ID false
+#define IWL_MLD_D3_DEBUG false
+#define IWL_MLD_NON_TRANSMITTING_AP false
+#define IWL_MLD_6GHZ_PASSIVE_SCAN_TIMEOUT 3000 /* in seconds */
+#define IWL_MLD_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT 60 /* in seconds */
+#define IWL_MLD_CONN_LISTEN_INTERVAL 10
+#define IWL_MLD_ADAPTIVE_DWELL_NUM_APS_OVERRIDE 0
+#define IWL_MLD_AUTO_EML_ENABLE true
+
+#define IWL_MLD_HIGH_RSSI_THRESH_20MHZ -67
+#define IWL_MLD_LOW_RSSI_THRESH_20MHZ -72
+#define IWL_MLD_HIGH_RSSI_THRESH_40MHZ -64
+#define IWL_MLD_LOW_RSSI_THRESH_40MHZ -72
+#define IWL_MLD_HIGH_RSSI_THRESH_80MHZ -61
+#define IWL_MLD_LOW_RSSI_THRESH_80MHZ -72
+#define IWL_MLD_HIGH_RSSI_THRESH_160MHZ -58
+#define IWL_MLD_LOW_RSSI_THRESH_160MHZ -72
+
+#define IWL_MLD_ENTER_EMLSR_TPT_THRESH 400
+#define IWL_MLD_EXIT_EMLSR_CHAN_LOAD 2 /* in percentage */
+
+#define IWL_MLD_FTM_INITIATOR_ALGO IWL_TOF_ALGO_TYPE_MAX_LIKE
+#define IWL_MLD_FTM_INITIATOR_DYNACK true
+#define IWL_MLD_FTM_LMR_FEEDBACK_TERMINATE false
+#define IWL_MLD_FTM_TEST_INCORRECT_SAC false
+#define IWL_MLD_FTM_R2I_MAX_REP 7
+#define IWL_MLD_FTM_I2R_MAX_REP 7
+#define IWL_MLD_FTM_R2I_MAX_STS 1
+#define IWL_MLD_FTM_I2R_MAX_STS 1
+#define IWL_MLD_FTM_R2I_MAX_TOTAL_LTF 3
+#define IWL_MLD_FTM_I2R_MAX_TOTAL_LTF 3
+#define IWL_MLD_FTM_RESP_NDP_SUPPORT true
+#define IWL_MLD_FTM_RESP_LMR_FEEDBACK_SUPPORT true
+#define IWL_MLD_FTM_NON_TB_MIN_TIME_BETWEEN_MSR 7
+#define IWL_MLD_FTM_NON_TB_MAX_TIME_BETWEEN_MSR 1000
+
+#endif /* __iwl_mld_constants_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/d3.c b/drivers/net/wireless/intel/iwlwifi/mld/d3.c
new file mode 100644
index 000000000000..5a7207accd86
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/d3.c
@@ -0,0 +1,1998 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include "mld.h"
+
+#include "d3.h"
+#include "power.h"
+#include "hcmd.h"
+#include "iface.h"
+#include "mcc.h"
+#include "sta.h"
+#include "mlo.h"
+
+#include "fw/api/d3.h"
+#include "fw/api/offload.h"
+#include "fw/api/sta.h"
+#include "fw/dbg.h"
+
+#include <net/ipv6.h>
+#include <net/addrconf.h>
+#include <linux/bitops.h>
+
+/**
+ * enum iwl_mld_d3_notif - d3 notifications
+ * @IWL_D3_NOTIF_WOWLAN_INFO: WOWLAN_INFO_NOTIF is expected/was received
+ * @IWL_D3_NOTIF_WOWLAN_WAKE_PKT: WOWLAN_WAKE_PKT_NOTIF is expected/was received
+ * @IWL_D3_NOTIF_PROT_OFFLOAD: PROT_OFFLOAD_NOTIF is expected/was received
+ * @IWL_D3_ND_MATCH_INFO: OFFLOAD_MATCH_INFO_NOTIF is expected/was received
+ * @IWL_D3_NOTIF_D3_END_NOTIF: D3_END_NOTIF is expected/was received
+ */
+enum iwl_mld_d3_notif {
+ IWL_D3_NOTIF_WOWLAN_INFO = BIT(0),
+ IWL_D3_NOTIF_WOWLAN_WAKE_PKT = BIT(1),
+ IWL_D3_NOTIF_PROT_OFFLOAD = BIT(2),
+ IWL_D3_ND_MATCH_INFO = BIT(3),
+ IWL_D3_NOTIF_D3_END_NOTIF = BIT(4)
+};
+
+struct iwl_mld_resume_key_iter_data {
+ struct iwl_mld *mld;
+ struct iwl_mld_wowlan_status *wowlan_status;
+ u32 num_keys, gtk_cipher, igtk_cipher, bigtk_cipher;
+ bool unhandled_cipher;
+};
+
+struct iwl_mld_suspend_key_iter_data {
+ struct iwl_wowlan_rsc_tsc_params_cmd *rsc;
+ bool have_rsc;
+ int gtks;
+ int found_gtk_idx[4];
+ __le32 gtk_cipher;
+ __le32 igtk_cipher;
+ __le32 bigtk_cipher;
+};
+
+struct iwl_mld_mcast_key_data {
+ u8 key[WOWLAN_KEY_MAX_SIZE];
+ u8 len;
+ u8 flags;
+ u8 id;
+ union {
+ struct {
+ struct ieee80211_key_seq aes_seq[IWL_MAX_TID_COUNT];
+ struct ieee80211_key_seq tkip_seq[IWL_MAX_TID_COUNT];
+ } gtk;
+ struct {
+ struct ieee80211_key_seq cmac_gmac_seq;
+ } igtk_bigtk;
+ };
+};
+
+/**
+ * struct iwl_mld_wowlan_status - contains wowlan status data from
+ * all wowlan notifications
+ * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
+ * @replay_ctr: GTK rekey replay counter
+ * @pattern_number: number of the matched patterns on packets
+ * @last_qos_seq: QoS sequence counter of offloaded tid
+ * @num_of_gtk_rekeys: number of GTK rekeys during D3
+ * @tid_offloaded_tx: tid used by the firmware to transmit data packets
+ * while in wowlan
+ * @wake_packet: wakeup packet received
+ * @wake_packet_length: wake packet length
+ * @wake_packet_bufsize: size of the wakeup packet buffer
+ * @gtk: data of the last two GTKs used by the FW upon resume
+ * @igtk: data of the last IGTK used by the FW upon resume
+ * @bigtk: data of the last two BIGTKs used by the FW upon resume
+ * @ptk: last seq numbers per tid passed by the FW,
+ * holds both in tkip and aes formats
+ */
+struct iwl_mld_wowlan_status {
+ u32 wakeup_reasons;
+ u64 replay_ctr;
+ u16 pattern_number;
+ u16 last_qos_seq;
+ u32 num_of_gtk_rekeys;
+ u8 tid_offloaded_tx;
+ u8 *wake_packet;
+ u32 wake_packet_length;
+ u32 wake_packet_bufsize;
+ struct iwl_mld_mcast_key_data gtk[WOWLAN_GTK_KEYS_NUM];
+ struct iwl_mld_mcast_key_data igtk;
+ struct iwl_mld_mcast_key_data bigtk[WOWLAN_BIGTK_KEYS_NUM];
+ struct {
+ struct ieee80211_key_seq aes_seq[IWL_MAX_TID_COUNT];
+ struct ieee80211_key_seq tkip_seq[IWL_MAX_TID_COUNT];
+ } ptk;
+};
+
+#define NETDETECT_QUERY_BUF_LEN \
+ (sizeof(struct iwl_scan_offload_profile_match) * \
+ IWL_SCAN_MAX_PROFILES_V2)
+
+/**
+ * struct iwl_mld_netdetect_res - contains netdetect results from
+ * match_info_notif
+ * @matched_profiles: bitmap of matched profiles, referencing the
+ * matches passed in the scan offload request
+ * @matches: array of match information, one for each match
+ */
+struct iwl_mld_netdetect_res {
+ u32 matched_profiles;
+ u8 matches[NETDETECT_QUERY_BUF_LEN];
+};
+
+/**
+ * struct iwl_mld_resume_data - d3 resume flow data
+ * @notifs_expected: bitmap of expected notifications from fw,
+ * see &enum iwl_mld_d3_notif
+ * @notifs_received: bitmap of received notifications from fw,
+ * see &enum iwl_mld_d3_notif
+ * @d3_end_flags: bitmap of flags from d3_end_notif
+ * @notif_handling_err: error handling one of the resume notifications
+ * @wowlan_status: wowlan status data from all wowlan notifications
+ * @netdetect_res: contains netdetect results from match_info_notif
+ */
+struct iwl_mld_resume_data {
+ u32 notifs_expected;
+ u32 notifs_received;
+ u32 d3_end_flags;
+ bool notif_handling_err;
+ struct iwl_mld_wowlan_status *wowlan_status;
+ struct iwl_mld_netdetect_res *netdetect_res;
+};
+
+#define IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT \
+ (IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET | \
+ IWL_WOWLAN_WAKEUP_BY_PATTERN | \
+ IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN |\
+ IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN_WILDCARD |\
+ IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN |\
+ IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN_WILDCARD)
+
+#define IWL_WOWLAN_OFFLOAD_TID 0
+
+void iwl_mld_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_wowlan_data *wowlan_data = &mld_vif->wowlan_data;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ wowlan_data->rekey_data.kek_len = data->kek_len;
+ wowlan_data->rekey_data.kck_len = data->kck_len;
+ memcpy(wowlan_data->rekey_data.kek, data->kek, data->kek_len);
+ memcpy(wowlan_data->rekey_data.kck, data->kck, data->kck_len);
+ wowlan_data->rekey_data.akm = data->akm & 0xFF;
+ wowlan_data->rekey_data.replay_ctr =
+ cpu_to_le64(be64_to_cpup((const __be64 *)data->replay_ctr));
+ wowlan_data->rekey_data.valid = true;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+void iwl_mld_ipv6_addr_change(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct inet6_dev *idev)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_wowlan_data *wowlan_data = &mld_vif->wowlan_data;
+ struct inet6_ifaddr *ifa;
+ int idx = 0;
+
+ memset(wowlan_data->tentative_addrs, 0,
+ sizeof(wowlan_data->tentative_addrs));
+
+ read_lock_bh(&idev->lock);
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
+ wowlan_data->target_ipv6_addrs[idx] = ifa->addr;
+ if (ifa->flags & IFA_F_TENTATIVE)
+ __set_bit(idx, wowlan_data->tentative_addrs);
+ idx++;
+ if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)
+ break;
+ }
+ read_unlock_bh(&idev->lock);
+
+ wowlan_data->num_target_ipv6_addrs = idx;
+}
+#endif
+
+enum rt_status {
+ FW_ALIVE,
+ FW_NEEDS_RESET,
+ FW_ERROR,
+};
+
+static enum rt_status iwl_mld_check_err_tables(struct iwl_mld *mld,
+ struct ieee80211_vif *vif)
+{
+ u32 err_id;
+
+ /* check for lmac1 error */
+ if (iwl_fwrt_read_err_table(mld->trans,
+ mld->trans->dbg.lmac_error_event_table[0],
+ &err_id)) {
+ if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN && vif) {
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .rfkill_release = true,
+ };
+ ieee80211_report_wowlan_wakeup(vif, &wakeup,
+ GFP_KERNEL);
+
+ return FW_NEEDS_RESET;
+ }
+ return FW_ERROR;
+ }
+
+ /* check if we have lmac2 set and check for error */
+ if (iwl_fwrt_read_err_table(mld->trans,
+ mld->trans->dbg.lmac_error_event_table[1],
+ NULL))
+ return FW_ERROR;
+
+ /* check for umac error */
+ if (iwl_fwrt_read_err_table(mld->trans,
+ mld->trans->dbg.umac_error_event_table,
+ NULL))
+ return FW_ERROR;
+
+ return FW_ALIVE;
+}
+
+static bool iwl_mld_fw_needs_restart(struct iwl_mld *mld,
+ struct ieee80211_vif *vif)
+{
+ enum rt_status rt_status = iwl_mld_check_err_tables(mld, vif);
+
+ if (rt_status == FW_ALIVE)
+ return false;
+
+ if (rt_status == FW_ERROR) {
+ IWL_ERR(mld, "FW Error occurred during suspend\n");
+ iwl_fwrt_dump_error_logs(&mld->fwrt);
+ iwl_dbg_tlv_time_point(&mld->fwrt,
+ IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL);
+ }
+
+ return true;
+}
+
+static int
+iwl_mld_netdetect_config(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_wowlan *wowlan)
+{
+ int ret;
+ struct cfg80211_sched_scan_request *netdetect_cfg =
+ wowlan->nd_config;
+ struct ieee80211_scan_ies ies = {};
+
+ ret = iwl_mld_scan_stop(mld, IWL_MLD_SCAN_SCHED, true);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_sched_scan_start(mld, vif, netdetect_cfg, &ies,
+ IWL_MLD_SCAN_NETDETECT);
+ return ret;
+}
+
+static void
+iwl_mld_le64_to_tkip_seq(__le64 le_pn, struct ieee80211_key_seq *seq)
+{
+ u64 pn = le64_to_cpu(le_pn);
+
+ seq->tkip.iv16 = (u16)pn;
+ seq->tkip.iv32 = (u32)(pn >> 16);
+}
+
+static void
+iwl_mld_le64_to_aes_seq(__le64 le_pn, struct ieee80211_key_seq *seq)
+{
+ u64 pn = le64_to_cpu(le_pn);
+
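+ /* mac80211 keeps the CCMP/GCMP PN big-endian: pn[0] is the MSB */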
+ seq->ccmp.pn[0] = pn >> 40;
+ seq->ccmp.pn[1] = pn >> 32;
+ seq->ccmp.pn[2] = pn >> 24;
+ seq->ccmp.pn[3] = pn >> 16;
+ seq->ccmp.pn[4] = pn >> 8;
+ seq->ccmp.pn[5] = pn;
+}
+
+static void
+iwl_mld_convert_gtk_resume_seq(struct iwl_mld_mcast_key_data *gtk_data,
+ const struct iwl_wowlan_all_rsc_tsc_v5 *sc,
+ int rsc_idx)
+{
+ struct ieee80211_key_seq *aes_seq = gtk_data->gtk.aes_seq;
+ struct ieee80211_key_seq *tkip_seq = gtk_data->gtk.tkip_seq;
+
+ if (rsc_idx >= ARRAY_SIZE(sc->mcast_rsc))
+ return;
+
+ /* We store both the TKIP and AES representations coming from the
+ * FW because we decode the data from there before we iterate
+ * the keys and know which type is used.
+ */
+ for (int tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ iwl_mld_le64_to_tkip_seq(sc->mcast_rsc[rsc_idx][tid],
+ &tkip_seq[tid]);
+ iwl_mld_le64_to_aes_seq(sc->mcast_rsc[rsc_idx][tid],
+ &aes_seq[tid]);
+ }
+}
+
+static void
+iwl_mld_convert_gtk_resume_data(struct iwl_mld *mld,
+ struct iwl_mld_wowlan_status *wowlan_status,
+ const struct iwl_wowlan_gtk_status_v3 *gtk_data,
+ const struct iwl_wowlan_all_rsc_tsc_v5 *sc)
+{
+ int status_idx = 0;
+
+ BUILD_BUG_ON(sizeof(wowlan_status->gtk[0].key) <
+ sizeof(gtk_data[0].key));
+ BUILD_BUG_ON(ARRAY_SIZE(wowlan_status->gtk) < WOWLAN_GTK_KEYS_NUM);
+
+ for (int notif_idx = 0; notif_idx < ARRAY_SIZE(wowlan_status->gtk);
+ notif_idx++) {
+ int rsc_idx;
+
+ if (!(gtk_data[notif_idx].key_len))
+ continue;
+
+ wowlan_status->gtk[status_idx].len =
+ gtk_data[notif_idx].key_len;
+ wowlan_status->gtk[status_idx].flags =
+ gtk_data[notif_idx].key_flags;
+ wowlan_status->gtk[status_idx].id =
+ wowlan_status->gtk[status_idx].flags &
+ IWL_WOWLAN_GTK_IDX_MASK;
+ memcpy(wowlan_status->gtk[status_idx].key,
+ gtk_data[notif_idx].key,
+ sizeof(gtk_data[notif_idx].key));
+
+ /* The RSCs for both GTK keys are stored in gtk[0]->sc->mcast_rsc.
+ * The GTK ids can be any two numbers between 0 and 3; the id_map
+ * maps between the key id and the index in sc->mcast_rsc.
+ */
+ rsc_idx =
+ sc->mcast_key_id_map[wowlan_status->gtk[status_idx].id];
+ iwl_mld_convert_gtk_resume_seq(&wowlan_status->gtk[status_idx],
+ sc, rsc_idx);
+
+ /* if it's as long as the TKIP encryption key, copy MIC key */
+ if (wowlan_status->gtk[status_idx].len ==
+ NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY)
+ memcpy(wowlan_status->gtk[status_idx].key +
+ NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
+ gtk_data[notif_idx].tkip_mic_key,
+ sizeof(gtk_data[notif_idx].tkip_mic_key));
+ status_idx++;
+ }
+}
+
+static void
+iwl_mld_convert_ptk_resume_seq(struct iwl_mld *mld,
+ struct iwl_mld_wowlan_status *wowlan_status,
+ const struct iwl_wowlan_all_rsc_tsc_v5 *sc)
+{
+ struct ieee80211_key_seq *aes_seq = wowlan_status->ptk.aes_seq;
+ struct ieee80211_key_seq *tkip_seq = wowlan_status->ptk.tkip_seq;
+
+ BUILD_BUG_ON(ARRAY_SIZE(sc->ucast_rsc) != IWL_MAX_TID_COUNT);
+
+ for (int tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ iwl_mld_le64_to_aes_seq(sc->ucast_rsc[tid], &aes_seq[tid]);
+ iwl_mld_le64_to_tkip_seq(sc->ucast_rsc[tid], &tkip_seq[tid]);
+ }
+}
+
+static void
+iwl_mld_convert_mcast_ipn(struct iwl_mld_mcast_key_data *key_status,
+ const struct iwl_wowlan_igtk_status *key)
+{
+ struct ieee80211_key_seq *seq =
+ &key_status->igtk_bigtk.cmac_gmac_seq;
+ u8 ipn_len = ARRAY_SIZE(key->ipn);
+
+ BUILD_BUG_ON(ipn_len != ARRAY_SIZE(seq->aes_gmac.pn));
+ BUILD_BUG_ON(ipn_len != ARRAY_SIZE(seq->aes_cmac.pn));
+ BUILD_BUG_ON(offsetof(struct ieee80211_key_seq, aes_gmac) !=
+ offsetof(struct ieee80211_key_seq, aes_cmac));
+
+ /* mac80211 expects big endian for memcmp() to work, so convert.
+ * We don't have the key cipher yet, so copy to both cmac and gmac.
+ */
+ for (int i = 0; i < ipn_len; i++) {
+ seq->aes_gmac.pn[i] = key->ipn[ipn_len - i - 1];
+ seq->aes_cmac.pn[i] = key->ipn[ipn_len - i - 1];
+ }
+}
+
+static void
+iwl_mld_convert_igtk_resume_data(struct iwl_mld_wowlan_status *wowlan_status,
+ const struct iwl_wowlan_igtk_status *igtk)
+{
+ BUILD_BUG_ON(sizeof(wowlan_status->igtk.key) < sizeof(igtk->key));
+
+ if (!igtk->key_len)
+ return;
+
+ wowlan_status->igtk.len = igtk->key_len;
+ wowlan_status->igtk.flags = igtk->key_flags;
+ wowlan_status->igtk.id =
+ u32_get_bits(igtk->key_flags,
+ IWL_WOWLAN_IGTK_BIGTK_IDX_MASK) +
+ WOWLAN_IGTK_MIN_INDEX;
+
+ memcpy(wowlan_status->igtk.key, igtk->key, sizeof(igtk->key));
+ iwl_mld_convert_mcast_ipn(&wowlan_status->igtk, igtk);
+}
+
+static void
+iwl_mld_convert_bigtk_resume_data(struct iwl_mld_wowlan_status *wowlan_status,
+ const struct iwl_wowlan_igtk_status *bigtk)
+{
+ int status_idx = 0;
+
+ BUILD_BUG_ON(ARRAY_SIZE(wowlan_status->bigtk) < WOWLAN_BIGTK_KEYS_NUM);
+
+ for (int notif_idx = 0; notif_idx < WOWLAN_BIGTK_KEYS_NUM;
+ notif_idx++) {
+ if (!bigtk[notif_idx].key_len)
+ continue;
+
+ wowlan_status->bigtk[status_idx].len = bigtk[notif_idx].key_len;
+ wowlan_status->bigtk[status_idx].flags =
+ bigtk[notif_idx].key_flags;
+ wowlan_status->bigtk[status_idx].id =
+ u32_get_bits(bigtk[notif_idx].key_flags,
+ IWL_WOWLAN_IGTK_BIGTK_IDX_MASK)
+ + WOWLAN_BIGTK_MIN_INDEX;
+
+ BUILD_BUG_ON(sizeof(wowlan_status->bigtk[status_idx].key) <
+ sizeof(bigtk[notif_idx].key));
+ memcpy(wowlan_status->bigtk[status_idx].key,
+ bigtk[notif_idx].key, sizeof(bigtk[notif_idx].key));
+ iwl_mld_convert_mcast_ipn(&wowlan_status->bigtk[status_idx],
+ &bigtk[notif_idx]);
+ status_idx++;
+ }
+}
+
+static bool
+iwl_mld_handle_wowlan_info_notif(struct iwl_mld *mld,
+ struct iwl_mld_wowlan_status *wowlan_status,
+ struct iwl_rx_packet *pkt)
+{
+ const struct iwl_wowlan_info_notif *notif = (void *)pkt->data;
+ u32 expected_len, len = iwl_rx_packet_payload_len(pkt);
+
+ expected_len = sizeof(*notif);
+
+ if (IWL_FW_CHECK(mld, len < expected_len,
+ "Invalid wowlan_info_notif (expected=%ud got=%ud)\n",
+ expected_len, len))
+ return true;
+
+ if (IWL_FW_CHECK(mld, notif->tid_offloaded_tx != IWL_WOWLAN_OFFLOAD_TID,
+ "Invalid tid_offloaded_tx %d\n",
+ notif->tid_offloaded_tx))
+ return true;
+
+ iwl_mld_convert_gtk_resume_data(mld, wowlan_status, notif->gtk,
+ &notif->gtk[0].sc);
+ iwl_mld_convert_ptk_resume_seq(mld, wowlan_status, &notif->gtk[0].sc);
+ /* only one igtk is passed by FW */
+ iwl_mld_convert_igtk_resume_data(wowlan_status, &notif->igtk[0]);
+ iwl_mld_convert_bigtk_resume_data(wowlan_status, notif->bigtk);
+
+ wowlan_status->replay_ctr = le64_to_cpu(notif->replay_ctr);
+ wowlan_status->pattern_number = le16_to_cpu(notif->pattern_number);
+
+ wowlan_status->tid_offloaded_tx = notif->tid_offloaded_tx;
+ wowlan_status->last_qos_seq = le16_to_cpu(notif->qos_seq_ctr);
+ wowlan_status->num_of_gtk_rekeys =
+ le32_to_cpu(notif->num_of_gtk_rekeys);
+ wowlan_status->wakeup_reasons = le32_to_cpu(notif->wakeup_reasons);
+ return false;
+ /* TODO: mlo_links (task=MLO) */
+}
+
+static bool
+iwl_mld_handle_wake_pkt_notif(struct iwl_mld *mld,
+ struct iwl_mld_wowlan_status *wowlan_status,
+ struct iwl_rx_packet *pkt)
+{
+ const struct iwl_wowlan_wake_pkt_notif *notif = (void *)pkt->data;
+ u32 actual_size, len = iwl_rx_packet_payload_len(pkt);
+ u32 expected_size = le32_to_cpu(notif->wake_packet_length);
+
+ if (IWL_FW_CHECK(mld, len < sizeof(*notif),
+ "Invalid WoWLAN wake packet notification (expected size=%zu got=%u)\n",
+ sizeof(*notif), len))
+ return true;
+
+ if (IWL_FW_CHECK(mld, !(wowlan_status->wakeup_reasons &
+ IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT),
+ "Got wake packet but wakeup reason is %x\n",
+ wowlan_status->wakeup_reasons))
+ return true;
+
+ actual_size = len - offsetof(struct iwl_wowlan_wake_pkt_notif,
+ wake_packet);
+
+ /* actual_size includes the padding from the notification, remove it. */
+ if (expected_size < actual_size)
+ actual_size = expected_size;
+ wowlan_status->wake_packet = kmemdup(notif->wake_packet, actual_size,
+ GFP_ATOMIC);
+ if (!wowlan_status->wake_packet)
+ return true;
+
+ wowlan_status->wake_packet_length = expected_size;
+ wowlan_status->wake_packet_bufsize = actual_size;
+
+ return false;
+}
+
+static void
+iwl_mld_set_wake_packet(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ const struct iwl_mld_wowlan_status *wowlan_status,
+ struct cfg80211_wowlan_wakeup *wakeup,
+ struct sk_buff **_pkt)
+{
+ int pkt_bufsize = wowlan_status->wake_packet_bufsize;
+ int expected_pktlen = wowlan_status->wake_packet_length;
+ const u8 *pktdata = wowlan_status->wake_packet;
+ const struct ieee80211_hdr *hdr = (const void *)pktdata;
+ int truncated = expected_pktlen - pkt_bufsize;
+
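+ /* data frames are converted to 802.3 before being reported to
+ * cfg80211; anything else is reported as a raw 802.11 frame
+ */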
+ if (ieee80211_is_data(hdr->frame_control)) {
+ int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ int ivlen = 0, icvlen = 4; /* also FCS */
+
+ struct sk_buff *pkt = alloc_skb(pkt_bufsize, GFP_KERNEL);
+ *_pkt = pkt;
+ if (!pkt)
+ return;
+
+ skb_put_data(pkt, pktdata, hdrlen);
+ pktdata += hdrlen;
+ pkt_bufsize -= hdrlen;
+
+ /* if truncated, FCS/ICV is (partially) gone */
+ if (truncated >= icvlen) {
+ truncated -= icvlen;
+ icvlen = 0;
+ } else {
+ icvlen -= truncated;
+ truncated = 0;
+ }
+
+ pkt_bufsize -= ivlen + icvlen;
+ pktdata += ivlen;
+
+ skb_put_data(pkt, pktdata, pkt_bufsize);
+
+ if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
+ return;
+ wakeup->packet = pkt->data;
+ wakeup->packet_present_len = pkt->len;
+ wakeup->packet_len = pkt->len - truncated;
+ wakeup->packet_80211 = false;
+ } else {
+ int fcslen = 4;
+
+ if (truncated >= 4) {
+ truncated -= 4;
+ fcslen = 0;
+ } else {
+ fcslen -= truncated;
+ truncated = 0;
+ }
+ pkt_bufsize -= fcslen;
+ wakeup->packet = wowlan_status->wake_packet;
+ wakeup->packet_present_len = pkt_bufsize;
+ wakeup->packet_len = expected_pktlen - truncated;
+ wakeup->packet_80211 = true;
+ }
+}
+
+static void
+iwl_mld_report_wowlan_wakeup(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mld_wowlan_status *wowlan_status)
+{
+ struct sk_buff *pkt = NULL;
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .pattern_idx = -1,
+ };
+ u32 reasons = wowlan_status->wakeup_reasons;
+
+ if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
+ ieee80211_report_wowlan_wakeup(vif, NULL, GFP_KERNEL);
+ return;
+ }
+
+ pm_wakeup_event(mld->dev, 0);
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET)
+ wakeup.magic_pkt = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN)
+ wakeup.pattern_idx =
+ wowlan_status->pattern_number;
+
+ if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH |
+ IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE))
+ wakeup.disconnect = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)
+ wakeup.gtk_rekey_failure = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
+ wakeup.rfkill_release = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST)
+ wakeup.eap_identity_req = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE)
+ wakeup.four_way_handshake = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS)
+ wakeup.tcp_connlost = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE)
+ wakeup.tcp_nomoretokens = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET)
+ wakeup.tcp_match = true;
+
+ if (reasons & IWL_WAKEUP_BY_11W_UNPROTECTED_DEAUTH_OR_DISASSOC)
+ wakeup.unprot_deauth_disassoc = true;
+
+ if (wowlan_status->wake_packet)
+ iwl_mld_set_wake_packet(mld, vif, wowlan_status, &wakeup, &pkt);
+
+ ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL);
+ kfree_skb(pkt);
+}
+
+static void
+iwl_mld_set_key_rx_seq_tids(struct ieee80211_key_conf *key,
+ struct ieee80211_key_seq *seq)
+{
+ int tid;
+
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
+ ieee80211_set_key_rx_seq(key, tid, &seq[tid]);
+}
+
+static void
+iwl_mld_set_key_rx_seq(struct ieee80211_key_conf *key,
+ struct iwl_mld_mcast_key_data *key_data)
+{
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ iwl_mld_set_key_rx_seq_tids(key,
+ key_data->gtk.aes_seq);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ iwl_mld_set_key_rx_seq_tids(key,
+ key_data->gtk.tkip_seq);
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ /* igtk/bigtk ciphers */
+ ieee80211_set_key_rx_seq(key, 0,
+ &key_data->igtk_bigtk.cmac_gmac_seq);
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+static void
+iwl_mld_d3_update_mcast_key(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mld_wowlan_status *wowlan_status,
+ struct ieee80211_key_conf *key,
+ struct iwl_mld_mcast_key_data *key_data)
+{
+ if (key->keyidx != key_data->id &&
+ (key->keyidx < 4 || key->keyidx > 5)) {
+ IWL_ERR(mld,
+ "Unexpected keyId mismatch. Old keyId:%d, New keyId:%d\n",
+ key->keyidx, key_data->id);
+ return;
+ }
+
+ /* All installed keys are sent by the FW, even if they weren't
+ * rekeyed during D3.
+ * We remove an existing key if it has the same index as
+ * a new key and a rekey has occurred during D3.
+ */
+ if (wowlan_status->num_of_gtk_rekeys && key_data->len) {
+ if (key->keyidx == 4 || key->keyidx == 5) {
+ struct iwl_mld_vif *mld_vif =
+ iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link *mld_link;
+ int link_id = vif->active_links ?
+ __ffs(vif->active_links) : 0;
+
+ mld_link = iwl_mld_link_dereference_check(mld_vif,
+ link_id);
+ if (WARN_ON(!mld_link))
+ return;
+
+ if (mld_link->igtk == key)
+ mld_link->igtk = NULL;
+ mld->num_igtks--;
+ }
+
+ ieee80211_remove_key(key);
+ return;
+ }
+
+ iwl_mld_set_key_rx_seq(key, key_data);
+}
+
+static void
+iwl_mld_update_ptk_rx_seq(struct iwl_mld *mld,
+ struct iwl_mld_wowlan_status *wowlan_status,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ bool is_tkip)
+{
+ struct iwl_mld_sta *mld_sta =
+ iwl_mld_sta_from_mac80211(sta);
+ struct iwl_mld_ptk_pn *mld_ptk_pn =
+ wiphy_dereference(mld->wiphy,
+ mld_sta->ptk_pn[key->keyidx]);
+
+ iwl_mld_set_key_rx_seq_tids(key, is_tkip ?
+ wowlan_status->ptk.tkip_seq :
+ wowlan_status->ptk.aes_seq);
+ if (is_tkip)
+ return;
+
+ if (WARN_ON(!mld_ptk_pn))
+ return;
+
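+ /* the default RX queue uses mac80211's key seq set above; seed
+ * the driver's per-queue PN tracking for the remaining queues
+ */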
+ for (int tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ for (int i = 1; i < mld->trans->num_rx_queues; i++)
+ memcpy(mld_ptk_pn->q[i].pn[tid],
+ wowlan_status->ptk.aes_seq[tid].ccmp.pn,
+ IEEE80211_CCMP_PN_LEN);
+ }
+}
+
+static void
+iwl_mld_resume_keys_iter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *_data)
+{
+ struct iwl_mld_resume_key_iter_data *data = _data;
+ struct iwl_mld_wowlan_status *wowlan_status = data->wowlan_status;
+ u8 status_idx;
+
+ /* TODO: check key link id (task=MLO) */
+ if (data->unhandled_cipher)
+ return;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ /* ignore WEP completely, nothing to do */
+ return;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ case WLAN_CIPHER_SUITE_TKIP:
+ if (sta) {
+ iwl_mld_update_ptk_rx_seq(data->mld, wowlan_status,
+ sta, key,
+ key->cipher ==
+ WLAN_CIPHER_SUITE_TKIP);
+ return;
+ }
+
+ if (WARN_ON(data->gtk_cipher &&
+ data->gtk_cipher != key->cipher))
+ return;
+
+ data->gtk_cipher = key->cipher;
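+ /* wowlan_status->gtk[] holds up to two GTKs; use entry 1 only
+ * if this key's index matches it, otherwise fall back to entry 0
+ */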
+ status_idx = key->keyidx == wowlan_status->gtk[1].id;
+ iwl_mld_d3_update_mcast_key(data->mld, vif, wowlan_status, key,
+ &wowlan_status->gtk[status_idx]);
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ if (key->keyidx == 4 || key->keyidx == 5) {
+ if (WARN_ON(data->igtk_cipher &&
+ data->igtk_cipher != key->cipher))
+ return;
+
+ data->igtk_cipher = key->cipher;
+ iwl_mld_d3_update_mcast_key(data->mld, vif,
+ wowlan_status,
+ key, &wowlan_status->igtk);
+ }
+ if (key->keyidx == 6 || key->keyidx == 7) {
+ if (WARN_ON(data->bigtk_cipher &&
+ data->bigtk_cipher != key->cipher))
+ return;
+
+ data->bigtk_cipher = key->cipher;
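+ /* same two-entry selection scheme as for the GTKs above */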
+ status_idx = key->keyidx == wowlan_status->bigtk[1].id;
+ iwl_mld_d3_update_mcast_key(data->mld, vif,
+ wowlan_status, key,
+ &wowlan_status->bigtk[status_idx]);
+ }
+ break;
+ default:
+ data->unhandled_cipher = true;
+ return;
+ }
+ data->num_keys++;
+}
+
+static bool
+iwl_mld_add_mcast_rekey(struct ieee80211_vif *vif,
+ struct iwl_mld *mld,
+ struct iwl_mld_mcast_key_data *key_data,
+ struct ieee80211_bss_conf *link_conf,
+ u32 cipher)
+{
+ struct ieee80211_key_conf *key_config;
+ struct {
+ struct ieee80211_key_conf conf;
+ u8 key[WOWLAN_KEY_MAX_SIZE];
+ } conf = {
+ .conf.cipher = cipher,
+ .conf.keyidx = key_data->id,
+ };
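+ /* no active links means a non-MLO connection; pass -1 as the
+ * link id in that case
+ */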
+ int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
+
+ BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP);
+ BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP);
+ BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_GCMP_256);
+ BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_TKIP);
+ BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_BIP_GMAC_128);
+ BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_BIP_GMAC_256);
+ BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_AES_CMAC);
+ BUILD_BUG_ON(sizeof(conf.key) < sizeof(key_data->key));
+
+ if (!key_data->len)
+ return true;
+
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ conf.conf.keylen = WLAN_KEY_LEN_CCMP;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ conf.conf.keylen = WLAN_KEY_LEN_GCMP_256;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ conf.conf.keylen = WLAN_KEY_LEN_TKIP;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_128;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_256;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ conf.conf.keylen = WLAN_KEY_LEN_AES_CMAC;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ conf.conf.keylen = WLAN_KEY_LEN_BIP_CMAC_256;
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ memcpy(conf.conf.key, key_data->key, conf.conf.keylen);
+ key_config = ieee80211_gtk_rekey_add(vif, &conf.conf, link_id);
+ if (IS_ERR(key_config))
+ return false;
+
+ iwl_mld_set_key_rx_seq(key_config, key_data);
+
+ /* The FW holds only one igtk so we keep track of the valid one */
+ if (key_config->keyidx == 4 || key_config->keyidx == 5) {
+ struct iwl_mld_link *mld_link =
+ iwl_mld_link_from_mac80211(link_conf);
+ mld_link->igtk = key_config;
+ mld->num_igtks++;
+ }
+ return true;
+}
+
+static bool
+iwl_mld_add_all_rekeys(struct ieee80211_vif *vif,
+ struct iwl_mld_wowlan_status *wowlan_status,
+ struct iwl_mld_resume_key_iter_data *key_iter_data,
+ struct ieee80211_bss_conf *link_conf)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wowlan_status->gtk); i++)
+ if (!iwl_mld_add_mcast_rekey(vif, key_iter_data->mld,
+ &wowlan_status->gtk[i],
+ link_conf,
+ key_iter_data->gtk_cipher))
+ return false;
+
+ if (!iwl_mld_add_mcast_rekey(vif, key_iter_data->mld,
+ &wowlan_status->igtk,
+ link_conf, key_iter_data->igtk_cipher))
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(wowlan_status->bigtk); i++)
+ if (!iwl_mld_add_mcast_rekey(vif, key_iter_data->mld,
+ &wowlan_status->bigtk[i],
+ link_conf,
+ key_iter_data->bigtk_cipher))
+ return false;
+
+ return true;
+}
+
+static bool
+iwl_mld_update_sec_keys(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mld_wowlan_status *wowlan_status)
+{
+ int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+ __be64 replay_ctr = cpu_to_be64(wowlan_status->replay_ctr);
+ struct iwl_mld_resume_key_iter_data key_iter_data = {
+ .mld = mld,
+ .wowlan_status = wowlan_status,
+ };
+
+ if (WARN_ON(!link_conf))
+ return false;
+
+ ieee80211_iter_keys(mld->hw, vif, iwl_mld_resume_keys_iter,
+ &key_iter_data);
+
+ if (key_iter_data.unhandled_cipher)
+ return false;
+
+ IWL_DEBUG_WOWLAN(mld,
+ "Number of installed keys: %d, Number of rekeys: %d\n",
+ key_iter_data.num_keys,
+ wowlan_status->num_of_gtk_rekeys);
+
+ if (!key_iter_data.num_keys || !wowlan_status->num_of_gtk_rekeys)
+ return true;
+
+ iwl_mld_add_all_rekeys(vif, wowlan_status, &key_iter_data,
+ link_conf);
+
+ ieee80211_gtk_rekey_notify(vif, link_conf->bssid,
+ (void *)&replay_ctr, GFP_KERNEL);
+ /* TODO: MLO rekey (task=MLO) */
+ return true;
+}
+
+static bool
+iwl_mld_process_wowlan_status(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mld_wowlan_status *wowlan_status)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct ieee80211_sta *ap_sta = mld_vif->ap_sta;
+ struct iwl_mld_txq *mld_txq;
+
+ iwl_mld_report_wowlan_wakeup(mld, vif, wowlan_status);
+
+ if (WARN_ON(!ap_sta))
+ return false;
+
+ mld_txq =
+ iwl_mld_txq_from_mac80211(ap_sta->txq[wowlan_status->tid_offloaded_tx]);
+
+ /* Update the pointers of the Tx queue that may have moved during
+ * suspend if the firmware sent frames.
+ * The firmware stores last-used value, we store next value.
+ */
+ WARN_ON(!mld_txq->status.allocated);
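+ /* last_qos_seq is in sequence-control units (SN << 4); adding
+ * 0x10 and shifting yields the next SN for the queue pointer
+ */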
+ iwl_trans_set_q_ptrs(mld->trans, mld_txq->fw_id,
+ (wowlan_status->last_qos_seq +
+ 0x10) >> 4);
+
+ if (!iwl_mld_update_sec_keys(mld, vif, wowlan_status))
+ return false;
+
+ if (wowlan_status->wakeup_reasons &
+ (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH |
+ IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE))
+ return false;
+
+ return true;
+}
+
+static bool
+iwl_mld_netdetect_match_info_handler(struct iwl_mld *mld,
+ struct iwl_mld_resume_data *resume_data,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_mld_netdetect_res *results = resume_data->netdetect_res;
+ const struct iwl_scan_offload_match_info *notif = (void *)pkt->data;
+ u32 len = iwl_rx_packet_payload_len(pkt);
+
+ if (IWL_FW_CHECK(mld, !mld->netdetect,
+ "Got scan match info notif when mld->netdetect==%d\n",
+ mld->netdetect))
+ return true;
+
+ if (IWL_FW_CHECK(mld, len < sizeof(*notif),
+ "Invalid scan offload match notif of length: %d\n",
+ len))
+ return true;
+
+ if (IWL_FW_CHECK(mld, resume_data->wowlan_status->wakeup_reasons !=
+ IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS,
+ "Ignore scan match info: unexpected wakeup reason (expected=0x%x got=0x%x)\n",
+ IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS,
+ resume_data->wowlan_status->wakeup_reasons))
+ return true;
+
+ results->matched_profiles = le32_to_cpu(notif->matched_profiles);
+ IWL_DEBUG_WOWLAN(mld, "number of matched profiles=%u\n",
+ results->matched_profiles);
+
+ if (results->matched_profiles)
+ memcpy(results->matches, notif->matches,
+ NETDETECT_QUERY_BUF_LEN);
+
+ /* No scan should be active at this point */
+ mld->scan.status = 0;
+ memset(mld->scan.uid_status, 0, sizeof(mld->scan.uid_status));
+ return false;
+}
+
+static void
+iwl_mld_set_netdetect_info(struct iwl_mld *mld,
+ const struct cfg80211_sched_scan_request *netdetect_cfg,
+ struct cfg80211_wowlan_nd_info *netdetect_info,
+ struct iwl_mld_netdetect_res *netdetect_res,
+ unsigned long matched_profiles)
+{
+ int i;
+
+ for_each_set_bit(i, &matched_profiles, netdetect_cfg->n_match_sets) {
+ struct cfg80211_wowlan_nd_match *match;
+ int idx, j, n_channels = 0;
+ struct iwl_scan_offload_profile_match *matches =
+ (void *)netdetect_res->matches;
+
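+ /* count how many channels this profile matched on, from the
+ * FW's per-profile channel bitmap
+ */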
+ for (int k = 0; k < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; k++)
+ n_channels +=
+ hweight8(matches[i].matching_channels[k]);
+ match = kzalloc(struct_size(match, channels, n_channels),
+ GFP_KERNEL);
+ if (!match)
+ return;
+
+ netdetect_info->matches[netdetect_info->n_matches++] = match;
+
+ /* We inverted the order of the SSIDs in the scan
+ * request, so invert the index here.
+ */
+ idx = netdetect_cfg->n_match_sets - i - 1;
+ match->ssid.ssid_len =
+ netdetect_cfg->match_sets[idx].ssid.ssid_len;
+ memcpy(match->ssid.ssid,
+ netdetect_cfg->match_sets[idx].ssid.ssid,
+ match->ssid.ssid_len);
+
+ if (netdetect_cfg->n_channels < n_channels)
+ continue;
+
+ for_each_set_bit(j,
+ (unsigned long *)&matches[i].matching_channels[0],
+ sizeof(matches[i].matching_channels))
+ match->channels[match->n_channels++] =
+ netdetect_cfg->channels[j]->center_freq;
+ }
+}
+
+static void
+iwl_mld_process_netdetect_res(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mld_resume_data *resume_data)
+{
+ struct cfg80211_wowlan_nd_info *netdetect_info = NULL;
+ const struct cfg80211_sched_scan_request *netdetect_cfg;
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .pattern_idx = -1,
+ };
+ struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
+ unsigned long matched_profiles;
+ u32 wakeup_reasons;
+ int n_matches;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (WARN_ON(!mld->wiphy->wowlan_config ||
+ !mld->wiphy->wowlan_config->nd_config)) {
+ IWL_DEBUG_WOWLAN(mld,
+ "Netdetect isn't configured on resume flow\n");
+ goto out;
+ }
+
+ netdetect_cfg = mld->wiphy->wowlan_config->nd_config;
+ wakeup_reasons = resume_data->wowlan_status->wakeup_reasons;
+
+ if (wakeup_reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
+ wakeup.rfkill_release = true;
+
+ if (wakeup_reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS)
+ goto out;
+
+ if (!resume_data->netdetect_res->matched_profiles) {
+ IWL_DEBUG_WOWLAN(mld,
+ "Netdetect results aren't valid\n");
+ wakeup_report = NULL;
+ goto out;
+ }
+
+ matched_profiles = resume_data->netdetect_res->matched_profiles;
+ if (!netdetect_cfg->n_match_sets) {
+ IWL_DEBUG_WOWLAN(mld,
+ "No netdetect match sets are configured\n");
+ goto out;
+ }
+ n_matches = hweight_long(matched_profiles);
+ netdetect_info = kzalloc(struct_size(netdetect_info, matches,
+ n_matches), GFP_KERNEL);
+ if (netdetect_info)
+ iwl_mld_set_netdetect_info(mld, netdetect_cfg, netdetect_info,
+ resume_data->netdetect_res,
+ matched_profiles);
+
+ wakeup.net_detect = netdetect_info;
+ out:
+ ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
+ if (netdetect_info) {
+ for (int i = 0; i < netdetect_info->n_matches; i++)
+ kfree(netdetect_info->matches[i]);
+ kfree(netdetect_info);
+ }
+}
+
+static bool iwl_mld_handle_d3_notif(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mld_resume_data *resume_data = data;
+ struct iwl_mld *mld =
+ container_of(notif_wait, struct iwl_mld, notif_wait);
+
+ switch (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) {
+ case WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_INFO_NOTIFICATION): {
+ if (resume_data->notifs_received & IWL_D3_NOTIF_WOWLAN_INFO) {
+ IWL_DEBUG_WOWLAN(mld,
+ "got additional wowlan_info notif\n");
+ break;
+ }
+ resume_data->notif_handling_err =
+ iwl_mld_handle_wowlan_info_notif(mld,
+ resume_data->wowlan_status,
+ pkt);
+ resume_data->notifs_received |= IWL_D3_NOTIF_WOWLAN_INFO;
+
+ if (resume_data->wowlan_status->wakeup_reasons &
+ IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT)
+ resume_data->notifs_expected |=
+ IWL_D3_NOTIF_WOWLAN_WAKE_PKT;
+ break;
+ }
+ case WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_WAKE_PKT_NOTIFICATION): {
+ if (resume_data->notifs_received &
+ IWL_D3_NOTIF_WOWLAN_WAKE_PKT) {
+ /* We shouldn't get two wake packet notifications */
+ IWL_DEBUG_WOWLAN(mld,
+ "Got additional wowlan wake packet notification\n");
+ break;
+ }
+ resume_data->notif_handling_err =
+ iwl_mld_handle_wake_pkt_notif(mld,
+ resume_data->wowlan_status,
+ pkt);
+ resume_data->notifs_received |= IWL_D3_NOTIF_WOWLAN_WAKE_PKT;
+ break;
+ }
+ case WIDE_ID(SCAN_GROUP, OFFLOAD_MATCH_INFO_NOTIF): {
+ if (resume_data->notifs_received & IWL_D3_ND_MATCH_INFO) {
+ IWL_ERR(mld,
+ "Got additional netdetect match info\n");
+ break;
+ }
+
+ resume_data->notif_handling_err =
+ iwl_mld_netdetect_match_info_handler(mld, resume_data,
+ pkt);
+ resume_data->notifs_received |= IWL_D3_ND_MATCH_INFO;
+ break;
+ }
+ case WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION): {
+ struct iwl_d3_end_notif *notif = (void *)pkt->data;
+
+ resume_data->d3_end_flags = le32_to_cpu(notif->flags);
+ resume_data->notifs_received |= IWL_D3_NOTIF_D3_END_NOTIF;
+ break;
+ }
+ default:
+ WARN_ON(1);
+ }
+
+ return resume_data->notifs_received == resume_data->notifs_expected;
+}
+
+#define IWL_MLD_D3_NOTIF_TIMEOUT (HZ / 3)
+
+static int iwl_mld_wait_d3_notif(struct iwl_mld *mld,
+ struct iwl_mld_resume_data *resume_data,
+ bool with_wowlan)
+{
+ static const u16 wowlan_resume_notif[] = {
+ WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_INFO_NOTIFICATION),
+ WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_WAKE_PKT_NOTIFICATION),
+ WIDE_ID(SCAN_GROUP, OFFLOAD_MATCH_INFO_NOTIF),
+ WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION)
+ };
+ static const u16 d3_resume_notif[] = {
+ WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION)
+ };
+ struct iwl_notification_wait wait_d3_notif;
+ enum iwl_d3_status d3_status;
+ int ret;
+
+ if (with_wowlan)
+ iwl_init_notification_wait(&mld->notif_wait, &wait_d3_notif,
+ wowlan_resume_notif,
+ ARRAY_SIZE(wowlan_resume_notif),
+ iwl_mld_handle_d3_notif,
+ resume_data);
+ else
+ iwl_init_notification_wait(&mld->notif_wait, &wait_d3_notif,
+ d3_resume_notif,
+ ARRAY_SIZE(d3_resume_notif),
+ iwl_mld_handle_d3_notif,
+ resume_data);
+
+ ret = iwl_trans_d3_resume(mld->trans, &d3_status, false, false);
+ if (ret || d3_status != IWL_D3_STATUS_ALIVE) {
+ if (d3_status != IWL_D3_STATUS_ALIVE) {
+ IWL_INFO(mld, "Device was reset during suspend\n");
+ ret = -ENOENT;
+ } else {
+ IWL_ERR(mld, "Transport resume failed\n");
+ }
+ iwl_remove_notification(&mld->notif_wait, &wait_d3_notif);
+ return ret;
+ }
+
+ ret = iwl_wait_notification(&mld->notif_wait, &wait_d3_notif,
+ IWL_MLD_D3_NOTIF_TIMEOUT);
+ if (ret)
+ IWL_ERR(mld, "Couldn't get the d3 notif %d\n", ret);
+
+ if (resume_data->notif_handling_err)
+ ret = -EIO;
+
+ return ret;
+}
+
+int iwl_mld_no_wowlan_suspend(struct iwl_mld *mld)
+{
+ struct iwl_d3_manager_config d3_cfg_cmd_data = {};
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ IWL_DEBUG_WOWLAN(mld, "Starting the no wowlan suspend flow\n");
+
+ iwl_mld_low_latency_stop(mld);
+
+ /* This will happen if iwl_mld_suspend failed with a FW error */
+ if (mld->trans->state == IWL_TRANS_NO_FW &&
+ test_bit(STATUS_FW_ERROR, &mld->trans->status))
+ return -ENODEV;
+
+ ret = iwl_mld_update_device_power(mld, true);
+ if (ret) {
+ IWL_ERR(mld,
+ "d3 suspend: couldn't send power_device %d\n", ret);
+ goto out;
+ }
+
+ ret = iwl_mld_send_cmd_pdu(mld, D3_CONFIG_CMD,
+ &d3_cfg_cmd_data);
+ if (ret) {
+ IWL_ERR(mld,
+ "d3 suspend: couldn't send D3_CONFIG_CMD %d\n", ret);
+ goto out;
+ }
+
+ ret = iwl_trans_d3_suspend(mld->trans, false, false);
+ if (ret) {
+ IWL_ERR(mld, "d3 suspend: trans_d3_suspend failed %d\n", ret);
+ } else {
+ mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
+ mld->fw_status.in_d3 = true;
+ }
+
+ out:
+ if (ret) {
+ mld->trans->state = IWL_TRANS_NO_FW;
+ set_bit(STATUS_FW_ERROR, &mld->trans->status);
+ }
+
+ return ret;
+}
+
+int iwl_mld_no_wowlan_resume(struct iwl_mld *mld)
+{
+ struct iwl_mld_resume_data resume_data = {
+ .notifs_expected =
+ IWL_D3_NOTIF_D3_END_NOTIF,
+ };
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ IWL_DEBUG_WOWLAN(mld, "Starting the no wowlan resume flow\n");
+
+ mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+ mld->fw_status.in_d3 = false;
+ iwl_fw_dbg_read_d3_debug_data(&mld->fwrt);
+
+ if (iwl_mld_fw_needs_restart(mld, NULL))
+ ret = -ENODEV;
+ else
+ ret = iwl_mld_wait_d3_notif(mld, &resume_data, false);
+
+ if (!ret && (resume_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE))
+ return -ENODEV;
+
+ if (ret) {
+ mld->trans->state = IWL_TRANS_NO_FW;
+ set_bit(STATUS_FW_ERROR, &mld->trans->status);
+ return ret;
+ }
+ iwl_mld_low_latency_restart(mld);
+
+ return iwl_mld_update_device_power(mld, false);
+}
+
+static void
+iwl_mld_aes_seq_to_le64_pn(struct ieee80211_key_conf *key,
+ __le64 *key_rsc)
+{
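+ /* mac80211 keeps the PN big-endian (pn[0] is the MSB); pack it
+ * into the little-endian 48-bit value the FW command expects
+ */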
+ for (int i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ struct ieee80211_key_seq seq;
+ u8 *pn = key->cipher == WLAN_CIPHER_SUITE_CCMP ? seq.ccmp.pn :
+ seq.gcmp.pn;
+
+ ieee80211_get_key_rx_seq(key, i, &seq);
+ key_rsc[i] = cpu_to_le64((u64)pn[5] |
+ ((u64)pn[4] << 8) |
+ ((u64)pn[3] << 16) |
+ ((u64)pn[2] << 24) |
+ ((u64)pn[1] << 32) |
+ ((u64)pn[0] << 40));
+ }
+}
+
+static void
+iwl_mld_suspend_set_ucast_pn(struct iwl_mld *mld, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key, __le64 *key_rsc)
+{
+ struct iwl_mld_sta *mld_sta =
+ iwl_mld_sta_from_mac80211(sta);
+ struct iwl_mld_ptk_pn *mld_ptk_pn;
+
+ if (WARN_ON(key->keyidx >= ARRAY_SIZE(mld_sta->ptk_pn)))
+ return;
+
+ mld_ptk_pn = wiphy_dereference(mld->wiphy,
+ mld_sta->ptk_pn[key->keyidx]);
+ if (WARN_ON(!mld_ptk_pn))
+ return;
+
+ for (int tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ struct ieee80211_key_seq seq;
+ u8 *max_pn = seq.ccmp.pn;
+
+ /* get the PN from mac80211, used on the default queue */
+ ieee80211_get_key_rx_seq(key, tid, &seq);
+
+ /* and use the internal data for all queues */
+ for (int que = 1; que < mld->trans->num_rx_queues; que++) {
+ u8 *cur_pn = mld_ptk_pn->q[que].pn[tid];
+
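+ /* PNs are stored MSB first, so memcmp() compares them
+ * numerically; keep the largest one seen so far
+ */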
+ if (memcmp(max_pn, cur_pn, IEEE80211_CCMP_PN_LEN) < 0)
+ max_pn = cur_pn;
+ }
+ key_rsc[tid] = cpu_to_le64((u64)max_pn[5] |
+ ((u64)max_pn[4] << 8) |
+ ((u64)max_pn[3] << 16) |
+ ((u64)max_pn[2] << 24) |
+ ((u64)max_pn[1] << 32) |
+ ((u64)max_pn[0] << 40));
+ }
+}
+
+static void
+iwl_mld_suspend_convert_tkip_ipn(struct ieee80211_key_conf *key,
+ __le64 *rsc)
+{
+ struct ieee80211_key_seq seq;
+
+ for (int i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ ieee80211_get_key_rx_seq(key, i, &seq);
+ rsc[i] =
+ cpu_to_le64(((u64)seq.tkip.iv32 << 16) |
+ seq.tkip.iv16);
+ }
+}
+
+static void
+iwl_mld_suspend_key_data_iter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *_data)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_suspend_key_iter_data *data = _data;
+ __le64 *key_rsc;
+ __le32 cipher = 0;
+
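+ /* the pairwise/group cases below fall through on purpose; each
+ * one only sets the cipher flag if a more specific case above
+ * did not already do so
+ */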
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ cipher = cpu_to_le32(STA_KEY_FLG_CCM);
+ fallthrough;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ if (!cipher)
+ cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
+ fallthrough;
+ case WLAN_CIPHER_SUITE_TKIP:
+ if (!cipher)
+ cipher = cpu_to_le32(STA_KEY_FLG_TKIP);
+ if (sta) {
+ key_rsc = data->rsc->ucast_rsc;
+ if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
+ iwl_mld_suspend_convert_tkip_ipn(key, key_rsc);
+ else
+ iwl_mld_suspend_set_ucast_pn(mld, sta, key,
+ key_rsc);
+
+ data->have_rsc = true;
+ return;
+ }
+ /* We're iterating from old to new; there are 4 possible
+ * GTK ids, and only the last two keys matter
+ */
+ if (WARN_ON(data->gtks >=
+ ARRAY_SIZE(data->found_gtk_idx)))
+ return;
+
+ if (WARN_ON(key->keyidx >=
+ ARRAY_SIZE(data->rsc->mcast_key_id_map)))
+ return;
+ data->gtk_cipher = cipher;
+ data->found_gtk_idx[data->gtks] = key->keyidx;
+ key_rsc = data->rsc->mcast_rsc[data->gtks % 2];
+ data->rsc->mcast_key_id_map[key->keyidx] =
+ data->gtks % 2;
+
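+ /* only two RSC slots exist; when a slot is reused, invalidate
+ * the map entry of the key that previously occupied it
+ */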
+ if (data->gtks >= 2) {
+ int prev = data->gtks % 2;
+ int prev_idx = data->found_gtk_idx[prev];
+
+ data->rsc->mcast_key_id_map[prev_idx] =
+ IWL_MCAST_KEY_MAP_INVALID;
+ }
+
+ if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
+ iwl_mld_suspend_convert_tkip_ipn(key, key_rsc);
+ else
+ iwl_mld_aes_seq_to_le64_pn(key, key_rsc);
+
+ data->gtks++;
+ data->have_rsc = true;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
+ fallthrough;
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ if (!cipher)
+ cipher = cpu_to_le32(STA_KEY_FLG_CCM);
+ if (key->keyidx == 4 || key->keyidx == 5)
+ data->igtk_cipher = cipher;
+
+ if (key->keyidx == 6 || key->keyidx == 7)
+ data->bigtk_cipher = cipher;
+
+ break;
+ }
+}
+
+static int
+iwl_mld_send_kek_kck_cmd(struct iwl_mld *mld,
+ struct iwl_mld_vif *mld_vif,
+ struct iwl_mld_suspend_key_iter_data data,
+ int ap_sta_id)
+{
+ struct iwl_wowlan_kek_kck_material_cmd_v4 kek_kck_cmd = {};
+ struct iwl_mld_rekey_data *rekey_data =
+ &mld_vif->wowlan_data.rekey_data;
+
+ memcpy(kek_kck_cmd.kck, rekey_data->kck,
+ rekey_data->kck_len);
+ kek_kck_cmd.kck_len = cpu_to_le16(rekey_data->kck_len);
+ memcpy(kek_kck_cmd.kek, rekey_data->kek,
+ rekey_data->kek_len);
+ kek_kck_cmd.kek_len = cpu_to_le16(rekey_data->kek_len);
+ kek_kck_cmd.replay_ctr = rekey_data->replay_ctr;
+ kek_kck_cmd.akm = cpu_to_le32(rekey_data->akm);
+ kek_kck_cmd.sta_id = cpu_to_le32(ap_sta_id);
+ kek_kck_cmd.gtk_cipher = data.gtk_cipher;
+ kek_kck_cmd.igtk_cipher = data.igtk_cipher;
+ kek_kck_cmd.bigtk_cipher = data.bigtk_cipher;
+
+ IWL_DEBUG_WOWLAN(mld, "setting akm %d\n",
+ rekey_data->akm);
+
+ return iwl_mld_send_cmd_pdu(mld, WOWLAN_KEK_KCK_MATERIAL,
+ &kek_kck_cmd);
+}
+
+static int
+iwl_mld_suspend_send_security_cmds(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mld_vif *mld_vif,
+ int ap_sta_id)
+{
+ struct iwl_mld_suspend_key_iter_data data = {};
+ int ret;
+
+ data.rsc = kzalloc(sizeof(*data.rsc), GFP_KERNEL);
+ if (!data.rsc)
+ return -ENOMEM;
+
+ memset(data.rsc->mcast_key_id_map, IWL_MCAST_KEY_MAP_INVALID,
+ ARRAY_SIZE(data.rsc->mcast_key_id_map));
+
+ data.rsc->sta_id = cpu_to_le32(ap_sta_id);
+ ieee80211_iter_keys(mld->hw, vif,
+ iwl_mld_suspend_key_data_iter,
+ &data);
+
+ if (data.have_rsc)
+ ret = iwl_mld_send_cmd_pdu(mld, WOWLAN_TSC_RSC_PARAM,
+ data.rsc);
+ else
+ ret = 0;
+
+ if (!ret && mld_vif->wowlan_data.rekey_data.valid)
+ ret = iwl_mld_send_kek_kck_cmd(mld, mld_vif, data, ap_sta_id);
+
+ kfree(data.rsc);
+
+ return ret;
+}
+
+static void
+iwl_mld_set_wowlan_config_cmd(struct iwl_mld *mld,
+ struct cfg80211_wowlan *wowlan,
+ struct iwl_wowlan_config_cmd *wowlan_config_cmd,
+ struct ieee80211_sta *ap_sta)
+{
+ wowlan_config_cmd->is_11n_connection =
+ ap_sta->deflink.ht_cap.ht_supported;
+ wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
+ ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
+
+ if (ap_sta->mfp)
+ wowlan_config_cmd->flags |= IS_11W_ASSOC;
+
+ if (wowlan->disconnect)
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
+ IWL_WOWLAN_WAKEUP_LINK_CHANGE);
+ if (wowlan->magic_pkt)
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
+ if (wowlan->gtk_rekey_failure)
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
+ if (wowlan->eap_identity_req)
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
+ if (wowlan->four_way_handshake)
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
+ if (wowlan->n_patterns)
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
+
+ if (wowlan->rfkill_release)
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
+
+ if (wowlan->any) {
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
+ IWL_WOWLAN_WAKEUP_LINK_CHANGE |
+ IWL_WOWLAN_WAKEUP_RX_FRAME |
+ IWL_WOWLAN_WAKEUP_BCN_FILTERING);
+ }
+}
+
+static int iwl_mld_send_patterns(struct iwl_mld *mld,
+ struct cfg80211_wowlan *wowlan,
+ int ap_sta_id)
+{
+ struct iwl_wowlan_patterns_cmd *pattern_cmd;
+ struct iwl_host_cmd cmd = {
+ .id = WOWLAN_PATTERNS,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+ int ret;
+
+ if (!wowlan->n_patterns)
+ return 0;
+
+ cmd.len[0] = struct_size(pattern_cmd, patterns, wowlan->n_patterns);
+
+ pattern_cmd = kzalloc(cmd.len[0], GFP_KERNEL);
+ if (!pattern_cmd)
+ return -ENOMEM;
+
+ pattern_cmd->n_patterns = wowlan->n_patterns;
+ pattern_cmd->sta_id = ap_sta_id;
+
+ for (int i = 0; i < wowlan->n_patterns; i++) {
+ int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
+
+ pattern_cmd->patterns[i].pattern_type =
+ WOWLAN_PATTERN_TYPE_BITMASK;
+
+ memcpy(&pattern_cmd->patterns[i].u.bitmask.mask,
+ wowlan->patterns[i].mask, mask_len);
+ memcpy(&pattern_cmd->patterns[i].u.bitmask.pattern,
+ wowlan->patterns[i].pattern,
+ wowlan->patterns[i].pattern_len);
+ pattern_cmd->patterns[i].u.bitmask.mask_size = mask_len;
+ pattern_cmd->patterns[i].u.bitmask.pattern_size =
+ wowlan->patterns[i].pattern_len;
+ }
+
+ cmd.data[0] = pattern_cmd;
+ ret = iwl_mld_send_cmd(mld, &cmd);
+ kfree(pattern_cmd);
+ return ret;
+}
+
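+/* Configure ARP (IPv4) and NS (IPv6) protocol offload so the firmware can
+ * answer on behalf of the host while it is asleep. IPv6 target addresses
+ * are grouped by their solicited-node multicast address.
+ */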
+static int
+iwl_mld_send_proto_offload(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ u8 ap_sta_id)
+{
+ struct iwl_proto_offload_cmd_v4 *cmd __free(kfree);
+ struct iwl_host_cmd hcmd = {
+ .id = PROT_OFFLOAD_CONFIG_CMD,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ .len[0] = sizeof(*cmd),
+ };
+ u32 enabled = 0;
+
+ cmd = kzalloc(hcmd.len[0], GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_wowlan_data *wowlan_data = &mld_vif->wowlan_data;
+ struct iwl_ns_config *nsc;
+ struct iwl_targ_addr *addrs;
+ int n_nsc, n_addrs;
+ int i, c;
+ int num_skipped = 0;
+
+ nsc = cmd->ns_config;
+ n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
+ addrs = cmd->targ_addrs;
+ n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
+
+ /* For each address we have (and that will fit), fill a target
+ * address struct and combine the solicited-node addresses into
+ * the NS offload structs.
+ */
+ for (i = 0, c = 0;
+ i < wowlan_data->num_target_ipv6_addrs &&
+ i < n_addrs && c < n_nsc; i++) {
+ int j;
+ struct in6_addr solicited_addr;
+
+ /* Because NS is offloaded, skip tentative addresses to avoid
+ * violating RFC 4862.
+ */
+ if (test_bit(i, wowlan_data->tentative_addrs)) {
+ num_skipped++;
+ continue;
+ }
+
+ addrconf_addr_solict_mult(&wowlan_data->target_ipv6_addrs[i],
+ &solicited_addr);
+ for (j = 0; j < c; j++)
+ if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
+ &solicited_addr) == 0)
+ break;
+ if (j == c)
+ c++;
+ addrs[i].addr = wowlan_data->target_ipv6_addrs[i];
+ addrs[i].config_num = cpu_to_le32(j);
+ nsc[j].dest_ipv6_addr = solicited_addr;
+ memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
+ }
+
+ if (wowlan_data->num_target_ipv6_addrs - num_skipped)
+ enabled |= IWL_D3_PROTO_IPV6_VALID;
+
+ cmd->num_valid_ipv6_addrs = cpu_to_le32(i - num_skipped);
+ if (enabled & IWL_D3_PROTO_IPV6_VALID)
+ enabled |= IWL_D3_PROTO_OFFLOAD_NS;
+#endif
+
+ if (vif->cfg.arp_addr_cnt) {
+ enabled |= IWL_D3_PROTO_OFFLOAD_ARP | IWL_D3_PROTO_IPV4_VALID;
+ cmd->common.host_ipv4_addr = vif->cfg.arp_addr_list[0];
+ ether_addr_copy(cmd->common.arp_mac_addr, vif->addr);
+ }
+
+ enabled |= IWL_D3_PROTO_OFFLOAD_BTM;
+ cmd->common.enabled = cpu_to_le32(enabled);
+ cmd->sta_id = cpu_to_le32(ap_sta_id);
+ hcmd.data[0] = cmd;
+ return iwl_mld_send_cmd(mld, &hcmd);
+}
+
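+/* Full wowlan configuration for an associated vif: block EMLSR, make sure
+ * the offloading TID has a queue, then send the configuration, security,
+ * pattern and protocol-offload commands, and finally enable beacon
+ * filtering and MAC power management.
+ */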
+static int
+iwl_mld_wowlan_config(struct iwl_mld *mld, struct ieee80211_vif *bss_vif,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(bss_vif);
+ struct ieee80211_sta *ap_sta = mld_vif->ap_sta;
+ struct iwl_wowlan_config_cmd wowlan_config_cmd = {
+ .offloading_tid = IWL_WOWLAN_OFFLOAD_TID,
+ };
+ u32 sta_id_mask;
+ int ap_sta_id, ret;
+ int link_id = iwl_mld_get_primary_link(bss_vif);
+ struct ieee80211_bss_conf *link_conf;
+
+ ret = iwl_mld_block_emlsr_sync(mld, bss_vif,
+ IWL_MLD_EMLSR_BLOCKED_WOWLAN, link_id);
+ if (ret)
+ return ret;
+
+ link_conf = link_conf_dereference_protected(bss_vif, link_id);
+
+ if (WARN_ON(!ap_sta || !link_conf))
+ return -EINVAL;
+
+ sta_id_mask = iwl_mld_fw_sta_id_mask(mld, ap_sta);
+ if (WARN_ON(hweight32(sta_id_mask) != 1))
+ return -EINVAL;
+
+ ap_sta_id = __ffs(sta_id_mask);
+ wowlan_config_cmd.sta_id = ap_sta_id;
+
+ ret = iwl_mld_ensure_queue(mld,
+ ap_sta->txq[wowlan_config_cmd.offloading_tid]);
+ if (ret)
+ return ret;
+
+ iwl_mld_set_wowlan_config_cmd(mld, wowlan,
+ &wowlan_config_cmd, ap_sta);
+ ret = iwl_mld_send_cmd_pdu(mld, WOWLAN_CONFIGURATION,
+ &wowlan_config_cmd);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_suspend_send_security_cmds(mld, bss_vif, mld_vif,
+ ap_sta_id);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_send_patterns(mld, wowlan, ap_sta_id);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_send_proto_offload(mld, bss_vif, ap_sta_id);
+ if (ret)
+ return ret;
+
+ iwl_mld_enable_beacon_filter(mld, link_conf, true);
+ return iwl_mld_update_mac_power(mld, bss_vif, true);
+}
+
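+/* Suspend entry point: when the vif is not associated this must be a
+ * netdetect request, otherwise run the full wowlan configuration.
+ */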
+int iwl_mld_wowlan_suspend(struct iwl_mld *mld, struct cfg80211_wowlan *wowlan)
+{
+ struct ieee80211_vif *bss_vif;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (WARN_ON(!wowlan))
+ return 1;
+
+ IWL_DEBUG_WOWLAN(mld, "Starting the wowlan suspend flow\n");
+
+ bss_vif = iwl_mld_get_bss_vif(mld);
+ if (WARN_ON(!bss_vif))
+ return 1;
+
+ if (!bss_vif->cfg.assoc) {
+ int ret;
+ /* If we're not associated, this must be netdetect */
+ if (WARN_ON(!wowlan->nd_config))
+ return 1;
+
+ ret = iwl_mld_netdetect_config(mld, bss_vif, wowlan);
+ if (!ret)
+ mld->netdetect = true;
+
+ return ret;
+ }
+
+ return iwl_mld_wowlan_config(mld, bss_vif, wowlan);
+}
+
+/* Returns 0 on success, 1 if an error occurred in the firmware during D3.
+ * A negative value is expected only in unrecoverable cases.
+ */
+int iwl_mld_wowlan_resume(struct iwl_mld *mld)
+{
+ struct ieee80211_vif *bss_vif;
+ struct ieee80211_bss_conf *link_conf;
+ struct iwl_mld_netdetect_res netdetect_res;
+ struct iwl_mld_resume_data resume_data = {
+ .notifs_expected =
+ IWL_D3_NOTIF_WOWLAN_INFO |
+ IWL_D3_NOTIF_D3_END_NOTIF,
+ .netdetect_res = &netdetect_res,
+ };
+ int link_id;
+ int ret;
+ bool fw_err = false;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ IWL_DEBUG_WOWLAN(mld, "Starting the wowlan resume flow\n");
+
+ mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+ if (!mld->fw_status.in_d3) {
+ IWL_DEBUG_WOWLAN(mld,
+ "Device_powered_off() was called during wowlan\n");
+ goto err;
+ }
+
+ mld->fw_status.in_d3 = false;
+ mld->scan.last_start_time_jiffies = jiffies;
+
+ bss_vif = iwl_mld_get_bss_vif(mld);
+ if (WARN_ON(!bss_vif))
+ goto err;
+
+ /* We can't have several links upon wowlan entry;
+ * this is enforced in the suspend flow.
+ */
+ WARN_ON(hweight16(bss_vif->active_links) > 1);
+ link_id = bss_vif->active_links ? __ffs(bss_vif->active_links) : 0;
+ link_conf = link_conf_dereference_protected(bss_vif, link_id);
+
+ if (WARN_ON(!link_conf))
+ goto err;
+
+ iwl_fw_dbg_read_d3_debug_data(&mld->fwrt);
+
+ if (iwl_mld_fw_needs_restart(mld, bss_vif)) {
+ fw_err = true;
+ goto err;
+ }
+
+ resume_data.wowlan_status = kzalloc(sizeof(*resume_data.wowlan_status),
+ GFP_KERNEL);
+ if (!resume_data.wowlan_status)
+ return -1;
+
+ if (mld->netdetect)
+ resume_data.notifs_expected |= IWL_D3_ND_MATCH_INFO;
+
+ ret = iwl_mld_wait_d3_notif(mld, &resume_data, true);
+ if (ret) {
+ IWL_ERR(mld, "Couldn't get the d3 notifs %d\n", ret);
+ fw_err = true;
+ goto err;
+ }
+
+ if (resume_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE) {
+ mld->fw_status.in_hw_restart = true;
+ goto process_wakeup_results;
+ }
+
+ iwl_mld_update_changed_regdomain(mld);
+ iwl_mld_update_mac_power(mld, bss_vif, false);
+ iwl_mld_enable_beacon_filter(mld, link_conf, false);
+ iwl_mld_update_device_power(mld, false);
+
+ if (mld->netdetect)
+ ret = iwl_mld_scan_stop(mld, IWL_MLD_SCAN_NETDETECT, false);
+
+ process_wakeup_results:
+ if (mld->netdetect) {
+ iwl_mld_process_netdetect_res(mld, bss_vif, &resume_data);
+ mld->netdetect = false;
+ } else {
+ bool keep_connection =
+ iwl_mld_process_wowlan_status(mld, bss_vif,
+ resume_data.wowlan_status);
+
+ /* EMLSR state will be cleared if the connection is not kept */
+ if (keep_connection)
+ iwl_mld_unblock_emlsr(mld, bss_vif,
+ IWL_MLD_EMLSR_BLOCKED_WOWLAN);
+ else
+ ieee80211_resume_disconnect(bss_vif);
+ }
+
+ goto out;
+
+ err:
+ if (fw_err) {
+ mld->trans->state = IWL_TRANS_NO_FW;
+ set_bit(STATUS_FW_ERROR, &mld->trans->status);
+ }
+
+ mld->fw_status.in_hw_restart = true;
+ ret = 1;
+ out:
+ if (resume_data.wowlan_status) {
+ kfree(resume_data.wowlan_status->wake_packet);
+ kfree(resume_data.wowlan_status);
+ }
+
+ return ret;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/d3.h b/drivers/net/wireless/intel/iwlwifi/mld/d3.h
new file mode 100644
index 000000000000..618d6fb3c796
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/d3.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_d3_h__
+#define __iwl_mld_d3_h__
+
+#include "fw/api/d3.h"
+
+struct iwl_mld_rekey_data {
+ bool valid;
+ u8 kck[NL80211_KCK_EXT_LEN];
+ u8 kek[NL80211_KEK_EXT_LEN];
+ size_t kck_len;
+ size_t kek_len;
+ __le64 replay_ctr;
+ u32 akm;
+};
+
+/**
+ * struct iwl_mld_wowlan_data - data used by the wowlan suspend flow
+ *
+ * @target_ipv6_addrs: IPv6 addresses on this interface for offload
+ * @tentative_addrs: bitmap of tentative IPv6 addresses in @target_ipv6_addrs
+ * @num_target_ipv6_addrs: number of @target_ipv6_addrs
+ * @rekey_data: security key data used for rekeying during D3
+ */
+struct iwl_mld_wowlan_data {
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
+ unsigned long tentative_addrs[BITS_TO_LONGS(IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)];
+ int num_target_ipv6_addrs;
+#endif
+ struct iwl_mld_rekey_data rekey_data;
+};
+
+int iwl_mld_no_wowlan_resume(struct iwl_mld *mld);
+int iwl_mld_no_wowlan_suspend(struct iwl_mld *mld);
+int iwl_mld_wowlan_suspend(struct iwl_mld *mld,
+ struct cfg80211_wowlan *wowlan);
+int iwl_mld_wowlan_resume(struct iwl_mld *mld);
+void iwl_mld_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data);
+#if IS_ENABLED(CONFIG_IPV6)
+void iwl_mld_ipv6_addr_change(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct inet6_dev *idev);
+#endif
+
+#endif /* __iwl_mld_d3_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
new file mode 100644
index 000000000000..453ce2ba39d1
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
@@ -0,0 +1,1082 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include "mld.h"
+#include "debugfs.h"
+#include "iwl-io.h"
+#include "hcmd.h"
+#include "iface.h"
+#include "sta.h"
+#include "tlc.h"
+#include "power.h"
+#include "notif.h"
+#include "ap.h"
+#include "iwl-utils.h"
+#include "scan.h"
+#ifdef CONFIG_THERMAL
+#include "thermal.h"
+#endif
+
+#include "fw/api/rs.h"
+#include "fw/api/dhc.h"
+#include "fw/api/rfi.h"
+#include "fw/dhc-utils.h"
+#include <linux/dmi.h>
+
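+/* Shorthands for debugfs files whose private data is the iwl_mld pointer;
+ * the generated file_operations are wired to the iwl_dbgfs_<name> read and
+ * write handlers with a fixed-size buffer.
+ */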
+#define MLD_DEBUGFS_READ_FILE_OPS(name, bufsz) \
+ _MLD_DEBUGFS_READ_FILE_OPS(name, bufsz, struct iwl_mld)
+
+#define MLD_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) \
+ debugfs_create_file(alias, mode, parent, mld, \
+ &iwl_dbgfs_##name##_ops)
+#define MLD_DEBUGFS_ADD_FILE(name, parent, mode) \
+ MLD_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
+
+static bool iwl_mld_dbgfs_fw_cmd_disabled(struct iwl_mld *mld)
+{
+#ifdef CONFIG_PM_SLEEP
+ return !mld->fw_status.running || mld->fw_status.in_d3;
+#else
+ return !mld->fw_status.running;
+#endif /* CONFIG_PM_SLEEP */
+}
+
+static ssize_t iwl_dbgfs_fw_dbg_clear_write(struct iwl_mld *mld,
+ char *buf, size_t count)
+{
+ /* If the firmware is not running, silently succeed since there is
+ * no data to clear.
+ */
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return 0;
+
+ iwl_fw_dbg_clear_monitor_buf(&mld->fwrt);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mld *mld, char *buf,
+ size_t count)
+{
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
+ IWL_ERR(mld, "Triggering an NMI from debugfs\n");
+
+ if (count == 6 && !strcmp(buf, "nolog\n"))
+ mld->fw_status.do_not_dump_once = true;
+
+ iwl_force_nmi(mld->trans);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mld *mld, char *buf,
+ size_t count)
+{
+ int __maybe_unused ret;
+
+ if (!iwlwifi_mod_params.fw_restart)
+ return -EPERM;
+
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
+ if (count == 6 && !strcmp(buf, "nolog\n")) {
+ mld->fw_status.do_not_dump_once = true;
+ set_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, &mld->trans->status);
+ }
+
+ /* take the return value to make compiler happy - it will
+ * fail anyway
+ */
+ ret = iwl_mld_send_cmd_empty(mld, WIDE_ID(LONG_GROUP, REPLY_ERROR));
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_send_echo_cmd_write(struct iwl_mld *mld, char *buf,
+ size_t count)
+{
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
+ return iwl_mld_send_cmd_empty(mld, ECHO_CMD) ?: count;
+}
+
+struct iwl_mld_sniffer_apply {
+ struct iwl_mld *mld;
+ const u8 *bssid;
+ u16 aid;
+};
+
+static bool iwl_mld_sniffer_apply(struct iwl_notif_wait_data *notif_data,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mld_sniffer_apply *apply = data;
+
+ apply->mld->monitor.cur_aid = cpu_to_le16(apply->aid);
+ memcpy(apply->mld->monitor.cur_bssid, apply->bssid,
+ sizeof(apply->mld->monitor.cur_bssid));
+
+ return true;
+}
+
+static ssize_t
+iwl_dbgfs_he_sniffer_params_write(struct iwl_mld *mld, char *buf,
+ size_t count)
+{
+ struct iwl_notification_wait wait;
+ struct iwl_he_monitor_cmd he_mon_cmd = {};
+ struct iwl_mld_sniffer_apply apply = {
+ .mld = mld,
+ };
+ u16 wait_cmds[] = {
+ WIDE_ID(DATA_PATH_GROUP, HE_AIR_SNIFFER_CONFIG_CMD),
+ };
+ u32 aid;
+ int ret;
+
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
+ if (!mld->monitor.on)
+ return -ENODEV;
+
+ ret = sscanf(buf, "%x %2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx", &aid,
+ &he_mon_cmd.bssid[0], &he_mon_cmd.bssid[1],
+ &he_mon_cmd.bssid[2], &he_mon_cmd.bssid[3],
+ &he_mon_cmd.bssid[4], &he_mon_cmd.bssid[5]);
+ if (ret != 7)
+ return -EINVAL;
+
+ he_mon_cmd.aid = cpu_to_le16(aid);
+
+ apply.aid = aid;
+ apply.bssid = (void *)he_mon_cmd.bssid;
+
+ /* Use the notification waiter to get our function triggered
+ * in sequence with other RX. This ensures that frames we get
+ * on the RX queue _before_ the new configuration is applied
+ * still have mld->cur_aid pointing to the old AID, and that
+ * frames on the RX queue _after_ the firmware processed the
+ * new configuration (and sent the response, synchronously)
+ * get mld->cur_aid correctly set to the new AID.
+ */
+ iwl_init_notification_wait(&mld->notif_wait, &wait,
+ wait_cmds, ARRAY_SIZE(wait_cmds),
+ iwl_mld_sniffer_apply, &apply);
+
+ ret = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(DATA_PATH_GROUP,
+ HE_AIR_SNIFFER_CONFIG_CMD),
+ &he_mon_cmd);
+
+ /* no need to really wait, we already did anyway */
+ iwl_remove_notification(&mld->notif_wait, &wait);
+
+ return ret ?: count;
+}
+
+static ssize_t
+iwl_dbgfs_he_sniffer_params_read(struct iwl_mld *mld, char *buf, size_t count)
+{
+ return scnprintf(buf, count,
+ "%d %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n",
+ le16_to_cpu(mld->monitor.cur_aid),
+ mld->monitor.cur_bssid[0], mld->monitor.cur_bssid[1],
+ mld->monitor.cur_bssid[2], mld->monitor.cur_bssid[3],
+ mld->monitor.cur_bssid[4], mld->monitor.cur_bssid[5]);
+}
+
+/* The size computation is as follows:
+ * each number needs at most 3 characters, and the number of rows is the
+ * size of the table; so we need 5 chars for the "freq: " part, and each
+ * tuple afterwards needs 6 characters for the numbers and 5 for the
+ * surrounding punctuation, plus 32 bytes for the feature support message.
+ */
+#define IWL_RFI_DDR_BUF_SIZE (IWL_RFI_DDR_LUT_INSTALLED_SIZE *\
+ (5 + IWL_RFI_DDR_LUT_ENTRY_CHANNELS_NUM *\
+ (6 + 5)) + 32)
+#define IWL_RFI_DLVR_BUF_SIZE (IWL_RFI_DLVR_LUT_INSTALLED_SIZE *\
+ (5 + IWL_RFI_DLVR_LUT_ENTRY_CHANNELS_NUM *\
+ (6 + 5)) + 32)
+#define IWL_RFI_DESENSE_BUF_SIZE IWL_RFI_DDR_BUF_SIZE
+
+/* Extra 32 for "DDR and DLVR table" message */
+#define IWL_RFI_BUF_SIZE (IWL_RFI_DDR_BUF_SIZE + IWL_RFI_DLVR_BUF_SIZE +\
+ IWL_RFI_DESENSE_BUF_SIZE + 32)
+
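+/* Pretty-print the TAS status response: table source and revision, current
+ * MCC, the MCC block list and the per-radio static/dynamic state.
+ */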
+static size_t iwl_mld_dump_tas_resp(struct iwl_dhc_tas_status_resp *resp,
+ size_t count, u8 *buf)
+{
+ const char * const tas_dis_reason[TAS_DISABLED_REASON_MAX] = {
+ [TAS_DISABLED_DUE_TO_BIOS] =
+ "Due To BIOS",
+ [TAS_DISABLED_DUE_TO_SAR_6DBM] =
+ "Due To SAR Limit Less Than 6 dBm",
+ [TAS_DISABLED_REASON_INVALID] =
+ "N/A",
+ [TAS_DISABLED_DUE_TO_TABLE_SOURCE_INVALID] =
+ "Due to table source invalid"
+ };
+ const char * const tas_current_status[TAS_DYNA_STATUS_MAX] = {
+ [TAS_DYNA_INACTIVE] = "INACTIVE",
+ [TAS_DYNA_INACTIVE_MVM_MODE] =
+ "inactive due to mvm mode",
+ [TAS_DYNA_INACTIVE_TRIGGER_MODE] =
+ "inactive due to trigger mode",
+ [TAS_DYNA_INACTIVE_BLOCK_LISTED] =
+ "inactive due to block listed",
+ [TAS_DYNA_INACTIVE_UHB_NON_US] =
+ "inactive due to uhb non US",
+ [TAS_DYNA_ACTIVE] = "ACTIVE",
+ };
+ ssize_t pos = 0;
+
+ if (resp->header.version != 1) {
+ pos += scnprintf(buf + pos, count - pos,
+ "Unsupported TAS response version:%d",
+ resp->header.version);
+ return pos;
+ }
+
+ pos += scnprintf(buf + pos, count - pos, "TAS Report\n");
+ switch (resp->tas_config_info.table_source) {
+ case BIOS_SOURCE_NONE:
+ pos += scnprintf(buf + pos, count - pos,
+ "BIOS SOURCE NONE ");
+ break;
+ case BIOS_SOURCE_ACPI:
+ pos += scnprintf(buf + pos, count - pos,
+ "BIOS SOURCE ACPI ");
+ break;
+ case BIOS_SOURCE_UEFI:
+ pos += scnprintf(buf + pos, count - pos,
+ "BIOS SOURCE UEFI ");
+ break;
+ default:
+ pos += scnprintf(buf + pos, count - pos,
+ "BIOS SOURCE UNKNOWN (%d) ",
+ resp->tas_config_info.table_source);
+ break;
+ }
+
+ pos += scnprintf(buf + pos, count - pos,
+ "revision is: %d data is: 0x%08x\n",
+ resp->tas_config_info.table_revision,
+ resp->tas_config_info.value);
+ pos += scnprintf(buf + pos, count - pos, "Current MCC: 0x%x\n",
+ le16_to_cpu(resp->curr_mcc));
+
+ pos += scnprintf(buf + pos, count - pos, "Block list entries:");
+ for (int i = 0; i < ARRAY_SIZE(resp->mcc_block_list); i++)
+ pos += scnprintf(buf + pos, count - pos, " 0x%x",
+ le16_to_cpu(resp->mcc_block_list[i]));
+
+ pos += scnprintf(buf + pos, count - pos,
+ "\nDo TAS Support Dual Radio?: %s\n",
+ hweight8(resp->valid_radio_mask) > 1 ?
+ "TRUE" : "FALSE");
+
+ for (int i = 0; i < ARRAY_SIZE(resp->tas_status_radio); i++) {
+ int tmp;
+ unsigned long dynamic_status;
+
+ if (!(resp->valid_radio_mask & BIT(i)))
+ continue;
+
+ pos += scnprintf(buf + pos, count - pos,
+ "TAS report for radio:%d\n", i + 1);
+ pos += scnprintf(buf + pos, count - pos,
+ "Static status: %sabled\n",
+ resp->tas_status_radio[i].static_status ?
+ "En" : "Dis");
+ if (!resp->tas_status_radio[i].static_status) {
+ u8 static_disable_reason =
+ resp->tas_status_radio[i].static_disable_reason;
+
+ pos += scnprintf(buf + pos, count - pos,
+ "\tStatic Disabled Reason: ");
+ if (static_disable_reason >= TAS_DISABLED_REASON_MAX) {
+ pos += scnprintf(buf + pos, count - pos,
+ "unsupported value (%d)\n",
+ static_disable_reason);
+ continue;
+ }
+
+ pos += scnprintf(buf + pos, count - pos,
+ "%s (%d)\n",
+ tas_dis_reason[static_disable_reason],
+ static_disable_reason);
+ continue;
+ }
+
+ pos += scnprintf(buf + pos, count - pos, "\tANT A %s and ",
+ (resp->tas_status_radio[i].dynamic_status_ant_a
+ & BIT(TAS_DYNA_ACTIVE)) ? "ON" : "OFF");
+
+ pos += scnprintf(buf + pos, count - pos, "ANT B %s for ",
+ (resp->tas_status_radio[i].dynamic_status_ant_b
+ & BIT(TAS_DYNA_ACTIVE)) ? "ON" : "OFF");
+
+ switch (resp->tas_status_radio[i].band) {
+ case PHY_BAND_5:
+ pos += scnprintf(buf + pos, count - pos, "HB\n");
+ break;
+ case PHY_BAND_24:
+ pos += scnprintf(buf + pos, count - pos, "LB\n");
+ break;
+ case PHY_BAND_6:
+ pos += scnprintf(buf + pos, count - pos, "UHB\n");
+ break;
+ default:
+ pos += scnprintf(buf + pos, count - pos,
+ "Unsupported band (%d)\n",
+ resp->tas_status_radio[i].band);
+ break;
+ }
+
+ pos += scnprintf(buf + pos, count - pos,
+ "Is near disconnection?: %s\n",
+ resp->tas_status_radio[i].near_disconnection ?
+ "True" : "False");
+
+ pos += scnprintf(buf + pos, count - pos,
+ "Dynamic status antenna A:\n");
+ dynamic_status = resp->tas_status_radio[i].dynamic_status_ant_a;
+ for_each_set_bit(tmp, &dynamic_status, TAS_DYNA_STATUS_MAX) {
+ pos += scnprintf(buf + pos, count - pos, "\t%s (%d)\n",
+ tas_current_status[tmp], tmp);
+ }
+ pos += scnprintf(buf + pos, count - pos,
+ "\nDynamic status antenna B:\n");
+ dynamic_status = resp->tas_status_radio[i].dynamic_status_ant_b;
+ for_each_set_bit(tmp, &dynamic_status, TAS_DYNA_STATUS_MAX) {
+ pos += scnprintf(buf + pos, count - pos, "\t%s (%d)\n",
+ tas_current_status[tmp], tmp);
+ }
+
+ tmp = le16_to_cpu(resp->tas_status_radio[i].max_reg_pwr_limit_ant_a);
+ pos += scnprintf(buf + pos, count - pos,
+ "Max antenna A regulatory pwr limit (dBm): %d.%03d\n",
+ tmp / 8, 125 * (tmp % 8));
+ tmp = le16_to_cpu(resp->tas_status_radio[i].max_reg_pwr_limit_ant_b);
+ pos += scnprintf(buf + pos, count - pos,
+ "Max antenna B regulatory pwr limit (dBm): %d.%03d\n",
+ tmp / 8, 125 * (tmp % 8));
+
+ tmp = le16_to_cpu(resp->tas_status_radio[i].sar_limit_ant_a);
+ pos += scnprintf(buf + pos, count - pos,
+ "Antenna A SAR limit (dBm): %d.%03d\n",
+ tmp / 8, 125 * (tmp % 8));
+ tmp = le16_to_cpu(resp->tas_status_radio[i].sar_limit_ant_b);
+ pos += scnprintf(buf + pos, count - pos,
+ "Antenna B SAR limit (dBm): %d.%03d\n",
+ tmp / 8, 125 * (tmp % 8));
+ }
+
+ return pos;
+}
+
+static ssize_t iwl_dbgfs_tas_get_status_read(struct iwl_mld *mld, char *buf,
+ size_t count)
+{
+ struct iwl_dhc_cmd cmd = {
+ .index_and_mask = cpu_to_le32(DHC_TABLE_TOOLS |
+ DHC_TARGET_UMAC |
+ DHC_TOOLS_UMAC_GET_TAS_STATUS),
+ };
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(LEGACY_GROUP, DEBUG_HOST_COMMAND),
+ .flags = CMD_WANT_SKB,
+ .len[0] = sizeof(cmd),
+ .data[0] = &cmd,
+ };
+ struct iwl_dhc_tas_status_resp *resp = NULL;
+ ssize_t pos = 0;
+ u32 resp_len;
+ u32 status;
+ int ret;
+
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
+ ret = iwl_mld_send_cmd(mld, &hcmd);
+ if (ret)
+ return ret;
+
+ pos += scnprintf(buf + pos, count - pos, "\nOEM name: %s\n",
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
+ pos += scnprintf(buf + pos, count - pos,
+ "\tVendor In Approved List: %s\n",
+ iwl_is_tas_approved() ? "YES" : "NO");
+
+ status = iwl_dhc_resp_status(mld->fwrt.fw, hcmd.resp_pkt);
+ if (status != 1) {
+ pos += scnprintf(buf + pos, count - pos,
+ "response status is not success: %d\n",
+ status);
+ goto out;
+ }
+
+ resp = iwl_dhc_resp_data(mld->fwrt.fw, hcmd.resp_pkt, &resp_len);
+ if (IS_ERR(resp) || resp_len != sizeof(*resp)) {
+ pos += scnprintf(buf + pos, count - pos,
+ "Invalid size for TAS response (%u instead of %zd)\n",
+ resp_len, sizeof(*resp));
+ goto out;
+ }
+
+ pos += iwl_mld_dump_tas_resp(resp, count - pos, buf + pos);
+
+out:
+ iwl_free_resp(&hcmd);
+ return pos;
+}
+
+WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(fw_nmi, 10);
+WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(fw_restart, 10);
+WIPHY_DEBUGFS_READ_WRITE_FILE_OPS_MLD(he_sniffer_params, 32);
+WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(fw_dbg_clear, 10);
+WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(send_echo_cmd, 8);
+WIPHY_DEBUGFS_READ_FILE_OPS_MLD(tas_get_status, 2048);
+
+static ssize_t iwl_dbgfs_wifi_6e_enable_read(struct iwl_mld *mld,
+ size_t count, u8 *buf)
+{
+ int err;
+ u32 value;
+
+ err = iwl_bios_get_dsm(&mld->fwrt, DSM_FUNC_ENABLE_6E, &value);
+ if (err)
+ return err;
+
+ return scnprintf(buf, count, "0x%08x\n", value);
+}
+
+MLD_DEBUGFS_READ_FILE_OPS(wifi_6e_enable, 64);
+
+static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mld *mld,
+ char *buf, size_t count)
+{
+ struct iwl_op_mode *opmode = container_of((void *)mld,
+ struct iwl_op_mode,
+ op_mode_specific);
+ struct iwl_rx_cmd_buffer rxb = {};
+ struct iwl_rx_packet *pkt;
+ int n_bytes = count / 2;
+ int ret = -EINVAL;
+
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
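+ /* Decode the hex string written by the user into a freshly allocated
+ * RX buffer and feed it into the normal RX path.
+ */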
+ rxb._page = alloc_pages(GFP_KERNEL, 0);
+ if (!rxb._page)
+ return -ENOMEM;
+ pkt = rxb_addr(&rxb);
+
+ ret = hex2bin(page_address(rxb._page), buf, n_bytes);
+ if (ret)
+ goto out;
+
+ /* avoid invalid memory access and malformed packet */
+ if (n_bytes < sizeof(*pkt) ||
+ n_bytes != sizeof(*pkt) + iwl_rx_packet_payload_len(pkt))
+ goto out;
+
+ local_bh_disable();
+ iwl_mld_rx(opmode, NULL, &rxb);
+ local_bh_enable();
+ ret = 0;
+
+out:
+ iwl_free_rxb(&rxb);
+
+ return ret ?: count;
+}
+
+WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(inject_packet, 512);
+
+#ifdef CONFIG_THERMAL
+
+static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mld *mld,
+ char *buf, size_t count)
+{
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
+ return iwl_mld_config_ctdp(mld, mld->cooling_dev.cur_state,
+ CTDP_CMD_OPERATION_STOP) ? : count;
+}
+
+WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(stop_ctdp, 8);
+
+static ssize_t iwl_dbgfs_start_ctdp_write(struct iwl_mld *mld,
+ char *buf, size_t count)
+{
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
+ return iwl_mld_config_ctdp(mld, mld->cooling_dev.cur_state,
+ CTDP_CMD_OPERATION_START) ? : count;
+}
+
+WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(start_ctdp, 8);
+
+#endif /* CONFIG_THERMAL */
+
+void
+iwl_mld_add_debugfs_files(struct iwl_mld *mld, struct dentry *debugfs_dir)
+{
+ /* Add debugfs files here */
+
+ MLD_DEBUGFS_ADD_FILE(fw_nmi, debugfs_dir, 0200);
+ MLD_DEBUGFS_ADD_FILE(fw_restart, debugfs_dir, 0200);
+ MLD_DEBUGFS_ADD_FILE(wifi_6e_enable, debugfs_dir, 0400);
+ MLD_DEBUGFS_ADD_FILE(he_sniffer_params, debugfs_dir, 0600);
+ MLD_DEBUGFS_ADD_FILE(fw_dbg_clear, debugfs_dir, 0200);
+ MLD_DEBUGFS_ADD_FILE(send_echo_cmd, debugfs_dir, 0200);
+ MLD_DEBUGFS_ADD_FILE(tas_get_status, debugfs_dir, 0400);
+#ifdef CONFIG_THERMAL
+ MLD_DEBUGFS_ADD_FILE(start_ctdp, debugfs_dir, 0200);
+ MLD_DEBUGFS_ADD_FILE(stop_ctdp, debugfs_dir, 0200);
+#endif
+ MLD_DEBUGFS_ADD_FILE(inject_packet, debugfs_dir, 0200);
+
+ /* Create a symlink with mac80211. It will be removed when mac80211
+ * exits (before the opmode exits, which removes the target).
+ */
+ if (!IS_ERR(debugfs_dir)) {
+ char buf[100];
+
+ snprintf(buf, 100, "../../%pd2", debugfs_dir->d_parent);
+ debugfs_create_symlink("iwlwifi", mld->wiphy->debugfsdir,
+ buf);
+ }
+}
+
+#define VIF_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
+ WIPHY_DEBUGFS_WRITE_FILE_OPS(vif_##name, bufsz, vif)
+
+#define VIF_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
+ IEEE80211_WIPHY_DEBUGFS_READ_WRITE_FILE_OPS(vif_##name, bufsz, vif)
+
+#define VIF_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) \
+ debugfs_create_file(alias, mode, parent, vif, \
+ &iwl_dbgfs_vif_##name##_ops)
+#define VIF_DEBUGFS_ADD_FILE(name, parent, mode) \
+ VIF_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
+
+static ssize_t iwl_dbgfs_vif_bf_params_write(struct iwl_mld *mld, char *buf,
+ size_t count, void *data)
+{
+ struct ieee80211_vif *vif = data;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
+ struct ieee80211_bss_conf *link_conf;
+ int val;
+
+ if (!strncmp("bf_enable_beacon_filter=", buf, 24)) {
+ if (sscanf(buf + 24, "%d", &val) != 1)
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
+ if (val != 0 && val != 1)
+ return -EINVAL;
+
+ link_conf = link_conf_dereference_protected(vif, link_id);
+ if (WARN_ON(!link_conf))
+ return -ENODEV;
+
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
+ mld_vif->disable_bf = !val;
+
+ if (val)
+ return iwl_mld_enable_beacon_filter(mld, link_conf,
+ false) ?: count;
+ else
+ return iwl_mld_disable_beacon_filter(mld, vif) ?: count;
+}
+
+static ssize_t iwl_dbgfs_vif_pm_params_write(struct iwl_mld *mld,
+ char *buf,
+ size_t count, void *data)
+{
+ struct ieee80211_vif *vif = data;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int val;
+
+ if (!strncmp("use_ps_poll=", buf, 12)) {
+ if (sscanf(buf + 12, "%d", &val) != 1)
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
+ mld_vif->use_ps_poll = val;
+
+ return iwl_mld_update_mac_power(mld, vif, false) ?: count;
+}
+
+static ssize_t iwl_dbgfs_vif_low_latency_write(struct iwl_mld *mld,
+ char *buf, size_t count,
+ void *data)
+{
+ struct ieee80211_vif *vif = data;
+ u8 value;
+ int ret;
+
+ ret = kstrtou8(buf, 0, &value);
+ if (ret)
+ return ret;
+
+ if (value > 1)
+ return -EINVAL;
+
+ iwl_mld_vif_update_low_latency(mld, vif, value, LOW_LATENCY_DEBUGFS);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_vif_low_latency_read(struct ieee80211_vif *vif,
+ size_t count, char *buf)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ char format[] = "traffic=%d\ndbgfs=%d\nvif_type=%d\nactual=%d\n";
+ u8 ll_causes;
+
+ if (WARN_ON(count < sizeof(format)))
+ return -EINVAL;
+
+ ll_causes = READ_ONCE(mld_vif->low_latency_causes);
+
+ /* All values in the format string are boolean, so the size of the
+ * format string is enough to hold the result string.
+ */
+ return scnprintf(buf, count, format,
+ !!(ll_causes & LOW_LATENCY_TRAFFIC),
+ !!(ll_causes & LOW_LATENCY_DEBUGFS),
+ !!(ll_causes & LOW_LATENCY_VIF_TYPE),
+ !!(ll_causes));
+}
+
+VIF_DEBUGFS_WRITE_FILE_OPS(pm_params, 32);
+VIF_DEBUGFS_WRITE_FILE_OPS(bf_params, 32);
+VIF_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 45);
+
+static int
+_iwl_dbgfs_inject_beacon_ie(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ char *bin, ssize_t len,
+ bool restore)
+{
+ struct iwl_mld_vif *mld_vif;
+ struct iwl_mld_link *mld_link;
+ struct iwl_mac_beacon_cmd beacon_cmd = {};
+ int n_bytes = len / 2;
+
+ /* Element len should be represented by u8 */
+ if (n_bytes >= U8_MAX)
+ return -EINVAL;
+
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
+ if (!vif)
+ return -EINVAL;
+
+ mld_vif = iwl_mld_vif_from_mac80211(vif);
+ mld_vif->beacon_inject_active = true;
+ mld->hw->extra_beacon_tailroom = n_bytes;
+
+ for_each_mld_vif_valid_link(mld_vif, mld_link) {
+ u32 offset;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+ struct ieee80211_chanctx_conf *ctx =
+ wiphy_dereference(mld->wiphy, link_conf->chanctx_conf);
+ struct sk_buff *beacon =
+ ieee80211_beacon_get_template(mld->hw, vif,
+ NULL, link_id);
+
+ if (!beacon)
+ return -EINVAL;
+
+ if (!restore && (WARN_ON(!n_bytes || !bin) ||
+ hex2bin(skb_put_zero(beacon, n_bytes),
+ bin, n_bytes))) {
+ dev_kfree_skb(beacon);
+ return -EINVAL;
+ }
+
+ info = IEEE80211_SKB_CB(beacon);
+
+ beacon_cmd.flags =
+ cpu_to_le16(iwl_mld_get_rate_flags(mld, info, vif,
+ link_conf,
+ ctx->def.chan->band));
+ beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len);
+ beacon_cmd.link_id =
+ cpu_to_le32(mld_link->fw_id);
+
+ iwl_mld_set_tim_idx(mld, &beacon_cmd.tim_idx,
+ beacon->data, beacon->len);
+
+ offset = iwl_find_ie_offset(beacon->data,
+ WLAN_EID_S1G_TWT,
+ beacon->len);
+
+ beacon_cmd.btwt_offset = cpu_to_le32(offset);
+
+ iwl_mld_send_beacon_template_cmd(mld, beacon, &beacon_cmd);
+ dev_kfree_skb(beacon);
+ }
+
+ if (restore)
+ mld_vif->beacon_inject_active = false;
+
+ return 0;
+}
+
+static ssize_t
+iwl_dbgfs_vif_inject_beacon_ie_write(struct iwl_mld *mld,
+ char *buf, size_t count,
+ void *data)
+{
+ struct ieee80211_vif *vif = data;
+ int ret = _iwl_dbgfs_inject_beacon_ie(mld, vif, buf,
+ count, false);
+
+ mld->hw->extra_beacon_tailroom = 0;
+ return ret ?: count;
+}
+
+VIF_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie, 512);
+
+static ssize_t
+iwl_dbgfs_vif_inject_beacon_ie_restore_write(struct iwl_mld *mld,
+ char *buf,
+ size_t count,
+ void *data)
+{
+ struct ieee80211_vif *vif = data;
+ int ret = _iwl_dbgfs_inject_beacon_ie(mld, vif, NULL,
+ 0, true);
+
+ mld->hw->extra_beacon_tailroom = 0;
+ return ret ?: count;
+}
+
+VIF_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie_restore, 512);
+
+static ssize_t
+iwl_dbgfs_vif_twt_setup_write(struct iwl_mld *mld, char *buf, size_t count,
+ void *data)
+{
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, DEBUG_HOST_COMMAND),
+ };
+ struct ieee80211_vif *vif = data;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_dhc_cmd *cmd __free(kfree) = NULL;
+ struct iwl_dhc_twt_operation *dhc_twt_cmd;
+ u64 target_wake_time;
+ u32 twt_operation, interval_exp, interval_mantissa, min_wake_duration;
+ u8 trigger, flow_type, flow_id, protection, tenth_param;
+ u8 twt_request = 1, broadcast = 0;
+ int ret;
+
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
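+ /* Expected input: <twt_operation> <target_wake_time> <interval_exp>
+ * <interval_mantissa> <min_wake_duration> <trigger> <flow_type>
+ * <flow_id> <protection> [<tenth_param>]
+ */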
+ ret = sscanf(buf, "%u %llu %u %u %u %hhu %hhu %hhu %hhu %hhu",
+ &twt_operation, &target_wake_time, &interval_exp,
+ &interval_mantissa, &min_wake_duration, &trigger,
+ &flow_type, &flow_id, &protection, &tenth_param);
+
+ /* the new twt_request parameter is optional for station */
+ if ((ret != 9 && ret != 10) ||
+ (ret == 10 && vif->type != NL80211_IFTYPE_STATION &&
+ tenth_param == 1))
+ return -EINVAL;
+
+ /* The 10th parameter:
+ * In STA mode - the TWT type (broadcast or individual)
+ * In AP mode - the role (0 responder, 2 unsolicited)
+ */
+ if (ret == 10) {
+ if (vif->type == NL80211_IFTYPE_STATION)
+ broadcast = tenth_param;
+ else
+ twt_request = tenth_param;
+ }
+
+ cmd = kzalloc(sizeof(*cmd) + sizeof(*dhc_twt_cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ dhc_twt_cmd = (void *)cmd->data;
+ dhc_twt_cmd->mac_id = cpu_to_le32(mld_vif->fw_id);
+ dhc_twt_cmd->twt_operation = cpu_to_le32(twt_operation);
+ dhc_twt_cmd->target_wake_time = cpu_to_le64(target_wake_time);
+ dhc_twt_cmd->interval_exp = cpu_to_le32(interval_exp);
+ dhc_twt_cmd->interval_mantissa = cpu_to_le32(interval_mantissa);
+ dhc_twt_cmd->min_wake_duration = cpu_to_le32(min_wake_duration);
+ dhc_twt_cmd->trigger = trigger;
+ dhc_twt_cmd->flow_type = flow_type;
+ dhc_twt_cmd->flow_id = flow_id;
+ dhc_twt_cmd->protection = protection;
+ dhc_twt_cmd->twt_request = twt_request;
+ dhc_twt_cmd->negotiation_type = broadcast ? 3 : 0;
+
+ cmd->length = cpu_to_le32(sizeof(*dhc_twt_cmd) >> 2);
+ cmd->index_and_mask =
+ cpu_to_le32(DHC_TABLE_INTEGRATION | DHC_TARGET_UMAC |
+ DHC_INT_UMAC_TWT_OPERATION);
+
+ hcmd.len[0] = sizeof(*cmd) + sizeof(*dhc_twt_cmd);
+ hcmd.data[0] = cmd;
+
+ ret = iwl_mld_send_cmd(mld, &hcmd);
+
+ return ret ?: count;
+}
+
+VIF_DEBUGFS_WRITE_FILE_OPS(twt_setup, 256);
+
+static ssize_t
+iwl_dbgfs_vif_twt_operation_write(struct iwl_mld *mld, char *buf, size_t count,
+ void *data)
+{
+ struct ieee80211_vif *vif = data;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_twt_operation_cmd twt_cmd = {};
+ int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
+ struct iwl_mld_link *mld_link = iwl_mld_link_dereference_check(mld_vif,
+ link_id);
+ int ret;
+
+ if (WARN_ON(!mld_link))
+ return -ENODEV;
+
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
+ if (hweight16(vif->active_links) > 1)
+ return -EOPNOTSUPP;
+
+ ret = sscanf(buf,
+ "%u %llu %u %u %u %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu",
+ &twt_cmd.twt_operation, &twt_cmd.target_wake_time,
+ &twt_cmd.interval_exponent, &twt_cmd.interval_mantissa,
+ &twt_cmd.minimum_wake_duration, &twt_cmd.trigger,
+ &twt_cmd.flow_type, &twt_cmd.flow_id,
+ &twt_cmd.twt_protection, &twt_cmd.ndp_paging_indicator,
+ &twt_cmd.responder_pm_mode, &twt_cmd.negotiation_type,
+ &twt_cmd.twt_request, &twt_cmd.implicit,
+ &twt_cmd.twt_group_assignment, &twt_cmd.twt_channel,
+ &twt_cmd.restricted_info_present, &twt_cmd.dl_bitmap_valid,
+ &twt_cmd.ul_bitmap_valid, &twt_cmd.dl_tid_bitmap,
+ &twt_cmd.ul_tid_bitmap);
+
+ if (ret != 21)
+ return -EINVAL;
+
+ twt_cmd.link_id = cpu_to_le32(mld_link->fw_id);
+
+ ret = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(MAC_CONF_GROUP, TWT_OPERATION_CMD),
+ &twt_cmd);
+ return ret ?: count;
+}
+
+VIF_DEBUGFS_WRITE_FILE_OPS(twt_operation, 256);
+
+static ssize_t iwl_dbgfs_vif_int_mlo_scan_write(struct iwl_mld *mld, char *buf,
+ size_t count, void *data)
+{
+ struct ieee80211_vif *vif = data;
+ u32 action;
+ int ret;
+
+ if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
+ return -EINVAL;
+
+ if (kstrtou32(buf, 0, &action))
+ return -EINVAL;
+
+ if (action == 0) {
+ ret = iwl_mld_scan_stop(mld, IWL_MLD_SCAN_INT_MLO, false);
+ } else if (action == 1) {
+ iwl_mld_int_mlo_scan(mld, vif);
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret ?: count;
+}
+
+VIF_DEBUGFS_WRITE_FILE_OPS(int_mlo_scan, 32);
+
+void iwl_mld_add_vif_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct dentry *mld_vif_dbgfs =
+ debugfs_create_dir("iwlmld", vif->debugfs_dir);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ char target[3 * 3 + 11 + (NL80211_WIPHY_NAME_MAXLEN + 1) +
+ (7 + IFNAMSIZ + 1) + 6 + 1];
+ char name[7 + IFNAMSIZ + 1];
+
+ /* Create symlink for convenience pointing to interface specific
+ * debugfs entries for the driver. For example, under
+ * /sys/kernel/debug/iwlwifi/0000\:02\:00.0/iwlmld/
+ * find
+ * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmld/
+ */
+ snprintf(name, sizeof(name), "%pd", vif->debugfs_dir);
+ snprintf(target, sizeof(target), "../../../%pd3/iwlmld",
+ vif->debugfs_dir);
+ mld_vif->dbgfs_slink =
+ debugfs_create_symlink(name, mld->debugfs_dir, target);
+
+ if (iwlmld_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
+ vif->type == NL80211_IFTYPE_STATION) {
+ VIF_DEBUGFS_ADD_FILE(pm_params, mld_vif_dbgfs, 0200);
+ VIF_DEBUGFS_ADD_FILE(bf_params, mld_vif_dbgfs, 0200);
+ }
+
+ if (vif->type == NL80211_IFTYPE_AP) {
+ VIF_DEBUGFS_ADD_FILE(inject_beacon_ie, mld_vif_dbgfs, 0200);
+ VIF_DEBUGFS_ADD_FILE(inject_beacon_ie_restore,
+ mld_vif_dbgfs, 0200);
+ }
+
+ VIF_DEBUGFS_ADD_FILE(low_latency, mld_vif_dbgfs, 0600);
+ VIF_DEBUGFS_ADD_FILE(twt_setup, mld_vif_dbgfs, 0200);
+ VIF_DEBUGFS_ADD_FILE(twt_operation, mld_vif_dbgfs, 0200);
+ VIF_DEBUGFS_ADD_FILE(int_mlo_scan, mld_vif_dbgfs, 0200);
+}
+#define LINK_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
+ WIPHY_DEBUGFS_WRITE_FILE_OPS(link_##name, bufsz, bss_conf)
+
+#define LINK_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) \
+ debugfs_create_file(alias, mode, parent, link_conf, \
+ &iwl_dbgfs_link_##name##_ops)
+#define LINK_DEBUGFS_ADD_FILE(name, parent, mode) \
+ LINK_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
+
+void iwl_mld_add_link_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct dentry *dir)
+{
+ struct dentry *mld_link_dir;
+
+ mld_link_dir = debugfs_lookup("iwlmld", dir);
+
+ /* For non-MLO vifs, the deflink dir is the same as the vif's dir,
+ * so if the iwlmld dir already exists, this is the deflink.
+ * Otherwise this is a per-link dir of an MLO vif; create the iwlmld
+ * dir in it.
+ */
+ if (!mld_link_dir)
+ mld_link_dir = debugfs_create_dir("iwlmld", dir);
+}
+
+static ssize_t iwl_dbgfs_fixed_rate_write(struct iwl_mld *mld, char *buf,
+ size_t count, void *data)
+{
+ struct ieee80211_link_sta *link_sta = data;
+ struct iwl_mld_link_sta *mld_link_sta;
+ u32 rate;
+ u32 partial = false;
+ char pretty_rate[100];
+ int ret;
+ u8 fw_sta_id;
+
+ mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta);
+ if (WARN_ON(!mld_link_sta))
+ return -EINVAL;
+
+ fw_sta_id = mld_link_sta->fw_id;
+
+ if (sscanf(buf, "%i %i", &rate, &partial) == 0)
+ return -EINVAL;
+
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
+ ret = iwl_mld_send_tlc_dhc(mld, fw_sta_id,
+ partial ? IWL_TLC_DEBUG_PARTIAL_FIXED_RATE :
+ IWL_TLC_DEBUG_FIXED_RATE,
+ rate);
+
+ rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate), rate);
+
+ IWL_DEBUG_RATE(mld, "sta_id %d rate %s partial: %d, ret:%d\n",
+ fw_sta_id, pretty_rate, partial, ret);
+
+ return ret ? : count;
+}
+
+static ssize_t iwl_dbgfs_tlc_dhc_write(struct iwl_mld *mld, char *buf,
+ size_t count, void *data)
+{
+ struct ieee80211_link_sta *link_sta = data;
+ struct iwl_mld_link_sta *mld_link_sta;
+ u32 type, value;
+ int ret;
+ u8 fw_sta_id;
+
+ mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta);
+ if (WARN_ON(!mld_link_sta))
+ return -EINVAL;
+
+ fw_sta_id = mld_link_sta->fw_id;
+
+ if (sscanf(buf, "%i %i", &type, &value) != 2) {
+ IWL_DEBUG_RATE(mld, "usage <type> <value>\n");
+ return -EINVAL;
+ }
+
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
+ ret = iwl_mld_send_tlc_dhc(mld, fw_sta_id, type, value);
+
+ return ret ? : count;
+}
+
+#define LINK_STA_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) \
+ debugfs_create_file(alias, mode, parent, link_sta, \
+ &iwl_dbgfs_##name##_ops)
+#define LINK_STA_DEBUGFS_ADD_FILE(name, parent, mode) \
+ LINK_STA_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
+
+#define LINK_STA_WIPHY_DEBUGFS_WRITE_OPS(name, bufsz) \
+ WIPHY_DEBUGFS_WRITE_FILE_OPS(name, bufsz, link_sta)
+
+LINK_STA_WIPHY_DEBUGFS_WRITE_OPS(tlc_dhc, 64);
+LINK_STA_WIPHY_DEBUGFS_WRITE_OPS(fixed_rate, 64);
+
+void iwl_mld_add_link_sta_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ struct dentry *dir)
+{
+ LINK_STA_DEBUGFS_ADD_FILE(fixed_rate, dir, 0200);
+ LINK_STA_DEBUGFS_ADD_FILE(tlc_dhc, dir, 0200);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.h b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.h
new file mode 100644
index 000000000000..eeba35342ba1
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.h
@@ -0,0 +1,244 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include "iface.h"
+#include "sta.h"
+
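+/* The macros below generate the boilerplate around each debugfs file:
+ * the open wrapper allocates a per-open buffer, the read wrapper fills it
+ * once through iwl_dbgfs_<name>_read(), and the WIPHY_* variants route
+ * reads/writes through wiphy_locked_debugfs_read/write() so the handlers
+ * run under the wiphy lock.
+ */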
+#define MLD_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \
+struct dbgfs_##name##_data { \
+ argtype *arg; \
+ bool read_done; \
+ ssize_t rlen; \
+ char buf[buflen]; \
+}; \
+static int _iwl_dbgfs_##name##_open(struct inode *inode, \
+ struct file *file) \
+{ \
+ struct dbgfs_##name##_data *data; \
+ \
+ if ((file->f_flags & O_ACCMODE) == O_RDWR) \
+ return -EOPNOTSUPP; \
+ \
+ data = kzalloc(sizeof(*data), GFP_KERNEL); \
+ if (!data) \
+ return -ENOMEM; \
+ \
+ data->read_done = false; \
+ data->arg = inode->i_private; \
+ file->private_data = data; \
+ \
+ return 0; \
+}
+
+#define MLD_DEBUGFS_READ_WRAPPER(name) \
+static ssize_t _iwl_dbgfs_##name##_read(struct file *file, \
+ char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ struct dbgfs_##name##_data *data = file->private_data; \
+ \
+ if (!data->read_done) { \
+ data->read_done = true; \
+ data->rlen = iwl_dbgfs_##name##_read(data->arg, \
+ sizeof(data->buf),\
+ data->buf); \
+ } \
+ \
+ if (data->rlen < 0) \
+ return data->rlen; \
+ return simple_read_from_buffer(user_buf, count, ppos, \
+ data->buf, data->rlen); \
+}
+
+static int _iwl_dbgfs_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+#define _MLD_DEBUGFS_READ_FILE_OPS(name, buflen, argtype) \
+MLD_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \
+MLD_DEBUGFS_READ_WRAPPER(name) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .read = _iwl_dbgfs_##name##_read, \
+ .open = _iwl_dbgfs_##name##_open, \
+ .llseek = generic_file_llseek, \
+ .release = _iwl_dbgfs_release, \
+}
+
+#define WIPHY_DEBUGFS_WRITE_HANDLER_WRAPPER(name) \
+static ssize_t iwl_dbgfs_##name##_write_handler(struct wiphy *wiphy, \
+ struct file *file, char *buf, \
+ size_t count, void *data) \
+{ \
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); \
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); \
+ return iwl_dbgfs_##name##_write(mld, buf, count, data); \
+}
+
+static inline struct iwl_mld *
+iwl_mld_from_link_sta(struct ieee80211_link_sta *link_sta)
+{
+ struct ieee80211_vif *vif =
+ iwl_mld_sta_from_mac80211(link_sta->sta)->vif;
+ return iwl_mld_vif_from_mac80211(vif)->mld;
+}
+
+static inline struct iwl_mld *
+iwl_mld_from_bss_conf(struct ieee80211_bss_conf *link)
+{
+ return iwl_mld_vif_from_mac80211(link->vif)->mld;
+}
+
+static inline struct iwl_mld *iwl_mld_from_vif(struct ieee80211_vif *vif)
+{
+ return iwl_mld_vif_from_mac80211(vif)->mld;
+}
+
+#define WIPHY_DEBUGFS_WRITE_WRAPPER(name, bufsz, objtype) \
+WIPHY_DEBUGFS_WRITE_HANDLER_WRAPPER(name) \
+static ssize_t __iwl_dbgfs_##name##_write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ struct ieee80211_##objtype *arg = file->private_data; \
+ struct iwl_mld *mld = iwl_mld_from_##objtype(arg); \
+ char buf[bufsz] = {}; \
+ \
+ return wiphy_locked_debugfs_write(mld->wiphy, file, \
+ buf, sizeof(buf), \
+ user_buf, count, \
+ iwl_dbgfs_##name##_write_handler, \
+ arg); \
+}
+
+#define WIPHY_DEBUGFS_WRITE_FILE_OPS(name, bufsz, objtype) \
+ WIPHY_DEBUGFS_WRITE_WRAPPER(name, bufsz, objtype) \
+ static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = __iwl_dbgfs_##name##_write, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+ }
+
+#define WIPHY_DEBUGFS_READ_HANDLER_WRAPPER_MLD(name) \
+static ssize_t iwl_dbgfs_##name##_read_handler(struct wiphy *wiphy, \
+ struct file *file, char *buf, \
+ size_t count, void *data) \
+{ \
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); \
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); \
+ return iwl_dbgfs_##name##_read(mld, buf, count); \
+}
+
+#define WIPHY_DEBUGFS_WRITE_HANDLER_WRAPPER_MLD(name) \
+static ssize_t iwl_dbgfs_##name##_write_handler(struct wiphy *wiphy, \
+ struct file *file, char *buf, \
+ size_t count, void *data) \
+{ \
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); \
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); \
+ return iwl_dbgfs_##name##_write(mld, buf, count); \
+}
+
+#define WIPHY_DEBUGFS_WRITE_WRAPPER_MLD(name) \
+WIPHY_DEBUGFS_WRITE_HANDLER_WRAPPER_MLD(name) \
+static ssize_t __iwl_dbgfs_##name##_write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ struct dbgfs_##name##_data *data = file->private_data; \
+ struct iwl_mld *mld = data->arg; \
+ \
+ return wiphy_locked_debugfs_write(mld->wiphy, file, \
+ data->buf, sizeof(data->buf), \
+ user_buf, count, \
+ iwl_dbgfs_##name##_write_handler, \
+ NULL); \
+}
+
+#define WIPHY_DEBUGFS_READ_WRAPPER_MLD(name) \
+WIPHY_DEBUGFS_READ_HANDLER_WRAPPER_MLD(name) \
+static ssize_t __iwl_dbgfs_##name##_read(struct file *file, \
+ char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ struct dbgfs_##name##_data *data = file->private_data; \
+ struct iwl_mld *mld = data->arg; \
+ \
+ if (!data->read_done) { \
+ data->read_done = true; \
+ data->rlen = wiphy_locked_debugfs_read(mld->wiphy, \
+ file, data->buf, sizeof(data->buf), \
+ user_buf, count, ppos, \
+ iwl_dbgfs_##name##_read_handler, NULL); \
+ return data->rlen; \
+ } \
+ \
+ if (data->rlen < 0) \
+ return data->rlen; \
+ return simple_read_from_buffer(user_buf, count, ppos, \
+ data->buf, data->rlen); \
+}
+
+#define WIPHY_DEBUGFS_READ_FILE_OPS_MLD(name, bufsz) \
+ MLD_DEBUGFS_OPEN_WRAPPER(name, bufsz, struct iwl_mld) \
+ WIPHY_DEBUGFS_READ_WRAPPER_MLD(name) \
+ static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .read = __iwl_dbgfs_##name##_read, \
+ .open = _iwl_dbgfs_##name##_open, \
+ .llseek = generic_file_llseek, \
+ .release = _iwl_dbgfs_release, \
+ }
+
+#define WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(name, bufsz) \
+ MLD_DEBUGFS_OPEN_WRAPPER(name, bufsz, struct iwl_mld) \
+ WIPHY_DEBUGFS_WRITE_WRAPPER_MLD(name) \
+ static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = __iwl_dbgfs_##name##_write, \
+ .open = _iwl_dbgfs_##name##_open, \
+ .llseek = generic_file_llseek, \
+ .release = _iwl_dbgfs_release, \
+ }
+
+#define WIPHY_DEBUGFS_READ_WRITE_FILE_OPS_MLD(name, bufsz) \
+ MLD_DEBUGFS_OPEN_WRAPPER(name, bufsz, struct iwl_mld) \
+ WIPHY_DEBUGFS_WRITE_WRAPPER_MLD(name) \
+ WIPHY_DEBUGFS_READ_WRAPPER_MLD(name) \
+ static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = __iwl_dbgfs_##name##_write, \
+ .read = __iwl_dbgfs_##name##_read, \
+ .open = _iwl_dbgfs_##name##_open, \
+ .llseek = generic_file_llseek, \
+ .release = _iwl_dbgfs_release, \
+ }
+
+#define WIPHY_DEBUGFS_WRITE_WRAPPER_IEEE80211(name, bufsz, objtype) \
+WIPHY_DEBUGFS_WRITE_HANDLER_WRAPPER(name) \
+static ssize_t _iwl_dbgfs_##name##_write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ struct dbgfs_##name##_data *data = file->private_data; \
+ struct ieee80211_##objtype *arg = data->arg; \
+ struct iwl_mld *mld = iwl_mld_from_##objtype(arg); \
+ char buf[bufsz] = {}; \
+ \
+ return wiphy_locked_debugfs_write(mld->wiphy, file, \
+ buf, sizeof(buf), \
+ user_buf, count, \
+ iwl_dbgfs_##name##_write_handler, \
+ arg); \
+}
+
+#define IEEE80211_WIPHY_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, objtype) \
+ MLD_DEBUGFS_OPEN_WRAPPER(name, bufsz, struct ieee80211_##objtype) \
+ WIPHY_DEBUGFS_WRITE_WRAPPER_IEEE80211(name, bufsz, objtype) \
+ MLD_DEBUGFS_READ_WRAPPER(name) \
+ static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = _iwl_dbgfs_##name##_write, \
+ .read = _iwl_dbgfs_##name##_read, \
+ .open = _iwl_dbgfs_##name##_open, \
+ .llseek = generic_file_llseek, \
+ .release = _iwl_dbgfs_release, \
+ }
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.c
new file mode 100644
index 000000000000..f77ba21a174d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.c
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+#include <linux/etherdevice.h>
+#include <linux/math64.h>
+#include <net/cfg80211.h>
+#include "mld.h"
+#include "iface.h"
+#include "phy.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "constants.h"
+#include "fw/api/location.h"
+#include "ftm-initiator.h"
+
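+/* Fill the parts of the range request command that are common to all
+ * targets: request id, timeout, randomized MAC address template and the
+ * TSF reporting MAC id.
+ */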
+static void iwl_mld_ftm_cmd_common(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_tof_range_req_cmd *cmd,
+ struct cfg80211_pmsr_request *req)
+{
+ int i;
+
+ cmd->initiator_flags =
+ cpu_to_le32(IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM |
+ IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT);
+ cmd->request_id = req->cookie;
+ cmd->num_of_ap = req->n_peers;
+
+ /* Use a large value for "no timeout". Don't use the maximum value
+ * because of fw limitations.
+ */
+ if (req->timeout)
+ cmd->req_timeout_ms = cpu_to_le32(min(req->timeout, 0xfffff));
+ else
+ cmd->req_timeout_ms = cpu_to_le32(0xfffff);
+
+ memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
+ for (i = 0; i < ETH_ALEN; i++)
+ cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];
+
+ if (vif->cfg.assoc) {
+ memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
+
+ /* AP's TSF is only relevant if associated */
+ for (i = 0; i < req->n_peers; i++) {
+ if (req->peers[i].report_ap_tsf) {
+ struct iwl_mld_vif *mld_vif =
+ iwl_mld_vif_from_mac80211(vif);
+
+ cmd->tsf_mac_id = cpu_to_le32(mld_vif->fw_id);
+ return;
+ }
+ }
+ } else {
+ eth_broadcast_addr(cmd->range_req_bssid);
+ }
+
+ /* Don't report AP's TSF */
+ cmd->tsf_mac_id = cpu_to_le32(0xff);
+}
+
+static int
+iwl_mld_ftm_set_target_chandef(struct iwl_mld *mld,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry *target)
+{
+ u32 freq = peer->chandef.chan->center_freq;
+
+ target->channel_num = ieee80211_frequency_to_channel(freq);
+
+ switch (peer->chandef.width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ target->format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY;
+ target->format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ target->format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
+ target->format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ target->format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
+ target->format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ target->format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;
+ target->format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ target->format_bw = IWL_LOCATION_FRAME_FORMAT_HE;
+ target->format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS;
+ break;
+ default:
+ IWL_ERR(mld, "Unsupported BW in FTM request (%d)\n",
+ peer->chandef.width);
+ return -EINVAL;
+ }
+
+ /* non-EDCA-based measurements must use the HE preamble */
+ if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
+ target->format_bw |= IWL_LOCATION_FRAME_FORMAT_HE;
+
+ target->ctrl_ch_position =
+ (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
+ iwl_mld_get_fw_ctrl_pos(&peer->chandef) : 0;
+
+ target->band = iwl_mld_nl80211_band_to_fw(peer->chandef.chan->band);
+ return 0;
+}
+
+#define FTM_SET_FLAG(flag) (target->initiator_ap_flags |= \
+ cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag))
+
+static void
+iwl_mld_ftm_set_target_flags(struct iwl_mld *mld,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry *target)
+{
+ target->initiator_ap_flags = cpu_to_le32(0);
+
+ if (peer->ftm.asap)
+ FTM_SET_FLAG(ASAP);
+
+ if (peer->ftm.request_lci)
+ FTM_SET_FLAG(LCI_REQUEST);
+
+ if (peer->ftm.request_civicloc)
+ FTM_SET_FLAG(CIVIC_REQUEST);
+
+ if (IWL_MLD_FTM_INITIATOR_DYNACK)
+ FTM_SET_FLAG(DYN_ACK);
+
+ if (IWL_MLD_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG)
+ FTM_SET_FLAG(ALGO_LR);
+ else if (IWL_MLD_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
+ FTM_SET_FLAG(ALGO_FFT);
+
+ if (peer->ftm.trigger_based)
+ FTM_SET_FLAG(TB);
+ else if (peer->ftm.non_trigger_based)
+ FTM_SET_FLAG(NON_TB);
+
+ if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) &&
+ peer->ftm.lmr_feedback)
+ FTM_SET_FLAG(LMR_FEEDBACK);
+}
+
+static void iwl_mld_ftm_set_sta(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry *target)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ u32 sta_id_mask;
+
+ target->sta_id = IWL_INVALID_STA;
+
+ /* TODO: add ftm_unprotected debugfs support */
+
+ if (!vif->cfg.assoc || !mld_vif->ap_sta)
+ return;
+
+ sta_id_mask = iwl_mld_fw_sta_id_mask(mld, mld_vif->ap_sta);
+ if (WARN_ON(hweight32(sta_id_mask) != 1))
+ return;
+
+ target->sta_id = __ffs(sta_id_mask);
+
+ if (mld_vif->ap_sta->mfp &&
+ (peer->ftm.trigger_based || peer->ftm.non_trigger_based))
+ FTM_SET_FLAG(PMF);
+}
+
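+/* Fill a single target (AP) entry of the range request: channel and
+ * bandwidth, measurement flags, station id and the R2I/I2R NDP parameters.
+ */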
+static int
+iwl_mld_ftm_set_target(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry *target)
+{
+ u32 i2r_max_sts;
+ int ret;
+
+ ret = iwl_mld_ftm_set_target_chandef(mld, peer, target);
+ if (ret)
+ return ret;
+
+ memcpy(target->bssid, peer->addr, ETH_ALEN);
+ target->burst_period = cpu_to_le16(peer->ftm.burst_period);
+ target->samples_per_burst = peer->ftm.ftms_per_burst;
+ target->num_of_bursts = peer->ftm.num_bursts_exp;
+ iwl_mld_ftm_set_target_flags(mld, peer, target);
+ iwl_mld_ftm_set_sta(mld, vif, peer, target);
+
+ /* TODO: add secured ranging support */
+
+ i2r_max_sts = IWL_MLD_FTM_I2R_MAX_STS > 1 ? 1 :
+ IWL_MLD_FTM_I2R_MAX_STS;
+
+ target->r2i_ndp_params = IWL_MLD_FTM_R2I_MAX_REP |
+ (IWL_MLD_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS) |
+ (IWL_MLD_FTM_R2I_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS);
+ target->i2r_ndp_params = IWL_MLD_FTM_I2R_MAX_REP |
+ (i2r_max_sts << IWL_LOCATION_MAX_STS_POS) |
+ (IWL_MLD_FTM_I2R_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS);
+
+ if (peer->ftm.non_trigger_based) {
+ target->min_time_between_msr =
+ cpu_to_le16(IWL_MLD_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
+ target->burst_period =
+ cpu_to_le16(IWL_MLD_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
+ } else {
+ target->min_time_between_msr = cpu_to_le16(0);
+ }
+
+ /* TODO: Beacon interval is currently unknown, so use the common value
+ * of 100 TUs.
+ */
+ target->beacon_interval = cpu_to_le16(100);
+
+ return 0;
+}
+
+int iwl_mld_ftm_start(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
+{
+ struct iwl_tof_range_req_cmd cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ };
+ u8 i;
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (mld->ftm_initiator.req)
+ return -EBUSY;
+
+ if (req->n_peers > ARRAY_SIZE(cmd.ap))
+ return -EINVAL;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ iwl_mld_ftm_cmd_common(mld, vif, (void *)&cmd, req);
+
+ for (i = 0; i < cmd.num_of_ap; i++) {
+ struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+ struct iwl_tof_range_req_ap_entry *target = &cmd.ap[i];
+
+ ret = iwl_mld_ftm_set_target(mld, vif, peer, target);
+ if (ret)
+ return ret;
+ }
+
+ /* TODO: get the status from the response */
+ ret = iwl_mld_send_cmd(mld, &hcmd);
+ if (!ret) {
+ mld->ftm_initiator.req = req;
+ mld->ftm_initiator.req_wdev = ieee80211_vif_to_wdev(vif);
+ }
+
+ return ret;
+}
+
+static void iwl_mld_ftm_reset(struct iwl_mld *mld)
+{
+ lockdep_assert_wiphy(mld->wiphy);
+
+ mld->ftm_initiator.req = NULL;
+ mld->ftm_initiator.req_wdev = NULL;
+ memset(mld->ftm_initiator.responses, 0,
+ sizeof(mld->ftm_initiator.responses));
+}
+
+static int iwl_mld_ftm_range_resp_valid(struct iwl_mld *mld, u8 request_id,
+ u8 num_of_aps)
+{
+ if (IWL_FW_CHECK(mld, request_id != (u8)mld->ftm_initiator.req->cookie,
+ "Request ID mismatch, got %u, active %u\n",
+ request_id, (u8)mld->ftm_initiator.req->cookie))
+ return -EINVAL;
+
+ if (IWL_FW_CHECK(mld, num_of_aps > mld->ftm_initiator.req->n_peers ||
+ num_of_aps > IWL_TOF_MAX_APS,
+ "FTM range response: invalid num of APs (%u)\n",
+ num_of_aps))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int iwl_mld_ftm_find_peer(struct cfg80211_pmsr_request *req,
+ const u8 *addr)
+{
+ for (int i = 0; i < req->n_peers; i++) {
+ struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+
+ if (ether_addr_equal_unaligned(peer->addr, addr))
+ return i;
+ }
+
+ return -ENOENT;
+}
+
+static void iwl_mld_debug_range_resp(struct iwl_mld *mld, u8 index,
+ struct cfg80211_pmsr_result *res)
+{
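+ /* rtt_avg is in picoseconds; distance (cm) = rtt * c / 2 ~= rtt * 100 / 6666 */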
+ s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666);
+
+ IWL_DEBUG_INFO(mld, "entry %d\n", index);
+ IWL_DEBUG_INFO(mld, "\tstatus: %d\n", res->status);
+ IWL_DEBUG_INFO(mld, "\tBSSID: %pM\n", res->addr);
+ IWL_DEBUG_INFO(mld, "\thost time: %llu\n", res->host_time);
+ IWL_DEBUG_INFO(mld, "\tburst index: %d\n", res->ftm.burst_index);
+ IWL_DEBUG_INFO(mld, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes);
+ IWL_DEBUG_INFO(mld, "\trssi: %d\n", res->ftm.rssi_avg);
+ IWL_DEBUG_INFO(mld, "\trssi spread: %d\n", res->ftm.rssi_spread);
+ IWL_DEBUG_INFO(mld, "\trtt: %lld\n", res->ftm.rtt_avg);
+ IWL_DEBUG_INFO(mld, "\trtt var: %llu\n", res->ftm.rtt_variance);
+ IWL_DEBUG_INFO(mld, "\trtt spread: %llu\n", res->ftm.rtt_spread);
+ IWL_DEBUG_INFO(mld, "\tdistance: %lld\n", rtt_avg);
+}
+
+void iwl_mld_handle_ftm_resp_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_tof_range_rsp_ntfy *fw_resp = (void *)pkt->data;
+ u8 num_of_aps, last_in_batch;
+
+ if (IWL_FW_CHECK(mld, !mld->ftm_initiator.req,
+ "FTM response without a pending request\n"))
+ return;
+
+ if (iwl_mld_ftm_range_resp_valid(mld, fw_resp->request_id,
+ fw_resp->num_of_aps))
+ return;
+
+ num_of_aps = fw_resp->num_of_aps;
+ last_in_batch = fw_resp->last_report;
+
+ IWL_DEBUG_INFO(mld, "Range response received\n");
+ IWL_DEBUG_INFO(mld, "request id: %llu, num of entries: %u\n",
+ mld->ftm_initiator.req->cookie, num_of_aps);
+
+ for (int i = 0; i < num_of_aps; i++) {
+ struct cfg80211_pmsr_result result = {};
+ struct iwl_tof_range_rsp_ap_entry_ntfy *fw_ap;
+ int peer_idx;
+
+ fw_ap = &fw_resp->ap[i];
+ result.final = fw_ap->last_burst;
+ result.ap_tsf = le32_to_cpu(fw_ap->start_tsf);
+ result.ap_tsf_valid = 1;
+
+ peer_idx = iwl_mld_ftm_find_peer(mld->ftm_initiator.req,
+ fw_ap->bssid);
+ if (peer_idx < 0) {
+ IWL_WARN(mld,
+ "Unknown address (%pM, target #%d) in FTM response\n",
+ fw_ap->bssid, i);
+ continue;
+ }
+
+ switch (fw_ap->measure_status) {
+ case IWL_TOF_ENTRY_SUCCESS:
+ result.status = NL80211_PMSR_STATUS_SUCCESS;
+ break;
+ case IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT:
+ result.status = NL80211_PMSR_STATUS_TIMEOUT;
+ break;
+ case IWL_TOF_ENTRY_NO_RESPONSE:
+ result.status = NL80211_PMSR_STATUS_FAILURE;
+ result.ftm.failure_reason =
+ NL80211_PMSR_FTM_FAILURE_NO_RESPONSE;
+ break;
+ case IWL_TOF_ENTRY_REQUEST_REJECTED:
+ result.status = NL80211_PMSR_STATUS_FAILURE;
+ result.ftm.failure_reason =
+ NL80211_PMSR_FTM_FAILURE_PEER_BUSY;
+ result.ftm.busy_retry_time = fw_ap->refusal_period;
+ break;
+ default:
+ result.status = NL80211_PMSR_STATUS_FAILURE;
+ result.ftm.failure_reason =
+ NL80211_PMSR_FTM_FAILURE_UNSPECIFIED;
+ break;
+ }
+ memcpy(result.addr, fw_ap->bssid, ETH_ALEN);
+
+ /* TODO: convert the timestamp from the result to systime */
+ result.host_time = ktime_get_boottime_ns();
+
+ result.type = NL80211_PMSR_TYPE_FTM;
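+ /* Use the per-peer response count as the burst index for periodic requests */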
+ result.ftm.burst_index = mld->ftm_initiator.responses[peer_idx];
+ mld->ftm_initiator.responses[peer_idx]++;
+ result.ftm.rssi_avg = fw_ap->rssi;
+ result.ftm.rssi_avg_valid = 1;
+ result.ftm.rssi_spread = fw_ap->rssi_spread;
+ result.ftm.rssi_spread_valid = 1;
+ result.ftm.rtt_avg = (s32)le32_to_cpu(fw_ap->rtt);
+ result.ftm.rtt_avg_valid = 1;
+ result.ftm.rtt_variance = le32_to_cpu(fw_ap->rtt_variance);
+ result.ftm.rtt_variance_valid = 1;
+ result.ftm.rtt_spread = le32_to_cpu(fw_ap->rtt_spread);
+ result.ftm.rtt_spread_valid = 1;
+
+ cfg80211_pmsr_report(mld->ftm_initiator.req_wdev,
+ mld->ftm_initiator.req,
+ &result, GFP_KERNEL);
+
+ if (fw_has_api(&mld->fw->ucode_capa,
+ IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
+ IWL_DEBUG_INFO(mld, "RTT confidence: %u\n",
+ fw_ap->rttConfidence);
+
+ iwl_mld_debug_range_resp(mld, i, &result);
+ }
+
+ if (last_in_batch) {
+ cfg80211_pmsr_complete(mld->ftm_initiator.req_wdev,
+ mld->ftm_initiator.req,
+ GFP_KERNEL);
+ iwl_mld_ftm_reset(mld);
+ }
+}
+
+void iwl_mld_ftm_restart_cleanup(struct iwl_mld *mld)
+{
+ struct cfg80211_pmsr_result result = {
+ .status = NL80211_PMSR_STATUS_FAILURE,
+ .final = 1,
+ .host_time = ktime_get_boottime_ns(),
+ .type = NL80211_PMSR_TYPE_FTM,
+ };
+
+ if (!mld->ftm_initiator.req)
+ return;
+
+ for (int i = 0; i < mld->ftm_initiator.req->n_peers; i++) {
+ memcpy(result.addr, mld->ftm_initiator.req->peers[i].addr,
+ ETH_ALEN);
+
+ cfg80211_pmsr_report(mld->ftm_initiator.req_wdev,
+ mld->ftm_initiator.req,
+ &result, GFP_KERNEL);
+ }
+
+ cfg80211_pmsr_complete(mld->ftm_initiator.req_wdev,
+ mld->ftm_initiator.req, GFP_KERNEL);
+ iwl_mld_ftm_reset(mld);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.h b/drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.h
new file mode 100644
index 000000000000..3fab25a52508
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+#ifndef __iwl_mld_ftm_initiator_h__
+#define __iwl_mld_ftm_initiator_h__
+
+/**
+ * struct ftm_initiator_data - FTM initiator data
+ *
+ * @req: a pointer to cfg80211 FTM request
+ * @req_wdev: a pointer to the wdev that requested the current FTM request
+ * @responses: the number of responses received for the current FTM session.
+ * Used for tracking the burst index in a periodic request.
+ */
+struct ftm_initiator_data {
+ struct cfg80211_pmsr_request *req;
+ struct wireless_dev *req_wdev;
+ int responses[IWL_TOF_MAX_APS];
+};
+
+int iwl_mld_ftm_start(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req);
+
+void iwl_mld_handle_ftm_resp_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+void iwl_mld_ftm_restart_cleanup(struct iwl_mld *mld);
+
+#endif /* __iwl_mld_ftm_initiator_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/fw.c b/drivers/net/wireless/intel/iwlwifi/mld/fw.c
new file mode 100644
index 000000000000..62da137e1024
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/fw.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include "mld.h"
+
+#include "fw/api/alive.h"
+#include "fw/api/scan.h"
+#include "fw/api/rx.h"
+#include "fw/dbg.h"
+#include "fw/pnvm.h"
+#include "hcmd.h"
+#include "iwl-nvm-parse.h"
+#include "power.h"
+#include "mcc.h"
+#include "led.h"
+#include "coex.h"
+#include "regulatory.h"
+#include "thermal.h"
+
+static int iwl_mld_send_tx_ant_cfg(struct iwl_mld *mld)
+{
+ struct iwl_tx_ant_cfg_cmd cmd;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ cmd.valid = cpu_to_le32(iwl_mld_get_valid_tx_ant(mld));
+
+ IWL_DEBUG_FW(mld, "select valid tx ant: %u\n", cmd.valid);
+
+ return iwl_mld_send_cmd_pdu(mld, TX_ANT_CONFIGURATION_CMD, &cmd);
+}
+
+static int iwl_mld_send_rss_cfg_cmd(struct iwl_mld *mld)
+{
+ struct iwl_rss_config_cmd cmd = {
+ .flags = cpu_to_le32(IWL_RSS_ENABLE),
+ .hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) |
+ BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) |
+ BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) |
+ BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) |
+ BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) |
+ BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
+ };
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* Do not direct RSS traffic to Q 0, which is our fallback queue */
+ for (int i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
+ cmd.indirection_table[i] =
+ 1 + (i % (mld->trans->num_rx_queues - 1));
+ netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
+
+ return iwl_mld_send_cmd_pdu(mld, RSS_CONFIG_CMD, &cmd);
+}
+
+static int iwl_mld_config_scan(struct iwl_mld *mld)
+{
+ struct iwl_scan_config cmd = {
+ .tx_chains = cpu_to_le32(iwl_mld_get_valid_tx_ant(mld)),
+ .rx_chains = cpu_to_le32(iwl_mld_get_valid_rx_ant(mld))
+ };
+
+ return iwl_mld_send_cmd_pdu(mld, WIDE_ID(LONG_GROUP, SCAN_CFG_CMD),
+ &cmd);
+}
+
+static void iwl_mld_alive_imr_data(struct iwl_trans *trans,
+ const struct iwl_imr_alive_info *imr_info)
+{
+ struct iwl_imr_data *imr_data = &trans->dbg.imr_data;
+
+ imr_data->imr_enable = le32_to_cpu(imr_info->enabled);
+ imr_data->imr_size = le32_to_cpu(imr_info->size);
+ imr_data->imr2sram_remainbyte = imr_data->imr_size;
+ imr_data->imr_base_addr = imr_info->base_addr;
+ imr_data->imr_curr_addr = le64_to_cpu(imr_data->imr_base_addr);
+
+ if (imr_data->imr_enable)
+ return;
+
+ for (int i = 0; i < ARRAY_SIZE(trans->dbg.active_regions); i++) {
+ struct iwl_fw_ini_region_tlv *reg;
+
+ if (!trans->dbg.active_regions[i])
+ continue;
+
+ reg = (void *)trans->dbg.active_regions[i]->data;
+
+ /* We have only one DRAM IMR region, so we
+ * can break as soon as we find the first
+ * one.
+ */
+ if (reg->type == IWL_FW_INI_REGION_DRAM_IMR) {
+ trans->dbg.unsupported_region_msk |= BIT(i);
+ break;
+ }
+ }
+}
+
+static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
+ struct iwl_mld *mld =
+ container_of(notif_wait, struct iwl_mld, notif_wait);
+ struct iwl_trans *trans = mld->trans;
+ u32 version = iwl_fw_lookup_notif_ver(mld->fw, LEGACY_GROUP,
+ UCODE_ALIVE_NTFY, 0);
+ struct iwl_alive_ntf_v6 *palive;
+ bool *alive_valid = data;
+ struct iwl_umac_alive *umac;
+ struct iwl_lmac_alive *lmac1;
+ struct iwl_lmac_alive *lmac2 = NULL;
+ u32 lmac_error_event_table;
+ u32 umac_error_table;
+ u16 status;
+
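+ /* Only alive notification versions 6-7 (sharing the v6 layout) are handled */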
+ if (version < 6 || version > 7 || pkt_len != sizeof(*palive))
+ return false;
+
+ palive = (void *)pkt->data;
+
+ iwl_mld_alive_imr_data(trans, &palive->imr);
+
+ umac = &palive->umac_data;
+ lmac1 = &palive->lmac_data[0];
+ lmac2 = &palive->lmac_data[1];
+ status = le16_to_cpu(palive->status);
+
+ trans->sku_id[0] = le32_to_cpu(palive->sku_id.data[0]);
+ trans->sku_id[1] = le32_to_cpu(palive->sku_id.data[1]);
+ trans->sku_id[2] = le32_to_cpu(palive->sku_id.data[2]);
+
+ IWL_DEBUG_FW(mld, "Got sku_id: 0x0%x 0x0%x 0x0%x\n",
+ trans->sku_id[0], trans->sku_id[1], trans->sku_id[2]);
+
+ lmac_error_event_table =
+ le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr);
+ iwl_fw_lmac1_set_alive_err_table(trans, lmac_error_event_table);
+
+ if (lmac2)
+ trans->dbg.lmac_error_event_table[1] =
+ le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr);
+
+ umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr) &
+ ~FW_ADDR_CACHE_CONTROL;
+
+ if (umac_error_table >= trans->cfg->min_umac_error_event_table)
+ iwl_fw_umac_set_alive_err_table(trans, umac_error_table);
+ else
+ IWL_ERR(mld, "Not valid error log pointer 0x%08X\n",
+ umac_error_table);
+
+ *alive_valid = status == IWL_ALIVE_STATUS_OK;
+
+ IWL_DEBUG_FW(mld,
+ "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
+ status, lmac1->ver_type, lmac1->ver_subtype);
+
+ if (lmac2)
+ IWL_DEBUG_FW(mld, "Alive ucode CDB\n");
+
+ IWL_DEBUG_FW(mld,
+ "UMAC version: Major - 0x%x, Minor - 0x%x\n",
+ le32_to_cpu(umac->umac_major),
+ le32_to_cpu(umac->umac_minor));
+
+ if (version >= 7)
+ IWL_DEBUG_FW(mld, "FW alive flags 0x%x\n",
+ le16_to_cpu(palive->flags));
+
+ iwl_fwrt_update_fw_versions(&mld->fwrt, lmac1, umac);
+
+ return true;
+}
+
+#define MLD_ALIVE_TIMEOUT (2 * HZ)
+#define MLD_INIT_COMPLETE_TIMEOUT (2 * HZ)
+
+static void iwl_mld_print_alive_notif_timeout(struct iwl_mld *mld)
+{
+ struct iwl_trans *trans = mld->trans;
+ struct iwl_pc_data *pc_data;
+ u8 count;
+
+ IWL_ERR(mld,
+ "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
+ iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
+ iwl_read_umac_prph(trans,
+ UMAG_SB_CPU_2_STATUS));
+#define IWL_FW_PRINT_REG_INFO(reg_name) \
+ IWL_ERR(mld, #reg_name ": 0x%x\n", iwl_read_umac_prph(trans, reg_name))
+
+ IWL_FW_PRINT_REG_INFO(WFPM_LMAC1_PD_NOTIFICATION);
+
+ IWL_FW_PRINT_REG_INFO(HPM_SECONDARY_DEVICE_STATE);
+
+ /* print OTP info */
+ IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_ADDR);
+ IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_DATA);
+#undef IWL_FW_PRINT_REG_INFO
+
+ pc_data = trans->dbg.pc_data;
+ for (count = 0; count < trans->dbg.num_pc; count++, pc_data++)
+ IWL_ERR(mld, "%s: 0x%x\n", pc_data->pc_name,
+ pc_data->pc_address);
+}
+
+static int iwl_mld_load_fw_wait_alive(struct iwl_mld *mld)
+{
+ const struct fw_img *fw =
+ iwl_get_ucode_image(mld->fw, IWL_UCODE_REGULAR);
+ static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY };
+ struct iwl_notification_wait alive_wait;
+ bool alive_valid = false;
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ iwl_init_notification_wait(&mld->notif_wait, &alive_wait,
+ alive_cmd, ARRAY_SIZE(alive_cmd),
+ iwl_alive_fn, &alive_valid);
+
+ iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);
+
+ ret = iwl_trans_start_fw(mld->trans, fw, true);
+ if (ret) {
+ iwl_remove_notification(&mld->notif_wait, &alive_wait);
+ return ret;
+ }
+
+ ret = iwl_wait_notification(&mld->notif_wait, &alive_wait,
+ MLD_ALIVE_TIMEOUT);
+
+ if (ret) {
+ if (ret == -ETIMEDOUT)
+ iwl_fw_dbg_error_collect(&mld->fwrt,
+ FW_DBG_TRIGGER_ALIVE_TIMEOUT);
+ iwl_mld_print_alive_notif_timeout(mld);
+ goto alive_failure;
+ }
+
+ if (!alive_valid) {
+ IWL_ERR(mld, "Loaded firmware is not valid!\n");
+ ret = -EIO;
+ goto alive_failure;
+ }
+
+ iwl_trans_fw_alive(mld->trans, 0);
+
+ return 0;
+
+alive_failure:
+ iwl_trans_stop_device(mld->trans);
+ return ret;
+}
+
+static int iwl_mld_run_fw_init_sequence(struct iwl_mld *mld)
+{
+ struct iwl_notification_wait init_wait;
+ struct iwl_init_extended_cfg_cmd init_cfg = {};
+ static const u16 init_complete[] = {
+ INIT_COMPLETE_NOTIF,
+ };
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ ret = iwl_mld_load_fw_wait_alive(mld);
+ if (ret)
+ return ret;
+
+ mld->trans->step_urm =
+ !!(iwl_read_umac_prph(mld->trans, CNVI_PMU_STEP_FLOW) &
+ CNVI_PMU_STEP_FLOW_FORCE_URM);
+
+ ret = iwl_pnvm_load(mld->trans, &mld->notif_wait,
+ &mld->fw->ucode_capa);
+ if (ret) {
+ IWL_ERR(mld, "Timeout waiting for PNVM load %d\n", ret);
+ goto init_failure;
+ }
+
+ iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
+ NULL);
+
+ iwl_init_notification_wait(&mld->notif_wait,
+ &init_wait,
+ init_complete,
+ ARRAY_SIZE(init_complete),
+ NULL, NULL);
+
+ ret = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(SYSTEM_GROUP, INIT_EXTENDED_CFG_CMD),
+ &init_cfg);
+ if (ret) {
+ IWL_ERR(mld, "Failed to send init config command: %d\n", ret);
+ iwl_remove_notification(&mld->notif_wait, &init_wait);
+ goto init_failure;
+ }
+
+ ret = iwl_wait_notification(&mld->notif_wait, &init_wait,
+ MLD_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ IWL_ERR(mld, "Failed to get INIT_COMPLETE %d\n", ret);
+ goto init_failure;
+ }
+
+ if (!mld->nvm_data) {
+ mld->nvm_data = iwl_get_nvm(mld->trans, mld->fw, 0, 0);
+ if (IS_ERR(mld->nvm_data)) {
+ ret = PTR_ERR(mld->nvm_data);
+ mld->nvm_data = NULL;
+ IWL_ERR(mld, "Failed to read NVM: %d\n", ret);
+ goto init_failure;
+ }
+ }
+
+ return 0;
+
+init_failure:
+ iwl_trans_stop_device(mld->trans);
+ return ret;
+}
+
+int iwl_mld_load_fw(struct iwl_mld *mld)
+{
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ ret = iwl_trans_start_hw(mld->trans);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_run_fw_init_sequence(mld);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_init_mcc(mld);
+ if (ret)
+ return ret;
+
+ mld->fw_status.running = true;
+
+ return 0;
+}
+
+void iwl_mld_stop_fw(struct iwl_mld *mld)
+{
+ lockdep_assert_wiphy(mld->wiphy);
+
+ iwl_abort_notification_waits(&mld->notif_wait);
+
+ iwl_fw_dbg_stop_sync(&mld->fwrt);
+
+ iwl_trans_stop_device(mld->trans);
+
+ mld->fw_status.running = false;
+}
+
+static void iwl_mld_restart_disconnect_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ if (vif->type == NL80211_IFTYPE_STATION)
+ ieee80211_hw_restart_disconnect(vif);
+}
+
+void iwl_mld_send_recovery_cmd(struct iwl_mld *mld, u32 flags)
+{
+ u32 error_log_size = mld->fw->ucode_capa.error_log_size;
+ struct iwl_fw_error_recovery_cmd recovery_cmd = {
+ .flags = cpu_to_le32(flags),
+ };
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD),
+ .flags = CMD_WANT_SKB,
+ .data = {&recovery_cmd, },
+ .len = {sizeof(recovery_cmd), },
+ };
+ int ret;
+
+ /* no error log was defined in TLV */
+ if (!error_log_size)
+ return;
+
+ if (flags & ERROR_RECOVERY_UPDATE_DB) {
+ /* no buf was allocated upon NIC error */
+ if (!mld->error_recovery_buf)
+ return;
+
+ cmd.data[1] = mld->error_recovery_buf;
+ cmd.len[1] = error_log_size;
+ cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
+ recovery_cmd.buf_size = cpu_to_le32(error_log_size);
+ }
+
+ ret = iwl_mld_send_cmd(mld, &cmd);
+
+ /* we no longer need the recovery buffer */
+ kfree(mld->error_recovery_buf);
+ mld->error_recovery_buf = NULL;
+
+ if (ret) {
+ IWL_ERR(mld, "Failed to send recovery cmd %d\n", ret);
+ return;
+ }
+
+ if (flags & ERROR_RECOVERY_UPDATE_DB) {
+ struct iwl_rx_packet *pkt = cmd.resp_pkt;
+ u32 pkt_len = iwl_rx_packet_payload_len(pkt);
+ u32 resp;
+
+ if (IWL_FW_CHECK(mld, pkt_len != sizeof(resp),
+ "Unexpected recovery cmd response size %u (expected %zu)\n",
+ pkt_len, sizeof(resp)))
+ goto out;
+
+ resp = le32_to_cpup((__le32 *)cmd.resp_pkt->data);
+ if (!resp)
+ goto out;
+
+ IWL_ERR(mld,
+ "Failed to send recovery cmd blob was invalid %d\n",
+ resp);
+
+ ieee80211_iterate_interfaces(mld->hw, 0,
+ iwl_mld_restart_disconnect_iter,
+ NULL);
+ }
+
+out:
+ iwl_free_resp(&cmd);
+}
+
+static int iwl_mld_config_fw(struct iwl_mld *mld)
+{
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ iwl_fw_disable_dbg_asserts(&mld->fwrt);
+ iwl_get_shared_mem_conf(&mld->fwrt);
+
+ ret = iwl_mld_send_tx_ant_cfg(mld);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_send_bt_init_conf(mld);
+ if (ret)
+ return ret;
+
+ ret = iwl_set_soc_latency(&mld->fwrt);
+ if (ret)
+ return ret;
+
+ iwl_mld_configure_lari(mld);
+
+ ret = iwl_mld_config_temp_report_ths(mld);
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_THERMAL
+ ret = iwl_mld_config_ctdp(mld, mld->cooling_dev.cur_state,
+ CTDP_CMD_OPERATION_START);
+ if (ret)
+ return ret;
+#endif
+
+ ret = iwl_configure_rxq(&mld->fwrt);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_send_rss_cfg_cmd(mld);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_config_scan(mld);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_update_device_power(mld, false);
+ if (ret)
+ return ret;
+
+ if (mld->fw_status.in_hw_restart) {
+ iwl_mld_send_recovery_cmd(mld, ERROR_RECOVERY_UPDATE_DB);
+ iwl_mld_time_sync_fw_config(mld);
+ }
+
+ iwl_mld_led_config_fw(mld);
+
+ ret = iwl_mld_init_ppag(mld);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_init_sar(mld);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_init_sgom(mld);
+ if (ret)
+ return ret;
+
+ iwl_mld_init_tas(mld);
+ iwl_mld_init_uats(mld);
+
+ return 0;
+}
+
+int iwl_mld_start_fw(struct iwl_mld *mld)
+{
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ ret = iwl_mld_load_fw(mld);
+ if (IWL_FW_CHECK(mld, ret, "Failed to start firmware %d\n", ret)) {
+ iwl_fw_dbg_error_collect(&mld->fwrt, FW_DBG_TRIGGER_DRIVER);
+ goto error;
+ }
+
+ IWL_DEBUG_INFO(mld, "uCode started.\n");
+
+ ret = iwl_mld_config_fw(mld);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ iwl_mld_stop_fw(mld);
+ return ret;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/hcmd.h b/drivers/net/wireless/intel/iwlwifi/mld/hcmd.h
new file mode 100644
index 000000000000..64a8d4248324
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/hcmd.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_hcmd_h__
+#define __iwl_mld_hcmd_h__
+
+static inline int iwl_mld_send_cmd(struct iwl_mld *mld, struct iwl_host_cmd *cmd)
+{
+ /* No commands, including the d3 related commands, should be sent
+ * after entering d3
+ */
+#ifdef CONFIG_PM_SLEEP
+ if (WARN_ON(mld->fw_status.in_d3))
+ return -EIO;
+#endif
+
+ if (!(cmd->flags & CMD_ASYNC))
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* Devices that need to shut down immediately on rfkill are not
+ * supported, so we can send all the cmds in rfkill
+ */
+ cmd->flags |= CMD_SEND_IN_RFKILL;
+
+ return iwl_trans_send_cmd(mld->trans, cmd);
+}
+
+static inline int
+__iwl_mld_send_cmd_with_flags_pdu(struct iwl_mld *mld, u32 id,
+ u32 flags, const void *data, u16 len)
+{
+ struct iwl_host_cmd cmd = {
+ .id = id,
+ .len = { data ? len : 0, },
+ .data = { data, },
+ .flags = flags,
+ };
+
+ return iwl_mld_send_cmd(mld, &cmd);
+}
+
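+/* iwl_mld_send_cmd_with_flags_pdu() takes an optional trailing length;
+ * if omitted, sizeof(*(data)) is used via the variadic helper below.
+ */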
+#define _iwl_mld_send_cmd_with_flags_pdu(mld, id, flags, data, len, \
+ ignored...) \
+ __iwl_mld_send_cmd_with_flags_pdu(mld, id, flags, data, len)
+#define iwl_mld_send_cmd_with_flags_pdu(mld, id, flags, data, len...) \
+ _iwl_mld_send_cmd_with_flags_pdu(mld, id, flags, data, ##len, \
+ sizeof(*(data)))
+
+#define iwl_mld_send_cmd_pdu(mld, id, ...) \
+ iwl_mld_send_cmd_with_flags_pdu(mld, id, 0, __VA_ARGS__)
+
+#define iwl_mld_send_cmd_empty(mld, id) \
+ iwl_mld_send_cmd_with_flags_pdu(mld, id, 0, NULL, 0)
+
+#endif /* __iwl_mld_hcmd_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/iface.c b/drivers/net/wireless/intel/iwlwifi/mld/iface.c
new file mode 100644
index 000000000000..e49e2260ac05
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/iface.c
@@ -0,0 +1,671 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include <net/cfg80211.h>
+
+#include "iface.h"
+#include "hcmd.h"
+#include "key.h"
+#include "mlo.h"
+#include "mac80211.h"
+
+#include "fw/api/context.h"
+#include "fw/api/mac.h"
+#include "fw/api/time-event.h"
+#include "fw/api/datapath.h"
+
+/* Cleanup function for struct iwl_mld_vif, will be called in restart */
+void iwl_mld_cleanup_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld *mld = mld_vif->mld;
+ struct iwl_mld_link *link;
+
+ /* EMLSR is turned back on during recovery */
+ vif->driver_flags &= ~IEEE80211_VIF_EML_ACTIVE;
+
+ mld_vif->roc_activity = ROC_NUM_ACTIVITIES;
+
+ for_each_mld_vif_valid_link(mld_vif, link) {
+ iwl_mld_cleanup_link(mld_vif->mld, link);
+
+ /* Correctly allocated primary link in non-MLO mode */
+ if (!ieee80211_vif_is_mld(vif) &&
+ link_id == 0 && link == &mld_vif->deflink)
+ continue;
+
+ if (vif->active_links & BIT(link_id))
+ continue;
+
+ /* Should not happen as link removal should always succeed */
+ WARN_ON(1);
+ if (link != &mld_vif->deflink)
+ kfree_rcu(link, rcu_head);
+ RCU_INIT_POINTER(mld_vif->link[link_id], NULL);
+ }
+
+ ieee80211_iter_keys(mld->hw, vif, iwl_mld_cleanup_keys_iter, NULL);
+
+ CLEANUP_STRUCT(mld_vif);
+}
+
+static int iwl_mld_send_mac_cmd(struct iwl_mld *mld,
+ struct iwl_mac_config_cmd *cmd)
+{
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ ret = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(MAC_CONF_GROUP, MAC_CONFIG_CMD),
+ cmd);
+ if (ret)
+ IWL_ERR(mld, "Failed to send MAC_CONFIG_CMD ret = %d\n", ret);
+
+ return ret;
+}
+
+int iwl_mld_mac80211_iftype_to_fw(const struct ieee80211_vif *vif)
+{
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ return vif->p2p ? FW_MAC_TYPE_P2P_STA : FW_MAC_TYPE_BSS_STA;
+ case NL80211_IFTYPE_AP:
+ return FW_MAC_TYPE_GO;
+ case NL80211_IFTYPE_MONITOR:
+ return FW_MAC_TYPE_LISTENER;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ return FW_MAC_TYPE_P2P_DEVICE;
+ case NL80211_IFTYPE_ADHOC:
+ return FW_MAC_TYPE_IBSS;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ return FW_MAC_TYPE_BSS_STA;
+}
+
+static bool iwl_mld_is_nic_ack_enabled(struct iwl_mld *mld,
+ struct ieee80211_vif *vif)
+{
+ const struct ieee80211_supported_band *sband;
+ const struct ieee80211_sta_he_cap *own_he_cap;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* This capability is the same for all bands,
+ * so take it from one of them.
+ */
+ sband = mld->hw->wiphy->bands[NL80211_BAND_2GHZ];
+ own_he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif);
+
+ return own_he_cap && (own_he_cap->he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_ACK_EN);
+}
+
+/* fill the common part for all interface types */
+static void iwl_mld_mac_cmd_fill_common(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_config_cmd *cmd,
+ u32 action)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct ieee80211_bss_conf *link_conf;
+ unsigned int link_id;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ cmd->id_and_color = cpu_to_le32(mld_vif->fw_id);
+ cmd->action = cpu_to_le32(action);
+
+ cmd->mac_type =
+ cpu_to_le32(iwl_mld_mac80211_iftype_to_fw(vif));
+
+ memcpy(cmd->local_mld_addr, vif->addr, ETH_ALEN);
+
+ if (iwlwifi_mod_params.disable_11ax)
+ return;
+
+ cmd->nic_not_ack_enabled =
+ cpu_to_le32(!iwl_mld_is_nic_ack_enabled(mld, vif));
+
+ /* If we have MLO enabled, then the firmware needs to enable
+ * address translation for the station(s) we add. That depends
+ * on having EHT enabled in firmware, which in turn depends on
+ * mac80211 in the code below.
+ * However, mac80211 doesn't enable HE/EHT until it has parsed
+ * the association response successfully, so just skip all that
+ * and enable both when we have MLO.
+ */
+ if (ieee80211_vif_is_mld(vif)) {
+ if (vif->type == NL80211_IFTYPE_AP)
+ cmd->he_ap_support = cpu_to_le16(1);
+ else
+ cmd->he_support = cpu_to_le16(1);
+
+ cmd->eht_support = cpu_to_le32(1);
+ return;
+ }
+
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ if (!link_conf->he_support)
+ continue;
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ cmd->he_ap_support = cpu_to_le16(1);
+ else
+ cmd->he_support = cpu_to_le16(1);
+
+ /* EHT, if supported, was already set above */
+ break;
+ }
+}
+
+static void iwl_mld_fill_mac_cmd_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif, u32 action,
+ struct iwl_mac_config_cmd *cmd)
+{
+ struct ieee80211_bss_conf *link;
+ u32 twt_policy = 0;
+ int link_id;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ WARN_ON(vif->type != NL80211_IFTYPE_STATION);
+
+ /* We always want to hear MCAST frames; if we're not authorized yet,
+ * we'll drop them.
+ */
+ cmd->filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_GRP);
+
+ /* Adding a MAC ctxt with is_assoc set is not allowed in fw
+ * (and shouldn't happen)
+ */
+ if (vif->cfg.assoc && action != FW_CTXT_ACTION_ADD) {
+ cmd->client.is_assoc = 1;
+
+ if (!iwl_mld_vif_from_mac80211(vif)->authorized)
+ cmd->client.data_policy |=
+ cpu_to_le16(COEX_HIGH_PRIORITY_ENABLE);
+ } else {
+ /* Allow beacons to pass through as long as we are not
+ * associated
+ */
+ cmd->filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_BEACON);
+ }
+
+ cmd->client.assoc_id = cpu_to_le16(vif->cfg.aid);
+
+ if (ieee80211_vif_is_mld(vif)) {
+ u16 esr_transition_timeout =
+ u16_get_bits(vif->cfg.eml_cap,
+ IEEE80211_EML_CAP_TRANSITION_TIMEOUT);
+
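+ /* Cap the EMLSR transition timeout at the 128 TU encoding */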
+ cmd->client.esr_transition_timeout =
+ min_t(u16, IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128TU,
+ esr_transition_timeout);
+ cmd->client.medium_sync_delay =
+ cpu_to_le16(vif->cfg.eml_med_sync_delay);
+ }
+
+ for_each_vif_active_link(vif, link, link_id) {
+ if (!link->he_support)
+ continue;
+
+ if (link->twt_requester)
+ twt_policy |= TWT_SUPPORTED;
+ if (link->twt_protected)
+ twt_policy |= PROTECTED_TWT_SUPPORTED;
+ if (link->twt_broadcast)
+ twt_policy |= BROADCAST_TWT_SUPPORTED;
+ }
+
+ if (!iwlwifi_mod_params.disable_11ax)
+ cmd->client.data_policy |= cpu_to_le16(twt_policy);
+
+ if (vif->probe_req_reg && vif->cfg.assoc && vif->p2p)
+ cmd->filter_flags |=
+ cpu_to_le32(MAC_CFG_FILTER_ACCEPT_PROBE_REQ);
+
+ if (vif->p2p)
+ cmd->client.ctwin =
+ cpu_to_le32(vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
+ IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
+}
+
+static void iwl_mld_fill_mac_cmd_ap(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_config_cmd *cmd)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ WARN_ON(vif->type != NL80211_IFTYPE_AP);
+
+ cmd->filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_PROBE_REQ);
+
+ /* In AP mode, pass beacons from other APs (needed for HT protection).
+ * When there are no associated stations, we are not TXing anyway, so
+ * don't ask the FW to pass beacons, to prevent unnecessary wake-ups.
+ */
+ if (mld_vif->num_associated_stas)
+ cmd->filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_BEACON);
+}
+
+static void iwl_mld_go_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+ bool *go_active = _data;
+
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_P2P_GO &&
+ iwl_mld_vif_from_mac80211(vif)->ap_ibss_active)
+ *go_active = true;
+}
+
+static bool iwl_mld_p2p_dev_has_extended_disc(struct iwl_mld *mld)
+{
+ bool go_active = false;
+
+ /* This flag should be set to true when the P2P Device is
+ * discoverable and there is at least one P2P GO. Setting
+ * this flag will allow the P2P Device to be discoverable on other
+ * channels in addition to its listen channel.
+ * Note that this flag should not be set in other cases as it opens the
+ * Rx filters on all MACs and increases the number of interrupts.
+ */
+ ieee80211_iterate_active_interfaces(mld->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ iwl_mld_go_iterator, &go_active);
+
+ return go_active;
+}
+
+static void iwl_mld_fill_mac_cmd_p2p_dev(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_config_cmd *cmd)
+{
+ bool ext_disc = iwl_mld_p2p_dev_has_extended_disc(mld);
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* Override the filter flags to accept all management frames. This is
+ * needed to support both P2P device discovery using probe requests and
+ * P2P service discovery using action frames
+ */
+ cmd->filter_flags = cpu_to_le32(MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT);
+
+ if (ext_disc)
+ cmd->p2p_dev.is_disc_extended = cpu_to_le32(1);
+}
+
+static void iwl_mld_fill_mac_cmd_ibss(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_config_cmd *cmd)
+{
+ lockdep_assert_wiphy(mld->wiphy);
+
+ WARN_ON(vif->type != NL80211_IFTYPE_ADHOC);
+
+ cmd->filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_BEACON |
+ MAC_CFG_FILTER_ACCEPT_PROBE_REQ |
+ MAC_CFG_FILTER_ACCEPT_GRP);
+}
+
+static int
+iwl_mld_rm_mac_from_fw(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mac_config_cmd cmd = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+ .id_and_color = cpu_to_le32(mld_vif->fw_id),
+ };
+
+ return iwl_mld_send_mac_cmd(mld, &cmd);
+}
+
+int iwl_mld_mac_fw_action(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mac_config_cmd cmd = {};
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (action == FW_CTXT_ACTION_REMOVE)
+ return iwl_mld_rm_mac_from_fw(mld, vif);
+
+ iwl_mld_mac_cmd_fill_common(mld, vif, &cmd, action);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ iwl_mld_fill_mac_cmd_sta(mld, vif, action, &cmd);
+ break;
+ case NL80211_IFTYPE_AP:
+ iwl_mld_fill_mac_cmd_ap(mld, vif, &cmd);
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ cmd.filter_flags =
+ cpu_to_le32(MAC_CFG_FILTER_PROMISC |
+ MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT |
+ MAC_CFG_FILTER_ACCEPT_BEACON |
+ MAC_CFG_FILTER_ACCEPT_PROBE_REQ |
+ MAC_CFG_FILTER_ACCEPT_GRP);
+ break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ iwl_mld_fill_mac_cmd_p2p_dev(mld, vif, &cmd);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ iwl_mld_fill_mac_cmd_ibss(mld, vif, &cmd);
+ break;
+ default:
+ WARN(1, "not supported yet\n");
+ return -EOPNOTSUPP;
+ }
+
+ return iwl_mld_send_mac_cmd(mld, &cmd);
+}
+
+IWL_MLD_ALLOC_FN(vif, vif)
+
+/* Constructor function for struct iwl_mld_vif */
+static int
+iwl_mld_init_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ mld_vif->mld = mld;
+ mld_vif->roc_activity = ROC_NUM_ACTIVITIES;
+
+ ret = iwl_mld_allocate_vif_fw_id(mld, &mld_vif->fw_id, vif);
+ if (ret)
+ return ret;
+
+ if (!mld->fw_status.in_hw_restart) {
+ wiphy_work_init(&mld_vif->emlsr.unblock_tpt_wk,
+ iwl_mld_emlsr_unblock_tpt_wk);
+ wiphy_delayed_work_init(&mld_vif->emlsr.check_tpt_wk,
+ iwl_mld_emlsr_check_tpt);
+ wiphy_delayed_work_init(&mld_vif->emlsr.prevent_done_wk,
+ iwl_mld_emlsr_prevent_done_wk);
+ wiphy_delayed_work_init(&mld_vif->emlsr.tmp_non_bss_done_wk,
+ iwl_mld_emlsr_tmp_non_bss_done_wk);
+ }
+
+ return 0;
+}
+
+int iwl_mld_add_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ ret = iwl_mld_init_vif(mld, vif);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_ADD);
+ if (ret)
+ RCU_INIT_POINTER(mld->fw_id_to_vif[mld_vif->fw_id], NULL);
+
+ return ret;
+}
+
+int iwl_mld_rm_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ ret = iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_REMOVE);
+
+ if (WARN_ON(mld_vif->fw_id >= ARRAY_SIZE(mld->fw_id_to_vif)))
+ return -EINVAL;
+
+ RCU_INIT_POINTER(mld->fw_id_to_vif[mld_vif->fw_id], NULL);
+
+ iwl_mld_cancel_notifications_of_object(mld, IWL_MLD_OBJECT_TYPE_VIF,
+ mld_vif->fw_id);
+
+ return ret;
+}
+
+void iwl_mld_set_vif_associated(struct iwl_mld *mld,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_bss_conf *link;
+ unsigned int link_id;
+
+ for_each_vif_active_link(vif, link, link_id) {
+ if (iwl_mld_link_set_associated(mld, vif, link))
+ IWL_ERR(mld, "failed to update link %d\n", link_id);
+ }
+
+ iwl_mld_recalc_multicast_filter(mld);
+}
+
+static void iwl_mld_get_fw_id_bss_bitmap_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ u8 *fw_id_bitmap = _data;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
+ return;
+
+ *fw_id_bitmap |= BIT(mld_vif->fw_id);
+}
+
+u8 iwl_mld_get_fw_bss_vifs_ids(struct iwl_mld *mld)
+{
+ u8 fw_id_bitmap = 0;
+
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER,
+ iwl_mld_get_fw_id_bss_bitmap_iter,
+ &fw_id_bitmap);
+
+ return fw_id_bitmap;
+}
+
+void iwl_mld_handle_probe_resp_data_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ const struct iwl_probe_resp_data_notif *notif = (void *)pkt->data;
+ struct iwl_probe_resp_data *old_data, *new_data;
+ struct ieee80211_vif *vif;
+ struct iwl_mld_link *mld_link;
+
+ IWL_DEBUG_INFO(mld, "Probe response data notif: noa %d, csa %d\n",
+ notif->noa_active, notif->csa_counter);
+
+ if (IWL_FW_CHECK(mld, le32_to_cpu(notif->mac_id) >=
+ ARRAY_SIZE(mld->fw_id_to_vif),
+ "mac id is invalid: %d\n",
+ le32_to_cpu(notif->mac_id)))
+ return;
+
+ vif = wiphy_dereference(mld->wiphy,
+ mld->fw_id_to_vif[le32_to_cpu(notif->mac_id)]);
+
+ /* The firmware gives us the mac_id (not the link_id), and mac80211
+ * gives us a vif (not a link); bottom line, this flow is not
+ * MLD-ready yet.
+ */
+ if (WARN_ON(!vif) || ieee80211_vif_is_mld(vif))
+ return;
+
+ if (notif->csa_counter != IWL_PROBE_RESP_DATA_NO_CSA &&
+ notif->csa_counter >= 1)
+ ieee80211_beacon_set_cntdwn(vif, notif->csa_counter);
+
+ if (!vif->p2p)
+ return;
+
+ mld_link = &iwl_mld_vif_from_mac80211(vif)->deflink;
+
+ new_data = kzalloc(sizeof(*new_data), GFP_KERNEL);
+ if (!new_data)
+ return;
+
+ memcpy(&new_data->notif, notif, sizeof(new_data->notif));
+
+ /* noa_attr contains 1 reserved byte, need to subtract it */
+ new_data->noa_len = sizeof(struct ieee80211_vendor_ie) +
+ sizeof(new_data->notif.noa_attr) - 1;
+
+ /*
+ * If it's a one time NoA, only one descriptor is needed,
+ * adjust the length according to len_low.
+ */
+ if (new_data->notif.noa_attr.len_low ==
+ sizeof(struct ieee80211_p2p_noa_desc) + 2)
+ new_data->noa_len -= sizeof(struct ieee80211_p2p_noa_desc);
+
+ old_data = wiphy_dereference(mld->wiphy, mld_link->probe_resp_data);
+ rcu_assign_pointer(mld_link->probe_resp_data, new_data);
+
+ if (old_data)
+ kfree_rcu(old_data, rcu_head);
+}
+
+void iwl_mld_handle_uapsd_misbehaving_ap_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data;
+ struct ieee80211_vif *vif;
+
+ if (IWL_FW_CHECK(mld, notif->mac_id >= ARRAY_SIZE(mld->fw_id_to_vif),
+ "mac id is invalid: %d\n", notif->mac_id))
+ return;
+
+ vif = wiphy_dereference(mld->wiphy, mld->fw_id_to_vif[notif->mac_id]);
+
+ if (WARN_ON(!vif) || ieee80211_vif_is_mld(vif))
+ return;
+
+ IWL_WARN(mld, "uapsd misbehaving AP: %pM\n", vif->bss_conf.bssid);
+}
+
+void iwl_mld_handle_datapath_monitor_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_datapath_monitor_notif *notif = (void *)pkt->data;
+ struct ieee80211_bss_conf *link;
+ struct ieee80211_supported_band *sband;
+ const struct ieee80211_sta_he_cap *he_cap;
+ struct ieee80211_vif *vif;
+ struct iwl_mld_vif *mld_vif;
+
+ if (notif->type != cpu_to_le32(IWL_DP_MON_NOTIF_TYPE_EXT_CCA))
+ return;
+
+ link = iwl_mld_fw_id_to_link_conf(mld, notif->link_id);
+ if (WARN_ON(!link))
+ return;
+
+ vif = link->vif;
+ if (WARN_ON(!vif) || vif->type != NL80211_IFTYPE_STATION ||
+ !vif->cfg.assoc)
+ return;
+
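+ /* The CCA workaround only applies to 2.4 GHz links that are 40 MHz or wider */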
+ if (!link->chanreq.oper.chan ||
+ link->chanreq.oper.chan->band != NL80211_BAND_2GHZ ||
+ link->chanreq.oper.width < NL80211_CHAN_WIDTH_40)
+ return;
+
+ mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ /* this shouldn't happen *again*, ignore it */
+ if (mld_vif->cca_40mhz_workaround != CCA_40_MHZ_WA_NONE)
+ return;
+
+ mld_vif->cca_40mhz_workaround = CCA_40_MHZ_WA_RECONNECT;
+
+ /*
+ * This capability manipulation isn't really ideal, but it's the
+ * easiest choice - otherwise we'd have to do some major changes
+ * in mac80211 to support this, which isn't worth it. This does
+ * mean that userspace may have outdated information, but that's
+ * actually not an issue at all.
+ */
+ sband = mld->wiphy->bands[NL80211_BAND_2GHZ];
+
+ WARN_ON(!sband->ht_cap.ht_supported);
+ WARN_ON(!(sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40));
+ sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
+ he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif);
+
+ if (he_cap) {
+ /* we know that ours is writable */
+ struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap;
+
+ WARN_ON(!he->has_he);
+ WARN_ON(!(he->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G));
+ he->he_cap_elem.phy_cap_info[0] &=
+ ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ }
+
+ ieee80211_disconnect(vif, true);
+}
+
+void iwl_mld_reset_cca_40mhz_workaround(struct iwl_mld *mld,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_supported_band *sband;
+ const struct ieee80211_sta_he_cap *he_cap;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (mld_vif->cca_40mhz_workaround == CCA_40_MHZ_WA_NONE)
+ return;
+
+ /* Now we are just reconnecting with the new capabilities,
+ * but remember to reset the capabilities when we disconnect for real
+ */
+ if (mld_vif->cca_40mhz_workaround == CCA_40_MHZ_WA_RECONNECT) {
+ mld_vif->cca_40mhz_workaround = CCA_40_MHZ_WA_RESET;
+ return;
+ }
+
+ /* Now cca_40mhz_workaround == CCA_40_MHZ_WA_RESET */
+
+ sband = mld->wiphy->bands[NL80211_BAND_2GHZ];
+
+ sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
+ he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif);
+
+ if (he_cap) {
+ /* we know that ours is writable */
+ struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap;
+
+ he->he_cap_elem.phy_cap_info[0] |=
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ }
+
+ mld_vif->cca_40mhz_workaround = CCA_40_MHZ_WA_NONE;
+}
+
+struct ieee80211_vif *iwl_mld_get_bss_vif(struct iwl_mld *mld)
+{
+ unsigned long fw_id_bitmap = iwl_mld_get_fw_bss_vifs_ids(mld);
+ int fw_id;
+
+ if (hweight8(fw_id_bitmap) != 1)
+ return NULL;
+
+ fw_id = __ffs(fw_id_bitmap);
+
+ return wiphy_dereference(mld->wiphy,
+ mld->fw_id_to_vif[fw_id]);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/iface.h b/drivers/net/wireless/intel/iwlwifi/mld/iface.h
new file mode 100644
index 000000000000..d1d56b081bf6
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/iface.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_iface_h__
+#define __iwl_mld_iface_h__
+
+#include <net/mac80211.h>
+
+#include "link.h"
+#include "session-protect.h"
+#include "d3.h"
+
+enum iwl_mld_cca_40mhz_wa_status {
+ CCA_40_MHZ_WA_NONE,
+ CCA_40_MHZ_WA_RESET,
+ CCA_40_MHZ_WA_RECONNECT,
+};
+
+/**
+ * enum iwl_mld_emlsr_blocked - defines reasons for which EMLSR is blocked
+ *
+ * These blocks are applied/stored per-VIF.
+ *
+ * @IWL_MLD_EMLSR_BLOCKED_PREVENTION: Prevent repeated EMLSR enter/exit
+ * @IWL_MLD_EMLSR_BLOCKED_WOWLAN: WOWLAN is preventing EMLSR
+ * @IWL_MLD_EMLSR_BLOCKED_ROC: remain-on-channel is preventing EMLSR
+ * @IWL_MLD_EMLSR_BLOCKED_NON_BSS: An active non-BSS interface's link is
+ * preventing EMLSR
+ * @IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS: An expected active non-BSS interface's
+ * link is preventing EMLSR. This is a temporary blocking that is set when
+ * there is an indication that a non-BSS interface is to be added.
+ * @IWL_MLD_EMLSR_BLOCKED_TPT: throughput is too low to make EMLSR worthwhile
+ */
+enum iwl_mld_emlsr_blocked {
+ IWL_MLD_EMLSR_BLOCKED_PREVENTION = 0x1,
+ IWL_MLD_EMLSR_BLOCKED_WOWLAN = 0x2,
+ IWL_MLD_EMLSR_BLOCKED_ROC = 0x4,
+ IWL_MLD_EMLSR_BLOCKED_NON_BSS = 0x8,
+ IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS = 0x10,
+ IWL_MLD_EMLSR_BLOCKED_TPT = 0x20,
+};
+
+/**
+ * enum iwl_mld_emlsr_exit - defines reasons for exiting EMLSR
+ *
+ * Reasons to exit EMLSR may be either link specific or even specific to a
+ * combination of links.
+ *
+ * @IWL_MLD_EMLSR_EXIT_BLOCK: Exit due to a block reason being set
+ * @IWL_MLD_EMLSR_EXIT_MISSED_BEACON: Exit due to missed beacons
+ * @IWL_MLD_EMLSR_EXIT_FAIL_ENTRY: FW failed to enter EMLSR
+ * @IWL_MLD_EMLSR_EXIT_CSA: EMLSR prevented due to channel switch on link
+ * @IWL_MLD_EMLSR_EXIT_EQUAL_BAND: EMLSR prevented as both links share the band
+ * @IWL_MLD_EMLSR_EXIT_LOW_RSSI: Link RSSI is unsuitable for EMLSR
+ * @IWL_MLD_EMLSR_EXIT_LINK_USAGE: Exit EMLSR due to low TPT on secondary link
+ * @IWL_MLD_EMLSR_EXIT_BT_COEX: Exit EMLSR due to BT coexistence
+ * @IWL_MLD_EMLSR_EXIT_CHAN_LOAD: Exit EMLSR because the primary channel is not
+ * loaded enough to justify EMLSR.
+ * @IWL_MLD_EMLSR_EXIT_RFI: Exit EMLSR due to RFI
+ * @IWL_MLD_EMLSR_EXIT_FW_REQUEST: Exit EMLSR because the FW requested it
+ */
+enum iwl_mld_emlsr_exit {
+ IWL_MLD_EMLSR_EXIT_BLOCK = 0x1,
+ IWL_MLD_EMLSR_EXIT_MISSED_BEACON = 0x2,
+ IWL_MLD_EMLSR_EXIT_FAIL_ENTRY = 0x4,
+ IWL_MLD_EMLSR_EXIT_CSA = 0x8,
+ IWL_MLD_EMLSR_EXIT_EQUAL_BAND = 0x10,
+ IWL_MLD_EMLSR_EXIT_LOW_RSSI = 0x20,
+ IWL_MLD_EMLSR_EXIT_LINK_USAGE = 0x40,
+ IWL_MLD_EMLSR_EXIT_BT_COEX = 0x80,
+ IWL_MLD_EMLSR_EXIT_CHAN_LOAD = 0x100,
+ IWL_MLD_EMLSR_EXIT_RFI = 0x200,
+ IWL_MLD_EMLSR_EXIT_FW_REQUEST = 0x400,
+};
+
+/**
+ * struct iwl_mld_emlsr - per-VIF data about EMLSR operation
+ *
+ * @primary: The current primary link
+ * @selected_primary: Primary link as selected during the last link selection
+ * @selected_links: Links as selected during the last link selection
+ * @blocked_reasons: Reasons preventing EMLSR from being enabled
+ * @last_exit_reason: Reason for the last EMLSR exit
+ * @last_exit_ts: Time of the last EMLSR exit (if @last_exit_reason is non-zero)
+ * @exit_repeat_count: Number of times EMLSR was exited for the same reason
+ * @unblock_tpt_wk: Unblock EMLSR because the throughput limit was reached
+ * @check_tpt_wk: a worker to check if IWL_MLD_EMLSR_BLOCKED_TPT should be
+ * added, for example if there is no longer enough traffic.
+ * @prevent_done_wk: Worker to remove %IWL_MLD_EMLSR_BLOCKED_PREVENTION
+ * @tmp_non_bss_done_wk: Worker to remove %IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS
+ */
+struct iwl_mld_emlsr {
+ struct_group(zeroed_on_not_authorized,
+ u8 primary;
+
+ u8 selected_primary;
+ u16 selected_links;
+
+ enum iwl_mld_emlsr_blocked blocked_reasons;
+
+ enum iwl_mld_emlsr_exit last_exit_reason;
+ unsigned long last_exit_ts;
+ u8 exit_repeat_count;
+ );
+
+ struct wiphy_work unblock_tpt_wk;
+ struct wiphy_delayed_work check_tpt_wk;
+
+ struct wiphy_delayed_work prevent_done_wk;
+ struct wiphy_delayed_work tmp_non_bss_done_wk;
+};
+
+/**
+ * struct iwl_mld_vif - virtual interface (MAC context) configuration parameters
+ *
+ * @fw_id: fw id of the mac context.
+ * @session_protect: session protection parameters
+ * @ap_sta: pointer to AP sta, for easier access to it.
+ * Relevant only for STA vifs.
+ * @authorized: indicates the AP station was set to authorized
+ * @bigtks: BIGTKs of the AP, for beacon protection.
+ * Only valid for STA. (FIXME: needs to be per link)
+ * @num_associated_stas: number of associated STAs. Relevant only for AP mode.
+ * @ap_ibss_active: whether the AP/IBSS was started
+ * @roc_activity: the id of the roc_activity running. Relevant for p2p device
+ * only. Set to %ROC_NUM_ACTIVITIES when not in use.
+ * @cca_40mhz_workaround: When we are connected in 2.4 GHz and 40 MHz, and the
+ * environment is too loaded, we work around this by reconnecting to the
+ * same AP with 20 MHz. This manages the status of the workaround.
+ * @beacon_inject_active: indicates an active debugfs beacon ie injection
+ * @low_latency_causes: bit flags, indicating the causes for low-latency,
+ * see @iwl_mld_low_latency_cause.
+ * @ps_disabled: indicates that PS is disabled for this interface
+ * @mld: pointer to the mld structure.
+ * @deflink: default link data, for use in non-MLO,
+ * @link: reference to link data for each valid link, for use in MLO.
+ * @emlsr: information related to EMLSR
+ * @wowlan_data: data used by the wowlan suspend flow
+ * @use_ps_poll: use ps_poll frames
+ * @disable_bf: disable beacon filter
+ * @dbgfs_slink: debugfs symlink for this interface
+ */
+struct iwl_mld_vif {
+ /* Add here fields that need clean up on restart */
+ struct_group(zeroed_on_hw_restart,
+ u8 fw_id;
+ struct iwl_mld_session_protect session_protect;
+ struct ieee80211_sta *ap_sta;
+ bool authorized;
+ struct ieee80211_key_conf __rcu *bigtks[2];
+ u8 num_associated_stas;
+ bool ap_ibss_active;
+ u32 roc_activity;
+ enum iwl_mld_cca_40mhz_wa_status cca_40mhz_workaround;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ bool beacon_inject_active;
+#endif
+ u8 low_latency_causes;
+ bool ps_disabled;
+ );
+ /* And here fields that survive a fw restart */
+ struct iwl_mld *mld;
+ struct iwl_mld_link deflink;
+ struct iwl_mld_link __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+
+ struct iwl_mld_emlsr emlsr;
+
+#ifdef CONFIG_PM_SLEEP
+ struct iwl_mld_wowlan_data wowlan_data;
+#endif
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ bool use_ps_poll;
+ bool disable_bf;
+ struct dentry *dbgfs_slink;
+#endif
+};
+
+static inline struct iwl_mld_vif *
+iwl_mld_vif_from_mac80211(struct ieee80211_vif *vif)
+{
+ return (void *)vif->drv_priv;
+}
+
+#define iwl_mld_link_dereference_check(mld_vif, link_id) \
+ rcu_dereference_check((mld_vif)->link[link_id], \
+ lockdep_is_held(&mld_vif->mld->wiphy->mtx))
+
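+/* Iterate over the vif's valid links; the macro declares link_id for the loop body */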
+#define for_each_mld_vif_valid_link(mld_vif, mld_link) \
+ for (int link_id = 0; link_id < ARRAY_SIZE((mld_vif)->link); \
+ link_id++) \
+ if ((mld_link = iwl_mld_link_dereference_check(mld_vif, link_id)))
+
+/* Retrieve pointer to mld link from mac80211 structures */
+static inline struct iwl_mld_link *
+iwl_mld_link_from_mac80211(struct ieee80211_bss_conf *bss_conf)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(bss_conf->vif);
+
+ return iwl_mld_link_dereference_check(mld_vif, bss_conf->link_id);
+}
+
+int iwl_mld_mac80211_iftype_to_fw(const struct ieee80211_vif *vif);
+
+/* Cleanup function for struct iwl_mld_vif, will be called in restart */
+void iwl_mld_cleanup_vif(void *data, u8 *mac, struct ieee80211_vif *vif);
+int iwl_mld_mac_fw_action(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ u32 action);
+int iwl_mld_add_vif(struct iwl_mld *mld, struct ieee80211_vif *vif);
+int iwl_mld_rm_vif(struct iwl_mld *mld, struct ieee80211_vif *vif);
+void iwl_mld_set_vif_associated(struct iwl_mld *mld,
+ struct ieee80211_vif *vif);
+u8 iwl_mld_get_fw_bss_vifs_ids(struct iwl_mld *mld);
+void iwl_mld_handle_probe_resp_data_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+
+void iwl_mld_handle_datapath_monitor_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+
+void iwl_mld_handle_uapsd_misbehaving_ap_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+
+void iwl_mld_reset_cca_40mhz_workaround(struct iwl_mld *mld,
+ struct ieee80211_vif *vif);
+
+static inline bool iwl_mld_vif_low_latency(const struct iwl_mld_vif *mld_vif)
+{
+ return !!mld_vif->low_latency_causes;
+}
+
+struct ieee80211_vif *iwl_mld_get_bss_vif(struct iwl_mld *mld);
+
+#endif /* __iwl_mld_iface_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/key.c b/drivers/net/wireless/intel/iwlwifi/mld/key.c
new file mode 100644
index 000000000000..0eff13e5ffd5
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/key.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include "key.h"
+#include "iface.h"
+#include "sta.h"
+#include "fw/api/datapath.h"
+
+static u32 iwl_mld_get_key_flags(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ bool pairwise = key->flags & IEEE80211_KEY_FLAG_PAIRWISE;
+ bool igtk = key->keyidx == 4 || key->keyidx == 5;
+ u32 flags = 0;
+
+ if (!pairwise)
+ flags |= IWL_SEC_KEY_FLAG_MCAST_KEY;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ flags |= IWL_SEC_KEY_FLAG_CIPHER_TKIP;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_CCMP:
+ flags |= IWL_SEC_KEY_FLAG_CIPHER_CCMP;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ flags |= IWL_SEC_KEY_FLAG_KEY_SIZE;
+ fallthrough;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ flags |= IWL_SEC_KEY_FLAG_CIPHER_GCMP;
+ break;
+ }
+
+ if (!sta && vif->type == NL80211_IFTYPE_STATION)
+ sta = mld_vif->ap_sta;
+
+ /* If we are installing an iGTK (in AP or STA mode), we need to tell
+ * the firmware this key will en/decrypt MGMT frames.
+ * Same goes if we are installing a pairwise key for an MFP station.
+ * If we are installing a group key (which is not an iGTK), we will
+ * not use this key for MGMT frames.
+ */
+ if ((sta && sta->mfp && pairwise) || igtk)
+ flags |= IWL_SEC_KEY_FLAG_MFP;
+
+ if (key->flags & IEEE80211_KEY_FLAG_SPP_AMSDU)
+ flags |= IWL_SEC_KEY_FLAG_SPP_AMSDU;
+
+ return flags;
+}
+
+static u32 iwl_mld_get_key_sta_mask(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct ieee80211_link_sta *link_sta;
+ int sta_id;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* AP group keys are per link and should be on the mcast/bcast STA */
+ if (vif->type == NL80211_IFTYPE_AP &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ struct iwl_mld_link *link = NULL;
+
+ if (key->link_id >= 0)
+ link = iwl_mld_link_dereference_check(mld_vif,
+ key->link_id);
+
+ if (WARN_ON(!link))
+ return 0;
+
+ /* In this stage we should have both the bcast and mcast STAs */
+ if (WARN_ON(link->bcast_sta.sta_id == IWL_INVALID_STA ||
+ link->mcast_sta.sta_id == IWL_INVALID_STA))
+ return 0;
+
+ /* IGTK/BIGTK to bcast STA */
+ if (key->keyidx >= 4)
+ return BIT(link->bcast_sta.sta_id);
+
+ /* GTK for data to mcast STA */
+ return BIT(link->mcast_sta.sta_id);
+ }
+
+ /* for client mode use the AP STA also for group keys */
+ if (!sta && vif->type == NL80211_IFTYPE_STATION)
+ sta = mld_vif->ap_sta;
+
+ /* STA should be non-NULL now */
+ if (WARN_ON(!sta))
+ return 0;
+
+ /* Key is not per-link, get the full sta mask */
+ if (key->link_id < 0)
+ return iwl_mld_fw_sta_id_mask(mld, sta);
+
+ /* The link_sta shouldn't be NULL now, but this is checked in
+ * iwl_mld_fw_sta_id_mask
+ */
+ link_sta = link_sta_dereference_check(sta, key->link_id);
+
+ sta_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
+ if (sta_id < 0)
+ return 0;
+
+ return BIT(sta_id);
+}
+
+static int iwl_mld_add_key_to_fw(struct iwl_mld *mld, u32 sta_mask,
+ u32 key_flags, struct ieee80211_key_conf *key)
+{
+ struct iwl_sec_key_cmd cmd = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ .u.add.sta_mask = cpu_to_le32(sta_mask),
+ .u.add.key_id = cpu_to_le32(key->keyidx),
+ .u.add.key_flags = cpu_to_le32(key_flags),
+ .u.add.tx_seq = cpu_to_le64(atomic64_read(&key->tx_pn)),
+ };
+ bool tkip = key->cipher == WLAN_CIPHER_SUITE_TKIP;
+ int max_key_len = sizeof(cmd.u.add.key);
+
+ if (WARN_ON(!sta_mask))
+ return -EINVAL;
+
+ if (WARN_ON(key->keylen > max_key_len))
+ return -EINVAL;
+
+ memcpy(cmd.u.add.key, key->key, key->keylen);
+
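+ /* For TKIP, the Rx/Tx MIC keys are passed in dedicated fields */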
+ if (tkip) {
+ memcpy(cmd.u.add.tkip_mic_rx_key,
+ key->key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
+ 8);
+ memcpy(cmd.u.add.tkip_mic_tx_key,
+ key->key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY,
+ 8);
+ }
+
+ return iwl_mld_send_cmd_pdu(mld, WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD),
+ &cmd);
+}
+
+static void iwl_mld_remove_key_from_fw(struct iwl_mld *mld, u32 sta_mask,
+ u32 key_flags, u32 keyidx)
+{
+ struct iwl_sec_key_cmd cmd = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+ .u.remove.sta_mask = cpu_to_le32(sta_mask),
+ .u.remove.key_id = cpu_to_le32(keyidx),
+ .u.remove.key_flags = cpu_to_le32(key_flags),
+ };
+
+ if (WARN_ON(!sta_mask))
+ return;
+
+ iwl_mld_send_cmd_pdu(mld, WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD), &cmd);
+}
+
+void iwl_mld_remove_key(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ u32 sta_mask = iwl_mld_get_key_sta_mask(mld, vif, sta, key);
+ u32 key_flags = iwl_mld_get_key_flags(mld, vif, sta, key);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (!sta_mask)
+ return;
+
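+ /* Key IDs 4 and 5 are reserved for the IGTK */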
+ if (key->keyidx == 4 || key->keyidx == 5) {
+ struct iwl_mld_link *mld_link;
+ unsigned int link_id = 0;
+
+ /* set to -1 for non-MLO right now */
+ if (key->link_id >= 0)
+ link_id = key->link_id;
+
+ mld_link = iwl_mld_link_dereference_check(mld_vif, link_id);
+ if (WARN_ON(!mld_link))
+ return;
+
+ if (mld_link->igtk == key)
+ mld_link->igtk = NULL;
+
+ mld->num_igtks--;
+ }
+
+ iwl_mld_remove_key_from_fw(mld, sta_mask, key_flags, key->keyidx);
+
+ /* no longer in HW */
+ key->hw_key_idx = STA_KEY_IDX_INVALID;
+}
+
+int iwl_mld_add_key(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ u32 sta_mask = iwl_mld_get_key_sta_mask(mld, vif, sta, key);
+ u32 key_flags = iwl_mld_get_key_flags(mld, vif, sta, key);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link *mld_link = NULL;
+ bool igtk = key->keyidx == 4 || key->keyidx == 5;
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (!sta_mask)
+ return -EINVAL;
+
+ if (igtk) {
+ if (mld->num_igtks == IWL_MAX_NUM_IGTKS)
+ return -EOPNOTSUPP;
+
+ u8 link_id = 0;
+
+ /* set to -1 for non-MLO right now */
+ if (key->link_id >= 0)
+ link_id = key->link_id;
+
+ mld_link = iwl_mld_link_dereference_check(mld_vif, link_id);
+
+ if (WARN_ON(!mld_link))
+ return -EINVAL;
+
+ if (mld_link->igtk) {
+ IWL_DEBUG_MAC80211(mld, "remove old IGTK %d\n",
+ mld_link->igtk->keyidx);
+ iwl_mld_remove_key(mld, vif, sta, mld_link->igtk);
+ }
+
+ WARN_ON(mld_link->igtk);
+ }
+
+ ret = iwl_mld_add_key_to_fw(mld, sta_mask, key_flags, key);
+ if (ret)
+ return ret;
+
+ if (mld_link) {
+ mld_link->igtk = key;
+ mld->num_igtks++;
+ }
+
+ /* We don't really need hw_key_idx, but it must not stay invalid,
+ * so we can tell that the key is installed in the fw.
+ */
+ key->hw_key_idx = 0;
+
+ return 0;
+}
+
+struct remove_ap_keys_iter_data {
+ u8 link_id;
+ struct ieee80211_sta *sta;
+};
+
+static void iwl_mld_remove_ap_keys_iter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *_data)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct remove_ap_keys_iter_data *data = _data;
+
+ if (key->hw_key_idx == STA_KEY_IDX_INVALID)
+ return;
+
+ /* All the pairwise keys should have been removed by now */
+ if (WARN_ON(sta))
+ return;
+
+ if (key->link_id >= 0 && key->link_id != data->link_id)
+ return;
+
+ iwl_mld_remove_key(mld, vif, data->sta, key);
+}
+
+void iwl_mld_remove_ap_keys(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, unsigned int link_id)
+{
+ struct remove_ap_keys_iter_data iter_data = {
+ .link_id = link_id,
+ .sta = sta,
+ };
+
+ if (WARN_ON_ONCE(vif->type != NL80211_IFTYPE_STATION))
+ return;
+
+ ieee80211_iter_keys(mld->hw, vif,
+ iwl_mld_remove_ap_keys_iter,
+ &iter_data);
+}
+
+struct iwl_mvm_sta_key_update_data {
+ struct ieee80211_sta *sta;
+ u32 old_sta_mask;
+ u32 new_sta_mask;
+ int err;
+};
+
+static void iwl_mld_update_sta_key_iter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *_data)
+{
+ struct iwl_mvm_sta_key_update_data *data = _data;
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_sec_key_cmd cmd = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_MODIFY),
+ .u.modify.old_sta_mask = cpu_to_le32(data->old_sta_mask),
+ .u.modify.new_sta_mask = cpu_to_le32(data->new_sta_mask),
+ .u.modify.key_id = cpu_to_le32(key->keyidx),
+ .u.modify.key_flags =
+ cpu_to_le32(iwl_mld_get_key_flags(mld, vif, sta, key)),
+ };
+ int err;
+
+ /* only need to do this for pairwise keys (link_id == -1) */
+ if (sta != data->sta || key->link_id >= 0)
+ return;
+
+ err = iwl_mld_send_cmd_pdu(mld, WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD),
+ &cmd);
+
+ if (err)
+ data->err = err;
+}
+
+int iwl_mld_update_sta_keys(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 old_sta_mask,
+ u32 new_sta_mask)
+{
+ struct iwl_mvm_sta_key_update_data data = {
+ .sta = sta,
+ .old_sta_mask = old_sta_mask,
+ .new_sta_mask = new_sta_mask,
+ };
+
+ ieee80211_iter_keys(mld->hw, vif, iwl_mld_update_sta_key_iter,
+ &data);
+ return data.err;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/key.h b/drivers/net/wireless/intel/iwlwifi/mld/key.h
new file mode 100644
index 000000000000..a68ea48913be
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/key.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_key_h__
+#define __iwl_mld_key_h__
+
+#include "mld.h"
+#include <net/mac80211.h>
+#include "fw/api/sta.h"
+
+void iwl_mld_remove_key(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+int iwl_mld_add_key(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+void iwl_mld_remove_ap_keys(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ unsigned int link_id);
+
+int iwl_mld_update_sta_keys(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 old_sta_mask,
+ u32 new_sta_mask);
+
+static inline void
+iwl_mld_cleanup_keys_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key, void *data)
+{
+ key->hw_key_idx = STA_KEY_IDX_INVALID;
+}
+
+#endif /* __iwl_mld_key_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/led.c b/drivers/net/wireless/intel/iwlwifi/mld/led.c
new file mode 100644
index 000000000000..a37b32cbc6e6
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/led.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <linux/leds.h>
+#include <net/mac80211.h>
+
+#include "fw/api/led.h"
+#include "mld.h"
+#include "led.h"
+#include "hcmd.h"
+
+static void iwl_mld_send_led_fw_cmd(struct iwl_mld *mld, bool on)
+{
+ struct iwl_led_cmd led_cmd = {
+ .status = cpu_to_le32(on),
+ };
+ int err;
+
+ if (WARN_ON(!mld->fw_status.running))
+ return;
+
+ err = iwl_mld_send_cmd_with_flags_pdu(mld, WIDE_ID(LONG_GROUP,
+ LEDS_CMD),
+ CMD_ASYNC, &led_cmd);
+
+ if (err)
+ IWL_WARN(mld, "LED command failed: %d\n", err);
+}
+
+static void iwl_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct iwl_mld *mld = container_of(led_cdev, struct iwl_mld, led);
+
+ if (!mld->fw_status.running)
+ return;
+
+ iwl_mld_send_led_fw_cmd(mld, brightness > 0);
+}
+
+int iwl_mld_leds_init(struct iwl_mld *mld)
+{
+ int mode = iwlwifi_mod_params.led_mode;
+ int ret;
+
+ switch (mode) {
+ case IWL_LED_BLINK:
+ IWL_ERR(mld, "Blink led mode not supported, using default\n");
+ fallthrough;
+ case IWL_LED_DEFAULT:
+ case IWL_LED_RF_STATE:
+ mode = IWL_LED_RF_STATE;
+ break;
+ case IWL_LED_DISABLE:
+ IWL_INFO(mld, "Led disabled\n");
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ mld->led.name = kasprintf(GFP_KERNEL, "%s-led",
+ wiphy_name(mld->hw->wiphy));
+ if (!mld->led.name)
+ return -ENOMEM;
+
+ mld->led.brightness_set = iwl_led_brightness_set;
+ mld->led.max_brightness = 1;
+
+ if (mode == IWL_LED_RF_STATE)
+ mld->led.default_trigger =
+ ieee80211_get_radio_led_name(mld->hw);
+
+ ret = led_classdev_register(mld->trans->dev, &mld->led);
+ if (ret) {
+ kfree(mld->led.name);
+ mld->led.name = NULL;
+ IWL_INFO(mld, "Failed to enable led\n");
+ }
+
+ return ret;
+}
+
+void iwl_mld_led_config_fw(struct iwl_mld *mld)
+{
+ if (!mld->led.name)
+ return;
+
+ iwl_mld_send_led_fw_cmd(mld, mld->led.brightness > 0);
+}
+
+void iwl_mld_leds_exit(struct iwl_mld *mld)
+{
+ if (!mld->led.name)
+ return;
+
+ led_classdev_unregister(&mld->led);
+ kfree(mld->led.name);
+ mld->led.name = NULL;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/led.h b/drivers/net/wireless/intel/iwlwifi/mld/led.h
new file mode 100644
index 000000000000..0954e214e73d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/led.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_led_h__
+#define __iwl_mld_led_h__
+
+#include "mld.h"
+
+#ifdef CONFIG_IWLWIFI_LEDS
+int iwl_mld_leds_init(struct iwl_mld *mld);
+void iwl_mld_leds_exit(struct iwl_mld *mld);
+void iwl_mld_led_config_fw(struct iwl_mld *mld);
+#else
+static inline int iwl_mld_leds_init(struct iwl_mld *mld)
+{
+ return 0;
+}
+
+static inline void iwl_mld_leds_exit(struct iwl_mld *mld)
+{
+}
+
+static inline void iwl_mld_led_config_fw(struct iwl_mld *mld)
+{
+}
+#endif
+
+#endif /* __iwl_mld_led_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/link.c b/drivers/net/wireless/intel/iwlwifi/mld/link.c
new file mode 100644
index 000000000000..82a4979a3af3
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/link.c
@@ -0,0 +1,1213 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include "constants.h"
+#include "link.h"
+#include "iface.h"
+#include "mlo.h"
+#include "hcmd.h"
+#include "phy.h"
+#include "fw/api/rs.h"
+#include "fw/api/txq.h"
+#include "fw/api/mac.h"
+
+#include "fw/api/context.h"
+#include "fw/dbg.h"
+
+static int iwl_mld_send_link_cmd(struct iwl_mld *mld,
+ struct iwl_link_config_cmd *cmd,
+ enum iwl_ctxt_action action)
+{
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ cmd->action = cpu_to_le32(action);
+ ret = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(MAC_CONF_GROUP, LINK_CONFIG_CMD),
+ cmd);
+ if (ret)
+ IWL_ERR(mld, "Failed to send LINK_CONFIG_CMD (action:%d): %d\n",
+ action, ret);
+ return ret;
+}
+
+static int iwl_mld_add_link_to_fw(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct ieee80211_vif *vif = link_conf->vif;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link *link = iwl_mld_link_from_mac80211(link_conf);
+ struct iwl_link_config_cmd cmd = {};
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (WARN_ON(!link))
+ return -EINVAL;
+
+ cmd.link_id = cpu_to_le32(link->fw_id);
+ cmd.mac_id = cpu_to_le32(mld_vif->fw_id);
+ cmd.spec_link_id = link_conf->link_id;
+ cmd.phy_id = cpu_to_le32(FW_CTXT_ID_INVALID);
+
+ ether_addr_copy(cmd.local_link_addr, link_conf->addr);
+
+ if (vif->type == NL80211_IFTYPE_ADHOC && link_conf->bssid)
+ ether_addr_copy(cmd.ibss_bssid_addr, link_conf->bssid);
+
+ return iwl_mld_send_link_cmd(mld, &cmd, FW_CTXT_ACTION_ADD);
+}
+
+/* Get the basic rates of the used band and add the mandatory ones */
+static void iwl_mld_fill_rates(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link,
+ struct ieee80211_chanctx_conf *chan_ctx,
+ __le32 *cck_rates, __le32 *ofdm_rates)
+{
+ struct cfg80211_chan_def *chandef =
+ iwl_mld_get_chandef_from_chanctx(mld, chan_ctx);
+ struct ieee80211_supported_band *sband =
+ mld->hw->wiphy->bands[chandef->chan->band];
+ unsigned long basic = link->basic_rates;
+ int lowest_present_ofdm = 100;
+ int lowest_present_cck = 100;
+ u32 cck = 0;
+ u32 ofdm = 0;
+ int i;
+
+ for_each_set_bit(i, &basic, BITS_PER_LONG) {
+ int hw = sband->bitrates[i].hw_value;
+
+ if (hw >= IWL_FIRST_OFDM_RATE) {
+ ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE);
+ if (lowest_present_ofdm > hw)
+ lowest_present_ofdm = hw;
+ } else {
+ BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
+
+ cck |= BIT(hw);
+ if (lowest_present_cck > hw)
+ lowest_present_cck = hw;
+ }
+ }
+
+ /* Now we've got the basic rates as bitmaps in the ofdm and cck
+ * variables. This isn't sufficient though, as there might not
+ * be all the right rates in the bitmap. E.g. if the only basic
+ * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
+ * and 6 Mbps because the 802.11-2007 standard says in 9.6:
+ *
+ * [...] a STA responding to a received frame shall transmit
+ * its Control Response frame [...] at the highest rate in the
+ * BSSBasicRateSet parameter that is less than or equal to the
+ * rate of the immediately previous frame in the frame exchange
+ * sequence ([...]) and that is of the same modulation class
+ * ([...]) as the received frame. If no rate contained in the
+ * BSSBasicRateSet parameter meets these conditions, then the
+ * control frame sent in response to a received frame shall be
+ * transmitted at the highest mandatory rate of the PHY that is
+ * less than or equal to the rate of the received frame, and
+ * that is of the same modulation class as the received frame.
+ *
+ * As a consequence, we need to add all mandatory rates that are
+ * lower than all of the basic rates to these bitmaps.
+ */
+
+ if (lowest_present_ofdm > IWL_RATE_24M_INDEX)
+ ofdm |= IWL_RATE_BIT_MSK(24) >> IWL_FIRST_OFDM_RATE;
+ if (lowest_present_ofdm > IWL_RATE_12M_INDEX)
+ ofdm |= IWL_RATE_BIT_MSK(12) >> IWL_FIRST_OFDM_RATE;
+ /* 6M already there or needed so always add */
+ ofdm |= IWL_RATE_BIT_MSK(6) >> IWL_FIRST_OFDM_RATE;
+
+ /* CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
+ * Note, however:
+ * - if no CCK rates are basic, it must be ERP since there must
+ * be some basic rates at all, so they're OFDM => ERP PHY
+ * (or we're in 5 GHz, and the cck bitmap will never be used)
+ * - if 11M is a basic rate, it must be ERP as well, so add 5.5M
+ * - if 5.5M is basic, 1M and 2M are mandatory
+ * - if 2M is basic, 1M is mandatory
+ * - if 1M is basic, that's the only valid ACK rate.
+ * As a consequence, it's not as complicated as it sounds, just add
+ * any lower rates to the ACK rate bitmap.
+ */
+ if (lowest_present_cck > IWL_RATE_11M_INDEX)
+ cck |= IWL_RATE_BIT_MSK(11) >> IWL_FIRST_CCK_RATE;
+ if (lowest_present_cck > IWL_RATE_5M_INDEX)
+ cck |= IWL_RATE_BIT_MSK(5) >> IWL_FIRST_CCK_RATE;
+ if (lowest_present_cck > IWL_RATE_2M_INDEX)
+ cck |= IWL_RATE_BIT_MSK(2) >> IWL_FIRST_CCK_RATE;
+ /* 1M already there or needed so always add */
+ cck |= IWL_RATE_BIT_MSK(1) >> IWL_FIRST_CCK_RATE;
+
+ *cck_rates = cpu_to_le32((u32)cck);
+ *ofdm_rates = cpu_to_le32((u32)ofdm);
+}
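
As a concrete instance of the long comment above: if the only basic rates are 5.5 and 11 Mbps, the CCK response-rate bitmap must also gain 2 and 1 Mbps (and the OFDM bitmap always gains 6 Mbps). The stand-alone sketch below is not part of this patch; the rate indices and names are illustrative and it reproduces only the CCK completion rules.

#include <stdio.h>

enum { R_1M, R_2M, R_5M, R_11M, R_NUM_CCK };

static unsigned int complete_cck(unsigned int basic_cck)
{
	int lowest = 100;
	unsigned int cck = basic_cck;

	/* find the lowest basic CCK rate, if any */
	for (int i = 0; i < R_NUM_CCK; i++)
		if ((basic_cck & (1u << i)) && i < lowest)
			lowest = i;

	/* add every mandatory rate below the lowest basic rate */
	if (lowest > R_11M)
		cck |= 1u << R_11M;
	if (lowest > R_5M)
		cck |= 1u << R_5M;
	if (lowest > R_2M)
		cck |= 1u << R_2M;
	cck |= 1u << R_1M;	/* 1M already there or needed, always add */

	return cck;
}

int main(void)
{
	/* basic rates: 5.5M and 11M -> expect 1M, 2M, 5.5M, 11M (0xf) */
	printf("0x%x\n", complete_cck((1u << R_5M) | (1u << R_11M)));
	return 0;
}
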
+
+static void iwl_mld_fill_protection_flags(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link,
+ __le32 *protection_flags)
+{
+ u8 protection_mode = link->ht_operation_mode &
+ IEEE80211_HT_OP_MODE_PROTECTION;
+ u8 ht_flag = LINK_PROT_FLG_HT_PROT | LINK_PROT_FLG_FAT_PROT;
+
+ IWL_DEBUG_RATE(mld, "HT protection mode: %d\n", protection_mode);
+
+ if (link->use_cts_prot)
+ *protection_flags |= cpu_to_le32(LINK_PROT_FLG_TGG_PROTECT);
+
+ /* See section 9.23.3.1 of IEEE 80211-2012.
+ * Nongreenfield HT STAs Present is not supported.
+ */
+ switch (protection_mode) {
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
+ break;
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+ *protection_flags |= cpu_to_le32(ht_flag);
+ break;
+ case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+ /* Protect when channel wider than 20MHz */
+ if (link->chanreq.oper.width > NL80211_CHAN_WIDTH_20)
+ *protection_flags |= cpu_to_le32(ht_flag);
+ break;
+ }
+}
+
+static u8 iwl_mld_mac80211_ac_to_fw_ac(enum ieee80211_ac_numbers ac)
+{
+ static const u8 mac80211_ac_to_fw[] = {
+ AC_VO,
+ AC_VI,
+ AC_BE,
+ AC_BK
+ };
+
+ return mac80211_ac_to_fw[ac];
+}
+
+static void iwl_mld_fill_qos_params(struct ieee80211_bss_conf *link,
+ struct iwl_ac_qos *ac, __le32 *qos_flags)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+
+ /* no need to check mld_link since it is done in the caller */
+
+ for (int mac_ac = 0; mac_ac < IEEE80211_NUM_ACS; mac_ac++) {
+ u8 txf = iwl_mld_mac80211_ac_to_fw_tx_fifo(mac_ac);
+ u8 fw_ac = iwl_mld_mac80211_ac_to_fw_ac(mac_ac);
+
+ ac[fw_ac].cw_min =
+ cpu_to_le16(mld_link->queue_params[mac_ac].cw_min);
+ ac[fw_ac].cw_max =
+ cpu_to_le16(mld_link->queue_params[mac_ac].cw_max);
+ ac[fw_ac].edca_txop =
+ cpu_to_le16(mld_link->queue_params[mac_ac].txop * 32);
+ ac[fw_ac].aifsn = mld_link->queue_params[mac_ac].aifs;
+ ac[fw_ac].fifos_mask = BIT(txf);
+ }
+
+ if (link->qos)
+ *qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
+
+ if (link->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT)
+ *qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN);
+}
+
+static bool iwl_mld_fill_mu_edca(struct iwl_mld *mld,
+ const struct iwl_mld_link *mld_link,
+ struct iwl_he_backoff_conf *trig_based_txf)
+{
+ for (int mac_ac = 0; mac_ac < IEEE80211_NUM_ACS; mac_ac++) {
+ const struct ieee80211_he_mu_edca_param_ac_rec *mu_edca =
+ &mld_link->queue_params[mac_ac].mu_edca_param_rec;
+ u8 fw_ac = iwl_mld_mac80211_ac_to_fw_ac(mac_ac);
+
+ if (!mld_link->queue_params[mac_ac].mu_edca)
+ return false;
+
+ trig_based_txf[fw_ac].cwmin =
+ cpu_to_le16(mu_edca->ecw_min_max & 0xf);
+ trig_based_txf[fw_ac].cwmax =
+ cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4);
+ trig_based_txf[fw_ac].aifsn =
+ cpu_to_le16(mu_edca->aifsn & 0xf);
+ trig_based_txf[fw_ac].mu_time =
+ cpu_to_le16(mu_edca->mu_edca_timer);
+ }
+ return true;
+}
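
The MU EDCA parameter record packs the ECWmin and ECWmax exponents into a single byte, and the loop above stores those nibbles directly into the cwmin/cwmax fields of the firmware structure. A stand-alone decode of that byte is sketched below (not part of this patch); the CW = 2^ECW - 1 values are only a reminder of the 802.11 encoding, not something the driver computes here.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t ecw_min_max = 0xa4;	/* example: ECWmin = 4, ECWmax = 10 */
	unsigned int ecwmin = ecw_min_max & 0xf;
	unsigned int ecwmax = (ecw_min_max & 0xf0) >> 4;

	/* CW = 2^ECW - 1 per the 802.11 MU EDCA encoding (reminder only) */
	printf("ECWmin=%u (CWmin=%u), ECWmax=%u (CWmax=%u)\n",
	       ecwmin, (1u << ecwmin) - 1, ecwmax, (1u << ecwmax) - 1);
	return 0;
}
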
+
+static u8 iwl_mld_sta_rx_bw_to_fw(enum ieee80211_sta_rx_bandwidth bw)
+{
+ switch (bw) {
+ default: /* potential future values not supported by this hw/driver */
+ case IEEE80211_STA_RX_BW_20:
+ return IWL_LINK_MODIFY_BW_20;
+ case IEEE80211_STA_RX_BW_40:
+ return IWL_LINK_MODIFY_BW_40;
+ case IEEE80211_STA_RX_BW_80:
+ return IWL_LINK_MODIFY_BW_80;
+ case IEEE80211_STA_RX_BW_160:
+ return IWL_LINK_MODIFY_BW_160;
+ case IEEE80211_STA_RX_BW_320:
+ return IWL_LINK_MODIFY_BW_320;
+ }
+}
+
+static int _iwl_mld_change_link_in_fw(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link,
+ enum ieee80211_sta_rx_bandwidth bw,
+ u32 changes)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+ struct ieee80211_vif *vif = link->vif;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct ieee80211_chanctx_conf *chan_ctx;
+ struct iwl_link_config_cmd cmd = {};
+ u32 flags = 0;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (WARN_ON(!mld_link))
+ return -EINVAL;
+
+ cmd.link_id = cpu_to_le32(mld_link->fw_id);
+ cmd.spec_link_id = link->link_id;
+ cmd.mac_id = cpu_to_le32(mld_vif->fw_id);
+
+ chan_ctx = wiphy_dereference(mld->wiphy, mld_link->chan_ctx);
+
+ cmd.phy_id = cpu_to_le32(chan_ctx ?
+ iwl_mld_phy_from_mac80211(chan_ctx)->fw_id :
+ FW_CTXT_ID_INVALID);
+
+ ether_addr_copy(cmd.local_link_addr, link->addr);
+
+ cmd.active = cpu_to_le32(mld_link->active);
+
+ if ((changes & LINK_CONTEXT_MODIFY_ACTIVE) && !mld_link->active &&
+ mld_link->silent_deactivation) {
+ /* We are de-activating a link that is undergoing CSA with
+ * immediate quiet in EMLSR. Tell the firmware not to send any
+ * frames.
+ */
+ cmd.block_tx = 1;
+ mld_link->silent_deactivation = false;
+ }
+
+ if (vif->type == NL80211_IFTYPE_ADHOC && link->bssid)
+ ether_addr_copy(cmd.ibss_bssid_addr, link->bssid);
+
+ /* Channel context is needed to get the rates */
+ if (chan_ctx)
+ iwl_mld_fill_rates(mld, link, chan_ctx, &cmd.cck_rates,
+ &cmd.ofdm_rates);
+
+ cmd.cck_short_preamble = cpu_to_le32(link->use_short_preamble);
+ cmd.short_slot = cpu_to_le32(link->use_short_slot);
+
+ iwl_mld_fill_protection_flags(mld, link, &cmd.protection_flags);
+
+ iwl_mld_fill_qos_params(link, cmd.ac, &cmd.qos_flags);
+
+ cmd.bi = cpu_to_le32(link->beacon_int);
+ cmd.dtim_interval = cpu_to_le32(link->beacon_int * link->dtim_period);
+
+ if (changes & LINK_CONTEXT_MODIFY_BANDWIDTH)
+ cmd.modify_bandwidth = iwl_mld_sta_rx_bw_to_fw(bw);
+
+ /* Configure HE parameters only if HE is supported, and only after
+ * the parameters are set in mac80211 (meaning after assoc)
+ */
+ if (!link->he_support || iwlwifi_mod_params.disable_11ax ||
+ (vif->type == NL80211_IFTYPE_STATION && !vif->cfg.assoc)) {
+ changes &= ~LINK_CONTEXT_MODIFY_HE_PARAMS;
+ goto send_cmd;
+ }
+
+ /* ap_sta may be NULL if we're disconnecting */
+ if (mld_vif->ap_sta) {
+ struct ieee80211_link_sta *link_sta =
+ link_sta_dereference_check(mld_vif->ap_sta,
+ link->link_id);
+
+ if (!WARN_ON(!link_sta) && link_sta->he_cap.has_he &&
+ link_sta->he_cap.he_cap_elem.mac_cap_info[5] &
+ IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX)
+ cmd.ul_mu_data_disable = 1;
+ }
+
+ cmd.htc_trig_based_pkt_ext = link->htc_trig_based_pkt_ext;
+
+ if (link->uora_exists) {
+ cmd.rand_alloc_ecwmin = link->uora_ocw_range & 0x7;
+ cmd.rand_alloc_ecwmax = (link->uora_ocw_range >> 3) & 0x7;
+ }
+
+ if (iwl_mld_fill_mu_edca(mld, mld_link, cmd.trig_based_txf))
+ flags |= LINK_FLG_MU_EDCA_CW;
+
+ cmd.bss_color = link->he_bss_color.color;
+
+ if (!link->he_bss_color.enabled)
+ flags |= LINK_FLG_BSS_COLOR_DIS;
+
+ cmd.frame_time_rts_th = cpu_to_le16(link->frame_time_rts_th);
+
+ /* Block 26-tone RU OFDMA transmissions */
+ if (mld_link->he_ru_2mhz_block)
+ flags |= LINK_FLG_RU_2MHZ_BLOCK;
+
+ if (link->nontransmitted) {
+ ether_addr_copy(cmd.ref_bssid_addr, link->transmitter_bssid);
+ cmd.bssid_index = link->bssid_index;
+ }
+
+ /* The only EHT parameter is puncturing, and starting from PHY cmd
+ * version 6 - it is sent there. For older versions of the PHY cmd,
+ * puncturing is not needed at all.
+ */
+ if (WARN_ON(changes & LINK_CONTEXT_MODIFY_EHT_PARAMS))
+ changes &= ~LINK_CONTEXT_MODIFY_EHT_PARAMS;
+
+send_cmd:
+ cmd.modify_mask = cpu_to_le32(changes);
+ cmd.flags = cpu_to_le32(flags);
+
+ return iwl_mld_send_link_cmd(mld, &cmd, FW_CTXT_ACTION_MODIFY);
+}
+
+int iwl_mld_change_link_in_fw(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link,
+ u32 changes)
+{
+ if (WARN_ON(changes & LINK_CONTEXT_MODIFY_BANDWIDTH))
+ changes &= ~LINK_CONTEXT_MODIFY_BANDWIDTH;
+
+ return _iwl_mld_change_link_in_fw(mld, link, 0, changes);
+}
+
+int iwl_mld_change_link_omi_bw(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link,
+ enum ieee80211_sta_rx_bandwidth bw)
+{
+ return _iwl_mld_change_link_in_fw(mld, link, bw,
+ LINK_CONTEXT_MODIFY_BANDWIDTH);
+}
+
+int iwl_mld_activate_link(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (WARN_ON(!mld_link || mld_link->active))
+ return -EINVAL;
+
+ mld_link->rx_omi.exit_ts = jiffies;
+ mld_link->active = true;
+
+ ret = iwl_mld_change_link_in_fw(mld, link,
+ LINK_CONTEXT_MODIFY_ACTIVE);
+ if (ret)
+ mld_link->active = false;
+
+ return ret;
+}
+
+void iwl_mld_deactivate_link(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+ struct iwl_probe_resp_data *probe_data;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (WARN_ON(!mld_link || !mld_link->active))
+ return;
+
+ iwl_mld_cancel_session_protection(mld, link->vif, link->link_id);
+
+ /* If we deactivate the link, we will probably remove it, or switch
+ * channel. In both cases, the CSA or Notice of Absence information is
+ * now irrelevant. Remove the data here.
+ */
+ probe_data = wiphy_dereference(mld->wiphy, mld_link->probe_resp_data);
+ RCU_INIT_POINTER(mld_link->probe_resp_data, NULL);
+ if (probe_data)
+ kfree_rcu(probe_data, rcu_head);
+
+ mld_link->active = false;
+
+ iwl_mld_change_link_in_fw(mld, link, LINK_CONTEXT_MODIFY_ACTIVE);
+
+ /* Now that the link is not active in FW, we don't expect any new
+ * notifications for it. Cancel the ones that are already pending
+ */
+ iwl_mld_cancel_notifications_of_object(mld, IWL_MLD_OBJECT_TYPE_LINK,
+ mld_link->fw_id);
+}
+
+static void
+iwl_mld_rm_link_from_fw(struct iwl_mld *mld, struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+ struct iwl_link_config_cmd cmd = {};
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (WARN_ON(!mld_link))
+ return;
+
+ cmd.link_id = cpu_to_le32(mld_link->fw_id);
+ cmd.spec_link_id = link->link_id;
+ cmd.phy_id = cpu_to_le32(FW_CTXT_ID_INVALID);
+
+ iwl_mld_send_link_cmd(mld, &cmd, FW_CTXT_ACTION_REMOVE);
+}
+
+static void iwl_mld_omi_bw_update(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf,
+ struct iwl_mld_link *mld_link,
+ struct ieee80211_link_sta *link_sta,
+ enum ieee80211_sta_rx_bandwidth bw,
+ bool ap_update)
+{
+ enum ieee80211_sta_rx_bandwidth apply_bw;
+
+ mld_link->rx_omi.desired_bw = bw;
+
+ /* Can't update OMI while an update is already in progress; desired_bw
+ * was set, so on the FW notification the worker will see the change
+ * and apply the new desired bw.
+ */
+ if (mld_link->rx_omi.bw_in_progress)
+ return;
+
+ if (bw == IEEE80211_STA_RX_BW_MAX)
+ apply_bw = ieee80211_chan_width_to_rx_bw(link_conf->chanreq.oper.width);
+ else
+ apply_bw = bw;
+
+ if (!ap_update) {
+ /* This update is not due to the AP-tracking path that runs after
+ * leaving OMI, where the AP could increase its bandwidth and we
+ * must then tell it that we can use the increased bandwidth too
+ * (if the chandef was updated).
+ * If we want MAX, we can (due to internal and FW limitations, and
+ * to avoid confusing the AP) only announce the bandwidth we are
+ * actually using at the moment, so remember it in last_max_bw; a
+ * new OMI must be sent if the AP later increases its bandwidth.
+ * Otherwise, if we really want to decrease our bandwidth, set
+ * last_max_bw to 0 to indicate that no update is needed when the
+ * AP changes its bandwidth.
+ */
+ if (bw == IEEE80211_STA_RX_BW_MAX)
+ mld_link->rx_omi.last_max_bw = apply_bw;
+ else
+ mld_link->rx_omi.last_max_bw = 0;
+ } else {
+ /* Otherwise, if we're already trying to do maximum and
+ * the AP is changing, set last_max_bw to the new max the
+ * AP is using, we'll only get to this code path if the
+ * new bandwidth of the AP is bigger than what we sent it
+ * previously. This avoids repeatedly sending updates if
+ * it changes bandwidth, only doing it once on an increase.
+ */
+ mld_link->rx_omi.last_max_bw = apply_bw;
+ }
+
+ if (ieee80211_prepare_rx_omi_bw(link_sta, bw)) {
+ mld_link->rx_omi.bw_in_progress = apply_bw;
+ iwl_mld_change_link_omi_bw(mld, link_conf, apply_bw);
+ }
+}
+
+static void iwl_mld_omi_bw_finished_work(struct wiphy *wiphy,
+ struct wiphy_work *work)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_link *mld_link =
+ container_of(work, typeof(*mld_link), rx_omi.finished_work.work);
+ enum ieee80211_sta_rx_bandwidth desired_bw, switched_to_bw;
+ struct ieee80211_vif *vif = mld_link->vif;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_link_sta *link_sta;
+
+ if (!mld_vif->ap_sta)
+ return;
+
+ link_sta = wiphy_dereference(mld->wiphy,
+ mld_vif->ap_sta->link[mld_link->link_id]);
+ if (WARN_ON_ONCE(!link_sta))
+ return;
+
+ link_conf = link_conf_dereference_protected(vif, link_sta->link_id);
+ if (WARN_ON_ONCE(!link_conf))
+ return;
+
+ if (WARN_ON(!mld_link->rx_omi.bw_in_progress))
+ return;
+
+ desired_bw = mld_link->rx_omi.desired_bw;
+ switched_to_bw = mld_link->rx_omi.bw_in_progress;
+
+ ieee80211_finalize_rx_omi_bw(link_sta);
+ mld_link->rx_omi.bw_in_progress = 0;
+
+ if (desired_bw != switched_to_bw)
+ iwl_mld_omi_bw_update(mld, link_conf, mld_link, link_sta,
+ desired_bw, false);
+}
+
+static struct ieee80211_vif *
+iwl_mld_get_omi_bw_reduction_pointers(struct iwl_mld *mld,
+ struct ieee80211_link_sta **link_sta,
+ struct iwl_mld_link **mld_link)
+{
+ struct iwl_mld_vif *mld_vif;
+ struct ieee80211_vif *vif;
+ int n_link_stas = 0;
+
+ *link_sta = NULL;
+
+ if (mld->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_SC)
+ return NULL;
+
+ vif = iwl_mld_get_bss_vif(mld);
+ if (!vif)
+ return NULL;
+
+ for (int i = 0; i < ARRAY_SIZE(mld->fw_id_to_link_sta); i++) {
+ struct ieee80211_link_sta *tmp;
+
+ tmp = wiphy_dereference(mld->wiphy, mld->fw_id_to_link_sta[i]);
+ if (IS_ERR_OR_NULL(tmp))
+ continue;
+
+ n_link_stas++;
+ *link_sta = tmp;
+ }
+
+ /* can't do anything if we have TDLS peers or EMLSR */
+ if (n_link_stas != 1)
+ return NULL;
+
+ mld_vif = iwl_mld_vif_from_mac80211(vif);
+ *mld_link = iwl_mld_link_dereference_check(mld_vif,
+ (*link_sta)->link_id);
+ if (WARN_ON(!*mld_link))
+ return NULL;
+
+ return vif;
+}
+
+void iwl_mld_omi_ap_changed_bw(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf,
+ enum ieee80211_sta_rx_bandwidth bw)
+{
+ struct ieee80211_link_sta *link_sta;
+ struct iwl_mld_link *mld_link;
+ struct ieee80211_vif *vif;
+
+ vif = iwl_mld_get_omi_bw_reduction_pointers(mld, &link_sta, &mld_link);
+ if (!vif)
+ return;
+
+ if (WARN_ON(link_conf->vif != vif))
+ return;
+
+ /* This is 0 if we requested an OMI BW reduction and don't want to
+ * be sending an OMI when the AP's bandwidth changes.
+ */
+ if (!mld_link->rx_omi.last_max_bw)
+ return;
+
+ /* We only need to tell the AP if it increases BW over what we last
+ * told it we were using, if it reduces then our last OMI to it will
+ * not get used anyway (e.g. we said we want 160 but it's doing 80.)
+ */
+ if (bw < mld_link->rx_omi.last_max_bw)
+ return;
+
+ iwl_mld_omi_bw_update(mld, link_conf, mld_link, link_sta, bw, true);
+}
+
+void iwl_mld_handle_omi_status_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct ieee80211_link_sta *link_sta;
+ struct iwl_mld_link *mld_link;
+ struct ieee80211_vif *vif;
+
+ vif = iwl_mld_get_omi_bw_reduction_pointers(mld, &link_sta, &mld_link);
+ if (IWL_FW_CHECK(mld, !vif, "unexpected OMI notification\n"))
+ return;
+
+ if (IWL_FW_CHECK(mld, !mld_link->rx_omi.bw_in_progress,
+ "OMI notification when not requested\n"))
+ return;
+
+ wiphy_delayed_work_queue(mld->hw->wiphy,
+ &mld_link->rx_omi.finished_work,
+ msecs_to_jiffies(IWL_MLD_OMI_AP_SETTLE_DELAY));
+}
+
+void iwl_mld_leave_omi_bw_reduction(struct iwl_mld *mld)
+{
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_link_sta *link_sta;
+ struct iwl_mld_link *mld_link;
+ struct ieee80211_vif *vif;
+
+ vif = iwl_mld_get_omi_bw_reduction_pointers(mld, &link_sta, &mld_link);
+ if (!vif)
+ return;
+
+ link_conf = link_conf_dereference_protected(vif, link_sta->link_id);
+ if (WARN_ON_ONCE(!link_conf))
+ return;
+
+ if (!link_conf->he_support)
+ return;
+
+ mld_link->rx_omi.exit_ts = jiffies;
+
+ iwl_mld_omi_bw_update(mld, link_conf, mld_link, link_sta,
+ IEEE80211_STA_RX_BW_MAX, false);
+}
+
+void iwl_mld_check_omi_bw_reduction(struct iwl_mld *mld)
+{
+ enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_MAX;
+ struct ieee80211_chanctx_conf *chanctx;
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_link_sta *link_sta;
+ struct cfg80211_chan_def chandef;
+ struct iwl_mld_link *mld_link;
+ struct iwl_mld_vif *mld_vif;
+ struct ieee80211_vif *vif;
+ struct iwl_mld_phy *phy;
+ u16 punctured;
+ int exit_thr;
+
+ /* not allowed in CAM mode */
+ if (iwlmld_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
+ return;
+
+ /* must have one BSS connection (no P2P), no TDLS, nor EMLSR */
+ vif = iwl_mld_get_omi_bw_reduction_pointers(mld, &link_sta, &mld_link);
+ if (!vif)
+ return;
+
+ link_conf = link_conf_dereference_protected(vif, link_sta->link_id);
+ if (WARN_ON_ONCE(!link_conf))
+ return;
+
+ if (!link_conf->he_support)
+ return;
+
+ chanctx = wiphy_dereference(mld->wiphy, mld_link->chan_ctx);
+ if (WARN_ON(!chanctx))
+ return;
+
+ mld_vif = iwl_mld_vif_from_mac80211(vif);
+ if (!mld_vif->authorized)
+ goto apply;
+
+ /* must not be in low-latency mode */
+ if (iwl_mld_vif_low_latency(mld_vif))
+ goto apply;
+
+ chandef = link_conf->chanreq.oper;
+
+ switch (chandef.width) {
+ case NL80211_CHAN_WIDTH_320:
+ exit_thr = IWL_MLD_OMI_EXIT_CHAN_LOAD_320;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ exit_thr = IWL_MLD_OMI_EXIT_CHAN_LOAD_160;
+ break;
+ default:
+ /* since we reduce to 80 MHz, must have more to start with */
+ goto apply;
+ }
+
+ /* not to be done if primary 80 MHz is punctured */
+ if (cfg80211_chandef_primary(&chandef, NL80211_CHAN_WIDTH_80,
+ &punctured) < 0 ||
+ punctured != 0)
+ goto apply;
+
+ phy = iwl_mld_phy_from_mac80211(chanctx);
+
+ if (phy->channel_load_by_us > exit_thr) {
+ /* send OMI for max bandwidth */
+ goto apply;
+ }
+
+ if (phy->channel_load_by_us > IWL_MLD_OMI_ENTER_CHAN_LOAD) {
+ /* no changes between enter/exit thresholds */
+ return;
+ }
+
+ if (time_is_after_jiffies(mld_link->rx_omi.exit_ts +
+ msecs_to_jiffies(IWL_MLD_OMI_EXIT_PROTECTION)))
+ return;
+
+ /* reduce bandwidth to 80 MHz to save power */
+ bw = IEEE80211_STA_RX_BW_80;
+apply:
+ iwl_mld_omi_bw_update(mld, link_conf, mld_link, link_sta, bw, false);
+}
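
The function above is a hysteresis on the driver's own channel load: above the exit threshold it sends an OMI for the maximum bandwidth, between the two thresholds it leaves the current state untouched, and only at or below the enter threshold, and outside the exit-protection window, does it reduce to 80 MHz. A stand-alone sketch of that three-way decision follows (not part of this patch); the threshold values are placeholders for the IWL_MLD_OMI_* constants and the protection timer is omitted.

#include <stdio.h>

#define DEMO_EXIT_THR	35	/* above this: go back to max bandwidth */
#define DEMO_ENTER_THR	10	/* at or below this: reduce to 80 MHz */

enum demo_bw { DEMO_BW_80, DEMO_BW_MAX, DEMO_BW_KEEP };

static enum demo_bw demo_omi_decision(int chan_load_by_us)
{
	if (chan_load_by_us > DEMO_EXIT_THR)
		return DEMO_BW_MAX;	/* busy: use full bandwidth */
	if (chan_load_by_us > DEMO_ENTER_THR)
		return DEMO_BW_KEEP;	/* between thresholds: no change */
	return DEMO_BW_80;		/* idle enough: reduce to save power */
}

int main(void)
{
	int loads[] = { 5, 20, 50 };

	for (unsigned int i = 0; i < sizeof(loads) / sizeof(loads[0]); i++)
		printf("load %d -> %d\n", loads[i], demo_omi_decision(loads[i]));
	return 0;
}
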
+
+IWL_MLD_ALLOC_FN(link, bss_conf)
+
+/* Constructor function for struct iwl_mld_link */
+static int
+iwl_mld_init_link(struct iwl_mld *mld, struct ieee80211_bss_conf *link,
+ struct iwl_mld_link *mld_link)
+{
+ mld_link->vif = link->vif;
+ mld_link->link_id = link->link_id;
+
+ iwl_mld_init_internal_sta(&mld_link->bcast_sta);
+ iwl_mld_init_internal_sta(&mld_link->mcast_sta);
+ iwl_mld_init_internal_sta(&mld_link->aux_sta);
+
+ wiphy_delayed_work_init(&mld_link->rx_omi.finished_work,
+ iwl_mld_omi_bw_finished_work);
+
+ return iwl_mld_allocate_link_fw_id(mld, &mld_link->fw_id, link);
+}
+
+/* Initializes the link structure, maps fw id to the ieee80211_bss_conf, and
+ * adds a link to the fw
+ */
+int iwl_mld_add_link(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(bss_conf->vif);
+ struct iwl_mld_link *link = iwl_mld_link_from_mac80211(bss_conf);
+ bool is_deflink = bss_conf == &bss_conf->vif->bss_conf;
+ int ret;
+
+ if (!link) {
+ if (is_deflink)
+ link = &mld_vif->deflink;
+ else
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ } else {
+ WARN_ON(!mld->fw_status.in_hw_restart);
+ }
+
+ ret = iwl_mld_init_link(mld, bss_conf, link);
+ if (ret)
+ goto free;
+
+ rcu_assign_pointer(mld_vif->link[bss_conf->link_id], link);
+
+ ret = iwl_mld_add_link_to_fw(mld, bss_conf);
+ if (ret) {
+ RCU_INIT_POINTER(mld->fw_id_to_bss_conf[link->fw_id], NULL);
+ RCU_INIT_POINTER(mld_vif->link[bss_conf->link_id], NULL);
+ goto free;
+ }
+
+ return ret;
+
+free:
+ if (!is_deflink)
+ kfree(link);
+ return ret;
+}
+
+/* Remove link from fw, unmap the bss_conf, and destroy the link structure */
+void iwl_mld_remove_link(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(bss_conf->vif);
+ struct iwl_mld_link *link = iwl_mld_link_from_mac80211(bss_conf);
+ bool is_deflink = link == &mld_vif->deflink;
+
+ if (WARN_ON(!link || link->active))
+ return;
+
+ iwl_mld_rm_link_from_fw(mld, bss_conf);
+ /* Continue cleanup on failure */
+
+ if (!is_deflink)
+ kfree_rcu(link, rcu_head);
+
+ RCU_INIT_POINTER(mld_vif->link[bss_conf->link_id], NULL);
+
+ wiphy_delayed_work_cancel(mld->wiphy, &link->rx_omi.finished_work);
+
+ if (WARN_ON(link->fw_id >= mld->fw->ucode_capa.num_links))
+ return;
+
+ RCU_INIT_POINTER(mld->fw_id_to_bss_conf[link->fw_id], NULL);
+}
+
+void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ const struct iwl_missed_beacons_notif *notif = (const void *)pkt->data;
+ union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };
+ u32 link_id = le32_to_cpu(notif->link_id);
+ u32 missed_bcon = le32_to_cpu(notif->consec_missed_beacons);
+ u32 missed_bcon_since_rx =
+ le32_to_cpu(notif->consec_missed_beacons_since_last_rx);
+ u32 scnd_lnk_bcn_lost =
+ le32_to_cpu(notif->consec_missed_beacons_other_link);
+ struct ieee80211_bss_conf *link_conf =
+ iwl_mld_fw_id_to_link_conf(mld, link_id);
+ u32 bss_param_ch_cnt_link_id;
+ struct ieee80211_vif *vif;
+
+ if (WARN_ON(!link_conf))
+ return;
+
+ vif = link_conf->vif;
+ bss_param_ch_cnt_link_id = link_conf->bss_param_ch_cnt_link_id;
+
+ IWL_DEBUG_INFO(mld,
+ "missed bcn link_id=%u, consecutive=%u, consecutive_since_rx=%u\n",
+ link_id, missed_bcon, missed_bcon_since_rx);
+
+ if (WARN_ON(!vif))
+ return;
+
+ mld->trans->dbg.dump_file_name_ext_valid = true;
+ snprintf(mld->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME,
+ "LinkId_%d_MacType_%d", link_id,
+ iwl_mld_mac80211_iftype_to_fw(vif));
+
+ iwl_dbg_tlv_time_point(&mld->fwrt,
+ IWL_FW_INI_TIME_POINT_MISSED_BEACONS, &tp_data);
+
+ if (missed_bcon >= IWL_MLD_MISSED_BEACONS_THRESHOLD_LONG) {
+ if (missed_bcon_since_rx >=
+ IWL_MLD_MISSED_BEACONS_SINCE_RX_THOLD) {
+ ieee80211_connection_loss(vif);
+ return;
+ }
+ IWL_WARN(mld,
+ "missed beacons exceed threshold, but still receiving data. Staying connected, expect bugs.\n");
+ return;
+ }
+
+ if (missed_bcon_since_rx > IWL_MLD_MISSED_BEACONS_THRESHOLD) {
+ ieee80211_cqm_beacon_loss_notify(vif, GFP_ATOMIC);
+
+ /* try to switch links, no-op if we don't have MLO */
+ iwl_mld_int_mlo_scan(mld, vif);
+ }
+
+ /* no more logic if we're not in EMLSR */
+ if (hweight16(vif->active_links) <= 1)
+ return;
+
+ /* We are processing a notification before link activation */
+ if (le32_to_cpu(notif->other_link_id) == FW_CTXT_ID_INVALID)
+ return;
+
+ /* Exit EMLSR if we lost more than
+ * IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS beacons on both links
+ * OR more than IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH on current link.
+ * OR more than IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED
+ * on current link and the link's bss_param_ch_count has changed on
+ * the other link's beacon.
+ */
+ if ((missed_bcon >= IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS &&
+ scnd_lnk_bcn_lost >= IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS) ||
+ missed_bcon >= IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH ||
+ (bss_param_ch_cnt_link_id != link_id &&
+ missed_bcon >=
+ IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED)) {
+ iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_MISSED_BEACON,
+ iwl_mld_get_primary_link(vif));
+ }
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_handle_missed_beacon_notif);
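
The EMLSR exit check at the end of the handler combines three cases: heavy loss on both links, heavy loss on the current link alone, or moderate loss on the current link while its BSS parameters are reported via the other link's beacon. A stand-alone sketch of that predicate follows (not part of this patch); the threshold values are placeholders for the IWL_MLD_BCN_LOSS_* constants and the last parameter stands in for the bss_param_ch_cnt_link_id != link_id check.

#include <stdio.h>
#include <stdbool.h>

#define DEMO_THRESH_2_LINKS		5	/* both links losing beacons */
#define DEMO_THRESH_CUR_LINK		8	/* current link alone */
#define DEMO_THRESH_BSS_PARAM_CHANGED	6	/* bss params on other link */

static bool demo_should_exit_emlsr(unsigned int missed, unsigned int missed_other,
				   bool bss_params_on_other_link)
{
	return (missed >= DEMO_THRESH_2_LINKS &&
		missed_other >= DEMO_THRESH_2_LINKS) ||
	       missed >= DEMO_THRESH_CUR_LINK ||
	       (bss_params_on_other_link &&
		missed >= DEMO_THRESH_BSS_PARAM_CHANGED);
}

int main(void)
{
	printf("%d\n", demo_should_exit_emlsr(6, 6, false));	/* 1: both links */
	printf("%d\n", demo_should_exit_emlsr(6, 0, false));	/* 0: keep EMLSR */
	printf("%d\n", demo_should_exit_emlsr(6, 0, true));	/* 1: bss param case */
	return 0;
}
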
+
+bool iwl_mld_cancel_missed_beacon_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt,
+ u32 removed_link_id)
+{
+ struct iwl_missed_beacons_notif *notif = (void *)pkt->data;
+
+ if (le32_to_cpu(notif->other_link_id) == removed_link_id) {
+ /* Second link is being removed. Don't cancel the notification,
+ * but mark second link as invalid.
+ */
+ notif->other_link_id = cpu_to_le32(FW_CTXT_ID_INVALID);
+ }
+
+ /* If the primary link is removed, cancel the notification */
+ return le32_to_cpu(notif->link_id) == removed_link_id;
+}
+
+int iwl_mld_link_set_associated(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link)
+{
+ return iwl_mld_change_link_in_fw(mld, link, LINK_CONTEXT_MODIFY_ALL &
+ ~(LINK_CONTEXT_MODIFY_ACTIVE |
+ LINK_CONTEXT_MODIFY_EHT_PARAMS));
+}
+
+struct iwl_mld_rssi_to_grade {
+ s8 rssi[2];
+ u16 grade;
+};
+
+#define RSSI_TO_GRADE_LINE(_lb, _hb_uhb, _grade) \
+ { \
+ .rssi = {_lb, _hb_uhb}, \
+ .grade = _grade \
+ }
+
+/*
+ * This array must be sorted by increasing RSSI for proper functionality.
+ * The grades are actually estimated throughput, represented as fixed-point
+ * with a scale factor of 1/10.
+ */
+static const struct iwl_mld_rssi_to_grade rssi_to_grade_map[] = {
+ RSSI_TO_GRADE_LINE(-85, -89, 172),
+ RSSI_TO_GRADE_LINE(-83, -86, 344),
+ RSSI_TO_GRADE_LINE(-82, -85, 516),
+ RSSI_TO_GRADE_LINE(-80, -83, 688),
+ RSSI_TO_GRADE_LINE(-77, -79, 1032),
+ RSSI_TO_GRADE_LINE(-73, -76, 1376),
+ RSSI_TO_GRADE_LINE(-70, -74, 1548),
+ RSSI_TO_GRADE_LINE(-69, -72, 1720),
+ RSSI_TO_GRADE_LINE(-65, -68, 2064),
+ RSSI_TO_GRADE_LINE(-61, -66, 2294),
+ RSSI_TO_GRADE_LINE(-58, -61, 2580),
+ RSSI_TO_GRADE_LINE(-55, -58, 2868),
+ RSSI_TO_GRADE_LINE(-46, -55, 3098),
+ RSSI_TO_GRADE_LINE(-43, -54, 3442)
+};
+
+#define MAX_GRADE (rssi_to_grade_map[ARRAY_SIZE(rssi_to_grade_map) - 1].grade)
+
+#define DEFAULT_CHAN_LOAD_2GHZ 30
+#define DEFAULT_CHAN_LOAD_5GHZ 15
+#define DEFAULT_CHAN_LOAD_6GHZ 0
+
+/* Factor calculations are done in fixed-point with a scaling factor of 1/256 */
+#define SCALE_FACTOR 256
+#define MAX_CHAN_LOAD 256
+
+static unsigned int
+iwl_mld_get_n_subchannels(const struct ieee80211_bss_conf *link_conf)
+{
+ enum nl80211_chan_width chan_width =
+ link_conf->chanreq.oper.width;
+ int mhz = nl80211_chan_width_to_mhz(chan_width);
+ unsigned int n_subchannels;
+
+ if (WARN_ONCE(mhz < 20 || mhz > 320,
+ "Invalid channel width: %d\n", mhz))
+ return 1;
+
+ /* total number of subchannels */
+ n_subchannels = mhz / 20;
+
+ /* No puncturing if less than 80 MHz */
+ if (mhz >= 80)
+ n_subchannels -= hweight16(link_conf->chanreq.oper.punctured);
+
+ return n_subchannels;
+}
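
In other words, the subchannel count is the channel width in 20 MHz units, minus the punctured subchannels once the width reaches 80 MHz. A stand-alone sketch, not part of this patch, with __builtin_popcount as the user-space stand-in for hweight16():

#include <stdio.h>
#include <stdint.h>

static unsigned int demo_n_subchannels(int mhz, uint16_t punctured)
{
	unsigned int n = mhz / 20;

	if (mhz >= 80)			/* no puncturing below 80 MHz */
		n -= __builtin_popcount(punctured);
	return n;
}

int main(void)
{
	/* 160 MHz with two punctured 20 MHz subchannels: 8 - 2 = 6 */
	printf("%u\n", demo_n_subchannels(160, 0x0005));
	return 0;
}
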
+
+static int
+iwl_mld_get_chan_load_from_element(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct ieee80211_vif *vif = link_conf->vif;
+ const struct cfg80211_bss_ies *ies;
+ const struct element *bss_load_elem = NULL;
+ const struct ieee80211_bss_load_elem *bss_load;
+
+ guard(rcu)();
+
+ if (ieee80211_vif_link_active(vif, link_conf->link_id))
+ ies = rcu_dereference(link_conf->bss->beacon_ies);
+ else
+ ies = rcu_dereference(link_conf->bss->ies);
+
+ if (ies)
+ bss_load_elem = cfg80211_find_elem(WLAN_EID_QBSS_LOAD,
+ ies->data, ies->len);
+
+ if (!bss_load_elem ||
+ bss_load_elem->datalen != sizeof(*bss_load))
+ return -EINVAL;
+
+ bss_load = (const void *)bss_load_elem->data;
+
+ return bss_load->channel_util;
+}
+
+static unsigned int
+iwl_mld_get_chan_load_by_us(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf,
+ bool expect_active_link)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link_conf);
+ struct ieee80211_chanctx_conf *chan_ctx;
+ struct iwl_mld_phy *phy;
+
+ if (!mld_link || !mld_link->active) {
+ WARN_ON(expect_active_link);
+ return 0;
+ }
+
+ if (WARN_ONCE(!rcu_access_pointer(mld_link->chan_ctx),
+ "Active link (%u) without channel ctxt assigned!\n",
+ link_conf->link_id))
+ return 0;
+
+ chan_ctx = wiphy_dereference(mld->wiphy, mld_link->chan_ctx);
+ phy = iwl_mld_phy_from_mac80211(chan_ctx);
+
+ return phy->channel_load_by_us;
+}
+
+/* Returns error if the channel utilization element is invalid/unavailable */
+int iwl_mld_get_chan_load_by_others(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf,
+ bool expect_active_link)
+{
+ int chan_load;
+ unsigned int chan_load_by_us;
+
+ /* get overall load */
+ chan_load = iwl_mld_get_chan_load_from_element(mld, link_conf);
+ if (chan_load < 0)
+ return chan_load;
+
+ chan_load_by_us = iwl_mld_get_chan_load_by_us(mld, link_conf,
+ expect_active_link);
+
+ /* channel load by us is given in percentage */
+ chan_load_by_us =
+ NORMALIZE_PERCENT_TO_255(chan_load_by_us);
+
+ /* Use only values that firmware sends that can possibly be valid */
+ if (chan_load_by_us <= chan_load)
+ chan_load -= chan_load_by_us;
+
+ return chan_load;
+}
+
+static unsigned int
+iwl_mld_get_default_chan_load(struct ieee80211_bss_conf *link_conf)
+{
+ enum nl80211_band band = link_conf->chanreq.oper.chan->band;
+
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ return DEFAULT_CHAN_LOAD_2GHZ;
+ case NL80211_BAND_5GHZ:
+ return DEFAULT_CHAN_LOAD_5GHZ;
+ case NL80211_BAND_6GHZ:
+ return DEFAULT_CHAN_LOAD_6GHZ;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+
+unsigned int iwl_mld_get_chan_load(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf)
+{
+ int chan_load;
+
+ chan_load = iwl_mld_get_chan_load_by_others(mld, link_conf, false);
+ if (chan_load >= 0)
+ return chan_load;
+
+ /* No information from the element, take the defaults */
+ chan_load = iwl_mld_get_default_chan_load(link_conf);
+
+ /* The defaults are given in percentage */
+ return NORMALIZE_PERCENT_TO_255(chan_load);
+}
+
+static unsigned int
+iwl_mld_get_avail_chan_load(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf)
+{
+ return MAX_CHAN_LOAD - iwl_mld_get_chan_load(mld, link_conf);
+}
+
+/* This function calculates the grade of a link. Returns 0 on error */
+unsigned int iwl_mld_get_link_grade(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf)
+{
+ enum nl80211_band band;
+ int rssi_idx;
+ s32 link_rssi;
+ unsigned int grade = MAX_GRADE;
+
+ if (WARN_ON_ONCE(!link_conf))
+ return 0;
+
+ band = link_conf->chanreq.oper.chan->band;
+ if (WARN_ONCE(band != NL80211_BAND_2GHZ &&
+ band != NL80211_BAND_5GHZ &&
+ band != NL80211_BAND_6GHZ,
+ "Invalid band (%u)\n", band))
+ return 0;
+
+ link_rssi = MBM_TO_DBM(link_conf->bss->signal);
+ /*
+ * For 6 GHz the RSSI of the beacons is lower than
+ * the RSSI of the data.
+ */
+ if (band == NL80211_BAND_6GHZ && link_rssi)
+ link_rssi += 4;
+
+ rssi_idx = band == NL80211_BAND_2GHZ ? 0 : 1;
+
+ /* No valid RSSI - take the lowest grade */
+ if (!link_rssi)
+ link_rssi = rssi_to_grade_map[0].rssi[rssi_idx];
+
+ IWL_DEBUG_EHT(mld,
+ "Calculating grade of link %d: band = %d, bandwidth = %d, punctured subchannels =0x%x RSSI = %d\n",
+ link_conf->link_id, band,
+ link_conf->chanreq.oper.width,
+ link_conf->chanreq.oper.punctured, link_rssi);
+
+ /* Get grade based on RSSI */
+ for (int i = 0; i < ARRAY_SIZE(rssi_to_grade_map); i++) {
+ const struct iwl_mld_rssi_to_grade *line =
+ &rssi_to_grade_map[i];
+
+ if (link_rssi > line->rssi[rssi_idx])
+ continue;
+ grade = line->grade;
+ break;
+ }
+
+ /* Apply the channel load and puncturing factors */
+ grade = grade * iwl_mld_get_avail_chan_load(mld, link_conf) / SCALE_FACTOR;
+ grade = grade * iwl_mld_get_n_subchannels(link_conf);
+
+ IWL_DEBUG_EHT(mld, "Link %d's grade: %d\n", link_conf->link_id, grade);
+
+ return grade;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_get_link_grade);
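
A worked example of the arithmetic above, as a stand-alone sketch that is not part of this patch: a 5 GHz, 80 MHz, unpunctured link with an RSSI of -70 dBm and no QBSS load element. The table above yields 2064 for -70 dBm in the 5/6 GHz column, and the 15% default channel load is assumed because no element is available.

#include <stdio.h>

#define SCALE_FACTOR	256
#define MAX_CHAN_LOAD	256
#define PCT_TO_255(p)	((p) * 256 / 100)

int main(void)
{
	unsigned int rssi_grade = 2064;		/* table value for -70 dBm, 5/6 GHz column */
	unsigned int chan_load = PCT_TO_255(15); /* 5 GHz default load: 15% -> 38 */
	unsigned int avail = MAX_CHAN_LOAD - chan_load;	/* 218 */
	unsigned int n_subchannels = 80 / 20;	/* 80 MHz, nothing punctured */
	unsigned int grade = rssi_grade * avail / SCALE_FACTOR * n_subchannels;

	printf("grade = %u\n", grade);		/* prints 7028 */
	return 0;
}
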
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/link.h b/drivers/net/wireless/intel/iwlwifi/mld/link.h
new file mode 100644
index 000000000000..42b7bdcbd741
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/link.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_link_h__
+#define __iwl_mld_link_h__
+
+#include <net/mac80211.h>
+
+#include "mld.h"
+#include "sta.h"
+
+/**
+ * struct iwl_probe_resp_data - data for NoA/CSA updates
+ * @rcu_head: used for freeing the data on update
+ * @notif: notification data
+ * @noa_len: length of NoA attribute, calculated from the notification
+ */
+struct iwl_probe_resp_data {
+ struct rcu_head rcu_head;
+ struct iwl_probe_resp_data_notif notif;
+ int noa_len;
+};
+
+/**
+ * struct iwl_mld_link - link configuration parameters
+ *
+ * @rcu_head: RCU head for freeing this data.
+ * @fw_id: the fw id of the link.
+ * @active: if the link is active or not.
+ * @queue_params: QoS data from mac80211. This is updated with a call to
+ * drv_conf_tx for each AC, and then notified once with BSS_CHANGED_QOS.
+ * So we store it here and then send one link cmd for all the ACs.
+ * @chan_ctx: pointer to the channel context assigned to the link. If a link
+ * has an assigned channel context it means that it is active.
+ * @he_ru_2mhz_block: 26-tone RU OFDMA transmissions should be blocked.
+ * @igtk: fw can only have one IGTK at a time, whereas mac80211 can have two.
+ * This tracks the one IGTK that currently exists in FW.
+ * @vif: the vif this link belongs to
+ * @bcast_sta: station used for broadcast packets. Used in AP, GO and IBSS.
+ * @mcast_sta: station used for multicast packets. Used in AP, GO and IBSS.
+ * @aux_sta: station used for remain on channel. Used in P2P device.
+ * @link_id: over the air link ID
+ * @ap_early_keys: The firmware cannot install keys before bcast/mcast STAs,
+ * but higher layers work differently, so we store the keys here for
+ * later installation.
+ * @silent_deactivation: next deactivation needs to be silent.
+ * @probe_resp_data: data from FW notification to store NOA related data to be
+ * inserted into probe response.
+ * @rx_omi: data for BW reduction with OMI
+ * @rx_omi.bw_in_progress: update is in progress (indicates target BW)
+ * @rx_omi.exit_ts: timestamp of last exit
+ * @rx_omi.finished_work: work for the delayed reaction to the firmware saying
+ * the change was applied, and for then applying a new mode if it was
+ * updated while waiting for firmware/AP settle delay.
+ * @rx_omi.desired_bw: desired bandwidth
+ * @rx_omi.last_max_bw: last maximum BW used by firmware, for AP BW changes
+ */
+struct iwl_mld_link {
+ struct rcu_head rcu_head;
+
+ /* Add here fields that need clean up on restart */
+ struct_group(zeroed_on_hw_restart,
+ u8 fw_id;
+ bool active;
+ struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
+ struct ieee80211_chanctx_conf __rcu *chan_ctx;
+ bool he_ru_2mhz_block;
+ struct ieee80211_key_conf *igtk;
+ );
+ /* And here fields that survive a fw restart */
+ struct ieee80211_vif *vif;
+ struct iwl_mld_int_sta bcast_sta;
+ struct iwl_mld_int_sta mcast_sta;
+ struct iwl_mld_int_sta aux_sta;
+ u8 link_id;
+
+ struct {
+ struct wiphy_delayed_work finished_work;
+ unsigned long exit_ts;
+ enum ieee80211_sta_rx_bandwidth bw_in_progress,
+ desired_bw,
+ last_max_bw;
+ } rx_omi;
+
+ /* we can only have 2 GTK + 2 IGTK + 2 BIGTK active at a time */
+ struct ieee80211_key_conf *ap_early_keys[6];
+ bool silent_deactivation;
+ struct iwl_probe_resp_data __rcu *probe_resp_data;
+};
+
+/* Cleanup function for struct iwl_mld_link, will be called in restart */
+static inline void
+iwl_mld_cleanup_link(struct iwl_mld *mld, struct iwl_mld_link *link)
+{
+ struct iwl_probe_resp_data *probe_data;
+
+ probe_data = wiphy_dereference(mld->wiphy, link->probe_resp_data);
+ RCU_INIT_POINTER(link->probe_resp_data, NULL);
+ if (probe_data)
+ kfree_rcu(probe_data, rcu_head);
+
+ CLEANUP_STRUCT(link);
+ if (link->bcast_sta.sta_id != IWL_INVALID_STA)
+ iwl_mld_free_internal_sta(mld, &link->bcast_sta);
+ if (link->mcast_sta.sta_id != IWL_INVALID_STA)
+ iwl_mld_free_internal_sta(mld, &link->mcast_sta);
+ if (link->aux_sta.sta_id != IWL_INVALID_STA)
+ iwl_mld_free_internal_sta(mld, &link->aux_sta);
+}
+
+/* Convert a percentage from [0,100] to [0,255] */
+#define NORMALIZE_PERCENT_TO_255(percentage) ((percentage) * 256 / 100)
+
+int iwl_mld_add_link(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *bss_conf);
+void iwl_mld_remove_link(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *bss_conf);
+int iwl_mld_activate_link(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link);
+void iwl_mld_deactivate_link(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link);
+int iwl_mld_change_link_omi_bw(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link,
+ enum ieee80211_sta_rx_bandwidth bw);
+int iwl_mld_change_link_in_fw(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link, u32 changes);
+void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+bool iwl_mld_cancel_missed_beacon_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt,
+ u32 removed_link_id);
+int iwl_mld_link_set_associated(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link);
+
+unsigned int iwl_mld_get_link_grade(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf);
+
+unsigned int iwl_mld_get_chan_load(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf);
+
+int iwl_mld_get_chan_load_by_others(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf,
+ bool expect_active_link);
+void iwl_mld_handle_omi_status_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+void iwl_mld_leave_omi_bw_reduction(struct iwl_mld *mld);
+void iwl_mld_check_omi_bw_reduction(struct iwl_mld *mld);
+void iwl_mld_omi_ap_changed_bw(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf,
+ enum ieee80211_sta_rx_bandwidth bw);
+
+#endif /* __iwl_mld_link_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/low_latency.c b/drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
new file mode 100644
index 000000000000..a4a612afb3b3
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include "mld.h"
+#include "iface.h"
+#include "low_latency.h"
+#include "hcmd.h"
+#include "power.h"
+#include "mlo.h"
+
+#define MLD_LL_WK_INTERVAL_MSEC 500
+#define MLD_LL_PERIOD (HZ * MLD_LL_WK_INTERVAL_MSEC / 1000)
+#define MLD_LL_ACTIVE_WK_PERIOD (HZ * 10)
+
+/* VO/VI packets within MLD_LL_ACTIVE_WK_PERIOD needed to enable low latency */
+#define MLD_LL_ENABLE_THRESH 100
+
+static bool iwl_mld_calc_low_latency(struct iwl_mld *mld,
+ unsigned long timestamp)
+{
+ struct iwl_mld_low_latency *ll = &mld->low_latency;
+ bool global_low_latency = false;
+ u8 num_rx_q = mld->trans->num_rx_queues;
+
+ for (int mac_id = 0; mac_id < NUM_MAC_INDEX_DRIVER; mac_id++) {
+ u32 total_vo_vi_pkts = 0;
+ bool ll_period_expired;
+
+ /* If it's not initialized yet, it means we have not yet
+ * received/transmitted any vo/vi packet on this MAC.
+ */
+ if (!ll->window_start[mac_id])
+ continue;
+
+ ll_period_expired =
+ time_after(timestamp, ll->window_start[mac_id] +
+ MLD_LL_ACTIVE_WK_PERIOD);
+
+ if (ll_period_expired)
+ ll->window_start[mac_id] = timestamp;
+
+ for (int q = 0; q < num_rx_q; q++) {
+ struct iwl_mld_low_latency_packets_counters *counters =
+ &mld->low_latency.pkts_counters[q];
+
+ spin_lock_bh(&counters->lock);
+
+ total_vo_vi_pkts += counters->vo_vi[mac_id];
+
+ if (ll_period_expired)
+ counters->vo_vi[mac_id] = 0;
+
+ spin_unlock_bh(&counters->lock);
+ }
+
+ /* Enable immediately once enough packets were seen, but only
+ * disable when the low-latency window has expired and the count
+ * is below the threshold.
+ */
+ if (total_vo_vi_pkts > MLD_LL_ENABLE_THRESH)
+ mld->low_latency.result[mac_id] = true;
+ else if (ll_period_expired)
+ mld->low_latency.result[mac_id] = false;
+
+ global_low_latency |= mld->low_latency.result[mac_id];
+ }
+
+ return global_low_latency;
+}
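
Per MAC, the decision above boils down to: enable as soon as the summed VO/VI count passes the threshold, disable only once a full window has expired with the count below the threshold, and otherwise keep the previous result. A stand-alone sketch of that rule (not part of this patch):

#include <stdio.h>
#include <stdbool.h>

#define DEMO_ENABLE_THRESH	100	/* packets within the 10 s window */

static bool demo_low_latency(unsigned int vo_vi_pkts, bool window_expired,
			     bool current)
{
	if (vo_vi_pkts > DEMO_ENABLE_THRESH)
		return true;		/* enable immediately */
	if (window_expired)
		return false;		/* below threshold for a full window */
	return current;			/* keep the previous decision */
}

int main(void)
{
	printf("%d\n", demo_low_latency(150, false, false));	/* 1: enable */
	printf("%d\n", demo_low_latency(20, false, true));	/* 1: keep */
	printf("%d\n", demo_low_latency(20, true, true));	/* 0: disable */
	return 0;
}
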
+
+static void iwl_mld_low_latency_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld *mld = _data;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ bool prev = mld_vif->low_latency_causes & LOW_LATENCY_TRAFFIC;
+ bool low_latency;
+
+ if (WARN_ON(mld_vif->fw_id >= ARRAY_SIZE(mld->low_latency.result)))
+ return;
+
+ low_latency = mld->low_latency.result[mld_vif->fw_id];
+
+ if (prev != low_latency)
+ iwl_mld_vif_update_low_latency(mld, vif, low_latency,
+ LOW_LATENCY_TRAFFIC);
+}
+
+static void iwl_mld_low_latency_wk(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+ struct iwl_mld *mld = container_of(wk, struct iwl_mld,
+ low_latency.work.work);
+ unsigned long timestamp = jiffies;
+ bool low_latency_active;
+
+ if (mld->fw_status.in_hw_restart)
+ return;
+
+ /* It is assumed that the work was scheduled only after checking that
+ * at least MLD_LL_PERIOD has passed since the last update.
+ */
+
+ low_latency_active = iwl_mld_calc_low_latency(mld, timestamp);
+
+ /* Update the timestamp now after the low-latency calculation */
+ mld->low_latency.timestamp = timestamp;
+
+ /* If low-latency is active we need to force re-evaluation after
+ * 10 seconds, so that we can disable low-latency when
+ * the low-latency traffic ends.
+ *
+ * Otherwise, we don't need to run the work because there is nothing to
+ * disable.
+ *
+ * Note that this has no impact on the regular scheduling of the
+ * updates triggered by traffic - those happen whenever the
+ * MLD_LL_PERIOD timeout expire.
+ */
+ if (low_latency_active)
+ wiphy_delayed_work_queue(mld->wiphy, &mld->low_latency.work,
+ MLD_LL_ACTIVE_WK_PERIOD);
+
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_low_latency_iter, mld);
+}
+
+int iwl_mld_low_latency_init(struct iwl_mld *mld)
+{
+ struct iwl_mld_low_latency *ll = &mld->low_latency;
+ unsigned long ts = jiffies;
+
+ ll->pkts_counters = kcalloc(mld->trans->num_rx_queues,
+ sizeof(*ll->pkts_counters), GFP_KERNEL);
+ if (!ll->pkts_counters)
+ return -ENOMEM;
+
+ for (int q = 0; q < mld->trans->num_rx_queues; q++)
+ spin_lock_init(&ll->pkts_counters[q].lock);
+
+ wiphy_delayed_work_init(&ll->work, iwl_mld_low_latency_wk);
+
+ ll->timestamp = ts;
+
+ /* The low-latency window_start will be initialized per-MAC on
+ * the first vo/vi packet received/transmitted.
+ */
+
+ return 0;
+}
+
+void iwl_mld_low_latency_free(struct iwl_mld *mld)
+{
+ struct iwl_mld_low_latency *ll = &mld->low_latency;
+
+ kfree(ll->pkts_counters);
+ ll->pkts_counters = NULL;
+}
+
+void iwl_mld_low_latency_restart_cleanup(struct iwl_mld *mld)
+{
+ struct iwl_mld_low_latency *ll = &mld->low_latency;
+
+ ll->timestamp = jiffies;
+
+ memset(ll->window_start, 0, sizeof(ll->window_start));
+ memset(ll->result, 0, sizeof(ll->result));
+
+ for (int q = 0; q < mld->trans->num_rx_queues; q++)
+ memset(ll->pkts_counters[q].vo_vi, 0,
+ sizeof(ll->pkts_counters[q].vo_vi));
+}
+
+static int iwl_mld_send_low_latency_cmd(struct iwl_mld *mld, bool low_latency,
+ u16 mac_id)
+{
+ struct iwl_mac_low_latency_cmd cmd = {
+ .mac_id = cpu_to_le32(mac_id)
+ };
+ u16 cmd_id = WIDE_ID(MAC_CONF_GROUP, LOW_LATENCY_CMD);
+ int ret;
+
+ if (low_latency) {
+ /* Currently we don't care about the direction */
+ cmd.low_latency_rx = 1;
+ cmd.low_latency_tx = 1;
+ }
+
+ ret = iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd);
+ if (ret)
+ IWL_ERR(mld, "Failed to send low latency command\n");
+
+ return ret;
+}
+
+static void iwl_mld_vif_set_low_latency(struct iwl_mld_vif *mld_vif, bool set,
+ enum iwl_mld_low_latency_cause cause)
+{
+ if (set)
+ mld_vif->low_latency_causes |= cause;
+ else
+ mld_vif->low_latency_causes &= ~cause;
+}
+
+void iwl_mld_vif_update_low_latency(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ bool low_latency,
+ enum iwl_mld_low_latency_cause cause)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ bool prev;
+
+ prev = iwl_mld_vif_low_latency(mld_vif);
+ iwl_mld_vif_set_low_latency(mld_vif, low_latency, cause);
+
+ low_latency = iwl_mld_vif_low_latency(mld_vif);
+ if (low_latency == prev)
+ return;
+
+ if (iwl_mld_send_low_latency_cmd(mld, low_latency, mld_vif->fw_id)) {
+ /* revert to previous low-latency state */
+ iwl_mld_vif_set_low_latency(mld_vif, prev, cause);
+ return;
+ }
+
+ if (low_latency)
+ iwl_mld_leave_omi_bw_reduction(mld);
+
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_P2P_CLIENT)
+ return;
+
+ iwl_mld_update_mac_power(mld, vif, false);
+
+ if (low_latency)
+ iwl_mld_retry_emlsr(mld, vif);
+}
+
+static bool iwl_mld_is_vo_vi_pkt(struct ieee80211_hdr *hdr)
+{
+ u8 tid;
+ static const u8 tid_to_mac80211_ac[] = {
+ IEEE80211_AC_BE,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_VO,
+ };
+
+ if (!hdr || !ieee80211_is_data_qos(hdr->frame_control))
+ return false;
+
+ tid = ieee80211_get_tid(hdr);
+ if (tid >= IWL_MAX_TID_COUNT)
+ return false;
+
+ return tid_to_mac80211_ac[tid] < IEEE80211_AC_VI;
+}
+
+void iwl_mld_low_latency_update_counters(struct iwl_mld *mld,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_sta *sta,
+ u8 queue)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(mld_sta->vif);
+ struct iwl_mld_low_latency_packets_counters *counters;
+ unsigned long ts = jiffies ? jiffies : 1;
+ u8 fw_id = mld_vif->fw_id;
+
+ /* we should have failed op mode init if NULL */
+ if (WARN_ON_ONCE(!mld->low_latency.pkts_counters))
+ return;
+
+ if (WARN_ON_ONCE(fw_id >= ARRAY_SIZE(counters->vo_vi) ||
+ queue >= mld->trans->num_rx_queues))
+ return;
+
+ if (mld->low_latency.stopped)
+ return;
+
+ if (!iwl_mld_is_vo_vi_pkt(hdr))
+ return;
+
+ counters = &mld->low_latency.pkts_counters[queue];
+
+ spin_lock_bh(&counters->lock);
+ counters->vo_vi[fw_id]++;
+ spin_unlock_bh(&counters->lock);
+
+ /* Initialize the window_start on the first vo/vi packet */
+ if (!mld->low_latency.window_start[fw_id])
+ mld->low_latency.window_start[fw_id] = ts;
+
+ if (time_is_before_jiffies(mld->low_latency.timestamp + MLD_LL_PERIOD))
+ wiphy_delayed_work_queue(mld->wiphy, &mld->low_latency.work,
+ 0);
+}
+
+void iwl_mld_low_latency_stop(struct iwl_mld *mld)
+{
+ lockdep_assert_wiphy(mld->wiphy);
+
+ mld->low_latency.stopped = true;
+
+ wiphy_delayed_work_cancel(mld->wiphy, &mld->low_latency.work);
+}
+
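+/* Reset the detection windows and per-queue counters after a restart.
+ * If low latency was active on any MAC, re-queue the worker so it can
+ * be re-evaluated (and possibly turned off) even with no traffic.
+ */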
+void iwl_mld_low_latency_restart(struct iwl_mld *mld)
+{
+ struct iwl_mld_low_latency *ll = &mld->low_latency;
+ bool low_latency = false;
+ unsigned long ts = jiffies;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ ll->timestamp = ts;
+ mld->low_latency.stopped = false;
+
+ for (int mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
+ ll->window_start[mac] = 0;
+ low_latency |= ll->result[mac];
+
+ for (int q = 0; q < mld->trans->num_rx_queues; q++) {
+ spin_lock_bh(&ll->pkts_counters[q].lock);
+ ll->pkts_counters[q].vo_vi[mac] = 0;
+ spin_unlock_bh(&ll->pkts_counters[q].lock);
+ }
+ }
+
+ /* if low latency is active, force re-evaluation to cover the case of
+ * no traffic.
+ */
+ if (low_latency)
+ wiphy_delayed_work_queue(mld->wiphy, &ll->work, MLD_LL_PERIOD);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/low_latency.h b/drivers/net/wireless/intel/iwlwifi/mld/low_latency.h
new file mode 100644
index 000000000000..f59684d235af
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/low_latency.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_low_latency_h__
+#define __iwl_mld_low_latency_h__
+
+/**
+ * struct iwl_mld_low_latency_packets_counters - Packets counters
+ * @lock: synchronize the counting in data path against the worker
+ * @vo_vi: per-mac, counts the number of TX and RX voice and video packets
+ */
+struct iwl_mld_low_latency_packets_counters {
+ spinlock_t lock;
+ u32 vo_vi[NUM_MAC_INDEX_DRIVER];
+} ____cacheline_aligned_in_smp;
+
+/**
+ * enum iwl_mld_low_latency_cause - low-latency set causes
+ *
+ * @LOW_LATENCY_TRAFFIC: indicates low-latency traffic was detected
+ * @LOW_LATENCY_DEBUGFS: low-latency mode set from debugfs
+ * @LOW_LATENCY_VIF_TYPE: low-latency mode set because of vif type (AP)
+ */
+enum iwl_mld_low_latency_cause {
+ LOW_LATENCY_TRAFFIC = BIT(0),
+ LOW_LATENCY_DEBUGFS = BIT(1),
+ LOW_LATENCY_VIF_TYPE = BIT(2),
+};
+
+/**
+ * struct iwl_mld_low_latency - Manage low-latency detection and activation.
+ * @work: this work is used to detect low-latency by monitoring the number of
+ * voice and video packets sent and received in a period of time. If the
+ * threshold is reached, low-latency is activated. When active,
+ * it is deactivated if the threshold is not reached within a
+ * 10-second period.
+ * @timestamp: timestamp of the last update.
+ * @window_start: per-mac, timestamp of the start of the current window. when
+ * the window is over, the counters are reset.
+ * @pkts_counters: per-queue array of voice/video packet counters
+ * @result: per-mac latest low-latency result
+ * @stopped: if true, ignore the requests to update the counters
+ */
+struct iwl_mld_low_latency {
+ struct wiphy_delayed_work work;
+ unsigned long timestamp;
+ unsigned long window_start[NUM_MAC_INDEX_DRIVER];
+ struct iwl_mld_low_latency_packets_counters *pkts_counters;
+ bool result[NUM_MAC_INDEX_DRIVER];
+ bool stopped;
+};
+
+int iwl_mld_low_latency_init(struct iwl_mld *mld);
+void iwl_mld_low_latency_free(struct iwl_mld *mld);
+void iwl_mld_low_latency_restart_cleanup(struct iwl_mld *mld);
+void iwl_mld_vif_update_low_latency(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ bool low_latency,
+ enum iwl_mld_low_latency_cause cause);
+void iwl_mld_low_latency_update_counters(struct iwl_mld *mld,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_sta *sta,
+ u8 queue);
+void iwl_mld_low_latency_stop(struct iwl_mld *mld);
+void iwl_mld_low_latency_restart(struct iwl_mld *mld);
+
+#endif /* __iwl_mld_low_latency_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
new file mode 100644
index 000000000000..6851064b82da
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
@@ -0,0 +1,2670 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include <net/mac80211.h>
+#include <linux/ip.h>
+
+#include "mld.h"
+#include "mac80211.h"
+#include "phy.h"
+#include "iface.h"
+#include "power.h"
+#include "sta.h"
+#include "agg.h"
+#include "scan.h"
+#include "d3.h"
+#include "tlc.h"
+#include "key.h"
+#include "ap.h"
+#include "tx.h"
+#include "roc.h"
+#include "mlo.h"
+#include "stats.h"
+#include "ftm-initiator.h"
+#include "low_latency.h"
+#include "fw/api/scan.h"
+#include "fw/api/context.h"
+#include "fw/api/filter.h"
+#include "fw/api/sta.h"
+#include "fw/api/tdls.h"
+#ifdef CONFIG_PM_SLEEP
+#include "fw/api/d3.h"
+#endif /* CONFIG_PM_SLEEP */
+#include "iwl-trans.h"
+
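+/* Interface concurrency limits: up to two station interfaces, one
+ * AP/P2P-client/P2P-GO interface and one P2P device. Two combinations
+ * are advertised below: up to two channels without AP support, or a
+ * single channel when AP is included.
+ */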
+#define IWL_MLD_LIMITS(ap) \
+ { \
+ .max = 2, \
+ .types = BIT(NL80211_IFTYPE_STATION), \
+ }, \
+ { \
+ .max = 1, \
+ .types = ap | \
+ BIT(NL80211_IFTYPE_P2P_CLIENT) | \
+ BIT(NL80211_IFTYPE_P2P_GO), \
+ }, \
+ { \
+ .max = 1, \
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE), \
+ }
+
+static const struct ieee80211_iface_limit iwl_mld_limits[] = {
+ IWL_MLD_LIMITS(0)
+};
+
+static const struct ieee80211_iface_limit iwl_mld_limits_ap[] = {
+ IWL_MLD_LIMITS(BIT(NL80211_IFTYPE_AP))
+};
+
+static const struct ieee80211_iface_combination
+iwl_mld_iface_combinations[] = {
+ {
+ .num_different_channels = 2,
+ .max_interfaces = 4,
+ .limits = iwl_mld_limits,
+ .n_limits = ARRAY_SIZE(iwl_mld_limits),
+ },
+ {
+ .num_different_channels = 1,
+ .max_interfaces = 4,
+ .limits = iwl_mld_limits_ap,
+ .n_limits = ARRAY_SIZE(iwl_mld_limits_ap),
+ },
+};
+
+static const u8 if_types_ext_capa_sta[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF |
+ WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB,
+ [8] = WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB,
+ [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
+};
+
+#define IWL_MLD_EMLSR_CAPA (IEEE80211_EML_CAP_EMLSR_SUPP | \
+ IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_32US << \
+ __bf_shf(IEEE80211_EML_CAP_EMLSR_PADDING_DELAY) | \
+ IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_64US << \
+ __bf_shf(IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY))
+#define IWL_MLD_CAPA_OPS (FIELD_PREP_CONST( \
+ IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP, \
+ IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME) | \
+ IEEE80211_MLD_CAP_OP_LINK_RECONF_SUPPORT)
+
+static const struct wiphy_iftype_ext_capab iftypes_ext_capa[] = {
+ {
+ .iftype = NL80211_IFTYPE_STATION,
+ .extended_capabilities = if_types_ext_capa_sta,
+ .extended_capabilities_mask = if_types_ext_capa_sta,
+ .extended_capabilities_len = sizeof(if_types_ext_capa_sta),
+ /* relevant only if EHT is supported */
+ .eml_capabilities = IWL_MLD_EMLSR_CAPA,
+ .mld_capa_and_ops = IWL_MLD_CAPA_OPS,
+ },
+};
+
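+/* Advertise the base MAC address from NVM and derive any additional
+ * addresses (up to IWL_MLD_MAX_ADDRESSES) by incrementing the last
+ * octet of the previous one.
+ */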
+static void iwl_mld_hw_set_addresses(struct iwl_mld *mld)
+{
+ struct wiphy *wiphy = mld->wiphy;
+ int num_addrs = 1;
+
+ /* Extract MAC address */
+ memcpy(mld->addresses[0].addr, mld->nvm_data->hw_addr, ETH_ALEN);
+ wiphy->addresses = mld->addresses;
+ wiphy->n_addresses = 1;
+
+ /* Extract additional MAC addresses if available */
+ if (mld->nvm_data->n_hw_addrs > 1)
+ num_addrs = min(mld->nvm_data->n_hw_addrs,
+ IWL_MLD_MAX_ADDRESSES);
+
+ for (int i = 1; i < num_addrs; i++) {
+ memcpy(mld->addresses[i].addr,
+ mld->addresses[i - 1].addr,
+ ETH_ALEN);
+ mld->addresses[i].addr[ETH_ALEN - 1]++;
+ wiphy->n_addresses++;
+ }
+}
+
+static void iwl_mld_hw_set_channels(struct iwl_mld *mld)
+{
+ struct wiphy *wiphy = mld->wiphy;
+ struct ieee80211_supported_band *bands = mld->nvm_data->bands;
+
+ wiphy->bands[NL80211_BAND_2GHZ] = &bands[NL80211_BAND_2GHZ];
+ wiphy->bands[NL80211_BAND_5GHZ] = &bands[NL80211_BAND_5GHZ];
+
+ if (bands[NL80211_BAND_6GHZ].n_channels)
+ wiphy->bands[NL80211_BAND_6GHZ] = &bands[NL80211_BAND_6GHZ];
+}
+
+static void iwl_mld_hw_set_security(struct iwl_mld *mld)
+{
+ struct ieee80211_hw *hw = mld->hw;
+ static const u32 mld_ciphers[] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104,
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+ WLAN_CIPHER_SUITE_GCMP,
+ WLAN_CIPHER_SUITE_GCMP_256,
+ WLAN_CIPHER_SUITE_AES_CMAC,
+ WLAN_CIPHER_SUITE_BIP_GMAC_128,
+ WLAN_CIPHER_SUITE_BIP_GMAC_256
+ };
+
+ hw->wiphy->n_cipher_suites = ARRAY_SIZE(mld_ciphers);
+ hw->wiphy->cipher_suites = mld_ciphers;
+
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_BEACON_PROTECTION);
+}
+
+static void iwl_mld_hw_set_antennas(struct iwl_mld *mld)
+{
+ struct wiphy *wiphy = mld->wiphy;
+
+ wiphy->available_antennas_tx = iwl_mld_get_valid_tx_ant(mld);
+ wiphy->available_antennas_rx = iwl_mld_get_valid_rx_ant(mld);
+}
+
+static void iwl_mld_hw_set_pm(struct iwl_mld *mld)
+{
+#ifdef CONFIG_PM_SLEEP
+ struct wiphy *wiphy = mld->wiphy;
+
+ if (!device_can_wakeup(mld->trans->dev))
+ return;
+
+ mld->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_EAP_IDENTITY_REQ |
+ WIPHY_WOWLAN_RFKILL_RELEASE |
+ WIPHY_WOWLAN_NET_DETECT |
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE |
+ WIPHY_WOWLAN_4WAY_HANDSHAKE;
+
+ mld->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
+ mld->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
+ mld->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
+ mld->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES_V2;
+
+ wiphy->wowlan = &mld->wowlan;
+#endif /* CONFIG_PM_SLEEP */
+}
+
+static void iwl_mac_hw_set_radiotap(struct iwl_mld *mld)
+{
+ struct ieee80211_hw *hw = mld->hw;
+
+ hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
+ IEEE80211_RADIOTAP_MCS_HAVE_STBC;
+
+ hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
+ IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
+
+ hw->radiotap_timestamp.units_pos =
+ IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US |
+ IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ;
+
+ /* this is the case for CCK frames, it's better (only 8) for OFDM */
+ hw->radiotap_timestamp.accuracy = 22;
+}
+
+static void iwl_mac_hw_set_flags(struct iwl_mld *mld)
+{
+ struct ieee80211_hw *hw = mld->hw;
+
+ ieee80211_hw_set(hw, USES_RSS);
+ ieee80211_hw_set(hw, HANDLES_QUIET_CSA);
+ ieee80211_hw_set(hw, AP_LINK_PS);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(hw, CHANCTX_STA_CSA);
+ ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+ ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
+ ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
+ ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
+ ieee80211_hw_set(hw, BUFF_MMPDU_TXQ);
+ ieee80211_hw_set(hw, STA_MMPDU_TXQ);
+ ieee80211_hw_set(hw, TX_AMSDU);
+ ieee80211_hw_set(hw, TX_FRAG_LIST);
+ ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+ ieee80211_hw_set(hw, DISALLOW_PUNCTURING_5GHZ);
+ ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
+ ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
+ ieee80211_hw_set(hw, TDLS_WIDER_BW);
+}
+
+static void iwl_mac_hw_set_wiphy(struct iwl_mld *mld)
+{
+ struct ieee80211_hw *hw = mld->hw;
+ struct wiphy *wiphy = hw->wiphy;
+ const struct iwl_ucode_capabilities *ucode_capa = &mld->fw->ucode_capa;
+
+ snprintf(wiphy->fw_version,
+ sizeof(wiphy->fw_version),
+ "%.31s", mld->fw->fw_version);
+
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_ADHOC);
+
+ wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_ND_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_HT_IBSS |
+ NL80211_FEATURE_P2P_GO_CTWIN |
+ NL80211_FEATURE_LOW_PRIORITY_SCAN |
+ NL80211_FEATURE_P2P_GO_OPPPS |
+ NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
+ NL80211_FEATURE_SUPPORTS_WMM_ADMISSION |
+ NL80211_FEATURE_TX_POWER_INSERTION |
+ NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
+
+ wiphy->flags |= WIPHY_FLAG_IBSS_RSN |
+ WIPHY_FLAG_AP_UAPSD |
+ WIPHY_FLAG_HAS_CHANNEL_SWITCH |
+ WIPHY_FLAG_SPLIT_SCAN_6GHZ |
+ WIPHY_FLAG_SUPPORTS_TDLS |
+ WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK;
+
+ if (mld->nvm_data->sku_cap_11be_enable &&
+ !iwlwifi_mod_params.disable_11ax &&
+ !iwlwifi_mod_params.disable_11be)
+ wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+
+ /* the firmware uses a u8 for the number of iterations, but 0xff is
+ * reserved for an infinite loop, so the maximum number of iterations is
+ * actually 254.
+ */
+ wiphy->max_sched_scan_plan_iterations = 254;
+ wiphy->max_sched_scan_ie_len = iwl_mld_scan_max_template_size();
+ wiphy->max_scan_ie_len = iwl_mld_scan_max_template_size();
+ wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
+ wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+ wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS;
+ wiphy->max_sched_scan_reqs = 1;
+ wiphy->max_sched_scan_plan_interval = U16_MAX;
+ wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES_V2;
+
+ wiphy->max_remain_on_channel_duration = 10000;
+
+ wiphy->hw_version = mld->trans->hw_id;
+
+ wiphy->hw_timestamp_max_peers = 1;
+
+ wiphy->iface_combinations = iwl_mld_iface_combinations;
+ wiphy->n_iface_combinations = ARRAY_SIZE(iwl_mld_iface_combinations);
+
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_CONCURRENT);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_LEGACY);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SCAN_START_TIME);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BSS_PARENT_TSF);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT);
+
+ if (fw_has_capa(ucode_capa, IWL_UCODE_TLV_CAPA_PROTECTED_TWT))
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_PROTECTED_TWT);
+
+ wiphy->iftype_ext_capab = NULL;
+ wiphy->num_iftype_ext_capab = 0;
+
+ if (!iwlwifi_mod_params.disable_11ax) {
+ wiphy->iftype_ext_capab = iftypes_ext_capa;
+ wiphy->num_iftype_ext_capab = ARRAY_SIZE(iftypes_ext_capa);
+
+ ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
+ ieee80211_hw_set(hw, SUPPORTS_ONLY_HE_MULTI_BSSID);
+ }
+
+ if (iwlmld_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
+ wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ else
+ wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+}
+
+static void iwl_mac_hw_set_misc(struct iwl_mld *mld)
+{
+ struct ieee80211_hw *hw = mld->hw;
+
+ hw->queues = IEEE80211_NUM_ACS;
+
+ hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
+ hw->netdev_features |= mld->cfg->features;
+
+ hw->max_tx_fragments = mld->trans->max_skb_frags;
+ hw->max_listen_interval = IWL_MLD_CONN_LISTEN_INTERVAL;
+
+ hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
+ hw->uapsd_queues = IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_BE;
+
+ hw->chanctx_data_size = sizeof(struct iwl_mld_phy);
+ hw->vif_data_size = sizeof(struct iwl_mld_vif);
+ hw->sta_data_size = sizeof(struct iwl_mld_sta);
+ hw->txq_data_size = sizeof(struct iwl_mld_txq);
+
+ /* TODO: Remove this division when IEEE80211_MAX_AMPDU_BUF_EHT size
+ * is supported.
+ * Note: ensure that IWL_DEFAULT_QUEUE_SIZE_EHT is updated accordingly.
+ */
+ hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_EHT / 2;
+}
+
+static int iwl_mld_hw_verify_preconditions(struct iwl_mld *mld)
+{
+ /* 11ax is expected to be enabled for all supported devices */
+ if (WARN_ON(!mld->nvm_data->sku_cap_11ax_enable))
+ return -EINVAL;
+
+ /* LAR is expected to be enabled for all supported devices */
+ if (WARN_ON(!mld->nvm_data->lar_enabled))
+ return -EINVAL;
+
+ /* All supported devices are currently using version 3 of the cmd.
+ * Since version 3, IWL_SCAN_MAX_PROFILES_V2 shall be used where
+ * necessary.
+ */
+ if (WARN_ON(iwl_fw_lookup_cmd_ver(mld->fw,
+ SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
+ IWL_FW_CMD_VER_UNKNOWN) != 3))
+ return -EINVAL;
+
+ return 0;
+}
+
+int iwl_mld_register_hw(struct iwl_mld *mld)
+{
+ /* verify once the essential preconditions required for setting
+ * the hw capabilities
+ */
+ if (iwl_mld_hw_verify_preconditions(mld))
+ return -EINVAL;
+
+ iwl_mld_hw_set_addresses(mld);
+ iwl_mld_hw_set_channels(mld);
+ iwl_mld_hw_set_security(mld);
+ iwl_mld_hw_set_pm(mld);
+ iwl_mld_hw_set_antennas(mld);
+ iwl_mac_hw_set_radiotap(mld);
+ iwl_mac_hw_set_flags(mld);
+ iwl_mac_hw_set_wiphy(mld);
+ iwl_mac_hw_set_misc(mld);
+
+ SET_IEEE80211_DEV(mld->hw, mld->trans->dev);
+
+ return ieee80211_register_hw(mld->hw);
+}
+
+static void
+iwl_mld_mac80211_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control, struct sk_buff *skb)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct ieee80211_sta *sta = control->sta;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ u32 link_id = u32_get_bits(info->control.flags,
+ IEEE80211_TX_CTRL_MLO_LINK);
+
+ /* In AP mode, mgmt frames are sent on the bcast station,
+ * so the FW can't translate the MLD addr to the link addr. Do it here
+ */
+ if (ieee80211_is_mgmt(hdr->frame_control) && sta &&
+ link_id != IEEE80211_LINK_UNSPECIFIED &&
+ !ieee80211_is_probe_resp(hdr->frame_control)) {
+ /* translate MLD addresses to LINK addresses */
+ struct ieee80211_link_sta *link_sta =
+ rcu_dereference(sta->link[link_id]);
+ struct ieee80211_bss_conf *link_conf =
+ rcu_dereference(info->control.vif->link_conf[link_id]);
+ struct ieee80211_mgmt *mgmt;
+
+ if (WARN_ON(!link_sta || !link_conf)) {
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+
+ mgmt = (void *)hdr;
+ memcpy(mgmt->da, link_sta->addr, ETH_ALEN);
+ memcpy(mgmt->sa, link_conf->addr, ETH_ALEN);
+ memcpy(mgmt->bssid, link_conf->bssid, ETH_ALEN);
+ }
+
+ iwl_mld_tx_skb(mld, skb, NULL);
+}
+
+static void
+iwl_mld_restart_cleanup(struct iwl_mld *mld)
+{
+ iwl_cleanup_mld(mld);
+
+ ieee80211_iterate_interfaces(mld->hw, IEEE80211_IFACE_ITER_ACTIVE,
+ iwl_mld_cleanup_vif, NULL);
+
+ ieee80211_iterate_stations_atomic(mld->hw,
+ iwl_mld_cleanup_sta, NULL);
+
+ iwl_mld_ftm_restart_cleanup(mld);
+}
+
+static
+int iwl_mld_mac80211_start(struct ieee80211_hw *hw)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ int ret;
+ bool in_d3 = false;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+#ifdef CONFIG_PM_SLEEP
+ /* Unless the host goes into hibernation, the FW always stays on and
+ * the d3_resume flow is used. When wowlan is configured, mac80211
+ * will call its resume callback and the wowlan_resume flow
+ * will be used.
+ */
+
+ in_d3 = mld->fw_status.in_d3;
+ if (in_d3) {
+ /* mac80211 already cleaned up the state, no need for cleanup */
+ ret = iwl_mld_no_wowlan_resume(mld);
+ if (ret)
+ iwl_mld_stop_fw(mld);
+ }
+#endif /* CONFIG_PM_SLEEP */
+
+ if (mld->fw_status.in_hw_restart) {
+ iwl_mld_stop_fw(mld);
+ iwl_mld_restart_cleanup(mld);
+ }
+
+ if (!in_d3 || ret) {
+ ret = iwl_mld_start_fw(mld);
+ if (ret)
+ goto error;
+ }
+
+ mld->scan.last_start_time_jiffies = jiffies;
+
+ iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_POST_INIT,
+ NULL);
+ iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_PERIODIC,
+ NULL);
+
+ return 0;
+
+error:
+ /* If we failed to restart the hw, there is nothing useful
+ * we can do but indicate we are no longer in restart.
+ */
+ mld->fw_status.in_hw_restart = false;
+
+ return ret;
+}
+
+static
+void iwl_mld_mac80211_stop(struct ieee80211_hw *hw, bool suspend)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ wiphy_work_cancel(mld->wiphy, &mld->add_txqs_wk);
+
+ /* if the suspend flow fails the fw is in error. Stop it here, and it
+ * will be started upon wakeup
+ */
+ if (!suspend || iwl_mld_no_wowlan_suspend(mld))
+ iwl_mld_stop_fw(mld);
+
+ /* The HW is stopped, so no more RX will come in. OTOH, the worker
+ * can't run as the wiphy lock is held. Cancel it in case it was
+ * scheduled just before we stopped the HW.
+ */
+ wiphy_work_cancel(mld->wiphy, &mld->async_handlers_wk);
+
+ /* Empty out the list, as the worker won't do that */
+ iwl_mld_purge_async_handlers_list(mld);
+
+ /* Clear in_hw_restart flag when stopping the hw, as mac80211 won't
+ * execute the restart.
+ */
+ mld->fw_status.in_hw_restart = false;
+
+ /* We shouldn't have any UIDs still set. Loop over all the UIDs to
+ * make sure there's nothing left there and warn if any is found.
+ */
+ for (int i = 0; i < ARRAY_SIZE(mld->scan.uid_status); i++)
+ if (WARN_ONCE(mld->scan.uid_status[i],
+ "UMAC scan UID %d status was not cleaned (0x%x 0x%x)\n",
+ i, mld->scan.uid_status[i], mld->scan.status))
+ mld->scan.uid_status[i] = 0;
+}
+
+static
+int iwl_mld_mac80211_config(struct ieee80211_hw *hw, u32 changed)
+{
+ return 0;
+}
+
+static
+int iwl_mld_mac80211_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* Construct mld_vif, add it to fw, and map its ID to ieee80211_vif */
+ ret = iwl_mld_add_vif(mld, vif);
+ if (ret)
+ return ret;
+
+ /*
+ * Add the default link, but not if this is an MLD vif as that implies
+ * the HW is restarting and it will be configured by change_vif_links.
+ */
+ if (!ieee80211_vif_is_mld(vif))
+ ret = iwl_mld_add_link(mld, &vif->bss_conf);
+ if (ret)
+ goto err;
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ vif->driver_flags |= IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC;
+ if (!vif->p2p)
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI;
+ }
+
+ if (vif->p2p || iwl_fw_lookup_cmd_ver(mld->fw, PHY_CONTEXT_CMD, 0) < 5)
+ vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW;
+
+ /*
+ * For an MLD vif (in restart) we may not have a link; delay the call
+ * until the initial change_vif_links.
+ */
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ !ieee80211_vif_is_mld(vif))
+ iwl_mld_update_mac_power(mld, vif, false);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ mld->monitor.on = true;
+ ieee80211_hw_set(mld->hw, RX_INCLUDES_FCS);
+ }
+
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ mld->p2p_device_vif = vif;
+
+ return 0;
+
+err:
+ iwl_mld_rm_vif(mld, vif);
+ return ret;
+}
+
+static
+void iwl_mld_mac80211_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION)
+ vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mld->hw->flags);
+ mld->monitor.on = false;
+ }
+
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ mld->p2p_device_vif = NULL;
+
+ iwl_mld_remove_link(mld, &vif->bss_conf);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ debugfs_remove(iwl_mld_vif_from_mac80211(vif)->dbgfs_slink);
+#endif
+
+ iwl_mld_rm_vif(mld, vif);
+}
+
+struct iwl_mld_mc_iter_data {
+ struct iwl_mld *mld;
+ int port_id;
+};
+
+static void iwl_mld_mc_iface_iterator(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_mc_iter_data *mc_data = data;
+ struct iwl_mld *mld = mc_data->mld;
+ struct iwl_mcast_filter_cmd *cmd = mld->mcast_filter_cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = MCAST_FILTER_CMD,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+ int ret, len;
+
+ /* If we don't have free ports, mcast frames will be dropped */
+ if (WARN_ON_ONCE(mc_data->port_id >= MAX_PORT_ID_NUM))
+ return;
+
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc)
+ return;
+
+ cmd->port_id = mc_data->port_id++;
+ ether_addr_copy(cmd->bssid, vif->bss_conf.bssid);
+ len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
+
+ hcmd.len[0] = len;
+ hcmd.data[0] = cmd;
+
+ ret = iwl_mld_send_cmd(mld, &hcmd);
+ if (ret)
+ IWL_ERR(mld, "mcast filter cmd error. ret=%d\n", ret);
+}
+
+void iwl_mld_recalc_multicast_filter(struct iwl_mld *mld)
+{
+ struct iwl_mld_mc_iter_data iter_data = {
+ .mld = mld,
+ };
+
+ if (WARN_ON_ONCE(!mld->mcast_filter_cmd))
+ return;
+
+ ieee80211_iterate_active_interfaces(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_mc_iface_iterator,
+ &iter_data);
+}
+
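+/* Build the MCAST_FILTER_CMD payload in atomic context. If the list is
+ * longer than MAX_MCAST_FILTERING_ADDRESSES, fall back to pass_all and
+ * skip the per-address entries. The returned pointer is stored by
+ * configure_filter() and freed when it is replaced.
+ */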
+static u64
+iwl_mld_mac80211_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mcast_filter_cmd *cmd;
+ struct netdev_hw_addr *addr;
+ int addr_count = netdev_hw_addr_list_count(mc_list);
+ bool pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES;
+ int len;
+
+ if (pass_all)
+ addr_count = 0;
+
+ /* len must be a multiple of 4 */
+ len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
+ cmd = kzalloc(len, GFP_ATOMIC);
+ if (!cmd)
+ return 0;
+
+ if (pass_all) {
+ cmd->pass_all = 1;
+ goto out;
+ }
+
+ netdev_hw_addr_list_for_each(addr, mc_list) {
+ IWL_DEBUG_MAC80211(mld, "mcast addr (%d): %pM\n",
+ cmd->count, addr->addr);
+ ether_addr_copy(&cmd->addr_list[cmd->count * ETH_ALEN],
+ addr->addr);
+ cmd->count++;
+ }
+
+out:
+ return (u64)(unsigned long)cmd;
+}
+
+static
+void iwl_mld_mac80211_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
+
+ /* Replace previous configuration */
+ kfree(mld->mcast_filter_cmd);
+ mld->mcast_filter_cmd = cmd;
+
+ if (!cmd)
+ goto out;
+
+ if (changed_flags & FIF_ALLMULTI)
+ cmd->pass_all = !!(*total_flags & FIF_ALLMULTI);
+
+ if (cmd->pass_all)
+ cmd->count = 0;
+
+ iwl_mld_recalc_multicast_filter(mld);
+out:
+ *total_flags = 0;
+}
+
+static
+void iwl_mld_mac80211_wake_tx_queue(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
+
+ if (likely(mld_txq->status.allocated) || !txq->sta) {
+ iwl_mld_tx_from_txq(mld, txq);
+ return;
+ }
+
+ /* We don't support TSPEC tids. %IEEE80211_NUM_TIDS is for mgmt */
+ if (txq->tid != IEEE80211_NUM_TIDS && txq->tid >= IWL_MAX_TID_COUNT) {
+ IWL_DEBUG_MAC80211(mld, "TID %d is not supported\n", txq->tid);
+ return;
+ }
+
+ /* The worker will handle any packets we leave on the txq now */
+
+ spin_lock_bh(&mld->add_txqs_lock);
+ /* The list is being deleted only after the queue is fully allocated. */
+ if (list_empty(&mld_txq->list) &&
+ /* recheck under lock, otherwise it can be added twice */
+ !mld_txq->status.allocated) {
+ list_add_tail(&mld_txq->list, &mld->txqs_to_add);
+ wiphy_work_queue(mld->wiphy, &mld->add_txqs_wk);
+ }
+ spin_unlock_bh(&mld->add_txqs_lock);
+}
+
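+/* Request a TDLS teardown towards every TDLS peer currently known to
+ * the FW. Used when a second channel context is added, since TDLS peers
+ * are only allowed while a single phy context is in use.
+ */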
+static void iwl_mld_teardown_tdls_peers(struct iwl_mld *mld)
+{
+ lockdep_assert_wiphy(mld->wiphy);
+
+ for (int i = 0; i < mld->fw->ucode_capa.num_stations; i++) {
+ struct ieee80211_link_sta *link_sta;
+ struct iwl_mld_sta *mld_sta;
+
+ link_sta = wiphy_dereference(mld->wiphy,
+ mld->fw_id_to_link_sta[i]);
+ if (IS_ERR_OR_NULL(link_sta))
+ continue;
+
+ if (!link_sta->sta->tdls)
+ continue;
+
+ mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+
+ ieee80211_tdls_oper_request(mld_sta->vif, link_sta->addr,
+ NL80211_TDLS_TEARDOWN,
+ WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED,
+ GFP_KERNEL);
+ }
+}
+
+static
+int iwl_mld_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);
+ int fw_id = iwl_mld_allocate_fw_phy_id(mld);
+ int ret;
+
+ if (fw_id < 0)
+ return fw_id;
+
+ phy->mld = mld;
+ phy->fw_id = fw_id;
+ phy->chandef = *iwl_mld_get_chandef_from_chanctx(mld, ctx);
+
+ ret = iwl_mld_phy_fw_action(mld, ctx, FW_CTXT_ACTION_ADD);
+ if (ret) {
+ mld->used_phy_ids &= ~BIT(phy->fw_id);
+ return ret;
+ }
+
+ if (hweight8(mld->used_phy_ids) > 1)
+ iwl_mld_teardown_tdls_peers(mld);
+
+ return 0;
+}
+
+static
+void iwl_mld_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);
+
+ iwl_mld_phy_fw_action(mld, ctx, FW_CTXT_ACTION_REMOVE);
+ mld->used_phy_ids &= ~BIT(phy->fw_id);
+}
+
+static
+void iwl_mld_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx, u32 changed)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);
+ struct cfg80211_chan_def *chandef =
+ iwl_mld_get_chandef_from_chanctx(mld, ctx);
+
+ /* We don't care about these */
+ if (!(changed & ~(IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
+ IEEE80211_CHANCTX_CHANGE_RADAR |
+ IEEE80211_CHANCTX_CHANGE_CHANNEL)))
+ return;
+
+ /* Check if a FW update is required */
+
+ if (changed & IEEE80211_CHANCTX_CHANGE_AP)
+ goto update;
+
+ if (chandef->chan == phy->chandef.chan &&
+ chandef->center_freq1 == phy->chandef.center_freq1 &&
+ chandef->punctured == phy->chandef.punctured) {
+ /* No-op if the width didn't change, or if we are just toggling
+ * between HT and non-HT (both at most 20 MHz)
+ */
+ if (phy->chandef.width == chandef->width ||
+ (phy->chandef.width <= NL80211_CHAN_WIDTH_20 &&
+ chandef->width <= NL80211_CHAN_WIDTH_20))
+ return;
+ }
+update:
+ phy->chandef = *chandef;
+
+ iwl_mld_phy_fw_action(mld, ctx, FW_CTXT_ACTION_MODIFY);
+}
+
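+/* Return the index (in 80 MHz units) of the 80 MHz subchannel that
+ * contains the control channel within a 160 or 320 MHz chandef, or 0
+ * for narrower widths. Used to configure the sniffer's primary-80
+ * position.
+ */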
+static u8
+iwl_mld_chandef_get_primary_80(struct cfg80211_chan_def *chandef)
+{
+ int data_start;
+ int control_start;
+ int bw;
+
+ if (chandef->width == NL80211_CHAN_WIDTH_320)
+ bw = 320;
+ else if (chandef->width == NL80211_CHAN_WIDTH_160)
+ bw = 160;
+ else
+ return 0;
+
+ /* data is bw wide so the start is half the width */
+ data_start = chandef->center_freq1 - bw / 2;
+ /* control is 20Mhz width */
+ control_start = chandef->chan->center_freq - 10;
+
+ return (control_start - data_start) / 80;
+}
+
+static bool iwl_mld_can_activate_link(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_sta *mld_sta;
+ struct iwl_mld_link_sta *link_sta;
+
+ /* In association, we activate the assoc link before adding the STA. */
+ if (!mld_vif->ap_sta || !vif->cfg.assoc)
+ return true;
+
+ mld_sta = iwl_mld_sta_from_mac80211(mld_vif->ap_sta);
+
+ /* When switching links, we need to wait to activate the link until
+ * the STA has been added to the FW. It'll be activated in
+ * iwl_mld_update_link_stas
+ */
+ link_sta = wiphy_dereference(mld->wiphy, mld_sta->link[link->link_id]);
+
+ /* In restart we can have a link_sta that doesn't exist in FW yet */
+ return link_sta && link_sta->in_fw;
+}
+
+static
+int iwl_mld_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+ unsigned int n_active = iwl_mld_count_active_links(mld, vif);
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (WARN_ON(!mld_link))
+ return -EINVAL;
+
+ /* if the assigned one was not counted yet, count it now */
+ if (!rcu_access_pointer(mld_link->chan_ctx)) {
+ n_active++;
+
+ /* Track addition of non-BSS link */
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
+ ret = iwl_mld_emlsr_check_non_bss_block(mld, 1);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* for AP, mac parameters such as HE support are updated at this stage. */
+ if (vif->type == NL80211_IFTYPE_AP) {
+ ret = iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_MODIFY);
+
+ if (ret) {
+ IWL_ERR(mld, "failed to update MAC %pM\n", vif->addr);
+ return -EINVAL;
+ }
+ }
+
+ rcu_assign_pointer(mld_link->chan_ctx, ctx);
+
+ if (n_active > 1) {
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ iwl_mld_leave_omi_bw_reduction(mld);
+
+ /* Indicate to mac80211 that EML is enabled */
+ vif->driver_flags |= IEEE80211_VIF_EML_ACTIVE;
+
+ if (vif->active_links & BIT(mld_vif->emlsr.selected_primary))
+ mld_vif->emlsr.primary = mld_vif->emlsr.selected_primary;
+ else
+ mld_vif->emlsr.primary = __ffs(vif->active_links);
+
+ iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_ESR_LINK_UP,
+ NULL);
+ }
+
+ /* First send the link command with the phy context ID.
+ * Now that we have the phy, we know the band so also the rates
+ */
+ ret = iwl_mld_change_link_in_fw(mld, link,
+ LINK_CONTEXT_MODIFY_RATES_INFO);
+ if (ret)
+ goto err;
+
+ /* TODO: Initialize rate control for the AP station, since we might be
+ * doing a link switch here - we cannot initialize it before since
+ * this needs the phy context assigned (and in FW?), and we cannot
+ * do it later because it needs to be initialized as soon as we're
+ * able to TX on the link, i.e. when active. (task=link-switch)
+ */
+
+ /* Now activate the link */
+ if (iwl_mld_can_activate_link(mld, vif, link)) {
+ ret = iwl_mld_activate_link(mld, link);
+ if (ret)
+ goto err;
+ }
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ iwl_mld_send_ap_tx_power_constraint_cmd(mld, vif, link);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ /* TODO: task=sniffer add sniffer station */
+ mld->monitor.p80 =
+ iwl_mld_chandef_get_primary_80(&vif->bss_conf.chanreq.oper);
+ }
+
+ return 0;
+err:
+ RCU_INIT_POINTER(mld_link->chan_ctx, NULL);
+ return ret;
+}
+
+static
+void iwl_mld_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+ unsigned int n_active = iwl_mld_count_active_links(mld, vif);
+
+ if (WARN_ON(!mld_link))
+ return;
+
+ /* Track removal of non-BSS link */
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
+ iwl_mld_emlsr_check_non_bss_block(mld, -1);
+
+ iwl_mld_deactivate_link(mld, link);
+
+ /* TODO: task=sniffer remove sniffer station */
+
+ if (n_active > 1) {
+ /* Indicate to mac80211 that EML is disabled */
+ vif->driver_flags &= ~IEEE80211_VIF_EML_ACTIVE;
+
+ iwl_dbg_tlv_time_point(&mld->fwrt,
+ IWL_FW_INI_TIME_ESR_LINK_DOWN,
+ NULL);
+ }
+
+ RCU_INIT_POINTER(mld_link->chan_ctx, NULL);
+
+ /* in the non-MLO case, remove/re-add the link to clean up FW state.
+ * In MLO, it'll be done in drv_change_vif_links
+ */
+ if (!ieee80211_vif_is_mld(vif) && !mld_vif->ap_sta &&
+ !WARN_ON_ONCE(vif->cfg.assoc) &&
+ vif->type != NL80211_IFTYPE_AP && !mld->fw_status.in_hw_restart) {
+ iwl_mld_remove_link(mld, link);
+ iwl_mld_add_link(mld, link);
+ }
+}
+
+static
+int iwl_mld_mac80211_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ return 0;
+}
+
+static void
+iwl_mld_link_info_changed_ap_ibss(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link,
+ u64 changes)
+{
+ u32 link_changes = 0;
+
+ if (changes & BSS_CHANGED_ERP_SLOT)
+ link_changes |= LINK_CONTEXT_MODIFY_RATES_INFO;
+
+ if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT))
+ link_changes |= LINK_CONTEXT_MODIFY_PROTECT_FLAGS;
+
+ if (changes & (BSS_CHANGED_QOS | BSS_CHANGED_BANDWIDTH))
+ link_changes |= LINK_CONTEXT_MODIFY_QOS_PARAMS;
+
+ if (changes & BSS_CHANGED_HE_BSS_COLOR)
+ link_changes |= LINK_CONTEXT_MODIFY_HE_PARAMS;
+
+ if (link_changes)
+ iwl_mld_change_link_in_fw(mld, link, link_changes);
+
+ if (changes & BSS_CHANGED_BEACON)
+ iwl_mld_update_beacon_template(mld, vif, link);
+}
+
+static
+u32 iwl_mld_link_changed_mapping(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ u64 changes)
+{
+ u32 link_changes = 0;
+ bool has_he, has_eht;
+
+ if (changes & BSS_CHANGED_QOS && vif->cfg.assoc && link_conf->qos)
+ link_changes |= LINK_CONTEXT_MODIFY_QOS_PARAMS;
+
+ if (changes & (BSS_CHANGED_ERP_PREAMBLE | BSS_CHANGED_BASIC_RATES |
+ BSS_CHANGED_ERP_SLOT))
+ link_changes |= LINK_CONTEXT_MODIFY_RATES_INFO;
+
+ if (changes & (BSS_CHANGED_HT | BSS_CHANGED_ERP_CTS_PROT))
+ link_changes |= LINK_CONTEXT_MODIFY_PROTECT_FLAGS;
+
+ /* TODO: task=MLO check mac80211's HE flags and if command is needed
+ * every time there's a link change. Currently used flags are
+ * BSS_CHANGED_HE_OBSS_PD and BSS_CHANGED_HE_BSS_COLOR.
+ */
+ has_he = link_conf->he_support && !iwlwifi_mod_params.disable_11ax;
+ has_eht = link_conf->eht_support && !iwlwifi_mod_params.disable_11be;
+
+ if (vif->cfg.assoc && (has_he || has_eht)) {
+ IWL_DEBUG_MAC80211(mld, "Associated in HE mode\n");
+ link_changes |= LINK_CONTEXT_MODIFY_HE_PARAMS;
+ }
+
+ return link_changes;
+}
+
+static void
+iwl_mld_mac80211_link_info_changed_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ u64 changes)
+{
+ u32 link_changes = iwl_mld_link_changed_mapping(mld, vif, link_conf,
+ changes);
+
+ if (link_changes)
+ iwl_mld_change_link_in_fw(mld, link_conf, link_changes);
+
+ if (changes & BSS_CHANGED_TPE)
+ iwl_mld_send_ap_tx_power_constraint_cmd(mld, vif, link_conf);
+
+ if (changes & BSS_CHANGED_BEACON_INFO)
+ iwl_mld_update_mac_power(mld, vif, false);
+
+ /* The firmware will wait quite a while after association before it
+ * starts filtering the beacons. We can safely enable beacon filtering
+ * upon CQM configuration, even if we didn't get a beacon yet.
+ */
+ if (changes & (BSS_CHANGED_CQM | BSS_CHANGED_BEACON_INFO))
+ iwl_mld_enable_beacon_filter(mld, link_conf, false);
+
+ /* If we have used OMI before to reduce bandwidth to 80 MHz and then
+ * increased to 160 MHz again, and then the AP changes to 320 MHz, it
+ * will think that we're limited to 160 MHz right now. Update it by
+ * requesting a new OMI bandwidth.
+ */
+ if (changes & BSS_CHANGED_BANDWIDTH) {
+ enum ieee80211_sta_rx_bandwidth bw;
+
+ bw = ieee80211_chan_width_to_rx_bw(link_conf->chanreq.oper.width);
+
+ iwl_mld_omi_ap_changed_bw(mld, link_conf, bw);
+ }
+
+ if (changes & BSS_CHANGED_BANDWIDTH)
+ iwl_mld_retry_emlsr(mld, vif);
+}
+
+static int iwl_mld_update_mu_groups(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mu_group_mgmt_cmd cmd = {};
+
+ BUILD_BUG_ON(sizeof(cmd.membership_status) !=
+ sizeof(link_conf->mu_group.membership));
+ BUILD_BUG_ON(sizeof(cmd.user_position) !=
+ sizeof(link_conf->mu_group.position));
+
+ memcpy(cmd.membership_status, link_conf->mu_group.membership,
+ WLAN_MEMBERSHIP_LEN);
+ memcpy(cmd.user_position, link_conf->mu_group.position,
+ WLAN_USER_POSITION_LEN);
+
+ return iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(DATA_PATH_GROUP,
+ UPDATE_MU_GROUPS_CMD),
+ &cmd);
+}
+
+static void
+iwl_mld_mac80211_link_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ u64 changes)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ iwl_mld_mac80211_link_info_changed_sta(mld, vif, link_conf,
+ changes);
+ break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
+ iwl_mld_link_info_changed_ap_ibss(mld, vif, link_conf,
+ changes);
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ /* The firmware tracks this on its own in STATION mode, but
+ * obviously not in sniffer mode.
+ */
+ if (changes & BSS_CHANGED_MU_GROUPS)
+ iwl_mld_update_mu_groups(mld, link_conf);
+ break;
+ default:
+ /* shouldn't happen */
+ WARN_ON_ONCE(1);
+ }
+
+ /* Now that we know our BSSID, we can configure the MAC context with
+ * eht_support if needed.
+ */
+ if (changes & BSS_CHANGED_BSSID)
+ iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_MODIFY);
+
+ if (changes & BSS_CHANGED_TXPOWER)
+ iwl_mld_set_tx_power(mld, link_conf, link_conf->txpower);
+}
+
+static void
+iwl_mld_smps_wa(struct iwl_mld *mld, struct ieee80211_vif *vif, bool enable)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ /* Send the device-level power commands since the
+ * firmware checks the POWER_TABLE_CMD's POWER_SAVE_EN bit to
+ * determine SMPS mode.
+ */
+ if (mld_vif->ps_disabled == !enable)
+ return;
+
+ mld_vif->ps_disabled = !enable;
+
+ iwl_mld_update_device_power(mld, false);
+}
+
+static
+void iwl_mld_mac80211_vif_cfg_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u64 changes)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (changes & BSS_CHANGED_ASSOC) {
+ ret = iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_MODIFY);
+ if (ret)
+ IWL_ERR(mld, "failed to update context\n");
+
+ if (vif->cfg.assoc) {
+ /* Clear statistics to get clean beacon counter, and
+ * ask for periodic statistics, as they are needed for
+ * link selection and RX OMI decisions.
+ */
+ iwl_mld_clear_stats_in_fw(mld);
+ iwl_mld_request_periodic_fw_stats(mld, true);
+
+ iwl_mld_set_vif_associated(mld, vif);
+ } else {
+ iwl_mld_request_periodic_fw_stats(mld, false);
+ }
+ }
+
+ if (changes & BSS_CHANGED_PS) {
+ iwl_mld_smps_wa(mld, vif, vif->cfg.ps);
+ iwl_mld_update_mac_power(mld, vif, false);
+ }
+
+ /* TODO: task=MLO BSS_CHANGED_MLD_VALID_LINKS/CHANGED_MLD_TTLM */
+}
+
+static int
+iwl_mld_mac80211_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ if (WARN_ON(!hw_req->req.n_channels ||
+ hw_req->req.n_channels >
+ mld->fw->ucode_capa.n_scan_channels))
+ return -EINVAL;
+
+ return iwl_mld_regular_scan_start(mld, vif, &hw_req->req, &hw_req->ies);
+}
+
+static void
+iwl_mld_mac80211_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ /* Due to a race condition, it's possible that mac80211 asks
+ * us to stop a hw_scan when it's already stopped. This can
+ * happen, for instance, if we stopped the scan ourselves,
+ * called ieee80211_scan_completed() and the userspace called
+ * cancel scan before ieee80211_scan_work() could run.
+ * To handle that, simply return if the scan is not running.
+ */
+ if (mld->scan.status & IWL_MLD_SCAN_REGULAR)
+ iwl_mld_scan_stop(mld, IWL_MLD_SCAN_REGULAR, true);
+}
+
+static int
+iwl_mld_mac80211_sched_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ return iwl_mld_sched_scan_start(mld, vif, req, ies, IWL_MLD_SCAN_SCHED);
+}
+
+static int
+iwl_mld_mac80211_sched_scan_stop(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ /* Due to a race condition, it's possible that mac80211 asks
+ * us to stop a sched_scan when it's already stopped. This
+ * can happen, for instance, if we stopped the scan ourselves,
+ * called ieee80211_sched_scan_stopped() and the userspace called
+ * stop sched scan before ieee80211_sched_scan_stopped_work()
+ * could run. To handle this, simply return if the scan is
+ * not running.
+ */
+ if (!(mld->scan.status & IWL_MLD_SCAN_SCHED))
+ return 0;
+
+ return iwl_mld_scan_stop(mld, IWL_MLD_SCAN_SCHED, false);
+}
+
+static void
+iwl_mld_restart_complete_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct ieee80211_bss_conf *link_conf;
+ struct iwl_mld *mld = data;
+ int link_id;
+
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ enum ieee80211_sta_rx_bandwidth bw;
+ struct iwl_mld_link *mld_link;
+
+ mld_link = wiphy_dereference(mld->wiphy,
+ mld_vif->link[link_id]);
+
+ if (WARN_ON_ONCE(!mld_link))
+ continue;
+
+ bw = mld_link->rx_omi.bw_in_progress;
+ if (bw)
+ iwl_mld_change_link_omi_bw(mld, link_conf, bw);
+ }
+}
+
+static void
+iwl_mld_mac80211_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ switch (reconfig_type) {
+ case IEEE80211_RECONFIG_TYPE_RESTART:
+ mld->fw_status.in_hw_restart = false;
+ iwl_mld_send_recovery_cmd(mld, ERROR_RECOVERY_END_OF_RECOVERY);
+
+ ieee80211_iterate_interfaces(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_restart_complete_vif, mld);
+
+ iwl_trans_finish_sw_reset(mld->trans);
+ /* no need to lock, adding in parallel would schedule too */
+ if (!list_empty(&mld->txqs_to_add))
+ wiphy_work_queue(mld->wiphy, &mld->add_txqs_wk);
+
+ IWL_INFO(mld, "restart completed\n");
+ break;
+ case IEEE80211_RECONFIG_TYPE_SUSPEND:
+ break;
+ }
+}
+
+static
+void iwl_mld_mac80211_mgd_prepare_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_prep_tx_info *info)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ u32 duration = IWL_MLD_SESSION_PROTECTION_ASSOC_TIME_MS;
+
+ /* After a successful association the connection is etalibeshed
+ * and we can rely on the quota to send the disassociation frame.
+ */
+ if (info->was_assoc)
+ return;
+
+ if (info->duration > duration)
+ duration = info->duration;
+
+ iwl_mld_schedule_session_protection(mld, vif, duration,
+ IWL_MLD_SESSION_PROTECTION_MIN_TIME_MS,
+ info->link_id);
+}
+
+static
+void iwl_mld_mac_mgd_complete_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_prep_tx_info *info)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ /* Successful authentication is the only case in which the session
+ * protection must be kept running, since we'll need it for the
+ * upcoming association. In all other cases, cancel the session
+ * protection.
+ * After successful association the connection is established and
+ * further mgd tx can rely on the quota.
+ */
+ if (info->success && info->subtype == IEEE80211_STYPE_AUTH)
+ return;
+
+ /* The firmware will be on the medium after we configure the vif as
+ * associated. Removing the session protection allows the firmware
+ * to stop being on the medium. To ensure the continuity of our
+ * presence on the medium, we must first configure the vif as
+ * associated and only then remove the session protection.
+ * Currently, mac80211 calls vif_cfg_changed() first and then
+ * drv_mgd_complete_tx(). Warn if this assumption ever stops
+ * holding true.
+ */
+ WARN_ON(info->success &&
+ (info->subtype == IEEE80211_STYPE_ASSOC_REQ ||
+ info->subtype == IEEE80211_STYPE_REASSOC_REQ) &&
+ !vif->cfg.assoc);
+
+ iwl_mld_cancel_session_protection(mld, vif, info->link_id);
+}
+
+static int
+iwl_mld_mac80211_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link *link;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ link = iwl_mld_link_dereference_check(mld_vif, link_id);
+ if (!link)
+ return -EINVAL;
+
+ link->queue_params[ac] = *params;
+
+ /* No need to update right away, we'll get BSS_CHANGED_QOS.
+ * The exception is the P2P_DEVICE interface, which needs an
+ * immediate update.
+ */
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ iwl_mld_change_link_in_fw(mld, &vif->bss_conf,
+ LINK_CONTEXT_MODIFY_QOS_PARAMS);
+
+ return 0;
+}
+
+static void iwl_mld_set_uapsd(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+ vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (vif->p2p &&
+ !(iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_P2P_CLIENT))
+ vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+
+ if (!vif->p2p &&
+ !(iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS))
+ vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+}
+
+int iwl_mld_tdls_sta_count(struct iwl_mld *mld)
+{
+ int count = 0;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ for (int i = 0; i < mld->fw->ucode_capa.num_stations; i++) {
+ struct ieee80211_link_sta *link_sta;
+
+ link_sta = wiphy_dereference(mld->wiphy,
+ mld->fw_id_to_link_sta[i]);
+ if (IS_ERR_OR_NULL(link_sta))
+ continue;
+
+ if (!link_sta->sta->tdls)
+ continue;
+
+ count++;
+ }
+
+ return count;
+}
+
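+/* BSS iterator: clear *tolerated if this BSS does not advertise the
+ * OBSS narrow bandwidth RU tolerance bit in its extended capabilities.
+ */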
+static void iwl_mld_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy,
+ struct cfg80211_bss *bss,
+ void *_data)
+{
+ bool *tolerated = _data;
+ const struct cfg80211_bss_ies *ies;
+ const struct element *elem;
+
+ rcu_read_lock();
+ ies = rcu_dereference(bss->ies);
+ elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, ies->data,
+ ies->len);
+
+ if (!elem || elem->datalen < 10 ||
+ !(elem->data[10] &
+ WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT)) {
+ *tolerated = false;
+ }
+ rcu_read_unlock();
+}
+
+static void
+iwl_mld_check_he_obss_narrow_bw_ru(struct iwl_mld *mld,
+ struct iwl_mld_link *mld_link,
+ struct ieee80211_bss_conf *link_conf)
+{
+ bool tolerated = true;
+
+ if (WARN_ON_ONCE(!link_conf->chanreq.oper.chan))
+ return;
+
+ if (!(link_conf->chanreq.oper.chan->flags & IEEE80211_CHAN_RADAR)) {
+ mld_link->he_ru_2mhz_block = false;
+ return;
+ }
+
+ cfg80211_bss_iter(mld->wiphy, &link_conf->chanreq.oper,
+ iwl_mld_check_he_obss_narrow_bw_ru_iter, &tolerated);
+
+ /* Block 26-tone RU UL OFDMA transmissions using HE TB PPDU if at
+ * least one AP on the radar channel cannot tolerate them.
+ */
+ mld_link->he_ru_2mhz_block = !tolerated;
+}
+
+static void iwl_mld_link_set_2mhz_block(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
+
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+ struct iwl_mld_link *mld_link =
+ iwl_mld_link_from_mac80211(link_conf);
+
+ if (WARN_ON(!link_conf || !mld_link))
+ continue;
+
+ if (link_sta->he_cap.has_he)
+ iwl_mld_check_he_obss_narrow_bw_ru(mld, mld_link,
+ link_conf);
+ }
+}
+
+static int iwl_mld_move_sta_state_up(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int tdls_count = 0;
+ int ret;
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ if (sta->tdls) {
+ if (vif->p2p || hweight8(mld->used_phy_ids) != 1)
+ return -EBUSY;
+
+ tdls_count = iwl_mld_tdls_sta_count(mld);
+ if (tdls_count >= IWL_TDLS_STA_COUNT)
+ return -EBUSY;
+ }
+
+ /*
+ * If this is the first STA (i.e. the AP) it won't do
+ * anything, otherwise must leave for any new STA on
+ * any other interface, or for TDLS, etc.
+ * Need to call this _before_ adding the STA so it can
+ * look up the one STA to use to ask mac80211 to leave
+ * OMI; in the unlikely event that adding the new STA
+ * then fails we'll just re-enter OMI later (via the
+ * statistics notification handling.)
+ */
+ iwl_mld_leave_omi_bw_reduction(mld);
+
+ ret = iwl_mld_add_sta(mld, sta, vif, STATION_TYPE_PEER);
+ if (ret)
+ return ret;
+
+ /* just added first TDLS STA, so disable PM */
+ if (sta->tdls && tdls_count == 0)
+ iwl_mld_update_mac_power(mld, vif, false);
+
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
+ mld_vif->ap_sta = sta;
+
+ /* Initialize TLC here already - this really tells
+ * the firmware only what the supported legacy rates are
+ * (may be) since it's initialized already from what the
+ * AP advertised in the beacon/probe response. This will
+ * allow the firmware to send auth/assoc frames with one
+ * of the supported rates already, rather than having to
+ * use a mandatory rate.
+ * If we're the AP, we'll just assume mandatory rates at
+ * this point, but we know nothing about the STA anyway.
+ */
+ iwl_mld_config_tlc(mld, vif, sta);
+
+ return ret;
+ } else if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_AUTH) {
+ iwl_mld_set_uapsd(mld, vif);
+ return 0;
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC) {
+ ret = iwl_mld_update_all_link_stations(mld, sta);
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ iwl_mld_link_set_2mhz_block(mld, vif, sta);
+ /* Now the link_sta's capabilities are set, update the FW */
+ iwl_mld_config_tlc(mld, vif, sta);
+
+ if (vif->type == NL80211_IFTYPE_AP) {
+ /* Update MAC_CFG_FILTER_ACCEPT_BEACON if at least
+ * one sta is associated
+ */
+ if (++mld_vif->num_associated_stas == 1)
+ ret = iwl_mld_mac_fw_action(mld, vif,
+ FW_CTXT_ACTION_MODIFY);
+ }
+
+ return ret;
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED) {
+ ret = 0;
+
+ if (!sta->tdls) {
+ mld_vif->authorized = true;
+
+ /* Ensure any block due to a non-BSS link is synced */
+ iwl_mld_emlsr_check_non_bss_block(mld, 0);
+
+ /* Block EMLSR until a certain throughput is reached */
+ if (!mld->fw_status.in_hw_restart &&
+ IWL_MLD_ENTER_EMLSR_TPT_THRESH > 0)
+ iwl_mld_block_emlsr(mld_vif->mld, vif,
+ IWL_MLD_EMLSR_BLOCKED_TPT,
+ 0);
+
+ /* clear COEX_HIGH_PRIORITY_ENABLE */
+ ret = iwl_mld_mac_fw_action(mld, vif,
+ FW_CTXT_ACTION_MODIFY);
+ if (ret)
+ return ret;
+ iwl_mld_smps_wa(mld, vif, vif->cfg.ps);
+ }
+
+ /* MFP is set by default before the station is authorized.
+ * Clear it here in case it's not used.
+ */
+ if (!sta->mfp)
+ ret = iwl_mld_update_all_link_stations(mld, sta);
+
+ /* We can use wide bandwidth now, not only 20 MHz */
+ iwl_mld_config_tlc(mld, vif, sta);
+
+ return ret;
+ } else {
+ return -EINVAL;
+ }
+}
+
+static int iwl_mld_move_sta_state_down(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ if (old_state == IEEE80211_STA_AUTHORIZED &&
+ new_state == IEEE80211_STA_ASSOC) {
+ if (!sta->tdls) {
+ mld_vif->authorized = false;
+
+ memset(&mld_vif->emlsr.zeroed_on_not_authorized, 0,
+ sizeof(mld_vif->emlsr.zeroed_on_not_authorized));
+
+ wiphy_delayed_work_cancel(mld->wiphy,
+ &mld_vif->emlsr.prevent_done_wk);
+ wiphy_delayed_work_cancel(mld->wiphy,
+ &mld_vif->emlsr.tmp_non_bss_done_wk);
+ wiphy_work_cancel(mld->wiphy, &mld_vif->emlsr.unblock_tpt_wk);
+ wiphy_delayed_work_cancel(mld->wiphy,
+ &mld_vif->emlsr.check_tpt_wk);
+
+ iwl_mld_reset_cca_40mhz_workaround(mld, vif);
+ iwl_mld_smps_wa(mld, vif, true);
+ }
+
+ /* once we move into assoc state, need to update the FW to
+ * stop using wide bandwidth
+ */
+ iwl_mld_config_tlc(mld, vif, sta);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH) {
+ if (vif->type == NL80211_IFTYPE_AP &&
+ !WARN_ON(!mld_vif->num_associated_stas)) {
+ /* Update MAC_CFG_FILTER_ACCEPT_BEACON if the last sta
+ * is disassociating
+ */
+ if (--mld_vif->num_associated_stas == 0)
+ iwl_mld_mac_fw_action(mld, vif,
+ FW_CTXT_ACTION_MODIFY);
+ }
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_NONE) {
+ /* nothing */
+ } else if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST) {
+ iwl_mld_remove_sta(mld, sta);
+
+ if (sta->tdls && iwl_mld_tdls_sta_count(mld) == 0) {
+ /* just removed last TDLS STA, so enable PM */
+ iwl_mld_update_mac_power(mld, vif, false);
+ }
+ } else {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int iwl_mld_mac80211_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+
+ IWL_DEBUG_MAC80211(mld, "station %pM state change %d->%d\n",
+ sta->addr, old_state, new_state);
+
+ mld_sta->sta_state = new_state;
+
+ if (old_state < new_state)
+ return iwl_mld_move_sta_state_up(mld, vif, sta, old_state,
+ new_state);
+ else
+ return iwl_mld_move_sta_state_down(mld, vif, sta, old_state,
+ new_state);
+}
+
+static void iwl_mld_mac80211_flush(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ /* Make sure we're done with the deferred traffic before flushing */
+ iwl_mld_add_txq_list(mld);
+
+ for (int i = 0; i < mld->fw->ucode_capa.num_stations; i++) {
+ struct ieee80211_link_sta *link_sta =
+ wiphy_dereference(mld->wiphy,
+ mld->fw_id_to_link_sta[i]);
+
+ if (IS_ERR_OR_NULL(link_sta))
+ continue;
+
+ /* Check that the sta belongs to the given vif */
+ if (vif && vif != iwl_mld_sta_from_mac80211(link_sta->sta)->vif)
+ continue;
+
+ if (drop)
+ iwl_mld_flush_sta_txqs(mld, link_sta->sta);
+ else
+ iwl_mld_wait_sta_txqs_empty(mld, link_sta->sta);
+ }
+}
+
+static void iwl_mld_mac80211_flush_sta(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ iwl_mld_flush_sta_txqs(mld, sta);
+}
+
+static int
+iwl_mld_mac80211_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 ssn = params->ssn;
+ u16 buf_size = params->buf_size;
+ u16 timeout = params->timeout;
+ int ret;
+
+ IWL_DEBUG_HT(mld, "A-MPDU action on addr %pM tid: %d action: %d\n",
+ sta->addr, tid, action);
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ ret = iwl_mld_ampdu_rx_start(mld, sta, tid, ssn, buf_size,
+ timeout);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ ret = iwl_mld_ampdu_rx_stop(mld, sta, tid);
+ break;
+ default:
+ /* The mac80211 TX_AMPDU_SETUP_IN_HW flag is set for all
+ * devices, since all support TX A-MPDU offload in hardware.
+ * Therefore, no TX action should be requested here.
+ */
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
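+/* HW checksum offload is only done for TCP and UDP (over IPv4) payloads */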
+static bool iwl_mld_can_hw_csum(struct sk_buff *skb)
+{
+ u8 protocol = ip_hdr(skb)->protocol;
+
+ return protocol == IPPROTO_TCP || protocol == IPPROTO_UDP;
+}
+
+static bool iwl_mld_mac80211_can_aggregate(struct ieee80211_hw *hw,
+ struct sk_buff *head,
+ struct sk_buff *skb)
+{
+ if (!IS_ENABLED(CONFIG_INET))
+ return false;
+
+ /* For now don't aggregate IPv6 in AMSDU */
+ if (skb->protocol != htons(ETH_P_IP))
+ return false;
+
+ /* Allow aggregation only if both frames have the same HW csum offload
+ * capability, ensuring consistent HW or SW csum handling in A-MSDU.
+ */
+ return iwl_mld_can_hw_csum(skb) == iwl_mld_can_hw_csum(head);
+}
+
+static void iwl_mld_mac80211_sync_rx_queues(struct ieee80211_hw *hw)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ iwl_mld_sync_rx_queues(mld, IWL_MLD_RXQ_EMPTY, NULL, 0);
+}
+
+static void iwl_mld_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ u32 changed)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ if (changed & (IEEE80211_RC_BW_CHANGED |
+ IEEE80211_RC_SUPP_RATES_CHANGED |
+ IEEE80211_RC_NSS_CHANGED)) {
+ struct ieee80211_bss_conf *link =
+ link_conf_dereference_check(vif, link_sta->link_id);
+
+ if (WARN_ON(!link))
+ return;
+
+ iwl_mld_config_tlc_link(mld, vif, link, link_sta);
+ }
+}
+
+static void iwl_mld_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ device_set_wakeup_enable(mld->trans->dev, enabled);
+}
+
+/* Returns 0 on success, 1 if we failed to suspend with wowlan:
+ * if the circumstances didn't satisfy the conditions for suspending
+ * with wowlan, mac80211 will fall back to the no_wowlan flow.
+ * If an error occurred, we update the trans status and state here,
+ * and the result will be stopping the FW.
+ */
+static int
+iwl_mld_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ int ret;
+
+ iwl_fw_runtime_suspend(&mld->fwrt);
+
+ ret = iwl_mld_wowlan_suspend(mld, wowlan);
+ if (ret) {
+ if (ret < 0) {
+ mld->trans->state = IWL_TRANS_NO_FW;
+ set_bit(STATUS_FW_ERROR, &mld->trans->status);
+ }
+ return 1;
+ }
+
+ if (iwl_mld_no_wowlan_suspend(mld))
+ return 1;
+
+ return 0;
+}
+
+static int iwl_mld_resume(struct ieee80211_hw *hw)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ int ret;
+
+ ret = iwl_mld_wowlan_resume(mld);
+ if (ret)
+ return ret;
+
+ iwl_fw_runtime_resume(&mld->fwrt);
+
+ iwl_mld_low_latency_restart(mld);
+
+ return 0;
+}
+
+static int iwl_mld_alloc_ptk_pn(struct iwl_mld *mld,
+ struct iwl_mld_sta *mld_sta,
+ struct ieee80211_key_conf *key,
+ struct iwl_mld_ptk_pn **ptk_pn)
+{
+ u8 num_rx_queues = mld->trans->num_rx_queues;
+ int keyidx = key->keyidx;
+ struct ieee80211_key_seq seq;
+
+ if (WARN_ON(keyidx >= ARRAY_SIZE(mld_sta->ptk_pn)))
+ return -EINVAL;
+
+ WARN_ON(rcu_access_pointer(mld_sta->ptk_pn[keyidx]));
+ *ptk_pn = kzalloc(struct_size(*ptk_pn, q, num_rx_queues),
+ GFP_KERNEL);
+ if (!*ptk_pn)
+ return -ENOMEM;
+
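+	/* Seed the per-queue, per-TID PN trackers with the current RX PN */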
+ for (u8 tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ ieee80211_get_key_rx_seq(key, tid, &seq);
+ for (u8 q = 0; q < num_rx_queues; q++)
+ memcpy((*ptk_pn)->q[q].pn[tid], seq.ccmp.pn,
+ IEEE80211_CCMP_PN_LEN);
+ }
+
+ rcu_assign_pointer(mld_sta->ptk_pn[keyidx], *ptk_pn);
+
+ return 0;
+}
+
+static int iwl_mld_set_key_add(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_sta *mld_sta =
+ sta ? iwl_mld_sta_from_mac80211(sta) : NULL;
+ struct iwl_mld_ptk_pn *ptk_pn = NULL;
+ int keyidx = key->keyidx;
+ int ret;
+
+ /* Will be set to 0 if added successfully */
+ key->hw_key_idx = STA_KEY_IDX_INVALID;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ IWL_DEBUG_MAC80211(mld, "Use SW encryption for WEP\n");
+ return -EOPNOTSUPP;
+ case WLAN_CIPHER_SUITE_TKIP:
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ key->flags |= IEEE80211_KEY_FLAG_PUT_MIC_SPACE;
+ break;
+ }
+ IWL_DEBUG_MAC80211(mld, "Use SW encryption for TKIP\n");
+ return -EOPNOTSUPP;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
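+	/* Key indices 6 and 7 are reserved for the BIGTK (beacon protection) */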
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ (keyidx == 6 || keyidx == 7))
+ rcu_assign_pointer(mld_vif->bigtks[keyidx - 6], key);
+
+	/* After exiting from RFKILL, hostapd configures GTK/IGTK before the
+ * AP is started, but those keys can't be sent to the FW before the
+ * MCAST/BCAST STAs are added to it (which happens upon AP start).
+ * Store it here to be sent later when the AP is started.
+ */
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_AP) && !sta &&
+ !mld_vif->ap_ibss_active)
+ return iwl_mld_store_ap_early_key(mld, key, mld_vif);
+
+ if (!mld->fw_status.in_hw_restart && mld_sta &&
+ key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
+ (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
+ key->cipher == WLAN_CIPHER_SUITE_GCMP ||
+ key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
+ ret = iwl_mld_alloc_ptk_pn(mld, mld_sta, key, &ptk_pn);
+ if (ret)
+ return ret;
+ }
+
+ IWL_DEBUG_MAC80211(mld, "set hwcrypto key (sta:%pM, id:%d)\n",
+ sta ? sta->addr : NULL, keyidx);
+
+ ret = iwl_mld_add_key(mld, vif, sta, key);
+ if (ret) {
+ IWL_WARN(mld, "set key failed (%d)\n", ret);
+ if (ptk_pn) {
+ RCU_INIT_POINTER(mld_sta->ptk_pn[keyidx], NULL);
+ kfree(ptk_pn);
+ }
+
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void iwl_mld_set_key_remove(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_sta *mld_sta =
+ sta ? iwl_mld_sta_from_mac80211(sta) : NULL;
+ int keyidx = key->keyidx;
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ (keyidx == 6 || keyidx == 7))
+ RCU_INIT_POINTER(mld_vif->bigtks[keyidx - 6], NULL);
+
+ if (mld_sta && key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
+ (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
+ key->cipher == WLAN_CIPHER_SUITE_GCMP ||
+ key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
+ struct iwl_mld_ptk_pn *ptk_pn;
+
+ if (WARN_ON(keyidx >= ARRAY_SIZE(mld_sta->ptk_pn)))
+ return;
+
+ ptk_pn = wiphy_dereference(mld->wiphy,
+ mld_sta->ptk_pn[keyidx]);
+ RCU_INIT_POINTER(mld_sta->ptk_pn[keyidx], NULL);
+ if (!WARN_ON(!ptk_pn))
+ kfree_rcu(ptk_pn, rcu_head);
+ }
+
+ /* if this key was stored to be added later to the FW - free it here */
+ if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ iwl_mld_free_ap_early_key(mld, key, mld_vif);
+
+ /* We already removed it */
+ if (key->hw_key_idx == STA_KEY_IDX_INVALID)
+ return;
+
+ IWL_DEBUG_MAC80211(mld, "disable hwcrypto key\n");
+
+ iwl_mld_remove_key(mld, vif, sta, key);
+}
+
+static int iwl_mld_mac80211_set_key(struct ieee80211_hw *hw,
+ enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ int ret;
+
+ switch (cmd) {
+ case SET_KEY:
+ ret = iwl_mld_set_key_add(mld, vif, sta, key);
+ if (ret)
+ ret = -EOPNOTSUPP;
+ break;
+ case DISABLE_KEY:
+ iwl_mld_set_key_remove(mld, vif, sta, key);
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+iwl_mld_pre_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link *mld_link =
+ iwl_mld_link_dereference_check(mld_vif, chsw->link_id);
+ u8 primary;
+ int selected;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (WARN_ON(!mld_link))
+ return -EINVAL;
+
+ IWL_DEBUG_MAC80211(mld, "pre CSA to freq %d\n",
+ chsw->chandef.center_freq1);
+
+ if (!iwl_mld_emlsr_active(vif))
+ return 0;
+
+ primary = iwl_mld_get_primary_link(vif);
+
+ /* stay on the primary link unless it is undergoing a CSA with quiet */
+ if (chsw->link_id == primary && chsw->block_tx)
+ selected = iwl_mld_get_other_link(vif, primary);
+ else
+ selected = primary;
+
+	/* Remember to tell the firmware that this link can't TX.
+ * Note that this logic seems to be unrelated to emlsr, but it
+ * really is needed only when emlsr is active. When we have a
+ * single link, the firmware will handle all this on its own.
+ * In multi-link scenarios, we can learn about the CSA from
+ * another link and this logic is too complex for the firmware
+ * to track.
+ * Since we want to de-activate the link that got a CSA with mode=1,
+ * we need to tell the firmware not to send any frame on that link
+	 * as the firmware may not be aware that the link is under a CSA
+ * with mode=1 (no Tx allowed).
+ */
+ mld_link->silent_deactivation = chsw->block_tx;
+ iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_CSA, selected);
+
+ return 0;
+}
+
+static void
+iwl_mld_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ /* By implementing this operation, we prevent mac80211 from
+ * starting its own channel switch timer, so that we can call
+ * ieee80211_chswitch_done() ourselves at the right time
+ * (Upon receiving the channel_switch_start notification from the fw)
+ */
+ IWL_DEBUG_MAC80211(mld,
+ "dummy channel switch op\n");
+}
+
+static int
+iwl_mld_post_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link_conf);
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ WARN_ON(mld_link->silent_deactivation);
+
+ return 0;
+}
+
+static void
+iwl_mld_abort_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link_conf);
+
+ IWL_DEBUG_MAC80211(mld,
+ "abort channel switch op\n");
+ mld_link->silent_deactivation = false;
+}
+
+static int
+iwl_mld_switch_vif_chanctx_swap(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ int ret;
+
+ iwl_mld_unassign_vif_chanctx(hw, vifs[0].vif, vifs[0].link_conf,
+ vifs[0].old_ctx);
+ iwl_mld_remove_chanctx(hw, vifs[0].old_ctx);
+
+ ret = iwl_mld_add_chanctx(hw, vifs[0].new_ctx);
+ if (ret) {
+ IWL_ERR(mld, "failed to add new_ctx during channel switch\n");
+ goto out_reassign;
+ }
+
+ ret = iwl_mld_assign_vif_chanctx(hw, vifs[0].vif, vifs[0].link_conf,
+ vifs[0].new_ctx);
+ if (ret) {
+ IWL_ERR(mld,
+ "failed to assign new_ctx during channel switch\n");
+ goto out_remove;
+ }
+
+ return 0;
+
+ out_remove:
+ iwl_mld_remove_chanctx(hw, vifs[0].new_ctx);
+ out_reassign:
+ if (iwl_mld_add_chanctx(hw, vifs[0].old_ctx)) {
+ IWL_ERR(mld, "failed to add old_ctx after failure\n");
+ return ret;
+ }
+
+ if (iwl_mld_assign_vif_chanctx(hw, vifs[0].vif, vifs[0].link_conf,
+ vifs[0].old_ctx))
+ IWL_ERR(mld, "failed to reassign old_ctx after failure\n");
+
+ return ret;
+}
+
+static int
+iwl_mld_switch_vif_chanctx_reassign(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ int ret;
+
+ iwl_mld_unassign_vif_chanctx(hw, vifs[0].vif, vifs[0].link_conf,
+ vifs[0].old_ctx);
+ ret = iwl_mld_assign_vif_chanctx(hw, vifs[0].vif, vifs[0].link_conf,
+ vifs[0].new_ctx);
+ if (ret) {
+ IWL_ERR(mld,
+ "failed to assign new_ctx during channel switch\n");
+ goto out_reassign;
+ }
+
+ return 0;
+
+out_reassign:
+ if (iwl_mld_assign_vif_chanctx(hw, vifs[0].vif, vifs[0].link_conf,
+ vifs[0].old_ctx))
+ IWL_ERR(mld, "failed to reassign old_ctx after failure\n");
+
+ return ret;
+}
+
+static int
+iwl_mld_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ int ret;
+
+ /* we only support a single-vif right now */
+ if (n_vifs > 1)
+ return -EOPNOTSUPP;
+
+ switch (mode) {
+ case CHANCTX_SWMODE_SWAP_CONTEXTS:
+ ret = iwl_mld_switch_vif_chanctx_swap(hw, vifs);
+ break;
+ case CHANCTX_SWMODE_REASSIGN_VIF:
+ ret = iwl_mld_switch_vif_chanctx_reassign(hw, vifs);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static void iwl_mld_sta_pre_rcu_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link_sta *mld_link_sta;
+ u8 link_id;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* This is called before mac80211 does RCU synchronisation,
+ * so here we already invalidate our internal RCU-protected
+ * station pointer. The rest of the code will thus no longer
+ * be able to find the station this way, and we don't rely
+ * on further RCU synchronisation after the sta_state()
+ * callback deleted the station.
+ */
+ for_each_mld_link_sta(mld_sta, mld_link_sta, link_id)
+ RCU_INIT_POINTER(mld->fw_id_to_link_sta[mld_link_sta->fw_id],
+ NULL);
+
+ if (sta == mld_vif->ap_sta)
+ mld_vif->ap_sta = NULL;
+}
+
+static void
+iwl_mld_mac80211_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct ieee80211_bss_conf *link_conf;
+ u32 duration;
+ int ret;
+
+ link_conf = wiphy_dereference(hw->wiphy, vif->link_conf[link_id]);
+ if (WARN_ON_ONCE(!link_conf))
+ return;
+
+ /* Protect the session to hear the TDLS setup response on the channel */
+
+ duration = 2 * link_conf->dtim_period * link_conf->beacon_int;
+
+ ret = iwl_mld_start_session_protection(mld, vif, duration, duration,
+ link_id, HZ / 5);
+ if (ret)
+ IWL_ERR(mld,
+ "Failed to start session protection for TDLS: %d\n",
+ ret);
+}
+
+static bool iwl_mld_can_activate_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 desired_links)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ int n_links = hweight16(desired_links);
+
+ /* Check if HW supports the wanted number of links */
+ return n_links <= iwl_mld_max_active_links(mld, vif);
+}
+
+static int
+iwl_mld_change_vif_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 old_links, u16 new_links,
+ struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS])
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct ieee80211_bss_conf *link_conf;
+ u16 removed = old_links & ~new_links;
+ u16 added = new_links & ~old_links;
+ int err;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /*
+ * No bits designate non-MLO mode. We can handle MLO exit/enter by
+ * simply mapping that to link ID zero internally.
+ * Note that mac80211 does such a non-MLO to MLO switch during restart
+ * if it was in MLO before. In that case, we do not have a link to
+ * remove.
+ */
+ if (old_links == 0 && !mld->fw_status.in_hw_restart)
+ removed |= BIT(0);
+
+ if (new_links == 0)
+ added |= BIT(0);
+
+ for (int i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
+ if (removed & BIT(i))
+ iwl_mld_remove_link(mld, old[i]);
+ }
+
+ for (int i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
+ if (added & BIT(i)) {
+ link_conf = link_conf_dereference_protected(vif, i);
+ if (WARN_ON(!link_conf))
+ return -EINVAL;
+
+ err = iwl_mld_add_link(mld, link_conf);
+ if (err)
+ goto remove_added_links;
+ }
+ }
+
+ /*
+ * Ensure we always have a valid primary_link. When using multiple
+ * links the proper value is set in assign_vif_chanctx.
+ */
+ mld_vif->emlsr.primary = new_links ? __ffs(new_links) : 0;
+
+ /*
+ * Special MLO restart case. We did not have a link when the interface
+ * was added, so do the power configuration now.
+ */
+ if (old_links == 0 && mld->fw_status.in_hw_restart)
+ iwl_mld_update_mac_power(mld, vif, false);
+
+ return 0;
+
+remove_added_links:
+ for (int i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
+ if (!(added & BIT(i)))
+ continue;
+
+ link_conf = link_conf_dereference_protected(vif, i);
+ if (!link_conf || !iwl_mld_link_from_mac80211(link_conf))
+ continue;
+
+ iwl_mld_remove_link(mld, link_conf);
+ }
+
+ return err;
+}
+
+static int iwl_mld_change_sta_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u16 old_links, u16 new_links)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ return iwl_mld_update_link_stas(mld, vif, sta, old_links, new_links);
+}
+
+static int iwl_mld_mac80211_join_ibss(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ return iwl_mld_start_ap_ibss(hw, vif, &vif->bss_conf);
+}
+
+static void iwl_mld_mac80211_leave_ibss(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ return iwl_mld_stop_ap_ibss(hw, vif, &vif->bss_conf);
+}
+
+static int iwl_mld_mac80211_tx_last_beacon(struct ieee80211_hw *hw)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ return mld->ibss_manager;
+}
+
+#define IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS_TIMEOUT (5 * HZ)
+
+static void iwl_mld_vif_iter_emlsr_block_tmp_non_bss(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int ret;
+
+ if (!iwl_mld_vif_has_emlsr_cap(vif))
+ return;
+
+ ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif,
+ IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS,
+ iwl_mld_get_primary_link(vif));
+ if (ret)
+ return;
+
+ wiphy_delayed_work_queue(mld_vif->mld->wiphy,
+ &mld_vif->emlsr.tmp_non_bss_done_wk,
+ IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS_TIMEOUT);
+}
+
+static void iwl_mld_prep_add_interface(struct ieee80211_hw *hw,
+ enum nl80211_iftype type)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ IWL_DEBUG_MAC80211(mld, "prep_add_interface: type=%u\n", type);
+
+ if (!(type == NL80211_IFTYPE_AP ||
+ type == NL80211_IFTYPE_P2P_GO ||
+ type == NL80211_IFTYPE_P2P_CLIENT))
+ return;
+
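+	/* Temporarily block EMLSR on all vifs that support it */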
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_vif_iter_emlsr_block_tmp_non_bss,
+ NULL);
+}
+
+static int iwl_mld_set_hw_timestamp(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_set_hw_timestamp *hwts)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ u32 protocols = 0;
+
+ /* HW timestamping is only supported for a specific station */
+ if (!hwts->macaddr)
+ return -EOPNOTSUPP;
+
+ if (hwts->enable)
+ protocols =
+ IWL_TIME_SYNC_PROTOCOL_TM | IWL_TIME_SYNC_PROTOCOL_FTM;
+
+ return iwl_mld_time_sync_config(mld, hwts->macaddr, protocols);
+}
+
+static int iwl_mld_start_pmsr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *request)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ return iwl_mld_ftm_start(mld, vif, request);
+}
+
+const struct ieee80211_ops iwl_mld_hw_ops = {
+ .tx = iwl_mld_mac80211_tx,
+ .start = iwl_mld_mac80211_start,
+ .stop = iwl_mld_mac80211_stop,
+ .config = iwl_mld_mac80211_config,
+ .add_interface = iwl_mld_mac80211_add_interface,
+ .remove_interface = iwl_mld_mac80211_remove_interface,
+ .conf_tx = iwl_mld_mac80211_conf_tx,
+ .prepare_multicast = iwl_mld_mac80211_prepare_multicast,
+ .configure_filter = iwl_mld_mac80211_configure_filter,
+ .reconfig_complete = iwl_mld_mac80211_reconfig_complete,
+ .wake_tx_queue = iwl_mld_mac80211_wake_tx_queue,
+ .add_chanctx = iwl_mld_add_chanctx,
+ .remove_chanctx = iwl_mld_remove_chanctx,
+ .change_chanctx = iwl_mld_change_chanctx,
+ .assign_vif_chanctx = iwl_mld_assign_vif_chanctx,
+ .unassign_vif_chanctx = iwl_mld_unassign_vif_chanctx,
+ .set_rts_threshold = iwl_mld_mac80211_set_rts_threshold,
+ .link_info_changed = iwl_mld_mac80211_link_info_changed,
+ .vif_cfg_changed = iwl_mld_mac80211_vif_cfg_changed,
+ .set_key = iwl_mld_mac80211_set_key,
+ .hw_scan = iwl_mld_mac80211_hw_scan,
+ .cancel_hw_scan = iwl_mld_mac80211_cancel_hw_scan,
+ .sched_scan_start = iwl_mld_mac80211_sched_scan_start,
+ .sched_scan_stop = iwl_mld_mac80211_sched_scan_stop,
+ .mgd_prepare_tx = iwl_mld_mac80211_mgd_prepare_tx,
+ .mgd_complete_tx = iwl_mld_mac_mgd_complete_tx,
+ .sta_state = iwl_mld_mac80211_sta_state,
+ .sta_statistics = iwl_mld_mac80211_sta_statistics,
+ .flush = iwl_mld_mac80211_flush,
+ .flush_sta = iwl_mld_mac80211_flush_sta,
+ .ampdu_action = iwl_mld_mac80211_ampdu_action,
+ .can_aggregate_in_amsdu = iwl_mld_mac80211_can_aggregate,
+ .sync_rx_queues = iwl_mld_mac80211_sync_rx_queues,
+ .link_sta_rc_update = iwl_mld_sta_rc_update,
+ .start_ap = iwl_mld_start_ap_ibss,
+ .stop_ap = iwl_mld_stop_ap_ibss,
+ .pre_channel_switch = iwl_mld_pre_channel_switch,
+ .channel_switch = iwl_mld_channel_switch,
+ .post_channel_switch = iwl_mld_post_channel_switch,
+ .abort_channel_switch = iwl_mld_abort_channel_switch,
+ .switch_vif_chanctx = iwl_mld_switch_vif_chanctx,
+ .sta_pre_rcu_remove = iwl_mld_sta_pre_rcu_remove,
+ .remain_on_channel = iwl_mld_start_roc,
+ .cancel_remain_on_channel = iwl_mld_cancel_roc,
+ .can_activate_links = iwl_mld_can_activate_links,
+ .change_vif_links = iwl_mld_change_vif_links,
+ .change_sta_links = iwl_mld_change_sta_links,
+#ifdef CONFIG_PM_SLEEP
+ .suspend = iwl_mld_suspend,
+ .resume = iwl_mld_resume,
+ .set_wakeup = iwl_mld_set_wakeup,
+ .set_rekey_data = iwl_mld_set_rekey_data,
+#if IS_ENABLED(CONFIG_IPV6)
+ .ipv6_addr_change = iwl_mld_ipv6_addr_change,
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+#endif /* CONFIG_PM_SLEEP */
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ .vif_add_debugfs = iwl_mld_add_vif_debugfs,
+ .link_add_debugfs = iwl_mld_add_link_debugfs,
+ .link_sta_add_debugfs = iwl_mld_add_link_sta_debugfs,
+#endif
+ .mgd_protect_tdls_discover = iwl_mld_mac80211_mgd_protect_tdls_discover,
+ .join_ibss = iwl_mld_mac80211_join_ibss,
+ .leave_ibss = iwl_mld_mac80211_leave_ibss,
+ .tx_last_beacon = iwl_mld_mac80211_tx_last_beacon,
+ .prep_add_interface = iwl_mld_prep_add_interface,
+ .set_hw_timestamp = iwl_mld_set_hw_timestamp,
+ .start_pmsr = iwl_mld_start_pmsr,
+};
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.h b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.h
new file mode 100644
index 000000000000..aad04d7b2617
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_mac80211_h__
+#define __iwl_mld_mac80211_h__
+
+#include "mld.h"
+
+int iwl_mld_register_hw(struct iwl_mld *mld);
+void iwl_mld_recalc_multicast_filter(struct iwl_mld *mld);
+
+#endif /* __iwl_mld_mac80211_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mcc.c b/drivers/net/wireless/intel/iwlwifi/mld/mcc.c
new file mode 100644
index 000000000000..daca14e208bd
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mcc.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+
+#include <net/cfg80211.h>
+#include <net/mac80211.h>
+
+#include <fw/dbg.h>
+#include <iwl-nvm-parse.h>
+
+#include "mld.h"
+#include "hcmd.h"
+#include "mcc.h"
+
+/* It is the caller's responsibility to free the pointer returned here */
+static struct iwl_mcc_update_resp_v8 *
+iwl_mld_parse_mcc_update_resp_v8(const struct iwl_rx_packet *pkt)
+{
+ const struct iwl_mcc_update_resp_v8 *mcc_resp_v8 = (const void *)pkt->data;
+ int n_channels = __le32_to_cpu(mcc_resp_v8->n_channels);
+ struct iwl_mcc_update_resp_v8 *resp_cp;
+ int notif_len = struct_size(resp_cp, channels, n_channels);
+
+ if (iwl_rx_packet_payload_len(pkt) != notif_len)
+ return ERR_PTR(-EINVAL);
+
+ resp_cp = kmemdup(mcc_resp_v8, notif_len, GFP_KERNEL);
+ if (!resp_cp)
+ return ERR_PTR(-ENOMEM);
+
+ return resp_cp;
+}
+
+/* It is the caller's responsibility to free the pointer returned here */
+static struct iwl_mcc_update_resp_v8 *
+iwl_mld_parse_mcc_update_resp_v5_v6(const struct iwl_rx_packet *pkt)
+{
+ const struct iwl_mcc_update_resp_v4 *mcc_resp_v4 = (const void *)pkt->data;
+ struct iwl_mcc_update_resp_v8 *resp_cp;
+ int n_channels = __le32_to_cpu(mcc_resp_v4->n_channels);
+ int resp_len;
+
+ if (iwl_rx_packet_payload_len(pkt) !=
+ struct_size(mcc_resp_v4, channels, n_channels))
+ return ERR_PTR(-EINVAL);
+
+ resp_len = struct_size(resp_cp, channels, n_channels);
+ resp_cp = kzalloc(resp_len, GFP_KERNEL);
+ if (!resp_cp)
+ return ERR_PTR(-ENOMEM);
+
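+	/* Translate the v4-layout response into the v8 layout */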
+ resp_cp->status = mcc_resp_v4->status;
+ resp_cp->mcc = mcc_resp_v4->mcc;
+ resp_cp->cap = cpu_to_le32(le16_to_cpu(mcc_resp_v4->cap));
+ resp_cp->source_id = mcc_resp_v4->source_id;
+ resp_cp->geo_info = mcc_resp_v4->geo_info;
+ resp_cp->n_channels = mcc_resp_v4->n_channels;
+ memcpy(resp_cp->channels, mcc_resp_v4->channels,
+ n_channels * sizeof(__le32));
+
+ return resp_cp;
+}
+
+/* It is the caller's responsibility to free the pointer returned here */
+static struct iwl_mcc_update_resp_v8 *
+iwl_mld_update_mcc(struct iwl_mld *mld, const char *alpha2,
+ enum iwl_mcc_source src_id)
+{
+ int resp_ver = iwl_fw_lookup_notif_ver(mld->fw, LONG_GROUP,
+ MCC_UPDATE_CMD, 0);
+ struct iwl_mcc_update_cmd mcc_update_cmd = {
+ .mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]),
+ .source_id = (u8)src_id,
+ };
+ struct iwl_mcc_update_resp_v8 *resp_cp;
+ struct iwl_rx_packet *pkt;
+ struct iwl_host_cmd cmd = {
+ .id = MCC_UPDATE_CMD,
+ .flags = CMD_WANT_SKB,
+ .data = { &mcc_update_cmd },
+ .len[0] = sizeof(mcc_update_cmd),
+ };
+ int ret;
+ u16 mcc;
+
+ IWL_DEBUG_LAR(mld, "send MCC update to FW with '%c%c' src = %d\n",
+ alpha2[0], alpha2[1], src_id);
+
+ ret = iwl_mld_send_cmd(mld, &cmd);
+ if (ret)
+ return ERR_PTR(ret);
+
+ pkt = cmd.resp_pkt;
+
+	/* For Wifi-7 radios, we get version 8
+	 * For Wifi-6E radios, we get version 6
+	 * For Wifi-6 radios, we get version 5; versions 4, 5 and 6 share the
+	 * same layout, so they are parsed as v4.
+ */
+ switch (resp_ver) {
+ case 5:
+ case 6:
+ resp_cp = iwl_mld_parse_mcc_update_resp_v5_v6(pkt);
+ break;
+ case 8:
+ resp_cp = iwl_mld_parse_mcc_update_resp_v8(pkt);
+ break;
+ default:
+ IWL_FW_CHECK_FAILED(mld, "Unknown MCC_UPDATE_CMD version %d\n", resp_ver);
+ resp_cp = ERR_PTR(-EINVAL);
+ }
+
+ if (IS_ERR(resp_cp))
+ goto exit;
+
+ mcc = le16_to_cpu(resp_cp->mcc);
+
+ IWL_FW_CHECK(mld, !mcc, "mcc can't be 0: %d\n", mcc);
+
+ IWL_DEBUG_LAR(mld,
+ "MCC response status: 0x%x. new MCC: 0x%x ('%c%c')\n",
+ le32_to_cpu(resp_cp->status), mcc, mcc >> 8, mcc & 0xff);
+
+exit:
+ iwl_free_resp(&cmd);
+ return resp_cp;
+}
+
+/* It is the caller's responsibility to free the pointer returned here */
+struct ieee80211_regdomain *
+iwl_mld_get_regdomain(struct iwl_mld *mld,
+ const char *alpha2,
+ enum iwl_mcc_source src_id,
+ bool *changed)
+{
+ struct ieee80211_regdomain *regd = NULL;
+ struct iwl_mcc_update_resp_v8 *resp;
+ u8 resp_ver = iwl_fw_lookup_notif_ver(mld->fw, IWL_ALWAYS_LONG_GROUP,
+ MCC_UPDATE_CMD, 0);
+
+ IWL_DEBUG_LAR(mld, "Getting regdomain data for %s from FW\n", alpha2);
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ resp = iwl_mld_update_mcc(mld, alpha2, src_id);
+ if (IS_ERR(resp)) {
+ IWL_DEBUG_LAR(mld, "Could not get update from FW %ld\n",
+ PTR_ERR(resp));
+ resp = NULL;
+ goto out;
+ }
+
+ if (changed) {
+ u32 status = le32_to_cpu(resp->status);
+
+ *changed = (status == MCC_RESP_NEW_CHAN_PROFILE ||
+ status == MCC_RESP_ILLEGAL);
+ }
+ IWL_DEBUG_LAR(mld, "MCC update response version: %d\n", resp_ver);
+
+ regd = iwl_parse_nvm_mcc_info(mld->trans->dev, mld->cfg,
+ __le32_to_cpu(resp->n_channels),
+ resp->channels,
+ __le16_to_cpu(resp->mcc),
+ __le16_to_cpu(resp->geo_info),
+ le32_to_cpu(resp->cap), resp_ver);
+
+ if (IS_ERR(regd)) {
+ IWL_DEBUG_LAR(mld, "Could not get parse update from FW %ld\n",
+ PTR_ERR(regd));
+ goto out;
+ }
+
+ IWL_DEBUG_LAR(mld, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
+ regd->alpha2, regd->alpha2[0],
+ regd->alpha2[1], resp->source_id);
+
+ mld->mcc_src = resp->source_id;
+
+ if (!iwl_puncturing_is_allowed_in_bios(mld->bios_enable_puncturing,
+ le16_to_cpu(resp->mcc)))
+ ieee80211_hw_set(mld->hw, DISALLOW_PUNCTURING);
+ else
+ __clear_bit(IEEE80211_HW_DISALLOW_PUNCTURING, mld->hw->flags);
+
+out:
+ kfree(resp);
+ return regd;
+}
+
+/* It is the caller's responsibility to free the pointer returned here */
+static struct ieee80211_regdomain *
+iwl_mld_get_current_regdomain(struct iwl_mld *mld,
+ bool *changed)
+{
+ return iwl_mld_get_regdomain(mld, "ZZ",
+ MCC_SOURCE_GET_CURRENT, changed);
+}
+
+void iwl_mld_update_changed_regdomain(struct iwl_mld *mld)
+{
+ struct ieee80211_regdomain *regd;
+ bool changed;
+
+ regd = iwl_mld_get_current_regdomain(mld, &changed);
+
+ if (IS_ERR_OR_NULL(regd))
+ return;
+
+ if (changed)
+ regulatory_set_wiphy_regd(mld->wiphy, regd);
+ kfree(regd);
+}
+
+static int iwl_mld_apply_last_mcc(struct iwl_mld *mld,
+ const char *alpha2)
+{
+ struct ieee80211_regdomain *regd;
+ u32 used_src;
+ bool changed;
+ int ret;
+
+ /* save the last source in case we overwrite it below */
+ used_src = mld->mcc_src;
+
+ /* Notify the firmware we support wifi location updates */
+ regd = iwl_mld_get_current_regdomain(mld, NULL);
+ if (!IS_ERR_OR_NULL(regd))
+ kfree(regd);
+
+ /* Now set our last stored MCC and source */
+ regd = iwl_mld_get_regdomain(mld, alpha2, used_src,
+ &changed);
+ if (IS_ERR_OR_NULL(regd))
+ return -EIO;
+
+ /* update cfg80211 if the regdomain was changed */
+ if (changed)
+ ret = regulatory_set_wiphy_regd_sync(mld->wiphy, regd);
+ else
+ ret = 0;
+
+ kfree(regd);
+ return ret;
+}
+
+int iwl_mld_init_mcc(struct iwl_mld *mld)
+{
+ const struct ieee80211_regdomain *r;
+ struct ieee80211_regdomain *regd;
+ char mcc[3];
+ int retval;
+
+ /* try to replay the last set MCC to FW */
+ r = wiphy_dereference(mld->wiphy, mld->wiphy->regd);
+
+ if (r)
+ return iwl_mld_apply_last_mcc(mld, r->alpha2);
+
+ regd = iwl_mld_get_current_regdomain(mld, NULL);
+ if (IS_ERR_OR_NULL(regd))
+ return -EIO;
+
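+	/* Prefer the MCC from the BIOS, if one is provided */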
+ if (!iwl_bios_get_mcc(&mld->fwrt, mcc)) {
+ kfree(regd);
+ regd = iwl_mld_get_regdomain(mld, mcc, MCC_SOURCE_BIOS, NULL);
+ if (IS_ERR_OR_NULL(regd))
+ return -EIO;
+ }
+
+ retval = regulatory_set_wiphy_regd_sync(mld->wiphy, regd);
+
+ kfree(regd);
+ return retval;
+}
+
+static void iwl_mld_find_assoc_vif_iterator(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ bool *assoc = data;
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ vif->cfg.assoc)
+ *assoc = true;
+}
+
+static bool iwl_mld_is_a_vif_assoc(struct iwl_mld *mld)
+{
+ bool assoc = false;
+
+ ieee80211_iterate_active_interfaces_atomic(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_find_assoc_vif_iterator,
+ &assoc);
+ return assoc;
+}
+
+void iwl_mld_handle_update_mcc(struct iwl_mld *mld, struct iwl_rx_packet *pkt)
+{
+ struct iwl_mcc_chub_notif *notif = (void *)pkt->data;
+ enum iwl_mcc_source src;
+ char mcc[3];
+ struct ieee80211_regdomain *regd;
+ bool changed;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (iwl_mld_is_a_vif_assoc(mld) &&
+ notif->source_id == MCC_SOURCE_WIFI) {
+ IWL_DEBUG_LAR(mld, "Ignore mcc update while associated\n");
+ return;
+ }
+
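+	/* Convert the 16-bit MCC to a NUL-terminated alpha2 string */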
+ mcc[0] = le16_to_cpu(notif->mcc) >> 8;
+ mcc[1] = le16_to_cpu(notif->mcc) & 0xff;
+ mcc[2] = '\0';
+ src = notif->source_id;
+
+ IWL_DEBUG_LAR(mld,
+ "RX: received chub update mcc cmd (mcc '%s' src %d)\n",
+ mcc, src);
+ regd = iwl_mld_get_regdomain(mld, mcc, src, &changed);
+ if (IS_ERR_OR_NULL(regd))
+ return;
+
+ if (changed)
+ regulatory_set_wiphy_regd(mld->hw->wiphy, regd);
+ kfree(regd);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mcc.h b/drivers/net/wireless/intel/iwlwifi/mld/mcc.h
new file mode 100644
index 000000000000..2b31e5b5e2ed
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mcc.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_mcc_h__
+#define __iwl_mld_mcc_h__
+
+int iwl_mld_init_mcc(struct iwl_mld *mld);
+void iwl_mld_handle_update_mcc(struct iwl_mld *mld, struct iwl_rx_packet *pkt);
+void iwl_mld_update_changed_regdomain(struct iwl_mld *mld);
+struct ieee80211_regdomain *
+iwl_mld_get_regdomain(struct iwl_mld *mld,
+ const char *alpha2,
+ enum iwl_mcc_source src_id,
+ bool *changed);
+
+#endif /* __iwl_mld_mcc_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.c b/drivers/net/wireless/intel/iwlwifi/mld/mld.c
new file mode 100644
index 000000000000..d4a99ae64074
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.c
@@ -0,0 +1,720 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include <linux/rtnetlink.h>
+#include <net/mac80211.h>
+
+#include "fw/api/rx.h"
+#include "fw/api/datapath.h"
+#include "fw/api/commands.h"
+#include "fw/api/offload.h"
+#include "fw/api/coex.h"
+#include "fw/dbg.h"
+#include "fw/uefi.h"
+
+#include "mld.h"
+#include "mlo.h"
+#include "mac80211.h"
+#include "led.h"
+#include "scan.h"
+#include "tx.h"
+#include "sta.h"
+#include "regulatory.h"
+#include "thermal.h"
+#include "low_latency.h"
+#include "hcmd.h"
+#include "fw/api/location.h"
+
+#define DRV_DESCRIPTION "Intel(R) MLD wireless driver for Linux"
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IWLWIFI");
+
+static const struct iwl_op_mode_ops iwl_mld_ops;
+
+static int __init iwl_mld_init(void)
+{
+ int ret = iwl_opmode_register("iwlmld", &iwl_mld_ops);
+
+ if (ret)
+ pr_err("Unable to register MLD op_mode: %d\n", ret);
+
+ return ret;
+}
+module_init(iwl_mld_init);
+
+static void __exit iwl_mld_exit(void)
+{
+ iwl_opmode_deregister("iwlmld");
+}
+module_exit(iwl_mld_exit);
+
+static void iwl_mld_hw_set_regulatory(struct iwl_mld *mld)
+{
+ struct wiphy *wiphy = mld->wiphy;
+
+ wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
+ wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
+}
+
+VISIBLE_IF_IWLWIFI_KUNIT
+void iwl_construct_mld(struct iwl_mld *mld, struct iwl_trans *trans,
+ const struct iwl_cfg *cfg, const struct iwl_fw *fw,
+ struct ieee80211_hw *hw, struct dentry *dbgfs_dir)
+{
+ mld->dev = trans->dev;
+ mld->trans = trans;
+ mld->cfg = cfg;
+ mld->fw = fw;
+ mld->hw = hw;
+ mld->wiphy = hw->wiphy;
+ mld->debugfs_dir = dbgfs_dir;
+
+ iwl_notification_wait_init(&mld->notif_wait);
+
+ /* Setup async RX handling */
+ spin_lock_init(&mld->async_handlers_lock);
+ wiphy_work_init(&mld->async_handlers_wk,
+ iwl_mld_async_handlers_wk);
+
+ /* Dynamic Queue Allocation */
+ spin_lock_init(&mld->add_txqs_lock);
+ INIT_LIST_HEAD(&mld->txqs_to_add);
+ wiphy_work_init(&mld->add_txqs_wk, iwl_mld_add_txqs_wk);
+
+ /* Setup RX queues sync wait queue */
+ init_waitqueue_head(&mld->rxq_sync.waitq);
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_construct_mld);
+
+static void __acquires(&mld->wiphy->mtx)
+iwl_mld_fwrt_dump_start(void *ctx)
+{
+ struct iwl_mld *mld = ctx;
+
+ wiphy_lock(mld->wiphy);
+}
+
+static void __releases(&mld->wiphy->mtx)
+iwl_mld_fwrt_dump_end(void *ctx)
+{
+ struct iwl_mld *mld = ctx;
+
+ wiphy_unlock(mld->wiphy);
+}
+
+static bool iwl_mld_d3_debug_enable(void *ctx)
+{
+ return IWL_MLD_D3_DEBUG;
+}
+
+static int iwl_mld_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
+{
+ struct iwl_mld *mld = (struct iwl_mld *)ctx;
+ int ret;
+
+ wiphy_lock(mld->wiphy);
+ ret = iwl_mld_send_cmd(mld, host_cmd);
+ wiphy_unlock(mld->wiphy);
+
+ return ret;
+}
+
+static const struct iwl_fw_runtime_ops iwl_mld_fwrt_ops = {
+ .dump_start = iwl_mld_fwrt_dump_start,
+ .dump_end = iwl_mld_fwrt_dump_end,
+ .send_hcmd = iwl_mld_fwrt_send_hcmd,
+ .d3_debug_enable = iwl_mld_d3_debug_enable,
+};
+
+static void
+iwl_mld_construct_fw_runtime(struct iwl_mld *mld, struct iwl_trans *trans,
+ const struct iwl_fw *fw,
+ struct dentry *debugfs_dir)
+{
+ iwl_fw_runtime_init(&mld->fwrt, trans, fw, &iwl_mld_fwrt_ops, mld,
+ NULL, NULL, debugfs_dir);
+
+ iwl_fw_set_current_image(&mld->fwrt, IWL_UCODE_REGULAR);
+}
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_legacy_names[] = {
+ HCMD_NAME(UCODE_ALIVE_NTFY),
+ HCMD_NAME(INIT_COMPLETE_NOTIF),
+ HCMD_NAME(PHY_CONTEXT_CMD),
+ HCMD_NAME(SCAN_CFG_CMD),
+ HCMD_NAME(SCAN_REQ_UMAC),
+ HCMD_NAME(SCAN_ABORT_UMAC),
+ HCMD_NAME(SCAN_COMPLETE_UMAC),
+ HCMD_NAME(TX_CMD),
+ HCMD_NAME(TXPATH_FLUSH),
+ HCMD_NAME(LEDS_CMD),
+ HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION),
+ HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION),
+ HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
+ HCMD_NAME(POWER_TABLE_CMD),
+ HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
+ HCMD_NAME(BEACON_NOTIFICATION),
+ HCMD_NAME(BEACON_TEMPLATE_CMD),
+ HCMD_NAME(TX_ANT_CONFIGURATION_CMD),
+ HCMD_NAME(REDUCE_TX_POWER_CMD),
+ HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
+ HCMD_NAME(MAC_PM_POWER_TABLE),
+ HCMD_NAME(MFUART_LOAD_NOTIFICATION),
+ HCMD_NAME(RSS_CONFIG_CMD),
+ HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC),
+ HCMD_NAME(REPLY_RX_MPDU_CMD),
+ HCMD_NAME(BA_NOTIF),
+ HCMD_NAME(MCC_UPDATE_CMD),
+ HCMD_NAME(MCC_CHUB_UPDATE_CMD),
+ HCMD_NAME(MCAST_FILTER_CMD),
+ HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
+ HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD),
+ HCMD_NAME(MATCH_FOUND_NOTIFICATION),
+ HCMD_NAME(WOWLAN_PATTERNS),
+ HCMD_NAME(WOWLAN_CONFIGURATION),
+ HCMD_NAME(WOWLAN_TSC_RSC_PARAM),
+ HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL),
+ HCMD_NAME(DEBUG_HOST_COMMAND),
+ HCMD_NAME(LDBG_CONFIG_CMD),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_system_names[] = {
+ HCMD_NAME(SHARED_MEM_CFG_CMD),
+ HCMD_NAME(SOC_CONFIGURATION_CMD),
+ HCMD_NAME(INIT_EXTENDED_CFG_CMD),
+ HCMD_NAME(FW_ERROR_RECOVERY_CMD),
+ HCMD_NAME(RFI_GET_FREQ_TABLE_CMD),
+ HCMD_NAME(SYSTEM_STATISTICS_CMD),
+ HCMD_NAME(SYSTEM_STATISTICS_END_NOTIF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_reg_and_nvm_names[] = {
+ HCMD_NAME(LARI_CONFIG_CHANGE),
+ HCMD_NAME(NVM_GET_INFO),
+ HCMD_NAME(TAS_CONFIG),
+ HCMD_NAME(SAR_OFFSET_MAPPING_TABLE_CMD),
+ HCMD_NAME(MCC_ALLOWED_AP_TYPE_CMD),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_debug_names[] = {
+ HCMD_NAME(HOST_EVENT_CFG),
+ HCMD_NAME(DBGC_SUSPEND_RESUME),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_mac_conf_names[] = {
+ HCMD_NAME(LOW_LATENCY_CMD),
+ HCMD_NAME(SESSION_PROTECTION_CMD),
+ HCMD_NAME(MAC_CONFIG_CMD),
+ HCMD_NAME(LINK_CONFIG_CMD),
+ HCMD_NAME(STA_CONFIG_CMD),
+ HCMD_NAME(AUX_STA_CMD),
+ HCMD_NAME(STA_REMOVE_CMD),
+ HCMD_NAME(ROC_CMD),
+ HCMD_NAME(MISSED_BEACONS_NOTIF),
+ HCMD_NAME(EMLSR_TRANS_FAIL_NOTIF),
+ HCMD_NAME(ROC_NOTIF),
+ HCMD_NAME(CHANNEL_SWITCH_ERROR_NOTIF),
+ HCMD_NAME(SESSION_PROTECTION_NOTIF),
+ HCMD_NAME(PROBE_RESPONSE_DATA_NOTIF),
+ HCMD_NAME(CHANNEL_SWITCH_START_NOTIF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_data_path_names[] = {
+ HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
+ HCMD_NAME(WNM_PLATFORM_PTM_REQUEST_CMD),
+ HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD),
+ HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
+ HCMD_NAME(TLC_MNG_CONFIG_CMD),
+ HCMD_NAME(RX_BAID_ALLOCATION_CONFIG_CMD),
+ HCMD_NAME(SCD_QUEUE_CONFIG_CMD),
+ HCMD_NAME(OMI_SEND_STATUS_NOTIF),
+ HCMD_NAME(ESR_MODE_NOTIF),
+ HCMD_NAME(MONITOR_NOTIF),
+ HCMD_NAME(TLC_MNG_UPDATE_NOTIF),
+ HCMD_NAME(MU_GROUP_MGMT_NOTIF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_location_names[] = {
+ HCMD_NAME(TOF_RANGE_REQ_CMD),
+ HCMD_NAME(TOF_RANGE_RESPONSE_NOTIF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_phy_names[] = {
+ HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
+ HCMD_NAME(CTDP_CONFIG_CMD),
+ HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
+ HCMD_NAME(PER_CHAIN_LIMIT_OFFSET_CMD),
+ HCMD_NAME(CT_KILL_NOTIFICATION),
+ HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_statistics_names[] = {
+ HCMD_NAME(STATISTICS_OPER_NOTIF),
+ HCMD_NAME(STATISTICS_OPER_PART1_NOTIF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_prot_offload_names[] = {
+ HCMD_NAME(STORED_BEACON_NTF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_coex_names[] = {
+ HCMD_NAME(PROFILE_NOTIF),
+};
+
+VISIBLE_IF_IWLWIFI_KUNIT
+const struct iwl_hcmd_arr iwl_mld_groups[] = {
+ [LEGACY_GROUP] = HCMD_ARR(iwl_mld_legacy_names),
+ [LONG_GROUP] = HCMD_ARR(iwl_mld_legacy_names),
+ [SYSTEM_GROUP] = HCMD_ARR(iwl_mld_system_names),
+ [MAC_CONF_GROUP] = HCMD_ARR(iwl_mld_mac_conf_names),
+ [DATA_PATH_GROUP] = HCMD_ARR(iwl_mld_data_path_names),
+ [LOCATION_GROUP] = HCMD_ARR(iwl_mld_location_names),
+ [REGULATORY_AND_NVM_GROUP] = HCMD_ARR(iwl_mld_reg_and_nvm_names),
+ [DEBUG_GROUP] = HCMD_ARR(iwl_mld_debug_names),
+ [PHY_OPS_GROUP] = HCMD_ARR(iwl_mld_phy_names),
+ [STATISTICS_GROUP] = HCMD_ARR(iwl_mld_statistics_names),
+ [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mld_prot_offload_names),
+ [BT_COEX_GROUP] = HCMD_ARR(iwl_mld_coex_names),
+};
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_groups);
+
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+const unsigned int global_iwl_mld_goups_size = ARRAY_SIZE(iwl_mld_groups);
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(global_iwl_mld_goups_size);
+#endif
+
+static void
+iwl_mld_configure_trans(struct iwl_op_mode *op_mode)
+{
+ const struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+ static const u8 no_reclaim_cmds[] = {TX_CMD};
+ struct iwl_trans_config trans_cfg = {
+ .op_mode = op_mode,
+ /* Rx is not supported yet, but add it to avoid warnings */
+ .rx_buf_size = iwl_amsdu_size_to_rxb_size(),
+ .command_groups = iwl_mld_groups,
+ .command_groups_size = ARRAY_SIZE(iwl_mld_groups),
+ .fw_reset_handshake = true,
+ .queue_alloc_cmd_ver =
+ iwl_fw_lookup_cmd_ver(mld->fw,
+ WIDE_ID(DATA_PATH_GROUP,
+ SCD_QUEUE_CONFIG_CMD),
+ 0),
+ .no_reclaim_cmds = no_reclaim_cmds,
+ .n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds),
+ .cb_data_offs = offsetof(struct ieee80211_tx_info,
+ driver_data[2]),
+ };
+ struct iwl_trans *trans = mld->trans;
+
+ trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
+ trans->iml = mld->fw->iml;
+ trans->iml_len = mld->fw->iml_len;
+ trans->wide_cmd_header = true;
+
+ iwl_trans_configure(trans, &trans_cfg);
+}
+
+/*
+ *****************************************************
+ * op mode ops functions
+ *****************************************************
+ */
+
+#define NUM_FW_LOAD_RETRIES 3
+static struct iwl_op_mode *
+iwl_op_mode_mld_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+ const struct iwl_fw *fw, struct dentry *dbgfs_dir)
+{
+ struct ieee80211_hw *hw;
+ struct iwl_op_mode *op_mode;
+ struct iwl_mld *mld;
+ u32 eckv_value;
+ int ret;
+
+ /* Allocate and initialize a new hardware device */
+ hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) +
+ sizeof(struct iwl_mld),
+ &iwl_mld_hw_ops);
+ if (!hw)
+ return ERR_PTR(-ENOMEM);
+
+ op_mode = hw->priv;
+
+ op_mode->ops = &iwl_mld_ops;
+
+ mld = IWL_OP_MODE_GET_MLD(op_mode);
+
+ iwl_construct_mld(mld, trans, cfg, fw, hw, dbgfs_dir);
+
+ iwl_mld_construct_fw_runtime(mld, trans, fw, dbgfs_dir);
+
+ iwl_mld_get_bios_tables(mld);
+ iwl_uefi_get_sgom_table(trans, &mld->fwrt);
+ iwl_uefi_get_step_table(trans);
+ if (iwl_bios_get_eckv(&mld->fwrt, &eckv_value))
+ IWL_DEBUG_RADIO(mld, "ECKV table doesn't exist in BIOS\n");
+ else
+ trans->ext_32khz_clock_valid = !!eckv_value;
+ iwl_bios_setup_step(trans, &mld->fwrt);
+ mld->bios_enable_puncturing = iwl_uefi_get_puncturing(&mld->fwrt);
+
+ iwl_mld_hw_set_regulatory(mld);
+
+ /* Configure transport layer with the opmode specific params */
+ iwl_mld_configure_trans(op_mode);
+
+ /* needed for regulatory init */
+ rtnl_lock();
+ /* Needed for sending commands */
+ wiphy_lock(mld->wiphy);
+
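+	/* Try to load the FW, retrying up to NUM_FW_LOAD_RETRIES times */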
+ for (int i = 0; i < NUM_FW_LOAD_RETRIES; i++) {
+ ret = iwl_mld_load_fw(mld);
+ if (!ret)
+ break;
+ }
+
+ if (ret) {
+ wiphy_unlock(mld->wiphy);
+ rtnl_unlock();
+ iwl_fw_flush_dumps(&mld->fwrt);
+ goto free_hw;
+ }
+
+ iwl_mld_stop_fw(mld);
+
+ wiphy_unlock(mld->wiphy);
+ rtnl_unlock();
+
+ ret = iwl_mld_leds_init(mld);
+ if (ret)
+ goto free_nvm;
+
+ ret = iwl_mld_alloc_scan_cmd(mld);
+ if (ret)
+ goto leds_exit;
+
+ ret = iwl_mld_low_latency_init(mld);
+ if (ret)
+ goto free_scan_cmd;
+
+ ret = iwl_mld_register_hw(mld);
+ if (ret)
+ goto low_latency_free;
+
+ iwl_mld_toggle_tx_ant(mld, &mld->mgmt_tx_ant);
+
+ iwl_mld_add_debugfs_files(mld, dbgfs_dir);
+ iwl_mld_thermal_initialize(mld);
+
+ iwl_mld_ptp_init(mld);
+
+ return op_mode;
+
+low_latency_free:
+ iwl_mld_low_latency_free(mld);
+free_scan_cmd:
+ kfree(mld->scan.cmd);
+leds_exit:
+ iwl_mld_leds_exit(mld);
+free_nvm:
+ kfree(mld->nvm_data);
+free_hw:
+ ieee80211_free_hw(mld->hw);
+ return ERR_PTR(ret);
+}
+
+static void
+iwl_op_mode_mld_stop(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+
+ iwl_mld_ptp_remove(mld);
+ iwl_mld_leds_exit(mld);
+
+ wiphy_lock(mld->wiphy);
+ iwl_mld_thermal_exit(mld);
+ iwl_mld_low_latency_stop(mld);
+ iwl_mld_deinit_time_sync(mld);
+ wiphy_unlock(mld->wiphy);
+
+ ieee80211_unregister_hw(mld->hw);
+
+ iwl_fw_runtime_free(&mld->fwrt);
+ iwl_mld_low_latency_free(mld);
+
+ iwl_trans_op_mode_leave(mld->trans);
+
+ kfree(mld->nvm_data);
+ kfree(mld->scan.cmd);
+ kfree(mld->error_recovery_buf);
+ kfree(mld->mcast_filter_cmd);
+
+ ieee80211_free_hw(mld->hw);
+}
+
+static void iwl_mld_queue_state_change(struct iwl_op_mode *op_mode,
+ int hw_queue, bool queue_full)
+{
+ struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+ struct ieee80211_txq *txq;
+ struct iwl_mld_sta *mld_sta;
+ struct iwl_mld_txq *mld_txq;
+
+ rcu_read_lock();
+
+ txq = rcu_dereference(mld->fw_id_to_txq[hw_queue]);
+ if (!txq) {
+ rcu_read_unlock();
+
+ if (queue_full) {
+ /* An internal queue is not expected to become full */
+ IWL_WARN(mld,
+ "Internal hw_queue %d is full! stopping all queues\n",
+ hw_queue);
+ /* Stop all queues, as an internal queue is not
+ * mapped to a mac80211 one
+ */
+ ieee80211_stop_queues(mld->hw);
+ } else {
+ ieee80211_wake_queues(mld->hw);
+ }
+
+ return;
+ }
+
+ mld_txq = iwl_mld_txq_from_mac80211(txq);
+ mld_sta = txq->sta ? iwl_mld_sta_from_mac80211(txq->sta) : NULL;
+
+ mld_txq->status.stop_full = queue_full;
+
+ if (!queue_full && mld_sta &&
+ mld_sta->sta_state != IEEE80211_STA_NOTEXIST) {
+ local_bh_disable();
+ iwl_mld_tx_from_txq(mld, txq);
+ local_bh_enable();
+ }
+
+ rcu_read_unlock();
+}
+
+static void
+iwl_mld_queue_full(struct iwl_op_mode *op_mode, int hw_queue)
+{
+ iwl_mld_queue_state_change(op_mode, hw_queue, true);
+}
+
+static void
+iwl_mld_queue_not_full(struct iwl_op_mode *op_mode, int hw_queue)
+{
+ iwl_mld_queue_state_change(op_mode, hw_queue, false);
+}
+
+static bool
+iwl_mld_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
+{
+ struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+
+ iwl_mld_set_hwkill(mld, state);
+
+ return false;
+}
+
+static void
+iwl_mld_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
+{
+ struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ iwl_trans_free_tx_cmd(mld->trans, info->driver_data[1]);
+ ieee80211_free_txskb(mld->hw, skb);
+}
+
+static void iwl_mld_read_error_recovery_buffer(struct iwl_mld *mld)
+{
+ u32 src_size = mld->fw->ucode_capa.error_log_size;
+ u32 src_addr = mld->fw->ucode_capa.error_log_addr;
+ u8 *recovery_buf;
+ int ret;
+
+ /* no recovery buffer size defined in a TLV */
+ if (!src_size)
+ return;
+
+ recovery_buf = kzalloc(src_size, GFP_ATOMIC);
+ if (!recovery_buf)
+ return;
+
+ ret = iwl_trans_read_mem_bytes(mld->trans, src_addr,
+ recovery_buf, src_size);
+ if (ret) {
+ IWL_ERR(mld, "Failed to read error recovery buffer (%d)\n",
+ ret);
+ kfree(recovery_buf);
+ return;
+ }
+
+ mld->error_recovery_buf = recovery_buf;
+}
+
+static void iwl_mld_restart_nic(struct iwl_mld *mld)
+{
+ iwl_mld_read_error_recovery_buffer(mld);
+
+ mld->fwrt.trans->dbg.restart_required = false;
+
+ ieee80211_restart_hw(mld->hw);
+}
+
+static void
+iwl_mld_nic_error(struct iwl_op_mode *op_mode,
+ enum iwl_fw_error_type type)
+{
+ struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+ bool trans_dead = test_bit(STATUS_TRANS_DEAD, &mld->trans->status);
+
+ if (type == IWL_ERR_TYPE_CMD_QUEUE_FULL)
+ IWL_ERR(mld, "Command queue full!\n");
+ else if (!trans_dead && !mld->fw_status.do_not_dump_once)
+ iwl_fwrt_dump_error_logs(&mld->fwrt);
+
+ mld->fw_status.do_not_dump_once = false;
+
+	/* It is necessary to abort any OS scan here because mac80211 requires
+	 * having the scan cleared before restarting.
+	 * We'll reset the scan_status to NONE in restart cleanup in
+	 * the next drv_start() call from mac80211. If ieee80211_hw_restart
+	 * isn't called, the scan status will stay busy.
+ */
+ iwl_mld_report_scan_aborted(mld);
+
+ /*
+	 * This should be the first thing done before trying to collect any
+ * data to avoid endless loops if any HW error happens while
+ * collecting debug data.
+ * It might not actually be true that we'll restart, but the
+ * setting doesn't matter if we're going to be unbound either.
+ */
+ if (type != IWL_ERR_TYPE_RESET_HS_TIMEOUT)
+ mld->fw_status.in_hw_restart = true;
+}
+
+static void iwl_mld_dump_error(struct iwl_op_mode *op_mode,
+ struct iwl_fw_error_dump_mode *mode)
+{
+ struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+
+ /* if we come in from opmode we have the mutex held */
+ if (mode->context == IWL_ERR_CONTEXT_FROM_OPMODE) {
+ lockdep_assert_wiphy(mld->wiphy);
+ iwl_fw_error_collect(&mld->fwrt);
+ } else {
+ wiphy_lock(mld->wiphy);
+ if (mode->context != IWL_ERR_CONTEXT_ABORT)
+ iwl_fw_error_collect(&mld->fwrt);
+ wiphy_unlock(mld->wiphy);
+ }
+}
+
+static bool iwl_mld_sw_reset(struct iwl_op_mode *op_mode,
+ enum iwl_fw_error_type type)
+{
+ struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+
+	/* Do restart only if the following conditions are met:
+ * - we consider the FW as running
+ * - The trigger that brought us here is defined as one that requires
+ * a restart (in the debug TLVs)
+ */
+ if (!mld->fw_status.running || !mld->fwrt.trans->dbg.restart_required)
+ return false;
+
+ iwl_mld_restart_nic(mld);
+ return true;
+}
+
+static void
+iwl_mld_time_point(struct iwl_op_mode *op_mode,
+ enum iwl_fw_ini_time_point tp_id,
+ union iwl_dbg_tlv_tp_data *tp_data)
+{
+ struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+
+ iwl_dbg_tlv_time_point(&mld->fwrt, tp_id, tp_data);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void iwl_mld_device_powered_off(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+
+ wiphy_lock(mld->wiphy);
+ mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+ iwl_mld_stop_fw(mld);
+ mld->fw_status.in_d3 = false;
+ wiphy_unlock(mld->wiphy);
+}
+#else
+static void iwl_mld_device_powered_off(struct iwl_op_mode *op_mode)
+{}
+#endif
+
+static const struct iwl_op_mode_ops iwl_mld_ops = {
+ .start = iwl_op_mode_mld_start,
+ .stop = iwl_op_mode_mld_stop,
+ .rx = iwl_mld_rx,
+ .rx_rss = iwl_mld_rx_rss,
+ .queue_full = iwl_mld_queue_full,
+ .queue_not_full = iwl_mld_queue_not_full,
+ .hw_rf_kill = iwl_mld_set_hw_rfkill_state,
+ .free_skb = iwl_mld_free_skb,
+ .nic_error = iwl_mld_nic_error,
+ .dump_error = iwl_mld_dump_error,
+ .sw_reset = iwl_mld_sw_reset,
+ .time_point = iwl_mld_time_point,
+ .device_powered_off = pm_sleep_ptr(iwl_mld_device_powered_off),
+};
+
+struct iwl_mld_mod_params iwlmld_mod_params = {
+ .power_scheme = IWL_POWER_SCHEME_BPS,
+};
+
+module_param_named(power_scheme, iwlmld_mod_params.power_scheme, int, 0444);
+MODULE_PARM_DESC(power_scheme,
+ "power management scheme: 1-active, 2-balanced, default: 2");
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.h b/drivers/net/wireless/intel/iwlwifi/mld/mld.h
new file mode 100644
index 000000000000..5eceaaf7696d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.h
@@ -0,0 +1,582 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_h__
+#define __iwl_mld_h__
+
+#include <linux/leds.h>
+#include <net/mac80211.h>
+
+#include "iwl-trans.h"
+#include "iwl-op-mode.h"
+#include "fw/runtime.h"
+#include "fw/notif-wait.h"
+#include "fw/api/commands.h"
+#include "fw/api/scan.h"
+#include "fw/api/mac-cfg.h"
+#include "fw/api/mac.h"
+#include "fw/api/phy-ctxt.h"
+#include "fw/api/datapath.h"
+#include "fw/api/rx.h"
+#include "fw/api/rs.h"
+#include "fw/api/context.h"
+#include "fw/api/coex.h"
+#include "fw/api/location.h"
+
+#include "fw/dbg.h"
+
+#include "notif.h"
+#include "scan.h"
+#include "rx.h"
+#include "thermal.h"
+#include "low_latency.h"
+#include "constants.h"
+#include "ptp.h"
+#include "time_sync.h"
+#include "ftm-initiator.h"
+
+/**
+ * DOC: Introduction
+ *
+ * iwlmld is an operation mode (a.k.a. op_mode) for Intel wireless devices.
+ * It is used for devices that ship after 2024, which typically support
+ * WiFi-7 features. MLD stands for multi-link device. Note that there are
+ * devices that do not support WiFi-7 or even WiFi 6E and yet use iwlmld, but
+ * the firmware APIs used in this driver are WiFi-7 compatible.
+ *
+ * In the architecture of iwlwifi, an op_mode is a layer that translates
+ * mac80211's APIs into commands for the firmware and, of course, notifications
+ * from the firmware to mac80211's APIs. An op_mode must implement the
+ * interface defined in iwl-op-mode.h to interact with the transport layer
+ * which allows it to send and receive data to/from the device, start the
+ * hardware, etc...
+ */
+
+/**
+ * DOC: Locking policy
+ *
+ * iwlmld has a very simple locking policy: it doesn't have any mutexes. It
+ * relies on cfg80211's wiphy->mtx and takes the lock when needed. All the
+ * control flows originating from mac80211 have already acquired the lock, so that
+ * part is trivial, but also notifications that are received from the firmware
+ * and handled asynchronously are handled only after having taken the lock.
+ * This is described in notif.c.
+ * A few spinlocks are needed to synchronize with the data path, for
+ * example around the allocation of the queues.
+ */
+
+/**
+ * DOC: Debugfs
+ *
+ * iwlmld adds its share of debugfs hooks and its handlers are synchronized
+ * with the wiphy_lock using wiphy_locked_debugfs. This avoids races against
+ * resource deletion while the debugfs hook is being used.
+ */
+
+/**
+ * DOC: Main resources
+ *
+ * iwlmld is designed with the life cycle of the resource in mind. The
+ * resources are:
+ *
+ * - struct iwl_mld (matches mac80211's struct ieee80211_hw)
+ *
+ * - struct iwl_mld_vif (matches mac80211's struct ieee80211_vif)
+ * iwl_mld_vif contains an array of pointers to struct iwl_mld_link
+ * which describe the links for this vif.
+ *
+ * - struct iwl_mld_sta (matches mac80211's struct ieee80211_sta)
+ *   iwl_mld_sta contains an array of pointers to struct iwl_mld_link_sta
+ *   which describe the link stations for this station
+ *
+ * Each object has properties that either survive a firmware reset or do not.
+ * Asynchronous firmware notifications can declare themselves as dependent on
+ * a certain instance of those resources, which means that the notifications
+ * will be cancelled once the instance is destroyed.
+ */
+
+#define IWL_MLD_MAX_ADDRESSES 5
+
+/**
+ * struct iwl_mld - MLD op mode
+ *
+ * @fw_id_to_bss_conf: maps a fw id of a link to the corresponding
+ * ieee80211_bss_conf.
+ * @fw_id_to_vif: maps a fw id of a MAC context to the corresponding
+ * ieee80211_vif. Mapping is valid only when the MAC exists in the fw.
+ * @fw_id_to_txq: maps a fw id of a txq to the corresponding
+ * ieee80211_txq.
+ * @used_phy_ids: a bitmap of the phy IDs used. If a bit is set, it means
+ * that the index of this bit is already used as a PHY id.
+ * @num_igtks: the number of iGTKs that were sent to the FW.
+ * @monitor: monitor related data
+ * @monitor.on: does a monitor vif exist (singleton hence bool)
+ * @monitor.ampdu_ref: the id of the A-MPDU for sniffer
+ * @monitor.ampdu_toggle: the state of the previous packet to track A-MPDU
+ * @monitor.cur_aid: current association id tracked by the sniffer
+ * @monitor.cur_bssid: current bssid tracked by the sniffer
+ * @monitor.p80: primary channel position relative to the whole bandwidth, in
+ * steps of 80 MHz
+ * @fw_id_to_link_sta: maps a fw id of a sta to the corresponding
+ * ieee80211_link_sta. This is not cleaned up on restart since we want to
+ * preserve the fw sta ids during a restart (for SN/PN restoring).
+ * FW ids of internal stations will be mapped to ERR_PTR, and will be
+ *	re-allocated during a restart, so make sure to free them in restart
+ * cleanup using iwl_mld_free_internal_sta
+ * @netdetect: indicates the FW is in suspend mode with netdetect configured
+ * @p2p_device_vif: points to the p2p device vif if exists
+ * @bt_is_active: indicates that BT is active
+ * @dev: pointer to device struct. For printing purposes
+ * @trans: pointer to the transport layer
+ * @cfg: pointer to the device configuration
+ * @fw: a pointer to the fw object
+ * @hw: pointer to the hw object.
+ * @wiphy: a pointer to the wiphy struct, for easier access to it.
+ * @nvm_data: pointer to the nvm_data that includes all our capabilities
+ * @fwrt: fw runtime data
+ * @debugfs_dir: debugfs directory
+ * @notif_wait: notification wait related data.
+ * @async_handlers_list: a list of all async RX handlers. When a notification
+ *	with an async handler is received, it is added to this list.
+ *	When &async_handlers_wk runs, it runs these handlers one by one.
+ * @async_handlers_lock: a lock for &async_handlers_list. Syncs
+ *	&async_handlers_wk and the RX notification path.
+ * @async_handlers_wk: A work to run all async RX handlers from
+ * &async_handlers_list.
+ * @ct_kill_exit_wk: worker to exit thermal kill
+ * @fw_status: bitmap of fw status bits
+ * @running: true if the firmware is running
+ * @do_not_dump_once: true if firmware dump must be prevented once
+ * @in_d3: indicates FW is in suspend mode and should be resumed
+ * @in_hw_restart: indicates that we are currently in the restart flow,
+ *	rather than having completed the restart. Should be cleared once
+ *	the restart is done.
+ * @radio_kill: bitmap of radio kill status
+ * @radio_kill.hw: radio is killed by hw switch
+ * @radio_kill.ct: radio is killed because the device is too hot
+ * @addresses: device MAC addresses.
+ * @scan: instance of the scan object
+ * @wowlan: WoWLAN support data.
+ * @led: the led device
+ * @mcc_src: the source id of the MCC, comes from the firmware
+ * @bios_enable_puncturing: is puncturing enabled by bios
+ * @fw_id_to_ba: maps a fw (BA) id to a corresponding Block Ack session data.
+ * @num_rx_ba_sessions: tracks the number of active Rx Block Ack (BA) sessions.
+ *	The driver ensures that new BA sessions are blocked once the maximum
+ * supported by the firmware is reached, preventing firmware asserts.
+ * @rxq_sync: manages RX queue sync state
+ * @txqs_to_add: a list of &ieee80211_txq's to allocate in &add_txqs_wk
+ * @add_txqs_wk: a worker to allocate txqs.
+ * @add_txqs_lock: to lock the &txqs_to_add list.
+ * @error_recovery_buf: pointer to the recovery buffer that will be read
+ * from firmware upon fw/hw error and sent back to the firmware in
+ * reconfig flow (after NIC reset).
+ * @mcast_filter_cmd: pointer to the multicast filter command.
+ * @mgmt_tx_ant: stores the last TX antenna index; used for setting
+ * TX rate_n_flags for non-STA mgmt frames (toggles on every TX failure).
+ * @low_latency: low-latency manager.
+ * @tzone: thermal zone device's data
+ * @cooling_dev: cooling device's related data
+ * @ibss_manager: in IBSS mode (only one vif can be active), indicates what
+ * firmware indicated about having transmitted the last beacon, i.e.
+ * being IBSS manager for that time and needing to respond to probe
+ * requests
+ * @ptp_data: data of the PTP clock
+ * @time_sync: time sync data.
+ * @ftm_initiator: FTM initiator data
+ */
+struct iwl_mld {
+ /* Add here fields that need clean up on restart */
+ struct_group(zeroed_on_hw_restart,
+ struct ieee80211_bss_conf __rcu *fw_id_to_bss_conf[IWL_FW_MAX_LINK_ID + 1];
+ struct ieee80211_vif __rcu *fw_id_to_vif[NUM_MAC_INDEX_DRIVER];
+ struct ieee80211_txq __rcu *fw_id_to_txq[IWL_MAX_TVQM_QUEUES];
+ u8 used_phy_ids: NUM_PHY_CTX;
+ u8 num_igtks;
+ struct {
+ bool on;
+ u32 ampdu_ref;
+ bool ampdu_toggle;
+ u8 p80;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ __le16 cur_aid;
+ u8 cur_bssid[ETH_ALEN];
+#endif
+ } monitor;
+#ifdef CONFIG_PM_SLEEP
+ bool netdetect;
+#endif /* CONFIG_PM_SLEEP */
+ struct ieee80211_vif *p2p_device_vif;
+ bool bt_is_active;
+ );
+ struct ieee80211_link_sta __rcu *fw_id_to_link_sta[IWL_STATION_COUNT_MAX];
+ /* And here fields that survive a fw restart */
+ struct device *dev;
+ struct iwl_trans *trans;
+ const struct iwl_cfg *cfg;
+ const struct iwl_fw *fw;
+ struct ieee80211_hw *hw;
+ struct wiphy *wiphy;
+ struct iwl_nvm_data *nvm_data;
+ struct iwl_fw_runtime fwrt;
+ struct dentry *debugfs_dir;
+ struct iwl_notif_wait_data notif_wait;
+ struct list_head async_handlers_list;
+ spinlock_t async_handlers_lock;
+ struct wiphy_work async_handlers_wk;
+ struct wiphy_delayed_work ct_kill_exit_wk;
+
+ struct {
+ u32 running:1,
+ do_not_dump_once:1,
+#ifdef CONFIG_PM_SLEEP
+ in_d3:1,
+#endif
+ in_hw_restart:1;
+
+ } fw_status;
+
+ struct {
+ u32 hw:1,
+ ct:1;
+ } radio_kill;
+
+ struct mac_address addresses[IWL_MLD_MAX_ADDRESSES];
+ struct iwl_mld_scan scan;
+#ifdef CONFIG_PM_SLEEP
+ struct wiphy_wowlan_support wowlan;
+#endif /* CONFIG_PM_SLEEP */
+#ifdef CONFIG_IWLWIFI_LEDS
+ struct led_classdev led;
+#endif
+ enum iwl_mcc_source mcc_src;
+ bool bios_enable_puncturing;
+
+ struct iwl_mld_baid_data __rcu *fw_id_to_ba[IWL_MAX_BAID];
+ u8 num_rx_ba_sessions;
+
+ struct iwl_mld_rx_queues_sync rxq_sync;
+
+ struct list_head txqs_to_add;
+ struct wiphy_work add_txqs_wk;
+ spinlock_t add_txqs_lock;
+
+ u8 *error_recovery_buf;
+ struct iwl_mcast_filter_cmd *mcast_filter_cmd;
+
+ u8 mgmt_tx_ant;
+
+ struct iwl_mld_low_latency low_latency;
+
+ bool ibss_manager;
+#ifdef CONFIG_THERMAL
+ struct thermal_zone_device *tzone;
+ struct iwl_mld_cooling_device cooling_dev;
+#endif
+
+ struct ptp_data ptp_data;
+
+ struct iwl_mld_time_sync_data __rcu *time_sync;
+
+ struct ftm_initiator_data ftm_initiator;
+};
+
+/* memset the part of the struct that requires cleanup on restart */
+#define CLEANUP_STRUCT(_ptr) \
+ memset((void *)&(_ptr)->zeroed_on_hw_restart, 0, \
+ sizeof((_ptr)->zeroed_on_hw_restart))
+
+/* Cleanup function for struct iwl_mld, will be called in restart */
+static inline void
+iwl_cleanup_mld(struct iwl_mld *mld)
+{
+ CLEANUP_STRUCT(mld);
+ CLEANUP_STRUCT(&mld->scan);
+
+#ifdef CONFIG_PM_SLEEP
+ mld->fw_status.in_d3 = false;
+#endif
+
+ iwl_mld_low_latency_restart_cleanup(mld);
+
+ /* Empty the list of async notification handlers so we won't process
+ * notifications from the dead fw after the reconfig flow.
+ */
+ iwl_mld_purge_async_handlers_list(mld);
+}
+
+enum iwl_power_scheme {
+ IWL_POWER_SCHEME_CAM = 1,
+ IWL_POWER_SCHEME_BPS,
+};
+
+/**
+ * struct iwl_mld_mod_params - module parameters for iwlmld
+ * @power_scheme: one of enum iwl_power_scheme
+ */
+struct iwl_mld_mod_params {
+ int power_scheme;
+};
+
+extern struct iwl_mld_mod_params iwlmld_mod_params;
+
+/* Extract MLD priv from op_mode */
+#define IWL_OP_MODE_GET_MLD(_iwl_op_mode) \
+ ((struct iwl_mld *)(_iwl_op_mode)->op_mode_specific)
+
+#define IWL_MAC80211_GET_MLD(_hw) \
+ IWL_OP_MODE_GET_MLD((struct iwl_op_mode *)((_hw)->priv))
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void
+iwl_mld_add_debugfs_files(struct iwl_mld *mld, struct dentry *debugfs_dir);
+#else
+static inline void
+iwl_mld_add_debugfs_files(struct iwl_mld *mld, struct dentry *debugfs_dir)
+{}
+#endif
+
+int iwl_mld_load_fw(struct iwl_mld *mld);
+void iwl_mld_stop_fw(struct iwl_mld *mld);
+int iwl_mld_start_fw(struct iwl_mld *mld);
+void iwl_mld_send_recovery_cmd(struct iwl_mld *mld, u32 flags);
+
+static inline void iwl_mld_set_ctkill(struct iwl_mld *mld, bool state)
+{
+ mld->radio_kill.ct = state;
+
+ wiphy_rfkill_set_hw_state(mld->wiphy,
+ mld->radio_kill.hw || mld->radio_kill.ct);
+}
+
+static inline void iwl_mld_set_hwkill(struct iwl_mld *mld, bool state)
+{
+ mld->radio_kill.hw = state;
+
+ wiphy_rfkill_set_hw_state(mld->wiphy,
+ mld->radio_kill.hw || mld->radio_kill.ct);
+}
+
+static inline u8 iwl_mld_get_valid_tx_ant(const struct iwl_mld *mld)
+{
+ u8 tx_ant = mld->fw->valid_tx_ant;
+
+ if (mld->nvm_data && mld->nvm_data->valid_tx_ant)
+ tx_ant &= mld->nvm_data->valid_tx_ant;
+
+ return tx_ant;
+}
+
+static inline u8 iwl_mld_get_valid_rx_ant(const struct iwl_mld *mld)
+{
+ u8 rx_ant = mld->fw->valid_rx_ant;
+
+ if (mld->nvm_data && mld->nvm_data->valid_rx_ant)
+ rx_ant &= mld->nvm_data->valid_rx_ant;
+
+ return rx_ant;
+}
+
+static inline u8 iwl_mld_nl80211_band_to_fw(enum nl80211_band band)
+{
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ return PHY_BAND_24;
+ case NL80211_BAND_5GHZ:
+ return PHY_BAND_5;
+ case NL80211_BAND_6GHZ:
+ return PHY_BAND_6;
+ default:
+ WARN_ONCE(1, "Unsupported band (%u)\n", band);
+ return PHY_BAND_5;
+ }
+}
+
+static inline u8 iwl_mld_phy_band_to_nl80211(u8 phy_band)
+{
+ switch (phy_band) {
+ case PHY_BAND_24:
+ return NL80211_BAND_2GHZ;
+ case PHY_BAND_5:
+ return NL80211_BAND_5GHZ;
+ case PHY_BAND_6:
+ return NL80211_BAND_6GHZ;
+ default:
+ WARN_ONCE(1, "Unsupported phy band (%u)\n", phy_band);
+ return NL80211_BAND_5GHZ;
+ }
+}
+
+static inline int
+iwl_mld_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags,
+ enum nl80211_band band)
+{
+ int format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+ int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
+ bool is_lb = band == NL80211_BAND_2GHZ;
+
+ if (format == RATE_MCS_LEGACY_OFDM_MSK)
+ return is_lb ? rate + IWL_FIRST_OFDM_RATE : rate;
+
+ /* CCK is not allowed in 5 GHz */
+ return is_lb ? rate : -1;
+}
+
+extern const struct ieee80211_ops iwl_mld_hw_ops;
+
+/**
+ * enum iwl_rx_handler_context: context for Rx handler
+ * @RX_HANDLER_SYNC: this means that it will be called in the Rx path
+ * which can't acquire the wiphy->mutex.
+ * @RX_HANDLER_ASYNC: If the handler needs to hold wiphy->mutex
+ * (and only in this case!), it should be set as ASYNC. In that case,
+ * it will be called from a worker with wiphy->mutex held.
+ */
+enum iwl_rx_handler_context {
+ RX_HANDLER_SYNC,
+ RX_HANDLER_ASYNC,
+};
+
+/**
+ * struct iwl_rx_handler: handler for FW notification
+ * @val_fn: input validation function.
+ * @sizes: an array that maps a version to the expected size.
+ * @fn: the function is called when notification is handled
+ * @cmd_id: command id
+ * @n_sizes: number of elements in &sizes.
+ * @context: see &iwl_rx_handler_context
+ * @obj_type: the type of the object that this handler is related to.
+ * See &iwl_mld_object_type. Use IWL_MLD_OBJECT_TYPE_NONE if not related.
+ * @cancel: function to cancel the notification. Valid only if obj_type is not
+ * IWL_MLD_OBJECT_TYPE_NONE.
+ */
+struct iwl_rx_handler {
+ union {
+ bool (*val_fn)(struct iwl_mld *mld, struct iwl_rx_packet *pkt);
+ const struct iwl_notif_struct_size *sizes;
+ };
+ void (*fn)(struct iwl_mld *mld, struct iwl_rx_packet *pkt);
+ u16 cmd_id;
+ u8 n_sizes;
+ u8 context;
+ enum iwl_mld_object_type obj_type;
+ bool (*cancel)(struct iwl_mld *mld, struct iwl_rx_packet *pkt,
+ u32 obj_id);
+};
+
+/**
+ * struct iwl_notif_struct_size: map a notif ver to the expected size
+ *
+ * @size: the size to expect
+ * @ver: the version of the notification
+ */
+struct iwl_notif_struct_size {
+ u32 size:24, ver:8;
+};
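+
+/*
+ * Illustrative example (the struct names are hypothetical): a notification
+ * with two known versions would be described by an array such as
+ *
+ *	static const struct iwl_notif_struct_size example_sizes[] = {
+ *		{ .size = sizeof(struct iwl_example_notif_v1), .ver = 1 },
+ *		{ .size = sizeof(struct iwl_example_notif_v2), .ver = 2 },
+ *	};
+ *
+ * In practice such arrays are generated by the CMD_VERSIONS()/CMD_VER_ENTRY()
+ * macros in notif.c rather than written by hand.
+ */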
+
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+extern const struct iwl_hcmd_arr iwl_mld_groups[];
+extern const unsigned int global_iwl_mld_goups_size;
+extern const struct iwl_rx_handler iwl_mld_rx_handlers[];
+extern const unsigned int iwl_mld_rx_handlers_num;
+
+bool
+iwl_mld_is_dup(struct iwl_mld *mld, struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr,
+ const struct iwl_rx_mpdu_desc *mpdu_desc,
+ struct ieee80211_rx_status *rx_status, int queue);
+
+void iwl_construct_mld(struct iwl_mld *mld, struct iwl_trans *trans,
+ const struct iwl_cfg *cfg, const struct iwl_fw *fw,
+ struct ieee80211_hw *hw, struct dentry *dbgfs_dir);
+#endif
+
+#define IWL_MLD_INVALID_FW_ID 0xff
+
+#define IWL_MLD_ALLOC_FN(_type, _mac80211_type) \
+static int \
+iwl_mld_allocate_##_type##_fw_id(struct iwl_mld *mld, \
+ u8 *fw_id, \
+ struct ieee80211_##_mac80211_type *mac80211_ptr) \
+{ \
+ u8 rand = IWL_MLD_DIS_RANDOM_FW_ID ? 0 : get_random_u8(); \
+ u8 arr_sz = ARRAY_SIZE(mld->fw_id_to_##_mac80211_type); \
+ if (__builtin_types_compatible_p(typeof(*mac80211_ptr), \
+ struct ieee80211_link_sta)) \
+ arr_sz = mld->fw->ucode_capa.num_stations; \
+ if (__builtin_types_compatible_p(typeof(*mac80211_ptr), \
+ struct ieee80211_bss_conf)) \
+ arr_sz = mld->fw->ucode_capa.num_links; \
+ for (int i = 0; i < arr_sz; i++) { \
+ u8 idx = (i + rand) % arr_sz; \
+ if (rcu_access_pointer(mld->fw_id_to_##_mac80211_type[idx])) \
+ continue; \
+ IWL_DEBUG_INFO(mld, "Allocated at index %d / %d\n", idx, arr_sz); \
+ *fw_id = idx; \
+ rcu_assign_pointer(mld->fw_id_to_##_mac80211_type[idx], mac80211_ptr); \
+ return 0; \
+ } \
+ return -ENOSPC; \
+}
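+
+/*
+ * For illustration only (the instantiation below is an example; the real
+ * ones live next to their users): expanding
+ *
+ *	IWL_MLD_ALLOC_FN(link, bss_conf)
+ *
+ * generates iwl_mld_allocate_link_fw_id(), which scans mld->fw_id_to_bss_conf
+ * starting from a random offset (unless IWL_MLD_DIS_RANDOM_FW_ID is set),
+ * publishes the pointer with rcu_assign_pointer() and returns the chosen fw
+ * id, or -ENOSPC when no free index is left.
+ */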
+
+static inline struct ieee80211_bss_conf *
+iwl_mld_fw_id_to_link_conf(struct iwl_mld *mld, u8 fw_link_id)
+{
+ if (IWL_FW_CHECK(mld, fw_link_id >= mld->fw->ucode_capa.num_links,
+ "Invalid fw_link_id: %d\n", fw_link_id))
+ return NULL;
+
+ return wiphy_dereference(mld->wiphy,
+ mld->fw_id_to_bss_conf[fw_link_id]);
+}
+
+#define MSEC_TO_TU(_msec) ((_msec) * 1000 / 1024)
+
+void iwl_mld_add_vif_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+void iwl_mld_add_link_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct dentry *dir);
+void iwl_mld_add_link_sta_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ struct dentry *dir);
+
+/* Utilities */
+
+static inline u8 iwl_mld_mac80211_ac_to_fw_tx_fifo(enum ieee80211_ac_numbers ac)
+{
+ static const u8 mac80211_ac_to_fw_tx_fifo[] = {
+ IWL_BZ_EDCA_TX_FIFO_VO,
+ IWL_BZ_EDCA_TX_FIFO_VI,
+ IWL_BZ_EDCA_TX_FIFO_BE,
+ IWL_BZ_EDCA_TX_FIFO_BK,
+ IWL_BZ_TRIG_TX_FIFO_VO,
+ IWL_BZ_TRIG_TX_FIFO_VI,
+ IWL_BZ_TRIG_TX_FIFO_BE,
+ IWL_BZ_TRIG_TX_FIFO_BK,
+ };
+ return mac80211_ac_to_fw_tx_fifo[ac];
+}
+
+static inline u32
+iwl_mld_get_lmac_id(struct iwl_mld *mld, enum nl80211_band band)
+{
+ if (!fw_has_capa(&mld->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CDB_SUPPORT) ||
+ band == NL80211_BAND_2GHZ)
+ return IWL_LMAC_24G_INDEX;
+ return IWL_LMAC_5G_INDEX;
+}
+
+/* Check if we had an error, but reconfig flow didn't start yet */
+static inline bool iwl_mld_error_before_recovery(struct iwl_mld *mld)
+{
+ return mld->fw_status.in_hw_restart &&
+ !iwl_trans_fw_running(mld->trans);
+}
+
+int iwl_mld_tdls_sta_count(struct iwl_mld *mld);
+
+#endif /* __iwl_mld_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mlo.c b/drivers/net/wireless/intel/iwlwifi/mld/mlo.c
new file mode 100644
index 000000000000..a870e169e265
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mlo.c
@@ -0,0 +1,1076 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include "mlo.h"
+#include "phy.h"
+
+/* Block reasons helper */
+#define HANDLE_EMLSR_BLOCKED_REASONS(HOW) \
+ HOW(PREVENTION) \
+ HOW(WOWLAN) \
+ HOW(ROC) \
+ HOW(NON_BSS) \
+ HOW(TMP_NON_BSS) \
+ HOW(TPT)
+
+static const char *
+iwl_mld_get_emlsr_blocked_string(enum iwl_mld_emlsr_blocked blocked)
+{
+ /* Using switch without "default" will warn about missing entries */
+ switch (blocked) {
+#define REASON_CASE(x) case IWL_MLD_EMLSR_BLOCKED_##x: return #x;
+ HANDLE_EMLSR_BLOCKED_REASONS(REASON_CASE)
+#undef REASON_CASE
+ }
+
+ return "ERROR";
+}
+
+static void iwl_mld_print_emlsr_blocked(struct iwl_mld *mld, u32 mask)
+{
+#define NAME_FMT(x) "%s"
+#define NAME_PR(x) (mask & IWL_MLD_EMLSR_BLOCKED_##x) ? "[" #x "]" : "",
+ IWL_DEBUG_INFO(mld,
+ "EMLSR blocked = " HANDLE_EMLSR_BLOCKED_REASONS(NAME_FMT)
+ " (0x%x)\n",
+ HANDLE_EMLSR_BLOCKED_REASONS(NAME_PR)
+ mask);
+#undef NAME_FMT
+#undef NAME_PR
+}
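+
+/*
+ * Note on the X-macro above: it expands the format string to one "%s" per
+ * blocked reason and the argument list to either "[REASON]" or "" per reason,
+ * so a mask with e.g. the WOWLAN and TPT bits set is printed as
+ * "EMLSR blocked = [WOWLAN][TPT] (0x...)", with the raw mask in the trailing
+ * hex value.
+ */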
+
+/* Exit reasons helper */
+#define HANDLE_EMLSR_EXIT_REASONS(HOW) \
+ HOW(BLOCK) \
+ HOW(MISSED_BEACON) \
+ HOW(FAIL_ENTRY) \
+ HOW(CSA) \
+ HOW(EQUAL_BAND) \
+ HOW(LOW_RSSI) \
+ HOW(LINK_USAGE) \
+ HOW(BT_COEX) \
+ HOW(CHAN_LOAD) \
+ HOW(RFI) \
+ HOW(FW_REQUEST)
+
+static const char *
+iwl_mld_get_emlsr_exit_string(enum iwl_mld_emlsr_exit exit)
+{
+ /* Using switch without "default" will warn about missing entries */
+ switch (exit) {
+#define REASON_CASE(x) case IWL_MLD_EMLSR_EXIT_##x: return #x;
+ HANDLE_EMLSR_EXIT_REASONS(REASON_CASE)
+#undef REASON_CASE
+ }
+
+ return "ERROR";
+}
+
+static void iwl_mld_print_emlsr_exit(struct iwl_mld *mld, u32 mask)
+{
+#define NAME_FMT(x) "%s"
+#define NAME_PR(x) (mask & IWL_MLD_EMLSR_EXIT_##x) ? "[" #x "]" : "",
+ IWL_DEBUG_INFO(mld,
+ "EMLSR exit = " HANDLE_EMLSR_EXIT_REASONS(NAME_FMT)
+ " (0x%x)\n",
+ HANDLE_EMLSR_EXIT_REASONS(NAME_PR)
+ mask);
+#undef NAME_FMT
+#undef NAME_PR
+}
+
+void iwl_mld_emlsr_prevent_done_wk(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+ struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
+ emlsr.prevent_done_wk.work);
+ struct ieee80211_vif *vif =
+ container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);
+
+ if (WARN_ON(!(mld_vif->emlsr.blocked_reasons &
+ IWL_MLD_EMLSR_BLOCKED_PREVENTION)))
+ return;
+
+ iwl_mld_unblock_emlsr(mld_vif->mld, vif,
+ IWL_MLD_EMLSR_BLOCKED_PREVENTION);
+}
+
+void iwl_mld_emlsr_tmp_non_bss_done_wk(struct wiphy *wiphy,
+ struct wiphy_work *wk)
+{
+ struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
+ emlsr.tmp_non_bss_done_wk.work);
+ struct ieee80211_vif *vif =
+ container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);
+
+ if (WARN_ON(!(mld_vif->emlsr.blocked_reasons &
+ IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS)))
+ return;
+
+ iwl_mld_unblock_emlsr(mld_vif->mld, vif,
+ IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS);
+}
+
+#define IWL_MLD_TRIGGER_LINK_SEL_TIME (HZ * IWL_MLD_TRIGGER_LINK_SEL_TIME_SEC)
+#define IWL_MLD_SCAN_EXPIRE_TIME (HZ * IWL_MLD_SCAN_EXPIRE_TIME_SEC)
+
+/* Exit reasons that can cause longer EMLSR prevention */
+#define IWL_MLD_PREVENT_EMLSR_REASONS (IWL_MLD_EMLSR_EXIT_MISSED_BEACON | \
+ IWL_MLD_EMLSR_EXIT_LINK_USAGE | \
+ IWL_MLD_EMLSR_EXIT_FW_REQUEST)
+#define IWL_MLD_PREVENT_EMLSR_TIMEOUT (HZ * 400)
+
+#define IWL_MLD_EMLSR_PREVENT_SHORT (HZ * 300)
+#define IWL_MLD_EMLSR_PREVENT_LONG (HZ * 600)
+
+static void iwl_mld_check_emlsr_prevention(struct iwl_mld *mld,
+ struct iwl_mld_vif *mld_vif,
+ enum iwl_mld_emlsr_exit reason)
+{
+ unsigned long delay;
+
+ /*
+ * Reset the counter if more than 400 seconds have passed between one
+ * exit and the other, or if we exited due to a different reason.
+ * Will also reset the counter after the long prevention is done.
+ */
+ if (time_after(jiffies, mld_vif->emlsr.last_exit_ts +
+ IWL_MLD_PREVENT_EMLSR_TIMEOUT) ||
+ mld_vif->emlsr.last_exit_reason != reason)
+ mld_vif->emlsr.exit_repeat_count = 0;
+
+ mld_vif->emlsr.last_exit_reason = reason;
+ mld_vif->emlsr.last_exit_ts = jiffies;
+ mld_vif->emlsr.exit_repeat_count++;
+
+ /*
+ * Do not add a prevention when the reason was a block. For a block,
+ * EMLSR will be enabled again on unblock.
+ */
+ if (reason == IWL_MLD_EMLSR_EXIT_BLOCK)
+ return;
+
+ /* Set prevention for a minimum of 30 seconds */
+ mld_vif->emlsr.blocked_reasons |= IWL_MLD_EMLSR_BLOCKED_PREVENTION;
+ delay = IWL_MLD_TRIGGER_LINK_SEL_TIME;
+
+ /* Handle repeats for reasons that can cause long prevention */
+ if (mld_vif->emlsr.exit_repeat_count > 1 &&
+ reason & IWL_MLD_PREVENT_EMLSR_REASONS) {
+ if (mld_vif->emlsr.exit_repeat_count == 2)
+ delay = IWL_MLD_EMLSR_PREVENT_SHORT;
+ else
+ delay = IWL_MLD_EMLSR_PREVENT_LONG;
+
+ /*
+ * The timeouts are chosen so that this will not happen, i.e.
+ * IWL_MLD_EMLSR_PREVENT_LONG > IWL_MLD_PREVENT_EMLSR_TIMEOUT
+ */
+ WARN_ON(mld_vif->emlsr.exit_repeat_count > 3);
+ }
+
+ IWL_DEBUG_INFO(mld,
+ "Preventing EMLSR for %ld seconds due to %u exits with the reason = %s (0x%x)\n",
+ delay / HZ, mld_vif->emlsr.exit_repeat_count,
+ iwl_mld_get_emlsr_exit_string(reason), reason);
+
+ wiphy_delayed_work_queue(mld->wiphy,
+ &mld_vif->emlsr.prevent_done_wk, delay);
+}
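+
+/*
+ * Summary of the escalation implemented above: the first exit arms a
+ * prevention of IWL_MLD_TRIGGER_LINK_SEL_TIME (30 seconds, per the comment in
+ * the function); a repeated exit within 400 seconds for one of the
+ * IWL_MLD_PREVENT_EMLSR_REASONS arms IWL_MLD_EMLSR_PREVENT_SHORT (300 s), and
+ * a third such exit arms IWL_MLD_EMLSR_PREVENT_LONG (600 s). Since the long
+ * prevention outlasts the 400 s reset window, the repeat counter should never
+ * exceed 3, hence the WARN_ON above.
+ */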
+
+static void iwl_mld_clear_avg_chan_load_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ void *dat)
+{
+ struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);
+
+ /* It is ok to do it for all chanctx (and not only for the ones that
+ * belong to the EMLSR vif) since EMLSR is not allowed if there is
+ * another vif.
+ */
+ phy->avg_channel_load_not_by_us = 0;
+}
+
+static int _iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ enum iwl_mld_emlsr_exit exit, u8 link_to_keep,
+ bool sync)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ u16 new_active_links;
+ int ret = 0;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* On entry failure need to exit anyway, even if entered from debugfs */
+ if (exit != IWL_MLD_EMLSR_EXIT_FAIL_ENTRY && !IWL_MLD_AUTO_EML_ENABLE)
+ return 0;
+
+ /* Ignore exit request if EMLSR is not active */
+ if (!iwl_mld_emlsr_active(vif))
+ return 0;
+
+ if (WARN_ON(!ieee80211_vif_is_mld(vif) || !mld_vif->authorized))
+ return 0;
+
+ if (WARN_ON(!(vif->active_links & BIT(link_to_keep))))
+ link_to_keep = __ffs(vif->active_links);
+
+ new_active_links = BIT(link_to_keep);
+ IWL_DEBUG_INFO(mld,
+ "Exiting EMLSR. reason = %s (0x%x). Current active links=0x%x, new active links = 0x%x\n",
+ iwl_mld_get_emlsr_exit_string(exit), exit,
+ vif->active_links, new_active_links);
+
+ if (sync)
+ ret = ieee80211_set_active_links(vif, new_active_links);
+ else
+ ieee80211_set_active_links_async(vif, new_active_links);
+
+ /* Update latest exit reason and check EMLSR prevention */
+ iwl_mld_check_emlsr_prevention(mld, mld_vif, exit);
+
+ /* channel_load_not_by_us is invalid when in EMLSR.
+ * Clear it so wrong values won't be used.
+ */
+ ieee80211_iter_chan_contexts_atomic(mld->hw,
+ iwl_mld_clear_avg_chan_load_iter,
+ NULL);
+
+ return ret;
+}
+
+void iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ enum iwl_mld_emlsr_exit exit, u8 link_to_keep)
+{
+ _iwl_mld_exit_emlsr(mld, vif, exit, link_to_keep, false);
+}
+
+static int _iwl_mld_emlsr_block(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ enum iwl_mld_emlsr_blocked reason,
+ u8 link_to_keep, bool sync)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif))
+ return 0;
+
+ if (mld_vif->emlsr.blocked_reasons & reason)
+ return 0;
+
+ mld_vif->emlsr.blocked_reasons |= reason;
+
+ IWL_DEBUG_INFO(mld,
+ "Blocking EMLSR mode. reason = %s (0x%x)\n",
+ iwl_mld_get_emlsr_blocked_string(reason), reason);
+ iwl_mld_print_emlsr_blocked(mld, mld_vif->emlsr.blocked_reasons);
+
+ if (reason == IWL_MLD_EMLSR_BLOCKED_TPT)
+ wiphy_delayed_work_cancel(mld_vif->mld->wiphy,
+ &mld_vif->emlsr.check_tpt_wk);
+
+ return _iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_BLOCK,
+ link_to_keep, sync);
+}
+
+void iwl_mld_block_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ enum iwl_mld_emlsr_blocked reason, u8 link_to_keep)
+{
+ _iwl_mld_emlsr_block(mld, vif, reason, link_to_keep, false);
+}
+
+int iwl_mld_block_emlsr_sync(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ enum iwl_mld_emlsr_blocked reason, u8 link_to_keep)
+{
+ return _iwl_mld_emlsr_block(mld, vif, reason, link_to_keep, true);
+}
+
+static void _iwl_mld_select_links(struct iwl_mld *mld,
+ struct ieee80211_vif *vif);
+
+void iwl_mld_unblock_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ enum iwl_mld_emlsr_blocked reason)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif))
+ return;
+
+ if (!(mld_vif->emlsr.blocked_reasons & reason))
+ return;
+
+ mld_vif->emlsr.blocked_reasons &= ~reason;
+
+ IWL_DEBUG_INFO(mld,
+ "Unblocking EMLSR mode. reason = %s (0x%x)\n",
+ iwl_mld_get_emlsr_blocked_string(reason), reason);
+ iwl_mld_print_emlsr_blocked(mld, mld_vif->emlsr.blocked_reasons);
+
+ if (reason == IWL_MLD_EMLSR_BLOCKED_TPT)
+ wiphy_delayed_work_queue(mld_vif->mld->wiphy,
+ &mld_vif->emlsr.check_tpt_wk,
+ round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));
+
+ if (mld_vif->emlsr.blocked_reasons)
+ return;
+
+ IWL_DEBUG_INFO(mld, "EMLSR is unblocked\n");
+ iwl_mld_int_mlo_scan(mld, vif);
+}
+
+static void
+iwl_mld_vif_iter_emlsr_mode_notif(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_esr_mode_notif *notif = (void *)data;
+
+ if (!iwl_mld_vif_has_emlsr_cap(vif))
+ return;
+
+ switch (le32_to_cpu(notif->action)) {
+ case ESR_RECOMMEND_LEAVE:
+ iwl_mld_exit_emlsr(mld_vif->mld, vif,
+ IWL_MLD_EMLSR_EXIT_FW_REQUEST,
+ iwl_mld_get_primary_link(vif));
+ break;
+ case ESR_RECOMMEND_ENTER:
+ case ESR_FORCE_LEAVE:
+ default:
+ IWL_WARN(mld_vif->mld, "Unexpected EMLSR notification: %d\n",
+ le32_to_cpu(notif->action));
+ }
+}
+
+void iwl_mld_handle_emlsr_mode_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_vif_iter_emlsr_mode_notif,
+ pkt->data);
+}
+
+static void
+iwl_mld_vif_iter_disconnect_emlsr(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ if (!iwl_mld_vif_has_emlsr_cap(vif))
+ return;
+
+ ieee80211_connection_loss(vif);
+}
+
+void iwl_mld_handle_emlsr_trans_fail_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ const struct iwl_esr_trans_fail_notif *notif = (const void *)pkt->data;
+ u32 fw_link_id = le32_to_cpu(notif->link_id);
+ struct ieee80211_bss_conf *bss_conf =
+ iwl_mld_fw_id_to_link_conf(mld, fw_link_id);
+
+ IWL_DEBUG_INFO(mld, "Failed to %s EMLSR on link %d (FW: %d), reason %d\n",
+ le32_to_cpu(notif->activation) ? "enter" : "exit",
+ bss_conf ? bss_conf->link_id : -1,
+ le32_to_cpu(notif->link_id),
+ le32_to_cpu(notif->err_code));
+
+ if (IWL_FW_CHECK(mld, !bss_conf,
+ "FW reported failure to %sactivate EMLSR on a non-existing link: %d\n",
+ le32_to_cpu(notif->activation) ? "" : "de",
+ fw_link_id)) {
+ ieee80211_iterate_active_interfaces_mtx(
+ mld->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_vif_iter_disconnect_emlsr, NULL);
+ return;
+ }
+
+ /* Disconnect if we failed to deactivate a link */
+ if (!le32_to_cpu(notif->activation)) {
+ ieee80211_connection_loss(bss_conf->vif);
+ return;
+ }
+
+ /*
+ * We failed to activate the second link, go back to the link specified
+ * by the firmware as that is the one that is still valid now.
+ */
+ iwl_mld_exit_emlsr(mld, bss_conf->vif, IWL_MLD_EMLSR_EXIT_FAIL_ENTRY,
+ bss_conf->link_id);
+}
+
+/* Active non-station link tracking */
+static void iwl_mld_count_non_bss_links(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int *count = _data;
+
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION)
+ return;
+
+ *count += iwl_mld_count_active_links(mld_vif->mld, vif);
+}
+
+struct iwl_mld_update_emlsr_block_data {
+ bool block;
+ int result;
+};
+
+static void
+iwl_mld_vif_iter_update_emlsr_non_bss_block(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_update_emlsr_block_data *data = _data;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int ret;
+
+ if (data->block) {
+ ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif,
+ IWL_MLD_EMLSR_BLOCKED_NON_BSS,
+ iwl_mld_get_primary_link(vif));
+ if (ret)
+ data->result = ret;
+ } else {
+ iwl_mld_unblock_emlsr(mld_vif->mld, vif,
+ IWL_MLD_EMLSR_BLOCKED_NON_BSS);
+ }
+}
+
+int iwl_mld_emlsr_check_non_bss_block(struct iwl_mld *mld,
+ int pending_link_changes)
+{
+ /* An active link of a non-station vif blocks EMLSR. Upon activation
+ * block EMLSR on the bss vif. Upon deactivation, check if this link
+ * was the last non-station link active, and if so unblock the bss vif
+ */
+ struct iwl_mld_update_emlsr_block_data block_data = {};
+ int count = pending_link_changes;
+
+ /* No need to count if we are activating a non-BSS link */
+ if (count <= 0)
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_count_non_bss_links,
+ &count);
+
+ /*
+	 * We could skip the update if the blocking state did not change (and
+	 * pending_link_changes is non-zero).
+ */
+ block_data.block = !!count;
+
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_vif_iter_update_emlsr_non_bss_block,
+ &block_data);
+
+ return block_data.result;
+}
+
+#define EMLSR_SEC_LINK_MIN_PERC 10
+#define EMLSR_MIN_TX 3000
+#define EMLSR_MIN_RX 400
+
+void iwl_mld_emlsr_check_tpt(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+ struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
+ emlsr.check_tpt_wk.work);
+ struct ieee80211_vif *vif =
+ container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);
+ struct iwl_mld *mld = mld_vif->mld;
+ struct iwl_mld_sta *mld_sta;
+ struct iwl_mld_link *sec_link;
+ unsigned long total_tx = 0, total_rx = 0;
+ unsigned long sec_link_tx = 0, sec_link_rx = 0;
+ u8 sec_link_tx_perc, sec_link_rx_perc;
+ s8 sec_link_id;
+
+ if (!iwl_mld_vif_has_emlsr_cap(vif) || !mld_vif->ap_sta)
+ return;
+
+ mld_sta = iwl_mld_sta_from_mac80211(mld_vif->ap_sta);
+
+ /* We only count for the AP sta in a MLO connection */
+ if (!mld_sta->mpdu_counters)
+ return;
+
+	/* This worker should only run when the TPT blocker isn't set.
+	 * When the blocker is set, the decision to remove it, as well as
+	 * clearing the counters, is done in the data path (to avoid running
+	 * a worker every 5 seconds when idle; when the blocker is unset, we
+	 * are not idle anyway).
+	 */
+ if (WARN_ON(mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT))
+ return;
+ /*
+	 * TPT is unblocked, need to check if the TPT criteria are still met.
+	 *
+	 * If EMLSR is active, then we also need to check the secondary link
+	 * requirements.
+ */
+ if (iwl_mld_emlsr_active(vif)) {
+ sec_link_id = iwl_mld_get_other_link(vif, iwl_mld_get_primary_link(vif));
+ sec_link = iwl_mld_link_dereference_check(mld_vif, sec_link_id);
+ if (WARN_ON_ONCE(!sec_link))
+ return;
+ /* We need the FW ID here */
+ sec_link_id = sec_link->fw_id;
+ } else {
+ sec_link_id = -1;
+ }
+
+ /* Sum up RX and TX MPDUs from the different queues/links */
+ for (int q = 0; q < mld->trans->num_rx_queues; q++) {
+ struct iwl_mld_per_q_mpdu_counter *queue_counter =
+ &mld_sta->mpdu_counters[q];
+
+ spin_lock_bh(&queue_counter->lock);
+
+		/* Link IDs that don't exist will contain 0 */
+ for (int link = 0;
+ link < ARRAY_SIZE(queue_counter->per_link);
+ link++) {
+ total_tx += queue_counter->per_link[link].tx;
+ total_rx += queue_counter->per_link[link].rx;
+ }
+
+ if (sec_link_id != -1) {
+ sec_link_tx += queue_counter->per_link[sec_link_id].tx;
+ sec_link_rx += queue_counter->per_link[sec_link_id].rx;
+ }
+
+ memset(queue_counter->per_link, 0,
+ sizeof(queue_counter->per_link));
+
+ spin_unlock_bh(&queue_counter->lock);
+ }
+
+ IWL_DEBUG_INFO(mld, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n",
+ total_tx, total_rx);
+
+ /* If we don't have enough MPDUs - exit EMLSR */
+ if (total_tx < IWL_MLD_ENTER_EMLSR_TPT_THRESH &&
+ total_rx < IWL_MLD_ENTER_EMLSR_TPT_THRESH) {
+ iwl_mld_block_emlsr(mld, vif, IWL_MLD_EMLSR_BLOCKED_TPT,
+ iwl_mld_get_primary_link(vif));
+ return;
+ }
+
+ /* EMLSR is not active */
+ if (sec_link_id == -1)
+ return;
+
+ IWL_DEBUG_INFO(mld, "Secondary Link %d: Tx MPDUs: %ld. Rx MPDUs: %ld\n",
+ sec_link_id, sec_link_tx, sec_link_rx);
+
+ /* Calculate the percentage of the secondary link TX/RX */
+ sec_link_tx_perc = total_tx ? sec_link_tx * 100 / total_tx : 0;
+ sec_link_rx_perc = total_rx ? sec_link_rx * 100 / total_rx : 0;
+
+ /*
+	 * The TX/RX percentage is checked only if the total count exceeds the
+	 * required minimum. In addition, RX is checked only if the TX check
+	 * did not already trigger an exit.
+ */
+ if ((total_tx > EMLSR_MIN_TX &&
+ sec_link_tx_perc < EMLSR_SEC_LINK_MIN_PERC) ||
+ (total_rx > EMLSR_MIN_RX &&
+ sec_link_rx_perc < EMLSR_SEC_LINK_MIN_PERC)) {
+ iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_LINK_USAGE,
+ iwl_mld_get_primary_link(vif));
+ return;
+ }
+
+ /* Check again when the next window ends */
+ wiphy_delayed_work_queue(mld_vif->mld->wiphy,
+ &mld_vif->emlsr.check_tpt_wk,
+ round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));
+}
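+
+/*
+ * Worked example for the checks above (numbers are illustrative): with
+ * total_tx = 4000 MPDUs in the window (above EMLSR_MIN_TX = 3000) and
+ * sec_link_tx = 200, the secondary link carried 5% of the TX traffic, below
+ * EMLSR_SEC_LINK_MIN_PERC (10%), so EMLSR is exited with
+ * IWL_MLD_EMLSR_EXIT_LINK_USAGE. Had both total_tx and total_rx been below
+ * IWL_MLD_ENTER_EMLSR_TPT_THRESH, the TPT blocker would have been set
+ * instead.
+ */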
+
+void iwl_mld_emlsr_unblock_tpt_wk(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+ struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
+ emlsr.unblock_tpt_wk);
+ struct ieee80211_vif *vif =
+ container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);
+
+ iwl_mld_unblock_emlsr(mld_vif->mld, vif, IWL_MLD_EMLSR_BLOCKED_TPT);
+}
+
+/*
+ * Link selection
+ */
+
+s8 iwl_mld_get_emlsr_rssi_thresh(struct iwl_mld *mld,
+ const struct cfg80211_chan_def *chandef,
+ bool low)
+{
+ if (WARN_ON(chandef->chan->band != NL80211_BAND_2GHZ &&
+ chandef->chan->band != NL80211_BAND_5GHZ &&
+ chandef->chan->band != NL80211_BAND_6GHZ))
+ return S8_MAX;
+
+#define RSSI_THRESHOLD(_low, _bw) \
+ (_low) ? IWL_MLD_LOW_RSSI_THRESH_##_bw##MHZ \
+ : IWL_MLD_HIGH_RSSI_THRESH_##_bw##MHZ
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ /* 320 MHz has the same thresholds as 20 MHz */
+ case NL80211_CHAN_WIDTH_320:
+ return RSSI_THRESHOLD(low, 20);
+ case NL80211_CHAN_WIDTH_40:
+ return RSSI_THRESHOLD(low, 40);
+ case NL80211_CHAN_WIDTH_80:
+ return RSSI_THRESHOLD(low, 80);
+ case NL80211_CHAN_WIDTH_160:
+ return RSSI_THRESHOLD(low, 160);
+ default:
+ WARN_ON(1);
+ return S8_MAX;
+ }
+#undef RSSI_THRESHOLD
+}
+
+static u32
+iwl_mld_emlsr_disallowed_with_link(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mld_link_sel_data *link,
+ bool primary)
+{
+ struct wiphy *wiphy = mld->wiphy;
+ struct ieee80211_bss_conf *conf;
+ enum iwl_mld_emlsr_exit ret = 0;
+
+ conf = wiphy_dereference(wiphy, vif->link_conf[link->link_id]);
+ if (WARN_ON_ONCE(!conf))
+ return false;
+
+ if (link->chandef->chan->band == NL80211_BAND_2GHZ && mld->bt_is_active)
+ ret |= IWL_MLD_EMLSR_EXIT_BT_COEX;
+
+ if (link->signal <
+ iwl_mld_get_emlsr_rssi_thresh(mld, link->chandef, false))
+ ret |= IWL_MLD_EMLSR_EXIT_LOW_RSSI;
+
+ if (conf->csa_active)
+ ret |= IWL_MLD_EMLSR_EXIT_CSA;
+
+ if (ret) {
+ IWL_DEBUG_INFO(mld,
+ "Link %d is not allowed for EMLSR as %s\n",
+ link->link_id,
+ primary ? "primary" : "secondary");
+ iwl_mld_print_emlsr_exit(mld, ret);
+ }
+
+ return ret;
+}
+
+static u8
+iwl_mld_set_link_sel_data(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mld_link_sel_data *data,
+ unsigned long usable_links,
+ u8 *best_link_idx)
+{
+ u8 n_data = 0;
+ u16 max_grade = 0;
+ unsigned long link_id;
+
+ /*
+ * TODO: don't select links that weren't discovered in the last scan
+ * This requires mac80211 (or cfg80211) changes to forward/track when
+ * a BSS was last updated. cfg80211 already tracks this information but
+ * it is not exposed within the kernel.
+ */
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+
+ if (WARN_ON_ONCE(!link_conf))
+ continue;
+
+ /* Ignore any BSS that was not seen in the last MLO scan */
+ if (ktime_before(link_conf->bss->ts_boottime,
+ mld->scan.last_mlo_scan_time))
+ continue;
+
+ data[n_data].link_id = link_id;
+ data[n_data].chandef = &link_conf->chanreq.oper;
+ data[n_data].signal = MBM_TO_DBM(link_conf->bss->signal);
+ data[n_data].grade = iwl_mld_get_link_grade(mld, link_conf);
+
+ if (n_data == 0 || data[n_data].grade > max_grade) {
+ max_grade = data[n_data].grade;
+ *best_link_idx = n_data;
+ }
+ n_data++;
+ }
+
+ return n_data;
+}
+
+static u32
+iwl_mld_get_min_chan_load_thresh(struct ieee80211_chanctx_conf *chanctx)
+{
+ const struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(chanctx);
+
+ switch (phy->chandef.width) {
+ case NL80211_CHAN_WIDTH_320:
+ case NL80211_CHAN_WIDTH_160:
+ return 5;
+ case NL80211_CHAN_WIDTH_80:
+ return 7;
+ default:
+ break;
+ }
+ return 10;
+}
+
+VISIBLE_IF_IWLWIFI_KUNIT bool
+iwl_mld_channel_load_allows_emlsr(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ const struct iwl_mld_link_sel_data *a,
+ const struct iwl_mld_link_sel_data *b)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link *link_a =
+ iwl_mld_link_dereference_check(mld_vif, a->link_id);
+ struct ieee80211_chanctx_conf *chanctx_a = NULL;
+ u32 bw_a, bw_b, ratio;
+ u32 primary_load_perc;
+
+ if (!link_a || !link_a->active) {
+ IWL_DEBUG_EHT(mld, "Primary link is not active. Can't enter EMLSR\n");
+ return false;
+ }
+
+ chanctx_a = wiphy_dereference(mld->wiphy, link_a->chan_ctx);
+
+ if (WARN_ON(!chanctx_a))
+ return false;
+
+ primary_load_perc =
+ iwl_mld_phy_from_mac80211(chanctx_a)->avg_channel_load_not_by_us;
+
+ IWL_DEBUG_EHT(mld, "Average channel load not by us: %u\n", primary_load_perc);
+
+ if (primary_load_perc < iwl_mld_get_min_chan_load_thresh(chanctx_a)) {
+ IWL_DEBUG_EHT(mld, "Channel load is below the minimum threshold\n");
+ return false;
+ }
+
+ if (iwl_mld_vif_low_latency(mld_vif)) {
+ IWL_DEBUG_EHT(mld, "Low latency vif, EMLSR is allowed\n");
+ return true;
+ }
+
+ if (a->chandef->width <= b->chandef->width)
+ return true;
+
+ bw_a = nl80211_chan_width_to_mhz(a->chandef->width);
+ bw_b = nl80211_chan_width_to_mhz(b->chandef->width);
+ ratio = bw_a / bw_b;
+
+ switch (ratio) {
+ case 2:
+ return primary_load_perc > 25;
+ case 4:
+ return primary_load_perc > 40;
+ case 8:
+ case 16:
+ return primary_load_perc > 50;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_channel_load_allows_emlsr);
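+
+/*
+ * Illustrative reading of the ratio check above: with a 160 MHz primary and
+ * an 80 MHz secondary the ratio is 2, so EMLSR is allowed only when the
+ * channel load not caused by us on the primary exceeds 25%; at a ratio of 4
+ * (e.g. 320 MHz vs 80 MHz) the bar rises to 40%, and at 8 or 16 to 50%.
+ */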
+
+static bool
+iwl_mld_valid_emlsr_pair(struct ieee80211_vif *vif,
+ struct iwl_mld_link_sel_data *a,
+ struct iwl_mld_link_sel_data *b)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld *mld = mld_vif->mld;
+ u32 reason_mask = 0;
+
+ /* Per-link considerations */
+ if (iwl_mld_emlsr_disallowed_with_link(mld, vif, a, true) ||
+ iwl_mld_emlsr_disallowed_with_link(mld, vif, b, false))
+ return false;
+
+ if (a->chandef->chan->band == b->chandef->chan->band)
+ reason_mask |= IWL_MLD_EMLSR_EXIT_EQUAL_BAND;
+ if (!iwl_mld_channel_load_allows_emlsr(mld, vif, a, b))
+ reason_mask |= IWL_MLD_EMLSR_EXIT_CHAN_LOAD;
+
+ if (reason_mask) {
+ IWL_DEBUG_INFO(mld,
+ "Links %d and %d are not a valid pair for EMLSR\n",
+ a->link_id, b->link_id);
+ IWL_DEBUG_INFO(mld,
+ "Links bandwidth are: %d and %d\n",
+ nl80211_chan_width_to_mhz(a->chandef->width),
+ nl80211_chan_width_to_mhz(b->chandef->width));
+ iwl_mld_print_emlsr_exit(mld, reason_mask);
+ return false;
+ }
+
+ return true;
+}
+
+/* Calculation is done with fixed-point with a scaling factor of 1/256 */
+#define SCALE_FACTOR 256
+
+/*
+ * Returns the combined grade of two given links.
+ * Returns 0 if EMLSR is not allowed with these 2 links.
+ */
+static
+unsigned int iwl_mld_get_emlsr_grade(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mld_link_sel_data *a,
+ struct iwl_mld_link_sel_data *b,
+ u8 *primary_id)
+{
+ struct ieee80211_bss_conf *primary_conf;
+ struct wiphy *wiphy = ieee80211_vif_to_wdev(vif)->wiphy;
+ unsigned int primary_load;
+
+ lockdep_assert_wiphy(wiphy);
+
+ /* a is always primary, b is always secondary */
+ if (b->grade > a->grade)
+ swap(a, b);
+
+ *primary_id = a->link_id;
+
+ if (!iwl_mld_valid_emlsr_pair(vif, a, b))
+ return 0;
+
+ primary_conf = wiphy_dereference(wiphy, vif->link_conf[*primary_id]);
+
+ if (WARN_ON_ONCE(!primary_conf))
+ return 0;
+
+ primary_load = iwl_mld_get_chan_load(mld, primary_conf);
+
+ /* The more the primary link is loaded, the more worthwhile EMLSR becomes */
+ return a->grade + ((b->grade * primary_load) / SCALE_FACTOR);
+}
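+
+/*
+ * Numeric example for the fixed-point combination above (values are
+ * hypothetical and assume the channel load is expressed in the same 1/256
+ * units): with a->grade = 100, b->grade = 80 and primary_load = 128 (50%),
+ * the combined grade is 100 + (80 * 128) / 256 = 140, so this pair wins over
+ * any single link whose grade is below 140.
+ */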
+
+static void _iwl_mld_select_links(struct iwl_mld *mld,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
+ struct iwl_mld_link_sel_data *best_link;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int max_active_links = iwl_mld_max_active_links(mld, vif);
+ u16 new_active, usable_links = ieee80211_vif_usable_links(vif);
+ u8 best_idx, new_primary, n_data;
+ u16 max_grade;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (!mld_vif->authorized || hweight16(usable_links) <= 1)
+ return;
+
+ if (WARN(ktime_before(mld->scan.last_mlo_scan_time,
+ ktime_sub_ns(ktime_get_boottime_ns(),
+ 5ULL * NSEC_PER_SEC)),
+ "Last MLO scan was too long ago, can't select links\n"))
+ return;
+
+ /* The logic below is simple and not suited for more than 2 links */
+ WARN_ON_ONCE(max_active_links > 2);
+
+ n_data = iwl_mld_set_link_sel_data(mld, vif, data, usable_links,
+ &best_idx);
+
+ if (WARN(!n_data, "Couldn't find a valid grade for any link!\n"))
+ return;
+
+ /* Default to selecting the single best link */
+ best_link = &data[best_idx];
+ new_primary = best_link->link_id;
+ new_active = BIT(best_link->link_id);
+ max_grade = best_link->grade;
+
+ /* If EMLSR is not possible, activate the best link */
+ if (max_active_links == 1 || n_data == 1 ||
+ !iwl_mld_vif_has_emlsr_cap(vif) || !IWL_MLD_AUTO_EML_ENABLE ||
+ mld_vif->emlsr.blocked_reasons)
+ goto set_active;
+
+ /* Try to find the best link combination */
+ for (u8 a = 0; a < n_data; a++) {
+ for (u8 b = a + 1; b < n_data; b++) {
+ u8 best_in_pair;
+ u16 emlsr_grade =
+ iwl_mld_get_emlsr_grade(mld, vif,
+ &data[a], &data[b],
+ &best_in_pair);
+
+ /*
+			 * Prefer the (new) EMLSR combination on ties, so that
+			 * EMLSR is preferred over a single link.
+ */
+ if (emlsr_grade < max_grade)
+ continue;
+
+ max_grade = emlsr_grade;
+ new_primary = best_in_pair;
+ new_active = BIT(data[a].link_id) |
+ BIT(data[b].link_id);
+ }
+ }
+
+set_active:
+ IWL_DEBUG_INFO(mld, "Link selection result: 0x%x. Primary = %d\n",
+ new_active, new_primary);
+
+ mld_vif->emlsr.selected_primary = new_primary;
+ mld_vif->emlsr.selected_links = new_active;
+
+ ieee80211_set_active_links_async(vif, new_active);
+}
+
+static void iwl_mld_vif_iter_select_links(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld *mld = mld_vif->mld;
+
+ _iwl_mld_select_links(mld, vif);
+}
+
+void iwl_mld_select_links(struct iwl_mld *mld)
+{
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_vif_iter_select_links,
+ NULL);
+}
+
+static void iwl_mld_emlsr_check_bt_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld *mld = mld_vif->mld;
+ struct ieee80211_bss_conf *link;
+ unsigned int link_id;
+
+ if (!mld->bt_is_active) {
+ iwl_mld_retry_emlsr(mld, vif);
+ return;
+ }
+
+ /* BT is turned ON but we are not in EMLSR, nothing to do */
+ if (!iwl_mld_emlsr_active(vif))
+ return;
+
+ /* In EMLSR and BT is turned ON */
+
+ for_each_vif_active_link(vif, link, link_id) {
+ if (WARN_ON(!link->chanreq.oper.chan))
+ continue;
+
+ if (link->chanreq.oper.chan->band == NL80211_BAND_2GHZ) {
+ iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_BT_COEX,
+ iwl_mld_get_primary_link(vif));
+ return;
+ }
+ }
+}
+
+void iwl_mld_emlsr_check_bt(struct iwl_mld *mld)
+{
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_emlsr_check_bt_iter,
+ NULL);
+}
+
+struct iwl_mld_chan_load_data {
+ struct iwl_mld_phy *phy;
+ u32 prev_chan_load_not_by_us;
+};
+
+static void iwl_mld_chan_load_update_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_chan_load_data *data = _data;
+ const struct iwl_mld_phy *phy = data->phy;
+ struct ieee80211_chanctx_conf *chanctx =
+ container_of((const void *)phy, struct ieee80211_chanctx_conf,
+ drv_priv);
+ struct iwl_mld *mld = iwl_mld_vif_from_mac80211(vif)->mld;
+ struct ieee80211_bss_conf *prim_link;
+ unsigned int prim_link_id;
+
+ prim_link_id = iwl_mld_get_primary_link(vif);
+ prim_link = link_conf_dereference_protected(vif, prim_link_id);
+
+ if (WARN_ON(!prim_link))
+ return;
+
+ if (chanctx != rcu_access_pointer(prim_link->chanctx_conf))
+ return;
+
+ if (iwl_mld_emlsr_active(vif)) {
+ int chan_load = iwl_mld_get_chan_load_by_others(mld, prim_link,
+ true);
+
+ if (chan_load < 0)
+ return;
+
+ /* chan_load is in range [0,255] */
+ if (chan_load < NORMALIZE_PERCENT_TO_255(IWL_MLD_EXIT_EMLSR_CHAN_LOAD))
+ iwl_mld_exit_emlsr(mld, vif,
+ IWL_MLD_EMLSR_EXIT_CHAN_LOAD,
+ prim_link_id);
+ } else {
+ u32 old_chan_load = data->prev_chan_load_not_by_us;
+ u32 new_chan_load = phy->avg_channel_load_not_by_us;
+ u32 min_thresh = iwl_mld_get_min_chan_load_thresh(chanctx);
+
+#define THRESHOLD_CROSSED(threshold) \
+ (old_chan_load <= (threshold) && new_chan_load > (threshold))
+
+ if (THRESHOLD_CROSSED(min_thresh) || THRESHOLD_CROSSED(25) ||
+ THRESHOLD_CROSSED(40) || THRESHOLD_CROSSED(50))
+ iwl_mld_retry_emlsr(mld, vif);
+#undef THRESHOLD_CROSSED
+ }
+}
+
+void iwl_mld_emlsr_check_chan_load(struct ieee80211_hw *hw,
+ struct iwl_mld_phy *phy,
+ u32 prev_chan_load_not_by_us)
+{
+ struct iwl_mld_chan_load_data data = {
+ .phy = phy,
+ .prev_chan_load_not_by_us = prev_chan_load_not_by_us,
+ };
+
+ ieee80211_iterate_active_interfaces_mtx(hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_chan_load_update_iter,
+ &data);
+}
+
+void iwl_mld_retry_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ if (!iwl_mld_vif_has_emlsr_cap(vif) || iwl_mld_emlsr_active(vif) ||
+ mld_vif->emlsr.blocked_reasons)
+ return;
+
+ iwl_mld_int_mlo_scan(mld, vif);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mlo.h b/drivers/net/wireless/intel/iwlwifi/mld/mlo.h
new file mode 100644
index 000000000000..4fb1fdbe3df9
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mlo.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_mlo_h__
+#define __iwl_mld_mlo_h__
+
+#include <linux/ieee80211.h>
+#include <linux/types.h>
+#include <net/mac80211.h>
+#include "iwl-config.h"
+#include "iwl-trans.h"
+#include "iface.h"
+#include "phy.h"
+
+struct iwl_mld;
+
+void iwl_mld_emlsr_prevent_done_wk(struct wiphy *wiphy, struct wiphy_work *wk);
+void iwl_mld_emlsr_tmp_non_bss_done_wk(struct wiphy *wiphy,
+ struct wiphy_work *wk);
+
+static inline bool iwl_mld_emlsr_active(struct ieee80211_vif *vif)
+{
+ /* Set on phy context activation, so should be a good proxy */
+ return !!(vif->driver_flags & IEEE80211_VIF_EML_ACTIVE);
+}
+
+static inline bool iwl_mld_vif_has_emlsr_cap(struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ /* We only track/permit EMLSR state once authorized */
+ if (!mld_vif->authorized)
+ return false;
+
+ /* No EMLSR on dual radio devices */
+ return ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION &&
+ ieee80211_vif_is_mld(vif) &&
+ vif->cfg.eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP &&
+ !CSR_HW_RFID_IS_CDB(mld_vif->mld->trans->hw_rf_id);
+}
+
+static inline int
+iwl_mld_max_active_links(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+ if (vif->type == NL80211_IFTYPE_AP)
+ return mld->fw->ucode_capa.num_beacons;
+
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION)
+ return IWL_FW_MAX_ACTIVE_LINKS_NUM;
+
+ /* For now, do not accept more links on other interface types */
+ return 1;
+}
+
+static inline int
+iwl_mld_count_active_links(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link *mld_link;
+ int n_active = 0;
+
+ for_each_mld_vif_valid_link(mld_vif, mld_link) {
+ if (rcu_access_pointer(mld_link->chan_ctx))
+ n_active++;
+ }
+
+ return n_active;
+}
+
+static inline u8 iwl_mld_get_primary_link(struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ lockdep_assert_wiphy(mld_vif->mld->wiphy);
+
+ if (!ieee80211_vif_is_mld(vif) || WARN_ON(!vif->active_links))
+ return 0;
+
+ /* In AP mode, there is no primary link */
+ if (vif->type == NL80211_IFTYPE_AP)
+ return __ffs(vif->active_links);
+
+ if (iwl_mld_emlsr_active(vif) &&
+ !WARN_ON(!(BIT(mld_vif->emlsr.primary) & vif->active_links)))
+ return mld_vif->emlsr.primary;
+
+ return __ffs(vif->active_links);
+}
+
+/*
+ * For non-MLO/single link, this will return the deflink/single active link,
+ * respectively
+ */
+static inline u8 iwl_mld_get_other_link(struct ieee80211_vif *vif, u8 link_id)
+{
+ switch (hweight16(vif->active_links)) {
+ case 0:
+ return 0;
+ default:
+ WARN_ON(1);
+ fallthrough;
+ case 1:
+ return __ffs(vif->active_links);
+ case 2:
+ return __ffs(vif->active_links & ~BIT(link_id));
+ }
+}
+
+s8 iwl_mld_get_emlsr_rssi_thresh(struct iwl_mld *mld,
+ const struct cfg80211_chan_def *chandef,
+ bool low);
+
+/* EMLSR block/unblock and exit */
+void iwl_mld_block_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ enum iwl_mld_emlsr_blocked reason, u8 link_to_keep);
+int iwl_mld_block_emlsr_sync(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ enum iwl_mld_emlsr_blocked reason, u8 link_to_keep);
+void iwl_mld_unblock_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ enum iwl_mld_emlsr_blocked reason);
+void iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ enum iwl_mld_emlsr_exit exit, u8 link_to_keep);
+
+int iwl_mld_emlsr_check_non_bss_block(struct iwl_mld *mld,
+ int pending_link_changes);
+
+void iwl_mld_handle_emlsr_mode_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+void iwl_mld_handle_emlsr_trans_fail_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+
+void iwl_mld_emlsr_check_tpt(struct wiphy *wiphy, struct wiphy_work *wk);
+void iwl_mld_emlsr_unblock_tpt_wk(struct wiphy *wiphy, struct wiphy_work *wk);
+
+void iwl_mld_select_links(struct iwl_mld *mld);
+
+void iwl_mld_emlsr_check_bt(struct iwl_mld *mld);
+
+void iwl_mld_emlsr_check_chan_load(struct ieee80211_hw *hw,
+ struct iwl_mld_phy *phy,
+ u32 prev_chan_load_not_by_us);
+
+/**
+ * iwl_mld_retry_emlsr - Retry entering EMLSR
+ * @mld: MLD context
+ * @vif: VIF to retry EMLSR on
+ *
+ * Retry entering EMLSR on the given VIF.
+ * Use this if one of the parameters that can prevent EMLSR has changed.
+ */
+void iwl_mld_retry_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif);
+
+struct iwl_mld_link_sel_data {
+ u8 link_id;
+ const struct cfg80211_chan_def *chandef;
+ s32 signal;
+ u16 grade;
+};
+
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+bool iwl_mld_channel_load_allows_emlsr(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ const struct iwl_mld_link_sel_data *a,
+ const struct iwl_mld_link_sel_data *b);
+#endif
+
+#endif /* __iwl_mld_mlo_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/notif.c b/drivers/net/wireless/intel/iwlwifi/mld/notif.c
new file mode 100644
index 000000000000..fc18cba8aaa8
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/notif.c
@@ -0,0 +1,759 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include "mld.h"
+#include "notif.h"
+#include "scan.h"
+#include "iface.h"
+#include "mlo.h"
+#include "iwl-trans.h"
+#include "fw/file.h"
+#include "fw/dbg.h"
+#include "fw/api/cmdhdr.h"
+#include "fw/api/mac-cfg.h"
+#include "session-protect.h"
+#include "fw/api/time-event.h"
+#include "fw/api/tx.h"
+#include "fw/api/rs.h"
+#include "fw/api/offload.h"
+#include "fw/api/stats.h"
+#include "fw/api/rfi.h"
+#include "fw/api/coex.h"
+
+#include "mcc.h"
+#include "link.h"
+#include "tx.h"
+#include "rx.h"
+#include "tlc.h"
+#include "agg.h"
+#include "mac80211.h"
+#include "thermal.h"
+#include "roc.h"
+#include "stats.h"
+#include "coex.h"
+#include "time_sync.h"
+#include "ftm-initiator.h"
+
+/* Please list entries in increasing version order */
+#define CMD_VER_ENTRY(_ver, _struct) \
+ { .size = sizeof(struct _struct), .ver = _ver },
+#define CMD_VERSIONS(name, ...) \
+ static const struct iwl_notif_struct_size \
+ iwl_notif_struct_sizes_##name[] = { __VA_ARGS__ };
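+
+/* Illustrative use of the two macros above (the version number is an
+ * assumption, not taken from the firmware API):
+ *
+ *	CMD_VERSIONS(example_notif,
+ *		     CMD_VER_ENTRY(1, iwl_example_notif))
+ *
+ * expands to a static iwl_notif_struct_sizes_example_notif[] array, which the
+ * RX_HANDLER_* macros below then reference by name via the _name argument.
+ */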
+
+#define RX_HANDLER_NO_OBJECT(_grp, _cmd, _name, _context) \
+ {.cmd_id = WIDE_ID(_grp, _cmd), \
+ .context = _context, \
+ .fn = iwl_mld_handle_##_name, \
+ .sizes = iwl_notif_struct_sizes_##_name, \
+ .n_sizes = ARRAY_SIZE(iwl_notif_struct_sizes_##_name), \
+ },
+
+/* Use this for Rx handlers that do not need notification validation */
+#define RX_HANDLER_NO_VAL(_grp, _cmd, _name, _context) \
+ {.cmd_id = WIDE_ID(_grp, _cmd), \
+ .context = _context, \
+ .fn = iwl_mld_handle_##_name, \
+ },
+
+#define RX_HANDLER_VAL_FN(_grp, _cmd, _name, _context) \
+ { .cmd_id = WIDE_ID(_grp, _cmd), \
+ .context = _context, \
+ .fn = iwl_mld_handle_##_name, \
+ .val_fn = iwl_mld_validate_##_name, \
+ },
+
+#define DEFINE_SIMPLE_CANCELLATION(name, notif_struct, id_member) \
+static bool iwl_mld_cancel_##name##_notif(struct iwl_mld *mld, \
+ struct iwl_rx_packet *pkt, \
+ u32 obj_id) \
+{ \
+ const struct notif_struct *notif = (const void *)pkt->data; \
+ \
+ return obj_id == _Generic((notif)->id_member, \
+ __le32: le32_to_cpu((notif)->id_member), \
+ __le16: le16_to_cpu((notif)->id_member), \
+ u8: (notif)->id_member); \
+}
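+
+/*
+ * For illustration only (hypothetical names): invoking
+ *
+ *	DEFINE_SIMPLE_CANCELLATION(example, iwl_example_notif, link_id)
+ *
+ * generates iwl_mld_cancel_example_notif(), which returns true when the
+ * notification's link_id field matches the id of the object being removed,
+ * with _Generic() picking the right endianness conversion for the field's
+ * type.
+ */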
+
+static bool iwl_mld_always_cancel(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt,
+ u32 obj_id)
+{
+ return true;
+}
+
+/* Currently only defined for the RX_HANDLER_SIZES options. Use this for
+ * notifications that belong to a specific object, and that should be
+ * canceled when the object is removed
+ */
+#define RX_HANDLER_OF_OBJ(_grp, _cmd, _name, _obj_type) \
+ {.cmd_id = WIDE_ID(_grp, _cmd), \
+ /* Only async handlers can be canceled */ \
+ .context = RX_HANDLER_ASYNC, \
+ .fn = iwl_mld_handle_##_name, \
+ .sizes = iwl_notif_struct_sizes_##_name, \
+ .n_sizes = ARRAY_SIZE(iwl_notif_struct_sizes_##_name), \
+ .obj_type = IWL_MLD_OBJECT_TYPE_##_obj_type, \
+ .cancel = iwl_mld_cancel_##_name, \
+ },
+
+#define RX_HANDLER_OF_LINK(_grp, _cmd, _name) \
+ RX_HANDLER_OF_OBJ(_grp, _cmd, _name, LINK) \
+
+#define RX_HANDLER_OF_VIF(_grp, _cmd, _name) \
+ RX_HANDLER_OF_OBJ(_grp, _cmd, _name, VIF) \
+
+#define RX_HANDLER_OF_STA(_grp, _cmd, _name) \
+ RX_HANDLER_OF_OBJ(_grp, _cmd, _name, STA) \
+
+#define RX_HANDLER_OF_ROC(_grp, _cmd, _name) \
+ RX_HANDLER_OF_OBJ(_grp, _cmd, _name, ROC)
+
+#define RX_HANDLER_OF_SCAN(_grp, _cmd, _name) \
+ RX_HANDLER_OF_OBJ(_grp, _cmd, _name, SCAN)
+
+#define RX_HANDLER_OF_FTM_REQ(_grp, _cmd, _name) \
+ RX_HANDLER_OF_OBJ(_grp, _cmd, _name, FTM_REQ)
+
+static void iwl_mld_handle_mfuart_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
+
+ IWL_DEBUG_INFO(mld,
+ "MFUART: installed ver: 0x%08x, external ver: 0x%08x\n",
+ le32_to_cpu(mfuart_notif->installed_ver),
+ le32_to_cpu(mfuart_notif->external_ver));
+ IWL_DEBUG_INFO(mld,
+ "MFUART: status: 0x%08x, duration: 0x%08x image size: 0x%08x\n",
+ le32_to_cpu(mfuart_notif->status),
+ le32_to_cpu(mfuart_notif->duration),
+ le32_to_cpu(mfuart_notif->image_size));
+}
+
+static void iwl_mld_mu_mimo_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ unsigned int link_id = 0;
+
+ if (WARN(hweight16(vif->active_links) > 1,
+ "no support for this notif while in EMLSR 0x%x\n",
+ vif->active_links))
+ return;
+
+ if (ieee80211_vif_is_mld(vif)) {
+ link_id = __ffs(vif->active_links);
+ bss_conf = link_conf_dereference_check(vif, link_id);
+ }
+
+ if (!WARN_ON(!bss_conf) && bss_conf->mu_mimo_owner) {
+ const struct iwl_mu_group_mgmt_notif *notif = _data;
+
+ BUILD_BUG_ON(sizeof(notif->membership_status) !=
+ WLAN_MEMBERSHIP_LEN);
+ BUILD_BUG_ON(sizeof(notif->user_position) !=
+ WLAN_USER_POSITION_LEN);
+
+ /* MU-MIMO Group Id action frame is little endian. We treat
+ * the data received from firmware as if it came from the
+ * action frame, so no conversion is needed.
+ */
+ ieee80211_update_mu_groups(vif, link_id,
+ (u8 *)&notif->membership_status,
+ (u8 *)&notif->user_position);
+ }
+}
+
+/* This handler is called in SYNC mode because it needs to be serialized with
+ * Rx as specified in ieee80211_update_mu_groups()'s documentation.
+ */
+static void iwl_mld_handle_mu_mimo_grp_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data;
+
+ ieee80211_iterate_active_interfaces_atomic(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_mu_mimo_iface_iterator,
+ notif);
+}
+
+static void
+iwl_mld_handle_stored_beacon_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
+ struct iwl_stored_beacon_notif *sb = (void *)pkt->data;
+ struct ieee80211_rx_status rx_status = {};
+ struct sk_buff *skb;
+ u32 size = le32_to_cpu(sb->common.byte_count);
+
+ if (size == 0)
+ return;
+
+ if (pkt_len < struct_size(sb, data, size))
+ return;
+
+ skb = alloc_skb(size, GFP_ATOMIC);
+ if (!skb) {
+ IWL_ERR(mld, "alloc_skb failed\n");
+ return;
+ }
+
+ /* update rx_status according to the notification's metadata */
+ rx_status.mactime = le64_to_cpu(sb->common.tsf);
+ /* TSF as indicated by the firmware is at INA time */
+ rx_status.flag |= RX_FLAG_MACTIME_PLCP_START;
+ rx_status.device_timestamp = le32_to_cpu(sb->common.system_time);
+ rx_status.band =
+ iwl_mld_phy_band_to_nl80211(le16_to_cpu(sb->common.band));
+ rx_status.freq =
+ ieee80211_channel_to_frequency(le16_to_cpu(sb->common.channel),
+ rx_status.band);
+
+ /* copy the data */
+ skb_put_data(skb, sb->data, size);
+ memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+
+ /* pass it as regular rx to mac80211 */
+ ieee80211_rx_napi(mld->hw, NULL, skb, NULL);
+}
+
+static void
+iwl_mld_handle_channel_switch_start_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_channel_switch_start_notif *notif = (void *)pkt->data;
+ u32 link_id = le32_to_cpu(notif->link_id);
+ struct ieee80211_bss_conf *link_conf =
+ iwl_mld_fw_id_to_link_conf(mld, link_id);
+ struct ieee80211_vif *vif;
+
+ if (WARN_ON(!link_conf))
+ return;
+
+ vif = link_conf->vif;
+
+ IWL_DEBUG_INFO(mld,
+ "CSA Start Notification with vif type: %d, link_id: %d\n",
+ vif->type,
+ link_conf->link_id);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ /* We don't support canceling a CSA that was advertised
+ * by the AP itself
+ */
+ if (!link_conf->csa_active)
+ return;
+
+ ieee80211_csa_finish(vif, link_conf->link_id);
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (!link_conf->csa_active) {
+ /* Either an unexpected CSA notification, or mac80211 chose to
+ * ignore it, for example on a channel switch to the same channel
+ */
+ struct iwl_cancel_channel_switch_cmd cmd = {
+ .id = cpu_to_le32(link_id),
+ };
+
+ if (iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(MAC_CONF_GROUP,
+ CANCEL_CHANNEL_SWITCH_CMD),
+ &cmd))
+ IWL_ERR(mld,
+ "Failed to cancel the channel switch\n");
+ return;
+ }
+
+ ieee80211_chswitch_done(vif, true, link_conf->link_id);
+ break;
+
+ default:
+ WARN(1, "CSA on invalid vif type: %d", vif->type);
+ }
+}
+
+static void
+iwl_mld_handle_channel_switch_error_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_channel_switch_error_notif *notif = (void *)pkt->data;
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_vif *vif;
+ u32 link_id = le32_to_cpu(notif->link_id);
+ u32 csa_err_mask = le32_to_cpu(notif->csa_err_mask);
+
+ link_conf = iwl_mld_fw_id_to_link_conf(mld, link_id);
+ if (WARN_ON(!link_conf))
+ return;
+
+ vif = link_conf->vif;
+
+ IWL_DEBUG_INFO(mld, "FW reports CSA error: id=%u, csa_err_mask=%u\n",
+ link_id, csa_err_mask);
+
+ if (csa_err_mask & (CS_ERR_COUNT_ERROR |
+ CS_ERR_LONG_DELAY_AFTER_CS |
+ CS_ERR_TX_BLOCK_TIMER_EXPIRED))
+ ieee80211_channel_switch_disconnect(vif);
+}
+
+static void iwl_mld_handle_beacon_notification(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
+
+ mld->ibss_manager = !!beacon->ibss_mgr_status;
+}
+
+/**
+ * DOC: Notification versioning
+ *
+ * The firmware's notifications change from time to time. In order to
+ * differentiate between different versions of the same notification, the
+ * firmware advertises the version of each notification.
+ * Here are listed all the notifications that are supported. Several versions
+ * of the same notification can be allowed at the same time:
+ *
+ * CMD_VERSIONS(my_multi_version_notif,
+ * CMD_VER_ENTRY(1, iwl_my_multi_version_notif_ver1)
+ * CMD_VER_ENTRY(2, iwl_my_multi_version_notif_ver2)
+ *
+ * etc...
+ *
+ * The driver will enforce that the notification coming from the firmware
+ * has its version listed here and it'll also enforce that the firmware sent
+ * at least enough bytes to cover the structure listed in the CMD_VER_ENTRY.
+ */
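+
+/* For illustration only (not taken from the firmware API documentation):
+ * the example above would roughly expand, via CMD_VERSIONS/CMD_VER_ENTRY,
+ * to a static array such as
+ *
+ *	static const struct iwl_notif_struct_size
+ *	iwl_notif_struct_sizes_my_multi_version_notif[] = {
+ *		{ .size = sizeof(struct iwl_my_multi_version_notif_ver1), .ver = 1 },
+ *		{ .size = sizeof(struct iwl_my_multi_version_notif_ver2), .ver = 2 },
+ *	};
+ *
+ * which iwl_mld_notif_is_valid() later scans for the version advertised by
+ * the firmware. The structure names are hypothetical placeholders.
+ */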
+
+CMD_VERSIONS(scan_complete_notif,
+ CMD_VER_ENTRY(1, iwl_umac_scan_complete))
+CMD_VERSIONS(scan_iter_complete_notif,
+ CMD_VER_ENTRY(2, iwl_umac_scan_iter_complete_notif))
+CMD_VERSIONS(mfuart_notif,
+ CMD_VER_ENTRY(2, iwl_mfuart_load_notif))
+CMD_VERSIONS(update_mcc,
+ CMD_VER_ENTRY(1, iwl_mcc_chub_notif))
+CMD_VERSIONS(session_prot_notif,
+ CMD_VER_ENTRY(3, iwl_session_prot_notif))
+CMD_VERSIONS(missed_beacon_notif,
+ CMD_VER_ENTRY(5, iwl_missed_beacons_notif))
+CMD_VERSIONS(tx_resp_notif,
+ CMD_VER_ENTRY(8, iwl_tx_resp))
+CMD_VERSIONS(compressed_ba_notif,
+ CMD_VER_ENTRY(5, iwl_compressed_ba_notif)
+ CMD_VER_ENTRY(6, iwl_compressed_ba_notif))
+CMD_VERSIONS(tlc_notif,
+ CMD_VER_ENTRY(3, iwl_tlc_update_notif))
+CMD_VERSIONS(mu_mimo_grp_notif,
+ CMD_VER_ENTRY(1, iwl_mu_group_mgmt_notif))
+CMD_VERSIONS(channel_switch_start_notif,
+ CMD_VER_ENTRY(3, iwl_channel_switch_start_notif))
+CMD_VERSIONS(channel_switch_error_notif,
+ CMD_VER_ENTRY(2, iwl_channel_switch_error_notif))
+CMD_VERSIONS(ct_kill_notif,
+ CMD_VER_ENTRY(2, ct_kill_notif))
+CMD_VERSIONS(temp_notif,
+ CMD_VER_ENTRY(2, iwl_dts_measurement_notif))
+CMD_VERSIONS(stored_beacon_notif,
+ CMD_VER_ENTRY(4, iwl_stored_beacon_notif))
+CMD_VERSIONS(roc_notif,
+ CMD_VER_ENTRY(1, iwl_roc_notif))
+CMD_VERSIONS(probe_resp_data_notif,
+ CMD_VER_ENTRY(1, iwl_probe_resp_data_notif))
+CMD_VERSIONS(datapath_monitor_notif,
+ CMD_VER_ENTRY(1, iwl_datapath_monitor_notif))
+CMD_VERSIONS(stats_oper_notif,
+ CMD_VER_ENTRY(3, iwl_system_statistics_notif_oper))
+CMD_VERSIONS(stats_oper_part1_notif,
+ CMD_VER_ENTRY(4, iwl_system_statistics_part1_notif_oper))
+CMD_VERSIONS(bt_coex_notif,
+ CMD_VER_ENTRY(1, iwl_bt_coex_profile_notif))
+CMD_VERSIONS(beacon_notification,
+ CMD_VER_ENTRY(6, iwl_extended_beacon_notif))
+CMD_VERSIONS(emlsr_mode_notif,
+ CMD_VER_ENTRY(1, iwl_esr_mode_notif))
+CMD_VERSIONS(emlsr_trans_fail_notif,
+ CMD_VER_ENTRY(1, iwl_esr_trans_fail_notif))
+CMD_VERSIONS(uapsd_misbehaving_ap_notif,
+ CMD_VER_ENTRY(1, iwl_uapsd_misbehaving_ap_notif))
+CMD_VERSIONS(time_msmt_notif,
+ CMD_VER_ENTRY(1, iwl_time_msmt_notify))
+CMD_VERSIONS(time_sync_confirm_notif,
+ CMD_VER_ENTRY(1, iwl_time_msmt_cfm_notify))
+CMD_VERSIONS(omi_status_notif,
+ CMD_VER_ENTRY(1, iwl_omi_send_status_notif))
+CMD_VERSIONS(ftm_resp_notif, CMD_VER_ENTRY(9, iwl_tof_range_rsp_ntfy))
+
+DEFINE_SIMPLE_CANCELLATION(session_prot, iwl_session_prot_notif, mac_link_id)
+DEFINE_SIMPLE_CANCELLATION(tlc, iwl_tlc_update_notif, sta_id)
+DEFINE_SIMPLE_CANCELLATION(channel_switch_start,
+ iwl_channel_switch_start_notif, link_id)
+DEFINE_SIMPLE_CANCELLATION(channel_switch_error,
+ iwl_channel_switch_error_notif, link_id)
+DEFINE_SIMPLE_CANCELLATION(datapath_monitor, iwl_datapath_monitor_notif,
+ link_id)
+DEFINE_SIMPLE_CANCELLATION(roc, iwl_roc_notif, activity)
+DEFINE_SIMPLE_CANCELLATION(scan_complete, iwl_umac_scan_complete, uid)
+DEFINE_SIMPLE_CANCELLATION(probe_resp_data, iwl_probe_resp_data_notif,
+ mac_id)
+DEFINE_SIMPLE_CANCELLATION(uapsd_misbehaving_ap, iwl_uapsd_misbehaving_ap_notif,
+ mac_id)
+#define iwl_mld_cancel_omi_status_notif iwl_mld_always_cancel
+DEFINE_SIMPLE_CANCELLATION(ftm_resp, iwl_tof_range_rsp_ntfy, request_id)
+
+/**
+ * DOC: Handlers for fw notifications
+ *
+ * Here are listed the notification IDs (including the group ID), the handler
+ * of the notification and how it should be called:
+ *
+ * - RX_HANDLER_SYNC: will be called as part of the Rx path
+ * - RX_HANDLER_ASYNC: will be handled in a worker, with the wiphy_lock held
+ *
+ * This means that if the firmware sends two notifications A and B in that
+ * order and notification A is RX_HANDLER_ASYNC and notification B is
+ * RX_HANDLER_SYNC, the handler of B will likely be called before the handler
+ * of A.
+ *
+ * This list should be in order of frequency for performance purposes.
+ * The handler can run in one of two contexts, see &iwl_rx_handler_context
+ *
+ * A handler can declare that it relies on a specific object, in which case it
+ * can be canceled when that object is deleted. To use this mechanism, a
+ * cancellation function is needed. The cancellation function receives an
+ * object id (the index of that object in the firmware) and the notification
+ * payload, and returns true if that specific notification should be canceled
+ * upon the removal of that specific instance of the object.
+ *
+ * DEFINE_SIMPLE_CANCELLATION makes it easy to create a cancellation function
+ * that simply returns true if a given object id matches the object id in the
+ * firmware notification.
+ */
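+
+/* A rough sketch, for illustration only, of what the mechanism above
+ * generates: DEFINE_SIMPLE_CANCELLATION(tlc, iwl_tlc_update_notif, sta_id)
+ * expands to approximately
+ *
+ *	static bool iwl_mld_cancel_tlc_notif(struct iwl_mld *mld,
+ *					     struct iwl_rx_packet *pkt,
+ *					     u32 obj_id)
+ *	{
+ *		const struct iwl_tlc_update_notif *notif =
+ *			(const void *)pkt->data;
+ *
+ *		return obj_id == le32_to_cpu(notif->sta_id);
+ *	}
+ *
+ * (assuming sta_id is a __le32 member of the firmware notification; the
+ * _Generic() in the macro picks the matching endianness conversion). The
+ * generated function is then referenced by RX_HANDLER_OF_STA() as the
+ * .cancel callback.
+ */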
+
+VISIBLE_IF_IWLWIFI_KUNIT
+const struct iwl_rx_handler iwl_mld_rx_handlers[] = {
+ RX_HANDLER_NO_OBJECT(LEGACY_GROUP, TX_CMD, tx_resp_notif,
+ RX_HANDLER_SYNC)
+ RX_HANDLER_NO_OBJECT(LEGACY_GROUP, BA_NOTIF, compressed_ba_notif,
+ RX_HANDLER_SYNC)
+ RX_HANDLER_OF_SCAN(LEGACY_GROUP, SCAN_COMPLETE_UMAC,
+ scan_complete_notif)
+ RX_HANDLER_NO_OBJECT(LEGACY_GROUP, SCAN_ITERATION_COMPLETE_UMAC,
+ scan_iter_complete_notif,
+ RX_HANDLER_SYNC)
+ RX_HANDLER_NO_VAL(LEGACY_GROUP, MATCH_FOUND_NOTIFICATION,
+ match_found_notif, RX_HANDLER_SYNC)
+
+ RX_HANDLER_NO_OBJECT(STATISTICS_GROUP, STATISTICS_OPER_NOTIF,
+ stats_oper_notif, RX_HANDLER_ASYNC)
+ RX_HANDLER_NO_OBJECT(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF,
+ stats_oper_part1_notif, RX_HANDLER_ASYNC)
+
+ RX_HANDLER_NO_OBJECT(LEGACY_GROUP, MFUART_LOAD_NOTIFICATION,
+ mfuart_notif, RX_HANDLER_SYNC)
+
+ RX_HANDLER_NO_OBJECT(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
+ temp_notif, RX_HANDLER_ASYNC)
+ RX_HANDLER_OF_LINK(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF,
+ session_prot_notif)
+ RX_HANDLER_OF_LINK(MAC_CONF_GROUP, MISSED_BEACONS_NOTIF,
+ missed_beacon_notif)
+ RX_HANDLER_OF_STA(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF, tlc_notif)
+ RX_HANDLER_OF_LINK(MAC_CONF_GROUP, CHANNEL_SWITCH_START_NOTIF,
+ channel_switch_start_notif)
+ RX_HANDLER_OF_LINK(MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF,
+ channel_switch_error_notif)
+ RX_HANDLER_OF_ROC(MAC_CONF_GROUP, ROC_NOTIF, roc_notif)
+ RX_HANDLER_NO_OBJECT(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
+ mu_mimo_grp_notif, RX_HANDLER_SYNC)
+ RX_HANDLER_NO_OBJECT(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
+ stored_beacon_notif, RX_HANDLER_SYNC)
+ RX_HANDLER_OF_VIF(MAC_CONF_GROUP, PROBE_RESPONSE_DATA_NOTIF,
+ probe_resp_data_notif)
+ RX_HANDLER_NO_OBJECT(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
+ ct_kill_notif, RX_HANDLER_ASYNC)
+ RX_HANDLER_OF_LINK(DATA_PATH_GROUP, MONITOR_NOTIF,
+ datapath_monitor_notif)
+ RX_HANDLER_NO_OBJECT(LEGACY_GROUP, MCC_CHUB_UPDATE_CMD, update_mcc,
+ RX_HANDLER_ASYNC)
+ RX_HANDLER_NO_OBJECT(BT_COEX_GROUP, PROFILE_NOTIF,
+ bt_coex_notif, RX_HANDLER_ASYNC)
+ RX_HANDLER_NO_OBJECT(LEGACY_GROUP, BEACON_NOTIFICATION,
+ beacon_notification, RX_HANDLER_ASYNC)
+ RX_HANDLER_NO_OBJECT(DATA_PATH_GROUP, ESR_MODE_NOTIF,
+ emlsr_mode_notif, RX_HANDLER_ASYNC)
+ RX_HANDLER_NO_OBJECT(MAC_CONF_GROUP, EMLSR_TRANS_FAIL_NOTIF,
+ emlsr_trans_fail_notif, RX_HANDLER_ASYNC)
+ RX_HANDLER_OF_VIF(LEGACY_GROUP, PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
+ uapsd_misbehaving_ap_notif)
+ RX_HANDLER_NO_OBJECT(LEGACY_GROUP,
+ WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION,
+ time_msmt_notif, RX_HANDLER_SYNC)
+ RX_HANDLER_NO_OBJECT(LEGACY_GROUP,
+ WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION,
+ time_sync_confirm_notif, RX_HANDLER_ASYNC)
+ RX_HANDLER_OF_LINK(DATA_PATH_GROUP, OMI_SEND_STATUS_NOTIF,
+ omi_status_notif)
+ RX_HANDLER_OF_FTM_REQ(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF,
+ ftm_resp_notif)
+};
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_rx_handlers);
+
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+const unsigned int iwl_mld_rx_handlers_num = ARRAY_SIZE(iwl_mld_rx_handlers);
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_rx_handlers_num);
+#endif
+
+static bool
+iwl_mld_notif_is_valid(struct iwl_mld *mld, struct iwl_rx_packet *pkt,
+ const struct iwl_rx_handler *handler)
+{
+ unsigned int size = iwl_rx_packet_payload_len(pkt);
+ size_t notif_ver;
+
+ /* If n_sizes == 0, it indicates that a validation function may be used
+ * or that no validation is required.
+ */
+ if (!handler->n_sizes) {
+ if (handler->val_fn)
+ return handler->val_fn(mld, pkt);
+ return true;
+ }
+
+ notif_ver = iwl_fw_lookup_notif_ver(mld->fw,
+ iwl_cmd_groupid(handler->cmd_id),
+ iwl_cmd_opcode(handler->cmd_id),
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ for (int i = 0; i < handler->n_sizes; i++) {
+ if (handler->sizes[i].ver != notif_ver)
+ continue;
+
+ if (IWL_FW_CHECK(mld, size < handler->sizes[i].size,
+ "unexpected notification 0x%04x size %d, need %d\n",
+ handler->cmd_id, size, handler->sizes[i].size))
+ return false;
+ return true;
+ }
+
+ IWL_FW_CHECK_FAILED(mld,
+ "notif 0x%04x ver %zu missing expected size, use version %u size\n",
+ handler->cmd_id, notif_ver,
+ handler->sizes[handler->n_sizes - 1].ver);
+
+ return size >= handler->sizes[handler->n_sizes - 1].size;
+}
+
+struct iwl_async_handler_entry {
+ struct list_head list;
+ struct iwl_rx_cmd_buffer rxb;
+ const struct iwl_rx_handler *rx_h;
+};
+
+static void
+iwl_mld_log_async_handler_op(struct iwl_mld *mld, const char *op,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+ IWL_DEBUG_HC(mld,
+ "%s async handler for notif %s (%.2x.%2x, seq 0x%x)\n",
+ op, iwl_get_cmd_string(mld->trans,
+ WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)),
+ pkt->hdr.group_id, pkt->hdr.cmd,
+ le16_to_cpu(pkt->hdr.sequence));
+}
+
+static void iwl_mld_rx_notif(struct iwl_mld *mld,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_rx_packet *pkt)
+{
+ for (int i = 0; i < ARRAY_SIZE(iwl_mld_rx_handlers); i++) {
+ const struct iwl_rx_handler *rx_h = &iwl_mld_rx_handlers[i];
+ struct iwl_async_handler_entry *entry;
+
+ if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
+ continue;
+
+ if (!iwl_mld_notif_is_valid(mld, pkt, rx_h))
+ return;
+
+ if (rx_h->context == RX_HANDLER_SYNC) {
+ rx_h->fn(mld, pkt);
+ break;
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ /* we can't do much... */
+ if (!entry)
+ return;
+
+ /* Set the async handler entry */
+ entry->rxb._page = rxb_steal_page(rxb);
+ entry->rxb._offset = rxb->_offset;
+ entry->rxb._rx_page_order = rxb->_rx_page_order;
+
+ entry->rx_h = rx_h;
+
+ /* Add it to the list and queue the work */
+ spin_lock(&mld->async_handlers_lock);
+ list_add_tail(&entry->list, &mld->async_handlers_list);
+ spin_unlock(&mld->async_handlers_lock);
+
+ wiphy_work_queue(mld->hw->wiphy,
+ &mld->async_handlers_wk);
+
+ iwl_mld_log_async_handler_op(mld, "Queued", rxb);
+ break;
+ }
+
+ iwl_notification_wait_notify(&mld->notif_wait, pkt);
+}
+
+void iwl_mld_rx(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+ u16 cmd_id = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
+
+ if (likely(cmd_id == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
+ iwl_mld_rx_mpdu(mld, napi, rxb, 0);
+ else if (cmd_id == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
+ iwl_mld_handle_frame_release_notif(mld, napi, pkt, 0);
+ else if (cmd_id == WIDE_ID(LEGACY_GROUP, BAR_FRAME_RELEASE))
+ iwl_mld_handle_bar_frame_release_notif(mld, napi, pkt, 0);
+ else if (unlikely(cmd_id == WIDE_ID(DATA_PATH_GROUP,
+ RX_QUEUES_NOTIFICATION)))
+ iwl_mld_handle_rx_queues_sync_notif(mld, napi, pkt, 0);
+ else if (cmd_id == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF))
+ iwl_mld_rx_monitor_no_data(mld, napi, pkt, 0);
+ else
+ iwl_mld_rx_notif(mld, rxb, pkt);
+}
+
+void iwl_mld_rx_rss(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, unsigned int queue)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+ u16 cmd_id = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
+
+ if (unlikely(queue >= mld->trans->num_rx_queues))
+ return;
+
+ if (likely(cmd_id == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
+ iwl_mld_rx_mpdu(mld, napi, rxb, queue);
+ else if (unlikely(cmd_id == WIDE_ID(DATA_PATH_GROUP,
+ RX_QUEUES_NOTIFICATION)))
+ iwl_mld_handle_rx_queues_sync_notif(mld, napi, pkt, queue);
+ else if (unlikely(cmd_id == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)))
+ iwl_mld_handle_frame_release_notif(mld, napi, pkt, queue);
+}
+
+void iwl_mld_delete_handlers(struct iwl_mld *mld, const u16 *cmds, int n_cmds)
+{
+ struct iwl_async_handler_entry *entry, *tmp;
+
+ spin_lock_bh(&mld->async_handlers_lock);
+ list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) {
+ bool match = false;
+
+ for (int i = 0; i < n_cmds; i++) {
+ if (entry->rx_h->cmd_id == cmds[i]) {
+ match = true;
+ break;
+ }
+ }
+
+ if (!match)
+ continue;
+
+ iwl_mld_log_async_handler_op(mld, "Delete", &entry->rxb);
+ iwl_free_rxb(&entry->rxb);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ spin_unlock_bh(&mld->async_handlers_lock);
+}
+
+void iwl_mld_async_handlers_wk(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+ struct iwl_mld *mld =
+ container_of(wk, struct iwl_mld, async_handlers_wk);
+ struct iwl_async_handler_entry *entry, *tmp;
+ LIST_HEAD(local_list);
+
+ /* Sync with Rx path with a lock. Remove all the entries from this
+ * list, add them to a local one (lock free), and then handle them.
+ */
+ spin_lock_bh(&mld->async_handlers_lock);
+ list_splice_init(&mld->async_handlers_list, &local_list);
+ spin_unlock_bh(&mld->async_handlers_lock);
+
+ list_for_each_entry_safe(entry, tmp, &local_list, list) {
+ iwl_mld_log_async_handler_op(mld, "Handle", &entry->rxb);
+ entry->rx_h->fn(mld, rxb_addr(&entry->rxb));
+ iwl_free_rxb(&entry->rxb);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+}
+
+void iwl_mld_purge_async_handlers_list(struct iwl_mld *mld)
+{
+ struct iwl_async_handler_entry *entry, *tmp;
+
+ spin_lock_bh(&mld->async_handlers_lock);
+ list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) {
+ iwl_mld_log_async_handler_op(mld, "Purged", &entry->rxb);
+ iwl_free_rxb(&entry->rxb);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ spin_unlock_bh(&mld->async_handlers_lock);
+}
+
+void iwl_mld_cancel_notifications_of_object(struct iwl_mld *mld,
+ enum iwl_mld_object_type obj_type,
+ u32 obj_id)
+{
+ struct iwl_async_handler_entry *entry, *tmp;
+ LIST_HEAD(cancel_list);
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (WARN_ON(obj_type == IWL_MLD_OBJECT_TYPE_NONE))
+ return;
+
+ /* Sync with RX path and remove matching entries from the async list */
+ spin_lock_bh(&mld->async_handlers_lock);
+ list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) {
+ const struct iwl_rx_handler *rx_h = entry->rx_h;
+
+ if (rx_h->obj_type != obj_type || WARN_ON(!rx_h->cancel))
+ continue;
+
+ if (rx_h->cancel(mld, rxb_addr(&entry->rxb), obj_id)) {
+ iwl_mld_log_async_handler_op(mld, "Cancel", &entry->rxb);
+ list_del(&entry->list);
+ list_add_tail(&entry->list, &cancel_list);
+ }
+ }
+
+ spin_unlock_bh(&mld->async_handlers_lock);
+
+ /* Free the matching entries outside of the spinlock */
+ list_for_each_entry_safe(entry, tmp, &cancel_list, list) {
+ iwl_free_rxb(&entry->rxb);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/notif.h b/drivers/net/wireless/intel/iwlwifi/mld/notif.h
new file mode 100644
index 000000000000..2eaa1d4e138e
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/notif.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_notif_h__
+#define __iwl_mld_notif_h__
+
+struct iwl_mld;
+
+void iwl_mld_rx(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb);
+
+void iwl_mld_rx_rss(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, unsigned int queue);
+
+void iwl_mld_async_handlers_wk(struct wiphy *wiphy, struct wiphy_work *wk);
+
+void iwl_mld_purge_async_handlers_list(struct iwl_mld *mld);
+
+enum iwl_mld_object_type {
+ IWL_MLD_OBJECT_TYPE_NONE,
+ IWL_MLD_OBJECT_TYPE_LINK,
+ IWL_MLD_OBJECT_TYPE_STA,
+ IWL_MLD_OBJECT_TYPE_VIF,
+ IWL_MLD_OBJECT_TYPE_ROC,
+ IWL_MLD_OBJECT_TYPE_SCAN,
+ IWL_MLD_OBJECT_TYPE_FTM_REQ,
+};
+
+void iwl_mld_cancel_notifications_of_object(struct iwl_mld *mld,
+ enum iwl_mld_object_type obj_type,
+ u32 obj_id);
+void iwl_mld_delete_handlers(struct iwl_mld *mld, const u16 *cmds, int n_cmds);
+
+#endif /* __iwl_mld_notif_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/phy.c b/drivers/net/wireless/intel/iwlwifi/mld/phy.c
new file mode 100644
index 000000000000..2fbc8090088b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/phy.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include <net/mac80211.h>
+
+#include "phy.h"
+#include "hcmd.h"
+#include "fw/api/phy-ctxt.h"
+
+int iwl_mld_allocate_fw_phy_id(struct iwl_mld *mld)
+{
+ int id;
+ unsigned long used = mld->used_phy_ids;
+
+ for_each_clear_bit(id, &used, NUM_PHY_CTX) {
+ mld->used_phy_ids |= BIT(id);
+ return id;
+ }
+
+ return -ENOSPC;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_allocate_fw_phy_id);
+
+struct iwl_mld_chanctx_usage_data {
+ struct iwl_mld *mld;
+ struct ieee80211_chanctx_conf *ctx;
+ bool use_def;
+};
+
+static bool iwl_mld_chanctx_fils_enabled(struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ if (vif->type != NL80211_IFTYPE_AP)
+ return false;
+
+ return cfg80211_channel_is_psc(ctx->def.chan) ||
+ (ctx->def.chan->band == NL80211_BAND_6GHZ &&
+ ctx->def.width >= NL80211_CHAN_WIDTH_80);
+}
+
+static void iwl_mld_chanctx_usage_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_chanctx_usage_data *data = _data;
+ struct ieee80211_bss_conf *link_conf;
+ int link_id;
+
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ if (rcu_access_pointer(link_conf->chanctx_conf) != data->ctx)
+ continue;
+
+ if (iwl_mld_chanctx_fils_enabled(vif, data->ctx))
+ data->use_def = true;
+ }
+}
+
+struct cfg80211_chan_def *
+iwl_mld_get_chandef_from_chanctx(struct iwl_mld *mld,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mld_chanctx_usage_data data = {
+ .mld = mld,
+ .ctx = ctx,
+ };
+
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_chanctx_usage_iter,
+ &data);
+
+ return data.use_def ? &ctx->def : &ctx->min_def;
+}
+
+static u8
+iwl_mld_nl80211_width_to_fw(enum nl80211_chan_width width)
+{
+ switch (width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ return IWL_PHY_CHANNEL_MODE20;
+ case NL80211_CHAN_WIDTH_40:
+ return IWL_PHY_CHANNEL_MODE40;
+ case NL80211_CHAN_WIDTH_80:
+ return IWL_PHY_CHANNEL_MODE80;
+ case NL80211_CHAN_WIDTH_160:
+ return IWL_PHY_CHANNEL_MODE160;
+ case NL80211_CHAN_WIDTH_320:
+ return IWL_PHY_CHANNEL_MODE320;
+ default:
+ WARN(1, "Invalid channel width=%u", width);
+ return IWL_PHY_CHANNEL_MODE20;
+ }
+}
+
+/* Maps the driver specific control channel position (relative to the center
+ * freq) definitions to the fw values
+ */
+u8 iwl_mld_get_fw_ctrl_pos(const struct cfg80211_chan_def *chandef)
+{
+ int offs = chandef->chan->center_freq - chandef->center_freq1;
+ int abs_offs = abs(offs);
+ u8 ret;
+
+ if (offs == 0) {
+ /* The FW is expected to check the control channel position only
+ * when in HT/VHT and the channel width is not 20MHz. Return
+ * this value as the default one.
+ */
+ return 0;
+ }
+
+ /* this results in a value 0-7, i.e. fitting into 0b0111 */
+ ret = (abs_offs - 10) / 20;
+ /* But we need the value to be in 0b1011 because 0b0100 is
+ * IWL_PHY_CTRL_POS_ABOVE, so shift bit 2 up to land in
+ * IWL_PHY_CTRL_POS_OFFS_EXT (0b1000)
+ */
+ ret = (ret & IWL_PHY_CTRL_POS_OFFS_MSK) |
+ ((ret & BIT(2)) << 1);
+ /* and add the above bit */
+ ret |= (offs > 0) * IWL_PHY_CTRL_POS_ABOVE;
+
+ return ret;
+}
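+
+/* Worked example (illustrative only, assuming IWL_PHY_CTRL_POS_OFFS_MSK
+ * covers the low bits as described above): for an 80 MHz chandef with
+ * center_freq1 = 5210 MHz and a control channel at 5180 MHz, offs = -30 and
+ * abs_offs = 30, so ret = (30 - 10) / 20 = 1. Bit 2 is clear, so nothing is
+ * shifted into IWL_PHY_CTRL_POS_OFFS_EXT, and since offs < 0 the
+ * IWL_PHY_CTRL_POS_ABOVE bit stays clear as well, giving a final value of 1.
+ */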
+
+int iwl_mld_phy_fw_action(struct iwl_mld *mld,
+ struct ieee80211_chanctx_conf *ctx, u32 action)
+{
+ struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);
+ struct cfg80211_chan_def *chandef = &phy->chandef;
+ struct iwl_phy_context_cmd cmd = {
+ .id_and_color = cpu_to_le32(phy->fw_id),
+ .action = cpu_to_le32(action),
+ .puncture_mask = cpu_to_le16(chandef->punctured),
+ /* Channel info */
+ .ci.channel = cpu_to_le32(chandef->chan->hw_value),
+ .ci.band = iwl_mld_nl80211_band_to_fw(chandef->chan->band),
+ .ci.width = iwl_mld_nl80211_width_to_fw(chandef->width),
+ .ci.ctrl_pos = iwl_mld_get_fw_ctrl_pos(chandef),
+ };
+ int ret;
+
+ if (ctx->ap.chan) {
+ cmd.sbb_bandwidth =
+ iwl_mld_nl80211_width_to_fw(ctx->ap.width);
+ cmd.sbb_ctrl_channel_loc = iwl_mld_get_fw_ctrl_pos(&ctx->ap);
+ }
+
+ ret = iwl_mld_send_cmd_pdu(mld, PHY_CONTEXT_CMD, &cmd);
+ if (ret)
+ IWL_ERR(mld, "Failed to send PHY_CONTEXT_CMD ret = %d\n", ret);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/phy.h b/drivers/net/wireless/intel/iwlwifi/mld/phy.h
new file mode 100644
index 000000000000..2212a89321b7
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/phy.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_phy_h__
+#define __iwl_mld_phy_h__
+
+#include "mld.h"
+
+/**
+ * struct iwl_mld_phy - PHY configuration parameters
+ *
+ * @fw_id: fw id of the phy.
+ * @chandef: the last chandef that mac80211 configured the driver
+ * with. Used to detect a no-op when the chanctx changes.
+ * @channel_load_by_us: channel load on this channel caused by
+ * the NIC itself, as indicated by firmware
+ * @avg_channel_load_not_by_us: averaged channel load on this channel caused by
+ * others. This value is invalid when in EMLSR (due to FW limitations)
+ * @mld: pointer to the MLD context
+ */
+struct iwl_mld_phy {
+ /* Add here fields that need clean up on hw restart */
+ struct_group(zeroed_on_hw_restart,
+ u8 fw_id;
+ struct cfg80211_chan_def chandef;
+ );
+ /* And here fields that survive a hw restart */
+ u32 channel_load_by_us;
+ u32 avg_channel_load_not_by_us;
+ struct iwl_mld *mld;
+};
+
+static inline struct iwl_mld_phy *
+iwl_mld_phy_from_mac80211(struct ieee80211_chanctx_conf *channel)
+{
+ return (void *)channel->drv_priv;
+}
+
+/* Cleanup function for struct iwl_mld_phy, will be called in restart */
+static inline void
+iwl_mld_cleanup_phy(struct iwl_mld *mld, struct iwl_mld_phy *phy)
+{
+ CLEANUP_STRUCT(phy);
+}
+
+int iwl_mld_allocate_fw_phy_id(struct iwl_mld *mld);
+int iwl_mld_phy_fw_action(struct iwl_mld *mld,
+ struct ieee80211_chanctx_conf *ctx, u32 action);
+struct cfg80211_chan_def *
+iwl_mld_get_chandef_from_chanctx(struct iwl_mld *mld,
+ struct ieee80211_chanctx_conf *ctx);
+u8 iwl_mld_get_fw_ctrl_pos(const struct cfg80211_chan_def *chandef);
+
+#endif /* __iwl_mld_phy_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/power.c b/drivers/net/wireless/intel/iwlwifi/mld/power.c
new file mode 100644
index 000000000000..2f16c174b57e
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/power.c
@@ -0,0 +1,396 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include <net/mac80211.h>
+
+#include "mld.h"
+#include "hcmd.h"
+#include "power.h"
+#include "iface.h"
+#include "link.h"
+#include "constants.h"
+
+static void iwl_mld_vif_ps_iterator(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ bool *ps_enable = (bool *)data;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ *ps_enable &= !mld_vif->ps_disabled;
+}
+
+int iwl_mld_update_device_power(struct iwl_mld *mld, bool d3)
+{
+ struct iwl_device_power_cmd cmd = {};
+ bool enable_ps = false;
+
+ if (iwlmld_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) {
+ enable_ps = true;
+
+ /* Disable power save if any STA interface has
+ * power save turned off
+ */
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_vif_ps_iterator,
+ &enable_ps);
+ }
+
+ if (enable_ps)
+ cmd.flags |=
+ cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
+
+ if (d3)
+ cmd.flags |=
+ cpu_to_le16(DEVICE_POWER_FLAGS_NO_SLEEP_TILL_D3_MSK);
+
+ IWL_DEBUG_POWER(mld,
+ "Sending device power command with flags = 0x%X\n",
+ cmd.flags);
+
+ return iwl_mld_send_cmd_pdu(mld, POWER_TABLE_CMD, &cmd);
+}
+
+int iwl_mld_enable_beacon_filter(struct iwl_mld *mld,
+ const struct ieee80211_bss_conf *link_conf,
+ bool d3)
+{
+ struct iwl_beacon_filter_cmd cmd = {
+ IWL_BF_CMD_CONFIG_DEFAULTS,
+ .bf_enable_beacon_filter = cpu_to_le32(1),
+ .ba_enable_beacon_abort = cpu_to_le32(1),
+ };
+
+ if (ieee80211_vif_type_p2p(link_conf->vif) != NL80211_IFTYPE_STATION)
+ return 0;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (iwl_mld_vif_from_mac80211(link_conf->vif)->disable_bf)
+ return 0;
+#endif
+
+ if (link_conf->cqm_rssi_thold) {
+ cmd.bf_energy_delta =
+ cpu_to_le32(link_conf->cqm_rssi_hyst);
+ /* fw uses an absolute value for this */
+ cmd.bf_roaming_state =
+ cpu_to_le32(-link_conf->cqm_rssi_thold);
+ }
+
+ if (d3)
+ cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
+
+ return iwl_mld_send_cmd_pdu(mld, REPLY_BEACON_FILTERING_CMD,
+ &cmd);
+}
+
+int iwl_mld_disable_beacon_filter(struct iwl_mld *mld,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_beacon_filter_cmd cmd = {};
+
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
+ return 0;
+
+ return iwl_mld_send_cmd_pdu(mld, REPLY_BEACON_FILTERING_CMD,
+ &cmd);
+}
+
+static bool iwl_mld_power_is_radar(struct iwl_mld *mld,
+ const struct ieee80211_bss_conf *link_conf)
+{
+ const struct ieee80211_chanctx_conf *chanctx_conf;
+
+ chanctx_conf = wiphy_dereference(mld->wiphy, link_conf->chanctx_conf);
+
+ if (WARN_ON(!chanctx_conf))
+ return false;
+
+ return chanctx_conf->def.chan->flags & IEEE80211_CHAN_RADAR;
+}
+
+static void iwl_mld_power_configure_uapsd(struct iwl_mld *mld,
+ struct iwl_mld_link *link,
+ struct iwl_mac_power_cmd *cmd,
+ bool ps_poll)
+{
+ bool tid_found = false;
+
+ cmd->rx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MLD_UAPSD_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MLD_UAPSD_TX_DATA_TIMEOUT);
+
+ /* set advanced pm flag with no uapsd ACs to enable ps-poll */
+ if (ps_poll) {
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+ return;
+ }
+
+ for (enum ieee80211_ac_numbers ac = IEEE80211_AC_VO;
+ ac <= IEEE80211_AC_BK;
+ ac++) {
+ if (!link->queue_params[ac].uapsd)
+ continue;
+
+ cmd->flags |=
+ cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK |
+ POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK);
+
+ cmd->uapsd_ac_flags |= BIT(ac);
+
+ /* QNDP TID - the highest TID with no admission control */
+ if (!tid_found && !link->queue_params[ac].acm) {
+ tid_found = true;
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ cmd->qndp_tid = 6;
+ break;
+ case IEEE80211_AC_VI:
+ cmd->qndp_tid = 5;
+ break;
+ case IEEE80211_AC_BE:
+ cmd->qndp_tid = 0;
+ break;
+ case IEEE80211_AC_BK:
+ cmd->qndp_tid = 1;
+ break;
+ }
+ }
+ }
+
+ if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
+ BIT(IEEE80211_AC_VI) |
+ BIT(IEEE80211_AC_BE) |
+ BIT(IEEE80211_AC_BK))) {
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
+ cmd->snooze_interval = cpu_to_le16(IWL_MLD_PS_SNOOZE_INTERVAL);
+ cmd->snooze_window = cpu_to_le16(IWL_MLD_PS_SNOOZE_WINDOW);
+ }
+
+ cmd->uapsd_max_sp = mld->hw->uapsd_max_sp_len;
+}
+
+static void
+iwl_mld_power_config_skip_dtim(struct iwl_mld *mld,
+ const struct ieee80211_bss_conf *link_conf,
+ struct iwl_mac_power_cmd *cmd)
+{
+ unsigned int dtimper_tu;
+ unsigned int dtimper;
+ unsigned int skip;
+
+ dtimper = link_conf->dtim_period ?: 1;
+ dtimper_tu = dtimper * link_conf->beacon_int;
+
+ if (dtimper >= 10 || iwl_mld_power_is_radar(mld, link_conf))
+ return;
+
+ if (WARN_ON(!dtimper_tu))
+ return;
+
+ /* configure skip over dtim up to 900 TU DTIM interval */
+ skip = max_t(int, 1, 900 / dtimper_tu);
+
+ cmd->skip_dtim_periods = skip;
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+}
+
+#define POWER_KEEP_ALIVE_PERIOD_SEC 25
+static void iwl_mld_power_build_cmd(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_power_cmd *cmd,
+ bool d3)
+{
+ int dtimper, bi;
+ int keep_alive;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct ieee80211_bss_conf *link_conf = &vif->bss_conf;
+ struct iwl_mld_link *link = &mld_vif->deflink;
+ bool ps_poll = false;
+
+ cmd->id_and_color = cpu_to_le32(mld_vif->fw_id);
+
+ if (ieee80211_vif_is_mld(vif)) {
+ int link_id;
+
+ if (WARN_ON(!vif->active_links))
+ return;
+
+ /* The firmware consumes a single configuration for the vif
+ * and can't differentiate between links, so just pick the lowest
+ * link_id's configuration and use that.
+ */
+ link_id = __ffs(vif->active_links);
+ link_conf = link_conf_dereference_check(vif, link_id);
+ link = iwl_mld_link_dereference_check(mld_vif, link_id);
+
+ if (WARN_ON(!link_conf || !link))
+ return;
+ }
+ dtimper = link_conf->dtim_period;
+ bi = link_conf->beacon_int;
+
+ /* Regardless of the power management state, the driver must set the
+ * keep alive period. The FW will use it to send keep alive NDPs
+ * immediately after association. Make sure the keep alive period
+ * is at least 3 * DTIM
+ */
+ keep_alive = DIV_ROUND_UP(ieee80211_tu_to_usec(3 * dtimper * bi),
+ USEC_PER_SEC);
+ keep_alive = max(keep_alive, POWER_KEEP_ALIVE_PERIOD_SEC);
+ cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
+
+ if (iwlmld_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
+
+ if (!vif->cfg.ps || iwl_mld_tdls_sta_count(mld) > 0)
+ return;
+
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
+
+ /* firmware supports LPRX for beacons at rate 1 Mbps or 6 Mbps only */
+ if (link_conf->beacon_rate &&
+ (link_conf->beacon_rate->bitrate == 10 ||
+ link_conf->beacon_rate->bitrate == 60)) {
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
+ cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD;
+ }
+
+ if (d3) {
+ iwl_mld_power_config_skip_dtim(mld, link_conf, cmd);
+ cmd->rx_data_timeout =
+ cpu_to_le32(IWL_MLD_WOWLAN_PS_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout =
+ cpu_to_le32(IWL_MLD_WOWLAN_PS_TX_DATA_TIMEOUT);
+ } else if (iwl_mld_vif_low_latency(mld_vif) && vif->p2p) {
+ cmd->tx_data_timeout =
+ cpu_to_le32(IWL_MLD_SHORT_PS_TX_DATA_TIMEOUT);
+ cmd->rx_data_timeout =
+ cpu_to_le32(IWL_MLD_SHORT_PS_RX_DATA_TIMEOUT);
+ } else {
+ cmd->rx_data_timeout =
+ cpu_to_le32(IWL_MLD_DEFAULT_PS_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout =
+ cpu_to_le32(IWL_MLD_DEFAULT_PS_TX_DATA_TIMEOUT);
+ }
+
+ /* uAPSD is only enabled for specific certifications. In those cases,
+ * mac80211 will allow uAPSD. Always call iwl_mld_power_configure_uapsd,
+ * which will look at what mac80211 allows.
+ */
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ ps_poll = mld_vif->use_ps_poll;
+#endif
+ iwl_mld_power_configure_uapsd(mld, link, cmd, ps_poll);
+}
+
+int iwl_mld_update_mac_power(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ bool d3)
+{
+ struct iwl_mac_power_cmd cmd = {};
+
+ iwl_mld_power_build_cmd(mld, vif, &cmd, d3);
+
+ return iwl_mld_send_cmd_pdu(mld, MAC_PM_POWER_TABLE, &cmd);
+}
+
+static void
+iwl_mld_tpe_sta_cmd_data(struct iwl_txpower_constraints_cmd *cmd,
+ const struct ieee80211_bss_conf *link)
+{
+ u8 i;
+
+ /* NOTE: the 0 here is IEEE80211_TPE_CAT_6GHZ_DEFAULT,
+ * we fully ignore IEEE80211_TPE_CAT_6GHZ_SUBORDINATE
+ */
+
+ BUILD_BUG_ON(ARRAY_SIZE(cmd->psd_pwr) !=
+ ARRAY_SIZE(link->tpe.psd_local[0].power));
+
+ /* if not valid, mac80211 puts default (max value) */
+ for (i = 0; i < ARRAY_SIZE(cmd->psd_pwr); i++)
+ cmd->psd_pwr[i] = min(link->tpe.psd_local[0].power[i],
+ link->tpe.psd_reg_client[0].power[i]);
+
+ BUILD_BUG_ON(ARRAY_SIZE(cmd->eirp_pwr) !=
+ ARRAY_SIZE(link->tpe.max_local[0].power));
+
+ for (i = 0; i < ARRAY_SIZE(cmd->eirp_pwr); i++)
+ cmd->eirp_pwr[i] = min(link->tpe.max_local[0].power[i],
+ link->tpe.max_reg_client[0].power[i]);
+}
+
+void
+iwl_mld_send_ap_tx_power_constraint_cmd(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_txpower_constraints_cmd cmd = {};
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (!mld_link->active)
+ return;
+
+ if (link->chanreq.oper.chan->band != NL80211_BAND_6GHZ)
+ return;
+
+ cmd.link_id = cpu_to_le16(mld_link->fw_id);
+ memset(cmd.psd_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd.psd_pwr));
+ memset(cmd.eirp_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd.eirp_pwr));
+
+ if (vif->type == NL80211_IFTYPE_AP) {
+ cmd.ap_type = cpu_to_le16(IWL_6GHZ_AP_TYPE_VLP);
+ } else if (link->power_type == IEEE80211_REG_UNSET_AP) {
+ return;
+ } else {
+ cmd.ap_type = cpu_to_le16(link->power_type - 1);
+ iwl_mld_tpe_sta_cmd_data(&cmd, link);
+ }
+
+ ret = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(PHY_OPS_GROUP,
+ AP_TX_POWER_CONSTRAINTS_CMD),
+ &cmd);
+ if (ret)
+ IWL_ERR(mld,
+ "failed to send AP_TX_POWER_CONSTRAINTS_CMD (%d)\n",
+ ret);
+}
+
+int iwl_mld_set_tx_power(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf,
+ s16 tx_power)
+{
+ u32 cmd_id = REDUCE_TX_POWER_CMD;
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link_conf);
+ u16 u_tx_power = tx_power == IWL_DEFAULT_MAX_TX_POWER ?
+ IWL_DEV_MAX_TX_POWER : 8 * tx_power;
+ struct iwl_dev_tx_power_cmd cmd = {
+ /* Those fields sit on the same place for v9 and v10 */
+ .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_LINK),
+ .common.pwr_restriction = cpu_to_le16(u_tx_power),
+ };
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id,
+ IWL_FW_CMD_VER_UNKNOWN);
+ int len = sizeof(cmd.common);
+
+ if (WARN_ON(!mld_link))
+ return -ENODEV;
+
+ cmd.common.link_id = cpu_to_le32(mld_link->fw_id);
+
+ if (cmd_ver == 10)
+ len += sizeof(cmd.v10);
+ else if (cmd_ver == 9)
+ len += sizeof(cmd.v9);
+
+ return iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd, len);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/power.h b/drivers/net/wireless/intel/iwlwifi/mld/power.h
new file mode 100644
index 000000000000..05ce27bef106
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/power.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_power_h__
+#define __iwl_mld_power_h__
+
+#include <net/mac80211.h>
+
+#include "mld.h"
+
+int iwl_mld_update_device_power(struct iwl_mld *mld, bool d3);
+
+int iwl_mld_enable_beacon_filter(struct iwl_mld *mld,
+ const struct ieee80211_bss_conf *link_conf,
+ bool d3);
+
+int iwl_mld_disable_beacon_filter(struct iwl_mld *mld,
+ struct ieee80211_vif *vif);
+
+int iwl_mld_update_mac_power(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ bool d3);
+
+void
+iwl_mld_send_ap_tx_power_constraint_cmd(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link);
+
+int iwl_mld_set_tx_power(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf,
+ s16 tx_power);
+
+#endif /* __iwl_mld_power_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/ptp.c b/drivers/net/wireless/intel/iwlwifi/mld/ptp.c
new file mode 100644
index 000000000000..d5c3f853d96c
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/ptp.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+
+#include "mld.h"
+#include "iwl-debug.h"
+#include "hcmd.h"
+#include "ptp.h"
+#include <linux/timekeeping.h>
+
+/* The scaled_ppm parameter is ppm (parts per million) with a 16-bit fractional
+ * part, which means that a value of 1 actually means 2^-16 ppm, and
+ * 2^16=65536 is 1 ppm.
+ */
+#define PTP_SCALE_FACTOR 65536000000ULL
+
+#define IWL_PTP_GP2_WRAP 0x100000000ULL
+#define IWL_PTP_WRAP_TIME (3600 * HZ)
+#define IWL_PTP_WRAP_THRESHOLD_USEC (5000)
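+
+/* Worked example (illustrative only): with scaled_ppm = 65536 (i.e. +1 ppm),
+ * scaled_freq becomes PTP_SCALE_FACTOR + 65536, so in
+ * iwl_mld_ptp_get_adj_time() every 1 s (1e9 ns) of GP2 time is scaled by
+ * (1 + 65536/65536000000) and contributes an extra 1000 ns (1 us) to the
+ * adjusted time.
+ */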
+
+static int iwl_mld_get_systime(struct iwl_mld *mld, u32 *gp2)
+{
+ *gp2 = iwl_read_prph(mld->trans, mld->trans->cfg->gp2_reg_addr);
+
+ if (*gp2 == 0x5a5a5a5a)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void iwl_mld_ptp_update_new_read(struct iwl_mld *mld, u32 gp2)
+{
+ IWL_DEBUG_PTP(mld, "PTP: last_gp2=%u, new gp2 read=%u\n",
+ mld->ptp_data.last_gp2, gp2);
+
+ /* If the difference is above the threshold, assume it's a wraparound.
+ * Otherwise assume it's an old read and ignore it.
+ */
+ if (gp2 < mld->ptp_data.last_gp2) {
+ if (mld->ptp_data.last_gp2 - gp2 <
+ IWL_PTP_WRAP_THRESHOLD_USEC) {
+ IWL_DEBUG_PTP(mld,
+ "PTP: ignore old read (gp2=%u, last_gp2=%u)\n",
+ gp2, mld->ptp_data.last_gp2);
+ return;
+ }
+
+ mld->ptp_data.wrap_counter++;
+ IWL_DEBUG_PTP(mld,
+ "PTP: wraparound detected (new counter=%u)\n",
+ mld->ptp_data.wrap_counter);
+ }
+
+ mld->ptp_data.last_gp2 = gp2;
+ schedule_delayed_work(&mld->ptp_data.dwork, IWL_PTP_WRAP_TIME);
+}
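+
+/* Example of the heuristic above (illustrative numbers only): with
+ * last_gp2 = 4294966000 and a new read of 500, the difference is far larger
+ * than IWL_PTP_WRAP_THRESHOLD_USEC, so the counter wrapped and wrap_counter
+ * is incremented; with last_gp2 = 4000 and a new read of 1000, the
+ * difference is only 3000 usec, so the value is treated as a stale read and
+ * dropped.
+ */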
+
+u64 iwl_mld_ptp_get_adj_time(struct iwl_mld *mld, u64 base_time_ns)
+{
+ struct ptp_data *data = &mld->ptp_data;
+ u64 scale_time_gp2_ns = mld->ptp_data.scale_update_gp2 * NSEC_PER_USEC;
+ u64 res;
+ u64 diff;
+ s64 scaled_diff;
+
+ lockdep_assert_held(&data->lock);
+
+ iwl_mld_ptp_update_new_read(mld,
+ div64_u64(base_time_ns, NSEC_PER_USEC));
+
+ base_time_ns = base_time_ns +
+ (data->wrap_counter * IWL_PTP_GP2_WRAP * NSEC_PER_USEC);
+
+ /* It is possible that a GP2 timestamp was received from fw before the
+ * last scale update.
+ */
+ if (base_time_ns < scale_time_gp2_ns) {
+ diff = scale_time_gp2_ns - base_time_ns;
+ scaled_diff = -mul_u64_u64_div_u64(diff,
+ data->scaled_freq,
+ PTP_SCALE_FACTOR);
+ } else {
+ diff = base_time_ns - scale_time_gp2_ns;
+ scaled_diff = mul_u64_u64_div_u64(diff,
+ data->scaled_freq,
+ PTP_SCALE_FACTOR);
+ }
+
+ IWL_DEBUG_PTP(mld, "base_time=%llu diff ns=%llu scaled_diff_ns=%lld\n",
+ (unsigned long long)base_time_ns,
+ (unsigned long long)diff, (long long)scaled_diff);
+
+ res = data->scale_update_adj_time_ns + data->delta + scaled_diff;
+
+ IWL_DEBUG_PTP(mld, "scale_update_ns=%llu delta=%lld adj=%llu\n",
+ (unsigned long long)data->scale_update_adj_time_ns,
+ (long long)data->delta, (unsigned long long)res);
+ return res;
+}
+
+static int iwl_mld_ptp_gettime(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
+{
+ struct iwl_mld *mld = container_of(ptp, struct iwl_mld,
+ ptp_data.ptp_clock_info);
+ struct ptp_data *data = &mld->ptp_data;
+ u32 gp2;
+ u64 ns;
+
+ if (iwl_mld_get_systime(mld, &gp2)) {
+ IWL_DEBUG_PTP(mld, "PTP: gettime: failed to read systime\n");
+ return -EIO;
+ }
+
+ spin_lock_bh(&data->lock);
+ ns = iwl_mld_ptp_get_adj_time(mld, (u64)gp2 * NSEC_PER_USEC);
+ spin_unlock_bh(&data->lock);
+
+ *ts = ns_to_timespec64(ns);
+ return 0;
+}
+
+static int iwl_mld_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct iwl_mld *mld = container_of(ptp, struct iwl_mld,
+ ptp_data.ptp_clock_info);
+ struct ptp_data *data = &mld->ptp_data;
+
+ spin_lock_bh(&data->lock);
+ data->delta += delta;
+ IWL_DEBUG_PTP(mld, "delta=%lld, new delta=%lld\n", (long long)delta,
+ (long long)data->delta);
+ spin_unlock_bh(&data->lock);
+ return 0;
+}
+
+static int iwl_mld_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct iwl_mld *mld = container_of(ptp, struct iwl_mld,
+ ptp_data.ptp_clock_info);
+ struct ptp_data *data = &mld->ptp_data;
+ u32 gp2;
+
+ /* Must call iwl_mld_ptp_get_adj_time() before updating
+ * data->scale_update_gp2 or data->scaled_freq since
+ * scale_update_adj_time_ns should reflect the previous scaled_freq.
+ */
+ if (iwl_mld_get_systime(mld, &gp2)) {
+ IWL_DEBUG_PTP(mld, "adjfine: failed to read systime\n");
+ return -EBUSY;
+ }
+
+ spin_lock_bh(&data->lock);
+ data->scale_update_adj_time_ns =
+ iwl_mld_ptp_get_adj_time(mld, (u64)gp2 * NSEC_PER_USEC);
+ data->scale_update_gp2 = gp2;
+
+ /* scale_update_adj_time_ns now reflects the configured delta, the
+ * wrap_counter and the previous scaled frequency. Thus delta and
+ * wrap_counter should be reset, and the scaled frequency is updated
+ * to the new frequency.
+ */
+ data->delta = 0;
+ data->wrap_counter = 0;
+ data->scaled_freq = PTP_SCALE_FACTOR + scaled_ppm;
+ IWL_DEBUG_PTP(mld, "adjfine: scaled_ppm=%ld new=%llu\n",
+ scaled_ppm, (unsigned long long)data->scaled_freq);
+ spin_unlock_bh(&data->lock);
+ return 0;
+}
+
+static void iwl_mld_ptp_work(struct work_struct *wk)
+{
+ struct iwl_mld *mld = container_of(wk, struct iwl_mld,
+ ptp_data.dwork.work);
+ struct ptp_data *data = &mld->ptp_data;
+ u32 gp2;
+
+ spin_lock_bh(&data->lock);
+ if (!iwl_mld_get_systime(mld, &gp2))
+ iwl_mld_ptp_update_new_read(mld, gp2);
+ else
+ IWL_DEBUG_PTP(mld, "PTP work: failed to read GP2\n");
+ spin_unlock_bh(&data->lock);
+}
+
+static int
+iwl_mld_get_crosstimestamp_fw(struct iwl_mld *mld, u32 *gp2, u64 *sys_time)
+{
+ struct iwl_synced_time_cmd synced_time_cmd = {
+ .operation = cpu_to_le32(IWL_SYNCED_TIME_OPERATION_READ_BOTH)
+ };
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(DATA_PATH_GROUP, WNM_PLATFORM_PTM_REQUEST_CMD),
+ .flags = CMD_WANT_SKB,
+ .data[0] = &synced_time_cmd,
+ .len[0] = sizeof(synced_time_cmd),
+ };
+ struct iwl_synced_time_rsp *resp;
+ struct iwl_rx_packet *pkt;
+ int ret;
+ u64 gp2_10ns;
+
+ wiphy_lock(mld->wiphy);
+ ret = iwl_mld_send_cmd(mld, &cmd);
+ wiphy_unlock(mld->wiphy);
+ if (ret)
+ return ret;
+
+ pkt = cmd.resp_pkt;
+
+ if (iwl_rx_packet_payload_len(pkt) != sizeof(*resp)) {
+ IWL_DEBUG_PTP(mld, "PTP: Invalid PTM command response\n");
+ iwl_free_resp(&cmd);
+ return -EIO;
+ }
+
+ resp = (void *)pkt->data;
+
+ gp2_10ns = (u64)le32_to_cpu(resp->gp2_timestamp_hi) << 32 |
+ le32_to_cpu(resp->gp2_timestamp_lo);
+ *gp2 = div_u64(gp2_10ns, 100);
+
+ *sys_time = (u64)le32_to_cpu(resp->platform_timestamp_hi) << 32 |
+ le32_to_cpu(resp->platform_timestamp_lo);
+
+ iwl_free_resp(&cmd);
+ return ret;
+}
+
+static int
+iwl_mld_phc_get_crosstimestamp(struct ptp_clock_info *ptp,
+ struct system_device_crosststamp *xtstamp)
+{
+ struct iwl_mld *mld = container_of(ptp, struct iwl_mld,
+ ptp_data.ptp_clock_info);
+ struct ptp_data *data = &mld->ptp_data;
+ int ret = 0;
+ /* Raw value read from GP2 register in usec */
+ u32 gp2;
+ /* GP2 value in ns */
+ s64 gp2_ns;
+ /* System (wall) time */
+ ktime_t sys_time;
+
+ memset(xtstamp, 0, sizeof(struct system_device_crosststamp));
+
+ ret = iwl_mld_get_crosstimestamp_fw(mld, &gp2, &sys_time);
+ if (ret) {
+ IWL_DEBUG_PTP(mld,
+ "PTP: fw get_crosstimestamp failed (ret=%d)\n",
+ ret);
+ return ret;
+ }
+
+ spin_lock_bh(&data->lock);
+ gp2_ns = iwl_mld_ptp_get_adj_time(mld, (u64)gp2 * NSEC_PER_USEC);
+ spin_unlock_bh(&data->lock);
+
+ IWL_DEBUG_PTP(mld,
+ "Got Sync Time: GP2:%u, last_GP2: %u, GP2_ns: %lld, sys_time: %lld\n",
+ gp2, mld->ptp_data.last_gp2, gp2_ns, (s64)sys_time);
+
+ /* System monotonic raw time is not used */
+ xtstamp->device = ns_to_ktime(gp2_ns);
+ xtstamp->sys_realtime = sys_time;
+
+ return ret;
+}
+
+void iwl_mld_ptp_init(struct iwl_mld *mld)
+{
+ if (WARN_ON(mld->ptp_data.ptp_clock))
+ return;
+
+ spin_lock_init(&mld->ptp_data.lock);
+ INIT_DELAYED_WORK(&mld->ptp_data.dwork, iwl_mld_ptp_work);
+
+ mld->ptp_data.ptp_clock_info.owner = THIS_MODULE;
+ mld->ptp_data.ptp_clock_info.gettime64 = iwl_mld_ptp_gettime;
+ mld->ptp_data.ptp_clock_info.max_adj = 0x7fffffff;
+ mld->ptp_data.ptp_clock_info.adjtime = iwl_mld_ptp_adjtime;
+ mld->ptp_data.ptp_clock_info.adjfine = iwl_mld_ptp_adjfine;
+ mld->ptp_data.scaled_freq = PTP_SCALE_FACTOR;
+ mld->ptp_data.ptp_clock_info.getcrosststamp =
+ iwl_mld_phc_get_crosstimestamp;
+
+ /* Give a short 'friendly name' to identify the PHC clock */
+ snprintf(mld->ptp_data.ptp_clock_info.name,
+ sizeof(mld->ptp_data.ptp_clock_info.name),
+ "%s", "iwlwifi-PTP");
+
+ mld->ptp_data.ptp_clock =
+ ptp_clock_register(&mld->ptp_data.ptp_clock_info, mld->dev);
+
+ if (IS_ERR_OR_NULL(mld->ptp_data.ptp_clock)) {
+ IWL_ERR(mld, "Failed to register PHC clock (%ld)\n",
+ PTR_ERR(mld->ptp_data.ptp_clock));
+ mld->ptp_data.ptp_clock = NULL;
+ } else {
+ IWL_INFO(mld, "Registered PHC clock: %s, with index: %d\n",
+ mld->ptp_data.ptp_clock_info.name,
+ ptp_clock_index(mld->ptp_data.ptp_clock));
+ }
+}
+
+void iwl_mld_ptp_remove(struct iwl_mld *mld)
+{
+ if (mld->ptp_data.ptp_clock) {
+ IWL_INFO(mld, "Unregistering PHC clock: %s, with index: %d\n",
+ mld->ptp_data.ptp_clock_info.name,
+ ptp_clock_index(mld->ptp_data.ptp_clock));
+
+ ptp_clock_unregister(mld->ptp_data.ptp_clock);
+ mld->ptp_data.ptp_clock = NULL;
+ mld->ptp_data.last_gp2 = 0;
+ mld->ptp_data.wrap_counter = 0;
+ cancel_delayed_work_sync(&mld->ptp_data.dwork);
+ }
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/ptp.h b/drivers/net/wireless/intel/iwlwifi/mld/ptp.h
new file mode 100644
index 000000000000..f3d18dd304e5
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/ptp.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+#ifndef __iwl_mld_ptp_h__
+#define __iwl_mld_ptp_h__
+
+#include <linux/ptp_clock_kernel.h>
+
+/**
+ * struct ptp_data - PTP hardware clock data
+ *
+ * @ptp_clock: struct ptp_clock pointer returned by the ptp_clock_register()
+ * function.
+ * @ptp_clock_info: struct ptp_clock_info that describes a PTP hardware clock
+ * @lock: protects the time adjustments data
+ * @delta: delta between hardware clock and ptp clock in nanoseconds
+ * @scale_update_gp2: GP2 time when the scale was last updated
+ * @scale_update_adj_time_ns: adjusted time when the scale was last updated,
+ * in nanoseconds
+ * @scaled_freq: clock frequency offset, scaled to 65536000000
+ * @last_gp2: the last GP2 reading from the hardware, used for tracking GP2
+ * wraparounds
+ * @wrap_counter: number of wraparounds since scale_update_adj_time_ns
+ * @dwork: worker scheduled every 1 hour to detect GP2 wraparounds
+ */
+struct ptp_data {
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+
+ spinlock_t lock;
+ s64 delta;
+ u32 scale_update_gp2;
+ u64 scale_update_adj_time_ns;
+ u64 scaled_freq;
+ u32 last_gp2;
+ u32 wrap_counter;
+ struct delayed_work dwork;
+};
+
+void iwl_mld_ptp_init(struct iwl_mld *mld);
+void iwl_mld_ptp_remove(struct iwl_mld *mld);
+u64 iwl_mld_ptp_get_adj_time(struct iwl_mld *mld, u64 base_time_ns);
+
+#endif /* __iwl_mld_ptp_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c b/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c
new file mode 100644
index 000000000000..a75af8c1e8ab
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c
@@ -0,0 +1,393 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include <linux/dmi.h>
+
+#include "fw/regulatory.h"
+#include "fw/acpi.h"
+#include "fw/uefi.h"
+
+#include "regulatory.h"
+#include "mld.h"
+#include "hcmd.h"
+
+void iwl_mld_get_bios_tables(struct iwl_mld *mld)
+{
+ int ret;
+
+ iwl_acpi_get_guid_lock_status(&mld->fwrt);
+
+ ret = iwl_bios_get_ppag_table(&mld->fwrt);
+ if (ret < 0) {
+ IWL_DEBUG_RADIO(mld,
+ "PPAG BIOS table invalid or unavailable. (%d)\n",
+ ret);
+ }
+
+ ret = iwl_bios_get_wrds_table(&mld->fwrt);
+ if (ret < 0) {
+ IWL_DEBUG_RADIO(mld,
+ "WRDS SAR BIOS table invalid or unavailable. (%d)\n",
+ ret);
+
+ /* If not available, don't fail and don't bother with EWRD and
+ * WGDS
+ */
+
+ if (!iwl_bios_get_wgds_table(&mld->fwrt)) {
+ /* If basic SAR is not available, we check for WGDS,
+ * which should *not* be available either. If it is
+ * available, issue an error, because we can't use SAR
+ * Geo without basic SAR.
+ */
+ IWL_ERR(mld, "BIOS contains WGDS but no WRDS\n");
+ }
+
+ } else {
+ ret = iwl_bios_get_ewrd_table(&mld->fwrt);
+ /* If EWRD is not available, we can still use
+ * WRDS, so don't fail.
+ */
+ if (ret < 0)
+ IWL_DEBUG_RADIO(mld,
+ "EWRD SAR BIOS table invalid or unavailable. (%d)\n",
+ ret);
+
+ ret = iwl_bios_get_wgds_table(&mld->fwrt);
+ if (ret < 0)
+ IWL_DEBUG_RADIO(mld,
+ "Geo SAR BIOS table invalid or unavailable. (%d)\n",
+ ret);
+ /* we don't fail if the table is not available */
+ }
+
+ ret = iwl_uefi_get_uats_table(mld->trans, &mld->fwrt);
+ if (ret)
+ IWL_DEBUG_RADIO(mld, "failed to read UATS table (%d)\n", ret);
+}
+
+static int iwl_mld_geo_sar_init(struct iwl_mld *mld)
+{
+ u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD);
+ union iwl_geo_tx_power_profiles_cmd cmd;
+ u16 len;
+ u32 n_bands;
+ __le32 sk = cpu_to_le32(0);
+ int ret;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, ops) !=
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, ops));
+
+ cmd.v4.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES);
+
+ /* Only set to South Korea if the table revision is 1 */
+ if (mld->fwrt.geo_rev == 1)
+ sk = cpu_to_le32(1);
+
+ if (cmd_ver == 5) {
+ len = sizeof(cmd.v5);
+ n_bands = ARRAY_SIZE(cmd.v5.table[0]);
+ cmd.v5.table_revision = sk;
+ } else if (cmd_ver == 4) {
+ len = sizeof(cmd.v4);
+ n_bands = ARRAY_SIZE(cmd.v4.table[0]);
+ cmd.v4.table_revision = sk;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) !=
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, table));
+ /* the table is at the same position for all versions, so just use v4 */
+ ret = iwl_sar_geo_fill_table(&mld->fwrt, &cmd.v4.table[0][0],
+ n_bands, BIOS_GEO_MAX_PROFILE_NUM);
+
+ /* It is a valid scenario to not support SAR, or to be missing the WGDS
+ * table; in that case there is no need to send the command.
+ */
+ if (ret)
+ return 0;
+
+ return iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd, len);
+}
+
+int iwl_mld_config_sar_profile(struct iwl_mld *mld, int prof_a, int prof_b)
+{
+ u32 cmd_id = REDUCE_TX_POWER_CMD;
+ struct iwl_dev_tx_power_cmd cmd = {
+ .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
+ };
+ __le16 *per_chain;
+ int ret;
+ u16 len = sizeof(cmd.common);
+ u32 n_subbands;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ if (cmd_ver == 10) {
+ len += sizeof(cmd.v10);
+ n_subbands = IWL_NUM_SUB_BANDS_V2;
+ per_chain = &cmd.v10.per_chain[0][0][0];
+ cmd.v10.flags =
+ cpu_to_le32(mld->fwrt.reduced_power_flags);
+ } else if (cmd_ver == 9) {
+ len += sizeof(cmd.v9);
+ n_subbands = IWL_NUM_SUB_BANDS_V1;
+ per_chain = &cmd.v9.per_chain[0][0];
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ /* TODO: CDB - support IWL_NUM_CHAIN_TABLES_V2 */
+ ret = iwl_sar_fill_profile(&mld->fwrt, per_chain,
+ IWL_NUM_CHAIN_TABLES,
+ n_subbands, prof_a, prof_b);
+ /* return on error or if the profile is disabled (positive number) */
+ if (ret)
+ return ret;
+
+ return iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd, len);
+}
+
+int iwl_mld_init_sar(struct iwl_mld *mld)
+{
+ int chain_a_prof = 1;
+ int chain_b_prof = 1;
+ int ret;
+
+ /* If no profile was chosen by the user yet, choose profile 1 (WRDS) as
+ * default for both chains
+ */
+ if (mld->fwrt.sar_chain_a_profile && mld->fwrt.sar_chain_b_profile) {
+ chain_a_prof = mld->fwrt.sar_chain_a_profile;
+ chain_b_prof = mld->fwrt.sar_chain_b_profile;
+ }
+
+ ret = iwl_mld_config_sar_profile(mld, chain_a_prof, chain_b_prof);
+ if (ret < 0)
+ return ret;
+
+ if (ret)
+ return 0;
+
+ return iwl_mld_geo_sar_init(mld);
+}
+
+int iwl_mld_init_sgom(struct iwl_mld *mld)
+{
+ int ret;
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ SAR_OFFSET_MAPPING_TABLE_CMD),
+ .data[0] = &mld->fwrt.sgom_table,
+ .len[0] = sizeof(mld->fwrt.sgom_table),
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+
+ if (!mld->fwrt.sgom_enabled) {
+ IWL_DEBUG_RADIO(mld, "SGOM table is disabled\n");
+ return 0;
+ }
+
+ ret = iwl_mld_send_cmd(mld, &cmd);
+ if (ret)
+ IWL_ERR(mld,
+ "failed to send SAR_OFFSET_MAPPING_CMD (%d)\n", ret);
+
+ return ret;
+}
+
+static int iwl_mld_ppag_send_cmd(struct iwl_mld *mld)
+{
+ union iwl_ppag_table_cmd cmd = {};
+ int ret, len;
+
+ ret = iwl_fill_ppag_table(&mld->fwrt, &cmd, &len);
+ /* Not supporting PPAG table is a valid scenario */
+ if (ret < 0)
+ return 0;
+
+ IWL_DEBUG_RADIO(mld, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
+ ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(PHY_OPS_GROUP,
+ PER_PLATFORM_ANT_GAIN_CMD),
+ &cmd, len);
+ if (ret < 0)
+ IWL_ERR(mld, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n",
+ ret);
+
+ return ret;
+}
+
+int iwl_mld_init_ppag(struct iwl_mld *mld)
+{
+ /* no need to read the table, done in INIT stage */
+
+ if (!(iwl_is_ppag_approved(&mld->fwrt)))
+ return 0;
+
+ return iwl_mld_ppag_send_cmd(mld);
+}
+
+void iwl_mld_configure_lari(struct iwl_mld *mld)
+{
+ struct iwl_fw_runtime *fwrt = &mld->fwrt;
+ struct iwl_lari_config_change_cmd cmd = {
+ .config_bitmap = iwl_get_lari_config_bitmap(fwrt),
+ };
+ int ret;
+ u32 value;
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_11AX_ENABLEMENT, &value);
+ if (!ret)
+ cmd.oem_11ax_allow_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_UNII4_CHAN, &value);
+ if (!ret)
+ cmd.oem_unii4_allow_bitmap =
+ cpu_to_le32(value &= DSM_UNII4_ALLOW_BITMAP);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ACTIVATE_CHANNEL, &value);
+ if (!ret)
+ cmd.chan_state_active_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_6E, &value);
+ if (!ret)
+ cmd.oem_uhb_allow_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_FORCE_DISABLE_CHANNELS, &value);
+ if (!ret)
+ cmd.force_disable_channels_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENERGY_DETECTION_THRESHOLD,
+ &value);
+ if (!ret)
+ cmd.edt_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_wbem(fwrt, &value);
+ if (!ret)
+ cmd.oem_320mhz_allow_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_11BE, &value);
+ if (!ret)
+ cmd.oem_11be_allow_bitmap = cpu_to_le32(value);
+
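+	/* Nothing to send if the BIOS provided no LARI configuration at all */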
+ if (!cmd.config_bitmap &&
+ !cmd.oem_uhb_allow_bitmap &&
+ !cmd.oem_11ax_allow_bitmap &&
+ !cmd.oem_unii4_allow_bitmap &&
+ !cmd.chan_state_active_bitmap &&
+ !cmd.force_disable_channels_bitmap &&
+ !cmd.edt_bitmap &&
+ !cmd.oem_320mhz_allow_bitmap &&
+ !cmd.oem_11be_allow_bitmap)
+ return;
+
+ IWL_DEBUG_RADIO(mld,
+ "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
+ le32_to_cpu(cmd.config_bitmap),
+ le32_to_cpu(cmd.oem_11ax_allow_bitmap));
+ IWL_DEBUG_RADIO(mld,
+ "sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x\n",
+ le32_to_cpu(cmd.oem_unii4_allow_bitmap),
+ le32_to_cpu(cmd.chan_state_active_bitmap));
+ IWL_DEBUG_RADIO(mld,
+ "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n",
+ le32_to_cpu(cmd.oem_uhb_allow_bitmap),
+ le32_to_cpu(cmd.force_disable_channels_bitmap));
+ IWL_DEBUG_RADIO(mld,
+ "sending LARI_CONFIG_CHANGE, edt_bitmap=0x%x, oem_320mhz_allow_bitmap=0x%x\n",
+ le32_to_cpu(cmd.edt_bitmap),
+ le32_to_cpu(cmd.oem_320mhz_allow_bitmap));
+ IWL_DEBUG_RADIO(mld,
+ "sending LARI_CONFIG_CHANGE, oem_11be_allow_bitmap=0x%x\n",
+ le32_to_cpu(cmd.oem_11be_allow_bitmap));
+
+ ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ LARI_CONFIG_CHANGE), &cmd);
+ if (ret)
+ IWL_DEBUG_RADIO(mld,
+ "Failed to send LARI_CONFIG_CHANGE (%d)\n",
+ ret);
+}
+
+void iwl_mld_init_uats(struct iwl_mld *mld)
+{
+ int ret;
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ MCC_ALLOWED_AP_TYPE_CMD),
+ .data[0] = &mld->fwrt.uats_table,
+ .len[0] = sizeof(mld->fwrt.uats_table),
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+
+ if (!mld->fwrt.uats_valid)
+ return;
+
+ ret = iwl_mld_send_cmd(mld, &cmd);
+ if (ret)
+ IWL_ERR(mld, "failed to send MCC_ALLOWED_AP_TYPE_CMD (%d)\n",
+ ret);
+}
+
+void iwl_mld_init_tas(struct iwl_mld *mld)
+{
+ int ret;
+ struct iwl_tas_data data = {};
+ struct iwl_tas_config_cmd cmd = {};
+ u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG);
+
+ BUILD_BUG_ON(ARRAY_SIZE(data.block_list_array) !=
+ IWL_WTAS_BLACK_LIST_MAX);
+ BUILD_BUG_ON(ARRAY_SIZE(cmd.block_list_array) !=
+ IWL_WTAS_BLACK_LIST_MAX);
+
+ if (!fw_has_capa(&mld->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) {
+ IWL_DEBUG_RADIO(mld, "TAS not enabled in FW\n");
+ return;
+ }
+
+ ret = iwl_bios_get_tas_table(&mld->fwrt, &data);
+ if (ret < 0) {
+ IWL_DEBUG_RADIO(mld,
+ "TAS table invalid or unavailable. (%d)\n",
+ ret);
+ return;
+ }
+
+ if (!iwl_is_tas_approved()) {
+ IWL_DEBUG_RADIO(mld,
+ "System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n",
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
+ if ((!iwl_add_mcc_to_tas_block_list(data.block_list_array,
+ &data.block_list_size,
+ IWL_MCC_US)) ||
+ (!iwl_add_mcc_to_tas_block_list(data.block_list_array,
+ &data.block_list_size,
+ IWL_MCC_CANADA))) {
+ IWL_DEBUG_RADIO(mld,
+ "Unable to add US/Canada to TAS block list, disabling TAS\n");
+ return;
+ }
+ } else {
+ IWL_DEBUG_RADIO(mld,
+ "System vendor '%s' is in the approved list.\n",
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
+ }
+
+ cmd.block_list_size = cpu_to_le16(data.block_list_size);
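+	/* Convert the block list entries to the firmware's little-endian format */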
+ for (u8 i = 0; i < data.block_list_size; i++)
+ cmd.block_list_array[i] =
+ cpu_to_le16(data.block_list_array[i]);
+ cmd.tas_config_info.table_source = data.table_source;
+ cmd.tas_config_info.table_revision = data.table_revision;
+ cmd.tas_config_info.value = cpu_to_le32(data.tas_selection);
+
+ ret = iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd);
+ if (ret)
+ IWL_DEBUG_RADIO(mld, "failed to send TAS_CONFIG (%d)\n", ret);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/regulatory.h b/drivers/net/wireless/intel/iwlwifi/mld/regulatory.h
new file mode 100644
index 000000000000..3b01c645adda
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/regulatory.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_regulatory_h__
+#define __iwl_mld_regulatory_h__
+
+#include "mld.h"
+
+void iwl_mld_get_bios_tables(struct iwl_mld *mld);
+void iwl_mld_configure_lari(struct iwl_mld *mld);
+void iwl_mld_init_uats(struct iwl_mld *mld);
+void iwl_mld_init_tas(struct iwl_mld *mld);
+
+int iwl_mld_init_ppag(struct iwl_mld *mld);
+
+int iwl_mld_init_sgom(struct iwl_mld *mld);
+
+int iwl_mld_init_sar(struct iwl_mld *mld);
+
+int iwl_mld_config_sar_profile(struct iwl_mld *mld, int prof_a, int prof_b);
+
+#endif /* __iwl_mld_regulatory_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/roc.c b/drivers/net/wireless/intel/iwlwifi/mld/roc.c
new file mode 100644
index 000000000000..b87faca23ceb
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/roc.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024 - 2025 Intel Corporation
+ */
+#include <net/cfg80211.h>
+#include <net/mac80211.h>
+
+#include "mld.h"
+#include "roc.h"
+#include "hcmd.h"
+#include "iface.h"
+#include "sta.h"
+#include "mlo.h"
+
+#include "fw/api/context.h"
+#include "fw/api/time-event.h"
+
+#define AUX_ROC_MAX_DELAY MSEC_TO_TU(200)
+
+static void
+iwl_mld_vif_iter_emlsr_block_roc(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int *result = data;
+ int ret;
+
+ ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif,
+ IWL_MLD_EMLSR_BLOCKED_ROC,
+ iwl_mld_get_primary_link(vif));
+ if (ret)
+ *result = ret;
+}
+
+int iwl_mld_start_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_channel *channel, int duration,
+ enum ieee80211_roc_type type)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_int_sta *aux_sta;
+ struct iwl_roc_req cmd = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ };
+ u8 ver = iwl_fw_lookup_cmd_ver(mld->fw,
+ WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
+ u16 cmd_len = ver < 6 ? sizeof(struct iwl_roc_req_v5) : sizeof(cmd);
+ enum iwl_roc_activity activity;
+ int ret = 0;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_vif_iter_emlsr_block_roc,
+ &ret);
+ if (ret)
+ return ret;
+
+ /* TODO: task=Hotspot 2.0 */
+ if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
+ IWL_ERR(mld, "NOT SUPPORTED: ROC on vif->type %d\n",
+ vif->type);
+
+ return -EOPNOTSUPP;
+ }
+
+ switch (type) {
+ case IEEE80211_ROC_TYPE_NORMAL:
+ activity = ROC_ACTIVITY_P2P_DISC;
+ break;
+ case IEEE80211_ROC_TYPE_MGMT_TX:
+ activity = ROC_ACTIVITY_P2P_NEG;
+ break;
+ default:
+ WARN_ONCE(1, "Got an invalid P2P ROC type\n");
+ return -EINVAL;
+ }
+
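+	/* ROC_NUM_ACTIVITIES means that no ROC is currently running on this vif */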
+ if (WARN_ON(mld_vif->roc_activity != ROC_NUM_ACTIVITIES))
+ return -EBUSY;
+
+ /* No MLO on P2P device */
+ aux_sta = &mld_vif->deflink.aux_sta;
+
+ ret = iwl_mld_add_aux_sta(mld, aux_sta);
+ if (ret)
+ return ret;
+
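+	/* Fill the ROC request: activity, aux station, channel and timing */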
+ cmd.activity = cpu_to_le32(activity);
+ cmd.sta_id = cpu_to_le32(aux_sta->sta_id);
+ cmd.channel_info.channel = cpu_to_le32(channel->hw_value);
+ cmd.channel_info.band = iwl_mld_nl80211_band_to_fw(channel->band);
+ cmd.channel_info.width = IWL_PHY_CHANNEL_MODE20;
+ /* TODO: task=Hotspot 2.0, revisit those parameters when we add an ROC
+ * on the BSS vif
+ */
+ cmd.max_delay = cpu_to_le32(AUX_ROC_MAX_DELAY);
+ cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
+
+ memcpy(cmd.node_addr, vif->addr, ETH_ALEN);
+
+ ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
+ &cmd, cmd_len);
+ if (ret) {
+ IWL_ERR(mld, "Couldn't send the ROC_CMD\n");
+ return ret;
+ }
+ mld_vif->roc_activity = activity;
+
+ return 0;
+}
+
+static void
+iwl_mld_vif_iter_emlsr_unblock_roc(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ iwl_mld_unblock_emlsr(mld_vif->mld, vif, IWL_MLD_EMLSR_BLOCKED_ROC);
+}
+
+static void iwl_mld_destroy_roc(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mld_vif *mld_vif)
+{
+ mld_vif->roc_activity = ROC_NUM_ACTIVITIES;
+
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_vif_iter_emlsr_unblock_roc,
+ NULL);
+
+ /* wait until every tx has seen that roc_activity has been reset */
+ synchronize_net();
+	/* From here on, no new TX will be added,
+	 * so we can flush the TX on the queues.
+	 */
+
+ iwl_mld_flush_link_sta_txqs(mld, mld_vif->deflink.aux_sta.sta_id);
+
+ iwl_mld_remove_aux_sta(mld, vif, &vif->bss_conf);
+}
+
+int iwl_mld_cancel_roc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_roc_req cmd = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+ };
+ u8 ver = iwl_fw_lookup_cmd_ver(mld->fw,
+ WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
+ u16 cmd_len = ver < 6 ? sizeof(struct iwl_roc_req_v5) : sizeof(cmd);
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* TODO: task=Hotspot 2.0 */
+ if (WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE))
+ return -EOPNOTSUPP;
+
+	/* No ROC activity is running, it was probably already done */
+ if (mld_vif->roc_activity == ROC_NUM_ACTIVITIES)
+ return 0;
+
+ cmd.activity = cpu_to_le32(mld_vif->roc_activity);
+
+ ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
+ &cmd, cmd_len);
+ if (ret)
+ IWL_ERR(mld, "Couldn't send the command to cancel the ROC\n");
+
+ /* We may have raced with the firmware expiring the ROC instance at
+ * this very moment. In that case, we can have a notification in the
+ * async processing queue. However, none can arrive _after_ this as
+ * ROC_CMD was sent synchronously, i.e. we waited for a response and
+ * the firmware cannot refer to this ROC after the response. Thus,
+ * if we just cancel the notification (if there's one) we'll be at a
+ * clean state for any possible next ROC.
+ */
+ iwl_mld_cancel_notifications_of_object(mld, IWL_MLD_OBJECT_TYPE_ROC,
+ mld_vif->roc_activity);
+
+ iwl_mld_destroy_roc(mld, vif, mld_vif);
+
+ return 0;
+}
+
+void iwl_mld_handle_roc_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ const struct iwl_roc_notif *notif = (void *)pkt->data;
+ u32 activity = le32_to_cpu(notif->activity);
+ /* TODO: task=Hotspot 2.0 - roc can run on BSS */
+ struct ieee80211_vif *vif = mld->p2p_device_vif;
+ struct iwl_mld_vif *mld_vif;
+
+ if (WARN_ON(!vif))
+ return;
+
+ mld_vif = iwl_mld_vif_from_mac80211(vif);
+ /* It is possible that the ROC was canceled
+ * but the notification was already fired.
+ */
+ if (mld_vif->roc_activity != activity)
+ return;
+
+ if (le32_to_cpu(notif->success) &&
+ le32_to_cpu(notif->started)) {
+ /* We had a successful start */
+ ieee80211_ready_on_channel(mld->hw);
+ } else {
+ /* ROC was not successful, tell the firmware to remove it */
+ if (le32_to_cpu(notif->started))
+ iwl_mld_cancel_roc(mld->hw, vif);
+ else
+ iwl_mld_destroy_roc(mld, vif, mld_vif);
+		/* We need to let mac80211 know about the end OR
+		 * an unsuccessful start.
+		 */
+ ieee80211_remain_on_channel_expired(mld->hw);
+ }
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/roc.h b/drivers/net/wireless/intel/iwlwifi/mld/roc.h
new file mode 100644
index 000000000000..985d7f20a3d7
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/roc.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_roc_h__
+#define __iwl_mld_roc_h__
+
+#include <net/mac80211.h>
+
+int iwl_mld_start_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_channel *channel, int duration,
+ enum ieee80211_roc_type type);
+
+int iwl_mld_cancel_roc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+
+void iwl_mld_handle_roc_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+
+#endif /* __iwl_mld_roc_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/rx.c b/drivers/net/wireless/intel/iwlwifi/mld/rx.c
new file mode 100644
index 000000000000..c4f189bcece2
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/rx.c
@@ -0,0 +1,2060 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include <net/mac80211.h>
+#include <kunit/static_stub.h>
+
+#include "mld.h"
+#include "sta.h"
+#include "agg.h"
+#include "rx.h"
+#include "hcmd.h"
+#include "iface.h"
+#include "time_sync.h"
+#include "fw/dbg.h"
+#include "fw/api/rx.h"
+
+/* stores relevant PHY data fields extracted from iwl_rx_mpdu_desc */
+struct iwl_mld_rx_phy_data {
+ enum iwl_rx_phy_info_type info_type;
+ __le32 data0;
+ __le32 data1;
+ __le32 data2;
+ __le32 data3;
+ __le32 eht_data4;
+ __le32 data5;
+ __le16 data4;
+ bool first_subframe;
+ bool with_data;
+ __le32 rx_vec[4];
+ u32 rate_n_flags;
+ u32 gp2_on_air_rise;
+ u16 phy_info;
+ u8 energy_a, energy_b;
+ u8 channel;
+};
+
+static void
+iwl_mld_fill_phy_data(struct iwl_rx_mpdu_desc *desc,
+ struct iwl_mld_rx_phy_data *phy_data)
+{
+ phy_data->phy_info = le16_to_cpu(desc->phy_info);
+ phy_data->rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
+ phy_data->gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
+ phy_data->channel = desc->v3.channel;
+ phy_data->energy_a = desc->v3.energy_a;
+ phy_data->energy_b = desc->v3.energy_b;
+ phy_data->data0 = desc->v3.phy_data0;
+ phy_data->data1 = desc->v3.phy_data1;
+ phy_data->data2 = desc->v3.phy_data2;
+ phy_data->data3 = desc->v3.phy_data3;
+ phy_data->data4 = desc->phy_data4;
+ phy_data->eht_data4 = desc->phy_eht_data4;
+ phy_data->data5 = desc->v3.phy_data5;
+ phy_data->with_data = true;
+}
+
+static inline int iwl_mld_check_pn(struct iwl_mld *mld, struct sk_buff *skb,
+ int queue, struct ieee80211_sta *sta)
+{
+ struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);
+ struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
+ struct iwl_mld_sta *mld_sta;
+ struct iwl_mld_ptk_pn *ptk_pn;
+ int res;
+ u8 tid, keyidx;
+ u8 pn[IEEE80211_CCMP_PN_LEN];
+ u8 *extiv;
+
+	/* Multicast and non-data frames arrive only on the default queue; skip
+	 * the check for the default queue - we don't want to replicate all the
+	 * logic that's necessary for checking the PN on fragmented frames, so
+	 * leave that to mac80211.
+	 */
+ if (queue == 0 || !ieee80211_is_data(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1))
+ return 0;
+
+ if (!(stats->flag & RX_FLAG_DECRYPTED))
+ return 0;
+
+	/* If we got here, this is for sure either CCMP or GCMP */
+ if (!sta) {
+ IWL_DEBUG_DROP(mld,
+ "expected hw-decrypted unicast frame for station\n");
+ return -1;
+ }
+
+ mld_sta = iwl_mld_sta_from_mac80211(sta);
+
+ extiv = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
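+	/* The key index is in bits 6-7 of the Key ID octet (byte 3 of the extended IV) */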
+ keyidx = extiv[3] >> 6;
+
+ ptk_pn = rcu_dereference(mld_sta->ptk_pn[keyidx]);
+ if (!ptk_pn)
+ return -1;
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tid = ieee80211_get_tid(hdr);
+ else
+ tid = 0;
+
+ /* we don't use HCCA/802.11 QoS TSPECs, so drop such frames */
+ if (tid >= IWL_MAX_TID_COUNT)
+ return -1;
+
+ /* load pn */
+ pn[0] = extiv[7];
+ pn[1] = extiv[6];
+ pn[2] = extiv[5];
+ pn[3] = extiv[4];
+ pn[4] = extiv[1];
+ pn[5] = extiv[0];
+
+ res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN);
+ if (res < 0)
+ return -1;
+ if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN))
+ return -1;
+
+ memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
+ stats->flag |= RX_FLAG_PN_VALIDATED;
+
+ return 0;
+}
+
+/* iwl_mld_pass_packet_to_mac80211 - passes the packet to mac80211 */
+void iwl_mld_pass_packet_to_mac80211(struct iwl_mld *mld,
+ struct napi_struct *napi,
+ struct sk_buff *skb, int queue,
+ struct ieee80211_sta *sta)
+{
+ KUNIT_STATIC_STUB_REDIRECT(iwl_mld_pass_packet_to_mac80211,
+ mld, napi, skb, queue, sta);
+
+ if (unlikely(iwl_mld_check_pn(mld, skb, queue, sta))) {
+ kfree_skb(skb);
+ return;
+ }
+
+ ieee80211_rx_napi(mld->hw, sta, skb, napi);
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_pass_packet_to_mac80211);
+
+static void iwl_mld_fill_signal(struct iwl_mld *mld,
+ struct ieee80211_rx_status *rx_status,
+ struct iwl_mld_rx_phy_data *phy_data)
+{
+ u32 rate_n_flags = phy_data->rate_n_flags;
+ int energy_a = phy_data->energy_a;
+ int energy_b = phy_data->energy_b;
+ int max_energy;
+
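+	/* Energy is reported as a positive magnitude; negate to get the per-chain
+	 * signal in dBm (0 means the chain was not used)
+	 */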
+ energy_a = energy_a ? -energy_a : S8_MIN;
+ energy_b = energy_b ? -energy_b : S8_MIN;
+ max_energy = max(energy_a, energy_b);
+
+ IWL_DEBUG_STATS(mld, "energy in A %d B %d, and max %d\n",
+ energy_a, energy_b, max_energy);
+
+ rx_status->signal = max_energy;
+ rx_status->chains =
+ (rate_n_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS;
+ rx_status->chain_signal[0] = energy_a;
+ rx_status->chain_signal[1] = energy_b;
+}
+
+static void
+iwl_mld_decode_he_phy_ru_alloc(struct iwl_mld_rx_phy_data *phy_data,
+ struct ieee80211_radiotap_he *he,
+ struct ieee80211_radiotap_he_mu *he_mu,
+ struct ieee80211_rx_status *rx_status)
+{
+ /* Unfortunately, we have to leave the mac80211 data
+ * incorrect for the case that we receive an HE-MU
+ * transmission and *don't* have the HE phy data (due
+ * to the bits being used for TSF). This shouldn't
+ * happen though as management frames where we need
+	 * the TSF/timers are not transmitted in HE-MU.
+ */
+ u8 ru = le32_get_bits(phy_data->data1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK);
+ u32 rate_n_flags = phy_data->rate_n_flags;
+ u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+ u8 offs = 0;
+
+ rx_status->bw = RATE_INFO_BW_HE_RU;
+
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
+
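+	/* Map the firmware RU allocation index to an RU size and the offset within that size */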
+ switch (ru) {
+ case 0 ... 36:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ offs = ru;
+ break;
+ case 37 ... 52:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+ offs = ru - 37;
+ break;
+ case 53 ... 60:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ offs = ru - 53;
+ break;
+ case 61 ... 64:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ offs = ru - 61;
+ break;
+ case 65 ... 66:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ offs = ru - 65;
+ break;
+ case 67:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ case 68:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+ break;
+ }
+ he->data2 |= le16_encode_bits(offs,
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
+ he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN);
+ if (phy_data->data1 & cpu_to_le32(IWL_RX_PHY_DATA1_HE_RU_ALLOC_SEC80))
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
+
+#define CHECK_BW(bw) \
+ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_ ## bw ## MHZ != \
+ RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS); \
+ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_ ## bw ## MHZ != \
+ RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS)
+ CHECK_BW(20);
+ CHECK_BW(40);
+ CHECK_BW(80);
+ CHECK_BW(160);
+
+ if (he_mu)
+ he_mu->flags2 |=
+ le16_encode_bits(u32_get_bits(rate_n_flags,
+ RATE_MCS_CHAN_WIDTH_MSK),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW);
+ else if (he_type == RATE_MCS_HE_TYPE_TRIG)
+ he->data6 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) |
+ le16_encode_bits(u32_get_bits(rate_n_flags,
+ RATE_MCS_CHAN_WIDTH_MSK),
+ IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW);
+}
+
+static void
+iwl_mld_decode_he_mu_ext(struct iwl_mld_rx_phy_data *phy_data,
+ struct ieee80211_radiotap_he_mu *he_mu)
+{
+ u32 phy_data2 = le32_to_cpu(phy_data->data2);
+ u32 phy_data3 = le32_to_cpu(phy_data->data3);
+ u16 phy_data4 = le16_to_cpu(phy_data->data4);
+ u32 rate_n_flags = phy_data->rate_n_flags;
+
+ if (u32_get_bits(phy_data4, IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CRC_OK)) {
+ he_mu->flags1 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN);
+
+ he_mu->flags1 |=
+ le16_encode_bits(u32_get_bits(phy_data4,
+ IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CTR_RU),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU);
+
+ he_mu->ru_ch1[0] = u32_get_bits(phy_data2,
+ IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU0);
+ he_mu->ru_ch1[1] = u32_get_bits(phy_data3,
+ IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1);
+ he_mu->ru_ch1[2] = u32_get_bits(phy_data2,
+ IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU2);
+ he_mu->ru_ch1[3] = u32_get_bits(phy_data3,
+ IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU3);
+ }
+
+ if (u32_get_bits(phy_data4, IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CRC_OK) &&
+ (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) != RATE_MCS_CHAN_WIDTH_20) {
+ he_mu->flags1 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN);
+
+ he_mu->flags2 |=
+ le16_encode_bits(u32_get_bits(phy_data4,
+ IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CTR_RU),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU);
+
+ he_mu->ru_ch2[0] = u32_get_bits(phy_data2,
+ IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU0);
+ he_mu->ru_ch2[1] = u32_get_bits(phy_data3,
+ IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU1);
+ he_mu->ru_ch2[2] = u32_get_bits(phy_data2,
+ IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU2);
+ he_mu->ru_ch2[3] = u32_get_bits(phy_data3,
+ IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU3);
+ }
+}
+
+static void
+iwl_mld_decode_he_phy_data(struct iwl_mld_rx_phy_data *phy_data,
+ struct ieee80211_radiotap_he *he,
+ struct ieee80211_radiotap_he_mu *he_mu,
+ struct ieee80211_rx_status *rx_status,
+ int queue)
+{
+ switch (phy_data->info_type) {
+ case IWL_RX_PHY_INFO_TYPE_NONE:
+ case IWL_RX_PHY_INFO_TYPE_CCK:
+ case IWL_RX_PHY_INFO_TYPE_OFDM_LGCY:
+ case IWL_RX_PHY_INFO_TYPE_HT:
+ case IWL_RX_PHY_INFO_TYPE_VHT_SU:
+ case IWL_RX_PHY_INFO_TYPE_VHT_MU:
+ case IWL_RX_PHY_INFO_TYPE_EHT_MU:
+ case IWL_RX_PHY_INFO_TYPE_EHT_TB:
+ case IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT:
+ case IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT:
+ return;
+ case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN);
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data2,
+ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE1),
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1);
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data2,
+ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE2),
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2);
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data2,
+ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE3),
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3);
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data2,
+ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4),
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4);
+ fallthrough;
+ case IWL_RX_PHY_INFO_TYPE_HE_SU:
+ case IWL_RX_PHY_INFO_TYPE_HE_MU:
+ case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
+ case IWL_RX_PHY_INFO_TYPE_HE_TB:
+ /* HE common */
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN);
+ he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
+ he->data3 |= le16_encode_bits(le32_get_bits(phy_data->data0,
+ IWL_RX_PHY_DATA0_HE_BSS_COLOR_MASK),
+ IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR);
+ if (phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB &&
+ phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB_EXT) {
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN);
+ he->data3 |= le16_encode_bits(le32_get_bits(phy_data->data0,
+ IWL_RX_PHY_DATA0_HE_UPLINK),
+ IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
+ }
+ he->data3 |= le16_encode_bits(le32_get_bits(phy_data->data0,
+ IWL_RX_PHY_DATA0_HE_LDPC_EXT_SYM),
+ IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG);
+ he->data5 |= le16_encode_bits(le32_get_bits(phy_data->data0,
+ IWL_RX_PHY_DATA0_HE_PRE_FEC_PAD_MASK),
+ IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD);
+ he->data5 |= le16_encode_bits(le32_get_bits(phy_data->data0,
+ IWL_RX_PHY_DATA0_HE_PE_DISAMBIG),
+ IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG);
+ he->data5 |= le16_encode_bits(le32_get_bits(phy_data->data1,
+ IWL_RX_PHY_DATA1_HE_LTF_NUM_MASK),
+ IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
+ he->data6 |= le16_encode_bits(le32_get_bits(phy_data->data0,
+ IWL_RX_PHY_DATA0_HE_TXOP_DUR_MASK),
+ IEEE80211_RADIOTAP_HE_DATA6_TXOP);
+ he->data6 |= le16_encode_bits(le32_get_bits(phy_data->data0,
+ IWL_RX_PHY_DATA0_HE_DOPPLER),
+ IEEE80211_RADIOTAP_HE_DATA6_DOPPLER);
+ break;
+ }
+
+ switch (phy_data->info_type) {
+ case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
+ case IWL_RX_PHY_INFO_TYPE_HE_MU:
+ case IWL_RX_PHY_INFO_TYPE_HE_SU:
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN);
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data0,
+ IWL_RX_PHY_DATA0_HE_SPATIAL_REUSE_MASK),
+ IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE);
+ break;
+ default:
+ /* nothing here */
+ break;
+ }
+
+ switch (phy_data->info_type) {
+ case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
+ he_mu->flags1 |=
+ le16_encode_bits(le16_get_bits(phy_data->data4,
+ IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_DCM),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
+ he_mu->flags1 |=
+ le16_encode_bits(le16_get_bits(phy_data->data4,
+ IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_MCS_MASK),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
+ he_mu->flags2 |=
+ le16_encode_bits(le16_get_bits(phy_data->data4,
+ IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
+ iwl_mld_decode_he_mu_ext(phy_data, he_mu);
+ fallthrough;
+ case IWL_RX_PHY_INFO_TYPE_HE_MU:
+ he_mu->flags2 |=
+ le16_encode_bits(le32_get_bits(phy_data->data1,
+ IWL_RX_PHY_DATA1_HE_MU_SIBG_SYM_OR_USER_NUM_MASK),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
+ he_mu->flags2 |=
+ le16_encode_bits(le32_get_bits(phy_data->data1,
+ IWL_RX_PHY_DATA1_HE_MU_SIGB_COMPRESSION),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
+ fallthrough;
+ case IWL_RX_PHY_INFO_TYPE_HE_TB:
+ case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
+ iwl_mld_decode_he_phy_ru_alloc(phy_data, he, he_mu, rx_status);
+ break;
+ case IWL_RX_PHY_INFO_TYPE_HE_SU:
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN);
+ he->data3 |= le16_encode_bits(le32_get_bits(phy_data->data0,
+ IWL_RX_PHY_DATA0_HE_BEAM_CHNG),
+ IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE);
+ break;
+ default:
+ /* nothing */
+ break;
+ }
+}
+
+static void iwl_mld_rx_he(struct iwl_mld *mld, struct sk_buff *skb,
+ struct iwl_mld_rx_phy_data *phy_data,
+ int queue)
+{
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_radiotap_he *he = NULL;
+ struct ieee80211_radiotap_he_mu *he_mu = NULL;
+ u32 rate_n_flags = phy_data->rate_n_flags;
+ u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+ u8 ltf;
+ static const struct ieee80211_radiotap_he known = {
+ .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN),
+ .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN),
+ };
+ static const struct ieee80211_radiotap_he_mu mu_known = {
+ .flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN),
+ .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN),
+ };
+ u16 phy_info = phy_data->phy_info;
+
+ he = skb_put_data(skb, &known, sizeof(known));
+ rx_status->flag |= RX_FLAG_RADIOTAP_HE;
+
+ if (phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU ||
+ phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU_EXT) {
+ he_mu = skb_put_data(skb, &mu_known, sizeof(mu_known));
+ rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
+ }
+
+ /* report the AMPDU-EOF bit on single frames */
+ if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+ rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+ if (phy_data->data0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF))
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
+ }
+
+ if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
+ iwl_mld_decode_he_phy_data(phy_data, he, he_mu, rx_status,
+ queue);
+
+	/* update aggregation data for the monitor's sake on the default queue */
+ if (!queue && (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) &&
+ (phy_info & IWL_RX_MPDU_PHY_AMPDU) && phy_data->first_subframe) {
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+ if (phy_data->data0 & cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF))
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
+ }
+
+ if (he_type == RATE_MCS_HE_TYPE_EXT_SU &&
+ rate_n_flags & RATE_MCS_HE_106T_MSK) {
+ rx_status->bw = RATE_INFO_BW_HE_RU;
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ }
+
+	/* the actual data is filled in by mac80211 */
+ if (he_type == RATE_MCS_HE_TYPE_SU ||
+ he_type == RATE_MCS_HE_TYPE_EXT_SU)
+ he->data1 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
+
+#define CHECK_TYPE(F) \
+ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \
+ (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
+
+ CHECK_TYPE(SU);
+ CHECK_TYPE(EXT_SU);
+ CHECK_TYPE(MU);
+ CHECK_TYPE(TRIG);
+
+ he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS);
+
+ if (rate_n_flags & RATE_MCS_BF_MSK)
+ he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF);
+
+ switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
+ RATE_MCS_HE_GI_LTF_POS) {
+ case 0:
+ if (he_type == RATE_MCS_HE_TYPE_TRIG)
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ if (he_type == RATE_MCS_HE_TYPE_MU)
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+ else
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X;
+ break;
+ case 1:
+ if (he_type == RATE_MCS_HE_TYPE_TRIG)
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
+ break;
+ case 2:
+ if (he_type == RATE_MCS_HE_TYPE_TRIG) {
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+ } else {
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
+ }
+ break;
+ case 3:
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+ break;
+ case 4:
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+ break;
+ default:
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN;
+ }
+
+ he->data5 |= le16_encode_bits(ltf,
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
+}
+
+static void iwl_mld_decode_lsig(struct sk_buff *skb,
+ struct iwl_mld_rx_phy_data *phy_data)
+{
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_radiotap_lsig *lsig;
+
+ switch (phy_data->info_type) {
+ case IWL_RX_PHY_INFO_TYPE_HT:
+ case IWL_RX_PHY_INFO_TYPE_VHT_SU:
+ case IWL_RX_PHY_INFO_TYPE_VHT_MU:
+ case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
+ case IWL_RX_PHY_INFO_TYPE_HE_SU:
+ case IWL_RX_PHY_INFO_TYPE_HE_MU:
+ case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
+ case IWL_RX_PHY_INFO_TYPE_HE_TB:
+ case IWL_RX_PHY_INFO_TYPE_EHT_MU:
+ case IWL_RX_PHY_INFO_TYPE_EHT_TB:
+ case IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT:
+ case IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT:
+ lsig = skb_put(skb, sizeof(*lsig));
+ lsig->data1 = cpu_to_le16(IEEE80211_RADIOTAP_LSIG_DATA1_LENGTH_KNOWN);
+ lsig->data2 = le16_encode_bits(le32_get_bits(phy_data->data1,
+ IWL_RX_PHY_DATA1_LSIG_LEN_MASK),
+ IEEE80211_RADIOTAP_LSIG_DATA2_LENGTH);
+ rx_status->flag |= RX_FLAG_RADIOTAP_LSIG;
+ break;
+ default:
+ break;
+ }
+}
+
+/* Put a TLV on the skb and return the data pointer
+ *
+ * Also pad the length to a multiple of 4 and zero out the data part
+ */
+static void *
+iwl_mld_radiotap_put_tlv(struct sk_buff *skb, u16 type, u16 len)
+{
+ struct ieee80211_radiotap_tlv *tlv;
+
+ tlv = skb_put(skb, sizeof(*tlv));
+ tlv->type = cpu_to_le16(type);
+ tlv->len = cpu_to_le16(len);
+ return skb_put_zero(skb, ALIGN(len, 4));
+}
+
+#define LE32_DEC_ENC(value, dec_bits, enc_bits) \
+ le32_encode_bits(le32_get_bits(value, dec_bits), enc_bits)
+
+#define IWL_MLD_ENC_USIG_VALUE_MASK(usig, in_value, dec_bits, enc_bits) do { \
+ typeof(enc_bits) _enc_bits = enc_bits; \
+ typeof(usig) _usig = usig; \
+ (_usig)->mask |= cpu_to_le32(_enc_bits); \
+ (_usig)->value |= LE32_DEC_ENC(in_value, dec_bits, _enc_bits); \
+} while (0)
+
+#define __IWL_MLD_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru) \
+ eht->data[(rt_data)] |= \
+ (cpu_to_le32 \
+ (IEEE80211_RADIOTAP_EHT_DATA ## rt_data ## _RU_ALLOC_CC_ ## rt_ru ## _KNOWN) | \
+ LE32_DEC_ENC(data ## fw_data, \
+ IWL_RX_PHY_DATA ## fw_data ## _EHT_MU_EXT_RU_ALLOC_ ## fw_ru, \
+ IEEE80211_RADIOTAP_EHT_DATA ## rt_data ## _RU_ALLOC_CC_ ## rt_ru))
+
+#define _IWL_MLD_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru) \
+ __IWL_MLD_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru)
+
+#define IEEE80211_RADIOTAP_RU_DATA_1_1_1 1
+#define IEEE80211_RADIOTAP_RU_DATA_2_1_1 2
+#define IEEE80211_RADIOTAP_RU_DATA_1_1_2 2
+#define IEEE80211_RADIOTAP_RU_DATA_2_1_2 2
+#define IEEE80211_RADIOTAP_RU_DATA_1_2_1 3
+#define IEEE80211_RADIOTAP_RU_DATA_2_2_1 3
+#define IEEE80211_RADIOTAP_RU_DATA_1_2_2 3
+#define IEEE80211_RADIOTAP_RU_DATA_2_2_2 4
+
+#define IWL_RX_RU_DATA_A1 2
+#define IWL_RX_RU_DATA_A2 2
+#define IWL_RX_RU_DATA_B1 2
+#define IWL_RX_RU_DATA_B2 4
+#define IWL_RX_RU_DATA_C1 3
+#define IWL_RX_RU_DATA_C2 3
+#define IWL_RX_RU_DATA_D1 4
+#define IWL_RX_RU_DATA_D2 4
+
+#define IWL_MLD_ENC_EHT_RU(rt_ru, fw_ru) \
+ _IWL_MLD_ENC_EHT_RU(IEEE80211_RADIOTAP_RU_DATA_ ## rt_ru, \
+ rt_ru, \
+ IWL_RX_RU_DATA_ ## fw_ru, \
+ fw_ru)
+
+static void iwl_mld_decode_eht_ext_mu(struct iwl_mld *mld,
+ struct iwl_mld_rx_phy_data *phy_data,
+ struct ieee80211_rx_status *rx_status,
+ struct ieee80211_radiotap_eht *eht,
+ struct ieee80211_radiotap_eht_usig *usig)
+{
+ if (phy_data->with_data) {
+ __le32 data1 = phy_data->data1;
+ __le32 data2 = phy_data->data2;
+ __le32 data3 = phy_data->data3;
+ __le32 data4 = phy_data->eht_data4;
+ __le32 data5 = phy_data->data5;
+ u32 phy_bw = phy_data->rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK;
+
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5,
+ IWL_RX_PHY_DATA5_EHT_TYPE_AND_COMP,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5,
+ IWL_RX_PHY_DATA5_EHT_MU_PUNC_CH_CODE,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, data4,
+ IWL_RX_PHY_DATA4_EHT_MU_EXT_SIGB_MCS,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS);
+ IWL_MLD_ENC_USIG_VALUE_MASK
+ (usig, data1, IWL_RX_PHY_DATA1_EHT_MU_NUM_SIG_SYM_USIGA2,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS);
+
+ eht->user_info[0] |=
+ cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN) |
+ LE32_DEC_ENC(data5, IWL_RX_PHY_DATA5_EHT_MU_STA_ID_USR,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID);
+
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_NR_NON_OFDMA_USERS_M);
+ eht->data[7] |= LE32_DEC_ENC
+ (data5, IWL_RX_PHY_DATA5_EHT_MU_NUM_USR_NON_OFDMA,
+ IEEE80211_RADIOTAP_EHT_DATA7_NUM_OF_NON_OFDMA_USERS);
+
+ /*
+ * Hardware labels the content channels/RU allocation values
+ * as follows:
+ * Content Channel 1 Content Channel 2
+ * 20 MHz: A1
+ * 40 MHz: A1 B1
+ * 80 MHz: A1 C1 B1 D1
+ * 160 MHz: A1 C1 A2 C2 B1 D1 B2 D2
+ * 320 MHz: A1 C1 A2 C2 A3 C3 A4 C4 B1 D1 B2 D2 B3 D3 B4 D4
+ *
+		 * However, the firmware can only give us A1-D2, so the higher
+ * frequencies are missing.
+ */
+
+ switch (phy_bw) {
+ case RATE_MCS_CHAN_WIDTH_320:
+ /* additional values are missing in RX metadata */
+ fallthrough;
+ case RATE_MCS_CHAN_WIDTH_160:
+ /* content channel 1 */
+ IWL_MLD_ENC_EHT_RU(1_2_1, A2);
+ IWL_MLD_ENC_EHT_RU(1_2_2, C2);
+ /* content channel 2 */
+ IWL_MLD_ENC_EHT_RU(2_2_1, B2);
+ IWL_MLD_ENC_EHT_RU(2_2_2, D2);
+ fallthrough;
+ case RATE_MCS_CHAN_WIDTH_80:
+ /* content channel 1 */
+ IWL_MLD_ENC_EHT_RU(1_1_2, C1);
+ /* content channel 2 */
+ IWL_MLD_ENC_EHT_RU(2_1_2, D1);
+ fallthrough;
+ case RATE_MCS_CHAN_WIDTH_40:
+ /* content channel 2 */
+ IWL_MLD_ENC_EHT_RU(2_1_1, B1);
+ fallthrough;
+ case RATE_MCS_CHAN_WIDTH_20:
+ IWL_MLD_ENC_EHT_RU(1_1_1, A1);
+ break;
+ }
+ } else {
+ __le32 usig_a1 = phy_data->rx_vec[0];
+ __le32 usig_a2 = phy_data->rx_vec[1];
+
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a1,
+ IWL_RX_USIG_A1_DISREGARD,
+ IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a1,
+ IWL_RX_USIG_A1_VALIDATE,
+ IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_PPDU_TYPE,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B2,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_PUNC_CHANNEL,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B8,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_SIG_MCS,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS);
+ IWL_MLD_ENC_USIG_VALUE_MASK
+ (usig, usig_a2, IWL_RX_USIG_A2_EHT_SIG_SYM_NUM,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_CRC_OK,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC);
+ }
+}
+
+static void iwl_mld_decode_eht_ext_tb(struct iwl_mld *mld,
+ struct iwl_mld_rx_phy_data *phy_data,
+ struct ieee80211_rx_status *rx_status,
+ struct ieee80211_radiotap_eht *eht,
+ struct ieee80211_radiotap_eht_usig *usig)
+{
+ if (phy_data->with_data) {
+ __le32 data5 = phy_data->data5;
+
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5,
+ IWL_RX_PHY_DATA5_EHT_TYPE_AND_COMP,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5,
+ IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE1,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1);
+
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5,
+ IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE2,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2);
+ } else {
+ __le32 usig_a1 = phy_data->rx_vec[0];
+ __le32 usig_a2 = phy_data->rx_vec[1];
+
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a1,
+ IWL_RX_USIG_A1_DISREGARD,
+ IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_PPDU_TYPE,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B2,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_TRIG_SPATIAL_REUSE_1,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_TRIG_SPATIAL_REUSE_2,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_TRIG_USIG2_DISREGARD,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_CRC_OK,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC);
+ }
+}
+
+static void iwl_mld_decode_eht_ru(struct iwl_mld *mld,
+ struct ieee80211_rx_status *rx_status,
+ struct ieee80211_radiotap_eht *eht)
+{
+ u32 ru = le32_get_bits(eht->data[8],
+ IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B7_B1);
+ enum nl80211_eht_ru_alloc nl_ru;
+
+ /* Using D1.5 Table 9-53a - Encoding of PS160 and RU Allocation subfields
+ * in an EHT variant User Info field
+ */
+
+ switch (ru) {
+ case 0 ... 36:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_26;
+ break;
+ case 37 ... 52:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_52;
+ break;
+ case 53 ... 60:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_106;
+ break;
+ case 61 ... 64:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_242;
+ break;
+ case 65 ... 66:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_484;
+ break;
+ case 67:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996;
+ break;
+ case 68:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_2x996;
+ break;
+ case 69:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_4x996;
+ break;
+ case 70 ... 81:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_52P26;
+ break;
+ case 82 ... 89:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_106P26;
+ break;
+ case 90 ... 93:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_484P242;
+ break;
+ case 94 ... 95:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996P484;
+ break;
+ case 96 ... 99:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242;
+ break;
+ case 100 ... 103:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484;
+ break;
+ case 104:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_3x996;
+ break;
+ case 105 ... 106:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484;
+ break;
+ default:
+ return;
+ }
+
+ rx_status->bw = RATE_INFO_BW_EHT_RU;
+ rx_status->eht.ru = nl_ru;
+}
+
+static void iwl_mld_decode_eht_phy_data(struct iwl_mld *mld,
+ struct iwl_mld_rx_phy_data *phy_data,
+ struct ieee80211_rx_status *rx_status,
+ struct ieee80211_radiotap_eht *eht,
+ struct ieee80211_radiotap_eht_usig *usig)
+
+{
+ __le32 data0 = phy_data->data0;
+ __le32 data1 = phy_data->data1;
+ __le32 usig_a1 = phy_data->rx_vec[0];
+ u8 info_type = phy_data->info_type;
+
+ /* Not in EHT range */
+ if (info_type < IWL_RX_PHY_INFO_TYPE_EHT_MU ||
+ info_type > IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT)
+ return;
+
+ usig->common |= cpu_to_le32
+ (IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR_KNOWN);
+ if (phy_data->with_data) {
+ usig->common |= LE32_DEC_ENC(data0,
+ IWL_RX_PHY_DATA0_EHT_UPLINK,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL);
+ usig->common |= LE32_DEC_ENC(data0,
+ IWL_RX_PHY_DATA0_EHT_BSS_COLOR_MASK,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR);
+ } else {
+ usig->common |= LE32_DEC_ENC(usig_a1,
+ IWL_RX_USIG_A1_UL_FLAG,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL);
+ usig->common |= LE32_DEC_ENC(usig_a1,
+ IWL_RX_USIG_A1_BSS_COLOR,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR);
+ }
+
+ usig->common |=
+ cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_CHECKED);
+ usig->common |=
+ LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_VALIDATE,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_OK);
+
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE);
+ eht->data[0] |= LE32_DEC_ENC(data0,
+ IWL_RX_PHY_DATA0_ETH_SPATIAL_REUSE_MASK,
+ IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE);
+
+	/* All RU allocation size/index values are in TB format */
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_RU_ALLOC_TB_FMT);
+ eht->data[8] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PS160,
+ IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_PS_160);
+ eht->data[8] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B0,
+ IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B0);
+ eht->data[8] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B1_B7,
+ IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B7_B1);
+
+ iwl_mld_decode_eht_ru(mld, rx_status, eht);
+
+	/* We only get here if IWL_RX_MPDU_PHY_TSF_OVERLOAD is set, which is
+	 * only the case in monitor mode, so there is no need to check for
+	 * monitor mode here.
+	 */
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PRIMARY_80);
+ eht->data[1] |=
+ le32_encode_bits(mld->monitor.p80,
+ IEEE80211_RADIOTAP_EHT_DATA1_PRIMARY_80);
+
+ usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP_KNOWN);
+ if (phy_data->with_data)
+ usig->common |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_TXOP_DUR_MASK,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP);
+ else
+ usig->common |= LE32_DEC_ENC(usig_a1, IWL_RX_USIG_A1_TXOP_DURATION,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP);
+
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_LDPC_EXTRA_SYM_OM);
+ eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_LDPC_EXT_SYM,
+ IEEE80211_RADIOTAP_EHT_DATA0_LDPC_EXTRA_SYM_OM);
+
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PRE_PADD_FACOR_OM);
+ eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PRE_FEC_PAD_MASK,
+ IEEE80211_RADIOTAP_EHT_DATA0_PRE_PADD_FACOR_OM);
+
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PE_DISAMBIGUITY_OM);
+ eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PE_DISAMBIG,
+ IEEE80211_RADIOTAP_EHT_DATA0_PE_DISAMBIGUITY_OM);
+
+ /* TODO: what about IWL_RX_PHY_DATA0_EHT_BW320_SLOT */
+
+ if (!le32_get_bits(data0, IWL_RX_PHY_DATA0_EHT_SIGA_CRC_OK))
+ usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC);
+
+ usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER_KNOWN);
+ usig->common |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PHY_VER,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER);
+
+ /*
+ * TODO: what about TB - IWL_RX_PHY_DATA1_EHT_TB_PILOT_TYPE,
+ * IWL_RX_PHY_DATA1_EHT_TB_LOW_SS
+ */
+
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF);
+ eht->data[0] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_SIG_LTF_NUM,
+ IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF);
+
+ if (info_type == IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT ||
+ info_type == IWL_RX_PHY_INFO_TYPE_EHT_TB)
+ iwl_mld_decode_eht_ext_tb(mld, phy_data, rx_status, eht, usig);
+
+ if (info_type == IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT ||
+ info_type == IWL_RX_PHY_INFO_TYPE_EHT_MU)
+ iwl_mld_decode_eht_ext_mu(mld, phy_data, rx_status, eht, usig);
+}
+
+static void iwl_mld_rx_eht(struct iwl_mld *mld, struct sk_buff *skb,
+ struct iwl_mld_rx_phy_data *phy_data,
+ int queue)
+{
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_radiotap_eht *eht;
+ struct ieee80211_radiotap_eht_usig *usig;
+ size_t eht_len = sizeof(*eht);
+
+ u32 rate_n_flags = phy_data->rate_n_flags;
+ u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+ /* EHT and HE have the same values for LTF */
+ u8 ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN;
+ u16 phy_info = phy_data->phy_info;
+ u32 bw;
+
+ /* u32 for 1 user_info */
+ if (phy_data->with_data)
+ eht_len += sizeof(u32);
+
+ eht = iwl_mld_radiotap_put_tlv(skb, IEEE80211_RADIOTAP_EHT, eht_len);
+
+ usig = iwl_mld_radiotap_put_tlv(skb, IEEE80211_RADIOTAP_EHT_USIG,
+ sizeof(*usig));
+ rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END;
+ usig->common |=
+ cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_KNOWN);
+
+	/* specific handling for 320 MHz: the BW320 slot bit selects 320MHz-1 vs 320MHz-2 */
+ bw = u32_get_bits(rate_n_flags, RATE_MCS_CHAN_WIDTH_MSK);
+ if (bw == RATE_MCS_CHAN_WIDTH_320_VAL)
+ bw += le32_get_bits(phy_data->data0,
+ IWL_RX_PHY_DATA0_EHT_BW320_SLOT);
+
+ usig->common |= cpu_to_le32
+ (FIELD_PREP(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW, bw));
+
+ /* report the AMPDU-EOF bit on single frames */
+ if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+ rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+ if (phy_data->data0 &
+ cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF))
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
+ }
+
+ if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
+ iwl_mld_decode_eht_phy_data(mld, phy_data, rx_status, eht, usig);
+
+#define CHECK_TYPE(F) \
+ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \
+ (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
+
+ CHECK_TYPE(SU);
+ CHECK_TYPE(EXT_SU);
+ CHECK_TYPE(MU);
+ CHECK_TYPE(TRIG);
+
+ switch (u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK)) {
+ case 0:
+ if (he_type == RATE_MCS_HE_TYPE_TRIG) {
+ rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_1_6;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X;
+ } else {
+ rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_0_8;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
+ }
+ break;
+ case 1:
+ rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_1_6;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
+ break;
+ case 2:
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+ if (he_type == RATE_MCS_HE_TYPE_TRIG)
+ rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_3_2;
+ else
+ rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_0_8;
+ break;
+ case 3:
+ if (he_type != RATE_MCS_HE_TYPE_TRIG) {
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+ rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_3_2;
+ }
+ break;
+ default:
+ /* nothing here */
+ break;
+ }
+
+ if (ltf != IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN) {
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_GI);
+ eht->data[0] |= cpu_to_le32
+ (FIELD_PREP(IEEE80211_RADIOTAP_EHT_DATA0_LTF,
+ ltf) |
+ FIELD_PREP(IEEE80211_RADIOTAP_EHT_DATA0_GI,
+ rx_status->eht.gi));
+ }
+
+ if (!phy_data->with_data) {
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_NSS_S |
+ IEEE80211_RADIOTAP_EHT_KNOWN_BEAMFORMED_S);
+ eht->data[7] |=
+ le32_encode_bits(le32_get_bits(phy_data->rx_vec[2],
+ RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK),
+ IEEE80211_RADIOTAP_EHT_DATA7_NSS_S);
+ if (rate_n_flags & RATE_MCS_BF_MSK)
+ eht->data[7] |=
+ cpu_to_le32(IEEE80211_RADIOTAP_EHT_DATA7_BEAMFORMED_S);
+ } else {
+ eht->user_info[0] |=
+ cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_KNOWN_O |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_DATA_FOR_USER);
+
+ if (rate_n_flags & RATE_MCS_BF_MSK)
+ eht->user_info[0] |=
+ cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_O);
+
+ if (rate_n_flags & RATE_MCS_LDPC_MSK)
+ eht->user_info[0] |=
+ cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_CODING);
+
+ eht->user_info[0] |= cpu_to_le32
+ (FIELD_PREP(IEEE80211_RADIOTAP_EHT_USER_INFO_MCS,
+ u32_get_bits(rate_n_flags,
+ RATE_VHT_MCS_RATE_CODE_MSK)) |
+ FIELD_PREP(IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O,
+ u32_get_bits(rate_n_flags,
+ RATE_MCS_NSS_MSK)));
+ }
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static void iwl_mld_add_rtap_sniffer_config(struct iwl_mld *mld,
+ struct sk_buff *skb)
+{
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_radiotap_vendor_content *radiotap;
+ const u16 vendor_data_len = sizeof(mld->monitor.cur_aid);
+
+ if (!mld->monitor.cur_aid)
+ return;
+
+ radiotap =
+ iwl_mld_radiotap_put_tlv(skb,
+ IEEE80211_RADIOTAP_VENDOR_NAMESPACE,
+ sizeof(*radiotap) + vendor_data_len);
+
+ /* Intel OUI */
+ radiotap->oui[0] = 0xf6;
+ radiotap->oui[1] = 0x54;
+ radiotap->oui[2] = 0x25;
+ /* radiotap sniffer config sub-namespace */
+ radiotap->oui_subtype = 1;
+ radiotap->vendor_type = 0;
+
+ /* fill the data now */
+ memcpy(radiotap->data, &mld->monitor.cur_aid,
+ sizeof(mld->monitor.cur_aid));
+
+ rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END;
+}
+#endif
+
+static void iwl_mld_rx_fill_status(struct iwl_mld *mld, struct sk_buff *skb,
+ struct iwl_mld_rx_phy_data *phy_data,
+ struct iwl_rx_mpdu_desc *mpdu_desc,
+ struct ieee80211_hdr *hdr,
+ int queue)
+{
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+ u32 format = phy_data->rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+ u32 rate_n_flags = phy_data->rate_n_flags;
+ u8 stbc = u32_get_bits(rate_n_flags, RATE_MCS_STBC_MSK);
+ bool is_sgi = rate_n_flags & RATE_MCS_SGI_MSK;
+
+ if (WARN_ON_ONCE(phy_data->with_data && (!mpdu_desc || !hdr)))
+ return;
+
+ /* Keep packets with CRC errors (and with overrun) for monitor mode
+ * (otherwise the firmware discards them) but mark them as bad.
+ */
+ if (phy_data->with_data &&
+ (!(mpdu_desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_CRC_OK)) ||
+ !(mpdu_desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_OVERRUN_OK)))) {
+ IWL_DEBUG_RX(mld, "Bad CRC or FIFO: 0x%08X.\n",
+ le32_to_cpu(mpdu_desc->status));
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ }
+
+ phy_data->info_type = IWL_RX_PHY_INFO_TYPE_NONE;
+
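+	/* When the TSF is overloaded with PHY data, extract the info type instead of the timestamp */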
+ if (phy_data->with_data &&
+ likely(!(phy_data->phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
+ rx_status->mactime =
+ le64_to_cpu(mpdu_desc->v3.tsf_on_air_rise);
+
+ /* TSF as indicated by the firmware is at INA time */
+ rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
+ } else {
+ phy_data->info_type =
+ le32_get_bits(phy_data->data1,
+ IWL_RX_PHY_DATA1_INFO_TYPE_MASK);
+ }
+
+ /* management stuff on default queue */
+ if (!queue && phy_data->with_data &&
+ unlikely(ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control))) {
+ rx_status->boottime_ns = ktime_get_boottime_ns();
+
+ if (mld->scan.pass_all_sched_res == SCHED_SCAN_PASS_ALL_STATE_ENABLED)
+ mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_FOUND;
+ }
+
+ /* set the preamble flag if appropriate */
+ if (format == RATE_MCS_CCK_MSK &&
+ phy_data->phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+
+ iwl_mld_fill_signal(mld, rx_status, phy_data);
+
+ /* This may be overridden by iwl_mld_rx_he() to HE_RU */
+ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ rx_status->bw = RATE_INFO_BW_40;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ rx_status->bw = RATE_INFO_BW_80;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ rx_status->bw = RATE_INFO_BW_160;
+ break;
+ case RATE_MCS_CHAN_WIDTH_320:
+ rx_status->bw = RATE_INFO_BW_320;
+ break;
+ }
+
+ /* must be before L-SIG data */
+ if (format == RATE_MCS_HE_MSK)
+ iwl_mld_rx_he(mld, skb, phy_data, queue);
+
+ iwl_mld_decode_lsig(skb, phy_data);
+
+ rx_status->device_timestamp = phy_data->gp2_on_air_rise;
+
+	/* EHT radiotap uses the TLV format and must come after all fixed-length fields */
+ if (format == RATE_MCS_EHT_MSK)
+ iwl_mld_rx_eht(mld, skb, phy_data, queue);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (unlikely(mld->monitor.on))
+ iwl_mld_add_rtap_sniffer_config(mld, skb);
+#endif
+
+ if (format != RATE_MCS_CCK_MSK && is_sgi)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ if (rate_n_flags & RATE_MCS_LDPC_MSK)
+ rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+ switch (format) {
+ case RATE_MCS_HT_MSK:
+ rx_status->encoding = RX_ENC_HT;
+ rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags);
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+ break;
+ case RATE_MCS_VHT_MSK:
+ case RATE_MCS_HE_MSK:
+ case RATE_MCS_EHT_MSK:
+ if (format == RATE_MCS_VHT_MSK) {
+ rx_status->encoding = RX_ENC_VHT;
+ } else if (format == RATE_MCS_HE_MSK) {
+ rx_status->encoding = RX_ENC_HE;
+ rx_status->he_dcm =
+ !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
+ } else if (format == RATE_MCS_EHT_MSK) {
+ rx_status->encoding = RX_ENC_EHT;
+ }
+
+ rx_status->nss = u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1;
+ rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+ break;
+ default: {
+ int rate =
+ iwl_mld_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
+ rx_status->band);
+
+ /* valid rate */
+ if (rate >= 0 && rate <= 0xFF) {
+ rx_status->rate_idx = rate;
+ break;
+ }
+
+ /* invalid rate */
+ rx_status->rate_idx = 0;
+
+ if (net_ratelimit())
+ IWL_ERR(mld, "invalid rate_n_flags=0x%x, band=%d\n",
+ rate_n_flags, rx_status->band);
+ break;
+ }
+ }
+}
+
+/* iwl_mld_build_rx_skb adds the rxb data to a new skb */
+static int iwl_mld_build_rx_skb(struct iwl_mld *mld, struct sk_buff *skb,
+ struct ieee80211_hdr *hdr, u16 len,
+ u8 crypt_len, struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
+ unsigned int headlen, fraglen, pad_len = 0;
+ unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ u8 mic_crc_len = u8_get_bits(desc->mac_flags1,
+ IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK) << 1;
+
+ if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
+ len -= 2;
+ pad_len = 2;
+ }
+
+ /* For non-monitor interfaces, strip the bytes the RADA might not have
+ * removed (it might be disabled, e.g. for mgmt frames). As a monitor
+ * interface cannot exist with other interfaces, this removal is safe
+ * and sufficient; in monitor mode there's no decryption being done.
+ */
+ if (len > mic_crc_len && !ieee80211_hw_check(mld->hw, RX_INCLUDES_FCS))
+ len -= mic_crc_len;
+
+ /* If frame is small enough to fit in skb->head, pull it completely.
+ * If not, only pull ieee80211_hdr (including crypto if present, and
+ * an additional 8 bytes for SNAP/ethertype, see below) so that
+ * splice() or TCP coalesce are more efficient.
+ *
+ * Since, in addition, ieee80211_data_to_8023() always pulls in at
+ * least 8 bytes (possibly more for mesh) we can do the same here
+ * to save the cost of doing it later. That still doesn't pull in
+ * the actual IP header since the typical case has a SNAP header.
+ * If the latter changes (there are efforts in the standards group
+ * to do so) we should revisit this and ieee80211_data_to_8023().
+ */
+ headlen = (len <= skb_tailroom(skb)) ? len : hdrlen + crypt_len + 8;
+
+ /* The firmware may align the packet to DWORD.
+ * The padding is inserted after the IV.
+ * After copying the header + IV skip the padding if
+ * present before copying packet data.
+ */
+ hdrlen += crypt_len;
+
+ if (unlikely(headlen < hdrlen))
+ return -EINVAL;
+
+ /* Putting data on the skb (the only thing we do with it here) never
+ * moves the existing data, so data + len is exactly where the header
+ * is about to be placed; mark that offset as the MAC header.
+ */
+ skb_set_mac_header(skb, skb->len);
+ skb_put_data(skb, hdr, hdrlen);
+ skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);
+
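+ /* Keep the hardware CHECKSUM_COMPLETE indication only for frames that
+ * carry a standard RFC 1042 SNAP header with one of the expected
+ * ethertypes; otherwise fall back to CHECKSUM_NONE and let the stack
+ * verify the checksum itself.
+ */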
+ if (skb->ip_summed == CHECKSUM_COMPLETE) {
+ struct {
+ u8 hdr[6];
+ __be16 type;
+ } __packed *shdr = (void *)((u8 *)hdr + hdrlen + pad_len);
+
+ if (unlikely(headlen - hdrlen < sizeof(*shdr) ||
+ !ether_addr_equal(shdr->hdr, rfc1042_header) ||
+ (shdr->type != htons(ETH_P_IP) &&
+ shdr->type != htons(ETH_P_ARP) &&
+ shdr->type != htons(ETH_P_IPV6) &&
+ shdr->type != htons(ETH_P_8021Q) &&
+ shdr->type != htons(ETH_P_PAE) &&
+ shdr->type != htons(ETH_P_TDLS))))
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
+ fraglen = len - headlen;
+
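+ /* Whatever wasn't copied into skb->head above stays in the RX buffer
+ * page and is attached as a paged fragment instead of being copied.
+ */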
+ if (fraglen) {
+ int offset = (u8 *)hdr + headlen + pad_len -
+ (u8 *)rxb_addr(rxb) + rxb_offset(rxb);
+
+ skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
+ fraglen, rxb->truesize);
+ }
+
+ return 0;
+}
+
+/* Returns true if a packet is a duplicate or has an invalid TID and
+ * should be dropped. Updates the per-queue duplicate-detection and
+ * A-MSDU PN tracking info.
+ */
+VISIBLE_IF_IWLWIFI_KUNIT
+bool
+iwl_mld_is_dup(struct iwl_mld *mld, struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr,
+ const struct iwl_rx_mpdu_desc *mpdu_desc,
+ struct ieee80211_rx_status *rx_status, int queue)
+{
+ struct iwl_mld_sta *mld_sta;
+ struct iwl_mld_rxq_dup_data *dup_data;
+ u8 tid, sub_frame_idx;
+
+ if (WARN_ON(!sta))
+ return false;
+
+ mld_sta = iwl_mld_sta_from_mac80211(sta);
+
+ if (WARN_ON_ONCE(!mld_sta->dup_data))
+ return false;
+
+ dup_data = &mld_sta->dup_data[queue];
+
+ /* Drop duplicate 802.11 retransmissions
+ * (IEEE 802.11-2020: 10.3.2.14 "Duplicate detection and recovery")
+ */
+ if (ieee80211_is_ctl(hdr->frame_control) ||
+ ieee80211_is_any_nullfunc(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1))
+ return false;
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ /* frame has qos control */
+ tid = ieee80211_get_tid(hdr);
+ if (tid >= IWL_MAX_TID_COUNT)
+ return true;
+ } else {
+ tid = IWL_MAX_TID_COUNT;
+ }
+
+ /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
+ sub_frame_idx = mpdu_desc->amsdu_info &
+ IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+
+ if (IWL_FW_CHECK(mld,
+ sub_frame_idx > 0 &&
+ !(mpdu_desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU),
+ "got sub_frame_idx=%d but A-MSDU flag is not set\n",
+ sub_frame_idx))
+ return true;
+
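+ /* A retransmission is a duplicate if the retry bit is set, the
+ * sequence number matches the last one seen for this TID and the
+ * A-MSDU subframe index did not advance.
+ */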
+ if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
+ dup_data->last_seq[tid] == hdr->seq_ctrl &&
+ dup_data->last_sub_frame_idx[tid] >= sub_frame_idx))
+ return true;
+
+ /* Allow the same PN as the first subframe for the following subframes */
+ if (dup_data->last_seq[tid] == hdr->seq_ctrl &&
+ sub_frame_idx > dup_data->last_sub_frame_idx[tid])
+ rx_status->flag |= RX_FLAG_ALLOW_SAME_PN;
+
+ dup_data->last_seq[tid] = hdr->seq_ctrl;
+ dup_data->last_sub_frame_idx[tid] = sub_frame_idx;
+
+ rx_status->flag |= RX_FLAG_DUP_VALIDATED;
+
+ return false;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_is_dup);
+
+static void iwl_mld_update_last_rx_timestamp(struct iwl_mld *mld, u8 baid)
+{
+ unsigned long now = jiffies;
+ unsigned long timeout;
+ struct iwl_mld_baid_data *ba_data;
+
+ ba_data = rcu_dereference(mld->fw_id_to_ba[baid]);
+ if (!ba_data) {
+ IWL_DEBUG_HT(mld, "BAID %d not found in map\n", baid);
+ return;
+ }
+
+ if (!ba_data->timeout)
+ return;
+
+ /* To minimize cache bouncing between RX queues, avoid frequent updates
+ * to last_rx_timestamp. Update it only when the timeout period has
+ * passed. The worst case is the session expiring after approximately
+ * 2 * timeout, which is negligible (and the update is atomic).
+ */
+ timeout = TU_TO_JIFFIES(ba_data->timeout);
+ if (time_is_before_jiffies(ba_data->last_rx_timestamp + timeout))
+ ba_data->last_rx_timestamp = now;
+}
+
+/* Processes received packets for a station.
+ * Sets *drop to true if the packet should be dropped.
+ * Returns the station if found, or NULL otherwise.
+ */
+static struct ieee80211_sta *
+iwl_mld_rx_with_sta(struct iwl_mld *mld, struct ieee80211_hdr *hdr,
+ struct sk_buff *skb,
+ const struct iwl_rx_mpdu_desc *mpdu_desc,
+ const struct iwl_rx_packet *pkt, int queue, bool *drop)
+{
+ struct ieee80211_sta *sta = NULL;
+ struct ieee80211_link_sta *link_sta = NULL;
+ struct ieee80211_rx_status *rx_status;
+ u8 baid;
+
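+ /* Use the station the firmware already matched if it reported one;
+ * otherwise fall back to looking it up by the transmitter address.
+ */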
+ if (mpdu_desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
+ u8 sta_id = le32_get_bits(mpdu_desc->status,
+ IWL_RX_MPDU_STATUS_STA_ID);
+
+ if (IWL_FW_CHECK(mld,
+ sta_id >= mld->fw->ucode_capa.num_stations,
+ "rx_mpdu: invalid sta_id %d\n", sta_id))
+ return NULL;
+
+ link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
+ if (!IS_ERR_OR_NULL(link_sta))
+ sta = link_sta->sta;
+ } else if (!is_multicast_ether_addr(hdr->addr2)) {
+ /* Passing NULL is fine since we prevent two stations with the
+ * same address from being added.
+ */
+ sta = ieee80211_find_sta_by_ifaddr(mld->hw, hdr->addr2, NULL);
+ }
+
+ /* we may not have any station yet */
+ if (!sta)
+ return NULL;
+
+ rx_status = IEEE80211_SKB_RXCB(skb);
+
+ if (link_sta && sta->valid_links) {
+ rx_status->link_valid = true;
+ rx_status->link_id = link_sta->link_id;
+ }
+
+ /* fill checksum */
+ if (ieee80211_is_data(hdr->frame_control) &&
+ pkt->len_n_flags & cpu_to_le32(FH_RSCSR_RPA_EN)) {
+ u16 hwsum = be16_to_cpu(mpdu_desc->v3.raw_xsum);
+
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = csum_unfold(~(__force __sum16)hwsum);
+ }
+
+ if (iwl_mld_is_dup(mld, sta, hdr, mpdu_desc, rx_status, queue)) {
+ IWL_DEBUG_DROP(mld, "Dropping duplicate packet 0x%x\n",
+ le16_to_cpu(hdr->seq_ctrl));
+ *drop = true;
+ return NULL;
+ }
+
+ baid = le32_get_bits(mpdu_desc->reorder_data,
+ IWL_RX_MPDU_REORDER_BAID_MASK);
+ if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
+ iwl_mld_update_last_rx_timestamp(mld, baid);
+
+ if (link_sta && ieee80211_is_data(hdr->frame_control)) {
+ u8 sub_frame_idx = mpdu_desc->amsdu_info &
+ IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+
+ /* 0 means not an A-MSDU, and 1 means a new A-MSDU */
+ if (!sub_frame_idx || sub_frame_idx == 1)
+ iwl_mld_count_mpdu_rx(link_sta, queue, 1);
+
+ if (!is_multicast_ether_addr(hdr->addr1))
+ iwl_mld_low_latency_update_counters(mld, hdr, sta,
+ queue);
+ }
+
+ return sta;
+}
+
+#define KEY_IDX_LEN 2
+
+static int iwl_mld_rx_mgmt_prot(struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_rx_status *rx_status,
+ u32 mpdu_status,
+ u32 mpdu_len)
+{
+ struct wireless_dev *wdev;
+ struct iwl_mld_sta *mld_sta;
+ struct iwl_mld_vif *mld_vif;
+ u8 keyidx;
+ struct ieee80211_key_conf *key;
+ const u8 *frame = (void *)hdr;
+
+ if ((mpdu_status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
+ IWL_RX_MPDU_STATUS_SEC_NONE)
+ return 0;
+
+ /* For non-beacon management frames we don't really care. Beacons,
+ * however, may be filtered out by the firmware, so we need its replay
+ * detection: otherwise beacons the firmware previously filtered could
+ * be replayed. The firmware can filter a lot, though usually only if
+ * nothing has changed.
+ */
+ if (!ieee80211_is_beacon(hdr->frame_control))
+ return 0;
+
+ if (!sta)
+ return -1;
+
+ mld_sta = iwl_mld_sta_from_mac80211(sta);
+ mld_vif = iwl_mld_vif_from_mac80211(mld_sta->vif);
+
+ /* key mismatch - will also report !MIC_OK but we shouldn't count it */
+ if (!(mpdu_status & IWL_RX_MPDU_STATUS_KEY_VALID))
+ goto report;
+
+ /* good cases */
+ if (likely(mpdu_status & IWL_RX_MPDU_STATUS_MIC_OK &&
+ !(mpdu_status & IWL_RX_MPDU_STATUS_REPLAY_ERROR))) {
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ return 0;
+ }
+
+ /* both keys will have the same cipher and MIC length, use
+ * whichever one is available
+ */
+ key = rcu_dereference(mld_vif->bigtks[0]);
+ if (!key) {
+ key = rcu_dereference(mld_vif->bigtks[1]);
+ if (!key)
+ goto report;
+ }
+
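+ /* The management MIC element sits at the tail of the frame: key ID
+ * (2 bytes), IPN (6 bytes) and then the MIC itself (key->icv_len
+ * bytes). Make sure the frame is long enough before reading the key
+ * ID out of it.
+ */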
+ if (mpdu_len < key->icv_len + IEEE80211_GMAC_PN_LEN + KEY_IDX_LEN)
+ goto report;
+
+ /* get the real key ID */
+ keyidx = frame[mpdu_len - key->icv_len - IEEE80211_GMAC_PN_LEN - KEY_IDX_LEN];
+ /* and if that's the other key, look it up */
+ if (keyidx != key->keyidx) {
+ /* shouldn't happen since firmware checked, but be safe
+ * in case the MIC length is wrong too, for example
+ */
+ if (keyidx != 6 && keyidx != 7)
+ return -1;
+
+ key = rcu_dereference(mld_vif->bigtks[keyidx - 6]);
+ if (!key)
+ goto report;
+ }
+
+ /* Report status to mac80211 */
+ if (!(mpdu_status & IWL_RX_MPDU_STATUS_MIC_OK))
+ ieee80211_key_mic_failure(key);
+ else if (mpdu_status & IWL_RX_MPDU_STATUS_REPLAY_ERROR)
+ ieee80211_key_replay(key);
+report:
+ wdev = ieee80211_vif_to_wdev(mld_sta->vif);
+ if (wdev->netdev)
+ cfg80211_rx_unprot_mlme_mgmt(wdev->netdev, (void *)hdr,
+ mpdu_len);
+
+ return -1;
+}
+
+static int iwl_mld_rx_crypto(struct iwl_mld *mld,
+ struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_rx_status *rx_status,
+ struct iwl_rx_mpdu_desc *desc, int queue,
+ u32 pkt_flags, u8 *crypto_len)
+{
+ u32 status = le32_to_cpu(desc->status);
+
+ if (unlikely(ieee80211_is_mgmt(hdr->frame_control) &&
+ !ieee80211_has_protected(hdr->frame_control)))
+ return iwl_mld_rx_mgmt_prot(sta, hdr, rx_status, status,
+ le16_to_cpu(desc->mpdu_len));
+
+ if (!ieee80211_has_protected(hdr->frame_control) ||
+ (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
+ IWL_RX_MPDU_STATUS_SEC_NONE)
+ return 0;
+
+ switch (status & IWL_RX_MPDU_STATUS_SEC_MASK) {
+ case IWL_RX_MPDU_STATUS_SEC_CCM:
+ case IWL_RX_MPDU_STATUS_SEC_GCM:
+ BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN);
+ if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) {
+ IWL_DEBUG_DROP(mld,
+ "Dropping packet, bad MIC (CCM/GCM)\n");
+ return -1;
+ }
+
+ rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED;
+ *crypto_len = IEEE80211_CCMP_HDR_LEN;
+ return 0;
+ case IWL_RX_MPDU_STATUS_SEC_TKIP:
+ if (!(status & IWL_RX_MPDU_STATUS_ICV_OK))
+ return -1;
+
+ if (!(status & RX_MPDU_RES_STATUS_MIC_OK))
+ rx_status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (pkt_flags & FH_RSCSR_RADA_EN) {
+ rx_status->flag |= RX_FLAG_ICV_STRIPPED;
+ rx_status->flag |= RX_FLAG_MMIC_STRIPPED;
+ }
+
+ *crypto_len = IEEE80211_TKIP_IV_LEN;
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ return 0;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void iwl_mld_rx_update_ampdu_ref(struct iwl_mld *mld,
+ struct iwl_mld_rx_phy_data *phy_data,
+ struct ieee80211_rx_status *rx_status)
+{
+ bool toggle_bit =
+ phy_data->phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
+
+ rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+ /* Toggle is switched whenever new aggregation starts. Make
+ * sure ampdu_reference is never 0 so we can later use it to
+ * see if the frame was really part of an A-MPDU or not.
+ */
+ if (toggle_bit != mld->monitor.ampdu_toggle) {
+ mld->monitor.ampdu_ref++;
+ if (mld->monitor.ampdu_ref == 0)
+ mld->monitor.ampdu_ref++;
+ mld->monitor.ampdu_toggle = toggle_bit;
+ phy_data->first_subframe = true;
+ }
+ rx_status->ampdu_reference = mld->monitor.ampdu_ref;
+}
+
+static void
+iwl_mld_fill_rx_status_band_freq(struct iwl_mld_rx_phy_data *phy_data,
+ struct iwl_rx_mpdu_desc *mpdu_desc,
+ struct ieee80211_rx_status *rx_status)
+{
+ enum nl80211_band band;
+
+ band = BAND_IN_RX_STATUS(mpdu_desc->mac_phy_idx);
+ rx_status->band = iwl_mld_phy_band_to_nl80211(band);
+ rx_status->freq = ieee80211_channel_to_frequency(phy_data->channel,
+ rx_status->band);
+}
+
+void iwl_mld_rx_mpdu(struct iwl_mld *mld, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, int queue)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mld_rx_phy_data phy_data = {};
+ struct iwl_rx_mpdu_desc *mpdu_desc = (void *)pkt->data;
+ struct ieee80211_sta *sta;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *skb;
+ size_t mpdu_desc_size = sizeof(*mpdu_desc);
+ bool drop = false;
+ u8 crypto_len = 0;
+ u32 pkt_len = iwl_rx_packet_payload_len(pkt);
+ u32 mpdu_len;
+ enum iwl_mld_reorder_result reorder_res;
+ struct ieee80211_rx_status *rx_status;
+
+ if (unlikely(mld->fw_status.in_hw_restart))
+ return;
+
+ if (IWL_FW_CHECK(mld, pkt_len < mpdu_desc_size,
+ "Bad REPLY_RX_MPDU_CMD size (%d)\n", pkt_len))
+ return;
+
+ mpdu_len = le16_to_cpu(mpdu_desc->mpdu_len);
+
+ if (IWL_FW_CHECK(mld, mpdu_len + mpdu_desc_size > pkt_len,
+ "FW lied about packet len (%d)\n", pkt_len))
+ return;
+
+ /* Don't use dev_alloc_skb(); we'll have enough headroom once the
+ * ieee80211_hdr is pulled.
+ */
+ skb = alloc_skb(128, GFP_ATOMIC);
+ if (!skb) {
+ IWL_ERR(mld, "alloc_skb failed\n");
+ return;
+ }
+
+ hdr = (void *)(pkt->data + mpdu_desc_size);
+
+ iwl_mld_fill_phy_data(mpdu_desc, &phy_data);
+
+ if (mpdu_desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
+ /* If the device inserted padding it means that (it thought)
+ * the 802.11 header wasn't a multiple of 4 bytes long. In
+ * this case, reserve two bytes at the start of the SKB to
+ * align the payload properly in case we end up copying it.
+ */
+ skb_reserve(skb, 2);
+ }
+
+ rx_status = IEEE80211_SKB_RXCB(skb);
+
+ /* this is needed early */
+ iwl_mld_fill_rx_status_band_freq(&phy_data, mpdu_desc, rx_status);
+
+ rcu_read_lock();
+
+ sta = iwl_mld_rx_with_sta(mld, hdr, skb, mpdu_desc, pkt, queue, &drop);
+ if (drop)
+ goto drop;
+
+ /* update aggregation data for monitor sake on default queue */
+ if (!queue && (phy_data.phy_info & IWL_RX_MPDU_PHY_AMPDU))
+ iwl_mld_rx_update_ampdu_ref(mld, &phy_data, rx_status);
+
+ iwl_mld_rx_fill_status(mld, skb, &phy_data, mpdu_desc, hdr, queue);
+
+ if (iwl_mld_rx_crypto(mld, sta, hdr, rx_status, mpdu_desc, queue,
+ le32_to_cpu(pkt->len_n_flags), &crypto_len))
+ goto drop;
+
+ if (iwl_mld_build_rx_skb(mld, skb, hdr, mpdu_len, crypto_len, rxb))
+ goto drop;
+
+ /* time sync frame is saved and will be released later when the
+ * notification with the timestamps arrives.
+ */
+ if (iwl_mld_time_sync_frame(mld, skb, hdr->addr2))
+ goto out;
+
+ reorder_res = iwl_mld_reorder(mld, napi, queue, sta, skb, mpdu_desc);
+ switch (reorder_res) {
+ case IWL_MLD_PASS_SKB:
+ break;
+ case IWL_MLD_DROP_SKB:
+ goto drop;
+ case IWL_MLD_BUFFERED_SKB:
+ goto out;
+ default:
+ WARN_ON(1);
+ goto drop;
+ }
+
+ iwl_mld_pass_packet_to_mac80211(mld, napi, skb, queue, sta);
+
+ goto out;
+
+drop:
+ kfree_skb(skb);
+out:
+ rcu_read_unlock();
+}
+
+#define SYNC_RX_QUEUE_TIMEOUT (HZ)
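+/* Trigger an internal notification that the firmware echoes back to all
+ * RSS queues. Each queue clears its bit in rxq_sync.state when it handles
+ * the echo (see iwl_mld_handle_rx_queues_sync_notif()), and we wait here
+ * until all bits are cleared or the timeout expires.
+ */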
+void iwl_mld_sync_rx_queues(struct iwl_mld *mld,
+ enum iwl_mld_internal_rxq_notif_type type,
+ const void *notif_payload, u32 notif_payload_size)
+{
+ u8 num_rx_queues = mld->trans->num_rx_queues;
+ struct {
+ struct iwl_rxq_sync_cmd sync_cmd;
+ struct iwl_mld_internal_rxq_notif notif;
+ } __packed cmd = {
+ .sync_cmd.rxq_mask = cpu_to_le32(BIT(num_rx_queues) - 1),
+ .sync_cmd.count =
+ cpu_to_le32(sizeof(struct iwl_mld_internal_rxq_notif) +
+ notif_payload_size),
+ .notif.type = type,
+ .notif.cookie = mld->rxq_sync.cookie,
+ };
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(DATA_PATH_GROUP, TRIGGER_RX_QUEUES_NOTIF_CMD),
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ .data[1] = notif_payload,
+ .len[1] = notif_payload_size,
+ };
+ int ret;
+
+ /* size must be a multiple of DWORD */
+ if (WARN_ON(cmd.sync_cmd.count & cpu_to_le32(3)))
+ return;
+
+ mld->rxq_sync.state = (1 << num_rx_queues) - 1;
+
+ ret = iwl_mld_send_cmd(mld, &hcmd);
+ if (ret) {
+ IWL_ERR(mld, "Failed to trigger RX queues sync (%d)\n", ret);
+ goto out;
+ }
+
+ ret = wait_event_timeout(mld->rxq_sync.waitq,
+ READ_ONCE(mld->rxq_sync.state) == 0,
+ SYNC_RX_QUEUE_TIMEOUT);
+ WARN_ONCE(!ret, "RXQ sync failed: state=0x%lx, cookie=%d\n",
+ mld->rxq_sync.state, mld->rxq_sync.cookie);
+
+out:
+ mld->rxq_sync.state = 0;
+ mld->rxq_sync.cookie++;
+}
+
+void iwl_mld_handle_rx_queues_sync_notif(struct iwl_mld *mld,
+ struct napi_struct *napi,
+ struct iwl_rx_packet *pkt, int queue)
+{
+ struct iwl_rxq_sync_notification *notif;
+ struct iwl_mld_internal_rxq_notif *internal_notif;
+ u32 len = iwl_rx_packet_payload_len(pkt);
+ size_t combined_notif_len = sizeof(*notif) + sizeof(*internal_notif);
+
+ notif = (void *)pkt->data;
+ internal_notif = (void *)notif->payload;
+
+ if (IWL_FW_CHECK(mld, len < combined_notif_len,
+ "invalid notification size %u (%zu)\n",
+ len, combined_notif_len))
+ return;
+
+ len -= combined_notif_len;
+
+ if (IWL_FW_CHECK(mld, mld->rxq_sync.cookie != internal_notif->cookie,
+ "received expired RX queue sync message (cookie=%d expected=%d q[%d])\n",
+ internal_notif->cookie, mld->rxq_sync.cookie, queue))
+ return;
+
+ switch (internal_notif->type) {
+ case IWL_MLD_RXQ_EMPTY:
+ IWL_FW_CHECK(mld, len,
+ "invalid empty notification size %d\n", len);
+ break;
+ case IWL_MLD_RXQ_NOTIF_DEL_BA:
+ if (IWL_FW_CHECK(mld, len != sizeof(struct iwl_mld_delba_data),
+ "invalid delba notification size %u (%zu)\n",
+ len, sizeof(struct iwl_mld_delba_data)))
+ break;
+ iwl_mld_del_ba(mld, queue, (void *)internal_notif->payload);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ IWL_FW_CHECK(mld, !test_and_clear_bit(queue, &mld->rxq_sync.state),
+ "RXQ sync: queue %d responded a second time!\n", queue);
+
+ if (READ_ONCE(mld->rxq_sync.state) == 0)
+ wake_up(&mld->rxq_sync.waitq);
+}
+
+void iwl_mld_rx_monitor_no_data(struct iwl_mld *mld, struct napi_struct *napi,
+ struct iwl_rx_packet *pkt, int queue)
+{
+ struct iwl_rx_no_data_ver_3 *desc;
+ struct iwl_mld_rx_phy_data phy_data;
+ struct ieee80211_rx_status *rx_status;
+ struct sk_buff *skb;
+ u32 format, rssi;
+
+ if (unlikely(mld->fw_status.in_hw_restart))
+ return;
+
+ if (IWL_FW_CHECK(mld, iwl_rx_packet_payload_len(pkt) < sizeof(*desc),
+ "Bad RX_NO_DATA_NOTIF size (%d)\n",
+ iwl_rx_packet_payload_len(pkt)))
+ return;
+
+ desc = (void *)pkt->data;
+
+ rssi = le32_to_cpu(desc->rssi);
+ phy_data.energy_a = u32_get_bits(rssi, RX_NO_DATA_CHAIN_A_MSK);
+ phy_data.energy_b = u32_get_bits(rssi, RX_NO_DATA_CHAIN_B_MSK);
+ phy_data.channel = u32_get_bits(rssi, RX_NO_DATA_CHANNEL_MSK);
+ phy_data.data0 = desc->phy_info[0];
+ phy_data.data1 = desc->phy_info[1];
+ phy_data.phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD;
+ phy_data.gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time);
+ phy_data.rate_n_flags = le32_to_cpu(desc->rate);
+ phy_data.with_data = false;
+
+ BUILD_BUG_ON(sizeof(phy_data.rx_vec) != sizeof(desc->rx_vec));
+ memcpy(phy_data.rx_vec, desc->rx_vec, sizeof(phy_data.rx_vec));
+
+ format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+
+ /* Don't use dev_alloc_skb(); we'll have enough headroom once the
+ * ieee80211_hdr is pulled.
+ */
+ skb = alloc_skb(128, GFP_ATOMIC);
+ if (!skb) {
+ IWL_ERR(mld, "alloc_skb failed\n");
+ return;
+ }
+
+ rx_status = IEEE80211_SKB_RXCB(skb);
+
+ /* 0-length PSDU */
+ rx_status->flag |= RX_FLAG_NO_PSDU;
+
+ /* mark as failed PLCP on any errors to skip checks in mac80211 */
+ if (le32_get_bits(desc->info, RX_NO_DATA_INFO_ERR_MSK) !=
+ RX_NO_DATA_INFO_ERR_NONE)
+ rx_status->flag |= RX_FLAG_FAILED_PLCP_CRC;
+
+ switch (le32_get_bits(desc->info, RX_NO_DATA_INFO_TYPE_MSK)) {
+ case RX_NO_DATA_INFO_TYPE_NDP:
+ rx_status->zero_length_psdu_type =
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING;
+ break;
+ case RX_NO_DATA_INFO_TYPE_MU_UNMATCHED:
+ case RX_NO_DATA_INFO_TYPE_TB_UNMATCHED:
+ rx_status->zero_length_psdu_type =
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU_NOT_CAPTURED;
+ break;
+ default:
+ rx_status->zero_length_psdu_type =
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU_VENDOR;
+ break;
+ }
+
+ rx_status->band = phy_data.channel > 14 ? NL80211_BAND_5GHZ :
+ NL80211_BAND_2GHZ;
+
+ rx_status->freq = ieee80211_channel_to_frequency(phy_data.channel,
+ rx_status->band);
+
+ iwl_mld_rx_fill_status(mld, skb, &phy_data, NULL, NULL, queue);
+
+ /* No more radiotap info should be added after this point.
+ * Mark it as mac header for upper layers to know where
+ * the radiotap header ends.
+ */
+ skb_set_mac_header(skb, skb->len);
+
+ /* Override the nss from the rx_vec since the rate_n_flags has
+ * only 1 bit for the nss which gives a max of 2 ss but there
+ * may be up to 8 spatial streams.
+ */
+ switch (format) {
+ case RATE_MCS_VHT_MSK:
+ rx_status->nss =
+ le32_get_bits(desc->rx_vec[0],
+ RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1;
+ break;
+ case RATE_MCS_HE_MSK:
+ rx_status->nss =
+ le32_get_bits(desc->rx_vec[0],
+ RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1;
+ break;
+ case RATE_MCS_EHT_MSK:
+ rx_status->nss =
+ le32_get_bits(desc->rx_vec[2],
+ RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK) + 1;
+ }
+
+ /* pass the packet to mac80211 */
+ rcu_read_lock();
+ ieee80211_rx_napi(mld->hw, NULL, skb, napi);
+ rcu_read_unlock();
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/rx.h b/drivers/net/wireless/intel/iwlwifi/mld/rx.h
new file mode 100644
index 000000000000..2beabd7e70b1
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/rx.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_rx_h__
+#define __iwl_mld_rx_h__
+
+#include "mld.h"
+
+/**
+ * enum iwl_mld_internal_rxq_notif_type - RX queue sync notif types
+ *
+ * @IWL_MLD_RXQ_EMPTY: empty sync notification
+ * @IWL_MLD_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
+ */
+enum iwl_mld_internal_rxq_notif_type {
+ IWL_MLD_RXQ_EMPTY,
+ IWL_MLD_RXQ_NOTIF_DEL_BA,
+};
+
+/**
+ * struct iwl_mld_internal_rxq_notif - @iwl_rxq_sync_cmd internal data.
+ * This data is echoed by the firmware to all RSS queues and should be DWORD
+ * aligned. FW is agnostic to the data, so there are no endianness requirements
+ *
+ * @type: one of &iwl_mld_internal_rxq_notif_type
+ * @cookie: unique internal cookie to identify old notifications
+ * @reserved: reserved for alignment
+ * @payload: data to send to RX queues based on the type (may be empty)
+ */
+struct iwl_mld_internal_rxq_notif {
+ u8 type;
+ u8 reserved[3];
+ u32 cookie;
+ u8 payload[];
+} __packed;
+
+/**
+ * struct iwl_mld_rx_queues_sync - RX queues sync data
+ *
+ * @waitq: wait queue for RX queues sync completion
+ * @cookie: unique id to correlate sync requests with responses
+ * @state: bitmask representing the sync state of RX queues
+ * all RX queues bits are set before sending the command, and the
+ * corresponding queue bit cleared upon handling the notification
+ */
+struct iwl_mld_rx_queues_sync {
+ wait_queue_head_t waitq;
+ u32 cookie;
+ unsigned long state;
+};
+
+void iwl_mld_rx_mpdu(struct iwl_mld *mld, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, int queue);
+
+void iwl_mld_sync_rx_queues(struct iwl_mld *mld,
+ enum iwl_mld_internal_rxq_notif_type type,
+ const void *notif_payload, u32 notif_payload_size);
+
+void iwl_mld_handle_rx_queues_sync_notif(struct iwl_mld *mld,
+ struct napi_struct *napi,
+ struct iwl_rx_packet *pkt, int queue);
+
+void iwl_mld_pass_packet_to_mac80211(struct iwl_mld *mld,
+ struct napi_struct *napi,
+ struct sk_buff *skb, int queue,
+ struct ieee80211_sta *sta);
+
+void iwl_mld_rx_monitor_no_data(struct iwl_mld *mld, struct napi_struct *napi,
+ struct iwl_rx_packet *pkt, int queue);
+
+#endif /* __iwl_mld_rx_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/scan.c b/drivers/net/wireless/intel/iwlwifi/mld/scan.c
new file mode 100644
index 000000000000..7ec04318ec2f
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/scan.c
@@ -0,0 +1,2008 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include <linux/crc32.h>
+
+#include "mld.h"
+#include "scan.h"
+#include "hcmd.h"
+#include "iface.h"
+#include "phy.h"
+#include "mlo.h"
+
+#include "fw/api/scan.h"
+#include "fw/dbg.h"
+
+#define IWL_SCAN_DWELL_ACTIVE 10
+#define IWL_SCAN_DWELL_PASSIVE 110
+#define IWL_SCAN_NUM_OF_FRAGS 3
+
+/* adaptive dwell max budget time [TU] for full scan */
+#define IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
+
+/* adaptive dwell max budget time [TU] for directed scan */
+#define IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
+
+/* adaptive dwell default high band APs number */
+#define IWL_SCAN_ADWELL_DEFAULT_HB_N_APS 8
+
+/* adaptive dwell default low band APs number */
+#define IWL_SCAN_ADWELL_DEFAULT_LB_N_APS 2
+
+/* adaptive dwell default APs number for P2P social channels (1, 6, 11) */
+#define IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
+
+/* adaptive dwell number of APs override for P2P friendly GO channels */
+#define IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY 10
+
+/* adaptive dwell number of APs override for P2P social channels */
+#define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS 2
+
+/* adaptive dwell number of APs override mask for p2p friendly GO */
+#define IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY_BIT BIT(20)
+
+/* adaptive dwell number of APs override mask for social channels */
+#define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS_BIT BIT(21)
+
+#define SCAN_TIMEOUT_MSEC (30000 * HZ)
+
+/* minimal number of 2GHz and 5GHz channels in the regular scan request */
+#define IWL_MLD_6GHZ_PASSIVE_SCAN_MIN_CHANS 4
+
+enum iwl_mld_scan_type {
+ IWL_SCAN_TYPE_NOT_SET,
+ IWL_SCAN_TYPE_UNASSOC,
+ IWL_SCAN_TYPE_WILD,
+ IWL_SCAN_TYPE_MILD,
+ IWL_SCAN_TYPE_FRAGMENTED,
+ IWL_SCAN_TYPE_FAST_BALANCE,
+};
+
+struct iwl_mld_scan_timing_params {
+ u32 suspend_time;
+ u32 max_out_time;
+};
+
+static const struct iwl_mld_scan_timing_params scan_timing[] = {
+ [IWL_SCAN_TYPE_UNASSOC] = {
+ .suspend_time = 0,
+ .max_out_time = 0,
+ },
+ [IWL_SCAN_TYPE_WILD] = {
+ .suspend_time = 30,
+ .max_out_time = 120,
+ },
+ [IWL_SCAN_TYPE_MILD] = {
+ .suspend_time = 120,
+ .max_out_time = 120,
+ },
+ [IWL_SCAN_TYPE_FRAGMENTED] = {
+ .suspend_time = 95,
+ .max_out_time = 44,
+ },
+ [IWL_SCAN_TYPE_FAST_BALANCE] = {
+ .suspend_time = 30,
+ .max_out_time = 37,
+ },
+};
+
+struct iwl_mld_scan_params {
+ enum iwl_mld_scan_type type;
+ u32 n_channels;
+ u16 delay;
+ int n_ssids;
+ struct cfg80211_ssid *ssids;
+ struct ieee80211_channel **channels;
+ u32 flags;
+ u8 *mac_addr;
+ u8 *mac_addr_mask;
+ bool no_cck;
+ bool pass_all;
+ int n_match_sets;
+ struct iwl_scan_probe_req preq;
+ struct cfg80211_match_set *match_sets;
+ int n_scan_plans;
+ struct cfg80211_sched_scan_plan *scan_plans;
+ bool iter_notif;
+ bool respect_p2p_go;
+ u8 fw_link_id;
+ struct cfg80211_scan_6ghz_params *scan_6ghz_params;
+ u32 n_6ghz_params;
+ bool scan_6ghz;
+ bool enable_6ghz_passive;
+ u8 bssid[ETH_ALEN] __aligned(2);
+};
+
+struct iwl_mld_scan_respect_p2p_go_iter_data {
+ struct ieee80211_vif *current_vif;
+ bool p2p_go;
+};
+
+static void iwl_mld_scan_respect_p2p_go_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_scan_respect_p2p_go_iter_data *data = _data;
+
+ /* exclude the given vif */
+ if (vif == data->current_vif)
+ return;
+
+ /* TODO: CDB check the band of the GO */
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_P2P_GO &&
+ iwl_mld_vif_from_mac80211(vif)->ap_ibss_active)
+ data->p2p_go = true;
+}
+
+static bool iwl_mld_get_respect_p2p_go(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ bool low_latency)
+{
+ struct iwl_mld_scan_respect_p2p_go_iter_data data = {
+ .current_vif = vif,
+ .p2p_go = false,
+ };
+
+ if (!low_latency)
+ return false;
+
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_scan_respect_p2p_go_iter,
+ &data);
+
+ return data.p2p_go;
+}
+
+struct iwl_mld_scan_iter_data {
+ struct ieee80211_vif *current_vif;
+ bool active_vif;
+ bool is_dcm_with_p2p_go;
+ bool global_low_latency;
+};
+
+static void iwl_mld_scan_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_scan_iter_data *data = _data;
+ struct ieee80211_vif *curr_vif = data->current_vif;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_vif *curr_mld_vif;
+ unsigned long curr_vif_active_links;
+ u16 link_id;
+
+ data->global_low_latency |= iwl_mld_vif_low_latency(mld_vif);
+
+ if ((ieee80211_vif_is_mld(vif) && vif->active_links) ||
+ (vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+ mld_vif->deflink.active))
+ data->active_vif = true;
+
+ if (vif == curr_vif)
+ return;
+
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_P2P_GO)
+ return;
+
+ /* Currently P2P GO can't be AP MLD so the logic below assumes that */
+ WARN_ON_ONCE(ieee80211_vif_is_mld(vif));
+
+ curr_vif_active_links =
+ ieee80211_vif_is_mld(curr_vif) ? curr_vif->active_links : 1;
+
+ curr_mld_vif = iwl_mld_vif_from_mac80211(curr_vif);
+
+ for_each_set_bit(link_id, &curr_vif_active_links,
+ IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct iwl_mld_link *curr_mld_link =
+ iwl_mld_link_dereference_check(curr_mld_vif, link_id);
+
+ if (WARN_ON(!curr_mld_link))
+ return;
+
+ if (rcu_access_pointer(curr_mld_link->chan_ctx) &&
+ rcu_access_pointer(mld_vif->deflink.chan_ctx) !=
+ rcu_access_pointer(curr_mld_link->chan_ctx)) {
+ data->is_dcm_with_p2p_go = true;
+ return;
+ }
+ }
+}
+
+static enum
+iwl_mld_scan_type iwl_mld_get_scan_type(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mld_scan_iter_data *data)
+{
+ enum iwl_mld_traffic_load load = mld->scan.traffic_load.status;
+
+ /* A scanning AP interface probably wants to generate a survey to do
+ * ACS (automatic channel selection).
+ * Force a non-fragmented scan in that case.
+ */
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP)
+ return IWL_SCAN_TYPE_WILD;
+
+ if (!data->active_vif)
+ return IWL_SCAN_TYPE_UNASSOC;
+
+ if ((load == IWL_MLD_TRAFFIC_HIGH || data->global_low_latency) &&
+ vif->type != NL80211_IFTYPE_P2P_DEVICE)
+ return IWL_SCAN_TYPE_FRAGMENTED;
+
+ /* In case of DCM with P2P GO set all scan requests as
+ * fast-balance scan
+ */
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ data->is_dcm_with_p2p_go)
+ return IWL_SCAN_TYPE_FAST_BALANCE;
+
+ if (load >= IWL_MLD_TRAFFIC_MEDIUM || data->global_low_latency)
+ return IWL_SCAN_TYPE_MILD;
+
+ return IWL_SCAN_TYPE_WILD;
+}
+
+static u8 *
+iwl_mld_scan_add_2ghz_elems(struct iwl_mld *mld, const u8 *ies,
+ size_t len, u8 *const pos)
+{
+ static const u8 before_ds_params[] = {
+ WLAN_EID_SSID,
+ WLAN_EID_SUPP_RATES,
+ WLAN_EID_REQUEST,
+ WLAN_EID_EXT_SUPP_RATES,
+ };
+ size_t offs;
+ u8 *newpos = pos;
+
+ offs = ieee80211_ie_split(ies, len,
+ before_ds_params,
+ ARRAY_SIZE(before_ds_params),
+ 0);
+
+ memcpy(newpos, ies, offs);
+ newpos += offs;
+
+ /* Add a placeholder for DS Parameter Set element */
+ *newpos++ = WLAN_EID_DS_PARAMS;
+ *newpos++ = 1;
+ *newpos++ = 0;
+
+ memcpy(newpos, ies + offs, len - offs);
+ newpos += len - offs;
+
+ return newpos;
+}
+
+static void
+iwl_mld_scan_add_tpc_report_elem(u8 *pos)
+{
+ pos[0] = WLAN_EID_VENDOR_SPECIFIC;
+ pos[1] = WFA_TPC_IE_LEN - 2;
+ pos[2] = (WLAN_OUI_MICROSOFT >> 16) & 0xff;
+ pos[3] = (WLAN_OUI_MICROSOFT >> 8) & 0xff;
+ pos[4] = WLAN_OUI_MICROSOFT & 0xff;
+ pos[5] = WLAN_OUI_TYPE_MICROSOFT_TPC;
+ pos[6] = 0;
+ /* pos[7] - tx power will be inserted by the FW */
+ pos[7] = 0;
+ pos[8] = 0;
+}
+
+static u32
+iwl_mld_scan_ooc_priority(enum iwl_mld_scan_status scan_status)
+{
+ if (scan_status == IWL_MLD_SCAN_REGULAR)
+ return IWL_SCAN_PRIORITY_EXT_6;
+ if (scan_status == IWL_MLD_SCAN_INT_MLO)
+ return IWL_SCAN_PRIORITY_EXT_4;
+
+ return IWL_SCAN_PRIORITY_EXT_2;
+}
+
+static bool
+iwl_mld_scan_is_regular(struct iwl_mld_scan_params *params)
+{
+ return params->n_scan_plans == 1 &&
+ params->scan_plans[0].iterations == 1;
+}
+
+static bool
+iwl_mld_scan_is_fragmented(enum iwl_mld_scan_type type)
+{
+ return (type == IWL_SCAN_TYPE_FRAGMENTED ||
+ type == IWL_SCAN_TYPE_FAST_BALANCE);
+}
+
+static int
+iwl_mld_scan_uid_by_status(struct iwl_mld *mld, int status)
+{
+ for (int i = 0; i < ARRAY_SIZE(mld->scan.uid_status); i++)
+ if (mld->scan.uid_status[i] == status)
+ return i;
+
+ return -ENOENT;
+}
+
+static const char *
+iwl_mld_scan_ebs_status_str(enum iwl_scan_ebs_status status)
+{
+ switch (status) {
+ case IWL_SCAN_EBS_SUCCESS:
+ return "successful";
+ case IWL_SCAN_EBS_INACTIVE:
+ return "inactive";
+ case IWL_SCAN_EBS_FAILED:
+ case IWL_SCAN_EBS_CHAN_NOT_FOUND:
+ default:
+ return "failed";
+ }
+}
+
+static int
+iwl_mld_scan_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
+{
+ for (int i = 0; i < PROBE_OPTION_MAX; i++) {
+ if (!ssid_list[i].len)
+ return -1;
+ if (ssid_list[i].len == ssid_len &&
+ !memcmp(ssid_list[i].ssid, ssid, ssid_len))
+ return i;
+ }
+
+ return -1;
+}
+
+static bool
+iwl_mld_scan_fits(struct iwl_mld *mld, int n_ssids,
+ struct ieee80211_scan_ies *ies, int n_channels)
+{
+ return ((n_ssids <= PROBE_OPTION_MAX) &&
+ (n_channels <= mld->fw->ucode_capa.n_scan_channels) &&
+ (ies->common_ie_len + ies->len[NL80211_BAND_2GHZ] +
+ ies->len[NL80211_BAND_5GHZ] + ies->len[NL80211_BAND_6GHZ] <=
+ iwl_mld_scan_max_template_size()));
+}
+
+static void
+iwl_mld_scan_build_probe_req(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct ieee80211_scan_ies *ies,
+ struct iwl_mld_scan_params *params)
+{
+ struct ieee80211_mgmt *frame = (void *)params->preq.buf;
+ u8 *pos, *newpos;
+ const u8 *mac_addr = params->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
+ params->mac_addr : NULL;
+
+ if (mac_addr)
+ get_random_mask_addr(frame->sa, mac_addr,
+ params->mac_addr_mask);
+ else
+ memcpy(frame->sa, vif->addr, ETH_ALEN);
+
+ frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
+ eth_broadcast_addr(frame->da);
+ ether_addr_copy(frame->bssid, params->bssid);
+ frame->seq_ctrl = 0;
+
+ pos = frame->u.probe_req.variable;
+ *pos++ = WLAN_EID_SSID;
+ *pos++ = 0;
+
+ params->preq.mac_header.offset = 0;
+ params->preq.mac_header.len = cpu_to_le16(24 + 2);
+
+ /* Insert DS parameter set element on 2.4 GHz band */
+ newpos = iwl_mld_scan_add_2ghz_elems(mld,
+ ies->ies[NL80211_BAND_2GHZ],
+ ies->len[NL80211_BAND_2GHZ],
+ pos);
+ params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf);
+ params->preq.band_data[0].len = cpu_to_le16(newpos - pos);
+ pos = newpos;
+
+ memcpy(pos, ies->ies[NL80211_BAND_5GHZ],
+ ies->len[NL80211_BAND_5GHZ]);
+ params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf);
+ params->preq.band_data[1].len =
+ cpu_to_le16(ies->len[NL80211_BAND_5GHZ]);
+ pos += ies->len[NL80211_BAND_5GHZ];
+
+ memcpy(pos, ies->ies[NL80211_BAND_6GHZ],
+ ies->len[NL80211_BAND_6GHZ]);
+ params->preq.band_data[2].offset = cpu_to_le16(pos - params->preq.buf);
+ params->preq.band_data[2].len =
+ cpu_to_le16(ies->len[NL80211_BAND_6GHZ]);
+ pos += ies->len[NL80211_BAND_6GHZ];
+
+ memcpy(pos, ies->common_ies, ies->common_ie_len);
+ params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);
+
+ iwl_mld_scan_add_tpc_report_elem(pos + ies->common_ie_len);
+ params->preq.common_data.len = cpu_to_le16(ies->common_ie_len +
+ WFA_TPC_IE_LEN);
+}
+
+static u16
+iwl_mld_scan_get_cmd_gen_flags(struct iwl_mld *mld,
+ struct iwl_mld_scan_params *params,
+ struct ieee80211_vif *vif,
+ enum iwl_mld_scan_status scan_status)
+{
+ u16 flags = 0;
+
+ /* If no direct SSIDs are provided perform a passive scan. Otherwise,
+ * if there is a single SSID which is not the broadcast SSID, assume
+ * that the scan is intended for roaming purposes and thus enable Rx on
+ * all chains to improve chances of hearing the beacons/probe responses.
+ */
+ if (params->n_ssids == 0)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
+ else if (params->n_ssids == 1 && params->ssids[0].ssid_len)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_USE_ALL_RX_CHAINS;
+
+ if (params->pass_all)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
+ else
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_MATCH;
+
+ if (iwl_mld_scan_is_fragmented(params->type))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1;
+
+ if (!iwl_mld_scan_is_regular(params))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC;
+
+ if (params->iter_notif ||
+ mld->scan.pass_all_sched_res == SCHED_SCAN_PASS_ALL_STATE_ENABLED)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
+
+ if (scan_status == IWL_MLD_SCAN_SCHED ||
+ scan_status == IWL_MLD_SCAN_NETDETECT)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE;
+
+ if (params->flags & (NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP |
+ NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE |
+ NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE;
+
+ if ((scan_status == IWL_MLD_SCAN_SCHED ||
+ scan_status == IWL_MLD_SCAN_NETDETECT) &&
+ params->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN;
+
+ if (params->enable_6ghz_passive)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN;
+
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
+
+ return flags;
+}
+
+static u8
+iwl_mld_scan_get_cmd_gen_flags2(struct iwl_mld *mld,
+ struct iwl_mld_scan_params *params,
+ struct ieee80211_vif *vif, u16 gen_flags)
+{
+ u8 flags = 0;
+
+ /* TODO: CDB */
+ if (params->respect_p2p_go)
+ flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB |
+ IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB;
+
+ if (params->scan_6ghz)
+ flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_DONT_TOGGLE_ANT;
+
+ return flags;
+}
+
+static void
+iwl_mld_scan_cmd_set_dwell(struct iwl_mld *mld,
+ struct iwl_scan_general_params_v11 *gp,
+ struct iwl_mld_scan_params *params)
+{
+ const struct iwl_mld_scan_timing_params *timing =
+ &scan_timing[params->type];
+
+ gp->adwell_default_social_chn =
+ IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
+ gp->adwell_default_2g = IWL_SCAN_ADWELL_DEFAULT_LB_N_APS;
+ gp->adwell_default_5g = IWL_SCAN_ADWELL_DEFAULT_HB_N_APS;
+
+ if (params->n_ssids && params->ssids[0].ssid_len)
+ gp->adwell_max_budget =
+ cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
+ else
+ gp->adwell_max_budget =
+ cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
+
+ gp->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+
+ gp->max_out_of_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(timing->max_out_time);
+ gp->suspend_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(timing->suspend_time);
+
+ gp->active_dwell[SCAN_LB_LMAC_IDX] = IWL_SCAN_DWELL_ACTIVE;
+ gp->passive_dwell[SCAN_LB_LMAC_IDX] = IWL_SCAN_DWELL_PASSIVE;
+ gp->active_dwell[SCAN_HB_LMAC_IDX] = IWL_SCAN_DWELL_ACTIVE;
+ gp->passive_dwell[SCAN_HB_LMAC_IDX] = IWL_SCAN_DWELL_PASSIVE;
+
+ IWL_DEBUG_SCAN(mld,
+ "Scan: adwell_max_budget=%d max_out_of_time=%d suspend_time=%d\n",
+ gp->adwell_max_budget,
+ gp->max_out_of_time[SCAN_LB_LMAC_IDX],
+ gp->suspend_time[SCAN_LB_LMAC_IDX]);
+}
+
+static void
+iwl_mld_scan_cmd_set_gen_params(struct iwl_mld *mld,
+ struct iwl_mld_scan_params *params,
+ struct ieee80211_vif *vif,
+ struct iwl_scan_general_params_v11 *gp,
+ enum iwl_mld_scan_status scan_status)
+{
+ u16 gen_flags = iwl_mld_scan_get_cmd_gen_flags(mld, params, vif,
+ scan_status);
+ u8 gen_flags2 = iwl_mld_scan_get_cmd_gen_flags2(mld, params, vif,
+ gen_flags);
+
+ IWL_DEBUG_SCAN(mld, "General: flags=0x%x, flags2=0x%x\n",
+ gen_flags, gen_flags2);
+
+ gp->flags = cpu_to_le16(gen_flags);
+ gp->flags2 = gen_flags2;
+
+ iwl_mld_scan_cmd_set_dwell(mld, gp, params);
+
+ if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
+ gp->num_of_fragments[SCAN_LB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
+
+ if (params->fw_link_id != IWL_MLD_INVALID_FW_ID)
+ gp->scan_start_mac_or_link_id = params->fw_link_id;
+}
+
+static int
+iwl_mld_scan_cmd_set_sched_params(struct iwl_mld_scan_params *params,
+ struct iwl_scan_umac_schedule *schedule,
+ __le16 *delay)
+{
+ if (WARN_ON(!params->n_scan_plans ||
+ params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
+ return -EINVAL;
+
+ for (int i = 0; i < params->n_scan_plans; i++) {
+ struct cfg80211_sched_scan_plan *scan_plan =
+ &params->scan_plans[i];
+
+ schedule[i].iter_count = scan_plan->iterations;
+ schedule[i].interval =
+ cpu_to_le16(scan_plan->interval);
+ }
+
+ /* A last scan plan with an iteration count of zero should run
+ * indefinitely, so bump it to the field's maximum value (0xff). This
+ * is not always needed: a regular scan, for example, uses a single
+ * plan with exactly one iteration.
+ */
+ if (!schedule[params->n_scan_plans - 1].iter_count)
+ schedule[params->n_scan_plans - 1].iter_count = 0xff;
+
+ *delay = cpu_to_le16(params->delay);
+
+ return 0;
+}
+
+/* We insert the SSIDs in an inverted order, because the FW will
+ * invert it back.
+ */
+static void
+iwl_mld_scan_cmd_build_ssids(struct iwl_mld_scan_params *params,
+ struct iwl_ssid_ie *ssids, u32 *ssid_bitmap)
+{
+ int i, j;
+ int index;
+ u32 tmp_bitmap = 0;
+
+ /* Copy SSIDs from the match list. iwl_mld_config_sched_scan_profiles()
+ * uses the order of these SSIDs to configure the match list.
+ */
+ for (i = 0, j = params->n_match_sets - 1;
+ j >= 0 && i < PROBE_OPTION_MAX;
+ i++, j--) {
+ /* skip empty SSID match_sets */
+ if (!params->match_sets[j].ssid.ssid_len)
+ continue;
+
+ ssids[i].id = WLAN_EID_SSID;
+ ssids[i].len = params->match_sets[j].ssid.ssid_len;
+ memcpy(ssids[i].ssid, params->match_sets[j].ssid.ssid,
+ ssids[i].len);
+ }
+
+ /* add SSIDs from scan SSID list */
+ for (j = params->n_ssids - 1;
+ j >= 0 && i < PROBE_OPTION_MAX;
+ i++, j--) {
+ index = iwl_mld_scan_ssid_exist(params->ssids[j].ssid,
+ params->ssids[j].ssid_len,
+ ssids);
+ if (index < 0) {
+ ssids[i].id = WLAN_EID_SSID;
+ ssids[i].len = params->ssids[j].ssid_len;
+ memcpy(ssids[i].ssid, params->ssids[j].ssid,
+ ssids[i].len);
+ tmp_bitmap |= BIT(i);
+ } else {
+ tmp_bitmap |= BIT(index);
+ }
+ }
+
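+ /* Only SSIDs taken from the scan request itself are marked in the
+ * bitmap; entries copied from the match list stay unmarked unless a
+ * scan SSID maps onto one of them.
+ */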
+ if (ssid_bitmap)
+ *ssid_bitmap = tmp_bitmap;
+}
+
+static void
+iwl_mld_scan_fill_6g_chan_list(struct iwl_mld_scan_params *params,
+ struct iwl_scan_probe_params_v4 *pp)
+{
+ int j, idex_s = 0, idex_b = 0;
+ struct cfg80211_scan_6ghz_params *scan_6ghz_params =
+ params->scan_6ghz_params;
+
+ for (j = 0;
+ j < params->n_ssids && idex_s < SCAN_SHORT_SSID_MAX_SIZE;
+ j++) {
+ if (!params->ssids[j].ssid_len)
+ continue;
+
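+ /* The short SSID is the CRC-32 of the SSID, as used in 6 GHz
+ * reduced neighbor report entries.
+ */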
+ pp->short_ssid[idex_s] =
+ cpu_to_le32(~crc32_le(~0, params->ssids[j].ssid,
+ params->ssids[j].ssid_len));
+
+ /* hidden 6ghz scan */
+ pp->direct_scan[idex_s].id = WLAN_EID_SSID;
+ pp->direct_scan[idex_s].len = params->ssids[j].ssid_len;
+ memcpy(pp->direct_scan[idex_s].ssid, params->ssids[j].ssid,
+ params->ssids[j].ssid_len);
+ idex_s++;
+ }
+
+ /* Populate the arrays of the short SSIDs and the BSSIDs using the 6GHz
+ * collocated parameters. This might not be optimal, as this processing
+ * does not (yet) correspond to the actual channels, so it is possible
+ * that some entries would be left out.
+ */
+ for (j = 0; j < params->n_6ghz_params; j++) {
+ int k;
+
+ /* First, try to place the short SSID */
+ if (scan_6ghz_params[j].short_ssid_valid) {
+ for (k = 0; k < idex_s; k++) {
+ if (pp->short_ssid[k] ==
+ cpu_to_le32(scan_6ghz_params[j].short_ssid))
+ break;
+ }
+
+ if (k == idex_s && idex_s < SCAN_SHORT_SSID_MAX_SIZE) {
+ pp->short_ssid[idex_s++] =
+ cpu_to_le32(scan_6ghz_params[j].short_ssid);
+ }
+ }
+
+ /* try to place BSSID for the same entry */
+ for (k = 0; k < idex_b; k++) {
+ if (!memcmp(&pp->bssid_array[k],
+ scan_6ghz_params[j].bssid, ETH_ALEN))
+ break;
+ }
+
+ if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE &&
+ !WARN_ONCE(!is_valid_ether_addr(scan_6ghz_params[j].bssid),
+ "scan: invalid BSSID at index %u, index_b=%u\n",
+ j, idex_b)) {
+ memcpy(&pp->bssid_array[idex_b++],
+ scan_6ghz_params[j].bssid, ETH_ALEN);
+ }
+ }
+
+ pp->short_ssid_num = idex_s;
+ pp->bssid_num = idex_b;
+}
+
+static void
+iwl_mld_scan_cmd_set_probe_params(struct iwl_mld_scan_params *params,
+ struct iwl_scan_probe_params_v4 *pp,
+ u32 *bitmap_ssid)
+{
+ pp->preq = params->preq;
+
+ if (params->scan_6ghz) {
+ iwl_mld_scan_fill_6g_chan_list(params, pp);
+ return;
+ }
+
+ /* relevant only for 2.4 GHz/5 GHz scan */
+ iwl_mld_scan_cmd_build_ssids(params, pp->direct_scan, bitmap_ssid);
+}
+
+static bool
+iwl_mld_scan_use_ebs(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ bool low_latency)
+{
+ const struct iwl_ucode_capabilities *capa = &mld->fw->ucode_capa;
+
+ /* We can only use EBS if:
+ * 1. the feature is supported.
+ * 2. the last EBS was successful.
+ * 3. it's not a p2p find operation.
+ * 4. we are not in low latency mode,
+ * or if fragmented ebs is supported by the FW
+ * 5. the VIF is not an AP interface (scan wants survey results)
+ */
+ return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
+ !mld->scan.last_ebs_failed &&
+ vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+ (!low_latency || fw_has_api(capa, IWL_UCODE_TLV_API_FRAG_EBS)) &&
+ ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_AP);
+}
+
+static u8
+iwl_mld_scan_cmd_set_chan_flags(struct iwl_mld *mld,
+ struct iwl_mld_scan_params *params,
+ struct ieee80211_vif *vif,
+ bool low_latency)
+{
+ u8 flags = 0;
+
+ flags |= IWL_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
+
+ if (iwl_mld_scan_use_ebs(mld, vif, low_latency))
+ flags |= IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
+
+ /* set fragmented ebs for fragmented scan */
+ if (iwl_mld_scan_is_fragmented(params->type))
+ flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG;
+
+ /* Force EBS in case the scan is fragmented and there is a need to
+ * take P2P GO operation into consideration during the scan.
+ */
+ /* TODO: CDB */
+ if (iwl_mld_scan_is_fragmented(params->type) &&
+ params->respect_p2p_go) {
+ IWL_DEBUG_SCAN(mld, "Respect P2P GO. Force EBS\n");
+ flags |= IWL_SCAN_CHANNEL_FLAG_FORCE_EBS;
+ }
+
+ return flags;
+}
+
+static const u8 p2p_go_friendly_chs[] = {
+ 36, 40, 44, 48, 149, 153, 157, 161, 165,
+};
+
+static const u8 social_chs[] = {
+ 1, 6, 11
+};
+
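+/* For P2P Device scans, flag GO-friendly and social channels so the
+ * firmware can apply the matching adaptive-dwell n_aps_override values
+ * set in iwl_mld_scan_cmd_set_chan_params() on those channels.
+ */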
+static u32 iwl_mld_scan_ch_n_aps_flag(enum nl80211_iftype vif_type, u8 ch_id)
+{
+ if (vif_type != NL80211_IFTYPE_P2P_DEVICE)
+ return 0;
+
+ for (int i = 0; i < ARRAY_SIZE(p2p_go_friendly_chs); i++) {
+ if (ch_id == p2p_go_friendly_chs[i])
+ return IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY_BIT;
+ }
+
+ for (int i = 0; i < ARRAY_SIZE(social_chs); i++) {
+ if (ch_id == social_chs[i])
+ return IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS_BIT;
+ }
+
+ return 0;
+}
+
+static void
+iwl_mld_scan_cmd_set_channels(struct iwl_mld *mld,
+ struct ieee80211_channel **channels,
+ struct iwl_scan_channel_params_v7 *cp,
+ int n_channels, u32 flags,
+ enum nl80211_iftype vif_type)
+{
+ for (int i = 0; i < n_channels; i++) {
+ enum nl80211_band band = channels[i]->band;
+ struct iwl_scan_channel_cfg_umac *cfg = &cp->channel_config[i];
+ u8 iwl_band = iwl_mld_nl80211_band_to_fw(band);
+ u32 n_aps_flag =
+ iwl_mld_scan_ch_n_aps_flag(vif_type,
+ channels[i]->hw_value);
+
+ if (IWL_MLD_ADAPTIVE_DWELL_NUM_APS_OVERRIDE)
+ n_aps_flag = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY_BIT;
+
+ cfg->flags = cpu_to_le32(flags | n_aps_flag);
+ cfg->channel_num = channels[i]->hw_value;
+ if (cfg80211_channel_is_psc(channels[i]))
+ cfg->flags = 0;
+
+ if (band == NL80211_BAND_6GHZ) {
+ /* 6 GHz channels should only appear in a scan request
+ * that has scan_6ghz set. The only exception is MLO
+ * scan, which has to be passive.
+ */
+ WARN_ON_ONCE(cfg->flags != 0);
+ cfg->flags =
+ cpu_to_le32(IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE);
+ }
+
+ cfg->v2.iter_count = 1;
+ cfg->v2.iter_interval = 0;
+ cfg->flags |= cpu_to_le32(iwl_band <<
+ IWL_CHAN_CFG_FLAGS_BAND_POS);
+ }
+}
+
+static u8
+iwl_mld_scan_cfg_channels_6g(struct iwl_mld *mld,
+ struct iwl_mld_scan_params *params,
+ u32 n_channels,
+ struct iwl_scan_probe_params_v4 *pp,
+ struct iwl_scan_channel_params_v7 *cp,
+ enum nl80211_iftype vif_type)
+{
+ struct cfg80211_scan_6ghz_params *scan_6ghz_params =
+ params->scan_6ghz_params;
+ u32 i;
+ u8 ch_cnt;
+
+ for (i = 0, ch_cnt = 0; i < params->n_channels; i++) {
+ struct iwl_scan_channel_cfg_umac *cfg =
+ &cp->channel_config[ch_cnt];
+
+ u32 s_ssid_bitmap = 0, bssid_bitmap = 0, flags = 0;
+ u8 k, n_s_ssids = 0, n_bssids = 0;
+ u8 max_s_ssids, max_bssids;
+ bool force_passive = false, found = false, allow_passive = true,
+ unsolicited_probe_on_chan = false, psc_no_listen = false;
+ s8 psd_20 = IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED;
+
+ /* Avoid performing passive scan on non PSC channels unless the
+ * scan is specifically a passive scan, i.e., no SSIDs
+ * configured in the scan command.
+ */
+ if (!cfg80211_channel_is_psc(params->channels[i]) &&
+ !params->n_6ghz_params && params->n_ssids)
+ continue;
+
+ cfg->channel_num = params->channels[i]->hw_value;
+ cfg->flags |=
+ cpu_to_le32(PHY_BAND_6 << IWL_CHAN_CFG_FLAGS_BAND_POS);
+
+ cfg->v5.iter_count = 1;
+ cfg->v5.iter_interval = 0;
+
+ for (u32 j = 0; j < params->n_6ghz_params; j++) {
+ s8 tmp_psd_20;
+
+ if (!(scan_6ghz_params[j].channel_idx == i))
+ continue;
+
+ unsolicited_probe_on_chan |=
+ scan_6ghz_params[j].unsolicited_probe;
+
+ /* Use the highest PSD value allowed as advertised by
+ * APs for this channel
+ */
+ tmp_psd_20 = scan_6ghz_params[j].psd_20;
+ if (tmp_psd_20 !=
+ IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED &&
+ (psd_20 ==
+ IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED ||
+ psd_20 < tmp_psd_20))
+ psd_20 = tmp_psd_20;
+
+ psc_no_listen |= scan_6ghz_params[j].psc_no_listen;
+ }
+
+ /* In the following cases apply passive scan:
+ * 1. Non fragmented scan:
+ * - PSC channel with NO_LISTEN_FLAG on should be treated
+ * like non PSC channel
+ * - Non PSC channel with more than 3 short SSIDs or more
+ * than 9 BSSIDs.
+ * - Non PSC Channel with unsolicited probe response and
+ * more than 2 short SSIDs or more than 6 BSSIDs.
+ * - PSC channel with more than 2 short SSIDs or more than
+ * 6 BSSIDs.
+ * 2. Fragmented scan:
+ * - PSC channel with more than 1 SSID or 3 BSSIDs.
+ * - Non PSC channel with more than 2 SSIDs or 6 BSSIDs.
+ * - Non PSC channel with unsolicited probe response and
+ * more than 1 SSID or more than 3 BSSIDs.
+ */
+ if (!iwl_mld_scan_is_fragmented(params->type)) {
+ if (!cfg80211_channel_is_psc(params->channels[i]) ||
+ psc_no_listen) {
+ if (unsolicited_probe_on_chan) {
+ max_s_ssids = 2;
+ max_bssids = 6;
+ } else {
+ max_s_ssids = 3;
+ max_bssids = 9;
+ }
+ } else {
+ max_s_ssids = 2;
+ max_bssids = 6;
+ }
+ } else if (cfg80211_channel_is_psc(params->channels[i])) {
+ max_s_ssids = 1;
+ max_bssids = 3;
+ } else {
+ if (unsolicited_probe_on_chan) {
+ max_s_ssids = 1;
+ max_bssids = 3;
+ } else {
+ max_s_ssids = 2;
+ max_bssids = 6;
+ }
+ }
+
+ /* To optimize the scan time, i.e., reduce the scan dwell time
+ * on each channel, the below logic tries to set 3 direct BSSID
+ * probe requests for each broadcast probe request with a short
+ * SSID.
+ */
+ for (u32 j = 0; j < params->n_6ghz_params; j++) {
+ if (!(scan_6ghz_params[j].channel_idx == i))
+ continue;
+
+ found = false;
+
+ for (k = 0;
+ k < pp->short_ssid_num && n_s_ssids < max_s_ssids;
+ k++) {
+ if (!scan_6ghz_params[j].unsolicited_probe &&
+ le32_to_cpu(pp->short_ssid[k]) ==
+ scan_6ghz_params[j].short_ssid) {
+ /* Relevant short SSID bit set */
+ if (s_ssid_bitmap & BIT(k)) {
+ found = true;
+ break;
+ }
+
+ /* Prefer creating BSSID entries unless
+ * the short SSID probe can be done in
+ * the same channel dwell iteration.
+ *
+ * We also need to create a short SSID
+ * entry for any hidden AP.
+ */
+ if (3 * n_s_ssids > n_bssids &&
+ !pp->direct_scan[k].len)
+ break;
+
+ /* Hidden AP, cannot do passive scan */
+ if (pp->direct_scan[k].len)
+ allow_passive = false;
+
+ s_ssid_bitmap |= BIT(k);
+ n_s_ssids++;
+ found = true;
+ break;
+ }
+ }
+
+ if (found)
+ continue;
+
+ for (k = 0; k < pp->bssid_num; k++) {
+ if (memcmp(&pp->bssid_array[k],
+ scan_6ghz_params[j].bssid,
+ ETH_ALEN))
+ continue;
+
+ if (bssid_bitmap & BIT(k))
+ break;
+
+ if (n_bssids < max_bssids) {
+ bssid_bitmap |= BIT(k);
+ n_bssids++;
+ } else {
+ force_passive = true;
+ }
+
+ break;
+ }
+ }
+
+ if (cfg80211_channel_is_psc(params->channels[i]) &&
+ psc_no_listen)
+ flags |= IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN;
+
+ if (unsolicited_probe_on_chan)
+ flags |= IWL_UHB_CHAN_CFG_FLAG_UNSOLICITED_PROBE_RES;
+
+ if ((allow_passive && force_passive) ||
+ (!(bssid_bitmap | s_ssid_bitmap) &&
+ !cfg80211_channel_is_psc(params->channels[i])))
+ flags |= IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE;
+ else
+ flags |= bssid_bitmap | (s_ssid_bitmap << 16);
+
+ cfg->flags |= cpu_to_le32(flags);
+ cfg->v5.psd_20 = psd_20;
+
+ ch_cnt++;
+ }
+
+ if (params->n_channels > ch_cnt)
+ IWL_DEBUG_SCAN(mld,
+ "6GHz: reducing number channels: (%u->%u)\n",
+ params->n_channels, ch_cnt);
+
+ return ch_cnt;
+}
+
+static int
+iwl_mld_scan_cmd_set_6ghz_chan_params(struct iwl_mld *mld,
+ struct iwl_mld_scan_params *params,
+ struct ieee80211_vif *vif,
+ struct iwl_scan_req_params_v17 *scan_p,
+ enum iwl_mld_scan_status scan_status)
+{
+ struct iwl_scan_channel_params_v7 *chan_p = &scan_p->channel_params;
+ struct iwl_scan_probe_params_v4 *probe_p = &scan_p->probe_params;
+
+ chan_p->flags = iwl_mld_scan_get_cmd_gen_flags(mld, params, vif,
+ scan_status);
+ chan_p->count = iwl_mld_scan_cfg_channels_6g(mld, params,
+ params->n_channels,
+ probe_p, chan_p,
+ vif->type);
+ if (!chan_p->count)
+ return -EINVAL;
+
+ if (!params->n_ssids ||
+ (params->n_ssids == 1 && !params->ssids[0].ssid_len))
+ chan_p->flags |= IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER;
+
+ return 0;
+}
+
+static int
+iwl_mld_scan_cmd_set_chan_params(struct iwl_mld *mld,
+ struct iwl_mld_scan_params *params,
+ struct ieee80211_vif *vif,
+ struct iwl_scan_req_params_v17 *scan_p,
+ bool low_latency,
+ enum iwl_mld_scan_status scan_status,
+ u32 channel_cfg_flags)
+{
+ struct iwl_scan_channel_params_v7 *cp = &scan_p->channel_params;
+ struct ieee80211_supported_band *sband =
+ &mld->nvm_data->bands[NL80211_BAND_6GHZ];
+
+ cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
+ cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS;
+
+ if (IWL_MLD_ADAPTIVE_DWELL_NUM_APS_OVERRIDE)
+ cp->n_aps_override[0] = IWL_MLD_ADAPTIVE_DWELL_NUM_APS_OVERRIDE;
+
+ if (params->scan_6ghz)
+ return iwl_mld_scan_cmd_set_6ghz_chan_params(mld, params,
+ vif, scan_p,
+ scan_status);
+
+ /* relevant only for 2.4 GHz/5 GHz scan */
+ cp->flags = iwl_mld_scan_cmd_set_chan_flags(mld, params, vif,
+ low_latency);
+ cp->count = params->n_channels;
+
+ iwl_mld_scan_cmd_set_channels(mld, params->channels, cp,
+ params->n_channels, channel_cfg_flags,
+ vif->type);
+
+ if (!params->enable_6ghz_passive)
+ return 0;
+
+ /* fill 6 GHz passive scan cfg */
+ for (int i = 0; i < sband->n_channels; i++) {
+ struct ieee80211_channel *channel =
+ &sband->channels[i];
+ struct iwl_scan_channel_cfg_umac *cfg =
+ &cp->channel_config[cp->count];
+
+ if (!cfg80211_channel_is_psc(channel))
+ continue;
+
+ cfg->channel_num = channel->hw_value;
+ cfg->v5.iter_count = 1;
+ cfg->v5.iter_interval = 0;
+ cfg->v5.psd_20 =
+ IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED;
+ cfg->flags = cpu_to_le32(PHY_BAND_6 <<
+ IWL_CHAN_CFG_FLAGS_BAND_POS);
+ cp->count++;
+ }
+
+ return 0;
+}
+
+static int
+iwl_mld_scan_build_cmd(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct iwl_mld_scan_params *params,
+ enum iwl_mld_scan_status scan_status,
+ bool low_latency)
+{
+ struct iwl_scan_req_umac_v17 *cmd = mld->scan.cmd;
+ struct iwl_scan_req_params_v17 *scan_p = &cmd->scan_params;
+ u32 bitmap_ssid = 0;
+ int uid, ret;
+
+ memset(mld->scan.cmd, 0, mld->scan.cmd_size);
+
+ /* find a free UID entry */
+ uid = iwl_mld_scan_uid_by_status(mld, IWL_MLD_SCAN_NONE);
+ if (uid < 0)
+ return uid;
+
+ cmd->uid = cpu_to_le32(uid);
+ cmd->ooc_priority =
+ cpu_to_le32(iwl_mld_scan_ooc_priority(scan_status));
+
+ iwl_mld_scan_cmd_set_gen_params(mld, params, vif,
+ &scan_p->general_params, scan_status);
+
+ ret = iwl_mld_scan_cmd_set_sched_params(params,
+ scan_p->periodic_params.schedule,
+ &scan_p->periodic_params.delay);
+ if (ret)
+ return ret;
+
+ iwl_mld_scan_cmd_set_probe_params(params, &scan_p->probe_params,
+ &bitmap_ssid);
+
+ ret = iwl_mld_scan_cmd_set_chan_params(mld, params, vif, scan_p,
+ low_latency, scan_status,
+ bitmap_ssid);
+ if (ret)
+ return ret;
+
+ return uid;
+}
+
+static bool
+iwl_mld_scan_pass_all(struct iwl_mld *mld,
+ struct cfg80211_sched_scan_request *req)
+{
+ if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
+ IWL_DEBUG_SCAN(mld,
+ "Sending scheduled scan with filtering, n_match_sets %d\n",
+ req->n_match_sets);
+ mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_DISABLED;
+ return false;
+ }
+
+ IWL_DEBUG_SCAN(mld, "Sending Scheduled scan without filtering\n");
+ mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_ENABLED;
+
+ return true;
+}
+
+static int
+iwl_mld_config_sched_scan_profiles(struct iwl_mld *mld,
+ struct cfg80211_sched_scan_request *req)
+{
+ struct iwl_host_cmd hcmd = {
+ .id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+ struct iwl_scan_offload_profile *profile;
+ struct iwl_scan_offload_profile_cfg_data *cfg_data;
+ struct iwl_scan_offload_profile_cfg *profile_cfg;
+ struct iwl_scan_offload_blocklist *blocklist;
+ u32 blocklist_size = IWL_SCAN_MAX_BLACKLIST_LEN * sizeof(*blocklist);
+ u32 cmd_size = blocklist_size + sizeof(*profile_cfg);
+ u8 *cmd;
+ int ret;
+
+ if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES_V2))
+ return -EIO;
+
+ cmd = kzalloc(cmd_size, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ hcmd.data[0] = cmd;
+ hcmd.len[0] = cmd_size;
+
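+	/* command layout: the (empty) blocklist array comes first, followed
+	 * by the profile configuration
+	 */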
+ blocklist = (struct iwl_scan_offload_blocklist *)cmd;
+ profile_cfg = (struct iwl_scan_offload_profile_cfg *)(cmd + blocklist_size);
+
+ /* No blocklist configuration */
+ cfg_data = &profile_cfg->data;
+ cfg_data->num_profiles = req->n_match_sets;
+ cfg_data->active_clients = SCAN_CLIENT_SCHED_SCAN;
+ cfg_data->pass_match = SCAN_CLIENT_SCHED_SCAN;
+ cfg_data->match_notify = SCAN_CLIENT_SCHED_SCAN;
+
+ if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
+ cfg_data->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;
+
+ for (int i = 0; i < req->n_match_sets; i++) {
+ profile = &profile_cfg->profiles[i];
+
+ /* Support any cipher and auth algorithm */
+ profile->unicast_cipher = 0xff;
+ profile->auth_alg = IWL_AUTH_ALGO_UNSUPPORTED |
+ IWL_AUTH_ALGO_NONE | IWL_AUTH_ALGO_PSK |
+ IWL_AUTH_ALGO_8021X | IWL_AUTH_ALGO_SAE |
+ IWL_AUTH_ALGO_8021X_SHA384 | IWL_AUTH_ALGO_OWE;
+ profile->network_type = IWL_NETWORK_TYPE_ANY;
+ profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
+ profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
+ profile->ssid_index = i;
+ }
+
+ IWL_DEBUG_SCAN(mld,
+ "Sending scheduled scan profile config (n_match_sets=%u)\n",
+ req->n_match_sets);
+
+ ret = iwl_mld_send_cmd(mld, &hcmd);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int
+iwl_mld_sched_scan_handle_non_psc_channels(struct iwl_mld_scan_params *params,
+ bool *non_psc_included)
+{
+ int i, j;
+
+ *non_psc_included = false;
+ /* for 6 GHZ band only PSC channels need to be added */
+ for (i = 0; i < params->n_channels; i++) {
+ struct ieee80211_channel *channel = params->channels[i];
+
+ if (channel->band == NL80211_BAND_6GHZ &&
+ !cfg80211_channel_is_psc(channel)) {
+ *non_psc_included = true;
+ break;
+ }
+ }
+
+ if (!*non_psc_included)
+ return 0;
+
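+	/* Duplicate the channel list so that the non-PSC 6 GHz channels can
+	 * be dropped without modifying the mac80211-owned request; the caller
+	 * frees this copy when it is done with it.
+	 */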
+ params->channels =
+ kmemdup(params->channels,
+ sizeof(params->channels[0]) * params->n_channels,
+ GFP_KERNEL);
+ if (!params->channels)
+ return -ENOMEM;
+
+ for (i = j = 0; i < params->n_channels; i++) {
+ if (params->channels[i]->band == NL80211_BAND_6GHZ &&
+ !cfg80211_channel_is_psc(params->channels[i]))
+ continue;
+ params->channels[j++] = params->channels[i];
+ }
+
+ params->n_channels = j;
+
+ return 0;
+}
+
+static void
+iwl_mld_scan_6ghz_passive_scan(struct iwl_mld *mld,
+ struct iwl_mld_scan_params *params,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_supported_band *sband =
+ &mld->nvm_data->bands[NL80211_BAND_6GHZ];
+ u32 n_disabled, i;
+
+ params->enable_6ghz_passive = false;
+
+ /* 6 GHz passive scan may be enabled in the first 2.4 GHz/5 GHz scan
+	 * phase to discover the geo location if no APs are found. Skip it when
+ * we're in the 6 GHz scan phase.
+ */
+ if (params->scan_6ghz)
+ return;
+
+ /* 6 GHz passive scan allowed only on station interface */
+ if (vif->type != NL80211_IFTYPE_STATION) {
+ IWL_DEBUG_SCAN(mld,
+ "6GHz passive scan: not station interface\n");
+ return;
+ }
+
+ /* 6 GHz passive scan is allowed in a defined time interval following
+ * HW reset or resume flow, or while not associated and a large
+ * interval has passed since the last 6 GHz passive scan.
+ */
+ if ((vif->cfg.assoc ||
+ time_after(mld->scan.last_6ghz_passive_jiffies +
+ (IWL_MLD_6GHZ_PASSIVE_SCAN_TIMEOUT * HZ), jiffies)) &&
+ (time_before(mld->scan.last_start_time_jiffies +
+ (IWL_MLD_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT * HZ),
+ jiffies))) {
+ IWL_DEBUG_SCAN(mld, "6GHz passive scan: %s\n",
+ vif->cfg.assoc ? "associated" :
+ "timeout did not expire");
+ return;
+ }
+
+ /* not enough channels in the regular scan request */
+ if (params->n_channels < IWL_MLD_6GHZ_PASSIVE_SCAN_MIN_CHANS) {
+ IWL_DEBUG_SCAN(mld,
+ "6GHz passive scan: not enough channels %d\n",
+ params->n_channels);
+ return;
+ }
+
+ for (i = 0; i < params->n_ssids; i++) {
+ if (!params->ssids[i].ssid_len)
+ break;
+ }
+
+ /* not a wildcard scan, so cannot enable passive 6 GHz scan */
+ if (i == params->n_ssids) {
+ IWL_DEBUG_SCAN(mld,
+ "6GHz passive scan: no wildcard SSID\n");
+ return;
+ }
+
+ if (!sband || !sband->n_channels) {
+ IWL_DEBUG_SCAN(mld,
+ "6GHz passive scan: no 6GHz channels\n");
+ return;
+ }
+
+ for (i = 0, n_disabled = 0; i < sband->n_channels; i++) {
+ if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED))
+ n_disabled++;
+ }
+
+ /* Not all the 6 GHz channels are disabled, so no need for 6 GHz
+ * passive scan
+ */
+ if (n_disabled != sband->n_channels) {
+ IWL_DEBUG_SCAN(mld,
+ "6GHz passive scan: 6GHz channels enabled\n");
+ return;
+ }
+
+ /* all conditions to enable 6 GHz passive scan are satisfied */
+ IWL_DEBUG_SCAN(mld, "6GHz passive scan: can be enabled\n");
+ params->enable_6ghz_passive = true;
+}
+
+static void
+iwl_mld_scan_set_link_id(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct iwl_mld_scan_params *params,
+ s8 tsf_report_link_id,
+ enum iwl_mld_scan_status scan_status)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link *link;
+
+ if (tsf_report_link_id < 0) {
+ if (vif->active_links)
+ tsf_report_link_id = __ffs(vif->active_links);
+ else
+ tsf_report_link_id = 0;
+ }
+
+ link = iwl_mld_link_dereference_check(mld_vif, tsf_report_link_id);
+ if (!WARN_ON(!link)) {
+ params->fw_link_id = link->fw_id;
+		/* we need to store fw_link_id only for a regular scan,
+		 * and use it in the scan complete notif
+ */
+ if (scan_status == IWL_MLD_SCAN_REGULAR)
+ mld->scan.fw_link_id = link->fw_id;
+ } else {
+ mld->scan.fw_link_id = IWL_MLD_INVALID_FW_ID;
+ params->fw_link_id = IWL_MLD_INVALID_FW_ID;
+ }
+}
+
+static int
+_iwl_mld_single_scan_start(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req,
+ struct ieee80211_scan_ies *ies,
+ enum iwl_mld_scan_status scan_status)
+{
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(LONG_GROUP, SCAN_REQ_UMAC),
+ .len = { mld->scan.cmd_size, },
+ .data = { mld->scan.cmd, },
+ .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+ };
+ struct iwl_mld_scan_iter_data scan_iter_data = {
+ .current_vif = vif,
+ };
+ struct cfg80211_sched_scan_plan scan_plan = {.iterations = 1};
+ struct iwl_mld_scan_params params = {};
+ int ret, uid;
+
+ /* we should have failed registration if scan_cmd was NULL */
+ if (WARN_ON(!mld->scan.cmd))
+ return -ENOMEM;
+
+ if (!iwl_mld_scan_fits(mld, req->n_ssids, ies, req->n_channels))
+ return -ENOBUFS;
+
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_scan_iterator,
+ &scan_iter_data);
+
+ params.type = iwl_mld_get_scan_type(mld, vif, &scan_iter_data);
+ params.n_ssids = req->n_ssids;
+ params.flags = req->flags;
+ params.n_channels = req->n_channels;
+ params.delay = 0;
+ params.ssids = req->ssids;
+ params.channels = req->channels;
+ params.mac_addr = req->mac_addr;
+ params.mac_addr_mask = req->mac_addr_mask;
+ params.no_cck = req->no_cck;
+ params.pass_all = true;
+ params.n_match_sets = 0;
+ params.match_sets = NULL;
+ params.scan_plans = &scan_plan;
+ params.n_scan_plans = 1;
+
+ params.n_6ghz_params = req->n_6ghz_params;
+ params.scan_6ghz_params = req->scan_6ghz_params;
+ params.scan_6ghz = req->scan_6ghz;
+
+ ether_addr_copy(params.bssid, req->bssid);
+ /* TODO: CDB - per-band flag */
+ params.respect_p2p_go =
+ iwl_mld_get_respect_p2p_go(mld, vif,
+ scan_iter_data.global_low_latency);
+
+ if (req->duration)
+ params.iter_notif = true;
+
+ iwl_mld_scan_set_link_id(mld, vif, &params, req->tsf_report_link_id,
+ scan_status);
+
+ iwl_mld_scan_build_probe_req(mld, vif, ies, &params);
+
+ iwl_mld_scan_6ghz_passive_scan(mld, &params, vif);
+
+ uid = iwl_mld_scan_build_cmd(mld, vif, &params, scan_status,
+ scan_iter_data.global_low_latency);
+ if (uid < 0)
+ return uid;
+
+ ret = iwl_mld_send_cmd(mld, &hcmd);
+ if (ret) {
+ IWL_ERR(mld, "Scan failed! ret %d\n", ret);
+ return ret;
+ }
+
+ IWL_DEBUG_SCAN(mld, "Scan request send success: status=%u, uid=%u\n",
+ scan_status, uid);
+
+ mld->scan.uid_status[uid] = scan_status;
+ mld->scan.status |= scan_status;
+
+ if (params.enable_6ghz_passive)
+ mld->scan.last_6ghz_passive_jiffies = jiffies;
+
+ return 0;
+}
+
+static int
+iwl_mld_scan_send_abort_cmd_status(struct iwl_mld *mld, int uid, u32 *status)
+{
+ struct iwl_umac_scan_abort abort_cmd = {
+ .uid = cpu_to_le32(uid),
+ };
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(LONG_GROUP, SCAN_ABORT_UMAC),
+ .flags = CMD_WANT_SKB,
+ .data = { &abort_cmd },
+ .len[0] = sizeof(abort_cmd),
+ };
+ struct iwl_rx_packet *pkt;
+ struct iwl_cmd_response *resp;
+ u32 resp_len;
+ int ret;
+
+ ret = iwl_mld_send_cmd(mld, &cmd);
+ if (ret)
+ return ret;
+
+ pkt = cmd.resp_pkt;
+
+ resp_len = iwl_rx_packet_payload_len(pkt);
+ if (IWL_FW_CHECK(mld, resp_len != sizeof(*resp),
+ "Scan Abort: unexpected response length %d\n",
+ resp_len)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ resp = (void *)pkt->data;
+ *status = le32_to_cpu(resp->status);
+
+out:
+ iwl_free_resp(&cmd);
+ return ret;
+}
+
+static int
+iwl_mld_scan_abort(struct iwl_mld *mld, int type, int uid, bool *wait)
+{
+ enum iwl_umac_scan_abort_status status;
+ int ret;
+
+ *wait = true;
+
+ IWL_DEBUG_SCAN(mld, "Sending scan abort, uid %u\n", uid);
+
+ ret = iwl_mld_scan_send_abort_cmd_status(mld, uid, &status);
+
+ IWL_DEBUG_SCAN(mld, "Scan abort: ret=%d status=%u\n", ret, status);
+
+ /* We don't need to wait to scan complete in the following cases:
+ * 1. Driver failed to send the scan abort cmd.
+ * 2. The FW is no longer familiar with the scan that needs to be
+ * stopped. It is expected that the scan complete notification was
+ * already received but not yet processed.
+ *
+ * In both cases the flow should continue similar to the case that the
+ * scan was really aborted.
+ */
+ if (ret || status == IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND)
+ *wait = false;
+
+ return ret;
+}
+
+static int
+iwl_mld_scan_stop_wait(struct iwl_mld *mld, int type, int uid)
+{
+ struct iwl_notification_wait wait_scan_done;
+ static const u16 scan_comp_notif[] = { SCAN_COMPLETE_UMAC };
+ bool wait = true;
+ int ret;
+
+ iwl_init_notification_wait(&mld->notif_wait, &wait_scan_done,
+ scan_comp_notif,
+ ARRAY_SIZE(scan_comp_notif),
+ NULL, NULL);
+
+ IWL_DEBUG_SCAN(mld, "Preparing to stop scan, type=%x\n", type);
+
+ ret = iwl_mld_scan_abort(mld, type, uid, &wait);
+ if (ret) {
+ IWL_DEBUG_SCAN(mld, "couldn't stop scan type=%d\n", type);
+ goto return_no_wait;
+ }
+
+ if (!wait) {
+ IWL_DEBUG_SCAN(mld, "no need to wait for scan type=%d\n", type);
+ goto return_no_wait;
+ }
+
+ return iwl_wait_notification(&mld->notif_wait, &wait_scan_done, HZ);
+
+return_no_wait:
+ iwl_remove_notification(&mld->notif_wait, &wait_scan_done);
+ return ret;
+}
+
+int iwl_mld_sched_scan_start(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies,
+ int type)
+{
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(LONG_GROUP, SCAN_REQ_UMAC),
+ .len = { mld->scan.cmd_size, },
+ .data = { mld->scan.cmd, },
+ .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+ };
+ struct iwl_mld_scan_params params = {};
+ struct iwl_mld_scan_iter_data scan_iter_data = {
+ .current_vif = vif,
+ };
+ bool non_psc_included = false;
+ int ret, uid;
+
+ /* we should have failed registration if scan_cmd was NULL */
+ if (WARN_ON(!mld->scan.cmd))
+ return -ENOMEM;
+
+ /* FW supports only a single periodic scan */
+ if (mld->scan.status & (IWL_MLD_SCAN_SCHED | IWL_MLD_SCAN_NETDETECT))
+ return -EBUSY;
+
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_scan_iterator,
+ &scan_iter_data);
+
+ params.type = iwl_mld_get_scan_type(mld, vif, &scan_iter_data);
+ params.flags = req->flags;
+ params.n_ssids = req->n_ssids;
+ params.ssids = req->ssids;
+ params.n_channels = req->n_channels;
+ params.channels = req->channels;
+ params.mac_addr = req->mac_addr;
+ params.mac_addr_mask = req->mac_addr_mask;
+ params.no_cck = false;
+ params.pass_all = iwl_mld_scan_pass_all(mld, req);
+ params.n_match_sets = req->n_match_sets;
+ params.match_sets = req->match_sets;
+ params.n_scan_plans = req->n_scan_plans;
+ params.scan_plans = req->scan_plans;
+ /* TODO: CDB - per-band flag */
+ params.respect_p2p_go =
+ iwl_mld_get_respect_p2p_go(mld, vif,
+ scan_iter_data.global_low_latency);
+
+ /* UMAC scan supports up to 16-bit delays, trim it down to 16-bits */
+ params.delay = req->delay > U16_MAX ? U16_MAX : req->delay;
+
+ eth_broadcast_addr(params.bssid);
+
+ ret = iwl_mld_config_sched_scan_profiles(mld, req);
+ if (ret)
+ return ret;
+
+ iwl_mld_scan_build_probe_req(mld, vif, ies, &params);
+
+ ret = iwl_mld_sched_scan_handle_non_psc_channels(&params,
+ &non_psc_included);
+ if (ret)
+ goto out;
+
+ if (!iwl_mld_scan_fits(mld, req->n_ssids, ies, params.n_channels)) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ uid = iwl_mld_scan_build_cmd(mld, vif, &params, type,
+ scan_iter_data.global_low_latency);
+ if (uid < 0) {
+ ret = uid;
+ goto out;
+ }
+
+ ret = iwl_mld_send_cmd(mld, &hcmd);
+ if (!ret) {
+ IWL_DEBUG_SCAN(mld,
+ "Sched scan request send success: type=%u, uid=%u\n",
+ type, uid);
+ mld->scan.uid_status[uid] = type;
+ mld->scan.status |= type;
+ } else {
+ IWL_ERR(mld, "Sched scan failed! ret %d\n", ret);
+ mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_DISABLED;
+ }
+
+out:
+ if (non_psc_included)
+ kfree(params.channels);
+ return ret;
+}
+
+int iwl_mld_scan_stop(struct iwl_mld *mld, int type, bool notify)
+{
+ int uid, ret;
+
+ IWL_DEBUG_SCAN(mld,
+ "Request to stop scan: type=0x%x, status=0x%x\n",
+ type, mld->scan.status);
+
+ if (!(mld->scan.status & type))
+ return 0;
+
+ uid = iwl_mld_scan_uid_by_status(mld, type);
+ /* must be valid, we just checked it's running */
+ if (WARN_ON_ONCE(uid < 0))
+ return uid;
+
+ ret = iwl_mld_scan_stop_wait(mld, type, uid);
+ if (ret)
+ IWL_DEBUG_SCAN(mld, "Failed to stop scan\n");
+
+ /* Clear the scan status so the next scan requests will
+ * succeed and mark the scan as stopping, so that the Rx
+ * handler doesn't do anything, as the scan was stopped from
+ * above. Also remove the handler to not notify mac80211
+ * erroneously after a new scan starts, for example.
+ */
+ mld->scan.status &= ~type;
+ mld->scan.uid_status[uid] = IWL_MLD_SCAN_NONE;
+ iwl_mld_cancel_notifications_of_object(mld, IWL_MLD_OBJECT_TYPE_SCAN,
+ uid);
+
+ if (type == IWL_MLD_SCAN_REGULAR) {
+ if (notify) {
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ ieee80211_scan_completed(mld->hw, &info);
+ }
+ } else if (notify) {
+ ieee80211_sched_scan_stopped(mld->hw);
+ mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_DISABLED;
+ }
+
+ return ret;
+}
+
+int iwl_mld_regular_scan_start(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req,
+ struct ieee80211_scan_ies *ies)
+{
+ return _iwl_mld_single_scan_start(mld, vif, req, ies,
+ IWL_MLD_SCAN_REGULAR);
+}
+
+static void iwl_mld_int_mlo_scan_start(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel **channels,
+ size_t n_channels)
+{
+ struct cfg80211_scan_request *req __free(kfree) = NULL;
+ struct ieee80211_scan_ies ies = {};
+ size_t size;
+ int ret;
+
+ IWL_DEBUG_SCAN(mld, "Starting Internal MLO scan: n_channels=%zu\n",
+ n_channels);
+
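+	/* Build a minimal driver-internal scan request (it never comes from
+	 * cfg80211); it is freed automatically on return via __free(kfree).
+	 */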
+ size = struct_size(req, channels, n_channels);
+ req = kzalloc(size, GFP_KERNEL);
+ if (!req)
+ return;
+
+ /* set the requested channels */
+ for (int i = 0; i < n_channels; i++)
+ req->channels[i] = channels[i];
+
+ req->n_channels = n_channels;
+
+ /* set the rates */
+ for (int i = 0; i < NUM_NL80211_BANDS; i++)
+ if (mld->wiphy->bands[i])
+ req->rates[i] =
+ (1 << mld->wiphy->bands[i]->n_bitrates) - 1;
+
+ req->wdev = ieee80211_vif_to_wdev(vif);
+ req->wiphy = mld->wiphy;
+ req->scan_start = jiffies;
+ req->tsf_report_link_id = -1;
+
+ ret = _iwl_mld_single_scan_start(mld, vif, req, &ies,
+ IWL_MLD_SCAN_INT_MLO);
+
+ if (!ret)
+ mld->scan.last_mlo_scan_time = ktime_get_boottime_ns();
+
+ IWL_DEBUG_SCAN(mld, "Internal MLO scan: ret=%d\n", ret);
+}
+
+void iwl_mld_int_mlo_scan(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+ struct ieee80211_channel *channels[IEEE80211_MLD_MAX_NUM_LINKS];
+ unsigned long usable_links = ieee80211_vif_usable_links(vif);
+ size_t n_channels = 0;
+ u8 link_id;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif) ||
+ hweight16(vif->valid_links) == 1)
+ return;
+
+ if (mld->scan.status & IWL_MLD_SCAN_INT_MLO) {
+ IWL_DEBUG_SCAN(mld, "Internal MLO scan is already running\n");
+ return;
+ }
+
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_check(vif, link_id);
+
+ if (WARN_ON_ONCE(!link_conf))
+ continue;
+
+ channels[n_channels++] = link_conf->chanreq.oper.chan;
+ }
+
+ if (!n_channels)
+ return;
+
+ iwl_mld_int_mlo_scan_start(mld, vif, channels, n_channels);
+}
+
+void iwl_mld_handle_scan_iter_complete_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
+ u32 uid = __le32_to_cpu(notif->uid);
+
+ if (IWL_FW_CHECK(mld, uid >= ARRAY_SIZE(mld->scan.uid_status),
+ "FW reports out-of-range scan UID %d\n", uid))
+ return;
+
+ if (mld->scan.uid_status[uid] == IWL_MLD_SCAN_REGULAR)
+ mld->scan.start_tsf = le64_to_cpu(notif->start_tsf);
+
+ IWL_DEBUG_SCAN(mld,
+ "UMAC Scan iteration complete: status=0x%x scanned_channels=%d\n",
+ notif->status, notif->scanned_channels);
+
+ if (mld->scan.pass_all_sched_res == SCHED_SCAN_PASS_ALL_STATE_FOUND) {
+ IWL_DEBUG_SCAN(mld, "Pass all scheduled scan results found\n");
+ ieee80211_sched_scan_results(mld->hw);
+ mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_ENABLED;
+ }
+
+ IWL_DEBUG_SCAN(mld,
+ "UMAC Scan iteration complete: scan started at %llu (TSF)\n",
+ le64_to_cpu(notif->start_tsf));
+}
+
+void iwl_mld_handle_match_found_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ IWL_DEBUG_SCAN(mld, "Scheduled scan results\n");
+ ieee80211_sched_scan_results(mld->hw);
+}
+
+void iwl_mld_handle_scan_complete_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_umac_scan_complete *notif = (void *)pkt->data;
+ bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
+ u32 uid = __le32_to_cpu(notif->uid);
+
+ if (IWL_FW_CHECK(mld, uid >= ARRAY_SIZE(mld->scan.uid_status),
+ "FW reports out-of-range scan UID %d\n", uid))
+ return;
+
+ IWL_DEBUG_SCAN(mld,
+ "Scan completed: uid=%u type=%u, status=%s, EBS=%s\n",
+ uid, mld->scan.uid_status[uid],
+ notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
+ "completed" : "aborted",
+ iwl_mld_scan_ebs_status_str(notif->ebs_status));
+ IWL_DEBUG_SCAN(mld, "Scan completed: scan_status=0x%x\n",
+ mld->scan.status);
+ IWL_DEBUG_SCAN(mld,
+ "Scan completed: line=%u, iter=%u, elapsed time=%u\n",
+ notif->last_schedule, notif->last_iter,
+ __le32_to_cpu(notif->time_from_last_iter));
+
+ if (IWL_FW_CHECK(mld, !(mld->scan.uid_status[uid] & mld->scan.status),
+ "FW reports scan UID %d we didn't trigger\n", uid))
+ return;
+
+ /* if the scan is already stopping, we don't need to notify mac80211 */
+ if (mld->scan.uid_status[uid] == IWL_MLD_SCAN_REGULAR) {
+ struct cfg80211_scan_info info = {
+ .aborted = aborted,
+ .scan_start_tsf = mld->scan.start_tsf,
+ };
+ int fw_link_id = mld->scan.fw_link_id;
+ struct ieee80211_bss_conf *link_conf = NULL;
+
+ if (fw_link_id != IWL_MLD_INVALID_FW_ID)
+ link_conf =
+ wiphy_dereference(mld->wiphy,
+ mld->fw_id_to_bss_conf[fw_link_id]);
+
+ /* It is possible that by the time the scan is complete the
+ * link was already removed and is not valid.
+ */
+ if (link_conf)
+ ether_addr_copy(info.tsf_bssid, link_conf->bssid);
+ else
+ IWL_DEBUG_SCAN(mld, "Scan link is no longer valid\n");
+
+ ieee80211_scan_completed(mld->hw, &info);
+ } else if (mld->scan.uid_status[uid] == IWL_MLD_SCAN_SCHED) {
+ ieee80211_sched_scan_stopped(mld->hw);
+ mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_DISABLED;
+ } else if (mld->scan.uid_status[uid] == IWL_MLD_SCAN_INT_MLO) {
+ IWL_DEBUG_SCAN(mld, "Internal MLO scan completed\n");
+
+ /*
+ * We limit link selection to internal MLO scans as otherwise
+ * we do not know whether all channels were covered.
+ */
+ iwl_mld_select_links(mld);
+ }
+
+ mld->scan.status &= ~mld->scan.uid_status[uid];
+
+ IWL_DEBUG_SCAN(mld, "Scan completed: after update: scan_status=0x%x\n",
+ mld->scan.status);
+
+ mld->scan.uid_status[uid] = IWL_MLD_SCAN_NONE;
+
+ if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
+ notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
+ mld->scan.last_ebs_failed = true;
+}
+
+/* This function is used in the NIC restart flow to inform mac80211 about
+ * scans that were aborted by the restart flow or by an assert.
+ */
+void iwl_mld_report_scan_aborted(struct iwl_mld *mld)
+{
+ int uid;
+
+ uid = iwl_mld_scan_uid_by_status(mld, IWL_MLD_SCAN_REGULAR);
+ if (uid >= 0) {
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ ieee80211_scan_completed(mld->hw, &info);
+ mld->scan.uid_status[uid] = IWL_MLD_SCAN_NONE;
+ }
+
+ uid = iwl_mld_scan_uid_by_status(mld, IWL_MLD_SCAN_SCHED);
+ if (uid >= 0) {
+ mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_DISABLED;
+ mld->scan.uid_status[uid] = IWL_MLD_SCAN_NONE;
+
+ /* sched scan will be restarted by mac80211 in reconfig.
+ * report to mac80211 that sched scan stopped only if we won't
+ * restart the firmware.
+ */
+ if (!iwlwifi_mod_params.fw_restart)
+ ieee80211_sched_scan_stopped(mld->hw);
+ }
+
+ uid = iwl_mld_scan_uid_by_status(mld, IWL_MLD_SCAN_INT_MLO);
+ if (uid >= 0) {
+ IWL_DEBUG_SCAN(mld, "Internal MLO scan aborted\n");
+ mld->scan.uid_status[uid] = IWL_MLD_SCAN_NONE;
+ }
+
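+	/* IWL_MLD_SCAN_NONE must be zero for the memset below to mark all
+	 * scan UID entries as unused
+	 */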
+ BUILD_BUG_ON(IWL_MLD_SCAN_NONE != 0);
+ memset(mld->scan.uid_status, 0, sizeof(mld->scan.uid_status));
+}
+
+int iwl_mld_alloc_scan_cmd(struct iwl_mld *mld)
+{
+ u8 scan_cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, SCAN_REQ_UMAC,
+ IWL_FW_CMD_VER_UNKNOWN);
+ size_t scan_cmd_size;
+
+ if (scan_cmd_ver == 17) {
+ scan_cmd_size = sizeof(struct iwl_scan_req_umac_v17);
+ } else {
+ IWL_ERR(mld, "Unexpected scan cmd version %d\n", scan_cmd_ver);
+ return -EINVAL;
+ }
+
+ mld->scan.cmd = kmalloc(scan_cmd_size, GFP_KERNEL);
+ if (!mld->scan.cmd)
+ return -ENOMEM;
+
+ mld->scan.cmd_size = scan_cmd_size;
+
+ return 0;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/scan.h b/drivers/net/wireless/intel/iwlwifi/mld/scan.h
new file mode 100644
index 000000000000..3ae940d55065
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/scan.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_scan_h__
+#define __iwl_mld_scan_h__
+
+int iwl_mld_alloc_scan_cmd(struct iwl_mld *mld);
+
+int iwl_mld_regular_scan_start(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req,
+ struct ieee80211_scan_ies *ies);
+
+void iwl_mld_int_mlo_scan(struct iwl_mld *mld, struct ieee80211_vif *vif);
+
+void iwl_mld_handle_scan_iter_complete_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+
+int iwl_mld_scan_stop(struct iwl_mld *mld, int type, bool notify);
+
+int iwl_mld_sched_scan_start(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies,
+ int type);
+
+void iwl_mld_handle_match_found_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+
+void iwl_mld_handle_scan_complete_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+
+#define WFA_TPC_IE_LEN 9
+
+static inline int iwl_mld_scan_max_template_size(void)
+{
+#define MAC_HDR_LEN 24
+#define DS_IE_LEN 3
+#define SSID_IE_LEN 2
+
+/* the driver creates the 802.11 header, WFA TPC IE, DS parameter and SSID IE */
+#define DRIVER_TOTAL_IES_LEN \
+ (MAC_HDR_LEN + WFA_TPC_IE_LEN + DS_IE_LEN + SSID_IE_LEN)
+
+ BUILD_BUG_ON(SCAN_OFFLOAD_PROBE_REQ_SIZE < DRIVER_TOTAL_IES_LEN);
+
+ return SCAN_OFFLOAD_PROBE_REQ_SIZE - DRIVER_TOTAL_IES_LEN;
+}
+
+void iwl_mld_report_scan_aborted(struct iwl_mld *mld);
+
+enum iwl_mld_scan_status {
+ IWL_MLD_SCAN_NONE = 0,
+ IWL_MLD_SCAN_REGULAR = BIT(0),
+ IWL_MLD_SCAN_SCHED = BIT(1),
+ IWL_MLD_SCAN_NETDETECT = BIT(2),
+ IWL_MLD_SCAN_INT_MLO = BIT(3),
+};
+
+/* enum iwl_mld_pass_all_sched_results_states - Defines the states for
+ * handling/passing scheduled scan results to mac80211
+ * @SCHED_SCAN_PASS_ALL_STATE_DISABLED: Don't pass all scan results; only pass
+ * them when a match is found.
+ * @SCHED_SCAN_PASS_ALL_STATE_ENABLED: Passing all scan results is enabled
+ * (no filtering).
+ * @SCHED_SCAN_PASS_ALL_STATE_FOUND: A scan result was found; pass it on at the
+ * next scan iteration complete notification.
+ */
+enum iwl_mld_pass_all_sched_results_states {
+ SCHED_SCAN_PASS_ALL_STATE_DISABLED,
+ SCHED_SCAN_PASS_ALL_STATE_ENABLED,
+ SCHED_SCAN_PASS_ALL_STATE_FOUND,
+};
+
+/**
+ * enum iwl_mld_traffic_load - Levels of traffic load
+ *
+ * @IWL_MLD_TRAFFIC_LOW: low traffic load
+ * @IWL_MLD_TRAFFIC_MEDIUM: medium traffic load
+ * @IWL_MLD_TRAFFIC_HIGH: high traffic load
+ */
+enum iwl_mld_traffic_load {
+ IWL_MLD_TRAFFIC_LOW,
+ IWL_MLD_TRAFFIC_MEDIUM,
+ IWL_MLD_TRAFFIC_HIGH,
+};
+
+/**
+ * struct iwl_mld_scan - Scan data
+ * @status: scan status, a combination of %enum iwl_mld_scan_status,
+ * reflects the %scan.uid_status array.
+ * @uid_status: array to track the scan status per uid.
+ * @start_tsf: start time of last scan in TSF of the link that requested
+ * the scan.
+ * @last_ebs_failed: true if the last EBS (Energy Based Scan) failed.
+ * @pass_all_sched_res: see %enum iwl_mld_pass_all_sched_results_states.
+ * @fw_link_id: the current (regular) scan fw link id, used by scan
+ * complete notif.
+ * @traffic_load: traffic load related data
+ * @traffic_load.last_stats_ts_usec: The timestamp of the last statistics
+ * notification, used to calculate the elapsed time between two
+ * notifications and determine the traffic load
+ * @traffic_load.status: The current traffic load status, see
+ * &enum iwl_mld_traffic_load
+ * @cmd_size: size of %cmd.
+ * @cmd: pointer to scan cmd buffer (allocated once in op mode start).
+ * @last_6ghz_passive_jiffies: stores the last 6GHz passive scan time
+ * in jiffies.
+ * @last_start_time_jiffies: stores the last start time in jiffies
+ * (interface up/reset/resume).
+ * @last_mlo_scan_time: start time of the last MLO scan in nanoseconds since
+ * boot.
+ */
+struct iwl_mld_scan {
+ /* Add here fields that need clean up on restart */
+ struct_group(zeroed_on_hw_restart,
+ unsigned int status;
+ u32 uid_status[IWL_MAX_UMAC_SCANS];
+ u64 start_tsf;
+ bool last_ebs_failed;
+ enum iwl_mld_pass_all_sched_results_states pass_all_sched_res;
+ u8 fw_link_id;
+ struct {
+ u32 last_stats_ts_usec;
+ enum iwl_mld_traffic_load status;
+ } traffic_load;
+ );
+ /* And here fields that survive a fw restart */
+ size_t cmd_size;
+ void *cmd;
+ unsigned long last_6ghz_passive_jiffies;
+ unsigned long last_start_time_jiffies;
+ unsigned long last_mlo_scan_time;
+};
+
+#endif /* __iwl_mld_scan_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/session-protect.c b/drivers/net/wireless/intel/iwlwifi/mld/session-protect.c
new file mode 100644
index 000000000000..dbb5615dc3f6
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/session-protect.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include "session-protect.h"
+#include "fw/api/time-event.h"
+#include "fw/api/context.h"
+#include "iface.h"
+#include <net/mac80211.h>
+
+void iwl_mld_handle_session_prot_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_session_prot_notif *notif = (void *)pkt->data;
+ int fw_link_id = le32_to_cpu(notif->mac_link_id);
+ struct ieee80211_bss_conf *link_conf =
+ iwl_mld_fw_id_to_link_conf(mld, fw_link_id);
+ struct ieee80211_vif *vif;
+ struct iwl_mld_vif *mld_vif;
+ struct iwl_mld_session_protect *session_protect;
+
+ if (WARN_ON(!link_conf))
+ return;
+
+ vif = link_conf->vif;
+ mld_vif = iwl_mld_vif_from_mac80211(vif);
+ session_protect = &mld_vif->session_protect;
+
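+	/* A zero status means the firmware did not schedule the protection;
+	 * otherwise notif->start tells whether the session has just started
+	 * or has ended.
+	 */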
+ if (!le32_to_cpu(notif->status)) {
+ memset(session_protect, 0, sizeof(*session_protect));
+ } else if (le32_to_cpu(notif->start)) {
+ /* End_jiffies indicates an active session */
+ session_protect->session_requested = false;
+ session_protect->end_jiffies =
+ TU_TO_EXP_TIME(session_protect->duration);
+ /* !session_protect->end_jiffies means inactive session */
+ if (!session_protect->end_jiffies)
+ session_protect->end_jiffies = 1;
+ } else {
+ memset(session_protect, 0, sizeof(*session_protect));
+ }
+}
+
+static int _iwl_mld_schedule_session_protection(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 min_duration,
+ int link_id)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link *link =
+ iwl_mld_link_dereference_check(mld_vif, link_id);
+ struct iwl_mld_session_protect *session_protect =
+ &mld_vif->session_protect;
+ struct iwl_session_prot_cmd cmd = {
+ .id_and_color = cpu_to_le32(link->fw_id),
+ .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
+ .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
+ };
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ WARN(hweight16(vif->active_links) > 1,
+ "Session protection isn't allowed with more than one active link");
+
+ if (session_protect->end_jiffies &&
+ time_after(session_protect->end_jiffies,
+ TU_TO_EXP_TIME(min_duration))) {
+		IWL_DEBUG_TE(mld, "We have ample time in the current session: %u\n",
+ jiffies_to_msecs(session_protect->end_jiffies -
+ jiffies));
+ return -EALREADY;
+ }
+
+ IWL_DEBUG_TE(mld, "Add a new session protection, duration %d TU\n",
+ le32_to_cpu(cmd.duration_tu));
+
+ ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(MAC_CONF_GROUP,
+ SESSION_PROTECTION_CMD), &cmd);
+
+ if (ret)
+ return ret;
+
+ /* end_jiffies will be updated when handling session_prot_notif */
+ session_protect->end_jiffies = 0;
+ session_protect->duration = duration;
+ session_protect->session_requested = true;
+
+ return 0;
+}
+
+void iwl_mld_schedule_session_protection(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 min_duration,
+ int link_id)
+{
+ int ret;
+
+ ret = _iwl_mld_schedule_session_protection(mld, vif, duration,
+ min_duration, link_id);
+ if (ret && ret != -EALREADY)
+ IWL_ERR(mld,
+ "Couldn't send the SESSION_PROTECTION_CMD (%d)\n",
+ ret);
+}
+
+struct iwl_mld_session_start_data {
+ struct iwl_mld *mld;
+ struct ieee80211_bss_conf *link_conf;
+ bool success;
+};
+
+static bool iwl_mld_session_start_fn(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *_data)
+{
+ struct iwl_session_prot_notif *notif = (void *)pkt->data;
+ unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
+ struct iwl_mld_session_start_data *data = _data;
+ struct ieee80211_bss_conf *link_conf;
+ struct iwl_mld *mld = data->mld;
+ int fw_link_id;
+
+ if (IWL_FW_CHECK(mld, pkt_len < sizeof(*notif),
+ "short session prot notif (%d)\n",
+ pkt_len))
+ return false;
+
+ fw_link_id = le32_to_cpu(notif->mac_link_id);
+ link_conf = iwl_mld_fw_id_to_link_conf(mld, fw_link_id);
+
+ if (link_conf != data->link_conf)
+ return false;
+
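+	/* returning true ends the wait: either the firmware refused the
+	 * protection (status == 0, data->success stays false) or the session
+	 * actually started
+	 */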
+ if (!le32_to_cpu(notif->status))
+ return true;
+
+ if (notif->start) {
+ data->success = true;
+ return true;
+ }
+
+ return false;
+}
+
+int iwl_mld_start_session_protection(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 min_duration,
+ int link_id, unsigned long timeout)
+{
+ static const u16 start_notif[] = { SESSION_PROTECTION_NOTIF };
+ struct iwl_notification_wait start_wait;
+ struct iwl_mld_session_start_data data = {
+ .mld = mld,
+ .link_conf = wiphy_dereference(mld->wiphy,
+ vif->link_conf[link_id]),
+ };
+ int ret;
+
+ if (WARN_ON(!data.link_conf))
+ return -EINVAL;
+
+ iwl_init_notification_wait(&mld->notif_wait, &start_wait,
+ start_notif, ARRAY_SIZE(start_notif),
+ iwl_mld_session_start_fn, &data);
+
+ ret = _iwl_mld_schedule_session_protection(mld, vif, duration,
+ min_duration, link_id);
+
+ if (ret) {
+ iwl_remove_notification(&mld->notif_wait, &start_wait);
+ return ret == -EALREADY ? 0 : ret;
+ }
+
+ ret = iwl_wait_notification(&mld->notif_wait, &start_wait, timeout);
+ if (ret)
+ return ret;
+ return data.success ? 0 : -EIO;
+}
+
+int iwl_mld_cancel_session_protection(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ int link_id)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link *link =
+ iwl_mld_link_dereference_check(mld_vif, link_id);
+ struct iwl_mld_session_protect *session_protect =
+ &mld_vif->session_protect;
+ struct iwl_session_prot_cmd cmd = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+ .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
+ };
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* If there isn't an active session or a requested one for this
+ * link do nothing
+ */
+ if (!session_protect->session_requested &&
+ !session_protect->end_jiffies)
+ return 0;
+
+ if (WARN_ON(!link))
+ return -EINVAL;
+
+ cmd.id_and_color = cpu_to_le32(link->fw_id);
+
+ ret = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(MAC_CONF_GROUP,
+ SESSION_PROTECTION_CMD), &cmd);
+ if (ret) {
+ IWL_ERR(mld,
+ "Couldn't send the SESSION_PROTECTION_CMD\n");
+ return ret;
+ }
+
+ memset(session_protect, 0, sizeof(*session_protect));
+
+ return 0;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/session-protect.h b/drivers/net/wireless/intel/iwlwifi/mld/session-protect.h
new file mode 100644
index 000000000000..642bec8451a1
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/session-protect.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#ifndef __session_protect_h__
+#define __session_protect_h__
+
+#include "mld.h"
+#include "hcmd.h"
+#include <net/mac80211.h>
+#include "fw/api/mac-cfg.h"
+
+/**
+ * DOC: session protection
+ *
+ * Session protection is an API from the firmware that allows the driver to
+ * request time on medium. This is needed before the association when we need
+ * to be on medium for the association frame exchange. Once we configure the
+ * firmware as 'associated', the firmware will allocate time on medium without
+ * needed a session protection.
+ * needing a session protection.
+ * TDLS discover uses this API as well even after association to ensure that
+ * TDLS discovery uses this API as well, even after association, to ensure that
+ * on medium.
+ */
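+ *
+ * An illustrative flow for the association case (a sketch only, not the
+ * exact call sites):
+ *
+ *	iwl_mld_schedule_session_protection(mld, vif,
+ *		IWL_MLD_SESSION_PROTECTION_ASSOC_TIME_MS,
+ *		IWL_MLD_SESSION_PROTECTION_MIN_TIME_MS, link_id);
+ *	... association frame exchange ...
+ *	iwl_mld_cancel_session_protection(mld, vif, link_id);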
+
+/**
+ * struct iwl_mld_session_protect - session protection parameters
+ * @end_jiffies: expected end_jiffies of current session protection.
+ * 0 if not active
+ * @duration: the duration in tu of current session
+ * @session_requested: A session protection command was sent and wasn't yet
+ * answered
+ */
+struct iwl_mld_session_protect {
+ unsigned long end_jiffies;
+ u32 duration;
+ bool session_requested;
+};
+
+#define IWL_MLD_SESSION_PROTECTION_ASSOC_TIME_MS 900
+#define IWL_MLD_SESSION_PROTECTION_MIN_TIME_MS 400
+
+/**
+ * iwl_mld_handle_session_prot_notif - handles %SESSION_PROTECTION_NOTIF
+ * @mld: the mld component
+ * @pkt: the RX packet containing the notification
+ */
+void iwl_mld_handle_session_prot_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+
+/**
+ * iwl_mld_schedule_session_protection - schedule a session protection
+ * @mld: the mld component
+ * @vif: the virtual interface for which the protection issued
+ * @duration: the requested duration of the protection
+ * @min_duration: the minimum duration of the protection
+ * @link_id: The link to schedule a session protection for
+ */
+void iwl_mld_schedule_session_protection(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 min_duration,
+ int link_id);
+
+/**
+ * iwl_mld_start_session_protection - start a session protection
+ * @mld: the mld component
+ * @vif: the virtual interface for which the protection issued
+ * @duration: the requested duration of the protection
+ * @min_duration: the minimum duration of the protection
+ * @link_id: The link to schedule a session protection for
+ * @timeout: timeout for waiting
+ *
+ * This schedules the session protection, and waits for it to start
+ * (with timeout)
+ *
+ * Returns: 0 if successful, error code otherwise
+ */
+int iwl_mld_start_session_protection(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 min_duration,
+ int link_id, unsigned long timeout);
+
+/**
+ * iwl_mld_cancel_session_protection - cancel the session protection.
+ * @mld: the mld component
+ * @vif: the virtual interface for which the session is issued
+ * @link_id: cancel the session protection for given link
+ *
+ * This function cancels the session protection, which is an act of good
+ * citizenship: if it is not needed any more it should be canceled, because
+ * other MAC contexts wait for the medium during that time.
+ *
+ * Returns: 0 if successful, error code otherwise
+ *
+ */
+int iwl_mld_cancel_session_protection(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ int link_id);
+
+#endif /* __session_protect_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/sta.c b/drivers/net/wireless/intel/iwlwifi/mld/sta.c
new file mode 100644
index 000000000000..332a7aecec2d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/sta.c
@@ -0,0 +1,1289 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include <linux/ieee80211.h>
+#include <kunit/static_stub.h>
+
+#include "sta.h"
+#include "hcmd.h"
+#include "iface.h"
+#include "mlo.h"
+#include "key.h"
+#include "agg.h"
+#include "tlc.h"
+#include "fw/api/sta.h"
+#include "fw/api/mac.h"
+#include "fw/api/rx.h"
+
+int iwl_mld_fw_sta_id_from_link_sta(struct iwl_mld *mld,
+ struct ieee80211_link_sta *link_sta)
+{
+ struct iwl_mld_link_sta *mld_link_sta;
+
+	/* This function should only be used with the wiphy lock held;
+	 * otherwise it is not guaranteed that the link_sta also exists in the
+	 * driver, which is why it is checked here.
+ */
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* This is not meant to be called with a NULL pointer */
+ if (WARN_ON(!link_sta))
+ return -ENOENT;
+
+ mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta);
+ if (!mld_link_sta) {
+ WARN_ON(!iwl_mld_error_before_recovery(mld));
+ return -ENOENT;
+ }
+
+ return mld_link_sta->fw_id;
+}
+
+static void
+iwl_mld_fill_ampdu_size_and_dens(struct ieee80211_link_sta *link_sta,
+ struct ieee80211_bss_conf *link,
+ __le32 *tx_ampdu_max_size,
+ __le32 *tx_ampdu_spacing)
+{
+ u32 agg_size = 0, mpdu_dens = 0;
+
+ if (WARN_ON(!link_sta || !link))
+ return;
+
+	/* Note that we always use only legacy & highest supported PPDUs, so
+	 * out of Draft P802.11be D.30 Table 10-12a ("Fields used for
+	 * calculating the maximum A-MPDU size of various PPDU types in
+	 * different bands"), we only need to worry about the highest
+	 * supported PPDU type here.
+ */
+
+ if (link_sta->ht_cap.ht_supported) {
+ agg_size = link_sta->ht_cap.ampdu_factor;
+ mpdu_dens = link_sta->ht_cap.ampdu_density;
+ }
+
+ if (link->chanreq.oper.chan->band == NL80211_BAND_6GHZ) {
+ /* overwrite HT values on 6 GHz */
+ mpdu_dens =
+ le16_get_bits(link_sta->he_6ghz_capa.capa,
+ IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
+ agg_size =
+ le16_get_bits(link_sta->he_6ghz_capa.capa,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
+ } else if (link_sta->vht_cap.vht_supported) {
+ /* if VHT supported overwrite HT value */
+ agg_size =
+ u32_get_bits(link_sta->vht_cap.cap,
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK);
+ }
+
+ /* D6.0 10.12.2 A-MPDU length limit rules
+ * A STA indicates the maximum length of the A-MPDU preEOF padding
+ * that it can receive in an HE PPDU in the Maximum A-MPDU Length
+ * Exponent field in its HT Capabilities, VHT Capabilities,
+ * and HE 6 GHz Band Capabilities elements (if present) and the
+ * Maximum AMPDU Length Exponent Extension field in its HE
+ * Capabilities element
+ */
+ if (link_sta->he_cap.has_he)
+ agg_size +=
+ u8_get_bits(link_sta->he_cap.he_cap_elem.mac_cap_info[3],
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
+
+ if (link_sta->eht_cap.has_eht)
+ agg_size +=
+ u8_get_bits(link_sta->eht_cap.eht_cap_elem.mac_cap_info[1],
+ IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK);
+
+ /* Limit to max A-MPDU supported by FW */
+ agg_size = min_t(u32, agg_size,
+ STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT);
+
+ *tx_ampdu_max_size = cpu_to_le32(agg_size);
+ *tx_ampdu_spacing = cpu_to_le32(mpdu_dens);
+}
+
+static u8 iwl_mld_get_uapsd_acs(struct ieee80211_sta *sta)
+{
+ u8 uapsd_acs = 0;
+
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+ uapsd_acs |= BIT(AC_BK);
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+ uapsd_acs |= BIT(AC_BE);
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+ uapsd_acs |= BIT(AC_VI);
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ uapsd_acs |= BIT(AC_VO);
+
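+	/* duplicate the bitmap into both nibbles; the FW uses one nibble for
+	 * the trigger-enabled ACs and the other for the delivery-enabled
+	 * ones, and for U-APSD they are the same set
+	 */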
+ return uapsd_acs | uapsd_acs << 4;
+}
+
+static u8 iwl_mld_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit)
+{
+ u8 byte_num = ppe_pos_bit / 8;
+ u8 bit_num = ppe_pos_bit % 8;
+ u8 residue_bits;
+ u8 res;
+
+ if (bit_num <= 5)
+ return (ppe[byte_num] >> bit_num) &
+ (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1);
+
+ /* If bit_num > 5, we have to combine bits with next byte.
+ * Calculate how many bits we need to take from current byte (called
+ * here "residue_bits"), and add them to bits from next byte.
+ */
+
+ residue_bits = 8 - bit_num;
+
+ res = (ppe[byte_num + 1] &
+ (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) <<
+ residue_bits;
+ res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1);
+
+ return res;
+}
+
+static void iwl_mld_parse_ppe(struct iwl_mld *mld,
+ struct iwl_he_pkt_ext_v2 *pkt_ext, u8 nss,
+ u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit,
+ bool inheritance)
+{
+ /* FW currently supports only nss == MAX_HE_SUPP_NSS
+ *
+ * If nss > MAX: we can ignore values we don't support
+ * If nss < MAX: we can set zeros in other streams
+ */
+ if (nss > MAX_HE_SUPP_NSS) {
+ IWL_DEBUG_INFO(mld, "Got NSS = %d - trimming to %d\n", nss,
+ MAX_HE_SUPP_NSS);
+ nss = MAX_HE_SUPP_NSS;
+ }
+
+ for (int i = 0; i < nss; i++) {
+ u8 ru_index_tmp = ru_index_bitmap << 1;
+ u8 low_th = IWL_HE_PKT_EXT_NONE, high_th = IWL_HE_PKT_EXT_NONE;
+
+ for (u8 bw = 0;
+ bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
+ bw++) {
+ ru_index_tmp >>= 1;
+
+			/* According to the 11be spec, if for a specific BW the
+			 * PPE Thresholds aren't present, they should inherit
+			 * the thresholds from the last BW for which we had PPE
+			 * Thresholds. In 11ax though, we don't have this
+			 * inheritance - continue in this case
+ */
+ if (!(ru_index_tmp & 1)) {
+ if (inheritance)
+ goto set_thresholds;
+ else
+ continue;
+ }
+
+ high_th = iwl_mld_he_get_ppe_val(ppe, ppe_pos_bit);
+ ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+ low_th = iwl_mld_he_get_ppe_val(ppe, ppe_pos_bit);
+ ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+
+set_thresholds:
+ pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th;
+ pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th;
+ }
+ }
+}
+
+static void iwl_mld_set_pkt_ext_from_he_ppe(struct iwl_mld *mld,
+ struct ieee80211_link_sta *link_sta,
+ struct iwl_he_pkt_ext_v2 *pkt_ext,
+ bool inheritance)
+{
+ u8 nss = (link_sta->he_cap.ppe_thres[0] &
+ IEEE80211_PPE_THRES_NSS_MASK) + 1;
+ u8 *ppe = &link_sta->he_cap.ppe_thres[0];
+ u8 ru_index_bitmap =
+ u8_get_bits(*ppe,
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ /* Starting after PPE header */
+ u8 ppe_pos_bit = IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE;
+
+ iwl_mld_parse_ppe(mld, pkt_ext, nss, ru_index_bitmap, ppe, ppe_pos_bit,
+ inheritance);
+}
+
+static int
+iwl_mld_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *pkt_ext,
+ u8 nominal_padding)
+{
+ int low_th = -1;
+ int high_th = -1;
+
+ /* all the macros are the same for EHT and HE */
+ switch (nominal_padding) {
+ case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_0US:
+ low_th = IWL_HE_PKT_EXT_NONE;
+ high_th = IWL_HE_PKT_EXT_NONE;
+ break;
+ case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US:
+ low_th = IWL_HE_PKT_EXT_BPSK;
+ high_th = IWL_HE_PKT_EXT_NONE;
+ break;
+ case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US:
+ case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US:
+ low_th = IWL_HE_PKT_EXT_NONE;
+ high_th = IWL_HE_PKT_EXT_BPSK;
+ break;
+ }
+
+ if (low_th < 0 || high_th < 0)
+ return -EINVAL;
+
+ /* Set the PPE thresholds accordingly */
+ for (int i = 0; i < MAX_HE_SUPP_NSS; i++) {
+ for (u8 bw = 0;
+ bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
+ bw++) {
+ pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th;
+ pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th;
+ }
+ }
+
+ return 0;
+}
+
+static void iwl_mld_get_optimal_ppe_info(struct iwl_he_pkt_ext_v2 *pkt_ext,
+ u8 nominal_padding)
+{
+ for (int i = 0; i < MAX_HE_SUPP_NSS; i++) {
+ for (u8 bw = 0; bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
+ bw++) {
+ u8 *qam_th = &pkt_ext->pkt_ext_qam_th[i][bw][0];
+
+ if (nominal_padding >
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US &&
+ qam_th[1] == IWL_HE_PKT_EXT_NONE)
+ qam_th[1] = IWL_HE_PKT_EXT_4096QAM;
+ else if (nominal_padding ==
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US &&
+ qam_th[0] == IWL_HE_PKT_EXT_NONE &&
+ qam_th[1] == IWL_HE_PKT_EXT_NONE)
+ qam_th[0] = IWL_HE_PKT_EXT_4096QAM;
+ }
+ }
+}
+
+static void iwl_mld_fill_pkt_ext(struct iwl_mld *mld,
+ struct ieee80211_link_sta *link_sta,
+ struct iwl_he_pkt_ext_v2 *pkt_ext)
+{
+ if (WARN_ON(!link_sta))
+ return;
+
+ /* Initialize the PPE thresholds to "None" (7), as described in Table
+ * 9-262ac of 80211.ax/D3.0.
+ */
+ memset(pkt_ext, IWL_HE_PKT_EXT_NONE, sizeof(*pkt_ext));
+
+ if (link_sta->eht_cap.has_eht) {
+ u8 nominal_padding =
+ u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5],
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK);
+
+ /* If PPE Thresholds exists, parse them into a FW-familiar
+ * format.
+ */
+ if (link_sta->eht_cap.eht_cap_elem.phy_cap_info[5] &
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) {
+ u8 nss = (link_sta->eht_cap.eht_ppe_thres[0] &
+ IEEE80211_EHT_PPE_THRES_NSS_MASK) + 1;
+ u8 *ppe = &link_sta->eht_cap.eht_ppe_thres[0];
+ u8 ru_index_bitmap =
+ u16_get_bits(*ppe,
+ IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ /* Starting after PPE header */
+ u8 ppe_pos_bit = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE;
+
+ iwl_mld_parse_ppe(mld, pkt_ext, nss, ru_index_bitmap,
+ ppe, ppe_pos_bit, true);
+		/* EHT PPE Thresholds don't exist - set the API according to
+		 * the HE PPE Thresholds
+ */
+ } else if (link_sta->he_cap.he_cap_elem.phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ /* Even though HE Capabilities IE doesn't contain PPE
+			 * Thresholds for BW 320 MHz, thresholds for this BW will
+			 * be filled in with the same values as 160 MHz, due to
+ * the inheritance, as required.
+ */
+ iwl_mld_set_pkt_ext_from_he_ppe(mld, link_sta, pkt_ext,
+ true);
+
+ /* According to the requirements, for MCSs 12-13 the
+ * maximum value between HE PPE Threshold and Common
+ * Nominal Packet Padding needs to be taken
+ */
+ iwl_mld_get_optimal_ppe_info(pkt_ext, nominal_padding);
+
+		/* if PPE Thresholds aren't present in either the EHT IE or the
+		 * HE IE - take the Thresholds from the Common Nominal Packet
+		 * Padding field
+ */
+ } else {
+ iwl_mld_set_pkt_ext_from_nominal_padding(pkt_ext,
+ nominal_padding);
+ }
+ } else if (link_sta->he_cap.has_he) {
+ /* If PPE Thresholds exist, parse them into a FW-familiar format. */
+ if (link_sta->he_cap.he_cap_elem.phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ iwl_mld_set_pkt_ext_from_he_ppe(mld, link_sta, pkt_ext,
+ false);
+		/* PPE Thresholds don't exist - set the API PPE values
+		 * according to the Common Nominal Packet Padding field.
+ */
+ } else {
+ u8 nominal_padding =
+ u8_get_bits(link_sta->he_cap.he_cap_elem.phy_cap_info[9],
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
+ if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED)
+ iwl_mld_set_pkt_ext_from_nominal_padding(pkt_ext,
+ nominal_padding);
+ }
+ }
+
+ for (int i = 0; i < MAX_HE_SUPP_NSS; i++) {
+ for (int bw = 0;
+ bw < ARRAY_SIZE(*pkt_ext->pkt_ext_qam_th[i]);
+ bw++) {
+ u8 *qam_th =
+ &pkt_ext->pkt_ext_qam_th[i][bw][0];
+
+ IWL_DEBUG_HT(mld,
+ "PPE table: nss[%d] bw[%d] PPET8 = %d, PPET16 = %d\n",
+ i, bw, qam_th[0], qam_th[1]);
+ }
+ }
+}
+
+static u32 iwl_mld_get_htc_flags(struct ieee80211_link_sta *link_sta)
+{
+ u8 *mac_cap_info =
+ &link_sta->he_cap.he_cap_elem.mac_cap_info[0];
+ u32 htc_flags = 0;
+
+ if (mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_HTC_HE)
+ htc_flags |= IWL_HE_HTC_SUPPORT;
+ if ((mac_cap_info[1] & IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) ||
+ (mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) {
+ u8 link_adap =
+ ((mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) +
+ (mac_cap_info[1] &
+ IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION);
+
+ if (link_adap == 2)
+ htc_flags |=
+ IWL_HE_HTC_LINK_ADAP_UNSOLICITED;
+ else if (link_adap == 3)
+ htc_flags |= IWL_HE_HTC_LINK_ADAP_BOTH;
+ }
+ if (mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
+ htc_flags |= IWL_HE_HTC_BSR_SUPP;
+ if (mac_cap_info[3] & IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
+ htc_flags |= IWL_HE_HTC_OMI_SUPP;
+ if (mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
+ htc_flags |= IWL_HE_HTC_BQR_SUPP;
+
+ return htc_flags;
+}
+
+static int iwl_mld_send_sta_cmd(struct iwl_mld *mld,
+ const struct iwl_sta_cfg_cmd *cmd)
+{
+ int ret = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD),
+ cmd);
+ if (ret)
+ IWL_ERR(mld, "STA_CONFIG_CMD send failed, ret=0x%x\n", ret);
+ return ret;
+}
+
+static int
+iwl_mld_add_modify_sta_cmd(struct iwl_mld *mld,
+ struct ieee80211_link_sta *link_sta)
+{
+ struct ieee80211_sta *sta = link_sta->sta;
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ struct ieee80211_bss_conf *link;
+ struct iwl_mld_link *mld_link;
+ struct iwl_sta_cfg_cmd cmd = {};
+ int fw_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ link = link_conf_dereference_protected(mld_sta->vif,
+ link_sta->link_id);
+
+ mld_link = iwl_mld_link_from_mac80211(link);
+
+ if (WARN_ON(!link || !mld_link) || fw_id < 0)
+ return -EINVAL;
+
+ cmd.sta_id = cpu_to_le32(fw_id);
+ cmd.station_type = cpu_to_le32(mld_sta->sta_type);
+ cmd.link_id = cpu_to_le32(mld_link->fw_id);
+
+ memcpy(&cmd.peer_mld_address, sta->addr, ETH_ALEN);
+ memcpy(&cmd.peer_link_address, link_sta->addr, ETH_ALEN);
+
+ if (mld_sta->sta_state >= IEEE80211_STA_ASSOC)
+ cmd.assoc_id = cpu_to_le32(sta->aid);
+
+ if (sta->mfp || mld_sta->sta_state < IEEE80211_STA_AUTHORIZED)
+ cmd.mfp = cpu_to_le32(1);
+
+ switch (link_sta->rx_nss) {
+ case 1:
+ cmd.mimo = cpu_to_le32(0);
+ break;
+ case 2 ... 8:
+ cmd.mimo = cpu_to_le32(1);
+ break;
+ }
+
+ switch (link_sta->smps_mode) {
+ case IEEE80211_SMPS_AUTOMATIC:
+ case IEEE80211_SMPS_NUM_MODES:
+ WARN_ON(1);
+ break;
+ case IEEE80211_SMPS_STATIC:
+ /* override NSS */
+ cmd.mimo = cpu_to_le32(0);
+ break;
+ case IEEE80211_SMPS_DYNAMIC:
+ cmd.mimo_protection = cpu_to_le32(1);
+ break;
+ case IEEE80211_SMPS_OFF:
+ /* nothing */
+ break;
+ }
+
+ iwl_mld_fill_ampdu_size_and_dens(link_sta, link,
+ &cmd.tx_ampdu_max_size,
+ &cmd.tx_ampdu_spacing);
+
+ if (sta->wme) {
+ cmd.sp_length =
+ cpu_to_le32(sta->max_sp ? sta->max_sp * 2 : 128);
+ cmd.uapsd_acs = cpu_to_le32(iwl_mld_get_uapsd_acs(sta));
+ }
+
+ if (link_sta->he_cap.has_he) {
+ cmd.trig_rnd_alloc =
+ cpu_to_le32(link->uora_exists ? 1 : 0);
+
+ /* PPE Thresholds */
+ iwl_mld_fill_pkt_ext(mld, link_sta, &cmd.pkt_ext);
+
+ /* HTC flags */
+ cmd.htc_flags =
+ cpu_to_le32(iwl_mld_get_htc_flags(link_sta));
+
+ if (link_sta->he_cap.he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_ACK_EN)
+ cmd.ack_enabled = cpu_to_le32(1);
+ }
+
+ return iwl_mld_send_sta_cmd(mld, &cmd);
+}
+
+IWL_MLD_ALLOC_FN(link_sta, link_sta)
+
+static int
+iwl_mld_add_link_sta(struct iwl_mld *mld, struct ieee80211_link_sta *link_sta)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+ struct iwl_mld_link_sta *mld_link_sta;
+ int ret;
+ u8 fw_id;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* We will fail to add it to the FW anyway */
+ if (iwl_mld_error_before_recovery(mld))
+ return -ENODEV;
+
+ mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta);
+
+ /* We need to preserve the fw sta ids during a restart, since the fw
+ * will recover SN/PN for them, this is why the mld_link_sta exists.
+ */
+ if (mld_link_sta) {
+ /* But if we are not restarting, this is not OK */
+ WARN_ON(!mld->fw_status.in_hw_restart);
+
+		/* Don't add a STA that is already in the FW, to avoid an assert */
+ if (WARN_ON(mld_link_sta->in_fw))
+ return -EINVAL;
+
+ fw_id = mld_link_sta->fw_id;
+ goto add_to_fw;
+ }
+
+ /* Allocate a fw id and map it to the link_sta */
+ ret = iwl_mld_allocate_link_sta_fw_id(mld, &fw_id, link_sta);
+ if (ret)
+ return ret;
+
+ if (link_sta == &link_sta->sta->deflink) {
+ mld_link_sta = &mld_sta->deflink;
+ } else {
+ mld_link_sta = kzalloc(sizeof(*mld_link_sta), GFP_KERNEL);
+ if (!mld_link_sta)
+ return -ENOMEM;
+ }
+
+ mld_link_sta->fw_id = fw_id;
+ rcu_assign_pointer(mld_sta->link[link_sta->link_id], mld_link_sta);
+
+add_to_fw:
+ ret = iwl_mld_add_modify_sta_cmd(mld, link_sta);
+ if (ret) {
+ RCU_INIT_POINTER(mld->fw_id_to_link_sta[fw_id], NULL);
+ RCU_INIT_POINTER(mld_sta->link[link_sta->link_id], NULL);
+ if (link_sta != &link_sta->sta->deflink)
+ kfree(mld_link_sta);
+ return ret;
+ }
+ mld_link_sta->in_fw = true;
+
+ return 0;
+}
+
+static int iwl_mld_rm_sta_from_fw(struct iwl_mld *mld, u8 fw_sta_id)
+{
+ struct iwl_remove_sta_cmd cmd = {
+ .sta_id = cpu_to_le32(fw_sta_id),
+ };
+ int ret;
+
+ ret = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(MAC_CONF_GROUP, STA_REMOVE_CMD),
+ &cmd);
+ if (ret)
+ IWL_ERR(mld, "Failed to remove station. Id=%d\n", fw_sta_id);
+
+ return ret;
+}
+
+static void
+iwl_mld_remove_link_sta(struct iwl_mld *mld,
+ struct ieee80211_link_sta *link_sta)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+ struct iwl_mld_link_sta *mld_link_sta =
+ iwl_mld_link_sta_from_mac80211(link_sta);
+
+ if (WARN_ON(!mld_link_sta))
+ return;
+
+ iwl_mld_rm_sta_from_fw(mld, mld_link_sta->fw_id);
+ mld_link_sta->in_fw = false;
+
+ /* Now that the STA doesn't exist in FW, we don't expect any new
+ * notifications for it. Cancel the ones that are already pending
+ */
+ iwl_mld_cancel_notifications_of_object(mld, IWL_MLD_OBJECT_TYPE_STA,
+ mld_link_sta->fw_id);
+
+	/* This will not be done upon reconfig, so do it also when removing
+	 * from the FW failed
+ */
+ RCU_INIT_POINTER(mld->fw_id_to_link_sta[mld_link_sta->fw_id], NULL);
+ RCU_INIT_POINTER(mld_sta->link[link_sta->link_id], NULL);
+ if (mld_link_sta != &mld_sta->deflink)
+ kfree_rcu(mld_link_sta, rcu_head);
+}
+
+static void iwl_mld_set_max_amsdu_len(struct iwl_mld *mld,
+ struct ieee80211_link_sta *link_sta)
+{
+ const struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
+
+ /* For EHT, HE and VHT we can use the value as calculated by
+ * mac80211. For HT, mac80211 doesn't enforce the 4095-byte limit,
+ * so force it here.
+ */
+ if (link_sta->eht_cap.has_eht || link_sta->he_cap.has_he ||
+ link_sta->vht_cap.vht_supported ||
+ !ht_cap->ht_supported ||
+ !(ht_cap->cap & IEEE80211_HT_CAP_MAX_AMSDU))
+ return;
+
+ link_sta->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_BA;
+ ieee80211_sta_recalc_aggregates(link_sta->sta);
+}
+
+int iwl_mld_update_all_link_stations(struct iwl_mld *mld,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ struct ieee80211_link_sta *link_sta;
+ int link_id;
+
+ for_each_sta_active_link(mld_sta->vif, sta, link_sta, link_id) {
+ int ret = iwl_mld_add_modify_sta_cmd(mld, link_sta);
+
+ if (ret)
+ return ret;
+
+ if (mld_sta->sta_state == IEEE80211_STA_ASSOC)
+ iwl_mld_set_max_amsdu_len(mld, link_sta);
+ }
+ return 0;
+}
+
+static void iwl_mld_destroy_sta(struct ieee80211_sta *sta)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+
+ kfree(mld_sta->dup_data);
+ kfree(mld_sta->mpdu_counters);
+}
+
+static int
+iwl_mld_alloc_dup_data(struct iwl_mld *mld, struct iwl_mld_sta *mld_sta)
+{
+ struct iwl_mld_rxq_dup_data *dup_data;
+
+ if (mld->fw_status.in_hw_restart)
+ return 0;
+
+ dup_data = kcalloc(mld->trans->num_rx_queues, sizeof(*dup_data),
+ GFP_KERNEL);
+ if (!dup_data)
+ return -ENOMEM;
+
+ /* Initialize all the last_seq values to 0xffff which can never
+ * compare equal to the frame's seq_ctrl in the check in
+ * iwl_mld_is_dup() since the lower 4 bits are the fragment
+ * number and fragmented packets don't reach that function.
+ *
+ * This thus allows receiving a packet with seqno 0 and the
+ * retry bit set as the very first packet on a new TID.
+ */
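+ /* For example, a first non-fragmented frame with seqno 0 has
+ * seq_ctrl == 0x0000, which can never equal the 0xffff initializer,
+ * so it is accepted even if its retry bit is set.
+ */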
+ for (int q = 0; q < mld->trans->num_rx_queues; q++)
+ memset(dup_data[q].last_seq, 0xff,
+ sizeof(dup_data[q].last_seq));
+ mld_sta->dup_data = dup_data;
+
+ return 0;
+}
+
+static void iwl_mld_alloc_mpdu_counters(struct iwl_mld *mld,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ struct ieee80211_vif *vif = mld_sta->vif;
+
+ if (mld->fw_status.in_hw_restart)
+ return;
+
+ /* MPDUs are counted only when EMLSR is possible */
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION ||
+ sta->tdls || !ieee80211_vif_is_mld(vif))
+ return;
+
+ mld_sta->mpdu_counters = kcalloc(mld->trans->num_rx_queues,
+ sizeof(*mld_sta->mpdu_counters),
+ GFP_KERNEL);
+ if (!mld_sta->mpdu_counters)
+ return;
+
+ for (int q = 0; q < mld->trans->num_rx_queues; q++)
+ spin_lock_init(&mld_sta->mpdu_counters[q].lock);
+}
+
+static int
+iwl_mld_init_sta(struct iwl_mld *mld, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif, enum iwl_fw_sta_type type)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+
+ mld_sta->vif = vif;
+ mld_sta->sta_type = type;
+ mld_sta->mld = mld;
+
+ if (!mld->fw_status.in_hw_restart)
+ for (int i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ iwl_mld_init_txq(iwl_mld_txq_from_mac80211(sta->txq[i]));
+
+ iwl_mld_alloc_mpdu_counters(mld, sta);
+
+ iwl_mld_toggle_tx_ant(mld, &mld_sta->data_tx_ant);
+
+ return iwl_mld_alloc_dup_data(mld, mld_sta);
+}
+
+int iwl_mld_add_sta(struct iwl_mld *mld, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif, enum iwl_fw_sta_type type)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ struct ieee80211_link_sta *link_sta;
+ int link_id;
+ int ret;
+
+ ret = iwl_mld_init_sta(mld, sta, vif, type);
+ if (ret)
+ return ret;
+
+ /* We could have added only the deflink link_sta, but that will not
+ * work in the restart case if the single link that is active during
+ * reconfig is not the deflink one.
+ */
+ for_each_sta_active_link(mld_sta->vif, sta, link_sta, link_id) {
+ ret = iwl_mld_add_link_sta(mld, link_sta);
+ if (ret)
+ goto destroy_sta;
+ }
+
+ return 0;
+
+destroy_sta:
+ iwl_mld_destroy_sta(sta);
+
+ return ret;
+}
+
+void iwl_mld_flush_sta_txqs(struct iwl_mld *mld, struct ieee80211_sta *sta)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ struct ieee80211_link_sta *link_sta;
+ int link_id;
+
+ for_each_sta_active_link(mld_sta->vif, sta, link_sta, link_id) {
+ int fw_sta_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
+
+ if (fw_sta_id < 0)
+ continue;
+
+ iwl_mld_flush_link_sta_txqs(mld, fw_sta_id);
+ }
+}
+
+void iwl_mld_wait_sta_txqs_empty(struct iwl_mld *mld, struct ieee80211_sta *sta)
+{
+ /* Avoid a warning in iwl_trans_wait_txq_empty if we are anyway on
+ * the way to a restart.
+ */
+ if (iwl_mld_error_before_recovery(mld))
+ return;
+
+ for (int i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+ struct iwl_mld_txq *mld_txq =
+ iwl_mld_txq_from_mac80211(sta->txq[i]);
+
+ if (!mld_txq->status.allocated)
+ continue;
+
+ iwl_trans_wait_txq_empty(mld->trans, mld_txq->fw_id);
+ }
+}
+
+void iwl_mld_remove_sta(struct iwl_mld *mld, struct ieee80211_sta *sta)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ struct ieee80211_vif *vif = mld_sta->vif;
+ struct ieee80211_link_sta *link_sta;
+ u8 link_id;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* Tell the HW to flush the queues */
+ iwl_mld_flush_sta_txqs(mld, sta);
+
+ /* Wait for trans to empty its queues */
+ iwl_mld_wait_sta_txqs_empty(mld, sta);
+
+ /* Now we can remove the queues */
+ for (int i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ iwl_mld_remove_txq(mld, sta->txq[i]);
+
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ /* mac80211 will remove the group keys after the STA is
+ * removed, but the FW expects all the keys to be removed before
+ * the STA is, so remove them all here.
+ */
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
+ iwl_mld_remove_ap_keys(mld, vif, sta, link_id);
+
+ /* Remove the link_sta */
+ iwl_mld_remove_link_sta(mld, link_sta);
+ }
+
+ iwl_mld_destroy_sta(sta);
+}
+
+u32 iwl_mld_fw_sta_id_mask(struct iwl_mld *mld, struct ieee80211_sta *sta)
+{
+ struct ieee80211_vif *vif = iwl_mld_sta_from_mac80211(sta)->vif;
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
+ u32 result = 0;
+
+ KUNIT_STATIC_STUB_REDIRECT(iwl_mld_fw_sta_id_mask, mld, sta);
+
+ /* This function should only be used with the wiphy lock held.
+ * In other cases, it is not guaranteed that the link_sta will also
+ * exist in the driver; that is checked in
+ * iwl_mld_fw_sta_id_from_link_sta.
+ */
+ lockdep_assert_wiphy(mld->wiphy);
+
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ int fw_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
+
+ if (fw_id >= 0)
+ result |= BIT(fw_id);
+ }
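+ /* e.g. link STAs with FW ids 1 and 3 (illustrative values) give
+ * result == BIT(1) | BIT(3)
+ */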
+
+ return result;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_fw_sta_id_mask);
+
+static void iwl_mld_count_mpdu(struct ieee80211_link_sta *link_sta, int queue,
+ u32 count, bool tx)
+{
+ struct iwl_mld_per_q_mpdu_counter *queue_counter;
+ struct iwl_mld_per_link_mpdu_counter *link_counter;
+ struct iwl_mld_vif *mld_vif;
+ struct iwl_mld_sta *mld_sta;
+ struct iwl_mld_link *mld_link;
+ struct iwl_mld *mld;
+ int total_mpdus = 0;
+
+ if (WARN_ON(!link_sta))
+ return;
+
+ mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+ if (!mld_sta->mpdu_counters)
+ return;
+
+ mld_vif = iwl_mld_vif_from_mac80211(mld_sta->vif);
+ mld_link = iwl_mld_link_dereference_check(mld_vif, link_sta->link_id);
+
+ if (WARN_ON_ONCE(!mld_link))
+ return;
+
+ queue_counter = &mld_sta->mpdu_counters[queue];
+
+ mld = mld_vif->mld;
+
+ /* If the window is over, first clear the counters.
+ * When we are not blocked by TPT, the window is managed by
+ * check_tpt_wk.
+ */
+ if ((mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT) &&
+ time_is_before_jiffies(queue_counter->window_start_time +
+ IWL_MLD_TPT_COUNT_WINDOW)) {
+ memset(queue_counter->per_link, 0,
+ sizeof(queue_counter->per_link));
+ queue_counter->window_start_time = jiffies;
+
+ IWL_DEBUG_INFO(mld, "MPDU counters are cleared\n");
+ }
+
+ link_counter = &queue_counter->per_link[mld_link->fw_id];
+
+ spin_lock_bh(&queue_counter->lock);
+
+ /* Update the statistics for this TPT measurement window */
+ if (tx)
+ link_counter->tx += count;
+ else
+ link_counter->rx += count;
+
+ /*
+ * Next, evaluate whether we should queue an unblock;
+ * skip this if we are not blocked due to low throughput.
+ */
+ if (!(mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT))
+ goto unlock;
+
+ for (int i = 0; i <= IWL_FW_MAX_LINK_ID; i++)
+ total_mpdus += tx ? queue_counter->per_link[i].tx :
+ queue_counter->per_link[i].rx;
+
+ /* Unblock is already queued if the threshold was reached before */
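+ /* Illustration with a symbolic threshold T: if the previous total was
+ * T - 10 and this batch adds count = 25, the new total crosses T for
+ * the first time and the unblock work is queued exactly once; a later
+ * batch sees total_mpdus - count >= T and skips the queueing.
+ */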
+ if (total_mpdus - count >= IWL_MLD_ENTER_EMLSR_TPT_THRESH)
+ goto unlock;
+
+ if (total_mpdus >= IWL_MLD_ENTER_EMLSR_TPT_THRESH)
+ wiphy_work_queue(mld->wiphy, &mld_vif->emlsr.unblock_tpt_wk);
+
+unlock:
+ spin_unlock_bh(&queue_counter->lock);
+}
+
+/* must be called under rcu_read_lock() */
+void iwl_mld_count_mpdu_rx(struct ieee80211_link_sta *link_sta, int queue,
+ u32 count)
+{
+ iwl_mld_count_mpdu(link_sta, queue, count, false);
+}
+
+/* must be called under rcu_read_lock() */
+void iwl_mld_count_mpdu_tx(struct ieee80211_link_sta *link_sta, u32 count)
+{
+ /* use queue 0 for all TX */
+ iwl_mld_count_mpdu(link_sta, 0, count, true);
+}
+
+static int iwl_mld_allocate_internal_txq(struct iwl_mld *mld,
+ struct iwl_mld_int_sta *internal_sta,
+ u8 tid)
+{
+ u32 sta_mask = BIT(internal_sta->sta_id);
+ int queue, size;
+
+ size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
+ mld->trans->cfg->min_txq_size);
+
+ queue = iwl_trans_txq_alloc(mld->trans, 0, sta_mask, tid, size,
+ IWL_WATCHDOG_DISABLED);
+
+ if (queue >= 0)
+ IWL_DEBUG_TX_QUEUES(mld,
+ "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
+ queue, sta_mask, tid);
+ return queue;
+}
+
+static int iwl_mld_send_aux_sta_cmd(struct iwl_mld *mld,
+ const struct iwl_mld_int_sta *internal_sta)
+{
+ struct iwl_aux_sta_cmd cmd = {
+ .sta_id = cpu_to_le32(internal_sta->sta_id),
+ /* TODO: CDB - properly set the lmac_id */
+ .lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX),
+ };
+
+ return iwl_mld_send_cmd_pdu(mld, WIDE_ID(MAC_CONF_GROUP, AUX_STA_CMD),
+ &cmd);
+}
+
+static int
+iwl_mld_add_internal_sta_to_fw(struct iwl_mld *mld,
+ const struct iwl_mld_int_sta *internal_sta,
+ u8 fw_link_id,
+ const u8 *addr)
+{
+ struct iwl_sta_cfg_cmd cmd = {};
+
+ if (internal_sta->sta_type == STATION_TYPE_AUX)
+ return iwl_mld_send_aux_sta_cmd(mld, internal_sta);
+
+ cmd.sta_id = cpu_to_le32((u8)internal_sta->sta_id);
+ cmd.link_id = cpu_to_le32(fw_link_id);
+ cmd.station_type = cpu_to_le32(internal_sta->sta_type);
+
+ /* The FW doesn't allow adding an IGTK/BIGTK if the STA isn't marked
+ * as MFP. On the other hand, the FW will never check this flag during
+ * RX since an AP/GO doesn't receive protected broadcast management
+ * frames. So, we can set it unconditionally.
+ */
+ if (internal_sta->sta_type == STATION_TYPE_BCAST_MGMT)
+ cmd.mfp = cpu_to_le32(1);
+
+ if (addr) {
+ memcpy(cmd.peer_mld_address, addr, ETH_ALEN);
+ memcpy(cmd.peer_link_address, addr, ETH_ALEN);
+ }
+
+ return iwl_mld_send_sta_cmd(mld, &cmd);
+}
+
+static int iwl_mld_add_internal_sta(struct iwl_mld *mld,
+ struct iwl_mld_int_sta *internal_sta,
+ enum iwl_fw_sta_type sta_type,
+ u8 fw_link_id, const u8 *addr, u8 tid)
+{
+ int ret, queue_id;
+
+ ret = iwl_mld_allocate_link_sta_fw_id(mld,
+ &internal_sta->sta_id,
+ ERR_PTR(-EINVAL));
+ if (ret)
+ return ret;
+
+ internal_sta->sta_type = sta_type;
+
+ ret = iwl_mld_add_internal_sta_to_fw(mld, internal_sta, fw_link_id,
+ addr);
+ if (ret)
+ goto err;
+
+ queue_id = iwl_mld_allocate_internal_txq(mld, internal_sta, tid);
+ if (queue_id < 0) {
+ iwl_mld_rm_sta_from_fw(mld, internal_sta->sta_id);
+ ret = queue_id;
+ goto err;
+ }
+
+ internal_sta->queue_id = queue_id;
+
+ return 0;
+err:
+ iwl_mld_free_internal_sta(mld, internal_sta);
+ return ret;
+}
+
+int iwl_mld_add_bcast_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+ const u8 bcast_addr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+ const u8 *addr;
+
+ if (WARN_ON(!mld_link))
+ return -EINVAL;
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_ADHOC))
+ return -EINVAL;
+
+ addr = vif->type == NL80211_IFTYPE_ADHOC ? link->bssid : bcast_addr;
+
+ return iwl_mld_add_internal_sta(mld, &mld_link->bcast_sta,
+ STATION_TYPE_BCAST_MGMT,
+ mld_link->fw_id, addr,
+ IWL_MGMT_TID);
+}
+
+int iwl_mld_add_mcast_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+ const u8 mcast_addr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+ if (WARN_ON(!mld_link))
+ return -EINVAL;
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_ADHOC))
+ return -EINVAL;
+
+ return iwl_mld_add_internal_sta(mld, &mld_link->mcast_sta,
+ STATION_TYPE_MCAST,
+ mld_link->fw_id, mcast_addr, 0);
+}
+
+int iwl_mld_add_aux_sta(struct iwl_mld *mld,
+ struct iwl_mld_int_sta *internal_sta)
+{
+ return iwl_mld_add_internal_sta(mld, internal_sta, STATION_TYPE_AUX,
+ 0, NULL, IWL_MAX_TID_COUNT);
+}
+
+static void iwl_mld_remove_internal_sta(struct iwl_mld *mld,
+ struct iwl_mld_int_sta *internal_sta,
+ bool flush, u8 tid)
+{
+ if (WARN_ON_ONCE(internal_sta->sta_id == IWL_INVALID_STA ||
+ internal_sta->queue_id == IWL_MLD_INVALID_QUEUE))
+ return;
+
+ if (flush)
+ iwl_mld_flush_link_sta_txqs(mld, internal_sta->sta_id);
+
+ iwl_mld_free_txq(mld, BIT(internal_sta->sta_id),
+ tid, internal_sta->queue_id);
+
+ iwl_mld_rm_sta_from_fw(mld, internal_sta->sta_id);
+
+ iwl_mld_free_internal_sta(mld, internal_sta);
+}
+
+void iwl_mld_remove_bcast_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+
+ if (WARN_ON(!mld_link))
+ return;
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_ADHOC))
+ return;
+
+ iwl_mld_remove_internal_sta(mld, &mld_link->bcast_sta, true,
+ IWL_MGMT_TID);
+}
+
+void iwl_mld_remove_mcast_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+
+ if (WARN_ON(!mld_link))
+ return;
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_ADHOC))
+ return;
+
+ iwl_mld_remove_internal_sta(mld, &mld_link->mcast_sta, true, 0);
+}
+
+void iwl_mld_remove_aux_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+
+ if (WARN_ON(!mld_link))
+ return;
+
+ /* TODO: Hotspot 2.0 */
+ if (WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE))
+ return;
+
+ iwl_mld_remove_internal_sta(mld, &mld_link->aux_sta, false,
+ IWL_MAX_TID_COUNT);
+}
+
+static int iwl_mld_update_sta_resources(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 old_sta_mask,
+ u32 new_sta_mask)
+{
+ int ret;
+
+ ret = iwl_mld_update_sta_txqs(mld, sta, old_sta_mask, new_sta_mask);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_update_sta_keys(mld, vif, sta, old_sta_mask, new_sta_mask);
+ if (ret)
+ return ret;
+
+ return iwl_mld_update_sta_baids(mld, old_sta_mask, new_sta_mask);
+}
+
+int iwl_mld_update_link_stas(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u16 old_links, u16 new_links)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ struct iwl_mld_link_sta *mld_link_sta;
+ unsigned long links_to_add = ~old_links & new_links;
+ unsigned long links_to_rem = old_links & ~new_links;
+ unsigned long old_links_long = old_links;
+ unsigned long sta_mask_added = 0;
+ u32 current_sta_mask = 0, sta_mask_to_rem = 0;
+ unsigned int link_id, sta_id;
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ for_each_set_bit(link_id, &old_links_long,
+ IEEE80211_MLD_MAX_NUM_LINKS) {
+ mld_link_sta =
+ iwl_mld_link_sta_dereference_check(mld_sta, link_id);
+
+ if (WARN_ON(!mld_link_sta))
+ return -EINVAL;
+
+ current_sta_mask |= BIT(mld_link_sta->fw_id);
+ if (links_to_rem & BIT(link_id))
+ sta_mask_to_rem |= BIT(mld_link_sta->fw_id);
+ }
+
+ if (sta_mask_to_rem) {
+ ret = iwl_mld_update_sta_resources(mld, vif, sta,
+ current_sta_mask,
+ current_sta_mask &
+ ~sta_mask_to_rem);
+ if (ret)
+ return ret;
+
+ current_sta_mask &= ~sta_mask_to_rem;
+ }
+
+ for_each_set_bit(link_id, &links_to_rem, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_link_sta *link_sta =
+ link_sta_dereference_protected(sta, link_id);
+
+ if (WARN_ON(!link_sta))
+ return -EINVAL;
+
+ iwl_mld_remove_link_sta(mld, link_sta);
+ }
+
+ for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_link_sta *link_sta =
+ link_sta_dereference_protected(sta, link_id);
+ struct ieee80211_bss_conf *link;
+
+ if (WARN_ON(!link_sta))
+ return -EINVAL;
+
+ ret = iwl_mld_add_link_sta(mld, link_sta);
+ if (ret)
+ goto remove_added_link_stas;
+
+ mld_link_sta =
+ iwl_mld_link_sta_dereference_check(mld_sta,
+ link_id);
+
+ link = link_conf_dereference_protected(mld_sta->vif,
+ link_sta->link_id);
+
+ iwl_mld_set_max_amsdu_len(mld, link_sta);
+ iwl_mld_config_tlc_link(mld, vif, link, link_sta);
+
+ sta_mask_added |= BIT(mld_link_sta->fw_id);
+ }
+
+ if (sta_mask_added) {
+ ret = iwl_mld_update_sta_resources(mld, vif, sta,
+ current_sta_mask,
+ current_sta_mask |
+ sta_mask_added);
+ if (ret)
+ goto remove_added_link_stas;
+ }
+
+ /* We couldn't activate the links before they had a STA. Now we can */
+ for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link =
+ link_conf_dereference_protected(mld_sta->vif, link_id);
+
+ if (WARN_ON(!link))
+ continue;
+
+ iwl_mld_activate_link(mld, link);
+ }
+
+ return 0;
+
+remove_added_link_stas:
+ for_each_set_bit(sta_id, &sta_mask_added, mld->fw->ucode_capa.num_stations) {
+ struct ieee80211_link_sta *link_sta =
+ wiphy_dereference(mld->wiphy,
+ mld->fw_id_to_link_sta[sta_id]);
+
+ if (WARN_ON(!link_sta))
+ continue;
+
+ iwl_mld_remove_link_sta(mld, link_sta);
+ }
+
+ return ret;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/sta.h b/drivers/net/wireless/intel/iwlwifi/mld/sta.h
new file mode 100644
index 000000000000..ddcffd7b9fde
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/sta.h
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#ifndef __iwl_mld_sta_h__
+#define __iwl_mld_sta_h__
+
+#include <net/mac80211.h>
+
+#include "mld.h"
+#include "tx.h"
+
+/**
+ * struct iwl_mld_rxq_dup_data - Duplication detection data, per STA & Rx queue
+ * @last_seq: last sequence per tid.
+ * @last_sub_frame_idx: the index of the last subframe in an A-MSDU. This value
+ * will be zero if the packet is not part of an A-MSDU.
+ */
+struct iwl_mld_rxq_dup_data {
+ __le16 last_seq[IWL_MAX_TID_COUNT + 1];
+ u8 last_sub_frame_idx[IWL_MAX_TID_COUNT + 1];
+} ____cacheline_aligned_in_smp;
+
+/**
+ * struct iwl_mld_link_sta - link-level station
+ *
+ * This represents the link-level STA - the driver-level equivalent of
+ * ieee80211_link_sta.
+ *
+ * @last_rate_n_flags: rate_n_flags from the last &iwl_tlc_update_notif
+ * @signal_avg: the signal average coming from the firmware
+ * @in_fw: whether the link STA is uploaded to the FW (false during restart)
+ * @rcu_head: RCU head for freeing this object
+ * @fw_id: the FW id of this link sta.
+ */
+struct iwl_mld_link_sta {
+ /* Add here fields that need clean up on restart */
+ struct_group(zeroed_on_hw_restart,
+ u32 last_rate_n_flags;
+ bool in_fw;
+ s8 signal_avg;
+ );
+ /* And here fields that survive a fw restart */
+ struct rcu_head rcu_head;
+ u32 fw_id;
+};
+
+#define iwl_mld_link_sta_dereference_check(mld_sta, link_id) \
+ rcu_dereference_check((mld_sta)->link[link_id], \
+ lockdep_is_held(&mld_sta->mld->wiphy->mtx))
+
+#define for_each_mld_link_sta(mld_sta, link_sta, link_id) \
+ for (link_id = 0; link_id < ARRAY_SIZE((mld_sta)->link); \
+ link_id++) \
+ if ((link_sta = \
+ iwl_mld_link_sta_dereference_check(mld_sta, link_id)))
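+
+/* Usage sketch (illustrative only; the debug print is hypothetical):
+ *
+ *	struct iwl_mld_link_sta *mld_link_sta;
+ *	u8 link_id;
+ *
+ *	for_each_mld_link_sta(mld_sta, mld_link_sta, link_id)
+ *		pr_debug("link %u -> fw sta id %u\n",
+ *			 link_id, mld_link_sta->fw_id);
+ */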
+
+#define IWL_NUM_DEFAULT_KEYS 4
+
+/* struct iwl_mld_ptk_pn - Holds Packet Number (PN) per TID.
+ * @rcu_head: RCU head for freeing this data.
+ * @pn: Array storing PN for each TID.
+ */
+struct iwl_mld_ptk_pn {
+ struct rcu_head rcu_head;
+ struct {
+ u8 pn[IWL_MAX_TID_COUNT][IEEE80211_CCMP_PN_LEN];
+ } ____cacheline_aligned_in_smp q[];
+};
+
+/**
+ * struct iwl_mld_per_link_mpdu_counter - per-link TX/RX MPDU counters
+ *
+ * @tx: Number of TX MPDUs.
+ * @rx: Number of RX MPDUs.
+ */
+struct iwl_mld_per_link_mpdu_counter {
+ u32 tx;
+ u32 rx;
+};
+
+/**
+ * struct iwl_mld_per_q_mpdu_counter - per-queue MPDU counter
+ *
+ * @lock: Needed to protect the counters when modified from statistics.
+ * @per_link: per-link counters.
+ * @window_start_time: timestamp of the counting-window start
+ */
+struct iwl_mld_per_q_mpdu_counter {
+ spinlock_t lock;
+ struct iwl_mld_per_link_mpdu_counter per_link[IWL_FW_MAX_LINK_ID + 1];
+ unsigned long window_start_time;
+} ____cacheline_aligned_in_smp;
+
+/**
+ * struct iwl_mld_sta - representation of a station in the driver.
+ *
+ * This represents the MLD-level STA, and will not be added to the FW.
+ * Embedded in ieee80211_sta.
+ *
+ * @vif: pointer to the vif object.
+ * @sta_state: station state according to &enum ieee80211_sta_state
+ * @sta_type: type of this station. See &enum iwl_fw_sta_type
+ * @mld: a pointer to the iwl_mld object
+ * @dup_data: per queue duplicate packet detection data
+ * @data_tx_ant: stores the last TX antenna index; used for setting
+ * TX rate_n_flags for injected data frames (toggles on every TX failure).
+ * @tid_to_baid: a simple map of TID to Block-Ack fw id
+ * @deflink: the default link STA information. For a non-MLO STA, all
+ * link-specific STA information is accessed through @deflink, or
+ * through link[0], which points to the address of @deflink. For an
+ * MLO STA, the first added link STA will point to @deflink.
+ * @link: references to the link STA entries. For a non-MLO STA, all
+ * entries except the first one, i.e. link[0], are NULL by default,
+ * and link information is accessed via @deflink or link[0]. For an
+ * MLO STA, the first added link STA points to @deflink, and the
+ * remaining ones are allocated and assigned to link[link_id], where
+ * link_id is the id assigned by the AP.
+ * @ptk_pn: Array of pointers to PTK PN data, used to track the Packet Number
+ * per key index and per queue (TID).
+ * @mpdu_counters: RX/TX MPDUs counters for each queue.
+ */
+struct iwl_mld_sta {
+ /* Add here fields that need clean up on restart */
+ struct_group(zeroed_on_hw_restart,
+ enum ieee80211_sta_state sta_state;
+ enum iwl_fw_sta_type sta_type;
+ );
+ /* And here fields that survive a fw restart */
+ struct iwl_mld *mld;
+ struct ieee80211_vif *vif;
+ struct iwl_mld_rxq_dup_data *dup_data;
+ u8 tid_to_baid[IWL_MAX_TID_COUNT];
+ u8 data_tx_ant;
+
+ struct iwl_mld_link_sta deflink;
+ struct iwl_mld_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+ struct iwl_mld_ptk_pn __rcu *ptk_pn[IWL_NUM_DEFAULT_KEYS];
+ struct iwl_mld_per_q_mpdu_counter *mpdu_counters;
+};
+
+static inline struct iwl_mld_sta *
+iwl_mld_sta_from_mac80211(struct ieee80211_sta *sta)
+{
+ return (void *)sta->drv_priv;
+}
+
+static inline void
+iwl_mld_cleanup_sta(void *data, struct ieee80211_sta *sta)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ struct iwl_mld_link_sta *mld_link_sta;
+ u8 link_id;
+
+ for (int i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ CLEANUP_STRUCT(iwl_mld_txq_from_mac80211(sta->txq[i]));
+
+ for_each_mld_link_sta(mld_sta, mld_link_sta, link_id) {
+ CLEANUP_STRUCT(mld_link_sta);
+
+ if (!ieee80211_vif_is_mld(mld_sta->vif)) {
+ /* not an MLD STA; only has the deflink with ID zero */
+ WARN_ON(link_id);
+ continue;
+ }
+
+ if (mld_sta->vif->active_links & BIT(link_id))
+ continue;
+
+ /* Should not happen as link removal should always succeed */
+ WARN_ON(1);
+ RCU_INIT_POINTER(mld_sta->link[link_id], NULL);
+ RCU_INIT_POINTER(mld_sta->mld->fw_id_to_link_sta[mld_link_sta->fw_id],
+ NULL);
+ if (mld_link_sta != &mld_sta->deflink)
+ kfree_rcu(mld_link_sta, rcu_head);
+ }
+
+ CLEANUP_STRUCT(mld_sta);
+}
+
+static inline struct iwl_mld_link_sta *
+iwl_mld_link_sta_from_mac80211(struct ieee80211_link_sta *link_sta)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+
+ return iwl_mld_link_sta_dereference_check(mld_sta, link_sta->link_id);
+}
+
+int iwl_mld_add_sta(struct iwl_mld *mld, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif, enum iwl_fw_sta_type type);
+void iwl_mld_remove_sta(struct iwl_mld *mld, struct ieee80211_sta *sta);
+int iwl_mld_fw_sta_id_from_link_sta(struct iwl_mld *mld,
+ struct ieee80211_link_sta *link_sta);
+u32 iwl_mld_fw_sta_id_mask(struct iwl_mld *mld, struct ieee80211_sta *sta);
+int iwl_mld_update_all_link_stations(struct iwl_mld *mld,
+ struct ieee80211_sta *sta);
+void iwl_mld_flush_sta_txqs(struct iwl_mld *mld, struct ieee80211_sta *sta);
+void iwl_mld_wait_sta_txqs_empty(struct iwl_mld *mld,
+ struct ieee80211_sta *sta);
+void iwl_mld_count_mpdu_rx(struct ieee80211_link_sta *link_sta, int queue,
+ u32 count);
+void iwl_mld_count_mpdu_tx(struct ieee80211_link_sta *link_sta, u32 count);
+
+/**
+ * struct iwl_mld_int_sta - representation of an internal station
+ * (a station that exists in the FW and in the driver, but not in mac80211)
+ *
+ * @sta_id: the index of the station in the fw
+ * @queue_id: the id of the queue used by the station
+ * @sta_type: station type. One of &iwl_fw_sta_type
+ */
+struct iwl_mld_int_sta {
+ u8 sta_id;
+ u32 queue_id;
+ enum iwl_fw_sta_type sta_type;
+};
+
+static inline void
+iwl_mld_init_internal_sta(struct iwl_mld_int_sta *internal_sta)
+{
+ internal_sta->sta_id = IWL_INVALID_STA;
+ internal_sta->queue_id = IWL_MLD_INVALID_QUEUE;
+}
+
+static inline void
+iwl_mld_free_internal_sta(struct iwl_mld *mld,
+ struct iwl_mld_int_sta *internal_sta)
+{
+ if (WARN_ON(internal_sta->sta_id == IWL_INVALID_STA))
+ return;
+
+ RCU_INIT_POINTER(mld->fw_id_to_link_sta[internal_sta->sta_id], NULL);
+ iwl_mld_init_internal_sta(internal_sta);
+}
+
+int iwl_mld_add_bcast_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link);
+
+int iwl_mld_add_mcast_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link);
+
+int iwl_mld_add_aux_sta(struct iwl_mld *mld,
+ struct iwl_mld_int_sta *internal_sta);
+
+void iwl_mld_remove_bcast_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link);
+
+void iwl_mld_remove_mcast_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link);
+
+void iwl_mld_remove_aux_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link);
+
+int iwl_mld_update_link_stas(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u16 old_links, u16 new_links);
+#endif /* __iwl_mld_sta_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/stats.c b/drivers/net/wireless/intel/iwlwifi/mld/stats.c
new file mode 100644
index 000000000000..0715bbc31031
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/stats.c
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include "mld.h"
+#include "stats.h"
+#include "sta.h"
+#include "mlo.h"
+#include "hcmd.h"
+#include "iface.h"
+#include "scan.h"
+#include "phy.h"
+#include "fw/api/stats.h"
+
+static int iwl_mld_send_fw_stats_cmd(struct iwl_mld *mld, u32 cfg_mask,
+ u32 cfg_time, u32 type_mask)
+{
+ u32 cmd_id = WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_CMD);
+ struct iwl_system_statistics_cmd stats_cmd = {
+ .cfg_mask = cpu_to_le32(cfg_mask),
+ .config_time_sec = cpu_to_le32(cfg_time),
+ .type_id_mask = cpu_to_le32(type_mask),
+ };
+
+ return iwl_mld_send_cmd_pdu(mld, cmd_id, &stats_cmd);
+}
+
+int iwl_mld_clear_stats_in_fw(struct iwl_mld *mld)
+{
+ u32 cfg_mask = IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK;
+ u32 type_mask = IWL_STATS_NTFY_TYPE_ID_OPER |
+ IWL_STATS_NTFY_TYPE_ID_OPER_PART1;
+
+ return iwl_mld_send_fw_stats_cmd(mld, cfg_mask, 0, type_mask);
+}
+
+static void
+iwl_mld_fill_stats_from_oper_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt,
+ u8 fw_sta_id, struct station_info *sinfo)
+{
+ const struct iwl_system_statistics_notif_oper *notif =
+ (void *)&pkt->data;
+ const struct iwl_stats_ntfy_per_sta *per_sta =
+ &notif->per_sta[fw_sta_id];
+ struct ieee80211_link_sta *link_sta;
+ struct iwl_mld_link_sta *mld_link_sta;
+
+ /* 0 isn't a valid value, but the FW might send 0.
+ * In that case, use the latest non-zero value we stored.
+ */
+ rcu_read_lock();
+
+ link_sta = rcu_dereference(mld->fw_id_to_link_sta[fw_sta_id]);
+ if (IS_ERR_OR_NULL(link_sta))
+ goto unlock;
+
+ mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta);
+ if (WARN_ON(!mld_link_sta))
+ goto unlock;
+
+ if (per_sta->average_energy)
+ mld_link_sta->signal_avg =
+ -(s8)le32_to_cpu(per_sta->average_energy);
+
+ sinfo->signal_avg = mld_link_sta->signal_avg;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
+
+unlock:
+ rcu_read_unlock();
+}
+
+struct iwl_mld_stats_data {
+ u8 fw_sta_id;
+ struct station_info *sinfo;
+ struct iwl_mld *mld;
+};
+
+static bool iwl_mld_wait_stats_handler(struct iwl_notif_wait_data *notif_data,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mld_stats_data *stats_data = data;
+ u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
+
+ switch (cmd) {
+ case WIDE_ID(STATISTICS_GROUP, STATISTICS_OPER_NOTIF):
+ iwl_mld_fill_stats_from_oper_notif(stats_data->mld, pkt,
+ stats_data->fw_sta_id,
+ stats_data->sinfo);
+ break;
+ case WIDE_ID(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF):
+ break;
+ case WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_END_NOTIF):
+ return true;
+ }
+
+ return false;
+}
+
+static int
+iwl_mld_fw_stats_to_mac80211(struct iwl_mld *mld, struct iwl_mld_sta *mld_sta,
+ struct station_info *sinfo)
+{
+ u32 cfg_mask = IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK |
+ IWL_STATS_CFG_FLG_RESET_MSK;
+ u32 type_mask = IWL_STATS_NTFY_TYPE_ID_OPER |
+ IWL_STATS_NTFY_TYPE_ID_OPER_PART1;
+ static const u16 notifications[] = {
+ WIDE_ID(STATISTICS_GROUP, STATISTICS_OPER_NOTIF),
+ WIDE_ID(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF),
+ WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_END_NOTIF),
+ };
+ struct iwl_mld_stats_data wait_stats_data = {
+ /* We don't support drv_sta_statistics in EMLSR */
+ .fw_sta_id = mld_sta->deflink.fw_id,
+ .sinfo = sinfo,
+ .mld = mld,
+ };
+ struct iwl_notification_wait stats_wait;
+ int ret;
+
+ iwl_init_notification_wait(&mld->notif_wait, &stats_wait,
+ notifications, ARRAY_SIZE(notifications),
+ iwl_mld_wait_stats_handler,
+ &wait_stats_data);
+
+ ret = iwl_mld_send_fw_stats_cmd(mld, cfg_mask, 0, type_mask);
+ if (ret) {
+ iwl_remove_notification(&mld->notif_wait, &stats_wait);
+ return ret;
+ }
+
+ /* Wait 500ms for OPERATIONAL, PART1, and END notifications,
+ * which should be sufficient for the firmware to gather data
+ * from all LMACs and send notifications to the host.
+ */
+ ret = iwl_wait_notification(&mld->notif_wait, &stats_wait, HZ / 2);
+ if (ret)
+ return ret;
+
+ /* When periodic statistics are sent, the FW will clear its statistics
+ * DB. If the statistics request here happens shortly afterwards, the
+ * response will contain data collected over a short time interval
+ * only. Such an incomplete response shouldn't go through the general
+ * statistics processing, so delete these notifications from the
+ * handlers list so they won't be processed.
+ */
+ iwl_mld_delete_handlers(mld, notifications, ARRAY_SIZE(notifications));
+
+ return 0;
+}
+
+#define PERIODIC_STATS_SECONDS 5
+
+int iwl_mld_request_periodic_fw_stats(struct iwl_mld *mld, bool enable)
+{
+ u32 cfg_mask = enable ? 0 : IWL_STATS_CFG_FLG_DISABLE_NTFY_MSK;
+ u32 type_mask = enable ? (IWL_STATS_NTFY_TYPE_ID_OPER |
+ IWL_STATS_NTFY_TYPE_ID_OPER_PART1) : 0;
+ u32 cfg_time = enable ? PERIODIC_STATS_SECONDS : 0;
+
+ return iwl_mld_send_fw_stats_cmd(mld, cfg_mask, cfg_time, type_mask);
+}
+
+static void iwl_mld_sta_stats_fill_txrate(struct iwl_mld_sta *mld_sta,
+ struct station_info *sinfo)
+{
+ struct rate_info *rinfo = &sinfo->txrate;
+ u32 rate_n_flags = mld_sta->deflink.last_rate_n_flags;
+ u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+ u32 gi_ltf;
+
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+
+ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ rinfo->bw = RATE_INFO_BW_20;
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ rinfo->bw = RATE_INFO_BW_40;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ rinfo->bw = RATE_INFO_BW_80;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ rinfo->bw = RATE_INFO_BW_160;
+ break;
+ case RATE_MCS_CHAN_WIDTH_320:
+ rinfo->bw = RATE_INFO_BW_320;
+ break;
+ }
+
+ if (format == RATE_MCS_CCK_MSK || format == RATE_MCS_LEGACY_OFDM_MSK) {
+ int rate = u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK);
+
+ /* add the offset needed to get to the legacy ofdm indices */
+ if (format == RATE_MCS_LEGACY_OFDM_MSK)
+ rate += IWL_FIRST_OFDM_RATE;
+
+ switch (rate) {
+ case IWL_RATE_1M_INDEX:
+ rinfo->legacy = 10;
+ break;
+ case IWL_RATE_2M_INDEX:
+ rinfo->legacy = 20;
+ break;
+ case IWL_RATE_5M_INDEX:
+ rinfo->legacy = 55;
+ break;
+ case IWL_RATE_11M_INDEX:
+ rinfo->legacy = 110;
+ break;
+ case IWL_RATE_6M_INDEX:
+ rinfo->legacy = 60;
+ break;
+ case IWL_RATE_9M_INDEX:
+ rinfo->legacy = 90;
+ break;
+ case IWL_RATE_12M_INDEX:
+ rinfo->legacy = 120;
+ break;
+ case IWL_RATE_18M_INDEX:
+ rinfo->legacy = 180;
+ break;
+ case IWL_RATE_24M_INDEX:
+ rinfo->legacy = 240;
+ break;
+ case IWL_RATE_36M_INDEX:
+ rinfo->legacy = 360;
+ break;
+ case IWL_RATE_48M_INDEX:
+ rinfo->legacy = 480;
+ break;
+ case IWL_RATE_54M_INDEX:
+ rinfo->legacy = 540;
+ }
+ return;
+ }
+
+ rinfo->nss = u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1;
+
+ if (format == RATE_MCS_HT_MSK)
+ rinfo->mcs = RATE_HT_MCS_INDEX(rate_n_flags);
+ else
+ rinfo->mcs = u32_get_bits(rate_n_flags, RATE_MCS_CODE_MSK);
+
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+
+ switch (format) {
+ case RATE_MCS_EHT_MSK:
+ rinfo->flags |= RATE_INFO_FLAGS_EHT_MCS;
+ break;
+ case RATE_MCS_HE_MSK:
+ gi_ltf = u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK);
+
+ rinfo->flags |= RATE_INFO_FLAGS_HE_MCS;
+
+ if (rate_n_flags & RATE_MCS_HE_106T_MSK) {
+ rinfo->bw = RATE_INFO_BW_HE_RU;
+ rinfo->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ }
+
+ switch (rate_n_flags & RATE_MCS_HE_TYPE_MSK) {
+ case RATE_MCS_HE_TYPE_SU:
+ case RATE_MCS_HE_TYPE_EXT_SU:
+ if (gi_ltf == 0 || gi_ltf == 1)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ else if (gi_ltf == 2)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else if (gi_ltf == 3)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ else
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ break;
+ case RATE_MCS_HE_TYPE_MU:
+ if (gi_ltf == 0 || gi_ltf == 1)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ else if (gi_ltf == 2)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ case RATE_MCS_HE_TYPE_TRIG:
+ if (gi_ltf == 0 || gi_ltf == 1)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ }
+
+ if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK)
+ rinfo->he_dcm = 1;
+ break;
+ case RATE_MCS_HT_MSK:
+ rinfo->flags |= RATE_INFO_FLAGS_MCS;
+ break;
+ case RATE_MCS_VHT_MSK:
+ rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
+ break;
+ }
+}
+
+void iwl_mld_mac80211_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+
+ /* This API is not EMLSR ready, so we cannot provide complete
+ * information if EMLSR is active
+ */
+ if (hweight16(vif->active_links) > 1)
+ return;
+
+ if (iwl_mld_fw_stats_to_mac80211(mld_sta->mld, mld_sta, sinfo))
+ return;
+
+ iwl_mld_sta_stats_fill_txrate(mld_sta, sinfo);
+
+ /* TODO: NL80211_STA_INFO_BEACON_RX */
+
+ /* TODO: NL80211_STA_INFO_BEACON_SIGNAL_AVG */
+}
+
+#define IWL_MLD_TRAFFIC_LOAD_MEDIUM_THRESH 10 /* percentage */
+#define IWL_MLD_TRAFFIC_LOAD_HIGH_THRESH 50 /* percentage */
+#define IWL_MLD_TRAFFIC_LOAD_MIN_WINDOW_USEC (500 * 1000)
+
+static u8 iwl_mld_stats_load_percentage(u32 last_ts_usec, u32 curr_ts_usec,
+ u32 total_airtime_usec)
+{
+ u32 elapsed_usec = curr_ts_usec - last_ts_usec;
+
+ if (elapsed_usec < IWL_MLD_TRAFFIC_LOAD_MIN_WINDOW_USEC)
+ return 0;
+
+ return (100 * total_airtime_usec / elapsed_usec);
+}
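+
+/* Example with illustrative numbers: 90000 usec of airtime over a
+ * 600000 usec window gives 100 * 90000 / 600000 = 15%, above the 10%
+ * medium threshold but below the 50% high one, so the caller below
+ * classifies the traffic load as medium.
+ */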
+
+static void iwl_mld_stats_recalc_traffic_load(struct iwl_mld *mld,
+ u32 total_airtime_usec,
+ u32 curr_ts_usec)
+{
+ u32 last_ts_usec = mld->scan.traffic_load.last_stats_ts_usec;
+ u8 load_prec;
+
+ /* Skip the calculation as this is the first notification received */
+ if (!last_ts_usec)
+ goto out;
+
+ load_prec = iwl_mld_stats_load_percentage(last_ts_usec, curr_ts_usec,
+ total_airtime_usec);
+
+ if (load_prec > IWL_MLD_TRAFFIC_LOAD_HIGH_THRESH)
+ mld->scan.traffic_load.status = IWL_MLD_TRAFFIC_HIGH;
+ else if (load_prec > IWL_MLD_TRAFFIC_LOAD_MEDIUM_THRESH)
+ mld->scan.traffic_load.status = IWL_MLD_TRAFFIC_MEDIUM;
+ else
+ mld->scan.traffic_load.status = IWL_MLD_TRAFFIC_LOW;
+
+out:
+ mld->scan.traffic_load.last_stats_ts_usec = curr_ts_usec;
+}
+
+static void iwl_mld_update_link_sig(struct ieee80211_vif *vif, int sig,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct iwl_mld *mld = iwl_mld_vif_from_mac80211(vif)->mld;
+ int exit_emlsr_thresh;
+
+ if (sig == 0) {
+ IWL_DEBUG_RX(mld, "RSSI is 0 - skip signal based decision\n");
+ return;
+ }
+
+ /* TODO: task=statistics handle CQM notifications */
+
+ if (sig < IWL_MLD_LOW_RSSI_MLO_SCAN_THRESH)
+ iwl_mld_int_mlo_scan(mld, vif);
+
+ if (!iwl_mld_emlsr_active(vif))
+ return;
+
+ /* We are in EMLSR, check if we need to exit */
+ exit_emlsr_thresh =
+ iwl_mld_get_emlsr_rssi_thresh(mld, &bss_conf->chanreq.oper,
+ true);
+
+ if (sig < exit_emlsr_thresh)
+ iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_LOW_RSSI,
+ iwl_mld_get_other_link(vif,
+ bss_conf->link_id));
+}
+
+static void
+iwl_mld_process_per_link_stats(struct iwl_mld *mld,
+ const struct iwl_stats_ntfy_per_link *per_link,
+ u32 curr_ts_usec)
+{
+ u32 total_airtime_usec = 0;
+
+ for (u32 fw_id = 0;
+ fw_id < ARRAY_SIZE(mld->fw_id_to_bss_conf);
+ fw_id++) {
+ const struct iwl_stats_ntfy_per_link *link_stats;
+ struct ieee80211_bss_conf *bss_conf;
+ int sig;
+
+ bss_conf = wiphy_dereference(mld->wiphy,
+ mld->fw_id_to_bss_conf[fw_id]);
+ if (!bss_conf || bss_conf->vif->type != NL80211_IFTYPE_STATION)
+ continue;
+
+ link_stats = &per_link[fw_id];
+
+ total_airtime_usec += le32_to_cpu(link_stats->air_time);
+
+ sig = -le32_to_cpu(link_stats->beacon_filter_average_energy);
+ iwl_mld_update_link_sig(bss_conf->vif, sig, bss_conf);
+
+ /* TODO: parse more fields here (task=statistics)*/
+ }
+
+ iwl_mld_stats_recalc_traffic_load(mld, total_airtime_usec,
+ curr_ts_usec);
+}
+
+static void
+iwl_mld_process_per_sta_stats(struct iwl_mld *mld,
+ const struct iwl_stats_ntfy_per_sta *per_sta)
+{
+ for (int i = 0; i < mld->fw->ucode_capa.num_stations; i++) {
+ struct ieee80211_link_sta *link_sta =
+ wiphy_dereference(mld->wiphy,
+ mld->fw_id_to_link_sta[i]);
+ struct iwl_mld_link_sta *mld_link_sta;
+ s8 avg_energy =
+ -(s8)le32_to_cpu(per_sta[i].average_energy);
+
+ if (IS_ERR_OR_NULL(link_sta) || !avg_energy)
+ continue;
+
+ mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta);
+ if (WARN_ON(!mld_link_sta))
+ continue;
+
+ mld_link_sta->signal_avg = avg_energy;
+ }
+}
+
+static void iwl_mld_fill_chanctx_stats(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ void *data)
+{
+ struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);
+ const struct iwl_stats_ntfy_per_phy *per_phy = data;
+ u32 new_load, old_load;
+
+ if (WARN_ON(phy->fw_id >= IWL_STATS_MAX_PHY_OPERATIONAL))
+ return;
+
+ phy->channel_load_by_us =
+ le32_to_cpu(per_phy[phy->fw_id].channel_load_by_us);
+
+ old_load = phy->avg_channel_load_not_by_us;
+ new_load = le32_to_cpu(per_phy[phy->fw_id].channel_load_not_by_us);
+ if (IWL_FW_CHECK(phy->mld, new_load > 100, "Invalid channel load %u\n",
+ new_load))
+ return;
+
+ /* give a weight of 0.5 for the old value */
+ phy->avg_channel_load_not_by_us = (new_load >> 1) + (old_load >> 1);
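+ /* e.g. old_load = 40, new_load = 60 -> (60 >> 1) + (40 >> 1) = 50 */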
+
+ iwl_mld_emlsr_check_chan_load(hw, phy, old_load);
+}
+
+static void
+iwl_mld_process_per_phy_stats(struct iwl_mld *mld,
+ const struct iwl_stats_ntfy_per_phy *per_phy)
+{
+ ieee80211_iter_chan_contexts_mtx(mld->hw,
+ iwl_mld_fill_chanctx_stats,
+ (void *)(uintptr_t)per_phy);
+}
+
+void iwl_mld_handle_stats_oper_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ const struct iwl_system_statistics_notif_oper *stats =
+ (void *)&pkt->data;
+ u32 curr_ts_usec = le32_to_cpu(stats->time_stamp);
+
+ BUILD_BUG_ON(ARRAY_SIZE(stats->per_sta) != IWL_STATION_COUNT_MAX);
+ BUILD_BUG_ON(ARRAY_SIZE(stats->per_link) <
+ ARRAY_SIZE(mld->fw_id_to_bss_conf));
+
+ iwl_mld_process_per_link_stats(mld, stats->per_link, curr_ts_usec);
+ iwl_mld_process_per_sta_stats(mld, stats->per_sta);
+ iwl_mld_process_per_phy_stats(mld, stats->per_phy);
+
+ iwl_mld_check_omi_bw_reduction(mld);
+}
+
+void iwl_mld_handle_stats_oper_part1_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ /* TODO */
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/stats.h b/drivers/net/wireless/intel/iwlwifi/mld/stats.h
new file mode 100644
index 000000000000..de434e66d555
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/stats.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_stats_h__
+#define __iwl_mld_stats_h__
+
+int iwl_mld_request_periodic_fw_stats(struct iwl_mld *mld, bool enable);
+
+void iwl_mld_mac80211_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo);
+
+void iwl_mld_handle_stats_oper_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+void iwl_mld_handle_stats_oper_part1_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+
+int iwl_mld_clear_stats_in_fw(struct iwl_mld *mld);
+
+#endif /* __iwl_mld_stats_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile
new file mode 100644
index 000000000000..36317feb923b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+iwlmld-tests-y += module.o hcmd.o utils.o link.o rx.o agg.o link-selection.o
+
+ccflags-y += -I$(src)/../
+obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += iwlmld-tests.o
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/agg.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/agg.c
new file mode 100644
index 000000000000..1fd664be1a7c
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/agg.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * KUnit tests for the RX aggregation reorder buffer
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <kunit/test.h>
+#include <kunit/static_stub.h>
+#include <kunit/skbuff.h>
+
+#include "utils.h"
+#include "mld.h"
+#include "sta.h"
+#include "agg.h"
+#include "rx.h"
+
+#define FC_QOS_DATA (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)
+#define BA_WINDOW_SIZE 64
+#define QUEUE 0
+
+static const struct reorder_buffer_case {
+ const char *desc;
+ struct {
+ /* ieee80211_hdr fields */
+ u16 fc;
+ u8 tid;
+ bool multicast;
+ /* iwl_rx_mpdu_desc fields */
+ u16 nssn;
+ /* used also for setting hdr::seq_ctrl */
+ u16 sn;
+ u8 baid;
+ bool amsdu;
+ bool last_subframe;
+ bool old_sn;
+ bool dup;
+ } rx_pkt;
+ struct {
+ bool valid;
+ u16 head_sn;
+ u8 baid;
+ u16 num_entries;
+ /* The test prepares the reorder buffer with fake skbs based
+ * on the sequence numbers provided in the @entries array.
+ */
+ struct {
+ u16 sn;
+ /* Set add_subframes > 0 to simulate an A-MSDU by
+ * queueing additional @add_subframes skbs in the
+ * appropriate reorder buffer entry (based on the @sn)
+ */
+ u8 add_subframes;
+ } entries[BA_WINDOW_SIZE];
+ } reorder_buf_state;
+ struct {
+ enum iwl_mld_reorder_result reorder_res;
+ u16 head_sn;
+ u16 num_stored;
+ u16 skb_release_order[BA_WINDOW_SIZE];
+ u16 skb_release_order_count;
+ } expected;
+} reorder_buffer_cases[] = {
+ {
+ .desc = "RX packet with invalid BAID",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .baid = IWL_RX_REORDER_DATA_INVALID_BAID,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ },
+ .expected = {
+ /* Invalid BAID should not be buffered. The frame is
+ * passed to the network stack immediately.
+ */
+ .reorder_res = IWL_MLD_PASS_SKB,
+ .num_stored = 0,
+ },
+ },
+ {
+ .desc = "RX Multicast packet",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .multicast = true,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ },
+ .expected = {
+ /* Multicast packets are not buffered. The packet is
+ * passed to the network stack immediately.
+ */
+ .reorder_res = IWL_MLD_PASS_SKB,
+ .num_stored = 0,
+ },
+ },
+ {
+ .desc = "RX non-QoS data",
+ .rx_pkt = {
+ .fc = IEEE80211_FTYPE_DATA,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ },
+ .expected = {
+ /* non-QoS data frames do not require reordering.
+ * The packet is passed to the network stack
+ * immediately.
+ */
+ .reorder_res = IWL_MLD_PASS_SKB,
+ },
+ },
+ {
+ .desc = "RX old SN packet, reorder buffer is not yet valid",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .old_sn = true,
+ },
+ .reorder_buf_state = {
+ .valid = false,
+ },
+ .expected = {
+ /* The buffer is invalid and the RX packet has an old
+ * SN. The packet is passed to the network stack
+ * immediately.
+ */
+ .reorder_res = IWL_MLD_PASS_SKB,
+ },
+ },
+ {
+ .desc = "RX old SN packet, reorder buffer valid",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .old_sn = true,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ .head_sn = 100,
+ },
+ .expected = {
+ /* The buffer is valid and the RX packet has an old SN.
+ * The packet should be dropped.
+ */
+ .reorder_res = IWL_MLD_DROP_SKB,
+ .num_stored = 0,
+ .head_sn = 100,
+ },
+ },
+ {
+ .desc = "RX duplicate packet",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .dup = true,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ .head_sn = 100,
+ },
+ .expected = {
+ /* Duplicate packets should be dropped */
+ .reorder_res = IWL_MLD_DROP_SKB,
+ .num_stored = 0,
+ .head_sn = 100,
+ },
+ },
+ {
+ .desc = "RX In-order packet, sn < nssn",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .sn = 100,
+ .nssn = 101,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ .head_sn = 100,
+ },
+ .expected = {
+ /* 1. Reorder buffer is empty.
+ * 2. RX packet SN is in order and less than NSSN.
+ * Packet is released to the network stack immediately
+ * and buffer->head_sn is updated to NSSN.
+ */
+ .reorder_res = IWL_MLD_PASS_SKB,
+ .num_stored = 0,
+ .head_sn = 101,
+ },
+ },
+ {
+ .desc = "RX In-order packet, sn == head_sn",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .sn = 101,
+ .nssn = 100,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ .head_sn = 101,
+ },
+ .expected = {
+ /* 1. Reorder buffer is empty.
+ * 2. RX packet SN is equal to buffer->head_sn.
+ * Packet is released to the network stack immediately
+ * and buffer->head_sn is incremented.
+ */
+ .reorder_res = IWL_MLD_PASS_SKB,
+ .num_stored = 0,
+ .head_sn = 102,
+ },
+ },
+ {
+ .desc = "RX In-order packet, IEEE80211_MAX_SN wrap around",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .sn = IEEE80211_MAX_SN,
+ .nssn = IEEE80211_MAX_SN - 1,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ .head_sn = IEEE80211_MAX_SN,
+ },
+ .expected = {
+ /* 1. Reorder buffer is empty.
+ * 2. RX SN == buffer->head_sn == IEEE80211_MAX_SN
+ * Packet is released to the network stack immediately
+ * and buffer->head_sn is incremented correctly (wraps
+ * around to 0).
+ */
+ .reorder_res = IWL_MLD_PASS_SKB,
+ .num_stored = 0,
+ .head_sn = 0,
+ },
+ },
+ {
+ .desc = "RX Out-of-order packet, pending packet in buffer",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .sn = 100,
+ .nssn = 102,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ .head_sn = 100,
+ .num_entries = 1,
+ .entries[0].sn = 101,
+ },
+ .expected = {
+ /* 1. Reorder buffer contains one packet with SN=101.
+ * 2. RX packet SN = buffer->head_sn.
+ * Both packets are released (in order) to the network
+ * stack as there are no gaps.
+ */
+ .reorder_res = IWL_MLD_BUFFERED_SKB,
+ .num_stored = 0,
+ .head_sn = 102,
+ .skb_release_order = {100, 101},
+ .skb_release_order_count = 2,
+ },
+ },
+ {
+ .desc = "RX Out-of-order packet, pending packet in buffer (wrap around)",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .sn = 0,
+ .nssn = 1,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ .head_sn = IEEE80211_MAX_SN - 1,
+ .num_entries = 1,
+ .entries[0].sn = IEEE80211_MAX_SN,
+ },
+ .expected = {
+ /* 1. Reorder buffer contains one packet with
+ * SN=IEEE80211_MAX_SN.
+ * 2. RX Packet SN = 0 (after wrap around)
+ * Both packets are released (in order) to the network
+ * stack as there are no gaps.
+ */
+ .reorder_res = IWL_MLD_BUFFERED_SKB,
+ .num_stored = 0,
+ .head_sn = 1,
+ .skb_release_order = { 4095, 0 },
+ .skb_release_order_count = 2,
+ },
+ },
+ {
+ .desc = "RX Out-of-order packet, filling 1/2 holes in buffer, release RX packet",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .sn = 100,
+ .nssn = 101,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ .head_sn = 100,
+ .num_entries = 1,
+ .entries[0].sn = 102,
+ },
+ .expected = {
+ /* 1. Reorder buffer contains one packet with SN=102.
+ * 2. There are 2 holes at SN={100, 101}.
+ * Only the RX packet (SN=100) is released, there is
+ * still a hole at 101.
+ */
+ .reorder_res = IWL_MLD_BUFFERED_SKB,
+ .num_stored = 1,
+ .head_sn = 101,
+ .skb_release_order = {100},
+ .skb_release_order_count = 1,
+ },
+ },
+ {
+ .desc = "RX Out-of-order packet, filling 1/2 holes, release 2 packets",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .sn = 102,
+ .nssn = 103,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ .head_sn = 100,
+ .num_entries = 3,
+ .entries[0].sn = 101,
+ .entries[1].sn = 104,
+ .entries[2].sn = 105,
+ },
+ .expected = {
+ /* 1. Reorder buffer contains three packets.
+ * 2. RX packet fills one of two holes (at SN=102).
+ * Two packets are released (until the next hole at
+ * SN=103).
+ */
+ .reorder_res = IWL_MLD_BUFFERED_SKB,
+ .num_stored = 2,
+ .head_sn = 103,
+ .skb_release_order = {101, 102},
+ .skb_release_order_count = 2,
+ },
+ },
+ {
+ .desc = "RX Out-of-order packet, filling 1/1 holes, no packets released",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .sn = 102,
+ .nssn = 100,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ .head_sn = 100,
+ .num_entries = 3,
+ .entries[0].sn = 101,
+ .entries[1].sn = 103,
+ .entries[2].sn = 104,
+ },
+ .expected = {
+ /* 1. Reorder buffer contains three packets:
+ * SN={101, 103, 104}.
+ * 2. RX packet fills a hole (SN=102), but NSSN is
+ * smaller than buffered frames.
+ * No packets can be released yet and buffer->head_sn
+ * is not updated.
+ */
+ .reorder_res = IWL_MLD_BUFFERED_SKB,
+ .num_stored = 4,
+ .head_sn = 100,
+ },
+ },
+ {
+ .desc = "RX In-order A-MSDU, last subframe",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .sn = 100,
+ .nssn = 101,
+ .amsdu = true,
+ .last_subframe = true,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ .head_sn = 100,
+ .num_entries = 1,
+ .entries[0] = {
+ .sn = 100,
+ .add_subframes = 1,
+ },
+ },
+ .expected = {
+ /* 1. Reorder buffer contains a 2-subframe A-MSDU
+ * at SN=100.
+ * 2. RX packet is the last SN=100 A-MSDU subframe.
+ * All packets are released in order (3 x SN=100).
+ */
+ .reorder_res = IWL_MLD_BUFFERED_SKB,
+ .num_stored = 0,
+ .head_sn = 101,
+ .skb_release_order = {100, 100, 100},
+ .skb_release_order_count = 3,
+ },
+ },
+ {
+ .desc = "RX In-order A-MSDU, not the last subframe",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .sn = 100,
+ .nssn = 101,
+ .amsdu = true,
+ .last_subframe = false,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ .head_sn = 100,
+ .num_entries = 1,
+ .entries[0] = {
+ .sn = 100,
+ .add_subframes = 1,
+ },
+ },
+ .expected = {
+ /* 1. Reorder buffer contains a 2-subframe A-MSDU
+ * at SN=100.
+ * 2. RX packet is an additional SN=100 A-MSDU subframe,
+ * but not the last one.
+ * No packets are released and head_sn is not updated.
+ */
+ .reorder_res = IWL_MLD_BUFFERED_SKB,
+ .num_stored = 3,
+ .head_sn = 100,
+ },
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(test_reorder_buffer, reorder_buffer_cases, desc);
+
+static struct sk_buff_head g_released_skbs;
+static u16 g_num_released_skbs;
+
+/* Add released skbs from the reorder buffer to a global list; this allows us
+ * to verify the correct release order of packets after they pass through the
+ * simulated reorder logic.
+ */
+static void
+fake_iwl_mld_pass_packet_to_mac80211(struct iwl_mld *mld,
+ struct napi_struct *napi,
+ struct sk_buff *skb, int queue,
+ struct ieee80211_sta *sta)
+{
+ __skb_queue_tail(&g_released_skbs, skb);
+ g_num_released_skbs++;
+}
+
+static u32
+fake_iwl_mld_fw_sta_id_mask(struct iwl_mld *mld, struct ieee80211_sta *sta)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ struct iwl_mld_link_sta *mld_link_sta;
+ u8 link_id;
+ u32 sta_mask = 0;
+
+ /* This is the expectation in the real function */
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* We can't use for_each_sta_active_link */
+ for_each_mld_link_sta(mld_sta, mld_link_sta, link_id)
+ sta_mask |= BIT(mld_link_sta->fw_id);
+ return sta_mask;
+}
+
+static struct iwl_rx_mpdu_desc *setup_mpdu_desc(void)
+{
+ struct kunit *test = kunit_get_current_test();
+ const struct reorder_buffer_case *param =
+ (const void *)(test->param_value);
+ struct iwl_rx_mpdu_desc *mpdu_desc;
+
+ KUNIT_ALLOC_AND_ASSERT(test, mpdu_desc);
+
+ mpdu_desc->reorder_data |=
+ cpu_to_le32(FIELD_PREP(IWL_RX_MPDU_REORDER_BAID_MASK,
+ param->rx_pkt.baid));
+ mpdu_desc->reorder_data |=
+ cpu_to_le32(FIELD_PREP(IWL_RX_MPDU_REORDER_SN_MASK,
+ param->rx_pkt.sn));
+ mpdu_desc->reorder_data |=
+ cpu_to_le32(FIELD_PREP(IWL_RX_MPDU_REORDER_NSSN_MASK,
+ param->rx_pkt.nssn));
+ if (param->rx_pkt.old_sn)
+ mpdu_desc->reorder_data |=
+ cpu_to_le32(IWL_RX_MPDU_REORDER_BA_OLD_SN);
+
+ if (param->rx_pkt.dup)
+ mpdu_desc->status |= cpu_to_le32(IWL_RX_MPDU_STATUS_DUPLICATE);
+
+ if (param->rx_pkt.amsdu) {
+ mpdu_desc->mac_flags2 |= IWL_RX_MPDU_MFLG2_AMSDU;
+ if (param->rx_pkt.last_subframe)
+ mpdu_desc->amsdu_info |=
+ IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
+ }
+
+ return mpdu_desc;
+}
+
+static struct sk_buff *alloc_and_setup_skb(u16 fc, u16 seq_ctrl, u8 tid,
+ bool mcast)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct ieee80211_hdr hdr = {
+ .frame_control = cpu_to_le16(fc),
+ .seq_ctrl = cpu_to_le16(seq_ctrl),
+ };
+ struct sk_buff *skb;
+
+ skb = kunit_zalloc_skb(test, 128, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, skb);
+
+ if (ieee80211_is_data_qos(hdr.frame_control)) {
+ u8 *qc = ieee80211_get_qos_ctl(&hdr);
+
+ qc[0] = tid & IEEE80211_QOS_CTL_TID_MASK;
+ }
+
+ if (mcast)
+ hdr.addr1[0] = 0x1;
+
+ skb_set_mac_header(skb, skb->len);
+ skb_put_data(skb, &hdr, ieee80211_hdrlen(hdr.frame_control));
+
+ return skb;
+}
+
+static struct iwl_mld_reorder_buffer *
+setup_reorder_buffer(struct iwl_mld_baid_data *baid_data)
+{
+ struct kunit *test = kunit_get_current_test();
+ const struct reorder_buffer_case *param =
+ (const void *)(test->param_value);
+ struct iwl_mld_reorder_buffer *buffer = baid_data->reorder_buf;
+ struct iwl_mld_reorder_buf_entry *entries = baid_data->entries;
+ struct sk_buff *fake_skb;
+
+ buffer->valid = param->reorder_buf_state.valid;
+ buffer->head_sn = param->reorder_buf_state.head_sn;
+ buffer->queue = QUEUE;
+
+ for (int i = 0; i < baid_data->buf_size; i++)
+ __skb_queue_head_init(&entries[i].frames);
+
+ for (int i = 0; i < param->reorder_buf_state.num_entries; i++) {
+ u16 sn = param->reorder_buf_state.entries[i].sn;
+ int index = sn % baid_data->buf_size;
+ u8 add_subframes =
+ param->reorder_buf_state.entries[i].add_subframes;
+ /* create 1 skb per entry, plus additional skbs according to the
+ * number of requested subframes
+ */
+ u8 num_skbs = 1 + add_subframes;
+
+ for (int j = 0; j < num_skbs; j++) {
+ fake_skb = alloc_and_setup_skb(FC_QOS_DATA, sn, 0,
+ false);
+ __skb_queue_tail(&entries[index].frames, fake_skb);
+ buffer->num_stored++;
+ }
+ }
+
+ return buffer;
+}
+
+static struct iwl_mld_reorder_buffer *setup_ba_data(struct ieee80211_sta *sta)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct iwl_mld *mld = test->priv;
+ const struct reorder_buffer_case *param =
+ (const void *)(test->param_value);
+ struct iwl_mld_baid_data *baid_data = NULL;
+ struct iwl_mld_reorder_buffer *buffer;
+ u32 reorder_buf_size = BA_WINDOW_SIZE * sizeof(baid_data->entries[0]);
+ u8 baid = param->reorder_buf_state.baid;
+
+ /* Assuming only 1 RXQ */
+ KUNIT_ALLOC_AND_ASSERT_SIZE(test, baid_data,
+ sizeof(*baid_data) + reorder_buf_size);
+
+ baid_data->baid = baid;
+ baid_data->tid = param->rx_pkt.tid;
+ baid_data->buf_size = BA_WINDOW_SIZE;
+
+ wiphy_lock(mld->wiphy);
+ baid_data->sta_mask = iwl_mld_fw_sta_id_mask(mld, sta);
+ wiphy_unlock(mld->wiphy);
+
+ baid_data->entries_per_queue = BA_WINDOW_SIZE;
+
+ buffer = setup_reorder_buffer(baid_data);
+
+ KUNIT_EXPECT_NULL(test, rcu_access_pointer(mld->fw_id_to_ba[baid]));
+ rcu_assign_pointer(mld->fw_id_to_ba[baid], baid_data);
+
+ return buffer;
+}
+
+static void test_reorder_buffer(struct kunit *test)
+{
+ struct iwl_mld *mld = test->priv;
+ const struct reorder_buffer_case *param =
+ (const void *)(test->param_value);
+ struct iwl_rx_mpdu_desc *mpdu_desc;
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
+ struct sk_buff *skb;
+ struct iwl_mld_reorder_buffer *buffer;
+ enum iwl_mld_reorder_result reorder_res;
+ u16 skb_release_order_count = param->expected.skb_release_order_count;
+ u16 skb_idx = 0;
+
+ /* Init globals and activate stubs */
+ __skb_queue_head_init(&g_released_skbs);
+ g_num_released_skbs = 0;
+ kunit_activate_static_stub(test, iwl_mld_fw_sta_id_mask,
+ fake_iwl_mld_fw_sta_id_mask);
+ kunit_activate_static_stub(test, iwl_mld_pass_packet_to_mac80211,
+ fake_iwl_mld_pass_packet_to_mac80211);
+
+ vif = iwlmld_kunit_add_vif(false, NL80211_IFTYPE_STATION);
+ sta = iwlmld_kunit_setup_sta(vif, IEEE80211_STA_AUTHORIZED, -1);
+
+ /* Prepare skb, mpdu_desc, BA data and the reorder buffer */
+ skb = alloc_and_setup_skb(param->rx_pkt.fc, param->rx_pkt.sn,
+ param->rx_pkt.tid, param->rx_pkt.multicast);
+ buffer = setup_ba_data(sta);
+ mpdu_desc = setup_mpdu_desc();
+
+ rcu_read_lock();
+ reorder_res = iwl_mld_reorder(mld, NULL, QUEUE, sta, skb, mpdu_desc);
+ rcu_read_unlock();
+
+ KUNIT_ASSERT_EQ(test, reorder_res, param->expected.reorder_res);
+ KUNIT_ASSERT_EQ(test, buffer->num_stored, param->expected.num_stored);
+ KUNIT_ASSERT_EQ(test, buffer->head_sn, param->expected.head_sn);
+
+ /* Verify skbs release order */
+ KUNIT_ASSERT_EQ(test, skb_release_order_count, g_num_released_skbs);
+ while ((skb = __skb_dequeue(&g_released_skbs))) {
+ struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);
+
+ KUNIT_ASSERT_EQ(test, le16_to_cpu(hdr->seq_ctrl),
+ param->expected.skb_release_order[skb_idx]);
+ skb_idx++;
+ }
+ KUNIT_ASSERT_EQ(test, skb_idx, skb_release_order_count);
+}
+
+static struct kunit_case reorder_buffer_test_cases[] = {
+ KUNIT_CASE_PARAM(test_reorder_buffer, test_reorder_buffer_gen_params),
+ {},
+};
+
+static struct kunit_suite reorder_buffer = {
+ .name = "iwlmld-reorder-buffer",
+ .test_cases = reorder_buffer_test_cases,
+ .init = iwlmld_kunit_test_init,
+};
+
+kunit_test_suite(reorder_buffer);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c
new file mode 100644
index 000000000000..4e189bf8b3fb
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * KUnit tests for the iwlmld host command (hcmd) name tables
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <kunit/test.h>
+
+#include <iwl-trans.h>
+#include "mld.h"
+
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+
+static void test_hcmd_names_sorted(struct kunit *test)
+{
+ int i;
+
+ for (i = 0; i < global_iwl_mld_goups_size; i++) {
+ const struct iwl_hcmd_arr *arr = &iwl_mld_groups[i];
+ int j;
+
+ if (!arr->arr)
+ continue;
+ for (j = 0; j < arr->size - 1; j++)
+ KUNIT_EXPECT_LE(test, arr->arr[j].cmd_id,
+ arr->arr[j + 1].cmd_id);
+ }
+}
+
+static void test_hcmd_names_for_rx(struct kunit *test)
+{
+ static struct iwl_trans t = {
+ .command_groups = iwl_mld_groups,
+ };
+
+ t.command_groups_size = global_iwl_mld_goups_size;
+
+ for (unsigned int i = 0; i < iwl_mld_rx_handlers_num; i++) {
+ const struct iwl_rx_handler *rxh;
+ const char *name;
+
+ rxh = &iwl_mld_rx_handlers[i];
+
+ name = iwl_get_cmd_string(&t, rxh->cmd_id);
+ KUNIT_EXPECT_NOT_NULL(test, name);
+ KUNIT_EXPECT_NE_MSG(test, strcmp(name, "UNKNOWN"), 0,
+ "ID 0x%04x is UNKNOWN", rxh->cmd_id);
+ }
+}
+
+static struct kunit_case hcmd_names_cases[] = {
+ KUNIT_CASE(test_hcmd_names_sorted),
+ KUNIT_CASE(test_hcmd_names_for_rx),
+ {},
+};
+
+static struct kunit_suite hcmd_names = {
+ .name = "iwlmld-hcmd-names",
+ .test_cases = hcmd_names_cases,
+};
+
+kunit_test_suite(hcmd_names);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c
new file mode 100644
index 000000000000..295dcfd3f85d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * KUnit tests for link selection functions
+ *
+ * Copyright (C) 2025 Intel Corporation
+ */
+#include <kunit/static_stub.h>
+
+#include "utils.h"
+#include "mld.h"
+#include "link.h"
+#include "iface.h"
+#include "phy.h"
+#include "mlo.h"
+
+static const struct link_grading_test_case {
+ const char *desc;
+ struct {
+ struct {
+ u8 link_id;
+ const struct cfg80211_chan_def *chandef;
+ bool active;
+ s32 signal;
+ bool has_chan_util_elem;
+ u8 chan_util; /* 0-255, used only if has_chan_util_elem is true */
+ u8 chan_load_by_us; /* 0-100, used only if active is true */
+ } link;
+ } input;
+ unsigned int expected_grade;
+} link_grading_cases[] = {
+ {
+ .desc = "channel util of 128 (50%)",
+ .input.link = {
+ .link_id = 0,
+ .chandef = &chandef_2ghz,
+ .active = false,
+ .has_chan_util_elem = true,
+ .chan_util = 128,
+ },
+ .expected_grade = 86,
+ },
+ {
+ .desc = "channel util of 180 (70%)",
+ .input.link = {
+ .link_id = 0,
+ .chandef = &chandef_2ghz,
+ .active = false,
+ .has_chan_util_elem = true,
+ .chan_util = 180,
+ },
+ .expected_grade = 51,
+ },
+ {
+ .desc = "channel util of 180 (70%), channel load by us of 10%",
+ .input.link = {
+ .link_id = 0,
+ .chandef = &chandef_2ghz,
+ .has_chan_util_elem = true,
+ .chan_util = 180,
+ .active = true,
+ .chan_load_by_us = 10,
+ },
+ .expected_grade = 67,
+ },
+ {
+ .desc = "no channel util element",
+ .input.link = {
+ .link_id = 0,
+ .chandef = &chandef_2ghz,
+ .active = true,
+ },
+ .expected_grade = 120,
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(link_grading, link_grading_cases, desc);
+
+static void setup_link(struct ieee80211_bss_conf *link)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct iwl_mld *mld = test->priv;
+ const struct link_grading_test_case *test_param =
+ (const void *)(test->param_value);
+
+ KUNIT_ALLOC_AND_ASSERT(test, link->bss);
+
+ link->bss->signal = DBM_TO_MBM(test_param->input.link.signal);
+
+ link->chanreq.oper = *test_param->input.link.chandef;
+
+ if (test_param->input.link.has_chan_util_elem) {
+ struct cfg80211_bss_ies *ies;
+ struct ieee80211_bss_load_elem bss_load = {
+ .channel_util = test_param->input.link.chan_util,
+ };
+ struct element *elem =
+ iwlmld_kunit_gen_element(WLAN_EID_QBSS_LOAD,
+ &bss_load,
+ sizeof(bss_load));
+ unsigned int elem_len = sizeof(*elem) + sizeof(bss_load);
+
+ KUNIT_ALLOC_AND_ASSERT_SIZE(test, ies, sizeof(*ies) + elem_len);
+ memcpy(ies->data, elem, elem_len);
+ ies->len = elem_len;
+ rcu_assign_pointer(link->bss->beacon_ies, ies);
+ rcu_assign_pointer(link->bss->ies, ies);
+ }
+
+ if (test_param->input.link.active) {
+ struct ieee80211_chanctx_conf *chan_ctx =
+ wiphy_dereference(mld->wiphy, link->chanctx_conf);
+ struct iwl_mld_phy *phy;
+
+ KUNIT_ASSERT_NOT_NULL(test, chan_ctx);
+
+ phy = iwl_mld_phy_from_mac80211(chan_ctx);
+
+ phy->channel_load_by_us = test_param->input.link.chan_load_by_us;
+ }
+}
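+
+/*
+ * Note on the QBSS load element built above: the BSS Load element
+ * (WLAN_EID_QBSS_LOAD, id 11) carries a 5-byte body of station count
+ * (u16), channel utilization (u8, scaled 0-255 where 255 means 100%)
+ * and available admission capacity (u16); only channel_util is set
+ * here, so e.g. chan_util = 128 advertises roughly 50% utilization.
+ */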
+
+static void test_link_grading(struct kunit *test)
+{
+ struct iwl_mld *mld = test->priv;
+ const struct link_grading_test_case *test_param =
+ (const void *)(test->param_value);
+ struct ieee80211_vif *vif;
+ struct ieee80211_bss_conf *link;
+ unsigned int actual_grade;
+ /* Extract test case parameters */
+ u8 link_id = test_param->input.link.link_id;
+ bool active = test_param->input.link.active;
+ u16 valid_links;
+ struct iwl_mld_kunit_link assoc_link = {
+ .band = test_param->input.link.chandef->chan->band,
+ };
+
+ /* If the link is not active, use a different link as the assoc link */
+ if (active) {
+ assoc_link.id = link_id;
+ valid_links = BIT(link_id);
+ } else {
+ assoc_link.id = BIT(ffz(BIT(link_id)));
+ valid_links = BIT(assoc_link.id) | BIT(link_id);
+ }
+
+ vif = iwlmld_kunit_setup_mlo_assoc(valid_links, &assoc_link);
+
+ wiphy_lock(mld->wiphy);
+ link = wiphy_dereference(mld->wiphy, vif->link_conf[link_id]);
+ KUNIT_ASSERT_NOT_NULL(test, link);
+
+ setup_link(link);
+
+ actual_grade = iwl_mld_get_link_grade(mld, link);
+ wiphy_unlock(mld->wiphy);
+
+ /* Assert that the returned grade matches the expected grade */
+ KUNIT_EXPECT_EQ(test, actual_grade, test_param->expected_grade);
+}
+
+static struct kunit_case link_selection_cases[] = {
+ KUNIT_CASE_PARAM(test_link_grading, link_grading_gen_params),
+ {},
+};
+
+static struct kunit_suite link_selection = {
+ .name = "iwlmld-link-selection-tests",
+ .test_cases = link_selection_cases,
+ .init = iwlmld_kunit_test_init,
+};
+
+kunit_test_suite(link_selection);
+
+static const struct channel_load_case {
+ const char *desc;
+ bool low_latency_vif;
+ u32 chan_load_not_by_us;
+ enum nl80211_chan_width bw_a;
+ enum nl80211_chan_width bw_b;
+ bool primary_link_active;
+ bool expected_result;
+} channel_load_cases[] = {
+ {
+ .desc = "Unequal bandwidth, primary link inactive, EMLSR not allowed",
+ .low_latency_vif = false,
+ .primary_link_active = false,
+ .bw_a = NL80211_CHAN_WIDTH_40,
+ .bw_b = NL80211_CHAN_WIDTH_20,
+ .expected_result = false,
+ },
+ {
+ .desc = "Equal bandwidths, sufficient channel load, EMLSR allowed",
+ .low_latency_vif = false,
+ .primary_link_active = true,
+ .chan_load_not_by_us = 11,
+ .bw_a = NL80211_CHAN_WIDTH_40,
+ .bw_b = NL80211_CHAN_WIDTH_40,
+ .expected_result = true,
+ },
+ {
+ .desc = "Equal bandwidths, insufficient channel load, EMLSR not allowed",
+ .low_latency_vif = false,
+ .primary_link_active = true,
+ .chan_load_not_by_us = 6,
+ .bw_a = NL80211_CHAN_WIDTH_80,
+ .bw_b = NL80211_CHAN_WIDTH_80,
+ .expected_result = false,
+ },
+ {
+ .desc = "Low latency VIF, sufficient channel load, EMLSR allowed",
+ .low_latency_vif = true,
+ .primary_link_active = true,
+ .chan_load_not_by_us = 6,
+ .bw_a = NL80211_CHAN_WIDTH_160,
+ .bw_b = NL80211_CHAN_WIDTH_160,
+ .expected_result = true,
+ },
+ {
+ .desc = "Different bandwidths (2x ratio), primary link load permits EMLSR",
+ .low_latency_vif = false,
+ .primary_link_active = true,
+ .chan_load_not_by_us = 30,
+ .bw_a = NL80211_CHAN_WIDTH_40,
+ .bw_b = NL80211_CHAN_WIDTH_20,
+ .expected_result = true,
+ },
+ {
+ .desc = "Different bandwidths (4x ratio), primary link load permits EMLSR",
+ .low_latency_vif = false,
+ .primary_link_active = true,
+ .chan_load_not_by_us = 45,
+ .bw_a = NL80211_CHAN_WIDTH_80,
+ .bw_b = NL80211_CHAN_WIDTH_20,
+ .expected_result = true,
+ },
+ {
+ .desc = "Different bandwidths (16x ratio), primary link load insufficient",
+ .low_latency_vif = false,
+ .primary_link_active = true,
+ .chan_load_not_by_us = 45,
+ .bw_a = NL80211_CHAN_WIDTH_320,
+ .bw_b = NL80211_CHAN_WIDTH_20,
+ .expected_result = false,
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(channel_load, channel_load_cases, desc);
+
+static void test_iwl_mld_channel_load_allows_emlsr(struct kunit *test)
+{
+ const struct channel_load_case *params = test->param_value;
+ struct iwl_mld *mld = test->priv;
+ struct ieee80211_vif *vif;
+ struct cfg80211_chan_def chandef_a, chandef_b;
+ struct iwl_mld_link_sel_data a = {.chandef = &chandef_a,
+ .link_id = 4};
+ struct iwl_mld_link_sel_data b = {.chandef = &chandef_b,
+ .link_id = 5};
+ struct iwl_mld_kunit_link assoc_link = {
+ .id = params->primary_link_active ? a.link_id : b.link_id,
+ .bandwidth = params->primary_link_active ? params->bw_a : params->bw_b,
+ };
+ bool result;
+
+ vif = iwlmld_kunit_setup_mlo_assoc(BIT(a.link_id) | BIT(b.link_id),
+ &assoc_link);
+
+ chandef_a.width = params->bw_a;
+ chandef_b.width = params->bw_b;
+
+ if (params->low_latency_vif)
+ iwl_mld_vif_from_mac80211(vif)->low_latency_causes = 1;
+
+ wiphy_lock(mld->wiphy);
+
+ /* Simulate channel load */
+ if (params->primary_link_active) {
+ struct iwl_mld_phy *phy =
+ iwlmld_kunit_get_phy_of_link(vif, a.link_id);
+
+ phy->avg_channel_load_not_by_us = params->chan_load_not_by_us;
+ }
+
+ result = iwl_mld_channel_load_allows_emlsr(mld, vif, &a, &b);
+
+ wiphy_unlock(mld->wiphy);
+
+ KUNIT_EXPECT_EQ(test, result, params->expected_result);
+}
+
+static struct kunit_case channel_load_criteria_test_cases[] = {
+ KUNIT_CASE_PARAM(test_iwl_mld_channel_load_allows_emlsr, channel_load_gen_params),
+ {}
+};
+
+static struct kunit_suite channel_load_criteria_tests = {
+ .name = "iwlmld_channel_load_allows_emlsr",
+ .test_cases = channel_load_criteria_test_cases,
+ .init = iwlmld_kunit_test_init,
+};
+
+kunit_test_suite(channel_load_criteria_tests);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/link.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/link.c
new file mode 100644
index 000000000000..4a4eaa134bd3
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/link.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * KUnit tests for iwlmld link handling
+ *
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include <kunit/static_stub.h>
+
+#include "utils.h"
+#include "mld.h"
+#include "link.h"
+#include "iface.h"
+#include "fw/api/mac-cfg.h"
+
+static const struct missed_beacon_test_case {
+ const char *desc;
+ struct {
+ struct iwl_missed_beacons_notif notif;
+ bool emlsr;
+ } input;
+ struct {
+ bool disconnected;
+ bool emlsr;
+ } output;
+} missed_beacon_cases[] = {
+ {
+ .desc = "no EMLSR, no disconnect",
+ .input.notif = {
+ .consec_missed_beacons = cpu_to_le32(4),
+ },
+ },
+ {
+ .desc = "no EMLSR, no beacon loss since Rx, no disconnect",
+ .input.notif = {
+ .consec_missed_beacons = cpu_to_le32(20),
+ },
+ },
+ {
+ .desc = "no EMLSR, beacon loss since Rx, disconnect",
+ .input.notif = {
+ .consec_missed_beacons = cpu_to_le32(20),
+ .consec_missed_beacons_since_last_rx =
+ cpu_to_le32(10),
+ },
+ .output.disconnected = true,
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(test_missed_beacon, missed_beacon_cases, desc);
+
+static void fake_ieee80211_connection_loss(struct ieee80211_vif *vif)
+{
+ vif->cfg.assoc = false;
+}
+
+static void test_missed_beacon(struct kunit *test)
+{
+ struct iwl_mld *mld = test->priv;
+ struct iwl_missed_beacons_notif *notif;
+ const struct missed_beacon_test_case *test_param =
+ (const void *)(test->param_value);
+ struct ieee80211_vif *vif;
+ struct iwl_rx_packet *pkt;
+ struct iwl_mld_kunit_link link1 = {
+ .id = 0,
+ .band = NL80211_BAND_6GHZ,
+ };
+ struct iwl_mld_kunit_link link2 = {
+ .id = 1,
+ .band = NL80211_BAND_5GHZ,
+ };
+
+ kunit_activate_static_stub(test, ieee80211_connection_loss,
+ fake_ieee80211_connection_loss);
+ pkt = iwl_mld_kunit_create_pkt(test_param->input.notif);
+ notif = (void *)pkt->data;
+
+ if (test_param->input.emlsr) {
+ vif = iwlmld_kunit_assoc_emlsr(&link1, &link2);
+ } else {
+ struct iwl_mld_vif *mld_vif;
+
+ vif = iwlmld_kunit_setup_non_mlo_assoc(&link1);
+ mld_vif = iwl_mld_vif_from_mac80211(vif);
+ notif->link_id = cpu_to_le32(mld_vif->deflink.fw_id);
+ }
+
+ wiphy_lock(mld->wiphy);
+
+ iwl_mld_handle_missed_beacon_notif(mld, pkt);
+
+ wiphy_unlock(mld->wiphy);
+
+ KUNIT_ASSERT_NE(test, vif->cfg.assoc, test_param->output.disconnected);
+
+ /* TODO: add test cases for esr and check */
+}
+
+static struct kunit_case link_cases[] = {
+ KUNIT_CASE_PARAM(test_missed_beacon, test_missed_beacon_gen_params),
+ {},
+};
+
+static struct kunit_suite link = {
+ .name = "iwlmld-link",
+ .test_cases = link_cases,
+ .init = iwlmld_kunit_test_init,
+};
+
+kunit_test_suite(link);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/module.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/module.c
new file mode 100644
index 000000000000..5d9818587b23
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/module.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * This is just module boilerplate for the iwlmld kunit module.
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <linux/module.h>
+
+MODULE_IMPORT_NS("IWLWIFI");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("kunit tests for iwlmld");
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/rx.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/rx.c
new file mode 100644
index 000000000000..20cb4e03ab41
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/rx.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * KUnit tests for iwlmld RX duplicate detection
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <kunit/test.h>
+#include "utils.h"
+#include "iwl-trans.h"
+#include "mld.h"
+#include "sta.h"
+
+static const struct is_dup_case {
+ const char *desc;
+ struct {
+ /* ieee80211_hdr fields */
+ __le16 fc;
+ __le16 seq;
+ u8 tid;
+ bool multicast;
+ /* iwl_rx_mpdu_desc fields */
+ bool is_amsdu;
+ u8 sub_frame_idx;
+ } rx_pkt;
+ struct {
+ __le16 last_seq;
+ u8 last_sub_frame_idx;
+ u8 tid;
+ } dup_data_state;
+ struct {
+ bool is_dup;
+ u32 rx_status_flag;
+ } result;
+} is_dup_cases[] = {
+ {
+ .desc = "Control frame",
+ .rx_pkt = {
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_CTL),
+ },
+ .result = {
+ .is_dup = false,
+ .rx_status_flag = 0,
+ }
+ },
+ {
+ .desc = "Null func frame",
+ .rx_pkt = {
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_NULLFUNC),
+ },
+ .result = {
+ .is_dup = false,
+ .rx_status_flag = 0,
+ }
+ },
+ {
+ .desc = "Multicast data",
+ .rx_pkt = {
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA),
+ .multicast = true,
+ },
+ .result = {
+ .is_dup = false,
+ .rx_status_flag = 0,
+ }
+ },
+ {
+ .desc = "QoS null func frame",
+ .rx_pkt = {
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_NULLFUNC),
+ },
+ .result = {
+ .is_dup = false,
+ .rx_status_flag = 0,
+ }
+ },
+ {
+ .desc = "QoS data new sequence",
+ .rx_pkt = {
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_DATA),
+ .seq = __cpu_to_le16(0x101),
+ },
+ .dup_data_state = {
+ .last_seq = __cpu_to_le16(0x100),
+ .last_sub_frame_idx = 0,
+ },
+ .result = {
+ .is_dup = false,
+ .rx_status_flag = RX_FLAG_DUP_VALIDATED,
+ },
+ },
+ {
+ .desc = "QoS data same sequence, no retry",
+ .rx_pkt = {
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_DATA),
+ .seq = __cpu_to_le16(0x100),
+ },
+ .dup_data_state = {
+ .last_seq = __cpu_to_le16(0x100),
+ .last_sub_frame_idx = 0,
+ },
+ .result = {
+ .is_dup = false,
+ .rx_status_flag = RX_FLAG_DUP_VALIDATED,
+ },
+ },
+ {
+ .desc = "QoS data same sequence, has retry",
+ .rx_pkt = {
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_DATA |
+ IEEE80211_FCTL_RETRY),
+ .seq = __cpu_to_le16(0x100),
+ },
+ .dup_data_state = {
+ .last_seq = __cpu_to_le16(0x100),
+ .last_sub_frame_idx = 0,
+ },
+ .result = {
+ .is_dup = true,
+ .rx_status_flag = 0,
+ },
+ },
+ {
+ .desc = "QoS data invalid tid",
+ .rx_pkt = {
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_DATA),
+ .seq = __cpu_to_le16(0x100),
+ .tid = IWL_MAX_TID_COUNT + 1,
+ },
+ .result = {
+ .is_dup = true,
+ .rx_status_flag = 0,
+ },
+ },
+ {
+ .desc = "non-QoS data, same sequence, same tid, no retry",
+ .rx_pkt = {
+ /* Driver will use tid = IWL_MAX_TID_COUNT */
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA),
+ .seq = __cpu_to_le16(0x100),
+ },
+ .dup_data_state = {
+ .tid = IWL_MAX_TID_COUNT,
+ .last_seq = __cpu_to_le16(0x100),
+ .last_sub_frame_idx = 0,
+ },
+ .result = {
+ .is_dup = false,
+ .rx_status_flag = RX_FLAG_DUP_VALIDATED,
+ },
+ },
+ {
+ .desc = "non-QoS data, same sequence, same tid, has retry",
+ .rx_pkt = {
+ /* Driver will use tid = IWL_MAX_TID_COUNT */
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_FCTL_RETRY),
+ .seq = __cpu_to_le16(0x100),
+ },
+ .dup_data_state = {
+ .tid = IWL_MAX_TID_COUNT,
+ .last_seq = __cpu_to_le16(0x100),
+ .last_sub_frame_idx = 0,
+ },
+ .result = {
+ .is_dup = true,
+ .rx_status_flag = 0,
+ },
+ },
+ {
+ .desc = "non-QoS data, same sequence on different tid's",
+ .rx_pkt = {
+ /* Driver will use tid = IWL_MAX_TID_COUNT */
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA),
+ .seq = __cpu_to_le16(0x100),
+ },
+ .dup_data_state = {
+ .tid = 7,
+ .last_seq = __cpu_to_le16(0x100),
+ .last_sub_frame_idx = 0,
+ },
+ .result = {
+ .is_dup = false,
+ .rx_status_flag = RX_FLAG_DUP_VALIDATED,
+ },
+ },
+ {
+ .desc = "A-MSDU new subframe, allow same PN",
+ .rx_pkt = {
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_DATA),
+ .seq = __cpu_to_le16(0x100),
+ .is_amsdu = true,
+ .sub_frame_idx = 1,
+ },
+ .dup_data_state = {
+ .last_seq = __cpu_to_le16(0x100),
+ .last_sub_frame_idx = 0,
+ },
+ .result = {
+ .is_dup = false,
+ .rx_status_flag = RX_FLAG_ALLOW_SAME_PN |
+ RX_FLAG_DUP_VALIDATED,
+ },
+ },
+ {
+ .desc = "A-MSDU subframe with smaller idx, disallow same PN",
+ .rx_pkt = {
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_DATA),
+ .seq = __cpu_to_le16(0x100),
+ .is_amsdu = true,
+ .sub_frame_idx = 1,
+ },
+ .dup_data_state = {
+ .last_seq = __cpu_to_le16(0x100),
+ .last_sub_frame_idx = 2,
+ },
+ .result = {
+ .is_dup = false,
+ .rx_status_flag = RX_FLAG_DUP_VALIDATED,
+ },
+ },
+ {
+ .desc = "A-MSDU same subframe, no retry, disallow same PN",
+ .rx_pkt = {
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_DATA),
+ .seq = __cpu_to_le16(0x100),
+ .is_amsdu = true,
+ .sub_frame_idx = 0,
+ },
+ .dup_data_state = {
+ .last_seq = __cpu_to_le16(0x100),
+ .last_sub_frame_idx = 0,
+ },
+ .result = {
+ .is_dup = false,
+ .rx_status_flag = RX_FLAG_DUP_VALIDATED,
+ },
+ },
+ {
+ .desc = "A-MSDU same subframe, has retry",
+ .rx_pkt = {
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_DATA |
+ IEEE80211_FCTL_RETRY),
+ .seq = __cpu_to_le16(0x100),
+ .is_amsdu = true,
+ .sub_frame_idx = 0,
+ },
+ .dup_data_state = {
+ .last_seq = __cpu_to_le16(0x100),
+ .last_sub_frame_idx = 0,
+ },
+ .result = {
+ .is_dup = true,
+ .rx_status_flag = 0,
+ },
+ },
+};
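+
+/*
+ * Note on the A-MSDU cases: all subframes of one A-MSDU are carried in
+ * a single MPDU and therefore share its PN, so a new subframe with the
+ * same sequence number is accepted and flagged RX_FLAG_ALLOW_SAME_PN,
+ * telling mac80211 not to treat the repeated PN as a replay.
+ */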
+
+KUNIT_ARRAY_PARAM_DESC(test_is_dup, is_dup_cases, desc);
+
+static void
+setup_dup_data_state(struct ieee80211_sta *sta)
+{
+ struct kunit *test = kunit_get_current_test();
+ const struct is_dup_case *param = (const void *)(test->param_value);
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ u8 tid = param->dup_data_state.tid;
+ struct iwl_mld_rxq_dup_data *dup_data;
+
+ /* Allocate dup_data only for 1 queue */
+ KUNIT_ALLOC_AND_ASSERT(test, dup_data);
+
+ /* Initialize dup data, see iwl_mld_alloc_dup_data */
+ memset(dup_data->last_seq, 0xff, sizeof(dup_data->last_seq));
+
+ dup_data->last_seq[tid] = param->dup_data_state.last_seq;
+ dup_data->last_sub_frame_idx[tid] =
+ param->dup_data_state.last_sub_frame_idx;
+
+ mld_sta->dup_data = dup_data;
+}
+
+static void setup_rx_pkt(const struct is_dup_case *param,
+ struct ieee80211_hdr *hdr,
+ struct iwl_rx_mpdu_desc *mpdu_desc)
+{
+ u8 tid = param->rx_pkt.tid;
+
+ /* Set "new rx packet" header */
+ hdr->frame_control = param->rx_pkt.fc;
+ hdr->seq_ctrl = param->rx_pkt.seq;
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+
+ qc[0] = tid & IEEE80211_QOS_CTL_TID_MASK;
+ }
+
+ if (param->rx_pkt.multicast)
+ hdr->addr1[0] = 0x1;
+
+ /* Set mpdu_desc */
+ mpdu_desc->amsdu_info = param->rx_pkt.sub_frame_idx &
+ IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+ if (param->rx_pkt.is_amsdu)
+ mpdu_desc->mac_flags2 |= IWL_RX_MPDU_MFLG2_AMSDU;
+}
+
+static void test_is_dup(struct kunit *test)
+{
+ const struct is_dup_case *param = (const void *)(test->param_value);
+ struct iwl_mld *mld = test->priv;
+ struct iwl_rx_mpdu_desc mpdu_desc = { };
+ struct ieee80211_rx_status rx_status = { };
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
+ struct ieee80211_hdr hdr;
+
+ vif = iwlmld_kunit_add_vif(false, NL80211_IFTYPE_STATION);
+ sta = iwlmld_kunit_setup_sta(vif, IEEE80211_STA_AUTHORIZED, -1);
+
+ /* Prepare test case state */
+ setup_dup_data_state(sta);
+ setup_rx_pkt(param, &hdr, &mpdu_desc);
+
+ KUNIT_EXPECT_EQ(test,
+ iwl_mld_is_dup(mld, sta, &hdr, &mpdu_desc, &rx_status,
+ 0), /* assuming only 1 queue */
+ param->result.is_dup);
+ KUNIT_EXPECT_EQ(test, rx_status.flag, param->result.rx_status_flag);
+}
+
+static struct kunit_case is_dup_test_cases[] = {
+ KUNIT_CASE_PARAM(test_is_dup, test_is_dup_gen_params),
+ {},
+};
+
+static struct kunit_suite is_dup = {
+ .name = "iwlmld-rx-is-dup",
+ .test_cases = is_dup_test_cases,
+ .init = iwlmld_kunit_test_init,
+};
+
+kunit_test_suite(is_dup);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c
new file mode 100644
index 000000000000..9712ee696509
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c
@@ -0,0 +1,474 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * KUnit test utilities for iwlmld
+ *
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+
+#include "utils.h"
+
+#include <linux/device.h>
+
+#include "fw/api/scan.h"
+#include "fw/api/mac-cfg.h"
+#include "iwl-trans.h"
+#include "mld.h"
+#include "iface.h"
+#include "link.h"
+#include "phy.h"
+#include "sta.h"
+
+int iwlmld_kunit_test_init(struct kunit *test)
+{
+ struct iwl_mld *mld;
+ struct iwl_trans *trans;
+ const struct iwl_cfg *cfg;
+ struct iwl_fw *fw;
+ struct ieee80211_hw *hw;
+
+ KUNIT_ALLOC_AND_ASSERT(test, trans);
+ KUNIT_ALLOC_AND_ASSERT(test, trans->dev);
+ KUNIT_ALLOC_AND_ASSERT(test, cfg);
+ KUNIT_ALLOC_AND_ASSERT(test, fw);
+ KUNIT_ALLOC_AND_ASSERT(test, hw);
+ KUNIT_ALLOC_AND_ASSERT(test, hw->wiphy);
+
+ mutex_init(&hw->wiphy->mtx);
+
+ /* Allocate and initialize the mld structure */
+ KUNIT_ALLOC_AND_ASSERT(test, mld);
+ iwl_construct_mld(mld, trans, cfg, fw, hw, NULL);
+
+ fw->ucode_capa.num_stations = IWL_STATION_COUNT_MAX;
+ fw->ucode_capa.num_links = IWL_FW_MAX_LINK_ID + 1;
+
+ mld->fwrt.trans = trans;
+ mld->fwrt.fw = fw;
+ mld->fwrt.dev = trans->dev;
+
+ /* TODO: add priv_size to hw allocation and setup hw->priv to enable
+ * testing mac80211 callbacks
+ */
+
+ KUNIT_ALLOC_AND_ASSERT(test, mld->nvm_data);
+ KUNIT_ALLOC_AND_ASSERT_SIZE(test, mld->scan.cmd,
+ sizeof(struct iwl_scan_req_umac_v17));
+ mld->scan.cmd_size = sizeof(struct iwl_scan_req_umac_v17);
+
+ /* This is not the state at the end of the regular opmode_start,
+ * but it is the state most tests need. Explicitly undo it if needed.
+ */
+ mld->trans->state = IWL_TRANS_FW_ALIVE;
+ mld->fw_status.running = true;
+
+ /* Avoid passing mld struct around */
+ test->priv = mld;
+ return 0;
+}
+
+IWL_MLD_ALLOC_FN(link, bss_conf)
+
+static void iwlmld_kunit_init_link(struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link,
+ struct iwl_mld_link *mld_link, int link_id)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct iwl_mld *mld = test->priv;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int ret;
+
+ /* setup mac80211 link */
+ rcu_assign_pointer(vif->link_conf[link_id], link);
+ link->link_id = link_id;
+ link->vif = vif;
+ link->beacon_int = 100;
+ link->dtim_period = 3;
+ link->qos = true;
+
+ /* and mld_link */
+ ret = iwl_mld_allocate_link_fw_id(mld, &mld_link->fw_id, link);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ rcu_assign_pointer(mld_vif->link[link_id], mld_link);
+ rcu_assign_pointer(vif->link_conf[link_id], link);
+}
+
+IWL_MLD_ALLOC_FN(vif, vif)
+
+/* Helper function to add and initialize a VIF for KUnit tests */
+struct ieee80211_vif *iwlmld_kunit_add_vif(bool mlo, enum nl80211_iftype type)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct iwl_mld *mld = test->priv;
+ struct ieee80211_vif *vif;
+ struct iwl_mld_vif *mld_vif;
+ int ret;
+
+ /* TODO: support more types */
+ KUNIT_ASSERT_EQ(test, type, NL80211_IFTYPE_STATION);
+
+ KUNIT_ALLOC_AND_ASSERT_SIZE(test, vif,
+ sizeof(*vif) + sizeof(*mld_vif));
+
+ vif->type = type;
+ mld_vif = iwl_mld_vif_from_mac80211(vif);
+ mld_vif->mld = mld;
+
+ ret = iwl_mld_allocate_vif_fw_id(mld, &mld_vif->fw_id, vif);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ /* TODO: revisit (task=EHT) */
+ if (mlo)
+ return vif;
+
+ /* Initialize the default link */
+ iwlmld_kunit_init_link(vif, &vif->bss_conf, &mld_vif->deflink, 0);
+
+ return vif;
+}
+
+/* Use only for MLO vif */
+struct ieee80211_bss_conf *
+iwlmld_kunit_add_link(struct ieee80211_vif *vif, int link_id)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct ieee80211_bss_conf *link;
+ struct iwl_mld_link *mld_link;
+
+ KUNIT_ALLOC_AND_ASSERT(test, link);
+ KUNIT_ALLOC_AND_ASSERT(test, mld_link);
+
+ iwlmld_kunit_init_link(vif, link, mld_link, link_id);
+ vif->valid_links |= BIT(link_id);
+
+ return link;
+}
+
+struct ieee80211_chanctx_conf *
+iwlmld_kunit_add_chanctx_from_def(struct cfg80211_chan_def *def)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct iwl_mld *mld = test->priv;
+ struct ieee80211_chanctx_conf *ctx;
+ struct iwl_mld_phy *phy;
+ int fw_id;
+
+ KUNIT_ALLOC_AND_ASSERT_SIZE(test, ctx, sizeof(*ctx) + sizeof(*phy));
+
+ /* Setup the chanctx conf */
+ ctx->def = *def;
+ ctx->min_def = *def;
+ ctx->ap = *def;
+
+ /* and the iwl_mld_phy */
+ phy = iwl_mld_phy_from_mac80211(ctx);
+
+ fw_id = iwl_mld_allocate_fw_phy_id(mld);
+ KUNIT_ASSERT_GE(test, fw_id, 0);
+
+ phy->fw_id = fw_id;
+ phy->mld = mld;
+ phy->chandef = *def;
+
+ return ctx;
+}
+
+void iwlmld_kunit_assign_chanctx_to_link(struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct iwl_mld *mld = test->priv;
+ struct iwl_mld_link *mld_link;
+
+ KUNIT_EXPECT_NULL(test, rcu_access_pointer(link->chanctx_conf));
+ rcu_assign_pointer(link->chanctx_conf, ctx);
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ mld_link = iwl_mld_link_from_mac80211(link);
+
+ KUNIT_EXPECT_NULL(test, rcu_access_pointer(mld_link->chan_ctx));
+ KUNIT_EXPECT_FALSE(test, mld_link->active);
+
+ rcu_assign_pointer(mld_link->chan_ctx, ctx);
+ mld_link->active = true;
+
+ if (ieee80211_vif_is_mld(vif))
+ vif->active_links |= BIT(link->link_id);
+}
+
+IWL_MLD_ALLOC_FN(link_sta, link_sta)
+
+static void iwlmld_kunit_add_link_sta(struct ieee80211_sta *sta,
+ struct ieee80211_link_sta *link_sta,
+ struct iwl_mld_link_sta *mld_link_sta,
+ u8 link_id)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ struct iwl_mld *mld = test->priv;
+ u8 fw_id;
+ int ret;
+
+ /* initialize mac80211's link_sta */
+ link_sta->link_id = link_id;
+ rcu_assign_pointer(sta->link[link_id], link_sta);
+
+ link_sta->sta = sta;
+
+ /* and the iwl_mld_link_sta */
+ ret = iwl_mld_allocate_link_sta_fw_id(mld, &fw_id, link_sta);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ mld_link_sta->fw_id = fw_id;
+
+ rcu_assign_pointer(mld_sta->link[link_id], mld_link_sta);
+}
+
+static struct ieee80211_link_sta *
+iwlmld_kunit_alloc_link_sta(struct ieee80211_sta *sta, int link_id)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct ieee80211_link_sta *link_sta;
+ struct iwl_mld_link_sta *mld_link_sta;
+
+ /* Only valid for MLO */
+ KUNIT_ASSERT_TRUE(test, sta->valid_links);
+
+ KUNIT_ALLOC_AND_ASSERT(test, link_sta);
+ KUNIT_ALLOC_AND_ASSERT(test, mld_link_sta);
+
+ iwlmld_kunit_add_link_sta(sta, link_sta, mld_link_sta, link_id);
+
+ sta->valid_links |= BIT(link_id);
+
+ return link_sta;
+}
+
+/* Allocate and initialize a STA with the first link_sta */
+static struct ieee80211_sta *
+iwlmld_kunit_add_sta(struct ieee80211_vif *vif, int link_id)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct ieee80211_sta *sta;
+ struct iwl_mld_sta *mld_sta;
+
+ /* Allocate memory for ieee80211_sta with embedded iwl_mld_sta */
+ KUNIT_ALLOC_AND_ASSERT_SIZE(test, sta, sizeof(*sta) + sizeof(*mld_sta));
+
+ /* TODO: allocate and initialize the TXQs ? */
+
+ mld_sta = iwl_mld_sta_from_mac80211(sta);
+ mld_sta->vif = vif;
+ mld_sta->mld = test->priv;
+
+ /* TODO: adjust for internal stations */
+ mld_sta->sta_type = STATION_TYPE_PEER;
+
+ if (link_id >= 0) {
+ iwlmld_kunit_add_link_sta(sta, &sta->deflink,
+ &mld_sta->deflink, link_id);
+ sta->valid_links = BIT(link_id);
+ } else {
+ iwlmld_kunit_add_link_sta(sta, &sta->deflink,
+ &mld_sta->deflink, 0);
+ }
+ return sta;
+}
+
+/* Move a STA to the given state */
+static void iwlmld_kunit_move_sta_state(struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state state)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct iwl_mld_sta *mld_sta;
+ struct iwl_mld_vif *mld_vif;
+
+ /* The sta will be removed automatically at the end of the test */
+ KUNIT_ASSERT_NE(test, state, IEEE80211_STA_NOTEXIST);
+
+ mld_sta = iwl_mld_sta_from_mac80211(sta);
+ mld_sta->sta_state = state;
+
+ mld_vif = iwl_mld_vif_from_mac80211(mld_sta->vif);
+ mld_vif->authorized = state == IEEE80211_STA_AUTHORIZED;
+
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
+ mld_vif->ap_sta = sta;
+}
+
+struct ieee80211_sta *iwlmld_kunit_setup_sta(struct ieee80211_vif *vif,
+ enum ieee80211_sta_state state,
+ int link_id)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct ieee80211_sta *sta;
+
+ /* The sta will be removed automatically at the end of the test */
+ KUNIT_ASSERT_NE(test, state, IEEE80211_STA_NOTEXIST);
+
+ /* First - allocate and init the STA */
+ sta = iwlmld_kunit_add_sta(vif, link_id);
+
+ /* Now move it all the way to the wanted state */
+ for (enum ieee80211_sta_state _state = IEEE80211_STA_NONE;
+ _state <= state; _state++)
+ iwlmld_kunit_move_sta_state(vif, sta, _state);
+
+ return sta;
+}
+
+static void iwlmld_kunit_set_vif_associated(struct ieee80211_vif *vif)
+{
+ /* TODO: setup chanreq */
+ /* TODO setup capabilities */
+
+ vif->cfg.assoc = 1;
+}
+
+static struct ieee80211_vif *
+iwlmld_kunit_setup_assoc(bool mlo, struct iwl_mld_kunit_link *assoc_link)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct iwl_mld *mld = test->priv;
+ struct ieee80211_vif *vif;
+ struct ieee80211_bss_conf *link;
+ struct ieee80211_chanctx_conf *chan_ctx;
+
+ KUNIT_ASSERT_TRUE(test, mlo || assoc_link->id == 0);
+
+ vif = iwlmld_kunit_add_vif(mlo, NL80211_IFTYPE_STATION);
+
+ if (mlo)
+ link = iwlmld_kunit_add_link(vif, assoc_link->id);
+ else
+ link = &vif->bss_conf;
+
+ chan_ctx = iwlmld_kunit_add_chanctx(assoc_link->band,
+ assoc_link->bandwidth);
+
+ wiphy_lock(mld->wiphy);
+ iwlmld_kunit_assign_chanctx_to_link(vif, link, chan_ctx);
+ wiphy_unlock(mld->wiphy);
+
+ /* The AP sta will now be pointed to by mld_vif->ap_sta */
+ iwlmld_kunit_setup_sta(vif, IEEE80211_STA_AUTHORIZED, assoc_link->id);
+
+ iwlmld_kunit_set_vif_associated(vif);
+
+ return vif;
+}
+
+struct ieee80211_vif *
+iwlmld_kunit_setup_mlo_assoc(u16 valid_links,
+ struct iwl_mld_kunit_link *assoc_link)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct ieee80211_vif *vif;
+
+ KUNIT_ASSERT_TRUE(test,
+ hweight16(valid_links) == 1 ||
+ hweight16(valid_links) == 2);
+ KUNIT_ASSERT_TRUE(test, valid_links & BIT(assoc_link->id));
+
+ vif = iwlmld_kunit_setup_assoc(true, assoc_link);
+
+ /* Add the other link, if applicable */
+ if (hweight16(valid_links) > 1) {
+ u8 other_link_id = ffs(valid_links & ~BIT(assoc_link->id)) - 1;
+
+ iwlmld_kunit_add_link(vif, other_link_id);
+ }
+
+ return vif;
+}
+
+struct ieee80211_vif *
+iwlmld_kunit_setup_non_mlo_assoc(struct iwl_mld_kunit_link *assoc_link)
+{
+ return iwlmld_kunit_setup_assoc(false, assoc_link);
+}
+
+struct iwl_rx_packet *
+_iwl_mld_kunit_create_pkt(const void *notif, size_t notif_sz)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct iwl_rx_packet *pkt;
+
+ KUNIT_ALLOC_AND_ASSERT_SIZE(test, pkt, sizeof(*pkt) + notif_sz);
+
+ memcpy(pkt->data, notif, notif_sz);
+ pkt->len_n_flags = cpu_to_le32(sizeof(pkt->hdr) + notif_sz);
+
+ return pkt;
+}
+
+struct ieee80211_vif *iwlmld_kunit_assoc_emlsr(struct iwl_mld_kunit_link *link1,
+ struct iwl_mld_kunit_link *link2)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct iwl_mld *mld = test->priv;
+ struct ieee80211_vif *vif;
+ struct ieee80211_bss_conf *link;
+ struct ieee80211_chanctx_conf *chan_ctx;
+ struct ieee80211_sta *sta;
+ struct iwl_mld_vif *mld_vif;
+ u16 valid_links = BIT(link1->id) | BIT(link2->id);
+
+ KUNIT_ASSERT_TRUE(test, hweight16(valid_links) == 2);
+
+ vif = iwlmld_kunit_setup_mlo_assoc(valid_links, link1);
+ mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ /* Activate second link */
+ wiphy_lock(mld->wiphy);
+
+ link = wiphy_dereference(mld->wiphy, vif->link_conf[link2->id]);
+ KUNIT_EXPECT_NOT_NULL(test, link);
+
+ chan_ctx = iwlmld_kunit_add_chanctx(link2->band, link2->bandwidth);
+ iwlmld_kunit_assign_chanctx_to_link(vif, link, chan_ctx);
+
+ wiphy_unlock(mld->wiphy);
+
+ /* And other link sta */
+ sta = mld_vif->ap_sta;
+ KUNIT_EXPECT_NOT_NULL(test, sta);
+
+ iwlmld_kunit_alloc_link_sta(sta, link2->id);
+
+ return vif;
+}
+
+struct element *iwlmld_kunit_gen_element(u8 id, const void *data, size_t len)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct element *elem;
+
+ KUNIT_ALLOC_AND_ASSERT_SIZE(test, elem, sizeof(*elem) + len);
+
+ elem->id = id;
+ elem->datalen = len;
+ memcpy(elem->data, data, len);
+
+ return elem;
+}
+
+struct iwl_mld_phy *iwlmld_kunit_get_phy_of_link(struct ieee80211_vif *vif,
+ u8 link_id)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct iwl_mld *mld = test->priv;
+ struct ieee80211_chanctx_conf *chanctx;
+ struct ieee80211_bss_conf *link =
+ wiphy_dereference(mld->wiphy, vif->link_conf[link_id]);
+
+ KUNIT_EXPECT_NOT_NULL(test, link);
+
+ chanctx = wiphy_dereference(mld->wiphy, link->chanctx_conf);
+ KUNIT_EXPECT_NOT_NULL(test, chanctx);
+
+ return iwl_mld_phy_from_mac80211(chanctx);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h b/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h
new file mode 100644
index 000000000000..d3723653cf1b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#ifndef __iwl_mld_kunit_utils_h__
+#define __iwl_mld_kunit_utils_h__
+
+#include <net/mac80211.h>
+#include <kunit/test-bug.h>
+
+struct iwl_mld;
+
+int iwlmld_kunit_test_init(struct kunit *test);
+
+struct iwl_mld_kunit_link {
+ u8 id;
+ enum nl80211_band band;
+ enum nl80211_chan_width bandwidth;
+};
+
+enum nl80211_iftype;
+
+struct ieee80211_vif *iwlmld_kunit_add_vif(bool mlo, enum nl80211_iftype type);
+
+struct ieee80211_bss_conf *
+iwlmld_kunit_add_link(struct ieee80211_vif *vif, int link_id);
+
+#define KUNIT_ALLOC_AND_ASSERT_SIZE(test, ptr, size) \
+do { \
+ (ptr) = kunit_kzalloc((test), (size), GFP_KERNEL); \
+ KUNIT_ASSERT_NOT_NULL((test), (ptr)); \
+} while (0)
+
+#define KUNIT_ALLOC_AND_ASSERT(test, ptr) \
+ KUNIT_ALLOC_AND_ASSERT_SIZE(test, ptr, sizeof(*(ptr)))
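+
+/*
+ * Typical usage (illustrative only; "obj" is a placeholder name):
+ *
+ *	struct foo *obj;
+ *
+ *	KUNIT_ALLOC_AND_ASSERT(test, obj);
+ *
+ * expands to a kunit_kzalloc() of sizeof(*obj) followed by a
+ * KUNIT_ASSERT_NOT_NULL() check, so the allocation is owned by the
+ * test and freed automatically when the test finishes.
+ */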
+
+#define CHANNEL(_name, _band, _freq) \
+static struct ieee80211_channel _name = { \
+ .band = (_band), \
+ .center_freq = (_freq), \
+ .hw_value = (_freq), \
+}
+
+#define CHANDEF(_name, _channel, _freq1, _width) \
+__maybe_unused static struct cfg80211_chan_def _name = { \
+ .chan = &(_channel), \
+ .center_freq1 = (_freq1), \
+ .width = (_width), \
+}
+
+CHANNEL(chan_2ghz, NL80211_BAND_2GHZ, 2412);
+CHANNEL(chan_5ghz, NL80211_BAND_5GHZ, 5200);
+CHANNEL(chan_6ghz, NL80211_BAND_6GHZ, 6115);
+/* Feel free to add more */
+
+CHANDEF(chandef_2ghz, chan_2ghz, 2412, NL80211_CHAN_WIDTH_20);
+CHANDEF(chandef_5ghz, chan_5ghz, 5200, NL80211_CHAN_WIDTH_40);
+CHANDEF(chandef_6ghz, chan_6ghz, 6115, NL80211_CHAN_WIDTH_160);
+/* Feel free to add more */
+
+struct ieee80211_chanctx_conf *
+iwlmld_kunit_add_chanctx_from_def(struct cfg80211_chan_def *def);
+
+static inline struct ieee80211_chanctx_conf *
+iwlmld_kunit_add_chanctx(enum nl80211_band band, enum nl80211_chan_width width)
+{
+ struct cfg80211_chan_def chandef;
+
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ chandef = chandef_2ghz;
+ break;
+ case NL80211_BAND_5GHZ:
+ chandef = chandef_5ghz;
+ break;
+ default:
+ case NL80211_BAND_6GHZ:
+ chandef = chandef_6ghz;
+ break;
+ }
+
+ chandef.width = width;
+
+ return iwlmld_kunit_add_chanctx_from_def(&chandef);
+}
+
+void iwlmld_kunit_assign_chanctx_to_link(struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link,
+ struct ieee80211_chanctx_conf *ctx);
+
+/* Allocate a sta, initialize it and move it to the wanted state */
+struct ieee80211_sta *iwlmld_kunit_setup_sta(struct ieee80211_vif *vif,
+ enum ieee80211_sta_state state,
+ int link_id);
+
+struct ieee80211_vif *
+iwlmld_kunit_setup_mlo_assoc(u16 valid_links,
+ struct iwl_mld_kunit_link *assoc_link);
+
+struct ieee80211_vif *
+iwlmld_kunit_setup_non_mlo_assoc(struct iwl_mld_kunit_link *assoc_link);
+
+struct iwl_rx_packet *
+_iwl_mld_kunit_create_pkt(const void *notif, size_t notif_sz);
+
+#define iwl_mld_kunit_create_pkt(_notif) \
+ _iwl_mld_kunit_create_pkt(&(_notif), sizeof(_notif))
+
+struct ieee80211_vif *
+iwlmld_kunit_assoc_emlsr(struct iwl_mld_kunit_link *link1,
+ struct iwl_mld_kunit_link *link2);
+
+struct element *iwlmld_kunit_gen_element(u8 id, const void *data, size_t len);
+
+/**
+ * iwlmld_kunit_get_phy_of_link - Get the phy of a link
+ *
+ * @vif: The vif to get the phy from.
+ * @link_id: The id of the link to get the phy for.
+ *
+ * Given a vif and a link id, return the phy pointer of that link.
+ * This assumes that the link exists and has a chanctx assigned;
+ * if not, the test will fail.
+ *
+ * Return: phy pointer.
+ */
+struct iwl_mld_phy *iwlmld_kunit_get_phy_of_link(struct ieee80211_vif *vif,
+ u8 link_id);
+
+#endif /* __iwl_mld_kunit_utils_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/thermal.c b/drivers/net/wireless/intel/iwlwifi/mld/thermal.c
new file mode 100644
index 000000000000..1909953a9be9
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/thermal.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifdef CONFIG_THERMAL
+#include <linux/sort.h>
+#include <linux/thermal.h>
+#endif
+
+#include "fw/api/phy.h"
+
+#include "thermal.h"
+#include "mld.h"
+#include "hcmd.h"
+
+#define IWL_MLD_CT_KILL_DURATION (5 * HZ)
+
+void iwl_mld_handle_ct_kill_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ const struct ct_kill_notif *notif = (const void *)pkt->data;
+
+ IWL_ERR(mld,
+ "CT Kill notification: temp = %d, DTS = 0x%x, Scheme 0x%x - Enter CT Kill\n",
+ le16_to_cpu(notif->temperature), notif->dts,
+ notif->scheme);
+
+ iwl_mld_set_ctkill(mld, true);
+
+ wiphy_delayed_work_queue(mld->wiphy, &mld->ct_kill_exit_wk,
+ round_jiffies_relative(IWL_MLD_CT_KILL_DURATION));
+}
+
+static void iwl_mld_exit_ctkill(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+ struct iwl_mld *mld;
+
+ mld = container_of(wk, struct iwl_mld, ct_kill_exit_wk.work);
+
+ IWL_ERR(mld, "Exit CT Kill\n");
+ iwl_mld_set_ctkill(mld, false);
+}
+
+void iwl_mld_handle_temp_notif(struct iwl_mld *mld, struct iwl_rx_packet *pkt)
+{
+ const struct iwl_dts_measurement_notif *notif =
+ (const void *)pkt->data;
+ int temp;
+ u32 ths_crossed;
+
+ temp = le32_to_cpu(notif->temp);
+
+ /* shouldn't be negative, but since it's s32, make sure it isn't */
+ if (IWL_FW_CHECK(mld, temp < 0, "negative temperature %d\n", temp))
+ return;
+
+ ths_crossed = le32_to_cpu(notif->threshold_idx);
+
+ /* 0xFF in ths_crossed means the notification is not related
+ * to a trip, so we can ignore it here.
+ */
+ if (ths_crossed == 0xFF)
+ return;
+
+ IWL_DEBUG_TEMP(mld, "Temp = %d Threshold crossed = %d\n",
+ temp, ths_crossed);
+
+ if (IWL_FW_CHECK(mld, ths_crossed >= IWL_MAX_DTS_TRIPS,
+ "bad threshold: %d\n", ths_crossed))
+ return;
+
+#ifdef CONFIG_THERMAL
+ if (mld->tzone)
+ thermal_zone_device_update(mld->tzone, THERMAL_TRIP_VIOLATED);
+#endif /* CONFIG_THERMAL */
+}
+
+#ifdef CONFIG_THERMAL
+static int iwl_mld_get_temp(struct iwl_mld *mld, s32 *temp)
+{
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(PHY_OPS_GROUP, CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
+ .flags = CMD_WANT_SKB,
+ };
+ const struct iwl_dts_measurement_resp *resp;
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ ret = iwl_mld_send_cmd(mld, &cmd);
+ if (ret) {
+ IWL_ERR(mld,
+ "Failed to send the temperature measurement command (err=%d)\n",
+ ret);
+ return ret;
+ }
+
+ if (iwl_rx_packet_payload_len(cmd.resp_pkt) < sizeof(*resp)) {
+ IWL_ERR(mld,
+ "Failed to get a valid response to DTS measurement\n");
+ ret = -EIO;
+ goto free_resp;
+ }
+
+ resp = (const void *)cmd.resp_pkt->data;
+ *temp = le32_to_cpu(resp->temp);
+
+ IWL_DEBUG_TEMP(mld,
+ "Got temperature measurement response: temp=%d\n",
+ *temp);
+
+free_resp:
+ iwl_free_resp(&cmd);
+ return ret;
+}
+
+static int compare_temps(const void *a, const void *b)
+{
+ return ((s16)le16_to_cpu(*(__le16 *)a) -
+ (s16)le16_to_cpu(*(__le16 *)b));
+}
+
+struct iwl_trip_walk_data {
+ __le16 *thresholds;
+ int count;
+};
+
+static int iwl_trip_temp_iter(struct thermal_trip *trip, void *arg)
+{
+ struct iwl_trip_walk_data *twd = arg;
+
+ if (trip->temperature == THERMAL_TEMP_INVALID)
+ return 0;
+
+ twd->thresholds[twd->count++] =
+ cpu_to_le16((s16)(trip->temperature / 1000));
+ return 0;
+}
+#endif
+
+int iwl_mld_config_temp_report_ths(struct iwl_mld *mld)
+{
+ struct temp_report_ths_cmd cmd = {0};
+ int ret;
+#ifdef CONFIG_THERMAL
+ struct iwl_trip_walk_data twd = {
+ .thresholds = cmd.thresholds,
+ .count = 0
+ };
+
+ if (!mld->tzone)
+ goto send;
+
+ /* The thermal core holds an array of temperature trips that is
+ * unsorted and may contain uninitialized entries; the firmware
+ * expects a compressed, sorted list.
+ */
+
+ /* Compress the trips into the command array, dropping uninitialized values */
+ for_each_thermal_trip(mld->tzone, iwl_trip_temp_iter, &twd);
+
+ cmd.num_temps = cpu_to_le32(twd.count);
+ if (twd.count)
+ sort(cmd.thresholds, twd.count, sizeof(s16),
+ compare_temps, NULL);
+
+send:
+#endif
+ lockdep_assert_wiphy(mld->wiphy);
+
+ ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(PHY_OPS_GROUP,
+ TEMP_REPORTING_THRESHOLDS_CMD),
+ &cmd);
+ if (ret)
+ IWL_ERR(mld, "TEMP_REPORT_THS_CMD command failed (err=%d)\n",
+ ret);
+
+ return ret;
+}
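+
+/*
+ * Worked example of the compression above (hypothetical trip values):
+ * trips of { INVALID, 75000, INVALID, 60000 } (millidegrees) become
+ * thresholds = { 60, 75 } (degrees, little-endian s16) with
+ * num_temps = 2 after iwl_trip_temp_iter() and the sort().
+ */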
+
+#ifdef CONFIG_THERMAL
+static int iwl_mld_tzone_get_temp(struct thermal_zone_device *device,
+ int *temperature)
+{
+ struct iwl_mld *mld = thermal_zone_device_priv(device);
+ int temp;
+ int ret = 0;
+
+ wiphy_lock(mld->wiphy);
+
+ if (!mld->fw_status.running) {
+ /* Tell the core that there is no valid temperature value to
+ * return, but it need not worry about this.
+ */
+ *temperature = THERMAL_TEMP_INVALID;
+ goto unlock;
+ }
+
+ ret = iwl_mld_get_temp(mld, &temp);
+ if (ret)
+ goto unlock;
+
+ *temperature = temp * 1000;
+unlock:
+ wiphy_unlock(mld->wiphy);
+ return ret;
+}
+
+static int iwl_mld_tzone_set_trip_temp(struct thermal_zone_device *device,
+ const struct thermal_trip *trip,
+ int temp)
+{
+ struct iwl_mld *mld = thermal_zone_device_priv(device);
+ int ret;
+
+ wiphy_lock(mld->wiphy);
+
+ if (!mld->fw_status.running) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ if ((temp / 1000) > S16_MAX) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ret = iwl_mld_config_temp_report_ths(mld);
+unlock:
+ wiphy_unlock(mld->wiphy);
+ return ret;
+}
+
+static struct thermal_zone_device_ops tzone_ops = {
+ .get_temp = iwl_mld_tzone_get_temp,
+ .set_trip_temp = iwl_mld_tzone_set_trip_temp,
+};
+
+static void iwl_mld_thermal_zone_register(struct iwl_mld *mld)
+{
+ int ret;
+ char name[16];
+ static atomic_t counter = ATOMIC_INIT(0);
+ struct thermal_trip trips[IWL_MAX_DTS_TRIPS] = {
+ [0 ... IWL_MAX_DTS_TRIPS - 1] = {
+ .temperature = THERMAL_TEMP_INVALID,
+ .type = THERMAL_TRIP_PASSIVE,
+ .flags = THERMAL_TRIP_FLAG_RW_TEMP,
+ },
+ };
+
+ BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
+
+ sprintf(name, "iwlwifi_%u", atomic_inc_return(&counter) & 0xFF);
+ mld->tzone =
+ thermal_zone_device_register_with_trips(name, trips,
+ IWL_MAX_DTS_TRIPS,
+ mld, &tzone_ops,
+ NULL, 0, 0);
+ if (IS_ERR(mld->tzone)) {
+ IWL_DEBUG_TEMP(mld,
+ "Failed to register to thermal zone (err = %ld)\n",
+ PTR_ERR(mld->tzone));
+ mld->tzone = NULL;
+ return;
+ }
+
+ ret = thermal_zone_device_enable(mld->tzone);
+ if (ret) {
+ IWL_DEBUG_TEMP(mld, "Failed to enable thermal zone\n");
+ thermal_zone_device_unregister(mld->tzone);
+ mld->tzone = NULL;
+ }
+}
+
+/* budget in mWatt */
+static const u32 iwl_mld_cdev_budgets[] = {
+ 2400, /* cooling state 0 */
+ 2000, /* cooling state 1 */
+ 1800, /* cooling state 2 */
+ 1600, /* cooling state 3 */
+ 1400, /* cooling state 4 */
+ 1200, /* cooling state 5 */
+ 1000, /* cooling state 6 */
+ 900, /* cooling state 7 */
+ 800, /* cooling state 8 */
+ 700, /* cooling state 9 */
+ 650, /* cooling state 10 */
+ 600, /* cooling state 11 */
+ 550, /* cooling state 12 */
+ 500, /* cooling state 13 */
+ 450, /* cooling state 14 */
+ 400, /* cooling state 15 */
+ 350, /* cooling state 16 */
+ 300, /* cooling state 17 */
+ 250, /* cooling state 18 */
+ 200, /* cooling state 19 */
+ 150, /* cooling state 20 */
+};
+
+int iwl_mld_config_ctdp(struct iwl_mld *mld, u32 state,
+ enum iwl_ctdp_cmd_operation op)
+{
+ struct iwl_ctdp_cmd cmd = {
+ .operation = cpu_to_le32(op),
+ .budget = cpu_to_le32(iwl_mld_cdev_budgets[state]),
+ .window_size = 0,
+ };
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(PHY_OPS_GROUP, CTDP_CONFIG_CMD),
+ &cmd);
+
+ if (ret) {
+ IWL_ERR(mld, "cTDP command failed (err=%d)\n", ret);
+ return ret;
+ }
+
+ if (op == CTDP_CMD_OPERATION_START)
+ mld->cooling_dev.cur_state = state;
+
+ return 0;
+}
+
+static int iwl_mld_tcool_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = ARRAY_SIZE(iwl_mld_cdev_budgets) - 1;
+
+ return 0;
+}
+
+static int iwl_mld_tcool_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct iwl_mld *mld = (struct iwl_mld *)(cdev->devdata);
+
+ *state = mld->cooling_dev.cur_state;
+
+ return 0;
+}
+
+static int iwl_mld_tcool_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long new_state)
+{
+ struct iwl_mld *mld = (struct iwl_mld *)(cdev->devdata);
+ int ret;
+
+ wiphy_lock(mld->wiphy);
+
+ if (!mld->fw_status.running) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ if (new_state >= ARRAY_SIZE(iwl_mld_cdev_budgets)) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ret = iwl_mld_config_ctdp(mld, new_state, CTDP_CMD_OPERATION_START);
+
+unlock:
+ wiphy_unlock(mld->wiphy);
+ return ret;
+}
+
+static const struct thermal_cooling_device_ops tcooling_ops = {
+ .get_max_state = iwl_mld_tcool_get_max_state,
+ .get_cur_state = iwl_mld_tcool_get_cur_state,
+ .set_cur_state = iwl_mld_tcool_set_cur_state,
+};
+
+static void iwl_mld_cooling_device_register(struct iwl_mld *mld)
+{
+ char name[] = "iwlwifi";
+
+ BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
+
+ mld->cooling_dev.cdev =
+ thermal_cooling_device_register(name,
+ mld,
+ &tcooling_ops);
+
+ if (IS_ERR(mld->cooling_dev.cdev)) {
+ IWL_DEBUG_TEMP(mld,
+ "Failed to register to cooling device (err = %ld)\n",
+ PTR_ERR(mld->cooling_dev.cdev));
+ mld->cooling_dev.cdev = NULL;
+ return;
+ }
+}
+
+static void iwl_mld_thermal_zone_unregister(struct iwl_mld *mld)
+{
+ if (!mld->tzone)
+ return;
+
+ IWL_DEBUG_TEMP(mld, "Thermal zone device unregister\n");
+ thermal_zone_device_unregister(mld->tzone);
+ mld->tzone = NULL;
+}
+
+static void iwl_mld_cooling_device_unregister(struct iwl_mld *mld)
+{
+ if (!mld->cooling_dev.cdev)
+ return;
+
+ IWL_DEBUG_TEMP(mld, "Cooling device unregister\n");
+ thermal_cooling_device_unregister(mld->cooling_dev.cdev);
+ mld->cooling_dev.cdev = NULL;
+}
+#endif /* CONFIG_THERMAL */
+
+void iwl_mld_thermal_initialize(struct iwl_mld *mld)
+{
+ wiphy_delayed_work_init(&mld->ct_kill_exit_wk, iwl_mld_exit_ctkill);
+
+#ifdef CONFIG_THERMAL
+ iwl_mld_cooling_device_register(mld);
+ iwl_mld_thermal_zone_register(mld);
+#endif
+}
+
+void iwl_mld_thermal_exit(struct iwl_mld *mld)
+{
+ wiphy_delayed_work_cancel(mld->wiphy, &mld->ct_kill_exit_wk);
+
+#ifdef CONFIG_THERMAL
+ iwl_mld_cooling_device_unregister(mld);
+ iwl_mld_thermal_zone_unregister(mld);
+#endif
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/thermal.h b/drivers/net/wireless/intel/iwlwifi/mld/thermal.h
new file mode 100644
index 000000000000..8c8893331b05
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/thermal.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_thermal_h__
+#define __iwl_mld_thermal_h__
+
+#include "iwl-trans.h"
+
+struct iwl_mld;
+
+#ifdef CONFIG_THERMAL
+#include <linux/thermal.h>
+
+/**
+ * struct iwl_mld_cooling_device - iwlmld cooling (cTDP) device state
+ * @cur_state: current cooling state
+ * @cdev: the registered thermal cooling device
+ */
+struct iwl_mld_cooling_device {
+ u32 cur_state;
+ struct thermal_cooling_device *cdev;
+};
+
+int iwl_mld_config_ctdp(struct iwl_mld *mld, u32 state,
+ enum iwl_ctdp_cmd_operation op);
+#endif
+
+void iwl_mld_handle_temp_notif(struct iwl_mld *mld, struct iwl_rx_packet *pkt);
+void iwl_mld_handle_ct_kill_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+int iwl_mld_config_temp_report_ths(struct iwl_mld *mld);
+void iwl_mld_thermal_initialize(struct iwl_mld *mld);
+void iwl_mld_thermal_exit(struct iwl_mld *mld);
+
+#endif /* __iwl_mld_thermal_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/time_sync.c b/drivers/net/wireless/intel/iwlwifi/mld/time_sync.c
new file mode 100644
index 000000000000..50799f9bfccb
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/time_sync.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+
+#include "mld.h"
+#include "hcmd.h"
+#include "ptp.h"
+#include "time_sync.h"
+#include <linux/ieee80211.h>
+
+static int iwl_mld_init_time_sync(struct iwl_mld *mld, u32 protocols,
+ const u8 *addr)
+{
+ struct iwl_mld_time_sync_data *time_sync = kzalloc(sizeof(*time_sync),
+ GFP_KERNEL);
+
+ if (!time_sync)
+ return -ENOMEM;
+
+ time_sync->active_protocols = protocols;
+ ether_addr_copy(time_sync->peer_addr, addr);
+ skb_queue_head_init(&time_sync->frame_list);
+ rcu_assign_pointer(mld->time_sync, time_sync);
+
+ return 0;
+}
+
+int iwl_mld_time_sync_fw_config(struct iwl_mld *mld)
+{
+ struct iwl_time_sync_cfg_cmd cmd = {};
+ struct iwl_mld_time_sync_data *time_sync;
+ int err;
+
+ time_sync = wiphy_dereference(mld->wiphy, mld->time_sync);
+ if (!time_sync)
+ return -EINVAL;
+
+ cmd.protocols = cpu_to_le32(time_sync->active_protocols);
+ ether_addr_copy(cmd.peer_addr, time_sync->peer_addr);
+
+ err = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(DATA_PATH_GROUP,
+ WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD),
+ &cmd);
+ if (err)
+ IWL_ERR(mld, "Failed to send time sync cfg cmd: %d\n", err);
+
+ return err;
+}
+
+int iwl_mld_time_sync_config(struct iwl_mld *mld, const u8 *addr, u32 protocols)
+{
+ struct iwl_mld_time_sync_data *time_sync;
+ int err;
+
+ time_sync = wiphy_dereference(mld->wiphy, mld->time_sync);
+
+ /* The fw only supports one peer. We do allow reconfiguration of the
+ * same peer for cases of fw reset etc.
+ */
+ if (time_sync && time_sync->active_protocols &&
+ !ether_addr_equal(addr, time_sync->peer_addr)) {
+ IWL_DEBUG_INFO(mld, "Time sync: reject config for peer: %pM\n",
+ addr);
+ return -ENOBUFS;
+ }
+
+ if (protocols & ~(IWL_TIME_SYNC_PROTOCOL_TM |
+ IWL_TIME_SYNC_PROTOCOL_FTM))
+ return -EINVAL;
+
+ IWL_DEBUG_INFO(mld, "Time sync: set peer addr=%pM\n", addr);
+
+ iwl_mld_deinit_time_sync(mld);
+ err = iwl_mld_init_time_sync(mld, protocols, addr);
+ if (err)
+ return err;
+
+ err = iwl_mld_time_sync_fw_config(mld);
+ return err;
+}
+
+void iwl_mld_deinit_time_sync(struct iwl_mld *mld)
+{
+ struct iwl_mld_time_sync_data *time_sync =
+ wiphy_dereference(mld->wiphy, mld->time_sync);
+
+ if (!time_sync)
+ return;
+
+ RCU_INIT_POINTER(mld->time_sync, NULL);
+ skb_queue_purge(&time_sync->frame_list);
+ kfree_rcu(time_sync, rcu_head);
+}
+
+bool iwl_mld_time_sync_frame(struct iwl_mld *mld, struct sk_buff *skb, u8 *addr)
+{
+ struct iwl_mld_time_sync_data *time_sync;
+
+ rcu_read_lock();
+ time_sync = rcu_dereference(mld->time_sync);
+ if (time_sync && ether_addr_equal(time_sync->peer_addr, addr) &&
+ (ieee80211_is_timing_measurement(skb) || ieee80211_is_ftm(skb))) {
+ skb_queue_tail(&time_sync->frame_list, skb);
+ rcu_read_unlock();
+ return true;
+ }
+ rcu_read_unlock();
+
+ return false;
+}
+
+static bool iwl_mld_is_skb_match(struct sk_buff *skb, u8 *addr, u8 dialog_token)
+{
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ u8 skb_dialog_token;
+
+ if (ieee80211_is_timing_measurement(skb))
+ skb_dialog_token = mgmt->u.action.u.wnm_timing_msr.dialog_token;
+ else
+ skb_dialog_token = mgmt->u.action.u.ftm.dialog_token;
+
+ if ((ether_addr_equal(mgmt->sa, addr) ||
+ ether_addr_equal(mgmt->da, addr)) &&
+ skb_dialog_token == dialog_token)
+ return true;
+
+ return false;
+}
+
+static struct sk_buff *iwl_mld_time_sync_find_skb(struct iwl_mld *mld, u8 *addr,
+ u8 dialog_token)
+{
+ struct iwl_mld_time_sync_data *time_sync;
+ struct sk_buff *skb;
+
+ rcu_read_lock();
+
+ time_sync = rcu_dereference(mld->time_sync);
+ if (IWL_FW_CHECK(mld, !time_sync,
+ "Time sync notification but time sync is not initialized\n")) {
+ rcu_read_unlock();
+ return NULL;
+ }
+
+ /* The notifications are expected to arrive in the same order as the
+ * frames. If the incoming notification doesn't match the first SKB
+ * in the queue, it means there was no time sync notification for this
+ * SKB and it can be dropped.
+ */
+ while ((skb = skb_dequeue(&time_sync->frame_list))) {
+ if (iwl_mld_is_skb_match(skb, addr, dialog_token))
+ break;
+
+ kfree_skb(skb);
+ skb = NULL;
+ IWL_DEBUG_DROP(mld,
+ "Time sync: drop SKB without matching notification\n");
+ }
+ rcu_read_unlock();
+
+ return skb;
+}
+
+static u64 iwl_mld_get_64_bit(__le32 high, __le32 low)
+{
+ return ((u64)le32_to_cpu(high) << 32) | le32_to_cpu(low);
+}
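A minimal stand-alone sketch (sample values invented) of the combine-and-scale step that the notification handlers below perform before passing the timestamp to iwl_mld_ptp_get_adj_time(); in the driver the halves are __le32 and go through le32_to_cpu() first, while this sketch assumes host byte order:

    #include <stdint.h>
    #include <stdio.h>

    /* Combine two 32-bit halves (already in CPU byte order) into a 64-bit value. */
    static uint64_t combine_64(uint32_t high, uint32_t low)
    {
            return ((uint64_t)high << 32) | low;
    }

    int main(void)
    {
            /* Hypothetical t2 halves as reported by the firmware, in 10 ns units. */
            uint32_t t2_hi = 0x00000001, t2_lo = 0x80000000;
            uint64_t ts_10ns = combine_64(t2_hi, t2_lo);
            uint64_t ts_ns = ts_10ns * 10;  /* same *10 scaling as the handlers */

            printf("t2 = %llu (10 ns units) = %llu ns\n",
                   (unsigned long long)ts_10ns, (unsigned long long)ts_ns);
            return 0;
    }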
+
+void iwl_mld_handle_time_msmt_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct ptp_data *data = &mld->ptp_data;
+ struct iwl_time_msmt_notify *notif = (void *)pkt->data;
+ struct ieee80211_rx_status *rx_status;
+ struct skb_shared_hwtstamps *shwt;
+ u64 ts_10ns;
+ struct sk_buff *skb =
+ iwl_mld_time_sync_find_skb(mld, notif->peer_addr,
+ le32_to_cpu(notif->dialog_token));
+ u64 adj_time;
+
+ if (IWL_FW_CHECK(mld, !skb, "Time sync event but no pending skb\n"))
+ return;
+
+ spin_lock_bh(&data->lock);
+ ts_10ns = iwl_mld_get_64_bit(notif->t2_hi, notif->t2_lo);
+ adj_time = iwl_mld_ptp_get_adj_time(mld, ts_10ns * 10);
+ shwt = skb_hwtstamps(skb);
+ shwt->hwtstamp = ktime_set(0, adj_time);
+
+ ts_10ns = iwl_mld_get_64_bit(notif->t3_hi, notif->t3_lo);
+ adj_time = iwl_mld_ptp_get_adj_time(mld, ts_10ns * 10);
+ rx_status = IEEE80211_SKB_RXCB(skb);
+ rx_status->ack_tx_hwtstamp = ktime_set(0, adj_time);
+ spin_unlock_bh(&data->lock);
+
+ IWL_DEBUG_INFO(mld,
+ "Time sync: RX event - report frame t2=%llu t3=%llu\n",
+ ktime_to_ns(shwt->hwtstamp),
+ ktime_to_ns(rx_status->ack_tx_hwtstamp));
+ ieee80211_rx_napi(mld->hw, NULL, skb, NULL);
+}
+
+void iwl_mld_handle_time_sync_confirm_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct ptp_data *data = &mld->ptp_data;
+ struct iwl_time_msmt_cfm_notify *notif = (void *)pkt->data;
+ struct ieee80211_tx_status status = {};
+ struct skb_shared_hwtstamps *shwt;
+ u64 ts_10ns, adj_time;
+
+ status.skb =
+ iwl_mld_time_sync_find_skb(mld, notif->peer_addr,
+ le32_to_cpu(notif->dialog_token));
+
+ if (IWL_FW_CHECK(mld, !status.skb,
+ "Time sync confirm but no pending skb\n"))
+ return;
+
+ spin_lock_bh(&data->lock);
+ ts_10ns = iwl_mld_get_64_bit(notif->t1_hi, notif->t1_lo);
+ adj_time = iwl_mld_ptp_get_adj_time(mld, ts_10ns * 10);
+ shwt = skb_hwtstamps(status.skb);
+ shwt->hwtstamp = ktime_set(0, adj_time);
+
+ ts_10ns = iwl_mld_get_64_bit(notif->t4_hi, notif->t4_lo);
+ adj_time = iwl_mld_ptp_get_adj_time(mld, ts_10ns * 10);
+ status.info = IEEE80211_SKB_CB(status.skb);
+ status.ack_hwtstamp = ktime_set(0, adj_time);
+ spin_unlock_bh(&data->lock);
+
+ IWL_DEBUG_INFO(mld,
+ "Time sync: TX event - report frame t1=%llu t4=%llu\n",
+ ktime_to_ns(shwt->hwtstamp),
+ ktime_to_ns(status.ack_hwtstamp));
+ ieee80211_tx_status_ext(mld->hw, &status);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/time_sync.h b/drivers/net/wireless/intel/iwlwifi/mld/time_sync.h
new file mode 100644
index 000000000000..2d4c5512e961
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/time_sync.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+#ifndef __iwl_mld_time_sync_h__
+#define __iwl_mld_time_sync_h__
+
+struct iwl_mld_time_sync_data {
+ struct rcu_head rcu_head;
+ u8 peer_addr[ETH_ALEN];
+ u32 active_protocols;
+ struct sk_buff_head frame_list;
+};
+
+int iwl_mld_time_sync_config(struct iwl_mld *mld, const u8 *addr,
+ u32 protocols);
+int iwl_mld_time_sync_fw_config(struct iwl_mld *mld);
+void iwl_mld_deinit_time_sync(struct iwl_mld *mld);
+void iwl_mld_handle_time_msmt_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+bool iwl_mld_time_sync_frame(struct iwl_mld *mld, struct sk_buff *skb,
+ u8 *addr);
+void iwl_mld_handle_time_sync_confirm_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+
+#endif /* __iwl_mld_time_sync_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tlc.c b/drivers/net/wireless/intel/iwlwifi/mld/tlc.c
new file mode 100644
index 000000000000..f054cc921d9d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tlc.c
@@ -0,0 +1,700 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include <net/mac80211.h>
+
+#include "tlc.h"
+#include "hcmd.h"
+#include "sta.h"
+
+#include "fw/api/rs.h"
+#include "fw/api/context.h"
+#include "fw/api/dhc.h"
+
+static u8 iwl_mld_fw_bw_from_sta_bw(const struct ieee80211_link_sta *link_sta)
+{
+ switch (link_sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_320:
+ return IWL_TLC_MNG_CH_WIDTH_320MHZ;
+ case IEEE80211_STA_RX_BW_160:
+ return IWL_TLC_MNG_CH_WIDTH_160MHZ;
+ case IEEE80211_STA_RX_BW_80:
+ return IWL_TLC_MNG_CH_WIDTH_80MHZ;
+ case IEEE80211_STA_RX_BW_40:
+ return IWL_TLC_MNG_CH_WIDTH_40MHZ;
+ case IEEE80211_STA_RX_BW_20:
+ default:
+ return IWL_TLC_MNG_CH_WIDTH_20MHZ;
+ }
+}
+
+static __le16
+iwl_mld_get_tlc_cmd_flags(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ const struct ieee80211_sta_he_cap *own_he_cap,
+ const struct ieee80211_sta_eht_cap *own_eht_cap)
+{
+ struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap;
+ struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap;
+ bool has_vht = vht_cap->vht_supported;
+ u16 flags = 0;
+
+ /* STBC flags */
+ if (mld->cfg->ht_params->stbc &&
+ (hweight8(iwl_mld_get_valid_tx_ant(mld)) > 1)) {
+ if (he_cap->has_he && he_cap->he_cap_elem.phy_cap_info[2] &
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
+ flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+ else if (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
+ flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+ else if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)
+ flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+ }
+
+ /* LDPC */
+ if (mld->cfg->ht_params->ldpc &&
+ ((ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) ||
+ (has_vht && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
+ flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
+
+ if (he_cap->has_he && (he_cap->he_cap_elem.phy_cap_info[1] &
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
+ flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
+
+ if (own_he_cap &&
+ !(own_he_cap->he_cap_elem.phy_cap_info[1] &
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
+ flags &= ~IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
+
+ /* DCM */
+ if (he_cap->has_he &&
+ (he_cap->he_cap_elem.phy_cap_info[3] &
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK &&
+ own_he_cap &&
+ own_he_cap->he_cap_elem.phy_cap_info[3] &
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK))
+ flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK;
+
+ /* Extra EHT LTF */
+ if (own_eht_cap &&
+ own_eht_cap->eht_cap_elem.phy_cap_info[5] &
+ IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF &&
+ link_sta->eht_cap.has_eht &&
+ link_sta->eht_cap.eht_cap_elem.phy_cap_info[5] &
+ IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF) {
+ flags |= IWL_TLC_MNG_CFG_FLAGS_EHT_EXTRA_LTF_MSK;
+ }
+
+ return cpu_to_le16(flags);
+}
+
+static u8 iwl_mld_get_fw_chains(struct iwl_mld *mld)
+{
+ u8 chains = iwl_mld_get_valid_tx_ant(mld);
+ u8 fw_chains = 0;
+
+ if (chains & ANT_A)
+ fw_chains |= IWL_TLC_MNG_CHAIN_A_MSK;
+ if (chains & ANT_B)
+ fw_chains |= IWL_TLC_MNG_CHAIN_B_MSK;
+
+ return fw_chains;
+}
+
+static u8 iwl_mld_get_fw_sgi(struct ieee80211_link_sta *link_sta)
+{
+ struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap;
+ struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap;
+ u8 sgi_chwidths = 0;
+
+ /* If the association supports HE, HT/VHT rates will never be used for
+ * Tx and therefore there's no need to set the
+ * sgi-per-channel-width-support bits
+ */
+ if (he_cap->has_he)
+ return 0;
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
+ sgi_chwidths |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ);
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
+ sgi_chwidths |= BIT(IWL_TLC_MNG_CH_WIDTH_40MHZ);
+ if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+ sgi_chwidths |= BIT(IWL_TLC_MNG_CH_WIDTH_80MHZ);
+ if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+ sgi_chwidths |= BIT(IWL_TLC_MNG_CH_WIDTH_160MHZ);
+
+ return sgi_chwidths;
+}
+
+static int
+iwl_mld_get_highest_fw_mcs(const struct ieee80211_sta_vht_cap *vht_cap,
+ int nss)
+{
+ u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) &
+ (0x3 << (2 * (nss - 1)));
+ rx_mcs >>= (2 * (nss - 1));
+
+ switch (rx_mcs) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_7:
+ return IWL_TLC_MNG_HT_RATE_MCS7;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8:
+ return IWL_TLC_MNG_HT_RATE_MCS8;
+ case IEEE80211_VHT_MCS_SUPPORT_0_9:
+ return IWL_TLC_MNG_HT_RATE_MCS9;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ return 0;
+}
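iwl_mld_get_highest_fw_mcs() pulls the 2-bit per-NSS field out of the VHT RX MCS map (two bits per spatial stream, NSS 1 in bits 0-1, NSS 2 in bits 2-3, and so on). A small user-space sketch of that extraction, using an invented MCS map value:

    #include <stdint.h>
    #include <stdio.h>

    /* Extract the 2-bit "highest supported MCS" field for a given NSS (1-based)
     * from a VHT MCS map, mirroring the shift-and-mask in the driver.
     */
    static unsigned int vht_mcs_for_nss(uint16_t mcs_map, int nss)
    {
            return (mcs_map >> (2 * (nss - 1))) & 0x3;
    }

    int main(void)
    {
            /* Hypothetical map: NSS1 -> 0-9 (2), NSS2 -> 0-8 (1), NSS3+ -> not supported (3) */
            uint16_t mcs_map = 0xfff6;

            for (int nss = 1; nss <= 4; nss++)
                    printf("NSS %d: field %u\n", nss, vht_mcs_for_nss(mcs_map, nss));
            return 0;
    }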
+
+static void
+iwl_mld_fill_vht_rates(const struct ieee80211_link_sta *link_sta,
+ const struct ieee80211_sta_vht_cap *vht_cap,
+ struct iwl_tlc_config_cmd_v4 *cmd)
+{
+ u16 supp;
+ int i, highest_mcs;
+ u8 max_nss = link_sta->rx_nss;
+ struct ieee80211_vht_cap ieee_vht_cap = {
+ .vht_cap_info = cpu_to_le32(vht_cap->cap),
+ .supp_mcs = vht_cap->vht_mcs,
+ };
+
+ /* the station supports only a single receive chain */
+ if (link_sta->smps_mode == IEEE80211_SMPS_STATIC)
+ max_nss = 1;
+
+ for (i = 0; i < max_nss && i < IWL_TLC_NSS_MAX; i++) {
+ int nss = i + 1;
+
+ highest_mcs = iwl_mld_get_highest_fw_mcs(vht_cap, nss);
+ if (!highest_mcs)
+ continue;
+
+ supp = BIT(highest_mcs + 1) - 1;
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9);
+
+ cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(supp);
+ /* Check if VHT extended NSS indicates that the bandwidth/NSS
+ * configuration is supported - only for MCS 0 since we already
+ * decoded the MCS bits anyway ourselves.
+ */
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160 &&
+ ieee80211_get_vht_max_nss(&ieee_vht_cap,
+ IEEE80211_VHT_CHANWIDTH_160MHZ,
+ 0, true, nss) >= nss)
+ cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] =
+ cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80];
+ }
+}
+
+static u16 iwl_mld_he_mac80211_mcs_to_fw_mcs(u16 mcs)
+{
+ switch (mcs) {
+ case IEEE80211_HE_MCS_SUPPORT_0_7:
+ return BIT(IWL_TLC_MNG_HT_RATE_MCS7 + 1) - 1;
+ case IEEE80211_HE_MCS_SUPPORT_0_9:
+ return BIT(IWL_TLC_MNG_HT_RATE_MCS9 + 1) - 1;
+ case IEEE80211_HE_MCS_SUPPORT_0_11:
+ return BIT(IWL_TLC_MNG_HT_RATE_MCS11 + 1) - 1;
+ case IEEE80211_HE_MCS_NOT_SUPPORTED:
+ return 0;
+ }
+
+ WARN(1, "invalid HE MCS %d\n", mcs);
+ return 0;
+}
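The helper above turns a "highest supported MCS" enum value into a contiguous rate bitmask via BIT(highest + 1) - 1, so "MCS 0-9 supported" becomes a mask with bits 0..9 set. A short sketch of that mapping, assuming (for illustration) that the IWL_TLC_MNG_HT_RATE_MCSx values equal the MCS indices:

    #include <stdio.h>

    /* Build a contiguous bitmask covering MCS 0..highest, as the driver does
     * with BIT(IWL_TLC_MNG_HT_RATE_MCSx + 1) - 1.
     */
    static unsigned int mcs_mask(unsigned int highest_mcs)
    {
            return (1u << (highest_mcs + 1)) - 1;
    }

    int main(void)
    {
            printf("0-7  -> 0x%03x\n", mcs_mask(7));   /* 0x0ff */
            printf("0-9  -> 0x%03x\n", mcs_mask(9));   /* 0x3ff */
            printf("0-11 -> 0x%03x\n", mcs_mask(11));  /* 0xfff */
            return 0;
    }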
+
+static void
+iwl_mld_fill_he_rates(const struct ieee80211_link_sta *link_sta,
+ const struct ieee80211_sta_he_cap *own_he_cap,
+ struct iwl_tlc_config_cmd_v4 *cmd)
+{
+ const struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap;
+ u16 mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
+ u16 mcs_80 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+ u16 tx_mcs_80 = le16_to_cpu(own_he_cap->he_mcs_nss_supp.tx_mcs_80);
+ u16 tx_mcs_160 = le16_to_cpu(own_he_cap->he_mcs_nss_supp.tx_mcs_160);
+ int i;
+ u8 nss = link_sta->rx_nss;
+
+ /* the station supports only a single receive chain */
+ if (link_sta->smps_mode == IEEE80211_SMPS_STATIC)
+ nss = 1;
+
+ for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) {
+ u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3;
+ u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3;
+ u16 _tx_mcs_160 = (tx_mcs_160 >> (2 * i)) & 0x3;
+ u16 _tx_mcs_80 = (tx_mcs_80 >> (2 * i)) & 0x3;
+
+ /* If one side doesn't support - mark both as not supporting */
+ if (_mcs_80 == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ _tx_mcs_80 == IEEE80211_HE_MCS_NOT_SUPPORTED) {
+ _mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ _tx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ }
+ if (_mcs_80 > _tx_mcs_80)
+ _mcs_80 = _tx_mcs_80;
+ cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] =
+ cpu_to_le16(iwl_mld_he_mac80211_mcs_to_fw_mcs(_mcs_80));
+
+ /* If one side doesn't support - mark both as not supporting */
+ if (_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ _tx_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED) {
+ _mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ _tx_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ }
+ if (_mcs_160 > _tx_mcs_160)
+ _mcs_160 = _tx_mcs_160;
+ cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] =
+ cpu_to_le16(iwl_mld_he_mac80211_mcs_to_fw_mcs(_mcs_160));
+ }
+}
+
+static void iwl_mld_set_eht_mcs(__le16 ht_rates[][3],
+ enum IWL_TLC_MCS_PER_BW bw,
+ u8 max_nss, u16 mcs_msk)
+{
+ if (max_nss >= 2)
+ ht_rates[IWL_TLC_NSS_2][bw] |= cpu_to_le16(mcs_msk);
+
+ if (max_nss >= 1)
+ ht_rates[IWL_TLC_NSS_1][bw] |= cpu_to_le16(mcs_msk);
+}
+
+static const
+struct ieee80211_eht_mcs_nss_supp_bw *
+iwl_mld_get_eht_mcs_of_bw(enum IWL_TLC_MCS_PER_BW bw,
+ const struct ieee80211_eht_mcs_nss_supp *eht_mcs)
+{
+ switch (bw) {
+ case IWL_TLC_MCS_PER_BW_80:
+ return &eht_mcs->bw._80;
+ case IWL_TLC_MCS_PER_BW_160:
+ return &eht_mcs->bw._160;
+ case IWL_TLC_MCS_PER_BW_320:
+ return &eht_mcs->bw._320;
+ default:
+ return NULL;
+ }
+}
+
+static u8 iwl_mld_get_eht_max_nss(u8 rx_nss, u8 tx_nss)
+{
+ u8 tx = u8_get_bits(tx_nss, IEEE80211_EHT_MCS_NSS_TX);
+ u8 rx = u8_get_bits(rx_nss, IEEE80211_EHT_MCS_NSS_RX);
+ /* The max NSS that can be used is the min of our TX capability
+ * and the peer's RX capability.
+ */
+ return min(tx, rx);
+}
+
+#define MAX_NSS_MCS(mcs_num, rx, tx) \
+ iwl_mld_get_eht_max_nss((rx)->rx_tx_mcs ##mcs_num## _max_nss, \
+ (tx)->rx_tx_mcs ##mcs_num## _max_nss)
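MAX_NSS_MCS() reduces a pair of EHT "max NSS" fields (the peer's RX capability and our TX capability) to the usable NSS by taking the minimum of the two nibbles. A stand-alone illustration of that reduction, with the nibble layout (RX in the low nibble, TX in the high nibble) mirroring the IEEE80211_EHT_MCS_NSS_RX/TX masks and the capability values invented:

    #include <stdio.h>

    #define EHT_MCS_NSS_RX 0x0f     /* low nibble: max RX NSS */
    #define EHT_MCS_NSS_TX 0xf0     /* high nibble: max TX NSS */

    /* Usable NSS = min(our TX NSS, peer RX NSS), as in iwl_mld_get_eht_max_nss(). */
    static unsigned int eht_max_nss(unsigned char peer_rx, unsigned char own_tx)
    {
            unsigned int rx = peer_rx & EHT_MCS_NSS_RX;
            unsigned int tx = (own_tx & EHT_MCS_NSS_TX) >> 4;

            return tx < rx ? tx : rx;
    }

    int main(void)
    {
            /* Hypothetical capabilities: peer can RX 2 streams, we can TX 4. */
            unsigned char peer = 0x22, own = 0x44;

            printf("usable NSS = %u\n", eht_max_nss(peer, own));    /* 2 */
            return 0;
    }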
+
+static void
+iwl_mld_fill_eht_rates(struct ieee80211_vif *vif,
+ const struct ieee80211_link_sta *link_sta,
+ const struct ieee80211_sta_he_cap *own_he_cap,
+ const struct ieee80211_sta_eht_cap *own_eht_cap,
+ struct iwl_tlc_config_cmd_v4 *cmd)
+{
+ /* peer RX mcs capa */
+ const struct ieee80211_eht_mcs_nss_supp *eht_rx_mcs =
+ &link_sta->eht_cap.eht_mcs_nss_supp;
+ /* our TX mcs capa */
+ const struct ieee80211_eht_mcs_nss_supp *eht_tx_mcs =
+ &own_eht_cap->eht_mcs_nss_supp;
+
+ enum IWL_TLC_MCS_PER_BW bw;
+ struct ieee80211_eht_mcs_nss_supp_20mhz_only mcs_rx_20;
+ struct ieee80211_eht_mcs_nss_supp_20mhz_only mcs_tx_20;
+
+ /* peer is 20 MHz only */
+ if (vif->type == NL80211_IFTYPE_AP &&
+ !(link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
+ mcs_rx_20 = eht_rx_mcs->only_20mhz;
+ } else {
+ mcs_rx_20.rx_tx_mcs7_max_nss =
+ eht_rx_mcs->bw._80.rx_tx_mcs9_max_nss;
+ mcs_rx_20.rx_tx_mcs9_max_nss =
+ eht_rx_mcs->bw._80.rx_tx_mcs9_max_nss;
+ mcs_rx_20.rx_tx_mcs11_max_nss =
+ eht_rx_mcs->bw._80.rx_tx_mcs11_max_nss;
+ mcs_rx_20.rx_tx_mcs13_max_nss =
+ eht_rx_mcs->bw._80.rx_tx_mcs13_max_nss;
+ }
+
+ /* NIC is capable of 20 MHz only */
+ if (!(own_he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
+ mcs_tx_20 = eht_tx_mcs->only_20mhz;
+ } else {
+ mcs_tx_20.rx_tx_mcs7_max_nss =
+ eht_tx_mcs->bw._80.rx_tx_mcs9_max_nss;
+ mcs_tx_20.rx_tx_mcs9_max_nss =
+ eht_tx_mcs->bw._80.rx_tx_mcs9_max_nss;
+ mcs_tx_20.rx_tx_mcs11_max_nss =
+ eht_tx_mcs->bw._80.rx_tx_mcs11_max_nss;
+ mcs_tx_20.rx_tx_mcs13_max_nss =
+ eht_tx_mcs->bw._80.rx_tx_mcs13_max_nss;
+ }
+
+ /* rates for 20/40/80 MHz */
+ bw = IWL_TLC_MCS_PER_BW_80;
+ iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
+ MAX_NSS_MCS(7, &mcs_rx_20, &mcs_tx_20),
+ GENMASK(7, 0));
+ iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
+ MAX_NSS_MCS(9, &mcs_rx_20, &mcs_tx_20),
+ GENMASK(9, 8));
+ iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
+ MAX_NSS_MCS(11, &mcs_rx_20, &mcs_tx_20),
+ GENMASK(11, 10));
+ iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
+ MAX_NSS_MCS(13, &mcs_rx_20, &mcs_tx_20),
+ GENMASK(13, 12));
+
+ /* rates for 160/320 MHz */
+ for (bw = IWL_TLC_MCS_PER_BW_160; bw <= IWL_TLC_MCS_PER_BW_320; bw++) {
+ const struct ieee80211_eht_mcs_nss_supp_bw *mcs_rx =
+ iwl_mld_get_eht_mcs_of_bw(bw, eht_rx_mcs);
+ const struct ieee80211_eht_mcs_nss_supp_bw *mcs_tx =
+ iwl_mld_get_eht_mcs_of_bw(bw, eht_tx_mcs);
+
+ /* got unsupported index for bw */
+ if (!mcs_rx || !mcs_tx)
+ continue;
+
+ /* break out if we don't support the bandwidth */
+ if (cmd->max_ch_width < (bw + IWL_TLC_MNG_CH_WIDTH_80MHZ))
+ break;
+
+ iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
+ MAX_NSS_MCS(9, mcs_rx, mcs_tx),
+ GENMASK(9, 0));
+ iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
+ MAX_NSS_MCS(11, mcs_rx, mcs_tx),
+ GENMASK(11, 10));
+ iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
+ MAX_NSS_MCS(13, mcs_rx, mcs_tx),
+ GENMASK(13, 12));
+ }
+
+ /* the station supports only a single receive chain */
+ if (link_sta->smps_mode == IEEE80211_SMPS_STATIC ||
+ link_sta->rx_nss < 2)
+ memset(cmd->ht_rates[IWL_TLC_NSS_2], 0,
+ sizeof(cmd->ht_rates[IWL_TLC_NSS_2]));
+}
+
+static void
+iwl_mld_fill_supp_rates(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ struct ieee80211_supported_band *sband,
+ const struct ieee80211_sta_he_cap *own_he_cap,
+ const struct ieee80211_sta_eht_cap *own_eht_cap,
+ struct iwl_tlc_config_cmd_v4 *cmd)
+{
+ int i;
+ u16 non_ht_rates = 0;
+ unsigned long rates_bitmap;
+ const struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
+ const struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap;
+ const struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap;
+
+ /* non HT rates */
+ rates_bitmap = link_sta->supp_rates[sband->band];
+ for_each_set_bit(i, &rates_bitmap, BITS_PER_LONG)
+ non_ht_rates |= BIT(sband->bitrates[i].hw_value);
+
+ cmd->non_ht_rates = cpu_to_le16(non_ht_rates);
+ cmd->mode = IWL_TLC_MNG_MODE_NON_HT;
+
+ if (link_sta->eht_cap.has_eht && own_he_cap && own_eht_cap) {
+ cmd->mode = IWL_TLC_MNG_MODE_EHT;
+ iwl_mld_fill_eht_rates(vif, link_sta, own_he_cap,
+ own_eht_cap, cmd);
+ } else if (he_cap->has_he && own_he_cap) {
+ cmd->mode = IWL_TLC_MNG_MODE_HE;
+ iwl_mld_fill_he_rates(link_sta, own_he_cap, cmd);
+ } else if (vht_cap->vht_supported) {
+ cmd->mode = IWL_TLC_MNG_MODE_VHT;
+ iwl_mld_fill_vht_rates(link_sta, vht_cap, cmd);
+ } else if (ht_cap->ht_supported) {
+ cmd->mode = IWL_TLC_MNG_MODE_HT;
+ cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_MCS_PER_BW_80] =
+ cpu_to_le16(ht_cap->mcs.rx_mask[0]);
+
+ /* the station supports only a single receive chain */
+ if (link_sta->smps_mode == IEEE80211_SMPS_STATIC)
+ cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] =
+ 0;
+ else
+ cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] =
+ cpu_to_le16(ht_cap->mcs.rx_mask[1]);
+ }
+}
+
+static void iwl_mld_send_tlc_cmd(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ enum nl80211_band band)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+ struct ieee80211_supported_band *sband = mld->hw->wiphy->bands[band];
+ const struct ieee80211_sta_he_cap *own_he_cap =
+ ieee80211_get_he_iftype_cap_vif(sband, vif);
+ const struct ieee80211_sta_eht_cap *own_eht_cap =
+ ieee80211_get_eht_iftype_cap_vif(sband, vif);
+ struct iwl_tlc_config_cmd_v4 cmd = {
+ /* For AP mode, use 20 MHz until the STA is authorized */
+ .max_ch_width = mld_sta->sta_state > IEEE80211_STA_ASSOC ?
+ iwl_mld_fw_bw_from_sta_bw(link_sta) :
+ IWL_TLC_MNG_CH_WIDTH_20MHZ,
+ .flags = iwl_mld_get_tlc_cmd_flags(mld, vif, link_sta,
+ own_he_cap, own_eht_cap),
+ .chains = iwl_mld_get_fw_chains(mld),
+ .sgi_ch_width_supp = iwl_mld_get_fw_sgi(link_sta),
+ .max_mpdu_len = cpu_to_le16(link_sta->agg.max_amsdu_len),
+ };
+ int fw_sta_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
+ int ret;
+
+ if (fw_sta_id < 0)
+ return;
+
+ cmd.sta_id = fw_sta_id;
+
+ iwl_mld_fill_supp_rates(mld, vif, link_sta, sband,
+ own_he_cap, own_eht_cap,
+ &cmd);
+
+ IWL_DEBUG_RATE(mld,
+ "TLC CONFIG CMD, sta_id=%d, max_ch_width=%d, mode=%d\n",
+ cmd.sta_id, cmd.max_ch_width, cmd.mode);
+
+ /* Send async since this can be called within a RCU-read section */
+ ret = iwl_mld_send_cmd_with_flags_pdu(mld, WIDE_ID(DATA_PATH_GROUP,
+ TLC_MNG_CONFIG_CMD),
+ CMD_ASYNC, &cmd);
+ if (ret)
+ IWL_ERR(mld, "Failed to send TLC cmd (%d)\n", ret);
+}
+
+int iwl_mld_send_tlc_dhc(struct iwl_mld *mld, u8 sta_id, u32 type, u32 data)
+{
+ struct {
+ struct iwl_dhc_cmd dhc;
+ struct iwl_dhc_tlc_cmd tlc;
+ } __packed cmd = {
+ .tlc.sta_id = sta_id,
+ .tlc.type = cpu_to_le32(type),
+ .tlc.data[0] = cpu_to_le32(data),
+ .dhc.length = cpu_to_le32(sizeof(cmd.tlc) >> 2),
+ .dhc.index_and_mask =
+ cpu_to_le32(DHC_TABLE_INTEGRATION | DHC_TARGET_UMAC |
+ DHC_INTEGRATION_TLC_DEBUG_CONFIG),
+ };
+ int ret;
+
+ ret = iwl_mld_send_cmd_with_flags_pdu(mld,
+ WIDE_ID(IWL_ALWAYS_LONG_GROUP,
+ DEBUG_HOST_COMMAND),
+ CMD_ASYNC, &cmd);
+ IWL_DEBUG_RATE(mld, "sta_id %d, type: 0x%X, value: 0x%X, ret%d\n",
+ sta_id, type, data, ret);
+ return ret;
+}
+
+void iwl_mld_config_tlc_link(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+ enum nl80211_band band;
+
+ if (WARN_ON_ONCE(!link_conf->chanreq.oper.chan))
+ return;
+
+ /* Before we have information about a station, configure the A-MSDU RC
+ * limit such that iwlmld and mac80211 would not be allowed to build
+ * A-MSDUs.
+ */
+ if (mld_sta->sta_state < IEEE80211_STA_ASSOC) {
+ link_sta->agg.max_rc_amsdu_len = 1;
+ ieee80211_sta_recalc_aggregates(link_sta->sta);
+ }
+
+ band = link_conf->chanreq.oper.chan->band;
+ iwl_mld_send_tlc_cmd(mld, vif, link_sta, band);
+}
+
+void iwl_mld_config_tlc(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ieee80211_bss_conf *link;
+ int link_id;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ for_each_vif_active_link(vif, link, link_id) {
+ struct ieee80211_link_sta *link_sta =
+ link_sta_dereference_check(sta, link_id);
+
+ if (!link || !link_sta)
+ continue;
+
+ iwl_mld_config_tlc_link(mld, vif, link, link_sta);
+ }
+}
+
+static u16
+iwl_mld_get_amsdu_size_of_tid(struct iwl_mld *mld,
+ struct ieee80211_link_sta *link_sta,
+ unsigned int tid)
+{
+ struct ieee80211_sta *sta = link_sta->sta;
+ struct ieee80211_vif *vif = iwl_mld_sta_from_mac80211(sta)->vif;
+ const u8 tid_to_mac80211_ac[] = {
+ IEEE80211_AC_BE,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_VO,
+ };
+ unsigned int result = link_sta->agg.max_rc_amsdu_len;
+ u8 ac, txf, lmac;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* Don't send an AMSDU that will be longer than the TXF.
+ * Add a security margin of 256 for the TX command + headers.
+ * We also want to have the start of the next packet inside the
+ * fifo to be able to send bursts.
+ */
+
+ if (WARN_ON(tid >= ARRAY_SIZE(tid_to_mac80211_ac)))
+ return 0;
+
+ ac = tid_to_mac80211_ac[tid];
+
+ /* For HE redirect to trigger based fifos */
+ if (link_sta->he_cap.has_he)
+ ac += 4;
+
+ txf = iwl_mld_mac80211_ac_to_fw_tx_fifo(ac);
+
+ /* Only one link: take the lmac according to the band */
+ if (hweight16(sta->valid_links) <= 1) {
+ enum nl80211_band band;
+ struct ieee80211_bss_conf *link =
+ wiphy_dereference(mld->wiphy,
+ vif->link_conf[link_sta->link_id]);
+
+ if (WARN_ON(!link || !link->chanreq.oper.chan))
+ band = NL80211_BAND_2GHZ;
+ else
+ band = link->chanreq.oper.chan->band;
+ lmac = iwl_mld_get_lmac_id(mld, band);
+
+ /* More than one link but with 2 lmacs: take the minimum */
+ } else if (fw_has_capa(&mld->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CDB_SUPPORT)) {
+ lmac = IWL_LMAC_5G_INDEX;
+ result = min_t(unsigned int, result,
+ mld->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
+ lmac = IWL_LMAC_24G_INDEX;
+ /* More than one link but only one lmac */
+ } else {
+ lmac = IWL_LMAC_24G_INDEX;
+ }
+
+ return min_t(unsigned int, result,
+ mld->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
+}
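iwl_mld_get_amsdu_size_of_tid() above caps the per-TID A-MSDU length at the relevant TX FIFO size minus a 256-byte margin for the TX command and headers. A compact sketch of that clamping rule (the rate-control limit and FIFO size here are invented):

    #include <stdio.h>

    /* Clamp the rate-control A-MSDU limit to what fits in the TX FIFO,
     * keeping a 256-byte margin for the TX command + headers, as the
     * driver does per lmac/fifo.
     */
    static unsigned int amsdu_limit(unsigned int max_rc_amsdu_len,
                                    unsigned int txfifo_size)
    {
            unsigned int fifo_limit = txfifo_size - 256;

            return max_rc_amsdu_len < fifo_limit ? max_rc_amsdu_len : fifo_limit;
    }

    int main(void)
    {
            /* Hypothetical numbers: rate control allows 11454 bytes, FIFO is 8 KiB. */
            printf("limit = %u\n", amsdu_limit(11454, 8192));       /* 7936 */
            return 0;
    }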
+
+void iwl_mld_handle_tlc_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_tlc_update_notif *notif = (void *)pkt->data;
+ struct ieee80211_link_sta *link_sta;
+ u32 flags = le32_to_cpu(notif->flags);
+ u32 enabled;
+ u16 size;
+
+ if (IWL_FW_CHECK(mld, notif->sta_id >= mld->fw->ucode_capa.num_stations,
+ "Invalid sta id (%d) in TLC notification\n",
+ notif->sta_id))
+ return;
+
+ link_sta = wiphy_dereference(mld->wiphy,
+ mld->fw_id_to_link_sta[notif->sta_id]);
+
+ if (WARN(IS_ERR_OR_NULL(link_sta),
+ "link_sta of sta id (%d) doesn't exist\n", notif->sta_id))
+ return;
+
+ if (flags & IWL_TLC_NOTIF_FLAG_RATE) {
+ struct iwl_mld_link_sta *mld_link_sta =
+ iwl_mld_link_sta_from_mac80211(link_sta);
+ char pretty_rate[100];
+
+ if (WARN_ON(!mld_link_sta))
+ return;
+
+ mld_link_sta->last_rate_n_flags = le32_to_cpu(notif->rate);
+
+ rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate),
+ mld_link_sta->last_rate_n_flags);
+ IWL_DEBUG_RATE(mld, "TLC notif: new rate = %s\n", pretty_rate);
+ }
+
+ /* We are done processing the notif */
+ if (!(flags & IWL_TLC_NOTIF_FLAG_AMSDU))
+ return;
+
+ enabled = le32_to_cpu(notif->amsdu_enabled);
+ size = le32_to_cpu(notif->amsdu_size);
+
+ if (size < 2000) {
+ size = 0;
+ enabled = 0;
+ }
+
+ if (IWL_FW_CHECK(mld, size > link_sta->agg.max_amsdu_len,
+ "Invalid AMSDU len in TLC notif: %d (Max AMSDU len: %d)\n",
+ size, link_sta->agg.max_amsdu_len))
+ return;
+
+ link_sta->agg.max_rc_amsdu_len = size;
+
+ for (int i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ if (enabled & BIT(i))
+ link_sta->agg.max_tid_amsdu_len[i] =
+ iwl_mld_get_amsdu_size_of_tid(mld, link_sta, i);
+ else
+ link_sta->agg.max_tid_amsdu_len[i] = 1;
+ }
+
+ ieee80211_sta_recalc_aggregates(link_sta->sta);
+
+ IWL_DEBUG_RATE(mld,
+ "AMSDU update. AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n",
+ le32_to_cpu(notif->amsdu_size), size, enabled);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tlc.h b/drivers/net/wireless/intel/iwlwifi/mld/tlc.h
new file mode 100644
index 000000000000..c32f42e8840b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tlc.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_tlc_h__
+#define __iwl_mld_tlc_h__
+
+#include "mld.h"
+
+void iwl_mld_config_tlc_link(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta);
+
+void iwl_mld_config_tlc(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+
+void iwl_mld_handle_tlc_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+
+int iwl_mld_send_tlc_dhc(struct iwl_mld *mld, u8 sta_id, u32 type, u32 data);
+
+#endif /* __iwl_mld_tlc_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tx.c b/drivers/net/wireless/intel/iwlwifi/mld/tx.c
new file mode 100644
index 000000000000..543abe72e465
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tx.c
@@ -0,0 +1,1374 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024 - 2025 Intel Corporation
+ */
+#include <net/ip.h>
+
+#include "tx.h"
+#include "sta.h"
+#include "hcmd.h"
+#include "iwl-utils.h"
+#include "iface.h"
+
+#include "fw/dbg.h"
+
+#include "fw/api/tx.h"
+#include "fw/api/rs.h"
+#include "fw/api/txq.h"
+#include "fw/api/datapath.h"
+#include "fw/api/time-event.h"
+
+#define MAX_ANT_NUM 2
+
+/* Toggles between TX antennas. Receives the bitmask of valid TX antennas and
+ * the *index* used for the last TX, and returns the next valid *index* to use.
+ * In order to set it in the tx_cmd, must do BIT(idx).
+ */
+static u8 iwl_mld_next_ant(u8 valid, u8 last_idx)
+{
+ u8 index = last_idx;
+
+ for (int i = 0; i < MAX_ANT_NUM; i++) {
+ index = (index + 1) % MAX_ANT_NUM;
+ if (valid & BIT(index))
+ return index;
+ }
+
+ WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
+
+ return last_idx;
+}
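The antenna toggle walks the antenna indices round-robin and returns the next index whose bit is set in the valid-antenna mask; the caller later converts the index to the antenna bits with BIT(idx). A user-space sketch of the same walk:

    #include <stdio.h>

    #define MAX_ANT_NUM 2

    /* Return the next antenna *index* after last_idx whose bit is set in
     * the valid mask, wrapping around, as iwl_mld_next_ant() does.
     */
    static unsigned int next_ant(unsigned int valid, unsigned int last_idx)
    {
            unsigned int index = last_idx;

            for (int i = 0; i < MAX_ANT_NUM; i++) {
                    index = (index + 1) % MAX_ANT_NUM;
                    if (valid & (1u << index))
                            return index;
            }
            return last_idx;        /* no valid antenna; keep the old index */
    }

    int main(void)
    {
            unsigned int ant = 0;

            /* With both antennas valid (mask 0x3) the index alternates 1, 0, 1, 0, ... */
            for (int i = 0; i < 4; i++) {
                    ant = next_ant(0x3, ant);
                    printf("ant idx %u -> antenna bit 0x%x\n", ant, 1u << ant);
            }
            return 0;
    }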
+
+void iwl_mld_toggle_tx_ant(struct iwl_mld *mld, u8 *ant)
+{
+ *ant = iwl_mld_next_ant(iwl_mld_get_valid_tx_ant(mld), *ant);
+}
+
+static int
+iwl_mld_get_queue_size(struct iwl_mld *mld, struct ieee80211_txq *txq)
+{
+ struct ieee80211_sta *sta = txq->sta;
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
+ int max_size = IWL_DEFAULT_QUEUE_SIZE;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ for_each_sta_active_link(txq->vif, sta, link_sta, link_id) {
+ if (link_sta->eht_cap.has_eht) {
+ max_size = IWL_DEFAULT_QUEUE_SIZE_EHT;
+ break;
+ }
+
+ if (link_sta->he_cap.has_he)
+ max_size = IWL_DEFAULT_QUEUE_SIZE_HE;
+ }
+
+ return max_size;
+}
+
+static int iwl_mld_allocate_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
+{
+ u8 tid = txq->tid == IEEE80211_NUM_TIDS ? IWL_MGMT_TID : txq->tid;
+ u32 fw_sta_mask = iwl_mld_fw_sta_id_mask(mld, txq->sta);
+ /* We can't know when the station is asleep or awake, so we
+ * must disable the queue hang detection.
+ */
+ unsigned int watchdog_timeout = txq->vif->type == NL80211_IFTYPE_AP ?
+ IWL_WATCHDOG_DISABLED :
+ mld->trans->trans_cfg->base_params->wd_timeout;
+ int queue, size;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (tid == IWL_MGMT_TID)
+ size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
+ mld->trans->cfg->min_txq_size);
+ else
+ size = iwl_mld_get_queue_size(mld, txq);
+
+ queue = iwl_trans_txq_alloc(mld->trans, 0, fw_sta_mask, tid, size,
+ watchdog_timeout);
+
+ if (queue >= 0)
+ IWL_DEBUG_TX_QUEUES(mld,
+ "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
+ queue, fw_sta_mask, tid);
+ return queue;
+}
+
+static int iwl_mld_add_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
+{
+ struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
+ int id;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* This will also send the SCD_QUEUE_CONFIG_CMD */
+ id = iwl_mld_allocate_txq(mld, txq);
+ if (id < 0)
+ return id;
+
+ mld_txq->fw_id = id;
+ mld_txq->status.allocated = true;
+
+ rcu_assign_pointer(mld->fw_id_to_txq[id], txq);
+
+ return 0;
+}
+
+void iwl_mld_add_txq_list(struct iwl_mld *mld)
+{
+ lockdep_assert_wiphy(mld->wiphy);
+
+ while (!list_empty(&mld->txqs_to_add)) {
+ struct ieee80211_txq *txq;
+ struct iwl_mld_txq *mld_txq =
+ list_first_entry(&mld->txqs_to_add, struct iwl_mld_txq,
+ list);
+ int failed;
+
+ txq = container_of((void *)mld_txq, struct ieee80211_txq,
+ drv_priv);
+
+ failed = iwl_mld_add_txq(mld, txq);
+
+ local_bh_disable();
+ spin_lock(&mld->add_txqs_lock);
+ list_del_init(&mld_txq->list);
+ spin_unlock(&mld->add_txqs_lock);
+ /* If the queue allocation failed, we can't transmit. Leave the
+ * frames on the txq; a later attempt to allocate the queue may
+ * succeed.
+ */
+ if (!failed)
+ iwl_mld_tx_from_txq(mld, txq);
+ local_bh_enable();
+ }
+}
+
+void iwl_mld_add_txqs_wk(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+ struct iwl_mld *mld = container_of(wk, struct iwl_mld,
+ add_txqs_wk);
+
+ /* will reschedule to run after restart */
+ if (mld->fw_status.in_hw_restart)
+ return;
+
+ iwl_mld_add_txq_list(mld);
+}
+
+void
+iwl_mld_free_txq(struct iwl_mld *mld, u32 fw_sta_mask, u32 tid, u32 queue_id)
+{
+ struct iwl_scd_queue_cfg_cmd remove_cmd = {
+ .operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
+ .u.remove.tid = cpu_to_le32(tid),
+ .u.remove.sta_mask = cpu_to_le32(fw_sta_mask),
+ };
+
+ iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD),
+ &remove_cmd);
+
+ iwl_trans_txq_free(mld->trans, queue_id);
+}
+
+void iwl_mld_remove_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
+{
+ struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
+ u32 sta_msk, tid;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ spin_lock_bh(&mld->add_txqs_lock);
+ if (!list_empty(&mld_txq->list))
+ list_del_init(&mld_txq->list);
+ spin_unlock_bh(&mld->add_txqs_lock);
+
+ if (!mld_txq->status.allocated ||
+ WARN_ON(mld_txq->fw_id >= ARRAY_SIZE(mld->fw_id_to_txq)))
+ return;
+
+ sta_msk = iwl_mld_fw_sta_id_mask(mld, txq->sta);
+
+ tid = txq->tid == IEEE80211_NUM_TIDS ? IWL_MGMT_TID :
+ txq->tid;
+
+ iwl_mld_free_txq(mld, sta_msk, tid, mld_txq->fw_id);
+
+ RCU_INIT_POINTER(mld->fw_id_to_txq[mld_txq->fw_id], NULL);
+ mld_txq->status.allocated = false;
+}
+
+#define OPT_HDR(type, skb, off) \
+ (type *)(skb_network_header(skb) + (off))
+
+static __le32
+iwl_mld_get_offload_assist(struct sk_buff *skb, bool amsdu)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
+ u16 offload_assist = 0;
+#if IS_ENABLED(CONFIG_INET)
+ u8 protocol = 0;
+
+ /* Do not compute checksum if already computed */
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ goto out;
+
+ /* We do not expect to be requested to csum stuff we do not support */
+
+ /* TBD: do we also need to check
+ * !(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) now that all
+ * the devices we support have this flag?
+ */
+ if (WARN_ONCE(skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_IPV6),
+ "No support for requested checksum\n")) {
+ skb_checksum_help(skb);
+ goto out;
+ }
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ protocol = ip_hdr(skb)->protocol;
+ } else {
+#if IS_ENABLED(CONFIG_IPV6)
+ struct ipv6hdr *ipv6h =
+ (struct ipv6hdr *)skb_network_header(skb);
+ unsigned int off = sizeof(*ipv6h);
+
+ protocol = ipv6h->nexthdr;
+ while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
+ struct ipv6_opt_hdr *hp;
+
+ /* only supported extension headers */
+ if (protocol != NEXTHDR_ROUTING &&
+ protocol != NEXTHDR_HOP &&
+ protocol != NEXTHDR_DEST) {
+ skb_checksum_help(skb);
+ goto out;
+ }
+
+ hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
+ protocol = hp->nexthdr;
+ off += ipv6_optlen(hp);
+ }
+ /* if we get here - protocol now should be TCP/UDP */
+#endif
+ }
+
+ if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
+ WARN_ON_ONCE(1);
+ skb_checksum_help(skb);
+ goto out;
+ }
+
+ /* enable L4 csum */
+ offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);
+
+ /* Set offset to IP header (snap).
+ * We don't support tunneling so no need to take care of inner header.
+ * Size is in words.
+ */
+ offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);
+
+ /* Do IPv4 csum for AMSDU only (no IP csum for IPv6) */
+ if (skb->protocol == htons(ETH_P_IP) && amsdu) {
+ ip_hdr(skb)->check = 0;
+ offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
+ }
+
+ /* reset UDP/TCP header csum */
+ if (protocol == IPPROTO_TCP)
+ tcp_hdr(skb)->check = 0;
+ else
+ udp_hdr(skb)->check = 0;
+
+out:
+#endif
+ mh_len /= 2;
+ offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
+
+ if (amsdu)
+ offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
+ else if (ieee80211_hdrlen(hdr->frame_control) % 4)
+ /* padding is inserted later in transport */
+ offload_assist |= BIT(TX_CMD_OFFLD_PAD);
+
+ return cpu_to_le32(offload_assist);
+}
+
+static void iwl_mld_get_basic_rates_and_band(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_tx_info *info,
+ unsigned long *basic_rates,
+ u8 *band)
+{
+ u32 link_id = u32_get_bits(info->control.flags,
+ IEEE80211_TX_CTRL_MLO_LINK);
+
+ *basic_rates = vif->bss_conf.basic_rates;
+ *band = info->band;
+
+ if (link_id == IEEE80211_LINK_UNSPECIFIED &&
+ ieee80211_vif_is_mld(vif)) {
+ /* shouldn't do this when >1 link is active */
+ WARN_ON(hweight16(vif->active_links) != 1);
+ link_id = __ffs(vif->active_links);
+ }
+
+ if (link_id < IEEE80211_LINK_UNSPECIFIED) {
+ struct ieee80211_bss_conf *link_conf;
+
+ rcu_read_lock();
+ link_conf = rcu_dereference(vif->link_conf[link_id]);
+ if (link_conf) {
+ *basic_rates = link_conf->basic_rates;
+ if (link_conf->chanreq.oper.chan)
+ *band = link_conf->chanreq.oper.chan->band;
+ }
+ rcu_read_unlock();
+ }
+}
+
+u8 iwl_mld_get_lowest_rate(struct iwl_mld *mld,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_supported_band *sband;
+ u16 lowest_cck = IWL_RATE_COUNT, lowest_ofdm = IWL_RATE_COUNT;
+ unsigned long basic_rates;
+ u8 band, rate;
+ u32 i;
+
+ iwl_mld_get_basic_rates_and_band(mld, vif, info, &basic_rates, &band);
+
+ sband = mld->hw->wiphy->bands[band];
+ for_each_set_bit(i, &basic_rates, BITS_PER_LONG) {
+ u16 hw = sband->bitrates[i].hw_value;
+
+ if (hw >= IWL_FIRST_OFDM_RATE) {
+ if (lowest_ofdm > hw)
+ lowest_ofdm = hw;
+ } else if (lowest_cck > hw) {
+ lowest_cck = hw;
+ }
+ }
+
+ if (band == NL80211_BAND_2GHZ && !vif->p2p &&
+ vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+ !(info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)) {
+ if (lowest_cck != IWL_RATE_COUNT)
+ rate = lowest_cck;
+ else if (lowest_ofdm != IWL_RATE_COUNT)
+ rate = lowest_ofdm;
+ else
+ rate = IWL_FIRST_CCK_RATE;
+ } else if (lowest_ofdm != IWL_RATE_COUNT) {
+ rate = lowest_ofdm;
+ } else {
+ rate = IWL_FIRST_OFDM_RATE;
+ }
+
+ return rate;
+}
+
+static u32 iwl_mld_mac80211_rate_idx_to_fw(struct iwl_mld *mld,
+ struct ieee80211_tx_info *info,
+ int rate_idx)
+{
+ u32 rate_flags = 0;
+ u8 rate_plcp;
+
+ /* if the rate isn't a well known legacy rate, take the lowest one */
+ if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
+ rate_idx = iwl_mld_get_lowest_rate(mld, info,
+ info->control.vif);
+
+ WARN_ON_ONCE(rate_idx < 0);
+
+ /* Set CCK or OFDM flag */
+ if (rate_idx <= IWL_LAST_CCK_RATE)
+ rate_flags |= RATE_MCS_CCK_MSK;
+ else
+ rate_flags |= RATE_MCS_LEGACY_OFDM_MSK;
+
+ /* Legacy rates are indexed:
+ * 0 - 3 for CCK and 0 - 7 for OFDM
+ */
+ rate_plcp = (rate_idx >= IWL_FIRST_OFDM_RATE ?
+ rate_idx - IWL_FIRST_OFDM_RATE : rate_idx);
+
+ return (u32)rate_plcp | rate_flags;
+}
+
+static u32 iwl_mld_get_tx_ant(struct iwl_mld *mld,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, __le16 fc)
+{
+ if (sta && ieee80211_is_data(fc)) {
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+
+ return BIT(mld_sta->data_tx_ant) << RATE_MCS_ANT_POS;
+ }
+
+ return BIT(mld->mgmt_tx_ant) << RATE_MCS_ANT_POS;
+}
+
+static u32 iwl_mld_get_inject_tx_rate(struct iwl_mld *mld,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ __le16 fc)
+{
+ struct ieee80211_tx_rate *rate = &info->control.rates[0];
+ u32 result;
+
+ /* we only care about legacy/HT/VHT so far, so we can
+ * build in v1 and use iwl_new_rate_from_v1()
+ * FIXME: in newer devices we only support the new rates, build
+ * the rate_n_flags in the new format here instead of using v1 and
+ * converting it.
+ */
+
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ u8 mcs = ieee80211_rate_get_vht_mcs(rate);
+ u8 nss = ieee80211_rate_get_vht_nss(rate);
+
+ result = RATE_MCS_VHT_MSK_V1;
+ result |= u32_encode_bits(mcs, RATE_VHT_MCS_RATE_CODE_MSK);
+ result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
+
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ result |= RATE_MCS_SGI_MSK_V1;
+
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1);
+ else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ result |= u32_encode_bits(2, RATE_MCS_CHAN_WIDTH_MSK_V1);
+ else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+ result |= u32_encode_bits(3, RATE_MCS_CHAN_WIDTH_MSK_V1);
+
+ result = iwl_new_rate_from_v1(result);
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ result = RATE_MCS_HT_MSK_V1;
+ result |= u32_encode_bits(rate->idx,
+ RATE_HT_MCS_RATE_CODE_MSK_V1 |
+ RATE_HT_MCS_NSS_MSK_V1);
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ result |= RATE_MCS_SGI_MSK_V1;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1);
+ if (info->flags & IEEE80211_TX_CTL_LDPC)
+ result |= RATE_MCS_LDPC_MSK_V1;
+ if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC))
+ result |= RATE_MCS_STBC_MSK;
+
+ result = iwl_new_rate_from_v1(result);
+ } else {
+ result = iwl_mld_mac80211_rate_idx_to_fw(mld, info, rate->idx);
+ }
+
+ if (info->control.antennas)
+ result |= u32_encode_bits(info->control.antennas,
+ RATE_MCS_ANT_AB_MSK);
+ else
+ result |= iwl_mld_get_tx_ant(mld, info, sta, fc);
+
+ return result;
+}
+
+static u32 iwl_mld_get_tx_rate_n_flags(struct iwl_mld *mld,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, __le16 fc)
+{
+ if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))
+ return iwl_mld_get_inject_tx_rate(mld, info, sta, fc);
+
+ return iwl_mld_mac80211_rate_idx_to_fw(mld, info, -1) |
+ iwl_mld_get_tx_ant(mld, info, sta, fc);
+}
+
+static void
+iwl_mld_fill_tx_cmd_hdr(struct iwl_tx_cmd_gen3 *tx_cmd,
+ struct sk_buff *skb, bool amsdu)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ieee80211_vif *vif;
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(hdr->frame_control));
+
+ if (!amsdu || !skb_is_gso(skb))
+ return;
+
+ /* As described in IEEE Std 802.11-2020, table 9-30 (Address
+ * field contents), A-MSDU address 3 should contain the BSSID
+ * address.
+ *
+ * In TSO, the skb header address 3 contains the original address 3 to
+ * correctly create all the A-MSDU subframes headers from it.
+ * Override now the address 3 in the command header with the BSSID.
+ *
+ * Note: we fill in the MLD address, but the firmware will do the
+ * necessary translation to link address after encryption.
+ */
+ vif = info->control.vif;
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ ether_addr_copy(tx_cmd->hdr->addr3, vif->cfg.ap_addr);
+ break;
+ case NL80211_IFTYPE_AP:
+ ether_addr_copy(tx_cmd->hdr->addr3, vif->addr);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+iwl_mld_fill_tx_cmd(struct iwl_mld *mld, struct sk_buff *skb,
+ struct iwl_device_tx_cmd *dev_tx_cmd,
+ struct ieee80211_sta *sta)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct iwl_mld_sta *mld_sta = sta ? iwl_mld_sta_from_mac80211(sta) :
+ NULL;
+ struct iwl_tx_cmd_gen3 *tx_cmd;
+ bool amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
+ (*ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+ u32 rate_n_flags = 0;
+ u16 flags = 0;
+
+ dev_tx_cmd->hdr.cmd = TX_CMD;
+
+ if (!info->control.hw_key)
+ flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
+
+ /* For data and mgmt packets rate info comes from the fw.
+ * Only set rate/antenna for injected frames with fixed rate, or
+ * when no sta is given.
+ */
+ if (unlikely(!sta ||
+ info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) {
+ flags |= IWL_TX_FLAGS_CMD_RATE;
+ rate_n_flags = iwl_mld_get_tx_rate_n_flags(mld, info, sta,
+ hdr->frame_control);
+ } else if (!ieee80211_is_data(hdr->frame_control) ||
+ (mld_sta &&
+ mld_sta->sta_state < IEEE80211_STA_AUTHORIZED)) {
+ /* These are important frames */
+ flags |= IWL_TX_FLAGS_HIGH_PRI;
+ }
+
+ tx_cmd = (void *)dev_tx_cmd->payload;
+
+ iwl_mld_fill_tx_cmd_hdr(tx_cmd, skb, amsdu);
+
+ tx_cmd->offload_assist = iwl_mld_get_offload_assist(skb, amsdu);
+
+ /* Total # bytes to be transmitted */
+ tx_cmd->len = cpu_to_le16((u16)skb->len);
+
+ tx_cmd->flags = cpu_to_le16(flags);
+
+ tx_cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
+}
+
+/* The caller of this needs to check that info->control.vif is not NULL */
+static struct iwl_mld_link *
+iwl_mld_get_link_from_tx_info(struct ieee80211_tx_info *info)
+{
+ struct iwl_mld_vif *mld_vif =
+ iwl_mld_vif_from_mac80211(info->control.vif);
+ u32 link_id = u32_get_bits(info->control.flags,
+ IEEE80211_TX_CTRL_MLO_LINK);
+
+ if (link_id == IEEE80211_LINK_UNSPECIFIED) {
+ if (info->control.vif->active_links)
+ link_id = ffs(info->control.vif->active_links) - 1;
+ else
+ link_id = 0;
+ }
+
+ return rcu_dereference(mld_vif->link[link_id]);
+}
+
+static int
+iwl_mld_get_tx_queue_id(struct iwl_mld *mld, struct ieee80211_txq *txq,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ __le16 fc = hdr->frame_control;
+ struct iwl_mld_vif *mld_vif;
+ struct iwl_mld_link *link;
+
+ if (txq && txq->sta)
+ return iwl_mld_txq_from_mac80211(txq)->fw_id;
+
+ if (!info->control.vif)
+ return IWL_MLD_INVALID_QUEUE;
+
+ switch (info->control.vif->type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
+ link = iwl_mld_get_link_from_tx_info(info);
+
+ if (WARN_ON(!link))
+ break;
+
+ /* ucast disassociate/deauth frames without a station might
+ * happen, especially with reason 7 ("Class 3 frame received
+ * from nonassociated STA").
+ */
+ if (ieee80211_is_mgmt(fc) &&
+ (!ieee80211_is_bufferable_mmpdu(skb) ||
+ ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
+ return link->bcast_sta.queue_id;
+
+ if (is_multicast_ether_addr(hdr->addr1) &&
+ !ieee80211_has_order(fc))
+ return link->mcast_sta.queue_id;
+
+ WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
+ "Couldn't find a TXQ. fc=0x%02x", le16_to_cpu(fc));
+ return link->bcast_sta.queue_id;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ mld_vif = iwl_mld_vif_from_mac80211(info->control.vif);
+
+ if (mld_vif->roc_activity == ROC_NUM_ACTIVITIES) {
+ IWL_DEBUG_DROP(mld, "Drop tx outside ROC\n");
+ return IWL_MLD_INVALID_DROP_TX;
+ }
+
+ WARN_ON(!ieee80211_is_mgmt(fc));
+
+ return mld_vif->deflink.aux_sta.queue_id;
+ default:
+ /* TODO: consider monitor (task=monitor) */
+ WARN_ONCE(1, "Unsupported vif type\n");
+ break;
+ }
+
+ return IWL_MLD_INVALID_QUEUE;
+}
+
+static void iwl_mld_probe_resp_set_noa(struct iwl_mld *mld,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct iwl_mld_link *mld_link =
+ &iwl_mld_vif_from_mac80211(info->control.vif)->deflink;
+ struct iwl_probe_resp_data *resp_data;
+ u8 *pos;
+
+ if (!info->control.vif->p2p)
+ return;
+
+ rcu_read_lock();
+
+ resp_data = rcu_dereference(mld_link->probe_resp_data);
+ if (!resp_data)
+ goto out;
+
+ if (!resp_data->notif.noa_active)
+ goto out;
+
+ if (skb_tailroom(skb) < resp_data->noa_len) {
+ if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) {
+ IWL_ERR(mld,
+ "Failed to reallocate probe resp\n");
+ goto out;
+ }
+ }
+
+ pos = skb_put(skb, resp_data->noa_len);
+
+ *pos++ = WLAN_EID_VENDOR_SPECIFIC;
+ /* Set length of IE body (not including ID and length itself) */
+ *pos++ = resp_data->noa_len - 2;
+ *pos++ = (WLAN_OUI_WFA >> 16) & 0xff;
+ *pos++ = (WLAN_OUI_WFA >> 8) & 0xff;
+ *pos++ = WLAN_OUI_WFA & 0xff;
+ *pos++ = WLAN_OUI_TYPE_WFA_P2P;
+
+ memcpy(pos, &resp_data->notif.noa_attr,
+ resp_data->noa_len - sizeof(struct ieee80211_vendor_ie));
+
+out:
+ rcu_read_unlock();
+}
+
+/* This function must be called with BHs disabled */
+static int iwl_mld_tx_mpdu(struct iwl_mld *mld, struct sk_buff *skb,
+ struct ieee80211_txq *txq)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = txq ? txq->sta : NULL;
+ struct iwl_device_tx_cmd *dev_tx_cmd;
+ int queue = iwl_mld_get_tx_queue_id(mld, txq, skb);
+ u8 tid = IWL_MAX_TID_COUNT;
+
+ if (WARN_ONCE(queue == IWL_MLD_INVALID_QUEUE, "Invalid TX Queue id") ||
+ queue == IWL_MLD_INVALID_DROP_TX)
+ return -1;
+
+ if (unlikely(ieee80211_is_any_nullfunc(hdr->frame_control)))
+ return -1;
+
+ dev_tx_cmd = iwl_trans_alloc_tx_cmd(mld->trans);
+ if (unlikely(!dev_tx_cmd))
+ return -1;
+
+ if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
+ if (IWL_MLD_NON_TRANSMITTING_AP)
+ return -1;
+
+ iwl_mld_probe_resp_set_noa(mld, skb);
+ }
+
+ iwl_mld_fill_tx_cmd(mld, skb, dev_tx_cmd, sta);
+
+ if (ieee80211_is_data(hdr->frame_control)) {
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tid = ieee80211_get_tid(hdr);
+ else
+ tid = IWL_TID_NON_QOS;
+ }
+
+ IWL_DEBUG_TX(mld, "TX TID:%d from Q:%d len %d\n",
+ tid, queue, skb->len);
+
+ /* From now on, we cannot access info->control */
+ memset(&info->status, 0, sizeof(info->status));
+ memset(info->driver_data, 0, sizeof(info->driver_data));
+
+ info->driver_data[1] = dev_tx_cmd;
+
+ if (iwl_trans_tx(mld->trans, skb, dev_tx_cmd, queue))
+ goto err;
+
+ /* Update the low-latency counters when a packet is queued rather
+ * than after TX; this allows earlier low-latency detection.
+ */
+ if (sta)
+ iwl_mld_low_latency_update_counters(mld, hdr, sta, 0);
+
+ return 0;
+
+err:
+ iwl_trans_free_tx_cmd(mld->trans, dev_tx_cmd);
+ IWL_DEBUG_TX(mld, "TX from Q:%d dropped\n", queue);
+ return -1;
+}
+
+#ifdef CONFIG_INET
+
+/* This function handles the segmentation of a large TSO packet into multiple
+ * MPDUs, ensuring that the resulting segments conform to AMSDU limits and
+ * constraints.
+ */
+static int iwl_mld_tx_tso_segment(struct iwl_mld *mld, struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ struct sk_buff_head *mpdus_skbs)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
+ unsigned int mss = skb_shinfo(skb)->gso_size;
+ unsigned int num_subframes, tcp_payload_len, subf_len;
+ u16 snap_ip_tcp, pad, max_tid_amsdu_len;
+ u8 tid;
+
+ snap_ip_tcp = 8 + skb_network_header_len(skb) + tcp_hdrlen(skb);
+
+ if (!ieee80211_is_data_qos(hdr->frame_control) ||
+ !sta->cur->max_rc_amsdu_len)
+ return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);
+
+ /* Do not build AMSDU for IPv6 with extension headers.
+ * Ask the stack to segment and checksum the generated MPDUs for us.
+ */
+ if (skb->protocol == htons(ETH_P_IPV6) &&
+ ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
+ IPPROTO_TCP) {
+ netdev_flags &= ~NETIF_F_CSUM_MASK;
+ return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);
+ }
+
+ tid = ieee80211_get_tid(hdr);
+ if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+ return -EINVAL;
+
+ max_tid_amsdu_len = sta->cur->max_tid_amsdu_len[tid];
+ if (!max_tid_amsdu_len)
+ return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);
+
+ /* Sub frame header + SNAP + IP header + TCP header + MSS */
+ subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
+ pad = (4 - subf_len) & 0x3;
+
+ /* If we have N subframes in the A-MSDU, then the A-MSDU's size is
+ * N * subf_len + (N - 1) * pad.
+ */
+ num_subframes = (max_tid_amsdu_len + pad) / (subf_len + pad);
+
+ if (sta->max_amsdu_subframes &&
+ num_subframes > sta->max_amsdu_subframes)
+ num_subframes = sta->max_amsdu_subframes;
+
+ tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
+ tcp_hdrlen(skb) + skb->data_len;
+
+ /* Make sure we have enough TBs for the A-MSDU:
+ * 2 for each subframe
+ * 1 more for each fragment
+ * 1 more for the potential data in the header
+ */
+ if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) >
+ mld->trans->max_skb_frags)
+ num_subframes = 1;
+
+ if (num_subframes > 1)
+ *ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+
+ /* This skb fits in one single A-MSDU */
+ if (tcp_payload_len <= num_subframes * mss) {
+ __skb_queue_tail(mpdus_skbs, skb);
+ return 0;
+ }
+
+ /* Trick the segmentation function into creating SKBs that can fit
+ * into one A-MSDU.
+ */
+ return iwl_tx_tso_segment(skb, num_subframes, netdev_flags, mpdus_skbs);
+}
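The subframe count above follows directly from the A-MSDU size formula in the comment: with N subframes the A-MSDU occupies N * subf_len + (N - 1) * pad bytes, so the largest N that still fits is (max_tid_amsdu_len + pad) / (subf_len + pad). A small worked sketch of that computation (all sizes are invented):

    #include <stdio.h>

    /* Largest number of A-MSDU subframes that fits in max_len, given that
     * N subframes cost N * subf_len + (N - 1) * pad bytes.
     */
    static unsigned int num_subframes(unsigned int max_len, unsigned int subf_len,
                                      unsigned int pad)
    {
            return (max_len + pad) / (subf_len + pad);
    }

    int main(void)
    {
            /* Hypothetical: 14-byte ethhdr + 8 SNAP + 40 IP/TCP + 1500 MSS per
             * subframe, with an A-MSDU limit of 7936 bytes.
             */
            unsigned int subf_len = 14 + 8 + 40 + 1500;
            unsigned int pad = (4 - subf_len) & 0x3;

            printf("subf_len=%u pad=%u -> %u subframes\n",
                   subf_len, pad, num_subframes(7936, subf_len, pad));
            return 0;
    }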
+
+/* Manages TSO (TCP Segmentation Offload) packet transmission by segmenting
+ * large packets when necessary and transmitting each segment as an MPDU.
+ */
+static int iwl_mld_tx_tso(struct iwl_mld *mld, struct sk_buff *skb,
+ struct ieee80211_txq *txq)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct sk_buff *orig_skb = skb;
+ struct sk_buff_head mpdus_skbs;
+ unsigned int payload_len;
+ int ret;
+
+ if (WARN_ON(!txq || !txq->sta))
+ return -1;
+
+ payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
+ tcp_hdrlen(skb) + skb->data_len;
+
+ if (payload_len <= skb_shinfo(skb)->gso_size)
+ return iwl_mld_tx_mpdu(mld, skb, txq);
+
+ if (!info->control.vif)
+ return -1;
+
+ __skb_queue_head_init(&mpdus_skbs);
+
+ ret = iwl_mld_tx_tso_segment(mld, skb, txq->sta, &mpdus_skbs);
+ if (ret)
+ return ret;
+
+ WARN_ON(skb_queue_empty(&mpdus_skbs));
+
+ while (!skb_queue_empty(&mpdus_skbs)) {
+ skb = __skb_dequeue(&mpdus_skbs);
+
+ ret = iwl_mld_tx_mpdu(mld, skb, txq);
+ if (!ret)
+ continue;
+
+ /* Free skbs created as part of TSO logic that have not yet
+ * been dequeued
+ */
+ __skb_queue_purge(&mpdus_skbs);
+
+ /* The skb here is not necessarily the same skb that entered
+ * this function, so free it explicitly.
+ */
+ if (skb == orig_skb)
+ ieee80211_free_txskb(mld->hw, skb);
+ else
+ kfree_skb(skb);
+
+ /* There was an error, but we consumed the skb one way or
+ * another, so return 0.
+ */
+ return 0;
+ }
+
+ return 0;
+}
+#else
+static int iwl_mld_tx_tso(struct iwl_mld *mld, struct sk_buff *skb,
+ struct ieee80211_txq *txq)
+{
+ /* Impossible to get TSO without CONFIG_INET */
+ WARN_ON(1);
+
+ return -1;
+}
+#endif /* CONFIG_INET */
+
+void iwl_mld_tx_skb(struct iwl_mld *mld, struct sk_buff *skb,
+ struct ieee80211_txq *txq)
+{
+ if (skb_is_gso(skb)) {
+ if (!iwl_mld_tx_tso(mld, skb, txq))
+ return;
+ goto err;
+ }
+
+ if (likely(!iwl_mld_tx_mpdu(mld, skb, txq)))
+ return;
+
+err:
+ ieee80211_free_txskb(mld->hw, skb);
+}
+
+void iwl_mld_tx_from_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
+{
+ struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
+ struct sk_buff *skb = NULL;
+ u8 zero_addr[ETH_ALEN] = {};
+
+ /*
+ * No need for threads to be pending here; they can leave all the
+ * work to the first taker.
+ *
+ * mld_txq->tx_request logic:
+ *
+ * If 0, no one is currently TXing, set to 1 to indicate current thread
+ * will now start TX and other threads should quit.
+ *
+ * If 1, another thread is currently TXing, set to 2 to indicate to
+ * that thread that there was another request. Since that request may
+ * have raced with the check whether the queue is empty, the TXing
+ * thread should check the queue's status one more time before leaving.
+ * This check is done in order to not leave any TX hanging in the queue
+ * until the next TX invocation (which may not even happen).
+ *
+ * If 2, another thread is currently TXing, and it will already double
+ * check the queue, so do nothing.
+ */
+ if (atomic_fetch_add_unless(&mld_txq->tx_request, 1, 2))
+ return;
+
+ rcu_read_lock();
+ do {
+ while (likely(!mld_txq->status.stop_full) &&
+ (skb = ieee80211_tx_dequeue(mld->hw, txq)))
+ iwl_mld_tx_skb(mld, skb, txq);
+ } while (atomic_dec_return(&mld_txq->tx_request));
+
+ IWL_DEBUG_TX(mld, "TXQ of sta %pM tid %d is now empty\n",
+ txq->sta ? txq->sta->addr : zero_addr, txq->tid);
+
+ rcu_read_unlock();
+}
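The tx_request counter above implements a small "first taker does the work" handshake: the first caller moves 0 to 1 and drains the queue, later callers bump the counter to 2 (never beyond) so the draining thread re-checks the queue once more before leaving. A user-space sketch of the same state machine using C11 atomics (the drain body is a placeholder, and this is a single-threaded illustration only):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int tx_request;

    /* Add 1 unless the counter is already 2; only the 0 -> 1 transition makes
     * the caller the active "taker", mirroring the atomic_fetch_add_unless()
     * check in iwl_mld_tx_from_txq().
     */
    static bool try_become_taker(void)
    {
            int cur = atomic_load(&tx_request);

            while (cur != 2) {
                    if (atomic_compare_exchange_weak(&tx_request, &cur, cur + 1))
                            return cur == 0;        /* 1 -> 2: the taker will re-check */
            }
            return false;   /* already 2: nothing more to signal */
    }

    static void drain_queue(void)
    {
            do {
                    /* ... dequeue and transmit frames here ... */
            } while (atomic_fetch_sub(&tx_request, 1) - 1); /* like atomic_dec_return() */
    }

    int main(void)
    {
            if (try_become_taker())
                    drain_queue();
            printf("tx_request ends at %d\n", atomic_load(&tx_request));   /* 0 */
            return 0;
    }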
+
+static void iwl_mld_hwrate_to_tx_rate(u32 rate_n_flags,
+ struct ieee80211_tx_info *info)
+{
+ enum nl80211_band band = info->band;
+ struct ieee80211_tx_rate *tx_rate = &info->status.rates[0];
+ u32 sgi = rate_n_flags & RATE_MCS_SGI_MSK;
+ u32 chan_width = rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK;
+ u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+
+ if (sgi)
+ tx_rate->flags |= IEEE80211_TX_RC_SHORT_GI;
+
+ switch (chan_width) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ tx_rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ tx_rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ tx_rate->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
+ break;
+ default:
+ break;
+ }
+
+ switch (format) {
+ case RATE_MCS_HT_MSK:
+ tx_rate->flags |= IEEE80211_TX_RC_MCS;
+ tx_rate->idx = RATE_HT_MCS_INDEX(rate_n_flags);
+ break;
+ case RATE_MCS_VHT_MSK:
+ ieee80211_rate_set_vht(tx_rate,
+ rate_n_flags & RATE_MCS_CODE_MSK,
+ FIELD_GET(RATE_MCS_NSS_MSK,
+ rate_n_flags) + 1);
+ tx_rate->flags |= IEEE80211_TX_RC_VHT_MCS;
+ break;
+ case RATE_MCS_HE_MSK:
+ /* mac80211 cannot do this without ieee80211_tx_status_ext()
+ * but it only matters for radiotap
+ */
+ tx_rate->idx = 0;
+ break;
+ default:
+ tx_rate->idx =
+ iwl_mld_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
+ band);
+ break;
+ }
+}
+
+void iwl_mld_handle_tx_resp_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_tx_resp *tx_resp = (void *)pkt->data;
+ int txq_id = le16_to_cpu(tx_resp->tx_queue);
+ struct agg_tx_status *agg_status = &tx_resp->status;
+ u32 status = le16_to_cpu(agg_status->status);
+ u32 pkt_len = iwl_rx_packet_payload_len(pkt);
+ size_t notif_size = sizeof(*tx_resp) + sizeof(u32);
+ int sta_id = IWL_TX_RES_GET_RA(tx_resp->ra_tid);
+ int tid = IWL_TX_RES_GET_TID(tx_resp->ra_tid);
+ struct ieee80211_link_sta *link_sta;
+ struct iwl_mld_sta *mld_sta;
+ u16 ssn;
+ struct sk_buff_head skbs;
+ u8 skb_freed = 0;
+ bool mgmt = false;
+ bool tx_failure = (status & TX_STATUS_MSK) != TX_STATUS_SUCCESS;
+
+ if (IWL_FW_CHECK(mld, tx_resp->frame_count != 1,
+ "Invalid tx_resp notif frame_count (%d)\n",
+ tx_resp->frame_count))
+ return;
+
+ /* validate the size of the variable part of the notif */
+ if (IWL_FW_CHECK(mld, notif_size != pkt_len,
+ "Invalid tx_resp notif size (expected=%zu got=%u)\n",
+ notif_size, pkt_len))
+ return;
+
+ ssn = le32_to_cpup((__le32 *)agg_status +
+ tx_resp->frame_count) & 0xFFFF;
+
+ __skb_queue_head_init(&skbs);
+
+ /* we can free up to, but not including, ssn % q.n_bd */
+ iwl_trans_reclaim(mld->trans, txq_id, ssn, &skbs, false);
+
+ while (!skb_queue_empty(&skbs)) {
+ struct sk_buff *skb = __skb_dequeue(&skbs);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+
+ skb_freed++;
+
+ iwl_trans_free_tx_cmd(mld->trans, info->driver_data[1]);
+
+ memset(&info->status, 0, sizeof(info->status));
+
+ info->flags &= ~(IEEE80211_TX_STAT_ACK | IEEE80211_TX_STAT_TX_FILTERED);
+
+ /* inform mac80211 about what happened with the frame */
+ switch (status & TX_STATUS_MSK) {
+ case TX_STATUS_SUCCESS:
+ case TX_STATUS_DIRECT_DONE:
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ break;
+ default:
+ break;
+ }
+
+ /* If we are freeing multiple frames, mark all the frames
+ * but the first one as acked, since they were acknowledged
+ * before
+ */
+ if (skb_freed > 1)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ if (tx_failure) {
+ enum iwl_fw_ini_time_point tp =
+ IWL_FW_INI_TIME_POINT_TX_FAILED;
+
+ if (ieee80211_is_action(hdr->frame_control))
+ tp = IWL_FW_INI_TIME_POINT_TX_WFD_ACTION_FRAME_FAILED;
+ else if (ieee80211_is_mgmt(hdr->frame_control))
+ mgmt = true;
+
+ iwl_dbg_tlv_time_point(&mld->fwrt, tp, NULL);
+ }
+
+ iwl_mld_hwrate_to_tx_rate(le32_to_cpu(tx_resp->initial_rate),
+ info);
+
+ if (likely(!iwl_mld_time_sync_frame(mld, skb, hdr->addr1)))
+ ieee80211_tx_status_skb(mld->hw, skb);
+ }
+
+ IWL_DEBUG_TX_REPLY(mld,
+ "TXQ %d status 0x%08x ssn=%d initial_rate 0x%x retries %d\n",
+ txq_id, status, ssn, le32_to_cpu(tx_resp->initial_rate),
+ tx_resp->failure_frame);
+
+ if (tx_failure && mgmt)
+ iwl_mld_toggle_tx_ant(mld, &mld->mgmt_tx_ant);
+
+ if (IWL_FW_CHECK(mld, sta_id >= mld->fw->ucode_capa.num_stations,
+ "Got invalid sta_id (%d)\n", sta_id))
+ return;
+
+ rcu_read_lock();
+
+ link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
+ if (!link_sta) {
+ /* This can happen if the TX cmd was sent before pre_rcu_remove
+ * but the TX response was received after
+ */
+ IWL_DEBUG_TX_REPLY(mld,
+ "Got valid sta_id (%d) but sta is NULL\n",
+ sta_id);
+ goto out;
+ }
+
+ if (IS_ERR(link_sta))
+ goto out;
+
+ mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+
+ if (tx_failure && mld_sta->sta_state < IEEE80211_STA_AUTHORIZED)
+ iwl_mld_toggle_tx_ant(mld, &mld_sta->data_tx_ant);
+
+ if (tid < IWL_MAX_TID_COUNT)
+ iwl_mld_count_mpdu_tx(link_sta, 1);
+
+out:
+ rcu_read_unlock();
+}
+
+static void iwl_mld_tx_reclaim_txq(struct iwl_mld *mld, int txq, int index,
+ bool in_flush)
+{
+ struct sk_buff_head reclaimed_skbs;
+
+ __skb_queue_head_init(&reclaimed_skbs);
+
+ iwl_trans_reclaim(mld->trans, txq, index, &reclaimed_skbs, in_flush);
+
+ while (!skb_queue_empty(&reclaimed_skbs)) {
+ struct sk_buff *skb = __skb_dequeue(&reclaimed_skbs);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ iwl_trans_free_tx_cmd(mld->trans, info->driver_data[1]);
+
+ memset(&info->status, 0, sizeof(info->status));
+
+ /* Packet was transmitted successfully; failures come as single
+ * frames because, before failing a frame, the firmware transmits
+ * it without aggregation at least once.
+ */
+ if (!in_flush)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ else
+ info->flags &= ~IEEE80211_TX_STAT_ACK;
+
+ ieee80211_tx_status_skb(mld->hw, skb);
+ }
+}
+
+int iwl_mld_flush_link_sta_txqs(struct iwl_mld *mld, u32 fw_sta_id)
+{
+ struct iwl_tx_path_flush_cmd_rsp *rsp;
+ struct iwl_tx_path_flush_cmd flush_cmd = {
+ .sta_id = cpu_to_le32(fw_sta_id),
+ .tid_mask = cpu_to_le16(0xffff),
+ };
+ struct iwl_host_cmd cmd = {
+ .id = TXPATH_FLUSH,
+ .len = { sizeof(flush_cmd), },
+ .data = { &flush_cmd, },
+ .flags = CMD_WANT_SKB,
+ };
+ int ret, num_flushed_queues;
+ u32 resp_len;
+
+ IWL_DEBUG_TX_QUEUES(mld, "flush for sta id %d tid mask 0x%x\n",
+ fw_sta_id, 0xffff);
+
+ ret = iwl_mld_send_cmd(mld, &cmd);
+ if (ret) {
+ IWL_ERR(mld, "Failed to send flush command (%d)\n", ret);
+ return ret;
+ }
+
+ resp_len = iwl_rx_packet_payload_len(cmd.resp_pkt);
+ if (IWL_FW_CHECK(mld, resp_len != sizeof(*rsp),
+ "Invalid TXPATH_FLUSH response len: %d\n",
+ resp_len)) {
+ ret = -EIO;
+ goto free_rsp;
+ }
+
+ rsp = (void *)cmd.resp_pkt->data;
+
+ if (IWL_FW_CHECK(mld, le16_to_cpu(rsp->sta_id) != fw_sta_id,
+ "sta_id %d != rsp_sta_id %d\n", fw_sta_id,
+ le16_to_cpu(rsp->sta_id))) {
+ ret = -EIO;
+ goto free_rsp;
+ }
+
+ num_flushed_queues = le16_to_cpu(rsp->num_flushed_queues);
+ if (IWL_FW_CHECK(mld, num_flushed_queues > IWL_TX_FLUSH_QUEUE_RSP,
+ "num_flushed_queues %d\n", num_flushed_queues)) {
+ ret = -EIO;
+ goto free_rsp;
+ }
+
+ for (int i = 0; i < num_flushed_queues; i++) {
+ struct iwl_flush_queue_info *queue_info = &rsp->queues[i];
+ int read_after = le16_to_cpu(queue_info->read_after_flush);
+ int txq_id = le16_to_cpu(queue_info->queue_num);
+
+ if (IWL_FW_CHECK(mld,
+ txq_id >= ARRAY_SIZE(mld->fw_id_to_txq),
+ "Invalid txq id %d\n", txq_id))
+ continue;
+
+ IWL_DEBUG_TX_QUEUES(mld,
+ "tid %d txq_id %d read-before %d read-after %d\n",
+ le16_to_cpu(queue_info->tid), txq_id,
+ le16_to_cpu(queue_info->read_before_flush),
+ read_after);
+
+ iwl_mld_tx_reclaim_txq(mld, txq_id, read_after, true);
+ }
+
+free_rsp:
+ iwl_free_resp(&cmd);
+ return ret;
+}
+
+int iwl_mld_ensure_queue(struct iwl_mld *mld, struct ieee80211_txq *txq)
+{
+ struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (likely(mld_txq->status.allocated))
+ return 0;
+
+ ret = iwl_mld_add_txq(mld, txq);
+
+ spin_lock_bh(&mld->add_txqs_lock);
+ if (!list_empty(&mld_txq->list))
+ list_del_init(&mld_txq->list);
+ spin_unlock_bh(&mld->add_txqs_lock);
+
+ return ret;
+}
+
+int iwl_mld_update_sta_txqs(struct iwl_mld *mld,
+ struct ieee80211_sta *sta,
+ u32 old_sta_mask, u32 new_sta_mask)
+{
+ struct iwl_scd_queue_cfg_cmd cmd = {
+ .operation = cpu_to_le32(IWL_SCD_QUEUE_MODIFY),
+ .u.modify.old_sta_mask = cpu_to_le32(old_sta_mask),
+ .u.modify.new_sta_mask = cpu_to_le32(new_sta_mask),
+ };
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ for (int tid = 0; tid <= IWL_MAX_TID_COUNT; tid++) {
+ struct ieee80211_txq *txq =
+ sta->txq[tid != IWL_MAX_TID_COUNT ?
+ tid : IEEE80211_NUM_TIDS];
+ struct iwl_mld_txq *mld_txq =
+ iwl_mld_txq_from_mac80211(txq);
+ int ret;
+
+ if (!mld_txq->status.allocated)
+ continue;
+
+ if (tid == IWL_MAX_TID_COUNT)
+ cmd.u.modify.tid = cpu_to_le32(IWL_MGMT_TID);
+ else
+ cmd.u.modify.tid = cpu_to_le32(tid);
+
+ ret = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(DATA_PATH_GROUP,
+ SCD_QUEUE_CONFIG_CMD),
+ &cmd);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void iwl_mld_handle_compressed_ba_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_compressed_ba_notif *ba_res = (void *)pkt->data;
+ u32 pkt_len = iwl_rx_packet_payload_len(pkt);
+ u16 tfd_cnt = le16_to_cpu(ba_res->tfd_cnt);
+ u8 sta_id = ba_res->sta_id;
+ struct ieee80211_link_sta *link_sta;
+
+ if (!tfd_cnt)
+ return;
+
+ if (IWL_FW_CHECK(mld, struct_size(ba_res, tfd, tfd_cnt) > pkt_len,
+ "Short BA notif (tfd_cnt=%d, size:0x%x)\n",
+ tfd_cnt, pkt_len))
+ return;
+
+ IWL_DEBUG_TX_REPLY(mld,
+ "BA notif received from sta_id=%d, flags=0x%x, sent:%d, acked:%d\n",
+ sta_id, le32_to_cpu(ba_res->flags),
+ le16_to_cpu(ba_res->txed),
+ le16_to_cpu(ba_res->done));
+
+ for (int i = 0; i < tfd_cnt; i++) {
+ struct iwl_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
+ int txq_id = le16_to_cpu(ba_tfd->q_num);
+ int index = le16_to_cpu(ba_tfd->tfd_index);
+
+ if (IWL_FW_CHECK(mld,
+ txq_id >= ARRAY_SIZE(mld->fw_id_to_txq),
+ "Invalid txq id %d\n", txq_id))
+ continue;
+
+ iwl_mld_tx_reclaim_txq(mld, txq_id, index, false);
+ }
+
+ if (IWL_FW_CHECK(mld, sta_id >= mld->fw->ucode_capa.num_stations,
+ "Got invalid sta_id (%d)\n", sta_id))
+ return;
+
+ rcu_read_lock();
+
+ link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
+ if (IWL_FW_CHECK(mld, IS_ERR_OR_NULL(link_sta),
+ "Got valid sta_id (%d) but link_sta is NULL\n",
+ sta_id))
+ goto out;
+
+ iwl_mld_count_mpdu_tx(link_sta, le16_to_cpu(ba_res->txed));
+out:
+ rcu_read_unlock();
+}
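The struct_size() check above rejects a firmware-supplied tfd_cnt that would not fit in the received payload. A rough user-space sketch of the same bounds check follows (simplified, hypothetical struct layout and names; the kernel's struct_size() helper instead saturates to SIZE_MAX on overflow, which makes the "> pkt_len" comparison fail):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct ba_tfd {
	uint16_t q_num;
	uint16_t tfd_index;
};

struct ba_notif {
	uint16_t tfd_cnt;
	uint8_t sta_id;
	uint8_t reserved;
	struct ba_tfd tfd[];	/* tfd_cnt trailing entries */
};

/* true iff sizeof(struct ba_notif) + tfd_cnt * sizeof(struct ba_tfd)
 * fits in pkt_len, with an explicit multiplication-overflow guard
 */
static bool ba_notif_fits(size_t pkt_len, uint16_t tfd_cnt)
{
	size_t fixed = sizeof(struct ba_notif);

	if (tfd_cnt > (SIZE_MAX - fixed) / sizeof(struct ba_tfd))
		return false;
	return fixed + (size_t)tfd_cnt * sizeof(struct ba_tfd) <= pkt_len;
}

When such a check fails, the handler above bails out via IWL_FW_CHECK() rather than indexing tfd[] past the end of the packet.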
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tx.h b/drivers/net/wireless/intel/iwlwifi/mld/tx.h
new file mode 100644
index 000000000000..520f15f9d33c
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tx.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_tx_h__
+#define __iwl_mld_tx_h__
+
+#include "mld.h"
+
+#define IWL_MLD_INVALID_QUEUE 0xFFFF
+#define IWL_MLD_INVALID_DROP_TX 0xFFFE
+
+/**
+ * struct iwl_mld_txq - TX Queue data
+ *
+ * @fw_id: the fw id of this txq. Only valid when &status.allocated is true.
+ * @status: bitmap of the txq status
+ * @status.allocated: Indicates that the queue was allocated.
+ * @status.stop_full: Indicates that the queue is full and should stop TXing.
+ * @list: list pointer, for &mld::txqs_to_add
+ * @tx_request: makes sure that if multiple threads want to TX from this
+ * txq, only one of them will do all the TXing.
+ * This is needed to avoid spinning on the trans txq lock, which is expensive.
+ */
+struct iwl_mld_txq {
+ /* Add here fields that need clean up on restart */
+ struct_group(zeroed_on_hw_restart,
+ u16 fw_id;
+ struct {
+ u8 allocated:1;
+ u8 stop_full:1;
+ } status;
+ );
+ struct list_head list;
+ atomic_t tx_request;
+ /* And here fields that survive a fw restart */
+};
+
+static inline void iwl_mld_init_txq(struct iwl_mld_txq *mld_txq)
+{
+ INIT_LIST_HEAD(&mld_txq->list);
+ atomic_set(&mld_txq->tx_request, 0);
+}
+
+static inline struct iwl_mld_txq *
+iwl_mld_txq_from_mac80211(struct ieee80211_txq *txq)
+{
+ return (void *)txq->drv_priv;
+}
+
+void iwl_mld_add_txqs_wk(struct wiphy *wiphy, struct wiphy_work *wk);
+void iwl_mld_remove_txq(struct iwl_mld *mld, struct ieee80211_txq *txq);
+void iwl_mld_add_txq_list(struct iwl_mld *mld);
+void
+iwl_mld_free_txq(struct iwl_mld *mld, u32 fw_sta_mask, u32 tid, u32 queue_id);
+void iwl_mld_tx_from_txq(struct iwl_mld *mld, struct ieee80211_txq *txq);
+void iwl_mld_handle_tx_resp_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+int iwl_mld_flush_link_sta_txqs(struct iwl_mld *mld, u32 fw_sta_id);
+int iwl_mld_ensure_queue(struct iwl_mld *mld, struct ieee80211_txq *txq);
+
+int iwl_mld_update_sta_txqs(struct iwl_mld *mld,
+ struct ieee80211_sta *sta,
+ u32 old_sta_mask, u32 new_sta_mask);
+
+void iwl_mld_handle_compressed_ba_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+void iwl_mld_toggle_tx_ant(struct iwl_mld *mld, u8 *ant);
+
+u8 iwl_mld_get_lowest_rate(struct iwl_mld *mld,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_vif *vif);
+
+void iwl_mld_tx_skb(struct iwl_mld *mld, struct sk_buff *skb,
+ struct ieee80211_txq *txq);
+
+#endif /* __iwl_mld_tx_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 82ca7f8b1bb2..3e8b7168af01 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -3388,7 +3388,7 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait,
break;
}
case WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION): {
- struct iwl_mvm_d3_end_notif *notif = (void *)pkt->data;
+ struct iwl_d3_end_notif *notif = (void *)pkt->data;
d3_data->d3_end_flags = __le32_to_cpu(notif->flags);
d3_data->notif_received |= IWL_D3_NOTIF_D3_END_NOTIF;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 55d035b896e9..671d3f8d79c1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2023, 2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -542,7 +542,7 @@ static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
- struct iwl_mvm_tas_status_resp *rsp = NULL;
+ struct iwl_tas_status_resp *rsp = NULL;
static const size_t bufsz = 1024;
char *buff, *pos, *endpos;
const char * const tas_dis_reason[TAS_DISABLED_REASON_MAX] = {
@@ -552,6 +552,8 @@ static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file,
"Due To SAR Limit Less Than 6 dBm",
[TAS_DISABLED_REASON_INVALID] =
"N/A",
+ [TAS_DISABLED_DUE_TO_TABLE_SOURCE_INVALID] =
+ "Due to table source invalid",
};
const char * const tas_current_status[TAS_DYNA_STATUS_MAX] = {
[TAS_DYNA_INACTIVE] = "INACTIVE",
@@ -598,23 +600,19 @@ static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file,
pos += scnprintf(pos, endpos - pos, "TAS Conclusion:\n");
for (i = 0; i < rsp->in_dual_radio + 1; i++) {
- if (rsp->tas_status_mac[i].band != TAS_LMAC_BAND_INVALID &&
- rsp->tas_status_mac[i].dynamic_status & BIT(TAS_DYNA_ACTIVE)) {
+ if (rsp->tas_status_mac[i].dynamic_status &
+ BIT(TAS_DYNA_ACTIVE)) {
pos += scnprintf(pos, endpos - pos, "\tON for ");
switch (rsp->tas_status_mac[i].band) {
- case TAS_LMAC_BAND_HB:
+ case PHY_BAND_5:
pos += scnprintf(pos, endpos - pos, "HB\n");
break;
- case TAS_LMAC_BAND_LB:
+ case PHY_BAND_24:
pos += scnprintf(pos, endpos - pos, "LB\n");
break;
- case TAS_LMAC_BAND_UHB:
+ case PHY_BAND_6:
pos += scnprintf(pos, endpos - pos, "UHB\n");
break;
- case TAS_LMAC_BAND_INVALID:
- pos += scnprintf(pos, endpos - pos,
- "INVALID BAND\n");
- break;
default:
pos += scnprintf(pos, endpos - pos,
"Unsupported band (%d)\n",
@@ -668,20 +666,16 @@ static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file,
pos += scnprintf(pos, endpos - pos, "TAS status for ");
switch (rsp->tas_status_mac[i].band) {
- case TAS_LMAC_BAND_HB:
+ case PHY_BAND_5:
pos += scnprintf(pos, endpos - pos, "High band\n");
break;
- case TAS_LMAC_BAND_LB:
+ case PHY_BAND_24:
pos += scnprintf(pos, endpos - pos, "Low band\n");
break;
- case TAS_LMAC_BAND_UHB:
+ case PHY_BAND_6:
pos += scnprintf(pos, endpos - pos,
"Ultra high band\n");
break;
- case TAS_LMAC_BAND_INVALID:
- pos += scnprintf(pos, endpos - pos,
- "INVALID band\n");
- break;
default:
pos += scnprintf(pos, endpos - pos,
"Unsupported band (%d)\n",
@@ -704,11 +698,9 @@ static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file,
pos += scnprintf(pos, endpos - pos, "Dynamic status:\n");
dyn_status = (rsp->tas_status_mac[i].dynamic_status);
- for_each_set_bit(tmp, &dyn_status, sizeof(dyn_status)) {
- if (tmp >= 0 && tmp < TAS_DYNA_STATUS_MAX)
- pos += scnprintf(pos, endpos - pos,
- "\t%s (%d)\n",
- tas_current_status[tmp], tmp);
+ for_each_set_bit(tmp, &dyn_status, TAS_DYNA_STATUS_MAX) {
+ pos += scnprintf(pos, endpos - pos, "\t%s (%d)\n",
+ tas_current_status[tmp], tmp);
}
pos += scnprintf(pos, endpos - pos,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index b26141c30c61..a493ef6bedc3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#include <linux/etherdevice.h>
#include <linux/math64.h>
@@ -46,107 +46,6 @@ struct iwl_mvm_ftm_iter_data {
u8 *tk;
};
-int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
- u8 *hltk, u32 hltk_len)
-{
- struct iwl_mvm_ftm_pasn_entry *pasn = kzalloc(sizeof(*pasn),
- GFP_KERNEL);
- u32 expected_tk_len;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (!pasn)
- return -ENOBUFS;
-
- iwl_mvm_ftm_remove_pasn_sta(mvm, addr);
-
- pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher);
-
- switch (pasn->cipher) {
- case IWL_LOCATION_CIPHER_CCMP_128:
- case IWL_LOCATION_CIPHER_GCMP_128:
- expected_tk_len = WLAN_KEY_LEN_CCMP;
- break;
- case IWL_LOCATION_CIPHER_GCMP_256:
- expected_tk_len = WLAN_KEY_LEN_GCMP_256;
- break;
- default:
- goto out;
- }
-
- /*
- * If associated to this AP and already have security context,
- * the TK is already configured for this station, so it
- * shouldn't be set again here.
- */
- if (vif->cfg.assoc) {
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct ieee80211_bss_conf *link_conf;
- unsigned int link_id;
- struct ieee80211_sta *sta;
- u8 sta_id;
-
- rcu_read_lock();
- for_each_vif_active_link(vif, link_conf, link_id) {
- if (memcmp(addr, link_conf->bssid, ETH_ALEN))
- continue;
-
- sta_id = mvmvif->link[link_id]->ap_sta_id;
- sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
- if (!IS_ERR_OR_NULL(sta) && sta->mfp)
- expected_tk_len = 0;
- break;
- }
- rcu_read_unlock();
- }
-
- if (tk_len != expected_tk_len ||
- (hltk_len && hltk_len != sizeof(pasn->hltk))) {
- IWL_ERR(mvm, "Invalid key length: tk_len=%u hltk_len=%u\n",
- tk_len, hltk_len);
- goto out;
- }
-
- if (!expected_tk_len && !hltk_len) {
- IWL_ERR(mvm, "TK and HLTK not set\n");
- goto out;
- }
-
- memcpy(pasn->addr, addr, sizeof(pasn->addr));
-
- if (hltk_len) {
- memcpy(pasn->hltk, hltk, sizeof(pasn->hltk));
- pasn->flags |= IWL_MVM_PASN_FLAG_HAS_HLTK;
- }
-
- if (tk && tk_len)
- memcpy(pasn->tk, tk, sizeof(pasn->tk));
-
- list_add_tail(&pasn->list, &mvm->ftm_initiator.pasn_list);
- return 0;
-out:
- kfree(pasn);
- return -EINVAL;
-}
-
-void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr)
-{
- struct iwl_mvm_ftm_pasn_entry *entry, *prev;
-
- lockdep_assert_held(&mvm->mutex);
-
- list_for_each_entry_safe(entry, prev, &mvm->ftm_initiator.pasn_list,
- list) {
- if (memcmp(entry->addr, addr, sizeof(entry->addr)))
- continue;
-
- list_del(&entry->list);
- kfree(entry);
- return;
- }
-}
-
static void iwl_mvm_ftm_reset(struct iwl_mvm *mvm)
{
struct iwl_mvm_loc_entry *e, *t;
@@ -773,7 +672,11 @@ iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
target.bssid = bssid;
target.cipher = cipher;
+ target.tk = NULL;
ieee80211_iter_keys(mvm->hw, vif, iter, &target);
+
+ if (!WARN_ON(!target.tk))
+ memcpy(tk, target.tk, TK_11AZ_LEN);
} else {
memcpy(tk, entry->tk, sizeof(entry->tk));
}
@@ -949,7 +852,7 @@ static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm,
static int
iwl_mvm_ftm_put_target_v10(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_pmsr_request_peer *peer,
- struct iwl_tof_range_req_ap_entry_v10 *target)
+ struct iwl_tof_range_req_ap_entry *target)
{
u32 i2r_max_sts, flags;
int ret;
@@ -1021,7 +924,7 @@ static int iwl_mvm_ftm_start_v14(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_pmsr_request *req)
{
- struct iwl_tof_range_req_cmd_v14 cmd;
+ struct iwl_tof_range_req_cmd cmd;
struct iwl_host_cmd hcmd = {
.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
@@ -1035,7 +938,7 @@ static int iwl_mvm_ftm_start_v14(struct iwl_mvm *mvm,
for (i = 0; i < cmd.num_of_ap; i++) {
struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
- struct iwl_tof_range_req_ap_entry_v10 *target = &cmd.ap[i];
+ struct iwl_tof_range_req_ap_entry *target = &cmd.ap[i];
err = iwl_mvm_ftm_put_target_v10(mvm, vif, peer, target);
if (err)
@@ -1301,7 +1204,7 @@ static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
static void
iwl_mvm_ftm_pasn_update_pn(struct iwl_mvm *mvm,
- struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap)
+ struct iwl_tof_range_rsp_ap_entry_ntfy *fw_ap)
{
struct iwl_mvm_ftm_pasn_entry *entry;
@@ -1339,7 +1242,7 @@ static bool iwl_mvm_ftm_resp_size_validation(u8 ver, unsigned int pkt_len)
switch (ver) {
case 9:
case 8:
- return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v8);
+ return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy);
case 7:
return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v7);
case 6:
@@ -1359,7 +1262,7 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data;
struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data;
struct iwl_tof_range_rsp_ntfy_v7 *fw_resp_v7 = (void *)pkt->data;
- struct iwl_tof_range_rsp_ntfy_v8 *fw_resp_v8 = (void *)pkt->data;
+ struct iwl_tof_range_rsp_ntfy *fw_resp_v8 = (void *)pkt->data;
int i;
bool new_api = fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
@@ -1395,9 +1298,9 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %u\n",
mvm->ftm_initiator.req->cookie, num_of_aps);
- for (i = 0; i < num_of_aps && i < IWL_MVM_TOF_MAX_APS; i++) {
+ for (i = 0; i < num_of_aps && i < IWL_TOF_MAX_APS; i++) {
struct cfg80211_pmsr_result result = {};
- struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap;
+ struct iwl_tof_range_rsp_ap_entry_ntfy *fw_ap;
int peer_idx;
if (new_api) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
index e6e468e81ab3..83f6e508a094 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
@@ -324,92 +324,6 @@ static void iwl_mvm_resp_del_pasn_sta(struct iwl_mvm *mvm,
kfree(sta);
}
-int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
- u8 *hltk, u32 hltk_len)
-{
- int ret;
- struct iwl_mvm_pasn_sta *sta = NULL;
- struct iwl_mvm_pasn_hltk_data hltk_data = {
- .addr = addr,
- .hltk = hltk,
- };
- struct iwl_mvm_pasn_hltk_data *hltk_data_ptr = NULL;
-
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
- WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
- 2);
-
- lockdep_assert_held(&mvm->mutex);
-
- if (cmd_ver < 3) {
- IWL_ERR(mvm, "Adding PASN station not supported by FW\n");
- return -EOPNOTSUPP;
- }
-
- if ((!hltk || !hltk_len) && (!tk || !tk_len)) {
- IWL_ERR(mvm, "TK and HLTK not set\n");
- return -EINVAL;
- }
-
- if (hltk && hltk_len) {
- if (!fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT)) {
- IWL_ERR(mvm, "No support for secure LTF measurement\n");
- return -EINVAL;
- }
-
- hltk_data.cipher = iwl_mvm_cipher_to_location_cipher(cipher);
- if (hltk_data.cipher == IWL_LOCATION_CIPHER_INVALID) {
- IWL_ERR(mvm, "invalid cipher: %u\n", cipher);
- return -EINVAL;
- }
-
- hltk_data_ptr = &hltk_data;
- }
-
- if (tk && tk_len) {
- sta = kzalloc(sizeof(*sta) + tk_len, GFP_KERNEL);
- if (!sta)
- return -ENOBUFS;
-
- ret = iwl_mvm_add_pasn_sta(mvm, vif, &sta->int_sta, addr,
- cipher, tk, tk_len, &sta->keyconf);
- if (ret) {
- kfree(sta);
- return ret;
- }
-
- memcpy(sta->addr, addr, ETH_ALEN);
- list_add_tail(&sta->list, &mvm->resp_pasn_list);
- }
-
- ret = iwl_mvm_ftm_responder_dyn_cfg_v3(mvm, vif, NULL, hltk_data_ptr);
- if (ret && sta)
- iwl_mvm_resp_del_pasn_sta(mvm, vif, sta);
-
- return ret;
-}
-
-int iwl_mvm_ftm_resp_remove_pasn_sta(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif, u8 *addr)
-{
- struct iwl_mvm_pasn_sta *sta, *prev;
-
- lockdep_assert_held(&mvm->mutex);
-
- list_for_each_entry_safe(sta, prev, &mvm->resp_pasn_list, list) {
- if (!memcmp(sta->addr, addr, ETH_ALEN)) {
- iwl_mvm_resp_del_pasn_sta(mvm, vif, sta);
- return 0;
- }
- }
-
- IWL_ERR(mvm, "FTM: PASN station %pM not found\n", addr);
- return -EINVAL;
-}
-
int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index df49dd2e2026..2b5a62604fc4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -422,6 +422,8 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
/* if reached this point, Alive notification was received */
iwl_mei_alive_notif(true);
+ iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
+
ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait,
&mvm->fw->ucode_capa);
if (ret) {
@@ -430,8 +432,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
return ret;
}
- iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
-
/*
* Note: all the queues are enabled as part of the interface
* initialization, but in firmware restart scenarios they
@@ -1094,22 +1094,6 @@ static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
return iwl_mvm_ppag_send_cmd(mvm);
}
-static bool
-iwl_mvm_add_to_tas_block_list(u16 *list, u8 *size, u16 mcc)
-{
- /* Verify that there is room for another country */
- if (*size >= IWL_WTAS_BLACK_LIST_MAX)
- return false;
-
- for (u8 i = 0; i < *size; i++) {
- if (list[i] == mcc)
- return true;
- }
-
- list[*size++] = mcc;
- return true;
-}
-
static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
{
u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG);
@@ -1150,10 +1134,10 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
IWL_DEBUG_RADIO(mvm,
"System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n",
dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
- if ((!iwl_mvm_add_to_tas_block_list(data.block_list_array,
+ if ((!iwl_add_mcc_to_tas_block_list(data.block_list_array,
&data.block_list_size,
IWL_MCC_US)) ||
- (!iwl_mvm_add_to_tas_block_list(data.block_list_array,
+ (!iwl_add_mcc_to_tas_block_list(data.block_list_array,
&data.block_list_size,
IWL_MCC_CANADA))) {
IWL_DEBUG_RADIO(mvm,
@@ -1213,38 +1197,6 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
}
-static bool iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
-{
- u32 value = 0;
- /* default behaviour is disabled */
- bool bios_enable_rfi = false;
- int ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_RFI_CONFIG, &value);
-
-
- if (ret < 0) {
- IWL_DEBUG_RADIO(mvm, "Failed to get DSM RFI, ret=%d\n", ret);
- return bios_enable_rfi;
- }
-
- value &= DSM_VALUE_RFI_DISABLE;
- /* RFI BIOS CONFIG value can be 0 or 3 only.
- * i.e 0 means DDR and DLVR enabled. 3 means DDR and DLVR disabled.
- * 1 and 2 are invalid BIOS configurations, So, it's not possible to
- * disable ddr/dlvr separately.
- */
- if (!value) {
- IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to enable\n");
- bios_enable_rfi = true;
- } else if (value == DSM_VALUE_RFI_DISABLE) {
- IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to disable\n");
- } else {
- IWL_DEBUG_RADIO(mvm,
- "DSM RFI got invalid value, value=%d\n", value);
- }
-
- return bios_enable_rfi;
-}
-
static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
{
struct iwl_lari_config_change_cmd cmd;
@@ -1631,7 +1583,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
iwl_mvm_uats_init(mvm);
if (iwl_rfi_supported(mvm)) {
- if (iwl_mvm_eval_dsm_rfi(mvm))
+ if (iwl_rfi_is_enabled_in_bios(&mvm->fwrt))
iwl_rfi_send_config_cmd(mvm, NULL);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 6b06732441c3..bec18d197f31 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1960,26 +1960,3 @@ void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm,
ieee80211_channel_switch_disconnect(vif);
rcu_read_unlock();
}
-
-void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_missed_vap_notif *mb = (void *)pkt->data;
- struct ieee80211_vif *vif;
- u32 id = le32_to_cpu(mb->mac_id);
-
- IWL_DEBUG_INFO(mvm,
- "missed_vap notify mac_id=%u, num_beacon_intervals_elapsed=%u, profile_periodicity=%u\n",
- le32_to_cpu(mb->mac_id),
- mb->num_beacon_intervals_elapsed,
- mb->profile_periodicity);
-
- rcu_read_lock();
-
- vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
- if (vif)
- iwl_mvm_connection_loss(mvm, vif, "missed vap beacon");
-
- rcu_read_unlock();
-}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index af6644b7e95f..1e916a0ce082 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -70,7 +70,7 @@ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
};
static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
- .max_peers = IWL_MVM_TOF_MAX_APS,
+ .max_peers = IWL_TOF_MAX_APS,
.report_ap_tsf = 1,
.randomize_mac_addr = 1,
@@ -272,9 +272,10 @@ static const u8 tm_if_types_ext_capa_sta[] = {
__bf_shf(IEEE80211_EML_CAP_EMLSR_PADDING_DELAY) | \
IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_64US << \
__bf_shf(IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY))
-#define IWL_MVM_MLD_CAPA_OPS FIELD_PREP_CONST( \
+#define IWL_MVM_MLD_CAPA_OPS (FIELD_PREP_CONST( \
IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP, \
- IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME)
+ IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME) | \
+ IEEE80211_MLD_CAP_OP_LINK_RECONF_SUPPORT)
static const struct wiphy_iftype_ext_capab add_iftypes_ext_capa[] = {
{
@@ -4099,6 +4100,20 @@ iwl_mvm_sta_state_authorized_to_assoc(struct iwl_mvm *mvm,
return 0;
}
+void iwl_mvm_smps_workaround(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool update)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (!iwl_mvm_has_rlc_offload(mvm))
+ return;
+
+ mvmvif->ps_disabled = !vif->cfg.ps;
+
+ if (update)
+ iwl_mvm_power_update_mac(mvm);
+}
+
/* Common part for MLD and non-MLD modes */
int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -4191,6 +4206,7 @@ int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw,
new_state == IEEE80211_STA_AUTHORIZED) {
ret = iwl_mvm_sta_state_assoc_to_authorized(mvm, vif, sta,
callbacks);
+ iwl_mvm_smps_workaround(mvm, vif, true);
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
ret = iwl_mvm_sta_state_authorized_to_assoc(mvm, vif, sta,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index 341a2a7a49ec..78d7153a0cfc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022-2024 Intel Corporation
+ * Copyright (C) 2022-2025 Intel Corporation
*/
#include "mvm.h"
@@ -887,6 +887,7 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
}
if (changes & BSS_CHANGED_PS) {
+ iwl_mvm_smps_workaround(mvm, vif, false);
ret = iwl_mvm_power_update_mac(mvm);
if (ret)
IWL_ERR(mvm, "failed to update power mode\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
index 2f159024eeb8..9dd670041137 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
@@ -121,7 +121,7 @@ static int iwl_mvm_add_aux_sta_to_fw(struct iwl_mvm *mvm,
{
int ret;
- struct iwl_mvm_aux_sta_cmd cmd = {
+ struct iwl_aux_sta_cmd cmd = {
.sta_id = cpu_to_le32(sta->sta_id),
.lmac_id = cpu_to_le32(lmac_id),
};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index ee769da72e68..f6391c7a3e29 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -1310,7 +1310,7 @@ struct iwl_mvm {
struct cfg80211_pmsr_request *req;
struct wireless_dev *req_wdev;
struct list_head loc_list;
- int responses[IWL_MVM_TOF_MAX_APS];
+ int responses[IWL_TOF_MAX_APS];
struct {
struct list_head resp;
} smooth;
@@ -2095,8 +2095,6 @@ void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
-void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm,
@@ -2520,12 +2518,6 @@ void iwl_mvm_ftm_restart_responder(struct iwl_mvm *mvm,
struct ieee80211_bss_conf *bss_conf);
void iwl_mvm_ftm_responder_stats(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
-int iwl_mvm_ftm_resp_remove_pasn_sta(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif, u8 *addr);
-int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
- u8 *hltk, u32 hltk_len);
void iwl_mvm_ftm_responder_clear(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
@@ -2540,10 +2532,6 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req);
void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm);
void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm);
-int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
- u8 *hltk, u32 hltk_len);
-void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr);
/* TDLS */
@@ -3043,4 +3031,7 @@ iwl_mvm_send_ap_tx_power_constraint_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
bool is_ap);
+
+void iwl_mvm_smps_workaround(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool update);
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 984f407f7027..76603ef02704 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -145,7 +145,7 @@ static void iwl_mvm_rx_esr_mode_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_mvm_esr_mode_notif *notif = (void *)pkt->data;
+ struct iwl_esr_mode_notif *notif = (void *)pkt->data;
struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm);
/* FW recommendations is only for entering EMLSR */
@@ -495,7 +495,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_GRP(DATA_PATH_GROUP, ESR_MODE_NOTIF,
iwl_mvm_rx_esr_mode_notif,
RX_HANDLER_ASYNC_LOCKED_WIPHY,
- struct iwl_mvm_esr_mode_notif),
+ struct iwl_esr_mode_notif),
RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index a8c4e354e2ce..068c58e9c1eb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -1783,7 +1783,7 @@ static enum rs_action rs_get_rate_action(struct iwl_mvm *mvm,
if ((high_tpt != IWL_INVALID_VALUE) &&
(high_tpt > current_tpt)) {
IWL_DEBUG_RATE(mvm,
- "Higher rate is better. Increate rate\n");
+ "Higher rate is better. Increase rate\n");
return RS_ACTION_UPSCALE;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 7a4844ec3c10..78fd7faaed97 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -4311,67 +4311,6 @@ u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}
-int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
- u8 *key, u32 key_len,
- struct ieee80211_key_conf *keyconf)
-{
- int ret;
- u16 queue;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- unsigned int wdg_timeout =
- iwl_mvm_get_wd_timeout(mvm, vif);
- bool mld = iwl_mvm_has_mld_api(mvm->fw);
- u32 type = IWL_STA_LINK;
-
- if (mld)
- type = STATION_TYPE_PEER;
-
- ret = iwl_mvm_allocate_int_sta(mvm, sta, 0,
- NL80211_IFTYPE_UNSPECIFIED, type);
- if (ret)
- return ret;
-
- if (mld)
- ret = iwl_mvm_mld_add_int_sta_with_queue(mvm, sta, addr,
- mvmvif->deflink.fw_link_id,
- &queue,
- IWL_MAX_TID_COUNT,
- &wdg_timeout);
- else
- ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id,
- mvmvif->color, addr, sta,
- &queue,
- IWL_MVM_TX_FIFO_BE);
- if (ret)
- goto out;
-
- keyconf->cipher = cipher;
- memcpy(keyconf->key, key, key_len);
- keyconf->keylen = key_len;
- keyconf->flags = IEEE80211_KEY_FLAG_PAIRWISE;
-
- if (mld) {
- /* The MFP flag is set according to the station mfp field. Since
- * we don't have a station, set it manually.
- */
- u32 key_flags =
- iwl_mvm_get_sec_flags(mvm, vif, NULL, keyconf) |
- IWL_SEC_KEY_FLAG_MFP;
- u32 sta_mask = BIT(sta->sta_id);
-
- ret = iwl_mvm_mld_send_key(mvm, sta_mask, key_flags, keyconf);
- } else {
- ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false,
- 0, NULL, 0, 0, true);
- }
-
-out:
- if (ret)
- iwl_mvm_dealloc_int_sta(mvm, sta);
- return ret;
-}
-
void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 id)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 6856f7440ef3..19c905b641e2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -597,10 +597,6 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm, struct ieee80211_txq *txq);
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
-int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
- u8 *key, u32 key_len,
- struct ieee80211_key_conf *key_conf_out);
void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 id);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index ebfa88b38b71..1a30bb1ff8ca 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
*/
@@ -771,7 +771,7 @@ static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
static void iwl_mvm_roc_rm_cmd(struct iwl_mvm *mvm, u32 activity)
{
- struct iwl_roc_req roc_cmd = {
+ struct iwl_roc_req_v5 roc_cmd = {
.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
.activity = cpu_to_le32(activity),
};
@@ -1102,7 +1102,7 @@ int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
{
int res;
u32 duration_tu, delay;
- struct iwl_roc_req roc_req = {
+ struct iwl_roc_req_v5 roc_req = {
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
.activity = cpu_to_le32(activity),
.sta_id = cpu_to_le32(mvm->aux_sta.sta_id),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index d92470960b38..c851290e75a2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -105,7 +105,7 @@ static bool iwl_mvm_temp_notif_wait(struct iwl_notif_wait_data *notif_wait,
void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_dts_measurement_notif_v2 *notif_v2;
+ struct iwl_dts_measurement_notif *notif_v2;
int len = iwl_rx_packet_payload_len(pkt);
int temp;
u32 ths_crossed;
@@ -506,7 +506,7 @@ static const u32 iwl_mvm_cdev_budgets[] = {
int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state)
{
- struct iwl_mvm_ctdp_cmd cmd = {
+ struct iwl_ctdp_cmd cmd = {
.operation = cpu_to_le32(op),
.budget = cpu_to_le32(iwl_mvm_cdev_budgets[state]),
.window_size = 0,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 838c426db7f0..8aa7c455bdee 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#include <linux/dmi.h>
#include "iwl-trans.h"
@@ -137,6 +137,9 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
if (trans->dsbr_urm_permanent)
control_flags_ext |= IWL_PRPH_SCRATCH_EXT_URM_PERM;
+ if (trans->ext_32khz_clock_valid)
+ control_flags_ext |= IWL_PRPH_SCRATCH_EXT_32KHZ_CLK_VALID;
+
/* Allocate prph scratch */
prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
&trans_pcie->prph_scratch_dma_addr,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index e0b657b2f74b..93446c374008 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -498,7 +498,8 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {
/* Ma devices */
{IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)},
{IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)},
-
+#endif /* CONFIG_IWLMVM */
+#if IS_ENABLED(CONFIG_IWLMLD)
/* Bz devices */
{IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_gl_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x0000, iwl_bz_trans_cfg)},
@@ -543,7 +544,7 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {
/* Dr devices */
{IWL_PCI_DEVICE(0x272F, PCI_ANY_ID, iwl_dr_trans_cfg)},
-#endif /* CONFIG_IWLMVM */
+#endif /* CONFIG_IWLMLD */
{0}
};
@@ -551,16 +552,17 @@ MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_hw_card_ids);
#define _IWL_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
- _rf_id, _rf_step, _no_160, _cores, _cdb, _cfg, _name) \
+ _rf_id, _rf_step, _bw_limit, _cores, _cdb, _cfg, _name) \
{ .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \
.name = _name, .mac_type = _mac_type, .rf_type = _rf_type, .rf_step = _rf_step, \
- .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \
+ .bw_limit = _bw_limit, .cores = _cores, .rf_id = _rf_id, \
.mac_step = _mac_step, .cdb = _cdb, .jacket = IWL_CFG_ANY }
#define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \
_IWL_DEV_INFO(_device, _subdevice, IWL_CFG_ANY, IWL_CFG_ANY, \
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \
- IWL_CFG_ANY, _cfg, _name)
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \
+ IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_ANY, \
+ _cfg, _name)
VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
#if IS_ENABLED(CONFIG_IWLMVM)
@@ -723,66 +725,66 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_2ac_cfg_soc, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_2ac_cfg_soc, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_2ac_cfg_soc, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_2ac_cfg_soc, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_2ac_cfg_soc, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_2ac_cfg_soc, iwl9560_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9270_160_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9270_name),
_IWL_DEV_INFO(0x271B, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9162_160_name),
_IWL_DEV_INFO(0x271B, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9162_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9260_160_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9260_name),
/* Qu with Jf */
@@ -790,132 +792,132 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9560_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550s_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550i_name),
/* Qu C step */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9560_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550s_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550i_name),
/* QuZ */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9560_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550s_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550i_name),
/* Qu with Hr */
@@ -923,202 +925,204 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_qu_b0_hr1_b0, iwl_ax101_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_qu_b0_hr_b0, iwl_ax203_name),
/* Qu C step */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_qu_c0_hr1_b0, iwl_ax101_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_qu_c0_hr_b0, iwl_ax203_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_qu_c0_hr_b0, iwl_ax201_name),
/* QuZ */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_quz_a0_hr1_b0, iwl_ax101_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_quz_a0_hr_b0, iwl_ax203_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_quz_a0_hr_b0, iwl_ax201_name),
/* Ma */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_ma, iwl_ax201_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_ma, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_ma, iwl_ax231_name),
/* So with Hr */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_so_a0_hr_a0, iwl_ax203_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_so_a0_hr_a0, iwl_ax101_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_so_a0_hr_a0, iwl_ax201_name),
/* So-F with Hr */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_so_a0_hr_a0, iwl_ax203_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_so_a0_hr_a0, iwl_ax101_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_so_a0_hr_a0, iwl_ax201_name),
/* So-F with Gf */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_CDB,
iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name),
/* SoF with JF2 */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
/* SoF with JF */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
/* So with GF */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_CDB,
iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name),
/* So with JF2 */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
/* So with JF */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
+#endif /* CONFIG_IWLMVM */
+#if IS_ENABLED(CONFIG_IWLMLD)
/* Bz */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_ax201_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
@@ -1130,75 +1134,121 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_wh_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_ax201_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_fm_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_wh_name),
/* Ga (Gl) */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_gl, iwl_gl_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ 160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_gl, iwl_mtp_name),
/* Sc */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_sc, iwl_sc_name),
+ IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc, iwl_ax211_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc, iwl_fm_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc, iwl_wh_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
+ 160, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc, iwl_sp_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_sc2, iwl_sc2_name),
+ IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc2, iwl_ax211_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc2, iwl_fm_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc2, iwl_wh_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
+ 160, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc2, iwl_sp_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_sc2f, iwl_sc2f_name),
+ IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc2f, iwl_ax211_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc2f, iwl_fm_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc2f, iwl_wh_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
+ 160, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc2f, iwl_sp_name),
+
/* Dr */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_DR, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_dr, iwl_dr_name),
/* Br */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BR, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_br, iwl_br_name),
-#endif /* CONFIG_IWLMVM */
+#endif /* CONFIG_IWLMLD */
};
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table);
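The table above replaces the old boolean no_160 column with a bandwidth-limit column: an entry either names a sentinel (IWL_CFG_BW_ANY to match any subdevice, IWL_CFG_BW_NO_LIM for parts with no cap) or gives the cap in MHz (80, 160). A minimal userspace sketch of that encoding follows; only the macro names come from the diff, the numeric sentinel values and the print helper are illustrative assumptions.

/* Userspace model of the bw_limit column used in the device table above.
 * The sentinel values below are assumptions for illustration; only the
 * macro names (IWL_CFG_BW_ANY, IWL_CFG_BW_NO_LIM) appear in the diff. */
#include <stdint.h>
#include <stdio.h>

#define IWL_CFG_BW_ANY     0xFFFF  /* wildcard: matches any subdevice */
#define IWL_CFG_BW_NO_LIM  0xFFFE  /* entry has no bandwidth limit */

struct dev_entry {
	const char *name;
	uint16_t bw_limit;	/* MHz cap, or one of the sentinels */
};

static void print_entry(const struct dev_entry *e)
{
	if (e->bw_limit == IWL_CFG_BW_ANY)
		printf("%s: bandwidth column is a wildcard\n", e->name);
	else if (e->bw_limit == IWL_CFG_BW_NO_LIM)
		printf("%s: no bandwidth limit\n", e->name);
	else
		printf("%s: limited to %u MHz\n", e->name,
		       (unsigned int)e->bw_limit);
}

int main(void)
{
	const struct dev_entry entries[] = {
		{ "iwl_ax201", IWL_CFG_BW_NO_LIM },
		{ "iwl_ax203", 80 },
		{ "iwl_mtp",   160 },
		{ "iwl_sc",    IWL_CFG_BW_ANY },
	};

	for (size_t i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
		print_entry(&entries[i]);
	return 0;
}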
@@ -1348,7 +1398,7 @@ out:
VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info *
iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb,
- u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step)
+ u8 jacket, u8 rf_id, u8 bw_limit, u8 cores, u8 rf_step)
{
int num_devices = ARRAY_SIZE(iwl_dev_info_table);
int i;
@@ -1391,8 +1441,15 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
dev_info->rf_id != rf_id)
continue;
- if (dev_info->no_160 != (u8)IWL_CFG_ANY &&
- dev_info->no_160 != no_160)
+ /*
+ * Check that bw_limit has the same "boolean" value since
+ * IWL_SUBDEVICE_BW_LIM can only return a boolean value and
+ * dev_info->bw_limit encodes a non-boolean value.
+ * dev_info->bw_limit == IWL_CFG_BW_NO_LIM must be equal to
+ * !bw_limit to have a match.
+ */
+ if (dev_info->bw_limit != IWL_CFG_BW_ANY &&
+ (dev_info->bw_limit == IWL_CFG_BW_NO_LIM) == !!bw_limit)
continue;
if (dev_info->cores != (u8)IWL_CFG_ANY &&
@@ -1530,13 +1587,13 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
CSR_HW_RFID_IS_CDB(iwl_trans->hw_rf_id),
CSR_HW_RFID_IS_JACKET(iwl_trans->hw_rf_id),
IWL_SUBDEVICE_RF_ID(pdev->subsystem_device),
- IWL_SUBDEVICE_NO_160(pdev->subsystem_device),
+ IWL_SUBDEVICE_BW_LIM(pdev->subsystem_device),
IWL_SUBDEVICE_CORES(pdev->subsystem_device),
CSR_HW_RFID_STEP(iwl_trans->hw_rf_id));
if (dev_info) {
iwl_trans->cfg = dev_info->cfg;
iwl_trans->name = dev_info->name;
- iwl_trans->no_160 = dev_info->no_160 == IWL_CFG_NO_160;
+ iwl_trans->bw_limit = dev_info->bw_limit;
}
#if IS_ENABLED(CONFIG_IWLMVM)
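The comment in iwl_pci_find_dev_info() above spells out the matching rule: the subsystem ID can only say whether a device is bandwidth limited, while the table entry carries a MHz value, so the two are compared as booleans unless the entry is a wildcard. A small, self-contained sketch of that predicate follows, under the same assumed sentinel values as in the previous sketch; entry_bw_matches() is a hypothetical helper, the real driver open-codes the test.

/* Userspace sketch of the bw_limit comparison done in iwl_pci_find_dev_info(). */
#include <stdbool.h>
#include <stdint.h>
#include <assert.h>

#define IWL_CFG_BW_ANY     0xFFFF  /* assumed sentinel values, as above */
#define IWL_CFG_BW_NO_LIM  0xFFFE

static bool entry_bw_matches(uint16_t entry_bw_limit, bool subdev_bw_limited)
{
	/* Wildcard entries match every device. */
	if (entry_bw_limit == IWL_CFG_BW_ANY)
		return true;
	/*
	 * The subsystem ID only tells us *whether* the device is bandwidth
	 * limited, not by how much, so both sides collapse to booleans:
	 * "entry has no limit" must equal "subdevice is not limited".
	 */
	return (entry_bw_limit == IWL_CFG_BW_NO_LIM) == !subdev_bw_limited;
}

int main(void)
{
	assert(entry_bw_matches(IWL_CFG_BW_ANY, true));
	assert(entry_bw_matches(IWL_CFG_BW_NO_LIM, false));
	assert(!entry_bw_matches(IWL_CFG_BW_NO_LIM, true));
	assert(entry_bw_matches(80, true));	/* 80 MHz cap, limited subdevice */
	assert(!entry_bw_matches(160, false));
	return 0;
}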
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 793514a1852a..3ece34e30d58 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#include "iwl-trans.h"
#include "iwl-prph.h"
@@ -95,7 +95,7 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
-static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)
+void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 7b6071a59b69..7c1dd5cc084a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1869,12 +1869,12 @@ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
unsigned int offset)
{
struct sg_table *sgt;
- unsigned int n_segments;
+ unsigned int n_segments = skb_shinfo(skb)->nr_frags + 1;
+ int orig_nents;
if (WARN_ON_ONCE(skb_has_frag_list(skb)))
return NULL;
- n_segments = DIV_ROUND_UP(skb->len - offset, skb_shinfo(skb)->gso_size);
*hdr = iwl_pcie_get_page_hdr(trans,
hdr_room + __alignof__(struct sg_table) +
sizeof(struct sg_table) +
@@ -1889,11 +1889,12 @@ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
sg_init_table(sgt->sgl, n_segments);
/* Only map the data, not the header (it is copied to the TSO page) */
- sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, offset,
- skb->len - offset);
- if (WARN_ON_ONCE(sgt->orig_nents <= 0))
+ orig_nents = skb_to_sgvec(skb, sgt->sgl, offset, skb->len - offset);
+ if (WARN_ON_ONCE(orig_nents <= 0))
return NULL;
+ sgt->orig_nents = orig_nents;
+
/* And map the entire SKB */
if (dma_map_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0) < 0)
return NULL;
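This hunk sizes the scatterlist from nr_frags + 1 rather than dividing skb->len by gso_size, and keeps the skb_to_sgvec() return value in a signed local so the <= 0 error check fires before the value lands in the unsigned sgt->orig_nents field. A userspace model of that signedness pattern; fill_sg() is a hypothetical stand-in for skb_to_sgvec().

/* Userspace model of the signedness fix in iwl_pcie_prep_tso(). */
#include <stdio.h>

struct sg_table_model {
	unsigned int orig_nents;
};

static int fill_sg(int simulate_error)
{
	return simulate_error ? -22 /* -EINVAL */ : 3;
}

static int prep(struct sg_table_model *sgt, int simulate_error)
{
	int orig_nents = fill_sg(simulate_error);

	/* Assigning a negative error straight into the unsigned field would
	 * wrap to a huge value, so a <= 0 test on the field could never
	 * catch it; checking the signed intermediate is reliable. */
	if (orig_nents <= 0)
		return -1;

	sgt->orig_nents = orig_nents;
	return 0;
}

int main(void)
{
	struct sg_table_model sgt;

	printf("ok path:    %d\n", prep(&sgt, 0));
	printf("error path: %d\n", prep(&sgt, 1));
	return 0;
}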
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
index d0bda23c628a..7ef5e89c6af2 100644
--- a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
+++ b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
@@ -2,7 +2,7 @@
/*
* KUnit tests for the iwlwifi device info table
*
- * Copyright (C) 2023-2024 Intel Corporation
+ * Copyright (C) 2023-2025 Intel Corporation
*/
#include <kunit/test.h>
#include <linux/pci.h>
@@ -13,9 +13,9 @@ MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
static void iwl_pci_print_dev_info(const char *pfx, const struct iwl_dev_info *di)
{
- printk(KERN_DEBUG "%sdev=%.4x,subdev=%.4x,mac_type=%.4x,mac_step=%.4x,rf_type=%.4x,cdb=%d,jacket=%d,rf_id=%.2x,no_160=%d,cores=%.2x\n",
+ printk(KERN_DEBUG "%sdev=%.4x,subdev=%.4x,mac_type=%.4x,mac_step=%.4x,rf_type=%.4x,cdb=%d,jacket=%d,rf_id=%.2x,bw_limit=%d,cores=%.2x\n",
pfx, di->device, di->subdevice, di->mac_type, di->mac_step,
- di->rf_type, di->cdb, di->jacket, di->rf_id, di->no_160,
+ di->rf_type, di->cdb, di->jacket, di->rf_id, di->bw_limit,
di->cores);
}
@@ -31,8 +31,13 @@ static void devinfo_table_order(struct kunit *test)
di->mac_type, di->mac_step,
di->rf_type, di->cdb,
di->jacket, di->rf_id,
- di->no_160, di->cores, di->rf_step);
- if (ret != di) {
+ di->bw_limit != IWL_CFG_BW_NO_LIM,
+ di->cores, di->rf_step);
+ if (!ret) {
+ iwl_pci_print_dev_info("No entry found for: ", di);
+ KUNIT_FAIL(test,
+ "No entry found for entry at index %d\n", idx);
+ } else if (ret != di) {
iwl_pci_print_dev_info("searched: ", di);
iwl_pci_print_dev_info("found: ", ret);
KUNIT_FAIL(test,
diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c
index 5a525da434c2..21fde876bb0d 100644
--- a/drivers/net/wireless/marvell/libertas/cmd.c
+++ b/drivers/net/wireless/marvell/libertas/cmd.c
@@ -453,46 +453,6 @@ out:
}
/**
- * lbs_get_snmp_mib - Get an SNMP MIB value
- *
- * @priv: A pointer to &struct lbs_private structure
- * @oid: The OID to retrieve from the firmware
- * @out_val: Location for the returned value
- *
- * returns: 0 on success, error on failure
- */
-int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val)
-{
- struct cmd_ds_802_11_snmp_mib cmd;
- int ret;
-
- memset(&cmd, 0, sizeof (cmd));
- cmd.hdr.size = cpu_to_le16(sizeof(cmd));
- cmd.action = cpu_to_le16(CMD_ACT_GET);
- cmd.oid = cpu_to_le16(oid);
-
- ret = lbs_cmd_with_response(priv, CMD_802_11_SNMP_MIB, &cmd);
- if (ret)
- goto out;
-
- switch (le16_to_cpu(cmd.bufsize)) {
- case sizeof(u8):
- *out_val = cmd.value[0];
- break;
- case sizeof(u16):
- *out_val = le16_to_cpu(*((__le16 *)(&cmd.value)));
- break;
- default:
- lbs_deb_cmd("SNMP_CMD: (get) unhandled OID 0x%x size %d\n",
- oid, le16_to_cpu(cmd.bufsize));
- break;
- }
-
-out:
- return ret;
-}
-
-/**
* lbs_get_tx_power - Get the min, max, and current TX power
*
* @priv: A pointer to &struct lbs_private structure
@@ -525,31 +485,6 @@ int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel,
}
/**
- * lbs_set_tx_power - Set the TX power
- *
- * @priv: A pointer to &struct lbs_private structure
- * @dbm: The desired power level in dBm
- *
- * returns: 0 on success, error on failure
- */
-int lbs_set_tx_power(struct lbs_private *priv, s16 dbm)
-{
- struct cmd_ds_802_11_rf_tx_power cmd;
- int ret;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.hdr.size = cpu_to_le16(sizeof(cmd));
- cmd.action = cpu_to_le16(CMD_ACT_SET);
- cmd.curlevel = cpu_to_le16(dbm);
-
- lbs_deb_cmd("SET_RF_TX_POWER: %d dBm\n", dbm);
-
- ret = lbs_cmd_with_response(priv, CMD_802_11_RF_TX_POWER, &cmd);
-
- return ret;
-}
-
-/**
* lbs_set_monitor_mode - Enable or disable monitor mode
* (only implemented on OLPC usb8388 FW)
*
@@ -968,10 +903,6 @@ static void lbs_submit_command(struct lbs_private *priv,
}
if (command == CMD_802_11_DEEP_SLEEP) {
- if (priv->is_auto_deep_sleep_enabled) {
- priv->wakeup_dev_required = 1;
- priv->dnld_sent = 0;
- }
priv->is_deep_sleep = 1;
lbs_complete_command(priv, cmdnode, 0);
} else {
@@ -1440,70 +1371,6 @@ void lbs_ps_confirm_sleep(struct lbs_private *priv)
}
-/**
- * lbs_set_tpc_cfg - Configures the transmission power control functionality
- *
- * @priv: A pointer to &struct lbs_private structure
- * @enable: Transmission power control enable
- * @p0: Power level when link quality is good (dBm).
- * @p1: Power level when link quality is fair (dBm).
- * @p2: Power level when link quality is poor (dBm).
- * @usesnr: Use Signal to Noise Ratio in TPC
- *
- * returns: 0 on success
- */
-int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1,
- int8_t p2, int usesnr)
-{
- struct cmd_ds_802_11_tpc_cfg cmd;
- int ret;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.hdr.size = cpu_to_le16(sizeof(cmd));
- cmd.action = cpu_to_le16(CMD_ACT_SET);
- cmd.enable = !!enable;
- cmd.usesnr = !!usesnr;
- cmd.P0 = p0;
- cmd.P1 = p1;
- cmd.P2 = p2;
-
- ret = lbs_cmd_with_response(priv, CMD_802_11_TPC_CFG, &cmd);
-
- return ret;
-}
-
-/**
- * lbs_set_power_adapt_cfg - Configures the power adaptation settings
- *
- * @priv: A pointer to &struct lbs_private structure
- * @enable: Power adaptation enable
- * @p0: Power level for 1, 2, 5.5 and 11 Mbps (dBm).
- * @p1: Power level for 6, 9, 12, 18, 22, 24 and 36 Mbps (dBm).
- * @p2: Power level for 48 and 54 Mbps (dBm).
- *
- * returns: 0 on Success
- */
-
-int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
- int8_t p1, int8_t p2)
-{
- struct cmd_ds_802_11_pa_cfg cmd;
- int ret;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.hdr.size = cpu_to_le16(sizeof(cmd));
- cmd.action = cpu_to_le16(CMD_ACT_SET);
- cmd.enable = !!enable;
- cmd.P0 = p0;
- cmd.P1 = p1;
- cmd.P2 = p2;
-
- ret = lbs_cmd_with_response(priv, CMD_802_11_PA_CFG , &cmd);
-
- return ret;
-}
-
-
struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
uint16_t command, struct cmd_header *in_cmd, int in_cmd_size,
int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *),
@@ -1520,12 +1387,10 @@ struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
/* No commands are allowed in Deep Sleep until we toggle the GPIO
* to wake up the card and it has signaled that it's ready.
*/
- if (!priv->is_auto_deep_sleep_enabled) {
- if (priv->is_deep_sleep) {
- lbs_deb_cmd("command not allowed in deep sleep\n");
- cmdnode = ERR_PTR(-EBUSY);
- goto done;
- }
+ if (priv->is_deep_sleep) {
+ lbs_deb_cmd("command not allowed in deep sleep\n");
+ cmdnode = ERR_PTR(-EBUSY);
+ goto done;
}
cmdnode = lbs_get_free_cmd_node(priv);
diff --git a/drivers/net/wireless/marvell/libertas/cmd.h b/drivers/net/wireless/marvell/libertas/cmd.h
index d7be232f5739..a95c2651e67f 100644
--- a/drivers/net/wireless/marvell/libertas/cmd.h
+++ b/drivers/net/wireless/marvell/libertas/cmd.h
@@ -105,19 +105,9 @@ int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel,
int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val);
-int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val);
-
/* Commands only used in wext.c, assoc. and scan.c */
-int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
- int8_t p1, int8_t p2);
-
-int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1,
- int8_t p2, int usesnr);
-
-int lbs_set_tx_power(struct lbs_private *priv, s16 dbm);
-
int lbs_set_deep_sleep(struct lbs_private *priv, int deep_sleep);
int lbs_set_host_sleep(struct lbs_private *priv, int host_sleep);
diff --git a/drivers/net/wireless/marvell/libertas/cmdresp.c b/drivers/net/wireless/marvell/libertas/cmdresp.c
index f2aa659e7714..8393f396eebe 100644
--- a/drivers/net/wireless/marvell/libertas/cmdresp.c
+++ b/drivers/net/wireless/marvell/libertas/cmdresp.c
@@ -279,7 +279,6 @@ void lbs_process_event(struct lbs_private *priv, u32 event)
priv->reset_deep_sleep_wakeup(priv);
lbs_deb_cmd("EVENT: ds awake\n");
priv->is_deep_sleep = 0;
- priv->wakeup_dev_required = 0;
wake_up_interruptible(&priv->ds_awake_q);
break;
diff --git a/drivers/net/wireless/marvell/libertas/decl.h b/drivers/net/wireless/marvell/libertas/decl.h
index c1e0388ef01d..ea69007a2958 100644
--- a/drivers/net/wireless/marvell/libertas/decl.h
+++ b/drivers/net/wireless/marvell/libertas/decl.h
@@ -64,11 +64,7 @@ int lbs_resume(struct lbs_private *priv);
void lbs_queue_event(struct lbs_private *priv, u32 event);
void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx);
-int lbs_enter_auto_deep_sleep(struct lbs_private *priv);
-int lbs_exit_auto_deep_sleep(struct lbs_private *priv);
-
u32 lbs_fw_index_to_data_rate(u8 index);
-u8 lbs_data_rate_to_fw_index(u32 rate);
int lbs_get_firmware(struct device *dev, u32 card_model,
const struct lbs_fw_table *fw_table,
diff --git a/drivers/net/wireless/marvell/libertas/dev.h b/drivers/net/wireless/marvell/libertas/dev.h
index 4b6e05a8e5d5..c4708ce4eb83 100644
--- a/drivers/net/wireless/marvell/libertas/dev.h
+++ b/drivers/net/wireless/marvell/libertas/dev.h
@@ -83,12 +83,8 @@ struct lbs_private {
/* Deep sleep */
int is_deep_sleep;
int deep_sleep_required;
- int is_auto_deep_sleep_enabled;
- int wakeup_dev_required;
int is_activity_detected;
- int auto_deep_sleep_timeout; /* in ms */
wait_queue_head_t ds_awake_q;
- struct timer_list auto_deepsleep_timer;
/* Host sleep*/
int is_host_sleep_configured;
diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
index 78e8b5aecec0..017e5c6bbade 100644
--- a/drivers/net/wireless/marvell/libertas/main.c
+++ b/drivers/net/wireless/marvell/libertas/main.c
@@ -79,26 +79,6 @@ u32 lbs_fw_index_to_data_rate(u8 idx)
return fw_data_rates[idx];
}
-/**
- * lbs_data_rate_to_fw_index - use rate to get the index
- *
- * @rate: data rate
- * returns: index or 0
- */
-u8 lbs_data_rate_to_fw_index(u32 rate)
-{
- u8 i;
-
- if (!rate)
- return 0;
-
- for (i = 0; i < sizeof(fw_data_rates); i++) {
- if (rate == fw_data_rates[i])
- return i;
- }
- return 0;
-}
-
int lbs_set_iface_type(struct lbs_private *priv, enum nl80211_iftype type)
{
int ret = 0;
@@ -276,8 +256,7 @@ void lbs_host_to_card_done(struct lbs_private *priv)
/* Wake main thread if commands are pending */
if (!priv->cur_cmd || priv->tx_pending_len > 0) {
- if (!priv->wakeup_dev_required)
- wake_up(&priv->waitq);
+ wake_up(&priv->waitq);
}
spin_unlock_irqrestore(&priv->driver_lock, flags);
@@ -468,8 +447,7 @@ static int lbs_thread(void *data)
shouldsleep = 0; /* We have a command response */
else if (priv->cur_cmd)
shouldsleep = 1; /* Can't send a command; one already running */
- else if (!list_empty(&priv->cmdpendingq) &&
- !(priv->wakeup_dev_required))
+ else if (!list_empty(&priv->cmdpendingq))
shouldsleep = 0; /* We have a command to send */
else if (kfifo_len(&priv->event_fifo))
shouldsleep = 0; /* We have an event to process */
@@ -536,14 +514,6 @@ static int lbs_thread(void *data)
}
spin_unlock_irq(&priv->driver_lock);
- if (priv->wakeup_dev_required) {
- lbs_deb_thread("Waking up device...\n");
- /* Wake up device */
- if (priv->exit_deep_sleep(priv))
- lbs_deb_thread("Wakeup device failed\n");
- continue;
- }
-
/* command timeout stuff */
if (priv->cmd_timed_out && priv->cur_cmd) {
struct cmd_ctrl_node *cmdnode = priv->cur_cmd;
@@ -626,7 +596,6 @@ static int lbs_thread(void *data)
del_timer(&priv->command_timer);
del_timer(&priv->tx_lockup_timer);
- del_timer(&priv->auto_deepsleep_timer);
return 0;
}
@@ -773,55 +742,6 @@ static void lbs_tx_lockup_handler(struct timer_list *t)
spin_unlock_irqrestore(&priv->driver_lock, flags);
}
-/**
- * auto_deepsleep_timer_fn - put the device back to deep sleep mode when
- * timer expires and no activity (command, event, data etc.) is detected.
- * @t: Context from which to retrieve a &struct lbs_private pointer
- * returns: N/A
- */
-static void auto_deepsleep_timer_fn(struct timer_list *t)
-{
- struct lbs_private *priv = from_timer(priv, t, auto_deepsleep_timer);
-
- if (priv->is_activity_detected) {
- priv->is_activity_detected = 0;
- } else {
- if (priv->is_auto_deep_sleep_enabled &&
- (!priv->wakeup_dev_required) &&
- (priv->connect_status != LBS_CONNECTED)) {
- struct cmd_header cmd;
-
- lbs_deb_main("Entering auto deep sleep mode...\n");
- memset(&cmd, 0, sizeof(cmd));
- cmd.size = cpu_to_le16(sizeof(cmd));
- lbs_cmd_async(priv, CMD_802_11_DEEP_SLEEP, &cmd,
- sizeof(cmd));
- }
- }
- mod_timer(&priv->auto_deepsleep_timer , jiffies +
- (priv->auto_deep_sleep_timeout * HZ)/1000);
-}
-
-int lbs_enter_auto_deep_sleep(struct lbs_private *priv)
-{
- priv->is_auto_deep_sleep_enabled = 1;
- if (priv->is_deep_sleep)
- priv->wakeup_dev_required = 1;
- mod_timer(&priv->auto_deepsleep_timer ,
- jiffies + (priv->auto_deep_sleep_timeout * HZ)/1000);
-
- return 0;
-}
-
-int lbs_exit_auto_deep_sleep(struct lbs_private *priv)
-{
- priv->is_auto_deep_sleep_enabled = 0;
- priv->auto_deep_sleep_timeout = 0;
- del_timer(&priv->auto_deepsleep_timer);
-
- return 0;
-}
-
static int lbs_init_adapter(struct lbs_private *priv)
{
int ret;
@@ -835,9 +755,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
priv->psmode = LBS802_11POWERMODECAM;
priv->psstate = PS_STATE_FULL_POWER;
priv->is_deep_sleep = 0;
- priv->is_auto_deep_sleep_enabled = 0;
priv->deep_sleep_required = 0;
- priv->wakeup_dev_required = 0;
init_waitqueue_head(&priv->ds_awake_q);
init_waitqueue_head(&priv->scan_q);
priv->authtype_auto = 1;
@@ -849,7 +767,6 @@ static int lbs_init_adapter(struct lbs_private *priv)
timer_setup(&priv->command_timer, lbs_cmd_timeout_handler, 0);
timer_setup(&priv->tx_lockup_timer, lbs_tx_lockup_handler, 0);
- timer_setup(&priv->auto_deepsleep_timer, auto_deepsleep_timer_fn, 0);
INIT_LIST_HEAD(&priv->cmdfreeq);
INIT_LIST_HEAD(&priv->cmdpendingq);
@@ -883,7 +800,6 @@ static void lbs_free_adapter(struct lbs_private *priv)
kfifo_free(&priv->event_fifo);
del_timer(&priv->command_timer);
del_timer(&priv->tx_lockup_timer);
- del_timer(&priv->auto_deepsleep_timer);
}
static const struct net_device_ops lbs_netdev_ops = {
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index 66f0f5377ac1..738bafc3749b 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -403,12 +403,14 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
bss_desc->bcn_ht_oper->ht_param &
- IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)
+ IEEE80211_HT_PARAM_CHAN_WIDTH_ANY) {
+ chan_list->chan_scan_param[0].radio_type |=
+ CHAN_BW_40MHZ << 2;
SET_SECONDARYCHAN(chan_list->chan_scan_param[0].
radio_type,
(bss_desc->bcn_ht_oper->ht_param &
IEEE80211_HT_PARAM_CHA_SEC_OFFSET));
-
+ }
*buffer += struct_size(chan_list, chan_scan_param, 1);
ret_len += struct_size(chan_list, chan_scan_param, 1);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c
index d39092b99212..d7fd79214bcf 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfp.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfp.c
@@ -150,7 +150,7 @@ static const u16 ac_mcs_rate_nss2[8][10] = {
struct region_code_mapping {
u8 code;
- u8 region[IEEE80211_COUNTRY_STRING_LEN];
+ u8 region[IEEE80211_COUNTRY_STRING_LEN] __nonstring;
};
static struct region_code_mapping region_code_mapping_t[] = {
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 4a96281792cc..91458f3bd14a 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -454,6 +454,11 @@ enum mwifiex_channel_flags {
#define HostCmd_RET_BIT 0x8000
#define HostCmd_ACT_GEN_GET 0x0000
#define HostCmd_ACT_GEN_SET 0x0001
+#define HOST_CMD_ACT_GEN_SET 0x0001
+/* Add this non-CamelCase-style macro to comply with checkpatch requirements.
+ * It will eventually replace all of the existing CamelCase-style macros for
+ * consistency.
+ */
#define HostCmd_ACT_GEN_REMOVE 0x0004
#define HostCmd_ACT_BITWISE_SET 0x0002
#define HostCmd_ACT_BITWISE_CLR 0x0003
@@ -2352,6 +2357,14 @@ struct host_cmd_ds_add_station {
u8 tlv[];
} __packed;
+#define MWIFIEX_CFG_TYPE_CAL 0x2
+
+struct host_cmd_ds_802_11_cfg_data {
+ __le16 action;
+ __le16 type;
+ __le16 data_len;
+} __packed;
+
struct host_cmd_ds_command {
__le16 command;
__le16 size;
@@ -2431,6 +2444,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_pkt_aggr_ctrl pkt_aggr_ctrl;
struct host_cmd_ds_sta_configure sta_cfg;
struct host_cmd_ds_add_station sta_info;
+ struct host_cmd_ds_802_11_cfg_data cfg_data;
} params;
} __packed;
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 855019fe5485..b07cb302a00c 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -54,7 +54,7 @@ const u16 mwifiex_1d_to_wmm_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
* proper cleanup before exiting.
*/
static int mwifiex_register(void *card, struct device *dev,
- struct mwifiex_if_ops *if_ops, void **padapter)
+ const struct mwifiex_if_ops *if_ops, void **padapter)
{
struct mwifiex_adapter *adapter;
int i;
@@ -691,10 +691,6 @@ err_dnld_fw:
init_failed = true;
done:
- if (adapter->cal_data) {
- release_firmware(adapter->cal_data);
- adapter->cal_data = NULL;
- }
if (adapter->firmware) {
release_firmware(adapter->firmware);
adapter->firmware = NULL;
@@ -1713,7 +1709,7 @@ err_exit:
*/
int
mwifiex_add_card(void *card, struct completion *fw_done,
- struct mwifiex_if_ops *if_ops, u8 iface_type,
+ const struct mwifiex_if_ops *if_ops, u8 iface_type,
struct device *dev)
{
struct mwifiex_adapter *adapter;
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 0674dcf7a537..63f1c900e096 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -1470,7 +1470,7 @@ int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
u32 func_init_shutdown);
int mwifiex_add_card(void *card, struct completion *fw_done,
- struct mwifiex_if_ops *if_ops, u8 iface_type,
+ const struct mwifiex_if_ops *if_ops, u8 iface_type,
struct device *dev);
int mwifiex_remove_card(struct mwifiex_adapter *adapter);
@@ -1571,8 +1571,6 @@ void mwifiex_uap_set_channel(struct mwifiex_private *priv,
struct cfg80211_chan_def chandef);
int mwifiex_config_start_uap(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg);
-void mwifiex_uap_del_sta_data(struct mwifiex_private *priv,
- struct mwifiex_sta_node *node);
void mwifiex_config_uap_11d(struct mwifiex_private *priv,
struct cfg80211_beacon_data *beacon_data);
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 5f997becdbaa..e11458fd4d50 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -21,7 +21,7 @@
#define PCIE_VERSION "1.0"
#define DRV_NAME "Marvell mwifiex PCIe"
-static struct mwifiex_if_ops pcie_ops;
+static const struct mwifiex_if_ops pcie_ops;
static const struct mwifiex_pcie_card_reg mwifiex_reg_8766 = {
.cmd_addr_lo = PCIE_SCRATCH_0_REG,
@@ -3240,7 +3240,7 @@ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter)
mwifiex_pcie_free_buffers(adapter);
}
-static struct mwifiex_if_ops pcie_ops = {
+static const struct mwifiex_if_ops pcie_ops = {
.init_if = mwifiex_init_pcie,
.cleanup_if = mwifiex_cleanup_pcie,
.check_fw_status = mwifiex_check_fw_status,
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index 490ffd981164..c1fe48448839 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -21,7 +21,7 @@
static void mwifiex_sdio_work(struct work_struct *work);
-static struct mwifiex_if_ops sdio_ops;
+static const struct mwifiex_if_ops sdio_ops;
static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = {
.start_rd_port = 1,
@@ -3167,7 +3167,7 @@ static void mwifiex_sdio_up_dev(struct mwifiex_adapter *adapter)
dev_err(&card->func->dev, "error enabling SDIO port\n");
}
-static struct mwifiex_if_ops sdio_ops = {
+static const struct mwifiex_if_ops sdio_ops = {
.init_if = mwifiex_init_sdio,
.cleanup_if = mwifiex_cleanup_sdio,
.check_fw_status = mwifiex_check_fw_status,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index e2800a831c8e..c4689f5a1acc 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -1507,6 +1507,7 @@ static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv,
u32 len;
u8 *data = (u8 *)cmd + S_DS_GEN;
int ret;
+ struct host_cmd_ds_802_11_cfg_data *pcfg_data;
if (prop) {
len = prop->length;
@@ -1514,12 +1515,20 @@ static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv,
data, len);
if (ret)
return ret;
+
+ cmd->size = cpu_to_le16(S_DS_GEN + len);
mwifiex_dbg(adapter, INFO,
"download cfg_data from device tree: %s\n",
prop->name);
} else if (adapter->cal_data->data && adapter->cal_data->size > 0) {
len = mwifiex_parse_cal_cfg((u8 *)adapter->cal_data->data,
- adapter->cal_data->size, data);
+ adapter->cal_data->size,
+ data + sizeof(*pcfg_data));
+ pcfg_data = &cmd->params.cfg_data;
+ pcfg_data->action = cpu_to_le16(HOST_CMD_ACT_GEN_SET);
+ pcfg_data->type = cpu_to_le16(MWIFIEX_CFG_TYPE_CAL);
+ pcfg_data->data_len = cpu_to_le16(len);
+ cmd->size = cpu_to_le16(S_DS_GEN + sizeof(*pcfg_data) + len);
mwifiex_dbg(adapter, INFO,
"download cfg_data from config file\n");
} else {
@@ -1527,7 +1536,6 @@ static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv,
}
cmd->command = cpu_to_le16(HostCmd_CMD_CFG_DATA);
- cmd->size = cpu_to_le16(S_DS_GEN + len);
return 0;
}
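After this change the calibration payload read from a file is prefixed with the new host_cmd_ds_802_11_cfg_data header (action, type, length) and the command size accounts for it, while device-tree blobs are still sent bare. A userspace sketch of the resulting size calculation; the S_DS_GEN value used here is an assumption for illustration only.

/* Userspace sketch of how the CFG_DATA command size is assembled above:
 * generic header + cfg_data header + calibration payload. */
#include <stdint.h>
#include <stdio.h>

#define S_DS_GEN 8	/* assumed size of the generic command header */

struct cfg_data_hdr {
	uint16_t action;
	uint16_t type;
	uint16_t data_len;
} __attribute__((packed));

static size_t cfg_data_cmd_size(size_t cal_len, int from_device_tree)
{
	/* Device-tree blobs are sent bare; file-based cal data gets the
	 * action/type/length header in front of the payload. */
	if (from_device_tree)
		return S_DS_GEN + cal_len;
	return S_DS_GEN + sizeof(struct cfg_data_hdr) + cal_len;
}

int main(void)
{
	printf("device tree, 100 bytes: %zu\n", cfg_data_cmd_size(100, 1));
	printf("cal file,    100 bytes: %zu\n", cfg_data_cmd_size(100, 0));
	return 0;
}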
@@ -2293,9 +2301,13 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
"marvell,caldata");
}
- if (adapter->cal_data)
+ if (adapter->cal_data) {
mwifiex_send_cmd(priv, HostCmd_CMD_CFG_DATA,
HostCmd_ACT_GEN_SET, 0, NULL, true);
+ release_firmware(adapter->cal_data);
+ adapter->cal_data = NULL;
+ }
+
/* Read MAC address from HW */
ret = mwifiex_send_cmd(priv, HostCmd_CMD_GET_HW_SPEC,
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_event.c b/drivers/net/wireless/marvell/mwifiex/uap_event.c
index 58ef5020a46a..245cb99a3daa 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_event.c
@@ -325,19 +325,3 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
return 0;
}
-
-/* This function deletes station entry from associated station list.
- * Also if both AP and STA are 11n enabled, RxReorder tables and TxBA stream
- * tables created for this station are deleted.
- */
-void mwifiex_uap_del_sta_data(struct mwifiex_private *priv,
- struct mwifiex_sta_node *node)
-{
- if (priv->ap_11n_enabled && node->is_11n_enabled) {
- mwifiex_11n_del_rx_reorder_tbl_by_ta(priv, node->mac_addr);
- mwifiex_del_tx_ba_stream_tbl_by_ra(priv, node->mac_addr);
- }
- mwifiex_del_sta_entry(priv, node->mac_addr);
-
- return;
-}
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 6085cd50970d..3034c4405cb5 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -10,7 +10,7 @@
#define USB_VERSION "1.0"
-static struct mwifiex_if_ops usb_ops;
+static const struct mwifiex_if_ops usb_ops;
static const struct usb_device_id mwifiex_usb_table[] = {
/* 8766 */
@@ -1585,7 +1585,7 @@ mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
return 0;
}
-static struct mwifiex_if_ops usb_ops = {
+static const struct mwifiex_if_ops usb_ops = {
.register_dev = mwifiex_register_dev,
.unregister_dev = mwifiex_unregister_dev,
.wakeup = mwifiex_pm_wakeup_card,
diff --git a/drivers/net/wireless/mediatek/mt76/channel.c b/drivers/net/wireless/mediatek/mt76/channel.c
index 6a35c6ebd823..e7b839e74290 100644
--- a/drivers/net/wireless/mediatek/mt76/channel.c
+++ b/drivers/net/wireless/mediatek/mt76/channel.c
@@ -293,6 +293,7 @@ struct mt76_vif_link *mt76_get_vif_phy_link(struct mt76_phy *phy,
kfree(mlink);
return ERR_PTR(ret);
}
+ rcu_assign_pointer(mvif->offchannel_link, mlink);
return mlink;
}
@@ -301,10 +302,12 @@ void mt76_put_vif_phy_link(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct mt76_vif_link *mlink)
{
struct mt76_dev *dev = phy->dev;
+ struct mt76_vif_data *mvif = mlink->mvif;
if (IS_ERR_OR_NULL(mlink) || !mlink->offchannel)
return;
+ rcu_assign_pointer(mvif->offchannel_link, NULL);
dev->drv->vif_link_remove(phy, vif, &vif->bss_conf, mlink);
kfree(mlink);
}
diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
index 0bc66cc19acd..443517d06c9f 100644
--- a/drivers/net/wireless/mediatek/mt76/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
@@ -95,6 +95,10 @@ int mt76_get_of_data_from_mtd(struct mt76_dev *dev, void *eep, int offset, int l
#ifdef CONFIG_NL80211_TESTMODE
dev->test_mtd.name = devm_kstrdup(dev->dev, part, GFP_KERNEL);
+ if (!dev->test_mtd.name) {
+ ret = -ENOMEM;
+ goto out_put_node;
+ }
dev->test_mtd.offset = offset;
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 508b472408c2..b88d7e10742e 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -816,8 +816,8 @@ void mt76_free_device(struct mt76_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76_free_device);
-static struct mt76_phy *
-mt76_vif_phy(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+struct mt76_phy *mt76_vif_phy(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
struct mt76_chanctx *ctx;
@@ -831,6 +831,7 @@ mt76_vif_phy(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
ctx = (struct mt76_chanctx *)mlink->ctx->drv_priv;
return ctx->phy;
}
+EXPORT_SYMBOL_GPL(mt76_vif_phy);
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
@@ -1697,6 +1698,17 @@ void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid)
}
EXPORT_SYMBOL_GPL(mt76_wcid_add_poll);
+s8 mt76_get_power_bound(struct mt76_phy *phy, s8 txpower)
+{
+ int n_chains = hweight16(phy->chainmask);
+
+ txpower = mt76_get_sar_power(phy, phy->chandef.chan, txpower * 2);
+ txpower -= mt76_tx_power_nss_delta(n_chains);
+
+ return txpower;
+}
+EXPORT_SYMBOL_GPL(mt76_get_power_bound);
+
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
unsigned int link_id, int *dbm)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 132148f7b107..d7cd467b812f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -351,6 +351,7 @@ struct mt76_wcid {
u8 hw_key_idx;
u8 hw_key_idx2;
+ u8 offchannel:1;
u8 sta:1;
u8 sta_disabled:1;
u8 amsdu:1;
@@ -491,6 +492,7 @@ struct mt76_hw_cap {
#define MT_DRV_RX_DMA_HDR BIT(3)
#define MT_DRV_HW_MGMT_TXQ BIT(4)
#define MT_DRV_AMSDU_OFFLOAD BIT(5)
+#define MT_DRV_IGNORE_TXS_FAILED BIT(6)
struct mt76_driver_ops {
u32 drv_flags;
@@ -769,6 +771,7 @@ struct mt76_testmode_data {
struct mt76_vif_link {
u8 idx;
+ u8 link_idx;
u8 omac_idx;
u8 band_idx;
u8 wmm_idx;
@@ -786,6 +789,7 @@ struct mt76_vif_link {
struct mt76_vif_data {
struct mt76_vif_link __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+ struct mt76_vif_link __rcu *offchannel_link;
struct mt76_phy *roc_phy;
u16 valid_links;
@@ -1224,6 +1228,8 @@ struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
u8 band_idx);
int mt76_register_phy(struct mt76_phy *phy, bool vht,
struct ieee80211_rate *rates, int n_rates);
+struct mt76_phy *mt76_vif_phy(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
struct dentry *mt76_register_debugfs_fops(struct mt76_phy *phy,
const struct file_operations *ops);
@@ -1482,6 +1488,8 @@ void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int mt76_get_min_avg_rssi(struct mt76_dev *dev, u8 phy_idx);
+s8 mt76_get_power_bound(struct mt76_phy *phy, s8 txpower);
+
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
unsigned int link_id, int *dbm);
int mt76_init_sar_power(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
index db0c29e65185..487ad716f872 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
@@ -314,6 +314,9 @@ enum tx_frag_idx {
#define MT_TXFREE_INFO_COUNT GENMASK(27, 24)
#define MT_TXFREE_INFO_STAT GENMASK(29, 28)
+#define MT_TXS_HDR_SIZE 4 /* Unit: DW */
+#define MT_TXS_SIZE 12 /* Unit: DW */
+
#define MT_TXS0_BW GENMASK(31, 29)
#define MT_TXS0_TID GENMASK(28, 26)
#define MT_TXS0_AMPDU BIT(25)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index f30cf9e71610..bafcf5a279e2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -391,7 +391,7 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
if (vif->type == NL80211_IFTYPE_STATION &&
- !is_zero_ether_addr(link_conf->bssid)) {
+ link_conf && !is_zero_ether_addr(link_conf->bssid)) {
memcpy(basic->peer_addr, link_conf->bssid, ETH_ALEN);
basic->aid = cpu_to_le16(vif->cfg.aid);
} else {
@@ -1168,7 +1168,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
.tag = cpu_to_le16(DEV_INFO_ACTIVE),
.len = cpu_to_le16(sizeof(struct req_tlv)),
.active = enable,
- .link_idx = mvif->idx,
+ .link_idx = mvif->link_idx,
},
};
struct {
@@ -1191,7 +1191,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
.bmc_tx_wlan_idx = cpu_to_le16(wcid->idx),
.sta_idx = cpu_to_le16(wcid->idx),
.conn_state = 1,
- .link_idx = mvif->idx,
+ .link_idx = mvif->link_idx,
},
};
int err, idx, cmd, len;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index b456ccd00d58..11c16d1fc70f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -156,7 +156,8 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
static const struct mt76_driver_ops drv_ops = {
.txwi_size = sizeof(struct mt76x02_txwi),
.drv_flags = MT_DRV_TX_ALIGNED4_SKBS |
- MT_DRV_SW_RX_AIRTIME,
+ MT_DRV_SW_RX_AIRTIME |
+ MT_DRV_IGNORE_TXS_FAILED,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
.set_channel = mt76x0_set_channel,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index b031c500b741..90e5666c0857 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -214,7 +214,8 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
const struct usb_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
- .drv_flags = MT_DRV_SW_RX_AIRTIME,
+ .drv_flags = MT_DRV_SW_RX_AIRTIME |
+ MT_DRV_IGNORE_TXS_FAILED,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
.set_channel = mt76x0_set_channel,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index 0e1ede9314d8..4840d0b500b3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -264,8 +264,8 @@ void mt76x02u_init_beacon_config(struct mt76x02_dev *dev)
};
dev->beacon_ops = &beacon_ops;
- hrtimer_init(&dev->pre_tbtt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- dev->pre_tbtt_timer.function = mt76x02u_pre_tbtt_interrupt;
+ hrtimer_setup(&dev->pre_tbtt_timer, mt76x02u_pre_tbtt_interrupt, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
INIT_WORK(&dev->pre_tbtt_work, mt76x02u_pre_tbtt_work);
mt76x02_init_beacon_config(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index 727bfdd00b40..2303019670e2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -22,7 +22,8 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
static const struct mt76_driver_ops drv_ops = {
.txwi_size = sizeof(struct mt76x02_txwi),
.drv_flags = MT_DRV_TX_ALIGNED4_SKBS |
- MT_DRV_SW_RX_AIRTIME,
+ MT_DRV_SW_RX_AIRTIME |
+ MT_DRV_IGNORE_TXS_FAILED,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
.set_channel = mt76x2e_set_channel,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index e832ad53e239..84ef80ab4afb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -22,6 +22,7 @@ static const struct usb_device_id mt76x2u_device_table[] = {
{ USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
{ USB_DEVICE(0x045e, 0x02e6) }, /* XBox One Wireless Adapter */
{ USB_DEVICE(0x045e, 0x02fe) }, /* XBox One Wireless Adapter */
+ { USB_DEVICE(0x2357, 0x0137) }, /* TP-Link TL-WDN6200 */
{ },
};
@@ -29,7 +30,8 @@ static int mt76x2u_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
- .drv_flags = MT_DRV_SW_RX_AIRTIME,
+ .drv_flags = MT_DRV_SW_RX_AIRTIME |
+ MT_DRV_IGNORE_TXS_FAILED,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
.set_channel = mt76x2u_set_channel,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
index 578013884e43..192e8eff970b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -303,9 +303,9 @@ static int mt7915_muru_stats_show(struct seq_file *file, void *data)
phy->mib.dl_vht_3mu_cnt,
phy->mib.dl_vht_4mu_cnt);
- sub_total_cnt = phy->mib.dl_vht_2mu_cnt +
- phy->mib.dl_vht_3mu_cnt +
- phy->mib.dl_vht_4mu_cnt;
+ sub_total_cnt = (u64)phy->mib.dl_vht_2mu_cnt +
+ phy->mib.dl_vht_3mu_cnt +
+ phy->mib.dl_vht_4mu_cnt;
seq_printf(file, "\nTotal non-HE MU-MIMO DL PPDU count: %lld",
sub_total_cnt);
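The casts in this and the following hunks widen the first operand so the per-PPDU counters are summed in 64-bit arithmetic, instead of wrapping in 32 bits and only then being widened for the %lld print. A tiny userspace demonstration of the difference:

/* Userspace demonstration of the overflow these casts avoid. */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint32_t a = 3000000000u, b = 2000000000u;

	uint64_t wrapped = a + b;		/* 32-bit add wraps, then widens */
	uint64_t correct = (uint64_t)a + b;	/* whole sum done in 64 bits */

	printf("wrapped: %" PRIu64 "\n", wrapped);	/* 705032704 */
	printf("correct: %" PRIu64 "\n", correct);	/* 5000000000 */
	return 0;
}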
@@ -353,26 +353,27 @@ static int mt7915_muru_stats_show(struct seq_file *file, void *data)
phy->mib.dl_he_9to16ru_cnt,
phy->mib.dl_he_gtr16ru_cnt);
- sub_total_cnt = phy->mib.dl_he_2mu_cnt +
- phy->mib.dl_he_3mu_cnt +
- phy->mib.dl_he_4mu_cnt;
+ sub_total_cnt = (u64)phy->mib.dl_he_2mu_cnt +
+ phy->mib.dl_he_3mu_cnt +
+ phy->mib.dl_he_4mu_cnt;
total_ppdu_cnt = sub_total_cnt;
seq_printf(file, "\nTotal HE MU-MIMO DL PPDU count: %lld",
sub_total_cnt);
- sub_total_cnt = phy->mib.dl_he_2ru_cnt +
- phy->mib.dl_he_3ru_cnt +
- phy->mib.dl_he_4ru_cnt +
- phy->mib.dl_he_5to8ru_cnt +
- phy->mib.dl_he_9to16ru_cnt +
- phy->mib.dl_he_gtr16ru_cnt;
+ sub_total_cnt = (u64)phy->mib.dl_he_2ru_cnt +
+ phy->mib.dl_he_3ru_cnt +
+ phy->mib.dl_he_4ru_cnt +
+ phy->mib.dl_he_5to8ru_cnt +
+ phy->mib.dl_he_9to16ru_cnt +
+ phy->mib.dl_he_gtr16ru_cnt;
total_ppdu_cnt += sub_total_cnt;
seq_printf(file, "\nTotal HE OFDMA DL PPDU count: %lld",
sub_total_cnt);
- total_ppdu_cnt += phy->mib.dl_he_su_cnt + phy->mib.dl_he_ext_su_cnt;
+ total_ppdu_cnt += (u64)phy->mib.dl_he_su_cnt +
+ phy->mib.dl_he_ext_su_cnt;
seq_printf(file, "\nAll HE DL PPDU count: %lld", total_ppdu_cnt);
@@ -404,20 +405,20 @@ static int mt7915_muru_stats_show(struct seq_file *file, void *data)
phy->mib.ul_hetrig_9to16ru_cnt,
phy->mib.ul_hetrig_gtr16ru_cnt);
- sub_total_cnt = phy->mib.ul_hetrig_2mu_cnt +
- phy->mib.ul_hetrig_3mu_cnt +
- phy->mib.ul_hetrig_4mu_cnt;
+ sub_total_cnt = (u64)phy->mib.ul_hetrig_2mu_cnt +
+ phy->mib.ul_hetrig_3mu_cnt +
+ phy->mib.ul_hetrig_4mu_cnt;
total_ppdu_cnt = sub_total_cnt;
seq_printf(file, "\nTotal HE MU-MIMO UL TB PPDU count: %lld",
sub_total_cnt);
- sub_total_cnt = phy->mib.ul_hetrig_2ru_cnt +
- phy->mib.ul_hetrig_3ru_cnt +
- phy->mib.ul_hetrig_4ru_cnt +
- phy->mib.ul_hetrig_5to8ru_cnt +
- phy->mib.ul_hetrig_9to16ru_cnt +
- phy->mib.ul_hetrig_gtr16ru_cnt;
+ sub_total_cnt = (u64)phy->mib.ul_hetrig_2ru_cnt +
+ phy->mib.ul_hetrig_3ru_cnt +
+ phy->mib.ul_hetrig_4ru_cnt +
+ phy->mib.ul_hetrig_5to8ru_cnt +
+ phy->mib.ul_hetrig_9to16ru_cnt +
+ phy->mib.ul_hetrig_gtr16ru_cnt;
total_ppdu_cnt += sub_total_cnt;
seq_printf(file, "\nTotal HE OFDMA UL TB PPDU count: %lld",
@@ -1084,13 +1085,13 @@ mt7915_rate_txpower_set(struct file *file, const char __user *user_buf,
return -EINVAL;
if (pwr160)
- pwr160 = mt7915_get_power_bound(phy, pwr160);
+ pwr160 = mt76_get_power_bound(mphy, pwr160);
if (pwr80)
- pwr80 = mt7915_get_power_bound(phy, pwr80);
+ pwr80 = mt76_get_power_bound(mphy, pwr80);
if (pwr40)
- pwr40 = mt7915_get_power_bound(phy, pwr40);
+ pwr40 = mt76_get_power_bound(mphy, pwr40);
if (pwr20)
- pwr20 = mt7915_get_power_bound(phy, pwr20);
+ pwr20 = mt76_get_power_bound(mphy, pwr20);
if (pwr160 < 0 || pwr80 < 0 || pwr40 < 0 || pwr20 < 0)
return -EINVAL;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 9d790f234e82..3643c72bb68d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -3323,7 +3323,7 @@ int mt7915_mcu_set_txpower_frame(struct mt7915_phy *phy,
if (ret)
return ret;
- txpower = mt7915_get_power_bound(phy, txpower);
+ txpower = mt76_get_power_bound(mphy, txpower);
if (txpower > mphy->txpower_cur || txpower < 0)
return -EINVAL;
@@ -3373,7 +3373,7 @@ int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy)
int i, idx;
int tx_power;
- tx_power = mt7915_get_power_bound(phy, hw->conf.power_level);
+ tx_power = mt76_get_power_bound(mphy, hw->conf.power_level);
tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan,
&limits_array, tx_power);
mphy->txpower_cur = tx_power;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
index 49476a4182fd..092ed504a8f2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -515,16 +515,4 @@ enum {
sizeof(struct bss_info_bmc_rate) +\
sizeof(struct bss_info_ext_bss))
-static inline s8
-mt7915_get_power_bound(struct mt7915_phy *phy, s8 txpower)
-{
- struct mt76_phy *mphy = phy->mt76;
- int n_chains = hweight16(mphy->chainmask);
-
- txpower = mt76_get_sar_power(mphy, mphy->chandef.chan, txpower * 2);
- txpower -= mt76_tx_power_nss_delta(n_chains);
-
- return txpower;
-}
-
#endif
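The per-driver mt7915_get_power_bound() inline removed here is superseded by the shared mt76_get_power_bound() added in mac80211.c, which converts the requested level to 0.5 dB units, applies the SAR limit and subtracts a per-chain delta. A userspace model of that calculation; the delta table and the simple clamp standing in for mt76_get_sar_power() are illustrative assumptions, not the driver's exact tables.

/* Userspace model of the consolidated mt76_get_power_bound() helper. */
#include <stdint.h>
#include <stdio.h>

static int nss_delta(int n_chains)
{
	static const int delta[4] = { 0, 6, 9, 12 };	/* 0.5 dB units (assumed) */

	return (n_chains >= 1 && n_chains <= 4) ? delta[n_chains - 1] : 0;
}

static int get_power_bound(int txpower_dbm, int sar_limit_half_db, int n_chains)
{
	int power = txpower_dbm * 2;		/* dBm -> 0.5 dB units */

	if (power > sar_limit_half_db)		/* stand-in for mt76_get_sar_power() */
		power = sar_limit_half_db;

	return power - nss_delta(n_chains);
}

int main(void)
{
	/* 20 dBm request, SAR cap of 36 (18 dBm), 2 chains -> 30 (15 dBm) */
	printf("bound = %d (0.5 dB units)\n", get_power_bound(20, 36, 2));
	return 0;
}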
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 13e58c328aff..78b77a54d195 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -811,6 +811,7 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
msta->deflink.wcid.phy_idx = mvif->bss_conf.mt76.band_idx;
msta->deflink.wcid.tx_info |= MT_WCID_TX_INFO_SET;
msta->deflink.last_txs = jiffies;
+ msta->deflink.sta = msta;
ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm);
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/init.c b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
index f41ca4248497..63cb08f4d87c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
@@ -58,15 +58,110 @@ static int mt7925_thermal_init(struct mt792x_phy *phy)
return PTR_ERR_OR_ZERO(hwmon);
}
+void mt7925_regd_be_ctrl(struct mt792x_dev *dev, u8 *alpha2)
+{
+ struct mt792x_phy *phy = &dev->phy;
+ struct mt7925_clc_rule_v2 *rule;
+ struct mt7925_clc *clc;
+ bool old = dev->has_eht, new = true;
+ u32 mtcl_conf = mt792x_acpi_get_mtcl_conf(&dev->phy, alpha2);
+ u8 *pos;
+
+ if (mtcl_conf != MT792X_ACPI_MTCL_INVALID &&
+ (((mtcl_conf >> 4) & 0x3) == 0)) {
+ new = false;
+ goto out;
+ }
+
+ if (!phy->clc[MT792x_CLC_BE_CTRL])
+ goto out;
+
+ clc = (struct mt7925_clc *)phy->clc[MT792x_CLC_BE_CTRL];
+ pos = clc->data;
+
+ while (1) {
+ rule = (struct mt7925_clc_rule_v2 *)pos;
+
+ if (rule->alpha2[0] == alpha2[0] &&
+ rule->alpha2[1] == alpha2[1]) {
+ new = false;
+ break;
+ }
+
+ /* Check the last one */
+ if (rule->flag & BIT(0))
+ break;
+
+ pos += sizeof(*rule);
+ }
+
+out:
+ if (old == new)
+ return;
+
+ dev->has_eht = new;
+ mt7925_set_stream_he_eht_caps(phy);
+}
+
+static void
+mt7925_regd_channel_update(struct wiphy *wiphy, struct mt792x_dev *dev)
+{
+#define IS_UNII_INVALID(idx, sfreq, efreq, cfreq) \
+ (!(dev->phy.clc_chan_conf & BIT(idx)) && (cfreq) >= (sfreq) && (cfreq) <= (efreq))
+#define MT7925_UNII_59G_IS_VALID 0x1
+#define MT7925_UNII_6G_IS_VALID 0x1e
+ struct ieee80211_supported_band *sband;
+ struct mt76_dev *mdev = &dev->mt76;
+ struct ieee80211_channel *ch;
+ u32 mtcl_conf = mt792x_acpi_get_mtcl_conf(&dev->phy, mdev->alpha2);
+ int i;
+
+ if (mtcl_conf != MT792X_ACPI_MTCL_INVALID) {
+ if ((mtcl_conf & 0x3) == 0)
+ dev->phy.clc_chan_conf &= ~MT7925_UNII_59G_IS_VALID;
+ if (((mtcl_conf >> 2) & 0x3) == 0)
+ dev->phy.clc_chan_conf &= ~MT7925_UNII_6G_IS_VALID;
+ }
+
+ sband = wiphy->bands[NL80211_BAND_5GHZ];
+ if (!sband)
+ return;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+
+ /* UNII-4 */
+ if (IS_UNII_INVALID(0, 5845, 5925, ch->center_freq))
+ ch->flags |= IEEE80211_CHAN_DISABLED;
+ }
+
+ sband = wiphy->bands[NL80211_BAND_6GHZ];
+ if (!sband)
+ return;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+
+ /* UNII-5/6/7/8 */
+ if (IS_UNII_INVALID(1, 5925, 6425, ch->center_freq) ||
+ IS_UNII_INVALID(2, 6425, 6525, ch->center_freq) ||
+ IS_UNII_INVALID(3, 6525, 6875, ch->center_freq) ||
+ IS_UNII_INVALID(4, 6875, 7125, ch->center_freq))
+ ch->flags |= IEEE80211_CHAN_DISABLED;
+ }
+}
+
void mt7925_regd_update(struct mt792x_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
struct ieee80211_hw *hw = mdev->hw;
+ struct wiphy *wiphy = hw->wiphy;
if (!dev->regd_change)
return;
mt7925_mcu_set_clc(dev, mdev->alpha2, dev->country_ie_env);
+ mt7925_regd_channel_update(wiphy, dev);
mt7925_mcu_set_channel_domain(hw->priv);
mt7925_set_tx_sar_pwr(hw, NULL);
dev->regd_change = false;
@@ -244,6 +339,7 @@ int mt7925_register_device(struct mt792x_dev *dev)
dev->mt76.tx_worker.fn = mt792x_tx_worker;
INIT_DELAYED_WORK(&dev->pm.ps_work, mt792x_pm_power_save_work);
+ INIT_DELAYED_WORK(&dev->mlo_pm_work, mt7925_mlo_pm_work);
INIT_WORK(&dev->pm.wake_work, mt792x_pm_wake_work);
spin_lock_init(&dev->pm.wake.lock);
mutex_init(&dev->pm.mutex);
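The 32-bit value returned by mt792x_acpi_get_mtcl_conf() is consumed in two places above; the decoder below is an illustrative sketch of its layout as used by mt7925_regd_channel_update() and mt7925_regd_be_ctrl() (helper and macro names are made up, the values mirror the function-local defines in the diff):

#define UNII_59G_VALID_SKETCH	0x01	/* UNII-4 bit in clc_chan_conf */
#define UNII_6G_VALID_SKETCH	0x1e	/* UNII-5..8 bits in clc_chan_conf */

static void mtcl_conf_decode_sketch(struct mt792x_dev *dev, u32 mtcl_conf)
{
	if (mtcl_conf == MT792X_ACPI_MTCL_INVALID)
		return;				/* no usable ACPI MTCL data */

	if ((mtcl_conf & 0x3) == 0)		/* bits [1:0]: 5.9 GHz, UNII-4 */
		dev->phy.clc_chan_conf &= ~UNII_59G_VALID_SKETCH;

	if (((mtcl_conf >> 2) & 0x3) == 0)	/* bits [3:2]: 6 GHz, UNII-5..8 */
		dev->phy.clc_chan_conf &= ~UNII_6G_VALID_SKETCH;

	/* bits [5:4]: 11be; zero makes mt7925_regd_be_ctrl() clear has_eht */
}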
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
index 98daf80ac131..e79364ac129e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
@@ -251,12 +251,12 @@ int mt7925_init_mlo_caps(struct mt792x_phy *phy)
},
};
- if (!(phy->chip_cap & MT792x_CHIP_CAP_MLO_EVT_EN))
+ if (!(phy->chip_cap & MT792x_CHIP_CAP_MLO_EN))
return 0;
ext_capab[0].eml_capabilities = phy->eml_cap;
ext_capab[0].mld_capa_and_ops =
- u16_encode_bits(1, IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS);
+ u16_encode_bits(0, IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS);
wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
wiphy->iftype_ext_capab = ext_capab;
@@ -360,10 +360,15 @@ static int mt7925_mac_link_bss_add(struct mt792x_dev *dev,
struct mt76_txq *mtxq;
int idx, ret = 0;
- mconf->mt76.idx = __ffs64(~dev->mt76.vif_mask);
- if (mconf->mt76.idx >= MT792x_MAX_INTERFACES) {
- ret = -ENOSPC;
- goto out;
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ mconf->mt76.idx = MT792x_MAX_INTERFACES;
+ } else {
+ mconf->mt76.idx = __ffs64(~dev->mt76.vif_mask);
+
+ if (mconf->mt76.idx >= MT792x_MAX_INTERFACES) {
+ ret = -ENOSPC;
+ goto out;
+ }
}
mconf->mt76.omac_idx = ieee80211_vif_is_mld(vif) ?
@@ -371,6 +376,7 @@ static int mt7925_mac_link_bss_add(struct mt792x_dev *dev,
mconf->mt76.band_idx = 0xff;
mconf->mt76.wmm_idx = ieee80211_vif_is_mld(vif) ?
0 : mconf->mt76.idx % MT76_CONNAC_MAX_WMM_SETS;
+ mconf->mt76.link_idx = hweight16(mvif->valid_links);
if (mvif->phy->mt76->chandef.chan->band != NL80211_BAND_2GHZ)
mconf->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL + 4;
@@ -421,6 +427,7 @@ mt7925_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mvif->bss_conf.vif = mvif;
mvif->sta.vif = mvif;
mvif->deflink_id = IEEE80211_LINK_UNSPECIFIED;
+ mvif->mlo_pm_state = MT792x_MLO_LINK_DISASSOC;
ret = mt7925_mac_link_bss_add(dev, &vif->bss_conf, &mvif->sta.deflink);
if (ret < 0)
@@ -1149,7 +1156,12 @@ static void mt7925_mac_link_sta_remove(struct mt76_dev *mdev,
struct mt792x_bss_conf *mconf;
mconf = mt792x_link_conf_to_mconf(link_conf);
- mt792x_mac_link_bss_remove(dev, mconf, mlink);
+
+ if (ieee80211_vif_is_mld(vif))
+ mt792x_mac_link_bss_remove(dev, mconf, mlink);
+ else
+ mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx, link_conf,
+ link_sta, false);
}
spin_lock_bh(&mdev->sta_poll_lock);
@@ -1169,6 +1181,31 @@ mt7925_mac_sta_remove_links(struct mt792x_dev *dev, struct ieee80211_vif *vif,
struct mt76_wcid *wcid;
unsigned int link_id;
+ /* clean up bss before starec */
+ for_each_set_bit(link_id, &old_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_link_sta *link_sta;
+ struct ieee80211_bss_conf *link_conf;
+ struct mt792x_bss_conf *mconf;
+ struct mt792x_link_sta *mlink;
+
+ link_sta = mt792x_sta_to_link_sta(vif, sta, link_id);
+ if (!link_sta)
+ continue;
+
+ mlink = mt792x_sta_to_link(msta, link_id);
+ if (!mlink)
+ continue;
+
+ link_conf = mt792x_vif_to_bss_conf(vif, link_id);
+ if (!link_conf)
+ continue;
+
+ mconf = mt792x_link_conf_to_mconf(link_conf);
+
+ mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx, link_conf,
+ link_sta, false);
+ }
+
for_each_set_bit(link_id, &old_links, IEEE80211_MLD_MAX_NUM_LINKS) {
struct ieee80211_link_sta *link_sta;
struct mt792x_link_sta *mlink;
@@ -1206,51 +1243,22 @@ void mt7925_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
{
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
- struct {
- struct {
- u8 omac_idx;
- u8 band_idx;
- __le16 pad;
- } __packed hdr;
- struct req_tlv {
- __le16 tag;
- __le16 len;
- u8 active;
- u8 link_idx; /* hw link idx */
- u8 omac_addr[ETH_ALEN];
- } __packed tlv;
- } dev_req = {
- .hdr = {
- .omac_idx = 0,
- .band_idx = 0,
- },
- .tlv = {
- .tag = cpu_to_le16(DEV_INFO_ACTIVE),
- .len = cpu_to_le16(sizeof(struct req_tlv)),
- .active = true,
- },
- };
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
unsigned long rem;
rem = ieee80211_vif_is_mld(vif) ? msta->valid_links : BIT(0);
mt7925_mac_sta_remove_links(dev, vif, sta, rem);
- if (ieee80211_vif_is_mld(vif)) {
- mt7925_mcu_set_dbdc(&dev->mphy, false);
-
- /* recovery omac address for the legacy interface */
- memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN);
- mt76_mcu_send_msg(mdev, MCU_UNI_CMD(DEV_INFO_UPDATE),
- &dev_req, sizeof(dev_req), true);
- }
+ if (ieee80211_vif_is_mld(vif))
+ mt7925_mcu_del_dev(mdev, vif);
if (vif->type == NL80211_IFTYPE_STATION) {
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
-
mvif->wep_sta = NULL;
ewma_rssi_init(&mvif->bss_conf.rssi);
}
+
+ mvif->mlo_pm_state = MT792x_MLO_LINK_DISASSOC;
}
EXPORT_SYMBOL_GPL(mt7925_mac_sta_remove);
@@ -1289,22 +1297,22 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_RX_START:
mt76_rx_aggr_start(&dev->mt76, &msta->deflink.wcid, tid, ssn,
params->buf_size);
- mt7925_mcu_uni_rx_ba(dev, vif, params, true);
+ mt7925_mcu_uni_rx_ba(dev, params, true);
break;
case IEEE80211_AMPDU_RX_STOP:
mt76_rx_aggr_stop(&dev->mt76, &msta->deflink.wcid, tid);
- mt7925_mcu_uni_rx_ba(dev, vif, params, false);
+ mt7925_mcu_uni_rx_ba(dev, params, false);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
mtxq->aggr = true;
mtxq->send_bar = false;
- mt7925_mcu_uni_tx_ba(dev, vif, params, true);
+ mt7925_mcu_uni_tx_ba(dev, params, true);
break;
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
mtxq->aggr = false;
clear_bit(tid, &msta->deflink.wcid.ampdu_state);
- mt7925_mcu_uni_tx_ba(dev, vif, params, false);
+ mt7925_mcu_uni_tx_ba(dev, params, false);
break;
case IEEE80211_AMPDU_TX_START:
set_bit(tid, &msta->deflink.wcid.ampdu_state);
@@ -1313,7 +1321,7 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_TX_STOP_CONT:
mtxq->aggr = false;
clear_bit(tid, &msta->deflink.wcid.ampdu_state);
- mt7925_mcu_uni_tx_ba(dev, vif, params, false);
+ mt7925_mcu_uni_tx_ba(dev, params, false);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
}
@@ -1322,6 +1330,38 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return ret;
}
+static void
+mt7925_mlo_pm_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt792x_dev *dev = priv;
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ unsigned long valid = ieee80211_vif_is_mld(vif) ?
+ mvif->valid_links : BIT(0);
+ struct ieee80211_bss_conf *bss_conf;
+ int i;
+
+ if (mvif->mlo_pm_state != MT792x_MLO_CHANGED_PS)
+ return;
+
+ mt792x_mutex_acquire(dev);
+ for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
+ bss_conf = mt792x_vif_to_bss_conf(vif, i);
+ mt7925_mcu_uni_bss_ps(dev, bss_conf);
+ }
+ mt792x_mutex_release(dev);
+}
+
+void mt7925_mlo_pm_work(struct work_struct *work)
+{
+ struct mt792x_dev *dev = container_of(work, struct mt792x_dev,
+ mlo_pm_work.work);
+ struct ieee80211_hw *hw = mt76_hw(dev);
+
+ ieee80211_iterate_active_interfaces(hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7925_mlo_pm_iter, dev);
+}
+
static bool is_valid_alpha2(const char *alpha2)
{
if (!alpha2)
@@ -1378,6 +1418,8 @@ void mt7925_scan_work(struct work_struct *work)
if (!is_valid_alpha2(evt->alpha2))
break;
+ mt7925_regd_be_ctrl(phy->dev, evt->alpha2);
+
if (mdev->alpha2[0] != '0' && mdev->alpha2[1] != '0')
break;
@@ -1871,6 +1913,9 @@ static void mt7925_vif_cfg_changed(struct ieee80211_hw *hw,
mt7925_mcu_sta_update(dev, NULL, vif, true,
MT76_STA_INFO_STATE_ASSOC);
mt7925_mcu_set_beacon_filter(dev, vif, vif->cfg.assoc);
+
+ if (ieee80211_vif_is_mld(vif))
+ mvif->mlo_pm_state = MT792x_MLO_LINK_ASSOC;
}
if (changed & BSS_CHANGED_ARP_FILTER) {
@@ -1881,9 +1926,19 @@ static void mt7925_vif_cfg_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_PS) {
- for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
- bss_conf = mt792x_vif_to_bss_conf(vif, i);
+ if (hweight16(mvif->valid_links) < 2) {
+ /* legacy */
+ bss_conf = &vif->bss_conf;
mt7925_mcu_uni_bss_ps(dev, bss_conf);
+ } else {
+ if (mvif->mlo_pm_state == MT792x_MLO_LINK_ASSOC) {
+ mvif->mlo_pm_state = MT792x_MLO_CHANGED_PS_PENDING;
+ } else if (mvif->mlo_pm_state == MT792x_MLO_CHANGED_PS) {
+ for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
+ bss_conf = mt792x_vif_to_bss_conf(vif, i);
+ mt7925_mcu_uni_bss_ps(dev, bss_conf);
+ }
+ }
}
}
@@ -1934,11 +1989,12 @@ static void mt7925_link_info_changed(struct ieee80211_hw *hw,
if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED))
mt7925_mcu_set_tx(dev, info);
- if (changed & BSS_CHANGED_BSSID) {
- if (ieee80211_vif_is_mld(vif) &&
- hweight16(mvif->valid_links) == 2)
- /* Indicate the secondary setup done */
- mt7925_mcu_uni_bss_bcnft(dev, info, true);
+ if (mvif->mlo_pm_state == MT792x_MLO_CHANGED_PS_PENDING) {
+ /* Indicate the secondary link setup is done */
+ mt7925_mcu_uni_bss_bcnft(dev, info, true);
+
+ ieee80211_queue_delayed_work(hw, &dev->mlo_pm_work, 5 * HZ);
+ mvif->mlo_pm_state = MT792x_MLO_CHANGED_PS;
}
mt792x_mutex_release(dev);
@@ -2022,8 +2078,6 @@ mt7925_change_vif_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
goto free;
if (mconf != &mvif->bss_conf) {
- mt7925_mcu_set_bss_pm(dev, link_conf, true);
-
err = mt7925_set_mlo_roc(phy, &mvif->bss_conf,
vif->active_links);
if (err < 0)
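Taken together, the mlo_pm_state changes in this file sequence MLO power save as a small state machine. A summary sketch of the transitions implied by the hunks above (the enumerators mirror the ones added to mt792x.h later in this series; comments are descriptive, not driver code):

enum {	/* mirrors enum mt792x_mlo_pm_state */
	SKETCH_MLO_LINK_DISASSOC,	/* add_interface / sta_remove reset to this */
	SKETCH_MLO_LINK_ASSOC,		/* vif_cfg_changed: association on an MLD vif */
	SKETCH_MLO_CHANGED_PS_PENDING,	/* BSS_CHANGED_PS seen before the 2nd link is ready */
	SKETCH_MLO_CHANGED_PS,		/* link_info_changed: bcnft sent, per-link PS
					 * flushed by mt7925_mlo_pm_work() ~5 s later
					 */
};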
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
index 15815ad84713..e61da76b2097 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
@@ -348,14 +348,10 @@ mt7925_mcu_handle_hif_ctrl_basic(struct mt792x_dev *dev, struct tlv *tlv)
basic = (struct mt7925_mcu_hif_ctrl_basic_tlv *)tlv;
if (basic->hifsuspend) {
- if (basic->hif_tx_traffic_status == HIF_TRAFFIC_IDLE &&
- basic->hif_rx_traffic_status == HIF_TRAFFIC_IDLE)
- /* success */
- dev->hif_idle = true;
- else
- /* busy */
- /* invalid */
- dev->hif_idle = false;
+ dev->hif_idle = true;
+ if (!(basic->hif_tx_traffic_status == HIF_TRAFFIC_IDLE &&
+ basic->hif_rx_traffic_status == HIF_TRAFFIC_IDLE))
+ dev_info(dev->mt76.dev, "Hif traffic not idle.\n");
} else {
dev->hif_resumed = true;
}
@@ -576,10 +572,10 @@ void mt7925_mcu_rx_event(struct mt792x_dev *dev, struct sk_buff *skb)
static int
mt7925_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif_link *mvif,
- struct mt76_wcid *wcid,
struct ieee80211_ampdu_params *params,
bool enable, bool tx)
{
+ struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv;
struct sta_rec_ba_uni *ba;
struct sk_buff *skb;
struct tlv *tlv;
@@ -607,60 +603,76 @@ mt7925_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif_link *mvif,
/** starec & wtbl **/
int mt7925_mcu_uni_tx_ba(struct mt792x_dev *dev,
- struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params,
bool enable)
{
struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv;
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- struct mt792x_link_sta *mlink;
- struct mt792x_bss_conf *mconf;
- unsigned long usable_links = ieee80211_vif_usable_links(vif);
- struct mt76_wcid *wcid;
- u8 link_id, ret;
-
- for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
- mconf = mt792x_vif_to_link(mvif, link_id);
- mlink = mt792x_sta_to_link(msta, link_id);
- wcid = &mlink->wcid;
-
- if (enable && !params->amsdu)
- mlink->wcid.amsdu = false;
+ struct mt792x_vif *mvif = msta->vif;
- ret = mt7925_mcu_sta_ba(&dev->mt76, &mconf->mt76, wcid, params,
- enable, true);
- if (ret < 0)
- break;
- }
+ if (enable && !params->amsdu)
+ msta->deflink.wcid.amsdu = false;
- return ret;
+ return mt7925_mcu_sta_ba(&dev->mt76, &mvif->bss_conf.mt76, params,
+ enable, true);
}
int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev,
- struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params,
bool enable)
{
struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv;
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- struct mt792x_link_sta *mlink;
- struct mt792x_bss_conf *mconf;
- unsigned long usable_links = ieee80211_vif_usable_links(vif);
- struct mt76_wcid *wcid;
- u8 link_id, ret;
+ struct mt792x_vif *mvif = msta->vif;
- for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
- mconf = mt792x_vif_to_link(mvif, link_id);
- mlink = mt792x_sta_to_link(msta, link_id);
- wcid = &mlink->wcid;
+ return mt7925_mcu_sta_ba(&dev->mt76, &mvif->bss_conf.mt76, params,
+ enable, false);
+}
- ret = mt7925_mcu_sta_ba(&dev->mt76, &mconf->mt76, wcid, params,
- enable, false);
- if (ret < 0)
- break;
- }
+static int mt7925_mcu_read_eeprom(struct mt792x_dev *dev, u32 offset, u8 *val)
+{
+ struct {
+ u8 rsv[4];
- return ret;
+ __le16 tag;
+ __le16 len;
+
+ __le32 addr;
+ __le32 valid;
+ u8 data[MT7925_EEPROM_BLOCK_SIZE];
+ } __packed req = {
+ .tag = cpu_to_le16(1),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ .addr = cpu_to_le32(round_down(offset,
+ MT7925_EEPROM_BLOCK_SIZE)),
+ };
+ struct evt {
+ u8 rsv[4];
+
+ __le16 tag;
+ __le16 len;
+
+ __le32 ver;
+ __le32 addr;
+ __le32 valid;
+ __le32 size;
+ __le32 magic_num;
+ __le32 type;
+ __le32 rsv1[4];
+ u8 data[32];
+ } __packed *res;
+ struct sk_buff *skb;
+ int ret;
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_WM_UNI_CMD_QUERY(EFUSE_CTRL),
+ &req, sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ res = (struct evt *)skb->data;
+ *val = res->data[offset % MT7925_EEPROM_BLOCK_SIZE];
+
+ dev_kfree_skb(skb);
+
+ return 0;
}
static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
@@ -671,13 +683,21 @@ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
struct mt76_dev *mdev = &dev->mt76;
struct mt792x_phy *phy = &dev->phy;
const struct firmware *fw;
+ u8 *clc_base = NULL, hw_encap = 0;
int ret, i, len, offset = 0;
- u8 *clc_base = NULL;
+ dev->phy.clc_chan_conf = 0xff;
if (mt7925_disable_clc ||
mt76_is_usb(&dev->mt76))
return 0;
+ if (mt76_is_mmio(&dev->mt76)) {
+ ret = mt7925_mcu_read_eeprom(dev, MT_EE_HW_TYPE, &hw_encap);
+ if (ret)
+ return ret;
+ hw_encap = u8_get_bits(hw_encap, MT_EE_HW_TYPE_ENCAP);
+ }
+
ret = request_firmware(&fw, fw_name, mdev->dev);
if (ret)
return ret;
@@ -722,6 +742,12 @@ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
if (phy->clc[clc->idx])
continue;
+ /* header content sanity */
+ if ((clc->idx == MT792x_CLC_BE_CTRL &&
+ u8_get_bits(clc->t2.type, MT_EE_HW_TYPE_ENCAP) != hw_encap) ||
+ u8_get_bits(clc->t0.type, MT_EE_HW_TYPE_ENCAP) != hw_encap)
+ continue;
+
phy->clc[clc->idx] = devm_kmemdup(mdev->dev, clc,
le32_to_cpu(clc->len),
GFP_KERNEL);
@@ -828,7 +854,6 @@ mt7925_mcu_parse_phy_cap(struct mt792x_dev *dev, char *data)
mdev->phy.chainmask = mdev->phy.antenna_mask;
mdev->phy.cap.has_2ghz = cap->hw_path & BIT(WF0_24G);
mdev->phy.cap.has_5ghz = cap->hw_path & BIT(WF0_5G);
- dev->has_eht = cap->eht;
}
static void
@@ -1850,49 +1875,6 @@ mt7925_mcu_sta_mld_tlv(struct sk_buff *skb,
}
}
-static int
-mt7925_mcu_sta_cmd(struct mt76_phy *phy,
- struct mt76_sta_cmd_info *info)
-{
- struct mt76_vif_link *mvif = (struct mt76_vif_link *)info->vif->drv_priv;
- struct mt76_dev *dev = phy->dev;
- struct sk_buff *skb;
- int conn_state;
-
- skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, info->wcid,
- MT7925_STA_UPDATE_MAX_SIZE);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- conn_state = info->enable ? CONN_STATE_PORT_SECURE :
- CONN_STATE_DISCONNECT;
- if (info->link_sta)
- mt76_connac_mcu_sta_basic_tlv(dev, skb, info->link_conf,
- info->link_sta,
- conn_state, info->newly);
- if (info->link_sta && info->enable) {
- mt7925_mcu_sta_phy_tlv(skb, info->vif, info->link_sta);
- mt7925_mcu_sta_ht_tlv(skb, info->link_sta);
- mt7925_mcu_sta_vht_tlv(skb, info->link_sta);
- mt76_connac_mcu_sta_uapsd(skb, info->vif, info->link_sta->sta);
- mt7925_mcu_sta_amsdu_tlv(skb, info->vif, info->link_sta);
- mt7925_mcu_sta_he_tlv(skb, info->link_sta);
- mt7925_mcu_sta_he_6g_tlv(skb, info->link_sta);
- mt7925_mcu_sta_eht_tlv(skb, info->link_sta);
- mt7925_mcu_sta_rate_ctrl_tlv(skb, info->vif,
- info->link_sta);
- mt7925_mcu_sta_state_v2_tlv(phy, skb, info->link_sta,
- info->vif, info->rcpi,
- info->state);
- mt7925_mcu_sta_mld_tlv(skb, info->vif, info->link_sta->sta);
- }
-
- if (info->enable)
- mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->link_sta);
-
- return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
-}
-
static void
mt7925_mcu_sta_remove_tlv(struct sk_buff *skb)
{
@@ -1905,8 +1887,8 @@ mt7925_mcu_sta_remove_tlv(struct sk_buff *skb)
}
static int
-mt7925_mcu_mlo_sta_cmd(struct mt76_phy *phy,
- struct mt76_sta_cmd_info *info)
+mt7925_mcu_sta_cmd(struct mt76_phy *phy,
+ struct mt76_sta_cmd_info *info)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)info->vif->drv_priv;
struct mt76_dev *dev = phy->dev;
@@ -1920,12 +1902,10 @@ mt7925_mcu_mlo_sta_cmd(struct mt76_phy *phy,
if (IS_ERR(skb))
return PTR_ERR(skb);
- if (info->enable)
+ if (info->enable && info->link_sta) {
mt76_connac_mcu_sta_basic_tlv(dev, skb, info->link_conf,
info->link_sta,
info->enable, info->newly);
-
- if (info->enable && info->link_sta) {
mt7925_mcu_sta_phy_tlv(skb, info->vif, info->link_sta);
mt7925_mcu_sta_ht_tlv(skb, info->link_sta);
mt7925_mcu_sta_vht_tlv(skb, info->link_sta);
@@ -1976,25 +1956,15 @@ int mt7925_mcu_sta_update(struct mt792x_dev *dev,
};
struct mt792x_sta *msta;
struct mt792x_link_sta *mlink;
- int err;
if (link_sta) {
msta = (struct mt792x_sta *)link_sta->sta->drv_priv;
mlink = mt792x_sta_to_link(msta, link_sta->link_id);
}
info.wcid = link_sta ? &mlink->wcid : &mvif->sta.deflink.wcid;
+ info.newly = state != MT76_STA_INFO_STATE_ASSOC;
- if (link_sta)
- info.newly = state != MT76_STA_INFO_STATE_ASSOC;
- else
- info.newly = state == MT76_STA_INFO_STATE_ASSOC ? false : true;
-
- if (ieee80211_vif_is_mld(vif))
- err = mt7925_mcu_mlo_sta_cmd(&dev->mphy, &info);
- else
- err = mt7925_mcu_sta_cmd(&dev->mphy, &info);
-
- return err;
+ return mt7925_mcu_sta_cmd(&dev->mphy, &info);
}
int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev,
@@ -2559,6 +2529,7 @@ mt7925_mcu_bss_mld_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif = link_conf->vif;
struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
struct mt792x_vif *mvif = (struct mt792x_vif *)link_conf->vif->drv_priv;
+ struct mt792x_phy *phy = mvif->phy;
struct bss_mld_tlv *mld;
struct tlv *tlv;
bool is_mld;
@@ -2574,8 +2545,13 @@ mt7925_mcu_bss_mld_tlv(struct sk_buff *skb,
mld->group_mld_id = is_mld ? mvif->bss_conf.mt76.idx : 0xff;
mld->own_mld_id = mconf->mt76.idx + 32;
mld->remap_idx = 0xff;
- mld->eml_enable = !!(link_conf->vif->cfg.eml_cap &
- IEEE80211_EML_CAP_EMLSR_SUPP);
+
+ if (phy->chip_cap & MT792x_CHIP_CAP_MLO_EML_EN) {
+ mld->eml_enable = !!(link_conf->vif->cfg.eml_cap &
+ IEEE80211_EML_CAP_EMLSR_SUPP);
+ } else {
+ mld->eml_enable = 0;
+ }
memcpy(mld->mac_addr, vif->addr, ETH_ALEN);
}
@@ -2668,6 +2644,62 @@ int mt7925_mcu_set_timing(struct mt792x_phy *phy,
MCU_UNI_CMD(BSS_INFO_UPDATE), true);
}
+void mt7925_mcu_del_dev(struct mt76_dev *mdev,
+ struct ieee80211_vif *vif)
+{
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
+ struct {
+ struct {
+ u8 omac_idx;
+ u8 band_idx;
+ __le16 pad;
+ } __packed hdr;
+ struct req_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 active;
+ u8 link_idx; /* hw link idx */
+ u8 omac_addr[ETH_ALEN];
+ } __packed tlv;
+ } dev_req = {
+ .tlv = {
+ .tag = cpu_to_le16(DEV_INFO_ACTIVE),
+ .len = cpu_to_le16(sizeof(struct req_tlv)),
+ .active = true,
+ },
+ };
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct mt76_connac_bss_basic_tlv basic;
+ } basic_req = {
+ .basic = {
+ .tag = cpu_to_le16(UNI_BSS_INFO_BASIC),
+ .len = cpu_to_le16(sizeof(struct mt76_connac_bss_basic_tlv)),
+ .active = true,
+ .conn_state = 1,
+ },
+ };
+
+ dev_req.hdr.omac_idx = mvif->omac_idx;
+ dev_req.hdr.band_idx = mvif->band_idx;
+
+ basic_req.hdr.bss_idx = mvif->idx;
+ basic_req.basic.omac_idx = mvif->omac_idx;
+ basic_req.basic.band_idx = mvif->band_idx;
+ basic_req.basic.link_idx = mvif->link_idx;
+
+ mt76_mcu_send_msg(mdev, MCU_UNI_CMD(BSS_INFO_UPDATE),
+ &basic_req, sizeof(basic_req), true);
+
+ /* recover the omac address for the legacy interface */
+ memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN);
+ mt76_mcu_send_msg(mdev, MCU_UNI_CMD(DEV_INFO_UPDATE),
+ &dev_req, sizeof(dev_req), true);
+}
+
int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
struct ieee80211_chanctx_conf *ctx,
struct ieee80211_bss_conf *link_conf,
@@ -3155,16 +3187,17 @@ __mt7925_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
.idx = idx,
.env = env_cap,
- .acpi_conf = mt792x_acpi_get_flags(&dev->phy),
};
int ret, valid_cnt = 0;
- u8 i, *pos;
+ u8 *pos, *last_pos;
if (!clc)
return 0;
- pos = clc->data + sizeof(*seg) * clc->nr_seg;
- for (i = 0; i < clc->nr_country; i++) {
+ req.ver = clc->ver;
+ pos = clc->data + sizeof(*seg) * clc->t0.nr_seg;
+ last_pos = clc->data + le32_to_cpu(*(__le32 *)(clc->data + 4));
+ while (pos < last_pos) {
struct mt7925_clc_rule *rule = (struct mt7925_clc_rule *)pos;
pos += sizeof(*rule);
@@ -3179,6 +3212,7 @@ __mt7925_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
memcpy(req.type, rule->type, 2);
req.size = cpu_to_le16(seg->len);
+ dev->phy.clc_chan_conf = clc->ver == 1 ? 0xff : rule->flag;
skb = __mt76_mcu_msg_alloc(&dev->mt76, &req,
le16_to_cpu(req.size) + sizeof(req),
sizeof(req), GFP_KERNEL);
@@ -3194,8 +3228,10 @@ __mt7925_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
valid_cnt++;
}
- if (!valid_cnt)
+ if (!valid_cnt) {
+ dev->phy.clc_chan_conf = 0xff;
return -ENOENT;
+ }
return 0;
}
@@ -3208,6 +3244,9 @@ int mt7925_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
/* submit all clc config */
for (i = 0; i < ARRAY_SIZE(phy->clc); i++) {
+ if (i == MT792x_CLC_BE_CTRL)
+ continue;
+
ret = __mt7925_mcu_set_clc(dev, alpha2, env_cap,
phy->clc[i], i);
@@ -3266,6 +3305,9 @@ int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
else
uni_txd->option = MCU_CMD_UNI_EXT_ACK;
+ if (cmd == MCU_UNI_CMD(HIF_CTRL))
+ uni_txd->option &= ~MCU_CMD_ACK;
+
goto exit;
}
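For v2 CLC payloads, __mt7925_mcu_set_clc() above latches the matched rule's flag byte into dev->phy.clc_chan_conf (v1 payloads force 0xff, i.e. everything allowed), and mt7925_regd_channel_update() later tests it bit by bit. An illustrative helper showing the bit meanings (the wrapper itself is hypothetical):

/* unii_idx matches the first argument of IS_UNII_INVALID() in init.c:
 * 0 = UNII-4 (5845-5925 MHz), 1..4 = UNII-5..8 (5925-7125 MHz)
 */
static bool mt7925_unii_enabled_sketch(u8 clc_chan_conf, int unii_idx)
{
	return clc_chan_conf & BIT(unii_idx);
}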
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
index 1e47d2c61b54..8ac43feb26d6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
@@ -566,8 +566,8 @@ struct mt7925_wow_pattern_tlv {
u8 offset;
u8 mask[MT76_CONNAC_WOW_MASK_MAX_LEN];
u8 pattern[MT76_CONNAC_WOW_PATTEN_MAX_LEN];
- u8 rsv[7];
-} __packed;
+ u8 rsv[4];
+} __packed;
struct roc_acquire_tlv {
__le16 tag;
@@ -627,6 +627,8 @@ int mt7925_mcu_sched_scan_req(struct mt76_phy *phy,
int mt7925_mcu_sched_scan_enable(struct mt76_phy *phy,
struct ieee80211_vif *vif,
bool enable);
+void mt7925_mcu_del_dev(struct mt76_dev *mdev,
+ struct ieee80211_vif *vif);
int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
struct ieee80211_chanctx_conf *ctx,
struct ieee80211_bss_conf *link_conf,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
index 8707b5d04743..4e50f2597ccd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
@@ -137,11 +137,18 @@ enum {
MT7925_CLC_MAX_NUM,
};
+struct mt7925_clc_rule_v2 {
+ u32 flag;
+ u8 alpha2[2];
+ u8 rsv[10];
+} __packed;
+
struct mt7925_clc_rule {
u8 alpha2[2];
u8 type[2];
u8 seg_idx;
- u8 rsv[3];
+ u8 flag; /* UNII4~8 ctrl flag */
+ u8 rsv[2];
} __packed;
struct mt7925_clc_segment {
@@ -152,14 +159,26 @@ struct mt7925_clc_segment {
u8 rsv2[4];
} __packed;
-struct mt7925_clc {
- __le32 len;
- u8 idx;
- u8 ver;
+struct mt7925_clc_type0 {
u8 nr_country;
u8 type;
u8 nr_seg;
u8 rsv[7];
+} __packed;
+
+struct mt7925_clc_type2 {
+ u8 type;
+ u8 rsv[9];
+} __packed;
+
+struct mt7925_clc {
+ __le32 len;
+ u8 idx;
+ u8 ver;
+ union {
+ struct mt7925_clc_type0 t0;
+ struct mt7925_clc_type2 t2;
+ };
u8 data[];
} __packed;
@@ -167,9 +186,12 @@ enum mt7925_eeprom_field {
MT_EE_CHIP_ID = 0x000,
MT_EE_VERSION = 0x002,
MT_EE_MAC_ADDR = 0x004,
+ MT_EE_HW_TYPE = 0xa71,
__MT_EE_MAX = 0x9ff
};
+#define MT_EE_HW_TYPE_ENCAP GENMASK(1, 0)
+
enum {
TXPWR_USER,
TXPWR_EEPROM,
@@ -235,6 +257,7 @@ int mt7925_mcu_chip_config(struct mt792x_dev *dev, const char *cmd);
int mt7925_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif,
u8 bit_op, u32 bit_map);
+void mt7925_regd_be_ctrl(struct mt792x_dev *dev, u8 *alpha2);
void mt7925_regd_update(struct mt792x_dev *dev);
int mt7925_mac_init(struct mt792x_dev *dev);
int mt7925_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
@@ -263,13 +286,12 @@ int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev,
struct ieee80211_vif *vif,
bool enable);
int mt7925_mcu_uni_tx_ba(struct mt792x_dev *dev,
- struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params,
bool enable);
int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev,
- struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params,
bool enable);
+void mt7925_mlo_pm_work(struct work_struct *work);
void mt7925_scan_work(struct work_struct *work);
void mt7925_roc_work(struct work_struct *work);
int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev,
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h
index 32ed01a96bf7..e0359d431eca 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x.h
@@ -27,8 +27,9 @@
#define MT792x_CHIP_CAP_CLC_EVT_EN BIT(0)
#define MT792x_CHIP_CAP_RSSI_NOTIFY_EVT_EN BIT(1)
-#define MT792x_CHIP_CAP_MLO_EVT_EN BIT(2)
#define MT792x_CHIP_CAP_WF_RF_PIN_CTRL_EVT_EN BIT(3)
+#define MT792x_CHIP_CAP_MLO_EN BIT(8)
+#define MT792x_CHIP_CAP_MLO_EML_EN BIT(9)
/* NOTE: used to map mt76_rates. idx may change if firmware expands table */
#define MT792x_BASIC_RATES_TBL 11
@@ -70,6 +71,7 @@ struct mt792x_fw_features {
enum {
MT792x_CLC_POWER,
MT792x_CLC_POWER_EXT,
+ MT792x_CLC_BE_CTRL,
MT792x_CLC_MAX_NUM,
};
@@ -81,6 +83,13 @@ enum mt792x_reg_power_type {
MT_AP_VLP,
};
+enum mt792x_mlo_pm_state {
+ MT792x_MLO_LINK_DISASSOC,
+ MT792x_MLO_LINK_ASSOC,
+ MT792x_MLO_CHANGED_PS_PENDING,
+ MT792x_MLO_CHANGED_PS,
+};
+
DECLARE_EWMA(avg_signal, 10, 8)
struct mt792x_link_sta {
@@ -134,6 +143,7 @@ struct mt792x_vif {
struct mt792x_phy *phy;
u16 valid_links;
u8 deflink_id;
+ enum mt792x_mlo_pm_state mlo_pm_state;
struct work_struct csa_work;
struct timer_list csa_timer;
@@ -239,6 +249,7 @@ struct mt792x_dev {
const struct mt792x_irq_map *irq_map;
struct work_struct ipv6_ns_work;
+ struct delayed_work mlo_pm_work;
/* IPv6 addresses for WoWLAN */
struct sk_buff_head ipv6_ns_list;
@@ -497,7 +508,7 @@ int mt792xe_mcu_fw_pmctrl(struct mt792x_dev *dev);
int mt792x_init_acpi_sar(struct mt792x_dev *dev);
int mt792x_init_acpi_sar_power(struct mt792x_phy *phy, bool set_default);
u8 mt792x_acpi_get_flags(struct mt792x_phy *phy);
-u8 mt792x_acpi_get_mtcl_conf(struct mt792x_phy *phy, char *alpha2);
+u32 mt792x_acpi_get_mtcl_conf(struct mt792x_phy *phy, char *alpha2);
#else
static inline int mt792x_init_acpi_sar(struct mt792x_dev *dev)
{
@@ -515,9 +526,9 @@ static inline u8 mt792x_acpi_get_flags(struct mt792x_phy *phy)
return 0;
}
-static inline u8 mt792x_acpi_get_mtcl_conf(struct mt792x_phy *phy, char *alpha2)
+static inline u32 mt792x_acpi_get_mtcl_conf(struct mt792x_phy *phy, char *alpha2)
{
- return 0xf;
+ return MT792X_ACPI_MTCL_INVALID;
}
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
index 9317f8ff2070..d1aebadd50aa 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
@@ -4,6 +4,28 @@
#include <linux/acpi.h>
#include "mt792x.h"
+static const char * const cc_list_all[] = {
+ "00", "EU", "AR", "AU", "AZ", "BY", "BO", "BR",
+ "CA", "CL", "CN", "ID", "JP", "MY", "MX", "ME",
+ "MA", "NZ", "NG", "PH", "RU", "RS", "SG", "KR",
+ "TW", "TH", "UA", "GB", "US", "VN", "KH", "PY",
+};
+
+static const char * const cc_list_eu[] = {
+ "AD", "AT", "BE", "BG", "CY", "CZ", "HR", "DK",
+ "EE", "FI", "FR", "DE", "GR", "HU", "IS", "IE",
+ "IT", "LV", "LI", "LT", "LU", "MC", "MT", "NL",
+ "NO", "PL", "PT", "RO", "SK", "SI", "ES", "SE",
+ "CH",
+};
+
+static const char * const cc_list_be[] = {
+ "AR", "BR", "BY", "CL", "IQ", "MX", "OM", "RU",
+ "RW", "VN", "KR", "UA", "", "", "", "",
+ "EU", "AT", "CN", "CA", "TW", "NZ", "PH", "UK",
+ "US",
+};
+
static int
mt792x_acpi_read(struct mt792x_dev *dev, u8 *method, u8 **tbl, u32 *len)
{
@@ -66,13 +88,22 @@ free:
}
/* MTCL : Country List Table for 6G band */
+/* MTCL : Country List Table for 6G band and 11BE */
static int
mt792x_asar_acpi_read_mtcl(struct mt792x_dev *dev, u8 **table, u8 *version)
{
- int ret;
+ int len, ret;
- *version = ((ret = mt792x_acpi_read(dev, MT792x_ACPI_MTCL, table, NULL)) < 0)
- ? 1 : 2;
+ ret = mt792x_acpi_read(dev, MT792x_ACPI_MTCL, table, &len);
+ if (ret)
+ return ret;
+
+ if (len == sizeof(struct mt792x_asar_cl))
+ *version = ((struct mt792x_asar_cl *)*table)->version;
+ else if (len == sizeof(struct mt792x_asar_cl_v3))
+ *version = ((struct mt792x_asar_cl_v3 *)*table)->version;
+ else
+ return -EINVAL;
return ret;
}
@@ -351,10 +382,24 @@ u8 mt792x_acpi_get_flags(struct mt792x_phy *phy)
}
EXPORT_SYMBOL_GPL(mt792x_acpi_get_flags);
-static u8
+static u32
+mt792x_acpi_get_mtcl_map_v3(int row, int column, struct mt792x_asar_cl_v3 *cl)
+{
+ u32 config = 0;
+ u8 mode_be = 0;
+
+ mode_be = (cl->mode_be > 0x02) ? 0 : cl->mode_be;
+
+ if (cl->version > 2 && cl->clbe[row] & BIT(column))
+ config |= (mode_be & 0x3) << 4;
+
+ return config;
+}
+
+static u32
mt792x_acpi_get_mtcl_map(int row, int column, struct mt792x_asar_cl *cl)
{
- u8 config = 0;
+ u32 config = 0;
u8 mode_6g, mode_5g9;
mode_6g = (cl->mode_6g > 0x02) ? 0 : cl->mode_6g;
@@ -368,30 +413,44 @@ mt792x_acpi_get_mtcl_map(int row, int column, struct mt792x_asar_cl *cl)
return config;
}
-u8 mt792x_acpi_get_mtcl_conf(struct mt792x_phy *phy, char *alpha2)
+static u32
+mt792x_acpi_parse_mtcl_tbl_v3(struct mt792x_phy *phy, char *alpha2)
{
- static const char * const cc_list_all[] = {
- "00", "EU", "AR", "AU", "AZ", "BY", "BO", "BR",
- "CA", "CL", "CN", "ID", "JP", "MY", "MX", "ME",
- "MA", "NZ", "NG", "PH", "RU", "RS", "SG", "KR",
- "TW", "TH", "UA", "GB", "US", "VN", "KH", "PY",
- };
- static const char * const cc_list_eu[] = {
- "AT", "BE", "BG", "CY", "CZ", "HR", "DK", "EE",
- "FI", "FR", "DE", "GR", "HU", "IS", "IE", "IT",
- "LV", "LI", "LT", "LU", "MT", "NL", "NO", "PL",
- "PT", "RO", "SK", "SI", "ES", "SE", "CH",
- };
struct mt792x_acpi_sar *sar = phy->acpisar;
- struct mt792x_asar_cl *cl;
+ struct mt792x_asar_cl_v3 *cl = sar->countrylist_v3;
int col, row, i;
- if (!sar)
- return 0xf;
+ if (sar->ver != 3)
+ goto out;
- cl = sar->countrylist;
if (!cl)
- return 0xc;
+ return MT792X_ACPI_MTCL_INVALID;
+
+ for (i = 0; i < ARRAY_SIZE(cc_list_be); i++) {
+ col = 7 - i % 8;
+ row = i / 8;
+ if (!memcmp(cc_list_be[i], alpha2, 2))
+ return mt792x_acpi_get_mtcl_map_v3(row, col, cl);
+ }
+ for (i = 0; i < ARRAY_SIZE(cc_list_eu); i++) {
+ if (!memcmp(cc_list_eu[i], alpha2, 2))
+ return mt792x_acpi_get_mtcl_map_v3(3, 7, cl);
+ }
+
+out:
+ /* no v3 table or no matching entry: default the 11be field (bits 5:4) to enabled */
+ return 0x20;
+}
+
+static u32
+mt792x_acpi_parse_mtcl_tbl(struct mt792x_phy *phy, char *alpha2)
+{
+ struct mt792x_acpi_sar *sar = phy->acpisar;
+ struct mt792x_asar_cl *cl = sar->countrylist;
+ int col, row, i;
+
+ if (!cl)
+ return MT792X_ACPI_MTCL_INVALID;
for (i = 0; i < ARRAY_SIZE(cc_list_all); i++) {
col = 7 - i % 8;
@@ -406,4 +465,22 @@ u8 mt792x_acpi_get_mtcl_conf(struct mt792x_phy *phy, char *alpha2)
return mt792x_acpi_get_mtcl_map(0, 7, cl);
}
+
+u32 mt792x_acpi_get_mtcl_conf(struct mt792x_phy *phy, char *alpha2)
+{
+ struct mt792x_acpi_sar *sar = phy->acpisar;
+ u32 config = 0;
+
+ if (!sar)
+ return MT792X_ACPI_MTCL_INVALID;
+
+ config = mt792x_acpi_parse_mtcl_tbl_v3(phy, alpha2);
+
+ if (config == MT792X_ACPI_MTCL_INVALID)
+ return MT792X_ACPI_MTCL_INVALID;
+
+ config |= mt792x_acpi_parse_mtcl_tbl(phy, alpha2);
+
+ return config;
+}
EXPORT_SYMBOL_GPL(mt792x_acpi_get_mtcl_conf);
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h
index 2298983b6342..e45dcd7fbdb1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h
@@ -15,6 +15,8 @@
#define MT792x_ACPI_MTGS "MTGS"
#define MT792x_ACPI_MTFG "MTFG"
+#define MT792X_ACPI_MTCL_INVALID 0xffffffff
+
struct mt792x_asar_dyn_limit {
u8 idx;
u8 frp[5];
@@ -72,6 +74,17 @@ struct mt792x_asar_geo_v2 {
DECLARE_FLEX_ARRAY(struct mt792x_asar_geo_limit_v2, tbl);
} __packed;
+struct mt792x_asar_cl_v3 {
+ u8 names[4];
+ u8 version;
+ u8 mode_6g;
+ u8 cl6g[6];
+ u8 mode_5g9;
+ u8 cl5g9[6];
+ u8 mode_be;
+ u8 clbe[6];
+} __packed;
+
struct mt792x_asar_cl {
u8 names[4];
u8 version;
@@ -100,7 +113,10 @@ struct mt792x_acpi_sar {
struct mt792x_asar_geo *geo;
struct mt792x_asar_geo_v2 *geo_v2;
};
- struct mt792x_asar_cl *countrylist;
+ union {
+ struct mt792x_asar_cl *countrylist;
+ struct mt792x_asar_cl_v3 *countrylist_v3;
+ };
struct mt792x_asar_fg *fg;
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
index 8799627f6292..0f7806f6338d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
@@ -665,7 +665,8 @@ int mt792x_init_wiphy(struct ieee80211_hw *hw)
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
ieee80211_hw_set(hw, CONNECTION_MONITOR);
- ieee80211_hw_set(hw, CHANCTX_STA_CSA);
+ if (is_mt7921(&dev->mt76))
+ ieee80211_hw_set(hw, CHANCTX_STA_CSA);
if (dev->pm.enable)
ieee80211_hw_set(hw, CONNECTION_MONITOR);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
index 7b2bb72b407d..4a28db17a287 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
@@ -616,28 +616,51 @@ static void
mt7996_sta_hw_queue_read(void *data, struct ieee80211_sta *sta)
{
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
- struct mt7996_dev *dev = msta->vif->deflink.phy->dev;
+ struct mt7996_vif *mvif = msta->vif;
+ struct mt7996_dev *dev = mvif->deflink.phy->dev;
+ struct ieee80211_link_sta *link_sta;
struct seq_file *s = data;
- u8 ac;
+ struct ieee80211_vif *vif;
+ unsigned int link_id;
+
+ vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv);
+
+ rcu_read_lock();
- for (ac = 0; ac < 4; ac++) {
- u32 qlen, ctrl, val;
- u32 idx = msta->wcid.idx >> 5;
- u8 offs = msta->wcid.idx & GENMASK(4, 0);
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct mt7996_sta_link *msta_link;
+ struct mt76_vif_link *mlink;
+ u8 ac;
- ctrl = BIT(31) | BIT(11) | (ac << 24);
- val = mt76_rr(dev, MT_PLE_AC_QEMPTY(ac, idx));
+ mlink = rcu_dereference(mvif->mt76.link[link_id]);
+ if (!mlink)
+ continue;
- if (val & BIT(offs))
+ msta_link = rcu_dereference(msta->link[link_id]);
+ if (!msta_link)
continue;
- mt76_wr(dev, MT_FL_Q0_CTRL, ctrl | msta->wcid.idx);
- qlen = mt76_get_field(dev, MT_FL_Q3_CTRL,
- GENMASK(11, 0));
- seq_printf(s, "\tSTA %pM wcid %d: AC%d%d queued:%d\n",
- sta->addr, msta->wcid.idx,
- msta->vif->deflink.mt76.wmm_idx, ac, qlen);
+ for (ac = 0; ac < 4; ac++) {
+ u32 idx = msta_link->wcid.idx >> 5, qlen, ctrl, val;
+ u8 offs = msta_link->wcid.idx & GENMASK(4, 0);
+
+ ctrl = BIT(31) | BIT(11) | (ac << 24);
+ val = mt76_rr(dev, MT_PLE_AC_QEMPTY(ac, idx));
+
+ if (val & BIT(offs))
+ continue;
+
+ mt76_wr(dev,
+ MT_FL_Q0_CTRL, ctrl | msta_link->wcid.idx);
+ qlen = mt76_get_field(dev, MT_FL_Q3_CTRL,
+ GENMASK(11, 0));
+ seq_printf(s, "\tSTA %pM wcid %d: AC%d%d queued:%d\n",
+ sta->addr, msta_link->wcid.idx,
+ mlink->wmm_idx, ac, qlen);
+ }
}
+
+ rcu_read_unlock();
}
static int
@@ -930,6 +953,7 @@ static ssize_t mt7996_sta_fixed_rate_set(struct file *file,
struct ieee80211_sta *sta = file->private_data;
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
struct mt7996_dev *dev = msta->vif->deflink.phy->dev;
+ struct mt7996_sta_link *msta_link = &msta->deflink;
struct ra_rate phy = {};
char buf[100];
int ret;
@@ -964,7 +988,7 @@ static ssize_t mt7996_sta_fixed_rate_set(struct file *file,
goto out;
}
- phy.wlan_idx = cpu_to_le16(msta->wcid.idx);
+ phy.wlan_idx = cpu_to_le16(msta_link->wcid.idx);
phy.gi = cpu_to_le16(gi);
phy.ltf = cpu_to_le16(ltf);
phy.ldpc = phy.ldpc ? 7 : 0;
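The debugfs hunk above is representative of the whole mt7996 MLO rework in this series: per-station state (wcid, airtime, TWT, rate control) moves from struct mt7996_sta into struct mt7996_sta_link, so code that used to touch msta->wcid now walks the station's active links under RCU. The recurring pattern, reduced to a sketch with vif, sta, mvif and msta assumed in scope:

struct ieee80211_link_sta *link_sta;
unsigned int link_id;

rcu_read_lock();
for_each_sta_active_link(vif, sta, link_sta, link_id) {
	struct mt76_vif_link *mlink;
	struct mt7996_sta_link *msta_link;

	mlink = rcu_dereference(mvif->mt76.link[link_id]);
	msta_link = rcu_dereference(msta->link[link_id]);
	if (!mlink || !msta_link)
		continue;

	/* per-link work uses msta_link->wcid.idx, mlink->wmm_idx, ... */
}
rcu_read_unlock();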
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
index 019c925ae600..d89c06f47997 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -53,26 +53,48 @@ static const struct mt7996_dfs_radar_spec jp_radar_specs = {
};
static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev,
- u16 idx, bool unicast)
+ u16 idx, u8 band_idx)
{
- struct mt7996_sta *sta;
+ struct mt7996_sta_link *msta_link;
+ struct mt7996_sta *msta;
+ struct mt7996_vif *mvif;
struct mt76_wcid *wcid;
+ int i;
if (idx >= ARRAY_SIZE(dev->mt76.wcid))
return NULL;
wcid = rcu_dereference(dev->mt76.wcid[idx]);
- if (unicast || !wcid)
- return wcid;
+ if (!wcid)
+ return NULL;
- if (!wcid->sta)
+ if (!mt7996_band_valid(dev, band_idx))
return NULL;
- sta = container_of(wcid, struct mt7996_sta, wcid);
- if (!sta->vif)
+ if (wcid->phy_idx == band_idx)
+ return wcid;
+
+ msta_link = container_of(wcid, struct mt7996_sta_link, wcid);
+ msta = msta_link->sta;
+ if (!msta || !msta->vif)
return NULL;
- return &sta->vif->deflink.sta.wcid;
+ mvif = msta->vif;
+ for (i = 0; i < ARRAY_SIZE(mvif->mt76.link); i++) {
+ struct mt76_vif_link *mlink;
+
+ mlink = rcu_dereference(mvif->mt76.link[i]);
+ if (!mlink)
+ continue;
+
+ if (mlink->band_idx != band_idx)
+ continue;
+
+ msta_link = rcu_dereference(msta->link[i]);
+ break;
+ }
+
+ return &msta_link->wcid;
}
bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask)
@@ -100,10 +122,13 @@ static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
[IEEE80211_AC_VI] = 4,
[IEEE80211_AC_VO] = 6
};
+ struct mt7996_sta_link *msta_link;
+ struct mt76_vif_link *mlink;
struct ieee80211_sta *sta;
struct mt7996_sta *msta;
u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
LIST_HEAD(sta_poll_list);
+ struct mt76_wcid *wcid;
int i;
spin_lock_bh(&dev->mt76.sta_poll_lock);
@@ -123,25 +148,28 @@ static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
spin_unlock_bh(&dev->mt76.sta_poll_lock);
break;
}
- msta = list_first_entry(&sta_poll_list,
- struct mt7996_sta, wcid.poll_list);
- list_del_init(&msta->wcid.poll_list);
+ msta_link = list_first_entry(&sta_poll_list,
+ struct mt7996_sta_link,
+ wcid.poll_list);
+ msta = msta_link->sta;
+ wcid = &msta_link->wcid;
+ list_del_init(&wcid->poll_list);
spin_unlock_bh(&dev->mt76.sta_poll_lock);
- idx = msta->wcid.idx;
+ idx = wcid->idx;
/* refresh peer's airtime reporting */
addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 20);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- u32 tx_last = msta->airtime_ac[i];
- u32 rx_last = msta->airtime_ac[i + 4];
+ u32 tx_last = msta_link->airtime_ac[i];
+ u32 rx_last = msta_link->airtime_ac[i + 4];
- msta->airtime_ac[i] = mt76_rr(dev, addr);
- msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
+ msta_link->airtime_ac[i] = mt76_rr(dev, addr);
+ msta_link->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
- tx_time[i] = msta->airtime_ac[i] - tx_last;
- rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
+ tx_time[i] = msta_link->airtime_ac[i] - tx_last;
+ rx_time[i] = msta_link->airtime_ac[i + 4] - rx_last;
if ((tx_last | rx_last) & BIT(30))
clear = true;
@@ -152,10 +180,11 @@ static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
if (clear) {
mt7996_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
+ memset(msta_link->airtime_ac, 0,
+ sizeof(msta_link->airtime_ac));
}
- if (!msta->wcid.sta)
+ if (!wcid->sta)
continue;
sta = container_of((void *)msta, struct ieee80211_sta,
@@ -181,28 +210,23 @@ static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
rssi[2] = to_rssi(GENMASK(23, 16), val);
rssi[3] = to_rssi(GENMASK(31, 24), val);
- msta->ack_signal =
- mt76_rx_signal(msta->vif->deflink.phy->mt76->antenna_mask, rssi);
+ mlink = rcu_dereference(msta->vif->mt76.link[wcid->link_id]);
+ if (mlink) {
+ struct mt76_phy *mphy = mt76_vif_link_phy(mlink);
+
+ if (mphy)
+ msta_link->ack_signal =
+ mt76_rx_signal(mphy->antenna_mask,
+ rssi);
+ }
- ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
+ ewma_avg_signal_add(&msta_link->avg_ack_signal,
+ -msta_link->ack_signal);
}
rcu_read_unlock();
}
-void mt7996_mac_enable_rtscts(struct mt7996_dev *dev,
- struct ieee80211_vif *vif, bool enable)
-{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- u32 addr;
-
- addr = mt7996_mac_wtbl_lmac_addr(dev, mvif->deflink.sta.wcid.idx, 5);
- if (enable)
- mt76_set(dev, addr, BIT(5));
- else
- mt76_clear(dev, addr, BIT(5));
-}
-
/* The HW does not translate the mac header to 802.3 for mesh point */
static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
@@ -474,11 +498,15 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
- status->wcid = mt7996_rx_get_wcid(dev, idx, unicast);
+ status->wcid = mt7996_rx_get_wcid(dev, idx, band_idx);
if (status->wcid) {
- msta = container_of(status->wcid, struct mt7996_sta, wcid);
- mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
+ struct mt7996_sta_link *msta_link;
+
+ msta_link = container_of(status->wcid, struct mt7996_sta_link,
+ wcid);
+ msta = msta_link->sta;
+ mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);
}
status->freq = mphy->chandef.chan->center_freq;
@@ -717,9 +745,8 @@ mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
u32 val;
if (wcid->sta) {
- struct ieee80211_sta *sta;
+ struct ieee80211_sta *sta = wcid_to_sta(wcid);
- sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
wmm = sta->wme;
}
@@ -746,7 +773,9 @@ mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
static void
mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
- struct sk_buff *skb, struct ieee80211_key_conf *key)
+ struct sk_buff *skb,
+ struct ieee80211_key_conf *key,
+ struct mt76_wcid *wcid)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
@@ -754,6 +783,7 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
bool multicast = is_multicast_ether_addr(hdr->addr1);
u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
__le16 fc = hdr->frame_control, sc = hdr->seq_ctrl;
+ u16 seqno = le16_to_cpu(sc);
u8 fc_type, fc_stype;
u32 val;
@@ -773,8 +803,7 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
info->flags & IEEE80211_TX_CTL_USE_MINRATE)
val |= MT_TXD1_FIXED_RATE;
- if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
- key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
+ if (key && multicast && ieee80211_is_robust_mgmt_frame(skb)) {
val |= MT_TXD1_BIP;
txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
}
@@ -804,9 +833,13 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
}
- if (info->flags & IEEE80211_TX_CTL_INJECTED) {
- u16 seqno = le16_to_cpu(sc);
+ if (multicast && ieee80211_vif_is_mld(info->control.vif)) {
+ val = MT_TXD3_SN_VALID |
+ FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
+ txwi[3] |= cpu_to_le32(val);
+ }
+ if (info->flags & IEEE80211_TX_CTL_INJECTED) {
if (ieee80211_is_back_req(hdr->frame_control)) {
struct ieee80211_bar *bar;
@@ -819,6 +852,19 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
txwi[3] |= cpu_to_le32(val);
txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
}
+
+ if (ieee80211_vif_is_mld(info->control.vif) &&
+ (multicast || unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))))
+ txwi[5] |= cpu_to_le32(MT_TXD5_FL);
+
+ if (ieee80211_is_nullfunc(fc) && ieee80211_has_a4(fc) &&
+ ieee80211_vif_is_mld(info->control.vif)) {
+ txwi[5] |= cpu_to_le32(MT_TXD5_FL);
+ txwi[6] |= cpu_to_le32(MT_TXD6_DIS_MAT);
+ }
+
+ if (!wcid->sta && ieee80211_is_mgmt(fc))
+ txwi[6] |= cpu_to_le32(MT_TXD6_DIS_MAT);
}
void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
@@ -832,7 +878,9 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
- struct mt76_vif_link *mvif;
+ struct mt76_vif_link *mlink = NULL;
+ struct mt7996_vif *mvif;
+ unsigned int link_id;
u16 tx_count = 15;
u32 val;
bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
@@ -840,11 +888,20 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
bool beacon = !!(changed & (BSS_CHANGED_BEACON |
BSS_CHANGED_BEACON_ENABLED)) && (!inband_disc);
- mvif = vif ? (struct mt76_vif_link *)vif->drv_priv : NULL;
- if (mvif) {
- omac_idx = mvif->omac_idx;
- wmm_idx = mvif->wmm_idx;
- band_idx = mvif->band_idx;
+ if (wcid != &dev->mt76.global_wcid)
+ link_id = wcid->link_id;
+ else
+ link_id = u32_get_bits(info->control.flags,
+ IEEE80211_TX_CTRL_MLO_LINK);
+
+ mvif = vif ? (struct mt7996_vif *)vif->drv_priv : NULL;
+ if (mvif)
+ mlink = rcu_dereference(mvif->mt76.link[link_id]);
+
+ if (mlink) {
+ omac_idx = mlink->omac_idx;
+ wmm_idx = mlink->wmm_idx;
+ band_idx = mlink->band_idx;
}
if (inband_disc) {
@@ -891,7 +948,10 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
val |= MT_TXD5_TX_STATUS_HOST;
txwi[5] = cpu_to_le32(val);
- val = MT_TXD6_DIS_MAT | MT_TXD6_DAS;
+ val = MT_TXD6_DAS;
+ if (q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0)
+ val |= MT_TXD6_DIS_MAT;
+
if (is_mt7996(&dev->mt76))
val |= FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
else if (is_8023 || !ieee80211_is_mgmt(hdr->frame_control))
@@ -903,23 +963,25 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
if (is_8023)
mt7996_mac_write_txwi_8023(dev, txwi, skb, wcid);
else
- mt7996_mac_write_txwi_80211(dev, txwi, skb, key);
+ mt7996_mac_write_txwi_80211(dev, txwi, skb, key, wcid);
if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
bool mcast = ieee80211_is_data(hdr->frame_control) &&
is_multicast_ether_addr(hdr->addr1);
u8 idx = MT7996_BASIC_RATES_TBL;
- if (mvif) {
- if (mcast && mvif->mcast_rates_idx)
- idx = mvif->mcast_rates_idx;
- else if (beacon && mvif->beacon_rates_idx)
- idx = mvif->beacon_rates_idx;
+ if (mlink) {
+ if (mcast && mlink->mcast_rates_idx)
+ idx = mlink->mcast_rates_idx;
+ else if (beacon && mlink->beacon_rates_idx)
+ idx = mlink->beacon_rates_idx;
else
- idx = mvif->basic_rates_idx;
+ idx = mlink->basic_rates_idx;
}
val = FIELD_PREP(MT_TXD6_TX_RATE, idx) | MT_TXD6_FIXED_BW;
+ if (mcast)
+ val |= MT_TXD6_DIS_MAT;
txwi[6] |= cpu_to_le32(val);
txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
}
@@ -984,8 +1046,14 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (vif) {
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt76_vif_link *mlink = NULL;
+
+ if (wcid->offchannel)
+ mlink = rcu_dereference(mvif->mt76.offchannel_link);
+ if (!mlink)
+ mlink = &mvif->deflink.mt76;
- txp->fw.bss_idx = mvif->deflink.mt76.idx;
+ txp->fw.bss_idx = mlink->idx;
}
txp->fw.token = cpu_to_le16(id);
@@ -1027,9 +1095,10 @@ u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
static void
mt7996_tx_check_aggr(struct ieee80211_sta *sta, struct sk_buff *skb)
{
- struct mt7996_sta *msta;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
+ struct mt7996_sta_link *msta_link;
+ struct mt7996_sta *msta;
u16 fc, tid;
if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
@@ -1058,7 +1127,9 @@ mt7996_tx_check_aggr(struct ieee80211_sta *sta, struct sk_buff *skb)
return;
msta = (struct mt7996_sta *)sta->drv_priv;
- if (!test_and_set_bit(tid, &msta->wcid.ampdu_state))
+ msta_link = &msta->deflink;
+
+ if (!test_and_set_bit(tid, &msta_link->wcid.ampdu_state))
ieee80211_start_tx_ba_session(sta, tid, 0);
}
@@ -1136,7 +1207,7 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
*/
info = le32_to_cpu(*cur_info);
if (info & MT_TXFREE_INFO_PAIR) {
- struct mt7996_sta *msta;
+ struct mt7996_sta_link *msta_link;
u16 idx;
idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
@@ -1145,8 +1216,9 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
if (!sta)
continue;
- msta = container_of(wcid, struct mt7996_sta, wcid);
- mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
+ msta_link = container_of(wcid, struct mt7996_sta_link,
+ wcid);
+ mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);
continue;
} else if (info & MT_TXFREE_INFO_HEADER) {
u32 tx_retries = 0, tx_failed = 0;
@@ -1230,7 +1302,7 @@ mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid,
struct ieee80211_sta *sta;
u8 tid;
- sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
+ sta = wcid_to_sta(wcid);
tid = FIELD_GET(MT_TXS0_TID, txs);
ieee80211_refresh_tx_agg_session_timer(sta, tid);
}
@@ -1344,7 +1416,7 @@ out:
static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
{
- struct mt7996_sta *msta = NULL;
+ struct mt7996_sta_link *msta_link;
struct mt76_wcid *wcid;
__le32 *txs_data = data;
u16 wcidx;
@@ -1365,14 +1437,13 @@ static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
if (!wcid)
goto out;
- msta = container_of(wcid, struct mt7996_sta, wcid);
-
mt7996_mac_add_txs_skb(dev, wcid, pid, txs_data);
if (!wcid->sta)
goto out;
- mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
+ msta_link = container_of(wcid, struct mt7996_sta_link, wcid);
+ mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);
out:
rcu_read_unlock();
@@ -1399,7 +1470,7 @@ bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len)
mt7996_mac_tx_free(dev, data, len);
return false;
case PKT_TYPE_TXS:
- for (rxd += 4; rxd + 8 <= end; rxd += 8)
+ for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
mt7996_mac_add_txs(dev, rxd);
return false;
case PKT_TYPE_RX_FW_MONITOR:
@@ -1442,7 +1513,7 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
mt7996_mcu_rx_event(dev, skb);
break;
case PKT_TYPE_TXS:
- for (rxd += 4; rxd + 8 <= end; rxd += 8)
+ for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
mt7996_mac_add_txs(dev, rxd);
dev_kfree_skb(skb);
break;
@@ -2239,38 +2310,70 @@ void mt7996_mac_update_stats(struct mt7996_phy *phy)
void mt7996_mac_sta_rc_work(struct work_struct *work)
{
struct mt7996_dev *dev = container_of(work, struct mt7996_dev, rc_work);
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_link_sta *link_sta;
+ struct mt7996_sta_link *msta_link;
+ struct mt7996_vif_link *link;
+ struct mt76_vif_link *mlink;
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
struct mt7996_sta *msta;
- u32 changed;
+ struct mt7996_vif *mvif;
LIST_HEAD(list);
+ u32 changed;
+ u8 link_id;
+ rcu_read_lock();
spin_lock_bh(&dev->mt76.sta_poll_lock);
list_splice_init(&dev->sta_rc_list, &list);
while (!list_empty(&list)) {
- msta = list_first_entry(&list, struct mt7996_sta, rc_list);
- list_del_init(&msta->rc_list);
- changed = msta->changed;
- msta->changed = 0;
+ msta_link = list_first_entry(&list, struct mt7996_sta_link,
+ rc_list);
+ list_del_init(&msta_link->rc_list);
+
+ changed = msta_link->changed;
+ msta_link->changed = 0;
+
+ sta = wcid_to_sta(&msta_link->wcid);
+ link_id = msta_link->wcid.link_id;
+ msta = msta_link->sta;
+ mvif = msta->vif;
+ vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv);
+
+ mlink = rcu_dereference(mvif->mt76.link[link_id]);
+ if (!mlink)
+ continue;
+
+ link_sta = rcu_dereference(sta->link[link_id]);
+ if (!link_sta)
+ continue;
+
+ link_conf = rcu_dereference(vif->link_conf[link_id]);
+ if (!link_conf)
+ continue;
+
spin_unlock_bh(&dev->mt76.sta_poll_lock);
- sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
- vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
+ link = (struct mt7996_vif_link *)mlink;
if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
IEEE80211_RC_NSS_CHANGED |
IEEE80211_RC_BW_CHANGED))
- mt7996_mcu_add_rate_ctrl(dev, vif, sta, true);
+ mt7996_mcu_add_rate_ctrl(dev, vif, link_conf,
+ link_sta, link, msta_link,
+ true);
if (changed & IEEE80211_RC_SMPS_CHANGED)
- mt7996_mcu_set_fixed_field(dev, vif, sta, NULL,
+ mt7996_mcu_set_fixed_field(dev, link_sta, link,
+ msta_link, NULL,
RATE_PARAM_MMPS_UPDATE);
spin_lock_bh(&dev->mt76.sta_poll_lock);
}
spin_unlock_bh(&dev->mt76.sta_poll_lock);
+ rcu_read_unlock();
}
void mt7996_mac_work(struct work_struct *work)
@@ -2538,7 +2641,7 @@ static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
}
static bool
-mt7996_mac_twt_param_equal(struct mt7996_sta *msta,
+mt7996_mac_twt_param_equal(struct mt7996_sta_link *msta_link,
struct ieee80211_twt_params *twt_agrt)
{
u16 type = le16_to_cpu(twt_agrt->req_type);
@@ -2549,10 +2652,10 @@ mt7996_mac_twt_param_equal(struct mt7996_sta *msta,
for (i = 0; i < MT7996_MAX_STA_TWT_AGRT; i++) {
struct mt7996_twt_flow *f;
- if (!(msta->twt.flowid_mask & BIT(i)))
+ if (!(msta_link->twt.flowid_mask & BIT(i)))
continue;
- f = &msta->twt.flow[i];
+ f = &msta_link->twt.flow[i];
if (f->duration == twt_agrt->min_twt_dur &&
f->mantissa == twt_agrt->mantissa &&
f->exp == exp &&
@@ -2572,6 +2675,7 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
+ struct mt7996_sta_link *msta_link = &msta->deflink;
u16 req_type = le16_to_cpu(twt_agrt->req_type);
enum ieee80211_twt_setup_cmd sta_setup_cmd;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
@@ -2586,7 +2690,8 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
if (dev->twt.n_agrt == MT7996_MAX_TWT_AGRT)
goto unlock;
- if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
+ if (hweight8(msta_link->twt.flowid_mask) ==
+ ARRAY_SIZE(msta_link->twt.flow))
goto unlock;
if (twt_agrt->min_twt_dur < MT7996_MIN_TWT_DUR) {
@@ -2595,10 +2700,10 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
goto unlock;
}
- if (mt7996_mac_twt_param_equal(msta, twt_agrt))
+ if (mt7996_mac_twt_param_equal(msta_link, twt_agrt))
goto unlock;
- flowid = ffs(~msta->twt.flowid_mask) - 1;
+ flowid = ffs(~msta_link->twt.flowid_mask) - 1;
twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
twt_agrt->req_type |= le16_encode_bits(flowid,
IEEE80211_TWT_REQTYPE_FLOWID);
@@ -2607,10 +2712,10 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);
- flow = &msta->twt.flow[flowid];
+ flow = &msta_link->twt.flow[flowid];
memset(flow, 0, sizeof(*flow));
INIT_LIST_HEAD(&flow->list);
- flow->wcid = msta->wcid.idx;
+ flow->wcid = msta_link->wcid.idx;
flow->table_id = table_id;
flow->id = flowid;
flow->duration = twt_agrt->min_twt_dur;
@@ -2628,7 +2733,7 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
flow->sched = true;
flow->start_tsf = mt7996_mac_twt_sched_list_add(dev, flow);
- curr_tsf = __mt7996_get_tsf(hw, msta->vif);
+ curr_tsf = __mt7996_get_tsf(hw, &msta->vif->deflink);
div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
flow_tsf = curr_tsf + interval - rem;
twt_agrt->twt = cpu_to_le64(flow_tsf);
@@ -2637,12 +2742,13 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
}
flow->tsf = le64_to_cpu(twt_agrt->twt);
- if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
+ if (mt7996_mcu_twt_agrt_update(dev, &msta->vif->deflink, flow,
+ MCU_TWT_AGRT_ADD))
goto unlock;
setup_cmd = TWT_SETUP_CMD_ACCEPT;
dev->twt.table_mask |= BIT(table_id);
- msta->twt.flowid_mask |= BIT(flowid);
+ msta_link->twt.flowid_mask |= BIT(flowid);
dev->twt.n_agrt++;
unlock:
@@ -2655,26 +2761,26 @@ out:
}
void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
- struct mt7996_sta *msta,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link,
u8 flowid)
{
struct mt7996_twt_flow *flow;
lockdep_assert_held(&dev->mt76.mutex);
- if (flowid >= ARRAY_SIZE(msta->twt.flow))
+ if (flowid >= ARRAY_SIZE(msta_link->twt.flow))
return;
- if (!(msta->twt.flowid_mask & BIT(flowid)))
+ if (!(msta_link->twt.flowid_mask & BIT(flowid)))
return;
- flow = &msta->twt.flow[flowid];
- if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow,
- MCU_TWT_AGRT_DELETE))
+ flow = &msta_link->twt.flow[flowid];
+ if (mt7996_mcu_twt_agrt_update(dev, link, flow, MCU_TWT_AGRT_DELETE))
return;
list_del_init(&flow->list);
- msta->twt.flowid_mask &= ~BIT(flowid);
+ msta_link->twt.flowid_mask &= ~BIT(flowid);
dev->twt.table_mask &= ~BIT(flow->table_id);
dev->twt.n_agrt--;
}
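Both the setup and teardown paths above track active TWT agreements per link in a small flowid bitmask: setup picks the first clear bit with ffs(~flowid_mask) - 1, teardown clears it again. A self-contained sketch of that bookkeeping, with an assumed flow count standing in for MT7996_MAX_STA_TWT_AGRT:

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define MAX_TWT_FLOWS	8	/* illustrative stand-in */

static unsigned char flowid_mask;

/* returns the allocated flow id, or -1 if all ids are in use */
static int twt_flow_alloc(void)
{
	int flowid;

	if (flowid_mask == 0xff)
		return -1;

	flowid = ffs(~flowid_mask & 0xff) - 1;
	if (flowid >= MAX_TWT_FLOWS)
		return -1;

	flowid_mask |= 1u << flowid;
	return flowid;
}

static void twt_flow_teardown(int flowid)
{
	if (flowid < 0 || flowid >= MAX_TWT_FLOWS)
		return;
	if (!(flowid_mask & (1u << flowid)))
		return;		/* not currently allocated */

	flowid_mask &= ~(1u << flowid);
}

int main(void)
{
	int a = twt_flow_alloc();
	int b = twt_flow_alloc();

	printf("allocated flow ids %d and %d\n", a, b);
	twt_flow_teardown(a);
	printf("freed %d, next alloc returns %d\n", a, twt_flow_alloc());
	return 0;
}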
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
index 69dd565d8319..91c64e3a0860 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -158,58 +158,101 @@ mt7996_init_bitrate_mask(struct ieee80211_vif *vif, struct mt7996_vif_link *mlin
static int
mt7996_set_hw_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct mt7996_vif_link *mlink, struct ieee80211_key_conf *key)
+ struct ieee80211_key_conf *key)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_sta *msta = sta ? (struct mt7996_sta *)sta->drv_priv :
- &mlink->sta;
- struct mt76_wcid *wcid = &msta->wcid;
- u8 *wcid_keyidx = &wcid->hw_key_idx;
- struct mt7996_phy *phy;
int idx = key->keyidx;
+ unsigned int link_id;
+ unsigned long links;
+
+ if (key->link_id >= 0)
+ links = BIT(key->link_id);
+ else if (sta && sta->valid_links)
+ links = sta->valid_links;
+ else if (vif->valid_links)
+ links = vif->valid_links;
+ else
+ links = BIT(0);
- phy = mt7996_vif_link_phy(mlink);
- if (!phy)
- return -EINVAL;
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct mt7996_sta_link *msta_link;
+ struct mt7996_vif_link *link;
+ u8 *wcid_keyidx;
+ int err;
- if (sta && !wcid->sta)
- return -EOPNOTSUPP;
+ link = mt7996_vif_link(dev, vif, link_id);
+ if (!link)
+ continue;
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_AES_CMAC:
- case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- if (key->keyidx == 6 || key->keyidx == 7) {
- wcid_keyidx = &wcid->hw_key_idx2;
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
+ if (sta) {
+ struct mt7996_sta *msta;
+
+ msta = (struct mt7996_sta *)sta->drv_priv;
+ msta_link = mt76_dereference(msta->link[link_id],
+ &dev->mt76);
+ if (!msta_link)
+ continue;
+
+ if (!msta_link->wcid.sta)
+ return -EOPNOTSUPP;
+ } else {
+ msta_link = &link->msta_link;
+ }
+ wcid_keyidx = &msta_link->wcid.hw_key_idx;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ if (key->keyidx == 6 || key->keyidx == 7) {
+ wcid_keyidx = &msta_link->wcid.hw_key_idx2;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
+ }
+ break;
+ default:
+ break;
}
- break;
- default:
- break;
- }
- if (cmd == SET_KEY && !sta && !mlink->mt76.cipher) {
- mlink->mt76.cipher = mt76_connac_mcu_get_cipher(key->cipher);
- mt7996_mcu_add_bss_info(phy, vif, &vif->bss_conf, &mlink->mt76, true);
- }
+ if (cmd == SET_KEY && !sta && !link->mt76.cipher) {
+ struct ieee80211_bss_conf *link_conf;
- if (cmd == SET_KEY) {
- *wcid_keyidx = idx;
- } else {
- if (idx == *wcid_keyidx)
- *wcid_keyidx = -1;
- return 0;
- }
+ link_conf = link_conf_dereference_protected(vif,
+ link_id);
+ if (!link_conf)
+ link_conf = &vif->bss_conf;
+
+ link->mt76.cipher =
+ mt76_connac_mcu_get_cipher(key->cipher);
+ mt7996_mcu_add_bss_info(link->phy, vif, link_conf,
+ &link->mt76, msta_link, true);
+ }
- mt76_wcid_key_setup(&dev->mt76, wcid, key);
+ if (cmd == SET_KEY) {
+ *wcid_keyidx = idx;
+ } else {
+ if (idx == *wcid_keyidx)
+ *wcid_keyidx = -1;
+ continue;
+ }
- if (key->keyidx == 6 || key->keyidx == 7)
- return mt7996_mcu_bcn_prot_enable(dev, vif, key);
+ mt76_wcid_key_setup(&dev->mt76, &msta_link->wcid, key);
+
+ if (key->keyidx == 6 || key->keyidx == 7) {
+ err = mt7996_mcu_bcn_prot_enable(dev, link,
+ msta_link, key);
+ if (err)
+ return err;
+ }
+
+ err = mt7996_mcu_add_key(&dev->mt76, vif, key,
+ MCU_WMWA_UNI_CMD(STA_REC_UPDATE),
+ &msta_link->wcid, cmd);
+ if (err)
+ return err;
+ }
- return mt7996_mcu_add_key(&dev->mt76, vif, key,
- MCU_WMWA_UNI_CMD(STA_REC_UPDATE),
- &msta->wcid, cmd);
+ return 0;
}
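The rewritten key path above first derives the set of links the key applies to (an explicit key link id wins, then the station's valid links, then the interface's, falling back to link 0) and then walks that bitmap with for_each_set_bit(). A small standalone sketch of the same precedence and iteration, using plain masks rather than mac80211 objects:

#include <stdio.h>

#define MAX_LINKS	15	/* stand-in for IEEE80211_MLD_MAX_NUM_LINKS */

/* Pick the set of links a key applies to, mirroring the precedence
 * used above; all parameters here are plain masks for illustration. */
static unsigned long key_links(int key_link_id, unsigned long sta_links,
			       unsigned long vif_links)
{
	if (key_link_id >= 0)
		return 1ul << key_link_id;
	if (sta_links)
		return sta_links;
	if (vif_links)
		return vif_links;
	return 1ul << 0;
}

int main(void)
{
	unsigned long links = key_links(-1, 0x5, 0x7);
	unsigned int link_id;

	/* open-coded equivalent of for_each_set_bit() */
	for (link_id = 0; link_id < MAX_LINKS; link_id++)
		if (links & (1ul << link_id))
			printf("program key on link %u\n", link_id);
	return 0;
}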
static void
@@ -217,12 +260,10 @@ mt7996_key_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct ieee80211_key_conf *key,
void *data)
{
- struct mt7996_vif_link *mlink = data;
-
if (sta)
return;
- WARN_ON(mt7996_set_hw_key(hw, SET_KEY, vif, NULL, mlink, key));
+ WARN_ON(mt7996_set_hw_key(hw, SET_KEY, vif, NULL, key));
}
int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
@@ -230,6 +271,8 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
struct mt76_vif_link *mlink)
{
struct mt7996_vif_link *link = container_of(mlink, struct mt7996_vif_link, mt76);
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_sta_link *msta_link = &link->msta_link;
struct mt7996_phy *phy = mphy->priv;
struct mt7996_dev *dev = phy->dev;
u8 band_idx = phy->mt76->band_idx;
@@ -248,7 +291,8 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
mlink->omac_idx = idx;
mlink->band_idx = band_idx;
mlink->wmm_idx = vif->type == NL80211_IFTYPE_AP ? 0 : 3;
- mlink->wcid = &link->sta.wcid;
+ mlink->wcid = &msta_link->wcid;
+ mlink->wcid->offchannel = mlink->offchannel;
ret = mt7996_mcu_add_dev_info(phy, vif, link_conf, mlink, true);
if (ret)
@@ -259,10 +303,11 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
idx = MT7996_WTBL_RESERVED - mlink->idx;
- INIT_LIST_HEAD(&link->sta.rc_list);
- link->sta.wcid.idx = idx;
- link->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
- mt76_wcid_init(&link->sta.wcid, band_idx);
+ INIT_LIST_HEAD(&msta_link->rc_list);
+ msta_link->wcid.idx = idx;
+ msta_link->wcid.link_id = link_conf->link_id;
+ msta_link->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ mt76_wcid_init(&msta_link->wcid, band_idx);
mt7996_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
@@ -283,15 +328,19 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
mt7996_init_bitrate_mask(vif, link);
- mt7996_mcu_add_bss_info(phy, vif, link_conf, mlink, true);
+ mt7996_mcu_add_bss_info(phy, vif, link_conf, mlink, msta_link, true);
/* defer the first STA_REC of BMC entry to BSS_CHANGED_BSSID for STA
* interface, since firmware only records BSSID when the entry is new
*/
if (vif->type != NL80211_IFTYPE_STATION)
- mt7996_mcu_add_sta(dev, vif, mlink, NULL, CONN_STATE_PORT_SECURE, true);
- rcu_assign_pointer(dev->mt76.wcid[idx], &link->sta.wcid);
+ mt7996_mcu_add_sta(dev, link_conf, NULL, link, NULL,
+ CONN_STATE_PORT_SECURE, true);
+ rcu_assign_pointer(dev->mt76.wcid[idx], &msta_link->wcid);
- ieee80211_iter_keys(mphy->hw, vif, mt7996_key_iter, link);
+ ieee80211_iter_keys(mphy->hw, vif, mt7996_key_iter, NULL);
+
+ if (mvif->mt76.deflink_id == IEEE80211_LINK_UNSPECIFIED)
+ mvif->mt76.deflink_id = link_conf->link_id;
return 0;
}
@@ -301,29 +350,42 @@ void mt7996_vif_link_remove(struct mt76_phy *mphy, struct ieee80211_vif *vif,
struct mt76_vif_link *mlink)
{
struct mt7996_vif_link *link = container_of(mlink, struct mt7996_vif_link, mt76);
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_sta_link *msta_link = &link->msta_link;
struct mt7996_phy *phy = mphy->priv;
struct mt7996_dev *dev = phy->dev;
- struct mt7996_sta *msta;
- int idx;
+ int idx = msta_link->wcid.idx;
- msta = &link->sta;
- idx = msta->wcid.idx;
- mt7996_mcu_add_sta(dev, vif, mlink, NULL, CONN_STATE_DISCONNECT, false);
- mt7996_mcu_add_bss_info(phy, vif, link_conf, mlink, false);
+ mt7996_mcu_add_sta(dev, link_conf, NULL, link, NULL,
+ CONN_STATE_DISCONNECT, false);
+ mt7996_mcu_add_bss_info(phy, vif, link_conf, mlink, msta_link, false);
mt7996_mcu_add_dev_info(phy, vif, link_conf, mlink, false);
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
+ if (mvif->mt76.deflink_id == link_conf->link_id) {
+ struct ieee80211_bss_conf *iter;
+ unsigned int link_id;
+
+ mvif->mt76.deflink_id = IEEE80211_LINK_UNSPECIFIED;
+ for_each_vif_active_link(vif, iter, link_id) {
+ if (link_id != IEEE80211_LINK_UNSPECIFIED) {
+ mvif->mt76.deflink_id = link_id;
+ break;
+ }
+ }
+ }
+
dev->mt76.vif_mask &= ~BIT_ULL(mlink->idx);
phy->omac_mask &= ~BIT_ULL(mlink->omac_idx);
spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (!list_empty(&msta->wcid.poll_list))
- list_del_init(&msta->wcid.poll_list);
+ if (!list_empty(&msta_link->wcid.poll_list))
+ list_del_init(&msta_link->wcid.poll_list);
spin_unlock_bh(&dev->mt76.sta_poll_lock);
- mt76_wcid_cleanup(&dev->mt76, &msta->wcid);
+ mt76_wcid_cleanup(&dev->mt76, &msta_link->wcid);
}
static void mt7996_phy_set_rxfilter(struct mt7996_phy *phy)
@@ -399,6 +461,7 @@ static int mt7996_add_interface(struct ieee80211_hw *hw,
mt76_vif_init(vif, &mvif->mt76);
vif->offload_flags |= IEEE80211_OFFLOAD_ENCAP_4ADDR;
+ mvif->mt76.deflink_id = IEEE80211_LINK_UNSPECIFIED;
out:
mutex_unlock(&dev->mt76.mutex);
@@ -480,7 +543,6 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_vif_link *mlink = &mvif->deflink;
int err;
/* The hardware does not support per-STA RX GTK, fallback
@@ -515,11 +577,11 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
- if (!mt7996_vif_link_phy(mlink))
- return 0; /* defer until after link add */
+ if (!mt7996_vif_link_phy(&mvif->deflink))
+ return 0; /* defer until after link add */
mutex_lock(&dev->mt76.mutex);
- err = mt7996_set_hw_key(hw, cmd, vif, sta, mlink, key);
+ err = mt7996_set_hw_key(hw, cmd, vif, sta, key);
mutex_unlock(&dev->mt76.mutex);
return err;
@@ -601,6 +663,33 @@ static void mt7996_configure_filter(struct ieee80211_hw *hw,
mutex_unlock(&dev->mt76.mutex);
}
+static int
+mt7996_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ unsigned int link_id, int *dbm)
+{
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_phy *phy = mt7996_vif_link_phy(&mvif->deflink);
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct wireless_dev *wdev;
+ int n_chains, delta, i;
+
+ if (!phy) {
+ wdev = ieee80211_vif_to_wdev(vif);
+ for (i = 0; i < hw->wiphy->n_radio; i++)
+ if (wdev->radio_mask & BIT(i))
+ phy = dev->radio_phy[i];
+
+ if (!phy)
+ return -EINVAL;
+ }
+
+ n_chains = hweight16(phy->mt76->chainmask);
+ delta = mt76_tx_power_nss_delta(n_chains);
+ *dbm = DIV_ROUND_UP(phy->mt76->txpower_cur + delta, 2);
+
+ return 0;
+}
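mt7996_get_txpower() above takes hweight16() of the chain mask as the stream count, applies the mt76 per-NSS delta, and halves the sum because mt76 appears to keep txpower_cur in 0.5 dB units. A rough standalone sketch of that arithmetic; the delta table is only an assumed stand-in for mt76_tx_power_nss_delta():

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* popcount of the 16-bit chain mask, i.e. number of active TX chains */
static int hweight16_sketch(unsigned int mask)
{
	int n = 0;

	for (mask &= 0xffff; mask; mask >>= 1)
		n += mask & 1;
	return n;
}

/* assumed per-stream back-off table, expressed in 0.5 dB units */
static int nss_delta(int nss)
{
	static const int delta[] = { 0, -6, -10, -12 };

	return (nss >= 1 && nss <= 4) ? delta[nss - 1] : 0;
}

int main(void)
{
	int txpower_cur = 44;			/* 22 dBm in 0.5 dB units */
	int n_chains = hweight16_sketch(0x0f);	/* 4 active chains */
	int dbm = DIV_ROUND_UP(txpower_cur + nss_delta(n_chains), 2);

	printf("reported tx power: %d dBm\n", dbm);
	return 0;
}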
+
static u8
mt7996_get_rates_table(struct mt7996_phy *phy, struct ieee80211_bss_conf *conf,
bool beacon, bool mcast)
@@ -630,12 +719,11 @@ mt7996_get_rates_table(struct mt7996_phy *phy, struct ieee80211_bss_conf *conf,
}
static void
-mt7996_update_mu_group(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+mt7996_update_mu_group(struct ieee80211_hw *hw, struct mt7996_vif_link *link,
struct ieee80211_bss_conf *info)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- u8 band = mvif->deflink.mt76.band_idx;
+ u8 band = link->mt76.band_idx;
u32 *mu;
mu = (u32 *)info->mu_group.membership;
@@ -649,23 +737,56 @@ mt7996_update_mu_group(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt76_wr(dev, MT_WF_PHYRX_BAND_GID_TAB_POS3(band), mu[3]);
}
-static void mt7996_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info,
- u64 changed)
+static void
+mt7996_vif_cfg_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u64 changed)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt76_vif_link *mvif;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if ((changed & BSS_CHANGED_ASSOC) && vif->cfg.assoc) {
+ struct ieee80211_bss_conf *link_conf;
+ unsigned long link_id;
+
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ struct mt7996_vif_link *link;
+
+ link = mt7996_vif_link(dev, vif, link_id);
+ if (!link)
+ continue;
+
+ if (!link->phy)
+ continue;
+
+ mt7996_mcu_add_bss_info(link->phy, vif, link_conf,
+ &link->mt76, &link->msta_link,
+ true);
+ mt7996_mcu_add_sta(dev, link_conf, NULL, link, NULL,
+ CONN_STATE_PORT_SECURE,
+ !!(changed & BSS_CHANGED_BSSID));
+ }
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+static void
+mt7996_link_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u64 changed)
+{
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_vif_link *link;
struct mt7996_phy *phy;
struct mt76_phy *mphy;
mutex_lock(&dev->mt76.mutex);
- mvif = mt76_vif_conf_link(&dev->mt76, vif, info);
- if (!mvif)
+ link = mt7996_vif_conf_link(dev, vif, info);
+ if (!link)
goto out;
- mphy = mt76_vif_link_phy(mvif);
+ mphy = mt76_vif_link_phy(&link->mt76);
if (!mphy)
goto out;
@@ -675,16 +796,14 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw,
* and then peer references bss_info_rfch to set bandwidth cap.
*/
if ((changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid)) ||
- (changed & BSS_CHANGED_ASSOC && vif->cfg.assoc) ||
(changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon)) {
- mt7996_mcu_add_bss_info(phy, vif, info, mvif, true);
- mt7996_mcu_add_sta(dev, vif, mvif, NULL, CONN_STATE_PORT_SECURE,
+ mt7996_mcu_add_bss_info(phy, vif, info, &link->mt76,
+ &link->msta_link, true);
+ mt7996_mcu_add_sta(dev, info, NULL, link, NULL,
+ CONN_STATE_PORT_SECURE,
!!(changed & BSS_CHANGED_BSSID));
}
- if (changed & BSS_CHANGED_ERP_CTS_PROT)
- mt7996_mac_enable_rtscts(dev, vif, info->use_cts_prot);
-
if (changed & BSS_CHANGED_ERP_SLOT) {
int slottime = info->use_short_slot ? 9 : 20;
@@ -695,11 +814,11 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_MCAST_RATE)
- mvif->mcast_rates_idx =
+ link->mt76.mcast_rates_idx =
mt7996_get_rates_table(phy, info, false, true);
if (changed & BSS_CHANGED_BASIC_RATES)
- mvif->basic_rates_idx =
+ link->mt76.basic_rates_idx =
mt7996_get_rates_table(phy, info, false, false);
/* ensure that txcmd_mode is enabled after bss_info */
@@ -707,19 +826,19 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw,
mt7996_mcu_set_tx(dev, vif, info);
if (changed & BSS_CHANGED_HE_OBSS_PD)
- mt7996_mcu_add_obss_spr(phy, vif, &info->he_obss_pd);
+ mt7996_mcu_add_obss_spr(phy, link, &info->he_obss_pd);
if (changed & BSS_CHANGED_HE_BSS_COLOR) {
if ((vif->type == NL80211_IFTYPE_AP &&
- mvif->omac_idx <= HW_BSSID_MAX) ||
+ link->mt76.omac_idx <= HW_BSSID_MAX) ||
vif->type == NL80211_IFTYPE_STATION)
- mt7996_mcu_update_bss_color(dev, mvif,
+ mt7996_mcu_update_bss_color(dev, &link->mt76,
&info->he_bss_color);
}
if (changed & (BSS_CHANGED_BEACON |
BSS_CHANGED_BEACON_ENABLED)) {
- mvif->beacon_rates_idx =
+ link->mt76.beacon_rates_idx =
mt7996_get_rates_table(phy, info, true, false);
mt7996_mcu_add_beacon(hw, vif, info);
@@ -727,10 +846,10 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw,
if (changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
BSS_CHANGED_FILS_DISCOVERY))
- mt7996_mcu_beacon_inband_discov(dev, vif, changed);
+ mt7996_mcu_beacon_inband_discov(dev, info, link, changed);
if (changed & BSS_CHANGED_MU_GROUPS)
- mt7996_update_mu_group(hw, vif, info);
+ mt7996_update_mu_group(hw, link, info);
if (changed & BSS_CHANGED_TXPOWER &&
info->txpower != phy->txpower) {
@@ -754,96 +873,322 @@ mt7996_channel_switch_beacon(struct ieee80211_hw *hw,
mutex_unlock(&dev->mt76.mutex);
}
-int mt7996_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+static int
+mt7996_mac_sta_init_link(struct mt7996_dev *dev,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta,
+ struct mt7996_vif_link *link, unsigned int link_id)
{
- struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ struct ieee80211_sta *sta = link_sta->sta;
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_vif_link *link = &mvif->deflink;
- u8 band_idx = link->phy->mt76->band_idx;
+ struct mt7996_phy *phy = link->phy;
+ struct mt7996_sta_link *msta_link;
int idx;
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7996_WTBL_STA);
if (idx < 0)
return -ENOSPC;
- INIT_LIST_HEAD(&msta->rc_list);
- INIT_LIST_HEAD(&msta->wcid.poll_list);
- msta->vif = mvif;
- msta->wcid.sta = 1;
- msta->wcid.idx = idx;
- msta->wcid.phy_idx = band_idx;
+ if (msta->deflink_id == IEEE80211_LINK_UNSPECIFIED) {
+ int i;
- ewma_avg_signal_init(&msta->avg_ack_signal);
+ msta_link = &msta->deflink;
+ msta->deflink_id = link_id;
- mt7996_mac_wtbl_update(dev, idx,
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+ struct mt76_txq *mtxq;
+
+ if (!sta->txq[i])
+ continue;
+
+ mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
+ mtxq->wcid = idx;
+ }
+ } else {
+ msta_link = kzalloc(sizeof(*msta_link), GFP_KERNEL);
+ if (!msta_link)
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&msta_link->rc_list);
+ INIT_LIST_HEAD(&msta_link->wcid.poll_list);
+ msta_link->sta = msta;
+ msta_link->wcid.sta = 1;
+ msta_link->wcid.idx = idx;
+ msta_link->wcid.link_id = link_id;
+
+ ewma_avg_signal_init(&msta_link->avg_ack_signal);
+ ewma_signal_init(&msta_link->wcid.rssi);
+
+ rcu_assign_pointer(msta->link[link_id], msta_link);
+
+ mt7996_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ mt7996_mcu_add_sta(dev, link_conf, link_sta, link, msta_link,
+ CONN_STATE_DISCONNECT, true);
+
+ rcu_assign_pointer(dev->mt76.wcid[idx], &msta_link->wcid);
+ mt76_wcid_init(&msta_link->wcid, phy->mt76->band_idx);
+
+ return 0;
+}
+
+static void
+mt7996_mac_sta_deinit_link(struct mt7996_dev *dev,
+ struct mt7996_sta_link *msta_link)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(msta_link->wcid.aggr); i++)
+ mt76_rx_aggr_stop(&dev->mt76, &msta_link->wcid, i);
+
+ mt7996_mac_wtbl_update(dev, msta_link->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- mt7996_mcu_add_sta(dev, vif, &link->mt76, sta, CONN_STATE_DISCONNECT,
- true);
+
+ spin_lock_bh(&dev->mt76.sta_poll_lock);
+ if (!list_empty(&msta_link->wcid.poll_list))
+ list_del_init(&msta_link->wcid.poll_list);
+ if (!list_empty(&msta_link->rc_list))
+ list_del_init(&msta_link->rc_list);
+ spin_unlock_bh(&dev->mt76.sta_poll_lock);
+
+ mt76_wcid_cleanup(&dev->mt76, &msta_link->wcid);
+ mt76_wcid_mask_clear(dev->mt76.wcid_mask, msta_link->wcid.idx);
+}
+
+static void
+mt7996_mac_sta_remove_links(struct mt7996_dev *dev, struct ieee80211_sta *sta,
+ unsigned long links)
+{
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt76_dev *mdev = &dev->mt76;
+ unsigned int link_id;
+
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct mt7996_sta_link *msta_link = NULL;
+
+ msta_link = rcu_replace_pointer(msta->link[link_id], msta_link,
+ lockdep_is_held(&mdev->mutex));
+ if (!msta_link)
+ continue;
+
+ mt7996_mac_sta_deinit_link(dev, msta_link);
+ if (msta->deflink_id == link_id) {
+ msta->deflink_id = IEEE80211_LINK_UNSPECIFIED;
+ continue;
+ }
+
+ kfree_rcu(msta_link, rcu_head);
+ }
+}
+
+static int
+mt7996_mac_sta_add_links(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, unsigned long new_links)
+{
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ unsigned int link_id;
+ int err;
+
+ for_each_set_bit(link_id, &new_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_link_sta *link_sta;
+ struct mt7996_vif_link *link;
+
+ if (rcu_access_pointer(msta->link[link_id]))
+ continue;
+
+ link_conf = link_conf_dereference_protected(vif, link_id);
+ if (!link_conf)
+ goto error_unlink;
+
+ link = mt7996_vif_link(dev, vif, link_id);
+ if (!link)
+ goto error_unlink;
+
+ link_sta = link_sta_dereference_protected(sta, link_id);
+ if (!link_sta)
+ goto error_unlink;
+
+ err = mt7996_mac_sta_init_link(dev, link_conf, link_sta, link,
+ link_id);
+ if (err)
+ goto error_unlink;
+ }
return 0;
+
+error_unlink:
+ mt7996_mac_sta_remove_links(dev, sta, new_links);
+
+ return err;
+}
+
+static int
+mt7996_mac_sta_change_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 old_links,
+ u16 new_links)
+{
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ unsigned long add = new_links & ~old_links;
+ unsigned long rem = old_links & ~new_links;
+ int ret;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mt7996_mac_sta_remove_links(dev, sta, rem);
+ ret = mt7996_mac_sta_add_links(dev, vif, sta, add);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
}
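mt7996_mac_sta_change_links() above reduces a link-set change to two bitmasks, removing stale links before adding new ones under the device mutex. The mask arithmetic itself is trivial, as this sketch with made-up example masks shows:

#include <stdio.h>

int main(void)
{
	unsigned short old_links = 0x3;	/* links 0 and 1 currently active */
	unsigned short new_links = 0x6;	/* links 1 and 2 requested */
	unsigned long add = new_links & ~old_links;
	unsigned long rem = old_links & ~new_links;

	/* remove first, then add, matching the order used above */
	printf("remove links 0x%lx, then add links 0x%lx\n", rem, add);
	return 0;
}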
-int mt7996_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, enum mt76_sta_event ev)
+static int
+mt7996_mac_sta_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
+ struct mt76_dev *mdev = mphy->dev;
struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_vif_link *link = &mvif->deflink;
- int i, ret;
+ unsigned long links = sta->mlo ? sta->valid_links : BIT(0);
+ int err;
- switch (ev) {
- case MT76_STA_EVENT_ASSOC:
- ret = mt7996_mcu_add_sta(dev, vif, &link->mt76, sta,
- CONN_STATE_CONNECT, true);
- if (ret)
- return ret;
+ mutex_lock(&mdev->mutex);
- ret = mt7996_mcu_add_rate_ctrl(dev, vif, sta, false);
- if (ret)
- return ret;
+ msta->deflink_id = IEEE80211_LINK_UNSPECIFIED;
+ msta->vif = mvif;
+ err = mt7996_mac_sta_add_links(dev, vif, sta, links);
+ if (!err)
+ mphy->num_sta++;
- msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
- msta->wcid.sta = 1;
+ mutex_unlock(&mdev->mutex);
- return 0;
+ return err;
+}
- case MT76_STA_EVENT_AUTHORIZE:
- return mt7996_mcu_add_sta(dev, vif, &link->mt76, sta,
- CONN_STATE_PORT_SECURE, false);
+static int
+mt7996_mac_sta_event(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev)
+{
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ unsigned long links = sta->valid_links;
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
+
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct ieee80211_bss_conf *link_conf;
+ struct mt7996_sta_link *msta_link;
+ struct mt7996_vif_link *link;
+ int i, err;
+
+ link_conf = link_conf_dereference_protected(vif, link_id);
+ if (!link_conf)
+ continue;
- case MT76_STA_EVENT_DISASSOC:
- for (i = 0; i < ARRAY_SIZE(msta->twt.flow); i++)
- mt7996_mac_twt_teardown_flow(dev, msta, i);
+ link = mt7996_vif_link(dev, vif, link_id);
+ if (!link)
+ continue;
- mt7996_mcu_add_sta(dev, vif, &link->mt76, sta,
- CONN_STATE_DISCONNECT, false);
- msta->wcid.sta_disabled = 1;
- msta->wcid.sta = 0;
+ msta_link = mt76_dereference(msta->link[link_id], &dev->mt76);
+ if (!msta_link)
+ continue;
- return 0;
+ switch (ev) {
+ case MT76_STA_EVENT_ASSOC:
+ err = mt7996_mcu_add_sta(dev, link_conf, link_sta,
+ link, msta_link,
+ CONN_STATE_CONNECT, true);
+ if (err)
+ return err;
+
+ err = mt7996_mcu_add_rate_ctrl(dev, vif, link_conf,
+ link_sta, link,
+ msta_link, false);
+ if (err)
+ return err;
+
+ msta_link->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ msta_link->wcid.sta = 1;
+ break;
+ case MT76_STA_EVENT_AUTHORIZE:
+ err = mt7996_mcu_add_sta(dev, link_conf, link_sta,
+ link, msta_link,
+ CONN_STATE_PORT_SECURE, false);
+ if (err)
+ return err;
+ break;
+ case MT76_STA_EVENT_DISASSOC:
+ for (i = 0; i < ARRAY_SIZE(msta_link->twt.flow); i++)
+ mt7996_mac_twt_teardown_flow(dev, link,
+ msta_link, i);
+
+ if (sta->mlo && links == BIT(link_id)) /* last link */
+ mt7996_mcu_teardown_mld_sta(dev, link,
+ msta_link);
+ else
+ mt7996_mcu_add_sta(dev, link_conf, link_sta,
+ link, msta_link,
+ CONN_STATE_DISCONNECT, false);
+ msta_link->wcid.sta_disabled = 1;
+ msta_link->wcid.sta = 0;
+ links = links & ~BIT(link_id);
+ break;
+ }
}
return 0;
}
-void mt7996_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+static void
+mt7996_mac_sta_remove(struct mt76_phy *mphy, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
+ struct mt76_dev *mdev = mphy->dev;
struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
- struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ unsigned long links = sta->mlo ? sta->valid_links : BIT(0);
- mt7996_mac_wtbl_update(dev, msta->wcid.idx,
- MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ mutex_lock(&mdev->mutex);
+
+ mt7996_mac_sta_remove_links(dev, sta, links);
+ mphy->num_sta--;
+
+ mutex_unlock(&mdev->mutex);
+}
+
+static int
+mt7996_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct mt76_phy *mphy = mt76_vif_phy(hw, vif);
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ enum mt76_sta_event ev;
- spin_lock_bh(&mdev->sta_poll_lock);
- if (!list_empty(&msta->wcid.poll_list))
- list_del_init(&msta->wcid.poll_list);
- if (!list_empty(&msta->rc_list))
- list_del_init(&msta->rc_list);
- spin_unlock_bh(&mdev->sta_poll_lock);
+ if (!mphy)
+ return -EINVAL;
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE)
+ return mt7996_mac_sta_add(mphy, vif, sta);
+
+ if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)
+ mt7996_mac_sta_remove(mphy, vif, sta);
+
+ if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC)
+ ev = MT76_STA_EVENT_ASSOC;
+ else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED)
+ ev = MT76_STA_EVENT_AUTHORIZE;
+ else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH)
+ ev = MT76_STA_EVENT_DISASSOC;
+ else
+ return 0;
+
+ return mt7996_mac_sta_event(dev, vif, sta, ev);
}
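mt7996_sta_state() above collapses mac80211's station state machine into the three mt76 events the per-link handler understands, with add and remove handled as separate calls. A compact sketch of just that transition-to-event mapping, using illustrative enum names rather than the mac80211 ones:

#include <stdio.h>

enum sta_state { NOTEXIST, NONE, AUTH, ASSOC, AUTHORIZED };
enum sta_event { EV_NONE, EV_ASSOC, EV_AUTHORIZE, EV_DISASSOC };

/* map an (old_state, new_state) transition onto the firmware event the
 * handler cares about; station add/remove are handled separately */
static enum sta_event sta_transition_event(enum sta_state old_state,
					   enum sta_state new_state)
{
	if (old_state == AUTH && new_state == ASSOC)
		return EV_ASSOC;
	if (old_state == ASSOC && new_state == AUTHORIZED)
		return EV_AUTHORIZE;
	if (old_state == ASSOC && new_state == AUTH)
		return EV_DISASSOC;
	return EV_NONE;
}

int main(void)
{
	printf("AUTH -> ASSOC maps to event %d\n",
	       sta_transition_event(AUTH, ASSOC));
	return 0;
}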
static void mt7996_tx(struct ieee80211_hw *hw,
@@ -855,12 +1200,18 @@ static void mt7996_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+ u8 link_id = u32_get_bits(info->control.flags,
+ IEEE80211_TX_CTRL_MLO_LINK);
+
+ rcu_read_lock();
if (vif) {
- struct mt7996_vif *mvif;
+ struct mt7996_vif *mvif = (void *)vif->drv_priv;
+ struct mt76_vif_link *mlink;
- mvif = (struct mt7996_vif *)vif->drv_priv;
- wcid = &mvif->deflink.sta.wcid;
+ mlink = rcu_dereference(mvif->mt76.link[link_id]);
+ if (mlink && mlink->wcid)
+ wcid = mlink->wcid;
if (mvif->mt76.roc_phy &&
(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)) {
@@ -872,19 +1223,22 @@ static void mt7996_tx(struct ieee80211_hw *hw,
}
}
- if (control->sta) {
- struct mt7996_sta *sta;
-
- sta = (struct mt7996_sta *)control->sta->drv_priv;
- wcid = &sta->wcid;
- }
-
if (!mphy) {
ieee80211_free_txskb(hw, skb);
- return;
+ goto unlock;
}
+ if (control->sta) {
+ struct mt7996_sta *msta = (void *)control->sta->drv_priv;
+ struct mt7996_sta_link *msta_link;
+
+ msta_link = rcu_dereference(msta->link[link_id]);
+ if (msta_link)
+ wcid = &msta_link->wcid;
+ }
mt76_tx(mphy, control->sta, wcid, skb);
+unlock:
+ rcu_read_unlock();
}
static int mt7996_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
@@ -914,11 +1268,13 @@ mt7996_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action = params->action;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct ieee80211_sta *sta = params->sta;
- struct ieee80211_txq *txq = sta->txq[params->tid];
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct ieee80211_txq *txq = sta->txq[params->tid];
+ struct ieee80211_link_sta *link_sta;
u16 tid = params->tid;
u16 ssn = params->ssn;
struct mt76_txq *mtxq;
+ unsigned int link_id;
int ret = 0;
if (!txq)
@@ -927,38 +1283,61 @@ mt7996_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mtxq = (struct mt76_txq *)txq->drv_priv;
mutex_lock(&dev->mt76.mutex);
- switch (action) {
- case IEEE80211_AMPDU_RX_START:
- mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn,
- params->buf_size);
- ret = mt7996_mcu_add_rx_ba(dev, params, true);
- break;
- case IEEE80211_AMPDU_RX_STOP:
- mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
- ret = mt7996_mcu_add_rx_ba(dev, params, false);
- break;
- case IEEE80211_AMPDU_TX_OPERATIONAL:
- mtxq->aggr = true;
- mtxq->send_bar = false;
- ret = mt7996_mcu_add_tx_ba(dev, params, true);
- break;
- case IEEE80211_AMPDU_TX_STOP_FLUSH:
- case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
- mtxq->aggr = false;
- clear_bit(tid, &msta->wcid.ampdu_state);
- ret = mt7996_mcu_add_tx_ba(dev, params, false);
- break;
- case IEEE80211_AMPDU_TX_START:
- set_bit(tid, &msta->wcid.ampdu_state);
- ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
- break;
- case IEEE80211_AMPDU_TX_STOP_CONT:
- mtxq->aggr = false;
- clear_bit(tid, &msta->wcid.ampdu_state);
- ret = mt7996_mcu_add_tx_ba(dev, params, false);
- ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
+
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct mt7996_sta_link *msta_link;
+ struct mt7996_vif_link *link;
+
+ msta_link = mt76_dereference(msta->link[link_id], &dev->mt76);
+ if (!msta_link)
+ continue;
+
+ link = mt7996_vif_link(dev, vif, link_id);
+ if (!link)
+ continue;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ mt76_rx_aggr_start(&dev->mt76, &msta_link->wcid, tid,
+ ssn, params->buf_size);
+ ret = mt7996_mcu_add_rx_ba(dev, params, link, true);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_rx_aggr_stop(&dev->mt76, &msta_link->wcid, tid);
+ ret = mt7996_mcu_add_rx_ba(dev, params, link, false);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ mtxq->aggr = true;
+ mtxq->send_bar = false;
+ ret = mt7996_mcu_add_tx_ba(dev, params, link,
+ msta_link, true);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ mtxq->aggr = false;
+ clear_bit(tid, &msta_link->wcid.ampdu_state);
+ ret = mt7996_mcu_add_tx_ba(dev, params, link,
+ msta_link, false);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ set_bit(tid, &msta_link->wcid.ampdu_state);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ mtxq->aggr = false;
+ clear_bit(tid, &msta_link->wcid.ampdu_state);
+ ret = mt7996_mcu_add_tx_ba(dev, params, link,
+ msta_link, false);
+ break;
+ }
+
+ if (ret)
+ break;
}
+
+ if (action == IEEE80211_AMPDU_TX_STOP_CONT)
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+
mutex_unlock(&dev->mt76.mutex);
return ret;
@@ -989,10 +1368,10 @@ mt7996_get_stats(struct ieee80211_hw *hw,
return 0;
}
-u64 __mt7996_get_tsf(struct ieee80211_hw *hw, struct mt7996_vif *mvif)
+u64 __mt7996_get_tsf(struct ieee80211_hw *hw, struct mt7996_vif_link *link)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_vif_link_phy(&mvif->deflink);
+ struct mt7996_phy *phy = link->phy;
union {
u64 t64;
u32 t32[2];
@@ -1004,8 +1383,8 @@ u64 __mt7996_get_tsf(struct ieee80211_hw *hw, struct mt7996_vif *mvif)
lockdep_assert_held(&dev->mt76.mutex);
- n = mvif->deflink.mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
- : mvif->deflink.mt76.omac_idx;
+ n = link->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
+ : link->mt76.omac_idx;
/* TSF software read */
mt76_rmw(dev, MT_LPON_TCR(phy->mt76->band_idx, n), MT_LPON_TCR_SW_MODE,
MT_LPON_TCR_SW_READ);
@@ -1023,7 +1402,7 @@ mt7996_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
u64 ret;
mutex_lock(&dev->mt76.mutex);
- ret = __mt7996_get_tsf(hw, mvif);
+ ret = __mt7996_get_tsf(hw, &mvif->deflink);
mutex_unlock(&dev->mt76.mutex);
return ret;
@@ -1035,26 +1414,33 @@ mt7996_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_vif_link_phy(&mvif->deflink);
+ struct mt7996_vif_link *link;
+ struct mt7996_phy *phy;
union {
u64 t64;
u32 t32[2];
} tsf = { .t64 = timestamp, };
u16 n;
- if (!phy)
- return;
-
mutex_lock(&dev->mt76.mutex);
- n = mvif->deflink.mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
- : mvif->deflink.mt76.omac_idx;
+ link = mt7996_vif_link(dev, vif, mvif->mt76.deflink_id);
+ if (!link)
+ goto unlock;
+
+ n = link->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
+ : link->mt76.omac_idx;
+ phy = link->phy;
+ if (!phy)
+ goto unlock;
+
mt76_wr(dev, MT_LPON_UTTR0(phy->mt76->band_idx), tsf.t32[0]);
mt76_wr(dev, MT_LPON_UTTR1(phy->mt76->band_idx), tsf.t32[1]);
/* TSF software overwrite */
mt76_rmw(dev, MT_LPON_TCR(phy->mt76->band_idx, n), MT_LPON_TCR_SW_MODE,
MT_LPON_TCR_SW_WRITE);
+unlock:
mutex_unlock(&dev->mt76.mutex);
}
@@ -1064,26 +1450,33 @@ mt7996_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_vif_link_phy(&mvif->deflink);
+ struct mt7996_vif_link *link;
+ struct mt7996_phy *phy;
union {
u64 t64;
u32 t32[2];
} tsf = { .t64 = timestamp, };
u16 n;
- if (!phy)
- return;
-
mutex_lock(&dev->mt76.mutex);
- n = mvif->deflink.mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
- : mvif->deflink.mt76.omac_idx;
+ link = mt7996_vif_link(dev, vif, mvif->mt76.deflink_id);
+ if (!link)
+ goto unlock;
+
+ phy = link->phy;
+ if (!phy)
+ goto unlock;
+
+ n = link->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
+ : link->mt76.omac_idx;
mt76_wr(dev, MT_LPON_UTTR0(phy->mt76->band_idx), tsf.t32[0]);
mt76_wr(dev, MT_LPON_UTTR1(phy->mt76->band_idx), tsf.t32[1]);
/* TSF software adjust */
mt76_rmw(dev, MT_LPON_TCR(phy->mt76->band_idx, n), MT_LPON_TCR_SW_MODE,
MT_LPON_TCR_SW_ADJUST);
+unlock:
mutex_unlock(&dev->mt76.mutex);
}
@@ -1145,7 +1538,8 @@ static void mt7996_sta_statistics(struct ieee80211_hw *hw,
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
- struct rate_info *txrate = &msta->wcid.rate;
+ struct mt7996_sta_link *msta_link = &msta->deflink;
+ struct rate_info *txrate = &msta_link->wcid.rate;
if (txrate->legacy || txrate->flags) {
if (txrate->legacy) {
@@ -1165,55 +1559,67 @@ static void mt7996_sta_statistics(struct ieee80211_hw *hw,
sinfo->txrate.flags = txrate->flags;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
- sinfo->tx_failed = msta->wcid.stats.tx_failed;
+ sinfo->tx_failed = msta_link->wcid.stats.tx_failed;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
- sinfo->tx_retries = msta->wcid.stats.tx_retries;
+ sinfo->tx_retries = msta_link->wcid.stats.tx_retries;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
- sinfo->ack_signal = (s8)msta->ack_signal;
+ sinfo->ack_signal = (s8)msta_link->ack_signal;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL);
- sinfo->avg_ack_signal = -(s8)ewma_avg_signal_read(&msta->avg_ack_signal);
+ sinfo->avg_ack_signal =
+ -(s8)ewma_avg_signal_read(&msta_link->avg_ack_signal);
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG);
if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
- sinfo->tx_bytes = msta->wcid.stats.tx_bytes;
+ sinfo->tx_bytes = msta_link->wcid.stats.tx_bytes;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64);
- sinfo->rx_bytes = msta->wcid.stats.rx_bytes;
+ sinfo->rx_bytes = msta_link->wcid.stats.rx_bytes;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64);
- sinfo->tx_packets = msta->wcid.stats.tx_packets;
+ sinfo->tx_packets = msta_link->wcid.stats.tx_packets;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
- sinfo->rx_packets = msta->wcid.stats.rx_packets;
+ sinfo->rx_packets = msta_link->wcid.stats.rx_packets;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
}
}
-static void mt7996_sta_rc_work(void *data, struct ieee80211_sta *sta)
+static void mt7996_link_rate_ctrl_update(void *data, struct ieee80211_sta *sta)
{
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
struct mt7996_dev *dev = msta->vif->deflink.phy->dev;
+ struct mt7996_sta_link *msta_link;
u32 *changed = data;
+ rcu_read_lock();
+
+ msta_link = rcu_dereference(msta->link[msta->deflink_id]);
+ if (!msta_link)
+ goto out;
+
spin_lock_bh(&dev->mt76.sta_poll_lock);
- msta->changed |= *changed;
- if (list_empty(&msta->rc_list))
- list_add_tail(&msta->rc_list, &dev->sta_rc_list);
+
+ msta_link->changed |= *changed;
+ if (list_empty(&msta_link->rc_list))
+ list_add_tail(&msta_link->rc_list, &dev->sta_rc_list);
+
spin_unlock_bh(&dev->mt76.sta_poll_lock);
+out:
+ rcu_read_unlock();
}
-static void mt7996_sta_rc_update(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_link_sta *link_sta,
- u32 changed)
+static void mt7996_link_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ u32 changed)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct ieee80211_sta *sta = link_sta->sta;
- mt7996_sta_rc_work(&changed, sta);
+ mt7996_link_rate_ctrl_update(&changed, sta);
ieee80211_queue_work(hw, &dev->rc_work);
}
@@ -1235,7 +1641,8 @@ mt7996_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
* - multiple rates: if it's not in range format, i.e. 0-{7,8,9} for VHT
* then multiple MCS setting (MCS 4,5,6) is not supported.
*/
- ieee80211_iterate_stations_atomic(hw, mt7996_sta_rc_work, &changed);
+ ieee80211_iterate_stations_atomic(hw, mt7996_link_rate_ctrl_update,
+ &changed);
ieee80211_queue_work(hw, &dev->rc_work);
return 0;
@@ -1246,18 +1653,37 @@ static void mt7996_sta_set_4addr(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
bool enabled)
{
- struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
- if (enabled)
- set_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags);
- else
- clear_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags);
+ mutex_lock(&dev->mt76.mutex);
- if (!msta->wcid.sta)
- return;
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct mt7996_sta_link *msta_link;
+ struct mt7996_vif_link *link;
+
+ link = mt7996_vif_link(dev, vif, link_id);
+ if (!link)
+ continue;
- mt7996_mcu_wtbl_update_hdr_trans(dev, vif, sta);
+ msta_link = mt76_dereference(msta->link[link_id], &dev->mt76);
+ if (!msta_link)
+ continue;
+
+ if (enabled)
+ set_bit(MT_WCID_FLAG_4ADDR, &msta_link->wcid.flags);
+ else
+ clear_bit(MT_WCID_FLAG_4ADDR, &msta_link->wcid.flags);
+
+ if (!msta_link->wcid.sta)
+ continue;
+
+ mt7996_mcu_wtbl_update_hdr_trans(dev, vif, link, msta_link);
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
}
static void mt7996_sta_set_decap_offload(struct ieee80211_hw *hw,
@@ -1265,18 +1691,39 @@ static void mt7996_sta_set_decap_offload(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
bool enabled)
{
- struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
- if (enabled)
- set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
- else
- clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
+ mutex_lock(&dev->mt76.mutex);
- if (!msta->wcid.sta)
- return;
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct mt7996_sta_link *msta_link;
+ struct mt7996_vif_link *link;
+
+ link = mt7996_vif_link(dev, vif, link_id);
+ if (!link)
+ continue;
+
+ msta_link = mt76_dereference(msta->link[link_id], &dev->mt76);
+ if (!msta_link)
+ continue;
+
+ if (enabled)
+ set_bit(MT_WCID_FLAG_HDR_TRANS,
+ &msta_link->wcid.flags);
+ else
+ clear_bit(MT_WCID_FLAG_HDR_TRANS,
+ &msta_link->wcid.flags);
- mt7996_mcu_wtbl_update_hdr_trans(dev, vif, sta);
+ if (!msta_link->wcid.sta)
+ continue;
+
+ mt7996_mcu_wtbl_update_hdr_trans(dev, vif, link, msta_link);
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
}
static const char mt7996_gstrings_stats[][ETH_GSTRING_LEN] = {
@@ -1408,11 +1855,12 @@ static void mt7996_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
{
struct mt76_ethtool_worker_info *wi = wi_data;
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_sta_link *msta_link = &msta->deflink;
if (msta->vif->deflink.mt76.idx != wi->idx)
return;
- mt76_ethtool_worker(wi, &msta->wcid.stats, true);
+ mt76_ethtool_worker(wi, &msta_link->wcid.stats, true);
}
static
@@ -1516,10 +1964,12 @@ mt7996_twt_teardown_request(struct ieee80211_hw *hw,
u8 flowid)
{
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_sta_link *msta_link = &msta->deflink;
+ struct mt7996_vif_link *link = &msta->vif->deflink;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
mutex_lock(&dev->mt76.mutex);
- mt7996_mac_twt_teardown_flow(dev, msta, flowid);
+ mt7996_mac_twt_teardown_flow(dev, link, msta_link, flowid);
mutex_unlock(&dev->mt76.mutex);
}
@@ -1589,12 +2039,26 @@ mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
- struct mt7996_vif_link *mlink = &mvif->deflink;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ struct mt7996_sta_link *msta_link;
+ struct mt7996_vif_link *link;
+ struct mt76_vif_link *mlink;
struct mt7996_phy *phy;
- phy = mt7996_vif_link_phy(mlink);
+ mlink = rcu_dereference(mvif->mt76.link[msta->deflink_id]);
+ if (!mlink)
+ return -EIO;
+
+ msta_link = rcu_dereference(msta->link[msta->deflink_id]);
+ if (!msta_link)
+ return -EIO;
+
+ if (!msta_link->wcid.sta || msta_link->wcid.idx > MT7996_WTBL_STA)
+ return -EIO;
+
+ link = (struct mt7996_vif_link *)mlink;
+ phy = mt7996_vif_link_phy(link);
if (!phy)
return -ENODEV;
@@ -1604,15 +2068,12 @@ mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
if (!mtk_wed_device_active(wed))
return -ENODEV;
- if (!msta->wcid.sta || msta->wcid.idx > MT7996_WTBL_STA)
- return -EIO;
-
path->type = DEV_PATH_MTK_WDMA;
path->dev = ctx->dev;
path->mtk_wdma.wdma_idx = wed->wdma_idx;
- path->mtk_wdma.bss = mvif->deflink.mt76.idx;
+ path->mtk_wdma.bss = mlink->idx;
path->mtk_wdma.queue = 0;
- path->mtk_wdma.wcid = msta->wcid.idx;
+ path->mtk_wdma.wcid = msta_link->wcid.idx;
path->mtk_wdma.amsdu = mtk_wed_is_amsdu_supported(wed);
ctx->dev = NULL;
@@ -1622,6 +2083,14 @@ mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
#endif
+static int
+mt7996_change_vif_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 old_links, u16 new_links,
+ struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS])
+{
+ return 0;
+}
+
const struct ieee80211_ops mt7996_ops = {
.add_chanctx = mt76_add_chanctx,
.remove_chanctx = mt76_remove_chanctx,
@@ -1637,10 +2106,11 @@ const struct ieee80211_ops mt7996_ops = {
.config = mt7996_config,
.conf_tx = mt7996_conf_tx,
.configure_filter = mt7996_configure_filter,
- .bss_info_changed = mt7996_bss_info_changed,
- .sta_state = mt76_sta_state,
+ .vif_cfg_changed = mt7996_vif_cfg_changed,
+ .link_info_changed = mt7996_link_info_changed,
+ .sta_state = mt7996_sta_state,
.sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
- .link_sta_rc_update = mt7996_sta_rc_update,
+ .link_sta_rc_update = mt7996_link_sta_rc_update,
.set_key = mt7996_set_key,
.ampdu_action = mt7996_ampdu_action,
.set_rts_threshold = mt7996_set_rts_threshold,
@@ -1650,7 +2120,7 @@ const struct ieee80211_ops mt7996_ops = {
.remain_on_channel = mt76_remain_on_channel,
.cancel_remain_on_channel = mt76_cancel_remain_on_channel,
.release_buffered_frames = mt76_release_buffered_frames,
- .get_txpower = mt76_get_txpower,
+ .get_txpower = mt7996_get_txpower,
.channel_switch_beacon = mt7996_channel_switch_beacon,
.get_stats = mt7996_get_stats,
.get_et_sset_count = mt7996_get_et_sset_count,
@@ -1677,4 +2147,6 @@ const struct ieee80211_ops mt7996_ops = {
.net_fill_forward_path = mt7996_net_fill_forward_path,
.net_setup_tc = mt76_wed_net_setup_tc,
#endif
+ .change_vif_links = mt7996_change_vif_links,
+ .change_sta_links = mt7996_mac_sta_change_links,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
index e4569d032221..ddd555942c73 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
@@ -118,13 +118,13 @@ mt7996_mcu_get_sta_nss(u16 mcs_map)
}
static void
-mt7996_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs,
- u16 mcs_map)
+mt7996_mcu_set_sta_he_mcs(struct ieee80211_link_sta *link_sta,
+ struct mt7996_vif_link *link,
+ __le16 *he_mcs, u16 mcs_map)
{
- struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
- enum nl80211_band band = msta->vif->deflink.phy->mt76->chandef.chan->band;
- const u16 *mask = msta->vif->deflink.bitrate_mask.control[band].he_mcs;
- int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss;
+ int nss, max_nss = link_sta->rx_nss > 3 ? 4 : link_sta->rx_nss;
+ enum nl80211_band band = link->phy->mt76->chandef.chan->band;
+ const u16 *mask = link->bitrate_mask.control[band].he_mcs;
for (nss = 0; nss < max_nss; nss++) {
int mcs;
@@ -167,11 +167,11 @@ mt7996_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs,
}
static void
-mt7996_mcu_set_sta_vht_mcs(struct ieee80211_sta *sta, __le16 *vht_mcs,
- const u16 *mask)
+mt7996_mcu_set_sta_vht_mcs(struct ieee80211_link_sta *link_sta,
+ __le16 *vht_mcs, const u16 *mask)
{
- u16 mcs, mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map);
- int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss;
+ u16 mcs, mcs_map = le16_to_cpu(link_sta->vht_cap.vht_mcs.rx_mcs_map);
+ int nss, max_nss = link_sta->rx_nss > 3 ? 4 : link_sta->rx_nss;
for (nss = 0; nss < max_nss; nss++, mcs_map >>= 2) {
switch (mcs_map & 0x3) {
@@ -193,13 +193,13 @@ mt7996_mcu_set_sta_vht_mcs(struct ieee80211_sta *sta, __le16 *vht_mcs,
}
static void
-mt7996_mcu_set_sta_ht_mcs(struct ieee80211_sta *sta, u8 *ht_mcs,
- const u8 *mask)
+mt7996_mcu_set_sta_ht_mcs(struct ieee80211_link_sta *link_sta,
+ u8 *ht_mcs, const u8 *mask)
{
- int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss;
+ int nss, max_nss = link_sta->rx_nss > 3 ? 4 : link_sta->rx_nss;
for (nss = 0; nss < max_nss; nss++)
- ht_mcs[nss] = sta->deflink.ht_cap.mcs.rx_mask[nss] & mask[nss];
+ ht_mcs[nss] = link_sta->ht_cap.mcs.rx_mask[nss] & mask[nss];
}
static int
@@ -1064,7 +1064,8 @@ __mt7996_mcu_alloc_bss_req(struct mt76_dev *dev, struct mt76_vif_link *mvif, int
int mt7996_mcu_add_bss_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
- struct mt76_vif_link *mlink, int enable)
+ struct mt76_vif_link *mlink,
+ struct mt7996_sta_link *msta_link, int enable)
{
struct mt7996_dev *dev = phy->dev;
struct sk_buff *skb;
@@ -1081,7 +1082,7 @@ int mt7996_mcu_add_bss_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
/* bss_basic must be first */
mt7996_mcu_bss_basic_tlv(skb, vif, link_conf, mlink, phy->mt76,
- mlink->wcid->idx, enable);
+ msta_link->wcid.idx, enable);
mt7996_mcu_bss_sec_tlv(skb, mlink);
if (vif->type == NL80211_IFTYPE_MONITOR)
@@ -1159,37 +1160,34 @@ mt7996_mcu_sta_ba(struct mt7996_dev *dev, struct mt76_vif_link *mvif,
/** starec & wtbl **/
int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev,
struct ieee80211_ampdu_params *params,
- bool enable)
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link, bool enable)
{
- struct mt7996_sta *msta = (struct mt7996_sta *)params->sta->drv_priv;
- struct mt7996_vif *mvif = msta->vif;
-
if (enable && !params->amsdu)
- msta->wcid.amsdu = false;
+ msta_link->wcid.amsdu = false;
- return mt7996_mcu_sta_ba(dev, &mvif->deflink.mt76, params, enable, true);
+ return mt7996_mcu_sta_ba(dev, &link->mt76, params, enable, true);
}
int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
struct ieee80211_ampdu_params *params,
- bool enable)
+ struct mt7996_vif_link *link, bool enable)
{
- struct mt7996_sta *msta = (struct mt7996_sta *)params->sta->drv_priv;
- struct mt7996_vif *mvif = msta->vif;
-
- return mt7996_mcu_sta_ba(dev, &mvif->deflink.mt76, params, enable, false);
+ return mt7996_mcu_sta_ba(dev, &link->mt76, params, enable, false);
}
static void
-mt7996_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+mt7996_mcu_sta_he_tlv(struct sk_buff *skb,
+ struct ieee80211_link_sta *link_sta,
+ struct mt7996_vif_link *link)
{
- struct ieee80211_he_cap_elem *elem = &sta->deflink.he_cap.he_cap_elem;
+ struct ieee80211_he_cap_elem *elem = &link_sta->he_cap.he_cap_elem;
struct ieee80211_he_mcs_nss_supp mcs_map;
struct sta_rec_he_v2 *he;
struct tlv *tlv;
int i = 0;
- if (!sta->deflink.he_cap.has_he)
+ if (!link_sta->he_cap.has_he)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE_V2, sizeof(*he));
@@ -1201,21 +1199,21 @@ mt7996_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
he->he_phy_cap[i] = elem->phy_cap_info[i];
}
- mcs_map = sta->deflink.he_cap.he_mcs_nss_supp;
- switch (sta->deflink.bandwidth) {
+ mcs_map = link_sta->he_cap.he_mcs_nss_supp;
+ switch (link_sta->bandwidth) {
case IEEE80211_STA_RX_BW_160:
if (elem->phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
- mt7996_mcu_set_sta_he_mcs(sta,
+ mt7996_mcu_set_sta_he_mcs(link_sta, link,
&he->max_nss_mcs[CMD_HE_MCS_BW8080],
le16_to_cpu(mcs_map.rx_mcs_80p80));
- mt7996_mcu_set_sta_he_mcs(sta,
+ mt7996_mcu_set_sta_he_mcs(link_sta, link,
&he->max_nss_mcs[CMD_HE_MCS_BW160],
le16_to_cpu(mcs_map.rx_mcs_160));
fallthrough;
default:
- mt7996_mcu_set_sta_he_mcs(sta,
+ mt7996_mcu_set_sta_he_mcs(link_sta, link,
&he->max_nss_mcs[CMD_HE_MCS_BW80],
le16_to_cpu(mcs_map.rx_mcs_80));
break;
@@ -1225,24 +1223,26 @@ mt7996_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
}
static void
-mt7996_mcu_sta_he_6g_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+mt7996_mcu_sta_he_6g_tlv(struct sk_buff *skb,
+ struct ieee80211_link_sta *link_sta)
{
struct sta_rec_he_6g_capa *he_6g;
struct tlv *tlv;
- if (!sta->deflink.he_6ghz_capa.capa)
+ if (!link_sta->he_6ghz_capa.capa)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE_6G, sizeof(*he_6g));
he_6g = (struct sta_rec_he_6g_capa *)tlv;
- he_6g->capa = sta->deflink.he_6ghz_capa.capa;
+ he_6g->capa = link_sta->he_6ghz_capa.capa;
}
static void
-mt7996_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+mt7996_mcu_sta_eht_tlv(struct sk_buff *skb,
+ struct ieee80211_link_sta *link_sta)
{
- struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_sta *msta = (struct mt7996_sta *)link_sta->sta->drv_priv;
struct ieee80211_vif *vif = container_of((void *)msta->vif,
struct ieee80211_vif, drv_priv);
struct ieee80211_eht_mcs_nss_supp *mcs_map;
@@ -1250,11 +1250,11 @@ mt7996_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
struct sta_rec_eht *eht;
struct tlv *tlv;
- if (!sta->deflink.eht_cap.has_eht)
+ if (!link_sta->eht_cap.has_eht)
return;
- mcs_map = &sta->deflink.eht_cap.eht_mcs_nss_supp;
- elem = &sta->deflink.eht_cap.eht_cap_elem;
+ mcs_map = &link_sta->eht_cap.eht_mcs_nss_supp;
+ elem = &link_sta->eht_cap.eht_cap_elem;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_EHT, sizeof(*eht));
@@ -1265,7 +1265,7 @@ mt7996_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
eht->phy_cap_ext = cpu_to_le64(elem->phy_cap_info[8]);
if (vif->type != NL80211_IFTYPE_STATION &&
- (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ (link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
(IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
@@ -1281,47 +1281,48 @@ mt7996_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
}
static void
-mt7996_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+mt7996_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_link_sta *link_sta)
{
struct sta_rec_ht_uni *ht;
struct tlv *tlv;
- if (!sta->deflink.ht_cap.ht_supported)
+ if (!link_sta->ht_cap.ht_supported)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
ht = (struct sta_rec_ht_uni *)tlv;
- ht->ht_cap = cpu_to_le16(sta->deflink.ht_cap.cap);
- ht->ampdu_param = u8_encode_bits(sta->deflink.ht_cap.ampdu_factor,
+ ht->ht_cap = cpu_to_le16(link_sta->ht_cap.cap);
+ ht->ampdu_param = u8_encode_bits(link_sta->ht_cap.ampdu_factor,
IEEE80211_HT_AMPDU_PARM_FACTOR) |
- u8_encode_bits(sta->deflink.ht_cap.ampdu_density,
+ u8_encode_bits(link_sta->ht_cap.ampdu_density,
IEEE80211_HT_AMPDU_PARM_DENSITY);
}
static void
-mt7996_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+mt7996_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_link_sta *link_sta)
{
struct sta_rec_vht *vht;
struct tlv *tlv;
/* For 6G band, this tlv is necessary to let hw work normally */
- if (!sta->deflink.he_6ghz_capa.capa && !sta->deflink.vht_cap.vht_supported)
+ if (!link_sta->he_6ghz_capa.capa && !link_sta->vht_cap.vht_supported)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
vht = (struct sta_rec_vht *)tlv;
- vht->vht_cap = cpu_to_le32(sta->deflink.vht_cap.cap);
- vht->vht_rx_mcs_map = sta->deflink.vht_cap.vht_mcs.rx_mcs_map;
- vht->vht_tx_mcs_map = sta->deflink.vht_cap.vht_mcs.tx_mcs_map;
+ vht->vht_cap = cpu_to_le32(link_sta->vht_cap.cap);
+ vht->vht_rx_mcs_map = link_sta->vht_cap.vht_mcs.rx_mcs_map;
+ vht->vht_tx_mcs_map = link_sta->vht_cap.vht_mcs.tx_mcs_map;
}
static void
mt7996_mcu_sta_amsdu_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ struct mt7996_sta_link *msta_link)
{
- struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
struct sta_rec_amsdu *amsdu;
struct tlv *tlv;
@@ -1330,16 +1331,16 @@ mt7996_mcu_sta_amsdu_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
vif->type != NL80211_IFTYPE_AP)
return;
- if (!sta->deflink.agg.max_amsdu_len)
+ if (!link_sta->agg.max_amsdu_len)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu));
amsdu = (struct sta_rec_amsdu *)tlv;
amsdu->max_amsdu_num = 8;
amsdu->amsdu_en = true;
- msta->wcid.amsdu = true;
+ msta_link->wcid.amsdu = true;
- switch (sta->deflink.agg.max_amsdu_len) {
+ switch (link_sta->agg.max_amsdu_len) {
case IEEE80211_MAX_MPDU_LEN_VHT_11454:
amsdu->max_mpdu_size =
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
@@ -1356,30 +1357,31 @@ mt7996_mcu_sta_amsdu_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
static void
mt7996_mcu_sta_muru_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta)
{
- struct ieee80211_he_cap_elem *elem = &sta->deflink.he_cap.he_cap_elem;
+ struct ieee80211_he_cap_elem *elem = &link_sta->he_cap.he_cap_elem;
struct sta_rec_muru *muru;
struct tlv *tlv;
- if (vif->type != NL80211_IFTYPE_STATION &&
- vif->type != NL80211_IFTYPE_AP)
+ if (link_conf->vif->type != NL80211_IFTYPE_STATION &&
+ link_conf->vif->type != NL80211_IFTYPE_AP)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_MURU, sizeof(*muru));
muru = (struct sta_rec_muru *)tlv;
- muru->cfg.mimo_dl_en = vif->bss_conf.eht_mu_beamformer ||
- vif->bss_conf.he_mu_beamformer ||
- vif->bss_conf.vht_mu_beamformer ||
- vif->bss_conf.vht_mu_beamformee;
+ muru->cfg.mimo_dl_en = link_conf->eht_mu_beamformer ||
+ link_conf->he_mu_beamformer ||
+ link_conf->vht_mu_beamformer ||
+ link_conf->vht_mu_beamformee;
muru->cfg.ofdma_dl_en = true;
- if (sta->deflink.vht_cap.vht_supported)
+ if (link_sta->vht_cap.vht_supported)
muru->mimo_dl.vht_mu_bfee =
- !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+ !!(link_sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
- if (!sta->deflink.he_cap.has_he)
+ if (!link_sta->he_cap.has_he)
return;
muru->mimo_dl.partial_bw_dl_mimo =
@@ -1410,49 +1412,50 @@ mt7996_mcu_sta_muru_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
}
static inline bool
-mt7996_is_ebf_supported(struct mt7996_phy *phy, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool bfee)
+mt7996_is_ebf_supported(struct mt7996_phy *phy,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta, bool bfee)
{
int sts = hweight16(phy->mt76->chainmask);
- if (vif->type != NL80211_IFTYPE_STATION &&
- vif->type != NL80211_IFTYPE_AP)
+ if (link_conf->vif->type != NL80211_IFTYPE_STATION &&
+ link_conf->vif->type != NL80211_IFTYPE_AP)
return false;
if (!bfee && sts < 2)
return false;
- if (sta->deflink.eht_cap.has_eht) {
- struct ieee80211_sta_eht_cap *pc = &sta->deflink.eht_cap;
+ if (link_sta->eht_cap.has_eht) {
+ struct ieee80211_sta_eht_cap *pc = &link_sta->eht_cap;
struct ieee80211_eht_cap_elem_fixed *pe = &pc->eht_cap_elem;
if (bfee)
- return vif->bss_conf.eht_su_beamformee &&
+ return link_conf->eht_su_beamformee &&
EHT_PHY(CAP0_SU_BEAMFORMER, pe->phy_cap_info[0]);
else
- return vif->bss_conf.eht_su_beamformer &&
+ return link_conf->eht_su_beamformer &&
EHT_PHY(CAP0_SU_BEAMFORMEE, pe->phy_cap_info[0]);
}
- if (sta->deflink.he_cap.has_he) {
- struct ieee80211_he_cap_elem *pe = &sta->deflink.he_cap.he_cap_elem;
+ if (link_sta->he_cap.has_he) {
+ struct ieee80211_he_cap_elem *pe = &link_sta->he_cap.he_cap_elem;
if (bfee)
- return vif->bss_conf.he_su_beamformee &&
+ return link_conf->he_su_beamformee &&
HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]);
else
- return vif->bss_conf.he_su_beamformer &&
+ return link_conf->he_su_beamformer &&
HE_PHY(CAP4_SU_BEAMFORMEE, pe->phy_cap_info[4]);
}
- if (sta->deflink.vht_cap.vht_supported) {
- u32 cap = sta->deflink.vht_cap.cap;
+ if (link_sta->vht_cap.vht_supported) {
+ u32 cap = link_sta->vht_cap.cap;
if (bfee)
- return vif->bss_conf.vht_su_beamformee &&
+ return link_conf->vht_su_beamformee &&
(cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
else
- return vif->bss_conf.vht_su_beamformer &&
+ return link_conf->vht_su_beamformer &&
(cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE);
}
@@ -1473,10 +1476,11 @@ mt7996_mcu_sta_sounding_rate(struct sta_rec_bf *bf, struct mt7996_phy *phy)
}
static void
-mt7996_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct mt7996_phy *phy,
- struct sta_rec_bf *bf, bool explicit)
+mt7996_mcu_sta_bfer_ht(struct ieee80211_link_sta *link_sta,
+ struct mt7996_phy *phy, struct sta_rec_bf *bf,
+ bool explicit)
{
- struct ieee80211_mcs_info *mcs = &sta->deflink.ht_cap.mcs;
+ struct ieee80211_mcs_info *mcs = &link_sta->ht_cap.mcs;
u8 n = 0;
bf->tx_mode = MT_PHY_TYPE_HT;
@@ -1499,10 +1503,11 @@ mt7996_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct mt7996_phy *phy,
}
static void
-mt7996_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7996_phy *phy,
- struct sta_rec_bf *bf, bool explicit)
+mt7996_mcu_sta_bfer_vht(struct ieee80211_link_sta *link_sta,
+ struct mt7996_phy *phy, struct sta_rec_bf *bf,
+ bool explicit)
{
- struct ieee80211_sta_vht_cap *pc = &sta->deflink.vht_cap;
+ struct ieee80211_sta_vht_cap *pc = &link_sta->vht_cap;
struct ieee80211_sta_vht_cap *vc = &phy->mt76->sband_5g.sband.vht_cap;
u16 mcs_map = le16_to_cpu(pc->vht_mcs.rx_mcs_map);
u8 nss_mcs = mt7996_mcu_get_sta_nss(mcs_map);
@@ -1523,24 +1528,24 @@ mt7996_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7996_phy *phy,
bf->ncol = min_t(u8, nss_mcs, bf->nrow);
bf->ibf_ncol = min_t(u8, MT7996_IBF_MAX_NC, bf->ncol);
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160)
bf->nrow = 1;
} else {
bf->nrow = tx_ant;
bf->ncol = min_t(u8, nss_mcs, bf->nrow);
bf->ibf_ncol = min_t(u8, MT7996_IBF_MAX_NC, nss_mcs);
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160)
bf->ibf_nrow = 1;
}
}
static void
-mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
- struct mt7996_phy *phy, struct sta_rec_bf *bf,
- bool explicit)
+mt7996_mcu_sta_bfer_he(struct ieee80211_link_sta *link_sta,
+ struct ieee80211_vif *vif, struct mt7996_phy *phy,
+ struct sta_rec_bf *bf, bool explicit)
{
- struct ieee80211_sta_he_cap *pc = &sta->deflink.he_cap;
+ struct ieee80211_sta_he_cap *pc = &link_sta->he_cap;
struct ieee80211_he_cap_elem *pe = &pc->he_cap_elem;
const struct ieee80211_sta_he_cap *vc =
mt76_connac_get_he_phy_cap(phy->mt76, vif);
@@ -1569,7 +1574,7 @@ mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
bf->ibf_ncol = explicit ? min_t(u8, MT7996_IBF_MAX_NC, bf->ncol) :
min_t(u8, MT7996_IBF_MAX_NC, nss_mcs);
- if (sta->deflink.bandwidth != IEEE80211_STA_RX_BW_160)
+ if (link_sta->bandwidth != IEEE80211_STA_RX_BW_160)
return;
/* go over for 160MHz and 80p80 */
@@ -1601,11 +1606,11 @@ mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
}
static void
-mt7996_mcu_sta_bfer_eht(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
- struct mt7996_phy *phy, struct sta_rec_bf *bf,
- bool explicit)
+mt7996_mcu_sta_bfer_eht(struct ieee80211_link_sta *link_sta,
+ struct ieee80211_vif *vif, struct mt7996_phy *phy,
+ struct sta_rec_bf *bf, bool explicit)
{
- struct ieee80211_sta_eht_cap *pc = &sta->deflink.eht_cap;
+ struct ieee80211_sta_eht_cap *pc = &link_sta->eht_cap;
struct ieee80211_eht_cap_elem_fixed *pe = &pc->eht_cap_elem;
struct ieee80211_eht_mcs_nss_supp *eht_nss = &pc->eht_mcs_nss_supp;
const struct ieee80211_sta_eht_cap *vc =
@@ -1629,10 +1634,10 @@ mt7996_mcu_sta_bfer_eht(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
bf->ibf_ncol = explicit ? min_t(u8, MT7996_IBF_MAX_NC, bf->ncol) :
min_t(u8, MT7996_IBF_MAX_NC, nss_mcs);
- if (sta->deflink.bandwidth < IEEE80211_STA_RX_BW_160)
+ if (link_sta->bandwidth < IEEE80211_STA_RX_BW_160)
return;
- switch (sta->deflink.bandwidth) {
+ switch (link_sta->bandwidth) {
case IEEE80211_STA_RX_BW_160:
snd_dim = EHT_PHY(CAP2_SOUNDING_DIM_160MHZ_MASK, ve->phy_cap_info[2]);
sts = EHT_PHY(CAP1_BEAMFORMEE_SS_160MHZ_MASK, pe->phy_cap_info[1]);
@@ -1660,13 +1665,15 @@ mt7996_mcu_sta_bfer_eht(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
static void
mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta,
+ struct mt7996_vif_link *link)
{
#define EBF_MODE BIT(0)
#define IBF_MODE BIT(1)
#define BF_MAT_ORDER 4
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_phy *phy = mvif->deflink.phy;
+ struct ieee80211_vif *vif = link_conf->vif;
+ struct mt7996_phy *phy = link->phy;
int tx_ant = hweight16(phy->mt76->chainmask) - 1;
struct sta_rec_bf *bf;
struct tlv *tlv;
@@ -1678,10 +1685,10 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
};
bool ebf;
- if (!(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
+ if (!(link_sta->ht_cap.ht_supported || link_sta->he_cap.has_he))
return;
- ebf = mt7996_is_ebf_supported(phy, vif, sta, false);
+ ebf = mt7996_is_ebf_supported(phy, link_conf, link_sta, false);
if (!ebf && !dev->ibf)
return;
@@ -1692,28 +1699,29 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
* vht: support eBF and iBF
* ht: iBF only, since mac80211 lacks of eBF support
*/
- if (sta->deflink.eht_cap.has_eht)
- mt7996_mcu_sta_bfer_eht(sta, vif, phy, bf, ebf);
- else if (sta->deflink.he_cap.has_he)
- mt7996_mcu_sta_bfer_he(sta, vif, phy, bf, ebf);
- else if (sta->deflink.vht_cap.vht_supported)
- mt7996_mcu_sta_bfer_vht(sta, phy, bf, ebf);
- else if (sta->deflink.ht_cap.ht_supported)
- mt7996_mcu_sta_bfer_ht(sta, phy, bf, ebf);
+ if (link_sta->eht_cap.has_eht)
+ mt7996_mcu_sta_bfer_eht(link_sta, vif, link->phy, bf, ebf);
+ else if (link_sta->he_cap.has_he)
+ mt7996_mcu_sta_bfer_he(link_sta, vif, link->phy, bf, ebf);
+ else if (link_sta->vht_cap.vht_supported)
+ mt7996_mcu_sta_bfer_vht(link_sta, link->phy, bf, ebf);
+ else if (link_sta->ht_cap.ht_supported)
+ mt7996_mcu_sta_bfer_ht(link_sta, link->phy, bf, ebf);
else
return;
bf->bf_cap = ebf ? EBF_MODE : (dev->ibf ? IBF_MODE : 0);
if (is_mt7992(&dev->mt76) && tx_ant == 4)
bf->bf_cap |= IBF_MODE;
- bf->bw = sta->deflink.bandwidth;
- bf->ibf_dbw = sta->deflink.bandwidth;
+
+ bf->bw = link_sta->bandwidth;
+ bf->ibf_dbw = link_sta->bandwidth;
bf->ibf_nrow = tx_ant;
- if (sta->deflink.eht_cap.has_eht || sta->deflink.he_cap.has_he)
+ if (link_sta->eht_cap.has_eht || link_sta->he_cap.has_he)
bf->ibf_timeout = is_mt7996(&dev->mt76) ? MT7996_IBF_TIMEOUT :
MT7992_IBF_TIMEOUT;
- else if (!ebf && sta->deflink.bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->ncol)
+ else if (!ebf && link_sta->bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->ncol)
bf->ibf_timeout = MT7996_IBF_TIMEOUT_LEGACY;
else
bf->ibf_timeout = MT7996_IBF_TIMEOUT;
@@ -1727,7 +1735,7 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
matrix[bf->nrow][bf->ncol] : 0;
}
- switch (sta->deflink.bandwidth) {
+ switch (link_sta->bandwidth) {
case IEEE80211_STA_RX_BW_160:
case IEEE80211_STA_RX_BW_80:
bf->mem_total = bf->mem_20m * 2;
@@ -1743,31 +1751,32 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
static void
mt7996_mcu_sta_bfee_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta,
+ struct mt7996_vif_link *link)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_phy *phy = mvif->deflink.phy;
+ struct mt7996_phy *phy = link->phy;
int tx_ant = hweight8(phy->mt76->antenna_mask) - 1;
struct sta_rec_bfee *bfee;
struct tlv *tlv;
u8 nrow = 0;
- if (!(sta->deflink.vht_cap.vht_supported || sta->deflink.he_cap.has_he))
+ if (!(link_sta->vht_cap.vht_supported || link_sta->he_cap.has_he))
return;
- if (!mt7996_is_ebf_supported(phy, vif, sta, true))
+ if (!mt7996_is_ebf_supported(phy, link_conf, link_sta, true))
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BFEE, sizeof(*bfee));
bfee = (struct sta_rec_bfee *)tlv;
- if (sta->deflink.he_cap.has_he) {
- struct ieee80211_he_cap_elem *pe = &sta->deflink.he_cap.he_cap_elem;
+ if (link_sta->he_cap.has_he) {
+ struct ieee80211_he_cap_elem *pe = &link_sta->he_cap.he_cap_elem;
nrow = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
pe->phy_cap_info[5]);
- } else if (sta->deflink.vht_cap.vht_supported) {
- struct ieee80211_sta_vht_cap *pc = &sta->deflink.vht_cap;
+ } else if (link_sta->vht_cap.vht_supported) {
+ struct ieee80211_sta_vht_cap *pc = &link_sta->vht_cap;
nrow = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
pc->cap);
@@ -1874,18 +1883,19 @@ int mt7996_mcu_set_fixed_rate_ctrl(struct mt7996_dev *dev,
MCU_WM_UNI_CMD(RA), true);
}
-int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, void *data, u32 field)
+int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev,
+ struct ieee80211_link_sta *link_sta,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link,
+ void *data, u32 field)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
struct sta_phy_uni *phy = data;
struct sta_rec_ra_fixed_uni *ra;
struct sk_buff *skb;
struct tlv *tlv;
- skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->deflink.mt76,
- &msta->wcid,
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &link->mt76,
+ &msta_link->wcid,
MT7996_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -1904,7 +1914,7 @@ int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, struct ieee80211_vif *vif
ra->phy = *phy;
break;
case RATE_PARAM_MMPS_UPDATE:
- ra->mmps_mode = mt7996_mcu_get_mmps_mode(sta->deflink.smps_mode);
+ ra->mmps_mode = mt7996_mcu_get_mmps_mode(link_sta->smps_mode);
break;
default:
break;
@@ -1916,12 +1926,13 @@ int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, struct ieee80211_vif *vif
}
static int
-mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev,
+ struct ieee80211_link_sta *link_sta,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct cfg80211_chan_def *chandef = &mvif->deflink.phy->mt76->chandef;
- struct cfg80211_bitrate_mask *mask = &mvif->deflink.bitrate_mask;
+ struct cfg80211_chan_def *chandef = &link->phy->mt76->chandef;
+ struct cfg80211_bitrate_mask *mask = &link->bitrate_mask;
enum nl80211_band band = chandef->chan->band;
struct sta_phy_uni phy = {};
int ret, nrates = 0;
@@ -1942,11 +1953,11 @@ mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, struct ieee80211_vif *vif
} \
} while (0)
- if (sta->deflink.he_cap.has_he) {
+ if (link_sta->he_cap.has_he) {
__sta_phy_bitrate_mask_check(he_mcs, he_gi, 0, 1);
- } else if (sta->deflink.vht_cap.vht_supported) {
+ } else if (link_sta->vht_cap.vht_supported) {
__sta_phy_bitrate_mask_check(vht_mcs, gi, 0, 0);
- } else if (sta->deflink.ht_cap.ht_supported) {
+ } else if (link_sta->ht_cap.ht_supported) {
__sta_phy_bitrate_mask_check(ht_mcs, gi, 1, 0);
} else {
nrates = hweight32(mask->control[band].legacy);
@@ -1963,7 +1974,8 @@ mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, struct ieee80211_vif *vif
/* fixed single rate */
if (nrates == 1) {
- ret = mt7996_mcu_set_fixed_field(dev, vif, sta, &phy,
+ ret = mt7996_mcu_set_fixed_field(dev, link_sta, link,
+ msta_link, &phy,
RATE_PARAM_FIXED_MCS);
if (ret)
return ret;
@@ -1972,20 +1984,20 @@ mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, struct ieee80211_vif *vif
/* fixed GI */
if (mask->control[band].gi != NL80211_TXRATE_DEFAULT_GI ||
mask->control[band].he_gi != GENMASK(7, 0)) {
- struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
u32 addr;
/* firmware updates only TXCMD but doesn't take WTBL into
* account, so driver should update here to reflect the
* actual txrate hardware sends out.
*/
- addr = mt7996_mac_wtbl_lmac_addr(dev, msta->wcid.idx, 7);
- if (sta->deflink.he_cap.has_he)
+ addr = mt7996_mac_wtbl_lmac_addr(dev, msta_link->wcid.idx, 7);
+ if (link_sta->he_cap.has_he)
mt76_rmw_field(dev, addr, GENMASK(31, 24), phy.sgi);
else
mt76_rmw_field(dev, addr, GENMASK(15, 12), phy.sgi);
- ret = mt7996_mcu_set_fixed_field(dev, vif, sta, &phy,
+ ret = mt7996_mcu_set_fixed_field(dev, link_sta, link,
+ msta_link, &phy,
RATE_PARAM_FIXED_GI);
if (ret)
return ret;
@@ -1993,7 +2005,8 @@ mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, struct ieee80211_vif *vif
/* fixed HE_LTF */
if (mask->control[band].he_ltf != GENMASK(7, 0)) {
- ret = mt7996_mcu_set_fixed_field(dev, vif, sta, &phy,
+ ret = mt7996_mcu_set_fixed_field(dev, link_sta, link,
+ msta_link, &phy,
RATE_PARAM_FIXED_HE_LTF);
if (ret)
return ret;
@@ -2004,30 +2017,32 @@ mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, struct ieee80211_vif *vif
static void
mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta,
+ struct mt7996_vif_link *link)
{
#define INIT_RCPI 180
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt76_phy *mphy = mvif->deflink.phy->mt76;
+ struct mt76_phy *mphy = link->phy->mt76;
struct cfg80211_chan_def *chandef = &mphy->chandef;
- struct cfg80211_bitrate_mask *mask = &mvif->deflink.bitrate_mask;
+ struct cfg80211_bitrate_mask *mask = &link->bitrate_mask;
+ u32 cap = link_sta->sta->wme ? STA_CAP_WMM : 0;
enum nl80211_band band = chandef->chan->band;
struct sta_rec_ra_uni *ra;
struct tlv *tlv;
- u32 supp_rate = sta->deflink.supp_rates[band];
- u32 cap = sta->wme ? STA_CAP_WMM : 0;
+ u32 supp_rate = link_sta->supp_rates[band];
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra));
ra = (struct sta_rec_ra_uni *)tlv;
ra->valid = true;
ra->auto_rate = true;
- ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, &sta->deflink);
+ ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, link_sta);
ra->channel = chandef->chan->hw_value;
- ra->bw = (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320) ?
- CMD_CBW_320MHZ : sta->deflink.bandwidth;
+ ra->bw = (link_sta->bandwidth == IEEE80211_STA_RX_BW_320) ?
+ CMD_CBW_320MHZ : link_sta->bandwidth;
ra->phy.bw = ra->bw;
- ra->mmps_mode = mt7996_mcu_get_mmps_mode(sta->deflink.smps_mode);
+ ra->mmps_mode = mt7996_mcu_get_mmps_mode(link_sta->smps_mode);
if (supp_rate) {
supp_rate &= mask->control[band].legacy;
@@ -2047,60 +2062,60 @@ mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev,
}
}
- if (sta->deflink.ht_cap.ht_supported) {
+ if (link_sta->ht_cap.ht_supported) {
ra->supp_mode |= MODE_HT;
- ra->af = sta->deflink.ht_cap.ampdu_factor;
- ra->ht_gf = !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
+ ra->af = link_sta->ht_cap.ampdu_factor;
+ ra->ht_gf = !!(link_sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
cap |= STA_CAP_HT;
- if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
+ if (link_sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
cap |= STA_CAP_SGI_20;
- if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ if (link_sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
cap |= STA_CAP_SGI_40;
- if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_TX_STBC)
+ if (link_sta->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC)
cap |= STA_CAP_TX_STBC;
- if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
+ if (link_sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
cap |= STA_CAP_RX_STBC;
- if (vif->bss_conf.ht_ldpc &&
- (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
+ if (link_conf->ht_ldpc &&
+ (link_sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
cap |= STA_CAP_LDPC;
- mt7996_mcu_set_sta_ht_mcs(sta, ra->ht_mcs,
+ mt7996_mcu_set_sta_ht_mcs(link_sta, ra->ht_mcs,
mask->control[band].ht_mcs);
ra->supp_ht_mcs = *(__le32 *)ra->ht_mcs;
}
- if (sta->deflink.vht_cap.vht_supported) {
+ if (link_sta->vht_cap.vht_supported) {
u8 af;
ra->supp_mode |= MODE_VHT;
af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
- sta->deflink.vht_cap.cap);
+ link_sta->vht_cap.cap);
ra->af = max_t(u8, ra->af, af);
cap |= STA_CAP_VHT;
- if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+ if (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
cap |= STA_CAP_VHT_SGI_80;
- if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+ if (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
cap |= STA_CAP_VHT_SGI_160;
- if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_TXSTBC)
+ if (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_TXSTBC)
cap |= STA_CAP_VHT_TX_STBC;
- if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1)
+ if (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1)
cap |= STA_CAP_VHT_RX_STBC;
- if ((vif->type != NL80211_IFTYPE_AP || vif->bss_conf.vht_ldpc) &&
- (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC))
+ if ((vif->type != NL80211_IFTYPE_AP || link_conf->vht_ldpc) &&
+ (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC))
cap |= STA_CAP_VHT_LDPC;
- mt7996_mcu_set_sta_vht_mcs(sta, ra->supp_vht_mcs,
+ mt7996_mcu_set_sta_vht_mcs(link_sta, ra->supp_vht_mcs,
mask->control[band].vht_mcs);
}
- if (sta->deflink.he_cap.has_he) {
+ if (link_sta->he_cap.has_he) {
ra->supp_mode |= MODE_HE;
cap |= STA_CAP_HE;
- if (sta->deflink.he_6ghz_capa.capa)
- ra->af = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
+ if (link_sta->he_6ghz_capa.capa)
+ ra->af = le16_get_bits(link_sta->he_6ghz_capa.capa,
IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
}
ra->sta_cap = cpu_to_le32(cap);
@@ -2108,16 +2123,18 @@ mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev,
memset(ra->rx_rcpi, INIT_RCPI, sizeof(ra->rx_rcpi));
}
-int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool changed)
+int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link, bool changed)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
struct sk_buff *skb;
int ret;
- skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->deflink.mt76,
- &msta->wcid,
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &link->mt76,
+ &msta_link->wcid,
MT7996_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -2127,19 +2144,19 @@ int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_vif *vif,
* update sta_rec_he here.
*/
if (changed)
- mt7996_mcu_sta_he_tlv(skb, sta);
+ mt7996_mcu_sta_he_tlv(skb, link_sta, link);
/* sta_rec_ra accommodates BW, NSS and only MCS range format
* i.e 0-{7,8,9} for VHT.
*/
- mt7996_mcu_sta_rate_ctrl_tlv(skb, dev, vif, sta);
+ mt7996_mcu_sta_rate_ctrl_tlv(skb, dev, vif, link_conf, link_sta, link);
ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true);
if (ret)
return ret;
- return mt7996_mcu_add_rate_ctrl_fixed(dev, vif, sta);
+ return mt7996_mcu_add_rate_ctrl_fixed(dev, link_sta, link, msta_link);
}
static int
@@ -2148,6 +2165,7 @@ mt7996_mcu_add_group(struct mt7996_dev *dev, struct ieee80211_vif *vif,
{
#define MT_STA_BSS_GROUP 1
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_sta_link *msta_link;
struct mt7996_sta *msta;
struct {
u8 __rsv1[4];
@@ -2166,73 +2184,154 @@ mt7996_mcu_add_group(struct mt7996_dev *dev, struct ieee80211_vif *vif,
.val = cpu_to_le32(mvif->deflink.mt76.idx % 16),
};
- msta = sta ? (struct mt7996_sta *)sta->drv_priv : &mvif->deflink.sta;
- req.wlan_idx = cpu_to_le16(msta->wcid.idx);
+ msta = sta ? (struct mt7996_sta *)sta->drv_priv : NULL;
+ msta_link = msta ? &msta->deflink : &mvif->deflink.msta_link;
+ req.wlan_idx = cpu_to_le16(msta_link->wcid.idx);
return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(VOW), &req,
sizeof(req), true);
}
-int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- struct mt76_vif_link *mlink,
- struct ieee80211_sta *sta, int conn_state, bool newly)
+static void
+mt7996_mcu_sta_mld_setup_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
+ struct ieee80211_sta *sta)
{
- struct ieee80211_link_sta *link_sta = NULL;
- struct mt76_wcid *wcid = mlink->wcid;
- struct sk_buff *skb;
- int ret;
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ unsigned long links = sta->valid_links;
+ unsigned int nlinks = hweight16(links);
+ struct mld_setup_link *mld_setup_link;
+ struct sta_rec_mld_setup *mld_setup;
+ struct mt7996_sta_link *msta_link;
+ struct ieee80211_vif *vif;
+ unsigned int link_id;
+ struct tlv *tlv;
+
+ msta_link = mt76_dereference(msta->link[msta->deflink_id], &dev->mt76);
+ if (!msta_link)
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_MLD,
+ sizeof(struct sta_rec_mld_setup) +
+ sizeof(struct mld_setup_link) * nlinks);
+
+ mld_setup = (struct sta_rec_mld_setup *)tlv;
+ memcpy(mld_setup->mld_addr, sta->addr, ETH_ALEN);
+ mld_setup->setup_wcid = cpu_to_le16(msta_link->wcid.idx);
+ mld_setup->primary_id = cpu_to_le16(msta_link->wcid.idx);
+
+ if (nlinks > 1) {
+ link_id = __ffs(links & ~BIT(msta->deflink_id));
+		msta_link = mt76_dereference(msta->link[link_id],
+ &dev->mt76);
+ if (!msta_link)
+ return;
+ }
+ mld_setup->seconed_id = cpu_to_le16(msta_link->wcid.idx);
+ mld_setup->link_num = nlinks;
+
+ vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
+ mld_setup_link = (struct mld_setup_link *)mld_setup->link_info;
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct mt7996_vif_link *link;
+
+ msta_link = mt76_dereference(msta->link[link_id], &dev->mt76);
+ if (!msta_link)
+ continue;
- if (sta) {
- struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ link = mt7996_vif_link(dev, vif, link_id);
+ if (!link)
+ continue;
- wcid = &msta->wcid;
- link_sta = &sta->deflink;
+
+ mld_setup_link->wcid = cpu_to_le16(msta_link->wcid.idx);
+ mld_setup_link->bss_idx = link->mt76.idx;
+ mld_setup_link++;
}
+}
+
+static void
+mt7996_mcu_sta_eht_mld_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
+ struct ieee80211_sta *sta)
+{
+ struct sta_rec_eht_mld *eht_mld;
+ struct tlv *tlv;
+ int i;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_EHT_MLD, sizeof(*eht_mld));
+ eht_mld = (struct sta_rec_eht_mld *)tlv;
+
+ for (i = 0; i < ARRAY_SIZE(eht_mld->str_cap); i++)
+ eht_mld->str_cap[i] = 0x7;
+}
- skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, mlink, wcid,
+int mt7996_mcu_add_sta(struct mt7996_dev *dev,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link,
+ int conn_state, bool newly)
+{
+ struct mt76_wcid *wcid = msta_link ? &msta_link->wcid : link->mt76.wcid;
+ struct ieee80211_sta *sta = link_sta ? link_sta->sta : NULL;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &link->mt76, wcid,
MT7996_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
/* starec basic */
- mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, &vif->bss_conf, link_sta,
+ mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, link_conf, link_sta,
conn_state, newly);
if (conn_state == CONN_STATE_DISCONNECT)
goto out;
/* starec hdr trans */
- mt7996_mcu_sta_hdr_trans_tlv(dev, skb, vif, wcid);
+ mt7996_mcu_sta_hdr_trans_tlv(dev, skb, link_conf->vif, wcid);
/* starec tx proc */
mt7996_mcu_sta_tx_proc_tlv(skb);
/* tag order is in accordance with firmware dependency. */
- if (sta) {
+ if (link_sta) {
/* starec hdrt mode */
mt7996_mcu_sta_hdrt_tlv(dev, skb);
- /* starec bfer */
- mt7996_mcu_sta_bfer_tlv(dev, skb, vif, sta);
+ if (conn_state == CONN_STATE_CONNECT) {
+ /* starec bfer */
+ mt7996_mcu_sta_bfer_tlv(dev, skb, link_conf, link_sta,
+ link);
+ /* starec bfee */
+ mt7996_mcu_sta_bfee_tlv(dev, skb, link_conf, link_sta,
+ link);
+ }
/* starec ht */
- mt7996_mcu_sta_ht_tlv(skb, sta);
+ mt7996_mcu_sta_ht_tlv(skb, link_sta);
/* starec vht */
- mt7996_mcu_sta_vht_tlv(skb, sta);
+ mt7996_mcu_sta_vht_tlv(skb, link_sta);
/* starec uapsd */
- mt76_connac_mcu_sta_uapsd(skb, vif, sta);
+ mt76_connac_mcu_sta_uapsd(skb, link_conf->vif, sta);
/* starec amsdu */
- mt7996_mcu_sta_amsdu_tlv(dev, skb, vif, sta);
+ mt7996_mcu_sta_amsdu_tlv(dev, skb, link_conf->vif, link_sta,
+ msta_link);
/* starec he */
- mt7996_mcu_sta_he_tlv(skb, sta);
+ mt7996_mcu_sta_he_tlv(skb, link_sta, link);
/* starec he 6g*/
- mt7996_mcu_sta_he_6g_tlv(skb, sta);
+ mt7996_mcu_sta_he_6g_tlv(skb, link_sta);
/* starec eht */
- mt7996_mcu_sta_eht_tlv(skb, sta);
+ mt7996_mcu_sta_eht_tlv(skb, link_sta);
/* starec muru */
- mt7996_mcu_sta_muru_tlv(dev, skb, vif, sta);
- /* starec bfee */
- mt7996_mcu_sta_bfee_tlv(dev, skb, vif, sta);
+ mt7996_mcu_sta_muru_tlv(dev, skb, link_conf, link_sta);
+
+ if (sta->mlo) {
+ mt7996_mcu_sta_mld_setup_tlv(dev, skb, sta);
+ mt7996_mcu_sta_eht_mld_tlv(dev, skb, sta);
+ }
}
- ret = mt7996_mcu_add_group(dev, vif, sta);
+ ret = mt7996_mcu_add_group(dev, link_conf->vif, sta);
if (ret) {
dev_kfree_skb(skb);
return ret;
@@ -2242,6 +2341,24 @@ out:
MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true);
}
+int mt7996_mcu_teardown_mld_sta(struct mt7996_dev *dev,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link)
+{
+ struct sk_buff *skb;
+
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &link->mt76,
+ &msta_link->wcid,
+ MT7996_STA_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt76_connac_mcu_add_tlv(skb, STA_REC_MLD_OFF, sizeof(struct tlv));
+
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true);
+}
+
static int
mt7996_mcu_sta_key_tlv(struct mt76_wcid *wcid,
struct sk_buff *skb,
@@ -2307,17 +2424,18 @@ int mt7996_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true);
}
-static int mt7996_mcu_get_pn(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- u8 *pn)
+static int mt7996_mcu_get_pn(struct mt7996_dev *dev,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link, u8 *pn)
{
#define TSC_TYPE_BIGTK_PN 2
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct sta_rec_pn_info *pn_info;
struct sk_buff *skb, *rskb;
struct tlv *tlv;
int ret;
- skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->deflink.mt76, &mvif->deflink.sta.wcid);
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &link->mt76,
+ &msta_link->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -2341,10 +2459,11 @@ static int mt7996_mcu_get_pn(struct mt7996_dev *dev, struct ieee80211_vif *vif,
return 0;
}
-int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link,
struct ieee80211_key_conf *key)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_mcu_bcn_prot_tlv *bcn_prot;
struct sk_buff *skb;
struct tlv *tlv;
@@ -2353,7 +2472,7 @@ int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev, struct ieee80211_vif *vif
sizeof(struct mt7996_mcu_bcn_prot_tlv);
int ret;
- skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->deflink.mt76, len);
+ skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &link->mt76, len);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -2361,7 +2480,7 @@ int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev, struct ieee80211_vif *vif
bcn_prot = (struct mt7996_mcu_bcn_prot_tlv *)tlv;
- ret = mt7996_mcu_get_pn(dev, vif, pn);
+ ret = mt7996_mcu_get_pn(dev, link, msta_link, pn);
if (ret) {
dev_kfree_skb(skb);
return ret;
@@ -2592,13 +2711,14 @@ out:
}
int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
- struct ieee80211_vif *vif, u32 changed)
+ struct ieee80211_bss_conf *link_conf,
+ struct mt7996_vif_link *link, u32 changed)
{
#define OFFLOAD_TX_MODE_SU BIT(0)
#define OFFLOAD_TX_MODE_MU BIT(1)
+ struct ieee80211_vif *vif = link_conf->vif;
struct ieee80211_hw *hw = mt76_hw(dev);
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_phy *phy = mt7996_vif_link_phy(&mvif->deflink);
+ struct mt7996_phy *phy = link->phy;
struct mt76_wcid *wcid = &dev->mt76.global_wcid;
struct bss_inband_discovery_tlv *discov;
struct ieee80211_tx_info *info;
@@ -2615,21 +2735,21 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
chandef = &phy->mt76->chandef;
band = chandef->chan->band;
- if (vif->bss_conf.nontransmitted)
+ if (link_conf->nontransmitted)
return 0;
- rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->deflink.mt76,
+ rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &link->mt76,
MT7996_MAX_BSS_OFFLOAD_SIZE);
if (IS_ERR(rskb))
return PTR_ERR(rskb);
if (changed & BSS_CHANGED_FILS_DISCOVERY &&
- vif->bss_conf.fils_discovery.max_interval) {
- interval = vif->bss_conf.fils_discovery.max_interval;
+ link_conf->fils_discovery.max_interval) {
+ interval = link_conf->fils_discovery.max_interval;
skb = ieee80211_get_fils_discovery_tmpl(hw, vif);
} else if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP &&
- vif->bss_conf.unsol_bcast_probe_resp_interval) {
- interval = vif->bss_conf.unsol_bcast_probe_resp_interval;
+ link_conf->unsol_bcast_probe_resp_interval) {
+ interval = link_conf->unsol_bcast_probe_resp_interval;
skb = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, vif);
}
@@ -4068,12 +4188,12 @@ mt7996_mcu_set_obss_spr_pd(struct mt7996_phy *phy,
}
static int
-mt7996_mcu_set_obss_spr_siga(struct mt7996_phy *phy, struct ieee80211_vif *vif,
+mt7996_mcu_set_obss_spr_siga(struct mt7996_phy *phy,
+ struct mt7996_vif_link *link,
struct ieee80211_he_obss_pd *he_obss_pd)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_dev *dev = phy->dev;
- u8 omac = mvif->deflink.mt76.omac_idx;
+ u8 omac = link->mt76.omac_idx;
struct {
u8 band_idx;
u8 __rsv[3];
@@ -4145,7 +4265,8 @@ mt7996_mcu_set_obss_spr_bitmap(struct mt7996_phy *phy,
sizeof(req), true);
}
-int mt7996_mcu_add_obss_spr(struct mt7996_phy *phy, struct ieee80211_vif *vif,
+int mt7996_mcu_add_obss_spr(struct mt7996_phy *phy,
+ struct mt7996_vif_link *link,
struct ieee80211_he_obss_pd *he_obss_pd)
{
int ret;
@@ -4179,7 +4300,7 @@ int mt7996_mcu_add_obss_spr(struct mt7996_phy *phy, struct ieee80211_vif *vif,
return ret;
/* Set SR prohibit */
- ret = mt7996_mcu_set_obss_spr_siga(phy, vif, he_obss_pd);
+ ret = mt7996_mcu_set_obss_spr_siga(phy, link, he_obss_pd);
if (ret)
return ret;
@@ -4215,7 +4336,7 @@ int mt7996_mcu_update_bss_color(struct mt7996_dev *dev,
#define TWT_AGRT_PROTECT BIT(2)
int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev,
- struct mt7996_vif *mvif,
+ struct mt7996_vif_link *link,
struct mt7996_twt_flow *flow,
int cmd)
{
@@ -4246,12 +4367,12 @@ int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev,
.len = cpu_to_le16(sizeof(req) - 4),
.tbl_idx = flow->table_id,
.cmd = cmd,
- .own_mac_idx = mvif->deflink.mt76.omac_idx,
+ .own_mac_idx = link->mt76.omac_idx,
.flowid = flow->id,
.peer_id = cpu_to_le16(flow->wcid),
.duration = flow->duration,
- .bss = mvif->deflink.mt76.idx,
- .bss_idx = mvif->deflink.mt76.idx,
+ .bss = link->mt76.idx,
+ .bss_idx = link->mt76.idx,
.start_tsf = cpu_to_le64(flow->tsf),
.mantissa = flow->mantissa,
.exponent = flow->exp,
@@ -4341,22 +4462,19 @@ int mt7996_mcu_rdd_cmd(struct mt7996_dev *dev, int cmd, u8 index,
int mt7996_mcu_wtbl_update_hdr_trans(struct mt7996_dev *dev,
struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_sta *msta;
struct sk_buff *skb;
- msta = sta ? (struct mt7996_sta *)sta->drv_priv : &mvif->deflink.sta;
-
- skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->deflink.mt76,
- &msta->wcid,
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &link->mt76,
+ &msta_link->wcid,
MT7996_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
/* starec hdr trans */
- mt7996_mcu_sta_hdr_trans_tlv(dev, skb, vif, &msta->wcid);
+ mt7996_mcu_sta_hdr_trans_tlv(dev, skb, vif, &msta_link->wcid);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true);
}
@@ -4579,7 +4697,7 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy)
struct sk_buff *skb;
int i, tx_power;
- tx_power = mt7996_get_power_bound(phy, phy->txpower);
+ tx_power = mt76_get_power_bound(mphy, phy->txpower);
tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan,
&la, tx_power);
mphy->txpower_cur = tx_power;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
index 43468bcaffc6..2ab6a53bee86 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
@@ -625,6 +625,35 @@ struct sta_rec_hdr_trans {
u8 mesh;
} __packed;
+struct sta_rec_mld_setup {
+ __le16 tag;
+ __le16 len;
+ u8 mld_addr[ETH_ALEN];
+ __le16 primary_id;
+ __le16 seconed_id;
+ __le16 setup_wcid;
+ u8 link_num;
+ u8 info;
+ u8 __rsv[2];
+ u8 link_info[];
+} __packed;
+
+struct sta_rec_eht_mld {
+ __le16 tag;
+ __le16 len;
+ u8 nsep;
+ u8 __rsv1[2];
+ u8 str_cap[__MT_MAX_BAND];
+ __le16 eml_cap;
+ u8 __rsv2[4];
+} __packed;
+
+struct mld_setup_link {
+ __le16 wcid;
+ u8 bss_idx;
+ u8 __rsv;
+} __packed;
+
struct hdr_trans_en {
__le16 tag;
__le16 len;
@@ -798,6 +827,9 @@ enum {
sizeof(struct sta_rec_eht) + \
sizeof(struct sta_rec_hdrt) + \
sizeof(struct sta_rec_hdr_trans) + \
+ sizeof(struct sta_rec_mld_setup) + \
+ sizeof(struct mld_setup_link) * 3 + \
+ sizeof(struct sta_rec_eht_mld) + \
sizeof(struct tlv))
#define MT7996_MAX_BEACON_SIZE 1338
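The added worst-case budget above can be checked against the packed layouts introduced earlier in this header: assuming ETH_ALEN is 6 and __MT_MAX_BAND is 3, sizeof(struct sta_rec_mld_setup) is 2 + 2 + 6 + 2 + 2 + 2 + 1 + 1 + 2 = 20 bytes, each struct mld_setup_link is 2 + 1 + 1 = 4 bytes, and sizeof(struct sta_rec_eht_mld) is 2 + 2 + 1 + 2 + 3 + 2 + 4 = 16 bytes, so MT7996_STA_UPDATE_MAX_SIZE grows by 20 + 3 * 4 + 16 = 48 bytes, enough for an MLD setup record carrying up to three per-link entries.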
@@ -809,18 +841,6 @@ enum {
#define MT7996_MAX_BSS_OFFLOAD_SIZE (MT7996_MAX_BEACON_SIZE + \
MT7996_BEACON_UPDATE_SIZE)
-static inline s8
-mt7996_get_power_bound(struct mt7996_phy *phy, s8 txpower)
-{
- struct mt76_phy *mphy = phy->mt76;
- int n_chains = hweight16(mphy->chainmask);
-
- txpower = mt76_get_sar_power(mphy, mphy->chandef.chan, txpower * 2);
- txpower -= mt76_tx_power_nss_delta(n_chains);
-
- return txpower;
-}
-
enum {
UNI_BAND_CONFIG_RADIO_ENABLE,
UNI_BAND_CONFIG_RTS_THRESHOLD = 0x08,
@@ -908,7 +928,8 @@ enum {
UNI_CMD_SER_SET_RECOVER_L3_TX_DISABLE,
UNI_CMD_SER_SET_RECOVER_L3_BF,
UNI_CMD_SER_SET_RECOVER_L4_MDP,
- UNI_CMD_SER_SET_RECOVER_FULL,
+ UNI_CMD_SER_SET_RECOVER_FROM_ETH,
+ UNI_CMD_SER_SET_RECOVER_FULL = 8,
UNI_CMD_SER_SET_SYSTEM_ASSERT,
/* action */
UNI_CMD_SER_ENABLE = 1,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
index 7a8ee6c75cf2..13b188e281bd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
@@ -281,7 +281,7 @@ static int mt7996_mmio_wed_reset(struct mtk_wed_device *wed)
if (test_and_set_bit(MT76_STATE_WED_RESET, &mphy->state))
return -EBUSY;
- ret = mt7996_mcu_set_ser(dev, UNI_CMD_SER_TRIGGER, UNI_CMD_SER_SET_RECOVER_L1,
+ ret = mt7996_mcu_set_ser(dev, UNI_CMD_SER_TRIGGER, UNI_CMD_SER_SET_RECOVER_FROM_ETH,
mphy->band_idx);
if (ret)
goto out;
@@ -618,9 +618,6 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
.rx_skb = mt7996_queue_rx_skb,
.rx_check = mt7996_rx_check,
.rx_poll_complete = mt7996_rx_poll_complete,
- .sta_add = mt7996_mac_sta_add,
- .sta_event = mt7996_mac_sta_event,
- .sta_remove = mt7996_mac_sta_remove,
.update_survey = mt7996_update_channel,
.set_channel = mt7996_set_channel,
.vif_link_add = mt7996_vif_link_add,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
index 29fabb9b04ae..43e646ed6094 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
@@ -185,10 +185,10 @@ struct mt7996_twt_flow {
DECLARE_EWMA(avg_signal, 10, 8)
-struct mt7996_sta {
+struct mt7996_sta_link {
struct mt76_wcid wcid; /* must be first */
- struct mt7996_vif *vif;
+ struct mt7996_sta *sta;
struct list_head rc_list;
u32 airtime_ac[8];
@@ -204,12 +204,22 @@ struct mt7996_sta {
u8 flowid_mask;
struct mt7996_twt_flow flow[MT7996_MAX_STA_TWT_AGRT];
} twt;
+
+ struct rcu_head rcu_head;
+};
+
+struct mt7996_sta {
+ struct mt7996_sta_link deflink; /* must be first */
+ struct mt7996_sta_link __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+ u8 deflink_id;
+
+ struct mt7996_vif *vif;
};
struct mt7996_vif_link {
struct mt76_vif_link mt76; /* must be first */
- struct mt7996_sta sta;
+ struct mt7996_sta_link msta_link;
struct mt7996_phy *phy;
struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
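With mt7996_sta now holding an RCU-protected array of mt7996_sta_link entries, per-link data is resolved through mt76_dereference(), as in the mcu.c hunks above. A minimal sketch of that lookup, using a hypothetical helper name and assuming the caller holds dev->mt76.mutex (which mt76_dereference() asserts via lockdep):

/* Hypothetical helper, for illustration only. */
static u16 example_link_wcid_idx(struct mt7996_dev *dev,
				 struct mt7996_sta *msta,
				 unsigned int link_id)
{
	struct mt7996_sta_link *msta_link;

	/* msta->link[] is __rcu; mt76_dereference() expects dev->mt76.mutex held */
	msta_link = mt76_dereference(msta->link[link_id], &dev->mt76);
	if (!msta_link)
		msta_link = &msta->deflink;	/* embedded default link */

	return msta_link->wcid.idx;
}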
@@ -525,7 +535,7 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
void __iomem *mem_base, u32 device_id);
void mt7996_wfsys_reset(struct mt7996_dev *dev);
irqreturn_t mt7996_irq_handler(int irq, void *dev_instance);
-u64 __mt7996_get_tsf(struct ieee80211_hw *hw, struct mt7996_vif *mvif);
+u64 __mt7996_get_tsf(struct ieee80211_hw *hw, struct mt7996_vif_link *link);
int mt7996_register_device(struct mt7996_dev *dev);
void mt7996_unregister_device(struct mt7996_dev *dev);
int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
@@ -553,7 +563,7 @@ int mt7996_run(struct mt7996_phy *phy);
int mt7996_mcu_init(struct mt7996_dev *dev);
int mt7996_mcu_init_firmware(struct mt7996_dev *dev);
int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev,
- struct mt7996_vif *mvif,
+ struct mt7996_vif_link *link,
struct mt7996_twt_flow *flow,
int cmd);
int mt7996_mcu_add_dev_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
@@ -561,35 +571,52 @@ int mt7996_mcu_add_dev_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
struct mt76_vif_link *mlink, bool enable);
int mt7996_mcu_add_bss_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
- struct mt76_vif_link *mlink, int enable);
-int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- struct mt76_vif_link *mlink,
- struct ieee80211_sta *sta, int conn_state, bool newly);
+ struct mt76_vif_link *mlink,
+ struct mt7996_sta_link *msta_link, int enable);
+int mt7996_mcu_add_sta(struct mt7996_dev *dev,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link,
+ int conn_state, bool newly);
+int mt7996_mcu_teardown_mld_sta(struct mt7996_dev *dev,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link);
int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev,
struct ieee80211_ampdu_params *params,
- bool add);
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link, bool enable);
int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
struct ieee80211_ampdu_params *params,
- bool add);
+ struct mt7996_vif_link *link, bool enable);
int mt7996_mcu_update_bss_color(struct mt7996_dev *dev,
struct mt76_vif_link *mlink,
struct cfg80211_he_bss_color *he_bss_color);
int mt7996_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf);
int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
- struct ieee80211_vif *vif, u32 changed);
-int mt7996_mcu_add_obss_spr(struct mt7996_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt7996_vif_link *link, u32 changed);
+int mt7996_mcu_add_obss_spr(struct mt7996_phy *phy,
+ struct mt7996_vif_link *link,
struct ieee80211_he_obss_pd *he_obss_pd);
-int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool changed);
+int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link, bool changed);
int mt7996_set_channel(struct mt76_phy *mphy);
int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag);
int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf);
int mt7996_mcu_set_fixed_rate_ctrl(struct mt7996_dev *dev,
void *data, u16 version);
-int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, void *data, u32 field);
+int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev,
+ struct ieee80211_link_sta *link_sta,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link,
+ void *data, u32 field);
int mt7996_mcu_set_eeprom(struct mt7996_dev *dev);
int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset, u8 *buf, u32 buf_len);
int mt7996_mcu_get_eeprom_free_block(struct mt7996_dev *dev, u8 *block_num);
@@ -683,26 +710,19 @@ bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask);
void mt7996_mac_reset_counters(struct mt7996_phy *phy);
void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy);
void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band);
-void mt7996_mac_enable_rtscts(struct mt7996_dev *dev,
- struct ieee80211_vif *vif, bool enable);
void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key, int pid,
enum mt76_txq_id qid, u32 changed);
void mt7996_mac_set_coverage_class(struct mt7996_phy *phy);
-int mt7996_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-int mt7996_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, enum mt76_sta_event ev);
-void mt7996_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
void mt7996_mac_work(struct work_struct *work);
void mt7996_mac_reset_work(struct work_struct *work);
void mt7996_mac_dump_work(struct work_struct *work);
void mt7996_mac_sta_rc_work(struct work_struct *work);
void mt7996_mac_update_stats(struct mt7996_phy *phy);
void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
- struct mt7996_sta *msta,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link,
u8 flowid);
void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
@@ -727,11 +747,14 @@ bool mt7996_debugfs_rx_log(struct mt7996_dev *dev, const void *data, int len);
int mt7996_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_key_conf *key, int mcu_cmd,
struct mt76_wcid *wcid, enum set_key_cmd cmd);
-int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link,
struct ieee80211_key_conf *key);
int mt7996_mcu_wtbl_update_hdr_trans(struct mt7996_dev *dev,
struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link);
int mt7996_mcu_cp_support(struct mt7996_dev *dev, u8 mode);
#ifdef CONFIG_MAC80211_DEBUGFS
void mt7996_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/mediatek/mt76/scan.c b/drivers/net/wireless/mediatek/mt76/scan.c
index 1c4f9deaaada..9b20ccbeb8cf 100644
--- a/drivers/net/wireless/mediatek/mt76/scan.c
+++ b/drivers/net/wireless/mediatek/mt76/scan.c
@@ -52,11 +52,6 @@ mt76_scan_send_probe(struct mt76_dev *dev, struct cfg80211_ssid *ssid)
ether_addr_copy(hdr->addr3, req->bssid);
}
- info = IEEE80211_SKB_CB(skb);
- if (req->no_cck)
- info->flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
- info->control.flags |= IEEE80211_TX_CTRL_DONT_USE_RATE_MASK;
-
if (req->ie_len)
skb_put_data(skb, req->ie, req->ie_len);
@@ -64,10 +59,20 @@ mt76_scan_send_probe(struct mt76_dev *dev, struct cfg80211_ssid *ssid)
skb_set_queue_mapping(skb, IEEE80211_AC_VO);
rcu_read_lock();
- if (ieee80211_tx_prepare_skb(phy->hw, vif, skb, band, NULL))
- mt76_tx(phy, NULL, mvif->wcid, skb);
- else
+
+ if (!ieee80211_tx_prepare_skb(phy->hw, vif, skb, band, NULL)) {
ieee80211_free_txskb(phy->hw, skb);
+ goto out;
+ }
+
+ info = IEEE80211_SKB_CB(skb);
+ if (req->no_cck)
+ info->flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
+ info->control.flags |= IEEE80211_TX_CTRL_DONT_USE_RATE_MASK;
+
+ mt76_tx(phy, NULL, mvif->wcid, skb);
+
+out:
rcu_read_unlock();
}
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index af0c50c983ec..513916469ca2 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -100,7 +100,8 @@ __mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
return;
/* Tx status can be unreliable. if it fails, mark the frame as ACKed */
- if (flags & MT_TX_CB_TXS_FAILED) {
+ if (flags & MT_TX_CB_TXS_FAILED &&
+ (dev->drv->drv_flags & MT_DRV_IGNORE_TXS_FAILED)) {
info->status.rates[0].count = 0;
info->status.rates[0].idx = -1;
info->flags |= IEEE80211_TX_STAT_ACK;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index 674461fa7fb3..eae35b678952 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -1510,10 +1510,15 @@ enum qlink_tlv_id {
};
struct qlink_tlv_hdr {
- __le16 type;
- __le16 len;
+ /* New members MUST be added within the struct_group() macro below. */
+ __struct_group(qlink_tlv_hdr_fixed, __hdr, __packed,
+ __le16 type;
+ __le16 len;
+ );
u8 val[];
} __packed;
+static_assert(offsetof(struct qlink_tlv_hdr, val) == sizeof(struct qlink_tlv_hdr_fixed),
+ "struct member likely outside of __struct_group()");
struct qlink_iface_limit {
__le16 max_num;
@@ -1567,7 +1572,7 @@ enum qlink_reg_rule_flags {
* @dfs_cac_ms: DFS CAC period.
*/
struct qlink_tlv_reg_rule {
- struct qlink_tlv_hdr hdr;
+ struct qlink_tlv_hdr_fixed hdr;
__le32 start_freq_khz;
__le32 end_freq_khz;
__le32 max_bandwidth_khz;
@@ -1606,7 +1611,7 @@ enum qlink_dfs_state {
* @channel: ieee80211 channel settings.
*/
struct qlink_tlv_channel {
- struct qlink_tlv_hdr hdr;
+ struct qlink_tlv_hdr_fixed hdr;
struct qlink_channel chan;
} __packed;
@@ -1618,7 +1623,7 @@ struct qlink_tlv_channel {
* @chan: channel definition data.
*/
struct qlink_tlv_chandef {
- struct qlink_tlv_hdr hdr;
+ struct qlink_tlv_hdr_fixed hdr;
struct qlink_chandef chdef;
} __packed;
@@ -1643,7 +1648,7 @@ enum qlink_ie_set_type {
* @ie_data: IEs data.
*/
struct qlink_tlv_ie_set {
- struct qlink_tlv_hdr hdr;
+ struct qlink_tlv_hdr_fixed hdr;
u8 type;
u8 flags;
u8 rsvd[2];
@@ -1657,7 +1662,7 @@ struct qlink_tlv_ie_set {
* @ie_data: IEs data.
*/
struct qlink_tlv_ext_ie {
- struct qlink_tlv_hdr hdr;
+ struct qlink_tlv_hdr_fixed hdr;
u8 eid_ext;
u8 rsvd[3];
u8 ie_data[];
@@ -1678,7 +1683,7 @@ struct qlink_sband_iftype_data {
* @iftype_data: interface type data entries.
*/
struct qlink_tlv_iftype_data {
- struct qlink_tlv_hdr hdr;
+ struct qlink_tlv_hdr_fixed hdr;
u8 n_iftype_data;
u8 rsvd[3];
struct qlink_sband_iftype_data iftype_data[];
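The qlink.h conversion above follows the kernel's __struct_group() idiom: the fixed header fields gain a second, flex-array-free type (qlink_tlv_hdr_fixed) that other TLV structs can embed ahead of further members, avoiding a flexible array member in the middle of a structure, while the static_assert() keeps the two layouts in sync. A minimal sketch of the same idiom with hypothetical demo_* names:

struct demo_tlv_hdr {
	/* New members MUST be added within the struct_group() macro below. */
	__struct_group(demo_tlv_hdr_fixed, __hdr, __packed,
		__le16 type;
		__le16 len;
	);
	u8 val[];			/* flexible payload, must stay last */
} __packed;
static_assert(offsetof(struct demo_tlv_hdr, val) == sizeof(struct demo_tlv_hdr_fixed),
	      "struct member likely outside of __struct_group()");

/* Embedding the fixed-size view keeps the flexible array out of the middle. */
struct demo_tlv_rule {
	struct demo_tlv_hdr_fixed hdr;
	__le32 value;
} __packed;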
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
index 5323acff962a..45775ecdf221 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
@@ -842,7 +842,7 @@ int rt2800mmio_probe_hw(struct rt2x00_dev *rt2x00dev)
/*
* Set txstatus timer function.
*/
- rt2x00dev->txstatus_timer.function = rt2800mmio_tx_sta_fifo_timeout;
+ hrtimer_update_function(&rt2x00dev->txstatus_timer, rt2800mmio_tx_sta_fifo_timeout);
/*
* Overwrite TX done handler
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index 160bef79acdb..b51a23300ba2 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -618,7 +618,7 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
/*
* Set txstatus timer function.
*/
- rt2x00dev->txstatus_timer.function = rt2800usb_tx_sta_fifo_timeout;
+ hrtimer_update_function(&rt2x00dev->txstatus_timer, rt2800usb_tx_sta_fifo_timeout);
/*
* Overwrite TX done handler
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index 9e7d9dbe954c..432ddfac2c33 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -1391,8 +1391,8 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
mutex_init(&rt2x00dev->conf_mutex);
INIT_LIST_HEAD(&rt2x00dev->bar_list);
spin_lock_init(&rt2x00dev->bar_list_lock);
- hrtimer_init(&rt2x00dev->txstatus_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
+ hrtimer_setup(&rt2x00dev->txstatus_timer, hrtimer_dummy_timeout, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
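The rt2x00 hunks above move from hrtimer_init() plus a direct .function assignment to the combined hrtimer_setup() initializer, with hrtimer_update_function() used for later callback changes. A minimal sketch of the new-style usage, assuming a hypothetical my_dev structure and callback:

#include <linux/hrtimer.h>

struct my_dev {
	struct hrtimer txstatus_timer;
};

static enum hrtimer_restart my_txstatus_timeout(struct hrtimer *timer)
{
	return HRTIMER_NORESTART;
}

static void my_dev_init(struct my_dev *dev)
{
	/* Clock, mode and a placeholder callback are set in a single call,
	 * mirroring the hrtimer_dummy_timeout() use in rt2x00dev.c above.
	 */
	hrtimer_setup(&dev->txstatus_timer, hrtimer_dummy_timeout,
		      CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}

static void my_dev_probe_hw(struct my_dev *dev)
{
	/* Bus-specific probe installs the real handler without touching .function. */
	hrtimer_update_function(&dev->txstatus_timer, my_txstatus_timeout);
}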
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/8192c.c b/drivers/net/wireless/realtek/rtl8xxxu/8192c.c
index 0abb1b092bc2..73034e7e41d1 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/8192c.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/8192c.c
@@ -644,6 +644,8 @@ struct rtl8xxxu_fileops rtl8192cu_fops = {
.rx_agg_buf_size = 16000,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16),
+ .supports_ap = 1,
+ .max_macid_num = 32,
.max_sec_cam_num = 32,
.adda_1t_init = 0x0b1b25a0,
.adda_1t_path_on = 0x0bdb25a0,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c
index 4ce0c05c5129..569856ca677f 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c
@@ -860,9 +860,10 @@ rtl8xxxu_writeN(struct rtl8xxxu_priv *priv, u16 addr, u8 *buf, u16 len)
return len;
write_error:
- dev_info(&udev->dev,
- "%s: Failed to write block at addr: %04x size: %04x\n",
- __func__, addr, blocksize);
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_WRITE)
+ dev_info(&udev->dev,
+ "%s: Failed to write block at addr: %04x size: %04x\n",
+ __func__, addr, blocksize);
return -EAGAIN;
}
@@ -4064,8 +4065,14 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
*/
rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, fops->trxff_boundary);
- ret = rtl8xxxu_download_firmware(priv);
- dev_dbg(dev, "%s: download_firmware %i\n", __func__, ret);
+ for (int retry = 5; retry >= 0 ; retry--) {
+ ret = rtl8xxxu_download_firmware(priv);
+ dev_dbg(dev, "%s: download_firmware %i\n", __func__, ret);
+ if (ret != -EAGAIN)
+ break;
+ if (retry)
+ dev_dbg(dev, "%s: retry firmware download\n", __func__);
+ }
if (ret)
goto exit;
ret = rtl8xxxu_start_firmware(priv);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
index d429560009bb..e07402e73ba3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
@@ -484,7 +484,7 @@ bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
* pathA or mac1 has to set phy0&phy1 pathA */
if ((content == radiob_txt) && (rfpath == RF90_PATH_A)) {
rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- " ===> althougth Path A, we load radiob.txt\n");
+ " ===> although Path A, we load radiob.txt\n");
radioa_arraylen = radiob_arraylen;
radioa_array_table = radiob_array_table;
}
@@ -750,7 +750,7 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
&& rtlhal->interfaceindex == 1) {
need_pwr_down = rtl92d_phy_enable_anotherphy(hw, false);
rtlhal->during_mac1init_radioa = true;
- /* asume no this case */
+ /* assume no this case */
if (need_pwr_down)
rtl92d_phy_enable_rf_env(hw, path,
&u4regvalue);
@@ -1885,7 +1885,7 @@ static void _rtl92d_phy_reload_lck_setting(struct ieee80211_hw *hw,
bneed_powerdown_radio =
rtl92d_phy_enable_anotherphy(hw, false);
rtlpriv->rtlhal.during_mac1init_radioa = true;
- /* asume no this case */
+ /* assume no this case */
if (bneed_powerdown_radio)
rtl92d_phy_enable_rf_env(hw, erfpath,
&u4regvalue);
diff --git a/drivers/net/wireless/realtek/rtw88/Kconfig b/drivers/net/wireless/realtek/rtw88/Kconfig
index ab21b8059e0b..3736f290bd42 100644
--- a/drivers/net/wireless/realtek/rtw88/Kconfig
+++ b/drivers/net/wireless/realtek/rtw88/Kconfig
@@ -54,6 +54,9 @@ config RTW88_8812A
tristate
select RTW88_88XXA
+config RTW88_8814A
+ tristate
+
config RTW88_8822BE
tristate "Realtek 8822BE PCI wireless network adapter"
depends on PCI
@@ -222,6 +225,28 @@ config RTW88_8812AU
802.11ac USB wireless network adapter
+config RTW88_8814AE
+ tristate "Realtek 8814AE PCI wireless network adapter"
+ depends on PCI
+ select RTW88_CORE
+ select RTW88_PCI
+ select RTW88_8814A
+ help
+ Select this option will enable support for 8814AE chipset
+
+ 802.11ac PCIe wireless network adapter
+
+config RTW88_8814AU
+ tristate "Realtek 8814AU USB wireless network adapter"
+ depends on USB
+ select RTW88_CORE
+ select RTW88_USB
+ select RTW88_8814A
+ help
+ Select this option will enable support for 8814AU chipset
+
+ 802.11ac USB wireless network adapter
+
config RTW88_DEBUG
bool "Realtek rtw88 debug support"
depends on RTW88_CORE
diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile
index 0cbbb366e393..0b3da05a2938 100644
--- a/drivers/net/wireless/realtek/rtw88/Makefile
+++ b/drivers/net/wireless/realtek/rtw88/Makefile
@@ -94,6 +94,15 @@ rtw88_8821au-objs := rtw8821au.o
obj-$(CONFIG_RTW88_8812AU) += rtw88_8812au.o
rtw88_8812au-objs := rtw8812au.o
+obj-$(CONFIG_RTW88_8814A) += rtw88_8814a.o
+rtw88_8814a-objs := rtw8814a.o rtw8814a_table.o
+
+obj-$(CONFIG_RTW88_8814AE) += rtw88_8814ae.o
+rtw88_8814ae-objs := rtw8814ae.o
+
+obj-$(CONFIG_RTW88_8814AU) += rtw88_8814au.o
+rtw88_8814au-objs := rtw8814au.o
+
obj-$(CONFIG_RTW88_PCI) += rtw88_pci.o
rtw88_pci-objs := pci.o
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index 364ec0436d0f..b67d69b01f87 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -654,10 +654,10 @@ static void rtw_print_rate(struct seq_file *m, u8 rate)
case DESC_RATE6M...DESC_RATE54M:
rtw_print_ofdm_rate_txt(m, rate);
break;
- case DESC_RATEMCS0...DESC_RATEMCS15:
+ case DESC_RATEMCS0...DESC_RATEMCS31:
rtw_print_ht_rate_txt(m, rate);
break;
- case DESC_RATEVHT1SS_MCS0...DESC_RATEVHT2SS_MCS9:
+ case DESC_RATEVHT1SS_MCS0...DESC_RATEVHT4SS_MCS9:
rtw_print_vht_rate_txt(m, rate);
break;
default:
@@ -692,9 +692,11 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
{
struct rtw_debugfs_priv *debugfs_priv = m->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ struct rtw_power_params pwr_param = {0};
struct rtw_hal *hal = &rtwdev->hal;
+ u8 nss = rtwdev->efuse.hw_cap.nss;
u8 path, rate, bw, ch, regd;
- struct rtw_power_params pwr_param = {0};
+ u8 max_ht_rate, max_rate;
mutex_lock(&rtwdev->mutex);
bw = hal->current_band_width;
@@ -707,19 +709,23 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
seq_printf(m, "%-4s %-10s %-9s %-9s (%-4s %-4s %-4s) %-4s\n",
"path", "rate", "pwr", "base", "byr", "lmt", "sar", "rem");
+ max_ht_rate = DESC_RATEMCS0 + nss * 8 - 1;
+
+ if (rtwdev->chip->vht_supported)
+ max_rate = DESC_RATEVHT1SS_MCS0 + nss * 10 - 1;
+ else
+ max_rate = max_ht_rate;
+
mutex_lock(&hal->tx_power_mutex);
- for (path = RF_PATH_A; path <= RF_PATH_B; path++) {
+ for (path = RF_PATH_A; path < hal->rf_path_num; path++) {
/* there is no CCK rates used in 5G */
if (hal->current_band_type == RTW_BAND_5G)
rate = DESC_RATE6M;
else
rate = DESC_RATE1M;
- /* now, not support vht 3ss and vht 4ss*/
- for (; rate <= DESC_RATEVHT2SS_MCS9; rate++) {
- /* now, not support ht 3ss and ht 4ss*/
- if (rate > DESC_RATEMCS15 &&
- rate < DESC_RATEVHT1SS_MCS0)
+ for (; rate <= max_rate; rate++) {
+ if (rate > max_ht_rate && rate <= DESC_RATEMCS31)
continue;
rtw_get_tx_power_params(rtwdev, path, rate, bw,
@@ -849,20 +855,28 @@ static int rtw_debugfs_get_phy_info(struct seq_file *m, void *v)
last_cnt->num_qry_pkt[rate_id + 9]);
}
- seq_printf(m, "[RSSI(dBm)] = {%d, %d}\n",
+ seq_printf(m, "[RSSI(dBm)] = {%d, %d, %d, %d}\n",
dm_info->rssi[RF_PATH_A] - 100,
- dm_info->rssi[RF_PATH_B] - 100);
- seq_printf(m, "[Rx EVM(dB)] = {-%d, -%d}\n",
+ dm_info->rssi[RF_PATH_B] - 100,
+ dm_info->rssi[RF_PATH_C] - 100,
+ dm_info->rssi[RF_PATH_D] - 100);
+ seq_printf(m, "[Rx EVM(dB)] = {-%d, -%d, -%d, -%d}\n",
dm_info->rx_evm_dbm[RF_PATH_A],
- dm_info->rx_evm_dbm[RF_PATH_B]);
- seq_printf(m, "[Rx SNR] = {%d, %d}\n",
+ dm_info->rx_evm_dbm[RF_PATH_B],
+ dm_info->rx_evm_dbm[RF_PATH_C],
+ dm_info->rx_evm_dbm[RF_PATH_D]);
+ seq_printf(m, "[Rx SNR] = {%d, %d, %d, %d}\n",
dm_info->rx_snr[RF_PATH_A],
- dm_info->rx_snr[RF_PATH_B]);
- seq_printf(m, "[CFO_tail(KHz)] = {%d, %d}\n",
+ dm_info->rx_snr[RF_PATH_B],
+ dm_info->rx_snr[RF_PATH_C],
+ dm_info->rx_snr[RF_PATH_D]);
+ seq_printf(m, "[CFO_tail(KHz)] = {%d, %d, %d, %d}\n",
dm_info->cfo_tail[RF_PATH_A],
- dm_info->cfo_tail[RF_PATH_B]);
+ dm_info->cfo_tail[RF_PATH_B],
+ dm_info->cfo_tail[RF_PATH_C],
+ dm_info->cfo_tail[RF_PATH_D]);
- if (dm_info->curr_rx_rate >= DESC_RATE11M) {
+ if (dm_info->curr_rx_rate >= DESC_RATE6M) {
seq_puts(m, "[Rx Average Status]:\n");
seq_printf(m, " * OFDM, EVM: {-%d}, SNR: {%d}\n",
(u8)ewma_evm_read(&ewma_evm[RTW_EVM_OFDM]),
@@ -875,6 +889,13 @@ static int rtw_debugfs_get_phy_info(struct seq_file *m, void *v)
(u8)ewma_evm_read(&ewma_evm[RTW_EVM_2SS_B]),
(u8)ewma_snr_read(&ewma_snr[RTW_SNR_2SS_A]),
(u8)ewma_snr_read(&ewma_snr[RTW_SNR_2SS_B]));
+ seq_printf(m, " * 3SS, EVM: {-%d, -%d, -%d}, SNR: {%d, %d, %d}\n",
+ (u8)ewma_evm_read(&ewma_evm[RTW_EVM_3SS_A]),
+ (u8)ewma_evm_read(&ewma_evm[RTW_EVM_3SS_B]),
+ (u8)ewma_evm_read(&ewma_evm[RTW_EVM_3SS_C]),
+ (u8)ewma_snr_read(&ewma_snr[RTW_SNR_3SS_A]),
+ (u8)ewma_snr_read(&ewma_snr[RTW_SNR_3SS_B]),
+ (u8)ewma_snr_read(&ewma_snr[RTW_SNR_3SS_C]));
}
seq_puts(m, "[Rx Counter]:\n");
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index 02389b7c6876..6b563ac489a7 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -735,6 +735,7 @@ void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
{
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
bool disable_pt = true;
+ u32 mask_hi;
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RA_INFO);
@@ -755,6 +756,20 @@ void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
si->init_ra_lv = 0;
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+
+ if (rtwdev->chip->id != RTW_CHIP_TYPE_8814A)
+ return;
+
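+ /* The 8814A rate mask does not fit in 32 bits; send the upper
+ * 32 bits in a second RA_INFO_HI H2C command.
+ */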
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RA_INFO_HI);
+
+ mask_hi = si->ra_mask >> 32;
+
+ SET_RA_INFO_RA_MASK0(h2c_pkt, (mask_hi & 0xff));
+ SET_RA_INFO_RA_MASK1(h2c_pkt, (mask_hi & 0xff00) >> 8);
+ SET_RA_INFO_RA_MASK2(h2c_pkt, (mask_hi & 0xff0000) >> 16);
+ SET_RA_INFO_RA_MASK3(h2c_pkt, (mask_hi & 0xff000000) >> 24);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool connect)
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index 404de1b0c407..48ad9ceab6ea 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -557,6 +557,7 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define H2C_CMD_DEFAULT_PORT 0x2c
#define H2C_CMD_RA_INFO 0x40
#define H2C_CMD_RSSI_MONITOR 0x42
+#define H2C_CMD_RA_INFO_HI 0x46
#define H2C_CMD_BCN_FILTER_OFFLOAD_P0 0x56
#define H2C_CMD_BCN_FILTER_OFFLOAD_P1 0x57
#define H2C_CMD_WL_PHY_INFO 0x58
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index cae9cca6dca3..0491f501c138 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -291,6 +291,7 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
if (rtw_read8(rtwdev, REG_CR) == 0xea)
cur_pwr = false;
else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
+ chip->id != RTW_CHIP_TYPE_8814A &&
(rtw_read8(rtwdev, REG_SYS_STATUS1 + 1) & BIT(0)))
cur_pwr = false;
else
@@ -784,7 +785,8 @@ static int __rtw_download_firmware(struct rtw_dev *rtwdev,
if (!check_firmware_size(data, size))
return -EINVAL;
- if (!ltecoex_read_reg(rtwdev, 0x38, &ltecoex_bckp))
+ if (rtwdev->chip->ltecoex_addr &&
+ !ltecoex_read_reg(rtwdev, 0x38, &ltecoex_bckp))
return -EBUSY;
wlan_cpu_enable(rtwdev, false);
@@ -802,7 +804,8 @@ static int __rtw_download_firmware(struct rtw_dev *rtwdev,
wlan_cpu_enable(rtwdev, true);
- if (!ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp)) {
+ if (rtwdev->chip->ltecoex_addr &&
+ !ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp)) {
ret = -EBUSY;
goto dlfw_fail;
}
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 0cee0fd8c0ef..959f56a3cc1a 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -136,7 +136,7 @@ u16 rtw_desc_to_bitrate(u8 desc_rate)
return rate.bitrate;
}
-static struct ieee80211_supported_band rtw_band_2ghz = {
+static const struct ieee80211_supported_band rtw_band_2ghz = {
.band = NL80211_BAND_2GHZ,
.channels = rtw_channeltable_2g,
@@ -149,7 +149,7 @@ static struct ieee80211_supported_band rtw_band_2ghz = {
.vht_cap = {0},
};
-static struct ieee80211_supported_band rtw_band_5ghz = {
+static const struct ieee80211_supported_band rtw_band_5ghz = {
.band = NL80211_BAND_5GHZ,
.channels = rtw_channeltable_5g,
@@ -1234,7 +1234,9 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
ldpc_en = VHT_LDPC_EN;
} else if (sta->deflink.ht_cap.ht_supported) {
- ra_mask |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20) |
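+ /* Legacy CCK/OFDM rates occupy bits 0-11 of ra_mask, so the HT MCS
+ * bitmap for spatial stream N starts at bit 12 + 8 * N.
+ */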
+ ra_mask |= ((u64)sta->deflink.ht_cap.mcs.rx_mask[3] << 36) |
+ ((u64)sta->deflink.ht_cap.mcs.rx_mask[2] << 28) |
+ (sta->deflink.ht_cap.mcs.rx_mask[1] << 20) |
(sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
stbc_en = HT_STBC_EN;
@@ -1244,6 +1246,9 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
if (efuse->hw_cap.nss == 1 || rtwdev->hal.txrx_1ss)
ra_mask &= RA_MASK_VHT_RATES_1SS | RA_MASK_HT_RATES_1SS;
+ else if (efuse->hw_cap.nss == 2)
+ ra_mask &= RA_MASK_VHT_RATES_2SS | RA_MASK_HT_RATES_2SS |
+ RA_MASK_VHT_RATES_1SS | RA_MASK_HT_RATES_1SS;
if (hal->current_band_type == RTW_BAND_5G) {
ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 4;
@@ -1302,10 +1307,9 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
break;
}
- if (sta->deflink.vht_cap.vht_supported && ra_mask & 0xffc00000)
- tx_num = 2;
- else if (sta->deflink.ht_cap.ht_supported && ra_mask & 0xfff00000)
- tx_num = 2;
+ if (sta->deflink.vht_cap.vht_supported ||
+ sta->deflink.ht_cap.ht_supported)
+ tx_num = efuse->hw_cap.nss;
rate_id = get_rate_id(wireless_set, bw_mode, tx_num);
@@ -1561,6 +1565,7 @@ static void rtw_init_ht_cap(struct rtw_dev *rtwdev,
{
const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
+ int i;
ht_cap->ht_supported = true;
ht_cap->cap = 0;
@@ -1580,25 +1585,20 @@ static void rtw_init_ht_cap(struct rtw_dev *rtwdev,
ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
ht_cap->ampdu_density = chip->ampdu_density;
ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
- if (efuse->hw_cap.nss > 1) {
- ht_cap->mcs.rx_mask[0] = 0xFF;
- ht_cap->mcs.rx_mask[1] = 0xFF;
- ht_cap->mcs.rx_mask[4] = 0x01;
- ht_cap->mcs.rx_highest = cpu_to_le16(300);
- } else {
- ht_cap->mcs.rx_mask[0] = 0xFF;
- ht_cap->mcs.rx_mask[1] = 0x00;
- ht_cap->mcs.rx_mask[4] = 0x01;
- ht_cap->mcs.rx_highest = cpu_to_le16(150);
- }
+
+ for (i = 0; i < efuse->hw_cap.nss; i++)
+ ht_cap->mcs.rx_mask[i] = 0xFF;
+ ht_cap->mcs.rx_mask[4] = 0x01;
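+ /* HT MCS7 with a short GI on a 40 MHz channel is 150 Mbps per stream. */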
+ ht_cap->mcs.rx_highest = cpu_to_le16(150 * efuse->hw_cap.nss);
}
static void rtw_init_vht_cap(struct rtw_dev *rtwdev,
struct ieee80211_sta_vht_cap *vht_cap)
{
struct rtw_efuse *efuse = &rtwdev->efuse;
- u16 mcs_map;
+ u16 mcs_map = 0;
__le16 highest;
+ int i;
if (efuse->hw_cap.ptcl != EFUSE_HW_CAP_IGNORE &&
efuse->hw_cap.ptcl != EFUSE_HW_CAP_PTCL_VHT)
@@ -1621,21 +1621,15 @@ static void rtw_init_vht_cap(struct rtw_dev *rtwdev,
if (rtw_chip_has_rx_ldpc(rtwdev))
vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;
- mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
- if (efuse->hw_cap.nss > 1) {
- highest = cpu_to_le16(780);
- mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << 2;
- } else {
- highest = cpu_to_le16(390);
- mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << 2;
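+ /* The VHT MCS map carries a 2-bit field for each of 8 possible
+ * spatial streams; advertise MCS 0-9 for every stream the hardware
+ * actually has.
+ */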
+ for (i = 0; i < 8; i++) {
+ if (i < efuse->hw_cap.nss)
+ mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
+ else
+ mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
}
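+ /* VHT MCS9 at 80 MHz with a long GI is 390 Mbps per spatial stream. */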
+ highest = cpu_to_le16(390 * efuse->hw_cap.nss);
+
vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
vht_cap->vht_mcs.rx_highest = highest;
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 62cd4c526301..02343e059fd9 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -61,7 +61,7 @@ enum rtw_hci_type {
};
struct rtw_hci {
- struct rtw_hci_ops *ops;
+ const struct rtw_hci_ops *ops;
enum rtw_hci_type type;
u32 rpwm_addr;
@@ -166,9 +166,14 @@ enum rtw_rate_section {
RTW_RATE_SECTION_HT_2S,
RTW_RATE_SECTION_VHT_1S,
RTW_RATE_SECTION_VHT_2S,
+ __RTW_RATE_SECTION_2SS_MAX = RTW_RATE_SECTION_VHT_2S,
+ RTW_RATE_SECTION_HT_3S,
+ RTW_RATE_SECTION_HT_4S,
+ RTW_RATE_SECTION_VHT_3S,
+ RTW_RATE_SECTION_VHT_4S,
/* keep last */
- RTW_RATE_SECTION_MAX,
+ RTW_RATE_SECTION_NUM,
};
enum rtw_wireless_set {
@@ -191,6 +196,7 @@ enum rtw_chip_type {
RTW_CHIP_TYPE_8703B,
RTW_CHIP_TYPE_8821A,
RTW_CHIP_TYPE_8812A,
+ RTW_CHIP_TYPE_8814A,
};
enum rtw_tx_queue_type {
@@ -380,6 +386,9 @@ enum rtw_evm {
RTW_EVM_1SS,
RTW_EVM_2SS_A,
RTW_EVM_2SS_B,
+ RTW_EVM_3SS_A,
+ RTW_EVM_3SS_B,
+ RTW_EVM_3SS_C,
/* keep it last */
RTW_EVM_NUM
};
@@ -397,6 +406,10 @@ enum rtw_snr {
RTW_SNR_2SS_B,
RTW_SNR_2SS_C,
RTW_SNR_2SS_D,
+ RTW_SNR_3SS_A,
+ RTW_SNR_3SS_B,
+ RTW_SNR_3SS_C,
+ RTW_SNR_3SS_D,
/* keep it last */
RTW_SNR_NUM
};
@@ -822,7 +835,7 @@ struct rtw_vif {
};
struct rtw_regulatory {
- char alpha2[2];
+ char alpha2[2] __nonstring;
u8 txpwr_regd_2g;
u8 txpwr_regd_5g;
};
@@ -1130,14 +1143,26 @@ struct rtw_rfe_def {
* For 2G there are cck rate and ofdm rate with different settings.
*/
struct rtw_pwr_track_tbl {
+ const u8 *pwrtrk_5gd_n[RTW_PWR_TRK_5G_NUM];
+ const u8 *pwrtrk_5gd_p[RTW_PWR_TRK_5G_NUM];
+ const u8 *pwrtrk_5gc_n[RTW_PWR_TRK_5G_NUM];
+ const u8 *pwrtrk_5gc_p[RTW_PWR_TRK_5G_NUM];
const u8 *pwrtrk_5gb_n[RTW_PWR_TRK_5G_NUM];
const u8 *pwrtrk_5gb_p[RTW_PWR_TRK_5G_NUM];
const u8 *pwrtrk_5ga_n[RTW_PWR_TRK_5G_NUM];
const u8 *pwrtrk_5ga_p[RTW_PWR_TRK_5G_NUM];
+ const u8 *pwrtrk_2gd_n;
+ const u8 *pwrtrk_2gd_p;
+ const u8 *pwrtrk_2gc_n;
+ const u8 *pwrtrk_2gc_p;
const u8 *pwrtrk_2gb_n;
const u8 *pwrtrk_2gb_p;
const u8 *pwrtrk_2ga_n;
const u8 *pwrtrk_2ga_p;
+ const u8 *pwrtrk_2g_cckd_n;
+ const u8 *pwrtrk_2g_cckd_p;
+ const u8 *pwrtrk_2g_cckc_n;
+ const u8 *pwrtrk_2g_cckc_p;
const u8 *pwrtrk_2g_cckb_n;
const u8 *pwrtrk_2g_cckb_p;
const u8 *pwrtrk_2g_ccka_n;
@@ -1227,8 +1252,8 @@ struct rtw_chip_info {
const struct rtw_hw_reg *dig;
const struct rtw_hw_reg *dig_cck;
- u32 rf_base_addr[2];
- u32 rf_sipi_addr[2];
+ u32 rf_base_addr[RTW_RF_PATH_MAX];
+ u32 rf_sipi_addr[RTW_RF_PATH_MAX];
const struct rtw_rf_sipi_addr *rf_sipi_read_addr;
u8 fix_rf_phy_num;
const struct rtw_ltecoex_addr *ltecoex_addr;
@@ -1924,7 +1949,7 @@ union rtw_sar_cfg {
struct rtw_sar {
enum rtw_sar_sources src;
- union rtw_sar_cfg cfg[RTW_RF_PATH_MAX][RTW_RATE_SECTION_MAX];
+ union rtw_sar_cfg cfg[RTW_RF_PATH_MAX][RTW_RATE_SECTION_NUM];
};
struct rtw_hal {
@@ -1968,16 +1993,16 @@ struct rtw_hal {
s8 tx_pwr_by_rate_offset_5g[RTW_RF_PATH_MAX]
[DESC_RATE_MAX];
s8 tx_pwr_by_rate_base_2g[RTW_RF_PATH_MAX]
- [RTW_RATE_SECTION_MAX];
+ [RTW_RATE_SECTION_NUM];
s8 tx_pwr_by_rate_base_5g[RTW_RF_PATH_MAX]
- [RTW_RATE_SECTION_MAX];
+ [RTW_RATE_SECTION_NUM];
s8 tx_pwr_limit_2g[RTW_REGD_MAX]
[RTW_CHANNEL_WIDTH_MAX]
- [RTW_RATE_SECTION_MAX]
+ [RTW_RATE_SECTION_NUM]
[RTW_MAX_CHANNEL_NUM_2G];
s8 tx_pwr_limit_5g[RTW_REGD_MAX]
[RTW_CHANNEL_WIDTH_MAX]
- [RTW_RATE_SECTION_MAX]
+ [RTW_RATE_SECTION_NUM]
[RTW_MAX_CHANNEL_NUM_5G];
s8 tx_pwr_tbl[RTW_RF_PATH_MAX]
[DESC_RATE_MAX];
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index 0ecaefc4c83d..bb4c4ccb31d4 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -20,7 +20,7 @@ module_param_named(disable_aspm, rtw_pci_disable_aspm, bool, 0644);
MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");
MODULE_PARM_DESC(disable_aspm, "Set Y to disable PCI ASPM support");
-static u32 rtw_pci_tx_queue_idx_addr[] = {
+static const u32 rtw_pci_tx_queue_idx_addr[] = {
[RTW_TX_QUEUE_BK] = RTK_PCI_TXBD_IDX_BKQ,
[RTW_TX_QUEUE_BE] = RTK_PCI_TXBD_IDX_BEQ,
[RTW_TX_QUEUE_VI] = RTK_PCI_TXBD_IDX_VIQ,
@@ -1591,7 +1591,7 @@ static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
rtw_pci_io_unmapping(rtwdev, pdev);
}
-static struct rtw_hci_ops rtw_pci_ops = {
+static const struct rtw_hci_ops rtw_pci_ops = {
.tx_write = rtw_pci_tx_write,
.tx_kick_off = rtw_pci_tx_kick_off,
.flush_queues = rtw_pci_flush_queues,
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index 8ed20c89d216..55be0d8e0c28 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -52,60 +52,93 @@ static const u32 db_invert_table[12][8] = {
1995262315, 2511886432U, 3162277660U, 3981071706U}
};
-u8 rtw_cck_rates[] = { DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M };
-u8 rtw_ofdm_rates[] = {
+const u8 rtw_cck_rates[] = { DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M };
+
+const u8 rtw_ofdm_rates[] = {
DESC_RATE6M, DESC_RATE9M, DESC_RATE12M,
DESC_RATE18M, DESC_RATE24M, DESC_RATE36M,
DESC_RATE48M, DESC_RATE54M
};
-u8 rtw_ht_1s_rates[] = {
+
+const u8 rtw_ht_1s_rates[] = {
DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2,
DESC_RATEMCS3, DESC_RATEMCS4, DESC_RATEMCS5,
DESC_RATEMCS6, DESC_RATEMCS7
};
-u8 rtw_ht_2s_rates[] = {
+
+const u8 rtw_ht_2s_rates[] = {
DESC_RATEMCS8, DESC_RATEMCS9, DESC_RATEMCS10,
DESC_RATEMCS11, DESC_RATEMCS12, DESC_RATEMCS13,
DESC_RATEMCS14, DESC_RATEMCS15
};
-u8 rtw_vht_1s_rates[] = {
+
+const u8 rtw_vht_1s_rates[] = {
DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1,
DESC_RATEVHT1SS_MCS2, DESC_RATEVHT1SS_MCS3,
DESC_RATEVHT1SS_MCS4, DESC_RATEVHT1SS_MCS5,
DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7,
DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9
};
-u8 rtw_vht_2s_rates[] = {
+
+const u8 rtw_vht_2s_rates[] = {
DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1,
DESC_RATEVHT2SS_MCS2, DESC_RATEVHT2SS_MCS3,
DESC_RATEVHT2SS_MCS4, DESC_RATEVHT2SS_MCS5,
DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7,
DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9
};
-u8 *rtw_rate_section[RTW_RATE_SECTION_MAX] = {
+
+const u8 rtw_ht_3s_rates[] = {
+ DESC_RATEMCS16, DESC_RATEMCS17, DESC_RATEMCS18,
+ DESC_RATEMCS19, DESC_RATEMCS20, DESC_RATEMCS21,
+ DESC_RATEMCS22, DESC_RATEMCS23
+};
+
+const u8 rtw_ht_4s_rates[] = {
+ DESC_RATEMCS24, DESC_RATEMCS25, DESC_RATEMCS26,
+ DESC_RATEMCS27, DESC_RATEMCS28, DESC_RATEMCS29,
+ DESC_RATEMCS30, DESC_RATEMCS31
+};
+
+const u8 rtw_vht_3s_rates[] = {
+ DESC_RATEVHT3SS_MCS0, DESC_RATEVHT3SS_MCS1,
+ DESC_RATEVHT3SS_MCS2, DESC_RATEVHT3SS_MCS3,
+ DESC_RATEVHT3SS_MCS4, DESC_RATEVHT3SS_MCS5,
+ DESC_RATEVHT3SS_MCS6, DESC_RATEVHT3SS_MCS7,
+ DESC_RATEVHT3SS_MCS8, DESC_RATEVHT3SS_MCS9
+};
+
+const u8 rtw_vht_4s_rates[] = {
+ DESC_RATEVHT4SS_MCS0, DESC_RATEVHT4SS_MCS1,
+ DESC_RATEVHT4SS_MCS2, DESC_RATEVHT4SS_MCS3,
+ DESC_RATEVHT4SS_MCS4, DESC_RATEVHT4SS_MCS5,
+ DESC_RATEVHT4SS_MCS6, DESC_RATEVHT4SS_MCS7,
+ DESC_RATEVHT4SS_MCS8, DESC_RATEVHT4SS_MCS9
+};
+
+const u8 * const rtw_rate_section[RTW_RATE_SECTION_NUM] = {
rtw_cck_rates, rtw_ofdm_rates,
rtw_ht_1s_rates, rtw_ht_2s_rates,
- rtw_vht_1s_rates, rtw_vht_2s_rates
+ rtw_vht_1s_rates, rtw_vht_2s_rates,
+ rtw_ht_3s_rates, rtw_ht_4s_rates,
+ rtw_vht_3s_rates, rtw_vht_4s_rates
};
EXPORT_SYMBOL(rtw_rate_section);
-u8 rtw_rate_size[RTW_RATE_SECTION_MAX] = {
+const u8 rtw_rate_size[RTW_RATE_SECTION_NUM] = {
ARRAY_SIZE(rtw_cck_rates),
ARRAY_SIZE(rtw_ofdm_rates),
ARRAY_SIZE(rtw_ht_1s_rates),
ARRAY_SIZE(rtw_ht_2s_rates),
ARRAY_SIZE(rtw_vht_1s_rates),
- ARRAY_SIZE(rtw_vht_2s_rates)
+ ARRAY_SIZE(rtw_vht_2s_rates),
+ ARRAY_SIZE(rtw_ht_3s_rates),
+ ARRAY_SIZE(rtw_ht_4s_rates),
+ ARRAY_SIZE(rtw_vht_3s_rates),
+ ARRAY_SIZE(rtw_vht_4s_rates)
};
EXPORT_SYMBOL(rtw_rate_size);
-static const u8 rtw_cck_size = ARRAY_SIZE(rtw_cck_rates);
-static const u8 rtw_ofdm_size = ARRAY_SIZE(rtw_ofdm_rates);
-static const u8 rtw_ht_1s_size = ARRAY_SIZE(rtw_ht_1s_rates);
-static const u8 rtw_ht_2s_size = ARRAY_SIZE(rtw_ht_2s_rates);
-static const u8 rtw_vht_1s_size = ARRAY_SIZE(rtw_vht_1s_rates);
-static const u8 rtw_vht_2s_size = ARRAY_SIZE(rtw_vht_2s_rates);
-
enum rtw_phy_band_type {
PHY_BAND_2G = 0,
PHY_BAND_5G = 1,
@@ -1590,7 +1623,7 @@ static void rtw_phy_set_tx_power_limit(struct rtw_dev *rtwdev, u8 regd, u8 band,
ch_idx = rtw_channel_to_idx(band, ch);
if (regd >= RTW_REGD_MAX || bw >= RTW_CHANNEL_WIDTH_MAX ||
- rs >= RTW_RATE_SECTION_MAX || ch_idx < 0) {
+ rs >= RTW_RATE_SECTION_NUM || ch_idx < 0) {
WARN(1,
"wrong txpwr_lmt regd=%u, band=%u bw=%u, rs=%u, ch_idx=%u, pwr_limit=%d\n",
regd, band, bw, rs, ch_idx, pwr_limit);
@@ -1634,11 +1667,15 @@ rtw_xref_5g_txpwr_lmt(struct rtw_dev *rtwdev, u8 regd,
static void
rtw_xref_txpwr_lmt_by_rs(struct rtw_dev *rtwdev, u8 regd, u8 bw, u8 ch_idx)
{
+ static const u8 rs_cmp[4][2] = {
+ {RTW_RATE_SECTION_HT_1S, RTW_RATE_SECTION_VHT_1S},
+ {RTW_RATE_SECTION_HT_2S, RTW_RATE_SECTION_VHT_2S},
+ {RTW_RATE_SECTION_HT_3S, RTW_RATE_SECTION_VHT_3S},
+ {RTW_RATE_SECTION_HT_4S, RTW_RATE_SECTION_VHT_4S}
+ };
u8 rs_idx, rs_ht, rs_vht;
- u8 rs_cmp[2][2] = {{RTW_RATE_SECTION_HT_1S, RTW_RATE_SECTION_VHT_1S},
- {RTW_RATE_SECTION_HT_2S, RTW_RATE_SECTION_VHT_2S} };
- for (rs_idx = 0; rs_idx < 2; rs_idx++) {
+ for (rs_idx = 0; rs_idx < 4; rs_idx++) {
rs_ht = rs_cmp[rs_idx][0];
rs_vht = rs_cmp[rs_idx][1];
@@ -1695,7 +1732,7 @@ rtw_cfg_txpwr_lmt_by_alt(struct rtw_dev *rtwdev, u8 regd, u8 regd_alt)
u8 bw, rs;
for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
- for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
+ for (rs = 0; rs < RTW_RATE_SECTION_NUM; rs++)
__cfg_txpwr_lmt_by_alt(&rtwdev->hal, regd, regd_alt,
bw, rs);
}
@@ -1959,10 +1996,10 @@ static u8 rtw_phy_get_2g_tx_power_index(struct rtw_dev *rtwdev,
u8 rate, u8 group)
{
const struct rtw_chip_info *chip = rtwdev->chip;
- u8 tx_power;
- bool mcs_rate;
- bool above_2ss;
+ bool above_2ss, above_3ss, above_4ss;
u8 factor = chip->txgi_factor;
+ bool mcs_rate;
+ u8 tx_power;
if (rate <= DESC_RATE11M)
tx_power = pwr_idx_2g->cck_base[group];
@@ -1972,11 +2009,15 @@ static u8 rtw_phy_get_2g_tx_power_index(struct rtw_dev *rtwdev,
if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
tx_power += pwr_idx_2g->ht_1s_diff.ofdm * factor;
- mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
+ mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS31) ||
(rate >= DESC_RATEVHT1SS_MCS0 &&
- rate <= DESC_RATEVHT2SS_MCS9);
- above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
+ rate <= DESC_RATEVHT4SS_MCS9);
+ above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS31) ||
(rate >= DESC_RATEVHT2SS_MCS0);
+ above_3ss = (rate >= DESC_RATEMCS16 && rate <= DESC_RATEMCS31) ||
+ (rate >= DESC_RATEVHT3SS_MCS0);
+ above_4ss = (rate >= DESC_RATEMCS24 && rate <= DESC_RATEMCS31) ||
+ (rate >= DESC_RATEVHT4SS_MCS0);
if (!mcs_rate)
return tx_power;
@@ -1989,11 +2030,19 @@ static u8 rtw_phy_get_2g_tx_power_index(struct rtw_dev *rtwdev,
tx_power += pwr_idx_2g->ht_1s_diff.bw20 * factor;
if (above_2ss)
tx_power += pwr_idx_2g->ht_2s_diff.bw20 * factor;
+ if (above_3ss)
+ tx_power += pwr_idx_2g->ht_3s_diff.bw20 * factor;
+ if (above_4ss)
+ tx_power += pwr_idx_2g->ht_4s_diff.bw20 * factor;
break;
case RTW_CHANNEL_WIDTH_40:
/* bw40 is the base power */
if (above_2ss)
tx_power += pwr_idx_2g->ht_2s_diff.bw40 * factor;
+ if (above_3ss)
+ tx_power += pwr_idx_2g->ht_3s_diff.bw40 * factor;
+ if (above_4ss)
+ tx_power += pwr_idx_2g->ht_4s_diff.bw40 * factor;
break;
}
@@ -2006,19 +2055,23 @@ static u8 rtw_phy_get_5g_tx_power_index(struct rtw_dev *rtwdev,
u8 rate, u8 group)
{
const struct rtw_chip_info *chip = rtwdev->chip;
- u8 tx_power;
+ bool above_2ss, above_3ss, above_4ss;
+ u8 factor = chip->txgi_factor;
u8 upper, lower;
bool mcs_rate;
- bool above_2ss;
- u8 factor = chip->txgi_factor;
+ u8 tx_power;
tx_power = pwr_idx_5g->bw40_base[group];
- mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
+ mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS31) ||
(rate >= DESC_RATEVHT1SS_MCS0 &&
- rate <= DESC_RATEVHT2SS_MCS9);
- above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
+ rate <= DESC_RATEVHT4SS_MCS9);
+ above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS31) ||
(rate >= DESC_RATEVHT2SS_MCS0);
+ above_3ss = (rate >= DESC_RATEMCS16 && rate <= DESC_RATEMCS31) ||
+ (rate >= DESC_RATEVHT3SS_MCS0);
+ above_4ss = (rate >= DESC_RATEMCS24 && rate <= DESC_RATEMCS31) ||
+ (rate >= DESC_RATEVHT4SS_MCS0);
if (!mcs_rate) {
tx_power += pwr_idx_5g->ht_1s_diff.ofdm * factor;
@@ -2033,11 +2086,19 @@ static u8 rtw_phy_get_5g_tx_power_index(struct rtw_dev *rtwdev,
tx_power += pwr_idx_5g->ht_1s_diff.bw20 * factor;
if (above_2ss)
tx_power += pwr_idx_5g->ht_2s_diff.bw20 * factor;
+ if (above_3ss)
+ tx_power += pwr_idx_5g->ht_3s_diff.bw20 * factor;
+ if (above_4ss)
+ tx_power += pwr_idx_5g->ht_4s_diff.bw20 * factor;
break;
case RTW_CHANNEL_WIDTH_40:
/* bw40 is the base power */
if (above_2ss)
tx_power += pwr_idx_5g->ht_2s_diff.bw40 * factor;
+ if (above_3ss)
+ tx_power += pwr_idx_5g->ht_3s_diff.bw40 * factor;
+ if (above_4ss)
+ tx_power += pwr_idx_5g->ht_4s_diff.bw40 * factor;
break;
case RTW_CHANNEL_WIDTH_80:
/* the base idx of bw80 is the average of bw40+/bw40- */
@@ -2048,13 +2109,17 @@ static u8 rtw_phy_get_5g_tx_power_index(struct rtw_dev *rtwdev,
tx_power += pwr_idx_5g->vht_1s_diff.bw80 * factor;
if (above_2ss)
tx_power += pwr_idx_5g->vht_2s_diff.bw80 * factor;
+ if (above_3ss)
+ tx_power += pwr_idx_5g->vht_3s_diff.bw80 * factor;
+ if (above_4ss)
+ tx_power += pwr_idx_5g->vht_4s_diff.bw80 * factor;
break;
}
return tx_power;
}
-/* return RTW_RATE_SECTION_MAX to indicate rate is invalid */
+/* return RTW_RATE_SECTION_NUM to indicate rate is invalid */
static u8 rtw_phy_rate_to_rate_section(u8 rate)
{
if (rate >= DESC_RATE1M && rate <= DESC_RATE11M)
@@ -2065,12 +2130,20 @@ static u8 rtw_phy_rate_to_rate_section(u8 rate)
return RTW_RATE_SECTION_HT_1S;
else if (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15)
return RTW_RATE_SECTION_HT_2S;
+ else if (rate >= DESC_RATEMCS16 && rate <= DESC_RATEMCS23)
+ return RTW_RATE_SECTION_HT_3S;
+ else if (rate >= DESC_RATEMCS24 && rate <= DESC_RATEMCS31)
+ return RTW_RATE_SECTION_HT_4S;
else if (rate >= DESC_RATEVHT1SS_MCS0 && rate <= DESC_RATEVHT1SS_MCS9)
return RTW_RATE_SECTION_VHT_1S;
else if (rate >= DESC_RATEVHT2SS_MCS0 && rate <= DESC_RATEVHT2SS_MCS9)
return RTW_RATE_SECTION_VHT_2S;
+ else if (rate >= DESC_RATEVHT3SS_MCS0 && rate <= DESC_RATEVHT3SS_MCS9)
+ return RTW_RATE_SECTION_VHT_3S;
+ else if (rate >= DESC_RATEVHT4SS_MCS0 && rate <= DESC_RATEVHT4SS_MCS9)
+ return RTW_RATE_SECTION_VHT_4S;
else
- return RTW_RATE_SECTION_MAX;
+ return RTW_RATE_SECTION_NUM;
}
static s8 rtw_phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
@@ -2088,7 +2161,7 @@ static s8 rtw_phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
if (regd > RTW_REGD_WW)
return power_limit;
- if (rs == RTW_RATE_SECTION_MAX)
+ if (rs == RTW_RATE_SECTION_NUM)
goto err;
/* only 20M BW with cck and ofdm */
@@ -2096,7 +2169,7 @@ static s8 rtw_phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
bw = RTW_CHANNEL_WIDTH_20;
/* only 20/40M BW with ht */
- if (rs == RTW_RATE_SECTION_HT_1S || rs == RTW_RATE_SECTION_HT_2S)
+ if (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS31)
bw = min_t(u8, bw, RTW_CHANNEL_WIDTH_40);
/* select min power limit among [20M BW ~ current BW] */
@@ -2132,7 +2205,7 @@ static s8 rtw_phy_get_tx_power_sar(struct rtw_dev *rtwdev, u8 sar_band,
.rs = rs,
};
- if (rs == RTW_RATE_SECTION_MAX)
+ if (rs == RTW_RATE_SECTION_NUM)
goto err;
return rtw_query_sar(rtwdev, &arg);
@@ -2214,14 +2287,14 @@ static void rtw_phy_set_tx_power_index_by_rs(struct rtw_dev *rtwdev,
{
struct rtw_hal *hal = &rtwdev->hal;
u8 regd = rtw_regd_get(rtwdev);
- u8 *rates;
+ const u8 *rates;
u8 size;
u8 rate;
u8 pwr_idx;
u8 bw;
int i;
- if (rs >= RTW_RATE_SECTION_MAX)
+ if (rs >= RTW_RATE_SECTION_NUM)
return;
rates = rtw_rate_section[rs];
@@ -2252,7 +2325,7 @@ static void rtw_phy_set_tx_power_level_by_path(struct rtw_dev *rtwdev,
else
rs = RTW_RATE_SECTION_OFDM;
- for (; rs < RTW_RATE_SECTION_MAX; rs++)
+ for (; rs < RTW_RATE_SECTION_NUM; rs++)
rtw_phy_set_tx_power_index_by_rs(rtwdev, ch, path, rs);
}
@@ -2274,13 +2347,13 @@ EXPORT_SYMBOL(rtw_phy_set_tx_power_level);
static void
rtw_phy_tx_power_by_rate_config_by_path(struct rtw_hal *hal, u8 path,
- u8 rs, u8 size, u8 *rates)
+ u8 rs, u8 size, const u8 *rates)
{
u8 rate;
u8 base_idx, rate_idx;
s8 base_2g, base_5g;
- if (rs >= RTW_RATE_SECTION_VHT_1S)
+ if (size == 10) /* VHT rates */
base_idx = rates[size - 3];
else
base_idx = rates[size - 1];
@@ -2297,28 +2370,12 @@ rtw_phy_tx_power_by_rate_config_by_path(struct rtw_hal *hal, u8 path,
void rtw_phy_tx_power_by_rate_config(struct rtw_hal *hal)
{
- u8 path;
+ u8 path, rs;
- for (path = 0; path < RTW_RF_PATH_MAX; path++) {
- rtw_phy_tx_power_by_rate_config_by_path(hal, path,
- RTW_RATE_SECTION_CCK,
- rtw_cck_size, rtw_cck_rates);
- rtw_phy_tx_power_by_rate_config_by_path(hal, path,
- RTW_RATE_SECTION_OFDM,
- rtw_ofdm_size, rtw_ofdm_rates);
- rtw_phy_tx_power_by_rate_config_by_path(hal, path,
- RTW_RATE_SECTION_HT_1S,
- rtw_ht_1s_size, rtw_ht_1s_rates);
- rtw_phy_tx_power_by_rate_config_by_path(hal, path,
- RTW_RATE_SECTION_HT_2S,
- rtw_ht_2s_size, rtw_ht_2s_rates);
- rtw_phy_tx_power_by_rate_config_by_path(hal, path,
- RTW_RATE_SECTION_VHT_1S,
- rtw_vht_1s_size, rtw_vht_1s_rates);
- rtw_phy_tx_power_by_rate_config_by_path(hal, path,
- RTW_RATE_SECTION_VHT_2S,
- rtw_vht_2s_size, rtw_vht_2s_rates);
- }
+ for (path = 0; path < RTW_RF_PATH_MAX; path++)
+ for (rs = 0; rs < RTW_RATE_SECTION_NUM; rs++)
+ rtw_phy_tx_power_by_rate_config_by_path(hal, path, rs,
+ rtw_rate_size[rs], rtw_rate_section[rs]);
}
static void
@@ -2347,7 +2404,7 @@ void rtw_phy_tx_power_limit_config(struct rtw_hal *hal)
for (regd = 0; regd < RTW_REGD_MAX; regd++)
for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
- for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
+ for (rs = 0; rs < RTW_RATE_SECTION_NUM; rs++)
__rtw_phy_tx_power_limit_config(hal, regd, bw, rs);
}
@@ -2383,7 +2440,7 @@ void rtw_phy_init_tx_power(struct rtw_dev *rtwdev)
/* init tx power limit */
for (regd = 0; regd < RTW_REGD_MAX; regd++)
for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
- for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
+ for (rs = 0; rs < RTW_RATE_SECTION_NUM; rs++)
rtw_phy_init_tx_power_limit(rtwdev, regd, bw,
rs);
}
@@ -2401,32 +2458,56 @@ void rtw_phy_config_swing_table(struct rtw_dev *rtwdev,
swing_table->n[RF_PATH_A] = tbl->pwrtrk_2g_ccka_n;
swing_table->p[RF_PATH_B] = tbl->pwrtrk_2g_cckb_p;
swing_table->n[RF_PATH_B] = tbl->pwrtrk_2g_cckb_n;
+ swing_table->p[RF_PATH_C] = tbl->pwrtrk_2g_cckc_p;
+ swing_table->n[RF_PATH_C] = tbl->pwrtrk_2g_cckc_n;
+ swing_table->p[RF_PATH_D] = tbl->pwrtrk_2g_cckd_p;
+ swing_table->n[RF_PATH_D] = tbl->pwrtrk_2g_cckd_n;
} else {
swing_table->p[RF_PATH_A] = tbl->pwrtrk_2ga_p;
swing_table->n[RF_PATH_A] = tbl->pwrtrk_2ga_n;
swing_table->p[RF_PATH_B] = tbl->pwrtrk_2gb_p;
swing_table->n[RF_PATH_B] = tbl->pwrtrk_2gb_n;
+ swing_table->p[RF_PATH_C] = tbl->pwrtrk_2gc_p;
+ swing_table->n[RF_PATH_C] = tbl->pwrtrk_2gc_n;
+ swing_table->p[RF_PATH_D] = tbl->pwrtrk_2gd_p;
+ swing_table->n[RF_PATH_D] = tbl->pwrtrk_2gd_n;
}
} else if (IS_CH_5G_BAND_1(channel) || IS_CH_5G_BAND_2(channel)) {
swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_1];
swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_1];
swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_1];
swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_1];
+ swing_table->p[RF_PATH_C] = tbl->pwrtrk_5gc_p[RTW_PWR_TRK_5G_1];
+ swing_table->n[RF_PATH_C] = tbl->pwrtrk_5gc_n[RTW_PWR_TRK_5G_1];
+ swing_table->p[RF_PATH_D] = tbl->pwrtrk_5gd_p[RTW_PWR_TRK_5G_1];
+ swing_table->n[RF_PATH_D] = tbl->pwrtrk_5gd_n[RTW_PWR_TRK_5G_1];
} else if (IS_CH_5G_BAND_3(channel)) {
swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_2];
swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_2];
swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_2];
swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_2];
+ swing_table->p[RF_PATH_C] = tbl->pwrtrk_5gc_p[RTW_PWR_TRK_5G_2];
+ swing_table->n[RF_PATH_C] = tbl->pwrtrk_5gc_n[RTW_PWR_TRK_5G_2];
+ swing_table->p[RF_PATH_D] = tbl->pwrtrk_5gd_p[RTW_PWR_TRK_5G_2];
+ swing_table->n[RF_PATH_D] = tbl->pwrtrk_5gd_n[RTW_PWR_TRK_5G_2];
} else if (IS_CH_5G_BAND_4(channel)) {
swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_3];
swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_3];
swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_3];
swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_3];
+ swing_table->p[RF_PATH_C] = tbl->pwrtrk_5gc_p[RTW_PWR_TRK_5G_3];
+ swing_table->n[RF_PATH_C] = tbl->pwrtrk_5gc_n[RTW_PWR_TRK_5G_3];
+ swing_table->p[RF_PATH_D] = tbl->pwrtrk_5gd_p[RTW_PWR_TRK_5G_3];
+ swing_table->n[RF_PATH_D] = tbl->pwrtrk_5gd_n[RTW_PWR_TRK_5G_3];
} else {
swing_table->p[RF_PATH_A] = tbl->pwrtrk_2ga_p;
swing_table->n[RF_PATH_A] = tbl->pwrtrk_2ga_n;
swing_table->p[RF_PATH_B] = tbl->pwrtrk_2gb_p;
swing_table->n[RF_PATH_B] = tbl->pwrtrk_2gb_n;
+ swing_table->p[RF_PATH_C] = tbl->pwrtrk_2gc_p;
+ swing_table->n[RF_PATH_C] = tbl->pwrtrk_2gc_n;
+ swing_table->p[RF_PATH_D] = tbl->pwrtrk_2gd_p;
+ swing_table->n[RF_PATH_D] = tbl->pwrtrk_2gd_n;
}
}
EXPORT_SYMBOL(rtw_phy_config_swing_table);
diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
index ccfcbd3ced03..c9e6b869661d 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.h
+++ b/drivers/net/wireless/realtek/rtw88/phy.h
@@ -7,14 +7,18 @@
#include "debug.h"
-extern u8 rtw_cck_rates[];
-extern u8 rtw_ofdm_rates[];
-extern u8 rtw_ht_1s_rates[];
-extern u8 rtw_ht_2s_rates[];
-extern u8 rtw_vht_1s_rates[];
-extern u8 rtw_vht_2s_rates[];
-extern u8 *rtw_rate_section[];
-extern u8 rtw_rate_size[];
+extern const u8 rtw_cck_rates[];
+extern const u8 rtw_ofdm_rates[];
+extern const u8 rtw_ht_1s_rates[];
+extern const u8 rtw_ht_2s_rates[];
+extern const u8 rtw_vht_1s_rates[];
+extern const u8 rtw_vht_2s_rates[];
+extern const u8 rtw_ht_3s_rates[];
+extern const u8 rtw_ht_4s_rates[];
+extern const u8 rtw_vht_3s_rates[];
+extern const u8 rtw_vht_4s_rates[];
+extern const u8 * const rtw_rate_section[];
+extern const u8 rtw_rate_size[];
void rtw_phy_init(struct rtw_dev *rtwdev);
void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index e438405fba56..08e9494977e0 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -8,6 +8,7 @@
#define REG_SYS_FUNC_EN 0x0002
#define BIT_FEN_EN_25_1 BIT(13)
#define BIT_FEN_ELDR BIT(12)
+#define BIT_FEN_PCIEA BIT(6)
#define BIT_FEN_CPUEN BIT(2)
#define BIT_FEN_USBA BIT(2)
#define BIT_FEN_BB_GLB_RST BIT(1)
@@ -39,6 +40,9 @@
#define BIT_RF_RSTB BIT(1)
#define BIT_RF_EN BIT(0)
+#define REG_RF_CTRL1 0x0020
+#define REG_RF_CTRL2 0x0021
+
#define REG_AFE_CTRL1 0x0024
#define BIT_MAC_CLK_SEL (BIT(20) | BIT(21))
#define REG_EFUSE_CTRL 0x0030
@@ -73,6 +77,8 @@
#define BIT_BT_PTA_EN BIT(5)
#define BIT_WLRFE_4_5_EN BIT(2)
+#define REG_GPIO_PIN_CTRL 0x0044
+
#define REG_LED_CFG 0x004C
#define BIT_LNAON_SEL_EN BIT(26)
#define BIT_PAPE_SEL_EN BIT(25)
@@ -110,6 +116,7 @@
#define BIT_SDIO_PAD_E5 BIT(18)
#define REG_RF_B_CTRL 0x76
+#define REG_RF_CTRL3 0x0076
#define REG_AFE_CTRL_4 0x0078
#define BIT_CK320M_AFE_EN BIT(4)
@@ -130,6 +137,7 @@
#define BIT_SHIFT_ROM_PGE 16
#define BIT_FW_INIT_RDY BIT(15)
#define BIT_FW_DW_RDY BIT(14)
+#define BIT_CPU_CLK_SEL (BIT(12) | BIT(13))
#define BIT_RPWM_TOGGLE BIT(7)
#define BIT_RAM_DL_SEL BIT(7) /* legacy only */
#define BIT_DMEM_CHKSUM_OK BIT(6)
@@ -147,7 +155,7 @@
BIT_CHECK_SUM_OK)
#define FW_READY_LEGACY (BIT_MCUFWDL_RDY | BIT_FWDL_CHK_RPT | \
BIT_WINTINI_RDY | BIT_RAM_DL_SEL)
-#define FW_READY_MASK 0xffff
+#define FW_READY_MASK (0xffff & ~BIT_CPU_CLK_SEL)
#define REG_MCU_TST_CFG 0x84
#define VAL_FW_TRIGGER 0x1
@@ -602,15 +610,25 @@
#define REG_CCA2ND 0x0838
#define REG_L1PKTH 0x0848
#define REG_CLKTRK 0x0860
+#define REG_CSI_MASK_SETTING1 0x0874
+#define REG_NBI_SETTING 0x087c
+#define BIT_NBI_ENABLE BIT(13)
+#define REG_CSI_FIX_MASK0 0x0880
+#define REG_CSI_FIX_MASK1 0x0884
+#define REG_CSI_FIX_MASK6 0x0898
+#define REG_CSI_FIX_MASK7 0x089c
#define REG_ADCCLK 0x08AC
#define REG_HSSI_READ 0x08B0
#define REG_FPGA0_XCD_RF_PARA 0x08B4
#define REG_RX_MCS_LIMIT 0x08BC
#define REG_ADC160 0x08C4
+#define REG_DBGSEL 0x08fc
#define REG_ANTSEL_SW 0x0900
#define REG_DAC_RSTB 0x090c
+#define REG_PSD 0x0910
+#define BIT_PSD_INI GENMASK(23, 22)
#define REG_SINGLE_TONE_CONT_TX 0x0914
-
+#define REG_AGC_TABLE 0x0958
#define REG_RFE_CTRL_E 0x0974
#define REG_2ND_CCA_CTRL 0x0976
#define REG_IQK_COM00 0x0978
@@ -620,10 +638,18 @@
#define REG_FAS 0x09a4
#define REG_RXSB 0x0a00
+#define BIT_RXSB_ANA_DIV BIT(15)
#define REG_CCK_RX 0x0a04
#define REG_CCK_PD_TH 0x0a0a
-
-#define REG_CCK0_FAREPORT 0xa2c
+#define REG_PRECTRL 0x0a14
+#define BIT_DIS_CO_PATHSEL BIT(7)
+#define BIT_IQ_WGT GENMASK(9, 8)
+#define REG_CCA_MF 0x0a20
+#define BIT_MBC_WIN GENMASK(5, 4)
+#define REG_CCK0_TX_FILTER1 0x0a20
+#define REG_CCK0_TX_FILTER2 0x0a24
+#define REG_CCK0_DEBUG_PORT 0x0a28
+#define REG_CCK0_FAREPORT 0x0a2c
#define BIT_CCK0_2RX BIT(18)
#define BIT_CCK0_MRC BIT(22)
#define REG_FA_CCK 0x0a5c
@@ -642,10 +668,18 @@
#define DIS_DPD_RATEVHT2SS_MCS1 BIT(9)
#define DIS_DPD_RATEALL GENMASK(9, 0)
+#define REG_CCA 0x0a70
+#define BIT_CCA_CO BIT(7)
+#define REG_ANTSEL 0x0a74
+#define BIT_ANT_BYCO BIT(8)
+#define REG_CCKTX 0x0a84
+#define BIT_CMB_CCA_2R BIT(28)
+
#define REG_CNTRST 0x0b58
#define REG_3WIRE_SWA 0x0c00
#define REG_RX_IQC_AB_A 0x0c10
+#define REG_RX_IQC_CD_A 0x0c14
#define REG_TXSCALE_A 0x0c1c
#define BB_SWING_MASK GENMASK(31, 21)
#define REG_TX_AGC_A_CCK_11_CCK_1 0xc20
@@ -673,7 +707,7 @@
#define REG_LSSI_WRITE_A 0x0c90
#define REG_PREDISTA 0x0c90
#define REG_TXAGCIDX 0x0c94
-
+#define REG_TX_AGC_A 0x0c94
#define REG_RFE_PINMUX_A 0x0cb0
#define REG_RFE_INV_A 0x0cb4
#define REG_RFE_CTRL8 0x0cb4
@@ -682,6 +716,7 @@
#define DPDT_CTRL_PIN 0x77
#define RFE_INV_MASK 0x3ff00000
#define REG_RFECTL_A 0x0cb8
+#define REG_RFE_INV0 0x0cbc
#define REG_RFE_INV8 0x0cbd
#define BIT_MASK_RFE_INV89 GENMASK(1, 0)
#define REG_RFE_INV16 0x0cbe
@@ -702,6 +737,7 @@
#define REG_3WIRE_SWB 0x0e00
#define REG_RX_IQC_AB_B 0x0e10
+#define REG_RX_IQC_CD_B 0x0e14
#define REG_TXSCALE_B 0x0e1c
#define REG_TX_AGC_B_CCK_11_CCK_1 0xe20
#define REG_TX_AGC_B_OFDM18_OFDM6 0xe24
@@ -728,6 +764,7 @@
#define REG_LSSI_WRITE_B 0x0e90
#define REG_PREDISTB 0x0e90
#define REG_INIDLYB 0x0e94
+#define REG_TX_AGC_B 0x0e94
#define REG_RFE_PINMUX_B 0x0eb0
#define REG_RFE_INV_B 0x0eb4
#define REG_RFECTL_B 0x0eb8
@@ -743,8 +780,11 @@
#define REG_CRC_HT 0x0f10
#define REG_CRC_OFDM 0x0f14
#define REG_FA_OFDM 0x0f48
+#define REG_DBGRPT 0x0fa0
#define REG_CCA_CCK 0x0fcc
+#define REG_SYS_CFG3_8814A 0x1000
+
#define REG_ANAPARSW_MAC_0 0x1010
#define BIT_CF_L_V2 GENMASK(29, 28)
@@ -862,9 +902,27 @@
#define LTECOEX_WRITE_DATA REG_WL2LTECOEX_INDIRECT_ACCESS_WRITE_DATA_V1
#define LTECOEX_READ_DATA REG_WL2LTECOEX_INDIRECT_ACCESS_READ_DATA_V1
+#define REG_RX_IQC_AB_C 0x1810
+#define REG_RX_IQC_CD_C 0x1814
+#define REG_TXSCALE_C 0x181c
+#define REG_CK_MONHC 0x185c
+#define REG_AFE_PWR1_C 0x1860
#define REG_IGN_GNT_BT1 0x1860
+#define REG_TX_AGC_C 0x1894
+#define REG_RFE_PINMUX_C 0x18b4
#define REG_RFESEL_CTRL 0x1990
+#define REG_AGC_TBL 0x1998
+
+#define REG_RX_IQC_AB_D 0x1a10
+#define REG_RX_IQC_CD_D 0x1a14
+#define REG_TXSCALE_D 0x1a1c
+#define REG_CK_MONHD 0x1a5c
+#define REG_AFE_PWR1_D 0x1a60
+#define REG_TX_AGC_D 0x1a94
+#define REG_RFE_PINMUX_D 0x1ab4
+#define REG_RFE_INVSEL_D 0x1abc
+#define BIT_RFE_SELSW0_D GENMASK(27, 20)
#define REG_NOMASK_TXBT 0x1ca7
#define REG_ANAPAR 0x1c30
@@ -905,6 +963,7 @@
#define RF18_BAND_MASK (BIT(16) | BIT(9) | BIT(8))
#define RF18_CHANNEL_MASK (MASKBYTE0)
#define RF18_RFSI_MASK (BIT(18) | BIT(17))
+#define RF_RCK1_V1 0x1c
#define RF_RCK 0x1d
#define RF_MODE_TABLE_ADDR 0x30
#define RF_MODE_TABLE_DATA0 0x31
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
index eeca31bf71f1..87715bd54860 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723d.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
@@ -444,7 +444,7 @@ static u8 rtw8723d_iqk_check_tx_failed(struct rtw_dev *rtwdev,
rtw_read32(rtwdev, REG_IQK_RES_TX),
rtw_read32(rtwdev, REG_IQK_RES_TY));
rtw_dbg(rtwdev, RTW_DBG_RFK,
- "[IQK] 0xe90(before IQK)= 0x%x, 0xe98(afer IQK) = 0x%x\n",
+ "[IQK] 0xe90(before IQK)= 0x%x, 0xe98(after IQK) = 0x%x\n",
rtw_read32(rtwdev, 0xe90),
rtw_read32(rtwdev, 0xe98));
@@ -472,7 +472,7 @@ static u8 rtw8723d_iqk_check_rx_failed(struct rtw_dev *rtwdev,
rtw_read32(rtwdev, REG_IQK_RES_RY));
rtw_dbg(rtwdev, RTW_DBG_RFK,
- "[IQK] 0xea0(before IQK)= 0x%x, 0xea8(afer IQK) = 0x%x\n",
+ "[IQK] 0xea0(before IQK)= 0x%x, 0xea8(after IQK) = 0x%x\n",
rtw_read32(rtwdev, 0xea0),
rtw_read32(rtwdev, 0xea8));
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814a.c b/drivers/net/wireless/realtek/rtw88/rtw8814a.c
new file mode 100644
index 000000000000..cfd35d40d46e
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8814a.c
@@ -0,0 +1,2257 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2025 Realtek Corporation
+ */
+
+#include <linux/usb.h>
+#include "main.h"
+#include "coex.h"
+#include "tx.h"
+#include "phy.h"
+#include "rtw8814a.h"
+#include "rtw8814a_table.h"
+#include "rtw88xxa.h"
+#include "reg.h"
+#include "debug.h"
+#include "efuse.h"
+#include "regd.h"
+#include "usb.h"
+
+static void rtw8814a_efuse_grant(struct rtw_dev *rtwdev, bool on)
+{
+ if (on) {
+ rtw_write8(rtwdev, REG_EFUSE_ACCESS, EFUSE_ACCESS_ON);
+
+ rtw_write16_set(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_ELDR);
+ rtw_write16_set(rtwdev, REG_SYS_CLKR,
+ BIT_LOADER_CLK_EN | BIT_ANA8M);
+ } else {
+ rtw_write8(rtwdev, REG_EFUSE_ACCESS, EFUSE_ACCESS_OFF);
+ }
+}
+
+static void rtw8814a_read_rfe_type(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+
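+ /* When BIT(7) of the efuse RFE option is set, replace it with a
+ * default based on the host interface: 0 for PCIe, 1 for USB.
+ */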
+ if (!(efuse->rfe_option & BIT(7)))
+ return;
+
+ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE)
+ efuse->rfe_option = 0;
+ else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB)
+ efuse->rfe_option = 1;
+}
+
+static void rtw8814a_read_amplifier_type(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+
+ switch (efuse->rfe_option) {
+ case 1:
+ /* Internal 2G */
+ efuse->pa_type_2g = 0;
+ efuse->lna_type_2g = 0;
+ /* External 5G */
+ efuse->pa_type_5g = BIT(0);
+ efuse->lna_type_5g = BIT(3);
+ break;
+ case 2 ... 5:
+ /* External everything */
+ efuse->pa_type_2g = BIT(4);
+ efuse->lna_type_2g = BIT(3);
+ efuse->pa_type_5g = BIT(0);
+ efuse->lna_type_5g = BIT(3);
+ break;
+ case 6:
+ efuse->lna_type_5g = BIT(3);
+ break;
+ default:
+ break;
+ }
+}
+
+static void rtw8814a_read_rf_type(struct rtw_dev *rtwdev,
+ struct rtw8814a_efuse *map)
+{
+ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+ struct rtw_hal *hal = &rtwdev->hal;
+
+ switch (map->trx_antenna_option) {
+ case 0xff: /* 4T4R */
+ case 0xee: /* 3T3R */
+ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
+ rtwusb->udev->speed != USB_SPEED_SUPER)
+ hal->rf_type = RF_2T2R;
+ else
+ hal->rf_type = RF_3T3R;
+
+ break;
+ case 0x66: /* 2T2R */
+ case 0x6f: /* 2T4R */
+ default:
+ hal->rf_type = RF_2T2R;
+ break;
+ }
+
+ hal->rf_path_num = 4;
+ hal->rf_phy_num = 4;
+
+ if (hal->rf_type == RF_3T3R) {
+ hal->antenna_rx = BB_PATH_ABC;
+ hal->antenna_tx = BB_PATH_ABC;
+ } else {
+ hal->antenna_rx = BB_PATH_AB;
+ hal->antenna_tx = BB_PATH_AB;
+ }
+}
+
+static void rtw8814a_init_hwcap(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_hal *hal = &rtwdev->hal;
+
+ efuse->hw_cap.bw = BIT(RTW_CHANNEL_WIDTH_20) |
+ BIT(RTW_CHANNEL_WIDTH_40) |
+ BIT(RTW_CHANNEL_WIDTH_80);
+ efuse->hw_cap.ptcl = EFUSE_HW_CAP_PTCL_VHT;
+
+ if (hal->rf_type == RF_3T3R)
+ efuse->hw_cap.nss = 3;
+ else
+ efuse->hw_cap.nss = 2;
+
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE,
+ "hw cap: hci=0x%02x, bw=0x%02x, ptcl=0x%02x, ant_num=%d, nss=%d\n",
+ efuse->hw_cap.hci, efuse->hw_cap.bw, efuse->hw_cap.ptcl,
+ efuse->hw_cap.ant_num, efuse->hw_cap.nss);
+}
+
+static int rtw8814a_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw8814a_efuse *map;
+ int i;
+
+ if (rtw_dbg_is_enabled(rtwdev, RTW_DBG_EFUSE))
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
+ log_map, rtwdev->chip->log_efuse_size, true);
+
+ map = (struct rtw8814a_efuse *)log_map;
+
+ efuse->usb_mode_switch = u8_get_bits(map->usb_mode, BIT(4));
+ efuse->rfe_option = map->rfe_option;
+ efuse->rf_board_option = map->rf_board_option;
+ efuse->crystal_cap = map->xtal_k;
+ efuse->channel_plan = map->channel_plan;
+ efuse->country_code[0] = map->country_code[0];
+ efuse->country_code[1] = map->country_code[1];
+ efuse->bt_setting = map->rf_bt_setting;
+ efuse->regd = map->rf_board_option & 0x7;
+ efuse->thermal_meter[RF_PATH_A] = map->thermal_meter;
+ efuse->thermal_meter_k = map->thermal_meter;
+ efuse->tx_bb_swing_setting_2g = map->tx_bb_swing_setting_2g;
+ efuse->tx_bb_swing_setting_5g = map->tx_bb_swing_setting_5g;
+
+ rtw8814a_read_rfe_type(rtwdev);
+ rtw8814a_read_amplifier_type(rtwdev);
+
+ /* Override rtw_chip_parameter_setup() */
+ rtw8814a_read_rf_type(rtwdev, map);
+
+ rtw8814a_init_hwcap(rtwdev);
+
+ for (i = 0; i < 4; i++)
+ efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
+
+ switch (rtw_hci_type(rtwdev)) {
+ case RTW_HCI_TYPE_USB:
+ ether_addr_copy(efuse->addr, map->u.mac_addr);
+ break;
+ case RTW_HCI_TYPE_PCIE:
+ ether_addr_copy(efuse->addr, map->e.mac_addr);
+ break;
+ case RTW_HCI_TYPE_SDIO:
+ default:
+ /* unsupported now */
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void rtw8814a_init_rfe_reg(struct rtw_dev *rtwdev)
+{
+ u8 rfe_option = rtwdev->efuse.rfe_option;
+
+ if (rfe_option == 2 || rfe_option == 1) {
+ rtw_write32_mask(rtwdev, 0x1994, 0xf, 0xf);
+ rtw_write8_set(rtwdev, REG_GPIO_MUXCFG + 2, 0xf0);
+ } else if (rfe_option == 0) {
+ rtw_write32_mask(rtwdev, 0x1994, 0xf, 0xf);
+ rtw_write8_set(rtwdev, REG_GPIO_MUXCFG + 2, 0xc0);
+ }
+}
+
+#define RTW_TXSCALE_SIZE 37
+static const u32 rtw8814a_txscale_tbl[RTW_TXSCALE_SIZE] = {
+ 0x081, 0x088, 0x090, 0x099, 0x0a2, 0x0ac, 0x0b6, 0x0c0, 0x0cc, 0x0d8,
+ 0x0e5, 0x0f2, 0x101, 0x110, 0x120, 0x131, 0x143, 0x156, 0x16a, 0x180,
+ 0x197, 0x1af, 0x1c8, 0x1e3, 0x200, 0x21e, 0x23e, 0x261, 0x285, 0x2ab,
+ 0x2d3, 0x2fe, 0x32b, 0x35c, 0x38e, 0x3c4, 0x3fe
+};
+
+static u32 rtw8814a_get_bb_swing(struct rtw_dev *rtwdev, u8 band, u8 rf_path)
+{
+ static const u32 swing2setting[4] = {0x200, 0x16a, 0x101, 0x0b6};
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ u8 tx_bb_swing;
+
+ if (band == RTW_BAND_2G)
+ tx_bb_swing = efuse->tx_bb_swing_setting_2g;
+ else
+ tx_bb_swing = efuse->tx_bb_swing_setting_5g;
+
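+ /* The efuse byte packs a 2-bit swing selector per RF path; it indexes
+ * the four scale values in swing2setting[].
+ */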
+ tx_bb_swing >>= 2 * rf_path;
+ tx_bb_swing &= 0x3;
+
+ return swing2setting[tx_bb_swing];
+}
+
+static u8 rtw8814a_get_swing_index(struct rtw_dev *rtwdev)
+{
+ u32 swing, table_value;
+ u8 i;
+
+ swing = rtw8814a_get_bb_swing(rtwdev, rtwdev->hal.current_band_type,
+ RF_PATH_A);
+
+ for (i = 0; i < ARRAY_SIZE(rtw8814a_txscale_tbl); i++) {
+ table_value = rtw8814a_txscale_tbl[i];
+ if (swing == table_value)
+ return i;
+ }
+
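+ /* Fall back to index 24, the table entry for the default 0x200 swing. */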
+ return 24;
+}
+
+static void rtw8814a_pwrtrack_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 path;
+
+ dm_info->default_ofdm_index = rtw8814a_get_swing_index(rtwdev);
+
+ for (path = RF_PATH_A; path < rtwdev->hal.rf_path_num; path++) {
+ ewma_thermal_init(&dm_info->avg_thermal[path]);
+ dm_info->delta_power_index[path] = 0;
+ dm_info->delta_power_index_last[path] = 0;
+ }
+ dm_info->pwr_trk_triggered = false;
+ dm_info->pwr_trk_init_trigger = true;
+ dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k;
+}
+
+static void rtw8814a_config_trx_path(struct rtw_dev *rtwdev)
+{
+ /* RX CCK disable 2R CCA */
+ rtw_write32_clr(rtwdev, REG_CCK0_FAREPORT,
+ BIT_CCK0_2RX | BIT_CCK0_MRC);
+ /* pathB tx on, path A/C/D tx off */
+ rtw_write32_mask(rtwdev, REG_CCK_RX, 0xf0000000, 0x4);
+ /* pathB rx */
+ rtw_write32_mask(rtwdev, REG_CCK_RX, 0x0f000000, 0x5);
+}
+
+static void rtw8814a_config_cck_rx_antenna_init(struct rtw_dev *rtwdev)
+{
+ /* CCK 2R CCA parameters */
+
+ /* Disable Ant diversity */
+ rtw_write32_mask(rtwdev, REG_RXSB, BIT_RXSB_ANA_DIV, 0x0);
+ /* Concurrent CCA at LSB & USB */
+ rtw_write32_mask(rtwdev, REG_CCA, BIT_CCA_CO, 0);
+ /* RX path diversity enable */
+ rtw_write32_mask(rtwdev, REG_ANTSEL, BIT_ANT_BYCO, 0);
+ /* r_en_mrc_antsel */
+ rtw_write32_mask(rtwdev, REG_PRECTRL, BIT_DIS_CO_PATHSEL, 0);
+ /* MBC weighting */
+ rtw_write32_mask(rtwdev, REG_CCA_MF, BIT_MBC_WIN, 1);
+ /* 2R CCA only */
+ rtw_write32_mask(rtwdev, REG_CCKTX, BIT_CMB_CCA_2R, 1);
+}
+
+static void rtw8814a_phy_set_param(struct rtw_dev *rtwdev)
+{
+ u32 crystal_cap, val32;
+ u8 val8, rf_path;
+
+ /* power on BB/RF domain */
+ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB)
+ rtw_write8_set(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_USBA);
+ else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE)
+ rtw_write8_set(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_PCIEA);
+
+ rtw_write8_set(rtwdev, REG_SYS_CFG3_8814A + 2,
+ BIT_FEN_BB_GLB_RST | BIT_FEN_BB_RSTB);
+
+ /* Power on RF paths A..D */
+ val8 = BIT_RF_EN | BIT_RF_RSTB | BIT_RF_SDM_RSTB;
+ rtw_write8(rtwdev, REG_RF_CTRL, val8);
+ rtw_write8(rtwdev, REG_RF_CTRL1, val8);
+ rtw_write8(rtwdev, REG_RF_CTRL2, val8);
+ rtw_write8(rtwdev, REG_RF_CTRL3, val8);
+
+ rtw_load_table(rtwdev, rtwdev->chip->bb_tbl);
+ rtw_load_table(rtwdev, rtwdev->chip->agc_tbl);
+
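+ /* Program the 6-bit crystal cap value into both halves of the
+ * crystal cap field in REG_AFE_CTRL3.
+ */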
+ crystal_cap = rtwdev->efuse.crystal_cap & 0x3F;
+ crystal_cap |= crystal_cap << 6;
+ rtw_write32_mask(rtwdev, REG_AFE_CTRL3, 0x07ff8000, crystal_cap);
+
+ rtw8814a_config_trx_path(rtwdev);
+
+ for (rf_path = 0; rf_path < rtwdev->hal.rf_path_num; rf_path++)
+ rtw_load_table(rtwdev, rtwdev->chip->rf_tbl[rf_path]);
+
+ val32 = rtw_read_rf(rtwdev, RF_PATH_A, RF_RCK1_V1, RFREG_MASK);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_RCK1_V1, RFREG_MASK, val32);
+ rtw_write_rf(rtwdev, RF_PATH_C, RF_RCK1_V1, RFREG_MASK, val32);
+ rtw_write_rf(rtwdev, RF_PATH_D, RF_RCK1_V1, RFREG_MASK, val32);
+
+ rtw_write32_set(rtwdev, REG_RXPSEL, BIT_RX_PSEL_RST);
+
+ rtw_write8(rtwdev, REG_HWSEQ_CTRL, 0xFF);
+
+ rtw_write32(rtwdev, REG_BAR_MODE_CTRL, 0x0201ffff);
+
+ rtw_write8(rtwdev, REG_MISC_CTRL, BIT_DIS_SECOND_CCA);
+
+ rtw_write8(rtwdev, REG_NAV_CTRL + 2, 0);
+
+ rtw_write8_clr(rtwdev, REG_GPIO_MUXCFG, BIT(5));
+
+ rtw8814a_config_cck_rx_antenna_init(rtwdev);
+
+ rtw_phy_init(rtwdev);
+ rtw8814a_pwrtrack_init(rtwdev);
+
+ rtw8814a_init_rfe_reg(rtwdev);
+
+ rtw_write8_clr(rtwdev, REG_QUEUE_CTRL, BIT(3));
+
+ rtw_write8(rtwdev, REG_NAV_CTRL + 2, 235);
+
+ /* enable Tx report. */
+ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 1, 0x1F);
+
+ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB) {
+ /* Reset USB mode switch setting */
+ rtw_write8(rtwdev, REG_SYS_SDIO_CTRL, 0x0);
+ rtw_write8(rtwdev, REG_ACLK_MON, 0x0);
+ }
+}
+
+static void rtw8814ae_enable_rf_1_2v(struct rtw_dev *rtwdev)
+{
+ /* On full-size cards GPIO7 is left floating, so pull it high to
+ * enable the RF 1.2V switching power supply.
+ */
+
+ /* 1. set 0x40[1:0] to 0, BIT_GPIOSEL=0, select pin as GPIO */
+ rtw_write8_clr(rtwdev, REG_GPIO_MUXCFG, BIT(1) | BIT(0));
+
+ /* 2. set 0x44[31] to 0
+ * mode=0: data port;
+ * mode=1 and BIT_GPIO_IO_SEL=0: interrupt mode;
+ */
+ rtw_write8_clr(rtwdev, REG_GPIO_PIN_CTRL + 3, BIT(7));
+
+ /* 3. data mode
+ * 3.1 set 0x44[23] to 1
+ * sel=0: input;
+ * sel=1: output;
+ */
+ rtw_write8_set(rtwdev, REG_GPIO_PIN_CTRL + 2, BIT(7));
+
+ /* 3.2 set 0x44[15] to 1
+ * output high value;
+ */
+ rtw_write8_set(rtwdev, REG_GPIO_PIN_CTRL + 1, BIT(7));
+}
+
+static int rtw8814a_mac_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+
+ rtw_write16(rtwdev, REG_CR,
+ MAC_TRX_ENABLE | BIT_MAC_SEC_EN | BIT_32K_CAL_TMR_EN);
+
+ rtw_load_table(rtwdev, rtwdev->chip->mac_tbl);
+
+ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB)
+ rtw_write8(rtwdev, REG_AUTO_LLT_V1 + 3,
+ rtwdev->chip->usb_tx_agg_desc_num << 1);
+
+ rtw_write32(rtwdev, REG_HIMR0, 0);
+ rtw_write32(rtwdev, REG_HIMR1, 0);
+
+ rtw_write32_mask(rtwdev, REG_RRSR, 0xfffff, 0xfffff);
+
+ rtw_write16(rtwdev, REG_RETRY_LIMIT, 0x3030);
+
+ rtw_write16(rtwdev, REG_RXFLTMAP0, 0xffff);
+ rtw_write16(rtwdev, REG_RXFLTMAP1, 0x0400);
+ rtw_write16(rtwdev, REG_RXFLTMAP2, 0xffff);
+
+ rtw_write8(rtwdev, REG_MAX_AGGR_NUM, 0x36);
+ rtw_write8(rtwdev, REG_MAX_AGGR_NUM + 1, 0x36);
+
+ /* Set Spec SIFS (used in NAV) */
+ rtw_write16(rtwdev, REG_SPEC_SIFS, 0x100a);
+ rtw_write16(rtwdev, REG_MAC_SPEC_SIFS, 0x100a);
+
+ /* Set SIFS for CCK */
+ rtw_write16(rtwdev, REG_SIFS, 0x100a);
+
+ /* Set SIFS for OFDM */
+ rtw_write16(rtwdev, REG_SIFS + 2, 0x100a);
+
+ /* TXOP */
+ rtw_write32(rtwdev, REG_EDCA_BE_PARAM, 0x005EA42B);
+ rtw_write32(rtwdev, REG_EDCA_BK_PARAM, 0x0000A44F);
+ rtw_write32(rtwdev, REG_EDCA_VI_PARAM, 0x005EA324);
+ rtw_write32(rtwdev, REG_EDCA_VO_PARAM, 0x002FA226);
+
+ rtw_write8_set(rtwdev, REG_FWHW_TXQ_CTRL, BIT(7));
+
+ rtw_write8(rtwdev, REG_ACKTO, 0x80);
+
+ rtw_write16(rtwdev, REG_BCN_CTRL,
+ BIT_DIS_TSF_UDT | (BIT_DIS_TSF_UDT << 8));
+ rtw_write32_mask(rtwdev, REG_TBTT_PROHIBIT, 0xfffff, WLAN_TBTT_TIME);
+ rtw_write8(rtwdev, REG_DRVERLYINT, 0x05);
+ rtw_write8(rtwdev, REG_BCNDMATIM, WLAN_BCN_DMA_TIME);
+ rtw_write16(rtwdev, REG_BCNTCFG, 0x4413);
+ rtw_write8(rtwdev, REG_BCN_MAX_ERR, 0xFF);
+
+ rtw_write32(rtwdev, REG_FAST_EDCA_VOVI_SETTING, 0x08070807);
+ rtw_write32(rtwdev, REG_FAST_EDCA_BEBK_SETTING, 0x08070807);
+
+ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
+ rtwusb->udev->speed == USB_SPEED_SUPER) {
+ /* Disable U1/U2 Mode to avoid 2.5G spur in USB3.0. */
+ rtw_write8_clr(rtwdev, REG_USB_MOD, BIT(4) | BIT(3));
+ /* Avoid H2C failures on USB 3.0. */
+ rtw_write16(rtwdev, 0xf002, 0);
+
+ rtw_write8_clr(rtwdev, REG_SW_AMPDU_BURST_MODE_CTRL,
+ BIT_PRE_TX_CMD);
+ } else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE) {
+ rtw8814ae_enable_rf_1_2v(rtwdev);
+
+ /* Force antenna B to Wi-Fi. */
+ rtw_write8_set(rtwdev, REG_PAD_CTRL1, BIT(2));
+ rtw_write8_set(rtwdev, REG_PAD_CTRL1 + 1, BIT(0));
+ rtw_write8_set(rtwdev, REG_LED_CFG + 3,
+ (BIT(27) | BIT_DPDT_WL_SEL) >> 24);
+ }
+
+ return 0;
+}
+
+static void rtw8814a_set_rfe_reg_24g(struct rtw_dev *rtwdev)
+{
+ switch (rtwdev->efuse.rfe_option) {
+ case 2:
+ rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x72707270);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x72707270);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_C, 0x72707270);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_D, 0x77707770);
+
+ rtw_write32_mask(rtwdev, REG_RFE_INVSEL_D,
+ BIT_RFE_SELSW0_D, 0x72);
+
+ break;
+ case 1:
+ rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x77777777);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x77777777);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_C, 0x77777777);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_D, 0x77777777);
+
+ rtw_write32_mask(rtwdev, REG_RFE_INVSEL_D,
+ BIT_RFE_SELSW0_D, 0x77);
+
+ break;
+ case 0:
+ default:
+ rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x77777777);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x77777777);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_C, 0x77777777);
+ /* Setting REG_RFE_PINMUX_D may not be necessary here. */
+
+ rtw_write32_mask(rtwdev, REG_RFE_INVSEL_D,
+ BIT_RFE_SELSW0_D, 0x77);
+
+ break;
+ }
+}
+
+static void rtw8814a_set_rfe_reg_5g(struct rtw_dev *rtwdev)
+{
+ switch (rtwdev->efuse.rfe_option) {
+ case 2:
+ rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x37173717);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x37173717);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_C, 0x37173717);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_D, 0x77177717);
+
+ rtw_write32_mask(rtwdev, REG_RFE_INVSEL_D,
+ BIT_RFE_SELSW0_D, 0x37);
+
+ break;
+ case 1:
+ rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x33173317);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x33173317);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_C, 0x33173317);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_D, 0x77177717);
+
+ rtw_write32_mask(rtwdev, REG_RFE_INVSEL_D,
+ BIT_RFE_SELSW0_D, 0x33);
+
+ break;
+ case 0:
+ default:
+ rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x54775477);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x54775477);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_C, 0x54775477);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_D, 0x54775477);
+
+ rtw_write32_mask(rtwdev, REG_RFE_INVSEL_D,
+ BIT_RFE_SELSW0_D, 0x54);
+
+ break;
+ }
+}
+
+static void rtw8814a_set_channel_bb_swing(struct rtw_dev *rtwdev, u8 band)
+{
+ rtw_write32_mask(rtwdev, REG_TXSCALE_A, BB_SWING_MASK,
+ rtw8814a_get_bb_swing(rtwdev, band, RF_PATH_A));
+ rtw_write32_mask(rtwdev, REG_TXSCALE_B, BB_SWING_MASK,
+ rtw8814a_get_bb_swing(rtwdev, band, RF_PATH_B));
+ rtw_write32_mask(rtwdev, REG_TXSCALE_C, BB_SWING_MASK,
+ rtw8814a_get_bb_swing(rtwdev, band, RF_PATH_C));
+ rtw_write32_mask(rtwdev, REG_TXSCALE_D, BB_SWING_MASK,
+ rtw8814a_get_bb_swing(rtwdev, band, RF_PATH_D));
+ rtw8814a_pwrtrack_init(rtwdev);
+}
+
+static void rtw8814a_set_bw_reg_adc(struct rtw_dev *rtwdev, u8 bw)
+{
+ u32 adc = 0;
+
+ if (bw == RTW_CHANNEL_WIDTH_20)
+ adc = 0;
+ else if (bw == RTW_CHANNEL_WIDTH_40)
+ adc = 1;
+ else if (bw == RTW_CHANNEL_WIDTH_80)
+ adc = 2;
+
+ rtw_write32_mask(rtwdev, REG_ADCCLK, BIT(1) | BIT(0), adc);
+}
+
+static void rtw8814a_set_bw_reg_agc(struct rtw_dev *rtwdev, u8 new_band, u8 bw)
+{
+ u32 agc = 7;
+
+ if (bw == RTW_CHANNEL_WIDTH_20) {
+ agc = 6;
+ } else if (bw == RTW_CHANNEL_WIDTH_40) {
+ if (new_band == RTW_BAND_5G)
+ agc = 8;
+ else
+ agc = 7;
+ } else if (bw == RTW_CHANNEL_WIDTH_80) {
+ agc = 3;
+ }
+
+ rtw_write32_mask(rtwdev, REG_CCASEL, 0xf000, agc);
+}
+
+static void rtw8814a_switch_band(struct rtw_dev *rtwdev, u8 new_band, u8 bw)
+{
+ /* Clear 0x1000[16]. When this bit is set to 0, CCK and OFDM
+ * are disabled and their clocks are gated. Otherwise, CCK and
+ * OFDM are enabled.
+ */
+ rtw_write8_clr(rtwdev, REG_SYS_CFG3_8814A + 2, BIT_FEN_BB_RSTB);
+
+ if (new_band == RTW_BAND_2G) {
+ rtw_write32_mask(rtwdev, REG_AGC_TABLE, 0x1f, 0);
+
+ rtw8814a_set_rfe_reg_24g(rtwdev);
+
+ rtw_write32_mask(rtwdev, REG_TXPSEL, 0xf0, 0x2);
+ rtw_write32_mask(rtwdev, REG_CCK_RX, 0x0f000000, 0x5);
+
+ rtw_write32_mask(rtwdev, REG_RXPSEL, BIT_RX_PSEL_RST, 0x3);
+
+ rtw_write8(rtwdev, REG_CCK_CHECK, 0);
+
+ rtw_write32_mask(rtwdev, 0xa80, BIT(18), 0);
+ } else {
+ rtw_write8(rtwdev, REG_CCK_CHECK, BIT_CHECK_CCK_EN);
+
+ /* Enable CCK Tx function, even when CCK is off */
+ rtw_write32_mask(rtwdev, 0xa80, BIT(18), 1);
+
+ rtw8814a_set_rfe_reg_5g(rtwdev);
+
+ rtw_write32_mask(rtwdev, REG_TXPSEL, 0xf0, 0x0);
+ rtw_write32_mask(rtwdev, REG_CCK_RX, 0x0f000000, 0xf);
+
+ rtw_write32_mask(rtwdev, REG_RXPSEL, BIT_RX_PSEL_RST, 0x2);
+ }
+
+ rtw8814a_set_channel_bb_swing(rtwdev, new_band);
+
+ rtw8814a_set_bw_reg_adc(rtwdev, bw);
+ rtw8814a_set_bw_reg_agc(rtwdev, new_band, bw);
+
+ rtw_write8_set(rtwdev, REG_SYS_CFG3_8814A + 2, BIT_FEN_BB_RSTB);
+}
+
+static void rtw8814a_switch_channel(struct rtw_dev *rtwdev, u8 channel)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ u32 fc_area, rf_mod_ag, cfgch;
+ u8 path;
+
+ switch (channel) {
+ case 36 ... 48:
+ fc_area = 0x494;
+ break;
+ case 50 ... 64:
+ fc_area = 0x453;
+ break;
+ case 100 ... 116:
+ fc_area = 0x452;
+ break;
+ default:
+ if (channel >= 118)
+ fc_area = 0x412;
+ else
+ fc_area = 0x96a;
+ break;
+ }
+
+ rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, fc_area);
+
+ for (path = 0; path < hal->rf_path_num; path++) {
+ switch (channel) {
+ case 36 ... 64:
+ rf_mod_ag = 0x101;
+ break;
+ case 100 ... 140:
+ rf_mod_ag = 0x301;
+ break;
+ default:
+ if (channel > 140)
+ rf_mod_ag = 0x501;
+ else
+ rf_mod_ag = 0x000;
+ break;
+ }
+
+ cfgch = (rf_mod_ag << 8) | channel;
+
+ rtw_write_rf(rtwdev, path, RF_CFGCH,
+ RF18_RFSI_MASK | RF18_BAND_MASK | RF18_CHANNEL_MASK, cfgch);
+ }
+
+ switch (channel) {
+ case 36 ... 64:
+ rtw_write32_mask(rtwdev, REG_AGC_TABLE, 0x1f, 1);
+ break;
+ case 100 ... 144:
+ rtw_write32_mask(rtwdev, REG_AGC_TABLE, 0x1f, 2);
+ break;
+ default:
+ if (channel >= 149)
+ rtw_write32_mask(rtwdev, REG_AGC_TABLE, 0x1f, 3);
+
+ break;
+ }
+}
+
+static void rtw8814a_24g_cck_tx_dfir(struct rtw_dev *rtwdev, u8 channel)
+{
+ if (channel >= 1 && channel <= 11) {
+ rtw_write32(rtwdev, REG_CCK0_TX_FILTER1, 0x1a1b0030);
+ rtw_write32(rtwdev, REG_CCK0_TX_FILTER2, 0x090e1317);
+ rtw_write32(rtwdev, REG_CCK0_DEBUG_PORT, 0x00000204);
+ } else if (channel >= 12 && channel <= 13) {
+ rtw_write32(rtwdev, REG_CCK0_TX_FILTER1, 0x1a1b0030);
+ rtw_write32(rtwdev, REG_CCK0_TX_FILTER2, 0x090e1217);
+ rtw_write32(rtwdev, REG_CCK0_DEBUG_PORT, 0x00000305);
+ } else if (channel == 14) {
+ rtw_write32(rtwdev, REG_CCK0_TX_FILTER1, 0x1a1b0030);
+ rtw_write32(rtwdev, REG_CCK0_TX_FILTER2, 0x00000E17);
+ rtw_write32(rtwdev, REG_CCK0_DEBUG_PORT, 0x00000000);
+ }
+}
+
+static void rtw8814a_set_bw_reg_mac(struct rtw_dev *rtwdev, u8 bw)
+{
+ u16 val16 = rtw_read16(rtwdev, REG_WMAC_TRXPTCL_CTL);
+
+ val16 &= ~BIT_RFMOD;
+ if (bw == RTW_CHANNEL_WIDTH_80)
+ val16 |= BIT_RFMOD_80M;
+ else if (bw == RTW_CHANNEL_WIDTH_40)
+ val16 |= BIT_RFMOD_40M;
+
+ rtw_write16(rtwdev, REG_WMAC_TRXPTCL_CTL, val16);
+}
+
+static void rtw8814a_set_bw_rf(struct rtw_dev *rtwdev, u8 bw)
+{
+ u8 path;
+
+ for (path = RF_PATH_A; path < rtwdev->hal.rf_path_num; path++) {
+ switch (bw) {
+ case RTW_CHANNEL_WIDTH_5:
+ case RTW_CHANNEL_WIDTH_10:
+ case RTW_CHANNEL_WIDTH_20:
+ default:
+ rtw_write_rf(rtwdev, path, RF_CFGCH, RF18_BW_MASK, 3);
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ rtw_write_rf(rtwdev, path, RF_CFGCH, RF18_BW_MASK, 1);
+ break;
+ case RTW_CHANNEL_WIDTH_80:
+ rtw_write_rf(rtwdev, path, RF_CFGCH, RF18_BW_MASK, 0);
+ break;
+ }
+ }
+}
+
+static void rtw8814a_adc_clk(struct rtw_dev *rtwdev)
+{
+ static const u32 rxiqc_reg[2][4] = {
+ { REG_RX_IQC_AB_A, REG_RX_IQC_AB_B,
+ REG_RX_IQC_AB_C, REG_RX_IQC_AB_D },
+ { REG_RX_IQC_CD_A, REG_RX_IQC_CD_B,
+ REG_RX_IQC_CD_C, REG_RX_IQC_CD_D }
+ };
+ u32 bb_reg_8fc, bb_reg_808, rxiqc[4];
+ u32 i = 0, mac_active = 1;
+ u8 mac_reg_522;
+
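+ /* Only A-cut silicon needs this ADC clock sequence */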
+ if (rtwdev->hal.cut_version != RTW_CHIP_VER_CUT_A)
+ return;
+
+ /* Step 1. MAC TX pause */
+ mac_reg_522 = rtw_read8(rtwdev, REG_TXPAUSE);
+ bb_reg_8fc = rtw_read32(rtwdev, REG_DBGSEL);
+ bb_reg_808 = rtw_read32(rtwdev, REG_RXPSEL);
+ rtw_write8(rtwdev, REG_TXPAUSE, 0x3f);
+
+ /* Step 2. Back up rxiqc and set rxiqc = 0 */
+ for (i = 0; i < 4; i++) {
+ rxiqc[i] = rtw_read32(rtwdev, rxiqc_reg[0][i]);
+ rtw_write32(rtwdev, rxiqc_reg[0][i], 0x0);
+ rtw_write32(rtwdev, rxiqc_reg[1][i], 0x0);
+ }
+ rtw_write32_mask(rtwdev, REG_PRECTRL, BIT_IQ_WGT, 0x3);
+ i = 0;
+
+ /* Step 3. Monitor MAC IDLE */
+ rtw_write32(rtwdev, REG_DBGSEL, 0x0);
+ while (mac_active) {
+ mac_active = rtw_read32(rtwdev, REG_DBGRPT) & 0x803e0008;
+ i++;
+ if (i > 1000)
+ break;
+ }
+
+ /* Step 4. ADC clock flow */
+ rtw_write8(rtwdev, REG_RXPSEL, 0x11);
+ rtw_write32_mask(rtwdev, REG_DAC_RSTB, BIT(13), 0x1);
+ rtw_write8_mask(rtwdev, REG_GNT_BT, BIT(2) | BIT(1), 0x3);
+ rtw_write32_mask(rtwdev, REG_CCK_RPT_FORMAT, BIT(2), 0x1);
+
+ /* 0xc1c/0xe1c/0x181c/0x1a1c[4] must be 1 so the table can be
+ * written when bbrstb = 0.
+ * 0xc60/0xe60/0x1860/0x1a60[15] is always 1 after this line.
+ * 0xc60/0xe60/0x1860/0x1a60[14] is always 0 because of an error
+ * in the A-cut.
+ */
+
+ /* power_off/clk_off @ anapar_state=idle mode */
+ rtw_write32(rtwdev, REG_AFE_PWR1_A, 0x15800002);
+ rtw_write32(rtwdev, REG_AFE_PWR1_A, 0x01808003);
+ rtw_write32(rtwdev, REG_AFE_PWR1_B, 0x15800002);
+ rtw_write32(rtwdev, REG_AFE_PWR1_B, 0x01808003);
+ rtw_write32(rtwdev, REG_AFE_PWR1_C, 0x15800002);
+ rtw_write32(rtwdev, REG_AFE_PWR1_C, 0x01808003);
+ rtw_write32(rtwdev, REG_AFE_PWR1_D, 0x15800002);
+ rtw_write32(rtwdev, REG_AFE_PWR1_D, 0x01808003);
+
+ rtw_write8_mask(rtwdev, REG_GNT_BT, BIT(2), 0x0);
+ rtw_write32_mask(rtwdev, REG_CCK_RPT_FORMAT, BIT(2), 0x0);
+ /* [19] = 1 to turn off ADC */
+ rtw_write32(rtwdev, REG_CK_MONHA, 0x0D080058);
+ rtw_write32(rtwdev, REG_CK_MONHB, 0x0D080058);
+ rtw_write32(rtwdev, REG_CK_MONHC, 0x0D080058);
+ rtw_write32(rtwdev, REG_CK_MONHD, 0x0D080058);
+
+ /* power_on/clk_off */
+ /* [19] = 0 to turn on ADC */
+ rtw_write32(rtwdev, REG_CK_MONHA, 0x0D000058);
+ rtw_write32(rtwdev, REG_CK_MONHB, 0x0D000058);
+ rtw_write32(rtwdev, REG_CK_MONHC, 0x0D000058);
+ rtw_write32(rtwdev, REG_CK_MONHD, 0x0D000058);
+
+ /* power_on/clk_on @ anapar_state=BT mode */
+ rtw_write32(rtwdev, REG_AFE_PWR1_A, 0x05808032);
+ rtw_write32(rtwdev, REG_AFE_PWR1_B, 0x05808032);
+ rtw_write32(rtwdev, REG_AFE_PWR1_C, 0x05808032);
+ rtw_write32(rtwdev, REG_AFE_PWR1_D, 0x05808032);
+ rtw_write8_mask(rtwdev, REG_GNT_BT, BIT(2), 0x1);
+ rtw_write32_mask(rtwdev, REG_CCK_RPT_FORMAT, BIT(2), 0x1);
+
+ /* recover original setting @ anapar_state=BT mode */
+ rtw_write32(rtwdev, REG_AFE_PWR1_A, 0x05808032);
+ rtw_write32(rtwdev, REG_AFE_PWR1_B, 0x05808032);
+ rtw_write32(rtwdev, REG_AFE_PWR1_C, 0x05808032);
+ rtw_write32(rtwdev, REG_AFE_PWR1_D, 0x05808032);
+
+ rtw_write32(rtwdev, REG_AFE_PWR1_A, 0x05800002);
+ rtw_write32(rtwdev, REG_AFE_PWR1_A, 0x07808003);
+ rtw_write32(rtwdev, REG_AFE_PWR1_B, 0x05800002);
+ rtw_write32(rtwdev, REG_AFE_PWR1_B, 0x07808003);
+ rtw_write32(rtwdev, REG_AFE_PWR1_C, 0x05800002);
+ rtw_write32(rtwdev, REG_AFE_PWR1_C, 0x07808003);
+ rtw_write32(rtwdev, REG_AFE_PWR1_D, 0x05800002);
+ rtw_write32(rtwdev, REG_AFE_PWR1_D, 0x07808003);
+
+ rtw_write8_mask(rtwdev, REG_GNT_BT, BIT(2) | BIT(1), 0x0);
+ rtw_write32_mask(rtwdev, REG_CCK_RPT_FORMAT, BIT(2), 0x0);
+ rtw_write32_mask(rtwdev, REG_DAC_RSTB, BIT(13), 0x0);
+
+ /* Step 5. Recover MAC TX & IQC */
+ rtw_write8(rtwdev, REG_TXPAUSE, mac_reg_522);
+ rtw_write32(rtwdev, REG_DBGSEL, bb_reg_8fc);
+ rtw_write32(rtwdev, REG_RXPSEL, bb_reg_808);
+ for (i = 0; i < 4; i++) {
+ rtw_write32(rtwdev, rxiqc_reg[0][i], rxiqc[i]);
+ rtw_write32(rtwdev, rxiqc_reg[1][i], 0x01000000);
+ }
+ rtw_write32_mask(rtwdev, REG_PRECTRL, BIT_IQ_WGT, 0x0);
+}
+
+static void rtw8814a_spur_calibration_ch140(struct rtw_dev *rtwdev, u8 channel)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+
+ /* Added for 8814AE module channel 140 MP Rx */
+ if (channel == 140) {
+ if (hal->ch_param[0] == 0)
+ hal->ch_param[0] = rtw_read32(rtwdev, REG_CCASEL);
+ if (hal->ch_param[1] == 0)
+ hal->ch_param[1] = rtw_read32(rtwdev, REG_PDMFTH);
+
+ rtw_write32(rtwdev, REG_CCASEL, 0x75438170);
+ rtw_write32(rtwdev, REG_PDMFTH, 0x79a18a0a);
+ } else {
+ if (rtw_read32(rtwdev, REG_CCASEL) == 0x75438170 &&
+ hal->ch_param[0] != 0)
+ rtw_write32(rtwdev, REG_CCASEL, hal->ch_param[0]);
+
+ if (rtw_read32(rtwdev, REG_PDMFTH) == 0x79a18a0a &&
+ hal->ch_param[1] != 0)
+ rtw_write32(rtwdev, REG_PDMFTH, hal->ch_param[1]);
+
+ hal->ch_param[0] = rtw_read32(rtwdev, REG_CCASEL);
+ hal->ch_param[1] = rtw_read32(rtwdev, REG_PDMFTH);
+ }
+}
+
+static void rtw8814a_set_nbi_reg(struct rtw_dev *rtwdev, u32 tone_idx)
+{
+ /* tone_idx * 10 */
+ static const u32 nbi_128[] = {
+ 25, 55, 85, 115, 135,
+ 155, 185, 205, 225, 245,
+ 265, 285, 305, 335, 355,
+ 375, 395, 415, 435, 455,
+ 485, 505, 525, 555, 585, 615, 635
+ };
+ u32 reg_idx = 0;
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(nbi_128); i++) {
+ if (tone_idx < nbi_128[i]) {
+ reg_idx = i + 1;
+ break;
+ }
+ }
+
+ rtw_write32_mask(rtwdev, REG_NBI_SETTING, 0xfc000, reg_idx);
+}
+
+static void rtw8814a_nbi_setting(struct rtw_dev *rtwdev, u32 ch, u32 f_intf)
+{
+ u32 fc, int_distance, tone_idx;
+
+ fc = 2412 + (ch - 1) * 5;
+ int_distance = abs_diff(fc, f_intf);
+
+ /* 10 * (int_distance / 0.3125) */
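+ /* 10 / 0.3125 = 32, hence the shift by 5 */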
+ tone_idx = int_distance << 5;
+
+ rtw8814a_set_nbi_reg(rtwdev, tone_idx);
+
+ rtw_write32_mask(rtwdev, REG_NBI_SETTING, BIT_NBI_ENABLE, 1);
+}
+
+static void rtw8814a_spur_nbi_setting(struct rtw_dev *rtwdev)
+{
+ u8 primary_channel = rtwdev->hal.primary_channel;
+ u8 rfe_type = rtwdev->efuse.rfe_option;
+
+ if (rfe_type != 0 && rfe_type != 1 && rfe_type != 6 && rfe_type != 7)
+ return;
+
+ if (primary_channel == 14)
+ rtw8814a_nbi_setting(rtwdev, primary_channel, 2480);
+ else if (primary_channel >= 4 && primary_channel <= 8)
+ rtw8814a_nbi_setting(rtwdev, primary_channel, 2440);
+ else
+ rtw_write32_mask(rtwdev, REG_NBI_SETTING, BIT_NBI_ENABLE, 0);
+}
+
+/* A workaround to eliminate the 5280 MHz, 5600 MHz and 5760 MHz spurs of the 8814A */
+static void rtw8814a_spur_calibration(struct rtw_dev *rtwdev, u8 channel, u8 bw)
+{
+ u8 rfe_type = rtwdev->efuse.rfe_option;
+ bool reset_nbi_csi = true;
+
+ if (rfe_type == 0) {
+ switch (bw) {
+ case RTW_CHANNEL_WIDTH_40:
+ if (channel == 54 || channel == 118) {
+ rtw_write32_mask(rtwdev, REG_NBI_SETTING,
+ 0x000fe000, 0x3e >> 1);
+ rtw_write32_mask(rtwdev, REG_CSI_MASK_SETTING1,
+ BIT(0), 1);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK0, 0);
+ rtw_write32_mask(rtwdev, REG_CSI_FIX_MASK1,
+ BIT(0), 1);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK6, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK7, 0);
+
+ reset_nbi_csi = false;
+ } else if (channel == 151) {
+ rtw_write32_mask(rtwdev, REG_NBI_SETTING,
+ 0x000fe000, 0x1e >> 1);
+ rtw_write32_mask(rtwdev, REG_CSI_MASK_SETTING1,
+ BIT(0), 1);
+ rtw_write32_mask(rtwdev, REG_CSI_FIX_MASK0,
+ BIT(16), 1);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK1, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK6, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK7, 0);
+
+ reset_nbi_csi = false;
+ }
+ break;
+ case RTW_CHANNEL_WIDTH_80:
+ if (channel == 58 || channel == 122) {
+ rtw_write32_mask(rtwdev, REG_NBI_SETTING,
+ 0x000fe000, 0x3a >> 1);
+ rtw_write32_mask(rtwdev, REG_CSI_MASK_SETTING1,
+ BIT(0), 1);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK0, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK1, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK6, 0);
+ rtw_write32_mask(rtwdev, REG_CSI_FIX_MASK7,
+ BIT(0), 1);
+
+ reset_nbi_csi = false;
+ } else if (channel == 155) {
+ rtw_write32_mask(rtwdev, REG_NBI_SETTING,
+ 0x000fe000, 0x5a >> 1);
+ rtw_write32_mask(rtwdev, REG_CSI_MASK_SETTING1,
+ BIT(0), 1);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK0, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK1, 0);
+ rtw_write32_mask(rtwdev, REG_CSI_FIX_MASK6,
+ BIT(16), 1);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK7, 0);
+
+ reset_nbi_csi = false;
+ }
+ break;
+ case RTW_CHANNEL_WIDTH_20:
+ if (channel == 153) {
+ rtw_write32_mask(rtwdev, REG_NBI_SETTING,
+ 0x000fe000, 0x1e >> 1);
+ rtw_write32_mask(rtwdev, REG_CSI_MASK_SETTING1,
+ BIT(0), 1);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK0, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK1, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK6, 0);
+ rtw_write32_mask(rtwdev, REG_CSI_FIX_MASK7,
+ BIT(16), 1);
+
+ reset_nbi_csi = false;
+ }
+
+ rtw8814a_spur_calibration_ch140(rtwdev, channel);
+ break;
+ default:
+ break;
+ }
+ } else if (rfe_type == 1 || rfe_type == 2) {
+ switch (bw) {
+ case RTW_CHANNEL_WIDTH_20:
+ if (channel == 153) {
+ rtw_write32_mask(rtwdev, REG_NBI_SETTING,
+ 0x000fe000, 0x1E >> 1);
+ rtw_write32_mask(rtwdev, REG_CSI_MASK_SETTING1,
+ BIT(0), 1);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK0, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK1, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK6, 0);
+ rtw_write32_mask(rtwdev, REG_CSI_FIX_MASK7,
+ BIT(16), 1);
+
+ reset_nbi_csi = false;
+ }
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ if (channel == 151) {
+ rtw_write32_mask(rtwdev, REG_NBI_SETTING,
+ 0x000fe000, 0x1e >> 1);
+ rtw_write32_mask(rtwdev, REG_CSI_MASK_SETTING1,
+ BIT(0), 1);
+ rtw_write32_mask(rtwdev, REG_CSI_FIX_MASK0,
+ BIT(16), 1);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK1, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK6, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK7, 0);
+
+ reset_nbi_csi = false;
+ }
+ break;
+ case RTW_CHANNEL_WIDTH_80:
+ if (channel == 155) {
+ rtw_write32_mask(rtwdev, REG_NBI_SETTING,
+ 0x000fe000, 0x5a >> 1);
+ rtw_write32_mask(rtwdev, REG_CSI_MASK_SETTING1,
+ BIT(0), 1);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK0, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK1, 0);
+ rtw_write32_mask(rtwdev, REG_CSI_FIX_MASK6,
+ BIT(16), 1);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK7, 0);
+
+ reset_nbi_csi = false;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (reset_nbi_csi) {
+ rtw_write32_mask(rtwdev, REG_NBI_SETTING,
+ 0x000fe000, 0xfc >> 1);
+ rtw_write32_mask(rtwdev, REG_CSI_MASK_SETTING1, BIT(0), 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK0, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK1, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK6, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK7, 0);
+ }
+
+ rtw8814a_spur_nbi_setting(rtwdev);
+}
+
+static void rtw8814a_set_bw_mode(struct rtw_dev *rtwdev, u8 new_band,
+ u8 channel, u8 bw, u8 primary_chan_idx)
+{
+ u8 txsc40 = 0, txsc20, txsc;
+
+ rtw8814a_set_bw_reg_mac(rtwdev, bw);
+
+ txsc20 = primary_chan_idx;
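+ /* For 80 MHz, derive the 40 MHz sideband from the 20 MHz primary index */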
+ if (bw == RTW_CHANNEL_WIDTH_80) {
+ if (txsc20 == RTW_SC_20_UPPER || txsc20 == RTW_SC_20_UPMOST)
+ txsc40 = RTW_SC_40_UPPER;
+ else
+ txsc40 = RTW_SC_40_LOWER;
+ }
+
+ txsc = BIT_TXSC_20M(txsc20) | BIT_TXSC_40M(txsc40);
+ rtw_write8(rtwdev, REG_DATA_SC, txsc);
+
+ rtw8814a_set_bw_reg_adc(rtwdev, bw);
+ rtw8814a_set_bw_reg_agc(rtwdev, new_band, bw);
+
+ if (bw == RTW_CHANNEL_WIDTH_80) {
+ rtw_write32_mask(rtwdev, REG_ADCCLK, 0x3c, txsc);
+ } else if (bw == RTW_CHANNEL_WIDTH_40) {
+ rtw_write32_mask(rtwdev, REG_ADCCLK, 0x3c, txsc);
+
+ if (txsc == RTW_SC_20_UPPER)
+ rtw_write32_set(rtwdev, REG_RXSB, BIT(4));
+ else
+ rtw_write32_clr(rtwdev, REG_RXSB, BIT(4));
+ }
+
+ rtw8814a_set_bw_rf(rtwdev, bw);
+
+ rtw8814a_adc_clk(rtwdev);
+
+ rtw8814a_spur_calibration(rtwdev, channel, bw);
+}
+
+static void rtw8814a_set_channel(struct rtw_dev *rtwdev, u8 channel, u8 bw,
+ u8 primary_chan_idx)
+{
+ u8 old_band, new_band;
+
+ if (rtw_read8(rtwdev, REG_CCK_CHECK) & BIT_CHECK_CCK_EN)
+ old_band = RTW_BAND_5G;
+ else
+ old_band = RTW_BAND_2G;
+
+ if (channel > 14)
+ new_band = RTW_BAND_5G;
+ else
+ new_band = RTW_BAND_2G;
+
+ if (new_band != old_band)
+ rtw8814a_switch_band(rtwdev, new_band, bw);
+
+ rtw8814a_switch_channel(rtwdev, channel);
+
+ rtw8814a_24g_cck_tx_dfir(rtwdev, channel);
+
+ rtw8814a_set_bw_mode(rtwdev, new_band, channel, bw, primary_chan_idx);
+}
+
+static s8 rtw8814a_cck_rx_pwr(u8 lna_idx, u8 vga_idx)
+{
+ s8 rx_pwr_all = 0;
+
+ switch (lna_idx) {
+ case 7:
+ rx_pwr_all = -38 - 2 * vga_idx;
+ break;
+ case 5:
+ rx_pwr_all = -28 - 2 * vga_idx;
+ break;
+ case 3:
+ rx_pwr_all = -8 - 2 * vga_idx;
+ break;
+ case 2:
+ rx_pwr_all = -1 - 2 * vga_idx;
+ break;
+ default:
+ break;
+ }
+
+ return rx_pwr_all;
+}
+
+static void rtw8814a_query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_jaguar_phy_status_rpt *rpt;
+ u8 gain[RTW_RF_PATH_MAX], rssi, i;
+ s8 rx_pwr_db, middle1, middle2;
+ s8 snr[RTW_RF_PATH_MAX];
+ s8 evm[RTW_RF_PATH_MAX];
+ u8 rfmode, subchannel;
+ u8 lna, vga;
+ s8 cfo[2];
+
+ rpt = (struct rtw_jaguar_phy_status_rpt *)phy_status;
+
+ pkt_stat->bw = RTW_CHANNEL_WIDTH_20;
+
+ if (pkt_stat->rate <= DESC_RATE11M) {
+ lna = le32_get_bits(rpt->w1, RTW_JGRPHY_W1_AGC_RPT_LNA_IDX);
+ vga = le32_get_bits(rpt->w1, RTW_JGRPHY_W1_AGC_RPT_VGA_IDX);
+
+ rx_pwr_db = rtw8814a_cck_rx_pwr(lna, vga);
+
+ pkt_stat->rx_power[RF_PATH_A] = rx_pwr_db;
+ pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1);
+ dm_info->rssi[RF_PATH_A] = pkt_stat->rssi;
+ pkt_stat->signal_power = rx_pwr_db;
+ } else { /* OFDM rate */
+ gain[RF_PATH_A] = le32_get_bits(rpt->w0, RTW_JGRPHY_W0_GAIN_A);
+ gain[RF_PATH_B] = le32_get_bits(rpt->w0, RTW_JGRPHY_W0_GAIN_B);
+ gain[RF_PATH_C] = le32_get_bits(rpt->w5, RTW_JGRPHY_W5_GAIN_C);
+ gain[RF_PATH_D] = le32_get_bits(rpt->w6, RTW_JGRPHY_W6_GAIN_D);
+
+ snr[RF_PATH_A] = le32_get_bits(rpt->w3, RTW_JGRPHY_W3_RXSNR_A);
+ snr[RF_PATH_B] = le32_get_bits(rpt->w4, RTW_JGRPHY_W4_RXSNR_B);
+ snr[RF_PATH_C] = le32_get_bits(rpt->w5, RTW_JGRPHY_W5_RXSNR_C);
+ snr[RF_PATH_D] = le32_get_bits(rpt->w5, RTW_JGRPHY_W5_RXSNR_D);
+
+ evm[RF_PATH_A] = le32_get_bits(rpt->w3, RTW_JGRPHY_W3_RXEVM_1);
+ evm[RF_PATH_B] = le32_get_bits(rpt->w3, RTW_JGRPHY_W3_RXEVM_2);
+ evm[RF_PATH_C] = le32_get_bits(rpt->w4, RTW_JGRPHY_W4_RXEVM_3);
+ evm[RF_PATH_D] = le32_get_bits(rpt->w5, RTW_JGRPHY_W5_RXEVM_4);
+
+ if (pkt_stat->rate <= DESC_RATE54M)
+ evm[RF_PATH_A] = le32_get_bits(rpt->w6,
+ RTW_JGRPHY_W6_SIGEVM);
+
+ for (i = RF_PATH_A; i < RTW_RF_PATH_MAX; i++) {
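+ /* The gain report appears to be offset by 110 from the RX power in dBm */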
+ pkt_stat->rx_power[i] = gain[i] - 110;
+
+ rssi = rtw_phy_rf_power_2_rssi(&pkt_stat->rx_power[i], 1);
+ dm_info->rssi[i] = rssi;
+
+ pkt_stat->rx_snr[i] = snr[i];
+ dm_info->rx_snr[i] = snr[i] >> 1;
+
+ pkt_stat->rx_evm[i] = evm[i];
+ evm[i] = max_t(s8, -127, evm[i]);
+ dm_info->rx_evm_dbm[i] = abs(evm[i]) >> 1;
+ }
+
+ rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power,
+ RTW_RF_PATH_MAX);
+ pkt_stat->rssi = rssi;
+
+ /* When power saving is enabled the hardware sometimes
+ * reports unbelievably high gain for paths A and C
+ * (e.g. one frame 64 68 68 72, the next frame 106 66 88 72,
+ * the next 66 66 68 72), so use the second lowest gain
+ * instead of the highest.
+ */
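+ /* For example, gains 106 66 88 72 give middle1 = max(66, 72) = 72
+ * and middle2 = min(106, 88) = 88, so the lower of the two is 72.
+ */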
+ middle1 = max(min(gain[RF_PATH_A], gain[RF_PATH_B]),
+ min(gain[RF_PATH_C], gain[RF_PATH_D]));
+ middle2 = min(max(gain[RF_PATH_A], gain[RF_PATH_B]),
+ max(gain[RF_PATH_C], gain[RF_PATH_D]));
+ rx_pwr_db = min(middle1, middle2);
+ rx_pwr_db -= 110;
+ pkt_stat->signal_power = rx_pwr_db;
+
+ rfmode = le32_get_bits(rpt->w0, RTW_JGRPHY_W0_R_RFMOD);
+ subchannel = le32_get_bits(rpt->w0, RTW_JGRPHY_W0_SUB_CHNL);
+
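+ /* r_rfmod 1 = 40 MHz frame, 2 = 80 MHz frame; sub_chnl 0 means
+ * the frame occupied the full bandwidth
+ */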
+ if (rfmode == 1 && subchannel == 0) {
+ pkt_stat->bw = RTW_CHANNEL_WIDTH_40;
+ } else if (rfmode == 2) {
+ if (subchannel == 0)
+ pkt_stat->bw = RTW_CHANNEL_WIDTH_80;
+ else if (subchannel == 9 || subchannel == 10)
+ pkt_stat->bw = RTW_CHANNEL_WIDTH_40;
+ }
+
+ cfo[RF_PATH_A] = le32_get_bits(rpt->w2, RTW_JGRPHY_W2_CFO_TAIL_A);
+ cfo[RF_PATH_B] = le32_get_bits(rpt->w2, RTW_JGRPHY_W2_CFO_TAIL_B);
+
+ for (i = RF_PATH_A; i < 2; i++) {
+ pkt_stat->cfo_tail[i] = cfo[i];
+ dm_info->cfo_tail[i] = (cfo[i] * 5) >> 1;
+ }
+ }
+}
+
+static void
+rtw8814a_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ u32 txagc_table_wd;
+ u8 rate, pwr_index;
+ int j;
+
+ for (j = 0; j < rtw_rate_size[rs]; j++) {
+ rate = rtw_rate_section[rs][j];
+
+ pwr_index = hal->tx_pwr_tbl[path][rate] + 2;
+ if (pwr_index > rtwdev->chip->max_power_index)
+ pwr_index = rtwdev->chip->max_power_index;
+
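+ /* Power index in bits [31:24], RF path in [15:8], rate in [7:0] */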
+ txagc_table_wd = 0x00801000;
+ txagc_table_wd |= (pwr_index << 24) | (path << 8) | rate;
+
+ rtw_write32(rtwdev, REG_AGC_TBL, txagc_table_wd);
+
+ /* Write twice for the first rate: the first write turns on
+ * the TXAGC table, the second writes address 0.
+ */
+ if (rate == DESC_RATE1M)
+ rtw_write32(rtwdev, REG_AGC_TBL, txagc_table_wd);
+ }
+}
+
+static void rtw8814a_set_tx_power_index(struct rtw_dev *rtwdev)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ int path;
+
+ for (path = 0; path < hal->rf_path_num; path++) {
+ if (hal->current_band_type == RTW_BAND_2G)
+ rtw8814a_set_tx_power_index_by_rate(rtwdev, path,
+ RTW_RATE_SECTION_CCK);
+
+ rtw8814a_set_tx_power_index_by_rate(rtwdev, path,
+ RTW_RATE_SECTION_OFDM);
+
+ if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
+ continue;
+
+ rtw8814a_set_tx_power_index_by_rate(rtwdev, path,
+ RTW_RATE_SECTION_HT_1S);
+ rtw8814a_set_tx_power_index_by_rate(rtwdev, path,
+ RTW_RATE_SECTION_VHT_1S);
+
+ rtw8814a_set_tx_power_index_by_rate(rtwdev, path,
+ RTW_RATE_SECTION_HT_2S);
+ rtw8814a_set_tx_power_index_by_rate(rtwdev, path,
+ RTW_RATE_SECTION_VHT_2S);
+
+ rtw8814a_set_tx_power_index_by_rate(rtwdev, path,
+ RTW_RATE_SECTION_HT_3S);
+ rtw8814a_set_tx_power_index_by_rate(rtwdev, path,
+ RTW_RATE_SECTION_VHT_3S);
+ }
+}
+
+static void rtw8814a_cfg_ldo25(struct rtw_dev *rtwdev, bool enable)
+{
+}
+
+static void rtw8814a_false_alarm_statistics(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u32 cck_fa_cnt, ofdm_fa_cnt;
+ u32 crc32_cnt, cca32_cnt;
+ u32 cck_enable;
+
+ cck_enable = rtw_read32(rtwdev, REG_RXPSEL) & BIT(28);
+ cck_fa_cnt = rtw_read16(rtwdev, REG_FA_CCK);
+ ofdm_fa_cnt = rtw_read16(rtwdev, REG_FA_OFDM);
+
+ dm_info->cck_fa_cnt = cck_fa_cnt;
+ dm_info->ofdm_fa_cnt = ofdm_fa_cnt;
+ dm_info->total_fa_cnt = ofdm_fa_cnt;
+ if (cck_enable)
+ dm_info->total_fa_cnt += cck_fa_cnt;
+
+ crc32_cnt = rtw_read32(rtwdev, REG_CRC_CCK);
+ dm_info->cck_ok_cnt = u32_get_bits(crc32_cnt, MASKLWORD);
+ dm_info->cck_err_cnt = u32_get_bits(crc32_cnt, MASKHWORD);
+
+ crc32_cnt = rtw_read32(rtwdev, REG_CRC_OFDM);
+ dm_info->ofdm_ok_cnt = u32_get_bits(crc32_cnt, MASKLWORD);
+ dm_info->ofdm_err_cnt = u32_get_bits(crc32_cnt, MASKHWORD);
+
+ crc32_cnt = rtw_read32(rtwdev, REG_CRC_HT);
+ dm_info->ht_ok_cnt = u32_get_bits(crc32_cnt, MASKLWORD);
+ dm_info->ht_err_cnt = u32_get_bits(crc32_cnt, MASKHWORD);
+
+ crc32_cnt = rtw_read32(rtwdev, REG_CRC_VHT);
+ dm_info->vht_ok_cnt = u32_get_bits(crc32_cnt, MASKLWORD);
+ dm_info->vht_err_cnt = u32_get_bits(crc32_cnt, MASKHWORD);
+
+ cca32_cnt = rtw_read32(rtwdev, REG_CCA_OFDM);
+ dm_info->ofdm_cca_cnt = u32_get_bits(cca32_cnt, MASKHWORD);
+ dm_info->total_cca_cnt = dm_info->ofdm_cca_cnt;
+ if (cck_enable) {
+ cca32_cnt = rtw_read32(rtwdev, REG_CCA_CCK);
+ dm_info->cck_cca_cnt = u32_get_bits(cca32_cnt, MASKLWORD);
+ dm_info->total_cca_cnt += dm_info->cck_cca_cnt;
+ }
+
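+ /* Toggle these bits to reset the false alarm and CCA counters */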
+ rtw_write32_set(rtwdev, REG_FAS, BIT(17));
+ rtw_write32_clr(rtwdev, REG_FAS, BIT(17));
+ rtw_write32_clr(rtwdev, REG_CCK0_FAREPORT, BIT(15));
+ rtw_write32_set(rtwdev, REG_CCK0_FAREPORT, BIT(15));
+ rtw_write32_set(rtwdev, REG_CNTRST, BIT(0));
+ rtw_write32_clr(rtwdev, REG_CNTRST, BIT(0));
+}
+
+#define MAC_REG_NUM_8814 2
+#define BB_REG_NUM_8814 14
+#define RF_REG_NUM_8814 1
+
+static void rtw8814a_iqk_backup_mac_bb(struct rtw_dev *rtwdev,
+ u32 *mac_backup, u32 *bb_backup,
+ const u32 *mac_regs,
+ const u32 *bb_regs)
+{
+ u32 i;
+
+ /* save MACBB default value */
+ for (i = 0; i < MAC_REG_NUM_8814; i++)
+ mac_backup[i] = rtw_read32(rtwdev, mac_regs[i]);
+
+ for (i = 0; i < BB_REG_NUM_8814; i++)
+ bb_backup[i] = rtw_read32(rtwdev, bb_regs[i]);
+}
+
+static void rtw8814a_iqk_backup_rf(struct rtw_dev *rtwdev,
+ u32 rf_backup[][4], const u32 *rf_regs)
+{
+ u32 i;
+
+ /* Save RF Parameters */
+ for (i = 0; i < RF_REG_NUM_8814; i++) {
+ rf_backup[i][RF_PATH_A] = rtw_read_rf(rtwdev, RF_PATH_A,
+ rf_regs[i], RFREG_MASK);
+ rf_backup[i][RF_PATH_B] = rtw_read_rf(rtwdev, RF_PATH_B,
+ rf_regs[i], RFREG_MASK);
+ rf_backup[i][RF_PATH_C] = rtw_read_rf(rtwdev, RF_PATH_C,
+ rf_regs[i], RFREG_MASK);
+ rf_backup[i][RF_PATH_D] = rtw_read_rf(rtwdev, RF_PATH_D,
+ rf_regs[i], RFREG_MASK);
+ }
+}
+
+static void rtw8814a_iqk_afe_setting(struct rtw_dev *rtwdev, bool do_iqk)
+{
+ if (do_iqk) {
+ /* IQK AFE setting RX_WAIT_CCA mode */
+ rtw_write32(rtwdev, REG_AFE_PWR1_A, 0x0e808003);
+ rtw_write32(rtwdev, REG_AFE_PWR1_B, 0x0e808003);
+ rtw_write32(rtwdev, REG_AFE_PWR1_C, 0x0e808003);
+ rtw_write32(rtwdev, REG_AFE_PWR1_D, 0x0e808003);
+ } else {
+ rtw_write32(rtwdev, REG_AFE_PWR1_A, 0x07808003);
+ rtw_write32(rtwdev, REG_AFE_PWR1_B, 0x07808003);
+ rtw_write32(rtwdev, REG_AFE_PWR1_C, 0x07808003);
+ rtw_write32(rtwdev, REG_AFE_PWR1_D, 0x07808003);
+ }
+
+ rtw_write32_mask(rtwdev, REG_DAC_RSTB, BIT(13), 0x1);
+
+ rtw_write8_set(rtwdev, REG_GNT_BT, BIT(2) | BIT(1));
+ rtw_write8_clr(rtwdev, REG_GNT_BT, BIT(2) | BIT(1));
+
+ rtw_write32_set(rtwdev, REG_CCK_RPT_FORMAT, BIT(2));
+ rtw_write32_clr(rtwdev, REG_CCK_RPT_FORMAT, BIT(2));
+}
+
+static void rtw8814a_iqk_restore_mac_bb(struct rtw_dev *rtwdev,
+ u32 *mac_backup, u32 *bb_backup,
+ const u32 *mac_regs,
+ const u32 *bb_regs)
+{
+ u32 i;
+
+ /* Reload MacBB Parameters */
+ for (i = 0; i < MAC_REG_NUM_8814; i++)
+ rtw_write32(rtwdev, mac_regs[i], mac_backup[i]);
+
+ for (i = 0; i < BB_REG_NUM_8814; i++)
+ rtw_write32(rtwdev, bb_regs[i], bb_backup[i]);
+}
+
+static void rtw8814a_iqk_restore_rf(struct rtw_dev *rtwdev,
+ const u32 rf_backup[][4],
+ const u32 *rf_regs)
+{
+ u32 i;
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, RFREG_MASK, 0x0);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_LUTWE, RFREG_MASK, 0x0);
+ rtw_write_rf(rtwdev, RF_PATH_C, RF_LUTWE, RFREG_MASK, 0x0);
+ rtw_write_rf(rtwdev, RF_PATH_D, RF_LUTWE, RFREG_MASK, 0x0);
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_RXBB2, RFREG_MASK, 0x88001);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_RXBB2, RFREG_MASK, 0x88001);
+ rtw_write_rf(rtwdev, RF_PATH_C, RF_RXBB2, RFREG_MASK, 0x88001);
+ rtw_write_rf(rtwdev, RF_PATH_D, RF_RXBB2, RFREG_MASK, 0x88001);
+
+ for (i = 0; i < RF_REG_NUM_8814; i++) {
+ rtw_write_rf(rtwdev, RF_PATH_A, rf_regs[i],
+ RFREG_MASK, rf_backup[i][RF_PATH_A]);
+ rtw_write_rf(rtwdev, RF_PATH_B, rf_regs[i],
+ RFREG_MASK, rf_backup[i][RF_PATH_B]);
+ rtw_write_rf(rtwdev, RF_PATH_C, rf_regs[i],
+ RFREG_MASK, rf_backup[i][RF_PATH_C]);
+ rtw_write_rf(rtwdev, RF_PATH_D, rf_regs[i],
+ RFREG_MASK, rf_backup[i][RF_PATH_D]);
+ }
+}
+
+static void rtw8814a_iqk_reset_nctl(struct rtw_dev *rtwdev)
+{
+ rtw_write32(rtwdev, 0x1b00, 0xf8000000);
+ rtw_write32(rtwdev, 0x1b80, 0x00000006);
+
+ rtw_write32(rtwdev, 0x1b00, 0xf8000000);
+ rtw_write32(rtwdev, 0x1b80, 0x00000002);
+}
+
+static void rtw8814a_iqk_configure_mac(struct rtw_dev *rtwdev)
+{
+ rtw_write8(rtwdev, REG_TXPAUSE, 0x3f);
+ rtw_write32_clr(rtwdev, REG_BCN_CTRL,
+ (BIT_EN_BCN_FUNCTION << 8) | BIT_EN_BCN_FUNCTION);
+
+ /* RX antenna off */
+ rtw_write8(rtwdev, REG_RXPSEL, 0x00);
+ /* CCA off */
+ rtw_write32_mask(rtwdev, REG_CCA2ND, 0xf, 0xe);
+ /* CCK RX path off */
+ rtw_write32_set(rtwdev, REG_PRECTRL, BIT_IQ_WGT);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x77777777);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x77777777);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_C, 0x77777777);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_D, 0x77777777);
+ rtw_write32_mask(rtwdev, REG_RFE_INVSEL_D, BIT_RFE_SELSW0_D, 0x77);
+ rtw_write32_mask(rtwdev, REG_PSD, BIT_PSD_INI, 0x0);
+
+ rtw_write32_mask(rtwdev, REG_RFE_INV0, 0xf, 0x0);
+}
+
+static void rtw8814a_lok_one_shot(struct rtw_dev *rtwdev, u8 path)
+{
+ u32 lok_temp1, lok_temp2;
+ bool lok_ready;
+ u8 ii;
+
+ /* ADC Clock source */
+ rtw_write32_mask(rtwdev, REG_FAS, BIT(21) | BIT(20), path);
+ /* LOK: CMD ID = 0
+ * {0xf8000011, 0xf8000021, 0xf8000041, 0xf8000081}
+ */
+ rtw_write32(rtwdev, 0x1b00, 0xf8000001 | (BIT(path) << 4));
+
+ usleep_range(1000, 1100);
+
+ if (read_poll_timeout(!rtw_read32_mask, lok_ready, lok_ready,
+ 1000, 10000, false,
+ rtwdev, 0x1b00, BIT(0))) {
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "==>S%d LOK timed out\n", path);
+
+ rtw8814a_iqk_reset_nctl(rtwdev);
+
+ rtw_write_rf(rtwdev, path, RF_DTXLOK, RFREG_MASK, 0x08400);
+
+ return;
+ }
+
+ rtw_write32(rtwdev, 0x1b00, 0xf8000000 | (path << 1));
+ rtw_write32(rtwdev, 0x1bd4, 0x003f0001);
+
+ lok_temp2 = rtw_read32_mask(rtwdev, 0x1bfc, 0x003e0000);
+ lok_temp2 = (lok_temp2 + 0x10) & 0x1f;
+
+ lok_temp1 = rtw_read32_mask(rtwdev, 0x1bfc, 0x0000003e);
+ lok_temp1 = (lok_temp1 + 0x10) & 0x1f;
+
+ for (ii = 1; ii < 5; ii++) {
+ lok_temp1 += (lok_temp1 & BIT(4 - ii)) << (ii * 2);
+ lok_temp2 += (lok_temp2 & BIT(4 - ii)) << (ii * 2);
+ }
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "path %d lok_temp1 = %#x, lok_temp2 = %#x\n",
+ path, lok_temp1 >> 4, lok_temp2 >> 4);
+
+ rtw_write_rf(rtwdev, path, RF_DTXLOK, 0x07c00, lok_temp1 >> 4);
+ rtw_write_rf(rtwdev, path, RF_DTXLOK, 0xf8000, lok_temp2 >> 4);
+}
+
+static void rtw8814a_iqk_tx_one_shot(struct rtw_dev *rtwdev, u8 path,
+ u32 *tx_matrix, bool *tx_ok)
+{
+ u8 bw = rtwdev->hal.current_band_width;
+ u8 cal_retry;
+ u32 iqk_cmd;
+
+ for (cal_retry = 0; cal_retry < 4; cal_retry++) {
+ rtw_write32_mask(rtwdev, REG_FAS, BIT(21) | BIT(20), path);
+
+ iqk_cmd = 0xf8000001 | ((bw + 3) << 8) | (BIT(path) << 4);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "TXK_Trigger = %#x\n", iqk_cmd);
+
+ rtw_write32(rtwdev, 0x1b00, iqk_cmd);
+
+ usleep_range(10000, 11000);
+
+ if (read_poll_timeout(!rtw_read32_mask, *tx_ok, *tx_ok,
+ 1000, 20000, false,
+ rtwdev, 0x1b00, BIT(0))) {
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "tx iqk S%d timed out\n", path);
+
+ rtw8814a_iqk_reset_nctl(rtwdev);
+ } else {
+ *tx_ok = !rtw_read32_mask(rtwdev, 0x1b08, BIT(26));
+
+ if (*tx_ok)
+ break;
+ }
+ }
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "S%d tx ==> 0x1b00 = 0x%x\n",
+ path, rtw_read32(rtwdev, 0x1b00));
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "S%d tx ==> 0x1b08 = 0x%x\n",
+ path, rtw_read32(rtwdev, 0x1b08));
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "S%d tx ==> cal_retry = %x\n",
+ path, cal_retry);
+
+ rtw_write32(rtwdev, 0x1b00, 0xf8000000 | (path << 1));
+
+ if (*tx_ok) {
+ *tx_matrix = rtw_read32(rtwdev, 0x1b38);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "S%d_IQC = 0x%x\n",
+ path, *tx_matrix);
+ }
+}
+
+static void rtw8814a_iqk_rx_one_shot(struct rtw_dev *rtwdev, u8 path,
+ u32 *tx_matrix, bool *tx_ok)
+{
+ static const u16 iqk_apply[RTW_RF_PATH_MAX] = {
+ REG_TXAGCIDX, REG_TX_AGC_B, REG_TX_AGC_C, REG_TX_AGC_D
+ };
+ u8 band = rtwdev->hal.current_band_type;
+ u8 bw = rtwdev->hal.current_band_width;
+ u32 rx_matrix;
+ u8 cal_retry;
+ u32 iqk_cmd;
+ bool rx_ok;
+
+ for (cal_retry = 0; cal_retry < 4; cal_retry++) {
+ rtw_write32_mask(rtwdev, REG_FAS, BIT(21) | BIT(20), path);
+
+ if (band == RTW_BAND_2G) {
+ rtw_write_rf(rtwdev, path, RF_LUTDBG, BIT(11), 0x1);
+ rtw_write_rf(rtwdev, path, RF_GAINTX, 0xfffff, 0x51ce1);
+
+ switch (path) {
+ case 0:
+ case 1:
+ rtw_write32(rtwdev, REG_RFE_PINMUX_B,
+ 0x54775477);
+ break;
+ case 2:
+ rtw_write32(rtwdev, REG_RFE_PINMUX_C,
+ 0x54775477);
+ break;
+ case 3:
+ rtw_write32(rtwdev, REG_RFE_INVSEL_D, 0x75400000);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_D,
+ 0x77777777);
+ break;
+ }
+ }
+
+ iqk_cmd = 0xf8000001 | ((9 - bw) << 8) | (BIT(path) << 4);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "RXK_Trigger = 0x%x\n", iqk_cmd);
+
+ rtw_write32(rtwdev, 0x1b00, iqk_cmd);
+
+ usleep_range(10000, 11000);
+
+ if (read_poll_timeout(!rtw_read32_mask, rx_ok, rx_ok,
+ 1000, 20000, false,
+ rtwdev, 0x1b00, BIT(0))) {
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "rx iqk S%d timed out\n", path);
+
+ rtw8814a_iqk_reset_nctl(rtwdev);
+ } else {
+ rx_ok = !rtw_read32_mask(rtwdev, 0x1b08, BIT(26));
+
+ if (rx_ok)
+ break;
+ }
+ }
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "S%d rx ==> 0x1b00 = 0x%x\n",
+ path, rtw_read32(rtwdev, 0x1b00));
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "S%d rx ==> 0x1b08 = 0x%x\n",
+ path, rtw_read32(rtwdev, 0x1b08));
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "S%d rx ==> cal_retry = %x\n",
+ path, cal_retry);
+
+ rtw_write32(rtwdev, 0x1b00, 0xf8000000 | (path << 1));
+
+ if (rx_ok) {
+ rtw_write32(rtwdev, 0x1b3c, 0x20000000);
+ rx_matrix = rtw_read32(rtwdev, 0x1b3c);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "S%d_IQC = 0x%x\n",
+ path, rx_matrix);
+ }
+
+ if (*tx_ok)
+ rtw_write32(rtwdev, 0x1b38, *tx_matrix);
+ else
+ rtw_write32_mask(rtwdev, iqk_apply[path], BIT(0), 0x0);
+
+ if (!rx_ok)
+ rtw_write32_mask(rtwdev, iqk_apply[path],
+ BIT(11) | BIT(10), 0x0);
+
+ if (band == RTW_BAND_2G)
+ rtw_write_rf(rtwdev, path, RF_LUTDBG, BIT(11), 0x0);
+}
+
+static void rtw8814a_iqk(struct rtw_dev *rtwdev)
+{
+ u8 band = rtwdev->hal.current_band_type;
+ u8 bw = rtwdev->hal.current_band_width;
+ u32 tx_matrix[RTW_RF_PATH_MAX];
+ bool tx_ok[RTW_RF_PATH_MAX];
+ u8 path;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "IQK band = %d GHz bw = %d MHz\n",
+ band == RTW_BAND_2G ? 2 : 5, (1 << (bw + 1)) * 10);
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_TXMOD, BIT(19), 0x1);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_TXMOD, BIT(19), 0x1);
+ rtw_write_rf(rtwdev, RF_PATH_C, RF_TXMOD, BIT(19), 0x1);
+ rtw_write_rf(rtwdev, RF_PATH_D, RF_TXMOD, BIT(19), 0x1);
+
+ rtw_write32_mask(rtwdev, REG_TXAGCIDX,
+ (BIT(11) | BIT(10) | BIT(0)), 0x401);
+ rtw_write32_mask(rtwdev, REG_TX_AGC_B,
+ (BIT(11) | BIT(10) | BIT(0)), 0x401);
+ rtw_write32_mask(rtwdev, REG_TX_AGC_C,
+ (BIT(11) | BIT(10) | BIT(0)), 0x401);
+ rtw_write32_mask(rtwdev, REG_TX_AGC_D,
+ (BIT(11) | BIT(10) | BIT(0)), 0x401);
+
+ if (band == RTW_BAND_5G)
+ rtw_write32(rtwdev, 0x1b00, 0xf8000ff1);
+ else
+ rtw_write32(rtwdev, 0x1b00, 0xf8000ef1);
+
+ usleep_range(1000, 1100);
+
+ rtw_write32(rtwdev, 0x810, 0x20101063);
+ rtw_write32(rtwdev, REG_DAC_RSTB, 0x0B00C000);
+
+ for (path = RF_PATH_A; path < RTW_RF_PATH_MAX; path++)
+ rtw8814a_lok_one_shot(rtwdev, path);
+
+ for (path = RF_PATH_A; path < RTW_RF_PATH_MAX; path++)
+ rtw8814a_iqk_tx_one_shot(rtwdev, path,
+ &tx_matrix[path], &tx_ok[path]);
+
+ for (path = RF_PATH_A; path < RTW_RF_PATH_MAX; path++)
+ rtw8814a_iqk_rx_one_shot(rtwdev, path,
+ &tx_matrix[path], &tx_ok[path]);
+}
+
+static void rtw8814a_do_iqk(struct rtw_dev *rtwdev)
+{
+ static const u32 backup_mac_reg[MAC_REG_NUM_8814] = {0x520, 0x550};
+ static const u32 backup_bb_reg[BB_REG_NUM_8814] = {
+ 0xa14, 0x808, 0x838, 0x90c, 0x810, 0xcb0, 0xeb0,
+ 0x18b4, 0x1ab4, 0x1abc, 0x9a4, 0x764, 0xcbc, 0x910
+ };
+ static const u32 backup_rf_reg[RF_REG_NUM_8814] = {0x0};
+ u32 rf_backup[RF_REG_NUM_8814][RTW_RF_PATH_MAX];
+ u32 mac_backup[MAC_REG_NUM_8814];
+ u32 bb_backup[BB_REG_NUM_8814];
+
+ rtw8814a_iqk_backup_mac_bb(rtwdev, mac_backup, bb_backup,
+ backup_mac_reg, backup_bb_reg);
+ rtw8814a_iqk_afe_setting(rtwdev, true);
+ rtw8814a_iqk_backup_rf(rtwdev, rf_backup, backup_rf_reg);
+ rtw8814a_iqk_configure_mac(rtwdev);
+ rtw8814a_iqk(rtwdev);
+ rtw8814a_iqk_reset_nctl(rtwdev); /* for 3-wire to BB use */
+ rtw8814a_iqk_afe_setting(rtwdev, false);
+ rtw8814a_iqk_restore_mac_bb(rtwdev, mac_backup, bb_backup,
+ backup_mac_reg, backup_bb_reg);
+ rtw8814a_iqk_restore_rf(rtwdev, rf_backup, backup_rf_reg);
+}
+
+static void rtw8814a_phy_calibration(struct rtw_dev *rtwdev)
+{
+ rtw8814a_do_iqk(rtwdev);
+}
+
+static void rtw8814a_coex_cfg_init(struct rtw_dev *rtwdev)
+{
+}
+
+static void rtw8814a_coex_cfg_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type,
+ u8 pos_type)
+{
+ /* Override rtw_coex_coex_ctrl_owner(). RF path C does not
+ * function when BIT_LTE_MUX_CTRL_PATH is set.
+ */
+ rtw_write8_clr(rtwdev, REG_SYS_SDIO_CTRL + 3,
+ BIT_LTE_MUX_CTRL_PATH >> 24);
+}
+
+static void rtw8814a_coex_cfg_gnt_fix(struct rtw_dev *rtwdev)
+{
+}
+
+static void rtw8814a_coex_cfg_gnt_debug(struct rtw_dev *rtwdev)
+{
+}
+
+static void rtw8814a_coex_cfg_rfe_type(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_rfe *coex_rfe = &coex->rfe;
+
+ /* Only needed to make rtw8814a_coex_cfg_ant_switch() run. */
+ coex_rfe->ant_switch_exist = true;
+}
+
+static void rtw8814a_coex_cfg_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr)
+{
+}
+
+static void rtw8814a_coex_cfg_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain)
+{
+}
+
+static void rtw8814a_txagc_swing_offset(struct rtw_dev *rtwdev, u8 path,
+ u8 tx_pwr_idx_offset,
+ s8 *txagc_idx, u8 *swing_idx)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 swing_upper_bound = dm_info->default_ofdm_index + 10;
+ s8 delta_pwr_idx = dm_info->delta_power_index[path];
+ u8 swing_index = dm_info->default_ofdm_index;
+ u8 max_tx_pwr_idx_offset = 0xf;
+ u8 swing_lower_bound = 0;
+ s8 agc_index = 0;
+
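+ /* A positive power delta is absorbed by the TX AGC index up to
+ * tx_pwr_idx_offset; any remainder raises the BB swing index.
+ * A negative delta only lowers the BB swing index.
+ */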
+ tx_pwr_idx_offset = min_t(u8, tx_pwr_idx_offset, max_tx_pwr_idx_offset);
+
+ if (delta_pwr_idx >= 0) {
+ if (delta_pwr_idx <= tx_pwr_idx_offset) {
+ agc_index = delta_pwr_idx;
+ swing_index = dm_info->default_ofdm_index;
+ } else if (delta_pwr_idx > tx_pwr_idx_offset) {
+ agc_index = tx_pwr_idx_offset;
+ swing_index = dm_info->default_ofdm_index +
+ delta_pwr_idx - tx_pwr_idx_offset;
+ swing_index = min_t(u8, swing_index, swing_upper_bound);
+ }
+ } else {
+ if (dm_info->default_ofdm_index > abs(delta_pwr_idx))
+ swing_index =
+ dm_info->default_ofdm_index + delta_pwr_idx;
+ else
+ swing_index = swing_lower_bound;
+ swing_index = max_t(u8, swing_index, swing_lower_bound);
+
+ agc_index = 0;
+ }
+
+ if (swing_index >= RTW_TXSCALE_SIZE) {
+ rtw_warn(rtwdev, "swing index overflow\n");
+ swing_index = RTW_TXSCALE_SIZE - 1;
+ }
+ *txagc_idx = agc_index;
+ *swing_idx = swing_index;
+}
+
+static void rtw8814a_pwrtrack_set_pwr(struct rtw_dev *rtwdev, u8 path,
+ u8 pwr_idx_offset)
+{
+ static const u32 txagc_reg[RTW_RF_PATH_MAX] = {
+ REG_TX_AGC_A, REG_TX_AGC_B, REG_TX_AGC_C, REG_TX_AGC_D
+ };
+ static const u32 txscale_reg[RTW_RF_PATH_MAX] = {
+ REG_TXSCALE_A, REG_TXSCALE_B, REG_TXSCALE_C, REG_TXSCALE_D
+ };
+ s8 txagc_idx;
+ u8 swing_idx;
+
+ rtw8814a_txagc_swing_offset(rtwdev, path, pwr_idx_offset,
+ &txagc_idx, &swing_idx);
+ rtw_write32_mask(rtwdev, txagc_reg[path], GENMASK(29, 25),
+ txagc_idx);
+ rtw_write32_mask(rtwdev, txscale_reg[path], BB_SWING_MASK,
+ rtw8814a_txscale_tbl[swing_idx]);
+}
+
+static void rtw8814a_pwrtrack_set(struct rtw_dev *rtwdev, u8 path)
+{
+ u8 max_pwr_idx = rtwdev->chip->max_power_index;
+ u8 band_width = rtwdev->hal.current_band_width;
+ u8 channel = rtwdev->hal.current_channel;
+ u8 tx_rate = rtwdev->dm_info.tx_rate;
+ u8 regd = rtw_regd_get(rtwdev);
+ u8 pwr_idx_offset, tx_pwr_idx;
+
+ tx_pwr_idx = rtw_phy_get_tx_power_index(rtwdev, path, tx_rate,
+ band_width, channel, regd);
+
+ tx_pwr_idx = min_t(u8, tx_pwr_idx, max_pwr_idx);
+
+ pwr_idx_offset = max_pwr_idx - tx_pwr_idx;
+
+ rtw8814a_pwrtrack_set_pwr(rtwdev, path, pwr_idx_offset);
+}
+
+static void rtw8814a_phy_pwrtrack_path(struct rtw_dev *rtwdev,
+ struct rtw_swing_table *swing_table,
+ u8 path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 power_idx_cur, power_idx_last;
+ u8 delta;
+
+ /* 8814A only has one thermal meter at PATH A */
+ delta = rtw_phy_pwrtrack_get_delta(rtwdev, RF_PATH_A);
+
+ power_idx_last = dm_info->delta_power_index[path];
+ power_idx_cur = rtw_phy_pwrtrack_get_pwridx(rtwdev, swing_table,
+ path, RF_PATH_A, delta);
+
+ /* If the power index delta has not changed, skip */
+ if (power_idx_cur == power_idx_last)
+ return;
+
+ dm_info->delta_power_index[path] = power_idx_cur;
+ rtw8814a_pwrtrack_set(rtwdev, path);
+}
+
+static void rtw8814a_phy_pwrtrack(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_swing_table swing_table;
+ u8 thermal_value, path;
+
+ rtw_phy_config_swing_table(rtwdev, &swing_table);
+
+ if (rtwdev->efuse.thermal_meter[RF_PATH_A] == 0xff)
+ return;
+
+ thermal_value = rtw_read_rf(rtwdev, RF_PATH_A, RF_T_METER, 0xfc00);
+
+ rtw_phy_pwrtrack_avg(rtwdev, thermal_value, RF_PATH_A);
+
+ if (dm_info->pwr_trk_init_trigger)
+ dm_info->pwr_trk_init_trigger = false;
+ else if (!rtw_phy_pwrtrack_thermal_changed(rtwdev, thermal_value,
+ RF_PATH_A))
+ goto iqk;
+
+ for (path = RF_PATH_A; path < rtwdev->hal.rf_path_num; path++)
+ rtw8814a_phy_pwrtrack_path(rtwdev, &swing_table, path);
+
+iqk:
+ if (rtw_phy_pwrtrack_need_iqk(rtwdev))
+ rtw8814a_do_iqk(rtwdev);
+}
+
+static void rtw8814a_pwr_track(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
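+ /* The first call only triggers the thermal meter; the reading
+ * is processed on the next call.
+ */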
+ if (!dm_info->pwr_trk_triggered) {
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_T_METER,
+ GENMASK(17, 16), 0x03);
+ dm_info->pwr_trk_triggered = true;
+ return;
+ }
+
+ rtw8814a_phy_pwrtrack(rtwdev);
+ dm_info->pwr_trk_triggered = false;
+}
+
+static void rtw8814a_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl)
+{
+ static const u8 pd[CCK_PD_LV_MAX] = {0x40, 0x83, 0xcd, 0xdd, 0xed};
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ /* Override rtw_phy_cck_pd_lv_link(). It implements something
+ * like type 2/3/4. We need type 1 here.
+ */
+ if (rtw_is_assoc(rtwdev)) {
+ if (dm_info->min_rssi > 60) {
+ new_lvl = CCK_PD_LV3;
+ } else if (dm_info->min_rssi > 35) {
+ new_lvl = CCK_PD_LV2;
+ } else if (dm_info->min_rssi > 20) {
+ if (dm_info->cck_fa_avg > 500)
+ new_lvl = CCK_PD_LV2;
+ else if (dm_info->cck_fa_avg < 250)
+ new_lvl = CCK_PD_LV1;
+ else
+ return;
+ } else {
+ new_lvl = CCK_PD_LV1;
+ }
+ }
+
+ rtw_dbg(rtwdev, RTW_DBG_PHY, "lv: (%d) -> (%d)\n",
+ dm_info->cck_pd_lv[RTW_CHANNEL_WIDTH_20][RF_PATH_A], new_lvl);
+
+ if (dm_info->cck_pd_lv[RTW_CHANNEL_WIDTH_20][RF_PATH_A] == new_lvl)
+ return;
+
+ dm_info->cck_fa_avg = CCK_FA_AVG_RESET;
+ dm_info->cck_pd_lv[RTW_CHANNEL_WIDTH_20][RF_PATH_A] = new_lvl;
+
+ rtw_write8(rtwdev, REG_CCK_PD_TH, pd[new_lvl]);
+}
+
+static void rtw8814a_led_set(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ struct rtw_dev *rtwdev = container_of(led, struct rtw_dev, led_cdev);
+ u32 led_gpio_cfg;
+
+ led_gpio_cfg = rtw_read32(rtwdev, REG_GPIO_PIN_CTRL_2);
+ led_gpio_cfg |= BIT(16) | BIT(17) | BIT(21) | BIT(22);
+
+ if (brightness == LED_OFF) {
+ led_gpio_cfg |= BIT(8) | BIT(9) | BIT(13) | BIT(14);
+ } else {
+ led_gpio_cfg &= ~(BIT(8) | BIT(9) | BIT(13) | BIT(14));
+ led_gpio_cfg &= ~(BIT(0) | BIT(1) | BIT(5) | BIT(6));
+ }
+
+ rtw_write32(rtwdev, REG_GPIO_PIN_CTRL_2, led_gpio_cfg);
+}
+
+static void rtw8814a_fill_txdesc_checksum(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ u8 *txdesc)
+{
+ size_t words = 32 / 2; /* checksum covers the first 32 bytes (16 words) */
+
+ fill_txdesc_checksum_common(txdesc, words);
+}
+
+static const struct rtw_chip_ops rtw8814a_ops = {
+ .power_on = rtw_power_on,
+ .power_off = rtw_power_off,
+ .phy_set_param = rtw8814a_phy_set_param,
+ .read_efuse = rtw8814a_read_efuse,
+ .query_phy_status = rtw8814a_query_phy_status,
+ .set_channel = rtw8814a_set_channel,
+ .mac_init = rtw8814a_mac_init,
+ .read_rf = rtw_phy_read_rf,
+ .write_rf = rtw_phy_write_rf_reg_sipi,
+ .set_tx_power_index = rtw8814a_set_tx_power_index,
+ .set_antenna = NULL,
+ .cfg_ldo25 = rtw8814a_cfg_ldo25,
+ .efuse_grant = rtw8814a_efuse_grant,
+ .false_alarm_statistics = rtw8814a_false_alarm_statistics,
+ .phy_calibration = rtw8814a_phy_calibration,
+ .cck_pd_set = rtw8814a_phy_cck_pd_set,
+ .pwr_track = rtw8814a_pwr_track,
+ .config_bfee = NULL,
+ .set_gid_table = NULL,
+ .cfg_csi_rate = NULL,
+ .led_set = rtw8814a_led_set,
+ .fill_txdesc_checksum = rtw8814a_fill_txdesc_checksum,
+
+ .coex_set_init = rtw8814a_coex_cfg_init,
+ .coex_set_ant_switch = rtw8814a_coex_cfg_ant_switch,
+ .coex_set_gnt_fix = rtw8814a_coex_cfg_gnt_fix,
+ .coex_set_gnt_debug = rtw8814a_coex_cfg_gnt_debug,
+ .coex_set_rfe_type = rtw8814a_coex_cfg_rfe_type,
+ .coex_set_wl_tx_power = rtw8814a_coex_cfg_wl_tx_power,
+ .coex_set_wl_rx_gain = rtw8814a_coex_cfg_wl_rx_gain,
+};
+
+static const struct rtw_rqpn rqpn_table_8814a[] = {
+ /* SDIO */
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL, /* vo vi */
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW, /* be bk */
+ RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH}, /* mg hi */
+ /* PCIE */
+ {RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
+ /* USB, 2 bulk out */
+ {RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH,
+ RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
+ /* USB, 3 bulk out */
+ {RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
+ /* USB, 4 bulk out */
+ {RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
+};
+
+static const struct rtw_prioq_addrs prioq_addrs_8814a = {
+ .prio[RTW_DMA_MAPPING_EXTRA] = {
+ .rsvd = REG_FIFOPAGE_INFO_4, .avail = REG_FIFOPAGE_INFO_4 + 2,
+ },
+ .prio[RTW_DMA_MAPPING_LOW] = {
+ .rsvd = REG_FIFOPAGE_INFO_2, .avail = REG_FIFOPAGE_INFO_2 + 2,
+ },
+ .prio[RTW_DMA_MAPPING_NORMAL] = {
+ .rsvd = REG_FIFOPAGE_INFO_3, .avail = REG_FIFOPAGE_INFO_3 + 2,
+ },
+ .prio[RTW_DMA_MAPPING_HIGH] = {
+ .rsvd = REG_FIFOPAGE_INFO_1, .avail = REG_FIFOPAGE_INFO_1 + 2,
+ },
+ .wsize = true,
+};
+
+static const struct rtw_page_table page_table_8814a[] = {
+ /* SDIO */
+ {0, 0, 0, 0, 0}, /* hq nq lq exq gapq */
+ /* PCIE */
+ {32, 32, 32, 32, 0},
+ /* USB, 2 bulk out */
+ {32, 32, 32, 32, 0},
+ /* USB, 3 bulk out */
+ {32, 32, 32, 32, 0},
+ /* USB, 4 bulk out */
+ {32, 32, 32, 32, 0},
+};
+
+static const struct rtw_intf_phy_para_table phy_para_table_8814a = {};
+
+static const struct rtw_hw_reg rtw8814a_dig[] = {
+ [0] = { .addr = 0xc50, .mask = 0x7f },
+ [1] = { .addr = 0xe50, .mask = 0x7f },
+ [2] = { .addr = 0x1850, .mask = 0x7f },
+ [3] = { .addr = 0x1a50, .mask = 0x7f },
+};
+
+static const struct rtw_rfe_def rtw8814a_rfe_defs[] = {
+ [0] = { .phy_pg_tbl = &rtw8814a_bb_pg_type0_tbl,
+ .txpwr_lmt_tbl = &rtw8814a_txpwr_lmt_type0_tbl,
+ .pwr_track_tbl = &rtw8814a_rtw_pwrtrk_type0_tbl },
+ [1] = { .phy_pg_tbl = &rtw8814a_bb_pg_tbl,
+ .txpwr_lmt_tbl = &rtw8814a_txpwr_lmt_type1_tbl,
+ .pwr_track_tbl = &rtw8814a_rtw_pwrtrk_tbl },
+};
+
+/* RSSI in percent (dBm = % - 100) */
+static const u8 wl_rssi_step_8814a[] = {60, 50, 44, 30};
+static const u8 bt_rssi_step_8814a[] = {30, 30, 30, 30};
+
+/* wl_tx_dec_power, bt_tx_dec_power, wl_rx_gain, bt_rx_lna_constrain */
+static const struct coex_rf_para rf_para_tx_8814a[] = {
+ {0, 0, false, 7}, /* for normal */
+ {0, 16, false, 7}, /* for WL-CPT */
+ {4, 0, true, 1},
+ {3, 6, true, 1},
+ {2, 9, true, 1},
+ {1, 13, true, 1}
+};
+
+static const struct coex_rf_para rf_para_rx_8814a[] = {
+ {0, 0, false, 7}, /* for normal */
+ {0, 16, false, 7}, /* for WL-CPT */
+ {4, 0, true, 1},
+ {3, 6, true, 1},
+ {2, 9, true, 1},
+ {1, 13, true, 1}
+};
+
+static_assert(ARRAY_SIZE(rf_para_tx_8814a) == ARRAY_SIZE(rf_para_rx_8814a));
+
+const struct rtw_chip_info rtw8814a_hw_spec = {
+ .ops = &rtw8814a_ops,
+ .id = RTW_CHIP_TYPE_8814A,
+ .fw_name = "rtw88/rtw8814a_fw.bin",
+ .wlan_cpu = RTW_WCPU_11AC,
+ .tx_pkt_desc_sz = 40,
+ .tx_buf_desc_sz = 16,
+ .rx_pkt_desc_sz = 24,
+ .rx_buf_desc_sz = 8,
+ .phy_efuse_size = 1024,
+ .log_efuse_size = 512,
+ .ptct_efuse_size = 0,
+ .txff_size = (2048 - 10) * TX_PAGE_SIZE,
+ .rxff_size = 23552,
+ .rsvd_drv_pg_num = 8,
+ .band = RTW_BAND_2G | RTW_BAND_5G,
+ .page_size = TX_PAGE_SIZE,
+ .csi_buf_pg_num = 0,
+ .dig_min = 0x1c,
+ .txgi_factor = 1,
+ .is_pwr_by_rate_dec = true,
+ .rx_ldpc = true,
+ .max_power_index = 0x3f,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_2,
+ .usb_tx_agg_desc_num = 3,
+ .hw_feature_report = false,
+ .c2h_ra_report_size = 6,
+ .old_datarate_fb_limit = false,
+ .ht_supported = true,
+ .vht_supported = true,
+ .lps_deep_mode_supported = BIT(LPS_DEEP_MODE_LCLK),
+ .sys_func_en = 0xDC,
+ .pwr_on_seq = card_enable_flow_8814a,
+ .pwr_off_seq = card_disable_flow_8814a,
+ .rqpn_table = rqpn_table_8814a,
+ .prioq_addrs = &prioq_addrs_8814a,
+ .page_table = page_table_8814a,
+ .intf_table = &phy_para_table_8814a,
+ .dig = rtw8814a_dig,
+ .dig_cck = NULL,
+ .rf_base_addr = {0x2800, 0x2c00, 0x3800, 0x3c00},
+ .rf_sipi_addr = {0xc90, 0xe90, 0x1890, 0x1a90},
+ .ltecoex_addr = NULL,
+ .mac_tbl = &rtw8814a_mac_tbl,
+ .agc_tbl = &rtw8814a_agc_tbl,
+ .bb_tbl = &rtw8814a_bb_tbl,
+ .rf_tbl = {&rtw8814a_rf_a_tbl, &rtw8814a_rf_b_tbl,
+ &rtw8814a_rf_c_tbl, &rtw8814a_rf_d_tbl},
+ .rfe_defs = rtw8814a_rfe_defs,
+ .rfe_defs_size = ARRAY_SIZE(rtw8814a_rfe_defs),
+ .iqk_threshold = 8,
+ .max_scan_ie_len = IEEE80211_MAX_DATA_LEN,
+
+ .coex_para_ver = 0,
+ .bt_desired_ver = 0,
+ .scbd_support = false,
+ .new_scbd10_def = false,
+ .ble_hid_profile_support = false,
+ .wl_mimo_ps_support = false,
+ .pstdma_type = COEX_PSTDMA_FORCE_LPSOFF,
+ .bt_rssi_type = COEX_BTRSSI_RATIO,
+ .ant_isolation = 15,
+ .rssi_tolerance = 2,
+ .wl_rssi_step = wl_rssi_step_8814a,
+ .bt_rssi_step = bt_rssi_step_8814a,
+ .table_sant_num = 0,
+ .table_sant = NULL,
+ .table_nsant_num = 0,
+ .table_nsant = NULL,
+ .tdma_sant_num = 0,
+ .tdma_sant = NULL,
+ .tdma_nsant_num = 0,
+ .tdma_nsant = NULL,
+ .wl_rf_para_num = ARRAY_SIZE(rf_para_tx_8814a),
+ .wl_rf_para_tx = rf_para_tx_8814a,
+ .wl_rf_para_rx = rf_para_rx_8814a,
+ .bt_afh_span_bw20 = 0x24,
+ .bt_afh_span_bw40 = 0x36,
+ .afh_5g_num = 0,
+ .afh_5g = NULL,
+ .coex_info_hw_regs_num = 0,
+ .coex_info_hw_regs = NULL,
+};
+EXPORT_SYMBOL(rtw8814a_hw_spec);
+
+MODULE_FIRMWARE("rtw88/rtw8814a_fw.bin");
+
+MODULE_AUTHOR("Bitterblue Smith <rtl8821cerfe2@gmail.com>");
+MODULE_DESCRIPTION("Realtek 802.11ac wireless 8814a driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814a.h b/drivers/net/wireless/realtek/rtw88/rtw8814a.h
new file mode 100644
index 000000000000..c57c7c8f915e
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8814a.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2025 Realtek Corporation
+ */
+
+#ifndef __RTW8814A_H__
+#define __RTW8814A_H__
+
+struct rtw8814au_efuse {
+ u8 vid[2]; /* 0xd0 */
+ u8 pid[2]; /* 0xd2 */
+ u8 res[4]; /* 0xd4 */
+ u8 mac_addr[ETH_ALEN]; /* 0xd8 */
+} __packed;
+
+struct rtw8814ae_efuse {
+ u8 mac_addr[ETH_ALEN]; /* 0xd0 */
+ u8 vid[2]; /* 0xd6 */
+ u8 did[2]; /* 0xd8 */
+ u8 svid[2]; /* 0xda */
+ u8 smid[2]; /* 0xdc */
+} __packed;
+
+struct rtw8814a_efuse {
+ __le16 rtl_id;
+ u8 res0[0x0c];
+ u8 usb_mode; /* 0x0e */
+ u8 res1;
+
+ /* power index for four RF paths */
+ struct rtw_txpwr_idx txpwr_idx_table[4];
+
+ u8 channel_plan; /* 0xb8 */
+ u8 xtal_k; /* 0xb9 */
+ u8 thermal_meter; /* 0xba */
+ u8 iqk_lck; /* 0xbb */
+ u8 pa_type; /* 0xbc */
+ u8 lna_type_2g[2]; /* 0xbd */
+ u8 lna_type_5g[2]; /* 0xbf */
+ u8 rf_board_option; /* 0xc1 */
+ u8 res2;
+ u8 rf_bt_setting; /* 0xc3 */
+ u8 eeprom_version; /* 0xc4 */
+ u8 eeprom_customer_id; /* 0xc5 */
+ u8 tx_bb_swing_setting_2g; /* 0xc6 */
+ u8 tx_bb_swing_setting_5g; /* 0xc7 */
+ u8 res3;
+ u8 trx_antenna_option; /* 0xc9 */
+ u8 rfe_option; /* 0xca */
+ u8 country_code[2]; /* 0xcb */
+ u8 res4[3];
+ union {
+ struct rtw8814au_efuse u;
+ struct rtw8814ae_efuse e;
+ };
+ u8 res5[0x122]; /* 0xde */
+} __packed;
+
+static_assert(sizeof(struct rtw8814a_efuse) == 512);
+
+extern const struct rtw_chip_info rtw8814a_hw_spec;
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814a_table.c b/drivers/net/wireless/realtek/rtw88/rtw8814a_table.c
new file mode 100644
index 000000000000..b9ce51a09fc8
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8814a_table.c
@@ -0,0 +1,23930 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2025 Realtek Corporation
+ */
+
+#include "main.h"
+#include "phy.h"
+#include "rtw8814a_table.h"
+
+static const u32 rtw8814a_mac[] = {
+ 0x010, 0x0000007C,
+ 0x014, 0x000000DB,
+ 0x016, 0x00000002,
+ 0x073, 0x00000010,
+ 0x420, 0x00000080,
+ 0x421, 0x0000000F,
+ 0x428, 0x0000000A,
+ 0x429, 0x00000010,
+ 0x430, 0x00000000,
+ 0x431, 0x00000000,
+ 0x432, 0x00000000,
+ 0x433, 0x00000001,
+ 0x434, 0x00000004,
+ 0x435, 0x00000005,
+ 0x436, 0x00000007,
+ 0x437, 0x00000008,
+ 0x43C, 0x00000004,
+ 0x43D, 0x00000005,
+ 0x43E, 0x00000007,
+ 0x43F, 0x00000008,
+ 0x440, 0x0000005D,
+ 0x441, 0x00000001,
+ 0x442, 0x00000000,
+ 0x444, 0x00000010,
+ 0x445, 0x000000F0,
+ 0x446, 0x00000001,
+ 0x447, 0x000000FE,
+ 0x448, 0x00000000,
+ 0x449, 0x00000000,
+ 0x44A, 0x00000000,
+ 0x44B, 0x00000040,
+ 0x44C, 0x00000010,
+ 0x44D, 0x000000F0,
+ 0x44E, 0x0000003F,
+ 0x44F, 0x00000000,
+ 0x450, 0x00000000,
+ 0x451, 0x00000000,
+ 0x452, 0x00000000,
+ 0x453, 0x00000040,
+ 0x45E, 0x00000004,
+ 0x49C, 0x00000010,
+ 0x49D, 0x000000F0,
+ 0x49E, 0x00000000,
+ 0x49F, 0x00000006,
+ 0x4A0, 0x000000E0,
+ 0x4A1, 0x00000003,
+ 0x4A2, 0x00000000,
+ 0x4A3, 0x00000040,
+ 0x4A4, 0x00000015,
+ 0x4A5, 0x000000F0,
+ 0x4A6, 0x00000000,
+ 0x4A7, 0x00000006,
+ 0x4A8, 0x000000E0,
+ 0x4A9, 0x00000000,
+ 0x4AA, 0x00000000,
+ 0x4AB, 0x00000000,
+ 0x7DA, 0x00000008,
+ 0x1448, 0x00000006,
+ 0x144A, 0x00000006,
+ 0x144C, 0x00000006,
+ 0x144E, 0x00000006,
+ 0x4C8, 0x000000FF,
+ 0x4C9, 0x00000008,
+ 0x4CA, 0x0000003C,
+ 0x4CB, 0x0000003C,
+ 0x4CC, 0x000000FF,
+ 0x4CD, 0x000000FF,
+ 0x4CE, 0x00000001,
+ 0x4CF, 0x00000008,
+ 0x500, 0x00000026,
+ 0x501, 0x000000A2,
+ 0x502, 0x0000002F,
+ 0x503, 0x00000000,
+ 0x504, 0x00000028,
+ 0x505, 0x000000A3,
+ 0x506, 0x0000005E,
+ 0x507, 0x00000000,
+ 0x508, 0x0000002B,
+ 0x509, 0x000000A4,
+ 0x50A, 0x0000005E,
+ 0x50B, 0x00000000,
+ 0x50C, 0x0000004F,
+ 0x50D, 0x000000A4,
+ 0x50E, 0x00000000,
+ 0x50F, 0x00000000,
+ 0x512, 0x0000001C,
+ 0x514, 0x0000000A,
+ 0x516, 0x0000000A,
+ 0x521, 0x0000002F,
+ 0x525, 0x00000047,
+ 0x550, 0x00000010,
+ 0x551, 0x00000010,
+ 0x559, 0x00000002,
+ 0x55C, 0x00000064,
+ 0x55D, 0x000000FF,
+ 0x577, 0x00000003,
+ 0x5BE, 0x00000064,
+ 0x604, 0x00000001,
+ 0x605, 0x00000030,
+ 0x607, 0x00000001,
+ 0x608, 0x0000000E,
+ 0x609, 0x0000002A,
+ 0x60A, 0x00000000,
+ 0x60C, 0x00000018,
+ 0x60D, 0x00000050,
+ 0x6A0, 0x000000FF,
+ 0x6A1, 0x000000FF,
+ 0x6A2, 0x000000FF,
+ 0x6A3, 0x000000FF,
+ 0x6A4, 0x000000FF,
+ 0x6A5, 0x000000FF,
+ 0x6DE, 0x00000084,
+ 0x620, 0x000000FF,
+ 0x621, 0x000000FF,
+ 0x622, 0x000000FF,
+ 0x623, 0x000000FF,
+ 0x624, 0x000000FF,
+ 0x625, 0x000000FF,
+ 0x626, 0x000000FF,
+ 0x627, 0x000000FF,
+ 0x638, 0x00000064,
+ 0x63C, 0x0000000A,
+ 0x63D, 0x0000000A,
+ 0x63E, 0x0000000E,
+ 0x63F, 0x0000000E,
+ 0x640, 0x00000040,
+ 0x642, 0x00000040,
+ 0x643, 0x00000000,
+ 0x652, 0x000000C8,
+ 0x66E, 0x00000005,
+ 0x700, 0x00000021,
+ 0x701, 0x00000043,
+ 0x702, 0x00000065,
+ 0x703, 0x00000087,
+ 0x708, 0x00000021,
+ 0x709, 0x00000043,
+ 0x70A, 0x00000065,
+ 0x70B, 0x00000087,
+ 0x718, 0x00000040,
+ 0x7D5, 0x000000BC,
+ 0x7D8, 0x00000028,
+ 0x7D9, 0x00000000,
+ 0x7DA, 0x0000000B,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8814a_mac, rtw_phy_cfg_mac);
+
+static const u32 rtw8814a_agc[] = {
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000003,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000003,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000003,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000003,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000003,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000003,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000003,
+ 0xA0000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000003,
+ 0x81C, 0xFD020003,
+ 0x81C, 0xFC040003,
+ 0x81C, 0xFB060003,
+ 0x81C, 0xFA080003,
+ 0x81C, 0xF90A0003,
+ 0x81C, 0xF80C0003,
+ 0x81C, 0xF70E0003,
+ 0x81C, 0xF6100003,
+ 0x81C, 0xF5120003,
+ 0x81C, 0xF4140003,
+ 0x81C, 0xF3160003,
+ 0x81C, 0xF2180003,
+ 0x81C, 0xF11A0003,
+ 0x81C, 0xF01C0003,
+ 0x81C, 0xEF1E0003,
+ 0x81C, 0xEE200003,
+ 0x81C, 0xED220003,
+ 0x81C, 0xCF240003,
+ 0x81C, 0xCE260003,
+ 0x81C, 0xCD280003,
+ 0x81C, 0xCC2A0003,
+ 0x81C, 0xCB2C0003,
+ 0x81C, 0xCA2E0003,
+ 0x81C, 0xC9300003,
+ 0x81C, 0xC8320003,
+ 0x81C, 0xC7340003,
+ 0x81C, 0xC6360003,
+ 0x81C, 0xC5380003,
+ 0x81C, 0xC43A0003,
+ 0x81C, 0xA63C0003,
+ 0x81C, 0xA53E0003,
+ 0x81C, 0xA4400003,
+ 0x81C, 0xA3420003,
+ 0x81C, 0xA2440003,
+ 0x81C, 0xA1460003,
+ 0x81C, 0x86480003,
+ 0x81C, 0x854A0003,
+ 0x81C, 0x844C0003,
+ 0x81C, 0x834E0003,
+ 0x81C, 0x66500003,
+ 0x81C, 0x65520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x62580003,
+ 0x81C, 0x615A0003,
+ 0x81C, 0x435C0003,
+ 0x81C, 0x425E0003,
+ 0x81C, 0x41600003,
+ 0x81C, 0x27620003,
+ 0x81C, 0x26640003,
+ 0x81C, 0x25660003,
+ 0x81C, 0x24680003,
+ 0x81C, 0x236A0003,
+ 0x81C, 0x226C0003,
+ 0x81C, 0x216E0003,
+ 0x81C, 0x21700003,
+ 0x81C, 0x21720003,
+ 0x81C, 0x21740003,
+ 0x81C, 0x21760003,
+ 0x81C, 0x21780003,
+ 0x81C, 0x217A0003,
+ 0x81C, 0x217C0003,
+ 0x81C, 0x217E0003,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFE020003,
+ 0x81C, 0xFD040003,
+ 0x81C, 0xFC060003,
+ 0x81C, 0xFB080003,
+ 0x81C, 0xFA0A0003,
+ 0x81C, 0xF90C0003,
+ 0x81C, 0xF80E0003,
+ 0x81C, 0xF7100003,
+ 0x81C, 0xF6120003,
+ 0x81C, 0xF5140003,
+ 0x81C, 0xF4160003,
+ 0x81C, 0xF3180003,
+ 0x81C, 0xF21A0003,
+ 0x81C, 0xF11C0003,
+ 0x81C, 0xF01E0003,
+ 0x81C, 0xEF200003,
+ 0x81C, 0xEE220003,
+ 0x81C, 0xED240003,
+ 0x81C, 0xEC260003,
+ 0x81C, 0xEB280003,
+ 0x81C, 0xEA2A0003,
+ 0x81C, 0xE92C0003,
+ 0x81C, 0xE82E0003,
+ 0x81C, 0xE7300003,
+ 0x81C, 0xE6320003,
+ 0x81C, 0xE5340003,
+ 0x81C, 0xE4360003,
+ 0x81C, 0xE3380003,
+ 0x81C, 0xC53A0003,
+ 0x81C, 0xC43C0003,
+ 0x81C, 0xC33E0003,
+ 0x81C, 0xC2400003,
+ 0x81C, 0xC1420003,
+ 0x81C, 0xA8440003,
+ 0x81C, 0xA7460003,
+ 0x81C, 0xA6480003,
+ 0x81C, 0xA54A0003,
+ 0x81C, 0xA44C0003,
+ 0x81C, 0xA34E0003,
+ 0x81C, 0xA2500003,
+ 0x81C, 0x65520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x62580003,
+ 0x81C, 0x615A0003,
+ 0x81C, 0x475C0003,
+ 0x81C, 0x465E0003,
+ 0x81C, 0x45600003,
+ 0x81C, 0x44620003,
+ 0x81C, 0x43640003,
+ 0x81C, 0x42660003,
+ 0x81C, 0x41680003,
+ 0x81C, 0x416A0003,
+ 0x81C, 0x416C0003,
+ 0x81C, 0x416E0003,
+ 0x81C, 0x41700003,
+ 0x81C, 0x41720003,
+ 0x81C, 0x41740003,
+ 0x81C, 0x41760003,
+ 0x81C, 0x41780003,
+ 0x81C, 0x417A0003,
+ 0x81C, 0x417C0003,
+ 0x81C, 0x417E0003,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000003,
+ 0x81C, 0xFD020003,
+ 0x81C, 0xFC040003,
+ 0x81C, 0xFB060003,
+ 0x81C, 0xFA080003,
+ 0x81C, 0xF90A0003,
+ 0x81C, 0xF80C0003,
+ 0x81C, 0xF70E0003,
+ 0x81C, 0xF6100003,
+ 0x81C, 0xF5120003,
+ 0x81C, 0xF4140003,
+ 0x81C, 0xF3160003,
+ 0x81C, 0xF2180003,
+ 0x81C, 0xF11A0003,
+ 0x81C, 0xF01C0003,
+ 0x81C, 0xEF1E0003,
+ 0x81C, 0xEE200003,
+ 0x81C, 0xED220003,
+ 0x81C, 0xEC240003,
+ 0x81C, 0xEB260003,
+ 0x81C, 0xEA280003,
+ 0x81C, 0xE92A0003,
+ 0x81C, 0xE82C0003,
+ 0x81C, 0xE72E0003,
+ 0x81C, 0xE6300003,
+ 0x81C, 0xE5320003,
+ 0x81C, 0xE4340003,
+ 0x81C, 0xE3360003,
+ 0x81C, 0xC6380003,
+ 0x81C, 0xC53A0003,
+ 0x81C, 0xC43C0003,
+ 0x81C, 0xC33E0003,
+ 0x81C, 0xC2400003,
+ 0x81C, 0xA9420003,
+ 0x81C, 0xA8440003,
+ 0x81C, 0xA7460003,
+ 0x81C, 0xA6480003,
+ 0x81C, 0xA54A0003,
+ 0x81C, 0xA44C0003,
+ 0x81C, 0xA34E0003,
+ 0x81C, 0x66500003,
+ 0x81C, 0x65520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x49580003,
+ 0x81C, 0x485A0003,
+ 0x81C, 0x475C0003,
+ 0x81C, 0x465E0003,
+ 0x81C, 0x45600003,
+ 0x81C, 0x44620003,
+ 0x81C, 0x43640003,
+ 0x81C, 0x42660003,
+ 0x81C, 0x41680003,
+ 0x81C, 0x416A0003,
+ 0x81C, 0x416C0003,
+ 0x81C, 0x416E0003,
+ 0x81C, 0x41700003,
+ 0x81C, 0x41720003,
+ 0x81C, 0x41740003,
+ 0x81C, 0x41760003,
+ 0x81C, 0x41780003,
+ 0x81C, 0x417A0003,
+ 0x81C, 0x417C0003,
+ 0x81C, 0x417E0003,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFE020003,
+ 0x81C, 0xFD040003,
+ 0x81C, 0xFC060003,
+ 0x81C, 0xFB080003,
+ 0x81C, 0xFA0A0003,
+ 0x81C, 0xF90C0003,
+ 0x81C, 0xF80E0003,
+ 0x81C, 0xF7100003,
+ 0x81C, 0xF6120003,
+ 0x81C, 0xF5140003,
+ 0x81C, 0xF4160003,
+ 0x81C, 0xF3180003,
+ 0x81C, 0xF21A0003,
+ 0x81C, 0xF11C0003,
+ 0x81C, 0xF01E0003,
+ 0x81C, 0xEF200003,
+ 0x81C, 0xEE220003,
+ 0x81C, 0xED240003,
+ 0x81C, 0xEC260003,
+ 0x81C, 0xEB280003,
+ 0x81C, 0xEA2A0003,
+ 0x81C, 0xE92C0003,
+ 0x81C, 0xE82E0003,
+ 0x81C, 0xE7300003,
+ 0x81C, 0xE6320003,
+ 0x81C, 0xE5340003,
+ 0x81C, 0xE4360003,
+ 0x81C, 0xE3380003,
+ 0x81C, 0xC53A0003,
+ 0x81C, 0xC43C0003,
+ 0x81C, 0xC33E0003,
+ 0x81C, 0xC2400003,
+ 0x81C, 0xC1420003,
+ 0x81C, 0xA8440003,
+ 0x81C, 0xA7460003,
+ 0x81C, 0xA6480003,
+ 0x81C, 0xA54A0003,
+ 0x81C, 0xA44C0003,
+ 0x81C, 0xA34E0003,
+ 0x81C, 0xA2500003,
+ 0x81C, 0x65520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x62580003,
+ 0x81C, 0x615A0003,
+ 0x81C, 0x475C0003,
+ 0x81C, 0x465E0003,
+ 0x81C, 0x45600003,
+ 0x81C, 0x44620003,
+ 0x81C, 0x43640003,
+ 0x81C, 0x42660003,
+ 0x81C, 0x41680003,
+ 0x81C, 0x416A0003,
+ 0x81C, 0x416C0003,
+ 0x81C, 0x416E0003,
+ 0x81C, 0x41700003,
+ 0x81C, 0x41720003,
+ 0x81C, 0x41740003,
+ 0x81C, 0x41760003,
+ 0x81C, 0x41780003,
+ 0x81C, 0x417A0003,
+ 0x81C, 0x417C0003,
+ 0x81C, 0x417E0003,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000003,
+ 0x81C, 0xFD020003,
+ 0x81C, 0xFC040003,
+ 0x81C, 0xFB060003,
+ 0x81C, 0xFA080003,
+ 0x81C, 0xF90A0003,
+ 0x81C, 0xF80C0003,
+ 0x81C, 0xF70E0003,
+ 0x81C, 0xF6100003,
+ 0x81C, 0xF5120003,
+ 0x81C, 0xF4140003,
+ 0x81C, 0xF3160003,
+ 0x81C, 0xF2180003,
+ 0x81C, 0xF11A0003,
+ 0x81C, 0xF01C0003,
+ 0x81C, 0xEF1E0003,
+ 0x81C, 0xEE200003,
+ 0x81C, 0xED220003,
+ 0x81C, 0xEC240003,
+ 0x81C, 0xEB260003,
+ 0x81C, 0xEA280003,
+ 0x81C, 0xE92A0003,
+ 0x81C, 0xE82C0003,
+ 0x81C, 0xE72E0003,
+ 0x81C, 0xE6300003,
+ 0x81C, 0xE5320003,
+ 0x81C, 0xE4340003,
+ 0x81C, 0xE3360003,
+ 0x81C, 0xC6380003,
+ 0x81C, 0xC53A0003,
+ 0x81C, 0xC43C0003,
+ 0x81C, 0xC33E0003,
+ 0x81C, 0xC2400003,
+ 0x81C, 0xA9420003,
+ 0x81C, 0xA8440003,
+ 0x81C, 0xA7460003,
+ 0x81C, 0xA6480003,
+ 0x81C, 0xA54A0003,
+ 0x81C, 0xA44C0003,
+ 0x81C, 0xA34E0003,
+ 0x81C, 0x66500003,
+ 0x81C, 0x65520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x49580003,
+ 0x81C, 0x485A0003,
+ 0x81C, 0x475C0003,
+ 0x81C, 0x465E0003,
+ 0x81C, 0x45600003,
+ 0x81C, 0x44620003,
+ 0x81C, 0x43640003,
+ 0x81C, 0x42660003,
+ 0x81C, 0x41680003,
+ 0x81C, 0x416A0003,
+ 0x81C, 0x416C0003,
+ 0x81C, 0x416E0003,
+ 0x81C, 0x41700003,
+ 0x81C, 0x41720003,
+ 0x81C, 0x41740003,
+ 0x81C, 0x41760003,
+ 0x81C, 0x41780003,
+ 0x81C, 0x417A0003,
+ 0x81C, 0x417C0003,
+ 0x81C, 0x417E0003,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xDF000003,
+ 0x81C, 0xDF020003,
+ 0x81C, 0xDF040003,
+ 0x81C, 0xDE060003,
+ 0x81C, 0xDD080003,
+ 0x81C, 0xDC0A0003,
+ 0x81C, 0xDB0C0003,
+ 0x81C, 0xDA0E0003,
+ 0x81C, 0xD9100003,
+ 0x81C, 0xD8120003,
+ 0x81C, 0xD7140003,
+ 0x81C, 0xD6160003,
+ 0x81C, 0xD5180003,
+ 0x81C, 0xD41A0003,
+ 0x81C, 0xD31C0003,
+ 0x81C, 0xD21E0003,
+ 0x81C, 0xD1200003,
+ 0x81C, 0xD0220003,
+ 0x81C, 0xCF240003,
+ 0x81C, 0xCE260003,
+ 0x81C, 0xCD280003,
+ 0x81C, 0xCC2A0003,
+ 0x81C, 0xCB2C0003,
+ 0x81C, 0xCA2E0003,
+ 0x81C, 0xC9300003,
+ 0x81C, 0xC8320003,
+ 0x81C, 0xC7340003,
+ 0x81C, 0xC6360003,
+ 0x81C, 0xC5380003,
+ 0x81C, 0xA73A0003,
+ 0x81C, 0xA63C0003,
+ 0x81C, 0xA53E0003,
+ 0x81C, 0xA4400003,
+ 0x81C, 0xA3420003,
+ 0x81C, 0xA2440003,
+ 0x81C, 0x87460003,
+ 0x81C, 0x86480003,
+ 0x81C, 0x854A0003,
+ 0x81C, 0x844C0003,
+ 0x81C, 0x834E0003,
+ 0x81C, 0x82500003,
+ 0x81C, 0x81520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x62580003,
+ 0x81C, 0x615A0003,
+ 0x81C, 0x445C0003,
+ 0x81C, 0x435E0003,
+ 0x81C, 0x42600003,
+ 0x81C, 0x41620003,
+ 0x81C, 0x27640003,
+ 0x81C, 0x26660003,
+ 0x81C, 0x25680003,
+ 0x81C, 0x246A0003,
+ 0x81C, 0x236C0003,
+ 0x81C, 0x226E0003,
+ 0x81C, 0x21700003,
+ 0x81C, 0x21720003,
+ 0x81C, 0x21740003,
+ 0x81C, 0x21760003,
+ 0x81C, 0x21780003,
+ 0x81C, 0x217A0003,
+ 0x81C, 0x217C0003,
+ 0x81C, 0x217E0003,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFE020003,
+ 0x81C, 0xFD040003,
+ 0x81C, 0xFC060003,
+ 0x81C, 0xFB080003,
+ 0x81C, 0xFA0A0003,
+ 0x81C, 0xF90C0003,
+ 0x81C, 0xF80E0003,
+ 0x81C, 0xF7100003,
+ 0x81C, 0xF6120003,
+ 0x81C, 0xF5140003,
+ 0x81C, 0xF4160003,
+ 0x81C, 0xF3180003,
+ 0x81C, 0xF21A0003,
+ 0x81C, 0xF11C0003,
+ 0x81C, 0xF01E0003,
+ 0x81C, 0xEF200003,
+ 0x81C, 0xEE220003,
+ 0x81C, 0xED240003,
+ 0x81C, 0xEC260003,
+ 0x81C, 0xEB280003,
+ 0x81C, 0xEA2A0003,
+ 0x81C, 0xE92C0003,
+ 0x81C, 0xE82E0003,
+ 0x81C, 0xE7300003,
+ 0x81C, 0xE6320003,
+ 0x81C, 0xE5340003,
+ 0x81C, 0xE4360003,
+ 0x81C, 0xE3380003,
+ 0x81C, 0xC53A0003,
+ 0x81C, 0xC43C0003,
+ 0x81C, 0xC33E0003,
+ 0x81C, 0xC2400003,
+ 0x81C, 0xC1420003,
+ 0x81C, 0xA8440003,
+ 0x81C, 0xA7460003,
+ 0x81C, 0xA6480003,
+ 0x81C, 0xA54A0003,
+ 0x81C, 0xA44C0003,
+ 0x81C, 0xA34E0003,
+ 0x81C, 0xA2500003,
+ 0x81C, 0x65520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x62580003,
+ 0x81C, 0x615A0003,
+ 0x81C, 0x475C0003,
+ 0x81C, 0x465E0003,
+ 0x81C, 0x45600003,
+ 0x81C, 0x44620003,
+ 0x81C, 0x43640003,
+ 0x81C, 0x42660003,
+ 0x81C, 0x41680003,
+ 0x81C, 0x416A0003,
+ 0x81C, 0x416C0003,
+ 0x81C, 0x416E0003,
+ 0x81C, 0x41700003,
+ 0x81C, 0x41720003,
+ 0x81C, 0x41740003,
+ 0x81C, 0x41760003,
+ 0x81C, 0x41780003,
+ 0x81C, 0x417A0003,
+ 0x81C, 0x417C0003,
+ 0x81C, 0x417E0003,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xDF000003,
+ 0x81C, 0xDF020003,
+ 0x81C, 0xDF040003,
+ 0x81C, 0xDE060003,
+ 0x81C, 0xDD080003,
+ 0x81C, 0xDC0A0003,
+ 0x81C, 0xDB0C0003,
+ 0x81C, 0xDA0E0003,
+ 0x81C, 0xD9100003,
+ 0x81C, 0xD8120003,
+ 0x81C, 0xD7140003,
+ 0x81C, 0xD6160003,
+ 0x81C, 0xD5180003,
+ 0x81C, 0xD41A0003,
+ 0x81C, 0xD31C0003,
+ 0x81C, 0xD21E0003,
+ 0x81C, 0xD1200003,
+ 0x81C, 0xD0220003,
+ 0x81C, 0xCF240003,
+ 0x81C, 0xCE260003,
+ 0x81C, 0xCD280003,
+ 0x81C, 0xCC2A0003,
+ 0x81C, 0xCB2C0003,
+ 0x81C, 0xCA2E0003,
+ 0x81C, 0xC9300003,
+ 0x81C, 0xC8320003,
+ 0x81C, 0xC7340003,
+ 0x81C, 0xC6360003,
+ 0x81C, 0xC5380003,
+ 0x81C, 0xA73A0003,
+ 0x81C, 0xA63C0003,
+ 0x81C, 0xA53E0003,
+ 0x81C, 0xA4400003,
+ 0x81C, 0xA3420003,
+ 0x81C, 0xA2440003,
+ 0x81C, 0x87460003,
+ 0x81C, 0x86480003,
+ 0x81C, 0x854A0003,
+ 0x81C, 0x844C0003,
+ 0x81C, 0x834E0003,
+ 0x81C, 0x82500003,
+ 0x81C, 0x81520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x62580003,
+ 0x81C, 0x615A0003,
+ 0x81C, 0x445C0003,
+ 0x81C, 0x435E0003,
+ 0x81C, 0x42600003,
+ 0x81C, 0x41620003,
+ 0x81C, 0x27640003,
+ 0x81C, 0x26660003,
+ 0x81C, 0x25680003,
+ 0x81C, 0x246A0003,
+ 0x81C, 0x236C0003,
+ 0x81C, 0x226E0003,
+ 0x81C, 0x21700003,
+ 0x81C, 0x21720003,
+ 0x81C, 0x21740003,
+ 0x81C, 0x21760003,
+ 0x81C, 0x21780003,
+ 0x81C, 0x217A0003,
+ 0x81C, 0x217C0003,
+ 0x81C, 0x217E0003,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000003,
+ 0x81C, 0xFE020003,
+ 0x81C, 0xFD040003,
+ 0x81C, 0xFC060003,
+ 0x81C, 0xFB080003,
+ 0x81C, 0xFA0A0003,
+ 0x81C, 0xF90C0003,
+ 0x81C, 0xF80E0003,
+ 0x81C, 0xF7100003,
+ 0x81C, 0xF6120003,
+ 0x81C, 0xF5140003,
+ 0x81C, 0xF4160003,
+ 0x81C, 0xF3180003,
+ 0x81C, 0xF21A0003,
+ 0x81C, 0xF11C0003,
+ 0x81C, 0xF01E0003,
+ 0x81C, 0xEF200003,
+ 0x81C, 0xEE220003,
+ 0x81C, 0xED240003,
+ 0x81C, 0x0F260003,
+ 0x81C, 0x0E280003,
+ 0x81C, 0x0D2A0003,
+ 0x81C, 0x0C2C0003,
+ 0x81C, 0x0B2E0003,
+ 0x81C, 0x0A300003,
+ 0x81C, 0x09320003,
+ 0x81C, 0x08340003,
+ 0x81C, 0x07360003,
+ 0x81C, 0x06380003,
+ 0x81C, 0x053A0003,
+ 0x81C, 0x043C0003,
+ 0x81C, 0x033E0003,
+ 0x81C, 0x23400003,
+ 0x81C, 0x22420003,
+ 0x81C, 0xA8440003,
+ 0x81C, 0xA7460003,
+ 0x81C, 0xA6480003,
+ 0x81C, 0xA54A0003,
+ 0x81C, 0xA44C0003,
+ 0x81C, 0x684E0003,
+ 0x81C, 0x67500003,
+ 0x81C, 0x66520003,
+ 0x81C, 0x65540003,
+ 0x81C, 0x64560003,
+ 0x81C, 0x63580003,
+ 0x81C, 0x625A0003,
+ 0x81C, 0x615C0003,
+ 0x81C, 0x475E0003,
+ 0x81C, 0x46600003,
+ 0x81C, 0x45620003,
+ 0x81C, 0x44640003,
+ 0x81C, 0x43660003,
+ 0x81C, 0x42680003,
+ 0x81C, 0x416A0003,
+ 0x81C, 0x416C0003,
+ 0x81C, 0x416E0003,
+ 0x81C, 0x41700003,
+ 0x81C, 0x41720003,
+ 0x81C, 0x41740003,
+ 0x81C, 0x41760003,
+ 0x81C, 0x41780003,
+ 0x81C, 0x417A0003,
+ 0x81C, 0x417C0003,
+ 0x81C, 0x417E0003,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000003,
+ 0x81C, 0xFE020003,
+ 0x81C, 0xFD040003,
+ 0x81C, 0xFC060003,
+ 0x81C, 0xFB080003,
+ 0x81C, 0xFA0A0003,
+ 0x81C, 0xF90C0003,
+ 0x81C, 0xF80E0003,
+ 0x81C, 0xF7100003,
+ 0x81C, 0xF6120003,
+ 0x81C, 0xF5140003,
+ 0x81C, 0xF4160003,
+ 0x81C, 0xF3180003,
+ 0x81C, 0xF21A0003,
+ 0x81C, 0xF11C0003,
+ 0x81C, 0xF01E0003,
+ 0x81C, 0xEF200003,
+ 0x81C, 0xEE220003,
+ 0x81C, 0xED240003,
+ 0x81C, 0xEC260003,
+ 0x81C, 0xEB280003,
+ 0x81C, 0xEA2A0003,
+ 0x81C, 0xE92C0003,
+ 0x81C, 0xE72E0003,
+ 0x81C, 0xE6300003,
+ 0x81C, 0xE5320003,
+ 0x81C, 0x08340003,
+ 0x81C, 0x07360003,
+ 0x81C, 0x06380003,
+ 0x81C, 0x053A0003,
+ 0x81C, 0x043C0003,
+ 0x81C, 0x033E0003,
+ 0x81C, 0x02400003,
+ 0x81C, 0xA9420003,
+ 0x81C, 0xA8440003,
+ 0x81C, 0xA7460003,
+ 0x81C, 0xA6480003,
+ 0x81C, 0xA54A0003,
+ 0x81C, 0x684C0003,
+ 0x81C, 0x674E0003,
+ 0x81C, 0x66500003,
+ 0x81C, 0x65520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x62580003,
+ 0x81C, 0x615A0003,
+ 0x81C, 0x475C0003,
+ 0x81C, 0x465E0003,
+ 0x81C, 0x45600003,
+ 0x81C, 0x44620003,
+ 0x81C, 0x43640003,
+ 0x81C, 0x42660003,
+ 0x81C, 0x41680003,
+ 0x81C, 0x416A0003,
+ 0x81C, 0x416C0003,
+ 0x81C, 0x416E0003,
+ 0x81C, 0x41700003,
+ 0x81C, 0x41720003,
+ 0x81C, 0x41740003,
+ 0x81C, 0x41760003,
+ 0x81C, 0x41780003,
+ 0x81C, 0x417A0003,
+ 0x81C, 0x417C0003,
+ 0x81C, 0x417E0003,
+ 0xA0000000, 0x00000000,
+ 0x81C, 0xFE000003,
+ 0x81C, 0xFD020003,
+ 0x81C, 0xFC040003,
+ 0x81C, 0xFB060003,
+ 0x81C, 0xFA080003,
+ 0x81C, 0xF90A0003,
+ 0x81C, 0xF80C0003,
+ 0x81C, 0xF70E0003,
+ 0x81C, 0xF6100003,
+ 0x81C, 0xF5120003,
+ 0x81C, 0xF4140003,
+ 0x81C, 0xF3160003,
+ 0x81C, 0xF2180003,
+ 0x81C, 0xF11A0003,
+ 0x81C, 0xF01C0003,
+ 0x81C, 0xEF1E0003,
+ 0x81C, 0xEE200003,
+ 0x81C, 0xED220003,
+ 0x81C, 0xCF240003,
+ 0x81C, 0xCE260003,
+ 0x81C, 0xCD280003,
+ 0x81C, 0xCC2A0003,
+ 0x81C, 0xCB2C0003,
+ 0x81C, 0xCA2E0003,
+ 0x81C, 0xC9300003,
+ 0x81C, 0xC8320003,
+ 0x81C, 0xC7340003,
+ 0x81C, 0xC6360003,
+ 0x81C, 0xC5380003,
+ 0x81C, 0xC43A0003,
+ 0x81C, 0xA63C0003,
+ 0x81C, 0xA53E0003,
+ 0x81C, 0xA4400003,
+ 0x81C, 0xA3420003,
+ 0x81C, 0xA2440003,
+ 0x81C, 0xA1460003,
+ 0x81C, 0x86480003,
+ 0x81C, 0x854A0003,
+ 0x81C, 0x844C0003,
+ 0x81C, 0x834E0003,
+ 0x81C, 0x66500003,
+ 0x81C, 0x65520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x62580003,
+ 0x81C, 0x615A0003,
+ 0x81C, 0x435C0003,
+ 0x81C, 0x425E0003,
+ 0x81C, 0x41600003,
+ 0x81C, 0x27620003,
+ 0x81C, 0x26640003,
+ 0x81C, 0x25660003,
+ 0x81C, 0x24680003,
+ 0x81C, 0x236A0003,
+ 0x81C, 0x226C0003,
+ 0x81C, 0x216E0003,
+ 0x81C, 0x21700003,
+ 0x81C, 0x21720003,
+ 0x81C, 0x21740003,
+ 0x81C, 0x21760003,
+ 0x81C, 0x21780003,
+ 0x81C, 0x217A0003,
+ 0x81C, 0x217C0003,
+ 0x81C, 0x217E0003,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF9000103,
+ 0x81C, 0xF8020103,
+ 0x81C, 0xF7040103,
+ 0x81C, 0xF6060103,
+ 0x81C, 0xF5080103,
+ 0x81C, 0xF40A0103,
+ 0x81C, 0xF30C0103,
+ 0x81C, 0xF20E0103,
+ 0x81C, 0xF1100103,
+ 0x81C, 0xF0120103,
+ 0x81C, 0xEF140103,
+ 0x81C, 0xEE160103,
+ 0x81C, 0xED180103,
+ 0x81C, 0xEC1A0103,
+ 0x81C, 0xEB1C0103,
+ 0x81C, 0xEA1E0103,
+ 0x81C, 0xE9200103,
+ 0x81C, 0xE8220103,
+ 0x81C, 0xE7240103,
+ 0x81C, 0xE6260103,
+ 0x81C, 0xE5280103,
+ 0x81C, 0xE42A0103,
+ 0x81C, 0xE32C0103,
+ 0x81C, 0xC32E0103,
+ 0x81C, 0xC2300103,
+ 0x81C, 0xC1320103,
+ 0x81C, 0xA5340103,
+ 0x81C, 0xA4360103,
+ 0x81C, 0xA3380103,
+ 0x81C, 0xA23A0103,
+ 0x81C, 0xA13C0103,
+ 0x81C, 0x843E0103,
+ 0x81C, 0x83400103,
+ 0x81C, 0x82420103,
+ 0x81C, 0x81440103,
+ 0x81C, 0x64460103,
+ 0x81C, 0x63480103,
+ 0x81C, 0x624A0103,
+ 0x81C, 0x614C0103,
+ 0x81C, 0x444E0103,
+ 0x81C, 0x43500103,
+ 0x81C, 0x42520103,
+ 0x81C, 0x41540103,
+ 0x81C, 0x25560103,
+ 0x81C, 0x24580103,
+ 0x81C, 0x235A0103,
+ 0x81C, 0x065C0103,
+ 0x81C, 0x055E0103,
+ 0x81C, 0x04600103,
+ 0x81C, 0x03620103,
+ 0x81C, 0x02640103,
+ 0x81C, 0x01660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x016A0103,
+ 0x81C, 0x016C0103,
+ 0x81C, 0x016E0103,
+ 0x81C, 0x01700103,
+ 0x81C, 0x01720103,
+ 0x81C, 0x01740103,
+ 0x81C, 0x01760103,
+ 0x81C, 0x01780103,
+ 0x81C, 0x017A0103,
+ 0x81C, 0x017C0103,
+ 0x81C, 0x017E0103,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000103,
+ 0x81C, 0xF7020103,
+ 0x81C, 0xF6040103,
+ 0x81C, 0xF5060103,
+ 0x81C, 0xF4080103,
+ 0x81C, 0xF30A0103,
+ 0x81C, 0xF20C0103,
+ 0x81C, 0xF10E0103,
+ 0x81C, 0xF0100103,
+ 0x81C, 0xEF120103,
+ 0x81C, 0xEE140103,
+ 0x81C, 0xED160103,
+ 0x81C, 0xEC180103,
+ 0x81C, 0xEB1A0103,
+ 0x81C, 0xEA1C0103,
+ 0x81C, 0xE91E0103,
+ 0x81C, 0xE8200103,
+ 0x81C, 0xE7220103,
+ 0x81C, 0xE6240103,
+ 0x81C, 0xE5260103,
+ 0x81C, 0xE4280103,
+ 0x81C, 0xE32A0103,
+ 0x81C, 0xE22C0103,
+ 0x81C, 0xE12E0103,
+ 0x81C, 0xA5300103,
+ 0x81C, 0xA4320103,
+ 0x81C, 0xA3340103,
+ 0x81C, 0xA2360103,
+ 0x81C, 0xA1380103,
+ 0x81C, 0x843A0103,
+ 0x81C, 0x833C0103,
+ 0x81C, 0x823E0103,
+ 0x81C, 0x81400103,
+ 0x81C, 0x64420103,
+ 0x81C, 0x63440103,
+ 0x81C, 0x62460103,
+ 0x81C, 0x61480103,
+ 0x81C, 0x454A0103,
+ 0x81C, 0x444C0103,
+ 0x81C, 0x434E0103,
+ 0x81C, 0x42500103,
+ 0x81C, 0x25520103,
+ 0x81C, 0x24540103,
+ 0x81C, 0x23560103,
+ 0x81C, 0x06580103,
+ 0x81C, 0x055A0103,
+ 0x81C, 0x045C0103,
+ 0x81C, 0x035E0103,
+ 0x81C, 0x02600103,
+ 0x81C, 0x01620103,
+ 0x81C, 0x01640103,
+ 0x81C, 0x01660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x016A0103,
+ 0x81C, 0x016C0103,
+ 0x81C, 0x016E0103,
+ 0x81C, 0x01700103,
+ 0x81C, 0x01720103,
+ 0x81C, 0x01740103,
+ 0x81C, 0x01760103,
+ 0x81C, 0x01780103,
+ 0x81C, 0x017A0103,
+ 0x81C, 0x017C0103,
+ 0x81C, 0x017E0103,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000103,
+ 0x81C, 0xFB020103,
+ 0x81C, 0xFA040103,
+ 0x81C, 0xF9060103,
+ 0x81C, 0xF8080103,
+ 0x81C, 0xF70A0103,
+ 0x81C, 0xF60C0103,
+ 0x81C, 0xF50E0103,
+ 0x81C, 0xF4100103,
+ 0x81C, 0xF3120103,
+ 0x81C, 0xF2140103,
+ 0x81C, 0xF1160103,
+ 0x81C, 0xF0180103,
+ 0x81C, 0xEF1A0103,
+ 0x81C, 0xEE1C0103,
+ 0x81C, 0xED1E0103,
+ 0x81C, 0xEC200103,
+ 0x81C, 0xEB220103,
+ 0x81C, 0xEA240103,
+ 0x81C, 0xE9260103,
+ 0x81C, 0xE8280103,
+ 0x81C, 0xE72A0103,
+ 0x81C, 0xE62C0103,
+ 0x81C, 0xE52E0103,
+ 0x81C, 0xE4300103,
+ 0x81C, 0xE3320103,
+ 0x81C, 0xE2340103,
+ 0x81C, 0xE1360103,
+ 0x81C, 0x87380103,
+ 0x81C, 0x863A0103,
+ 0x81C, 0x853C0103,
+ 0x81C, 0x843E0103,
+ 0x81C, 0x83400103,
+ 0x81C, 0x82420103,
+ 0x81C, 0x81440103,
+ 0x81C, 0x64460103,
+ 0x81C, 0x63480103,
+ 0x81C, 0x624A0103,
+ 0x81C, 0x464C0103,
+ 0x81C, 0x454E0103,
+ 0x81C, 0x44500103,
+ 0x81C, 0x43520103,
+ 0x81C, 0x26540103,
+ 0x81C, 0x25560103,
+ 0x81C, 0x24580103,
+ 0x81C, 0x075A0103,
+ 0x81C, 0x065C0103,
+ 0x81C, 0x055E0103,
+ 0x81C, 0x04600103,
+ 0x81C, 0x03620103,
+ 0x81C, 0x02640103,
+ 0x81C, 0x01660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x016A0103,
+ 0x81C, 0x016C0103,
+ 0x81C, 0x016E0103,
+ 0x81C, 0x01700103,
+ 0x81C, 0x01720103,
+ 0x81C, 0x01740103,
+ 0x81C, 0x01760103,
+ 0x81C, 0x01780103,
+ 0x81C, 0x017A0103,
+ 0x81C, 0x017C0103,
+ 0x81C, 0x017E0103,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF9000103,
+ 0x81C, 0xF8020103,
+ 0x81C, 0xF7040103,
+ 0x81C, 0xF6060103,
+ 0x81C, 0xF5080103,
+ 0x81C, 0xF40A0103,
+ 0x81C, 0xF30C0103,
+ 0x81C, 0xF20E0103,
+ 0x81C, 0xF1100103,
+ 0x81C, 0xF0120103,
+ 0x81C, 0xEF140103,
+ 0x81C, 0xEE160103,
+ 0x81C, 0xED180103,
+ 0x81C, 0xEC1A0103,
+ 0x81C, 0xEB1C0103,
+ 0x81C, 0xEA1E0103,
+ 0x81C, 0xE9200103,
+ 0x81C, 0xE8220103,
+ 0x81C, 0xE7240103,
+ 0x81C, 0xE6260103,
+ 0x81C, 0xE5280103,
+ 0x81C, 0xE42A0103,
+ 0x81C, 0xE32C0103,
+ 0x81C, 0xE22E0103,
+ 0x81C, 0xA6300103,
+ 0x81C, 0xA5320103,
+ 0x81C, 0xA4340103,
+ 0x81C, 0xA3360103,
+ 0x81C, 0xA2380103,
+ 0x81C, 0xA13A0103,
+ 0x81C, 0x843C0103,
+ 0x81C, 0x833E0103,
+ 0x81C, 0x82400103,
+ 0x81C, 0x81420103,
+ 0x81C, 0x64440103,
+ 0x81C, 0x63460103,
+ 0x81C, 0x62480103,
+ 0x81C, 0x614A0103,
+ 0x81C, 0x444C0103,
+ 0x81C, 0x434E0103,
+ 0x81C, 0x42500103,
+ 0x81C, 0x41520103,
+ 0x81C, 0x25540103,
+ 0x81C, 0x24560103,
+ 0x81C, 0x23580103,
+ 0x81C, 0x225A0103,
+ 0x81C, 0x055C0103,
+ 0x81C, 0x045E0103,
+ 0x81C, 0x03600103,
+ 0x81C, 0x02620103,
+ 0x81C, 0x01640103,
+ 0x81C, 0x01660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x016A0103,
+ 0x81C, 0x016C0103,
+ 0x81C, 0x016E0103,
+ 0x81C, 0x01700103,
+ 0x81C, 0x01720103,
+ 0x81C, 0x01740103,
+ 0x81C, 0x01760103,
+ 0x81C, 0x01780103,
+ 0x81C, 0x017A0103,
+ 0x81C, 0x017C0103,
+ 0x81C, 0x017E0103,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFD000103,
+ 0x81C, 0xFC020103,
+ 0x81C, 0xFB040103,
+ 0x81C, 0xFA060103,
+ 0x81C, 0xF9080103,
+ 0x81C, 0xF80A0103,
+ 0x81C, 0xF70C0103,
+ 0x81C, 0xF60E0103,
+ 0x81C, 0xF5100103,
+ 0x81C, 0xF4120103,
+ 0x81C, 0xF3140103,
+ 0x81C, 0xF2160103,
+ 0x81C, 0xF1180103,
+ 0x81C, 0xF01A0103,
+ 0x81C, 0xEF1C0103,
+ 0x81C, 0xEE1E0103,
+ 0x81C, 0xED200103,
+ 0x81C, 0xEC220103,
+ 0x81C, 0xEB240103,
+ 0x81C, 0xEA260103,
+ 0x81C, 0xE9280103,
+ 0x81C, 0xE82A0103,
+ 0x81C, 0xE72C0103,
+ 0x81C, 0xE62E0103,
+ 0x81C, 0xE5300103,
+ 0x81C, 0xE4320103,
+ 0x81C, 0xE3340103,
+ 0x81C, 0xE2360103,
+ 0x81C, 0xE1380103,
+ 0x81C, 0xA33A0103,
+ 0x81C, 0xA23C0103,
+ 0x81C, 0xA13E0103,
+ 0x81C, 0x84400103,
+ 0x81C, 0x83420103,
+ 0x81C, 0x82440103,
+ 0x81C, 0x81460103,
+ 0x81C, 0x64480103,
+ 0x81C, 0x634A0103,
+ 0x81C, 0x624C0103,
+ 0x81C, 0x614E0103,
+ 0x81C, 0x45500103,
+ 0x81C, 0x44520103,
+ 0x81C, 0x43540103,
+ 0x81C, 0x42560103,
+ 0x81C, 0x25580103,
+ 0x81C, 0x245A0103,
+ 0x81C, 0x235C0103,
+ 0x81C, 0x065E0103,
+ 0x81C, 0x05600103,
+ 0x81C, 0x04620103,
+ 0x81C, 0x03640103,
+ 0x81C, 0x02660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x016A0103,
+ 0x81C, 0x016C0103,
+ 0x81C, 0x016E0103,
+ 0x81C, 0x01700103,
+ 0x81C, 0x01720103,
+ 0x81C, 0x01740103,
+ 0x81C, 0x01760103,
+ 0x81C, 0x01780103,
+ 0x81C, 0x017A0103,
+ 0x81C, 0x017C0103,
+ 0x81C, 0x017E0103,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFA000103,
+ 0x81C, 0xF9020103,
+ 0x81C, 0xF8040103,
+ 0x81C, 0xF7060103,
+ 0x81C, 0xF6080103,
+ 0x81C, 0xF50A0103,
+ 0x81C, 0xF40C0103,
+ 0x81C, 0xF30E0103,
+ 0x81C, 0xF2100103,
+ 0x81C, 0xF1120103,
+ 0x81C, 0xF0140103,
+ 0x81C, 0xEF160103,
+ 0x81C, 0xEE180103,
+ 0x81C, 0xED1A0103,
+ 0x81C, 0xEC1C0103,
+ 0x81C, 0xEB1E0103,
+ 0x81C, 0xEA200103,
+ 0x81C, 0xE9220103,
+ 0x81C, 0xE8240103,
+ 0x81C, 0xE7260103,
+ 0x81C, 0xE6280103,
+ 0x81C, 0xE52A0103,
+ 0x81C, 0xE42C0103,
+ 0x81C, 0xE32E0103,
+ 0x81C, 0xE2300103,
+ 0x81C, 0xE1320103,
+ 0x81C, 0xA5340103,
+ 0x81C, 0xA4360103,
+ 0x81C, 0xA3380103,
+ 0x81C, 0xA23A0103,
+ 0x81C, 0xA13C0103,
+ 0x81C, 0x843E0103,
+ 0x81C, 0x83400103,
+ 0x81C, 0x82420103,
+ 0x81C, 0x81440103,
+ 0x81C, 0x64460103,
+ 0x81C, 0x63480103,
+ 0x81C, 0x624A0103,
+ 0x81C, 0x614C0103,
+ 0x81C, 0x454E0103,
+ 0x81C, 0x44500103,
+ 0x81C, 0x43520103,
+ 0x81C, 0x42540103,
+ 0x81C, 0x41560103,
+ 0x81C, 0x24580103,
+ 0x81C, 0x235A0103,
+ 0x81C, 0x225C0103,
+ 0x81C, 0x055E0103,
+ 0x81C, 0x04600103,
+ 0x81C, 0x03620103,
+ 0x81C, 0x02640103,
+ 0x81C, 0x01660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x016A0103,
+ 0x81C, 0x016C0103,
+ 0x81C, 0x016E0103,
+ 0x81C, 0x01700103,
+ 0x81C, 0x01720103,
+ 0x81C, 0x01740103,
+ 0x81C, 0x01760103,
+ 0x81C, 0x01780103,
+ 0x81C, 0x017A0103,
+ 0x81C, 0x017C0103,
+ 0x81C, 0x017E0103,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000103,
+ 0x81C, 0xFF020103,
+ 0x81C, 0xFE040103,
+ 0x81C, 0xFD060103,
+ 0x81C, 0xFC080103,
+ 0x81C, 0xFB0A0103,
+ 0x81C, 0xFA0C0103,
+ 0x81C, 0xF90E0103,
+ 0x81C, 0xF8100103,
+ 0x81C, 0xF7120103,
+ 0x81C, 0xF6140103,
+ 0x81C, 0xF5160103,
+ 0x81C, 0xF4180103,
+ 0x81C, 0xF31A0103,
+ 0x81C, 0xF21C0103,
+ 0x81C, 0xF11E0103,
+ 0x81C, 0xF0200103,
+ 0x81C, 0xEF220103,
+ 0x81C, 0xEE240103,
+ 0x81C, 0xED260103,
+ 0x81C, 0xEC280103,
+ 0x81C, 0xEB2A0103,
+ 0x81C, 0xEA2C0103,
+ 0x81C, 0xE92E0103,
+ 0x81C, 0xE8300103,
+ 0x81C, 0xE7320103,
+ 0x81C, 0xE6340103,
+ 0x81C, 0xE5360103,
+ 0x81C, 0xE4380103,
+ 0x81C, 0xE33A0103,
+ 0x81C, 0xA53C0103,
+ 0x81C, 0xA43E0103,
+ 0x81C, 0xA3400103,
+ 0x81C, 0xA2420103,
+ 0x81C, 0xA1440103,
+ 0x81C, 0x85460103,
+ 0x81C, 0x84480103,
+ 0x81C, 0x834A0103,
+ 0x81C, 0x824C0103,
+ 0x81C, 0x814E0103,
+ 0x81C, 0x64500103,
+ 0x81C, 0x63520103,
+ 0x81C, 0x62540103,
+ 0x81C, 0x44560103,
+ 0x81C, 0x43580103,
+ 0x81C, 0x425A0103,
+ 0x81C, 0x265C0103,
+ 0x81C, 0x255E0103,
+ 0x81C, 0x24600103,
+ 0x81C, 0x07620103,
+ 0x81C, 0x06640103,
+ 0x81C, 0x05660103,
+ 0x81C, 0x04680103,
+ 0x81C, 0x036A0103,
+ 0x81C, 0x026C0103,
+ 0x81C, 0x016E0103,
+ 0x81C, 0x01700103,
+ 0x81C, 0x01720103,
+ 0x81C, 0x01740103,
+ 0x81C, 0x01760103,
+ 0x81C, 0x01780103,
+ 0x81C, 0x017A0103,
+ 0x81C, 0x017C0103,
+ 0x81C, 0x017E0103,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000103,
+ 0x81C, 0xF7020103,
+ 0x81C, 0xF6040103,
+ 0x81C, 0xF5060103,
+ 0x81C, 0xF4080103,
+ 0x81C, 0xF30A0103,
+ 0x81C, 0xF20C0103,
+ 0x81C, 0xF10E0103,
+ 0x81C, 0xF0100103,
+ 0x81C, 0xEF120103,
+ 0x81C, 0xEE140103,
+ 0x81C, 0xED160103,
+ 0x81C, 0xEC180103,
+ 0x81C, 0xEB1A0103,
+ 0x81C, 0xEA1C0103,
+ 0x81C, 0xE91E0103,
+ 0x81C, 0xE8200103,
+ 0x81C, 0xE7220103,
+ 0x81C, 0xE6240103,
+ 0x81C, 0xE5260103,
+ 0x81C, 0xE4280103,
+ 0x81C, 0xE32A0103,
+ 0x81C, 0xE22C0103,
+ 0x81C, 0xE12E0103,
+ 0x81C, 0xA4300103,
+ 0x81C, 0xA3320103,
+ 0x81C, 0xA2340103,
+ 0x81C, 0xA1360103,
+ 0x81C, 0x85380103,
+ 0x81C, 0x843A0103,
+ 0x81C, 0x833C0103,
+ 0x81C, 0x823E0103,
+ 0x81C, 0x65400103,
+ 0x81C, 0x64420103,
+ 0x81C, 0x63440103,
+ 0x81C, 0x62460103,
+ 0x81C, 0x45480103,
+ 0x81C, 0x444A0103,
+ 0x81C, 0x434C0103,
+ 0x81C, 0x264E0103,
+ 0x81C, 0x25500103,
+ 0x81C, 0x24520103,
+ 0x81C, 0x08540103,
+ 0x81C, 0x07560103,
+ 0x81C, 0x06580103,
+ 0x81C, 0x055A0103,
+ 0x81C, 0x045C0103,
+ 0x81C, 0x035E0103,
+ 0x81C, 0x02600103,
+ 0x81C, 0x01620103,
+ 0x81C, 0x01640103,
+ 0x81C, 0x01660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x016A0103,
+ 0x81C, 0x016C0103,
+ 0x81C, 0x016E0103,
+ 0x81C, 0x01700103,
+ 0x81C, 0x01720103,
+ 0x81C, 0x01740103,
+ 0x81C, 0x01760103,
+ 0x81C, 0x01780103,
+ 0x81C, 0x017A0103,
+ 0x81C, 0x017C0103,
+ 0x81C, 0x017E0103,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000103,
+ 0x81C, 0xFF020103,
+ 0x81C, 0xFE040103,
+ 0x81C, 0xFD060103,
+ 0x81C, 0xFC080103,
+ 0x81C, 0xFB0A0103,
+ 0x81C, 0xFA0C0103,
+ 0x81C, 0xF90E0103,
+ 0x81C, 0xF8100103,
+ 0x81C, 0xF7120103,
+ 0x81C, 0xF6140103,
+ 0x81C, 0xF5160103,
+ 0x81C, 0xF4180103,
+ 0x81C, 0xF31A0103,
+ 0x81C, 0xF21C0103,
+ 0x81C, 0xF11E0103,
+ 0x81C, 0xF0200103,
+ 0x81C, 0xEF220103,
+ 0x81C, 0xEE240103,
+ 0x81C, 0xED260103,
+ 0x81C, 0xEC280103,
+ 0x81C, 0xEB2A0103,
+ 0x81C, 0xEA2C0103,
+ 0x81C, 0xE92E0103,
+ 0x81C, 0xE8300103,
+ 0x81C, 0xE7320103,
+ 0x81C, 0xE6340103,
+ 0x81C, 0xE5360103,
+ 0x81C, 0xE4380103,
+ 0x81C, 0xE33A0103,
+ 0x81C, 0xA53C0103,
+ 0x81C, 0xA43E0103,
+ 0x81C, 0xA3400103,
+ 0x81C, 0xA2420103,
+ 0x81C, 0xA1440103,
+ 0x81C, 0x85460103,
+ 0x81C, 0x84480103,
+ 0x81C, 0x834A0103,
+ 0x81C, 0x824C0103,
+ 0x81C, 0x814E0103,
+ 0x81C, 0x64500103,
+ 0x81C, 0x63520103,
+ 0x81C, 0x62540103,
+ 0x81C, 0x44560103,
+ 0x81C, 0x43580103,
+ 0x81C, 0x425A0103,
+ 0x81C, 0x265C0103,
+ 0x81C, 0x255E0103,
+ 0x81C, 0x24600103,
+ 0x81C, 0x07620103,
+ 0x81C, 0x06640103,
+ 0x81C, 0x05660103,
+ 0x81C, 0x04680103,
+ 0x81C, 0x036A0103,
+ 0x81C, 0x026C0103,
+ 0x81C, 0x016E0103,
+ 0x81C, 0x01700103,
+ 0x81C, 0x01720103,
+ 0x81C, 0x01740103,
+ 0x81C, 0x01760103,
+ 0x81C, 0x01780103,
+ 0x81C, 0x017A0103,
+ 0x81C, 0x017C0103,
+ 0x81C, 0x017E0103,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF9000103,
+ 0x81C, 0xF8020103,
+ 0x81C, 0xF7040103,
+ 0x81C, 0xF6060103,
+ 0x81C, 0xF5080103,
+ 0x81C, 0xF40A0103,
+ 0x81C, 0xF30C0103,
+ 0x81C, 0xF20E0103,
+ 0x81C, 0xF1100103,
+ 0x81C, 0xF0120103,
+ 0x81C, 0xEF140103,
+ 0x81C, 0xEE160103,
+ 0x81C, 0xED180103,
+ 0x81C, 0xEC1A0103,
+ 0x81C, 0xEB1C0103,
+ 0x81C, 0xEA1E0103,
+ 0x81C, 0xE9200103,
+ 0x81C, 0xE8220103,
+ 0x81C, 0xE7240103,
+ 0x81C, 0xE6260103,
+ 0x81C, 0xE5280103,
+ 0x81C, 0xE42A0103,
+ 0x81C, 0xE32C0103,
+ 0x81C, 0xE22E0103,
+ 0x81C, 0xA6300103,
+ 0x81C, 0xA5320103,
+ 0x81C, 0xA4340103,
+ 0x81C, 0xA3360103,
+ 0x81C, 0xA2380103,
+ 0x81C, 0xA13A0103,
+ 0x81C, 0x843C0103,
+ 0x81C, 0x833E0103,
+ 0x81C, 0x82400103,
+ 0x81C, 0x81420103,
+ 0x81C, 0x64440103,
+ 0x81C, 0x63460103,
+ 0x81C, 0x62480103,
+ 0x81C, 0x614A0103,
+ 0x81C, 0x444C0103,
+ 0x81C, 0x434E0103,
+ 0x81C, 0x42500103,
+ 0x81C, 0x41520103,
+ 0x81C, 0x25540103,
+ 0x81C, 0x24560103,
+ 0x81C, 0x23580103,
+ 0x81C, 0x225A0103,
+ 0x81C, 0x055C0103,
+ 0x81C, 0x045E0103,
+ 0x81C, 0x03600103,
+ 0x81C, 0x02620103,
+ 0x81C, 0x01640103,
+ 0x81C, 0x01660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x016A0103,
+ 0x81C, 0x016C0103,
+ 0x81C, 0x016E0103,
+ 0x81C, 0x01700103,
+ 0x81C, 0x01720103,
+ 0x81C, 0x01740103,
+ 0x81C, 0x01760103,
+ 0x81C, 0x01780103,
+ 0x81C, 0x017A0103,
+ 0x81C, 0x017C0103,
+ 0x81C, 0x017E0103,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFA000103,
+ 0x81C, 0xF9020103,
+ 0x81C, 0xF8040103,
+ 0x81C, 0xF7060103,
+ 0x81C, 0xF6080103,
+ 0x81C, 0xF50A0103,
+ 0x81C, 0xF40C0103,
+ 0x81C, 0xF30E0103,
+ 0x81C, 0xF2100103,
+ 0x81C, 0xF1120103,
+ 0x81C, 0xF0140103,
+ 0x81C, 0xEF160103,
+ 0x81C, 0xEE180103,
+ 0x81C, 0xED1A0103,
+ 0x81C, 0xCC1C0103,
+ 0x81C, 0xCB1E0103,
+ 0x81C, 0xCA200103,
+ 0x81C, 0xE9220103,
+ 0x81C, 0xE8240103,
+ 0x81C, 0xE7260103,
+ 0x81C, 0xE6280103,
+ 0x81C, 0xE42A0103,
+ 0x81C, 0xE32C0103,
+ 0x81C, 0xE22E0103,
+ 0x81C, 0xA7300103,
+ 0x81C, 0xA6320103,
+ 0x81C, 0xA5340103,
+ 0x81C, 0xA4360103,
+ 0x81C, 0xA3380103,
+ 0x81C, 0xA23A0103,
+ 0x81C, 0xA13C0103,
+ 0x81C, 0x843E0103,
+ 0x81C, 0x83400103,
+ 0x81C, 0x82420103,
+ 0x81C, 0x65440103,
+ 0x81C, 0x64460103,
+ 0x81C, 0x63480103,
+ 0x81C, 0x624A0103,
+ 0x81C, 0x614C0103,
+ 0x81C, 0x444E0103,
+ 0x81C, 0x43500103,
+ 0x81C, 0x42520103,
+ 0x81C, 0x41540103,
+ 0x81C, 0x24560103,
+ 0x81C, 0x23580103,
+ 0x81C, 0x055A0103,
+ 0x81C, 0x045C0103,
+ 0x81C, 0x035E0103,
+ 0x81C, 0x02600103,
+ 0x81C, 0x01620103,
+ 0x81C, 0x01640103,
+ 0x81C, 0x01660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x016A0103,
+ 0x81C, 0x016C0103,
+ 0x81C, 0x016E0103,
+ 0x81C, 0x01700103,
+ 0x81C, 0x01720103,
+ 0x81C, 0x01740103,
+ 0x81C, 0x01760103,
+ 0x81C, 0x01780103,
+ 0x81C, 0x017A0103,
+ 0x81C, 0x017C0103,
+ 0x81C, 0x017E0103,
+ 0xA0000000, 0x00000000,
+ 0x81C, 0xFF000103,
+ 0x81C, 0xFE020103,
+ 0x81C, 0xFD040103,
+ 0x81C, 0xFC060103,
+ 0x81C, 0xFB080103,
+ 0x81C, 0xFA0A0103,
+ 0x81C, 0xF90C0103,
+ 0x81C, 0xF80E0103,
+ 0x81C, 0xF7100103,
+ 0x81C, 0xF6120103,
+ 0x81C, 0xF5140103,
+ 0x81C, 0xF4160103,
+ 0x81C, 0xF3180103,
+ 0x81C, 0xF21A0103,
+ 0x81C, 0xF11C0103,
+ 0x81C, 0xF01E0103,
+ 0x81C, 0xEF200103,
+ 0x81C, 0xEE220103,
+ 0x81C, 0xED240103,
+ 0x81C, 0xEC260103,
+ 0x81C, 0xEB280103,
+ 0x81C, 0xEA2A0103,
+ 0x81C, 0xE92C0103,
+ 0x81C, 0xE82E0103,
+ 0x81C, 0xE7300103,
+ 0x81C, 0xE6320103,
+ 0x81C, 0xE5340103,
+ 0x81C, 0xE4360103,
+ 0x81C, 0xE3380103,
+ 0x81C, 0xE23A0103,
+ 0x81C, 0xE13C0103,
+ 0x81C, 0xA43E0103,
+ 0x81C, 0xA3400103,
+ 0x81C, 0xA2420103,
+ 0x81C, 0xA1440103,
+ 0x81C, 0x86460103,
+ 0x81C, 0x85480103,
+ 0x81C, 0x844A0103,
+ 0x81C, 0x834C0103,
+ 0x81C, 0x824E0103,
+ 0x81C, 0x81500103,
+ 0x81C, 0x64520103,
+ 0x81C, 0x63540103,
+ 0x81C, 0x62560103,
+ 0x81C, 0x61580103,
+ 0x81C, 0x435A0103,
+ 0x81C, 0x425C0103,
+ 0x81C, 0x415E0103,
+ 0x81C, 0x25600103,
+ 0x81C, 0x24620103,
+ 0x81C, 0x06640103,
+ 0x81C, 0x05660103,
+ 0x81C, 0x04680103,
+ 0x81C, 0x036A0103,
+ 0x81C, 0x026C0103,
+ 0x81C, 0x016E0103,
+ 0x81C, 0x01700103,
+ 0x81C, 0x01720103,
+ 0x81C, 0x01740103,
+ 0x81C, 0x01760103,
+ 0x81C, 0x01780103,
+ 0x81C, 0x017A0103,
+ 0x81C, 0x017C0103,
+ 0x81C, 0x017E0103,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFA000203,
+ 0x81C, 0xF9020203,
+ 0x81C, 0xF8040203,
+ 0x81C, 0xF7060203,
+ 0x81C, 0xF6080203,
+ 0x81C, 0xF50A0203,
+ 0x81C, 0xF40C0203,
+ 0x81C, 0xF30E0203,
+ 0x81C, 0xF2100203,
+ 0x81C, 0xF1120203,
+ 0x81C, 0xF0140203,
+ 0x81C, 0xEF160203,
+ 0x81C, 0xEE180203,
+ 0x81C, 0xED1A0203,
+ 0x81C, 0xEC1C0203,
+ 0x81C, 0xEB1E0203,
+ 0x81C, 0xEA200203,
+ 0x81C, 0xE9220203,
+ 0x81C, 0xE8240203,
+ 0x81C, 0xE7260203,
+ 0x81C, 0xE6280203,
+ 0x81C, 0xE52A0203,
+ 0x81C, 0xE42C0203,
+ 0x81C, 0xE32E0203,
+ 0x81C, 0xE2300203,
+ 0x81C, 0xE1320203,
+ 0x81C, 0xA5340203,
+ 0x81C, 0xA4360203,
+ 0x81C, 0xA3380203,
+ 0x81C, 0xA23A0203,
+ 0x81C, 0xA13C0203,
+ 0x81C, 0x843E0203,
+ 0x81C, 0x83400203,
+ 0x81C, 0x82420203,
+ 0x81C, 0x81440203,
+ 0x81C, 0x63460203,
+ 0x81C, 0x62480203,
+ 0x81C, 0x614A0203,
+ 0x81C, 0x464C0203,
+ 0x81C, 0x454E0203,
+ 0x81C, 0x44500203,
+ 0x81C, 0x43520203,
+ 0x81C, 0x42540203,
+ 0x81C, 0x41560203,
+ 0x81C, 0x24580203,
+ 0x81C, 0x235A0203,
+ 0x81C, 0x065C0203,
+ 0x81C, 0x055E0203,
+ 0x81C, 0x04600203,
+ 0x81C, 0x03620203,
+ 0x81C, 0x02640203,
+ 0x81C, 0x01660203,
+ 0x81C, 0x01680203,
+ 0x81C, 0x016A0203,
+ 0x81C, 0x016C0203,
+ 0x81C, 0x016E0203,
+ 0x81C, 0x01700203,
+ 0x81C, 0x01720203,
+ 0x81C, 0x01740203,
+ 0x81C, 0x01760203,
+ 0x81C, 0x01780203,
+ 0x81C, 0x017A0203,
+ 0x81C, 0x017C0203,
+ 0x81C, 0x017E0203,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000203,
+ 0x81C, 0xF7020203,
+ 0x81C, 0xF6040203,
+ 0x81C, 0xF5060203,
+ 0x81C, 0xF4080203,
+ 0x81C, 0xF30A0203,
+ 0x81C, 0xF20C0203,
+ 0x81C, 0xF10E0203,
+ 0x81C, 0xF0100203,
+ 0x81C, 0xEF120203,
+ 0x81C, 0xEE140203,
+ 0x81C, 0xED160203,
+ 0x81C, 0xEC180203,
+ 0x81C, 0xEB1A0203,
+ 0x81C, 0xEA1C0203,
+ 0x81C, 0xE91E0203,
+ 0x81C, 0xE8200203,
+ 0x81C, 0xE7220203,
+ 0x81C, 0xE6240203,
+ 0x81C, 0xE5260203,
+ 0x81C, 0xE4280203,
+ 0x81C, 0xE32A0203,
+ 0x81C, 0xE22C0203,
+ 0x81C, 0xE12E0203,
+ 0x81C, 0xA6300203,
+ 0x81C, 0xA5320203,
+ 0x81C, 0xA4340203,
+ 0x81C, 0xA3360203,
+ 0x81C, 0xA2380203,
+ 0x81C, 0x853A0203,
+ 0x81C, 0x843C0203,
+ 0x81C, 0x833E0203,
+ 0x81C, 0x82400203,
+ 0x81C, 0x81420203,
+ 0x81C, 0x64440203,
+ 0x81C, 0x63460203,
+ 0x81C, 0x62480203,
+ 0x81C, 0x614A0203,
+ 0x81C, 0x444C0203,
+ 0x81C, 0x434E0203,
+ 0x81C, 0x42500203,
+ 0x81C, 0x25520203,
+ 0x81C, 0x24540203,
+ 0x81C, 0x23560203,
+ 0x81C, 0x06580203,
+ 0x81C, 0x055A0203,
+ 0x81C, 0x045C0203,
+ 0x81C, 0x035E0203,
+ 0x81C, 0x02600203,
+ 0x81C, 0x01620203,
+ 0x81C, 0x01640203,
+ 0x81C, 0x01660203,
+ 0x81C, 0x01680203,
+ 0x81C, 0x016A0203,
+ 0x81C, 0x016C0203,
+ 0x81C, 0x016E0203,
+ 0x81C, 0x01700203,
+ 0x81C, 0x01720203,
+ 0x81C, 0x01740203,
+ 0x81C, 0x01760203,
+ 0x81C, 0x01780203,
+ 0x81C, 0x017A0203,
+ 0x81C, 0x017C0203,
+ 0x81C, 0x017E0203,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000203,
+ 0x81C, 0xFB020203,
+ 0x81C, 0xFA040203,
+ 0x81C, 0xF9060203,
+ 0x81C, 0xF8080203,
+ 0x81C, 0xF70A0203,
+ 0x81C, 0xF60C0203,
+ 0x81C, 0xF50E0203,
+ 0x81C, 0xF4100203,
+ 0x81C, 0xF3120203,
+ 0x81C, 0xF2140203,
+ 0x81C, 0xF1160203,
+ 0x81C, 0xF0180203,
+ 0x81C, 0xEF1A0203,
+ 0x81C, 0xEE1C0203,
+ 0x81C, 0xED1E0203,
+ 0x81C, 0xEC200203,
+ 0x81C, 0xEB220203,
+ 0x81C, 0xEA240203,
+ 0x81C, 0xE9260203,
+ 0x81C, 0xE8280203,
+ 0x81C, 0xE72A0203,
+ 0x81C, 0xE62C0203,
+ 0x81C, 0xE52E0203,
+ 0x81C, 0xE4300203,
+ 0x81C, 0xE3320203,
+ 0x81C, 0xE2340203,
+ 0x81C, 0xE1360203,
+ 0x81C, 0x87380203,
+ 0x81C, 0x863A0203,
+ 0x81C, 0x853C0203,
+ 0x81C, 0x843E0203,
+ 0x81C, 0x83400203,
+ 0x81C, 0x82420203,
+ 0x81C, 0x81440203,
+ 0x81C, 0x64460203,
+ 0x81C, 0x63480203,
+ 0x81C, 0x624A0203,
+ 0x81C, 0x474C0203,
+ 0x81C, 0x464E0203,
+ 0x81C, 0x45500203,
+ 0x81C, 0x44520203,
+ 0x81C, 0x43540203,
+ 0x81C, 0x42560203,
+ 0x81C, 0x24580203,
+ 0x81C, 0x235A0203,
+ 0x81C, 0x075C0203,
+ 0x81C, 0x065E0203,
+ 0x81C, 0x05600203,
+ 0x81C, 0x04620203,
+ 0x81C, 0x03640203,
+ 0x81C, 0x02660203,
+ 0x81C, 0x01680203,
+ 0x81C, 0x016A0203,
+ 0x81C, 0x016C0203,
+ 0x81C, 0x016E0203,
+ 0x81C, 0x01700203,
+ 0x81C, 0x01720203,
+ 0x81C, 0x01740203,
+ 0x81C, 0x01760203,
+ 0x81C, 0x01780203,
+ 0x81C, 0x017A0203,
+ 0x81C, 0x017C0203,
+ 0x81C, 0x017E0203,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000203,
+ 0x81C, 0xF7020203,
+ 0x81C, 0xF6040203,
+ 0x81C, 0xF5060203,
+ 0x81C, 0xF4080203,
+ 0x81C, 0xF30A0203,
+ 0x81C, 0xF20C0203,
+ 0x81C, 0xF10E0203,
+ 0x81C, 0xF0100203,
+ 0x81C, 0xEF120203,
+ 0x81C, 0xEE140203,
+ 0x81C, 0xED160203,
+ 0x81C, 0xEC180203,
+ 0x81C, 0xEB1A0203,
+ 0x81C, 0xEA1C0203,
+ 0x81C, 0xE91E0203,
+ 0x81C, 0xE8200203,
+ 0x81C, 0xE7220203,
+ 0x81C, 0xE6240203,
+ 0x81C, 0xE5260203,
+ 0x81C, 0xE4280203,
+ 0x81C, 0xE32A0203,
+ 0x81C, 0xE22C0203,
+ 0x81C, 0xE12E0203,
+ 0x81C, 0xA6300203,
+ 0x81C, 0xA5320203,
+ 0x81C, 0xA4340203,
+ 0x81C, 0xA3360203,
+ 0x81C, 0xA2380203,
+ 0x81C, 0xA13A0203,
+ 0x81C, 0x843C0203,
+ 0x81C, 0x833E0203,
+ 0x81C, 0x82400203,
+ 0x81C, 0x81420203,
+ 0x81C, 0x64440203,
+ 0x81C, 0x63460203,
+ 0x81C, 0x62480203,
+ 0x81C, 0x614A0203,
+ 0x81C, 0x444C0203,
+ 0x81C, 0x434E0203,
+ 0x81C, 0x42500203,
+ 0x81C, 0x41520203,
+ 0x81C, 0x25540203,
+ 0x81C, 0x24560203,
+ 0x81C, 0x23580203,
+ 0x81C, 0x065A0203,
+ 0x81C, 0x055C0203,
+ 0x81C, 0x045E0203,
+ 0x81C, 0x03600203,
+ 0x81C, 0x02620203,
+ 0x81C, 0x01640203,
+ 0x81C, 0x01660203,
+ 0x81C, 0x01680203,
+ 0x81C, 0x016A0203,
+ 0x81C, 0x016C0203,
+ 0x81C, 0x016E0203,
+ 0x81C, 0x01700203,
+ 0x81C, 0x01720203,
+ 0x81C, 0x01740203,
+ 0x81C, 0x01760203,
+ 0x81C, 0x01780203,
+ 0x81C, 0x017A0203,
+ 0x81C, 0x017C0203,
+ 0x81C, 0x017E0203,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFB000203,
+ 0x81C, 0xFA020203,
+ 0x81C, 0xF9040203,
+ 0x81C, 0xF8060203,
+ 0x81C, 0xF7080203,
+ 0x81C, 0xF60A0203,
+ 0x81C, 0xF50C0203,
+ 0x81C, 0xF40E0203,
+ 0x81C, 0xF3100203,
+ 0x81C, 0xF2120203,
+ 0x81C, 0xF1140203,
+ 0x81C, 0xF0160203,
+ 0x81C, 0xEF180203,
+ 0x81C, 0xEE1A0203,
+ 0x81C, 0xED1C0203,
+ 0x81C, 0xEC1E0203,
+ 0x81C, 0xEB200203,
+ 0x81C, 0xEA220203,
+ 0x81C, 0xE9240203,
+ 0x81C, 0xE8260203,
+ 0x81C, 0xE7280203,
+ 0x81C, 0xE62A0203,
+ 0x81C, 0xE52C0203,
+ 0x81C, 0xE42E0203,
+ 0x81C, 0xE3300203,
+ 0x81C, 0xE2320203,
+ 0x81C, 0xE1340203,
+ 0x81C, 0xA5360203,
+ 0x81C, 0xA4380203,
+ 0x81C, 0xA33A0203,
+ 0x81C, 0xA23C0203,
+ 0x81C, 0x843E0203,
+ 0x81C, 0x83400203,
+ 0x81C, 0x82420203,
+ 0x81C, 0x81440203,
+ 0x81C, 0x64460203,
+ 0x81C, 0x63480203,
+ 0x81C, 0x624A0203,
+ 0x81C, 0x614C0203,
+ 0x81C, 0x474E0203,
+ 0x81C, 0x46500203,
+ 0x81C, 0x45520203,
+ 0x81C, 0x44540203,
+ 0x81C, 0x43560203,
+ 0x81C, 0x25580203,
+ 0x81C, 0x245A0203,
+ 0x81C, 0x235C0203,
+ 0x81C, 0x075E0203,
+ 0x81C, 0x06600203,
+ 0x81C, 0x05620203,
+ 0x81C, 0x04640203,
+ 0x81C, 0x03660203,
+ 0x81C, 0x02680203,
+ 0x81C, 0x016A0203,
+ 0x81C, 0x016C0203,
+ 0x81C, 0x016E0203,
+ 0x81C, 0x01700203,
+ 0x81C, 0x01720203,
+ 0x81C, 0x01740203,
+ 0x81C, 0x01760203,
+ 0x81C, 0x01780203,
+ 0x81C, 0x017A0203,
+ 0x81C, 0x017C0203,
+ 0x81C, 0x017E0203,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000203,
+ 0x81C, 0xFB020203,
+ 0x81C, 0xFA040203,
+ 0x81C, 0xF9060203,
+ 0x81C, 0xF8080203,
+ 0x81C, 0xF70A0203,
+ 0x81C, 0xF60C0203,
+ 0x81C, 0xF50E0203,
+ 0x81C, 0xF4100203,
+ 0x81C, 0xF3120203,
+ 0x81C, 0xF2140203,
+ 0x81C, 0xF1160203,
+ 0x81C, 0xF0180203,
+ 0x81C, 0xEF1A0203,
+ 0x81C, 0xEE1C0203,
+ 0x81C, 0xED1E0203,
+ 0x81C, 0xEC200203,
+ 0x81C, 0xEB220203,
+ 0x81C, 0xEA240203,
+ 0x81C, 0xE9260203,
+ 0x81C, 0xE8280203,
+ 0x81C, 0xE72A0203,
+ 0x81C, 0xE62C0203,
+ 0x81C, 0xE52E0203,
+ 0x81C, 0xE4300203,
+ 0x81C, 0xE3320203,
+ 0x81C, 0xE2340203,
+ 0x81C, 0xE1360203,
+ 0x81C, 0xA5380203,
+ 0x81C, 0xA43A0203,
+ 0x81C, 0xA33C0203,
+ 0x81C, 0x853E0203,
+ 0x81C, 0x84400203,
+ 0x81C, 0x83420203,
+ 0x81C, 0x82440203,
+ 0x81C, 0x81460203,
+ 0x81C, 0x64480203,
+ 0x81C, 0x634A0203,
+ 0x81C, 0x624C0203,
+ 0x81C, 0x614E0203,
+ 0x81C, 0x46500203,
+ 0x81C, 0x45520203,
+ 0x81C, 0x44540203,
+ 0x81C, 0x43560203,
+ 0x81C, 0x25580203,
+ 0x81C, 0x245A0203,
+ 0x81C, 0x235C0203,
+ 0x81C, 0x075E0203,
+ 0x81C, 0x06600203,
+ 0x81C, 0x05620203,
+ 0x81C, 0x04640203,
+ 0x81C, 0x03660203,
+ 0x81C, 0x02680203,
+ 0x81C, 0x016A0203,
+ 0x81C, 0x016C0203,
+ 0x81C, 0x016E0203,
+ 0x81C, 0x01700203,
+ 0x81C, 0x01720203,
+ 0x81C, 0x01740203,
+ 0x81C, 0x01760203,
+ 0x81C, 0x01780203,
+ 0x81C, 0x017A0203,
+ 0x81C, 0x017C0203,
+ 0x81C, 0x017E0203,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000203,
+ 0x81C, 0xFF020203,
+ 0x81C, 0xFE040203,
+ 0x81C, 0xFD060203,
+ 0x81C, 0xFC080203,
+ 0x81C, 0xFB0A0203,
+ 0x81C, 0xFA0C0203,
+ 0x81C, 0xF90E0203,
+ 0x81C, 0xF8100203,
+ 0x81C, 0xF7120203,
+ 0x81C, 0xF6140203,
+ 0x81C, 0xF5160203,
+ 0x81C, 0xF4180203,
+ 0x81C, 0xF31A0203,
+ 0x81C, 0xF21C0203,
+ 0x81C, 0xF11E0203,
+ 0x81C, 0xF0200203,
+ 0x81C, 0xEF220203,
+ 0x81C, 0xEE240203,
+ 0x81C, 0xED260203,
+ 0x81C, 0xEC280203,
+ 0x81C, 0xEB2A0203,
+ 0x81C, 0xEA2C0203,
+ 0x81C, 0xE92E0203,
+ 0x81C, 0xE8300203,
+ 0x81C, 0xE7320203,
+ 0x81C, 0xE6340203,
+ 0x81C, 0xE5360203,
+ 0x81C, 0xE4380203,
+ 0x81C, 0xE33A0203,
+ 0x81C, 0xE23C0203,
+ 0x81C, 0xE13E0203,
+ 0x81C, 0xA4400203,
+ 0x81C, 0xA3420203,
+ 0x81C, 0xA2440203,
+ 0x81C, 0xA1460203,
+ 0x81C, 0x84480203,
+ 0x81C, 0x834A0203,
+ 0x81C, 0x824C0203,
+ 0x81C, 0x814E0203,
+ 0x81C, 0x64500203,
+ 0x81C, 0x63520203,
+ 0x81C, 0x62540203,
+ 0x81C, 0x61560203,
+ 0x81C, 0x45580203,
+ 0x81C, 0x445A0203,
+ 0x81C, 0x435C0203,
+ 0x81C, 0x425E0203,
+ 0x81C, 0x24600203,
+ 0x81C, 0x23620203,
+ 0x81C, 0x07640203,
+ 0x81C, 0x06660203,
+ 0x81C, 0x05680203,
+ 0x81C, 0x046A0203,
+ 0x81C, 0x036C0203,
+ 0x81C, 0x026E0203,
+ 0x81C, 0x01700203,
+ 0x81C, 0x01720203,
+ 0x81C, 0x01740203,
+ 0x81C, 0x01760203,
+ 0x81C, 0x01780203,
+ 0x81C, 0x017A0203,
+ 0x81C, 0x017C0203,
+ 0x81C, 0x017E0203,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000203,
+ 0x81C, 0xF6020203,
+ 0x81C, 0xF5040203,
+ 0x81C, 0xF4060203,
+ 0x81C, 0xF3080203,
+ 0x81C, 0xF20A0203,
+ 0x81C, 0xF10C0203,
+ 0x81C, 0xF00E0203,
+ 0x81C, 0xEF100203,
+ 0x81C, 0xEE120203,
+ 0x81C, 0xED140203,
+ 0x81C, 0xEC160203,
+ 0x81C, 0xEB180203,
+ 0x81C, 0xEA1A0203,
+ 0x81C, 0xE91C0203,
+ 0x81C, 0xE81E0203,
+ 0x81C, 0xE7200203,
+ 0x81C, 0xE6220203,
+ 0x81C, 0xE5240203,
+ 0x81C, 0xE4260203,
+ 0x81C, 0xE3280203,
+ 0x81C, 0xE22A0203,
+ 0x81C, 0xA62C0203,
+ 0x81C, 0xA52E0203,
+ 0x81C, 0xA4300203,
+ 0x81C, 0xA3320203,
+ 0x81C, 0xA2340203,
+ 0x81C, 0xA1360203,
+ 0x81C, 0x86380203,
+ 0x81C, 0x853A0203,
+ 0x81C, 0x843C0203,
+ 0x81C, 0x833E0203,
+ 0x81C, 0x65400203,
+ 0x81C, 0x64420203,
+ 0x81C, 0x63440203,
+ 0x81C, 0x46460203,
+ 0x81C, 0x45480203,
+ 0x81C, 0x444A0203,
+ 0x81C, 0x434C0203,
+ 0x81C, 0x264E0203,
+ 0x81C, 0x25500203,
+ 0x81C, 0x24520203,
+ 0x81C, 0x08540203,
+ 0x81C, 0x07560203,
+ 0x81C, 0x06580203,
+ 0x81C, 0x055A0203,
+ 0x81C, 0x045C0203,
+ 0x81C, 0x035E0203,
+ 0x81C, 0x02600203,
+ 0x81C, 0x01620203,
+ 0x81C, 0x01640203,
+ 0x81C, 0x01660203,
+ 0x81C, 0x01680203,
+ 0x81C, 0x016A0203,
+ 0x81C, 0x016C0203,
+ 0x81C, 0x016E0203,
+ 0x81C, 0x01700203,
+ 0x81C, 0x01720203,
+ 0x81C, 0x01740203,
+ 0x81C, 0x01760203,
+ 0x81C, 0x01780203,
+ 0x81C, 0x017A0203,
+ 0x81C, 0x017C0203,
+ 0x81C, 0x017E0203,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000203,
+ 0x81C, 0xFF020203,
+ 0x81C, 0xFE040203,
+ 0x81C, 0xFD060203,
+ 0x81C, 0xFC080203,
+ 0x81C, 0xFB0A0203,
+ 0x81C, 0xFA0C0203,
+ 0x81C, 0xF90E0203,
+ 0x81C, 0xF8100203,
+ 0x81C, 0xF7120203,
+ 0x81C, 0xF6140203,
+ 0x81C, 0xF5160203,
+ 0x81C, 0xF4180203,
+ 0x81C, 0xF31A0203,
+ 0x81C, 0xF21C0203,
+ 0x81C, 0xF11E0203,
+ 0x81C, 0xF0200203,
+ 0x81C, 0xEF220203,
+ 0x81C, 0xEE240203,
+ 0x81C, 0xED260203,
+ 0x81C, 0xEC280203,
+ 0x81C, 0xEB2A0203,
+ 0x81C, 0xEA2C0203,
+ 0x81C, 0xE92E0203,
+ 0x81C, 0xE8300203,
+ 0x81C, 0xE7320203,
+ 0x81C, 0xE6340203,
+ 0x81C, 0xE5360203,
+ 0x81C, 0xE4380203,
+ 0x81C, 0xE33A0203,
+ 0x81C, 0xE23C0203,
+ 0x81C, 0xE13E0203,
+ 0x81C, 0xA4400203,
+ 0x81C, 0xA3420203,
+ 0x81C, 0xA2440203,
+ 0x81C, 0xA1460203,
+ 0x81C, 0x84480203,
+ 0x81C, 0x834A0203,
+ 0x81C, 0x824C0203,
+ 0x81C, 0x814E0203,
+ 0x81C, 0x64500203,
+ 0x81C, 0x63520203,
+ 0x81C, 0x62540203,
+ 0x81C, 0x61560203,
+ 0x81C, 0x45580203,
+ 0x81C, 0x445A0203,
+ 0x81C, 0x435C0203,
+ 0x81C, 0x425E0203,
+ 0x81C, 0x24600203,
+ 0x81C, 0x23620203,
+ 0x81C, 0x07640203,
+ 0x81C, 0x06660203,
+ 0x81C, 0x05680203,
+ 0x81C, 0x046A0203,
+ 0x81C, 0x036C0203,
+ 0x81C, 0x026E0203,
+ 0x81C, 0x01700203,
+ 0x81C, 0x01720203,
+ 0x81C, 0x01740203,
+ 0x81C, 0x01760203,
+ 0x81C, 0x01780203,
+ 0x81C, 0x017A0203,
+ 0x81C, 0x017C0203,
+ 0x81C, 0x017E0203,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000203,
+ 0x81C, 0xF7020203,
+ 0x81C, 0xF6040203,
+ 0x81C, 0xF5060203,
+ 0x81C, 0xF4080203,
+ 0x81C, 0xF30A0203,
+ 0x81C, 0xF20C0203,
+ 0x81C, 0xF10E0203,
+ 0x81C, 0xF0100203,
+ 0x81C, 0xEF120203,
+ 0x81C, 0xEE140203,
+ 0x81C, 0xED160203,
+ 0x81C, 0xEC180203,
+ 0x81C, 0xEB1A0203,
+ 0x81C, 0xEA1C0203,
+ 0x81C, 0xE91E0203,
+ 0x81C, 0xE8200203,
+ 0x81C, 0xE7220203,
+ 0x81C, 0xE6240203,
+ 0x81C, 0xE5260203,
+ 0x81C, 0xE4280203,
+ 0x81C, 0xE32A0203,
+ 0x81C, 0xE22C0203,
+ 0x81C, 0xE12E0203,
+ 0x81C, 0xA6300203,
+ 0x81C, 0xA5320203,
+ 0x81C, 0xA4340203,
+ 0x81C, 0xA3360203,
+ 0x81C, 0xA2380203,
+ 0x81C, 0xA13A0203,
+ 0x81C, 0x843C0203,
+ 0x81C, 0x833E0203,
+ 0x81C, 0x82400203,
+ 0x81C, 0x81420203,
+ 0x81C, 0x64440203,
+ 0x81C, 0x63460203,
+ 0x81C, 0x62480203,
+ 0x81C, 0x614A0203,
+ 0x81C, 0x444C0203,
+ 0x81C, 0x434E0203,
+ 0x81C, 0x42500203,
+ 0x81C, 0x41520203,
+ 0x81C, 0x25540203,
+ 0x81C, 0x24560203,
+ 0x81C, 0x23580203,
+ 0x81C, 0x065A0203,
+ 0x81C, 0x055C0203,
+ 0x81C, 0x045E0203,
+ 0x81C, 0x03600203,
+ 0x81C, 0x02620203,
+ 0x81C, 0x01640203,
+ 0x81C, 0x01660203,
+ 0x81C, 0x01680203,
+ 0x81C, 0x016A0203,
+ 0x81C, 0x016C0203,
+ 0x81C, 0x016E0203,
+ 0x81C, 0x01700203,
+ 0x81C, 0x01720203,
+ 0x81C, 0x01740203,
+ 0x81C, 0x01760203,
+ 0x81C, 0x01780203,
+ 0x81C, 0x017A0203,
+ 0x81C, 0x017C0203,
+ 0x81C, 0x017E0203,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF9000203,
+ 0x81C, 0xF8020203,
+ 0x81C, 0xF7040203,
+ 0x81C, 0xF6060203,
+ 0x81C, 0xF5080203,
+ 0x81C, 0xF40A0203,
+ 0x81C, 0xF30C0203,
+ 0x81C, 0xF20E0203,
+ 0x81C, 0xF1100203,
+ 0x81C, 0xF0120203,
+ 0x81C, 0xEF140203,
+ 0x81C, 0xCE160203,
+ 0x81C, 0xCD180203,
+ 0x81C, 0xCC1A0203,
+ 0x81C, 0xCB1C0203,
+ 0x81C, 0xCA1E0203,
+ 0x81C, 0xC9200203,
+ 0x81C, 0xC8220203,
+ 0x81C, 0xC7240203,
+ 0x81C, 0xC6260203,
+ 0x81C, 0xC5280203,
+ 0x81C, 0xC42A0203,
+ 0x81C, 0xC32C0203,
+ 0x81C, 0xC22E0203,
+ 0x81C, 0xC1300203,
+ 0x81C, 0xA5320203,
+ 0x81C, 0xA4340203,
+ 0x81C, 0xA3360203,
+ 0x81C, 0xA2380203,
+ 0x81C, 0xA13A0203,
+ 0x81C, 0x853C0203,
+ 0x81C, 0x843E0203,
+ 0x81C, 0x83400203,
+ 0x81C, 0x82420203,
+ 0x81C, 0x81440203,
+ 0x81C, 0x64460203,
+ 0x81C, 0x63480203,
+ 0x81C, 0x624A0203,
+ 0x81C, 0x614C0203,
+ 0x81C, 0x444E0203,
+ 0x81C, 0x43500203,
+ 0x81C, 0x42520203,
+ 0x81C, 0x41540203,
+ 0x81C, 0x24560203,
+ 0x81C, 0x23580203,
+ 0x81C, 0x075A0203,
+ 0x81C, 0x065C0203,
+ 0x81C, 0x055E0203,
+ 0x81C, 0x04600203,
+ 0x81C, 0x03620203,
+ 0x81C, 0x02640203,
+ 0x81C, 0x01660203,
+ 0x81C, 0x01680203,
+ 0x81C, 0x016A0203,
+ 0x81C, 0x016C0203,
+ 0x81C, 0x016E0203,
+ 0x81C, 0x01700203,
+ 0x81C, 0x01720203,
+ 0x81C, 0x01740203,
+ 0x81C, 0x01760203,
+ 0x81C, 0x01780203,
+ 0x81C, 0x017A0203,
+ 0x81C, 0x017C0203,
+ 0x81C, 0x017E0203,
+ 0xA0000000, 0x00000000,
+ 0x81C, 0xFF000203,
+ 0x81C, 0xFF020203,
+ 0x81C, 0xFE040203,
+ 0x81C, 0xFD060203,
+ 0x81C, 0xFC080203,
+ 0x81C, 0xFB0A0203,
+ 0x81C, 0xFA0C0203,
+ 0x81C, 0xF90E0203,
+ 0x81C, 0xF8100203,
+ 0x81C, 0xF7120203,
+ 0x81C, 0xF6140203,
+ 0x81C, 0xF5160203,
+ 0x81C, 0xF4180203,
+ 0x81C, 0xF31A0203,
+ 0x81C, 0xF21C0203,
+ 0x81C, 0xF11E0203,
+ 0x81C, 0xF0200203,
+ 0x81C, 0xEF220203,
+ 0x81C, 0xEE240203,
+ 0x81C, 0xED260203,
+ 0x81C, 0xEC280203,
+ 0x81C, 0xEB2A0203,
+ 0x81C, 0xEA2C0203,
+ 0x81C, 0xE92E0203,
+ 0x81C, 0xE8300203,
+ 0x81C, 0xE7320203,
+ 0x81C, 0xE6340203,
+ 0x81C, 0xE5360203,
+ 0x81C, 0xE4380203,
+ 0x81C, 0xE33A0203,
+ 0x81C, 0xE23C0203,
+ 0x81C, 0xE13E0203,
+ 0x81C, 0xA4400203,
+ 0x81C, 0xA3420203,
+ 0x81C, 0xA2440203,
+ 0x81C, 0xA1460203,
+ 0x81C, 0x85480203,
+ 0x81C, 0x844A0203,
+ 0x81C, 0x834C0203,
+ 0x81C, 0x824E0203,
+ 0x81C, 0x81500203,
+ 0x81C, 0x64520203,
+ 0x81C, 0x63540203,
+ 0x81C, 0x62560203,
+ 0x81C, 0x61580203,
+ 0x81C, 0x445A0203,
+ 0x81C, 0x435C0203,
+ 0x81C, 0x425E0203,
+ 0x81C, 0x25600203,
+ 0x81C, 0x24620203,
+ 0x81C, 0x06640203,
+ 0x81C, 0x05660203,
+ 0x81C, 0x04680203,
+ 0x81C, 0x036A0203,
+ 0x81C, 0x026C0203,
+ 0x81C, 0x016E0203,
+ 0x81C, 0x01700203,
+ 0x81C, 0x01720203,
+ 0x81C, 0x01740203,
+ 0x81C, 0x01760203,
+ 0x81C, 0x01780203,
+ 0x81C, 0x017A0203,
+ 0x81C, 0x017C0203,
+ 0x81C, 0x017E0203,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000303,
+ 0x81C, 0xF7020303,
+ 0x81C, 0xF6040303,
+ 0x81C, 0xF5060303,
+ 0x81C, 0xF4080303,
+ 0x81C, 0xF30A0303,
+ 0x81C, 0xF20C0303,
+ 0x81C, 0xF10E0303,
+ 0x81C, 0xF0100303,
+ 0x81C, 0xEF120303,
+ 0x81C, 0xEE140303,
+ 0x81C, 0xED160303,
+ 0x81C, 0xEC180303,
+ 0x81C, 0xEB1A0303,
+ 0x81C, 0xEA1C0303,
+ 0x81C, 0xE91E0303,
+ 0x81C, 0xE8200303,
+ 0x81C, 0xE7220303,
+ 0x81C, 0xE6240303,
+ 0x81C, 0xE5260303,
+ 0x81C, 0xE4280303,
+ 0x81C, 0xE32A0303,
+ 0x81C, 0xE22C0303,
+ 0x81C, 0xE12E0303,
+ 0x81C, 0xA6300303,
+ 0x81C, 0xA5320303,
+ 0x81C, 0xA4340303,
+ 0x81C, 0xA3360303,
+ 0x81C, 0xA2380303,
+ 0x81C, 0xA13A0303,
+ 0x81C, 0x843C0303,
+ 0x81C, 0x833E0303,
+ 0x81C, 0x82400303,
+ 0x81C, 0x81420303,
+ 0x81C, 0x64440303,
+ 0x81C, 0x63460303,
+ 0x81C, 0x62480303,
+ 0x81C, 0x614A0303,
+ 0x81C, 0x454C0303,
+ 0x81C, 0x444E0303,
+ 0x81C, 0x43500303,
+ 0x81C, 0x42520303,
+ 0x81C, 0x41540303,
+ 0x81C, 0x24560303,
+ 0x81C, 0x23580303,
+ 0x81C, 0x065A0303,
+ 0x81C, 0x055C0303,
+ 0x81C, 0x045E0303,
+ 0x81C, 0x03600303,
+ 0x81C, 0x02620303,
+ 0x81C, 0x01640303,
+ 0x81C, 0x01660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x016A0303,
+ 0x81C, 0x016C0303,
+ 0x81C, 0x016E0303,
+ 0x81C, 0x01700303,
+ 0x81C, 0x01720303,
+ 0x81C, 0x01740303,
+ 0x81C, 0x01760303,
+ 0x81C, 0x01780303,
+ 0x81C, 0x017A0303,
+ 0x81C, 0x017C0303,
+ 0x81C, 0x017E0303,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000303,
+ 0x81C, 0xF6020303,
+ 0x81C, 0xF5040303,
+ 0x81C, 0xF4060303,
+ 0x81C, 0xF3080303,
+ 0x81C, 0xF20A0303,
+ 0x81C, 0xF10C0303,
+ 0x81C, 0xF00E0303,
+ 0x81C, 0xEF100303,
+ 0x81C, 0xEE120303,
+ 0x81C, 0xED140303,
+ 0x81C, 0xEC160303,
+ 0x81C, 0xEB180303,
+ 0x81C, 0xEA1A0303,
+ 0x81C, 0xE91C0303,
+ 0x81C, 0xE81E0303,
+ 0x81C, 0xE7200303,
+ 0x81C, 0xE6220303,
+ 0x81C, 0xE5240303,
+ 0x81C, 0xE4260303,
+ 0x81C, 0xE3280303,
+ 0x81C, 0xC32A0303,
+ 0x81C, 0xC22C0303,
+ 0x81C, 0xC12E0303,
+ 0x81C, 0xA5300303,
+ 0x81C, 0xA4320303,
+ 0x81C, 0xA3340303,
+ 0x81C, 0xA2360303,
+ 0x81C, 0xA1380303,
+ 0x81C, 0x853A0303,
+ 0x81C, 0x843C0303,
+ 0x81C, 0x833E0303,
+ 0x81C, 0x82400303,
+ 0x81C, 0x81420303,
+ 0x81C, 0x64440303,
+ 0x81C, 0x63460303,
+ 0x81C, 0x62480303,
+ 0x81C, 0x614A0303,
+ 0x81C, 0x454C0303,
+ 0x81C, 0x444E0303,
+ 0x81C, 0x43500303,
+ 0x81C, 0x25520303,
+ 0x81C, 0x24540303,
+ 0x81C, 0x23560303,
+ 0x81C, 0x06580303,
+ 0x81C, 0x055A0303,
+ 0x81C, 0x045C0303,
+ 0x81C, 0x035E0303,
+ 0x81C, 0x02600303,
+ 0x81C, 0x01620303,
+ 0x81C, 0x01640303,
+ 0x81C, 0x01660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x016A0303,
+ 0x81C, 0x016C0303,
+ 0x81C, 0x016E0303,
+ 0x81C, 0x01700303,
+ 0x81C, 0x01720303,
+ 0x81C, 0x01740303,
+ 0x81C, 0x01760303,
+ 0x81C, 0x01780303,
+ 0x81C, 0x017A0303,
+ 0x81C, 0x017C0303,
+ 0x81C, 0x017E0303,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF9000303,
+ 0x81C, 0xF8020303,
+ 0x81C, 0xF7040303,
+ 0x81C, 0xF6060303,
+ 0x81C, 0xF5080303,
+ 0x81C, 0xF40A0303,
+ 0x81C, 0xF30C0303,
+ 0x81C, 0xF20E0303,
+ 0x81C, 0xF1100303,
+ 0x81C, 0xF0120303,
+ 0x81C, 0xEF140303,
+ 0x81C, 0xEE160303,
+ 0x81C, 0xED180303,
+ 0x81C, 0xEC1A0303,
+ 0x81C, 0xEB1C0303,
+ 0x81C, 0xEA1E0303,
+ 0x81C, 0xE9200303,
+ 0x81C, 0xE8220303,
+ 0x81C, 0xE7240303,
+ 0x81C, 0xE6260303,
+ 0x81C, 0xE5280303,
+ 0x81C, 0xE42A0303,
+ 0x81C, 0xE32C0303,
+ 0x81C, 0xE22E0303,
+ 0x81C, 0xE1300303,
+ 0x81C, 0xA4320303,
+ 0x81C, 0xA3340303,
+ 0x81C, 0xA2360303,
+ 0x81C, 0xA1380303,
+ 0x81C, 0x853A0303,
+ 0x81C, 0x843C0303,
+ 0x81C, 0x833E0303,
+ 0x81C, 0x82400303,
+ 0x81C, 0x81420303,
+ 0x81C, 0x64440303,
+ 0x81C, 0x63460303,
+ 0x81C, 0x62480303,
+ 0x81C, 0x614A0303,
+ 0x81C, 0x444C0303,
+ 0x81C, 0x434E0303,
+ 0x81C, 0x42500303,
+ 0x81C, 0x25520303,
+ 0x81C, 0x24540303,
+ 0x81C, 0x23560303,
+ 0x81C, 0x07580303,
+ 0x81C, 0x065A0303,
+ 0x81C, 0x055C0303,
+ 0x81C, 0x045E0303,
+ 0x81C, 0x03600303,
+ 0x81C, 0x02620303,
+ 0x81C, 0x01640303,
+ 0x81C, 0x01660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x016A0303,
+ 0x81C, 0x016C0303,
+ 0x81C, 0x016E0303,
+ 0x81C, 0x01700303,
+ 0x81C, 0x01720303,
+ 0x81C, 0x01740303,
+ 0x81C, 0x01760303,
+ 0x81C, 0x01780303,
+ 0x81C, 0x017A0303,
+ 0x81C, 0x017C0303,
+ 0x81C, 0x017E0303,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000303,
+ 0x81C, 0xF6020303,
+ 0x81C, 0xF5040303,
+ 0x81C, 0xF4060303,
+ 0x81C, 0xF3080303,
+ 0x81C, 0xF20A0303,
+ 0x81C, 0xF10C0303,
+ 0x81C, 0xF00E0303,
+ 0x81C, 0xEF100303,
+ 0x81C, 0xEE120303,
+ 0x81C, 0xED140303,
+ 0x81C, 0xEC160303,
+ 0x81C, 0xEB180303,
+ 0x81C, 0xEA1A0303,
+ 0x81C, 0xE91C0303,
+ 0x81C, 0xE81E0303,
+ 0x81C, 0xE7200303,
+ 0x81C, 0xE6220303,
+ 0x81C, 0xE5240303,
+ 0x81C, 0xE4260303,
+ 0x81C, 0xE3280303,
+ 0x81C, 0xE22A0303,
+ 0x81C, 0xE12C0303,
+ 0x81C, 0xA72E0303,
+ 0x81C, 0xA6300303,
+ 0x81C, 0xA5320303,
+ 0x81C, 0xA4340303,
+ 0x81C, 0xA3360303,
+ 0x81C, 0xA2380303,
+ 0x81C, 0xA13A0303,
+ 0x81C, 0x843C0303,
+ 0x81C, 0x833E0303,
+ 0x81C, 0x82400303,
+ 0x81C, 0x81420303,
+ 0x81C, 0x64440303,
+ 0x81C, 0x63460303,
+ 0x81C, 0x62480303,
+ 0x81C, 0x614A0303,
+ 0x81C, 0x454C0303,
+ 0x81C, 0x444E0303,
+ 0x81C, 0x43500303,
+ 0x81C, 0x42520303,
+ 0x81C, 0x41540303,
+ 0x81C, 0x24560303,
+ 0x81C, 0x23580303,
+ 0x81C, 0x065A0303,
+ 0x81C, 0x055C0303,
+ 0x81C, 0x045E0303,
+ 0x81C, 0x03600303,
+ 0x81C, 0x02620303,
+ 0x81C, 0x01640303,
+ 0x81C, 0x01660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x016A0303,
+ 0x81C, 0x016C0303,
+ 0x81C, 0x016E0303,
+ 0x81C, 0x01700303,
+ 0x81C, 0x01720303,
+ 0x81C, 0x01740303,
+ 0x81C, 0x01760303,
+ 0x81C, 0x01780303,
+ 0x81C, 0x017A0303,
+ 0x81C, 0x017C0303,
+ 0x81C, 0x017E0303,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFB000303,
+ 0x81C, 0xFA020303,
+ 0x81C, 0xF9040303,
+ 0x81C, 0xF8060303,
+ 0x81C, 0xF7080303,
+ 0x81C, 0xF60A0303,
+ 0x81C, 0xF50C0303,
+ 0x81C, 0xF40E0303,
+ 0x81C, 0xF3100303,
+ 0x81C, 0xF2120303,
+ 0x81C, 0xF1140303,
+ 0x81C, 0xF0160303,
+ 0x81C, 0xEF180303,
+ 0x81C, 0xEE1A0303,
+ 0x81C, 0xED1C0303,
+ 0x81C, 0xEC1E0303,
+ 0x81C, 0xEB200303,
+ 0x81C, 0xEA220303,
+ 0x81C, 0xE9240303,
+ 0x81C, 0xE8260303,
+ 0x81C, 0xE7280303,
+ 0x81C, 0xE62A0303,
+ 0x81C, 0xE52C0303,
+ 0x81C, 0xE42E0303,
+ 0x81C, 0xE3300303,
+ 0x81C, 0xE2320303,
+ 0x81C, 0xE1340303,
+ 0x81C, 0xC2360303,
+ 0x81C, 0xC1380303,
+ 0x81C, 0xA33A0303,
+ 0x81C, 0xA23C0303,
+ 0x81C, 0x853E0303,
+ 0x81C, 0x84400303,
+ 0x81C, 0x83420303,
+ 0x81C, 0x66440303,
+ 0x81C, 0x65460303,
+ 0x81C, 0x64480303,
+ 0x81C, 0x634A0303,
+ 0x81C, 0x624C0303,
+ 0x81C, 0x614E0303,
+ 0x81C, 0x45500303,
+ 0x81C, 0x44520303,
+ 0x81C, 0x43540303,
+ 0x81C, 0x42560303,
+ 0x81C, 0x25580303,
+ 0x81C, 0x245A0303,
+ 0x81C, 0x235C0303,
+ 0x81C, 0x065E0303,
+ 0x81C, 0x05600303,
+ 0x81C, 0x04620303,
+ 0x81C, 0x03640303,
+ 0x81C, 0x02660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x016A0303,
+ 0x81C, 0x016C0303,
+ 0x81C, 0x016E0303,
+ 0x81C, 0x01700303,
+ 0x81C, 0x01720303,
+ 0x81C, 0x01740303,
+ 0x81C, 0x01760303,
+ 0x81C, 0x01780303,
+ 0x81C, 0x017A0303,
+ 0x81C, 0x017C0303,
+ 0x81C, 0x017E0303,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF9000303,
+ 0x81C, 0xF8020303,
+ 0x81C, 0xF7040303,
+ 0x81C, 0xF6060303,
+ 0x81C, 0xF5080303,
+ 0x81C, 0xF40A0303,
+ 0x81C, 0xF30C0303,
+ 0x81C, 0xF20E0303,
+ 0x81C, 0xF1100303,
+ 0x81C, 0xF0120303,
+ 0x81C, 0xEF140303,
+ 0x81C, 0xEE160303,
+ 0x81C, 0xED180303,
+ 0x81C, 0xEC1A0303,
+ 0x81C, 0xEB1C0303,
+ 0x81C, 0xEA1E0303,
+ 0x81C, 0xE9200303,
+ 0x81C, 0xE8220303,
+ 0x81C, 0xE7240303,
+ 0x81C, 0xE6260303,
+ 0x81C, 0xE5280303,
+ 0x81C, 0xE42A0303,
+ 0x81C, 0xE32C0303,
+ 0x81C, 0xE22E0303,
+ 0x81C, 0xE1300303,
+ 0x81C, 0xA6320303,
+ 0x81C, 0xA5340303,
+ 0x81C, 0xA4360303,
+ 0x81C, 0xA3380303,
+ 0x81C, 0xA23A0303,
+ 0x81C, 0xA13C0303,
+ 0x81C, 0x853E0303,
+ 0x81C, 0x84400303,
+ 0x81C, 0x83420303,
+ 0x81C, 0x82440303,
+ 0x81C, 0x81460303,
+ 0x81C, 0x64480303,
+ 0x81C, 0x634A0303,
+ 0x81C, 0x624C0303,
+ 0x81C, 0x614E0303,
+ 0x81C, 0x44500303,
+ 0x81C, 0x43520303,
+ 0x81C, 0x42540303,
+ 0x81C, 0x41560303,
+ 0x81C, 0x25580303,
+ 0x81C, 0x245A0303,
+ 0x81C, 0x235C0303,
+ 0x81C, 0x055E0303,
+ 0x81C, 0x04600303,
+ 0x81C, 0x03620303,
+ 0x81C, 0x02640303,
+ 0x81C, 0x01660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x016A0303,
+ 0x81C, 0x016C0303,
+ 0x81C, 0x016E0303,
+ 0x81C, 0x01700303,
+ 0x81C, 0x01720303,
+ 0x81C, 0x01740303,
+ 0x81C, 0x01760303,
+ 0x81C, 0x01780303,
+ 0x81C, 0x017A0303,
+ 0x81C, 0x017C0303,
+ 0x81C, 0x017E0303,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000303,
+ 0x81C, 0xFD020303,
+ 0x81C, 0xFC040303,
+ 0x81C, 0xFB060303,
+ 0x81C, 0xFA080303,
+ 0x81C, 0xF90A0303,
+ 0x81C, 0xF80C0303,
+ 0x81C, 0xF70E0303,
+ 0x81C, 0xF6100303,
+ 0x81C, 0xF5120303,
+ 0x81C, 0xF4140303,
+ 0x81C, 0xF3160303,
+ 0x81C, 0xF2180303,
+ 0x81C, 0xF11A0303,
+ 0x81C, 0xF01C0303,
+ 0x81C, 0xEF1E0303,
+ 0x81C, 0xEE200303,
+ 0x81C, 0xED220303,
+ 0x81C, 0xEC240303,
+ 0x81C, 0xEB260303,
+ 0x81C, 0xEA280303,
+ 0x81C, 0xE92A0303,
+ 0x81C, 0xE82C0303,
+ 0x81C, 0xE72E0303,
+ 0x81C, 0xE6300303,
+ 0x81C, 0xE5320303,
+ 0x81C, 0xE4340303,
+ 0x81C, 0xE3360303,
+ 0x81C, 0xC3380303,
+ 0x81C, 0xC23A0303,
+ 0x81C, 0xC13C0303,
+ 0x81C, 0xA43E0303,
+ 0x81C, 0xA3400303,
+ 0x81C, 0xA2420303,
+ 0x81C, 0xA1440303,
+ 0x81C, 0x85460303,
+ 0x81C, 0x84480303,
+ 0x81C, 0x834A0303,
+ 0x81C, 0x824C0303,
+ 0x81C, 0x814E0303,
+ 0x81C, 0x64500303,
+ 0x81C, 0x63520303,
+ 0x81C, 0x62540303,
+ 0x81C, 0x61560303,
+ 0x81C, 0x44580303,
+ 0x81C, 0x435A0303,
+ 0x81C, 0x425C0303,
+ 0x81C, 0x265E0303,
+ 0x81C, 0x25600303,
+ 0x81C, 0x24620303,
+ 0x81C, 0x06640303,
+ 0x81C, 0x05660303,
+ 0x81C, 0x04680303,
+ 0x81C, 0x036A0303,
+ 0x81C, 0x026C0303,
+ 0x81C, 0x016E0303,
+ 0x81C, 0x01700303,
+ 0x81C, 0x01720303,
+ 0x81C, 0x01740303,
+ 0x81C, 0x01760303,
+ 0x81C, 0x01780303,
+ 0x81C, 0x017A0303,
+ 0x81C, 0x017C0303,
+ 0x81C, 0x017E0303,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000303,
+ 0x81C, 0xF6020303,
+ 0x81C, 0xF5040303,
+ 0x81C, 0xF4060303,
+ 0x81C, 0xF3080303,
+ 0x81C, 0xF20A0303,
+ 0x81C, 0xF10C0303,
+ 0x81C, 0xF00E0303,
+ 0x81C, 0xEF100303,
+ 0x81C, 0xEE120303,
+ 0x81C, 0xED140303,
+ 0x81C, 0xEC160303,
+ 0x81C, 0xEB180303,
+ 0x81C, 0xEA1A0303,
+ 0x81C, 0xE91C0303,
+ 0x81C, 0xE81E0303,
+ 0x81C, 0xE7200303,
+ 0x81C, 0xE6220303,
+ 0x81C, 0xE5240303,
+ 0x81C, 0xE4260303,
+ 0x81C, 0xE3280303,
+ 0x81C, 0xE22A0303,
+ 0x81C, 0xA62C0303,
+ 0x81C, 0xA52E0303,
+ 0x81C, 0xA4300303,
+ 0x81C, 0xA3320303,
+ 0x81C, 0xA2340303,
+ 0x81C, 0x87360303,
+ 0x81C, 0x86380303,
+ 0x81C, 0x853A0303,
+ 0x81C, 0x843C0303,
+ 0x81C, 0x833E0303,
+ 0x81C, 0x66400303,
+ 0x81C, 0x65420303,
+ 0x81C, 0x64440303,
+ 0x81C, 0x45460303,
+ 0x81C, 0x44480303,
+ 0x81C, 0x434A0303,
+ 0x81C, 0x274C0303,
+ 0x81C, 0x264E0303,
+ 0x81C, 0x25500303,
+ 0x81C, 0x24520303,
+ 0x81C, 0x23540303,
+ 0x81C, 0x08560303,
+ 0x81C, 0x07580303,
+ 0x81C, 0x065A0303,
+ 0x81C, 0x055C0303,
+ 0x81C, 0x045E0303,
+ 0x81C, 0x03600303,
+ 0x81C, 0x02620303,
+ 0x81C, 0x01640303,
+ 0x81C, 0x01660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x016A0303,
+ 0x81C, 0x016C0303,
+ 0x81C, 0x016E0303,
+ 0x81C, 0x01700303,
+ 0x81C, 0x01720303,
+ 0x81C, 0x01740303,
+ 0x81C, 0x01760303,
+ 0x81C, 0x01780303,
+ 0x81C, 0x017A0303,
+ 0x81C, 0x017C0303,
+ 0x81C, 0x017E0303,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000303,
+ 0x81C, 0xFD020303,
+ 0x81C, 0xFC040303,
+ 0x81C, 0xFB060303,
+ 0x81C, 0xFA080303,
+ 0x81C, 0xF90A0303,
+ 0x81C, 0xF80C0303,
+ 0x81C, 0xF70E0303,
+ 0x81C, 0xF6100303,
+ 0x81C, 0xF5120303,
+ 0x81C, 0xF4140303,
+ 0x81C, 0xF3160303,
+ 0x81C, 0xF2180303,
+ 0x81C, 0xF11A0303,
+ 0x81C, 0xF01C0303,
+ 0x81C, 0xEF1E0303,
+ 0x81C, 0xEE200303,
+ 0x81C, 0xED220303,
+ 0x81C, 0xEC240303,
+ 0x81C, 0xEB260303,
+ 0x81C, 0xEA280303,
+ 0x81C, 0xE92A0303,
+ 0x81C, 0xE82C0303,
+ 0x81C, 0xE72E0303,
+ 0x81C, 0xE6300303,
+ 0x81C, 0xE5320303,
+ 0x81C, 0xE4340303,
+ 0x81C, 0xE3360303,
+ 0x81C, 0xC3380303,
+ 0x81C, 0xC23A0303,
+ 0x81C, 0xC13C0303,
+ 0x81C, 0xA43E0303,
+ 0x81C, 0xA3400303,
+ 0x81C, 0xA2420303,
+ 0x81C, 0xA1440303,
+ 0x81C, 0x85460303,
+ 0x81C, 0x84480303,
+ 0x81C, 0x834A0303,
+ 0x81C, 0x824C0303,
+ 0x81C, 0x814E0303,
+ 0x81C, 0x64500303,
+ 0x81C, 0x63520303,
+ 0x81C, 0x62540303,
+ 0x81C, 0x61560303,
+ 0x81C, 0x44580303,
+ 0x81C, 0x435A0303,
+ 0x81C, 0x425C0303,
+ 0x81C, 0x265E0303,
+ 0x81C, 0x25600303,
+ 0x81C, 0x24620303,
+ 0x81C, 0x06640303,
+ 0x81C, 0x05660303,
+ 0x81C, 0x04680303,
+ 0x81C, 0x036A0303,
+ 0x81C, 0x026C0303,
+ 0x81C, 0x016E0303,
+ 0x81C, 0x01700303,
+ 0x81C, 0x01720303,
+ 0x81C, 0x01740303,
+ 0x81C, 0x01760303,
+ 0x81C, 0x01780303,
+ 0x81C, 0x017A0303,
+ 0x81C, 0x017C0303,
+ 0x81C, 0x017E0303,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000303,
+ 0x81C, 0xF6020303,
+ 0x81C, 0xF5040303,
+ 0x81C, 0xF4060303,
+ 0x81C, 0xF3080303,
+ 0x81C, 0xF20A0303,
+ 0x81C, 0xF10C0303,
+ 0x81C, 0xF00E0303,
+ 0x81C, 0xEF100303,
+ 0x81C, 0xEE120303,
+ 0x81C, 0xED140303,
+ 0x81C, 0xEC160303,
+ 0x81C, 0xEB180303,
+ 0x81C, 0xEA1A0303,
+ 0x81C, 0xE91C0303,
+ 0x81C, 0xE81E0303,
+ 0x81C, 0xE7200303,
+ 0x81C, 0xE6220303,
+ 0x81C, 0xE5240303,
+ 0x81C, 0xE4260303,
+ 0x81C, 0xE3280303,
+ 0x81C, 0xE22A0303,
+ 0x81C, 0xE12C0303,
+ 0x81C, 0xA72E0303,
+ 0x81C, 0xA6300303,
+ 0x81C, 0xA5320303,
+ 0x81C, 0xA4340303,
+ 0x81C, 0xA3360303,
+ 0x81C, 0xA2380303,
+ 0x81C, 0xA13A0303,
+ 0x81C, 0x843C0303,
+ 0x81C, 0x833E0303,
+ 0x81C, 0x82400303,
+ 0x81C, 0x81420303,
+ 0x81C, 0x64440303,
+ 0x81C, 0x63460303,
+ 0x81C, 0x62480303,
+ 0x81C, 0x614A0303,
+ 0x81C, 0x454C0303,
+ 0x81C, 0x444E0303,
+ 0x81C, 0x43500303,
+ 0x81C, 0x42520303,
+ 0x81C, 0x41540303,
+ 0x81C, 0x24560303,
+ 0x81C, 0x23580303,
+ 0x81C, 0x065A0303,
+ 0x81C, 0x055C0303,
+ 0x81C, 0x045E0303,
+ 0x81C, 0x03600303,
+ 0x81C, 0x02620303,
+ 0x81C, 0x01640303,
+ 0x81C, 0x01660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x016A0303,
+ 0x81C, 0x016C0303,
+ 0x81C, 0x016E0303,
+ 0x81C, 0x01700303,
+ 0x81C, 0x01720303,
+ 0x81C, 0x01740303,
+ 0x81C, 0x01760303,
+ 0x81C, 0x01780303,
+ 0x81C, 0x017A0303,
+ 0x81C, 0x017C0303,
+ 0x81C, 0x017E0303,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000303,
+ 0x81C, 0xF6020303,
+ 0x81C, 0xF5040303,
+ 0x81C, 0xF4060303,
+ 0x81C, 0xF3080303,
+ 0x81C, 0xF20A0303,
+ 0x81C, 0xF10C0303,
+ 0x81C, 0xF00E0303,
+ 0x81C, 0xEF100303,
+ 0x81C, 0xEE120303,
+ 0x81C, 0xED140303,
+ 0x81C, 0xEC160303,
+ 0x81C, 0xEB180303,
+ 0x81C, 0xEA1A0303,
+ 0x81C, 0xAF1C0303,
+ 0x81C, 0xAE1E0303,
+ 0x81C, 0xAD200303,
+ 0x81C, 0xAC220303,
+ 0x81C, 0xAB240303,
+ 0x81C, 0xAA260303,
+ 0x81C, 0xC5280303,
+ 0x81C, 0xC42A0303,
+ 0x81C, 0xC32C0303,
+ 0x81C, 0xC22E0303,
+ 0x81C, 0xA5300303,
+ 0x81C, 0xA4320303,
+ 0x81C, 0xA3340303,
+ 0x81C, 0xA2360303,
+ 0x81C, 0xA1380303,
+ 0x81C, 0x853A0303,
+ 0x81C, 0x843C0303,
+ 0x81C, 0x833E0303,
+ 0x81C, 0x82400303,
+ 0x81C, 0x81420303,
+ 0x81C, 0x64440303,
+ 0x81C, 0x63460303,
+ 0x81C, 0x62480303,
+ 0x81C, 0x614A0303,
+ 0x81C, 0x444C0303,
+ 0x81C, 0x434E0303,
+ 0x81C, 0x42500303,
+ 0x81C, 0x41520303,
+ 0x81C, 0x25540303,
+ 0x81C, 0x24560303,
+ 0x81C, 0x06580303,
+ 0x81C, 0x055A0303,
+ 0x81C, 0x045C0303,
+ 0x81C, 0x035E0303,
+ 0x81C, 0x02600303,
+ 0x81C, 0x01620303,
+ 0x81C, 0x01640303,
+ 0x81C, 0x01660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x016A0303,
+ 0x81C, 0x016C0303,
+ 0x81C, 0x016E0303,
+ 0x81C, 0x01700303,
+ 0x81C, 0x01720303,
+ 0x81C, 0x01740303,
+ 0x81C, 0x01760303,
+ 0x81C, 0x01780303,
+ 0x81C, 0x017A0303,
+ 0x81C, 0x017C0303,
+ 0x81C, 0x017E0303,
+ 0xA0000000, 0x00000000,
+ 0x81C, 0xFD000303,
+ 0x81C, 0xFC020303,
+ 0x81C, 0xFB040303,
+ 0x81C, 0xFA060303,
+ 0x81C, 0xF9080303,
+ 0x81C, 0xF80A0303,
+ 0x81C, 0xF70C0303,
+ 0x81C, 0xF60E0303,
+ 0x81C, 0xF5100303,
+ 0x81C, 0xF4120303,
+ 0x81C, 0xF3140303,
+ 0x81C, 0xF2160303,
+ 0x81C, 0xF1180303,
+ 0x81C, 0xF01A0303,
+ 0x81C, 0xEF1C0303,
+ 0x81C, 0xEE1E0303,
+ 0x81C, 0xED200303,
+ 0x81C, 0xEC220303,
+ 0x81C, 0xEB240303,
+ 0x81C, 0xEA260303,
+ 0x81C, 0xE9280303,
+ 0x81C, 0xE82A0303,
+ 0x81C, 0xE72C0303,
+ 0x81C, 0xE62E0303,
+ 0x81C, 0xE5300303,
+ 0x81C, 0xE4320303,
+ 0x81C, 0xE3340303,
+ 0x81C, 0xE2360303,
+ 0x81C, 0xE1380303,
+ 0x81C, 0xA53A0303,
+ 0x81C, 0xA43C0303,
+ 0x81C, 0xA33E0303,
+ 0x81C, 0xA2400303,
+ 0x81C, 0xA1420303,
+ 0x81C, 0x87440303,
+ 0x81C, 0x86460303,
+ 0x81C, 0x85480303,
+ 0x81C, 0x844A0303,
+ 0x81C, 0x834C0303,
+ 0x81C, 0x824E0303,
+ 0x81C, 0x81500303,
+ 0x81C, 0x64520303,
+ 0x81C, 0x63540303,
+ 0x81C, 0x62560303,
+ 0x81C, 0x61580303,
+ 0x81C, 0x435A0303,
+ 0x81C, 0x425C0303,
+ 0x81C, 0x415E0303,
+ 0x81C, 0x07600303,
+ 0x81C, 0x06620303,
+ 0x81C, 0x05640303,
+ 0x81C, 0x04660303,
+ 0x81C, 0x03680303,
+ 0x81C, 0x026A0303,
+ 0x81C, 0x016C0303,
+ 0x81C, 0x016E0303,
+ 0x81C, 0x01700303,
+ 0x81C, 0x01720303,
+ 0x81C, 0x01740303,
+ 0x81C, 0x01760303,
+ 0x81C, 0x01780303,
+ 0x81C, 0x017A0303,
+ 0x81C, 0x017C0303,
+ 0x81C, 0x017E0303,
+ 0xB0000000, 0x00000000,
+ 0xC50, 0x00000022,
+ 0xC50, 0x00000020,
+ 0xE50, 0x00000022,
+ 0xE50, 0x00000020,
+ 0x1850, 0x00000022,
+ 0x1850, 0x00000020,
+ 0x1A50, 0x00000022,
+ 0x1A50, 0x00000020,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8814a_agc, rtw_phy_cfg_agc);
+
+static const u32 rtw8814a_bb[] = {
+ 0x800, 0x9020D010,
+ 0x804, 0x08011280,
+ 0x808, 0x0E0282FF,
+ 0x80C, 0x1000002F,
+ 0x80000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x810, 0x33303265,
+ 0xA0000000, 0x00000000,
+ 0x810, 0x33303265,
+ 0xB0000000, 0x00000000,
+ 0x814, 0x020C3D10,
+ 0x818, 0x04A10385,
+ 0x820, 0x00000000,
+ 0x824, 0x00033E40,
+ 0x828, 0x00000000,
+ 0x82C, 0x73985170,
+ 0x830, 0x79A0EA08,
+ 0x834, 0x042E708A,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x838, 0x86667640,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x838, 0x86667641,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x838, 0x86667641,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x838, 0x86667641,
+ 0xA0000000, 0x00000000,
+ 0x838, 0x86667640,
+ 0xB0000000, 0x00000000,
+ 0x83C, 0x9798B9B9,
+ 0x840, 0x17578F60,
+ 0x844, 0x4BBDFCDE,
+ 0x848, 0x5CD07F8B,
+ 0x84C, 0x6CFBF7B5,
+ 0x850, 0x28834706,
+ 0x854, 0x0001520C,
+ 0x858, 0x4060C000,
+ 0x85C, 0x74210368,
+ 0x860, 0x6929C321,
+ 0x864, 0x79727432,
+ 0x868, 0x8CA7A314,
+ 0x86C, 0x438C2878,
+ 0x870, 0x44444444,
+ 0x874, 0x21612C2E,
+ 0x878, 0x00003152,
+ 0x87C, 0x000FC000,
+ 0x8A0, 0x00000013,
+ 0x8A4, 0x7F7F7F7F,
+ 0x8A8, 0xA202033E,
+ 0x8AC, 0xF40F550A,
+ 0x8B0, 0x00000600,
+ 0x8B4, 0x000FC080,
+ 0x8B8, 0xEC0057FF,
+ 0x8BC, 0x8CA520C3,
+ 0x8C0, 0x3FF00020,
+ 0x8C4, 0x44C00000,
+ 0x80000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x8C8, 0x80025969,
+ 0xA0000000, 0x00000000,
+ 0x8C8, 0x80025167,
+ 0xB0000000, 0x00000000,
+ 0x8CC, 0x08250492,
+ 0x8D0, 0x0000B800,
+ 0x8D4, 0x940008A0,
+ 0x8D8, 0x290B5612,
+ 0x8DC, 0x00000000,
+ 0x8E0, 0x32316407,
+ 0x8E4, 0x4A092925,
+ 0x8E8, 0xFFFFC42C,
+ 0x8EC, 0x99999999,
+ 0x8F0, 0x00009999,
+ 0x8F4, 0x00F80FA1,
+ 0x8F8, 0x400082C0,
+ 0x8FC, 0x00000000,
+ 0x900, 0x00400700,
+ 0x90C, 0x09004000,
+ 0x910, 0x0000FC00,
+ 0x914, 0xD6400404,
+ 0x918, 0x1C1028C0,
+ 0x91C, 0x64B11A1C,
+ 0x920, 0xE0767233,
+ 0x924, 0x055AA500,
+ 0x928, 0x4AB0E4E4,
+ 0x92C, 0xFFFE0000,
+ 0x930, 0xFFFFFFFE,
+ 0x934, 0x001FFFFF,
+ 0x938, 0x00008400,
+ 0x93C, 0x932C0642,
+ 0x940, 0x093E9360,
+ 0x944, 0x08000000,
+ 0x948, 0x04000000,
+ 0x950, 0x02010080,
+ 0x954, 0x86510080,
+ 0x960, 0x00000000,
+ 0x964, 0x00000000,
+ 0x968, 0x00000000,
+ 0x96C, 0x00000000,
+ 0x970, 0x801FFFFF,
+ 0x978, 0x00000000,
+ 0x97C, 0x00000000,
+ 0x980, 0x00000000,
+ 0x984, 0x00000000,
+ 0x988, 0x00000000,
+ 0x98C, 0x03440000,
+ 0x990, 0x27100000,
+ 0x994, 0xFFFF0100,
+ 0x998, 0xFFFFFF5C,
+ 0x99C, 0xFFFFFFFF,
+ 0x9A0, 0x000000FF,
+ 0x9A4, 0x00080080,
+ 0x9A8, 0x0C2F0000,
+ 0x9AC, 0x00560000,
+ 0x9B0, 0x81081008,
+ 0x9B4, 0x00000000,
+ 0x9B8, 0x01081008,
+ 0x9BC, 0x01081008,
+ 0x9D0, 0x00000000,
+ 0x9D4, 0x00000000,
+ 0x9D8, 0x00000000,
+ 0x9DC, 0x00000000,
+ 0x9E4, 0x00000002,
+ 0x9E8, 0x000022D5,
+ 0x9FC, 0xEFFFF7FF,
+ 0xB00, 0xE3100000,
+ 0xB04, 0x0000B000,
+ 0xB0C, 0x31EAA006,
+ 0xB5C, 0x41CFFFFF,
+ 0xC00, 0x00000007,
+ 0xC04, 0x00042020,
+ 0xC08, 0x80410231,
+ 0xC0C, 0x00000000,
+ 0xC10, 0x00000100,
+ 0xC14, 0x01000000,
+ 0xC1C, 0x40000053,
+ 0xC50, 0x00000020,
+ 0xC54, 0x00000000,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0xC58, 0x3C0A0C14,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0xC58, 0x3C0A0C14,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0xC58, 0x3C0A0C14,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xC58, 0x3C0A0C14,
+ 0xA0000000, 0x00000000,
+ 0xC58, 0x3C020C14,
+ 0xB0000000, 0x00000000,
+ 0xC5C, 0x0D000058,
+ 0xC60, 0x1B800000,
+ 0xC60, 0x0B800001,
+ 0xC60, 0x05800002,
+ 0xC60, 0x07800003,
+ 0xC60, 0x1A800004,
+ 0xC60, 0x0B800005,
+ 0xC60, 0x05800006,
+ 0xC60, 0x0E800007,
+ 0xC60, 0x1A800008,
+ 0xC60, 0x0B800009,
+ 0xC60, 0x1580000A,
+ 0xC60, 0x0880000B,
+ 0xC60, 0x1A80000C,
+ 0xC60, 0x0B80000D,
+ 0xC60, 0x0580000E,
+ 0xC60, 0x0E80000F,
+ 0xC60, 0x1A800010,
+ 0xC60, 0x0B800011,
+ 0xC60, 0x15800012,
+ 0xC60, 0x08800013,
+ 0xC60, 0x1A800014,
+ 0xC60, 0x0B800015,
+ 0xC60, 0x05800016,
+ 0xC60, 0x07800017,
+ 0xC60, 0x1A800018,
+ 0xC60, 0x0B800019,
+ 0xC60, 0x1580001A,
+ 0xC60, 0x0880001B,
+ 0xC60, 0x1B80001C,
+ 0xC60, 0x0B80001D,
+ 0xC60, 0x0580001E,
+ 0xC60, 0x0780001F,
+ 0xC60, 0x1B800020,
+ 0xC60, 0x0B800021,
+ 0xC60, 0x05800022,
+ 0xC60, 0x07800023,
+ 0xC60, 0x1B800024,
+ 0xC60, 0x0B800025,
+ 0xC60, 0x05800026,
+ 0xC60, 0x07800027,
+ 0xC60, 0x1B800028,
+ 0xC60, 0x0B800029,
+ 0xC60, 0x0580002A,
+ 0xC60, 0x0780002B,
+ 0xC60, 0x1B800030,
+ 0xC60, 0x0B800031,
+ 0xC60, 0x05800032,
+ 0xC60, 0x00800033,
+ 0xC60, 0x1B800034,
+ 0xC60, 0x0B800035,
+ 0xC60, 0x05800036,
+ 0xC60, 0x00800037,
+ 0xC60, 0x1B800038,
+ 0xC60, 0x0B800039,
+ 0xC60, 0x0580003A,
+ 0xC60, 0x0E80803B,
+ 0xC94, 0x01000401,
+ 0xC98, 0x00188000,
+ 0xCA0, 0x00002929,
+ 0xCA4, 0x08040201,
+ 0xCA8, 0x80402010,
+ 0xCAC, 0x77777000,
+ 0xCB0, 0x54775477,
+ 0xCB4, 0x54775477,
+ 0xCB8, 0x00500000,
+ 0xCBC, 0x77700000,
+ 0xCC0, 0x00000010,
+ 0xCC8, 0x00000010,
+ 0xE00, 0x00000007,
+ 0xE04, 0x00042020,
+ 0xE08, 0x80410231,
+ 0xE0C, 0x00000000,
+ 0xE10, 0x00000100,
+ 0xE14, 0x01000000,
+ 0xE1C, 0x40000053,
+ 0xE50, 0x00000020,
+ 0xE54, 0x00000000,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0xE58, 0x3C0A0C14,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0xE58, 0x3C0A0C14,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0xE58, 0x3C0A0C14,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xE58, 0x3C0A0C14,
+ 0xA0000000, 0x00000000,
+ 0xE58, 0x3C020C14,
+ 0xB0000000, 0x00000000,
+ 0xE5C, 0x0D000058,
+ 0xE60, 0x1B800000,
+ 0xE60, 0x0B800001,
+ 0xE60, 0x05800002,
+ 0xE60, 0x07800003,
+ 0xE60, 0x1A800004,
+ 0xE60, 0x0B800005,
+ 0xE60, 0x05800006,
+ 0xE60, 0x0E800007,
+ 0xE60, 0x1A800008,
+ 0xE60, 0x0B800009,
+ 0xE60, 0x1580000A,
+ 0xE60, 0x0880000B,
+ 0xE60, 0x1A80000C,
+ 0xE60, 0x0B80000D,
+ 0xE60, 0x0580000E,
+ 0xE60, 0x0E80000F,
+ 0xE60, 0x1A800010,
+ 0xE60, 0x0B800011,
+ 0xE60, 0x15800012,
+ 0xE60, 0x08800013,
+ 0xE60, 0x1A800014,
+ 0xE60, 0x0B800015,
+ 0xE60, 0x05800016,
+ 0xE60, 0x07800017,
+ 0xE60, 0x1A800018,
+ 0xE60, 0x0B800019,
+ 0xE60, 0x1580001A,
+ 0xE60, 0x0880001B,
+ 0xE60, 0x1B80001C,
+ 0xE60, 0x0B80001D,
+ 0xE60, 0x0580001E,
+ 0xE60, 0x0780001F,
+ 0xE60, 0x1B800020,
+ 0xE60, 0x0B800021,
+ 0xE60, 0x05800022,
+ 0xE60, 0x07800023,
+ 0xE60, 0x1B800024,
+ 0xE60, 0x0B800025,
+ 0xE60, 0x05800026,
+ 0xE60, 0x07800027,
+ 0xE60, 0x1B800028,
+ 0xE60, 0x0B800029,
+ 0xE60, 0x0580002A,
+ 0xE60, 0x0780002B,
+ 0xE60, 0x1B800030,
+ 0xE60, 0x0B800031,
+ 0xE60, 0x05800032,
+ 0xE60, 0x00800033,
+ 0xE60, 0x1B800034,
+ 0xE60, 0x0B800035,
+ 0xE60, 0x05800036,
+ 0xE60, 0x00800037,
+ 0xE60, 0x1B800038,
+ 0xE60, 0x0B800039,
+ 0xE60, 0x0580003A,
+ 0xE60, 0x0E80803B,
+ 0xE94, 0x01000401,
+ 0xE98, 0x00188000,
+ 0xEA0, 0x00002929,
+ 0xEA4, 0x08040201,
+ 0xEA8, 0x80402010,
+ 0xEAC, 0x77777000,
+ 0xEB0, 0x54775477,
+ 0xEB4, 0x54775477,
+ 0xEB8, 0x00500000,
+ 0xEBC, 0x77700000,
+ 0x1800, 0x00000007,
+ 0x1804, 0x00042020,
+ 0x1808, 0x80410231,
+ 0x180C, 0x00000000,
+ 0x1810, 0x00000100,
+ 0x1814, 0x01000000,
+ 0x181C, 0x40000053,
+ 0x1850, 0x00000020,
+ 0x1854, 0x00000000,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1858, 0x3C0A0C14,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1858, 0x3C0A0C14,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1858, 0x3C0A0C14,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1858, 0x3C0A0C14,
+ 0xA0000000, 0x00000000,
+ 0x1858, 0x3C020C14,
+ 0xB0000000, 0x00000000,
+ 0x185C, 0x0D000058,
+ 0x1860, 0x1B800000,
+ 0x1860, 0x0B800001,
+ 0x1860, 0x05800002,
+ 0x1860, 0x07800003,
+ 0x1860, 0x1A800004,
+ 0x1860, 0x0B800005,
+ 0x1860, 0x05800006,
+ 0x1860, 0x0E800007,
+ 0x1860, 0x1A800008,
+ 0x1860, 0x0B800009,
+ 0x1860, 0x1580000A,
+ 0x1860, 0x0880000B,
+ 0x1860, 0x1A80000C,
+ 0x1860, 0x0B80000D,
+ 0x1860, 0x0580000E,
+ 0x1860, 0x0E80000F,
+ 0x1860, 0x1A800010,
+ 0x1860, 0x0B800011,
+ 0x1860, 0x15800012,
+ 0x1860, 0x08800013,
+ 0x1860, 0x1A800014,
+ 0x1860, 0x0B800015,
+ 0x1860, 0x05800016,
+ 0x1860, 0x07800017,
+ 0x1860, 0x1A800018,
+ 0x1860, 0x0B800019,
+ 0x1860, 0x1580001A,
+ 0x1860, 0x0880001B,
+ 0x1860, 0x1B80001C,
+ 0x1860, 0x0B80001D,
+ 0x1860, 0x0580001E,
+ 0x1860, 0x0780001F,
+ 0x1860, 0x1B800020,
+ 0x1860, 0x0B800021,
+ 0x1860, 0x05800022,
+ 0x1860, 0x07800023,
+ 0x1860, 0x1B800024,
+ 0x1860, 0x0B800025,
+ 0x1860, 0x05800026,
+ 0x1860, 0x07800027,
+ 0x1860, 0x1B800028,
+ 0x1860, 0x0B800029,
+ 0x1860, 0x0580002A,
+ 0x1860, 0x0780002B,
+ 0x1860, 0x1B800030,
+ 0x1860, 0x0B800031,
+ 0x1860, 0x05800032,
+ 0x1860, 0x00800033,
+ 0x1860, 0x1B800034,
+ 0x1860, 0x0B800035,
+ 0x1860, 0x05800036,
+ 0x1860, 0x00800037,
+ 0x1860, 0x1B800038,
+ 0x1860, 0x0B800039,
+ 0x1860, 0x0580003A,
+ 0x1860, 0x0E80803B,
+ 0x1894, 0x01000401,
+ 0x1898, 0x00188000,
+ 0x18A0, 0x00002929,
+ 0x18A4, 0x08040201,
+ 0x18A8, 0x80402010,
+ 0x18AC, 0x77777000,
+ 0x18B0, 0x54775477,
+ 0x18B4, 0x54775477,
+ 0x18B8, 0x00500000,
+ 0x18BC, 0x77700000,
+ 0x1A00, 0x00000007,
+ 0x1A04, 0x00042020,
+ 0x1A08, 0x80410231,
+ 0x1A0C, 0x00000000,
+ 0x1A10, 0x00000100,
+ 0x1A14, 0x01000000,
+ 0x1A1C, 0x40000053,
+ 0x1A50, 0x00000020,
+ 0x1A54, 0x00000000,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1A58, 0x3C0A0C14,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1A58, 0x3C0A0C14,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1A58, 0x3C0A0C14,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1A58, 0x3C0A0C14,
+ 0xA0000000, 0x00000000,
+ 0x1A58, 0x3C020C14,
+ 0xB0000000, 0x00000000,
+ 0x1A5C, 0x0D000058,
+ 0x1A60, 0x1B800000,
+ 0x1A60, 0x0B800001,
+ 0x1A60, 0x05800002,
+ 0x1A60, 0x07800003,
+ 0x1A60, 0x1A800004,
+ 0x1A60, 0x0B800005,
+ 0x1A60, 0x05800006,
+ 0x1A60, 0x0E800007,
+ 0x1A60, 0x1A800008,
+ 0x1A60, 0x0B800009,
+ 0x1A60, 0x1580000A,
+ 0x1A60, 0x0880000B,
+ 0x1A60, 0x1A80000C,
+ 0x1A60, 0x0B80000D,
+ 0x1A60, 0x0580000E,
+ 0x1A60, 0x0E80000F,
+ 0x1A60, 0x1A800010,
+ 0x1A60, 0x0B800011,
+ 0x1A60, 0x15800012,
+ 0x1A60, 0x08800013,
+ 0x1A60, 0x1A800014,
+ 0x1A60, 0x0B800015,
+ 0x1A60, 0x05800016,
+ 0x1A60, 0x07800017,
+ 0x1A60, 0x1A800018,
+ 0x1A60, 0x0B800019,
+ 0x1A60, 0x1580001A,
+ 0x1A60, 0x0880001B,
+ 0x1A60, 0x1B80001C,
+ 0x1A60, 0x0B80001D,
+ 0x1A60, 0x0580001E,
+ 0x1A60, 0x0780001F,
+ 0x1A60, 0x1B800020,
+ 0x1A60, 0x0B800021,
+ 0x1A60, 0x05800022,
+ 0x1A60, 0x07800023,
+ 0x1A60, 0x1B800024,
+ 0x1A60, 0x0B800025,
+ 0x1A60, 0x05800026,
+ 0x1A60, 0x07800027,
+ 0x1A60, 0x1B800028,
+ 0x1A60, 0x0B800029,
+ 0x1A60, 0x0580002A,
+ 0x1A60, 0x0780002B,
+ 0x1A60, 0x1B800030,
+ 0x1A60, 0x0B800031,
+ 0x1A60, 0x05800032,
+ 0x1A60, 0x00800033,
+ 0x1A60, 0x1B800034,
+ 0x1A60, 0x0B800035,
+ 0x1A60, 0x05800036,
+ 0x1A60, 0x00800037,
+ 0x1A60, 0x1B800038,
+ 0x1A60, 0x0B800039,
+ 0x1A60, 0x0580003A,
+ 0x1A60, 0x0E80803B,
+ 0x1A94, 0x01000401,
+ 0x1A98, 0x00188000,
+ 0x1AA0, 0x00002929,
+ 0x1AA4, 0x08040201,
+ 0x1AA8, 0x80402010,
+ 0x1AAC, 0x77777000,
+ 0x1AB0, 0x54775477,
+ 0x1AB4, 0x54775477,
+ 0x1AB8, 0x00500000,
+ 0x1ABC, 0x77700000,
+ 0x1904, 0x00030000,
+ 0x1914, 0x00030000,
+ 0x1984, 0x03000000,
+ 0x1988, 0x00000087,
+ 0x198C, 0x00000007,
+ 0x1990, 0xFFAA5500,
+ 0x1994, 0x00000077,
+ 0x1998, 0x12801000,
+ 0x1998, 0x12801000,
+ 0x1998, 0x12801001,
+ 0x1998, 0x12801002,
+ 0x1998, 0x12801003,
+ 0x1998, 0x12801004,
+ 0x1998, 0x12801005,
+ 0x1998, 0x12801006,
+ 0x1998, 0x12801007,
+ 0x1998, 0x12801008,
+ 0x1998, 0x12801009,
+ 0x1998, 0x1280100A,
+ 0x1998, 0x1280100B,
+ 0x1998, 0x1280100C,
+ 0x1998, 0x1280100D,
+ 0x1998, 0x1280100E,
+ 0x1998, 0x1280100F,
+ 0x1998, 0x12801010,
+ 0x1998, 0x12801011,
+ 0x1998, 0x12801012,
+ 0x1998, 0x12801013,
+ 0x1998, 0x12801014,
+ 0x1998, 0x12801015,
+ 0x1998, 0x12801016,
+ 0x1998, 0x12801017,
+ 0x1998, 0x12801018,
+ 0x1998, 0x12801019,
+ 0x1998, 0x1280101A,
+ 0x1998, 0x1280101B,
+ 0x1998, 0x1280101C,
+ 0x1998, 0x1280101D,
+ 0x1998, 0x1280101E,
+ 0x1998, 0x1280101F,
+ 0x1998, 0x12801020,
+ 0x1998, 0x12801021,
+ 0x1998, 0x12801022,
+ 0x1998, 0x12801023,
+ 0x1998, 0x1280102C,
+ 0x1998, 0x1280102D,
+ 0x1998, 0x1280102E,
+ 0x1998, 0x1280102F,
+ 0x1998, 0x12801030,
+ 0x1998, 0x12801031,
+ 0x1998, 0x12801032,
+ 0x1998, 0x12801033,
+ 0x1998, 0x12801034,
+ 0x1998, 0x12801035,
+ 0x1998, 0x12801036,
+ 0x1998, 0x12801037,
+ 0x1998, 0x12801038,
+ 0x1998, 0x12801039,
+ 0x1998, 0x1280103A,
+ 0x1998, 0x1280103B,
+ 0x1998, 0x1280103C,
+ 0x1998, 0x1280103D,
+ 0x1998, 0x1280103E,
+ 0x1998, 0x1280103F,
+ 0x1998, 0x12801040,
+ 0x1998, 0x12801041,
+ 0x1998, 0x12801042,
+ 0x1998, 0x12801043,
+ 0x1998, 0x12801044,
+ 0x1998, 0x12801045,
+ 0x1998, 0x12801046,
+ 0x1998, 0x12801047,
+ 0x1998, 0x12801048,
+ 0x1998, 0x12801049,
+ 0x1998, 0x12801100,
+ 0x1998, 0x12801101,
+ 0x1998, 0x12801102,
+ 0x1998, 0x12801103,
+ 0x1998, 0x12801104,
+ 0x1998, 0x12801105,
+ 0x1998, 0x12801106,
+ 0x1998, 0x12801107,
+ 0x1998, 0x12801108,
+ 0x1998, 0x12801109,
+ 0x1998, 0x1280110A,
+ 0x1998, 0x1280110B,
+ 0x1998, 0x1280110C,
+ 0x1998, 0x1280110D,
+ 0x1998, 0x1280110E,
+ 0x1998, 0x1280110F,
+ 0x1998, 0x12801110,
+ 0x1998, 0x12801111,
+ 0x1998, 0x12801112,
+ 0x1998, 0x12801113,
+ 0x1998, 0x12801114,
+ 0x1998, 0x12801115,
+ 0x1998, 0x12801116,
+ 0x1998, 0x12801117,
+ 0x1998, 0x12801118,
+ 0x1998, 0x12801119,
+ 0x1998, 0x1280111A,
+ 0x1998, 0x1280111B,
+ 0x1998, 0x1280111C,
+ 0x1998, 0x1280111D,
+ 0x1998, 0x1280111E,
+ 0x1998, 0x1280111F,
+ 0x1998, 0x12801120,
+ 0x1998, 0x12801121,
+ 0x1998, 0x12801122,
+ 0x1998, 0x12801123,
+ 0x1998, 0x1280112C,
+ 0x1998, 0x1280112D,
+ 0x1998, 0x1280112E,
+ 0x1998, 0x1280112F,
+ 0x1998, 0x12801130,
+ 0x1998, 0x12801131,
+ 0x1998, 0x12801132,
+ 0x1998, 0x12801133,
+ 0x1998, 0x12801134,
+ 0x1998, 0x12801135,
+ 0x1998, 0x12801136,
+ 0x1998, 0x12801137,
+ 0x1998, 0x12801138,
+ 0x1998, 0x12801139,
+ 0x1998, 0x1280113A,
+ 0x1998, 0x1280113B,
+ 0x1998, 0x1280113C,
+ 0x1998, 0x1280113D,
+ 0x1998, 0x1280113E,
+ 0x1998, 0x1280113F,
+ 0x1998, 0x12801140,
+ 0x1998, 0x12801141,
+ 0x1998, 0x12801142,
+ 0x1998, 0x12801143,
+ 0x1998, 0x12801144,
+ 0x1998, 0x12801145,
+ 0x1998, 0x12801146,
+ 0x1998, 0x12801147,
+ 0x1998, 0x12801148,
+ 0x1998, 0x12801149,
+ 0x1998, 0x12801200,
+ 0x1998, 0x12801201,
+ 0x1998, 0x12801202,
+ 0x1998, 0x12801203,
+ 0x1998, 0x12801204,
+ 0x1998, 0x12801205,
+ 0x1998, 0x12801206,
+ 0x1998, 0x12801207,
+ 0x1998, 0x12801208,
+ 0x1998, 0x12801209,
+ 0x1998, 0x1280120A,
+ 0x1998, 0x1280120B,
+ 0x1998, 0x1280120C,
+ 0x1998, 0x1280120D,
+ 0x1998, 0x1280120E,
+ 0x1998, 0x1280120F,
+ 0x1998, 0x12801210,
+ 0x1998, 0x12801211,
+ 0x1998, 0x12801212,
+ 0x1998, 0x12801213,
+ 0x1998, 0x12801214,
+ 0x1998, 0x12801215,
+ 0x1998, 0x12801216,
+ 0x1998, 0x12801217,
+ 0x1998, 0x12801218,
+ 0x1998, 0x12801219,
+ 0x1998, 0x1280121A,
+ 0x1998, 0x1280121B,
+ 0x1998, 0x1280121C,
+ 0x1998, 0x1280121D,
+ 0x1998, 0x1280121E,
+ 0x1998, 0x1280121F,
+ 0x1998, 0x12801220,
+ 0x1998, 0x12801221,
+ 0x1998, 0x12801222,
+ 0x1998, 0x12801223,
+ 0x1998, 0x1280122C,
+ 0x1998, 0x1280122D,
+ 0x1998, 0x1280122E,
+ 0x1998, 0x1280122F,
+ 0x1998, 0x12801230,
+ 0x1998, 0x12801231,
+ 0x1998, 0x12801232,
+ 0x1998, 0x12801233,
+ 0x1998, 0x12801234,
+ 0x1998, 0x12801235,
+ 0x1998, 0x12801236,
+ 0x1998, 0x12801237,
+ 0x1998, 0x12801238,
+ 0x1998, 0x12801239,
+ 0x1998, 0x1280123A,
+ 0x1998, 0x1280123B,
+ 0x1998, 0x1280123C,
+ 0x1998, 0x1280123D,
+ 0x1998, 0x1280123E,
+ 0x1998, 0x1280123F,
+ 0x1998, 0x12801240,
+ 0x1998, 0x12801241,
+ 0x1998, 0x12801242,
+ 0x1998, 0x12801243,
+ 0x1998, 0x12801244,
+ 0x1998, 0x12801245,
+ 0x1998, 0x12801246,
+ 0x1998, 0x12801247,
+ 0x1998, 0x12801248,
+ 0x1998, 0x12801249,
+ 0x1998, 0x12801300,
+ 0x1998, 0x12801301,
+ 0x1998, 0x12801302,
+ 0x1998, 0x12801303,
+ 0x1998, 0x12801304,
+ 0x1998, 0x12801305,
+ 0x1998, 0x12801306,
+ 0x1998, 0x12801307,
+ 0x1998, 0x12801308,
+ 0x1998, 0x12801309,
+ 0x1998, 0x1280130A,
+ 0x1998, 0x1280130B,
+ 0x1998, 0x1280130C,
+ 0x1998, 0x1280130D,
+ 0x1998, 0x1280130E,
+ 0x1998, 0x1280130F,
+ 0x1998, 0x12801310,
+ 0x1998, 0x12801311,
+ 0x1998, 0x12801312,
+ 0x1998, 0x12801313,
+ 0x1998, 0x12801314,
+ 0x1998, 0x12801315,
+ 0x1998, 0x12801316,
+ 0x1998, 0x12801317,
+ 0x1998, 0x12801318,
+ 0x1998, 0x12801319,
+ 0x1998, 0x1280131A,
+ 0x1998, 0x1280131B,
+ 0x1998, 0x1280131C,
+ 0x1998, 0x1280131D,
+ 0x1998, 0x1280131E,
+ 0x1998, 0x1280131F,
+ 0x1998, 0x12801320,
+ 0x1998, 0x12801321,
+ 0x1998, 0x12801322,
+ 0x1998, 0x12801323,
+ 0x1998, 0x1280132C,
+ 0x1998, 0x1280132D,
+ 0x1998, 0x1280132E,
+ 0x1998, 0x1280132F,
+ 0x1998, 0x12801330,
+ 0x1998, 0x12801331,
+ 0x1998, 0x12801332,
+ 0x1998, 0x12801333,
+ 0x1998, 0x12801334,
+ 0x1998, 0x12801335,
+ 0x1998, 0x12801336,
+ 0x1998, 0x12801337,
+ 0x1998, 0x12801338,
+ 0x1998, 0x12801339,
+ 0x1998, 0x1280133A,
+ 0x1998, 0x1280133B,
+ 0x1998, 0x1280133C,
+ 0x1998, 0x1280133D,
+ 0x1998, 0x1280133E,
+ 0x1998, 0x1280133F,
+ 0x1998, 0x12801340,
+ 0x1998, 0x12801341,
+ 0x1998, 0x12801342,
+ 0x1998, 0x12801343,
+ 0x1998, 0x12801344,
+ 0x1998, 0x12801345,
+ 0x1998, 0x12801346,
+ 0x1998, 0x12801347,
+ 0x1998, 0x12801348,
+ 0x1998, 0x12801349,
+ 0x19D4, 0x88888888,
+ 0x19D8, 0x00000888,
+ 0xB00, 0xE3100100,
+ 0xB00, 0xE7100100,
+ 0xC60, 0x15808002,
+ 0xC60, 0x01808003,
+ 0xE60, 0x15808002,
+ 0xE60, 0x01808003,
+ 0x1860, 0x15808002,
+ 0x1860, 0x01808003,
+ 0x1A60, 0x15808002,
+ 0x1A60, 0x01808003,
+ 0xB00, 0xE3100100,
+ 0xC5C, 0x0D080058,
+ 0xE5C, 0x0D080058,
+ 0x185C, 0x0D080058,
+ 0x1A5C, 0x0D080058,
+ 0xC5C, 0x0D000058,
+ 0xE5C, 0x0D000058,
+ 0x185C, 0x0D000058,
+ 0x1A5C, 0x0D000058,
+ 0xC60, 0x05808002,
+ 0xC60, 0x0E808003,
+ 0xE60, 0x05808002,
+ 0xE60, 0x0E808003,
+ 0x1860, 0x05808002,
+ 0x1860, 0x0E808003,
+ 0x1A60, 0x05808002,
+ 0x1A60, 0x0E808003,
+ 0xB00, 0xE7100100,
+ 0xB00, 0xE3100100,
+ 0xB00, 0xE3100000,
+ 0x1C38, 0x00000002,
+ 0xA00, 0x00D047C8,
+ 0xA04, 0x46FF800C,
+ 0xA08, 0x8C838300,
+ 0xA0C, 0x2E7E000F,
+ 0xA10, 0x9500BB78,
+ 0xA14, 0x11144028,
+ 0xA18, 0x00881117,
+ 0xA1C, 0x89140F00,
+ 0xA20, 0x1A1B0030,
+ 0xA24, 0x090E1317,
+ 0xA28, 0x00000204,
+ 0xA2C, 0x00900000,
+ 0xA70, 0x101FFF00,
+ 0xA74, 0x00000128,
+ 0xA78, 0x00000900,
+ 0xA7C, 0x225B0606,
+ 0xA80, 0x218075B2,
+ 0xA84, 0x9C1F8C00,
+ 0x1B04, 0xE24628D2,
+ 0x1B10, 0x88010D46,
+ 0x1B14, 0x00000000,
+ 0x1B18, 0x00292903,
+ 0x1B00, 0xF8000000,
+ 0x1B00, 0xF800D000,
+ 0x1B00, 0xF801F000,
+ 0x1B1C, 0xA2123DB2,
+ 0x1B20, 0x07040001,
+ 0x1B24, 0x07060807,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0xA0000000, 0x00000000,
+ 0x1B28, 0xC0060348,
+ 0xB0000000, 0x00000000,
+ 0x1B2C, 0x20000003,
+ 0x1B30, 0x20000000,
+ 0x1B38, 0x20000000,
+ 0x1B3C, 0x20000000,
+ 0x1BD4, 0x00000001,
+ 0x1B94, 0x80000000,
+ 0x1B34, 0x00000000,
+ 0x1B34, 0x00000002,
+ 0x1B34, 0x00000000,
+ 0x1B00, 0xF8000002,
+ 0x1B00, 0xF800D002,
+ 0x1B00, 0xF801F002,
+ 0x1B1C, 0xA2123DB2,
+ 0x1B20, 0x07040001,
+ 0x1B24, 0x07060807,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0xA0000000, 0x00000000,
+ 0x1B28, 0xC0060348,
+ 0xB0000000, 0x00000000,
+ 0x1B2C, 0x20000003,
+ 0x1B30, 0x20000000,
+ 0x1B38, 0x20000000,
+ 0x1B3C, 0x20000000,
+ 0x1BD4, 0x00000001,
+ 0x1B94, 0x80000000,
+ 0x1B34, 0x00000000,
+ 0x1B34, 0x00000002,
+ 0x1B34, 0x00000000,
+ 0x1B00, 0xF8000004,
+ 0x1B00, 0xF800D004,
+ 0x1B00, 0xF801F004,
+ 0x1B1C, 0xA2123DB2,
+ 0x1B20, 0x07040001,
+ 0x1B24, 0x07060807,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0xA0000000, 0x00000000,
+ 0x1B28, 0xC0060348,
+ 0xB0000000, 0x00000000,
+ 0x1B2C, 0x20000003,
+ 0x1B30, 0x20000000,
+ 0x1B38, 0x20000000,
+ 0x1B3C, 0x20000000,
+ 0x1BD4, 0x00000001,
+ 0x1B94, 0x80000000,
+ 0x1B34, 0x00000000,
+ 0x1B34, 0x00000002,
+ 0x1B34, 0x00000000,
+ 0x1B00, 0xF8000006,
+ 0x1B00, 0xF800D006,
+ 0x1B00, 0xF801F006,
+ 0x1B1C, 0xA2123DB2,
+ 0x1B20, 0x07040001,
+ 0x1B24, 0x07060807,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0xA0000000, 0x00000000,
+ 0x1B28, 0xC0060348,
+ 0xB0000000, 0x00000000,
+ 0x1B2C, 0x20000003,
+ 0x1B30, 0x20000000,
+ 0x1B38, 0x20000000,
+ 0x1B3C, 0x20000000,
+ 0x1BD4, 0x00000001,
+ 0x1B94, 0x80000000,
+ 0x1B34, 0x00000000,
+ 0x1B34, 0x00000002,
+ 0x1B34, 0x00000000,
+ 0x1B00, 0xF8000000,
+ 0x1B80, 0x00000007,
+ 0x1B80, 0x09060005,
+ 0x1B80, 0x09060007,
+ 0x1B80, 0x0FFE0015,
+ 0x1B80, 0x0FFE0017,
+ 0x1B80, 0x00240025,
+ 0x1B80, 0x00240027,
+ 0x1B80, 0x00040035,
+ 0x1B80, 0x00040037,
+ 0x1B80, 0x05C00045,
+ 0x1B80, 0x05C00047,
+ 0x1B80, 0x00070055,
+ 0x1B80, 0x00070057,
+ 0x1B80, 0x64000065,
+ 0x1B80, 0x64000067,
+ 0x1B80, 0x00020075,
+ 0x1B80, 0x00020077,
+ 0x1B80, 0x00080085,
+ 0x1B80, 0x00080087,
+ 0x1B80, 0x80000095,
+ 0x1B80, 0x80000097,
+ 0x1B80, 0x090100A5,
+ 0x1B80, 0x090100A7,
+ 0x1B80, 0x0F0200B5,
+ 0x1B80, 0x0F0200B7,
+ 0x1B80, 0x002400C5,
+ 0x1B80, 0x002400C7,
+ 0x1B80, 0x000400D5,
+ 0x1B80, 0x000400D7,
+ 0x1B80, 0x05C000E5,
+ 0x1B80, 0x05C000E7,
+ 0x1B80, 0x000700F5,
+ 0x1B80, 0x000700F7,
+ 0x1B80, 0x64020105,
+ 0x1B80, 0x64020107,
+ 0x1B80, 0x00020115,
+ 0x1B80, 0x00020117,
+ 0x1B80, 0x00040125,
+ 0x1B80, 0x00040127,
+ 0x1B80, 0x4A000135,
+ 0x1B80, 0x4A000137,
+ 0x1B80, 0x4B040145,
+ 0x1B80, 0x4B040147,
+ 0x1B80, 0x85030155,
+ 0x1B80, 0x85030157,
+ 0x1B80, 0x40010165,
+ 0x1B80, 0x40010167,
+ 0x1B80, 0xE0290175,
+ 0x1B80, 0xE0290177,
+ 0x1B80, 0x00040185,
+ 0x1B80, 0x00040187,
+ 0x1B80, 0x4B050195,
+ 0x1B80, 0x4B050197,
+ 0x1B80, 0x860301A5,
+ 0x1B80, 0x860301A7,
+ 0x1B80, 0x400301B5,
+ 0x1B80, 0x400301B7,
+ 0x1B80, 0xE02901C5,
+ 0x1B80, 0xE02901C7,
+ 0x1B80, 0x000401D5,
+ 0x1B80, 0x000401D7,
+ 0x1B80, 0x4B0601E5,
+ 0x1B80, 0x4B0601E7,
+ 0x1B80, 0x870301F5,
+ 0x1B80, 0x870301F7,
+ 0x1B80, 0x40050205,
+ 0x1B80, 0x40050207,
+ 0x1B80, 0xE0290215,
+ 0x1B80, 0xE0290217,
+ 0x1B80, 0x00040225,
+ 0x1B80, 0x00040227,
+ 0x1B80, 0x4B070235,
+ 0x1B80, 0x4B070237,
+ 0x1B80, 0x88030245,
+ 0x1B80, 0x88030247,
+ 0x1B80, 0x40070255,
+ 0x1B80, 0x40070257,
+ 0x1B80, 0xE0290265,
+ 0x1B80, 0xE0290267,
+ 0x1B80, 0x4B000275,
+ 0x1B80, 0x4B000277,
+ 0x1B80, 0x30000285,
+ 0x1B80, 0x30000287,
+ 0x1B80, 0xFE100295,
+ 0x1B80, 0xFE100297,
+ 0x1B80, 0xFF1002A5,
+ 0x1B80, 0xFF1002A7,
+ 0x1B80, 0xE18602B5,
+ 0x1B80, 0xE18602B7,
+ 0x1B80, 0xF00A02C5,
+ 0x1B80, 0xF00A02C7,
+ 0x1B80, 0xF10A02D5,
+ 0x1B80, 0xF10A02D7,
+ 0x1B80, 0xF20A02E5,
+ 0x1B80, 0xF20A02E7,
+ 0x1B80, 0xF30802F5,
+ 0x1B80, 0xF30802F7,
+ 0x1B80, 0xF4070305,
+ 0x1B80, 0xF4070307,
+ 0x1B80, 0xF5060315,
+ 0x1B80, 0xF5060317,
+ 0x1B80, 0xF7060325,
+ 0x1B80, 0xF7060327,
+ 0x1B80, 0xF8050335,
+ 0x1B80, 0xF8050337,
+ 0x1B80, 0xF9040345,
+ 0x1B80, 0xF9040347,
+ 0x1B80, 0x00010355,
+ 0x1B80, 0x00010357,
+ 0x1B80, 0x303B0365,
+ 0x1B80, 0x303B0367,
+ 0x1B80, 0x30500375,
+ 0x1B80, 0x30500377,
+ 0x1B80, 0x305C0385,
+ 0x1B80, 0x305C0387,
+ 0x1B80, 0x31D50395,
+ 0x1B80, 0x31D50397,
+ 0x1B80, 0x31C503A5,
+ 0x1B80, 0x31C503A7,
+ 0x1B80, 0x4D0403B5,
+ 0x1B80, 0x4D0403B7,
+ 0x1B80, 0x2EF003C5,
+ 0x1B80, 0x2EF003C7,
+ 0x1B80, 0x000203D5,
+ 0x1B80, 0x000203D7,
+ 0x1B80, 0x208003E5,
+ 0x1B80, 0x208003E7,
+ 0x1B80, 0x000003F5,
+ 0x1B80, 0x000003F7,
+ 0x1B80, 0x4D000405,
+ 0x1B80, 0x4D000407,
+ 0x1B80, 0x55070415,
+ 0x1B80, 0x55070417,
+ 0x1B80, 0xE1230425,
+ 0x1B80, 0xE1230427,
+ 0x1B80, 0xE1230435,
+ 0x1B80, 0xE1230437,
+ 0x1B80, 0x4D040445,
+ 0x1B80, 0x4D040447,
+ 0x1B80, 0x20800455,
+ 0x1B80, 0x20800457,
+ 0x1B80, 0x84000465,
+ 0x1B80, 0x84000467,
+ 0x1B80, 0x4D000475,
+ 0x1B80, 0x4D000477,
+ 0x1B80, 0x550F0485,
+ 0x1B80, 0x550F0487,
+ 0x1B80, 0xE1230495,
+ 0x1B80, 0xE1230497,
+ 0x1B80, 0x4F0204A5,
+ 0x1B80, 0x4F0204A7,
+ 0x1B80, 0x4E0004B5,
+ 0x1B80, 0x4E0004B7,
+ 0x1B80, 0x530204C5,
+ 0x1B80, 0x530204C7,
+ 0x1B80, 0x520104D5,
+ 0x1B80, 0x520104D7,
+ 0x1B80, 0xE12704E5,
+ 0x1B80, 0xE12704E7,
+ 0x1B80, 0x000104F5,
+ 0x1B80, 0x000104F7,
+ 0x1B80, 0x5C720505,
+ 0x1B80, 0x5C720507,
+ 0x1B80, 0xE1320515,
+ 0x1B80, 0xE1320517,
+ 0x1B80, 0x54E50525,
+ 0x1B80, 0x54E50527,
+ 0x1B80, 0x54BF0535,
+ 0x1B80, 0x54BF0537,
+ 0x1B80, 0x54C50545,
+ 0x1B80, 0x54C50547,
+ 0x1B80, 0x54BE0555,
+ 0x1B80, 0x54BE0557,
+ 0x1B80, 0x54DF0565,
+ 0x1B80, 0x54DF0567,
+ 0x1B80, 0x0BA60575,
+ 0x1B80, 0x0BA60577,
+ 0x1B80, 0xF3130585,
+ 0x1B80, 0xF3130587,
+ 0x1B80, 0xF41E0595,
+ 0x1B80, 0xF41E0597,
+ 0x1B80, 0xF53C05A5,
+ 0x1B80, 0xF53C05A7,
+ 0x1B80, 0x000105B5,
+ 0x1B80, 0x000105B7,
+ 0x1B80, 0x620605C5,
+ 0x1B80, 0x620605C7,
+ 0x1B80, 0x600605D5,
+ 0x1B80, 0x600605D7,
+ 0x1B80, 0xE1A905E5,
+ 0x1B80, 0xE1A905E7,
+ 0x1B80, 0x0C0005F5,
+ 0x1B80, 0x0C0005F7,
+ 0x1B80, 0x5C720605,
+ 0x1B80, 0x5C720607,
+ 0x1B80, 0xE1320615,
+ 0x1B80, 0xE1320617,
+ 0x1B80, 0x5CF10625,
+ 0x1B80, 0x5CF10627,
+ 0x1B80, 0x0C010635,
+ 0x1B80, 0x0C010637,
+ 0x1B80, 0xF2020645,
+ 0x1B80, 0xF2020647,
+ 0x1B80, 0x30D60655,
+ 0x1B80, 0x30D60657,
+ 0x1B80, 0x0AC60665,
+ 0x1B80, 0x0AC60667,
+ 0x1B80, 0xE1B60675,
+ 0x1B80, 0xE1B60677,
+ 0x1B80, 0xE1580685,
+ 0x1B80, 0xE1580687,
+ 0x1B80, 0x54E50695,
+ 0x1B80, 0x54E50697,
+ 0x1B80, 0x000106A5,
+ 0x1B80, 0x000106A7,
+ 0x1B80, 0x560106B5,
+ 0x1B80, 0x560106B7,
+ 0x1B80, 0x5CE206C5,
+ 0x1B80, 0x5CE206C7,
+ 0x1B80, 0x0AE106D5,
+ 0x1B80, 0x0AE106D7,
+ 0x1B80, 0x630C06E5,
+ 0x1B80, 0x630C06E7,
+ 0x1B80, 0xE13F06F5,
+ 0x1B80, 0xE13F06F7,
+ 0x1B80, 0x00270705,
+ 0x1B80, 0x00270707,
+ 0x1B80, 0xE16C0715,
+ 0x1B80, 0xE16C0717,
+ 0x1B80, 0x00020725,
+ 0x1B80, 0x00020727,
+ 0x1B80, 0x002A0735,
+ 0x1B80, 0x002A0737,
+ 0x1B80, 0x07140745,
+ 0x1B80, 0x07140747,
+ 0x1B80, 0x00020755,
+ 0x1B80, 0x00020757,
+ 0x1B80, 0x30C30765,
+ 0x1B80, 0x30C30767,
+ 0x1B80, 0x56010775,
+ 0x1B80, 0x56010777,
+ 0x1B80, 0x5CE20785,
+ 0x1B80, 0x5CE20787,
+ 0x1B80, 0x0AE10795,
+ 0x1B80, 0x0AE10797,
+ 0x1B80, 0x631707A5,
+ 0x1B80, 0x631707A7,
+ 0x1B80, 0xE13F07B5,
+ 0x1B80, 0xE13F07B7,
+ 0x1B80, 0x002507C5,
+ 0x1B80, 0x002507C7,
+ 0x1B80, 0xE16C07D5,
+ 0x1B80, 0xE16C07D7,
+ 0x1B80, 0x000207E5,
+ 0x1B80, 0x000207E7,
+ 0x1B80, 0x630F07F5,
+ 0x1B80, 0x630F07F7,
+ 0x1B80, 0xE13F0805,
+ 0x1B80, 0xE13F0807,
+ 0x1B80, 0x63070815,
+ 0x1B80, 0x63070817,
+ 0x1B80, 0xE13F0825,
+ 0x1B80, 0xE13F0827,
+ 0x1B80, 0x07140835,
+ 0x1B80, 0x07140837,
+ 0x1B80, 0x56000845,
+ 0x1B80, 0x56000847,
+ 0x1B80, 0x5CF20855,
+ 0x1B80, 0x5CF20857,
+ 0x1B80, 0x0AF10865,
+ 0x1B80, 0x0AF10867,
+ 0x1B80, 0x07140875,
+ 0x1B80, 0x07140877,
+ 0x1B80, 0x07140885,
+ 0x1B80, 0x07140887,
+ 0x1B80, 0x630F0895,
+ 0x1B80, 0x630F0897,
+ 0x1B80, 0xE13F08A5,
+ 0x1B80, 0xE13F08A7,
+ 0x1B80, 0x631708B5,
+ 0x1B80, 0x631708B7,
+ 0x1B80, 0xE13F08C5,
+ 0x1B80, 0xE13F08C7,
+ 0x1B80, 0x002508D5,
+ 0x1B80, 0x002508D7,
+ 0x1B80, 0xE16C08E5,
+ 0x1B80, 0xE16C08E7,
+ 0x1B80, 0x000208F5,
+ 0x1B80, 0x000208F7,
+ 0x1B80, 0x30C30905,
+ 0x1B80, 0x30C30907,
+ 0x1B80, 0xE1A90915,
+ 0x1B80, 0xE1A90917,
+ 0x1B80, 0x62060925,
+ 0x1B80, 0x62060927,
+ 0x1B80, 0x60060935,
+ 0x1B80, 0x60060937,
+ 0x1B80, 0xE1160945,
+ 0x1B80, 0xE1160947,
+ 0x1B80, 0x54BE0955,
+ 0x1B80, 0x54BE0957,
+ 0x1B80, 0x56010965,
+ 0x1B80, 0x56010967,
+ 0x1B80, 0x5CE20975,
+ 0x1B80, 0x5CE20977,
+ 0x1B80, 0x0AE10985,
+ 0x1B80, 0x0AE10987,
+ 0x1B80, 0x633A0995,
+ 0x1B80, 0x633A0997,
+ 0x1B80, 0xE13F09A5,
+ 0x1B80, 0xE13F09A7,
+ 0x1B80, 0x633709B5,
+ 0x1B80, 0x633709B7,
+ 0x1B80, 0xE13F09C5,
+ 0x1B80, 0xE13F09C7,
+ 0x1B80, 0x632F09D5,
+ 0x1B80, 0x632F09D7,
+ 0x1B80, 0xE13F09E5,
+ 0x1B80, 0xE13F09E7,
+ 0x1B80, 0x632709F5,
+ 0x1B80, 0x632709F7,
+ 0x1B80, 0xE13F0A05,
+ 0x1B80, 0xE13F0A07,
+ 0x1B80, 0x631F0A15,
+ 0x1B80, 0x631F0A17,
+ 0x1B80, 0xE13F0A25,
+ 0x1B80, 0xE13F0A27,
+ 0x1B80, 0x63170A35,
+ 0x1B80, 0x63170A37,
+ 0x1B80, 0xE13F0A45,
+ 0x1B80, 0xE13F0A47,
+ 0x1B80, 0x630F0A55,
+ 0x1B80, 0x630F0A57,
+ 0x1B80, 0xE13F0A65,
+ 0x1B80, 0xE13F0A67,
+ 0x1B80, 0x63070A75,
+ 0x1B80, 0x63070A77,
+ 0x1B80, 0xE13F0A85,
+ 0x1B80, 0xE13F0A87,
+ 0x1B80, 0xE16C0A95,
+ 0x1B80, 0xE16C0A97,
+ 0x1B80, 0x56000AA5,
+ 0x1B80, 0x56000AA7,
+ 0x1B80, 0x5CF20AB5,
+ 0x1B80, 0x5CF20AB7,
+ 0x1B80, 0x0AF10AC5,
+ 0x1B80, 0x0AF10AC7,
+ 0x1B80, 0xF5040AD5,
+ 0x1B80, 0xF5040AD7,
+ 0x1B80, 0xE13F0AE5,
+ 0x1B80, 0xE13F0AE7,
+ 0x1B80, 0xE16C0AF5,
+ 0x1B80, 0xE16C0AF7,
+ 0x1B80, 0x30B30B05,
+ 0x1B80, 0x30B30B07,
+ 0x1B80, 0x07140B15,
+ 0x1B80, 0x07140B17,
+ 0x1B80, 0x07140B25,
+ 0x1B80, 0x07140B27,
+ 0x1B80, 0x630F0B35,
+ 0x1B80, 0x630F0B37,
+ 0x1B80, 0xE13F0B45,
+ 0x1B80, 0xE13F0B47,
+ 0x1B80, 0x63170B55,
+ 0x1B80, 0x63170B57,
+ 0x1B80, 0xE13F0B65,
+ 0x1B80, 0xE13F0B67,
+ 0x1B80, 0x631F0B75,
+ 0x1B80, 0x631F0B77,
+ 0x1B80, 0xE13F0B85,
+ 0x1B80, 0xE13F0B87,
+ 0x1B80, 0x63270B95,
+ 0x1B80, 0x63270B97,
+ 0x1B80, 0xE13F0BA5,
+ 0x1B80, 0xE13F0BA7,
+ 0x1B80, 0x632F0BB5,
+ 0x1B80, 0x632F0BB7,
+ 0x1B80, 0xE13F0BC5,
+ 0x1B80, 0xE13F0BC7,
+ 0x1B80, 0x63370BD5,
+ 0x1B80, 0x63370BD7,
+ 0x1B80, 0xE13F0BE5,
+ 0x1B80, 0xE13F0BE7,
+ 0x1B80, 0x633A0BF5,
+ 0x1B80, 0x633A0BF7,
+ 0x1B80, 0xE13F0C05,
+ 0x1B80, 0xE13F0C07,
+ 0x1B80, 0xF60B0C15,
+ 0x1B80, 0xF60B0C17,
+ 0x1B80, 0xF7170C25,
+ 0x1B80, 0xF7170C27,
+ 0x1B80, 0x4D300C35,
+ 0x1B80, 0x4D300C37,
+ 0x1B80, 0x57040C45,
+ 0x1B80, 0x57040C47,
+ 0x1B80, 0x57000C55,
+ 0x1B80, 0x57000C57,
+ 0x1B80, 0x96000C65,
+ 0x1B80, 0x96000C67,
+ 0x1B80, 0x57080C75,
+ 0x1B80, 0x57080C77,
+ 0x1B80, 0x57000C85,
+ 0x1B80, 0x57000C87,
+ 0x1B80, 0x95000C95,
+ 0x1B80, 0x95000C97,
+ 0x1B80, 0x4D000CA5,
+ 0x1B80, 0x4D000CA7,
+ 0x1B80, 0x6C070CB5,
+ 0x1B80, 0x6C070CB7,
+ 0x1B80, 0x00010CC5,
+ 0x1B80, 0x00010CC7,
+ 0x1B80, 0x00220CD5,
+ 0x1B80, 0x00220CD7,
+ 0x1B80, 0x06140CE5,
+ 0x1B80, 0x06140CE7,
+ 0x1B80, 0xE16C0CF5,
+ 0x1B80, 0xE16C0CF7,
+ 0x1B80, 0x00020D05,
+ 0x1B80, 0x00020D07,
+ 0x1B80, 0x00250D15,
+ 0x1B80, 0x00250D17,
+ 0x1B80, 0x06140D25,
+ 0x1B80, 0x06140D27,
+ 0x1B80, 0xE16C0D35,
+ 0x1B80, 0xE16C0D37,
+ 0x1B80, 0x00020D45,
+ 0x1B80, 0x00020D47,
+ 0x1B80, 0x00010D55,
+ 0x1B80, 0x00010D57,
+ 0x1B80, 0x00320D65,
+ 0x1B80, 0x00320D67,
+ 0x1B80, 0xE16C0D75,
+ 0x1B80, 0xE16C0D77,
+ 0x1B80, 0x00020D85,
+ 0x1B80, 0x00020D87,
+ 0x1B80, 0xE1860D95,
+ 0x1B80, 0xE1860D97,
+ 0x1B80, 0xE1B60DA5,
+ 0x1B80, 0xE1B60DA7,
+ 0x1B80, 0x5CD10DB5,
+ 0x1B80, 0x5CD10DB7,
+ 0x1B80, 0x673A0DC5,
+ 0x1B80, 0x673A0DC7,
+ 0x1B80, 0xE1230DD5,
+ 0x1B80, 0xE1230DD7,
+ 0x1B80, 0xF80B0DE5,
+ 0x1B80, 0xF80B0DE7,
+ 0x1B80, 0xF9110DF5,
+ 0x1B80, 0xF9110DF7,
+ 0x1B80, 0xE1580E05,
+ 0x1B80, 0xE1580E07,
+ 0x1B80, 0x67370E15,
+ 0x1B80, 0x67370E17,
+ 0x1B80, 0xE1580E25,
+ 0x1B80, 0xE1580E27,
+ 0x1B80, 0x672F0E35,
+ 0x1B80, 0x672F0E37,
+ 0x1B80, 0xE1580E45,
+ 0x1B80, 0xE1580E47,
+ 0x1B80, 0x67270E55,
+ 0x1B80, 0x67270E57,
+ 0x1B80, 0xE1580E65,
+ 0x1B80, 0xE1580E67,
+ 0x1B80, 0x671F0E75,
+ 0x1B80, 0x671F0E77,
+ 0x1B80, 0xE1580E85,
+ 0x1B80, 0xE1580E87,
+ 0x1B80, 0x67170E95,
+ 0x1B80, 0x67170E97,
+ 0x1B80, 0xE1580EA5,
+ 0x1B80, 0xE1580EA7,
+ 0x1B80, 0xF8020EB5,
+ 0x1B80, 0xF8020EB7,
+ 0x1B80, 0x30EE0EC5,
+ 0x1B80, 0x30EE0EC7,
+ 0x1B80, 0xE0D10ED5,
+ 0x1B80, 0xE0D10ED7,
+ 0x1B80, 0x670F0EE5,
+ 0x1B80, 0x670F0EE7,
+ 0x1B80, 0xE1580EF5,
+ 0x1B80, 0xE1580EF7,
+ 0x1B80, 0x67070F05,
+ 0x1B80, 0x67070F07,
+ 0x1B80, 0xE1580F15,
+ 0x1B80, 0xE1580F17,
+ 0x1B80, 0xF9020F25,
+ 0x1B80, 0xF9020F27,
+ 0x1B80, 0x30F50F35,
+ 0x1B80, 0x30F50F37,
+ 0x1B80, 0xE0CD0F45,
+ 0x1B80, 0xE0CD0F47,
+ 0x1B80, 0x06140F55,
+ 0x1B80, 0x06140F57,
+ 0x1B80, 0xE16C0F65,
+ 0x1B80, 0xE16C0F67,
+ 0x1B80, 0x5CF10F75,
+ 0x1B80, 0x5CF10F77,
+ 0x1B80, 0xE1580F85,
+ 0x1B80, 0xE1580F87,
+ 0x1B80, 0x06140F95,
+ 0x1B80, 0x06140F97,
+ 0x1B80, 0xE16C0FA5,
+ 0x1B80, 0xE16C0FA7,
+ 0x1B80, 0xF9020FB5,
+ 0x1B80, 0xF9020FB7,
+ 0x1B80, 0x30FF0FC5,
+ 0x1B80, 0x30FF0FC7,
+ 0x1B80, 0xE0CD0FD5,
+ 0x1B80, 0xE0CD0FD7,
+ 0x1B80, 0x31130FE5,
+ 0x1B80, 0x31130FE7,
+ 0x1B80, 0x670F0FF5,
+ 0x1B80, 0x670F0FF7,
+ 0x1B80, 0xE1581005,
+ 0x1B80, 0xE1581007,
+ 0x1B80, 0x67171015,
+ 0x1B80, 0x67171017,
+ 0x1B80, 0xE1581025,
+ 0x1B80, 0xE1581027,
+ 0x1B80, 0xF8021035,
+ 0x1B80, 0xF8021037,
+ 0x1B80, 0x31071045,
+ 0x1B80, 0x31071047,
+ 0x1B80, 0xE0D11055,
+ 0x1B80, 0xE0D11057,
+ 0x1B80, 0x31131065,
+ 0x1B80, 0x31131067,
+ 0x1B80, 0x670F1075,
+ 0x1B80, 0x670F1077,
+ 0x1B80, 0xE1581085,
+ 0x1B80, 0xE1581087,
+ 0x1B80, 0x671F1095,
+ 0x1B80, 0x671F1097,
+ 0x1B80, 0xE15810A5,
+ 0x1B80, 0xE15810A7,
+ 0x1B80, 0x672710B5,
+ 0x1B80, 0x672710B7,
+ 0x1B80, 0xE15810C5,
+ 0x1B80, 0xE15810C7,
+ 0x1B80, 0x672F10D5,
+ 0x1B80, 0x672F10D7,
+ 0x1B80, 0xE15810E5,
+ 0x1B80, 0xE15810E7,
+ 0x1B80, 0x673710F5,
+ 0x1B80, 0x673710F7,
+ 0x1B80, 0xE1581105,
+ 0x1B80, 0xE1581107,
+ 0x1B80, 0x673A1115,
+ 0x1B80, 0x673A1117,
+ 0x1B80, 0xE1581125,
+ 0x1B80, 0xE1581127,
+ 0x1B80, 0x4D101135,
+ 0x1B80, 0x4D101137,
+ 0x1B80, 0x30C41145,
+ 0x1B80, 0x30C41147,
+ 0x1B80, 0x00011155,
+ 0x1B80, 0x00011157,
+ 0x1B80, 0x6F241165,
+ 0x1B80, 0x6F241167,
+ 0x1B80, 0x6E401175,
+ 0x1B80, 0x6E401177,
+ 0x1B80, 0x6D001185,
+ 0x1B80, 0x6D001187,
+ 0x1B80, 0x55031195,
+ 0x1B80, 0x55031197,
+ 0x1B80, 0x312311A5,
+ 0x1B80, 0x312311A7,
+ 0x1B80, 0x6F1C11B5,
+ 0x1B80, 0x6F1C11B7,
+ 0x1B80, 0x6E4011C5,
+ 0x1B80, 0x6E4011C7,
+ 0x1B80, 0x550B11D5,
+ 0x1B80, 0x550B11D7,
+ 0x1B80, 0x312311E5,
+ 0x1B80, 0x312311E7,
+ 0x1B80, 0x061C11F5,
+ 0x1B80, 0x061C11F7,
+ 0x1B80, 0x54DE1205,
+ 0x1B80, 0x54DE1207,
+ 0x1B80, 0x06DC1215,
+ 0x1B80, 0x06DC1217,
+ 0x1B80, 0x55131225,
+ 0x1B80, 0x55131227,
+ 0x1B80, 0x74011235,
+ 0x1B80, 0x74011237,
+ 0x1B80, 0x74001245,
+ 0x1B80, 0x74001247,
+ 0x1B80, 0x8E001255,
+ 0x1B80, 0x8E001257,
+ 0x1B80, 0x00011265,
+ 0x1B80, 0x00011267,
+ 0x1B80, 0x57021275,
+ 0x1B80, 0x57021277,
+ 0x1B80, 0x57001285,
+ 0x1B80, 0x57001287,
+ 0x1B80, 0x97001295,
+ 0x1B80, 0x97001297,
+ 0x1B80, 0x000112A5,
+ 0x1B80, 0x000112A7,
+ 0x1B80, 0x54BF12B5,
+ 0x1B80, 0x54BF12B7,
+ 0x1B80, 0x54C112C5,
+ 0x1B80, 0x54C112C7,
+ 0x1B80, 0x54A212D5,
+ 0x1B80, 0x54A212D7,
+ 0x1B80, 0x54C012E5,
+ 0x1B80, 0x54C012E7,
+ 0x1B80, 0x54A112F5,
+ 0x1B80, 0x54A112F7,
+ 0x1B80, 0x54DF1305,
+ 0x1B80, 0x54DF1307,
+ 0x1B80, 0x00011315,
+ 0x1B80, 0x00011317,
+ 0x1B80, 0x55001325,
+ 0x1B80, 0x55001327,
+ 0x1B80, 0xE1231335,
+ 0x1B80, 0xE1231337,
+ 0x1B80, 0x54811345,
+ 0x1B80, 0x54811347,
+ 0x1B80, 0xE1231355,
+ 0x1B80, 0xE1231357,
+ 0x1B80, 0x54801365,
+ 0x1B80, 0x54801367,
+ 0x1B80, 0x002A1375,
+ 0x1B80, 0x002A1377,
+ 0x1B80, 0xE12B1385,
+ 0x1B80, 0xE12B1387,
+ 0x1B80, 0xE1231395,
+ 0x1B80, 0xE1231397,
+ 0x1B80, 0x548013A5,
+ 0x1B80, 0x548013A7,
+ 0x1B80, 0xE17213B5,
+ 0x1B80, 0xE17213B7,
+ 0x1B80, 0xBF3013C5,
+ 0x1B80, 0xBF3013C7,
+ 0x1B80, 0x000213D5,
+ 0x1B80, 0x000213D7,
+ 0x1B80, 0x302813E5,
+ 0x1B80, 0x302813E7,
+ 0x1B80, 0x4F7813F5,
+ 0x1B80, 0x4F7813F7,
+ 0x1B80, 0x4E001405,
+ 0x1B80, 0x4E001407,
+ 0x1B80, 0x53871415,
+ 0x1B80, 0x53871417,
+ 0x1B80, 0x52F11425,
+ 0x1B80, 0x52F11427,
+ 0x1B80, 0xE1161435,
+ 0x1B80, 0xE1161437,
+ 0x1B80, 0xE11B1445,
+ 0x1B80, 0xE11B1447,
+ 0x1B80, 0xE11F1455,
+ 0x1B80, 0xE11F1457,
+ 0x1B80, 0xE1271465,
+ 0x1B80, 0xE1271467,
+ 0x1B80, 0x54811475,
+ 0x1B80, 0x54811477,
+ 0x1B80, 0xE1161485,
+ 0x1B80, 0xE1161487,
+ 0x1B80, 0xE11B1495,
+ 0x1B80, 0xE11B1497,
+ 0x1B80, 0xE11F14A5,
+ 0x1B80, 0xE11F14A7,
+ 0x1B80, 0xE12714B5,
+ 0x1B80, 0xE12714B7,
+ 0x1B80, 0x548014C5,
+ 0x1B80, 0x548014C7,
+ 0x1B80, 0x002A14D5,
+ 0x1B80, 0x002A14D7,
+ 0x1B80, 0xE12B14E5,
+ 0x1B80, 0xE12B14E7,
+ 0x1B80, 0xE11614F5,
+ 0x1B80, 0xE11614F7,
+ 0x1B80, 0xE11B1505,
+ 0x1B80, 0xE11B1507,
+ 0x1B80, 0xE11F1515,
+ 0x1B80, 0xE11F1517,
+ 0x1B80, 0xE1271525,
+ 0x1B80, 0xE1271527,
+ 0x1B80, 0x54801535,
+ 0x1B80, 0x54801537,
+ 0x1B80, 0xE1721545,
+ 0x1B80, 0xE1721547,
+ 0x1B80, 0xBF171555,
+ 0x1B80, 0xBF171557,
+ 0x1B80, 0x00021565,
+ 0x1B80, 0x00021567,
+ 0x1B80, 0x30281575,
+ 0x1B80, 0x30281577,
+ 0x1B80, 0x06141585,
+ 0x1B80, 0x06141587,
+ 0x1B80, 0x73201595,
+ 0x1B80, 0x73201597,
+ 0x1B80, 0x720015A5,
+ 0x1B80, 0x720015A7,
+ 0x1B80, 0x710015B5,
+ 0x1B80, 0x710015B7,
+ 0x1B80, 0x550115C5,
+ 0x1B80, 0x550115C7,
+ 0x1B80, 0xE12315D5,
+ 0x1B80, 0xE12315D7,
+ 0x1B80, 0xE12715E5,
+ 0x1B80, 0xE12715E7,
+ 0x1B80, 0x548115F5,
+ 0x1B80, 0x548115F7,
+ 0x1B80, 0xE1231605,
+ 0x1B80, 0xE1231607,
+ 0x1B80, 0xE1271615,
+ 0x1B80, 0xE1271617,
+ 0x1B80, 0x54801625,
+ 0x1B80, 0x54801627,
+ 0x1B80, 0x002A1635,
+ 0x1B80, 0x002A1637,
+ 0x1B80, 0xE12B1645,
+ 0x1B80, 0xE12B1647,
+ 0x1B80, 0xE1231655,
+ 0x1B80, 0xE1231657,
+ 0x1B80, 0xE1271665,
+ 0x1B80, 0xE1271667,
+ 0x1B80, 0x54801675,
+ 0x1B80, 0x54801677,
+ 0x1B80, 0xE1721685,
+ 0x1B80, 0xE1721687,
+ 0x1B80, 0xBF031695,
+ 0x1B80, 0xBF031697,
+ 0x1B80, 0x000216A5,
+ 0x1B80, 0x000216A7,
+ 0x1B80, 0x302816B5,
+ 0x1B80, 0x302816B7,
+ 0x1B80, 0x54BF16C5,
+ 0x1B80, 0x54BF16C7,
+ 0x1B80, 0x54C516D5,
+ 0x1B80, 0x54C516D7,
+ 0x1B80, 0x050A16E5,
+ 0x1B80, 0x050A16E7,
+ 0x1B80, 0x071416F5,
+ 0x1B80, 0x071416F7,
+ 0x1B80, 0x54DF1705,
+ 0x1B80, 0x54DF1707,
+ 0x1B80, 0x00011715,
+ 0x1B80, 0x00011717,
+ 0x1B80, 0x54BF1725,
+ 0x1B80, 0x54BF1727,
+ 0x1B80, 0x54C01735,
+ 0x1B80, 0x54C01737,
+ 0x1B80, 0x54A31745,
+ 0x1B80, 0x54A31747,
+ 0x1B80, 0x54C11755,
+ 0x1B80, 0x54C11757,
+ 0x1B80, 0x54A41765,
+ 0x1B80, 0x54A41767,
+ 0x1B80, 0x4C831775,
+ 0x1B80, 0x4C831777,
+ 0x1B80, 0x4C031785,
+ 0x1B80, 0x4C031787,
+ 0x1B80, 0xBF0B1795,
+ 0x1B80, 0xBF0B1797,
+ 0x1B80, 0x54C217A5,
+ 0x1B80, 0x54C217A7,
+ 0x1B80, 0x54A417B5,
+ 0x1B80, 0x54A417B7,
+ 0x1B80, 0x4C8517C5,
+ 0x1B80, 0x4C8517C7,
+ 0x1B80, 0x4C0517D5,
+ 0x1B80, 0x4C0517D7,
+ 0x1B80, 0xBF0617E5,
+ 0x1B80, 0xBF0617E7,
+ 0x1B80, 0x54C117F5,
+ 0x1B80, 0x54C117F7,
+ 0x1B80, 0x54A31805,
+ 0x1B80, 0x54A31807,
+ 0x1B80, 0x4C861815,
+ 0x1B80, 0x4C861817,
+ 0x1B80, 0x4C061825,
+ 0x1B80, 0x4C061827,
+ 0x1B80, 0xBF011835,
+ 0x1B80, 0xBF011837,
+ 0x1B80, 0x54DF1845,
+ 0x1B80, 0x54DF1847,
+ 0x1B80, 0x00011855,
+ 0x1B80, 0x00011857,
+ 0x1B80, 0x00071865,
+ 0x1B80, 0x00071867,
+ 0x1B80, 0x54011875,
+ 0x1B80, 0x54011877,
+ 0x1B80, 0x00041885,
+ 0x1B80, 0x00041887,
+ 0x1B80, 0x56001895,
+ 0x1B80, 0x56001897,
+ 0x1B80, 0x5CF218A5,
+ 0x1B80, 0x5CF218A7,
+ 0x1B80, 0x630718B5,
+ 0x1B80, 0x630718B7,
+ 0x1B80, 0x620418C5,
+ 0x1B80, 0x620418C7,
+ 0x1B80, 0x610018D5,
+ 0x1B80, 0x610018D7,
+ 0x1B80, 0x670718E5,
+ 0x1B80, 0x670718E7,
+ 0x1B80, 0x660618F5,
+ 0x1B80, 0x660618F7,
+ 0x1B80, 0x6F201905,
+ 0x1B80, 0x6F201907,
+ 0x1B80, 0x6E001915,
+ 0x1B80, 0x6E001917,
+ 0x1B80, 0x6D001925,
+ 0x1B80, 0x6D001927,
+ 0x1B80, 0x6C031935,
+ 0x1B80, 0x6C031937,
+ 0x1B80, 0x73201945,
+ 0x1B80, 0x73201947,
+ 0x1B80, 0x72001955,
+ 0x1B80, 0x72001957,
+ 0x1B80, 0x71001965,
+ 0x1B80, 0x71001967,
+ 0x1B80, 0x7B201975,
+ 0x1B80, 0x7B201977,
+ 0x1B80, 0x7A001985,
+ 0x1B80, 0x7A001987,
+ 0x1B80, 0x79001995,
+ 0x1B80, 0x79001997,
+ 0x1B80, 0x7F2019A5,
+ 0x1B80, 0x7F2019A7,
+ 0x1B80, 0x7E0019B5,
+ 0x1B80, 0x7E0019B7,
+ 0x1B80, 0x7D0019C5,
+ 0x1B80, 0x7D0019C7,
+ 0x1B80, 0x090119D5,
+ 0x1B80, 0x090119D7,
+ 0x1B80, 0x0AC619E5,
+ 0x1B80, 0x0AC619E7,
+ 0x1B80, 0x0BA619F5,
+ 0x1B80, 0x0BA619F7,
+ 0x1B80, 0x0C011A05,
+ 0x1B80, 0x0C011A07,
+ 0x1B80, 0x0D021A15,
+ 0x1B80, 0x0D021A17,
+ 0x1B80, 0x0E041A25,
+ 0x1B80, 0x0E041A27,
+ 0x1B80, 0x0FFF1A35,
+ 0x1B80, 0x0FFF1A37,
+ 0x1B80, 0x4D041A45,
+ 0x1B80, 0x4D041A47,
+ 0x1B80, 0x28F81A55,
+ 0x1B80, 0x28F81A57,
+ 0x1B80, 0xE0001A65,
+ 0x1B80, 0xE0001A67,
+ 0x1B80, 0x4D001A75,
+ 0x1B80, 0x4D001A77,
+ 0x1B80, 0x00011A85,
+ 0x1B80, 0x00011A87,
+ 0x1B80, 0x4D041A95,
+ 0x1B80, 0x4D041A97,
+ 0x1B80, 0x2EF81AA5,
+ 0x1B80, 0x2EF81AA7,
+ 0x1B80, 0x00021AB5,
+ 0x1B80, 0x00021AB7,
+ 0x1B80, 0x23031AC5,
+ 0x1B80, 0x23031AC7,
+ 0x1B80, 0x00001AD5,
+ 0x1B80, 0x00001AD7,
+ 0x1B80, 0x23131AE5,
+ 0x1B80, 0x23131AE7,
+ 0x1B80, 0xE77F1AF5,
+ 0x1B80, 0xE77F1AF7,
+ 0x1B80, 0x232F1B05,
+ 0x1B80, 0x232F1B07,
+ 0x1B80, 0xEFBF1B15,
+ 0x1B80, 0xEFBF1B17,
+ 0x1B80, 0x2EF01B25,
+ 0x1B80, 0x2EF01B27,
+ 0x1B80, 0x00021B35,
+ 0x1B80, 0x00021B37,
+ 0x1B80, 0x4D001B45,
+ 0x1B80, 0x4D001B47,
+ 0x1B80, 0x00011B55,
+ 0x1B80, 0x00011B57,
+ 0x1B80, 0x4D041B65,
+ 0x1B80, 0x4D041B67,
+ 0x1B80, 0x2EF81B75,
+ 0x1B80, 0x2EF81B77,
+ 0x1B80, 0x00021B85,
+ 0x1B80, 0x00021B87,
+ 0x1B80, 0x23031B95,
+ 0x1B80, 0x23031B97,
+ 0x1B80, 0x00001BA5,
+ 0x1B80, 0x00001BA7,
+ 0x1B80, 0x23131BB5,
+ 0x1B80, 0x23131BB7,
+ 0x1B80, 0xE77F1BC5,
+ 0x1B80, 0xE77F1BC7,
+ 0x1B80, 0x232F1BD5,
+ 0x1B80, 0x232F1BD7,
+ 0x1B80, 0xE79F1BE5,
+ 0x1B80, 0xE79F1BE7,
+ 0x1B80, 0x2EF01BF5,
+ 0x1B80, 0x2EF01BF7,
+ 0x1B80, 0x00021C05,
+ 0x1B80, 0x00021C07,
+ 0x1B80, 0x28F81C15,
+ 0x1B80, 0x28F81C17,
+ 0x1B80, 0x80001C25,
+ 0x1B80, 0x80001C27,
+ 0x1B80, 0x4D001C35,
+ 0x1B80, 0x4D001C37,
+ 0x1B80, 0x00011C45,
+ 0x1B80, 0x00011C47,
+ 0x1B80, 0x00041C55,
+ 0x1B80, 0x00041C57,
+ 0x1B80, 0x6BC01C65,
+ 0x1B80, 0x6BC01C67,
+ 0x1B80, 0x4D041C75,
+ 0x1B80, 0x4D041C77,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x68241C85,
+ 0x1B80, 0x68241C87,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x68241C85,
+ 0x1B80, 0x68241C87,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x68241C85,
+ 0x1B80, 0x68241C87,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x68241C85,
+ 0x1B80, 0x68241C87,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x68241C85,
+ 0x1B80, 0x68241C87,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x68241C85,
+ 0x1B80, 0x68241C87,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x68241C85,
+ 0x1B80, 0x68241C87,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x68241C85,
+ 0x1B80, 0x68241C87,
+ 0xA0000000, 0x00000000,
+ 0x1B80, 0x68481C85,
+ 0x1B80, 0x68481C87,
+ 0xB0000000, 0x00000000,
+ 0x1B80, 0x66061C95,
+ 0x1B80, 0x66061C97,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x650C1CA5,
+ 0x1B80, 0x650C1CA7,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x650C1CA5,
+ 0x1B80, 0x650C1CA7,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x650C1CA5,
+ 0x1B80, 0x650C1CA7,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x650C1CA5,
+ 0x1B80, 0x650C1CA7,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x650C1CA5,
+ 0x1B80, 0x650C1CA7,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x650C1CA5,
+ 0x1B80, 0x650C1CA7,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x650C1CA5,
+ 0x1B80, 0x650C1CA7,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x650C1CA5,
+ 0x1B80, 0x650C1CA7,
+ 0xA0000000, 0x00000000,
+ 0x1B80, 0x65041CA5,
+ 0x1B80, 0x65041CA7,
+ 0xB0000000, 0x00000000,
+ 0x1B80, 0x64471CB5,
+ 0x1B80, 0x64471CB7,
+ 0x1B80, 0x23411CC5,
+ 0x1B80, 0x23411CC7,
+ 0x1B80, 0x100E1CD5,
+ 0x1B80, 0x100E1CD7,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60101CE5,
+ 0x1B80, 0x60101CE7,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60101CE5,
+ 0x1B80, 0x60101CE7,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60101CE5,
+ 0x1B80, 0x60101CE7,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60101CE5,
+ 0x1B80, 0x60101CE7,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60101CE5,
+ 0x1B80, 0x60101CE7,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60101CE5,
+ 0x1B80, 0x60101CE7,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60101CE5,
+ 0x1B80, 0x60101CE7,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60101CE5,
+ 0x1B80, 0x60101CE7,
+ 0xA0000000, 0x00000000,
+ 0x1B80, 0x60011CE5,
+ 0x1B80, 0x60011CE7,
+ 0xB0000000, 0x00000000,
+ 0x1B80, 0x23411CF5,
+ 0x1B80, 0x23411CF7,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60811D05,
+ 0x1B80, 0x60811D07,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60811D05,
+ 0x1B80, 0x60811D07,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60811D05,
+ 0x1B80, 0x60811D07,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60811D05,
+ 0x1B80, 0x60811D07,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60811D05,
+ 0x1B80, 0x60811D07,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60811D05,
+ 0x1B80, 0x60811D07,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60811D05,
+ 0x1B80, 0x60811D07,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60811D05,
+ 0x1B80, 0x60811D07,
+ 0xA0000000, 0x00000000,
+ 0x1B80, 0x60611D05,
+ 0x1B80, 0x60611D07,
+ 0xB0000000, 0x00000000,
+ 0x1B80, 0x23411D15,
+ 0x1B80, 0x23411D17,
+ 0x1B80, 0x70E11D25,
+ 0x1B80, 0x70E11D27,
+ 0x1B80, 0x4D001D35,
+ 0x1B80, 0x4D001D37,
+ 0x1B80, 0x00011D45,
+ 0x1B80, 0x00011D47,
+ 0x1B80, 0x00041D55,
+ 0x1B80, 0x00041D57,
+ 0x1B80, 0x6B401D65,
+ 0x1B80, 0x6B401D67,
+ 0x1B80, 0x4D041D75,
+ 0x1B80, 0x4D041D77,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x68241D85,
+ 0x1B80, 0x68241D87,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x68241D85,
+ 0x1B80, 0x68241D87,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x68241D85,
+ 0x1B80, 0x68241D87,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x68241D85,
+ 0x1B80, 0x68241D87,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x68241D85,
+ 0x1B80, 0x68241D87,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x68241D85,
+ 0x1B80, 0x68241D87,
+ 0xA0000000, 0x00000000,
+ 0x1B80, 0x68481D85,
+ 0x1B80, 0x68481D87,
+ 0xB0000000, 0x00000000,
+ 0x1B80, 0x66061D95,
+ 0x1B80, 0x66061D97,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x65081DA5,
+ 0x1B80, 0x65081DA7,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x65181DA5,
+ 0x1B80, 0x65181DA7,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x65181DA5,
+ 0x1B80, 0x65181DA7,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x65181DA5,
+ 0x1B80, 0x65181DA7,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x65181DA5,
+ 0x1B80, 0x65181DA7,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x65081DA5,
+ 0x1B80, 0x65081DA7,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x65181DA5,
+ 0x1B80, 0x65181DA7,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x65181DA5,
+ 0x1B80, 0x65181DA7,
+ 0xA0000000, 0x00000000,
+ 0x1B80, 0x65081DA5,
+ 0x1B80, 0x65081DA7,
+ 0xB0000000, 0x00000000,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x64481DB5,
+ 0x1B80, 0x64481DB7,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x64481DB5,
+ 0x1B80, 0x64481DB7,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x64481DB5,
+ 0x1B80, 0x64481DB7,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x64481DB5,
+ 0x1B80, 0x64481DB7,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x64481DB5,
+ 0x1B80, 0x64481DB7,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x64481DB5,
+ 0x1B80, 0x64481DB7,
+ 0xA0000000, 0x00000000,
+ 0x1B80, 0x64471DB5,
+ 0x1B80, 0x64471DB7,
+ 0xB0000000, 0x00000000,
+ 0x1B80, 0x23411DC5,
+ 0x1B80, 0x23411DC7,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x11E41DD5,
+ 0x1B80, 0x11E41DD7,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x11E81DD5,
+ 0x1B80, 0x11E81DD7,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x11E81DD5,
+ 0x1B80, 0x11E81DD7,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x11E81DD5,
+ 0x1B80, 0x11E81DD7,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x11E81DD5,
+ 0x1B80, 0x11E81DD7,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x11E41DD5,
+ 0x1B80, 0x11E41DD7,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x11E81DD5,
+ 0x1B80, 0x11E81DD7,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x11E81DD5,
+ 0x1B80, 0x11E81DD7,
+ 0xA0000000, 0x00000000,
+ 0x1B80, 0x11E41DD5,
+ 0x1B80, 0x11E41DD7,
+ 0xB0000000, 0x00000000,
+ 0x1B80, 0x60011DE5,
+ 0x1B80, 0x60011DE7,
+ 0x1B80, 0x23411DF5,
+ 0x1B80, 0x23411DF7,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60E11E05,
+ 0x1B80, 0x60E11E07,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x61E11E05,
+ 0x1B80, 0x61E11E07,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x61E11E05,
+ 0x1B80, 0x61E11E07,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x61E11E05,
+ 0x1B80, 0x61E11E07,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x61E11E05,
+ 0x1B80, 0x61E11E07,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60E11E05,
+ 0x1B80, 0x60E11E07,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x61E11E05,
+ 0x1B80, 0x61E11E07,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x61E11E05,
+ 0x1B80, 0x61E11E07,
+ 0xA0000000, 0x00000000,
+ 0x1B80, 0x60E11E05,
+ 0x1B80, 0x60E11E07,
+ 0xB0000000, 0x00000000,
+ 0x1B80, 0x23411E15,
+ 0x1B80, 0x23411E17,
+ 0x1B80, 0x70611E25,
+ 0x1B80, 0x70611E27,
+ 0x1B80, 0x4D001E35,
+ 0x1B80, 0x4D001E37,
+ 0x1B80, 0x00011E45,
+ 0x1B80, 0x00011E47,
+ 0x1B80, 0x00001E55,
+ 0x1B80, 0x00001E57,
+ 0x1B80, 0x00001E65,
+ 0x1B80, 0x00001E67,
+ 0x1B80, 0x00001E75,
+ 0x1B80, 0x00001E77,
+ 0x1B80, 0x00001E85,
+ 0x1B80, 0x00001E87,
+ 0x1B80, 0x00001E95,
+ 0x1B80, 0x00001E97,
+ 0x1B80, 0x00001EA5,
+ 0x1B80, 0x00001EA7,
+ 0x1B80, 0x00001EB5,
+ 0x1B80, 0x00001EB7,
+ 0x1B80, 0x00001EC5,
+ 0x1B80, 0x00001EC7,
+ 0x1B80, 0x00001ED5,
+ 0x1B80, 0x00001ED7,
+ 0x1B80, 0x00001EE5,
+ 0x1B80, 0x00001EE7,
+ 0x1B80, 0x00001EF5,
+ 0x1B80, 0x00001EF7,
+ 0x1B80, 0x00001F05,
+ 0x1B80, 0x00001F07,
+ 0x1B80, 0x00001F15,
+ 0x1B80, 0x00001F17,
+ 0x1B80, 0x00001F25,
+ 0x1B80, 0x00001F27,
+ 0x1B80, 0x00001F35,
+ 0x1B80, 0x00001F37,
+ 0x1B80, 0x00001F45,
+ 0x1B80, 0x00001F47,
+ 0x1B80, 0x00001F55,
+ 0x1B80, 0x00001F57,
+ 0x1B80, 0x00001F65,
+ 0x1B80, 0x00001F67,
+ 0x1B80, 0x00001F75,
+ 0x1B80, 0x00001F77,
+ 0x1B80, 0x00001F85,
+ 0x1B80, 0x00001F87,
+ 0x1B80, 0x00001F95,
+ 0x1B80, 0x00001F97,
+ 0x1B80, 0x00001FA5,
+ 0x1B80, 0x00001FA7,
+ 0x1B80, 0x00001FB5,
+ 0x1B80, 0x00001FB7,
+ 0x1B80, 0x00001FC5,
+ 0x1B80, 0x00001FC7,
+ 0x1B80, 0x00001FD5,
+ 0x1B80, 0x00001FD7,
+ 0x1B80, 0x00001FE5,
+ 0x1B80, 0x00001FE7,
+ 0x1B80, 0x00001FF5,
+ 0x1B80, 0x00001FF7,
+ 0x1B80, 0x00000006,
+ 0x1B80, 0x00000002,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8814a_bb, rtw_phy_cfg_bb);
+
+static const struct rtw_phy_pg_cfg_pair rtw8814a_bb_pg[] = {
+ { 0, 0, 0, 0x00000c20, 0xffffffff, 0x34343434, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x34343434, },
+ { 0, 0, 0, 0x00000c28, 0xffffffff, 0x30323434, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34343434, },
+ { 0, 0, 0, 0x00000c30, 0xffffffff, 0x28303234, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x32323232, },
+ { 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283032, },
+ { 0, 0, 2, 0x00000cd8, 0xffffffff, 0x30303030, },
+ { 0, 0, 2, 0x00000cdc, 0xffffffff, 0x24262830, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34343434, },
+ { 0, 0, 0, 0x00000c40, 0xffffffff, 0x28303234, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x32322426, },
+ { 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323232, },
+ { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, },
+ { 0, 0, 2, 0x00000ce0, 0xffffffff, 0x30303030, },
+ { 0, 0, 2, 0x00000ce4, 0xffffffff, 0x24262830, },
+ { 0, 0, 2, 0x00000ce8, 0x0000ffff, 0x20222222, },
+ { 0, 1, 0, 0x00000e20, 0xffffffff, 0x34343434, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x34343434, },
+ { 0, 1, 0, 0x00000e28, 0xffffffff, 0x30323434, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34343434, },
+ { 0, 1, 0, 0x00000e30, 0xffffffff, 0x28303234, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x32323232, },
+ { 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283032, },
+ { 0, 1, 2, 0x00000ed8, 0xffffffff, 0x30303030, },
+ { 0, 1, 2, 0x00000edc, 0xffffffff, 0x24262830, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34343434, },
+ { 0, 1, 0, 0x00000e40, 0xffffffff, 0x28303234, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x32322426, },
+ { 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323232, },
+ { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628, },
+ { 0, 1, 2, 0x00000ee0, 0xffffffff, 0x30303030, },
+ { 0, 1, 2, 0x00000ee4, 0xffffffff, 0x24262830, },
+ { 0, 1, 2, 0x00000ee8, 0x0000ffff, 0x20222222, },
+ { 0, 2, 0, 0x00001820, 0xffffffff, 0x34343434, },
+ { 0, 2, 0, 0x00001824, 0xffffffff, 0x34343434, },
+ { 0, 2, 0, 0x00001828, 0xffffffff, 0x30323434, },
+ { 0, 2, 0, 0x0000182c, 0xffffffff, 0x34343434, },
+ { 0, 2, 0, 0x00001830, 0xffffffff, 0x28303234, },
+ { 0, 2, 1, 0x00001834, 0xffffffff, 0x32323232, },
+ { 0, 2, 1, 0x00001838, 0xffffffff, 0x26283032, },
+ { 0, 2, 2, 0x000018d8, 0xffffffff, 0x30303030, },
+ { 0, 2, 2, 0x000018dc, 0xffffffff, 0x24262830, },
+ { 0, 2, 0, 0x0000183c, 0xffffffff, 0x34343434, },
+ { 0, 2, 0, 0x00001840, 0xffffffff, 0x28303234, },
+ { 0, 2, 0, 0x00001844, 0xffffffff, 0x32322426, },
+ { 0, 2, 1, 0x00001848, 0xffffffff, 0x30323232, },
+ { 0, 2, 1, 0x0000184c, 0xffffffff, 0x22242628, },
+ { 0, 2, 2, 0x000018e0, 0xffffffff, 0x30303030, },
+ { 0, 2, 2, 0x000018e4, 0xffffffff, 0x24262830, },
+ { 0, 2, 2, 0x000018e8, 0x0000ffff, 0x20222222, },
+ { 0, 3, 0, 0x00001a20, 0xffffffff, 0x34343434, },
+ { 0, 3, 0, 0x00001a24, 0xffffffff, 0x34343434, },
+ { 0, 3, 0, 0x00001a28, 0xffffffff, 0x30323434, },
+ { 0, 3, 0, 0x00001a2c, 0xffffffff, 0x34343434, },
+ { 0, 3, 0, 0x00001a30, 0xffffffff, 0x28303234, },
+ { 0, 3, 1, 0x00001a34, 0xffffffff, 0x32323232, },
+ { 0, 3, 1, 0x00001a38, 0xffffffff, 0x26283032, },
+ { 0, 3, 2, 0x00001ad8, 0xffffffff, 0x30303030, },
+ { 0, 3, 2, 0x00001adc, 0xffffffff, 0x24262830, },
+ { 0, 3, 0, 0x00001a3c, 0xffffffff, 0x34343434, },
+ { 0, 3, 0, 0x00001a40, 0xffffffff, 0x28303234, },
+ { 0, 3, 0, 0x00001a44, 0xffffffff, 0x32322426, },
+ { 0, 3, 1, 0x00001a48, 0xffffffff, 0x30323232, },
+ { 0, 3, 1, 0x00001a4c, 0xffffffff, 0x22242628, },
+ { 0, 3, 2, 0x00001ae0, 0xffffffff, 0x30303030, },
+ { 0, 3, 2, 0x00001ae4, 0xffffffff, 0x24262830, },
+ { 0, 3, 2, 0x00001ae8, 0x0000ffff, 0x20222222, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x34343434, },
+ { 1, 0, 0, 0x00000c28, 0xffffffff, 0x30323434, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x34343434, },
+ { 1, 0, 0, 0x00000c30, 0xffffffff, 0x28303234, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x32323232, },
+ { 1, 0, 1, 0x00000c38, 0xffffffff, 0x26283032, },
+ { 1, 0, 2, 0x00000cd8, 0xffffffff, 0x30303030, },
+ { 1, 0, 2, 0x00000cdc, 0xffffffff, 0x24262830, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x34343434, },
+ { 1, 0, 0, 0x00000c40, 0xffffffff, 0x28303234, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x32322426, },
+ { 1, 0, 1, 0x00000c48, 0xffffffff, 0x30323232, },
+ { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, },
+ { 1, 0, 2, 0x00000ce0, 0xffffffff, 0x30303030, },
+ { 1, 0, 2, 0x00000ce4, 0xffffffff, 0x24262830, },
+ { 1, 0, 2, 0x00000ce8, 0x0000ffff, 0x20222222, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x34343434, },
+ { 1, 1, 0, 0x00000e28, 0xffffffff, 0x30323434, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x34343434, },
+ { 1, 1, 0, 0x00000e30, 0xffffffff, 0x28303234, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x32323232, },
+ { 1, 1, 1, 0x00000e38, 0xffffffff, 0x26283032, },
+ { 1, 1, 2, 0x00000ed8, 0xffffffff, 0x30303030, },
+ { 1, 1, 2, 0x00000edc, 0xffffffff, 0x24262830, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x34343434, },
+ { 1, 1, 0, 0x00000e40, 0xffffffff, 0x28303234, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x32322426, },
+ { 1, 1, 1, 0x00000e48, 0xffffffff, 0x30323232, },
+ { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628, },
+ { 1, 1, 2, 0x00000ee0, 0xffffffff, 0x30303030, },
+ { 1, 1, 2, 0x00000ee4, 0xffffffff, 0x24262830, },
+ { 1, 1, 2, 0x00000ee8, 0x0000ffff, 0x20222222, },
+ { 1, 2, 0, 0x00001824, 0xffffffff, 0x34343434, },
+ { 1, 2, 0, 0x00001828, 0xffffffff, 0x30323434, },
+ { 1, 2, 0, 0x0000182c, 0xffffffff, 0x34343434, },
+ { 1, 2, 0, 0x00001830, 0xffffffff, 0x28303234, },
+ { 1, 2, 1, 0x00001834, 0xffffffff, 0x32323232, },
+ { 1, 2, 1, 0x00001838, 0xffffffff, 0x26283032, },
+ { 1, 2, 2, 0x000018d8, 0xffffffff, 0x30303030, },
+ { 1, 2, 2, 0x000018dc, 0xffffffff, 0x24262830, },
+ { 1, 2, 0, 0x0000183c, 0xffffffff, 0x34343434, },
+ { 1, 2, 0, 0x00001840, 0xffffffff, 0x28303234, },
+ { 1, 2, 0, 0x00001844, 0xffffffff, 0x32322426, },
+ { 1, 2, 1, 0x00001848, 0xffffffff, 0x30323232, },
+ { 1, 2, 1, 0x0000184c, 0xffffffff, 0x22242628, },
+ { 1, 2, 2, 0x000018e0, 0xffffffff, 0x30303030, },
+ { 1, 2, 2, 0x000018e4, 0xffffffff, 0x24262830, },
+ { 1, 2, 2, 0x000018e8, 0x0000ffff, 0x20222222, },
+ { 1, 3, 0, 0x00001a24, 0xffffffff, 0x34343434, },
+ { 1, 3, 0, 0x00001a28, 0xffffffff, 0x30323434, },
+ { 1, 3, 0, 0x00001a2c, 0xffffffff, 0x34343434, },
+ { 1, 3, 0, 0x00001a30, 0xffffffff, 0x28303234, },
+ { 1, 3, 1, 0x00001a34, 0xffffffff, 0x32323232, },
+ { 1, 3, 1, 0x00001a38, 0xffffffff, 0x26283032, },
+ { 1, 3, 2, 0x00001ad8, 0xffffffff, 0x30303030, },
+ { 1, 3, 2, 0x00001adc, 0xffffffff, 0x24262830, },
+ { 1, 3, 0, 0x00001a3c, 0xffffffff, 0x34343434, },
+ { 1, 3, 0, 0x00001a40, 0xffffffff, 0x28303234, },
+ { 1, 3, 0, 0x00001a44, 0xffffffff, 0x32322426, },
+ { 1, 3, 1, 0x00001a48, 0xffffffff, 0x30323232, },
+ { 1, 3, 1, 0x00001a4c, 0xffffffff, 0x22242628, },
+ { 1, 3, 2, 0x00001ae0, 0xffffffff, 0x30303030, },
+ { 1, 3, 2, 0x00001ae4, 0xffffffff, 0x24262830, },
+ { 1, 3, 2, 0x00001ae8, 0x0000ffff, 0x20222222, },
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8814a_bb_pg);
+
+static const struct rtw_phy_pg_cfg_pair rtw8814a_bb_pg_type0[] = {
+ { 0, 0, 0, 0x00000c20, 0xffffffff, 0x32323232, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x32323232, },
+ { 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303232, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x32323232, },
+ { 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x30303030, },
+ { 0, 0, 1, 0x00000c38, 0xffffffff, 0x24262830, },
+ { 0, 0, 2, 0x00000cd8, 0xffffffff, 0x28282828, },
+ { 0, 0, 2, 0x00000cdc, 0xffffffff, 0x22242628, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x32323232, },
+ { 0, 0, 0, 0x00000c40, 0xffffffff, 0x26283032, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x30302224, },
+ { 0, 0, 1, 0x00000c48, 0xffffffff, 0x28303030, },
+ { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x20222426, },
+ { 0, 0, 2, 0x00000ce0, 0xffffffff, 0x28282828, },
+ { 0, 0, 2, 0x00000ce4, 0xffffffff, 0x22242628, },
+ { 0, 0, 2, 0x00000ce8, 0x0000ffff, 0x18202020, },
+ { 0, 1, 0, 0x00000e20, 0xffffffff, 0x32323232, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x32323232, },
+ { 0, 1, 0, 0x00000e28, 0xffffffff, 0x28303232, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x32323232, },
+ { 0, 1, 0, 0x00000e30, 0xffffffff, 0x26283032, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x30303030, },
+ { 0, 1, 1, 0x00000e38, 0xffffffff, 0x24262830, },
+ { 0, 1, 2, 0x00000ed8, 0xffffffff, 0x28282828, },
+ { 0, 1, 2, 0x00000edc, 0xffffffff, 0x22242628, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x32323232, },
+ { 0, 1, 0, 0x00000e40, 0xffffffff, 0x26283032, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x30302224, },
+ { 0, 1, 1, 0x00000e48, 0xffffffff, 0x28303030, },
+ { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x20222426, },
+ { 0, 1, 2, 0x00000ee0, 0xffffffff, 0x28282828, },
+ { 0, 1, 2, 0x00000ee4, 0xffffffff, 0x22242628, },
+ { 0, 1, 2, 0x00000ee8, 0x0000ffff, 0x18202020, },
+ { 0, 2, 0, 0x00001820, 0xffffffff, 0x32323232, },
+ { 0, 2, 0, 0x00001824, 0xffffffff, 0x32323232, },
+ { 0, 2, 0, 0x00001828, 0xffffffff, 0x28303232, },
+ { 0, 2, 0, 0x0000182c, 0xffffffff, 0x32323232, },
+ { 0, 2, 0, 0x00001830, 0xffffffff, 0x26283032, },
+ { 0, 2, 1, 0x00001834, 0xffffffff, 0x30303030, },
+ { 0, 2, 1, 0x00001838, 0xffffffff, 0x24262830, },
+ { 0, 2, 2, 0x000018d8, 0xffffffff, 0x28282828, },
+ { 0, 2, 2, 0x000018dc, 0xffffffff, 0x22242628, },
+ { 0, 2, 0, 0x0000183c, 0xffffffff, 0x32323232, },
+ { 0, 2, 0, 0x00001840, 0xffffffff, 0x26283032, },
+ { 0, 2, 0, 0x00001844, 0xffffffff, 0x30302224, },
+ { 0, 2, 1, 0x00001848, 0xffffffff, 0x28303030, },
+ { 0, 2, 1, 0x0000184c, 0xffffffff, 0x20222426, },
+ { 0, 2, 2, 0x000018e0, 0xffffffff, 0x28282828, },
+ { 0, 2, 2, 0x000018e4, 0xffffffff, 0x22242628, },
+ { 0, 2, 2, 0x000018e8, 0x0000ffff, 0x18202020, },
+ { 0, 3, 0, 0x00001a20, 0xffffffff, 0x32323232, },
+ { 0, 3, 0, 0x00001a24, 0xffffffff, 0x32323232, },
+ { 0, 3, 0, 0x00001a28, 0xffffffff, 0x28303232, },
+ { 0, 3, 0, 0x00001a2c, 0xffffffff, 0x32323232, },
+ { 0, 3, 0, 0x00001a30, 0xffffffff, 0x26283032, },
+ { 0, 3, 1, 0x00001a34, 0xffffffff, 0x30303030, },
+ { 0, 3, 1, 0x00001a38, 0xffffffff, 0x24262830, },
+ { 0, 3, 2, 0x00001ad8, 0xffffffff, 0x28282828, },
+ { 0, 3, 2, 0x00001adc, 0xffffffff, 0x22242628, },
+ { 0, 3, 0, 0x00001a3c, 0xffffffff, 0x32323232, },
+ { 0, 3, 0, 0x00001a40, 0xffffffff, 0x26283032, },
+ { 0, 3, 0, 0x00001a44, 0xffffffff, 0x30302224, },
+ { 0, 3, 1, 0x00001a48, 0xffffffff, 0x28303030, },
+ { 0, 3, 1, 0x00001a4c, 0xffffffff, 0x20222426, },
+ { 0, 3, 2, 0x00001ae0, 0xffffffff, 0x28282828, },
+ { 0, 3, 2, 0x00001ae4, 0xffffffff, 0x22242628, },
+ { 0, 3, 2, 0x00001ae8, 0x0000ffff, 0x18202020, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x32323232, },
+ { 1, 0, 0, 0x00000c28, 0xffffffff, 0x28303232, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x32323232, },
+ { 1, 0, 0, 0x00000c30, 0xffffffff, 0x26283032, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x30303030, },
+ { 1, 0, 1, 0x00000c38, 0xffffffff, 0x24262830, },
+ { 1, 0, 2, 0x00000cd8, 0xffffffff, 0x28282828, },
+ { 1, 0, 2, 0x00000cdc, 0xffffffff, 0x22242628, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x32323232, },
+ { 1, 0, 0, 0x00000c40, 0xffffffff, 0x26283032, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x30302224, },
+ { 1, 0, 1, 0x00000c48, 0xffffffff, 0x28303030, },
+ { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x20222426, },
+ { 1, 0, 2, 0x00000ce0, 0xffffffff, 0x28282828, },
+ { 1, 0, 2, 0x00000ce4, 0xffffffff, 0x22242628, },
+ { 1, 0, 2, 0x00000ce8, 0x0000ffff, 0x18202020, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x32323232, },
+ { 1, 1, 0, 0x00000e28, 0xffffffff, 0x28303232, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x32323232, },
+ { 1, 1, 0, 0x00000e30, 0xffffffff, 0x26283032, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x30303030, },
+ { 1, 1, 1, 0x00000e38, 0xffffffff, 0x24262830, },
+ { 1, 1, 2, 0x00000ed8, 0xffffffff, 0x28282828, },
+ { 1, 1, 2, 0x00000edc, 0xffffffff, 0x22242628, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x32323232, },
+ { 1, 1, 0, 0x00000e40, 0xffffffff, 0x26283032, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x30302224, },
+ { 1, 1, 1, 0x00000e48, 0xffffffff, 0x28303030, },
+ { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x20222426, },
+ { 1, 1, 2, 0x00000ee0, 0xffffffff, 0x28282828, },
+ { 1, 1, 2, 0x00000ee4, 0xffffffff, 0x22242628, },
+ { 1, 1, 2, 0x00000ee8, 0x0000ffff, 0x18202020, },
+ { 1, 2, 0, 0x00001824, 0xffffffff, 0x32323232, },
+ { 1, 2, 0, 0x00001828, 0xffffffff, 0x28303232, },
+ { 1, 2, 0, 0x0000182c, 0xffffffff, 0x32323232, },
+ { 1, 2, 0, 0x00001830, 0xffffffff, 0x26283032, },
+ { 1, 2, 1, 0x00001834, 0xffffffff, 0x30303030, },
+ { 1, 2, 1, 0x00001838, 0xffffffff, 0x24262830, },
+ { 1, 2, 2, 0x000018d8, 0xffffffff, 0x28282828, },
+ { 1, 2, 2, 0x000018dc, 0xffffffff, 0x22242628, },
+ { 1, 2, 0, 0x0000183c, 0xffffffff, 0x32323232, },
+ { 1, 2, 0, 0x00001840, 0xffffffff, 0x26283032, },
+ { 1, 2, 0, 0x00001844, 0xffffffff, 0x30302224, },
+ { 1, 2, 1, 0x00001848, 0xffffffff, 0x28303030, },
+ { 1, 2, 1, 0x0000184c, 0xffffffff, 0x20222426, },
+ { 1, 2, 2, 0x000018e0, 0xffffffff, 0x28282828, },
+ { 1, 2, 2, 0x000018e4, 0xffffffff, 0x22242628, },
+ { 1, 2, 2, 0x000018e8, 0x0000ffff, 0x18202020, },
+ { 1, 3, 0, 0x00001a24, 0xffffffff, 0x32323232, },
+ { 1, 3, 0, 0x00001a28, 0xffffffff, 0x28303232, },
+ { 1, 3, 0, 0x00001a2c, 0xffffffff, 0x32323232, },
+ { 1, 3, 0, 0x00001a30, 0xffffffff, 0x26283032, },
+ { 1, 3, 1, 0x00001a34, 0xffffffff, 0x30303030, },
+ { 1, 3, 1, 0x00001a38, 0xffffffff, 0x24262830, },
+ { 1, 3, 2, 0x00001ad8, 0xffffffff, 0x28282828, },
+ { 1, 3, 2, 0x00001adc, 0xffffffff, 0x22242628, },
+ { 1, 3, 0, 0x00001a3c, 0xffffffff, 0x32323232, },
+ { 1, 3, 0, 0x00001a40, 0xffffffff, 0x26283032, },
+ { 1, 3, 0, 0x00001a44, 0xffffffff, 0x30302224, },
+ { 1, 3, 1, 0x00001a48, 0xffffffff, 0x28303030, },
+ { 1, 3, 1, 0x00001a4c, 0xffffffff, 0x20222426, },
+ { 1, 3, 2, 0x00001ae0, 0xffffffff, 0x28282828, },
+ { 1, 3, 2, 0x00001ae4, 0xffffffff, 0x22242628, },
+ { 1, 3, 2, 0x00001ae8, 0x0000ffff, 0x18202020, },
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8814a_bb_pg_type0);
+
+static const struct rtw_phy_pg_cfg_pair rtw8814a_bb_pg_type2[] = {
+ { 0, 0, 0, 0x00000c20, 0xffffffff, 0x34343434, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x34343434, },
+ { 0, 0, 0, 0x00000c28, 0xffffffff, 0x30323434, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34343434, },
+ { 0, 0, 0, 0x00000c30, 0xffffffff, 0x28303234, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x32323232, },
+ { 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283032, },
+ { 0, 0, 2, 0x00000cd8, 0xffffffff, 0x30303030, },
+ { 0, 0, 2, 0x00000cdc, 0xffffffff, 0x24262830, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34343434, },
+ { 0, 0, 0, 0x00000c40, 0xffffffff, 0x28303234, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x32322426, },
+ { 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323232, },
+ { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, },
+ { 0, 0, 2, 0x00000ce0, 0xffffffff, 0x30303030, },
+ { 0, 0, 2, 0x00000ce4, 0xffffffff, 0x24262830, },
+ { 0, 0, 2, 0x00000ce8, 0x0000ffff, 0x20222222, },
+ { 0, 1, 0, 0x00000e20, 0xffffffff, 0x34343434, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x34343434, },
+ { 0, 1, 0, 0x00000e28, 0xffffffff, 0x30323434, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34343434, },
+ { 0, 1, 0, 0x00000e30, 0xffffffff, 0x28303234, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x32323232, },
+ { 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283032, },
+ { 0, 1, 2, 0x00000ed8, 0xffffffff, 0x30303030, },
+ { 0, 1, 2, 0x00000edc, 0xffffffff, 0x24262830, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34343434, },
+ { 0, 1, 0, 0x00000e40, 0xffffffff, 0x28303234, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x32322426, },
+ { 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323232, },
+ { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628, },
+ { 0, 1, 2, 0x00000ee0, 0xffffffff, 0x30303030, },
+ { 0, 1, 2, 0x00000ee4, 0xffffffff, 0x24262830, },
+ { 0, 1, 2, 0x00000ee8, 0x0000ffff, 0x20222222, },
+ { 0, 2, 0, 0x00001820, 0xffffffff, 0x34343434, },
+ { 0, 2, 0, 0x00001824, 0xffffffff, 0x34343434, },
+ { 0, 2, 0, 0x00001828, 0xffffffff, 0x30323434, },
+ { 0, 2, 0, 0x0000182c, 0xffffffff, 0x34343434, },
+ { 0, 2, 0, 0x00001830, 0xffffffff, 0x28303234, },
+ { 0, 2, 1, 0x00001834, 0xffffffff, 0x32323232, },
+ { 0, 2, 1, 0x00001838, 0xffffffff, 0x26283032, },
+ { 0, 2, 2, 0x000018d8, 0xffffffff, 0x30303030, },
+ { 0, 2, 2, 0x000018dc, 0xffffffff, 0x24262830, },
+ { 0, 2, 0, 0x0000183c, 0xffffffff, 0x34343434, },
+ { 0, 2, 0, 0x00001840, 0xffffffff, 0x28303234, },
+ { 0, 2, 0, 0x00001844, 0xffffffff, 0x32322426, },
+ { 0, 2, 1, 0x00001848, 0xffffffff, 0x30323232, },
+ { 0, 2, 1, 0x0000184c, 0xffffffff, 0x22242628, },
+ { 0, 2, 2, 0x000018e0, 0xffffffff, 0x30303030, },
+ { 0, 2, 2, 0x000018e4, 0xffffffff, 0x24262830, },
+ { 0, 2, 2, 0x000018e8, 0x0000ffff, 0x20222222, },
+ { 0, 3, 0, 0x00001a20, 0xffffffff, 0x34343434, },
+ { 0, 3, 0, 0x00001a24, 0xffffffff, 0x34343434, },
+ { 0, 3, 0, 0x00001a28, 0xffffffff, 0x30323434, },
+ { 0, 3, 0, 0x00001a2c, 0xffffffff, 0x34343434, },
+ { 0, 3, 0, 0x00001a30, 0xffffffff, 0x28303234, },
+ { 0, 3, 1, 0x00001a34, 0xffffffff, 0x32323232, },
+ { 0, 3, 1, 0x00001a38, 0xffffffff, 0x26283032, },
+ { 0, 3, 2, 0x00001ad8, 0xffffffff, 0x30303030, },
+ { 0, 3, 2, 0x00001adc, 0xffffffff, 0x24262830, },
+ { 0, 3, 0, 0x00001a3c, 0xffffffff, 0x34343434, },
+ { 0, 3, 0, 0x00001a40, 0xffffffff, 0x28303234, },
+ { 0, 3, 0, 0x00001a44, 0xffffffff, 0x32322426, },
+ { 0, 3, 1, 0x00001a48, 0xffffffff, 0x30323232, },
+ { 0, 3, 1, 0x00001a4c, 0xffffffff, 0x22242628, },
+ { 0, 3, 2, 0x00001ae0, 0xffffffff, 0x30303030, },
+ { 0, 3, 2, 0x00001ae4, 0xffffffff, 0x24262830, },
+ { 0, 3, 2, 0x00001ae8, 0x0000ffff, 0x20222222, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x34343434, },
+ { 1, 0, 0, 0x00000c28, 0xffffffff, 0x30323434, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x34343434, },
+ { 1, 0, 0, 0x00000c30, 0xffffffff, 0x28303234, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x32323232, },
+ { 1, 0, 1, 0x00000c38, 0xffffffff, 0x26283032, },
+ { 1, 0, 2, 0x00000cd8, 0xffffffff, 0x30303030, },
+ { 1, 0, 2, 0x00000cdc, 0xffffffff, 0x24262830, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x34343434, },
+ { 1, 0, 0, 0x00000c40, 0xffffffff, 0x28303234, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x32322426, },
+ { 1, 0, 1, 0x00000c48, 0xffffffff, 0x30323232, },
+ { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, },
+ { 1, 0, 2, 0x00000ce0, 0xffffffff, 0x30303030, },
+ { 1, 0, 2, 0x00000ce4, 0xffffffff, 0x24262830, },
+ { 1, 0, 2, 0x00000ce8, 0x0000ffff, 0x20222222, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x34343434, },
+ { 1, 1, 0, 0x00000e28, 0xffffffff, 0x30323434, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x34343434, },
+ { 1, 1, 0, 0x00000e30, 0xffffffff, 0x28303234, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x32323232, },
+ { 1, 1, 1, 0x00000e38, 0xffffffff, 0x26283032, },
+ { 1, 1, 2, 0x00000ed8, 0xffffffff, 0x30303030, },
+ { 1, 1, 2, 0x00000edc, 0xffffffff, 0x24262830, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x34343434, },
+ { 1, 1, 0, 0x00000e40, 0xffffffff, 0x28303234, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x32322426, },
+ { 1, 1, 1, 0x00000e48, 0xffffffff, 0x30323232, },
+ { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628, },
+ { 1, 1, 2, 0x00000ee0, 0xffffffff, 0x30303030, },
+ { 1, 1, 2, 0x00000ee4, 0xffffffff, 0x24262830, },
+ { 1, 1, 2, 0x00000ee8, 0x0000ffff, 0x20222222, },
+ { 1, 2, 0, 0x00001824, 0xffffffff, 0x34343434, },
+ { 1, 2, 0, 0x00001828, 0xffffffff, 0x30323434, },
+ { 1, 2, 0, 0x0000182c, 0xffffffff, 0x34343434, },
+ { 1, 2, 0, 0x00001830, 0xffffffff, 0x28303234, },
+ { 1, 2, 1, 0x00001834, 0xffffffff, 0x32323232, },
+ { 1, 2, 1, 0x00001838, 0xffffffff, 0x26283032, },
+ { 1, 2, 2, 0x000018d8, 0xffffffff, 0x30303030, },
+ { 1, 2, 2, 0x000018dc, 0xffffffff, 0x24262830, },
+ { 1, 2, 0, 0x0000183c, 0xffffffff, 0x34343434, },
+ { 1, 2, 0, 0x00001840, 0xffffffff, 0x28303234, },
+ { 1, 2, 0, 0x00001844, 0xffffffff, 0x32322426, },
+ { 1, 2, 1, 0x00001848, 0xffffffff, 0x30323232, },
+ { 1, 2, 1, 0x0000184c, 0xffffffff, 0x22242628, },
+ { 1, 2, 2, 0x000018e0, 0xffffffff, 0x30303030, },
+ { 1, 2, 2, 0x000018e4, 0xffffffff, 0x24262830, },
+ { 1, 2, 2, 0x000018e8, 0x0000ffff, 0x20222222, },
+ { 1, 3, 0, 0x00001a24, 0xffffffff, 0x34343434, },
+ { 1, 3, 0, 0x00001a28, 0xffffffff, 0x30323434, },
+ { 1, 3, 0, 0x00001a2c, 0xffffffff, 0x34343434, },
+ { 1, 3, 0, 0x00001a30, 0xffffffff, 0x28303234, },
+ { 1, 3, 1, 0x00001a34, 0xffffffff, 0x32323232, },
+ { 1, 3, 1, 0x00001a38, 0xffffffff, 0x26283032, },
+ { 1, 3, 2, 0x00001ad8, 0xffffffff, 0x30303030, },
+ { 1, 3, 2, 0x00001adc, 0xffffffff, 0x24262830, },
+ { 1, 3, 0, 0x00001a3c, 0xffffffff, 0x34343434, },
+ { 1, 3, 0, 0x00001a40, 0xffffffff, 0x28303234, },
+ { 1, 3, 0, 0x00001a44, 0xffffffff, 0x32322426, },
+ { 1, 3, 1, 0x00001a48, 0xffffffff, 0x30323232, },
+ { 1, 3, 1, 0x00001a4c, 0xffffffff, 0x22242628, },
+ { 1, 3, 2, 0x00001ae0, 0xffffffff, 0x30303030, },
+ { 1, 3, 2, 0x00001ae4, 0xffffffff, 0x24262830, },
+ { 1, 3, 2, 0x00001ae8, 0x0000ffff, 0x20222222, },
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8814a_bb_pg_type2);
+
+static const struct rtw_phy_pg_cfg_pair rtw8814a_bb_pg_type3[] = {
+ { 0, 0, 0, 0x00000c20, 0xffffffff, 0x48484848, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x46464646, },
+ { 0, 0, 0, 0x00000c28, 0xffffffff, 0x44464646, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x46464646, },
+ { 0, 0, 0, 0x00000c30, 0xffffffff, 0x42444646, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x46464646, },
+ { 0, 0, 1, 0x00000c38, 0xffffffff, 0x42444646, },
+ { 0, 0, 2, 0x00000cd8, 0xffffffff, 0x46464646, },
+ { 0, 0, 2, 0x00000cdc, 0xffffffff, 0x42444646, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x46464646, },
+ { 0, 0, 0, 0x00000c40, 0xffffffff, 0x42444646, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x46463840, },
+ { 0, 0, 1, 0x00000c48, 0xffffffff, 0x46464646, },
+ { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x38404244, },
+ { 0, 0, 2, 0x00000ce0, 0xffffffff, 0x46464646, },
+ { 0, 0, 2, 0x00000ce4, 0xffffffff, 0x42444646, },
+ { 0, 0, 2, 0x00000ce8, 0x0000ffff, 0x38383840, },
+ { 0, 1, 0, 0x00000e20, 0xffffffff, 0x48484848, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x46464646, },
+ { 0, 1, 0, 0x00000e28, 0xffffffff, 0x44464646, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x46464646, },
+ { 0, 1, 0, 0x00000e30, 0xffffffff, 0x42444646, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x46464646, },
+ { 0, 1, 1, 0x00000e38, 0xffffffff, 0x42444646, },
+ { 0, 1, 2, 0x00000ed8, 0xffffffff, 0x46464646, },
+ { 0, 1, 2, 0x00000edc, 0xffffffff, 0x42444646, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x46464646, },
+ { 0, 1, 0, 0x00000e40, 0xffffffff, 0x42444646, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x46463840, },
+ { 0, 1, 1, 0x00000e48, 0xffffffff, 0x46464646, },
+ { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x38404244, },
+ { 0, 1, 2, 0x00000ee0, 0xffffffff, 0x46464646, },
+ { 0, 1, 2, 0x00000ee4, 0xffffffff, 0x42444646, },
+ { 0, 1, 2, 0x00000ee8, 0x0000ffff, 0x38383840, },
+ { 0, 2, 0, 0x00001820, 0xffffffff, 0x48484848, },
+ { 0, 2, 0, 0x00001824, 0xffffffff, 0x46464646, },
+ { 0, 2, 0, 0x00001828, 0xffffffff, 0x44464646, },
+ { 0, 2, 0, 0x0000182c, 0xffffffff, 0x46464646, },
+ { 0, 2, 0, 0x00001830, 0xffffffff, 0x42444646, },
+ { 0, 2, 1, 0x00001834, 0xffffffff, 0x46464646, },
+ { 0, 2, 1, 0x00001838, 0xffffffff, 0x42444646, },
+ { 0, 2, 2, 0x000018d8, 0xffffffff, 0x46464646, },
+ { 0, 2, 2, 0x000018dc, 0xffffffff, 0x42444646, },
+ { 0, 2, 0, 0x0000183c, 0xffffffff, 0x46464646, },
+ { 0, 2, 0, 0x00001840, 0xffffffff, 0x42444646, },
+ { 0, 2, 0, 0x00001844, 0xffffffff, 0x46463840, },
+ { 0, 2, 1, 0x00001848, 0xffffffff, 0x46464646, },
+ { 0, 2, 1, 0x0000184c, 0xffffffff, 0x38404244, },
+ { 0, 2, 2, 0x000018e0, 0xffffffff, 0x46464646, },
+ { 0, 2, 2, 0x000018e4, 0xffffffff, 0x42444646, },
+ { 0, 2, 2, 0x000018e8, 0x0000ffff, 0x38383840, },
+ { 0, 3, 0, 0x00001a20, 0xffffffff, 0x48484848, },
+ { 0, 3, 0, 0x00001a24, 0xffffffff, 0x46464646, },
+ { 0, 3, 0, 0x00001a28, 0xffffffff, 0x44464646, },
+ { 0, 3, 0, 0x00001a2c, 0xffffffff, 0x46464646, },
+ { 0, 3, 0, 0x00001a30, 0xffffffff, 0x42444646, },
+ { 0, 3, 1, 0x00001a34, 0xffffffff, 0x46464646, },
+ { 0, 3, 1, 0x00001a38, 0xffffffff, 0x42444646, },
+ { 0, 3, 2, 0x00001ad8, 0xffffffff, 0x46464646, },
+ { 0, 3, 2, 0x00001adc, 0xffffffff, 0x42444646, },
+ { 0, 3, 0, 0x00001a3c, 0xffffffff, 0x46464646, },
+ { 0, 3, 0, 0x00001a40, 0xffffffff, 0x42444646, },
+ { 0, 3, 0, 0x00001a44, 0xffffffff, 0x46463840, },
+ { 0, 3, 1, 0x00001a48, 0xffffffff, 0x46464646, },
+ { 0, 3, 1, 0x00001a4c, 0xffffffff, 0x38404244, },
+ { 0, 3, 2, 0x00001ae0, 0xffffffff, 0x46464646, },
+ { 0, 3, 2, 0x00001ae4, 0xffffffff, 0x42444646, },
+ { 0, 3, 2, 0x00001ae8, 0x0000ffff, 0x38383840, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x46464646, },
+ { 1, 0, 0, 0x00000c28, 0xffffffff, 0x44464646, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x46464646, },
+ { 1, 0, 0, 0x00000c30, 0xffffffff, 0x42444646, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x46464646, },
+ { 1, 0, 1, 0x00000c38, 0xffffffff, 0x42444646, },
+ { 1, 0, 2, 0x00000cd8, 0xffffffff, 0x46464646, },
+ { 1, 0, 2, 0x00000cdc, 0xffffffff, 0x42444646, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x46464646, },
+ { 1, 0, 0, 0x00000c40, 0xffffffff, 0x42444646, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x46463840, },
+ { 1, 0, 1, 0x00000c48, 0xffffffff, 0x46464646, },
+ { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x38404244, },
+ { 1, 0, 2, 0x00000ce0, 0xffffffff, 0x46464646, },
+ { 1, 0, 2, 0x00000ce4, 0xffffffff, 0x42444646, },
+ { 1, 0, 2, 0x00000ce8, 0x0000ffff, 0x38383840, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x46464646, },
+ { 1, 1, 0, 0x00000e28, 0xffffffff, 0x44464646, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x46464646, },
+ { 1, 1, 0, 0x00000e30, 0xffffffff, 0x42444646, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x46464646, },
+ { 1, 1, 1, 0x00000e38, 0xffffffff, 0x42444646, },
+ { 1, 1, 2, 0x00000ed8, 0xffffffff, 0x46464646, },
+ { 1, 1, 2, 0x00000edc, 0xffffffff, 0x42444646, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x46464646, },
+ { 1, 1, 0, 0x00000e40, 0xffffffff, 0x42444646, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x46463840, },
+ { 1, 1, 1, 0x00000e48, 0xffffffff, 0x46464646, },
+ { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x38404244, },
+ { 1, 1, 2, 0x00000ee0, 0xffffffff, 0x46464646, },
+ { 1, 1, 2, 0x00000ee4, 0xffffffff, 0x42444646, },
+ { 1, 1, 2, 0x00000ee8, 0x0000ffff, 0x38383840, },
+ { 1, 2, 0, 0x00001824, 0xffffffff, 0x46464646, },
+ { 1, 2, 0, 0x00001828, 0xffffffff, 0x44464646, },
+ { 1, 2, 0, 0x0000182c, 0xffffffff, 0x46464646, },
+ { 1, 2, 0, 0x00001830, 0xffffffff, 0x42444646, },
+ { 1, 2, 1, 0x00001834, 0xffffffff, 0x46464646, },
+ { 1, 2, 1, 0x00001838, 0xffffffff, 0x42444646, },
+ { 1, 2, 2, 0x000018d8, 0xffffffff, 0x46464646, },
+ { 1, 2, 2, 0x000018dc, 0xffffffff, 0x42444646, },
+ { 1, 2, 0, 0x0000183c, 0xffffffff, 0x46464646, },
+ { 1, 2, 0, 0x00001840, 0xffffffff, 0x42444646, },
+ { 1, 2, 0, 0x00001844, 0xffffffff, 0x46463840, },
+ { 1, 2, 1, 0x00001848, 0xffffffff, 0x46464646, },
+ { 1, 2, 1, 0x0000184c, 0xffffffff, 0x38404244, },
+ { 1, 2, 2, 0x000018e0, 0xffffffff, 0x46464646, },
+ { 1, 2, 2, 0x000018e4, 0xffffffff, 0x42444646, },
+ { 1, 2, 2, 0x000018e8, 0x0000ffff, 0x38383840, },
+ { 1, 3, 0, 0x00001a24, 0xffffffff, 0x46464646, },
+ { 1, 3, 0, 0x00001a28, 0xffffffff, 0x44464646, },
+ { 1, 3, 0, 0x00001a2c, 0xffffffff, 0x46464646, },
+ { 1, 3, 0, 0x00001a30, 0xffffffff, 0x42444646, },
+ { 1, 3, 1, 0x00001a34, 0xffffffff, 0x46464646, },
+ { 1, 3, 1, 0x00001a38, 0xffffffff, 0x42444646, },
+ { 1, 3, 2, 0x00001ad8, 0xffffffff, 0x46464646, },
+ { 1, 3, 2, 0x00001adc, 0xffffffff, 0x42444646, },
+ { 1, 3, 0, 0x00001a3c, 0xffffffff, 0x46464646, },
+ { 1, 3, 0, 0x00001a40, 0xffffffff, 0x42444646, },
+ { 1, 3, 0, 0x00001a44, 0xffffffff, 0x46463840, },
+ { 1, 3, 1, 0x00001a48, 0xffffffff, 0x46464646, },
+ { 1, 3, 1, 0x00001a4c, 0xffffffff, 0x38404244, },
+ { 1, 3, 2, 0x00001ae0, 0xffffffff, 0x46464646, },
+ { 1, 3, 2, 0x00001ae4, 0xffffffff, 0x42444646, },
+ { 1, 3, 2, 0x00001ae8, 0x0000ffff, 0x38383840, },
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8814a_bb_pg_type3);
+
+static const struct rtw_phy_pg_cfg_pair rtw8814a_bb_pg_type4[] = {
+ { 0, 0, 0, 0x00000c20, 0xffffffff, 0x42424242, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x42424242, },
+ { 0, 0, 0, 0x00000c28, 0xffffffff, 0x36384042, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x42424242, },
+ { 0, 0, 0, 0x00000c30, 0xffffffff, 0x34363840, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x42424242, },
+ { 0, 0, 1, 0x00000c38, 0xffffffff, 0x34363840, },
+ { 0, 0, 2, 0x00000cd8, 0xffffffff, 0x42424242, },
+ { 0, 0, 2, 0x00000cdc, 0xffffffff, 0x34363840, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x42424242, },
+ { 0, 0, 0, 0x00000c40, 0xffffffff, 0x34363840, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x42423032, },
+ { 0, 0, 1, 0x00000c48, 0xffffffff, 0x38404242, },
+ { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x30323436, },
+ { 0, 0, 2, 0x00000ce0, 0xffffffff, 0x42424242, },
+ { 0, 0, 2, 0x00000ce4, 0xffffffff, 0x34363840, },
+ { 0, 0, 2, 0x00000ce8, 0x0000ffff, 0x30303032, },
+ { 0, 1, 0, 0x00000e20, 0xffffffff, 0x42424242, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x42424242, },
+ { 0, 1, 0, 0x00000e28, 0xffffffff, 0x36384042, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x42424242, },
+ { 0, 1, 0, 0x00000e30, 0xffffffff, 0x34363840, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x42424242, },
+ { 0, 1, 1, 0x00000e38, 0xffffffff, 0x34363840, },
+ { 0, 1, 2, 0x00000ed8, 0xffffffff, 0x42424242, },
+ { 0, 1, 2, 0x00000edc, 0xffffffff, 0x34363840, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x42424242, },
+ { 0, 1, 0, 0x00000e40, 0xffffffff, 0x34363840, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x42423032, },
+ { 0, 1, 1, 0x00000e48, 0xffffffff, 0x38404242, },
+ { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x30323436, },
+ { 0, 1, 2, 0x00000ee0, 0xffffffff, 0x42424242, },
+ { 0, 1, 2, 0x00000ee4, 0xffffffff, 0x34363840, },
+ { 0, 1, 2, 0x00000ee8, 0x0000ffff, 0x30303032, },
+ { 0, 2, 0, 0x00001820, 0xffffffff, 0x42424242, },
+ { 0, 2, 0, 0x00001824, 0xffffffff, 0x42424242, },
+ { 0, 2, 0, 0x00001828, 0xffffffff, 0x36384042, },
+ { 0, 2, 0, 0x0000182c, 0xffffffff, 0x42424242, },
+ { 0, 2, 0, 0x00001830, 0xffffffff, 0x34363840, },
+ { 0, 2, 1, 0x00001834, 0xffffffff, 0x42424242, },
+ { 0, 2, 1, 0x00001838, 0xffffffff, 0x34363840, },
+ { 0, 2, 2, 0x000018d8, 0xffffffff, 0x42424242, },
+ { 0, 2, 2, 0x000018dc, 0xffffffff, 0x34363840, },
+ { 0, 2, 0, 0x0000183c, 0xffffffff, 0x42424242, },
+ { 0, 2, 0, 0x00001840, 0xffffffff, 0x34363840, },
+ { 0, 2, 0, 0x00001844, 0xffffffff, 0x42423032, },
+ { 0, 2, 1, 0x00001848, 0xffffffff, 0x38404242, },
+ { 0, 2, 1, 0x0000184c, 0xffffffff, 0x30323436, },
+ { 0, 2, 2, 0x000018e0, 0xffffffff, 0x42424242, },
+ { 0, 2, 2, 0x000018e4, 0xffffffff, 0x34363840, },
+ { 0, 2, 2, 0x000018e8, 0x0000ffff, 0x30303032, },
+ { 0, 3, 0, 0x00001a20, 0xffffffff, 0x42424242, },
+ { 0, 3, 0, 0x00001a24, 0xffffffff, 0x42424242, },
+ { 0, 3, 0, 0x00001a28, 0xffffffff, 0x36384042, },
+ { 0, 3, 0, 0x00001a2c, 0xffffffff, 0x42424242, },
+ { 0, 3, 0, 0x00001a30, 0xffffffff, 0x34363840, },
+ { 0, 3, 1, 0x00001a34, 0xffffffff, 0x42424242, },
+ { 0, 3, 1, 0x00001a38, 0xffffffff, 0x34363840, },
+ { 0, 3, 2, 0x00001ad8, 0xffffffff, 0x42424242, },
+ { 0, 3, 2, 0x00001adc, 0xffffffff, 0x34363840, },
+ { 0, 3, 0, 0x00001a3c, 0xffffffff, 0x42424242, },
+ { 0, 3, 0, 0x00001a40, 0xffffffff, 0x34363840, },
+ { 0, 3, 0, 0x00001a44, 0xffffffff, 0x42423032, },
+ { 0, 3, 1, 0x00001a48, 0xffffffff, 0x38404242, },
+ { 0, 3, 1, 0x00001a4c, 0xffffffff, 0x30323436, },
+ { 0, 3, 2, 0x00001ae0, 0xffffffff, 0x42424242, },
+ { 0, 3, 2, 0x00001ae4, 0xffffffff, 0x34363840, },
+ { 0, 3, 2, 0x00001ae8, 0x0000ffff, 0x30303032, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x42424242, },
+ { 1, 0, 0, 0x00000c28, 0xffffffff, 0x36384042, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x42424242, },
+ { 1, 0, 0, 0x00000c30, 0xffffffff, 0x34363840, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x42424242, },
+ { 1, 0, 1, 0x00000c38, 0xffffffff, 0x34363840, },
+ { 1, 0, 2, 0x00000cd8, 0xffffffff, 0x42424242, },
+ { 1, 0, 2, 0x00000cdc, 0xffffffff, 0x34363840, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x42424242, },
+ { 1, 0, 0, 0x00000c40, 0xffffffff, 0x34363840, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x42423032, },
+ { 1, 0, 1, 0x00000c48, 0xffffffff, 0x38404242, },
+ { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x30323436, },
+ { 1, 0, 2, 0x00000ce0, 0xffffffff, 0x42424242, },
+ { 1, 0, 2, 0x00000ce4, 0xffffffff, 0x34363840, },
+ { 1, 0, 2, 0x00000ce8, 0x0000ffff, 0x30303032, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x42424242, },
+ { 1, 1, 0, 0x00000e28, 0xffffffff, 0x36384042, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x42424242, },
+ { 1, 1, 0, 0x00000e30, 0xffffffff, 0x34363840, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x42424242, },
+ { 1, 1, 1, 0x00000e38, 0xffffffff, 0x34363840, },
+ { 1, 1, 2, 0x00000ed8, 0xffffffff, 0x42424242, },
+ { 1, 1, 2, 0x00000edc, 0xffffffff, 0x34363840, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x42424242, },
+ { 1, 1, 0, 0x00000e40, 0xffffffff, 0x34363840, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x42423032, },
+ { 1, 1, 1, 0x00000e48, 0xffffffff, 0x38404242, },
+ { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x30323436, },
+ { 1, 1, 2, 0x00000ee0, 0xffffffff, 0x42424242, },
+ { 1, 1, 2, 0x00000ee4, 0xffffffff, 0x34363840, },
+ { 1, 1, 2, 0x00000ee8, 0x0000ffff, 0x30303032, },
+ { 1, 2, 0, 0x00001824, 0xffffffff, 0x42424242, },
+ { 1, 2, 0, 0x00001828, 0xffffffff, 0x36384042, },
+ { 1, 2, 0, 0x0000182c, 0xffffffff, 0x42424242, },
+ { 1, 2, 0, 0x00001830, 0xffffffff, 0x34363840, },
+ { 1, 2, 1, 0x00001834, 0xffffffff, 0x42424242, },
+ { 1, 2, 1, 0x00001838, 0xffffffff, 0x34363840, },
+ { 1, 2, 2, 0x000018d8, 0xffffffff, 0x42424242, },
+ { 1, 2, 2, 0x000018dc, 0xffffffff, 0x34363840, },
+ { 1, 2, 0, 0x0000183c, 0xffffffff, 0x42424242, },
+ { 1, 2, 0, 0x00001840, 0xffffffff, 0x34363840, },
+ { 1, 2, 0, 0x00001844, 0xffffffff, 0x42423032, },
+ { 1, 2, 1, 0x00001848, 0xffffffff, 0x38404242, },
+ { 1, 2, 1, 0x0000184c, 0xffffffff, 0x30323436, },
+ { 1, 2, 2, 0x000018e0, 0xffffffff, 0x42424242, },
+ { 1, 2, 2, 0x000018e4, 0xffffffff, 0x34363840, },
+ { 1, 2, 2, 0x000018e8, 0x0000ffff, 0x30303032, },
+ { 1, 3, 0, 0x00001a24, 0xffffffff, 0x42424242, },
+ { 1, 3, 0, 0x00001a28, 0xffffffff, 0x36384042, },
+ { 1, 3, 0, 0x00001a2c, 0xffffffff, 0x42424242, },
+ { 1, 3, 0, 0x00001a30, 0xffffffff, 0x34363840, },
+ { 1, 3, 1, 0x00001a34, 0xffffffff, 0x42424242, },
+ { 1, 3, 1, 0x00001a38, 0xffffffff, 0x34363840, },
+ { 1, 3, 2, 0x00001ad8, 0xffffffff, 0x42424242, },
+ { 1, 3, 2, 0x00001adc, 0xffffffff, 0x34363840, },
+ { 1, 3, 0, 0x00001a3c, 0xffffffff, 0x42424242, },
+ { 1, 3, 0, 0x00001a40, 0xffffffff, 0x34363840, },
+ { 1, 3, 0, 0x00001a44, 0xffffffff, 0x42423032, },
+ { 1, 3, 1, 0x00001a48, 0xffffffff, 0x38404242, },
+ { 1, 3, 1, 0x00001a4c, 0xffffffff, 0x30323436, },
+ { 1, 3, 2, 0x00001ae0, 0xffffffff, 0x42424242, },
+ { 1, 3, 2, 0x00001ae4, 0xffffffff, 0x34363840, },
+ { 1, 3, 2, 0x00001ae8, 0x0000ffff, 0x30303032, },
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8814a_bb_pg_type4);
+
+static const struct rtw_phy_pg_cfg_pair rtw8814a_bb_pg_type5[] = {
+ { 0, 0, 0, 0x00000c20, 0xffffffff, 0x48484848, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x46464646, },
+ { 0, 0, 0, 0x00000c28, 0xffffffff, 0x44464646, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x46464646, },
+ { 0, 0, 0, 0x00000c30, 0xffffffff, 0x42444646, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x44444444, },
+ { 0, 0, 1, 0x00000c38, 0xffffffff, 0x40424444, },
+ { 0, 0, 2, 0x00000cd8, 0xffffffff, 0x42424242, },
+ { 0, 0, 2, 0x00000cdc, 0xffffffff, 0x38404242, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x46464646, },
+ { 0, 0, 0, 0x00000c40, 0xffffffff, 0x42444646, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x44444040, },
+ { 0, 0, 1, 0x00000c48, 0xffffffff, 0x44444444, },
+ { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x38384042, },
+ { 0, 0, 2, 0x00000ce0, 0xffffffff, 0x42424242, },
+ { 0, 0, 2, 0x00000ce4, 0xffffffff, 0x38404242, },
+ { 0, 0, 2, 0x00000ce8, 0x0000ffff, 0x20203636, },
+ { 0, 1, 0, 0x00000e20, 0xffffffff, 0x48484848, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x46464646, },
+ { 0, 1, 0, 0x00000e28, 0xffffffff, 0x44464646, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x46464646, },
+ { 0, 1, 0, 0x00000e30, 0xffffffff, 0x42444646, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x44444444, },
+ { 0, 1, 1, 0x00000e38, 0xffffffff, 0x40424444, },
+ { 0, 1, 2, 0x00000ed8, 0xffffffff, 0x42424242, },
+ { 0, 1, 2, 0x00000edc, 0xffffffff, 0x38404242, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x46464646, },
+ { 0, 1, 0, 0x00000e40, 0xffffffff, 0x42444646, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x44444040, },
+ { 0, 1, 1, 0x00000e48, 0xffffffff, 0x44444444, },
+ { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x38384042, },
+ { 0, 1, 2, 0x00000ee0, 0xffffffff, 0x42424242, },
+ { 0, 1, 2, 0x00000ee4, 0xffffffff, 0x38404242, },
+ { 0, 1, 2, 0x00000ee8, 0x0000ffff, 0x20203636, },
+ { 0, 2, 0, 0x00001820, 0xffffffff, 0x48484848, },
+ { 0, 2, 0, 0x00001824, 0xffffffff, 0x46464646, },
+ { 0, 2, 0, 0x00001828, 0xffffffff, 0x44464646, },
+ { 0, 2, 0, 0x0000182c, 0xffffffff, 0x46464646, },
+ { 0, 2, 0, 0x00001830, 0xffffffff, 0x42444646, },
+ { 0, 2, 1, 0x00001834, 0xffffffff, 0x44444444, },
+ { 0, 2, 1, 0x00001838, 0xffffffff, 0x40424444, },
+ { 0, 2, 2, 0x000018d8, 0xffffffff, 0x42424242, },
+ { 0, 2, 2, 0x000018dc, 0xffffffff, 0x38404242, },
+ { 0, 2, 0, 0x0000183c, 0xffffffff, 0x46464646, },
+ { 0, 2, 0, 0x00001840, 0xffffffff, 0x42444646, },
+ { 0, 2, 0, 0x00001844, 0xffffffff, 0x44444040, },
+ { 0, 2, 1, 0x00001848, 0xffffffff, 0x44444444, },
+ { 0, 2, 1, 0x0000184c, 0xffffffff, 0x38384042, },
+ { 0, 2, 2, 0x000018e0, 0xffffffff, 0x42424242, },
+ { 0, 2, 2, 0x000018e4, 0xffffffff, 0x38404242, },
+ { 0, 2, 2, 0x000018e8, 0x0000ffff, 0x20203636, },
+ { 0, 3, 0, 0x00001a20, 0xffffffff, 0x48484848, },
+ { 0, 3, 0, 0x00001a24, 0xffffffff, 0x46464646, },
+ { 0, 3, 0, 0x00001a28, 0xffffffff, 0x44464646, },
+ { 0, 3, 0, 0x00001a2c, 0xffffffff, 0x46464646, },
+ { 0, 3, 0, 0x00001a30, 0xffffffff, 0x42444646, },
+ { 0, 3, 1, 0x00001a34, 0xffffffff, 0x44444444, },
+ { 0, 3, 1, 0x00001a38, 0xffffffff, 0x40424444, },
+ { 0, 3, 2, 0x00001ad8, 0xffffffff, 0x42424242, },
+ { 0, 3, 2, 0x00001adc, 0xffffffff, 0x38404242, },
+ { 0, 3, 0, 0x00001a3c, 0xffffffff, 0x46464646, },
+ { 0, 3, 0, 0x00001a40, 0xffffffff, 0x42444646, },
+ { 0, 3, 0, 0x00001a44, 0xffffffff, 0x44444040, },
+ { 0, 3, 1, 0x00001a48, 0xffffffff, 0x44444444, },
+ { 0, 3, 1, 0x00001a4c, 0xffffffff, 0x38384042, },
+ { 0, 3, 2, 0x00001ae0, 0xffffffff, 0x42424242, },
+ { 0, 3, 2, 0x00001ae4, 0xffffffff, 0x38404242, },
+ { 0, 3, 2, 0x00001ae8, 0x0000ffff, 0x20203636, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x46464646, },
+ { 1, 0, 0, 0x00000c28, 0xffffffff, 0x44464646, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x46464646, },
+ { 1, 0, 0, 0x00000c30, 0xffffffff, 0x42444646, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x44444444, },
+ { 1, 0, 1, 0x00000c38, 0xffffffff, 0x40424444, },
+ { 1, 0, 2, 0x00000cd8, 0xffffffff, 0x42424242, },
+ { 1, 0, 2, 0x00000cdc, 0xffffffff, 0x38404242, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x46464646, },
+ { 1, 0, 0, 0x00000c40, 0xffffffff, 0x42444646, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x44443840, },
+ { 1, 0, 1, 0x00000c48, 0xffffffff, 0x44444444, },
+ { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x36384042, },
+ { 1, 0, 2, 0x00000ce0, 0xffffffff, 0x42424242, },
+ { 1, 0, 2, 0x00000ce4, 0xffffffff, 0x38404242, },
+ { 1, 0, 2, 0x00000ce8, 0x0000ffff, 0x20203436, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x46464646, },
+ { 1, 1, 0, 0x00000e28, 0xffffffff, 0x44464646, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x46464646, },
+ { 1, 1, 0, 0x00000e30, 0xffffffff, 0x42444646, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x44444444, },
+ { 1, 1, 1, 0x00000e38, 0xffffffff, 0x40424444, },
+ { 1, 1, 2, 0x00000ed8, 0xffffffff, 0x42424242, },
+ { 1, 1, 2, 0x00000edc, 0xffffffff, 0x38404242, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x46464646, },
+ { 1, 1, 0, 0x00000e40, 0xffffffff, 0x42444646, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x44443840, },
+ { 1, 1, 1, 0x00000e48, 0xffffffff, 0x44444444, },
+ { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x36384042, },
+ { 1, 1, 2, 0x00000ee0, 0xffffffff, 0x42424242, },
+ { 1, 1, 2, 0x00000ee4, 0xffffffff, 0x38404242, },
+ { 1, 1, 2, 0x00000ee8, 0x0000ffff, 0x20203436, },
+ { 1, 2, 0, 0x00001824, 0xffffffff, 0x46464646, },
+ { 1, 2, 0, 0x00001828, 0xffffffff, 0x44464646, },
+ { 1, 2, 0, 0x0000182c, 0xffffffff, 0x46464646, },
+ { 1, 2, 0, 0x00001830, 0xffffffff, 0x42444646, },
+ { 1, 2, 1, 0x00001834, 0xffffffff, 0x44444444, },
+ { 1, 2, 1, 0x00001838, 0xffffffff, 0x40424444, },
+ { 1, 2, 2, 0x000018d8, 0xffffffff, 0x42424242, },
+ { 1, 2, 2, 0x000018dc, 0xffffffff, 0x38404242, },
+ { 1, 2, 0, 0x0000183c, 0xffffffff, 0x46464646, },
+ { 1, 2, 0, 0x00001840, 0xffffffff, 0x42444646, },
+ { 1, 2, 0, 0x00001844, 0xffffffff, 0x44443840, },
+ { 1, 2, 1, 0x00001848, 0xffffffff, 0x44444444, },
+ { 1, 2, 1, 0x0000184c, 0xffffffff, 0x36384042, },
+ { 1, 2, 2, 0x000018e0, 0xffffffff, 0x42424242, },
+ { 1, 2, 2, 0x000018e4, 0xffffffff, 0x38404242, },
+ { 1, 2, 2, 0x000018e8, 0x0000ffff, 0x20203436, },
+ { 1, 3, 0, 0x00001a24, 0xffffffff, 0x46464646, },
+ { 1, 3, 0, 0x00001a28, 0xffffffff, 0x44464646, },
+ { 1, 3, 0, 0x00001a2c, 0xffffffff, 0x46464646, },
+ { 1, 3, 0, 0x00001a30, 0xffffffff, 0x42444646, },
+ { 1, 3, 1, 0x00001a34, 0xffffffff, 0x44444444, },
+ { 1, 3, 1, 0x00001a38, 0xffffffff, 0x40424444, },
+ { 1, 3, 2, 0x00001ad8, 0xffffffff, 0x42424242, },
+ { 1, 3, 2, 0x00001adc, 0xffffffff, 0x38404242, },
+ { 1, 3, 0, 0x00001a3c, 0xffffffff, 0x46464646, },
+ { 1, 3, 0, 0x00001a40, 0xffffffff, 0x42444646, },
+ { 1, 3, 0, 0x00001a44, 0xffffffff, 0x44443840, },
+ { 1, 3, 1, 0x00001a48, 0xffffffff, 0x44444444, },
+ { 1, 3, 1, 0x00001a4c, 0xffffffff, 0x36384042, },
+ { 1, 3, 2, 0x00001ae0, 0xffffffff, 0x42424242, },
+ { 1, 3, 2, 0x00001ae4, 0xffffffff, 0x38404242, },
+ { 1, 3, 2, 0x00001ae8, 0x0000ffff, 0x20203436, },
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8814a_bb_pg_type5);
+
+static const struct rtw_phy_pg_cfg_pair rtw8814a_bb_pg_type7[] = {
+ { 0, 0, 0, 0x00000c20, 0xffffffff, 0x34343434, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x34343434, },
+ { 0, 0, 0, 0x00000c28, 0xffffffff, 0x30323434, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34343434, },
+ { 0, 0, 0, 0x00000c30, 0xffffffff, 0x28303234, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x34343434, },
+ { 0, 0, 1, 0x00000c38, 0xffffffff, 0x28303234, },
+ { 0, 0, 2, 0x00000cd8, 0xffffffff, 0x34343434, },
+ { 0, 0, 2, 0x00000cdc, 0xffffffff, 0x28303234, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34343434, },
+ { 0, 0, 0, 0x00000c40, 0xffffffff, 0x28303234, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x34342426, },
+ { 0, 0, 1, 0x00000c48, 0xffffffff, 0x32343434, },
+ { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x24262830, },
+ { 0, 0, 2, 0x00000ce0, 0xffffffff, 0x34343434, },
+ { 0, 0, 2, 0x00000ce4, 0xffffffff, 0x28303234, },
+ { 0, 0, 2, 0x00000ce8, 0x0000ffff, 0x24263434, },
+ { 0, 1, 0, 0x00000e20, 0xffffffff, 0x34343434, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x34343434, },
+ { 0, 1, 0, 0x00000e28, 0xffffffff, 0x30323434, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34343434, },
+ { 0, 1, 0, 0x00000e30, 0xffffffff, 0x28303234, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x34343434, },
+ { 0, 1, 1, 0x00000e38, 0xffffffff, 0x28303234, },
+ { 0, 1, 2, 0x00000ed8, 0xffffffff, 0x34343434, },
+ { 0, 1, 2, 0x00000edc, 0xffffffff, 0x28303234, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34343434, },
+ { 0, 1, 0, 0x00000e40, 0xffffffff, 0x28303234, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x34342426, },
+ { 0, 1, 1, 0x00000e48, 0xffffffff, 0x32343434, },
+ { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x24262830, },
+ { 0, 1, 2, 0x00000ee0, 0xffffffff, 0x34343434, },
+ { 0, 1, 2, 0x00000ee4, 0xffffffff, 0x28303234, },
+ { 0, 1, 2, 0x00000ee8, 0x0000ffff, 0x24263434, },
+ { 0, 2, 0, 0x00001820, 0xffffffff, 0x34343434, },
+ { 0, 2, 0, 0x00001824, 0xffffffff, 0x34343434, },
+ { 0, 2, 0, 0x00001828, 0xffffffff, 0x30323434, },
+ { 0, 2, 0, 0x0000182c, 0xffffffff, 0x34343434, },
+ { 0, 2, 0, 0x00001830, 0xffffffff, 0x28303234, },
+ { 0, 2, 1, 0x00001834, 0xffffffff, 0x34343434, },
+ { 0, 2, 1, 0x00001838, 0xffffffff, 0x28303234, },
+ { 0, 2, 2, 0x000018d8, 0xffffffff, 0x34343434, },
+ { 0, 2, 2, 0x000018dc, 0xffffffff, 0x28303234, },
+ { 0, 2, 0, 0x0000183c, 0xffffffff, 0x34343434, },
+ { 0, 2, 0, 0x00001840, 0xffffffff, 0x28303234, },
+ { 0, 2, 0, 0x00001844, 0xffffffff, 0x34342426, },
+ { 0, 2, 1, 0x00001848, 0xffffffff, 0x32343434, },
+ { 0, 2, 1, 0x0000184c, 0xffffffff, 0x24262830, },
+ { 0, 2, 2, 0x000018e0, 0xffffffff, 0x34343434, },
+ { 0, 2, 2, 0x000018e4, 0xffffffff, 0x28303234, },
+ { 0, 2, 2, 0x000018e8, 0x0000ffff, 0x24263434, },
+ { 0, 3, 0, 0x00001a20, 0xffffffff, 0x34343434, },
+ { 0, 3, 0, 0x00001a24, 0xffffffff, 0x34343434, },
+ { 0, 3, 0, 0x00001a28, 0xffffffff, 0x30323434, },
+ { 0, 3, 0, 0x00001a2c, 0xffffffff, 0x34343434, },
+ { 0, 3, 0, 0x00001a30, 0xffffffff, 0x28303234, },
+ { 0, 3, 1, 0x00001a34, 0xffffffff, 0x34343434, },
+ { 0, 3, 1, 0x00001a38, 0xffffffff, 0x28303234, },
+ { 0, 3, 2, 0x00001ad8, 0xffffffff, 0x34343434, },
+ { 0, 3, 2, 0x00001adc, 0xffffffff, 0x28303234, },
+ { 0, 3, 0, 0x00001a3c, 0xffffffff, 0x34343434, },
+ { 0, 3, 0, 0x00001a40, 0xffffffff, 0x28303234, },
+ { 0, 3, 0, 0x00001a44, 0xffffffff, 0x34342426, },
+ { 0, 3, 1, 0x00001a48, 0xffffffff, 0x32343434, },
+ { 0, 3, 1, 0x00001a4c, 0xffffffff, 0x24262830, },
+ { 0, 3, 2, 0x00001ae0, 0xffffffff, 0x34343434, },
+ { 0, 3, 2, 0x00001ae4, 0xffffffff, 0x28303234, },
+ { 0, 3, 2, 0x00001ae8, 0x0000ffff, 0x24263434, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x34343434, },
+ { 1, 0, 0, 0x00000c28, 0xffffffff, 0x30323434, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x34343434, },
+ { 1, 0, 0, 0x00000c30, 0xffffffff, 0x28303234, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x34343434, },
+ { 1, 0, 1, 0x00000c38, 0xffffffff, 0x28303234, },
+ { 1, 0, 2, 0x00000cd8, 0xffffffff, 0x34343434, },
+ { 1, 0, 2, 0x00000cdc, 0xffffffff, 0x28303234, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x34343434, },
+ { 1, 0, 0, 0x00000c40, 0xffffffff, 0x28303234, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x34342426, },
+ { 1, 0, 1, 0x00000c48, 0xffffffff, 0x32343434, },
+ { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x24262830, },
+ { 1, 0, 2, 0x00000ce0, 0xffffffff, 0x34343434, },
+ { 1, 0, 2, 0x00000ce4, 0xffffffff, 0x28303234, },
+ { 1, 0, 2, 0x00000ce8, 0x0000ffff, 0x24263434, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x34343434, },
+ { 1, 1, 0, 0x00000e28, 0xffffffff, 0x30323434, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x34343434, },
+ { 1, 1, 0, 0x00000e30, 0xffffffff, 0x28303234, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x34343434, },
+ { 1, 1, 1, 0x00000e38, 0xffffffff, 0x28303234, },
+ { 1, 1, 2, 0x00000ed8, 0xffffffff, 0x34343434, },
+ { 1, 1, 2, 0x00000edc, 0xffffffff, 0x28303234, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x34343434, },
+ { 1, 1, 0, 0x00000e40, 0xffffffff, 0x28303234, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x34342426, },
+ { 1, 1, 1, 0x00000e48, 0xffffffff, 0x32343434, },
+ { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x24262830, },
+ { 1, 1, 2, 0x00000ee0, 0xffffffff, 0x34343434, },
+ { 1, 1, 2, 0x00000ee4, 0xffffffff, 0x28303234, },
+ { 1, 1, 2, 0x00000ee8, 0x0000ffff, 0x24263434, },
+ { 1, 2, 0, 0x00001824, 0xffffffff, 0x34343434, },
+ { 1, 2, 0, 0x00001828, 0xffffffff, 0x30323434, },
+ { 1, 2, 0, 0x0000182c, 0xffffffff, 0x34343434, },
+ { 1, 2, 0, 0x00001830, 0xffffffff, 0x28303234, },
+ { 1, 2, 1, 0x00001834, 0xffffffff, 0x34343434, },
+ { 1, 2, 1, 0x00001838, 0xffffffff, 0x28303234, },
+ { 1, 2, 2, 0x000018d8, 0xffffffff, 0x34343434, },
+ { 1, 2, 2, 0x000018dc, 0xffffffff, 0x28303234, },
+ { 1, 2, 0, 0x0000183c, 0xffffffff, 0x34343434, },
+ { 1, 2, 0, 0x00001840, 0xffffffff, 0x28303234, },
+ { 1, 2, 0, 0x00001844, 0xffffffff, 0x34342426, },
+ { 1, 2, 1, 0x00001848, 0xffffffff, 0x32343434, },
+ { 1, 2, 1, 0x0000184c, 0xffffffff, 0x24262830, },
+ { 1, 2, 2, 0x000018e0, 0xffffffff, 0x34343434, },
+ { 1, 2, 2, 0x000018e4, 0xffffffff, 0x28303234, },
+ { 1, 2, 2, 0x000018e8, 0x0000ffff, 0x24263434, },
+ { 1, 3, 0, 0x00001a24, 0xffffffff, 0x34343434, },
+ { 1, 3, 0, 0x00001a28, 0xffffffff, 0x30323434, },
+ { 1, 3, 0, 0x00001a2c, 0xffffffff, 0x34343434, },
+ { 1, 3, 0, 0x00001a30, 0xffffffff, 0x28303234, },
+ { 1, 3, 1, 0x00001a34, 0xffffffff, 0x34343434, },
+ { 1, 3, 1, 0x00001a38, 0xffffffff, 0x28303234, },
+ { 1, 3, 2, 0x00001ad8, 0xffffffff, 0x34343434, },
+ { 1, 3, 2, 0x00001adc, 0xffffffff, 0x28303234, },
+ { 1, 3, 0, 0x00001a3c, 0xffffffff, 0x34343434, },
+ { 1, 3, 0, 0x00001a40, 0xffffffff, 0x28303234, },
+ { 1, 3, 0, 0x00001a44, 0xffffffff, 0x34342426, },
+ { 1, 3, 1, 0x00001a48, 0xffffffff, 0x32343434, },
+ { 1, 3, 1, 0x00001a4c, 0xffffffff, 0x24262830, },
+ { 1, 3, 2, 0x00001ae0, 0xffffffff, 0x34343434, },
+ { 1, 3, 2, 0x00001ae4, 0xffffffff, 0x28303234, },
+ { 1, 3, 2, 0x00001ae8, 0x0000ffff, 0x24263434, },
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8814a_bb_pg_type7);
+
+static const struct rtw_phy_pg_cfg_pair rtw8814a_bb_pg_type8[] = {
+ { 0, 0, 0, 0x00000c20, 0xffffffff, 0x43434343, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x43434343, },
+ { 0, 0, 0, 0x00000c28, 0xffffffff, 0x35373941, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x43434343, },
+ { 0, 0, 0, 0x00000c30, 0xffffffff, 0x33353739, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x43434343, },
+ { 0, 0, 1, 0x00000c38, 0xffffffff, 0x31333537, },
+ { 0, 0, 2, 0x00000cd8, 0xffffffff, 0x43434343, },
+ { 0, 0, 2, 0x00000cdc, 0xffffffff, 0x29313335, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34343434, },
+ { 0, 0, 0, 0x00000c40, 0xffffffff, 0x28303234, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x32322426, },
+ { 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323232, },
+ { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, },
+ { 0, 0, 2, 0x00000ce0, 0xffffffff, 0x30303030, },
+ { 0, 0, 2, 0x00000ce4, 0xffffffff, 0x24262830, },
+ { 0, 0, 2, 0x00000ce8, 0x0000ffff, 0x20222222, },
+ { 0, 1, 0, 0x00000e20, 0xffffffff, 0x43434343, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x43434343, },
+ { 0, 1, 0, 0x00000e28, 0xffffffff, 0x35373941, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x41434343, },
+ { 0, 1, 0, 0x00000e30, 0xffffffff, 0x33353739, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x39414141, },
+ { 0, 1, 1, 0x00000e38, 0xffffffff, 0x31333537, },
+ { 0, 1, 2, 0x00000ed8, 0xffffffff, 0x37393939, },
+ { 0, 1, 2, 0x00000edc, 0xffffffff, 0x29313335, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34343434, },
+ { 0, 1, 0, 0x00000e40, 0xffffffff, 0x28303234, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x32322426, },
+ { 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323232, },
+ { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628, },
+ { 0, 1, 2, 0x00000ee0, 0xffffffff, 0x30303030, },
+ { 0, 1, 2, 0x00000ee4, 0xffffffff, 0x24262830, },
+ { 0, 1, 2, 0x00000ee8, 0x0000ffff, 0x20222222, },
+ { 0, 2, 0, 0x00001820, 0xffffffff, 0x43434343, },
+ { 0, 2, 0, 0x00001824, 0xffffffff, 0x43434343, },
+ { 0, 2, 0, 0x00001828, 0xffffffff, 0x35373941, },
+ { 0, 2, 0, 0x0000182c, 0xffffffff, 0x41434343, },
+ { 0, 2, 0, 0x00001830, 0xffffffff, 0x33353739, },
+ { 0, 2, 1, 0x00001834, 0xffffffff, 0x39414141, },
+ { 0, 2, 1, 0x00001838, 0xffffffff, 0x31333537, },
+ { 0, 2, 2, 0x000018d8, 0xffffffff, 0x37393939, },
+ { 0, 2, 2, 0x000018dc, 0xffffffff, 0x29313335, },
+ { 0, 2, 0, 0x0000183c, 0xffffffff, 0x34343434, },
+ { 0, 2, 0, 0x00001840, 0xffffffff, 0x28303234, },
+ { 0, 2, 0, 0x00001844, 0xffffffff, 0x32322426, },
+ { 0, 2, 1, 0x00001848, 0xffffffff, 0x30323232, },
+ { 0, 2, 1, 0x0000184c, 0xffffffff, 0x22242628, },
+ { 0, 2, 2, 0x000018e0, 0xffffffff, 0x30303030, },
+ { 0, 2, 2, 0x000018e4, 0xffffffff, 0x24262830, },
+ { 0, 2, 2, 0x000018e8, 0x0000ffff, 0x20222222, },
+ { 0, 3, 0, 0x00001a20, 0xffffffff, 0x43434343, },
+ { 0, 3, 0, 0x00001a24, 0xffffffff, 0x43434343, },
+ { 0, 3, 0, 0x00001a28, 0xffffffff, 0x35373941, },
+ { 0, 3, 0, 0x00001a2c, 0xffffffff, 0x41434343, },
+ { 0, 3, 0, 0x00001a30, 0xffffffff, 0x33353739, },
+ { 0, 3, 1, 0x00001a34, 0xffffffff, 0x39414141, },
+ { 0, 3, 1, 0x00001a38, 0xffffffff, 0x31333537, },
+ { 0, 3, 2, 0x00001ad8, 0xffffffff, 0x37393939, },
+ { 0, 3, 2, 0x00001adc, 0xffffffff, 0x29313335, },
+ { 0, 3, 0, 0x00001a3c, 0xffffffff, 0x34343434, },
+ { 0, 3, 0, 0x00001a40, 0xffffffff, 0x28303234, },
+ { 0, 3, 0, 0x00001a44, 0xffffffff, 0x32322426, },
+ { 0, 3, 1, 0x00001a48, 0xffffffff, 0x30323232, },
+ { 0, 3, 1, 0x00001a4c, 0xffffffff, 0x22242628, },
+ { 0, 3, 2, 0x00001ae0, 0xffffffff, 0x30303030, },
+ { 0, 3, 2, 0x00001ae4, 0xffffffff, 0x24262830, },
+ { 0, 3, 2, 0x00001ae8, 0x0000ffff, 0x20222222, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x46464646, },
+ { 1, 0, 0, 0x00000c28, 0xffffffff, 0x39414345, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x46464646, },
+ { 1, 0, 0, 0x00000c30, 0xffffffff, 0x38404244, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x46464646, },
+ { 1, 0, 1, 0x00000c38, 0xffffffff, 0x36384042, },
+ { 1, 0, 2, 0x00000cd8, 0xffffffff, 0x46464646, },
+ { 1, 0, 2, 0x00000cdc, 0xffffffff, 0x34363840, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x46464646, },
+ { 1, 0, 0, 0x00000c40, 0xffffffff, 0x38404244, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x46463738, },
+ { 1, 0, 1, 0x00000c48, 0xffffffff, 0x42444646, },
+ { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x35373840, },
+ { 1, 0, 2, 0x00000ce0, 0xffffffff, 0x46464646, },
+ { 1, 0, 2, 0x00000ce4, 0xffffffff, 0x37394143, },
+ { 1, 0, 2, 0x00000ce8, 0x0000ffff, 0x33333335, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x46464646, },
+ { 1, 1, 0, 0x00000e28, 0xffffffff, 0x39414345, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x46464646, },
+ { 1, 1, 0, 0x00000e30, 0xffffffff, 0x38404244, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x46464646, },
+ { 1, 1, 1, 0x00000e38, 0xffffffff, 0x36384042, },
+ { 1, 1, 2, 0x00000ed8, 0xffffffff, 0x46464646, },
+ { 1, 1, 2, 0x00000edc, 0xffffffff, 0x34363840, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x46464646, },
+ { 1, 1, 0, 0x00000e40, 0xffffffff, 0x38404244, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x46463738, },
+ { 1, 1, 1, 0x00000e48, 0xffffffff, 0x42444646, },
+ { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x35373840, },
+ { 1, 1, 2, 0x00000ee0, 0xffffffff, 0x46464646, },
+ { 1, 1, 2, 0x00000ee4, 0xffffffff, 0x37394143, },
+ { 1, 1, 2, 0x00000ee8, 0x0000ffff, 0x33333335, },
+ { 1, 2, 0, 0x00001824, 0xffffffff, 0x46464646, },
+ { 1, 2, 0, 0x00001828, 0xffffffff, 0x39414345, },
+ { 1, 2, 0, 0x0000182c, 0xffffffff, 0x46464646, },
+ { 1, 2, 0, 0x00001830, 0xffffffff, 0x38404244, },
+ { 1, 2, 1, 0x00001834, 0xffffffff, 0x46464646, },
+ { 1, 2, 1, 0x00001838, 0xffffffff, 0x36384042, },
+ { 1, 2, 2, 0x000018d8, 0xffffffff, 0x46464646, },
+ { 1, 2, 2, 0x000018dc, 0xffffffff, 0x34363840, },
+ { 1, 2, 0, 0x0000183c, 0xffffffff, 0x46464646, },
+ { 1, 2, 0, 0x00001840, 0xffffffff, 0x38404244, },
+ { 1, 2, 0, 0x00001844, 0xffffffff, 0x46463738, },
+ { 1, 2, 1, 0x00001848, 0xffffffff, 0x42444646, },
+ { 1, 2, 1, 0x0000184c, 0xffffffff, 0x35373840, },
+ { 1, 2, 2, 0x000018e0, 0xffffffff, 0x46464646, },
+ { 1, 2, 2, 0x000018e4, 0xffffffff, 0x37394143, },
+ { 1, 2, 2, 0x000018e8, 0x0000ffff, 0x33333335, },
+ { 1, 3, 0, 0x00001a24, 0xffffffff, 0x46464646, },
+ { 1, 3, 0, 0x00001a28, 0xffffffff, 0x39414345, },
+ { 1, 3, 0, 0x00001a2c, 0xffffffff, 0x46464646, },
+ { 1, 3, 0, 0x00001a30, 0xffffffff, 0x38404244, },
+ { 1, 3, 1, 0x00001a34, 0xffffffff, 0x46464646, },
+ { 1, 3, 1, 0x00001a38, 0xffffffff, 0x36384042, },
+ { 1, 3, 2, 0x00001ad8, 0xffffffff, 0x46464646, },
+ { 1, 3, 2, 0x00001adc, 0xffffffff, 0x34363840, },
+ { 1, 3, 0, 0x00001a3c, 0xffffffff, 0x46464646, },
+ { 1, 3, 0, 0x00001a40, 0xffffffff, 0x38404244, },
+ { 1, 3, 0, 0x00001a44, 0xffffffff, 0x46463738, },
+ { 1, 3, 1, 0x00001a48, 0xffffffff, 0x42444646, },
+ { 1, 3, 1, 0x00001a4c, 0xffffffff, 0x35373840, },
+ { 1, 3, 2, 0x00001ae0, 0xffffffff, 0x46464646, },
+ { 1, 3, 2, 0x00001ae4, 0xffffffff, 0x37394143, },
+ { 1, 3, 2, 0x00001ae8, 0x0000ffff, 0x33333335, },
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8814a_bb_pg_type8);
+
+static const u32 rtw8814a_rf_a[] = {
+ 0x018, 0x00013124,
+ 0x040, 0x00000C00,
+ 0x058, 0x00000F98,
+ 0x07F, 0x00068004,
+ 0x0B0, 0x000FFFFE,
+ 0x0B1, 0x0003FF48,
+ 0x0B2, 0x0006AA3F,
+ 0x0B3, 0x000FFC9A,
+ 0x0B4, 0x0000A78F,
+ 0x0B5, 0x00000A3F,
+ 0x0B6, 0x0000C09C,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B7, 0x00030008,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B7, 0x00030008,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B7, 0x00030008,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B7, 0x00030008,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B7, 0x00030008,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B7, 0x00030008,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B7, 0x00030008,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B7, 0x00030008,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B7, 0x00030008,
+ 0xA0000000, 0x00000000,
+ 0x0B7, 0x0003000C,
+ 0xB0000000, 0x00000000,
+ 0x0B8, 0x0007400E,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B9, 0x000FBF50,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B9, 0x000FBF50,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B9, 0x000FBF50,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B9, 0x000FBF50,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B9, 0x000FBF50,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B9, 0x000FBF50,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B9, 0x000FBF50,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B9, 0x000FBF50,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B9, 0x000FBF50,
+ 0xA0000000, 0x00000000,
+ 0x0B9, 0x000FBF50,
+ 0xB0000000, 0x00000000,
+ 0x0BA, 0x00050780,
+ 0x0BB, 0x00000000,
+ 0x0BC, 0x00040009,
+ 0x0BD, 0x00000000,
+ 0x0BE, 0x00000000,
+ 0x0BF, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x03E, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030000,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00030000,
+ 0xB0000000, 0x00000000,
+ 0x03E, 0x00020000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00040000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00040000,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00040000,
+ 0xB0000000, 0x00000000,
+ 0x03E, 0x00040000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030000,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00030000,
+ 0xB0000000, 0x00000000,
+ 0x03E, 0x00060000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030000,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00030000,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00006800,
+ 0x03E, 0x00000080,
+ 0x03F, 0x00006000,
+ 0x03E, 0x00000100,
+ 0x03F, 0x00004800,
+ 0x03E, 0x00000180,
+ 0x03F, 0x00004000,
+ 0x03E, 0x00000200,
+ 0x03F, 0x00004000,
+ 0x03E, 0x00000280,
+ 0x03F, 0x00002800,
+ 0x03E, 0x00000300,
+ 0x03F, 0x00002800,
+ 0x03E, 0x00000380,
+ 0x03F, 0x00002000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00040000,
+ 0x03E, 0x00000000,
+ 0x03F, 0x000000BC,
+ 0x03E, 0x00000040,
+ 0x03F, 0x00000053,
+ 0x03E, 0x00000050,
+ 0x03F, 0x00000050,
+ 0x03E, 0x00000060,
+ 0x03F, 0x00000050,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000400,
+ 0x03E, 0x00000006,
+ 0x041, 0x000EE080,
+ 0x03E, 0x00000008,
+ 0x041, 0x000EE0C0,
+ 0x03E, 0x0000000A,
+ 0x041, 0x000EE100,
+ 0x03E, 0x0000000C,
+ 0x041, 0x000EE100,
+ 0x0EF, 0x00000000,
+ 0x018, 0x00000006,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0xA0000000, 0x00000000,
+ 0x086, 0x000E4B58,
+ 0x087, 0x00049F80,
+ 0xB0000000, 0x00000000,
+ 0x0DF, 0x00000008,
+ 0x0EF, 0x00002000,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x000179C3,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x000179C3,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x000179C3,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x000179C3,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x000179C3,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x000179C3,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x000179C3,
+ 0xA0000000, 0x00000000,
+ 0x03B, 0x0003F258,
+ 0x03B, 0x00030A58,
+ 0x03B, 0x0002FA58,
+ 0x03B, 0x00022590,
+ 0x03B, 0x0001FA50,
+ 0x03B, 0x00010248,
+ 0x03B, 0x00008240,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000100,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0000ADF6,
+ 0x034, 0x00009DF3,
+ 0x034, 0x00008DF0,
+ 0x034, 0x00007DED,
+ 0x034, 0x00006DEA,
+ 0x034, 0x00005CED,
+ 0x034, 0x00004CEA,
+ 0x034, 0x000034EA,
+ 0x034, 0x000024E7,
+ 0x034, 0x0000146A,
+ 0x034, 0x0000006B,
+ 0xB0000000, 0x00000000,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0008ADF6,
+ 0x034, 0x00089DF3,
+ 0x034, 0x00088DF0,
+ 0x034, 0x00087DED,
+ 0x034, 0x00086DEA,
+ 0x034, 0x00085CED,
+ 0x034, 0x00084CEA,
+ 0x034, 0x000834EA,
+ 0x034, 0x000824E7,
+ 0x034, 0x0008146A,
+ 0x034, 0x0008006B,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x000020A2,
+ 0x0DF, 0x00000080,
+ 0x035, 0x00000192,
+ 0x035, 0x00008192,
+ 0x035, 0x00010192,
+ 0x036, 0x00000024,
+ 0x036, 0x00008024,
+ 0x036, 0x00010024,
+ 0x036, 0x00018024,
+ 0x0EF, 0x00000000,
+ 0x051, 0x00000C21,
+ 0x052, 0x000006D9,
+ 0x053, 0x000FC649,
+ 0x054, 0x0000017E,
+ 0x018, 0x0001012A,
+ 0x081, 0x0007FC00,
+ 0x089, 0x00050110,
+ 0x08A, 0x00043E50,
+ 0x08B, 0x0002E180,
+ 0x08C, 0x00093C3C,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x000F8000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x000F8000,
+ 0xA0000000, 0x00000000,
+ 0x085, 0x000F8000,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0xA0000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00001000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00038023,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00084000,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x00028000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00030023,
+ 0x03C, 0x00048000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00028623,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00021633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x0001C633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00010293,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00009593,
+ 0x03C, 0x00000000,
+ 0x03A, 0x00000148,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0000118B,
+ 0xA0000000, 0x00000000,
+ 0x03B, 0x0000078B,
+ 0xB0000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x000AC000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00040000,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x0004C000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00070023,
+ 0x03C, 0x00048000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00068623,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00061633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x0005C633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00050293,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00049593,
+ 0x03C, 0x00000000,
+ 0x03A, 0x00000148,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0004138B,
+ 0xA0000000, 0x00000000,
+ 0x03B, 0x0004078B,
+ 0xB0000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00084000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0008C000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00084000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00084000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00060000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00084000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00084000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x00004000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B0023,
+ 0x80000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000A8623,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000A1633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x0009C633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00090293,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00089593,
+ 0x03C, 0x00000000,
+ 0x03A, 0x00000148,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0008118B,
+ 0xA0000000, 0x00000000,
+ 0x03B, 0x0008078B,
+ 0xB0000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000800,
+ 0x03B, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000801,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001003,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001003,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001003,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0xB0000000, 0x00000000,
+ 0x03B, 0x00040000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001801,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000003,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000003,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001001,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0xB0000000, 0x00000000,
+ 0x03B, 0x00080000,
+ 0x80000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001802,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000800,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x00001002,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x018, 0x00013124,
+ 0x0EF, 0x00000100,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A1AD,
+ 0x034, 0x000491AA,
+ 0x034, 0x000481A7,
+ 0x034, 0x000470AA,
+ 0x034, 0x000460A7,
+ 0x034, 0x00045049,
+ 0x034, 0x00044046,
+ 0x034, 0x00043026,
+ 0x034, 0x00042009,
+ 0x034, 0x00041006,
+ 0x034, 0x00040003,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3EF,
+ 0x034, 0x000493AF,
+ 0x034, 0x000483AB,
+ 0x034, 0x0004718C,
+ 0x034, 0x00046189,
+ 0x034, 0x0004506D,
+ 0x034, 0x0004406A,
+ 0x034, 0x0004302C,
+ 0x034, 0x00042029,
+ 0x034, 0x00041026,
+ 0x034, 0x00040023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3EF,
+ 0x034, 0x000493AF,
+ 0x034, 0x000483AB,
+ 0x034, 0x0004718C,
+ 0x034, 0x00046189,
+ 0x034, 0x0004506D,
+ 0x034, 0x0004406A,
+ 0x034, 0x0004302C,
+ 0x034, 0x00042029,
+ 0x034, 0x00041026,
+ 0x034, 0x00040023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3EF,
+ 0x034, 0x000493AF,
+ 0x034, 0x000483AB,
+ 0x034, 0x0004718C,
+ 0x034, 0x00046189,
+ 0x034, 0x0004506D,
+ 0x034, 0x0004406A,
+ 0x034, 0x0004302C,
+ 0x034, 0x00042029,
+ 0x034, 0x00041026,
+ 0x034, 0x00040023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3EF,
+ 0x034, 0x000493AF,
+ 0x034, 0x000483AB,
+ 0x034, 0x0004718C,
+ 0x034, 0x00046189,
+ 0x034, 0x0004506D,
+ 0x034, 0x0004406A,
+ 0x034, 0x0004302C,
+ 0x034, 0x00042029,
+ 0x034, 0x00041026,
+ 0x034, 0x00040023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3F5,
+ 0x034, 0x000493F2,
+ 0x034, 0x000483B0,
+ 0x034, 0x00047370,
+ 0x034, 0x0004636D,
+ 0x034, 0x0004536A,
+ 0x034, 0x00044349,
+ 0x034, 0x0004316A,
+ 0x034, 0x00042167,
+ 0x034, 0x00041129,
+ 0x034, 0x00040049,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3EF,
+ 0x034, 0x000493AF,
+ 0x034, 0x000483AB,
+ 0x034, 0x0004718C,
+ 0x034, 0x00046189,
+ 0x034, 0x0004506D,
+ 0x034, 0x0004406A,
+ 0x034, 0x0004302C,
+ 0x034, 0x00042029,
+ 0x034, 0x00041026,
+ 0x034, 0x00040023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3EF,
+ 0x034, 0x000493AF,
+ 0x034, 0x000483AB,
+ 0x034, 0x0004718C,
+ 0x034, 0x00046189,
+ 0x034, 0x0004506D,
+ 0x034, 0x0004406A,
+ 0x034, 0x0004302C,
+ 0x034, 0x00042029,
+ 0x034, 0x00041026,
+ 0x034, 0x00040023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0004AFF1,
+ 0x034, 0x00049FEE,
+ 0x034, 0x00048FEB,
+ 0x034, 0x00047FE8,
+ 0x034, 0x00046DEA,
+ 0x034, 0x00045DE7,
+ 0x034, 0x00044CEA,
+ 0x034, 0x00043CE7,
+ 0x034, 0x00042C69,
+ 0x034, 0x00041C66,
+ 0x034, 0x00040C28,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A1AD,
+ 0x034, 0x000291AA,
+ 0x034, 0x000281A7,
+ 0x034, 0x000270AA,
+ 0x034, 0x000260A7,
+ 0x034, 0x00025049,
+ 0x034, 0x00024046,
+ 0x034, 0x00023026,
+ 0x034, 0x00022009,
+ 0x034, 0x00021006,
+ 0x034, 0x00020003,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EF,
+ 0x034, 0x000293AC,
+ 0x034, 0x0002838A,
+ 0x034, 0x0002718C,
+ 0x034, 0x00026189,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EF,
+ 0x034, 0x000293AC,
+ 0x034, 0x0002838A,
+ 0x034, 0x0002718C,
+ 0x034, 0x00026189,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EF,
+ 0x034, 0x000293AC,
+ 0x034, 0x0002838A,
+ 0x034, 0x0002718C,
+ 0x034, 0x00026189,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EF,
+ 0x034, 0x000293AC,
+ 0x034, 0x0002838A,
+ 0x034, 0x0002718C,
+ 0x034, 0x00026189,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3F5,
+ 0x034, 0x000293F2,
+ 0x034, 0x000282F1,
+ 0x034, 0x000272B0,
+ 0x034, 0x000262AD,
+ 0x034, 0x000252AA,
+ 0x034, 0x000242A7,
+ 0x034, 0x000230EC,
+ 0x034, 0x000220E9,
+ 0x034, 0x0002106A,
+ 0x034, 0x00020067,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EF,
+ 0x034, 0x000293AC,
+ 0x034, 0x0002838A,
+ 0x034, 0x0002718C,
+ 0x034, 0x00026189,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EF,
+ 0x034, 0x000293AC,
+ 0x034, 0x0002838A,
+ 0x034, 0x0002718C,
+ 0x034, 0x00026189,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0002AFF1,
+ 0x034, 0x00029FEE,
+ 0x034, 0x00028FEB,
+ 0x034, 0x00027FE8,
+ 0x034, 0x00026DEA,
+ 0x034, 0x00025DE7,
+ 0x034, 0x00024CEA,
+ 0x034, 0x00023CE7,
+ 0x034, 0x00022C69,
+ 0x034, 0x00021C66,
+ 0x034, 0x00020C28,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EC,
+ 0x034, 0x0000938C,
+ 0x034, 0x000081AD,
+ 0x034, 0x000071AA,
+ 0x034, 0x000061A7,
+ 0x034, 0x000050AA,
+ 0x034, 0x000040A7,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x0000100C,
+ 0x034, 0x00000009,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AC,
+ 0x034, 0x0000838A,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AC,
+ 0x034, 0x0000838A,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AC,
+ 0x034, 0x0000838A,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AC,
+ 0x034, 0x0000838A,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3F4,
+ 0x034, 0x000093F1,
+ 0x034, 0x000082B1,
+ 0x034, 0x000071D1,
+ 0x034, 0x000061CE,
+ 0x034, 0x000051CB,
+ 0x034, 0x000041C8,
+ 0x034, 0x000030CB,
+ 0x034, 0x000020C8,
+ 0x034, 0x00001087,
+ 0x034, 0x00000084,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AC,
+ 0x034, 0x0000838A,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AC,
+ 0x034, 0x0000838A,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0000AFF1,
+ 0x034, 0x00009FEE,
+ 0x034, 0x00008FEB,
+ 0x034, 0x00007FE8,
+ 0x034, 0x00006DEA,
+ 0x034, 0x00005DE7,
+ 0x034, 0x00004CEA,
+ 0x034, 0x00003CE7,
+ 0x034, 0x00002C69,
+ 0x034, 0x00001C66,
+ 0x034, 0x00000C28,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA1AD,
+ 0x034, 0x000C91AA,
+ 0x034, 0x000C81A7,
+ 0x034, 0x000C70AA,
+ 0x034, 0x000C60A7,
+ 0x034, 0x000C5049,
+ 0x034, 0x000C4046,
+ 0x034, 0x000C3026,
+ 0x034, 0x000C2009,
+ 0x034, 0x000C1006,
+ 0x034, 0x000C0003,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3EF,
+ 0x034, 0x000C93AF,
+ 0x034, 0x000C83AB,
+ 0x034, 0x000C718C,
+ 0x034, 0x000C6189,
+ 0x034, 0x000C506D,
+ 0x034, 0x000C406A,
+ 0x034, 0x000C302C,
+ 0x034, 0x000C2029,
+ 0x034, 0x000C1026,
+ 0x034, 0x000C0023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3EF,
+ 0x034, 0x000C93AF,
+ 0x034, 0x000C83AB,
+ 0x034, 0x000C718C,
+ 0x034, 0x000C6189,
+ 0x034, 0x000C506D,
+ 0x034, 0x000C406A,
+ 0x034, 0x000C302C,
+ 0x034, 0x000C2029,
+ 0x034, 0x000C1026,
+ 0x034, 0x000C0023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3EF,
+ 0x034, 0x000C93AF,
+ 0x034, 0x000C83AB,
+ 0x034, 0x000C718C,
+ 0x034, 0x000C6189,
+ 0x034, 0x000C506D,
+ 0x034, 0x000C406A,
+ 0x034, 0x000C302C,
+ 0x034, 0x000C2029,
+ 0x034, 0x000C1026,
+ 0x034, 0x000C0023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3EF,
+ 0x034, 0x000C93AF,
+ 0x034, 0x000C83AB,
+ 0x034, 0x000C718C,
+ 0x034, 0x000C6189,
+ 0x034, 0x000C506D,
+ 0x034, 0x000C406A,
+ 0x034, 0x000C302C,
+ 0x034, 0x000C2029,
+ 0x034, 0x000C1026,
+ 0x034, 0x000C0023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3F5,
+ 0x034, 0x000C93F2,
+ 0x034, 0x000C83B0,
+ 0x034, 0x000C7370,
+ 0x034, 0x000C636D,
+ 0x034, 0x000C536A,
+ 0x034, 0x000C4349,
+ 0x034, 0x000C316A,
+ 0x034, 0x000C2167,
+ 0x034, 0x000C1129,
+ 0x034, 0x000C0049,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3EF,
+ 0x034, 0x000C93AF,
+ 0x034, 0x000C83AB,
+ 0x034, 0x000C718C,
+ 0x034, 0x000C6189,
+ 0x034, 0x000C506D,
+ 0x034, 0x000C406A,
+ 0x034, 0x000C302C,
+ 0x034, 0x000C2029,
+ 0x034, 0x000C1026,
+ 0x034, 0x000C0023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3EF,
+ 0x034, 0x000C93AF,
+ 0x034, 0x000C83AB,
+ 0x034, 0x000C718C,
+ 0x034, 0x000C6189,
+ 0x034, 0x000C506D,
+ 0x034, 0x000C406A,
+ 0x034, 0x000C302C,
+ 0x034, 0x000C2029,
+ 0x034, 0x000C1026,
+ 0x034, 0x000C0023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x000CA794,
+ 0x034, 0x000C9791,
+ 0x034, 0x000C878E,
+ 0x034, 0x000C778B,
+ 0x034, 0x000C658D,
+ 0x034, 0x000C558A,
+ 0x034, 0x000C448D,
+ 0x034, 0x000C348A,
+ 0x034, 0x000C244C,
+ 0x034, 0x000C1449,
+ 0x034, 0x000C042B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA1AD,
+ 0x034, 0x000A91AA,
+ 0x034, 0x000A81A7,
+ 0x034, 0x000A70AA,
+ 0x034, 0x000A60A7,
+ 0x034, 0x000A5049,
+ 0x034, 0x000A4046,
+ 0x034, 0x000A3026,
+ 0x034, 0x000A2009,
+ 0x034, 0x000A1006,
+ 0x034, 0x000A0003,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EF,
+ 0x034, 0x000A93AC,
+ 0x034, 0x000A838A,
+ 0x034, 0x000A718C,
+ 0x034, 0x000A6189,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EF,
+ 0x034, 0x000A93AC,
+ 0x034, 0x000A838A,
+ 0x034, 0x000A718C,
+ 0x034, 0x000A6189,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EF,
+ 0x034, 0x000A93AC,
+ 0x034, 0x000A838A,
+ 0x034, 0x000A718C,
+ 0x034, 0x000A6189,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EF,
+ 0x034, 0x000A93AC,
+ 0x034, 0x000A838A,
+ 0x034, 0x000A718C,
+ 0x034, 0x000A6189,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3F5,
+ 0x034, 0x000A93F2,
+ 0x034, 0x000A82F1,
+ 0x034, 0x000A72B0,
+ 0x034, 0x000A62AD,
+ 0x034, 0x000A52AA,
+ 0x034, 0x000A42A7,
+ 0x034, 0x000A30EC,
+ 0x034, 0x000A20E9,
+ 0x034, 0x000A106A,
+ 0x034, 0x000A0067,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EF,
+ 0x034, 0x000A93AC,
+ 0x034, 0x000A838A,
+ 0x034, 0x000A718C,
+ 0x034, 0x000A6189,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EF,
+ 0x034, 0x000A93AC,
+ 0x034, 0x000A838A,
+ 0x034, 0x000A718C,
+ 0x034, 0x000A6189,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x000AA794,
+ 0x034, 0x000A9791,
+ 0x034, 0x000A878E,
+ 0x034, 0x000A778B,
+ 0x034, 0x000A658D,
+ 0x034, 0x000A558A,
+ 0x034, 0x000A448D,
+ 0x034, 0x000A348A,
+ 0x034, 0x000A244C,
+ 0x034, 0x000A1449,
+ 0x034, 0x000A042B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EC,
+ 0x034, 0x0008938C,
+ 0x034, 0x000881AD,
+ 0x034, 0x000871AA,
+ 0x034, 0x000861A7,
+ 0x034, 0x000850AA,
+ 0x034, 0x000840A7,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x0008100C,
+ 0x034, 0x00080009,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AC,
+ 0x034, 0x0008838A,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AC,
+ 0x034, 0x0008838A,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AC,
+ 0x034, 0x0008838A,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AC,
+ 0x034, 0x0008838A,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3F4,
+ 0x034, 0x000893F1,
+ 0x034, 0x000882B1,
+ 0x034, 0x000871D1,
+ 0x034, 0x000861CE,
+ 0x034, 0x000851CB,
+ 0x034, 0x000841C8,
+ 0x034, 0x000830CB,
+ 0x034, 0x000820C8,
+ 0x034, 0x00081087,
+ 0x034, 0x00080084,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AC,
+ 0x034, 0x0008838A,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AC,
+ 0x034, 0x0008838A,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0008A794,
+ 0x034, 0x00089791,
+ 0x034, 0x0008878E,
+ 0x034, 0x0008778B,
+ 0x034, 0x0008658D,
+ 0x034, 0x0008558A,
+ 0x034, 0x0008448D,
+ 0x034, 0x0008348A,
+ 0x034, 0x0008244C,
+ 0x034, 0x00081449,
+ 0x034, 0x0008042B,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0xA0000000, 0x00000000,
+ 0x035, 0x00000747,
+ 0x035, 0x00008747,
+ 0x035, 0x00010747,
+ 0x035, 0x00020747,
+ 0x035, 0x00028747,
+ 0x035, 0x00030747,
+ 0x035, 0x00040747,
+ 0x035, 0x00048747,
+ 0x035, 0x00050747,
+ 0x035, 0x000805FB,
+ 0x035, 0x000885FB,
+ 0x035, 0x000905FB,
+ 0x035, 0x000A05FB,
+ 0x035, 0x000A85FB,
+ 0x035, 0x000B05FB,
+ 0x035, 0x000C05FB,
+ 0x035, 0x000C85FB,
+ 0x035, 0x000D05FB,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000473,
+ 0x036, 0x00008473,
+ 0x036, 0x00010473,
+ 0x036, 0x00020473,
+ 0x036, 0x00028473,
+ 0x036, 0x00030473,
+ 0x036, 0x00040473,
+ 0x036, 0x00048473,
+ 0x036, 0x00050473,
+ 0x036, 0x00080473,
+ 0x036, 0x00088473,
+ 0x036, 0x00090473,
+ 0x036, 0x000A0473,
+ 0x036, 0x000A8473,
+ 0x036, 0x000B0473,
+ 0x036, 0x000C0473,
+ 0x036, 0x000C8473,
+ 0x036, 0x000D0473,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0xA0000000, 0x00000000,
+ 0x036, 0x00000473,
+ 0x036, 0x00008473,
+ 0x036, 0x00010473,
+ 0x036, 0x00020473,
+ 0x036, 0x00028473,
+ 0x036, 0x00030473,
+ 0x036, 0x00040473,
+ 0x036, 0x00048473,
+ 0x036, 0x00050473,
+ 0x036, 0x00080473,
+ 0x036, 0x00088473,
+ 0x036, 0x00090473,
+ 0x036, 0x000A0473,
+ 0x036, 0x000A8473,
+ 0x036, 0x000B0473,
+ 0x036, 0x000C0473,
+ 0x036, 0x000C8473,
+ 0x036, 0x000D0473,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00000004,
+ 0x037, 0x00000000,
+ 0x038, 0x00005146,
+ 0x037, 0x00004000,
+ 0x038, 0x00005146,
+ 0x037, 0x00008000,
+ 0x038, 0x00005146,
+ 0x037, 0x00010000,
+ 0x038, 0x00005146,
+ 0x037, 0x00014000,
+ 0x038, 0x00005146,
+ 0x037, 0x00018000,
+ 0x038, 0x00004D4E,
+ 0x037, 0x0001C000,
+ 0x038, 0x00004D4E,
+ 0x037, 0x00020000,
+ 0x038, 0x00004D4E,
+ 0x037, 0x00024000,
+ 0x038, 0x000071C6,
+ 0x037, 0x00028000,
+ 0x038, 0x000071C6,
+ 0x037, 0x0002C000,
+ 0x038, 0x000071C6,
+ 0x037, 0x00030000,
+ 0x038, 0x000071CE,
+ 0x037, 0x00034000,
+ 0x038, 0x000071CE,
+ 0x037, 0x00038000,
+ 0x038, 0x00005126,
+ 0x037, 0x0003C000,
+ 0x038, 0x00005126,
+ 0x037, 0x00040000,
+ 0x038, 0x00005126,
+ 0x037, 0x00044000,
+ 0x038, 0x00005126,
+ 0x037, 0x00048000,
+ 0x038, 0x00005126,
+ 0x037, 0x00080000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00084000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00088000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00090000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00094000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00098000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x0009C000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000A0000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000A4000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000A8000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000AC000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000B0000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000B4000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000B8000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000BC000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000C0000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000C4000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000C8000,
+ 0x038, 0x00005ECE,
+ 0x0EF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000008,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x0000007D,
+ 0x03C, 0x0000047D,
+ 0x03C, 0x0000087D,
+ 0x03C, 0x0000107D,
+ 0x03C, 0x0000147D,
+ 0x03C, 0x0000187D,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x0000054A,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x0000154A,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x0000254A,
+ 0x03C, 0x00002821,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x0000054A,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x0000154A,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x0000254A,
+ 0x03C, 0x00002821,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x0000054A,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x0000154A,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x0000254A,
+ 0x03C, 0x00002821,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x0000054A,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x0000154A,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x0000254A,
+ 0x03C, 0x00002821,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x0000054A,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x0000154A,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x0000254A,
+ 0x03C, 0x00002821,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x0000054A,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x0000154A,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x0000254A,
+ 0x03C, 0x00002821,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x0000054A,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x0000154A,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x0000254A,
+ 0x03C, 0x00002821,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x0000054A,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x0000154A,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x0000254A,
+ 0x03C, 0x00002821,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x0000037E,
+ 0x03C, 0x00000575,
+ 0x03C, 0x00000971,
+ 0x03C, 0x0000127E,
+ 0x03C, 0x00001575,
+ 0x03C, 0x00001871,
+ 0x03C, 0x0000217E,
+ 0x03C, 0x00002575,
+ 0x03C, 0x00002871,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x061, 0x000C0D47,
+ 0x062, 0x0000133C,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0xA0000000, 0x00000000,
+ 0x063, 0x0007D0E7,
+ 0xB0000000, 0x00000000,
+ 0x064, 0x00014FEC,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0xA0000000, 0x00000000,
+ 0x065, 0x000933FF,
+ 0xB0000000, 0x00000000,
+ 0x066, 0x00000040,
+ 0x057, 0x00050000,
+ 0x056, 0x00051DF0,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x055, 0x00082061,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x055, 0x00082061,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x055, 0x00082061,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x055, 0x00082061,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x055, 0x00082061,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x055, 0x00082061,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x055, 0x00082061,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x055, 0x00082061,
+ 0xA0000000, 0x00000000,
+ 0x055, 0x00082060,
+ 0xB0000000, 0x00000000,
+ 0x01C, 0x000739D2,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x01F, 0x0002255C,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x01F, 0x0002255C,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x01F, 0x0002255C,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x01F, 0x0002255C,
+ 0xA0000000, 0x00000000,
+ 0x01F, 0x0002255C,
+ 0xB0000000, 0x00000000,
+ 0x0B1, 0x0007FF48,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0C4, 0x00081700,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0C4, 0x00081700,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0C4, 0x00081700,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0C4, 0x00081700,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0C4, 0x00081700,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0C4, 0x00081700,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x0C4, 0x00081700,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x0C4, 0x00081700,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x0C4, 0x00081700,
+ 0xA0000000, 0x00000000,
+ 0x0C4, 0x00083F00,
+ 0xB0000000, 0x00000000,
+ 0x018, 0x0001B126,
+ 0xFFE, 0x00000000,
+ 0xFFE, 0x00000000,
+ 0xFFE, 0x00000000,
+ 0x018, 0x00013126,
+ 0x018, 0x00013124,
+};
+
+RTW_DECL_TABLE_RF_RADIO(rtw8814a_rf_a, A);
+
+static const u32 rtw8814a_rf_b[] = {
+ 0x018, 0x00013124,
+ 0x040, 0x00000C00,
+ 0x058, 0x00000F98,
+ 0x07F, 0x00068004,
+ 0x018, 0x00000006,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0xA0000000, 0x00000000,
+ 0x086, 0x000E4B58,
+ 0x087, 0x00049F80,
+ 0xB0000000, 0x00000000,
+ 0x0DF, 0x00000008,
+ 0x0EF, 0x00002000,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017BC3,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F39B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017BC3,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017BC3,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F39B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017BC3,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017BC3,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017BC3,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017BC3,
+ 0xA0000000, 0x00000000,
+ 0x03B, 0x0003F258,
+ 0x03B, 0x00030A58,
+ 0x03B, 0x0002FA58,
+ 0x03B, 0x00022590,
+ 0x03B, 0x0001FA50,
+ 0x03B, 0x00010248,
+ 0x03B, 0x00008240,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000100,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0000ADF6,
+ 0x034, 0x00009DF3,
+ 0x034, 0x00008DF0,
+ 0x034, 0x00007DED,
+ 0x034, 0x00006DEA,
+ 0x034, 0x00005CED,
+ 0x034, 0x00004CEA,
+ 0x034, 0x000034EA,
+ 0x034, 0x000024E7,
+ 0x034, 0x0000146A,
+ 0x034, 0x0000006B,
+ 0xB0000000, 0x00000000,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0008ADF6,
+ 0x034, 0x00089DF3,
+ 0x034, 0x00088DF0,
+ 0x034, 0x00087DED,
+ 0x034, 0x00086DEA,
+ 0x034, 0x00085CED,
+ 0x034, 0x00084CEA,
+ 0x034, 0x000834EA,
+ 0x034, 0x000824E7,
+ 0x034, 0x0008146A,
+ 0x034, 0x0008006B,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x000020A2,
+ 0x0DF, 0x00000080,
+ 0x035, 0x00000192,
+ 0x035, 0x00008192,
+ 0x035, 0x00010192,
+ 0x036, 0x00000024,
+ 0x036, 0x00008024,
+ 0x036, 0x00010024,
+ 0x036, 0x00018024,
+ 0x0EF, 0x00000000,
+ 0x051, 0x00000C21,
+ 0x052, 0x000006D9,
+ 0x053, 0x000FC649,
+ 0x054, 0x0000017E,
+ 0x018, 0x0001012A,
+ 0x081, 0x0007FC00,
+ 0x089, 0x00050110,
+ 0x08A, 0x00043E50,
+ 0x08B, 0x0002E180,
+ 0x08C, 0x00093C3C,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x000F8000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x000F8000,
+ 0xA0000000, 0x00000000,
+ 0x085, 0x000F8000,
+ 0xB0000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x0EF, 0x00001000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00038023,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00084000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x00040000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00030023,
+ 0x03C, 0x00048000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00028623,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00021633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x0001C633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00010293,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00009593,
+ 0x03C, 0x00000000,
+ 0x03A, 0x00000148,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x00000F8B,
+ 0xA0000000, 0x00000000,
+ 0x03B, 0x0000078B,
+ 0xB0000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00060000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00048000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00048000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00048000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00048000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00070023,
+ 0x03C, 0x00048000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00068623,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00061633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x0005C633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00050293,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00049593,
+ 0x03C, 0x00000000,
+ 0x03A, 0x00000148,
+ 0x03B, 0x0004078B,
+ 0x03C, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00048000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00060000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0004C000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00044000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0004C000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00048000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00004000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00044000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00044000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B0023,
+ 0x80000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000A8623,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000A1633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x0009C633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00090293,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00089593,
+ 0x03C, 0x00000000,
+ 0x03A, 0x00000148,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0008138B,
+ 0xA0000000, 0x00000000,
+ 0x03B, 0x0008078B,
+ 0xB0000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000800,
+ 0x03B, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001003,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001003,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001003,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0xB0000000, 0x00000000,
+ 0x03B, 0x00040000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001003,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000800,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000800,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0xB0000000, 0x00000000,
+ 0x03B, 0x00080000,
+ 0x80000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001802,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001802,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x00001002,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x018, 0x00013124,
+ 0x0EF, 0x00000100,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A38C,
+ 0x034, 0x000491AD,
+ 0x034, 0x000481AA,
+ 0x034, 0x000471A7,
+ 0x034, 0x000460AA,
+ 0x034, 0x000450A7,
+ 0x034, 0x0004402C,
+ 0x034, 0x00043029,
+ 0x034, 0x0004200C,
+ 0x034, 0x00041009,
+ 0x034, 0x00040006,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A38C,
+ 0x034, 0x00049389,
+ 0x034, 0x0004816D,
+ 0x034, 0x0004716A,
+ 0x034, 0x0004606D,
+ 0x034, 0x0004506A,
+ 0x034, 0x0004402C,
+ 0x034, 0x00043029,
+ 0x034, 0x00042026,
+ 0x034, 0x00041009,
+ 0x034, 0x00040006,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A38B,
+ 0x034, 0x00049388,
+ 0x034, 0x0004818B,
+ 0x034, 0x00047188,
+ 0x034, 0x0004606D,
+ 0x034, 0x0004506A,
+ 0x034, 0x0004402C,
+ 0x034, 0x00043029,
+ 0x034, 0x00042026,
+ 0x034, 0x00041009,
+ 0x034, 0x00040006,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A38C,
+ 0x034, 0x00049389,
+ 0x034, 0x0004816D,
+ 0x034, 0x0004716A,
+ 0x034, 0x0004606D,
+ 0x034, 0x0004506A,
+ 0x034, 0x0004402C,
+ 0x034, 0x00043029,
+ 0x034, 0x00042026,
+ 0x034, 0x00041009,
+ 0x034, 0x00040006,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A38B,
+ 0x034, 0x00049388,
+ 0x034, 0x0004818B,
+ 0x034, 0x00047188,
+ 0x034, 0x0004606D,
+ 0x034, 0x0004506A,
+ 0x034, 0x0004402C,
+ 0x034, 0x00043029,
+ 0x034, 0x00042026,
+ 0x034, 0x00041009,
+ 0x034, 0x00040006,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3F5,
+ 0x034, 0x000493F3,
+ 0x034, 0x000483B2,
+ 0x034, 0x00047390,
+ 0x034, 0x0004638D,
+ 0x034, 0x0004538A,
+ 0x034, 0x00044387,
+ 0x034, 0x0004324A,
+ 0x034, 0x00042247,
+ 0x034, 0x0004104D,
+ 0x034, 0x0004004A,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004AFF7,
+ 0x034, 0x00049FF6,
+ 0x034, 0x00048FF3,
+ 0x034, 0x00047FF0,
+ 0x034, 0x00046FED,
+ 0x034, 0x00045FEA,
+ 0x034, 0x00044FE7,
+ 0x034, 0x00043DEA,
+ 0x034, 0x00042DE7,
+ 0x034, 0x00041DE4,
+ 0x034, 0x00040CE7,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A38C,
+ 0x034, 0x00049389,
+ 0x034, 0x0004816D,
+ 0x034, 0x0004716A,
+ 0x034, 0x0004606D,
+ 0x034, 0x0004506A,
+ 0x034, 0x0004402C,
+ 0x034, 0x00043029,
+ 0x034, 0x00042026,
+ 0x034, 0x00041009,
+ 0x034, 0x00040006,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A38C,
+ 0x034, 0x00049389,
+ 0x034, 0x0004816D,
+ 0x034, 0x0004716A,
+ 0x034, 0x0004606D,
+ 0x034, 0x0004506A,
+ 0x034, 0x0004402C,
+ 0x034, 0x00043029,
+ 0x034, 0x00042026,
+ 0x034, 0x00041009,
+ 0x034, 0x00040006,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0004AFF4,
+ 0x034, 0x00049FF1,
+ 0x034, 0x00048FEE,
+ 0x034, 0x00047FEB,
+ 0x034, 0x00046FE8,
+ 0x034, 0x00045DEA,
+ 0x034, 0x00044CED,
+ 0x034, 0x00043CEA,
+ 0x034, 0x00042C6C,
+ 0x034, 0x00041C69,
+ 0x034, 0x00040C2B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A38C,
+ 0x034, 0x000291AD,
+ 0x034, 0x000281AA,
+ 0x034, 0x000271A7,
+ 0x034, 0x000260AA,
+ 0x034, 0x000250A7,
+ 0x034, 0x0002402C,
+ 0x034, 0x00023029,
+ 0x034, 0x0002200C,
+ 0x034, 0x00021009,
+ 0x034, 0x00020006,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EE,
+ 0x034, 0x000293AC,
+ 0x034, 0x00028389,
+ 0x034, 0x0002716D,
+ 0x034, 0x0002616A,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EF,
+ 0x034, 0x000293AD,
+ 0x034, 0x0002838A,
+ 0x034, 0x0002718C,
+ 0x034, 0x00026189,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EE,
+ 0x034, 0x000293AC,
+ 0x034, 0x00028389,
+ 0x034, 0x0002716D,
+ 0x034, 0x0002616A,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EF,
+ 0x034, 0x000293AD,
+ 0x034, 0x0002838A,
+ 0x034, 0x0002718C,
+ 0x034, 0x00026189,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3F5,
+ 0x034, 0x000293F3,
+ 0x034, 0x000283D0,
+ 0x034, 0x00027371,
+ 0x034, 0x0002636E,
+ 0x034, 0x0002536B,
+ 0x034, 0x00024368,
+ 0x034, 0x0002332A,
+ 0x034, 0x00022327,
+ 0x034, 0x0002104C,
+ 0x034, 0x00020049,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002AFF7,
+ 0x034, 0x00029FF6,
+ 0x034, 0x00028FF3,
+ 0x034, 0x00027FF0,
+ 0x034, 0x00026FED,
+ 0x034, 0x00025FEA,
+ 0x034, 0x00024FE7,
+ 0x034, 0x00023DEA,
+ 0x034, 0x00022DE7,
+ 0x034, 0x00021DE4,
+ 0x034, 0x00020F25,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EE,
+ 0x034, 0x000293AC,
+ 0x034, 0x00028389,
+ 0x034, 0x0002716D,
+ 0x034, 0x0002616A,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EE,
+ 0x034, 0x000293AC,
+ 0x034, 0x00028389,
+ 0x034, 0x0002716D,
+ 0x034, 0x0002616A,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0002AFF4,
+ 0x034, 0x00029FF1,
+ 0x034, 0x00028FEE,
+ 0x034, 0x00027FEB,
+ 0x034, 0x00026FE8,
+ 0x034, 0x00025DEA,
+ 0x034, 0x00024CED,
+ 0x034, 0x00023CEA,
+ 0x034, 0x00022C6C,
+ 0x034, 0x00021C69,
+ 0x034, 0x00020C2B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A38C,
+ 0x034, 0x000091AD,
+ 0x034, 0x000081AA,
+ 0x034, 0x000071A7,
+ 0x034, 0x000060AA,
+ 0x034, 0x000050A7,
+ 0x034, 0x0000402C,
+ 0x034, 0x00003029,
+ 0x034, 0x00002026,
+ 0x034, 0x00001009,
+ 0x034, 0x00000006,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EC,
+ 0x034, 0x000093AC,
+ 0x034, 0x000081EC,
+ 0x034, 0x0000716D,
+ 0x034, 0x0000616A,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000404C,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EF,
+ 0x034, 0x000093AD,
+ 0x034, 0x0000838A,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EC,
+ 0x034, 0x000093AC,
+ 0x034, 0x000081EC,
+ 0x034, 0x0000716D,
+ 0x034, 0x0000616A,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000404C,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EF,
+ 0x034, 0x000093AD,
+ 0x034, 0x0000838A,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3F4,
+ 0x034, 0x000093F0,
+ 0x034, 0x000083AE,
+ 0x034, 0x00007350,
+ 0x034, 0x0000634D,
+ 0x034, 0x0000534A,
+ 0x034, 0x00004347,
+ 0x034, 0x0000312D,
+ 0x034, 0x0000212A,
+ 0x034, 0x00001127,
+ 0x034, 0x0000002A,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000AFF7,
+ 0x034, 0x00009FF4,
+ 0x034, 0x00008FF1,
+ 0x034, 0x00007FEE,
+ 0x034, 0x00006FEB,
+ 0x034, 0x00005FE8,
+ 0x034, 0x00004DEB,
+ 0x034, 0x00003DE8,
+ 0x034, 0x00002DE5,
+ 0x034, 0x00001C8B,
+ 0x034, 0x00000C88,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EC,
+ 0x034, 0x000093AC,
+ 0x034, 0x000081EC,
+ 0x034, 0x0000716D,
+ 0x034, 0x0000616A,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000404C,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EC,
+ 0x034, 0x000093AC,
+ 0x034, 0x000081EC,
+ 0x034, 0x0000716D,
+ 0x034, 0x0000616A,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000404C,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0000AFF4,
+ 0x034, 0x00009FF1,
+ 0x034, 0x00008FEE,
+ 0x034, 0x00007FEB,
+ 0x034, 0x00006FE8,
+ 0x034, 0x00005DEA,
+ 0x034, 0x00004CED,
+ 0x034, 0x00003CEA,
+ 0x034, 0x00002C6C,
+ 0x034, 0x00001C69,
+ 0x034, 0x00000C2B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA38C,
+ 0x034, 0x000C91AD,
+ 0x034, 0x000C81AA,
+ 0x034, 0x000C71A7,
+ 0x034, 0x000C60AA,
+ 0x034, 0x000C50A7,
+ 0x034, 0x000C402C,
+ 0x034, 0x000C3029,
+ 0x034, 0x000C200C,
+ 0x034, 0x000C1009,
+ 0x034, 0x000C0006,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA38C,
+ 0x034, 0x000C9389,
+ 0x034, 0x000C816D,
+ 0x034, 0x000C716A,
+ 0x034, 0x000C606D,
+ 0x034, 0x000C506A,
+ 0x034, 0x000C402C,
+ 0x034, 0x000C3029,
+ 0x034, 0x000C2026,
+ 0x034, 0x000C1009,
+ 0x034, 0x000C0006,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA38B,
+ 0x034, 0x000C9388,
+ 0x034, 0x000C818B,
+ 0x034, 0x000C7188,
+ 0x034, 0x000C606D,
+ 0x034, 0x000C506A,
+ 0x034, 0x000C402C,
+ 0x034, 0x000C3029,
+ 0x034, 0x000C2026,
+ 0x034, 0x000C1009,
+ 0x034, 0x000C0006,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA38C,
+ 0x034, 0x000C9389,
+ 0x034, 0x000C816D,
+ 0x034, 0x000C716A,
+ 0x034, 0x000C606D,
+ 0x034, 0x000C506A,
+ 0x034, 0x000C402C,
+ 0x034, 0x000C3029,
+ 0x034, 0x000C2026,
+ 0x034, 0x000C1009,
+ 0x034, 0x000C0006,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA38B,
+ 0x034, 0x000C9388,
+ 0x034, 0x000C818B,
+ 0x034, 0x000C7188,
+ 0x034, 0x000C606D,
+ 0x034, 0x000C506A,
+ 0x034, 0x000C402C,
+ 0x034, 0x000C3029,
+ 0x034, 0x000C2026,
+ 0x034, 0x000C1009,
+ 0x034, 0x000C0006,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3F5,
+ 0x034, 0x000C93F3,
+ 0x034, 0x000C83B2,
+ 0x034, 0x000C7390,
+ 0x034, 0x000C638D,
+ 0x034, 0x000C538A,
+ 0x034, 0x000C4387,
+ 0x034, 0x000C324A,
+ 0x034, 0x000C2247,
+ 0x034, 0x000C104D,
+ 0x034, 0x000C004A,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CAFF7,
+ 0x034, 0x000C9FF6,
+ 0x034, 0x000C8FF3,
+ 0x034, 0x000C7FF0,
+ 0x034, 0x000C6FED,
+ 0x034, 0x000C5FEA,
+ 0x034, 0x000C4FE7,
+ 0x034, 0x000C3DEA,
+ 0x034, 0x000C2DE7,
+ 0x034, 0x000C1DE4,
+ 0x034, 0x000C0CE7,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA38C,
+ 0x034, 0x000C9389,
+ 0x034, 0x000C816D,
+ 0x034, 0x000C716A,
+ 0x034, 0x000C606D,
+ 0x034, 0x000C506A,
+ 0x034, 0x000C402C,
+ 0x034, 0x000C3029,
+ 0x034, 0x000C2026,
+ 0x034, 0x000C1009,
+ 0x034, 0x000C0006,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA38C,
+ 0x034, 0x000C9389,
+ 0x034, 0x000C816D,
+ 0x034, 0x000C716A,
+ 0x034, 0x000C606D,
+ 0x034, 0x000C506A,
+ 0x034, 0x000C402C,
+ 0x034, 0x000C3029,
+ 0x034, 0x000C2026,
+ 0x034, 0x000C1009,
+ 0x034, 0x000C0006,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x000CA794,
+ 0x034, 0x000C9791,
+ 0x034, 0x000C878E,
+ 0x034, 0x000C778B,
+ 0x034, 0x000C658D,
+ 0x034, 0x000C558A,
+ 0x034, 0x000C448D,
+ 0x034, 0x000C348A,
+ 0x034, 0x000C244C,
+ 0x034, 0x000C1449,
+ 0x034, 0x000C042B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA38C,
+ 0x034, 0x000A91AD,
+ 0x034, 0x000A81AA,
+ 0x034, 0x000A71A7,
+ 0x034, 0x000A60AA,
+ 0x034, 0x000A50A7,
+ 0x034, 0x000A402C,
+ 0x034, 0x000A3029,
+ 0x034, 0x000A200C,
+ 0x034, 0x000A1009,
+ 0x034, 0x000A0006,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EE,
+ 0x034, 0x000A93AC,
+ 0x034, 0x000A8389,
+ 0x034, 0x000A716D,
+ 0x034, 0x000A616A,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EF,
+ 0x034, 0x000A93AD,
+ 0x034, 0x000A838A,
+ 0x034, 0x000A718C,
+ 0x034, 0x000A6189,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EE,
+ 0x034, 0x000A93AC,
+ 0x034, 0x000A8389,
+ 0x034, 0x000A716D,
+ 0x034, 0x000A616A,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EF,
+ 0x034, 0x000A93AD,
+ 0x034, 0x000A838A,
+ 0x034, 0x000A718C,
+ 0x034, 0x000A6189,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3F5,
+ 0x034, 0x000A93F3,
+ 0x034, 0x000A83D0,
+ 0x034, 0x000A7371,
+ 0x034, 0x000A636E,
+ 0x034, 0x000A536B,
+ 0x034, 0x000A4368,
+ 0x034, 0x000A332A,
+ 0x034, 0x000A2327,
+ 0x034, 0x000A104C,
+ 0x034, 0x000A0049,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AAFF7,
+ 0x034, 0x000A9FF6,
+ 0x034, 0x000A8FF3,
+ 0x034, 0x000A7FF0,
+ 0x034, 0x000A6FED,
+ 0x034, 0x000A5FEA,
+ 0x034, 0x000A4FE7,
+ 0x034, 0x000A3DEA,
+ 0x034, 0x000A2DE7,
+ 0x034, 0x000A1DE4,
+ 0x034, 0x000A0F25,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EE,
+ 0x034, 0x000A93AC,
+ 0x034, 0x000A8389,
+ 0x034, 0x000A716D,
+ 0x034, 0x000A616A,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EE,
+ 0x034, 0x000A93AC,
+ 0x034, 0x000A8389,
+ 0x034, 0x000A716D,
+ 0x034, 0x000A616A,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x000AA794,
+ 0x034, 0x000A9791,
+ 0x034, 0x000A878E,
+ 0x034, 0x000A778B,
+ 0x034, 0x000A658D,
+ 0x034, 0x000A558A,
+ 0x034, 0x000A448D,
+ 0x034, 0x000A348A,
+ 0x034, 0x000A244C,
+ 0x034, 0x000A1449,
+ 0x034, 0x000A042B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A38C,
+ 0x034, 0x000891AD,
+ 0x034, 0x000881AA,
+ 0x034, 0x000871A7,
+ 0x034, 0x000860AA,
+ 0x034, 0x000850A7,
+ 0x034, 0x0008402C,
+ 0x034, 0x00083029,
+ 0x034, 0x00082026,
+ 0x034, 0x00081009,
+ 0x034, 0x00080006,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EC,
+ 0x034, 0x000893AC,
+ 0x034, 0x000881EC,
+ 0x034, 0x0008716D,
+ 0x034, 0x0008616A,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008404C,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EF,
+ 0x034, 0x000893AD,
+ 0x034, 0x0008838A,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EC,
+ 0x034, 0x000893AC,
+ 0x034, 0x000881EC,
+ 0x034, 0x0008716D,
+ 0x034, 0x0008616A,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008404C,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EF,
+ 0x034, 0x000893AD,
+ 0x034, 0x0008838A,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3F4,
+ 0x034, 0x000893F0,
+ 0x034, 0x000883AE,
+ 0x034, 0x00087350,
+ 0x034, 0x0008634D,
+ 0x034, 0x0008534A,
+ 0x034, 0x00084347,
+ 0x034, 0x0008312D,
+ 0x034, 0x0008212A,
+ 0x034, 0x00081127,
+ 0x034, 0x0008002A,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008AFF7,
+ 0x034, 0x00089FF4,
+ 0x034, 0x00088FF1,
+ 0x034, 0x00087FEE,
+ 0x034, 0x00086FEB,
+ 0x034, 0x00085FE8,
+ 0x034, 0x00084DEB,
+ 0x034, 0x00083DE8,
+ 0x034, 0x00082DE5,
+ 0x034, 0x00081C8B,
+ 0x034, 0x00080C88,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EC,
+ 0x034, 0x000893AC,
+ 0x034, 0x000881EC,
+ 0x034, 0x0008716D,
+ 0x034, 0x0008616A,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008404C,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EC,
+ 0x034, 0x000893AC,
+ 0x034, 0x000881EC,
+ 0x034, 0x0008716D,
+ 0x034, 0x0008616A,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008404C,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0008A794,
+ 0x034, 0x00089791,
+ 0x034, 0x0008878E,
+ 0x034, 0x0008778B,
+ 0x034, 0x0008658D,
+ 0x034, 0x0008558A,
+ 0x034, 0x0008448D,
+ 0x034, 0x0008348A,
+ 0x034, 0x0008244C,
+ 0x034, 0x00081449,
+ 0x034, 0x0008042B,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0xA0000000, 0x00000000,
+ 0x0DF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0xA0000000, 0x00000000,
+ 0x035, 0x00000484,
+ 0x035, 0x00008484,
+ 0x035, 0x00010484,
+ 0x035, 0x00020584,
+ 0x035, 0x00028584,
+ 0x035, 0x00030584,
+ 0x035, 0x00040584,
+ 0x035, 0x00048584,
+ 0x035, 0x00050584,
+ 0x035, 0x000805FB,
+ 0x035, 0x000885FB,
+ 0x035, 0x000905FB,
+ 0x035, 0x000A05FB,
+ 0x035, 0x000A85FB,
+ 0x035, 0x000B05FB,
+ 0x035, 0x000C05FB,
+ 0x035, 0x000C85FB,
+ 0x035, 0x000D05FB,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0xA0000000, 0x00000000,
+ 0x0DF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000473,
+ 0x036, 0x00008473,
+ 0x036, 0x00010473,
+ 0x036, 0x00020473,
+ 0x036, 0x00028473,
+ 0x036, 0x00030473,
+ 0x036, 0x00040473,
+ 0x036, 0x00048473,
+ 0x036, 0x00050473,
+ 0x036, 0x00080473,
+ 0x036, 0x00088473,
+ 0x036, 0x00090473,
+ 0x036, 0x000A0473,
+ 0x036, 0x000A8473,
+ 0x036, 0x000B0473,
+ 0x036, 0x000C0473,
+ 0x036, 0x000C8473,
+ 0x036, 0x000D0473,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0xA0000000, 0x00000000,
+ 0x036, 0x00000474,
+ 0x036, 0x00008474,
+ 0x036, 0x00010474,
+ 0x036, 0x00020474,
+ 0x036, 0x00028474,
+ 0x036, 0x00030474,
+ 0x036, 0x00040474,
+ 0x036, 0x00048474,
+ 0x036, 0x00050474,
+ 0x036, 0x00080474,
+ 0x036, 0x00088474,
+ 0x036, 0x00090474,
+ 0x036, 0x000A0474,
+ 0x036, 0x000A8474,
+ 0x036, 0x000B0474,
+ 0x036, 0x000C0474,
+ 0x036, 0x000C8474,
+ 0x036, 0x000D0474,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00000004,
+ 0x037, 0x00000000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00004000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00008000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00010000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00014000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00018000,
+ 0x038, 0x0000514E,
+ 0x037, 0x0001C000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00020000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00024000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00028000,
+ 0x038, 0x0000514E,
+ 0x037, 0x0002C000,
+ 0x038, 0x0000714E,
+ 0x037, 0x00030000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00034000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00038000,
+ 0x038, 0x0000514E,
+ 0x037, 0x0003C000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00040000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00044000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00048000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00080000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00084000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00088000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00090000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00094000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00098000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x0009C000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000A0000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000A4000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000A8000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000AC000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000B0000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000B4000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000B8000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000BC000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000C0000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000C4000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000C8000,
+ 0x038, 0x00005ECE,
+ 0x0EF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000008,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x0000007D,
+ 0x03C, 0x0000047D,
+ 0x03C, 0x0000087D,
+ 0x03C, 0x0000107D,
+ 0x03C, 0x0000147D,
+ 0x03C, 0x0000187D,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027E,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127E,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227E,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027E,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127E,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227E,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027E,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127E,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227E,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027E,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127E,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227E,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027E,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127E,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227E,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027E,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127E,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227E,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027E,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127E,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227E,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027E,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127E,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227E,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x0000037E,
+ 0x03C, 0x00000575,
+ 0x03C, 0x00000971,
+ 0x03C, 0x0000127E,
+ 0x03C, 0x00001575,
+ 0x03C, 0x00001871,
+ 0x03C, 0x0000217E,
+ 0x03C, 0x00002575,
+ 0x03C, 0x00002871,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x061, 0x000C0D47,
+ 0x062, 0x0000133C,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0xA0000000, 0x00000000,
+ 0x063, 0x0007D0E7,
+ 0xB0000000, 0x00000000,
+ 0x064, 0x00014FEC,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0xA0000000, 0x00000000,
+ 0x065, 0x000923FF,
+ 0xB0000000, 0x00000000,
+ 0x066, 0x00000040,
+ 0x057, 0x00050000,
+ 0x056, 0x00051DF0,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x055, 0x00082060,
+ 0xB0000000, 0x00000000,
+};
+
+RTW_DECL_TABLE_RF_RADIO(rtw8814a_rf_b, B);
+
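+/*
+ * Entry layout used throughout these RF tables: most lines are
+ * <RF register, value> write pairs. A first word of the form
+ * 0x8xxxxxxx opens a conditional block (the marker followed by three
+ * condition/mask words), 0x9xxxxxxx adds an else-if branch,
+ * 0xA0000000 introduces the default branch, and 0xB0000000 closes
+ * the block.
+ */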
+static const u32 rtw8814a_rf_c[] = {
+ 0x018, 0x00013124,
+ 0x040, 0x00000C00,
+ 0x058, 0x00000F98,
+ 0x07F, 0x00068004,
+ 0x018, 0x00000006,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0xA0000000, 0x00000000,
+ 0x086, 0x000E4B58,
+ 0x087, 0x00049F80,
+ 0xB0000000, 0x00000000,
+ 0x0DF, 0x00000008,
+ 0x0EF, 0x00002000,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017823,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017823,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017823,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017823,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017823,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017823,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017823,
+ 0xA0000000, 0x00000000,
+ 0x03B, 0x0003F258,
+ 0x03B, 0x00030A58,
+ 0x03B, 0x0002FA58,
+ 0x03B, 0x00022590,
+ 0x03B, 0x0001FA50,
+ 0x03B, 0x00010248,
+ 0x03B, 0x00008240,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000100,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0000ADF6,
+ 0x034, 0x00009DF3,
+ 0x034, 0x00008DF0,
+ 0x034, 0x00007DED,
+ 0x034, 0x00006DEA,
+ 0x034, 0x00005CED,
+ 0x034, 0x00004CEA,
+ 0x034, 0x000034EA,
+ 0x034, 0x000024E7,
+ 0x034, 0x0000146A,
+ 0x034, 0x0000006B,
+ 0xB0000000, 0x00000000,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0008ADF6,
+ 0x034, 0x00089DF3,
+ 0x034, 0x00088DF0,
+ 0x034, 0x00087DED,
+ 0x034, 0x00086DEA,
+ 0x034, 0x00085CED,
+ 0x034, 0x00084CEA,
+ 0x034, 0x000834EA,
+ 0x034, 0x000824E7,
+ 0x034, 0x0008146A,
+ 0x034, 0x0008006B,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x000020A2,
+ 0x0DF, 0x00000080,
+ 0x035, 0x00000192,
+ 0x035, 0x00008192,
+ 0x035, 0x00010192,
+ 0x036, 0x00000024,
+ 0x036, 0x00008024,
+ 0x036, 0x00010024,
+ 0x036, 0x00018024,
+ 0x0EF, 0x00000000,
+ 0x051, 0x00000C21,
+ 0x052, 0x000006D9,
+ 0x053, 0x000FC649,
+ 0x054, 0x0000017E,
+ 0x018, 0x0001012A,
+ 0x081, 0x0007FC00,
+ 0x089, 0x00050110,
+ 0x08A, 0x00043E50,
+ 0x08B, 0x0002E180,
+ 0x08C, 0x00093C3C,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x000F8000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x000F8000,
+ 0xA0000000, 0x00000000,
+ 0x085, 0x000F8000,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0xA0000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00001000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00038023,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0006C000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x000D4000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00080000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0006C000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0008C000,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00004000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x000A0000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00030023,
+ 0x03C, 0x00048000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00028623,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00021633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x0001C633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00010293,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00009593,
+ 0x03C, 0x00000000,
+ 0x03A, 0x00000148,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0000118B,
+ 0xA0000000, 0x00000000,
+ 0x03B, 0x0000078B,
+ 0xB0000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0004C000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00084000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00080000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0004C000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x000D0000,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00080000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00080000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x00028000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00070023,
+ 0x03C, 0x00048000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00068623,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00061633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x0005C633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00050293,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00049593,
+ 0x03C, 0x00000000,
+ 0x03A, 0x00000148,
+ 0x03B, 0x0004078B,
+ 0x03C, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00060000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00080000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B0023,
+ 0x80000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000A8623,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000A1633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x0009C633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00090293,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00089593,
+ 0x03C, 0x00000000,
+ 0x03A, 0x00000148,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0008128B,
+ 0xA0000000, 0x00000000,
+ 0x03B, 0x0008078B,
+ 0xB0000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000800,
+ 0x03B, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001803,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001803,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001003,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001003,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001003,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0xB0000000, 0x00000000,
+ 0x03B, 0x00040000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000800,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000000,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0xB0000000, 0x00000000,
+ 0x03B, 0x00080000,
+ 0x80000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001802,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001802,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x00001002,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x018, 0x00013124,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x018, 0x00013124,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x018, 0x00013124,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x018, 0x00013124,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x018, 0x00013124,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x018, 0x00013124,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x018, 0x00013124,
+ 0xA0000000, 0x00000000,
+ 0x018, 0x00013124,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000100,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A38C,
+ 0x034, 0x000491AD,
+ 0x034, 0x000481AA,
+ 0x034, 0x000471A7,
+ 0x034, 0x000460AA,
+ 0x034, 0x000450A7,
+ 0x034, 0x0004402C,
+ 0x034, 0x00043029,
+ 0x034, 0x0004200C,
+ 0x034, 0x00041009,
+ 0x034, 0x00040006,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3EF,
+ 0x034, 0x000493AD,
+ 0x034, 0x0004838A,
+ 0x034, 0x0004718C,
+ 0x034, 0x00046189,
+ 0x034, 0x0004506D,
+ 0x034, 0x0004404C,
+ 0x034, 0x0004302C,
+ 0x034, 0x00042029,
+ 0x034, 0x00041026,
+ 0x034, 0x00040023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3EF,
+ 0x034, 0x000493AD,
+ 0x034, 0x0004838A,
+ 0x034, 0x0004718C,
+ 0x034, 0x00046189,
+ 0x034, 0x0004506D,
+ 0x034, 0x0004404C,
+ 0x034, 0x0004302C,
+ 0x034, 0x00042029,
+ 0x034, 0x00041026,
+ 0x034, 0x00040023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3EF,
+ 0x034, 0x000493AD,
+ 0x034, 0x0004838A,
+ 0x034, 0x0004718C,
+ 0x034, 0x00046189,
+ 0x034, 0x0004506D,
+ 0x034, 0x0004404C,
+ 0x034, 0x0004302C,
+ 0x034, 0x00042029,
+ 0x034, 0x00041026,
+ 0x034, 0x00040023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3EF,
+ 0x034, 0x000493AD,
+ 0x034, 0x0004838A,
+ 0x034, 0x0004718C,
+ 0x034, 0x00046189,
+ 0x034, 0x0004506D,
+ 0x034, 0x0004404C,
+ 0x034, 0x0004302C,
+ 0x034, 0x00042029,
+ 0x034, 0x00041026,
+ 0x034, 0x00040023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3F5,
+ 0x034, 0x000493F3,
+ 0x034, 0x00048393,
+ 0x034, 0x00047390,
+ 0x034, 0x0004638D,
+ 0x034, 0x0004538A,
+ 0x034, 0x00044387,
+ 0x034, 0x000430ED,
+ 0x034, 0x000420EA,
+ 0x034, 0x000410E7,
+ 0x034, 0x0004002D,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004AFF7,
+ 0x034, 0x00049FF6,
+ 0x034, 0x00048FF3,
+ 0x034, 0x00047FF0,
+ 0x034, 0x00046FED,
+ 0x034, 0x00045FEA,
+ 0x034, 0x00044FE7,
+ 0x034, 0x00043CD0,
+ 0x034, 0x00042CCD,
+ 0x034, 0x00041CCA,
+ 0x034, 0x00040CC7,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3EF,
+ 0x034, 0x000493AD,
+ 0x034, 0x0004838A,
+ 0x034, 0x0004718C,
+ 0x034, 0x00046189,
+ 0x034, 0x0004506D,
+ 0x034, 0x0004404C,
+ 0x034, 0x0004302C,
+ 0x034, 0x00042029,
+ 0x034, 0x00041026,
+ 0x034, 0x00040023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3EF,
+ 0x034, 0x000493AD,
+ 0x034, 0x0004838A,
+ 0x034, 0x0004718C,
+ 0x034, 0x00046189,
+ 0x034, 0x0004506D,
+ 0x034, 0x0004404C,
+ 0x034, 0x0004302C,
+ 0x034, 0x00042029,
+ 0x034, 0x00041026,
+ 0x034, 0x00040023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0004AFF4,
+ 0x034, 0x00049FF1,
+ 0x034, 0x00048FEE,
+ 0x034, 0x00047FEB,
+ 0x034, 0x00046FE8,
+ 0x034, 0x00045DEA,
+ 0x034, 0x00044CED,
+ 0x034, 0x00043CEA,
+ 0x034, 0x00042C6C,
+ 0x034, 0x00041C69,
+ 0x034, 0x00040C2B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EC,
+ 0x034, 0x0002938C,
+ 0x034, 0x000281AD,
+ 0x034, 0x000271AA,
+ 0x034, 0x000261A7,
+ 0x034, 0x000250AA,
+ 0x034, 0x000240A7,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x0002100C,
+ 0x034, 0x00020009,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EC,
+ 0x034, 0x0002936D,
+ 0x034, 0x0002836A,
+ 0x034, 0x0002716D,
+ 0x034, 0x0002616A,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EC,
+ 0x034, 0x000293AC,
+ 0x034, 0x0002838A,
+ 0x034, 0x0002718C,
+ 0x034, 0x00026189,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EC,
+ 0x034, 0x0002936D,
+ 0x034, 0x0002836A,
+ 0x034, 0x0002716D,
+ 0x034, 0x0002616A,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EC,
+ 0x034, 0x000293AC,
+ 0x034, 0x0002838A,
+ 0x034, 0x0002718C,
+ 0x034, 0x00026189,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3F5,
+ 0x034, 0x000293F3,
+ 0x034, 0x000282F2,
+ 0x034, 0x000272D0,
+ 0x034, 0x000262CD,
+ 0x034, 0x000252CA,
+ 0x034, 0x000242C7,
+ 0x034, 0x000230CD,
+ 0x034, 0x000220CA,
+ 0x034, 0x000210C7,
+ 0x034, 0x00020086,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002AFF7,
+ 0x034, 0x00029FF6,
+ 0x034, 0x00028FF3,
+ 0x034, 0x00027FF0,
+ 0x034, 0x00026FED,
+ 0x034, 0x00025FEA,
+ 0x034, 0x00024FE7,
+ 0x034, 0x00023DEA,
+ 0x034, 0x00022DE7,
+ 0x034, 0x00021DE4,
+ 0x034, 0x00020E44,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EC,
+ 0x034, 0x0002936D,
+ 0x034, 0x0002836A,
+ 0x034, 0x0002716D,
+ 0x034, 0x0002616A,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EC,
+ 0x034, 0x0002936D,
+ 0x034, 0x0002836A,
+ 0x034, 0x0002716D,
+ 0x034, 0x0002616A,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0002AFF4,
+ 0x034, 0x00029FF1,
+ 0x034, 0x00028FEE,
+ 0x034, 0x00027FEB,
+ 0x034, 0x00026FE8,
+ 0x034, 0x00025DEA,
+ 0x034, 0x00024CED,
+ 0x034, 0x00023CEA,
+ 0x034, 0x00022C6C,
+ 0x034, 0x00021C69,
+ 0x034, 0x00020C2B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A38C,
+ 0x034, 0x000091AD,
+ 0x034, 0x000081AA,
+ 0x034, 0x000071A7,
+ 0x034, 0x000060AA,
+ 0x034, 0x000050A7,
+ 0x034, 0x0000402C,
+ 0x034, 0x00003029,
+ 0x034, 0x0000200C,
+ 0x034, 0x00001009,
+ 0x034, 0x00000006,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AB,
+ 0x034, 0x00008389,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AB,
+ 0x034, 0x00008389,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AB,
+ 0x034, 0x00008389,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AB,
+ 0x034, 0x00008389,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3F5,
+ 0x034, 0x000093F1,
+ 0x034, 0x000083B0,
+ 0x034, 0x00007370,
+ 0x034, 0x0000636D,
+ 0x034, 0x0000536A,
+ 0x034, 0x00004367,
+ 0x034, 0x0000308E,
+ 0x034, 0x0000208B,
+ 0x034, 0x00001088,
+ 0x034, 0x00000085,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000AFF7,
+ 0x034, 0x00009FF5,
+ 0x034, 0x00008FF2,
+ 0x034, 0x00007FEF,
+ 0x034, 0x00006FEC,
+ 0x034, 0x00005FE9,
+ 0x034, 0x00004EAA,
+ 0x034, 0x00003EA7,
+ 0x034, 0x00002C70,
+ 0x034, 0x00001C6D,
+ 0x034, 0x00000C6A,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AB,
+ 0x034, 0x00008389,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AB,
+ 0x034, 0x00008389,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0000AFF4,
+ 0x034, 0x00009FF1,
+ 0x034, 0x00008FEE,
+ 0x034, 0x00007FEB,
+ 0x034, 0x00006FE8,
+ 0x034, 0x00005DEA,
+ 0x034, 0x00004CED,
+ 0x034, 0x00003CEA,
+ 0x034, 0x00002C6C,
+ 0x034, 0x00001C69,
+ 0x034, 0x00000C2B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA38C,
+ 0x034, 0x000C91AD,
+ 0x034, 0x000C81AA,
+ 0x034, 0x000C71A7,
+ 0x034, 0x000C60AA,
+ 0x034, 0x000C50A7,
+ 0x034, 0x000C402C,
+ 0x034, 0x000C3029,
+ 0x034, 0x000C200C,
+ 0x034, 0x000C1009,
+ 0x034, 0x000C0006,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3EF,
+ 0x034, 0x000C93AD,
+ 0x034, 0x000C838A,
+ 0x034, 0x000C718C,
+ 0x034, 0x000C6189,
+ 0x034, 0x000C506D,
+ 0x034, 0x000C404C,
+ 0x034, 0x000C302C,
+ 0x034, 0x000C2029,
+ 0x034, 0x000C1026,
+ 0x034, 0x000C0023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3EF,
+ 0x034, 0x000C93AD,
+ 0x034, 0x000C838A,
+ 0x034, 0x000C718C,
+ 0x034, 0x000C6189,
+ 0x034, 0x000C506D,
+ 0x034, 0x000C404C,
+ 0x034, 0x000C302C,
+ 0x034, 0x000C2029,
+ 0x034, 0x000C1026,
+ 0x034, 0x000C0023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3EF,
+ 0x034, 0x000C93AD,
+ 0x034, 0x000C838A,
+ 0x034, 0x000C718C,
+ 0x034, 0x000C6189,
+ 0x034, 0x000C506D,
+ 0x034, 0x000C404C,
+ 0x034, 0x000C302C,
+ 0x034, 0x000C2029,
+ 0x034, 0x000C1026,
+ 0x034, 0x000C0023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3EF,
+ 0x034, 0x000C93AD,
+ 0x034, 0x000C838A,
+ 0x034, 0x000C718C,
+ 0x034, 0x000C6189,
+ 0x034, 0x000C506D,
+ 0x034, 0x000C404C,
+ 0x034, 0x000C302C,
+ 0x034, 0x000C2029,
+ 0x034, 0x000C1026,
+ 0x034, 0x000C0023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3F5,
+ 0x034, 0x000C93F3,
+ 0x034, 0x000C8393,
+ 0x034, 0x000C7390,
+ 0x034, 0x000C638D,
+ 0x034, 0x000C538A,
+ 0x034, 0x000C4387,
+ 0x034, 0x000C30ED,
+ 0x034, 0x000C20EA,
+ 0x034, 0x000C10E7,
+ 0x034, 0x000C002D,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CAFF7,
+ 0x034, 0x000C9FF6,
+ 0x034, 0x000C8FF3,
+ 0x034, 0x000C7FF0,
+ 0x034, 0x000C6FED,
+ 0x034, 0x000C5FEA,
+ 0x034, 0x000C4FE7,
+ 0x034, 0x000C3CD0,
+ 0x034, 0x000C2CCD,
+ 0x034, 0x000C1CCA,
+ 0x034, 0x000C0CC7,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3EF,
+ 0x034, 0x000C93AD,
+ 0x034, 0x000C838A,
+ 0x034, 0x000C718C,
+ 0x034, 0x000C6189,
+ 0x034, 0x000C506D,
+ 0x034, 0x000C404C,
+ 0x034, 0x000C302C,
+ 0x034, 0x000C2029,
+ 0x034, 0x000C1026,
+ 0x034, 0x000C0023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3EF,
+ 0x034, 0x000C93AD,
+ 0x034, 0x000C838A,
+ 0x034, 0x000C718C,
+ 0x034, 0x000C6189,
+ 0x034, 0x000C506D,
+ 0x034, 0x000C404C,
+ 0x034, 0x000C302C,
+ 0x034, 0x000C2029,
+ 0x034, 0x000C1026,
+ 0x034, 0x000C0023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x000CA794,
+ 0x034, 0x000C9791,
+ 0x034, 0x000C878E,
+ 0x034, 0x000C778B,
+ 0x034, 0x000C658D,
+ 0x034, 0x000C558A,
+ 0x034, 0x000C448D,
+ 0x034, 0x000C348A,
+ 0x034, 0x000C244C,
+ 0x034, 0x000C1449,
+ 0x034, 0x000C042B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EC,
+ 0x034, 0x000A938C,
+ 0x034, 0x000A81AD,
+ 0x034, 0x000A71AA,
+ 0x034, 0x000A61A7,
+ 0x034, 0x000A50AA,
+ 0x034, 0x000A40A7,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A100C,
+ 0x034, 0x000A0009,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EC,
+ 0x034, 0x000A936D,
+ 0x034, 0x000A836A,
+ 0x034, 0x000A716D,
+ 0x034, 0x000A616A,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EC,
+ 0x034, 0x000A93AC,
+ 0x034, 0x000A838A,
+ 0x034, 0x000A718C,
+ 0x034, 0x000A6189,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EC,
+ 0x034, 0x000A936D,
+ 0x034, 0x000A836A,
+ 0x034, 0x000A716D,
+ 0x034, 0x000A616A,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EC,
+ 0x034, 0x000A93AC,
+ 0x034, 0x000A838A,
+ 0x034, 0x000A718C,
+ 0x034, 0x000A6189,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3F5,
+ 0x034, 0x000A93F3,
+ 0x034, 0x000A82F2,
+ 0x034, 0x000A72D0,
+ 0x034, 0x000A62CD,
+ 0x034, 0x000A52CA,
+ 0x034, 0x000A42C7,
+ 0x034, 0x000A30CD,
+ 0x034, 0x000A20CA,
+ 0x034, 0x000A10C7,
+ 0x034, 0x000A0086,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AAFF7,
+ 0x034, 0x000A9FF6,
+ 0x034, 0x000A8FF3,
+ 0x034, 0x000A7FF0,
+ 0x034, 0x000A6FED,
+ 0x034, 0x000A5FEA,
+ 0x034, 0x000A4FE7,
+ 0x034, 0x000A3DEA,
+ 0x034, 0x000A2DE7,
+ 0x034, 0x000A1DE4,
+ 0x034, 0x000A0E44,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EC,
+ 0x034, 0x000A936D,
+ 0x034, 0x000A836A,
+ 0x034, 0x000A716D,
+ 0x034, 0x000A616A,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EC,
+ 0x034, 0x000A936D,
+ 0x034, 0x000A836A,
+ 0x034, 0x000A716D,
+ 0x034, 0x000A616A,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x000AA794,
+ 0x034, 0x000A9791,
+ 0x034, 0x000A878E,
+ 0x034, 0x000A778B,
+ 0x034, 0x000A658D,
+ 0x034, 0x000A558A,
+ 0x034, 0x000A448D,
+ 0x034, 0x000A348A,
+ 0x034, 0x000A244C,
+ 0x034, 0x000A1449,
+ 0x034, 0x000A042B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A38C,
+ 0x034, 0x000891AD,
+ 0x034, 0x000881AA,
+ 0x034, 0x000871A7,
+ 0x034, 0x000860AA,
+ 0x034, 0x000850A7,
+ 0x034, 0x0008402C,
+ 0x034, 0x00083029,
+ 0x034, 0x0008200C,
+ 0x034, 0x00081009,
+ 0x034, 0x00000006,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AB,
+ 0x034, 0x00088389,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AB,
+ 0x034, 0x00088389,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AB,
+ 0x034, 0x00088389,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AB,
+ 0x034, 0x00088389,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3F5,
+ 0x034, 0x000893F1,
+ 0x034, 0x000883B0,
+ 0x034, 0x00087370,
+ 0x034, 0x0008636D,
+ 0x034, 0x0008536A,
+ 0x034, 0x00084367,
+ 0x034, 0x0008308E,
+ 0x034, 0x0008208B,
+ 0x034, 0x00081088,
+ 0x034, 0x00080085,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008AFF7,
+ 0x034, 0x00089FF5,
+ 0x034, 0x00088FF2,
+ 0x034, 0x00087FEF,
+ 0x034, 0x00086FEC,
+ 0x034, 0x00085FE9,
+ 0x034, 0x00084EAA,
+ 0x034, 0x00083EA7,
+ 0x034, 0x00082C70,
+ 0x034, 0x00081C6D,
+ 0x034, 0x00080C6A,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AB,
+ 0x034, 0x00088389,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AB,
+ 0x034, 0x00088389,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0008A794,
+ 0x034, 0x00089791,
+ 0x034, 0x0008878E,
+ 0x034, 0x0008778B,
+ 0x034, 0x0008658D,
+ 0x034, 0x0008558A,
+ 0x034, 0x0008448D,
+ 0x034, 0x0008348A,
+ 0x034, 0x0008244C,
+ 0x034, 0x00081449,
+ 0x034, 0x0008042B,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0xA0000000, 0x00000000,
+ 0x0DF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0xA0000000, 0x00000000,
+ 0x035, 0x00000484,
+ 0x035, 0x00008484,
+ 0x035, 0x00010484,
+ 0x035, 0x00020584,
+ 0x035, 0x00028584,
+ 0x035, 0x00030584,
+ 0x035, 0x00040584,
+ 0x035, 0x00048584,
+ 0x035, 0x00050584,
+ 0x035, 0x000805FB,
+ 0x035, 0x000885FB,
+ 0x035, 0x000905FB,
+ 0x035, 0x000A05FB,
+ 0x035, 0x000A85FB,
+ 0x035, 0x000B05FB,
+ 0x035, 0x000C05FB,
+ 0x035, 0x000C85FB,
+ 0x035, 0x000D05FB,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0xA0000000, 0x00000000,
+ 0x0DF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000473,
+ 0x036, 0x00008473,
+ 0x036, 0x00010473,
+ 0x036, 0x00020473,
+ 0x036, 0x00028473,
+ 0x036, 0x00030473,
+ 0x036, 0x00040473,
+ 0x036, 0x00048473,
+ 0x036, 0x00050473,
+ 0x036, 0x00080473,
+ 0x036, 0x00088473,
+ 0x036, 0x00090473,
+ 0x036, 0x000A0473,
+ 0x036, 0x000A8473,
+ 0x036, 0x000B0473,
+ 0x036, 0x000C0473,
+ 0x036, 0x000C8473,
+ 0x036, 0x000D0473,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0xA0000000, 0x00000000,
+ 0x036, 0x00000474,
+ 0x036, 0x00008474,
+ 0x036, 0x00010474,
+ 0x036, 0x00020474,
+ 0x036, 0x00028474,
+ 0x036, 0x00030474,
+ 0x036, 0x00040474,
+ 0x036, 0x00048474,
+ 0x036, 0x00050474,
+ 0x036, 0x00080474,
+ 0x036, 0x00088474,
+ 0x036, 0x00090474,
+ 0x036, 0x000A0474,
+ 0x036, 0x000A8474,
+ 0x036, 0x000B0474,
+ 0x036, 0x000C0474,
+ 0x036, 0x000C8474,
+ 0x036, 0x000D0474,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00000004,
+ 0x037, 0x00000000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00004000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00008000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00010000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00014000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00018000,
+ 0x038, 0x0000514E,
+ 0x037, 0x0001C000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00020000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00024000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00028000,
+ 0x038, 0x0000514E,
+ 0x037, 0x0002C000,
+ 0x038, 0x0000714E,
+ 0x037, 0x00030000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00034000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00038000,
+ 0x038, 0x0000514E,
+ 0x037, 0x0003C000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00040000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00044000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00048000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00080000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00084000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00088000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00090000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00094000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00098000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x0009C000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000A0000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000A4000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000A8000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000AC000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000B0000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000B4000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000B8000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000BC000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000C0000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000C4000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000C8000,
+ 0x038, 0x00005ECE,
+ 0x0EF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000008,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x0000007D,
+ 0x03C, 0x0000047D,
+ 0x03C, 0x0000087D,
+ 0x03C, 0x0000107D,
+ 0x03C, 0x0000147D,
+ 0x03C, 0x0000187D,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x00000541,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x00001541,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x00002541,
+ 0x03C, 0x00002821,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x0000037E,
+ 0x03C, 0x00000575,
+ 0x03C, 0x00000971,
+ 0x03C, 0x0000127E,
+ 0x03C, 0x00001575,
+ 0x03C, 0x00001871,
+ 0x03C, 0x0000217E,
+ 0x03C, 0x00002575,
+ 0x03C, 0x00002871,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x061, 0x000C0D47,
+ 0x062, 0x0000133C,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0xA0000000, 0x00000000,
+ 0x063, 0x0007D0E7,
+ 0xB0000000, 0x00000000,
+ 0x064, 0x00014FEC,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0xA0000000, 0x00000000,
+ 0x065, 0x000923FF,
+ 0xB0000000, 0x00000000,
+ 0x066, 0x00000040,
+ 0x057, 0x00050000,
+ 0x056, 0x00051DF0,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x055, 0x00082060,
+ 0xB0000000, 0x00000000,
+};
+
+RTW_DECL_TABLE_RF_RADIO(rtw8814a_rf_c, C);
+
+static const u32 rtw8814a_rf_d[] = {
+ 0x018, 0x00013124,
+ 0x040, 0x00000C00,
+ 0x058, 0x00000F98,
+ 0x07F, 0x00068004,
+ 0x018, 0x00000006,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0xA0000000, 0x00000000,
+ 0x086, 0x000E4B58,
+ 0x087, 0x00049F80,
+ 0xB0000000, 0x00000000,
+ 0x0DF, 0x00000008,
+ 0x0EF, 0x00002000,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017803,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F09B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017803,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017803,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F09B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017803,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017803,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017803,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017803,
+ 0xA0000000, 0x00000000,
+ 0x03B, 0x0003F258,
+ 0x03B, 0x00030A58,
+ 0x03B, 0x0002FA58,
+ 0x03B, 0x00022590,
+ 0x03B, 0x0001FA50,
+ 0x03B, 0x00010248,
+ 0x03B, 0x00008240,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000100,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0000ADF6,
+ 0x034, 0x00009DF3,
+ 0x034, 0x00008DF0,
+ 0x034, 0x00007DED,
+ 0x034, 0x00006DEA,
+ 0x034, 0x00005CED,
+ 0x034, 0x00004CEA,
+ 0x034, 0x000034EA,
+ 0x034, 0x000024E7,
+ 0x034, 0x0000146A,
+ 0x034, 0x0000006B,
+ 0xB0000000, 0x00000000,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0008ADF6,
+ 0x034, 0x00089DF3,
+ 0x034, 0x00088DF0,
+ 0x034, 0x00087DED,
+ 0x034, 0x00086DEA,
+ 0x034, 0x00085CED,
+ 0x034, 0x00084CEA,
+ 0x034, 0x000834EA,
+ 0x034, 0x000824E7,
+ 0x034, 0x0008146A,
+ 0x034, 0x0008006B,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x000020A2,
+ 0x0DF, 0x00000080,
+ 0x035, 0x00000192,
+ 0x035, 0x00008192,
+ 0x035, 0x00010192,
+ 0x036, 0x00000024,
+ 0x036, 0x00008024,
+ 0x036, 0x00010024,
+ 0x036, 0x00018024,
+ 0x0EF, 0x00000000,
+ 0x051, 0x00000C21,
+ 0x052, 0x000006D9,
+ 0x053, 0x000FC649,
+ 0x054, 0x0000017E,
+ 0x018, 0x0001012A,
+ 0x081, 0x0007FC00,
+ 0x089, 0x00050110,
+ 0x08A, 0x00043E50,
+ 0x08B, 0x0002E180,
+ 0x08C, 0x00093C3C,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x000F8000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x000F8000,
+ 0xA0000000, 0x00000000,
+ 0x085, 0x000F8000,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0xA0000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00001000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00038023,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00038023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00038023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00038023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00038023,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00038023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00038023,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00038023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00038023,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00038023,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00044000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00048000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00044000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00040000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x00048000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00030023,
+ 0x03C, 0x00048000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00028623,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00021633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x0001C633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00010293,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00009593,
+ 0x03C, 0x00000000,
+ 0x03A, 0x00000148,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x00000F8B,
+ 0xA0000000, 0x00000000,
+ 0x03B, 0x0000078B,
+ 0xB0000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00044000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00044000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00044000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00070023,
+ 0x03C, 0x00048000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00068623,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00061633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x0005C633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00050293,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00049593,
+ 0x03C, 0x00000000,
+ 0x03A, 0x00000148,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x00040F8B,
+ 0xA0000000, 0x00000000,
+ 0x03B, 0x0004078B,
+ 0xB0000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00004000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00060000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00004000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00060000,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x00004000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B0023,
+ 0x80000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000A8623,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000A1633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x0009C633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00090293,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00089593,
+ 0x03C, 0x00000000,
+ 0x03A, 0x00000148,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0008138B,
+ 0xA0000000, 0x00000000,
+ 0x03B, 0x0008078B,
+ 0xB0000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000800,
+ 0x03B, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001003,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001003,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001803,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0xB0000000, 0x00000000,
+ 0x03B, 0x00040000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001002,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000001,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000802,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001803,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0xB0000000, 0x00000000,
+ 0x03B, 0x00080000,
+ 0x80000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001802,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000802,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x00001002,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x018, 0x00013124,
+ 0x0EF, 0x00000100,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3EB,
+ 0x034, 0x0004938B,
+ 0x034, 0x000481AC,
+ 0x034, 0x000471A9,
+ 0x034, 0x000460AC,
+ 0x034, 0x000450A9,
+ 0x034, 0x0004402E,
+ 0x034, 0x0004302B,
+ 0x034, 0x00042028,
+ 0x034, 0x0004100B,
+ 0x034, 0x00040008,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3AD,
+ 0x034, 0x0004938A,
+ 0x034, 0x0004818C,
+ 0x034, 0x00047189,
+ 0x034, 0x0004606D,
+ 0x034, 0x0004506A,
+ 0x034, 0x0004402C,
+ 0x034, 0x00043029,
+ 0x034, 0x00042026,
+ 0x034, 0x00041009,
+ 0x034, 0x00040006,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3AD,
+ 0x034, 0x0004938A,
+ 0x034, 0x0004818C,
+ 0x034, 0x00047189,
+ 0x034, 0x0004606D,
+ 0x034, 0x0004506A,
+ 0x034, 0x0004402C,
+ 0x034, 0x00043029,
+ 0x034, 0x00042026,
+ 0x034, 0x00041009,
+ 0x034, 0x00040006,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3AD,
+ 0x034, 0x0004938A,
+ 0x034, 0x0004818C,
+ 0x034, 0x00047189,
+ 0x034, 0x0004606D,
+ 0x034, 0x0004506A,
+ 0x034, 0x0004402C,
+ 0x034, 0x00043029,
+ 0x034, 0x00042026,
+ 0x034, 0x00041009,
+ 0x034, 0x00040006,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3AD,
+ 0x034, 0x0004938A,
+ 0x034, 0x0004818C,
+ 0x034, 0x00047189,
+ 0x034, 0x0004606D,
+ 0x034, 0x0004506A,
+ 0x034, 0x0004402C,
+ 0x034, 0x00043029,
+ 0x034, 0x00042026,
+ 0x034, 0x00041009,
+ 0x034, 0x00040006,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3F4,
+ 0x034, 0x000493D2,
+ 0x034, 0x000482D1,
+ 0x034, 0x000471F1,
+ 0x034, 0x000461EE,
+ 0x034, 0x000451EB,
+ 0x034, 0x000441E8,
+ 0x034, 0x0004314B,
+ 0x034, 0x00042148,
+ 0x034, 0x0004104B,
+ 0x034, 0x00040048,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004AFF7,
+ 0x034, 0x00049FF6,
+ 0x034, 0x00048FF3,
+ 0x034, 0x00047FF0,
+ 0x034, 0x00046FED,
+ 0x034, 0x00045FEA,
+ 0x034, 0x00044FE7,
+ 0x034, 0x00043CB1,
+ 0x034, 0x00042CAE,
+ 0x034, 0x00041CAB,
+ 0x034, 0x00040CA8,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3AD,
+ 0x034, 0x0004938A,
+ 0x034, 0x0004818C,
+ 0x034, 0x00047189,
+ 0x034, 0x0004606D,
+ 0x034, 0x0004506A,
+ 0x034, 0x0004402C,
+ 0x034, 0x00043029,
+ 0x034, 0x00042026,
+ 0x034, 0x00041009,
+ 0x034, 0x00040006,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3AD,
+ 0x034, 0x0004938A,
+ 0x034, 0x0004818C,
+ 0x034, 0x00047189,
+ 0x034, 0x0004606D,
+ 0x034, 0x0004506A,
+ 0x034, 0x0004402C,
+ 0x034, 0x00043029,
+ 0x034, 0x00042026,
+ 0x034, 0x00041009,
+ 0x034, 0x00040006,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0004AFF4,
+ 0x034, 0x00049FF1,
+ 0x034, 0x00048FEE,
+ 0x034, 0x00047FEB,
+ 0x034, 0x00046FE8,
+ 0x034, 0x00045DEA,
+ 0x034, 0x00044CED,
+ 0x034, 0x00043CEA,
+ 0x034, 0x00042C6C,
+ 0x034, 0x00041C69,
+ 0x034, 0x00040C2B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EE,
+ 0x034, 0x000293EB,
+ 0x034, 0x0002838B,
+ 0x034, 0x000271AC,
+ 0x034, 0x000261A9,
+ 0x034, 0x000250AC,
+ 0x034, 0x000240A9,
+ 0x034, 0x000230A6,
+ 0x034, 0x0002202C,
+ 0x034, 0x00021029,
+ 0x034, 0x00020026,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3AD,
+ 0x034, 0x0002938A,
+ 0x034, 0x0002818C,
+ 0x034, 0x00027189,
+ 0x034, 0x0002606D,
+ 0x034, 0x0002504C,
+ 0x034, 0x0002402C,
+ 0x034, 0x00023029,
+ 0x034, 0x00022026,
+ 0x034, 0x00021023,
+ 0x034, 0x00020006,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3AD,
+ 0x034, 0x0002938A,
+ 0x034, 0x0002818C,
+ 0x034, 0x00027189,
+ 0x034, 0x0002606D,
+ 0x034, 0x0002504C,
+ 0x034, 0x0002402C,
+ 0x034, 0x00023029,
+ 0x034, 0x00022026,
+ 0x034, 0x00021023,
+ 0x034, 0x00020006,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3AD,
+ 0x034, 0x0002938A,
+ 0x034, 0x0002818C,
+ 0x034, 0x00027189,
+ 0x034, 0x0002606D,
+ 0x034, 0x0002504C,
+ 0x034, 0x0002402C,
+ 0x034, 0x00023029,
+ 0x034, 0x00022026,
+ 0x034, 0x00021023,
+ 0x034, 0x00020006,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3AD,
+ 0x034, 0x0002938A,
+ 0x034, 0x0002818C,
+ 0x034, 0x00027189,
+ 0x034, 0x0002606D,
+ 0x034, 0x0002504C,
+ 0x034, 0x0002402C,
+ 0x034, 0x00023029,
+ 0x034, 0x00022026,
+ 0x034, 0x00021023,
+ 0x034, 0x00020006,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3F5,
+ 0x034, 0x000293D2,
+ 0x034, 0x000283CE,
+ 0x034, 0x00027290,
+ 0x034, 0x0002628D,
+ 0x034, 0x0002528A,
+ 0x034, 0x00024287,
+ 0x034, 0x0002308D,
+ 0x034, 0x0002208A,
+ 0x034, 0x00021087,
+ 0x034, 0x00020048,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002AFF7,
+ 0x034, 0x00029FF6,
+ 0x034, 0x00028FF3,
+ 0x034, 0x00027FF0,
+ 0x034, 0x00026FED,
+ 0x034, 0x00025FEA,
+ 0x034, 0x00024FE7,
+ 0x034, 0x00023DEA,
+ 0x034, 0x00022DE7,
+ 0x034, 0x00021DE4,
+ 0x034, 0x00020D48,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3AD,
+ 0x034, 0x0002938A,
+ 0x034, 0x0002818C,
+ 0x034, 0x00027189,
+ 0x034, 0x0002606D,
+ 0x034, 0x0002504C,
+ 0x034, 0x0002402C,
+ 0x034, 0x00023029,
+ 0x034, 0x00022026,
+ 0x034, 0x00021023,
+ 0x034, 0x00020006,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3AD,
+ 0x034, 0x0002938A,
+ 0x034, 0x0002818C,
+ 0x034, 0x00027189,
+ 0x034, 0x0002606D,
+ 0x034, 0x0002504C,
+ 0x034, 0x0002402C,
+ 0x034, 0x00023029,
+ 0x034, 0x00022026,
+ 0x034, 0x00021023,
+ 0x034, 0x00020006,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0002AFF4,
+ 0x034, 0x00029FF1,
+ 0x034, 0x00028FEE,
+ 0x034, 0x00027FEB,
+ 0x034, 0x00026FE8,
+ 0x034, 0x00025DEA,
+ 0x034, 0x00024CED,
+ 0x034, 0x00023CEA,
+ 0x034, 0x00022C6C,
+ 0x034, 0x00021C69,
+ 0x034, 0x00020C2B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EF,
+ 0x034, 0x000093EC,
+ 0x034, 0x0000838C,
+ 0x034, 0x000071AD,
+ 0x034, 0x000061AA,
+ 0x034, 0x000050AD,
+ 0x034, 0x000040AA,
+ 0x034, 0x0000306A,
+ 0x034, 0x0000202D,
+ 0x034, 0x0000102A,
+ 0x034, 0x00000027,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AC,
+ 0x034, 0x0000838A,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AC,
+ 0x034, 0x0000838A,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AC,
+ 0x034, 0x0000838A,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AC,
+ 0x034, 0x0000838A,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3F1,
+ 0x034, 0x000092B1,
+ 0x034, 0x000081CF,
+ 0x034, 0x00007170,
+ 0x034, 0x0000616D,
+ 0x034, 0x0000516A,
+ 0x034, 0x00004167,
+ 0x034, 0x0000302F,
+ 0x034, 0x0000202C,
+ 0x034, 0x00001029,
+ 0x034, 0x00000026,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000AFF7,
+ 0x034, 0x00009FF6,
+ 0x034, 0x00008FF3,
+ 0x034, 0x00007FF0,
+ 0x034, 0x00006FED,
+ 0x034, 0x00005FEA,
+ 0x034, 0x00004FE7,
+ 0x034, 0x00003EC7,
+ 0x034, 0x00002EC4,
+ 0x034, 0x00001D4B,
+ 0x034, 0x00000D48,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AC,
+ 0x034, 0x0000838A,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AC,
+ 0x034, 0x0000838A,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0000AFF4,
+ 0x034, 0x00009FF1,
+ 0x034, 0x00008FEE,
+ 0x034, 0x00007FEB,
+ 0x034, 0x00006FE8,
+ 0x034, 0x00005DEA,
+ 0x034, 0x00004CED,
+ 0x034, 0x00003CEA,
+ 0x034, 0x00002C6C,
+ 0x034, 0x00001C69,
+ 0x034, 0x00000C2B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3EB,
+ 0x034, 0x000C938B,
+ 0x034, 0x000C81AC,
+ 0x034, 0x000C71A9,
+ 0x034, 0x000C60AC,
+ 0x034, 0x000C50A9,
+ 0x034, 0x000C402E,
+ 0x034, 0x000C302B,
+ 0x034, 0x000C2028,
+ 0x034, 0x000C100B,
+ 0x034, 0x000C0008,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3AD,
+ 0x034, 0x000C938A,
+ 0x034, 0x000C818C,
+ 0x034, 0x000C7189,
+ 0x034, 0x000C606D,
+ 0x034, 0x000C506A,
+ 0x034, 0x000C402C,
+ 0x034, 0x000C3029,
+ 0x034, 0x000C2026,
+ 0x034, 0x000C1009,
+ 0x034, 0x000C0006,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3AD,
+ 0x034, 0x000C938A,
+ 0x034, 0x000C818C,
+ 0x034, 0x000C7189,
+ 0x034, 0x000C606D,
+ 0x034, 0x000C506A,
+ 0x034, 0x000C402C,
+ 0x034, 0x000C3029,
+ 0x034, 0x000C2026,
+ 0x034, 0x000C1009,
+ 0x034, 0x000C0006,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3AD,
+ 0x034, 0x000C938A,
+ 0x034, 0x000C818C,
+ 0x034, 0x000C7189,
+ 0x034, 0x000C606D,
+ 0x034, 0x000C506A,
+ 0x034, 0x000C402C,
+ 0x034, 0x000C3029,
+ 0x034, 0x000C2026,
+ 0x034, 0x000C1009,
+ 0x034, 0x000C0006,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3AD,
+ 0x034, 0x000C938A,
+ 0x034, 0x000C818C,
+ 0x034, 0x000C7189,
+ 0x034, 0x000C606D,
+ 0x034, 0x000C506A,
+ 0x034, 0x000C402C,
+ 0x034, 0x000C3029,
+ 0x034, 0x000C2026,
+ 0x034, 0x000C1009,
+ 0x034, 0x000C0006,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3F4,
+ 0x034, 0x000C93D2,
+ 0x034, 0x000C82D1,
+ 0x034, 0x000C71F1,
+ 0x034, 0x000C61EE,
+ 0x034, 0x000C51EB,
+ 0x034, 0x000C41E8,
+ 0x034, 0x000C314B,
+ 0x034, 0x000C2148,
+ 0x034, 0x000C104B,
+ 0x034, 0x000C0048,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CAFF7,
+ 0x034, 0x000C9FF6,
+ 0x034, 0x000C8FF3,
+ 0x034, 0x000C7FF0,
+ 0x034, 0x000C6FED,
+ 0x034, 0x000C5FEA,
+ 0x034, 0x000C4FE7,
+ 0x034, 0x000C3CB1,
+ 0x034, 0x000C2CAE,
+ 0x034, 0x000C1CAB,
+ 0x034, 0x000C0CA8,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3AD,
+ 0x034, 0x000C938A,
+ 0x034, 0x000C818C,
+ 0x034, 0x000C7189,
+ 0x034, 0x000C606D,
+ 0x034, 0x000C506A,
+ 0x034, 0x000C402C,
+ 0x034, 0x000C3029,
+ 0x034, 0x000C2026,
+ 0x034, 0x000C1009,
+ 0x034, 0x000C0006,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3AD,
+ 0x034, 0x000C938A,
+ 0x034, 0x000C818C,
+ 0x034, 0x000C7189,
+ 0x034, 0x000C606D,
+ 0x034, 0x000C506A,
+ 0x034, 0x000C402C,
+ 0x034, 0x000C3029,
+ 0x034, 0x000C2026,
+ 0x034, 0x000C1009,
+ 0x034, 0x000C0006,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x000CA794,
+ 0x034, 0x000C9791,
+ 0x034, 0x000C878E,
+ 0x034, 0x000C778B,
+ 0x034, 0x000C658D,
+ 0x034, 0x000C558A,
+ 0x034, 0x000C448D,
+ 0x034, 0x000C348A,
+ 0x034, 0x000C244C,
+ 0x034, 0x000C1449,
+ 0x034, 0x000C042B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EE,
+ 0x034, 0x000A93EB,
+ 0x034, 0x000A838B,
+ 0x034, 0x000A71AC,
+ 0x034, 0x000A61A9,
+ 0x034, 0x000A50AC,
+ 0x034, 0x000A40A9,
+ 0x034, 0x000A30A6,
+ 0x034, 0x000A202C,
+ 0x034, 0x000A1029,
+ 0x034, 0x000A0026,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3AD,
+ 0x034, 0x000A938A,
+ 0x034, 0x000A818C,
+ 0x034, 0x000A7189,
+ 0x034, 0x000A606D,
+ 0x034, 0x000A504C,
+ 0x034, 0x000A402C,
+ 0x034, 0x000A3029,
+ 0x034, 0x000A2026,
+ 0x034, 0x000A1023,
+ 0x034, 0x000A0006,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3AD,
+ 0x034, 0x000A938A,
+ 0x034, 0x000A818C,
+ 0x034, 0x000A7189,
+ 0x034, 0x000A606D,
+ 0x034, 0x000A504C,
+ 0x034, 0x000A402C,
+ 0x034, 0x000A3029,
+ 0x034, 0x000A2026,
+ 0x034, 0x000A1023,
+ 0x034, 0x000A0006,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3AD,
+ 0x034, 0x000A938A,
+ 0x034, 0x000A818C,
+ 0x034, 0x000A7189,
+ 0x034, 0x000A606D,
+ 0x034, 0x000A504C,
+ 0x034, 0x000A402C,
+ 0x034, 0x000A3029,
+ 0x034, 0x000A2026,
+ 0x034, 0x000A1023,
+ 0x034, 0x000A0006,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3AD,
+ 0x034, 0x000A938A,
+ 0x034, 0x000A818C,
+ 0x034, 0x000A7189,
+ 0x034, 0x000A606D,
+ 0x034, 0x000A504C,
+ 0x034, 0x000A402C,
+ 0x034, 0x000A3029,
+ 0x034, 0x000A2026,
+ 0x034, 0x000A1023,
+ 0x034, 0x000A0006,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3F5,
+ 0x034, 0x000A93D2,
+ 0x034, 0x000A83CE,
+ 0x034, 0x000A7290,
+ 0x034, 0x000A628D,
+ 0x034, 0x000A528A,
+ 0x034, 0x000A4287,
+ 0x034, 0x000A308D,
+ 0x034, 0x000A208A,
+ 0x034, 0x000A1087,
+ 0x034, 0x000A0048,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AAFF7,
+ 0x034, 0x000A9FF6,
+ 0x034, 0x000A8FF3,
+ 0x034, 0x000A7FF0,
+ 0x034, 0x000A6FED,
+ 0x034, 0x000A5FEA,
+ 0x034, 0x000A4FE7,
+ 0x034, 0x000A3DEA,
+ 0x034, 0x000A2DE7,
+ 0x034, 0x000A1DE4,
+ 0x034, 0x000A0D48,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3AD,
+ 0x034, 0x000A938A,
+ 0x034, 0x000A818C,
+ 0x034, 0x000A7189,
+ 0x034, 0x000A606D,
+ 0x034, 0x000A504C,
+ 0x034, 0x000A402C,
+ 0x034, 0x000A3029,
+ 0x034, 0x000A2026,
+ 0x034, 0x000A1023,
+ 0x034, 0x000A0006,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3AD,
+ 0x034, 0x000A938A,
+ 0x034, 0x000A818C,
+ 0x034, 0x000A7189,
+ 0x034, 0x000A606D,
+ 0x034, 0x000A504C,
+ 0x034, 0x000A402C,
+ 0x034, 0x000A3029,
+ 0x034, 0x000A2026,
+ 0x034, 0x000A1023,
+ 0x034, 0x000A0006,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x000AA794,
+ 0x034, 0x000A9791,
+ 0x034, 0x000A878E,
+ 0x034, 0x000A778B,
+ 0x034, 0x000A658D,
+ 0x034, 0x000A558A,
+ 0x034, 0x000A448D,
+ 0x034, 0x000A348A,
+ 0x034, 0x000A244C,
+ 0x034, 0x000A1449,
+ 0x034, 0x000A042B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EF,
+ 0x034, 0x000893EC,
+ 0x034, 0x0008838C,
+ 0x034, 0x000871AD,
+ 0x034, 0x000861AA,
+ 0x034, 0x000850AD,
+ 0x034, 0x000840AA,
+ 0x034, 0x0008306A,
+ 0x034, 0x0008202D,
+ 0x034, 0x0008102A,
+ 0x034, 0x00080027,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AC,
+ 0x034, 0x0008838A,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AC,
+ 0x034, 0x0008838A,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AC,
+ 0x034, 0x0008838A,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AC,
+ 0x034, 0x0008838A,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3F1,
+ 0x034, 0x000892B1,
+ 0x034, 0x000881CF,
+ 0x034, 0x00087170,
+ 0x034, 0x0008616D,
+ 0x034, 0x0008516A,
+ 0x034, 0x00084167,
+ 0x034, 0x0008302F,
+ 0x034, 0x0008202C,
+ 0x034, 0x00081029,
+ 0x034, 0x00080026,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008AFF7,
+ 0x034, 0x00089FF6,
+ 0x034, 0x00088FF3,
+ 0x034, 0x00087FF0,
+ 0x034, 0x00086FED,
+ 0x034, 0x00085FEA,
+ 0x034, 0x00084FE7,
+ 0x034, 0x00083EC7,
+ 0x034, 0x00082EC4,
+ 0x034, 0x00081D4B,
+ 0x034, 0x00080D48,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AC,
+ 0x034, 0x0008838A,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AC,
+ 0x034, 0x0008838A,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0008A794,
+ 0x034, 0x00089791,
+ 0x034, 0x0008878E,
+ 0x034, 0x0008778B,
+ 0x034, 0x0008658D,
+ 0x034, 0x0008558A,
+ 0x034, 0x0008448D,
+ 0x034, 0x0008348A,
+ 0x034, 0x0008244C,
+ 0x034, 0x00081449,
+ 0x034, 0x0008042B,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0xA0000000, 0x00000000,
+ 0x0DF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0xA0000000, 0x00000000,
+ 0x035, 0x00000484,
+ 0x035, 0x00008484,
+ 0x035, 0x00010484,
+ 0x035, 0x00020584,
+ 0x035, 0x00028584,
+ 0x035, 0x00030584,
+ 0x035, 0x00040584,
+ 0x035, 0x00048584,
+ 0x035, 0x00050584,
+ 0x035, 0x000805FB,
+ 0x035, 0x000885FB,
+ 0x035, 0x000905FB,
+ 0x035, 0x000A05FB,
+ 0x035, 0x000A85FB,
+ 0x035, 0x000B05FB,
+ 0x035, 0x000C05FB,
+ 0x035, 0x000C85FB,
+ 0x035, 0x000D05FB,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0xA0000000, 0x00000000,
+ 0x0DF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000473,
+ 0x036, 0x00008473,
+ 0x036, 0x00010473,
+ 0x036, 0x00020473,
+ 0x036, 0x00028473,
+ 0x036, 0x00030473,
+ 0x036, 0x00040473,
+ 0x036, 0x00048473,
+ 0x036, 0x00050473,
+ 0x036, 0x00080473,
+ 0x036, 0x00088473,
+ 0x036, 0x00090473,
+ 0x036, 0x000A0473,
+ 0x036, 0x000A8473,
+ 0x036, 0x000B0473,
+ 0x036, 0x000C0473,
+ 0x036, 0x000C8473,
+ 0x036, 0x000D0473,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0xA0000000, 0x00000000,
+ 0x036, 0x00000474,
+ 0x036, 0x00008474,
+ 0x036, 0x00010474,
+ 0x036, 0x00020474,
+ 0x036, 0x00028474,
+ 0x036, 0x00030474,
+ 0x036, 0x00040474,
+ 0x036, 0x00048474,
+ 0x036, 0x00050474,
+ 0x036, 0x00080474,
+ 0x036, 0x00088474,
+ 0x036, 0x00090474,
+ 0x036, 0x000A0474,
+ 0x036, 0x000A8474,
+ 0x036, 0x000B0474,
+ 0x036, 0x000C0474,
+ 0x036, 0x000C8474,
+ 0x036, 0x000D0474,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00000004,
+ 0x037, 0x00000000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00004000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00008000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00010000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00014000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00018000,
+ 0x038, 0x0000514E,
+ 0x037, 0x0001C000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00020000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00024000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00028000,
+ 0x038, 0x0000514E,
+ 0x037, 0x0002C000,
+ 0x038, 0x0000714E,
+ 0x037, 0x00030000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00034000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00038000,
+ 0x038, 0x0000514E,
+ 0x037, 0x0003C000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00040000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00044000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00048000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00080000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00084000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00088000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00090000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00094000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00098000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x0009C000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000A0000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000A4000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000A8000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000AC000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000B0000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000B4000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000B8000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000BC000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000C0000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000C4000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000C8000,
+ 0x038, 0x00005ECE,
+ 0x0EF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000008,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x0000007D,
+ 0x03C, 0x0000047D,
+ 0x03C, 0x0000087D,
+ 0x03C, 0x0000107D,
+ 0x03C, 0x0000147D,
+ 0x03C, 0x0000187D,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000275,
+ 0x03C, 0x00000542,
+ 0x03C, 0x00000821,
+ 0x03C, 0x00001275,
+ 0x03C, 0x00001542,
+ 0x03C, 0x00001821,
+ 0x03C, 0x00002275,
+ 0x03C, 0x00002542,
+ 0x03C, 0x00002821,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027F,
+ 0x03C, 0x00000542,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127F,
+ 0x03C, 0x00001542,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227F,
+ 0x03C, 0x00002542,
+ 0x03C, 0x00002821,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027F,
+ 0x03C, 0x00000542,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127F,
+ 0x03C, 0x00001542,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227F,
+ 0x03C, 0x00002542,
+ 0x03C, 0x00002821,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027F,
+ 0x03C, 0x00000542,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127F,
+ 0x03C, 0x00001542,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227F,
+ 0x03C, 0x00002542,
+ 0x03C, 0x00002821,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027F,
+ 0x03C, 0x00000542,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127F,
+ 0x03C, 0x00001542,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227F,
+ 0x03C, 0x00002542,
+ 0x03C, 0x00002821,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027F,
+ 0x03C, 0x00000542,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127F,
+ 0x03C, 0x00001542,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227F,
+ 0x03C, 0x00002542,
+ 0x03C, 0x00002821,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027F,
+ 0x03C, 0x00000542,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127F,
+ 0x03C, 0x00001542,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227F,
+ 0x03C, 0x00002542,
+ 0x03C, 0x00002821,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027F,
+ 0x03C, 0x00000542,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127F,
+ 0x03C, 0x00001542,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227F,
+ 0x03C, 0x00002542,
+ 0x03C, 0x00002821,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x0000037E,
+ 0x03C, 0x00000575,
+ 0x03C, 0x00000971,
+ 0x03C, 0x0000127E,
+ 0x03C, 0x00001575,
+ 0x03C, 0x00001871,
+ 0x03C, 0x0000217E,
+ 0x03C, 0x00002575,
+ 0x03C, 0x00002871,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x061, 0x000C0D47,
+ 0x062, 0x0000133C,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0xA0000000, 0x00000000,
+ 0x063, 0x0007D0E7,
+ 0xB0000000, 0x00000000,
+ 0x064, 0x00014FEC,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0xA0000000, 0x00000000,
+ 0x065, 0x000923FF,
+ 0xB0000000, 0x00000000,
+ 0x066, 0x00000040,
+ 0x057, 0x00050000,
+ 0x056, 0x00051DF0,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x055, 0x00082060,
+ 0xB0000000, 0x00000000,
+};
+
+RTW_DECL_TABLE_RF_RADIO(rtw8814a_rf_d, D);
+
+static const struct rtw_txpwr_lmt_cfg_pair rtw8814a_txpwr_lmt[] = {
+ { 0, 0, 0, 0, 1, 36, },
+ { 2, 0, 0, 0, 1, 32, },
+ { 1, 0, 0, 0, 1, 32, },
+ { 0, 0, 0, 0, 2, 36, },
+ { 2, 0, 0, 0, 2, 32, },
+ { 1, 0, 0, 0, 2, 32, },
+ { 0, 0, 0, 0, 3, 36, },
+ { 2, 0, 0, 0, 3, 32, },
+ { 1, 0, 0, 0, 3, 32, },
+ { 0, 0, 0, 0, 4, 36, },
+ { 2, 0, 0, 0, 4, 32, },
+ { 1, 0, 0, 0, 4, 32, },
+ { 0, 0, 0, 0, 5, 36, },
+ { 2, 0, 0, 0, 5, 32, },
+ { 1, 0, 0, 0, 5, 32, },
+ { 0, 0, 0, 0, 6, 36, },
+ { 2, 0, 0, 0, 6, 32, },
+ { 1, 0, 0, 0, 6, 32, },
+ { 0, 0, 0, 0, 7, 36, },
+ { 2, 0, 0, 0, 7, 32, },
+ { 1, 0, 0, 0, 7, 32, },
+ { 0, 0, 0, 0, 8, 36, },
+ { 2, 0, 0, 0, 8, 32, },
+ { 1, 0, 0, 0, 8, 32, },
+ { 0, 0, 0, 0, 9, 36, },
+ { 2, 0, 0, 0, 9, 32, },
+ { 1, 0, 0, 0, 9, 32, },
+ { 0, 0, 0, 0, 10, 36, },
+ { 2, 0, 0, 0, 10, 32, },
+ { 1, 0, 0, 0, 10, 32, },
+ { 0, 0, 0, 0, 11, 36, },
+ { 2, 0, 0, 0, 11, 32, },
+ { 1, 0, 0, 0, 11, 32, },
+ { 0, 0, 0, 0, 12, 63, },
+ { 2, 0, 0, 0, 12, 32, },
+ { 1, 0, 0, 0, 12, 32, },
+ { 0, 0, 0, 0, 13, 63, },
+ { 2, 0, 0, 0, 13, 32, },
+ { 1, 0, 0, 0, 13, 32, },
+ { 0, 0, 0, 0, 14, 63, },
+ { 2, 0, 0, 0, 14, 63, },
+ { 1, 0, 0, 0, 14, 32, },
+ { 0, 0, 0, 1, 1, 34, },
+ { 2, 0, 0, 1, 1, 32, },
+ { 1, 0, 0, 1, 1, 32, },
+ { 0, 0, 0, 1, 2, 36, },
+ { 2, 0, 0, 1, 2, 32, },
+ { 1, 0, 0, 1, 2, 32, },
+ { 0, 0, 0, 1, 3, 36, },
+ { 2, 0, 0, 1, 3, 32, },
+ { 1, 0, 0, 1, 3, 32, },
+ { 0, 0, 0, 1, 4, 36, },
+ { 2, 0, 0, 1, 4, 32, },
+ { 1, 0, 0, 1, 4, 32, },
+ { 0, 0, 0, 1, 5, 36, },
+ { 2, 0, 0, 1, 5, 32, },
+ { 1, 0, 0, 1, 5, 32, },
+ { 0, 0, 0, 1, 6, 36, },
+ { 2, 0, 0, 1, 6, 32, },
+ { 1, 0, 0, 1, 6, 32, },
+ { 0, 0, 0, 1, 7, 36, },
+ { 2, 0, 0, 1, 7, 32, },
+ { 1, 0, 0, 1, 7, 32, },
+ { 0, 0, 0, 1, 8, 36, },
+ { 2, 0, 0, 1, 8, 32, },
+ { 1, 0, 0, 1, 8, 32, },
+ { 0, 0, 0, 1, 9, 36, },
+ { 2, 0, 0, 1, 9, 32, },
+ { 1, 0, 0, 1, 9, 32, },
+ { 0, 0, 0, 1, 10, 36, },
+ { 2, 0, 0, 1, 10, 32, },
+ { 1, 0, 0, 1, 10, 32, },
+ { 0, 0, 0, 1, 11, 32, },
+ { 2, 0, 0, 1, 11, 32, },
+ { 1, 0, 0, 1, 11, 32, },
+ { 0, 0, 0, 1, 12, 63, },
+ { 2, 0, 0, 1, 12, 32, },
+ { 1, 0, 0, 1, 12, 32, },
+ { 0, 0, 0, 1, 13, 63, },
+ { 2, 0, 0, 1, 13, 32, },
+ { 1, 0, 0, 1, 13, 32, },
+ { 0, 0, 0, 1, 14, 63, },
+ { 2, 0, 0, 1, 14, 63, },
+ { 1, 0, 0, 1, 14, 63, },
+ { 0, 0, 0, 2, 1, 34, },
+ { 2, 0, 0, 2, 1, 32, },
+ { 1, 0, 0, 2, 1, 32, },
+ { 0, 0, 0, 2, 2, 36, },
+ { 2, 0, 0, 2, 2, 32, },
+ { 1, 0, 0, 2, 2, 32, },
+ { 0, 0, 0, 2, 3, 36, },
+ { 2, 0, 0, 2, 3, 32, },
+ { 1, 0, 0, 2, 3, 32, },
+ { 0, 0, 0, 2, 4, 36, },
+ { 2, 0, 0, 2, 4, 32, },
+ { 1, 0, 0, 2, 4, 32, },
+ { 0, 0, 0, 2, 5, 36, },
+ { 2, 0, 0, 2, 5, 32, },
+ { 1, 0, 0, 2, 5, 32, },
+ { 0, 0, 0, 2, 6, 36, },
+ { 2, 0, 0, 2, 6, 32, },
+ { 1, 0, 0, 2, 6, 32, },
+ { 0, 0, 0, 2, 7, 36, },
+ { 2, 0, 0, 2, 7, 32, },
+ { 1, 0, 0, 2, 7, 32, },
+ { 0, 0, 0, 2, 8, 36, },
+ { 2, 0, 0, 2, 8, 32, },
+ { 1, 0, 0, 2, 8, 32, },
+ { 0, 0, 0, 2, 9, 36, },
+ { 2, 0, 0, 2, 9, 32, },
+ { 1, 0, 0, 2, 9, 32, },
+ { 0, 0, 0, 2, 10, 36, },
+ { 2, 0, 0, 2, 10, 32, },
+ { 1, 0, 0, 2, 10, 32, },
+ { 0, 0, 0, 2, 11, 32, },
+ { 2, 0, 0, 2, 11, 32, },
+ { 1, 0, 0, 2, 11, 32, },
+ { 0, 0, 0, 2, 12, 63, },
+ { 2, 0, 0, 2, 12, 32, },
+ { 1, 0, 0, 2, 12, 32, },
+ { 0, 0, 0, 2, 13, 63, },
+ { 2, 0, 0, 2, 13, 32, },
+ { 1, 0, 0, 2, 13, 32, },
+ { 0, 0, 0, 2, 14, 63, },
+ { 2, 0, 0, 2, 14, 63, },
+ { 1, 0, 0, 2, 14, 63, },
+ { 0, 0, 0, 3, 1, 32, },
+ { 2, 0, 0, 3, 1, 30, },
+ { 1, 0, 0, 3, 1, 30, },
+ { 0, 0, 0, 3, 2, 34, },
+ { 2, 0, 0, 3, 2, 30, },
+ { 1, 0, 0, 3, 2, 30, },
+ { 0, 0, 0, 3, 3, 34, },
+ { 2, 0, 0, 3, 3, 30, },
+ { 1, 0, 0, 3, 3, 30, },
+ { 0, 0, 0, 3, 4, 34, },
+ { 2, 0, 0, 3, 4, 30, },
+ { 1, 0, 0, 3, 4, 30, },
+ { 0, 0, 0, 3, 5, 34, },
+ { 2, 0, 0, 3, 5, 30, },
+ { 1, 0, 0, 3, 5, 30, },
+ { 0, 0, 0, 3, 6, 34, },
+ { 2, 0, 0, 3, 6, 30, },
+ { 1, 0, 0, 3, 6, 30, },
+ { 0, 0, 0, 3, 7, 34, },
+ { 2, 0, 0, 3, 7, 30, },
+ { 1, 0, 0, 3, 7, 30, },
+ { 0, 0, 0, 3, 8, 34, },
+ { 2, 0, 0, 3, 8, 30, },
+ { 1, 0, 0, 3, 8, 30, },
+ { 0, 0, 0, 3, 9, 34, },
+ { 2, 0, 0, 3, 9, 30, },
+ { 1, 0, 0, 3, 9, 30, },
+ { 0, 0, 0, 3, 10, 34, },
+ { 2, 0, 0, 3, 10, 30, },
+ { 1, 0, 0, 3, 10, 30, },
+ { 0, 0, 0, 3, 11, 30, },
+ { 2, 0, 0, 3, 11, 30, },
+ { 1, 0, 0, 3, 11, 30, },
+ { 0, 0, 0, 3, 12, 63, },
+ { 2, 0, 0, 3, 12, 30, },
+ { 1, 0, 0, 3, 12, 30, },
+ { 0, 0, 0, 3, 13, 63, },
+ { 2, 0, 0, 3, 13, 30, },
+ { 1, 0, 0, 3, 13, 30, },
+ { 0, 0, 0, 3, 14, 63, },
+ { 2, 0, 0, 3, 14, 63, },
+ { 1, 0, 0, 3, 14, 63, },
+ { 0, 0, 0, 6, 1, 30, },
+ { 2, 0, 0, 6, 1, 28, },
+ { 1, 0, 0, 6, 1, 28, },
+ { 0, 0, 0, 6, 2, 32, },
+ { 2, 0, 0, 6, 2, 28, },
+ { 1, 0, 0, 6, 2, 28, },
+ { 0, 0, 0, 6, 3, 32, },
+ { 2, 0, 0, 6, 3, 28, },
+ { 1, 0, 0, 6, 3, 28, },
+ { 0, 0, 0, 6, 4, 32, },
+ { 2, 0, 0, 6, 4, 28, },
+ { 1, 0, 0, 6, 4, 28, },
+ { 0, 0, 0, 6, 5, 32, },
+ { 2, 0, 0, 6, 5, 28, },
+ { 1, 0, 0, 6, 5, 28, },
+ { 0, 0, 0, 6, 6, 32, },
+ { 2, 0, 0, 6, 6, 28, },
+ { 1, 0, 0, 6, 6, 28, },
+ { 0, 0, 0, 6, 7, 32, },
+ { 2, 0, 0, 6, 7, 28, },
+ { 1, 0, 0, 6, 7, 28, },
+ { 0, 0, 0, 6, 8, 32, },
+ { 2, 0, 0, 6, 8, 28, },
+ { 1, 0, 0, 6, 8, 28, },
+ { 0, 0, 0, 6, 9, 32, },
+ { 2, 0, 0, 6, 9, 28, },
+ { 1, 0, 0, 6, 9, 28, },
+ { 0, 0, 0, 6, 10, 32, },
+ { 2, 0, 0, 6, 10, 28, },
+ { 1, 0, 0, 6, 10, 28, },
+ { 0, 0, 0, 6, 11, 28, },
+ { 2, 0, 0, 6, 11, 28, },
+ { 1, 0, 0, 6, 11, 28, },
+ { 0, 0, 0, 6, 12, 63, },
+ { 2, 0, 0, 6, 12, 28, },
+ { 1, 0, 0, 6, 12, 28, },
+ { 0, 0, 0, 6, 13, 63, },
+ { 2, 0, 0, 6, 13, 28, },
+ { 1, 0, 0, 6, 13, 28, },
+ { 0, 0, 0, 6, 14, 63, },
+ { 2, 0, 0, 6, 14, 63, },
+ { 1, 0, 0, 6, 14, 63, },
+ { 0, 0, 0, 7, 1, 28, },
+ { 2, 0, 0, 7, 1, 26, },
+ { 1, 0, 0, 7, 1, 26, },
+ { 0, 0, 0, 7, 2, 30, },
+ { 2, 0, 0, 7, 2, 26, },
+ { 1, 0, 0, 7, 2, 26, },
+ { 0, 0, 0, 7, 3, 30, },
+ { 2, 0, 0, 7, 3, 26, },
+ { 1, 0, 0, 7, 3, 26, },
+ { 0, 0, 0, 7, 4, 30, },
+ { 2, 0, 0, 7, 4, 26, },
+ { 1, 0, 0, 7, 4, 26, },
+ { 0, 0, 0, 7, 5, 30, },
+ { 2, 0, 0, 7, 5, 26, },
+ { 1, 0, 0, 7, 5, 26, },
+ { 0, 0, 0, 7, 6, 30, },
+ { 2, 0, 0, 7, 6, 26, },
+ { 1, 0, 0, 7, 6, 26, },
+ { 0, 0, 0, 7, 7, 30, },
+ { 2, 0, 0, 7, 7, 26, },
+ { 1, 0, 0, 7, 7, 26, },
+ { 0, 0, 0, 7, 8, 30, },
+ { 2, 0, 0, 7, 8, 26, },
+ { 1, 0, 0, 7, 8, 26, },
+ { 0, 0, 0, 7, 9, 30, },
+ { 2, 0, 0, 7, 9, 26, },
+ { 1, 0, 0, 7, 9, 26, },
+ { 0, 0, 0, 7, 10, 30, },
+ { 2, 0, 0, 7, 10, 26, },
+ { 1, 0, 0, 7, 10, 26, },
+ { 0, 0, 0, 7, 11, 26, },
+ { 2, 0, 0, 7, 11, 26, },
+ { 1, 0, 0, 7, 11, 26, },
+ { 0, 0, 0, 7, 12, 63, },
+ { 2, 0, 0, 7, 12, 26, },
+ { 1, 0, 0, 7, 12, 26, },
+ { 0, 0, 0, 7, 13, 63, },
+ { 2, 0, 0, 7, 13, 26, },
+ { 1, 0, 0, 7, 13, 26, },
+ { 0, 0, 0, 7, 14, 63, },
+ { 2, 0, 0, 7, 14, 63, },
+ { 1, 0, 0, 7, 14, 63, },
+ { 0, 0, 1, 2, 1, 63, },
+ { 2, 0, 1, 2, 1, 63, },
+ { 1, 0, 1, 2, 1, 63, },
+ { 0, 0, 1, 2, 2, 63, },
+ { 2, 0, 1, 2, 2, 63, },
+ { 1, 0, 1, 2, 2, 63, },
+ { 0, 0, 1, 2, 3, 32, },
+ { 2, 0, 1, 2, 3, 32, },
+ { 1, 0, 1, 2, 3, 32, },
+ { 0, 0, 1, 2, 4, 36, },
+ { 2, 0, 1, 2, 4, 32, },
+ { 1, 0, 1, 2, 4, 32, },
+ { 0, 0, 1, 2, 5, 36, },
+ { 2, 0, 1, 2, 5, 32, },
+ { 1, 0, 1, 2, 5, 32, },
+ { 0, 0, 1, 2, 6, 36, },
+ { 2, 0, 1, 2, 6, 32, },
+ { 1, 0, 1, 2, 6, 32, },
+ { 0, 0, 1, 2, 7, 36, },
+ { 2, 0, 1, 2, 7, 32, },
+ { 1, 0, 1, 2, 7, 32, },
+ { 0, 0, 1, 2, 8, 36, },
+ { 2, 0, 1, 2, 8, 32, },
+ { 1, 0, 1, 2, 8, 32, },
+ { 0, 0, 1, 2, 9, 36, },
+ { 2, 0, 1, 2, 9, 32, },
+ { 1, 0, 1, 2, 9, 32, },
+ { 0, 0, 1, 2, 10, 36, },
+ { 2, 0, 1, 2, 10, 32, },
+ { 1, 0, 1, 2, 10, 32, },
+ { 0, 0, 1, 2, 11, 32, },
+ { 2, 0, 1, 2, 11, 32, },
+ { 1, 0, 1, 2, 11, 32, },
+ { 0, 0, 1, 2, 12, 63, },
+ { 2, 0, 1, 2, 12, 32, },
+ { 1, 0, 1, 2, 12, 32, },
+ { 0, 0, 1, 2, 13, 63, },
+ { 2, 0, 1, 2, 13, 32, },
+ { 1, 0, 1, 2, 13, 32, },
+ { 0, 0, 1, 2, 14, 63, },
+ { 2, 0, 1, 2, 14, 63, },
+ { 1, 0, 1, 2, 14, 63, },
+ { 0, 0, 1, 3, 1, 63, },
+ { 2, 0, 1, 3, 1, 63, },
+ { 1, 0, 1, 3, 1, 63, },
+ { 0, 0, 1, 3, 2, 63, },
+ { 2, 0, 1, 3, 2, 63, },
+ { 1, 0, 1, 3, 2, 63, },
+ { 0, 0, 1, 3, 3, 30, },
+ { 2, 0, 1, 3, 3, 30, },
+ { 1, 0, 1, 3, 3, 30, },
+ { 0, 0, 1, 3, 4, 34, },
+ { 2, 0, 1, 3, 4, 30, },
+ { 1, 0, 1, 3, 4, 30, },
+ { 0, 0, 1, 3, 5, 34, },
+ { 2, 0, 1, 3, 5, 30, },
+ { 1, 0, 1, 3, 5, 30, },
+ { 0, 0, 1, 3, 6, 34, },
+ { 2, 0, 1, 3, 6, 30, },
+ { 1, 0, 1, 3, 6, 30, },
+ { 0, 0, 1, 3, 7, 34, },
+ { 2, 0, 1, 3, 7, 30, },
+ { 1, 0, 1, 3, 7, 30, },
+ { 0, 0, 1, 3, 8, 34, },
+ { 2, 0, 1, 3, 8, 30, },
+ { 1, 0, 1, 3, 8, 30, },
+ { 0, 0, 1, 3, 9, 34, },
+ { 2, 0, 1, 3, 9, 30, },
+ { 1, 0, 1, 3, 9, 30, },
+ { 0, 0, 1, 3, 10, 34, },
+ { 2, 0, 1, 3, 10, 30, },
+ { 1, 0, 1, 3, 10, 30, },
+ { 0, 0, 1, 3, 11, 30, },
+ { 2, 0, 1, 3, 11, 30, },
+ { 1, 0, 1, 3, 11, 30, },
+ { 0, 0, 1, 3, 12, 63, },
+ { 2, 0, 1, 3, 12, 30, },
+ { 1, 0, 1, 3, 12, 30, },
+ { 0, 0, 1, 3, 13, 63, },
+ { 2, 0, 1, 3, 13, 30, },
+ { 1, 0, 1, 3, 13, 30, },
+ { 0, 0, 1, 3, 14, 63, },
+ { 2, 0, 1, 3, 14, 63, },
+ { 1, 0, 1, 3, 14, 63, },
+ { 0, 0, 1, 6, 1, 63, },
+ { 2, 0, 1, 6, 1, 63, },
+ { 1, 0, 1, 6, 1, 63, },
+ { 0, 0, 1, 6, 2, 63, },
+ { 2, 0, 1, 6, 2, 63, },
+ { 1, 0, 1, 6, 2, 63, },
+ { 0, 0, 1, 6, 3, 28, },
+ { 2, 0, 1, 6, 3, 28, },
+ { 1, 0, 1, 6, 3, 28, },
+ { 0, 0, 1, 6, 4, 32, },
+ { 2, 0, 1, 6, 4, 28, },
+ { 1, 0, 1, 6, 4, 28, },
+ { 0, 0, 1, 6, 5, 32, },
+ { 2, 0, 1, 6, 5, 28, },
+ { 1, 0, 1, 6, 5, 28, },
+ { 0, 0, 1, 6, 6, 32, },
+ { 2, 0, 1, 6, 6, 28, },
+ { 1, 0, 1, 6, 6, 28, },
+ { 0, 0, 1, 6, 7, 32, },
+ { 2, 0, 1, 6, 7, 28, },
+ { 1, 0, 1, 6, 7, 28, },
+ { 0, 0, 1, 6, 8, 32, },
+ { 2, 0, 1, 6, 8, 28, },
+ { 1, 0, 1, 6, 8, 28, },
+ { 0, 0, 1, 6, 9, 32, },
+ { 2, 0, 1, 6, 9, 28, },
+ { 1, 0, 1, 6, 9, 28, },
+ { 0, 0, 1, 6, 10, 32, },
+ { 2, 0, 1, 6, 10, 28, },
+ { 1, 0, 1, 6, 10, 28, },
+ { 0, 0, 1, 6, 11, 28, },
+ { 2, 0, 1, 6, 11, 28, },
+ { 1, 0, 1, 6, 11, 28, },
+ { 0, 0, 1, 6, 12, 63, },
+ { 2, 0, 1, 6, 12, 28, },
+ { 1, 0, 1, 6, 12, 28, },
+ { 0, 0, 1, 6, 13, 63, },
+ { 2, 0, 1, 6, 13, 28, },
+ { 1, 0, 1, 6, 13, 28, },
+ { 0, 0, 1, 6, 14, 63, },
+ { 2, 0, 1, 6, 14, 63, },
+ { 1, 0, 1, 6, 14, 63, },
+ { 0, 0, 1, 7, 1, 63, },
+ { 2, 0, 1, 7, 1, 63, },
+ { 1, 0, 1, 7, 1, 63, },
+ { 0, 0, 1, 7, 2, 63, },
+ { 2, 0, 1, 7, 2, 63, },
+ { 1, 0, 1, 7, 2, 63, },
+ { 0, 0, 1, 7, 3, 26, },
+ { 2, 0, 1, 7, 3, 26, },
+ { 1, 0, 1, 7, 3, 26, },
+ { 0, 0, 1, 7, 4, 30, },
+ { 2, 0, 1, 7, 4, 26, },
+ { 1, 0, 1, 7, 4, 26, },
+ { 0, 0, 1, 7, 5, 30, },
+ { 2, 0, 1, 7, 5, 26, },
+ { 1, 0, 1, 7, 5, 26, },
+ { 0, 0, 1, 7, 6, 30, },
+ { 2, 0, 1, 7, 6, 26, },
+ { 1, 0, 1, 7, 6, 26, },
+ { 0, 0, 1, 7, 7, 30, },
+ { 2, 0, 1, 7, 7, 26, },
+ { 1, 0, 1, 7, 7, 26, },
+ { 0, 0, 1, 7, 8, 30, },
+ { 2, 0, 1, 7, 8, 26, },
+ { 1, 0, 1, 7, 8, 26, },
+ { 0, 0, 1, 7, 9, 30, },
+ { 2, 0, 1, 7, 9, 26, },
+ { 1, 0, 1, 7, 9, 26, },
+ { 0, 0, 1, 7, 10, 30, },
+ { 2, 0, 1, 7, 10, 26, },
+ { 1, 0, 1, 7, 10, 26, },
+ { 0, 0, 1, 7, 11, 26, },
+ { 2, 0, 1, 7, 11, 26, },
+ { 1, 0, 1, 7, 11, 26, },
+ { 0, 0, 1, 7, 12, 63, },
+ { 2, 0, 1, 7, 12, 26, },
+ { 1, 0, 1, 7, 12, 26, },
+ { 0, 0, 1, 7, 13, 63, },
+ { 2, 0, 1, 7, 13, 26, },
+ { 1, 0, 1, 7, 13, 26, },
+ { 0, 0, 1, 7, 14, 63, },
+ { 2, 0, 1, 7, 14, 63, },
+ { 1, 0, 1, 7, 14, 63, },
+ { 0, 1, 0, 1, 36, 30, },
+ { 2, 1, 0, 1, 36, 32, },
+ { 1, 1, 0, 1, 36, 32, },
+ { 0, 1, 0, 1, 40, 30, },
+ { 2, 1, 0, 1, 40, 32, },
+ { 1, 1, 0, 1, 40, 32, },
+ { 0, 1, 0, 1, 44, 30, },
+ { 2, 1, 0, 1, 44, 32, },
+ { 1, 1, 0, 1, 44, 32, },
+ { 0, 1, 0, 1, 48, 30, },
+ { 2, 1, 0, 1, 48, 32, },
+ { 1, 1, 0, 1, 48, 32, },
+ { 0, 1, 0, 1, 52, 36, },
+ { 2, 1, 0, 1, 52, 32, },
+ { 1, 1, 0, 1, 52, 32, },
+ { 0, 1, 0, 1, 56, 34, },
+ { 2, 1, 0, 1, 56, 32, },
+ { 1, 1, 0, 1, 56, 32, },
+ { 0, 1, 0, 1, 60, 32, },
+ { 2, 1, 0, 1, 60, 32, },
+ { 1, 1, 0, 1, 60, 32, },
+ { 0, 1, 0, 1, 64, 28, },
+ { 2, 1, 0, 1, 64, 32, },
+ { 1, 1, 0, 1, 64, 32, },
+ { 0, 1, 0, 1, 100, 30, },
+ { 2, 1, 0, 1, 100, 32, },
+ { 1, 1, 0, 1, 100, 32, },
+ { 0, 1, 0, 1, 104, 30, },
+ { 2, 1, 0, 1, 104, 32, },
+ { 1, 1, 0, 1, 104, 32, },
+ { 0, 1, 0, 1, 108, 32, },
+ { 2, 1, 0, 1, 108, 32, },
+ { 1, 1, 0, 1, 108, 32, },
+ { 0, 1, 0, 1, 112, 34, },
+ { 2, 1, 0, 1, 112, 32, },
+ { 1, 1, 0, 1, 112, 32, },
+ { 0, 1, 0, 1, 116, 34, },
+ { 2, 1, 0, 1, 116, 32, },
+ { 1, 1, 0, 1, 116, 32, },
+ { 0, 1, 0, 1, 120, 36, },
+ { 2, 1, 0, 1, 120, 32, },
+ { 1, 1, 0, 1, 120, 32, },
+ { 0, 1, 0, 1, 124, 34, },
+ { 2, 1, 0, 1, 124, 32, },
+ { 1, 1, 0, 1, 124, 32, },
+ { 0, 1, 0, 1, 128, 32, },
+ { 2, 1, 0, 1, 128, 32, },
+ { 1, 1, 0, 1, 128, 32, },
+ { 0, 1, 0, 1, 132, 30, },
+ { 2, 1, 0, 1, 132, 32, },
+ { 1, 1, 0, 1, 132, 32, },
+ { 0, 1, 0, 1, 136, 30, },
+ { 2, 1, 0, 1, 136, 32, },
+ { 1, 1, 0, 1, 136, 32, },
+ { 0, 1, 0, 1, 140, 28, },
+ { 2, 1, 0, 1, 140, 32, },
+ { 1, 1, 0, 1, 140, 32, },
+ { 0, 1, 0, 1, 149, 36, },
+ { 2, 1, 0, 1, 149, 32, },
+ { 1, 1, 0, 1, 149, 63, },
+ { 0, 1, 0, 1, 153, 36, },
+ { 2, 1, 0, 1, 153, 32, },
+ { 1, 1, 0, 1, 153, 63, },
+ { 0, 1, 0, 1, 157, 36, },
+ { 2, 1, 0, 1, 157, 32, },
+ { 1, 1, 0, 1, 157, 63, },
+ { 0, 1, 0, 1, 161, 36, },
+ { 2, 1, 0, 1, 161, 32, },
+ { 1, 1, 0, 1, 161, 63, },
+ { 0, 1, 0, 1, 165, 36, },
+ { 2, 1, 0, 1, 165, 32, },
+ { 1, 1, 0, 1, 165, 63, },
+ { 0, 1, 0, 2, 36, 30, },
+ { 2, 1, 0, 2, 36, 32, },
+ { 1, 1, 0, 2, 36, 32, },
+ { 0, 1, 0, 2, 40, 30, },
+ { 2, 1, 0, 2, 40, 32, },
+ { 1, 1, 0, 2, 40, 32, },
+ { 0, 1, 0, 2, 44, 30, },
+ { 2, 1, 0, 2, 44, 32, },
+ { 1, 1, 0, 2, 44, 32, },
+ { 0, 1, 0, 2, 48, 30, },
+ { 2, 1, 0, 2, 48, 32, },
+ { 1, 1, 0, 2, 48, 32, },
+ { 0, 1, 0, 2, 52, 36, },
+ { 2, 1, 0, 2, 52, 32, },
+ { 1, 1, 0, 2, 52, 32, },
+ { 0, 1, 0, 2, 56, 34, },
+ { 2, 1, 0, 2, 56, 32, },
+ { 1, 1, 0, 2, 56, 32, },
+ { 0, 1, 0, 2, 60, 32, },
+ { 2, 1, 0, 2, 60, 32, },
+ { 1, 1, 0, 2, 60, 32, },
+ { 0, 1, 0, 2, 64, 28, },
+ { 2, 1, 0, 2, 64, 32, },
+ { 1, 1, 0, 2, 64, 32, },
+ { 0, 1, 0, 2, 100, 30, },
+ { 2, 1, 0, 2, 100, 32, },
+ { 1, 1, 0, 2, 100, 32, },
+ { 0, 1, 0, 2, 104, 30, },
+ { 2, 1, 0, 2, 104, 32, },
+ { 1, 1, 0, 2, 104, 32, },
+ { 0, 1, 0, 2, 108, 32, },
+ { 2, 1, 0, 2, 108, 32, },
+ { 1, 1, 0, 2, 108, 32, },
+ { 0, 1, 0, 2, 112, 34, },
+ { 2, 1, 0, 2, 112, 32, },
+ { 1, 1, 0, 2, 112, 32, },
+ { 0, 1, 0, 2, 116, 34, },
+ { 2, 1, 0, 2, 116, 32, },
+ { 1, 1, 0, 2, 116, 32, },
+ { 0, 1, 0, 2, 120, 36, },
+ { 2, 1, 0, 2, 120, 32, },
+ { 1, 1, 0, 2, 120, 32, },
+ { 0, 1, 0, 2, 124, 34, },
+ { 2, 1, 0, 2, 124, 32, },
+ { 1, 1, 0, 2, 124, 32, },
+ { 0, 1, 0, 2, 128, 32, },
+ { 2, 1, 0, 2, 128, 32, },
+ { 1, 1, 0, 2, 128, 32, },
+ { 0, 1, 0, 2, 132, 30, },
+ { 2, 1, 0, 2, 132, 32, },
+ { 1, 1, 0, 2, 132, 32, },
+ { 0, 1, 0, 2, 136, 30, },
+ { 2, 1, 0, 2, 136, 32, },
+ { 1, 1, 0, 2, 136, 32, },
+ { 0, 1, 0, 2, 140, 28, },
+ { 2, 1, 0, 2, 140, 32, },
+ { 1, 1, 0, 2, 140, 32, },
+ { 0, 1, 0, 2, 149, 36, },
+ { 2, 1, 0, 2, 149, 32, },
+ { 1, 1, 0, 2, 149, 63, },
+ { 0, 1, 0, 2, 153, 36, },
+ { 2, 1, 0, 2, 153, 32, },
+ { 1, 1, 0, 2, 153, 63, },
+ { 0, 1, 0, 2, 157, 36, },
+ { 2, 1, 0, 2, 157, 32, },
+ { 1, 1, 0, 2, 157, 63, },
+ { 0, 1, 0, 2, 161, 36, },
+ { 2, 1, 0, 2, 161, 32, },
+ { 1, 1, 0, 2, 161, 63, },
+ { 0, 1, 0, 2, 165, 36, },
+ { 2, 1, 0, 2, 165, 32, },
+ { 1, 1, 0, 2, 165, 63, },
+ { 0, 1, 0, 3, 36, 28, },
+ { 2, 1, 0, 3, 36, 30, },
+ { 1, 1, 0, 3, 36, 30, },
+ { 0, 1, 0, 3, 40, 28, },
+ { 2, 1, 0, 3, 40, 30, },
+ { 1, 1, 0, 3, 40, 30, },
+ { 0, 1, 0, 3, 44, 28, },
+ { 2, 1, 0, 3, 44, 30, },
+ { 1, 1, 0, 3, 44, 30, },
+ { 0, 1, 0, 3, 48, 28, },
+ { 2, 1, 0, 3, 48, 30, },
+ { 1, 1, 0, 3, 48, 30, },
+ { 0, 1, 0, 3, 52, 34, },
+ { 2, 1, 0, 3, 52, 30, },
+ { 1, 1, 0, 3, 52, 30, },
+ { 0, 1, 0, 3, 56, 32, },
+ { 2, 1, 0, 3, 56, 30, },
+ { 1, 1, 0, 3, 56, 30, },
+ { 0, 1, 0, 3, 60, 30, },
+ { 2, 1, 0, 3, 60, 30, },
+ { 1, 1, 0, 3, 60, 30, },
+ { 0, 1, 0, 3, 64, 26, },
+ { 2, 1, 0, 3, 64, 30, },
+ { 1, 1, 0, 3, 64, 30, },
+ { 0, 1, 0, 3, 100, 28, },
+ { 2, 1, 0, 3, 100, 30, },
+ { 1, 1, 0, 3, 100, 30, },
+ { 0, 1, 0, 3, 104, 28, },
+ { 2, 1, 0, 3, 104, 30, },
+ { 1, 1, 0, 3, 104, 30, },
+ { 0, 1, 0, 3, 108, 30, },
+ { 2, 1, 0, 3, 108, 30, },
+ { 1, 1, 0, 3, 108, 30, },
+ { 0, 1, 0, 3, 112, 32, },
+ { 2, 1, 0, 3, 112, 30, },
+ { 1, 1, 0, 3, 112, 30, },
+ { 0, 1, 0, 3, 116, 32, },
+ { 2, 1, 0, 3, 116, 30, },
+ { 1, 1, 0, 3, 116, 30, },
+ { 0, 1, 0, 3, 120, 34, },
+ { 2, 1, 0, 3, 120, 30, },
+ { 1, 1, 0, 3, 120, 30, },
+ { 0, 1, 0, 3, 124, 32, },
+ { 2, 1, 0, 3, 124, 30, },
+ { 1, 1, 0, 3, 124, 30, },
+ { 0, 1, 0, 3, 128, 30, },
+ { 2, 1, 0, 3, 128, 30, },
+ { 1, 1, 0, 3, 128, 30, },
+ { 0, 1, 0, 3, 132, 28, },
+ { 2, 1, 0, 3, 132, 30, },
+ { 1, 1, 0, 3, 132, 30, },
+ { 0, 1, 0, 3, 136, 28, },
+ { 2, 1, 0, 3, 136, 30, },
+ { 1, 1, 0, 3, 136, 30, },
+ { 0, 1, 0, 3, 140, 26, },
+ { 2, 1, 0, 3, 140, 30, },
+ { 1, 1, 0, 3, 140, 30, },
+ { 0, 1, 0, 3, 149, 34, },
+ { 2, 1, 0, 3, 149, 30, },
+ { 1, 1, 0, 3, 149, 63, },
+ { 0, 1, 0, 3, 153, 34, },
+ { 2, 1, 0, 3, 153, 30, },
+ { 1, 1, 0, 3, 153, 63, },
+ { 0, 1, 0, 3, 157, 34, },
+ { 2, 1, 0, 3, 157, 30, },
+ { 1, 1, 0, 3, 157, 63, },
+ { 0, 1, 0, 3, 161, 34, },
+ { 2, 1, 0, 3, 161, 30, },
+ { 1, 1, 0, 3, 161, 63, },
+ { 0, 1, 0, 3, 165, 34, },
+ { 2, 1, 0, 3, 165, 30, },
+ { 1, 1, 0, 3, 165, 63, },
+ { 0, 1, 0, 6, 36, 26, },
+ { 2, 1, 0, 6, 36, 28, },
+ { 1, 1, 0, 6, 36, 28, },
+ { 0, 1, 0, 6, 40, 26, },
+ { 2, 1, 0, 6, 40, 28, },
+ { 1, 1, 0, 6, 40, 28, },
+ { 0, 1, 0, 6, 44, 26, },
+ { 2, 1, 0, 6, 44, 28, },
+ { 1, 1, 0, 6, 44, 28, },
+ { 0, 1, 0, 6, 48, 26, },
+ { 2, 1, 0, 6, 48, 28, },
+ { 1, 1, 0, 6, 48, 28, },
+ { 0, 1, 0, 6, 52, 32, },
+ { 2, 1, 0, 6, 52, 28, },
+ { 1, 1, 0, 6, 52, 28, },
+ { 0, 1, 0, 6, 56, 30, },
+ { 2, 1, 0, 6, 56, 28, },
+ { 1, 1, 0, 6, 56, 28, },
+ { 0, 1, 0, 6, 60, 28, },
+ { 2, 1, 0, 6, 60, 28, },
+ { 1, 1, 0, 6, 60, 28, },
+ { 0, 1, 0, 6, 64, 24, },
+ { 2, 1, 0, 6, 64, 28, },
+ { 1, 1, 0, 6, 64, 28, },
+ { 0, 1, 0, 6, 100, 26, },
+ { 2, 1, 0, 6, 100, 28, },
+ { 1, 1, 0, 6, 100, 28, },
+ { 0, 1, 0, 6, 104, 26, },
+ { 2, 1, 0, 6, 104, 28, },
+ { 1, 1, 0, 6, 104, 28, },
+ { 0, 1, 0, 6, 108, 28, },
+ { 2, 1, 0, 6, 108, 28, },
+ { 1, 1, 0, 6, 108, 28, },
+ { 0, 1, 0, 6, 112, 30, },
+ { 2, 1, 0, 6, 112, 28, },
+ { 1, 1, 0, 6, 112, 28, },
+ { 0, 1, 0, 6, 116, 30, },
+ { 2, 1, 0, 6, 116, 28, },
+ { 1, 1, 0, 6, 116, 28, },
+ { 0, 1, 0, 6, 120, 32, },
+ { 2, 1, 0, 6, 120, 28, },
+ { 1, 1, 0, 6, 120, 28, },
+ { 0, 1, 0, 6, 124, 30, },
+ { 2, 1, 0, 6, 124, 28, },
+ { 1, 1, 0, 6, 124, 28, },
+ { 0, 1, 0, 6, 128, 28, },
+ { 2, 1, 0, 6, 128, 28, },
+ { 1, 1, 0, 6, 128, 28, },
+ { 0, 1, 0, 6, 132, 26, },
+ { 2, 1, 0, 6, 132, 28, },
+ { 1, 1, 0, 6, 132, 28, },
+ { 0, 1, 0, 6, 136, 26, },
+ { 2, 1, 0, 6, 136, 28, },
+ { 1, 1, 0, 6, 136, 28, },
+ { 0, 1, 0, 6, 140, 24, },
+ { 2, 1, 0, 6, 140, 28, },
+ { 1, 1, 0, 6, 140, 28, },
+ { 0, 1, 0, 6, 149, 32, },
+ { 2, 1, 0, 6, 149, 28, },
+ { 1, 1, 0, 6, 149, 63, },
+ { 0, 1, 0, 6, 153, 32, },
+ { 2, 1, 0, 6, 153, 28, },
+ { 1, 1, 0, 6, 153, 63, },
+ { 0, 1, 0, 6, 157, 32, },
+ { 2, 1, 0, 6, 157, 28, },
+ { 1, 1, 0, 6, 157, 63, },
+ { 0, 1, 0, 6, 161, 32, },
+ { 2, 1, 0, 6, 161, 28, },
+ { 1, 1, 0, 6, 161, 63, },
+ { 0, 1, 0, 6, 165, 32, },
+ { 2, 1, 0, 6, 165, 28, },
+ { 1, 1, 0, 6, 165, 63, },
+ { 0, 1, 0, 7, 36, 24, },
+ { 2, 1, 0, 7, 36, 26, },
+ { 1, 1, 0, 7, 36, 26, },
+ { 0, 1, 0, 7, 40, 24, },
+ { 2, 1, 0, 7, 40, 26, },
+ { 1, 1, 0, 7, 40, 26, },
+ { 0, 1, 0, 7, 44, 24, },
+ { 2, 1, 0, 7, 44, 26, },
+ { 1, 1, 0, 7, 44, 26, },
+ { 0, 1, 0, 7, 48, 24, },
+ { 2, 1, 0, 7, 48, 26, },
+ { 1, 1, 0, 7, 48, 26, },
+ { 0, 1, 0, 7, 52, 30, },
+ { 2, 1, 0, 7, 52, 26, },
+ { 1, 1, 0, 7, 52, 26, },
+ { 0, 1, 0, 7, 56, 28, },
+ { 2, 1, 0, 7, 56, 26, },
+ { 1, 1, 0, 7, 56, 26, },
+ { 0, 1, 0, 7, 60, 26, },
+ { 2, 1, 0, 7, 60, 26, },
+ { 1, 1, 0, 7, 60, 26, },
+ { 0, 1, 0, 7, 64, 22, },
+ { 2, 1, 0, 7, 64, 26, },
+ { 1, 1, 0, 7, 64, 26, },
+ { 0, 1, 0, 7, 100, 24, },
+ { 2, 1, 0, 7, 100, 26, },
+ { 1, 1, 0, 7, 100, 26, },
+ { 0, 1, 0, 7, 104, 24, },
+ { 2, 1, 0, 7, 104, 26, },
+ { 1, 1, 0, 7, 104, 26, },
+ { 0, 1, 0, 7, 108, 26, },
+ { 2, 1, 0, 7, 108, 26, },
+ { 1, 1, 0, 7, 108, 26, },
+ { 0, 1, 0, 7, 112, 28, },
+ { 2, 1, 0, 7, 112, 26, },
+ { 1, 1, 0, 7, 112, 26, },
+ { 0, 1, 0, 7, 116, 28, },
+ { 2, 1, 0, 7, 116, 26, },
+ { 1, 1, 0, 7, 116, 26, },
+ { 0, 1, 0, 7, 120, 30, },
+ { 2, 1, 0, 7, 120, 26, },
+ { 1, 1, 0, 7, 120, 26, },
+ { 0, 1, 0, 7, 124, 28, },
+ { 2, 1, 0, 7, 124, 26, },
+ { 1, 1, 0, 7, 124, 26, },
+ { 0, 1, 0, 7, 128, 26, },
+ { 2, 1, 0, 7, 128, 26, },
+ { 1, 1, 0, 7, 128, 26, },
+ { 0, 1, 0, 7, 132, 24, },
+ { 2, 1, 0, 7, 132, 26, },
+ { 1, 1, 0, 7, 132, 26, },
+ { 0, 1, 0, 7, 136, 24, },
+ { 2, 1, 0, 7, 136, 26, },
+ { 1, 1, 0, 7, 136, 26, },
+ { 0, 1, 0, 7, 140, 22, },
+ { 2, 1, 0, 7, 140, 26, },
+ { 1, 1, 0, 7, 140, 26, },
+ { 0, 1, 0, 7, 149, 30, },
+ { 2, 1, 0, 7, 149, 26, },
+ { 1, 1, 0, 7, 149, 63, },
+ { 0, 1, 0, 7, 153, 30, },
+ { 2, 1, 0, 7, 153, 26, },
+ { 1, 1, 0, 7, 153, 63, },
+ { 0, 1, 0, 7, 157, 30, },
+ { 2, 1, 0, 7, 157, 26, },
+ { 1, 1, 0, 7, 157, 63, },
+ { 0, 1, 0, 7, 161, 30, },
+ { 2, 1, 0, 7, 161, 26, },
+ { 1, 1, 0, 7, 161, 63, },
+ { 0, 1, 0, 7, 165, 30, },
+ { 2, 1, 0, 7, 165, 26, },
+ { 1, 1, 0, 7, 165, 63, },
+ { 0, 1, 1, 2, 38, 30, },
+ { 2, 1, 1, 2, 38, 32, },
+ { 1, 1, 1, 2, 38, 32, },
+ { 0, 1, 1, 2, 46, 30, },
+ { 2, 1, 1, 2, 46, 32, },
+ { 1, 1, 1, 2, 46, 32, },
+ { 0, 1, 1, 2, 54, 32, },
+ { 2, 1, 1, 2, 54, 32, },
+ { 1, 1, 1, 2, 54, 32, },
+ { 0, 1, 1, 2, 62, 32, },
+ { 2, 1, 1, 2, 62, 32, },
+ { 1, 1, 1, 2, 62, 32, },
+ { 0, 1, 1, 2, 102, 28, },
+ { 2, 1, 1, 2, 102, 32, },
+ { 1, 1, 1, 2, 102, 32, },
+ { 0, 1, 1, 2, 110, 32, },
+ { 2, 1, 1, 2, 110, 32, },
+ { 1, 1, 1, 2, 110, 32, },
+ { 0, 1, 1, 2, 118, 36, },
+ { 2, 1, 1, 2, 118, 32, },
+ { 1, 1, 1, 2, 118, 32, },
+ { 0, 1, 1, 2, 126, 34, },
+ { 2, 1, 1, 2, 126, 32, },
+ { 1, 1, 1, 2, 126, 32, },
+ { 0, 1, 1, 2, 134, 32, },
+ { 2, 1, 1, 2, 134, 32, },
+ { 1, 1, 1, 2, 134, 32, },
+ { 0, 1, 1, 2, 151, 36, },
+ { 2, 1, 1, 2, 151, 32, },
+ { 1, 1, 1, 2, 151, 63, },
+ { 0, 1, 1, 2, 159, 36, },
+ { 2, 1, 1, 2, 159, 32, },
+ { 1, 1, 1, 2, 159, 63, },
+ { 0, 1, 1, 3, 38, 28, },
+ { 2, 1, 1, 3, 38, 30, },
+ { 1, 1, 1, 3, 38, 30, },
+ { 0, 1, 1, 3, 46, 28, },
+ { 2, 1, 1, 3, 46, 30, },
+ { 1, 1, 1, 3, 46, 30, },
+ { 0, 1, 1, 3, 54, 30, },
+ { 2, 1, 1, 3, 54, 30, },
+ { 1, 1, 1, 3, 54, 30, },
+ { 0, 1, 1, 3, 62, 30, },
+ { 2, 1, 1, 3, 62, 30, },
+ { 1, 1, 1, 3, 62, 30, },
+ { 0, 1, 1, 3, 102, 26, },
+ { 2, 1, 1, 3, 102, 30, },
+ { 1, 1, 1, 3, 102, 30, },
+ { 0, 1, 1, 3, 110, 30, },
+ { 2, 1, 1, 3, 110, 30, },
+ { 1, 1, 1, 3, 110, 30, },
+ { 0, 1, 1, 3, 118, 34, },
+ { 2, 1, 1, 3, 118, 30, },
+ { 1, 1, 1, 3, 118, 30, },
+ { 0, 1, 1, 3, 126, 32, },
+ { 2, 1, 1, 3, 126, 30, },
+ { 1, 1, 1, 3, 126, 30, },
+ { 0, 1, 1, 3, 134, 30, },
+ { 2, 1, 1, 3, 134, 30, },
+ { 1, 1, 1, 3, 134, 30, },
+ { 0, 1, 1, 3, 151, 34, },
+ { 2, 1, 1, 3, 151, 30, },
+ { 1, 1, 1, 3, 151, 63, },
+ { 0, 1, 1, 3, 159, 34, },
+ { 2, 1, 1, 3, 159, 30, },
+ { 1, 1, 1, 3, 159, 63, },
+ { 0, 1, 1, 6, 38, 26, },
+ { 2, 1, 1, 6, 38, 28, },
+ { 1, 1, 1, 6, 38, 28, },
+ { 0, 1, 1, 6, 46, 26, },
+ { 2, 1, 1, 6, 46, 28, },
+ { 1, 1, 1, 6, 46, 28, },
+ { 0, 1, 1, 6, 54, 28, },
+ { 2, 1, 1, 6, 54, 28, },
+ { 1, 1, 1, 6, 54, 28, },
+ { 0, 1, 1, 6, 62, 28, },
+ { 2, 1, 1, 6, 62, 28, },
+ { 1, 1, 1, 6, 62, 28, },
+ { 0, 1, 1, 6, 102, 24, },
+ { 2, 1, 1, 6, 102, 28, },
+ { 1, 1, 1, 6, 102, 28, },
+ { 0, 1, 1, 6, 110, 28, },
+ { 2, 1, 1, 6, 110, 28, },
+ { 1, 1, 1, 6, 110, 28, },
+ { 0, 1, 1, 6, 118, 32, },
+ { 2, 1, 1, 6, 118, 28, },
+ { 1, 1, 1, 6, 118, 28, },
+ { 0, 1, 1, 6, 126, 30, },
+ { 2, 1, 1, 6, 126, 28, },
+ { 1, 1, 1, 6, 126, 28, },
+ { 0, 1, 1, 6, 134, 28, },
+ { 2, 1, 1, 6, 134, 28, },
+ { 1, 1, 1, 6, 134, 28, },
+ { 0, 1, 1, 6, 151, 32, },
+ { 2, 1, 1, 6, 151, 28, },
+ { 1, 1, 1, 6, 151, 63, },
+ { 0, 1, 1, 6, 159, 32, },
+ { 2, 1, 1, 6, 159, 28, },
+ { 1, 1, 1, 6, 159, 63, },
+ { 0, 1, 1, 7, 38, 24, },
+ { 2, 1, 1, 7, 38, 26, },
+ { 1, 1, 1, 7, 38, 26, },
+ { 0, 1, 1, 7, 46, 24, },
+ { 2, 1, 1, 7, 46, 26, },
+ { 1, 1, 1, 7, 46, 26, },
+ { 0, 1, 1, 7, 54, 26, },
+ { 2, 1, 1, 7, 54, 26, },
+ { 1, 1, 1, 7, 54, 26, },
+ { 0, 1, 1, 7, 62, 26, },
+ { 2, 1, 1, 7, 62, 26, },
+ { 1, 1, 1, 7, 62, 26, },
+ { 0, 1, 1, 7, 102, 22, },
+ { 2, 1, 1, 7, 102, 26, },
+ { 1, 1, 1, 7, 102, 26, },
+ { 0, 1, 1, 7, 110, 26, },
+ { 2, 1, 1, 7, 110, 26, },
+ { 1, 1, 1, 7, 110, 26, },
+ { 0, 1, 1, 7, 118, 30, },
+ { 2, 1, 1, 7, 118, 26, },
+ { 1, 1, 1, 7, 118, 26, },
+ { 0, 1, 1, 7, 126, 28, },
+ { 2, 1, 1, 7, 126, 26, },
+ { 1, 1, 1, 7, 126, 26, },
+ { 0, 1, 1, 7, 134, 26, },
+ { 2, 1, 1, 7, 134, 26, },
+ { 1, 1, 1, 7, 134, 26, },
+ { 0, 1, 1, 7, 151, 30, },
+ { 2, 1, 1, 7, 151, 26, },
+ { 1, 1, 1, 7, 151, 63, },
+ { 0, 1, 1, 7, 159, 30, },
+ { 2, 1, 1, 7, 159, 26, },
+ { 1, 1, 1, 7, 159, 63, },
+ { 0, 1, 2, 4, 42, 30, },
+ { 2, 1, 2, 4, 42, 32, },
+ { 1, 1, 2, 4, 42, 32, },
+ { 0, 1, 2, 4, 58, 28, },
+ { 2, 1, 2, 4, 58, 32, },
+ { 1, 1, 2, 4, 58, 32, },
+ { 0, 1, 2, 4, 106, 30, },
+ { 2, 1, 2, 4, 106, 32, },
+ { 1, 1, 2, 4, 106, 32, },
+ { 0, 1, 2, 4, 122, 34, },
+ { 2, 1, 2, 4, 122, 32, },
+ { 1, 1, 2, 4, 122, 32, },
+ { 0, 1, 2, 4, 155, 36, },
+ { 2, 1, 2, 4, 155, 32, },
+ { 1, 1, 2, 4, 155, 63, },
+ { 0, 1, 2, 5, 42, 28, },
+ { 2, 1, 2, 5, 42, 30, },
+ { 1, 1, 2, 5, 42, 30, },
+ { 0, 1, 2, 5, 58, 26, },
+ { 2, 1, 2, 5, 58, 30, },
+ { 1, 1, 2, 5, 58, 30, },
+ { 0, 1, 2, 5, 106, 28, },
+ { 2, 1, 2, 5, 106, 30, },
+ { 1, 1, 2, 5, 106, 30, },
+ { 0, 1, 2, 5, 122, 32, },
+ { 2, 1, 2, 5, 122, 30, },
+ { 1, 1, 2, 5, 122, 30, },
+ { 0, 1, 2, 5, 155, 34, },
+ { 2, 1, 2, 5, 155, 30, },
+ { 1, 1, 2, 5, 155, 63, },
+ { 0, 1, 2, 8, 42, 26, },
+ { 2, 1, 2, 8, 42, 28, },
+ { 1, 1, 2, 8, 42, 28, },
+ { 0, 1, 2, 8, 58, 24, },
+ { 2, 1, 2, 8, 58, 28, },
+ { 1, 1, 2, 8, 58, 28, },
+ { 0, 1, 2, 8, 106, 26, },
+ { 2, 1, 2, 8, 106, 28, },
+ { 1, 1, 2, 8, 106, 28, },
+ { 0, 1, 2, 8, 122, 30, },
+ { 2, 1, 2, 8, 122, 28, },
+ { 1, 1, 2, 8, 122, 28, },
+ { 0, 1, 2, 8, 155, 32, },
+ { 2, 1, 2, 8, 155, 28, },
+ { 1, 1, 2, 8, 155, 63, },
+ { 0, 1, 2, 9, 42, 24, },
+ { 2, 1, 2, 9, 42, 26, },
+ { 1, 1, 2, 9, 42, 26, },
+ { 0, 1, 2, 9, 58, 22, },
+ { 2, 1, 2, 9, 58, 26, },
+ { 1, 1, 2, 9, 58, 26, },
+ { 0, 1, 2, 9, 106, 24, },
+ { 2, 1, 2, 9, 106, 26, },
+ { 1, 1, 2, 9, 106, 26, },
+ { 0, 1, 2, 9, 122, 28, },
+ { 2, 1, 2, 9, 122, 26, },
+ { 1, 1, 2, 9, 122, 26, },
+ { 0, 1, 2, 9, 155, 30, },
+ { 2, 1, 2, 9, 155, 26, },
+ { 1, 1, 2, 9, 155, 63, },
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8814a_txpwr_lmt);
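Each row in these generated tables packs the six fields of a struct rtw_txpwr_lmt_cfg_pair (regd, band, bw, rs, ch, txpwr_lmt in the rtw88 sources): a regulatory-domain index, a band selector (0 = 2.4 GHz, 1 = 5 GHz), a bandwidth index, a rate-section index, a channel number, and the power limit itself. The limit appears to be encoded in 0.5 dBm steps, with 63 acting as a "not specified" sentinel for combinations the table does not cover (e.g. channel 14, or the 5 GHz 149+ channels under the index-1 domain). The sketch below is a minimal stand-alone decode of one such row under those assumptions; the struct mirrors the rtw88 layout, but the unit conversion, the sentinel interpretation, and the bandwidth/rate-section comments are illustrative guesses, not taken from the driver.

/* Minimal sketch: decode one generated row.  Layout mirrors rtw88's
 * rtw_txpwr_lmt_cfg_pair; the 0.5 dBm step and the 63 == "unspecified"
 * sentinel are assumptions made for illustration only.
 */
#include <stdio.h>

struct txpwr_lmt_cfg_pair {
	unsigned char regd;    /* regulatory-domain index (first column) */
	unsigned char band;    /* 0 = 2.4 GHz, 1 = 5 GHz */
	unsigned char bw;      /* bandwidth index: 0=20M, 1=40M, 2=80M (assumed) */
	unsigned char rs;      /* rate-section index (CCK/OFDM/HT/VHT groups, assumed) */
	unsigned char ch;      /* channel number */
	signed char txpwr_lmt; /* limit, assumed 0.5 dBm units; 63 = unspecified */
};

static void print_limit(const struct txpwr_lmt_cfg_pair *p)
{
	if (p->txpwr_lmt == 63)	/* sentinel: no limit defined for this combination */
		printf("regd %u ch %u: unspecified\n", p->regd, p->ch);
	else
		printf("regd %u ch %u: %.1f dBm\n",
		       p->regd, p->ch, p->txpwr_lmt * 0.5);
}

int main(void)
{
	/* the last row of the table above: { 1, 1, 2, 9, 155, 63, } */
	const struct txpwr_lmt_cfg_pair row = { 1, 1, 2, 9, 155, 63 };

	print_limit(&row);
	return 0;
}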
+
+static const struct rtw_txpwr_lmt_cfg_pair rtw8814a_txpwr_lmt_type0[] = {
+ { 0, 0, 0, 0, 1, 32, },
+ { 2, 0, 0, 0, 1, 32, },
+ { 1, 0, 0, 0, 1, 32, },
+ { 0, 0, 0, 0, 2, 32, },
+ { 2, 0, 0, 0, 2, 32, },
+ { 1, 0, 0, 0, 2, 32, },
+ { 0, 0, 0, 0, 3, 32, },
+ { 2, 0, 0, 0, 3, 32, },
+ { 1, 0, 0, 0, 3, 32, },
+ { 0, 0, 0, 0, 4, 32, },
+ { 2, 0, 0, 0, 4, 32, },
+ { 1, 0, 0, 0, 4, 32, },
+ { 0, 0, 0, 0, 5, 32, },
+ { 2, 0, 0, 0, 5, 32, },
+ { 1, 0, 0, 0, 5, 32, },
+ { 0, 0, 0, 0, 6, 32, },
+ { 2, 0, 0, 0, 6, 32, },
+ { 1, 0, 0, 0, 6, 32, },
+ { 0, 0, 0, 0, 7, 32, },
+ { 2, 0, 0, 0, 7, 32, },
+ { 1, 0, 0, 0, 7, 32, },
+ { 0, 0, 0, 0, 8, 32, },
+ { 2, 0, 0, 0, 8, 32, },
+ { 1, 0, 0, 0, 8, 32, },
+ { 0, 0, 0, 0, 9, 32, },
+ { 2, 0, 0, 0, 9, 32, },
+ { 1, 0, 0, 0, 9, 32, },
+ { 0, 0, 0, 0, 10, 32, },
+ { 2, 0, 0, 0, 10, 32, },
+ { 1, 0, 0, 0, 10, 32, },
+ { 0, 0, 0, 0, 11, 32, },
+ { 2, 0, 0, 0, 11, 32, },
+ { 1, 0, 0, 0, 11, 32, },
+ { 0, 0, 0, 0, 12, 24, },
+ { 2, 0, 0, 0, 12, 32, },
+ { 1, 0, 0, 0, 12, 32, },
+ { 0, 0, 0, 0, 13, 16, },
+ { 2, 0, 0, 0, 13, 32, },
+ { 1, 0, 0, 0, 13, 32, },
+ { 0, 0, 0, 0, 14, 63, },
+ { 2, 0, 0, 0, 14, 63, },
+ { 1, 0, 0, 0, 14, 32, },
+ { 0, 0, 0, 1, 1, 28, },
+ { 2, 0, 0, 1, 1, 32, },
+ { 1, 0, 0, 1, 1, 32, },
+ { 0, 0, 0, 1, 2, 32, },
+ { 2, 0, 0, 1, 2, 32, },
+ { 1, 0, 0, 1, 2, 32, },
+ { 0, 0, 0, 1, 3, 32, },
+ { 2, 0, 0, 1, 3, 32, },
+ { 1, 0, 0, 1, 3, 32, },
+ { 0, 0, 0, 1, 4, 32, },
+ { 2, 0, 0, 1, 4, 32, },
+ { 1, 0, 0, 1, 4, 32, },
+ { 0, 0, 0, 1, 5, 32, },
+ { 2, 0, 0, 1, 5, 32, },
+ { 1, 0, 0, 1, 5, 32, },
+ { 0, 0, 0, 1, 6, 32, },
+ { 2, 0, 0, 1, 6, 32, },
+ { 1, 0, 0, 1, 6, 32, },
+ { 0, 0, 0, 1, 7, 32, },
+ { 2, 0, 0, 1, 7, 32, },
+ { 1, 0, 0, 1, 7, 32, },
+ { 0, 0, 0, 1, 8, 32, },
+ { 2, 0, 0, 1, 8, 32, },
+ { 1, 0, 0, 1, 8, 32, },
+ { 0, 0, 0, 1, 9, 32, },
+ { 2, 0, 0, 1, 9, 32, },
+ { 1, 0, 0, 1, 9, 32, },
+ { 0, 0, 0, 1, 10, 32, },
+ { 2, 0, 0, 1, 10, 32, },
+ { 1, 0, 0, 1, 10, 32, },
+ { 0, 0, 0, 1, 11, 28, },
+ { 2, 0, 0, 1, 11, 32, },
+ { 1, 0, 0, 1, 11, 32, },
+ { 0, 0, 0, 1, 12, 18, },
+ { 2, 0, 0, 1, 12, 32, },
+ { 1, 0, 0, 1, 12, 32, },
+ { 0, 0, 0, 1, 13, 8, },
+ { 2, 0, 0, 1, 13, 32, },
+ { 1, 0, 0, 1, 13, 32, },
+ { 0, 0, 0, 1, 14, 63, },
+ { 2, 0, 0, 1, 14, 63, },
+ { 1, 0, 0, 1, 14, 63, },
+ { 0, 0, 0, 2, 1, 26, },
+ { 2, 0, 0, 2, 1, 32, },
+ { 1, 0, 0, 2, 1, 32, },
+ { 0, 0, 0, 2, 2, 32, },
+ { 2, 0, 0, 2, 2, 32, },
+ { 1, 0, 0, 2, 2, 32, },
+ { 0, 0, 0, 2, 3, 32, },
+ { 2, 0, 0, 2, 3, 32, },
+ { 1, 0, 0, 2, 3, 32, },
+ { 0, 0, 0, 2, 4, 32, },
+ { 2, 0, 0, 2, 4, 32, },
+ { 1, 0, 0, 2, 4, 32, },
+ { 0, 0, 0, 2, 5, 32, },
+ { 2, 0, 0, 2, 5, 32, },
+ { 1, 0, 0, 2, 5, 32, },
+ { 0, 0, 0, 2, 6, 32, },
+ { 2, 0, 0, 2, 6, 32, },
+ { 1, 0, 0, 2, 6, 32, },
+ { 0, 0, 0, 2, 7, 32, },
+ { 2, 0, 0, 2, 7, 32, },
+ { 1, 0, 0, 2, 7, 32, },
+ { 0, 0, 0, 2, 8, 32, },
+ { 2, 0, 0, 2, 8, 32, },
+ { 1, 0, 0, 2, 8, 32, },
+ { 0, 0, 0, 2, 9, 32, },
+ { 2, 0, 0, 2, 9, 32, },
+ { 1, 0, 0, 2, 9, 32, },
+ { 0, 0, 0, 2, 10, 32, },
+ { 2, 0, 0, 2, 10, 32, },
+ { 1, 0, 0, 2, 10, 32, },
+ { 0, 0, 0, 2, 11, 26, },
+ { 2, 0, 0, 2, 11, 32, },
+ { 1, 0, 0, 2, 11, 32, },
+ { 0, 0, 0, 2, 12, 16, },
+ { 2, 0, 0, 2, 12, 32, },
+ { 1, 0, 0, 2, 12, 32, },
+ { 0, 0, 0, 2, 13, 6, },
+ { 2, 0, 0, 2, 13, 32, },
+ { 1, 0, 0, 2, 13, 32, },
+ { 0, 0, 0, 2, 14, 63, },
+ { 2, 0, 0, 2, 14, 63, },
+ { 1, 0, 0, 2, 14, 63, },
+ { 0, 0, 0, 3, 1, 24, },
+ { 2, 0, 0, 3, 1, 30, },
+ { 1, 0, 0, 3, 1, 30, },
+ { 0, 0, 0, 3, 2, 30, },
+ { 2, 0, 0, 3, 2, 30, },
+ { 1, 0, 0, 3, 2, 30, },
+ { 0, 0, 0, 3, 3, 30, },
+ { 2, 0, 0, 3, 3, 30, },
+ { 1, 0, 0, 3, 3, 30, },
+ { 0, 0, 0, 3, 4, 30, },
+ { 2, 0, 0, 3, 4, 30, },
+ { 1, 0, 0, 3, 4, 30, },
+ { 0, 0, 0, 3, 5, 30, },
+ { 2, 0, 0, 3, 5, 30, },
+ { 1, 0, 0, 3, 5, 30, },
+ { 0, 0, 0, 3, 6, 30, },
+ { 2, 0, 0, 3, 6, 30, },
+ { 1, 0, 0, 3, 6, 30, },
+ { 0, 0, 0, 3, 7, 30, },
+ { 2, 0, 0, 3, 7, 30, },
+ { 1, 0, 0, 3, 7, 30, },
+ { 0, 0, 0, 3, 8, 30, },
+ { 2, 0, 0, 3, 8, 30, },
+ { 1, 0, 0, 3, 8, 30, },
+ { 0, 0, 0, 3, 9, 30, },
+ { 2, 0, 0, 3, 9, 30, },
+ { 1, 0, 0, 3, 9, 30, },
+ { 0, 0, 0, 3, 10, 30, },
+ { 2, 0, 0, 3, 10, 30, },
+ { 1, 0, 0, 3, 10, 30, },
+ { 0, 0, 0, 3, 11, 24, },
+ { 2, 0, 0, 3, 11, 30, },
+ { 1, 0, 0, 3, 11, 30, },
+ { 0, 0, 0, 3, 12, 14, },
+ { 2, 0, 0, 3, 12, 30, },
+ { 1, 0, 0, 3, 12, 30, },
+ { 0, 0, 0, 3, 13, 4, },
+ { 2, 0, 0, 3, 13, 30, },
+ { 1, 0, 0, 3, 13, 30, },
+ { 0, 0, 0, 3, 14, 63, },
+ { 2, 0, 0, 3, 14, 63, },
+ { 1, 0, 0, 3, 14, 63, },
+ { 0, 0, 0, 6, 1, 22, },
+ { 2, 0, 0, 6, 1, 28, },
+ { 1, 0, 0, 6, 1, 28, },
+ { 0, 0, 0, 6, 2, 28, },
+ { 2, 0, 0, 6, 2, 28, },
+ { 1, 0, 0, 6, 2, 28, },
+ { 0, 0, 0, 6, 3, 28, },
+ { 2, 0, 0, 6, 3, 28, },
+ { 1, 0, 0, 6, 3, 28, },
+ { 0, 0, 0, 6, 4, 28, },
+ { 2, 0, 0, 6, 4, 28, },
+ { 1, 0, 0, 6, 4, 28, },
+ { 0, 0, 0, 6, 5, 28, },
+ { 2, 0, 0, 6, 5, 28, },
+ { 1, 0, 0, 6, 5, 28, },
+ { 0, 0, 0, 6, 6, 28, },
+ { 2, 0, 0, 6, 6, 28, },
+ { 1, 0, 0, 6, 6, 28, },
+ { 0, 0, 0, 6, 7, 28, },
+ { 2, 0, 0, 6, 7, 28, },
+ { 1, 0, 0, 6, 7, 28, },
+ { 0, 0, 0, 6, 8, 28, },
+ { 2, 0, 0, 6, 8, 28, },
+ { 1, 0, 0, 6, 8, 28, },
+ { 0, 0, 0, 6, 9, 28, },
+ { 2, 0, 0, 6, 9, 28, },
+ { 1, 0, 0, 6, 9, 28, },
+ { 0, 0, 0, 6, 10, 28, },
+ { 2, 0, 0, 6, 10, 28, },
+ { 1, 0, 0, 6, 10, 28, },
+ { 0, 0, 0, 6, 11, 22, },
+ { 2, 0, 0, 6, 11, 28, },
+ { 1, 0, 0, 6, 11, 28, },
+ { 0, 0, 0, 6, 12, 14, },
+ { 2, 0, 0, 6, 12, 28, },
+ { 1, 0, 0, 6, 12, 28, },
+ { 0, 0, 0, 6, 13, 4, },
+ { 2, 0, 0, 6, 13, 28, },
+ { 1, 0, 0, 6, 13, 28, },
+ { 0, 0, 0, 6, 14, 63, },
+ { 2, 0, 0, 6, 14, 63, },
+ { 1, 0, 0, 6, 14, 63, },
+ { 0, 0, 0, 7, 1, 20, },
+ { 2, 0, 0, 7, 1, 26, },
+ { 1, 0, 0, 7, 1, 26, },
+ { 0, 0, 0, 7, 2, 26, },
+ { 2, 0, 0, 7, 2, 26, },
+ { 1, 0, 0, 7, 2, 26, },
+ { 0, 0, 0, 7, 3, 26, },
+ { 2, 0, 0, 7, 3, 26, },
+ { 1, 0, 0, 7, 3, 26, },
+ { 0, 0, 0, 7, 4, 26, },
+ { 2, 0, 0, 7, 4, 26, },
+ { 1, 0, 0, 7, 4, 26, },
+ { 0, 0, 0, 7, 5, 26, },
+ { 2, 0, 0, 7, 5, 26, },
+ { 1, 0, 0, 7, 5, 26, },
+ { 0, 0, 0, 7, 6, 26, },
+ { 2, 0, 0, 7, 6, 26, },
+ { 1, 0, 0, 7, 6, 26, },
+ { 0, 0, 0, 7, 7, 26, },
+ { 2, 0, 0, 7, 7, 26, },
+ { 1, 0, 0, 7, 7, 26, },
+ { 0, 0, 0, 7, 8, 26, },
+ { 2, 0, 0, 7, 8, 26, },
+ { 1, 0, 0, 7, 8, 26, },
+ { 0, 0, 0, 7, 9, 26, },
+ { 2, 0, 0, 7, 9, 26, },
+ { 1, 0, 0, 7, 9, 26, },
+ { 0, 0, 0, 7, 10, 26, },
+ { 2, 0, 0, 7, 10, 26, },
+ { 1, 0, 0, 7, 10, 26, },
+ { 0, 0, 0, 7, 11, 20, },
+ { 2, 0, 0, 7, 11, 26, },
+ { 1, 0, 0, 7, 11, 26, },
+ { 0, 0, 0, 7, 12, 14, },
+ { 2, 0, 0, 7, 12, 26, },
+ { 1, 0, 0, 7, 12, 26, },
+ { 0, 0, 0, 7, 13, 4, },
+ { 2, 0, 0, 7, 13, 26, },
+ { 1, 0, 0, 7, 13, 26, },
+ { 0, 0, 0, 7, 14, 63, },
+ { 2, 0, 0, 7, 14, 63, },
+ { 1, 0, 0, 7, 14, 63, },
+ { 0, 0, 1, 2, 1, 63, },
+ { 2, 0, 1, 2, 1, 63, },
+ { 1, 0, 1, 2, 1, 63, },
+ { 0, 0, 1, 2, 2, 63, },
+ { 2, 0, 1, 2, 2, 63, },
+ { 1, 0, 1, 2, 2, 63, },
+ { 0, 0, 1, 2, 3, 26, },
+ { 2, 0, 1, 2, 3, 32, },
+ { 1, 0, 1, 2, 3, 32, },
+ { 0, 0, 1, 2, 4, 32, },
+ { 2, 0, 1, 2, 4, 32, },
+ { 1, 0, 1, 2, 4, 32, },
+ { 0, 0, 1, 2, 5, 32, },
+ { 2, 0, 1, 2, 5, 32, },
+ { 1, 0, 1, 2, 5, 32, },
+ { 0, 0, 1, 2, 6, 32, },
+ { 2, 0, 1, 2, 6, 32, },
+ { 1, 0, 1, 2, 6, 32, },
+ { 0, 0, 1, 2, 7, 32, },
+ { 2, 0, 1, 2, 7, 32, },
+ { 1, 0, 1, 2, 7, 32, },
+ { 0, 0, 1, 2, 8, 32, },
+ { 2, 0, 1, 2, 8, 32, },
+ { 1, 0, 1, 2, 8, 32, },
+ { 0, 0, 1, 2, 9, 32, },
+ { 2, 0, 1, 2, 9, 32, },
+ { 1, 0, 1, 2, 9, 32, },
+ { 0, 0, 1, 2, 10, 32, },
+ { 2, 0, 1, 2, 10, 32, },
+ { 1, 0, 1, 2, 10, 32, },
+ { 0, 0, 1, 2, 11, 26, },
+ { 2, 0, 1, 2, 11, 32, },
+ { 1, 0, 1, 2, 11, 32, },
+ { 0, 0, 1, 2, 12, 16, },
+ { 2, 0, 1, 2, 12, 32, },
+ { 1, 0, 1, 2, 12, 32, },
+ { 0, 0, 1, 2, 13, 10, },
+ { 2, 0, 1, 2, 13, 32, },
+ { 1, 0, 1, 2, 13, 32, },
+ { 0, 0, 1, 2, 14, 63, },
+ { 2, 0, 1, 2, 14, 63, },
+ { 1, 0, 1, 2, 14, 63, },
+ { 0, 0, 1, 3, 1, 63, },
+ { 2, 0, 1, 3, 1, 63, },
+ { 1, 0, 1, 3, 1, 63, },
+ { 0, 0, 1, 3, 2, 63, },
+ { 2, 0, 1, 3, 2, 63, },
+ { 1, 0, 1, 3, 2, 63, },
+ { 0, 0, 1, 3, 3, 24, },
+ { 2, 0, 1, 3, 3, 30, },
+ { 1, 0, 1, 3, 3, 30, },
+ { 0, 0, 1, 3, 4, 30, },
+ { 2, 0, 1, 3, 4, 30, },
+ { 1, 0, 1, 3, 4, 30, },
+ { 0, 0, 1, 3, 5, 30, },
+ { 2, 0, 1, 3, 5, 30, },
+ { 1, 0, 1, 3, 5, 30, },
+ { 0, 0, 1, 3, 6, 30, },
+ { 2, 0, 1, 3, 6, 30, },
+ { 1, 0, 1, 3, 6, 30, },
+ { 0, 0, 1, 3, 7, 30, },
+ { 2, 0, 1, 3, 7, 30, },
+ { 1, 0, 1, 3, 7, 30, },
+ { 0, 0, 1, 3, 8, 30, },
+ { 2, 0, 1, 3, 8, 30, },
+ { 1, 0, 1, 3, 8, 30, },
+ { 0, 0, 1, 3, 9, 30, },
+ { 2, 0, 1, 3, 9, 30, },
+ { 1, 0, 1, 3, 9, 30, },
+ { 0, 0, 1, 3, 10, 30, },
+ { 2, 0, 1, 3, 10, 30, },
+ { 1, 0, 1, 3, 10, 30, },
+ { 0, 0, 1, 3, 11, 24, },
+ { 2, 0, 1, 3, 11, 30, },
+ { 1, 0, 1, 3, 11, 30, },
+ { 0, 0, 1, 3, 12, 14, },
+ { 2, 0, 1, 3, 12, 30, },
+ { 1, 0, 1, 3, 12, 30, },
+ { 0, 0, 1, 3, 13, 8, },
+ { 2, 0, 1, 3, 13, 30, },
+ { 1, 0, 1, 3, 13, 30, },
+ { 0, 0, 1, 3, 14, 63, },
+ { 2, 0, 1, 3, 14, 63, },
+ { 1, 0, 1, 3, 14, 63, },
+ { 0, 0, 1, 6, 1, 63, },
+ { 2, 0, 1, 6, 1, 63, },
+ { 1, 0, 1, 6, 1, 63, },
+ { 0, 0, 1, 6, 2, 63, },
+ { 2, 0, 1, 6, 2, 63, },
+ { 1, 0, 1, 6, 2, 63, },
+ { 0, 0, 1, 6, 3, 22, },
+ { 2, 0, 1, 6, 3, 28, },
+ { 1, 0, 1, 6, 3, 28, },
+ { 0, 0, 1, 6, 4, 28, },
+ { 2, 0, 1, 6, 4, 28, },
+ { 1, 0, 1, 6, 4, 28, },
+ { 0, 0, 1, 6, 5, 28, },
+ { 2, 0, 1, 6, 5, 28, },
+ { 1, 0, 1, 6, 5, 28, },
+ { 0, 0, 1, 6, 6, 28, },
+ { 2, 0, 1, 6, 6, 28, },
+ { 1, 0, 1, 6, 6, 28, },
+ { 0, 0, 1, 6, 7, 28, },
+ { 2, 0, 1, 6, 7, 28, },
+ { 1, 0, 1, 6, 7, 28, },
+ { 0, 0, 1, 6, 8, 28, },
+ { 2, 0, 1, 6, 8, 28, },
+ { 1, 0, 1, 6, 8, 28, },
+ { 0, 0, 1, 6, 9, 28, },
+ { 2, 0, 1, 6, 9, 28, },
+ { 1, 0, 1, 6, 9, 28, },
+ { 0, 0, 1, 6, 10, 28, },
+ { 2, 0, 1, 6, 10, 28, },
+ { 1, 0, 1, 6, 10, 28, },
+ { 0, 0, 1, 6, 11, 22, },
+ { 2, 0, 1, 6, 11, 28, },
+ { 1, 0, 1, 6, 11, 28, },
+ { 0, 0, 1, 6, 12, 14, },
+ { 2, 0, 1, 6, 12, 28, },
+ { 1, 0, 1, 6, 12, 28, },
+ { 0, 0, 1, 6, 13, 8, },
+ { 2, 0, 1, 6, 13, 28, },
+ { 1, 0, 1, 6, 13, 28, },
+ { 0, 0, 1, 6, 14, 63, },
+ { 2, 0, 1, 6, 14, 63, },
+ { 1, 0, 1, 6, 14, 63, },
+ { 0, 0, 1, 7, 1, 63, },
+ { 2, 0, 1, 7, 1, 63, },
+ { 1, 0, 1, 7, 1, 63, },
+ { 0, 0, 1, 7, 2, 63, },
+ { 2, 0, 1, 7, 2, 63, },
+ { 1, 0, 1, 7, 2, 63, },
+ { 0, 0, 1, 7, 3, 20, },
+ { 2, 0, 1, 7, 3, 26, },
+ { 1, 0, 1, 7, 3, 26, },
+ { 0, 0, 1, 7, 4, 26, },
+ { 2, 0, 1, 7, 4, 26, },
+ { 1, 0, 1, 7, 4, 26, },
+ { 0, 0, 1, 7, 5, 26, },
+ { 2, 0, 1, 7, 5, 26, },
+ { 1, 0, 1, 7, 5, 26, },
+ { 0, 0, 1, 7, 6, 26, },
+ { 2, 0, 1, 7, 6, 26, },
+ { 1, 0, 1, 7, 6, 26, },
+ { 0, 0, 1, 7, 7, 26, },
+ { 2, 0, 1, 7, 7, 26, },
+ { 1, 0, 1, 7, 7, 26, },
+ { 0, 0, 1, 7, 8, 26, },
+ { 2, 0, 1, 7, 8, 26, },
+ { 1, 0, 1, 7, 8, 26, },
+ { 0, 0, 1, 7, 9, 26, },
+ { 2, 0, 1, 7, 9, 26, },
+ { 1, 0, 1, 7, 9, 26, },
+ { 0, 0, 1, 7, 10, 26, },
+ { 2, 0, 1, 7, 10, 26, },
+ { 1, 0, 1, 7, 10, 26, },
+ { 0, 0, 1, 7, 11, 20, },
+ { 2, 0, 1, 7, 11, 26, },
+ { 1, 0, 1, 7, 11, 26, },
+ { 0, 0, 1, 7, 12, 14, },
+ { 2, 0, 1, 7, 12, 26, },
+ { 1, 0, 1, 7, 12, 26, },
+ { 0, 0, 1, 7, 13, 8, },
+ { 2, 0, 1, 7, 13, 26, },
+ { 1, 0, 1, 7, 13, 26, },
+ { 0, 0, 1, 7, 14, 63, },
+ { 2, 0, 1, 7, 14, 63, },
+ { 1, 0, 1, 7, 14, 63, },
+ { 0, 1, 0, 1, 36, 28, },
+ { 2, 1, 0, 1, 36, 32, },
+ { 1, 1, 0, 1, 36, 32, },
+ { 0, 1, 0, 1, 40, 32, },
+ { 2, 1, 0, 1, 40, 32, },
+ { 1, 1, 0, 1, 40, 32, },
+ { 0, 1, 0, 1, 44, 32, },
+ { 2, 1, 0, 1, 44, 32, },
+ { 1, 1, 0, 1, 44, 32, },
+ { 0, 1, 0, 1, 48, 32, },
+ { 2, 1, 0, 1, 48, 32, },
+ { 1, 1, 0, 1, 48, 32, },
+ { 0, 1, 0, 1, 52, 32, },
+ { 2, 1, 0, 1, 52, 32, },
+ { 1, 1, 0, 1, 52, 32, },
+ { 0, 1, 0, 1, 56, 32, },
+ { 2, 1, 0, 1, 56, 32, },
+ { 1, 1, 0, 1, 56, 32, },
+ { 0, 1, 0, 1, 60, 32, },
+ { 2, 1, 0, 1, 60, 32, },
+ { 1, 1, 0, 1, 60, 32, },
+ { 0, 1, 0, 1, 64, 28, },
+ { 2, 1, 0, 1, 64, 32, },
+ { 1, 1, 0, 1, 64, 32, },
+ { 0, 1, 0, 1, 100, 28, },
+ { 2, 1, 0, 1, 100, 32, },
+ { 1, 1, 0, 1, 100, 32, },
+ { 0, 1, 0, 1, 104, 32, },
+ { 2, 1, 0, 1, 104, 32, },
+ { 1, 1, 0, 1, 104, 32, },
+ { 0, 1, 0, 1, 108, 32, },
+ { 2, 1, 0, 1, 108, 32, },
+ { 1, 1, 0, 1, 108, 32, },
+ { 0, 1, 0, 1, 112, 32, },
+ { 2, 1, 0, 1, 112, 32, },
+ { 1, 1, 0, 1, 112, 32, },
+ { 0, 1, 0, 1, 116, 32, },
+ { 2, 1, 0, 1, 116, 32, },
+ { 1, 1, 0, 1, 116, 32, },
+ { 0, 1, 0, 1, 120, 32, },
+ { 2, 1, 0, 1, 120, 32, },
+ { 1, 1, 0, 1, 120, 32, },
+ { 0, 1, 0, 1, 124, 32, },
+ { 2, 1, 0, 1, 124, 32, },
+ { 1, 1, 0, 1, 124, 32, },
+ { 0, 1, 0, 1, 128, 32, },
+ { 2, 1, 0, 1, 128, 32, },
+ { 1, 1, 0, 1, 128, 32, },
+ { 0, 1, 0, 1, 132, 32, },
+ { 2, 1, 0, 1, 132, 32, },
+ { 1, 1, 0, 1, 132, 32, },
+ { 0, 1, 0, 1, 136, 32, },
+ { 2, 1, 0, 1, 136, 32, },
+ { 1, 1, 0, 1, 136, 32, },
+ { 0, 1, 0, 1, 140, 28, },
+ { 2, 1, 0, 1, 140, 32, },
+ { 1, 1, 0, 1, 140, 32, },
+ { 0, 1, 0, 1, 149, 28, },
+ { 2, 1, 0, 1, 149, 32, },
+ { 1, 1, 0, 1, 149, 63, },
+ { 0, 1, 0, 1, 153, 32, },
+ { 2, 1, 0, 1, 153, 32, },
+ { 1, 1, 0, 1, 153, 63, },
+ { 0, 1, 0, 1, 157, 32, },
+ { 2, 1, 0, 1, 157, 32, },
+ { 1, 1, 0, 1, 157, 63, },
+ { 0, 1, 0, 1, 161, 32, },
+ { 2, 1, 0, 1, 161, 32, },
+ { 1, 1, 0, 1, 161, 63, },
+ { 0, 1, 0, 1, 165, 32, },
+ { 2, 1, 0, 1, 165, 32, },
+ { 1, 1, 0, 1, 165, 63, },
+ { 0, 1, 0, 2, 36, 26, },
+ { 2, 1, 0, 2, 36, 32, },
+ { 1, 1, 0, 2, 36, 32, },
+ { 0, 1, 0, 2, 40, 32, },
+ { 2, 1, 0, 2, 40, 32, },
+ { 1, 1, 0, 2, 40, 32, },
+ { 0, 1, 0, 2, 44, 32, },
+ { 2, 1, 0, 2, 44, 32, },
+ { 1, 1, 0, 2, 44, 32, },
+ { 0, 1, 0, 2, 48, 32, },
+ { 2, 1, 0, 2, 48, 32, },
+ { 1, 1, 0, 2, 48, 32, },
+ { 0, 1, 0, 2, 52, 32, },
+ { 2, 1, 0, 2, 52, 32, },
+ { 1, 1, 0, 2, 52, 32, },
+ { 0, 1, 0, 2, 56, 32, },
+ { 2, 1, 0, 2, 56, 32, },
+ { 1, 1, 0, 2, 56, 32, },
+ { 0, 1, 0, 2, 60, 32, },
+ { 2, 1, 0, 2, 60, 32, },
+ { 1, 1, 0, 2, 60, 32, },
+ { 0, 1, 0, 2, 64, 26, },
+ { 2, 1, 0, 2, 64, 32, },
+ { 1, 1, 0, 2, 64, 32, },
+ { 0, 1, 0, 2, 100, 26, },
+ { 2, 1, 0, 2, 100, 32, },
+ { 1, 1, 0, 2, 100, 32, },
+ { 0, 1, 0, 2, 104, 32, },
+ { 2, 1, 0, 2, 104, 32, },
+ { 1, 1, 0, 2, 104, 32, },
+ { 0, 1, 0, 2, 108, 32, },
+ { 2, 1, 0, 2, 108, 32, },
+ { 1, 1, 0, 2, 108, 32, },
+ { 0, 1, 0, 2, 112, 32, },
+ { 2, 1, 0, 2, 112, 32, },
+ { 1, 1, 0, 2, 112, 32, },
+ { 0, 1, 0, 2, 116, 32, },
+ { 2, 1, 0, 2, 116, 32, },
+ { 1, 1, 0, 2, 116, 32, },
+ { 0, 1, 0, 2, 120, 32, },
+ { 2, 1, 0, 2, 120, 32, },
+ { 1, 1, 0, 2, 120, 32, },
+ { 0, 1, 0, 2, 124, 32, },
+ { 2, 1, 0, 2, 124, 32, },
+ { 1, 1, 0, 2, 124, 32, },
+ { 0, 1, 0, 2, 128, 32, },
+ { 2, 1, 0, 2, 128, 32, },
+ { 1, 1, 0, 2, 128, 32, },
+ { 0, 1, 0, 2, 132, 32, },
+ { 2, 1, 0, 2, 132, 32, },
+ { 1, 1, 0, 2, 132, 32, },
+ { 0, 1, 0, 2, 136, 32, },
+ { 2, 1, 0, 2, 136, 32, },
+ { 1, 1, 0, 2, 136, 32, },
+ { 0, 1, 0, 2, 140, 26, },
+ { 2, 1, 0, 2, 140, 32, },
+ { 1, 1, 0, 2, 140, 32, },
+ { 0, 1, 0, 2, 149, 26, },
+ { 2, 1, 0, 2, 149, 32, },
+ { 1, 1, 0, 2, 149, 63, },
+ { 0, 1, 0, 2, 153, 32, },
+ { 2, 1, 0, 2, 153, 32, },
+ { 1, 1, 0, 2, 153, 63, },
+ { 0, 1, 0, 2, 157, 32, },
+ { 2, 1, 0, 2, 157, 32, },
+ { 1, 1, 0, 2, 157, 63, },
+ { 0, 1, 0, 2, 161, 32, },
+ { 2, 1, 0, 2, 161, 32, },
+ { 1, 1, 0, 2, 161, 63, },
+ { 0, 1, 0, 2, 165, 32, },
+ { 2, 1, 0, 2, 165, 32, },
+ { 1, 1, 0, 2, 165, 63, },
+ { 0, 1, 0, 3, 36, 24, },
+ { 2, 1, 0, 3, 36, 28, },
+ { 1, 1, 0, 3, 36, 28, },
+ { 0, 1, 0, 3, 40, 28, },
+ { 2, 1, 0, 3, 40, 28, },
+ { 1, 1, 0, 3, 40, 28, },
+ { 0, 1, 0, 3, 44, 28, },
+ { 2, 1, 0, 3, 44, 28, },
+ { 1, 1, 0, 3, 44, 28, },
+ { 0, 1, 0, 3, 48, 28, },
+ { 2, 1, 0, 3, 48, 28, },
+ { 1, 1, 0, 3, 48, 28, },
+ { 0, 1, 0, 3, 52, 28, },
+ { 2, 1, 0, 3, 52, 28, },
+ { 1, 1, 0, 3, 52, 28, },
+ { 0, 1, 0, 3, 56, 28, },
+ { 2, 1, 0, 3, 56, 28, },
+ { 1, 1, 0, 3, 56, 28, },
+ { 0, 1, 0, 3, 60, 28, },
+ { 2, 1, 0, 3, 60, 28, },
+ { 1, 1, 0, 3, 60, 28, },
+ { 0, 1, 0, 3, 64, 24, },
+ { 2, 1, 0, 3, 64, 28, },
+ { 1, 1, 0, 3, 64, 28, },
+ { 0, 1, 0, 3, 100, 24, },
+ { 2, 1, 0, 3, 100, 28, },
+ { 1, 1, 0, 3, 100, 28, },
+ { 0, 1, 0, 3, 104, 28, },
+ { 2, 1, 0, 3, 104, 28, },
+ { 1, 1, 0, 3, 104, 28, },
+ { 0, 1, 0, 3, 108, 28, },
+ { 2, 1, 0, 3, 108, 28, },
+ { 1, 1, 0, 3, 108, 28, },
+ { 0, 1, 0, 3, 112, 28, },
+ { 2, 1, 0, 3, 112, 28, },
+ { 1, 1, 0, 3, 112, 28, },
+ { 0, 1, 0, 3, 116, 28, },
+ { 2, 1, 0, 3, 116, 28, },
+ { 1, 1, 0, 3, 116, 28, },
+ { 0, 1, 0, 3, 120, 28, },
+ { 2, 1, 0, 3, 120, 28, },
+ { 1, 1, 0, 3, 120, 28, },
+ { 0, 1, 0, 3, 124, 28, },
+ { 2, 1, 0, 3, 124, 28, },
+ { 1, 1, 0, 3, 124, 28, },
+ { 0, 1, 0, 3, 128, 28, },
+ { 2, 1, 0, 3, 128, 28, },
+ { 1, 1, 0, 3, 128, 28, },
+ { 0, 1, 0, 3, 132, 28, },
+ { 2, 1, 0, 3, 132, 28, },
+ { 1, 1, 0, 3, 132, 28, },
+ { 0, 1, 0, 3, 136, 28, },
+ { 2, 1, 0, 3, 136, 28, },
+ { 1, 1, 0, 3, 136, 28, },
+ { 0, 1, 0, 3, 140, 24, },
+ { 2, 1, 0, 3, 140, 28, },
+ { 1, 1, 0, 3, 140, 28, },
+ { 0, 1, 0, 3, 149, 24, },
+ { 2, 1, 0, 3, 149, 28, },
+ { 1, 1, 0, 3, 149, 63, },
+ { 0, 1, 0, 3, 153, 28, },
+ { 2, 1, 0, 3, 153, 28, },
+ { 1, 1, 0, 3, 153, 63, },
+ { 0, 1, 0, 3, 157, 28, },
+ { 2, 1, 0, 3, 157, 28, },
+ { 1, 1, 0, 3, 157, 63, },
+ { 0, 1, 0, 3, 161, 28, },
+ { 2, 1, 0, 3, 161, 28, },
+ { 1, 1, 0, 3, 161, 63, },
+ { 0, 1, 0, 3, 165, 28, },
+ { 2, 1, 0, 3, 165, 28, },
+ { 1, 1, 0, 3, 165, 63, },
+ { 0, 1, 0, 6, 36, 22, },
+ { 2, 1, 0, 6, 36, 26, },
+ { 1, 1, 0, 6, 36, 26, },
+ { 0, 1, 0, 6, 40, 26, },
+ { 2, 1, 0, 6, 40, 26, },
+ { 1, 1, 0, 6, 40, 26, },
+ { 0, 1, 0, 6, 44, 26, },
+ { 2, 1, 0, 6, 44, 26, },
+ { 1, 1, 0, 6, 44, 26, },
+ { 0, 1, 0, 6, 48, 26, },
+ { 2, 1, 0, 6, 48, 26, },
+ { 1, 1, 0, 6, 48, 26, },
+ { 0, 1, 0, 6, 52, 26, },
+ { 2, 1, 0, 6, 52, 26, },
+ { 1, 1, 0, 6, 52, 26, },
+ { 0, 1, 0, 6, 56, 26, },
+ { 2, 1, 0, 6, 56, 26, },
+ { 1, 1, 0, 6, 56, 26, },
+ { 0, 1, 0, 6, 60, 26, },
+ { 2, 1, 0, 6, 60, 26, },
+ { 1, 1, 0, 6, 60, 26, },
+ { 0, 1, 0, 6, 64, 22, },
+ { 2, 1, 0, 6, 64, 26, },
+ { 1, 1, 0, 6, 64, 26, },
+ { 0, 1, 0, 6, 100, 22, },
+ { 2, 1, 0, 6, 100, 26, },
+ { 1, 1, 0, 6, 100, 26, },
+ { 0, 1, 0, 6, 104, 26, },
+ { 2, 1, 0, 6, 104, 26, },
+ { 1, 1, 0, 6, 104, 26, },
+ { 0, 1, 0, 6, 108, 26, },
+ { 2, 1, 0, 6, 108, 26, },
+ { 1, 1, 0, 6, 108, 26, },
+ { 0, 1, 0, 6, 112, 26, },
+ { 2, 1, 0, 6, 112, 26, },
+ { 1, 1, 0, 6, 112, 26, },
+ { 0, 1, 0, 6, 116, 26, },
+ { 2, 1, 0, 6, 116, 26, },
+ { 1, 1, 0, 6, 116, 26, },
+ { 0, 1, 0, 6, 120, 26, },
+ { 2, 1, 0, 6, 120, 26, },
+ { 1, 1, 0, 6, 120, 26, },
+ { 0, 1, 0, 6, 124, 26, },
+ { 2, 1, 0, 6, 124, 26, },
+ { 1, 1, 0, 6, 124, 26, },
+ { 0, 1, 0, 6, 128, 26, },
+ { 2, 1, 0, 6, 128, 26, },
+ { 1, 1, 0, 6, 128, 26, },
+ { 0, 1, 0, 6, 132, 26, },
+ { 2, 1, 0, 6, 132, 26, },
+ { 1, 1, 0, 6, 132, 26, },
+ { 0, 1, 0, 6, 136, 26, },
+ { 2, 1, 0, 6, 136, 26, },
+ { 1, 1, 0, 6, 136, 26, },
+ { 0, 1, 0, 6, 140, 22, },
+ { 2, 1, 0, 6, 140, 26, },
+ { 1, 1, 0, 6, 140, 26, },
+ { 0, 1, 0, 6, 149, 22, },
+ { 2, 1, 0, 6, 149, 26, },
+ { 1, 1, 0, 6, 149, 63, },
+ { 0, 1, 0, 6, 153, 26, },
+ { 2, 1, 0, 6, 153, 26, },
+ { 1, 1, 0, 6, 153, 63, },
+ { 0, 1, 0, 6, 157, 26, },
+ { 2, 1, 0, 6, 157, 26, },
+ { 1, 1, 0, 6, 157, 63, },
+ { 0, 1, 0, 6, 161, 26, },
+ { 2, 1, 0, 6, 161, 26, },
+ { 1, 1, 0, 6, 161, 63, },
+ { 0, 1, 0, 6, 165, 26, },
+ { 2, 1, 0, 6, 165, 26, },
+ { 1, 1, 0, 6, 165, 63, },
+ { 0, 1, 0, 7, 36, 20, },
+ { 2, 1, 0, 7, 36, 24, },
+ { 1, 1, 0, 7, 36, 24, },
+ { 0, 1, 0, 7, 40, 24, },
+ { 2, 1, 0, 7, 40, 24, },
+ { 1, 1, 0, 7, 40, 24, },
+ { 0, 1, 0, 7, 44, 24, },
+ { 2, 1, 0, 7, 44, 24, },
+ { 1, 1, 0, 7, 44, 24, },
+ { 0, 1, 0, 7, 48, 24, },
+ { 2, 1, 0, 7, 48, 24, },
+ { 1, 1, 0, 7, 48, 24, },
+ { 0, 1, 0, 7, 52, 24, },
+ { 2, 1, 0, 7, 52, 24, },
+ { 1, 1, 0, 7, 52, 24, },
+ { 0, 1, 0, 7, 56, 24, },
+ { 2, 1, 0, 7, 56, 24, },
+ { 1, 1, 0, 7, 56, 24, },
+ { 0, 1, 0, 7, 60, 24, },
+ { 2, 1, 0, 7, 60, 24, },
+ { 1, 1, 0, 7, 60, 24, },
+ { 0, 1, 0, 7, 64, 20, },
+ { 2, 1, 0, 7, 64, 24, },
+ { 1, 1, 0, 7, 64, 24, },
+ { 0, 1, 0, 7, 100, 20, },
+ { 2, 1, 0, 7, 100, 24, },
+ { 1, 1, 0, 7, 100, 24, },
+ { 0, 1, 0, 7, 104, 24, },
+ { 2, 1, 0, 7, 104, 24, },
+ { 1, 1, 0, 7, 104, 24, },
+ { 0, 1, 0, 7, 108, 24, },
+ { 2, 1, 0, 7, 108, 24, },
+ { 1, 1, 0, 7, 108, 24, },
+ { 0, 1, 0, 7, 112, 24, },
+ { 2, 1, 0, 7, 112, 24, },
+ { 1, 1, 0, 7, 112, 24, },
+ { 0, 1, 0, 7, 116, 24, },
+ { 2, 1, 0, 7, 116, 24, },
+ { 1, 1, 0, 7, 116, 24, },
+ { 0, 1, 0, 7, 120, 24, },
+ { 2, 1, 0, 7, 120, 24, },
+ { 1, 1, 0, 7, 120, 24, },
+ { 0, 1, 0, 7, 124, 24, },
+ { 2, 1, 0, 7, 124, 24, },
+ { 1, 1, 0, 7, 124, 24, },
+ { 0, 1, 0, 7, 128, 24, },
+ { 2, 1, 0, 7, 128, 24, },
+ { 1, 1, 0, 7, 128, 24, },
+ { 0, 1, 0, 7, 132, 24, },
+ { 2, 1, 0, 7, 132, 24, },
+ { 1, 1, 0, 7, 132, 24, },
+ { 0, 1, 0, 7, 136, 24, },
+ { 2, 1, 0, 7, 136, 24, },
+ { 1, 1, 0, 7, 136, 24, },
+ { 0, 1, 0, 7, 140, 20, },
+ { 2, 1, 0, 7, 140, 24, },
+ { 1, 1, 0, 7, 140, 24, },
+ { 0, 1, 0, 7, 149, 20, },
+ { 2, 1, 0, 7, 149, 24, },
+ { 1, 1, 0, 7, 149, 63, },
+ { 0, 1, 0, 7, 153, 24, },
+ { 2, 1, 0, 7, 153, 24, },
+ { 1, 1, 0, 7, 153, 63, },
+ { 0, 1, 0, 7, 157, 24, },
+ { 2, 1, 0, 7, 157, 24, },
+ { 1, 1, 0, 7, 157, 63, },
+ { 0, 1, 0, 7, 161, 24, },
+ { 2, 1, 0, 7, 161, 24, },
+ { 1, 1, 0, 7, 161, 63, },
+ { 0, 1, 0, 7, 165, 24, },
+ { 2, 1, 0, 7, 165, 24, },
+ { 1, 1, 0, 7, 165, 63, },
+ { 0, 1, 1, 2, 38, 26, },
+ { 2, 1, 1, 2, 38, 32, },
+ { 1, 1, 1, 2, 38, 32, },
+ { 0, 1, 1, 2, 46, 32, },
+ { 2, 1, 1, 2, 46, 32, },
+ { 1, 1, 1, 2, 46, 32, },
+ { 0, 1, 1, 2, 54, 32, },
+ { 2, 1, 1, 2, 54, 32, },
+ { 1, 1, 1, 2, 54, 32, },
+ { 0, 1, 1, 2, 62, 26, },
+ { 2, 1, 1, 2, 62, 32, },
+ { 1, 1, 1, 2, 62, 32, },
+ { 0, 1, 1, 2, 102, 26, },
+ { 2, 1, 1, 2, 102, 32, },
+ { 1, 1, 1, 2, 102, 32, },
+ { 0, 1, 1, 2, 110, 32, },
+ { 2, 1, 1, 2, 110, 32, },
+ { 1, 1, 1, 2, 110, 32, },
+ { 0, 1, 1, 2, 118, 32, },
+ { 2, 1, 1, 2, 118, 32, },
+ { 1, 1, 1, 2, 118, 32, },
+ { 0, 1, 1, 2, 126, 32, },
+ { 2, 1, 1, 2, 126, 32, },
+ { 1, 1, 1, 2, 126, 32, },
+ { 0, 1, 1, 2, 134, 32, },
+ { 2, 1, 1, 2, 134, 32, },
+ { 1, 1, 1, 2, 134, 32, },
+ { 0, 1, 1, 2, 151, 26, },
+ { 2, 1, 1, 2, 151, 32, },
+ { 1, 1, 1, 2, 151, 63, },
+ { 0, 1, 1, 2, 159, 32, },
+ { 2, 1, 1, 2, 159, 32, },
+ { 1, 1, 1, 2, 159, 63, },
+ { 0, 1, 1, 3, 38, 24, },
+ { 2, 1, 1, 3, 38, 28, },
+ { 1, 1, 1, 3, 38, 28, },
+ { 0, 1, 1, 3, 46, 28, },
+ { 2, 1, 1, 3, 46, 28, },
+ { 1, 1, 1, 3, 46, 28, },
+ { 0, 1, 1, 3, 54, 28, },
+ { 2, 1, 1, 3, 54, 28, },
+ { 1, 1, 1, 3, 54, 28, },
+ { 0, 1, 1, 3, 62, 24, },
+ { 2, 1, 1, 3, 62, 28, },
+ { 1, 1, 1, 3, 62, 28, },
+ { 0, 1, 1, 3, 102, 24, },
+ { 2, 1, 1, 3, 102, 28, },
+ { 1, 1, 1, 3, 102, 28, },
+ { 0, 1, 1, 3, 110, 28, },
+ { 2, 1, 1, 3, 110, 28, },
+ { 1, 1, 1, 3, 110, 28, },
+ { 0, 1, 1, 3, 118, 28, },
+ { 2, 1, 1, 3, 118, 28, },
+ { 1, 1, 1, 3, 118, 28, },
+ { 0, 1, 1, 3, 126, 28, },
+ { 2, 1, 1, 3, 126, 28, },
+ { 1, 1, 1, 3, 126, 28, },
+ { 0, 1, 1, 3, 134, 28, },
+ { 2, 1, 1, 3, 134, 28, },
+ { 1, 1, 1, 3, 134, 28, },
+ { 0, 1, 1, 3, 151, 24, },
+ { 2, 1, 1, 3, 151, 28, },
+ { 1, 1, 1, 3, 151, 63, },
+ { 0, 1, 1, 3, 159, 28, },
+ { 2, 1, 1, 3, 159, 28, },
+ { 1, 1, 1, 3, 159, 63, },
+ { 0, 1, 1, 6, 38, 20, },
+ { 2, 1, 1, 6, 38, 26, },
+ { 1, 1, 1, 6, 38, 26, },
+ { 0, 1, 1, 6, 46, 26, },
+ { 2, 1, 1, 6, 46, 26, },
+ { 1, 1, 1, 6, 46, 26, },
+ { 0, 1, 1, 6, 54, 26, },
+ { 2, 1, 1, 6, 54, 26, },
+ { 1, 1, 1, 6, 54, 26, },
+ { 0, 1, 1, 6, 62, 20, },
+ { 2, 1, 1, 6, 62, 26, },
+ { 1, 1, 1, 6, 62, 26, },
+ { 0, 1, 1, 6, 102, 20, },
+ { 2, 1, 1, 6, 102, 26, },
+ { 1, 1, 1, 6, 102, 26, },
+ { 0, 1, 1, 6, 110, 26, },
+ { 2, 1, 1, 6, 110, 26, },
+ { 1, 1, 1, 6, 110, 26, },
+ { 0, 1, 1, 6, 118, 26, },
+ { 2, 1, 1, 6, 118, 26, },
+ { 1, 1, 1, 6, 118, 26, },
+ { 0, 1, 1, 6, 126, 26, },
+ { 2, 1, 1, 6, 126, 26, },
+ { 1, 1, 1, 6, 126, 26, },
+ { 0, 1, 1, 6, 134, 26, },
+ { 2, 1, 1, 6, 134, 26, },
+ { 1, 1, 1, 6, 134, 26, },
+ { 0, 1, 1, 6, 151, 20, },
+ { 2, 1, 1, 6, 151, 26, },
+ { 1, 1, 1, 6, 151, 63, },
+ { 0, 1, 1, 6, 159, 26, },
+ { 2, 1, 1, 6, 159, 26, },
+ { 1, 1, 1, 6, 159, 63, },
+ { 0, 1, 1, 7, 38, 18, },
+ { 2, 1, 1, 7, 38, 24, },
+ { 1, 1, 1, 7, 38, 24, },
+ { 0, 1, 1, 7, 46, 24, },
+ { 2, 1, 1, 7, 46, 24, },
+ { 1, 1, 1, 7, 46, 24, },
+ { 0, 1, 1, 7, 54, 24, },
+ { 2, 1, 1, 7, 54, 24, },
+ { 1, 1, 1, 7, 54, 24, },
+ { 0, 1, 1, 7, 62, 18, },
+ { 2, 1, 1, 7, 62, 24, },
+ { 1, 1, 1, 7, 62, 24, },
+ { 0, 1, 1, 7, 102, 18, },
+ { 2, 1, 1, 7, 102, 24, },
+ { 1, 1, 1, 7, 102, 24, },
+ { 0, 1, 1, 7, 110, 24, },
+ { 2, 1, 1, 7, 110, 24, },
+ { 1, 1, 1, 7, 110, 24, },
+ { 0, 1, 1, 7, 118, 24, },
+ { 2, 1, 1, 7, 118, 24, },
+ { 1, 1, 1, 7, 118, 24, },
+ { 0, 1, 1, 7, 126, 24, },
+ { 2, 1, 1, 7, 126, 24, },
+ { 1, 1, 1, 7, 126, 24, },
+ { 0, 1, 1, 7, 134, 24, },
+ { 2, 1, 1, 7, 134, 24, },
+ { 1, 1, 1, 7, 134, 24, },
+ { 0, 1, 1, 7, 151, 18, },
+ { 2, 1, 1, 7, 151, 24, },
+ { 1, 1, 1, 7, 151, 63, },
+ { 0, 1, 1, 7, 159, 24, },
+ { 2, 1, 1, 7, 159, 24, },
+ { 1, 1, 1, 7, 159, 63, },
+ { 0, 1, 2, 4, 42, 22, },
+ { 2, 1, 2, 4, 42, 30, },
+ { 1, 1, 2, 4, 42, 30, },
+ { 0, 1, 2, 4, 58, 22, },
+ { 2, 1, 2, 4, 58, 30, },
+ { 1, 1, 2, 4, 58, 30, },
+ { 0, 1, 2, 4, 106, 22, },
+ { 2, 1, 2, 4, 106, 30, },
+ { 1, 1, 2, 4, 106, 30, },
+ { 0, 1, 2, 4, 122, 30, },
+ { 2, 1, 2, 4, 122, 30, },
+ { 1, 1, 2, 4, 122, 30, },
+ { 0, 1, 2, 4, 155, 22, },
+ { 2, 1, 2, 4, 155, 30, },
+ { 1, 1, 2, 4, 155, 63, },
+ { 0, 1, 2, 5, 42, 20, },
+ { 2, 1, 2, 5, 42, 28, },
+ { 1, 1, 2, 5, 42, 28, },
+ { 0, 1, 2, 5, 58, 20, },
+ { 2, 1, 2, 5, 58, 28, },
+ { 1, 1, 2, 5, 58, 28, },
+ { 0, 1, 2, 5, 106, 20, },
+ { 2, 1, 2, 5, 106, 28, },
+ { 1, 1, 2, 5, 106, 28, },
+ { 0, 1, 2, 5, 122, 28, },
+ { 2, 1, 2, 5, 122, 28, },
+ { 1, 1, 2, 5, 122, 28, },
+ { 0, 1, 2, 5, 155, 20, },
+ { 2, 1, 2, 5, 155, 28, },
+ { 1, 1, 2, 5, 155, 63, },
+ { 0, 1, 2, 8, 42, 18, },
+ { 2, 1, 2, 8, 42, 26, },
+ { 1, 1, 2, 8, 42, 26, },
+ { 0, 1, 2, 8, 58, 18, },
+ { 2, 1, 2, 8, 58, 26, },
+ { 1, 1, 2, 8, 58, 26, },
+ { 0, 1, 2, 8, 106, 18, },
+ { 2, 1, 2, 8, 106, 26, },
+ { 1, 1, 2, 8, 106, 26, },
+ { 0, 1, 2, 8, 122, 26, },
+ { 2, 1, 2, 8, 122, 26, },
+ { 1, 1, 2, 8, 122, 26, },
+ { 0, 1, 2, 8, 155, 18, },
+ { 2, 1, 2, 8, 155, 26, },
+ { 1, 1, 2, 8, 155, 63, },
+ { 0, 1, 2, 9, 42, 16, },
+ { 2, 1, 2, 9, 42, 24, },
+ { 1, 1, 2, 9, 42, 24, },
+ { 0, 1, 2, 9, 58, 16, },
+ { 2, 1, 2, 9, 58, 24, },
+ { 1, 1, 2, 9, 58, 24, },
+ { 0, 1, 2, 9, 106, 16, },
+ { 2, 1, 2, 9, 106, 24, },
+ { 1, 1, 2, 9, 106, 24, },
+ { 0, 1, 2, 9, 122, 24, },
+ { 2, 1, 2, 9, 122, 24, },
+ { 1, 1, 2, 9, 122, 24, },
+ { 0, 1, 2, 9, 155, 16, },
+ { 2, 1, 2, 9, 155, 24, },
+ { 1, 1, 2, 9, 155, 63, },
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8814a_txpwr_lmt_type0);
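Since the plain table and its type0/type1/type2 variants are just alternate flat arrays of the same six-field pairs, selecting a limit for a given (regd, band, bw, rate-section, channel) tuple amounts to scanning whichever array applies. The helper below is a hypothetical stand-alone lookup written against the struct sketched after the previous table; it is not the rtw88 parsing path (the driver consumes these arrays through the table object declared by RTW_DECL_TABLE_TXPWR_LMT), only an illustration of how the rows index a limit.

/* Hypothetical linear lookup over an array of the txpwr_lmt_cfg_pair rows
 * sketched earlier.  Returns the encoded limit, or -1 when no row matches
 * the requested combination.
 */
#include <stddef.h>

static int lookup_txpwr_lmt(const struct txpwr_lmt_cfg_pair *tbl, size_t n,
			    unsigned char regd, unsigned char band,
			    unsigned char bw, unsigned char rs,
			    unsigned char ch)
{
	size_t i;

	for (i = 0; i < n; i++) {
		const struct txpwr_lmt_cfg_pair *p = &tbl[i];

		if (p->regd == regd && p->band == band && p->bw == bw &&
		    p->rs == rs && p->ch == ch)
			return p->txpwr_lmt;
	}
	return -1;	/* combination not present in this table */
}

For example, a lookup of (regd 0, band 1, bw 0, rs 1, ch 36) against the type0 rows above would return 28, matching the { 0, 1, 0, 1, 36, 28, } entry.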
+
+static const struct rtw_txpwr_lmt_cfg_pair rtw8814a_txpwr_lmt_type1[] = {
+ { 0, 0, 0, 0, 1, 34, },
+ { 2, 0, 0, 0, 1, 32, },
+ { 1, 0, 0, 0, 1, 32, },
+ { 0, 0, 0, 0, 2, 34, },
+ { 2, 0, 0, 0, 2, 32, },
+ { 1, 0, 0, 0, 2, 32, },
+ { 0, 0, 0, 0, 3, 34, },
+ { 2, 0, 0, 0, 3, 32, },
+ { 1, 0, 0, 0, 3, 32, },
+ { 0, 0, 0, 0, 4, 34, },
+ { 2, 0, 0, 0, 4, 32, },
+ { 1, 0, 0, 0, 4, 32, },
+ { 0, 0, 0, 0, 5, 34, },
+ { 2, 0, 0, 0, 5, 32, },
+ { 1, 0, 0, 0, 5, 32, },
+ { 0, 0, 0, 0, 6, 34, },
+ { 2, 0, 0, 0, 6, 32, },
+ { 1, 0, 0, 0, 6, 32, },
+ { 0, 0, 0, 0, 7, 34, },
+ { 2, 0, 0, 0, 7, 32, },
+ { 1, 0, 0, 0, 7, 32, },
+ { 0, 0, 0, 0, 8, 34, },
+ { 2, 0, 0, 0, 8, 32, },
+ { 1, 0, 0, 0, 8, 32, },
+ { 0, 0, 0, 0, 9, 34, },
+ { 2, 0, 0, 0, 9, 32, },
+ { 1, 0, 0, 0, 9, 32, },
+ { 0, 0, 0, 0, 10, 34, },
+ { 2, 0, 0, 0, 10, 32, },
+ { 1, 0, 0, 0, 10, 32, },
+ { 0, 0, 0, 0, 11, 34, },
+ { 2, 0, 0, 0, 11, 32, },
+ { 1, 0, 0, 0, 11, 32, },
+ { 0, 0, 0, 0, 12, 24, },
+ { 2, 0, 0, 0, 12, 32, },
+ { 1, 0, 0, 0, 12, 32, },
+ { 0, 0, 0, 0, 13, 16, },
+ { 2, 0, 0, 0, 13, 32, },
+ { 1, 0, 0, 0, 13, 32, },
+ { 0, 0, 0, 0, 14, 63, },
+ { 2, 0, 0, 0, 14, 63, },
+ { 1, 0, 0, 0, 14, 32, },
+ { 0, 0, 0, 1, 1, 30, },
+ { 2, 0, 0, 1, 1, 32, },
+ { 1, 0, 0, 1, 1, 32, },
+ { 0, 0, 0, 1, 2, 32, },
+ { 2, 0, 0, 1, 2, 32, },
+ { 1, 0, 0, 1, 2, 32, },
+ { 0, 0, 0, 1, 3, 32, },
+ { 2, 0, 0, 1, 3, 32, },
+ { 1, 0, 0, 1, 3, 32, },
+ { 0, 0, 0, 1, 4, 32, },
+ { 2, 0, 0, 1, 4, 32, },
+ { 1, 0, 0, 1, 4, 32, },
+ { 0, 0, 0, 1, 5, 32, },
+ { 2, 0, 0, 1, 5, 32, },
+ { 1, 0, 0, 1, 5, 32, },
+ { 0, 0, 0, 1, 6, 32, },
+ { 2, 0, 0, 1, 6, 32, },
+ { 1, 0, 0, 1, 6, 32, },
+ { 0, 0, 0, 1, 7, 32, },
+ { 2, 0, 0, 1, 7, 32, },
+ { 1, 0, 0, 1, 7, 32, },
+ { 0, 0, 0, 1, 8, 32, },
+ { 2, 0, 0, 1, 8, 32, },
+ { 1, 0, 0, 1, 8, 32, },
+ { 0, 0, 0, 1, 9, 32, },
+ { 2, 0, 0, 1, 9, 32, },
+ { 1, 0, 0, 1, 9, 32, },
+ { 0, 0, 0, 1, 10, 32, },
+ { 2, 0, 0, 1, 10, 32, },
+ { 1, 0, 0, 1, 10, 32, },
+ { 0, 0, 0, 1, 11, 30, },
+ { 2, 0, 0, 1, 11, 32, },
+ { 1, 0, 0, 1, 11, 32, },
+ { 0, 0, 0, 1, 12, 18, },
+ { 2, 0, 0, 1, 12, 32, },
+ { 1, 0, 0, 1, 12, 32, },
+ { 0, 0, 0, 1, 13, 8, },
+ { 2, 0, 0, 1, 13, 32, },
+ { 1, 0, 0, 1, 13, 32, },
+ { 0, 0, 0, 1, 14, 63, },
+ { 2, 0, 0, 1, 14, 63, },
+ { 1, 0, 0, 1, 14, 63, },
+ { 0, 0, 0, 2, 1, 28, },
+ { 2, 0, 0, 2, 1, 32, },
+ { 1, 0, 0, 2, 1, 32, },
+ { 0, 0, 0, 2, 2, 32, },
+ { 2, 0, 0, 2, 2, 32, },
+ { 1, 0, 0, 2, 2, 32, },
+ { 0, 0, 0, 2, 3, 32, },
+ { 2, 0, 0, 2, 3, 32, },
+ { 1, 0, 0, 2, 3, 32, },
+ { 0, 0, 0, 2, 4, 32, },
+ { 2, 0, 0, 2, 4, 32, },
+ { 1, 0, 0, 2, 4, 32, },
+ { 0, 0, 0, 2, 5, 32, },
+ { 2, 0, 0, 2, 5, 32, },
+ { 1, 0, 0, 2, 5, 32, },
+ { 0, 0, 0, 2, 6, 32, },
+ { 2, 0, 0, 2, 6, 32, },
+ { 1, 0, 0, 2, 6, 32, },
+ { 0, 0, 0, 2, 7, 32, },
+ { 2, 0, 0, 2, 7, 32, },
+ { 1, 0, 0, 2, 7, 32, },
+ { 0, 0, 0, 2, 8, 32, },
+ { 2, 0, 0, 2, 8, 32, },
+ { 1, 0, 0, 2, 8, 32, },
+ { 0, 0, 0, 2, 9, 32, },
+ { 2, 0, 0, 2, 9, 32, },
+ { 1, 0, 0, 2, 9, 32, },
+ { 0, 0, 0, 2, 10, 32, },
+ { 2, 0, 0, 2, 10, 32, },
+ { 1, 0, 0, 2, 10, 32, },
+ { 0, 0, 0, 2, 11, 28, },
+ { 2, 0, 0, 2, 11, 32, },
+ { 1, 0, 0, 2, 11, 32, },
+ { 0, 0, 0, 2, 12, 16, },
+ { 2, 0, 0, 2, 12, 32, },
+ { 1, 0, 0, 2, 12, 32, },
+ { 0, 0, 0, 2, 13, 6, },
+ { 2, 0, 0, 2, 13, 32, },
+ { 1, 0, 0, 2, 13, 32, },
+ { 0, 0, 0, 2, 14, 63, },
+ { 2, 0, 0, 2, 14, 63, },
+ { 1, 0, 0, 2, 14, 63, },
+ { 0, 0, 0, 3, 1, 26, },
+ { 2, 0, 0, 3, 1, 30, },
+ { 1, 0, 0, 3, 1, 30, },
+ { 0, 0, 0, 3, 2, 30, },
+ { 2, 0, 0, 3, 2, 30, },
+ { 1, 0, 0, 3, 2, 30, },
+ { 0, 0, 0, 3, 3, 30, },
+ { 2, 0, 0, 3, 3, 30, },
+ { 1, 0, 0, 3, 3, 30, },
+ { 0, 0, 0, 3, 4, 30, },
+ { 2, 0, 0, 3, 4, 30, },
+ { 1, 0, 0, 3, 4, 30, },
+ { 0, 0, 0, 3, 5, 30, },
+ { 2, 0, 0, 3, 5, 30, },
+ { 1, 0, 0, 3, 5, 30, },
+ { 0, 0, 0, 3, 6, 30, },
+ { 2, 0, 0, 3, 6, 30, },
+ { 1, 0, 0, 3, 6, 30, },
+ { 0, 0, 0, 3, 7, 30, },
+ { 2, 0, 0, 3, 7, 30, },
+ { 1, 0, 0, 3, 7, 30, },
+ { 0, 0, 0, 3, 8, 30, },
+ { 2, 0, 0, 3, 8, 30, },
+ { 1, 0, 0, 3, 8, 30, },
+ { 0, 0, 0, 3, 9, 30, },
+ { 2, 0, 0, 3, 9, 30, },
+ { 1, 0, 0, 3, 9, 30, },
+ { 0, 0, 0, 3, 10, 30, },
+ { 2, 0, 0, 3, 10, 30, },
+ { 1, 0, 0, 3, 10, 30, },
+ { 0, 0, 0, 3, 11, 26, },
+ { 2, 0, 0, 3, 11, 30, },
+ { 1, 0, 0, 3, 11, 30, },
+ { 0, 0, 0, 3, 12, 14, },
+ { 2, 0, 0, 3, 12, 30, },
+ { 1, 0, 0, 3, 12, 30, },
+ { 0, 0, 0, 3, 13, 4, },
+ { 2, 0, 0, 3, 13, 30, },
+ { 1, 0, 0, 3, 13, 30, },
+ { 0, 0, 0, 3, 14, 63, },
+ { 2, 0, 0, 3, 14, 63, },
+ { 1, 0, 0, 3, 14, 63, },
+ { 0, 0, 0, 6, 1, 24, },
+ { 2, 0, 0, 6, 1, 28, },
+ { 1, 0, 0, 6, 1, 28, },
+ { 0, 0, 0, 6, 2, 28, },
+ { 2, 0, 0, 6, 2, 28, },
+ { 1, 0, 0, 6, 2, 28, },
+ { 0, 0, 0, 6, 3, 28, },
+ { 2, 0, 0, 6, 3, 28, },
+ { 1, 0, 0, 6, 3, 28, },
+ { 0, 0, 0, 6, 4, 28, },
+ { 2, 0, 0, 6, 4, 28, },
+ { 1, 0, 0, 6, 4, 28, },
+ { 0, 0, 0, 6, 5, 28, },
+ { 2, 0, 0, 6, 5, 28, },
+ { 1, 0, 0, 6, 5, 28, },
+ { 0, 0, 0, 6, 6, 28, },
+ { 2, 0, 0, 6, 6, 28, },
+ { 1, 0, 0, 6, 6, 28, },
+ { 0, 0, 0, 6, 7, 28, },
+ { 2, 0, 0, 6, 7, 28, },
+ { 1, 0, 0, 6, 7, 28, },
+ { 0, 0, 0, 6, 8, 28, },
+ { 2, 0, 0, 6, 8, 28, },
+ { 1, 0, 0, 6, 8, 28, },
+ { 0, 0, 0, 6, 9, 28, },
+ { 2, 0, 0, 6, 9, 28, },
+ { 1, 0, 0, 6, 9, 28, },
+ { 0, 0, 0, 6, 10, 28, },
+ { 2, 0, 0, 6, 10, 28, },
+ { 1, 0, 0, 6, 10, 28, },
+ { 0, 0, 0, 6, 11, 24, },
+ { 2, 0, 0, 6, 11, 28, },
+ { 1, 0, 0, 6, 11, 28, },
+ { 0, 0, 0, 6, 12, 14, },
+ { 2, 0, 0, 6, 12, 28, },
+ { 1, 0, 0, 6, 12, 28, },
+ { 0, 0, 0, 6, 13, 4, },
+ { 2, 0, 0, 6, 13, 28, },
+ { 1, 0, 0, 6, 13, 28, },
+ { 0, 0, 0, 6, 14, 63, },
+ { 2, 0, 0, 6, 14, 63, },
+ { 1, 0, 0, 6, 14, 63, },
+ { 0, 0, 0, 7, 1, 22, },
+ { 2, 0, 0, 7, 1, 26, },
+ { 1, 0, 0, 7, 1, 26, },
+ { 0, 0, 0, 7, 2, 26, },
+ { 2, 0, 0, 7, 2, 26, },
+ { 1, 0, 0, 7, 2, 26, },
+ { 0, 0, 0, 7, 3, 26, },
+ { 2, 0, 0, 7, 3, 26, },
+ { 1, 0, 0, 7, 3, 26, },
+ { 0, 0, 0, 7, 4, 26, },
+ { 2, 0, 0, 7, 4, 26, },
+ { 1, 0, 0, 7, 4, 26, },
+ { 0, 0, 0, 7, 5, 26, },
+ { 2, 0, 0, 7, 5, 26, },
+ { 1, 0, 0, 7, 5, 26, },
+ { 0, 0, 0, 7, 6, 26, },
+ { 2, 0, 0, 7, 6, 26, },
+ { 1, 0, 0, 7, 6, 26, },
+ { 0, 0, 0, 7, 7, 26, },
+ { 2, 0, 0, 7, 7, 26, },
+ { 1, 0, 0, 7, 7, 26, },
+ { 0, 0, 0, 7, 8, 26, },
+ { 2, 0, 0, 7, 8, 26, },
+ { 1, 0, 0, 7, 8, 26, },
+ { 0, 0, 0, 7, 9, 26, },
+ { 2, 0, 0, 7, 9, 26, },
+ { 1, 0, 0, 7, 9, 26, },
+ { 0, 0, 0, 7, 10, 26, },
+ { 2, 0, 0, 7, 10, 26, },
+ { 1, 0, 0, 7, 10, 26, },
+ { 0, 0, 0, 7, 11, 22, },
+ { 2, 0, 0, 7, 11, 26, },
+ { 1, 0, 0, 7, 11, 26, },
+ { 0, 0, 0, 7, 12, 14, },
+ { 2, 0, 0, 7, 12, 26, },
+ { 1, 0, 0, 7, 12, 26, },
+ { 0, 0, 0, 7, 13, 4, },
+ { 2, 0, 0, 7, 13, 26, },
+ { 1, 0, 0, 7, 13, 26, },
+ { 0, 0, 0, 7, 14, 63, },
+ { 2, 0, 0, 7, 14, 63, },
+ { 1, 0, 0, 7, 14, 63, },
+ { 0, 0, 1, 2, 1, 63, },
+ { 2, 0, 1, 2, 1, 63, },
+ { 1, 0, 1, 2, 1, 63, },
+ { 0, 0, 1, 2, 2, 63, },
+ { 2, 0, 1, 2, 2, 63, },
+ { 1, 0, 1, 2, 2, 63, },
+ { 0, 0, 1, 2, 3, 28, },
+ { 2, 0, 1, 2, 3, 32, },
+ { 1, 0, 1, 2, 3, 32, },
+ { 0, 0, 1, 2, 4, 32, },
+ { 2, 0, 1, 2, 4, 32, },
+ { 1, 0, 1, 2, 4, 32, },
+ { 0, 0, 1, 2, 5, 32, },
+ { 2, 0, 1, 2, 5, 32, },
+ { 1, 0, 1, 2, 5, 32, },
+ { 0, 0, 1, 2, 6, 32, },
+ { 2, 0, 1, 2, 6, 32, },
+ { 1, 0, 1, 2, 6, 32, },
+ { 0, 0, 1, 2, 7, 32, },
+ { 2, 0, 1, 2, 7, 32, },
+ { 1, 0, 1, 2, 7, 32, },
+ { 0, 0, 1, 2, 8, 32, },
+ { 2, 0, 1, 2, 8, 32, },
+ { 1, 0, 1, 2, 8, 32, },
+ { 0, 0, 1, 2, 9, 32, },
+ { 2, 0, 1, 2, 9, 32, },
+ { 1, 0, 1, 2, 9, 32, },
+ { 0, 0, 1, 2, 10, 32, },
+ { 2, 0, 1, 2, 10, 32, },
+ { 1, 0, 1, 2, 10, 32, },
+ { 0, 0, 1, 2, 11, 28, },
+ { 2, 0, 1, 2, 11, 32, },
+ { 1, 0, 1, 2, 11, 32, },
+ { 0, 0, 1, 2, 12, 16, },
+ { 2, 0, 1, 2, 12, 32, },
+ { 1, 0, 1, 2, 12, 32, },
+ { 0, 0, 1, 2, 13, 10, },
+ { 2, 0, 1, 2, 13, 32, },
+ { 1, 0, 1, 2, 13, 32, },
+ { 0, 0, 1, 2, 14, 63, },
+ { 2, 0, 1, 2, 14, 63, },
+ { 1, 0, 1, 2, 14, 63, },
+ { 0, 0, 1, 3, 1, 63, },
+ { 2, 0, 1, 3, 1, 63, },
+ { 1, 0, 1, 3, 1, 63, },
+ { 0, 0, 1, 3, 2, 63, },
+ { 2, 0, 1, 3, 2, 63, },
+ { 1, 0, 1, 3, 2, 63, },
+ { 0, 0, 1, 3, 3, 26, },
+ { 2, 0, 1, 3, 3, 30, },
+ { 1, 0, 1, 3, 3, 30, },
+ { 0, 0, 1, 3, 4, 30, },
+ { 2, 0, 1, 3, 4, 30, },
+ { 1, 0, 1, 3, 4, 30, },
+ { 0, 0, 1, 3, 5, 30, },
+ { 2, 0, 1, 3, 5, 30, },
+ { 1, 0, 1, 3, 5, 30, },
+ { 0, 0, 1, 3, 6, 30, },
+ { 2, 0, 1, 3, 6, 30, },
+ { 1, 0, 1, 3, 6, 30, },
+ { 0, 0, 1, 3, 7, 30, },
+ { 2, 0, 1, 3, 7, 30, },
+ { 1, 0, 1, 3, 7, 30, },
+ { 0, 0, 1, 3, 8, 30, },
+ { 2, 0, 1, 3, 8, 30, },
+ { 1, 0, 1, 3, 8, 30, },
+ { 0, 0, 1, 3, 9, 30, },
+ { 2, 0, 1, 3, 9, 30, },
+ { 1, 0, 1, 3, 9, 30, },
+ { 0, 0, 1, 3, 10, 30, },
+ { 2, 0, 1, 3, 10, 30, },
+ { 1, 0, 1, 3, 10, 30, },
+ { 0, 0, 1, 3, 11, 26, },
+ { 2, 0, 1, 3, 11, 30, },
+ { 1, 0, 1, 3, 11, 30, },
+ { 0, 0, 1, 3, 12, 14, },
+ { 2, 0, 1, 3, 12, 30, },
+ { 1, 0, 1, 3, 12, 30, },
+ { 0, 0, 1, 3, 13, 8, },
+ { 2, 0, 1, 3, 13, 30, },
+ { 1, 0, 1, 3, 13, 30, },
+ { 0, 0, 1, 3, 14, 63, },
+ { 2, 0, 1, 3, 14, 63, },
+ { 1, 0, 1, 3, 14, 63, },
+ { 0, 0, 1, 6, 1, 63, },
+ { 2, 0, 1, 6, 1, 63, },
+ { 1, 0, 1, 6, 1, 63, },
+ { 0, 0, 1, 6, 2, 63, },
+ { 2, 0, 1, 6, 2, 63, },
+ { 1, 0, 1, 6, 2, 63, },
+ { 0, 0, 1, 6, 3, 24, },
+ { 2, 0, 1, 6, 3, 28, },
+ { 1, 0, 1, 6, 3, 28, },
+ { 0, 0, 1, 6, 4, 28, },
+ { 2, 0, 1, 6, 4, 28, },
+ { 1, 0, 1, 6, 4, 28, },
+ { 0, 0, 1, 6, 5, 28, },
+ { 2, 0, 1, 6, 5, 28, },
+ { 1, 0, 1, 6, 5, 28, },
+ { 0, 0, 1, 6, 6, 28, },
+ { 2, 0, 1, 6, 6, 28, },
+ { 1, 0, 1, 6, 6, 28, },
+ { 0, 0, 1, 6, 7, 28, },
+ { 2, 0, 1, 6, 7, 28, },
+ { 1, 0, 1, 6, 7, 28, },
+ { 0, 0, 1, 6, 8, 28, },
+ { 2, 0, 1, 6, 8, 28, },
+ { 1, 0, 1, 6, 8, 28, },
+ { 0, 0, 1, 6, 9, 28, },
+ { 2, 0, 1, 6, 9, 28, },
+ { 1, 0, 1, 6, 9, 28, },
+ { 0, 0, 1, 6, 10, 28, },
+ { 2, 0, 1, 6, 10, 28, },
+ { 1, 0, 1, 6, 10, 28, },
+ { 0, 0, 1, 6, 11, 24, },
+ { 2, 0, 1, 6, 11, 28, },
+ { 1, 0, 1, 6, 11, 28, },
+ { 0, 0, 1, 6, 12, 14, },
+ { 2, 0, 1, 6, 12, 28, },
+ { 1, 0, 1, 6, 12, 28, },
+ { 0, 0, 1, 6, 13, 8, },
+ { 2, 0, 1, 6, 13, 28, },
+ { 1, 0, 1, 6, 13, 28, },
+ { 0, 0, 1, 6, 14, 63, },
+ { 2, 0, 1, 6, 14, 63, },
+ { 1, 0, 1, 6, 14, 63, },
+ { 0, 0, 1, 7, 1, 63, },
+ { 2, 0, 1, 7, 1, 63, },
+ { 1, 0, 1, 7, 1, 63, },
+ { 0, 0, 1, 7, 2, 63, },
+ { 2, 0, 1, 7, 2, 63, },
+ { 1, 0, 1, 7, 2, 63, },
+ { 0, 0, 1, 7, 3, 22, },
+ { 2, 0, 1, 7, 3, 26, },
+ { 1, 0, 1, 7, 3, 26, },
+ { 0, 0, 1, 7, 4, 26, },
+ { 2, 0, 1, 7, 4, 26, },
+ { 1, 0, 1, 7, 4, 26, },
+ { 0, 0, 1, 7, 5, 26, },
+ { 2, 0, 1, 7, 5, 26, },
+ { 1, 0, 1, 7, 5, 26, },
+ { 0, 0, 1, 7, 6, 26, },
+ { 2, 0, 1, 7, 6, 26, },
+ { 1, 0, 1, 7, 6, 26, },
+ { 0, 0, 1, 7, 7, 26, },
+ { 2, 0, 1, 7, 7, 26, },
+ { 1, 0, 1, 7, 7, 26, },
+ { 0, 0, 1, 7, 8, 26, },
+ { 2, 0, 1, 7, 8, 26, },
+ { 1, 0, 1, 7, 8, 26, },
+ { 0, 0, 1, 7, 9, 26, },
+ { 2, 0, 1, 7, 9, 26, },
+ { 1, 0, 1, 7, 9, 26, },
+ { 0, 0, 1, 7, 10, 26, },
+ { 2, 0, 1, 7, 10, 26, },
+ { 1, 0, 1, 7, 10, 26, },
+ { 0, 0, 1, 7, 11, 22, },
+ { 2, 0, 1, 7, 11, 26, },
+ { 1, 0, 1, 7, 11, 26, },
+ { 0, 0, 1, 7, 12, 14, },
+ { 2, 0, 1, 7, 12, 26, },
+ { 1, 0, 1, 7, 12, 26, },
+ { 0, 0, 1, 7, 13, 8, },
+ { 2, 0, 1, 7, 13, 26, },
+ { 1, 0, 1, 7, 13, 26, },
+ { 0, 0, 1, 7, 14, 63, },
+ { 2, 0, 1, 7, 14, 63, },
+ { 1, 0, 1, 7, 14, 63, },
+ { 0, 1, 0, 1, 36, 28, },
+ { 2, 1, 0, 1, 36, 32, },
+ { 1, 1, 0, 1, 36, 32, },
+ { 0, 1, 0, 1, 40, 32, },
+ { 2, 1, 0, 1, 40, 32, },
+ { 1, 1, 0, 1, 40, 32, },
+ { 0, 1, 0, 1, 44, 32, },
+ { 2, 1, 0, 1, 44, 32, },
+ { 1, 1, 0, 1, 44, 32, },
+ { 0, 1, 0, 1, 48, 32, },
+ { 2, 1, 0, 1, 48, 32, },
+ { 1, 1, 0, 1, 48, 32, },
+ { 0, 1, 0, 1, 52, 32, },
+ { 2, 1, 0, 1, 52, 32, },
+ { 1, 1, 0, 1, 52, 32, },
+ { 0, 1, 0, 1, 56, 32, },
+ { 2, 1, 0, 1, 56, 32, },
+ { 1, 1, 0, 1, 56, 32, },
+ { 0, 1, 0, 1, 60, 32, },
+ { 2, 1, 0, 1, 60, 32, },
+ { 1, 1, 0, 1, 60, 32, },
+ { 0, 1, 0, 1, 64, 30, },
+ { 2, 1, 0, 1, 64, 32, },
+ { 1, 1, 0, 1, 64, 32, },
+ { 0, 1, 0, 1, 100, 28, },
+ { 2, 1, 0, 1, 100, 32, },
+ { 1, 1, 0, 1, 100, 32, },
+ { 0, 1, 0, 1, 104, 32, },
+ { 2, 1, 0, 1, 104, 32, },
+ { 1, 1, 0, 1, 104, 32, },
+ { 0, 1, 0, 1, 108, 32, },
+ { 2, 1, 0, 1, 108, 32, },
+ { 1, 1, 0, 1, 108, 32, },
+ { 0, 1, 0, 1, 112, 32, },
+ { 2, 1, 0, 1, 112, 32, },
+ { 1, 1, 0, 1, 112, 32, },
+ { 0, 1, 0, 1, 116, 32, },
+ { 2, 1, 0, 1, 116, 32, },
+ { 1, 1, 0, 1, 116, 32, },
+ { 0, 1, 0, 1, 120, 32, },
+ { 2, 1, 0, 1, 120, 32, },
+ { 1, 1, 0, 1, 120, 32, },
+ { 0, 1, 0, 1, 124, 32, },
+ { 2, 1, 0, 1, 124, 32, },
+ { 1, 1, 0, 1, 124, 32, },
+ { 0, 1, 0, 1, 128, 32, },
+ { 2, 1, 0, 1, 128, 32, },
+ { 1, 1, 0, 1, 128, 32, },
+ { 0, 1, 0, 1, 132, 32, },
+ { 2, 1, 0, 1, 132, 32, },
+ { 1, 1, 0, 1, 132, 32, },
+ { 0, 1, 0, 1, 136, 32, },
+ { 2, 1, 0, 1, 136, 32, },
+ { 1, 1, 0, 1, 136, 32, },
+ { 0, 1, 0, 1, 140, 28, },
+ { 2, 1, 0, 1, 140, 32, },
+ { 1, 1, 0, 1, 140, 32, },
+ { 0, 1, 0, 1, 149, 28, },
+ { 2, 1, 0, 1, 149, 32, },
+ { 1, 1, 0, 1, 149, 63, },
+ { 0, 1, 0, 1, 153, 32, },
+ { 2, 1, 0, 1, 153, 32, },
+ { 1, 1, 0, 1, 153, 63, },
+ { 0, 1, 0, 1, 157, 32, },
+ { 2, 1, 0, 1, 157, 32, },
+ { 1, 1, 0, 1, 157, 63, },
+ { 0, 1, 0, 1, 161, 32, },
+ { 2, 1, 0, 1, 161, 32, },
+ { 1, 1, 0, 1, 161, 63, },
+ { 0, 1, 0, 1, 165, 32, },
+ { 2, 1, 0, 1, 165, 32, },
+ { 1, 1, 0, 1, 165, 63, },
+ { 0, 1, 0, 2, 36, 28, },
+ { 2, 1, 0, 2, 36, 32, },
+ { 1, 1, 0, 2, 36, 32, },
+ { 0, 1, 0, 2, 40, 32, },
+ { 2, 1, 0, 2, 40, 32, },
+ { 1, 1, 0, 2, 40, 32, },
+ { 0, 1, 0, 2, 44, 32, },
+ { 2, 1, 0, 2, 44, 32, },
+ { 1, 1, 0, 2, 44, 32, },
+ { 0, 1, 0, 2, 48, 32, },
+ { 2, 1, 0, 2, 48, 32, },
+ { 1, 1, 0, 2, 48, 32, },
+ { 0, 1, 0, 2, 52, 32, },
+ { 2, 1, 0, 2, 52, 32, },
+ { 1, 1, 0, 2, 52, 32, },
+ { 0, 1, 0, 2, 56, 32, },
+ { 2, 1, 0, 2, 56, 32, },
+ { 1, 1, 0, 2, 56, 32, },
+ { 0, 1, 0, 2, 60, 32, },
+ { 2, 1, 0, 2, 60, 32, },
+ { 1, 1, 0, 2, 60, 32, },
+ { 0, 1, 0, 2, 64, 30, },
+ { 2, 1, 0, 2, 64, 32, },
+ { 1, 1, 0, 2, 64, 32, },
+ { 0, 1, 0, 2, 100, 28, },
+ { 2, 1, 0, 2, 100, 32, },
+ { 1, 1, 0, 2, 100, 32, },
+ { 0, 1, 0, 2, 104, 32, },
+ { 2, 1, 0, 2, 104, 32, },
+ { 1, 1, 0, 2, 104, 32, },
+ { 0, 1, 0, 2, 108, 32, },
+ { 2, 1, 0, 2, 108, 32, },
+ { 1, 1, 0, 2, 108, 32, },
+ { 0, 1, 0, 2, 112, 32, },
+ { 2, 1, 0, 2, 112, 32, },
+ { 1, 1, 0, 2, 112, 32, },
+ { 0, 1, 0, 2, 116, 32, },
+ { 2, 1, 0, 2, 116, 32, },
+ { 1, 1, 0, 2, 116, 32, },
+ { 0, 1, 0, 2, 120, 32, },
+ { 2, 1, 0, 2, 120, 32, },
+ { 1, 1, 0, 2, 120, 32, },
+ { 0, 1, 0, 2, 124, 32, },
+ { 2, 1, 0, 2, 124, 32, },
+ { 1, 1, 0, 2, 124, 32, },
+ { 0, 1, 0, 2, 128, 32, },
+ { 2, 1, 0, 2, 128, 32, },
+ { 1, 1, 0, 2, 128, 32, },
+ { 0, 1, 0, 2, 132, 32, },
+ { 2, 1, 0, 2, 132, 32, },
+ { 1, 1, 0, 2, 132, 32, },
+ { 0, 1, 0, 2, 136, 32, },
+ { 2, 1, 0, 2, 136, 32, },
+ { 1, 1, 0, 2, 136, 32, },
+ { 0, 1, 0, 2, 140, 28, },
+ { 2, 1, 0, 2, 140, 32, },
+ { 1, 1, 0, 2, 140, 32, },
+ { 0, 1, 0, 2, 149, 28, },
+ { 2, 1, 0, 2, 149, 32, },
+ { 1, 1, 0, 2, 149, 63, },
+ { 0, 1, 0, 2, 153, 32, },
+ { 2, 1, 0, 2, 153, 32, },
+ { 1, 1, 0, 2, 153, 63, },
+ { 0, 1, 0, 2, 157, 32, },
+ { 2, 1, 0, 2, 157, 32, },
+ { 1, 1, 0, 2, 157, 63, },
+ { 0, 1, 0, 2, 161, 32, },
+ { 2, 1, 0, 2, 161, 32, },
+ { 1, 1, 0, 2, 161, 63, },
+ { 0, 1, 0, 2, 165, 32, },
+ { 2, 1, 0, 2, 165, 32, },
+ { 1, 1, 0, 2, 165, 63, },
+ { 0, 1, 0, 3, 36, 26, },
+ { 2, 1, 0, 3, 36, 30, },
+ { 1, 1, 0, 3, 36, 30, },
+ { 0, 1, 0, 3, 40, 30, },
+ { 2, 1, 0, 3, 40, 30, },
+ { 1, 1, 0, 3, 40, 30, },
+ { 0, 1, 0, 3, 44, 30, },
+ { 2, 1, 0, 3, 44, 30, },
+ { 1, 1, 0, 3, 44, 30, },
+ { 0, 1, 0, 3, 48, 30, },
+ { 2, 1, 0, 3, 48, 30, },
+ { 1, 1, 0, 3, 48, 30, },
+ { 0, 1, 0, 3, 52, 30, },
+ { 2, 1, 0, 3, 52, 30, },
+ { 1, 1, 0, 3, 52, 30, },
+ { 0, 1, 0, 3, 56, 30, },
+ { 2, 1, 0, 3, 56, 30, },
+ { 1, 1, 0, 3, 56, 30, },
+ { 0, 1, 0, 3, 60, 30, },
+ { 2, 1, 0, 3, 60, 30, },
+ { 1, 1, 0, 3, 60, 30, },
+ { 0, 1, 0, 3, 64, 28, },
+ { 2, 1, 0, 3, 64, 30, },
+ { 1, 1, 0, 3, 64, 30, },
+ { 0, 1, 0, 3, 100, 28, },
+ { 2, 1, 0, 3, 100, 30, },
+ { 1, 1, 0, 3, 100, 30, },
+ { 0, 1, 0, 3, 104, 30, },
+ { 2, 1, 0, 3, 104, 30, },
+ { 1, 1, 0, 3, 104, 30, },
+ { 0, 1, 0, 3, 108, 30, },
+ { 2, 1, 0, 3, 108, 30, },
+ { 1, 1, 0, 3, 108, 30, },
+ { 0, 1, 0, 3, 112, 30, },
+ { 2, 1, 0, 3, 112, 30, },
+ { 1, 1, 0, 3, 112, 30, },
+ { 0, 1, 0, 3, 116, 30, },
+ { 2, 1, 0, 3, 116, 30, },
+ { 1, 1, 0, 3, 116, 30, },
+ { 0, 1, 0, 3, 120, 30, },
+ { 2, 1, 0, 3, 120, 30, },
+ { 1, 1, 0, 3, 120, 30, },
+ { 0, 1, 0, 3, 124, 30, },
+ { 2, 1, 0, 3, 124, 30, },
+ { 1, 1, 0, 3, 124, 30, },
+ { 0, 1, 0, 3, 128, 30, },
+ { 2, 1, 0, 3, 128, 30, },
+ { 1, 1, 0, 3, 128, 30, },
+ { 0, 1, 0, 3, 132, 30, },
+ { 2, 1, 0, 3, 132, 30, },
+ { 1, 1, 0, 3, 132, 30, },
+ { 0, 1, 0, 3, 136, 30, },
+ { 2, 1, 0, 3, 136, 30, },
+ { 1, 1, 0, 3, 136, 30, },
+ { 0, 1, 0, 3, 140, 26, },
+ { 2, 1, 0, 3, 140, 30, },
+ { 1, 1, 0, 3, 140, 30, },
+ { 0, 1, 0, 3, 149, 26, },
+ { 2, 1, 0, 3, 149, 30, },
+ { 1, 1, 0, 3, 149, 63, },
+ { 0, 1, 0, 3, 153, 30, },
+ { 2, 1, 0, 3, 153, 30, },
+ { 1, 1, 0, 3, 153, 63, },
+ { 0, 1, 0, 3, 157, 30, },
+ { 2, 1, 0, 3, 157, 30, },
+ { 1, 1, 0, 3, 157, 63, },
+ { 0, 1, 0, 3, 161, 30, },
+ { 2, 1, 0, 3, 161, 30, },
+ { 1, 1, 0, 3, 161, 63, },
+ { 0, 1, 0, 3, 165, 30, },
+ { 2, 1, 0, 3, 165, 30, },
+ { 1, 1, 0, 3, 165, 63, },
+ { 0, 1, 0, 6, 36, 24, },
+ { 2, 1, 0, 6, 36, 28, },
+ { 1, 1, 0, 6, 36, 28, },
+ { 0, 1, 0, 6, 40, 28, },
+ { 2, 1, 0, 6, 40, 28, },
+ { 1, 1, 0, 6, 40, 28, },
+ { 0, 1, 0, 6, 44, 28, },
+ { 2, 1, 0, 6, 44, 28, },
+ { 1, 1, 0, 6, 44, 28, },
+ { 0, 1, 0, 6, 48, 28, },
+ { 2, 1, 0, 6, 48, 28, },
+ { 1, 1, 0, 6, 48, 28, },
+ { 0, 1, 0, 6, 52, 28, },
+ { 2, 1, 0, 6, 52, 28, },
+ { 1, 1, 0, 6, 52, 28, },
+ { 0, 1, 0, 6, 56, 28, },
+ { 2, 1, 0, 6, 56, 28, },
+ { 1, 1, 0, 6, 56, 28, },
+ { 0, 1, 0, 6, 60, 28, },
+ { 2, 1, 0, 6, 60, 28, },
+ { 1, 1, 0, 6, 60, 28, },
+ { 0, 1, 0, 6, 64, 26, },
+ { 2, 1, 0, 6, 64, 28, },
+ { 1, 1, 0, 6, 64, 28, },
+ { 0, 1, 0, 6, 100, 24, },
+ { 2, 1, 0, 6, 100, 28, },
+ { 1, 1, 0, 6, 100, 28, },
+ { 0, 1, 0, 6, 104, 28, },
+ { 2, 1, 0, 6, 104, 28, },
+ { 1, 1, 0, 6, 104, 28, },
+ { 0, 1, 0, 6, 108, 28, },
+ { 2, 1, 0, 6, 108, 28, },
+ { 1, 1, 0, 6, 108, 28, },
+ { 0, 1, 0, 6, 112, 28, },
+ { 2, 1, 0, 6, 112, 28, },
+ { 1, 1, 0, 6, 112, 28, },
+ { 0, 1, 0, 6, 116, 28, },
+ { 2, 1, 0, 6, 116, 28, },
+ { 1, 1, 0, 6, 116, 28, },
+ { 0, 1, 0, 6, 120, 28, },
+ { 2, 1, 0, 6, 120, 28, },
+ { 1, 1, 0, 6, 120, 28, },
+ { 0, 1, 0, 6, 124, 28, },
+ { 2, 1, 0, 6, 124, 28, },
+ { 1, 1, 0, 6, 124, 28, },
+ { 0, 1, 0, 6, 128, 28, },
+ { 2, 1, 0, 6, 128, 28, },
+ { 1, 1, 0, 6, 128, 28, },
+ { 0, 1, 0, 6, 132, 28, },
+ { 2, 1, 0, 6, 132, 28, },
+ { 1, 1, 0, 6, 132, 28, },
+ { 0, 1, 0, 6, 136, 28, },
+ { 2, 1, 0, 6, 136, 28, },
+ { 1, 1, 0, 6, 136, 28, },
+ { 0, 1, 0, 6, 140, 24, },
+ { 2, 1, 0, 6, 140, 28, },
+ { 1, 1, 0, 6, 140, 28, },
+ { 0, 1, 0, 6, 149, 24, },
+ { 2, 1, 0, 6, 149, 28, },
+ { 1, 1, 0, 6, 149, 63, },
+ { 0, 1, 0, 6, 153, 28, },
+ { 2, 1, 0, 6, 153, 28, },
+ { 1, 1, 0, 6, 153, 63, },
+ { 0, 1, 0, 6, 157, 28, },
+ { 2, 1, 0, 6, 157, 28, },
+ { 1, 1, 0, 6, 157, 63, },
+ { 0, 1, 0, 6, 161, 28, },
+ { 2, 1, 0, 6, 161, 28, },
+ { 1, 1, 0, 6, 161, 63, },
+ { 0, 1, 0, 6, 165, 28, },
+ { 2, 1, 0, 6, 165, 28, },
+ { 1, 1, 0, 6, 165, 63, },
+ { 0, 1, 0, 7, 36, 22, },
+ { 2, 1, 0, 7, 36, 26, },
+ { 1, 1, 0, 7, 36, 26, },
+ { 0, 1, 0, 7, 40, 26, },
+ { 2, 1, 0, 7, 40, 26, },
+ { 1, 1, 0, 7, 40, 26, },
+ { 0, 1, 0, 7, 44, 26, },
+ { 2, 1, 0, 7, 44, 26, },
+ { 1, 1, 0, 7, 44, 26, },
+ { 0, 1, 0, 7, 48, 26, },
+ { 2, 1, 0, 7, 48, 26, },
+ { 1, 1, 0, 7, 48, 26, },
+ { 0, 1, 0, 7, 52, 26, },
+ { 2, 1, 0, 7, 52, 26, },
+ { 1, 1, 0, 7, 52, 26, },
+ { 0, 1, 0, 7, 56, 26, },
+ { 2, 1, 0, 7, 56, 26, },
+ { 1, 1, 0, 7, 56, 26, },
+ { 0, 1, 0, 7, 60, 26, },
+ { 2, 1, 0, 7, 60, 26, },
+ { 1, 1, 0, 7, 60, 26, },
+ { 0, 1, 0, 7, 64, 24, },
+ { 2, 1, 0, 7, 64, 26, },
+ { 1, 1, 0, 7, 64, 26, },
+ { 0, 1, 0, 7, 100, 22, },
+ { 2, 1, 0, 7, 100, 26, },
+ { 1, 1, 0, 7, 100, 26, },
+ { 0, 1, 0, 7, 104, 26, },
+ { 2, 1, 0, 7, 104, 26, },
+ { 1, 1, 0, 7, 104, 26, },
+ { 0, 1, 0, 7, 108, 26, },
+ { 2, 1, 0, 7, 108, 26, },
+ { 1, 1, 0, 7, 108, 26, },
+ { 0, 1, 0, 7, 112, 26, },
+ { 2, 1, 0, 7, 112, 26, },
+ { 1, 1, 0, 7, 112, 26, },
+ { 0, 1, 0, 7, 116, 26, },
+ { 2, 1, 0, 7, 116, 26, },
+ { 1, 1, 0, 7, 116, 26, },
+ { 0, 1, 0, 7, 120, 26, },
+ { 2, 1, 0, 7, 120, 26, },
+ { 1, 1, 0, 7, 120, 26, },
+ { 0, 1, 0, 7, 124, 26, },
+ { 2, 1, 0, 7, 124, 26, },
+ { 1, 1, 0, 7, 124, 26, },
+ { 0, 1, 0, 7, 128, 26, },
+ { 2, 1, 0, 7, 128, 26, },
+ { 1, 1, 0, 7, 128, 26, },
+ { 0, 1, 0, 7, 132, 26, },
+ { 2, 1, 0, 7, 132, 26, },
+ { 1, 1, 0, 7, 132, 26, },
+ { 0, 1, 0, 7, 136, 26, },
+ { 2, 1, 0, 7, 136, 26, },
+ { 1, 1, 0, 7, 136, 26, },
+ { 0, 1, 0, 7, 140, 22, },
+ { 2, 1, 0, 7, 140, 26, },
+ { 1, 1, 0, 7, 140, 26, },
+ { 0, 1, 0, 7, 149, 22, },
+ { 2, 1, 0, 7, 149, 26, },
+ { 1, 1, 0, 7, 149, 63, },
+ { 0, 1, 0, 7, 153, 26, },
+ { 2, 1, 0, 7, 153, 26, },
+ { 1, 1, 0, 7, 153, 63, },
+ { 0, 1, 0, 7, 157, 26, },
+ { 2, 1, 0, 7, 157, 26, },
+ { 1, 1, 0, 7, 157, 63, },
+ { 0, 1, 0, 7, 161, 26, },
+ { 2, 1, 0, 7, 161, 26, },
+ { 1, 1, 0, 7, 161, 63, },
+ { 0, 1, 0, 7, 165, 26, },
+ { 2, 1, 0, 7, 165, 26, },
+ { 1, 1, 0, 7, 165, 63, },
+ { 0, 1, 1, 2, 38, 28, },
+ { 2, 1, 1, 2, 38, 32, },
+ { 1, 1, 1, 2, 38, 32, },
+ { 0, 1, 1, 2, 46, 32, },
+ { 2, 1, 1, 2, 46, 32, },
+ { 1, 1, 1, 2, 46, 32, },
+ { 0, 1, 1, 2, 54, 32, },
+ { 2, 1, 1, 2, 54, 32, },
+ { 1, 1, 1, 2, 54, 32, },
+ { 0, 1, 1, 2, 62, 28, },
+ { 2, 1, 1, 2, 62, 32, },
+ { 1, 1, 1, 2, 62, 32, },
+ { 0, 1, 1, 2, 102, 28, },
+ { 2, 1, 1, 2, 102, 32, },
+ { 1, 1, 1, 2, 102, 32, },
+ { 0, 1, 1, 2, 110, 32, },
+ { 2, 1, 1, 2, 110, 32, },
+ { 1, 1, 1, 2, 110, 32, },
+ { 0, 1, 1, 2, 118, 32, },
+ { 2, 1, 1, 2, 118, 32, },
+ { 1, 1, 1, 2, 118, 32, },
+ { 0, 1, 1, 2, 126, 32, },
+ { 2, 1, 1, 2, 126, 32, },
+ { 1, 1, 1, 2, 126, 32, },
+ { 0, 1, 1, 2, 134, 30, },
+ { 2, 1, 1, 2, 134, 32, },
+ { 1, 1, 1, 2, 134, 32, },
+ { 0, 1, 1, 2, 151, 28, },
+ { 2, 1, 1, 2, 151, 32, },
+ { 1, 1, 1, 2, 151, 63, },
+ { 0, 1, 1, 2, 159, 32, },
+ { 2, 1, 1, 2, 159, 32, },
+ { 1, 1, 1, 2, 159, 63, },
+ { 0, 1, 1, 3, 38, 26, },
+ { 2, 1, 1, 3, 38, 30, },
+ { 1, 1, 1, 3, 38, 30, },
+ { 0, 1, 1, 3, 46, 30, },
+ { 2, 1, 1, 3, 46, 30, },
+ { 1, 1, 1, 3, 46, 30, },
+ { 0, 1, 1, 3, 54, 30, },
+ { 2, 1, 1, 3, 54, 30, },
+ { 1, 1, 1, 3, 54, 30, },
+ { 0, 1, 1, 3, 62, 26, },
+ { 2, 1, 1, 3, 62, 30, },
+ { 1, 1, 1, 3, 62, 30, },
+ { 0, 1, 1, 3, 102, 26, },
+ { 2, 1, 1, 3, 102, 30, },
+ { 1, 1, 1, 3, 102, 30, },
+ { 0, 1, 1, 3, 110, 30, },
+ { 2, 1, 1, 3, 110, 30, },
+ { 1, 1, 1, 3, 110, 30, },
+ { 0, 1, 1, 3, 118, 30, },
+ { 2, 1, 1, 3, 118, 30, },
+ { 1, 1, 1, 3, 118, 30, },
+ { 0, 1, 1, 3, 126, 30, },
+ { 2, 1, 1, 3, 126, 30, },
+ { 1, 1, 1, 3, 126, 30, },
+ { 0, 1, 1, 3, 134, 28, },
+ { 2, 1, 1, 3, 134, 30, },
+ { 1, 1, 1, 3, 134, 30, },
+ { 0, 1, 1, 3, 151, 26, },
+ { 2, 1, 1, 3, 151, 30, },
+ { 1, 1, 1, 3, 151, 63, },
+ { 0, 1, 1, 3, 159, 30, },
+ { 2, 1, 1, 3, 159, 30, },
+ { 1, 1, 1, 3, 159, 63, },
+ { 0, 1, 1, 6, 38, 20, },
+ { 2, 1, 1, 6, 38, 28, },
+ { 1, 1, 1, 6, 38, 28, },
+ { 0, 1, 1, 6, 46, 28, },
+ { 2, 1, 1, 6, 46, 28, },
+ { 1, 1, 1, 6, 46, 28, },
+ { 0, 1, 1, 6, 54, 28, },
+ { 2, 1, 1, 6, 54, 28, },
+ { 1, 1, 1, 6, 54, 28, },
+ { 0, 1, 1, 6, 62, 20, },
+ { 2, 1, 1, 6, 62, 28, },
+ { 1, 1, 1, 6, 62, 28, },
+ { 0, 1, 1, 6, 102, 22, },
+ { 2, 1, 1, 6, 102, 28, },
+ { 1, 1, 1, 6, 102, 28, },
+ { 0, 1, 1, 6, 110, 28, },
+ { 2, 1, 1, 6, 110, 28, },
+ { 1, 1, 1, 6, 110, 28, },
+ { 0, 1, 1, 6, 118, 28, },
+ { 2, 1, 1, 6, 118, 28, },
+ { 1, 1, 1, 6, 118, 28, },
+ { 0, 1, 1, 6, 126, 28, },
+ { 2, 1, 1, 6, 126, 28, },
+ { 1, 1, 1, 6, 126, 28, },
+ { 0, 1, 1, 6, 134, 26, },
+ { 2, 1, 1, 6, 134, 28, },
+ { 1, 1, 1, 6, 134, 28, },
+ { 0, 1, 1, 6, 151, 22, },
+ { 2, 1, 1, 6, 151, 28, },
+ { 1, 1, 1, 6, 151, 63, },
+ { 0, 1, 1, 6, 159, 28, },
+ { 2, 1, 1, 6, 159, 28, },
+ { 1, 1, 1, 6, 159, 63, },
+ { 0, 1, 1, 7, 38, 18, },
+ { 2, 1, 1, 7, 38, 26, },
+ { 1, 1, 1, 7, 38, 26, },
+ { 0, 1, 1, 7, 46, 26, },
+ { 2, 1, 1, 7, 46, 26, },
+ { 1, 1, 1, 7, 46, 26, },
+ { 0, 1, 1, 7, 54, 26, },
+ { 2, 1, 1, 7, 54, 26, },
+ { 1, 1, 1, 7, 54, 26, },
+ { 0, 1, 1, 7, 62, 18, },
+ { 2, 1, 1, 7, 62, 26, },
+ { 1, 1, 1, 7, 62, 26, },
+ { 0, 1, 1, 7, 102, 20, },
+ { 2, 1, 1, 7, 102, 26, },
+ { 1, 1, 1, 7, 102, 26, },
+ { 0, 1, 1, 7, 110, 26, },
+ { 2, 1, 1, 7, 110, 26, },
+ { 1, 1, 1, 7, 110, 26, },
+ { 0, 1, 1, 7, 118, 26, },
+ { 2, 1, 1, 7, 118, 26, },
+ { 1, 1, 1, 7, 118, 26, },
+ { 0, 1, 1, 7, 126, 26, },
+ { 2, 1, 1, 7, 126, 26, },
+ { 1, 1, 1, 7, 126, 26, },
+ { 0, 1, 1, 7, 134, 24, },
+ { 2, 1, 1, 7, 134, 26, },
+ { 1, 1, 1, 7, 134, 26, },
+ { 0, 1, 1, 7, 151, 20, },
+ { 2, 1, 1, 7, 151, 26, },
+ { 1, 1, 1, 7, 151, 63, },
+ { 0, 1, 1, 7, 159, 26, },
+ { 2, 1, 1, 7, 159, 26, },
+ { 1, 1, 1, 7, 159, 63, },
+ { 0, 1, 2, 4, 42, 24, },
+ { 2, 1, 2, 4, 42, 32, },
+ { 1, 1, 2, 4, 42, 32, },
+ { 0, 1, 2, 4, 58, 24, },
+ { 2, 1, 2, 4, 58, 32, },
+ { 1, 1, 2, 4, 58, 32, },
+ { 0, 1, 2, 4, 106, 24, },
+ { 2, 1, 2, 4, 106, 32, },
+ { 1, 1, 2, 4, 106, 32, },
+ { 0, 1, 2, 4, 122, 32, },
+ { 2, 1, 2, 4, 122, 32, },
+ { 1, 1, 2, 4, 122, 32, },
+ { 0, 1, 2, 4, 155, 26, },
+ { 2, 1, 2, 4, 155, 32, },
+ { 1, 1, 2, 4, 155, 63, },
+ { 0, 1, 2, 5, 42, 22, },
+ { 2, 1, 2, 5, 42, 30, },
+ { 1, 1, 2, 5, 42, 30, },
+ { 0, 1, 2, 5, 58, 22, },
+ { 2, 1, 2, 5, 58, 30, },
+ { 1, 1, 2, 5, 58, 30, },
+ { 0, 1, 2, 5, 106, 22, },
+ { 2, 1, 2, 5, 106, 30, },
+ { 1, 1, 2, 5, 106, 30, },
+ { 0, 1, 2, 5, 122, 30, },
+ { 2, 1, 2, 5, 122, 30, },
+ { 1, 1, 2, 5, 122, 30, },
+ { 0, 1, 2, 5, 155, 24, },
+ { 2, 1, 2, 5, 155, 30, },
+ { 1, 1, 2, 5, 155, 63, },
+ { 0, 1, 2, 8, 42, 20, },
+ { 2, 1, 2, 8, 42, 28, },
+ { 1, 1, 2, 8, 42, 28, },
+ { 0, 1, 2, 8, 58, 20, },
+ { 2, 1, 2, 8, 58, 28, },
+ { 1, 1, 2, 8, 58, 28, },
+ { 0, 1, 2, 8, 106, 20, },
+ { 2, 1, 2, 8, 106, 28, },
+ { 1, 1, 2, 8, 106, 28, },
+ { 0, 1, 2, 8, 122, 28, },
+ { 2, 1, 2, 8, 122, 28, },
+ { 1, 1, 2, 8, 122, 28, },
+ { 0, 1, 2, 8, 155, 20, },
+ { 2, 1, 2, 8, 155, 28, },
+ { 1, 1, 2, 8, 155, 63, },
+ { 0, 1, 2, 9, 42, 18, },
+ { 2, 1, 2, 9, 42, 26, },
+ { 1, 1, 2, 9, 42, 26, },
+ { 0, 1, 2, 9, 58, 18, },
+ { 2, 1, 2, 9, 58, 26, },
+ { 1, 1, 2, 9, 58, 26, },
+ { 0, 1, 2, 9, 106, 18, },
+ { 2, 1, 2, 9, 106, 26, },
+ { 1, 1, 2, 9, 106, 26, },
+ { 0, 1, 2, 9, 122, 26, },
+ { 2, 1, 2, 9, 122, 26, },
+ { 1, 1, 2, 9, 122, 26, },
+ { 0, 1, 2, 9, 155, 18, },
+ { 2, 1, 2, 9, 155, 26, },
+ { 1, 1, 2, 9, 155, 63, },
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8814a_txpwr_lmt_type1);
+
+static const struct rtw_txpwr_lmt_cfg_pair rtw8814a_txpwr_lmt_type2[] = {
+ { 0, 0, 0, 0, 1, 42, },
+ { 2, 0, 0, 0, 1, 42, },
+ { 1, 0, 0, 0, 1, 42, },
+ { 0, 0, 0, 0, 2, 50, },
+ { 2, 0, 0, 0, 2, 42, },
+ { 1, 0, 0, 0, 2, 42, },
+ { 0, 0, 0, 0, 3, 50, },
+ { 2, 0, 0, 0, 3, 42, },
+ { 1, 0, 0, 0, 3, 42, },
+ { 0, 0, 0, 0, 4, 50, },
+ { 2, 0, 0, 0, 4, 42, },
+ { 1, 0, 0, 0, 4, 42, },
+ { 0, 0, 0, 0, 5, 50, },
+ { 2, 0, 0, 0, 5, 42, },
+ { 1, 0, 0, 0, 5, 42, },
+ { 0, 0, 0, 0, 6, 50, },
+ { 2, 0, 0, 0, 6, 42, },
+ { 1, 0, 0, 0, 6, 42, },
+ { 0, 0, 0, 0, 7, 50, },
+ { 2, 0, 0, 0, 7, 42, },
+ { 1, 0, 0, 0, 7, 42, },
+ { 0, 0, 0, 0, 8, 50, },
+ { 2, 0, 0, 0, 8, 42, },
+ { 1, 0, 0, 0, 8, 42, },
+ { 0, 0, 0, 0, 9, 50, },
+ { 2, 0, 0, 0, 9, 42, },
+ { 1, 0, 0, 0, 9, 42, },
+ { 0, 0, 0, 0, 10, 50, },
+ { 2, 0, 0, 0, 10, 42, },
+ { 1, 0, 0, 0, 10, 42, },
+ { 0, 0, 0, 0, 11, 44, },
+ { 2, 0, 0, 0, 11, 42, },
+ { 1, 0, 0, 0, 11, 42, },
+ { 0, 0, 0, 0, 12, 63, },
+ { 2, 0, 0, 0, 12, 42, },
+ { 1, 0, 0, 0, 12, 42, },
+ { 0, 0, 0, 0, 13, 63, },
+ { 2, 0, 0, 0, 13, 42, },
+ { 1, 0, 0, 0, 13, 42, },
+ { 0, 0, 0, 0, 14, 63, },
+ { 2, 0, 0, 0, 14, 63, },
+ { 1, 0, 0, 0, 14, 42, },
+ { 0, 0, 0, 1, 1, 32, },
+ { 2, 0, 0, 1, 1, 42, },
+ { 1, 0, 0, 1, 1, 42, },
+ { 0, 0, 0, 1, 2, 42, },
+ { 2, 0, 0, 1, 2, 42, },
+ { 1, 0, 0, 1, 2, 42, },
+ { 0, 0, 0, 1, 3, 42, },
+ { 2, 0, 0, 1, 3, 42, },
+ { 1, 0, 0, 1, 3, 42, },
+ { 0, 0, 0, 1, 4, 42, },
+ { 2, 0, 0, 1, 4, 42, },
+ { 1, 0, 0, 1, 4, 42, },
+ { 0, 0, 0, 1, 5, 42, },
+ { 2, 0, 0, 1, 5, 42, },
+ { 1, 0, 0, 1, 5, 42, },
+ { 0, 0, 0, 1, 6, 42, },
+ { 2, 0, 0, 1, 6, 42, },
+ { 1, 0, 0, 1, 6, 42, },
+ { 0, 0, 0, 1, 7, 42, },
+ { 2, 0, 0, 1, 7, 42, },
+ { 1, 0, 0, 1, 7, 42, },
+ { 0, 0, 0, 1, 8, 42, },
+ { 2, 0, 0, 1, 8, 42, },
+ { 1, 0, 0, 1, 8, 42, },
+ { 0, 0, 0, 1, 9, 42, },
+ { 2, 0, 0, 1, 9, 42, },
+ { 1, 0, 0, 1, 9, 42, },
+ { 0, 0, 0, 1, 10, 42, },
+ { 2, 0, 0, 1, 10, 42, },
+ { 1, 0, 0, 1, 10, 42, },
+ { 0, 0, 0, 1, 11, 32, },
+ { 2, 0, 0, 1, 11, 42, },
+ { 1, 0, 0, 1, 11, 42, },
+ { 0, 0, 0, 1, 12, 63, },
+ { 2, 0, 0, 1, 12, 42, },
+ { 1, 0, 0, 1, 12, 42, },
+ { 0, 0, 0, 1, 13, 63, },
+ { 2, 0, 0, 1, 13, 42, },
+ { 1, 0, 0, 1, 13, 42, },
+ { 0, 0, 0, 1, 14, 63, },
+ { 2, 0, 0, 1, 14, 63, },
+ { 1, 0, 0, 1, 14, 63, },
+ { 0, 0, 0, 2, 1, 32, },
+ { 2, 0, 0, 2, 1, 42, },
+ { 1, 0, 0, 2, 1, 42, },
+ { 0, 0, 0, 2, 2, 40, },
+ { 2, 0, 0, 2, 2, 42, },
+ { 1, 0, 0, 2, 2, 42, },
+ { 0, 0, 0, 2, 3, 40, },
+ { 2, 0, 0, 2, 3, 42, },
+ { 1, 0, 0, 2, 3, 42, },
+ { 0, 0, 0, 2, 4, 40, },
+ { 2, 0, 0, 2, 4, 42, },
+ { 1, 0, 0, 2, 4, 42, },
+ { 0, 0, 0, 2, 5, 40, },
+ { 2, 0, 0, 2, 5, 42, },
+ { 1, 0, 0, 2, 5, 42, },
+ { 0, 0, 0, 2, 6, 40, },
+ { 2, 0, 0, 2, 6, 42, },
+ { 1, 0, 0, 2, 6, 42, },
+ { 0, 0, 0, 2, 7, 40, },
+ { 2, 0, 0, 2, 7, 42, },
+ { 1, 0, 0, 2, 7, 42, },
+ { 0, 0, 0, 2, 8, 40, },
+ { 2, 0, 0, 2, 8, 42, },
+ { 1, 0, 0, 2, 8, 42, },
+ { 0, 0, 0, 2, 9, 40, },
+ { 2, 0, 0, 2, 9, 42, },
+ { 1, 0, 0, 2, 9, 42, },
+ { 0, 0, 0, 2, 10, 40, },
+ { 2, 0, 0, 2, 10, 42, },
+ { 1, 0, 0, 2, 10, 42, },
+ { 0, 0, 0, 2, 11, 28, },
+ { 2, 0, 0, 2, 11, 42, },
+ { 1, 0, 0, 2, 11, 42, },
+ { 0, 0, 0, 2, 12, 63, },
+ { 2, 0, 0, 2, 12, 42, },
+ { 1, 0, 0, 2, 12, 42, },
+ { 0, 0, 0, 2, 13, 63, },
+ { 2, 0, 0, 2, 13, 42, },
+ { 1, 0, 0, 2, 13, 42, },
+ { 0, 0, 0, 2, 14, 63, },
+ { 2, 0, 0, 2, 14, 63, },
+ { 1, 0, 0, 2, 14, 63, },
+ { 0, 0, 0, 3, 1, 32, },
+ { 2, 0, 0, 3, 1, 40, },
+ { 1, 0, 0, 3, 1, 40, },
+ { 0, 0, 0, 3, 2, 40, },
+ { 2, 0, 0, 3, 2, 40, },
+ { 1, 0, 0, 3, 2, 40, },
+ { 0, 0, 0, 3, 3, 40, },
+ { 2, 0, 0, 3, 3, 40, },
+ { 1, 0, 0, 3, 3, 40, },
+ { 0, 0, 0, 3, 4, 40, },
+ { 2, 0, 0, 3, 4, 40, },
+ { 1, 0, 0, 3, 4, 40, },
+ { 0, 0, 0, 3, 5, 40, },
+ { 2, 0, 0, 3, 5, 40, },
+ { 1, 0, 0, 3, 5, 40, },
+ { 0, 0, 0, 3, 6, 40, },
+ { 2, 0, 0, 3, 6, 40, },
+ { 1, 0, 0, 3, 6, 40, },
+ { 0, 0, 0, 3, 7, 40, },
+ { 2, 0, 0, 3, 7, 40, },
+ { 1, 0, 0, 3, 7, 40, },
+ { 0, 0, 0, 3, 8, 40, },
+ { 2, 0, 0, 3, 8, 40, },
+ { 1, 0, 0, 3, 8, 40, },
+ { 0, 0, 0, 3, 9, 40, },
+ { 2, 0, 0, 3, 9, 40, },
+ { 1, 0, 0, 3, 9, 40, },
+ { 0, 0, 0, 3, 10, 40, },
+ { 2, 0, 0, 3, 10, 40, },
+ { 1, 0, 0, 3, 10, 40, },
+ { 0, 0, 0, 3, 11, 28, },
+ { 2, 0, 0, 3, 11, 40, },
+ { 1, 0, 0, 3, 11, 40, },
+ { 0, 0, 0, 3, 12, 63, },
+ { 2, 0, 0, 3, 12, 40, },
+ { 1, 0, 0, 3, 12, 40, },
+ { 0, 0, 0, 3, 13, 63, },
+ { 2, 0, 0, 3, 13, 40, },
+ { 1, 0, 0, 3, 13, 40, },
+ { 0, 0, 0, 3, 14, 63, },
+ { 2, 0, 0, 3, 14, 63, },
+ { 1, 0, 0, 3, 14, 63, },
+ { 0, 0, 0, 6, 1, 32, },
+ { 2, 0, 0, 6, 1, 40, },
+ { 1, 0, 0, 6, 1, 40, },
+ { 0, 0, 0, 6, 2, 40, },
+ { 2, 0, 0, 6, 2, 40, },
+ { 1, 0, 0, 6, 2, 40, },
+ { 0, 0, 0, 6, 3, 40, },
+ { 2, 0, 0, 6, 3, 40, },
+ { 1, 0, 0, 6, 3, 40, },
+ { 0, 0, 0, 6, 4, 40, },
+ { 2, 0, 0, 6, 4, 40, },
+ { 1, 0, 0, 6, 4, 40, },
+ { 0, 0, 0, 6, 5, 40, },
+ { 2, 0, 0, 6, 5, 40, },
+ { 1, 0, 0, 6, 5, 40, },
+ { 0, 0, 0, 6, 6, 40, },
+ { 2, 0, 0, 6, 6, 40, },
+ { 1, 0, 0, 6, 6, 40, },
+ { 0, 0, 0, 6, 7, 40, },
+ { 2, 0, 0, 6, 7, 40, },
+ { 1, 0, 0, 6, 7, 40, },
+ { 0, 0, 0, 6, 8, 40, },
+ { 2, 0, 0, 6, 8, 40, },
+ { 1, 0, 0, 6, 8, 40, },
+ { 0, 0, 0, 6, 9, 40, },
+ { 2, 0, 0, 6, 9, 40, },
+ { 1, 0, 0, 6, 9, 40, },
+ { 0, 0, 0, 6, 10, 40, },
+ { 2, 0, 0, 6, 10, 40, },
+ { 1, 0, 0, 6, 10, 40, },
+ { 0, 0, 0, 6, 11, 28, },
+ { 2, 0, 0, 6, 11, 40, },
+ { 1, 0, 0, 6, 11, 40, },
+ { 0, 0, 0, 6, 12, 63, },
+ { 2, 0, 0, 6, 12, 40, },
+ { 1, 0, 0, 6, 12, 40, },
+ { 0, 0, 0, 6, 13, 63, },
+ { 2, 0, 0, 6, 13, 40, },
+ { 1, 0, 0, 6, 13, 40, },
+ { 0, 0, 0, 6, 14, 63, },
+ { 2, 0, 0, 6, 14, 63, },
+ { 1, 0, 0, 6, 14, 63, },
+ { 0, 0, 0, 7, 1, 32, },
+ { 2, 0, 0, 7, 1, 40, },
+ { 1, 0, 0, 7, 1, 40, },
+ { 0, 0, 0, 7, 2, 40, },
+ { 2, 0, 0, 7, 2, 40, },
+ { 1, 0, 0, 7, 2, 40, },
+ { 0, 0, 0, 7, 3, 40, },
+ { 2, 0, 0, 7, 3, 40, },
+ { 1, 0, 0, 7, 3, 40, },
+ { 0, 0, 0, 7, 4, 40, },
+ { 2, 0, 0, 7, 4, 40, },
+ { 1, 0, 0, 7, 4, 40, },
+ { 0, 0, 0, 7, 5, 40, },
+ { 2, 0, 0, 7, 5, 40, },
+ { 1, 0, 0, 7, 5, 40, },
+ { 0, 0, 0, 7, 6, 40, },
+ { 2, 0, 0, 7, 6, 40, },
+ { 1, 0, 0, 7, 6, 40, },
+ { 0, 0, 0, 7, 7, 40, },
+ { 2, 0, 0, 7, 7, 40, },
+ { 1, 0, 0, 7, 7, 40, },
+ { 0, 0, 0, 7, 8, 40, },
+ { 2, 0, 0, 7, 8, 40, },
+ { 1, 0, 0, 7, 8, 40, },
+ { 0, 0, 0, 7, 9, 40, },
+ { 2, 0, 0, 7, 9, 40, },
+ { 1, 0, 0, 7, 9, 40, },
+ { 0, 0, 0, 7, 10, 40, },
+ { 2, 0, 0, 7, 10, 40, },
+ { 1, 0, 0, 7, 10, 40, },
+ { 0, 0, 0, 7, 11, 28, },
+ { 2, 0, 0, 7, 11, 40, },
+ { 1, 0, 0, 7, 11, 40, },
+ { 0, 0, 0, 7, 12, 63, },
+ { 2, 0, 0, 7, 12, 40, },
+ { 1, 0, 0, 7, 12, 40, },
+ { 0, 0, 0, 7, 13, 63, },
+ { 2, 0, 0, 7, 13, 40, },
+ { 1, 0, 0, 7, 13, 40, },
+ { 0, 0, 0, 7, 14, 63, },
+ { 2, 0, 0, 7, 14, 63, },
+ { 1, 0, 0, 7, 14, 63, },
+ { 0, 0, 1, 2, 1, 63, },
+ { 2, 0, 1, 2, 1, 63, },
+ { 1, 0, 1, 2, 1, 63, },
+ { 0, 0, 1, 2, 2, 63, },
+ { 2, 0, 1, 2, 2, 63, },
+ { 1, 0, 1, 2, 2, 63, },
+ { 0, 0, 1, 2, 3, 30, },
+ { 2, 0, 1, 2, 3, 34, },
+ { 1, 0, 1, 2, 3, 34, },
+ { 0, 0, 1, 2, 4, 34, },
+ { 2, 0, 1, 2, 4, 34, },
+ { 1, 0, 1, 2, 4, 34, },
+ { 0, 0, 1, 2, 5, 34, },
+ { 2, 0, 1, 2, 5, 34, },
+ { 1, 0, 1, 2, 5, 34, },
+ { 0, 0, 1, 2, 6, 34, },
+ { 2, 0, 1, 2, 6, 34, },
+ { 1, 0, 1, 2, 6, 34, },
+ { 0, 0, 1, 2, 7, 34, },
+ { 2, 0, 1, 2, 7, 34, },
+ { 1, 0, 1, 2, 7, 34, },
+ { 0, 0, 1, 2, 8, 34, },
+ { 2, 0, 1, 2, 8, 34, },
+ { 1, 0, 1, 2, 8, 34, },
+ { 0, 0, 1, 2, 9, 34, },
+ { 2, 0, 1, 2, 9, 34, },
+ { 1, 0, 1, 2, 9, 34, },
+ { 0, 0, 1, 2, 10, 34, },
+ { 2, 0, 1, 2, 10, 34, },
+ { 1, 0, 1, 2, 10, 34, },
+ { 0, 0, 1, 2, 11, 28, },
+ { 2, 0, 1, 2, 11, 34, },
+ { 1, 0, 1, 2, 11, 34, },
+ { 0, 0, 1, 2, 12, 63, },
+ { 2, 0, 1, 2, 12, 34, },
+ { 1, 0, 1, 2, 12, 34, },
+ { 0, 0, 1, 2, 13, 63, },
+ { 2, 0, 1, 2, 13, 34, },
+ { 1, 0, 1, 2, 13, 34, },
+ { 0, 0, 1, 2, 14, 63, },
+ { 2, 0, 1, 2, 14, 63, },
+ { 1, 0, 1, 2, 14, 63, },
+ { 0, 0, 1, 3, 1, 63, },
+ { 2, 0, 1, 3, 1, 63, },
+ { 1, 0, 1, 3, 1, 63, },
+ { 0, 0, 1, 3, 2, 63, },
+ { 2, 0, 1, 3, 2, 63, },
+ { 1, 0, 1, 3, 2, 63, },
+ { 0, 0, 1, 3, 3, 30, },
+ { 2, 0, 1, 3, 3, 34, },
+ { 1, 0, 1, 3, 3, 34, },
+ { 0, 0, 1, 3, 4, 34, },
+ { 2, 0, 1, 3, 4, 34, },
+ { 1, 0, 1, 3, 4, 34, },
+ { 0, 0, 1, 3, 5, 34, },
+ { 2, 0, 1, 3, 5, 34, },
+ { 1, 0, 1, 3, 5, 34, },
+ { 0, 0, 1, 3, 6, 34, },
+ { 2, 0, 1, 3, 6, 34, },
+ { 1, 0, 1, 3, 6, 34, },
+ { 0, 0, 1, 3, 7, 34, },
+ { 2, 0, 1, 3, 7, 34, },
+ { 1, 0, 1, 3, 7, 34, },
+ { 0, 0, 1, 3, 8, 34, },
+ { 2, 0, 1, 3, 8, 34, },
+ { 1, 0, 1, 3, 8, 34, },
+ { 0, 0, 1, 3, 9, 34, },
+ { 2, 0, 1, 3, 9, 34, },
+ { 1, 0, 1, 3, 9, 34, },
+ { 0, 0, 1, 3, 10, 34, },
+ { 2, 0, 1, 3, 10, 34, },
+ { 1, 0, 1, 3, 10, 34, },
+ { 0, 0, 1, 3, 11, 28, },
+ { 2, 0, 1, 3, 11, 34, },
+ { 1, 0, 1, 3, 11, 34, },
+ { 0, 0, 1, 3, 12, 63, },
+ { 2, 0, 1, 3, 12, 34, },
+ { 1, 0, 1, 3, 12, 34, },
+ { 0, 0, 1, 3, 13, 63, },
+ { 2, 0, 1, 3, 13, 34, },
+ { 1, 0, 1, 3, 13, 34, },
+ { 0, 0, 1, 3, 14, 63, },
+ { 2, 0, 1, 3, 14, 63, },
+ { 1, 0, 1, 3, 14, 63, },
+ { 0, 0, 1, 6, 1, 63, },
+ { 2, 0, 1, 6, 1, 63, },
+ { 1, 0, 1, 6, 1, 63, },
+ { 0, 0, 1, 6, 2, 63, },
+ { 2, 0, 1, 6, 2, 63, },
+ { 1, 0, 1, 6, 2, 63, },
+ { 0, 0, 1, 6, 3, 30, },
+ { 2, 0, 1, 6, 3, 34, },
+ { 1, 0, 1, 6, 3, 34, },
+ { 0, 0, 1, 6, 4, 34, },
+ { 2, 0, 1, 6, 4, 34, },
+ { 1, 0, 1, 6, 4, 34, },
+ { 0, 0, 1, 6, 5, 34, },
+ { 2, 0, 1, 6, 5, 34, },
+ { 1, 0, 1, 6, 5, 34, },
+ { 0, 0, 1, 6, 6, 34, },
+ { 2, 0, 1, 6, 6, 34, },
+ { 1, 0, 1, 6, 6, 34, },
+ { 0, 0, 1, 6, 7, 34, },
+ { 2, 0, 1, 6, 7, 34, },
+ { 1, 0, 1, 6, 7, 34, },
+ { 0, 0, 1, 6, 8, 34, },
+ { 2, 0, 1, 6, 8, 34, },
+ { 1, 0, 1, 6, 8, 34, },
+ { 0, 0, 1, 6, 9, 34, },
+ { 2, 0, 1, 6, 9, 34, },
+ { 1, 0, 1, 6, 9, 34, },
+ { 0, 0, 1, 6, 10, 34, },
+ { 2, 0, 1, 6, 10, 34, },
+ { 1, 0, 1, 6, 10, 34, },
+ { 0, 0, 1, 6, 11, 28, },
+ { 2, 0, 1, 6, 11, 34, },
+ { 1, 0, 1, 6, 11, 34, },
+ { 0, 0, 1, 6, 12, 63, },
+ { 2, 0, 1, 6, 12, 34, },
+ { 1, 0, 1, 6, 12, 34, },
+ { 0, 0, 1, 6, 13, 63, },
+ { 2, 0, 1, 6, 13, 34, },
+ { 1, 0, 1, 6, 13, 34, },
+ { 0, 0, 1, 6, 14, 63, },
+ { 2, 0, 1, 6, 14, 63, },
+ { 1, 0, 1, 6, 14, 63, },
+ { 0, 0, 1, 7, 1, 63, },
+ { 2, 0, 1, 7, 1, 63, },
+ { 1, 0, 1, 7, 1, 63, },
+ { 0, 0, 1, 7, 2, 63, },
+ { 2, 0, 1, 7, 2, 63, },
+ { 1, 0, 1, 7, 2, 63, },
+ { 0, 0, 1, 7, 3, 30, },
+ { 2, 0, 1, 7, 3, 34, },
+ { 1, 0, 1, 7, 3, 34, },
+ { 0, 0, 1, 7, 4, 34, },
+ { 2, 0, 1, 7, 4, 34, },
+ { 1, 0, 1, 7, 4, 34, },
+ { 0, 0, 1, 7, 5, 34, },
+ { 2, 0, 1, 7, 5, 34, },
+ { 1, 0, 1, 7, 5, 34, },
+ { 0, 0, 1, 7, 6, 34, },
+ { 2, 0, 1, 7, 6, 34, },
+ { 1, 0, 1, 7, 6, 34, },
+ { 0, 0, 1, 7, 7, 34, },
+ { 2, 0, 1, 7, 7, 34, },
+ { 1, 0, 1, 7, 7, 34, },
+ { 0, 0, 1, 7, 8, 34, },
+ { 2, 0, 1, 7, 8, 34, },
+ { 1, 0, 1, 7, 8, 34, },
+ { 0, 0, 1, 7, 9, 34, },
+ { 2, 0, 1, 7, 9, 34, },
+ { 1, 0, 1, 7, 9, 34, },
+ { 0, 0, 1, 7, 10, 34, },
+ { 2, 0, 1, 7, 10, 34, },
+ { 1, 0, 1, 7, 10, 34, },
+ { 0, 0, 1, 7, 11, 28, },
+ { 2, 0, 1, 7, 11, 34, },
+ { 1, 0, 1, 7, 11, 34, },
+ { 0, 0, 1, 7, 12, 63, },
+ { 2, 0, 1, 7, 12, 34, },
+ { 1, 0, 1, 7, 12, 34, },
+ { 0, 0, 1, 7, 13, 63, },
+ { 2, 0, 1, 7, 13, 34, },
+ { 1, 0, 1, 7, 13, 34, },
+ { 0, 0, 1, 7, 14, 63, },
+ { 2, 0, 1, 7, 14, 63, },
+ { 1, 0, 1, 7, 14, 63, },
+ { 0, 1, 0, 1, 36, 42, },
+ { 2, 1, 0, 1, 36, 42, },
+ { 1, 1, 0, 1, 36, 42, },
+ { 0, 1, 0, 1, 40, 42, },
+ { 2, 1, 0, 1, 40, 42, },
+ { 1, 1, 0, 1, 40, 42, },
+ { 0, 1, 0, 1, 44, 42, },
+ { 2, 1, 0, 1, 44, 42, },
+ { 1, 1, 0, 1, 44, 42, },
+ { 0, 1, 0, 1, 48, 42, },
+ { 2, 1, 0, 1, 48, 42, },
+ { 1, 1, 0, 1, 48, 42, },
+ { 0, 1, 0, 1, 52, 42, },
+ { 2, 1, 0, 1, 52, 42, },
+ { 1, 1, 0, 1, 52, 42, },
+ { 0, 1, 0, 1, 56, 42, },
+ { 2, 1, 0, 1, 56, 42, },
+ { 1, 1, 0, 1, 56, 42, },
+ { 0, 1, 0, 1, 60, 42, },
+ { 2, 1, 0, 1, 60, 42, },
+ { 1, 1, 0, 1, 60, 42, },
+ { 0, 1, 0, 1, 64, 42, },
+ { 2, 1, 0, 1, 64, 42, },
+ { 1, 1, 0, 1, 64, 42, },
+ { 0, 1, 0, 1, 100, 42, },
+ { 2, 1, 0, 1, 100, 42, },
+ { 1, 1, 0, 1, 100, 42, },
+ { 0, 1, 0, 1, 104, 42, },
+ { 2, 1, 0, 1, 104, 42, },
+ { 1, 1, 0, 1, 104, 42, },
+ { 0, 1, 0, 1, 108, 42, },
+ { 2, 1, 0, 1, 108, 42, },
+ { 1, 1, 0, 1, 108, 42, },
+ { 0, 1, 0, 1, 112, 42, },
+ { 2, 1, 0, 1, 112, 42, },
+ { 1, 1, 0, 1, 112, 42, },
+ { 0, 1, 0, 1, 116, 42, },
+ { 2, 1, 0, 1, 116, 42, },
+ { 1, 1, 0, 1, 116, 42, },
+ { 0, 1, 0, 1, 120, 42, },
+ { 2, 1, 0, 1, 120, 42, },
+ { 1, 1, 0, 1, 120, 42, },
+ { 0, 1, 0, 1, 124, 42, },
+ { 2, 1, 0, 1, 124, 42, },
+ { 1, 1, 0, 1, 124, 42, },
+ { 0, 1, 0, 1, 128, 42, },
+ { 2, 1, 0, 1, 128, 42, },
+ { 1, 1, 0, 1, 128, 42, },
+ { 0, 1, 0, 1, 132, 42, },
+ { 2, 1, 0, 1, 132, 42, },
+ { 1, 1, 0, 1, 132, 42, },
+ { 0, 1, 0, 1, 136, 42, },
+ { 2, 1, 0, 1, 136, 42, },
+ { 1, 1, 0, 1, 136, 42, },
+ { 0, 1, 0, 1, 140, 40, },
+ { 2, 1, 0, 1, 140, 40, },
+ { 1, 1, 0, 1, 140, 40, },
+ { 0, 1, 0, 1, 149, 44, },
+ { 2, 1, 0, 1, 149, 44, },
+ { 1, 1, 0, 1, 149, 63, },
+ { 0, 1, 0, 1, 153, 44, },
+ { 2, 1, 0, 1, 153, 44, },
+ { 1, 1, 0, 1, 153, 63, },
+ { 0, 1, 0, 1, 157, 44, },
+ { 2, 1, 0, 1, 157, 44, },
+ { 1, 1, 0, 1, 157, 63, },
+ { 0, 1, 0, 1, 161, 44, },
+ { 2, 1, 0, 1, 161, 44, },
+ { 1, 1, 0, 1, 161, 63, },
+ { 0, 1, 0, 1, 165, 44, },
+ { 2, 1, 0, 1, 165, 44, },
+ { 1, 1, 0, 1, 165, 63, },
+ { 0, 1, 0, 2, 36, 32, },
+ { 2, 1, 0, 2, 36, 32, },
+ { 1, 1, 0, 2, 36, 32, },
+ { 0, 1, 0, 2, 40, 32, },
+ { 2, 1, 0, 2, 40, 32, },
+ { 1, 1, 0, 2, 40, 32, },
+ { 0, 1, 0, 2, 44, 32, },
+ { 2, 1, 0, 2, 44, 32, },
+ { 1, 1, 0, 2, 44, 32, },
+ { 0, 1, 0, 2, 48, 36, },
+ { 2, 1, 0, 2, 48, 36, },
+ { 1, 1, 0, 2, 48, 36, },
+ { 0, 1, 0, 2, 52, 36, },
+ { 2, 1, 0, 2, 52, 36, },
+ { 1, 1, 0, 2, 52, 36, },
+ { 0, 1, 0, 2, 56, 32, },
+ { 2, 1, 0, 2, 56, 32, },
+ { 1, 1, 0, 2, 56, 32, },
+ { 0, 1, 0, 2, 60, 32, },
+ { 2, 1, 0, 2, 60, 32, },
+ { 1, 1, 0, 2, 60, 32, },
+ { 0, 1, 0, 2, 64, 32, },
+ { 2, 1, 0, 2, 64, 32, },
+ { 1, 1, 0, 2, 64, 32, },
+ { 0, 1, 0, 2, 100, 32, },
+ { 2, 1, 0, 2, 100, 32, },
+ { 1, 1, 0, 2, 100, 32, },
+ { 0, 1, 0, 2, 104, 32, },
+ { 2, 1, 0, 2, 104, 32, },
+ { 1, 1, 0, 2, 104, 32, },
+ { 0, 1, 0, 2, 108, 32, },
+ { 2, 1, 0, 2, 108, 32, },
+ { 1, 1, 0, 2, 108, 32, },
+ { 0, 1, 0, 2, 112, 32, },
+ { 2, 1, 0, 2, 112, 32, },
+ { 1, 1, 0, 2, 112, 32, },
+ { 0, 1, 0, 2, 116, 32, },
+ { 2, 1, 0, 2, 116, 32, },
+ { 1, 1, 0, 2, 116, 32, },
+ { 0, 1, 0, 2, 120, 32, },
+ { 2, 1, 0, 2, 120, 32, },
+ { 1, 1, 0, 2, 120, 32, },
+ { 0, 1, 0, 2, 124, 32, },
+ { 2, 1, 0, 2, 124, 32, },
+ { 1, 1, 0, 2, 124, 32, },
+ { 0, 1, 0, 2, 128, 32, },
+ { 2, 1, 0, 2, 128, 32, },
+ { 1, 1, 0, 2, 128, 32, },
+ { 0, 1, 0, 2, 132, 32, },
+ { 2, 1, 0, 2, 132, 32, },
+ { 1, 1, 0, 2, 132, 32, },
+ { 0, 1, 0, 2, 136, 32, },
+ { 2, 1, 0, 2, 136, 32, },
+ { 1, 1, 0, 2, 136, 32, },
+ { 0, 1, 0, 2, 140, 30, },
+ { 2, 1, 0, 2, 140, 30, },
+ { 1, 1, 0, 2, 140, 30, },
+ { 0, 1, 0, 2, 149, 40, },
+ { 2, 1, 0, 2, 149, 40, },
+ { 1, 1, 0, 2, 149, 63, },
+ { 0, 1, 0, 2, 153, 40, },
+ { 2, 1, 0, 2, 153, 40, },
+ { 1, 1, 0, 2, 153, 63, },
+ { 0, 1, 0, 2, 157, 40, },
+ { 2, 1, 0, 2, 157, 40, },
+ { 1, 1, 0, 2, 157, 63, },
+ { 0, 1, 0, 2, 161, 40, },
+ { 2, 1, 0, 2, 161, 40, },
+ { 1, 1, 0, 2, 161, 63, },
+ { 0, 1, 0, 2, 165, 42, },
+ { 2, 1, 0, 2, 165, 42, },
+ { 1, 1, 0, 2, 165, 63, },
+ { 0, 1, 0, 3, 36, 32, },
+ { 2, 1, 0, 3, 36, 32, },
+ { 1, 1, 0, 3, 36, 32, },
+ { 0, 1, 0, 3, 40, 32, },
+ { 2, 1, 0, 3, 40, 32, },
+ { 1, 1, 0, 3, 40, 32, },
+ { 0, 1, 0, 3, 44, 32, },
+ { 2, 1, 0, 3, 44, 32, },
+ { 1, 1, 0, 3, 44, 32, },
+ { 0, 1, 0, 3, 48, 36, },
+ { 2, 1, 0, 3, 48, 36, },
+ { 1, 1, 0, 3, 48, 36, },
+ { 0, 1, 0, 3, 52, 36, },
+ { 2, 1, 0, 3, 52, 36, },
+ { 1, 1, 0, 3, 52, 36, },
+ { 0, 1, 0, 3, 56, 32, },
+ { 2, 1, 0, 3, 56, 32, },
+ { 1, 1, 0, 3, 56, 32, },
+ { 0, 1, 0, 3, 60, 32, },
+ { 2, 1, 0, 3, 60, 32, },
+ { 1, 1, 0, 3, 60, 32, },
+ { 0, 1, 0, 3, 64, 32, },
+ { 2, 1, 0, 3, 64, 32, },
+ { 1, 1, 0, 3, 64, 32, },
+ { 0, 1, 0, 3, 100, 32, },
+ { 2, 1, 0, 3, 100, 32, },
+ { 1, 1, 0, 3, 100, 32, },
+ { 0, 1, 0, 3, 104, 32, },
+ { 2, 1, 0, 3, 104, 32, },
+ { 1, 1, 0, 3, 104, 32, },
+ { 0, 1, 0, 3, 108, 32, },
+ { 2, 1, 0, 3, 108, 32, },
+ { 1, 1, 0, 3, 108, 32, },
+ { 0, 1, 0, 3, 112, 32, },
+ { 2, 1, 0, 3, 112, 32, },
+ { 1, 1, 0, 3, 112, 32, },
+ { 0, 1, 0, 3, 116, 32, },
+ { 2, 1, 0, 3, 116, 32, },
+ { 1, 1, 0, 3, 116, 32, },
+ { 0, 1, 0, 3, 120, 32, },
+ { 2, 1, 0, 3, 120, 32, },
+ { 1, 1, 0, 3, 120, 32, },
+ { 0, 1, 0, 3, 124, 32, },
+ { 2, 1, 0, 3, 124, 32, },
+ { 1, 1, 0, 3, 124, 32, },
+ { 0, 1, 0, 3, 128, 32, },
+ { 2, 1, 0, 3, 128, 32, },
+ { 1, 1, 0, 3, 128, 32, },
+ { 0, 1, 0, 3, 132, 32, },
+ { 2, 1, 0, 3, 132, 32, },
+ { 1, 1, 0, 3, 132, 32, },
+ { 0, 1, 0, 3, 136, 32, },
+ { 2, 1, 0, 3, 136, 32, },
+ { 1, 1, 0, 3, 136, 32, },
+ { 0, 1, 0, 3, 140, 30, },
+ { 2, 1, 0, 3, 140, 30, },
+ { 1, 1, 0, 3, 140, 30, },
+ { 0, 1, 0, 3, 149, 40, },
+ { 2, 1, 0, 3, 149, 40, },
+ { 1, 1, 0, 3, 149, 63, },
+ { 0, 1, 0, 3, 153, 40, },
+ { 2, 1, 0, 3, 153, 40, },
+ { 1, 1, 0, 3, 153, 63, },
+ { 0, 1, 0, 3, 157, 40, },
+ { 2, 1, 0, 3, 157, 40, },
+ { 1, 1, 0, 3, 157, 63, },
+ { 0, 1, 0, 3, 161, 40, },
+ { 2, 1, 0, 3, 161, 40, },
+ { 1, 1, 0, 3, 161, 63, },
+ { 0, 1, 0, 3, 165, 42, },
+ { 2, 1, 0, 3, 165, 42, },
+ { 1, 1, 0, 3, 165, 63, },
+ { 0, 1, 0, 6, 36, 32, },
+ { 2, 1, 0, 6, 36, 32, },
+ { 1, 1, 0, 6, 36, 32, },
+ { 0, 1, 0, 6, 40, 32, },
+ { 2, 1, 0, 6, 40, 32, },
+ { 1, 1, 0, 6, 40, 32, },
+ { 0, 1, 0, 6, 44, 32, },
+ { 2, 1, 0, 6, 44, 32, },
+ { 1, 1, 0, 6, 44, 32, },
+ { 0, 1, 0, 6, 48, 36, },
+ { 2, 1, 0, 6, 48, 36, },
+ { 1, 1, 0, 6, 48, 36, },
+ { 0, 1, 0, 6, 52, 36, },
+ { 2, 1, 0, 6, 52, 36, },
+ { 1, 1, 0, 6, 52, 36, },
+ { 0, 1, 0, 6, 56, 32, },
+ { 2, 1, 0, 6, 56, 32, },
+ { 1, 1, 0, 6, 56, 32, },
+ { 0, 1, 0, 6, 60, 32, },
+ { 2, 1, 0, 6, 60, 32, },
+ { 1, 1, 0, 6, 60, 32, },
+ { 0, 1, 0, 6, 64, 32, },
+ { 2, 1, 0, 6, 64, 32, },
+ { 1, 1, 0, 6, 64, 32, },
+ { 0, 1, 0, 6, 100, 32, },
+ { 2, 1, 0, 6, 100, 32, },
+ { 1, 1, 0, 6, 100, 32, },
+ { 0, 1, 0, 6, 104, 32, },
+ { 2, 1, 0, 6, 104, 32, },
+ { 1, 1, 0, 6, 104, 32, },
+ { 0, 1, 0, 6, 108, 32, },
+ { 2, 1, 0, 6, 108, 32, },
+ { 1, 1, 0, 6, 108, 32, },
+ { 0, 1, 0, 6, 112, 32, },
+ { 2, 1, 0, 6, 112, 32, },
+ { 1, 1, 0, 6, 112, 32, },
+ { 0, 1, 0, 6, 116, 32, },
+ { 2, 1, 0, 6, 116, 32, },
+ { 1, 1, 0, 6, 116, 32, },
+ { 0, 1, 0, 6, 120, 32, },
+ { 2, 1, 0, 6, 120, 32, },
+ { 1, 1, 0, 6, 120, 32, },
+ { 0, 1, 0, 6, 124, 32, },
+ { 2, 1, 0, 6, 124, 32, },
+ { 1, 1, 0, 6, 124, 32, },
+ { 0, 1, 0, 6, 128, 32, },
+ { 2, 1, 0, 6, 128, 32, },
+ { 1, 1, 0, 6, 128, 32, },
+ { 0, 1, 0, 6, 132, 32, },
+ { 2, 1, 0, 6, 132, 32, },
+ { 1, 1, 0, 6, 132, 32, },
+ { 0, 1, 0, 6, 136, 32, },
+ { 2, 1, 0, 6, 136, 32, },
+ { 1, 1, 0, 6, 136, 32, },
+ { 0, 1, 0, 6, 140, 30, },
+ { 2, 1, 0, 6, 140, 30, },
+ { 1, 1, 0, 6, 140, 30, },
+ { 0, 1, 0, 6, 149, 40, },
+ { 2, 1, 0, 6, 149, 40, },
+ { 1, 1, 0, 6, 149, 63, },
+ { 0, 1, 0, 6, 153, 40, },
+ { 2, 1, 0, 6, 153, 40, },
+ { 1, 1, 0, 6, 153, 63, },
+ { 0, 1, 0, 6, 157, 40, },
+ { 2, 1, 0, 6, 157, 40, },
+ { 1, 1, 0, 6, 157, 63, },
+ { 0, 1, 0, 6, 161, 40, },
+ { 2, 1, 0, 6, 161, 40, },
+ { 1, 1, 0, 6, 161, 63, },
+ { 0, 1, 0, 6, 165, 42, },
+ { 2, 1, 0, 6, 165, 42, },
+ { 1, 1, 0, 6, 165, 63, },
+ { 0, 1, 0, 7, 36, 32, },
+ { 2, 1, 0, 7, 36, 32, },
+ { 1, 1, 0, 7, 36, 32, },
+ { 0, 1, 0, 7, 40, 32, },
+ { 2, 1, 0, 7, 40, 32, },
+ { 1, 1, 0, 7, 40, 32, },
+ { 0, 1, 0, 7, 44, 32, },
+ { 2, 1, 0, 7, 44, 32, },
+ { 1, 1, 0, 7, 44, 32, },
+ { 0, 1, 0, 7, 48, 36, },
+ { 2, 1, 0, 7, 48, 36, },
+ { 1, 1, 0, 7, 48, 36, },
+ { 0, 1, 0, 7, 52, 36, },
+ { 2, 1, 0, 7, 52, 36, },
+ { 1, 1, 0, 7, 52, 36, },
+ { 0, 1, 0, 7, 56, 32, },
+ { 2, 1, 0, 7, 56, 32, },
+ { 1, 1, 0, 7, 56, 32, },
+ { 0, 1, 0, 7, 60, 32, },
+ { 2, 1, 0, 7, 60, 32, },
+ { 1, 1, 0, 7, 60, 32, },
+ { 0, 1, 0, 7, 64, 32, },
+ { 2, 1, 0, 7, 64, 32, },
+ { 1, 1, 0, 7, 64, 32, },
+ { 0, 1, 0, 7, 100, 32, },
+ { 2, 1, 0, 7, 100, 32, },
+ { 1, 1, 0, 7, 100, 32, },
+ { 0, 1, 0, 7, 104, 32, },
+ { 2, 1, 0, 7, 104, 32, },
+ { 1, 1, 0, 7, 104, 32, },
+ { 0, 1, 0, 7, 108, 32, },
+ { 2, 1, 0, 7, 108, 32, },
+ { 1, 1, 0, 7, 108, 32, },
+ { 0, 1, 0, 7, 112, 32, },
+ { 2, 1, 0, 7, 112, 32, },
+ { 1, 1, 0, 7, 112, 32, },
+ { 0, 1, 0, 7, 116, 32, },
+ { 2, 1, 0, 7, 116, 32, },
+ { 1, 1, 0, 7, 116, 32, },
+ { 0, 1, 0, 7, 120, 32, },
+ { 2, 1, 0, 7, 120, 32, },
+ { 1, 1, 0, 7, 120, 32, },
+ { 0, 1, 0, 7, 124, 32, },
+ { 2, 1, 0, 7, 124, 32, },
+ { 1, 1, 0, 7, 124, 32, },
+ { 0, 1, 0, 7, 128, 32, },
+ { 2, 1, 0, 7, 128, 32, },
+ { 1, 1, 0, 7, 128, 32, },
+ { 0, 1, 0, 7, 132, 32, },
+ { 2, 1, 0, 7, 132, 32, },
+ { 1, 1, 0, 7, 132, 32, },
+ { 0, 1, 0, 7, 136, 32, },
+ { 2, 1, 0, 7, 136, 32, },
+ { 1, 1, 0, 7, 136, 32, },
+ { 0, 1, 0, 7, 140, 30, },
+ { 2, 1, 0, 7, 140, 30, },
+ { 1, 1, 0, 7, 140, 30, },
+ { 0, 1, 0, 7, 149, 40, },
+ { 2, 1, 0, 7, 149, 40, },
+ { 1, 1, 0, 7, 149, 63, },
+ { 0, 1, 0, 7, 153, 40, },
+ { 2, 1, 0, 7, 153, 40, },
+ { 1, 1, 0, 7, 153, 63, },
+ { 0, 1, 0, 7, 157, 40, },
+ { 2, 1, 0, 7, 157, 40, },
+ { 1, 1, 0, 7, 157, 63, },
+ { 0, 1, 0, 7, 161, 40, },
+ { 2, 1, 0, 7, 161, 40, },
+ { 1, 1, 0, 7, 161, 63, },
+ { 0, 1, 0, 7, 165, 42, },
+ { 2, 1, 0, 7, 165, 42, },
+ { 1, 1, 0, 7, 165, 63, },
+ { 0, 1, 1, 2, 38, 32, },
+ { 2, 1, 1, 2, 38, 32, },
+ { 1, 1, 1, 2, 38, 32, },
+ { 0, 1, 1, 2, 46, 36, },
+ { 2, 1, 1, 2, 46, 36, },
+ { 1, 1, 1, 2, 46, 36, },
+ { 0, 1, 1, 2, 54, 36, },
+ { 2, 1, 1, 2, 54, 36, },
+ { 1, 1, 1, 2, 54, 36, },
+ { 0, 1, 1, 2, 62, 32, },
+ { 2, 1, 1, 2, 62, 32, },
+ { 1, 1, 1, 2, 62, 32, },
+ { 0, 1, 1, 2, 102, 30, },
+ { 2, 1, 1, 2, 102, 30, },
+ { 1, 1, 1, 2, 102, 30, },
+ { 0, 1, 1, 2, 110, 32, },
+ { 2, 1, 1, 2, 110, 32, },
+ { 1, 1, 1, 2, 110, 32, },
+ { 0, 1, 1, 2, 118, 32, },
+ { 2, 1, 1, 2, 118, 32, },
+ { 1, 1, 1, 2, 118, 32, },
+ { 0, 1, 1, 2, 126, 32, },
+ { 2, 1, 1, 2, 126, 32, },
+ { 1, 1, 1, 2, 126, 32, },
+ { 0, 1, 1, 2, 134, 36, },
+ { 2, 1, 1, 2, 134, 36, },
+ { 1, 1, 1, 2, 134, 36, },
+ { 0, 1, 1, 2, 151, 36, },
+ { 2, 1, 1, 2, 151, 36, },
+ { 1, 1, 1, 2, 151, 63, },
+ { 0, 1, 1, 2, 159, 40, },
+ { 2, 1, 1, 2, 159, 40, },
+ { 1, 1, 1, 2, 159, 63, },
+ { 0, 1, 1, 3, 38, 32, },
+ { 2, 1, 1, 3, 38, 32, },
+ { 1, 1, 1, 3, 38, 32, },
+ { 0, 1, 1, 3, 46, 36, },
+ { 2, 1, 1, 3, 46, 36, },
+ { 1, 1, 1, 3, 46, 36, },
+ { 0, 1, 1, 3, 54, 36, },
+ { 2, 1, 1, 3, 54, 36, },
+ { 1, 1, 1, 3, 54, 36, },
+ { 0, 1, 1, 3, 62, 32, },
+ { 2, 1, 1, 3, 62, 32, },
+ { 1, 1, 1, 3, 62, 32, },
+ { 0, 1, 1, 3, 102, 30, },
+ { 2, 1, 1, 3, 102, 30, },
+ { 1, 1, 1, 3, 102, 30, },
+ { 0, 1, 1, 3, 110, 32, },
+ { 2, 1, 1, 3, 110, 32, },
+ { 1, 1, 1, 3, 110, 32, },
+ { 0, 1, 1, 3, 118, 32, },
+ { 2, 1, 1, 3, 118, 32, },
+ { 1, 1, 1, 3, 118, 32, },
+ { 0, 1, 1, 3, 126, 32, },
+ { 2, 1, 1, 3, 126, 32, },
+ { 1, 1, 1, 3, 126, 32, },
+ { 0, 1, 1, 3, 134, 36, },
+ { 2, 1, 1, 3, 134, 36, },
+ { 1, 1, 1, 3, 134, 36, },
+ { 0, 1, 1, 3, 151, 36, },
+ { 2, 1, 1, 3, 151, 36, },
+ { 1, 1, 1, 3, 151, 63, },
+ { 0, 1, 1, 3, 159, 40, },
+ { 2, 1, 1, 3, 159, 40, },
+ { 1, 1, 1, 3, 159, 63, },
+ { 0, 1, 1, 6, 38, 32, },
+ { 2, 1, 1, 6, 38, 32, },
+ { 1, 1, 1, 6, 38, 32, },
+ { 0, 1, 1, 6, 46, 36, },
+ { 2, 1, 1, 6, 46, 36, },
+ { 1, 1, 1, 6, 46, 36, },
+ { 0, 1, 1, 6, 54, 36, },
+ { 2, 1, 1, 6, 54, 36, },
+ { 1, 1, 1, 6, 54, 36, },
+ { 0, 1, 1, 6, 62, 32, },
+ { 2, 1, 1, 6, 62, 32, },
+ { 1, 1, 1, 6, 62, 32, },
+ { 0, 1, 1, 6, 102, 30, },
+ { 2, 1, 1, 6, 102, 30, },
+ { 1, 1, 1, 6, 102, 30, },
+ { 0, 1, 1, 6, 110, 32, },
+ { 2, 1, 1, 6, 110, 32, },
+ { 1, 1, 1, 6, 110, 32, },
+ { 0, 1, 1, 6, 118, 32, },
+ { 2, 1, 1, 6, 118, 32, },
+ { 1, 1, 1, 6, 118, 32, },
+ { 0, 1, 1, 6, 126, 32, },
+ { 2, 1, 1, 6, 126, 32, },
+ { 1, 1, 1, 6, 126, 32, },
+ { 0, 1, 1, 6, 134, 36, },
+ { 2, 1, 1, 6, 134, 36, },
+ { 1, 1, 1, 6, 134, 36, },
+ { 0, 1, 1, 6, 151, 36, },
+ { 2, 1, 1, 6, 151, 36, },
+ { 1, 1, 1, 6, 151, 63, },
+ { 0, 1, 1, 6, 159, 40, },
+ { 2, 1, 1, 6, 159, 40, },
+ { 1, 1, 1, 6, 159, 63, },
+ { 0, 1, 1, 7, 38, 32, },
+ { 2, 1, 1, 7, 38, 32, },
+ { 1, 1, 1, 7, 38, 32, },
+ { 0, 1, 1, 7, 46, 36, },
+ { 2, 1, 1, 7, 46, 36, },
+ { 1, 1, 1, 7, 46, 36, },
+ { 0, 1, 1, 7, 54, 36, },
+ { 2, 1, 1, 7, 54, 36, },
+ { 1, 1, 1, 7, 54, 36, },
+ { 0, 1, 1, 7, 62, 32, },
+ { 2, 1, 1, 7, 62, 32, },
+ { 1, 1, 1, 7, 62, 32, },
+ { 0, 1, 1, 7, 102, 30, },
+ { 2, 1, 1, 7, 102, 30, },
+ { 1, 1, 1, 7, 102, 30, },
+ { 0, 1, 1, 7, 110, 32, },
+ { 2, 1, 1, 7, 110, 32, },
+ { 1, 1, 1, 7, 110, 32, },
+ { 0, 1, 1, 7, 118, 32, },
+ { 2, 1, 1, 7, 118, 32, },
+ { 1, 1, 1, 7, 118, 32, },
+ { 0, 1, 1, 7, 126, 32, },
+ { 2, 1, 1, 7, 126, 32, },
+ { 1, 1, 1, 7, 126, 32, },
+ { 0, 1, 1, 7, 134, 36, },
+ { 2, 1, 1, 7, 134, 36, },
+ { 1, 1, 1, 7, 134, 36, },
+ { 0, 1, 1, 7, 151, 36, },
+ { 2, 1, 1, 7, 151, 36, },
+ { 1, 1, 1, 7, 151, 63, },
+ { 0, 1, 1, 7, 159, 40, },
+ { 2, 1, 1, 7, 159, 40, },
+ { 1, 1, 1, 7, 159, 63, },
+ { 0, 1, 2, 4, 42, 34, },
+ { 2, 1, 2, 4, 42, 34, },
+ { 1, 1, 2, 4, 42, 34, },
+ { 0, 1, 2, 4, 58, 34, },
+ { 2, 1, 2, 4, 58, 34, },
+ { 1, 1, 2, 4, 58, 34, },
+ { 0, 1, 2, 4, 106, 32, },
+ { 2, 1, 2, 4, 106, 32, },
+ { 1, 1, 2, 4, 106, 32, },
+ { 0, 1, 2, 4, 122, 34, },
+ { 2, 1, 2, 4, 122, 34, },
+ { 1, 1, 2, 4, 122, 34, },
+ { 0, 1, 2, 4, 155, 34, },
+ { 2, 1, 2, 4, 155, 34, },
+ { 1, 1, 2, 4, 155, 63, },
+ { 0, 1, 2, 5, 42, 34, },
+ { 2, 1, 2, 5, 42, 34, },
+ { 1, 1, 2, 5, 42, 34, },
+ { 0, 1, 2, 5, 58, 34, },
+ { 2, 1, 2, 5, 58, 34, },
+ { 1, 1, 2, 5, 58, 34, },
+ { 0, 1, 2, 5, 106, 32, },
+ { 2, 1, 2, 5, 106, 32, },
+ { 1, 1, 2, 5, 106, 32, },
+ { 0, 1, 2, 5, 122, 34, },
+ { 2, 1, 2, 5, 122, 34, },
+ { 1, 1, 2, 5, 122, 34, },
+ { 0, 1, 2, 5, 155, 34, },
+ { 2, 1, 2, 5, 155, 34, },
+ { 1, 1, 2, 5, 155, 63, },
+ { 0, 1, 2, 8, 42, 34, },
+ { 2, 1, 2, 8, 42, 34, },
+ { 1, 1, 2, 8, 42, 34, },
+ { 0, 1, 2, 8, 58, 34, },
+ { 2, 1, 2, 8, 58, 34, },
+ { 1, 1, 2, 8, 58, 34, },
+ { 0, 1, 2, 8, 106, 32, },
+ { 2, 1, 2, 8, 106, 32, },
+ { 1, 1, 2, 8, 106, 32, },
+ { 0, 1, 2, 8, 122, 34, },
+ { 2, 1, 2, 8, 122, 34, },
+ { 1, 1, 2, 8, 122, 34, },
+ { 0, 1, 2, 8, 155, 34, },
+ { 2, 1, 2, 8, 155, 34, },
+ { 1, 1, 2, 8, 155, 63, },
+ { 0, 1, 2, 9, 42, 34, },
+ { 2, 1, 2, 9, 42, 34, },
+ { 1, 1, 2, 9, 42, 34, },
+ { 0, 1, 2, 9, 58, 34, },
+ { 2, 1, 2, 9, 58, 34, },
+ { 1, 1, 2, 9, 58, 34, },
+ { 0, 1, 2, 9, 106, 32, },
+ { 2, 1, 2, 9, 106, 32, },
+ { 1, 1, 2, 9, 106, 32, },
+ { 0, 1, 2, 9, 122, 34, },
+ { 2, 1, 2, 9, 122, 34, },
+ { 1, 1, 2, 9, 122, 34, },
+ { 0, 1, 2, 9, 155, 34, },
+ { 2, 1, 2, 9, 155, 34, },
+ { 1, 1, 2, 9, 155, 63, },
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8814a_txpwr_lmt_type2);
+
+static const struct rtw_txpwr_lmt_cfg_pair rtw8814a_txpwr_lmt_type3[] = {
+ { 0, 0, 0, 0, 1, 46, },
+ { 2, 0, 0, 0, 1, 40, },
+ { 1, 0, 0, 0, 1, 40, },
+ { 0, 0, 0, 0, 2, 46, },
+ { 2, 0, 0, 0, 2, 40, },
+ { 1, 0, 0, 0, 2, 40, },
+ { 0, 0, 0, 0, 3, 46, },
+ { 2, 0, 0, 0, 3, 40, },
+ { 1, 0, 0, 0, 3, 40, },
+ { 0, 0, 0, 0, 4, 46, },
+ { 2, 0, 0, 0, 4, 40, },
+ { 1, 0, 0, 0, 4, 40, },
+ { 0, 0, 0, 0, 5, 46, },
+ { 2, 0, 0, 0, 5, 40, },
+ { 1, 0, 0, 0, 5, 40, },
+ { 0, 0, 0, 0, 6, 46, },
+ { 2, 0, 0, 0, 6, 40, },
+ { 1, 0, 0, 0, 6, 40, },
+ { 0, 0, 0, 0, 7, 46, },
+ { 2, 0, 0, 0, 7, 40, },
+ { 1, 0, 0, 0, 7, 40, },
+ { 0, 0, 0, 0, 8, 46, },
+ { 2, 0, 0, 0, 8, 40, },
+ { 1, 0, 0, 0, 8, 40, },
+ { 0, 0, 0, 0, 9, 46, },
+ { 2, 0, 0, 0, 9, 40, },
+ { 1, 0, 0, 0, 9, 40, },
+ { 0, 0, 0, 0, 10, 46, },
+ { 2, 0, 0, 0, 10, 40, },
+ { 1, 0, 0, 0, 10, 40, },
+ { 0, 0, 0, 0, 11, 46, },
+ { 2, 0, 0, 0, 11, 40, },
+ { 1, 0, 0, 0, 11, 40, },
+ { 0, 0, 0, 0, 12, 63, },
+ { 2, 0, 0, 0, 12, 40, },
+ { 1, 0, 0, 0, 12, 40, },
+ { 0, 0, 0, 0, 13, 63, },
+ { 2, 0, 0, 0, 13, 40, },
+ { 1, 0, 0, 0, 13, 40, },
+ { 0, 0, 0, 0, 14, 63, },
+ { 2, 0, 0, 0, 14, 63, },
+ { 1, 0, 0, 0, 14, 40, },
+ { 0, 0, 0, 1, 1, 46, },
+ { 2, 0, 0, 1, 1, 40, },
+ { 1, 0, 0, 1, 1, 40, },
+ { 0, 0, 0, 1, 2, 46, },
+ { 2, 0, 0, 1, 2, 40, },
+ { 1, 0, 0, 1, 2, 40, },
+ { 0, 0, 0, 1, 3, 46, },
+ { 2, 0, 0, 1, 3, 40, },
+ { 1, 0, 0, 1, 3, 40, },
+ { 0, 0, 0, 1, 4, 46, },
+ { 2, 0, 0, 1, 4, 40, },
+ { 1, 0, 0, 1, 4, 40, },
+ { 0, 0, 0, 1, 5, 46, },
+ { 2, 0, 0, 1, 5, 40, },
+ { 1, 0, 0, 1, 5, 40, },
+ { 0, 0, 0, 1, 6, 46, },
+ { 2, 0, 0, 1, 6, 40, },
+ { 1, 0, 0, 1, 6, 40, },
+ { 0, 0, 0, 1, 7, 46, },
+ { 2, 0, 0, 1, 7, 40, },
+ { 1, 0, 0, 1, 7, 40, },
+ { 0, 0, 0, 1, 8, 46, },
+ { 2, 0, 0, 1, 8, 40, },
+ { 1, 0, 0, 1, 8, 40, },
+ { 0, 0, 0, 1, 9, 46, },
+ { 2, 0, 0, 1, 9, 40, },
+ { 1, 0, 0, 1, 9, 40, },
+ { 0, 0, 0, 1, 10, 46, },
+ { 2, 0, 0, 1, 10, 40, },
+ { 1, 0, 0, 1, 10, 40, },
+ { 0, 0, 0, 1, 11, 46, },
+ { 2, 0, 0, 1, 11, 40, },
+ { 1, 0, 0, 1, 11, 40, },
+ { 0, 0, 0, 1, 12, 63, },
+ { 2, 0, 0, 1, 12, 40, },
+ { 1, 0, 0, 1, 12, 40, },
+ { 0, 0, 0, 1, 13, 63, },
+ { 2, 0, 0, 1, 13, 40, },
+ { 1, 0, 0, 1, 13, 40, },
+ { 0, 0, 0, 1, 14, 63, },
+ { 2, 0, 0, 1, 14, 63, },
+ { 1, 0, 0, 1, 14, 63, },
+ { 0, 0, 0, 2, 1, 46, },
+ { 2, 0, 0, 2, 1, 40, },
+ { 1, 0, 0, 2, 1, 40, },
+ { 0, 0, 0, 2, 2, 46, },
+ { 2, 0, 0, 2, 2, 40, },
+ { 1, 0, 0, 2, 2, 40, },
+ { 0, 0, 0, 2, 3, 46, },
+ { 2, 0, 0, 2, 3, 40, },
+ { 1, 0, 0, 2, 3, 40, },
+ { 0, 0, 0, 2, 4, 46, },
+ { 2, 0, 0, 2, 4, 40, },
+ { 1, 0, 0, 2, 4, 40, },
+ { 0, 0, 0, 2, 5, 46, },
+ { 2, 0, 0, 2, 5, 40, },
+ { 1, 0, 0, 2, 5, 40, },
+ { 0, 0, 0, 2, 6, 46, },
+ { 2, 0, 0, 2, 6, 40, },
+ { 1, 0, 0, 2, 6, 40, },
+ { 0, 0, 0, 2, 7, 46, },
+ { 2, 0, 0, 2, 7, 40, },
+ { 1, 0, 0, 2, 7, 40, },
+ { 0, 0, 0, 2, 8, 46, },
+ { 2, 0, 0, 2, 8, 40, },
+ { 1, 0, 0, 2, 8, 40, },
+ { 0, 0, 0, 2, 9, 46, },
+ { 2, 0, 0, 2, 9, 40, },
+ { 1, 0, 0, 2, 9, 40, },
+ { 0, 0, 0, 2, 10, 46, },
+ { 2, 0, 0, 2, 10, 40, },
+ { 1, 0, 0, 2, 10, 40, },
+ { 0, 0, 0, 2, 11, 46, },
+ { 2, 0, 0, 2, 11, 40, },
+ { 1, 0, 0, 2, 11, 40, },
+ { 0, 0, 0, 2, 12, 63, },
+ { 2, 0, 0, 2, 12, 40, },
+ { 1, 0, 0, 2, 12, 40, },
+ { 0, 0, 0, 2, 13, 63, },
+ { 2, 0, 0, 2, 13, 40, },
+ { 1, 0, 0, 2, 13, 40, },
+ { 0, 0, 0, 2, 14, 63, },
+ { 2, 0, 0, 2, 14, 63, },
+ { 1, 0, 0, 2, 14, 63, },
+ { 0, 0, 0, 3, 1, 46, },
+ { 2, 0, 0, 3, 1, 40, },
+ { 1, 0, 0, 3, 1, 40, },
+ { 0, 0, 0, 3, 2, 46, },
+ { 2, 0, 0, 3, 2, 40, },
+ { 1, 0, 0, 3, 2, 40, },
+ { 0, 0, 0, 3, 3, 46, },
+ { 2, 0, 0, 3, 3, 40, },
+ { 1, 0, 0, 3, 3, 40, },
+ { 0, 0, 0, 3, 4, 46, },
+ { 2, 0, 0, 3, 4, 40, },
+ { 1, 0, 0, 3, 4, 40, },
+ { 0, 0, 0, 3, 5, 46, },
+ { 2, 0, 0, 3, 5, 40, },
+ { 1, 0, 0, 3, 5, 40, },
+ { 0, 0, 0, 3, 6, 46, },
+ { 2, 0, 0, 3, 6, 40, },
+ { 1, 0, 0, 3, 6, 40, },
+ { 0, 0, 0, 3, 7, 46, },
+ { 2, 0, 0, 3, 7, 40, },
+ { 1, 0, 0, 3, 7, 40, },
+ { 0, 0, 0, 3, 8, 46, },
+ { 2, 0, 0, 3, 8, 40, },
+ { 1, 0, 0, 3, 8, 40, },
+ { 0, 0, 0, 3, 9, 46, },
+ { 2, 0, 0, 3, 9, 40, },
+ { 1, 0, 0, 3, 9, 40, },
+ { 0, 0, 0, 3, 10, 46, },
+ { 2, 0, 0, 3, 10, 40, },
+ { 1, 0, 0, 3, 10, 40, },
+ { 0, 0, 0, 3, 11, 46, },
+ { 2, 0, 0, 3, 11, 40, },
+ { 1, 0, 0, 3, 11, 40, },
+ { 0, 0, 0, 3, 12, 63, },
+ { 2, 0, 0, 3, 12, 40, },
+ { 1, 0, 0, 3, 12, 40, },
+ { 0, 0, 0, 3, 13, 63, },
+ { 2, 0, 0, 3, 13, 40, },
+ { 1, 0, 0, 3, 13, 40, },
+ { 0, 0, 0, 3, 14, 63, },
+ { 2, 0, 0, 3, 14, 63, },
+ { 1, 0, 0, 3, 14, 63, },
+ { 0, 0, 0, 6, 1, 46, },
+ { 2, 0, 0, 6, 1, 40, },
+ { 1, 0, 0, 6, 1, 40, },
+ { 0, 0, 0, 6, 2, 46, },
+ { 2, 0, 0, 6, 2, 40, },
+ { 1, 0, 0, 6, 2, 40, },
+ { 0, 0, 0, 6, 3, 46, },
+ { 2, 0, 0, 6, 3, 40, },
+ { 1, 0, 0, 6, 3, 40, },
+ { 0, 0, 0, 6, 4, 46, },
+ { 2, 0, 0, 6, 4, 40, },
+ { 1, 0, 0, 6, 4, 40, },
+ { 0, 0, 0, 6, 5, 46, },
+ { 2, 0, 0, 6, 5, 40, },
+ { 1, 0, 0, 6, 5, 40, },
+ { 0, 0, 0, 6, 6, 46, },
+ { 2, 0, 0, 6, 6, 40, },
+ { 1, 0, 0, 6, 6, 40, },
+ { 0, 0, 0, 6, 7, 46, },
+ { 2, 0, 0, 6, 7, 40, },
+ { 1, 0, 0, 6, 7, 40, },
+ { 0, 0, 0, 6, 8, 46, },
+ { 2, 0, 0, 6, 8, 40, },
+ { 1, 0, 0, 6, 8, 40, },
+ { 0, 0, 0, 6, 9, 46, },
+ { 2, 0, 0, 6, 9, 40, },
+ { 1, 0, 0, 6, 9, 40, },
+ { 0, 0, 0, 6, 10, 46, },
+ { 2, 0, 0, 6, 10, 40, },
+ { 1, 0, 0, 6, 10, 40, },
+ { 0, 0, 0, 6, 11, 46, },
+ { 2, 0, 0, 6, 11, 40, },
+ { 1, 0, 0, 6, 11, 40, },
+ { 0, 0, 0, 6, 12, 63, },
+ { 2, 0, 0, 6, 12, 40, },
+ { 1, 0, 0, 6, 12, 40, },
+ { 0, 0, 0, 6, 13, 63, },
+ { 2, 0, 0, 6, 13, 40, },
+ { 1, 0, 0, 6, 13, 40, },
+ { 0, 0, 0, 6, 14, 63, },
+ { 2, 0, 0, 6, 14, 63, },
+ { 1, 0, 0, 6, 14, 63, },
+ { 0, 0, 0, 7, 1, 46, },
+ { 2, 0, 0, 7, 1, 40, },
+ { 1, 0, 0, 7, 1, 40, },
+ { 0, 0, 0, 7, 2, 46, },
+ { 2, 0, 0, 7, 2, 40, },
+ { 1, 0, 0, 7, 2, 40, },
+ { 0, 0, 0, 7, 3, 46, },
+ { 2, 0, 0, 7, 3, 40, },
+ { 1, 0, 0, 7, 3, 40, },
+ { 0, 0, 0, 7, 4, 46, },
+ { 2, 0, 0, 7, 4, 40, },
+ { 1, 0, 0, 7, 4, 40, },
+ { 0, 0, 0, 7, 5, 46, },
+ { 2, 0, 0, 7, 5, 40, },
+ { 1, 0, 0, 7, 5, 40, },
+ { 0, 0, 0, 7, 6, 46, },
+ { 2, 0, 0, 7, 6, 40, },
+ { 1, 0, 0, 7, 6, 40, },
+ { 0, 0, 0, 7, 7, 46, },
+ { 2, 0, 0, 7, 7, 40, },
+ { 1, 0, 0, 7, 7, 40, },
+ { 0, 0, 0, 7, 8, 46, },
+ { 2, 0, 0, 7, 8, 40, },
+ { 1, 0, 0, 7, 8, 40, },
+ { 0, 0, 0, 7, 9, 46, },
+ { 2, 0, 0, 7, 9, 40, },
+ { 1, 0, 0, 7, 9, 40, },
+ { 0, 0, 0, 7, 10, 46, },
+ { 2, 0, 0, 7, 10, 40, },
+ { 1, 0, 0, 7, 10, 40, },
+ { 0, 0, 0, 7, 11, 46, },
+ { 2, 0, 0, 7, 11, 40, },
+ { 1, 0, 0, 7, 11, 40, },
+ { 0, 0, 0, 7, 12, 63, },
+ { 2, 0, 0, 7, 12, 40, },
+ { 1, 0, 0, 7, 12, 40, },
+ { 0, 0, 0, 7, 13, 63, },
+ { 2, 0, 0, 7, 13, 40, },
+ { 1, 0, 0, 7, 13, 40, },
+ { 0, 0, 0, 7, 14, 63, },
+ { 2, 0, 0, 7, 14, 63, },
+ { 1, 0, 0, 7, 14, 63, },
+ { 0, 0, 1, 2, 1, 63, },
+ { 2, 0, 1, 2, 1, 63, },
+ { 1, 0, 1, 2, 1, 63, },
+ { 0, 0, 1, 2, 2, 63, },
+ { 2, 0, 1, 2, 2, 63, },
+ { 1, 0, 1, 2, 2, 63, },
+ { 0, 0, 1, 2, 3, 46, },
+ { 2, 0, 1, 2, 3, 40, },
+ { 1, 0, 1, 2, 3, 40, },
+ { 0, 0, 1, 2, 4, 46, },
+ { 2, 0, 1, 2, 4, 40, },
+ { 1, 0, 1, 2, 4, 40, },
+ { 0, 0, 1, 2, 5, 46, },
+ { 2, 0, 1, 2, 5, 40, },
+ { 1, 0, 1, 2, 5, 40, },
+ { 0, 0, 1, 2, 6, 46, },
+ { 2, 0, 1, 2, 6, 40, },
+ { 1, 0, 1, 2, 6, 40, },
+ { 0, 0, 1, 2, 7, 46, },
+ { 2, 0, 1, 2, 7, 40, },
+ { 1, 0, 1, 2, 7, 40, },
+ { 0, 0, 1, 2, 8, 46, },
+ { 2, 0, 1, 2, 8, 40, },
+ { 1, 0, 1, 2, 8, 40, },
+ { 0, 0, 1, 2, 9, 46, },
+ { 2, 0, 1, 2, 9, 40, },
+ { 1, 0, 1, 2, 9, 40, },
+ { 0, 0, 1, 2, 10, 46, },
+ { 2, 0, 1, 2, 10, 40, },
+ { 1, 0, 1, 2, 10, 40, },
+ { 0, 0, 1, 2, 11, 46, },
+ { 2, 0, 1, 2, 11, 40, },
+ { 1, 0, 1, 2, 11, 40, },
+ { 0, 0, 1, 2, 12, 63, },
+ { 2, 0, 1, 2, 12, 40, },
+ { 1, 0, 1, 2, 12, 40, },
+ { 0, 0, 1, 2, 13, 63, },
+ { 2, 0, 1, 2, 13, 40, },
+ { 1, 0, 1, 2, 13, 40, },
+ { 0, 0, 1, 2, 14, 63, },
+ { 2, 0, 1, 2, 14, 63, },
+ { 1, 0, 1, 2, 14, 63, },
+ { 0, 0, 1, 3, 1, 63, },
+ { 2, 0, 1, 3, 1, 63, },
+ { 1, 0, 1, 3, 1, 63, },
+ { 0, 0, 1, 3, 2, 63, },
+ { 2, 0, 1, 3, 2, 63, },
+ { 1, 0, 1, 3, 2, 63, },
+ { 0, 0, 1, 3, 3, 46, },
+ { 2, 0, 1, 3, 3, 40, },
+ { 1, 0, 1, 3, 3, 40, },
+ { 0, 0, 1, 3, 4, 46, },
+ { 2, 0, 1, 3, 4, 40, },
+ { 1, 0, 1, 3, 4, 40, },
+ { 0, 0, 1, 3, 5, 46, },
+ { 2, 0, 1, 3, 5, 40, },
+ { 1, 0, 1, 3, 5, 40, },
+ { 0, 0, 1, 3, 6, 46, },
+ { 2, 0, 1, 3, 6, 40, },
+ { 1, 0, 1, 3, 6, 40, },
+ { 0, 0, 1, 3, 7, 46, },
+ { 2, 0, 1, 3, 7, 40, },
+ { 1, 0, 1, 3, 7, 40, },
+ { 0, 0, 1, 3, 8, 46, },
+ { 2, 0, 1, 3, 8, 40, },
+ { 1, 0, 1, 3, 8, 40, },
+ { 0, 0, 1, 3, 9, 46, },
+ { 2, 0, 1, 3, 9, 40, },
+ { 1, 0, 1, 3, 9, 40, },
+ { 0, 0, 1, 3, 10, 46, },
+ { 2, 0, 1, 3, 10, 40, },
+ { 1, 0, 1, 3, 10, 40, },
+ { 0, 0, 1, 3, 11, 46, },
+ { 2, 0, 1, 3, 11, 40, },
+ { 1, 0, 1, 3, 11, 40, },
+ { 0, 0, 1, 3, 12, 63, },
+ { 2, 0, 1, 3, 12, 40, },
+ { 1, 0, 1, 3, 12, 40, },
+ { 0, 0, 1, 3, 13, 63, },
+ { 2, 0, 1, 3, 13, 40, },
+ { 1, 0, 1, 3, 13, 40, },
+ { 0, 0, 1, 3, 14, 63, },
+ { 2, 0, 1, 3, 14, 63, },
+ { 1, 0, 1, 3, 14, 63, },
+ { 0, 0, 1, 6, 1, 63, },
+ { 2, 0, 1, 6, 1, 63, },
+ { 1, 0, 1, 6, 1, 63, },
+ { 0, 0, 1, 6, 2, 63, },
+ { 2, 0, 1, 6, 2, 63, },
+ { 1, 0, 1, 6, 2, 63, },
+ { 0, 0, 1, 6, 3, 46, },
+ { 2, 0, 1, 6, 3, 40, },
+ { 1, 0, 1, 6, 3, 40, },
+ { 0, 0, 1, 6, 4, 46, },
+ { 2, 0, 1, 6, 4, 40, },
+ { 1, 0, 1, 6, 4, 40, },
+ { 0, 0, 1, 6, 5, 46, },
+ { 2, 0, 1, 6, 5, 40, },
+ { 1, 0, 1, 6, 5, 40, },
+ { 0, 0, 1, 6, 6, 46, },
+ { 2, 0, 1, 6, 6, 40, },
+ { 1, 0, 1, 6, 6, 40, },
+ { 0, 0, 1, 6, 7, 46, },
+ { 2, 0, 1, 6, 7, 40, },
+ { 1, 0, 1, 6, 7, 40, },
+ { 0, 0, 1, 6, 8, 46, },
+ { 2, 0, 1, 6, 8, 40, },
+ { 1, 0, 1, 6, 8, 40, },
+ { 0, 0, 1, 6, 9, 46, },
+ { 2, 0, 1, 6, 9, 40, },
+ { 1, 0, 1, 6, 9, 40, },
+ { 0, 0, 1, 6, 10, 46, },
+ { 2, 0, 1, 6, 10, 40, },
+ { 1, 0, 1, 6, 10, 40, },
+ { 0, 0, 1, 6, 11, 46, },
+ { 2, 0, 1, 6, 11, 40, },
+ { 1, 0, 1, 6, 11, 40, },
+ { 0, 0, 1, 6, 12, 63, },
+ { 2, 0, 1, 6, 12, 40, },
+ { 1, 0, 1, 6, 12, 40, },
+ { 0, 0, 1, 6, 13, 63, },
+ { 2, 0, 1, 6, 13, 40, },
+ { 1, 0, 1, 6, 13, 40, },
+ { 0, 0, 1, 6, 14, 63, },
+ { 2, 0, 1, 6, 14, 63, },
+ { 1, 0, 1, 6, 14, 63, },
+ { 0, 0, 1, 7, 1, 63, },
+ { 2, 0, 1, 7, 1, 63, },
+ { 1, 0, 1, 7, 1, 63, },
+ { 0, 0, 1, 7, 2, 63, },
+ { 2, 0, 1, 7, 2, 63, },
+ { 1, 0, 1, 7, 2, 63, },
+ { 0, 0, 1, 7, 3, 46, },
+ { 2, 0, 1, 7, 3, 40, },
+ { 1, 0, 1, 7, 3, 40, },
+ { 0, 0, 1, 7, 4, 46, },
+ { 2, 0, 1, 7, 4, 40, },
+ { 1, 0, 1, 7, 4, 40, },
+ { 0, 0, 1, 7, 5, 46, },
+ { 2, 0, 1, 7, 5, 40, },
+ { 1, 0, 1, 7, 5, 40, },
+ { 0, 0, 1, 7, 6, 46, },
+ { 2, 0, 1, 7, 6, 40, },
+ { 1, 0, 1, 7, 6, 40, },
+ { 0, 0, 1, 7, 7, 46, },
+ { 2, 0, 1, 7, 7, 40, },
+ { 1, 0, 1, 7, 7, 40, },
+ { 0, 0, 1, 7, 8, 46, },
+ { 2, 0, 1, 7, 8, 40, },
+ { 1, 0, 1, 7, 8, 40, },
+ { 0, 0, 1, 7, 9, 46, },
+ { 2, 0, 1, 7, 9, 40, },
+ { 1, 0, 1, 7, 9, 40, },
+ { 0, 0, 1, 7, 10, 46, },
+ { 2, 0, 1, 7, 10, 40, },
+ { 1, 0, 1, 7, 10, 40, },
+ { 0, 0, 1, 7, 11, 46, },
+ { 2, 0, 1, 7, 11, 40, },
+ { 1, 0, 1, 7, 11, 40, },
+ { 0, 0, 1, 7, 12, 63, },
+ { 2, 0, 1, 7, 12, 40, },
+ { 1, 0, 1, 7, 12, 40, },
+ { 0, 0, 1, 7, 13, 63, },
+ { 2, 0, 1, 7, 13, 40, },
+ { 1, 0, 1, 7, 13, 40, },
+ { 0, 0, 1, 7, 14, 63, },
+ { 2, 0, 1, 7, 14, 63, },
+ { 1, 0, 1, 7, 14, 63, },
+ { 0, 1, 0, 1, 36, 46, },
+ { 2, 1, 0, 1, 36, 40, },
+ { 1, 1, 0, 1, 36, 40, },
+ { 0, 1, 0, 1, 40, 46, },
+ { 2, 1, 0, 1, 40, 40, },
+ { 1, 1, 0, 1, 40, 40, },
+ { 0, 1, 0, 1, 44, 46, },
+ { 2, 1, 0, 1, 44, 40, },
+ { 1, 1, 0, 1, 44, 40, },
+ { 0, 1, 0, 1, 48, 46, },
+ { 2, 1, 0, 1, 48, 40, },
+ { 1, 1, 0, 1, 48, 40, },
+ { 0, 1, 0, 1, 52, 46, },
+ { 2, 1, 0, 1, 52, 40, },
+ { 1, 1, 0, 1, 52, 40, },
+ { 0, 1, 0, 1, 56, 46, },
+ { 2, 1, 0, 1, 56, 40, },
+ { 1, 1, 0, 1, 56, 40, },
+ { 0, 1, 0, 1, 60, 46, },
+ { 2, 1, 0, 1, 60, 40, },
+ { 1, 1, 0, 1, 60, 40, },
+ { 0, 1, 0, 1, 64, 46, },
+ { 2, 1, 0, 1, 64, 40, },
+ { 1, 1, 0, 1, 64, 40, },
+ { 0, 1, 0, 1, 100, 46, },
+ { 2, 1, 0, 1, 100, 40, },
+ { 1, 1, 0, 1, 100, 40, },
+ { 0, 1, 0, 1, 104, 46, },
+ { 2, 1, 0, 1, 104, 40, },
+ { 1, 1, 0, 1, 104, 40, },
+ { 0, 1, 0, 1, 108, 46, },
+ { 2, 1, 0, 1, 108, 40, },
+ { 1, 1, 0, 1, 108, 40, },
+ { 0, 1, 0, 1, 112, 46, },
+ { 2, 1, 0, 1, 112, 40, },
+ { 1, 1, 0, 1, 112, 40, },
+ { 0, 1, 0, 1, 116, 46, },
+ { 2, 1, 0, 1, 116, 40, },
+ { 1, 1, 0, 1, 116, 40, },
+ { 0, 1, 0, 1, 120, 46, },
+ { 2, 1, 0, 1, 120, 40, },
+ { 1, 1, 0, 1, 120, 40, },
+ { 0, 1, 0, 1, 124, 46, },
+ { 2, 1, 0, 1, 124, 40, },
+ { 1, 1, 0, 1, 124, 40, },
+ { 0, 1, 0, 1, 128, 46, },
+ { 2, 1, 0, 1, 128, 40, },
+ { 1, 1, 0, 1, 128, 40, },
+ { 0, 1, 0, 1, 132, 46, },
+ { 2, 1, 0, 1, 132, 40, },
+ { 1, 1, 0, 1, 132, 40, },
+ { 0, 1, 0, 1, 136, 46, },
+ { 2, 1, 0, 1, 136, 40, },
+ { 1, 1, 0, 1, 136, 40, },
+ { 0, 1, 0, 1, 140, 46, },
+ { 2, 1, 0, 1, 140, 40, },
+ { 1, 1, 0, 1, 140, 40, },
+ { 0, 1, 0, 1, 149, 46, },
+ { 2, 1, 0, 1, 149, 40, },
+ { 1, 1, 0, 1, 149, 63, },
+ { 0, 1, 0, 1, 153, 46, },
+ { 2, 1, 0, 1, 153, 40, },
+ { 1, 1, 0, 1, 153, 63, },
+ { 0, 1, 0, 1, 157, 46, },
+ { 2, 1, 0, 1, 157, 40, },
+ { 1, 1, 0, 1, 157, 63, },
+ { 0, 1, 0, 1, 161, 46, },
+ { 2, 1, 0, 1, 161, 40, },
+ { 1, 1, 0, 1, 161, 63, },
+ { 0, 1, 0, 1, 165, 46, },
+ { 2, 1, 0, 1, 165, 40, },
+ { 1, 1, 0, 1, 165, 63, },
+ { 0, 1, 0, 2, 36, 46, },
+ { 2, 1, 0, 2, 36, 40, },
+ { 1, 1, 0, 2, 36, 40, },
+ { 0, 1, 0, 2, 40, 46, },
+ { 2, 1, 0, 2, 40, 40, },
+ { 1, 1, 0, 2, 40, 40, },
+ { 0, 1, 0, 2, 44, 46, },
+ { 2, 1, 0, 2, 44, 40, },
+ { 1, 1, 0, 2, 44, 40, },
+ { 0, 1, 0, 2, 48, 46, },
+ { 2, 1, 0, 2, 48, 40, },
+ { 1, 1, 0, 2, 48, 40, },
+ { 0, 1, 0, 2, 52, 46, },
+ { 2, 1, 0, 2, 52, 40, },
+ { 1, 1, 0, 2, 52, 40, },
+ { 0, 1, 0, 2, 56, 46, },
+ { 2, 1, 0, 2, 56, 40, },
+ { 1, 1, 0, 2, 56, 40, },
+ { 0, 1, 0, 2, 60, 46, },
+ { 2, 1, 0, 2, 60, 40, },
+ { 1, 1, 0, 2, 60, 40, },
+ { 0, 1, 0, 2, 64, 46, },
+ { 2, 1, 0, 2, 64, 40, },
+ { 1, 1, 0, 2, 64, 40, },
+ { 0, 1, 0, 2, 100, 46, },
+ { 2, 1, 0, 2, 100, 40, },
+ { 1, 1, 0, 2, 100, 40, },
+ { 0, 1, 0, 2, 104, 46, },
+ { 2, 1, 0, 2, 104, 40, },
+ { 1, 1, 0, 2, 104, 40, },
+ { 0, 1, 0, 2, 108, 46, },
+ { 2, 1, 0, 2, 108, 40, },
+ { 1, 1, 0, 2, 108, 40, },
+ { 0, 1, 0, 2, 112, 46, },
+ { 2, 1, 0, 2, 112, 40, },
+ { 1, 1, 0, 2, 112, 40, },
+ { 0, 1, 0, 2, 116, 46, },
+ { 2, 1, 0, 2, 116, 40, },
+ { 1, 1, 0, 2, 116, 40, },
+ { 0, 1, 0, 2, 120, 46, },
+ { 2, 1, 0, 2, 120, 40, },
+ { 1, 1, 0, 2, 120, 40, },
+ { 0, 1, 0, 2, 124, 46, },
+ { 2, 1, 0, 2, 124, 40, },
+ { 1, 1, 0, 2, 124, 40, },
+ { 0, 1, 0, 2, 128, 46, },
+ { 2, 1, 0, 2, 128, 40, },
+ { 1, 1, 0, 2, 128, 40, },
+ { 0, 1, 0, 2, 132, 46, },
+ { 2, 1, 0, 2, 132, 40, },
+ { 1, 1, 0, 2, 132, 40, },
+ { 0, 1, 0, 2, 136, 46, },
+ { 2, 1, 0, 2, 136, 40, },
+ { 1, 1, 0, 2, 136, 40, },
+ { 0, 1, 0, 2, 140, 46, },
+ { 2, 1, 0, 2, 140, 40, },
+ { 1, 1, 0, 2, 140, 40, },
+ { 0, 1, 0, 2, 149, 46, },
+ { 2, 1, 0, 2, 149, 40, },
+ { 1, 1, 0, 2, 149, 63, },
+ { 0, 1, 0, 2, 153, 46, },
+ { 2, 1, 0, 2, 153, 40, },
+ { 1, 1, 0, 2, 153, 63, },
+ { 0, 1, 0, 2, 157, 46, },
+ { 2, 1, 0, 2, 157, 40, },
+ { 1, 1, 0, 2, 157, 63, },
+ { 0, 1, 0, 2, 161, 46, },
+ { 2, 1, 0, 2, 161, 40, },
+ { 1, 1, 0, 2, 161, 63, },
+ { 0, 1, 0, 2, 165, 46, },
+ { 2, 1, 0, 2, 165, 40, },
+ { 1, 1, 0, 2, 165, 63, },
+ { 0, 1, 0, 3, 36, 46, },
+ { 2, 1, 0, 3, 36, 40, },
+ { 1, 1, 0, 3, 36, 40, },
+ { 0, 1, 0, 3, 40, 46, },
+ { 2, 1, 0, 3, 40, 40, },
+ { 1, 1, 0, 3, 40, 40, },
+ { 0, 1, 0, 3, 44, 46, },
+ { 2, 1, 0, 3, 44, 40, },
+ { 1, 1, 0, 3, 44, 40, },
+ { 0, 1, 0, 3, 48, 46, },
+ { 2, 1, 0, 3, 48, 40, },
+ { 1, 1, 0, 3, 48, 40, },
+ { 0, 1, 0, 3, 52, 46, },
+ { 2, 1, 0, 3, 52, 40, },
+ { 1, 1, 0, 3, 52, 40, },
+ { 0, 1, 0, 3, 56, 46, },
+ { 2, 1, 0, 3, 56, 40, },
+ { 1, 1, 0, 3, 56, 40, },
+ { 0, 1, 0, 3, 60, 46, },
+ { 2, 1, 0, 3, 60, 40, },
+ { 1, 1, 0, 3, 60, 40, },
+ { 0, 1, 0, 3, 64, 46, },
+ { 2, 1, 0, 3, 64, 40, },
+ { 1, 1, 0, 3, 64, 40, },
+ { 0, 1, 0, 3, 100, 46, },
+ { 2, 1, 0, 3, 100, 40, },
+ { 1, 1, 0, 3, 100, 40, },
+ { 0, 1, 0, 3, 104, 46, },
+ { 2, 1, 0, 3, 104, 40, },
+ { 1, 1, 0, 3, 104, 40, },
+ { 0, 1, 0, 3, 108, 46, },
+ { 2, 1, 0, 3, 108, 40, },
+ { 1, 1, 0, 3, 108, 40, },
+ { 0, 1, 0, 3, 112, 46, },
+ { 2, 1, 0, 3, 112, 40, },
+ { 1, 1, 0, 3, 112, 40, },
+ { 0, 1, 0, 3, 116, 46, },
+ { 2, 1, 0, 3, 116, 40, },
+ { 1, 1, 0, 3, 116, 40, },
+ { 0, 1, 0, 3, 120, 46, },
+ { 2, 1, 0, 3, 120, 40, },
+ { 1, 1, 0, 3, 120, 40, },
+ { 0, 1, 0, 3, 124, 46, },
+ { 2, 1, 0, 3, 124, 40, },
+ { 1, 1, 0, 3, 124, 40, },
+ { 0, 1, 0, 3, 128, 46, },
+ { 2, 1, 0, 3, 128, 40, },
+ { 1, 1, 0, 3, 128, 40, },
+ { 0, 1, 0, 3, 132, 46, },
+ { 2, 1, 0, 3, 132, 40, },
+ { 1, 1, 0, 3, 132, 40, },
+ { 0, 1, 0, 3, 136, 46, },
+ { 2, 1, 0, 3, 136, 40, },
+ { 1, 1, 0, 3, 136, 40, },
+ { 0, 1, 0, 3, 140, 46, },
+ { 2, 1, 0, 3, 140, 40, },
+ { 1, 1, 0, 3, 140, 40, },
+ { 0, 1, 0, 3, 149, 46, },
+ { 2, 1, 0, 3, 149, 40, },
+ { 1, 1, 0, 3, 149, 63, },
+ { 0, 1, 0, 3, 153, 46, },
+ { 2, 1, 0, 3, 153, 40, },
+ { 1, 1, 0, 3, 153, 63, },
+ { 0, 1, 0, 3, 157, 46, },
+ { 2, 1, 0, 3, 157, 40, },
+ { 1, 1, 0, 3, 157, 63, },
+ { 0, 1, 0, 3, 161, 46, },
+ { 2, 1, 0, 3, 161, 40, },
+ { 1, 1, 0, 3, 161, 63, },
+ { 0, 1, 0, 3, 165, 46, },
+ { 2, 1, 0, 3, 165, 40, },
+ { 1, 1, 0, 3, 165, 63, },
+ { 0, 1, 0, 6, 36, 46, },
+ { 2, 1, 0, 6, 36, 40, },
+ { 1, 1, 0, 6, 36, 40, },
+ { 0, 1, 0, 6, 40, 46, },
+ { 2, 1, 0, 6, 40, 40, },
+ { 1, 1, 0, 6, 40, 40, },
+ { 0, 1, 0, 6, 44, 46, },
+ { 2, 1, 0, 6, 44, 40, },
+ { 1, 1, 0, 6, 44, 40, },
+ { 0, 1, 0, 6, 48, 46, },
+ { 2, 1, 0, 6, 48, 40, },
+ { 1, 1, 0, 6, 48, 40, },
+ { 0, 1, 0, 6, 52, 46, },
+ { 2, 1, 0, 6, 52, 40, },
+ { 1, 1, 0, 6, 52, 40, },
+ { 0, 1, 0, 6, 56, 46, },
+ { 2, 1, 0, 6, 56, 40, },
+ { 1, 1, 0, 6, 56, 40, },
+ { 0, 1, 0, 6, 60, 46, },
+ { 2, 1, 0, 6, 60, 40, },
+ { 1, 1, 0, 6, 60, 40, },
+ { 0, 1, 0, 6, 64, 46, },
+ { 2, 1, 0, 6, 64, 40, },
+ { 1, 1, 0, 6, 64, 40, },
+ { 0, 1, 0, 6, 100, 46, },
+ { 2, 1, 0, 6, 100, 40, },
+ { 1, 1, 0, 6, 100, 40, },
+ { 0, 1, 0, 6, 104, 46, },
+ { 2, 1, 0, 6, 104, 40, },
+ { 1, 1, 0, 6, 104, 40, },
+ { 0, 1, 0, 6, 108, 46, },
+ { 2, 1, 0, 6, 108, 40, },
+ { 1, 1, 0, 6, 108, 40, },
+ { 0, 1, 0, 6, 112, 46, },
+ { 2, 1, 0, 6, 112, 40, },
+ { 1, 1, 0, 6, 112, 40, },
+ { 0, 1, 0, 6, 116, 46, },
+ { 2, 1, 0, 6, 116, 40, },
+ { 1, 1, 0, 6, 116, 40, },
+ { 0, 1, 0, 6, 120, 46, },
+ { 2, 1, 0, 6, 120, 40, },
+ { 1, 1, 0, 6, 120, 40, },
+ { 0, 1, 0, 6, 124, 46, },
+ { 2, 1, 0, 6, 124, 40, },
+ { 1, 1, 0, 6, 124, 40, },
+ { 0, 1, 0, 6, 128, 46, },
+ { 2, 1, 0, 6, 128, 40, },
+ { 1, 1, 0, 6, 128, 40, },
+ { 0, 1, 0, 6, 132, 46, },
+ { 2, 1, 0, 6, 132, 40, },
+ { 1, 1, 0, 6, 132, 40, },
+ { 0, 1, 0, 6, 136, 46, },
+ { 2, 1, 0, 6, 136, 40, },
+ { 1, 1, 0, 6, 136, 40, },
+ { 0, 1, 0, 6, 140, 46, },
+ { 2, 1, 0, 6, 140, 40, },
+ { 1, 1, 0, 6, 140, 40, },
+ { 0, 1, 0, 6, 149, 46, },
+ { 2, 1, 0, 6, 149, 40, },
+ { 1, 1, 0, 6, 149, 63, },
+ { 0, 1, 0, 6, 153, 46, },
+ { 2, 1, 0, 6, 153, 40, },
+ { 1, 1, 0, 6, 153, 63, },
+ { 0, 1, 0, 6, 157, 46, },
+ { 2, 1, 0, 6, 157, 40, },
+ { 1, 1, 0, 6, 157, 63, },
+ { 0, 1, 0, 6, 161, 46, },
+ { 2, 1, 0, 6, 161, 40, },
+ { 1, 1, 0, 6, 161, 63, },
+ { 0, 1, 0, 6, 165, 46, },
+ { 2, 1, 0, 6, 165, 40, },
+ { 1, 1, 0, 6, 165, 63, },
+ { 0, 1, 0, 7, 36, 46, },
+ { 2, 1, 0, 7, 36, 40, },
+ { 1, 1, 0, 7, 36, 40, },
+ { 0, 1, 0, 7, 40, 46, },
+ { 2, 1, 0, 7, 40, 40, },
+ { 1, 1, 0, 7, 40, 40, },
+ { 0, 1, 0, 7, 44, 46, },
+ { 2, 1, 0, 7, 44, 40, },
+ { 1, 1, 0, 7, 44, 40, },
+ { 0, 1, 0, 7, 48, 46, },
+ { 2, 1, 0, 7, 48, 40, },
+ { 1, 1, 0, 7, 48, 40, },
+ { 0, 1, 0, 7, 52, 46, },
+ { 2, 1, 0, 7, 52, 40, },
+ { 1, 1, 0, 7, 52, 40, },
+ { 0, 1, 0, 7, 56, 46, },
+ { 2, 1, 0, 7, 56, 40, },
+ { 1, 1, 0, 7, 56, 40, },
+ { 0, 1, 0, 7, 60, 46, },
+ { 2, 1, 0, 7, 60, 40, },
+ { 1, 1, 0, 7, 60, 40, },
+ { 0, 1, 0, 7, 64, 46, },
+ { 2, 1, 0, 7, 64, 40, },
+ { 1, 1, 0, 7, 64, 40, },
+ { 0, 1, 0, 7, 100, 46, },
+ { 2, 1, 0, 7, 100, 40, },
+ { 1, 1, 0, 7, 100, 40, },
+ { 0, 1, 0, 7, 104, 46, },
+ { 2, 1, 0, 7, 104, 40, },
+ { 1, 1, 0, 7, 104, 40, },
+ { 0, 1, 0, 7, 108, 46, },
+ { 2, 1, 0, 7, 108, 40, },
+ { 1, 1, 0, 7, 108, 40, },
+ { 0, 1, 0, 7, 112, 46, },
+ { 2, 1, 0, 7, 112, 40, },
+ { 1, 1, 0, 7, 112, 40, },
+ { 0, 1, 0, 7, 116, 46, },
+ { 2, 1, 0, 7, 116, 40, },
+ { 1, 1, 0, 7, 116, 40, },
+ { 0, 1, 0, 7, 120, 46, },
+ { 2, 1, 0, 7, 120, 40, },
+ { 1, 1, 0, 7, 120, 40, },
+ { 0, 1, 0, 7, 124, 46, },
+ { 2, 1, 0, 7, 124, 40, },
+ { 1, 1, 0, 7, 124, 40, },
+ { 0, 1, 0, 7, 128, 46, },
+ { 2, 1, 0, 7, 128, 40, },
+ { 1, 1, 0, 7, 128, 40, },
+ { 0, 1, 0, 7, 132, 46, },
+ { 2, 1, 0, 7, 132, 40, },
+ { 1, 1, 0, 7, 132, 40, },
+ { 0, 1, 0, 7, 136, 46, },
+ { 2, 1, 0, 7, 136, 40, },
+ { 1, 1, 0, 7, 136, 40, },
+ { 0, 1, 0, 7, 140, 46, },
+ { 2, 1, 0, 7, 140, 40, },
+ { 1, 1, 0, 7, 140, 40, },
+ { 0, 1, 0, 7, 149, 46, },
+ { 2, 1, 0, 7, 149, 40, },
+ { 1, 1, 0, 7, 149, 63, },
+ { 0, 1, 0, 7, 153, 46, },
+ { 2, 1, 0, 7, 153, 40, },
+ { 1, 1, 0, 7, 153, 63, },
+ { 0, 1, 0, 7, 157, 46, },
+ { 2, 1, 0, 7, 157, 40, },
+ { 1, 1, 0, 7, 157, 63, },
+ { 0, 1, 0, 7, 161, 46, },
+ { 2, 1, 0, 7, 161, 40, },
+ { 1, 1, 0, 7, 161, 63, },
+ { 0, 1, 0, 7, 165, 46, },
+ { 2, 1, 0, 7, 165, 40, },
+ { 1, 1, 0, 7, 165, 63, },
+ { 0, 1, 1, 2, 38, 46, },
+ { 2, 1, 1, 2, 38, 40, },
+ { 1, 1, 1, 2, 38, 40, },
+ { 0, 1, 1, 2, 46, 46, },
+ { 2, 1, 1, 2, 46, 40, },
+ { 1, 1, 1, 2, 46, 40, },
+ { 0, 1, 1, 2, 54, 46, },
+ { 2, 1, 1, 2, 54, 40, },
+ { 1, 1, 1, 2, 54, 40, },
+ { 0, 1, 1, 2, 62, 46, },
+ { 2, 1, 1, 2, 62, 40, },
+ { 1, 1, 1, 2, 62, 40, },
+ { 0, 1, 1, 2, 102, 46, },
+ { 2, 1, 1, 2, 102, 40, },
+ { 1, 1, 1, 2, 102, 40, },
+ { 0, 1, 1, 2, 110, 46, },
+ { 2, 1, 1, 2, 110, 40, },
+ { 1, 1, 1, 2, 110, 40, },
+ { 0, 1, 1, 2, 118, 46, },
+ { 2, 1, 1, 2, 118, 40, },
+ { 1, 1, 1, 2, 118, 40, },
+ { 0, 1, 1, 2, 126, 46, },
+ { 2, 1, 1, 2, 126, 40, },
+ { 1, 1, 1, 2, 126, 40, },
+ { 0, 1, 1, 2, 134, 46, },
+ { 2, 1, 1, 2, 134, 40, },
+ { 1, 1, 1, 2, 134, 40, },
+ { 0, 1, 1, 2, 151, 46, },
+ { 2, 1, 1, 2, 151, 40, },
+ { 1, 1, 1, 2, 151, 63, },
+ { 0, 1, 1, 2, 159, 46, },
+ { 2, 1, 1, 2, 159, 40, },
+ { 1, 1, 1, 2, 159, 63, },
+ { 0, 1, 1, 3, 38, 46, },
+ { 2, 1, 1, 3, 38, 40, },
+ { 1, 1, 1, 3, 38, 40, },
+ { 0, 1, 1, 3, 46, 46, },
+ { 2, 1, 1, 3, 46, 40, },
+ { 1, 1, 1, 3, 46, 40, },
+ { 0, 1, 1, 3, 54, 46, },
+ { 2, 1, 1, 3, 54, 40, },
+ { 1, 1, 1, 3, 54, 40, },
+ { 0, 1, 1, 3, 62, 46, },
+ { 2, 1, 1, 3, 62, 40, },
+ { 1, 1, 1, 3, 62, 40, },
+ { 0, 1, 1, 3, 102, 46, },
+ { 2, 1, 1, 3, 102, 40, },
+ { 1, 1, 1, 3, 102, 40, },
+ { 0, 1, 1, 3, 110, 46, },
+ { 2, 1, 1, 3, 110, 40, },
+ { 1, 1, 1, 3, 110, 40, },
+ { 0, 1, 1, 3, 118, 46, },
+ { 2, 1, 1, 3, 118, 40, },
+ { 1, 1, 1, 3, 118, 40, },
+ { 0, 1, 1, 3, 126, 46, },
+ { 2, 1, 1, 3, 126, 40, },
+ { 1, 1, 1, 3, 126, 40, },
+ { 0, 1, 1, 3, 134, 46, },
+ { 2, 1, 1, 3, 134, 40, },
+ { 1, 1, 1, 3, 134, 40, },
+ { 0, 1, 1, 3, 151, 46, },
+ { 2, 1, 1, 3, 151, 40, },
+ { 1, 1, 1, 3, 151, 63, },
+ { 0, 1, 1, 3, 159, 46, },
+ { 2, 1, 1, 3, 159, 40, },
+ { 1, 1, 1, 3, 159, 63, },
+ { 0, 1, 1, 6, 38, 46, },
+ { 2, 1, 1, 6, 38, 40, },
+ { 1, 1, 1, 6, 38, 40, },
+ { 0, 1, 1, 6, 46, 46, },
+ { 2, 1, 1, 6, 46, 40, },
+ { 1, 1, 1, 6, 46, 40, },
+ { 0, 1, 1, 6, 54, 46, },
+ { 2, 1, 1, 6, 54, 40, },
+ { 1, 1, 1, 6, 54, 40, },
+ { 0, 1, 1, 6, 62, 46, },
+ { 2, 1, 1, 6, 62, 40, },
+ { 1, 1, 1, 6, 62, 40, },
+ { 0, 1, 1, 6, 102, 46, },
+ { 2, 1, 1, 6, 102, 40, },
+ { 1, 1, 1, 6, 102, 40, },
+ { 0, 1, 1, 6, 110, 46, },
+ { 2, 1, 1, 6, 110, 40, },
+ { 1, 1, 1, 6, 110, 40, },
+ { 0, 1, 1, 6, 118, 46, },
+ { 2, 1, 1, 6, 118, 40, },
+ { 1, 1, 1, 6, 118, 40, },
+ { 0, 1, 1, 6, 126, 46, },
+ { 2, 1, 1, 6, 126, 40, },
+ { 1, 1, 1, 6, 126, 40, },
+ { 0, 1, 1, 6, 134, 46, },
+ { 2, 1, 1, 6, 134, 40, },
+ { 1, 1, 1, 6, 134, 40, },
+ { 0, 1, 1, 6, 151, 46, },
+ { 2, 1, 1, 6, 151, 40, },
+ { 1, 1, 1, 6, 151, 63, },
+ { 0, 1, 1, 6, 159, 46, },
+ { 2, 1, 1, 6, 159, 40, },
+ { 1, 1, 1, 6, 159, 63, },
+ { 0, 1, 1, 7, 38, 46, },
+ { 2, 1, 1, 7, 38, 40, },
+ { 1, 1, 1, 7, 38, 40, },
+ { 0, 1, 1, 7, 46, 46, },
+ { 2, 1, 1, 7, 46, 40, },
+ { 1, 1, 1, 7, 46, 40, },
+ { 0, 1, 1, 7, 54, 46, },
+ { 2, 1, 1, 7, 54, 40, },
+ { 1, 1, 1, 7, 54, 40, },
+ { 0, 1, 1, 7, 62, 46, },
+ { 2, 1, 1, 7, 62, 40, },
+ { 1, 1, 1, 7, 62, 40, },
+ { 0, 1, 1, 7, 102, 46, },
+ { 2, 1, 1, 7, 102, 40, },
+ { 1, 1, 1, 7, 102, 40, },
+ { 0, 1, 1, 7, 110, 46, },
+ { 2, 1, 1, 7, 110, 40, },
+ { 1, 1, 1, 7, 110, 40, },
+ { 0, 1, 1, 7, 118, 46, },
+ { 2, 1, 1, 7, 118, 40, },
+ { 1, 1, 1, 7, 118, 40, },
+ { 0, 1, 1, 7, 126, 46, },
+ { 2, 1, 1, 7, 126, 40, },
+ { 1, 1, 1, 7, 126, 40, },
+ { 0, 1, 1, 7, 134, 46, },
+ { 2, 1, 1, 7, 134, 40, },
+ { 1, 1, 1, 7, 134, 40, },
+ { 0, 1, 1, 7, 151, 46, },
+ { 2, 1, 1, 7, 151, 40, },
+ { 1, 1, 1, 7, 151, 63, },
+ { 0, 1, 1, 7, 159, 46, },
+ { 2, 1, 1, 7, 159, 40, },
+ { 1, 1, 1, 7, 159, 63, },
+ { 0, 1, 2, 4, 42, 46, },
+ { 2, 1, 2, 4, 42, 40, },
+ { 1, 1, 2, 4, 42, 40, },
+ { 0, 1, 2, 4, 58, 46, },
+ { 2, 1, 2, 4, 58, 40, },
+ { 1, 1, 2, 4, 58, 40, },
+ { 0, 1, 2, 4, 106, 46, },
+ { 2, 1, 2, 4, 106, 40, },
+ { 1, 1, 2, 4, 106, 40, },
+ { 0, 1, 2, 4, 122, 46, },
+ { 2, 1, 2, 4, 122, 40, },
+ { 1, 1, 2, 4, 122, 40, },
+ { 0, 1, 2, 4, 155, 46, },
+ { 2, 1, 2, 4, 155, 40, },
+ { 1, 1, 2, 4, 155, 63, },
+ { 0, 1, 2, 5, 42, 46, },
+ { 2, 1, 2, 5, 42, 40, },
+ { 1, 1, 2, 5, 42, 40, },
+ { 0, 1, 2, 5, 58, 46, },
+ { 2, 1, 2, 5, 58, 40, },
+ { 1, 1, 2, 5, 58, 40, },
+ { 0, 1, 2, 5, 106, 46, },
+ { 2, 1, 2, 5, 106, 40, },
+ { 1, 1, 2, 5, 106, 40, },
+ { 0, 1, 2, 5, 122, 46, },
+ { 2, 1, 2, 5, 122, 40, },
+ { 1, 1, 2, 5, 122, 40, },
+ { 0, 1, 2, 5, 155, 46, },
+ { 2, 1, 2, 5, 155, 40, },
+ { 1, 1, 2, 5, 155, 63, },
+ { 0, 1, 2, 8, 42, 46, },
+ { 2, 1, 2, 8, 42, 40, },
+ { 1, 1, 2, 8, 42, 40, },
+ { 0, 1, 2, 8, 58, 46, },
+ { 2, 1, 2, 8, 58, 40, },
+ { 1, 1, 2, 8, 58, 40, },
+ { 0, 1, 2, 8, 106, 46, },
+ { 2, 1, 2, 8, 106, 40, },
+ { 1, 1, 2, 8, 106, 40, },
+ { 0, 1, 2, 8, 122, 46, },
+ { 2, 1, 2, 8, 122, 40, },
+ { 1, 1, 2, 8, 122, 40, },
+ { 0, 1, 2, 8, 155, 46, },
+ { 2, 1, 2, 8, 155, 40, },
+ { 1, 1, 2, 8, 155, 63, },
+ { 0, 1, 2, 9, 42, 46, },
+ { 2, 1, 2, 9, 42, 40, },
+ { 1, 1, 2, 9, 42, 40, },
+ { 0, 1, 2, 9, 58, 46, },
+ { 2, 1, 2, 9, 58, 40, },
+ { 1, 1, 2, 9, 58, 40, },
+ { 0, 1, 2, 9, 106, 46, },
+ { 2, 1, 2, 9, 106, 40, },
+ { 1, 1, 2, 9, 106, 40, },
+ { 0, 1, 2, 9, 122, 46, },
+ { 2, 1, 2, 9, 122, 40, },
+ { 1, 1, 2, 9, 122, 40, },
+ { 0, 1, 2, 9, 155, 46, },
+ { 2, 1, 2, 9, 155, 40, },
+ { 1, 1, 2, 9, 155, 63, },
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8814a_txpwr_lmt_type3);
+
+static const struct rtw_txpwr_lmt_cfg_pair rtw8814a_txpwr_lmt_type5[] = {
+ { 0, 0, 0, 0, 1, 46, },
+ { 2, 0, 0, 0, 1, 40, },
+ { 1, 0, 0, 0, 1, 40, },
+ { 0, 0, 0, 0, 2, 46, },
+ { 2, 0, 0, 0, 2, 40, },
+ { 1, 0, 0, 0, 2, 40, },
+ { 0, 0, 0, 0, 3, 46, },
+ { 2, 0, 0, 0, 3, 40, },
+ { 1, 0, 0, 0, 3, 40, },
+ { 0, 0, 0, 0, 4, 46, },
+ { 2, 0, 0, 0, 4, 40, },
+ { 1, 0, 0, 0, 4, 40, },
+ { 0, 0, 0, 0, 5, 46, },
+ { 2, 0, 0, 0, 5, 40, },
+ { 1, 0, 0, 0, 5, 40, },
+ { 0, 0, 0, 0, 6, 46, },
+ { 2, 0, 0, 0, 6, 40, },
+ { 1, 0, 0, 0, 6, 40, },
+ { 0, 0, 0, 0, 7, 46, },
+ { 2, 0, 0, 0, 7, 40, },
+ { 1, 0, 0, 0, 7, 40, },
+ { 0, 0, 0, 0, 8, 46, },
+ { 2, 0, 0, 0, 8, 40, },
+ { 1, 0, 0, 0, 8, 40, },
+ { 0, 0, 0, 0, 9, 46, },
+ { 2, 0, 0, 0, 9, 40, },
+ { 1, 0, 0, 0, 9, 40, },
+ { 0, 0, 0, 0, 10, 46, },
+ { 2, 0, 0, 0, 10, 40, },
+ { 1, 0, 0, 0, 10, 40, },
+ { 0, 0, 0, 0, 11, 46, },
+ { 2, 0, 0, 0, 11, 40, },
+ { 1, 0, 0, 0, 11, 40, },
+ { 0, 0, 0, 0, 12, 63, },
+ { 2, 0, 0, 0, 12, 40, },
+ { 1, 0, 0, 0, 12, 40, },
+ { 0, 0, 0, 0, 13, 63, },
+ { 2, 0, 0, 0, 13, 40, },
+ { 1, 0, 0, 0, 13, 40, },
+ { 0, 0, 0, 0, 14, 63, },
+ { 2, 0, 0, 0, 14, 63, },
+ { 1, 0, 0, 0, 14, 40, },
+ { 0, 0, 0, 1, 1, 46, },
+ { 2, 0, 0, 1, 1, 40, },
+ { 1, 0, 0, 1, 1, 40, },
+ { 0, 0, 0, 1, 2, 46, },
+ { 2, 0, 0, 1, 2, 40, },
+ { 1, 0, 0, 1, 2, 40, },
+ { 0, 0, 0, 1, 3, 46, },
+ { 2, 0, 0, 1, 3, 40, },
+ { 1, 0, 0, 1, 3, 40, },
+ { 0, 0, 0, 1, 4, 46, },
+ { 2, 0, 0, 1, 4, 40, },
+ { 1, 0, 0, 1, 4, 40, },
+ { 0, 0, 0, 1, 5, 46, },
+ { 2, 0, 0, 1, 5, 40, },
+ { 1, 0, 0, 1, 5, 40, },
+ { 0, 0, 0, 1, 6, 46, },
+ { 2, 0, 0, 1, 6, 40, },
+ { 1, 0, 0, 1, 6, 40, },
+ { 0, 0, 0, 1, 7, 46, },
+ { 2, 0, 0, 1, 7, 40, },
+ { 1, 0, 0, 1, 7, 40, },
+ { 0, 0, 0, 1, 8, 46, },
+ { 2, 0, 0, 1, 8, 40, },
+ { 1, 0, 0, 1, 8, 40, },
+ { 0, 0, 0, 1, 9, 46, },
+ { 2, 0, 0, 1, 9, 40, },
+ { 1, 0, 0, 1, 9, 40, },
+ { 0, 0, 0, 1, 10, 46, },
+ { 2, 0, 0, 1, 10, 40, },
+ { 1, 0, 0, 1, 10, 40, },
+ { 0, 0, 0, 1, 11, 46, },
+ { 2, 0, 0, 1, 11, 40, },
+ { 1, 0, 0, 1, 11, 40, },
+ { 0, 0, 0, 1, 12, 63, },
+ { 2, 0, 0, 1, 12, 40, },
+ { 1, 0, 0, 1, 12, 40, },
+ { 0, 0, 0, 1, 13, 63, },
+ { 2, 0, 0, 1, 13, 40, },
+ { 1, 0, 0, 1, 13, 40, },
+ { 0, 0, 0, 1, 14, 63, },
+ { 2, 0, 0, 1, 14, 63, },
+ { 1, 0, 0, 1, 14, 63, },
+ { 0, 0, 0, 2, 1, 46, },
+ { 2, 0, 0, 2, 1, 40, },
+ { 1, 0, 0, 2, 1, 40, },
+ { 0, 0, 0, 2, 2, 46, },
+ { 2, 0, 0, 2, 2, 40, },
+ { 1, 0, 0, 2, 2, 40, },
+ { 0, 0, 0, 2, 3, 46, },
+ { 2, 0, 0, 2, 3, 40, },
+ { 1, 0, 0, 2, 3, 40, },
+ { 0, 0, 0, 2, 4, 46, },
+ { 2, 0, 0, 2, 4, 40, },
+ { 1, 0, 0, 2, 4, 40, },
+ { 0, 0, 0, 2, 5, 46, },
+ { 2, 0, 0, 2, 5, 40, },
+ { 1, 0, 0, 2, 5, 40, },
+ { 0, 0, 0, 2, 6, 46, },
+ { 2, 0, 0, 2, 6, 40, },
+ { 1, 0, 0, 2, 6, 40, },
+ { 0, 0, 0, 2, 7, 46, },
+ { 2, 0, 0, 2, 7, 40, },
+ { 1, 0, 0, 2, 7, 40, },
+ { 0, 0, 0, 2, 8, 46, },
+ { 2, 0, 0, 2, 8, 40, },
+ { 1, 0, 0, 2, 8, 40, },
+ { 0, 0, 0, 2, 9, 46, },
+ { 2, 0, 0, 2, 9, 40, },
+ { 1, 0, 0, 2, 9, 40, },
+ { 0, 0, 0, 2, 10, 46, },
+ { 2, 0, 0, 2, 10, 40, },
+ { 1, 0, 0, 2, 10, 40, },
+ { 0, 0, 0, 2, 11, 46, },
+ { 2, 0, 0, 2, 11, 40, },
+ { 1, 0, 0, 2, 11, 40, },
+ { 0, 0, 0, 2, 12, 63, },
+ { 2, 0, 0, 2, 12, 40, },
+ { 1, 0, 0, 2, 12, 40, },
+ { 0, 0, 0, 2, 13, 63, },
+ { 2, 0, 0, 2, 13, 40, },
+ { 1, 0, 0, 2, 13, 40, },
+ { 0, 0, 0, 2, 14, 63, },
+ { 2, 0, 0, 2, 14, 63, },
+ { 1, 0, 0, 2, 14, 63, },
+ { 0, 0, 0, 3, 1, 46, },
+ { 2, 0, 0, 3, 1, 40, },
+ { 1, 0, 0, 3, 1, 40, },
+ { 0, 0, 0, 3, 2, 46, },
+ { 2, 0, 0, 3, 2, 40, },
+ { 1, 0, 0, 3, 2, 40, },
+ { 0, 0, 0, 3, 3, 46, },
+ { 2, 0, 0, 3, 3, 40, },
+ { 1, 0, 0, 3, 3, 40, },
+ { 0, 0, 0, 3, 4, 46, },
+ { 2, 0, 0, 3, 4, 40, },
+ { 1, 0, 0, 3, 4, 40, },
+ { 0, 0, 0, 3, 5, 46, },
+ { 2, 0, 0, 3, 5, 40, },
+ { 1, 0, 0, 3, 5, 40, },
+ { 0, 0, 0, 3, 6, 46, },
+ { 2, 0, 0, 3, 6, 40, },
+ { 1, 0, 0, 3, 6, 40, },
+ { 0, 0, 0, 3, 7, 46, },
+ { 2, 0, 0, 3, 7, 40, },
+ { 1, 0, 0, 3, 7, 40, },
+ { 0, 0, 0, 3, 8, 46, },
+ { 2, 0, 0, 3, 8, 40, },
+ { 1, 0, 0, 3, 8, 40, },
+ { 0, 0, 0, 3, 9, 46, },
+ { 2, 0, 0, 3, 9, 40, },
+ { 1, 0, 0, 3, 9, 40, },
+ { 0, 0, 0, 3, 10, 46, },
+ { 2, 0, 0, 3, 10, 40, },
+ { 1, 0, 0, 3, 10, 40, },
+ { 0, 0, 0, 3, 11, 46, },
+ { 2, 0, 0, 3, 11, 40, },
+ { 1, 0, 0, 3, 11, 40, },
+ { 0, 0, 0, 3, 12, 63, },
+ { 2, 0, 0, 3, 12, 40, },
+ { 1, 0, 0, 3, 12, 40, },
+ { 0, 0, 0, 3, 13, 63, },
+ { 2, 0, 0, 3, 13, 40, },
+ { 1, 0, 0, 3, 13, 40, },
+ { 0, 0, 0, 3, 14, 63, },
+ { 2, 0, 0, 3, 14, 63, },
+ { 1, 0, 0, 3, 14, 63, },
+ { 0, 0, 0, 6, 1, 46, },
+ { 2, 0, 0, 6, 1, 40, },
+ { 1, 0, 0, 6, 1, 40, },
+ { 0, 0, 0, 6, 2, 46, },
+ { 2, 0, 0, 6, 2, 40, },
+ { 1, 0, 0, 6, 2, 40, },
+ { 0, 0, 0, 6, 3, 46, },
+ { 2, 0, 0, 6, 3, 40, },
+ { 1, 0, 0, 6, 3, 40, },
+ { 0, 0, 0, 6, 4, 46, },
+ { 2, 0, 0, 6, 4, 40, },
+ { 1, 0, 0, 6, 4, 40, },
+ { 0, 0, 0, 6, 5, 46, },
+ { 2, 0, 0, 6, 5, 40, },
+ { 1, 0, 0, 6, 5, 40, },
+ { 0, 0, 0, 6, 6, 46, },
+ { 2, 0, 0, 6, 6, 40, },
+ { 1, 0, 0, 6, 6, 40, },
+ { 0, 0, 0, 6, 7, 46, },
+ { 2, 0, 0, 6, 7, 40, },
+ { 1, 0, 0, 6, 7, 40, },
+ { 0, 0, 0, 6, 8, 46, },
+ { 2, 0, 0, 6, 8, 40, },
+ { 1, 0, 0, 6, 8, 40, },
+ { 0, 0, 0, 6, 9, 46, },
+ { 2, 0, 0, 6, 9, 40, },
+ { 1, 0, 0, 6, 9, 40, },
+ { 0, 0, 0, 6, 10, 46, },
+ { 2, 0, 0, 6, 10, 40, },
+ { 1, 0, 0, 6, 10, 40, },
+ { 0, 0, 0, 6, 11, 46, },
+ { 2, 0, 0, 6, 11, 40, },
+ { 1, 0, 0, 6, 11, 40, },
+ { 0, 0, 0, 6, 12, 63, },
+ { 2, 0, 0, 6, 12, 40, },
+ { 1, 0, 0, 6, 12, 40, },
+ { 0, 0, 0, 6, 13, 63, },
+ { 2, 0, 0, 6, 13, 40, },
+ { 1, 0, 0, 6, 13, 40, },
+ { 0, 0, 0, 6, 14, 63, },
+ { 2, 0, 0, 6, 14, 63, },
+ { 1, 0, 0, 6, 14, 63, },
+ { 0, 0, 0, 7, 1, 46, },
+ { 2, 0, 0, 7, 1, 40, },
+ { 1, 0, 0, 7, 1, 40, },
+ { 0, 0, 0, 7, 2, 46, },
+ { 2, 0, 0, 7, 2, 40, },
+ { 1, 0, 0, 7, 2, 40, },
+ { 0, 0, 0, 7, 3, 46, },
+ { 2, 0, 0, 7, 3, 40, },
+ { 1, 0, 0, 7, 3, 40, },
+ { 0, 0, 0, 7, 4, 46, },
+ { 2, 0, 0, 7, 4, 40, },
+ { 1, 0, 0, 7, 4, 40, },
+ { 0, 0, 0, 7, 5, 46, },
+ { 2, 0, 0, 7, 5, 40, },
+ { 1, 0, 0, 7, 5, 40, },
+ { 0, 0, 0, 7, 6, 46, },
+ { 2, 0, 0, 7, 6, 40, },
+ { 1, 0, 0, 7, 6, 40, },
+ { 0, 0, 0, 7, 7, 46, },
+ { 2, 0, 0, 7, 7, 40, },
+ { 1, 0, 0, 7, 7, 40, },
+ { 0, 0, 0, 7, 8, 46, },
+ { 2, 0, 0, 7, 8, 40, },
+ { 1, 0, 0, 7, 8, 40, },
+ { 0, 0, 0, 7, 9, 46, },
+ { 2, 0, 0, 7, 9, 40, },
+ { 1, 0, 0, 7, 9, 40, },
+ { 0, 0, 0, 7, 10, 46, },
+ { 2, 0, 0, 7, 10, 40, },
+ { 1, 0, 0, 7, 10, 40, },
+ { 0, 0, 0, 7, 11, 46, },
+ { 2, 0, 0, 7, 11, 40, },
+ { 1, 0, 0, 7, 11, 40, },
+ { 0, 0, 0, 7, 12, 63, },
+ { 2, 0, 0, 7, 12, 40, },
+ { 1, 0, 0, 7, 12, 40, },
+ { 0, 0, 0, 7, 13, 63, },
+ { 2, 0, 0, 7, 13, 40, },
+ { 1, 0, 0, 7, 13, 40, },
+ { 0, 0, 0, 7, 14, 63, },
+ { 2, 0, 0, 7, 14, 63, },
+ { 1, 0, 0, 7, 14, 63, },
+ { 0, 0, 1, 2, 1, 63, },
+ { 2, 0, 1, 2, 1, 63, },
+ { 1, 0, 1, 2, 1, 63, },
+ { 0, 0, 1, 2, 2, 63, },
+ { 2, 0, 1, 2, 2, 63, },
+ { 1, 0, 1, 2, 2, 63, },
+ { 0, 0, 1, 2, 3, 46, },
+ { 2, 0, 1, 2, 3, 40, },
+ { 1, 0, 1, 2, 3, 40, },
+ { 0, 0, 1, 2, 4, 46, },
+ { 2, 0, 1, 2, 4, 40, },
+ { 1, 0, 1, 2, 4, 40, },
+ { 0, 0, 1, 2, 5, 46, },
+ { 2, 0, 1, 2, 5, 40, },
+ { 1, 0, 1, 2, 5, 40, },
+ { 0, 0, 1, 2, 6, 46, },
+ { 2, 0, 1, 2, 6, 40, },
+ { 1, 0, 1, 2, 6, 40, },
+ { 0, 0, 1, 2, 7, 46, },
+ { 2, 0, 1, 2, 7, 40, },
+ { 1, 0, 1, 2, 7, 40, },
+ { 0, 0, 1, 2, 8, 46, },
+ { 2, 0, 1, 2, 8, 40, },
+ { 1, 0, 1, 2, 8, 40, },
+ { 0, 0, 1, 2, 9, 46, },
+ { 2, 0, 1, 2, 9, 40, },
+ { 1, 0, 1, 2, 9, 40, },
+ { 0, 0, 1, 2, 10, 46, },
+ { 2, 0, 1, 2, 10, 40, },
+ { 1, 0, 1, 2, 10, 40, },
+ { 0, 0, 1, 2, 11, 46, },
+ { 2, 0, 1, 2, 11, 40, },
+ { 1, 0, 1, 2, 11, 40, },
+ { 0, 0, 1, 2, 12, 63, },
+ { 2, 0, 1, 2, 12, 40, },
+ { 1, 0, 1, 2, 12, 40, },
+ { 0, 0, 1, 2, 13, 63, },
+ { 2, 0, 1, 2, 13, 40, },
+ { 1, 0, 1, 2, 13, 40, },
+ { 0, 0, 1, 2, 14, 63, },
+ { 2, 0, 1, 2, 14, 63, },
+ { 1, 0, 1, 2, 14, 63, },
+ { 0, 0, 1, 3, 1, 63, },
+ { 2, 0, 1, 3, 1, 63, },
+ { 1, 0, 1, 3, 1, 63, },
+ { 0, 0, 1, 3, 2, 63, },
+ { 2, 0, 1, 3, 2, 63, },
+ { 1, 0, 1, 3, 2, 63, },
+ { 0, 0, 1, 3, 3, 46, },
+ { 2, 0, 1, 3, 3, 40, },
+ { 1, 0, 1, 3, 3, 40, },
+ { 0, 0, 1, 3, 4, 46, },
+ { 2, 0, 1, 3, 4, 40, },
+ { 1, 0, 1, 3, 4, 40, },
+ { 0, 0, 1, 3, 5, 46, },
+ { 2, 0, 1, 3, 5, 40, },
+ { 1, 0, 1, 3, 5, 40, },
+ { 0, 0, 1, 3, 6, 46, },
+ { 2, 0, 1, 3, 6, 40, },
+ { 1, 0, 1, 3, 6, 40, },
+ { 0, 0, 1, 3, 7, 46, },
+ { 2, 0, 1, 3, 7, 40, },
+ { 1, 0, 1, 3, 7, 40, },
+ { 0, 0, 1, 3, 8, 46, },
+ { 2, 0, 1, 3, 8, 40, },
+ { 1, 0, 1, 3, 8, 40, },
+ { 0, 0, 1, 3, 9, 46, },
+ { 2, 0, 1, 3, 9, 40, },
+ { 1, 0, 1, 3, 9, 40, },
+ { 0, 0, 1, 3, 10, 46, },
+ { 2, 0, 1, 3, 10, 40, },
+ { 1, 0, 1, 3, 10, 40, },
+ { 0, 0, 1, 3, 11, 46, },
+ { 2, 0, 1, 3, 11, 40, },
+ { 1, 0, 1, 3, 11, 40, },
+ { 0, 0, 1, 3, 12, 63, },
+ { 2, 0, 1, 3, 12, 40, },
+ { 1, 0, 1, 3, 12, 40, },
+ { 0, 0, 1, 3, 13, 63, },
+ { 2, 0, 1, 3, 13, 40, },
+ { 1, 0, 1, 3, 13, 40, },
+ { 0, 0, 1, 3, 14, 63, },
+ { 2, 0, 1, 3, 14, 63, },
+ { 1, 0, 1, 3, 14, 63, },
+ { 0, 0, 1, 6, 1, 63, },
+ { 2, 0, 1, 6, 1, 63, },
+ { 1, 0, 1, 6, 1, 63, },
+ { 0, 0, 1, 6, 2, 63, },
+ { 2, 0, 1, 6, 2, 63, },
+ { 1, 0, 1, 6, 2, 63, },
+ { 0, 0, 1, 6, 3, 46, },
+ { 2, 0, 1, 6, 3, 40, },
+ { 1, 0, 1, 6, 3, 40, },
+ { 0, 0, 1, 6, 4, 46, },
+ { 2, 0, 1, 6, 4, 40, },
+ { 1, 0, 1, 6, 4, 40, },
+ { 0, 0, 1, 6, 5, 46, },
+ { 2, 0, 1, 6, 5, 40, },
+ { 1, 0, 1, 6, 5, 40, },
+ { 0, 0, 1, 6, 6, 46, },
+ { 2, 0, 1, 6, 6, 40, },
+ { 1, 0, 1, 6, 6, 40, },
+ { 0, 0, 1, 6, 7, 46, },
+ { 2, 0, 1, 6, 7, 40, },
+ { 1, 0, 1, 6, 7, 40, },
+ { 0, 0, 1, 6, 8, 46, },
+ { 2, 0, 1, 6, 8, 40, },
+ { 1, 0, 1, 6, 8, 40, },
+ { 0, 0, 1, 6, 9, 46, },
+ { 2, 0, 1, 6, 9, 40, },
+ { 1, 0, 1, 6, 9, 40, },
+ { 0, 0, 1, 6, 10, 46, },
+ { 2, 0, 1, 6, 10, 40, },
+ { 1, 0, 1, 6, 10, 40, },
+ { 0, 0, 1, 6, 11, 46, },
+ { 2, 0, 1, 6, 11, 40, },
+ { 1, 0, 1, 6, 11, 40, },
+ { 0, 0, 1, 6, 12, 63, },
+ { 2, 0, 1, 6, 12, 40, },
+ { 1, 0, 1, 6, 12, 40, },
+ { 0, 0, 1, 6, 13, 63, },
+ { 2, 0, 1, 6, 13, 40, },
+ { 1, 0, 1, 6, 13, 40, },
+ { 0, 0, 1, 6, 14, 63, },
+ { 2, 0, 1, 6, 14, 63, },
+ { 1, 0, 1, 6, 14, 63, },
+ { 0, 0, 1, 7, 1, 63, },
+ { 2, 0, 1, 7, 1, 63, },
+ { 1, 0, 1, 7, 1, 63, },
+ { 0, 0, 1, 7, 2, 63, },
+ { 2, 0, 1, 7, 2, 63, },
+ { 1, 0, 1, 7, 2, 63, },
+ { 0, 0, 1, 7, 3, 46, },
+ { 2, 0, 1, 7, 3, 40, },
+ { 1, 0, 1, 7, 3, 40, },
+ { 0, 0, 1, 7, 4, 46, },
+ { 2, 0, 1, 7, 4, 40, },
+ { 1, 0, 1, 7, 4, 40, },
+ { 0, 0, 1, 7, 5, 46, },
+ { 2, 0, 1, 7, 5, 40, },
+ { 1, 0, 1, 7, 5, 40, },
+ { 0, 0, 1, 7, 6, 46, },
+ { 2, 0, 1, 7, 6, 40, },
+ { 1, 0, 1, 7, 6, 40, },
+ { 0, 0, 1, 7, 7, 46, },
+ { 2, 0, 1, 7, 7, 40, },
+ { 1, 0, 1, 7, 7, 40, },
+ { 0, 0, 1, 7, 8, 46, },
+ { 2, 0, 1, 7, 8, 40, },
+ { 1, 0, 1, 7, 8, 40, },
+ { 0, 0, 1, 7, 9, 46, },
+ { 2, 0, 1, 7, 9, 40, },
+ { 1, 0, 1, 7, 9, 40, },
+ { 0, 0, 1, 7, 10, 46, },
+ { 2, 0, 1, 7, 10, 40, },
+ { 1, 0, 1, 7, 10, 40, },
+ { 0, 0, 1, 7, 11, 46, },
+ { 2, 0, 1, 7, 11, 40, },
+ { 1, 0, 1, 7, 11, 40, },
+ { 0, 0, 1, 7, 12, 63, },
+ { 2, 0, 1, 7, 12, 40, },
+ { 1, 0, 1, 7, 12, 40, },
+ { 0, 0, 1, 7, 13, 63, },
+ { 2, 0, 1, 7, 13, 40, },
+ { 1, 0, 1, 7, 13, 40, },
+ { 0, 0, 1, 7, 14, 63, },
+ { 2, 0, 1, 7, 14, 63, },
+ { 1, 0, 1, 7, 14, 63, },
+ { 0, 1, 0, 1, 36, 46, },
+ { 2, 1, 0, 1, 36, 40, },
+ { 1, 1, 0, 1, 36, 40, },
+ { 0, 1, 0, 1, 40, 46, },
+ { 2, 1, 0, 1, 40, 40, },
+ { 1, 1, 0, 1, 40, 40, },
+ { 0, 1, 0, 1, 44, 46, },
+ { 2, 1, 0, 1, 44, 40, },
+ { 1, 1, 0, 1, 44, 40, },
+ { 0, 1, 0, 1, 48, 46, },
+ { 2, 1, 0, 1, 48, 40, },
+ { 1, 1, 0, 1, 48, 40, },
+ { 0, 1, 0, 1, 52, 46, },
+ { 2, 1, 0, 1, 52, 40, },
+ { 1, 1, 0, 1, 52, 40, },
+ { 0, 1, 0, 1, 56, 46, },
+ { 2, 1, 0, 1, 56, 40, },
+ { 1, 1, 0, 1, 56, 40, },
+ { 0, 1, 0, 1, 60, 46, },
+ { 2, 1, 0, 1, 60, 40, },
+ { 1, 1, 0, 1, 60, 40, },
+ { 0, 1, 0, 1, 64, 46, },
+ { 2, 1, 0, 1, 64, 40, },
+ { 1, 1, 0, 1, 64, 40, },
+ { 0, 1, 0, 1, 100, 46, },
+ { 2, 1, 0, 1, 100, 40, },
+ { 1, 1, 0, 1, 100, 40, },
+ { 0, 1, 0, 1, 104, 46, },
+ { 2, 1, 0, 1, 104, 40, },
+ { 1, 1, 0, 1, 104, 40, },
+ { 0, 1, 0, 1, 108, 46, },
+ { 2, 1, 0, 1, 108, 40, },
+ { 1, 1, 0, 1, 108, 40, },
+ { 0, 1, 0, 1, 112, 46, },
+ { 2, 1, 0, 1, 112, 40, },
+ { 1, 1, 0, 1, 112, 40, },
+ { 0, 1, 0, 1, 116, 46, },
+ { 2, 1, 0, 1, 116, 40, },
+ { 1, 1, 0, 1, 116, 40, },
+ { 0, 1, 0, 1, 120, 46, },
+ { 2, 1, 0, 1, 120, 40, },
+ { 1, 1, 0, 1, 120, 40, },
+ { 0, 1, 0, 1, 124, 46, },
+ { 2, 1, 0, 1, 124, 40, },
+ { 1, 1, 0, 1, 124, 40, },
+ { 0, 1, 0, 1, 128, 46, },
+ { 2, 1, 0, 1, 128, 40, },
+ { 1, 1, 0, 1, 128, 40, },
+ { 0, 1, 0, 1, 132, 46, },
+ { 2, 1, 0, 1, 132, 40, },
+ { 1, 1, 0, 1, 132, 40, },
+ { 0, 1, 0, 1, 136, 46, },
+ { 2, 1, 0, 1, 136, 40, },
+ { 1, 1, 0, 1, 136, 40, },
+ { 0, 1, 0, 1, 140, 46, },
+ { 2, 1, 0, 1, 140, 40, },
+ { 1, 1, 0, 1, 140, 40, },
+ { 0, 1, 0, 1, 149, 46, },
+ { 2, 1, 0, 1, 149, 40, },
+ { 1, 1, 0, 1, 149, 63, },
+ { 0, 1, 0, 1, 153, 46, },
+ { 2, 1, 0, 1, 153, 40, },
+ { 1, 1, 0, 1, 153, 63, },
+ { 0, 1, 0, 1, 157, 46, },
+ { 2, 1, 0, 1, 157, 40, },
+ { 1, 1, 0, 1, 157, 63, },
+ { 0, 1, 0, 1, 161, 46, },
+ { 2, 1, 0, 1, 161, 40, },
+ { 1, 1, 0, 1, 161, 63, },
+ { 0, 1, 0, 1, 165, 46, },
+ { 2, 1, 0, 1, 165, 40, },
+ { 1, 1, 0, 1, 165, 63, },
+ { 0, 1, 0, 2, 36, 46, },
+ { 2, 1, 0, 2, 36, 40, },
+ { 1, 1, 0, 2, 36, 40, },
+ { 0, 1, 0, 2, 40, 46, },
+ { 2, 1, 0, 2, 40, 40, },
+ { 1, 1, 0, 2, 40, 40, },
+ { 0, 1, 0, 2, 44, 46, },
+ { 2, 1, 0, 2, 44, 40, },
+ { 1, 1, 0, 2, 44, 40, },
+ { 0, 1, 0, 2, 48, 46, },
+ { 2, 1, 0, 2, 48, 40, },
+ { 1, 1, 0, 2, 48, 40, },
+ { 0, 1, 0, 2, 52, 46, },
+ { 2, 1, 0, 2, 52, 40, },
+ { 1, 1, 0, 2, 52, 40, },
+ { 0, 1, 0, 2, 56, 46, },
+ { 2, 1, 0, 2, 56, 40, },
+ { 1, 1, 0, 2, 56, 40, },
+ { 0, 1, 0, 2, 60, 46, },
+ { 2, 1, 0, 2, 60, 40, },
+ { 1, 1, 0, 2, 60, 40, },
+ { 0, 1, 0, 2, 64, 46, },
+ { 2, 1, 0, 2, 64, 40, },
+ { 1, 1, 0, 2, 64, 40, },
+ { 0, 1, 0, 2, 100, 46, },
+ { 2, 1, 0, 2, 100, 40, },
+ { 1, 1, 0, 2, 100, 40, },
+ { 0, 1, 0, 2, 104, 46, },
+ { 2, 1, 0, 2, 104, 40, },
+ { 1, 1, 0, 2, 104, 40, },
+ { 0, 1, 0, 2, 108, 46, },
+ { 2, 1, 0, 2, 108, 40, },
+ { 1, 1, 0, 2, 108, 40, },
+ { 0, 1, 0, 2, 112, 46, },
+ { 2, 1, 0, 2, 112, 40, },
+ { 1, 1, 0, 2, 112, 40, },
+ { 0, 1, 0, 2, 116, 46, },
+ { 2, 1, 0, 2, 116, 40, },
+ { 1, 1, 0, 2, 116, 40, },
+ { 0, 1, 0, 2, 120, 46, },
+ { 2, 1, 0, 2, 120, 40, },
+ { 1, 1, 0, 2, 120, 40, },
+ { 0, 1, 0, 2, 124, 46, },
+ { 2, 1, 0, 2, 124, 40, },
+ { 1, 1, 0, 2, 124, 40, },
+ { 0, 1, 0, 2, 128, 46, },
+ { 2, 1, 0, 2, 128, 40, },
+ { 1, 1, 0, 2, 128, 40, },
+ { 0, 1, 0, 2, 132, 46, },
+ { 2, 1, 0, 2, 132, 40, },
+ { 1, 1, 0, 2, 132, 40, },
+ { 0, 1, 0, 2, 136, 46, },
+ { 2, 1, 0, 2, 136, 40, },
+ { 1, 1, 0, 2, 136, 40, },
+ { 0, 1, 0, 2, 140, 46, },
+ { 2, 1, 0, 2, 140, 40, },
+ { 1, 1, 0, 2, 140, 40, },
+ { 0, 1, 0, 2, 149, 46, },
+ { 2, 1, 0, 2, 149, 40, },
+ { 1, 1, 0, 2, 149, 63, },
+ { 0, 1, 0, 2, 153, 46, },
+ { 2, 1, 0, 2, 153, 40, },
+ { 1, 1, 0, 2, 153, 63, },
+ { 0, 1, 0, 2, 157, 46, },
+ { 2, 1, 0, 2, 157, 40, },
+ { 1, 1, 0, 2, 157, 63, },
+ { 0, 1, 0, 2, 161, 46, },
+ { 2, 1, 0, 2, 161, 40, },
+ { 1, 1, 0, 2, 161, 63, },
+ { 0, 1, 0, 2, 165, 46, },
+ { 2, 1, 0, 2, 165, 40, },
+ { 1, 1, 0, 2, 165, 63, },
+ { 0, 1, 0, 3, 36, 46, },
+ { 2, 1, 0, 3, 36, 40, },
+ { 1, 1, 0, 3, 36, 40, },
+ { 0, 1, 0, 3, 40, 46, },
+ { 2, 1, 0, 3, 40, 40, },
+ { 1, 1, 0, 3, 40, 40, },
+ { 0, 1, 0, 3, 44, 46, },
+ { 2, 1, 0, 3, 44, 40, },
+ { 1, 1, 0, 3, 44, 40, },
+ { 0, 1, 0, 3, 48, 46, },
+ { 2, 1, 0, 3, 48, 40, },
+ { 1, 1, 0, 3, 48, 40, },
+ { 0, 1, 0, 3, 52, 46, },
+ { 2, 1, 0, 3, 52, 40, },
+ { 1, 1, 0, 3, 52, 40, },
+ { 0, 1, 0, 3, 56, 46, },
+ { 2, 1, 0, 3, 56, 40, },
+ { 1, 1, 0, 3, 56, 40, },
+ { 0, 1, 0, 3, 60, 46, },
+ { 2, 1, 0, 3, 60, 40, },
+ { 1, 1, 0, 3, 60, 40, },
+ { 0, 1, 0, 3, 64, 46, },
+ { 2, 1, 0, 3, 64, 40, },
+ { 1, 1, 0, 3, 64, 40, },
+ { 0, 1, 0, 3, 100, 46, },
+ { 2, 1, 0, 3, 100, 40, },
+ { 1, 1, 0, 3, 100, 40, },
+ { 0, 1, 0, 3, 104, 46, },
+ { 2, 1, 0, 3, 104, 40, },
+ { 1, 1, 0, 3, 104, 40, },
+ { 0, 1, 0, 3, 108, 46, },
+ { 2, 1, 0, 3, 108, 40, },
+ { 1, 1, 0, 3, 108, 40, },
+ { 0, 1, 0, 3, 112, 46, },
+ { 2, 1, 0, 3, 112, 40, },
+ { 1, 1, 0, 3, 112, 40, },
+ { 0, 1, 0, 3, 116, 46, },
+ { 2, 1, 0, 3, 116, 40, },
+ { 1, 1, 0, 3, 116, 40, },
+ { 0, 1, 0, 3, 120, 46, },
+ { 2, 1, 0, 3, 120, 40, },
+ { 1, 1, 0, 3, 120, 40, },
+ { 0, 1, 0, 3, 124, 46, },
+ { 2, 1, 0, 3, 124, 40, },
+ { 1, 1, 0, 3, 124, 40, },
+ { 0, 1, 0, 3, 128, 46, },
+ { 2, 1, 0, 3, 128, 40, },
+ { 1, 1, 0, 3, 128, 40, },
+ { 0, 1, 0, 3, 132, 46, },
+ { 2, 1, 0, 3, 132, 40, },
+ { 1, 1, 0, 3, 132, 40, },
+ { 0, 1, 0, 3, 136, 46, },
+ { 2, 1, 0, 3, 136, 40, },
+ { 1, 1, 0, 3, 136, 40, },
+ { 0, 1, 0, 3, 140, 46, },
+ { 2, 1, 0, 3, 140, 40, },
+ { 1, 1, 0, 3, 140, 40, },
+ { 0, 1, 0, 3, 149, 46, },
+ { 2, 1, 0, 3, 149, 40, },
+ { 1, 1, 0, 3, 149, 63, },
+ { 0, 1, 0, 3, 153, 46, },
+ { 2, 1, 0, 3, 153, 40, },
+ { 1, 1, 0, 3, 153, 63, },
+ { 0, 1, 0, 3, 157, 46, },
+ { 2, 1, 0, 3, 157, 40, },
+ { 1, 1, 0, 3, 157, 63, },
+ { 0, 1, 0, 3, 161, 46, },
+ { 2, 1, 0, 3, 161, 40, },
+ { 1, 1, 0, 3, 161, 63, },
+ { 0, 1, 0, 3, 165, 46, },
+ { 2, 1, 0, 3, 165, 40, },
+ { 1, 1, 0, 3, 165, 63, },
+ { 0, 1, 0, 6, 36, 46, },
+ { 2, 1, 0, 6, 36, 40, },
+ { 1, 1, 0, 6, 36, 40, },
+ { 0, 1, 0, 6, 40, 46, },
+ { 2, 1, 0, 6, 40, 40, },
+ { 1, 1, 0, 6, 40, 40, },
+ { 0, 1, 0, 6, 44, 46, },
+ { 2, 1, 0, 6, 44, 40, },
+ { 1, 1, 0, 6, 44, 40, },
+ { 0, 1, 0, 6, 48, 46, },
+ { 2, 1, 0, 6, 48, 40, },
+ { 1, 1, 0, 6, 48, 40, },
+ { 0, 1, 0, 6, 52, 46, },
+ { 2, 1, 0, 6, 52, 40, },
+ { 1, 1, 0, 6, 52, 40, },
+ { 0, 1, 0, 6, 56, 46, },
+ { 2, 1, 0, 6, 56, 40, },
+ { 1, 1, 0, 6, 56, 40, },
+ { 0, 1, 0, 6, 60, 46, },
+ { 2, 1, 0, 6, 60, 40, },
+ { 1, 1, 0, 6, 60, 40, },
+ { 0, 1, 0, 6, 64, 46, },
+ { 2, 1, 0, 6, 64, 40, },
+ { 1, 1, 0, 6, 64, 40, },
+ { 0, 1, 0, 6, 100, 46, },
+ { 2, 1, 0, 6, 100, 40, },
+ { 1, 1, 0, 6, 100, 40, },
+ { 0, 1, 0, 6, 104, 46, },
+ { 2, 1, 0, 6, 104, 40, },
+ { 1, 1, 0, 6, 104, 40, },
+ { 0, 1, 0, 6, 108, 46, },
+ { 2, 1, 0, 6, 108, 40, },
+ { 1, 1, 0, 6, 108, 40, },
+ { 0, 1, 0, 6, 112, 46, },
+ { 2, 1, 0, 6, 112, 40, },
+ { 1, 1, 0, 6, 112, 40, },
+ { 0, 1, 0, 6, 116, 46, },
+ { 2, 1, 0, 6, 116, 40, },
+ { 1, 1, 0, 6, 116, 40, },
+ { 0, 1, 0, 6, 120, 46, },
+ { 2, 1, 0, 6, 120, 40, },
+ { 1, 1, 0, 6, 120, 40, },
+ { 0, 1, 0, 6, 124, 46, },
+ { 2, 1, 0, 6, 124, 40, },
+ { 1, 1, 0, 6, 124, 40, },
+ { 0, 1, 0, 6, 128, 46, },
+ { 2, 1, 0, 6, 128, 40, },
+ { 1, 1, 0, 6, 128, 40, },
+ { 0, 1, 0, 6, 132, 46, },
+ { 2, 1, 0, 6, 132, 40, },
+ { 1, 1, 0, 6, 132, 40, },
+ { 0, 1, 0, 6, 136, 46, },
+ { 2, 1, 0, 6, 136, 40, },
+ { 1, 1, 0, 6, 136, 40, },
+ { 0, 1, 0, 6, 140, 46, },
+ { 2, 1, 0, 6, 140, 40, },
+ { 1, 1, 0, 6, 140, 40, },
+ { 0, 1, 0, 6, 149, 46, },
+ { 2, 1, 0, 6, 149, 40, },
+ { 1, 1, 0, 6, 149, 63, },
+ { 0, 1, 0, 6, 153, 46, },
+ { 2, 1, 0, 6, 153, 40, },
+ { 1, 1, 0, 6, 153, 63, },
+ { 0, 1, 0, 6, 157, 46, },
+ { 2, 1, 0, 6, 157, 40, },
+ { 1, 1, 0, 6, 157, 63, },
+ { 0, 1, 0, 6, 161, 46, },
+ { 2, 1, 0, 6, 161, 40, },
+ { 1, 1, 0, 6, 161, 63, },
+ { 0, 1, 0, 6, 165, 46, },
+ { 2, 1, 0, 6, 165, 40, },
+ { 1, 1, 0, 6, 165, 63, },
+ { 0, 1, 0, 7, 36, 46, },
+ { 2, 1, 0, 7, 36, 40, },
+ { 1, 1, 0, 7, 36, 40, },
+ { 0, 1, 0, 7, 40, 46, },
+ { 2, 1, 0, 7, 40, 40, },
+ { 1, 1, 0, 7, 40, 40, },
+ { 0, 1, 0, 7, 44, 46, },
+ { 2, 1, 0, 7, 44, 40, },
+ { 1, 1, 0, 7, 44, 40, },
+ { 0, 1, 0, 7, 48, 46, },
+ { 2, 1, 0, 7, 48, 40, },
+ { 1, 1, 0, 7, 48, 40, },
+ { 0, 1, 0, 7, 52, 46, },
+ { 2, 1, 0, 7, 52, 40, },
+ { 1, 1, 0, 7, 52, 40, },
+ { 0, 1, 0, 7, 56, 46, },
+ { 2, 1, 0, 7, 56, 40, },
+ { 1, 1, 0, 7, 56, 40, },
+ { 0, 1, 0, 7, 60, 46, },
+ { 2, 1, 0, 7, 60, 40, },
+ { 1, 1, 0, 7, 60, 40, },
+ { 0, 1, 0, 7, 64, 46, },
+ { 2, 1, 0, 7, 64, 40, },
+ { 1, 1, 0, 7, 64, 40, },
+ { 0, 1, 0, 7, 100, 46, },
+ { 2, 1, 0, 7, 100, 40, },
+ { 1, 1, 0, 7, 100, 40, },
+ { 0, 1, 0, 7, 104, 46, },
+ { 2, 1, 0, 7, 104, 40, },
+ { 1, 1, 0, 7, 104, 40, },
+ { 0, 1, 0, 7, 108, 46, },
+ { 2, 1, 0, 7, 108, 40, },
+ { 1, 1, 0, 7, 108, 40, },
+ { 0, 1, 0, 7, 112, 46, },
+ { 2, 1, 0, 7, 112, 40, },
+ { 1, 1, 0, 7, 112, 40, },
+ { 0, 1, 0, 7, 116, 46, },
+ { 2, 1, 0, 7, 116, 40, },
+ { 1, 1, 0, 7, 116, 40, },
+ { 0, 1, 0, 7, 120, 46, },
+ { 2, 1, 0, 7, 120, 40, },
+ { 1, 1, 0, 7, 120, 40, },
+ { 0, 1, 0, 7, 124, 46, },
+ { 2, 1, 0, 7, 124, 40, },
+ { 1, 1, 0, 7, 124, 40, },
+ { 0, 1, 0, 7, 128, 46, },
+ { 2, 1, 0, 7, 128, 40, },
+ { 1, 1, 0, 7, 128, 40, },
+ { 0, 1, 0, 7, 132, 46, },
+ { 2, 1, 0, 7, 132, 40, },
+ { 1, 1, 0, 7, 132, 40, },
+ { 0, 1, 0, 7, 136, 46, },
+ { 2, 1, 0, 7, 136, 40, },
+ { 1, 1, 0, 7, 136, 40, },
+ { 0, 1, 0, 7, 140, 46, },
+ { 2, 1, 0, 7, 140, 40, },
+ { 1, 1, 0, 7, 140, 40, },
+ { 0, 1, 0, 7, 149, 46, },
+ { 2, 1, 0, 7, 149, 40, },
+ { 1, 1, 0, 7, 149, 63, },
+ { 0, 1, 0, 7, 153, 46, },
+ { 2, 1, 0, 7, 153, 40, },
+ { 1, 1, 0, 7, 153, 63, },
+ { 0, 1, 0, 7, 157, 46, },
+ { 2, 1, 0, 7, 157, 40, },
+ { 1, 1, 0, 7, 157, 63, },
+ { 0, 1, 0, 7, 161, 46, },
+ { 2, 1, 0, 7, 161, 40, },
+ { 1, 1, 0, 7, 161, 63, },
+ { 0, 1, 0, 7, 165, 46, },
+ { 2, 1, 0, 7, 165, 40, },
+ { 1, 1, 0, 7, 165, 63, },
+ { 0, 1, 1, 2, 38, 46, },
+ { 2, 1, 1, 2, 38, 40, },
+ { 1, 1, 1, 2, 38, 40, },
+ { 0, 1, 1, 2, 46, 46, },
+ { 2, 1, 1, 2, 46, 40, },
+ { 1, 1, 1, 2, 46, 40, },
+ { 0, 1, 1, 2, 54, 46, },
+ { 2, 1, 1, 2, 54, 40, },
+ { 1, 1, 1, 2, 54, 40, },
+ { 0, 1, 1, 2, 62, 46, },
+ { 2, 1, 1, 2, 62, 40, },
+ { 1, 1, 1, 2, 62, 40, },
+ { 0, 1, 1, 2, 102, 46, },
+ { 2, 1, 1, 2, 102, 40, },
+ { 1, 1, 1, 2, 102, 40, },
+ { 0, 1, 1, 2, 110, 46, },
+ { 2, 1, 1, 2, 110, 40, },
+ { 1, 1, 1, 2, 110, 40, },
+ { 0, 1, 1, 2, 118, 46, },
+ { 2, 1, 1, 2, 118, 40, },
+ { 1, 1, 1, 2, 118, 40, },
+ { 0, 1, 1, 2, 126, 46, },
+ { 2, 1, 1, 2, 126, 40, },
+ { 1, 1, 1, 2, 126, 40, },
+ { 0, 1, 1, 2, 134, 46, },
+ { 2, 1, 1, 2, 134, 40, },
+ { 1, 1, 1, 2, 134, 40, },
+ { 0, 1, 1, 2, 151, 46, },
+ { 2, 1, 1, 2, 151, 40, },
+ { 1, 1, 1, 2, 151, 63, },
+ { 0, 1, 1, 2, 159, 46, },
+ { 2, 1, 1, 2, 159, 40, },
+ { 1, 1, 1, 2, 159, 63, },
+ { 0, 1, 1, 3, 38, 46, },
+ { 2, 1, 1, 3, 38, 40, },
+ { 1, 1, 1, 3, 38, 40, },
+ { 0, 1, 1, 3, 46, 46, },
+ { 2, 1, 1, 3, 46, 40, },
+ { 1, 1, 1, 3, 46, 40, },
+ { 0, 1, 1, 3, 54, 46, },
+ { 2, 1, 1, 3, 54, 40, },
+ { 1, 1, 1, 3, 54, 40, },
+ { 0, 1, 1, 3, 62, 46, },
+ { 2, 1, 1, 3, 62, 40, },
+ { 1, 1, 1, 3, 62, 40, },
+ { 0, 1, 1, 3, 102, 46, },
+ { 2, 1, 1, 3, 102, 40, },
+ { 1, 1, 1, 3, 102, 40, },
+ { 0, 1, 1, 3, 110, 46, },
+ { 2, 1, 1, 3, 110, 40, },
+ { 1, 1, 1, 3, 110, 40, },
+ { 0, 1, 1, 3, 118, 46, },
+ { 2, 1, 1, 3, 118, 40, },
+ { 1, 1, 1, 3, 118, 40, },
+ { 0, 1, 1, 3, 126, 46, },
+ { 2, 1, 1, 3, 126, 40, },
+ { 1, 1, 1, 3, 126, 40, },
+ { 0, 1, 1, 3, 134, 46, },
+ { 2, 1, 1, 3, 134, 40, },
+ { 1, 1, 1, 3, 134, 40, },
+ { 0, 1, 1, 3, 151, 46, },
+ { 2, 1, 1, 3, 151, 40, },
+ { 1, 1, 1, 3, 151, 63, },
+ { 0, 1, 1, 3, 159, 46, },
+ { 2, 1, 1, 3, 159, 40, },
+ { 1, 1, 1, 3, 159, 63, },
+ { 0, 1, 1, 6, 38, 46, },
+ { 2, 1, 1, 6, 38, 40, },
+ { 1, 1, 1, 6, 38, 40, },
+ { 0, 1, 1, 6, 46, 46, },
+ { 2, 1, 1, 6, 46, 40, },
+ { 1, 1, 1, 6, 46, 40, },
+ { 0, 1, 1, 6, 54, 46, },
+ { 2, 1, 1, 6, 54, 40, },
+ { 1, 1, 1, 6, 54, 40, },
+ { 0, 1, 1, 6, 62, 46, },
+ { 2, 1, 1, 6, 62, 40, },
+ { 1, 1, 1, 6, 62, 40, },
+ { 0, 1, 1, 6, 102, 46, },
+ { 2, 1, 1, 6, 102, 40, },
+ { 1, 1, 1, 6, 102, 40, },
+ { 0, 1, 1, 6, 110, 46, },
+ { 2, 1, 1, 6, 110, 40, },
+ { 1, 1, 1, 6, 110, 40, },
+ { 0, 1, 1, 6, 118, 46, },
+ { 2, 1, 1, 6, 118, 40, },
+ { 1, 1, 1, 6, 118, 40, },
+ { 0, 1, 1, 6, 126, 46, },
+ { 2, 1, 1, 6, 126, 40, },
+ { 1, 1, 1, 6, 126, 40, },
+ { 0, 1, 1, 6, 134, 46, },
+ { 2, 1, 1, 6, 134, 40, },
+ { 1, 1, 1, 6, 134, 40, },
+ { 0, 1, 1, 6, 151, 46, },
+ { 2, 1, 1, 6, 151, 40, },
+ { 1, 1, 1, 6, 151, 63, },
+ { 0, 1, 1, 6, 159, 46, },
+ { 2, 1, 1, 6, 159, 40, },
+ { 1, 1, 1, 6, 159, 63, },
+ { 0, 1, 1, 7, 38, 46, },
+ { 2, 1, 1, 7, 38, 40, },
+ { 1, 1, 1, 7, 38, 40, },
+ { 0, 1, 1, 7, 46, 46, },
+ { 2, 1, 1, 7, 46, 40, },
+ { 1, 1, 1, 7, 46, 40, },
+ { 0, 1, 1, 7, 54, 46, },
+ { 2, 1, 1, 7, 54, 40, },
+ { 1, 1, 1, 7, 54, 40, },
+ { 0, 1, 1, 7, 62, 46, },
+ { 2, 1, 1, 7, 62, 40, },
+ { 1, 1, 1, 7, 62, 40, },
+ { 0, 1, 1, 7, 102, 46, },
+ { 2, 1, 1, 7, 102, 40, },
+ { 1, 1, 1, 7, 102, 40, },
+ { 0, 1, 1, 7, 110, 46, },
+ { 2, 1, 1, 7, 110, 40, },
+ { 1, 1, 1, 7, 110, 40, },
+ { 0, 1, 1, 7, 118, 46, },
+ { 2, 1, 1, 7, 118, 40, },
+ { 1, 1, 1, 7, 118, 40, },
+ { 0, 1, 1, 7, 126, 46, },
+ { 2, 1, 1, 7, 126, 40, },
+ { 1, 1, 1, 7, 126, 40, },
+ { 0, 1, 1, 7, 134, 46, },
+ { 2, 1, 1, 7, 134, 40, },
+ { 1, 1, 1, 7, 134, 40, },
+ { 0, 1, 1, 7, 151, 46, },
+ { 2, 1, 1, 7, 151, 40, },
+ { 1, 1, 1, 7, 151, 63, },
+ { 0, 1, 1, 7, 159, 46, },
+ { 2, 1, 1, 7, 159, 40, },
+ { 1, 1, 1, 7, 159, 63, },
+ { 0, 1, 2, 4, 42, 46, },
+ { 2, 1, 2, 4, 42, 40, },
+ { 1, 1, 2, 4, 42, 40, },
+ { 0, 1, 2, 4, 58, 46, },
+ { 2, 1, 2, 4, 58, 40, },
+ { 1, 1, 2, 4, 58, 40, },
+ { 0, 1, 2, 4, 106, 46, },
+ { 2, 1, 2, 4, 106, 40, },
+ { 1, 1, 2, 4, 106, 40, },
+ { 0, 1, 2, 4, 122, 46, },
+ { 2, 1, 2, 4, 122, 40, },
+ { 1, 1, 2, 4, 122, 40, },
+ { 0, 1, 2, 4, 155, 46, },
+ { 2, 1, 2, 4, 155, 40, },
+ { 1, 1, 2, 4, 155, 63, },
+ { 0, 1, 2, 5, 42, 46, },
+ { 2, 1, 2, 5, 42, 40, },
+ { 1, 1, 2, 5, 42, 40, },
+ { 0, 1, 2, 5, 58, 46, },
+ { 2, 1, 2, 5, 58, 40, },
+ { 1, 1, 2, 5, 58, 40, },
+ { 0, 1, 2, 5, 106, 46, },
+ { 2, 1, 2, 5, 106, 40, },
+ { 1, 1, 2, 5, 106, 40, },
+ { 0, 1, 2, 5, 122, 46, },
+ { 2, 1, 2, 5, 122, 40, },
+ { 1, 1, 2, 5, 122, 40, },
+ { 0, 1, 2, 5, 155, 46, },
+ { 2, 1, 2, 5, 155, 40, },
+ { 1, 1, 2, 5, 155, 63, },
+ { 0, 1, 2, 8, 42, 46, },
+ { 2, 1, 2, 8, 42, 40, },
+ { 1, 1, 2, 8, 42, 40, },
+ { 0, 1, 2, 8, 58, 46, },
+ { 2, 1, 2, 8, 58, 40, },
+ { 1, 1, 2, 8, 58, 40, },
+ { 0, 1, 2, 8, 106, 46, },
+ { 2, 1, 2, 8, 106, 40, },
+ { 1, 1, 2, 8, 106, 40, },
+ { 0, 1, 2, 8, 122, 46, },
+ { 2, 1, 2, 8, 122, 40, },
+ { 1, 1, 2, 8, 122, 40, },
+ { 0, 1, 2, 8, 155, 46, },
+ { 2, 1, 2, 8, 155, 40, },
+ { 1, 1, 2, 8, 155, 63, },
+ { 0, 1, 2, 9, 42, 46, },
+ { 2, 1, 2, 9, 42, 40, },
+ { 1, 1, 2, 9, 42, 40, },
+ { 0, 1, 2, 9, 58, 46, },
+ { 2, 1, 2, 9, 58, 40, },
+ { 1, 1, 2, 9, 58, 40, },
+ { 0, 1, 2, 9, 106, 46, },
+ { 2, 1, 2, 9, 106, 40, },
+ { 1, 1, 2, 9, 106, 40, },
+ { 0, 1, 2, 9, 122, 46, },
+ { 2, 1, 2, 9, 122, 40, },
+ { 1, 1, 2, 9, 122, 40, },
+ { 0, 1, 2, 9, 155, 46, },
+ { 2, 1, 2, 9, 155, 40, },
+ { 1, 1, 2, 9, 155, 63, },
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8814a_txpwr_lmt_type5);
+
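Each row in these rtw8814a_txpwr_lmt_* arrays is one struct rtw_txpwr_lmt_cfg_pair initializer; a minimal sketch of the assumed field layout (struct name as used in this diff, field names and types taken from the rtw88 driver headers, comments inferred from the data patterns above) is:

struct rtw_txpwr_lmt_cfg_pair {
	u8 regd;       /* regulatory domain index (driver's regulatory enum) */
	u8 band;       /* 0 = 2.4 GHz (channels 1-14), 1 = 5 GHz (channels 36-165) */
	u8 bw;         /* channel width index: 0 = 20 MHz, 1 = 40 MHz, 2 = 80 MHz */
	u8 rs;         /* rate section (CCK/OFDM/HT/VHT, per spatial-stream count) */
	u8 ch;         /* channel number (center channel for 40/80 MHz entries) */
	s8 txpwr_lmt;  /* power limit; 63 appears to act as "no limit specified" */
};

Read under that assumption, an entry such as { 2, 1, 0, 1, 36, 40, } above means: regulatory domain 2, 5 GHz band, 20 MHz width, rate section 1, channel 36, limit value 40.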
+static const struct rtw_txpwr_lmt_cfg_pair rtw8814a_txpwr_lmt_type7[] = {
+ { 0, 0, 0, 0, 1, 44, },
+ { 2, 0, 0, 0, 1, 32, },
+ { 1, 0, 0, 0, 1, 32, },
+ { 0, 0, 0, 0, 2, 52, },
+ { 2, 0, 0, 0, 2, 32, },
+ { 1, 0, 0, 0, 2, 32, },
+ { 0, 0, 0, 0, 3, 52, },
+ { 2, 0, 0, 0, 3, 32, },
+ { 1, 0, 0, 0, 3, 32, },
+ { 0, 0, 0, 0, 4, 52, },
+ { 2, 0, 0, 0, 4, 32, },
+ { 1, 0, 0, 0, 4, 32, },
+ { 0, 0, 0, 0, 5, 52, },
+ { 2, 0, 0, 0, 5, 32, },
+ { 1, 0, 0, 0, 5, 32, },
+ { 0, 0, 0, 0, 6, 52, },
+ { 2, 0, 0, 0, 6, 32, },
+ { 1, 0, 0, 0, 6, 32, },
+ { 0, 0, 0, 0, 7, 52, },
+ { 2, 0, 0, 0, 7, 32, },
+ { 1, 0, 0, 0, 7, 32, },
+ { 0, 0, 0, 0, 8, 52, },
+ { 2, 0, 0, 0, 8, 32, },
+ { 1, 0, 0, 0, 8, 32, },
+ { 0, 0, 0, 0, 9, 52, },
+ { 2, 0, 0, 0, 9, 32, },
+ { 1, 0, 0, 0, 9, 32, },
+ { 0, 0, 0, 0, 10, 52, },
+ { 2, 0, 0, 0, 10, 32, },
+ { 1, 0, 0, 0, 10, 32, },
+ { 0, 0, 0, 0, 11, 44, },
+ { 2, 0, 0, 0, 11, 32, },
+ { 1, 0, 0, 0, 11, 32, },
+ { 0, 0, 0, 0, 12, 63, },
+ { 2, 0, 0, 0, 12, 32, },
+ { 1, 0, 0, 0, 12, 32, },
+ { 0, 0, 0, 0, 13, 63, },
+ { 2, 0, 0, 0, 13, 32, },
+ { 1, 0, 0, 0, 13, 32, },
+ { 0, 0, 0, 0, 14, 63, },
+ { 2, 0, 0, 0, 14, 63, },
+ { 1, 0, 0, 0, 14, 32, },
+ { 0, 0, 0, 1, 1, 38, },
+ { 2, 0, 0, 1, 1, 32, },
+ { 1, 0, 0, 1, 1, 32, },
+ { 0, 0, 0, 1, 2, 46, },
+ { 2, 0, 0, 1, 2, 32, },
+ { 1, 0, 0, 1, 2, 32, },
+ { 0, 0, 0, 1, 3, 46, },
+ { 2, 0, 0, 1, 3, 32, },
+ { 1, 0, 0, 1, 3, 32, },
+ { 0, 0, 0, 1, 4, 46, },
+ { 2, 0, 0, 1, 4, 32, },
+ { 1, 0, 0, 1, 4, 32, },
+ { 0, 0, 0, 1, 5, 46, },
+ { 2, 0, 0, 1, 5, 32, },
+ { 1, 0, 0, 1, 5, 32, },
+ { 0, 0, 0, 1, 6, 46, },
+ { 2, 0, 0, 1, 6, 32, },
+ { 1, 0, 0, 1, 6, 32, },
+ { 0, 0, 0, 1, 7, 46, },
+ { 2, 0, 0, 1, 7, 32, },
+ { 1, 0, 0, 1, 7, 32, },
+ { 0, 0, 0, 1, 8, 46, },
+ { 2, 0, 0, 1, 8, 32, },
+ { 1, 0, 0, 1, 8, 32, },
+ { 0, 0, 0, 1, 9, 46, },
+ { 2, 0, 0, 1, 9, 32, },
+ { 1, 0, 0, 1, 9, 32, },
+ { 0, 0, 0, 1, 10, 46, },
+ { 2, 0, 0, 1, 10, 32, },
+ { 1, 0, 0, 1, 10, 32, },
+ { 0, 0, 0, 1, 11, 38, },
+ { 2, 0, 0, 1, 11, 32, },
+ { 1, 0, 0, 1, 11, 32, },
+ { 0, 0, 0, 1, 12, 63, },
+ { 2, 0, 0, 1, 12, 32, },
+ { 1, 0, 0, 1, 12, 32, },
+ { 0, 0, 0, 1, 13, 63, },
+ { 2, 0, 0, 1, 13, 32, },
+ { 1, 0, 0, 1, 13, 32, },
+ { 0, 0, 0, 1, 14, 63, },
+ { 2, 0, 0, 1, 14, 63, },
+ { 1, 0, 0, 1, 14, 63, },
+ { 0, 0, 0, 2, 1, 34, },
+ { 2, 0, 0, 2, 1, 32, },
+ { 1, 0, 0, 2, 1, 32, },
+ { 0, 0, 0, 2, 2, 46, },
+ { 2, 0, 0, 2, 2, 32, },
+ { 1, 0, 0, 2, 2, 32, },
+ { 0, 0, 0, 2, 3, 46, },
+ { 2, 0, 0, 2, 3, 32, },
+ { 1, 0, 0, 2, 3, 32, },
+ { 0, 0, 0, 2, 4, 46, },
+ { 2, 0, 0, 2, 4, 32, },
+ { 1, 0, 0, 2, 4, 32, },
+ { 0, 0, 0, 2, 5, 46, },
+ { 2, 0, 0, 2, 5, 32, },
+ { 1, 0, 0, 2, 5, 32, },
+ { 0, 0, 0, 2, 6, 46, },
+ { 2, 0, 0, 2, 6, 32, },
+ { 1, 0, 0, 2, 6, 32, },
+ { 0, 0, 0, 2, 7, 46, },
+ { 2, 0, 0, 2, 7, 32, },
+ { 1, 0, 0, 2, 7, 32, },
+ { 0, 0, 0, 2, 8, 46, },
+ { 2, 0, 0, 2, 8, 32, },
+ { 1, 0, 0, 2, 8, 32, },
+ { 0, 0, 0, 2, 9, 46, },
+ { 2, 0, 0, 2, 9, 32, },
+ { 1, 0, 0, 2, 9, 32, },
+ { 0, 0, 0, 2, 10, 46, },
+ { 2, 0, 0, 2, 10, 32, },
+ { 1, 0, 0, 2, 10, 32, },
+ { 0, 0, 0, 2, 11, 34, },
+ { 2, 0, 0, 2, 11, 32, },
+ { 1, 0, 0, 2, 11, 32, },
+ { 0, 0, 0, 2, 12, 63, },
+ { 2, 0, 0, 2, 12, 32, },
+ { 1, 0, 0, 2, 12, 32, },
+ { 0, 0, 0, 2, 13, 63, },
+ { 2, 0, 0, 2, 13, 32, },
+ { 1, 0, 0, 2, 13, 32, },
+ { 0, 0, 0, 2, 14, 63, },
+ { 2, 0, 0, 2, 14, 63, },
+ { 1, 0, 0, 2, 14, 63, },
+ { 0, 0, 0, 3, 1, 32, },
+ { 2, 0, 0, 3, 1, 30, },
+ { 1, 0, 0, 3, 1, 30, },
+ { 0, 0, 0, 3, 2, 44, },
+ { 2, 0, 0, 3, 2, 30, },
+ { 1, 0, 0, 3, 2, 30, },
+ { 0, 0, 0, 3, 3, 44, },
+ { 2, 0, 0, 3, 3, 30, },
+ { 1, 0, 0, 3, 3, 30, },
+ { 0, 0, 0, 3, 4, 44, },
+ { 2, 0, 0, 3, 4, 30, },
+ { 1, 0, 0, 3, 4, 30, },
+ { 0, 0, 0, 3, 5, 44, },
+ { 2, 0, 0, 3, 5, 30, },
+ { 1, 0, 0, 3, 5, 30, },
+ { 0, 0, 0, 3, 6, 44, },
+ { 2, 0, 0, 3, 6, 30, },
+ { 1, 0, 0, 3, 6, 30, },
+ { 0, 0, 0, 3, 7, 44, },
+ { 2, 0, 0, 3, 7, 30, },
+ { 1, 0, 0, 3, 7, 30, },
+ { 0, 0, 0, 3, 8, 44, },
+ { 2, 0, 0, 3, 8, 30, },
+ { 1, 0, 0, 3, 8, 30, },
+ { 0, 0, 0, 3, 9, 44, },
+ { 2, 0, 0, 3, 9, 30, },
+ { 1, 0, 0, 3, 9, 30, },
+ { 0, 0, 0, 3, 10, 44, },
+ { 2, 0, 0, 3, 10, 30, },
+ { 1, 0, 0, 3, 10, 30, },
+ { 0, 0, 0, 3, 11, 32, },
+ { 2, 0, 0, 3, 11, 30, },
+ { 1, 0, 0, 3, 11, 30, },
+ { 0, 0, 0, 3, 12, 63, },
+ { 2, 0, 0, 3, 12, 30, },
+ { 1, 0, 0, 3, 12, 30, },
+ { 0, 0, 0, 3, 13, 63, },
+ { 2, 0, 0, 3, 13, 30, },
+ { 1, 0, 0, 3, 13, 30, },
+ { 0, 0, 0, 3, 14, 63, },
+ { 2, 0, 0, 3, 14, 63, },
+ { 1, 0, 0, 3, 14, 63, },
+ { 0, 0, 0, 6, 1, 30, },
+ { 2, 0, 0, 6, 1, 28, },
+ { 1, 0, 0, 6, 1, 28, },
+ { 0, 0, 0, 6, 2, 42, },
+ { 2, 0, 0, 6, 2, 28, },
+ { 1, 0, 0, 6, 2, 28, },
+ { 0, 0, 0, 6, 3, 42, },
+ { 2, 0, 0, 6, 3, 28, },
+ { 1, 0, 0, 6, 3, 28, },
+ { 0, 0, 0, 6, 4, 42, },
+ { 2, 0, 0, 6, 4, 28, },
+ { 1, 0, 0, 6, 4, 28, },
+ { 0, 0, 0, 6, 5, 42, },
+ { 2, 0, 0, 6, 5, 28, },
+ { 1, 0, 0, 6, 5, 28, },
+ { 0, 0, 0, 6, 6, 42, },
+ { 2, 0, 0, 6, 6, 28, },
+ { 1, 0, 0, 6, 6, 28, },
+ { 0, 0, 0, 6, 7, 42, },
+ { 2, 0, 0, 6, 7, 28, },
+ { 1, 0, 0, 6, 7, 28, },
+ { 0, 0, 0, 6, 8, 42, },
+ { 2, 0, 0, 6, 8, 28, },
+ { 1, 0, 0, 6, 8, 28, },
+ { 0, 0, 0, 6, 9, 42, },
+ { 2, 0, 0, 6, 9, 28, },
+ { 1, 0, 0, 6, 9, 28, },
+ { 0, 0, 0, 6, 10, 42, },
+ { 2, 0, 0, 6, 10, 28, },
+ { 1, 0, 0, 6, 10, 28, },
+ { 0, 0, 0, 6, 11, 30, },
+ { 2, 0, 0, 6, 11, 28, },
+ { 1, 0, 0, 6, 11, 28, },
+ { 0, 0, 0, 6, 12, 63, },
+ { 2, 0, 0, 6, 12, 28, },
+ { 1, 0, 0, 6, 12, 28, },
+ { 0, 0, 0, 6, 13, 63, },
+ { 2, 0, 0, 6, 13, 28, },
+ { 1, 0, 0, 6, 13, 28, },
+ { 0, 0, 0, 6, 14, 63, },
+ { 2, 0, 0, 6, 14, 63, },
+ { 1, 0, 0, 6, 14, 63, },
+ { 0, 0, 0, 7, 1, 28, },
+ { 2, 0, 0, 7, 1, 26, },
+ { 1, 0, 0, 7, 1, 26, },
+ { 0, 0, 0, 7, 2, 40, },
+ { 2, 0, 0, 7, 2, 26, },
+ { 1, 0, 0, 7, 2, 26, },
+ { 0, 0, 0, 7, 3, 40, },
+ { 2, 0, 0, 7, 3, 26, },
+ { 1, 0, 0, 7, 3, 26, },
+ { 0, 0, 0, 7, 4, 40, },
+ { 2, 0, 0, 7, 4, 26, },
+ { 1, 0, 0, 7, 4, 26, },
+ { 0, 0, 0, 7, 5, 40, },
+ { 2, 0, 0, 7, 5, 26, },
+ { 1, 0, 0, 7, 5, 26, },
+ { 0, 0, 0, 7, 6, 40, },
+ { 2, 0, 0, 7, 6, 26, },
+ { 1, 0, 0, 7, 6, 26, },
+ { 0, 0, 0, 7, 7, 40, },
+ { 2, 0, 0, 7, 7, 26, },
+ { 1, 0, 0, 7, 7, 26, },
+ { 0, 0, 0, 7, 8, 40, },
+ { 2, 0, 0, 7, 8, 26, },
+ { 1, 0, 0, 7, 8, 26, },
+ { 0, 0, 0, 7, 9, 40, },
+ { 2, 0, 0, 7, 9, 26, },
+ { 1, 0, 0, 7, 9, 26, },
+ { 0, 0, 0, 7, 10, 40, },
+ { 2, 0, 0, 7, 10, 26, },
+ { 1, 0, 0, 7, 10, 26, },
+ { 0, 0, 0, 7, 11, 28, },
+ { 2, 0, 0, 7, 11, 26, },
+ { 1, 0, 0, 7, 11, 26, },
+ { 0, 0, 0, 7, 12, 63, },
+ { 2, 0, 0, 7, 12, 26, },
+ { 1, 0, 0, 7, 12, 26, },
+ { 0, 0, 0, 7, 13, 63, },
+ { 2, 0, 0, 7, 13, 26, },
+ { 1, 0, 0, 7, 13, 26, },
+ { 0, 0, 0, 7, 14, 63, },
+ { 2, 0, 0, 7, 14, 63, },
+ { 1, 0, 0, 7, 14, 63, },
+ { 0, 0, 1, 2, 1, 63, },
+ { 2, 0, 1, 2, 1, 63, },
+ { 1, 0, 1, 2, 1, 63, },
+ { 0, 0, 1, 2, 2, 63, },
+ { 2, 0, 1, 2, 2, 63, },
+ { 1, 0, 1, 2, 2, 63, },
+ { 0, 0, 1, 2, 3, 36, },
+ { 2, 0, 1, 2, 3, 32, },
+ { 1, 0, 1, 2, 3, 32, },
+ { 0, 0, 1, 2, 4, 40, },
+ { 2, 0, 1, 2, 4, 32, },
+ { 1, 0, 1, 2, 4, 32, },
+ { 0, 0, 1, 2, 5, 40, },
+ { 2, 0, 1, 2, 5, 32, },
+ { 1, 0, 1, 2, 5, 32, },
+ { 0, 0, 1, 2, 6, 40, },
+ { 2, 0, 1, 2, 6, 32, },
+ { 1, 0, 1, 2, 6, 32, },
+ { 0, 0, 1, 2, 7, 40, },
+ { 2, 0, 1, 2, 7, 32, },
+ { 1, 0, 1, 2, 7, 32, },
+ { 0, 0, 1, 2, 8, 40, },
+ { 2, 0, 1, 2, 8, 32, },
+ { 1, 0, 1, 2, 8, 32, },
+ { 0, 0, 1, 2, 9, 40, },
+ { 2, 0, 1, 2, 9, 32, },
+ { 1, 0, 1, 2, 9, 32, },
+ { 0, 0, 1, 2, 10, 40, },
+ { 2, 0, 1, 2, 10, 32, },
+ { 1, 0, 1, 2, 10, 32, },
+ { 0, 0, 1, 2, 11, 34, },
+ { 2, 0, 1, 2, 11, 32, },
+ { 1, 0, 1, 2, 11, 32, },
+ { 0, 0, 1, 2, 12, 63, },
+ { 2, 0, 1, 2, 12, 32, },
+ { 1, 0, 1, 2, 12, 32, },
+ { 0, 0, 1, 2, 13, 63, },
+ { 2, 0, 1, 2, 13, 32, },
+ { 1, 0, 1, 2, 13, 32, },
+ { 0, 0, 1, 2, 14, 63, },
+ { 2, 0, 1, 2, 14, 63, },
+ { 1, 0, 1, 2, 14, 63, },
+ { 0, 0, 1, 3, 1, 63, },
+ { 2, 0, 1, 3, 1, 63, },
+ { 1, 0, 1, 3, 1, 63, },
+ { 0, 0, 1, 3, 2, 63, },
+ { 2, 0, 1, 3, 2, 63, },
+ { 1, 0, 1, 3, 2, 63, },
+ { 0, 0, 1, 3, 3, 34, },
+ { 2, 0, 1, 3, 3, 30, },
+ { 1, 0, 1, 3, 3, 30, },
+ { 0, 0, 1, 3, 4, 38, },
+ { 2, 0, 1, 3, 4, 30, },
+ { 1, 0, 1, 3, 4, 30, },
+ { 0, 0, 1, 3, 5, 38, },
+ { 2, 0, 1, 3, 5, 30, },
+ { 1, 0, 1, 3, 5, 30, },
+ { 0, 0, 1, 3, 6, 38, },
+ { 2, 0, 1, 3, 6, 30, },
+ { 1, 0, 1, 3, 6, 30, },
+ { 0, 0, 1, 3, 7, 38, },
+ { 2, 0, 1, 3, 7, 30, },
+ { 1, 0, 1, 3, 7, 30, },
+ { 0, 0, 1, 3, 8, 38, },
+ { 2, 0, 1, 3, 8, 30, },
+ { 1, 0, 1, 3, 8, 30, },
+ { 0, 0, 1, 3, 9, 38, },
+ { 2, 0, 1, 3, 9, 30, },
+ { 1, 0, 1, 3, 9, 30, },
+ { 0, 0, 1, 3, 10, 38, },
+ { 2, 0, 1, 3, 10, 30, },
+ { 1, 0, 1, 3, 10, 30, },
+ { 0, 0, 1, 3, 11, 32, },
+ { 2, 0, 1, 3, 11, 30, },
+ { 1, 0, 1, 3, 11, 30, },
+ { 0, 0, 1, 3, 12, 63, },
+ { 2, 0, 1, 3, 12, 30, },
+ { 1, 0, 1, 3, 12, 30, },
+ { 0, 0, 1, 3, 13, 63, },
+ { 2, 0, 1, 3, 13, 30, },
+ { 1, 0, 1, 3, 13, 30, },
+ { 0, 0, 1, 3, 14, 63, },
+ { 2, 0, 1, 3, 14, 63, },
+ { 1, 0, 1, 3, 14, 63, },
+ { 0, 0, 1, 6, 1, 63, },
+ { 2, 0, 1, 6, 1, 63, },
+ { 1, 0, 1, 6, 1, 63, },
+ { 0, 0, 1, 6, 2, 63, },
+ { 2, 0, 1, 6, 2, 63, },
+ { 1, 0, 1, 6, 2, 63, },
+ { 0, 0, 1, 6, 3, 32, },
+ { 2, 0, 1, 6, 3, 28, },
+ { 1, 0, 1, 6, 3, 28, },
+ { 0, 0, 1, 6, 4, 36, },
+ { 2, 0, 1, 6, 4, 28, },
+ { 1, 0, 1, 6, 4, 28, },
+ { 0, 0, 1, 6, 5, 36, },
+ { 2, 0, 1, 6, 5, 28, },
+ { 1, 0, 1, 6, 5, 28, },
+ { 0, 0, 1, 6, 6, 36, },
+ { 2, 0, 1, 6, 6, 28, },
+ { 1, 0, 1, 6, 6, 28, },
+ { 0, 0, 1, 6, 7, 36, },
+ { 2, 0, 1, 6, 7, 28, },
+ { 1, 0, 1, 6, 7, 28, },
+ { 0, 0, 1, 6, 8, 36, },
+ { 2, 0, 1, 6, 8, 28, },
+ { 1, 0, 1, 6, 8, 28, },
+ { 0, 0, 1, 6, 9, 36, },
+ { 2, 0, 1, 6, 9, 28, },
+ { 1, 0, 1, 6, 9, 28, },
+ { 0, 0, 1, 6, 10, 36, },
+ { 2, 0, 1, 6, 10, 28, },
+ { 1, 0, 1, 6, 10, 28, },
+ { 0, 0, 1, 6, 11, 30, },
+ { 2, 0, 1, 6, 11, 28, },
+ { 1, 0, 1, 6, 11, 28, },
+ { 0, 0, 1, 6, 12, 63, },
+ { 2, 0, 1, 6, 12, 28, },
+ { 1, 0, 1, 6, 12, 28, },
+ { 0, 0, 1, 6, 13, 63, },
+ { 2, 0, 1, 6, 13, 28, },
+ { 1, 0, 1, 6, 13, 28, },
+ { 0, 0, 1, 6, 14, 63, },
+ { 2, 0, 1, 6, 14, 63, },
+ { 1, 0, 1, 6, 14, 63, },
+ { 0, 0, 1, 7, 1, 63, },
+ { 2, 0, 1, 7, 1, 63, },
+ { 1, 0, 1, 7, 1, 63, },
+ { 0, 0, 1, 7, 2, 63, },
+ { 2, 0, 1, 7, 2, 63, },
+ { 1, 0, 1, 7, 2, 63, },
+ { 0, 0, 1, 7, 3, 32, },
+ { 2, 0, 1, 7, 3, 26, },
+ { 1, 0, 1, 7, 3, 26, },
+ { 0, 0, 1, 7, 4, 36, },
+ { 2, 0, 1, 7, 4, 26, },
+ { 1, 0, 1, 7, 4, 26, },
+ { 0, 0, 1, 7, 5, 36, },
+ { 2, 0, 1, 7, 5, 26, },
+ { 1, 0, 1, 7, 5, 26, },
+ { 0, 0, 1, 7, 6, 36, },
+ { 2, 0, 1, 7, 6, 26, },
+ { 1, 0, 1, 7, 6, 26, },
+ { 0, 0, 1, 7, 7, 36, },
+ { 2, 0, 1, 7, 7, 26, },
+ { 1, 0, 1, 7, 7, 26, },
+ { 0, 0, 1, 7, 8, 36, },
+ { 2, 0, 1, 7, 8, 26, },
+ { 1, 0, 1, 7, 8, 26, },
+ { 0, 0, 1, 7, 9, 36, },
+ { 2, 0, 1, 7, 9, 26, },
+ { 1, 0, 1, 7, 9, 26, },
+ { 0, 0, 1, 7, 10, 36, },
+ { 2, 0, 1, 7, 10, 26, },
+ { 1, 0, 1, 7, 10, 26, },
+ { 0, 0, 1, 7, 11, 30, },
+ { 2, 0, 1, 7, 11, 26, },
+ { 1, 0, 1, 7, 11, 26, },
+ { 0, 0, 1, 7, 12, 63, },
+ { 2, 0, 1, 7, 12, 26, },
+ { 1, 0, 1, 7, 12, 26, },
+ { 0, 0, 1, 7, 13, 63, },
+ { 2, 0, 1, 7, 13, 26, },
+ { 1, 0, 1, 7, 13, 26, },
+ { 0, 0, 1, 7, 14, 63, },
+ { 2, 0, 1, 7, 14, 63, },
+ { 1, 0, 1, 7, 14, 63, },
+ { 0, 1, 0, 1, 36, 38, },
+ { 2, 1, 0, 1, 36, 32, },
+ { 1, 1, 0, 1, 36, 32, },
+ { 0, 1, 0, 1, 40, 38, },
+ { 2, 1, 0, 1, 40, 32, },
+ { 1, 1, 0, 1, 40, 32, },
+ { 0, 1, 0, 1, 44, 38, },
+ { 2, 1, 0, 1, 44, 32, },
+ { 1, 1, 0, 1, 44, 32, },
+ { 0, 1, 0, 1, 48, 38, },
+ { 2, 1, 0, 1, 48, 32, },
+ { 1, 1, 0, 1, 48, 32, },
+ { 0, 1, 0, 1, 52, 38, },
+ { 2, 1, 0, 1, 52, 32, },
+ { 1, 1, 0, 1, 52, 32, },
+ { 0, 1, 0, 1, 56, 38, },
+ { 2, 1, 0, 1, 56, 32, },
+ { 1, 1, 0, 1, 56, 32, },
+ { 0, 1, 0, 1, 60, 38, },
+ { 2, 1, 0, 1, 60, 32, },
+ { 1, 1, 0, 1, 60, 32, },
+ { 0, 1, 0, 1, 64, 38, },
+ { 2, 1, 0, 1, 64, 32, },
+ { 1, 1, 0, 1, 64, 32, },
+ { 0, 1, 0, 1, 100, 36, },
+ { 2, 1, 0, 1, 100, 32, },
+ { 1, 1, 0, 1, 100, 32, },
+ { 0, 1, 0, 1, 104, 36, },
+ { 2, 1, 0, 1, 104, 32, },
+ { 1, 1, 0, 1, 104, 32, },
+ { 0, 1, 0, 1, 108, 36, },
+ { 2, 1, 0, 1, 108, 32, },
+ { 1, 1, 0, 1, 108, 32, },
+ { 0, 1, 0, 1, 112, 36, },
+ { 2, 1, 0, 1, 112, 32, },
+ { 1, 1, 0, 1, 112, 32, },
+ { 0, 1, 0, 1, 116, 36, },
+ { 2, 1, 0, 1, 116, 32, },
+ { 1, 1, 0, 1, 116, 32, },
+ { 0, 1, 0, 1, 120, 36, },
+ { 2, 1, 0, 1, 120, 32, },
+ { 1, 1, 0, 1, 120, 32, },
+ { 0, 1, 0, 1, 124, 36, },
+ { 2, 1, 0, 1, 124, 32, },
+ { 1, 1, 0, 1, 124, 32, },
+ { 0, 1, 0, 1, 128, 36, },
+ { 2, 1, 0, 1, 128, 32, },
+ { 1, 1, 0, 1, 128, 32, },
+ { 0, 1, 0, 1, 132, 36, },
+ { 2, 1, 0, 1, 132, 32, },
+ { 1, 1, 0, 1, 132, 32, },
+ { 0, 1, 0, 1, 136, 36, },
+ { 2, 1, 0, 1, 136, 32, },
+ { 1, 1, 0, 1, 136, 32, },
+ { 0, 1, 0, 1, 140, 36, },
+ { 2, 1, 0, 1, 140, 32, },
+ { 1, 1, 0, 1, 140, 32, },
+ { 0, 1, 0, 1, 149, 36, },
+ { 2, 1, 0, 1, 149, 32, },
+ { 1, 1, 0, 1, 149, 63, },
+ { 0, 1, 0, 1, 153, 36, },
+ { 2, 1, 0, 1, 153, 32, },
+ { 1, 1, 0, 1, 153, 63, },
+ { 0, 1, 0, 1, 157, 36, },
+ { 2, 1, 0, 1, 157, 32, },
+ { 1, 1, 0, 1, 157, 63, },
+ { 0, 1, 0, 1, 161, 36, },
+ { 2, 1, 0, 1, 161, 32, },
+ { 1, 1, 0, 1, 161, 63, },
+ { 0, 1, 0, 1, 165, 36, },
+ { 2, 1, 0, 1, 165, 32, },
+ { 1, 1, 0, 1, 165, 63, },
+ { 0, 1, 0, 2, 36, 36, },
+ { 2, 1, 0, 2, 36, 32, },
+ { 1, 1, 0, 2, 36, 32, },
+ { 0, 1, 0, 2, 40, 36, },
+ { 2, 1, 0, 2, 40, 32, },
+ { 1, 1, 0, 2, 40, 32, },
+ { 0, 1, 0, 2, 44, 36, },
+ { 2, 1, 0, 2, 44, 32, },
+ { 1, 1, 0, 2, 44, 32, },
+ { 0, 1, 0, 2, 48, 36, },
+ { 2, 1, 0, 2, 48, 32, },
+ { 1, 1, 0, 2, 48, 32, },
+ { 0, 1, 0, 2, 52, 36, },
+ { 2, 1, 0, 2, 52, 32, },
+ { 1, 1, 0, 2, 52, 32, },
+ { 0, 1, 0, 2, 56, 36, },
+ { 2, 1, 0, 2, 56, 32, },
+ { 1, 1, 0, 2, 56, 32, },
+ { 0, 1, 0, 2, 60, 36, },
+ { 2, 1, 0, 2, 60, 32, },
+ { 1, 1, 0, 2, 60, 32, },
+ { 0, 1, 0, 2, 64, 36, },
+ { 2, 1, 0, 2, 64, 32, },
+ { 1, 1, 0, 2, 64, 32, },
+ { 0, 1, 0, 2, 100, 36, },
+ { 2, 1, 0, 2, 100, 32, },
+ { 1, 1, 0, 2, 100, 32, },
+ { 0, 1, 0, 2, 104, 36, },
+ { 2, 1, 0, 2, 104, 32, },
+ { 1, 1, 0, 2, 104, 32, },
+ { 0, 1, 0, 2, 108, 36, },
+ { 2, 1, 0, 2, 108, 32, },
+ { 1, 1, 0, 2, 108, 32, },
+ { 0, 1, 0, 2, 112, 36, },
+ { 2, 1, 0, 2, 112, 32, },
+ { 1, 1, 0, 2, 112, 32, },
+ { 0, 1, 0, 2, 116, 36, },
+ { 2, 1, 0, 2, 116, 32, },
+ { 1, 1, 0, 2, 116, 32, },
+ { 0, 1, 0, 2, 120, 36, },
+ { 2, 1, 0, 2, 120, 32, },
+ { 1, 1, 0, 2, 120, 32, },
+ { 0, 1, 0, 2, 124, 36, },
+ { 2, 1, 0, 2, 124, 32, },
+ { 1, 1, 0, 2, 124, 32, },
+ { 0, 1, 0, 2, 128, 36, },
+ { 2, 1, 0, 2, 128, 32, },
+ { 1, 1, 0, 2, 128, 32, },
+ { 0, 1, 0, 2, 132, 36, },
+ { 2, 1, 0, 2, 132, 32, },
+ { 1, 1, 0, 2, 132, 32, },
+ { 0, 1, 0, 2, 136, 36, },
+ { 2, 1, 0, 2, 136, 32, },
+ { 1, 1, 0, 2, 136, 32, },
+ { 0, 1, 0, 2, 140, 34, },
+ { 2, 1, 0, 2, 140, 32, },
+ { 1, 1, 0, 2, 140, 32, },
+ { 0, 1, 0, 2, 149, 32, },
+ { 2, 1, 0, 2, 149, 32, },
+ { 1, 1, 0, 2, 149, 63, },
+ { 0, 1, 0, 2, 153, 38, },
+ { 2, 1, 0, 2, 153, 32, },
+ { 1, 1, 0, 2, 153, 63, },
+ { 0, 1, 0, 2, 157, 38, },
+ { 2, 1, 0, 2, 157, 32, },
+ { 1, 1, 0, 2, 157, 63, },
+ { 0, 1, 0, 2, 161, 38, },
+ { 2, 1, 0, 2, 161, 32, },
+ { 1, 1, 0, 2, 161, 63, },
+ { 0, 1, 0, 2, 165, 38, },
+ { 2, 1, 0, 2, 165, 32, },
+ { 1, 1, 0, 2, 165, 63, },
+ { 0, 1, 0, 3, 36, 34, },
+ { 2, 1, 0, 3, 36, 30, },
+ { 1, 1, 0, 3, 36, 30, },
+ { 0, 1, 0, 3, 40, 34, },
+ { 2, 1, 0, 3, 40, 30, },
+ { 1, 1, 0, 3, 40, 30, },
+ { 0, 1, 0, 3, 44, 34, },
+ { 2, 1, 0, 3, 44, 30, },
+ { 1, 1, 0, 3, 44, 30, },
+ { 0, 1, 0, 3, 48, 34, },
+ { 2, 1, 0, 3, 48, 30, },
+ { 1, 1, 0, 3, 48, 30, },
+ { 0, 1, 0, 3, 52, 34, },
+ { 2, 1, 0, 3, 52, 30, },
+ { 1, 1, 0, 3, 52, 30, },
+ { 0, 1, 0, 3, 56, 34, },
+ { 2, 1, 0, 3, 56, 30, },
+ { 1, 1, 0, 3, 56, 30, },
+ { 0, 1, 0, 3, 60, 34, },
+ { 2, 1, 0, 3, 60, 30, },
+ { 1, 1, 0, 3, 60, 30, },
+ { 0, 1, 0, 3, 64, 34, },
+ { 2, 1, 0, 3, 64, 30, },
+ { 1, 1, 0, 3, 64, 30, },
+ { 0, 1, 0, 3, 100, 34, },
+ { 2, 1, 0, 3, 100, 30, },
+ { 1, 1, 0, 3, 100, 30, },
+ { 0, 1, 0, 3, 104, 34, },
+ { 2, 1, 0, 3, 104, 30, },
+ { 1, 1, 0, 3, 104, 30, },
+ { 0, 1, 0, 3, 108, 34, },
+ { 2, 1, 0, 3, 108, 30, },
+ { 1, 1, 0, 3, 108, 30, },
+ { 0, 1, 0, 3, 112, 34, },
+ { 2, 1, 0, 3, 112, 30, },
+ { 1, 1, 0, 3, 112, 30, },
+ { 0, 1, 0, 3, 116, 34, },
+ { 2, 1, 0, 3, 116, 30, },
+ { 1, 1, 0, 3, 116, 30, },
+ { 0, 1, 0, 3, 120, 34, },
+ { 2, 1, 0, 3, 120, 30, },
+ { 1, 1, 0, 3, 120, 30, },
+ { 0, 1, 0, 3, 124, 34, },
+ { 2, 1, 0, 3, 124, 30, },
+ { 1, 1, 0, 3, 124, 30, },
+ { 0, 1, 0, 3, 128, 34, },
+ { 2, 1, 0, 3, 128, 30, },
+ { 1, 1, 0, 3, 128, 30, },
+ { 0, 1, 0, 3, 132, 34, },
+ { 2, 1, 0, 3, 132, 30, },
+ { 1, 1, 0, 3, 132, 30, },
+ { 0, 1, 0, 3, 136, 34, },
+ { 2, 1, 0, 3, 136, 30, },
+ { 1, 1, 0, 3, 136, 30, },
+ { 0, 1, 0, 3, 140, 32, },
+ { 2, 1, 0, 3, 140, 30, },
+ { 1, 1, 0, 3, 140, 30, },
+ { 0, 1, 0, 3, 149, 30, },
+ { 2, 1, 0, 3, 149, 30, },
+ { 1, 1, 0, 3, 149, 63, },
+ { 0, 1, 0, 3, 153, 36, },
+ { 2, 1, 0, 3, 153, 30, },
+ { 1, 1, 0, 3, 153, 63, },
+ { 0, 1, 0, 3, 157, 36, },
+ { 2, 1, 0, 3, 157, 30, },
+ { 1, 1, 0, 3, 157, 63, },
+ { 0, 1, 0, 3, 161, 36, },
+ { 2, 1, 0, 3, 161, 30, },
+ { 1, 1, 0, 3, 161, 63, },
+ { 0, 1, 0, 3, 165, 36, },
+ { 2, 1, 0, 3, 165, 30, },
+ { 1, 1, 0, 3, 165, 63, },
+ { 0, 1, 0, 6, 36, 32, },
+ { 2, 1, 0, 6, 36, 28, },
+ { 1, 1, 0, 6, 36, 28, },
+ { 0, 1, 0, 6, 40, 32, },
+ { 2, 1, 0, 6, 40, 28, },
+ { 1, 1, 0, 6, 40, 28, },
+ { 0, 1, 0, 6, 44, 32, },
+ { 2, 1, 0, 6, 44, 28, },
+ { 1, 1, 0, 6, 44, 28, },
+ { 0, 1, 0, 6, 48, 32, },
+ { 2, 1, 0, 6, 48, 28, },
+ { 1, 1, 0, 6, 48, 28, },
+ { 0, 1, 0, 6, 52, 32, },
+ { 2, 1, 0, 6, 52, 28, },
+ { 1, 1, 0, 6, 52, 28, },
+ { 0, 1, 0, 6, 56, 32, },
+ { 2, 1, 0, 6, 56, 28, },
+ { 1, 1, 0, 6, 56, 28, },
+ { 0, 1, 0, 6, 60, 32, },
+ { 2, 1, 0, 6, 60, 28, },
+ { 1, 1, 0, 6, 60, 28, },
+ { 0, 1, 0, 6, 64, 32, },
+ { 2, 1, 0, 6, 64, 28, },
+ { 1, 1, 0, 6, 64, 28, },
+ { 0, 1, 0, 6, 100, 32, },
+ { 2, 1, 0, 6, 100, 28, },
+ { 1, 1, 0, 6, 100, 28, },
+ { 0, 1, 0, 6, 104, 32, },
+ { 2, 1, 0, 6, 104, 28, },
+ { 1, 1, 0, 6, 104, 28, },
+ { 0, 1, 0, 6, 108, 32, },
+ { 2, 1, 0, 6, 108, 28, },
+ { 1, 1, 0, 6, 108, 28, },
+ { 0, 1, 0, 6, 112, 32, },
+ { 2, 1, 0, 6, 112, 28, },
+ { 1, 1, 0, 6, 112, 28, },
+ { 0, 1, 0, 6, 116, 32, },
+ { 2, 1, 0, 6, 116, 28, },
+ { 1, 1, 0, 6, 116, 28, },
+ { 0, 1, 0, 6, 120, 32, },
+ { 2, 1, 0, 6, 120, 28, },
+ { 1, 1, 0, 6, 120, 28, },
+ { 0, 1, 0, 6, 124, 32, },
+ { 2, 1, 0, 6, 124, 28, },
+ { 1, 1, 0, 6, 124, 28, },
+ { 0, 1, 0, 6, 128, 32, },
+ { 2, 1, 0, 6, 128, 28, },
+ { 1, 1, 0, 6, 128, 28, },
+ { 0, 1, 0, 6, 132, 32, },
+ { 2, 1, 0, 6, 132, 28, },
+ { 1, 1, 0, 6, 132, 28, },
+ { 0, 1, 0, 6, 136, 32, },
+ { 2, 1, 0, 6, 136, 28, },
+ { 1, 1, 0, 6, 136, 28, },
+ { 0, 1, 0, 6, 140, 30, },
+ { 2, 1, 0, 6, 140, 28, },
+ { 1, 1, 0, 6, 140, 28, },
+ { 0, 1, 0, 6, 149, 28, },
+ { 2, 1, 0, 6, 149, 28, },
+ { 1, 1, 0, 6, 149, 63, },
+ { 0, 1, 0, 6, 153, 34, },
+ { 2, 1, 0, 6, 153, 28, },
+ { 1, 1, 0, 6, 153, 63, },
+ { 0, 1, 0, 6, 157, 34, },
+ { 2, 1, 0, 6, 157, 28, },
+ { 1, 1, 0, 6, 157, 63, },
+ { 0, 1, 0, 6, 161, 34, },
+ { 2, 1, 0, 6, 161, 28, },
+ { 1, 1, 0, 6, 161, 63, },
+ { 0, 1, 0, 6, 165, 34, },
+ { 2, 1, 0, 6, 165, 28, },
+ { 1, 1, 0, 6, 165, 63, },
+ { 0, 1, 0, 7, 36, 30, },
+ { 2, 1, 0, 7, 36, 26, },
+ { 1, 1, 0, 7, 36, 26, },
+ { 0, 1, 0, 7, 40, 30, },
+ { 2, 1, 0, 7, 40, 26, },
+ { 1, 1, 0, 7, 40, 26, },
+ { 0, 1, 0, 7, 44, 30, },
+ { 2, 1, 0, 7, 44, 26, },
+ { 1, 1, 0, 7, 44, 26, },
+ { 0, 1, 0, 7, 48, 30, },
+ { 2, 1, 0, 7, 48, 26, },
+ { 1, 1, 0, 7, 48, 26, },
+ { 0, 1, 0, 7, 52, 30, },
+ { 2, 1, 0, 7, 52, 26, },
+ { 1, 1, 0, 7, 52, 26, },
+ { 0, 1, 0, 7, 56, 30, },
+ { 2, 1, 0, 7, 56, 26, },
+ { 1, 1, 0, 7, 56, 26, },
+ { 0, 1, 0, 7, 60, 30, },
+ { 2, 1, 0, 7, 60, 26, },
+ { 1, 1, 0, 7, 60, 26, },
+ { 0, 1, 0, 7, 64, 30, },
+ { 2, 1, 0, 7, 64, 26, },
+ { 1, 1, 0, 7, 64, 26, },
+ { 0, 1, 0, 7, 100, 30, },
+ { 2, 1, 0, 7, 100, 26, },
+ { 1, 1, 0, 7, 100, 26, },
+ { 0, 1, 0, 7, 104, 30, },
+ { 2, 1, 0, 7, 104, 26, },
+ { 1, 1, 0, 7, 104, 26, },
+ { 0, 1, 0, 7, 108, 30, },
+ { 2, 1, 0, 7, 108, 26, },
+ { 1, 1, 0, 7, 108, 26, },
+ { 0, 1, 0, 7, 112, 30, },
+ { 2, 1, 0, 7, 112, 26, },
+ { 1, 1, 0, 7, 112, 26, },
+ { 0, 1, 0, 7, 116, 30, },
+ { 2, 1, 0, 7, 116, 26, },
+ { 1, 1, 0, 7, 116, 26, },
+ { 0, 1, 0, 7, 120, 30, },
+ { 2, 1, 0, 7, 120, 26, },
+ { 1, 1, 0, 7, 120, 26, },
+ { 0, 1, 0, 7, 124, 30, },
+ { 2, 1, 0, 7, 124, 26, },
+ { 1, 1, 0, 7, 124, 26, },
+ { 0, 1, 0, 7, 128, 30, },
+ { 2, 1, 0, 7, 128, 26, },
+ { 1, 1, 0, 7, 128, 26, },
+ { 0, 1, 0, 7, 132, 30, },
+ { 2, 1, 0, 7, 132, 26, },
+ { 1, 1, 0, 7, 132, 26, },
+ { 0, 1, 0, 7, 136, 30, },
+ { 2, 1, 0, 7, 136, 26, },
+ { 1, 1, 0, 7, 136, 26, },
+ { 0, 1, 0, 7, 140, 28, },
+ { 2, 1, 0, 7, 140, 26, },
+ { 1, 1, 0, 7, 140, 26, },
+ { 0, 1, 0, 7, 149, 26, },
+ { 2, 1, 0, 7, 149, 26, },
+ { 1, 1, 0, 7, 149, 63, },
+ { 0, 1, 0, 7, 153, 32, },
+ { 2, 1, 0, 7, 153, 26, },
+ { 1, 1, 0, 7, 153, 63, },
+ { 0, 1, 0, 7, 157, 32, },
+ { 2, 1, 0, 7, 157, 26, },
+ { 1, 1, 0, 7, 157, 63, },
+ { 0, 1, 0, 7, 161, 32, },
+ { 2, 1, 0, 7, 161, 26, },
+ { 1, 1, 0, 7, 161, 63, },
+ { 0, 1, 0, 7, 165, 32, },
+ { 2, 1, 0, 7, 165, 26, },
+ { 1, 1, 0, 7, 165, 63, },
+ { 0, 1, 1, 2, 38, 32, },
+ { 2, 1, 1, 2, 38, 32, },
+ { 1, 1, 1, 2, 38, 32, },
+ { 0, 1, 1, 2, 46, 32, },
+ { 2, 1, 1, 2, 46, 32, },
+ { 1, 1, 1, 2, 46, 32, },
+ { 0, 1, 1, 2, 54, 32, },
+ { 2, 1, 1, 2, 54, 32, },
+ { 1, 1, 1, 2, 54, 32, },
+ { 0, 1, 1, 2, 62, 30, },
+ { 2, 1, 1, 2, 62, 32, },
+ { 1, 1, 1, 2, 62, 32, },
+ { 0, 1, 1, 2, 102, 30, },
+ { 2, 1, 1, 2, 102, 32, },
+ { 1, 1, 1, 2, 102, 32, },
+ { 0, 1, 1, 2, 110, 38, },
+ { 2, 1, 1, 2, 110, 32, },
+ { 1, 1, 1, 2, 110, 32, },
+ { 0, 1, 1, 2, 118, 38, },
+ { 2, 1, 1, 2, 118, 32, },
+ { 1, 1, 1, 2, 118, 32, },
+ { 0, 1, 1, 2, 126, 38, },
+ { 2, 1, 1, 2, 126, 32, },
+ { 1, 1, 1, 2, 126, 32, },
+ { 0, 1, 1, 2, 134, 38, },
+ { 2, 1, 1, 2, 134, 32, },
+ { 1, 1, 1, 2, 134, 32, },
+ { 0, 1, 1, 2, 151, 32, },
+ { 2, 1, 1, 2, 151, 32, },
+ { 1, 1, 1, 2, 151, 63, },
+ { 0, 1, 1, 2, 159, 32, },
+ { 2, 1, 1, 2, 159, 32, },
+ { 1, 1, 1, 2, 159, 63, },
+ { 0, 1, 1, 3, 38, 30, },
+ { 2, 1, 1, 3, 38, 30, },
+ { 1, 1, 1, 3, 38, 30, },
+ { 0, 1, 1, 3, 46, 30, },
+ { 2, 1, 1, 3, 46, 30, },
+ { 1, 1, 1, 3, 46, 30, },
+ { 0, 1, 1, 3, 54, 30, },
+ { 2, 1, 1, 3, 54, 30, },
+ { 1, 1, 1, 3, 54, 30, },
+ { 0, 1, 1, 3, 62, 28, },
+ { 2, 1, 1, 3, 62, 30, },
+ { 1, 1, 1, 3, 62, 30, },
+ { 0, 1, 1, 3, 102, 28, },
+ { 2, 1, 1, 3, 102, 30, },
+ { 1, 1, 1, 3, 102, 30, },
+ { 0, 1, 1, 3, 110, 36, },
+ { 2, 1, 1, 3, 110, 30, },
+ { 1, 1, 1, 3, 110, 30, },
+ { 0, 1, 1, 3, 118, 36, },
+ { 2, 1, 1, 3, 118, 30, },
+ { 1, 1, 1, 3, 118, 30, },
+ { 0, 1, 1, 3, 126, 36, },
+ { 2, 1, 1, 3, 126, 30, },
+ { 1, 1, 1, 3, 126, 30, },
+ { 0, 1, 1, 3, 134, 36, },
+ { 2, 1, 1, 3, 134, 30, },
+ { 1, 1, 1, 3, 134, 30, },
+ { 0, 1, 1, 3, 151, 30, },
+ { 2, 1, 1, 3, 151, 30, },
+ { 1, 1, 1, 3, 151, 63, },
+ { 0, 1, 1, 3, 159, 30, },
+ { 2, 1, 1, 3, 159, 30, },
+ { 1, 1, 1, 3, 159, 63, },
+ { 0, 1, 1, 6, 38, 28, },
+ { 2, 1, 1, 6, 38, 28, },
+ { 1, 1, 1, 6, 38, 28, },
+ { 0, 1, 1, 6, 46, 28, },
+ { 2, 1, 1, 6, 46, 28, },
+ { 1, 1, 1, 6, 46, 28, },
+ { 0, 1, 1, 6, 54, 28, },
+ { 2, 1, 1, 6, 54, 28, },
+ { 1, 1, 1, 6, 54, 28, },
+ { 0, 1, 1, 6, 62, 26, },
+ { 2, 1, 1, 6, 62, 28, },
+ { 1, 1, 1, 6, 62, 28, },
+ { 0, 1, 1, 6, 102, 26, },
+ { 2, 1, 1, 6, 102, 28, },
+ { 1, 1, 1, 6, 102, 28, },
+ { 0, 1, 1, 6, 110, 34, },
+ { 2, 1, 1, 6, 110, 28, },
+ { 1, 1, 1, 6, 110, 28, },
+ { 0, 1, 1, 6, 118, 34, },
+ { 2, 1, 1, 6, 118, 28, },
+ { 1, 1, 1, 6, 118, 28, },
+ { 0, 1, 1, 6, 126, 34, },
+ { 2, 1, 1, 6, 126, 28, },
+ { 1, 1, 1, 6, 126, 28, },
+ { 0, 1, 1, 6, 134, 34, },
+ { 2, 1, 1, 6, 134, 28, },
+ { 1, 1, 1, 6, 134, 28, },
+ { 0, 1, 1, 6, 151, 28, },
+ { 2, 1, 1, 6, 151, 28, },
+ { 1, 1, 1, 6, 151, 63, },
+ { 0, 1, 1, 6, 159, 28, },
+ { 2, 1, 1, 6, 159, 28, },
+ { 1, 1, 1, 6, 159, 63, },
+ { 0, 1, 1, 7, 38, 26, },
+ { 2, 1, 1, 7, 38, 26, },
+ { 1, 1, 1, 7, 38, 26, },
+ { 0, 1, 1, 7, 46, 26, },
+ { 2, 1, 1, 7, 46, 26, },
+ { 1, 1, 1, 7, 46, 26, },
+ { 0, 1, 1, 7, 54, 26, },
+ { 2, 1, 1, 7, 54, 26, },
+ { 1, 1, 1, 7, 54, 26, },
+ { 0, 1, 1, 7, 62, 24, },
+ { 2, 1, 1, 7, 62, 26, },
+ { 1, 1, 1, 7, 62, 26, },
+ { 0, 1, 1, 7, 102, 24, },
+ { 2, 1, 1, 7, 102, 26, },
+ { 1, 1, 1, 7, 102, 26, },
+ { 0, 1, 1, 7, 110, 32, },
+ { 2, 1, 1, 7, 110, 26, },
+ { 1, 1, 1, 7, 110, 26, },
+ { 0, 1, 1, 7, 118, 32, },
+ { 2, 1, 1, 7, 118, 26, },
+ { 1, 1, 1, 7, 118, 26, },
+ { 0, 1, 1, 7, 126, 32, },
+ { 2, 1, 1, 7, 126, 26, },
+ { 1, 1, 1, 7, 126, 26, },
+ { 0, 1, 1, 7, 134, 32, },
+ { 2, 1, 1, 7, 134, 26, },
+ { 1, 1, 1, 7, 134, 26, },
+ { 0, 1, 1, 7, 151, 26, },
+ { 2, 1, 1, 7, 151, 26, },
+ { 1, 1, 1, 7, 151, 63, },
+ { 0, 1, 1, 7, 159, 26, },
+ { 2, 1, 1, 7, 159, 26, },
+ { 1, 1, 1, 7, 159, 63, },
+ { 0, 1, 2, 4, 42, 26, },
+ { 2, 1, 2, 4, 42, 32, },
+ { 1, 1, 2, 4, 42, 32, },
+ { 0, 1, 2, 4, 58, 26, },
+ { 2, 1, 2, 4, 58, 32, },
+ { 1, 1, 2, 4, 58, 32, },
+ { 0, 1, 2, 4, 106, 28, },
+ { 2, 1, 2, 4, 106, 32, },
+ { 1, 1, 2, 4, 106, 32, },
+ { 0, 1, 2, 4, 122, 28, },
+ { 2, 1, 2, 4, 122, 32, },
+ { 1, 1, 2, 4, 122, 32, },
+ { 0, 1, 2, 4, 155, 28, },
+ { 2, 1, 2, 4, 155, 32, },
+ { 1, 1, 2, 4, 155, 63, },
+ { 0, 1, 2, 5, 42, 24, },
+ { 2, 1, 2, 5, 42, 30, },
+ { 1, 1, 2, 5, 42, 30, },
+ { 0, 1, 2, 5, 58, 24, },
+ { 2, 1, 2, 5, 58, 30, },
+ { 1, 1, 2, 5, 58, 30, },
+ { 0, 1, 2, 5, 106, 26, },
+ { 2, 1, 2, 5, 106, 30, },
+ { 1, 1, 2, 5, 106, 30, },
+ { 0, 1, 2, 5, 122, 26, },
+ { 2, 1, 2, 5, 122, 30, },
+ { 1, 1, 2, 5, 122, 30, },
+ { 0, 1, 2, 5, 155, 26, },
+ { 2, 1, 2, 5, 155, 30, },
+ { 1, 1, 2, 5, 155, 63, },
+ { 0, 1, 2, 8, 42, 22, },
+ { 2, 1, 2, 8, 42, 28, },
+ { 1, 1, 2, 8, 42, 28, },
+ { 0, 1, 2, 8, 58, 22, },
+ { 2, 1, 2, 8, 58, 28, },
+ { 1, 1, 2, 8, 58, 28, },
+ { 0, 1, 2, 8, 106, 24, },
+ { 2, 1, 2, 8, 106, 28, },
+ { 1, 1, 2, 8, 106, 28, },
+ { 0, 1, 2, 8, 122, 24, },
+ { 2, 1, 2, 8, 122, 28, },
+ { 1, 1, 2, 8, 122, 28, },
+ { 0, 1, 2, 8, 155, 24, },
+ { 2, 1, 2, 8, 155, 28, },
+ { 1, 1, 2, 8, 155, 63, },
+ { 0, 1, 2, 9, 42, 20, },
+ { 2, 1, 2, 9, 42, 26, },
+ { 1, 1, 2, 9, 42, 26, },
+ { 0, 1, 2, 9, 58, 20, },
+ { 2, 1, 2, 9, 58, 26, },
+ { 1, 1, 2, 9, 58, 26, },
+ { 0, 1, 2, 9, 106, 22, },
+ { 2, 1, 2, 9, 106, 26, },
+ { 1, 1, 2, 9, 106, 26, },
+ { 0, 1, 2, 9, 122, 22, },
+ { 2, 1, 2, 9, 122, 26, },
+ { 1, 1, 2, 9, 122, 26, },
+ { 0, 1, 2, 9, 155, 22, },
+ { 2, 1, 2, 9, 155, 26, },
+ { 1, 1, 2, 9, 155, 63, },
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8814a_txpwr_lmt_type7);
+
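Each RTW_DECL_TABLE_TXPWR_LMT() line wraps the preceding array into a generic rtw_table that the rtw88 core walks with its common tx-power-limit parser; a rough sketch of what the macro is assumed to expand to for the table above:

const struct rtw_table rtw8814a_txpwr_lmt_type7_tbl = {
	.data  = rtw8814a_txpwr_lmt_type7,
	.size  = ARRAY_SIZE(rtw8814a_txpwr_lmt_type7),
	.parse = rtw_parse_tbl_txpwr_lmt,
};

The chip code then presumably references this _tbl from its chip descriptor (as the other rtw88 chips do) so the per-regulatory-domain limits are applied when a channel is configured.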
+static const struct rtw_txpwr_lmt_cfg_pair rtw8814a_txpwr_lmt_type8[] = {
+ { 0, 0, 0, 0, 1, 46, },
+ { 2, 0, 0, 0, 1, 46, },
+ { 1, 0, 0, 0, 1, 46, },
+ { 0, 0, 0, 0, 2, 46, },
+ { 2, 0, 0, 0, 2, 46, },
+ { 1, 0, 0, 0, 2, 46, },
+ { 0, 0, 0, 0, 3, 46, },
+ { 2, 0, 0, 0, 3, 46, },
+ { 1, 0, 0, 0, 3, 46, },
+ { 0, 0, 0, 0, 4, 46, },
+ { 2, 0, 0, 0, 4, 46, },
+ { 1, 0, 0, 0, 4, 46, },
+ { 0, 0, 0, 0, 5, 46, },
+ { 2, 0, 0, 0, 5, 46, },
+ { 1, 0, 0, 0, 5, 46, },
+ { 0, 0, 0, 0, 6, 46, },
+ { 2, 0, 0, 0, 6, 46, },
+ { 1, 0, 0, 0, 6, 46, },
+ { 0, 0, 0, 0, 7, 46, },
+ { 2, 0, 0, 0, 7, 46, },
+ { 1, 0, 0, 0, 7, 46, },
+ { 0, 0, 0, 0, 8, 46, },
+ { 2, 0, 0, 0, 8, 46, },
+ { 1, 0, 0, 0, 8, 46, },
+ { 0, 0, 0, 0, 9, 46, },
+ { 2, 0, 0, 0, 9, 46, },
+ { 1, 0, 0, 0, 9, 46, },
+ { 0, 0, 0, 0, 10, 46, },
+ { 2, 0, 0, 0, 10, 46, },
+ { 1, 0, 0, 0, 10, 46, },
+ { 0, 0, 0, 0, 11, 46, },
+ { 2, 0, 0, 0, 11, 46, },
+ { 1, 0, 0, 0, 11, 46, },
+ { 0, 0, 0, 0, 12, 63, },
+ { 2, 0, 0, 0, 12, 46, },
+ { 1, 0, 0, 0, 12, 46, },
+ { 0, 0, 0, 0, 13, 63, },
+ { 2, 0, 0, 0, 13, 46, },
+ { 1, 0, 0, 0, 13, 46, },
+ { 0, 0, 0, 0, 14, 63, },
+ { 2, 0, 0, 0, 14, 63, },
+ { 1, 0, 0, 0, 14, 46, },
+ { 0, 0, 0, 1, 1, 46, },
+ { 2, 0, 0, 1, 1, 46, },
+ { 1, 0, 0, 1, 1, 46, },
+ { 0, 0, 0, 1, 2, 46, },
+ { 2, 0, 0, 1, 2, 46, },
+ { 1, 0, 0, 1, 2, 46, },
+ { 0, 0, 0, 1, 3, 46, },
+ { 2, 0, 0, 1, 3, 46, },
+ { 1, 0, 0, 1, 3, 46, },
+ { 0, 0, 0, 1, 4, 46, },
+ { 2, 0, 0, 1, 4, 46, },
+ { 1, 0, 0, 1, 4, 46, },
+ { 0, 0, 0, 1, 5, 46, },
+ { 2, 0, 0, 1, 5, 46, },
+ { 1, 0, 0, 1, 5, 46, },
+ { 0, 0, 0, 1, 6, 46, },
+ { 2, 0, 0, 1, 6, 46, },
+ { 1, 0, 0, 1, 6, 46, },
+ { 0, 0, 0, 1, 7, 46, },
+ { 2, 0, 0, 1, 7, 46, },
+ { 1, 0, 0, 1, 7, 46, },
+ { 0, 0, 0, 1, 8, 46, },
+ { 2, 0, 0, 1, 8, 46, },
+ { 1, 0, 0, 1, 8, 46, },
+ { 0, 0, 0, 1, 9, 46, },
+ { 2, 0, 0, 1, 9, 46, },
+ { 1, 0, 0, 1, 9, 46, },
+ { 0, 0, 0, 1, 10, 46, },
+ { 2, 0, 0, 1, 10, 46, },
+ { 1, 0, 0, 1, 10, 46, },
+ { 0, 0, 0, 1, 11, 46, },
+ { 2, 0, 0, 1, 11, 46, },
+ { 1, 0, 0, 1, 11, 46, },
+ { 0, 0, 0, 1, 12, 63, },
+ { 2, 0, 0, 1, 12, 46, },
+ { 1, 0, 0, 1, 12, 46, },
+ { 0, 0, 0, 1, 13, 63, },
+ { 2, 0, 0, 1, 13, 46, },
+ { 1, 0, 0, 1, 13, 46, },
+ { 0, 0, 0, 1, 14, 63, },
+ { 2, 0, 0, 1, 14, 63, },
+ { 1, 0, 0, 1, 14, 46, },
+ { 0, 0, 0, 2, 1, 46, },
+ { 2, 0, 0, 2, 1, 46, },
+ { 1, 0, 0, 2, 1, 46, },
+ { 0, 0, 0, 2, 2, 46, },
+ { 2, 0, 0, 2, 2, 46, },
+ { 1, 0, 0, 2, 2, 46, },
+ { 0, 0, 0, 2, 3, 46, },
+ { 2, 0, 0, 2, 3, 46, },
+ { 1, 0, 0, 2, 3, 46, },
+ { 0, 0, 0, 2, 4, 46, },
+ { 2, 0, 0, 2, 4, 46, },
+ { 1, 0, 0, 2, 4, 46, },
+ { 0, 0, 0, 2, 5, 46, },
+ { 2, 0, 0, 2, 5, 46, },
+ { 1, 0, 0, 2, 5, 46, },
+ { 0, 0, 0, 2, 6, 46, },
+ { 2, 0, 0, 2, 6, 46, },
+ { 1, 0, 0, 2, 6, 46, },
+ { 0, 0, 0, 2, 7, 46, },
+ { 2, 0, 0, 2, 7, 46, },
+ { 1, 0, 0, 2, 7, 46, },
+ { 0, 0, 0, 2, 8, 46, },
+ { 2, 0, 0, 2, 8, 46, },
+ { 1, 0, 0, 2, 8, 46, },
+ { 0, 0, 0, 2, 9, 46, },
+ { 2, 0, 0, 2, 9, 46, },
+ { 1, 0, 0, 2, 9, 46, },
+ { 0, 0, 0, 2, 10, 46, },
+ { 2, 0, 0, 2, 10, 46, },
+ { 1, 0, 0, 2, 10, 46, },
+ { 0, 0, 0, 2, 11, 46, },
+ { 2, 0, 0, 2, 11, 46, },
+ { 1, 0, 0, 2, 11, 46, },
+ { 0, 0, 0, 2, 12, 63, },
+ { 2, 0, 0, 2, 12, 46, },
+ { 1, 0, 0, 2, 12, 46, },
+ { 0, 0, 0, 2, 13, 63, },
+ { 2, 0, 0, 2, 13, 46, },
+ { 1, 0, 0, 2, 13, 46, },
+ { 0, 0, 0, 2, 14, 63, },
+ { 2, 0, 0, 2, 14, 63, },
+ { 1, 0, 0, 2, 14, 46, },
+ { 0, 0, 0, 3, 1, 46, },
+ { 2, 0, 0, 3, 1, 46, },
+ { 1, 0, 0, 3, 1, 46, },
+ { 0, 0, 0, 3, 2, 46, },
+ { 2, 0, 0, 3, 2, 46, },
+ { 1, 0, 0, 3, 2, 46, },
+ { 0, 0, 0, 3, 3, 46, },
+ { 2, 0, 0, 3, 3, 46, },
+ { 1, 0, 0, 3, 3, 46, },
+ { 0, 0, 0, 3, 4, 46, },
+ { 2, 0, 0, 3, 4, 46, },
+ { 1, 0, 0, 3, 4, 46, },
+ { 0, 0, 0, 3, 5, 46, },
+ { 2, 0, 0, 3, 5, 46, },
+ { 1, 0, 0, 3, 5, 46, },
+ { 0, 0, 0, 3, 6, 46, },
+ { 2, 0, 0, 3, 6, 46, },
+ { 1, 0, 0, 3, 6, 46, },
+ { 0, 0, 0, 3, 7, 46, },
+ { 2, 0, 0, 3, 7, 46, },
+ { 1, 0, 0, 3, 7, 46, },
+ { 0, 0, 0, 3, 8, 46, },
+ { 2, 0, 0, 3, 8, 46, },
+ { 1, 0, 0, 3, 8, 46, },
+ { 0, 0, 0, 3, 9, 46, },
+ { 2, 0, 0, 3, 9, 46, },
+ { 1, 0, 0, 3, 9, 46, },
+ { 0, 0, 0, 3, 10, 46, },
+ { 2, 0, 0, 3, 10, 46, },
+ { 1, 0, 0, 3, 10, 46, },
+ { 0, 0, 0, 3, 11, 46, },
+ { 2, 0, 0, 3, 11, 46, },
+ { 1, 0, 0, 3, 11, 46, },
+ { 0, 0, 0, 3, 12, 63, },
+ { 2, 0, 0, 3, 12, 46, },
+ { 1, 0, 0, 3, 12, 46, },
+ { 0, 0, 0, 3, 13, 63, },
+ { 2, 0, 0, 3, 13, 46, },
+ { 1, 0, 0, 3, 13, 46, },
+ { 0, 0, 0, 3, 14, 63, },
+ { 2, 0, 0, 3, 14, 63, },
+ { 1, 0, 0, 3, 14, 46, },
+ { 0, 0, 0, 6, 1, 46, },
+ { 2, 0, 0, 6, 1, 46, },
+ { 1, 0, 0, 6, 1, 46, },
+ { 0, 0, 0, 6, 2, 46, },
+ { 2, 0, 0, 6, 2, 46, },
+ { 1, 0, 0, 6, 2, 46, },
+ { 0, 0, 0, 6, 3, 46, },
+ { 2, 0, 0, 6, 3, 46, },
+ { 1, 0, 0, 6, 3, 46, },
+ { 0, 0, 0, 6, 4, 46, },
+ { 2, 0, 0, 6, 4, 46, },
+ { 1, 0, 0, 6, 4, 46, },
+ { 0, 0, 0, 6, 5, 46, },
+ { 2, 0, 0, 6, 5, 46, },
+ { 1, 0, 0, 6, 5, 46, },
+ { 0, 0, 0, 6, 6, 46, },
+ { 2, 0, 0, 6, 6, 46, },
+ { 1, 0, 0, 6, 6, 46, },
+ { 0, 0, 0, 6, 7, 46, },
+ { 2, 0, 0, 6, 7, 46, },
+ { 1, 0, 0, 6, 7, 46, },
+ { 0, 0, 0, 6, 8, 46, },
+ { 2, 0, 0, 6, 8, 46, },
+ { 1, 0, 0, 6, 8, 46, },
+ { 0, 0, 0, 6, 9, 46, },
+ { 2, 0, 0, 6, 9, 46, },
+ { 1, 0, 0, 6, 9, 46, },
+ { 0, 0, 0, 6, 10, 46, },
+ { 2, 0, 0, 6, 10, 46, },
+ { 1, 0, 0, 6, 10, 46, },
+ { 0, 0, 0, 6, 11, 46, },
+ { 2, 0, 0, 6, 11, 46, },
+ { 1, 0, 0, 6, 11, 46, },
+ { 0, 0, 0, 6, 12, 63, },
+ { 2, 0, 0, 6, 12, 46, },
+ { 1, 0, 0, 6, 12, 46, },
+ { 0, 0, 0, 6, 13, 63, },
+ { 2, 0, 0, 6, 13, 46, },
+ { 1, 0, 0, 6, 13, 46, },
+ { 0, 0, 0, 6, 14, 63, },
+ { 2, 0, 0, 6, 14, 63, },
+ { 1, 0, 0, 6, 14, 46, },
+ { 0, 0, 0, 7, 1, 46, },
+ { 2, 0, 0, 7, 1, 46, },
+ { 1, 0, 0, 7, 1, 46, },
+ { 0, 0, 0, 7, 2, 46, },
+ { 2, 0, 0, 7, 2, 46, },
+ { 1, 0, 0, 7, 2, 46, },
+ { 0, 0, 0, 7, 3, 46, },
+ { 2, 0, 0, 7, 3, 46, },
+ { 1, 0, 0, 7, 3, 46, },
+ { 0, 0, 0, 7, 4, 46, },
+ { 2, 0, 0, 7, 4, 46, },
+ { 1, 0, 0, 7, 4, 46, },
+ { 0, 0, 0, 7, 5, 46, },
+ { 2, 0, 0, 7, 5, 46, },
+ { 1, 0, 0, 7, 5, 46, },
+ { 0, 0, 0, 7, 6, 46, },
+ { 2, 0, 0, 7, 6, 46, },
+ { 1, 0, 0, 7, 6, 46, },
+ { 0, 0, 0, 7, 7, 46, },
+ { 2, 0, 0, 7, 7, 46, },
+ { 1, 0, 0, 7, 7, 46, },
+ { 0, 0, 0, 7, 8, 46, },
+ { 2, 0, 0, 7, 8, 46, },
+ { 1, 0, 0, 7, 8, 46, },
+ { 0, 0, 0, 7, 9, 46, },
+ { 2, 0, 0, 7, 9, 46, },
+ { 1, 0, 0, 7, 9, 46, },
+ { 0, 0, 0, 7, 10, 46, },
+ { 2, 0, 0, 7, 10, 46, },
+ { 1, 0, 0, 7, 10, 46, },
+ { 0, 0, 0, 7, 11, 46, },
+ { 2, 0, 0, 7, 11, 46, },
+ { 1, 0, 0, 7, 11, 46, },
+ { 0, 0, 0, 7, 12, 63, },
+ { 2, 0, 0, 7, 12, 46, },
+ { 1, 0, 0, 7, 12, 46, },
+ { 0, 0, 0, 7, 13, 63, },
+ { 2, 0, 0, 7, 13, 46, },
+ { 1, 0, 0, 7, 13, 46, },
+ { 0, 0, 0, 7, 14, 63, },
+ { 2, 0, 0, 7, 14, 63, },
+ { 1, 0, 0, 7, 14, 46, },
+ { 0, 0, 1, 2, 1, 63, },
+ { 2, 0, 1, 2, 1, 63, },
+ { 1, 0, 1, 2, 1, 63, },
+ { 0, 0, 1, 2, 2, 63, },
+ { 2, 0, 1, 2, 2, 63, },
+ { 1, 0, 1, 2, 2, 63, },
+ { 0, 0, 1, 2, 3, 30, },
+ { 2, 0, 1, 2, 3, 34, },
+ { 1, 0, 1, 2, 3, 34, },
+ { 0, 0, 1, 2, 4, 34, },
+ { 2, 0, 1, 2, 4, 34, },
+ { 1, 0, 1, 2, 4, 34, },
+ { 0, 0, 1, 2, 5, 34, },
+ { 2, 0, 1, 2, 5, 34, },
+ { 1, 0, 1, 2, 5, 34, },
+ { 0, 0, 1, 2, 6, 34, },
+ { 2, 0, 1, 2, 6, 34, },
+ { 1, 0, 1, 2, 6, 34, },
+ { 0, 0, 1, 2, 7, 34, },
+ { 2, 0, 1, 2, 7, 34, },
+ { 1, 0, 1, 2, 7, 34, },
+ { 0, 0, 1, 2, 8, 34, },
+ { 2, 0, 1, 2, 8, 34, },
+ { 1, 0, 1, 2, 8, 34, },
+ { 0, 0, 1, 2, 9, 34, },
+ { 2, 0, 1, 2, 9, 34, },
+ { 1, 0, 1, 2, 9, 34, },
+ { 0, 0, 1, 2, 10, 34, },
+ { 2, 0, 1, 2, 10, 34, },
+ { 1, 0, 1, 2, 10, 34, },
+ { 0, 0, 1, 2, 11, 28, },
+ { 2, 0, 1, 2, 11, 34, },
+ { 1, 0, 1, 2, 11, 34, },
+ { 0, 0, 1, 2, 12, 63, },
+ { 2, 0, 1, 2, 12, 34, },
+ { 1, 0, 1, 2, 12, 34, },
+ { 0, 0, 1, 2, 13, 63, },
+ { 2, 0, 1, 2, 13, 34, },
+ { 1, 0, 1, 2, 13, 34, },
+ { 0, 0, 1, 2, 14, 63, },
+ { 2, 0, 1, 2, 14, 63, },
+ { 1, 0, 1, 2, 14, 63, },
+ { 0, 0, 1, 3, 1, 63, },
+ { 2, 0, 1, 3, 1, 63, },
+ { 1, 0, 1, 3, 1, 63, },
+ { 0, 0, 1, 3, 2, 63, },
+ { 2, 0, 1, 3, 2, 63, },
+ { 1, 0, 1, 3, 2, 63, },
+ { 0, 0, 1, 3, 3, 30, },
+ { 2, 0, 1, 3, 3, 34, },
+ { 1, 0, 1, 3, 3, 34, },
+ { 0, 0, 1, 3, 4, 34, },
+ { 2, 0, 1, 3, 4, 34, },
+ { 1, 0, 1, 3, 4, 34, },
+ { 0, 0, 1, 3, 5, 34, },
+ { 2, 0, 1, 3, 5, 34, },
+ { 1, 0, 1, 3, 5, 34, },
+ { 0, 0, 1, 3, 6, 34, },
+ { 2, 0, 1, 3, 6, 34, },
+ { 1, 0, 1, 3, 6, 34, },
+ { 0, 0, 1, 3, 7, 34, },
+ { 2, 0, 1, 3, 7, 34, },
+ { 1, 0, 1, 3, 7, 34, },
+ { 0, 0, 1, 3, 8, 34, },
+ { 2, 0, 1, 3, 8, 34, },
+ { 1, 0, 1, 3, 8, 34, },
+ { 0, 0, 1, 3, 9, 34, },
+ { 2, 0, 1, 3, 9, 34, },
+ { 1, 0, 1, 3, 9, 34, },
+ { 0, 0, 1, 3, 10, 34, },
+ { 2, 0, 1, 3, 10, 34, },
+ { 1, 0, 1, 3, 10, 34, },
+ { 0, 0, 1, 3, 11, 28, },
+ { 2, 0, 1, 3, 11, 34, },
+ { 1, 0, 1, 3, 11, 34, },
+ { 0, 0, 1, 3, 12, 63, },
+ { 2, 0, 1, 3, 12, 34, },
+ { 1, 0, 1, 3, 12, 34, },
+ { 0, 0, 1, 3, 13, 63, },
+ { 2, 0, 1, 3, 13, 34, },
+ { 1, 0, 1, 3, 13, 34, },
+ { 0, 0, 1, 3, 14, 63, },
+ { 2, 0, 1, 3, 14, 63, },
+ { 1, 0, 1, 3, 14, 63, },
+ { 0, 0, 1, 6, 1, 63, },
+ { 2, 0, 1, 6, 1, 63, },
+ { 1, 0, 1, 6, 1, 63, },
+ { 0, 0, 1, 6, 2, 63, },
+ { 2, 0, 1, 6, 2, 63, },
+ { 1, 0, 1, 6, 2, 63, },
+ { 0, 0, 1, 6, 3, 30, },
+ { 2, 0, 1, 6, 3, 34, },
+ { 1, 0, 1, 6, 3, 34, },
+ { 0, 0, 1, 6, 4, 34, },
+ { 2, 0, 1, 6, 4, 34, },
+ { 1, 0, 1, 6, 4, 34, },
+ { 0, 0, 1, 6, 5, 34, },
+ { 2, 0, 1, 6, 5, 34, },
+ { 1, 0, 1, 6, 5, 34, },
+ { 0, 0, 1, 6, 6, 34, },
+ { 2, 0, 1, 6, 6, 34, },
+ { 1, 0, 1, 6, 6, 34, },
+ { 0, 0, 1, 6, 7, 34, },
+ { 2, 0, 1, 6, 7, 34, },
+ { 1, 0, 1, 6, 7, 34, },
+ { 0, 0, 1, 6, 8, 34, },
+ { 2, 0, 1, 6, 8, 34, },
+ { 1, 0, 1, 6, 8, 34, },
+ { 0, 0, 1, 6, 9, 34, },
+ { 2, 0, 1, 6, 9, 34, },
+ { 1, 0, 1, 6, 9, 34, },
+ { 0, 0, 1, 6, 10, 34, },
+ { 2, 0, 1, 6, 10, 34, },
+ { 1, 0, 1, 6, 10, 34, },
+ { 0, 0, 1, 6, 11, 28, },
+ { 2, 0, 1, 6, 11, 34, },
+ { 1, 0, 1, 6, 11, 34, },
+ { 0, 0, 1, 6, 12, 63, },
+ { 2, 0, 1, 6, 12, 34, },
+ { 1, 0, 1, 6, 12, 34, },
+ { 0, 0, 1, 6, 13, 63, },
+ { 2, 0, 1, 6, 13, 34, },
+ { 1, 0, 1, 6, 13, 34, },
+ { 0, 0, 1, 6, 14, 63, },
+ { 2, 0, 1, 6, 14, 63, },
+ { 1, 0, 1, 6, 14, 63, },
+ { 0, 0, 1, 7, 1, 63, },
+ { 2, 0, 1, 7, 1, 63, },
+ { 1, 0, 1, 7, 1, 63, },
+ { 0, 0, 1, 7, 2, 63, },
+ { 2, 0, 1, 7, 2, 63, },
+ { 1, 0, 1, 7, 2, 63, },
+ { 0, 0, 1, 7, 3, 30, },
+ { 2, 0, 1, 7, 3, 34, },
+ { 1, 0, 1, 7, 3, 34, },
+ { 0, 0, 1, 7, 4, 34, },
+ { 2, 0, 1, 7, 4, 34, },
+ { 1, 0, 1, 7, 4, 34, },
+ { 0, 0, 1, 7, 5, 34, },
+ { 2, 0, 1, 7, 5, 34, },
+ { 1, 0, 1, 7, 5, 34, },
+ { 0, 0, 1, 7, 6, 34, },
+ { 2, 0, 1, 7, 6, 34, },
+ { 1, 0, 1, 7, 6, 34, },
+ { 0, 0, 1, 7, 7, 34, },
+ { 2, 0, 1, 7, 7, 34, },
+ { 1, 0, 1, 7, 7, 34, },
+ { 0, 0, 1, 7, 8, 34, },
+ { 2, 0, 1, 7, 8, 34, },
+ { 1, 0, 1, 7, 8, 34, },
+ { 0, 0, 1, 7, 9, 34, },
+ { 2, 0, 1, 7, 9, 34, },
+ { 1, 0, 1, 7, 9, 34, },
+ { 0, 0, 1, 7, 10, 34, },
+ { 2, 0, 1, 7, 10, 34, },
+ { 1, 0, 1, 7, 10, 34, },
+ { 0, 0, 1, 7, 11, 28, },
+ { 2, 0, 1, 7, 11, 34, },
+ { 1, 0, 1, 7, 11, 34, },
+ { 0, 0, 1, 7, 12, 63, },
+ { 2, 0, 1, 7, 12, 34, },
+ { 1, 0, 1, 7, 12, 34, },
+ { 0, 0, 1, 7, 13, 63, },
+ { 2, 0, 1, 7, 13, 34, },
+ { 1, 0, 1, 7, 13, 34, },
+ { 0, 0, 1, 7, 14, 63, },
+ { 2, 0, 1, 7, 14, 63, },
+ { 1, 0, 1, 7, 14, 63, },
+ { 0, 1, 0, 1, 36, 46, },
+ { 2, 1, 0, 1, 36, 46, },
+ { 1, 1, 0, 1, 36, 46, },
+ { 0, 1, 0, 1, 40, 46, },
+ { 2, 1, 0, 1, 40, 46, },
+ { 1, 1, 0, 1, 40, 46, },
+ { 0, 1, 0, 1, 44, 46, },
+ { 2, 1, 0, 1, 44, 46, },
+ { 1, 1, 0, 1, 44, 46, },
+ { 0, 1, 0, 1, 48, 46, },
+ { 2, 1, 0, 1, 48, 46, },
+ { 1, 1, 0, 1, 48, 46, },
+ { 0, 1, 0, 1, 52, 46, },
+ { 2, 1, 0, 1, 52, 46, },
+ { 1, 1, 0, 1, 52, 46, },
+ { 0, 1, 0, 1, 56, 46, },
+ { 2, 1, 0, 1, 56, 46, },
+ { 1, 1, 0, 1, 56, 46, },
+ { 0, 1, 0, 1, 60, 46, },
+ { 2, 1, 0, 1, 60, 46, },
+ { 1, 1, 0, 1, 60, 46, },
+ { 0, 1, 0, 1, 64, 46, },
+ { 2, 1, 0, 1, 64, 46, },
+ { 1, 1, 0, 1, 64, 46, },
+ { 0, 1, 0, 1, 100, 46, },
+ { 2, 1, 0, 1, 100, 46, },
+ { 1, 1, 0, 1, 100, 46, },
+ { 0, 1, 0, 1, 104, 46, },
+ { 2, 1, 0, 1, 104, 46, },
+ { 1, 1, 0, 1, 104, 46, },
+ { 0, 1, 0, 1, 108, 46, },
+ { 2, 1, 0, 1, 108, 46, },
+ { 1, 1, 0, 1, 108, 46, },
+ { 0, 1, 0, 1, 112, 46, },
+ { 2, 1, 0, 1, 112, 46, },
+ { 1, 1, 0, 1, 112, 46, },
+ { 0, 1, 0, 1, 116, 46, },
+ { 2, 1, 0, 1, 116, 46, },
+ { 1, 1, 0, 1, 116, 46, },
+ { 0, 1, 0, 1, 120, 46, },
+ { 2, 1, 0, 1, 120, 46, },
+ { 1, 1, 0, 1, 120, 46, },
+ { 0, 1, 0, 1, 124, 46, },
+ { 2, 1, 0, 1, 124, 46, },
+ { 1, 1, 0, 1, 124, 46, },
+ { 0, 1, 0, 1, 128, 46, },
+ { 2, 1, 0, 1, 128, 46, },
+ { 1, 1, 0, 1, 128, 46, },
+ { 0, 1, 0, 1, 132, 46, },
+ { 2, 1, 0, 1, 132, 46, },
+ { 1, 1, 0, 1, 132, 46, },
+ { 0, 1, 0, 1, 136, 46, },
+ { 2, 1, 0, 1, 136, 46, },
+ { 1, 1, 0, 1, 136, 46, },
+ { 0, 1, 0, 1, 140, 46, },
+ { 2, 1, 0, 1, 140, 46, },
+ { 1, 1, 0, 1, 140, 46, },
+ { 0, 1, 0, 1, 149, 46, },
+ { 2, 1, 0, 1, 149, 46, },
+ { 1, 1, 0, 1, 149, 63, },
+ { 0, 1, 0, 1, 153, 46, },
+ { 2, 1, 0, 1, 153, 46, },
+ { 1, 1, 0, 1, 153, 63, },
+ { 0, 1, 0, 1, 157, 46, },
+ { 2, 1, 0, 1, 157, 46, },
+ { 1, 1, 0, 1, 157, 63, },
+ { 0, 1, 0, 1, 161, 46, },
+ { 2, 1, 0, 1, 161, 46, },
+ { 1, 1, 0, 1, 161, 63, },
+ { 0, 1, 0, 1, 165, 46, },
+ { 2, 1, 0, 1, 165, 46, },
+ { 1, 1, 0, 1, 165, 63, },
+ { 0, 1, 0, 2, 36, 46, },
+ { 2, 1, 0, 2, 36, 46, },
+ { 1, 1, 0, 2, 36, 46, },
+ { 0, 1, 0, 2, 40, 46, },
+ { 2, 1, 0, 2, 40, 46, },
+ { 1, 1, 0, 2, 40, 46, },
+ { 0, 1, 0, 2, 44, 46, },
+ { 2, 1, 0, 2, 44, 46, },
+ { 1, 1, 0, 2, 44, 46, },
+ { 0, 1, 0, 2, 48, 46, },
+ { 2, 1, 0, 2, 48, 46, },
+ { 1, 1, 0, 2, 48, 46, },
+ { 0, 1, 0, 2, 52, 46, },
+ { 2, 1, 0, 2, 52, 46, },
+ { 1, 1, 0, 2, 52, 46, },
+ { 0, 1, 0, 2, 56, 46, },
+ { 2, 1, 0, 2, 56, 46, },
+ { 1, 1, 0, 2, 56, 46, },
+ { 0, 1, 0, 2, 60, 46, },
+ { 2, 1, 0, 2, 60, 46, },
+ { 1, 1, 0, 2, 60, 46, },
+ { 0, 1, 0, 2, 64, 46, },
+ { 2, 1, 0, 2, 64, 46, },
+ { 1, 1, 0, 2, 64, 46, },
+ { 0, 1, 0, 2, 100, 46, },
+ { 2, 1, 0, 2, 100, 46, },
+ { 1, 1, 0, 2, 100, 46, },
+ { 0, 1, 0, 2, 104, 46, },
+ { 2, 1, 0, 2, 104, 46, },
+ { 1, 1, 0, 2, 104, 46, },
+ { 0, 1, 0, 2, 108, 46, },
+ { 2, 1, 0, 2, 108, 46, },
+ { 1, 1, 0, 2, 108, 46, },
+ { 0, 1, 0, 2, 112, 46, },
+ { 2, 1, 0, 2, 112, 46, },
+ { 1, 1, 0, 2, 112, 46, },
+ { 0, 1, 0, 2, 116, 46, },
+ { 2, 1, 0, 2, 116, 46, },
+ { 1, 1, 0, 2, 116, 46, },
+ { 0, 1, 0, 2, 120, 46, },
+ { 2, 1, 0, 2, 120, 46, },
+ { 1, 1, 0, 2, 120, 46, },
+ { 0, 1, 0, 2, 124, 46, },
+ { 2, 1, 0, 2, 124, 46, },
+ { 1, 1, 0, 2, 124, 46, },
+ { 0, 1, 0, 2, 128, 46, },
+ { 2, 1, 0, 2, 128, 46, },
+ { 1, 1, 0, 2, 128, 46, },
+ { 0, 1, 0, 2, 132, 46, },
+ { 2, 1, 0, 2, 132, 46, },
+ { 1, 1, 0, 2, 132, 46, },
+ { 0, 1, 0, 2, 136, 46, },
+ { 2, 1, 0, 2, 136, 46, },
+ { 1, 1, 0, 2, 136, 46, },
+ { 0, 1, 0, 2, 140, 46, },
+ { 2, 1, 0, 2, 140, 46, },
+ { 1, 1, 0, 2, 140, 46, },
+ { 0, 1, 0, 2, 149, 46, },
+ { 2, 1, 0, 2, 149, 46, },
+ { 1, 1, 0, 2, 149, 63, },
+ { 0, 1, 0, 2, 153, 46, },
+ { 2, 1, 0, 2, 153, 46, },
+ { 1, 1, 0, 2, 153, 63, },
+ { 0, 1, 0, 2, 157, 46, },
+ { 2, 1, 0, 2, 157, 46, },
+ { 1, 1, 0, 2, 157, 63, },
+ { 0, 1, 0, 2, 161, 46, },
+ { 2, 1, 0, 2, 161, 46, },
+ { 1, 1, 0, 2, 161, 63, },
+ { 0, 1, 0, 2, 165, 46, },
+ { 2, 1, 0, 2, 165, 46, },
+ { 1, 1, 0, 2, 165, 63, },
+ { 0, 1, 0, 3, 36, 46, },
+ { 2, 1, 0, 3, 36, 46, },
+ { 1, 1, 0, 3, 36, 46, },
+ { 0, 1, 0, 3, 40, 46, },
+ { 2, 1, 0, 3, 40, 46, },
+ { 1, 1, 0, 3, 40, 46, },
+ { 0, 1, 0, 3, 44, 46, },
+ { 2, 1, 0, 3, 44, 46, },
+ { 1, 1, 0, 3, 44, 46, },
+ { 0, 1, 0, 3, 48, 46, },
+ { 2, 1, 0, 3, 48, 46, },
+ { 1, 1, 0, 3, 48, 46, },
+ { 0, 1, 0, 3, 52, 46, },
+ { 2, 1, 0, 3, 52, 46, },
+ { 1, 1, 0, 3, 52, 46, },
+ { 0, 1, 0, 3, 56, 46, },
+ { 2, 1, 0, 3, 56, 46, },
+ { 1, 1, 0, 3, 56, 46, },
+ { 0, 1, 0, 3, 60, 46, },
+ { 2, 1, 0, 3, 60, 46, },
+ { 1, 1, 0, 3, 60, 46, },
+ { 0, 1, 0, 3, 64, 46, },
+ { 2, 1, 0, 3, 64, 46, },
+ { 1, 1, 0, 3, 64, 46, },
+ { 0, 1, 0, 3, 100, 46, },
+ { 2, 1, 0, 3, 100, 46, },
+ { 1, 1, 0, 3, 100, 46, },
+ { 0, 1, 0, 3, 104, 46, },
+ { 2, 1, 0, 3, 104, 46, },
+ { 1, 1, 0, 3, 104, 46, },
+ { 0, 1, 0, 3, 108, 46, },
+ { 2, 1, 0, 3, 108, 46, },
+ { 1, 1, 0, 3, 108, 46, },
+ { 0, 1, 0, 3, 112, 46, },
+ { 2, 1, 0, 3, 112, 46, },
+ { 1, 1, 0, 3, 112, 46, },
+ { 0, 1, 0, 3, 116, 46, },
+ { 2, 1, 0, 3, 116, 46, },
+ { 1, 1, 0, 3, 116, 46, },
+ { 0, 1, 0, 3, 120, 46, },
+ { 2, 1, 0, 3, 120, 46, },
+ { 1, 1, 0, 3, 120, 46, },
+ { 0, 1, 0, 3, 124, 46, },
+ { 2, 1, 0, 3, 124, 46, },
+ { 1, 1, 0, 3, 124, 46, },
+ { 0, 1, 0, 3, 128, 46, },
+ { 2, 1, 0, 3, 128, 46, },
+ { 1, 1, 0, 3, 128, 46, },
+ { 0, 1, 0, 3, 132, 46, },
+ { 2, 1, 0, 3, 132, 46, },
+ { 1, 1, 0, 3, 132, 46, },
+ { 0, 1, 0, 3, 136, 46, },
+ { 2, 1, 0, 3, 136, 46, },
+ { 1, 1, 0, 3, 136, 46, },
+ { 0, 1, 0, 3, 140, 46, },
+ { 2, 1, 0, 3, 140, 46, },
+ { 1, 1, 0, 3, 140, 46, },
+ { 0, 1, 0, 3, 149, 46, },
+ { 2, 1, 0, 3, 149, 46, },
+ { 1, 1, 0, 3, 149, 63, },
+ { 0, 1, 0, 3, 153, 46, },
+ { 2, 1, 0, 3, 153, 46, },
+ { 1, 1, 0, 3, 153, 63, },
+ { 0, 1, 0, 3, 157, 46, },
+ { 2, 1, 0, 3, 157, 46, },
+ { 1, 1, 0, 3, 157, 63, },
+ { 0, 1, 0, 3, 161, 46, },
+ { 2, 1, 0, 3, 161, 46, },
+ { 1, 1, 0, 3, 161, 63, },
+ { 0, 1, 0, 3, 165, 46, },
+ { 2, 1, 0, 3, 165, 46, },
+ { 1, 1, 0, 3, 165, 63, },
+ { 0, 1, 0, 6, 36, 46, },
+ { 2, 1, 0, 6, 36, 46, },
+ { 1, 1, 0, 6, 36, 46, },
+ { 0, 1, 0, 6, 40, 46, },
+ { 2, 1, 0, 6, 40, 46, },
+ { 1, 1, 0, 6, 40, 46, },
+ { 0, 1, 0, 6, 44, 46, },
+ { 2, 1, 0, 6, 44, 46, },
+ { 1, 1, 0, 6, 44, 46, },
+ { 0, 1, 0, 6, 48, 46, },
+ { 2, 1, 0, 6, 48, 46, },
+ { 1, 1, 0, 6, 48, 46, },
+ { 0, 1, 0, 6, 52, 46, },
+ { 2, 1, 0, 6, 52, 46, },
+ { 1, 1, 0, 6, 52, 46, },
+ { 0, 1, 0, 6, 56, 46, },
+ { 2, 1, 0, 6, 56, 46, },
+ { 1, 1, 0, 6, 56, 46, },
+ { 0, 1, 0, 6, 60, 46, },
+ { 2, 1, 0, 6, 60, 46, },
+ { 1, 1, 0, 6, 60, 46, },
+ { 0, 1, 0, 6, 64, 46, },
+ { 2, 1, 0, 6, 64, 46, },
+ { 1, 1, 0, 6, 64, 46, },
+ { 0, 1, 0, 6, 100, 46, },
+ { 2, 1, 0, 6, 100, 46, },
+ { 1, 1, 0, 6, 100, 46, },
+ { 0, 1, 0, 6, 104, 46, },
+ { 2, 1, 0, 6, 104, 46, },
+ { 1, 1, 0, 6, 104, 46, },
+ { 0, 1, 0, 6, 108, 46, },
+ { 2, 1, 0, 6, 108, 46, },
+ { 1, 1, 0, 6, 108, 46, },
+ { 0, 1, 0, 6, 112, 46, },
+ { 2, 1, 0, 6, 112, 46, },
+ { 1, 1, 0, 6, 112, 46, },
+ { 0, 1, 0, 6, 116, 46, },
+ { 2, 1, 0, 6, 116, 46, },
+ { 1, 1, 0, 6, 116, 46, },
+ { 0, 1, 0, 6, 120, 46, },
+ { 2, 1, 0, 6, 120, 46, },
+ { 1, 1, 0, 6, 120, 46, },
+ { 0, 1, 0, 6, 124, 46, },
+ { 2, 1, 0, 6, 124, 46, },
+ { 1, 1, 0, 6, 124, 46, },
+ { 0, 1, 0, 6, 128, 46, },
+ { 2, 1, 0, 6, 128, 46, },
+ { 1, 1, 0, 6, 128, 46, },
+ { 0, 1, 0, 6, 132, 46, },
+ { 2, 1, 0, 6, 132, 46, },
+ { 1, 1, 0, 6, 132, 46, },
+ { 0, 1, 0, 6, 136, 46, },
+ { 2, 1, 0, 6, 136, 46, },
+ { 1, 1, 0, 6, 136, 46, },
+ { 0, 1, 0, 6, 140, 46, },
+ { 2, 1, 0, 6, 140, 46, },
+ { 1, 1, 0, 6, 140, 46, },
+ { 0, 1, 0, 6, 149, 46, },
+ { 2, 1, 0, 6, 149, 46, },
+ { 1, 1, 0, 6, 149, 63, },
+ { 0, 1, 0, 6, 153, 46, },
+ { 2, 1, 0, 6, 153, 46, },
+ { 1, 1, 0, 6, 153, 63, },
+ { 0, 1, 0, 6, 157, 46, },
+ { 2, 1, 0, 6, 157, 46, },
+ { 1, 1, 0, 6, 157, 63, },
+ { 0, 1, 0, 6, 161, 46, },
+ { 2, 1, 0, 6, 161, 46, },
+ { 1, 1, 0, 6, 161, 63, },
+ { 0, 1, 0, 6, 165, 46, },
+ { 2, 1, 0, 6, 165, 46, },
+ { 1, 1, 0, 6, 165, 63, },
+ { 0, 1, 0, 7, 36, 46, },
+ { 2, 1, 0, 7, 36, 46, },
+ { 1, 1, 0, 7, 36, 46, },
+ { 0, 1, 0, 7, 40, 46, },
+ { 2, 1, 0, 7, 40, 46, },
+ { 1, 1, 0, 7, 40, 46, },
+ { 0, 1, 0, 7, 44, 46, },
+ { 2, 1, 0, 7, 44, 46, },
+ { 1, 1, 0, 7, 44, 46, },
+ { 0, 1, 0, 7, 48, 46, },
+ { 2, 1, 0, 7, 48, 46, },
+ { 1, 1, 0, 7, 48, 46, },
+ { 0, 1, 0, 7, 52, 46, },
+ { 2, 1, 0, 7, 52, 46, },
+ { 1, 1, 0, 7, 52, 46, },
+ { 0, 1, 0, 7, 56, 46, },
+ { 2, 1, 0, 7, 56, 46, },
+ { 1, 1, 0, 7, 56, 46, },
+ { 0, 1, 0, 7, 60, 46, },
+ { 2, 1, 0, 7, 60, 46, },
+ { 1, 1, 0, 7, 60, 46, },
+ { 0, 1, 0, 7, 64, 46, },
+ { 2, 1, 0, 7, 64, 46, },
+ { 1, 1, 0, 7, 64, 46, },
+ { 0, 1, 0, 7, 100, 46, },
+ { 2, 1, 0, 7, 100, 46, },
+ { 1, 1, 0, 7, 100, 46, },
+ { 0, 1, 0, 7, 104, 46, },
+ { 2, 1, 0, 7, 104, 46, },
+ { 1, 1, 0, 7, 104, 46, },
+ { 0, 1, 0, 7, 108, 46, },
+ { 2, 1, 0, 7, 108, 46, },
+ { 1, 1, 0, 7, 108, 46, },
+ { 0, 1, 0, 7, 112, 46, },
+ { 2, 1, 0, 7, 112, 46, },
+ { 1, 1, 0, 7, 112, 46, },
+ { 0, 1, 0, 7, 116, 46, },
+ { 2, 1, 0, 7, 116, 46, },
+ { 1, 1, 0, 7, 116, 46, },
+ { 0, 1, 0, 7, 120, 46, },
+ { 2, 1, 0, 7, 120, 46, },
+ { 1, 1, 0, 7, 120, 46, },
+ { 0, 1, 0, 7, 124, 46, },
+ { 2, 1, 0, 7, 124, 46, },
+ { 1, 1, 0, 7, 124, 46, },
+ { 0, 1, 0, 7, 128, 46, },
+ { 2, 1, 0, 7, 128, 46, },
+ { 1, 1, 0, 7, 128, 46, },
+ { 0, 1, 0, 7, 132, 46, },
+ { 2, 1, 0, 7, 132, 46, },
+ { 1, 1, 0, 7, 132, 46, },
+ { 0, 1, 0, 7, 136, 46, },
+ { 2, 1, 0, 7, 136, 46, },
+ { 1, 1, 0, 7, 136, 46, },
+ { 0, 1, 0, 7, 140, 46, },
+ { 2, 1, 0, 7, 140, 46, },
+ { 1, 1, 0, 7, 140, 46, },
+ { 0, 1, 0, 7, 149, 46, },
+ { 2, 1, 0, 7, 149, 46, },
+ { 1, 1, 0, 7, 149, 63, },
+ { 0, 1, 0, 7, 153, 46, },
+ { 2, 1, 0, 7, 153, 46, },
+ { 1, 1, 0, 7, 153, 63, },
+ { 0, 1, 0, 7, 157, 46, },
+ { 2, 1, 0, 7, 157, 46, },
+ { 1, 1, 0, 7, 157, 63, },
+ { 0, 1, 0, 7, 161, 46, },
+ { 2, 1, 0, 7, 161, 46, },
+ { 1, 1, 0, 7, 161, 63, },
+ { 0, 1, 0, 7, 165, 46, },
+ { 2, 1, 0, 7, 165, 46, },
+ { 1, 1, 0, 7, 165, 63, },
+ { 0, 1, 1, 2, 38, 46, },
+ { 2, 1, 1, 2, 38, 46, },
+ { 1, 1, 1, 2, 38, 46, },
+ { 0, 1, 1, 2, 46, 46, },
+ { 2, 1, 1, 2, 46, 46, },
+ { 1, 1, 1, 2, 46, 46, },
+ { 0, 1, 1, 2, 54, 46, },
+ { 2, 1, 1, 2, 54, 46, },
+ { 1, 1, 1, 2, 54, 46, },
+ { 0, 1, 1, 2, 62, 46, },
+ { 2, 1, 1, 2, 62, 46, },
+ { 1, 1, 1, 2, 62, 46, },
+ { 0, 1, 1, 2, 102, 46, },
+ { 2, 1, 1, 2, 102, 46, },
+ { 1, 1, 1, 2, 102, 46, },
+ { 0, 1, 1, 2, 110, 46, },
+ { 2, 1, 1, 2, 110, 46, },
+ { 1, 1, 1, 2, 110, 46, },
+ { 0, 1, 1, 2, 118, 46, },
+ { 2, 1, 1, 2, 118, 46, },
+ { 1, 1, 1, 2, 118, 46, },
+ { 0, 1, 1, 2, 126, 46, },
+ { 2, 1, 1, 2, 126, 46, },
+ { 1, 1, 1, 2, 126, 46, },
+ { 0, 1, 1, 2, 134, 46, },
+ { 2, 1, 1, 2, 134, 46, },
+ { 1, 1, 1, 2, 134, 46, },
+ { 0, 1, 1, 2, 151, 46, },
+ { 2, 1, 1, 2, 151, 46, },
+ { 1, 1, 1, 2, 151, 63, },
+ { 0, 1, 1, 2, 159, 46, },
+ { 2, 1, 1, 2, 159, 46, },
+ { 1, 1, 1, 2, 159, 63, },
+ { 0, 1, 1, 3, 38, 46, },
+ { 2, 1, 1, 3, 38, 46, },
+ { 1, 1, 1, 3, 38, 46, },
+ { 0, 1, 1, 3, 46, 46, },
+ { 2, 1, 1, 3, 46, 46, },
+ { 1, 1, 1, 3, 46, 46, },
+ { 0, 1, 1, 3, 54, 46, },
+ { 2, 1, 1, 3, 54, 46, },
+ { 1, 1, 1, 3, 54, 46, },
+ { 0, 1, 1, 3, 62, 46, },
+ { 2, 1, 1, 3, 62, 46, },
+ { 1, 1, 1, 3, 62, 46, },
+ { 0, 1, 1, 3, 102, 46, },
+ { 2, 1, 1, 3, 102, 46, },
+ { 1, 1, 1, 3, 102, 46, },
+ { 0, 1, 1, 3, 110, 46, },
+ { 2, 1, 1, 3, 110, 46, },
+ { 1, 1, 1, 3, 110, 46, },
+ { 0, 1, 1, 3, 118, 46, },
+ { 2, 1, 1, 3, 118, 46, },
+ { 1, 1, 1, 3, 118, 46, },
+ { 0, 1, 1, 3, 126, 46, },
+ { 2, 1, 1, 3, 126, 46, },
+ { 1, 1, 1, 3, 126, 46, },
+ { 0, 1, 1, 3, 134, 46, },
+ { 2, 1, 1, 3, 134, 46, },
+ { 1, 1, 1, 3, 134, 46, },
+ { 0, 1, 1, 3, 151, 46, },
+ { 2, 1, 1, 3, 151, 46, },
+ { 1, 1, 1, 3, 151, 63, },
+ { 0, 1, 1, 3, 159, 46, },
+ { 2, 1, 1, 3, 159, 46, },
+ { 1, 1, 1, 3, 159, 63, },
+ { 0, 1, 1, 6, 38, 46, },
+ { 2, 1, 1, 6, 38, 46, },
+ { 1, 1, 1, 6, 38, 46, },
+ { 0, 1, 1, 6, 46, 46, },
+ { 2, 1, 1, 6, 46, 46, },
+ { 1, 1, 1, 6, 46, 46, },
+ { 0, 1, 1, 6, 54, 46, },
+ { 2, 1, 1, 6, 54, 46, },
+ { 1, 1, 1, 6, 54, 46, },
+ { 0, 1, 1, 6, 62, 46, },
+ { 2, 1, 1, 6, 62, 46, },
+ { 1, 1, 1, 6, 62, 46, },
+ { 0, 1, 1, 6, 102, 46, },
+ { 2, 1, 1, 6, 102, 46, },
+ { 1, 1, 1, 6, 102, 46, },
+ { 0, 1, 1, 6, 110, 46, },
+ { 2, 1, 1, 6, 110, 46, },
+ { 1, 1, 1, 6, 110, 46, },
+ { 0, 1, 1, 6, 118, 46, },
+ { 2, 1, 1, 6, 118, 46, },
+ { 1, 1, 1, 6, 118, 46, },
+ { 0, 1, 1, 6, 126, 46, },
+ { 2, 1, 1, 6, 126, 46, },
+ { 1, 1, 1, 6, 126, 46, },
+ { 0, 1, 1, 6, 134, 46, },
+ { 2, 1, 1, 6, 134, 46, },
+ { 1, 1, 1, 6, 134, 46, },
+ { 0, 1, 1, 6, 151, 46, },
+ { 2, 1, 1, 6, 151, 46, },
+ { 1, 1, 1, 6, 151, 63, },
+ { 0, 1, 1, 6, 159, 46, },
+ { 2, 1, 1, 6, 159, 46, },
+ { 1, 1, 1, 6, 159, 63, },
+ { 0, 1, 1, 7, 38, 46, },
+ { 2, 1, 1, 7, 38, 46, },
+ { 1, 1, 1, 7, 38, 46, },
+ { 0, 1, 1, 7, 46, 46, },
+ { 2, 1, 1, 7, 46, 46, },
+ { 1, 1, 1, 7, 46, 46, },
+ { 0, 1, 1, 7, 54, 46, },
+ { 2, 1, 1, 7, 54, 46, },
+ { 1, 1, 1, 7, 54, 46, },
+ { 0, 1, 1, 7, 62, 46, },
+ { 2, 1, 1, 7, 62, 46, },
+ { 1, 1, 1, 7, 62, 46, },
+ { 0, 1, 1, 7, 102, 46, },
+ { 2, 1, 1, 7, 102, 46, },
+ { 1, 1, 1, 7, 102, 46, },
+ { 0, 1, 1, 7, 110, 46, },
+ { 2, 1, 1, 7, 110, 46, },
+ { 1, 1, 1, 7, 110, 46, },
+ { 0, 1, 1, 7, 118, 46, },
+ { 2, 1, 1, 7, 118, 46, },
+ { 1, 1, 1, 7, 118, 46, },
+ { 0, 1, 1, 7, 126, 46, },
+ { 2, 1, 1, 7, 126, 46, },
+ { 1, 1, 1, 7, 126, 46, },
+ { 0, 1, 1, 7, 134, 46, },
+ { 2, 1, 1, 7, 134, 46, },
+ { 1, 1, 1, 7, 134, 46, },
+ { 0, 1, 1, 7, 151, 46, },
+ { 2, 1, 1, 7, 151, 46, },
+ { 1, 1, 1, 7, 151, 63, },
+ { 0, 1, 1, 7, 159, 46, },
+ { 2, 1, 1, 7, 159, 46, },
+ { 1, 1, 1, 7, 159, 63, },
+ { 0, 1, 2, 4, 42, 46, },
+ { 2, 1, 2, 4, 42, 46, },
+ { 1, 1, 2, 4, 42, 46, },
+ { 0, 1, 2, 4, 58, 46, },
+ { 2, 1, 2, 4, 58, 46, },
+ { 1, 1, 2, 4, 58, 46, },
+ { 0, 1, 2, 4, 106, 46, },
+ { 2, 1, 2, 4, 106, 46, },
+ { 1, 1, 2, 4, 106, 46, },
+ { 0, 1, 2, 4, 122, 46, },
+ { 2, 1, 2, 4, 122, 46, },
+ { 1, 1, 2, 4, 122, 46, },
+ { 0, 1, 2, 4, 155, 46, },
+ { 2, 1, 2, 4, 155, 46, },
+ { 1, 1, 2, 4, 155, 63, },
+ { 0, 1, 2, 5, 42, 46, },
+ { 2, 1, 2, 5, 42, 46, },
+ { 1, 1, 2, 5, 42, 46, },
+ { 0, 1, 2, 5, 58, 46, },
+ { 2, 1, 2, 5, 58, 46, },
+ { 1, 1, 2, 5, 58, 46, },
+ { 0, 1, 2, 5, 106, 46, },
+ { 2, 1, 2, 5, 106, 46, },
+ { 1, 1, 2, 5, 106, 46, },
+ { 0, 1, 2, 5, 122, 46, },
+ { 2, 1, 2, 5, 122, 46, },
+ { 1, 1, 2, 5, 122, 46, },
+ { 0, 1, 2, 5, 155, 46, },
+ { 2, 1, 2, 5, 155, 46, },
+ { 1, 1, 2, 5, 155, 63, },
+ { 0, 1, 2, 8, 42, 46, },
+ { 2, 1, 2, 8, 42, 46, },
+ { 1, 1, 2, 8, 42, 46, },
+ { 0, 1, 2, 8, 58, 46, },
+ { 2, 1, 2, 8, 58, 46, },
+ { 1, 1, 2, 8, 58, 46, },
+ { 0, 1, 2, 8, 106, 46, },
+ { 2, 1, 2, 8, 106, 46, },
+ { 1, 1, 2, 8, 106, 46, },
+ { 0, 1, 2, 8, 122, 46, },
+ { 2, 1, 2, 8, 122, 46, },
+ { 1, 1, 2, 8, 122, 46, },
+ { 0, 1, 2, 8, 155, 46, },
+ { 2, 1, 2, 8, 155, 46, },
+ { 1, 1, 2, 8, 155, 63, },
+ { 0, 1, 2, 9, 42, 46, },
+ { 2, 1, 2, 9, 42, 46, },
+ { 1, 1, 2, 9, 42, 46, },
+ { 0, 1, 2, 9, 58, 46, },
+ { 2, 1, 2, 9, 58, 46, },
+ { 1, 1, 2, 9, 58, 46, },
+ { 0, 1, 2, 9, 106, 46, },
+ { 2, 1, 2, 9, 106, 46, },
+ { 1, 1, 2, 9, 106, 46, },
+ { 0, 1, 2, 9, 122, 46, },
+ { 2, 1, 2, 9, 122, 46, },
+ { 1, 1, 2, 9, 122, 46, },
+ { 0, 1, 2, 9, 155, 46, },
+ { 2, 1, 2, 9, 155, 46, },
+ { 1, 1, 2, 9, 155, 63, },
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8814a_txpwr_lmt_type8);
+
+static const u8
+rtw8814a_pwrtrk_5gd_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 9, 9, 10, 11,
+ 11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 19},
+ {0, 1, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 7, 8, 9, 9, 10, 10,
+ 11, 12, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18},
+ {0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 17, 18, 19},
+};
+
+static const u8
+rtw8814a_pwrtrk_5gd_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 25, 25, 25, 25},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 17, 18, 19, 20, 21, 22, 23, 24, 25, 25, 25, 25},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 17, 18, 19, 20, 21, 22, 23, 24, 25, 25, 25, 25},
+};
+
+static const u8
+rtw8814a_pwrtrk_5gc_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 1, 2, 2, 3, 4, 4, 5, 5, 6, 7, 7, 8, 8, 9, 10, 10,
+ 11, 12, 13, 14, 15, 15, 15, 15, 16, 16, 17, 18},
+ {0, 1, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11,
+ 12, 12, 13, 14, 15, 15, 16, 17, 17, 18, 19, 19, 20},
+ {0, 1, 1, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 11, 11,
+ 12, 13, 13, 14, 14, 15, 16, 17, 18, 18, 19, 20, 20},
+};
+
+static const u8
+rtw8814a_pwrtrk_5gc_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 16, 17, 18, 19, 20, 21, 21, 22, 23, 24, 25, 25},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 17, 18, 19, 20, 21, 22, 23, 24, 24, 25, 25, 25},
+ {0, 1, 2, 3, 4, 5, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 16, 17, 18, 19, 20, 21, 22, 23, 23, 24, 25, 25},
+};
+
+static const u8
+rtw8814a_pwrtrk_5gb_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 1, 2, 2, 3, 4, 4, 5, 5, 6, 7, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 13, 13, 14, 14, 15, 15, 16, 17, 17},
+ {0, 1, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11,
+ 12, 12, 13, 14, 15, 15, 16, 17, 17, 18, 19, 19, 20},
+ {0, 1, 1, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 11, 11,
+ 12, 13, 13, 14, 14, 15, 16, 17, 18, 18, 19, 20, 20},
+};
+
+static const u8
+rtw8814a_pwrtrk_5gb_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 3, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14,
+ 15, 15, 16, 17, 18, 18, 19, 20, 21, 22, 23, 23, 24},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 18, 19, 20, 21, 22, 23, 24, 25, 25, 25, 25},
+ {0, 1, 2, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 16, 17, 18, 19, 20, 20, 21, 22, 23, 24, 25, 25},
+};
+
+static const u8
+rtw8814a_pwrtrk_5ga_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
+ 8, 9, 9, 10, 11, 11, 11, 11, 12, 12, 13, 13, 14},
+ {0, 1, 1, 2, 3, 4, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 11,
+ 12, 13, 14, 14, 15, 16, 16, 17, 18, 19, 19, 20, 21},
+ {0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 17, 18, 19},
+};
+
+static const u8
+rtw8814a_pwrtrk_5ga_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 2, 3, 4, 5, 6, 7, 7, 8, 9, 10, 11, 12, 12, 13,
+ 14, 15, 16, 16, 17, 18, 19, 20, 21, 21, 22, 23, 24},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 23, 24, 25, 25, 25, 25},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 17, 18, 19, 20, 21, 22, 23, 24, 25, 25, 25, 25},
+};
+
+static const u8 rtw8814a_pwrtrk_2gd_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7,
+ 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_2gd_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13
+};
+
+static const u8 rtw8814a_pwrtrk_2gc_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7,
+ 7, 7, 8, 8, 9, 9, 10, 10, 10, 11, 11, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_2gc_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 13
+};
+
+static const u8 rtw8814a_pwrtrk_2gb_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7,
+ 7, 8, 8, 8, 9, 9, 10, 10, 11, 11, 11, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_2gb_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13
+};
+
+static const u8 rtw8814a_pwrtrk_2ga_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7,
+ 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 12, 12, 13
+};
+
+static const u8 rtw8814a_pwrtrk_2ga_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
+ 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_2g_cck_d_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7,
+ 7, 8, 8, 8, 9, 9, 10, 10, 10, 11, 11, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_2g_cck_d_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13
+};
+
+static const u8 rtw8814a_pwrtrk_2g_cck_c_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 6,
+ 7, 7, 8, 8, 8, 9, 9, 10, 10, 10, 11, 11, 12
+};
+
+static const u8 rtw8814a_pwrtrk_2g_cck_c_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7,
+ 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 12, 12, 13
+};
+
+static const u8 rtw8814a_pwrtrk_2g_cck_b_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6,
+ 6, 6, 7, 7, 8, 8, 8, 9, 9, 10, 10, 10, 11, 11
+};
+
+static const u8 rtw8814a_pwrtrk_2g_cck_b_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7,
+ 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_2g_cck_a_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7,
+ 7, 8, 8, 8, 9, 9, 10, 10, 11, 11, 11, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_2g_cck_a_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 7,
+ 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14
+};
+
+const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_tbl = {
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_5gd_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_5gd_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_5gd_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_5gd_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_5gd_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_5gd_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_5gc_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_5gc_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_5gc_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_5gc_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_5gc_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_5gc_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_5gb_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_5gb_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_5gb_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_5gb_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_5gb_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_5gb_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_5ga_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_5ga_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_5ga_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_5ga_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_5ga_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_5ga_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_2gd_n = rtw8814a_pwrtrk_2gd_n,
+ .pwrtrk_2gd_p = rtw8814a_pwrtrk_2gd_p,
+ .pwrtrk_2gc_n = rtw8814a_pwrtrk_2gc_n,
+ .pwrtrk_2gc_p = rtw8814a_pwrtrk_2gc_p,
+ .pwrtrk_2gb_n = rtw8814a_pwrtrk_2gb_n,
+ .pwrtrk_2gb_p = rtw8814a_pwrtrk_2gb_p,
+ .pwrtrk_2ga_n = rtw8814a_pwrtrk_2ga_n,
+ .pwrtrk_2ga_p = rtw8814a_pwrtrk_2ga_p,
+ .pwrtrk_2g_cckd_n = rtw8814a_pwrtrk_2g_cck_d_n,
+ .pwrtrk_2g_cckd_p = rtw8814a_pwrtrk_2g_cck_d_p,
+ .pwrtrk_2g_cckc_n = rtw8814a_pwrtrk_2g_cck_c_n,
+ .pwrtrk_2g_cckc_p = rtw8814a_pwrtrk_2g_cck_c_p,
+ .pwrtrk_2g_cckb_n = rtw8814a_pwrtrk_2g_cck_b_n,
+ .pwrtrk_2g_cckb_p = rtw8814a_pwrtrk_2g_cck_b_p,
+ .pwrtrk_2g_ccka_n = rtw8814a_pwrtrk_2g_cck_a_n,
+ .pwrtrk_2g_ccka_p = rtw8814a_pwrtrk_2g_cck_a_p,
+};
+
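(Editor's note, not part of the patch.) The rtw_pwr_track_tbl instance above gathers, for each RF path (a–d) and direction (_p/_n), a 30-entry compensation array indexed by the magnitude of the thermal difference from the calibration value, with the 5 GHz entries further split into three sub-band groups. A minimal sketch of how such a table could be consumed is shown below. It is illustration only: the helper name rtw8814a_pwrtrk_lookup_2ga is hypothetical, and the assumption that _p/_n selects on the sign of the thermal delta is inferred from the table naming, not stated in this diff.

/*
 * Hypothetical sketch (assumes kernel headers for abs()/min_t()):
 * map a signed thermal difference to a TX power compensation step
 * for path A, 2.4 GHz OFDM, using one of the tables above.
 */
static u8 rtw8814a_pwrtrk_lookup_2ga(const struct rtw_pwr_track_tbl *tbl,
				     s8 thermal_diff)
{
	/* Clamp the index to the array length used by the tables above. */
	u8 idx = min_t(u8, abs(thermal_diff), RTW_PWR_TRK_TBL_SZ - 1);

	/*
	 * Assumption for illustration: "_p" entries apply when the chip
	 * runs hotter than at calibration, "_n" entries when it runs
	 * cooler.
	 */
	return thermal_diff >= 0 ? tbl->pwrtrk_2ga_p[idx] :
				   tbl->pwrtrk_2ga_n[idx];
}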
+static const u8
+rtw8814a_pwrtrk_type0_5gd_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+};
+
+static const u8
+rtw8814a_pwrtrk_type0_5gd_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+};
+
+static const u8
+rtw8814a_pwrtrk_type0_5gc_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+};
+
+static const u8
+rtw8814a_pwrtrk_type0_5gc_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+};
+
+static const u8
+rtw8814a_pwrtrk_type0_5gb_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+};
+
+static const u8
+rtw8814a_pwrtrk_type0_5gb_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+};
+
+static const u8
+rtw8814a_pwrtrk_type0_5ga_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+};
+
+static const u8
+rtw8814a_pwrtrk_type0_5ga_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2gd_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2gd_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2gc_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2gc_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2gb_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2gb_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2ga_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2ga_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2g_cck_d_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2g_cck_d_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2g_cck_c_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2g_cck_c_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2g_cck_b_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2g_cck_b_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2g_cck_a_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2g_cck_a_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type0_tbl = {
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type0_5gd_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type0_5gd_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type0_5gd_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type0_5gd_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type0_5gd_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type0_5gd_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type0_5gc_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type0_5gc_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type0_5gc_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type0_5gc_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type0_5gc_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type0_5gc_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type0_5gb_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type0_5gb_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type0_5gb_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type0_5gb_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type0_5gb_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type0_5gb_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type0_5ga_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type0_5ga_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type0_5ga_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type0_5ga_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type0_5ga_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type0_5ga_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_2gd_n = rtw8814a_pwrtrk_type0_2gd_n,
+ .pwrtrk_2gd_p = rtw8814a_pwrtrk_type0_2gd_p,
+ .pwrtrk_2gc_n = rtw8814a_pwrtrk_type0_2gc_n,
+ .pwrtrk_2gc_p = rtw8814a_pwrtrk_type0_2gc_p,
+ .pwrtrk_2gb_n = rtw8814a_pwrtrk_type0_2gb_n,
+ .pwrtrk_2gb_p = rtw8814a_pwrtrk_type0_2gb_p,
+ .pwrtrk_2ga_n = rtw8814a_pwrtrk_type0_2ga_n,
+ .pwrtrk_2ga_p = rtw8814a_pwrtrk_type0_2ga_p,
+ .pwrtrk_2g_cckd_n = rtw8814a_pwrtrk_type0_2g_cck_d_n,
+ .pwrtrk_2g_cckd_p = rtw8814a_pwrtrk_type0_2g_cck_d_p,
+ .pwrtrk_2g_cckc_n = rtw8814a_pwrtrk_type0_2g_cck_c_n,
+ .pwrtrk_2g_cckc_p = rtw8814a_pwrtrk_type0_2g_cck_c_p,
+ .pwrtrk_2g_cckb_n = rtw8814a_pwrtrk_type0_2g_cck_b_n,
+ .pwrtrk_2g_cckb_p = rtw8814a_pwrtrk_type0_2g_cck_b_p,
+ .pwrtrk_2g_ccka_n = rtw8814a_pwrtrk_type0_2g_cck_a_n,
+ .pwrtrk_2g_ccka_p = rtw8814a_pwrtrk_type0_2g_cck_a_p,
+};
+
+static const u8
+rtw8814a_pwrtrk_type2_5gd_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 3, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 11, 12, 12,
+ 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+ {0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 8, 9, 9, 10, 10, 10, 10,
+ 11, 11, 12, 12, 13, 14, 15, 16, 16, 16, 16, 16, 16},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 10, 10, 11, 11, 11, 12,
+ 12, 13, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15},
+};
+
+static const u8
+rtw8814a_pwrtrk_type2_5gd_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 22, 22, 22, 22, 22},
+ {0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 22, 22, 22, 22, 22},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 23, 23, 23, 23, 23, 23},
+};
+
+static const u8
+rtw8814a_pwrtrk_type2_5gc_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 3, 3, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 11, 12,
+ 12, 13, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15},
+ {0, 1, 2, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 10, 11, 11, 12,
+ 13, 13, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15},
+ {0, 1, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11, 11,
+ 12, 12, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14},
+};
+
+static const u8
+rtw8814a_pwrtrk_type2_5gc_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 3, 4, 5, 6, 6, 7, 8, 9, 10, 11, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 21, 21, 21, 21, 21, 21},
+ {0, 1, 2, 3, 4, 5, 6, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 20, 20, 21, 21, 21, 21, 21},
+ {0, 1, 2, 3, 3, 4, 5, 6, 7, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 21, 21, 21, 21, 21, 21},
+};
+
+static const u8
+rtw8814a_pwrtrk_type2_5gb_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 11, 12, 13,
+ 13, 13, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15},
+ {0, 1, 2, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 10, 11, 11, 12,
+ 13, 13, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15},
+ {0, 1, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11, 11,
+ 12, 12, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14},
+};
+
+static const u8
+rtw8814a_pwrtrk_type2_5gb_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 21, 21, 21, 21, 21, 21, 21},
+ {0, 0, 1, 2, 3, 4, 5, 6, 6, 7, 8, 9, 10, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 20, 20, 20, 20, 20, 20},
+ {0, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 21, 21, 21, 21, 21, 21},
+};
+
+static const u8
+rtw8814a_pwrtrk_type2_5ga_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 3, 4, 5, 5, 6, 7, 7, 8, 9, 10, 10, 11, 11, 11,
+ 12, 13, 13, 13, 13, 14, 15, 15, 15, 15, 15, 15, 15},
+ {0, 1, 2, 3, 3, 4, 5, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12,
+ 12, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+ {0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 11, 11, 11, 12,
+ 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13},
+};
+
+static const u8
+rtw8814a_pwrtrk_type2_5ga_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 3, 4, 5, 5, 6, 7, 8, 9, 10, 10, 11, 12, 13, 14,
+ 15, 15, 16, 17, 18, 18, 19, 19, 20, 20, 20, 20, 20},
+ {0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 11, 12, 13, 14,
+ 15, 16, 16, 17, 18, 19, 20, 20, 20, 20, 20, 20, 20},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 11, 12, 13, 14, 15,
+ 15, 16, 17, 18, 19, 19, 20, 20, 20, 20, 20, 20, 20},
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2gd_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
+ 6, 6, 7, 8, 9, 10, 11, 11, 11, 11, 11, 11, 11
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2gd_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 9, 9, 10,
+ 10, 11, 12, 12, 13, 14, 14, 14, 14, 14, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2gc_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 5, 6, 6, 6, 7, 7, 8, 8, 9, 10,
+ 10, 11, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2gc_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 3, 3, 4, 4, 4, 5, 5, 6, 7, 8, 8, 9, 10, 10,
+ 11, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2gb_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8, 8,
+ 9, 9, 10, 10, 11, 11, 12, 12, 12, 13, 14, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2gb_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10,
+ 10, 11, 12, 12, 13, 13, 13, 13, 13, 14, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2ga_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 2, 2, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 9, 9, 9,
+ 10, 11, 11, 12, 12, 13, 13, 13, 13, 13, 13, 13
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2ga_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 3, 3, 4, 4, 4, 5, 6, 6, 7, 7, 8, 8, 9, 10,
+ 11, 12, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2g_cck_d_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 7, 8,
+ 9, 9, 9, 9, 9, 9, 10, 10, 11, 11, 12, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2g_cck_d_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 10, 11, 12, 12, 13, 14, 14, 14, 14, 14, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2g_cck_c_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 2, 2, 3, 4, 5, 5, 6, 6, 6, 6, 7, 8, 9, 9, 10,
+ 10, 11, 11, 11, 12, 13, 13, 13, 13, 13, 13, 13
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2g_cck_c_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 9, 9, 10,
+ 10, 11, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2g_cck_b_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 2, 3, 3, 4, 5, 6, 6, 7, 7, 8, 8, 9, 10,
+ 10, 10, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2g_cck_b_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 7, 8, 8, 9, 9, 10,
+ 10, 11, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2g_cck_a_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 2, 3, 4, 4, 4, 5, 5, 6, 6, 7, 8, 9, 9, 9, 9,
+ 10, 10, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2g_cck_a_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 10,
+ 11, 11, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13
+};
+
+const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type2_tbl = {
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type2_5gd_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type2_5gd_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type2_5gd_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type2_5gd_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type2_5gd_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type2_5gd_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type2_5gc_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type2_5gc_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type2_5gc_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type2_5gc_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type2_5gc_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type2_5gc_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type2_5gb_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type2_5gb_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type2_5gb_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type2_5gb_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type2_5gb_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type2_5gb_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type2_5ga_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type2_5ga_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type2_5ga_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type2_5ga_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type2_5ga_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type2_5ga_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_2gd_n = rtw8814a_pwrtrk_type2_2gd_n,
+ .pwrtrk_2gd_p = rtw8814a_pwrtrk_type2_2gd_p,
+ .pwrtrk_2gc_n = rtw8814a_pwrtrk_type2_2gc_n,
+ .pwrtrk_2gc_p = rtw8814a_pwrtrk_type2_2gc_p,
+ .pwrtrk_2gb_n = rtw8814a_pwrtrk_type2_2gb_n,
+ .pwrtrk_2gb_p = rtw8814a_pwrtrk_type2_2gb_p,
+ .pwrtrk_2ga_n = rtw8814a_pwrtrk_type2_2ga_n,
+ .pwrtrk_2ga_p = rtw8814a_pwrtrk_type2_2ga_p,
+ .pwrtrk_2g_cckd_n = rtw8814a_pwrtrk_type2_2g_cck_d_n,
+ .pwrtrk_2g_cckd_p = rtw8814a_pwrtrk_type2_2g_cck_d_p,
+ .pwrtrk_2g_cckc_n = rtw8814a_pwrtrk_type2_2g_cck_c_n,
+ .pwrtrk_2g_cckc_p = rtw8814a_pwrtrk_type2_2g_cck_c_p,
+ .pwrtrk_2g_cckb_n = rtw8814a_pwrtrk_type2_2g_cck_b_n,
+ .pwrtrk_2g_cckb_p = rtw8814a_pwrtrk_type2_2g_cck_b_p,
+ .pwrtrk_2g_ccka_n = rtw8814a_pwrtrk_type2_2g_cck_a_n,
+ .pwrtrk_2g_ccka_p = rtw8814a_pwrtrk_type2_2g_cck_a_p,
+};
+
+static const u8
+rtw8814a_pwrtrk_type5_5gd_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 3, 3, 3, 4, 6, 6, 7, 7, 8, 9, 10, 11, 12, 13, 13, 14,
+ 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15},
+ {0, 4, 5, 6, 7, 7, 8, 7, 8, 10, 11, 12, 12, 13, 13, 14, 14,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15},
+ {0, 5, 5, 6, 7, 8, 9, 9, 10, 11, 12, 13, 13, 14, 15, 15, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16},
+};
+
+static const u8
+rtw8814a_pwrtrk_type5_5gd_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 9, 10, 10, 11, 12,
+ 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13},
+ {0, 0, 0, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 11, 12,
+ 12, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14},
+ {0, 0, 0, 1, 1, 2, 2, 3, 5, 7, 8, 9, 10, 11, 12, 13, 13,
+ 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+};
+
+static const u8
+rtw8814a_pwrtrk_type5_5gc_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 1, 2, 3, 3, 4, 6, 7, 7, 8, 9, 9, 9, 10, 10, 10,
+ 10, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12},
+ {0, 1, 2, 3, 3, 7, 7, 8, 8, 9, 11, 12, 12, 13, 14, 14, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15},
+ {0, 0, 1, 2, 3, 4, 5, 7, 8, 8, 10, 11, 12, 12, 13, 13, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+};
+
+static const u8
+rtw8814a_pwrtrk_type5_5gc_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 0, 1, 1, 2, 2, 3, 4, 5, 6, 6, 7, 8, 9, 11, 11,
+ 11, 11, 11, 11, 12, 12, 12, 12, 12, 13, 13, 13, 13},
+ {0, 0, 1, 2, 3, 3, 5, 5, 6, 8, 8, 9, 10, 11, 13, 13, 13,
+ 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+ {0, 0, 1, 2, 3, 4, 4, 5, 7, 8, 9, 9, 10, 11, 12, 12, 12,
+ 12, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14},
+};
+
+static const u8
+rtw8814a_pwrtrk_type5_5gb_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 1, 2, 2, 2, 3, 4, 5, 6, 7, 9, 10, 10, 10, 10, 10,
+ 10, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12},
+ {0, 1, 2, 3, 3, 7, 7, 8, 8, 9, 11, 12, 12, 13, 14, 14, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15},
+ {0, 0, 1, 2, 3, 4, 5, 7, 8, 8, 10, 11, 12, 12, 13, 13, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+};
+
+static const u8
+rtw8814a_pwrtrk_type5_5gb_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 1, 1, 2, 2, 4, 5, 6, 6, 7, 8, 9, 10, 11, 11, 11,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 0, 2, 3, 4, 5, 6, 8, 8, 9, 9, 11, 12, 13, 13, 13,
+ 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+ {0, 0, 0, 1, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 12, 12,
+ 12, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14},
+};
+
+static const u8
+rtw8814a_pwrtrk_type5_5ga_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 0, 1, 2, 3, 3, 3, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 10, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12},
+ {0, 2, 3, 4, 5, 7, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 17,
+ 17, 17, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18},
+ {0, 1, 2, 3, 3, 4, 6, 7, 8, 8, 10, 11, 11, 12, 13, 13, 13,
+ 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+};
+
+static const u8
+rtw8814a_pwrtrk_type5_5ga_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 1, 3, 3, 3, 5, 5, 6, 6, 8, 8, 9, 10, 11, 11, 11,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 1, 2, 3, 4, 4, 5, 6, 7, 8, 8, 9, 11, 12, 13, 14, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15},
+ {0, 0, 1, 3, 3, 4, 5, 5, 6, 7, 7, 8, 10, 10, 11, 11, 11,
+ 11, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13},
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2gd_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 7, 7, 8, 9, 9, 9,
+ 9, 9, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2gd_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 2, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2gc_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 10,
+ 10, 10, 10, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2gc_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 8, 8, 9, 9, 9,
+ 9, 9, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2gb_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 7, 8, 9, 10, 10, 10,
+ 10, 10, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2gb_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 8, 9, 9, 9, 9,
+ 9, 9, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2ga_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 7, 8, 8, 9, 10, 10, 10,
+	10, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2ga_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6, 7, 8, 8, 9, 9, 9,
+ 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2g_cck_d_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 7, 8, 8, 8, 9, 9, 9, 9,
+ 10, 10, 10, 10, 10, 11, 11, 10, 11, 11, 11, 11
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2g_cck_d_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8, 8, 8,
+ 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2g_cck_c_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 8, 8, 9, 10, 10, 10,
+ 10, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2g_cck_c_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8, 8, 8,
+ 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2g_cck_b_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 2, 3, 4, 4, 5, 6, 6, 6, 7, 7, 8, 8, 9, 10, 10, 10,
+ 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2g_cck_b_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8, 8, 8,
+ 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2g_cck_a_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 7, 8, 8, 9, 9, 9, 9, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2g_cck_a_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 2, 3, 4, 4, 5, 6, 7, 8, 8, 9, 9, 9,
+ 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10
+};
+
+const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type5_tbl = {
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type5_5gd_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type5_5gd_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type5_5gd_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type5_5gd_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type5_5gd_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type5_5gd_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type5_5gc_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type5_5gc_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type5_5gc_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type5_5gc_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type5_5gc_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type5_5gc_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type5_5gb_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type5_5gb_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type5_5gb_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type5_5gb_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type5_5gb_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type5_5gb_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type5_5ga_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type5_5ga_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type5_5ga_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type5_5ga_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type5_5ga_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type5_5ga_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_2gd_n = rtw8814a_pwrtrk_type5_2gd_n,
+ .pwrtrk_2gd_p = rtw8814a_pwrtrk_type5_2gd_p,
+ .pwrtrk_2gc_n = rtw8814a_pwrtrk_type5_2gc_n,
+ .pwrtrk_2gc_p = rtw8814a_pwrtrk_type5_2gc_p,
+ .pwrtrk_2gb_n = rtw8814a_pwrtrk_type5_2gb_n,
+ .pwrtrk_2gb_p = rtw8814a_pwrtrk_type5_2gb_p,
+ .pwrtrk_2ga_n = rtw8814a_pwrtrk_type5_2ga_n,
+ .pwrtrk_2ga_p = rtw8814a_pwrtrk_type5_2ga_p,
+ .pwrtrk_2g_cckd_n = rtw8814a_pwrtrk_type5_2g_cck_d_n,
+ .pwrtrk_2g_cckd_p = rtw8814a_pwrtrk_type5_2g_cck_d_p,
+ .pwrtrk_2g_cckc_n = rtw8814a_pwrtrk_type5_2g_cck_c_n,
+ .pwrtrk_2g_cckc_p = rtw8814a_pwrtrk_type5_2g_cck_c_p,
+ .pwrtrk_2g_cckb_n = rtw8814a_pwrtrk_type5_2g_cck_b_n,
+ .pwrtrk_2g_cckb_p = rtw8814a_pwrtrk_type5_2g_cck_b_p,
+ .pwrtrk_2g_ccka_n = rtw8814a_pwrtrk_type5_2g_cck_a_n,
+ .pwrtrk_2g_ccka_p = rtw8814a_pwrtrk_type5_2g_cck_a_p,
+};
+
+static const u8
+rtw8814a_pwrtrk_type7_5gd_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+};
+
+static const u8
+rtw8814a_pwrtrk_type7_5gd_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+};
+
+static const u8
+rtw8814a_pwrtrk_type7_5gc_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+};
+
+static const u8
+rtw8814a_pwrtrk_type7_5gc_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+};
+
+static const u8
+rtw8814a_pwrtrk_type7_5gb_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+};
+
+static const u8
+rtw8814a_pwrtrk_type7_5gb_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+};
+
+static const u8
+rtw8814a_pwrtrk_type7_5ga_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+};
+
+static const u8
+rtw8814a_pwrtrk_type7_5ga_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2gd_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2gd_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2gc_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2gc_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2gb_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2gb_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2ga_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2ga_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2g_cck_d_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2g_cck_d_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2g_cck_c_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2g_cck_c_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2g_cck_b_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2g_cck_b_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2g_cck_a_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2g_cck_a_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type7_tbl = {
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type7_5gd_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type7_5gd_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type7_5gd_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type7_5gd_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type7_5gd_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type7_5gd_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type7_5gc_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type7_5gc_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type7_5gc_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type7_5gc_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type7_5gc_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type7_5gc_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type7_5gb_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type7_5gb_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type7_5gb_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type7_5gb_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type7_5gb_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type7_5gb_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type7_5ga_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type7_5ga_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type7_5ga_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type7_5ga_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type7_5ga_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type7_5ga_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_2gd_n = rtw8814a_pwrtrk_type7_2gd_n,
+ .pwrtrk_2gd_p = rtw8814a_pwrtrk_type7_2gd_p,
+ .pwrtrk_2gc_n = rtw8814a_pwrtrk_type7_2gc_n,
+ .pwrtrk_2gc_p = rtw8814a_pwrtrk_type7_2gc_p,
+ .pwrtrk_2gb_n = rtw8814a_pwrtrk_type7_2gb_n,
+ .pwrtrk_2gb_p = rtw8814a_pwrtrk_type7_2gb_p,
+ .pwrtrk_2ga_n = rtw8814a_pwrtrk_type7_2ga_n,
+ .pwrtrk_2ga_p = rtw8814a_pwrtrk_type7_2ga_p,
+ .pwrtrk_2g_cckd_n = rtw8814a_pwrtrk_type7_2g_cck_d_n,
+ .pwrtrk_2g_cckd_p = rtw8814a_pwrtrk_type7_2g_cck_d_p,
+ .pwrtrk_2g_cckc_n = rtw8814a_pwrtrk_type7_2g_cck_c_n,
+ .pwrtrk_2g_cckc_p = rtw8814a_pwrtrk_type7_2g_cck_c_p,
+ .pwrtrk_2g_cckb_n = rtw8814a_pwrtrk_type7_2g_cck_b_n,
+ .pwrtrk_2g_cckb_p = rtw8814a_pwrtrk_type7_2g_cck_b_p,
+ .pwrtrk_2g_ccka_n = rtw8814a_pwrtrk_type7_2g_cck_a_n,
+ .pwrtrk_2g_ccka_p = rtw8814a_pwrtrk_type7_2g_cck_a_p,
+};
+
+static const u8
+rtw8814a_pwrtrk_type8_5gd_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 3, 4, 4, 6, 7, 7, 8, 8, 9, 10, 10, 11, 11, 12,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13},
+ {0, 1, 2, 3, 4, 4, 6, 7, 7, 8, 9, 9, 10, 10, 11, 11, 12,
+ 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13},
+ {0, 1, 2, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8, 9, 10, 10,
+ 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11},
+};
+
+static const u8
+rtw8814a_pwrtrk_type8_5gd_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 2, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 14,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15},
+ {0, 0, 1, 2, 3, 5, 5, 6, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+ {0, 0, 1, 2, 3, 4, 5, 6, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+};
+
+static const u8
+rtw8814a_pwrtrk_type8_5gc_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 4, 4, 5, 6, 7, 7, 8, 8, 9, 9, 10, 11, 11, 12,
+ 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13},
+ {0, 1, 2, 3, 4, 5, 6, 7, 7, 8, 9, 9, 10, 10, 10, 11, 11,
+ 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13},
+ {0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 8, 9, 10, 10, 11, 11,
+ 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+};
+
+static const u8
+rtw8814a_pwrtrk_type8_5gc_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 2, 4, 4, 5, 6, 7, 7, 8, 9, 10, 11, 12, 13, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+ {0, 0, 1, 3, 4, 4, 5, 6, 7, 7, 8, 10, 11, 12, 13, 14,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15},
+ {0, 0, 1, 2, 4, 5, 5, 6, 6, 7, 7, 9, 10, 11, 12, 13, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+};
+
+static const u8
+rtw8814a_pwrtrk_type8_5gb_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 8, 9, 9, 10, 11, 11,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 1, 2, 3, 4, 5, 6, 7, 7, 8, 9, 9, 10, 10, 10, 11, 11,
+ 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13},
+ {0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 8, 9, 10, 10, 11, 11,
+ 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+};
+
+static const u8
+rtw8814a_pwrtrk_type8_5gb_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 2, 3, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+ {0, 0, 1, 2, 3, 4, 5, 5, 6, 7, 8, 10, 11, 11, 13, 13,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+ {0, 0, 1, 2, 3, 4, 4, 5, 6, 7, 8, 10, 10, 11, 13, 13,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+};
+
+static const u8
+rtw8814a_pwrtrk_type8_5ga_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11, 11,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 1, 2, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 10, 10, 11, 11,
+ 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13},
+ {0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 10, 11, 12,
+ 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13},
+};
+
+static const u8
+rtw8814a_pwrtrk_type8_5ga_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 2, 3, 3, 4, 5, 6, 7, 7, 9, 10, 11, 12, 13, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+ {0, 0, 1, 2, 3, 4, 5, 5, 6, 7, 8, 9, 10, 11, 12, 13, 13,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+ {0, 0, 1, 2, 3, 3, 4, 4, 6, 7, 7, 9, 10, 11, 12, 13, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2gd_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
+ 6, 6, 7, 8, 9, 10, 11, 11, 11, 11, 11, 11, 11
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2gd_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 9, 9, 10,
+ 10, 11, 12, 12, 13, 14, 14, 14, 14, 14, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2gc_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 5, 6, 6, 6, 7, 7, 8, 8, 9, 10,
+ 10, 11, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2gc_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 3, 3, 4, 4, 4, 5, 5, 6, 7, 8, 8, 9, 10, 10,
+ 11, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2gb_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8, 8,
+ 9, 9, 10, 10, 11, 11, 12, 12, 12, 13, 14, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2gb_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10,
+ 10, 11, 12, 12, 13, 13, 13, 13, 13, 14, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2ga_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 2, 2, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 9, 9, 9,
+ 10, 11, 11, 12, 12, 13, 13, 13, 13, 13, 13, 13
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2ga_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 3, 3, 4, 4, 4, 5, 6, 6, 7, 7, 8, 8, 9, 10,
+ 11, 12, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2g_cck_d_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 7, 8,
+ 9, 9, 9, 9, 9, 9, 10, 10, 11, 11, 12, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2g_cck_d_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 10, 11, 12, 12, 13, 14, 14, 14, 14, 14, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2g_cck_c_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 2, 2, 3, 4, 5, 5, 6, 6, 6, 6, 7, 8, 9, 9, 10,
+ 10, 11, 11, 11, 12, 13, 13, 13, 13, 13, 13, 13
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2g_cck_c_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 9, 9, 10,
+ 10, 11, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2g_cck_b_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 2, 3, 3, 4, 5, 6, 6, 7, 7, 8, 8, 9, 10,
+ 10, 10, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2g_cck_b_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 7, 8, 8, 9, 9, 10,
+ 10, 11, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2g_cck_a_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 2, 3, 4, 4, 4, 5, 5, 6, 6, 7, 8, 9, 9, 9, 9,
+ 10, 10, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2g_cck_a_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 10,
+ 11, 11, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13
+};
+
+const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type8_tbl = {
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type8_5gd_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type8_5gd_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type8_5gd_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type8_5gd_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type8_5gd_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type8_5gd_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type8_5gc_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type8_5gc_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type8_5gc_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type8_5gc_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type8_5gc_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type8_5gc_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type8_5gb_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type8_5gb_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type8_5gb_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type8_5gb_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type8_5gb_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type8_5gb_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type8_5ga_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type8_5ga_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type8_5ga_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type8_5ga_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type8_5ga_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type8_5ga_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_2gd_n = rtw8814a_pwrtrk_type8_2gd_n,
+ .pwrtrk_2gd_p = rtw8814a_pwrtrk_type8_2gd_p,
+ .pwrtrk_2gc_n = rtw8814a_pwrtrk_type8_2gc_n,
+ .pwrtrk_2gc_p = rtw8814a_pwrtrk_type8_2gc_p,
+ .pwrtrk_2gb_n = rtw8814a_pwrtrk_type8_2gb_n,
+ .pwrtrk_2gb_p = rtw8814a_pwrtrk_type8_2gb_p,
+ .pwrtrk_2ga_n = rtw8814a_pwrtrk_type8_2ga_n,
+ .pwrtrk_2ga_p = rtw8814a_pwrtrk_type8_2ga_p,
+ .pwrtrk_2g_cckd_n = rtw8814a_pwrtrk_type8_2g_cck_d_n,
+ .pwrtrk_2g_cckd_p = rtw8814a_pwrtrk_type8_2g_cck_d_p,
+ .pwrtrk_2g_cckc_n = rtw8814a_pwrtrk_type8_2g_cck_c_n,
+ .pwrtrk_2g_cckc_p = rtw8814a_pwrtrk_type8_2g_cck_c_p,
+ .pwrtrk_2g_cckb_n = rtw8814a_pwrtrk_type8_2g_cck_b_n,
+ .pwrtrk_2g_cckb_p = rtw8814a_pwrtrk_type8_2g_cck_b_p,
+ .pwrtrk_2g_ccka_n = rtw8814a_pwrtrk_type8_2g_cck_a_n,
+ .pwrtrk_2g_ccka_p = rtw8814a_pwrtrk_type8_2g_cck_a_p,
+};
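The arrays above are thermal power-tracking tables: one compensation index per thermal-delta step, with separate "_p" and "_n" variants for the two signs of the delta, one table per RF path (a/b/c/d), and, for 5 GHz, one row per RTW_PWR_TRK_5G_* sub-band. The snippet below is a minimal standalone sketch of how such a pair of tables could be consumed; the helper name, the 30-entry depth macro, and the sign convention are assumptions for illustration, not the rtw88 driver's actual lookup code. The two rows reuse the 2G path-A values from the type7 tables above.

#include <stdint.h>
#include <stdio.h>

#define PWR_TRK_TBL_SZ 30   /* assumed table depth, matching the rows above */

static const uint8_t pwrtrk_2ga_p[PWR_TRK_TBL_SZ] = {
	0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
	8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
};

static const uint8_t pwrtrk_2ga_n[PWR_TRK_TBL_SZ] = {
	0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
	7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
};

/*
 * Pick a signed power offset for the current thermal reading relative to
 * the calibration value. Which table applies for rising temperature is
 * chip policy; here we simply key on the sign of the delta and clamp the
 * step index to the table depth.
 */
static int pwrtrk_offset(int thermal_now, int thermal_cal)
{
	int delta = thermal_now - thermal_cal;
	unsigned int step = delta >= 0 ? delta : -delta;
	const uint8_t *tbl = delta >= 0 ? pwrtrk_2ga_p : pwrtrk_2ga_n;

	if (step >= PWR_TRK_TBL_SZ)
		step = PWR_TRK_TBL_SZ - 1;

	return delta >= 0 ? tbl[step] : -tbl[step];
}

int main(void)
{
	printf("+7 steps -> %+d\n", pwrtrk_offset(32, 25));
	printf("-4 steps -> %+d\n", pwrtrk_offset(21, 25));
	return 0;
}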
+
+static const struct rtw_pwr_seq_cmd init_power_on_8814a[] = {
+ {0x10c2,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static const struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8814a[] = {
+ {0x0012,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(6), BIT(6)},
+ {0x0015,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), 0},
+ {0x0015,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(6), 0},
+ {0x0023,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(4), 0},
+ {0x0046,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x00},
+ {0x0062,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x00},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(2), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3), 0},
+ {0x0301,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0x0071,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(2), 0},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static const struct rtw_pwr_seq_cmd trans_cardemu_to_act_8814a[] = {
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(2), 0},
+ {0x0006,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(1), BIT(1)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3), 0},
+ {0x00F0,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7), 0},
+ {0x0081,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0x30, 0x20},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(0), 0},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static const struct rtw_pwr_seq_cmd trans_act_to_cardemu_8814a[] = {
+ {0x0c00,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x04},
+ {0x0e00,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x04},
+ {0x1002,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0002,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_DELAY, 1, RTW_PWR_DELAY_US},
+ {0x1002,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x001F,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0x0007,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x28},
+ {0x0008,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0x02, 0},
+ {0x0066,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7), 0},
+ {0x0041,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(4), 0},
+ {0x0042,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x004e,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
+ {0x0041,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(1), 0},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static const struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8814a[] = {
+ {0x0003,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(2), 0},
+ {0x0080,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x01},
+ {0x0081,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x30},
+ {0x0045,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x00},
+ {0x0046,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0xff},
+ {0x0047,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0x0015,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(6), BIT(6)},
+ {0x0015,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
+ {0x0012,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(6), 0},
+ {0x0023,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(4), BIT(4)},
+ {0x0008,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0007,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x20},
+ {0x001f,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0020,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0021,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0076,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0091,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xA0, 0xA0},
+ {0x0070,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3), BIT(3)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3), BIT(3)},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+const struct rtw_pwr_seq_cmd * const card_enable_flow_8814a[] = {
+ init_power_on_8814a,
+ trans_carddis_to_cardemu_8814a,
+ trans_cardemu_to_act_8814a,
+ NULL
+};
+
+const struct rtw_pwr_seq_cmd * const card_disable_flow_8814a[] = {
+ trans_act_to_cardemu_8814a,
+ trans_cardemu_to_carddis_8814a,
+ NULL
+};
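Each rtw_pwr_seq_cmd array above is a tiny interpreted program: every entry carries a register offset, a chip-cut mask, a host-interface mask, an address space, a command (write, poll, or delay), a bit mask and a value, and the 0xFFFF/RTW_PWR_CMD_END entry terminates the sequence. The sketch below is a simplified standalone model of such an interpreter; it drops the cut mask and address space and uses a fake register file, and the struct layout, helper names, and interface constants are assumptions, not the driver's in-tree parser.

#include <stdint.h>
#include <stdio.h>

enum pwr_cmd_type { CMD_WRITE, CMD_POLLING, CMD_DELAY, CMD_END };

#define INTF_PCI 0x1
#define INTF_USB 0x2
#define INTF_ALL 0x3

struct pwr_cmd {
	uint16_t offset;          /* register offset; 0xFFFF on the END entry */
	uint8_t  intf_msk;        /* host-interface filter (PCI/USB/...) */
	enum pwr_cmd_type cmd;
	uint8_t  mask;            /* affected bits */
	uint8_t  value;           /* WRITE: bits to set; POLLING: expected bits */
};

static uint8_t regs[0x10000];     /* fake register space standing in for MMIO */

static int run_pwr_seq(const struct pwr_cmd *seq, uint8_t intf)
{
	for (; seq->cmd != CMD_END; seq++) {
		int tries;

		if (!(seq->intf_msk & intf))
			continue;                 /* entry is for another bus type */

		switch (seq->cmd) {
		case CMD_WRITE:
			regs[seq->offset] &= ~seq->mask;
			regs[seq->offset] |= seq->value & seq->mask;
			break;
		case CMD_POLLING:
			for (tries = 0; tries < 1000; tries++)
				if ((regs[seq->offset] & seq->mask) == seq->value)
					break;
			if (tries == 1000)
				return -1;        /* the real parser times out too */
			break;
		case CMD_DELAY:
			break;                    /* udelay()/mdelay() in real code */
		case CMD_END:
			break;
		}
	}
	return 0;
}

int main(void)
{
	/* Modeled loosely on init_power_on_8814a: set BIT(1) at 0x10c2, USB only. */
	static const struct pwr_cmd seq[] = {
		{ 0x10c2, INTF_USB, CMD_WRITE, 0x02, 0x02 },
		{ 0xFFFF, INTF_ALL, CMD_END, 0, 0 },
	};

	if (run_pwr_seq(seq, INTF_USB) == 0)
		printf("0x10c2 = 0x%02x\n", regs[0x10c2]);
	return 0;
}

The card_enable_flow_8814a and card_disable_flow_8814a arrays at the end of the file are then just NULL-terminated lists of such sequences, run back to back.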
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814a_table.h b/drivers/net/wireless/realtek/rtw88/rtw8814a_table.h
new file mode 100644
index 000000000000..69fb87e36c48
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8814a_table.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2025 Realtek Corporation
+ */
+
+#ifndef __RTW8814A_TABLE_H__
+#define __RTW8814A_TABLE_H__
+
+extern const struct rtw_table rtw8814a_mac_tbl;
+extern const struct rtw_table rtw8814a_agc_tbl;
+extern const struct rtw_table rtw8814a_bb_tbl;
+extern const struct rtw_table rtw8814a_bb_pg_tbl;
+extern const struct rtw_table rtw8814a_bb_pg_type0_tbl;
+extern const struct rtw_table rtw8814a_bb_pg_type2_tbl;
+extern const struct rtw_table rtw8814a_bb_pg_type3_tbl;
+extern const struct rtw_table rtw8814a_bb_pg_type4_tbl;
+extern const struct rtw_table rtw8814a_bb_pg_type5_tbl;
+extern const struct rtw_table rtw8814a_bb_pg_type7_tbl;
+extern const struct rtw_table rtw8814a_bb_pg_type8_tbl;
+extern const struct rtw_table rtw8814a_rf_a_tbl;
+extern const struct rtw_table rtw8814a_rf_b_tbl;
+extern const struct rtw_table rtw8814a_rf_c_tbl;
+extern const struct rtw_table rtw8814a_rf_d_tbl;
+extern const struct rtw_table rtw8814a_txpwr_lmt_tbl;
+extern const struct rtw_table rtw8814a_txpwr_lmt_type0_tbl;
+extern const struct rtw_table rtw8814a_txpwr_lmt_type1_tbl;
+extern const struct rtw_table rtw8814a_txpwr_lmt_type2_tbl;
+extern const struct rtw_table rtw8814a_txpwr_lmt_type3_tbl;
+extern const struct rtw_table rtw8814a_txpwr_lmt_type5_tbl;
+extern const struct rtw_table rtw8814a_txpwr_lmt_type7_tbl;
+extern const struct rtw_table rtw8814a_txpwr_lmt_type8_tbl;
+extern const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_tbl;
+extern const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type0_tbl;
+extern const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type2_tbl;
+extern const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type5_tbl;
+extern const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type7_tbl;
+extern const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type8_tbl;
+extern const struct rtw_pwr_seq_cmd * const card_disable_flow_8814a[];
+extern const struct rtw_pwr_seq_cmd * const card_enable_flow_8814a[];
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814ae.c b/drivers/net/wireless/realtek/rtw88/rtw8814ae.c
new file mode 100644
index 000000000000..54d2e20a7764
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8814ae.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2025 Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include "pci.h"
+#include "rtw8814a.h"
+
+static const struct pci_device_id rtw_8814ae_id_table[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8813),
+ .driver_data = (kernel_ulong_t)&rtw8814a_hw_spec
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, rtw_8814ae_id_table);
+
+static struct pci_driver rtw_8814ae_driver = {
+ .name = "rtw_8814ae",
+ .id_table = rtw_8814ae_id_table,
+ .probe = rtw_pci_probe,
+ .remove = rtw_pci_remove,
+ .driver.pm = &rtw_pm_ops,
+ .shutdown = rtw_pci_shutdown,
+};
+module_pci_driver(rtw_8814ae_driver);
+
+MODULE_AUTHOR("Bitterblue Smith <rtl8821cerfe2@gmail.com>");
+MODULE_DESCRIPTION("Realtek 802.11ac wireless 8814ae driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814au.c b/drivers/net/wireless/realtek/rtw88/rtw8814au.c
new file mode 100644
index 000000000000..afe045fb84de
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8814au.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2025 Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/usb.h>
+#include "main.h"
+#include "rtw8814a.h"
+#include "usb.h"
+
+static const struct usb_device_id rtw_8814au_id_table[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8813, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x056e, 0x400b, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x056e, 0x400d, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9054, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x1817, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x1852, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x1853, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0e66, 0x0026, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x331a, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x809a, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x809b, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0106, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa834, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa833, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) },
+ {},
+};
+MODULE_DEVICE_TABLE(usb, rtw_8814au_id_table);
+
+static struct usb_driver rtw_8814au_driver = {
+ .name = "rtw_8814au",
+ .id_table = rtw_8814au_id_table,
+ .probe = rtw_usb_probe,
+ .disconnect = rtw_usb_disconnect,
+};
+module_usb_driver(rtw_8814au_driver);
+
+MODULE_AUTHOR("Bitterblue Smith <rtl8821cerfe2@gmail.com>");
+MODULE_DESCRIPTION("Realtek 802.11ac wireless 8814au driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index eb7e34c545d0..0ade7f11cbd2 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -680,11 +680,11 @@ static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status,
}
static void
-rtw8821c_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
+rtw8821c_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path,
+ u8 rs, u32 *phy_pwr_idx)
{
struct rtw_hal *hal = &rtwdev->hal;
static const u32 offset_txagc[2] = {0x1d00, 0x1d80};
- static u32 phy_pwr_idx;
u8 rate, rate_idx, pwr_index, shift;
int j;
@@ -692,12 +692,12 @@ rtw8821c_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
rate = rtw_rate_section[rs][j];
pwr_index = hal->tx_pwr_tbl[path][rate];
shift = rate & 0x3;
- phy_pwr_idx |= ((u32)pwr_index << (shift * 8));
+ *phy_pwr_idx |= ((u32)pwr_index << (shift * 8));
if (shift == 0x3 || rate == DESC_RATEVHT1SS_MCS9) {
rate_idx = rate & 0xfc;
rtw_write32(rtwdev, offset_txagc[path] + rate_idx,
- phy_pwr_idx);
- phy_pwr_idx = 0;
+ *phy_pwr_idx);
+ *phy_pwr_idx = 0;
}
}
}
@@ -705,14 +705,16 @@ rtw8821c_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
static void rtw8821c_set_tx_power_index(struct rtw_dev *rtwdev)
{
struct rtw_hal *hal = &rtwdev->hal;
+ u32 phy_pwr_idx = 0;
int rs, path;
for (path = 0; path < hal->rf_path_num; path++) {
- for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++) {
+ for (rs = 0; rs <= __RTW_RATE_SECTION_2SS_MAX; rs++) {
if (rs == RTW_RATE_SECTION_HT_2S ||
rs == RTW_RATE_SECTION_VHT_2S)
continue;
- rtw8821c_set_tx_power_index_by_rate(rtwdev, path, rs);
+ rtw8821c_set_tx_power_index_by_rate(rtwdev, path, rs,
+ &phy_pwr_idx);
}
}
}
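The change above (and the matching rtw8822b change below) turns the function-local `static u32 phy_pwr_idx` into an accumulator passed in by the caller. A static local is shared by every device bound to the driver and keeps any partially packed word across calls, so handing the accumulator down is the safer way to gather four 8-bit power indexes into each 32-bit TXAGC word. The standalone sketch below shows only that packing pattern; the helper names stand in for rtw_write32() and the rate tables and are not driver code.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the MMIO write; just prints the register access. */
static void txagc_write32(uint32_t addr, uint32_t val)
{
	printf("txagc write 0x%04x = 0x%08x\n", addr, val);
}

/*
 * Pack 8-bit per-rate power indexes into 32-bit TXAGC words, four rates
 * per word. Because the accumulator is owned by the caller, a partially
 * filled word cannot leak between calls or between devices.
 */
static void set_pwr_by_rate(uint32_t base, const uint8_t *pwr,
			    unsigned int nrates, uint32_t *acc)
{
	for (unsigned int rate = 0; rate < nrates; rate++) {
		unsigned int shift = rate & 0x3;

		*acc |= (uint32_t)pwr[rate] << (shift * 8);
		if (shift == 0x3) {                 /* one full 4-rate word */
			txagc_write32(base + (rate & ~0x3u), *acc);
			*acc = 0;                   /* start the next word clean */
		}
	}
}

int main(void)
{
	const uint8_t pwr[8] = { 0x20, 0x20, 0x22, 0x24, 0x26, 0x26, 0x28, 0x2a };
	uint32_t acc = 0;                           /* caller-owned accumulator */

	set_pwr_by_rate(0x1d00, pwr, 8, &acc);
	return 0;
}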
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index 7f03903ddf4b..b4934da88e33 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -935,11 +935,11 @@ static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status,
}
static void
-rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
+rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path,
+ u8 rs, u32 *phy_pwr_idx)
{
struct rtw_hal *hal = &rtwdev->hal;
static const u32 offset_txagc[2] = {0x1d00, 0x1d80};
- static u32 phy_pwr_idx;
u8 rate, rate_idx, pwr_index, shift;
int j;
@@ -947,12 +947,12 @@ rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
rate = rtw_rate_section[rs][j];
pwr_index = hal->tx_pwr_tbl[path][rate];
shift = rate & 0x3;
- phy_pwr_idx |= ((u32)pwr_index << (shift * 8));
+ *phy_pwr_idx |= ((u32)pwr_index << (shift * 8));
if (shift == 0x3) {
rate_idx = rate & 0xfc;
rtw_write32(rtwdev, offset_txagc[path] + rate_idx,
- phy_pwr_idx);
- phy_pwr_idx = 0;
+ *phy_pwr_idx);
+ *phy_pwr_idx = 0;
}
}
}
@@ -960,11 +960,13 @@ rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
static void rtw8822b_set_tx_power_index(struct rtw_dev *rtwdev)
{
struct rtw_hal *hal = &rtwdev->hal;
+ u32 phy_pwr_idx = 0;
int rs, path;
for (path = 0; path < hal->rf_path_num; path++) {
- for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
- rtw8822b_set_tx_power_index_by_rate(rtwdev, path, rs);
+ for (rs = 0; rs <= __RTW_RATE_SECTION_2SS_MAX; rs++)
+ rtw8822b_set_tx_power_index_by_rate(rtwdev, path, rs,
+ &phy_pwr_idx);
}
}
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822bu.c b/drivers/net/wireless/realtek/rtw88/rtw8822bu.c
index 8883300fc6ad..572d1f31832e 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822bu.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822bu.c
@@ -73,6 +73,10 @@ static const struct usb_device_id rtw_8822bu_id_table[] = {
.driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* ELECOM WDB-867DU3S */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2c4e, 0x0107, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* Mercusys MA30H */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2c4e, 0x010a, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* Mercusys MA30N */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3322, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* D-Link DWA-T185 rev. A1 */
{},
};
MODULE_DEVICE_TABLE(usb, rtw_8822bu_id_table);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index ec362a817f5f..5e53e0db177e 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -2746,7 +2746,7 @@ static void rtw8822c_set_tx_power_index(struct rtw_dev *rtwdev)
s8 diff_idx[4];
rtw8822c_set_write_tx_power_ref(rtwdev, pwr_ref_cck, pwr_ref_ofdm);
- for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++) {
+ for (rs = 0; rs <= __RTW_RATE_SECTION_2SS_MAX; rs++) {
for (j = 0; j < rtw_rate_size[rs]; j++) {
rate = rtw_rate_section[rs][j];
pwr_a = hal->tx_pwr_tbl[RF_PATH_A][rate];
diff --git a/drivers/net/wireless/realtek/rtw88/rtw88xxa.c b/drivers/net/wireless/realtek/rtw88/rtw88xxa.c
index 71e61b9c0bec..0fa943271fb6 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw88xxa.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw88xxa.c
@@ -1637,7 +1637,7 @@ void rtw88xxa_set_tx_power_index(struct rtw_dev *rtwdev)
int rs, path;
for (path = 0; path < hal->rf_path_num; path++) {
- for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++) {
+ for (rs = 0; rs <= __RTW_RATE_SECTION_2SS_MAX; rs++) {
if (hal->rf_path_num == 1 &&
(rs == RTW_RATE_SECTION_HT_2S ||
rs == RTW_RATE_SECTION_VHT_2S))
diff --git a/drivers/net/wireless/realtek/rtw88/rx.c b/drivers/net/wireless/realtek/rtw88/rx.c
index 90fc8a5fa89e..8b0afaaffaa0 100644
--- a/drivers/net/wireless/realtek/rtw88/rx.c
+++ b/drivers/net/wireless/realtek/rtw88/rx.c
@@ -73,6 +73,12 @@ static void rtw_rx_phy_stat(struct rtw_dev *rtwdev,
rate_ss_evm = 2;
evm_id = RTW_EVM_2SS_A;
break;
+ case DESC_RATEMCS16...DESC_RATEMCS23:
+ case DESC_RATEVHT3SS_MCS0...DESC_RATEVHT3SS_MCS9:
+ rate_ss = 3;
+ rate_ss_evm = 3;
+ evm_id = RTW_EVM_3SS_A;
+ break;
default:
rtw_warn(rtwdev, "unknown pkt rate = %d\n", pkt_stat->rate);
return;
diff --git a/drivers/net/wireless/realtek/rtw88/sar.c b/drivers/net/wireless/realtek/rtw88/sar.c
index c472f1502b82..50b9c2412bb1 100644
--- a/drivers/net/wireless/realtek/rtw88/sar.c
+++ b/drivers/net/wireless/realtek/rtw88/sar.c
@@ -97,7 +97,7 @@ int rtw_set_sar_specs(struct rtw_dev *rtwdev,
power, BIT(RTW_COMMON_SAR_FCT));
for (j = 0; j < RTW_RF_PATH_MAX; j++) {
- for (k = 0; k < RTW_RATE_SECTION_MAX; k++) {
+ for (k = 0; k < RTW_RATE_SECTION_NUM; k++) {
arg = (struct rtw_sar_arg){
.sar_band = idx,
.path = j,
diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c
index e024061bdbf7..6209a49312f1 100644
--- a/drivers/net/wireless/realtek/rtw88/sdio.c
+++ b/drivers/net/wireless/realtek/rtw88/sdio.c
@@ -1147,7 +1147,7 @@ static void rtw_sdio_declaim(struct rtw_dev *rtwdev,
sdio_release_host(sdio_func);
}
-static struct rtw_hci_ops rtw_sdio_ops = {
+static const struct rtw_hci_ops rtw_sdio_ops = {
.tx_write = rtw_sdio_tx_write,
.tx_kick_off = rtw_sdio_tx_kick_off,
.setup = rtw_sdio_setup,
diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
index c4908db4ff0e..c8092fa0d9f1 100644
--- a/drivers/net/wireless/realtek/rtw88/usb.c
+++ b/drivers/net/wireless/realtek/rtw88/usb.c
@@ -881,7 +881,7 @@ static void rtw_usb_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable)
}
}
-static struct rtw_hci_ops rtw_usb_ops = {
+static const struct rtw_hci_ops rtw_usb_ops = {
.tx_write = rtw_usb_tx_write,
.tx_kick_off = rtw_usb_tx_kick_off,
.setup = rtw_usb_setup,
diff --git a/drivers/net/wireless/realtek/rtw88/util.c b/drivers/net/wireless/realtek/rtw88/util.c
index e222d3c01a77..66819f694405 100644
--- a/drivers/net/wireless/realtek/rtw88/util.c
+++ b/drivers/net/wireless/realtek/rtw88/util.c
@@ -101,7 +101,8 @@ void rtw_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss)
*nss = 4;
*mcs = rate - DESC_RATEVHT4SS_MCS0;
} else if (rate >= DESC_RATEMCS0 &&
- rate <= DESC_RATEMCS15) {
+ rate <= DESC_RATEMCS31) {
+ *nss = 0;
*mcs = rate - DESC_RATEMCS0;
}
}
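The rtw_desc_to_mcsrate() change above widens the HT branch from DESC_RATEMCS15 to DESC_RATEMCS31 and reports nss = 0 alongside the raw HT MCS index. One reason that is enough information, presumably, is that for HT (802.11n) the MCS number itself encodes the stream count, eight MCS values per stream, so a caller can derive it when needed, whereas VHT keeps the stream count and the per-stream MCS separate. The helper below is a standalone illustration of that mapping, not driver code.

#include <stdio.h>

/* HT MCS 0-7 -> 1SS, 8-15 -> 2SS, 16-23 -> 3SS, 24-31 -> 4SS. */
static unsigned int ht_mcs_to_nss(unsigned int ht_mcs)
{
	return ht_mcs / 8 + 1;
}

int main(void)
{
	printf("HT MCS 15 -> %u spatial streams\n", ht_mcs_to_nss(15));
	printf("HT MCS 31 -> %u spatial streams\n", ht_mcs_to_nss(31));
	return 0;
}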
diff --git a/drivers/net/wireless/realtek/rtw89/Kconfig b/drivers/net/wireless/realtek/rtw89/Kconfig
index b1c86cdd9c0e..205d7ecca7d7 100644
--- a/drivers/net/wireless/realtek/rtw89/Kconfig
+++ b/drivers/net/wireless/realtek/rtw89/Kconfig
@@ -123,7 +123,7 @@ config RTW89_DEBUGMSG
config RTW89_DEBUGFS
bool "Realtek rtw89 debugfs support"
- depends on RTW89_CORE
+ depends on RTW89_CORE && CFG80211_DEBUGFS
select RTW89_DEBUG
help
Enable debugfs support
diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c
index 8fa1e6c1ce13..eca3d767ff60 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.c
+++ b/drivers/net/wireless/realtek/rtw89/cam.c
@@ -476,6 +476,12 @@ int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev,
case WLAN_CIPHER_SUITE_WEP104:
hw_key_type = RTW89_SEC_KEY_TYPE_WEP104;
break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ if (!chip->hw_tkip_crypto)
+ return -EOPNOTSUPP;
+ hw_key_type = RTW89_SEC_KEY_TYPE_TKIP;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ break;
case WLAN_CIPHER_SUITE_CCMP:
hw_key_type = RTW89_SEC_KEY_TYPE_CCMP128;
if (!chip->hw_mgmt_tx_encrypt)
diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
index 4df4e04c3e67..f60e93870b09 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.c
+++ b/drivers/net/wireless/realtek/rtw89/chan.c
@@ -8,6 +8,7 @@
#include "fw.h"
#include "mac.h"
#include "ps.h"
+#include "sar.h"
#include "util.h"
static void rtw89_swap_chanctx(struct rtw89_dev *rtwdev,
@@ -155,7 +156,7 @@ int rtw89_iterate_entity_chan(struct rtw89_dev *rtwdev,
int ret;
u8 idx;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
for_each_set_bit(idx, hal->entity_map, NUM_OF_RTW89_CHANCTX) {
chan = rtw89_chan_get(rtwdev, idx);
@@ -310,7 +311,7 @@ const struct rtw89_chan *__rtw89_mgnt_chan_get(struct rtw89_dev *rtwdev,
enum rtw89_entity_mode mode;
u8 role_index;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
if (unlikely(link_index >= __RTW89_MLD_MAX_LINK_NUM)) {
WARN(1, "link index %u is invalid (max link inst num: %d)\n",
@@ -366,7 +367,7 @@ static void rtw89_entity_recalc_mgnt_roles(struct rtw89_dev *rtwdev)
u8 pos = 0;
int i, j;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
for (i = 0; i < RTW89_MAX_INTERFACE_NUM; i++)
mgnt->active_roles[i] = NULL;
@@ -427,7 +428,7 @@ enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev)
struct rtw89_chan chan;
u8 idx;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
bitmap_copy(recalc_map, hal->entity_map, NUM_OF_RTW89_CHANCTX);
@@ -2390,7 +2391,7 @@ static void rtw89_mcc_update_limit(struct rtw89_dev *rtwdev)
rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_upd_lmt_iterator, NULL);
}
-void rtw89_chanctx_work(struct work_struct *work)
+void rtw89_chanctx_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
chanctx_work.work);
@@ -2401,12 +2402,10 @@ void rtw89_chanctx_work(struct work_struct *work)
int ret;
int i;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(wiphy);
- if (hal->entity_pause) {
- mutex_unlock(&rtwdev->mutex);
+ if (hal->entity_pause)
return;
- }
for (i = 0; i < NUM_OF_RTW89_CHANCTX_CHANGES; i++) {
if (test_and_clear_bit(i, hal->changes))
@@ -2445,8 +2444,6 @@ void rtw89_chanctx_work(struct work_struct *work)
default:
break;
}
-
- mutex_unlock(&rtwdev->mutex);
}
void rtw89_queue_chanctx_change(struct rtw89_dev *rtwdev,
@@ -2477,8 +2474,8 @@ void rtw89_queue_chanctx_change(struct rtw89_dev *rtwdev,
rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"queue chanctx work for mode %d with delay %d us\n",
mode, delay);
- ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->chanctx_work,
- usecs_to_jiffies(delay));
+ wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->chanctx_work,
+ usecs_to_jiffies(delay));
}
void rtw89_queue_chanctx_work(struct rtw89_dev *rtwdev)
@@ -2491,7 +2488,7 @@ void rtw89_chanctx_track(struct rtw89_dev *rtwdev)
struct rtw89_hal *hal = &rtwdev->hal;
enum rtw89_entity_mode mode;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
if (hal->entity_pause)
return;
@@ -2512,7 +2509,7 @@ void rtw89_chanctx_pause(struct rtw89_dev *rtwdev,
struct rtw89_hal *hal = &rtwdev->hal;
enum rtw89_entity_mode mode;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
if (hal->entity_pause)
return;
@@ -2555,7 +2552,7 @@ void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev,
enum rtw89_entity_mode mode;
int ret;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
if (unlikely(!hal->entity_pause)) {
rtw89_chanctx_proceed_cb(rtwdev, cb_parm);
@@ -2677,6 +2674,7 @@ int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev,
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_entity_mgnt *mgnt = &hal->entity_mgnt;
struct rtw89_entity_weight w = {};
+ int ret;
rtwvif_link->chanctx_idx = cfg->idx;
rtwvif_link->chanctx_assigned = true;
@@ -2696,7 +2694,13 @@ int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev,
rtw89_swap_chanctx(rtwdev, cfg->idx, RTW89_CHANCTX_0);
out:
- return rtw89_set_channel(rtwdev);
+ ret = rtw89_set_channel(rtwdev);
+ if (ret)
+ return ret;
+
+ rtw89_tas_reset(rtwdev, true);
+
+ return 0;
}
void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h
index 092a6f676894..e6391f6f2aa7 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.h
+++ b/drivers/net/wireless/realtek/rtw89/chan.h
@@ -99,7 +99,7 @@ void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev,
const struct cfg80211_chan_def *chandef);
void rtw89_entity_init(struct rtw89_dev *rtwdev);
enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev);
-void rtw89_chanctx_work(struct work_struct *work);
+void rtw89_chanctx_work(struct wiphy *wiphy, struct wiphy_work *work);
void rtw89_queue_chanctx_work(struct rtw89_dev *rtwdev);
void rtw89_queue_chanctx_change(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_changes change);
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index 68316d44b204..5ccf0cbaed2f 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -10,7 +10,7 @@
#include "ps.h"
#include "reg.h"
-#define RTW89_COEX_VERSION 0x07000113
+#define RTW89_COEX_VERSION 0x07000413
#define FCXDEF_STEP 50 /* MUST <= FCXMAX_STEP and match with wl fw*/
#define BTC_E2G_LIMIT_DEF 80
@@ -89,10 +89,10 @@ static const struct rtw89_btc_fbtc_slot s_def[] = {
[CXST_B4] = __DEF_FBTC_SLOT(50, 0xe5555555, SLOT_MIX),
[CXST_LK] = __DEF_FBTC_SLOT(20, 0xea5a5a5a, SLOT_ISO),
[CXST_BLK] = __DEF_FBTC_SLOT(500, 0x55555555, SLOT_MIX),
- [CXST_E2G] = __DEF_FBTC_SLOT(0, 0xea5a5a5a, SLOT_MIX),
- [CXST_E5G] = __DEF_FBTC_SLOT(0, 0xffffffff, SLOT_ISO),
+ [CXST_E2G] = __DEF_FBTC_SLOT(5, 0xea5a5a5a, SLOT_MIX),
+ [CXST_E5G] = __DEF_FBTC_SLOT(5, 0xffffffff, SLOT_ISO),
[CXST_EBT] = __DEF_FBTC_SLOT(5, 0xe5555555, SLOT_MIX),
- [CXST_ENULL] = __DEF_FBTC_SLOT(0, 0xaaaaaaaa, SLOT_ISO),
+ [CXST_ENULL] = __DEF_FBTC_SLOT(5, 0xaaaaaaaa, SLOT_ISO),
[CXST_WLK] = __DEF_FBTC_SLOT(250, 0xea5a5a5a, SLOT_MIX),
[CXST_W1FDD] = __DEF_FBTC_SLOT(50, 0xffffffff, SLOT_ISO),
[CXST_B1FDD] = __DEF_FBTC_SLOT(50, 0xffffdfff, SLOT_ISO),
@@ -132,6 +132,14 @@ static const u32 cxtbl[] = {
static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
/* firmware version must be in decreasing order for each chip */
+ {RTL8852BT, RTW89_FW_VER_CODE(0, 29, 122, 0),
+ .fcxbtcrpt = 8, .fcxtdma = 7, .fcxslots = 7, .fcxcysta = 7,
+ .fcxstep = 7, .fcxnullsta = 7, .fcxmreg = 7, .fcxgpiodbg = 7,
+ .fcxbtver = 7, .fcxbtscan = 7, .fcxbtafh = 7, .fcxbtdevinfo = 7,
+ .fwlrole = 7, .frptmap = 3, .fcxctrl = 7, .fcxinit = 7,
+ .fwevntrptl = 1, .fwc2hfunc = 2, .drvinfo_type = 1, .info_buf = 1800,
+ .max_role_num = 6,
+ },
{RTL8852BT, RTW89_FW_VER_CODE(0, 29, 90, 0),
.fcxbtcrpt = 7, .fcxtdma = 7, .fcxslots = 7, .fcxcysta = 7,
.fcxstep = 7, .fcxnullsta = 7, .fcxmreg = 7, .fcxgpiodbg = 7,
@@ -1372,11 +1380,9 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
} else if (ver->fcxbtcrpt == 8) {
pfinfo = &pfwinfo->rpt_ctrl.finfo.v8;
pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v8);
- break;
} else if (ver->fcxbtcrpt == 7) {
pfinfo = &pfwinfo->rpt_ctrl.finfo.v7;
pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v7);
- break;
} else {
goto err;
}
@@ -1534,6 +1540,9 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
} else if (ver->fcxbtafh == 2) {
pfinfo = &pfwinfo->rpt_fbtc_btafh.finfo.v2;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btafh.finfo.v2);
+ } else if (ver->fcxbtafh == 7) {
+ pfinfo = &pfwinfo->rpt_fbtc_btafh.finfo.v7;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btafh.finfo.v7);
} else {
goto err;
}
@@ -1602,7 +1611,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
wl->ver_info.fw = le32_to_cpu(prpt->v4.wl_fw_info.fw_ver);
dm->wl_fw_cx_offload = !!le32_to_cpu(prpt->v4.wl_fw_info.cx_offload);
- for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++)
+ for (i = RTW89_PHY_0; i < RTW89_PHY_NUM; i++)
memcpy(&dm->gnt.band[i], &prpt->v4.gnt_val[i],
sizeof(dm->gnt.band[i]));
@@ -1634,7 +1643,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
wl->ver_info.fw = le32_to_cpu(prpt->v5.rpt_info.fw_ver);
dm->wl_fw_cx_offload = 0;
- for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++)
+ for (i = RTW89_PHY_0; i < RTW89_PHY_NUM; i++)
memcpy(&dm->gnt.band[i], &prpt->v5.gnt_val[i][0],
sizeof(dm->gnt.band[i]));
@@ -1661,7 +1670,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
wl->ver_info.fw = le32_to_cpu(prpt->v105.rpt_info.fw_ver);
dm->wl_fw_cx_offload = 0;
- for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++)
+ for (i = RTW89_PHY_0; i < RTW89_PHY_NUM; i++)
memcpy(&dm->gnt.band[i], &prpt->v105.gnt_val[i][0],
sizeof(dm->gnt.band[i]));
@@ -1687,7 +1696,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
wl->ver_info.fw_coex = le32_to_cpu(prpt->v7.rpt_info.cx_ver);
wl->ver_info.fw = le32_to_cpu(prpt->v7.rpt_info.fw_ver);
- for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++)
+ for (i = RTW89_PHY_0; i < RTW89_PHY_NUM; i++)
memcpy(&dm->gnt.band[i], &prpt->v7.gnt_val[i][0],
sizeof(dm->gnt.band[i]));
@@ -1719,7 +1728,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
wl->ver_info.fw_coex = le32_to_cpu(prpt->v8.rpt_info.cx_ver);
wl->ver_info.fw = le32_to_cpu(prpt->v8.rpt_info.fw_ver);
- for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++)
+ for (i = RTW89_PHY_0; i < RTW89_PHY_NUM; i++)
memcpy(&dm->gnt.band[i], &prpt->v8.gnt_val[i][0],
sizeof(dm->gnt.band[i]));
@@ -2706,7 +2715,7 @@ static void _set_gnt(struct rtw89_dev *rtwdev, u8 phy_map, u8 wl_state, u8 bt_st
if (phy_map > BTC_PHY_ALL)
return;
- for (i = 0; i < RTW89_PHY_MAX; i++) {
+ for (i = 0; i < RTW89_PHY_NUM; i++) {
if (!(phy_map & BIT(i)))
continue;
@@ -2755,7 +2764,7 @@ static void _set_gnt_v1(struct rtw89_dev *rtwdev, u8 phy_map,
if (phy_map > BTC_PHY_ALL)
return;
- for (i = 0; i < RTW89_PHY_MAX; i++) {
+ for (i = 0; i < RTW89_PHY_NUM; i++) {
if (!(phy_map & BIT(i)))
continue;
@@ -2955,7 +2964,7 @@ static void _set_rf_trx_para(struct rtw89_dev *rtwdev)
if (ver->fwlrole == 0) {
link_mode = wl->role_info.link_mode;
- for (i = 0; i < RTW89_PHY_MAX; i++) {
+ for (i = 0; i < RTW89_PHY_NUM; i++) {
if (wl->dbcc_info.real_band[i] == RTW89_BAND_2G)
dbcc_2g_phy = i;
}
@@ -4240,7 +4249,7 @@ static void _set_ant_v0(struct rtw89_dev *rtwdev, bool force_exec,
case BTC_ANT_W2G:
rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_WL);
if (rtwdev->dbcc_en) {
- for (i = 0; i < RTW89_PHY_MAX; i++) {
+ for (i = 0; i < RTW89_PHY_NUM; i++) {
b2g = (wl_dinfo->real_band[i] == RTW89_BAND_2G);
gnt_wl_ctrl = b2g ? BTC_GNT_HW : BTC_GNT_SW_HI;
@@ -4583,23 +4592,16 @@ static void _action_bt_hid(struct rtw89_dev *rtwdev)
static void _action_bt_a2dp(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_bt_link_info *bt_linfo = &btc->cx.bt.link_info;
- struct rtw89_btc_bt_a2dp_desc a2dp = bt_linfo->a2dp_desc;
struct rtw89_btc_dm *dm = &btc->dm;
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+ dm->slot_dur[CXST_W1] = 20;
+ dm->slot_dur[CXST_B1] = BTC_B1_MAX;
+
switch (btc->cx.state_map) {
case BTC_WBUSY_BNOSCAN: /* wl-busy + bt-A2DP */
- if (a2dp.vendor_id == 0x4c || dm->leak_ap) {
- dm->slot_dur[CXST_W1] = 40;
- dm->slot_dur[CXST_B1] = 200;
- _set_policy(rtwdev,
- BTC_CXP_PAUTO_TDW1B1, BTC_ACT_BT_A2DP);
- } else {
- _set_policy(rtwdev,
- BTC_CXP_PAUTO_TD50B1, BTC_ACT_BT_A2DP);
- }
+ _set_policy(rtwdev, BTC_CXP_PAUTO_TDW1B1, BTC_ACT_BT_A2DP);
break;
case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq + bt-A2DP */
_set_policy(rtwdev, BTC_CXP_PAUTO2_TD3050, BTC_ACT_BT_A2DP);
@@ -4609,15 +4611,10 @@ static void _action_bt_a2dp(struct rtw89_dev *rtwdev)
break;
case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-A2DP */
case BTC_WLINKING: /* wl-connecting + bt-A2DP */
- if (a2dp.vendor_id == 0x4c || dm->leak_ap) {
- dm->slot_dur[CXST_W1] = 40;
- dm->slot_dur[CXST_B1] = 200;
- _set_policy(rtwdev, BTC_CXP_AUTO_TDW1B1,
- BTC_ACT_BT_A2DP);
- } else {
- _set_policy(rtwdev, BTC_CXP_AUTO_TD50B1,
- BTC_ACT_BT_A2DP);
- }
+ if (btc->cx.wl.rfk_info.con_rfk)
+ _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_BT_A2DP);
+ else
+ _set_policy(rtwdev, BTC_CXP_AUTO_TDW1B1, BTC_ACT_BT_A2DP);
break;
case BTC_WIDLE: /* wl-idle + bt-A2DP */
_set_policy(rtwdev, BTC_CXP_AUTO_TD20B1, BTC_ACT_BT_A2DP);
@@ -4645,7 +4642,10 @@ static void _action_bt_a2dpsink(struct rtw89_dev *rtwdev)
_set_policy(rtwdev, BTC_CXP_FIX_TD2060, BTC_ACT_BT_A2DPSINK);
break;
case BTC_WLINKING: /* wl-connecting + bt-A2dp_Sink */
- _set_policy(rtwdev, BTC_CXP_FIX_TD3030, BTC_ACT_BT_A2DPSINK);
+ if (btc->cx.wl.rfk_info.con_rfk)
+ _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_BT_A2DPSINK);
+ else
+ _set_policy(rtwdev, BTC_CXP_FIX_TD3030, BTC_ACT_BT_A2DPSINK);
break;
case BTC_WIDLE: /* wl-idle + bt-A2dp_Sink */
_set_policy(rtwdev, BTC_CXP_FIX_TD2080, BTC_ACT_BT_A2DPSINK);
@@ -4693,27 +4693,20 @@ static void _action_bt_pan(struct rtw89_dev *rtwdev)
static void _action_bt_a2dp_hid(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_bt_link_info *bt_linfo = &btc->cx.bt.link_info;
- struct rtw89_btc_bt_a2dp_desc a2dp = bt_linfo->a2dp_desc;
struct rtw89_btc_dm *dm = &btc->dm;
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+ dm->slot_dur[CXST_W1] = 20;
+ dm->slot_dur[CXST_B1] = BTC_B1_MAX;
+
switch (btc->cx.state_map) {
case BTC_WBUSY_BNOSCAN: /* wl-busy + bt-A2DP+HID */
case BTC_WIDLE: /* wl-idle + bt-A2DP */
- if (a2dp.vendor_id == 0x4c || dm->leak_ap) {
- dm->slot_dur[CXST_W1] = 40;
- dm->slot_dur[CXST_B1] = 200;
- _set_policy(rtwdev,
- BTC_CXP_PAUTO_TDW1B1, BTC_ACT_BT_A2DP_HID);
- } else {
- _set_policy(rtwdev,
- BTC_CXP_PAUTO_TD50B1, BTC_ACT_BT_A2DP_HID);
- }
+ _set_policy(rtwdev, BTC_CXP_PAUTO_TDW1B1, BTC_ACT_BT_A2DP_HID);
break;
case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq + bt-A2DP+HID */
- _set_policy(rtwdev, BTC_CXP_PAUTO2_TD3050, BTC_ACT_BT_A2DP_HID);
+ _set_policy(rtwdev, BTC_CXP_PAUTO2_TD3070, BTC_ACT_BT_A2DP_HID);
break;
case BTC_WSCAN_BSCAN: /* wl-scan + bt-inq + bt-A2DP+HID */
@@ -4721,15 +4714,10 @@ static void _action_bt_a2dp_hid(struct rtw89_dev *rtwdev)
break;
case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-A2DP+HID */
case BTC_WLINKING: /* wl-connecting + bt-A2DP+HID */
- if (a2dp.vendor_id == 0x4c || dm->leak_ap) {
- dm->slot_dur[CXST_W1] = 40;
- dm->slot_dur[CXST_B1] = 200;
- _set_policy(rtwdev, BTC_CXP_AUTO_TDW1B1,
- BTC_ACT_BT_A2DP_HID);
- } else {
- _set_policy(rtwdev, BTC_CXP_AUTO_TD50B1,
- BTC_ACT_BT_A2DP_HID);
- }
+ if (btc->cx.wl.rfk_info.con_rfk)
+ _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_BT_A2DP_HID);
+ else
+ _set_policy(rtwdev, BTC_CXP_AUTO_TDW1B1, BTC_ACT_BT_A2DP_HID);
break;
}
}
@@ -4905,9 +4893,9 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
if (rtwdev->dbcc_en) {
if (ver->fwlrole == 0) {
- wl_rinfo.dbcc_2g_phy = RTW89_PHY_MAX;
+ wl_rinfo.dbcc_2g_phy = RTW89_PHY_NUM;
- for (i = 0; i < RTW89_PHY_MAX; i++) {
+ for (i = 0; i < RTW89_PHY_NUM; i++) {
if (wl_dinfo->real_band[i] == RTW89_BAND_2G)
wl_rinfo.dbcc_2g_phy = i;
}
@@ -5408,7 +5396,8 @@ static void _action_wl_scan(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
- if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) {
+ if (btc->cx.state_map != BTC_WLINKING &&
+ RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) {
_action_wl_25g_mcc(rtwdev);
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], Scan offload!\n");
} else if (rtwdev->dbcc_en) {
@@ -5798,7 +5787,7 @@ static void _update_wl_info(struct rtw89_dev *rtwdev)
phy = wl_linfo[i].phy;
/* check dbcc role */
- if (rtwdev->dbcc_en && phy < RTW89_PHY_MAX) {
+ if (rtwdev->dbcc_en && phy < RTW89_PHY_NUM) {
wl_dinfo->role[phy] = wl_linfo[i].role;
wl_dinfo->op_band[phy] = wl_linfo[i].band;
_update_dbcc_band(rtwdev, phy);
@@ -5948,7 +5937,7 @@ static void _update_wl_info_v1(struct rtw89_dev *rtwdev)
phy = wl_linfo[i].phy;
- if (rtwdev->dbcc_en && phy < RTW89_PHY_MAX) {
+ if (rtwdev->dbcc_en && phy < RTW89_PHY_NUM) {
wl_dinfo->role[phy] = wl_linfo[i].role;
wl_dinfo->op_band[phy] = wl_linfo[i].band;
_update_dbcc_band(rtwdev, phy);
@@ -6098,7 +6087,7 @@ static void _update_wl_info_v2(struct rtw89_dev *rtwdev)
phy = wl_linfo[i].phy;
- if (rtwdev->dbcc_en && phy < RTW89_PHY_MAX) {
+ if (rtwdev->dbcc_en && phy < RTW89_PHY_NUM) {
wl_dinfo->role[phy] = wl_linfo[i].role;
wl_dinfo->op_band[phy] = wl_linfo[i].band;
_update_dbcc_band(rtwdev, phy);
@@ -6389,7 +6378,7 @@ static void _update_wl_info_v7(struct rtw89_dev *rtwdev, u8 rid)
struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
struct rtw89_btc_wl_link_info *wl_linfo = wl->link_info;
struct rtw89_btc_wl_active_role_v7 *act_role = NULL;
- u8 i, mode, cnt = 0, cnt_2g = 0, cnt_5g = 0, phy_now = RTW89_PHY_MAX, phy_dbcc;
+ u8 i, mode, cnt = 0, cnt_2g = 0, cnt_5g = 0, phy_now = RTW89_PHY_NUM, phy_dbcc;
bool b2g = false, b5g = false, client_joined = false, client_inc_2g = false;
u8 client_cnt_last[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {};
u8 cid_role[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {};
@@ -6400,7 +6389,7 @@ static void _update_wl_info_v7(struct rtw89_dev *rtwdev, u8 rid)
memset(wl_rinfo, 0, sizeof(*wl_rinfo));
for (i = 0; i < RTW89_PORT_NUM; i++) {
- if (!wl_linfo[i].active || wl_linfo[i].phy >= RTW89_PHY_MAX)
+ if (!wl_linfo[i].active || wl_linfo[i].phy >= RTW89_PHY_NUM)
continue;
act_role = &wl_rinfo->active_role[i];
@@ -6494,7 +6483,7 @@ static void _update_wl_info_v7(struct rtw89_dev *rtwdev, u8 rid)
mode = _chk_dbcc(rtwdev, cid_ch, cid_phy, cid_role, &dbcc_2g_phy);
/* correct 2G-located PHY band for gnt ctrl */
- if (dbcc_2g_phy < RTW89_PHY_MAX)
+ if (dbcc_2g_phy < RTW89_PHY_NUM)
wl_dinfo->op_band[dbcc_2g_phy] = RTW89_BAND_2G;
} else if (b2g && b5g && cnt == 2) {
mode = BTC_WLINK_25G_MCC;
@@ -6720,7 +6709,7 @@ static void _update_wl_info_v8(struct rtw89_dev *rtwdev, u8 role_id, u8 rlink_id
_fw_set_drv_info(rtwdev, CXDRVINFO_ROLE);
}
-void rtw89_coex_act1_work(struct work_struct *work)
+void rtw89_coex_act1_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
coex_act1_work.work);
@@ -6729,7 +6718,8 @@ void rtw89_coex_act1_work(struct work_struct *work)
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_wl_info *wl = &cx->wl;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(wiphy);
+
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): enter\n", __func__);
dm->cnt_notify[BTC_NCNT_TIMER]++;
if (wl->status.map._4way)
@@ -6738,10 +6728,9 @@ void rtw89_coex_act1_work(struct work_struct *work)
wl->status.map.connecting = false;
_run_coex(rtwdev, BTC_RSN_ACT1_WORK);
- mutex_unlock(&rtwdev->mutex);
}
-void rtw89_coex_bt_devinfo_work(struct work_struct *work)
+void rtw89_coex_bt_devinfo_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
coex_bt_devinfo_work.work);
@@ -6749,15 +6738,15 @@ void rtw89_coex_bt_devinfo_work(struct work_struct *work)
struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
struct rtw89_btc_bt_a2dp_desc *a2dp = &btc->cx.bt.link_info.a2dp_desc;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(wiphy);
+
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): enter\n", __func__);
dm->cnt_notify[BTC_NCNT_TIMER]++;
a2dp->play_latency = 0;
_run_coex(rtwdev, BTC_RSN_BT_DEVINFO_WORK);
- mutex_unlock(&rtwdev->mutex);
}
-void rtw89_coex_rfk_chk_work(struct work_struct *work)
+void rtw89_coex_rfk_chk_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
coex_rfk_chk_work.work);
@@ -6766,7 +6755,8 @@ void rtw89_coex_rfk_chk_work(struct work_struct *work)
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_wl_info *wl = &cx->wl;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(wiphy);
+
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): enter\n", __func__);
dm->cnt_notify[BTC_NCNT_TIMER]++;
if (wl->rfk_info.state != BTC_WRFK_STOP) {
@@ -6778,7 +6768,6 @@ void rtw89_coex_rfk_chk_work(struct work_struct *work)
_write_scbd(rtwdev, BTC_WSCB_WLRFK, false);
_run_coex(rtwdev, BTC_RSN_RFK_CHK_WORK);
}
- mutex_unlock(&rtwdev->mutex);
}
static void _update_bt_scbd(struct rtw89_dev *rtwdev, bool only_update)
@@ -6882,7 +6871,7 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
u8 mode, igno_bt, always_freerun;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
dm->run_reason = reason;
_update_dm_step(rtwdev, reason);
@@ -7002,7 +6991,7 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
goto exit;
}
- if (wl->status.val & btc_scanning_map.val) {
+ if (wl->status.val & btc_scanning_map.val && !wl->rfk_info.con_rfk) {
_action_wl_scan(rtwdev);
bt->scan_rx_low_pri = true;
goto exit;
@@ -7187,7 +7176,7 @@ void rtw89_btc_ntfy_scan_start(struct rtw89_dev *rtwdev, u8 phy_idx, u8 band)
"[BTC], %s(): phy_idx=%d, band=%d\n",
__func__, phy_idx, band);
- if (phy_idx >= RTW89_PHY_MAX)
+ if (phy_idx >= RTW89_PHY_NUM)
return;
btc->dm.cnt_notify[BTC_NCNT_SCAN_START]++;
@@ -7223,6 +7212,8 @@ void rtw89_btc_ntfy_scan_finish(struct rtw89_dev *rtwdev, u8 phy_idx)
_fw_set_drv_info(rtwdev, CXDRVINFO_DBCC);
}
+ btc->dm.tdma_instant_excute = 1;
+
_run_coex(rtwdev, BTC_RSN_NTFY_SCAN_FINISH);
}
@@ -7235,7 +7226,7 @@ void rtw89_btc_ntfy_switch_band(struct rtw89_dev *rtwdev, u8 phy_idx, u8 band)
"[BTC], %s(): phy_idx=%d, band=%d\n",
__func__, phy_idx, band);
- if (phy_idx >= RTW89_PHY_MAX)
+ if (phy_idx >= RTW89_PHY_NUM)
return;
btc->dm.cnt_notify[BTC_NCNT_SWITCH_BAND]++;
@@ -7284,7 +7275,7 @@ void rtw89_btc_ntfy_specific_packet(struct rtw89_dev *rtwdev,
"[BTC], %s(): EAPOL_End cnt=%d\n",
__func__, cnt);
wl->status.map._4way = false;
- cancel_delayed_work(&rtwdev->coex_act1_work);
+ wiphy_delayed_work_cancel(rtwdev->hw->wiphy, &rtwdev->coex_act1_work);
break;
case PACKET_ARP:
cnt = ++cx->cnt_wl[BTC_WCNT_ARP];
@@ -7303,56 +7294,56 @@ void rtw89_btc_ntfy_specific_packet(struct rtw89_dev *rtwdev,
}
if (delay_work) {
- cancel_delayed_work(&rtwdev->coex_act1_work);
- ieee80211_queue_delayed_work(rtwdev->hw,
- &rtwdev->coex_act1_work, delay);
+ wiphy_delayed_work_cancel(rtwdev->hw->wiphy, &rtwdev->coex_act1_work);
+ wiphy_delayed_work_queue(rtwdev->hw->wiphy,
+ &rtwdev->coex_act1_work, delay);
}
btc->dm.cnt_notify[BTC_NCNT_SPECIAL_PACKET]++;
_run_coex(rtwdev, BTC_RSN_NTFY_SPECIFIC_PACKET);
}
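/*
 * Illustrative sketch, not part of this patch: re-arming a wiphy delayed
 * work the way the 4-way-handshake timer above is handled, i.e. cancel any
 * pending instance and queue it again so the delay restarts from now.
 * "demo_dev"/"coex_work" are the hypothetical placeholders from the previous
 * sketch; wiphy_delayed_work_cancel()/_queue() replace
 * cancel_delayed_work()/ieee80211_queue_delayed_work().
 */
static void demo_rearm_coex_work(struct demo_dev *ddev, unsigned long delay)
{
	wiphy_delayed_work_cancel(ddev->wiphy, &ddev->coex_work);
	wiphy_delayed_work_queue(ddev->wiphy, &ddev->coex_work, delay);
}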
-void rtw89_btc_ntfy_eapol_packet_work(struct work_struct *work)
+void rtw89_btc_ntfy_eapol_packet_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
btc.eapol_notify_work);
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(wiphy);
+
rtw89_leave_ps_mode(rtwdev);
rtw89_btc_ntfy_specific_packet(rtwdev, PACKET_EAPOL);
- mutex_unlock(&rtwdev->mutex);
}
-void rtw89_btc_ntfy_arp_packet_work(struct work_struct *work)
+void rtw89_btc_ntfy_arp_packet_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
btc.arp_notify_work);
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(wiphy);
+
rtw89_btc_ntfy_specific_packet(rtwdev, PACKET_ARP);
- mutex_unlock(&rtwdev->mutex);
}
-void rtw89_btc_ntfy_dhcp_packet_work(struct work_struct *work)
+void rtw89_btc_ntfy_dhcp_packet_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
btc.dhcp_notify_work);
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(wiphy);
+
rtw89_leave_ps_mode(rtwdev);
rtw89_btc_ntfy_specific_packet(rtwdev, PACKET_DHCP);
- mutex_unlock(&rtwdev->mutex);
}
-void rtw89_btc_ntfy_icmp_packet_work(struct work_struct *work)
+void rtw89_btc_ntfy_icmp_packet_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
btc.icmp_notify_work);
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(wiphy);
+
rtw89_leave_ps_mode(rtwdev);
rtw89_btc_ntfy_specific_packet(rtwdev, PACKET_ICMP);
- mutex_unlock(&rtwdev->mutex);
}
static u8 _update_bt_rssi_level(struct rtw89_dev *rtwdev, u8 rssi)
@@ -7531,9 +7522,9 @@ static void _update_bt_info(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
a2dp->vendor_id = 0;
a2dp->flush_time = 0;
a2dp->play_latency = 1;
- ieee80211_queue_delayed_work(rtwdev->hw,
- &rtwdev->coex_bt_devinfo_work,
- RTW89_COEX_BT_DEVINFO_WORK_PERIOD);
+ wiphy_delayed_work_queue(rtwdev->hw->wiphy,
+ &rtwdev->coex_bt_devinfo_work,
+ RTW89_COEX_BT_DEVINFO_WORK_PERIOD);
}
_run_coex(rtwdev, BTC_RSN_UPDATE_BT_INFO);
@@ -7671,7 +7662,8 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev,
else
wl->status.map.connecting = 0;
- if (state == BTC_ROLE_MSTS_STA_DIS_CONN)
+ if (state == BTC_ROLE_MSTS_STA_DIS_CONN ||
+ state == BTC_ROLE_MSTS_STA_CONN_END)
wl->status.map._4way = false;
_run_coex(rtwdev, BTC_RSN_NTFY_ROLE_INFO);
@@ -7781,7 +7773,7 @@ static bool _ntfy_wl_rfk(struct rtw89_dev *rtwdev, u8 phy_path,
wl->rfk_info.state = BTC_WRFK_STOP;
_write_scbd(rtwdev, BTC_WSCB_WLRFK, false);
- cancel_delayed_work(&rtwdev->coex_rfk_chk_work);
+ wiphy_delayed_work_cancel(rtwdev->hw->wiphy, &rtwdev->coex_rfk_chk_work);
break;
default:
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -7795,9 +7787,9 @@ static bool _ntfy_wl_rfk(struct rtw89_dev *rtwdev, u8 phy_path,
_run_coex(rtwdev, BTC_RSN_NTFY_WL_RFK);
if (wl->rfk_info.state == BTC_WRFK_START)
- ieee80211_queue_delayed_work(rtwdev->hw,
- &rtwdev->coex_rfk_chk_work,
- RTW89_COEX_RFK_CHK_WORK_PERIOD);
+ wiphy_delayed_work_queue(rtwdev->hw->wiphy,
+ &rtwdev->coex_rfk_chk_work,
+ RTW89_COEX_RFK_CHK_WORK_PERIOD);
}
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -7815,6 +7807,8 @@ void rtw89_btc_ntfy_wl_rfk(struct rtw89_dev *rtwdev, u8 phy_map,
bool allow;
int ret;
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
band = FIELD_GET(BTC_RFK_BAND_MAP, phy_map);
rtw89_debug(rtwdev, RTW89_DBG_RFK,
@@ -8115,6 +8109,7 @@ void rtw89_btc_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
return;
func = rtw89_btc_c2h_get_index_by_ver(rtwdev, func);
+ pfwinfo->cnt_c2h++;
switch (func) {
case BTF_EVNT_BUF_OVERFLOW:
@@ -8156,7 +8151,7 @@ void rtw89_btc_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
#define BTC_CX_FW_OFFLOAD 0
-static void _show_cx_info(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_cx_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
union rtw89_btc_module_info *md = &rtwdev->btc.mdinfo;
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -8168,40 +8163,43 @@ static void _show_cx_info(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
u32 ver_main = 0, ver_sub = 0, ver_hotfix = 0, id_branch = 0;
u8 cv, rfe, iso, ant_num, ant_single_pos;
+ char *p = buf, *end = buf + bufsz;
if (!(dm->coex_info_map & BTC_COEX_INFO_CX))
- return;
+ return 0;
dm->cnt_notify[BTC_NCNT_SHOW_COEX_INFO]++;
- seq_printf(m, "========== [BTC COEX INFO (%d)] ==========\n",
- chip->chip_id);
+ p += scnprintf(p, end - p,
+ "========== [BTC COEX INFO (%d)] ==========\n",
+ chip->chip_id);
ver_main = FIELD_GET(GENMASK(31, 24), RTW89_COEX_VERSION);
ver_sub = FIELD_GET(GENMASK(23, 16), RTW89_COEX_VERSION);
ver_hotfix = FIELD_GET(GENMASK(15, 8), RTW89_COEX_VERSION);
id_branch = FIELD_GET(GENMASK(7, 0), RTW89_COEX_VERSION);
- seq_printf(m, " %-15s : Coex:%d.%d.%d(branch:%d), ",
- "[coex_version]", ver_main, ver_sub, ver_hotfix, id_branch);
+ p += scnprintf(p, end - p, " %-15s : Coex:%d.%d.%d(branch:%d), ",
+ "[coex_version]", ver_main, ver_sub, ver_hotfix,
+ id_branch);
ver_main = FIELD_GET(GENMASK(31, 24), wl->ver_info.fw_coex);
ver_sub = FIELD_GET(GENMASK(23, 16), wl->ver_info.fw_coex);
ver_hotfix = FIELD_GET(GENMASK(15, 8), wl->ver_info.fw_coex);
id_branch = FIELD_GET(GENMASK(7, 0), wl->ver_info.fw_coex);
- seq_printf(m, "WL_FW_coex:%d.%d.%d(branch:%d)",
- ver_main, ver_sub, ver_hotfix, id_branch);
+ p += scnprintf(p, end - p, "WL_FW_coex:%d.%d.%d(branch:%d)",
+ ver_main, ver_sub, ver_hotfix, id_branch);
ver_main = FIELD_GET(GENMASK(31, 24), chip->wlcx_desired);
ver_sub = FIELD_GET(GENMASK(23, 16), chip->wlcx_desired);
ver_hotfix = FIELD_GET(GENMASK(15, 8), chip->wlcx_desired);
- seq_printf(m, "(%s, desired:%d.%d.%d), ",
- (wl->ver_info.fw_coex >= chip->wlcx_desired ?
- "Match" : "Mismatch"), ver_main, ver_sub, ver_hotfix);
+ p += scnprintf(p, end - p, "(%s, desired:%d.%d.%d), ",
+ (wl->ver_info.fw_coex >= chip->wlcx_desired ?
+ "Match" : "Mismatch"), ver_main, ver_sub, ver_hotfix);
- seq_printf(m, "BT_FW_coex:%d(%s, desired:%d)\n",
- bt->ver_info.fw_coex,
- (bt->ver_info.fw_coex >= chip->btcx_desired ?
- "Match" : "Mismatch"), chip->btcx_desired);
+ p += scnprintf(p, end - p, "BT_FW_coex:%d(%s, desired:%d)\n",
+ bt->ver_info.fw_coex,
+ (bt->ver_info.fw_coex >= chip->btcx_desired ?
+ "Match" : "Mismatch"), chip->btcx_desired);
if (bt->enable.now && bt->ver_info.fw == 0)
rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_VER_INFO, true);
@@ -8212,10 +8210,11 @@ static void _show_cx_info(struct rtw89_dev *rtwdev, struct seq_file *m)
ver_sub = FIELD_GET(GENMASK(23, 16), wl->ver_info.fw);
ver_hotfix = FIELD_GET(GENMASK(15, 8), wl->ver_info.fw);
id_branch = FIELD_GET(GENMASK(7, 0), wl->ver_info.fw);
- seq_printf(m, " %-15s : WL_FW:%d.%d.%d.%d, BT_FW:0x%x(%s)\n",
- "[sub_module]",
- ver_main, ver_sub, ver_hotfix, id_branch,
- bt->ver_info.fw, bt->run_patch_code ? "patch" : "ROM");
+ p += scnprintf(p, end - p,
+ " %-15s : WL_FW:%d.%d.%d.%d, BT_FW:0x%x(%s)\n",
+ "[sub_module]",
+ ver_main, ver_sub, ver_hotfix, id_branch,
+ bt->ver_info.fw, bt->run_patch_code ? "patch" : "ROM");
if (ver->fcxinit == 7) {
cv = md->md_v7.kt_ver;
@@ -8231,36 +8230,41 @@ static void _show_cx_info(struct rtw89_dev *rtwdev, struct seq_file *m)
ant_single_pos = md->md.ant.single_pos;
}
- seq_printf(m, " %-15s : cv:%x, rfe_type:0x%x, ant_iso:%d, ant_pg:%d, %s",
- "[hw_info]", cv, rfe, iso, ant_num,
- ant_num > 1 ? "" :
- ant_single_pos ? "1Ant_Pos:S1, " : "1Ant_Pos:S0, ");
+ p += scnprintf(p, end - p,
+ " %-15s : cv:%x, rfe_type:0x%x, ant_iso:%d, ant_pg:%d, %s",
+ "[hw_info]", cv, rfe, iso, ant_num,
+ ant_num > 1 ? "" :
+ ant_single_pos ? "1Ant_Pos:S1, " : "1Ant_Pos:S0, ");
- seq_printf(m, "3rd_coex:%d, dbcc:%d, tx_num:%d, rx_num:%d\n",
- btc->cx.other.type, rtwdev->dbcc_en, hal->tx_nss,
- hal->rx_nss);
+ p += scnprintf(p, end - p,
+ "3rd_coex:%d, dbcc:%d, tx_num:%d, rx_num:%d\n",
+ btc->cx.other.type, rtwdev->dbcc_en, hal->tx_nss,
+ hal->rx_nss);
+
+ return p - buf;
}
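/*
 * Illustrative sketch, not part of this patch: the seq_printf() ->
 * scnprintf() convention the _show_*() helpers above are converted to.
 * Each helper takes a caller-provided buffer, appends with scnprintf() and
 * returns the number of bytes it wrote.  Function and parameter names are
 * hypothetical placeholders.
 */
#include <linux/kernel.h>

static int demo_show_versions(char *buf, size_t bufsz, u32 fw_ver, u32 drv_ver)
{
	char *p = buf, *end = buf + bufsz;

	/* scnprintf() never writes past the buffer and returns the number of
	 * bytes actually stored (excluding the trailing NUL), so 'p' can be
	 * advanced unconditionally; once the buffer is full, later calls
	 * simply append nothing. */
	p += scnprintf(p, end - p, " %-15s : fw:0x%08x\n", "[demo_ver]", fw_ver);
	p += scnprintf(p, end - p, " %-15s : drv:0x%08x\n", "[demo_ver]", drv_ver);

	return p - buf;	/* total length, as the converted callers expect */
}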
-static void _show_wl_role_info(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_wl_role_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_wl_link_info *plink = NULL;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
struct rtw89_traffic_stats *t;
+ char *p = buf, *end = buf + bufsz;
u8 i;
if (rtwdev->dbcc_en) {
- seq_printf(m,
- " %-15s : PHY0_band(op:%d/scan:%d/real:%d), ",
- "[dbcc_info]", wl_dinfo->op_band[RTW89_PHY_0],
- wl_dinfo->scan_band[RTW89_PHY_0],
- wl_dinfo->real_band[RTW89_PHY_0]);
- seq_printf(m,
- "PHY1_band(op:%d/scan:%d/real:%d)\n",
- wl_dinfo->op_band[RTW89_PHY_1],
- wl_dinfo->scan_band[RTW89_PHY_1],
- wl_dinfo->real_band[RTW89_PHY_1]);
+ p += scnprintf(p, end - p,
+ " %-15s : PHY0_band(op:%d/scan:%d/real:%d), ",
+ "[dbcc_info]", wl_dinfo->op_band[RTW89_PHY_0],
+ wl_dinfo->scan_band[RTW89_PHY_0],
+ wl_dinfo->real_band[RTW89_PHY_0]);
+ p += scnprintf(p, end - p,
+ "PHY1_band(op:%d/scan:%d/real:%d)\n",
+ wl_dinfo->op_band[RTW89_PHY_1],
+ wl_dinfo->scan_band[RTW89_PHY_1],
+ wl_dinfo->real_band[RTW89_PHY_1]);
}
for (i = 0; i < RTW89_PORT_NUM; i++) {
@@ -8272,38 +8276,41 @@ static void _show_wl_role_info(struct rtw89_dev *rtwdev, struct seq_file *m)
if (!plink->active)
continue;
- seq_printf(m,
- " [port_%d] : role=%d(phy-%d), connect=%d(client_cnt=%d), mode=%d, center_ch=%d, bw=%d",
- plink->pid, (u32)plink->role, plink->phy,
- (u32)plink->connected, plink->client_cnt - 1,
- (u32)plink->mode, plink->ch, (u32)plink->bw);
+ p += scnprintf(p, end - p,
+ " [port_%d] : role=%d(phy-%d), connect=%d(client_cnt=%d), mode=%d, center_ch=%d, bw=%d",
+ plink->pid, (u32)plink->role, plink->phy,
+ (u32)plink->connected, plink->client_cnt - 1,
+ (u32)plink->mode, plink->ch, (u32)plink->bw);
if (plink->connected == MLME_NO_LINK)
continue;
- seq_printf(m,
- ", mac_id=%d, max_tx_time=%dus, max_tx_retry=%d\n",
- plink->mac_id, plink->tx_time, plink->tx_retry);
+ p += scnprintf(p, end - p,
+ ", mac_id=%d, max_tx_time=%dus, max_tx_retry=%d\n",
+ plink->mac_id, plink->tx_time, plink->tx_retry);
- seq_printf(m,
- " [port_%d] : rssi=-%ddBm(%d), busy=%d, dir=%s, ",
- plink->pid, 110 - plink->stat.rssi,
- plink->stat.rssi, plink->busy,
- plink->dir == RTW89_TFC_UL ? "UL" : "DL");
+ p += scnprintf(p, end - p,
+ " [port_%d] : rssi=-%ddBm(%d), busy=%d, dir=%s, ",
+ plink->pid, 110 - plink->stat.rssi,
+ plink->stat.rssi, plink->busy,
+ plink->dir == RTW89_TFC_UL ? "UL" : "DL");
t = &plink->stat.traffic;
- seq_printf(m,
- "tx[rate:%d/busy_level:%d], ",
- (u32)t->tx_rate, t->tx_tfc_lv);
+ p += scnprintf(p, end - p,
+ "tx[rate:%d/busy_level:%d], ",
+ (u32)t->tx_rate, t->tx_tfc_lv);
- seq_printf(m, "rx[rate:%d/busy_level:%d/drop:%d]\n",
- (u32)t->rx_rate,
- t->rx_tfc_lv, plink->rx_rate_drop_cnt);
+ p += scnprintf(p, end - p,
+ "rx[rate:%d/busy_level:%d/drop:%d]\n",
+ (u32)t->rx_rate,
+ t->rx_tfc_lv, plink->rx_rate_drop_cnt);
}
+
+ return p - buf;
}
-static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_wl_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
@@ -8314,12 +8321,13 @@ static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
+ char *p = buf, *end = buf + bufsz;
u8 mode;
if (!(btc->dm.coex_info_map & BTC_COEX_INFO_WL))
- return;
+ return 0;
- seq_puts(m, "========== [WL Status] ==========\n");
+ p += scnprintf(p, end - p, "========== [WL Status] ==========\n");
if (ver->fwlrole == 0)
mode = wl_rinfo->link_mode;
@@ -8332,24 +8340,28 @@ static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m)
else if (ver->fwlrole == 8)
mode = wl_rinfo_v8->link_mode;
else
- return;
+ goto out;
- seq_printf(m, " %-15s : link_mode:%d, ", "[status]", mode);
+ p += scnprintf(p, end - p, " %-15s : link_mode:%d, ", "[status]",
+ mode);
- seq_printf(m,
- "rf_off:%d, power_save:%d, scan:%s(band:%d/phy_map:0x%x), ",
- wl->status.map.rf_off, wl->status.map.lps,
- wl->status.map.scan ? "Y" : "N",
- wl->scan_info.band[RTW89_PHY_0], wl->scan_info.phy_map);
+ p += scnprintf(p, end - p,
+ "rf_off:%d, power_save:%d, scan:%s(band:%d/phy_map:0x%x), ",
+ wl->status.map.rf_off, wl->status.map.lps,
+ wl->status.map.scan ? "Y" : "N",
+ wl->scan_info.band[RTW89_PHY_0], wl->scan_info.phy_map);
- seq_printf(m,
- "connecting:%s, roam:%s, 4way:%s, init_ok:%s\n",
- wl->status.map.connecting ? "Y" : "N",
- wl->status.map.roaming ? "Y" : "N",
- wl->status.map._4way ? "Y" : "N",
- wl->status.map.init_ok ? "Y" : "N");
+ p += scnprintf(p, end - p,
+ "connecting:%s, roam:%s, 4way:%s, init_ok:%s\n",
+ wl->status.map.connecting ? "Y" : "N",
+ wl->status.map.roaming ? "Y" : "N",
+ wl->status.map._4way ? "Y" : "N",
+ wl->status.map.init_ok ? "Y" : "N");
- _show_wl_role_info(rtwdev, m);
+ p += _show_wl_role_info(rtwdev, p, end - p);
+
+out:
+ return p - buf;
}
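/*
 * Illustrative sketch, not part of this patch: how a parent dump helper
 * composes a child helper under the new buffer convention, mirroring the
 * _show_wl_info() -> _show_wl_role_info() call and the "goto out" early
 * exit above.  demo_show_versions() is the placeholder helper from the
 * previous sketch.
 */
static int demo_show_parent(char *buf, size_t bufsz, bool enabled)
{
	char *p = buf, *end = buf + bufsz;

	p += scnprintf(p, end - p, "========== [Demo] ==========\n");

	if (!enabled)
		goto out;	/* still report however much was written */

	/* A child appends at the current position and returns its own length,
	 * so the parent just advances 'p' by the return value. */
	p += demo_show_versions(p, end - p, 0x01020304, 0x00010203);

out:
	return p - buf;
}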
enum btc_bt_a2dp_type {
@@ -8358,7 +8370,7 @@ enum btc_bt_a2dp_type {
BTC_A2DP_TWS_RELAY = 2,
};
-static void _show_bt_profile_info(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_bt_profile_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_bt_link_info *bt_linfo = &btc->cx.bt.link_info;
@@ -8366,50 +8378,55 @@ static void _show_bt_profile_info(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_bt_hid_desc hid = bt_linfo->hid_desc;
struct rtw89_btc_bt_a2dp_desc a2dp = bt_linfo->a2dp_desc;
struct rtw89_btc_bt_pan_desc pan = bt_linfo->pan_desc;
+ char *p = buf, *end = buf + bufsz;
if (hfp.exist) {
- seq_printf(m, " %-15s : type:%s, sut_pwr:%d, golden-rx:%d",
- "[HFP]", (hfp.type == 0 ? "SCO" : "eSCO"),
- bt_linfo->sut_pwr_level[0],
- bt_linfo->golden_rx_shift[0]);
+ p += scnprintf(p, end - p,
+ " %-15s : type:%s, sut_pwr:%d, golden-rx:%d",
+ "[HFP]", (hfp.type == 0 ? "SCO" : "eSCO"),
+ bt_linfo->sut_pwr_level[0],
+ bt_linfo->golden_rx_shift[0]);
}
if (hid.exist) {
- seq_printf(m,
- "\n\r %-15s : type:%s%s%s%s%s pair-cnt:%d, sut_pwr:%d, golden-rx:%d\n",
- "[HID]",
- hid.type & BTC_HID_218 ? "2/18," : "",
- hid.type & BTC_HID_418 ? "4/18," : "",
- hid.type & BTC_HID_BLE ? "BLE," : "",
- hid.type & BTC_HID_RCU ? "RCU," : "",
- hid.type & BTC_HID_RCU_VOICE ? "RCU-Voice," : "",
- hid.pair_cnt, bt_linfo->sut_pwr_level[1],
- bt_linfo->golden_rx_shift[1]);
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : type:%s%s%s%s%s pair-cnt:%d, sut_pwr:%d, golden-rx:%d\n",
+ "[HID]",
+ hid.type & BTC_HID_218 ? "2/18," : "",
+ hid.type & BTC_HID_418 ? "4/18," : "",
+ hid.type & BTC_HID_BLE ? "BLE," : "",
+ hid.type & BTC_HID_RCU ? "RCU," : "",
+ hid.type & BTC_HID_RCU_VOICE ? "RCU-Voice," : "",
+ hid.pair_cnt, bt_linfo->sut_pwr_level[1],
+ bt_linfo->golden_rx_shift[1]);
}
if (a2dp.exist) {
- seq_printf(m,
- " %-15s : type:%s, bit-pool:%d, flush-time:%d, ",
- "[A2DP]",
- a2dp.type == BTC_A2DP_LEGACY ? "Legacy" : "TWS",
- a2dp.bitpool, a2dp.flush_time);
+ p += scnprintf(p, end - p,
+ " %-15s : type:%s, bit-pool:%d, flush-time:%d, ",
+ "[A2DP]",
+ a2dp.type == BTC_A2DP_LEGACY ? "Legacy" : "TWS",
+ a2dp.bitpool, a2dp.flush_time);
- seq_printf(m,
- "vid:0x%x, Dev-name:0x%x, sut_pwr:%d, golden-rx:%d\n",
- a2dp.vendor_id, a2dp.device_name,
- bt_linfo->sut_pwr_level[2],
- bt_linfo->golden_rx_shift[2]);
+ p += scnprintf(p, end - p,
+ "vid:0x%x, Dev-name:0x%x, sut_pwr:%d, golden-rx:%d\n",
+ a2dp.vendor_id, a2dp.device_name,
+ bt_linfo->sut_pwr_level[2],
+ bt_linfo->golden_rx_shift[2]);
}
if (pan.exist) {
- seq_printf(m, " %-15s : sut_pwr:%d, golden-rx:%d\n",
- "[PAN]",
- bt_linfo->sut_pwr_level[3],
- bt_linfo->golden_rx_shift[3]);
+ p += scnprintf(p, end - p,
+ " %-15s : sut_pwr:%d, golden-rx:%d\n",
+ "[PAN]",
+ bt_linfo->sut_pwr_level[3],
+ bt_linfo->golden_rx_shift[3]);
}
+
+ return p - buf;
}
-static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_bt_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
@@ -8418,129 +8435,136 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_wl_info *wl = &cx->wl;
struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
union rtw89_btc_module_info *md = &btc->mdinfo;
+ char *p = buf, *end = buf + bufsz;
u8 *afh = bt_linfo->afh_map;
u8 *afh_le = bt_linfo->afh_map_le;
u8 bt_pos;
if (!(btc->dm.coex_info_map & BTC_COEX_INFO_BT))
- return;
+ return 0;
if (ver->fcxinit == 7)
bt_pos = md->md_v7.bt_pos;
else
bt_pos = md->md.bt_pos;
- seq_puts(m, "========== [BT Status] ==========\n");
-
- seq_printf(m, " %-15s : enable:%s, btg:%s%s, connect:%s, ",
- "[status]", bt->enable.now ? "Y" : "N",
- bt->btg_type ? "Y" : "N",
- (bt->enable.now && (bt->btg_type != bt_pos) ?
- "(efuse-mismatch!!)" : ""),
- (bt_linfo->status.map.connect ? "Y" : "N"));
-
- seq_printf(m, "igno_wl:%s, mailbox_avl:%s, rfk_state:0x%x\n",
- bt->igno_wl ? "Y" : "N",
- bt->mbx_avl ? "Y" : "N", bt->rfk_info.val);
-
- seq_printf(m, " %-15s : profile:%s%s%s%s%s ",
- "[profile]",
- (bt_linfo->profile_cnt.now == 0) ? "None," : "",
- bt_linfo->hfp_desc.exist ? "HFP," : "",
- bt_linfo->hid_desc.exist ? "HID," : "",
- bt_linfo->a2dp_desc.exist ?
- (bt_linfo->a2dp_desc.sink ? "A2DP_sink," : "A2DP,") : "",
- bt_linfo->pan_desc.exist ? "PAN," : "");
-
- seq_printf(m,
- "multi-link:%s, role:%s, ble-connect:%s, CQDDR:%s, A2DP_active:%s, PAN_active:%s\n",
- bt_linfo->multi_link.now ? "Y" : "N",
- bt_linfo->slave_role ? "Slave" : "Master",
- bt_linfo->status.map.ble_connect ? "Y" : "N",
- bt_linfo->cqddr ? "Y" : "N",
- bt_linfo->a2dp_desc.active ? "Y" : "N",
- bt_linfo->pan_desc.active ? "Y" : "N");
-
- seq_printf(m,
- " %-15s : rssi:%ddBm(lvl:%d), tx_rate:%dM, %s%s%s",
- "[link]", bt_linfo->rssi - 100,
- bt->rssi_level,
- bt_linfo->tx_3m ? 3 : 2,
- bt_linfo->status.map.inq_pag ? " inq-page!!" : "",
- bt_linfo->status.map.acl_busy ? " acl_busy!!" : "",
- bt_linfo->status.map.mesh_busy ? " mesh_busy!!" : "");
-
- seq_printf(m,
- "%s afh_map[%02x%02x_%02x%02x_%02x%02x_%02x%02x_%02x%02x], ",
- bt_linfo->relink.now ? " ReLink!!" : "",
- afh[0], afh[1], afh[2], afh[3], afh[4],
- afh[5], afh[6], afh[7], afh[8], afh[9]);
+ p += scnprintf(p, end - p, "========== [BT Status] ==========\n");
+
+ p += scnprintf(p, end - p,
+ " %-15s : enable:%s, btg:%s%s, connect:%s, ",
+ "[status]", bt->enable.now ? "Y" : "N",
+ bt->btg_type ? "Y" : "N",
+ (bt->enable.now && (bt->btg_type != bt_pos) ?
+ "(efuse-mismatch!!)" : ""),
+ (bt_linfo->status.map.connect ? "Y" : "N"));
+
+ p += scnprintf(p, end - p,
+ "igno_wl:%s, mailbox_avl:%s, rfk_state:0x%x\n",
+ bt->igno_wl ? "Y" : "N",
+ bt->mbx_avl ? "Y" : "N", bt->rfk_info.val);
+
+ p += scnprintf(p, end - p, " %-15s : profile:%s%s%s%s%s ",
+ "[profile]",
+ (bt_linfo->profile_cnt.now == 0) ? "None," : "",
+ bt_linfo->hfp_desc.exist ? "HFP," : "",
+ bt_linfo->hid_desc.exist ? "HID," : "",
+ bt_linfo->a2dp_desc.exist ?
+ (bt_linfo->a2dp_desc.sink ? "A2DP_sink," : "A2DP,") : "",
+ bt_linfo->pan_desc.exist ? "PAN," : "");
+
+ p += scnprintf(p, end - p,
+ "multi-link:%s, role:%s, ble-connect:%s, CQDDR:%s, A2DP_active:%s, PAN_active:%s\n",
+ bt_linfo->multi_link.now ? "Y" : "N",
+ bt_linfo->slave_role ? "Slave" : "Master",
+ bt_linfo->status.map.ble_connect ? "Y" : "N",
+ bt_linfo->cqddr ? "Y" : "N",
+ bt_linfo->a2dp_desc.active ? "Y" : "N",
+ bt_linfo->pan_desc.active ? "Y" : "N");
+
+ p += scnprintf(p, end - p,
+ " %-15s : rssi:%ddBm(lvl:%d), tx_rate:%dM, %s%s%s",
+ "[link]", bt_linfo->rssi - 100,
+ bt->rssi_level,
+ bt_linfo->tx_3m ? 3 : 2,
+ bt_linfo->status.map.inq_pag ? " inq-page!!" : "",
+ bt_linfo->status.map.acl_busy ? " acl_busy!!" : "",
+ bt_linfo->status.map.mesh_busy ? " mesh_busy!!" : "");
+
+ p += scnprintf(p, end - p,
+ "%s afh_map[%02x%02x_%02x%02x_%02x%02x_%02x%02x_%02x%02x], ",
+ bt_linfo->relink.now ? " ReLink!!" : "",
+ afh[0], afh[1], afh[2], afh[3], afh[4],
+ afh[5], afh[6], afh[7], afh[8], afh[9]);
if (ver->fcxbtafh == 2 && bt_linfo->status.map.ble_connect)
- seq_printf(m,
- "LE[%02x%02x_%02x_%02x%02x]",
- afh_le[0], afh_le[1], afh_le[2],
- afh_le[3], afh_le[4]);
-
- seq_printf(m, "wl_ch_map[en:%d/ch:%d/bw:%d]\n",
- wl->afh_info.en, wl->afh_info.ch, wl->afh_info.bw);
-
- seq_printf(m,
- " %-15s : retry:%d, relink:%d, rate_chg:%d, reinit:%d, reenable:%d, ",
- "[stat_cnt]", cx->cnt_bt[BTC_BCNT_RETRY],
- cx->cnt_bt[BTC_BCNT_RELINK], cx->cnt_bt[BTC_BCNT_RATECHG],
- cx->cnt_bt[BTC_BCNT_REINIT], cx->cnt_bt[BTC_BCNT_REENABLE]);
-
- seq_printf(m,
- "role-switch:%d, afh:%d, inq_page:%d(inq:%d/page:%d), igno_wl:%d\n",
- cx->cnt_bt[BTC_BCNT_ROLESW], cx->cnt_bt[BTC_BCNT_AFH],
- cx->cnt_bt[BTC_BCNT_INQPAG], cx->cnt_bt[BTC_BCNT_INQ],
- cx->cnt_bt[BTC_BCNT_PAGE], cx->cnt_bt[BTC_BCNT_IGNOWL]);
-
- _show_bt_profile_info(rtwdev, m);
-
- seq_printf(m,
- " %-15s : raw_data[%02x %02x %02x %02x %02x %02x] (type:%s/cnt:%d/same:%d)\n",
- "[bt_info]", bt->raw_info[2], bt->raw_info[3],
- bt->raw_info[4], bt->raw_info[5], bt->raw_info[6],
- bt->raw_info[7],
- bt->raw_info[0] == BTC_BTINFO_AUTO ? "auto" : "reply",
- cx->cnt_bt[BTC_BCNT_INFOUPDATE],
- cx->cnt_bt[BTC_BCNT_INFOSAME]);
-
- seq_printf(m,
- " %-15s : Hi-rx = %d, Hi-tx = %d, Lo-rx = %d, Lo-tx = %d (bt_polut_wl_tx = %d)",
- "[trx_req_cnt]", cx->cnt_bt[BTC_BCNT_HIPRI_RX],
- cx->cnt_bt[BTC_BCNT_HIPRI_TX], cx->cnt_bt[BTC_BCNT_LOPRI_RX],
- cx->cnt_bt[BTC_BCNT_LOPRI_TX], cx->cnt_bt[BTC_BCNT_POLUT]);
+ p += scnprintf(p, end - p,
+ "LE[%02x%02x_%02x_%02x%02x]",
+ afh_le[0], afh_le[1], afh_le[2],
+ afh_le[3], afh_le[4]);
+
+ p += scnprintf(p, end - p, "wl_ch_map[en:%d/ch:%d/bw:%d]\n",
+ wl->afh_info.en, wl->afh_info.ch, wl->afh_info.bw);
+
+ p += scnprintf(p, end - p,
+ " %-15s : retry:%d, relink:%d, rate_chg:%d, reinit:%d, reenable:%d, ",
+ "[stat_cnt]", cx->cnt_bt[BTC_BCNT_RETRY],
+ cx->cnt_bt[BTC_BCNT_RELINK],
+ cx->cnt_bt[BTC_BCNT_RATECHG],
+ cx->cnt_bt[BTC_BCNT_REINIT],
+ cx->cnt_bt[BTC_BCNT_REENABLE]);
+
+ p += scnprintf(p, end - p,
+ "role-switch:%d, afh:%d, inq_page:%d(inq:%d/page:%d), igno_wl:%d\n",
+ cx->cnt_bt[BTC_BCNT_ROLESW], cx->cnt_bt[BTC_BCNT_AFH],
+ cx->cnt_bt[BTC_BCNT_INQPAG], cx->cnt_bt[BTC_BCNT_INQ],
+ cx->cnt_bt[BTC_BCNT_PAGE], cx->cnt_bt[BTC_BCNT_IGNOWL]);
+
+ p += _show_bt_profile_info(rtwdev, p, end - p);
+
+ p += scnprintf(p, end - p,
+ " %-15s : raw_data[%02x %02x %02x %02x %02x %02x] (type:%s/cnt:%d/same:%d)\n",
+ "[bt_info]", bt->raw_info[2], bt->raw_info[3],
+ bt->raw_info[4], bt->raw_info[5], bt->raw_info[6],
+ bt->raw_info[7],
+ bt->raw_info[0] == BTC_BTINFO_AUTO ? "auto" : "reply",
+ cx->cnt_bt[BTC_BCNT_INFOUPDATE],
+ cx->cnt_bt[BTC_BCNT_INFOSAME]);
+
+ p += scnprintf(p, end - p,
+ " %-15s : Hi-rx = %d, Hi-tx = %d, Lo-rx = %d, Lo-tx = %d (bt_polut_wl_tx = %d)",
+ "[trx_req_cnt]", cx->cnt_bt[BTC_BCNT_HIPRI_RX],
+ cx->cnt_bt[BTC_BCNT_HIPRI_TX],
+ cx->cnt_bt[BTC_BCNT_LOPRI_RX],
+ cx->cnt_bt[BTC_BCNT_LOPRI_TX],
+ cx->cnt_bt[BTC_BCNT_POLUT]);
if (!bt->scan_info_update) {
rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_SCAN_INFO, true);
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
} else {
rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_SCAN_INFO, false);
if (ver->fcxbtscan == 1) {
- seq_printf(m,
- "(INQ:%d-%d/PAGE:%d-%d/LE:%d-%d/INIT:%d-%d)",
- le16_to_cpu(bt->scan_info_v1[BTC_SCAN_INQ].win),
- le16_to_cpu(bt->scan_info_v1[BTC_SCAN_INQ].intvl),
- le16_to_cpu(bt->scan_info_v1[BTC_SCAN_PAGE].win),
- le16_to_cpu(bt->scan_info_v1[BTC_SCAN_PAGE].intvl),
- le16_to_cpu(bt->scan_info_v1[BTC_SCAN_BLE].win),
- le16_to_cpu(bt->scan_info_v1[BTC_SCAN_BLE].intvl),
- le16_to_cpu(bt->scan_info_v1[BTC_SCAN_INIT].win),
- le16_to_cpu(bt->scan_info_v1[BTC_SCAN_INIT].intvl));
+ p += scnprintf(p, end - p,
+ "(INQ:%d-%d/PAGE:%d-%d/LE:%d-%d/INIT:%d-%d)",
+ le16_to_cpu(bt->scan_info_v1[BTC_SCAN_INQ].win),
+ le16_to_cpu(bt->scan_info_v1[BTC_SCAN_INQ].intvl),
+ le16_to_cpu(bt->scan_info_v1[BTC_SCAN_PAGE].win),
+ le16_to_cpu(bt->scan_info_v1[BTC_SCAN_PAGE].intvl),
+ le16_to_cpu(bt->scan_info_v1[BTC_SCAN_BLE].win),
+ le16_to_cpu(bt->scan_info_v1[BTC_SCAN_BLE].intvl),
+ le16_to_cpu(bt->scan_info_v1[BTC_SCAN_INIT].win),
+ le16_to_cpu(bt->scan_info_v1[BTC_SCAN_INIT].intvl));
} else if (ver->fcxbtscan == 2) {
- seq_printf(m,
- "(BG:%d-%d/INIT:%d-%d/LE:%d-%d)",
- le16_to_cpu(bt->scan_info_v2[CXSCAN_BG].win),
- le16_to_cpu(bt->scan_info_v2[CXSCAN_BG].intvl),
- le16_to_cpu(bt->scan_info_v2[CXSCAN_INIT].win),
- le16_to_cpu(bt->scan_info_v2[CXSCAN_INIT].intvl),
- le16_to_cpu(bt->scan_info_v2[CXSCAN_LE].win),
- le16_to_cpu(bt->scan_info_v2[CXSCAN_LE].intvl));
+ p += scnprintf(p, end - p,
+ "(BG:%d-%d/INIT:%d-%d/LE:%d-%d)",
+ le16_to_cpu(bt->scan_info_v2[CXSCAN_BG].win),
+ le16_to_cpu(bt->scan_info_v2[CXSCAN_BG].intvl),
+ le16_to_cpu(bt->scan_info_v2[CXSCAN_INIT].win),
+ le16_to_cpu(bt->scan_info_v2[CXSCAN_INIT].intvl),
+ le16_to_cpu(bt->scan_info_v2[CXSCAN_LE].win),
+ le16_to_cpu(bt->scan_info_v2[CXSCAN_LE].intvl));
}
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
if (bt_linfo->profile_cnt.now || bt_linfo->status.map.ble_connect)
@@ -8560,6 +8584,8 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_DEVICE_INFO, true);
else
rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_DEVICE_INFO, false);
+
+ return p - buf;
}
#define CASE_BTC_RSN_STR(e) case BTC_RSN_ ## e: return #e
@@ -8853,114 +8879,132 @@ static const char *id_to_ant(u32 id)
}
static
-void seq_print_segment(struct seq_file *m, const char *prefix, u16 *data,
- u8 len, u8 seg_len, u8 start_idx, u8 ring_len)
+int scnprintf_segment(char *buf, size_t bufsz, const char *prefix, const u16 *data,
+ u8 len, u8 seg_len, u8 start_idx, u8 ring_len)
{
- u8 i;
+ char *p = buf, *end = buf + bufsz;
u8 cur_index;
+ u8 i;
for (i = 0; i < len ; i++) {
if ((i % seg_len) == 0)
- seq_printf(m, " %-15s : ", prefix);
+ p += scnprintf(p, end - p, " %-15s : ", prefix);
cur_index = (start_idx + i) % ring_len;
if (i % 3 == 0)
- seq_printf(m, "-> %-20s",
- steps_to_str(*(data + cur_index)));
+ p += scnprintf(p, end - p, "-> %-20s",
+ steps_to_str(*(data + cur_index)));
else if (i % 3 == 1)
- seq_printf(m, "-> %-15s",
- steps_to_str(*(data + cur_index)));
+ p += scnprintf(p, end - p, "-> %-15s",
+ steps_to_str(*(data + cur_index)));
else
- seq_printf(m, "-> %-13s",
- steps_to_str(*(data + cur_index)));
+ p += scnprintf(p, end - p, "-> %-13s",
+ steps_to_str(*(data + cur_index)));
if (i == (len - 1) || (i % seg_len) == (seg_len - 1))
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
+
+ return p - buf;
}
-static void _show_dm_step(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_dm_step(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
+ char *p = buf, *end = buf + bufsz;
u8 start_idx;
u8 len;
len = dm->dm_step.step_ov ? RTW89_BTC_DM_MAXSTEP : dm->dm_step.step_pos;
start_idx = dm->dm_step.step_ov ? dm->dm_step.step_pos : 0;
- seq_print_segment(m, "[dm_steps]", dm->dm_step.step, len, 6, start_idx,
- ARRAY_SIZE(dm->dm_step.step));
+ p += scnprintf_segment(p, end - p, "[dm_steps]", dm->dm_step.step, len,
+ 6, start_idx, ARRAY_SIZE(dm->dm_step.step));
+
+ return p - buf;
}
-static void _show_dm_info(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_dm_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ char *p = buf, *end = buf + bufsz;
u8 igno_bt;
if (!(dm->coex_info_map & BTC_COEX_INFO_DM))
- return;
+ return 0;
- seq_printf(m, "========== [Mechanism Status %s] ==========\n",
- (btc->manual_ctrl ? "(Manual)" : "(Auto)"));
+ p += scnprintf(p, end - p,
+ "========== [Mechanism Status %s] ==========\n",
+ (btc->manual_ctrl ? "(Manual)" : "(Auto)"));
- seq_printf(m,
- " %-15s : type:%s, reason:%s(), action:%s(), ant_path:%s, init_mode:%s, run_cnt:%d\n",
- "[status]",
- btc->ant_type == BTC_ANT_SHARED ? "shared" : "dedicated",
- steps_to_str(dm->run_reason),
- steps_to_str(dm->run_action | BTC_ACT_EXT_BIT),
- id_to_ant(FIELD_GET(GENMASK(7, 0), dm->set_ant_path)),
- id_to_mode(wl->coex_mode),
- dm->cnt_dm[BTC_DCNT_RUN]);
+ p += scnprintf(p, end - p,
+ " %-15s : type:%s, reason:%s(), action:%s(), ant_path:%s, init_mode:%s, run_cnt:%d\n",
+ "[status]",
+ btc->ant_type == BTC_ANT_SHARED ? "shared" : "dedicated",
+ steps_to_str(dm->run_reason),
+ steps_to_str(dm->run_action | BTC_ACT_EXT_BIT),
+ id_to_ant(FIELD_GET(GENMASK(7, 0), dm->set_ant_path)),
+ id_to_mode(wl->coex_mode),
+ dm->cnt_dm[BTC_DCNT_RUN]);
- _show_dm_step(rtwdev, m);
+ p += _show_dm_step(rtwdev, p, end - p);
if (ver->fcxctrl == 7)
igno_bt = btc->ctrl.ctrl_v7.igno_bt;
else
igno_bt = btc->ctrl.ctrl.igno_bt;
- seq_printf(m, " %-15s : wl_only:%d, bt_only:%d, igno_bt:%d, free_run:%d, wl_ps_ctrl:%d, wl_mimo_ps:%d, ",
- "[dm_flag]", dm->wl_only, dm->bt_only, igno_bt,
- dm->freerun, btc->lps, dm->wl_mimo_ps);
+ p += scnprintf(p, end - p,
+ " %-15s : wl_only:%d, bt_only:%d, igno_bt:%d, free_run:%d, wl_ps_ctrl:%d, wl_mimo_ps:%d, ",
+ "[dm_flag]", dm->wl_only, dm->bt_only, igno_bt,
+ dm->freerun, btc->lps, dm->wl_mimo_ps);
- seq_printf(m, "leak_ap:%d, fw_offload:%s%s\n", dm->leak_ap,
- (BTC_CX_FW_OFFLOAD ? "Y" : "N"),
- (dm->wl_fw_cx_offload == BTC_CX_FW_OFFLOAD ?
- "" : "(Mismatch!!)"));
+ p += scnprintf(p, end - p, "leak_ap:%d, fw_offload:%s%s\n",
+ dm->leak_ap,
+ (BTC_CX_FW_OFFLOAD ? "Y" : "N"),
+ (dm->wl_fw_cx_offload == BTC_CX_FW_OFFLOAD ?
+ "" : "(Mismatch!!)"));
if (dm->rf_trx_para.wl_tx_power == 0xff)
- seq_printf(m,
- " %-15s : wl_rssi_lvl:%d, para_lvl:%d, wl_tx_pwr:orig, ",
- "[trx_ctrl]", wl->rssi_level, dm->trx_para_level);
+ p += scnprintf(p, end - p,
+ " %-15s : wl_rssi_lvl:%d, para_lvl:%d, wl_tx_pwr:orig, ",
+ "[trx_ctrl]", wl->rssi_level,
+ dm->trx_para_level);
else
- seq_printf(m,
- " %-15s : wl_rssi_lvl:%d, para_lvl:%d, wl_tx_pwr:%d, ",
- "[trx_ctrl]", wl->rssi_level, dm->trx_para_level,
- dm->rf_trx_para.wl_tx_power);
+ p += scnprintf(p, end - p,
+ " %-15s : wl_rssi_lvl:%d, para_lvl:%d, wl_tx_pwr:%d, ",
+ "[trx_ctrl]", wl->rssi_level,
+ dm->trx_para_level,
+ dm->rf_trx_para.wl_tx_power);
- seq_printf(m,
- "wl_rx_lvl:%d, bt_tx_pwr_dec:%d, bt_rx_lna:%d(%s-tbl), wl_btg_rx:%d\n",
- dm->rf_trx_para.wl_rx_gain, dm->rf_trx_para.bt_tx_power,
- dm->rf_trx_para.bt_rx_gain,
- (bt->hi_lna_rx ? "Hi" : "Ori"), dm->wl_btg_rx);
+ p += scnprintf(p, end - p,
+ "wl_rx_lvl:%d, bt_tx_pwr_dec:%d, bt_rx_lna:%d(%s-tbl), wl_btg_rx:%d\n",
+ dm->rf_trx_para.wl_rx_gain,
+ dm->rf_trx_para.bt_tx_power,
+ dm->rf_trx_para.bt_rx_gain,
+ (bt->hi_lna_rx ? "Hi" : "Ori"), dm->wl_btg_rx);
- seq_printf(m,
- " %-15s : wl_tx_limit[en:%d/max_t:%dus/max_retry:%d], bt_slot_reg:%d-TU, bt_scan_rx_low_pri:%d\n",
- "[dm_ctrl]", dm->wl_tx_limit.enable, dm->wl_tx_limit.tx_time,
- dm->wl_tx_limit.tx_retry, btc->bt_req_len, bt->scan_rx_low_pri);
+ p += scnprintf(p, end - p,
+ " %-15s : wl_tx_limit[en:%d/max_t:%dus/max_retry:%d], bt_slot_reg:%d-TU, bt_scan_rx_low_pri:%d\n",
+ "[dm_ctrl]", dm->wl_tx_limit.enable,
+ dm->wl_tx_limit.tx_time,
+ dm->wl_tx_limit.tx_retry, btc->bt_req_len,
+ bt->scan_rx_low_pri);
+
+ return p - buf;
}
-static void _show_error(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_error(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
union rtw89_btc_fbtc_cysta_info *pcysta;
+ char *p = buf, *end = buf + bufsz;
u32 except_cnt, exception_map;
pcysta = &pfwinfo->rpt_fbtc_cysta.finfo;
@@ -8985,81 +9029,87 @@ static void _show_error(struct rtw89_dev *rtwdev, struct seq_file *m)
except_cnt = pcysta->v7.except_cnt;
exception_map = le32_to_cpu(pcysta->v7.except_map);
} else {
- return;
+ return 0;
}
if (pfwinfo->event[BTF_EVNT_BUF_OVERFLOW] == 0 && except_cnt == 0 &&
!pfwinfo->len_mismch && !pfwinfo->fver_mismch)
- return;
+ return 0;
- seq_printf(m, " %-15s : ", "[error]");
+ p += scnprintf(p, end - p, " %-15s : ", "[error]");
if (pfwinfo->event[BTF_EVNT_BUF_OVERFLOW]) {
- seq_printf(m,
- "overflow-cnt: %d, ",
- pfwinfo->event[BTF_EVNT_BUF_OVERFLOW]);
+ p += scnprintf(p, end - p,
+ "overflow-cnt: %d, ",
+ pfwinfo->event[BTF_EVNT_BUF_OVERFLOW]);
}
if (pfwinfo->len_mismch) {
- seq_printf(m,
- "len-mismatch: 0x%x, ",
- pfwinfo->len_mismch);
+ p += scnprintf(p, end - p,
+ "len-mismatch: 0x%x, ",
+ pfwinfo->len_mismch);
}
if (pfwinfo->fver_mismch) {
- seq_printf(m,
- "fver-mismatch: 0x%x, ",
- pfwinfo->fver_mismch);
+ p += scnprintf(p, end - p,
+ "fver-mismatch: 0x%x, ",
+ pfwinfo->fver_mismch);
}
/* cycle statistics exceptions */
if (exception_map || except_cnt) {
- seq_printf(m,
- "exception-type: 0x%x, exception-cnt = %d",
- exception_map, except_cnt);
+ p += scnprintf(p, end - p,
+ "exception-type: 0x%x, exception-cnt = %d",
+ exception_map, except_cnt);
}
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
+
+ return p - buf;
}
-static void _show_fbtc_tdma(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_fbtc_tdma(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
struct rtw89_btc_fbtc_tdma *t = NULL;
+ char *p = buf, *end = buf + bufsz;
pcinfo = &pfwinfo->rpt_fbtc_tdma.cinfo;
if (!pcinfo->valid)
- return;
+ return 0;
if (ver->fcxtdma == 1)
t = &pfwinfo->rpt_fbtc_tdma.finfo.v1;
else
t = &pfwinfo->rpt_fbtc_tdma.finfo.v3.tdma;
- seq_printf(m,
- " %-15s : ", "[tdma_policy]");
- seq_printf(m,
- "type:%d, rx_flow_ctrl:%d, tx_pause:%d, ",
- (u32)t->type,
- t->rxflctrl, t->txpause);
+ p += scnprintf(p, end - p,
+ " %-15s : ", "[tdma_policy]");
+ p += scnprintf(p, end - p,
+ "type:%d, rx_flow_ctrl:%d, tx_pause:%d, ",
+ (u32)t->type,
+ t->rxflctrl, t->txpause);
- seq_printf(m,
- "wl_toggle_n:%d, leak_n:%d, ext_ctrl:%d, ",
- t->wtgle_n, t->leak_n, t->ext_ctrl);
+ p += scnprintf(p, end - p,
+ "wl_toggle_n:%d, leak_n:%d, ext_ctrl:%d, ",
+ t->wtgle_n, t->leak_n, t->ext_ctrl);
- seq_printf(m,
- "policy_type:%d",
- (u32)btc->policy_type);
+ p += scnprintf(p, end - p,
+ "policy_type:%d",
+ (u32)btc->policy_type);
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
+
+ return p - buf;
}
-static void _show_fbtc_slots(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_fbtc_slots(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
+ char *p = buf, *end = buf + bufsz;
u16 dur, cxtype;
u32 tbl;
u8 i = 0;
@@ -9074,28 +9124,30 @@ static void _show_fbtc_slots(struct rtw89_dev *rtwdev, struct seq_file *m)
tbl = le32_to_cpu(dm->slot_now.v7[i].cxtbl);
cxtype = le16_to_cpu(dm->slot_now.v7[i].cxtype);
} else {
- return;
+ return 0;
}
if (i % 5 == 0)
- seq_printf(m,
- " %-15s : %5s[%03d/0x%x/%d]",
- "[slot_list]",
- id_to_slot((u32)i),
- dur, tbl, cxtype);
+ p += scnprintf(p, end - p,
+ " %-15s : %5s[%03d/0x%x/%d]",
+ "[slot_list]",
+ id_to_slot((u32)i),
+ dur, tbl, cxtype);
else
- seq_printf(m,
- ", %5s[%03d/0x%x/%d]",
- id_to_slot((u32)i),
- dur, tbl, cxtype);
+ p += scnprintf(p, end - p,
+ ", %5s[%03d/0x%x/%d]",
+ id_to_slot((u32)i),
+ dur, tbl, cxtype);
if (i % 5 == 4)
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
+
+ return p - buf;
}
-static void _show_fbtc_cysta_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_fbtc_cysta_v2(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
@@ -9104,63 +9156,64 @@ static void _show_fbtc_cysta_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
struct rtw89_btc_fbtc_cysta_v2 *pcysta_le32 = NULL;
union rtw89_btc_fbtc_rxflct r;
- u8 i, cnt = 0, slot_pair;
u16 cycle, c_begin, c_end, store_index;
+ char *p = buf, *end = buf + bufsz;
+ u8 i, cnt = 0, slot_pair;
pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo;
if (!pcinfo->valid)
- return;
+ return 0;
pcysta_le32 = &pfwinfo->rpt_fbtc_cysta.finfo.v2;
- seq_printf(m,
- " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]",
- "[cycle_cnt]",
- le16_to_cpu(pcysta_le32->cycles),
- le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_ALL]),
- le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_ALL_OK]),
- le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_BT_SLOT]),
- le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_BT_OK]));
+ p += scnprintf(p, end - p,
+ " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]",
+ "[cycle_cnt]",
+ le16_to_cpu(pcysta_le32->cycles),
+ le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_ALL]),
+ le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_ALL_OK]),
+ le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_BT_SLOT]),
+ le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_BT_OK]));
for (i = 0; i < CXST_MAX; i++) {
if (!le32_to_cpu(pcysta_le32->slot_cnt[i]))
continue;
- seq_printf(m, ", %s:%d", id_to_slot((u32)i),
- le32_to_cpu(pcysta_le32->slot_cnt[i]));
+ p += scnprintf(p, end - p, ", %s:%d", id_to_slot((u32)i),
+ le32_to_cpu(pcysta_le32->slot_cnt[i]));
}
if (dm->tdma_now.rxflctrl) {
- seq_printf(m, ", leak_rx:%d",
- le32_to_cpu(pcysta_le32->leakrx_cnt));
+ p += scnprintf(p, end - p, ", leak_rx:%d",
+ le32_to_cpu(pcysta_le32->leakrx_cnt));
}
if (le32_to_cpu(pcysta_le32->collision_cnt)) {
- seq_printf(m, ", collision:%d",
- le32_to_cpu(pcysta_le32->collision_cnt));
+ p += scnprintf(p, end - p, ", collision:%d",
+ le32_to_cpu(pcysta_le32->collision_cnt));
}
if (le32_to_cpu(pcysta_le32->skip_cnt)) {
- seq_printf(m, ", skip:%d",
- le32_to_cpu(pcysta_le32->skip_cnt));
- }
- seq_puts(m, "\n");
-
- seq_printf(m, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
- "[cycle_time]",
- le16_to_cpu(pcysta_le32->tavg_cycle[CXT_WL]),
- le16_to_cpu(pcysta_le32->tavg_cycle[CXT_BT]),
- le16_to_cpu(pcysta_le32->tavg_lk) / 1000,
- le16_to_cpu(pcysta_le32->tavg_lk) % 1000);
- seq_printf(m, ", max_t[wl:%d/bt:%d/lk:%d.%03d]",
- le16_to_cpu(pcysta_le32->tmax_cycle[CXT_WL]),
- le16_to_cpu(pcysta_le32->tmax_cycle[CXT_BT]),
- le16_to_cpu(pcysta_le32->tmax_lk) / 1000,
- le16_to_cpu(pcysta_le32->tmax_lk) % 1000);
- seq_printf(m, ", maxdiff_t[wl:%d/bt:%d]\n",
- le16_to_cpu(pcysta_le32->tmaxdiff_cycle[CXT_WL]),
- le16_to_cpu(pcysta_le32->tmaxdiff_cycle[CXT_BT]));
+ p += scnprintf(p, end - p, ", skip:%d",
+ le32_to_cpu(pcysta_le32->skip_cnt));
+ }
+ p += scnprintf(p, end - p, "\n");
+
+ p += scnprintf(p, end - p, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
+ "[cycle_time]",
+ le16_to_cpu(pcysta_le32->tavg_cycle[CXT_WL]),
+ le16_to_cpu(pcysta_le32->tavg_cycle[CXT_BT]),
+ le16_to_cpu(pcysta_le32->tavg_lk) / 1000,
+ le16_to_cpu(pcysta_le32->tavg_lk) % 1000);
+ p += scnprintf(p, end - p, ", max_t[wl:%d/bt:%d/lk:%d.%03d]",
+ le16_to_cpu(pcysta_le32->tmax_cycle[CXT_WL]),
+ le16_to_cpu(pcysta_le32->tmax_cycle[CXT_BT]),
+ le16_to_cpu(pcysta_le32->tmax_lk) / 1000,
+ le16_to_cpu(pcysta_le32->tmax_lk) % 1000);
+ p += scnprintf(p, end - p, ", maxdiff_t[wl:%d/bt:%d]\n",
+ le16_to_cpu(pcysta_le32->tmaxdiff_cycle[CXT_WL]),
+ le16_to_cpu(pcysta_le32->tmaxdiff_cycle[CXT_BT]));
if (le16_to_cpu(pcysta_le32->cycles) <= 1)
- return;
+ goto out;
/* 1 cycle record 1 wl-slot and 1 bt-slot */
slot_pair = BTC_CYCLE_SLOT_MAX / 2;
@@ -9177,53 +9230,57 @@ static void _show_fbtc_cysta_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
store_index = ((cycle - 1) % slot_pair) * 2;
if (cnt % (BTC_CYCLE_SLOT_MAX / 4) == 1)
- seq_printf(m,
- " %-15s : ->b%02d->w%02d", "[cycle_step]",
- le16_to_cpu(pcysta_le32->tslot_cycle[store_index]),
- le16_to_cpu(pcysta_le32->tslot_cycle[store_index + 1]));
+ p += scnprintf(p, end - p,
+ " %-15s : ->b%02d->w%02d",
+ "[cycle_step]",
+ le16_to_cpu(pcysta_le32->tslot_cycle[store_index]),
+ le16_to_cpu(pcysta_le32->tslot_cycle[store_index + 1]));
else
- seq_printf(m,
- "->b%02d->w%02d",
- le16_to_cpu(pcysta_le32->tslot_cycle[store_index]),
- le16_to_cpu(pcysta_le32->tslot_cycle[store_index + 1]));
+ p += scnprintf(p, end - p,
+ "->b%02d->w%02d",
+ le16_to_cpu(pcysta_le32->tslot_cycle[store_index]),
+ le16_to_cpu(pcysta_le32->tslot_cycle[store_index + 1]));
if (cnt % (BTC_CYCLE_SLOT_MAX / 4) == 0 || cnt == c_end)
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
if (a2dp->exist) {
- seq_printf(m,
- " %-15s : a2dp_ept:%d, a2dp_late:%d",
- "[a2dp_t_sta]",
- le16_to_cpu(pcysta_le32->a2dpept),
- le16_to_cpu(pcysta_le32->a2dpeptto));
-
- seq_printf(m,
- ", avg_t:%d, max_t:%d",
- le16_to_cpu(pcysta_le32->tavg_a2dpept),
- le16_to_cpu(pcysta_le32->tmax_a2dpept));
+ p += scnprintf(p, end - p,
+ " %-15s : a2dp_ept:%d, a2dp_late:%d",
+ "[a2dp_t_sta]",
+ le16_to_cpu(pcysta_le32->a2dpept),
+ le16_to_cpu(pcysta_le32->a2dpeptto));
+
+ p += scnprintf(p, end - p,
+ ", avg_t:%d, max_t:%d",
+ le16_to_cpu(pcysta_le32->tavg_a2dpept),
+ le16_to_cpu(pcysta_le32->tmax_a2dpept));
r.val = dm->tdma_now.rxflctrl;
if (r.type && r.tgln_n) {
- seq_printf(m,
- ", cycle[PSTDMA:%d/TDMA:%d], ",
- le16_to_cpu(pcysta_le32->cycles_a2dp[CXT_FLCTRL_ON]),
- le16_to_cpu(pcysta_le32->cycles_a2dp[CXT_FLCTRL_OFF]));
-
- seq_printf(m,
- "avg_t[PSTDMA:%d/TDMA:%d], ",
- le16_to_cpu(pcysta_le32->tavg_a2dp[CXT_FLCTRL_ON]),
- le16_to_cpu(pcysta_le32->tavg_a2dp[CXT_FLCTRL_OFF]));
-
- seq_printf(m,
- "max_t[PSTDMA:%d/TDMA:%d]",
- le16_to_cpu(pcysta_le32->tmax_a2dp[CXT_FLCTRL_ON]),
- le16_to_cpu(pcysta_le32->tmax_a2dp[CXT_FLCTRL_OFF]));
+ p += scnprintf(p, end - p,
+ ", cycle[PSTDMA:%d/TDMA:%d], ",
+ le16_to_cpu(pcysta_le32->cycles_a2dp[CXT_FLCTRL_ON]),
+ le16_to_cpu(pcysta_le32->cycles_a2dp[CXT_FLCTRL_OFF]));
+
+ p += scnprintf(p, end - p,
+ "avg_t[PSTDMA:%d/TDMA:%d], ",
+ le16_to_cpu(pcysta_le32->tavg_a2dp[CXT_FLCTRL_ON]),
+ le16_to_cpu(pcysta_le32->tavg_a2dp[CXT_FLCTRL_OFF]));
+
+ p += scnprintf(p, end - p,
+ "max_t[PSTDMA:%d/TDMA:%d]",
+ le16_to_cpu(pcysta_le32->tmax_a2dp[CXT_FLCTRL_ON]),
+ le16_to_cpu(pcysta_le32->tmax_a2dp[CXT_FLCTRL_OFF]));
}
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
+
+out:
+ return p - buf;
}
-static void _show_fbtc_cysta_v3(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_fbtc_cysta_v3(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_bt_a2dp_desc *a2dp = &btc->cx.bt.link_info.a2dp_desc;
@@ -9234,60 +9291,64 @@ static void _show_fbtc_cysta_v3(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_rpt_cmn_info *pcinfo;
u8 i, cnt = 0, slot_pair, divide_cnt;
u16 cycle, c_begin, c_end, store_index;
+ char *p = buf, *end = buf + bufsz;
pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo;
if (!pcinfo->valid)
- return;
+ return 0;
pcysta = &pfwinfo->rpt_fbtc_cysta.finfo.v3;
- seq_printf(m,
- " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]",
- "[cycle_cnt]",
- le16_to_cpu(pcysta->cycles),
- le32_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]),
- le32_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]),
- le32_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]),
- le32_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK]));
+ p += scnprintf(p, end - p,
+ " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]",
+ "[cycle_cnt]",
+ le16_to_cpu(pcysta->cycles),
+ le32_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]),
+ le32_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]),
+ le32_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]),
+ le32_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK]));
for (i = 0; i < CXST_MAX; i++) {
if (!le32_to_cpu(pcysta->slot_cnt[i]))
continue;
- seq_printf(m, ", %s:%d", id_to_slot(i),
- le32_to_cpu(pcysta->slot_cnt[i]));
+ p += scnprintf(p, end - p, ", %s:%d", id_to_slot(i),
+ le32_to_cpu(pcysta->slot_cnt[i]));
}
if (dm->tdma_now.rxflctrl)
- seq_printf(m, ", leak_rx:%d", le32_to_cpu(pcysta->leak_slot.cnt_rximr));
+ p += scnprintf(p, end - p, ", leak_rx:%d",
+ le32_to_cpu(pcysta->leak_slot.cnt_rximr));
if (le32_to_cpu(pcysta->collision_cnt))
- seq_printf(m, ", collision:%d", le32_to_cpu(pcysta->collision_cnt));
+ p += scnprintf(p, end - p, ", collision:%d",
+ le32_to_cpu(pcysta->collision_cnt));
if (le32_to_cpu(pcysta->skip_cnt))
- seq_printf(m, ", skip:%d", le32_to_cpu(pcysta->skip_cnt));
-
- seq_puts(m, "\n");
-
- seq_printf(m, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
- "[cycle_time]",
- le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]),
- le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]),
- le16_to_cpu(pcysta->leak_slot.tavg) / 1000,
- le16_to_cpu(pcysta->leak_slot.tavg) % 1000);
- seq_printf(m,
- ", max_t[wl:%d/bt:%d/lk:%d.%03d]",
- le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]),
- le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]),
- le16_to_cpu(pcysta->leak_slot.tmax) / 1000,
- le16_to_cpu(pcysta->leak_slot.tmax) % 1000);
- seq_printf(m,
- ", maxdiff_t[wl:%d/bt:%d]\n",
- le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_WL]),
- le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_BT]));
+ p += scnprintf(p, end - p, ", skip:%d",
+ le32_to_cpu(pcysta->skip_cnt));
+
+ p += scnprintf(p, end - p, "\n");
+
+ p += scnprintf(p, end - p, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
+ "[cycle_time]",
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]),
+ le16_to_cpu(pcysta->leak_slot.tavg) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tavg) % 1000);
+ p += scnprintf(p, end - p,
+ ", max_t[wl:%d/bt:%d/lk:%d.%03d]",
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]),
+ le16_to_cpu(pcysta->leak_slot.tmax) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tmax) % 1000);
+ p += scnprintf(p, end - p,
+ ", maxdiff_t[wl:%d/bt:%d]\n",
+ le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_BT]));
cycle = le16_to_cpu(pcysta->cycles);
if (cycle <= 1)
- return;
+ goto out;
/* 1 cycle record 1 wl-slot and 1 bt-slot */
slot_pair = BTC_CYCLE_SLOT_MAX / 2;
@@ -9309,51 +9370,56 @@ static void _show_fbtc_cysta_v3(struct rtw89_dev *rtwdev, struct seq_file *m)
store_index = ((cycle - 1) % slot_pair) * 2;
if (cnt % divide_cnt == 1)
- seq_printf(m, " %-15s : ", "[cycle_step]");
+ p += scnprintf(p, end - p, " %-15s : ",
+ "[cycle_step]");
- seq_printf(m, "->b%02d",
- le16_to_cpu(pcysta->slot_step_time[store_index]));
+ p += scnprintf(p, end - p, "->b%02d",
+ le16_to_cpu(pcysta->slot_step_time[store_index]));
if (a2dp->exist) {
a2dp_trx = &pcysta->a2dp_trx[store_index];
- seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
- a2dp_trx->empty_cnt,
- a2dp_trx->retry_cnt,
- a2dp_trx->tx_rate ? 3 : 2,
- a2dp_trx->tx_cnt,
- a2dp_trx->ack_cnt,
- a2dp_trx->nack_cnt);
+ p += scnprintf(p, end - p, "(%d/%d/%dM/%d/%d/%d)",
+ a2dp_trx->empty_cnt,
+ a2dp_trx->retry_cnt,
+ a2dp_trx->tx_rate ? 3 : 2,
+ a2dp_trx->tx_cnt,
+ a2dp_trx->ack_cnt,
+ a2dp_trx->nack_cnt);
}
- seq_printf(m, "->w%02d",
- le16_to_cpu(pcysta->slot_step_time[store_index + 1]));
+ p += scnprintf(p, end - p, "->w%02d",
+ le16_to_cpu(pcysta->slot_step_time[store_index + 1]));
if (a2dp->exist) {
a2dp_trx = &pcysta->a2dp_trx[store_index + 1];
- seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
- a2dp_trx->empty_cnt,
- a2dp_trx->retry_cnt,
- a2dp_trx->tx_rate ? 3 : 2,
- a2dp_trx->tx_cnt,
- a2dp_trx->ack_cnt,
- a2dp_trx->nack_cnt);
+ p += scnprintf(p, end - p, "(%d/%d/%dM/%d/%d/%d)",
+ a2dp_trx->empty_cnt,
+ a2dp_trx->retry_cnt,
+ a2dp_trx->tx_rate ? 3 : 2,
+ a2dp_trx->tx_cnt,
+ a2dp_trx->ack_cnt,
+ a2dp_trx->nack_cnt);
}
if (cnt % divide_cnt == 0 || cnt == c_end)
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
if (a2dp->exist) {
- seq_printf(m, " %-15s : a2dp_ept:%d, a2dp_late:%d",
- "[a2dp_t_sta]",
- le16_to_cpu(pcysta->a2dp_ept.cnt),
- le16_to_cpu(pcysta->a2dp_ept.cnt_timeout));
+ p += scnprintf(p, end - p,
+ " %-15s : a2dp_ept:%d, a2dp_late:%d",
+ "[a2dp_t_sta]",
+ le16_to_cpu(pcysta->a2dp_ept.cnt),
+ le16_to_cpu(pcysta->a2dp_ept.cnt_timeout));
- seq_printf(m, ", avg_t:%d, max_t:%d",
- le16_to_cpu(pcysta->a2dp_ept.tavg),
- le16_to_cpu(pcysta->a2dp_ept.tmax));
+ p += scnprintf(p, end - p, ", avg_t:%d, max_t:%d",
+ le16_to_cpu(pcysta->a2dp_ept.tavg),
+ le16_to_cpu(pcysta->a2dp_ept.tmax));
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
+
+out:
+ return p - buf;
}
-static void _show_fbtc_cysta_v4(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_fbtc_cysta_v4(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_bt_a2dp_desc *a2dp = &btc->cx.bt.link_info.a2dp_desc;
@@ -9364,62 +9430,64 @@ static void _show_fbtc_cysta_v4(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_rpt_cmn_info *pcinfo;
u8 i, cnt = 0, slot_pair, divide_cnt;
u16 cycle, c_begin, c_end, store_index;
+ char *p = buf, *end = buf + bufsz;
pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo;
if (!pcinfo->valid)
- return;
+ return 0;
pcysta = &pfwinfo->rpt_fbtc_cysta.finfo.v4;
- seq_printf(m,
- " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]",
- "[cycle_cnt]",
- le16_to_cpu(pcysta->cycles),
- le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]),
- le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]),
- le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]),
- le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK]));
+ p += scnprintf(p, end - p,
+ " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]",
+ "[cycle_cnt]",
+ le16_to_cpu(pcysta->cycles),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK]));
for (i = 0; i < CXST_MAX; i++) {
if (!le16_to_cpu(pcysta->slot_cnt[i]))
continue;
- seq_printf(m, ", %s:%d", id_to_slot(i),
- le16_to_cpu(pcysta->slot_cnt[i]));
+ p += scnprintf(p, end - p, ", %s:%d", id_to_slot(i),
+ le16_to_cpu(pcysta->slot_cnt[i]));
}
if (dm->tdma_now.rxflctrl)
- seq_printf(m, ", leak_rx:%d",
- le32_to_cpu(pcysta->leak_slot.cnt_rximr));
+ p += scnprintf(p, end - p, ", leak_rx:%d",
+ le32_to_cpu(pcysta->leak_slot.cnt_rximr));
if (pcysta->collision_cnt)
- seq_printf(m, ", collision:%d", pcysta->collision_cnt);
+ p += scnprintf(p, end - p, ", collision:%d",
+ pcysta->collision_cnt);
if (le16_to_cpu(pcysta->skip_cnt))
- seq_printf(m, ", skip:%d",
- le16_to_cpu(pcysta->skip_cnt));
-
- seq_puts(m, "\n");
-
- seq_printf(m, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
- "[cycle_time]",
- le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]),
- le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]),
- le16_to_cpu(pcysta->leak_slot.tavg) / 1000,
- le16_to_cpu(pcysta->leak_slot.tavg) % 1000);
- seq_printf(m,
- ", max_t[wl:%d/bt:%d/lk:%d.%03d]",
- le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]),
- le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]),
- le16_to_cpu(pcysta->leak_slot.tmax) / 1000,
- le16_to_cpu(pcysta->leak_slot.tmax) % 1000);
- seq_printf(m,
- ", maxdiff_t[wl:%d/bt:%d]\n",
- le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_WL]),
- le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_BT]));
+ p += scnprintf(p, end - p, ", skip:%d",
+ le16_to_cpu(pcysta->skip_cnt));
+
+ p += scnprintf(p, end - p, "\n");
+
+ p += scnprintf(p, end - p, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
+ "[cycle_time]",
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]),
+ le16_to_cpu(pcysta->leak_slot.tavg) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tavg) % 1000);
+ p += scnprintf(p, end - p,
+ ", max_t[wl:%d/bt:%d/lk:%d.%03d]",
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]),
+ le16_to_cpu(pcysta->leak_slot.tmax) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tmax) % 1000);
+ p += scnprintf(p, end - p,
+ ", maxdiff_t[wl:%d/bt:%d]\n",
+ le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_BT]));
cycle = le16_to_cpu(pcysta->cycles);
if (cycle <= 1)
- return;
+ goto out;
/* 1 cycle record 1 wl-slot and 1 bt-slot */
slot_pair = BTC_CYCLE_SLOT_MAX / 2;
@@ -9441,51 +9509,56 @@ static void _show_fbtc_cysta_v4(struct rtw89_dev *rtwdev, struct seq_file *m)
store_index = ((cycle - 1) % slot_pair) * 2;
if (cnt % divide_cnt == 1)
- seq_printf(m, " %-15s : ", "[cycle_step]");
+ p += scnprintf(p, end - p, " %-15s : ",
+ "[cycle_step]");
- seq_printf(m, "->b%02d",
- le16_to_cpu(pcysta->slot_step_time[store_index]));
+ p += scnprintf(p, end - p, "->b%02d",
+ le16_to_cpu(pcysta->slot_step_time[store_index]));
if (a2dp->exist) {
a2dp_trx = &pcysta->a2dp_trx[store_index];
- seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
- a2dp_trx->empty_cnt,
- a2dp_trx->retry_cnt,
- a2dp_trx->tx_rate ? 3 : 2,
- a2dp_trx->tx_cnt,
- a2dp_trx->ack_cnt,
- a2dp_trx->nack_cnt);
+ p += scnprintf(p, end - p, "(%d/%d/%dM/%d/%d/%d)",
+ a2dp_trx->empty_cnt,
+ a2dp_trx->retry_cnt,
+ a2dp_trx->tx_rate ? 3 : 2,
+ a2dp_trx->tx_cnt,
+ a2dp_trx->ack_cnt,
+ a2dp_trx->nack_cnt);
}
- seq_printf(m, "->w%02d",
- le16_to_cpu(pcysta->slot_step_time[store_index + 1]));
+ p += scnprintf(p, end - p, "->w%02d",
+ le16_to_cpu(pcysta->slot_step_time[store_index + 1]));
if (a2dp->exist) {
a2dp_trx = &pcysta->a2dp_trx[store_index + 1];
- seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
- a2dp_trx->empty_cnt,
- a2dp_trx->retry_cnt,
- a2dp_trx->tx_rate ? 3 : 2,
- a2dp_trx->tx_cnt,
- a2dp_trx->ack_cnt,
- a2dp_trx->nack_cnt);
+ p += scnprintf(p, end - p, "(%d/%d/%dM/%d/%d/%d)",
+ a2dp_trx->empty_cnt,
+ a2dp_trx->retry_cnt,
+ a2dp_trx->tx_rate ? 3 : 2,
+ a2dp_trx->tx_cnt,
+ a2dp_trx->ack_cnt,
+ a2dp_trx->nack_cnt);
}
if (cnt % divide_cnt == 0 || cnt == c_end)
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
if (a2dp->exist) {
- seq_printf(m, " %-15s : a2dp_ept:%d, a2dp_late:%d",
- "[a2dp_t_sta]",
- le16_to_cpu(pcysta->a2dp_ept.cnt),
- le16_to_cpu(pcysta->a2dp_ept.cnt_timeout));
+ p += scnprintf(p, end - p,
+ " %-15s : a2dp_ept:%d, a2dp_late:%d",
+ "[a2dp_t_sta]",
+ le16_to_cpu(pcysta->a2dp_ept.cnt),
+ le16_to_cpu(pcysta->a2dp_ept.cnt_timeout));
- seq_printf(m, ", avg_t:%d, max_t:%d",
- le16_to_cpu(pcysta->a2dp_ept.tavg),
- le16_to_cpu(pcysta->a2dp_ept.tmax));
+ p += scnprintf(p, end - p, ", avg_t:%d, max_t:%d",
+ le16_to_cpu(pcysta->a2dp_ept.tavg),
+ le16_to_cpu(pcysta->a2dp_ept.tmax));
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
+
+out:
+ return p - buf;
}
-static void _show_fbtc_cysta_v5(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_fbtc_cysta_v5(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_bt_a2dp_desc *a2dp = &btc->cx.bt.link_info.a2dp_desc;
@@ -9496,58 +9569,60 @@ static void _show_fbtc_cysta_v5(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_rpt_cmn_info *pcinfo;
u8 i, cnt = 0, slot_pair, divide_cnt;
u16 cycle, c_begin, c_end, store_index;
+ char *p = buf, *end = buf + bufsz;
pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo;
if (!pcinfo->valid)
- return;
+ return 0;
pcysta = &pfwinfo->rpt_fbtc_cysta.finfo.v5;
- seq_printf(m,
- " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]",
- "[cycle_cnt]",
- le16_to_cpu(pcysta->cycles),
- le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]),
- le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]),
- le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]),
- le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK]));
+ p += scnprintf(p, end - p,
+ " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]",
+ "[cycle_cnt]",
+ le16_to_cpu(pcysta->cycles),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK]));
for (i = 0; i < CXST_MAX; i++) {
if (!le16_to_cpu(pcysta->slot_cnt[i]))
continue;
- seq_printf(m, ", %s:%d", id_to_slot(i),
- le16_to_cpu(pcysta->slot_cnt[i]));
+ p += scnprintf(p, end - p, ", %s:%d", id_to_slot(i),
+ le16_to_cpu(pcysta->slot_cnt[i]));
}
if (dm->tdma_now.rxflctrl)
- seq_printf(m, ", leak_rx:%d",
- le32_to_cpu(pcysta->leak_slot.cnt_rximr));
+ p += scnprintf(p, end - p, ", leak_rx:%d",
+ le32_to_cpu(pcysta->leak_slot.cnt_rximr));
if (pcysta->collision_cnt)
- seq_printf(m, ", collision:%d", pcysta->collision_cnt);
+ p += scnprintf(p, end - p, ", collision:%d",
+ pcysta->collision_cnt);
if (le16_to_cpu(pcysta->skip_cnt))
- seq_printf(m, ", skip:%d",
- le16_to_cpu(pcysta->skip_cnt));
-
- seq_puts(m, "\n");
-
- seq_printf(m, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
- "[cycle_time]",
- le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]),
- le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]),
- le16_to_cpu(pcysta->leak_slot.tavg) / 1000,
- le16_to_cpu(pcysta->leak_slot.tavg) % 1000);
- seq_printf(m,
- ", max_t[wl:%d/bt:%d/lk:%d.%03d]\n",
- le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]),
- le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]),
- le16_to_cpu(pcysta->leak_slot.tmax) / 1000,
- le16_to_cpu(pcysta->leak_slot.tmax) % 1000);
+ p += scnprintf(p, end - p, ", skip:%d",
+ le16_to_cpu(pcysta->skip_cnt));
+
+ p += scnprintf(p, end - p, "\n");
+
+ p += scnprintf(p, end - p, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
+ "[cycle_time]",
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]),
+ le16_to_cpu(pcysta->leak_slot.tavg) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tavg) % 1000);
+ p += scnprintf(p, end - p,
+ ", max_t[wl:%d/bt:%d/lk:%d.%03d]\n",
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]),
+ le16_to_cpu(pcysta->leak_slot.tmax) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tmax) % 1000);
cycle = le16_to_cpu(pcysta->cycles);
if (cycle <= 1)
- return;
+ goto out;
/* 1 cycle record 1 wl-slot and 1 bt-slot */
slot_pair = BTC_CYCLE_SLOT_MAX / 2;
@@ -9565,58 +9640,63 @@ static void _show_fbtc_cysta_v5(struct rtw89_dev *rtwdev, struct seq_file *m)
divide_cnt = BTC_CYCLE_SLOT_MAX / 4;
if (c_begin > c_end)
- return;
+ goto out;
for (cycle = c_begin; cycle <= c_end; cycle++) {
cnt++;
store_index = ((cycle - 1) % slot_pair) * 2;
if (cnt % divide_cnt == 1)
- seq_printf(m, " %-15s : ", "[cycle_step]");
+ p += scnprintf(p, end - p, " %-15s : ",
+ "[cycle_step]");
- seq_printf(m, "->b%02d",
- le16_to_cpu(pcysta->slot_step_time[store_index]));
+ p += scnprintf(p, end - p, "->b%02d",
+ le16_to_cpu(pcysta->slot_step_time[store_index]));
if (a2dp->exist) {
a2dp_trx = &pcysta->a2dp_trx[store_index];
- seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
- a2dp_trx->empty_cnt,
- a2dp_trx->retry_cnt,
- a2dp_trx->tx_rate ? 3 : 2,
- a2dp_trx->tx_cnt,
- a2dp_trx->ack_cnt,
- a2dp_trx->nack_cnt);
+ p += scnprintf(p, end - p, "(%d/%d/%dM/%d/%d/%d)",
+ a2dp_trx->empty_cnt,
+ a2dp_trx->retry_cnt,
+ a2dp_trx->tx_rate ? 3 : 2,
+ a2dp_trx->tx_cnt,
+ a2dp_trx->ack_cnt,
+ a2dp_trx->nack_cnt);
}
- seq_printf(m, "->w%02d",
- le16_to_cpu(pcysta->slot_step_time[store_index + 1]));
+ p += scnprintf(p, end - p, "->w%02d",
+ le16_to_cpu(pcysta->slot_step_time[store_index + 1]));
if (a2dp->exist) {
a2dp_trx = &pcysta->a2dp_trx[store_index + 1];
- seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
- a2dp_trx->empty_cnt,
- a2dp_trx->retry_cnt,
- a2dp_trx->tx_rate ? 3 : 2,
- a2dp_trx->tx_cnt,
- a2dp_trx->ack_cnt,
- a2dp_trx->nack_cnt);
+ p += scnprintf(p, end - p, "(%d/%d/%dM/%d/%d/%d)",
+ a2dp_trx->empty_cnt,
+ a2dp_trx->retry_cnt,
+ a2dp_trx->tx_rate ? 3 : 2,
+ a2dp_trx->tx_cnt,
+ a2dp_trx->ack_cnt,
+ a2dp_trx->nack_cnt);
}
if (cnt % divide_cnt == 0 || cnt == c_end)
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
if (a2dp->exist) {
- seq_printf(m, " %-15s : a2dp_ept:%d, a2dp_late:%d",
- "[a2dp_t_sta]",
- le16_to_cpu(pcysta->a2dp_ept.cnt),
- le16_to_cpu(pcysta->a2dp_ept.cnt_timeout));
+ p += scnprintf(p, end - p,
+ " %-15s : a2dp_ept:%d, a2dp_late:%d",
+ "[a2dp_t_sta]",
+ le16_to_cpu(pcysta->a2dp_ept.cnt),
+ le16_to_cpu(pcysta->a2dp_ept.cnt_timeout));
- seq_printf(m, ", avg_t:%d, max_t:%d",
- le16_to_cpu(pcysta->a2dp_ept.tavg),
- le16_to_cpu(pcysta->a2dp_ept.tmax));
+ p += scnprintf(p, end - p, ", avg_t:%d, max_t:%d",
+ le16_to_cpu(pcysta->a2dp_ept.tavg),
+ le16_to_cpu(pcysta->a2dp_ept.tmax));
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
+
+out:
+ return p - buf;
}
-static void _show_fbtc_cysta_v7(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_fbtc_cysta_v7(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc_bt_info *bt = &rtwdev->btc.cx.bt;
struct rtw89_btc_bt_a2dp_desc *a2dp = &bt->link_info.a2dp_desc;
@@ -9624,68 +9704,75 @@ static void _show_fbtc_cysta_v7(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_fbtc_cysta_v7 *pcysta = NULL;
struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
struct rtw89_btc_rpt_cmn_info *pcinfo;
+ char *p = buf, *end = buf + bufsz;
u16 cycle, c_begin, c_end, s_id;
u8 i, cnt = 0, divide_cnt;
u8 slot_pair;
pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo;
if (!pcinfo->valid)
- return;
+ return 0;
pcysta = &pfwinfo->rpt_fbtc_cysta.finfo.v7;
- seq_printf(m, "\n\r %-15s : cycle:%d", "[slot_stat]",
- le16_to_cpu(pcysta->cycles));
+ p += scnprintf(p, end - p, "\n\r %-15s : cycle:%d", "[slot_stat]",
+ le16_to_cpu(pcysta->cycles));
for (i = 0; i < CXST_MAX; i++) {
if (!le16_to_cpu(pcysta->slot_cnt[i]))
continue;
- seq_printf(m, ", %s:%d",
- id_to_slot(i), le16_to_cpu(pcysta->slot_cnt[i]));
+ p += scnprintf(p, end - p, ", %s:%d",
+ id_to_slot(i),
+ le16_to_cpu(pcysta->slot_cnt[i]));
}
if (dm->tdma_now.rxflctrl)
- seq_printf(m, ", leak_rx:%d",
- le32_to_cpu(pcysta->leak_slot.cnt_rximr));
+ p += scnprintf(p, end - p, ", leak_rx:%d",
+ le32_to_cpu(pcysta->leak_slot.cnt_rximr));
if (pcysta->collision_cnt)
- seq_printf(m, ", collision:%d", pcysta->collision_cnt);
+ p += scnprintf(p, end - p, ", collision:%d",
+ pcysta->collision_cnt);
if (pcysta->skip_cnt)
- seq_printf(m, ", skip:%d", le16_to_cpu(pcysta->skip_cnt));
-
- seq_printf(m, "\n\r %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
- "[cycle_stat]",
- le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]),
- le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]),
- le16_to_cpu(pcysta->leak_slot.tavg) / 1000,
- le16_to_cpu(pcysta->leak_slot.tavg) % 1000);
- seq_printf(m, ", max_t[wl:%d/bt:%d(>%dms:%d)/lk:%d.%03d]",
- le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]),
- le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]),
- dm->bt_slot_flood, dm->cnt_dm[BTC_DCNT_BT_SLOT_FLOOD],
- le16_to_cpu(pcysta->leak_slot.tamx) / 1000,
- le16_to_cpu(pcysta->leak_slot.tamx) % 1000);
- seq_printf(m, ", bcn[all:%d/ok:%d/in_bt:%d/in_bt_ok:%d]",
- le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]),
- le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]),
- le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]),
- le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK]));
+ p += scnprintf(p, end - p, ", skip:%d",
+ le16_to_cpu(pcysta->skip_cnt));
+
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
+ "[cycle_stat]",
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]),
+ le16_to_cpu(pcysta->leak_slot.tavg) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tavg) % 1000);
+ p += scnprintf(p, end - p,
+ ", max_t[wl:%d/bt:%d(>%dms:%d)/lk:%d.%03d]",
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]),
+ dm->bt_slot_flood, dm->cnt_dm[BTC_DCNT_BT_SLOT_FLOOD],
+ le16_to_cpu(pcysta->leak_slot.tamx) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tamx) % 1000);
+ p += scnprintf(p, end - p, ", bcn[all:%d/ok:%d/in_bt:%d/in_bt_ok:%d]",
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK]));
if (a2dp->exist) {
- seq_printf(m,
- "\n\r %-15s : a2dp_ept:%d, a2dp_late:%d(streak 2S:%d/max:%d)",
- "[a2dp_stat]",
- le16_to_cpu(pcysta->a2dp_ept.cnt),
- le16_to_cpu(pcysta->a2dp_ept.cnt_timeout),
- a2dp->no_empty_streak_2s, a2dp->no_empty_streak_max);
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : a2dp_ept:%d, a2dp_late:%d(streak 2S:%d/max:%d)",
+ "[a2dp_stat]",
+ le16_to_cpu(pcysta->a2dp_ept.cnt),
+ le16_to_cpu(pcysta->a2dp_ept.cnt_timeout),
+ a2dp->no_empty_streak_2s,
+ a2dp->no_empty_streak_max);
- seq_printf(m, ", avg_t:%d, max_t:%d",
- le16_to_cpu(pcysta->a2dp_ept.tavg),
- le16_to_cpu(pcysta->a2dp_ept.tmax));
+ p += scnprintf(p, end - p, ", avg_t:%d, max_t:%d",
+ le16_to_cpu(pcysta->a2dp_ept.tavg),
+ le16_to_cpu(pcysta->a2dp_ept.tmax));
}
if (le16_to_cpu(pcysta->cycles) <= 1)
- return;
+ goto out;
/* 1 cycle = 1 wl-slot + 1 bt-slot */
slot_pair = BTC_CYCLE_SLOT_MAX / 2;
@@ -9703,7 +9790,7 @@ static void _show_fbtc_cysta_v7(struct rtw89_dev *rtwdev, struct seq_file *m)
divide_cnt = 6;
if (c_begin > c_end)
- return;
+ goto out;
for (cycle = c_begin; cycle <= c_end; cycle++) {
cnt++;
@@ -9711,129 +9798,142 @@ static void _show_fbtc_cysta_v7(struct rtw89_dev *rtwdev, struct seq_file *m)
if (cnt % divide_cnt == 1) {
if (a2dp->exist)
- seq_printf(m, "\n\r %-15s : ", "[slotT_wermtan]");
+ p += scnprintf(p, end - p, "\n\r %-15s : ",
+ "[slotT_wermtan]");
else
- seq_printf(m, "\n\r %-15s : ", "[slotT_rxerr]");
+ p += scnprintf(p, end - p, "\n\r %-15s : ",
+ "[slotT_rxerr]");
}
- seq_printf(m, "->b%d", le16_to_cpu(pcysta->slot_step_time[s_id]));
+ p += scnprintf(p, end - p, "->b%d",
+ le16_to_cpu(pcysta->slot_step_time[s_id]));
if (a2dp->exist)
- seq_printf(m, "(%d/%d/%d/%dM/%d/%d/%d)",
- pcysta->wl_rx_err_ratio[s_id],
- pcysta->a2dp_trx[s_id].empty_cnt,
- pcysta->a2dp_trx[s_id].retry_cnt,
- (pcysta->a2dp_trx[s_id].tx_rate ? 3 : 2),
- pcysta->a2dp_trx[s_id].tx_cnt,
- pcysta->a2dp_trx[s_id].ack_cnt,
- pcysta->a2dp_trx[s_id].nack_cnt);
+ p += scnprintf(p, end - p, "(%d/%d/%d/%dM/%d/%d/%d)",
+ pcysta->wl_rx_err_ratio[s_id],
+ pcysta->a2dp_trx[s_id].empty_cnt,
+ pcysta->a2dp_trx[s_id].retry_cnt,
+ (pcysta->a2dp_trx[s_id].tx_rate ? 3 : 2),
+ pcysta->a2dp_trx[s_id].tx_cnt,
+ pcysta->a2dp_trx[s_id].ack_cnt,
+ pcysta->a2dp_trx[s_id].nack_cnt);
else
- seq_printf(m, "(%d)", pcysta->wl_rx_err_ratio[s_id]);
+ p += scnprintf(p, end - p, "(%d)",
+ pcysta->wl_rx_err_ratio[s_id]);
- seq_printf(m, "->w%d", le16_to_cpu(pcysta->slot_step_time[s_id + 1]));
+ p += scnprintf(p, end - p, "->w%d",
+ le16_to_cpu(pcysta->slot_step_time[s_id + 1]));
if (a2dp->exist)
- seq_printf(m, "(%d/%d/%d/%dM/%d/%d/%d)",
- pcysta->wl_rx_err_ratio[s_id + 1],
- pcysta->a2dp_trx[s_id + 1].empty_cnt,
- pcysta->a2dp_trx[s_id + 1].retry_cnt,
- (pcysta->a2dp_trx[s_id + 1].tx_rate ? 3 : 2),
- pcysta->a2dp_trx[s_id + 1].tx_cnt,
- pcysta->a2dp_trx[s_id + 1].ack_cnt,
- pcysta->a2dp_trx[s_id + 1].nack_cnt);
+ p += scnprintf(p, end - p, "(%d/%d/%d/%dM/%d/%d/%d)",
+ pcysta->wl_rx_err_ratio[s_id + 1],
+ pcysta->a2dp_trx[s_id + 1].empty_cnt,
+ pcysta->a2dp_trx[s_id + 1].retry_cnt,
+ (pcysta->a2dp_trx[s_id + 1].tx_rate ? 3 : 2),
+ pcysta->a2dp_trx[s_id + 1].tx_cnt,
+ pcysta->a2dp_trx[s_id + 1].ack_cnt,
+ pcysta->a2dp_trx[s_id + 1].nack_cnt);
else
- seq_printf(m, "(%d)", pcysta->wl_rx_err_ratio[s_id + 1]);
+ p += scnprintf(p, end - p, "(%d)",
+ pcysta->wl_rx_err_ratio[s_id + 1]);
}
+
+out:
+ return p - buf;
}
-static void _show_fbtc_nullsta(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_fbtc_nullsta(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_rpt_cmn_info *pcinfo;
union rtw89_btc_fbtc_cynullsta_info *ns;
+ char *p = buf, *end = buf + bufsz;
u8 i = 0;
if (!btc->dm.tdma_now.rxflctrl)
- return;
+ return 0;
pcinfo = &pfwinfo->rpt_fbtc_nullsta.cinfo;
if (!pcinfo->valid)
- return;
+ return 0;
ns = &pfwinfo->rpt_fbtc_nullsta.finfo;
if (ver->fcxnullsta == 1) {
for (i = 0; i < 2; i++) {
- seq_printf(m, " %-15s : ", "[NULL-STA]");
- seq_printf(m, "null-%d", i);
- seq_printf(m, "[ok:%d/",
- le32_to_cpu(ns->v1.result[i][1]));
- seq_printf(m, "fail:%d/",
- le32_to_cpu(ns->v1.result[i][0]));
- seq_printf(m, "on_time:%d/",
- le32_to_cpu(ns->v1.result[i][2]));
- seq_printf(m, "retry:%d/",
- le32_to_cpu(ns->v1.result[i][3]));
- seq_printf(m, "avg_t:%d.%03d/",
- le32_to_cpu(ns->v1.avg_t[i]) / 1000,
- le32_to_cpu(ns->v1.avg_t[i]) % 1000);
- seq_printf(m, "max_t:%d.%03d]\n",
- le32_to_cpu(ns->v1.max_t[i]) / 1000,
- le32_to_cpu(ns->v1.max_t[i]) % 1000);
+ p += scnprintf(p, end - p, " %-15s : ", "[NULL-STA]");
+ p += scnprintf(p, end - p, "null-%d", i);
+ p += scnprintf(p, end - p, "[ok:%d/",
+ le32_to_cpu(ns->v1.result[i][1]));
+ p += scnprintf(p, end - p, "fail:%d/",
+ le32_to_cpu(ns->v1.result[i][0]));
+ p += scnprintf(p, end - p, "on_time:%d/",
+ le32_to_cpu(ns->v1.result[i][2]));
+ p += scnprintf(p, end - p, "retry:%d/",
+ le32_to_cpu(ns->v1.result[i][3]));
+ p += scnprintf(p, end - p, "avg_t:%d.%03d/",
+ le32_to_cpu(ns->v1.avg_t[i]) / 1000,
+ le32_to_cpu(ns->v1.avg_t[i]) % 1000);
+ p += scnprintf(p, end - p, "max_t:%d.%03d]\n",
+ le32_to_cpu(ns->v1.max_t[i]) / 1000,
+ le32_to_cpu(ns->v1.max_t[i]) % 1000);
}
} else if (ver->fcxnullsta == 7) {
for (i = 0; i < 2; i++) {
- seq_printf(m, " %-15s : ", "[NULL-STA]");
- seq_printf(m, "null-%d", i);
- seq_printf(m, "[Tx:%d/",
- le32_to_cpu(ns->v7.result[i][4]));
- seq_printf(m, "[ok:%d/",
- le32_to_cpu(ns->v7.result[i][1]));
- seq_printf(m, "fail:%d/",
- le32_to_cpu(ns->v7.result[i][0]));
- seq_printf(m, "on_time:%d/",
- le32_to_cpu(ns->v7.result[i][2]));
- seq_printf(m, "retry:%d/",
- le32_to_cpu(ns->v7.result[i][3]));
- seq_printf(m, "avg_t:%d.%03d/",
- le32_to_cpu(ns->v7.tavg[i]) / 1000,
- le32_to_cpu(ns->v7.tavg[i]) % 1000);
- seq_printf(m, "max_t:%d.%03d]\n",
- le32_to_cpu(ns->v7.tmax[i]) / 1000,
- le32_to_cpu(ns->v7.tmax[i]) % 1000);
+ p += scnprintf(p, end - p, " %-15s : ", "[NULL-STA]");
+ p += scnprintf(p, end - p, "null-%d", i);
+ p += scnprintf(p, end - p, "[Tx:%d/",
+ le32_to_cpu(ns->v7.result[i][4]));
+ p += scnprintf(p, end - p, "[ok:%d/",
+ le32_to_cpu(ns->v7.result[i][1]));
+ p += scnprintf(p, end - p, "fail:%d/",
+ le32_to_cpu(ns->v7.result[i][0]));
+ p += scnprintf(p, end - p, "on_time:%d/",
+ le32_to_cpu(ns->v7.result[i][2]));
+ p += scnprintf(p, end - p, "retry:%d/",
+ le32_to_cpu(ns->v7.result[i][3]));
+ p += scnprintf(p, end - p, "avg_t:%d.%03d/",
+ le32_to_cpu(ns->v7.tavg[i]) / 1000,
+ le32_to_cpu(ns->v7.tavg[i]) % 1000);
+ p += scnprintf(p, end - p, "max_t:%d.%03d]\n",
+ le32_to_cpu(ns->v7.tmax[i]) / 1000,
+ le32_to_cpu(ns->v7.tmax[i]) % 1000);
}
} else {
for (i = 0; i < 2; i++) {
- seq_printf(m, " %-15s : ", "[NULL-STA]");
- seq_printf(m, "null-%d", i);
- seq_printf(m, "[Tx:%d/",
- le32_to_cpu(ns->v2.result[i][4]));
- seq_printf(m, "[ok:%d/",
- le32_to_cpu(ns->v2.result[i][1]));
- seq_printf(m, "fail:%d/",
- le32_to_cpu(ns->v2.result[i][0]));
- seq_printf(m, "on_time:%d/",
- le32_to_cpu(ns->v2.result[i][2]));
- seq_printf(m, "retry:%d/",
- le32_to_cpu(ns->v2.result[i][3]));
- seq_printf(m, "avg_t:%d.%03d/",
- le32_to_cpu(ns->v2.avg_t[i]) / 1000,
- le32_to_cpu(ns->v2.avg_t[i]) % 1000);
- seq_printf(m, "max_t:%d.%03d]\n",
- le32_to_cpu(ns->v2.max_t[i]) / 1000,
- le32_to_cpu(ns->v2.max_t[i]) % 1000);
+ p += scnprintf(p, end - p, " %-15s : ", "[NULL-STA]");
+ p += scnprintf(p, end - p, "null-%d", i);
+ p += scnprintf(p, end - p, "[Tx:%d/",
+ le32_to_cpu(ns->v2.result[i][4]));
+ p += scnprintf(p, end - p, "[ok:%d/",
+ le32_to_cpu(ns->v2.result[i][1]));
+ p += scnprintf(p, end - p, "fail:%d/",
+ le32_to_cpu(ns->v2.result[i][0]));
+ p += scnprintf(p, end - p, "on_time:%d/",
+ le32_to_cpu(ns->v2.result[i][2]));
+ p += scnprintf(p, end - p, "retry:%d/",
+ le32_to_cpu(ns->v2.result[i][3]));
+ p += scnprintf(p, end - p, "avg_t:%d.%03d/",
+ le32_to_cpu(ns->v2.avg_t[i]) / 1000,
+ le32_to_cpu(ns->v2.avg_t[i]) % 1000);
+ p += scnprintf(p, end - p, "max_t:%d.%03d]\n",
+ le32_to_cpu(ns->v2.max_t[i]) / 1000,
+ le32_to_cpu(ns->v2.max_t[i]) % 1000);
}
}
+
+ return p - buf;
}
-static void _show_fbtc_step_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_fbtc_step_v2(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
struct rtw89_btc_fbtc_steps_v2 *pstep = NULL;
const struct rtw89_btc_ver *ver = btc->ver;
+ char *p = buf, *end = buf + bufsz;
u8 type, val, cnt = 0, state = 0;
bool outloop = false;
u16 i, diff_t, n_start = 0, n_stop = 0;
@@ -9841,14 +9941,14 @@ static void _show_fbtc_step_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
pcinfo = &pfwinfo->rpt_fbtc_step.cinfo;
if (!pcinfo->valid)
- return;
+ return 0;
pstep = &pfwinfo->rpt_fbtc_step.finfo.v2;
pos_old = le16_to_cpu(pstep->pos_old);
pos_new = le16_to_cpu(pstep->pos_new);
if (pcinfo->req_fver != pstep->fver)
- return;
+ return 0;
/* store step info by using ring instead of FIFO*/
do {
@@ -9877,13 +9977,15 @@ static void _show_fbtc_step_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
continue;
if (cnt % 10 == 0)
- seq_printf(m, " %-15s : ", "[steps]");
+ p += scnprintf(p, end - p,
+ " %-15s : ", "[steps]");
- seq_printf(m, "-> %s(%02d)(%02d)",
- (type == CXSTEP_SLOT ? "SLT" :
- "EVT"), (u32)val, diff_t);
+ p += scnprintf(p, end - p,
+ "-> %s(%02d)(%02d)",
+ (type == CXSTEP_SLOT ? "SLT" :
+ "EVT"), (u32)val, diff_t);
if (cnt % 10 == 9)
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
cnt++;
}
@@ -9900,29 +10002,32 @@ static void _show_fbtc_step_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
break;
}
} while (!outloop);
+
+ return p - buf;
}
-static void _show_fbtc_step_v3(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_fbtc_step_v3(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_rpt_cmn_info *pcinfo;
struct rtw89_btc_fbtc_steps_v3 *pstep;
u32 i, n_begin, n_end, array_idx, cnt = 0;
+ char *p = buf, *end = buf + bufsz;
u8 type, val;
u16 diff_t;
if ((pfwinfo->rpt_en_map &
rtw89_btc_fw_rpt_ver(rtwdev, RPT_EN_FW_STEP_INFO)) == 0)
- return;
+ return 0;
pcinfo = &pfwinfo->rpt_fbtc_step.cinfo;
if (!pcinfo->valid)
- return;
+ return 0;
pstep = &pfwinfo->rpt_fbtc_step.finfo.v3;
if (pcinfo->req_fver != pstep->fver)
- return;
+ return 0;
if (le32_to_cpu(pstep->cnt) <= FCXDEF_STEP)
n_begin = 1;
@@ -9932,7 +10037,7 @@ static void _show_fbtc_step_v3(struct rtw89_dev *rtwdev, struct seq_file *m)
n_end = le32_to_cpu(pstep->cnt);
if (n_begin > n_end)
- return;
+ return 0;
/* restore step info by using ring instead of FIFO */
for (i = n_begin; i <= n_end; i++) {
@@ -9945,50 +10050,55 @@ static void _show_fbtc_step_v3(struct rtw89_dev *rtwdev, struct seq_file *m)
continue;
if (cnt % 10 == 0)
- seq_printf(m, " %-15s : ", "[steps]");
+ p += scnprintf(p, end - p, " %-15s : ", "[steps]");
- seq_printf(m, "-> %s(%02d)",
- (type == CXSTEP_SLOT ?
- id_to_slot((u32)val) :
- id_to_evt((u32)val)), diff_t);
+ p += scnprintf(p, end - p, "-> %s(%02d)",
+ (type == CXSTEP_SLOT ?
+ id_to_slot((u32)val) :
+ id_to_evt((u32)val)), diff_t);
if (cnt % 10 == 9)
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
cnt++;
}
+
+ return p - buf;
}
-static void _show_fw_dm_msg(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_fw_dm_msg(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
+ char *p = buf, *end = buf + bufsz;
if (!(btc->dm.coex_info_map & BTC_COEX_INFO_DM))
- return;
+ goto out;
- _show_error(rtwdev, m);
- _show_fbtc_tdma(rtwdev, m);
- _show_fbtc_slots(rtwdev, m);
+ p += _show_error(rtwdev, p, end - p);
+ p += _show_fbtc_tdma(rtwdev, p, end - p);
+ p += _show_fbtc_slots(rtwdev, p, end - p);
if (ver->fcxcysta == 2)
- _show_fbtc_cysta_v2(rtwdev, m);
+ p += _show_fbtc_cysta_v2(rtwdev, p, end - p);
else if (ver->fcxcysta == 3)
- _show_fbtc_cysta_v3(rtwdev, m);
+ p += _show_fbtc_cysta_v3(rtwdev, p, end - p);
else if (ver->fcxcysta == 4)
- _show_fbtc_cysta_v4(rtwdev, m);
+ p += _show_fbtc_cysta_v4(rtwdev, p, end - p);
else if (ver->fcxcysta == 5)
- _show_fbtc_cysta_v5(rtwdev, m);
+ p += _show_fbtc_cysta_v5(rtwdev, p, end - p);
else if (ver->fcxcysta == 7)
- _show_fbtc_cysta_v7(rtwdev, m);
+ p += _show_fbtc_cysta_v7(rtwdev, p, end - p);
- _show_fbtc_nullsta(rtwdev, m);
+ p += _show_fbtc_nullsta(rtwdev, p, end - p);
if (ver->fcxstep == 2)
- _show_fbtc_step_v2(rtwdev, m);
+ p += _show_fbtc_step_v2(rtwdev, p, end - p);
else if (ver->fcxstep == 3)
- _show_fbtc_step_v3(rtwdev, m);
+ p += _show_fbtc_step_v3(rtwdev, p, end - p);
+out:
+ return p - buf;
}
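/*
 * Sketch (hypothetical wiring, not part of this patch): because the show
 * helpers above now take a buffer and return the number of bytes rendered,
 * a debugfs-style read can render once into a bounce buffer and copy the
 * reported length out with simple_read_from_buffer(), e.g.:
 */
static ssize_t example_coex_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct rtw89_dev *rtwdev = file->private_data;	/* assumed association */
	const size_t bufsz = 0x4000;			/* arbitrary example size */
	ssize_t ret;
	char *buf;
	int len;

	buf = kvmalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	len = _show_fw_dm_msg(rtwdev, buf, bufsz);	/* bytes actually written */
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);

	kvfree(buf);
	return ret;
}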
static void _get_gnt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_coex_gnt *gnt_cfg)
@@ -10033,12 +10143,13 @@ static void _get_gnt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_coex_gnt *gnt
}
}
-static void _show_gpio_dbg(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_gpio_dbg(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc_btf_fwinfo *pfwinfo = &rtwdev->btc.fwinfo;
const struct rtw89_btc_ver *ver = rtwdev->btc.ver;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
union rtw89_btc_fbtc_gpio_dbg *gdbg = NULL;
+ char *p = buf, *end = buf + bufsz;
u8 *gpio_map, i;
u32 en_map;
@@ -10048,8 +10159,8 @@ static void _show_gpio_dbg(struct rtw89_dev *rtwdev, struct seq_file *m)
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): stop due rpt_fbtc_gpio_dbg.cinfo\n",
__func__);
- seq_puts(m, "\n");
- return;
+ p += scnprintf(p, end - p, "\n");
+ goto out;
}
if (ver->fcxgpiodbg == 7) {
@@ -10061,20 +10172,24 @@ static void _show_gpio_dbg(struct rtw89_dev *rtwdev, struct seq_file *m)
}
if (!en_map)
- return;
+ goto out;
- seq_printf(m, " %-15s : enable_map:0x%08x",
- "[gpio_dbg]", en_map);
+ p += scnprintf(p, end - p, " %-15s : enable_map:0x%08x",
+ "[gpio_dbg]", en_map);
for (i = 0; i < BTC_DBG_MAX1; i++) {
if (!(en_map & BIT(i)))
continue;
- seq_printf(m, ", %s->GPIO%d", id_to_gdbg(i), gpio_map[i]);
+ p += scnprintf(p, end - p, ", %s->GPIO%d", id_to_gdbg(i),
+ gpio_map[i]);
}
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
+
+out:
+ return p - buf;
}
-static void _show_mreg_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_mreg_v1(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
@@ -10086,45 +10201,47 @@ static void _show_mreg_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_mac_ax_coex_gnt gnt_cfg = {};
struct rtw89_mac_ax_gnt gnt;
+ char *p = buf, *end = buf + bufsz;
u8 i = 0, type = 0, cnt = 0;
u32 val, offset;
if (!(btc->dm.coex_info_map & BTC_COEX_INFO_MREG))
- return;
+ return 0;
- seq_puts(m, "========== [HW Status] ==========\n");
+ p += scnprintf(p, end - p, "========== [HW Status] ==========\n");
- seq_printf(m,
- " %-15s : WL->BT:0x%08x(cnt:%d), BT->WL:0x%08x(total:%d, bt_update:%d)\n",
- "[scoreboard]", wl->scbd, cx->cnt_wl[BTC_WCNT_SCBDUPDATE],
- bt->scbd, cx->cnt_bt[BTC_BCNT_SCBDREAD],
- cx->cnt_bt[BTC_BCNT_SCBDUPDATE]);
+ p += scnprintf(p, end - p,
+ " %-15s : WL->BT:0x%08x(cnt:%d), BT->WL:0x%08x(total:%d, bt_update:%d)\n",
+ "[scoreboard]", wl->scbd,
+ cx->cnt_wl[BTC_WCNT_SCBDUPDATE],
+ bt->scbd, cx->cnt_bt[BTC_BCNT_SCBDREAD],
+ cx->cnt_bt[BTC_BCNT_SCBDUPDATE]);
btc->dm.pta_owner = rtw89_mac_get_ctrl_path(rtwdev);
_get_gnt(rtwdev, &gnt_cfg);
gnt = gnt_cfg.band[0];
- seq_printf(m,
- " %-15s : pta_owner:%s, phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d], ",
- "[gnt_status]",
- chip->chip_id == RTL8852C ? "HW" :
- btc->dm.pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT",
- gnt.gnt_wl_sw_en ? "SW" : "HW", gnt.gnt_wl,
- gnt.gnt_bt_sw_en ? "SW" : "HW", gnt.gnt_bt);
+ p += scnprintf(p, end - p,
+ " %-15s : pta_owner:%s, phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d], ",
+ "[gnt_status]",
+ chip->chip_id == RTL8852C ? "HW" :
+ btc->dm.pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT",
+ gnt.gnt_wl_sw_en ? "SW" : "HW", gnt.gnt_wl,
+ gnt.gnt_bt_sw_en ? "SW" : "HW", gnt.gnt_bt);
gnt = gnt_cfg.band[1];
- seq_printf(m, "phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]\n",
- gnt.gnt_wl_sw_en ? "SW" : "HW",
- gnt.gnt_wl,
- gnt.gnt_bt_sw_en ? "SW" : "HW",
- gnt.gnt_bt);
+ p += scnprintf(p, end - p, "phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]\n",
+ gnt.gnt_wl_sw_en ? "SW" : "HW",
+ gnt.gnt_wl,
+ gnt.gnt_bt_sw_en ? "SW" : "HW",
+ gnt.gnt_bt);
pcinfo = &pfwinfo->rpt_fbtc_mregval.cinfo;
if (!pcinfo->valid) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): stop due rpt_fbtc_mregval.cinfo\n",
__func__);
- return;
+ goto out;
}
pmreg = &pfwinfo->rpt_fbtc_mregval.finfo.v1;
@@ -10138,21 +10255,26 @@ static void _show_mreg_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
val = le32_to_cpu(pmreg->mreg_val[i]);
if (cnt % 6 == 0)
- seq_printf(m, " %-15s : %d_0x%04x=0x%08x",
- "[reg]", (u32)type, offset, val);
+ p += scnprintf(p, end - p,
+ " %-15s : %d_0x%04x=0x%08x",
+ "[reg]", (u32)type, offset, val);
else
- seq_printf(m, ", %d_0x%04x=0x%08x", (u32)type,
- offset, val);
+ p += scnprintf(p, end - p, ", %d_0x%04x=0x%08x",
+ (u32)type,
+ offset, val);
if (cnt % 6 == 5)
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
cnt++;
if (i >= pmreg->reg_num)
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
+
+out:
+ return p - buf;
}
-static void _show_mreg_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_mreg_v2(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
@@ -10164,46 +10286,48 @@ static void _show_mreg_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_mac_ax_coex_gnt gnt_cfg = {};
struct rtw89_mac_ax_gnt gnt;
+ char *p = buf, *end = buf + bufsz;
u8 i = 0, type = 0, cnt = 0;
u32 val, offset;
if (!(btc->dm.coex_info_map & BTC_COEX_INFO_MREG))
- return;
+ return 0;
- seq_puts(m, "========== [HW Status] ==========\n");
+ p += scnprintf(p, end - p, "========== [HW Status] ==========\n");
- seq_printf(m,
- " %-15s : WL->BT:0x%08x(cnt:%d), BT->WL:0x%08x(total:%d, bt_update:%d)\n",
- "[scoreboard]", wl->scbd, cx->cnt_wl[BTC_WCNT_SCBDUPDATE],
- bt->scbd, cx->cnt_bt[BTC_BCNT_SCBDREAD],
- cx->cnt_bt[BTC_BCNT_SCBDUPDATE]);
+ p += scnprintf(p, end - p,
+ " %-15s : WL->BT:0x%08x(cnt:%d), BT->WL:0x%08x(total:%d, bt_update:%d)\n",
+ "[scoreboard]", wl->scbd,
+ cx->cnt_wl[BTC_WCNT_SCBDUPDATE],
+ bt->scbd, cx->cnt_bt[BTC_BCNT_SCBDREAD],
+ cx->cnt_bt[BTC_BCNT_SCBDUPDATE]);
btc->dm.pta_owner = rtw89_mac_get_ctrl_path(rtwdev);
_get_gnt(rtwdev, &gnt_cfg);
gnt = gnt_cfg.band[0];
- seq_printf(m,
- " %-15s : pta_owner:%s, phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d], polut_type:%s",
- "[gnt_status]",
- chip->chip_id == RTL8852C ? "HW" :
- btc->dm.pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT",
- gnt.gnt_wl_sw_en ? "SW" : "HW", gnt.gnt_wl,
- gnt.gnt_bt_sw_en ? "SW" : "HW", gnt.gnt_bt,
- id_to_polut(wl->bt_polut_type[wl->pta_req_mac]));
+ p += scnprintf(p, end - p,
+ " %-15s : pta_owner:%s, phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d], polut_type:%s",
+ "[gnt_status]",
+ chip->chip_id == RTL8852C ? "HW" :
+ btc->dm.pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT",
+ gnt.gnt_wl_sw_en ? "SW" : "HW", gnt.gnt_wl,
+ gnt.gnt_bt_sw_en ? "SW" : "HW", gnt.gnt_bt,
+ id_to_polut(wl->bt_polut_type[wl->pta_req_mac]));
gnt = gnt_cfg.band[1];
- seq_printf(m, "phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]\n",
- gnt.gnt_wl_sw_en ? "SW" : "HW",
- gnt.gnt_wl,
- gnt.gnt_bt_sw_en ? "SW" : "HW",
- gnt.gnt_bt);
+ p += scnprintf(p, end - p, "phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]\n",
+ gnt.gnt_wl_sw_en ? "SW" : "HW",
+ gnt.gnt_wl,
+ gnt.gnt_bt_sw_en ? "SW" : "HW",
+ gnt.gnt_bt);
pcinfo = &pfwinfo->rpt_fbtc_mregval.cinfo;
if (!pcinfo->valid) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): stop due rpt_fbtc_mregval.cinfo\n",
__func__);
- return;
+ goto out;
}
pmreg = &pfwinfo->rpt_fbtc_mregval.finfo.v2;
@@ -10217,21 +10341,26 @@ static void _show_mreg_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
val = le32_to_cpu(pmreg->mreg_val[i]);
if (cnt % 6 == 0)
- seq_printf(m, " %-15s : %d_0x%04x=0x%08x",
- "[reg]", (u32)type, offset, val);
+ p += scnprintf(p, end - p,
+ " %-15s : %d_0x%04x=0x%08x",
+ "[reg]", (u32)type, offset, val);
else
- seq_printf(m, ", %d_0x%04x=0x%08x", (u32)type,
- offset, val);
+ p += scnprintf(p, end - p, ", %d_0x%04x=0x%08x",
+ (u32)type,
+ offset, val);
if (cnt % 6 == 5)
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
cnt++;
if (i >= pmreg->reg_num)
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
+
+out:
+ return p - buf;
}
-static void _show_mreg_v7(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_mreg_v7(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
@@ -10242,46 +10371,50 @@ static void _show_mreg_v7(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_bt_info *bt = &cx->bt;
struct rtw89_mac_ax_gnt *gnt = NULL;
struct rtw89_btc_dm *dm = &btc->dm;
+ char *p = buf, *end = buf + bufsz;
u8 i, type, cnt = 0;
u32 val, offset;
if (!(dm->coex_info_map & BTC_COEX_INFO_MREG))
- return;
+ return 0;
- seq_puts(m, "\n\r========== [HW Status] ==========");
+ p += scnprintf(p, end - p, "\n\r========== [HW Status] ==========");
- seq_printf(m,
- "\n\r %-15s : WL->BT:0x%08x(cnt:%d), BT->WL:0x%08x(total:%d, bt_update:%d)",
- "[scoreboard]", wl->scbd, cx->cnt_wl[BTC_WCNT_SCBDUPDATE],
- bt->scbd, cx->cnt_bt[BTC_BCNT_SCBDREAD],
- cx->cnt_bt[BTC_BCNT_SCBDUPDATE]);
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : WL->BT:0x%08x(cnt:%d), BT->WL:0x%08x(total:%d, bt_update:%d)",
+ "[scoreboard]", wl->scbd,
+ cx->cnt_wl[BTC_WCNT_SCBDUPDATE],
+ bt->scbd, cx->cnt_bt[BTC_BCNT_SCBDREAD],
+ cx->cnt_bt[BTC_BCNT_SCBDUPDATE]);
/* To avoid I/O if WL LPS or power-off */
dm->pta_owner = rtw89_mac_get_ctrl_path(rtwdev);
- seq_printf(m,
- "\n\r %-15s : pta_owner:%s, pta_req_mac:MAC%d, rf_gnt_source: polut_type:%s",
- "[gnt_status]",
- rtwdev->chip->para_ver & BTC_FEAT_PTA_ONOFF_CTRL ? "HW" :
- dm->pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT",
- wl->pta_req_mac, id_to_polut(wl->bt_polut_type[wl->pta_req_mac]));
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : pta_owner:%s, pta_req_mac:MAC%d, rf_gnt_source: polut_type:%s",
+ "[gnt_status]",
+ rtwdev->chip->para_ver & BTC_FEAT_PTA_ONOFF_CTRL ? "HW" :
+ dm->pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT",
+ wl->pta_req_mac,
+ id_to_polut(wl->bt_polut_type[wl->pta_req_mac]));
gnt = &dm->gnt.band[RTW89_PHY_0];
- seq_printf(m, ", phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d]",
- gnt->gnt_wl_sw_en ? "SW" : "HW", gnt->gnt_wl,
- gnt->gnt_bt_sw_en ? "SW" : "HW", gnt->gnt_bt);
+ p += scnprintf(p, end - p, ", phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d]",
+ gnt->gnt_wl_sw_en ? "SW" : "HW", gnt->gnt_wl,
+ gnt->gnt_bt_sw_en ? "SW" : "HW", gnt->gnt_bt);
if (rtwdev->dbcc_en) {
gnt = &dm->gnt.band[RTW89_PHY_1];
- seq_printf(m, ", phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]",
- gnt->gnt_wl_sw_en ? "SW" : "HW", gnt->gnt_wl,
- gnt->gnt_bt_sw_en ? "SW" : "HW", gnt->gnt_bt);
+ p += scnprintf(p, end - p,
+ ", phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]",
+ gnt->gnt_wl_sw_en ? "SW" : "HW", gnt->gnt_wl,
+ gnt->gnt_bt_sw_en ? "SW" : "HW", gnt->gnt_bt);
}
pcinfo = &pfwinfo->rpt_fbtc_mregval.cinfo;
if (!pcinfo->valid)
- return;
+ goto out;
pmreg = &pfwinfo->rpt_fbtc_mregval.finfo.v7;
@@ -10291,17 +10424,21 @@ static void _show_mreg_v7(struct rtw89_dev *rtwdev, struct seq_file *m)
val = le32_to_cpu(pmreg->mreg_val[i]);
if (cnt % 6 == 0)
- seq_printf(m, "\n\r %-15s : %s_0x%x=0x%x", "[reg]",
- id_to_regtype(type), offset, val);
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : %s_0x%x=0x%x", "[reg]",
+ id_to_regtype(type), offset, val);
else
- seq_printf(m, ", %s_0x%x=0x%x",
- id_to_regtype(type), offset, val);
+ p += scnprintf(p, end - p, ", %s_0x%x=0x%x",
+ id_to_regtype(type), offset, val);
cnt++;
}
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
+
+out:
+ return p - buf;
}
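/*
 * Editorial note (sketch, not from this patch): scnprintf() rather than
 * snprintf() matters for this idiom.  snprintf() returns the length the
 * output *would* have needed, so "p += snprintf(...)" could push p past
 * 'end' on truncation and corrupt the next 'end - p' size argument;
 * scnprintf() returns only what was actually stored, so p stays in bounds.
 *
 *	char tmp[8], *q = tmp;
 *
 *	q += snprintf(q, sizeof(tmp), "0123456789");   // returns 10: q overshoots
 *	q  = tmp;
 *	q += scnprintf(q, sizeof(tmp), "0123456789");  // returns 7: q stays valid
 */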
-static void _show_summary_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_summary_v1(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
@@ -10312,56 +10449,59 @@ static void _show_summary_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_wl_info *wl = &cx->wl;
struct rtw89_btc_bt_info *bt = &cx->bt;
u32 cnt_sum = 0, *cnt = btc->dm.cnt_notify;
+ char *p = buf, *end = buf + bufsz;
u8 i;
if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY))
- return;
+ return 0;
- seq_puts(m, "========== [Statistics] ==========\n");
+ p += scnprintf(p, end - p, "========== [Statistics] ==========\n");
pcinfo = &pfwinfo->rpt_ctrl.cinfo;
if (pcinfo->valid && !wl->status.map.lps && !wl->status.map.rf_off) {
prptctrl = &pfwinfo->rpt_ctrl.finfo.v1;
- seq_printf(m,
- " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d), ",
- "[summary]", pfwinfo->cnt_h2c,
- pfwinfo->cnt_h2c_fail, prptctrl->h2c_cnt,
- pfwinfo->cnt_c2h, prptctrl->c2h_cnt);
+ p += scnprintf(p, end - p,
+ " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d), ",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail, prptctrl->h2c_cnt,
+ pfwinfo->cnt_c2h, prptctrl->c2h_cnt);
- seq_printf(m,
- "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x, dm_error_map:0x%x",
- pfwinfo->event[BTF_EVNT_RPT], prptctrl->rpt_cnt,
- prptctrl->rpt_enable, dm->error.val);
+ p += scnprintf(p, end - p,
+ "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x, dm_error_map:0x%x",
+ pfwinfo->event[BTF_EVNT_RPT],
+ prptctrl->rpt_cnt,
+ prptctrl->rpt_enable, dm->error.val);
if (dm->error.map.wl_fw_hang)
- seq_puts(m, " (WL FW Hang!!)");
- seq_puts(m, "\n");
- seq_printf(m,
- " %-15s : send_ok:%d, send_fail:%d, recv:%d",
- "[mailbox]", prptctrl->mb_send_ok_cnt,
- prptctrl->mb_send_fail_cnt, prptctrl->mb_recv_cnt);
-
- seq_printf(m,
- "(A2DP_empty:%d, A2DP_flowstop:%d, A2DP_full:%d)\n",
- prptctrl->mb_a2dp_empty_cnt,
- prptctrl->mb_a2dp_flct_cnt,
- prptctrl->mb_a2dp_full_cnt);
-
- seq_printf(m,
- " %-15s : wl_rfk[req:%d/go:%d/reject:%d/timeout:%d]",
- "[RFK]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
- cx->cnt_wl[BTC_WCNT_RFK_GO],
- cx->cnt_wl[BTC_WCNT_RFK_REJECT],
- cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]);
-
- seq_printf(m,
- ", bt_rfk[req:%d/go:%d/reject:%d/timeout:%d/fail:%d]\n",
- prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_REQ],
- prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_GO],
- prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_REJECT],
- prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_TIMEOUT],
- prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_FAIL]);
+ p += scnprintf(p, end - p, " (WL FW Hang!!)");
+ p += scnprintf(p, end - p, "\n");
+ p += scnprintf(p, end - p,
+ " %-15s : send_ok:%d, send_fail:%d, recv:%d",
+ "[mailbox]", prptctrl->mb_send_ok_cnt,
+ prptctrl->mb_send_fail_cnt,
+ prptctrl->mb_recv_cnt);
+
+ p += scnprintf(p, end - p,
+ "(A2DP_empty:%d, A2DP_flowstop:%d, A2DP_full:%d)\n",
+ prptctrl->mb_a2dp_empty_cnt,
+ prptctrl->mb_a2dp_flct_cnt,
+ prptctrl->mb_a2dp_full_cnt);
+
+ p += scnprintf(p, end - p,
+ " %-15s : wl_rfk[req:%d/go:%d/reject:%d/timeout:%d]",
+ "[RFK]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
+ cx->cnt_wl[BTC_WCNT_RFK_GO],
+ cx->cnt_wl[BTC_WCNT_RFK_REJECT],
+ cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]);
+
+ p += scnprintf(p, end - p,
+ ", bt_rfk[req:%d/go:%d/reject:%d/timeout:%d/fail:%d]\n",
+ prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_REQ],
+ prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_GO],
+ prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_REJECT],
+ prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_TIMEOUT],
+ prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_FAIL]);
if (prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_TIMEOUT] > 0)
bt->rfk_info.map.timeout = 1;
@@ -10370,42 +10510,44 @@ static void _show_summary_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
dm->error.map.wl_rfk_timeout = bt->rfk_info.map.timeout;
} else {
- seq_printf(m,
- " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d, rpt_cnt=%d, rpt_map=0x%x",
- "[summary]", pfwinfo->cnt_h2c,
- pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h,
- pfwinfo->event[BTF_EVNT_RPT],
- btc->fwinfo.rpt_en_map);
- seq_puts(m, " (WL FW report invalid!!)\n");
+ p += scnprintf(p, end - p,
+ " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d, rpt_cnt=%d, rpt_map=0x%x",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h,
+ pfwinfo->event[BTF_EVNT_RPT],
+ btc->fwinfo.rpt_en_map);
+ p += scnprintf(p, end - p, " (WL FW report invalid!!)\n");
}
for (i = 0; i < BTC_NCNT_NUM; i++)
cnt_sum += dm->cnt_notify[i];
- seq_printf(m,
- " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
- "[notify_cnt]", cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
- cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+ p += scnprintf(p, end - p,
+ " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
+ "[notify_cnt]", cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
+ cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+
+ p += scnprintf(p, end - p,
+ "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d\n",
+ cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
+ cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
+ cnt[BTC_NCNT_WL_STA]);
- seq_printf(m,
- "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d\n",
- cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
- cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
- cnt[BTC_NCNT_WL_STA]);
+ p += scnprintf(p, end - p,
+ " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ",
+ "[notify_cnt]", cnt[BTC_NCNT_SCAN_START],
+ cnt[BTC_NCNT_SCAN_FINISH], cnt[BTC_NCNT_SWITCH_BAND],
+ cnt[BTC_NCNT_SPECIAL_PACKET]);
- seq_printf(m,
- " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ",
- "[notify_cnt]", cnt[BTC_NCNT_SCAN_START],
- cnt[BTC_NCNT_SCAN_FINISH], cnt[BTC_NCNT_SWITCH_BAND],
- cnt[BTC_NCNT_SPECIAL_PACKET]);
+ p += scnprintf(p, end - p,
+ "timer=%d, control=%d, customerize=%d\n",
+ cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL],
+ cnt[BTC_NCNT_CUSTOMERIZE]);
- seq_printf(m,
- "timer=%d, control=%d, customerize=%d\n",
- cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL],
- cnt[BTC_NCNT_CUSTOMERIZE]);
+ return p - buf;
}
-static void _show_summary_v4(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_summary_v4(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
@@ -10416,64 +10558,65 @@ static void _show_summary_v4(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_wl_info *wl = &cx->wl;
struct rtw89_btc_bt_info *bt = &cx->bt;
u32 cnt_sum = 0, *cnt = btc->dm.cnt_notify;
+ char *p = buf, *end = buf + bufsz;
u8 i;
if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY))
- return;
+ return 0;
- seq_puts(m, "========== [Statistics] ==========\n");
+ p += scnprintf(p, end - p, "========== [Statistics] ==========\n");
pcinfo = &pfwinfo->rpt_ctrl.cinfo;
if (pcinfo->valid && !wl->status.map.lps && !wl->status.map.rf_off) {
prptctrl = &pfwinfo->rpt_ctrl.finfo.v4;
- seq_printf(m,
- " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d), ",
- "[summary]", pfwinfo->cnt_h2c,
- pfwinfo->cnt_h2c_fail,
- le32_to_cpu(prptctrl->rpt_info.cnt_h2c),
- pfwinfo->cnt_c2h,
- le32_to_cpu(prptctrl->rpt_info.cnt_c2h));
-
- seq_printf(m,
- "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x, dm_error_map:0x%x",
- pfwinfo->event[BTF_EVNT_RPT],
- le32_to_cpu(prptctrl->rpt_info.cnt),
- le32_to_cpu(prptctrl->rpt_info.en),
- dm->error.val);
+ p += scnprintf(p, end - p,
+ " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d), ",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail,
+ le32_to_cpu(prptctrl->rpt_info.cnt_h2c),
+ pfwinfo->cnt_c2h,
+ le32_to_cpu(prptctrl->rpt_info.cnt_c2h));
+
+ p += scnprintf(p, end - p,
+ "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x, dm_error_map:0x%x",
+ pfwinfo->event[BTF_EVNT_RPT],
+ le32_to_cpu(prptctrl->rpt_info.cnt),
+ le32_to_cpu(prptctrl->rpt_info.en),
+ dm->error.val);
if (dm->error.map.wl_fw_hang)
- seq_puts(m, " (WL FW Hang!!)");
- seq_puts(m, "\n");
- seq_printf(m,
- " %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
- "[mailbox]",
- le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
- le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
- le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
-
- seq_printf(m,
- "A2DP_empty:%d(stop:%d, tx:%d, ack:%d, nack:%d)\n",
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
-
- seq_printf(m,
- " %-15s : wl_rfk[req:%d/go:%d/reject:%d/timeout:%d]",
- "[RFK]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
- cx->cnt_wl[BTC_WCNT_RFK_GO],
- cx->cnt_wl[BTC_WCNT_RFK_REJECT],
- cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]);
-
- seq_printf(m,
- ", bt_rfk[req:%d/go:%d/reject:%d/timeout:%d/fail:%d]\n",
- le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]),
- le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_GO]),
- le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REJECT]),
- le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_TIMEOUT]),
- le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_FAIL]));
+ p += scnprintf(p, end - p, " (WL FW Hang!!)");
+ p += scnprintf(p, end - p, "\n");
+ p += scnprintf(p, end - p,
+ " %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
+ "[mailbox]",
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
+
+ p += scnprintf(p, end - p,
+ "A2DP_empty:%d(stop:%d, tx:%d, ack:%d, nack:%d)\n",
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
+
+ p += scnprintf(p, end - p,
+ " %-15s : wl_rfk[req:%d/go:%d/reject:%d/timeout:%d]",
+ "[RFK]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
+ cx->cnt_wl[BTC_WCNT_RFK_GO],
+ cx->cnt_wl[BTC_WCNT_RFK_REJECT],
+ cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]);
+
+ p += scnprintf(p, end - p,
+ ", bt_rfk[req:%d/go:%d/reject:%d/timeout:%d/fail:%d]\n",
+ le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]),
+ le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_GO]),
+ le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REJECT]),
+ le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_TIMEOUT]),
+ le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_FAIL]));
if (le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_TIMEOUT]) > 0)
bt->rfk_info.map.timeout = 1;
@@ -10482,42 +10625,44 @@ static void _show_summary_v4(struct rtw89_dev *rtwdev, struct seq_file *m)
dm->error.map.wl_rfk_timeout = bt->rfk_info.map.timeout;
} else {
- seq_printf(m,
- " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d, rpt_cnt=%d, rpt_map=0x%x",
- "[summary]", pfwinfo->cnt_h2c,
- pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h,
- pfwinfo->event[BTF_EVNT_RPT],
- btc->fwinfo.rpt_en_map);
- seq_puts(m, " (WL FW report invalid!!)\n");
+ p += scnprintf(p, end - p,
+ " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d, rpt_cnt=%d, rpt_map=0x%x",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h,
+ pfwinfo->event[BTF_EVNT_RPT],
+ btc->fwinfo.rpt_en_map);
+ p += scnprintf(p, end - p, " (WL FW report invalid!!)\n");
}
for (i = 0; i < BTC_NCNT_NUM; i++)
cnt_sum += dm->cnt_notify[i];
- seq_printf(m,
- " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
- "[notify_cnt]", cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
- cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+ p += scnprintf(p, end - p,
+ " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
+ "[notify_cnt]", cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
+ cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+
+ p += scnprintf(p, end - p,
+ "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d\n",
+ cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
+ cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
+ cnt[BTC_NCNT_WL_STA]);
- seq_printf(m,
- "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d\n",
- cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
- cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
- cnt[BTC_NCNT_WL_STA]);
+ p += scnprintf(p, end - p,
+ " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ",
+ "[notify_cnt]", cnt[BTC_NCNT_SCAN_START],
+ cnt[BTC_NCNT_SCAN_FINISH], cnt[BTC_NCNT_SWITCH_BAND],
+ cnt[BTC_NCNT_SPECIAL_PACKET]);
- seq_printf(m,
- " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ",
- "[notify_cnt]", cnt[BTC_NCNT_SCAN_START],
- cnt[BTC_NCNT_SCAN_FINISH], cnt[BTC_NCNT_SWITCH_BAND],
- cnt[BTC_NCNT_SPECIAL_PACKET]);
+ p += scnprintf(p, end - p,
+ "timer=%d, control=%d, customerize=%d\n",
+ cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL],
+ cnt[BTC_NCNT_CUSTOMERIZE]);
- seq_printf(m,
- "timer=%d, control=%d, customerize=%d\n",
- cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL],
- cnt[BTC_NCNT_CUSTOMERIZE]);
+ return p - buf;
}
-static void _show_summary_v5(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_summary_v5(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
@@ -10527,112 +10672,118 @@ static void _show_summary_v5(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_wl_info *wl = &cx->wl;
u32 cnt_sum = 0, *cnt = btc->dm.cnt_notify;
+ char *p = buf, *end = buf + bufsz;
u8 i;
if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY))
- return;
+ return 0;
- seq_puts(m, "========== [Statistics] ==========\n");
+ p += scnprintf(p, end - p, "========== [Statistics] ==========\n");
pcinfo = &pfwinfo->rpt_ctrl.cinfo;
if (pcinfo->valid && !wl->status.map.lps && !wl->status.map.rf_off) {
prptctrl = &pfwinfo->rpt_ctrl.finfo.v5;
- seq_printf(m,
- " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d, len:%d), ",
- "[summary]", pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
- le16_to_cpu(prptctrl->rpt_info.cnt_h2c),
- pfwinfo->cnt_c2h,
- le16_to_cpu(prptctrl->rpt_info.cnt_c2h),
- le16_to_cpu(prptctrl->rpt_info.len_c2h));
-
- seq_printf(m,
- "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x",
- pfwinfo->event[BTF_EVNT_RPT],
- le16_to_cpu(prptctrl->rpt_info.cnt),
- le32_to_cpu(prptctrl->rpt_info.en));
+ p += scnprintf(p, end - p,
+ " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d, len:%d), ",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail,
+ le16_to_cpu(prptctrl->rpt_info.cnt_h2c),
+ pfwinfo->cnt_c2h,
+ le16_to_cpu(prptctrl->rpt_info.cnt_c2h),
+ le16_to_cpu(prptctrl->rpt_info.len_c2h));
+
+ p += scnprintf(p, end - p,
+ "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x",
+ pfwinfo->event[BTF_EVNT_RPT],
+ le16_to_cpu(prptctrl->rpt_info.cnt),
+ le32_to_cpu(prptctrl->rpt_info.en));
if (dm->error.map.wl_fw_hang)
- seq_puts(m, " (WL FW Hang!!)");
- seq_puts(m, "\n");
- seq_printf(m,
- " %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
- "[mailbox]",
- le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
- le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
- le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
-
- seq_printf(m,
- "A2DP_empty:%d(stop:%d, tx:%d, ack:%d, nack:%d)\n",
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
-
- seq_printf(m,
- " %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d]",
- "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
- cx->cnt_wl[BTC_WCNT_RFK_GO],
- cx->cnt_wl[BTC_WCNT_RFK_REJECT],
- cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]);
-
- seq_printf(m,
- ", bt_rfk[req:%d]",
- le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]));
-
- seq_printf(m,
- ", AOAC[RF_on:%d/RF_off:%d]",
- le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on),
- le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off));
+ p += scnprintf(p, end - p, " (WL FW Hang!!)");
+ p += scnprintf(p, end - p, "\n");
+ p += scnprintf(p, end - p,
+ " %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
+ "[mailbox]",
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
+
+ p += scnprintf(p, end - p,
+ "A2DP_empty:%d(stop:%d, tx:%d, ack:%d, nack:%d)\n",
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
+
+ p += scnprintf(p, end - p,
+ " %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d]",
+ "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
+ cx->cnt_wl[BTC_WCNT_RFK_GO],
+ cx->cnt_wl[BTC_WCNT_RFK_REJECT],
+ cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]);
+
+ p += scnprintf(p, end - p,
+ ", bt_rfk[req:%d]",
+ le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]));
+
+ p += scnprintf(p, end - p,
+ ", AOAC[RF_on:%d/RF_off:%d]",
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on),
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off));
} else {
- seq_printf(m,
- " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d",
- "[summary]", pfwinfo->cnt_h2c,
- pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h);
+ p += scnprintf(p, end - p,
+ " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h);
}
if (!pcinfo->valid || pfwinfo->len_mismch || pfwinfo->fver_mismch ||
pfwinfo->err[BTFRE_EXCEPTION]) {
- seq_puts(m, "\n");
- seq_printf(m,
- " %-15s : WL FW rpt error!![rpt_ctrl_valid:%d/len:"
- "0x%x/ver:0x%x/ex:%d/lps=%d/rf_off=%d]",
- "[ERROR]", pcinfo->valid, pfwinfo->len_mismch,
- pfwinfo->fver_mismch, pfwinfo->err[BTFRE_EXCEPTION],
- wl->status.map.lps, wl->status.map.rf_off);
+ p += scnprintf(p, end - p, "\n");
+ p += scnprintf(p, end - p,
+ " %-15s : WL FW rpt error!![rpt_ctrl_valid:%d/len:"
+ "0x%x/ver:0x%x/ex:%d/lps=%d/rf_off=%d]",
+ "[ERROR]", pcinfo->valid, pfwinfo->len_mismch,
+ pfwinfo->fver_mismch,
+ pfwinfo->err[BTFRE_EXCEPTION],
+ wl->status.map.lps, wl->status.map.rf_off);
}
for (i = 0; i < BTC_NCNT_NUM; i++)
cnt_sum += dm->cnt_notify[i];
- seq_puts(m, "\n");
- seq_printf(m,
- " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
- "[notify_cnt]",
- cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
- cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+ p += scnprintf(p, end - p, "\n");
+ p += scnprintf(p, end - p,
+ " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
+ "[notify_cnt]",
+ cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
+ cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+
+ p += scnprintf(p, end - p,
+ "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d",
+ cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
+ cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
+ cnt[BTC_NCNT_WL_STA]);
- seq_printf(m,
- "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d",
- cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
- cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
- cnt[BTC_NCNT_WL_STA]);
+ p += scnprintf(p, end - p, "\n");
+ p += scnprintf(p, end - p,
+ " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ",
+ "[notify_cnt]",
+ cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH],
+ cnt[BTC_NCNT_SWITCH_BAND],
+ cnt[BTC_NCNT_SPECIAL_PACKET]);
- seq_puts(m, "\n");
- seq_printf(m,
- " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ",
- "[notify_cnt]",
- cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH],
- cnt[BTC_NCNT_SWITCH_BAND], cnt[BTC_NCNT_SPECIAL_PACKET]);
+ p += scnprintf(p, end - p,
+ "timer=%d, control=%d, customerize=%d",
+ cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL],
+ cnt[BTC_NCNT_CUSTOMERIZE]);
- seq_printf(m,
- "timer=%d, control=%d, customerize=%d",
- cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL],
- cnt[BTC_NCNT_CUSTOMERIZE]);
+ return p - buf;
}
-static void _show_summary_v105(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_summary_v105(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
@@ -10642,112 +10793,118 @@ static void _show_summary_v105(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_wl_info *wl = &cx->wl;
u32 cnt_sum = 0, *cnt = btc->dm.cnt_notify;
+ char *p = buf, *end = buf + bufsz;
u8 i;
if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY))
- return;
+ return 0;
- seq_puts(m, "========== [Statistics] ==========\n");
+ p += scnprintf(p, end - p, "========== [Statistics] ==========\n");
pcinfo = &pfwinfo->rpt_ctrl.cinfo;
if (pcinfo->valid && !wl->status.map.lps && !wl->status.map.rf_off) {
prptctrl = &pfwinfo->rpt_ctrl.finfo.v105;
- seq_printf(m,
- " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d, len:%d), ",
- "[summary]", pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
- le16_to_cpu(prptctrl->rpt_info.cnt_h2c),
- pfwinfo->cnt_c2h,
- le16_to_cpu(prptctrl->rpt_info.cnt_c2h),
- le16_to_cpu(prptctrl->rpt_info.len_c2h));
-
- seq_printf(m,
- "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x",
- pfwinfo->event[BTF_EVNT_RPT],
- le16_to_cpu(prptctrl->rpt_info.cnt),
- le32_to_cpu(prptctrl->rpt_info.en));
+ p += scnprintf(p, end - p,
+ " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d, len:%d), ",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail,
+ le16_to_cpu(prptctrl->rpt_info.cnt_h2c),
+ pfwinfo->cnt_c2h,
+ le16_to_cpu(prptctrl->rpt_info.cnt_c2h),
+ le16_to_cpu(prptctrl->rpt_info.len_c2h));
+
+ p += scnprintf(p, end - p,
+ "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x",
+ pfwinfo->event[BTF_EVNT_RPT],
+ le16_to_cpu(prptctrl->rpt_info.cnt),
+ le32_to_cpu(prptctrl->rpt_info.en));
if (dm->error.map.wl_fw_hang)
- seq_puts(m, " (WL FW Hang!!)");
- seq_puts(m, "\n");
- seq_printf(m,
- " %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
- "[mailbox]",
- le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
- le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
- le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
-
- seq_printf(m,
- "A2DP_empty:%d(stop:%d, tx:%d, ack:%d, nack:%d)\n",
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
-
- seq_printf(m,
- " %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d]",
- "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
- cx->cnt_wl[BTC_WCNT_RFK_GO],
- cx->cnt_wl[BTC_WCNT_RFK_REJECT],
- cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]);
-
- seq_printf(m,
- ", bt_rfk[req:%d]",
- le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]));
-
- seq_printf(m,
- ", AOAC[RF_on:%d/RF_off:%d]",
- le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on),
- le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off));
+ p += scnprintf(p, end - p, " (WL FW Hang!!)");
+ p += scnprintf(p, end - p, "\n");
+ p += scnprintf(p, end - p,
+ " %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
+ "[mailbox]",
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
+
+ p += scnprintf(p, end - p,
+ "A2DP_empty:%d(stop:%d, tx:%d, ack:%d, nack:%d)\n",
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
+
+ p += scnprintf(p, end - p,
+ " %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d]",
+ "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
+ cx->cnt_wl[BTC_WCNT_RFK_GO],
+ cx->cnt_wl[BTC_WCNT_RFK_REJECT],
+ cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]);
+
+ p += scnprintf(p, end - p,
+ ", bt_rfk[req:%d]",
+ le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]));
+
+ p += scnprintf(p, end - p,
+ ", AOAC[RF_on:%d/RF_off:%d]",
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on),
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off));
} else {
- seq_printf(m,
- " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d",
- "[summary]", pfwinfo->cnt_h2c,
- pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h);
+ p += scnprintf(p, end - p,
+ " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h);
}
if (!pcinfo->valid || pfwinfo->len_mismch || pfwinfo->fver_mismch ||
pfwinfo->err[BTFRE_EXCEPTION]) {
- seq_puts(m, "\n");
- seq_printf(m,
- " %-15s : WL FW rpt error!![rpt_ctrl_valid:%d/len:"
- "0x%x/ver:0x%x/ex:%d/lps=%d/rf_off=%d]",
- "[ERROR]", pcinfo->valid, pfwinfo->len_mismch,
- pfwinfo->fver_mismch, pfwinfo->err[BTFRE_EXCEPTION],
- wl->status.map.lps, wl->status.map.rf_off);
+ p += scnprintf(p, end - p, "\n");
+ p += scnprintf(p, end - p,
+ " %-15s : WL FW rpt error!![rpt_ctrl_valid:%d/len:"
+ "0x%x/ver:0x%x/ex:%d/lps=%d/rf_off=%d]",
+ "[ERROR]", pcinfo->valid, pfwinfo->len_mismch,
+ pfwinfo->fver_mismch,
+ pfwinfo->err[BTFRE_EXCEPTION],
+ wl->status.map.lps, wl->status.map.rf_off);
}
for (i = 0; i < BTC_NCNT_NUM; i++)
cnt_sum += dm->cnt_notify[i];
- seq_puts(m, "\n");
- seq_printf(m,
- " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
- "[notify_cnt]",
- cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
- cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+ p += scnprintf(p, end - p, "\n");
+ p += scnprintf(p, end - p,
+ " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
+ "[notify_cnt]",
+ cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
+ cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
- seq_printf(m,
- "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d",
- cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
- cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
- cnt[BTC_NCNT_WL_STA]);
+ p += scnprintf(p, end - p,
+ "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d",
+ cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
+ cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
+ cnt[BTC_NCNT_WL_STA]);
- seq_puts(m, "\n");
- seq_printf(m,
- " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ",
- "[notify_cnt]",
- cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH],
- cnt[BTC_NCNT_SWITCH_BAND], cnt[BTC_NCNT_SPECIAL_PACKET]);
+ p += scnprintf(p, end - p, "\n");
+ p += scnprintf(p, end - p,
+ " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ",
+ "[notify_cnt]",
+ cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH],
+ cnt[BTC_NCNT_SWITCH_BAND],
+ cnt[BTC_NCNT_SPECIAL_PACKET]);
- seq_printf(m,
- "timer=%d, control=%d, customerize=%d",
- cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL],
- cnt[BTC_NCNT_CUSTOMERIZE]);
+ p += scnprintf(p, end - p,
+ "timer=%d, control=%d, customerize=%d",
+ cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL],
+ cnt[BTC_NCNT_CUSTOMERIZE]);
+
+ return p - buf;
}
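
The rewrite above replaces the seq_printf() calls with the p/end accumulation idiom used throughout this patch: each scnprintf() returns the number of characters actually stored (at most end - p - 1, never the would-be length), so advancing p by the return value can never step past end, and the helper finally reports p - buf to its caller. A minimal user-space illustration of that bound-safety, using a vsnprintf()-based stand-in for the kernel's scnprintf(); the stand-in and all names here are for demonstration only, not driver code:

#include <stdarg.h>
#include <stdio.h>

/* Stand-in for the kernel's scnprintf(): returns the number of characters
 * actually stored in buf (excluding the trailing NUL), never more than
 * size - 1, and 0 when size is 0 -- unlike snprintf(), which returns the
 * length the output would have had without truncation.
 */
static size_t demo_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int ret;

	if (!size)
		return 0;

	va_start(args, fmt);
	ret = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (ret < 0)
		return 0;

	return (size_t)ret < size ? (size_t)ret : size - 1;
}

int main(void)
{
	char buf[32];
	char *p = buf, *end = buf + sizeof(buf);
	int i;

	/* Keep appending; once the buffer is full, each further call stores
	 * only the NUL and returns 0, so p stops at end - 1 and never
	 * overruns the buffer.
	 */
	for (i = 0; i < 10; i++)
		p += demo_scnprintf(p, end - p, "cnt[%d]=%d ", i, i * i);

	printf("wrote %zu bytes: \"%s\"\n", (size_t)(p - buf), buf);
	return 0;
}
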
-static void _show_summary_v7(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_summary_v7(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc_btf_fwinfo *pfwinfo = &rtwdev->btc.fwinfo;
struct rtw89_btc_fbtc_rpt_ctrl_v7 *prptctrl = NULL;
@@ -10756,100 +10913,111 @@ static void _show_summary_v7(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
struct rtw89_btc_wl_info *wl = &cx->wl;
u32 *cnt = rtwdev->btc.dm.cnt_notify;
+ char *p = buf, *end = buf + bufsz;
u32 cnt_sum = 0;
u8 i;
if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY))
- return;
+ return 0;
- seq_printf(m, "%s", "\n\r========== [Statistics] ==========");
+ p += scnprintf(p, end - p, "%s",
+ "\n\r========== [Statistics] ==========");
pcinfo = &pfwinfo->rpt_ctrl.cinfo;
if (pcinfo->valid && wl->status.map.lps != BTC_LPS_RF_OFF &&
!wl->status.map.rf_off) {
prptctrl = &pfwinfo->rpt_ctrl.finfo.v7;
- seq_printf(m,
- "\n\r %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d),"
- "c2h_cnt=%d(fw_send:%d, len:%d, max:%d), ",
- "[summary]", pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
- le16_to_cpu(prptctrl->rpt_info.cnt_h2c), pfwinfo->cnt_c2h,
- le16_to_cpu(prptctrl->rpt_info.cnt_c2h),
- le16_to_cpu(prptctrl->rpt_info.len_c2h),
- rtwdev->btc.ver->info_buf);
-
- seq_printf(m, "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x",
- pfwinfo->event[BTF_EVNT_RPT],
- le16_to_cpu(prptctrl->rpt_info.cnt),
- le32_to_cpu(prptctrl->rpt_info.en));
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d),"
+ "c2h_cnt=%d(fw_send:%d, len:%d, max:%d), ",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail,
+ le16_to_cpu(prptctrl->rpt_info.cnt_h2c),
+ pfwinfo->cnt_c2h,
+ le16_to_cpu(prptctrl->rpt_info.cnt_c2h),
+ le16_to_cpu(prptctrl->rpt_info.len_c2h),
+ rtwdev->btc.ver->info_buf);
+
+ p += scnprintf(p, end - p,
+ "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x",
+ pfwinfo->event[BTF_EVNT_RPT],
+ le16_to_cpu(prptctrl->rpt_info.cnt),
+ le32_to_cpu(prptctrl->rpt_info.en));
if (dm->error.map.wl_fw_hang)
- seq_puts(m, " (WL FW Hang!!)");
-
- seq_printf(m, "\n\r %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
- "[mailbox]", le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
- le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
- le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
-
- seq_printf(m, "A2DP_empty:%d(stop:%d/tx:%d/ack:%d/nack:%d)",
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
-
- seq_printf(m,
- "\n\r %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d/time:%dms]",
- "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
- cx->cnt_wl[BTC_WCNT_RFK_GO],
- cx->cnt_wl[BTC_WCNT_RFK_REJECT],
- cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT],
- wl->rfk_info.proc_time);
-
- seq_printf(m, ", bt_rfk[req:%d]",
- le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]));
-
- seq_printf(m, ", AOAC[RF_on:%d/RF_off:%d]",
- le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on),
- le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off));
+ p += scnprintf(p, end - p, " (WL FW Hang!!)");
+
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
+ "[mailbox]",
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
+
+ p += scnprintf(p, end - p,
+ "A2DP_empty:%d(stop:%d/tx:%d/ack:%d/nack:%d)",
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
+
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d/time:%dms]",
+ "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
+ cx->cnt_wl[BTC_WCNT_RFK_GO],
+ cx->cnt_wl[BTC_WCNT_RFK_REJECT],
+ cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT],
+ wl->rfk_info.proc_time);
+
+ p += scnprintf(p, end - p, ", bt_rfk[req:%d]",
+ le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]));
+
+ p += scnprintf(p, end - p, ", AOAC[RF_on:%d/RF_off:%d]",
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on),
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off));
} else {
- seq_printf(m,
- "\n\r %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d (lps=%d/rf_off=%d)",
- "[summary]",
- pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
- pfwinfo->cnt_c2h,
- wl->status.map.lps, wl->status.map.rf_off);
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d (lps=%d/rf_off=%d)",
+ "[summary]",
+ pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
+ pfwinfo->cnt_c2h,
+ wl->status.map.lps, wl->status.map.rf_off);
}
for (i = 0; i < BTC_NCNT_NUM; i++)
cnt_sum += dm->cnt_notify[i];
- seq_printf(m,
- "\n\r %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
- "[notify_cnt]",
- cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
- cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
+ "[notify_cnt]",
+ cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
+ cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+
+ p += scnprintf(p, end - p,
+ "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d",
+ cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
+ cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
+ cnt[BTC_NCNT_WL_STA]);
- seq_printf(m,
- "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d",
- cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
- cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
- cnt[BTC_NCNT_WL_STA]);
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, switch_chbw=%d, special_pkt=%d, ",
+ "[notify_cnt]",
+ cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH],
+ cnt[BTC_NCNT_SWITCH_BAND], cnt[BTC_NCNT_SWITCH_CHBW],
+ cnt[BTC_NCNT_SPECIAL_PACKET]);
- seq_printf(m,
- "\n\r %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, switch_chbw=%d, special_pkt=%d, ",
- "[notify_cnt]",
- cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH],
- cnt[BTC_NCNT_SWITCH_BAND], cnt[BTC_NCNT_SWITCH_CHBW],
- cnt[BTC_NCNT_SPECIAL_PACKET]);
+ p += scnprintf(p, end - p,
+ "timer=%d, customerize=%d, hub_msg=%d, chg_fw=%d, send_cc=%d",
+ cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CUSTOMERIZE],
+ rtwdev->btc.hubmsg_cnt, cnt[BTC_NCNT_RESUME_DL_FW],
+ cnt[BTC_NCNT_COUNTRYCODE]);
- seq_printf(m, "timer=%d, customerize=%d, hub_msg=%d, chg_fw=%d, send_cc=%d",
- cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CUSTOMERIZE],
- rtwdev->btc.hubmsg_cnt, cnt[BTC_NCNT_RESUME_DL_FW],
- cnt[BTC_NCNT_COUNTRYCODE]);
+ return p - buf;
}
-static void _show_summary_v8(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_summary_v8(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc_btf_fwinfo *pfwinfo = &rtwdev->btc.fwinfo;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
@@ -10858,153 +11026,173 @@ static void _show_summary_v8(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
struct rtw89_btc_wl_info *wl = &cx->wl;
u32 *cnt = rtwdev->btc.dm.cnt_notify;
+ char *p = buf, *end = buf + bufsz;
u32 cnt_sum = 0;
u8 i;
if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY))
- return;
+ return 0;
- seq_printf(m, "%s", "\n\r========== [Statistics] ==========");
+ p += scnprintf(p, end - p, "%s",
+ "\n\r========== [Statistics] ==========");
pcinfo = &pfwinfo->rpt_ctrl.cinfo;
if (pcinfo->valid && wl->status.map.lps != BTC_LPS_RF_OFF &&
!wl->status.map.rf_off) {
prptctrl = &pfwinfo->rpt_ctrl.finfo.v8;
- seq_printf(m,
- "\n\r %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d, len:%d, max:fw-%d/drv-%d), ",
- "[summary]", pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
- le16_to_cpu(prptctrl->rpt_info.cnt_h2c), pfwinfo->cnt_c2h,
- le16_to_cpu(prptctrl->rpt_info.cnt_c2h),
- le16_to_cpu(prptctrl->rpt_info.len_c2h),
- (prptctrl->rpt_len_max_h << 8) + prptctrl->rpt_len_max_l,
- rtwdev->btc.ver->info_buf);
-
- seq_printf(m, "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x",
- pfwinfo->event[BTF_EVNT_RPT],
- le16_to_cpu(prptctrl->rpt_info.cnt),
- le32_to_cpu(prptctrl->rpt_info.en));
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d, len:%d, max:fw-%d/drv-%d), ",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail,
+ le16_to_cpu(prptctrl->rpt_info.cnt_h2c),
+ pfwinfo->cnt_c2h,
+ le16_to_cpu(prptctrl->rpt_info.cnt_c2h),
+ le16_to_cpu(prptctrl->rpt_info.len_c2h),
+ (prptctrl->rpt_len_max_h << 8) + prptctrl->rpt_len_max_l,
+ rtwdev->btc.ver->info_buf);
+
+ p += scnprintf(p, end - p,
+ "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x",
+ pfwinfo->event[BTF_EVNT_RPT],
+ le16_to_cpu(prptctrl->rpt_info.cnt),
+ le32_to_cpu(prptctrl->rpt_info.en));
if (dm->error.map.wl_fw_hang)
- seq_puts(m, " (WL FW Hang!!)");
-
- seq_printf(m, "\n\r %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
- "[mailbox]", le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
- le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
- le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
-
- seq_printf(m, "A2DP_empty:%d(stop:%d/tx:%d/ack:%d/nack:%d)",
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
-
- seq_printf(m,
- "\n\r %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d/time:%dms]",
- "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
- cx->cnt_wl[BTC_WCNT_RFK_GO],
- cx->cnt_wl[BTC_WCNT_RFK_REJECT],
- cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT],
- wl->rfk_info.proc_time);
-
- seq_printf(m, ", bt_rfk[req:%d]",
- le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]));
-
- seq_printf(m, ", AOAC[RF_on:%d/RF_off:%d]",
- le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on),
- le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off));
+ p += scnprintf(p, end - p, " (WL FW Hang!!)");
+
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
+ "[mailbox]",
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
+
+ p += scnprintf(p, end - p,
+ "A2DP_empty:%d(stop:%d/tx:%d/ack:%d/nack:%d)",
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
+
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d/time:%dms]",
+ "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
+ cx->cnt_wl[BTC_WCNT_RFK_GO],
+ cx->cnt_wl[BTC_WCNT_RFK_REJECT],
+ cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT],
+ wl->rfk_info.proc_time);
+
+ p += scnprintf(p, end - p, ", bt_rfk[req:%d]",
+ le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]));
+
+ p += scnprintf(p, end - p, ", AOAC[RF_on:%d/RF_off:%d]",
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on),
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off));
} else {
- seq_printf(m,
- "\n\r %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d (lps=%d/rf_off=%d)",
- "[summary]",
- pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
- pfwinfo->cnt_c2h,
- wl->status.map.lps, wl->status.map.rf_off);
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d (lps=%d/rf_off=%d)",
+ "[summary]",
+ pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
+ pfwinfo->cnt_c2h,
+ wl->status.map.lps, wl->status.map.rf_off);
}
for (i = 0; i < BTC_NCNT_NUM; i++)
cnt_sum += dm->cnt_notify[i];
- seq_printf(m,
- "\n\r %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
- "[notify_cnt]",
- cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
- cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
+ "[notify_cnt]",
+ cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
+ cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
- seq_printf(m,
- "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d",
- cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
- cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
- cnt[BTC_NCNT_WL_STA]);
+ p += scnprintf(p, end - p,
+ "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d",
+ cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
+ cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
+ cnt[BTC_NCNT_WL_STA]);
- seq_printf(m,
- "\n\r %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, switch_chbw=%d, special_pkt=%d, ",
- "[notify_cnt]",
- cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH],
- cnt[BTC_NCNT_SWITCH_BAND], cnt[BTC_NCNT_SWITCH_CHBW],
- cnt[BTC_NCNT_SPECIAL_PACKET]);
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, switch_chbw=%d, special_pkt=%d, ",
+ "[notify_cnt]",
+ cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH],
+ cnt[BTC_NCNT_SWITCH_BAND], cnt[BTC_NCNT_SWITCH_CHBW],
+ cnt[BTC_NCNT_SPECIAL_PACKET]);
- seq_printf(m, "timer=%d, customerize=%d, hub_msg=%d, chg_fw=%d, send_cc=%d",
- cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CUSTOMERIZE],
- rtwdev->btc.hubmsg_cnt, cnt[BTC_NCNT_RESUME_DL_FW],
- cnt[BTC_NCNT_COUNTRYCODE]);
+ p += scnprintf(p, end - p,
+ "timer=%d, customerize=%d, hub_msg=%d, chg_fw=%d, send_cc=%d",
+ cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CUSTOMERIZE],
+ rtwdev->btc.hubmsg_cnt, cnt[BTC_NCNT_RESUME_DL_FW],
+ cnt[BTC_NCNT_COUNTRYCODE]);
+
+ return p - buf;
}
-void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m)
+ssize_t rtw89_btc_dump_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_fw_suit *fw_suit = &rtwdev->fw.normal;
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_bt_info *bt = &cx->bt;
-
- seq_puts(m, "=========================================\n");
- seq_printf(m, "WL FW / BT FW %d.%d.%d.%d / NA\n",
- fw_suit->major_ver, fw_suit->minor_ver,
- fw_suit->sub_ver, fw_suit->sub_idex);
- seq_printf(m, "manual %d\n", btc->manual_ctrl);
-
- seq_puts(m, "=========================================\n");
-
- seq_printf(m, "\n\r %-15s : raw_data[%02x %02x %02x %02x %02x %02x] (type:%s/cnt:%d/same:%d)",
- "[bt_info]",
- bt->raw_info[2], bt->raw_info[3],
- bt->raw_info[4], bt->raw_info[5],
- bt->raw_info[6], bt->raw_info[7],
- bt->raw_info[0] == BTC_BTINFO_AUTO ? "auto" : "reply",
- cx->cnt_bt[BTC_BCNT_INFOUPDATE],
- cx->cnt_bt[BTC_BCNT_INFOSAME]);
-
- seq_puts(m, "\n=========================================\n");
-
- _show_cx_info(rtwdev, m);
- _show_wl_info(rtwdev, m);
- _show_bt_info(rtwdev, m);
- _show_dm_info(rtwdev, m);
- _show_fw_dm_msg(rtwdev, m);
+ char *p = buf, *end = buf + bufsz;
+
+ p += scnprintf(p, end - p,
+ "=========================================\n");
+ p += scnprintf(p, end - p,
+ "WL FW / BT FW %d.%d.%d.%d / NA\n",
+ fw_suit->major_ver, fw_suit->minor_ver,
+ fw_suit->sub_ver, fw_suit->sub_idex);
+ p += scnprintf(p, end - p, "manual %d\n",
+ btc->manual_ctrl);
+
+ p += scnprintf(p, end - p,
+ "=========================================\n");
+
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : raw_data[%02x %02x %02x %02x %02x %02x] (type:%s/cnt:%d/same:%d)",
+ "[bt_info]",
+ bt->raw_info[2], bt->raw_info[3],
+ bt->raw_info[4], bt->raw_info[5],
+ bt->raw_info[6], bt->raw_info[7],
+ bt->raw_info[0] == BTC_BTINFO_AUTO ? "auto" : "reply",
+ cx->cnt_bt[BTC_BCNT_INFOUPDATE],
+ cx->cnt_bt[BTC_BCNT_INFOSAME]);
+
+ p += scnprintf(p, end - p,
+ "\n=========================================\n");
+
+ p += _show_cx_info(rtwdev, p, end - p);
+ p += _show_wl_info(rtwdev, p, end - p);
+ p += _show_bt_info(rtwdev, p, end - p);
+ p += _show_dm_info(rtwdev, p, end - p);
+ p += _show_fw_dm_msg(rtwdev, p, end - p);
if (ver->fcxmreg == 1)
- _show_mreg_v1(rtwdev, m);
+ p += _show_mreg_v1(rtwdev, p, end - p);
else if (ver->fcxmreg == 2)
- _show_mreg_v2(rtwdev, m);
+ p += _show_mreg_v2(rtwdev, p, end - p);
else if (ver->fcxmreg == 7)
- _show_mreg_v7(rtwdev, m);
+ p += _show_mreg_v7(rtwdev, p, end - p);
- _show_gpio_dbg(rtwdev, m);
+ p += _show_gpio_dbg(rtwdev, p, end - p);
if (ver->fcxbtcrpt == 1)
- _show_summary_v1(rtwdev, m);
+ p += _show_summary_v1(rtwdev, p, end - p);
else if (ver->fcxbtcrpt == 4)
- _show_summary_v4(rtwdev, m);
+ p += _show_summary_v4(rtwdev, p, end - p);
else if (ver->fcxbtcrpt == 5)
- _show_summary_v5(rtwdev, m);
+ p += _show_summary_v5(rtwdev, p, end - p);
else if (ver->fcxbtcrpt == 105)
- _show_summary_v105(rtwdev, m);
+ p += _show_summary_v105(rtwdev, p, end - p);
else if (ver->fcxbtcrpt == 7)
- _show_summary_v7(rtwdev, m);
+ p += _show_summary_v7(rtwdev, p, end - p);
else if (ver->fcxbtcrpt == 8)
- _show_summary_v8(rtwdev, m);
+ p += _show_summary_v8(rtwdev, p, end - p);
+
+ return p - buf;
}
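
With every _show_*() helper converted to return the number of bytes it wrote, rtw89_btc_dump_info() becomes a straightforward composition: each callee gets the remaining window (p, end - p) and the caller advances by the return value, so the final p - buf is exact even when later sections are truncated down to zero bytes. A schematic fragment in the same style; the demo_* names are invented for illustration and a kernel build context is assumed for scnprintf():

#include <linux/kernel.h>
#include <linux/types.h>

struct demo_dev { int foo_cnt; int bar_cnt; };

/* Each section writer reports exactly how many bytes it stored. */
static int demo_show_foo(struct demo_dev *d, char *buf, size_t bufsz)
{
	char *p = buf, *end = buf + bufsz;

	p += scnprintf(p, end - p, "foo=%d\n", d->foo_cnt);
	return p - buf;
}

static int demo_show_bar(struct demo_dev *d, char *buf, size_t bufsz)
{
	char *p = buf, *end = buf + bufsz;

	p += scnprintf(p, end - p, "bar=%d\n", d->bar_cnt);
	return p - buf;
}

/* Top-level dump chains the writers over one shared window, the same way
 * rtw89_btc_dump_info() chains _show_cx_info(), _show_wl_info(), ...
 */
static ssize_t demo_dump(struct demo_dev *d, char *buf, size_t bufsz)
{
	char *p = buf, *end = buf + bufsz;

	p += demo_show_foo(d, p, end - p);
	p += demo_show_bar(d, p, end - p);

	return p - buf;
}
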
void rtw89_coex_recognize_ver(struct rtw89_dev *rtwdev)
@@ -11037,3 +11225,24 @@ out:
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC] use version def[%d] = 0x%08x\n",
(int)(btc->ver - rtw89_btc_ver_defs), btc->ver->fw_ver_code);
}
+
+void rtw89_btc_ntfy_preserve_bt_time(struct rtw89_dev *rtwdev, u32 ms)
+{
+ struct rtw89_btc_bt_link_info *bt_linfo = &rtwdev->btc.cx.bt.link_info;
+ struct rtw89_btc_bt_a2dp_desc a2dp = bt_linfo->a2dp_desc;
+
+ if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
+ return;
+
+ if (!a2dp.exist)
+ return;
+
+ fsleep(ms * 1000);
+}
+EXPORT_SYMBOL(rtw89_btc_ntfy_preserve_bt_time);
+
+void rtw89_btc_ntfy_conn_rfk(struct rtw89_dev *rtwdev, bool state)
+{
+ rtwdev->btc.cx.wl.rfk_info.con_rfk = state;
+}
+EXPORT_SYMBOL(rtw89_btc_ntfy_conn_rfk);
diff --git a/drivers/net/wireless/realtek/rtw89/coex.h b/drivers/net/wireless/realtek/rtw89/coex.h
index dbdb56e063ef..e3a1fcd79620 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.h
+++ b/drivers/net/wireless/realtek/rtw89/coex.h
@@ -267,10 +267,10 @@ void rtw89_btc_ntfy_scan_finish(struct rtw89_dev *rtwdev, u8 phy_idx);
void rtw89_btc_ntfy_switch_band(struct rtw89_dev *rtwdev, u8 phy_idx, u8 band);
void rtw89_btc_ntfy_specific_packet(struct rtw89_dev *rtwdev,
enum btc_pkt_type pkt_type);
-void rtw89_btc_ntfy_eapol_packet_work(struct work_struct *work);
-void rtw89_btc_ntfy_arp_packet_work(struct work_struct *work);
-void rtw89_btc_ntfy_dhcp_packet_work(struct work_struct *work);
-void rtw89_btc_ntfy_icmp_packet_work(struct work_struct *work);
+void rtw89_btc_ntfy_eapol_packet_work(struct wiphy *wiphy, struct wiphy_work *work);
+void rtw89_btc_ntfy_arp_packet_work(struct wiphy *wiphy, struct wiphy_work *work);
+void rtw89_btc_ntfy_dhcp_packet_work(struct wiphy *wiphy, struct wiphy_work *work);
+void rtw89_btc_ntfy_icmp_packet_work(struct wiphy *wiphy, struct wiphy_work *work);
void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link,
@@ -282,14 +282,16 @@ void rtw89_btc_ntfy_wl_rfk(struct rtw89_dev *rtwdev, u8 phy_map,
void rtw89_btc_ntfy_wl_sta(struct rtw89_dev *rtwdev);
void rtw89_btc_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u32 len, u8 class, u8 func);
-void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m);
-void rtw89_coex_act1_work(struct work_struct *work);
-void rtw89_coex_bt_devinfo_work(struct work_struct *work);
-void rtw89_coex_rfk_chk_work(struct work_struct *work);
+ssize_t rtw89_btc_dump_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz);
+void rtw89_coex_act1_work(struct wiphy *wiphy, struct wiphy_work *work);
+void rtw89_coex_bt_devinfo_work(struct wiphy *wiphy, struct wiphy_work *work);
+void rtw89_coex_rfk_chk_work(struct wiphy *wiphy, struct wiphy_work *work);
void rtw89_coex_power_on(struct rtw89_dev *rtwdev);
void rtw89_btc_set_policy(struct rtw89_dev *rtwdev, u16 policy_type);
void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type);
void rtw89_coex_recognize_ver(struct rtw89_dev *rtwdev);
+void rtw89_btc_ntfy_preserve_bt_time(struct rtw89_dev *rtwdev, u32 ms);
+void rtw89_btc_ntfy_conn_rfk(struct rtw89_dev *rtwdev, bool state);
static inline u8 rtw89_btc_phymap(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index 85f739f1173d..cc9b014457ac 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -913,16 +913,17 @@ static enum btc_pkt_type
rtw89_core_tx_btc_spec_pkt_notify(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
+ struct wiphy *wiphy = rtwdev->hw->wiphy;
struct sk_buff *skb = tx_req->skb;
struct udphdr *udphdr;
if (IEEE80211_SKB_CB(skb)->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
- ieee80211_queue_work(rtwdev->hw, &rtwdev->btc.eapol_notify_work);
+ wiphy_work_queue(wiphy, &rtwdev->btc.eapol_notify_work);
return PACKET_EAPOL;
}
if (skb->protocol == htons(ETH_P_ARP)) {
- ieee80211_queue_work(rtwdev->hw, &rtwdev->btc.arp_notify_work);
+ wiphy_work_queue(wiphy, &rtwdev->btc.arp_notify_work);
return PACKET_ARP;
}
@@ -932,14 +933,14 @@ rtw89_core_tx_btc_spec_pkt_notify(struct rtw89_dev *rtwdev,
if (((udphdr->source == htons(67) && udphdr->dest == htons(68)) ||
(udphdr->source == htons(68) && udphdr->dest == htons(67))) &&
skb->len > 282) {
- ieee80211_queue_work(rtwdev->hw, &rtwdev->btc.dhcp_notify_work);
+ wiphy_work_queue(wiphy, &rtwdev->btc.dhcp_notify_work);
return PACKET_DHCP;
}
}
if (skb->protocol == htons(ETH_P_IP) &&
ip_hdr(skb)->protocol == IPPROTO_ICMP) {
- ieee80211_queue_work(rtwdev->hw, &rtwdev->btc.icmp_notify_work);
+ wiphy_work_queue(wiphy, &rtwdev->btc.icmp_notify_work);
return PACKET_ICMP;
}
@@ -2071,17 +2072,17 @@ static void rtw89_stats_trigger_frame(struct rtw89_dev *rtwdev,
}
}
-static void rtw89_cancel_6ghz_probe_work(struct work_struct *work)
+static void rtw89_cancel_6ghz_probe_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
cancel_6ghz_probe_work);
struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
struct rtw89_pktofld_info *info;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(wiphy);
if (!rtwdev->scanning)
- goto out;
+ return;
list_for_each_entry(info, &pkt_list[NL80211_BAND_6GHZ], list) {
if (!info->cancel || !test_bit(info->id, rtwdev->pkt_offload))
@@ -2094,9 +2095,6 @@ static void rtw89_cancel_6ghz_probe_work(struct work_struct *work)
* since if during scanning, pkt_list is accessed in bottom half.
*/
}
-
-out:
- mutex_unlock(&rtwdev->mutex);
}
static void rtw89_core_cancel_6ghz_probe_tx(struct rtw89_dev *rtwdev,
@@ -2131,7 +2129,7 @@ static void rtw89_core_cancel_6ghz_probe_tx(struct rtw89_dev *rtwdev,
}
if (queue_work)
- ieee80211_queue_work(rtwdev->hw, &rtwdev->cancel_6ghz_probe_work);
+ wiphy_work_queue(rtwdev->hw->wiphy, &rtwdev->cancel_6ghz_probe_work);
}
static void rtw89_vif_sync_bcn_tsf(struct rtw89_vif_link *rtwvif_link,
@@ -2194,8 +2192,11 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
}
pkt_stat->beacon_nr++;
- if (phy_ppdu)
+ if (phy_ppdu) {
ewma_rssi_add(&rtwdev->phystat.bcn_rssi, phy_ppdu->rssi_avg);
+ if (!test_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags))
+ rtwvif_link->bcn_bw_idx = phy_ppdu->bw_idx;
+ }
pkt_stat->beacon_rate = desc_info->data_rate;
}
@@ -2381,6 +2382,49 @@ static void rtw89_core_validate_rx_signal(struct ieee80211_rx_status *rx_status)
rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
}
+static void rtw89_core_update_rx_freq_from_ie(struct rtw89_dev *rtwdev,
+ struct sk_buff *skb,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+ size_t hdr_len, ielen;
+ u8 *variable;
+ int chan;
+
+ if (!rtwdev->chip->rx_freq_frome_ie)
+ return;
+
+ if (!rtwdev->scanning)
+ return;
+
+ if (ieee80211_is_beacon(mgmt->frame_control)) {
+ variable = mgmt->u.beacon.variable;
+ hdr_len = offsetof(struct ieee80211_mgmt,
+ u.beacon.variable);
+ } else if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+ variable = mgmt->u.probe_resp.variable;
+ hdr_len = offsetof(struct ieee80211_mgmt,
+ u.probe_resp.variable);
+ } else {
+ return;
+ }
+
+ if (skb->len > hdr_len)
+ ielen = skb->len - hdr_len;
+ else
+ return;
+
+ /* The parsing code for both 2GHz and 5GHz bands is the same in this
+ * function.
+ */
+ chan = cfg80211_get_ies_channel_number(variable, ielen, NL80211_BAND_2GHZ);
+ if (chan == -1)
+ return;
+
+ rx_status->band = chan > 14 ? RTW89_BAND_5G : RTW89_BAND_2G;
+ rx_status->freq = ieee80211_channel_to_frequency(chan, rx_status->band);
+}
+
static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct rtw89_rx_desc_info *desc_info,
@@ -2398,6 +2442,7 @@ static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
rtw89_core_update_rx_status_by_ppdu(rtwdev, rx_status, phy_ppdu);
rtw89_core_update_radiotap(rtwdev, skb_ppdu, rx_status);
rtw89_core_validate_rx_signal(rx_status);
+ rtw89_core_update_rx_freq_from_ie(rtwdev, skb_ppdu, rx_status);
/* In low power mode, it does RX in thread context. */
local_bh_disable();
@@ -3143,13 +3188,14 @@ static void rtw89_core_txq_schedule(struct rtw89_dev *rtwdev, u8 ac, bool *reinv
ieee80211_txq_schedule_end(hw, ac);
}
-static void rtw89_ips_work(struct work_struct *work)
+static void rtw89_ips_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
ips_work);
- mutex_lock(&rtwdev->mutex);
+
+ lockdep_assert_wiphy(wiphy);
+
rtw89_enter_ips_by_hwflags(rtwdev);
- mutex_unlock(&rtwdev->mutex);
}
static void rtw89_core_txq_work(struct work_struct *w)
@@ -3291,7 +3337,7 @@ void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
u32 reg;
int ret;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
rtw89_leave_ips_by_hwflags(rtwdev);
rtw89_leave_lps(rtwdev);
@@ -3330,9 +3376,9 @@ void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtw89_write32_clr(rtwdev, reg, B_AX_A_UC_CAM_MATCH | B_AX_A_BC_CAM_MATCH);
ieee80211_ready_on_channel(hw);
- cancel_delayed_work(&rtwvif->roc.roc_work);
- ieee80211_queue_delayed_work(hw, &rtwvif->roc.roc_work,
- msecs_to_jiffies(rtwvif->roc.duration));
+ wiphy_delayed_work_cancel(hw->wiphy, &rtwvif->roc.roc_work);
+ wiphy_delayed_work_queue(hw->wiphy, &rtwvif->roc.roc_work,
+ msecs_to_jiffies(rtwvif->roc.duration));
}
void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
@@ -3345,7 +3391,7 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
u32 reg;
int ret;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
ieee80211_remain_on_channel_expired(hw);
@@ -3377,18 +3423,18 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
queue_work(rtwdev->txq_wq, &rtwdev->txq_work);
if (hw->conf.flags & IEEE80211_CONF_IDLE)
- ieee80211_queue_delayed_work(hw, &roc->roc_work,
- msecs_to_jiffies(RTW89_ROC_IDLE_TIMEOUT));
+ wiphy_delayed_work_queue(hw->wiphy, &roc->roc_work,
+ msecs_to_jiffies(RTW89_ROC_IDLE_TIMEOUT));
}
-void rtw89_roc_work(struct work_struct *work)
+void rtw89_roc_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_vif *rtwvif = container_of(work, struct rtw89_vif,
roc.roc_work.work);
struct rtw89_dev *rtwdev = rtwvif->rtwdev;
struct rtw89_roc *roc = &rtwvif->roc;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(wiphy);
switch (roc->state) {
case RTW89_ROC_IDLE:
@@ -3401,8 +3447,6 @@ void rtw89_roc_work(struct work_struct *work)
default:
break;
}
-
- mutex_unlock(&rtwdev->mutex);
}
static enum rtw89_tfc_lv rtw89_get_traffic_level(struct rtw89_dev *rtwdev,
@@ -3533,26 +3577,26 @@ void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev,
ewma_tp_init(&stats->rx_ewma_tp);
}
-static void rtw89_track_work(struct work_struct *work)
+static void rtw89_track_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
track_work.work);
bool tfc_changed;
+ lockdep_assert_wiphy(wiphy);
+
if (test_bit(RTW89_FLAG_FORBIDDEN_TRACK_WROK, rtwdev->flags))
return;
- mutex_lock(&rtwdev->mutex);
-
if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
- goto out;
+ return;
- ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->track_work,
- RTW89_TRACK_WORK_PERIOD);
+ wiphy_delayed_work_queue(wiphy, &rtwdev->track_work,
+ RTW89_TRACK_WORK_PERIOD);
tfc_changed = rtw89_traffic_stats_track(rtwdev);
if (rtwdev->scanning)
- goto out;
+ return;
rtw89_leave_lps(rtwdev);
@@ -3577,9 +3621,6 @@ static void rtw89_track_work(struct work_struct *work)
if (rtwdev->lps_enabled && !rtwdev->btc.lps)
rtw89_enter_lps_track(rtwdev);
-
-out:
- mutex_unlock(&rtwdev->mutex);
}
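
The work-item conversions in this hunk all follow one recipe: the handler prototype changes from work_struct to (wiphy, wiphy_work), the explicit rtwdev->mutex lock/unlock pair turns into lockdep_assert_wiphy() because cfg80211 runs wiphy_work with the wiphy mutex already held, and init/queue/cancel move to the wiphy_work_*()/wiphy_delayed_work_*() helpers. A schematic before/after fragment, not a standalone module; the demo_* names are invented and only stand in for the driver's own symbols:

#include <linux/jiffies.h>
#include <net/cfg80211.h>

struct demo_dev {
	struct wiphy *wiphy;
	struct wiphy_delayed_work track_work;	/* was: struct delayed_work */
};

/* cfg80211 already holds the wiphy mutex when it runs the work, so the old
 * mutex_lock()/mutex_unlock() pair becomes an assertion and early returns
 * replace the "goto out" unlock paths.
 */
static void demo_track_work(struct wiphy *wiphy, struct wiphy_work *work)
{
	struct demo_dev *dev = container_of(work, struct demo_dev,
					    track_work.work);

	lockdep_assert_wiphy(wiphy);

	/* re-arm, as rtw89_track_work() does with RTW89_TRACK_WORK_PERIOD */
	wiphy_delayed_work_queue(wiphy, &dev->track_work,
				 msecs_to_jiffies(2000));
}

static void demo_init(struct demo_dev *dev)
{
	wiphy_delayed_work_init(&dev->track_work, demo_track_work);
}

static void demo_stop(struct demo_dev *dev)
{
	/* must be called with the wiphy mutex held; no *_sync() flavour is
	 * needed because the work itself runs under that same mutex
	 */
	wiphy_delayed_work_cancel(dev->wiphy, &dev->track_work);
}
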
u8 rtw89_core_acquire_bit_map(unsigned long *addr, unsigned long size)
@@ -3613,7 +3654,7 @@ int rtw89_core_acquire_sta_ba_entry(struct rtw89_dev *rtwdev,
u8 idx;
int i;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
idx = rtw89_core_acquire_bit_map(cam_info->ba_cam_map, chip->bacam_num);
if (idx == chip->bacam_num) {
@@ -3657,7 +3698,7 @@ int rtw89_core_release_sta_ba_entry(struct rtw89_dev *rtwdev,
struct rtw89_ba_cam_entry *entry = NULL, *tmp;
u8 idx;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
list_for_each_entry_safe(entry, tmp, &rtwsta_link->ba_cam_list, list) {
if (entry->tid != tid)
@@ -4432,26 +4473,26 @@ static void rtw89_core_ppdu_sts_init(struct rtw89_dev *rtwdev)
{
int i;
- for (i = 0; i < RTW89_PHY_MAX; i++)
+ for (i = 0; i < RTW89_PHY_NUM; i++)
skb_queue_head_init(&rtwdev->ppdu_sts.rx_queue[i]);
- for (i = 0; i < RTW89_PHY_MAX; i++)
+ for (i = 0; i < RTW89_PHY_NUM; i++)
rtwdev->ppdu_sts.curr_rx_ppdu_cnt[i] = U8_MAX;
}
-void rtw89_core_update_beacon_work(struct work_struct *work)
+void rtw89_core_update_beacon_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev;
struct rtw89_vif_link *rtwvif_link = container_of(work, struct rtw89_vif_link,
update_beacon_work);
+ lockdep_assert_wiphy(wiphy);
+
if (rtwvif_link->net_type != RTW89_NET_TYPE_AP_MODE)
return;
rtwdev = rtwvif_link->rtwvif->rtwdev;
- mutex_lock(&rtwdev->mutex);
rtw89_chip_h2c_update_beacon(rtwdev, rtwvif_link);
- mutex_unlock(&rtwdev->mutex);
}
int rtw89_wait_for_cond(struct rtw89_wait_info *wait, unsigned int cond)
@@ -4560,16 +4601,14 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
rtw89_mac_cfg_phy_rpt_bands(rtwdev, true);
rtw89_mac_update_rts_threshold(rtwdev);
- rtw89_tas_reset(rtwdev);
-
ret = rtw89_hci_start(rtwdev);
if (ret) {
rtw89_err(rtwdev, "failed to start hci\n");
return ret;
}
- ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->track_work,
- RTW89_TRACK_WORK_PERIOD);
+ wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->track_work,
+ RTW89_TRACK_WORK_PERIOD);
set_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
@@ -4583,8 +4622,11 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
void rtw89_core_stop(struct rtw89_dev *rtwdev)
{
+ struct wiphy *wiphy = rtwdev->hw->wiphy;
struct rtw89_btc *btc = &rtwdev->btc;
+ lockdep_assert_wiphy(wiphy);
+
	/* Prevent stopping twice; enter_ips and ops_stop */
if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
return;
@@ -4593,25 +4635,21 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev)
clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
- mutex_unlock(&rtwdev->mutex);
-
- cancel_work_sync(&rtwdev->c2h_work);
- cancel_work_sync(&rtwdev->cancel_6ghz_probe_work);
- cancel_work_sync(&btc->eapol_notify_work);
- cancel_work_sync(&btc->arp_notify_work);
- cancel_work_sync(&btc->dhcp_notify_work);
- cancel_work_sync(&btc->icmp_notify_work);
+ wiphy_work_cancel(wiphy, &rtwdev->c2h_work);
+ wiphy_work_cancel(wiphy, &rtwdev->cancel_6ghz_probe_work);
+ wiphy_work_cancel(wiphy, &btc->eapol_notify_work);
+ wiphy_work_cancel(wiphy, &btc->arp_notify_work);
+ wiphy_work_cancel(wiphy, &btc->dhcp_notify_work);
+ wiphy_work_cancel(wiphy, &btc->icmp_notify_work);
cancel_delayed_work_sync(&rtwdev->txq_reinvoke_work);
- cancel_delayed_work_sync(&rtwdev->track_work);
- cancel_delayed_work_sync(&rtwdev->chanctx_work);
- cancel_delayed_work_sync(&rtwdev->coex_act1_work);
- cancel_delayed_work_sync(&rtwdev->coex_bt_devinfo_work);
- cancel_delayed_work_sync(&rtwdev->coex_rfk_chk_work);
- cancel_delayed_work_sync(&rtwdev->cfo_track_work);
+ wiphy_delayed_work_cancel(wiphy, &rtwdev->track_work);
+ wiphy_delayed_work_cancel(wiphy, &rtwdev->chanctx_work);
+ wiphy_delayed_work_cancel(wiphy, &rtwdev->coex_act1_work);
+ wiphy_delayed_work_cancel(wiphy, &rtwdev->coex_bt_devinfo_work);
+ wiphy_delayed_work_cancel(wiphy, &rtwdev->coex_rfk_chk_work);
+ wiphy_delayed_work_cancel(wiphy, &rtwdev->cfo_track_work);
cancel_delayed_work_sync(&rtwdev->forbid_ba_work);
- cancel_delayed_work_sync(&rtwdev->antdiv_work);
-
- mutex_lock(&rtwdev->mutex);
+ wiphy_delayed_work_cancel(wiphy, &rtwdev->antdiv_work);
rtw89_btc_ntfy_poweroff(rtwdev);
rtw89_hci_flush_queues(rtwdev, BIT(rtwdev->hw->queues) - 1, true);
@@ -4825,20 +4863,19 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
INIT_WORK(&rtwdev->ba_work, rtw89_core_ba_work);
INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work);
INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work);
- INIT_DELAYED_WORK(&rtwdev->track_work, rtw89_track_work);
- INIT_DELAYED_WORK(&rtwdev->chanctx_work, rtw89_chanctx_work);
- INIT_DELAYED_WORK(&rtwdev->coex_act1_work, rtw89_coex_act1_work);
- INIT_DELAYED_WORK(&rtwdev->coex_bt_devinfo_work, rtw89_coex_bt_devinfo_work);
- INIT_DELAYED_WORK(&rtwdev->coex_rfk_chk_work, rtw89_coex_rfk_chk_work);
- INIT_DELAYED_WORK(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work);
+ wiphy_delayed_work_init(&rtwdev->track_work, rtw89_track_work);
+ wiphy_delayed_work_init(&rtwdev->chanctx_work, rtw89_chanctx_work);
+ wiphy_delayed_work_init(&rtwdev->coex_act1_work, rtw89_coex_act1_work);
+ wiphy_delayed_work_init(&rtwdev->coex_bt_devinfo_work, rtw89_coex_bt_devinfo_work);
+ wiphy_delayed_work_init(&rtwdev->coex_rfk_chk_work, rtw89_coex_rfk_chk_work);
+ wiphy_delayed_work_init(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work);
INIT_DELAYED_WORK(&rtwdev->forbid_ba_work, rtw89_forbid_ba_work);
- INIT_DELAYED_WORK(&rtwdev->antdiv_work, rtw89_phy_antdiv_work);
+ wiphy_delayed_work_init(&rtwdev->antdiv_work, rtw89_phy_antdiv_work);
rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
if (!rtwdev->txq_wq)
return -ENOMEM;
spin_lock_init(&rtwdev->ba_lock);
spin_lock_init(&rtwdev->rpwm_lock);
- mutex_init(&rtwdev->mutex);
mutex_init(&rtwdev->rf_mutex);
rtwdev->total_sta_assoc = 0;
@@ -4847,10 +4884,10 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
rtw89_init_wait(&rtwdev->wow.wait);
rtw89_init_wait(&rtwdev->mac.ps_wait);
- INIT_WORK(&rtwdev->c2h_work, rtw89_fw_c2h_work);
- INIT_WORK(&rtwdev->ips_work, rtw89_ips_work);
+ wiphy_work_init(&rtwdev->c2h_work, rtw89_fw_c2h_work);
+ wiphy_work_init(&rtwdev->ips_work, rtw89_ips_work);
+ wiphy_work_init(&rtwdev->cancel_6ghz_probe_work, rtw89_cancel_6ghz_probe_work);
INIT_WORK(&rtwdev->load_firmware_work, rtw89_load_firmware_work);
- INIT_WORK(&rtwdev->cancel_6ghz_probe_work, rtw89_cancel_6ghz_probe_work);
skb_queue_head_init(&rtwdev->c2h_queue);
rtw89_core_ppdu_sts_init(rtwdev);
@@ -4867,10 +4904,13 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
rtwdev->mlo_dbcc_mode = MLO_2_PLUS_0_1RF;
}
- INIT_WORK(&btc->eapol_notify_work, rtw89_btc_ntfy_eapol_packet_work);
- INIT_WORK(&btc->arp_notify_work, rtw89_btc_ntfy_arp_packet_work);
- INIT_WORK(&btc->dhcp_notify_work, rtw89_btc_ntfy_dhcp_packet_work);
- INIT_WORK(&btc->icmp_notify_work, rtw89_btc_ntfy_icmp_packet_work);
+ rtwdev->bbs[RTW89_PHY_0].phy_idx = RTW89_PHY_0;
+ rtwdev->bbs[RTW89_PHY_1].phy_idx = RTW89_PHY_1;
+
+ wiphy_work_init(&btc->eapol_notify_work, rtw89_btc_ntfy_eapol_packet_work);
+ wiphy_work_init(&btc->arp_notify_work, rtw89_btc_ntfy_arp_packet_work);
+ wiphy_work_init(&btc->dhcp_notify_work, rtw89_btc_ntfy_dhcp_packet_work);
+ wiphy_work_init(&btc->icmp_notify_work, rtw89_btc_ntfy_icmp_packet_work);
init_completion(&rtwdev->fw.req.completion);
init_completion(&rtwdev->rfk_wait.completion);
@@ -4890,11 +4930,10 @@ void rtw89_core_deinit(struct rtw89_dev *rtwdev)
{
rtw89_ser_deinit(rtwdev);
rtw89_unload_firmware(rtwdev);
- rtw89_fw_free_all_early_h2c(rtwdev);
+ __rtw89_fw_free_all_early_h2c(rtwdev);
destroy_workqueue(rtwdev->txq_wq);
mutex_destroy(&rtwdev->rf_mutex);
- mutex_destroy(&rtwdev->mutex);
}
EXPORT_SYMBOL(rtw89_core_deinit);
@@ -4903,6 +4942,7 @@ void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwv
{
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
rtwvif_link->chanctx_idx);
+ struct rtw89_bb_ctx *bb = rtw89_get_bb_ctx(rtwdev, rtwvif_link->phy_idx);
rtwdev->scanning = true;
rtw89_leave_lps(rtwdev);
@@ -4913,7 +4953,8 @@ void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwv
rtw89_btc_ntfy_scan_start(rtwdev, rtwvif_link->phy_idx, chan->band_type);
rtw89_chip_rfk_scan(rtwdev, rtwvif_link, true);
rtw89_hci_recalc_int_mit(rtwdev);
- rtw89_phy_config_edcca(rtwdev, true);
+ rtw89_phy_config_edcca(rtwdev, bb, true);
+ rtw89_tas_scan(rtwdev, true);
rtw89_fw_h2c_cam(rtwdev, rtwvif_link, NULL, mac_addr);
}
@@ -4922,6 +4963,7 @@ void rtw89_core_scan_complete(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link, bool hw_scan)
{
struct ieee80211_bss_conf *bss_conf;
+ struct rtw89_bb_ctx *bb;
if (!rtwvif_link)
return;
@@ -4937,12 +4979,15 @@ void rtw89_core_scan_complete(struct rtw89_dev *rtwdev,
rtw89_chip_rfk_scan(rtwdev, rtwvif_link, false);
rtw89_btc_ntfy_scan_finish(rtwdev, rtwvif_link->phy_idx);
- rtw89_phy_config_edcca(rtwdev, false);
+ bb = rtw89_get_bb_ctx(rtwdev, rtwvif_link->phy_idx);
+ rtw89_phy_config_edcca(rtwdev, bb, false);
+ rtw89_tas_scan(rtwdev, false);
rtwdev->scanning = false;
- rtwdev->dig.bypass_dig = true;
+ rtw89_for_each_active_bb(rtwdev, bb)
+ bb->dig.bypass_dig = true;
if (hw_scan && (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE))
- ieee80211_queue_work(rtwdev->hw, &rtwdev->ips_work);
+ wiphy_work_queue(rtwdev->hw->wiphy, &rtwdev->ips_work);
}
static void rtw89_read_chip_ver(struct rtw89_dev *rtwdev)
@@ -5042,8 +5087,6 @@ static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev)
rtw89_hci_mac_pre_deinit(rtwdev);
- rtw89_mac_pwr_off(rtwdev);
-
return 0;
}
@@ -5124,36 +5167,45 @@ int rtw89_chip_info_setup(struct rtw89_dev *rtwdev)
rtw89_read_chip_ver(rtwdev);
+ ret = rtw89_mac_pwr_on(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to power on\n");
+ return ret;
+ }
+
ret = rtw89_wait_firmware_completion(rtwdev);
if (ret) {
rtw89_err(rtwdev, "failed to wait firmware completion\n");
- return ret;
+ goto out;
}
ret = rtw89_fw_recognize(rtwdev);
if (ret) {
rtw89_err(rtwdev, "failed to recognize firmware\n");
- return ret;
+ goto out;
}
ret = rtw89_chip_efuse_info_setup(rtwdev);
if (ret)
- return ret;
+ goto out;
ret = rtw89_fw_recognize_elements(rtwdev);
if (ret) {
rtw89_err(rtwdev, "failed to recognize firmware elements\n");
- return ret;
+ goto out;
}
ret = rtw89_chip_board_info_setup(rtwdev);
if (ret)
- return ret;
+ goto out;
rtw89_core_setup_rfe_parms(rtwdev);
rtwdev->ps_mode = rtw89_update_ps_mode(rtwdev);
- return 0;
+out:
+ rtw89_mac_pwr_off(rtwdev);
+
+ return ret;
}
EXPORT_SYMBOL(rtw89_chip_info_setup);
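
rtw89_chip_info_setup() now powers the MAC on only for the duration of setup and funnels every exit, success or failure alike, through a single out: label so rtw89_mac_pwr_off() always runs. The shape of that control flow in isolation, with placeholder helpers (demo_* names are not driver symbols; only the structure mirrors the patch):

/* Placeholder helpers standing in for rtw89_mac_pwr_on()/rtw89_mac_pwr_off()
 * and the individual setup steps.
 */
struct demo_dev;

int demo_pwr_on(struct demo_dev *dev);
void demo_pwr_off(struct demo_dev *dev);
int demo_step_a(struct demo_dev *dev);
int demo_step_b(struct demo_dev *dev);

int demo_chip_info_setup(struct demo_dev *dev)
{
	int ret;

	ret = demo_pwr_on(dev);
	if (ret)
		return ret;		/* nothing powered on yet, plain return */

	ret = demo_step_a(dev);
	if (ret)
		goto out;

	ret = demo_step_b(dev);
	if (ret)
		goto out;

	ret = 0;
out:
	demo_pwr_off(dev);		/* runs on success and on every failure */
	return ret;
}
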
@@ -5296,7 +5348,7 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
goto err_free_supported_band;
}
- ret = rtw89_regd_init(rtwdev, rtw89_regd_notifier);
+ ret = rtw89_regd_init_hint(rtwdev);
if (ret) {
rtw89_err(rtwdev, "failed to init regd\n");
goto err_unregister_hw;
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index ff4894c7fa8a..4be05d6cad18 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -17,11 +17,13 @@ struct rtw89_dev;
struct rtw89_pci_info;
struct rtw89_mac_gen_def;
struct rtw89_phy_gen_def;
+struct rtw89_fw_blacklist;
struct rtw89_efuse_block_cfg;
struct rtw89_h2c_rf_tssi;
struct rtw89_fw_txpwr_track_cfg;
struct rtw89_phy_rfk_log_fmt;
struct rtw89_debugfs;
+struct rtw89_regd_data;
extern const struct ieee80211_ops rtw89_ops;
@@ -718,6 +720,7 @@ enum rtw89_ofdma_type {
RTW89_OFDMA_NUM,
};
+/* neither insert new in the middle, nor change any given definition */
enum rtw89_regulation_type {
RTW89_WW = 0,
RTW89_ETSI = 1,
@@ -826,7 +829,7 @@ enum rtw89_mac_idx {
enum rtw89_phy_idx {
RTW89_PHY_0 = 0,
RTW89_PHY_1 = 1,
- RTW89_PHY_MAX
+ RTW89_PHY_NUM,
};
#define __RTW89_MLD_MAX_LINK_NUM 2
@@ -1540,16 +1543,16 @@ struct rtw89_btc_u8_sta_chg {
};
struct rtw89_btc_wl_scan_info {
- u8 band[RTW89_PHY_MAX];
+ u8 band[RTW89_PHY_NUM];
u8 phy_map;
u8 rsvd;
};
struct rtw89_btc_wl_dbcc_info {
- u8 op_band[RTW89_PHY_MAX]; /* op band in each phy */
- u8 scan_band[RTW89_PHY_MAX]; /* scan band in each phy */
- u8 real_band[RTW89_PHY_MAX];
- u8 role[RTW89_PHY_MAX]; /* role in each phy */
+ u8 op_band[RTW89_PHY_NUM]; /* op band in each phy */
+ u8 scan_band[RTW89_PHY_NUM]; /* scan band in each phy */
+ u8 real_band[RTW89_PHY_NUM];
+ u8 role[RTW89_PHY_NUM]; /* role in each phy */
};
struct rtw89_btc_wl_active_role {
@@ -1761,7 +1764,8 @@ struct rtw89_btc_wl_rfk_info {
u32 phy_map: 2;
u32 band: 2;
u32 type: 8;
- u32 rsvd: 14;
+ u32 con_rfk: 1;
+ u32 rsvd: 13;
u32 start_time;
u32 proc_time;
@@ -1898,7 +1902,7 @@ struct rtw89_btc_wl_info {
u8 cn_report;
u8 coex_mode;
u8 pta_req_mac;
- u8 bt_polut_type[RTW89_PHY_MAX]; /* BT polluted WL-Tx type for phy0/1 */
+ u8 bt_polut_type[RTW89_PHY_NUM]; /* BT polluted WL-Tx type for phy0/1 */
bool is_5g_hi_channel;
bool pta_reg_mac_chg;
@@ -2230,7 +2234,7 @@ struct rtw89_btc_fbtc_rpt_ctrl_v4 {
struct rtw89_btc_fbtc_rpt_ctrl_wl_fw_info wl_fw_info;
struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox bt_mbx_info;
__le32 bt_cnt[BTC_BCNT_STA_MAX];
- struct rtw89_mac_ax_gnt gnt_val[RTW89_PHY_MAX];
+ struct rtw89_mac_ax_gnt gnt_val[RTW89_PHY_NUM];
} __packed;
struct rtw89_btc_fbtc_rpt_ctrl_v5 {
@@ -2238,7 +2242,7 @@ struct rtw89_btc_fbtc_rpt_ctrl_v5 {
u8 rsvd;
__le16 rsvd1;
- u8 gnt_val[RTW89_PHY_MAX][4];
+ u8 gnt_val[RTW89_PHY_NUM][4];
__le16 bt_cnt[BTC_BCNT_STA_MAX];
struct rtw89_btc_fbtc_rpt_ctrl_info_v5 rpt_info;
@@ -2250,7 +2254,7 @@ struct rtw89_btc_fbtc_rpt_ctrl_v105 {
u8 rsvd;
__le16 rsvd1;
- u8 gnt_val[RTW89_PHY_MAX][4];
+ u8 gnt_val[RTW89_PHY_NUM][4];
__le16 bt_cnt[BTC_BCNT_STA_MAX_V105];
struct rtw89_btc_fbtc_rpt_ctrl_info_v5 rpt_info;
@@ -2263,7 +2267,7 @@ struct rtw89_btc_fbtc_rpt_ctrl_v7 {
u8 rsvd1;
u8 rsvd2;
- u8 gnt_val[RTW89_PHY_MAX][4];
+ u8 gnt_val[RTW89_PHY_NUM][4];
__le16 bt_cnt[BTC_BCNT_STA_MAX_V105];
struct rtw89_btc_fbtc_rpt_ctrl_info_v8 rpt_info;
@@ -2276,7 +2280,7 @@ struct rtw89_btc_fbtc_rpt_ctrl_v8 {
u8 rpt_len_max_l; /* BTC_RPT_MAX bit0~7 */
u8 rpt_len_max_h; /* BTC_RPT_MAX bit8~15 */
- u8 gnt_val[RTW89_PHY_MAX][4];
+ u8 gnt_val[RTW89_PHY_NUM][4];
__le16 bt_cnt[BTC_BCNT_STA_MAX_V105];
struct rtw89_btc_fbtc_rpt_ctrl_info_v8 rpt_info;
@@ -3040,6 +3044,7 @@ struct rtw89_btc_rpt_cmn_info {
union rtw89_btc_fbtc_btafh_info {
struct rtw89_btc_fbtc_btafh v1;
struct rtw89_btc_fbtc_btafh_v2 v2;
+ struct rtw89_btc_fbtc_btafh_v7 v7;
};
struct rtw89_btc_report_ctrl_state {
@@ -3174,10 +3179,10 @@ struct rtw89_btc {
struct rtw89_btc_btf_fwinfo fwinfo;
struct rtw89_btc_dbg dbg;
- struct work_struct eapol_notify_work;
- struct work_struct arp_notify_work;
- struct work_struct dhcp_notify_work;
- struct work_struct icmp_notify_work;
+ struct wiphy_work eapol_notify_work;
+ struct wiphy_work arp_notify_work;
+ struct wiphy_work dhcp_notify_work;
+ struct wiphy_work icmp_notify_work;
u32 bt_req_len;
@@ -3444,7 +3449,7 @@ enum rtw89_roc_state {
struct rtw89_roc {
struct ieee80211_channel chan;
- struct delayed_work roc_work;
+ struct wiphy_delayed_work roc_work;
enum ieee80211_roc_type type;
enum rtw89_roc_state state;
int duration;
@@ -3498,6 +3503,7 @@ struct rtw89_vif_link {
u8 self_role;
u8 wmm;
u8 bcn_hit_cond;
+ u8 bcn_bw_idx;
u8 hit_rule;
u8 last_noa_nr;
u64 sync_bcn_tsf;
@@ -3514,7 +3520,7 @@ struct rtw89_vif_link {
bool pre_pwr_diff_en;
bool pwr_diff_en;
u8 def_tri_idx;
- struct work_struct update_beacon_work;
+ struct wiphy_work update_beacon_work;
struct rtw89_addr_cam_entry addr_cam;
struct rtw89_bssid_cam_entry bssid_cam;
struct ieee80211_tx_queue_params tx_params[IEEE80211_NUM_ACS];
@@ -3672,6 +3678,8 @@ struct rtw89_chip_ops {
int (*h2c_ampdu_cmac_tbl)(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link);
+ int (*h2c_txtime_cmac_tbl)(struct rtw89_dev *rtwdev,
+ struct rtw89_sta_link *rtwsta_link);
int (*h2c_default_dmac_tbl)(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link);
@@ -4189,10 +4197,12 @@ struct rtw89_edcca_regs {
u32 edcca_p_mask;
u32 ppdu_level;
u32 ppdu_mask;
- u32 rpt_a;
- u32 rpt_b;
- u32 rpt_sel;
- u32 rpt_sel_mask;
+ struct rtw89_edcca_p_regs {
+ u32 rpt_a;
+ u32 rpt_b;
+ u32 rpt_sel;
+ u32 rpt_sel_mask;
+ } p[RTW89_PHY_NUM];
u32 rpt_sel_be;
u32 rpt_sel_be_mask;
u32 tx_collision_t2r_st;
@@ -4231,6 +4241,7 @@ enum rtw89_chanctx_state {
enum rtw89_chanctx_callbacks {
RTW89_CHANCTX_CALLBACK_PLACEHOLDER,
RTW89_CHANCTX_CALLBACK_RFK,
+ RTW89_CHANCTX_CALLBACK_TAS,
NUM_OF_RTW89_CHANCTX_CALLBACKS,
};
@@ -4251,6 +4262,7 @@ struct rtw89_chip_info {
bool try_ce_fw;
u8 bbmcu_nr;
u32 needed_fw_elms;
+ const struct rtw89_fw_blacklist *fw_blacklist;
u32 fifo_size;
bool small_fifo_size;
u32 dle_scc_rsvd_size;
@@ -4271,10 +4283,13 @@ struct rtw89_chip_info {
bool support_unii4;
bool support_rnr;
bool support_ant_gain;
+ bool support_tas;
bool ul_tb_waveform_ctrl;
bool ul_tb_pwr_diff;
+ bool rx_freq_frome_ie;
bool hw_sec_hdr;
bool hw_mgmt_tx_encrypt;
+ bool hw_tkip_crypto;
u8 rf_path_num;
u8 tx_nss;
u8 rx_nss;
@@ -4478,6 +4493,7 @@ enum rtw89_fw_feature {
RTW89_FW_FEATURE_CH_INFO_BE_V0,
RTW89_FW_FEATURE_LPS_CH_INFO,
RTW89_FW_FEATURE_NO_PHYCAP_P1,
+ RTW89_FW_FEATURE_NO_POWER_DIFFERENCE,
};
struct rtw89_fw_suit {
@@ -4536,6 +4552,7 @@ struct rtw89_fw_elm_info {
struct rtw89_phy_table *rf_nctl;
struct rtw89_fw_txpwr_track_cfg *txpwr_trk;
struct rtw89_phy_rfk_log_fmt *rfk_log_fmt;
+ const struct rtw89_regd_data *regd;
};
enum rtw89_fw_mss_dev_type {
@@ -4651,6 +4668,7 @@ enum rtw89_ant_gain_domain_type {
struct rtw89_ant_gain_info {
s8 offset[RTW89_ANT_GAIN_CHAIN_NUM][RTW89_ANT_GAIN_SUBBAND_NR];
u32 regd_enabled;
+ bool block_country;
};
struct rtw89_6ghz_span {
@@ -4666,18 +4684,29 @@ struct rtw89_6ghz_span {
enum rtw89_tas_state {
RTW89_TAS_STATE_DPR_OFF,
RTW89_TAS_STATE_DPR_ON,
- RTW89_TAS_STATE_DPR_FORBID,
+ RTW89_TAS_STATE_STATIC_SAR,
};
-#define RTW89_TAS_MAX_WINDOW 50
+#define RTW89_TAS_TX_RATIO_WINDOW 6
+#define RTW89_TAS_TXPWR_WINDOW 180
struct rtw89_tas_info {
- s16 txpwr_history[RTW89_TAS_MAX_WINDOW];
- s32 total_txpwr;
- u8 cur_idx;
- s8 dpr_gap;
- s8 delta;
+ u16 tx_ratio_history[RTW89_TAS_TX_RATIO_WINDOW];
+ u64 txpwr_history[RTW89_TAS_TXPWR_WINDOW];
+ u8 txpwr_head_idx;
+ u8 txpwr_tail_idx;
+ u8 tx_ratio_idx;
+ u16 total_tx_ratio;
+ u64 total_txpwr;
+ u64 instant_txpwr;
+ u32 window_size;
+ s8 dpr_on_threshold;
+ s8 dpr_off_threshold;
+ enum rtw89_tas_state backup_state;
enum rtw89_tas_state state;
+ bool keep_history;
+ bool block_regd;
bool enable;
+ bool pause;
};
struct rtw89_chanctx_cfg {
@@ -4735,6 +4764,7 @@ struct rtw89_edcca_bak {
enum rtw89_dm_type {
RTW89_DM_DYNAMIC_EDCCA,
RTW89_DM_THERMAL_PROTECT,
+ RTW89_DM_TAS,
};
#define RTW89_THERMAL_PROT_LV_MAX 5
@@ -4762,12 +4792,11 @@ struct rtw89_hal {
struct rtw89_chanctx chanctx[NUM_OF_RTW89_CHANCTX];
struct cfg80211_chan_def roc_chandef;
- bool entity_active[RTW89_PHY_MAX];
+ bool entity_active[RTW89_PHY_NUM];
bool entity_pause;
enum rtw89_entity_mode entity_mode;
struct rtw89_entity_mgnt entity_mgnt;
- struct rtw89_edcca_bak edcca_bak;
u32 disabled_dm_bitmap; /* bitmap of enum rtw89_dm_type */
u8 thermal_prot_th;
@@ -4973,7 +5002,7 @@ struct rtw89_dpk_bkup_para {
struct rtw89_dpk_info {
bool is_dpk_enable;
bool is_dpk_reload_en;
- u8 dpk_gs[RTW89_PHY_MAX];
+ u8 dpk_gs[RTW89_PHY_NUM];
u16 dc_i[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM];
u16 dc_q[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM];
u8 corr_val[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM];
@@ -5135,7 +5164,7 @@ struct rtw89_tssi_info {
u32 alignment_backup_by_ch[RF_PATH_MAX][TSSI_MAX_CH_NUM][TSSI_ALIMK_VALUE_NUM];
u32 alignment_value[RF_PATH_MAX][TSSI_ALIMK_MAX][TSSI_ALIMK_VALUE_NUM];
bool alignment_done[RF_PATH_MAX][TSSI_ALIMK_MAX];
- u32 tssi_alimk_time;
+ u64 tssi_alimk_time;
};
struct rtw89_power_trim_info {
@@ -5146,9 +5175,27 @@ struct rtw89_power_trim_info {
u8 pad_bias_trim[RF_PATH_MAX];
};
+enum rtw89_regd_func {
+ RTW89_REGD_FUNC_TAS = 0, /* TAS (Time Average SAR) */
+ RTW89_REGD_FUNC_DAG = 1, /* DAG (Dynamic Antenna Gain) */
+
+ NUM_OF_RTW89_REGD_FUNC,
+};
+
struct rtw89_regd {
char alpha2[3];
u8 txpwr_regd[RTW89_BAND_NUM];
+ DECLARE_BITMAP(func_bitmap, NUM_OF_RTW89_REGD_FUNC);
+};
+
+struct rtw89_regd_data {
+ unsigned int nr;
+ struct rtw89_regd map[] __counted_by(nr);
+};
+
+struct rtw89_regd_ctrl {
+ unsigned int nr;
+ const struct rtw89_regd *map;
};
#define RTW89_REGD_MAX_COUNTRY_NUM U8_MAX
@@ -5156,6 +5203,7 @@ struct rtw89_regd {
#define RTW89_5GHZ_UNII4_START_INDEX 25
struct rtw89_regulatory_info {
+ struct rtw89_regd_ctrl ctrl;
const struct rtw89_regd *regd;
enum rtw89_reg_6ghz_power reg_6ghz_power;
struct rtw89_reg_6ghz_tpe reg_6ghz_tpe;
@@ -5299,8 +5347,8 @@ struct rtw89_lps_parm {
};
struct rtw89_ppdu_sts_info {
- struct sk_buff_head rx_queue[RTW89_PHY_MAX];
- u8 curr_rx_ppdu_cnt[RTW89_PHY_MAX];
+ struct sk_buff_head rx_queue[RTW89_PHY_NUM];
+ u8 curr_rx_ppdu_cnt[RTW89_PHY_NUM];
};
struct rtw89_early_h2c {
@@ -5419,8 +5467,8 @@ struct rtw89_phy_efuse_gain {
bool offset_valid;
bool comp_valid;
s8 offset[RF_PATH_MAX][RTW89_GAIN_OFFSET_NR]; /* S(8, 0) */
- s8 offset_base[RTW89_PHY_MAX]; /* S(8, 4) */
- s8 rssi_base[RTW89_PHY_MAX]; /* S(8, 4) */
+ s8 offset_base[RTW89_PHY_NUM]; /* S(8, 4) */
+ s8 rssi_base[RTW89_PHY_NUM]; /* S(8, 4) */
s8 comp[RF_PATH_MAX][RTW89_SUBBAND_NR]; /* S(8, 0) */
};
@@ -5629,8 +5677,6 @@ struct rtw89_dev {
struct rtw89_sta_link __rcu *assoc_link_on_macid[RTW89_MAX_MAC_ID_NUM];
refcount_t refcount_ap_info;
- /* ensures exclusive access from mac80211 callbacks */
- struct mutex mutex;
struct list_head rtwvifs_list;
/* used to protect rf read write */
struct mutex rf_mutex;
@@ -5650,10 +5696,10 @@ struct rtw89_dev {
struct rtw89_cam_info cam_info;
struct sk_buff_head c2h_queue;
- struct work_struct c2h_work;
- struct work_struct ips_work;
+ struct wiphy_work c2h_work;
+ struct wiphy_work ips_work;
+ struct wiphy_work cancel_6ghz_probe_work;
struct work_struct load_firmware_work;
- struct work_struct cancel_6ghz_probe_work;
struct list_head early_h2c_list;
@@ -5682,9 +5728,6 @@ struct rtw89_dev {
struct rtw89_power_trim_info pwr_trim;
struct rtw89_cfo_tracking_info cfo_tracking;
- struct rtw89_env_monitor_info env_monitor;
- struct rtw89_dig_info dig;
- struct rtw89_phy_ch_info ch_info;
union {
struct rtw89_phy_bb_gain_info ax;
struct rtw89_phy_bb_gain_info_be be;
@@ -5693,15 +5736,22 @@ struct rtw89_dev {
struct rtw89_phy_ul_tb_info ul_tb_info;
struct rtw89_antdiv_info antdiv;
- struct delayed_work track_work;
- struct delayed_work chanctx_work;
- struct delayed_work coex_act1_work;
- struct delayed_work coex_bt_devinfo_work;
- struct delayed_work coex_rfk_chk_work;
- struct delayed_work cfo_track_work;
+ struct rtw89_bb_ctx {
+ enum rtw89_phy_idx phy_idx;
+ struct rtw89_env_monitor_info env_monitor;
+ struct rtw89_dig_info dig;
+ struct rtw89_phy_ch_info ch_info;
+ struct rtw89_edcca_bak edcca_bak;
+ } bbs[RTW89_PHY_NUM];
+
+ struct wiphy_delayed_work track_work;
+ struct wiphy_delayed_work chanctx_work;
+ struct wiphy_delayed_work coex_act1_work;
+ struct wiphy_delayed_work coex_bt_devinfo_work;
+ struct wiphy_delayed_work coex_rfk_chk_work;
+ struct wiphy_delayed_work cfo_track_work;
struct delayed_work forbid_ba_work;
- struct delayed_work roc_work;
- struct delayed_work antdiv_work;
+ struct wiphy_delayed_work antdiv_work;
struct rtw89_ppdu_sts_info ppdu_sts;
u8 total_sta_assoc;
bool scanning;
@@ -6978,6 +7028,48 @@ static inline bool rtw89_is_mlo_1_1(struct rtw89_dev *rtwdev)
}
}
+static inline u8 rtw89_get_active_phy_bitmap(struct rtw89_dev *rtwdev)
+{
+ if (!rtwdev->dbcc_en)
+ return BIT(RTW89_PHY_0);
+
+ switch (rtwdev->mlo_dbcc_mode) {
+ case MLO_0_PLUS_2_1RF:
+ case MLO_0_PLUS_2_2RF:
+ return BIT(RTW89_PHY_1);
+ case MLO_1_PLUS_1_1RF:
+ case MLO_1_PLUS_1_2RF:
+ case MLO_2_PLUS_2_2RF:
+ case DBCC_LEGACY:
+ return BIT(RTW89_PHY_0) | BIT(RTW89_PHY_1);
+ case MLO_2_PLUS_0_1RF:
+ case MLO_2_PLUS_0_2RF:
+ default:
+ return BIT(RTW89_PHY_0);
+ }
+}
+
+#define rtw89_for_each_active_bb(rtwdev, bb) \
+ for (u8 __active_bb_bitmap = rtw89_get_active_phy_bitmap(rtwdev), \
+ __phy_idx = 0; __phy_idx < RTW89_PHY_NUM; __phy_idx++) \
+ if (__active_bb_bitmap & BIT(__phy_idx) && \
+ (bb = &rtwdev->bbs[__phy_idx]))
+
+#define rtw89_for_each_capab_bb(rtwdev, bb) \
+ for (u8 __phy_idx_max = rtwdev->dbcc_en ? RTW89_PHY_1 : RTW89_PHY_0, \
+ __phy_idx = 0; __phy_idx <= __phy_idx_max; __phy_idx++) \
+ if ((bb = &rtwdev->bbs[__phy_idx]))
+
+static inline
+struct rtw89_bb_ctx *rtw89_get_bb_ctx(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (phy_idx >= RTW89_PHY_NUM)
+ return &rtwdev->bbs[RTW89_PHY_0];
+
+ return &rtwdev->bbs[phy_idx];
+}
+
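
The new rtw89_for_each_active_bb() helper is a for-loop-plus-if macro: the for clause snapshots the active-PHY bitmap and counts the PHY index, while the if clause both filters inactive PHYs and binds bb to the matching per-PHY context, so callers write an ordinary loop body. A small self-contained demo of the same macro shape; all names are invented for illustration:

#include <stdio.h>

#define DEMO_NUM_PHY 2

struct demo_bb_ctx {
	unsigned int phy_idx;
	int sample;
};

struct demo_dev {
	struct demo_bb_ctx bbs[DEMO_NUM_PHY];
	unsigned int active_bitmap;
};

/* Same shape as rtw89_for_each_active_bb(): snapshot the bitmap in the
 * for clause, filter and bind the loop variable in the if clause.
 */
#define demo_for_each_active_bb(dev, bb)				\
	for (unsigned int __map = (dev)->active_bitmap, __i = 0;	\
	     __i < DEMO_NUM_PHY; __i++)					\
		if ((__map & (1U << __i)) && ((bb) = &(dev)->bbs[__i]))

int main(void)
{
	struct demo_dev dev = {
		.bbs = { { .phy_idx = 0, .sample = 10 },
			 { .phy_idx = 1, .sample = 20 } },
		.active_bitmap = 0x3,	/* both PHYs active */
	};
	struct demo_bb_ctx *bb;

	demo_for_each_active_bb(&dev, bb)
		printf("phy %u: sample %d\n", bb->phy_idx, bb->sample);

	return 0;
}
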
static inline bool rtw89_is_rtl885xb(struct rtw89_dev *rtwdev)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
@@ -7092,9 +7184,7 @@ void rtw89_chip_cfg_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate);
int rtw89_regd_setup(struct rtw89_dev *rtwdev);
-int rtw89_regd_init(struct rtw89_dev *rtwdev,
- void (*reg_notifier)(struct wiphy *wiphy, struct regulatory_request *request));
-void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+int rtw89_regd_init_hint(struct rtw89_dev *rtwdev);
void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev,
struct rtw89_traffic_stats *stats);
int rtw89_wait_for_cond(struct rtw89_wait_info *wait, unsigned int cond);
@@ -7102,8 +7192,8 @@ void rtw89_complete_cond(struct rtw89_wait_info *wait, unsigned int cond,
const struct rtw89_completion_data *data);
int rtw89_core_start(struct rtw89_dev *rtwdev);
void rtw89_core_stop(struct rtw89_dev *rtwdev);
-void rtw89_core_update_beacon_work(struct work_struct *work);
-void rtw89_roc_work(struct work_struct *work);
+void rtw89_core_update_beacon_work(struct wiphy *wiphy, struct wiphy_work *work);
+void rtw89_roc_work(struct wiphy *wiphy, struct wiphy_work *work);
void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index 09fa977a6e6d..f2c5753fd386 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -13,6 +13,7 @@
#include "ps.h"
#include "reg.h"
#include "sar.h"
+#include "util.h"
#ifdef CONFIG_RTW89_DEBUGMSG
unsigned int rtw89_debug_mask;
@@ -22,11 +23,21 @@ MODULE_PARM_DESC(debug_mask, "Debugging mask");
#endif
#ifdef CONFIG_RTW89_DEBUGFS
+struct rtw89_debugfs_priv_opt {
+ bool rlock:1;
+ bool wlock:1;
+ size_t rsize;
+};
+
struct rtw89_debugfs_priv {
struct rtw89_dev *rtwdev;
- int (*cb_read)(struct seq_file *m, void *v);
- ssize_t (*cb_write)(struct file *filp, const char __user *buffer,
- size_t count, loff_t *loff);
+ ssize_t (*cb_read)(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz);
+ ssize_t (*cb_write)(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count);
+ struct rtw89_debugfs_priv_opt opt;
union {
u32 cb_data;
struct {
@@ -51,6 +62,8 @@ struct rtw89_debugfs_priv {
u8 sel;
} mac_mem;
};
+ ssize_t rused;
+ char *rbuf;
};
struct rtw89_debugfs {
@@ -74,6 +87,28 @@ struct rtw89_debugfs {
struct rtw89_debugfs_priv disable_dm;
};
+struct rtw89_debugfs_iter_data {
+ char *buf;
+ size_t bufsz;
+ int written_sz;
+};
+
+static void rtw89_debugfs_iter_data_setup(struct rtw89_debugfs_iter_data *iter_data,
+ char *buf, size_t bufsz)
+{
+ iter_data->buf = buf;
+ iter_data->bufsz = bufsz;
+ iter_data->written_sz = 0;
+}
+
+static void rtw89_debugfs_iter_data_next(struct rtw89_debugfs_iter_data *iter_data,
+ char *buf, size_t bufsz, int written_sz)
+{
+ iter_data->buf = buf;
+ iter_data->bufsz = bufsz;
+ iter_data->written_sz += written_sz;
+}
+
static const u16 rtw89_rate_info_bw_to_mhz_map[] = {
[RATE_INFO_BW_20] = 20,
[RATE_INFO_BW_40] = 40,
@@ -90,84 +125,122 @@ static u16 rtw89_rate_info_bw_to_mhz(enum rate_info_bw bw)
return 0;
}
-static int rtw89_debugfs_single_show(struct seq_file *m, void *v)
+static ssize_t rtw89_debugfs_file_read_helper(struct wiphy *wiphy, struct file *file,
+ char *buf, size_t bufsz, void *data)
{
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ struct rtw89_debugfs_priv *debugfs_priv = data;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ ssize_t n;
- return debugfs_priv->cb_read(m, v);
+ n = debugfs_priv->cb_read(rtwdev, debugfs_priv, buf, bufsz);
+ rtw89_might_trailing_ellipsis(buf, bufsz, n);
+
+ return n;
}
-static ssize_t rtw89_debugfs_single_write(struct file *filp,
- const char __user *buffer,
- size_t count, loff_t *loff)
+static ssize_t rtw89_debugfs_file_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
{
- struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
+ struct rtw89_debugfs_priv *debugfs_priv = file->private_data;
+ struct rtw89_debugfs_priv_opt *opt = &debugfs_priv->opt;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ size_t bufsz = opt->rsize ? opt->rsize : PAGE_SIZE;
+ char *buf;
+ ssize_t n;
- return debugfs_priv->cb_write(filp, buffer, count, loff);
-}
+ if (!debugfs_priv->rbuf)
+ debugfs_priv->rbuf = devm_kzalloc(rtwdev->dev, bufsz, GFP_KERNEL);
-static ssize_t rtw89_debugfs_seq_file_write(struct file *filp,
- const char __user *buffer,
- size_t count, loff_t *loff)
-{
- struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
- struct rtw89_debugfs_priv *debugfs_priv = seqpriv->private;
+ buf = debugfs_priv->rbuf;
+ if (!buf)
+ return -ENOMEM;
+
+ if (*ppos) {
+ n = debugfs_priv->rused;
+ goto out;
+ }
+
+ if (opt->rlock) {
+ n = wiphy_locked_debugfs_read(rtwdev->hw->wiphy, file, buf, bufsz,
+ userbuf, count, ppos,
+ rtw89_debugfs_file_read_helper,
+ debugfs_priv);
+ debugfs_priv->rused = n;
- return debugfs_priv->cb_write(filp, buffer, count, loff);
+ return n;
+ }
+
+ n = rtw89_debugfs_file_read_helper(rtwdev->hw->wiphy, file, buf, bufsz,
+ debugfs_priv);
+ debugfs_priv->rused = n;
+
+out:
+ return simple_read_from_buffer(userbuf, count, ppos, buf, n);
}
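
For reference, a minimal sketch (not part of this patch) of a read callback under the new contract: it fills the caller-supplied buffer and returns the number of bytes written. The handler name is illustrative; total_sta_assoc is only borrowed from the struct shown earlier in this patch:

/* Illustrative sketch only -- not taken from this patch. */
static ssize_t rtw89_debug_priv_example_get(struct rtw89_dev *rtwdev,
					    struct rtw89_debugfs_priv *debugfs_priv,
					    char *buf, size_t bufsz)
{
	char *p = buf, *end = buf + bufsz;

	/* scnprintf() never writes past 'end' and returns the bytes it
	 * actually stored, so the pointer can be advanced in a chain.
	 */
	p += scnprintf(p, end - p, "assoc STAs: %u\n", rtwdev->total_sta_assoc);

	return p - buf;
}
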
-static int rtw89_debugfs_single_open(struct inode *inode, struct file *filp)
+static ssize_t rtw89_debugfs_file_write_helper(struct wiphy *wiphy, struct file *file,
+ char *buf, size_t count, void *data)
{
- return single_open(filp, rtw89_debugfs_single_show, inode->i_private);
+ struct rtw89_debugfs_priv *debugfs_priv = data;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+
+ return debugfs_priv->cb_write(rtwdev, debugfs_priv, buf, count);
}
-static int rtw89_debugfs_close(struct inode *inode, struct file *filp)
+static ssize_t rtw89_debugfs_file_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *loff)
{
- return 0;
+ struct rtw89_debugfs_priv *debugfs_priv = file->private_data;
+ struct rtw89_debugfs_priv_opt *opt = &debugfs_priv->opt;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ char *buf __free(kfree) = kmalloc(count + 1, GFP_KERNEL);
+ ssize_t n;
+
+ if (!buf)
+ return -ENOMEM;
+
+ if (opt->wlock) {
+ n = wiphy_locked_debugfs_write(rtwdev->hw->wiphy,
+ file, buf, count + 1,
+ userbuf, count,
+ rtw89_debugfs_file_write_helper,
+ debugfs_priv);
+ return n;
+ }
+
+ if (copy_from_user(buf, userbuf, count))
+ return -EFAULT;
+
+ buf[count] = '\0';
+
+ return debugfs_priv->cb_write(rtwdev, debugfs_priv, buf, count);
}
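
And the corresponding write side, as a sketch under the same caveats: callbacks receive a NUL-terminated kernel buffer (both the locked and the plain path above terminate it), so they can parse it directly; the handler name and the parsed value are assumptions:

/* Illustrative sketch only -- not taken from this patch. */
static ssize_t rtw89_debug_priv_example_set(struct rtw89_dev *rtwdev,
					    struct rtw89_debugfs_priv *debugfs_priv,
					    const char *buf, size_t count)
{
	u32 val;

	if (kstrtou32(buf, 0, &val))
		return -EINVAL;

	/* ... apply 'val' ... */

	return count;
}
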
-static const struct file_operations file_ops_single_r = {
- .owner = THIS_MODULE,
- .open = rtw89_debugfs_single_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+static const struct debugfs_short_fops file_ops_single_r = {
+ .read = rtw89_debugfs_file_read,
+ .llseek = generic_file_llseek,
};
-static const struct file_operations file_ops_common_rw = {
- .owner = THIS_MODULE,
- .open = rtw89_debugfs_single_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek,
- .write = rtw89_debugfs_seq_file_write,
+static const struct debugfs_short_fops file_ops_common_rw = {
+ .read = rtw89_debugfs_file_read,
+ .write = rtw89_debugfs_file_write,
+ .llseek = generic_file_llseek,
};
-static const struct file_operations file_ops_single_w = {
- .owner = THIS_MODULE,
- .write = rtw89_debugfs_single_write,
- .open = simple_open,
- .release = rtw89_debugfs_close,
+static const struct debugfs_short_fops file_ops_single_w = {
+ .write = rtw89_debugfs_file_write,
+ .llseek = generic_file_llseek,
};
static ssize_t
-rtw89_debug_priv_read_reg_select(struct file *filp,
- const char __user *user_buf,
- size_t count, loff_t *loff)
+rtw89_debug_priv_read_reg_select(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count)
{
- struct seq_file *m = (struct seq_file *)filp->private_data;
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
- char buf[32];
- size_t buf_size;
u32 addr, len;
int num;
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
- buf[buf_size] = '\0';
num = sscanf(buf, "%x %x", &addr, &len);
if (num != 2) {
rtw89_info(rtwdev, "invalid format: <addr> <len>\n");
@@ -182,11 +255,13 @@ rtw89_debug_priv_read_reg_select(struct file *filp,
return count;
}
-static int rtw89_debug_priv_read_reg_get(struct seq_file *m, void *v)
+static
+ssize_t rtw89_debug_priv_read_reg_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
{
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
- u32 addr, end, data, k;
+ char *p = buf, *end = buf + bufsz;
+ u32 addr, addr_end, data, k;
u32 len;
len = debugfs_priv->read_reg.len;
@@ -210,41 +285,34 @@ static int rtw89_debug_priv_read_reg_get(struct seq_file *m, void *v)
return -EINVAL;
}
- seq_printf(m, "get %d bytes at 0x%08x=0x%08x\n", len, addr, data);
+ p += scnprintf(p, end - p, "get %d bytes at 0x%08x=0x%08x\n", len,
+ addr, data);
- return 0;
+ return p - buf;
ndata:
- end = addr + len;
+ addr_end = addr + len;
- for (; addr < end; addr += 16) {
- seq_printf(m, "%08xh : ", 0x18600000 + addr);
+ for (; addr < addr_end; addr += 16) {
+ p += scnprintf(p, end - p, "%08xh : ", 0x18600000 + addr);
for (k = 0; k < 16; k += 4) {
data = rtw89_read32(rtwdev, addr + k);
- seq_printf(m, "%08x ", data);
+ p += scnprintf(p, end - p, "%08x ", data);
}
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
- return 0;
+ return p - buf;
}
-static ssize_t rtw89_debug_priv_write_reg_set(struct file *filp,
- const char __user *user_buf,
- size_t count, loff_t *loff)
+static
+ssize_t rtw89_debug_priv_write_reg_set(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count)
{
- struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
- char buf[32];
- size_t buf_size;
u32 addr, val, len;
int num;
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
- buf[buf_size] = '\0';
num = sscanf(buf, "%x %x %x", &addr, &val, &len);
if (num != 3) {
rtw89_info(rtwdev, "invalid format: <addr> <val> <len>\n");
@@ -273,24 +341,14 @@ static ssize_t rtw89_debug_priv_write_reg_set(struct file *filp,
}
static ssize_t
-rtw89_debug_priv_read_rf_select(struct file *filp,
- const char __user *user_buf,
- size_t count, loff_t *loff)
+rtw89_debug_priv_read_rf_select(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count)
{
- struct seq_file *m = (struct seq_file *)filp->private_data;
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
- char buf[32];
- size_t buf_size;
u32 addr, mask;
u8 path;
int num;
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
- buf[buf_size] = '\0';
num = sscanf(buf, "%hhd %x %x", &path, &addr, &mask);
if (num != 3) {
rtw89_info(rtwdev, "invalid format: <path> <addr> <mask>\n");
@@ -310,10 +368,12 @@ rtw89_debug_priv_read_rf_select(struct file *filp,
return count;
}
-static int rtw89_debug_priv_read_rf_get(struct seq_file *m, void *v)
+static
+ssize_t rtw89_debug_priv_read_rf_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
{
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ char *p = buf, *end = buf + bufsz;
u32 addr, data, mask;
u8 path;
@@ -323,28 +383,21 @@ static int rtw89_debug_priv_read_rf_get(struct seq_file *m, void *v)
data = rtw89_read_rf(rtwdev, path, addr, mask);
- seq_printf(m, "path %d, rf register 0x%08x=0x%08x\n", path, addr, data);
+ p += scnprintf(p, end - p, "path %d, rf register 0x%08x=0x%08x\n",
+ path, addr, data);
- return 0;
+ return p - buf;
}
-static ssize_t rtw89_debug_priv_write_rf_set(struct file *filp,
- const char __user *user_buf,
- size_t count, loff_t *loff)
+static
+ssize_t rtw89_debug_priv_write_rf_set(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count)
{
- struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
- char buf[32];
- size_t buf_size;
u32 addr, val, mask;
u8 path;
int num;
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
- buf[buf_size] = '\0';
num = sscanf(buf, "%hhd %x %x %x", &path, &addr, &mask, &val);
if (num != 4) {
rtw89_info(rtwdev, "invalid format: <path> <addr> <mask> <val>\n");
@@ -363,29 +416,31 @@ static ssize_t rtw89_debug_priv_write_rf_set(struct file *filp,
return count;
}
-static int rtw89_debug_priv_rf_reg_dump_get(struct seq_file *m, void *v)
+static
+ssize_t rtw89_debug_priv_rf_reg_dump_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
{
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
const struct rtw89_chip_info *chip = rtwdev->chip;
+ char *p = buf, *end = buf + bufsz;
u32 addr, offset, data;
u8 path;
for (path = 0; path < chip->rf_path_num; path++) {
- seq_printf(m, "RF path %d:\n\n", path);
+ p += scnprintf(p, end - p, "RF path %d:\n\n", path);
for (addr = 0; addr < 0x100; addr += 4) {
- seq_printf(m, "0x%08x: ", addr);
+ p += scnprintf(p, end - p, "0x%08x: ", addr);
for (offset = 0; offset < 4; offset++) {
data = rtw89_read_rf(rtwdev, path,
addr + offset, RFREG_MASK);
- seq_printf(m, "0x%05x ", data);
+ p += scnprintf(p, end - p, "0x%05x ", data);
}
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
- return 0;
+ return p - buf;
}
struct txpwr_ent {
@@ -704,56 +759,71 @@ static const struct txpwr_map __txpwr_map_lmt_ru_be = {
};
static unsigned int
-__print_txpwr_ent(struct seq_file *m, const struct txpwr_ent *ent,
- const s8 *buf, const unsigned int cur)
+__print_txpwr_ent(char *buf, size_t bufsz, const struct txpwr_ent *ent,
+ const s8 *bufp, const unsigned int cur, unsigned int *ate)
{
+ char *p = buf, *end = buf + bufsz;
unsigned int cnt, i;
+ unsigned int eaten;
char *fmt;
if (ent->nested) {
- for (cnt = 0, i = 0; i < ent->len; i++)
- cnt += __print_txpwr_ent(m, ent->ptr + i, buf,
- cur + cnt);
- return cnt;
+ for (cnt = 0, i = 0; i < ent->len; i++, cnt += eaten)
+ p += __print_txpwr_ent(p, end - p, ent->ptr + i, bufp,
+ cur + cnt, &eaten);
+ *ate = cnt;
+ goto out;
}
switch (ent->len) {
case 0:
- seq_printf(m, "\t<< %s >>\n", ent->txt);
- return 0;
+ p += scnprintf(p, end - p, "\t<< %s >>\n", ent->txt);
+ *ate = 0;
+ goto out;
case 2:
fmt = "%s\t| %3d, %3d,\t\tdBm\n";
- seq_printf(m, fmt, ent->txt, buf[cur], buf[cur + 1]);
- return 2;
+ p += scnprintf(p, end - p, fmt, ent->txt, bufp[cur],
+ bufp[cur + 1]);
+ *ate = 2;
+ goto out;
case 4:
fmt = "%s\t| %3d, %3d, %3d, %3d,\tdBm\n";
- seq_printf(m, fmt, ent->txt, buf[cur], buf[cur + 1],
- buf[cur + 2], buf[cur + 3]);
- return 4;
+ p += scnprintf(p, end - p, fmt, ent->txt, bufp[cur],
+ bufp[cur + 1],
+ bufp[cur + 2], bufp[cur + 3]);
+ *ate = 4;
+ goto out;
case 8:
fmt = "%s\t| %3d, %3d, %3d, %3d, %3d, %3d, %3d, %3d,\tdBm\n";
- seq_printf(m, fmt, ent->txt, buf[cur], buf[cur + 1],
- buf[cur + 2], buf[cur + 3], buf[cur + 4],
- buf[cur + 5], buf[cur + 6], buf[cur + 7]);
- return 8;
+ p += scnprintf(p, end - p, fmt, ent->txt, bufp[cur],
+ bufp[cur + 1],
+ bufp[cur + 2], bufp[cur + 3], bufp[cur + 4],
+ bufp[cur + 5], bufp[cur + 6], bufp[cur + 7]);
+ *ate = 8;
+ goto out;
default:
return 0;
}
+
+out:
+ return p - buf;
}
-static int __print_txpwr_map(struct seq_file *m, struct rtw89_dev *rtwdev,
- const struct txpwr_map *map)
+static ssize_t __print_txpwr_map(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
+ const struct txpwr_map *map)
{
u8 fct = rtwdev->chip->txpwr_factor_mac;
u8 path_num = rtwdev->chip->rf_path_num;
+ char *p = buf, *end = buf + bufsz;
unsigned int cur, i;
+ unsigned int eaten;
u32 max_valid_addr;
u32 val, addr;
- s8 *buf, tmp;
+ s8 *bufp, tmp;
int ret;
- buf = vzalloc(map->addr_to - map->addr_from + 4);
- if (!buf)
+ bufp = vzalloc(map->addr_to - map->addr_from + 4);
+ if (!bufp)
return -ENOMEM;
if (path_num == 1)
@@ -773,31 +843,32 @@ static int __print_txpwr_map(struct seq_file *m, struct rtw89_dev *rtwdev,
for (i = 0; i < 4; i++, val >>= 8) {
/* signed 7 bits, and reserved BIT(7) */
tmp = sign_extend32(val, 6);
- buf[cur + i] = tmp >> fct;
+ bufp[cur + i] = tmp >> fct;
}
}
- for (cur = 0, i = 0; i < map->size; i++)
- cur += __print_txpwr_ent(m, &map->ent[i], buf, cur);
+ for (cur = 0, i = 0; i < map->size; i++, cur += eaten)
+ p += __print_txpwr_ent(p, end - p, &map->ent[i], bufp, cur, &eaten);
- vfree(buf);
- return 0;
+ vfree(bufp);
+ return p - buf;
}
#define case_REGD(_regd) \
case RTW89_ ## _regd: \
- seq_puts(m, #_regd "\n"); \
+ p += scnprintf(p, end - p, #_regd "\n"); \
break
-static void __print_regd(struct seq_file *m, struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan)
+static int __print_regd(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
+ const struct rtw89_chan *chan)
{
+ char *p = buf, *end = buf + bufsz;
u8 band = chan->band_type;
u8 regd = rtw89_regd_get(rtwdev, band);
switch (regd) {
default:
- seq_printf(m, "UNKNOWN: %d\n", regd);
+ p += scnprintf(p, end - p, "UNKNOWN: %d\n", regd);
break;
case_REGD(WW);
case_REGD(ETSI);
@@ -816,6 +887,8 @@ static void __print_regd(struct seq_file *m, struct rtw89_dev *rtwdev,
case_REGD(UK);
case_REGD(THAILAND);
}
+
+ return p - buf;
}
#undef case_REGD
@@ -844,96 +917,93 @@ static const struct dbgfs_txpwr_table *dbgfs_txpwr_tables[RTW89_CHIP_GEN_NUM] =
};
static
-void rtw89_debug_priv_txpwr_table_get_regd(struct seq_file *m,
- struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan)
+int rtw89_debug_priv_txpwr_table_get_regd(struct rtw89_dev *rtwdev,
+ char *buf, size_t bufsz,
+ const struct rtw89_chan *chan)
{
const struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
const struct rtw89_reg_6ghz_tpe *tpe6 = &regulatory->reg_6ghz_tpe;
+ char *p = buf, *end = buf + bufsz;
- seq_printf(m, "[Chanctx] band %u, ch %u, bw %u\n",
- chan->band_type, chan->channel, chan->band_width);
+ p += scnprintf(p, end - p, "[Chanctx] band %u, ch %u, bw %u\n",
+ chan->band_type, chan->channel, chan->band_width);
- seq_puts(m, "[Regulatory] ");
- __print_regd(m, rtwdev, chan);
+ p += scnprintf(p, end - p, "[Regulatory] ");
+ p += __print_regd(rtwdev, p, end - p, chan);
if (chan->band_type == RTW89_BAND_6G) {
- seq_printf(m, "[reg6_pwr_type] %u\n", regulatory->reg_6ghz_power);
+ p += scnprintf(p, end - p, "[reg6_pwr_type] %u\n",
+ regulatory->reg_6ghz_power);
if (tpe6->valid)
- seq_printf(m, "[TPE] %d dBm\n", tpe6->constraint);
+ p += scnprintf(p, end - p, "[TPE] %d dBm\n",
+ tpe6->constraint);
}
+
+ return p - buf;
}
-static int rtw89_debug_priv_txpwr_table_get(struct seq_file *m, void *v)
+static
+ssize_t rtw89_debug_priv_txpwr_table_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
{
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
const struct dbgfs_txpwr_table *tbl;
const struct rtw89_chan *chan;
- int ret = 0;
+ char *p = buf, *end = buf + bufsz;
+ ssize_t n;
+
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
- mutex_lock(&rtwdev->mutex);
rtw89_leave_ps_mode(rtwdev);
chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
- rtw89_debug_priv_txpwr_table_get_regd(m, rtwdev, chan);
+ p += rtw89_debug_priv_txpwr_table_get_regd(rtwdev, p, end - p, chan);
- seq_puts(m, "[SAR]\n");
- rtw89_print_sar(m, rtwdev, chan->freq);
+ p += scnprintf(p, end - p, "[SAR]\n");
+ p += rtw89_print_sar(rtwdev, p, end - p, chan->freq);
- seq_puts(m, "[TAS]\n");
- rtw89_print_tas(m, rtwdev);
+ p += scnprintf(p, end - p, "[TAS]\n");
+ p += rtw89_print_tas(rtwdev, p, end - p);
- seq_puts(m, "[DAG]\n");
- rtw89_print_ant_gain(m, rtwdev, chan);
+ p += scnprintf(p, end - p, "[DAG]\n");
+ p += rtw89_print_ant_gain(rtwdev, p, end - p, chan);
tbl = dbgfs_txpwr_tables[chip_gen];
- if (!tbl) {
- ret = -EOPNOTSUPP;
- goto err;
- }
-
- seq_puts(m, "\n[TX power byrate]\n");
- ret = __print_txpwr_map(m, rtwdev, tbl->byr);
- if (ret)
- goto err;
-
- seq_puts(m, "\n[TX power limit]\n");
- ret = __print_txpwr_map(m, rtwdev, tbl->lmt);
- if (ret)
- goto err;
-
- seq_puts(m, "\n[TX power limit_ru]\n");
- ret = __print_txpwr_map(m, rtwdev, tbl->lmt_ru);
- if (ret)
- goto err;
+ if (!tbl)
+ return -EOPNOTSUPP;
-err:
- mutex_unlock(&rtwdev->mutex);
- return ret;
+ p += scnprintf(p, end - p, "\n[TX power byrate]\n");
+ n = __print_txpwr_map(rtwdev, p, end - p, tbl->byr);
+ if (n < 0)
+ return n;
+ p += n;
+
+ p += scnprintf(p, end - p, "\n[TX power limit]\n");
+ n = __print_txpwr_map(rtwdev, p, end - p, tbl->lmt);
+ if (n < 0)
+ return n;
+ p += n;
+
+ p += scnprintf(p, end - p, "\n[TX power limit_ru]\n");
+ n = __print_txpwr_map(rtwdev, p, end - p, tbl->lmt_ru);
+ if (n < 0)
+ return n;
+ p += n;
+
+ return p - buf;
}
static ssize_t
-rtw89_debug_priv_mac_reg_dump_select(struct file *filp,
- const char __user *user_buf,
- size_t count, loff_t *loff)
+rtw89_debug_priv_mac_reg_dump_select(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count)
{
- struct seq_file *m = (struct seq_file *)filp->private_data;
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
const struct rtw89_chip_info *chip = rtwdev->chip;
- char buf[32];
- size_t buf_size;
int sel;
int ret;
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
- buf[buf_size] = '\0';
ret = kstrtoint(buf, 0, &sel);
if (ret)
return ret;
@@ -957,99 +1027,91 @@ rtw89_debug_priv_mac_reg_dump_select(struct file *filp,
#define RTW89_MAC_PAGE_SIZE 0x100
-static int rtw89_debug_priv_mac_reg_dump_get(struct seq_file *m, void *v)
+static
+ssize_t rtw89_debug_priv_mac_reg_dump_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
{
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
enum rtw89_debug_mac_reg_sel reg_sel = debugfs_priv->cb_data;
- u32 start, end;
+ char *p = buf, *end = buf + bufsz;
+ u32 start, end_addr;
u32 i, j, k, page;
u32 val;
switch (reg_sel) {
case RTW89_DBG_SEL_MAC_00:
- seq_puts(m, "Debug selected MAC page 0x00\n");
+ p += scnprintf(p, end - p, "Debug selected MAC page 0x00\n");
start = 0x000;
- end = 0x014;
+ end_addr = 0x014;
break;
case RTW89_DBG_SEL_MAC_30:
- seq_puts(m, "Debug selected MAC page 0x30\n");
+ p += scnprintf(p, end - p, "Debug selected MAC page 0x30\n");
start = 0x030;
- end = 0x033;
+ end_addr = 0x033;
break;
case RTW89_DBG_SEL_MAC_40:
- seq_puts(m, "Debug selected MAC page 0x40\n");
+ p += scnprintf(p, end - p, "Debug selected MAC page 0x40\n");
start = 0x040;
- end = 0x07f;
+ end_addr = 0x07f;
break;
case RTW89_DBG_SEL_MAC_80:
- seq_puts(m, "Debug selected MAC page 0x80\n");
+ p += scnprintf(p, end - p, "Debug selected MAC page 0x80\n");
start = 0x080;
- end = 0x09f;
+ end_addr = 0x09f;
break;
case RTW89_DBG_SEL_MAC_C0:
- seq_puts(m, "Debug selected MAC page 0xc0\n");
+ p += scnprintf(p, end - p, "Debug selected MAC page 0xc0\n");
start = 0x0c0;
- end = 0x0df;
+ end_addr = 0x0df;
break;
case RTW89_DBG_SEL_MAC_E0:
- seq_puts(m, "Debug selected MAC page 0xe0\n");
+ p += scnprintf(p, end - p, "Debug selected MAC page 0xe0\n");
start = 0x0e0;
- end = 0x0ff;
+ end_addr = 0x0ff;
break;
case RTW89_DBG_SEL_BB:
- seq_puts(m, "Debug selected BB register\n");
+ p += scnprintf(p, end - p, "Debug selected BB register\n");
start = 0x100;
- end = 0x17f;
+ end_addr = 0x17f;
break;
case RTW89_DBG_SEL_IQK:
- seq_puts(m, "Debug selected IQK register\n");
+ p += scnprintf(p, end - p, "Debug selected IQK register\n");
start = 0x180;
- end = 0x1bf;
+ end_addr = 0x1bf;
break;
case RTW89_DBG_SEL_RFC:
- seq_puts(m, "Debug selected RFC register\n");
+ p += scnprintf(p, end - p, "Debug selected RFC register\n");
start = 0x1c0;
- end = 0x1ff;
+ end_addr = 0x1ff;
break;
default:
- seq_puts(m, "Selected invalid register page\n");
+ p += scnprintf(p, end - p, "Selected invalid register page\n");
return -EINVAL;
}
- for (i = start; i <= end; i++) {
+ for (i = start; i <= end_addr; i++) {
page = i << 8;
for (j = page; j < page + RTW89_MAC_PAGE_SIZE; j += 16) {
- seq_printf(m, "%08xh : ", 0x18600000 + j);
+ p += scnprintf(p, end - p, "%08xh : ", 0x18600000 + j);
for (k = 0; k < 4; k++) {
val = rtw89_read32(rtwdev, j + (k << 2));
- seq_printf(m, "%08x ", val);
+ p += scnprintf(p, end - p, "%08x ", val);
}
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
}
- return 0;
+ return p - buf;
}
static ssize_t
-rtw89_debug_priv_mac_mem_dump_select(struct file *filp,
- const char __user *user_buf,
- size_t count, loff_t *loff)
+rtw89_debug_priv_mac_mem_dump_select(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count)
{
- struct seq_file *m = (struct seq_file *)filp->private_data;
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
- char buf[32];
- size_t buf_size;
u32 sel, start_addr, len;
int num;
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
- buf[buf_size] = '\0';
num = sscanf(buf, "%x %x %x", &sel, &start_addr, &len);
if (num != 3) {
rtw89_info(rtwdev, "invalid format: <sel> <start> <len>\n");
@@ -1066,15 +1128,16 @@ rtw89_debug_priv_mac_mem_dump_select(struct file *filp,
return count;
}
-static void rtw89_debug_dump_mac_mem(struct seq_file *m,
- struct rtw89_dev *rtwdev,
- u8 sel, u32 start_addr, u32 len)
+static int rtw89_debug_dump_mac_mem(struct rtw89_dev *rtwdev,
+ char *buf, size_t bufsz,
+ u8 sel, u32 start_addr, u32 len)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
u32 filter_model_addr = mac->filter_model_addr;
u32 indir_access_addr = mac->indir_access_addr;
u32 base_addr, start_page, residue;
- u32 i, j, p, pages;
+ char *p = buf, *end = buf + bufsz;
+ u32 i, j, pp, pages;
u32 dump_len, remain;
u32 val;
@@ -1085,32 +1148,37 @@ static void rtw89_debug_dump_mac_mem(struct seq_file *m,
base_addr = mac->mem_base_addrs[sel];
base_addr += start_page * MAC_MEM_DUMP_PAGE_SIZE;
- for (p = 0; p < pages; p++) {
+ for (pp = 0; pp < pages; pp++) {
dump_len = min_t(u32, remain, MAC_MEM_DUMP_PAGE_SIZE);
rtw89_write32(rtwdev, filter_model_addr, base_addr);
for (i = indir_access_addr + residue;
i < indir_access_addr + dump_len;) {
- seq_printf(m, "%08xh:", i);
+ p += scnprintf(p, end - p, "%08xh:", i);
for (j = 0;
j < 4 && i < indir_access_addr + dump_len;
j++, i += 4) {
val = rtw89_read32(rtwdev, i);
- seq_printf(m, " %08x", val);
+ p += scnprintf(p, end - p, " %08x", val);
remain -= 4;
}
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
base_addr += MAC_MEM_DUMP_PAGE_SIZE;
}
+
+ return p - buf;
}
-static int
-rtw89_debug_priv_mac_mem_dump_get(struct seq_file *m, void *v)
+static ssize_t
+rtw89_debug_priv_mac_mem_dump_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
{
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ char *p = buf, *end = buf + bufsz;
bool grant_read = false;
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
if (debugfs_priv->mac_mem.sel >= RTW89_MAC_MEM_NUM)
return -ENOENT;
@@ -1127,40 +1195,28 @@ rtw89_debug_priv_mac_mem_dump_get(struct seq_file *m, void *v)
}
}
- mutex_lock(&rtwdev->mutex);
rtw89_leave_ps_mode(rtwdev);
if (grant_read)
rtw89_write32_set(rtwdev, R_AX_TCR1, B_AX_TCR_FORCE_READ_TXDFIFO);
- rtw89_debug_dump_mac_mem(m, rtwdev,
- debugfs_priv->mac_mem.sel,
- debugfs_priv->mac_mem.start,
- debugfs_priv->mac_mem.len);
+ p += rtw89_debug_dump_mac_mem(rtwdev, p, end - p,
+ debugfs_priv->mac_mem.sel,
+ debugfs_priv->mac_mem.start,
+ debugfs_priv->mac_mem.len);
if (grant_read)
rtw89_write32_clr(rtwdev, R_AX_TCR1, B_AX_TCR_FORCE_READ_TXDFIFO);
- mutex_unlock(&rtwdev->mutex);
- return 0;
+ return p - buf;
}
static ssize_t
-rtw89_debug_priv_mac_dbg_port_dump_select(struct file *filp,
- const char __user *user_buf,
- size_t count, loff_t *loff)
+rtw89_debug_priv_mac_dbg_port_dump_select(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count)
{
- struct seq_file *m = (struct seq_file *)filp->private_data;
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
- char buf[32];
- size_t buf_size;
int sel, set;
int num;
bool enable;
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
- buf[buf_size] = '\0';
num = sscanf(buf, "%d %d", &sel, &set);
if (num != 2) {
rtw89_info(rtwdev, "invalid format: <sel> <set>\n");
@@ -1196,13 +1252,13 @@ rtw89_debug_priv_mac_dbg_port_dump_select(struct file *filp,
}
static int rtw89_debug_mac_dump_ss_dbg(struct rtw89_dev *rtwdev,
- struct seq_file *m)
+ char *buf, size_t bufsz)
{
return 0;
}
static int rtw89_debug_mac_dump_dle_dbg(struct rtw89_dev *rtwdev,
- struct seq_file *m)
+ char *buf, size_t bufsz)
{
#define DLE_DFI_DUMP(__type, __target, __sel) \
({ \
@@ -1231,7 +1287,7 @@ static int rtw89_debug_mac_dump_dle_dbg(struct rtw89_dev *rtwdev,
__data; \
})
-#define DLE_DFI_FREE_PAGE_DUMP(__m, __type) \
+#define DLE_DFI_FREE_PAGE_DUMP(__p, __end, __type) \
({ \
u32 __freepg, __pubpg; \
u32 __freepg_head, __freepg_tail, __pubpg_num; \
@@ -1241,24 +1297,25 @@ static int rtw89_debug_mac_dump_dle_dbg(struct rtw89_dev *rtwdev,
__freepg_head = FIELD_GET(B_AX_DLE_FREE_HEADPG, __freepg); \
__freepg_tail = FIELD_GET(B_AX_DLE_FREE_TAILPG, __freepg); \
__pubpg_num = FIELD_GET(B_AX_DLE_PUB_PGNUM, __pubpg); \
- seq_printf(__m, "[%s] freepg head: %d\n", \
- #__type, __freepg_head); \
- seq_printf(__m, "[%s] freepg tail: %d\n", \
- #__type, __freepg_tail); \
- seq_printf(__m, "[%s] pubpg num : %d\n", \
- #__type, __pubpg_num); \
+ __p += scnprintf(__p, __end - __p, "[%s] freepg head: %d\n", \
+ #__type, __freepg_head); \
+ __p += scnprintf(__p, __end - __p, "[%s] freepg tail: %d\n", \
+ #__type, __freepg_tail); \
+ __p += scnprintf(__p, __end - __p, "[%s] pubpg num : %d\n", \
+ #__type, __pubpg_num); \
})
-#define case_QUOTA(__m, __type, __id) \
+#define case_QUOTA(__p, __end, __type, __id) \
case __type##_QTAID_##__id: \
- val32 = DLE_DFI_DUMP(__type, QUOTA, __type##_QTAID_##__id); \
+ val32 = DLE_DFI_DUMP(__type, QUOTA, __type##_QTAID_##__id); \
rsv_pgnum = FIELD_GET(B_AX_DLE_RSV_PGNUM, val32); \
use_pgnum = FIELD_GET(B_AX_DLE_USE_PGNUM, val32); \
- seq_printf(__m, "[%s][%s] rsv_pgnum: %d\n", \
- #__type, #__id, rsv_pgnum); \
- seq_printf(__m, "[%s][%s] use_pgnum: %d\n", \
- #__type, #__id, use_pgnum); \
+ __p += scnprintf(__p, __end - __p, "[%s][%s] rsv_pgnum: %d\n", \
+ #__type, #__id, rsv_pgnum); \
+ __p += scnprintf(__p, __end - __p, "[%s][%s] use_pgnum: %d\n", \
+ #__type, #__id, use_pgnum); \
break
+ char *p = buf, *end = buf + bufsz;
u32 quota_id;
u32 val32;
u16 rsv_pgnum, use_pgnum;
@@ -1266,38 +1323,39 @@ static int rtw89_debug_mac_dump_dle_dbg(struct rtw89_dev *rtwdev,
ret = rtw89_mac_check_mac_en(rtwdev, 0, RTW89_DMAC_SEL);
if (ret) {
- seq_puts(m, "[DLE] : DMAC not enabled\n");
- return ret;
+ p += scnprintf(p, end - p, "[DLE] : DMAC not enabled\n");
+ goto out;
}
- DLE_DFI_FREE_PAGE_DUMP(m, WDE);
- DLE_DFI_FREE_PAGE_DUMP(m, PLE);
+ DLE_DFI_FREE_PAGE_DUMP(p, end, WDE);
+ DLE_DFI_FREE_PAGE_DUMP(p, end, PLE);
for (quota_id = 0; quota_id <= WDE_QTAID_CPUIO; quota_id++) {
switch (quota_id) {
- case_QUOTA(m, WDE, HOST_IF);
- case_QUOTA(m, WDE, WLAN_CPU);
- case_QUOTA(m, WDE, DATA_CPU);
- case_QUOTA(m, WDE, PKTIN);
- case_QUOTA(m, WDE, CPUIO);
+ case_QUOTA(p, end, WDE, HOST_IF);
+ case_QUOTA(p, end, WDE, WLAN_CPU);
+ case_QUOTA(p, end, WDE, DATA_CPU);
+ case_QUOTA(p, end, WDE, PKTIN);
+ case_QUOTA(p, end, WDE, CPUIO);
}
}
for (quota_id = 0; quota_id <= PLE_QTAID_CPUIO; quota_id++) {
switch (quota_id) {
- case_QUOTA(m, PLE, B0_TXPL);
- case_QUOTA(m, PLE, B1_TXPL);
- case_QUOTA(m, PLE, C2H);
- case_QUOTA(m, PLE, H2C);
- case_QUOTA(m, PLE, WLAN_CPU);
- case_QUOTA(m, PLE, MPDU);
- case_QUOTA(m, PLE, CMAC0_RX);
- case_QUOTA(m, PLE, CMAC1_RX);
- case_QUOTA(m, PLE, CMAC1_BBRPT);
- case_QUOTA(m, PLE, WDRLS);
- case_QUOTA(m, PLE, CPUIO);
+ case_QUOTA(p, end, PLE, B0_TXPL);
+ case_QUOTA(p, end, PLE, B1_TXPL);
+ case_QUOTA(p, end, PLE, C2H);
+ case_QUOTA(p, end, PLE, H2C);
+ case_QUOTA(p, end, PLE, WLAN_CPU);
+ case_QUOTA(p, end, PLE, MPDU);
+ case_QUOTA(p, end, PLE, CMAC0_RX);
+ case_QUOTA(p, end, PLE, CMAC1_RX);
+ case_QUOTA(p, end, PLE, CMAC1_BBRPT);
+ case_QUOTA(p, end, PLE, WDRLS);
+ case_QUOTA(p, end, PLE, CPUIO);
}
}
- return 0;
+out:
+ return p - buf;
#undef case_QUOTA
#undef DLE_DFI_DUMP
@@ -1305,73 +1363,88 @@ static int rtw89_debug_mac_dump_dle_dbg(struct rtw89_dev *rtwdev,
}
static int rtw89_debug_mac_dump_dmac_dbg(struct rtw89_dev *rtwdev,
- struct seq_file *m)
+ char *buf, size_t bufsz)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
+ char *p = buf, *end = buf + bufsz;
u32 dmac_err;
int i, ret;
ret = rtw89_mac_check_mac_en(rtwdev, 0, RTW89_DMAC_SEL);
if (ret) {
- seq_puts(m, "[DMAC] : DMAC not enabled\n");
- return ret;
+ p += scnprintf(p, end - p, "[DMAC] : DMAC not enabled\n");
+ goto out;
}
dmac_err = rtw89_read32(rtwdev, R_AX_DMAC_ERR_ISR);
- seq_printf(m, "R_AX_DMAC_ERR_ISR=0x%08x\n", dmac_err);
- seq_printf(m, "R_AX_DMAC_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_DMAC_ERR_IMR));
+ p += scnprintf(p, end - p, "R_AX_DMAC_ERR_ISR=0x%08x\n", dmac_err);
+ p += scnprintf(p, end - p, "R_AX_DMAC_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_DMAC_ERR_IMR));
if (dmac_err) {
- seq_printf(m, "R_AX_WDE_ERR_FLAG_CFG=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_WDE_ERR_FLAG_CFG_NUM1));
- seq_printf(m, "R_AX_PLE_ERR_FLAG_CFG=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_CFG_NUM1));
+ p += scnprintf(p, end - p, "R_AX_WDE_ERR_FLAG_CFG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDE_ERR_FLAG_CFG_NUM1));
+ p += scnprintf(p, end - p, "R_AX_PLE_ERR_FLAG_CFG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_CFG_NUM1));
if (chip->chip_id == RTL8852C) {
- seq_printf(m, "R_AX_PLE_ERRFLAG_MSG=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PLE_ERRFLAG_MSG));
- seq_printf(m, "R_AX_WDE_ERRFLAG_MSG=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_WDE_ERRFLAG_MSG));
- seq_printf(m, "R_AX_PLE_DBGERR_LOCKEN=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PLE_DBGERR_LOCKEN));
- seq_printf(m, "R_AX_PLE_DBGERR_STS=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PLE_DBGERR_STS));
+ p += scnprintf(p, end - p,
+ "R_AX_PLE_ERRFLAG_MSG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_ERRFLAG_MSG));
+ p += scnprintf(p, end - p,
+ "R_AX_WDE_ERRFLAG_MSG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDE_ERRFLAG_MSG));
+ p += scnprintf(p, end - p,
+ "R_AX_PLE_DBGERR_LOCKEN=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_DBGERR_LOCKEN));
+ p += scnprintf(p, end - p,
+ "R_AX_PLE_DBGERR_STS=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_DBGERR_STS));
}
}
if (dmac_err & B_AX_WDRLS_ERR_FLAG) {
- seq_printf(m, "R_AX_WDRLS_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_WDRLS_ERR_IMR));
- seq_printf(m, "R_AX_WDRLS_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_WDRLS_ERR_ISR));
+ p += scnprintf(p, end - p, "R_AX_WDRLS_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDRLS_ERR_IMR));
+ p += scnprintf(p, end - p, "R_AX_WDRLS_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDRLS_ERR_ISR));
if (chip->chip_id == RTL8852C)
- seq_printf(m, "R_AX_RPQ_RXBD_IDX=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX_V1));
+ p += scnprintf(p, end - p,
+ "R_AX_RPQ_RXBD_IDX=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX_V1));
else
- seq_printf(m, "R_AX_RPQ_RXBD_IDX=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX));
+ p += scnprintf(p, end - p,
+ "R_AX_RPQ_RXBD_IDX=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX));
}
if (dmac_err & B_AX_WSEC_ERR_FLAG) {
if (chip->chip_id == RTL8852C) {
- seq_printf(m, "R_AX_SEC_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_ERROR_FLAG_IMR));
- seq_printf(m, "R_AX_SEC_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_ERROR_FLAG));
- seq_printf(m, "R_AX_SEC_ENG_CTRL=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL));
- seq_printf(m, "R_AX_SEC_MPDU_PROC=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC));
- seq_printf(m, "R_AX_SEC_CAM_ACCESS=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_CAM_ACCESS));
- seq_printf(m, "R_AX_SEC_CAM_RDATA=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_CAM_RDATA));
- seq_printf(m, "R_AX_SEC_DEBUG1=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_DEBUG1));
- seq_printf(m, "R_AX_SEC_TX_DEBUG=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_TX_DEBUG));
- seq_printf(m, "R_AX_SEC_RX_DEBUG=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_RX_DEBUG));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_ERROR_FLAG_IMR));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_ERROR_FLAG));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_ENG_CTRL=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_MPDU_PROC=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_CAM_ACCESS=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_CAM_ACCESS));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_CAM_RDATA=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_CAM_RDATA));
+ p += scnprintf(p, end - p, "R_AX_SEC_DEBUG1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_DEBUG1));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_TX_DEBUG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_TX_DEBUG));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_RX_DEBUG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_RX_DEBUG));
rtw89_write32_mask(rtwdev, R_AX_DBG_CTRL,
B_AX_DBG_SEL0, 0x8B);
@@ -1382,187 +1455,229 @@ static int rtw89_debug_mac_dump_dmac_dbg(struct rtw89_dev *rtwdev,
for (i = 0; i < 0x10; i++) {
rtw89_write32_mask(rtwdev, R_AX_SEC_ENG_CTRL,
B_AX_SEC_DBG_PORT_FIELD_MASK, i);
- seq_printf(m, "sel=%x,R_AX_SEC_DEBUG2=0x%08x\n",
- i, rtw89_read32(rtwdev, R_AX_SEC_DEBUG2));
+ p += scnprintf(p, end - p,
+ "sel=%x,R_AX_SEC_DEBUG2=0x%08x\n",
+ i,
+ rtw89_read32(rtwdev, R_AX_SEC_DEBUG2));
}
} else {
- seq_printf(m, "R_AX_SEC_ERR_IMR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_DEBUG));
- seq_printf(m, "R_AX_SEC_ENG_CTRL=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL));
- seq_printf(m, "R_AX_SEC_MPDU_PROC=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC));
- seq_printf(m, "R_AX_SEC_CAM_ACCESS=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_CAM_ACCESS));
- seq_printf(m, "R_AX_SEC_CAM_RDATA=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_CAM_RDATA));
- seq_printf(m, "R_AX_SEC_CAM_WDATA=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_CAM_WDATA));
- seq_printf(m, "R_AX_SEC_TX_DEBUG=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_TX_DEBUG));
- seq_printf(m, "R_AX_SEC_RX_DEBUG=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_RX_DEBUG));
- seq_printf(m, "R_AX_SEC_TRX_PKT_CNT=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_TRX_PKT_CNT));
- seq_printf(m, "R_AX_SEC_TRX_BLK_CNT=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_TRX_BLK_CNT));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_ERR_IMR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_DEBUG));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_ENG_CTRL=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_MPDU_PROC=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_CAM_ACCESS=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_CAM_ACCESS));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_CAM_RDATA=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_CAM_RDATA));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_CAM_WDATA=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_CAM_WDATA));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_TX_DEBUG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_TX_DEBUG));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_RX_DEBUG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_RX_DEBUG));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_TRX_PKT_CNT=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_TRX_PKT_CNT));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_TRX_BLK_CNT=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_TRX_BLK_CNT));
}
}
if (dmac_err & B_AX_MPDU_ERR_FLAG) {
- seq_printf(m, "R_AX_MPDU_TX_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_IMR));
- seq_printf(m, "R_AX_MPDU_TX_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_ISR));
- seq_printf(m, "R_AX_MPDU_RX_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_IMR));
- seq_printf(m, "R_AX_MPDU_RX_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_ISR));
+ p += scnprintf(p, end - p, "R_AX_MPDU_TX_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_IMR));
+ p += scnprintf(p, end - p, "R_AX_MPDU_TX_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_ISR));
+ p += scnprintf(p, end - p, "R_AX_MPDU_RX_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_IMR));
+ p += scnprintf(p, end - p, "R_AX_MPDU_RX_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_ISR));
}
if (dmac_err & B_AX_STA_SCHEDULER_ERR_FLAG) {
- seq_printf(m, "R_AX_STA_SCHEDULER_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR));
- seq_printf(m, "R_AX_STA_SCHEDULER_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_ISR));
+ p += scnprintf(p, end - p,
+ "R_AX_STA_SCHEDULER_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR));
+ p += scnprintf(p, end - p,
+ "R_AX_STA_SCHEDULER_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_ISR));
}
if (dmac_err & B_AX_WDE_DLE_ERR_FLAG) {
- seq_printf(m, "R_AX_WDE_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR));
- seq_printf(m, "R_AX_WDE_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR));
- seq_printf(m, "R_AX_PLE_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR));
- seq_printf(m, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR));
+ p += scnprintf(p, end - p, "R_AX_WDE_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR));
+ p += scnprintf(p, end - p, "R_AX_WDE_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR));
+ p += scnprintf(p, end - p, "R_AX_PLE_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR));
+ p += scnprintf(p, end - p, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR));
}
if (dmac_err & B_AX_TXPKTCTRL_ERR_FLAG) {
if (chip->chip_id == RTL8852C) {
- seq_printf(m, "R_AX_TXPKTCTL_B0_ERRFLAG_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_TXPKTCTL_B0_ERRFLAG_IMR));
- seq_printf(m, "R_AX_TXPKTCTL_B0_ERRFLAG_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_TXPKTCTL_B0_ERRFLAG_ISR));
- seq_printf(m, "R_AX_TXPKTCTL_B1_ERRFLAG_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_TXPKTCTL_B1_ERRFLAG_IMR));
- seq_printf(m, "R_AX_TXPKTCTL_B1_ERRFLAG_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_TXPKTCTL_B1_ERRFLAG_ISR));
+ p += scnprintf(p, end - p,
+ "R_AX_TXPKTCTL_B0_ERRFLAG_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPKTCTL_B0_ERRFLAG_IMR));
+ p += scnprintf(p, end - p,
+ "R_AX_TXPKTCTL_B0_ERRFLAG_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPKTCTL_B0_ERRFLAG_ISR));
+ p += scnprintf(p, end - p,
+ "R_AX_TXPKTCTL_B1_ERRFLAG_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPKTCTL_B1_ERRFLAG_IMR));
+ p += scnprintf(p, end - p,
+ "R_AX_TXPKTCTL_B1_ERRFLAG_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPKTCTL_B1_ERRFLAG_ISR));
} else {
- seq_printf(m, "R_AX_TXPKTCTL_ERR_IMR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR));
- seq_printf(m, "R_AX_TXPKTCTL_ERR_IMR_ISR_B1=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1));
+ p += scnprintf(p, end - p,
+ "R_AX_TXPKTCTL_ERR_IMR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR));
+ p += scnprintf(p, end - p,
+ "R_AX_TXPKTCTL_ERR_IMR_ISR_B1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1));
}
}
if (dmac_err & B_AX_PLE_DLE_ERR_FLAG) {
- seq_printf(m, "R_AX_WDE_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR));
- seq_printf(m, "R_AX_WDE_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR));
- seq_printf(m, "R_AX_PLE_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR));
- seq_printf(m, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR));
- seq_printf(m, "R_AX_WD_CPUQ_OP_0=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_0));
- seq_printf(m, "R_AX_WD_CPUQ_OP_1=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_1));
- seq_printf(m, "R_AX_WD_CPUQ_OP_2=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_2));
- seq_printf(m, "R_AX_WD_CPUQ_OP_STATUS=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_STATUS));
- seq_printf(m, "R_AX_PL_CPUQ_OP_0=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_0));
- seq_printf(m, "R_AX_PL_CPUQ_OP_1=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_1));
- seq_printf(m, "R_AX_PL_CPUQ_OP_2=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_2));
- seq_printf(m, "R_AX_PL_CPUQ_OP_STATUS=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_STATUS));
+ p += scnprintf(p, end - p, "R_AX_WDE_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR));
+ p += scnprintf(p, end - p, "R_AX_WDE_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR));
+ p += scnprintf(p, end - p, "R_AX_PLE_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR));
+ p += scnprintf(p, end - p, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR));
+ p += scnprintf(p, end - p, "R_AX_WD_CPUQ_OP_0=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_0));
+ p += scnprintf(p, end - p, "R_AX_WD_CPUQ_OP_1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_1));
+ p += scnprintf(p, end - p, "R_AX_WD_CPUQ_OP_2=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_2));
+ p += scnprintf(p, end - p, "R_AX_WD_CPUQ_OP_STATUS=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_STATUS));
+ p += scnprintf(p, end - p, "R_AX_PL_CPUQ_OP_0=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_0));
+ p += scnprintf(p, end - p, "R_AX_PL_CPUQ_OP_1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_1));
+ p += scnprintf(p, end - p, "R_AX_PL_CPUQ_OP_2=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_2));
+ p += scnprintf(p, end - p, "R_AX_PL_CPUQ_OP_STATUS=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_STATUS));
if (chip->chip_id == RTL8852C) {
- seq_printf(m, "R_AX_RX_CTRL0=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_RX_CTRL0));
- seq_printf(m, "R_AX_RX_CTRL1=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_RX_CTRL1));
- seq_printf(m, "R_AX_RX_CTRL2=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_RX_CTRL2));
+ p += scnprintf(p, end - p, "R_AX_RX_CTRL0=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RX_CTRL0));
+ p += scnprintf(p, end - p, "R_AX_RX_CTRL1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RX_CTRL1));
+ p += scnprintf(p, end - p, "R_AX_RX_CTRL2=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RX_CTRL2));
} else {
- seq_printf(m, "R_AX_RXDMA_PKT_INFO_0=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_0));
- seq_printf(m, "R_AX_RXDMA_PKT_INFO_1=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_1));
- seq_printf(m, "R_AX_RXDMA_PKT_INFO_2=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_2));
+ p += scnprintf(p, end - p,
+ "R_AX_RXDMA_PKT_INFO_0=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_0));
+ p += scnprintf(p, end - p,
+ "R_AX_RXDMA_PKT_INFO_1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_1));
+ p += scnprintf(p, end - p,
+ "R_AX_RXDMA_PKT_INFO_2=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_2));
}
}
if (dmac_err & B_AX_PKTIN_ERR_FLAG) {
- seq_printf(m, "R_AX_PKTIN_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PKTIN_ERR_IMR));
- seq_printf(m, "R_AX_PKTIN_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PKTIN_ERR_ISR));
+ p += scnprintf(p, end - p, "R_AX_PKTIN_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PKTIN_ERR_IMR));
+ p += scnprintf(p, end - p, "R_AX_PKTIN_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PKTIN_ERR_ISR));
}
if (dmac_err & B_AX_DISPATCH_ERR_FLAG) {
- seq_printf(m, "R_AX_HOST_DISPATCHER_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR));
- seq_printf(m, "R_AX_HOST_DISPATCHER_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_ISR));
- seq_printf(m, "R_AX_CPU_DISPATCHER_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR));
- seq_printf(m, "R_AX_CPU_DISPATCHER_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_ISR));
- seq_printf(m, "R_AX_OTHER_DISPATCHER_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR));
- seq_printf(m, "R_AX_OTHER_DISPATCHER_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_ISR));
+ p += scnprintf(p, end - p,
+ "R_AX_HOST_DISPATCHER_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR));
+ p += scnprintf(p, end - p,
+ "R_AX_HOST_DISPATCHER_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_ISR));
+ p += scnprintf(p, end - p,
+ "R_AX_CPU_DISPATCHER_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR));
+ p += scnprintf(p, end - p,
+ "R_AX_CPU_DISPATCHER_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_ISR));
+ p += scnprintf(p, end - p,
+ "R_AX_OTHER_DISPATCHER_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR));
+ p += scnprintf(p, end - p,
+ "R_AX_OTHER_DISPATCHER_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_ISR));
}
if (dmac_err & B_AX_BBRPT_ERR_FLAG) {
if (chip->chip_id == RTL8852C) {
- seq_printf(m, "R_AX_BBRPT_COM_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR));
- seq_printf(m, "R_AX_BBRPT_COM_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_ISR));
- seq_printf(m, "R_AX_BBRPT_CHINFO_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_ISR));
- seq_printf(m, "R_AX_BBRPT_CHINFO_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_IMR));
- seq_printf(m, "R_AX_BBRPT_DFS_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_IMR));
- seq_printf(m, "R_AX_BBRPT_DFS_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_ISR));
+ p += scnprintf(p, end - p,
+ "R_AX_BBRPT_COM_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR));
+ p += scnprintf(p, end - p,
+ "R_AX_BBRPT_COM_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_ISR));
+ p += scnprintf(p, end - p,
+ "R_AX_BBRPT_CHINFO_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_ISR));
+ p += scnprintf(p, end - p,
+ "R_AX_BBRPT_CHINFO_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_IMR));
+ p += scnprintf(p, end - p,
+ "R_AX_BBRPT_DFS_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_IMR));
+ p += scnprintf(p, end - p,
+ "R_AX_BBRPT_DFS_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_ISR));
} else {
- seq_printf(m, "R_AX_BBRPT_COM_ERR_IMR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR_ISR));
- seq_printf(m, "R_AX_BBRPT_CHINFO_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_ISR));
- seq_printf(m, "R_AX_BBRPT_CHINFO_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_IMR));
- seq_printf(m, "R_AX_BBRPT_DFS_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_IMR));
- seq_printf(m, "R_AX_BBRPT_DFS_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_ISR));
+ p += scnprintf(p, end - p,
+ "R_AX_BBRPT_COM_ERR_IMR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR_ISR));
+ p += scnprintf(p, end - p,
+ "R_AX_BBRPT_CHINFO_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_ISR));
+ p += scnprintf(p, end - p,
+ "R_AX_BBRPT_CHINFO_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_IMR));
+ p += scnprintf(p, end - p,
+ "R_AX_BBRPT_DFS_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_IMR));
+ p += scnprintf(p, end - p,
+ "R_AX_BBRPT_DFS_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_ISR));
}
}
if (dmac_err & B_AX_HAXIDMA_ERR_FLAG && chip->chip_id == RTL8852C) {
- seq_printf(m, "R_AX_HAXIDMA_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_HAXI_IDCT_MSK));
- seq_printf(m, "R_AX_HAXIDMA_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_HAXI_IDCT));
+ p += scnprintf(p, end - p, "R_AX_HAXIDMA_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_HAXI_IDCT_MSK));
+ p += scnprintf(p, end - p, "R_AX_HAXIDMA_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_HAXI_IDCT));
}
- return 0;
+out:
+ return p - buf;
}
static int rtw89_debug_mac_dump_cmac_err(struct rtw89_dev *rtwdev,
- struct seq_file *m,
+ char *buf, size_t bufsz,
enum rtw89_mac_idx band)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
+ char *p = buf, *end = buf + bufsz;
u32 offset = 0;
u32 cmac_err;
int ret;
@@ -1570,96 +1685,127 @@ static int rtw89_debug_mac_dump_cmac_err(struct rtw89_dev *rtwdev,
ret = rtw89_mac_check_mac_en(rtwdev, band, RTW89_CMAC_SEL);
if (ret) {
if (band)
- seq_puts(m, "[CMAC] : CMAC1 not enabled\n");
+ p += scnprintf(p, end - p,
+ "[CMAC] : CMAC1 not enabled\n");
else
- seq_puts(m, "[CMAC] : CMAC0 not enabled\n");
- return ret;
+ p += scnprintf(p, end - p,
+ "[CMAC] : CMAC0 not enabled\n");
+ goto out;
}
if (band)
offset = RTW89_MAC_AX_BAND_REG_OFFSET;
cmac_err = rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR + offset);
- seq_printf(m, "R_AX_CMAC_ERR_ISR [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR + offset));
- seq_printf(m, "R_AX_CMAC_FUNC_EN [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_CMAC_FUNC_EN + offset));
- seq_printf(m, "R_AX_CK_EN [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_CK_EN + offset));
+ p += scnprintf(p, end - p, "R_AX_CMAC_ERR_ISR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR + offset));
+ p += scnprintf(p, end - p, "R_AX_CMAC_FUNC_EN [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_CMAC_FUNC_EN + offset));
+ p += scnprintf(p, end - p, "R_AX_CK_EN [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_CK_EN + offset));
if (cmac_err & B_AX_SCHEDULE_TOP_ERR_IND) {
- seq_printf(m, "R_AX_SCHEDULE_ERR_IMR [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_IMR + offset));
- seq_printf(m, "R_AX_SCHEDULE_ERR_ISR [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_ISR + offset));
+ p += scnprintf(p, end - p,
+ "R_AX_SCHEDULE_ERR_IMR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_IMR + offset));
+ p += scnprintf(p, end - p,
+ "R_AX_SCHEDULE_ERR_ISR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_ISR + offset));
}
if (cmac_err & B_AX_PTCL_TOP_ERR_IND) {
- seq_printf(m, "R_AX_PTCL_IMR0 [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_PTCL_IMR0 + offset));
- seq_printf(m, "R_AX_PTCL_ISR0 [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_PTCL_ISR0 + offset));
+ p += scnprintf(p, end - p, "R_AX_PTCL_IMR0 [%d]=0x%08x\n",
+ band,
+ rtw89_read32(rtwdev, R_AX_PTCL_IMR0 + offset));
+ p += scnprintf(p, end - p, "R_AX_PTCL_ISR0 [%d]=0x%08x\n",
+ band,
+ rtw89_read32(rtwdev, R_AX_PTCL_ISR0 + offset));
}
if (cmac_err & B_AX_DMA_TOP_ERR_IND) {
if (chip->chip_id == RTL8852C) {
- seq_printf(m, "R_AX_RX_ERR_FLAG [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_RX_ERR_FLAG + offset));
- seq_printf(m, "R_AX_RX_ERR_FLAG_IMR [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_RX_ERR_FLAG_IMR + offset));
+ p += scnprintf(p, end - p,
+ "R_AX_RX_ERR_FLAG [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_RX_ERR_FLAG + offset));
+ p += scnprintf(p, end - p,
+ "R_AX_RX_ERR_FLAG_IMR [%d]=0x%08x\n",
+ band,
+ rtw89_read32(rtwdev, R_AX_RX_ERR_FLAG_IMR + offset));
} else {
- seq_printf(m, "R_AX_DLE_CTRL [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_DLE_CTRL + offset));
+ p += scnprintf(p, end - p,
+ "R_AX_DLE_CTRL [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_DLE_CTRL + offset));
}
}
if (cmac_err & B_AX_DMA_TOP_ERR_IND || cmac_err & B_AX_WMAC_RX_ERR_IND) {
if (chip->chip_id == RTL8852C) {
- seq_printf(m, "R_AX_PHYINFO_ERR_ISR [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_ISR + offset));
- seq_printf(m, "R_AX_PHYINFO_ERR_IMR [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_IMR + offset));
+ p += scnprintf(p, end - p,
+ "R_AX_PHYINFO_ERR_ISR [%d]=0x%08x\n",
+ band,
+ rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_ISR + offset));
+ p += scnprintf(p, end - p,
+ "R_AX_PHYINFO_ERR_IMR [%d]=0x%08x\n",
+ band,
+ rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_IMR + offset));
} else {
- seq_printf(m, "R_AX_PHYINFO_ERR_IMR [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_IMR + offset));
+ p += scnprintf(p, end - p,
+ "R_AX_PHYINFO_ERR_IMR [%d]=0x%08x\n",
+ band,
+ rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_IMR + offset));
}
}
if (cmac_err & B_AX_TXPWR_CTRL_ERR_IND) {
- seq_printf(m, "R_AX_TXPWR_IMR [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_TXPWR_IMR + offset));
- seq_printf(m, "R_AX_TXPWR_ISR [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_TXPWR_ISR + offset));
+ p += scnprintf(p, end - p, "R_AX_TXPWR_IMR [%d]=0x%08x\n",
+ band,
+ rtw89_read32(rtwdev, R_AX_TXPWR_IMR + offset));
+ p += scnprintf(p, end - p, "R_AX_TXPWR_ISR [%d]=0x%08x\n",
+ band,
+ rtw89_read32(rtwdev, R_AX_TXPWR_ISR + offset));
}
if (cmac_err & B_AX_WMAC_TX_ERR_IND) {
if (chip->chip_id == RTL8852C) {
- seq_printf(m, "R_AX_TRXPTCL_ERROR_INDICA [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_TRXPTCL_ERROR_INDICA + offset));
- seq_printf(m, "R_AX_TRXPTCL_ERROR_INDICA_MASK [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_TRXPTCL_ERROR_INDICA_MASK + offset));
+ p += scnprintf(p, end - p,
+ "R_AX_TRXPTCL_ERROR_INDICA [%d]=0x%08x\n",
+ band,
+ rtw89_read32(rtwdev,
+ R_AX_TRXPTCL_ERROR_INDICA + offset));
+ p += scnprintf(p, end - p,
+ "R_AX_TRXPTCL_ERROR_INDICA_MASK [%d]=0x%08x\n",
+ band,
+ rtw89_read32(rtwdev,
+ R_AX_TRXPTCL_ERROR_INDICA_MASK + offset));
} else {
- seq_printf(m, "R_AX_TMAC_ERR_IMR_ISR [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_TMAC_ERR_IMR_ISR + offset));
+ p += scnprintf(p, end - p,
+ "R_AX_TMAC_ERR_IMR_ISR [%d]=0x%08x\n",
+ band,
+ rtw89_read32(rtwdev,
+ R_AX_TMAC_ERR_IMR_ISR + offset));
}
- seq_printf(m, "R_AX_DBGSEL_TRXPTCL [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL + offset));
+ p += scnprintf(p, end - p,
+ "R_AX_DBGSEL_TRXPTCL [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL + offset));
}
- seq_printf(m, "R_AX_CMAC_ERR_IMR [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_CMAC_ERR_IMR + offset));
+ p += scnprintf(p, end - p, "R_AX_CMAC_ERR_IMR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_CMAC_ERR_IMR + offset));
- return 0;
+out:
+ return p - buf;
}
static int rtw89_debug_mac_dump_cmac_dbg(struct rtw89_dev *rtwdev,
- struct seq_file *m)
+ char *buf, size_t bufsz)
{
- rtw89_debug_mac_dump_cmac_err(rtwdev, m, RTW89_MAC_0);
+ char *p = buf, *end = buf + bufsz;
+
+ p += rtw89_debug_mac_dump_cmac_err(rtwdev, p, end - p, RTW89_MAC_0);
if (rtwdev->dbcc_en)
- rtw89_debug_mac_dump_cmac_err(rtwdev, m, RTW89_MAC_1);
+ p += rtw89_debug_mac_dump_cmac_err(rtwdev, p, end - p, RTW89_MAC_1);
- return 0;
+ return p - buf;
}
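For orientation, every helper converted in this patch follows the same buffer-cursor pattern: it takes a caller-supplied buffer and size, advances a cursor with scnprintf(), and returns the byte count so callers can chain helpers with p += ... . A minimal, self-contained sketch of that pattern (the function name and the output lines are illustrative, not taken from the driver):

#include <linux/kernel.h>	/* scnprintf() */

/*
 * Illustrative only: scnprintf() writes at most (end - p) bytes
 * including the trailing NUL and returns the number of characters
 * actually stored, so the cursor can never run past 'end' and a
 * too-small buffer simply truncates the dump instead of overflowing.
 */
static ssize_t example_dump(char *buf, size_t bufsz)
{
	char *p = buf, *end = buf + bufsz;

	p += scnprintf(p, end - p, "header\n");
	p += scnprintf(p, end - p, "value=0x%08x\n", 0x1234u);

	return p - buf;	/* bytes produced; the caller accumulates with p += */
}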
static const struct rtw89_mac_dbg_port_info dbg_port_ptcl_c0 = {
@@ -2465,11 +2611,12 @@ static const struct rtw89_mac_dbg_port_info dbg_port_pcie_misc2 = {
.rd_msk = B_AX_DEBUG_ST_MASK
};
-static const struct rtw89_mac_dbg_port_info *
-rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
- struct rtw89_dev *rtwdev, u32 sel)
+static int
+rtw89_debug_mac_dbg_port_sel(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
+ u32 sel, const struct rtw89_mac_dbg_port_info **ppinfo)
{
- const struct rtw89_mac_dbg_port_info *info;
+ const struct rtw89_mac_dbg_port_info *info = NULL;
+ char *p = buf, *end = buf + bufsz;
u32 index;
u32 val32;
u16 val16;
@@ -2481,28 +2628,28 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
val16 = rtw89_read16(rtwdev, R_AX_PTCL_DBG);
val16 |= B_AX_PTCL_DBG_EN;
rtw89_write16(rtwdev, R_AX_PTCL_DBG, val16);
- seq_puts(m, "Enable PTCL C0 dbgport.\n");
+ p += scnprintf(p, end - p, "Enable PTCL C0 dbgport.\n");
break;
case RTW89_DBG_PORT_SEL_PTCL_C1:
info = &dbg_port_ptcl_c1;
val16 = rtw89_read16(rtwdev, R_AX_PTCL_DBG_C1);
val16 |= B_AX_PTCL_DBG_EN;
rtw89_write16(rtwdev, R_AX_PTCL_DBG_C1, val16);
- seq_puts(m, "Enable PTCL C1 dbgport.\n");
+ p += scnprintf(p, end - p, "Enable PTCL C1 dbgport.\n");
break;
case RTW89_DBG_PORT_SEL_SCH_C0:
info = &dbg_port_sch_c0;
val32 = rtw89_read32(rtwdev, R_AX_SCH_DBG_SEL);
val32 |= B_AX_SCH_DBG_EN;
rtw89_write32(rtwdev, R_AX_SCH_DBG_SEL, val32);
- seq_puts(m, "Enable SCH C0 dbgport.\n");
+ p += scnprintf(p, end - p, "Enable SCH C0 dbgport.\n");
break;
case RTW89_DBG_PORT_SEL_SCH_C1:
info = &dbg_port_sch_c1;
val32 = rtw89_read32(rtwdev, R_AX_SCH_DBG_SEL_C1);
val32 |= B_AX_SCH_DBG_EN;
rtw89_write32(rtwdev, R_AX_SCH_DBG_SEL_C1, val32);
- seq_puts(m, "Enable SCH C1 dbgport.\n");
+ p += scnprintf(p, end - p, "Enable SCH C1 dbgport.\n");
break;
case RTW89_DBG_PORT_SEL_TMAC_C0:
info = &dbg_port_tmac_c0;
@@ -2519,7 +2666,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1);
val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK);
rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32);
- seq_puts(m, "Enable TMAC C0 dbgport.\n");
+ p += scnprintf(p, end - p, "Enable TMAC C0 dbgport.\n");
break;
case RTW89_DBG_PORT_SEL_TMAC_C1:
info = &dbg_port_tmac_c1;
@@ -2536,7 +2683,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1);
val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK);
rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32);
- seq_puts(m, "Enable TMAC C1 dbgport.\n");
+ p += scnprintf(p, end - p, "Enable TMAC C1 dbgport.\n");
break;
case RTW89_DBG_PORT_SEL_RMAC_C0:
info = &dbg_port_rmac_c0;
@@ -2558,7 +2705,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
val8 = u8_replace_bits(val8, RMAC_CMAC_DBG_SEL,
B_AX_DBGSEL_TRXPTCL_MASK);
rtw89_write8(rtwdev, R_AX_DBGSEL_TRXPTCL, val8);
- seq_puts(m, "Enable RMAC C0 dbgport.\n");
+ p += scnprintf(p, end - p, "Enable RMAC C0 dbgport.\n");
break;
case RTW89_DBG_PORT_SEL_RMAC_C1:
info = &dbg_port_rmac_c1;
@@ -2580,23 +2727,23 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
val8 = u8_replace_bits(val8, RMAC_CMAC_DBG_SEL,
B_AX_DBGSEL_TRXPTCL_MASK);
rtw89_write8(rtwdev, R_AX_DBGSEL_TRXPTCL_C1, val8);
- seq_puts(m, "Enable RMAC C1 dbgport.\n");
+ p += scnprintf(p, end - p, "Enable RMAC C1 dbgport.\n");
break;
case RTW89_DBG_PORT_SEL_RMACST_C0:
info = &dbg_port_rmacst_c0;
- seq_puts(m, "Enable RMAC state C0 dbgport.\n");
+ p += scnprintf(p, end - p, "Enable RMAC state C0 dbgport.\n");
break;
case RTW89_DBG_PORT_SEL_RMACST_C1:
info = &dbg_port_rmacst_c1;
- seq_puts(m, "Enable RMAC state C1 dbgport.\n");
+ p += scnprintf(p, end - p, "Enable RMAC state C1 dbgport.\n");
break;
case RTW89_DBG_PORT_SEL_RMAC_PLCP_C0:
info = &dbg_port_rmac_plcp_c0;
- seq_puts(m, "Enable RMAC PLCP C0 dbgport.\n");
+ p += scnprintf(p, end - p, "Enable RMAC PLCP C0 dbgport.\n");
break;
case RTW89_DBG_PORT_SEL_RMAC_PLCP_C1:
info = &dbg_port_rmac_plcp_c1;
- seq_puts(m, "Enable RMAC PLCP C1 dbgport.\n");
+ p += scnprintf(p, end - p, "Enable RMAC PLCP C1 dbgport.\n");
break;
case RTW89_DBG_PORT_SEL_TRXPTCL_C0:
info = &dbg_port_trxptcl_c0;
@@ -2608,7 +2755,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1);
val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK);
rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32);
- seq_puts(m, "Enable TRXPTCL C0 dbgport.\n");
+ p += scnprintf(p, end - p, "Enable TRXPTCL C0 dbgport.\n");
break;
case RTW89_DBG_PORT_SEL_TRXPTCL_C1:
info = &dbg_port_trxptcl_c1;
@@ -2620,131 +2767,137 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1);
val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK);
rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32);
- seq_puts(m, "Enable TRXPTCL C1 dbgport.\n");
+ p += scnprintf(p, end - p, "Enable TRXPTCL C1 dbgport.\n");
break;
case RTW89_DBG_PORT_SEL_TX_INFOL_C0:
info = &dbg_port_tx_infol_c0;
val32 = rtw89_read32(rtwdev, R_AX_TCR1);
val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
rtw89_write32(rtwdev, R_AX_TCR1, val32);
- seq_puts(m, "Enable tx infol dump.\n");
+ p += scnprintf(p, end - p, "Enable tx infol dump.\n");
break;
case RTW89_DBG_PORT_SEL_TX_INFOH_C0:
info = &dbg_port_tx_infoh_c0;
val32 = rtw89_read32(rtwdev, R_AX_TCR1);
val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
rtw89_write32(rtwdev, R_AX_TCR1, val32);
- seq_puts(m, "Enable tx infoh dump.\n");
+ p += scnprintf(p, end - p, "Enable tx infoh dump.\n");
break;
case RTW89_DBG_PORT_SEL_TX_INFOL_C1:
info = &dbg_port_tx_infol_c1;
val32 = rtw89_read32(rtwdev, R_AX_TCR1_C1);
val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
rtw89_write32(rtwdev, R_AX_TCR1_C1, val32);
- seq_puts(m, "Enable tx infol dump.\n");
+ p += scnprintf(p, end - p, "Enable tx infol dump.\n");
break;
case RTW89_DBG_PORT_SEL_TX_INFOH_C1:
info = &dbg_port_tx_infoh_c1;
val32 = rtw89_read32(rtwdev, R_AX_TCR1_C1);
val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
rtw89_write32(rtwdev, R_AX_TCR1_C1, val32);
- seq_puts(m, "Enable tx infoh dump.\n");
+ p += scnprintf(p, end - p, "Enable tx infoh dump.\n");
break;
case RTW89_DBG_PORT_SEL_TXTF_INFOL_C0:
info = &dbg_port_txtf_infol_c0;
val32 = rtw89_read32(rtwdev, R_AX_TCR1);
val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
rtw89_write32(rtwdev, R_AX_TCR1, val32);
- seq_puts(m, "Enable tx tf infol dump.\n");
+ p += scnprintf(p, end - p, "Enable tx tf infol dump.\n");
break;
case RTW89_DBG_PORT_SEL_TXTF_INFOH_C0:
info = &dbg_port_txtf_infoh_c0;
val32 = rtw89_read32(rtwdev, R_AX_TCR1);
val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
rtw89_write32(rtwdev, R_AX_TCR1, val32);
- seq_puts(m, "Enable tx tf infoh dump.\n");
+ p += scnprintf(p, end - p, "Enable tx tf infoh dump.\n");
break;
case RTW89_DBG_PORT_SEL_TXTF_INFOL_C1:
info = &dbg_port_txtf_infol_c1;
val32 = rtw89_read32(rtwdev, R_AX_TCR1_C1);
val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
rtw89_write32(rtwdev, R_AX_TCR1_C1, val32);
- seq_puts(m, "Enable tx tf infol dump.\n");
+ p += scnprintf(p, end - p, "Enable tx tf infol dump.\n");
break;
case RTW89_DBG_PORT_SEL_TXTF_INFOH_C1:
info = &dbg_port_txtf_infoh_c1;
val32 = rtw89_read32(rtwdev, R_AX_TCR1_C1);
val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
rtw89_write32(rtwdev, R_AX_TCR1_C1, val32);
- seq_puts(m, "Enable tx tf infoh dump.\n");
+ p += scnprintf(p, end - p, "Enable tx tf infoh dump.\n");
break;
case RTW89_DBG_PORT_SEL_WDE_BUFMGN_FREEPG:
info = &dbg_port_wde_bufmgn_freepg;
- seq_puts(m, "Enable wde bufmgn freepg dump.\n");
+ p += scnprintf(p, end - p, "Enable wde bufmgn freepg dump.\n");
break;
case RTW89_DBG_PORT_SEL_WDE_BUFMGN_QUOTA:
info = &dbg_port_wde_bufmgn_quota;
- seq_puts(m, "Enable wde bufmgn quota dump.\n");
+ p += scnprintf(p, end - p, "Enable wde bufmgn quota dump.\n");
break;
case RTW89_DBG_PORT_SEL_WDE_BUFMGN_PAGELLT:
info = &dbg_port_wde_bufmgn_pagellt;
- seq_puts(m, "Enable wde bufmgn pagellt dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable wde bufmgn pagellt dump.\n");
break;
case RTW89_DBG_PORT_SEL_WDE_BUFMGN_PKTINFO:
info = &dbg_port_wde_bufmgn_pktinfo;
- seq_puts(m, "Enable wde bufmgn pktinfo dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable wde bufmgn pktinfo dump.\n");
break;
case RTW89_DBG_PORT_SEL_WDE_QUEMGN_PREPKT:
info = &dbg_port_wde_quemgn_prepkt;
- seq_puts(m, "Enable wde quemgn prepkt dump.\n");
+ p += scnprintf(p, end - p, "Enable wde quemgn prepkt dump.\n");
break;
case RTW89_DBG_PORT_SEL_WDE_QUEMGN_NXTPKT:
info = &dbg_port_wde_quemgn_nxtpkt;
- seq_puts(m, "Enable wde quemgn nxtpkt dump.\n");
+ p += scnprintf(p, end - p, "Enable wde quemgn nxtpkt dump.\n");
break;
case RTW89_DBG_PORT_SEL_WDE_QUEMGN_QLNKTBL:
info = &dbg_port_wde_quemgn_qlnktbl;
- seq_puts(m, "Enable wde quemgn qlnktbl dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable wde quemgn qlnktbl dump.\n");
break;
case RTW89_DBG_PORT_SEL_WDE_QUEMGN_QEMPTY:
info = &dbg_port_wde_quemgn_qempty;
- seq_puts(m, "Enable wde quemgn qempty dump.\n");
+ p += scnprintf(p, end - p, "Enable wde quemgn qempty dump.\n");
break;
case RTW89_DBG_PORT_SEL_PLE_BUFMGN_FREEPG:
info = &dbg_port_ple_bufmgn_freepg;
- seq_puts(m, "Enable ple bufmgn freepg dump.\n");
+ p += scnprintf(p, end - p, "Enable ple bufmgn freepg dump.\n");
break;
case RTW89_DBG_PORT_SEL_PLE_BUFMGN_QUOTA:
info = &dbg_port_ple_bufmgn_quota;
- seq_puts(m, "Enable ple bufmgn quota dump.\n");
+ p += scnprintf(p, end - p, "Enable ple bufmgn quota dump.\n");
break;
case RTW89_DBG_PORT_SEL_PLE_BUFMGN_PAGELLT:
info = &dbg_port_ple_bufmgn_pagellt;
- seq_puts(m, "Enable ple bufmgn pagellt dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable ple bufmgn pagellt dump.\n");
break;
case RTW89_DBG_PORT_SEL_PLE_BUFMGN_PKTINFO:
info = &dbg_port_ple_bufmgn_pktinfo;
- seq_puts(m, "Enable ple bufmgn pktinfo dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable ple bufmgn pktinfo dump.\n");
break;
case RTW89_DBG_PORT_SEL_PLE_QUEMGN_PREPKT:
info = &dbg_port_ple_quemgn_prepkt;
- seq_puts(m, "Enable ple quemgn prepkt dump.\n");
+ p += scnprintf(p, end - p, "Enable ple quemgn prepkt dump.\n");
break;
case RTW89_DBG_PORT_SEL_PLE_QUEMGN_NXTPKT:
info = &dbg_port_ple_quemgn_nxtpkt;
- seq_puts(m, "Enable ple quemgn nxtpkt dump.\n");
+ p += scnprintf(p, end - p, "Enable ple quemgn nxtpkt dump.\n");
break;
case RTW89_DBG_PORT_SEL_PLE_QUEMGN_QLNKTBL:
info = &dbg_port_ple_quemgn_qlnktbl;
- seq_puts(m, "Enable ple quemgn qlnktbl dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable ple quemgn qlnktbl dump.\n");
break;
case RTW89_DBG_PORT_SEL_PLE_QUEMGN_QEMPTY:
info = &dbg_port_ple_quemgn_qempty;
- seq_puts(m, "Enable ple quemgn qempty dump.\n");
+ p += scnprintf(p, end - p, "Enable ple quemgn qempty dump.\n");
break;
case RTW89_DBG_PORT_SEL_PKTINFO:
info = &dbg_port_pktinfo;
- seq_puts(m, "Enable pktinfo dump.\n");
+ p += scnprintf(p, end - p, "Enable pktinfo dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_HDT_TX0:
rtw89_write32_mask(rtwdev, R_AX_DBG_CTRL,
@@ -2763,7 +2916,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 0);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, index);
- seq_printf(m, "Enable Dispatcher hdt tx%x dump.\n", index);
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher hdt tx%x dump.\n", index);
break;
case RTW89_DBG_PORT_SEL_DSPT_HDT_TX6:
info = &dbg_port_dspt_hdt_tx6;
@@ -2771,7 +2925,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 0);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 6);
- seq_puts(m, "Enable Dispatcher hdt tx6 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher hdt tx6 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_HDT_TX7:
info = &dbg_port_dspt_hdt_tx7;
@@ -2779,7 +2934,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 0);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 7);
- seq_puts(m, "Enable Dispatcher hdt tx7 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher hdt tx7 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_HDT_TX8:
info = &dbg_port_dspt_hdt_tx8;
@@ -2787,7 +2943,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 0);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 8);
- seq_puts(m, "Enable Dispatcher hdt tx8 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher hdt tx8 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_HDT_TX9:
case RTW89_DBG_PORT_SEL_DSPT_HDT_TXA:
@@ -2799,7 +2956,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 0);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, index);
- seq_printf(m, "Enable Dispatcher hdt tx%x dump.\n", index);
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher hdt tx%x dump.\n", index);
break;
case RTW89_DBG_PORT_SEL_DSPT_HDT_TXD:
info = &dbg_port_dspt_hdt_txD;
@@ -2807,7 +2965,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 0);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 0xD);
- seq_puts(m, "Enable Dispatcher hdt txD dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher hdt txD dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_CDT_TX0:
info = &dbg_port_dspt_cdt_tx0;
@@ -2815,7 +2974,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 1);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 0);
- seq_puts(m, "Enable Dispatcher cdt tx0 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher cdt tx0 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_CDT_TX1:
info = &dbg_port_dspt_cdt_tx1;
@@ -2823,7 +2983,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 1);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 1);
- seq_puts(m, "Enable Dispatcher cdt tx1 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher cdt tx1 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_CDT_TX3:
info = &dbg_port_dspt_cdt_tx3;
@@ -2831,7 +2992,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 1);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 3);
- seq_puts(m, "Enable Dispatcher cdt tx3 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher cdt tx3 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_CDT_TX4:
info = &dbg_port_dspt_cdt_tx4;
@@ -2839,7 +3001,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 1);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 4);
- seq_puts(m, "Enable Dispatcher cdt tx4 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher cdt tx4 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_CDT_TX5:
case RTW89_DBG_PORT_SEL_DSPT_CDT_TX6:
@@ -2851,7 +3014,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 1);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, index);
- seq_printf(m, "Enable Dispatcher cdt tx%x dump.\n", index);
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher cdt tx%x dump.\n", index);
break;
case RTW89_DBG_PORT_SEL_DSPT_CDT_TX9:
info = &dbg_port_dspt_cdt_tx9;
@@ -2859,7 +3023,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 1);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 9);
- seq_puts(m, "Enable Dispatcher cdt tx9 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher cdt tx9 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_CDT_TXA:
case RTW89_DBG_PORT_SEL_DSPT_CDT_TXB:
@@ -2870,7 +3035,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 1);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, index);
- seq_printf(m, "Enable Dispatcher cdt tx%x dump.\n", index);
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher cdt tx%x dump.\n", index);
break;
case RTW89_DBG_PORT_SEL_DSPT_HDT_RX0:
info = &dbg_port_dspt_hdt_rx0;
@@ -2878,7 +3044,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 2);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 0);
- seq_puts(m, "Enable Dispatcher hdt rx0 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher hdt rx0 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_HDT_RX1:
case RTW89_DBG_PORT_SEL_DSPT_HDT_RX2:
@@ -2888,7 +3055,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 2);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, index);
- seq_printf(m, "Enable Dispatcher hdt rx%x dump.\n", index);
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher hdt rx%x dump.\n", index);
break;
case RTW89_DBG_PORT_SEL_DSPT_HDT_RX3:
info = &dbg_port_dspt_hdt_rx3;
@@ -2896,7 +3064,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 2);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 3);
- seq_puts(m, "Enable Dispatcher hdt rx3 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher hdt rx3 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_HDT_RX4:
info = &dbg_port_dspt_hdt_rx4;
@@ -2904,7 +3073,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 2);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 4);
- seq_puts(m, "Enable Dispatcher hdt rx4 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher hdt rx4 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_HDT_RX5:
info = &dbg_port_dspt_hdt_rx5;
@@ -2912,7 +3082,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 2);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 5);
- seq_puts(m, "Enable Dispatcher hdt rx5 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher hdt rx5 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_CDT_RX_P0_0:
info = &dbg_port_dspt_cdt_rx_p0_0;
@@ -2920,7 +3091,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 3);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 0);
- seq_puts(m, "Enable Dispatcher cdt rx part0 0 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher cdt rx part0 0 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_CDT_RX_P0:
case RTW89_DBG_PORT_SEL_DSPT_CDT_RX_P0_1:
@@ -2929,7 +3101,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 3);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 1);
- seq_puts(m, "Enable Dispatcher cdt rx part0 1 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher cdt rx part0 1 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_CDT_RX_P0_2:
info = &dbg_port_dspt_cdt_rx_p0_2;
@@ -2937,43 +3110,50 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 3);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 2);
- seq_puts(m, "Enable Dispatcher cdt rx part0 2 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher cdt rx part0 2 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_CDT_RX_P1:
info = &dbg_port_dspt_cdt_rx_p1;
rtw89_write8_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_INTN_SEL_MASK, 3);
- seq_puts(m, "Enable Dispatcher cdt rx part1 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher cdt rx part1 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_STF_CTRL:
info = &dbg_port_dspt_stf_ctrl;
rtw89_write8_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_INTN_SEL_MASK, 4);
- seq_puts(m, "Enable Dispatcher stf control dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher stf control dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_ADDR_CTRL:
info = &dbg_port_dspt_addr_ctrl;
rtw89_write8_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_INTN_SEL_MASK, 5);
- seq_puts(m, "Enable Dispatcher addr control dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher addr control dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_WDE_INTF:
info = &dbg_port_dspt_wde_intf;
rtw89_write8_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_INTN_SEL_MASK, 6);
- seq_puts(m, "Enable Dispatcher wde interface dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher wde interface dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_PLE_INTF:
info = &dbg_port_dspt_ple_intf;
rtw89_write8_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_INTN_SEL_MASK, 7);
- seq_puts(m, "Enable Dispatcher ple interface dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher ple interface dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_FLOW_CTRL:
info = &dbg_port_dspt_flow_ctrl;
rtw89_write8_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_INTN_SEL_MASK, 8);
- seq_puts(m, "Enable Dispatcher flow control dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher flow control dump.\n");
break;
case RTW89_DBG_PORT_SEL_PCIE_TXDMA:
info = &dbg_port_pcie_txdma;
@@ -2981,7 +3161,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
val32 = u32_replace_bits(val32, PCIE_TXDMA_DBG_SEL, B_AX_DBG_SEL0);
val32 = u32_replace_bits(val32, PCIE_TXDMA_DBG_SEL, B_AX_DBG_SEL1);
rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
- seq_puts(m, "Enable pcie txdma dump.\n");
+ p += scnprintf(p, end - p, "Enable pcie txdma dump.\n");
break;
case RTW89_DBG_PORT_SEL_PCIE_RXDMA:
info = &dbg_port_pcie_rxdma;
@@ -2989,7 +3169,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
val32 = u32_replace_bits(val32, PCIE_RXDMA_DBG_SEL, B_AX_DBG_SEL0);
val32 = u32_replace_bits(val32, PCIE_RXDMA_DBG_SEL, B_AX_DBG_SEL1);
rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
- seq_puts(m, "Enable pcie rxdma dump.\n");
+ p += scnprintf(p, end - p, "Enable pcie rxdma dump.\n");
break;
case RTW89_DBG_PORT_SEL_PCIE_CVT:
info = &dbg_port_pcie_cvt;
@@ -2997,7 +3177,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
val32 = u32_replace_bits(val32, PCIE_CVT_DBG_SEL, B_AX_DBG_SEL0);
val32 = u32_replace_bits(val32, PCIE_CVT_DBG_SEL, B_AX_DBG_SEL1);
rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
- seq_puts(m, "Enable pcie cvt dump.\n");
+ p += scnprintf(p, end - p, "Enable pcie cvt dump.\n");
break;
case RTW89_DBG_PORT_SEL_PCIE_CXPL:
info = &dbg_port_pcie_cxpl;
@@ -3005,7 +3185,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
val32 = u32_replace_bits(val32, PCIE_CXPL_DBG_SEL, B_AX_DBG_SEL0);
val32 = u32_replace_bits(val32, PCIE_CXPL_DBG_SEL, B_AX_DBG_SEL1);
rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
- seq_puts(m, "Enable pcie cxpl dump.\n");
+ p += scnprintf(p, end - p, "Enable pcie cxpl dump.\n");
break;
case RTW89_DBG_PORT_SEL_PCIE_IO:
info = &dbg_port_pcie_io;
@@ -3013,7 +3193,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
val32 = u32_replace_bits(val32, PCIE_IO_DBG_SEL, B_AX_DBG_SEL0);
val32 = u32_replace_bits(val32, PCIE_IO_DBG_SEL, B_AX_DBG_SEL1);
rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
- seq_puts(m, "Enable pcie io dump.\n");
+ p += scnprintf(p, end - p, "Enable pcie io dump.\n");
break;
case RTW89_DBG_PORT_SEL_PCIE_MISC:
info = &dbg_port_pcie_misc;
@@ -3021,7 +3201,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
val32 = u32_replace_bits(val32, PCIE_MISC_DBG_SEL, B_AX_DBG_SEL0);
val32 = u32_replace_bits(val32, PCIE_MISC_DBG_SEL, B_AX_DBG_SEL1);
rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
- seq_puts(m, "Enable pcie misc dump.\n");
+ p += scnprintf(p, end - p, "Enable pcie misc dump.\n");
break;
case RTW89_DBG_PORT_SEL_PCIE_MISC2:
info = &dbg_port_pcie_misc2;
@@ -3029,14 +3209,16 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
val16 = u16_replace_bits(val16, PCIE_MISC2_DBG_SEL,
B_AX_PCIE_DBG_SEL_MASK);
rtw89_write16(rtwdev, R_AX_PCIE_DBG_CTRL, val16);
- seq_puts(m, "Enable pcie misc2 dump.\n");
+ p += scnprintf(p, end - p, "Enable pcie misc2 dump.\n");
break;
default:
- seq_puts(m, "Dbg port select err\n");
- return NULL;
+ p += scnprintf(p, end - p, "Dbg port select err\n");
+ break;
}
- return info;
+ *ppinfo = info;
+
+ return p - buf;
}
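The port-select conversion above adds one wrinkle to that pattern: since the return value is now a byte count, the selected descriptor can no longer be returned directly and travels back through the *ppinfo out-parameter instead, with NULL signalling an invalid selection. A sketch of that shape, assuming the usual <linux/kernel.h> helpers; the struct, table and names below are illustrative, not the driver's:

/* Illustrative only: return the text length, hand the selection back
 * through an out-parameter so a NULL result still reaches the caller.
 */
struct example_info {
	const char *name;
};

static ssize_t example_port_sel(u32 sel, char *buf, size_t bufsz,
				const struct example_info **ppinfo)
{
	static const struct example_info table[] = { { "port0" }, { "port1" } };
	const struct example_info *info = NULL;
	char *p = buf, *end = buf + bufsz;

	if (sel < ARRAY_SIZE(table)) {
		info = &table[sel];
		p += scnprintf(p, end - p, "Enable %s dbgport.\n", info->name);
	} else {
		p += scnprintf(p, end - p, "Dbg port select err\n");
	}

	*ppinfo = info;	/* NULL tells the caller the selection failed */

	return p - buf;
}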
static bool is_dbg_port_valid(struct rtw89_dev *rtwdev, u32 sel)
@@ -3070,23 +3252,25 @@ static bool is_dbg_port_valid(struct rtw89_dev *rtwdev, u32 sel)
}
static int rtw89_debug_mac_dbg_port_dump(struct rtw89_dev *rtwdev,
- struct seq_file *m, u32 sel)
+ char *buf, size_t bufsz, u32 sel)
{
- const struct rtw89_mac_dbg_port_info *info;
- u8 val8;
- u16 val16;
+ const struct rtw89_mac_dbg_port_info *info = NULL;
+ char *p = buf, *end = buf + bufsz;
u32 val32;
+ u16 val16;
+ u8 val8;
u32 i;
- info = rtw89_debug_mac_dbg_port_sel(m, rtwdev, sel);
+ p += rtw89_debug_mac_dbg_port_sel(rtwdev, p, end - p, sel, &info);
+
if (!info) {
rtw89_err(rtwdev, "failed to select debug port %d\n", sel);
- return -EINVAL;
+ goto out;
}
#define case_DBG_SEL(__sel) \
case RTW89_DBG_PORT_SEL_##__sel: \
- seq_puts(m, "Dump debug port " #__sel ":\n"); \
+ p += scnprintf(p, end - p, "Dump debug port " #__sel ":\n"); \
break
switch (sel) {
@@ -3182,8 +3366,8 @@ static int rtw89_debug_mac_dbg_port_dump(struct rtw89_dev *rtwdev,
#undef case_DBG_SEL
- seq_printf(m, "Sel addr = 0x%X\n", info->sel_addr);
- seq_printf(m, "Read addr = 0x%X\n", info->rd_addr);
+ p += scnprintf(p, end - p, "Sel addr = 0x%X\n", info->sel_addr);
+ p += scnprintf(p, end - p, "Read addr = 0x%X\n", info->rd_addr);
for (i = info->srt; i <= info->end; i++) {
switch (info->sel_byte) {
@@ -3191,17 +3375,17 @@ static int rtw89_debug_mac_dbg_port_dump(struct rtw89_dev *rtwdev,
default:
rtw89_write8_mask(rtwdev, info->sel_addr,
info->sel_msk, i);
- seq_printf(m, "0x%02X: ", i);
+ p += scnprintf(p, end - p, "0x%02X: ", i);
break;
case 2:
rtw89_write16_mask(rtwdev, info->sel_addr,
info->sel_msk, i);
- seq_printf(m, "0x%04X: ", i);
+ p += scnprintf(p, end - p, "0x%04X: ", i);
break;
case 4:
rtw89_write32_mask(rtwdev, info->sel_addr,
info->sel_msk, i);
- seq_printf(m, "0x%04X: ", i);
+ p += scnprintf(p, end - p, "0x%04X: ", i);
break;
}
@@ -3212,77 +3396,75 @@ static int rtw89_debug_mac_dbg_port_dump(struct rtw89_dev *rtwdev,
default:
val8 = rtw89_read8_mask(rtwdev,
info->rd_addr, info->rd_msk);
- seq_printf(m, "0x%02X\n", val8);
+ p += scnprintf(p, end - p, "0x%02X\n", val8);
break;
case 2:
val16 = rtw89_read16_mask(rtwdev,
info->rd_addr, info->rd_msk);
- seq_printf(m, "0x%04X\n", val16);
+ p += scnprintf(p, end - p, "0x%04X\n", val16);
break;
case 4:
val32 = rtw89_read32_mask(rtwdev,
info->rd_addr, info->rd_msk);
- seq_printf(m, "0x%08X\n", val32);
+ p += scnprintf(p, end - p, "0x%08X\n", val32);
break;
}
}
- return 0;
+out:
+ return p - buf;
}
static int rtw89_debug_mac_dump_dbg_port(struct rtw89_dev *rtwdev,
- struct seq_file *m)
+ char *buf, size_t bufsz)
{
+ char *p = buf, *end = buf + bufsz;
+ ssize_t n;
u32 sel;
- int ret = 0;
for (sel = RTW89_DBG_PORT_SEL_PTCL_C0;
sel < RTW89_DBG_PORT_SEL_LAST; sel++) {
if (!is_dbg_port_valid(rtwdev, sel))
continue;
- ret = rtw89_debug_mac_dbg_port_dump(rtwdev, m, sel);
- if (ret) {
+ n = rtw89_debug_mac_dbg_port_dump(rtwdev, p, end - p, sel);
+ if (n < 0) {
rtw89_err(rtwdev,
"failed to dump debug port %d\n", sel);
break;
}
+ p += n;
}
- return ret;
+ return p - buf;
}
-static int
-rtw89_debug_priv_mac_dbg_port_dump_get(struct seq_file *m, void *v)
+static ssize_t
+rtw89_debug_priv_mac_dbg_port_dump_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
{
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ char *p = buf, *end = buf + bufsz;
if (debugfs_priv->dbgpkg_en.ss_dbg)
- rtw89_debug_mac_dump_ss_dbg(rtwdev, m);
+ p += rtw89_debug_mac_dump_ss_dbg(rtwdev, p, end - p);
if (debugfs_priv->dbgpkg_en.dle_dbg)
- rtw89_debug_mac_dump_dle_dbg(rtwdev, m);
+ p += rtw89_debug_mac_dump_dle_dbg(rtwdev, p, end - p);
if (debugfs_priv->dbgpkg_en.dmac_dbg)
- rtw89_debug_mac_dump_dmac_dbg(rtwdev, m);
+ p += rtw89_debug_mac_dump_dmac_dbg(rtwdev, p, end - p);
if (debugfs_priv->dbgpkg_en.cmac_dbg)
- rtw89_debug_mac_dump_cmac_dbg(rtwdev, m);
+ p += rtw89_debug_mac_dump_cmac_dbg(rtwdev, p, end - p);
if (debugfs_priv->dbgpkg_en.dbg_port)
- rtw89_debug_mac_dump_dbg_port(rtwdev, m);
+ p += rtw89_debug_mac_dump_dbg_port(rtwdev, p, end - p);
- return 0;
+ return p - buf;
};
-static u8 *rtw89_hex2bin_user(struct rtw89_dev *rtwdev,
- const char __user *user_buf, size_t count)
+static u8 *rtw89_hex2bin(struct rtw89_dev *rtwdev, const char *buf, size_t count)
{
- char *buf;
u8 *bin;
int num;
int err = 0;
- buf = memdup_user(user_buf, count);
- if (IS_ERR(buf))
- return buf;
-
num = count / 2;
bin = kmalloc(num, GFP_KERNEL);
if (!bin) {
@@ -3297,22 +3479,18 @@ static u8 *rtw89_hex2bin_user(struct rtw89_dev *rtwdev,
}
out:
- kfree(buf);
-
return err ? ERR_PTR(err) : bin;
}
-static ssize_t rtw89_debug_priv_send_h2c_set(struct file *filp,
- const char __user *user_buf,
- size_t count, loff_t *loff)
+static ssize_t rtw89_debug_priv_send_h2c_set(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count)
{
- struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
u8 *h2c;
int ret;
u16 h2c_len = count / 2;
- h2c = rtw89_hex2bin_user(rtwdev, user_buf, count);
+ h2c = rtw89_hex2bin(rtwdev, buf, count);
if (IS_ERR(h2c))
return -EFAULT;
@@ -3323,34 +3501,36 @@ static ssize_t rtw89_debug_priv_send_h2c_set(struct file *filp,
return ret ? ret : count;
}
-static int
-rtw89_debug_priv_early_h2c_get(struct seq_file *m, void *v)
+static ssize_t
+rtw89_debug_priv_early_h2c_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
{
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw89_early_h2c *early_h2c;
+ char *p = buf, *end = buf + bufsz;
int seq = 0;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list)
- seq_printf(m, "%d: %*ph\n", ++seq, early_h2c->h2c_len, early_h2c->h2c);
- mutex_unlock(&rtwdev->mutex);
+ p += scnprintf(p, end - p, "%d: %*ph\n", ++seq,
+ early_h2c->h2c_len, early_h2c->h2c);
- return 0;
+ return p - buf;
}
static ssize_t
-rtw89_debug_priv_early_h2c_set(struct file *filp, const char __user *user_buf,
- size_t count, loff_t *loff)
+rtw89_debug_priv_early_h2c_set(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count)
{
- struct seq_file *m = (struct seq_file *)filp->private_data;
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw89_early_h2c *early_h2c;
u8 *h2c;
u16 h2c_len = count / 2;
- h2c = rtw89_hex2bin_user(rtwdev, user_buf, count);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
+ h2c = rtw89_hex2bin(rtwdev, buf, count);
if (IS_ERR(h2c))
return -EFAULT;
@@ -3369,9 +3549,7 @@ rtw89_debug_priv_early_h2c_set(struct file *filp, const char __user *user_buf,
early_h2c->h2c = h2c;
early_h2c->h2c_len = h2c_len;
- mutex_lock(&rtwdev->mutex);
list_add_tail(&early_h2c->list, &rtwdev->early_h2c_list);
- mutex_unlock(&rtwdev->mutex);
out:
return count;
@@ -3404,15 +3582,16 @@ static int rtw89_dbg_trigger_ctrl_error(struct rtw89_dev *rtwdev)
return 0;
}
-static int
-rtw89_debug_priv_fw_crash_get(struct seq_file *m, void *v)
+static ssize_t
+rtw89_debug_priv_fw_crash_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
{
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ char *p = buf, *end = buf + bufsz;
- seq_printf(m, "%d\n",
- test_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags));
- return 0;
+ p += scnprintf(p, end - p, "%d\n",
+ test_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags));
+ return p - buf;
}
enum rtw89_dbg_crash_simulation_type {
@@ -3421,17 +3600,17 @@ enum rtw89_dbg_crash_simulation_type {
};
static ssize_t
-rtw89_debug_priv_fw_crash_set(struct file *filp, const char __user *user_buf,
- size_t count, loff_t *loff)
+rtw89_debug_priv_fw_crash_set(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count)
{
- struct seq_file *m = (struct seq_file *)filp->private_data;
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
int (*sim)(struct rtw89_dev *rtwdev);
u8 crash_type;
int ret;
- ret = kstrtou8_from_user(user_buf, count, 0, &crash_type);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
+ ret = kstrtou8(buf, 0, &crash_type);
if (ret)
return -EINVAL;
@@ -3448,10 +3627,8 @@ rtw89_debug_priv_fw_crash_set(struct file *filp, const char __user *user_buf,
return -EINVAL;
}
- mutex_lock(&rtwdev->mutex);
set_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags);
ret = sim(rtwdev);
- mutex_unlock(&rtwdev->mutex);
if (ret)
return ret;
@@ -3459,27 +3636,22 @@ rtw89_debug_priv_fw_crash_set(struct file *filp, const char __user *user_buf,
return count;
}
-static int rtw89_debug_priv_btc_info_get(struct seq_file *m, void *v)
+static ssize_t rtw89_debug_priv_btc_info_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
{
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
-
- rtw89_btc_dump_info(rtwdev, m);
-
- return 0;
+ return rtw89_btc_dump_info(rtwdev, buf, bufsz);
}
-static ssize_t rtw89_debug_priv_btc_manual_set(struct file *filp,
- const char __user *user_buf,
- size_t count, loff_t *loff)
+static ssize_t rtw89_debug_priv_btc_manual_set(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count)
{
- struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
int ret;
- ret = kstrtobool_from_user(user_buf, count, &btc->manual_ctrl);
+ ret = kstrtobool(buf, &btc->manual_ctrl);
if (ret)
return ret;
@@ -3491,31 +3663,29 @@ static ssize_t rtw89_debug_priv_btc_manual_set(struct file *filp,
return count;
}
-static ssize_t rtw89_debug_priv_fw_log_manual_set(struct file *filp,
- const char __user *user_buf,
- size_t count, loff_t *loff)
+static ssize_t rtw89_debug_priv_fw_log_manual_set(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count)
{
- struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw89_fw_log *log = &rtwdev->fw.log;
bool fw_log_manual;
- if (kstrtobool_from_user(user_buf, count, &fw_log_manual))
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
+ if (kstrtobool(buf, &fw_log_manual))
goto out;
- mutex_lock(&rtwdev->mutex);
log->enable = fw_log_manual;
if (log->enable)
rtw89_fw_log_prepare(rtwdev);
rtw89_fw_h2c_fw_log(rtwdev, fw_log_manual);
- mutex_unlock(&rtwdev->mutex);
out:
return count;
}
-static void rtw89_sta_link_info_get_iter(struct seq_file *m,
- struct rtw89_dev *rtwdev,
- struct rtw89_sta_link *rtwsta_link)
+static int rtw89_sta_link_info_get_iter(struct rtw89_dev *rtwdev,
+ char *buf, size_t bufsz,
+ struct rtw89_sta_link *rtwsta_link)
{
static const char * const he_gi_str[] = {
[NL80211_RATE_INFO_HE_GI_0_8] = "0.8",
@@ -3533,6 +3703,7 @@ static void rtw89_sta_link_info_get_iter(struct seq_file *m,
u8 ant_num = hal->ant_diversity ? 2 : rtwdev->chip->rf_path_num;
bool ant_asterisk = hal->tx_path_diversity || hal->ant_diversity;
struct ieee80211_link_sta *link_sta;
+ char *p = buf, *end = buf + bufsz;
u8 evm_min, evm_max, evm_1ss;
u16 max_rc_amsdu_len;
u8 rssi;
@@ -3546,107 +3717,136 @@ static void rtw89_sta_link_info_get_iter(struct seq_file *m,
rcu_read_unlock();
- seq_printf(m, "TX rate [%u, %u]: ", rtwsta_link->mac_id, rtwsta_link->link_id);
+ p += scnprintf(p, end - p, "TX rate [%u, %u]: ", rtwsta_link->mac_id,
+ rtwsta_link->link_id);
if (rate->flags & RATE_INFO_FLAGS_MCS)
- seq_printf(m, "HT MCS-%d%s", rate->mcs,
- rate->flags & RATE_INFO_FLAGS_SHORT_GI ? " SGI" : "");
+ p += scnprintf(p, end - p, "HT MCS-%d%s", rate->mcs,
+ rate->flags & RATE_INFO_FLAGS_SHORT_GI ? " SGI" : "");
else if (rate->flags & RATE_INFO_FLAGS_VHT_MCS)
- seq_printf(m, "VHT %dSS MCS-%d%s", rate->nss, rate->mcs,
- rate->flags & RATE_INFO_FLAGS_SHORT_GI ? " SGI" : "");
+ p += scnprintf(p, end - p, "VHT %dSS MCS-%d%s", rate->nss,
+ rate->mcs,
+ rate->flags & RATE_INFO_FLAGS_SHORT_GI ? " SGI" : "");
else if (rate->flags & RATE_INFO_FLAGS_HE_MCS)
- seq_printf(m, "HE %dSS MCS-%d GI:%s", rate->nss, rate->mcs,
- rate->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ?
- he_gi_str[rate->he_gi] : "N/A");
+ p += scnprintf(p, end - p, "HE %dSS MCS-%d GI:%s", rate->nss,
+ rate->mcs,
+ rate->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ?
+ he_gi_str[rate->he_gi] : "N/A");
else if (rate->flags & RATE_INFO_FLAGS_EHT_MCS)
- seq_printf(m, "EHT %dSS MCS-%d GI:%s", rate->nss, rate->mcs,
- rate->eht_gi < ARRAY_SIZE(eht_gi_str) ?
- eht_gi_str[rate->eht_gi] : "N/A");
+ p += scnprintf(p, end - p, "EHT %dSS MCS-%d GI:%s", rate->nss,
+ rate->mcs,
+ rate->eht_gi < ARRAY_SIZE(eht_gi_str) ?
+ eht_gi_str[rate->eht_gi] : "N/A");
else
- seq_printf(m, "Legacy %d", rate->legacy);
- seq_printf(m, "%s", rtwsta_link->ra_report.might_fallback_legacy ? " FB_G" : "");
- seq_printf(m, " BW:%u", rtw89_rate_info_bw_to_mhz(rate->bw));
- seq_printf(m, " (hw_rate=0x%x)", rtwsta_link->ra_report.hw_rate);
- seq_printf(m, " ==> agg_wait=%d (%d)\n", rtwsta_link->max_agg_wait,
- max_rc_amsdu_len);
-
- seq_printf(m, "RX rate [%u, %u]: ", rtwsta_link->mac_id, rtwsta_link->link_id);
+ p += scnprintf(p, end - p, "Legacy %d", rate->legacy);
+ p += scnprintf(p, end - p, "%s",
+ rtwsta_link->ra_report.might_fallback_legacy ? " FB_G" : "");
+ p += scnprintf(p, end - p, " BW:%u",
+ rtw89_rate_info_bw_to_mhz(rate->bw));
+ p += scnprintf(p, end - p, " (hw_rate=0x%x)",
+ rtwsta_link->ra_report.hw_rate);
+ p += scnprintf(p, end - p, " ==> agg_wait=%d (%d)\n",
+ rtwsta_link->max_agg_wait,
+ max_rc_amsdu_len);
+
+ p += scnprintf(p, end - p, "RX rate [%u, %u]: ", rtwsta_link->mac_id,
+ rtwsta_link->link_id);
switch (status->encoding) {
case RX_ENC_LEGACY:
- seq_printf(m, "Legacy %d", status->rate_idx +
- (status->band != NL80211_BAND_2GHZ ? 4 : 0));
+ p += scnprintf(p, end - p, "Legacy %d", status->rate_idx +
+ (status->band != NL80211_BAND_2GHZ ? 4 : 0));
break;
case RX_ENC_HT:
- seq_printf(m, "HT MCS-%d%s", status->rate_idx,
- status->enc_flags & RX_ENC_FLAG_SHORT_GI ? " SGI" : "");
+ p += scnprintf(p, end - p, "HT MCS-%d%s", status->rate_idx,
+ status->enc_flags & RX_ENC_FLAG_SHORT_GI ? " SGI" : "");
break;
case RX_ENC_VHT:
- seq_printf(m, "VHT %dSS MCS-%d%s", status->nss, status->rate_idx,
- status->enc_flags & RX_ENC_FLAG_SHORT_GI ? " SGI" : "");
+ p += scnprintf(p, end - p, "VHT %dSS MCS-%d%s", status->nss,
+ status->rate_idx,
+ status->enc_flags & RX_ENC_FLAG_SHORT_GI ? " SGI" : "");
break;
case RX_ENC_HE:
- seq_printf(m, "HE %dSS MCS-%d GI:%s", status->nss, status->rate_idx,
- status->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ?
- he_gi_str[status->he_gi] : "N/A");
+ p += scnprintf(p, end - p, "HE %dSS MCS-%d GI:%s",
+ status->nss, status->rate_idx,
+ status->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ?
+ he_gi_str[status->he_gi] : "N/A");
break;
case RX_ENC_EHT:
- seq_printf(m, "EHT %dSS MCS-%d GI:%s", status->nss, status->rate_idx,
- status->eht.gi < ARRAY_SIZE(eht_gi_str) ?
- eht_gi_str[status->eht.gi] : "N/A");
+ p += scnprintf(p, end - p, "EHT %dSS MCS-%d GI:%s",
+ status->nss, status->rate_idx,
+ status->eht.gi < ARRAY_SIZE(eht_gi_str) ?
+ eht_gi_str[status->eht.gi] : "N/A");
break;
}
- seq_printf(m, " BW:%u", rtw89_rate_info_bw_to_mhz(status->bw));
- seq_printf(m, " (hw_rate=0x%x)\n", rtwsta_link->rx_hw_rate);
+ p += scnprintf(p, end - p, " BW:%u",
+ rtw89_rate_info_bw_to_mhz(status->bw));
+ p += scnprintf(p, end - p, " (hw_rate=0x%x)\n",
+ rtwsta_link->rx_hw_rate);
rssi = ewma_rssi_read(&rtwsta_link->avg_rssi);
- seq_printf(m, "RSSI: %d dBm (raw=%d, prev=%d) [",
- RTW89_RSSI_RAW_TO_DBM(rssi), rssi, rtwsta_link->prev_rssi);
+ p += scnprintf(p, end - p, "RSSI: %d dBm (raw=%d, prev=%d) [",
+ RTW89_RSSI_RAW_TO_DBM(rssi), rssi,
+ rtwsta_link->prev_rssi);
for (i = 0; i < ant_num; i++) {
rssi = ewma_rssi_read(&rtwsta_link->rssi[i]);
- seq_printf(m, "%d%s%s", RTW89_RSSI_RAW_TO_DBM(rssi),
- ant_asterisk && (hal->antenna_tx & BIT(i)) ? "*" : "",
- i + 1 == ant_num ? "" : ", ");
+ p += scnprintf(p, end - p, "%d%s%s",
+ RTW89_RSSI_RAW_TO_DBM(rssi),
+ ant_asterisk && (hal->antenna_tx & BIT(i)) ? "*" : "",
+ i + 1 == ant_num ? "" : ", ");
}
- seq_puts(m, "]\n");
+ p += scnprintf(p, end - p, "]\n");
evm_1ss = ewma_evm_read(&rtwsta_link->evm_1ss);
- seq_printf(m, "EVM: [%2u.%02u, ", evm_1ss >> 2, (evm_1ss & 0x3) * 25);
+ p += scnprintf(p, end - p, "EVM: [%2u.%02u, ", evm_1ss >> 2,
+ (evm_1ss & 0x3) * 25);
for (i = 0; i < (hal->ant_diversity ? 2 : 1); i++) {
evm_min = ewma_evm_read(&rtwsta_link->evm_min[i]);
evm_max = ewma_evm_read(&rtwsta_link->evm_max[i]);
- seq_printf(m, "%s(%2u.%02u, %2u.%02u)", i == 0 ? "" : " ",
- evm_min >> 2, (evm_min & 0x3) * 25,
- evm_max >> 2, (evm_max & 0x3) * 25);
+ p += scnprintf(p, end - p, "%s(%2u.%02u, %2u.%02u)",
+ i == 0 ? "" : " ",
+ evm_min >> 2, (evm_min & 0x3) * 25,
+ evm_max >> 2, (evm_max & 0x3) * 25);
}
- seq_puts(m, "]\t");
+ p += scnprintf(p, end - p, "]\t");
snr = ewma_snr_read(&rtwsta_link->avg_snr);
- seq_printf(m, "SNR: %u\n", snr);
+ p += scnprintf(p, end - p, "SNR: %u\n", snr);
+
+ return p - buf;
}
static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
{
- struct seq_file *m = (struct seq_file *)data;
+ struct rtw89_debugfs_iter_data *iter_data =
+ (struct rtw89_debugfs_iter_data *)data;
struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
struct rtw89_dev *rtwdev = rtwsta->rtwdev;
struct rtw89_sta_link *rtwsta_link;
+ size_t bufsz = iter_data->bufsz;
+ char *buf = iter_data->buf;
+ char *p = buf, *end = buf + bufsz;
unsigned int link_id;
rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id)
- rtw89_sta_link_info_get_iter(m, rtwdev, rtwsta_link);
+ p += rtw89_sta_link_info_get_iter(rtwdev, p, end - p, rtwsta_link);
+
+ rtw89_debugfs_iter_data_next(iter_data, p, end - p, p - buf);
}
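The station iterator above shows the third recurring shape in this patch: mac80211's ieee80211_iterate_stations_atomic() only hands the callback an opaque void *data, so the buffer cursor is wrapped in a rtw89_debugfs_iter_data and advanced with the _setup()/_next() helpers. Their definitions are outside this hunk, so the sketch below uses hypothetical names purely to illustrate the idea of carrying a cursor and a running written_sz through an opaque iterator:

/* Hypothetical names and layout -- not the driver's actual helpers. */
struct example_iter_data {
	char *buf;		/* where the next callback should write */
	size_t bufsz;		/* space remaining */
	size_t written_sz;	/* total produced across all callbacks */
};

static void example_iter_setup(struct example_iter_data *d,
			       char *buf, size_t bufsz)
{
	d->buf = buf;
	d->bufsz = bufsz;
	d->written_sz = 0;
}

static void example_iter_next(struct example_iter_data *d,
			      char *next, size_t remain, size_t wrote)
{
	d->buf = next;
	d->bufsz = remain;
	d->written_sz += wrote;
}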
-static void
-rtw89_debug_append_rx_rate(struct seq_file *m, struct rtw89_pkt_stat *pkt_stat,
+static int
+rtw89_debug_append_rx_rate(char *buf, size_t bufsz, struct rtw89_pkt_stat *pkt_stat,
enum rtw89_hw_rate first_rate, int len)
{
+ char *p = buf, *end = buf + bufsz;
int i;
for (i = 0; i < len; i++)
- seq_printf(m, "%s%u", i == 0 ? "" : ", ",
- pkt_stat->rx_rate_cnt[first_rate + i]);
+ p += scnprintf(p, end - p, "%s%u", i == 0 ? "" : ", ",
+ pkt_stat->rx_rate_cnt[first_rate + i]);
+
+ return p - buf;
}
#define FIRST_RATE_SAME(rate) {RTW89_HW_RATE_ ## rate, RTW89_HW_RATE_ ## rate}
@@ -3671,34 +3871,40 @@ static const struct rtw89_rx_rate_cnt_info {
{FIRST_RATE_GEV1(EHT_NSS2_MCS0), 14, 0, "EHT 2SS:"},
};
-static int rtw89_debug_priv_phy_info_get(struct seq_file *m, void *v)
+static ssize_t rtw89_debug_priv_phy_info_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
{
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw89_traffic_stats *stats = &rtwdev->stats;
struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.last_pkt_stat;
const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_debugfs_iter_data iter_data;
const struct rtw89_rx_rate_cnt_info *info;
struct rtw89_hal *hal = &rtwdev->hal;
+ char *p = buf, *end = buf + bufsz;
enum rtw89_hw_rate first_rate;
u8 rssi;
int i;
rssi = ewma_rssi_read(&rtwdev->phystat.bcn_rssi);
- seq_printf(m, "TP TX: %u [%u] Mbps (lv: %d",
- stats->tx_throughput, stats->tx_throughput_raw, stats->tx_tfc_lv);
+ p += scnprintf(p, end - p, "TP TX: %u [%u] Mbps (lv: %d",
+ stats->tx_throughput, stats->tx_throughput_raw,
+ stats->tx_tfc_lv);
if (hal->thermal_prot_lv)
- seq_printf(m, ", duty: %d%%",
- 100 - hal->thermal_prot_lv * RTW89_THERMAL_PROT_STEP);
- seq_printf(m, "), RX: %u [%u] Mbps (lv: %d)\n",
- stats->rx_throughput, stats->rx_throughput_raw, stats->rx_tfc_lv);
- seq_printf(m, "Beacon: %u (%d dBm), TF: %u\n", pkt_stat->beacon_nr,
- RTW89_RSSI_RAW_TO_DBM(rssi), stats->rx_tf_periodic);
- seq_printf(m, "Avg packet length: TX=%u, RX=%u\n", stats->tx_avg_len,
- stats->rx_avg_len);
-
- seq_puts(m, "RX count:\n");
+ p += scnprintf(p, end - p, ", duty: %d%%",
+ 100 - hal->thermal_prot_lv * RTW89_THERMAL_PROT_STEP);
+ p += scnprintf(p, end - p, "), RX: %u [%u] Mbps (lv: %d)\n",
+ stats->rx_throughput, stats->rx_throughput_raw,
+ stats->rx_tfc_lv);
+ p += scnprintf(p, end - p, "Beacon: %u (%d dBm), TF: %u\n",
+ pkt_stat->beacon_nr,
+ RTW89_RSSI_RAW_TO_DBM(rssi), stats->rx_tf_periodic);
+ p += scnprintf(p, end - p, "Avg packet length: TX=%u, RX=%u\n",
+ stats->tx_avg_len,
+ stats->rx_avg_len);
+
+ p += scnprintf(p, end - p, "RX count:\n");
for (i = 0; i < ARRAY_SIZE(rtw89_rx_rate_cnt_infos); i++) {
info = &rtw89_rx_rate_cnt_infos[i];
@@ -3706,189 +3912,238 @@ static int rtw89_debug_priv_phy_info_get(struct seq_file *m, void *v)
if (first_rate >= RTW89_HW_RATE_NR)
continue;
- seq_printf(m, "%10s [", info->rate_mode);
- rtw89_debug_append_rx_rate(m, pkt_stat,
- first_rate, info->len);
+ p += scnprintf(p, end - p, "%10s [", info->rate_mode);
+ p += rtw89_debug_append_rx_rate(p, end - p, pkt_stat,
+ first_rate, info->len);
if (info->ext) {
- seq_puts(m, "][");
- rtw89_debug_append_rx_rate(m, pkt_stat,
- first_rate + info->len, info->ext);
+ p += scnprintf(p, end - p, "][");
+ p += rtw89_debug_append_rx_rate(p, end - p, pkt_stat,
+ first_rate + info->len, info->ext);
}
- seq_puts(m, "]\n");
+ p += scnprintf(p, end - p, "]\n");
}
- ieee80211_iterate_stations_atomic(rtwdev->hw, rtw89_sta_info_get_iter, m);
+ rtw89_debugfs_iter_data_setup(&iter_data, p, end - p);
+ ieee80211_iterate_stations_atomic(rtwdev->hw, rtw89_sta_info_get_iter, &iter_data);
+ p += iter_data.written_sz;
- return 0;
+ return p - buf;
}
-static void rtw89_dump_addr_cam(struct seq_file *m,
- struct rtw89_dev *rtwdev,
- struct rtw89_addr_cam_entry *addr_cam)
+static int rtw89_dump_addr_cam(struct rtw89_dev *rtwdev,
+ char *buf, size_t bufsz,
+ struct rtw89_addr_cam_entry *addr_cam)
{
struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
const struct rtw89_sec_cam_entry *sec_entry;
+ char *p = buf, *end = buf + bufsz;
u8 sec_cam_idx;
int i;
- seq_printf(m, "\taddr_cam_idx=%u\n", addr_cam->addr_cam_idx);
- seq_printf(m, "\t-> bssid_cam_idx=%u\n", addr_cam->bssid_cam_idx);
- seq_printf(m, "\tsec_cam_bitmap=%*ph\n", (int)sizeof(addr_cam->sec_cam_map),
- addr_cam->sec_cam_map);
+ p += scnprintf(p, end - p, "\taddr_cam_idx=%u\n",
+ addr_cam->addr_cam_idx);
+ p += scnprintf(p, end - p, "\t-> bssid_cam_idx=%u\n",
+ addr_cam->bssid_cam_idx);
+ p += scnprintf(p, end - p, "\tsec_cam_bitmap=%*ph\n",
+ (int)sizeof(addr_cam->sec_cam_map),
+ addr_cam->sec_cam_map);
for_each_set_bit(i, addr_cam->sec_cam_map, RTW89_SEC_CAM_IN_ADDR_CAM) {
sec_cam_idx = addr_cam->sec_ent[i];
sec_entry = cam_info->sec_entries[sec_cam_idx];
if (!sec_entry)
continue;
- seq_printf(m, "\tsec[%d]: sec_cam_idx %u", i, sec_entry->sec_cam_idx);
+ p += scnprintf(p, end - p, "\tsec[%d]: sec_cam_idx %u", i,
+ sec_entry->sec_cam_idx);
if (sec_entry->ext_key)
- seq_printf(m, ", %u", sec_entry->sec_cam_idx + 1);
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, ", %u",
+ sec_entry->sec_cam_idx + 1);
+ p += scnprintf(p, end - p, "\n");
}
+
+ return p - buf;
}
-__printf(3, 4)
-static void rtw89_dump_pkt_offload(struct seq_file *m, struct list_head *pkt_list,
- const char *fmt, ...)
+__printf(4, 5)
+static int rtw89_dump_pkt_offload(char *buf, size_t bufsz, struct list_head *pkt_list,
+ const char *fmt, ...)
{
+ char *p = buf, *end = buf + bufsz;
struct rtw89_pktofld_info *info;
struct va_format vaf;
va_list args;
if (list_empty(pkt_list))
- return;
+ return 0;
va_start(args, fmt);
vaf.va = &args;
vaf.fmt = fmt;
- seq_printf(m, "%pV", &vaf);
+ p += scnprintf(p, end - p, "%pV", &vaf);
va_end(args);
list_for_each_entry(info, pkt_list, list)
- seq_printf(m, "%d ", info->id);
+ p += scnprintf(p, end - p, "%d ", info->id);
+
+ p += scnprintf(p, end - p, "\n");
- seq_puts(m, "\n");
+ return p - buf;
}
-static void rtw89_vif_link_ids_get(struct seq_file *m, u8 *mac,
- struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link)
+static int rtw89_vif_link_ids_get(struct rtw89_dev *rtwdev,
+ char *buf, size_t bufsz, u8 *mac,
+ struct rtw89_vif_link *rtwvif_link)
{
struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif_link->bssid_cam;
-
- seq_printf(m, " [%u] %pM\n", rtwvif_link->mac_id, rtwvif_link->mac_addr);
- seq_printf(m, "\tlink_id=%u\n", rtwvif_link->link_id);
- seq_printf(m, "\tbssid_cam_idx=%u\n", bssid_cam->bssid_cam_idx);
- rtw89_dump_addr_cam(m, rtwdev, &rtwvif_link->addr_cam);
- rtw89_dump_pkt_offload(m, &rtwvif_link->general_pkt_list,
- "\tpkt_ofld[GENERAL]: ");
+ char *p = buf, *end = buf + bufsz;
+
+ p += scnprintf(p, end - p, " [%u] %pM\n", rtwvif_link->mac_id,
+ rtwvif_link->mac_addr);
+ p += scnprintf(p, end - p, "\tlink_id=%u\n", rtwvif_link->link_id);
+ p += scnprintf(p, end - p, "\tbssid_cam_idx=%u\n",
+ bssid_cam->bssid_cam_idx);
+ p += rtw89_dump_addr_cam(rtwdev, p, end - p, &rtwvif_link->addr_cam);
+ p += rtw89_dump_pkt_offload(p, end - p, &rtwvif_link->general_pkt_list,
+ "\tpkt_ofld[GENERAL]: ");
+
+ return p - buf;
}
static
void rtw89_vif_ids_get_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
- struct seq_file *m = (struct seq_file *)data;
+ struct rtw89_debugfs_iter_data *iter_data =
+ (struct rtw89_debugfs_iter_data *)data;
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
struct rtw89_dev *rtwdev = rtwvif->rtwdev;
struct rtw89_vif_link *rtwvif_link;
+ size_t bufsz = iter_data->bufsz;
+ char *buf = iter_data->buf;
+ char *p = buf, *end = buf + bufsz;
unsigned int link_id;
- seq_printf(m, "VIF %pM\n", rtwvif->mac_addr);
+ p += scnprintf(p, end - p, "VIF %pM\n", rtwvif->mac_addr);
rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
- rtw89_vif_link_ids_get(m, mac, rtwdev, rtwvif_link);
+ p += rtw89_vif_link_ids_get(rtwdev, p, end - p, mac, rtwvif_link);
+
+ rtw89_debugfs_iter_data_next(iter_data, p, end - p, p - buf);
}
-static void rtw89_dump_ba_cam(struct seq_file *m, struct rtw89_dev *rtwdev,
- struct rtw89_sta_link *rtwsta_link)
+static int rtw89_dump_ba_cam(struct rtw89_dev *rtwdev,
+ char *buf, size_t bufsz,
+ struct rtw89_sta_link *rtwsta_link)
{
struct rtw89_ba_cam_entry *entry;
+ char *p = buf, *end = buf + bufsz;
bool first = true;
list_for_each_entry(entry, &rtwsta_link->ba_cam_list, list) {
if (first) {
- seq_puts(m, "\tba_cam ");
+ p += scnprintf(p, end - p, "\tba_cam ");
first = false;
} else {
- seq_puts(m, ", ");
+ p += scnprintf(p, end - p, ", ");
}
- seq_printf(m, "tid[%u]=%d", entry->tid,
- (int)(entry - rtwdev->cam_info.ba_cam_entry));
+ p += scnprintf(p, end - p, "tid[%u]=%d", entry->tid,
+ (int)(entry - rtwdev->cam_info.ba_cam_entry));
}
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
+
+ return p - buf;
}
-static void rtw89_sta_link_ids_get(struct seq_file *m,
- struct rtw89_dev *rtwdev,
- struct rtw89_sta_link *rtwsta_link)
+static int rtw89_sta_link_ids_get(struct rtw89_dev *rtwdev,
+ char *buf, size_t bufsz,
+ struct rtw89_sta_link *rtwsta_link)
{
struct ieee80211_link_sta *link_sta;
+ char *p = buf, *end = buf + bufsz;
rcu_read_lock();
link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
- seq_printf(m, " [%u] %pM\n", rtwsta_link->mac_id, link_sta->addr);
+ p += scnprintf(p, end - p, " [%u] %pM\n", rtwsta_link->mac_id,
+ link_sta->addr);
rcu_read_unlock();
- seq_printf(m, "\tlink_id=%u\n", rtwsta_link->link_id);
- rtw89_dump_addr_cam(m, rtwdev, &rtwsta_link->addr_cam);
- rtw89_dump_ba_cam(m, rtwdev, rtwsta_link);
+ p += scnprintf(p, end - p, "\tlink_id=%u\n", rtwsta_link->link_id);
+ p += rtw89_dump_addr_cam(rtwdev, p, end - p, &rtwsta_link->addr_cam);
+ p += rtw89_dump_ba_cam(rtwdev, p, end - p, rtwsta_link);
+
+ return p - buf;
}
static void rtw89_sta_ids_get_iter(void *data, struct ieee80211_sta *sta)
{
- struct seq_file *m = (struct seq_file *)data;
+ struct rtw89_debugfs_iter_data *iter_data =
+ (struct rtw89_debugfs_iter_data *)data;
struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
struct rtw89_dev *rtwdev = rtwsta->rtwdev;
struct rtw89_sta_link *rtwsta_link;
+ size_t bufsz = iter_data->bufsz;
+ char *buf = iter_data->buf;
+ char *p = buf, *end = buf + bufsz;
unsigned int link_id;
- seq_printf(m, "STA %pM %s\n", sta->addr, sta->tdls ? "(TDLS)" : "");
+ p += scnprintf(p, end - p, "STA %pM %s\n", sta->addr,
+ sta->tdls ? "(TDLS)" : "");
rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id)
- rtw89_sta_link_ids_get(m, rtwdev, rtwsta_link);
+ p += rtw89_sta_link_ids_get(rtwdev, p, end - p, rtwsta_link);
+
+ rtw89_debugfs_iter_data_next(iter_data, p, end - p, p - buf);
}
-static int rtw89_debug_priv_stations_get(struct seq_file *m, void *v)
+static ssize_t rtw89_debug_priv_stations_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
{
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+ struct rtw89_debugfs_iter_data iter_data;
+ char *p = buf, *end = buf + bufsz;
u8 idx;
- mutex_lock(&rtwdev->mutex);
-
- seq_puts(m, "map:\n");
- seq_printf(m, "\tmac_id: %*ph\n", (int)sizeof(rtwdev->mac_id_map),
- rtwdev->mac_id_map);
- seq_printf(m, "\taddr_cam: %*ph\n", (int)sizeof(cam_info->addr_cam_map),
- cam_info->addr_cam_map);
- seq_printf(m, "\tbssid_cam: %*ph\n", (int)sizeof(cam_info->bssid_cam_map),
- cam_info->bssid_cam_map);
- seq_printf(m, "\tsec_cam: %*ph\n", (int)sizeof(cam_info->sec_cam_map),
- cam_info->sec_cam_map);
- seq_printf(m, "\tba_cam: %*ph\n", (int)sizeof(cam_info->ba_cam_map),
- cam_info->ba_cam_map);
- seq_printf(m, "\tpkt_ofld: %*ph\n", (int)sizeof(rtwdev->pkt_offload),
- rtwdev->pkt_offload);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
+ p += scnprintf(p, end - p, "map:\n");
+ p += scnprintf(p, end - p, "\tmac_id: %*ph\n",
+ (int)sizeof(rtwdev->mac_id_map),
+ rtwdev->mac_id_map);
+ p += scnprintf(p, end - p, "\taddr_cam: %*ph\n",
+ (int)sizeof(cam_info->addr_cam_map),
+ cam_info->addr_cam_map);
+ p += scnprintf(p, end - p, "\tbssid_cam: %*ph\n",
+ (int)sizeof(cam_info->bssid_cam_map),
+ cam_info->bssid_cam_map);
+ p += scnprintf(p, end - p, "\tsec_cam: %*ph\n",
+ (int)sizeof(cam_info->sec_cam_map),
+ cam_info->sec_cam_map);
+ p += scnprintf(p, end - p, "\tba_cam: %*ph\n",
+ (int)sizeof(cam_info->ba_cam_map),
+ cam_info->ba_cam_map);
+ p += scnprintf(p, end - p, "\tpkt_ofld: %*ph\n",
+ (int)sizeof(rtwdev->pkt_offload),
+ rtwdev->pkt_offload);
for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) {
if (!(rtwdev->chip->support_bands & BIT(idx)))
continue;
- rtw89_dump_pkt_offload(m, &rtwdev->scan_info.pkt_list[idx],
- "\t\t[SCAN %u]: ", idx);
+ p += rtw89_dump_pkt_offload(p, end - p, &rtwdev->scan_info.pkt_list[idx],
+ "\t\t[SCAN %u]: ", idx);
}
+ rtw89_debugfs_iter_data_setup(&iter_data, p, end - p);
ieee80211_iterate_active_interfaces_atomic(rtwdev->hw,
- IEEE80211_IFACE_ITER_NORMAL, rtw89_vif_ids_get_iter, m);
+ IEEE80211_IFACE_ITER_NORMAL, rtw89_vif_ids_get_iter, &iter_data);
+ p += iter_data.written_sz;
- ieee80211_iterate_stations_atomic(rtwdev->hw, rtw89_sta_ids_get_iter, m);
+ rtw89_debugfs_iter_data_setup(&iter_data, p, end - p);
+ ieee80211_iterate_stations_atomic(rtwdev->hw, rtw89_sta_ids_get_iter, &iter_data);
+ p += iter_data.written_sz;
- mutex_unlock(&rtwdev->mutex);
-
- return 0;
+ return p - buf;
}
#define DM_INFO(type) {RTW89_DM_ ## type, #type}
@@ -3899,43 +4154,45 @@ static const struct rtw89_disabled_dm_info {
} rtw89_disabled_dm_infos[] = {
DM_INFO(DYNAMIC_EDCCA),
DM_INFO(THERMAL_PROTECT),
+ DM_INFO(TAS),
};
-static int
-rtw89_debug_priv_disable_dm_get(struct seq_file *m, void *v)
+static ssize_t
+rtw89_debug_priv_disable_dm_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
{
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
const struct rtw89_disabled_dm_info *info;
struct rtw89_hal *hal = &rtwdev->hal;
+ char *p = buf, *end = buf + bufsz;
u32 disabled;
int i;
- seq_printf(m, "Disabled DM: 0x%x\n", hal->disabled_dm_bitmap);
+ p += scnprintf(p, end - p, "Disabled DM: 0x%x\n",
+ hal->disabled_dm_bitmap);
for (i = 0; i < ARRAY_SIZE(rtw89_disabled_dm_infos); i++) {
info = &rtw89_disabled_dm_infos[i];
disabled = BIT(info->type) & hal->disabled_dm_bitmap;
- seq_printf(m, "[%d] %s: %c\n", info->type, info->name,
- disabled ? 'X' : 'O');
+ p += scnprintf(p, end - p, "[%d] %s: %c\n", info->type,
+ info->name,
+ disabled ? 'X' : 'O');
}
- return 0;
+ return p - buf;
}
static ssize_t
-rtw89_debug_priv_disable_dm_set(struct file *filp, const char __user *user_buf,
- size_t count, loff_t *loff)
+rtw89_debug_priv_disable_dm_set(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count)
{
- struct seq_file *m = (struct seq_file *)filp->private_data;
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw89_hal *hal = &rtwdev->hal;
u32 conf;
int ret;
- ret = kstrtou32_from_user(user_buf, count, 0, &conf);
+ ret = kstrtou32(buf, 0, &conf);
if (ret)
return -EINVAL;
@@ -3944,46 +4201,62 @@ rtw89_debug_priv_disable_dm_set(struct file *filp, const char __user *user_buf,
return count;
}
-#define rtw89_debug_priv_get(name) \
+#define rtw89_debug_priv_get(name, opts...) \
{ \
.cb_read = rtw89_debug_priv_ ##name## _get, \
+ .opt = { opts }, \
}
-#define rtw89_debug_priv_set(name) \
+#define rtw89_debug_priv_set(name, opts...) \
{ \
.cb_write = rtw89_debug_priv_ ##name## _set, \
+ .opt = { opts }, \
}
-#define rtw89_debug_priv_select_and_get(name) \
+#define rtw89_debug_priv_select_and_get(name, opts...) \
{ \
.cb_write = rtw89_debug_priv_ ##name## _select, \
.cb_read = rtw89_debug_priv_ ##name## _get, \
+ .opt = { opts }, \
}
-#define rtw89_debug_priv_set_and_get(name) \
+#define rtw89_debug_priv_set_and_get(name, opts...) \
{ \
.cb_write = rtw89_debug_priv_ ##name## _set, \
.cb_read = rtw89_debug_priv_ ##name## _get, \
+ .opt = { opts }, \
}
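+/* Read buffer sizes (rsize) and read/write lock flags (rlock/wlock) for the debugfs entries below. */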
+#define RSIZE_8K .rsize = 0x2000
+#define RSIZE_12K .rsize = 0x3000
+#define RSIZE_16K .rsize = 0x4000
+#define RSIZE_20K .rsize = 0x5000
+#define RSIZE_32K .rsize = 0x8000
+#define RSIZE_64K .rsize = 0x10000
+#define RSIZE_128K .rsize = 0x20000
+#define RSIZE_1M .rsize = 0x100000
+#define RLOCK .rlock = 1
+#define WLOCK .wlock = 1
+#define RWLOCK RLOCK, WLOCK
+
static const struct rtw89_debugfs rtw89_debugfs_templ = {
.read_reg = rtw89_debug_priv_select_and_get(read_reg),
.write_reg = rtw89_debug_priv_set(write_reg),
.read_rf = rtw89_debug_priv_select_and_get(read_rf),
.write_rf = rtw89_debug_priv_set(write_rf),
- .rf_reg_dump = rtw89_debug_priv_get(rf_reg_dump),
- .txpwr_table = rtw89_debug_priv_get(txpwr_table),
- .mac_reg_dump = rtw89_debug_priv_select_and_get(mac_reg_dump),
- .mac_mem_dump = rtw89_debug_priv_select_and_get(mac_mem_dump),
- .mac_dbg_port_dump = rtw89_debug_priv_select_and_get(mac_dbg_port_dump),
+ .rf_reg_dump = rtw89_debug_priv_get(rf_reg_dump, RSIZE_8K),
+ .txpwr_table = rtw89_debug_priv_get(txpwr_table, RSIZE_20K, RLOCK),
+ .mac_reg_dump = rtw89_debug_priv_select_and_get(mac_reg_dump, RSIZE_128K),
+ .mac_mem_dump = rtw89_debug_priv_select_and_get(mac_mem_dump, RSIZE_16K, RLOCK),
+ .mac_dbg_port_dump = rtw89_debug_priv_select_and_get(mac_dbg_port_dump, RSIZE_1M),
.send_h2c = rtw89_debug_priv_set(send_h2c),
- .early_h2c = rtw89_debug_priv_set_and_get(early_h2c),
- .fw_crash = rtw89_debug_priv_set_and_get(fw_crash),
- .btc_info = rtw89_debug_priv_get(btc_info),
+ .early_h2c = rtw89_debug_priv_set_and_get(early_h2c, RWLOCK),
+ .fw_crash = rtw89_debug_priv_set_and_get(fw_crash, WLOCK),
+ .btc_info = rtw89_debug_priv_get(btc_info, RSIZE_12K),
.btc_manual = rtw89_debug_priv_set(btc_manual),
- .fw_log_manual = rtw89_debug_priv_set(fw_log_manual),
+ .fw_log_manual = rtw89_debug_priv_set(fw_log_manual, WLOCK),
.phy_info = rtw89_debug_priv_get(phy_info),
- .stations = rtw89_debug_priv_get(stations),
+ .stations = rtw89_debug_priv_get(stations, RLOCK),
.disable_dm = rtw89_debug_priv_set_and_get(disable_dm),
};
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 5d4ad23cc3bd..8643b17866f8 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -38,6 +38,16 @@ struct rtw89_arp_rsp {
static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C};
+const struct rtw89_fw_blacklist rtw89_fw_blacklist_default = {
+ .ver = 0x00,
+ .list = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ },
+};
+EXPORT_SYMBOL(rtw89_fw_blacklist_default);
+
union rtw89_fw_element_arg {
size_t offset;
enum rtw89_rf_path rf_path;
@@ -314,7 +324,7 @@ static int __parse_formatted_mssc(struct rtw89_dev *rtwdev,
if (!sec->secure_boot)
goto out;
- sb_sel_ver = le32_to_cpu(section_content->sb_sel_ver.v);
+ sb_sel_ver = get_unaligned_le32(&section_content->sb_sel_ver.v);
if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn)
goto ignore;
@@ -344,6 +354,46 @@ ignore:
return 0;
}
+static int __check_secure_blacklist(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_bin_info *info,
+ struct rtw89_fw_hdr_section_info *section_info,
+ const void *content)
+{
+ const struct rtw89_fw_blacklist *chip_blacklist = rtwdev->chip->fw_blacklist;
+ const union rtw89_fw_section_mssc_content *section_content = content;
+ struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
+ u8 byte_idx;
+ u8 bit_mask;
+
+ if (!sec->secure_boot)
+ return 0;
+
+ if (!info->secure_section_exist || section_info->ignore)
+ return 0;
+
+ if (!chip_blacklist) {
+ rtw89_warn(rtwdev, "chip no blacklist for secure firmware\n");
+ return -ENOENT;
+ }
+
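+ /* The chip blacklist is a 256-bit bitmap; the firmware section names its bit position. */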
+ byte_idx = section_content->blacklist.bit_in_chip_list >> 3;
+ bit_mask = BIT(section_content->blacklist.bit_in_chip_list & 0x7);
+
+ if (section_content->blacklist.ver > chip_blacklist->ver) {
+ rtw89_warn(rtwdev, "chip blacklist out of date (%u, %u)\n",
+ section_content->blacklist.ver, chip_blacklist->ver);
+ return -EINVAL;
+ }
+
+ if (chip_blacklist->list[byte_idx] & bit_mask) {
+ rtw89_warn(rtwdev, "firmware %u in chip blacklist\n",
+ section_content->blacklist.ver);
+ return -EPERM;
+ }
+
+ return 0;
+}
+
static int __parse_security_section(struct rtw89_dev *rtwdev,
struct rtw89_fw_bin_info *info,
struct rtw89_fw_hdr_section_info *section_info,
@@ -364,8 +414,11 @@ static int __parse_security_section(struct rtw89_dev *rtwdev,
*mssc_len += section_info->mssc * FWDL_SECURITY_CHKSUM_LEN;
if (sec->secure_boot) {
- if (sec->mss_idx >= section_info->mssc)
+ if (sec->mss_idx >= section_info->mssc) {
+ rtw89_err(rtwdev, "unexpected MSS %d >= %d\n",
+ sec->mss_idx, section_info->mssc);
return -EFAULT;
+ }
section_info->key_addr = content + section_info->len +
sec->mss_idx * FWDL_SECURITY_SIGLEN;
section_info->key_len = FWDL_SECURITY_SIGLEN;
@@ -374,6 +427,9 @@ static int __parse_security_section(struct rtw89_dev *rtwdev,
info->secure_section_exist = true;
}
+ ret = __check_secure_blacklist(rtwdev, info, section_info, content);
+ WARN_ONCE(ret, "Current firmware in blacklist. Please update firmware.\n");
+
return 0;
}
@@ -490,18 +546,61 @@ static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev,
}
static
+const struct rtw89_mfw_hdr *rtw89_mfw_get_hdr_ptr(struct rtw89_dev *rtwdev,
+ const struct firmware *firmware)
+{
+ const struct rtw89_mfw_hdr *mfw_hdr;
+
+ if (sizeof(*mfw_hdr) > firmware->size)
+ return NULL;
+
+ mfw_hdr = (const struct rtw89_mfw_hdr *)firmware->data;
+
+ if (mfw_hdr->sig != RTW89_MFW_SIG)
+ return NULL;
+
+ return mfw_hdr;
+}
+
+static int rtw89_mfw_validate_hdr(struct rtw89_dev *rtwdev,
+ const struct firmware *firmware,
+ const struct rtw89_mfw_hdr *mfw_hdr)
+{
+ const void *mfw = firmware->data;
+ u32 mfw_len = firmware->size;
+ u8 fw_nr = mfw_hdr->fw_nr;
+ const void *ptr;
+
+ if (fw_nr == 0) {
+ rtw89_err(rtwdev, "mfw header has no fw entry\n");
+ return -ENOENT;
+ }
+
+ ptr = &mfw_hdr->info[fw_nr];
+
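+ /* The fw_nr info[] entries must end within the firmware blob. */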
+ if (ptr > mfw + mfw_len) {
+ rtw89_err(rtwdev, "mfw header out of address\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static
int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
struct rtw89_fw_suit *fw_suit, bool nowarn)
{
struct rtw89_fw_info *fw_info = &rtwdev->fw;
const struct firmware *firmware = fw_info->req.firmware;
+ const struct rtw89_mfw_info *mfw_info = NULL, *tmp;
+ const struct rtw89_mfw_hdr *mfw_hdr;
const u8 *mfw = firmware->data;
u32 mfw_len = firmware->size;
- const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
- const struct rtw89_mfw_info *mfw_info = NULL, *tmp;
+ int ret;
int i;
- if (mfw_hdr->sig != RTW89_MFW_SIG) {
+ mfw_hdr = rtw89_mfw_get_hdr_ptr(rtwdev, firmware);
+ if (!mfw_hdr) {
rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
/* legacy firmware support normal type only */
if (type != RTW89_FW_NORMAL)
@@ -511,6 +610,10 @@ int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
return 0;
}
+ ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr);
+ if (ret)
+ return ret;
+
for (i = 0; i < mfw_hdr->fw_nr; i++) {
tmp = &mfw_hdr->info[i];
if (tmp->type != type)
@@ -540,6 +643,12 @@ int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
found:
fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
fw_suit->size = le32_to_cpu(mfw_info->size);
+
+ if (fw_suit->data + fw_suit->size > mfw + mfw_len) {
+ rtw89_err(rtwdev, "fw_suit %d out of address\n", type);
+ return -EFAULT;
+ }
+
return 0;
}
@@ -547,16 +656,21 @@ static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev)
{
struct rtw89_fw_info *fw_info = &rtwdev->fw;
const struct firmware *firmware = fw_info->req.firmware;
- const struct rtw89_mfw_hdr *mfw_hdr =
- (const struct rtw89_mfw_hdr *)firmware->data;
const struct rtw89_mfw_info *mfw_info;
+ const struct rtw89_mfw_hdr *mfw_hdr;
u32 size;
+ int ret;
- if (mfw_hdr->sig != RTW89_MFW_SIG) {
+ mfw_hdr = rtw89_mfw_get_hdr_ptr(rtwdev, firmware);
+ if (!mfw_hdr) {
rtw89_warn(rtwdev, "not mfw format\n");
return 0;
}
+ ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr);
+ if (ret)
+ return ret;
+
mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1];
size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size);
@@ -735,6 +849,7 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 47, 0, CH_INFO_BE_V0),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 49, 0, RFK_PRE_NOTIFY_V1),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 51, 0, NO_PHYCAP_P1),
+ __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 64, 0, NO_POWER_DIFFERENCE),
};
static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
@@ -988,7 +1103,7 @@ int rtw89_build_txpwr_trk_tbl_from_elm(struct rtw89_dev *rtwdev,
bitmap = le32_to_cpu(elm->u.txpwr_trk.bitmap);
if ((bitmap & needed_bitmap) != needed_bitmap) {
- rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %0x8x\n",
+ rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %08x\n",
needed_bitmap, bitmap);
return -ENOENT;
}
@@ -1056,6 +1171,101 @@ allocated:
return 0;
}
+static bool rtw89_regd_entcpy(struct rtw89_regd *regd, const void *cursor,
+ u8 cursor_size)
+{
+ /* fill default values if needed for backward compatibility */
+ struct rtw89_fw_regd_entry entry = {
+ .rule_2ghz = RTW89_NA,
+ .rule_5ghz = RTW89_NA,
+ .rule_6ghz = RTW89_NA,
+ .fmap = cpu_to_le32(0x0),
+ };
+ u8 valid_size = min_t(u8, sizeof(entry), cursor_size);
+ unsigned int i;
+ u32 fmap;
+
+ memcpy(&entry, cursor, valid_size);
+ memset(regd, 0, sizeof(*regd));
+
+ regd->alpha2[0] = entry.alpha2_0;
+ regd->alpha2[1] = entry.alpha2_1;
+ regd->alpha2[2] = '\0';
+
+ /* also need to consider forward compatibility */
+ regd->txpwr_regd[RTW89_BAND_2G] = entry.rule_2ghz < RTW89_REGD_NUM ?
+ entry.rule_2ghz : RTW89_NA;
+ regd->txpwr_regd[RTW89_BAND_5G] = entry.rule_5ghz < RTW89_REGD_NUM ?
+ entry.rule_5ghz : RTW89_NA;
+ regd->txpwr_regd[RTW89_BAND_6G] = entry.rule_6ghz < RTW89_REGD_NUM ?
+ entry.rule_6ghz : RTW89_NA;
+
+ BUILD_BUG_ON(sizeof(fmap) != sizeof(entry.fmap));
+ BUILD_BUG_ON(sizeof(fmap) * 8 < NUM_OF_RTW89_REGD_FUNC);
+
+ fmap = le32_to_cpu(entry.fmap);
+ for (i = 0; i < NUM_OF_RTW89_REGD_FUNC; i++) {
+ if (fmap & BIT(i))
+ set_bit(i, regd->func_bitmap);
+ }
+
+ return true;
+}
+
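+/* Walk the fixed-size entries of a REGD element, copying each into @regd before the body runs. */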
+#define rtw89_for_each_in_regd_element(regd, element) \
+ for (const void *cursor = (element)->content, \
+ *end = (element)->content + \
+ le32_to_cpu((element)->num_ents) * (element)->ent_sz; \
+ cursor < end; cursor += (element)->ent_sz) \
+ if (rtw89_regd_entcpy(regd, cursor, (element)->ent_sz))
+
+static
+int rtw89_recognize_regd_from_elm(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_element_hdr *elm,
+ const union rtw89_fw_element_arg arg)
+{
+ const struct __rtw89_fw_regd_element *regd_elm = &elm->u.regd;
+ struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
+ u32 num_ents = le32_to_cpu(regd_elm->num_ents);
+ struct rtw89_regd_data *p;
+ struct rtw89_regd regd;
+ u32 i = 0;
+
+ if (num_ents > RTW89_REGD_MAX_COUNTRY_NUM) {
+ rtw89_warn(rtwdev,
+ "regd element ents (%d) are over max num (%d)\n",
+ num_ents, RTW89_REGD_MAX_COUNTRY_NUM);
+ rtw89_warn(rtwdev,
+ "regd element ignore and take another/common\n");
+ return 1;
+ }
+
+ if (elm_info->regd) {
+ rtw89_debug(rtwdev, RTW89_DBG_REGD,
+ "regd element take the latter\n");
+ devm_kfree(rtwdev->dev, elm_info->regd);
+ elm_info->regd = NULL;
+ }
+
+ p = devm_kzalloc(rtwdev->dev, struct_size(p, map, num_ents), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ p->nr = num_ents;
+ rtw89_for_each_in_regd_element(&regd, regd_elm)
+ p->map[i++] = regd;
+
+ if (i != num_ents) {
+ rtw89_err(rtwdev, "regd element has %d invalid ents\n",
+ num_ents - i);
+ devm_kfree(rtwdev->dev, p);
+ return -EINVAL;
+ }
+
+ elm_info->regd = p;
+ return 0;
+}
+
static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
[RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
{ .fw_type = RTW89_FW_BBMCU0 }, NULL},
@@ -1114,6 +1324,9 @@ static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
[RTW89_FW_ELEMENT_ID_RFKLOG_FMT] = {
rtw89_build_rfk_log_fmt_from_elm, {}, NULL,
},
+ [RTW89_FW_ELEMENT_ID_REGD] = {
+ rtw89_recognize_regd_from_elm, {}, "REGD",
+ },
};
int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev)
@@ -1322,7 +1535,6 @@ static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
- ret = -1;
goto fail;
}
@@ -1409,7 +1621,6 @@ static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
ret = rtw89_h2c_tx(rtwdev, skb, true);
if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
- ret = -1;
goto fail;
}
@@ -2697,7 +2908,9 @@ int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev,
{
const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat;
+ static const u8 bcn_bw_ofst[] = {0, 0, 0, 3, 6, 9, 0, 12};
const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
struct rtw89_h2c_lps_ml_cmn_info *h2c;
struct rtw89_vif_link *rtwvif_link;
const struct rtw89_chan *chan;
@@ -2705,6 +2918,7 @@ int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev,
u32 len = sizeof(*h2c);
unsigned int link_id;
struct sk_buff *skb;
+ u8 beacon_bw_ofst;
u8 gain_band;
u32 done;
u8 path;
@@ -2722,9 +2936,10 @@ int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev,
skb_put(skb, len);
h2c = (struct rtw89_h2c_lps_ml_cmn_info *)skb->data;
- h2c->fmt_id = 0x1;
+ h2c->fmt_id = 0x3;
h2c->mlo_dbcc_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
+ h2c->rfe_type = efuse->rfe_type;
rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
path = rtwvif_link->phy_idx == RTW89_PHY_1 ? RF_PATH_B : RF_PATH_A;
@@ -2745,9 +2960,21 @@ int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev,
h2c->tia_gain[rtwvif_link->phy_idx][i] =
cpu_to_le16(gain->tia_gain[gain_band][bw_idx][path][i]);
}
+
+ if (rtwvif_link->bcn_bw_idx < ARRAY_SIZE(bcn_bw_ofst)) {
+ beacon_bw_ofst = bcn_bw_ofst[rtwvif_link->bcn_bw_idx];
+ h2c->dup_bcn_ofst[rtwvif_link->phy_idx] = beacon_bw_ofst;
+ }
+
memcpy(h2c->lna_gain[rtwvif_link->phy_idx],
gain->lna_gain[gain_band][bw_idx][path],
LNA_GAIN_NUM);
+ memcpy(h2c->tia_lna_op1db[rtwvif_link->phy_idx],
+ gain->tia_lna_op1db[gain_band][bw_idx][path],
+ LNA_GAIN_NUM + 1);
+ memcpy(h2c->lna_op1db[rtwvif_link->phy_idx],
+ gain->lna_op1db[gain_band][bw_idx][path],
+ LNA_GAIN_NUM);
}
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
@@ -3281,9 +3508,10 @@ int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 |
CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
- h2c->w6 = le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0,
+ h2c->w6 = le32_encode_bits(vif->cfg.aid, CCTLINFO_G7_W6_AID12_PAID) |
+ le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0,
CCTLINFO_G7_W6_ULDL);
- h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ULDL);
+ h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_AID12_PAID | CCTLINFO_G7_W6_ULDL);
if (rtwsta_link) {
h2c->w8 = le32_encode_bits(link_sta->he_cap.has_he,
@@ -3419,6 +3647,61 @@ fail:
return ret;
}
+EXPORT_SYMBOL(rtw89_fw_h2c_txtime_cmac_tbl);
+
+int rtw89_fw_h2c_txtime_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct rtw89_sta_link *rtwsta_link)
+{
+ struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for txtime_cmac_g7\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
+
+ h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, CCTLINFO_G7_C0_MACID) |
+ le32_encode_bits(1, CCTLINFO_G7_C0_OP);
+
+ if (rtwsta_link->cctl_tx_time) {
+ h2c->w3 |= le32_encode_bits(1, CCTLINFO_G7_W3_AMPDU_TIME_SEL);
+ h2c->m3 |= cpu_to_le32(CCTLINFO_G7_W3_AMPDU_TIME_SEL);
+
+ h2c->w2 |= le32_encode_bits(rtwsta_link->ampdu_max_time,
+ CCTLINFO_G7_W2_AMPDU_MAX_TIME);
+ h2c->m2 |= cpu_to_le32(CCTLINFO_G7_W2_AMPDU_MAX_TIME);
+ }
+ if (rtwsta_link->cctl_tx_retry_limit) {
+ h2c->w2 |= le32_encode_bits(1, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL) |
+ le32_encode_bits(rtwsta_link->data_tx_cnt_lmt,
+ CCTLINFO_G7_W2_DATA_TX_CNT_LMT);
+ h2c->m2 |= cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL |
+ CCTLINFO_G7_W2_DATA_TX_CNT_LMT);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_txtime_cmac_tbl_g7);
int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta_link *rtwsta_link)
@@ -3622,14 +3905,15 @@ fail:
}
EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be);
-#define H2C_ROLE_MAINTAIN_LEN 4
int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link,
enum rtw89_upd_mode upd_mode)
{
- struct sk_buff *skb;
u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
+ struct rtw89_h2c_role_maintain *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
u8 self_role;
int ret;
@@ -3642,21 +3926,27 @@ int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
self_role = rtwvif_link->self_role;
}
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
return -ENOMEM;
}
- skb_put(skb, H2C_ROLE_MAINTAIN_LEN);
- SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id);
- SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role);
- SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode);
- SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif_link->wifi_role);
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_role_maintain *)skb->data;
+
+ h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_ROLE_MAINTAIN_W0_MACID) |
+ le32_encode_bits(self_role, RTW89_H2C_ROLE_MAINTAIN_W0_SELF_ROLE) |
+ le32_encode_bits(upd_mode, RTW89_H2C_ROLE_MAINTAIN_W0_UPD_MODE) |
+ le32_encode_bits(rtwvif_link->wifi_role,
+ RTW89_H2C_ROLE_MAINTAIN_W0_WIFI_ROLE) |
+ le32_encode_bits(rtwvif_link->mac_idx,
+ RTW89_H2C_ROLE_MAINTAIN_W0_BAND) |
+ le32_encode_bits(rtwvif_link->port, RTW89_H2C_ROLE_MAINTAIN_W0_PORT);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1,
- H2C_ROLE_MAINTAIN_LEN);
+ len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
@@ -5311,6 +5601,7 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role;
u8 opch_size = sizeof(*opch) * option->num_opch;
u8 probe_id[NUM_NL80211_BANDS];
+ u8 scan_offload_ver = U8_MAX;
u8 cfg_len = sizeof(*h2c);
unsigned int cond;
u8 ver = U8_MAX;
@@ -5321,6 +5612,11 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
rtw89_scan_get_6g_disabled_chan(rtwdev, option);
+ if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) {
+ cfg_len = offsetofend(typeof(*h2c), w8);
+ scan_offload_ver = 0;
+ }
+
len = cfg_len + macc_role_size + opch_size;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
@@ -5392,10 +5688,8 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ);
}
- if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) {
- cfg_len = offsetofend(typeof(*h2c), w8);
+ if (scan_offload_ver == 0)
goto flex_member;
- }
h2c->w9 = le32_encode_bits(sizeof(*h2c) / sizeof(h2c->w0),
RTW89_H2C_SCANOFLD_BE_W9_SIZE_CFG) |
@@ -5994,24 +6288,29 @@ void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev)
{
struct rtw89_early_h2c *early_h2c;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) {
rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len);
}
}
-void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
+void __rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
{
struct rtw89_early_h2c *early_h2c, *tmp;
- mutex_lock(&rtwdev->mutex);
list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) {
list_del(&early_h2c->list);
kfree(early_h2c->h2c);
kfree(early_h2c);
}
- mutex_unlock(&rtwdev->mutex);
+}
+
+void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
+{
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
+ __rtw89_fw_free_all_early_h2c(rtwdev);
}
static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h)
@@ -6055,7 +6354,7 @@ void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h)
enqueue:
skb_queue_tail(&rtwdev->c2h_queue, c2h);
- ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
+ wiphy_work_queue(rtwdev->hw->wiphy, &rtwdev->c2h_work);
}
static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
@@ -6093,17 +6392,17 @@ static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len);
}
-void rtw89_fw_c2h_work(struct work_struct *work)
+void rtw89_fw_c2h_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
c2h_work);
struct sk_buff *skb, *tmp;
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) {
skb_unlink(skb, &rtwdev->c2h_queue);
- mutex_lock(&rtwdev->mutex);
rtw89_fw_c2h_cmd_handle(rtwdev, skb);
- mutex_unlock(&rtwdev->mutex);
dev_kfree_skb_any(skb);
}
}
@@ -6184,7 +6483,7 @@ int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
u32 ret;
if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE)
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
if (!h2c_info && !c2h_info)
return -EINVAL;
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 2026bc2fd2ac..55255b48bdb7 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -664,6 +664,11 @@ struct rtw89_fw_mss_pool_hdr {
union rtw89_fw_section_mssc_content {
struct {
+ u8 pad[0x20];
+ u8 bit_in_chip_list;
+ u8 ver;
+ } __packed blacklist;
+ struct {
u8 pad[58];
__le32 v;
} __packed sb_sel_ver;
@@ -673,6 +678,13 @@ union rtw89_fw_section_mssc_content {
} __packed key_sign_len;
} __packed;
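+/* Version plus a 256-bit bitmap of blacklisted firmware builds. */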
+struct rtw89_fw_blacklist {
+ u8 ver;
+ u8 list[32];
+};
+
+extern const struct rtw89_fw_blacklist rtw89_fw_blacklist_default;
+
static inline void SET_CTRL_INFO_MACID(void *table, u32 val)
{
le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0));
@@ -1579,25 +1591,17 @@ struct rtw89_h2c_bcn_upd_be {
#define RTW89_H2C_BCN_UPD_BE_W7_ECSA_OFST GENMASK(30, 16)
#define RTW89_H2C_BCN_UPD_BE_W7_PROTECTION_KEY_ID BIT(31)
-static inline void SET_FWROLE_MAINTAIN_MACID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
-}
-
-static inline void SET_FWROLE_MAINTAIN_SELF_ROLE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(9, 8));
-}
-
-static inline void SET_FWROLE_MAINTAIN_UPD_MODE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(12, 10));
-}
+struct rtw89_h2c_role_maintain {
+ __le32 w0;
+};
-static inline void SET_FWROLE_MAINTAIN_WIFI_ROLE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(16, 13));
-}
+#define RTW89_H2C_ROLE_MAINTAIN_W0_MACID GENMASK(7, 0)
+#define RTW89_H2C_ROLE_MAINTAIN_W0_SELF_ROLE GENMASK(9, 8)
+#define RTW89_H2C_ROLE_MAINTAIN_W0_UPD_MODE GENMASK(12, 10)
+#define RTW89_H2C_ROLE_MAINTAIN_W0_WIFI_ROLE GENMASK(16, 13)
+#define RTW89_H2C_ROLE_MAINTAIN_W0_BAND GENMASK(18, 17)
+#define RTW89_H2C_ROLE_MAINTAIN_W0_PORT GENMASK(21, 19)
+#define RTW89_H2C_ROLE_MAINTAIN_W0_MACID_EXT GENMASK(31, 24)
enum rtw89_fw_sta_type { /* value of RTW89_H2C_JOININFO_W1_STA_TYPE */
RTW89_FW_N_AC_STA = 0,
@@ -1801,17 +1805,21 @@ struct rtw89_h2c_lps_ch_info {
struct rtw89_h2c_lps_ml_cmn_info {
u8 fmt_id;
- u8 rsvd0[3];
+ u8 rfe_type;
+ u8 rsvd0[2];
__le32 mlo_dbcc_mode;
- u8 central_ch[RTW89_PHY_MAX];
- u8 pri_ch[RTW89_PHY_MAX];
- u8 bw[RTW89_PHY_MAX];
- u8 band[RTW89_PHY_MAX];
- u8 bcn_rate_type[RTW89_PHY_MAX];
+ u8 central_ch[RTW89_PHY_NUM];
+ u8 pri_ch[RTW89_PHY_NUM];
+ u8 bw[RTW89_PHY_NUM];
+ u8 band[RTW89_PHY_NUM];
+ u8 bcn_rate_type[RTW89_PHY_NUM];
u8 rsvd1[2];
- __le16 tia_gain[RTW89_PHY_MAX][TIA_GAIN_NUM];
- u8 lna_gain[RTW89_PHY_MAX][LNA_GAIN_NUM];
+ __le16 tia_gain[RTW89_PHY_NUM][TIA_GAIN_NUM];
+ u8 lna_gain[RTW89_PHY_NUM][LNA_GAIN_NUM];
u8 rsvd2[2];
+ u8 tia_lna_op1db[RTW89_PHY_NUM][LNA_GAIN_NUM + 1];
+ u8 lna_op1db[RTW89_PHY_NUM][LNA_GAIN_NUM];
+ u8 dup_bcn_ofst[RTW89_PHY_NUM];
} __packed;
static inline void RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(void *cmd, u32 val)
@@ -3882,6 +3890,7 @@ enum rtw89_fw_element_id {
RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU = 17,
RTW89_FW_ELEMENT_ID_TXPWR_TRK = 18,
RTW89_FW_ELEMENT_ID_RFKLOG_FMT = 19,
+ RTW89_FW_ELEMENT_ID_REGD = 20,
RTW89_FW_ELEMENT_ID_NUM,
};
@@ -3925,6 +3934,15 @@ struct __rtw89_fw_txpwr_element {
u8 content[];
} __packed;
+struct __rtw89_fw_regd_element {
+ u8 rsvd0;
+ u8 rsvd1;
+ u8 rsvd2;
+ u8 ent_sz;
+ __le32 num_ents;
+ u8 content[];
+} __packed;
+
enum rtw89_fw_txpwr_trk_type {
__RTW89_FW_TXPWR_TRK_TYPE_6GHZ_START = 0,
RTW89_FW_TXPWR_TRK_TYPE_6GB_N = 0,
@@ -4016,6 +4034,7 @@ struct rtw89_fw_element_hdr {
__le16 offset[];
} __packed rfk_log_fmt;
struct __rtw89_fw_txpwr_element txpwr;
+ struct __rtw89_fw_regd_element regd;
} __packed u;
} __packed;
@@ -4524,6 +4543,12 @@ struct rtw89_c2h_rfk_report {
u8 version;
} __packed;
+struct rtw89_c2h_rf_tas_info {
+ struct rtw89_c2h_hdr hdr;
+ __le32 cur_idx;
+ __le16 txpwr_history[20];
+} __packed;
+
#define RTW89_FW_RSVD_PLE_SIZE 0x800
#define RTW89_FW_BACKTRACE_INFO_SIZE 8
@@ -4573,6 +4598,8 @@ int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev,
struct rtw89_sta_link *rtwsta_link);
int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta_link *rtwsta_link);
+int rtw89_fw_h2c_txtime_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct rtw89_sta_link *rtwsta_link);
int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta_link *rtwsta_link);
int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
@@ -4588,7 +4615,7 @@ int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link);
void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h);
-void rtw89_fw_c2h_work(struct work_struct *work);
+void rtw89_fw_c2h_work(struct wiphy *wiphy, struct wiphy_work *work);
int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link,
@@ -4654,6 +4681,7 @@ int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
bool rack, bool dack);
int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len);
void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev);
+void __rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev);
void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
u8 macid);
@@ -4854,6 +4882,15 @@ static inline int rtw89_chip_h2c_ampdu_cmac_tbl(struct rtw89_dev *rtwdev,
}
static inline
+int rtw89_chip_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct rtw89_sta_link *rtwsta_link)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->h2c_txtime_cmac_tbl(rtwdev, rtwsta_link);
+}
+
+static inline
int rtw89_chip_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
bool valid, struct ieee80211_ampdu_params *params)
{
@@ -4874,6 +4911,18 @@ int rtw89_chip_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
return 0;
}
+/* Must consider compatibility; don't insert new in the mid.
+ * Fill each field's default value in rtw89_regd_entcpy().
+ */
+struct rtw89_fw_regd_entry {
+ u8 alpha2_0;
+ u8 alpha2_1;
+ u8 rule_2ghz;
+ u8 rule_5ghz;
+ u8 rule_6ghz;
+ __le32 fmap;
+} __packed;
+
/* must consider compatibility; don't insert new in the mid */
struct rtw89_fw_txpwr_byrate_entry {
u8 band;
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index a37c6d525d6f..b4841f948ec1 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -1495,6 +1495,21 @@ static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
#undef PWR_ACT
}
+int rtw89_mac_pwr_on(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ ret = rtw89_mac_power_switch(rtwdev, true);
+ if (ret) {
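+ /* Power off and retry the power-on once before giving up. */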
+ rtw89_mac_power_switch(rtwdev, false);
+ ret = rtw89_mac_power_switch(rtwdev, true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
void rtw89_mac_pwr_off(struct rtw89_dev *rtwdev)
{
rtw89_mac_power_switch(rtwdev, false);
@@ -3996,14 +4011,6 @@ int rtw89_mac_partial_init(struct rtw89_dev *rtwdev, bool include_bb)
{
int ret;
- ret = rtw89_mac_power_switch(rtwdev, true);
- if (ret) {
- rtw89_mac_power_switch(rtwdev, false);
- ret = rtw89_mac_power_switch(rtwdev, true);
- if (ret)
- return ret;
- }
-
rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
if (include_bb) {
@@ -4036,6 +4043,10 @@ int rtw89_mac_init(struct rtw89_dev *rtwdev)
bool include_bb = !!chip->bbmcu_nr;
int ret;
+ ret = rtw89_mac_pwr_on(rtwdev);
+ if (ret)
+ return ret;
+
ret = rtw89_mac_partial_init(rtwdev, include_bb);
if (ret)
goto fail;
@@ -4067,7 +4078,7 @@ int rtw89_mac_init(struct rtw89_dev *rtwdev)
return ret;
fail:
- rtw89_mac_power_switch(rtwdev, false);
+ rtw89_mac_pwr_off(rtwdev);
return ret;
}
@@ -4826,6 +4837,32 @@ void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
rtw89_write32_set(rtwdev, reg, mac->narrow_bw_ru_dis.mask);
}
+void rtw89_mac_set_he_tb(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
+{
+ struct ieee80211_bss_conf *bss_conf;
+ bool set;
+ u32 reg;
+
+ if (rtwdev->chip->chip_gen != RTW89_CHIP_BE)
+ return;
+
+ rcu_read_lock();
+
+ bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
+ set = bss_conf->he_support && !bss_conf->eht_support;
+
+ rcu_read_unlock();
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_CLIENT_OM_CTRL,
+ rtwvif_link->mac_idx);
+
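+ /* HE-only (no EHT) association: disable EHT trigger-based response. */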
+ if (set)
+ rtw89_write32_set(rtwdev, reg, B_BE_TRIG_DIS_EHTTB);
+ else
+ rtw89_write32_clr(rtwdev, reg, B_BE_TRIG_DIS_EHTTB);
+}
+
void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
{
rtw89_mac_port_cfg_func_sw(rtwdev, rtwvif_link);
@@ -6031,7 +6068,7 @@ int rtw89_mac_cfg_ctrl_path_v1(struct rtw89_dev *rtwdev, bool wl)
if (wl)
return 0;
- for (i = 0; i < RTW89_PHY_MAX; i++) {
+ for (i = 0; i < RTW89_PHY_NUM; i++) {
g[i].gnt_bt_sw_en = 1;
g[i].gnt_bt = 1;
g[i].gnt_wl_sw_en = 1;
@@ -6422,6 +6459,7 @@ __rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_
u32 tx_time)
{
#define MAC_AX_DFLT_TX_TIME 5280
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
u8 mac_idx = rtwsta_link->rtwvif_link->mac_idx;
u32 max_tx_time = tx_time == 0 ? MAC_AX_DFLT_TX_TIME : tx_time;
u32 reg;
@@ -6429,7 +6467,7 @@ __rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_
if (rtwsta_link->cctl_tx_time) {
rtwsta_link->ampdu_max_time = (max_tx_time - 512) >> 9;
- ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta_link);
+ ret = rtw89_chip_h2c_txtime_cmac_tbl(rtwdev, rtwsta_link);
} else {
ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
if (ret) {
@@ -6437,8 +6475,8 @@ __rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_
return ret;
}
- reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_AMPDU_AGG_LIMIT, mac_idx);
- rtw89_write32_mask(rtwdev, reg, B_AX_AMPDU_MAX_TIME_MASK,
+ reg = rtw89_mac_reg_by_idx(rtwdev, mac->agg_limit.addr, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, mac->agg_limit.mask,
max_tx_time >> 5);
}
@@ -6464,6 +6502,7 @@ int rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwst
int rtw89_mac_get_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link,
u32 *tx_time)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
u8 mac_idx = rtwsta_link->rtwvif_link->mac_idx;
u32 reg;
int ret = 0;
@@ -6477,8 +6516,8 @@ int rtw89_mac_get_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwst
return ret;
}
- reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_AMPDU_AGG_LIMIT, mac_idx);
- *tx_time = rtw89_read32_mask(rtwdev, reg, B_AX_AMPDU_MAX_TIME_MASK) << 5;
+ reg = rtw89_mac_reg_by_idx(rtwdev, mac->agg_limit.addr, mac_idx);
+ *tx_time = rtw89_read32_mask(rtwdev, reg, mac->agg_limit.mask) << 5;
}
return ret;
@@ -6494,9 +6533,9 @@ int rtw89_mac_set_tx_retry_limit(struct rtw89_dev *rtwdev,
if (!resume) {
rtwsta_link->cctl_tx_retry_limit = true;
- ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta_link);
+ ret = rtw89_chip_h2c_txtime_cmac_tbl(rtwdev, rtwsta_link);
} else {
- ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta_link);
+ ret = rtw89_chip_h2c_txtime_cmac_tbl(rtwdev, rtwsta_link);
rtwsta_link->cctl_tx_retry_limit = false;
}
@@ -6506,6 +6545,7 @@ int rtw89_mac_set_tx_retry_limit(struct rtw89_dev *rtwdev,
int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev,
struct rtw89_sta_link *rtwsta_link, u8 *tx_retry)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
u8 mac_idx = rtwsta_link->rtwvif_link->mac_idx;
u32 reg;
int ret = 0;
@@ -6519,8 +6559,8 @@ int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev,
return ret;
}
- reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TXCNT, mac_idx);
- *tx_retry = rtw89_read32_mask(rtwdev, reg, B_AX_L_TXCNT_LMT_MASK);
+ reg = rtw89_mac_reg_by_idx(rtwdev, mac->txcnt_limit.addr, mac_idx);
+ *tx_retry = rtw89_read32_mask(rtwdev, reg, mac->txcnt_limit.mask);
}
return ret;
@@ -6798,6 +6838,8 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.mask = B_AX_RXTRIG_RU26_DIS,
},
.wow_ctrl = {.addr = R_AX_WOW_CTRL, .mask = B_AX_WOW_WOWEN,},
+ .agg_limit = {.addr = R_AX_AMPDU_AGG_LIMIT, .mask = B_AX_AMPDU_MAX_TIME_MASK,},
+ .txcnt_limit = {.addr = R_AX_TXCNT, .mask = B_AX_L_TXCNT_LMT_MASK,},
.check_mac_en = rtw89_mac_check_mac_en_ax,
.sys_init = sys_init_ax,
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index 8edea96d037f..fd7935d24501 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -964,6 +964,8 @@ struct rtw89_mac_gen_def {
struct rtw89_reg_def bfee_ctrl;
struct rtw89_reg_def narrow_bw_ru_dis;
struct rtw89_reg_def wow_ctrl;
+ struct rtw89_reg_def agg_limit;
+ struct rtw89_reg_def txcnt_limit;
int (*check_mac_en)(struct rtw89_dev *rtwdev, u8 band,
enum rtw89_mac_hwmod_sel sel);
@@ -1145,6 +1147,7 @@ rtw89_write32_port_set(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_l
rtw89_write32_set(rtwdev, reg, bit);
}
+int rtw89_mac_pwr_on(struct rtw89_dev *rtwdev);
void rtw89_mac_pwr_off(struct rtw89_dev *rtwdev);
int rtw89_mac_partial_init(struct rtw89_dev *rtwdev, bool include_bb);
int rtw89_mac_init(struct rtw89_dev *rtwdev);
@@ -1185,6 +1188,8 @@ void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link, bool en);
void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
+void rtw89_mac_set_he_tb(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link);
void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link);
void rtw89_mac_enable_beacon_for_ap_vifs(struct rtw89_dev *rtwdev, bool en);
int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif_link *vif);
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index b3669e0074df..4fded07d0bee 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -57,32 +57,30 @@ static void rtw89_ops_wake_tx_queue(struct ieee80211_hw *hw,
static int rtw89_ops_start(struct ieee80211_hw *hw)
{
struct rtw89_dev *rtwdev = hw->priv;
- int ret;
- mutex_lock(&rtwdev->mutex);
- ret = rtw89_core_start(rtwdev);
- mutex_unlock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
- return ret;
+ return rtw89_core_start(rtwdev);
}
static void rtw89_ops_stop(struct ieee80211_hw *hw, bool suspend)
{
struct rtw89_dev *rtwdev = hw->priv;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
+
rtw89_core_stop(rtwdev);
- mutex_unlock(&rtwdev->mutex);
}
static int rtw89_ops_config(struct ieee80211_hw *hw, u32 changed)
{
struct rtw89_dev *rtwdev = hw->priv;
+ lockdep_assert_wiphy(hw->wiphy);
+
/* let previous ips work finish to ensure we don't leave ips twice */
- cancel_work_sync(&rtwdev->ips_work);
+ wiphy_work_cancel(hw->wiphy, &rtwdev->ips_work);
- mutex_lock(&rtwdev->mutex);
rtw89_leave_ps_mode(rtwdev);
if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
@@ -100,8 +98,6 @@ static int rtw89_ops_config(struct ieee80211_hw *hw, u32 changed)
!rtwdev->scanning)
rtw89_enter_ips(rtwdev);
- mutex_unlock(&rtwdev->mutex);
-
return 0;
}
@@ -115,7 +111,7 @@ static int __rtw89_ops_add_iface_link(struct rtw89_dev *rtwdev,
rtw89_vif_type_mapping(rtwvif_link, false);
- INIT_WORK(&rtwvif_link->update_beacon_work, rtw89_core_update_beacon_work);
+ wiphy_work_init(&rtwvif_link->update_beacon_work, rtw89_core_update_beacon_work);
INIT_LIST_HEAD(&rtwvif_link->general_pkt_list);
rtwvif_link->hit_rule = 0;
@@ -142,9 +138,9 @@ static int __rtw89_ops_add_iface_link(struct rtw89_dev *rtwdev,
static void __rtw89_ops_remove_iface_link(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link)
{
- mutex_unlock(&rtwdev->mutex);
- cancel_work_sync(&rtwvif_link->update_beacon_work);
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
+ wiphy_work_cancel(rtwdev->hw->wiphy, &rtwvif_link->update_beacon_work);
rtw89_leave_ps_mode(rtwdev);
@@ -162,11 +158,11 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
u8 mac_id, port;
int ret = 0;
+ lockdep_assert_wiphy(hw->wiphy);
+
rtw89_debug(rtwdev, RTW89_DBG_STATE, "add vif %pM type %d, p2p %d\n",
vif->addr, vif->type, vif->p2p);
- mutex_lock(&rtwdev->mutex);
-
rtw89_leave_ips_by_hwflags(rtwdev);
if (RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
@@ -174,10 +170,8 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
mac_id = rtw89_acquire_mac_id(rtwdev);
- if (mac_id == RTW89_MAX_MAC_ID_NUM) {
- ret = -ENOSPC;
- goto err;
- }
+ if (mac_id == RTW89_MAX_MAC_ID_NUM)
+ return -ENOSPC;
port = rtw89_core_acquire_bit_map(rtwdev->hw_port, RTW89_PORT_NUM);
if (port == RTW89_PORT_NUM) {
@@ -198,7 +192,7 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
rtwvif->offchan = false;
rtwvif->roc.state = RTW89_ROC_IDLE;
- INIT_DELAYED_WORK(&rtwvif->roc.roc_work, rtw89_roc_work);
+ wiphy_delayed_work_init(&rtwvif->roc.roc_work, rtw89_roc_work);
rtw89_traffic_stats_init(rtwdev, &rtwvif->stats);
@@ -213,8 +207,6 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
goto unset_link;
rtw89_recalc_lps(rtwdev);
-
- mutex_unlock(&rtwdev->mutex);
return 0;
unset_link:
@@ -224,8 +216,6 @@ release_port:
rtw89_core_release_bit_map(rtwdev->hw_port, port);
release_macid:
rtw89_release_mac_id(rtwdev, mac_id);
-err:
- mutex_unlock(&rtwdev->mutex);
return ret;
}
@@ -239,12 +229,12 @@ static void rtw89_ops_remove_interface(struct ieee80211_hw *hw,
u8 port = rtw89_vif_get_main_port(rtwvif);
struct rtw89_vif_link *rtwvif_link;
+ lockdep_assert_wiphy(hw->wiphy);
+
rtw89_debug(rtwdev, RTW89_DBG_STATE, "remove vif %pM type %d p2p %d\n",
vif->addr, vif->type, vif->p2p);
- cancel_delayed_work_sync(&rtwvif->roc.roc_work);
-
- mutex_lock(&rtwdev->mutex);
+ wiphy_delayed_work_cancel(hw->wiphy, &rtwvif->roc.roc_work);
rtwvif_link = rtwvif->links[RTW89_VIF_IDLE_LINK_ID];
if (unlikely(!rtwvif_link)) {
@@ -265,8 +255,6 @@ bottom:
rtw89_recalc_lps(rtwdev);
rtw89_enter_ips_by_hwflags(rtwdev);
-
- mutex_unlock(&rtwdev->mutex);
}
static int rtw89_ops_change_interface(struct ieee80211_hw *hw,
@@ -304,7 +292,8 @@ static void rtw89_ops_configure_filter(struct ieee80211_hw *hw,
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
u32 rx_fltr;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
+
rtw89_leave_ps_mode(rtwdev);
*new_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_FCSFAIL |
@@ -367,14 +356,11 @@ static void rtw89_ops_configure_filter(struct ieee80211_hw *hw,
B_AX_RX_FLTR_CFG_MASK,
rx_fltr);
if (!rtwdev->dbcc_en)
- goto out;
+ return;
rtw89_write32_mask(rtwdev,
rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_1),
B_AX_RX_FLTR_CFG_MASK,
rx_fltr);
-
-out:
- mutex_unlock(&rtwdev->mutex);
}
static const u8 ac_to_fw_idx[IEEE80211_NUM_ACS] = {
@@ -670,6 +656,7 @@ static void __rtw89_ops_bss_link_assoc(struct rtw89_dev *rtwdev,
rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, rtwvif_link);
rtw89_mac_port_update(rtwdev, rtwvif_link);
rtw89_mac_set_he_obss_narrow_bw_ru(rtwdev, rtwvif_link);
+ rtw89_mac_set_he_tb(rtwdev, rtwvif_link);
}
static void __rtw89_ops_bss_assoc(struct rtw89_dev *rtwdev,
@@ -689,7 +676,8 @@ static void rtw89_ops_vif_cfg_changed(struct ieee80211_hw *hw,
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
+
rtw89_leave_ps_mode(rtwdev);
if (changed & BSS_CHANGED_ASSOC) {
@@ -712,8 +700,6 @@ static void rtw89_ops_vif_cfg_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ARP_FILTER)
rtwvif->ip_addr = vif->cfg.arp_addr_list[0];
-
- mutex_unlock(&rtwdev->mutex);
}
static void rtw89_ops_link_info_changed(struct ieee80211_hw *hw,
@@ -725,7 +711,8 @@ static void rtw89_ops_link_info_changed(struct ieee80211_hw *hw,
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
struct rtw89_vif_link *rtwvif_link;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
+
rtw89_leave_ps_mode(rtwdev);
rtwvif_link = rtwvif->links[conf->link_id];
@@ -733,7 +720,7 @@ static void rtw89_ops_link_info_changed(struct ieee80211_hw *hw,
rtw89_err(rtwdev,
"%s: rtwvif link (link_id %u) is not active\n",
__func__, conf->link_id);
- goto out;
+ return;
}
if (changed & BSS_CHANGED_BSSID) {
@@ -763,9 +750,6 @@ static void rtw89_ops_link_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_TPE)
rtw89_reg_6ghz_recalc(rtwdev, rtwvif_link, true);
-
-out:
- mutex_unlock(&rtwdev->mutex);
}
static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
@@ -778,22 +762,19 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
const struct rtw89_chan *chan;
int ret = 0;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
rtwvif_link = rtwvif->links[link_conf->link_id];
if (unlikely(!rtwvif_link)) {
rtw89_err(rtwdev,
"%s: rtwvif link (link_id %u) is not active\n",
__func__, link_conf->link_id);
- ret = -ENOLINK;
- goto out;
+ return -ENOLINK;
}
chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
- if (chan->band_type == RTW89_BAND_6G) {
- mutex_unlock(&rtwdev->mutex);
+ if (chan->band_type == RTW89_BAND_6G)
return -EOPNOTSUPP;
- }
if (rtwdev->scanning)
rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
@@ -810,15 +791,12 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
if (RTW89_CHK_FW_FEATURE(NOTIFY_AP_INFO, &rtwdev->fw)) {
ret = rtw89_fw_h2c_ap_info_refcount(rtwdev, true);
if (ret)
- goto out;
+ return ret;
}
rtw89_queue_chanctx_work(rtwdev);
-out:
- mutex_unlock(&rtwdev->mutex);
-
- return ret;
+ return 0;
}
static
@@ -829,14 +807,14 @@ void rtw89_ops_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
struct rtw89_vif_link *rtwvif_link;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
rtwvif_link = rtwvif->links[link_conf->link_id];
if (unlikely(!rtwvif_link)) {
rtw89_err(rtwdev,
"%s: rtwvif link (link_id %u) is not active\n",
__func__, link_conf->link_id);
- goto out;
+ return;
}
if (RTW89_CHK_FW_FEATURE(NOTIFY_AP_INFO, &rtwdev->fw))
@@ -845,22 +823,18 @@ void rtw89_ops_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
rtw89_mac_stop_ap(rtwdev, rtwvif_link);
rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, rtwvif_link, NULL);
rtw89_fw_h2c_join_info(rtwdev, rtwvif_link, NULL, true);
-
-out:
- mutex_unlock(&rtwdev->mutex);
}
static int rtw89_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
bool set)
{
- struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
struct rtw89_vif *rtwvif = rtwsta->rtwvif;
struct rtw89_vif_link *rtwvif_link;
unsigned int link_id;
rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
- ieee80211_queue_work(rtwdev->hw, &rtwvif_link->update_beacon_work);
+ wiphy_work_queue(hw->wiphy, &rtwvif_link->update_beacon_work);
return 0;
}
@@ -873,9 +847,9 @@ static int rtw89_ops_conf_tx(struct ieee80211_hw *hw,
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
struct rtw89_vif_link *rtwvif_link;
- int ret = 0;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
+
rtw89_leave_ps_mode(rtwdev);
rtwvif_link = rtwvif->links[link_id];
@@ -883,17 +857,13 @@ static int rtw89_ops_conf_tx(struct ieee80211_hw *hw,
rtw89_err(rtwdev,
"%s: rtwvif link (link_id %u) is not active\n",
__func__, link_id);
- ret = -ENOLINK;
- goto out;
+ return -ENOLINK;
}
rtwvif_link->tx_params[ac] = *params;
__rtw89_conf_tx(rtwdev, rtwvif_link, ac);
-out:
- mutex_unlock(&rtwdev->mutex);
-
- return ret;
+ return 0;
}
static int __rtw89_ops_sta_state(struct ieee80211_hw *hw,
@@ -937,14 +907,11 @@ static int rtw89_ops_sta_state(struct ieee80211_hw *hw,
enum ieee80211_sta_state new_state)
{
struct rtw89_dev *rtwdev = hw->priv;
- int ret;
- mutex_lock(&rtwdev->mutex);
- rtw89_leave_ps_mode(rtwdev);
- ret = __rtw89_ops_sta_state(hw, vif, sta, old_state, new_state);
- mutex_unlock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
- return ret;
+ rtw89_leave_ps_mode(rtwdev);
+ return __rtw89_ops_sta_state(hw, vif, sta, old_state, new_state);
}
static int rtw89_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -953,9 +920,10 @@ static int rtw89_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
struct rtw89_dev *rtwdev = hw->priv;
- int ret = 0;
+ int ret;
+
+ lockdep_assert_wiphy(hw->wiphy);
- mutex_lock(&rtwdev->mutex);
rtw89_leave_ps_mode(rtwdev);
switch (cmd) {
@@ -964,7 +932,7 @@ static int rtw89_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = rtw89_cam_sec_key_add(rtwdev, vif, sta, key);
if (ret && ret != -EOPNOTSUPP) {
rtw89_err(rtwdev, "failed to add key to sec cam\n");
- goto out;
+ return ret;
}
break;
case DISABLE_KEY:
@@ -974,14 +942,11 @@ static int rtw89_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = rtw89_cam_sec_key_del(rtwdev, vif, sta, key, true);
if (ret) {
rtw89_err(rtwdev, "failed to remove key from sec cam\n");
- goto out;
+ return ret;
}
break;
}
-out:
- mutex_unlock(&rtwdev->mutex);
-
return ret;
}
@@ -997,38 +962,32 @@ static int rtw89_ops_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_txq *txq = sta->txq[tid];
struct rtw89_txq *rtwtxq = (struct rtw89_txq *)txq->drv_priv;
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
switch (params->action) {
case IEEE80211_AMPDU_TX_START:
return IEEE80211_AMPDU_TX_START_IMMEDIATE;
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
- mutex_lock(&rtwdev->mutex);
clear_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags);
clear_bit(tid, rtwsta->ampdu_map);
rtw89_chip_h2c_ampdu_cmac_tbl(rtwdev, rtwvif, rtwsta);
- mutex_unlock(&rtwdev->mutex);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
- mutex_lock(&rtwdev->mutex);
set_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags);
rtwsta->ampdu_params[tid].agg_num = params->buf_size;
rtwsta->ampdu_params[tid].amsdu = params->amsdu;
set_bit(tid, rtwsta->ampdu_map);
rtw89_leave_ps_mode(rtwdev);
rtw89_chip_h2c_ampdu_cmac_tbl(rtwdev, rtwvif, rtwsta);
- mutex_unlock(&rtwdev->mutex);
break;
case IEEE80211_AMPDU_RX_START:
- mutex_lock(&rtwdev->mutex);
rtw89_chip_h2c_ba_cam(rtwdev, rtwsta, true, params);
- mutex_unlock(&rtwdev->mutex);
break;
case IEEE80211_AMPDU_RX_STOP:
- mutex_lock(&rtwdev->mutex);
rtw89_chip_h2c_ba_cam(rtwdev, rtwsta, false, params);
- mutex_unlock(&rtwdev->mutex);
break;
default:
WARN_ON(1);
@@ -1042,11 +1001,11 @@ static int rtw89_ops_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
struct rtw89_dev *rtwdev = hw->priv;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
+
rtw89_leave_ps_mode(rtwdev);
if (test_bit(RTW89_FLAG_POWERON, rtwdev->flags))
rtw89_mac_update_rts_threshold(rtwdev);
- mutex_unlock(&rtwdev->mutex);
return 0;
}
@@ -1086,7 +1045,8 @@ static void rtw89_ops_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct rtw89_dev *rtwdev = hw->priv;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
+
rtw89_leave_lps(rtwdev);
rtw89_hci_flush_queues(rtwdev, queues, drop);
@@ -1094,8 +1054,6 @@ static void rtw89_ops_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
__rtw89_drop_packets(rtwdev, vif);
else
rtw89_mac_flush_txq(rtwdev, queues, drop);
-
- mutex_unlock(&rtwdev->mutex);
}
struct rtw89_iter_bitrate_mask_data {
@@ -1142,10 +1100,10 @@ static int rtw89_ops_set_bitrate_mask(struct ieee80211_hw *hw,
{
struct rtw89_dev *rtwdev = hw->priv;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
+
rtw89_phy_rate_pattern_vif(rtwdev, vif, mask);
rtw89_ra_mask_info_update(rtwdev, vif, mask);
- mutex_unlock(&rtwdev->mutex);
return 0;
}
@@ -1156,6 +1114,8 @@ int rtw89_ops_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_hal *hal = &rtwdev->hal;
+ lockdep_assert_wiphy(hw->wiphy);
+
if (hal->ant_diversity) {
if (tx_ant != rx_ant || hweight32(tx_ant) != 1)
return -EINVAL;
@@ -1163,12 +1123,10 @@ int rtw89_ops_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
return -EINVAL;
}
- mutex_lock(&rtwdev->mutex);
hal->antenna_tx = tx_ant;
hal->antenna_rx = rx_ant;
hal->tx_path_diversity = false;
hal->ant_diversity_fixed = true;
- mutex_unlock(&rtwdev->mutex);
return 0;
}
@@ -1193,18 +1151,15 @@ static void rtw89_ops_sw_scan_start(struct ieee80211_hw *hw,
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
struct rtw89_vif_link *rtwvif_link;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
if (unlikely(!rtwvif_link)) {
rtw89_err(rtwdev, "sw scan start: find no link on HW-0\n");
- goto out;
+ return;
}
rtw89_core_scan_start(rtwdev, rtwvif_link, mac_addr, false);
-
-out:
- mutex_unlock(&rtwdev->mutex);
}
static void rtw89_ops_sw_scan_complete(struct ieee80211_hw *hw,
@@ -1214,18 +1169,15 @@ static void rtw89_ops_sw_scan_complete(struct ieee80211_hw *hw,
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
struct rtw89_vif_link *rtwvif_link;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
if (unlikely(!rtwvif_link)) {
rtw89_err(rtwdev, "sw scan complete: find no link on HW-0\n");
- goto out;
+ return;
}
rtw89_core_scan_complete(rtwdev, rtwvif_link, false);
-
-out:
- mutex_unlock(&rtwdev->mutex);
}
static void rtw89_ops_reconfig_complete(struct ieee80211_hw *hw,
@@ -1245,21 +1197,18 @@ static int rtw89_ops_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct rtw89_vif_link *rtwvif_link;
int ret;
+ lockdep_assert_wiphy(hw->wiphy);
+
if (!RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw))
return 1;
- mutex_lock(&rtwdev->mutex);
-
- if (rtwdev->scanning || rtwvif->offchan) {
- ret = -EBUSY;
- goto out;
- }
+ if (rtwdev->scanning || rtwvif->offchan)
+ return -EBUSY;
rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
if (unlikely(!rtwvif_link)) {
rtw89_err(rtwdev, "hw scan: find no link on HW-0\n");
- ret = -ENOLINK;
- goto out;
+ return -ENOLINK;
}
rtw89_hw_scan_start(rtwdev, rtwvif_link, req);
@@ -1269,9 +1218,6 @@ static int rtw89_ops_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
rtw89_err(rtwdev, "HW scan failed with status: %d\n", ret);
}
-out:
- mutex_unlock(&rtwdev->mutex);
-
return ret;
}
@@ -1282,24 +1228,21 @@ static void rtw89_ops_cancel_hw_scan(struct ieee80211_hw *hw,
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
struct rtw89_vif_link *rtwvif_link;
+ lockdep_assert_wiphy(hw->wiphy);
+
if (!RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw))
return;
- mutex_lock(&rtwdev->mutex);
-
if (!rtwdev->scanning)
- goto out;
+ return;
rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
if (unlikely(!rtwvif_link)) {
rtw89_err(rtwdev, "cancel hw scan: find no link on HW-0\n");
- goto out;
+ return;
}
rtw89_hw_scan_abort(rtwdev, rtwvif_link);
-
-out:
- mutex_unlock(&rtwdev->mutex);
}
static void rtw89_ops_sta_rc_update(struct ieee80211_hw *hw,
@@ -1322,13 +1265,10 @@ static int rtw89_ops_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
struct rtw89_dev *rtwdev = hw->priv;
- int ret;
- mutex_lock(&rtwdev->mutex);
- ret = rtw89_chanctx_ops_add(rtwdev, ctx);
- mutex_unlock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
- return ret;
+ return rtw89_chanctx_ops_add(rtwdev, ctx);
}
static void rtw89_ops_remove_chanctx(struct ieee80211_hw *hw,
@@ -1336,9 +1276,9 @@ static void rtw89_ops_remove_chanctx(struct ieee80211_hw *hw,
{
struct rtw89_dev *rtwdev = hw->priv;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
+
rtw89_chanctx_ops_remove(rtwdev, ctx);
- mutex_unlock(&rtwdev->mutex);
}
static void rtw89_ops_change_chanctx(struct ieee80211_hw *hw,
@@ -1347,9 +1287,9 @@ static void rtw89_ops_change_chanctx(struct ieee80211_hw *hw,
{
struct rtw89_dev *rtwdev = hw->priv;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
+
rtw89_chanctx_ops_change(rtwdev, ctx, changed);
- mutex_unlock(&rtwdev->mutex);
}
static int rtw89_ops_assign_vif_chanctx(struct ieee80211_hw *hw,
@@ -1360,25 +1300,18 @@ static int rtw89_ops_assign_vif_chanctx(struct ieee80211_hw *hw,
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
struct rtw89_vif_link *rtwvif_link;
- int ret;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
rtwvif_link = rtwvif->links[link_conf->link_id];
if (unlikely(!rtwvif_link)) {
rtw89_err(rtwdev,
"%s: rtwvif link (link_id %u) is not active\n",
__func__, link_conf->link_id);
- ret = -ENOLINK;
- goto out;
+ return -ENOLINK;
}
- ret = rtw89_chanctx_ops_assign_vif(rtwdev, rtwvif_link, ctx);
-
-out:
- mutex_unlock(&rtwdev->mutex);
-
- return ret;
+ return rtw89_chanctx_ops_assign_vif(rtwdev, rtwvif_link, ctx);
}
static void rtw89_ops_unassign_vif_chanctx(struct ieee80211_hw *hw,
@@ -1390,11 +1323,10 @@ static void rtw89_ops_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
struct rtw89_vif_link *rtwvif_link;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
rtwvif_link = rtwvif->links[link_conf->link_id];
if (unlikely(!rtwvif_link)) {
- mutex_unlock(&rtwdev->mutex);
rtw89_err(rtwdev,
"%s: rtwvif link (link_id %u) is not active\n",
__func__, link_conf->link_id);
@@ -1402,7 +1334,6 @@ static void rtw89_ops_unassign_vif_chanctx(struct ieee80211_hw *hw,
}
rtw89_chanctx_ops_unassign_vif(rtwdev, rtwvif_link, ctx);
- mutex_unlock(&rtwdev->mutex);
}
static int rtw89_ops_remain_on_channel(struct ieee80211_hw *hw,
@@ -1415,13 +1346,12 @@ static int rtw89_ops_remain_on_channel(struct ieee80211_hw *hw,
struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
struct rtw89_roc *roc = &rtwvif->roc;
+ lockdep_assert_wiphy(hw->wiphy);
+
if (!rtwvif)
return -EINVAL;
- mutex_lock(&rtwdev->mutex);
-
if (roc->state != RTW89_ROC_IDLE) {
- mutex_unlock(&rtwdev->mutex);
return -EBUSY;
}
@@ -1439,8 +1369,6 @@ static int rtw89_ops_remain_on_channel(struct ieee80211_hw *hw,
rtw89_roc_start(rtwdev, rtwvif);
- mutex_unlock(&rtwdev->mutex);
-
return 0;
}
@@ -1450,14 +1378,14 @@ static int rtw89_ops_cancel_remain_on_channel(struct ieee80211_hw *hw,
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
+ lockdep_assert_wiphy(hw->wiphy);
+
if (!rtwvif)
return -EINVAL;
- cancel_delayed_work_sync(&rtwvif->roc.roc_work);
+ wiphy_delayed_work_cancel(hw->wiphy, &rtwvif->roc.roc_work);
- mutex_lock(&rtwdev->mutex);
rtw89_roc_end(rtwdev, rtwvif);
- mutex_unlock(&rtwdev->mutex);
return 0;
}
@@ -1478,14 +1406,14 @@ static int rtw89_ops_set_tid_config(struct ieee80211_hw *hw,
{
struct rtw89_dev *rtwdev = hw->priv;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
+
if (sta)
rtw89_core_set_tid_config(rtwdev, sta, tid_config);
else
ieee80211_iterate_stations_atomic(rtwdev->hw,
rtw89_set_tid_config_iter,
tid_config);
- mutex_unlock(&rtwdev->mutex);
return 0;
}
@@ -1509,7 +1437,7 @@ static bool rtw89_ops_can_activate_links(struct ieee80211_hw *hw,
{
struct rtw89_dev *rtwdev = hw->priv;
- guard(mutex)(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
return rtw89_can_work_on_links(rtwdev, vif, active_links);
}
@@ -1571,7 +1499,7 @@ int rtw89_ops_change_vif_links(struct ieee80211_hw *hw,
int ret = 0;
int i;
- guard(mutex)(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
rtw89_debug(rtwdev, RTW89_DBG_STATE,
"%s: old_links (0x%08x) -> new_links (0x%08x)\n",
@@ -1720,7 +1648,7 @@ int rtw89_ops_change_sta_links(struct ieee80211_hw *hw,
unsigned long set_links = new_links & ~old_links;
int ret = 0;
- guard(mutex)(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
rtw89_debug(rtwdev, RTW89_DBG_STATE,
"%s: old_links (0x%08x) -> new_links (0x%08x)\n",
@@ -1750,13 +1678,12 @@ static int rtw89_ops_suspend(struct ieee80211_hw *hw,
struct rtw89_dev *rtwdev = hw->priv;
int ret;
+ lockdep_assert_wiphy(hw->wiphy);
+
set_bit(RTW89_FLAG_FORBIDDEN_TRACK_WROK, rtwdev->flags);
- cancel_delayed_work_sync(&rtwdev->track_work);
+ wiphy_delayed_work_cancel(hw->wiphy, &rtwdev->track_work);
- mutex_lock(&rtwdev->mutex);
ret = rtw89_wow_suspend(rtwdev, wowlan);
- mutex_unlock(&rtwdev->mutex);
-
if (ret) {
rtw89_warn(rtwdev, "failed to suspend for wow %d\n", ret);
clear_bit(RTW89_FLAG_FORBIDDEN_TRACK_WROK, rtwdev->flags);
@@ -1771,15 +1698,15 @@ static int rtw89_ops_resume(struct ieee80211_hw *hw)
struct rtw89_dev *rtwdev = hw->priv;
int ret;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
+
ret = rtw89_wow_resume(rtwdev);
if (ret)
rtw89_warn(rtwdev, "failed to resume for wow %d\n", ret);
- mutex_unlock(&rtwdev->mutex);
clear_bit(RTW89_FLAG_FORBIDDEN_TRACK_WROK, rtwdev->flags);
- ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->track_work,
- RTW89_TRACK_WORK_PERIOD);
+ wiphy_delayed_work_queue(hw->wiphy, &rtwdev->track_work,
+ RTW89_TRACK_WORK_PERIOD);
return ret ? 1 : 0;
}
@@ -1799,18 +1726,16 @@ static void rtw89_set_rekey_data(struct ieee80211_hw *hw,
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info;
+ lockdep_assert_wiphy(hw->wiphy);
+
if (data->kek_len > sizeof(gtk_info->kek) ||
data->kck_len > sizeof(gtk_info->kck)) {
rtw89_warn(rtwdev, "kek or kck length over fw limit\n");
return;
}
- mutex_lock(&rtwdev->mutex);
-
memcpy(gtk_info->kek, data->kek, data->kek_len);
memcpy(gtk_info->kck, data->kck, data->kck_len);
-
- mutex_unlock(&rtwdev->mutex);
}
#endif
@@ -1818,16 +1743,13 @@ static void rtw89_ops_rfkill_poll(struct ieee80211_hw *hw)
{
struct rtw89_dev *rtwdev = hw->priv;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
/* wl_disable GPIO get floating when entering LPS */
if (test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
- goto out;
+ return;
rtw89_core_rfkill_poll(rtwdev, false);
-
-out:
- mutex_unlock(&rtwdev->mutex);
}
const struct ieee80211_ops rtw89_ops = {
diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c
index 2dbdeae904ad..99b82dc85ea3 100644
--- a/drivers/net/wireless/realtek/rtw89/mac_be.c
+++ b/drivers/net/wireless/realtek/rtw89/mac_be.c
@@ -708,8 +708,8 @@ static int sec_eng_init_be(struct rtw89_dev *rtwdev)
val32 |= B_BE_CLK_EN_CGCMP | B_BE_CLK_EN_WAPI | B_BE_CLK_EN_WEP_TKIP |
B_BE_SEC_TX_ENC | B_BE_SEC_RX_DEC |
B_BE_MC_DEC | B_BE_BC_DEC |
- B_BE_BMC_MGNT_DEC | B_BE_UC_MGNT_DEC;
- val32 &= ~B_BE_SEC_PRE_ENQUE_TX;
+ B_BE_BMC_MGNT_DEC | B_BE_UC_MGNT_DEC |
+ B_BE_SEC_PRE_ENQUE_TX;
rtw89_write32(rtwdev, R_BE_SEC_ENG_CTRL, val32);
rtw89_write32_set(rtwdev, R_BE_SEC_MPDU_PROC, B_BE_APPEND_ICV | B_BE_APPEND_MIC);
@@ -1865,7 +1865,7 @@ int rtw89_mac_cfg_ctrl_path_v2(struct rtw89_dev *rtwdev, bool wl)
if (wl)
return 0;
- for (i = 0; i < RTW89_PHY_MAX; i++) {
+ for (i = 0; i < RTW89_PHY_NUM; i++) {
g[i].gnt_bt_sw_en = 1;
g[i].gnt_bt = 1;
g[i].gnt_wl_sw_en = 1;
@@ -2585,6 +2585,8 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.mask = B_BE_RXTRIG_RU26_DIS,
},
.wow_ctrl = {.addr = R_BE_WOW_CTRL, .mask = B_BE_WOW_WOWEN,},
+ .agg_limit = {.addr = R_BE_AMPDU_AGG_LIMIT, .mask = B_BE_AMPDU_MAX_TIME_MASK,},
+ .txcnt_limit = {.addr = R_BE_TXCNT, .mask = B_BE_L_TXCNT_LMT_MASK,},
.check_mac_en = rtw89_mac_check_mac_en_be,
.sys_init = sys_init_be,
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index 4d11c3dd60a5..79fef5f90140 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -455,34 +455,36 @@
#define B_BE_RX0DMA_INT_EN BIT(0)
#define R_BE_HAXI_HISR00 0xB0B4
-#define B_BE_RDU_CH6_INT BIT(28)
-#define B_BE_RDU_CH5_INT BIT(27)
-#define B_BE_RDU_CH4_INT BIT(26)
-#define B_BE_RDU_CH2_INT BIT(25)
-#define B_BE_RDU_CH1_INT BIT(24)
-#define B_BE_RDU_CH0_INT BIT(23)
-#define B_BE_RXDMA_STUCK_INT BIT(22)
-#define B_BE_TXDMA_STUCK_INT BIT(21)
-#define B_BE_TXDMA_CH14_INT BIT(20)
-#define B_BE_TXDMA_CH13_INT BIT(19)
-#define B_BE_TXDMA_CH12_INT BIT(18)
-#define B_BE_TXDMA_CH11_INT BIT(17)
-#define B_BE_TXDMA_CH10_INT BIT(16)
-#define B_BE_TXDMA_CH9_INT BIT(15)
-#define B_BE_TXDMA_CH8_INT BIT(14)
-#define B_BE_TXDMA_CH7_INT BIT(13)
-#define B_BE_TXDMA_CH6_INT BIT(12)
-#define B_BE_TXDMA_CH5_INT BIT(11)
-#define B_BE_TXDMA_CH4_INT BIT(10)
-#define B_BE_TXDMA_CH3_INT BIT(9)
-#define B_BE_TXDMA_CH2_INT BIT(8)
-#define B_BE_TXDMA_CH1_INT BIT(7)
-#define B_BE_TXDMA_CH0_INT BIT(6)
-#define B_BE_RPQ1DMA_INT BIT(5)
-#define B_BE_RX1P1DMA_INT BIT(4)
+#define B_BE_RDU_CH5_INT_V1 BIT(30)
+#define B_BE_RDU_CH4_INT_V1 BIT(29)
+#define B_BE_RDU_CH3_INT_V1 BIT(28)
+#define B_BE_RDU_CH2_INT_V1 BIT(27)
+#define B_BE_RDU_CH1_INT_V1 BIT(26)
+#define B_BE_RDU_CH0_INT_V1 BIT(25)
+#define B_BE_RXDMA_STUCK_INT_V1 BIT(24)
+#define B_BE_TXDMA_STUCK_INT_V1 BIT(23)
+#define B_BE_TXDMA_CH14_INT_V1 BIT(22)
+#define B_BE_TXDMA_CH13_INT_V1 BIT(21)
+#define B_BE_TXDMA_CH12_INT_V1 BIT(20)
+#define B_BE_TXDMA_CH11_INT_V1 BIT(19)
+#define B_BE_TXDMA_CH10_INT_V1 BIT(18)
+#define B_BE_TXDMA_CH9_INT_V1 BIT(17)
+#define B_BE_TXDMA_CH8_INT_V1 BIT(16)
+#define B_BE_TXDMA_CH7_INT_V1 BIT(15)
+#define B_BE_TXDMA_CH6_INT_V1 BIT(14)
+#define B_BE_TXDMA_CH5_INT_V1 BIT(13)
+#define B_BE_TXDMA_CH4_INT_V1 BIT(12)
+#define B_BE_TXDMA_CH3_INT_V1 BIT(11)
+#define B_BE_TXDMA_CH2_INT_V1 BIT(10)
+#define B_BE_TXDMA_CH1_INT_V1 BIT(9)
+#define B_BE_TXDMA_CH0_INT_V1 BIT(8)
+#define B_BE_RX1P1DMA_INT_V1 BIT(7)
+#define B_BE_RX0P1DMA_INT_V1 BIT(6)
+#define B_BE_RO1DMA_INT BIT(5)
+#define B_BE_RP1DMA_INT BIT(4)
#define B_BE_RX1DMA_INT BIT(3)
-#define B_BE_RPQ0DMA_INT BIT(2)
-#define B_BE_RX0P1DMA_INT BIT(1)
+#define B_BE_RO0DMA_INT BIT(2)
+#define B_BE_RP0DMA_INT BIT(1)
#define B_BE_RX0DMA_INT BIT(0)
/* TX/RX */
diff --git a/drivers/net/wireless/realtek/rtw89/pci_be.c b/drivers/net/wireless/realtek/rtw89/pci_be.c
index cd39eebe8186..12e6a0cbb889 100644
--- a/drivers/net/wireless/realtek/rtw89/pci_be.c
+++ b/drivers/net/wireless/realtek/rtw89/pci_be.c
@@ -666,7 +666,7 @@ SIMPLE_DEV_PM_OPS(rtw89_pm_ops_be, rtw89_pci_suspend_be, rtw89_pci_resume_be);
EXPORT_SYMBOL(rtw89_pm_ops_be);
const struct rtw89_pci_gen_def rtw89_pci_gen_be = {
- .isr_rdu = B_BE_RDU_CH1_INT | B_BE_RDU_CH0_INT,
+ .isr_rdu = B_BE_RDU_CH1_INT_V1 | B_BE_RDU_CH0_INT_V1,
.isr_halt_c2h = B_BE_HALT_C2H_INT,
.isr_wdt_timeout = B_BE_WDT_TIMEOUT_INT,
.isr_clear_rpq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RPQ0_ISR_V1},
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index c7c05f7fda1d..f4eee642e5ce 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -2044,12 +2044,15 @@ static s8 rtw89_phy_ant_gain_offset(struct rtw89_dev *rtwdev, u8 band, u32 cente
if (!chip->support_ant_gain)
return 0;
- if (!(ant_gain->regd_enabled & BIT(regd)))
+ if (ant_gain->block_country || !(ant_gain->regd_enabled & BIT(regd)))
return 0;
offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, center_freq);
offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, center_freq);
+ if (RTW89_CHK_FW_FEATURE(NO_POWER_DIFFERENCE, &rtwdev->fw))
+ return min(offset_patha, offset_pathb);
+
return max(offset_patha, offset_pathb);
}
@@ -2057,10 +2060,17 @@ s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan)
{
struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
u8 regd = rtw89_regd_get(rtwdev, chan->band_type);
s8 offset_patha, offset_pathb;
- if (!(ant_gain->regd_enabled & BIT(regd)))
+ if (!chip->support_ant_gain)
+ return 0;
+
+ if (ant_gain->block_country || !(ant_gain->regd_enabled & BIT(regd)))
+ return 0;
+
+ if (RTW89_CHK_FW_FEATURE(NO_POWER_DIFFERENCE, &rtwdev->fw))
return 0;
offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, chan->freq);
@@ -2070,24 +2080,29 @@ s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_phy_ant_gain_pwr_offset);
-void rtw89_print_ant_gain(struct seq_file *m, struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan)
+int rtw89_print_ant_gain(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
+ const struct rtw89_chan *chan)
{
struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
const struct rtw89_chip_info *chip = rtwdev->chip;
u8 regd = rtw89_regd_get(rtwdev, chan->band_type);
+ char *p = buf, *end = buf + bufsz;
s8 offset_patha, offset_pathb;
- if (!chip->support_ant_gain || !(ant_gain->regd_enabled & BIT(regd))) {
- seq_puts(m, "no DAG is applied\n");
- return;
+ if (!(chip->support_ant_gain && (ant_gain->regd_enabled & BIT(regd))) ||
+ ant_gain->block_country) {
+ p += scnprintf(p, end - p, "no DAG is applied\n");
+ goto out;
}
offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, chan->freq);
offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, chan->freq);
- seq_printf(m, "ChainA offset: %d dBm\n", offset_patha);
- seq_printf(m, "ChainB offset: %d dBm\n", offset_pathb);
+ p += scnprintf(p, end - p, "ChainA offset: %d dBm\n", offset_patha);
+ p += scnprintf(p, end - p, "ChainB offset: %d dBm\n", offset_pathb);
+
+out:
+ return p - buf;
}
static const u8 rtw89_rs_idx_num_ax[] = {
@@ -3455,6 +3470,30 @@ rtw89_phy_c2h_rfk_report_state(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u3
static void
rtw89_phy_c2h_rfk_log_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
+ const struct rtw89_c2h_rf_tas_info *rf_tas =
+ (const struct rtw89_c2h_rf_tas_info *)c2h->data;
+ const enum rtw89_sar_sources src = rtwdev->sar.src;
+ struct rtw89_tas_info *tas = &rtwdev->tas;
+ u64 linear = 0;
+ u32 i, cur_idx;
+ s16 txpwr;
+
+ if (!tas->enable || src == RTW89_SAR_SOURCE_NONE)
+ return;
+
+ cur_idx = le32_to_cpu(rf_tas->cur_idx);
+ for (i = 0; i < cur_idx; i++) {
+ txpwr = (s16)le16_to_cpu(rf_tas->txpwr_history[i]);
+ linear += rtw89_db_quarter_to_linear(txpwr);
+
+ rtw89_debug(rtwdev, RTW89_DBG_SAR,
+ "tas: index: %u, txpwr: %d\n", i, txpwr);
+ }
+
+ if (cur_idx == 0)
+ tas->instant_txpwr = rtw89_db_to_linear(0);
+ else
+ tas->instant_txpwr = DIV_ROUND_DOWN_ULL(linear, cur_idx);
}
static
@@ -4600,7 +4639,7 @@ static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev)
cfo->dcfo_avg = 0;
rtw89_debug(rtwdev, RTW89_DBG_CFO, "CFO:total_sta_assoc=%d\n",
rtwdev->total_sta_assoc);
- if (rtwdev->total_sta_assoc == 0) {
+ if (rtwdev->total_sta_assoc == 0 || rtw89_is_mlo_1_1(rtwdev)) {
rtw89_phy_cfo_reset(rtwdev);
return;
}
@@ -4651,29 +4690,28 @@ static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev)
rtw89_phy_cfo_statistics_reset(rtwdev);
}
-void rtw89_phy_cfo_track_work(struct work_struct *work)
+void rtw89_phy_cfo_track_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
cfo_track_work.work);
struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(wiphy);
+
if (!cfo->cfo_trig_by_timer_en)
- goto out;
+ return;
rtw89_leave_ps_mode(rtwdev);
rtw89_phy_cfo_dm(rtwdev);
- ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->cfo_track_work,
- msecs_to_jiffies(cfo->cfo_timer_ms));
-out:
- mutex_unlock(&rtwdev->mutex);
+ wiphy_delayed_work_queue(wiphy, &rtwdev->cfo_track_work,
+ msecs_to_jiffies(cfo->cfo_timer_ms));
}
static void rtw89_phy_cfo_start_work(struct rtw89_dev *rtwdev)
{
struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
- ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->cfo_track_work,
- msecs_to_jiffies(cfo->cfo_timer_ms));
+ wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->cfo_track_work,
+ msecs_to_jiffies(cfo->cfo_timer_ms));
}
void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev)
@@ -5115,7 +5153,6 @@ static void rtw89_phy_stat_thermal_update(struct rtw89_dev *rtwdev)
struct rtw89_phy_iter_rssi_data {
struct rtw89_dev *rtwdev;
- struct rtw89_phy_ch_info *ch_info;
bool rssi_changed;
};
@@ -5123,10 +5160,15 @@ static
void __rtw89_phy_stat_rssi_update_iter(struct rtw89_sta_link *rtwsta_link,
struct rtw89_phy_iter_rssi_data *rssi_data)
{
- struct rtw89_phy_ch_info *ch_info = rssi_data->ch_info;
+ struct rtw89_vif_link *rtwvif_link = rtwsta_link->rtwvif_link;
+ struct rtw89_dev *rtwdev = rssi_data->rtwdev;
+ struct rtw89_phy_ch_info *ch_info;
+ struct rtw89_bb_ctx *bb;
unsigned long rssi_curr;
rssi_curr = ewma_rssi_read(&rtwsta_link->avg_rssi);
+ bb = rtw89_get_bb_ctx(rtwdev, rtwvif_link->phy_idx);
+ ch_info = &bb->ch_info;
if (rssi_curr < ch_info->rssi_min) {
ch_info->rssi_min = rssi_curr;
@@ -5157,11 +5199,13 @@ static void rtw89_phy_stat_rssi_update_iter(void *data,
static void rtw89_phy_stat_rssi_update(struct rtw89_dev *rtwdev)
{
- struct rtw89_phy_iter_rssi_data rssi_data = {0};
+ struct rtw89_phy_iter_rssi_data rssi_data = {};
+ struct rtw89_bb_ctx *bb;
rssi_data.rtwdev = rtwdev;
- rssi_data.ch_info = &rtwdev->ch_info;
- rssi_data.ch_info->rssi_min = U8_MAX;
+ rtw89_for_each_active_bb(rtwdev, bb)
+ bb->ch_info.rssi_min = U8_MAX;
+
ieee80211_iterate_stations_atomic(rtwdev->hw,
rtw89_phy_stat_rssi_update_iter,
&rssi_data);
@@ -5199,24 +5243,27 @@ void rtw89_phy_stat_track(struct rtw89_dev *rtwdev)
memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat));
}
-static u16 rtw89_phy_ccx_us_to_idx(struct rtw89_dev *rtwdev, u32 time_us)
+static u16 rtw89_phy_ccx_us_to_idx(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb, u32 time_us)
{
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
return time_us >> (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx);
}
-static u32 rtw89_phy_ccx_idx_to_us(struct rtw89_dev *rtwdev, u16 idx)
+static u32 rtw89_phy_ccx_idx_to_us(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb, u16 idx)
{
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
return idx << (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx);
}
-static void rtw89_phy_ccx_top_setting_init(struct rtw89_dev *rtwdev)
+static void rtw89_phy_ccx_top_setting_init(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
const struct rtw89_ccx_regs *ccx = phy->ccx;
env->ccx_manual_ctrl = false;
@@ -5225,17 +5272,20 @@ static void rtw89_phy_ccx_top_setting_init(struct rtw89_dev *rtwdev)
env->ccx_period = 0;
env->ccx_unit_idx = RTW89_CCX_32_US;
- rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->en_mask, 1);
- rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->trig_opt_mask, 1);
- rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1);
- rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->edcca_opt_mask,
- RTW89_CCX_EDCCA_BW20_0);
+ rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->en_mask, 1, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->trig_opt_mask, 1,
+ bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1,
+ bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->edcca_opt_mask,
+ RTW89_CCX_EDCCA_BW20_0, bb->phy_idx);
}
-static u16 rtw89_phy_ccx_get_report(struct rtw89_dev *rtwdev, u16 report,
- u16 score)
+static u16 rtw89_phy_ccx_get_report(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb,
+ u16 report, u16 score)
{
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
u32 numer = 0;
u16 ret = 0;
@@ -5275,9 +5325,10 @@ static void rtw89_phy_ccx_ms_to_period_unit(struct rtw89_dev *rtwdev,
*period, *unit_idx);
}
-static void rtw89_phy_ccx_racing_release(struct rtw89_dev *rtwdev)
+static void rtw89_phy_ccx_racing_release(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
"lv:(%d)->(0)\n", env->ccx_rac_lv);
@@ -5288,9 +5339,10 @@ static void rtw89_phy_ccx_racing_release(struct rtw89_dev *rtwdev)
}
static bool rtw89_phy_ifs_clm_th_update_check(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb,
struct rtw89_ccx_para_info *para)
{
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
bool is_update = env->ifs_clm_app != para->ifs_clm_app;
u8 i = 0;
u16 *ifs_th_l = env->ifs_clm_th_l;
@@ -5325,12 +5377,12 @@ static bool rtw89_phy_ifs_clm_th_update_check(struct rtw89_dev *rtwdev,
*/
ifs_th_l[IFS_CLM_TH_START_IDX] = 0;
ifs_th_h_us[IFS_CLM_TH_START_IDX] = ifs_th0_us;
- ifs_th_h[IFS_CLM_TH_START_IDX] = rtw89_phy_ccx_us_to_idx(rtwdev,
+ ifs_th_h[IFS_CLM_TH_START_IDX] = rtw89_phy_ccx_us_to_idx(rtwdev, bb,
ifs_th0_us);
for (i = 1; i < RTW89_IFS_CLM_NUM; i++) {
ifs_th_l[i] = ifs_th_h[i - 1] + 1;
ifs_th_h_us[i] = ifs_th_h_us[i - 1] * ifs_th_times;
- ifs_th_h[i] = rtw89_phy_ccx_us_to_idx(rtwdev, ifs_th_h_us[i]);
+ ifs_th_h[i] = rtw89_phy_ccx_us_to_idx(rtwdev, bb, ifs_th_h_us[i]);
}
ifs_update_finished:
@@ -5341,30 +5393,31 @@ ifs_update_finished:
return is_update;
}
-static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev)
+static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
const struct rtw89_ccx_regs *ccx = phy->ccx;
u8 i = 0;
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_l_mask,
- env->ifs_clm_th_l[0]);
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_l_mask,
- env->ifs_clm_th_l[1]);
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_l_mask,
- env->ifs_clm_th_l[2]);
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_l_mask,
- env->ifs_clm_th_l[3]);
-
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_h_mask,
- env->ifs_clm_th_h[0]);
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_h_mask,
- env->ifs_clm_th_h[1]);
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_h_mask,
- env->ifs_clm_th_h[2]);
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_h_mask,
- env->ifs_clm_th_h[3]);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_l_mask,
+ env->ifs_clm_th_l[0], bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_l_mask,
+ env->ifs_clm_th_l[1], bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_l_mask,
+ env->ifs_clm_th_l[2], bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_l_mask,
+ env->ifs_clm_th_l[3], bb->phy_idx);
+
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_h_mask,
+ env->ifs_clm_th_h[0], bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_h_mask,
+ env->ifs_clm_th_h[1], bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_h_mask,
+ env->ifs_clm_th_h[2], bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_h_mask,
+ env->ifs_clm_th_h[3], bb->phy_idx);
for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
@@ -5372,31 +5425,38 @@ static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev)
i + 1, env->ifs_clm_th_l[i], env->ifs_clm_th_h[i]);
}
-static void rtw89_phy_ifs_clm_setting_init(struct rtw89_dev *rtwdev)
+static void rtw89_phy_ifs_clm_setting_init(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
const struct rtw89_ccx_regs *ccx = phy->ccx;
- struct rtw89_ccx_para_info para = {0};
+ struct rtw89_ccx_para_info para = {};
env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;
env->ifs_clm_mntr_time = 0;
para.ifs_clm_app = RTW89_IFS_CLM_INIT;
- if (rtw89_phy_ifs_clm_th_update_check(rtwdev, &para))
- rtw89_phy_ifs_clm_set_th_reg(rtwdev);
+ if (rtw89_phy_ifs_clm_th_update_check(rtwdev, bb, &para))
+ rtw89_phy_ifs_clm_set_th_reg(rtwdev, bb);
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_collect_en_mask, true);
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_en_mask, true);
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_en_mask, true);
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_en_mask, true);
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_en_mask, true);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_collect_en_mask, true,
+ bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_en_mask, true,
+ bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_en_mask, true,
+ bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_en_mask, true,
+ bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_en_mask, true,
+ bb->phy_idx);
}
static int rtw89_phy_ccx_racing_ctrl(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb,
enum rtw89_env_racing_lv level)
{
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
int ret = 0;
if (level >= RTW89_RAC_MAX_NUM) {
@@ -5425,56 +5485,62 @@ static int rtw89_phy_ccx_racing_ctrl(struct rtw89_dev *rtwdev,
return ret;
}
-static void rtw89_phy_ccx_trigger(struct rtw89_dev *rtwdev)
+static void rtw89_phy_ccx_trigger(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
const struct rtw89_ccx_regs *ccx = phy->ccx;
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 0);
- rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 0);
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 1);
- rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 0,
+ bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 0,
+ bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 1,
+ bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1,
+ bb->phy_idx);
env->ccx_ongoing = true;
}
-static void rtw89_phy_ifs_clm_get_utility(struct rtw89_dev *rtwdev)
+static void rtw89_phy_ifs_clm_get_utility(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
u8 i = 0;
u32 res = 0;
env->ifs_clm_tx_ratio =
- rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_tx, PERCENT);
+ rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_tx, PERCENT);
env->ifs_clm_edcca_excl_cca_ratio =
- rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_edcca_excl_cca,
+ rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_edcca_excl_cca,
PERCENT);
env->ifs_clm_cck_fa_ratio =
- rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckfa, PERCENT);
+ rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_cckfa, PERCENT);
env->ifs_clm_ofdm_fa_ratio =
- rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmfa, PERCENT);
+ rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_ofdmfa, PERCENT);
env->ifs_clm_cck_cca_excl_fa_ratio =
- rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckcca_excl_fa,
+ rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_cckcca_excl_fa,
PERCENT);
env->ifs_clm_ofdm_cca_excl_fa_ratio =
- rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmcca_excl_fa,
+ rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_ofdmcca_excl_fa,
PERCENT);
env->ifs_clm_cck_fa_permil =
- rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckfa, PERMIL);
+ rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_cckfa, PERMIL);
env->ifs_clm_ofdm_fa_permil =
- rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmfa, PERMIL);
+ rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_ofdmfa, PERMIL);
for (i = 0; i < RTW89_IFS_CLM_NUM; i++) {
if (env->ifs_clm_his[i] > ENV_MNTR_IFSCLM_HIS_MAX) {
env->ifs_clm_ifs_avg[i] = ENV_MNTR_FAIL_DWORD;
} else {
env->ifs_clm_ifs_avg[i] =
- rtw89_phy_ccx_idx_to_us(rtwdev,
+ rtw89_phy_ccx_idx_to_us(rtwdev, bb,
env->ifs_clm_avg[i]);
}
- res = rtw89_phy_ccx_idx_to_us(rtwdev, env->ifs_clm_cca[i]);
+ res = rtw89_phy_ccx_idx_to_us(rtwdev, bb, env->ifs_clm_cca[i]);
res += env->ifs_clm_his[i] >> 1;
if (env->ifs_clm_his[i])
res /= env->ifs_clm_his[i];
@@ -5504,81 +5570,82 @@ static void rtw89_phy_ifs_clm_get_utility(struct rtw89_dev *rtwdev)
env->ifs_clm_cca_avg[i]);
}
-static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev)
+static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
const struct rtw89_ccx_regs *ccx = phy->ccx;
u8 i = 0;
- if (rtw89_phy_read32_mask(rtwdev, ccx->ifs_total_addr,
- ccx->ifs_cnt_done_mask) == 0) {
+ if (rtw89_phy_read32_idx(rtwdev, ccx->ifs_total_addr,
+ ccx->ifs_cnt_done_mask, bb->phy_idx) == 0) {
rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
"Get IFS_CLM report Fail\n");
return false;
}
env->ifs_clm_tx =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_tx_cnt_addr,
- ccx->ifs_clm_tx_cnt_msk);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_tx_cnt_addr,
+ ccx->ifs_clm_tx_cnt_msk, bb->phy_idx);
env->ifs_clm_edcca_excl_cca =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_tx_cnt_addr,
- ccx->ifs_clm_edcca_excl_cca_fa_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_tx_cnt_addr,
+ ccx->ifs_clm_edcca_excl_cca_fa_mask, bb->phy_idx);
env->ifs_clm_cckcca_excl_fa =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_cca_addr,
- ccx->ifs_clm_cckcca_excl_fa_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_cca_addr,
+ ccx->ifs_clm_cckcca_excl_fa_mask, bb->phy_idx);
env->ifs_clm_ofdmcca_excl_fa =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_cca_addr,
- ccx->ifs_clm_ofdmcca_excl_fa_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_cca_addr,
+ ccx->ifs_clm_ofdmcca_excl_fa_mask, bb->phy_idx);
env->ifs_clm_cckfa =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_fa_addr,
- ccx->ifs_clm_cck_fa_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_fa_addr,
+ ccx->ifs_clm_cck_fa_mask, bb->phy_idx);
env->ifs_clm_ofdmfa =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_fa_addr,
- ccx->ifs_clm_ofdm_fa_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_fa_addr,
+ ccx->ifs_clm_ofdm_fa_mask, bb->phy_idx);
env->ifs_clm_his[0] =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr,
- ccx->ifs_t1_his_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr,
+ ccx->ifs_t1_his_mask, bb->phy_idx);
env->ifs_clm_his[1] =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr,
- ccx->ifs_t2_his_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr,
+ ccx->ifs_t2_his_mask, bb->phy_idx);
env->ifs_clm_his[2] =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr,
- ccx->ifs_t3_his_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr,
+ ccx->ifs_t3_his_mask, bb->phy_idx);
env->ifs_clm_his[3] =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr,
- ccx->ifs_t4_his_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr,
+ ccx->ifs_t4_his_mask, bb->phy_idx);
env->ifs_clm_avg[0] =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_l_addr,
- ccx->ifs_t1_avg_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_avg_l_addr,
+ ccx->ifs_t1_avg_mask, bb->phy_idx);
env->ifs_clm_avg[1] =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_l_addr,
- ccx->ifs_t2_avg_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_avg_l_addr,
+ ccx->ifs_t2_avg_mask, bb->phy_idx);
env->ifs_clm_avg[2] =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_h_addr,
- ccx->ifs_t3_avg_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_avg_h_addr,
+ ccx->ifs_t3_avg_mask, bb->phy_idx);
env->ifs_clm_avg[3] =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_h_addr,
- ccx->ifs_t4_avg_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_avg_h_addr,
+ ccx->ifs_t4_avg_mask, bb->phy_idx);
env->ifs_clm_cca[0] =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_l_addr,
- ccx->ifs_t1_cca_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_cca_l_addr,
+ ccx->ifs_t1_cca_mask, bb->phy_idx);
env->ifs_clm_cca[1] =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_l_addr,
- ccx->ifs_t2_cca_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_cca_l_addr,
+ ccx->ifs_t2_cca_mask, bb->phy_idx);
env->ifs_clm_cca[2] =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_h_addr,
- ccx->ifs_t3_cca_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_cca_h_addr,
+ ccx->ifs_t3_cca_mask, bb->phy_idx);
env->ifs_clm_cca[3] =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_h_addr,
- ccx->ifs_t4_cca_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_cca_h_addr,
+ ccx->ifs_t4_cca_mask, bb->phy_idx);
env->ifs_clm_total_ifs =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_total_addr,
- ccx->ifs_total_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_total_addr,
+ ccx->ifs_total_mask, bb->phy_idx);
rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "IFS-CLM total_ifs = %d\n",
env->ifs_clm_total_ifs);
@@ -5598,16 +5665,17 @@ static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev)
"T%d:[%d, %d, %d]\n", i + 1, env->ifs_clm_his[i],
env->ifs_clm_avg[i], env->ifs_clm_cca[i]);
- rtw89_phy_ifs_clm_get_utility(rtwdev);
+ rtw89_phy_ifs_clm_get_utility(rtwdev, bb);
return true;
}
static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb,
struct rtw89_ccx_para_info *para)
{
const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
const struct rtw89_ccx_regs *ccx = phy->ccx;
u32 period = 0;
u32 unit_idx = 0;
@@ -5618,17 +5686,17 @@ static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev,
return -EINVAL;
}
- if (rtw89_phy_ccx_racing_ctrl(rtwdev, para->rac_lv))
+ if (rtw89_phy_ccx_racing_ctrl(rtwdev, bb, para->rac_lv))
return -EINVAL;
if (para->mntr_time != env->ifs_clm_mntr_time) {
rtw89_phy_ccx_ms_to_period_unit(rtwdev, para->mntr_time,
&period, &unit_idx);
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr,
- ccx->ifs_clm_period_mask, period);
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr,
- ccx->ifs_clm_cnt_unit_mask,
- unit_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr,
+ ccx->ifs_clm_period_mask, period, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr,
+ ccx->ifs_clm_cnt_unit_mask,
+ unit_idx, bb->phy_idx);
rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
"Update IFS-CLM time ((%d)) -> ((%d))\n",
@@ -5639,18 +5707,19 @@ static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev,
env->ccx_unit_idx = (u8)unit_idx;
}
- if (rtw89_phy_ifs_clm_th_update_check(rtwdev, para)) {
+ if (rtw89_phy_ifs_clm_th_update_check(rtwdev, bb, para)) {
env->ifs_clm_app = para->ifs_clm_app;
- rtw89_phy_ifs_clm_set_th_reg(rtwdev);
+ rtw89_phy_ifs_clm_set_th_reg(rtwdev, bb);
}
return 0;
}
-void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev)
+static void __rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
- struct rtw89_ccx_para_info para = {0};
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
+ struct rtw89_ccx_para_info para = {};
u8 chk_result = RTW89_PHY_ENV_MON_CCX_FAIL;
env->ccx_watchdog_result = RTW89_PHY_ENV_MON_CCX_FAIL;
@@ -5660,25 +5729,36 @@ void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev)
return;
}
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
+ "BB-%d env_monitor track\n", bb->phy_idx);
+
/* only ifs_clm for now */
- if (rtw89_phy_ifs_clm_get_result(rtwdev))
+ if (rtw89_phy_ifs_clm_get_result(rtwdev, bb))
env->ccx_watchdog_result |= RTW89_PHY_ENV_MON_IFS_CLM;
- rtw89_phy_ccx_racing_release(rtwdev);
+ rtw89_phy_ccx_racing_release(rtwdev, bb);
para.mntr_time = 1900;
para.rac_lv = RTW89_RAC_LV_1;
para.ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;
- if (rtw89_phy_ifs_clm_set(rtwdev, &para) == 0)
+ if (rtw89_phy_ifs_clm_set(rtwdev, bb, &para) == 0)
chk_result |= RTW89_PHY_ENV_MON_IFS_CLM;
if (chk_result)
- rtw89_phy_ccx_trigger(rtwdev);
+ rtw89_phy_ccx_trigger(rtwdev, bb);
rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
"get_result=0x%x, chk_result:0x%x\n",
env->ccx_watchdog_result, chk_result);
}
+void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_bb_ctx *bb;
+
+ rtw89_for_each_active_bb(rtwdev, bb)
+ __rtw89_phy_env_monitor_track(rtwdev, bb);
+}
+
static bool rtw89_physts_ie_page_valid(enum rtw89_phy_status_bitmap *ie_page)
{
if (*ie_page >= RTW89_PHYSTS_BITMAP_NUM ||
@@ -5799,11 +5879,12 @@ static void rtw89_physts_parsing_init(struct rtw89_dev *rtwdev)
__rtw89_physts_parsing_init(rtwdev, RTW89_PHY_1);
}
-static void rtw89_phy_dig_read_gain_table(struct rtw89_dev *rtwdev, int type)
+static void rtw89_phy_dig_read_gain_table(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb, int type)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- struct rtw89_dig_info *dig = &rtwdev->dig;
const struct rtw89_phy_dig_gain_cfg *cfg;
+ struct rtw89_dig_info *dig = &bb->dig;
const char *msg;
u8 i;
s8 gain_base;
@@ -5840,8 +5921,8 @@ static void rtw89_phy_dig_read_gain_table(struct rtw89_dev *rtwdev, int type)
}
for (i = 0; i < cfg->size; i++) {
- tmp = rtw89_phy_read32_mask(rtwdev, cfg->table[i].addr,
- cfg->table[i].mask);
+ tmp = rtw89_phy_read32_idx(rtwdev, cfg->table[i].addr,
+ cfg->table[i].mask, bb->phy_idx);
tmp >>= DIG_GAIN_SHIFT;
gain_arr[i] = sign_extend32(tmp, U4_MAX_BIT) + gain_base;
gain_base += DIG_GAIN;
@@ -5851,25 +5932,26 @@ static void rtw89_phy_dig_read_gain_table(struct rtw89_dev *rtwdev, int type)
}
}
-static void rtw89_phy_dig_update_gain_para(struct rtw89_dev *rtwdev)
+static void rtw89_phy_dig_update_gain_para(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
- struct rtw89_dig_info *dig = &rtwdev->dig;
+ struct rtw89_dig_info *dig = &bb->dig;
u32 tmp;
u8 i;
if (!rtwdev->hal.support_igi)
return;
- tmp = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PKPW,
- B_PATH0_IB_PKPW_MSK);
+ tmp = rtw89_phy_read32_idx(rtwdev, R_PATH0_IB_PKPW,
+ B_PATH0_IB_PKPW_MSK, bb->phy_idx);
dig->ib_pkpwr = sign_extend32(tmp >> DIG_GAIN_SHIFT, U8_MAX_BIT);
- dig->ib_pbk = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PBK,
- B_PATH0_IB_PBK_MSK);
+ dig->ib_pbk = rtw89_phy_read32_idx(rtwdev, R_PATH0_IB_PBK,
+ B_PATH0_IB_PBK_MSK, bb->phy_idx);
rtw89_debug(rtwdev, RTW89_DBG_DIG, "ib_pkpwr=%d, ib_pbk=%d\n",
dig->ib_pkpwr, dig->ib_pbk);
for (i = RTW89_DIG_GAIN_LNA_G; i < RTW89_DIG_GAIN_MAX; i++)
- rtw89_phy_dig_read_gain_table(rtwdev, i);
+ rtw89_phy_dig_read_gain_table(rtwdev, bb, i);
}
static const u8 rssi_nolink = 22;
@@ -5878,10 +5960,11 @@ static const u16 fa_th_2g[FA_TH_NUM] = {22, 44, 66, 88};
static const u16 fa_th_5g[FA_TH_NUM] = {4, 8, 12, 16};
static const u16 fa_th_nolink[FA_TH_NUM] = {196, 352, 440, 528};
-static void rtw89_phy_dig_update_rssi_info(struct rtw89_dev *rtwdev)
+static void rtw89_phy_dig_update_rssi_info(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
- struct rtw89_phy_ch_info *ch_info = &rtwdev->ch_info;
- struct rtw89_dig_info *dig = &rtwdev->dig;
+ struct rtw89_phy_ch_info *ch_info = &bb->ch_info;
+ struct rtw89_dig_info *dig = &bb->dig;
bool is_linked = rtwdev->total_sta_assoc > 0;
if (is_linked) {
@@ -5892,10 +5975,11 @@ static void rtw89_phy_dig_update_rssi_info(struct rtw89_dev *rtwdev)
}
}
-static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev)
+static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
- struct rtw89_dig_info *dig = &rtwdev->dig;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_mgnt_chan_get(rtwdev, bb->phy_idx);
+ struct rtw89_dig_info *dig = &bb->dig;
bool is_linked = rtwdev->total_sta_assoc > 0;
const u16 *fa_th_src = NULL;
@@ -5924,9 +6008,10 @@ static const u8 pd_low_th_offset = 16, dynamic_igi_min = 0x20;
static const u8 igi_max_performance_mode = 0x5a;
static const u8 dynamic_pd_threshold_max;
-static void rtw89_phy_dig_para_reset(struct rtw89_dev *rtwdev)
+static void rtw89_phy_dig_para_reset(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
- struct rtw89_dig_info *dig = &rtwdev->dig;
+ struct rtw89_dig_info *dig = &bb->dig;
dig->cur_gaincode.lna_idx = LNA_IDX_MAX;
dig->cur_gaincode.tia_idx = TIA_IDX_MAX;
@@ -5942,15 +6027,27 @@ static void rtw89_phy_dig_para_reset(struct rtw89_dev *rtwdev)
dig->is_linked_pre = false;
}
+static void __rtw89_phy_dig_init(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_DIG, "BB-%d dig_init\n", bb->phy_idx);
+
+ rtw89_phy_dig_update_gain_para(rtwdev, bb);
+ rtw89_phy_dig_reset(rtwdev, bb);
+}
+
static void rtw89_phy_dig_init(struct rtw89_dev *rtwdev)
{
- rtw89_phy_dig_update_gain_para(rtwdev);
- rtw89_phy_dig_reset(rtwdev);
+ struct rtw89_bb_ctx *bb;
+
+ rtw89_for_each_capab_bb(rtwdev, bb)
+ __rtw89_phy_dig_init(rtwdev, bb);
}
-static u8 rtw89_phy_dig_lna_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi)
+static u8 rtw89_phy_dig_lna_idx_by_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb, u8 rssi)
{
- struct rtw89_dig_info *dig = &rtwdev->dig;
+ struct rtw89_dig_info *dig = &bb->dig;
u8 lna_idx;
if (rssi < dig->igi_rssi_th[0])
@@ -5969,9 +6066,10 @@ static u8 rtw89_phy_dig_lna_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi)
return lna_idx;
}
-static u8 rtw89_phy_dig_tia_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi)
+static u8 rtw89_phy_dig_tia_idx_by_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb, u8 rssi)
{
- struct rtw89_dig_info *dig = &rtwdev->dig;
+ struct rtw89_dig_info *dig = &bb->dig;
u8 tia_idx;
if (rssi < dig->igi_rssi_th[0])
@@ -5984,10 +6082,11 @@ static u8 rtw89_phy_dig_tia_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi)
#define IB_PBK_BASE 110
#define WB_RSSI_BASE 10
-static u8 rtw89_phy_dig_rxb_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi,
+static u8 rtw89_phy_dig_rxb_idx_by_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb, u8 rssi,
struct rtw89_agc_gaincode_set *set)
{
- struct rtw89_dig_info *dig = &rtwdev->dig;
+ struct rtw89_dig_info *dig = &bb->dig;
s8 lna_gain = dig->lna_gain[set->lna_idx];
s8 tia_gain = dig->tia_gain[set->tia_idx];
s32 wb_rssi = rssi + lna_gain + tia_gain;
@@ -6003,12 +6102,13 @@ static u8 rtw89_phy_dig_rxb_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi,
return rxb_idx;
}
-static void rtw89_phy_dig_gaincode_by_rssi(struct rtw89_dev *rtwdev, u8 rssi,
+static void rtw89_phy_dig_gaincode_by_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb, u8 rssi,
struct rtw89_agc_gaincode_set *set)
{
- set->lna_idx = rtw89_phy_dig_lna_idx_by_rssi(rtwdev, rssi);
- set->tia_idx = rtw89_phy_dig_tia_idx_by_rssi(rtwdev, rssi);
- set->rxb_idx = rtw89_phy_dig_rxb_idx_by_rssi(rtwdev, rssi, set);
+ set->lna_idx = rtw89_phy_dig_lna_idx_by_rssi(rtwdev, bb, rssi);
+ set->tia_idx = rtw89_phy_dig_tia_idx_by_rssi(rtwdev, bb, rssi);
+ set->rxb_idx = rtw89_phy_dig_rxb_idx_by_rssi(rtwdev, bb, rssi, set);
rtw89_debug(rtwdev, RTW89_DBG_DIG,
"final_rssi=%03d, (lna,tia,rab)=(%d,%d,%02d)\n",
@@ -6017,10 +6117,11 @@ static void rtw89_phy_dig_gaincode_by_rssi(struct rtw89_dev *rtwdev, u8 rssi,
#define IGI_OFFSET_MAX 25
#define IGI_OFFSET_MUL 2
-static void rtw89_phy_dig_igi_offset_by_env(struct rtw89_dev *rtwdev)
+static void rtw89_phy_dig_igi_offset_by_env(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
- struct rtw89_dig_info *dig = &rtwdev->dig;
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_dig_info *dig = &bb->dig;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
enum rtw89_dig_noisy_level noisy_lv;
u8 igi_offset = dig->fa_rssi_ofst;
u16 fa_ratio = 0;
@@ -6057,92 +6158,99 @@ static void rtw89_phy_dig_igi_offset_by_env(struct rtw89_dev *rtwdev)
noisy_lv, igi_offset);
}
-static void rtw89_phy_dig_set_lna_idx(struct rtw89_dev *rtwdev, u8 lna_idx)
+static void rtw89_phy_dig_set_lna_idx(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb, u8 lna_idx)
{
const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
- rtw89_phy_write32_mask(rtwdev, dig_regs->p0_lna_init.addr,
- dig_regs->p0_lna_init.mask, lna_idx);
- rtw89_phy_write32_mask(rtwdev, dig_regs->p1_lna_init.addr,
- dig_regs->p1_lna_init.mask, lna_idx);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->p0_lna_init.addr,
+ dig_regs->p0_lna_init.mask, lna_idx, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->p1_lna_init.addr,
+ dig_regs->p1_lna_init.mask, lna_idx, bb->phy_idx);
}
-static void rtw89_phy_dig_set_tia_idx(struct rtw89_dev *rtwdev, u8 tia_idx)
+static void rtw89_phy_dig_set_tia_idx(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb, u8 tia_idx)
{
const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
- rtw89_phy_write32_mask(rtwdev, dig_regs->p0_tia_init.addr,
- dig_regs->p0_tia_init.mask, tia_idx);
- rtw89_phy_write32_mask(rtwdev, dig_regs->p1_tia_init.addr,
- dig_regs->p1_tia_init.mask, tia_idx);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->p0_tia_init.addr,
+ dig_regs->p0_tia_init.mask, tia_idx, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->p1_tia_init.addr,
+ dig_regs->p1_tia_init.mask, tia_idx, bb->phy_idx);
}
-static void rtw89_phy_dig_set_rxb_idx(struct rtw89_dev *rtwdev, u8 rxb_idx)
+static void rtw89_phy_dig_set_rxb_idx(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb, u8 rxb_idx)
{
const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
- rtw89_phy_write32_mask(rtwdev, dig_regs->p0_rxb_init.addr,
- dig_regs->p0_rxb_init.mask, rxb_idx);
- rtw89_phy_write32_mask(rtwdev, dig_regs->p1_rxb_init.addr,
- dig_regs->p1_rxb_init.mask, rxb_idx);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->p0_rxb_init.addr,
+ dig_regs->p0_rxb_init.mask, rxb_idx, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->p1_rxb_init.addr,
+ dig_regs->p1_rxb_init.mask, rxb_idx, bb->phy_idx);
}
static void rtw89_phy_dig_set_igi_cr(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb,
const struct rtw89_agc_gaincode_set set)
{
if (!rtwdev->hal.support_igi)
return;
- rtw89_phy_dig_set_lna_idx(rtwdev, set.lna_idx);
- rtw89_phy_dig_set_tia_idx(rtwdev, set.tia_idx);
- rtw89_phy_dig_set_rxb_idx(rtwdev, set.rxb_idx);
+ rtw89_phy_dig_set_lna_idx(rtwdev, bb, set.lna_idx);
+ rtw89_phy_dig_set_tia_idx(rtwdev, bb, set.tia_idx);
+ rtw89_phy_dig_set_rxb_idx(rtwdev, bb, set.rxb_idx);
rtw89_debug(rtwdev, RTW89_DBG_DIG, "Set (lna,tia,rxb)=((%d,%d,%02d))\n",
set.lna_idx, set.tia_idx, set.rxb_idx);
}
static void rtw89_phy_dig_sdagc_follow_pagc_config(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb,
bool enable)
{
const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
- rtw89_phy_write32_mask(rtwdev, dig_regs->p0_p20_pagcugc_en.addr,
- dig_regs->p0_p20_pagcugc_en.mask, enable);
- rtw89_phy_write32_mask(rtwdev, dig_regs->p0_s20_pagcugc_en.addr,
- dig_regs->p0_s20_pagcugc_en.mask, enable);
- rtw89_phy_write32_mask(rtwdev, dig_regs->p1_p20_pagcugc_en.addr,
- dig_regs->p1_p20_pagcugc_en.mask, enable);
- rtw89_phy_write32_mask(rtwdev, dig_regs->p1_s20_pagcugc_en.addr,
- dig_regs->p1_s20_pagcugc_en.mask, enable);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->p0_p20_pagcugc_en.addr,
+ dig_regs->p0_p20_pagcugc_en.mask, enable, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->p0_s20_pagcugc_en.addr,
+ dig_regs->p0_s20_pagcugc_en.mask, enable, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->p1_p20_pagcugc_en.addr,
+ dig_regs->p1_p20_pagcugc_en.mask, enable, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->p1_s20_pagcugc_en.addr,
+ dig_regs->p1_s20_pagcugc_en.mask, enable, bb->phy_idx);
rtw89_debug(rtwdev, RTW89_DBG_DIG, "sdagc_follow_pagc=%d\n", enable);
}
-static void rtw89_phy_dig_config_igi(struct rtw89_dev *rtwdev)
+static void rtw89_phy_dig_config_igi(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
- struct rtw89_dig_info *dig = &rtwdev->dig;
+ struct rtw89_dig_info *dig = &bb->dig;
if (!rtwdev->hal.support_igi)
return;
if (dig->force_gaincode_idx_en) {
- rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode);
+ rtw89_phy_dig_set_igi_cr(rtwdev, bb, dig->force_gaincode);
rtw89_debug(rtwdev, RTW89_DBG_DIG,
"Force gaincode index enabled.\n");
} else {
- rtw89_phy_dig_gaincode_by_rssi(rtwdev, dig->igi_fa_rssi,
+ rtw89_phy_dig_gaincode_by_rssi(rtwdev, bb, dig->igi_fa_rssi,
&dig->cur_gaincode);
- rtw89_phy_dig_set_igi_cr(rtwdev, dig->cur_gaincode);
+ rtw89_phy_dig_set_igi_cr(rtwdev, bb, dig->cur_gaincode);
}
}
-static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
- bool enable)
+static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb,
+ u8 rssi, bool enable)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_mgnt_chan_get(rtwdev, bb->phy_idx);
const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
enum rtw89_bandwidth cbw = chan->band_width;
- struct rtw89_dig_info *dig = &rtwdev->dig;
+ struct rtw89_dig_info *dig = &bb->dig;
u8 final_rssi = 0, under_region = dig->pd_low_th_ofst;
u8 ofdm_cca_th;
s8 cck_cca_th;
@@ -6184,10 +6292,10 @@ static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
"Dynamic PD th disabled, Set PD_low_bd=0\n");
}
- rtw89_phy_write32_mask(rtwdev, dig_regs->seg0_pd_reg,
- dig_regs->pd_lower_bound_mask, pd_val);
- rtw89_phy_write32_mask(rtwdev, dig_regs->seg0_pd_reg,
- dig_regs->pd_spatial_reuse_en, enable);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->seg0_pd_reg,
+ dig_regs->pd_lower_bound_mask, pd_val, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->seg0_pd_reg,
+ dig_regs->pd_spatial_reuse_en, enable, bb->phy_idx);
if (!rtwdev->hal.support_cckpd)
return;
@@ -6199,29 +6307,29 @@ static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
"igi=%d, cck_ccaTH=%d, backoff=%d, cck_PD_low=((%d))dB\n",
final_rssi, cck_cca_th, under_region, pd_val);
- rtw89_phy_write32_mask(rtwdev, dig_regs->bmode_pd_reg,
- dig_regs->bmode_cca_rssi_limit_en, enable);
- rtw89_phy_write32_mask(rtwdev, dig_regs->bmode_pd_lower_bound_reg,
- dig_regs->bmode_rssi_nocca_low_th_mask, pd_val);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->bmode_pd_reg,
+ dig_regs->bmode_cca_rssi_limit_en, enable, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->bmode_pd_lower_bound_reg,
+ dig_regs->bmode_rssi_nocca_low_th_mask, pd_val, bb->phy_idx);
}
-void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev)
+void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
- struct rtw89_dig_info *dig = &rtwdev->dig;
+ struct rtw89_dig_info *dig = &bb->dig;
dig->bypass_dig = false;
- rtw89_phy_dig_para_reset(rtwdev);
- rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode);
- rtw89_phy_dig_dyn_pd_th(rtwdev, rssi_nolink, false);
- rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false);
- rtw89_phy_dig_update_para(rtwdev);
+ rtw89_phy_dig_para_reset(rtwdev, bb);
+ rtw89_phy_dig_set_igi_cr(rtwdev, bb, dig->force_gaincode);
+ rtw89_phy_dig_dyn_pd_th(rtwdev, bb, rssi_nolink, false);
+ rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, bb, false);
+ rtw89_phy_dig_update_para(rtwdev, bb);
}
#define IGI_RSSI_MIN 10
#define ABS_IGI_MIN 0xc
-void rtw89_phy_dig(struct rtw89_dev *rtwdev)
+static void __rtw89_phy_dig(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
- struct rtw89_dig_info *dig = &rtwdev->dig;
+ struct rtw89_dig_info *dig = &bb->dig;
bool is_linked = rtwdev->total_sta_assoc > 0;
u8 igi_min;
@@ -6230,20 +6338,22 @@ void rtw89_phy_dig(struct rtw89_dev *rtwdev)
return;
}
- rtw89_phy_dig_update_rssi_info(rtwdev);
+ rtw89_debug(rtwdev, RTW89_DBG_DIG, "BB-%d dig track\n", bb->phy_idx);
+
+ rtw89_phy_dig_update_rssi_info(rtwdev, bb);
if (!dig->is_linked_pre && is_linked) {
rtw89_debug(rtwdev, RTW89_DBG_DIG, "First connected\n");
- rtw89_phy_dig_update_para(rtwdev);
+ rtw89_phy_dig_update_para(rtwdev, bb);
dig->igi_fa_rssi = dig->igi_rssi;
} else if (dig->is_linked_pre && !is_linked) {
rtw89_debug(rtwdev, RTW89_DBG_DIG, "First disconnected\n");
- rtw89_phy_dig_update_para(rtwdev);
+ rtw89_phy_dig_update_para(rtwdev, bb);
dig->igi_fa_rssi = dig->igi_rssi;
}
dig->is_linked_pre = is_linked;
- rtw89_phy_dig_igi_offset_by_env(rtwdev);
+ rtw89_phy_dig_igi_offset_by_env(rtwdev, bb);
igi_min = max_t(int, dig->igi_rssi - IGI_RSSI_MIN, 0);
dig->dyn_igi_max = min(igi_min + IGI_OFFSET_MAX, igi_max_performance_mode);
@@ -6262,14 +6372,22 @@ void rtw89_phy_dig(struct rtw89_dev *rtwdev)
dig->igi_rssi, dig->dyn_igi_max, dig->dyn_igi_min,
dig->igi_fa_rssi);
- rtw89_phy_dig_config_igi(rtwdev);
+ rtw89_phy_dig_config_igi(rtwdev, bb);
- rtw89_phy_dig_dyn_pd_th(rtwdev, dig->igi_fa_rssi, dig->dyn_pd_th_en);
+ rtw89_phy_dig_dyn_pd_th(rtwdev, bb, dig->igi_fa_rssi, dig->dyn_pd_th_en);
if (dig->dyn_pd_th_en && dig->igi_fa_rssi > dig->dyn_pd_th_max)
- rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, true);
+ rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, bb, true);
else
- rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false);
+ rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, bb, false);
+}
+
+void rtw89_phy_dig(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_bb_ctx *bb;
+
+ rtw89_for_each_active_bb(rtwdev, bb)
+ __rtw89_phy_dig(rtwdev, bb);
}
static void __rtw89_phy_tx_path_div_sta_iter(struct rtw89_dev *rtwdev,
@@ -6443,17 +6561,17 @@ static void rtw89_phy_antdiv_training_state(struct rtw89_dev *rtwdev)
}
antdiv->training_count++;
- ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->antdiv_work,
- state_period);
+ wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->antdiv_work,
+ state_period);
}
-void rtw89_phy_antdiv_work(struct work_struct *work)
+void rtw89_phy_antdiv_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
antdiv_work.work);
struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(wiphy);
if (antdiv->training_count <= ANTDIV_TRAINNING_CNT) {
rtw89_phy_antdiv_training_state(rtwdev);
@@ -6461,8 +6579,6 @@ void rtw89_phy_antdiv_work(struct work_struct *work)
rtw89_phy_antdiv_decision_state(rtwdev);
rtw89_phy_antdiv_set_ant(rtwdev);
}
-
- mutex_unlock(&rtwdev->mutex);
}
void rtw89_phy_antdiv_track(struct rtw89_dev *rtwdev)
@@ -6483,19 +6599,34 @@ void rtw89_phy_antdiv_track(struct rtw89_dev *rtwdev)
return;
antdiv->training_count = 0;
- ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->antdiv_work, 0);
+ wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->antdiv_work, 0);
+}
+
+static void __rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
+ "BB-%d env_monitor init\n", bb->phy_idx);
+
+ rtw89_phy_ccx_top_setting_init(rtwdev, bb);
+ rtw89_phy_ifs_clm_setting_init(rtwdev, bb);
}
static void rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev)
{
- rtw89_phy_ccx_top_setting_init(rtwdev);
- rtw89_phy_ifs_clm_setting_init(rtwdev);
+ struct rtw89_bb_ctx *bb;
+
+ rtw89_for_each_capab_bb(rtwdev, bb)
+ __rtw89_phy_env_monitor_init(rtwdev, bb);
}
-static void rtw89_phy_edcca_init(struct rtw89_dev *rtwdev)
+static void __rtw89_phy_edcca_init(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
- struct rtw89_edcca_bak *edcca_bak = &rtwdev->hal.edcca_bak;
+ struct rtw89_edcca_bak *edcca_bak = &bb->edcca_bak;
+
+ rtw89_debug(rtwdev, RTW89_DBG_EDCCA, "BB-%d edcca init\n", bb->phy_idx);
memset(edcca_bak, 0, sizeof(*edcca_bak));
@@ -6511,8 +6642,16 @@ static void rtw89_phy_edcca_init(struct rtw89_dev *rtwdev)
rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_FFT_EN, 1);
}
- rtw89_phy_write32_mask(rtwdev, edcca_regs->tx_collision_t2r_st,
- edcca_regs->tx_collision_t2r_st_mask, 0x29);
+ rtw89_phy_write32_idx(rtwdev, edcca_regs->tx_collision_t2r_st,
+ edcca_regs->tx_collision_t2r_st_mask, 0x29, bb->phy_idx);
+}
+
+static void rtw89_phy_edcca_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_bb_ctx *bb;
+
+ rtw89_for_each_capab_bb(rtwdev, bb)
+ __rtw89_phy_edcca_init(rtwdev, bb);
}
void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
@@ -6875,44 +7014,46 @@ void rtw89_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx,
}
EXPORT_SYMBOL(rtw89_decode_chan_idx);
-void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev, bool scan)
+void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb, bool scan)
{
const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
- struct rtw89_edcca_bak *edcca_bak = &rtwdev->hal.edcca_bak;
+ struct rtw89_edcca_bak *edcca_bak = &bb->edcca_bak;
if (scan) {
edcca_bak->a =
- rtw89_phy_read32_mask(rtwdev, edcca_regs->edcca_level,
- edcca_regs->edcca_mask);
+ rtw89_phy_read32_idx(rtwdev, edcca_regs->edcca_level,
+ edcca_regs->edcca_mask, bb->phy_idx);
edcca_bak->p =
- rtw89_phy_read32_mask(rtwdev, edcca_regs->edcca_level,
- edcca_regs->edcca_p_mask);
+ rtw89_phy_read32_idx(rtwdev, edcca_regs->edcca_level,
+ edcca_regs->edcca_p_mask, bb->phy_idx);
edcca_bak->ppdu =
- rtw89_phy_read32_mask(rtwdev, edcca_regs->ppdu_level,
- edcca_regs->ppdu_mask);
-
- rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
- edcca_regs->edcca_mask, EDCCA_MAX);
- rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
- edcca_regs->edcca_p_mask, EDCCA_MAX);
- rtw89_phy_write32_mask(rtwdev, edcca_regs->ppdu_level,
- edcca_regs->ppdu_mask, EDCCA_MAX);
+ rtw89_phy_read32_idx(rtwdev, edcca_regs->ppdu_level,
+ edcca_regs->ppdu_mask, bb->phy_idx);
+
+ rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level,
+ edcca_regs->edcca_mask, EDCCA_MAX, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level,
+ edcca_regs->edcca_p_mask, EDCCA_MAX, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, edcca_regs->ppdu_level,
+ edcca_regs->ppdu_mask, EDCCA_MAX, bb->phy_idx);
} else {
- rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
- edcca_regs->edcca_mask,
- edcca_bak->a);
- rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
- edcca_regs->edcca_p_mask,
- edcca_bak->p);
- rtw89_phy_write32_mask(rtwdev, edcca_regs->ppdu_level,
- edcca_regs->ppdu_mask,
- edcca_bak->ppdu);
+ rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level,
+ edcca_regs->edcca_mask,
+ edcca_bak->a, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level,
+ edcca_regs->edcca_p_mask,
+ edcca_bak->p, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, edcca_regs->ppdu_level,
+ edcca_regs->ppdu_mask,
+ edcca_bak->ppdu, bb->phy_idx);
}
}
-static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev)
+static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
+ const struct rtw89_edcca_p_regs *edcca_p_regs;
bool flag_fb, flag_p20, flag_s20, flag_s40, flag_s80;
s8 pwdb_fb, pwdb_p20, pwdb_s20, pwdb_s40, pwdb_s80;
u8 path, per20_bitmap;
@@ -6922,13 +7063,18 @@ static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev)
if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_EDCCA))
return;
+ if (bb->phy_idx == RTW89_PHY_1)
+ edcca_p_regs = &edcca_regs->p[RTW89_PHY_1];
+ else
+ edcca_p_regs = &edcca_regs->p[RTW89_PHY_0];
+
if (rtwdev->chip->chip_id == RTL8922A)
rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
edcca_regs->rpt_sel_be_mask, 0);
- rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
- edcca_regs->rpt_sel_mask, 0);
- tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b);
+ rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
+ edcca_p_regs->rpt_sel_mask, 0);
+ tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_b);
path = u32_get_bits(tmp, B_EDCCA_RPT_B_PATH_MASK);
flag_s80 = u32_get_bits(tmp, B_EDCCA_RPT_B_S80);
flag_s40 = u32_get_bits(tmp, B_EDCCA_RPT_B_S40);
@@ -6939,19 +7085,19 @@ static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev)
pwdb_p20 = u32_get_bits(tmp, MASKBYTE2);
pwdb_fb = u32_get_bits(tmp, MASKBYTE3);
- rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
- edcca_regs->rpt_sel_mask, 4);
- tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b);
+ rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
+ edcca_p_regs->rpt_sel_mask, 4);
+ tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_b);
pwdb_s80 = u32_get_bits(tmp, MASKBYTE1);
pwdb_s40 = u32_get_bits(tmp, MASKBYTE2);
- per20_bitmap = rtw89_phy_read32_mask(rtwdev, edcca_regs->rpt_a,
+ per20_bitmap = rtw89_phy_read32_mask(rtwdev, edcca_p_regs->rpt_a,
MASKBYTE0);
if (rtwdev->chip->chip_id == RTL8922A) {
rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
edcca_regs->rpt_sel_be_mask, 4);
- tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b);
+ tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_b);
pwdb[0] = u32_get_bits(tmp, MASKBYTE3);
pwdb[1] = u32_get_bits(tmp, MASKBYTE2);
pwdb[2] = u32_get_bits(tmp, MASKBYTE1);
@@ -6959,33 +7105,33 @@ static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev)
rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
edcca_regs->rpt_sel_be_mask, 5);
- tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b);
+ tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_b);
pwdb[4] = u32_get_bits(tmp, MASKBYTE3);
pwdb[5] = u32_get_bits(tmp, MASKBYTE2);
pwdb[6] = u32_get_bits(tmp, MASKBYTE1);
pwdb[7] = u32_get_bits(tmp, MASKBYTE0);
} else {
- rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
- edcca_regs->rpt_sel_mask, 0);
- tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a);
+ rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
+ edcca_p_regs->rpt_sel_mask, 0);
+ tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_a);
pwdb[0] = u32_get_bits(tmp, MASKBYTE3);
pwdb[1] = u32_get_bits(tmp, MASKBYTE2);
- rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
- edcca_regs->rpt_sel_mask, 1);
- tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a);
+ rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
+ edcca_p_regs->rpt_sel_mask, 1);
+ tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_a);
pwdb[2] = u32_get_bits(tmp, MASKBYTE3);
pwdb[3] = u32_get_bits(tmp, MASKBYTE2);
- rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
- edcca_regs->rpt_sel_mask, 2);
- tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a);
+ rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
+ edcca_p_regs->rpt_sel_mask, 2);
+ tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_a);
pwdb[4] = u32_get_bits(tmp, MASKBYTE3);
pwdb[5] = u32_get_bits(tmp, MASKBYTE2);
- rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
- edcca_regs->rpt_sel_mask, 3);
- tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a);
+ rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
+ edcca_p_regs->rpt_sel_mask, 3);
+ tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_a);
pwdb[6] = u32_get_bits(tmp, MASKBYTE3);
pwdb[7] = u32_get_bits(tmp, MASKBYTE2);
}
@@ -7007,9 +7153,10 @@ static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev)
pwdb_fb, pwdb_p20, pwdb_s20, pwdb_s40, pwdb_s80);
}
-static u8 rtw89_phy_edcca_get_thre_by_rssi(struct rtw89_dev *rtwdev)
+static u8 rtw89_phy_edcca_get_thre_by_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
- struct rtw89_phy_ch_info *ch_info = &rtwdev->ch_info;
+ struct rtw89_phy_ch_info *ch_info = &bb->ch_info;
bool is_linked = rtwdev->total_sta_assoc > 0;
u8 rssi_min = ch_info->rssi_min >> 1;
u8 edcca_thre;
@@ -7025,13 +7172,13 @@ static u8 rtw89_phy_edcca_get_thre_by_rssi(struct rtw89_dev *rtwdev)
return edcca_thre;
}
-void rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev)
+void rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
- struct rtw89_edcca_bak *edcca_bak = &rtwdev->hal.edcca_bak;
+ struct rtw89_edcca_bak *edcca_bak = &bb->edcca_bak;
u8 th;
- th = rtw89_phy_edcca_get_thre_by_rssi(rtwdev);
+ th = rtw89_phy_edcca_get_thre_by_rssi(rtwdev, bb);
if (th == edcca_bak->th_old)
return;
@@ -7040,23 +7187,33 @@ void rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev)
rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
"[EDCCA]: Normal Mode, EDCCA_th = %d\n", th);
- rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
- edcca_regs->edcca_mask, th);
- rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
- edcca_regs->edcca_p_mask, th);
- rtw89_phy_write32_mask(rtwdev, edcca_regs->ppdu_level,
- edcca_regs->ppdu_mask, th);
+ rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level,
+ edcca_regs->edcca_mask, th, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level,
+ edcca_regs->edcca_p_mask, th, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, edcca_regs->ppdu_level,
+ edcca_regs->ppdu_mask, th, bb->phy_idx);
+}
+
+static
+void __rtw89_phy_edcca_track(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_EDCCA, "BB-%d edcca track\n", bb->phy_idx);
+
+ rtw89_phy_edcca_thre_calc(rtwdev, bb);
+ rtw89_phy_edcca_log(rtwdev, bb);
}
void rtw89_phy_edcca_track(struct rtw89_dev *rtwdev)
{
struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_bb_ctx *bb;
if (hal->disabled_dm_bitmap & BIT(RTW89_DM_DYNAMIC_EDCCA))
return;
- rtw89_phy_edcca_thre_calc(rtwdev);
- rtw89_phy_edcca_log(rtwdev);
+ rtw89_for_each_active_bb(rtwdev, bb)
+ __rtw89_phy_edcca_track(rtwdev, bb);
}
enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index 08b635c93ac3..518a100375fb 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -835,8 +835,8 @@ s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band, u8 bw,
void rtw89_phy_ant_gain_init(struct rtw89_dev *rtwdev);
s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan);
-void rtw89_print_ant_gain(struct seq_file *m, struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan);
+int rtw89_print_ant_gain(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
+ const struct rtw89_chan *chan);
void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
const struct rtw89_txpwr_table *tbl);
s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
@@ -914,6 +914,13 @@ static inline s8 rtw89_phy_txpwr_rf_to_bb(struct rtw89_dev *rtwdev, s8 txpwr_rf)
return txpwr_rf << (chip->txpwr_factor_bb - chip->txpwr_factor_rf);
}
+static inline s8 rtw89_phy_txpwr_bb_to_rf(struct rtw89_dev *rtwdev, s8 txpwr_bb)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return txpwr_bb >> (chip->txpwr_factor_bb - chip->txpwr_factor_rf);
+}
+
static inline s8 rtw89_phy_txpwr_rf_to_mac(struct rtw89_dev *rtwdev, s8 txpwr_rf)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -978,20 +985,20 @@ void rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
struct rtw89_h2c_rf_tssi *h2c);
void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev);
-void rtw89_phy_cfo_track_work(struct work_struct *work);
+void rtw89_phy_cfo_track_work(struct wiphy *wiphy, struct wiphy_work *work);
void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val,
struct rtw89_rx_phy_ppdu *phy_ppdu);
void rtw89_phy_stat_track(struct rtw89_dev *rtwdev);
void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev);
void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
u32 val);
-void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev);
+void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb);
void rtw89_phy_dig(struct rtw89_dev *rtwdev);
void rtw89_phy_tx_path_div_track(struct rtw89_dev *rtwdev);
void rtw89_phy_antdiv_parse(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu);
void rtw89_phy_antdiv_track(struct rtw89_dev *rtwdev);
-void rtw89_phy_antdiv_work(struct work_struct *work);
+void rtw89_phy_antdiv_work(struct wiphy *wiphy, struct wiphy_work *work);
void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
void rtw89_phy_tssi_ctrl_set_bandedge_cfg(struct rtw89_dev *rtwdev,
@@ -1002,9 +1009,10 @@ void rtw89_phy_ul_tb_ctrl_track(struct rtw89_dev *rtwdev);
u8 rtw89_encode_chan_idx(struct rtw89_dev *rtwdev, u8 central_ch, u8 band);
void rtw89_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx,
u8 *ch, enum nl80211_band *band);
-void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev, bool scan);
+void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb, bool scan);
void rtw89_phy_edcca_track(struct rtw89_dev *rtwdev);
-void rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev);
+void rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb);
enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx);
enum rtw89_rf_path rtw89_phy_get_syn_sel(struct rtw89_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index 96ea04d90cd3..ac46a7baa00d 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -113,7 +113,7 @@ static void __rtw89_leave_lps(struct rtw89_dev *rtwdev,
void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
{
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
__rtw89_leave_ps_mode(rtwdev);
}
@@ -125,7 +125,7 @@ void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool can_ps_mode = true;
unsigned int link_id;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
if (test_and_set_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags))
return;
@@ -162,7 +162,7 @@ void rtw89_leave_lps(struct rtw89_dev *rtwdev)
struct rtw89_vif *rtwvif;
unsigned int link_id;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
if (!test_and_clear_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags))
return;
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index 10d0efa7a58e..c776954ad360 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -6618,6 +6618,13 @@
#define B_BE_RTS_LIMIT_IN_OFDM6 BIT(1)
#define B_BE_CHECK_CCK_EN BIT(0)
+#define R_BE_TXCNT 0x1082C
+#define R_BE_TXCNT_C1 0x1482C
+#define B_BE_ADD_TXCNT_BY BIT(31)
+#define B_BE_TOTAL_TC_OPT BIT(30)
+#define B_BE_S_TXCNT_LMT_MASK GENMASK(29, 24)
+#define B_BE_L_TXCNT_LMT_MASK GENMASK(21, 16)
+
#define R_BE_MBSSID_DROP_0 0x1083C
#define R_BE_MBSSID_DROP_0_C1 0x1483C
#define B_BE_GI_LTF_FB_SEL BIT(30)
@@ -7095,6 +7102,10 @@
#define B_BE_MACLBK_RDY_NUM_MASK GENMASK(7, 3)
#define B_BE_MACLBK_EN BIT(0)
+#define R_BE_CLIENT_OM_CTRL 0x11040
+#define R_BE_CLIENT_OM_CTRL_C1 0x15040
+#define B_BE_TRIG_DIS_EHTTB BIT(24)
+
#define R_BE_WMAC_NAV_CTL 0x11080
#define R_BE_WMAC_NAV_CTL_C1 0x15080
#define B_BE_WMAC_NAV_UPPER_EN BIT(26)
@@ -8157,6 +8168,8 @@
#define B_EDCCA_RPT_B_S40 BIT(4)
#define B_EDCCA_RPT_B_S80 BIT(3)
#define B_EDCCA_RPT_B_PATH_MASK GENMASK(2, 1)
+#define R_EDCCA_RPT_P1_A 0x1740
+#define R_EDCCA_RPT_P1_B 0x1744
#define R_SWSI_V1 0x174C
#define B_SWSI_W_BUSY_V1 BIT(24)
#define B_SWSI_R_BUSY_V1 BIT(25)
@@ -8222,6 +8235,7 @@
#define B_TXCKEN_FORCE_ALL GENMASK(24, 0)
#define R_EDCCA_RPT_SEL 0x20CC
#define B_EDCCA_RPT_SEL_MSK GENMASK(2, 0)
+#define B_EDCCA_RPT_SEL_P1_MSK GENMASK(5, 3)
#define R_ADC_FIFO 0x20fc
#define B_ADC_FIFO_RST GENMASK(31, 24)
#define B_ADC_FIFO_RXK GENMASK(31, 16)
@@ -8291,6 +8305,8 @@
#define B_P1_EN_SOUND_WO_NDP BIT(1)
#define R_EDCCA_RPT_A_BE 0x2E38
#define R_EDCCA_RPT_B_BE 0x2E3C
+#define R_EDCCA_RPT_P1_A_BE 0x2E40
+#define R_EDCCA_RPT_P1_B_BE 0x2E44
#define R_S1_HW_SI_DIS 0x3200
#define B_S1_HW_SI_DIS_W_R_TRIG GENMASK(30, 28)
#define R_P1_RXCK 0x32A0
@@ -9169,6 +9185,16 @@
#define B_IQKINF2_FCNT GENMASK(23, 16)
#define B_IQKINF2_KCNT GENMASK(15, 8)
#define B_IQKINF2_NCTLV GENMASK(7, 0)
+#define R_TXAGC_REF_DBM_RF1_P0 0xBC04
+#define B_TXAGC_OFDM_REF_DBM_RF1_P0 GENMASK(10, 2)
+#define B_TXAGC_CCK_REF_DBM_RF1_P0 GENMASK(19, 11)
+#define R_TSSI_K_RF1_P0 0xBC28
+#define B_TSSI_K_OFDM_RF1_P0 GENMASK(9, 0)
+#define R_TXAGC_REF_DBM_RF1_P1 0xBD04
+#define B_TXAGC_OFDM_REF_DBM_RF1_P1 GENMASK(10, 2)
+#define B_TXAGC_CCK_REF_DBM_RF1_P1 GENMASK(19, 11)
+#define R_TSSI_K_RF1_P1 0xBD28
+#define B_TSSI_K_OFDM_RF1_P1 GENMASK(9, 0)
#define R_RFK_ST 0xBFF8
#define R_DCOF0 0xC000
#define B_DCOF0_RST BIT(17)
@@ -9338,16 +9364,18 @@
#define R_TSSI_MAP_OFST_P1 0xE720
#define B_TSSI_MAP_OFST_OFDM GENMASK(17, 9)
#define B_TSSI_MAP_OFST_CCK GENMASK(26, 18)
-#define R_TXAGC_REF0_P0 0xE628
-#define R_TXAGC_REF0_P1 0xE728
-#define B_TXAGC_REF0_OFDM_DBM GENMASK(8, 0)
-#define B_TXAGC_REF0_CCK_DBM GENMASK(17, 9)
-#define B_TXAGC_REF0_OFDM_CW GENMASK(26, 18)
-#define R_TXAGC_REF1_P0 0xE62C
-#define R_TXAGC_REF1_P1 0xE72C
-#define B_TXAGC_REF1_CCK_CW GENMASK(8, 0)
+#define R_TXAGC_REF_DBM_P0 0xE628
+#define B_TXAGC_OFDM_REF_DBM_P0 GENMASK(8, 0)
+#define B_TXAGC_CCK_REF_DBM_P0 GENMASK(17, 9)
+#define R_TSSI_K_P0 0xE6A0
+#define B_TSSI_K_OFDM_P0 GENMASK(29, 20)
#define R_TXPWR_RSTB 0xE70C
#define B_TXPWR_RSTB BIT(16)
+#define R_TXAGC_REF_DBM_P1 0xE728
+#define B_TXAGC_OFDM_REF_DBM_P1 GENMASK(8, 0)
+#define B_TXAGC_CCK_REF_DBM_P1 GENMASK(17, 9)
+#define R_TSSI_K_P1 0xE7A0
+#define B_TSSI_K_OFDM_P1 GENMASK(29, 20)
/* WiFi CPU local domain */
#define R_AX_WDT_CTRL 0x0040
diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c
index 80b2f74589eb..655323a79608 100644
--- a/drivers/net/wireless/realtek/rtw89/regd.c
+++ b/drivers/net/wireless/realtek/rtw89/regd.c
@@ -7,257 +7,266 @@
#include "ps.h"
#include "util.h"
-#define COUNTRY_REGD(_alpha2, _txpwr_regd...) \
- {.alpha2 = (_alpha2), \
- .txpwr_regd = {_txpwr_regd}, \
+static
+void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+
+#define COUNTRY_REGD(_alpha2, _rule_2ghz, _rule_5ghz, _rule_6ghz, _fmap) \
+ { \
+ .alpha2 = _alpha2, \
+ .txpwr_regd[RTW89_BAND_2G] = _rule_2ghz, \
+ .txpwr_regd[RTW89_BAND_5G] = _rule_5ghz, \
+ .txpwr_regd[RTW89_BAND_6G] = _rule_6ghz, \
+ .func_bitmap = { _fmap, }, \
}
+static_assert(BITS_PER_TYPE(unsigned long) >= NUM_OF_RTW89_REGD_FUNC);
+
static const struct rtw89_regd rtw89_ww_regd =
- COUNTRY_REGD("00", RTW89_WW, RTW89_WW, RTW89_WW);
+ COUNTRY_REGD("00", RTW89_WW, RTW89_WW, RTW89_WW, 0x0);
static const struct rtw89_regd rtw89_regd_map[] = {
- COUNTRY_REGD("AR", RTW89_MEXICO, RTW89_MEXICO, RTW89_FCC),
- COUNTRY_REGD("BO", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("BR", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("CL", RTW89_CHILE, RTW89_CHILE, RTW89_CHILE),
- COUNTRY_REGD("CO", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("CR", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("EC", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("SV", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("GT", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("HN", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("MX", RTW89_MEXICO, RTW89_MEXICO, RTW89_FCC),
- COUNTRY_REGD("NI", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("PA", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("PY", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("PE", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("US", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("UY", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("VE", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("PR", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("DO", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("AT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CY", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("DK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("EE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("FI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("FR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("DE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("HU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("IS", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("IE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("IT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LV", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MC", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("NL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("NO", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("PL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("PT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("ES", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GB", RTW89_UK, RTW89_UK, RTW89_UK),
- COUNTRY_REGD("AL", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("AZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("BG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("HR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("EG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("IQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("IL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("JO", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("KZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("KE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("KW", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("KG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LB", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LS", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("MA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("NA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("NG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("OM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("QA", RTW89_QATAR, RTW89_QATAR, RTW89_QATAR),
- COUNTRY_REGD("RO", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("RU", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("SA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SN", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("RS", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("ME", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("ZA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("UA", RTW89_UKRAINE, RTW89_UKRAINE, RTW89_UKRAINE),
- COUNTRY_REGD("AE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("YE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("ZW", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BD", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("KH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("CN", RTW89_CN, RTW89_CN, RTW89_CN),
- COUNTRY_REGD("HK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("IN", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("ID", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("KR", RTW89_KCC, RTW89_KCC, RTW89_KCC),
- COUNTRY_REGD("MY", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("PK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("PH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("TW", RTW89_FCC, RTW89_FCC, RTW89_ETSI),
- COUNTRY_REGD("TH", RTW89_THAILAND, RTW89_THAILAND, RTW89_THAILAND),
- COUNTRY_REGD("VN", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("AU", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA),
- COUNTRY_REGD("NZ", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA),
- COUNTRY_REGD("PG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CA", RTW89_IC, RTW89_IC, RTW89_IC),
- COUNTRY_REGD("JP", RTW89_MKK, RTW89_MKK, RTW89_MKK),
- COUNTRY_REGD("JM", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("AN", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("TT", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("TN", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("AF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("DZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("AS", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("AD", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("AO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("AI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("AQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("AG", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("AM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("AW", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BS", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BB", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BY", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("BZ", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("BJ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("BM", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BT", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("BW", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BV", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("IO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("VG", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BN", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("BF", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("BI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("CV", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("KY", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("CF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("TD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CX", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
- COUNTRY_REGD("CC", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
- COUNTRY_REGD("KM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("CG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("CI", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("DJ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("DM", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("GQ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("ER", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("ET", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("FK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("FO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("FJ", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("GF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("PF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("TF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("GA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("GM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GL", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("GP", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("GG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("GN", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("GY", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("HT", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("HM", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
- COUNTRY_REGD("VA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("IM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("JE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("KI", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("XK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LY", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("MO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("MG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("MV", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("ML", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("MH", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("MQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("MR", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("MU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("YT", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("MD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MN", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("NR", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("NP", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("NC", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("NE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("NU", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
- COUNTRY_REGD("NF", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
- COUNTRY_REGD("MP", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("PW", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("RE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("RW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("SH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("KN", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("LC", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("MF", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("SX", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("PM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("VC", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("WS", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("SM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("ST", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("SC", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("SL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SB", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("SO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("GS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("SR", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("SJ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("SZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TJ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TK", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
- COUNTRY_REGD("TO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("TM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("TC", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("TV", RTW89_ETSI, RTW89_NA, RTW89_NA),
- COUNTRY_REGD("UG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("VI", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("UZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("VU", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("WF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("EH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("ZM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("CU", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("IR", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("SY", RTW89_ETSI, RTW89_NA, RTW89_NA),
- COUNTRY_REGD("SD", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("PS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("AR", RTW89_MEXICO, RTW89_MEXICO, RTW89_FCC, 0x0),
+ COUNTRY_REGD("BO", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("BR", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("CL", RTW89_CHILE, RTW89_CHILE, RTW89_CHILE, 0x0),
+ COUNTRY_REGD("CO", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("CR", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("EC", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("SV", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("GT", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("HN", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("MX", RTW89_MEXICO, RTW89_MEXICO, RTW89_FCC, 0x0),
+ COUNTRY_REGD("NI", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("PA", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("PY", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("PE", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("US", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x1),
+ COUNTRY_REGD("UY", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("VE", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("PR", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("DO", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("AT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("BE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("CY", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("CZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("DK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("EE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("FI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("FR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("DE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("GR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("HU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("IS", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("IE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("IT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("LV", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("LI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("LT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("LU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("MT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("MC", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("NL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("NO", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("PL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("PT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("SK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("SI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("ES", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("SE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("CH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("GB", RTW89_UK, RTW89_UK, RTW89_UK, 0x0),
+ COUNTRY_REGD("AL", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("AZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("BH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("BA", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("BG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("HR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("EG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("GH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("IQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("IL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("JO", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("KZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("KE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("KW", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("KG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("LB", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("LS", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("MK", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("MA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("MZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("NA", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("NG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("OM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("QA", RTW89_QATAR, RTW89_QATAR, RTW89_QATAR, 0x0),
+ COUNTRY_REGD("RO", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("RU", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("SA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("SN", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("RS", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("ME", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("ZA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("TR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("UA", RTW89_UKRAINE, RTW89_UKRAINE, RTW89_UKRAINE, 0x0),
+ COUNTRY_REGD("AE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("YE", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("ZW", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("BD", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("KH", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("CN", RTW89_CN, RTW89_CN, RTW89_CN, 0x0),
+ COUNTRY_REGD("HK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("IN", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("ID", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("KR", RTW89_KCC, RTW89_KCC, RTW89_KCC, 0x1),
+ COUNTRY_REGD("MY", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("PK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("PH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("SG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("LK", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("TW", RTW89_FCC, RTW89_FCC, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("TH", RTW89_THAILAND, RTW89_THAILAND, RTW89_THAILAND, 0x0),
+ COUNTRY_REGD("VN", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("AU", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA, 0x0),
+ COUNTRY_REGD("NZ", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA, 0x0),
+ COUNTRY_REGD("PG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("CA", RTW89_IC, RTW89_IC, RTW89_IC, 0x1),
+ COUNTRY_REGD("JP", RTW89_MKK, RTW89_MKK, RTW89_MKK, 0x0),
+ COUNTRY_REGD("JM", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("AN", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("TT", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("TN", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("AF", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("DZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("AS", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("AD", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("AO", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("AI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("AQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("AG", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("AM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("AW", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("BS", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("BB", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("BY", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("BZ", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("BJ", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("BM", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("BT", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("BW", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("BV", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("IO", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("VG", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("BN", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("BF", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("MM", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("BI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("CM", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("CV", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("KY", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("CF", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("TD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("CX", RTW89_ACMA, RTW89_ACMA, RTW89_NA, 0x0),
+ COUNTRY_REGD("CC", RTW89_ACMA, RTW89_ACMA, RTW89_NA, 0x0),
+ COUNTRY_REGD("KM", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("CG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("CD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("CK", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("CI", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("DJ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("DM", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("GQ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("ER", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("ET", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("FK", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("FO", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("FJ", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("GF", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("PF", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("TF", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("GA", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("GM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("GE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("GI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("GL", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("GP", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("GG", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("GN", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("GW", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("GY", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("HT", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("HM", RTW89_ACMA, RTW89_ACMA, RTW89_NA, 0x0),
+ COUNTRY_REGD("VA", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("IM", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("JE", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("KI", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("XK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("LA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("LR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("LY", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("MO", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("MG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("MW", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("MV", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("ML", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("MH", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("MQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("MR", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("MU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("YT", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("MD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("MN", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("MS", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("NR", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("NP", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("NC", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("NE", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("NU", RTW89_ACMA, RTW89_ACMA, RTW89_NA, 0x0),
+ COUNTRY_REGD("NF", RTW89_ACMA, RTW89_ACMA, RTW89_NA, 0x0),
+ COUNTRY_REGD("MP", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("PW", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("RE", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("RW", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("SH", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("KN", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("LC", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("MF", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("SX", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("PM", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("VC", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("WS", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("SM", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("ST", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("SC", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("SL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("SB", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("SO", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("GS", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("SR", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("SJ", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("SZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("TJ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("TZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("TG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("TK", RTW89_ACMA, RTW89_ACMA, RTW89_NA, 0x0),
+ COUNTRY_REGD("TO", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("TM", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("TC", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("TV", RTW89_ETSI, RTW89_NA, RTW89_NA, 0x0),
+ COUNTRY_REGD("UG", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("VI", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("UZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("VU", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("WF", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("EH", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("ZM", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("CU", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("IR", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("SY", RTW89_ETSI, RTW89_NA, RTW89_NA, 0x0),
+ COUNTRY_REGD("SD", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("PS", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
};
static const char rtw89_alpha2_list_eu[][3] = {
@@ -295,13 +304,16 @@ static const char rtw89_alpha2_list_eu[][3] = {
"RO",
};
-static const struct rtw89_regd *rtw89_regd_find_reg_by_name(const char *alpha2)
+static const struct rtw89_regd *rtw89_regd_find_reg_by_name(struct rtw89_dev *rtwdev,
+ const char *alpha2)
{
+ struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_regd_ctrl *regd_ctrl = &regulatory->ctrl;
u32 i;
- for (i = 0; i < ARRAY_SIZE(rtw89_regd_map); i++) {
- if (!memcmp(rtw89_regd_map[i].alpha2, alpha2, 2))
- return &rtw89_regd_map[i];
+ for (i = 0; i < regd_ctrl->nr; i++) {
+ if (!memcmp(regd_ctrl->map[i].alpha2, alpha2, 2))
+ return &regd_ctrl->map[i];
}
return &rtw89_ww_regd;
@@ -312,22 +324,25 @@ static bool rtw89_regd_is_ww(const struct rtw89_regd *regd)
return regd == &rtw89_ww_regd;
}
-static u8 rtw89_regd_get_index(const struct rtw89_regd *regd)
+static u8 rtw89_regd_get_index(struct rtw89_dev *rtwdev, const struct rtw89_regd *regd)
{
+ struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_regd_ctrl *regd_ctrl = &regulatory->ctrl;
+
BUILD_BUG_ON(ARRAY_SIZE(rtw89_regd_map) > RTW89_REGD_MAX_COUNTRY_NUM);
if (rtw89_regd_is_ww(regd))
return RTW89_REGD_MAX_COUNTRY_NUM;
- return regd - rtw89_regd_map;
+ return regd - regd_ctrl->map;
}
-static u8 rtw89_regd_get_index_by_name(const char *alpha2)
+static u8 rtw89_regd_get_index_by_name(struct rtw89_dev *rtwdev, const char *alpha2)
{
const struct rtw89_regd *regd;
- regd = rtw89_regd_find_reg_by_name(alpha2);
- return rtw89_regd_get_index(regd);
+ regd = rtw89_regd_find_reg_by_name(rtwdev, alpha2);
+ return rtw89_regd_get_index(rtwdev, regd);
}
#define rtw89_debug_regd(_dev, _regd, _desc, _argv...) \
@@ -345,6 +360,7 @@ static void rtw89_regd_setup_unii4(struct rtw89_dev *rtwdev,
struct wiphy *wiphy)
{
struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_regd_ctrl *regd_ctrl = &regulatory->ctrl;
const struct rtw89_chip_info *chip = rtwdev->chip;
struct ieee80211_supported_band *sband;
struct rtw89_acpi_dsm_result res = {};
@@ -382,8 +398,8 @@ static void rtw89_regd_setup_unii4(struct rtw89_dev *rtwdev,
"acpi: eval if allow unii-4: 0x%x\n", val);
bottom:
- for (i = 0; i < ARRAY_SIZE(rtw89_regd_map); i++) {
- const struct rtw89_regd *regd = &rtw89_regd_map[i];
+ for (i = 0; i < regd_ctrl->nr; i++) {
+ const struct rtw89_regd *regd = &regd_ctrl->map[i];
switch (regd->txpwr_regd[RTW89_BAND_5G]) {
case RTW89_FCC:
@@ -406,7 +422,7 @@ static void __rtw89_regd_setup_policy_6ghz(struct rtw89_dev *rtwdev, bool block,
struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
u8 index;
- index = rtw89_regd_get_index_by_name(alpha2);
+ index = rtw89_regd_get_index_by_name(rtwdev, alpha2);
if (index == RTW89_REGD_MAX_COUNTRY_NUM) {
rtw89_debug(rtwdev, RTW89_DBG_REGD, "%s: unknown alpha2 %c%c\n",
__func__, alpha2[0], alpha2[1]);
@@ -474,6 +490,7 @@ out:
static void rtw89_regd_setup_policy_6ghz_sp(struct rtw89_dev *rtwdev)
{
struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_regd_ctrl *regd_ctrl = &regulatory->ctrl;
const struct rtw89_acpi_policy_6ghz_sp *ptr;
struct rtw89_acpi_dsm_result res = {};
bool enable_by_us;
@@ -505,8 +522,8 @@ static void rtw89_regd_setup_policy_6ghz_sp(struct rtw89_dev *rtwdev)
enable_by_us = u8_get_bits(ptr->conf, RTW89_ACPI_CONF_6GHZ_SP_US);
- for (i = 0; i < ARRAY_SIZE(rtw89_regd_map); i++) {
- const struct rtw89_regd *tmp = &rtw89_regd_map[i];
+ for (i = 0; i < regd_ctrl->nr; i++) {
+ const struct rtw89_regd *tmp = &regd_ctrl->map[i];
if (enable_by_us && memcmp(tmp->alpha2, "US", 2) == 0)
clear_bit(i, regulatory->block_6ghz_sp);
@@ -573,8 +590,21 @@ bottom:
int rtw89_regd_setup(struct rtw89_dev *rtwdev)
{
+ struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
+ const struct rtw89_regd_data *regd_data = elm_info->regd;
struct wiphy *wiphy = rtwdev->hw->wiphy;
+ if (regd_data) {
+ regulatory->ctrl.nr = regd_data->nr;
+ regulatory->ctrl.map = regd_data->map;
+ } else {
+ regulatory->ctrl.nr = ARRAY_SIZE(rtw89_regd_map);
+ regulatory->ctrl.map = rtw89_regd_map;
+ }
+
+ regulatory->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT;
+
if (!wiphy)
return -EINVAL;
@@ -585,21 +615,16 @@ int rtw89_regd_setup(struct rtw89_dev *rtwdev)
return 0;
}
-int rtw89_regd_init(struct rtw89_dev *rtwdev,
- void (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request))
+int rtw89_regd_init_hint(struct rtw89_dev *rtwdev)
{
- struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
const struct rtw89_regd *chip_regd;
struct wiphy *wiphy = rtwdev->hw->wiphy;
int ret;
- regulatory->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT;
-
if (!wiphy)
return -EINVAL;
- chip_regd = rtw89_regd_find_reg_by_name(rtwdev->efuse.country_code);
+ chip_regd = rtw89_regd_find_reg_by_name(rtwdev, rtwdev->efuse.country_code);
if (!rtw89_regd_is_ww(chip_regd)) {
rtwdev->regulatory.regd = chip_regd;
/* Ignore country ie if there is a country domain programmed in chip */
@@ -637,7 +662,7 @@ static void rtw89_regd_apply_policy_unii4(struct rtw89_dev *rtwdev,
if (!chip->support_unii4)
return;
- index = rtw89_regd_get_index(regd);
+ index = rtw89_regd_get_index(rtwdev, regd);
if (index != RTW89_REGD_MAX_COUNTRY_NUM &&
!test_bit(index, regulatory->block_unii4))
return;
@@ -655,7 +680,7 @@ static bool regd_is_6ghz_blocked(struct rtw89_dev *rtwdev)
const struct rtw89_regd *regd = regulatory->regd;
u8 index;
- index = rtw89_regd_get_index(regd);
+ index = rtw89_regd_get_index(rtwdev, regd);
if (index != RTW89_REGD_MAX_COUNTRY_NUM &&
!test_bit(index, regulatory->block_6ghz))
return false;
@@ -696,11 +721,36 @@ static void rtw89_regd_apply_policy_6ghz(struct rtw89_dev *rtwdev,
sband->channels[i].flags |= IEEE80211_CHAN_DISABLED;
}
+static void rtw89_regd_apply_policy_tas(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_regd *regd = regulatory->regd;
+ struct rtw89_tas_info *tas = &rtwdev->tas;
+
+ if (!tas->enable)
+ return;
+
+ tas->block_regd = !test_bit(RTW89_REGD_FUNC_TAS, regd->func_bitmap);
+}
+
+static void rtw89_regd_apply_policy_ant_gain(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_regd *regd = regulatory->regd;
+
+ if (!chip->support_ant_gain)
+ return;
+
+ ant_gain->block_country = !test_bit(RTW89_REGD_FUNC_DAG, regd->func_bitmap);
+}
+
static void rtw89_regd_notifier_apply(struct rtw89_dev *rtwdev,
struct wiphy *wiphy,
struct regulatory_request *request)
{
- rtwdev->regulatory.regd = rtw89_regd_find_reg_by_name(request->alpha2);
+ rtwdev->regulatory.regd = rtw89_regd_find_reg_by_name(rtwdev, request->alpha2);
/* This notification might be set from the system of distros,
* and it does not expect the regulatory will be modified by
* connecting to an AP (i.e. country ie).
@@ -713,14 +763,17 @@ static void rtw89_regd_notifier_apply(struct rtw89_dev *rtwdev,
rtw89_regd_apply_policy_unii4(rtwdev, wiphy);
rtw89_regd_apply_policy_6ghz(rtwdev, wiphy);
+ rtw89_regd_apply_policy_tas(rtwdev);
+ rtw89_regd_apply_policy_ant_gain(rtwdev);
}
+static
void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct rtw89_dev *rtwdev = hw->priv;
- mutex_lock(&rtwdev->mutex);
+ wiphy_lock(wiphy);
rtw89_leave_ps_mode(rtwdev);
if (wiphy->regd) {
@@ -736,7 +789,7 @@ void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request
rtw89_core_set_chip_txpwr(rtwdev);
exit:
- mutex_unlock(&rtwdev->mutex);
+ wiphy_unlock(wiphy);
}
/* Maximum Transmit Power field (@raw) can be EIRP or PSD.
@@ -925,7 +978,7 @@ static bool __rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev)
sel = RTW89_REG_6GHZ_POWER_DFLT;
if (sel == RTW89_REG_6GHZ_POWER_STD) {
- index = rtw89_regd_get_index(regd);
+ index = rtw89_regd_get_index(rtwdev, regd);
if (index == RTW89_REGD_MAX_COUNTRY_NUM ||
test_bit(index, regulatory->block_6ghz_sp)) {
rtw89_debug(rtwdev, RTW89_DBG_REGD,
@@ -986,7 +1039,7 @@ int rtw89_reg_6ghz_recalc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvi
unsigned int changed = 0;
int ret;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
/* The result of reg_6ghz_tpe may depend on reg_6ghz_power type,
* so must do reg_6ghz_tpe_recalc() after reg_6ghz_power_recalc().
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
index c56f70267882..0d482cd57f6e 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
@@ -224,10 +224,17 @@ static const struct rtw89_edcca_regs rtw8851b_edcca_regs = {
.edcca_p_mask = B_EDCCA_LVL_MSK1,
.ppdu_level = R_SEG0R_EDCCA_LVL_V1,
.ppdu_mask = B_EDCCA_LVL_MSK3,
- .rpt_a = R_EDCCA_RPT_A,
- .rpt_b = R_EDCCA_RPT_B,
- .rpt_sel = R_EDCCA_RPT_SEL,
- .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ .p = {{
+ .rpt_a = R_EDCCA_RPT_A,
+ .rpt_b = R_EDCCA_RPT_B,
+ .rpt_sel = R_EDCCA_RPT_SEL,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ }, {
+ .rpt_a = R_EDCCA_RPT_P1_A,
+ .rpt_b = R_EDCCA_RPT_P1_B,
+ .rpt_sel = R_EDCCA_RPT_SEL,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_P1_MSK,
+ }},
.tx_collision_t2r_st = R_TX_COLLISION_T2R_ST,
.tx_collision_t2r_st_mask = B_TX_COLLISION_T2R_ST_M,
};
@@ -1596,10 +1603,16 @@ static void rtw8851b_rfk_channel(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_idx chanctx_idx = rtwvif_link->chanctx_idx;
enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
+ rtw89_btc_ntfy_conn_rfk(rtwdev, true);
+
rtw8851b_rx_dck(rtwdev, phy_idx, chanctx_idx);
rtw8851b_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8851b_tssi(rtwdev, phy_idx, true, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8851b_dpk(rtwdev, phy_idx, chanctx_idx);
+
+ rtw89_btc_ntfy_conn_rfk(rtwdev, false);
}
static void rtw8851b_rfk_band_changed(struct rtw89_dev *rtwdev,
@@ -2410,6 +2423,7 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = {
.h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl,
.h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
.h2c_ampdu_cmac_tbl = NULL,
+ .h2c_txtime_cmac_tbl = rtw89_fw_h2c_txtime_cmac_tbl,
.h2c_default_dmac_tbl = NULL,
.h2c_update_beacon = rtw89_fw_h2c_update_beacon,
.h2c_ba_cam = rtw89_fw_h2c_ba_cam,
@@ -2445,6 +2459,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.try_ce_fw = true,
.bbmcu_nr = 0,
.needed_fw_elms = 0,
+ .fw_blacklist = NULL,
.fifo_size = 196608,
.small_fifo_size = true,
.dle_scc_rsvd_size = 98304,
@@ -2483,10 +2498,13 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = true,
.support_ant_gain = false,
+ .support_tas = false,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
+ .rx_freq_frome_ie = true,
.hw_sec_hdr = false,
.hw_mgmt_tx_encrypt = false,
+ .hw_tkip_crypto = false,
.rf_path_num = 1,
.tx_nss = 1,
.rx_nss = 1,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index 9bd2842c27d5..286334e26c84 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -522,10 +522,17 @@ static const struct rtw89_edcca_regs rtw8852a_edcca_regs = {
.edcca_p_mask = B_EDCCA_LVL_MSK1,
.ppdu_level = R_SEG0R_EDCCA_LVL,
.ppdu_mask = B_EDCCA_LVL_MSK3,
- .rpt_a = R_EDCCA_RPT_A,
- .rpt_b = R_EDCCA_RPT_B,
- .rpt_sel = R_EDCCA_RPT_SEL,
- .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ .p = {{
+ .rpt_a = R_EDCCA_RPT_A,
+ .rpt_b = R_EDCCA_RPT_B,
+ .rpt_sel = R_EDCCA_RPT_SEL,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ }, {
+ .rpt_a = R_EDCCA_RPT_P1_A,
+ .rpt_b = R_EDCCA_RPT_P1_B,
+ .rpt_sel = R_EDCCA_RPT_SEL,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_P1_MSK,
+ }},
.tx_collision_t2r_st = R_TX_COLLISION_T2R_ST,
.tx_collision_t2r_st_mask = B_TX_COLLISION_T2R_ST_M,
};
@@ -1356,10 +1363,16 @@ static void rtw8852a_rfk_channel(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_idx chanctx_idx = rtwvif_link->chanctx_idx;
enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
+ rtw89_btc_ntfy_conn_rfk(rtwdev, true);
+
rtw8852a_rx_dck(rtwdev, phy_idx, true, chanctx_idx);
rtw8852a_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8852a_tssi(rtwdev, phy_idx, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8852a_dpk(rtwdev, phy_idx, chanctx_idx);
+
+ rtw89_btc_ntfy_conn_rfk(rtwdev, false);
}
static void rtw8852a_rfk_band_changed(struct rtw89_dev *rtwdev,
@@ -2136,6 +2149,7 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl,
.h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
.h2c_ampdu_cmac_tbl = NULL,
+ .h2c_txtime_cmac_tbl = rtw89_fw_h2c_txtime_cmac_tbl,
.h2c_default_dmac_tbl = NULL,
.h2c_update_beacon = rtw89_fw_h2c_update_beacon,
.h2c_ba_cam = rtw89_fw_h2c_ba_cam,
@@ -2162,6 +2176,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.try_ce_fw = false,
.bbmcu_nr = 0,
.needed_fw_elms = 0,
+ .fw_blacklist = NULL,
.fifo_size = 458752,
.small_fifo_size = false,
.dle_scc_rsvd_size = 0,
@@ -2201,10 +2216,13 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = false,
.support_ant_gain = false,
+ .support_tas = false,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
+ .rx_freq_frome_ie = true,
.hw_sec_hdr = false,
.hw_mgmt_tx_encrypt = false,
+ .hw_tkip_crypto = false,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index dfb2bf61b0b8..eceb4fb9880d 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -189,10 +189,17 @@ static const struct rtw89_edcca_regs rtw8852b_edcca_regs = {
.edcca_p_mask = B_EDCCA_LVL_MSK1,
.ppdu_level = R_SEG0R_EDCCA_LVL_V1,
.ppdu_mask = B_EDCCA_LVL_MSK3,
- .rpt_a = R_EDCCA_RPT_A,
- .rpt_b = R_EDCCA_RPT_B,
- .rpt_sel = R_EDCCA_RPT_SEL,
- .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ .p = {{
+ .rpt_a = R_EDCCA_RPT_A,
+ .rpt_b = R_EDCCA_RPT_B,
+ .rpt_sel = R_EDCCA_RPT_SEL,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ }, {
+ .rpt_a = R_EDCCA_RPT_P1_A,
+ .rpt_b = R_EDCCA_RPT_P1_B,
+ .rpt_sel = R_EDCCA_RPT_SEL,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_P1_MSK,
+ }},
.tx_collision_t2r_st = R_TX_COLLISION_T2R_ST,
.tx_collision_t2r_st_mask = B_TX_COLLISION_T2R_ST_M,
};
@@ -568,10 +575,16 @@ static void rtw8852b_rfk_channel(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_idx chanctx_idx = rtwvif_link->chanctx_idx;
enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
+ rtw89_btc_ntfy_conn_rfk(rtwdev, true);
+
rtw8852b_rx_dck(rtwdev, phy_idx, chanctx_idx);
rtw8852b_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8852b_tssi(rtwdev, phy_idx, true, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8852b_dpk(rtwdev, phy_idx, chanctx_idx);
+
+ rtw89_btc_ntfy_conn_rfk(rtwdev, false);
}
static void rtw8852b_rfk_band_changed(struct rtw89_dev *rtwdev,
@@ -763,6 +776,7 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl,
.h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
.h2c_ampdu_cmac_tbl = NULL,
+ .h2c_txtime_cmac_tbl = rtw89_fw_h2c_txtime_cmac_tbl,
.h2c_default_dmac_tbl = NULL,
.h2c_update_beacon = rtw89_fw_h2c_update_beacon,
.h2c_ba_cam = rtw89_fw_h2c_ba_cam,
@@ -798,6 +812,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.try_ce_fw = true,
.bbmcu_nr = 0,
.needed_fw_elms = 0,
+ .fw_blacklist = &rtw89_fw_blacklist_default,
.fifo_size = 196608,
.small_fifo_size = true,
.dle_scc_rsvd_size = 98304,
@@ -837,10 +852,13 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = true,
.support_ant_gain = true,
+ .support_tas = false,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
+ .rx_freq_frome_ie = true,
.hw_sec_hdr = false,
.hw_mgmt_tx_encrypt = false,
+ .hw_tkip_crypto = false,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
index 0e094ce9c9b0..99c9505b3cbd 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
@@ -621,9 +621,9 @@ static void rtw8852bt_ext_loss_avg_update(struct rtw89_dev *rtwdev,
if (ext_loss_a == ext_loss_b) {
ext_loss_avg = ext_loss_a;
} else {
- linear = rtw89_db_2_linear(abs(ext_loss_a - ext_loss_b)) + 1;
- linear = DIV_ROUND_CLOSEST_ULL(linear / 2, 1 << RTW89_LINEAR_FRAC_BITS);
- ext_loss_avg = rtw89_linear_2_db(linear);
+ linear = rtw89_db_to_linear(abs(ext_loss_a - ext_loss_b)) + 1;
+ linear /= 2;
+ ext_loss_avg = rtw89_linear_to_db(linear);
ext_loss_avg += min(ext_loss_a, ext_loss_b);
}
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
index ef47a5facc83..fbf82d42687b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
@@ -3585,9 +3585,10 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
u8 ch_idx = _tssi_ch_to_idx(rtwdev, channel);
struct rtw8852bx_bb_tssi_bak tssi_bak;
s32 aliment_diff, tssi_cw_default;
- u32 start_time, finish_time;
u32 bb_reg_backup[8] = {0};
+ ktime_t start_time;
const s16 *power;
+ s64 this_time;
u8 band;
bool ok;
u32 tmp;
@@ -3613,7 +3614,7 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
return;
}
- start_time = ktime_get_ns();
+ start_time = ktime_get();
if (chan->band_type == RTW89_BAND_2G)
power = power_2g;
@@ -3738,12 +3739,12 @@ out:
rtw8852bx_bb_restore_tssi(rtwdev, phy, &tssi_bak);
rtw8852bx_bb_tx_mode_switch(rtwdev, phy, 0);
- finish_time = ktime_get_ns();
- tssi_info->tssi_alimk_time += finish_time - start_time;
+ this_time = ktime_us_delta(ktime_get(), start_time);
+ tssi_info->tssi_alimk_time += this_time;
rtw89_debug(rtwdev, RTW89_DBG_RFK,
- "[TSSI PA K] %s processing time = %d ms\n", __func__,
- tssi_info->tssi_alimk_time);
+ "[TSSI PA K] %s processing time = %lld us (acc = %llu us)\n",
+ __func__, this_time, tssi_info->tssi_alimk_time);
}
void rtw8852b_dpk_init(struct rtw89_dev *rtwdev)
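The _tssi_alimentk() timing fix above (repeated for rtw8852bt further below) replaces a u32 nanosecond difference from ktime_get_ns(), which wraps after roughly 4.29 s and was printed as "ms", with a ktime_t start stamp and an s64 microsecond delta. A minimal sketch of that measurement pattern (illustration only, not taken from this patch):

static s64 example_measure_us(void (*calib)(struct rtw89_dev *rtwdev),
                              struct rtw89_dev *rtwdev)
{
        ktime_t start = ktime_get();                /* monotonic start stamp */

        calib(rtwdev);                              /* work being timed */

        return ktime_us_delta(ktime_get(), start);  /* elapsed time in us */
}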
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
index bde3e1fb7ca6..bbf37442c492 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
@@ -187,10 +187,17 @@ static const struct rtw89_edcca_regs rtw8852bt_edcca_regs = {
.edcca_p_mask = B_EDCCA_LVL_MSK1,
.ppdu_level = R_SEG0R_EDCCA_LVL_V1,
.ppdu_mask = B_EDCCA_LVL_MSK3,
- .rpt_a = R_EDCCA_RPT_A,
- .rpt_b = R_EDCCA_RPT_B,
- .rpt_sel = R_EDCCA_RPT_SEL,
- .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ .p = {{
+ .rpt_a = R_EDCCA_RPT_A,
+ .rpt_b = R_EDCCA_RPT_B,
+ .rpt_sel = R_EDCCA_RPT_SEL,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ }, {
+ .rpt_a = R_EDCCA_RPT_P1_A,
+ .rpt_b = R_EDCCA_RPT_P1_B,
+ .rpt_sel = R_EDCCA_RPT_SEL,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_P1_MSK,
+ }},
.tx_collision_t2r_st = R_TX_COLLISION_T2R_ST,
.tx_collision_t2r_st_mask = B_TX_COLLISION_T2R_ST_M,
};
@@ -541,10 +548,16 @@ static void rtw8852bt_rfk_channel(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_idx chanctx_idx = rtwvif_link->chanctx_idx;
enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
+ rtw89_btc_ntfy_conn_rfk(rtwdev, true);
+
rtw8852bt_rx_dck(rtwdev, phy_idx, chanctx_idx);
rtw8852bt_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8852bt_tssi(rtwdev, phy_idx, true, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8852bt_dpk(rtwdev, phy_idx, chanctx_idx);
+
+ rtw89_btc_ntfy_conn_rfk(rtwdev, false);
}
static void rtw8852bt_rfk_band_changed(struct rtw89_dev *rtwdev,
@@ -697,6 +710,7 @@ static const struct rtw89_chip_ops rtw8852bt_chip_ops = {
.h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl,
.h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
.h2c_ampdu_cmac_tbl = NULL,
+ .h2c_txtime_cmac_tbl = rtw89_fw_h2c_txtime_cmac_tbl,
.h2c_default_dmac_tbl = NULL,
.h2c_update_beacon = rtw89_fw_h2c_update_beacon,
.h2c_ba_cam = rtw89_fw_h2c_ba_cam,
@@ -732,6 +746,7 @@ const struct rtw89_chip_info rtw8852bt_chip_info = {
.try_ce_fw = true,
.bbmcu_nr = 0,
.needed_fw_elms = RTW89_AX_GEN_DEF_NEEDED_FW_ELEMENTS_NO_6GHZ,
+ .fw_blacklist = &rtw89_fw_blacklist_default,
.fifo_size = 458752,
.small_fifo_size = true,
.dle_scc_rsvd_size = 98304,
@@ -770,10 +785,13 @@ const struct rtw89_chip_info rtw8852bt_chip_info = {
BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = true,
.support_ant_gain = true,
+ .support_tas = false,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
+ .rx_freq_frome_ie = true,
.hw_sec_hdr = false,
.hw_mgmt_tx_encrypt = false,
+ .hw_tkip_crypto = true,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
index 336a83e1d46b..6e6889eea9a0 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
@@ -3663,9 +3663,10 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
u8 ch_idx = _tssi_ch_to_idx(rtwdev, channel);
struct rtw8852bx_bb_tssi_bak tssi_bak;
s32 aliment_diff, tssi_cw_default;
- u32 start_time, finish_time;
u32 bb_reg_backup[8] = {};
+ ktime_t start_time;
const s16 *power;
+ s64 this_time;
u8 band;
bool ok;
u32 tmp;
@@ -3675,7 +3676,7 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
"======> %s channel=%d path=%d\n", __func__, channel,
path);
- start_time = ktime_get_ns();
+ start_time = ktime_get();
if (chan->band_type == RTW89_BAND_2G)
power = power_2g;
@@ -3802,12 +3803,12 @@ out:
rtw8852bx_bb_restore_tssi(rtwdev, phy, &tssi_bak);
rtw8852bx_bb_tx_mode_switch(rtwdev, phy, 0);
- finish_time = ktime_get_ns();
- tssi_info->tssi_alimk_time += finish_time - start_time;
+ this_time = ktime_us_delta(ktime_get(), start_time);
+ tssi_info->tssi_alimk_time += this_time;
rtw89_debug(rtwdev, RTW89_DBG_RFK,
- "[TSSI PA K] %s processing time = %d ms\n", __func__,
- tssi_info->tssi_alimk_time);
+ "[TSSI PA K] %s processing time = %lld us (acc = %llu us)\n",
+ __func__, this_time, tssi_info->tssi_alimk_time);
}
void rtw8852bt_dpk_init(struct rtw89_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index bc84b15e7826..08bcdf246382 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -12,6 +12,7 @@
#include "rtw8852c.h"
#include "rtw8852c_rfk.h"
#include "rtw8852c_table.h"
+#include "sar.h"
#include "util.h"
#define RTW8852C_FW_FORMAT_MAX 1
@@ -186,10 +187,17 @@ static const struct rtw89_edcca_regs rtw8852c_edcca_regs = {
.edcca_p_mask = B_EDCCA_LVL_MSK1,
.ppdu_level = R_SEG0R_EDCCA_LVL,
.ppdu_mask = B_EDCCA_LVL_MSK3,
- .rpt_a = R_EDCCA_RPT_A,
- .rpt_b = R_EDCCA_RPT_B,
- .rpt_sel = R_EDCCA_RPT_SEL,
- .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ .p = {{
+ .rpt_a = R_EDCCA_RPT_A,
+ .rpt_b = R_EDCCA_RPT_B,
+ .rpt_sel = R_EDCCA_RPT_SEL,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ }, {
+ .rpt_a = R_EDCCA_RPT_P1_A,
+ .rpt_b = R_EDCCA_RPT_P1_B,
+ .rpt_sel = R_EDCCA_RPT_SEL,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_P1_MSK,
+ }},
.tx_collision_t2r_st = R_TX_COLLISION_T2R_ST,
.tx_collision_t2r_st_mask = B_TX_COLLISION_T2R_ST_M,
};
@@ -1853,10 +1861,16 @@ static void rtw8852c_rfk_channel(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
rtw8852c_mcc_get_ch_info(rtwdev, phy_idx);
+ rtw89_btc_ntfy_conn_rfk(rtwdev, true);
+
rtw8852c_rx_dck(rtwdev, phy_idx, false);
rtw8852c_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8852c_tssi(rtwdev, phy_idx, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8852c_dpk(rtwdev, phy_idx, chanctx_idx);
+
+ rtw89_btc_ntfy_conn_rfk(rtwdev, false);
rtw89_fw_h2c_rf_ntfy_mcc(rtwdev);
}
@@ -2867,6 +2881,7 @@ static int rtw8852c_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
static const struct rtw89_chanctx_listener rtw8852c_chanctx_listener = {
.callbacks[RTW89_CHANCTX_CALLBACK_RFK] = rtw8852c_rfk_chanctx_cb,
+ .callbacks[RTW89_CHANCTX_CALLBACK_TAS] = rtw89_tas_chanctx_cb,
};
#ifdef CONFIG_PM
@@ -2928,6 +2943,7 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl,
.h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
.h2c_ampdu_cmac_tbl = NULL,
+ .h2c_txtime_cmac_tbl = rtw89_fw_h2c_txtime_cmac_tbl,
.h2c_default_dmac_tbl = NULL,
.h2c_update_beacon = rtw89_fw_h2c_update_beacon,
.h2c_ba_cam = rtw89_fw_h2c_ba_cam,
@@ -2954,6 +2970,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.try_ce_fw = false,
.bbmcu_nr = 0,
.needed_fw_elms = 0,
+ .fw_blacklist = &rtw89_fw_blacklist_default,
.fifo_size = 458752,
.small_fifo_size = false,
.dle_scc_rsvd_size = 0,
@@ -2996,10 +3013,13 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
BIT(NL80211_CHAN_WIDTH_160),
.support_unii4 = true,
.support_ant_gain = true,
+ .support_tas = true,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = true,
+ .rx_freq_frome_ie = false,
.hw_sec_hdr = true,
.hw_mgmt_tx_encrypt = true,
+ .hw_tkip_crypto = true,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
index 11d66bfceb15..8082592db84a 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
@@ -205,10 +205,17 @@ static const struct rtw89_edcca_regs rtw8922a_edcca_regs = {
.edcca_p_mask = B_EDCCA_LVL_MSK1,
.ppdu_level = R_SEG0R_PPDU_LVL_BE,
.ppdu_mask = B_EDCCA_LVL_MSK1,
- .rpt_a = R_EDCCA_RPT_A_BE,
- .rpt_b = R_EDCCA_RPT_B_BE,
- .rpt_sel = R_EDCCA_RPT_SEL_BE,
- .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ .p = {{
+ .rpt_a = R_EDCCA_RPT_A_BE,
+ .rpt_b = R_EDCCA_RPT_B_BE,
+ .rpt_sel = R_EDCCA_RPT_SEL_BE,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ }, {
+ .rpt_a = R_EDCCA_RPT_P1_A_BE,
+ .rpt_b = R_EDCCA_RPT_P1_B_BE,
+ .rpt_sel = R_EDCCA_RPT_SEL_BE,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_P1_MSK,
+ }},
.rpt_sel_be = R_EDCCA_RPTREG_SEL_BE,
.rpt_sel_be_mask = B_EDCCA_RPTREG_SEL_BE_MSK,
.tx_collision_t2r_st = R_TX_COLLISION_T2R_ST_BE,
@@ -2149,6 +2156,56 @@ static void rtw8922a_set_txpwr_ref(struct rtw89_dev *rtwdev,
B_BE_PWR_REF_CTRL_CCK, ref_cck);
}
+static const struct rtw89_reg_def rtw8922a_txpwr_ref[][3] = {
+ {{ .addr = R_TXAGC_REF_DBM_P0, .mask = B_TXAGC_OFDM_REF_DBM_P0},
+ { .addr = R_TXAGC_REF_DBM_P0, .mask = B_TXAGC_CCK_REF_DBM_P0},
+ { .addr = R_TSSI_K_P0, .mask = B_TSSI_K_OFDM_P0}
+ },
+ {{ .addr = R_TXAGC_REF_DBM_RF1_P0, .mask = B_TXAGC_OFDM_REF_DBM_RF1_P0},
+ { .addr = R_TXAGC_REF_DBM_RF1_P0, .mask = B_TXAGC_CCK_REF_DBM_RF1_P0},
+ { .addr = R_TSSI_K_RF1_P0, .mask = B_TSSI_K_OFDM_RF1_P0}
+ },
+};
+
+static void rtw8922a_set_txpwr_diff(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ s16 pwr_ofst = rtw89_phy_ant_gain_pwr_offset(rtwdev, chan);
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ static const u32 path_ofst[] = {0x0, 0x100};
+ const struct rtw89_reg_def *txpwr_ref;
+ static const s16 tssi_k_base = 0x12;
+ s16 tssi_k_ofst = abs(pwr_ofst) + tssi_k_base;
+ s16 ofst_dec[RF_PATH_NUM_8922A];
+ s16 tssi_k[RF_PATH_NUM_8922A];
+ s16 pwr_ref_ofst;
+ s16 pwr_ref = 0;
+ u8 i;
+
+ if (rtwdev->hal.cv == CHIP_CAV)
+ pwr_ref = 16;
+
+ pwr_ref <<= chip->txpwr_factor_rf;
+ pwr_ref_ofst = pwr_ref - rtw89_phy_txpwr_bb_to_rf(rtwdev, abs(pwr_ofst));
+
+ ofst_dec[RF_PATH_A] = pwr_ofst > 0 ? pwr_ref : pwr_ref_ofst;
+ ofst_dec[RF_PATH_B] = pwr_ofst > 0 ? pwr_ref_ofst : pwr_ref;
+ tssi_k[RF_PATH_A] = pwr_ofst > 0 ? tssi_k_base : tssi_k_ofst;
+ tssi_k[RF_PATH_B] = pwr_ofst > 0 ? tssi_k_ofst : tssi_k_base;
+
+ for (i = 0; i < RF_PATH_NUM_8922A; i++) {
+ txpwr_ref = rtw8922a_txpwr_ref[phy_idx];
+
+ rtw89_phy_write32_mask(rtwdev, txpwr_ref[0].addr + path_ofst[i],
+ txpwr_ref[0].mask, ofst_dec[i]);
+ rtw89_phy_write32_mask(rtwdev, txpwr_ref[1].addr + path_ofst[i],
+ txpwr_ref[1].mask, ofst_dec[i]);
+ rtw89_phy_write32_mask(rtwdev, txpwr_ref[2].addr + path_ofst[i],
+ txpwr_ref[2].mask, tssi_k[i]);
+ }
+}
+
static void rtw8922a_bb_tx_triangular(struct rtw89_dev *rtwdev, bool en,
enum rtw89_phy_idx phy_idx)
{
@@ -2185,6 +2242,8 @@ static void rtw8922a_set_txpwr(struct rtw89_dev *rtwdev,
rtw8922a_set_tx_shape(rtwdev, chan, phy_idx);
rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx);
rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
+ rtw8922a_set_txpwr_diff(rtwdev, chan, phy_idx);
+ rtw8922a_set_txpwr_ref(rtwdev, phy_idx);
}
static void rtw8922a_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
@@ -2695,6 +2754,7 @@ static const struct rtw89_chip_ops rtw8922a_chip_ops = {
.h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl_g7,
.h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl_g7,
.h2c_ampdu_cmac_tbl = rtw89_fw_h2c_ampdu_cmac_tbl_g7,
+ .h2c_txtime_cmac_tbl = rtw89_fw_h2c_txtime_cmac_tbl_g7,
.h2c_default_dmac_tbl = rtw89_fw_h2c_default_dmac_tbl_v2,
.h2c_update_beacon = rtw89_fw_h2c_update_beacon_be,
.h2c_ba_cam = rtw89_fw_h2c_ba_cam_v1,
@@ -2721,6 +2781,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.try_ce_fw = false,
.bbmcu_nr = 1,
.needed_fw_elms = RTW89_BE_GEN_DEF_NEEDED_FW_ELEMENTS,
+ .fw_blacklist = &rtw89_fw_blacklist_default,
.fifo_size = 589824,
.small_fifo_size = false,
.dle_scc_rsvd_size = 0,
@@ -2760,11 +2821,14 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
BIT(NL80211_CHAN_WIDTH_80) |
BIT(NL80211_CHAN_WIDTH_160),
.support_unii4 = true,
- .support_ant_gain = false,
+ .support_ant_gain = true,
+ .support_tas = false,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
+ .rx_freq_frome_ie = false,
.hw_sec_hdr = true,
.hw_mgmt_tx_encrypt = true,
+ .hw_tkip_crypto = true,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
diff --git a/drivers/net/wireless/realtek/rtw89/sar.c b/drivers/net/wireless/realtek/rtw89/sar.c
index 871f45a6508c..0b5af9528702 100644
--- a/drivers/net/wireless/realtek/rtw89/sar.c
+++ b/drivers/net/wireless/realtek/rtw89/sar.c
@@ -7,10 +7,16 @@
#include "phy.h"
#include "reg.h"
#include "sar.h"
+#include "util.h"
#define RTW89_TAS_FACTOR 2 /* unit: 0.25 dBm */
+#define RTW89_TAS_SAR_GAP (1 << RTW89_TAS_FACTOR)
#define RTW89_TAS_DPR_GAP (1 << RTW89_TAS_FACTOR)
#define RTW89_TAS_DELTA (2 << RTW89_TAS_FACTOR)
+#define RTW89_TAS_TX_RATIO_THRESHOLD 70
+#define RTW89_TAS_DFLT_TX_RATIO 80
+#define RTW89_TAS_DPR_ON_OFFSET (RTW89_TAS_DELTA + RTW89_TAS_SAR_GAP)
+#define RTW89_TAS_DPR_OFF_OFFSET (4 << RTW89_TAS_FACTOR)
static enum rtw89_sar_subband rtw89_sar_get_subband(struct rtw89_dev *rtwdev,
u32 center_freq)
@@ -99,7 +105,7 @@ struct rtw89_sar_handler rtw89_sar_handlers[RTW89_SAR_SOURCE_NR] = {
typeof(_dev) _d = (_dev); \
BUILD_BUG_ON(!rtw89_sar_handlers[_s].descr_sar_source); \
BUILD_BUG_ON(!rtw89_sar_handlers[_s].query_sar_config); \
- lockdep_assert_held(&_d->mutex); \
+ lockdep_assert_wiphy(_d->hw->wiphy); \
_d->sar._cfg_name = *(_cfg_data); \
_d->sar.src = _s; \
} while (0)
@@ -117,8 +123,8 @@ static s8 rtw89_txpwr_sar_to_mac(struct rtw89_dev *rtwdev, u8 fct, s32 cfg)
RTW89_SAR_TXPWR_MAC_MAX);
}
-static s8 rtw89_txpwr_tas_to_sar(const struct rtw89_sar_handler *sar_hdl,
- s8 cfg)
+static s32 rtw89_txpwr_tas_to_sar(const struct rtw89_sar_handler *sar_hdl,
+ s32 cfg)
{
const u8 fct = sar_hdl->txpwr_factor_sar;
@@ -128,8 +134,8 @@ static s8 rtw89_txpwr_tas_to_sar(const struct rtw89_sar_handler *sar_hdl,
return cfg >> (RTW89_TAS_FACTOR - fct);
}
-static s8 rtw89_txpwr_sar_to_tas(const struct rtw89_sar_handler *sar_hdl,
- s8 cfg)
+static s32 rtw89_txpwr_sar_to_tas(const struct rtw89_sar_handler *sar_hdl,
+ s32 cfg)
{
const u8 fct = sar_hdl->txpwr_factor_sar;
@@ -139,18 +145,48 @@ static s8 rtw89_txpwr_sar_to_tas(const struct rtw89_sar_handler *sar_hdl,
return cfg << (RTW89_TAS_FACTOR - fct);
}
+static bool rtw89_tas_is_active(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_tas_info *tas = &rtwdev->tas;
+ struct rtw89_vif *rtwvif;
+
+ if (!tas->enable)
+ return false;
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ if (ieee80211_vif_is_mld(rtwvif_to_vif(rtwvif)))
+ return false;
+ }
+
+ return true;
+}
+
+static const char *rtw89_tas_state_str(enum rtw89_tas_state state)
+{
+ switch (state) {
+ case RTW89_TAS_STATE_DPR_OFF:
+ return "DPR OFF";
+ case RTW89_TAS_STATE_DPR_ON:
+ return "DPR ON";
+ case RTW89_TAS_STATE_STATIC_SAR:
+ return "STATIC SAR";
+ default:
+ return NULL;
+ }
+}
+
s8 rtw89_query_sar(struct rtw89_dev *rtwdev, u32 center_freq)
{
const enum rtw89_sar_sources src = rtwdev->sar.src;
/* its members are protected by rtw89_sar_set_src() */
const struct rtw89_sar_handler *sar_hdl = &rtw89_sar_handlers[src];
struct rtw89_tas_info *tas = &rtwdev->tas;
- s8 delta;
+ s32 offset;
int ret;
s32 cfg;
u8 fct;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
if (src == RTW89_SAR_SOURCE_NONE)
return RTW89_SAR_TXPWR_MAC_MAX;
@@ -159,15 +195,17 @@ s8 rtw89_query_sar(struct rtw89_dev *rtwdev, u32 center_freq)
if (ret)
return RTW89_SAR_TXPWR_MAC_MAX;
- if (tas->enable) {
+ if (rtw89_tas_is_active(rtwdev)) {
switch (tas->state) {
case RTW89_TAS_STATE_DPR_OFF:
- return RTW89_SAR_TXPWR_MAC_MAX;
+ offset = rtw89_txpwr_tas_to_sar(sar_hdl, RTW89_TAS_DPR_OFF_OFFSET);
+ cfg += offset;
+ break;
case RTW89_TAS_STATE_DPR_ON:
- delta = rtw89_txpwr_tas_to_sar(sar_hdl, tas->delta);
- cfg -= delta;
+ offset = rtw89_txpwr_tas_to_sar(sar_hdl, RTW89_TAS_DPR_ON_OFFSET);
+ cfg -= offset;
break;
- case RTW89_TAS_STATE_DPR_FORBID:
+ case RTW89_TAS_STATE_STATIC_SAR:
default:
break;
}
@@ -178,72 +216,91 @@ s8 rtw89_query_sar(struct rtw89_dev *rtwdev, u32 center_freq)
return rtw89_txpwr_sar_to_mac(rtwdev, fct, cfg);
}
-void rtw89_print_sar(struct seq_file *m, struct rtw89_dev *rtwdev, u32 center_freq)
+int rtw89_print_sar(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
+ u32 center_freq)
{
const enum rtw89_sar_sources src = rtwdev->sar.src;
/* its members are protected by rtw89_sar_set_src() */
const struct rtw89_sar_handler *sar_hdl = &rtw89_sar_handlers[src];
const u8 fct_mac = rtwdev->chip->txpwr_factor_mac;
+ char *p = buf, *end = buf + bufsz;
int ret;
s32 cfg;
u8 fct;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
if (src == RTW89_SAR_SOURCE_NONE) {
- seq_puts(m, "no SAR is applied\n");
- return;
+ p += scnprintf(p, end - p, "no SAR is applied\n");
+ goto out;
}
- seq_printf(m, "source: %d (%s)\n", src, sar_hdl->descr_sar_source);
+ p += scnprintf(p, end - p, "source: %d (%s)\n", src,
+ sar_hdl->descr_sar_source);
ret = sar_hdl->query_sar_config(rtwdev, center_freq, &cfg);
if (ret) {
- seq_printf(m, "config: return code: %d\n", ret);
- seq_printf(m, "assign: max setting: %d (unit: 1/%lu dBm)\n",
- RTW89_SAR_TXPWR_MAC_MAX, BIT(fct_mac));
- return;
+ p += scnprintf(p, end - p, "config: return code: %d\n", ret);
+ p += scnprintf(p, end - p,
+ "assign: max setting: %d (unit: 1/%lu dBm)\n",
+ RTW89_SAR_TXPWR_MAC_MAX, BIT(fct_mac));
+ goto out;
}
fct = sar_hdl->txpwr_factor_sar;
- seq_printf(m, "config: %d (unit: 1/%lu dBm)\n", cfg, BIT(fct));
+ p += scnprintf(p, end - p, "config: %d (unit: 1/%lu dBm)\n", cfg,
+ BIT(fct));
+
+out:
+ return p - buf;
}
-void rtw89_print_tas(struct seq_file *m, struct rtw89_dev *rtwdev)
+int rtw89_print_tas(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_tas_info *tas = &rtwdev->tas;
+ char *p = buf, *end = buf + bufsz;
- if (!tas->enable) {
- seq_puts(m, "no TAS is applied\n");
- return;
+ if (!rtw89_tas_is_active(rtwdev)) {
+ p += scnprintf(p, end - p, "no TAS is applied\n");
+ goto out;
}
- seq_printf(m, "DPR gap: %d\n", tas->dpr_gap);
- seq_printf(m, "TAS delta: %d\n", tas->delta);
+ p += scnprintf(p, end - p, "State: %s\n",
+ rtw89_tas_state_str(tas->state));
+ p += scnprintf(p, end - p, "Average time: %d\n",
+ tas->window_size * 2);
+ p += scnprintf(p, end - p, "SAR gap: %d dBm\n",
+ RTW89_TAS_SAR_GAP >> RTW89_TAS_FACTOR);
+ p += scnprintf(p, end - p, "DPR gap: %d dBm\n",
+ RTW89_TAS_DPR_GAP >> RTW89_TAS_FACTOR);
+ p += scnprintf(p, end - p, "DPR ON offset: %d dBm\n",
+ RTW89_TAS_DPR_ON_OFFSET >> RTW89_TAS_FACTOR);
+ p += scnprintf(p, end - p, "DPR OFF offset: %d dBm\n",
+ RTW89_TAS_DPR_OFF_OFFSET >> RTW89_TAS_FACTOR);
+
+out:
+ return p - buf;
}
static int rtw89_apply_sar_common(struct rtw89_dev *rtwdev,
const struct rtw89_sar_cfg_common *sar)
{
enum rtw89_sar_sources src;
- int ret = 0;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
src = rtwdev->sar.src;
if (src != RTW89_SAR_SOURCE_NONE && src != RTW89_SAR_SOURCE_COMMON) {
rtw89_warn(rtwdev, "SAR source: %d is in use", src);
- ret = -EBUSY;
- goto exit;
+ return -EBUSY;
}
rtw89_sar_set_src(rtwdev, RTW89_SAR_SOURCE_COMMON, cfg_common, sar);
rtw89_core_set_chip_txpwr(rtwdev);
+ rtw89_tas_reset(rtwdev, false);
-exit:
- mutex_unlock(&rtwdev->mutex);
- return ret;
+ return 0;
}
static const struct cfg80211_sar_freq_ranges rtw89_common_sar_freq_ranges[] = {
@@ -279,6 +336,8 @@ int rtw89_ops_set_sar_specs(struct ieee80211_hw *hw,
s32 power;
u32 i, idx;
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
if (sar->type != NL80211_SAR_TYPE_POWER)
return -EINVAL;
@@ -304,65 +363,174 @@ int rtw89_ops_set_sar_specs(struct ieee80211_hw *hw,
return rtw89_apply_sar_common(rtwdev, &sar_common);
}
-static void rtw89_tas_state_update(struct rtw89_dev *rtwdev)
+static bool rtw89_tas_query_sar_config(struct rtw89_dev *rtwdev, s32 *cfg)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
const enum rtw89_sar_sources src = rtwdev->sar.src;
/* its members are protected by rtw89_sar_set_src() */
const struct rtw89_sar_handler *sar_hdl = &rtw89_sar_handlers[src];
- struct rtw89_tas_info *tas = &rtwdev->tas;
- s32 txpwr_avg = tas->total_txpwr / RTW89_TAS_MAX_WINDOW / PERCENT;
- s32 dpr_on_threshold, dpr_off_threshold, cfg;
- enum rtw89_tas_state state = tas->state;
- const struct rtw89_chan *chan;
int ret;
- lockdep_assert_held(&rtwdev->mutex);
-
if (src == RTW89_SAR_SOURCE_NONE)
- return;
+ return false;
- chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
- ret = sar_hdl->query_sar_config(rtwdev, chan->freq, &cfg);
+ ret = sar_hdl->query_sar_config(rtwdev, chan->freq, cfg);
if (ret)
+ return false;
+
+ *cfg = rtw89_txpwr_sar_to_tas(sar_hdl, *cfg);
+
+ return true;
+}
+
+static void rtw89_tas_state_update(struct rtw89_dev *rtwdev,
+ enum rtw89_tas_state state)
+{
+ struct rtw89_tas_info *tas = &rtwdev->tas;
+
+ if (tas->state == state)
return;
- cfg = rtw89_txpwr_sar_to_tas(sar_hdl, cfg);
+ rtw89_debug(rtwdev, RTW89_DBG_SAR, "tas: switch state: %s -> %s\n",
+ rtw89_tas_state_str(tas->state), rtw89_tas_state_str(state));
- if (tas->delta >= cfg) {
+ tas->state = state;
+ rtw89_core_set_chip_txpwr(rtwdev);
+}
+
+static u32 rtw89_tas_get_window_size(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ u8 band = chan->band_type;
+ u8 regd = rtw89_regd_get(rtwdev, band);
+
+ switch (regd) {
+ default:
rtw89_debug(rtwdev, RTW89_DBG_SAR,
- "TAS delta exceed SAR limit\n");
- state = RTW89_TAS_STATE_DPR_FORBID;
- goto out;
+ "tas: regd: %u is unhandled\n", regd);
+ fallthrough;
+ case RTW89_IC:
+ case RTW89_KCC:
+ return 180;
+ case RTW89_FCC:
+ switch (band) {
+ case RTW89_BAND_2G:
+ return 50;
+ case RTW89_BAND_5G:
+ return 30;
+ case RTW89_BAND_6G:
+ default:
+ return 15;
+ }
+ break;
+ }
+}
+
+static void rtw89_tas_window_update(struct rtw89_dev *rtwdev)
+{
+ u32 window_size = rtw89_tas_get_window_size(rtwdev);
+ struct rtw89_tas_info *tas = &rtwdev->tas;
+ u64 total_txpwr = 0;
+ u8 head_idx;
+ u32 i, j;
+
+ WARN_ON_ONCE(tas->window_size > RTW89_TAS_TXPWR_WINDOW);
+
+ if (tas->window_size == window_size)
+ return;
+
+ rtw89_debug(rtwdev, RTW89_DBG_SAR, "tas: window update: %u -> %u\n",
+ tas->window_size, window_size);
+
+ head_idx = (tas->txpwr_tail_idx - window_size + 1 + RTW89_TAS_TXPWR_WINDOW) %
+ RTW89_TAS_TXPWR_WINDOW;
+ for (i = 0; i < window_size; i++) {
+ j = (head_idx + i) % RTW89_TAS_TXPWR_WINDOW;
+ total_txpwr += tas->txpwr_history[j];
+ }
+
+ tas->window_size = window_size;
+ tas->total_txpwr = total_txpwr;
+ tas->txpwr_head_idx = head_idx;
+}
+
+static void rtw89_tas_history_update(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_bb_ctx *bb = rtw89_get_bb_ctx(rtwdev, RTW89_PHY_0);
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
+ struct rtw89_tas_info *tas = &rtwdev->tas;
+ u8 tx_ratio = env->ifs_clm_tx_ratio;
+ u64 instant_txpwr, txpwr;
+
+ /* txpwr is in linear units (mW) multiplied by the tx ratio percentage */
+ if (tx_ratio == 0) {
+ /* special case: idle tx power
+ * use -40 dBm * 100 tx ratio
+ */
+ instant_txpwr = rtw89_db_to_linear(-40);
+ txpwr = instant_txpwr * 100;
+ } else {
+ instant_txpwr = tas->instant_txpwr;
+ txpwr = instant_txpwr * tx_ratio;
}
- dpr_on_threshold = cfg;
- dpr_off_threshold = cfg - tas->dpr_gap;
+ tas->total_txpwr += txpwr - tas->txpwr_history[tas->txpwr_head_idx];
+ tas->total_tx_ratio += tx_ratio - tas->tx_ratio_history[tas->tx_ratio_idx];
+ tas->tx_ratio_history[tas->tx_ratio_idx] = tx_ratio;
+
+ tas->txpwr_head_idx = (tas->txpwr_head_idx + 1) % RTW89_TAS_TXPWR_WINDOW;
+ tas->txpwr_tail_idx = (tas->txpwr_tail_idx + 1) % RTW89_TAS_TXPWR_WINDOW;
+ tas->tx_ratio_idx = (tas->tx_ratio_idx + 1) % RTW89_TAS_TX_RATIO_WINDOW;
+ tas->txpwr_history[tas->txpwr_tail_idx] = txpwr;
+
rtw89_debug(rtwdev, RTW89_DBG_SAR,
- "DPR_ON thold: %d, DPR_OFF thold: %d, txpwr_avg: %d\n",
- dpr_on_threshold, dpr_off_threshold, txpwr_avg);
+ "tas: instant_txpwr: %d, tx_ratio: %u, txpwr: %d\n",
+ rtw89_linear_to_db_quarter(instant_txpwr), tx_ratio,
+ rtw89_linear_to_db_quarter(div_u64(txpwr, PERCENT)));
+}
- if (txpwr_avg >= dpr_on_threshold)
+static void rtw89_tas_rolling_average(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_tas_info *tas = &rtwdev->tas;
+ s32 dpr_on_threshold, dpr_off_threshold;
+ enum rtw89_tas_state state;
+ u16 tx_ratio_avg;
+ s32 txpwr_avg;
+ u64 linear;
+
+ linear = DIV_ROUND_DOWN_ULL(tas->total_txpwr, tas->window_size * PERCENT);
+ txpwr_avg = rtw89_linear_to_db_quarter(linear);
+ tx_ratio_avg = tas->total_tx_ratio / RTW89_TAS_TX_RATIO_WINDOW;
+ dpr_on_threshold = tas->dpr_on_threshold;
+ dpr_off_threshold = tas->dpr_off_threshold;
+
+ rtw89_debug(rtwdev, RTW89_DBG_SAR,
+ "tas: DPR_ON: %d, DPR_OFF: %d, txpwr_avg: %d, tx_ratio_avg: %u\n",
+ dpr_on_threshold, dpr_off_threshold, txpwr_avg, tx_ratio_avg);
+
+ if (tx_ratio_avg >= RTW89_TAS_TX_RATIO_THRESHOLD)
+ state = RTW89_TAS_STATE_STATIC_SAR;
+ else if (txpwr_avg >= dpr_on_threshold)
state = RTW89_TAS_STATE_DPR_ON;
else if (txpwr_avg < dpr_off_threshold)
state = RTW89_TAS_STATE_DPR_OFF;
-
-out:
- if (tas->state == state)
+ else
return;
- rtw89_debug(rtwdev, RTW89_DBG_SAR,
- "TAS old state: %d, new state: %d\n", tas->state, state);
- tas->state = state;
- rtw89_core_set_chip_txpwr(rtwdev);
+ rtw89_tas_state_update(rtwdev, state);
}
void rtw89_tas_init(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_tas_info *tas = &rtwdev->tas;
struct rtw89_acpi_dsm_result res = {};
int ret;
u8 val;
+ if (!chip->support_tas)
+ return;
+
ret = rtw89_acpi_evaluate_dsm(rtwdev, RTW89_ACPI_DSM_FUNC_TAS_EN, &res);
if (ret) {
rtw89_debug(rtwdev, RTW89_DBG_SAR,
@@ -386,64 +554,116 @@ void rtw89_tas_init(struct rtw89_dev *rtwdev)
rtw89_debug(rtwdev, RTW89_DBG_SAR, "TAS not enable\n");
return;
}
-
- tas->dpr_gap = RTW89_TAS_DPR_GAP;
- tas->delta = RTW89_TAS_DELTA;
}
-void rtw89_tas_reset(struct rtw89_dev *rtwdev)
+void rtw89_tas_reset(struct rtw89_dev *rtwdev, bool force)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
struct rtw89_tas_info *tas = &rtwdev->tas;
+ u64 linear;
+ s32 cfg;
+ int i;
- if (!tas->enable)
+ if (!rtw89_tas_is_active(rtwdev))
+ return;
+
+ if (!rtw89_tas_query_sar_config(rtwdev, &cfg))
return;
- memset(&tas->txpwr_history, 0, sizeof(tas->txpwr_history));
- tas->total_txpwr = 0;
- tas->cur_idx = 0;
+ tas->dpr_on_threshold = cfg - RTW89_TAS_SAR_GAP;
+ tas->dpr_off_threshold = cfg - RTW89_TAS_SAR_GAP - RTW89_TAS_DPR_GAP;
+
+ /* avoid resetting the history right after a new SAR config is applied */
+ if (!force && tas->keep_history)
+ return;
+
+ linear = rtw89_db_quarter_to_linear(cfg) * RTW89_TAS_DFLT_TX_RATIO;
+ for (i = 0; i < RTW89_TAS_TXPWR_WINDOW; i++)
+ tas->txpwr_history[i] = linear;
+
+ for (i = 0; i < RTW89_TAS_TX_RATIO_WINDOW; i++)
+ tas->tx_ratio_history[i] = RTW89_TAS_DFLT_TX_RATIO;
+
+ tas->total_tx_ratio = RTW89_TAS_DFLT_TX_RATIO * RTW89_TAS_TX_RATIO_WINDOW;
+ tas->total_txpwr = linear * RTW89_TAS_TXPWR_WINDOW;
+ tas->window_size = RTW89_TAS_TXPWR_WINDOW;
+ tas->txpwr_head_idx = 0;
+ tas->txpwr_tail_idx = RTW89_TAS_TXPWR_WINDOW - 1;
+ tas->tx_ratio_idx = 0;
tas->state = RTW89_TAS_STATE_DPR_OFF;
-}
+ tas->backup_state = RTW89_TAS_STATE_DPR_OFF;
+ tas->keep_history = true;
-static const struct rtw89_reg_def txpwr_regs[] = {
- {R_PATH0_TXPWR, B_PATH0_TXPWR},
- {R_PATH1_TXPWR, B_PATH1_TXPWR},
-};
+ rtw89_debug(rtwdev, RTW89_DBG_SAR,
+ "tas: band: %u, freq: %u\n", chan->band_type, chan->freq);
+}
void rtw89_tas_track(struct rtw89_dev *rtwdev)
{
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
- const enum rtw89_sar_sources src = rtwdev->sar.src;
- u8 max_nss_num = rtwdev->chip->rf_path_num;
struct rtw89_tas_info *tas = &rtwdev->tas;
- s16 tmp, txpwr, instant_txpwr = 0;
- u32 val;
- int i;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ s32 cfg;
- if (!tas->enable || src == RTW89_SAR_SOURCE_NONE)
+ if (hal->disabled_dm_bitmap & BIT(RTW89_DM_TAS))
return;
- if (env->ccx_watchdog_result != RTW89_PHY_ENV_MON_IFS_CLM)
+ if (!rtw89_tas_is_active(rtwdev))
return;
- for (i = 0; i < max_nss_num; i++) {
- val = rtw89_phy_read32_mask(rtwdev, txpwr_regs[i].addr,
- txpwr_regs[i].mask);
- tmp = sign_extend32(val, 8);
- if (tmp <= 0)
- return;
- instant_txpwr += tmp;
+ if (!rtw89_tas_query_sar_config(rtwdev, &cfg) || tas->block_regd) {
+ rtw89_tas_state_update(rtwdev, RTW89_TAS_STATE_STATIC_SAR);
+ return;
}
- instant_txpwr /= max_nss_num;
- /* in unit of 0.25 dBm multiply by percentage */
- txpwr = instant_txpwr * env->ifs_clm_tx_ratio;
- tas->total_txpwr += txpwr - tas->txpwr_history[tas->cur_idx];
- tas->txpwr_history[tas->cur_idx] = txpwr;
- rtw89_debug(rtwdev, RTW89_DBG_SAR,
- "instant_txpwr: %d, tx_ratio: %d, txpwr: %d\n",
- instant_txpwr, env->ifs_clm_tx_ratio, txpwr);
+ if (tas->pause)
+ return;
- tas->cur_idx = (tas->cur_idx + 1) % RTW89_TAS_MAX_WINDOW;
+ rtw89_tas_window_update(rtwdev);
+ rtw89_tas_history_update(rtwdev);
+ rtw89_tas_rolling_average(rtwdev);
+}
- rtw89_tas_state_update(rtwdev);
+void rtw89_tas_scan(struct rtw89_dev *rtwdev, bool start)
+{
+ struct rtw89_tas_info *tas = &rtwdev->tas;
+ s32 cfg;
+
+ if (!rtw89_tas_is_active(rtwdev))
+ return;
+
+ if (!rtw89_tas_query_sar_config(rtwdev, &cfg))
+ return;
+
+ if (start) {
+ tas->backup_state = tas->state;
+ rtw89_tas_state_update(rtwdev, RTW89_TAS_STATE_STATIC_SAR);
+ } else {
+ rtw89_tas_state_update(rtwdev, tas->backup_state);
+ }
+}
+
+void rtw89_tas_chanctx_cb(struct rtw89_dev *rtwdev,
+ enum rtw89_chanctx_state state)
+{
+ struct rtw89_tas_info *tas = &rtwdev->tas;
+ s32 cfg;
+
+ if (!rtw89_tas_is_active(rtwdev))
+ return;
+
+ if (!rtw89_tas_query_sar_config(rtwdev, &cfg))
+ return;
+
+ switch (state) {
+ case RTW89_CHANCTX_STATE_MCC_START:
+ tas->pause = true;
+ rtw89_tas_state_update(rtwdev, RTW89_TAS_STATE_STATIC_SAR);
+ break;
+ case RTW89_CHANCTX_STATE_MCC_STOP:
+ tas->pause = false;
+ break;
+ default:
+ break;
+ }
}
+EXPORT_SYMBOL(rtw89_tas_chanctx_cb);
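All TAS constants above are kept in 0.25 dBm steps (RTW89_TAS_FACTOR == 2). A worked example of how the new thresholds combine, assuming a hypothetical SAR limit of 20 dBm; the constants are restated locally so the snippet is self-contained (illustration only):

#define TAS_FACTOR       2                                   /* unit: 0.25 dBm */
#define TAS_SAR_GAP      (1 << TAS_FACTOR)                   /* 1 dB */
#define TAS_DPR_GAP      (1 << TAS_FACTOR)                   /* 1 dB */
#define TAS_DPR_ON_OFST  ((2 << TAS_FACTOR) + TAS_SAR_GAP)   /* 3 dB */
#define TAS_DPR_OFF_OFST (4 << TAS_FACTOR)                   /* 4 dB */

static void tas_threshold_example(void)
{
        s32 cfg = 20 << TAS_FACTOR;             /* 20 dBm SAR limit -> 80 */
        s32 dpr_on = cfg - TAS_SAR_GAP;         /* 76 -> DPR ON at 19 dBm */
        s32 dpr_off = dpr_on - TAS_DPR_GAP;     /* 72 -> DPR OFF below 18 dBm */

        /* rtw89_query_sar() then reports cfg - TAS_DPR_ON_OFST (17 dBm)
         * while DPR is ON, cfg + TAS_DPR_OFF_OFST (24 dBm) while DPR is
         * OFF, and cfg unchanged in the STATIC SAR state.
         */
        (void)dpr_on;
        (void)dpr_off;
}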
diff --git a/drivers/net/wireless/realtek/rtw89/sar.h b/drivers/net/wireless/realtek/rtw89/sar.h
index 4ae081d2d3b4..0df1661db9a8 100644
--- a/drivers/net/wireless/realtek/rtw89/sar.h
+++ b/drivers/net/wireless/realtek/rtw89/sar.h
@@ -19,12 +19,16 @@ struct rtw89_sar_handler {
extern const struct cfg80211_sar_capa rtw89_sar_capa;
s8 rtw89_query_sar(struct rtw89_dev *rtwdev, u32 center_freq);
-void rtw89_print_sar(struct seq_file *m, struct rtw89_dev *rtwdev, u32 center_freq);
-void rtw89_print_tas(struct seq_file *m, struct rtw89_dev *rtwdev);
+int rtw89_print_sar(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
+ u32 center_freq);
+int rtw89_print_tas(struct rtw89_dev *rtwdev, char *buf, size_t bufsz);
int rtw89_ops_set_sar_specs(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar);
void rtw89_tas_init(struct rtw89_dev *rtwdev);
-void rtw89_tas_reset(struct rtw89_dev *rtwdev);
+void rtw89_tas_reset(struct rtw89_dev *rtwdev, bool force);
void rtw89_tas_track(struct rtw89_dev *rtwdev);
+void rtw89_tas_scan(struct rtw89_dev *rtwdev, bool start);
+void rtw89_tas_chanctx_cb(struct rtw89_dev *rtwdev,
+ enum rtw89_chanctx_state state);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
index 26a944d3b672..0740e303680c 100644
--- a/drivers/net/wireless/realtek/rtw89/ser.c
+++ b/drivers/net/wireless/realtek/rtw89/ser.c
@@ -156,9 +156,9 @@ static void ser_state_run(struct rtw89_ser *ser, u8 evt)
rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s receive %s\n",
ser_st_name(ser), ser_ev_name(ser, evt));
- mutex_lock(&rtwdev->mutex);
+ wiphy_lock(rtwdev->hw->wiphy);
rtw89_leave_lps(rtwdev);
- mutex_unlock(&rtwdev->mutex);
+ wiphy_unlock(rtwdev->hw->wiphy);
ser->st_tbl[ser->state].st_func(ser, evt);
}
@@ -483,10 +483,13 @@ static void ser_l1_reset_pre_st_hdl(struct rtw89_ser *ser, u8 evt)
static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt)
{
struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
+ struct wiphy *wiphy = rtwdev->hw->wiphy;
switch (evt) {
case SER_EV_STATE_IN:
- cancel_delayed_work_sync(&rtwdev->track_work);
+ wiphy_lock(wiphy);
+ wiphy_delayed_work_cancel(wiphy, &rtwdev->track_work);
+ wiphy_unlock(wiphy);
drv_stop_tx(ser);
if (hal_stop_dma(ser)) {
@@ -517,8 +520,8 @@ static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt)
hal_enable_dma(ser);
drv_resume_rx(ser);
drv_resume_tx(ser);
- ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->track_work,
- RTW89_TRACK_WORK_PERIOD);
+ wiphy_delayed_work_queue(wiphy, &rtwdev->track_work,
+ RTW89_TRACK_WORK_PERIOD);
break;
default:
@@ -708,9 +711,9 @@ static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt)
switch (evt) {
case SER_EV_STATE_IN:
- mutex_lock(&rtwdev->mutex);
+ wiphy_lock(rtwdev->hw->wiphy);
ser_l2_reset_st_pre_hdl(ser);
- mutex_unlock(&rtwdev->mutex);
+ wiphy_unlock(rtwdev->hw->wiphy);
ieee80211_restart_hw(rtwdev->hw);
ser_set_alarm(ser, SER_RECFG_TIMEOUT, SER_EV_L2_RECFG_TIMEOUT);
diff --git a/drivers/net/wireless/realtek/rtw89/util.c b/drivers/net/wireless/realtek/rtw89/util.c
index e71956ce9853..073714db26f2 100644
--- a/drivers/net/wireless/realtek/rtw89/util.c
+++ b/drivers/net/wireless/realtek/rtw89/util.c
@@ -4,103 +4,159 @@
#include "util.h"
-#define FRAC_ROWS 3
-#define FRAC_ROW_MAX (FRAC_ROWS - 1)
-#define NORM_ROW_MIN FRAC_ROWS
-
-static const u32 db_invert_table[12][8] = {
- /* rows 0~2 in unit of U(32,3) */
- {10, 13, 16, 20, 25, 32, 40, 50},
- {64, 80, 101, 128, 160, 201, 256, 318},
- {401, 505, 635, 800, 1007, 1268, 1596, 2010},
- /* rows 3~11 in unit of U(32,0) */
- {316, 398, 501, 631, 794, 1000, 1259, 1585},
- {1995, 2512, 3162, 3981, 5012, 6310, 7943, 10000},
- {12589, 15849, 19953, 25119, 31623, 39811, 50119, 63098},
- {79433, 100000, 125893, 158489, 199526, 251189, 316228, 398107},
- {501187, 630957, 794328, 1000000, 1258925, 1584893, 1995262, 2511886},
- {3162278, 3981072, 5011872, 6309573, 7943282, 1000000, 12589254,
- 15848932},
- {19952623, 25118864, 31622777, 39810717, 50118723, 63095734, 79432823,
- 100000000},
- {125892541, 158489319, 199526232, 251188643, 316227766, 398107171,
- 501187234, 630957345},
- {794328235, 1000000000, 1258925412, 1584893192, 1995262315, 2511886432U,
- 3162277660U, 3981071706U},
+#define RTW89_DBM_QUARTER_FACTOR 2
+#define RTW89_MIN_DBM (-41.25 * (1 << RTW89_DBM_QUARTER_FACTOR))
+#define RTW89_MAX_DBM (96 * (1 << RTW89_DBM_QUARTER_FACTOR))
+#define RTW89_DB_INVERT_TABLE_OFFSET (-RTW89_MIN_DBM)
+
+static const u64 db_invert_table[] = {
+ /* in unit of 0.000001 */
+ 75, 79, 84, 89, 94, 100, 106, 112, 119, 126, 133, 141, 150, 158, 168, 178, 188,
+ 200, 211, 224, 237, 251, 266, 282, 299, 316, 335, 355, 376, 398, 422, 447, 473,
+ 501, 531, 562, 596, 631, 668, 708, 750, 794, 841, 891, 944, 1000, 1059, 1122, 1189,
+ 1259, 1334, 1413, 1496, 1585, 1679, 1778, 1884, 1995, 2113, 2239, 2371, 2512, 2661,
+ 2818, 2985, 3162, 3350, 3548, 3758, 3981, 4217, 4467, 4732, 5012, 5309, 5623, 5957,
+ 6310, 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000, 10593, 11220, 11885, 12589,
+ 13335, 14125, 14962, 15849, 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119,
+ 26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811, 42170, 44668, 47315, 50119,
+ 53088, 56234, 59566, 63096, 66834, 70795, 74989, 79433, 84140, 89125, 94406, 100000,
+ 105925, 112202, 118850, 125893, 133352, 141254, 149624, 158489, 167880, 177828,
+ 188365, 199526, 211349, 223872, 237137, 251189, 266073, 281838, 298538, 316228,
+ 334965, 354813, 375837, 398107, 421697, 446684, 473151, 501187, 530884, 562341,
+ 595662, 630957, 668344, 707946, 749894, 794328, 841395, 891251, 944061, 1000000,
+ 1059254, 1122018, 1188502, 1258925, 1333521, 1412538, 1496236, 1584893, 1678804,
+ 1778279, 1883649, 1995262, 2113489, 2238721, 2371374, 2511886, 2660725, 2818383,
+ 2985383, 3162278, 3349654, 3548134, 3758374, 3981072, 4216965, 4466836, 4731513,
+ 5011872, 5308844, 5623413, 5956621, 6309573, 6683439, 7079458, 7498942, 7943282,
+ 8413951, 8912509, 9440609, 10000000, 10592537, 11220185, 11885022, 12589254,
+ 13335214, 14125375, 14962357, 15848932, 16788040, 17782794, 18836491, 19952623,
+ 21134890, 22387211, 23713737, 25118864, 26607251, 28183829, 29853826, 31622777,
+ 33496544, 35481339, 37583740, 39810717, 42169650, 44668359, 47315126, 50118723,
+ 53088444, 56234133, 59566214, 63095734, 66834392, 70794578, 74989421, 79432823,
+ 84139514, 89125094, 94406088, 100000000, 105925373, 112201845, 118850223, 125892541,
+ 133352143, 141253754, 149623566, 158489319, 167880402, 177827941, 188364909, 199526231,
+ 211348904, 223872114, 237137371, 251188643, 266072506, 281838293, 298538262, 316227766,
+ 334965439, 354813389, 375837404, 398107171, 421696503, 446683592, 473151259, 501187234,
+ 530884444, 562341325, 595662144, 630957344, 668343918, 707945784, 749894209, 794328235,
+ 841395142, 891250938, 944060876, 1000000000, 1059253725, 1122018454, 1188502227,
+ 1258925412, 1333521432, 1412537545, 1496235656, 1584893192, 1678804018, 1778279410,
+ 1883649089, 1995262315, 2113489040, 2238721139, 2371373706, 2511886432, 2660725060,
+ 2818382931, 2985382619, 3162277660, 3349654392, 3548133892, 3758374043, 3981071706,
+ 4216965034, 4466835922ULL, 4731512590ULL, 5011872336ULL, 5308844442ULL, 5623413252ULL,
+ 5956621435ULL, 6309573445ULL, 6683439176ULL, 7079457844ULL, 7498942093ULL,
+ 7943282347ULL, 8413951416ULL, 8912509381ULL, 9440608763ULL, 10000000000ULL,
+ 10592537252ULL, 11220184543ULL, 11885022274ULL, 12589254118ULL, 13335214322ULL,
+ 14125375446ULL, 14962356561ULL, 15848931925ULL, 16788040181ULL, 17782794100ULL,
+ 18836490895ULL, 19952623150ULL, 21134890398ULL, 22387211386ULL, 23713737057ULL,
+ 25118864315ULL, 26607250598ULL, 28183829313ULL, 29853826189ULL, 31622776602ULL,
+ 33496543916ULL, 35481338923ULL, 37583740429ULL, 39810717055ULL, 42169650343ULL,
+ 44668359215ULL, 47315125896ULL, 50118723363ULL, 53088444423ULL, 56234132519ULL,
+ 59566214353ULL, 63095734448ULL, 66834391757ULL, 70794578438ULL, 74989420933ULL,
+ 79432823472ULL, 84139514165ULL, 89125093813ULL, 94406087629ULL, 100000000000ULL,
+ 105925372518ULL, 112201845430ULL, 118850222744ULL, 125892541179ULL, 133352143216ULL,
+ 141253754462ULL, 149623565609ULL, 158489319246ULL, 167880401812ULL, 177827941004ULL,
+ 188364908949ULL, 199526231497ULL, 211348903984ULL, 223872113857ULL, 237137370566ULL,
+ 251188643151ULL, 266072505980ULL, 281838293126ULL, 298538261892ULL, 316227766017ULL,
+ 334965439158ULL, 354813389234ULL, 375837404288ULL, 398107170553ULL, 421696503429ULL,
+ 446683592151ULL, 473151258961ULL, 501187233627ULL, 530884444231ULL, 562341325190ULL,
+ 595662143529ULL, 630957344480ULL, 668343917569ULL, 707945784384ULL, 749894209332ULL,
+ 794328234724ULL, 841395141645ULL, 891250938134ULL, 944060876286ULL, 1000000000000ULL,
+ 1059253725177ULL, 1122018454302ULL, 1188502227437ULL, 1258925411794ULL,
+ 1333521432163ULL, 1412537544623ULL, 1496235656094ULL, 1584893192461ULL,
+ 1678804018123ULL, 1778279410039ULL, 1883649089490ULL, 1995262314969ULL,
+ 2113489039837ULL, 2238721138568ULL, 2371373705662ULL, 2511886431510ULL,
+ 2660725059799ULL, 2818382931264ULL, 2985382618918ULL, 3162277660168ULL,
+ 3349654391578ULL, 3548133892336ULL, 3758374042884ULL, 3981071705535ULL,
+ 4216965034286ULL, 4466835921510ULL, 4731512589615ULL, 5011872336273ULL,
+ 5308844442310ULL, 5623413251904ULL, 5956621435290ULL, 6309573444802ULL,
+ 6683439175686ULL, 7079457843841ULL, 7498942093325ULL, 7943282347243ULL,
+ 8413951416452ULL, 8912509381337ULL, 9440608762859ULL, 10000000000000ULL,
+ 10592537251773ULL, 11220184543020ULL, 11885022274370ULL, 12589254117942ULL,
+ 13335214321633ULL, 14125375446228ULL, 14962356560944ULL, 15848931924611ULL,
+ 16788040181226ULL, 17782794100389ULL, 18836490894898ULL, 19952623149689ULL,
+ 21134890398367ULL, 22387211385683ULL, 23713737056617ULL, 25118864315096ULL,
+ 26607250597988ULL, 28183829312645ULL, 29853826189180ULL, 31622776601684ULL,
+ 33496543915783ULL, 35481338923358ULL, 37583740428845ULL, 39810717055350ULL,
+ 42169650342858ULL, 44668359215096ULL, 47315125896148ULL, 50118723362727ULL,
+ 53088444423099ULL, 56234132519035ULL, 59566214352901ULL, 63095734448019ULL,
+ 66834391756862ULL, 70794578438414ULL, 74989420933246ULL, 79432823472428ULL,
+ 84139514164520ULL, 89125093813375ULL, 94406087628593ULL, 100000000000000ULL,
+ 105925372517729ULL, 112201845430197ULL, 118850222743702ULL, 125892541179417ULL,
+ 133352143216332ULL, 141253754462276ULL, 149623565609444ULL, 158489319246111ULL,
+ 167880401812256ULL, 177827941003893ULL, 188364908948981ULL, 199526231496888ULL,
+ 211348903983664ULL, 223872113856834ULL, 237137370566166ULL, 251188643150958ULL,
+ 266072505979882ULL, 281838293126446ULL, 298538261891796ULL, 316227766016838ULL,
+ 334965439157829ULL, 354813389233577ULL, 375837404288444ULL, 398107170553497ULL,
+ 421696503428583ULL, 446683592150964ULL, 473151258961482ULL, 501187233627272ULL,
+ 530884444230989ULL, 562341325190350ULL, 595662143529011ULL, 630957344480196ULL,
+ 668343917568615ULL, 707945784384138ULL, 749894209332456ULL, 794328234724284ULL,
+ 841395141645198ULL, 891250938133745ULL, 944060876285923ULL, 1000000000000000ULL,
+ 1059253725177290ULL, 1122018454301970ULL, 1188502227437020ULL, 1258925411794170ULL,
+ 1333521432163330ULL, 1412537544622760ULL, 1496235656094440ULL, 1584893192461110ULL,
+ 1678804018122560ULL, 1778279410038920ULL, 1883649089489810ULL, 1995262314968890ULL,
+ 2113489039836650ULL, 2238721138568340ULL, 2371373705661660ULL, 2511886431509590ULL,
+ 2660725059798820ULL, 2818382931264460ULL, 2985382618917960ULL, 3162277660168380ULL,
+ 3349654391578280ULL, 3548133892335770ULL, 3758374042884440ULL, 3981071705534970ULL
};
-u32 rtw89_linear_2_db(u64 val)
+s32 rtw89_linear_to_db_quarter(u64 val)
{
- u8 i, j;
- u32 dB;
-
- for (i = 0; i < 12; i++) {
- for (j = 0; j < 8; j++) {
- if (i <= FRAC_ROW_MAX &&
- (val << RTW89_LINEAR_FRAC_BITS) <= db_invert_table[i][j])
- goto cnt;
- else if (i > FRAC_ROW_MAX && val <= db_invert_table[i][j])
- goto cnt;
- }
- }
+ int r = ARRAY_SIZE(db_invert_table) - 1;
+ int l = 0;
+ int m;
- return 96; /* maximum 96 dB */
+ while (l <= r) {
+ m = l + (r - l) / 2;
-cnt:
- /* special cases */
- if (j == 0 && i == 0)
- goto end;
+ if (db_invert_table[m] == val)
+ return m - (s32)RTW89_DB_INVERT_TABLE_OFFSET;
- if (i == NORM_ROW_MIN && j == 0) {
- if (db_invert_table[NORM_ROW_MIN][0] - val >
- val - (db_invert_table[FRAC_ROW_MAX][7] >> RTW89_LINEAR_FRAC_BITS)) {
- i = FRAC_ROW_MAX;
- j = 7;
- }
- goto end;
+ if (db_invert_table[m] > val)
+ r = m - 1;
+ else
+ l = m + 1;
}
- if (i <= FRAC_ROW_MAX)
- val <<= RTW89_LINEAR_FRAC_BITS;
-
- /* compare difference to get precise dB */
- if (j == 0) {
- if (db_invert_table[i][j] - val >
- val - db_invert_table[i - 1][7]) {
- i--;
- j = 7;
- }
- } else {
- if (db_invert_table[i][j] - val >
- val - db_invert_table[i][j - 1]) {
- j--;
- }
- }
-end:
- dB = (i << 3) + j + 1;
+ if (l >= ARRAY_SIZE(db_invert_table))
+ return RTW89_MAX_DBM;
+ else if (r < 0)
+ return RTW89_MIN_DBM;
+ else if (val - db_invert_table[r] <= db_invert_table[l] - val)
+ return r - (s32)RTW89_DB_INVERT_TABLE_OFFSET;
+ else
+ return l - (s32)RTW89_DB_INVERT_TABLE_OFFSET;
+}
+EXPORT_SYMBOL(rtw89_linear_to_db_quarter);
- return dB;
+s32 rtw89_linear_to_db(u64 val)
+{
+ return rtw89_linear_to_db_quarter(val) >> RTW89_DBM_QUARTER_FACTOR;
}
-EXPORT_SYMBOL(rtw89_linear_2_db);
+EXPORT_SYMBOL(rtw89_linear_to_db);
-u64 rtw89_db_2_linear(u32 db)
+u64 rtw89_db_quarter_to_linear(s32 db)
{
- u64 linear;
- u8 i, j;
+ /* supported range -41.25 to 96 dBm, in unit of 0.25 dBm */
+ db = clamp_t(s32, db, RTW89_MIN_DBM, RTW89_MAX_DBM);
+ db += (s32)RTW89_DB_INVERT_TABLE_OFFSET;
- if (db > 96)
- db = 96;
- else if (db < 1)
- return 1;
+ return db_invert_table[db];
+}
+EXPORT_SYMBOL(rtw89_db_quarter_to_linear);
- i = (db - 1) >> 3;
- j = (db - 1) & 0x7;
+u64 rtw89_db_to_linear(s32 db)
+{
+ return rtw89_db_quarter_to_linear(db << RTW89_DBM_QUARTER_FACTOR);
+}
+EXPORT_SYMBOL(rtw89_db_to_linear);
- linear = db_invert_table[i][j];
+void rtw89_might_trailing_ellipsis(char *buf, size_t size, ssize_t used)
+{
+ static const char ellipsis[] = "...";
- if (i >= NORM_ROW_MIN)
- linear = linear << RTW89_LINEAR_FRAC_BITS;
+ /* length of null terminator isn't included in 'used' */
+ if (used + 1 < size || size < sizeof(ellipsis))
+ return;
- return linear;
+ memcpy(buf + size - sizeof(ellipsis), ellipsis, sizeof(ellipsis));
}
-EXPORT_SYMBOL(rtw89_db_2_linear);
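The new db_invert_table stores 10^(dBm/10) scaled by 1e6, in 0.25 dB steps from -41.25 dBm to 96 dBm, and the lookup/binary-search pair above is expected to round-trip accordingly. A small usage sketch (illustration only; the quoted values follow from the table):

static void db_linear_example(void)
{
        u64 lin = rtw89_db_to_linear(3);                /* 10^0.3 * 1e6 = 1995262 */
        s32 quarter = rtw89_linear_to_db_quarter(lin);  /* 12, i.e. 3 dBm in 0.25 dB steps */
        s32 db = rtw89_linear_to_db(lin);               /* 12 >> 2 = 3 */

        (void)quarter;
        (void)db;
}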
diff --git a/drivers/net/wireless/realtek/rtw89/util.h b/drivers/net/wireless/realtek/rtw89/util.h
index e669544cafd3..bd08495301e4 100644
--- a/drivers/net/wireless/realtek/rtw89/util.h
+++ b/drivers/net/wireless/realtek/rtw89/util.h
@@ -6,13 +6,11 @@
#include "core.h"
-#define RTW89_LINEAR_FRAC_BITS 3
-
#define rtw89_iterate_vifs_bh(rtwdev, iterator, data) \
ieee80211_iterate_active_interfaces_atomic((rtwdev)->hw, \
IEEE80211_IFACE_ITER_NORMAL, iterator, data)
-/* call this function with rtwdev->mutex is held */
+/* call this function with the wiphy mutex held */
#define rtw89_for_each_rtwvif(rtwdev, rtwvif) \
list_for_each_entry(rtwvif, &(rtwdev)->rtwvifs_list, list)
@@ -25,7 +23,7 @@ static inline bool rtw89_rtwvif_in_list(struct rtw89_dev *rtwdev,
{
struct rtw89_vif *rtwvif;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
rtw89_for_each_rtwvif(rtwdev, rtwvif)
if (rtwvif == new)
@@ -75,7 +73,10 @@ static inline void ether_addr_copy_mask(u8 *dst, const u8 *src, u8 mask)
}
}
-u32 rtw89_linear_2_db(u64 linear);
-u64 rtw89_db_2_linear(u32 db);
+s32 rtw89_linear_to_db_quarter(u64 val);
+s32 rtw89_linear_to_db(u64 val);
+u64 rtw89_db_quarter_to_linear(s32 db);
+u64 rtw89_db_to_linear(s32 db);
+void rtw89_might_trailing_ellipsis(char *buf, size_t size, ssize_t used);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
index 01754d031bb4..17eee58503cb 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.c
+++ b/drivers/net/wireless/realtek/rtw89/wow.c
@@ -604,6 +604,8 @@ static struct ieee80211_key_conf *rtw89_wow_gtk_rekey(struct rtw89_dev *rtwdev,
struct ieee80211_key_conf *key;
u8 sz;
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
cipher_info = rtw89_cipher_alg_recognize(cipher);
sz = struct_size(rekey_conf, key, cipher_info->len);
rekey_conf = kmalloc(sz, GFP_KERNEL);
@@ -616,15 +618,10 @@ static struct ieee80211_key_conf *rtw89_wow_gtk_rekey(struct rtw89_dev *rtwdev,
memcpy(rekey_conf->key, gtk,
flex_array_size(rekey_conf, key, cipher_info->len));
- /* ieee80211_gtk_rekey_add() will call set_key(), therefore we
- * need to unlock mutex
- */
- mutex_unlock(&rtwdev->mutex);
if (ieee80211_vif_is_mld(wow_vif))
key = ieee80211_gtk_rekey_add(wow_vif, rekey_conf, rtwvif_link->link_id);
else
key = ieee80211_gtk_rekey_add(wow_vif, rekey_conf, -1);
- mutex_lock(&rtwdev->mutex);
kfree(rekey_conf);
if (IS_ERR(key)) {
diff --git a/drivers/net/wireless/silabs/wfx/bus.h b/drivers/net/wireless/silabs/wfx/bus.h
index ccadfdd6873c..79edaef20881 100644
--- a/drivers/net/wireless/silabs/wfx/bus.h
+++ b/drivers/net/wireless/silabs/wfx/bus.h
@@ -28,6 +28,7 @@ struct wfx_hwbus_ops {
void (*lock)(void *bus_priv);
void (*unlock)(void *bus_priv);
size_t (*align_size)(void *bus_priv, size_t size);
+ void (*set_wakeup)(void *priv, bool enabled);
};
extern struct sdio_driver wfx_sdio_driver;
diff --git a/drivers/net/wireless/silabs/wfx/bus_sdio.c b/drivers/net/wireless/silabs/wfx/bus_sdio.c
index f290eecde773..ab0793b9908f 100644
--- a/drivers/net/wireless/silabs/wfx/bus_sdio.c
+++ b/drivers/net/wireless/silabs/wfx/bus_sdio.c
@@ -14,6 +14,7 @@
#include <linux/of_irq.h>
#include <linux/irq.h>
#include <linux/align.h>
+#include <linux/pm.h>
#include "bus.h"
#include "wfx.h"
@@ -172,6 +173,13 @@ static size_t wfx_sdio_align_size(void *priv, size_t size)
return sdio_align_size(bus->func, size);
}
+static void wfx_sdio_set_wakeup(void *priv, bool enabled)
+{
+ struct wfx_sdio_priv *bus = priv;
+
+ device_set_wakeup_enable(&bus->func->dev, enabled);
+}
+
static const struct wfx_hwbus_ops wfx_sdio_hwbus_ops = {
.copy_from_io = wfx_sdio_copy_from_io,
.copy_to_io = wfx_sdio_copy_to_io,
@@ -180,6 +188,7 @@ static const struct wfx_hwbus_ops wfx_sdio_hwbus_ops = {
.lock = wfx_sdio_lock,
.unlock = wfx_sdio_unlock,
.align_size = wfx_sdio_align_size,
+ .set_wakeup = wfx_sdio_set_wakeup,
};
static const struct of_device_id wfx_sdio_of_match[] = {
@@ -191,9 +200,48 @@ static const struct of_device_id wfx_sdio_of_match[] = {
};
MODULE_DEVICE_TABLE(of, wfx_sdio_of_match);
+static int wfx_sdio_suspend(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct wfx_sdio_priv *bus = sdio_get_drvdata(func);
+ int ret;
+
+ if (!device_may_wakeup(dev))
+ return 0;
+
+ flush_work(&bus->core->hif.bh);
+ /* Either "wakeup-source" attribute or out-of-band IRQ is required for
+ * WoWLAN
+ */
+ if (bus->of_irq) {
+ ret = enable_irq_wake(bus->of_irq);
+ if (ret)
+ return ret;
+ } else {
+ ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
+ if (ret)
+ return ret;
+ }
+ return sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+}
+
+static int wfx_sdio_resume(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct wfx_sdio_priv *bus = sdio_get_drvdata(func);
+
+ if (!device_may_wakeup(dev))
+ return 0;
+ if (bus->of_irq)
+ return disable_irq_wake(bus->of_irq);
+ else
+ return 0;
+}
+
static int wfx_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
{
const struct wfx_platform_data *pdata = of_device_get_match_data(&func->dev);
+ mmc_pm_flag_t pm_flag = sdio_get_host_pm_caps(func);
struct device_node *np = func->dev.of_node;
struct wfx_sdio_priv *bus;
int ret;
@@ -235,6 +283,9 @@ static int wfx_sdio_probe(struct sdio_func *func, const struct sdio_device_id *i
if (ret)
goto sdio_release;
+ if (pm_flag & MMC_PM_KEEP_POWER)
+ device_set_wakeup_capable(&func->dev, true);
+
return 0;
sdio_release:
@@ -261,6 +312,8 @@ static const struct sdio_device_id wfx_sdio_ids[] = {
};
MODULE_DEVICE_TABLE(sdio, wfx_sdio_ids);
+static DEFINE_SIMPLE_DEV_PM_OPS(wfx_sdio_pm_ops, wfx_sdio_suspend, wfx_sdio_resume);
+
struct sdio_driver wfx_sdio_driver = {
.name = "wfx-sdio",
.id_table = wfx_sdio_ids,
@@ -268,5 +321,6 @@ struct sdio_driver wfx_sdio_driver = {
.remove = wfx_sdio_remove,
.drv = {
.of_match_table = wfx_sdio_of_match,
+ .pm = &wfx_sdio_pm_ops,
}
};
diff --git a/drivers/net/wireless/silabs/wfx/bus_spi.c b/drivers/net/wireless/silabs/wfx/bus_spi.c
index 160b90114aad..45ee19e1ecbf 100644
--- a/drivers/net/wireless/silabs/wfx/bus_spi.c
+++ b/drivers/net/wireless/silabs/wfx/bus_spi.c
@@ -13,6 +13,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/of.h>
+#include <linux/pm.h>
#include "bus.h"
#include "wfx.h"
@@ -179,6 +180,13 @@ static size_t wfx_spi_align_size(void *priv, size_t size)
return ALIGN(size, 4);
}
+static void wfx_spi_set_wakeup(void *priv, bool enabled)
+{
+ struct wfx_spi_priv *bus = priv;
+
+ device_set_wakeup_enable(&bus->func->dev, enabled);
+}
+
static const struct wfx_hwbus_ops wfx_spi_hwbus_ops = {
.copy_from_io = wfx_spi_copy_from_io,
.copy_to_io = wfx_spi_copy_to_io,
@@ -187,8 +195,29 @@ static const struct wfx_hwbus_ops wfx_spi_hwbus_ops = {
.lock = wfx_spi_lock,
.unlock = wfx_spi_unlock,
.align_size = wfx_spi_align_size,
+ .set_wakeup = wfx_spi_set_wakeup,
};
+static int wfx_spi_suspend(struct device *dev)
+{
+ struct spi_device *func = to_spi_device(dev);
+ struct wfx_spi_priv *bus = spi_get_drvdata(func);
+
+ if (!device_may_wakeup(dev))
+ return 0;
+ flush_work(&bus->core->hif.bh);
+ return enable_irq_wake(func->irq);
+}
+
+static int wfx_spi_resume(struct device *dev)
+{
+ struct spi_device *func = to_spi_device(dev);
+
+ if (!device_may_wakeup(dev))
+ return 0;
+ return disable_irq_wake(func->irq);
+}
+
static int wfx_spi_probe(struct spi_device *func)
{
struct wfx_platform_data *pdata;
@@ -239,7 +268,12 @@ static int wfx_spi_probe(struct spi_device *func)
if (!bus->core)
return -EIO;
- return wfx_probe(bus->core);
+ ret = wfx_probe(bus->core);
+ if (ret)
+ return ret;
+
+ device_set_wakeup_capable(&func->dev, true);
+ return 0;
}
static void wfx_spi_remove(struct spi_device *func)
@@ -273,12 +307,15 @@ static const struct of_device_id wfx_spi_of_match[] = {
MODULE_DEVICE_TABLE(of, wfx_spi_of_match);
#endif
+static DEFINE_SIMPLE_DEV_PM_OPS(wfx_spi_pm_ops, wfx_spi_suspend, wfx_spi_resume);
+
struct spi_driver wfx_spi_driver = {
+ .id_table = wfx_spi_id,
+ .probe = wfx_spi_probe,
+ .remove = wfx_spi_remove,
.driver = {
.name = "wfx-spi",
.of_match_table = of_match_ptr(wfx_spi_of_match),
+ .pm = &wfx_spi_pm_ops,
},
- .id_table = wfx_spi_id,
- .probe = wfx_spi_probe,
- .remove = wfx_spi_remove,
};
diff --git a/drivers/net/wireless/silabs/wfx/main.c b/drivers/net/wireless/silabs/wfx/main.c
index 64441c8bc460..a61128debbad 100644
--- a/drivers/net/wireless/silabs/wfx/main.c
+++ b/drivers/net/wireless/silabs/wfx/main.c
@@ -121,6 +121,12 @@ static const struct ieee80211_iface_combination wfx_iface_combinations[] = {
}
};
+#ifdef CONFIG_PM
+static const struct wiphy_wowlan_support wfx_wowlan_support = {
+ .flags = WIPHY_WOWLAN_ANY | WIPHY_WOWLAN_DISCONNECT,
+};
+#endif
+
static const struct ieee80211_ops wfx_ops = {
.start = wfx_start,
.stop = wfx_stop,
@@ -153,6 +159,11 @@ static const struct ieee80211_ops wfx_ops = {
.unassign_vif_chanctx = wfx_unassign_vif_chanctx,
.remain_on_channel = wfx_remain_on_channel,
.cancel_remain_on_channel = wfx_cancel_remain_on_channel,
+#ifdef CONFIG_PM
+ .suspend = wfx_suspend,
+ .resume = wfx_resume,
+ .set_wakeup = wfx_set_wakeup,
+#endif
};
bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor)
@@ -289,6 +300,9 @@ struct wfx_dev *wfx_init_common(struct device *dev, const struct wfx_platform_da
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U;
hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
+#ifdef CONFIG_PM
+ hw->wiphy->wowlan = &wfx_wowlan_support;
+#endif
hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
hw->wiphy->max_remain_on_channel_duration = 5000;
diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c
index 7c04810dbf3d..e95b9ded17d9 100644
--- a/drivers/net/wireless/silabs/wfx/sta.c
+++ b/drivers/net/wireless/silabs/wfx/sta.c
@@ -10,6 +10,7 @@
#include "sta.h"
#include "wfx.h"
+#include "bus.h"
#include "fwio.h"
#include "bh.h"
#include "key.h"
@@ -803,6 +804,30 @@ void wfx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
}
+#ifdef CONFIG_PM
+int wfx_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+{
+	/* FIXME: the hardware also supports WIPHY_WOWLAN_MAGIC_PKT and other filters */
+ if (!wowlan->any || !wowlan->disconnect)
+ return -EINVAL;
+ return 0;
+}
+
+int wfx_resume(struct ieee80211_hw *hw)
+{
+ return 0;
+}
+
+void wfx_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct wfx_dev *wdev = hw->priv;
+
+ if (enabled)
+ dev_info(wdev->dev, "support for WoWLAN is experimental\n");
+ wdev->hwbus_ops->set_wakeup(wdev->hwbus_priv, enabled);
+}
+#endif
+
int wfx_start(struct ieee80211_hw *hw)
{
return 0;
diff --git a/drivers/net/wireless/silabs/wfx/sta.h b/drivers/net/wireless/silabs/wfx/sta.h
index 7817c7c6f3dd..8702eed5267f 100644
--- a/drivers/net/wireless/silabs/wfx/sta.h
+++ b/drivers/net/wireless/silabs/wfx/sta.h
@@ -56,6 +56,9 @@ int wfx_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *conf);
+int wfx_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
+int wfx_resume(struct ieee80211_hw *hw);
+void wfx_set_wakeup(struct ieee80211_hw *hw, bool enabled);
/* Hardware API Callbacks */
void wfx_cooling_timeout_work(struct work_struct *work);
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index cf6a331d4042..cf3e976471c6 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -4,7 +4,7 @@
* Copyright (c) 2008, Jouni Malinen <j@w1.fi>
* Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2024 Intel Corporation
+ * Copyright (C) 2018 - 2025 Intel Corporation
*/
/*
@@ -1983,11 +1983,13 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
return;
}
- if (sta && sta->mlo) {
- if (WARN_ON(!link_sta)) {
- ieee80211_free_txskb(hw, skb);
- return;
- }
+ /* Do address translations only between shared links. It is
+	 * possible that while a non-AP MLD station and an AP MLD
+ * station have shared links, the frame is intended to be sent
+ * on a link which is not shared (for example when sending a
+ * probe response).
+ */
+ if (sta && sta->mlo && link_sta) {
/* address translation to link addresses on TX */
ether_addr_copy(hdr->addr1, link_sta->addr);
ether_addr_copy(hdr->addr2, bss_conf->addr);
@@ -5345,6 +5347,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
ieee80211_hw_set(hw, TDLS_WIDER_BW);
ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
+ ieee80211_hw_set(hw, STRICT);
if (param->mlo) {
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
@@ -5548,10 +5551,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
for (i = 0; i < ARRAY_SIZE(data->link_data); i++) {
- hrtimer_init(&data->link_data[i].beacon_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_ABS_SOFT);
- data->link_data[i].beacon_timer.function =
- mac80211_hwsim_beacon;
+ hrtimer_setup(&data->link_data[i].beacon_timer, mac80211_hwsim_beacon,
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS_SOFT);
data->link_data[i].link_id = i;
}
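The hwsim hunk above is one of several identical conversions in this diff (see also the iosm and ntb_pingpong hunks further down): the two-step hrtimer_init() plus open-coded ->function assignment is folded into a single hrtimer_setup() call. A minimal sketch of the pattern, with a hypothetical callback name, purely for illustration and not part of this patch:

#include <linux/hrtimer.h>

static enum hrtimer_restart example_timer_cb(struct hrtimer *timer)
{
	return HRTIMER_NORESTART;
}

static void example_timer_init(struct hrtimer *timer)
{
	/* Old style:
	 *   hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	 *   timer->function = example_timer_cb;
	 * New style, as used throughout this diff:
	 */
	hrtimer_setup(timer, example_timer_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}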
diff --git a/drivers/net/wireless/virtual/virt_wifi.c b/drivers/net/wireless/virtual/virt_wifi.c
index 4ee374080466..1fffeff2190c 100644
--- a/drivers/net/wireless/virtual/virt_wifi.c
+++ b/drivers/net/wireless/virtual/virt_wifi.c
@@ -146,7 +146,7 @@ static void virt_wifi_inform_bss(struct wiphy *wiphy)
static const struct {
u8 tag;
u8 len;
- u8 ssid[8];
+ u8 ssid[8] __nonstring;
} __packed ssid = {
.tag = WLAN_EID_SSID,
.len = VIRT_WIFI_SSID_LEN,
@@ -519,11 +519,13 @@ static rx_handler_result_t virt_wifi_rx_handler(struct sk_buff **pskb)
}
/* Called with rtnl lock held. */
-static int virt_wifi_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int virt_wifi_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
struct virt_wifi_netdev_priv *priv = netdev_priv(dev);
+ struct net *link_net = rtnl_newlink_link_net(params);
+ struct nlattr **tb = params->tb;
int err;
if (!tb[IFLA_LINK])
@@ -532,7 +534,7 @@ static int virt_wifi_newlink(struct net *src_net, struct net_device *dev,
netif_carrier_off(dev);
priv->upperdev = dev;
- priv->lowerdev = __dev_get_by_index(src_net,
+ priv->lowerdev = __dev_get_by_index(link_net,
nla_get_u32(tb[IFLA_LINK]));
if (!priv->lowerdev)
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
index f90c33d19b39..9653dbaac3c0 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
@@ -21,7 +21,7 @@
struct zd_reg_alpha2_map {
u32 reg;
- char alpha2[2];
+ char alpha2[2] __nonstring;
};
static struct zd_reg_alpha2_map reg_alpha2_map[] = {
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c
index 829515a601b3..530a3ea47a1a 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c
@@ -1381,24 +1381,20 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
/* The phase is set to power off. */
ipc_imem->phase = IPC_P_OFF;
- hrtimer_init(&ipc_imem->startup_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- ipc_imem->startup_timer.function = ipc_imem_startup_timer_cb;
+ hrtimer_setup(&ipc_imem->startup_timer, ipc_imem_startup_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
- hrtimer_init(&ipc_imem->tdupdate_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- ipc_imem->tdupdate_timer.function = ipc_imem_td_update_timer_cb;
+ hrtimer_setup(&ipc_imem->tdupdate_timer, ipc_imem_td_update_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
- hrtimer_init(&ipc_imem->fast_update_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- ipc_imem->fast_update_timer.function = ipc_imem_fast_update_timer_cb;
+ hrtimer_setup(&ipc_imem->fast_update_timer, ipc_imem_fast_update_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
- hrtimer_init(&ipc_imem->td_alloc_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb;
+ hrtimer_setup(&ipc_imem->td_alloc_timer, ipc_imem_td_alloc_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
- hrtimer_init(&ipc_imem->adb_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ipc_imem->adb_timer.function = ipc_imem_adb_timer_cb;
+ hrtimer_setup(&ipc_imem->adb_timer, ipc_imem_adb_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
if (ipc_imem_config(ipc_imem)) {
dev_err(ipc_imem->dev, "failed to initialize the imem");
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
index 7a9c09cd4fdc..6a7a26085fc7 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
@@ -41,6 +41,7 @@
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include <net/gro.h>
#include "t7xx_dpmaif.h"
#include "t7xx_hif_dpmaif.h"
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c
index 02f2ec7cf4ce..8bf63f2dcbbf 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -32,7 +32,6 @@
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
-#include <linux/pm_wakeup.h>
#include <linux/spinlock.h>
#include "t7xx_mhccif.h"
diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c
index a51e2755991a..63a47d420bc5 100644
--- a/drivers/net/wwan/wwan_core.c
+++ b/drivers/net/wwan/wwan_core.c
@@ -967,15 +967,18 @@ out:
return dev;
}
-static int wwan_rtnl_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int wwan_rtnl_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
struct wwan_device *wwandev = wwan_dev_get_by_parent(dev->dev.parent);
- u32 link_id = nla_get_u32(data[IFLA_WWAN_LINK_ID]);
struct wwan_netdev_priv *priv = netdev_priv(dev);
+ struct nlattr **data = params->data;
+ u32 link_id;
int ret;
+ link_id = nla_get_u32(data[IFLA_WWAN_LINK_ID]);
+
if (IS_ERR(wwandev))
return PTR_ERR(wwandev);
@@ -1061,6 +1064,11 @@ static void wwan_create_default_link(struct wwan_device *wwandev,
{
struct nlattr *tb[IFLA_MAX + 1], *linkinfo[IFLA_INFO_MAX + 1];
struct nlattr *data[IFLA_WWAN_MAX + 1];
+ struct rtnl_newlink_params params = {
+ .src_net = &init_net,
+ .tb = tb,
+ .data = data,
+ };
struct net_device *dev;
struct nlmsghdr *nlh;
struct sk_buff *msg;
@@ -1105,7 +1113,7 @@ static void wwan_create_default_link(struct wwan_device *wwandev,
if (WARN_ON(IS_ERR(dev)))
goto unlock;
- if (WARN_ON(wwan_rtnl_newlink(&init_net, dev, tb, data, NULL))) {
+ if (WARN_ON(wwan_rtnl_newlink(dev, &params, NULL))) {
free_netdev(dev);
goto unlock;
}
diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c
index 8aeca7914050..1c1c74f4ff2d 100644
--- a/drivers/ntb/test/ntb_pingpong.c
+++ b/drivers/ntb/test/ntb_pingpong.c
@@ -284,8 +284,7 @@ static struct pp_ctx *pp_create_data(struct ntb_dev *ntb)
pp->ntb = ntb;
atomic_set(&pp->count, 0);
spin_lock_init(&pp->lock);
- hrtimer_init(&pp->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- pp->timer.function = pp_timer_func;
+ hrtimer_setup(&pp->timer, pp_timer_func, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
return pp;
}
diff --git a/drivers/nvdimm/badrange.c b/drivers/nvdimm/badrange.c
index a002ea6fdd84..ee478ccde7c6 100644
--- a/drivers/nvdimm/badrange.c
+++ b/drivers/nvdimm/badrange.c
@@ -167,7 +167,7 @@ static void set_badblock(struct badblocks *bb, sector_t s, int num)
dev_dbg(bb->dev, "Found a bad range (0x%llx, 0x%llx)\n",
(u64) s * 512, (u64) num * 512);
/* this isn't an error as the hardware will still throw an exception */
- if (badblocks_set(bb, s, num, 1))
+ if (!badblocks_set(bb, s, num, 1))
dev_info_once(bb->dev, "%s: failed for sector %llx\n",
__func__, (u64) s);
}
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 5ca06e9a2d29..cc5c8f3f81e8 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -673,7 +673,7 @@ static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
{
if (bb->count) {
sector_t first_bad;
- int num_bad;
+ sector_t num_bad;
return !!badblocks_check(bb, sector, len / 512, &first_bad,
&num_bad);
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index cfdfe0eaa512..8f3e816e805d 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -367,9 +367,10 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
struct nd_namespace_common *ndns = nd_pfn->ndns;
void *zero_page = page_address(ZERO_PAGE(0));
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
- int num_bad, meta_num, rc, bb_present;
+ int meta_num, rc, bb_present;
sector_t first_bad, meta_start;
struct nd_namespace_io *nsio;
+ sector_t num_bad;
if (nd_pfn->mode != PFN_MODE_PMEM)
return 0;
@@ -394,7 +395,7 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
bb_present = badblocks_check(&nd_region->bb, meta_start,
meta_num, &first_bad, &num_bad);
if (bb_present) {
- dev_dbg(&nd_pfn->dev, "meta: %x badblocks at %llx\n",
+ dev_dbg(&nd_pfn->dev, "meta: %llx badblocks at %llx\n",
num_bad, first_bad);
nsoff = ALIGN_DOWN((nd_region->ndr_start
+ (first_bad << 9)) - nsio->res.start,
@@ -413,7 +414,7 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
}
if (rc) {
dev_err(&nd_pfn->dev,
- "error clearing %x badblocks at %llx\n",
+ "error clearing %llx badblocks at %llx\n",
num_bad, first_bad);
return rc;
}
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 785b2d2dbc82..aa50006b7616 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -249,7 +249,7 @@ __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
unsigned int num = PFN_PHYS(nr_pages) >> SECTOR_SHIFT;
struct badblocks *bb = &pmem->bb;
sector_t first_bad;
- int num_bad;
+ sector_t num_bad;
if (kaddr)
*kaddr = pmem->virt_addr + offset;
diff --git a/drivers/nvme/common/Kconfig b/drivers/nvme/common/Kconfig
index 244432e0b73d..da963e4f3f1f 100644
--- a/drivers/nvme/common/Kconfig
+++ b/drivers/nvme/common/Kconfig
@@ -12,3 +12,4 @@ config NVME_AUTH
select CRYPTO_SHA512
select CRYPTO_DH
select CRYPTO_DH_RFC7919_GROUPS
+ select CRYPTO_HKDF
diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c
index 9b7126e1a19d..2c092ec8c0a9 100644
--- a/drivers/nvme/common/auth.c
+++ b/drivers/nvme/common/auth.c
@@ -11,9 +11,12 @@
#include <linux/unaligned.h>
#include <crypto/hash.h>
#include <crypto/dh.h>
+#include <crypto/hkdf.h>
#include <linux/nvme.h>
#include <linux/nvme-auth.h>
+#define HKDF_MAX_HASHLEN 64
+
static u32 nvme_dhchap_seqnum;
static DEFINE_MUTEX(nvme_dhchap_mutex);
@@ -471,5 +474,339 @@ int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key)
}
EXPORT_SYMBOL_GPL(nvme_auth_generate_key);
+/**
+ * nvme_auth_generate_psk - Generate a PSK for TLS
+ * @hmac_id: Hash function identifier
+ * @skey: Session key
+ * @skey_len: Length of @skey
+ * @c1: Value of challenge C1
+ * @c2: Value of challenge C2
+ * @hash_len: Hash length of the hash algorithm
+ * @ret_psk: Pointer to the resulting generated PSK
+ * @ret_len: Length of @ret_psk
+ *
+ * Generate a PSK for TLS as specified in NVMe base specification, section
+ * 8.13.5.9: Generated PSK for TLS
+ *
+ * The generated PSK for TLS shall be computed applying the HMAC function
+ * using the hash function H( ) selected by the HashID parameter in the
+ * DH-HMAC-CHAP_Challenge message with the session key KS as key to the
+ * concatenation of the two challenges C1 and C2 (i.e., generated
+ * PSK = HMAC(KS, C1 || C2)).
+ *
+ * Returns 0 on success with a valid generated PSK pointer in @ret_psk and
+ * the length of @ret_psk in @ret_len, or a negative error number otherwise.
+ */
+int nvme_auth_generate_psk(u8 hmac_id, u8 *skey, size_t skey_len,
+ u8 *c1, u8 *c2, size_t hash_len, u8 **ret_psk, size_t *ret_len)
+{
+ struct crypto_shash *tfm;
+ SHASH_DESC_ON_STACK(shash, tfm);
+ u8 *psk;
+ const char *hmac_name;
+ int ret, psk_len;
+
+ if (!c1 || !c2)
+ return -EINVAL;
+
+ hmac_name = nvme_auth_hmac_name(hmac_id);
+ if (!hmac_name) {
+ pr_warn("%s: invalid hash algorithm %d\n",
+ __func__, hmac_id);
+ return -EINVAL;
+ }
+
+ tfm = crypto_alloc_shash(hmac_name, 0, 0);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ psk_len = crypto_shash_digestsize(tfm);
+ psk = kzalloc(psk_len, GFP_KERNEL);
+ if (!psk) {
+ ret = -ENOMEM;
+ goto out_free_tfm;
+ }
+
+ shash->tfm = tfm;
+ ret = crypto_shash_setkey(tfm, skey, skey_len);
+ if (ret)
+ goto out_free_psk;
+
+ ret = crypto_shash_init(shash);
+ if (ret)
+ goto out_free_psk;
+
+ ret = crypto_shash_update(shash, c1, hash_len);
+ if (ret)
+ goto out_free_psk;
+
+ ret = crypto_shash_update(shash, c2, hash_len);
+ if (ret)
+ goto out_free_psk;
+
+ ret = crypto_shash_final(shash, psk);
+ if (!ret) {
+ *ret_psk = psk;
+ *ret_len = psk_len;
+ }
+
+out_free_psk:
+ if (ret)
+ kfree_sensitive(psk);
+out_free_tfm:
+ crypto_free_shash(tfm);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_generate_psk);
+
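For reference, the construction described in the kernel-doc above can be illustrated outside the kernel. Below is a minimal user-space sketch, not part of this patch, that computes HMAC(KS, C1 || C2) with OpenSSL, assuming SHA-256 as the negotiated hash; the session key and challenge values are hypothetical placeholders.

/* Build with: gcc psk_demo.c -lcrypto */
#include <openssl/hmac.h>
#include <openssl/evp.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Hypothetical session key and challenges (32 bytes each for SHA-256) */
	unsigned char ks[32] = { 0x01 }, c1[32] = { 0x02 }, c2[32] = { 0x03 };
	unsigned char msg[64], psk[EVP_MAX_MD_SIZE];
	unsigned int psk_len = 0;

	/* Generated PSK = HMAC(KS, C1 || C2), as in the kernel-doc above */
	memcpy(msg, c1, sizeof(c1));
	memcpy(msg + sizeof(c1), c2, sizeof(c2));
	if (!HMAC(EVP_sha256(), ks, sizeof(ks), msg, sizeof(msg), psk, &psk_len))
		return 1;

	for (unsigned int i = 0; i < psk_len; i++)
		printf("%02x", psk[i]);
	printf("\n");
	return 0;
}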
+/**
+ * nvme_auth_generate_digest - Generate TLS PSK digest
+ * @hmac_id: Hash function identifier
+ * @psk: Generated input PSK
+ * @psk_len: Length of @psk
+ * @subsysnqn: NQN of the subsystem
+ * @hostnqn: NQN of the host
+ * @ret_digest: Pointer to the returned digest
+ *
+ * Generate a TLS PSK digest as specified in TP8018 Section 3.6.1.3:
+ * TLS PSK and PSK identity Derivation
+ *
+ * The PSK digest shall be computed by encoding in Base64 (refer to RFC
+ * 4648) the result of the application of the HMAC function using the hash
+ * function specified in item 4 above (i.e., the hash function of the cipher
+ * suite associated with the PSK identity) with the PSK as HMAC key to the
+ * concatenation of:
+ * - the NQN of the host (i.e., NQNh) not including the null terminator;
+ * - a space character;
+ * - the NQN of the NVM subsystem (i.e., NQNc) not including the null
+ * terminator;
+ * - a space character; and
+ * - the seventeen ASCII characters "NVMe-over-Fabrics"
+ * (i.e., <PSK digest> = Base64(HMAC(PSK, NQNh || " " || NQNc || " " ||
+ * "NVMe-over-Fabrics"))).
+ * The length of the PSK digest depends on the hash function used to compute
+ * it as follows:
+ * - If the SHA-256 hash function is used, the resulting PSK digest is 44
+ * characters long; or
+ * - If the SHA-384 hash function is used, the resulting PSK digest is 64
+ * characters long.
+ *
+ * Returns 0 on success with a valid digest pointer in @ret_digest, or a
+ * negative error number on failure.
+ */
+int nvme_auth_generate_digest(u8 hmac_id, u8 *psk, size_t psk_len,
+ char *subsysnqn, char *hostnqn, u8 **ret_digest)
+{
+ struct crypto_shash *tfm;
+ SHASH_DESC_ON_STACK(shash, tfm);
+ u8 *digest, *enc;
+ const char *hmac_name;
+ size_t digest_len, hmac_len;
+ int ret;
+
+ if (WARN_ON(!subsysnqn || !hostnqn))
+ return -EINVAL;
+
+ hmac_name = nvme_auth_hmac_name(hmac_id);
+ if (!hmac_name) {
+ pr_warn("%s: invalid hash algorithm %d\n",
+ __func__, hmac_id);
+ return -EINVAL;
+ }
+
+ switch (nvme_auth_hmac_hash_len(hmac_id)) {
+ case 32:
+ hmac_len = 44;
+ break;
+ case 48:
+ hmac_len = 64;
+ break;
+ default:
+ pr_warn("%s: invalid hash algorithm '%s'\n",
+ __func__, hmac_name);
+ return -EINVAL;
+ }
+
+ enc = kzalloc(hmac_len + 1, GFP_KERNEL);
+ if (!enc)
+ return -ENOMEM;
+
+ tfm = crypto_alloc_shash(hmac_name, 0, 0);
+ if (IS_ERR(tfm)) {
+ ret = PTR_ERR(tfm);
+ goto out_free_enc;
+ }
+
+ digest_len = crypto_shash_digestsize(tfm);
+ digest = kzalloc(digest_len, GFP_KERNEL);
+ if (!digest) {
+ ret = -ENOMEM;
+ goto out_free_tfm;
+ }
+
+ shash->tfm = tfm;
+ ret = crypto_shash_setkey(tfm, psk, psk_len);
+ if (ret)
+ goto out_free_digest;
+
+ ret = crypto_shash_init(shash);
+ if (ret)
+ goto out_free_digest;
+
+ ret = crypto_shash_update(shash, hostnqn, strlen(hostnqn));
+ if (ret)
+ goto out_free_digest;
+
+ ret = crypto_shash_update(shash, " ", 1);
+ if (ret)
+ goto out_free_digest;
+
+ ret = crypto_shash_update(shash, subsysnqn, strlen(subsysnqn));
+ if (ret)
+ goto out_free_digest;
+
+ ret = crypto_shash_update(shash, " NVMe-over-Fabrics", 18);
+ if (ret)
+ goto out_free_digest;
+
+ ret = crypto_shash_final(shash, digest);
+ if (ret)
+ goto out_free_digest;
+
+ ret = base64_encode(digest, digest_len, enc);
+ if (ret < hmac_len) {
+ ret = -ENOKEY;
+ goto out_free_digest;
+ }
+ *ret_digest = enc;
+ ret = 0;
+
+out_free_digest:
+ kfree_sensitive(digest);
+out_free_tfm:
+ crypto_free_shash(tfm);
+out_free_enc:
+ if (ret)
+ kfree_sensitive(enc);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_generate_digest);
+
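The digest construction above can likewise be sketched in user space. The example below is illustrative only (the NQNs and PSK are hypothetical placeholders): it computes Base64(HMAC(PSK, NQNh || " " || NQNc || " NVMe-over-Fabrics")) with OpenSSL; for a SHA-256 HMAC the padded Base64 output is the 44-character digest mentioned in the kernel-doc.

/* Build with: gcc digest_demo.c -lcrypto */
#include <openssl/hmac.h>
#include <openssl/evp.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Hypothetical generated PSK (32 bytes for SHA-256) and NQNs */
	unsigned char psk[32] = { 0xaa };
	const char *hostnqn = "nqn.2014-08.org.nvmexpress:uuid:host";
	const char *subsysnqn = "nqn.2014-08.org.nvmexpress:uuid:subsys";
	unsigned char msg[512], hmac[EVP_MAX_MD_SIZE];
	unsigned char b64[EVP_MAX_MD_SIZE * 2];
	unsigned int hmac_len = 0;
	int msg_len;

	/* NQNh || " " || NQNc || " NVMe-over-Fabrics", no NUL terminators */
	msg_len = snprintf((char *)msg, sizeof(msg), "%s %s NVMe-over-Fabrics",
			   hostnqn, subsysnqn);

	if (!HMAC(EVP_sha256(), psk, sizeof(psk), msg, msg_len, hmac, &hmac_len))
		return 1;

	/* Base64-encode the HMAC output; 32 bytes -> 44 characters */
	EVP_EncodeBlock(b64, hmac, hmac_len);
	printf("%s\n", b64);
	return 0;
}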
+/**
+ * nvme_auth_derive_tls_psk - Derive TLS PSK
+ * @hmac_id: Hash function identifier
+ * @psk: Generated input PSK
+ * @psk_len: Size of @psk
+ * @psk_digest: TLS PSK digest
+ * @ret_psk: Pointer to the resulting TLS PSK
+ *
+ * Derive a TLS PSK as specified in TP8018 Section 3.6.1.3:
+ * TLS PSK and PSK identity Derivation
+ *
+ * The TLS PSK shall be derived as follows from an input PSK
+ * (i.e., either a retained PSK or a generated PSK) and a PSK
+ * identity using the HKDF-Extract and HKDF-Expand-Label operations
+ * (refer to RFC 5869 and RFC 8446) where the hash function is the
+ * one specified by the hash specifier of the PSK identity:
+ * 1. PRK = HKDF-Extract(0, Input PSK); and
+ * 2. TLS PSK = HKDF-Expand-Label(PRK, "nvme-tls-psk", PskIdentityContext, L),
+ * where PskIdentityContext is the hash identifier indicated in
+ * the PSK identity concatenated to a space character and to the
+ * Base64 PSK digest (i.e., "<hash> <PSK digest>") and L is the
+ * output size in bytes of the hash function (i.e., 32 for SHA-256
+ * and 48 for SHA-384).
+ *
+ * Returns 0 on success with a valid psk pointer in @ret_psk or a negative
+ * error number otherwise.
+ */
+int nvme_auth_derive_tls_psk(int hmac_id, u8 *psk, size_t psk_len,
+ u8 *psk_digest, u8 **ret_psk)
+{
+ struct crypto_shash *hmac_tfm;
+ const char *hmac_name;
+ const char *psk_prefix = "tls13 nvme-tls-psk";
+ static const char default_salt[HKDF_MAX_HASHLEN];
+ size_t info_len, prk_len;
+ char *info;
+ unsigned char *prk, *tls_key;
+ int ret;
+
+ hmac_name = nvme_auth_hmac_name(hmac_id);
+ if (!hmac_name) {
+ pr_warn("%s: invalid hash algorithm %d\n",
+ __func__, hmac_id);
+ return -EINVAL;
+ }
+ if (hmac_id == NVME_AUTH_HASH_SHA512) {
+ pr_warn("%s: unsupported hash algorithm %s\n",
+ __func__, hmac_name);
+ return -EINVAL;
+ }
+
+ hmac_tfm = crypto_alloc_shash(hmac_name, 0, 0);
+ if (IS_ERR(hmac_tfm))
+ return PTR_ERR(hmac_tfm);
+
+ prk_len = crypto_shash_digestsize(hmac_tfm);
+ prk = kzalloc(prk_len, GFP_KERNEL);
+ if (!prk) {
+ ret = -ENOMEM;
+ goto out_free_shash;
+ }
+
+ if (WARN_ON(prk_len > HKDF_MAX_HASHLEN)) {
+ ret = -EINVAL;
+ goto out_free_prk;
+ }
+ ret = hkdf_extract(hmac_tfm, psk, psk_len,
+ default_salt, prk_len, prk);
+ if (ret)
+ goto out_free_prk;
+
+ ret = crypto_shash_setkey(hmac_tfm, prk, prk_len);
+ if (ret)
+ goto out_free_prk;
+
+ /*
+	 * 2 additional bytes for the length field from HKDF-Expand-Label,
+	 * 2 additional bytes for the HMAC ID, and one byte for the space
+ * separator.
+ */
+ info_len = strlen(psk_digest) + strlen(psk_prefix) + 5;
+ info = kzalloc(info_len + 1, GFP_KERNEL);
+ if (!info) {
+ ret = -ENOMEM;
+ goto out_free_prk;
+ }
+
+ put_unaligned_be16(psk_len, info);
+ memcpy(info + 2, psk_prefix, strlen(psk_prefix));
+ sprintf(info + 2 + strlen(psk_prefix), "%02d %s", hmac_id, psk_digest);
+
+ tls_key = kzalloc(psk_len, GFP_KERNEL);
+ if (!tls_key) {
+ ret = -ENOMEM;
+ goto out_free_info;
+ }
+ ret = hkdf_expand(hmac_tfm, info, info_len, tls_key, psk_len);
+ if (ret) {
+ kfree(tls_key);
+ goto out_free_info;
+ }
+ *ret_psk = tls_key;
+
+out_free_info:
+ kfree(info);
+out_free_prk:
+ kfree(prk);
+out_free_shash:
+ crypto_free_shash(hmac_tfm);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_derive_tls_psk);
+
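To make the info layout handed to hkdf_expand() above easier to follow, here is a small stand-alone sketch (hypothetical values; illustration only, not part of this patch) that assembles the same byte sequence: a big-endian 16-bit output length, the label "tls13 nvme-tls-psk", and the context "<hmac_id> <PSK digest>".

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdlib.h>

int main(void)
{
	const char *psk_prefix = "tls13 nvme-tls-psk";
	const char *psk_digest = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";	/* hypothetical */
	uint16_t psk_len = 32;		/* SHA-256 output size */
	int hmac_id = 1;		/* hypothetical hash identifier */
	size_t info_len = strlen(psk_digest) + strlen(psk_prefix) + 5;
	unsigned char *info = calloc(1, info_len + 1);

	if (!info)
		return 1;

	/* 2-byte big-endian length, then the label, then "<id> <digest>" */
	info[0] = psk_len >> 8;
	info[1] = psk_len & 0xff;
	memcpy(info + 2, psk_prefix, strlen(psk_prefix));
	sprintf((char *)(info + 2 + strlen(psk_prefix)), "%02d %s",
		hmac_id, psk_digest);

	for (size_t i = 0; i < info_len; i++)
		printf("%02x", info[i]);
	printf("\n");
	free(info);
	return 0;
}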
MODULE_DESCRIPTION("NVMe Authentication framework");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/common/keyring.c b/drivers/nvme/common/keyring.c
index ed5167f942d8..32d16c53133b 100644
--- a/drivers/nvme/common/keyring.c
+++ b/drivers/nvme/common/keyring.c
@@ -5,7 +5,6 @@
#include <linux/module.h>
#include <linux/seq_file.h>
-#include <linux/key.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <linux/nvme.h>
@@ -124,6 +123,70 @@ static struct key *nvme_tls_psk_lookup(struct key *keyring,
return key_ref_to_ptr(keyref);
}
+/**
+ * nvme_tls_psk_refresh - Refresh TLS PSK
+ * @keyring: Keyring holding the TLS PSK
+ * @hostnqn: Host NQN to use
+ * @subnqn: Subsystem NQN to use
+ * @hmac_id: Hash function identifier
+ * @data: TLS PSK key material
+ * @data_len: Length of @data
+ * @digest: TLS PSK digest
+ *
+ * Refresh a generated version 1 TLS PSK with the identity generated
+ * from @hmac_id, @hostnqn, @subnqn, and @digest in the keyring given
+ * by @keyring.
+ *
+ * Returns the updated key on success or an error pointer otherwise.
+ */
+struct key *nvme_tls_psk_refresh(struct key *keyring,
+ const char *hostnqn, const char *subnqn, u8 hmac_id,
+ u8 *data, size_t data_len, const char *digest)
+{
+ key_perm_t keyperm =
+ KEY_POS_SEARCH | KEY_POS_VIEW | KEY_POS_READ |
+ KEY_POS_WRITE | KEY_POS_LINK | KEY_POS_SETATTR |
+ KEY_USR_SEARCH | KEY_USR_VIEW | KEY_USR_READ;
+ char *identity;
+ key_ref_t keyref;
+ key_serial_t keyring_id;
+ struct key *key;
+
+ if (!hostnqn || !subnqn || !data || !data_len)
+ return ERR_PTR(-EINVAL);
+
+ identity = kasprintf(GFP_KERNEL, "NVMe1G%02d %s %s %s",
+ hmac_id, hostnqn, subnqn, digest);
+ if (!identity)
+ return ERR_PTR(-ENOMEM);
+
+ if (!keyring)
+ keyring = nvme_keyring;
+ keyring_id = key_serial(keyring);
+ pr_debug("keyring %x refresh tls psk '%s'\n",
+ keyring_id, identity);
+ keyref = key_create_or_update(make_key_ref(keyring, true),
+ "psk", identity, data, data_len,
+ keyperm, KEY_ALLOC_NOT_IN_QUOTA |
+ KEY_ALLOC_BUILT_IN |
+ KEY_ALLOC_BYPASS_RESTRICTION);
+ if (IS_ERR(keyref)) {
+ pr_debug("refresh tls psk '%s' failed, error %ld\n",
+ identity, PTR_ERR(keyref));
+ kfree(identity);
+ return ERR_PTR(-ENOKEY);
+ }
+ kfree(identity);
+ /*
+ * Set the default timeout to 1 hour
+ * as suggested in TP8018.
+ */
+ key = key_ref_to_ptr(keyref);
+ key_set_timeout(key, 3600);
+ return key;
+}
+EXPORT_SYMBOL_GPL(nvme_tls_psk_refresh);
+
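The identity string built by kasprintf() above follows the "NVMe1G<hh> <hostnqn> <subnqn> <digest>" form for generated (version 1) TLS PSKs. A tiny stand-alone sketch with hypothetical NQNs and digest, for illustration only:

#include <stdio.h>

int main(void)
{
	/* Hypothetical values, matching the kasprintf() format above */
	int hmac_id = 1;
	const char *hostnqn = "nqn.2014-08.org.nvmexpress:uuid:host";
	const char *subnqn = "nqn.2014-08.org.nvmexpress:uuid:subsys";
	const char *digest = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";
	char identity[512];

	snprintf(identity, sizeof(identity), "NVMe1G%02d %s %s %s",
		 hmac_id, hostnqn, subnqn, digest);
	printf("%s\n", identity);
	return 0;
}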
/*
* NVMe PSK priority list
*
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 486afe598184..10e453b2436e 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -109,7 +109,7 @@ config NVME_HOST_AUTH
bool "NVMe over Fabrics In-Band Authentication in host side"
depends on NVME_CORE
select NVME_AUTH
- select NVME_KEYRING if NVME_TCP_TLS
+ select NVME_KEYRING
help
This provides support for NVMe over Fabrics In-Band Authentication in
host side.
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index a060f69558e7..b1fddfa33ab9 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -221,7 +221,7 @@ static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q)
return APPLE_ANS_MAX_QUEUE_DEPTH;
}
-static void apple_nvme_rtkit_crashed(void *cookie)
+static void apple_nvme_rtkit_crashed(void *cookie, const void *crashlog, size_t crashlog_size)
{
struct apple_nvme *anv = cookie;
@@ -525,7 +525,7 @@ static blk_status_t apple_nvme_map_data(struct apple_nvme *anv,
if (!iod->sg)
return BLK_STS_RESOURCE;
sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
- iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
+ iod->nents = blk_rq_map_sg(req, iod->sg);
if (!iod->nents)
goto out_free_sg;
@@ -599,7 +599,8 @@ static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q,
}
if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
- !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
+ !blk_mq_add_to_batch(req, iob,
+ nvme_req(req)->status != NVME_SC_SUCCESS,
apple_nvme_complete_batch))
apple_nvme_complete_rq(req);
}
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index 5ea0e21709da..6115fef74c1e 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -12,6 +12,7 @@
#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-auth.h>
+#include <linux/nvme-keyring.h>
#define CHAP_BUF_SIZE 4096
static struct kmem_cache *nvme_chap_buf_cache;
@@ -131,7 +132,13 @@ static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
data->auth_type = NVME_AUTH_COMMON_MESSAGES;
data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
data->t_id = cpu_to_le16(chap->transaction);
- data->sc_c = 0; /* No secure channel concatenation */
+ if (ctrl->opts->concat && chap->qid == 0) {
+ if (ctrl->opts->tls_key)
+ data->sc_c = NVME_AUTH_SECP_REPLACETLSPSK;
+ else
+ data->sc_c = NVME_AUTH_SECP_NEWTLSPSK;
+	} else {
+		data->sc_c = NVME_AUTH_SECP_NOSC;
+	}
data->napd = 1;
data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID;
data->auth_protocol[0].dhchap.halen = 3;
@@ -311,8 +318,9 @@ static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
data->hl = chap->hash_len;
data->dhvlen = cpu_to_le16(chap->host_key_len);
memcpy(data->rval, chap->response, chap->hash_len);
- if (ctrl->ctrl_key) {
+ if (ctrl->ctrl_key)
chap->bi_directional = true;
+ if (ctrl->ctrl_key || ctrl->opts->concat) {
get_random_bytes(chap->c2, chap->hash_len);
data->cvalid = 1;
memcpy(data->rval + chap->hash_len, chap->c2,
@@ -322,7 +330,10 @@ static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
} else {
memset(chap->c2, 0, chap->hash_len);
}
- chap->s2 = nvme_auth_get_seqnum();
+ if (ctrl->opts->concat)
+ chap->s2 = 0;
+ else
+ chap->s2 = nvme_auth_get_seqnum();
data->seqnum = cpu_to_le32(chap->s2);
if (chap->host_key_len) {
dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n",
@@ -677,6 +688,92 @@ static void nvme_auth_free_dhchap(struct nvme_dhchap_queue_context *chap)
crypto_free_kpp(chap->dh_tfm);
}
+void nvme_auth_revoke_tls_key(struct nvme_ctrl *ctrl)
+{
+ dev_dbg(ctrl->device, "Wipe generated TLS PSK %08x\n",
+ key_serial(ctrl->opts->tls_key));
+ key_revoke(ctrl->opts->tls_key);
+ key_put(ctrl->opts->tls_key);
+ ctrl->opts->tls_key = NULL;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_revoke_tls_key);
+
+static int nvme_auth_secure_concat(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ u8 *psk, *digest, *tls_psk;
+ struct key *tls_key;
+ size_t psk_len;
+ int ret = 0;
+
+ if (!chap->sess_key) {
+ dev_warn(ctrl->device,
+ "%s: qid %d no session key negotiated\n",
+ __func__, chap->qid);
+ return -ENOKEY;
+ }
+
+ if (chap->qid) {
+ dev_warn(ctrl->device,
+ "qid %d: secure concatenation not supported on I/O queues\n",
+ chap->qid);
+ return -EINVAL;
+ }
+ ret = nvme_auth_generate_psk(chap->hash_id, chap->sess_key,
+ chap->sess_key_len,
+ chap->c1, chap->c2,
+ chap->hash_len, &psk, &psk_len);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "%s: qid %d failed to generate PSK, error %d\n",
+ __func__, chap->qid, ret);
+ return ret;
+ }
+ dev_dbg(ctrl->device,
+ "%s: generated psk %*ph\n", __func__, (int)psk_len, psk);
+
+ ret = nvme_auth_generate_digest(chap->hash_id, psk, psk_len,
+ ctrl->opts->subsysnqn,
+ ctrl->opts->host->nqn, &digest);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "%s: qid %d failed to generate digest, error %d\n",
+ __func__, chap->qid, ret);
+ goto out_free_psk;
+	}
+ dev_dbg(ctrl->device, "%s: generated digest %s\n",
+ __func__, digest);
+ ret = nvme_auth_derive_tls_psk(chap->hash_id, psk, psk_len,
+ digest, &tls_psk);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "%s: qid %d failed to derive TLS psk, error %d\n",
+ __func__, chap->qid, ret);
+ goto out_free_digest;
+	}
+
+ tls_key = nvme_tls_psk_refresh(ctrl->opts->keyring,
+ ctrl->opts->host->nqn,
+ ctrl->opts->subsysnqn, chap->hash_id,
+ tls_psk, psk_len, digest);
+ if (IS_ERR(tls_key)) {
+ ret = PTR_ERR(tls_key);
+ dev_warn(ctrl->device,
+ "%s: qid %d failed to insert generated key, error %d\n",
+ __func__, chap->qid, ret);
+ tls_key = NULL;
+ }
+ kfree_sensitive(tls_psk);
+ if (ctrl->opts->tls_key)
+ nvme_auth_revoke_tls_key(ctrl);
+ ctrl->opts->tls_key = tls_key;
+out_free_digest:
+ kfree_sensitive(digest);
+out_free_psk:
+ kfree_sensitive(psk);
+ return ret;
+}
+
static void nvme_queue_auth_work(struct work_struct *work)
{
struct nvme_dhchap_queue_context *chap =
@@ -833,6 +930,13 @@ static void nvme_queue_auth_work(struct work_struct *work)
}
if (!ret) {
chap->error = 0;
+ if (ctrl->opts->concat &&
+ (ret = nvme_auth_secure_concat(ctrl, chap))) {
+ dev_warn(ctrl->device,
+ "%s: qid %d failed to enable secure concatenation\n",
+ __func__, chap->qid);
+ chap->error = ret;
+ }
return;
}
@@ -912,6 +1016,11 @@ static void nvme_ctrl_auth_work(struct work_struct *work)
"qid 0: authentication failed\n");
return;
}
+ /*
+ * Only run authentication on the admin queue for secure concatenation.
+ */
+ if (ctrl->opts->concat)
+ return;
for (q = 1; q < ctrl->queue_count; q++) {
ret = nvme_auth_negotiate(ctrl, q);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f028913e2e62..777db89fdaa7 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -431,6 +431,12 @@ static inline void nvme_end_req_zoned(struct request *req)
static inline void __nvme_end_req(struct request *req)
{
+ if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) {
+ if (blk_rq_is_passthrough(req))
+ nvme_log_err_passthru(req);
+ else
+ nvme_log_error(req);
+ }
nvme_end_req_zoned(req);
nvme_trace_bio_complete(req);
if (req->cmd_flags & REQ_NVME_MPATH)
@@ -441,12 +447,6 @@ void nvme_end_req(struct request *req)
{
blk_status_t status = nvme_error_status(nvme_req(req)->status);
- if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) {
- if (blk_rq_is_passthrough(req))
- nvme_log_err_passthru(req);
- else
- nvme_log_error(req);
- }
__nvme_end_req(req);
blk_mq_end_request(req, status);
}
@@ -4018,6 +4018,9 @@ static void nvme_ns_remove(struct nvme_ns *ns)
if (!nvme_ns_head_multipath(ns->head))
nvme_cdev_del(&ns->cdev, &ns->cdev_device);
+
+ nvme_mpath_remove_sysfs_link(ns);
+
del_gendisk(ns->disk);
mutex_lock(&ns->ctrl->namespaces_lock);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 432efcbf9e2f..93e9041b9657 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -472,8 +472,9 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
result = le32_to_cpu(res.u32);
ctrl->cntlid = result & 0xFFFF;
if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) {
- /* Secure concatenation is not implemented */
- if (result & NVME_CONNECT_AUTHREQ_ASCR) {
+ /* Check for secure concatenation */
+ if ((result & NVME_CONNECT_AUTHREQ_ASCR) &&
+ !ctrl->opts->concat) {
dev_warn(ctrl->device,
"qid 0: secure concatenation is not supported\n");
ret = -EOPNOTSUPP;
@@ -550,7 +551,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
/* Secure concatenation is not implemented */
if (result & NVME_CONNECT_AUTHREQ_ASCR) {
dev_warn(ctrl->device,
- "qid 0: secure concatenation is not supported\n");
+ "qid %d: secure concatenation is not supported\n", qid);
ret = -EOPNOTSUPP;
goto out_free_data;
}
@@ -706,6 +707,7 @@ static const match_table_t opt_tokens = {
#endif
#ifdef CONFIG_NVME_TCP_TLS
{ NVMF_OPT_TLS, "tls" },
+ { NVMF_OPT_CONCAT, "concat" },
#endif
{ NVMF_OPT_ERR, NULL }
};
@@ -735,6 +737,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
opts->tls = false;
opts->tls_key = NULL;
opts->keyring = NULL;
+ opts->concat = false;
options = o = kstrdup(buf, GFP_KERNEL);
if (!options)
@@ -1053,6 +1056,14 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
}
opts->tls = true;
break;
+ case NVMF_OPT_CONCAT:
+ if (!IS_ENABLED(CONFIG_NVME_TCP_TLS)) {
+ pr_err("TLS is not supported\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->concat = true;
+ break;
default:
pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
p);
@@ -1079,6 +1090,23 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
pr_warn("failfast tmo (%d) larger than controller loss tmo (%d)\n",
opts->fast_io_fail_tmo, ctrl_loss_tmo);
}
+ if (opts->concat) {
+ if (opts->tls) {
+ pr_err("Secure concatenation over TLS is not supported\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ if (opts->tls_key) {
+ pr_err("Cannot specify a TLS key for secure concatenation\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!opts->dhchap_secret) {
+ pr_err("Need to enable DH-CHAP for secure concatenation\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ }
opts->host = nvmf_host_add(hostnqn, &hostid);
if (IS_ERR(opts->host)) {
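For reference, "concat" is parsed like any other fabrics option token. A hypothetical options string that satisfies the checks added above might look like the following sketch (addresses, NQN and secret are placeholders, not part of this patch):

/* Hypothetical fabrics options string enabling secure channel concatenation.
 * Per the validation above it carries a DH-CHAP secret and does not set
 * "tls" or a TLS key; the TLS PSK is derived during DH-HMAC-CHAP negotiation
 * instead.
 */
static const char nvmf_opts_example[] =
	"transport=tcp,traddr=192.0.2.1,trsvcid=4420,"
	"nqn=nqn.2014-08.org.nvmexpress:uuid:subsys,"
	"dhchap_secret=DHHC-1:00:placeholder:,concat";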
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 21d75dc4a3a0..9cf5b020adba 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -66,6 +66,7 @@ enum {
NVMF_OPT_TLS = 1 << 25,
NVMF_OPT_KEYRING = 1 << 26,
NVMF_OPT_TLS_KEY = 1 << 27,
+ NVMF_OPT_CONCAT = 1 << 28,
};
/**
@@ -101,6 +102,7 @@ enum {
* @keyring: Keyring to use for key lookups
* @tls_key: TLS key for encrypted connections (TCP)
* @tls: Start TLS encrypted connections (TCP)
+ * @concat: Enable secure channel concatenation (TCP)
* @disable_sqflow: disable controller sq flow control
* @hdr_digest: generate/verify header digest (TCP)
* @data_digest: generate/verify data digest (TCP)
@@ -130,6 +132,7 @@ struct nvmf_ctrl_options {
struct key *keyring;
struct key *tls_key;
bool tls;
+ bool concat;
bool disable_sqflow;
bool hdr_digest;
bool data_digest;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index b9929a5a7f4e..2257c3c96dd2 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2571,7 +2571,7 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
if (ret)
return -ENOMEM;
- op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
+ op->nents = blk_rq_map_sg(rq, freq->sg_table.sgl);
WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
op->nents, rq_dma_dir(rq));
@@ -2858,7 +2858,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
unsigned int nr_io_queues;
int ret;
- nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
+ nr_io_queues = min3(opts->nr_io_queues, num_online_cpus(),
ctrl->lport->ops->max_hw_queues);
ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
if (ret) {
@@ -2912,7 +2912,7 @@ nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
unsigned int nr_io_queues;
int ret;
- nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
+ nr_io_queues = min3(opts->nr_io_queues, num_online_cpus(),
ctrl->lport->ops->max_hw_queues);
ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
if (ret) {
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 24e2c702da7a..ecf136489044 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -114,7 +114,8 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
static int nvme_map_user_request(struct request *req, u64 ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
- struct io_uring_cmd *ioucmd, unsigned int flags)
+ struct io_uring_cmd *ioucmd, unsigned int flags,
+ unsigned int iou_issue_flags)
{
struct request_queue *q = req->q;
struct nvme_ns *ns = q->queuedata;
@@ -146,7 +147,8 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
goto out;
}
ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
- rq_data_dir(req), &iter, ioucmd);
+ rq_data_dir(req), &iter, ioucmd,
+ iou_issue_flags);
if (ret < 0)
goto out;
ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
@@ -198,7 +200,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
req->timeout = timeout;
if (ubuffer && bufflen) {
ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
- meta_len, NULL, flags);
+ meta_len, NULL, flags, 0);
if (ret)
return ret;
}
@@ -514,10 +516,10 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return PTR_ERR(req);
req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0;
- if (d.addr && d.data_len) {
+ if (d.data_len) {
ret = nvme_map_user_request(req, d.addr,
d.data_len, nvme_to_user_ptr(d.metadata),
- d.metadata_len, ioucmd, vec);
+ d.metadata_len, ioucmd, vec, issue_flags);
if (ret)
return ret;
}
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 2a7635565083..6b12ca80aa27 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -686,6 +686,8 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
kblockd_schedule_work(&head->partition_scan_work);
}
+ nvme_mpath_add_sysfs_link(ns->head);
+
mutex_lock(&head->lock);
if (nvme_path_is_optimized(ns)) {
int node, srcu_idx;
@@ -768,6 +770,25 @@ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
if (nvme_state_is_live(ns->ana_state) &&
nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE)
nvme_mpath_set_live(ns);
+ else {
+ /*
+		 * Add a sysfs link from the multipath head gendisk node to the
+		 * path device gendisk node.
+		 * If the path's ANA state is live (i.e. either optimized or
+		 * non-optimized) while we allocate the namespace, the sysfs
+		 * link is created from nvme_mpath_set_live() and we never fall
+		 * through to this code path. For any other ANA state,
+		 * nvme_mpath_set_live() is only called once the state has
+		 * transitioned to live, but we still want the sysfs link from
+		 * the head node to the path device irrespective of the path's
+		 * ANA state. So if we reach this point the path's ANA state is
+		 * not live; create the sysfs link to this path from the head
+		 * node anyway, provided the head node has already come alive.
+ */
+ if (test_bit(NVME_NSHEAD_DISK_LIVE, &ns->head->flags))
+ nvme_mpath_add_sysfs_link(ns->head);
+ }
}
static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
@@ -955,6 +976,45 @@ static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
}
DEVICE_ATTR_RO(ana_state);
+static ssize_t queue_depth_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
+
+ if (ns->head->subsys->iopolicy != NVME_IOPOLICY_QD)
+ return 0;
+
+ return sysfs_emit(buf, "%d\n", atomic_read(&ns->ctrl->nr_active));
+}
+DEVICE_ATTR_RO(queue_depth);
+
+static ssize_t numa_nodes_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int node, srcu_idx;
+ nodemask_t numa_nodes;
+ struct nvme_ns *current_ns;
+ struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
+ struct nvme_ns_head *head = ns->head;
+
+ if (head->subsys->iopolicy != NVME_IOPOLICY_NUMA)
+ return 0;
+
+ nodes_clear(numa_nodes);
+
+ srcu_idx = srcu_read_lock(&head->srcu);
+ for_each_node(node) {
+ current_ns = srcu_dereference(head->current_path[node],
+ &head->srcu);
+ if (ns == current_ns)
+ node_set(node, numa_nodes);
+ }
+ srcu_read_unlock(&head->srcu, srcu_idx);
+
+ return sysfs_emit(buf, "%*pbl\n", nodemask_pr_args(&numa_nodes));
+}
+DEVICE_ATTR_RO(numa_nodes);
+
static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
struct nvme_ana_group_desc *desc, void *data)
{
@@ -967,6 +1027,84 @@ static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
return -ENXIO; /* just break out of the loop */
}
+void nvme_mpath_add_sysfs_link(struct nvme_ns_head *head)
+{
+ struct device *target;
+ int rc, srcu_idx;
+ struct nvme_ns *ns;
+ struct kobject *kobj;
+
+ /*
+	 * Ensure the head disk node is already added, otherwise we may get an
+	 * invalid kobj for the head disk node
+ */
+ if (!test_bit(GD_ADDED, &head->disk->state))
+ return;
+
+ kobj = &disk_to_dev(head->disk)->kobj;
+
+ /*
+ * loop through each ns chained through the head->list and create the
+ * sysfs link from head node to the ns path node
+ */
+ srcu_idx = srcu_read_lock(&head->srcu);
+
+ list_for_each_entry_rcu(ns, &head->list, siblings) {
+ /*
+		 * Avoid creating the link if it already exists for the given
+		 * path. When a path's ANA state transitions from optimized to
+		 * non-optimized or vice versa, nvme_mpath_set_live() is
+		 * invoked, which in turn calls this function. If the sysfs
+		 * link already exists for the given path and we attempt to
+		 * re-create it, the sysfs code warns about it loudly. So we
+		 * check the NVME_NS_SYSFS_ATTR_LINK flag here to ensure that
+		 * we're not creating a duplicate link.
+		 * test_and_set_bit() is used because it protects against
+		 * multiple nvme paths being added simultaneously.
+ */
+ if (test_and_set_bit(NVME_NS_SYSFS_ATTR_LINK, &ns->flags))
+ continue;
+
+ /*
+		 * Ensure that the ns path disk node is already added, otherwise
+		 * we may get an invalid kobj name for the target
+ */
+ if (!test_bit(GD_ADDED, &ns->disk->state))
+ continue;
+
+ target = disk_to_dev(ns->disk);
+ /*
+ * Create sysfs link from head gendisk kobject @kobj to the
+ * ns path gendisk kobject @target->kobj.
+ */
+ rc = sysfs_add_link_to_group(kobj, nvme_ns_mpath_attr_group.name,
+ &target->kobj, dev_name(target));
+ if (unlikely(rc)) {
+ dev_err(disk_to_dev(ns->head->disk),
+ "failed to create link to %s\n",
+ dev_name(target));
+ clear_bit(NVME_NS_SYSFS_ATTR_LINK, &ns->flags);
+ }
+ }
+
+ srcu_read_unlock(&head->srcu, srcu_idx);
+}
+
+void nvme_mpath_remove_sysfs_link(struct nvme_ns *ns)
+{
+ struct device *target;
+ struct kobject *kobj;
+
+ if (!test_bit(NVME_NS_SYSFS_ATTR_LINK, &ns->flags))
+ return;
+
+ target = disk_to_dev(ns->disk);
+ kobj = &disk_to_dev(ns->head->disk)->kobj;
+ sysfs_remove_link_from_group(kobj, nvme_ns_mpath_attr_group.name,
+ dev_name(target));
+ clear_bit(NVME_NS_SYSFS_ATTR_LINK, &ns->flags);
+}
+
void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
{
if (nvme_ctrl_use_ana(ns->ctrl)) {
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 7be92d07430e..51e078642127 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -534,10 +534,11 @@ struct nvme_ns {
struct nvme_ns_head *head;
unsigned long flags;
-#define NVME_NS_REMOVING 0
-#define NVME_NS_ANA_PENDING 2
-#define NVME_NS_FORCE_RO 3
-#define NVME_NS_READY 4
+#define NVME_NS_REMOVING 0
+#define NVME_NS_ANA_PENDING 2
+#define NVME_NS_FORCE_RO 3
+#define NVME_NS_READY 4
+#define NVME_NS_SYSFS_ATTR_LINK 5
struct cdev cdev;
struct device cdev_device;
@@ -933,6 +934,7 @@ int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);
int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
extern const struct attribute_group *nvme_ns_attr_groups[];
+extern const struct attribute_group nvme_ns_mpath_attr_group;
extern const struct pr_ops nvme_pr_ops;
extern const struct block_device_operations nvme_ns_head_ops;
extern const struct attribute_group nvme_dev_attrs_group;
@@ -955,6 +957,8 @@ void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
+void nvme_mpath_add_sysfs_link(struct nvme_ns_head *head);
+void nvme_mpath_remove_sysfs_link(struct nvme_ns *ns);
void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
@@ -980,6 +984,8 @@ static inline void nvme_trace_bio_complete(struct request *req)
extern bool multipath;
extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
+extern struct device_attribute dev_attr_queue_depth;
+extern struct device_attribute dev_attr_numa_nodes;
extern struct device_attribute subsys_attr_iopolicy;
static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
@@ -1009,6 +1015,12 @@ static inline void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
+static inline void nvme_mpath_add_sysfs_link(struct nvme_ns_head *head)
+{
+}
+static inline void nvme_mpath_remove_sysfs_link(struct nvme_ns *ns)
+{
+}
static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
return false;
@@ -1147,6 +1159,7 @@ void nvme_auth_stop(struct nvme_ctrl *ctrl);
int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid);
int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid);
void nvme_auth_free(struct nvme_ctrl *ctrl);
+void nvme_auth_revoke_tls_key(struct nvme_ctrl *ctrl);
#else
static inline int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
{
@@ -1169,6 +1182,7 @@ static inline int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
return -EPROTONOSUPPORT;
}
static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {};
+static inline void nvme_auth_revoke_tls_key(struct nvme_ctrl *ctrl) {};
#endif
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 640590b21728..2883d17ee1eb 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -812,7 +812,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
if (!iod->sgt.sgl)
return BLK_STS_RESOURCE;
sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
- iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
+ iod->sgt.orig_nents = blk_rq_map_sg(req, iod->sgt.sgl);
if (!iod->sgt.orig_nents)
goto out_free_sg;
@@ -953,9 +953,6 @@ out_free_cmd:
return ret;
}
-/*
- * NOTE: ns is NULL when called on the admin queue.
- */
static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -1130,8 +1127,9 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
- !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
- nvme_pci_complete_batch))
+ !blk_mq_add_to_batch(req, iob,
+ nvme_req(req)->status != NVME_SC_SUCCESS,
+ nvme_pci_complete_batch))
nvme_pci_complete_rq(req);
}
@@ -1411,9 +1409,20 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
struct nvme_dev *dev = nvmeq->dev;
struct request *abort_req;
struct nvme_command cmd = { };
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 csts = readl(dev->bar + NVME_REG_CSTS);
u8 opcode;
+ /*
+ * Shutdown the device immediately if we see it is disconnected. This
+ * unblocks PCIe error handling if the nvme driver is waiting in
+ * error_resume for a device that has been removed. We can't unbind the
+ * driver while the driver's error callback is waiting to complete, so
+ * we're relying on a timeout to break that deadlock if a removal
+ * occurs while reset work is running.
+ */
+ if (pci_dev_is_disconnected(pdev))
+ nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
if (nvme_state_terminal(&dev->ctrl))
goto disable;
@@ -1421,7 +1430,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
* the recovery mechanism will surely fail.
*/
mb();
- if (pci_channel_offline(to_pci_dev(dev->dev)))
+ if (pci_channel_offline(pdev))
return BLK_EH_RESET_TIMER;
/*
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 86a2891d9bcc..b5a0295b5bf4 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1476,8 +1476,7 @@ static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
if (ret)
return -ENOMEM;
- req->data_sgl.nents = blk_rq_map_sg(rq->q, rq,
- req->data_sgl.sg_table.sgl);
+ req->data_sgl.nents = blk_rq_map_sg(rq, req->data_sgl.sg_table.sgl);
*count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
req->data_sgl.nents, rq_dma_dir(rq));
diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
index 3a41b9ab0f13..6d31226f7a4f 100644
--- a/drivers/nvme/host/sysfs.c
+++ b/drivers/nvme/host/sysfs.c
@@ -258,6 +258,8 @@ static struct attribute *nvme_ns_attrs[] = {
#ifdef CONFIG_NVME_MULTIPATH
&dev_attr_ana_grpid.attr,
&dev_attr_ana_state.attr,
+ &dev_attr_queue_depth.attr,
+ &dev_attr_numa_nodes.attr,
#endif
&dev_attr_io_passthru_err_log_enabled.attr,
NULL,
@@ -290,6 +292,10 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
return 0;
}
+ if (a == &dev_attr_queue_depth.attr || a == &dev_attr_numa_nodes.attr) {
+ if (nvme_disk_is_ns_head(dev_to_disk(dev)))
+ return 0;
+ }
#endif
return a->mode;
}
@@ -299,8 +305,22 @@ static const struct attribute_group nvme_ns_attr_group = {
.is_visible = nvme_ns_attrs_are_visible,
};
+#ifdef CONFIG_NVME_MULTIPATH
+static struct attribute *nvme_ns_mpath_attrs[] = {
+ NULL,
+};
+
+const struct attribute_group nvme_ns_mpath_attr_group = {
+ .name = "multipath",
+ .attrs = nvme_ns_mpath_attrs,
+};
+#endif
+
const struct attribute_group *nvme_ns_attr_groups[] = {
&nvme_ns_attr_group,
+#ifdef CONFIG_NVME_MULTIPATH
+ &nvme_ns_mpath_attr_group,
+#endif
NULL,
};
@@ -780,10 +800,10 @@ static umode_t nvme_tls_attrs_are_visible(struct kobject *kobj,
return 0;
if (a == &dev_attr_tls_key.attr &&
- !ctrl->opts->tls)
+ !ctrl->opts->tls && !ctrl->opts->concat)
return 0;
if (a == &dev_attr_tls_configured_key.attr &&
- !ctrl->opts->tls_key)
+ (!ctrl->opts->tls_key || ctrl->opts->concat))
return 0;
if (a == &dev_attr_tls_keyring.attr &&
!ctrl->opts->keyring)
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 327f3f2f5399..26c459f0198d 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -8,7 +8,6 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
-#include <linux/key.h>
#include <linux/nvme-tcp.h>
#include <linux/nvme-keyring.h>
#include <net/sock.h>
@@ -249,7 +248,7 @@ static inline bool nvme_tcp_tls_configured(struct nvme_ctrl *ctrl)
if (!IS_ENABLED(CONFIG_NVME_TCP_TLS))
return 0;
- return ctrl->opts->tls;
+ return ctrl->opts->tls || ctrl->opts->concat;
}
static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
@@ -1790,7 +1789,8 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
queue->cmnd_capsule_len = sizeof(struct nvme_command) +
NVME_TCP_ADMIN_CCSZ;
- ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
+ ret = sock_create_kern(current->nsproxy->net_ns,
+ ctrl->addr.ss_family, SOCK_STREAM,
IPPROTO_TCP, &queue->sock);
if (ret) {
dev_err(nctrl->device,
@@ -2060,7 +2060,7 @@ static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
if (nvme_tcp_tls_configured(ctrl)) {
if (ctrl->opts->tls_key)
pskid = key_serial(ctrl->opts->tls_key);
- else {
+ else if (ctrl->opts->tls) {
pskid = nvme_tls_psk_default(ctrl->opts->keyring,
ctrl->opts->host->nqn,
ctrl->opts->subsysnqn);
@@ -2090,9 +2090,25 @@ static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
int i, ret;
- if (nvme_tcp_tls_configured(ctrl) && !ctrl->tls_pskid) {
- dev_err(ctrl->device, "no PSK negotiated\n");
- return -ENOKEY;
+ if (nvme_tcp_tls_configured(ctrl)) {
+ if (ctrl->opts->concat) {
+ /*
+ * The generated PSK is stored in the
+ * fabric options
+ */
+ if (!ctrl->opts->tls_key) {
+ dev_err(ctrl->device, "no PSK generated\n");
+ return -ENOKEY;
+ }
+ if (ctrl->tls_pskid &&
+ ctrl->tls_pskid != key_serial(ctrl->opts->tls_key)) {
+ dev_err(ctrl->device, "Stale PSK id %08x\n", ctrl->tls_pskid);
+ ctrl->tls_pskid = 0;
+ }
+ } else if (!ctrl->tls_pskid) {
+ dev_err(ctrl->device, "no PSK negotiated\n");
+ return -ENOKEY;
+ }
}
for (i = 1; i < ctrl->queue_count; i++) {
@@ -2310,6 +2326,27 @@ static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl,
}
}
+/*
+ * The TLS key is set by secure concatenation after negotiation has been
+ * completed on the admin queue. We need to revoke the key when:
+ * - concatenation is enabled (otherwise it's a static key set by the user)
+ * and
+ * - the generated key is present in ctrl->tls_key (otherwise there's nothing
+ * to revoke)
+ * and
+ * - a valid PSK key ID has been set in ctrl->tls_pskid (otherwise TLS
+ * negotiation has not run).
+ *
+ * We cannot always revoke the key as nvme_tcp_alloc_admin_queue() is called
+ * twice during secure concatenation, once on a 'normal' connection to run the
+ * DH-HMAC-CHAP negotiation (which generates the key, so it _must not_ be set),
+ * and once after the negotiation (which uses the key, so it _must_ be set).
+ */
+static bool nvme_tcp_key_revoke_needed(struct nvme_ctrl *ctrl)
+{
+ return ctrl->opts->concat && ctrl->opts->tls_key && ctrl->tls_pskid;
+}
+
static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
{
struct nvmf_ctrl_options *opts = ctrl->opts;
@@ -2319,6 +2356,16 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
if (ret)
return ret;
+ if (ctrl->opts && ctrl->opts->concat && !ctrl->tls_pskid) {
+ /* See comments for nvme_tcp_key_revoke_needed() */
+ dev_dbg(ctrl->device, "restart admin queue for secure concatenation\n");
+ nvme_stop_keep_alive(ctrl);
+ nvme_tcp_teardown_admin_queue(ctrl, false);
+ ret = nvme_tcp_configure_admin_queue(ctrl, false);
+ if (ret)
+ return ret;
+ }
+
if (ctrl->icdoff) {
ret = -EOPNOTSUPP;
dev_err(ctrl->device, "icdoff is not supported!\n");
@@ -2415,6 +2462,8 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
struct nvme_tcp_ctrl, err_work);
struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
+ if (nvme_tcp_key_revoke_needed(ctrl))
+ nvme_auth_revoke_tls_key(ctrl);
nvme_stop_keep_alive(ctrl);
flush_work(&ctrl->async_event_work);
nvme_tcp_teardown_io_queues(ctrl, false);
@@ -2455,6 +2504,8 @@ static void nvme_reset_ctrl_work(struct work_struct *work)
container_of(work, struct nvme_ctrl, reset_work);
int ret;
+ if (nvme_tcp_key_revoke_needed(ctrl))
+ nvme_auth_revoke_tls_key(ctrl);
nvme_stop_ctrl(ctrl);
nvme_tcp_teardown_ctrl(ctrl, false);
@@ -2951,7 +3002,7 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE | NVMF_OPT_TLS |
- NVMF_OPT_KEYRING | NVMF_OPT_TLS_KEY,
+ NVMF_OPT_KEYRING | NVMF_OPT_TLS_KEY | NVMF_OPT_CONCAT,
.create_ctrl = nvme_tcp_create_ctrl,
};
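
For orientation, here is the host-side secure channel concatenation flow as it can be read from the tcp.c hunks above; this is an editorial summary in comment form, not text from the patch:

	/*
	 * 1. nvme_tcp_setup_ctrl() first brings up the admin queue without TLS:
	 *    opts->concat makes nvme_tcp_tls_configured() true, but no PSK exists
	 *    yet, so no PSK id is used for the initial admin connection.
	 * 2. DH-HMAC-CHAP runs on that queue; the derived TLS PSK ends up in
	 *    ctrl->opts->tls_key ("the generated PSK is stored in the fabric
	 *    options").
	 * 3. Since opts->concat is set and ctrl->tls_pskid is still 0, the admin
	 *    queue is torn down and reconnected, this time over TLS with the
	 *    generated PSK.
	 * 4. I/O queues are created over the TLS-protected association; on error
	 *    recovery or controller reset the generated key is revoked, gated by
	 *    nvme_tcp_key_revoke_needed().
	 */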
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index 382949e18c6a..cce4c5b55aa9 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -146,17 +146,16 @@ static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
return NULL;
}
-static int nvme_zone_parse_entry(struct nvme_ctrl *ctrl,
- struct nvme_ns_head *head,
+static int nvme_zone_parse_entry(struct nvme_ns *ns,
struct nvme_zone_descriptor *entry,
unsigned int idx, report_zones_cb cb,
void *data)
{
+ struct nvme_ns_head *head = ns->head;
struct blk_zone zone = { };
if ((entry->zt & 0xf) != NVME_ZONE_TYPE_SEQWRITE_REQ) {
- dev_err(ctrl->device, "invalid zone type %#x\n",
- entry->zt);
+ dev_err(ns->ctrl->device, "invalid zone type %#x\n", entry->zt);
return -EINVAL;
}
@@ -213,8 +212,7 @@ int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
break;
for (i = 0; i < nz && zone_idx < nr_zones; i++) {
- ret = nvme_zone_parse_entry(ns->ctrl, ns->head,
- &report->entries[i],
+ ret = nvme_zone_parse_entry(ns, &report->entries[i],
zone_idx, cb, data);
if (ret)
goto out_free;
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
index b47d675232d2..0b0645ac5df4 100644
--- a/drivers/nvme/target/auth.c
+++ b/drivers/nvme/target/auth.c
@@ -15,6 +15,7 @@
#include <linux/ctype.h>
#include <linux/random.h>
#include <linux/nvme-auth.h>
+#include <linux/nvme-keyring.h>
#include <linux/unaligned.h>
#include "nvmet.h"
@@ -139,7 +140,7 @@ int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id)
return ret;
}
-u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl)
+u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq)
{
int ret = 0;
struct nvmet_host_link *p;
@@ -165,6 +166,11 @@ u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl)
goto out_unlock;
}
+ if (nvmet_queue_tls_keyid(sq)) {
+ pr_debug("host %s tls enabled\n", ctrl->hostnqn);
+ goto out_unlock;
+ }
+
ret = nvmet_setup_dhgroup(ctrl, host->dhchap_dhgroup_id);
if (ret < 0) {
pr_warn("Failed to setup DH group");
@@ -233,6 +239,9 @@ out_unlock:
void nvmet_auth_sq_free(struct nvmet_sq *sq)
{
cancel_delayed_work(&sq->auth_expired_work);
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+ sq->tls_key = 0;
+#endif
kfree(sq->dhchap_c1);
sq->dhchap_c1 = NULL;
kfree(sq->dhchap_c2);
@@ -261,6 +270,12 @@ void nvmet_destroy_auth(struct nvmet_ctrl *ctrl)
nvme_auth_free_key(ctrl->ctrl_key);
ctrl->ctrl_key = NULL;
}
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+ if (ctrl->tls_key) {
+ key_put(ctrl->tls_key);
+ ctrl->tls_key = NULL;
+ }
+#endif
}
bool nvmet_check_auth_status(struct nvmet_req *req)
@@ -542,3 +557,58 @@ int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
return ret;
}
+
+void nvmet_auth_insert_psk(struct nvmet_sq *sq)
+{
+ int hash_len = nvme_auth_hmac_hash_len(sq->ctrl->shash_id);
+ u8 *psk, *digest, *tls_psk;
+ size_t psk_len;
+ int ret;
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+ struct key *tls_key = NULL;
+#endif
+
+ ret = nvme_auth_generate_psk(sq->ctrl->shash_id,
+ sq->dhchap_skey,
+ sq->dhchap_skey_len,
+ sq->dhchap_c1, sq->dhchap_c2,
+ hash_len, &psk, &psk_len);
+ if (ret) {
+ pr_warn("%s: ctrl %d qid %d failed to generate PSK, error %d\n",
+ __func__, sq->ctrl->cntlid, sq->qid, ret);
+ return;
+ }
+ ret = nvme_auth_generate_digest(sq->ctrl->shash_id, psk, psk_len,
+ sq->ctrl->subsysnqn,
+ sq->ctrl->hostnqn, &digest);
+ if (ret) {
+ pr_warn("%s: ctrl %d qid %d failed to generate digest, error %d\n",
+ __func__, sq->ctrl->cntlid, sq->qid, ret);
+ goto out_free_psk;
+ }
+ ret = nvme_auth_derive_tls_psk(sq->ctrl->shash_id, psk, psk_len,
+ digest, &tls_psk);
+ if (ret) {
+ pr_warn("%s: ctrl %d qid %d failed to derive TLS PSK, error %d\n",
+ __func__, sq->ctrl->cntlid, sq->qid, ret);
+ goto out_free_digest;
+ }
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+ tls_key = nvme_tls_psk_refresh(NULL, sq->ctrl->hostnqn, sq->ctrl->subsysnqn,
+ sq->ctrl->shash_id, tls_psk, psk_len, digest);
+ if (IS_ERR(tls_key)) {
+ pr_warn("%s: ctrl %d qid %d failed to refresh key, error %ld\n",
+ __func__, sq->ctrl->cntlid, sq->qid, PTR_ERR(tls_key));
+ tls_key = NULL;
+ kfree_sensitive(tls_psk);
+ }
+ if (sq->ctrl->tls_key)
+ key_put(sq->ctrl->tls_key);
+ sq->ctrl->tls_key = tls_key;
+#endif
+
+out_free_digest:
+ kfree_sensitive(digest);
+out_free_psk:
+ kfree_sensitive(psk);
+}
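
A compact view of the key-derivation chain implemented by nvmet_auth_insert_psk() above (editorial summary mirroring the calls in the function):

	/*
	 * dhchap session key + C1/C2  --nvme_auth_generate_psk()-->     psk
	 * psk + subsysnqn + hostnqn   --nvme_auth_generate_digest()-->  digest
	 * psk + digest                --nvme_auth_derive_tls_psk()-->   tls_psk
	 * tls_psk                     --nvme_tls_psk_refresh()-->       keyring
	 *                               entry, cached in sq->ctrl->tls_key
	 */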
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 2e741696f371..71f8d06998d6 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1618,8 +1618,6 @@ struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args)
}
ctrl->cntlid = ret;
- uuid_copy(&ctrl->hostid, args->hostid);
-
/*
* Discovery controllers may use some arbitrary high value
* in order to cleanup stale discovery sessions
@@ -1647,7 +1645,7 @@ struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args)
if (args->hostid)
uuid_copy(&ctrl->hostid, args->hostid);
- dhchap_status = nvmet_setup_auth(ctrl);
+ dhchap_status = nvmet_setup_auth(ctrl, args->sq);
if (dhchap_status) {
pr_err("Failed to setup authentication, dhchap status %u\n",
dhchap_status);
@@ -1662,11 +1660,12 @@ struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args)
args->status = NVME_SC_SUCCESS;
- pr_info("Created %s controller %d for subsystem %s for NQN %s%s%s.\n",
+ pr_info("Created %s controller %d for subsystem %s for NQN %s%s%s%s.\n",
nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm",
ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
ctrl->pi_support ? " T10-PI is enabled" : "",
- nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : "");
+ nvmet_has_auth(ctrl, args->sq) ? " with DH-HMAC-CHAP" : "",
+ nvmet_queue_tls_keyid(args->sq) ? ", TLS" : "");
return ctrl;
diff --git a/drivers/nvme/target/debugfs.c b/drivers/nvme/target/debugfs.c
index 220c7391fc19..e4300eb95101 100644
--- a/drivers/nvme/target/debugfs.c
+++ b/drivers/nvme/target/debugfs.c
@@ -132,6 +132,27 @@ static int nvmet_ctrl_host_traddr_show(struct seq_file *m, void *p)
}
NVMET_DEBUGFS_ATTR(nvmet_ctrl_host_traddr);
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+static int nvmet_ctrl_tls_key_show(struct seq_file *m, void *p)
+{
+ struct nvmet_ctrl *ctrl = m->private;
+ key_serial_t keyid = nvmet_queue_tls_keyid(ctrl->sqs[0]);
+
+ seq_printf(m, "%08x\n", keyid);
+ return 0;
+}
+NVMET_DEBUGFS_ATTR(nvmet_ctrl_tls_key);
+
+static int nvmet_ctrl_tls_concat_show(struct seq_file *m, void *p)
+{
+ struct nvmet_ctrl *ctrl = m->private;
+
+ seq_printf(m, "%d\n", ctrl->concat);
+ return 0;
+}
+NVMET_DEBUGFS_ATTR(nvmet_ctrl_tls_concat);
+#endif
+
int nvmet_debugfs_ctrl_setup(struct nvmet_ctrl *ctrl)
{
char name[32];
@@ -157,6 +178,12 @@ int nvmet_debugfs_ctrl_setup(struct nvmet_ctrl *ctrl)
&nvmet_ctrl_state_fops);
debugfs_create_file("host_traddr", S_IRUSR, ctrl->debugfs_dir, ctrl,
&nvmet_ctrl_host_traddr_fops);
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+ debugfs_create_file("tls_concat", S_IRUSR, ctrl->debugfs_dir, ctrl,
+ &nvmet_ctrl_tls_concat_fops);
+ debugfs_create_file("tls_key", S_IRUSR, ctrl->debugfs_dir, ctrl,
+ &nvmet_ctrl_tls_key_fops);
+#endif
return 0;
}
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
index 2022757f08dc..bf01ec414c55 100644
--- a/drivers/nvme/target/fabrics-cmd-auth.c
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -43,8 +43,26 @@ static u8 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
data->auth_protocol[0].dhchap.halen,
data->auth_protocol[0].dhchap.dhlen);
req->sq->dhchap_tid = le16_to_cpu(data->t_id);
- if (data->sc_c)
- return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+ if (data->sc_c != NVME_AUTH_SECP_NOSC) {
+ if (!IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS))
+ return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+ /* Secure concatenation can only be enabled on the admin queue */
+ if (req->sq->qid)
+ return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+ switch (data->sc_c) {
+ case NVME_AUTH_SECP_NEWTLSPSK:
+ if (nvmet_queue_tls_keyid(req->sq))
+ return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+ break;
+ case NVME_AUTH_SECP_REPLACETLSPSK:
+ if (!nvmet_queue_tls_keyid(req->sq))
+ return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+ break;
+ default:
+ return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+ }
+ ctrl->concat = true;
+ }
if (data->napd != 1)
return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
@@ -103,6 +121,12 @@ static u8 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
nvme_auth_dhgroup_name(fallback_dhgid));
ctrl->dh_gid = fallback_dhgid;
}
+ if (ctrl->dh_gid == NVME_AUTH_DHGROUP_NULL && ctrl->concat) {
+ pr_debug("%s: ctrl %d qid %d: NULL DH group invalid "
+ "for secure channel concatenation\n", __func__,
+ ctrl->cntlid, req->sq->qid);
+ return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+ }
pr_debug("%s: ctrl %d qid %d: selected DH group %s (%d)\n",
__func__, ctrl->cntlid, req->sq->qid,
nvme_auth_dhgroup_name(ctrl->dh_gid), ctrl->dh_gid);
@@ -148,12 +172,22 @@ static u8 nvmet_auth_reply(struct nvmet_req *req, void *d)
if (memcmp(data->rval, response, data->hl)) {
pr_info("ctrl %d qid %d host response mismatch\n",
ctrl->cntlid, req->sq->qid);
+ pr_debug("ctrl %d qid %d rval %*ph\n",
+ ctrl->cntlid, req->sq->qid, data->hl, data->rval);
+ pr_debug("ctrl %d qid %d response %*ph\n",
+ ctrl->cntlid, req->sq->qid, data->hl, response);
kfree(response);
return NVME_AUTH_DHCHAP_FAILURE_FAILED;
}
kfree(response);
pr_debug("%s: ctrl %d qid %d host authenticated\n",
__func__, ctrl->cntlid, req->sq->qid);
+ if (!data->cvalid && ctrl->concat) {
+ pr_debug("%s: ctrl %d qid %d invalid challenge\n",
+ __func__, ctrl->cntlid, req->sq->qid);
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ }
+ req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
if (data->cvalid) {
req->sq->dhchap_c2 = kmemdup(data->rval + data->hl, data->hl,
GFP_KERNEL);
@@ -163,11 +197,23 @@ static u8 nvmet_auth_reply(struct nvmet_req *req, void *d)
pr_debug("%s: ctrl %d qid %d challenge %*ph\n",
__func__, ctrl->cntlid, req->sq->qid, data->hl,
req->sq->dhchap_c2);
- } else {
+ }
+ /*
+ * NVMe Base Spec 2.2 section 8.3.4.5.4: DH-HMAC-CHAP_Reply message
+ * Sequence Number (SEQNUM): [ .. ]
+ * The value 0h is used to indicate that bidirectional authentication
+ * is not performed, but a challenge value C2 is carried in order to
+ * generate a pre-shared key (PSK) for subsequent establishment of a
+ * secure channel.
+ */
+ if (req->sq->dhchap_s2 == 0) {
+ if (ctrl->concat)
+ nvmet_auth_insert_psk(req->sq);
req->sq->authenticated = true;
+ kfree(req->sq->dhchap_c2);
req->sq->dhchap_c2 = NULL;
- }
- req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
+ } else if (!data->cvalid)
+ req->sq->authenticated = true;
return 0;
}
@@ -246,7 +292,7 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
pr_debug("%s: ctrl %d qid %d reset negotiation\n",
__func__, ctrl->cntlid, req->sq->qid);
if (!req->sq->qid) {
- dhchap_status = nvmet_setup_auth(ctrl);
+ dhchap_status = nvmet_setup_auth(ctrl, req->sq);
if (dhchap_status) {
pr_err("ctrl %d qid 0 failed to setup re-authentication\n",
ctrl->cntlid);
@@ -303,6 +349,8 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
}
goto done_kfree;
case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2:
+ if (ctrl->concat)
+ nvmet_auth_insert_psk(req->sq);
req->sq->authenticated = true;
pr_debug("%s: ctrl %d qid %d ctrl authenticated\n",
__func__, ctrl->cntlid, req->sq->qid);
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index eb406c90c167..f012bdf89850 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -234,10 +234,26 @@ err:
return ret;
}
-static u32 nvmet_connect_result(struct nvmet_ctrl *ctrl)
+static u32 nvmet_connect_result(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq)
{
+ bool needs_auth = nvmet_has_auth(ctrl, sq);
+ key_serial_t keyid = nvmet_queue_tls_keyid(sq);
+
+ /* Do not authenticate I/O queues for secure concatenation */
+ if (ctrl->concat && sq->qid)
+ needs_auth = false;
+
+ if (keyid)
+ pr_debug("%s: ctrl %d qid %d should %sauthenticate, tls psk %08x\n",
+ __func__, ctrl->cntlid, sq->qid,
+ needs_auth ? "" : "not ", keyid);
+ else
+ pr_debug("%s: ctrl %d qid %d should %sauthenticate%s\n",
+ __func__, ctrl->cntlid, sq->qid,
+ needs_auth ? "" : "not ",
+ ctrl->concat ? ", secure concatenation" : "");
return (u32)ctrl->cntlid |
- (nvmet_has_auth(ctrl) ? NVME_CONNECT_AUTHREQ_ATR : 0);
+ (needs_auth ? NVME_CONNECT_AUTHREQ_ATR : 0);
}
static void nvmet_execute_admin_connect(struct nvmet_req *req)
@@ -247,6 +263,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
struct nvmet_ctrl *ctrl = NULL;
struct nvmet_alloc_ctrl_args args = {
.port = req->port,
+ .sq = req->sq,
.ops = req->ops,
.p2p_client = req->p2p_client,
.kato = le32_to_cpu(c->kato),
@@ -299,7 +316,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
goto out;
}
- args.result = cpu_to_le32(nvmet_connect_result(ctrl));
+ args.result = cpu_to_le32(nvmet_connect_result(ctrl, req->sq));
out:
kfree(d);
complete:
@@ -357,7 +374,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
goto out_ctrl_put;
pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
- req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl));
+ req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl, req->sq));
out:
kfree(d);
complete:
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 3ef4beacde32..7318b736d414 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -172,20 +172,6 @@ struct nvmet_fc_tgt_assoc {
struct work_struct del_work;
};
-
-static inline int
-nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
-{
- return (iodptr - iodptr->tgtport->iod);
-}
-
-static inline int
-nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
-{
- return (fodptr - fodptr->queue->fod);
-}
-
-
/*
* Association and Connection IDs:
*
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index a9d112d34d4f..a5c41144667c 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -162,7 +162,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
}
iod->req.sg = iod->sg_table.sgl;
- iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
+ iod->req.sg_cnt = blk_rq_map_sg(req, iod->sg_table.sgl);
iod->req.transfer_len = blk_rq_payload_bytes(req);
}
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index fcf4f460dc9a..b6db8b74dc4a 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -165,6 +165,9 @@ struct nvmet_sq {
u8 *dhchap_skey;
int dhchap_skey_len;
#endif
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+ struct key *tls_key;
+#endif
struct completion free_done;
struct completion confirm_done;
};
@@ -289,6 +292,7 @@ struct nvmet_ctrl {
u64 err_counter;
struct nvme_error_slot slots[NVMET_ERROR_LOG_SLOTS];
bool pi_support;
+ bool concat;
#ifdef CONFIG_NVME_TARGET_AUTH
struct nvme_dhchap_key *host_key;
struct nvme_dhchap_key *ctrl_key;
@@ -298,6 +302,9 @@ struct nvmet_ctrl {
u8 *dh_key;
size_t dh_keysize;
#endif
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+ struct key *tls_key;
+#endif
struct nvmet_pr_log_mgr pr_log_mgr;
};
@@ -583,6 +590,7 @@ void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
struct nvmet_alloc_ctrl_args {
struct nvmet_port *port;
+ struct nvmet_sq *sq;
char *subsysnqn;
char *hostnqn;
uuid_t *hostid;
@@ -819,7 +827,7 @@ static inline u8 nvmet_cc_iocqes(u32 cc)
/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
{
- return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
+ return cpu_to_le16(clamp(a, 1U, 1U << 16) - 1);
}
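
An aside on the to0based() rewrite just above: clamp(a, 1U, 1U << 16) is exactly the old max(1U, min(1U << 16, a)) nesting, only stated more directly. A couple of worked values (editorial illustration, not part of the patch):

	to0based(0)      == cpu_to_le16(0)      /* clamped up to 1, then 0's based */
	to0based(64)     == cpu_to_le16(63)
	to0based(200000) == cpu_to_le16(65535)  /* clamped down to 1 << 16 */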
static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
@@ -851,6 +859,22 @@ static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
bio_put(bio);
}
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+static inline key_serial_t nvmet_queue_tls_keyid(struct nvmet_sq *sq)
+{
+ return sq->tls_key ? key_serial(sq->tls_key) : 0;
+}
+static inline void nvmet_sq_put_tls_key(struct nvmet_sq *sq)
+{
+ if (sq->tls_key) {
+ key_put(sq->tls_key);
+ sq->tls_key = NULL;
+ }
+}
+#else
+static inline key_serial_t nvmet_queue_tls_keyid(struct nvmet_sq *sq) { return 0; }
+static inline void nvmet_sq_put_tls_key(struct nvmet_sq *sq) {}
+#endif
#ifdef CONFIG_NVME_TARGET_AUTH
u32 nvmet_auth_send_data_len(struct nvmet_req *req);
void nvmet_execute_auth_send(struct nvmet_req *req);
@@ -859,7 +883,7 @@ void nvmet_execute_auth_receive(struct nvmet_req *req);
int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
bool set_ctrl);
int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
-u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl);
+u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq);
void nvmet_auth_sq_init(struct nvmet_sq *sq);
void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_free(struct nvmet_sq *sq);
@@ -869,16 +893,18 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
unsigned int hash_len);
int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
unsigned int hash_len);
-static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
+static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq)
{
- return ctrl->host_key != NULL;
+ return ctrl->host_key != NULL && !nvmet_queue_tls_keyid(sq);
}
int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
u8 *buf, int buf_size);
int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
u8 *buf, int buf_size);
+void nvmet_auth_insert_psk(struct nvmet_sq *sq);
#else
-static inline u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl)
+static inline u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl,
+ struct nvmet_sq *sq)
{
return 0;
}
@@ -891,11 +917,13 @@ static inline bool nvmet_check_auth_status(struct nvmet_req *req)
{
return true;
}
-static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
+static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl,
+ struct nvmet_sq *sq)
{
return false;
}
static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; }
+static inline void nvmet_auth_insert_psk(struct nvmet_sq *sq) {};
#endif
int nvmet_pr_init_ns(struct nvmet_ns *ns);
diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c
index 565d2bd36dcd..b54b3fdbe389 100644
--- a/drivers/nvme/target/pci-epf.c
+++ b/drivers/nvme/target/pci-epf.c
@@ -1265,15 +1265,12 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid];
u16 status;
- if (test_and_set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
+ if (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
if (!(flags & NVME_QUEUE_PHYS_CONTIG))
return NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR;
- if (flags & NVME_CQ_IRQ_ENABLED)
- set_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
-
cq->pci_addr = pci_addr;
cq->qid = cqid;
cq->depth = qsize + 1;
@@ -1290,24 +1287,27 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
cq->qes = ctrl->io_cqes;
cq->pci_size = cq->qes * cq->depth;
- cq->iv = nvmet_pci_epf_add_irq_vector(ctrl, vector);
- if (!cq->iv) {
- status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
- goto err;
+ if (flags & NVME_CQ_IRQ_ENABLED) {
+ cq->iv = nvmet_pci_epf_add_irq_vector(ctrl, vector);
+ if (!cq->iv)
+ return NVME_SC_INTERNAL | NVME_STATUS_DNR;
+ set_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
}
status = nvmet_cq_create(tctrl, &cq->nvme_cq, cqid, cq->depth);
if (status != NVME_SC_SUCCESS)
goto err;
+ set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags);
+
dev_dbg(ctrl->dev, "CQ[%u]: %u entries of %zu B, IRQ vector %u\n",
cqid, qsize, cq->qes, cq->vector);
return NVME_SC_SUCCESS;
err:
- clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
- clear_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags);
+ if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
+ nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
return status;
}
@@ -1333,7 +1333,7 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid];
u16 status;
- if (test_and_set_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
+ if (test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
if (!(flags & NVME_QUEUE_PHYS_CONTIG))
@@ -1355,7 +1355,7 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
status = nvmet_sq_create(tctrl, &sq->nvme_sq, sqid, sq->depth);
if (status != NVME_SC_SUCCESS)
- goto out_clear_bit;
+ return status;
sq->iod_wq = alloc_workqueue("sq%d_wq", WQ_UNBOUND,
min_t(int, sq->depth, WQ_MAX_ACTIVE), sqid);
@@ -1365,6 +1365,8 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
goto out_destroy_sq;
}
+ set_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags);
+
dev_dbg(ctrl->dev, "SQ[%u]: %u entries of %zu B\n",
sqid, qsize, sq->qes);
@@ -1372,8 +1374,6 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
out_destroy_sq:
nvmet_sq_destroy(&sq->nvme_sq);
-out_clear_bit:
- clear_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags);
return status;
}
@@ -1385,7 +1385,6 @@ static u16 nvmet_pci_epf_delete_sq(struct nvmet_ctrl *tctrl, u16 sqid)
if (!test_and_clear_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
- flush_workqueue(sq->iod_wq);
destroy_workqueue(sq->iod_wq);
sq->iod_wq = NULL;
@@ -2129,8 +2128,15 @@ static int nvmet_pci_epf_configure_bar(struct nvmet_pci_epf *nvme_epf)
return -ENODEV;
}
- if (epc_features->bar[BAR_0].only_64bit)
- epf->bar[BAR_0].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+ /*
+ * While NVMe PCIe Transport Specification 1.1, section 2.1.10, claims
+ * that the BAR0 type is Implementation Specific, in NVMe 1.1, the type
+ * is required to be 64-bit. Thus, for interoperability, always set the
+ * type to 64-bit. In the rare case that the PCI EPC does not support
+ * configuring BAR0 as 64-bit, the call to pci_epc_set_bar() will fail,
+ * and we will return failure back to the user.
+ */
+ epf->bar[BAR_0].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
/*
* Calculate the size of the register bar: NVMe registers first with
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 4f9cac8a5abe..f2d0c920269b 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -8,7 +8,6 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
-#include <linux/key.h>
#include <linux/nvme-tcp.h>
#include <linux/nvme-keyring.h>
#include <net/sock.h>
@@ -1080,10 +1079,11 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
&queue->nvme_sq, &nvmet_tcp_ops))) {
- pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
+ pr_err("failed cmd %p id %d opcode %d, data_len: %d, status: %04x\n",
req->cmd, req->cmd->common.command_id,
req->cmd->common.opcode,
- le32_to_cpu(req->cmd->common.dptr.sgl.length));
+ le32_to_cpu(req->cmd->common.dptr.sgl.length),
+ le16_to_cpu(req->cqe->status));
nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
return 0;
@@ -1609,6 +1609,7 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
/* stop accepting incoming data */
queue->rcv_state = NVMET_TCP_RECV_ERR;
+ nvmet_sq_put_tls_key(&queue->nvme_sq);
nvmet_tcp_uninit_data_in_cmds(queue);
nvmet_sq_destroy(&queue->nvme_sq);
cancel_work_sync(&queue->io_work);
@@ -1794,6 +1795,27 @@ static int nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue *queue)
return 0;
}
+static int nvmet_tcp_tls_key_lookup(struct nvmet_tcp_queue *queue,
+ key_serial_t peerid)
+{
+ struct key *tls_key = nvme_tls_key_lookup(peerid);
+ int status = 0;
+
+ if (IS_ERR(tls_key)) {
+ pr_warn("%s: queue %d failed to lookup key %x\n",
+ __func__, queue->idx, peerid);
+ spin_lock_bh(&queue->state_lock);
+ queue->state = NVMET_TCP_Q_FAILED;
+ spin_unlock_bh(&queue->state_lock);
+ status = PTR_ERR(tls_key);
+ } else {
+ pr_debug("%s: queue %d using TLS PSK %x\n",
+ __func__, queue->idx, peerid);
+ queue->nvme_sq.tls_key = tls_key;
+ }
+ return status;
+}
+
static void nvmet_tcp_tls_handshake_done(void *data, int status,
key_serial_t peerid)
{
@@ -1814,6 +1836,10 @@ static void nvmet_tcp_tls_handshake_done(void *data, int status,
spin_unlock_bh(&queue->state_lock);
cancel_delayed_work_sync(&queue->tls_handshake_tmo_work);
+
+ if (!status)
+ status = nvmet_tcp_tls_key_lookup(queue, peerid);
+
if (status)
nvmet_tcp_schedule_release_queue(queue);
else
diff --git a/drivers/nvmem/brcm_nvram.c b/drivers/nvmem/brcm_nvram.c
index b810df727b44..b4cf245fb246 100644
--- a/drivers/nvmem/brcm_nvram.c
+++ b/drivers/nvmem/brcm_nvram.c
@@ -100,7 +100,7 @@ static int brcm_nvram_read_post_process_macaddr(void *context, const char *id, i
{
u8 mac[ETH_ALEN];
- if (bytes != 3 * ETH_ALEN - 1)
+ if (bytes != MAC_ADDR_STR_LEN)
return -EINVAL;
if (!mac_pton(buf, mac))
diff --git a/drivers/nvmem/layouts/u-boot-env.c b/drivers/nvmem/layouts/u-boot-env.c
index 731e6f4f12b2..436426d4e8f9 100644
--- a/drivers/nvmem/layouts/u-boot-env.c
+++ b/drivers/nvmem/layouts/u-boot-env.c
@@ -37,7 +37,7 @@ static int u_boot_env_read_post_process_ethaddr(void *context, const char *id, i
{
u8 mac[ETH_ALEN];
- if (bytes != 3 * ETH_ALEN - 1)
+ if (bytes != MAC_ADDR_STR_LEN)
return -EINVAL;
if (!mac_pton(buf, mac))
diff --git a/drivers/of/address.c b/drivers/of/address.c
index d177a2b9edaf..f0f8f0dd191c 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -1031,20 +1031,15 @@ EXPORT_SYMBOL_GPL(of_dma_is_coherent);
*
* Returns true if the "nonposted-mmio" property was found for
* the device's bus.
- *
- * This is currently only enabled on builds that support Apple ARM devices, as
- * an optimization.
*/
static bool of_mmio_is_nonposted(const struct device_node *np)
{
- if (!IS_ENABLED(CONFIG_ARCH_APPLE))
- return false;
-
struct device_node *parent __free(device_node) = of_get_parent(np);
- if (!parent)
- return false;
- return of_property_read_bool(parent, "nonposted-mmio");
+ if (of_property_read_bool(np, "nonposted-mmio"))
+ return true;
+
+ return parent && of_property_read_bool(parent, "nonposted-mmio");
}
static int __of_address_to_resource(struct device_node *dev, int index, int bar_no,
diff --git a/drivers/of/base.c b/drivers/of/base.c
index af6c68bbb427..7043acd971a0 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -824,6 +824,33 @@ struct device_node *of_get_child_by_name(const struct device_node *node,
}
EXPORT_SYMBOL(of_get_child_by_name);
+/**
+ * of_get_available_child_by_name - Find the available child node by name for a given parent
+ * @node: parent node
+ * @name: child name to look for.
+ *
+ * This function looks for a child node with the given name and checks that
+ * the device is available for use.
+ *
+ * Return: A node pointer if found, with refcount incremented, use
+ * of_node_put() on it when done.
+ * Returns NULL if node is not found.
+ */
+struct device_node *of_get_available_child_by_name(const struct device_node *node,
+ const char *name)
+{
+ struct device_node *child;
+
+ child = of_get_child_by_name(node, name);
+ if (child && !of_device_is_available(child)) {
+ of_node_put(child);
+ return NULL;
+ }
+
+ return child;
+}
+EXPORT_SYMBOL(of_get_available_child_by_name);
+
struct device_node *__of_find_node_by_path(const struct device_node *parent,
const char *path)
{
@@ -1651,7 +1678,7 @@ int __of_add_property(struct device_node *np, struct property *prop)
prop->next = NULL;
next = &np->properties;
while (*next) {
- if (strcmp(prop->name, (*next)->name) == 0) {
+ if (of_prop_cmp(prop->name, (*next)->name) == 0) {
/* duplicate ! don't insert it */
rc = -EEXIST;
goto out_unlock;
@@ -1855,9 +1882,7 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
int id, len;
/* Skip those we do not want to proceed */
- if (!strcmp(pp->name, "name") ||
- !strcmp(pp->name, "phandle") ||
- !strcmp(pp->name, "linux,phandle"))
+ if (is_pseudo_property(pp->name))
continue;
np = of_find_node_by_path(pp->value);
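
A minimal usage sketch for the of_get_available_child_by_name() helper added above; the "codec" child name and the probe context are hypothetical, and the reference handling follows the kernel-doc contract (refcount incremented on success, NULL when the child is absent or not enabled):

	struct device_node *child;

	child = of_get_available_child_by_name(dev->of_node, "codec");
	if (!child)
		return -ENODEV;	/* missing, or present but status is not "okay" */

	/* ... read properties of the enabled child here ... */

	of_node_put(child);	/* drop the reference taken by the lookup */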
diff --git a/drivers/of/device.c b/drivers/of/device.c
index edf3be197265..5053e5d532cc 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -99,6 +99,11 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
bool coherent, set_map = false;
int ret;
+ if (dev->dma_range_map) {
+ dev_dbg(dev, "dma_range_map already set\n");
+ goto skip_map;
+ }
+
if (np == dev->of_node)
bus_np = __of_get_dma_parent(np);
else
@@ -119,7 +124,7 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
end = dma_range_map_max(map);
set_map = true;
}
-
+skip_map:
/*
* If @dev is expected to be DMA-capable then the bus code that created
* it should have initialised its dma_mask pointer by this point. For
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 6c843d54ebb1..f8ad79b9b1c9 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -16,6 +16,7 @@
#define pr_fmt(fmt) "OF: " fmt
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/list.h>
@@ -38,11 +39,15 @@
unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
{
struct of_phandle_args oirq;
+ unsigned int ret;
if (of_irq_parse_one(dev, index, &oirq))
return 0;
- return irq_create_of_mapping(&oirq);
+ ret = irq_create_of_mapping(&oirq);
+ of_node_put(oirq.np);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
@@ -50,8 +55,8 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
* of_irq_find_parent - Given a device node, find its interrupt parent node
* @child: pointer to device node
*
- * Return: A pointer to the interrupt parent node, or NULL if the interrupt
- * parent could not be determined.
+ * Return: A pointer to the interrupt parent node with refcount increased
+ * or NULL if the interrupt parent could not be determined.
*/
struct device_node *of_irq_find_parent(struct device_node *child)
{
@@ -165,6 +170,8 @@ const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len, struct of_ph
* the specifier for each map, and then returns the translated map.
*
* Return: 0 on success and a negative number on error
+ *
+ * Note: refcount of node @out_irq->np is increased by 1 on success.
*/
int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
{
@@ -310,6 +317,12 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
addrsize = (imap - match_array) - intsize;
if (ipar == newpar) {
+ /*
+ * We got @ipar's refcount, but the refcount was
+ * gotten again by of_irq_parse_imap_parent() via its
+ * alias @newpar.
+ */
+ of_node_put(ipar);
pr_debug("%pOF interrupt-map entry to self\n", ipar);
return 0;
}
@@ -339,10 +352,12 @@ EXPORT_SYMBOL_GPL(of_irq_parse_raw);
* This function resolves an interrupt for a node by walking the interrupt tree,
* finding which interrupt controller node it is attached to, and returning the
* interrupt specifier that can be used to retrieve a Linux IRQ number.
+ *
+ * Note: refcount of node @out_irq->np is increased by 1 on success.
*/
int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_args *out_irq)
{
- struct device_node *p;
+ struct device_node __free(device_node) *p = NULL;
const __be32 *addr;
u32 intsize;
int i, res, addr_len;
@@ -367,41 +382,33 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
/* Try the new-style interrupts-extended first */
res = of_parse_phandle_with_args(device, "interrupts-extended",
"#interrupt-cells", index, out_irq);
- if (!res)
- return of_irq_parse_raw(addr_buf, out_irq);
-
- /* Look for the interrupt parent. */
- p = of_irq_find_parent(device);
- if (p == NULL)
- return -EINVAL;
+ if (!res) {
+ p = out_irq->np;
+ } else {
+ /* Look for the interrupt parent. */
+ p = of_irq_find_parent(device);
+ /* Get size of interrupt specifier */
+ if (!p || of_property_read_u32(p, "#interrupt-cells", &intsize))
+ return -EINVAL;
+
+ pr_debug(" parent=%pOF, intsize=%d\n", p, intsize);
+
+ /* Copy intspec into irq structure */
+ out_irq->np = p;
+ out_irq->args_count = intsize;
+ for (i = 0; i < intsize; i++) {
+ res = of_property_read_u32_index(device, "interrupts",
+ (index * intsize) + i,
+ out_irq->args + i);
+ if (res)
+ return res;
+ }
- /* Get size of interrupt specifier */
- if (of_property_read_u32(p, "#interrupt-cells", &intsize)) {
- res = -EINVAL;
- goto out;
+ pr_debug(" intspec=%d\n", *out_irq->args);
}
- pr_debug(" parent=%pOF, intsize=%d\n", p, intsize);
-
- /* Copy intspec into irq structure */
- out_irq->np = p;
- out_irq->args_count = intsize;
- for (i = 0; i < intsize; i++) {
- res = of_property_read_u32_index(device, "interrupts",
- (index * intsize) + i,
- out_irq->args + i);
- if (res)
- goto out;
- }
-
- pr_debug(" intspec=%d\n", *out_irq->args);
-
-
/* Check if there are any interrupt-map translations to process */
- res = of_irq_parse_raw(addr_buf, out_irq);
- out:
- of_node_put(p);
- return res;
+ return of_irq_parse_raw(addr_buf, out_irq);
}
EXPORT_SYMBOL_GPL(of_irq_parse_one);
@@ -505,8 +512,10 @@ int of_irq_count(struct device_node *dev)
struct of_phandle_args irq;
int nr = 0;
- while (of_irq_parse_one(dev, nr, &irq) == 0)
+ while (of_irq_parse_one(dev, nr, &irq) == 0) {
+ of_node_put(irq.np);
nr++;
+ }
return nr;
}
@@ -623,6 +632,8 @@ void __init of_irq_init(const struct of_device_id *matches)
__func__, desc->dev, desc->dev,
desc->interrupt_parent);
of_node_clear_flag(desc->dev, OF_POPULATED);
+ of_node_put(desc->interrupt_parent);
+ of_node_put(desc->dev);
kfree(desc);
continue;
}
@@ -653,6 +664,7 @@ void __init of_irq_init(const struct of_device_id *matches)
err:
list_for_each_entry_safe(desc, temp_desc, &intc_desc_list, list) {
list_del(&desc->list);
+ of_node_put(desc->interrupt_parent);
of_node_put(desc->dev);
kfree(desc);
}
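
The refcount notes added to of_irq_parse_one()/of_irq_parse_raw() boil down to one caller rule: on success, out_irq->np is returned with an extra reference that the caller must drop once it is done with the phandle args. A minimal sketch of the pattern, which is what the updated irq_of_parse_and_map() and of_irq_count() above already do:

	struct of_phandle_args oirq;
	unsigned int virq = 0;

	if (!of_irq_parse_one(dev->of_node, 0, &oirq)) {
		virq = irq_create_of_mapping(&oirq);
		of_node_put(oirq.np);	/* reference taken by of_irq_parse_one() */
	}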
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 1bdc7ceef3c5..df0bb00349e0 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -208,6 +208,13 @@ static void __maybe_unused of_dump_addr(const char *s, const __be32 *addr, int n
static void __maybe_unused of_dump_addr(const char *s, const __be32 *addr, int na) { }
#endif
+static inline bool is_pseudo_property(const char *prop_name)
+{
+ return !of_prop_cmp(prop_name, "name") ||
+ !of_prop_cmp(prop_name, "phandle") ||
+ !of_prop_cmp(prop_name, "linux,phandle");
+}
+
#if IS_ENABLED(CONFIG_KUNIT)
int __of_address_resource_bounds(struct resource *r, u64 start, u64 size);
#endif
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 434f6dd6a86c..1af6f52d0708 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -84,6 +84,12 @@ static int devicetree_state_flags;
#define DTSF_APPLY_FAIL 0x01
#define DTSF_REVERT_FAIL 0x02
+static int of_prop_val_eq(const struct property *p1, const struct property *p2)
+{
+ return p1->length == p2->length &&
+ !memcmp(p1->value, p2->value, (size_t)p1->length);
+}
+
/*
* If a changeset apply or revert encounters an error, an attempt will
* be made to undo partial changes, but may fail. If the undo fails
@@ -304,9 +310,7 @@ static int add_changeset_property(struct overlay_changeset *ovcs,
int ret = 0;
if (target->in_livetree)
- if (!of_prop_cmp(overlay_prop->name, "name") ||
- !of_prop_cmp(overlay_prop->name, "phandle") ||
- !of_prop_cmp(overlay_prop->name, "linux,phandle"))
+ if (is_pseudo_property(overlay_prop->name))
return 0;
if (target->in_livetree)
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index c6d8afb284e8..f77cb19973a5 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -334,7 +334,7 @@ static int of_platform_bus_create(struct device_node *bus,
int rc = 0;
/* Make sure it has a compatible property */
- if (strict && (!of_get_property(bus, "compatible", NULL))) {
+ if (strict && (!of_property_present(bus, "compatible"))) {
pr_debug("%s() - skipping %pOF, no compatible prop\n",
__func__, bus);
return 0;
@@ -536,8 +536,8 @@ static int __init of_platform_default_populate_init(void)
* ignore errors for the rest.
*/
for_each_node_by_type(node, "display") {
- if (!of_get_property(node, "linux,opened", NULL) ||
- !of_get_property(node, "linux,boot-display", NULL))
+ if (!of_property_read_bool(node, "linux,opened") ||
+ !of_property_read_bool(node, "linux,boot-display"))
continue;
dev = of_platform_device_create(node, "of-display", NULL);
of_node_put(node);
@@ -551,7 +551,7 @@ static int __init of_platform_default_populate_init(void)
char buf[14];
const char *of_display_format = "of-display.%d";
- if (!of_get_property(node, "linux,opened", NULL) || node == boot_display)
+ if (!of_property_read_bool(node, "linux,opened") || node == boot_display)
continue;
ret = snprintf(buf, sizeof(buf), of_display_format, display_number++);
if (ret < sizeof(buf))
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 208d922cc24c..c1feb631e383 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -148,6 +148,39 @@ static void *of_find_property_value_of_size(const struct device_node *np,
}
/**
+ * of_property_read_u16_index - Find and read a u16 from a multi-value property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @index: index of the u16 in the list of values
+ * @out_value: pointer to return value, modified only if no error.
+ *
+ * Search for a property in a device node and read nth 16-bit value from
+ * it.
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_value is modified only if a valid u16 value can be decoded.
+ */
+int of_property_read_u16_index(const struct device_node *np,
+ const char *propname,
+ u32 index, u16 *out_value)
+{
+ const u16 *val = of_find_property_value_of_size(np, propname,
+ ((index + 1) * sizeof(*out_value)),
+ 0, NULL);
+
+ if (IS_ERR(val))
+ return PTR_ERR(val);
+
+ *out_value = be16_to_cpup(((__be16 *)val) + index);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_property_read_u16_index);
+
+/**
* of_property_read_u32_index - Find and read a u32 from a multi-value property.
*
* @np: device node from which the property value is to be read.
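
And a short usage sketch for the new of_property_read_u16_index() above; the "thresholds" property name is made up for illustration:

	u16 val;
	int ret;

	/* read the third 16-bit cell of a hypothetical "thresholds" property */
	ret = of_property_read_u16_index(np, "thresholds", 2, &val);
	if (ret)
		return ret;	/* -EINVAL, -ENODATA or -EOVERFLOW, per the kernel-doc */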
diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
index 779db058c42f..86424e145919 100644
--- a/drivers/of/resolver.c
+++ b/drivers/of/resolver.c
@@ -161,9 +161,7 @@ static int adjust_local_phandle_references(const struct device_node *local_fixup
for_each_property_of_node(local_fixups, prop_fix) {
/* skip properties added automatically */
- if (!of_prop_cmp(prop_fix->name, "name") ||
- !of_prop_cmp(prop_fix->name, "phandle") ||
- !of_prop_cmp(prop_fix->name, "linux,phandle"))
+ if (is_pseudo_property(prop_fix->name))
continue;
if ((prop_fix->length % 4) != 0 || prop_fix->length == 0)
@@ -249,25 +247,22 @@ static int adjust_local_phandle_references(const struct device_node *local_fixup
*/
int of_resolve_phandles(struct device_node *overlay)
{
- struct device_node *child, *local_fixups, *refnode;
- struct device_node *tree_symbols, *overlay_fixups;
+ struct device_node *child, *refnode;
+ struct device_node *overlay_fixups;
+ struct device_node __free(device_node) *local_fixups = NULL;
struct property *prop;
const char *refpath;
phandle phandle, phandle_delta;
int err;
- tree_symbols = NULL;
-
if (!overlay) {
pr_err("null overlay\n");
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
if (!of_node_check_flag(overlay, OF_DETACHED)) {
pr_err("overlay not detached\n");
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
phandle_delta = live_tree_max_phandle() + 1;
@@ -279,7 +274,7 @@ int of_resolve_phandles(struct device_node *overlay)
err = adjust_local_phandle_references(local_fixups, overlay, phandle_delta);
if (err)
- goto out;
+ return err;
overlay_fixups = NULL;
@@ -288,16 +283,13 @@ int of_resolve_phandles(struct device_node *overlay)
overlay_fixups = child;
}
- if (!overlay_fixups) {
- err = 0;
- goto out;
- }
+ if (!overlay_fixups)
+ return 0;
- tree_symbols = of_find_node_by_path("/__symbols__");
+ struct device_node __free(device_node) *tree_symbols = of_find_node_by_path("/__symbols__");
if (!tree_symbols) {
pr_err("no symbols in root of device tree.\n");
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
for_each_property_of_node(overlay_fixups, prop) {
@@ -311,14 +303,12 @@ int of_resolve_phandles(struct device_node *overlay)
if (err) {
pr_err("node label '%s' not found in live devicetree symbols table\n",
prop->name);
- goto out;
+ return err;
}
refnode = of_find_node_by_path(refpath);
- if (!refnode) {
- err = -ENOENT;
- goto out;
- }
+ if (!refnode)
+ return -ENOENT;
phandle = refnode->phandle;
of_node_put(refnode);
@@ -328,11 +318,8 @@ int of_resolve_phandles(struct device_node *overlay)
break;
}
-out:
if (err)
pr_err("overlay phandle fixup failed: %d\n", err);
- of_node_put(tree_symbols);
-
return err;
}
EXPORT_SYMBOL_GPL(of_resolve_phandles);
diff --git a/drivers/of/unittest-data/tests-interrupts.dtsi b/drivers/of/unittest-data/tests-interrupts.dtsi
index 7c9f31cc131b..4ccb54f91c30 100644
--- a/drivers/of/unittest-data/tests-interrupts.dtsi
+++ b/drivers/of/unittest-data/tests-interrupts.dtsi
@@ -50,6 +50,13 @@
interrupt-map = <0x5000 1 2 &test_intc0 15>;
};
+ test_intc_intmap0: intc-intmap0 {
+ #interrupt-cells = <1>;
+ #address-cells = <1>;
+ interrupt-controller;
+ interrupt-map = <0x6000 1 &test_intc_intmap0 0x7000 2>;
+ };
+
interrupts0 {
interrupt-parent = <&test_intc0>;
interrupts = <1>, <2>, <3>, <4>;
@@ -60,6 +67,12 @@
interrupts = <1>, <2>, <3>, <4>;
};
+ interrupts2 {
+ reg = <0x6000 0x100>;
+ interrupt-parent = <&test_intc_intmap0>;
+ interrupts = <1>;
+ };
+
interrupts-extended0 {
reg = <0x5000 0x100>;
/*
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index f88ddb1cf5d7..64d301893af7 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -1654,6 +1654,72 @@ static void __init of_unittest_parse_interrupts_extended(void)
of_node_put(np);
}
+#if IS_ENABLED(CONFIG_OF_DYNAMIC)
+static void __init of_unittest_irq_refcount(void)
+{
+ struct of_phandle_args args;
+ struct device_node *intc0, *int_ext0;
+ struct device_node *int2, *intc_intmap0;
+ unsigned int ref_c0, ref_c1, ref_c2;
+ int rc;
+ bool passed;
+
+ if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
+ return;
+
+ intc0 = of_find_node_by_path("/testcase-data/interrupts/intc0");
+ int_ext0 = of_find_node_by_path("/testcase-data/interrupts/interrupts-extended0");
+ intc_intmap0 = of_find_node_by_path("/testcase-data/interrupts/intc-intmap0");
+ int2 = of_find_node_by_path("/testcase-data/interrupts/interrupts2");
+ if (!intc0 || !int_ext0 || !intc_intmap0 || !int2) {
+ pr_err("missing testcase data\n");
+ goto out;
+ }
+
+ /* Test refcount for API of_irq_parse_one() */
+ passed = true;
+ ref_c0 = OF_KREF_READ(intc0);
+ ref_c1 = ref_c0 + 1;
+ memset(&args, 0, sizeof(args));
+ rc = of_irq_parse_one(int_ext0, 0, &args);
+ ref_c2 = OF_KREF_READ(intc0);
+ of_node_put(args.np);
+
+ passed &= !rc;
+ passed &= (args.np == intc0);
+ passed &= (args.args_count == 1);
+ passed &= (args.args[0] == 1);
+ passed &= (ref_c1 == ref_c2);
+ unittest(passed, "IRQ refcount case #1 failed, original(%u) expected(%u) got(%u)\n",
+ ref_c0, ref_c1, ref_c2);
+
+ /* Test refcount for API of_irq_parse_raw() */
+ passed = true;
+ ref_c0 = OF_KREF_READ(intc_intmap0);
+ ref_c1 = ref_c0 + 1;
+ memset(&args, 0, sizeof(args));
+ rc = of_irq_parse_one(int2, 0, &args);
+ ref_c2 = OF_KREF_READ(intc_intmap0);
+ of_node_put(args.np);
+
+ passed &= !rc;
+ passed &= (args.np == intc_intmap0);
+ passed &= (args.args_count == 1);
+ passed &= (args.args[0] == 2);
+ passed &= (ref_c1 == ref_c2);
+ unittest(passed, "IRQ refcount case #2 failed, original(%u) expected(%u) got(%u)\n",
+ ref_c0, ref_c1, ref_c2);
+
+out:
+ of_node_put(int2);
+ of_node_put(intc_intmap0);
+ of_node_put(int_ext0);
+ of_node_put(intc0);
+}
+#else
+static inline void __init of_unittest_irq_refcount(void) { }
+#endif
+
static const struct of_device_id match_node_table[] = {
{ .data = "A", .name = "name0", }, /* Name alone is lowest priority */
{ .data = "B", .type = "type1", }, /* followed by type alone */
@@ -4324,6 +4390,7 @@ static int __init of_unittest(void)
of_unittest_changeset_prop();
of_unittest_parse_interrupts();
of_unittest_parse_interrupts_extended();
+ of_unittest_irq_refcount();
of_unittest_dma_get_max_cpu_address();
of_unittest_parse_dma_ranges();
of_unittest_pci_dma_ranges();
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index e71674753711..016c9d5a60a8 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -39,7 +39,6 @@ static unsigned char led_type; /* bitmask of LED_HAS_XXX */
static unsigned char lastleds; /* LED state from most recent update */
static unsigned char lcd_new_text;
static unsigned char lcd_text[20];
-static unsigned char lcd_text_default[20];
static unsigned char lcd_no_led_support; /* KittyHawk doesn't support LED on its LCD */
struct lcd_block {
@@ -456,9 +455,8 @@ static int __init early_led_init(void)
struct pdc_chassis_info chassis_info;
int ret;
- snprintf(lcd_text_default, sizeof(lcd_text_default),
+ scnprintf(lcd_text, sizeof(lcd_text),
"Linux %s", init_utsname()->release);
- strcpy(lcd_text, lcd_text_default);
lcd_new_text = 1;
/* Work around the buggy PDC of KittyHawk-machines */
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 2fbd379923fd..da28295b4aac 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -122,7 +122,10 @@ config PCI_ATS
bool
config PCI_DOE
- bool
+ bool "Enable PCI Data Object Exchange (DOE) support"
+ help
+ Say Y here if you want to be able to communicate with PCIe DOE
+ mailboxes.
config PCI_ECAM
bool
@@ -203,6 +206,12 @@ config PCI_P2PDMA
P2P DMA transactions must be between devices behind the same root
port.
+ Enabling this option will reduce the entropy of x86 KASLR memory
+ regions. For example, on a 46-bit system, the entropy goes down
+ from 16 bits to 15 bits. The actual reduction in entropy depends
+ on the physical address bits, on processor features, kernel config
+ (5 level page table) and physical memory present on the system.
+
If unsure, say N.
config PCI_LABEL
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 98910bc0fcc4..b6851101ac36 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -331,47 +331,6 @@ void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { }
void __weak pcibios_bus_add_device(struct pci_dev *pdev) { }
-/*
- * Create pwrctrl devices (if required) for the PCI devices to handle the power
- * state.
- */
-static void pci_pwrctrl_create_devices(struct pci_dev *dev)
-{
- struct device_node *np = dev_of_node(&dev->dev);
- struct device *parent = &dev->dev;
- struct platform_device *pdev;
-
- /*
- * First ensure that we are starting from a PCI bridge and it has a
- * corresponding devicetree node.
- */
- if (np && pci_is_bridge(dev)) {
- /*
- * Now look for the child PCI device nodes and create pwrctrl
- * devices for them. The pwrctrl device drivers will manage the
- * power state of the devices.
- */
- for_each_available_child_of_node_scoped(np, child) {
- /*
- * First check whether the pwrctrl device really
- * needs to be created or not. This is decided
- * based on at least one of the power supplies
- * being defined in the devicetree node of the
- * device.
- */
- if (!of_pci_supply_present(child)) {
- pci_dbg(dev, "skipping OF node: %s\n", child->name);
- return;
- }
-
- /* Now create the pwrctrl device */
- pdev = of_platform_device_create(child, NULL, parent);
- if (!pdev)
- pci_err(dev, "failed to create OF node: %s\n", child->name);
- }
- }
-}
-
/**
* pci_bus_add_device - start driver for a single device
* @dev: device to add
@@ -396,8 +355,6 @@ void pci_bus_add_device(struct pci_dev *dev)
pci_proc_attach_device(dev);
pci_bridge_d3_update(dev);
- pci_pwrctrl_create_devices(dev);
-
/*
* If the PCI device is associated with a pwrctrl device with a
* power supply, create a device link between the PCI device and
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 0341d51d6aed..ef1cfdae33bb 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -355,6 +355,7 @@ static const struct j721e_pcie_data j7200_pcie_rc_data = {
static const struct j721e_pcie_data j7200_pcie_ep_data = {
.mode = PCI_MODE_EP,
.quirk_detect_quiet_flag = true,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
.quirk_disable_flr = true,
.max_lanes = 2,
};
@@ -376,13 +377,13 @@ static const struct j721e_pcie_data j784s4_pcie_rc_data = {
.mode = PCI_MODE_RC,
.quirk_retrain_flag = true,
.byte_access_allowed = false,
- .linkdown_irq_regfield = LINK_DOWN,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
.max_lanes = 4,
};
static const struct j721e_pcie_data j784s4_pcie_ep_data = {
.mode = PCI_MODE_EP,
- .linkdown_irq_regfield = LINK_DOWN,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
.max_lanes = 4,
};
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index e0cc4560dfde..599ec4b1223e 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -301,12 +301,12 @@ static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
val |= interrupts;
cdns_pcie_ep_fn_writew(pcie, fn, reg, val);
- /* Set MSIX BAR and offset */
+ /* Set MSI-X BAR and offset */
reg = cap + PCI_MSIX_TABLE;
val = offset | bir;
cdns_pcie_ep_fn_writel(pcie, fn, reg, val);
- /* Set PBA BAR and offset. BAR must match MSIX BAR */
+ /* Set PBA BAR and offset. BAR must match MSI-X BAR */
reg = cap + PCI_MSIX_PBA;
val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
cdns_pcie_ep_fn_writel(pcie, fn, reg, val);
@@ -352,8 +352,7 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
spin_unlock_irqrestore(&ep->lock, flags);
offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
- CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
- CDNS_PCIE_MSG_NO_DATA;
+ CDNS_PCIE_NORMAL_MSG_CODE(msg_code);
writel(0, ep->irq_cpu_addr + offset);
}
@@ -573,8 +572,8 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
/*
* Next function field in ARI_CAP_AND_CTR register for last function
- * should be 0.
- * Clearing Next Function Number field for the last function used.
+ * should be 0. Clear Next Function Number field for the last
+ * function used.
*/
last_fn = find_last_bit(&epc->function_num_map, BITS_PER_LONG);
reg = CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(last_fn);
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index f5eeff834ec1..39ee9945c903 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -246,7 +246,7 @@ struct cdns_pcie_rp_ib_bar {
#define CDNS_PCIE_NORMAL_MSG_CODE_MASK GENMASK(15, 8)
#define CDNS_PCIE_NORMAL_MSG_CODE(code) \
(((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK)
-#define CDNS_PCIE_MSG_NO_DATA BIT(16)
+#define CDNS_PCIE_MSG_DATA BIT(16)
struct cdns_pcie;
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index b6d6778b0698..d9f0386396ed 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -6,6 +6,16 @@ menu "DesignWare-based PCIe controllers"
config PCIE_DW
bool
+config PCIE_DW_DEBUGFS
+ bool "DesignWare PCIe debugfs entries"
+ depends on DEBUG_FS
+ depends on PCIE_DW_HOST || PCIE_DW_EP
+ help
+ Say Y here to enable debugfs entries for the PCIe controller. These
+ entries provide various debug features related to the controller and
+ expose the RAS DES capabilities such as Silicon Debug, Error Injection
+ and Statistical Counters.
+
config PCIE_DW_HOST
bool
select PCIE_DW
@@ -27,6 +37,17 @@ config PCIE_AL
required only for DT-based platforms. ACPI platforms with the
Annapurna Labs PCIe controller don't need to enable this.
+config PCIE_AMD_MDB
+ bool "AMD MDB Versal2 PCIe controller"
+ depends on OF && (ARM64 || COMPILE_TEST)
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want to enable PCIe controller support on AMD
+ Versal2 SoCs. The AMD MDB Versal2 PCIe controller is based on
+ DesignWare IP, so this driver re-uses the DesignWare core
+ functions.
+
config PCI_MESON
tristate "Amlogic Meson PCIe controller"
default m if ARCH_MESON
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index a8308d9ea986..908cb7f345db 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -1,8 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PCIE_DW) += pcie-designware.o
+obj-$(CONFIG_PCIE_DW_DEBUGFS) += pcie-designware-debugfs.o
obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o
obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
+obj-$(CONFIG_PCIE_AMD_MDB) += pcie-amd-mdb.o
obj-$(CONFIG_PCIE_BT1) += pcie-bt1.o
obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 90ace941090f..5f267dd261b5 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -41,7 +41,6 @@
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE BIT(11)
#define IMX8MQ_GPR_PCIE_VREG_BYPASS BIT(12)
#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8)
-#define IMX8MQ_PCIE2_BASE_ADDR 0x33c00000
#define IMX95_PCIE_PHY_GEN_CTRL 0x0
#define IMX95_PCIE_REF_USE_PAD BIT(17)
@@ -109,7 +108,6 @@ enum imx_pcie_variants {
#define imx_check_flag(pci, val) (pci->drvdata->flags & val)
-#define IMX_PCIE_MAX_CLKS 6
#define IMX_PCIE_MAX_INSTANCES 2
struct imx_pcie;
@@ -120,9 +118,6 @@ struct imx_pcie_drvdata {
u32 flags;
int dbi_length;
const char *gpr;
- const char * const *clk_names;
- const u32 clks_cnt;
- const u32 clks_optional_cnt;
const u32 ltssm_off;
const u32 ltssm_mask;
const u32 mode_off[IMX_PCIE_MAX_INSTANCES];
@@ -137,7 +132,8 @@ struct imx_pcie_drvdata {
struct imx_pcie {
struct dw_pcie *pci;
struct gpio_desc *reset_gpiod;
- struct clk_bulk_data clks[IMX_PCIE_MAX_CLKS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct regmap *iomuxc_gpr;
u16 msi_ctrl;
u32 controller_id;
@@ -470,13 +466,14 @@ static int imx_setup_phy_mpll(struct imx_pcie *imx_pcie)
int mult, div;
u16 val;
int i;
+ struct clk_bulk_data *clks = imx_pcie->clks;
if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY))
return 0;
- for (i = 0; i < imx_pcie->drvdata->clks_cnt; i++)
- if (strncmp(imx_pcie->clks[i].id, "pcie_phy", 8) == 0)
- phy_rate = clk_get_rate(imx_pcie->clks[i].clk);
+ for (i = 0; i < imx_pcie->num_clks; i++)
+ if (strncmp(clks[i].id, "pcie_phy", 8) == 0)
+ phy_rate = clk_get_rate(clks[i].clk);
switch (phy_rate) {
case 125000000:
@@ -668,7 +665,7 @@ static int imx_pcie_clk_enable(struct imx_pcie *imx_pcie)
struct device *dev = pci->dev;
int ret;
- ret = clk_bulk_prepare_enable(imx_pcie->drvdata->clks_cnt, imx_pcie->clks);
+ ret = clk_bulk_prepare_enable(imx_pcie->num_clks, imx_pcie->clks);
if (ret)
return ret;
@@ -685,7 +682,7 @@ static int imx_pcie_clk_enable(struct imx_pcie *imx_pcie)
return 0;
err_ref_clk:
- clk_bulk_disable_unprepare(imx_pcie->drvdata->clks_cnt, imx_pcie->clks);
+ clk_bulk_disable_unprepare(imx_pcie->num_clks, imx_pcie->clks);
return ret;
}
@@ -694,7 +691,7 @@ static void imx_pcie_clk_disable(struct imx_pcie *imx_pcie)
{
if (imx_pcie->drvdata->enable_ref_clk)
imx_pcie->drvdata->enable_ref_clk(imx_pcie, false);
- clk_bulk_disable_unprepare(imx_pcie->drvdata->clks_cnt, imx_pcie->clks);
+ clk_bulk_disable_unprepare(imx_pcie->num_clks, imx_pcie->clks);
}
static int imx6sx_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
@@ -1217,22 +1214,6 @@ static void imx_pcie_host_exit(struct dw_pcie_rp *pp)
regulator_disable(imx_pcie->vpcie);
}
-static u64 imx_pcie_cpu_addr_fixup(struct dw_pcie *pcie, u64 cpu_addr)
-{
- struct imx_pcie *imx_pcie = to_imx_pcie(pcie);
- struct dw_pcie_rp *pp = &pcie->pp;
- struct resource_entry *entry;
-
- if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_CPU_ADDR_FIXUP))
- return cpu_addr;
-
- entry = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
- if (!entry)
- return cpu_addr;
-
- return cpu_addr - entry->offset;
-}
-
/*
* In old DWC implementations, PCIE_ATU_INHIBIT_PAYLOAD in iATU Ctrl2
* register is reserved, so the generic DWC implementation of sending the
@@ -1263,7 +1244,6 @@ static const struct dw_pcie_host_ops imx_pcie_host_dw_pme_ops = {
static const struct dw_pcie_ops dw_pcie_ops = {
.start_link = imx_pcie_start_link,
.stop_link = imx_pcie_stop_link,
- .cpu_addr_fixup = imx_pcie_cpu_addr_fixup,
};
static void imx_pcie_ep_init(struct dw_pcie_ep *ep)
@@ -1474,9 +1454,8 @@ static int imx_pcie_probe(struct platform_device *pdev)
struct dw_pcie *pci;
struct imx_pcie *imx_pcie;
struct device_node *np;
- struct resource *dbi_base;
struct device_node *node = dev->of_node;
- int i, ret, req_cnt;
+ int ret, domain;
u16 val;
imx_pcie = devm_kzalloc(dev, sizeof(*imx_pcie), GFP_KERNEL);
@@ -1515,10 +1494,6 @@ static int imx_pcie_probe(struct platform_device *pdev)
return PTR_ERR(imx_pcie->phy_base);
}
- pci->dbi_base = devm_platform_get_and_ioremap_resource(pdev, 0, &dbi_base);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
/* Fetch GPIOs */
imx_pcie->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(imx_pcie->reset_gpiod))
@@ -1526,20 +1501,11 @@ static int imx_pcie_probe(struct platform_device *pdev)
"unable to get reset gpio\n");
gpiod_set_consumer_name(imx_pcie->reset_gpiod, "PCIe reset");
- if (imx_pcie->drvdata->clks_cnt >= IMX_PCIE_MAX_CLKS)
- return dev_err_probe(dev, -ENOMEM, "clks_cnt is too big\n");
-
- for (i = 0; i < imx_pcie->drvdata->clks_cnt; i++)
- imx_pcie->clks[i].id = imx_pcie->drvdata->clk_names[i];
-
/* Fetch clocks */
- req_cnt = imx_pcie->drvdata->clks_cnt - imx_pcie->drvdata->clks_optional_cnt;
- ret = devm_clk_bulk_get(dev, req_cnt, imx_pcie->clks);
- if (ret)
- return ret;
- imx_pcie->clks[req_cnt].clk = devm_clk_get_optional(dev, "ref");
- if (IS_ERR(imx_pcie->clks[req_cnt].clk))
- return PTR_ERR(imx_pcie->clks[req_cnt].clk);
+ imx_pcie->num_clks = devm_clk_bulk_get_all(dev, &imx_pcie->clks);
+ if (imx_pcie->num_clks < 0)
+ return dev_err_probe(dev, imx_pcie->num_clks,
+ "failed to get clocks\n");
if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHYDRV)) {
imx_pcie->phy = devm_phy_get(dev, "pcie-phy");
@@ -1565,8 +1531,11 @@ static int imx_pcie_probe(struct platform_device *pdev)
switch (imx_pcie->drvdata->variant) {
case IMX8MQ:
case IMX8MQ_EP:
- if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
- imx_pcie->controller_id = 1;
+ domain = of_get_pci_domain_nr(node);
+ if (domain < 0 || domain > 1)
+ return dev_err_probe(dev, -ENODEV, "no \"linux,pci-domain\" property in devicetree\n");
+
+ imx_pcie->controller_id = domain;
break;
default:
break;
@@ -1645,6 +1614,7 @@ static int imx_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
+ pci->use_parent_dt_ranges = true;
if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE) {
ret = imx_add_pcie_ep(imx_pcie, pdev);
if (ret < 0)
@@ -1675,13 +1645,6 @@ static void imx_pcie_shutdown(struct platform_device *pdev)
imx_pcie_assert_core_reset(imx_pcie);
}
-static const char * const imx6q_clks[] = {"pcie_bus", "pcie", "pcie_phy"};
-static const char * const imx8mm_clks[] = {"pcie_bus", "pcie", "pcie_aux"};
-static const char * const imx8mq_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_aux"};
-static const char * const imx6sx_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_inbound_axi"};
-static const char * const imx8q_clks[] = {"mstr", "slv", "dbi"};
-static const char * const imx95_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_aux", "ref"};
-
static const struct imx_pcie_drvdata drvdata[] = {
[IMX6Q] = {
.variant = IMX6Q,
@@ -1691,8 +1654,6 @@ static const struct imx_pcie_drvdata drvdata[] = {
IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.dbi_length = 0x200,
.gpr = "fsl,imx6q-iomuxc-gpr",
- .clk_names = imx6q_clks,
- .clks_cnt = ARRAY_SIZE(imx6q_clks),
.ltssm_off = IOMUXC_GPR12,
.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
.mode_off[0] = IOMUXC_GPR12,
@@ -1707,8 +1668,6 @@ static const struct imx_pcie_drvdata drvdata[] = {
IMX_PCIE_FLAG_IMX_SPEED_CHANGE |
IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.gpr = "fsl,imx6q-iomuxc-gpr",
- .clk_names = imx6sx_clks,
- .clks_cnt = ARRAY_SIZE(imx6sx_clks),
.ltssm_off = IOMUXC_GPR12,
.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
.mode_off[0] = IOMUXC_GPR12,
@@ -1725,8 +1684,6 @@ static const struct imx_pcie_drvdata drvdata[] = {
IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.dbi_length = 0x200,
.gpr = "fsl,imx6q-iomuxc-gpr",
- .clk_names = imx6q_clks,
- .clks_cnt = ARRAY_SIZE(imx6q_clks),
.ltssm_off = IOMUXC_GPR12,
.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
.mode_off[0] = IOMUXC_GPR12,
@@ -1742,8 +1699,6 @@ static const struct imx_pcie_drvdata drvdata[] = {
IMX_PCIE_FLAG_HAS_APP_RESET |
IMX_PCIE_FLAG_HAS_PHY_RESET,
.gpr = "fsl,imx7d-iomuxc-gpr",
- .clk_names = imx6q_clks,
- .clks_cnt = ARRAY_SIZE(imx6q_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.enable_ref_clk = imx7d_pcie_enable_ref_clk,
@@ -1755,8 +1710,6 @@ static const struct imx_pcie_drvdata drvdata[] = {
IMX_PCIE_FLAG_HAS_PHY_RESET |
IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.gpr = "fsl,imx8mq-iomuxc-gpr",
- .clk_names = imx8mq_clks,
- .clks_cnt = ARRAY_SIZE(imx8mq_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.mode_off[1] = IOMUXC_GPR12,
@@ -1770,8 +1723,6 @@ static const struct imx_pcie_drvdata drvdata[] = {
IMX_PCIE_FLAG_HAS_PHYDRV |
IMX_PCIE_FLAG_HAS_APP_RESET,
.gpr = "fsl,imx8mm-iomuxc-gpr",
- .clk_names = imx8mm_clks,
- .clks_cnt = ARRAY_SIZE(imx8mm_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
@@ -1782,8 +1733,6 @@ static const struct imx_pcie_drvdata drvdata[] = {
IMX_PCIE_FLAG_HAS_PHYDRV |
IMX_PCIE_FLAG_HAS_APP_RESET,
.gpr = "fsl,imx8mp-iomuxc-gpr",
- .clk_names = imx8mm_clks,
- .clks_cnt = ARRAY_SIZE(imx8mm_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
@@ -1793,17 +1742,12 @@ static const struct imx_pcie_drvdata drvdata[] = {
.flags = IMX_PCIE_FLAG_HAS_PHYDRV |
IMX_PCIE_FLAG_CPU_ADDR_FIXUP |
IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
- .clk_names = imx8q_clks,
- .clks_cnt = ARRAY_SIZE(imx8q_clks),
},
[IMX95] = {
.variant = IMX95,
.flags = IMX_PCIE_FLAG_HAS_SERDES |
IMX_PCIE_FLAG_HAS_LUT |
IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
- .clk_names = imx95_clks,
- .clks_cnt = ARRAY_SIZE(imx95_clks),
- .clks_optional_cnt = 1,
.ltssm_off = IMX95_PE0_GEN_CTRL_3,
.ltssm_mask = IMX95_PCIE_LTSSM_EN,
.mode_off[0] = IMX95_PE0_GEN_CTRL_1,
@@ -1816,8 +1760,6 @@ static const struct imx_pcie_drvdata drvdata[] = {
IMX_PCIE_FLAG_HAS_PHY_RESET,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mq-iomuxc-gpr",
- .clk_names = imx8mq_clks,
- .clks_cnt = ARRAY_SIZE(imx8mq_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.mode_off[1] = IOMUXC_GPR12,
@@ -1832,8 +1774,6 @@ static const struct imx_pcie_drvdata drvdata[] = {
IMX_PCIE_FLAG_HAS_PHYDRV,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mm-iomuxc-gpr",
- .clk_names = imx8mm_clks,
- .clks_cnt = ARRAY_SIZE(imx8mm_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.epc_features = &imx8m_pcie_epc_features,
@@ -1845,8 +1785,6 @@ static const struct imx_pcie_drvdata drvdata[] = {
IMX_PCIE_FLAG_HAS_PHYDRV,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mp-iomuxc-gpr",
- .clk_names = imx8mm_clks,
- .clks_cnt = ARRAY_SIZE(imx8mm_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.epc_features = &imx8m_pcie_epc_features,
@@ -1857,15 +1795,11 @@ static const struct imx_pcie_drvdata drvdata[] = {
.flags = IMX_PCIE_FLAG_HAS_PHYDRV,
.mode = DW_PCIE_EP_TYPE,
.epc_features = &imx8q_pcie_epc_features,
- .clk_names = imx8q_clks,
- .clks_cnt = ARRAY_SIZE(imx8q_clks),
},
[IMX95_EP] = {
.variant = IMX95_EP,
.flags = IMX_PCIE_FLAG_HAS_SERDES |
IMX_PCIE_FLAG_SUPPORT_64BIT,
- .clk_names = imx8mq_clks,
- .clks_cnt = ARRAY_SIZE(imx8mq_clks),
.ltssm_off = IMX95_PE0_GEN_CTRL_3,
.ltssm_mask = IMX95_PCIE_LTSSM_EN,
.mode_off[0] = IMX95_PE0_GEN_CTRL_1,
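The i.MX conversion above drops the per-variant clock name tables in favour of devm_clk_bulk_get_all(), which picks up every clock listed in the device tree node and handles them as a set. A minimal sketch of that pattern follows; it is illustrative only and not part of the patch, and my_pcie/my_pcie_probe are made-up names.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct my_pcie {
	struct clk_bulk_data *clks;
	int num_clks;
};

static int my_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct my_pcie *priv;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Grab every clock listed in the DT node; returns the count or -errno. */
	priv->num_clks = devm_clk_bulk_get_all(dev, &priv->clks);
	if (priv->num_clks < 0)
		return dev_err_probe(dev, priv->num_clks,
				     "failed to get clocks\n");

	/* Prepare and enable the whole set with a single call. */
	ret = clk_bulk_prepare_enable(priv->num_clks, priv->clks);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, priv);
	return 0;
}

static struct platform_driver my_pcie_driver = {
	.driver = {
		.name = "my-pcie-clk-bulk-sketch",
	},
	.probe = my_pcie_probe,
};
builtin_platform_driver(my_pcie_driver);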
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 63bd5003da45..76a37368ae4f 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -966,11 +966,11 @@ static const struct pci_epc_features ks_pcie_am654_epc_features = {
.msix_capable = true,
.bar[BAR_0] = { .type = BAR_RESERVED, },
.bar[BAR_1] = { .type = BAR_RESERVED, },
- .bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
+ .bar[BAR_2] = { .type = BAR_RESIZABLE, },
.bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = SZ_64K, },
.bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256, },
- .bar[BAR_5] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
- .align = SZ_1M,
+ .bar[BAR_5] = { .type = BAR_RESIZABLE, },
+ .align = SZ_64K,
};
static const struct pci_epc_features*
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index 239a05b36e8e..a44b5c256d6e 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -356,7 +356,7 @@ static int ls_pcie_probe(struct platform_device *pdev)
if (pcie->drvdata->scfg_support) {
pcie->scfg =
syscon_regmap_lookup_by_phandle_args(dev->of_node,
- "fsl,pcie-scfg", 2,
+ "fsl,pcie-scfg", 1,
index);
if (IS_ERR(pcie->scfg)) {
dev_err(dev, "No syscfg phandle specified\n");
diff --git a/drivers/pci/controller/dwc/pcie-amd-mdb.c b/drivers/pci/controller/dwc/pcie-amd-mdb.c
new file mode 100644
index 000000000000..4eb2a4e8189d
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-amd-mdb.c
@@ -0,0 +1,476 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for AMD MDB PCIe Bridge
+ *
+ * Copyright (C) 2024-2025, Advanced Micro Devices, Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define AMD_MDB_TLP_IR_STATUS_MISC 0x4C0
+#define AMD_MDB_TLP_IR_MASK_MISC 0x4C4
+#define AMD_MDB_TLP_IR_ENABLE_MISC 0x4C8
+#define AMD_MDB_TLP_IR_DISABLE_MISC 0x4CC
+
+#define AMD_MDB_TLP_PCIE_INTX_MASK GENMASK(23, 16)
+
+#define AMD_MDB_PCIE_INTR_INTX_ASSERT(x) BIT((x) * 2)
+
+/* Interrupt register definitions. */
+#define AMD_MDB_PCIE_INTR_CMPL_TIMEOUT 15
+#define AMD_MDB_PCIE_INTR_INTX 16
+#define AMD_MDB_PCIE_INTR_PM_PME_RCVD 24
+#define AMD_MDB_PCIE_INTR_PME_TO_ACK_RCVD 25
+#define AMD_MDB_PCIE_INTR_MISC_CORRECTABLE 26
+#define AMD_MDB_PCIE_INTR_NONFATAL 27
+#define AMD_MDB_PCIE_INTR_FATAL 28
+
+#define IMR(x) BIT(AMD_MDB_PCIE_INTR_ ##x)
+#define AMD_MDB_PCIE_IMR_ALL_MASK \
+ ( \
+ IMR(CMPL_TIMEOUT) | \
+ IMR(PM_PME_RCVD) | \
+ IMR(PME_TO_ACK_RCVD) | \
+ IMR(MISC_CORRECTABLE) | \
+ IMR(NONFATAL) | \
+ IMR(FATAL) | \
+ AMD_MDB_TLP_PCIE_INTX_MASK \
+ )
+
+/**
+ * struct amd_mdb_pcie - PCIe port information
+ * @pci: DesignWare PCIe controller structure
+ * @slcr: MDB System Level Control and Status Register (SLCR) base
+ * @intx_domain: INTx IRQ domain pointer
+ * @mdb_domain: MDB IRQ domain pointer
+ * @intx_irq: INTx IRQ interrupt number
+ */
+struct amd_mdb_pcie {
+ struct dw_pcie pci;
+ void __iomem *slcr;
+ struct irq_domain *intx_domain;
+ struct irq_domain *mdb_domain;
+ int intx_irq;
+};
+
+static const struct dw_pcie_host_ops amd_mdb_pcie_host_ops = {
+};
+
+static void amd_mdb_intx_irq_mask(struct irq_data *data)
+{
+ struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(data);
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *port = &pci->pp;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = FIELD_PREP(AMD_MDB_TLP_PCIE_INTX_MASK,
+ AMD_MDB_PCIE_INTR_INTX_ASSERT(data->hwirq));
+
+ /*
+ * Writing '1' to a bit in AMD_MDB_TLP_IR_DISABLE_MISC disables that
+ * interrupt, writing '0' has no effect.
+ */
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void amd_mdb_intx_irq_unmask(struct irq_data *data)
+{
+ struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(data);
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *port = &pci->pp;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = FIELD_PREP(AMD_MDB_TLP_PCIE_INTX_MASK,
+ AMD_MDB_PCIE_INTR_INTX_ASSERT(data->hwirq));
+
+ /*
+ * Writing '1' to a bit in AMD_MDB_TLP_IR_ENABLE_MISC enables that
+ * interrupt, writing '0' has no effect.
+ */
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static struct irq_chip amd_mdb_intx_irq_chip = {
+ .name = "AMD MDB INTx",
+ .irq_mask = amd_mdb_intx_irq_mask,
+ .irq_unmask = amd_mdb_intx_irq_unmask,
+};
+
+/**
+ * amd_mdb_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
+ * @domain: IRQ domain
+ * @irq: Virtual IRQ number
+ * @hwirq: Hardware interrupt number
+ *
+ * Return: Always returns '0'.
+ */
+static int amd_mdb_pcie_intx_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &amd_mdb_intx_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+
+ return 0;
+}
+
+/* INTx IRQ domain operations. */
+static const struct irq_domain_ops amd_intx_domain_ops = {
+ .map = amd_mdb_pcie_intx_map,
+};
+
+static irqreturn_t dw_pcie_rp_intx(int irq, void *args)
+{
+ struct amd_mdb_pcie *pcie = args;
+ unsigned long val;
+ int i, int_status;
+
+ val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+ int_status = FIELD_GET(AMD_MDB_TLP_PCIE_INTX_MASK, val);
+
+ for (i = 0; i < PCI_NUM_INTX; i++) {
+ if (int_status & AMD_MDB_PCIE_INTR_INTX_ASSERT(i))
+ generic_handle_domain_irq(pcie->intx_domain, i);
+ }
+
+ return IRQ_HANDLED;
+}
+
+#define _IC(x, s)[AMD_MDB_PCIE_INTR_ ## x] = { __stringify(x), s }
+
+static const struct {
+ const char *sym;
+ const char *str;
+} intr_cause[32] = {
+ _IC(CMPL_TIMEOUT, "Completion timeout"),
+ _IC(PM_PME_RCVD, "PM_PME message received"),
+ _IC(PME_TO_ACK_RCVD, "PME_TO_ACK message received"),
+ _IC(MISC_CORRECTABLE, "Correctable error message"),
+ _IC(NONFATAL, "Non fatal error message"),
+ _IC(FATAL, "Fatal error message"),
+};
+
+static void amd_mdb_event_irq_mask(struct irq_data *d)
+{
+ struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *port = &pci->pp;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = BIT(d->hwirq);
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void amd_mdb_event_irq_unmask(struct irq_data *d)
+{
+ struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *port = &pci->pp;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = BIT(d->hwirq);
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static struct irq_chip amd_mdb_event_irq_chip = {
+ .name = "AMD MDB RC-Event",
+ .irq_mask = amd_mdb_event_irq_mask,
+ .irq_unmask = amd_mdb_event_irq_unmask,
+};
+
+static int amd_mdb_pcie_event_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &amd_mdb_event_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+
+ return 0;
+}
+
+static const struct irq_domain_ops event_domain_ops = {
+ .map = amd_mdb_pcie_event_map,
+};
+
+static irqreturn_t amd_mdb_pcie_event(int irq, void *args)
+{
+ struct amd_mdb_pcie *pcie = args;
+ unsigned long val;
+ int i;
+
+ val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+ val &= ~readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_MASK_MISC);
+ for_each_set_bit(i, &val, 32)
+ generic_handle_domain_irq(pcie->mdb_domain, i);
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+
+ return IRQ_HANDLED;
+}
+
+static void amd_mdb_pcie_free_irq_domains(struct amd_mdb_pcie *pcie)
+{
+ if (pcie->intx_domain) {
+ irq_domain_remove(pcie->intx_domain);
+ pcie->intx_domain = NULL;
+ }
+
+ if (pcie->mdb_domain) {
+ irq_domain_remove(pcie->mdb_domain);
+ pcie->mdb_domain = NULL;
+ }
+}
+
+static int amd_mdb_pcie_init_port(struct amd_mdb_pcie *pcie)
+{
+ unsigned long val;
+
+ /* Disable all TLP interrupts. */
+ writel_relaxed(AMD_MDB_PCIE_IMR_ALL_MASK,
+ pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC);
+
+ /* Clear pending TLP interrupts. */
+ val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+ val &= AMD_MDB_PCIE_IMR_ALL_MASK;
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+
+ /* Enable all TLP interrupts. */
+ writel_relaxed(AMD_MDB_PCIE_IMR_ALL_MASK,
+ pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC);
+
+ return 0;
+}
+
+/**
+ * amd_mdb_pcie_init_irq_domains - Initialize IRQ domain
+ * @pcie: PCIe port information
+ * @pdev: Platform device
+ *
+ * Return: Returns '0' on success and error value on failure.
+ */
+static int amd_mdb_pcie_init_irq_domains(struct amd_mdb_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node;
+ int err;
+
+ pcie_intc_node = of_get_next_child(node, NULL);
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+ return -ENODEV;
+ }
+
+ pcie->mdb_domain = irq_domain_add_linear(pcie_intc_node, 32,
+ &event_domain_ops, pcie);
+ if (!pcie->mdb_domain) {
+ err = -ENOMEM;
+ dev_err(dev, "Failed to add MDB domain\n");
+ goto out;
+ }
+
+ irq_domain_update_bus_token(pcie->mdb_domain, DOMAIN_BUS_NEXUS);
+
+ pcie->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
+ &amd_intx_domain_ops, pcie);
+ if (!pcie->intx_domain) {
+ err = -ENOMEM;
+ dev_err(dev, "Failed to add INTx domain\n");
+ goto mdb_out;
+ }
+
+ of_node_put(pcie_intc_node);
+ irq_domain_update_bus_token(pcie->intx_domain, DOMAIN_BUS_WIRED);
+
+ raw_spin_lock_init(&pp->lock);
+
+ return 0;
+mdb_out:
+ amd_mdb_pcie_free_irq_domains(pcie);
+out:
+ of_node_put(pcie_intc_node);
+ return err;
+}
+
+static irqreturn_t amd_mdb_pcie_intr_handler(int irq, void *args)
+{
+ struct amd_mdb_pcie *pcie = args;
+ struct device *dev;
+ struct irq_data *d;
+
+ dev = pcie->pci.dev;
+
+ /*
+ * In the future, error reporting will be hooked to the AER subsystem.
+ * Currently, the driver prints a warning message to the user.
+ */
+ d = irq_domain_get_irq_data(pcie->mdb_domain, irq);
+ if (intr_cause[d->hwirq].str)
+ dev_warn(dev, "%s\n", intr_cause[d->hwirq].str);
+ else
+ dev_warn_once(dev, "Unknown IRQ %ld\n", d->hwirq);
+
+ return IRQ_HANDLED;
+}
+
+static int amd_mdb_setup_irq(struct amd_mdb_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ int i, irq, err;
+
+ amd_mdb_pcie_init_port(pcie);
+
+ pp->irq = platform_get_irq(pdev, 0);
+ if (pp->irq < 0)
+ return pp->irq;
+
+ for (i = 0; i < ARRAY_SIZE(intr_cause); i++) {
+ if (!intr_cause[i].str)
+ continue;
+
+ irq = irq_create_mapping(pcie->mdb_domain, i);
+ if (!irq) {
+ dev_err(dev, "Failed to map MDB domain interrupt\n");
+ return -ENOMEM;
+ }
+
+ err = devm_request_irq(dev, irq, amd_mdb_pcie_intr_handler,
+ IRQF_NO_THREAD, intr_cause[i].sym, pcie);
+ if (err) {
+ dev_err(dev, "Failed to request IRQ %d, err=%d\n",
+ irq, err);
+ return err;
+ }
+ }
+
+ pcie->intx_irq = irq_create_mapping(pcie->mdb_domain,
+ AMD_MDB_PCIE_INTR_INTX);
+ if (!pcie->intx_irq) {
+ dev_err(dev, "Failed to map INTx interrupt\n");
+ return -ENXIO;
+ }
+
+ err = devm_request_irq(dev, pcie->intx_irq, dw_pcie_rp_intx,
+ IRQF_NO_THREAD, NULL, pcie);
+ if (err) {
+ dev_err(dev, "Failed to request INTx IRQ %d, err=%d\n",
+ pcie->intx_irq, err);
+ return err;
+ }
+
+ /* Plug the main event handler. */
+ err = devm_request_irq(dev, pp->irq, amd_mdb_pcie_event, IRQF_NO_THREAD,
+ "amd_mdb pcie_irq", pcie);
+ if (err) {
+ dev_err(dev, "Failed to request event IRQ %d, err=%d\n",
+ pp->irq, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int amd_mdb_add_pcie_port(struct amd_mdb_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ int err;
+
+ pcie->slcr = devm_platform_ioremap_resource_byname(pdev, "slcr");
+ if (IS_ERR(pcie->slcr))
+ return PTR_ERR(pcie->slcr);
+
+ err = amd_mdb_pcie_init_irq_domains(pcie, pdev);
+ if (err)
+ return err;
+
+ err = amd_mdb_setup_irq(pcie, pdev);
+ if (err) {
+ dev_err(dev, "Failed to set up interrupts, err=%d\n", err);
+ goto out;
+ }
+
+ pp->ops = &amd_mdb_pcie_host_ops;
+
+ err = dw_pcie_host_init(pp);
+ if (err) {
+ dev_err(dev, "Failed to initialize host, err=%d\n", err);
+ goto out;
+ }
+
+ return 0;
+
+out:
+ amd_mdb_pcie_free_irq_domains(pcie);
+ return err;
+}
+
+static int amd_mdb_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct amd_mdb_pcie *pcie;
+ struct dw_pcie *pci;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = &pcie->pci;
+ pci->dev = dev;
+
+ platform_set_drvdata(pdev, pcie);
+
+ return amd_mdb_add_pcie_port(pcie, pdev);
+}
+
+static const struct of_device_id amd_mdb_pcie_of_match[] = {
+ {
+ .compatible = "amd,versal2-mdb-host",
+ },
+ {},
+};
+
+static struct platform_driver amd_mdb_pcie_driver = {
+ .driver = {
+ .name = "amd-mdb-pcie",
+ .of_match_table = amd_mdb_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = amd_mdb_pcie_probe,
+};
+
+builtin_platform_driver(amd_mdb_pcie_driver);
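The INTx dispatch in dw_pcie_rp_intx() above relies on each INTx line occupying every other bit of the 8-bit field at bits 23:16 of AMD_MDB_TLP_IR_STATUS_MISC. A standalone decode of that layout, mirroring the driver's mask definitions; the status value below is a made-up example, not something read from hardware.

#include <stdio.h>
#include <stdint.h>

#define AMD_MDB_TLP_PCIE_INTX_SHIFT	16	/* field is bits 23:16 */
#define AMD_MDB_TLP_PCIE_INTX_WIDTH	8
#define AMD_MDB_PCIE_INTR_INTX_ASSERT(x)	(1u << ((x) * 2))	/* INTA..INTD */
#define PCI_NUM_INTX	4

int main(void)
{
	/* Example: INTA and INTC asserted -> status bits 16 and 20 set. */
	uint32_t status = (1u << 16) | (1u << 20);
	uint32_t intx = (status >> AMD_MDB_TLP_PCIE_INTX_SHIFT) &
			((1u << AMD_MDB_TLP_PCIE_INTX_WIDTH) - 1);

	for (int i = 0; i < PCI_NUM_INTX; i++)
		if (intx & AMD_MDB_PCIE_INTR_INTX_ASSERT(i))
			printf("INT%c asserted (hwirq %d)\n", 'A' + i, i);

	return 0;
}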
diff --git a/drivers/pci/controller/dwc/pcie-designware-debugfs.c b/drivers/pci/controller/dwc/pcie-designware-debugfs.c
new file mode 100644
index 000000000000..9e6f4d00f262
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-designware-debugfs.c
@@ -0,0 +1,677 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare PCIe controller debugfs driver
+ *
+ * Copyright (C) 2025 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Shradha Todi <shradha.t@samsung.com>
+ */
+
+#include <linux/debugfs.h>
+
+#include "pcie-designware.h"
+
+#define SD_STATUS_L1LANE_REG 0xb0
+#define PIPE_RXVALID BIT(18)
+#define PIPE_DETECT_LANE BIT(17)
+#define LANE_SELECT GENMASK(3, 0)
+
+#define ERR_INJ0_OFF 0x34
+#define EINJ_VAL_DIFF GENMASK(28, 16)
+#define EINJ_VC_NUM GENMASK(14, 12)
+#define EINJ_TYPE_SHIFT 8
+#define EINJ0_TYPE GENMASK(11, 8)
+#define EINJ1_TYPE BIT(8)
+#define EINJ2_TYPE GENMASK(9, 8)
+#define EINJ3_TYPE GENMASK(10, 8)
+#define EINJ4_TYPE GENMASK(10, 8)
+#define EINJ5_TYPE BIT(8)
+#define EINJ_COUNT GENMASK(7, 0)
+
+#define ERR_INJ_ENABLE_REG 0x30
+
+#define RAS_DES_EVENT_COUNTER_DATA_REG 0xc
+
+#define RAS_DES_EVENT_COUNTER_CTRL_REG 0x8
+#define EVENT_COUNTER_GROUP_SELECT GENMASK(27, 24)
+#define EVENT_COUNTER_EVENT_SELECT GENMASK(23, 16)
+#define EVENT_COUNTER_LANE_SELECT GENMASK(11, 8)
+#define EVENT_COUNTER_STATUS BIT(7)
+#define EVENT_COUNTER_ENABLE GENMASK(4, 2)
+#define PER_EVENT_ON 0x3
+#define PER_EVENT_OFF 0x1
+
+#define DWC_DEBUGFS_BUF_MAX 128
+
+/**
+ * struct dwc_pcie_rasdes_info - Stores controller common information
+ * @ras_cap_offset: RAS DES vendor specific extended capability offset
+ * @reg_event_lock: Mutex used for RAS DES shadow event registers
+ *
+ * Any parameter common to all files of the debugfs hierarchy for a single
+ * controller is stored in this struct. It is allocated and assigned to the
+ * controller-specific struct dw_pcie during initialization.
+ */
+struct dwc_pcie_rasdes_info {
+ u32 ras_cap_offset;
+ struct mutex reg_event_lock;
+};
+
+/**
+ * struct dwc_pcie_rasdes_priv - Stores file specific private data information
+ * @pci: Reference to the dw_pcie structure
+ * @idx: Index of this file's entry in the corresponding array of structs
+ *
+ * All debugfs files will have this struct as its private data.
+ */
+struct dwc_pcie_rasdes_priv {
+ struct dw_pcie *pci;
+ int idx;
+};
+
+/**
+ * struct dwc_pcie_err_inj - Store details about each error injection
+ * supported by DWC RAS DES
+ * @name: Name of the error that can be injected
+ * @err_inj_group: Group number to which the error belongs. The value
+ * can range from 0 to 5
+ * @err_inj_type: Each group can have multiple types of error
+ */
+struct dwc_pcie_err_inj {
+ const char *name;
+ u32 err_inj_group;
+ u32 err_inj_type;
+};
+
+static const struct dwc_pcie_err_inj err_inj_list[] = {
+ {"tx_lcrc", 0x0, 0x0},
+ {"b16_crc_dllp", 0x0, 0x1},
+ {"b16_crc_upd_fc", 0x0, 0x2},
+ {"tx_ecrc", 0x0, 0x3},
+ {"fcrc_tlp", 0x0, 0x4},
+ {"parity_tsos", 0x0, 0x5},
+ {"parity_skpos", 0x0, 0x6},
+ {"rx_lcrc", 0x0, 0x8},
+ {"rx_ecrc", 0x0, 0xb},
+ {"tlp_err_seq", 0x1, 0x0},
+ {"ack_nak_dllp_seq", 0x1, 0x1},
+ {"ack_nak_dllp", 0x2, 0x0},
+ {"upd_fc_dllp", 0x2, 0x1},
+ {"nak_dllp", 0x2, 0x2},
+ {"inv_sync_hdr_sym", 0x3, 0x0},
+ {"com_pad_ts1", 0x3, 0x1},
+ {"com_pad_ts2", 0x3, 0x2},
+ {"com_fts", 0x3, 0x3},
+ {"com_idl", 0x3, 0x4},
+ {"end_edb", 0x3, 0x5},
+ {"stp_sdp", 0x3, 0x6},
+ {"com_skp", 0x3, 0x7},
+ {"posted_tlp_hdr", 0x4, 0x0},
+ {"non_post_tlp_hdr", 0x4, 0x1},
+ {"cmpl_tlp_hdr", 0x4, 0x2},
+ {"posted_tlp_data", 0x4, 0x4},
+ {"non_post_tlp_data", 0x4, 0x5},
+ {"cmpl_tlp_data", 0x4, 0x6},
+ {"duplicate_tlp", 0x5, 0x0},
+ {"nullified_tlp", 0x5, 0x1},
+};
+
+static const u32 err_inj_type_mask[] = {
+ EINJ0_TYPE,
+ EINJ1_TYPE,
+ EINJ2_TYPE,
+ EINJ3_TYPE,
+ EINJ4_TYPE,
+ EINJ5_TYPE,
+};
+
+/**
+ * struct dwc_pcie_event_counter - Store details about each event counter
+ * supported in DWC RAS DES
+ * @name: Name of the error counter
+ * @group_no: Group number that the event belongs to. The value can range
+ * from 0 to 4
+ * @event_no: Event number of the particular event. The value ranges are:
+ * Group 0: 0 - 10
+ * Group 1: 5 - 13
+ * Group 2: 0 - 7
+ * Group 3: 0 - 5
+ * Group 4: 0 - 1
+ */
+struct dwc_pcie_event_counter {
+ const char *name;
+ u32 group_no;
+ u32 event_no;
+};
+
+static const struct dwc_pcie_event_counter event_list[] = {
+ {"ebuf_overflow", 0x0, 0x0},
+ {"ebuf_underrun", 0x0, 0x1},
+ {"decode_err", 0x0, 0x2},
+ {"running_disparity_err", 0x0, 0x3},
+ {"skp_os_parity_err", 0x0, 0x4},
+ {"sync_header_err", 0x0, 0x5},
+ {"rx_valid_deassertion", 0x0, 0x6},
+ {"ctl_skp_os_parity_err", 0x0, 0x7},
+ {"retimer_parity_err_1st", 0x0, 0x8},
+ {"retimer_parity_err_2nd", 0x0, 0x9},
+ {"margin_crc_parity_err", 0x0, 0xA},
+ {"detect_ei_infer", 0x1, 0x5},
+ {"receiver_err", 0x1, 0x6},
+ {"rx_recovery_req", 0x1, 0x7},
+ {"n_fts_timeout", 0x1, 0x8},
+ {"framing_err", 0x1, 0x9},
+ {"deskew_err", 0x1, 0xa},
+ {"framing_err_in_l0", 0x1, 0xc},
+ {"deskew_uncompleted_err", 0x1, 0xd},
+ {"bad_tlp", 0x2, 0x0},
+ {"lcrc_err", 0x2, 0x1},
+ {"bad_dllp", 0x2, 0x2},
+ {"replay_num_rollover", 0x2, 0x3},
+ {"replay_timeout", 0x2, 0x4},
+ {"rx_nak_dllp", 0x2, 0x5},
+ {"tx_nak_dllp", 0x2, 0x6},
+ {"retry_tlp", 0x2, 0x7},
+ {"fc_timeout", 0x3, 0x0},
+ {"poisoned_tlp", 0x3, 0x1},
+ {"ecrc_error", 0x3, 0x2},
+ {"unsupported_request", 0x3, 0x3},
+ {"completer_abort", 0x3, 0x4},
+ {"completion_timeout", 0x3, 0x5},
+ {"ebuf_skp_add", 0x4, 0x0},
+ {"ebuf_skp_del", 0x4, 0x1},
+};
+
+static ssize_t lane_detect_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dw_pcie *pci = file->private_data;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
+ val = FIELD_GET(PIPE_DETECT_LANE, val);
+ if (val)
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane Detected\n");
+ else
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane Undetected\n");
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t lane_detect_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dw_pcie *pci = file->private_data;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ u32 lane, val;
+
+ val = kstrtou32_from_user(buf, count, 0, &lane);
+ if (val)
+ return val;
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
+ val &= ~(LANE_SELECT);
+ val |= FIELD_PREP(LANE_SELECT, lane);
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG, val);
+
+ return count;
+}
+
+static ssize_t rx_valid_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dw_pcie *pci = file->private_data;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
+ val = FIELD_GET(PIPE_RXVALID, val);
+ if (val)
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "RX Valid\n");
+ else
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "RX Invalid\n");
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t rx_valid_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return lane_detect_write(file, buf, count, ppos);
+}
+
+static ssize_t err_inj_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ u32 val, counter, vc_num, err_group, type_mask;
+ int val_diff = 0;
+ char *kern_buf;
+
+ err_group = err_inj_list[pdata->idx].err_inj_group;
+ type_mask = err_inj_type_mask[err_group];
+
+ kern_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(kern_buf))
+ return PTR_ERR(kern_buf);
+
+ if (err_group == 4) {
+ val = sscanf(kern_buf, "%u %d %u", &counter, &val_diff, &vc_num);
+ if ((val != 3) || (val_diff < -4095 || val_diff > 4095)) {
+ kfree(kern_buf);
+ return -EINVAL;
+ }
+ } else if (err_group == 1) {
+ val = sscanf(kern_buf, "%u %d", &counter, &val_diff);
+ if ((val != 2) || (val_diff < -4095 || val_diff > 4095)) {
+ kfree(kern_buf);
+ return -EINVAL;
+ }
+ } else {
+ val = kstrtou32(kern_buf, 0, &counter);
+ if (val) {
+ kfree(kern_buf);
+ return val;
+ }
+ }
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + ERR_INJ0_OFF + (0x4 * err_group));
+ val &= ~(type_mask | EINJ_COUNT);
+ val |= ((err_inj_list[pdata->idx].err_inj_type << EINJ_TYPE_SHIFT) & type_mask);
+ val |= FIELD_PREP(EINJ_COUNT, counter);
+
+ if (err_group == 1 || err_group == 4) {
+ val &= ~(EINJ_VAL_DIFF);
+ val |= FIELD_PREP(EINJ_VAL_DIFF, val_diff);
+ }
+ if (err_group == 4) {
+ val &= ~(EINJ_VC_NUM);
+ val |= FIELD_PREP(EINJ_VC_NUM, vc_num);
+ }
+
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + ERR_INJ0_OFF + (0x4 * err_group), val);
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + ERR_INJ_ENABLE_REG, (0x1 << err_group));
+
+ kfree(kern_buf);
+ return count;
+}
+
+static void set_event_number(struct dwc_pcie_rasdes_priv *pdata,
+ struct dw_pcie *pci, struct dwc_pcie_rasdes_info *rinfo)
+{
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ val &= ~EVENT_COUNTER_ENABLE;
+ val &= ~(EVENT_COUNTER_GROUP_SELECT | EVENT_COUNTER_EVENT_SELECT);
+ val |= FIELD_PREP(EVENT_COUNTER_GROUP_SELECT, event_list[pdata->idx].group_no);
+ val |= FIELD_PREP(EVENT_COUNTER_EVENT_SELECT, event_list[pdata->idx].event_no);
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
+}
+
+static ssize_t counter_enable_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ mutex_unlock(&rinfo->reg_event_lock);
+ val = FIELD_GET(EVENT_COUNTER_STATUS, val);
+ if (val)
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter Enabled\n");
+ else
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter Disabled\n");
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t counter_enable_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ u32 val, enable;
+
+ val = kstrtou32_from_user(buf, count, 0, &enable);
+ if (val)
+ return val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ if (enable)
+ val |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_ON);
+ else
+ val |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_OFF);
+
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
+
+ /*
+ * While enabling the counter, always read the status back to check if
+ * it is enabled or not. Return error if it is not enabled to let the
+ * users know that the counter is not supported on the platform.
+ */
+ if (enable) {
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset +
+ RAS_DES_EVENT_COUNTER_CTRL_REG);
+ if (!FIELD_GET(EVENT_COUNTER_STATUS, val)) {
+ mutex_unlock(&rinfo->reg_event_lock);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ mutex_unlock(&rinfo->reg_event_lock);
+
+ return count;
+}
+
+static ssize_t counter_lane_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ mutex_unlock(&rinfo->reg_event_lock);
+ val = FIELD_GET(EVENT_COUNTER_LANE_SELECT, val);
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane: %d\n", val);
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t counter_lane_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ u32 val, lane;
+
+ val = kstrtou32_from_user(buf, count, 0, &lane);
+ if (val)
+ return val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ val &= ~(EVENT_COUNTER_LANE_SELECT);
+ val |= FIELD_PREP(EVENT_COUNTER_LANE_SELECT, lane);
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
+ mutex_unlock(&rinfo->reg_event_lock);
+
+ return count;
+}
+
+static ssize_t counter_value_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_DATA_REG);
+ mutex_unlock(&rinfo->reg_event_lock);
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter value: %d\n", val);
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static const char *ltssm_status_string(enum dw_pcie_ltssm ltssm)
+{
+ const char *str;
+
+ switch (ltssm) {
+#define DW_PCIE_LTSSM_NAME(n) case n: str = #n; break
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_QUIET);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_ACT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_ACTIVE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_COMPLIANCE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_CONFIG);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_PRE_DETECT_QUIET);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_WAIT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LINKWD_START);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LINKWD_ACEPT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LANENUM_WAI);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LANENUM_ACEPT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_COMPLETE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_LOCK);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_SPEED);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_RCVRCFG);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L0);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L0S);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L123_SEND_EIDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L1_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L2_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L2_WAKE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED_ENTRY);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_ENTRY);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_ACTIVE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_EXIT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_EXIT_TIMEOUT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_HOT_RESET_ENTRY);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_HOT_RESET);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ0);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ1);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ2);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ3);
+ default:
+ str = "DW_PCIE_LTSSM_UNKNOWN";
+ break;
+ }
+
+ return str + strlen("DW_PCIE_LTSSM_");
+}
+
+static int ltssm_status_show(struct seq_file *s, void *v)
+{
+ struct dw_pcie *pci = s->private;
+ enum dw_pcie_ltssm val;
+
+ val = dw_pcie_get_ltssm(pci);
+ seq_printf(s, "%s (0x%02x)\n", ltssm_status_string(val), val);
+
+ return 0;
+}
+
+static int ltssm_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ltssm_status_show, inode->i_private);
+}
+
+#define dwc_debugfs_create(name) \
+debugfs_create_file(#name, 0644, rasdes_debug, pci, \
+ &dbg_ ## name ## _fops)
+
+#define DWC_DEBUGFS_FOPS(name) \
+static const struct file_operations dbg_ ## name ## _fops = { \
+ .open = simple_open, \
+ .read = name ## _read, \
+ .write = name ## _write \
+}
+
+DWC_DEBUGFS_FOPS(lane_detect);
+DWC_DEBUGFS_FOPS(rx_valid);
+
+static const struct file_operations dwc_pcie_err_inj_ops = {
+ .open = simple_open,
+ .write = err_inj_write,
+};
+
+static const struct file_operations dwc_pcie_counter_enable_ops = {
+ .open = simple_open,
+ .read = counter_enable_read,
+ .write = counter_enable_write,
+};
+
+static const struct file_operations dwc_pcie_counter_lane_ops = {
+ .open = simple_open,
+ .read = counter_lane_read,
+ .write = counter_lane_write,
+};
+
+static const struct file_operations dwc_pcie_counter_value_ops = {
+ .open = simple_open,
+ .read = counter_value_read,
+};
+
+static const struct file_operations dwc_pcie_ltssm_status_ops = {
+ .open = ltssm_status_open,
+ .read = seq_read,
+};
+
+static void dwc_pcie_rasdes_debugfs_deinit(struct dw_pcie *pci)
+{
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+
+ mutex_destroy(&rinfo->reg_event_lock);
+}
+
+static int dwc_pcie_rasdes_debugfs_init(struct dw_pcie *pci, struct dentry *dir)
+{
+ struct dentry *rasdes_debug, *rasdes_err_inj;
+ struct dentry *rasdes_event_counter, *rasdes_events;
+ struct dwc_pcie_rasdes_info *rasdes_info;
+ struct dwc_pcie_rasdes_priv *priv_tmp;
+ struct device *dev = pci->dev;
+ int ras_cap, i, ret;
+
+ /*
+ * If a given SoC has no RAS DES capability, the following call is
+ * bound to return an error, breaking some existing platforms. So,
+ * return 0 here, as this is not necessarily an error.
+ */
+ ras_cap = dw_pcie_find_rasdes_capability(pci);
+ if (!ras_cap) {
+ dev_dbg(dev, "no RAS DES capability available\n");
+ return 0;
+ }
+
+ rasdes_info = devm_kzalloc(dev, sizeof(*rasdes_info), GFP_KERNEL);
+ if (!rasdes_info)
+ return -ENOMEM;
+
+ /* Create subdirectories for Debug, Error Injection, Statistics. */
+ rasdes_debug = debugfs_create_dir("rasdes_debug", dir);
+ rasdes_err_inj = debugfs_create_dir("rasdes_err_inj", dir);
+ rasdes_event_counter = debugfs_create_dir("rasdes_event_counter", dir);
+
+ mutex_init(&rasdes_info->reg_event_lock);
+ rasdes_info->ras_cap_offset = ras_cap;
+ pci->debugfs->rasdes_info = rasdes_info;
+
+ /* Create debugfs files for Debug subdirectory. */
+ dwc_debugfs_create(lane_detect);
+ dwc_debugfs_create(rx_valid);
+
+ /* Create debugfs files for Error Injection subdirectory. */
+ for (i = 0; i < ARRAY_SIZE(err_inj_list); i++) {
+ priv_tmp = devm_kzalloc(dev, sizeof(*priv_tmp), GFP_KERNEL);
+ if (!priv_tmp) {
+ ret = -ENOMEM;
+ goto err_deinit;
+ }
+
+ priv_tmp->idx = i;
+ priv_tmp->pci = pci;
+ debugfs_create_file(err_inj_list[i].name, 0200, rasdes_err_inj, priv_tmp,
+ &dwc_pcie_err_inj_ops);
+ }
+
+ /* Create debugfs files for Statistical Counter subdirectory. */
+ for (i = 0; i < ARRAY_SIZE(event_list); i++) {
+ priv_tmp = devm_kzalloc(dev, sizeof(*priv_tmp), GFP_KERNEL);
+ if (!priv_tmp) {
+ ret = -ENOMEM;
+ goto err_deinit;
+ }
+
+ priv_tmp->idx = i;
+ priv_tmp->pci = pci;
+ rasdes_events = debugfs_create_dir(event_list[i].name, rasdes_event_counter);
+ if (event_list[i].group_no == 0 || event_list[i].group_no == 4) {
+ debugfs_create_file("lane_select", 0644, rasdes_events,
+ priv_tmp, &dwc_pcie_counter_lane_ops);
+ }
+ debugfs_create_file("counter_value", 0444, rasdes_events, priv_tmp,
+ &dwc_pcie_counter_value_ops);
+ debugfs_create_file("counter_enable", 0644, rasdes_events, priv_tmp,
+ &dwc_pcie_counter_enable_ops);
+ }
+
+ return 0;
+
+err_deinit:
+ dwc_pcie_rasdes_debugfs_deinit(pci);
+ return ret;
+}
+
+static void dwc_pcie_ltssm_debugfs_init(struct dw_pcie *pci, struct dentry *dir)
+{
+ debugfs_create_file("ltssm_status", 0444, dir, pci,
+ &dwc_pcie_ltssm_status_ops);
+}
+
+void dwc_pcie_debugfs_deinit(struct dw_pcie *pci)
+{
+ if (!pci->debugfs)
+ return;
+
+ dwc_pcie_rasdes_debugfs_deinit(pci);
+ debugfs_remove_recursive(pci->debugfs->debug_dir);
+}
+
+void dwc_pcie_debugfs_init(struct dw_pcie *pci)
+{
+ char dirname[DWC_DEBUGFS_BUF_MAX];
+ struct device *dev = pci->dev;
+ struct debugfs_info *debugfs;
+ struct dentry *dir;
+ int err;
+
+ /* Create main directory for each platform driver. */
+ snprintf(dirname, DWC_DEBUGFS_BUF_MAX, "dwc_pcie_%s", dev_name(dev));
+ dir = debugfs_create_dir(dirname, NULL);
+ debugfs = devm_kzalloc(dev, sizeof(*debugfs), GFP_KERNEL);
+ if (!debugfs)
+ return;
+
+ debugfs->debug_dir = dir;
+ pci->debugfs = debugfs;
+ err = dwc_pcie_rasdes_debugfs_init(pci, dir);
+ if (err)
+ dev_err(dev, "failed to initialize RAS DES debugfs, err=%d\n",
+ err);
+
+ dwc_pcie_ltssm_debugfs_init(pci, dir);
+}
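Every statistical-counter file above goes through set_event_number(), which packs the group and event selectors into RAS_DES_EVENT_COUNTER_CTRL_REG under reg_event_lock before the counter is touched. A standalone sketch of how that control word is assembled; the shift values mirror the GENMASK fields above and the group/event numbers are the "bad_tlp" entry of event_list[]. Illustrative only, not part of the patch.

#include <stdio.h>
#include <stdint.h>

#define EVENT_COUNTER_GROUP_SHIFT	24	/* GENMASK(27, 24) */
#define EVENT_COUNTER_EVENT_SHIFT	16	/* GENMASK(23, 16) */
#define EVENT_COUNTER_LANE_SHIFT	8	/* GENMASK(11, 8) */
#define EVENT_COUNTER_ENABLE_SHIFT	2	/* GENMASK(4, 2) */
#define PER_EVENT_ON	0x3

int main(void)
{
	uint32_t group = 2, event = 0, lane = 0;	/* "bad_tlp" on lane 0 */
	uint32_t ctrl = 0;

	ctrl |= group << EVENT_COUNTER_GROUP_SHIFT;
	ctrl |= event << EVENT_COUNTER_EVENT_SHIFT;
	ctrl |= lane << EVENT_COUNTER_LANE_SHIFT;
	ctrl |= PER_EVENT_ON << EVENT_COUNTER_ENABLE_SHIFT;	/* enable per-event counting */

	printf("RAS_DES_EVENT_COUNTER_CTRL_REG = 0x%08x\n", ctrl);
	return 0;
}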
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 8e07d432e74f..1a0bf9341542 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -102,6 +102,45 @@ static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
}
+/**
+ * dw_pcie_ep_hide_ext_capability - Hide a capability from the linked list
+ * @pci: DWC PCI device
+ * @prev_cap: Capability preceding the capability that should be hidden
+ * @cap: Capability that should be hidden
+ *
+ * Return: 0 if success, errno otherwise.
+ */
+int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci, u8 prev_cap, u8 cap)
+{
+ u16 prev_cap_offset, cap_offset;
+ u32 prev_cap_header, cap_header;
+
+ prev_cap_offset = dw_pcie_find_ext_capability(pci, prev_cap);
+ if (!prev_cap_offset)
+ return -EINVAL;
+
+ prev_cap_header = dw_pcie_readl_dbi(pci, prev_cap_offset);
+ cap_offset = PCI_EXT_CAP_NEXT(prev_cap_header);
+ cap_header = dw_pcie_readl_dbi(pci, cap_offset);
+
+ /* cap must immediately follow prev_cap. */
+ if (PCI_EXT_CAP_ID(cap_header) != cap)
+ return -EINVAL;
+
+ /* Clear next ptr. */
+ prev_cap_header &= ~GENMASK(31, 20);
+
+ /* Set next ptr to next ptr of cap. */
+ prev_cap_header |= cap_header & GENMASK(31, 20);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writel_dbi(pci, prev_cap_offset, prev_cap_header);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_hide_ext_capability);
+
static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *hdr)
{
@@ -128,7 +167,7 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
}
static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
- dma_addr_t cpu_addr, enum pci_barno bar,
+ dma_addr_t parent_bus_addr, enum pci_barno bar,
size_t size)
{
int ret;
@@ -146,7 +185,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
}
ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type,
- cpu_addr, bar, size);
+ parent_bus_addr, bar, size);
if (ret < 0) {
dev_err(pci->dev, "Failed to program IB window\n");
return ret;
@@ -181,7 +220,7 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep,
return ret;
set_bit(free_win, ep->ob_window_map);
- ep->outbound_addr[free_win] = atu->cpu_addr;
+ ep->outbound_addr[free_win] = atu->parent_bus_addr;
return 0;
}
@@ -205,6 +244,125 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
ep->bar_to_atu[bar] = 0;
}
+static unsigned int dw_pcie_ep_get_rebar_offset(struct dw_pcie *pci,
+ enum pci_barno bar)
+{
+ u32 reg, bar_index;
+ unsigned int offset, nbars;
+ int i;
+
+ offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+ if (!offset)
+ return offset;
+
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >> PCI_REBAR_CTRL_NBAR_SHIFT;
+
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) {
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ bar_index = reg & PCI_REBAR_CTRL_BAR_IDX;
+ if (bar_index == bar)
+ return offset;
+ }
+
+ return 0;
+}
+
+static int dw_pcie_ep_set_bar_resizable(struct dw_pcie_ep *ep, u8 func_no,
+ struct pci_epf_bar *epf_bar)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar = epf_bar->barno;
+ size_t size = epf_bar->size;
+ int flags = epf_bar->flags;
+ u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+ unsigned int rebar_offset;
+ u32 rebar_cap, rebar_ctrl;
+ int ret;
+
+ rebar_offset = dw_pcie_ep_get_rebar_offset(pci, bar);
+ if (!rebar_offset)
+ return -EINVAL;
+
+ ret = pci_epc_bar_size_to_rebar_cap(size, &rebar_cap);
+ if (ret)
+ return ret;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ /*
+ * A BAR mask should not be written for a resizable BAR. The BAR mask
+ * is automatically derived by the controller every time the "selected
+ * size" bits are updated, see "Figure 3-26 Resizable BAR Example for
+ * 32-bit Memory BAR0" in DWC EP databook 5.96a. We simply need to write
+ * BIT(0) to set the BAR enable bit.
+ */
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg, BIT(0));
+ dw_pcie_ep_writel_dbi(ep, func_no, reg, flags);
+
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0);
+ dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
+ }
+
+ /*
+ * Bits 31:0 in PCI_REBAR_CAP define "supported sizes" bits for sizes
+ * 1 MB to 128 TB. Bits 31:16 in PCI_REBAR_CTRL define "supported sizes"
+ * bits for sizes 256 TB to 8 EB. Disallow sizes 256 TB to 8 EB.
+ */
+ rebar_ctrl = dw_pcie_readl_dbi(pci, rebar_offset + PCI_REBAR_CTRL);
+ rebar_ctrl &= ~GENMASK(31, 16);
+ dw_pcie_writel_dbi(pci, rebar_offset + PCI_REBAR_CTRL, rebar_ctrl);
+
+ /*
+ * The "selected size" (bits 13:8) in PCI_REBAR_CTRL are automatically
+ * updated when writing PCI_REBAR_CAP, see "Figure 3-26 Resizable BAR
+ * Example for 32-bit Memory BAR0" in DWC EP databook 5.96a.
+ */
+ dw_pcie_writel_dbi(pci, rebar_offset + PCI_REBAR_CAP, rebar_cap);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
+static int dw_pcie_ep_set_bar_programmable(struct dw_pcie_ep *ep, u8 func_no,
+ struct pci_epf_bar *epf_bar)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar = epf_bar->barno;
+ size_t size = epf_bar->size;
+ int flags = epf_bar->flags;
+ u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg, lower_32_bits(size - 1));
+ dw_pcie_ep_writel_dbi(ep, func_no, reg, flags);
+
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, upper_32_bits(size - 1));
+ dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
+ }
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
+static enum pci_epc_bar_type dw_pcie_ep_get_bar_type(struct dw_pcie_ep *ep,
+ enum pci_barno bar)
+{
+ const struct pci_epc_features *epc_features;
+
+ if (!ep->ops->get_features)
+ return BAR_PROGRAMMABLE;
+
+ epc_features = ep->ops->get_features(ep);
+
+ return epc_features->bar[bar].type;
+}
+
static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
@@ -212,9 +370,9 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar = epf_bar->barno;
size_t size = epf_bar->size;
+ enum pci_epc_bar_type bar_type;
int flags = epf_bar->flags;
int ret, type;
- u32 reg;
/*
* DWC does not allow BAR pairs to overlap, e.g. you cannot combine BARs
@@ -246,19 +404,30 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
goto config_atu;
}
- reg = PCI_BASE_ADDRESS_0 + (4 * bar);
-
- dw_pcie_dbi_ro_wr_en(pci);
-
- dw_pcie_ep_writel_dbi2(ep, func_no, reg, lower_32_bits(size - 1));
- dw_pcie_ep_writel_dbi(ep, func_no, reg, flags);
-
- if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
- dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, upper_32_bits(size - 1));
- dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
+ bar_type = dw_pcie_ep_get_bar_type(ep, bar);
+ switch (bar_type) {
+ case BAR_FIXED:
+ /*
+ * There is no need to write a BAR mask for a fixed BAR (except
+ * to write 1 to the LSB of the BAR mask register, to enable the
+ * BAR). Write the BAR mask regardless. (The fixed bits in the
+ * BAR mask register will be read-only anyway.)
+ */
+ fallthrough;
+ case BAR_PROGRAMMABLE:
+ ret = dw_pcie_ep_set_bar_programmable(ep, func_no, epf_bar);
+ break;
+ case BAR_RESIZABLE:
+ ret = dw_pcie_ep_set_bar_resizable(ep, func_no, epf_bar);
+ break;
+ default:
+ ret = -EINVAL;
+ dev_err(pci->dev, "Invalid BAR type\n");
+ break;
}
- dw_pcie_dbi_ro_wr_dis(pci);
+ if (ret)
+ return ret;
config_atu:
if (!(flags & PCI_BASE_ADDRESS_SPACE))
@@ -282,7 +451,7 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
u32 index;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- for (index = 0; index < pci->num_ob_windows; index++) {
+ for_each_set_bit(index, ep->ob_window_map, pci->num_ob_windows) {
if (ep->outbound_addr[index] != addr)
continue;
*atu_index = index;
@@ -314,7 +483,8 @@ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- ret = dw_pcie_find_index(ep, addr, &atu_index);
+ ret = dw_pcie_find_index(ep, addr - pci->parent_bus_offset,
+ &atu_index);
if (ret < 0)
return;
@@ -333,7 +503,7 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
atu.func_no = func_no;
atu.type = PCIE_ATU_TYPE_MEM;
- atu.cpu_addr = addr;
+ atu.parent_bus_addr = addr - pci->parent_bus_offset;
atu.pci_addr = pci_addr;
atu.size = size;
ret = dw_pcie_ep_outbound_atu(ep, &atu);
@@ -666,6 +836,7 @@ void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ dwc_pcie_debugfs_deinit(pci);
dw_pcie_edma_remove(pci);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_cleanup);
@@ -690,31 +861,15 @@ void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_deinit);
-static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
-{
- u32 header;
- int pos = PCI_CFG_SPACE_SIZE;
-
- while (pos) {
- header = dw_pcie_readl_dbi(pci, pos);
- if (PCI_EXT_CAP_ID(header) == cap)
- return pos;
-
- pos = PCI_EXT_CAP_NEXT(header);
- if (!pos)
- break;
- }
-
- return 0;
-}
-
static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
{
+ struct dw_pcie_ep *ep = &pci->ep;
unsigned int offset;
unsigned int nbars;
- u32 reg, i;
+ enum pci_barno bar;
+ u32 reg, i, val;
- offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+ offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
dw_pcie_dbi_ro_wr_en(pci);
@@ -727,9 +882,29 @@ static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
* PCIe r6.0, sec 7.8.6.2 require us to support at least one
* size in the range from 1 MB to 512 GB. Advertise support
* for 1 MB BAR size only.
+ *
+ * For a BAR that has been configured via dw_pcie_ep_set_bar(),
+ * advertise support for only that size instead.
*/
- for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
- dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, BIT(4));
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) {
+ /*
+ * While the RESBAR_CAP_REG_* fields are sticky, the
+ * RESBAR_CTRL_REG_BAR_SIZE field is non-sticky (it is
+ * sticky in certain versions of DWC PCIe, but not all).
+ *
+ * RESBAR_CTRL_REG_BAR_SIZE is updated automatically by
+ * the controller when RESBAR_CAP_REG is written, which
+ * is why RESBAR_CAP_REG is written here.
+ */
+ val = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ bar = val & PCI_REBAR_CTRL_BAR_IDX;
+ if (ep->epf_bar[bar])
+ pci_epc_bar_size_to_rebar_cap(ep->epf_bar[bar]->size, &val);
+ else
+ val = BIT(4);
+
+ dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, val);
+ }
}
dw_pcie_setup(pci);
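The size-to-capability mapping that pci_epc_bar_size_to_rebar_cap() is relied on for here is easy to show in isolation: in the Resizable BAR capability, bit 4 advertises 1 MB and each higher bit doubles the advertised size. A minimal standalone sketch (illustrative only, not the kernel helper; it assumes a power-of-two size and skips the rounding a real implementation would need):

#include <stdint.h>
#include <stdio.h>

/*
 * Illustration: compute the single PCI_REBAR_CAP_SIZES bit that advertises
 * exactly one supported size.  Bit 4 corresponds to 1 MB, bit 5 to 2 MB,
 * and so on.
 */
static uint32_t rebar_cap_for_size(uint64_t size)
{
	int log2_size = 63 - __builtin_clzll(size);

	if (log2_size < 20)			/* minimum advertised size is 1 MB */
		log2_size = 20;

	return 1u << (log2_size - 20 + 4);
}

int main(void)
{
	printf("%#x %#x\n",
	       rebar_cap_for_size(1 << 20),	/* 1 MB  -> 0x10, i.e. BIT(4)   */
	       rebar_cap_for_size(1 << 26));	/* 64 MB -> 0x400, i.e. BIT(10) */
	return 0;
}

So a BAR that dw_pcie_ep_set_bar() configured to 64 MB ends up advertising only BIT(10), which is exactly the "advertise support for only that size" behaviour the comment above describes.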
@@ -773,6 +948,7 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
if (ret)
return ret;
+ ret = -ENOMEM;
if (!ep->ib_window_map) {
ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
GFP_KERNEL);
@@ -817,7 +993,7 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
if (ep->ops->init)
ep->ops->init(ep);
- ptm_cap_base = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);
+ ptm_cap_base = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);
/*
* PTM responder capability can be disabled only after disabling
@@ -837,6 +1013,8 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
dw_pcie_ep_init_non_sticky_registers(pci);
+ dwc_pcie_debugfs_init(pci);
+
return 0;
err_remove_edma:
@@ -883,26 +1061,15 @@ void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_linkdown);
-/**
- * dw_pcie_ep_init - Initialize the endpoint device
- * @ep: DWC EP device
- *
- * Initialize the endpoint device. Allocate resources and create the EPC
- * device with the endpoint framework.
- *
- * Return: 0 if success, errno otherwise.
- */
-int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+static int dw_pcie_ep_get_resources(struct dw_pcie_ep *ep)
{
- int ret;
- struct resource *res;
- struct pci_epc *epc;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct device *dev = pci->dev;
struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
-
- INIT_LIST_HEAD(&ep->func_list);
+ struct pci_epc *epc = ep->epc;
+ struct resource *res;
+ int ret;
ret = dw_pcie_get_resources(pci);
if (ret)
@@ -915,8 +1082,37 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->phys_base = res->start;
ep->addr_size = resource_size(res);
- if (ep->ops->pre_init)
- ep->ops->pre_init(ep);
+ /*
+ * artpec6_pcie_cpu_addr_fixup() uses ep->phys_base, so call
+ * dw_pcie_parent_bus_offset() after setting ep->phys_base.
+ */
+ pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "addr_space",
+ ep->phys_base);
+
+ ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
+ if (ret < 0)
+ epc->max_functions = 1;
+
+ return 0;
+}
+
+/**
+ * dw_pcie_ep_init - Initialize the endpoint device
+ * @ep: DWC EP device
+ *
+ * Initialize the endpoint device. Allocate resources and create the EPC
+ * device with the endpoint framework.
+ *
+ * Return: 0 if success, errno otherwise.
+ */
+int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ int ret;
+ struct pci_epc *epc;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct device *dev = pci->dev;
+
+ INIT_LIST_HEAD(&ep->func_list);
epc = devm_pci_epc_create(dev, &epc_ops);
if (IS_ERR(epc)) {
@@ -927,9 +1123,12 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->epc = epc;
epc_set_drvdata(epc, ep);
- ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
- if (ret < 0)
- epc->max_functions = 1;
+ ret = dw_pcie_ep_get_resources(ep);
+ if (ret)
+ return ret;
+
+ if (ep->ops->pre_init)
+ ep->ops->pre_init(ep);
ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
ep->page_size);
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index ffaded8f2df7..ecc33f6789e3 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -418,19 +418,15 @@ static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp)
}
}
-int dw_pcie_host_init(struct dw_pcie_rp *pp)
+static int dw_pcie_host_get_resources(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
- struct device_node *np = dev->of_node;
struct platform_device *pdev = to_platform_device(dev);
struct resource_entry *win;
- struct pci_host_bridge *bridge;
struct resource *res;
int ret;
- raw_spin_lock_init(&pp->lock);
-
ret = dw_pcie_get_resources(pci);
if (ret)
return ret;
@@ -448,20 +444,43 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
if (IS_ERR(pp->va_cfg0_base))
return PTR_ERR(pp->va_cfg0_base);
- bridge = devm_pci_alloc_host_bridge(dev, 0);
- if (!bridge)
- return -ENOMEM;
-
- pp->bridge = bridge;
-
/* Get the I/O range from DT */
- win = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
+ win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_IO);
if (win) {
pp->io_size = resource_size(win->res);
pp->io_bus_addr = win->res->start - win->offset;
pp->io_base = pci_pio_to_address(win->res->start);
}
+ /*
+ * visconti_pcie_cpu_addr_fixup() uses pp->io_base, so we have to
+ * call dw_pcie_parent_bus_offset() after setting pp->io_base.
+ */
+ pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "config",
+ pp->cfg0_base);
+ return 0;
+}
+
+int dw_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct device_node *np = dev->of_node;
+ struct pci_host_bridge *bridge;
+ int ret;
+
+ raw_spin_lock_init(&pp->lock);
+
+ bridge = devm_pci_alloc_host_bridge(dev, 0);
+ if (!bridge)
+ return -ENOMEM;
+
+ pp->bridge = bridge;
+
+ ret = dw_pcie_host_get_resources(pp);
+ if (ret)
+ return ret;
+
/* Set default bus ops */
bridge->ops = &dw_pcie_ops;
bridge->child_ops = &dw_child_pcie_ops;
@@ -548,6 +567,8 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
if (pp->ops->post_init)
pp->ops->post_init(pp);
+ dwc_pcie_debugfs_init(pci);
+
return 0;
err_stop_link:
@@ -572,6 +593,8 @@ void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ dwc_pcie_debugfs_deinit(pci);
+
pci_stop_root_bus(pp->bridge->bus);
pci_remove_root_bus(pp->bridge->bus);
@@ -616,7 +639,7 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
type = PCIE_ATU_TYPE_CFG1;
atu.type = type;
- atu.cpu_addr = pp->cfg0_base;
+ atu.parent_bus_addr = pp->cfg0_base - pci->parent_bus_offset;
atu.pci_addr = busdev;
atu.size = pp->cfg0_size;
@@ -641,7 +664,7 @@ static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
if (pp->cfg0_io_shared) {
atu.type = PCIE_ATU_TYPE_IO;
- atu.cpu_addr = pp->io_base;
+ atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
atu.pci_addr = pp->io_bus_addr;
atu.size = pp->io_size;
@@ -667,7 +690,7 @@ static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
if (pp->cfg0_io_shared) {
atu.type = PCIE_ATU_TYPE_IO;
- atu.cpu_addr = pp->io_base;
+ atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
atu.pci_addr = pp->io_bus_addr;
atu.size = pp->io_size;
@@ -736,7 +759,7 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
atu.index = i;
atu.type = PCIE_ATU_TYPE_MEM;
- atu.cpu_addr = entry->res->start;
+ atu.parent_bus_addr = entry->res->start - pci->parent_bus_offset;
atu.pci_addr = entry->res->start - entry->offset;
/* Adjust iATU size if MSG TLP region was allocated before */
@@ -758,7 +781,7 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
if (pci->num_ob_windows > ++i) {
atu.index = i;
atu.type = PCIE_ATU_TYPE_IO;
- atu.cpu_addr = pp->io_base;
+ atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
atu.pci_addr = pp->io_bus_addr;
atu.size = pp->io_size;
@@ -902,13 +925,13 @@ static int dw_pcie_pme_turn_off(struct dw_pcie *pci)
atu.size = resource_size(pci->pp.msg_res);
atu.index = pci->pp.msg_atu_index;
- atu.cpu_addr = pci->pp.msg_res->start;
+ atu.parent_bus_addr = pci->pp.msg_res->start - pci->parent_bus_offset;
ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret)
return ret;
- mem = ioremap(atu.cpu_addr, pci->region_align);
+ mem = ioremap(pci->pp.msg_res->start, pci->region_align);
if (!mem)
return -ENOMEM;
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 145e7f579072..97d76d3dc066 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -16,6 +16,8 @@
#include <linux/gpio/consumer.h>
#include <linux/ioport.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pcie-dwc.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/types.h>
@@ -283,6 +285,51 @@ u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
}
EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);
+static u16 __dw_pcie_find_vsec_capability(struct dw_pcie *pci, u16 vendor_id,
+ u16 vsec_id)
+{
+ u16 vsec = 0;
+ u32 header;
+
+ if (vendor_id != dw_pcie_readw_dbi(pci, PCI_VENDOR_ID))
+ return 0;
+
+ while ((vsec = dw_pcie_find_next_ext_capability(pci, vsec,
+ PCI_EXT_CAP_ID_VNDR))) {
+ header = dw_pcie_readl_dbi(pci, vsec + PCI_VNDR_HEADER);
+ if (PCI_VNDR_HEADER_ID(header) == vsec_id)
+ return vsec;
+ }
+
+ return 0;
+}
+
+static u16 dw_pcie_find_vsec_capability(struct dw_pcie *pci,
+ const struct dwc_pcie_vsec_id *vsec_ids)
+{
+ const struct dwc_pcie_vsec_id *vid;
+ u16 vsec;
+ u32 header;
+
+ for (vid = vsec_ids; vid->vendor_id; vid++) {
+ vsec = __dw_pcie_find_vsec_capability(pci, vid->vendor_id,
+ vid->vsec_id);
+ if (vsec) {
+ header = dw_pcie_readl_dbi(pci, vsec + PCI_VNDR_HEADER);
+ if (PCI_VNDR_HEADER_REV(header) == vid->vsec_rev)
+ return vsec;
+ }
+ }
+
+ return 0;
+}
+
+u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci)
+{
+ return dw_pcie_find_vsec_capability(pci, dwc_pcie_rasdes_vsec_ids);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_find_rasdes_capability);
+
int dw_pcie_read(void __iomem *addr, int size, u32 *val)
{
if (!IS_ALIGNED((uintptr_t)addr, size)) {
@@ -470,25 +517,22 @@ static inline u32 dw_pcie_enable_ecrc(u32 val)
int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
const struct dw_pcie_ob_atu_cfg *atu)
{
- u64 cpu_addr = atu->cpu_addr;
+ u64 parent_bus_addr = atu->parent_bus_addr;
u32 retries, val;
u64 limit_addr;
- if (pci->ops && pci->ops->cpu_addr_fixup)
- cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
-
- limit_addr = cpu_addr + atu->size - 1;
+ limit_addr = parent_bus_addr + atu->size - 1;
- if ((limit_addr & ~pci->region_limit) != (cpu_addr & ~pci->region_limit) ||
- !IS_ALIGNED(cpu_addr, pci->region_align) ||
+ if ((limit_addr & ~pci->region_limit) != (parent_bus_addr & ~pci->region_limit) ||
+ !IS_ALIGNED(parent_bus_addr, pci->region_align) ||
!IS_ALIGNED(atu->pci_addr, pci->region_align) || !atu->size) {
return -EINVAL;
}
dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LOWER_BASE,
- lower_32_bits(cpu_addr));
+ lower_32_bits(parent_bus_addr));
dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_BASE,
- upper_32_bits(cpu_addr));
+ upper_32_bits(parent_bus_addr));
dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LIMIT,
lower_32_bits(limit_addr));
@@ -502,7 +546,7 @@ int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
upper_32_bits(atu->pci_addr));
val = atu->type | atu->routing | PCIE_ATU_FUNC_NUM(atu->func_no);
- if (upper_32_bits(limit_addr) > upper_32_bits(cpu_addr) &&
+ if (upper_32_bits(limit_addr) > upper_32_bits(parent_bus_addr) &&
dw_pcie_ver_is_ge(pci, 460A))
val |= PCIE_ATU_INCREASE_REGION_SIZE;
if (dw_pcie_ver_is(pci, 490A))
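Spelled out, the window checks above require the parent-bus base and the PCI address to be region_align-aligned, and the whole window to stay within the span that region_limit says the LIMIT register can express. A standalone sketch with assumed hardware parameters (64 KB alignment, 32-bit limit):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

/* Assumed hardware parameters, for illustration only. */
static const uint64_t region_align = 0x10000;		/* 64 KB */
static const uint64_t region_limit = 0xffffffff;	/* 32-bit LIMIT register */

static bool atu_window_ok(uint64_t base, uint64_t pci_addr, uint64_t size)
{
	uint64_t limit = base + size - 1;

	/* Base and limit must share all bits above the LIMIT register width. */
	if ((limit & ~region_limit) != (base & ~region_limit))
		return false;
	if (!IS_ALIGNED(base, region_align) || !IS_ALIGNED(pci_addr, region_align))
		return false;
	return size != 0;
}

int main(void)
{
	/* 256 MB window ending at 0xffffffff: accepted */
	printf("%d\n", atu_window_ok(0xf0000000, 0x0, 0x10000000));
	/* 512 MB window spilling past what the assumed limit can hold: rejected */
	printf("%d\n", atu_window_ok(0xf0000000, 0x0, 0x20000000));
	return 0;
}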
@@ -545,13 +589,13 @@ static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg
}
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u64 size)
+ u64 parent_bus_addr, u64 pci_addr, u64 size)
{
u64 limit_addr = pci_addr + size - 1;
u32 retries, val;
if ((limit_addr & ~pci->region_limit) != (pci_addr & ~pci->region_limit) ||
- !IS_ALIGNED(cpu_addr, pci->region_align) ||
+ !IS_ALIGNED(parent_bus_addr, pci->region_align) ||
!IS_ALIGNED(pci_addr, pci->region_align) || !size) {
return -EINVAL;
}
@@ -568,9 +612,9 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
upper_32_bits(limit_addr));
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
- lower_32_bits(cpu_addr));
+ lower_32_bits(parent_bus_addr));
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
- upper_32_bits(cpu_addr));
+ upper_32_bits(parent_bus_addr));
val = type;
if (upper_32_bits(limit_addr) > upper_32_bits(pci_addr) &&
@@ -597,18 +641,18 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
}
int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u8 bar, size_t size)
+ int type, u64 parent_bus_addr, u8 bar, size_t size)
{
u32 retries, val;
- if (!IS_ALIGNED(cpu_addr, pci->region_align) ||
- !IS_ALIGNED(cpu_addr, size))
+ if (!IS_ALIGNED(parent_bus_addr, pci->region_align) ||
+ !IS_ALIGNED(parent_bus_addr, size))
return -EINVAL;
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
- lower_32_bits(cpu_addr));
+ lower_32_bits(parent_bus_addr));
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
- upper_32_bits(cpu_addr));
+ upper_32_bits(parent_bus_addr));
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, type |
PCIE_ATU_FUNC_NUM(func_no));
@@ -1105,3 +1149,63 @@ void dw_pcie_setup(struct dw_pcie *pci)
dw_pcie_link_set_max_link_width(pci, pci->num_lanes);
}
+
+resource_size_t dw_pcie_parent_bus_offset(struct dw_pcie *pci,
+ const char *reg_name,
+ resource_size_t cpu_phys_addr)
+{
+ struct device *dev = pci->dev;
+ struct device_node *np = dev->of_node;
+ int index;
+ u64 reg_addr, fixup_addr;
+ u64 (*fixup)(struct dw_pcie *pcie, u64 cpu_addr);
+
+ /* Look up reg_name address on parent bus */
+ index = of_property_match_string(np, "reg-names", reg_name);
+
+ if (index < 0) {
+ dev_err(dev, "No %s in devicetree \"reg\" property\n", reg_name);
+ return 0;
+ }
+
+ of_property_read_reg(np, index, &reg_addr, NULL);
+
+ fixup = pci->ops ? pci->ops->cpu_addr_fixup : NULL;
+ if (fixup) {
+ fixup_addr = fixup(pci, cpu_phys_addr);
+ if (reg_addr == fixup_addr) {
+ dev_info(dev, "%s reg[%d] %#010llx == %#010llx == fixup(cpu %#010llx); %ps is redundant with this devicetree\n",
+ reg_name, index, reg_addr, fixup_addr,
+ (unsigned long long) cpu_phys_addr, fixup);
+ } else {
+ dev_warn(dev, "%s reg[%d] %#010llx != %#010llx == fixup(cpu %#010llx); devicetree is broken\n",
+ reg_name, index, reg_addr, fixup_addr,
+ (unsigned long long) cpu_phys_addr);
+ reg_addr = fixup_addr;
+ }
+
+ return cpu_phys_addr - reg_addr;
+ }
+
+ if (pci->use_parent_dt_ranges) {
+
+ /*
+ * This platform once had a fixup, presumably because it
+ * translates between CPU and PCI controller addresses.
+ * Log a note if devicetree didn't describe a translation.
+ */
+ if (reg_addr == cpu_phys_addr)
+ dev_info(dev, "%s reg[%d] %#010llx == cpu %#010llx\n; no fixup was ever needed for this devicetree\n",
+ reg_name, index, reg_addr,
+ (unsigned long long) cpu_phys_addr);
+ } else {
+ if (reg_addr != cpu_phys_addr) {
+ dev_warn(dev, "%s reg[%d] %#010llx != cpu %#010llx; no fixup and devicetree \"ranges\" is broken, assuming no translation\n",
+ reg_name, index, reg_addr,
+ (unsigned long long) cpu_phys_addr);
+ return 0;
+ }
+ }
+
+ return cpu_phys_addr - reg_addr;
+}
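A worked example of the value this function returns, with made-up addresses: if the "config"/"addr_space" resource sits at 0x40000000 on the CPU side but the parent node's DT reg/ranges say the controller itself sees it at 0x0, the offset is 0x40000000, and every address later handed to the iATU in this series is the CPU address minus that offset. A minimal standalone sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical example values */
	uint64_t cpu_phys_addr = 0x40000000;	/* from the platform resource  */
	uint64_t reg_addr      = 0x00000000;	/* from of_property_read_reg() */

	/* Same subtraction as dw_pcie_parent_bus_offset() */
	uint64_t parent_bus_offset = cpu_phys_addr - reg_addr;

	/* A window the driver wants to map at CPU address 0x48000000 */
	uint64_t cpu_addr = 0x48000000;
	uint64_t parent_bus_addr = cpu_addr - parent_bus_offset;

	printf("offset=%#llx iATU base=%#llx\n",
	       (unsigned long long)parent_bus_offset,	/* 0x40000000 */
	       (unsigned long long)parent_bus_addr);	/* 0x8000000  */
	return 0;
}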
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 501d9ddfea16..56aafdbcdaca 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -330,9 +330,40 @@ enum dw_pcie_ltssm {
/* Need to align with PCIE_PORT_DEBUG0 bits 0:5 */
DW_PCIE_LTSSM_DETECT_QUIET = 0x0,
DW_PCIE_LTSSM_DETECT_ACT = 0x1,
+ DW_PCIE_LTSSM_POLL_ACTIVE = 0x2,
+ DW_PCIE_LTSSM_POLL_COMPLIANCE = 0x3,
+ DW_PCIE_LTSSM_POLL_CONFIG = 0x4,
+ DW_PCIE_LTSSM_PRE_DETECT_QUIET = 0x5,
DW_PCIE_LTSSM_DETECT_WAIT = 0x6,
+ DW_PCIE_LTSSM_CFG_LINKWD_START = 0x7,
+ DW_PCIE_LTSSM_CFG_LINKWD_ACEPT = 0x8,
+ DW_PCIE_LTSSM_CFG_LANENUM_WAI = 0x9,
+ DW_PCIE_LTSSM_CFG_LANENUM_ACEPT = 0xa,
+ DW_PCIE_LTSSM_CFG_COMPLETE = 0xb,
+ DW_PCIE_LTSSM_CFG_IDLE = 0xc,
+ DW_PCIE_LTSSM_RCVRY_LOCK = 0xd,
+ DW_PCIE_LTSSM_RCVRY_SPEED = 0xe,
+ DW_PCIE_LTSSM_RCVRY_RCVRCFG = 0xf,
+ DW_PCIE_LTSSM_RCVRY_IDLE = 0x10,
DW_PCIE_LTSSM_L0 = 0x11,
+ DW_PCIE_LTSSM_L0S = 0x12,
+ DW_PCIE_LTSSM_L123_SEND_EIDLE = 0x13,
+ DW_PCIE_LTSSM_L1_IDLE = 0x14,
DW_PCIE_LTSSM_L2_IDLE = 0x15,
+ DW_PCIE_LTSSM_L2_WAKE = 0x16,
+ DW_PCIE_LTSSM_DISABLED_ENTRY = 0x17,
+ DW_PCIE_LTSSM_DISABLED_IDLE = 0x18,
+ DW_PCIE_LTSSM_DISABLED = 0x19,
+ DW_PCIE_LTSSM_LPBK_ENTRY = 0x1a,
+ DW_PCIE_LTSSM_LPBK_ACTIVE = 0x1b,
+ DW_PCIE_LTSSM_LPBK_EXIT = 0x1c,
+ DW_PCIE_LTSSM_LPBK_EXIT_TIMEOUT = 0x1d,
+ DW_PCIE_LTSSM_HOT_RESET_ENTRY = 0x1e,
+ DW_PCIE_LTSSM_HOT_RESET = 0x1f,
+ DW_PCIE_LTSSM_RCVRY_EQ0 = 0x20,
+ DW_PCIE_LTSSM_RCVRY_EQ1 = 0x21,
+ DW_PCIE_LTSSM_RCVRY_EQ2 = 0x22,
+ DW_PCIE_LTSSM_RCVRY_EQ3 = 0x23,
DW_PCIE_LTSSM_UNKNOWN = 0xFFFFFFFF,
};
@@ -343,7 +374,7 @@ struct dw_pcie_ob_atu_cfg {
u8 func_no;
u8 code;
u8 routing;
- u64 cpu_addr;
+ u64 parent_bus_addr;
u64 pci_addr;
u64 size;
};
@@ -437,6 +468,11 @@ struct dw_pcie_ops {
void (*stop_link)(struct dw_pcie *pcie);
};
+struct debugfs_info {
+ struct dentry *debug_dir;
+ void *rasdes_info;
+};
+
struct dw_pcie {
struct device *dev;
void __iomem *dbi_base;
@@ -445,6 +481,7 @@ struct dw_pcie {
void __iomem *atu_base;
resource_size_t atu_phys_addr;
size_t atu_size;
+ resource_size_t parent_bus_offset;
u32 num_ib_windows;
u32 num_ob_windows;
u32 region_align;
@@ -465,6 +502,20 @@ struct dw_pcie {
struct reset_control_bulk_data core_rsts[DW_PCIE_NUM_CORE_RSTS];
struct gpio_desc *pe_rst;
bool suspended;
+ struct debugfs_info *debugfs;
+
+ /*
+ * If iATU input addresses are offset from CPU physical addresses,
+ * we previously required .cpu_addr_fixup() to convert them. We
+ * now rely on the devicetree instead. If .cpu_addr_fixup()
+ * exists, we compare its results with devicetree.
+ *
+ * If .cpu_addr_fixup() does not exist, we assume the offset is
+ * zero and warn if devicetree claims otherwise. If we know all
+ * devicetrees correctly describe the offset, set
+ * use_parent_dt_ranges to true to avoid this warning.
+ */
+ bool use_parent_dt_ranges;
};
#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
@@ -478,6 +529,7 @@ void dw_pcie_version_detect(struct dw_pcie *pci);
u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap);
u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap);
+u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci);
int dw_pcie_read(void __iomem *addr, int size, u32 *val);
int dw_pcie_write(void __iomem *addr, int size, u32 val);
@@ -491,14 +543,18 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci);
int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
const struct dw_pcie_ob_atu_cfg *atu);
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u64 size);
+ u64 parent_bus_addr, u64 pci_addr, u64 size);
int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u8 bar, size_t size);
+ int type, u64 parent_bus_addr,
+ u8 bar, size_t size);
void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index);
void dw_pcie_setup(struct dw_pcie *pci);
void dw_pcie_iatu_detect(struct dw_pcie *pci);
int dw_pcie_edma_detect(struct dw_pcie *pci);
void dw_pcie_edma_remove(struct dw_pcie *pci);
+resource_size_t dw_pcie_parent_bus_offset(struct dw_pcie *pci,
+ const char *reg_name,
+ resource_size_t cpu_phy_addr);
static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
{
@@ -743,6 +799,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num);
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar);
+int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci, u8 prev_cap, u8 cap);
struct dw_pcie_ep_func *
dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no);
#else
@@ -800,10 +857,29 @@ static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
}
+static inline int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci,
+ u8 prev_cap, u8 cap)
+{
+ return 0;
+}
+
static inline struct dw_pcie_ep_func *
dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
{
return NULL;
}
#endif
+
+#ifdef CONFIG_PCIE_DW_DEBUGFS
+void dwc_pcie_debugfs_init(struct dw_pcie *pci);
+void dwc_pcie_debugfs_deinit(struct dw_pcie *pci);
+#else
+static inline void dwc_pcie_debugfs_init(struct dw_pcie *pci)
+{
+}
+static inline void dwc_pcie_debugfs_deinit(struct dw_pcie *pci)
+{
+}
+#endif
+
#endif /* _PCIE_DESIGNWARE_H */
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
index 93698abff4d9..c624b7ebd118 100644
--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -240,6 +240,34 @@ static const struct dw_pcie_host_ops rockchip_pcie_host_ops = {
.init = rockchip_pcie_host_init,
};
+/*
+ * ATS does not work on RK3588 when running in EP mode.
+ *
+ * After the host has enabled ATS on the EP side, it will send an IOTLB
+ * invalidation request to the EP side. However, the RK3588 will never send
+ * a completion back and eventually the host will print an IOTLB_INV_TIMEOUT
+ * error, and the EP will not be operational. If we hide the ATS capability,
+ * things work as expected.
+ */
+static void rockchip_pcie_ep_hide_broken_ats_cap_rk3588(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct device *dev = pci->dev;
+
+ /* Only hide the ATS capability for RK3588 running in EP mode. */
+ if (!of_device_is_compatible(dev->of_node, "rockchip,rk3588-pcie-ep"))
+ return;
+
+ if (dw_pcie_ep_hide_ext_capability(pci, PCI_EXT_CAP_ID_SECPCI,
+ PCI_EXT_CAP_ID_ATS))
+ dev_err(dev, "failed to hide ATS capability\n");
+}
+
+static void rockchip_pcie_ep_pre_init(struct dw_pcie_ep *ep)
+{
+ rockchip_pcie_ep_hide_broken_ats_cap_rk3588(ep);
+}
+
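dw_pcie_ep_hide_ext_capability() is used here to splice the ATS capability out of the extended-capability linked list by repointing the preceding capability's next pointer. Below is a minimal, self-contained sketch of that splice over a mock config space, assuming only the standard extended-capability header layout; it is not the kernel helper, which presumably also has to unlock the otherwise read-only DBI registers before writing:

#include <stdint.h>
#include <stdio.h>

/* Header layout: ID in bits 15:0, version in 19:16, next pointer in 31:20. */
#define EXT_CAP_ID(h)	((h) & 0xffff)
#define EXT_CAP_NEXT(h)	(((h) >> 20) & 0xffc)

static uint32_t cfg[4096 / 4];			/* mock config space */

static void hide_next_cap(uint16_t prev, uint16_t want_id)
{
	uint32_t prev_hdr = cfg[prev / 4];
	uint16_t victim = EXT_CAP_NEXT(prev_hdr);
	uint32_t victim_hdr = cfg[victim / 4];

	if (EXT_CAP_ID(victim_hdr) != want_id)
		return;				/* only hide the expected cap */

	/* Splice: prev->next = victim->next, keeping prev's ID/version bits. */
	cfg[prev / 4] = (prev_hdr & 0x000fffff) | (victim_hdr & 0xfff00000);
}

int main(void)
{
	/* SECPCI (0x19) at 0x100 -> ATS (0x0f) at 0x200 -> end of list */
	cfg[0x100 / 4] = (0x200u << 20) | (1 << 16) | 0x0019;
	cfg[0x200 / 4] = (0x000u << 20) | (1 << 16) | 0x000f;

	hide_next_cap(0x100, 0x000f);
	printf("SECPCI next ptr is now %#x\n", EXT_CAP_NEXT(cfg[0x100 / 4])); /* 0 */
	return 0;
}

A host enumerating this function simply never sees ATS, which is what makes the RK3588 usable in EP mode despite the broken IOTLB invalidation described above.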
static void rockchip_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -272,13 +300,14 @@ static const struct pci_epc_features rockchip_pcie_epc_features_rk3568 = {
.linkup_notifier = true,
.msi_capable = true,
.msix_capable = true,
+ .intx_capable = false,
.align = SZ_64K,
- .bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
- .bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
- .bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
- .bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
- .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
- .bar[BAR_5] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
+ .bar[BAR_0] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_1] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_2] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_3] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_4] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_5] = { .type = BAR_RESIZABLE, },
};
/*
@@ -292,13 +321,14 @@ static const struct pci_epc_features rockchip_pcie_epc_features_rk3588 = {
.linkup_notifier = true,
.msi_capable = true,
.msix_capable = true,
+ .intx_capable = false,
.align = SZ_64K,
- .bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
- .bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
- .bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
- .bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
+ .bar[BAR_0] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_1] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_2] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_3] = { .type = BAR_RESIZABLE, },
.bar[BAR_4] = { .type = BAR_RESERVED, },
- .bar[BAR_5] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
+ .bar[BAR_5] = { .type = BAR_RESIZABLE, },
};
static const struct pci_epc_features *
@@ -312,6 +342,7 @@ rockchip_pcie_get_features(struct dw_pcie_ep *ep)
static const struct dw_pcie_ep_ops rockchip_pcie_ep_ops = {
.init = rockchip_pcie_ep_init,
+ .pre_init = rockchip_pcie_ep_pre_init,
.raise_irq = rockchip_pcie_raise_irq,
.get_features = rockchip_pcie_get_features,
};
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
index 615a0e3e6d7e..1f2f4c28a949 100644
--- a/drivers/pci/controller/dwc/pcie-histb.c
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -409,16 +409,21 @@ static int histb_pcie_probe(struct platform_device *pdev)
ret = histb_pcie_host_enable(pp);
if (ret) {
dev_err(dev, "failed to enable host\n");
- return ret;
+ goto err_exit_phy;
}
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "failed to initialize host\n");
- return ret;
+ goto err_exit_phy;
}
return 0;
+
+err_exit_phy:
+ phy_exit(hipcie->phy);
+
+ return ret;
}
static void histb_pcie_remove(struct platform_device *pdev)
@@ -427,8 +432,7 @@ static void histb_pcie_remove(struct platform_device *pdev)
histb_pcie_host_disable(hipcie);
- if (hipcie->phy)
- phy_exit(hipcie->phy);
+ phy_exit(hipcie->phy);
}
static const struct of_device_id histb_pcie_of_match[] = {
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
index 9b53b8f6f268..c21906eced61 100644
--- a/drivers/pci/controller/dwc/pcie-intel-gw.c
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -57,7 +57,6 @@
PCIE_APP_IRN_INTA | PCIE_APP_IRN_INTB | \
PCIE_APP_IRN_INTC | PCIE_APP_IRN_INTD)
-#define BUS_IATU_OFFSET SZ_256M
#define RESET_INTERVAL_MS 100
struct intel_pcie {
@@ -381,13 +380,7 @@ static int intel_pcie_rc_init(struct dw_pcie_rp *pp)
return intel_pcie_host_setup(pcie);
}
-static u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr)
-{
- return cpu_addr + BUS_IATU_OFFSET;
-}
-
static const struct dw_pcie_ops intel_pcie_ops = {
- .cpu_addr_fixup = intel_pcie_cpu_addr,
};
static const struct dw_pcie_host_ops intel_pcie_dw_ops = {
@@ -409,6 +402,7 @@ static int intel_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcie);
pci = &pcie->pci;
pci->dev = dev;
+ pci->use_parent_dt_ranges = true;
pp = &pci->pp;
ret = intel_pcie_get_resources(pdev);
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index 1b2088acb538..d0e6a3811b00 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -216,10 +216,9 @@ static int hi3660_pcie_phy_start(struct hi3660_pcie_phy *phy)
usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX);
reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_STATUS0);
- if (reg_val & PIPE_CLK_STABLE) {
- dev_err(dev, "PIPE clk is not stable\n");
- return -EINVAL;
- }
+ if (reg_val & PIPE_CLK_STABLE)
+ return dev_err_probe(dev, -ETIMEDOUT,
+ "PIPE clk is not stable\n");
return 0;
}
@@ -371,10 +370,9 @@ static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie,
if (ret < 0)
return 0;
- if (ret > MAX_PCI_SLOTS) {
- dev_err(dev, "Too many GPIO clock requests!\n");
- return -EINVAL;
- }
+ if (ret > MAX_PCI_SLOTS)
+ return dev_err_probe(dev, -EINVAL,
+ "Too many GPIO clock requests!\n");
pcie->n_gpio_clkreq = ret;
@@ -420,17 +418,16 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie,
"unable to get a valid reset gpio\n");
}
- if (pcie->num_slots + 1 >= MAX_PCI_SLOTS) {
- dev_err(dev, "Too many PCI slots!\n");
- return -EINVAL;
- }
+ if (pcie->num_slots + 1 >= MAX_PCI_SLOTS)
+ return dev_err_probe(dev, -EINVAL,
+ "Too many PCI slots!\n");
+
pcie->num_slots++;
ret = of_pci_get_devfn(child);
- if (ret < 0) {
- dev_err(dev, "failed to parse devfn: %d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "failed to parse devfn\n");
slot = PCI_SLOT(ret);
@@ -452,7 +449,7 @@ static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *child, *node = dev->of_node;
+ struct device_node *node = dev->of_node;
void __iomem *apb_base;
int ret;
@@ -477,17 +474,13 @@ static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
return ret;
/* Parse OF children */
- for_each_available_child_of_node(node, child) {
+ for_each_available_child_of_node_scoped(node, child) {
ret = kirin_pcie_parse_port(kirin_pcie, pdev, child);
if (ret)
- goto put_node;
+ return ret;
}
return 0;
-
-put_node:
- of_node_put(child);
- return ret;
}
static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie,
@@ -729,16 +722,9 @@ static int kirin_pcie_probe(struct platform_device *pdev)
struct dw_pcie *pci;
int ret;
- if (!dev->of_node) {
- dev_err(dev, "NULL node\n");
- return -EINVAL;
- }
-
data = of_device_get_match_data(dev);
- if (!data) {
- dev_err(dev, "OF data missing\n");
- return -EINVAL;
- }
+ if (!data)
+ return dev_err_probe(dev, -EINVAL, "OF data missing\n");
kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL);
if (!kirin_pcie)
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index c08f64d7a825..46b1c6d19974 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -48,7 +48,7 @@
#define PARF_DBI_BASE_ADDR_HI 0x354
#define PARF_SLV_ADDR_SPACE_SIZE 0x358
#define PARF_SLV_ADDR_SPACE_SIZE_HI 0x35c
-#define PARF_NO_SNOOP_OVERIDE 0x3d4
+#define PARF_NO_SNOOP_OVERRIDE 0x3d4
#define PARF_ATU_BASE_ADDR 0x634
#define PARF_ATU_BASE_ADDR_HI 0x638
#define PARF_SRIS_MODE 0x644
@@ -89,9 +89,9 @@
#define PARF_DEBUG_INT_CFG_BUS_MASTER_EN BIT(2)
#define PARF_DEBUG_INT_RADM_PM_TURNOFF BIT(3)
-/* PARF_NO_SNOOP_OVERIDE register fields */
-#define WR_NO_SNOOP_OVERIDE_EN BIT(1)
-#define RD_NO_SNOOP_OVERIDE_EN BIT(3)
+/* PARF_NO_SNOOP_OVERRIDE register fields */
+#define WR_NO_SNOOP_OVERRIDE_EN BIT(1)
+#define RD_NO_SNOOP_OVERRIDE_EN BIT(3)
/* PARF_DEVICE_TYPE register fields */
#define PARF_DEVICE_TYPE_EP 0x0
@@ -529,8 +529,8 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
writel_relaxed(val, pcie_ep->parf + PARF_LTSSM);
if (pcie_ep->cfg && pcie_ep->cfg->override_no_snoop)
- writel_relaxed(WR_NO_SNOOP_OVERIDE_EN | RD_NO_SNOOP_OVERIDE_EN,
- pcie_ep->parf + PARF_NO_SNOOP_OVERIDE);
+ writel_relaxed(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN,
+ pcie_ep->parf + PARF_NO_SNOOP_OVERRIDE);
return 0;
@@ -825,6 +825,10 @@ static const struct pci_epc_features qcom_pcie_epc_features = {
.msi_capable = true,
.msix_capable = false,
.align = SZ_4K,
+ .bar[BAR_0] = { .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
};
static const struct pci_epc_features *
@@ -933,6 +937,7 @@ static const struct of_device_id qcom_pcie_ep_match[] = {
{ .compatible = "qcom,sa8775p-pcie-ep", .data = &cfg_1_34_0},
{ .compatible = "qcom,sdx55-pcie-ep", },
{ .compatible = "qcom,sm8450-pcie-ep", },
+ { .compatible = "qcom,sar2130p-pcie-ep", },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_pcie_ep_match);
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index e4d3366ead1f..dc98ae63362d 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -61,7 +61,7 @@
#define PARF_DBI_BASE_ADDR_V2_HI 0x354
#define PARF_SLV_ADDR_SPACE_SIZE_V2 0x358
#define PARF_SLV_ADDR_SPACE_SIZE_V2_HI 0x35c
-#define PARF_NO_SNOOP_OVERIDE 0x3d4
+#define PARF_NO_SNOOP_OVERRIDE 0x3d4
#define PARF_ATU_BASE_ADDR 0x634
#define PARF_ATU_BASE_ADDR_HI 0x638
#define PARF_DEVICE_TYPE 0x1000
@@ -135,9 +135,9 @@
#define PARF_INT_ALL_LINK_UP BIT(13)
#define PARF_INT_MSI_DEV_0_7 GENMASK(30, 23)
-/* PARF_NO_SNOOP_OVERIDE register fields */
-#define WR_NO_SNOOP_OVERIDE_EN BIT(1)
-#define RD_NO_SNOOP_OVERIDE_EN BIT(3)
+/* PARF_NO_SNOOP_OVERRIDE register fields */
+#define WR_NO_SNOOP_OVERRIDE_EN BIT(1)
+#define RD_NO_SNOOP_OVERRIDE_EN BIT(3)
/* PARF_DEVICE_TYPE register fields */
#define DEVICE_TYPE_RC 0x4
@@ -1007,8 +1007,8 @@ static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
const struct qcom_pcie_cfg *pcie_cfg = pcie->cfg;
if (pcie_cfg->override_no_snoop)
- writel(WR_NO_SNOOP_OVERIDE_EN | RD_NO_SNOOP_OVERIDE_EN,
- pcie->parf + PARF_NO_SNOOP_OVERIDE);
+ writel(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN,
+ pcie->parf + PARF_NO_SNOOP_OVERRIDE);
qcom_pcie_clear_aspm_l0s(pcie->pci);
qcom_pcie_clear_hpc(pcie->pci);
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 6084b38bdda1..ac27bda5ba26 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1356,7 +1356,7 @@ static struct pci_ops hv_pcifront_ops = {
*
* If the PF driver wishes to initiate communication, it can "invalidate" one or
* more of the first 64 blocks. This invalidation is delivered via a callback
- * supplied by the VF driver by this driver.
+ * supplied to the VF driver by this driver.
*
* No protocol is implied, except that supplied by the PF and VF drivers.
*/
@@ -1757,8 +1757,7 @@ static int hv_compose_multi_msi_req_get_cpu(void)
spin_lock_irqsave(&multi_msi_cpu_lock, flags);
- cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask, nr_cpu_ids,
- false);
+ cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask);
cpu = cpu_next;
spin_unlock_irqrestore(&multi_msi_cpu_lock, flags);
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index 665f35f9d826..b0e3bce10aa4 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -1422,7 +1422,7 @@ static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
}
/*
- * devm_of_pci_get_host_bridge_resources() only sets up translateable resources,
+ * devm_of_pci_get_host_bridge_resources() only sets up translatable resources,
* so we need extra resource setup parsing our special DT properties encoding
* the MEM and IO apertures.
*/
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index b3cdbc5927de..d2f88997283a 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -2106,47 +2106,39 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
- struct device_node *np = dev->of_node, *port;
+ struct device_node *np = dev->of_node;
const struct tegra_pcie_soc *soc = pcie->soc;
u32 lanes = 0, mask = 0;
unsigned int lane = 0;
int err;
/* parse root ports */
- for_each_child_of_node(np, port) {
+ for_each_child_of_node_scoped(np, port) {
struct tegra_pcie_port *rp;
unsigned int index;
u32 value;
char *label;
err = of_pci_get_devfn(port);
- if (err < 0) {
- dev_err(dev, "failed to parse address: %d\n", err);
- goto err_node_put;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err, "failed to parse address\n");
index = PCI_SLOT(err);
- if (index < 1 || index > soc->num_ports) {
- dev_err(dev, "invalid port number: %d\n", index);
- err = -EINVAL;
- goto err_node_put;
- }
+ if (index < 1 || index > soc->num_ports)
+ return dev_err_probe(dev, -EINVAL,
+ "invalid port number: %d\n", index);
index--;
err = of_property_read_u32(port, "nvidia,num-lanes", &value);
- if (err < 0) {
- dev_err(dev, "failed to parse # of lanes: %d\n",
- err);
- goto err_node_put;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err,
+ "failed to parse # of lanes\n");
- if (value > 16) {
- dev_err(dev, "invalid # of lanes: %u\n", value);
- err = -EINVAL;
- goto err_node_put;
- }
+ if (value > 16)
+ return dev_err_probe(dev, -EINVAL,
+ "invalid # of lanes: %u\n", value);
lanes |= value << (index << 3);
@@ -2159,16 +2151,12 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
lane += value;
rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
- if (!rp) {
- err = -ENOMEM;
- goto err_node_put;
- }
+ if (!rp)
+ return -ENOMEM;
err = of_address_to_resource(port, 0, &rp->regs);
- if (err < 0) {
- dev_err(dev, "failed to parse address: %d\n", err);
- goto err_node_put;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err, "failed to parse address\n");
INIT_LIST_HEAD(&rp->list);
rp->index = index;
@@ -2177,16 +2165,12 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
rp->np = port;
rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
- if (IS_ERR(rp->base)) {
- err = PTR_ERR(rp->base);
- goto err_node_put;
- }
+ if (IS_ERR(rp->base))
+ return PTR_ERR(rp->base);
label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
- if (!label) {
- err = -ENOMEM;
- goto err_node_put;
- }
+ if (!label)
+ return -ENOMEM;
/*
* Returns -ENOENT if reset-gpios property is not populated
@@ -2199,34 +2183,26 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
GPIOD_OUT_LOW,
label);
if (IS_ERR(rp->reset_gpio)) {
- if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
+ if (PTR_ERR(rp->reset_gpio) == -ENOENT)
rp->reset_gpio = NULL;
- } else {
- dev_err(dev, "failed to get reset GPIO: %ld\n",
- PTR_ERR(rp->reset_gpio));
- err = PTR_ERR(rp->reset_gpio);
- goto err_node_put;
- }
+ else
+ return dev_err_probe(dev, PTR_ERR(rp->reset_gpio),
+ "failed to get reset GPIO\n");
}
list_add_tail(&rp->list, &pcie->ports);
}
err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
- if (err < 0) {
- dev_err(dev, "invalid lane configuration\n");
- return err;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err,
+ "invalid lane configuration\n");
err = tegra_pcie_get_regulators(pcie, mask);
if (err < 0)
return err;
return 0;
-
-err_node_put:
- of_node_put(port);
- return err;
}
/*
diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c
index b5bd10a62adb..08161065a89c 100644
--- a/drivers/pci/controller/pci-thunder-ecam.c
+++ b/drivers/pci/controller/pci-thunder-ecam.c
@@ -204,7 +204,7 @@ static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn,
v = readl(addr);
if (v & 0xff00)
- pr_err("Bad MSIX cap header: %08x\n", v);
+ pr_err("Bad MSI-X cap header: %08x\n", v);
v |= 0xbc00; /* next capability is EA at 0xbc */
set_val(v, where, size, val);
return PCIBIOS_SUCCESSFUL;
diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c
index 88c0977bc41a..7bce327897c9 100644
--- a/drivers/pci/controller/pci-xgene-msi.c
+++ b/drivers/pci/controller/pci-xgene-msi.c
@@ -154,7 +154,7 @@ static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
* X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors. To maintain
* the expected behaviour of .set_affinity for each MSI interrupt, the 16
* MSI GIC IRQs are statically allocated to 8 X-Gene v1 cores (2 GIC IRQs
- * for each core). The MSI vector is moved fom 1 MSI GIC IRQ to another
+ * for each core). The MSI vector is moved from 1 MSI GIC IRQ to another
* MSI GIC IRQ to steer its MSI interrupt to correct X-Gene v1 core. As a
* consequence, the total MSI vectors that X-Gene v1 supports will be
* reduced to 256 (2048/8) vectors.
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c
index eb55a7f8573a..70409e71a18f 100644
--- a/drivers/pci/controller/pcie-altera.c
+++ b/drivers/pci/controller/pcie-altera.c
@@ -6,6 +6,7 @@
* Description: Altera PCIe host controller driver
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
@@ -77,9 +78,25 @@
#define S10_TLP_FMTTYPE_CFGWR0 0x45
#define S10_TLP_FMTTYPE_CFGWR1 0x44
+#define AGLX_RP_CFG_ADDR(pcie, reg) (((pcie)->hip_base) + (reg))
+#define AGLX_RP_SECONDARY(pcie) \
+ readb(AGLX_RP_CFG_ADDR(pcie, PCI_SECONDARY_BUS))
+
+#define AGLX_BDF_REG 0x00002004
+#define AGLX_ROOT_PORT_IRQ_STATUS 0x14c
+#define AGLX_ROOT_PORT_IRQ_ENABLE 0x150
+#define CFG_AER BIT(4)
+
+#define AGLX_CFG_TARGET GENMASK(13, 12)
+#define AGLX_CFG_TARGET_TYPE0 0
+#define AGLX_CFG_TARGET_TYPE1 1
+#define AGLX_CFG_TARGET_LOCAL_2000 2
+#define AGLX_CFG_TARGET_LOCAL_3000 3
+
enum altera_pcie_version {
ALTERA_PCIE_V1 = 0,
ALTERA_PCIE_V2,
+ ALTERA_PCIE_V3,
};
struct altera_pcie {
@@ -102,6 +119,11 @@ struct altera_pcie_ops {
int size, u32 *value);
int (*rp_write_cfg)(struct altera_pcie *pcie, u8 busno,
int where, int size, u32 value);
+ int (*ep_read_cfg)(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size, u32 *value);
+ int (*ep_write_cfg)(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size, u32 value);
+ void (*rp_isr)(struct irq_desc *desc);
};
struct altera_pcie_data {
@@ -112,6 +134,9 @@ struct altera_pcie_data {
u32 cfgrd1;
u32 cfgwr0;
u32 cfgwr1;
+ u32 port_conf_offset;
+ u32 port_irq_status_offset;
+ u32 port_irq_enable_offset;
};
struct tlp_rp_regpair_t {
@@ -131,6 +156,28 @@ static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg)
return readl_relaxed(pcie->cra_base + reg);
}
+static inline void cra_writew(struct altera_pcie *pcie, const u32 value,
+ const u32 reg)
+{
+ writew_relaxed(value, pcie->cra_base + reg);
+}
+
+static inline u32 cra_readw(struct altera_pcie *pcie, const u32 reg)
+{
+ return readw_relaxed(pcie->cra_base + reg);
+}
+
+static inline void cra_writeb(struct altera_pcie *pcie, const u32 value,
+ const u32 reg)
+{
+ writeb_relaxed(value, pcie->cra_base + reg);
+}
+
+static inline u32 cra_readb(struct altera_pcie *pcie, const u32 reg)
+{
+ return readb_relaxed(pcie->cra_base + reg);
+}
+
static bool altera_pcie_link_up(struct altera_pcie *pcie)
{
return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0);
@@ -145,11 +192,20 @@ static bool s10_altera_pcie_link_up(struct altera_pcie *pcie)
return !!(readw(addr) & PCI_EXP_LNKSTA_DLLLA);
}
+static bool aglx_altera_pcie_link_up(struct altera_pcie *pcie)
+{
+ void __iomem *addr = AGLX_RP_CFG_ADDR(pcie,
+ pcie->pcie_data->cap_offset +
+ PCI_EXP_LNKSTA);
+
+ return (readw_relaxed(addr) & PCI_EXP_LNKSTA_DLLLA);
+}
+
/*
* Altera PCIe port uses BAR0 of RC's configuration space as the translation
* from PCI bus to native BUS. Entire DDR region is mapped into PCIe space
* using these registers, so it can be reached by DMA from EP devices.
- * This BAR0 will also access to MSI vector when receiving MSI/MSIX interrupt
+ * This BAR0 will also access to MSI vector when receiving MSI/MSI-X interrupt
* from EP devices, eventually trigger interrupt to GIC. The BAR0 of bridge
* should be hidden during enumeration to avoid the sizing and resource
* allocation by PCIe core.
@@ -425,6 +481,103 @@ static int s10_rp_write_cfg(struct altera_pcie *pcie, u8 busno,
return PCIBIOS_SUCCESSFUL;
}
+static int aglx_rp_read_cfg(struct altera_pcie *pcie, int where,
+ int size, u32 *value)
+{
+ void __iomem *addr = AGLX_RP_CFG_ADDR(pcie, where);
+
+ switch (size) {
+ case 1:
+ *value = readb_relaxed(addr);
+ break;
+ case 2:
+ *value = readw_relaxed(addr);
+ break;
+ default:
+ *value = readl_relaxed(addr);
+ break;
+ }
+
+ /* Interrupt PIN not programmed in hardware, set to INTA. */
+ if (where == PCI_INTERRUPT_PIN && size == 1 && !(*value))
+ *value = 0x01;
+ else if (where == PCI_INTERRUPT_LINE && !(*value & 0xff00))
+ *value |= 0x0100;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int aglx_rp_write_cfg(struct altera_pcie *pcie, u8 busno,
+ int where, int size, u32 value)
+{
+ void __iomem *addr = AGLX_RP_CFG_ADDR(pcie, where);
+
+ switch (size) {
+ case 1:
+ writeb_relaxed(value, addr);
+ break;
+ case 2:
+ writew_relaxed(value, addr);
+ break;
+ default:
+ writel_relaxed(value, addr);
+ break;
+ }
+
+ /*
+ * Monitor changes to PCI_PRIMARY_BUS register on Root Port
+ * and update local copy of root bus number accordingly.
+ */
+ if (busno == pcie->root_bus_nr && where == PCI_PRIMARY_BUS)
+ pcie->root_bus_nr = value & 0xff;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int aglx_ep_write_cfg(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size, u32 value)
+{
+ cra_writel(pcie, ((busno << 8) | devfn), AGLX_BDF_REG);
+ if (busno > AGLX_RP_SECONDARY(pcie))
+ where |= FIELD_PREP(AGLX_CFG_TARGET, AGLX_CFG_TARGET_TYPE1);
+
+ switch (size) {
+ case 1:
+ cra_writeb(pcie, value, where);
+ break;
+ case 2:
+ cra_writew(pcie, value, where);
+ break;
+ default:
+ cra_writel(pcie, value, where);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int aglx_ep_read_cfg(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size, u32 *value)
+{
+ cra_writel(pcie, ((busno << 8) | devfn), AGLX_BDF_REG);
+ if (busno > AGLX_RP_SECONDARY(pcie))
+ where |= FIELD_PREP(AGLX_CFG_TARGET, AGLX_CFG_TARGET_TYPE1);
+
+ switch (size) {
+ case 1:
+ *value = cra_readb(pcie, where);
+ break;
+ case 2:
+ *value = cra_readw(pcie, where);
+ break;
+ default:
+ *value = cra_readl(pcie, where);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
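To make the Agilex config-access encoding concrete: the target bus/devfn goes into AGLX_BDF_REG, and buses beyond the Root Port's secondary bus get a Type 1 access selected through bits 13:12 of the CRA offset. A standalone sketch with made-up bus/devfn values:

#include <stdint.h>
#include <stdio.h>

#define AGLX_BDF_REG		0x00002004
#define AGLX_CFG_TARGET_SHIFT	12		/* GENMASK(13, 12) */
#define AGLX_CFG_TARGET_TYPE1	1

int main(void)
{
	/* Hypothetical target: bus 2, device 1, function 0, offset 0x10 (BAR0) */
	uint8_t busno = 2, secondary_bus = 1;
	uint32_t devfn = (1 << 3) | 0;
	uint32_t where = 0x10;

	uint32_t bdf = (busno << 8) | devfn;	/* value written to AGLX_BDF_REG */

	/* Buses beyond the Root Port's secondary bus need a Type 1 access. */
	if (busno > secondary_bus)
		where |= AGLX_CFG_TARGET_TYPE1 << AGLX_CFG_TARGET_SHIFT;

	printf("BDF reg (%#x) = %#x, CRA offset = %#x\n",
	       AGLX_BDF_REG, bdf, where);	/* ... = 0x208, offset = 0x1010 */
	return 0;
}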
static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno,
unsigned int devfn, int where, int size,
u32 *value)
@@ -437,6 +590,10 @@ static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno,
return pcie->pcie_data->ops->rp_read_cfg(pcie, where,
size, value);
+ if (pcie->pcie_data->ops->ep_read_cfg)
+ return pcie->pcie_data->ops->ep_read_cfg(pcie, busno, devfn,
+ where, size, value);
+
switch (size) {
case 1:
byte_en = 1 << (where & 3);
@@ -481,6 +638,10 @@ static int _altera_pcie_cfg_write(struct altera_pcie *pcie, u8 busno,
return pcie->pcie_data->ops->rp_write_cfg(pcie, busno,
where, size, value);
+ if (pcie->pcie_data->ops->ep_write_cfg)
+ return pcie->pcie_data->ops->ep_write_cfg(pcie, busno, devfn,
+ where, size, value);
+
switch (size) {
case 1:
data32 = (value & 0xff) << shift;
@@ -659,7 +820,32 @@ static void altera_pcie_isr(struct irq_desc *desc)
dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n", bit);
}
}
+ chained_irq_exit(chip, desc);
+}
+
+static void aglx_isr(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct altera_pcie *pcie;
+ struct device *dev;
+ u32 status;
+ int ret;
+
+ chained_irq_enter(chip, desc);
+ pcie = irq_desc_get_handler_data(desc);
+ dev = &pcie->pdev->dev;
+
+ status = readl(pcie->hip_base + pcie->pcie_data->port_conf_offset +
+ pcie->pcie_data->port_irq_status_offset);
+ if (status & CFG_AER) {
+ writel(CFG_AER, (pcie->hip_base + pcie->pcie_data->port_conf_offset +
+ pcie->pcie_data->port_irq_status_offset));
+
+ ret = generic_handle_domain_irq(pcie->irq_domain, 0);
+ if (ret)
+ dev_err_ratelimited(dev, "unexpected IRQ %d\n", pcie->irq);
+ }
chained_irq_exit(chip, desc);
}
@@ -694,9 +880,9 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie)
if (IS_ERR(pcie->cra_base))
return PTR_ERR(pcie->cra_base);
- if (pcie->pcie_data->version == ALTERA_PCIE_V2) {
- pcie->hip_base =
- devm_platform_ioremap_resource_byname(pdev, "Hip");
+ if (pcie->pcie_data->version == ALTERA_PCIE_V2 ||
+ pcie->pcie_data->version == ALTERA_PCIE_V3) {
+ pcie->hip_base = devm_platform_ioremap_resource_byname(pdev, "Hip");
if (IS_ERR(pcie->hip_base))
return PTR_ERR(pcie->hip_base);
}
@@ -706,7 +892,7 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie)
if (pcie->irq < 0)
return pcie->irq;
- irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie);
+ irq_set_chained_handler_and_data(pcie->irq, pcie->pcie_data->ops->rp_isr, pcie);
return 0;
}
@@ -719,6 +905,7 @@ static const struct altera_pcie_ops altera_pcie_ops_1_0 = {
.tlp_read_pkt = tlp_read_packet,
.tlp_write_pkt = tlp_write_packet,
.get_link_status = altera_pcie_link_up,
+ .rp_isr = altera_pcie_isr,
};
static const struct altera_pcie_ops altera_pcie_ops_2_0 = {
@@ -727,6 +914,16 @@ static const struct altera_pcie_ops altera_pcie_ops_2_0 = {
.get_link_status = s10_altera_pcie_link_up,
.rp_read_cfg = s10_rp_read_cfg,
.rp_write_cfg = s10_rp_write_cfg,
+ .rp_isr = altera_pcie_isr,
+};
+
+static const struct altera_pcie_ops altera_pcie_ops_3_0 = {
+ .rp_read_cfg = aglx_rp_read_cfg,
+ .rp_write_cfg = aglx_rp_write_cfg,
+ .get_link_status = aglx_altera_pcie_link_up,
+ .ep_read_cfg = aglx_ep_read_cfg,
+ .ep_write_cfg = aglx_ep_write_cfg,
+ .rp_isr = aglx_isr,
};
static const struct altera_pcie_data altera_pcie_1_0_data = {
@@ -749,11 +946,44 @@ static const struct altera_pcie_data altera_pcie_2_0_data = {
.cfgwr1 = S10_TLP_FMTTYPE_CFGWR1,
};
+static const struct altera_pcie_data altera_pcie_3_0_f_tile_data = {
+ .ops = &altera_pcie_ops_3_0,
+ .version = ALTERA_PCIE_V3,
+ .cap_offset = 0x70,
+ .port_conf_offset = 0x14000,
+ .port_irq_status_offset = AGLX_ROOT_PORT_IRQ_STATUS,
+ .port_irq_enable_offset = AGLX_ROOT_PORT_IRQ_ENABLE,
+};
+
+static const struct altera_pcie_data altera_pcie_3_0_p_tile_data = {
+ .ops = &altera_pcie_ops_3_0,
+ .version = ALTERA_PCIE_V3,
+ .cap_offset = 0x70,
+ .port_conf_offset = 0x104000,
+ .port_irq_status_offset = AGLX_ROOT_PORT_IRQ_STATUS,
+ .port_irq_enable_offset = AGLX_ROOT_PORT_IRQ_ENABLE,
+};
+
+static const struct altera_pcie_data altera_pcie_3_0_r_tile_data = {
+ .ops = &altera_pcie_ops_3_0,
+ .version = ALTERA_PCIE_V3,
+ .cap_offset = 0x70,
+ .port_conf_offset = 0x1300,
+ .port_irq_status_offset = 0x0,
+ .port_irq_enable_offset = 0x4,
+};
+
static const struct of_device_id altera_pcie_of_match[] = {
{.compatible = "altr,pcie-root-port-1.0",
.data = &altera_pcie_1_0_data },
{.compatible = "altr,pcie-root-port-2.0",
.data = &altera_pcie_2_0_data },
+ {.compatible = "altr,pcie-root-port-3.0-f-tile",
+ .data = &altera_pcie_3_0_f_tile_data },
+ {.compatible = "altr,pcie-root-port-3.0-p-tile",
+ .data = &altera_pcie_3_0_p_tile_data },
+ {.compatible = "altr,pcie-root-port-3.0-r-tile",
+ .data = &altera_pcie_3_0_r_tile_data },
{},
};
@@ -791,11 +1021,18 @@ static int altera_pcie_probe(struct platform_device *pdev)
return ret;
}
- /* clear all interrupts */
- cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS);
- /* enable all interrupts */
- cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
- altera_pcie_host_init(pcie);
+ if (pcie->pcie_data->version == ALTERA_PCIE_V1 ||
+ pcie->pcie_data->version == ALTERA_PCIE_V2) {
+ /* clear all interrupts */
+ cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS);
+ /* enable all interrupts */
+ cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
+ altera_pcie_host_init(pcie);
+ } else if (pcie->pcie_data->version == ALTERA_PCIE_V3) {
+ writel(CFG_AER,
+ pcie->hip_base + pcie->pcie_data->port_conf_offset +
+ pcie->pcie_data->port_irq_enable_offset);
+ }
bridge->sysdata = pcie;
bridge->busnr = pcie->root_bus_nr;
diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
index a7e51bc1c2fe..18e11b9a7f46 100644
--- a/drivers/pci/controller/pcie-apple.c
+++ b/drivers/pci/controller/pcie-apple.c
@@ -732,7 +732,6 @@ static int apple_pcie_init(struct pci_config_window *cfg)
{
struct device *dev = cfg->parent;
struct platform_device *platform = to_platform_device(dev);
- struct device_node *of_port;
struct apple_pcie *pcie;
int ret;
@@ -755,11 +754,10 @@ static int apple_pcie_init(struct pci_config_window *cfg)
if (ret)
return ret;
- for_each_child_of_node(dev->of_node, of_port) {
+ for_each_child_of_node_scoped(dev->of_node, of_port) {
ret = apple_pcie_setup_port(pcie, of_port);
if (ret) {
dev_err(pcie->dev, "Port %pOF setup fail: %d\n", of_port, ret);
- of_node_put(of_port);
return ret;
}
}
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index e733a27dc8df..e19628e13898 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -40,7 +40,7 @@
/* Broadcom STB PCIe Register Offsets */
#define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1 0x0188
#define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK 0xc
-#define PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN 0x0
+#define PCIE_RC_CFG_VENDOR_SPECIFIC_REG1_LITTLE_ENDIAN 0x0
#define PCIE_RC_CFG_PRIV1_ID_VAL3 0x043c
#define PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK 0xffffff
@@ -55,6 +55,10 @@
#define PCIE_RC_DL_MDIO_WR_DATA 0x1104
#define PCIE_RC_DL_MDIO_RD_DATA 0x1108
+#define PCIE_RC_PL_PHY_CTL_15 0x184c
+#define PCIE_RC_PL_PHY_CTL_15_DIS_PLL_PD_MASK 0x400000
+#define PCIE_RC_PL_PHY_CTL_15_PM_CLK_PERIOD_MASK 0xff
+
#define PCIE_MISC_MISC_CTRL 0x4008
#define PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK 0x80
#define PCIE_MISC_MISC_CTRL_PCIE_RCB_MPS_MODE_MASK 0x400
@@ -146,9 +150,6 @@
#define MSI_INT_MASK_SET 0x10
#define MSI_INT_MASK_CLR 0x14
-#define PCIE_EXT_CFG_DATA 0x8000
-#define PCIE_EXT_CFG_INDEX 0x9000
-
#define PCIE_RGR1_SW_INIT_1_PERST_MASK 0x1
#define PCIE_RGR1_SW_INIT_1_PERST_SHIFT 0x0
@@ -174,8 +175,9 @@
#define MDIO_PORT0 0x0
#define MDIO_DATA_MASK 0x7fffffff
#define MDIO_PORT_MASK 0xf0000
+#define MDIO_PORT_EXT_MASK 0x200000
#define MDIO_REGAD_MASK 0xffff
-#define MDIO_CMD_MASK 0xfff00000
+#define MDIO_CMD_MASK 0x00100000
#define MDIO_CMD_READ 0x1
#define MDIO_CMD_WRITE 0x0
#define MDIO_DATA_DONE_MASK 0x80000000
@@ -191,11 +193,11 @@
#define SSC_STATUS_PLL_LOCK_MASK 0x800
#define PCIE_BRCM_MAX_MEMC 3
-#define IDX_ADDR(pcie) ((pcie)->reg_offsets[EXT_CFG_INDEX])
-#define DATA_ADDR(pcie) ((pcie)->reg_offsets[EXT_CFG_DATA])
-#define PCIE_RGR1_SW_INIT_1(pcie) ((pcie)->reg_offsets[RGR1_SW_INIT_1])
-#define HARD_DEBUG(pcie) ((pcie)->reg_offsets[PCIE_HARD_DEBUG])
-#define INTR2_CPU_BASE(pcie) ((pcie)->reg_offsets[PCIE_INTR2_CPU_BASE])
+#define IDX_ADDR(pcie) ((pcie)->cfg->offsets[EXT_CFG_INDEX])
+#define DATA_ADDR(pcie) ((pcie)->cfg->offsets[EXT_CFG_DATA])
+#define PCIE_RGR1_SW_INIT_1(pcie) ((pcie)->cfg->offsets[RGR1_SW_INIT_1])
+#define HARD_DEBUG(pcie) ((pcie)->cfg->offsets[PCIE_HARD_DEBUG])
+#define INTR2_CPU_BASE(pcie) ((pcie)->cfg->offsets[PCIE_INTR2_CPU_BASE])
/* Rescal registers */
#define PCIE_DVT_PMU_PCIE_PHY_CTRL 0xc700
@@ -234,13 +236,24 @@ struct inbound_win {
u64 cpu_addr;
};
+/*
+ * The RESCAL block is tied to PCIe controller #1, regardless of the number of
+ * controllers. Turning off PCIe controller #1 therefore prevents any other
+ * controller from accessing the RESCAL register blocks; depending on the bus
+ * fabric, such an access may then time out (UBUS/GISB) or hang (AXI).
+ */
+#define CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN BIT(0)
+
struct pcie_cfg_data {
const int *offsets;
const enum pcie_soc_base soc_base;
const bool has_phy;
+ const u32 quirks;
u8 num_inbound_wins;
int (*perst_set)(struct brcm_pcie *pcie, u32 val);
int (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
+ int (*post_setup)(struct brcm_pcie *pcie);
};
struct subdev_regulators {
@@ -276,8 +289,6 @@ struct brcm_pcie {
int gen;
u64 msi_target_addr;
struct brcm_msi *msi;
- const int *reg_offsets;
- enum pcie_soc_base soc_base;
struct reset_control *rescal;
struct reset_control *perst_reset;
struct reset_control *bridge_reset;
@@ -285,17 +296,14 @@ struct brcm_pcie {
int num_memc;
u64 memc_size[PCIE_BRCM_MAX_MEMC];
u32 hw_rev;
- int (*perst_set)(struct brcm_pcie *pcie, u32 val);
- int (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
struct subdev_regulators *sr;
bool ep_wakeup_capable;
- bool has_phy;
- u8 num_inbound_wins;
+ const struct pcie_cfg_data *cfg;
};
static inline bool is_bmips(const struct brcm_pcie *pcie)
{
- return pcie->soc_base == BCM7435 || pcie->soc_base == BCM7425;
+ return pcie->cfg->soc_base == BCM7435 || pcie->cfg->soc_base == BCM7425;
}
/*
@@ -309,8 +317,8 @@ static int brcm_pcie_encode_ibar_size(u64 size)
if (log2_in >= 12 && log2_in <= 15)
/* Covers 4KB to 32KB (inclusive) */
return (log2_in - 12) + 0x1c;
- else if (log2_in >= 16 && log2_in <= 35)
- /* Covers 64KB to 32GB, (inclusive) */
+ else if (log2_in >= 16 && log2_in <= 36)
+ /* Covers 64KB to 64GB, (inclusive) */
return log2_in - 15;
/* Something is awry so disable */
return 0;
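The encoding above packs log2(size) into a small field: 4 KB through 32 KB become 0x1c through 0x1f, and 64 KB through 64 GB become 0x1 through 0x15. A standalone sketch mirroring the two branches (an illustrative copy, not a replacement for the driver code):

#include <stdint.h>
#include <stdio.h>

static int encode_ibar_size(uint64_t size)
{
	int log2_in = 63 - __builtin_clzll(size);	/* ilog2() */

	if (log2_in >= 12 && log2_in <= 15)		/* 4 KB .. 32 KB */
		return (log2_in - 12) + 0x1c;
	else if (log2_in >= 16 && log2_in <= 36)	/* 64 KB .. 64 GB */
		return log2_in - 15;
	return 0;					/* disabled */
}

int main(void)
{
	printf("%#x %#x %#x\n",
	       encode_ibar_size(1ULL << 12),	/* 4 KB  -> 0x1c */
	       encode_ibar_size(1ULL << 16),	/* 64 KB -> 0x1  */
	       encode_ibar_size(1ULL << 36));	/* 64 GB -> 0x15 */
	return 0;
}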
@@ -320,6 +328,7 @@ static u32 brcm_pcie_mdio_form_pkt(int port, int regad, int cmd)
{
u32 pkt = 0;
+ pkt |= FIELD_PREP(MDIO_PORT_EXT_MASK, port >> 4);
pkt |= FIELD_PREP(MDIO_PORT_MASK, port);
pkt |= FIELD_PREP(MDIO_REGAD_MASK, regad);
pkt |= FIELD_PREP(MDIO_CMD_MASK, cmd);
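With the new MDIO_PORT_EXT_MASK, a packet carries port[3:0] in bits 19:16, the command in bit 20 and port[4] in bit 21, so PHY ports above 15 become addressable. A standalone sketch of the packing with a hypothetical port number:

#include <stdint.h>
#include <stdio.h>

#define MDIO_PORT_EXT_MASK	0x200000	/* bit 21: port[4]             */
#define MDIO_CMD_MASK		0x00100000	/* bit 20: read(1)/write(0)    */
#define MDIO_PORT_MASK		0xf0000		/* bits 19:16: port[3:0]       */
#define MDIO_REGAD_MASK		0xffff		/* bits 15:0: register address */

/* Open-coded FIELD_PREP(): shift the value into the field and mask it. */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val * (mask & -mask)) & mask;
}

static uint32_t mdio_form_pkt(int port, int regad, int cmd)
{
	return field_prep(MDIO_PORT_EXT_MASK, port >> 4) |
	       field_prep(MDIO_PORT_MASK, port) |
	       field_prep(MDIO_REGAD_MASK, regad) |
	       field_prep(MDIO_CMD_MASK, cmd);
}

int main(void)
{
	/* Port 0x12 needs the extended port bit; the read command sets bit 20. */
	printf("%#x\n", mdio_form_pkt(0x12, 0x1600, 1));	/* 0x321600 */
	return 0;
}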
@@ -403,12 +412,12 @@ static int brcm_pcie_set_ssc(struct brcm_pcie *pcie)
static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen)
{
u16 lnkctl2 = readw(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
- u32 lnkcap = readl(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);
+ u32 lnkcap = readl(pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
- lnkcap = (lnkcap & ~PCI_EXP_LNKCAP_SLS) | gen;
- writel(lnkcap, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);
+ u32p_replace_bits(&lnkcap, gen, PCI_EXP_LNKCAP_SLS);
+ writel(lnkcap, pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
- lnkctl2 = (lnkctl2 & ~0xf) | gen;
+ u16p_replace_bits(&lnkctl2, gen, PCI_EXP_LNKCTL2_TLS);
writew(lnkctl2, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
}
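u32p_replace_bits() and u16p_replace_bits() are mask-and-insert helpers, so the change above trades the open-coded masking for them without changing behaviour. A standalone sketch of the equivalent operation on the Supported Link Speeds field (illustrative values only):

#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_LNKCAP_SLS	0x0000000f	/* Supported Link Speeds */

/* What u32p_replace_bits(&val, field, mask) boils down to. */
static void replace_bits32(uint32_t *val, uint32_t field, uint32_t mask)
{
	*val = (*val & ~mask) | ((field * (mask & -mask)) & mask);
}

int main(void)
{
	uint32_t lnkcap = 0x00477c03;			/* example: currently Gen 3 */

	replace_bits32(&lnkcap, 2, PCI_EXP_LNKCAP_SLS);	/* force Gen 2 */
	printf("%#x\n", lnkcap);			/* 0x477c02 */
	return 0;
}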
@@ -550,7 +559,7 @@ static int brcm_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
return hwirq;
for (i = 0; i < nr_irqs; i++)
- irq_domain_set_info(domain, virq + i, hwirq + i,
+ irq_domain_set_info(domain, virq + i, (irq_hw_number_t)hwirq + i,
&brcm_msi_bottom_irq_chip, domain->host_data,
handle_edge_irq, NULL, NULL);
return 0;
@@ -717,8 +726,8 @@ static void __iomem *brcm_pcie_map_bus(struct pci_bus *bus,
/* For devices, write to the config space index register */
idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0);
- writel(idx, pcie->base + PCIE_EXT_CFG_INDEX);
- return base + PCIE_EXT_CFG_DATA + PCIE_ECAM_REG(where);
+ writel(idx, base + IDX_ADDR(pcie));
+ return base + DATA_ADDR(pcie) + PCIE_ECAM_REG(where);
}
static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus,
@@ -821,6 +830,39 @@ static int brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)
return 0;
}
+static int brcm_pcie_post_setup_bcm2712(struct brcm_pcie *pcie)
+{
+ static const u16 data[] = { 0x50b9, 0xbda1, 0x0094, 0x97b4, 0x5030,
+ 0x5030, 0x0007 };
+ static const u8 regs[] = { 0x16, 0x17, 0x18, 0x19, 0x1b, 0x1c, 0x1e };
+ int ret, i;
+ u32 tmp;
+
+ /* Allow a 54MHz (xosc) refclk source */
+ ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, SET_ADDR_OFFSET, 0x1600);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, regs[i], data[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ usleep_range(100, 200);
+
+ /*
+ * Set L1SS sub-state timers to avoid lengthy state transitions,
+ * PM clock period is 18.52ns (1/54MHz, round down).
+ */
+ tmp = readl(pcie->base + PCIE_RC_PL_PHY_CTL_15);
+ tmp &= ~PCIE_RC_PL_PHY_CTL_15_PM_CLK_PERIOD_MASK;
+ tmp |= 0x12;
+ writel(tmp, pcie->base + PCIE_RC_PL_PHY_CTL_15);
+
+ return 0;
+}
+
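The 0x12 written to PCIE_RC_PL_PHY_CTL_15 above follows directly from the comment: 1/54 MHz is 18.52 ns, rounded down to 18 ns, which is 0x12. A minimal check of that arithmetic:

#include <stdio.h>

int main(void)
{
        /* PM clock period in ns for a 54 MHz refclk, truncated to whole ns. */
        unsigned int period_ns = 1000000000u / 54000000u;

        printf("period = %u ns = 0x%02x\n", period_ns, period_ns);      /* 18 = 0x12 */
        return 0;
}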
static void add_inbound_win(struct inbound_win *b, u8 *count, u64 size,
u64 cpu_addr, u64 pci_offset)
{
@@ -855,7 +897,7 @@ static int brcm_pcie_get_inbound_wins(struct brcm_pcie *pcie,
* security considerations, and is not implemented in our modern
* SoCs.
*/
- if (pcie->soc_base != BCM7712)
+ if (pcie->cfg->soc_base != BCM7712)
add_inbound_win(b++, &n, 0, 0, 0);
resource_list_for_each_entry(entry, &bridge->dma_ranges) {
@@ -872,10 +914,10 @@ static int brcm_pcie_get_inbound_wins(struct brcm_pcie *pcie,
* That being said, each BAR's size must still be a power of
* two.
*/
- if (pcie->soc_base == BCM7712)
+ if (pcie->cfg->soc_base == BCM7712)
add_inbound_win(b++, &n, size, cpu_start, pcie_start);
- if (n > pcie->num_inbound_wins)
+ if (n > pcie->cfg->num_inbound_wins)
break;
}
@@ -889,7 +931,7 @@ static int brcm_pcie_get_inbound_wins(struct brcm_pcie *pcie,
* that enables multiple memory controllers. As such, it can return
* now w/o doing special configuration.
*/
- if (pcie->soc_base == BCM7712)
+ if (pcie->cfg->soc_base == BCM7712)
return n;
ret = of_property_read_variable_u64_array(pcie->np, "brcm,scb-sizes", pcie->memc_size, 1,
@@ -1012,7 +1054,7 @@ static void set_inbound_win_registers(struct brcm_pcie *pcie,
* 7712:
* All of their BARs need to be set.
*/
- if (pcie->soc_base == BCM7712) {
+ if (pcie->cfg->soc_base == BCM7712) {
/* BUS remap register settings */
reg_offset = brcm_ubus_reg_offset(i);
tmp = lower_32_bits(cpu_addr) & ~0xfff;
@@ -1036,15 +1078,15 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
int memc, ret;
/* Reset the bridge */
- ret = pcie->bridge_sw_init_set(pcie, 1);
+ ret = pcie->cfg->bridge_sw_init_set(pcie, 1);
if (ret)
return ret;
/* Ensure that PERST# is asserted; some bootloaders may deassert it. */
- if (pcie->soc_base == BCM2711) {
- ret = pcie->perst_set(pcie, 1);
+ if (pcie->cfg->soc_base == BCM2711) {
+ ret = pcie->cfg->perst_set(pcie, 1);
if (ret) {
- pcie->bridge_sw_init_set(pcie, 0);
+ pcie->cfg->bridge_sw_init_set(pcie, 0);
return ret;
}
}
@@ -1052,7 +1094,7 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
usleep_range(100, 200);
/* Take the bridge out of reset */
- ret = pcie->bridge_sw_init_set(pcie, 0);
+ ret = pcie->cfg->bridge_sw_init_set(pcie, 0);
if (ret)
return ret;
@@ -1072,9 +1114,9 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
*/
if (is_bmips(pcie))
burst = 0x1; /* 256 bytes */
- else if (pcie->soc_base == BCM2711)
+ else if (pcie->cfg->soc_base == BCM2711)
burst = 0x0; /* 128 bytes */
- else if (pcie->soc_base == BCM7278)
+ else if (pcie->cfg->soc_base == BCM7278)
burst = 0x3; /* 512 bytes */
else
burst = 0x2; /* 512 bytes */
@@ -1180,10 +1222,16 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
/* PCIe->SCB endian mode for inbound window */
tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
- u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN,
+ u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPECIFIC_REG1_LITTLE_ENDIAN,
PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
+ if (pcie->cfg->post_setup) {
+ ret = pcie->cfg->post_setup(pcie);
+ if (ret < 0)
+ return ret;
+ }
+
return 0;
}
@@ -1199,7 +1247,7 @@ static void brcm_extend_rbus_timeout(struct brcm_pcie *pcie)
u32 timeout_us = 4000000; /* 4 seconds, our setting for L1SS */
/* 7712 does not have this (RGR1) timer */
- if (pcie->soc_base == BCM7712)
+ if (pcie->cfg->soc_base == BCM7712)
return;
/* Each unit in timeout register is 1/216,000,000 seconds */
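As a worked example of the unit conversion the comment describes (assuming the timeout register counts 1/216,000,000-second ticks, i.e. 216 ticks per microsecond), the 4-second L1SS setting works out to 864,000,000 units:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t timeout_us = 4000000;          /* 4 seconds */
        uint64_t ticks = timeout_us * 216;      /* 216 ticks per microsecond */

        printf("timeout = %llu units\n", (unsigned long long)ticks);    /* 864000000 */
        return 0;
}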
@@ -1276,8 +1324,12 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie)
bool ssc_good = false;
int ret, i;
+ /* Limit the generation if specified */
+ if (pcie->gen)
+ brcm_pcie_set_gen(pcie, pcie->gen);
+
/* Unassert the fundamental reset */
- ret = pcie->perst_set(pcie, 0);
+ ret = pcie->cfg->perst_set(pcie, 0);
if (ret)
return ret;
@@ -1302,9 +1354,6 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie)
brcm_config_clkreq(pcie);
- if (pcie->gen)
- brcm_pcie_set_gen(pcie, pcie->gen);
-
if (pcie->ssc) {
ret = brcm_pcie_set_ssc(pcie);
if (ret == 0)
@@ -1367,7 +1416,8 @@ static int brcm_pcie_add_bus(struct pci_bus *bus)
ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies);
if (ret) {
- dev_info(dev, "No regulators for downstream device\n");
+ dev_info(dev, "Did not get regulators, err=%d\n", ret);
+ pcie->sr = NULL;
goto no_regulators;
}
@@ -1390,7 +1440,7 @@ static void brcm_pcie_remove_bus(struct pci_bus *bus)
struct subdev_regulators *sr = pcie->sr;
struct device *dev = &bus->dev;
- if (!sr)
+ if (!sr || !bus->parent || !pci_is_root_bus(bus->parent))
return;
if (regulator_bulk_disable(sr->num_supplies, sr->supplies))
@@ -1463,12 +1513,12 @@ static int brcm_phy_cntl(struct brcm_pcie *pcie, const int start)
static inline int brcm_phy_start(struct brcm_pcie *pcie)
{
- return pcie->has_phy ? brcm_phy_cntl(pcie, 1) : 0;
+ return pcie->cfg->has_phy ? brcm_phy_cntl(pcie, 1) : 0;
}
static inline int brcm_phy_stop(struct brcm_pcie *pcie)
{
- return pcie->has_phy ? brcm_phy_cntl(pcie, 0) : 0;
+ return pcie->cfg->has_phy ? brcm_phy_cntl(pcie, 0) : 0;
}
static int brcm_pcie_turn_off(struct brcm_pcie *pcie)
@@ -1479,7 +1529,7 @@ static int brcm_pcie_turn_off(struct brcm_pcie *pcie)
if (brcm_pcie_link_up(pcie))
brcm_pcie_enter_l23(pcie);
/* Assert fundamental reset */
- ret = pcie->perst_set(pcie, 1);
+ ret = pcie->cfg->perst_set(pcie, 1);
if (ret)
return ret;
@@ -1493,8 +1543,9 @@ static int brcm_pcie_turn_off(struct brcm_pcie *pcie)
u32p_replace_bits(&tmp, 1, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
writel(tmp, base + HARD_DEBUG(pcie));
- /* Shutdown PCIe bridge */
- ret = pcie->bridge_sw_init_set(pcie, 1);
+ if (!(pcie->cfg->quirks & CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN))
+ /* Shutdown PCIe bridge */
+ ret = pcie->cfg->bridge_sw_init_set(pcie, 1);
return ret;
}
@@ -1582,7 +1633,7 @@ static int brcm_pcie_resume_noirq(struct device *dev)
goto err_reset;
/* Take bridge out of reset so we can access the SERDES reg */
- pcie->bridge_sw_init_set(pcie, 0);
+ pcie->cfg->bridge_sw_init_set(pcie, 0);
/* SERDES_IDDQ = 0 */
tmp = readl(base + HARD_DEBUG(pcie));
@@ -1660,7 +1711,7 @@ static void brcm_pcie_remove(struct platform_device *pdev)
static const int pcie_offsets[] = {
[RGR1_SW_INIT_1] = 0x9210,
[EXT_CFG_INDEX] = 0x9000,
- [EXT_CFG_DATA] = 0x9004,
+ [EXT_CFG_DATA] = 0x8000,
[PCIE_HARD_DEBUG] = 0x4204,
[PCIE_INTR2_CPU_BASE] = 0x4300,
};
@@ -1668,7 +1719,7 @@ static const int pcie_offsets[] = {
static const int pcie_offsets_bcm7278[] = {
[RGR1_SW_INIT_1] = 0xc010,
[EXT_CFG_INDEX] = 0x9000,
- [EXT_CFG_DATA] = 0x9004,
+ [EXT_CFG_DATA] = 0x8000,
[PCIE_HARD_DEBUG] = 0x4204,
[PCIE_INTR2_CPU_BASE] = 0x4300,
};
@@ -1682,8 +1733,9 @@ static const int pcie_offsets_bcm7425[] = {
};
static const int pcie_offsets_bcm7712[] = {
+ [RGR1_SW_INIT_1] = 0x9210,
[EXT_CFG_INDEX] = 0x9000,
- [EXT_CFG_DATA] = 0x9004,
+ [EXT_CFG_DATA] = 0x8000,
[PCIE_HARD_DEBUG] = 0x4304,
[PCIE_INTR2_CPU_BASE] = 0x4400,
};
@@ -1704,6 +1756,16 @@ static const struct pcie_cfg_data bcm2711_cfg = {
.num_inbound_wins = 3,
};
+static const struct pcie_cfg_data bcm2712_cfg = {
+ .offsets = pcie_offsets_bcm7712,
+ .soc_base = BCM7712,
+ .perst_set = brcm_pcie_perst_set_7278,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .post_setup = brcm_pcie_post_setup_bcm2712,
+ .quirks = CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN,
+ .num_inbound_wins = 10,
+};
+
static const struct pcie_cfg_data bcm4908_cfg = {
.offsets = pcie_offsets,
.soc_base = BCM4908,
@@ -1755,6 +1817,7 @@ static const struct pcie_cfg_data bcm7712_cfg = {
static const struct of_device_id brcm_pcie_match[] = {
{ .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg },
+ { .compatible = "brcm,bcm2712-pcie", .data = &bcm2712_cfg },
{ .compatible = "brcm,bcm4908-pcie", .data = &bcm4908_cfg },
{ .compatible = "brcm,bcm7211-pcie", .data = &generic_cfg },
{ .compatible = "brcm,bcm7216-pcie", .data = &bcm7216_cfg },
@@ -1784,7 +1847,7 @@ static struct pci_ops brcm7425_pcie_ops = {
static int brcm_pcie_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node, *msi_np;
+ struct device_node *np = pdev->dev.of_node;
struct pci_host_bridge *bridge;
const struct pcie_cfg_data *data;
struct brcm_pcie *pcie;
@@ -1803,12 +1866,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
pcie = pci_host_bridge_priv(bridge);
pcie->dev = &pdev->dev;
pcie->np = np;
- pcie->reg_offsets = data->offsets;
- pcie->soc_base = data->soc_base;
- pcie->perst_set = data->perst_set;
- pcie->bridge_sw_init_set = data->bridge_sw_init_set;
- pcie->has_phy = data->has_phy;
- pcie->num_inbound_wins = data->num_inbound_wins;
+ pcie->cfg = data;
pcie->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pcie->base))
@@ -1843,7 +1901,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(&pdev->dev, ret, "could not enable clock\n");
- pcie->bridge_sw_init_set(pcie, 0);
+ pcie->cfg->bridge_sw_init_set(pcie, 0);
if (pcie->swinit_reset) {
ret = reset_control_assert(pcie->swinit_reset);
@@ -1882,22 +1940,29 @@ static int brcm_pcie_probe(struct platform_device *pdev)
goto fail;
pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION);
- if (pcie->soc_base == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
+ if (pcie->cfg->soc_base == BCM4908 &&
+ pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n");
ret = -ENODEV;
goto fail;
}
- msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);
- if (pci_msi_enabled() && msi_np == pcie->np) {
- ret = brcm_pcie_enable_msi(pcie);
+ if (pci_msi_enabled()) {
+ struct device_node *msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);
+
+ if (msi_np == pcie->np)
+ ret = brcm_pcie_enable_msi(pcie);
+
+ of_node_put(msi_np);
+
if (ret) {
dev_err(pcie->dev, "probe of internal MSI failed");
goto fail;
}
}
- bridge->ops = pcie->soc_base == BCM7425 ? &brcm7425_pcie_ops : &brcm_pcie_ops;
+ bridge->ops = pcie->cfg->soc_base == BCM7425 ?
+ &brcm7425_pcie_ops : &brcm_pcie_ops;
bridge->sysdata = pcie;
platform_set_drvdata(pdev, pcie);
@@ -1940,3 +2005,4 @@ module_platform_driver(brcm_pcie_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom STB PCIe RC driver");
MODULE_AUTHOR("Broadcom");
+MODULE_SOFTDEP("pre: irq_bcm2712_mip");
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index aa24ac9aaecc..9d52504acae4 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -15,6 +15,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_device.h>
@@ -24,6 +25,7 @@
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
#include <linux/reset.h>
#include "../pci.h"
@@ -352,7 +354,8 @@ static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie,
dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
range_type, *num, (unsigned long long)cpu_addr,
- (unsigned long long)pci_addr, (unsigned long long)table_size);
+ (unsigned long long)pci_addr,
+ (unsigned long long)table_size);
cpu_addr += table_size;
pci_addr += table_size;
@@ -887,7 +890,8 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
for (i = 0; i < num_resets; i++)
pcie->phy_resets[i].id = pcie->soc->phy_resets.id[i];
- ret = devm_reset_control_bulk_get_optional_shared(dev, num_resets, pcie->phy_resets);
+ ret = devm_reset_control_bulk_get_optional_shared(dev, num_resets,
+ pcie->phy_resets);
if (ret) {
dev_err(dev, "failed to get PHY bulk reset\n");
return ret;
@@ -917,22 +921,27 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
return pcie->num_clks;
}
- ret = of_property_read_u32(dev->of_node, "num-lanes", &num_lanes);
- if (ret == 0) {
- if (num_lanes == 0 || num_lanes > 16 || (num_lanes != 1 && num_lanes % 2))
+ ret = of_property_read_u32(dev->of_node, "num-lanes", &num_lanes);
+ if (ret == 0) {
+ if (num_lanes == 0 || num_lanes > 16 ||
+ (num_lanes != 1 && num_lanes % 2))
dev_warn(dev, "invalid num-lanes, using controller defaults\n");
- else
+ else
pcie->num_lanes = num_lanes;
- }
+ }
return 0;
}
static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
{
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
struct device *dev = pcie->dev;
+ struct resource_entry *entry;
+ struct regmap *pbus_regmap;
+ u32 val, args[2], size;
+ resource_size_t addr;
int err;
- u32 val;
/*
* The controller may have been left out of reset by the bootloader
@@ -940,12 +949,31 @@ static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
*/
reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
pcie->phy_resets);
- reset_control_assert(pcie->mac_reset);
/* Wait for the time needed to complete the reset lines assert. */
msleep(PCIE_EN7581_RESET_TIME_MS);
/*
+ * Configure PBus base address and base address mask to allow the
+ * hw to detect if a given address is accessible on PCIe controller.
+ */
+ pbus_regmap = syscon_regmap_lookup_by_phandle_args(dev->of_node,
+ "mediatek,pbus-csr",
+ ARRAY_SIZE(args),
+ args);
+ if (IS_ERR(pbus_regmap))
+ return PTR_ERR(pbus_regmap);
+
+ entry = resource_list_first_type(&host->windows, IORESOURCE_MEM);
+ if (!entry)
+ return -ENODEV;
+
+ addr = entry->res->start - entry->offset;
+ regmap_write(pbus_regmap, args[0], lower_32_bits(addr));
+ size = lower_32_bits(resource_size(entry->res));
+ regmap_write(pbus_regmap, args[1], GENMASK(31, __fls(size)));
+
+ /*
* Unlike the other MediaTek Gen3 controllers, the Airoha EN7581
* requires PHY initialization and power-on before PHY reset deassert.
*/
@@ -961,7 +989,8 @@ static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
goto err_phy_on;
}
- err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
+ err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
if (err) {
dev_err(dev, "failed to deassert PHYs\n");
goto err_phy_deassert;
@@ -1006,7 +1035,8 @@ static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
err_clk_prepare_enable:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
- reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
err_phy_deassert:
phy_power_off(pcie->phy);
err_phy_on:
@@ -1030,7 +1060,8 @@ static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
usleep_range(PCIE_MTK_RESET_TIME_US, 2 * PCIE_MTK_RESET_TIME_US);
/* PHY power on and enable pipe clock */
- err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
+ err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
if (err) {
dev_err(dev, "failed to deassert PHYs\n");
return err;
@@ -1070,7 +1101,8 @@ err_clk_init:
err_phy_on:
phy_exit(pcie->phy);
err_phy_init:
- reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
return err;
}
@@ -1085,7 +1117,8 @@ static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie)
phy_power_off(pcie->phy);
phy_exit(pcie->phy);
- reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
}
static int mtk_pcie_get_controller_max_link_speed(struct mtk_gen3_pcie *pcie)
@@ -1112,7 +1145,8 @@ static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
* Deassert the line in order to avoid unbalance in deassert_count
* counter since the bulk is shared.
*/
- reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
+ reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
/* Don't touch the hardware registers before power up */
err = pcie->soc->power_up(pcie);
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 3bcfc4e58ba2..811a8b4acd50 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -1041,24 +1041,22 @@ err_free_ck:
static int mtk_pcie_setup(struct mtk_pcie *pcie)
{
struct device *dev = pcie->dev;
- struct device_node *node = dev->of_node, *child;
+ struct device_node *node = dev->of_node;
struct mtk_pcie_port *port, *tmp;
int err, slot;
slot = of_get_pci_domain_nr(dev->of_node);
if (slot < 0) {
- for_each_available_child_of_node(node, child) {
+ for_each_available_child_of_node_scoped(node, child) {
err = of_pci_get_devfn(child);
- if (err < 0) {
- dev_err(dev, "failed to get devfn: %d\n", err);
- goto error_put_node;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err, "failed to get devfn\n");
slot = PCI_SLOT(err);
err = mtk_pcie_parse_port(pcie, child, slot);
if (err)
- goto error_put_node;
+ return err;
}
} else {
err = mtk_pcie_parse_port(pcie, node, slot);
@@ -1079,9 +1077,6 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
mtk_pcie_subsys_powerdown(pcie);
return 0;
-error_put_node:
- of_node_put(child);
- return err;
}
static int mtk_pcie_probe(struct platform_device *pdev)
diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c
index 776caa0b1011..01ead2f92e87 100644
--- a/drivers/pci/controller/pcie-mt7621.c
+++ b/drivers/pci/controller/pcie-mt7621.c
@@ -258,30 +258,25 @@ static int mt7621_pcie_parse_dt(struct mt7621_pcie *pcie)
{
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
- struct device_node *node = dev->of_node, *child;
+ struct device_node *node = dev->of_node;
int err;
pcie->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
- for_each_available_child_of_node(node, child) {
+ for_each_available_child_of_node_scoped(node, child) {
int slot;
err = of_pci_get_devfn(child);
- if (err < 0) {
- of_node_put(child);
- dev_err(dev, "failed to parse devfn: %d\n", err);
- return err;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err, "failed to parse devfn\n");
slot = PCI_SLOT(err);
err = mt7621_pcie_parse_port(pcie, child, slot);
- if (err) {
- of_node_put(child);
+ if (err)
return err;
- }
}
return 0;
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index 7c92eada04af..c32b803a47c7 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -178,8 +178,8 @@ static int rcar_pcie_config_access(struct rcar_pcie_host *host,
* space, it's generally only accessible when in endpoint mode.
* When in root complex mode, the controller is unable to target
* itself with either type 0 or type 1 accesses, and indeed, any
- * controller initiated target transfer to its own config space
- * result in a completer abort.
+ * controller-initiated target transfer to its own config space
+ * results in a completer abort.
*
* Each channel effectively only supports a single device, but as
* the same channel <-> device access works for any PCI_SLOT()
@@ -775,7 +775,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
if (err)
return err;
- /* Two irqs are for MSI, but they are also used for non-MSI irqs */
+ /* Two IRQs are for MSI, but they are also used for non-MSI IRQs */
err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
IRQF_SHARED | IRQF_NO_THREAD,
rcar_msi_bottom_chip.name, host);
@@ -792,7 +792,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
goto err;
}
- /* disable all MSIs */
+ /* Disable all MSIs */
rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
/*
@@ -892,6 +892,7 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
dev_err(pcie->dev, "Failed to map inbound regions!\n");
return -EINVAL;
}
+
/*
* If the size of the range is larger than the alignment of
* the start address, we have to use multiple entries to
@@ -903,6 +904,7 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
size = min(size, alignment);
}
+
/* Hardware supports max 4GiB inbound region */
size = min(size, 1ULL << 32);
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index 5adac6adc046..6a46be17aa91 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -367,7 +367,7 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
}
}
- rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
+ rockchip_pcie_write(rockchip, PCI_VENDOR_ID_ROCKCHIP,
PCIE_CORE_CONFIG_VENDOR);
rockchip_pcie_write(rockchip,
PCI_CLASS_BRIDGE_PCI_NORMAL << 8,
diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
index 11def598534b..14954f43e5e9 100644
--- a/drivers/pci/controller/pcie-rockchip.h
+++ b/drivers/pci/controller/pcie-rockchip.h
@@ -200,7 +200,6 @@
#define AXI_WRAPPER_NOR_MSG 0xc
#define PCIE_RC_SEND_PME_OFF 0x11960
-#define ROCKCHIP_VENDOR_ID 0x1d87
#define PCIE_LINK_IS_L2(x) \
(((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2)
#define PCIE_LINK_TRAINING_DONE(x) \
diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
index 81e8bfae53d0..13ca493d22bd 100644
--- a/drivers/pci/controller/pcie-xilinx-cpm.c
+++ b/drivers/pci/controller/pcie-xilinx-cpm.c
@@ -84,6 +84,7 @@ enum xilinx_cpm_version {
CPM,
CPM5,
CPM5_HOST1,
+ CPM5NC_HOST,
};
/**
@@ -478,6 +479,9 @@ static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
{
const struct xilinx_cpm_variant *variant = port->variant;
+ if (variant->version == CPM5NC_HOST)
+ return;
+
if (cpm_pcie_link_up(port))
dev_info(port->dev, "PCIe Link is UP\n");
else
@@ -538,7 +542,8 @@ static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie *port,
if (IS_ERR(port->cfg))
return PTR_ERR(port->cfg);
- if (port->variant->version == CPM5) {
+ if (port->variant->version == CPM5 ||
+ port->variant->version == CPM5_HOST1) {
port->reg_base = devm_platform_ioremap_resource_byname(pdev,
"cpm_csr");
if (IS_ERR(port->reg_base))
@@ -578,28 +583,34 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
port->dev = dev;
- err = xilinx_cpm_pcie_init_irq_domain(port);
- if (err)
- return err;
+ port->variant = of_device_get_match_data(dev);
- bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
- if (!bus)
- return -ENODEV;
+ if (port->variant->version != CPM5NC_HOST) {
+ err = xilinx_cpm_pcie_init_irq_domain(port);
+ if (err)
+ return err;
+ }
- port->variant = of_device_get_match_data(dev);
+ bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
+ if (!bus) {
+ err = -ENODEV;
+ goto err_free_irq_domains;
+ }
err = xilinx_cpm_pcie_parse_dt(port, bus->res);
if (err) {
dev_err(dev, "Parsing DT failed\n");
- goto err_parse_dt;
+ goto err_free_irq_domains;
}
xilinx_cpm_pcie_init_port(port);
- err = xilinx_cpm_setup_irq(port);
- if (err) {
- dev_err(dev, "Failed to set up interrupts\n");
- goto err_setup_irq;
+ if (port->variant->version != CPM5NC_HOST) {
+ err = xilinx_cpm_setup_irq(port);
+ if (err) {
+ dev_err(dev, "Failed to set up interrupts\n");
+ goto err_setup_irq;
+ }
}
bridge->sysdata = port->cfg;
@@ -612,11 +623,13 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
return 0;
err_host_bridge:
- xilinx_cpm_free_interrupts(port);
+ if (port->variant->version != CPM5NC_HOST)
+ xilinx_cpm_free_interrupts(port);
err_setup_irq:
pci_ecam_free(port->cfg);
-err_parse_dt:
- xilinx_cpm_free_irq_domains(port);
+err_free_irq_domains:
+ if (port->variant->version != CPM5NC_HOST)
+ xilinx_cpm_free_irq_domains(port);
return err;
}
@@ -639,6 +652,10 @@ static const struct xilinx_cpm_variant cpm5_host1 = {
.ir_enable = XILINX_CPM_PCIE1_IR_ENABLE,
};
+static const struct xilinx_cpm_variant cpm5n_host = {
+ .version = CPM5NC_HOST,
+};
+
static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
{
.compatible = "xlnx,versal-cpm-host-1.00",
@@ -652,6 +669,10 @@ static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
.compatible = "xlnx,versal-cpm5-host1",
.data = &cpm5_host1,
},
+ {
+ .compatible = "xlnx,versal-cpm5nc-host",
+ .data = &cpm5n_host,
+ },
{}
};
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 9d9596947350..8df064b62a2f 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -17,6 +17,8 @@
#include <linux/rculist.h>
#include <linux/rcupdate.h>
+#include <xen/xen.h>
+
#include <asm/irqdomain.h>
#define VMD_CFGBAR 0
@@ -125,7 +127,7 @@ struct vmd_irq_list {
struct vmd_dev {
struct pci_dev *dev;
- spinlock_t cfg_lock;
+ raw_spinlock_t cfg_lock;
void __iomem *cfgbar;
int msix_count;
@@ -391,7 +393,7 @@ static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
if (!addr)
return -EFAULT;
- spin_lock_irqsave(&vmd->cfg_lock, flags);
+ raw_spin_lock_irqsave(&vmd->cfg_lock, flags);
switch (len) {
case 1:
*value = readb(addr);
@@ -406,7 +408,7 @@ static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
ret = -EINVAL;
break;
}
- spin_unlock_irqrestore(&vmd->cfg_lock, flags);
+ raw_spin_unlock_irqrestore(&vmd->cfg_lock, flags);
return ret;
}
@@ -426,7 +428,7 @@ static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
if (!addr)
return -EFAULT;
- spin_lock_irqsave(&vmd->cfg_lock, flags);
+ raw_spin_lock_irqsave(&vmd->cfg_lock, flags);
switch (len) {
case 1:
writeb(value, addr);
@@ -444,7 +446,7 @@ static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
ret = -EINVAL;
break;
}
- spin_unlock_irqrestore(&vmd->cfg_lock, flags);
+ raw_spin_unlock_irqrestore(&vmd->cfg_lock, flags);
return ret;
}
@@ -970,6 +972,24 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
struct vmd_dev *vmd;
int err;
+ if (xen_domain()) {
+ /*
+ * Xen doesn't have knowledge about devices in the VMD bus
+ * because the config space of devices behind the VMD bridge is
+ * not known to Xen, and hence Xen cannot discover or configure
+ * them in any way.
+ *
+ * Bypass of MSI remapping won't work in that case as direct
+ * write by Linux to the MSI entries won't result in functional
+ * interrupts, as Xen is the entity that manages the host
+ * interrupt controller and must configure interrupts. However
+ * multiplexing of interrupts by the VMD bridge will work under
+ * Xen, so force the usage of that mode which must always be
+ * supported by VMD bridges.
+ */
+ features &= ~VMD_FEAT_CAN_BYPASS_MSI_REMAP;
+ }
+
if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
return -ENOMEM;
@@ -1009,7 +1029,7 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (features & VMD_FEAT_OFFSET_FIRST_VECTOR)
vmd->first_vec = 1;
- spin_lock_init(&vmd->cfg_lock);
+ raw_spin_lock_init(&vmd->cfg_lock);
pci_set_drvdata(dev, vmd);
err = vmd_enable_domain(vmd, features);
if (err)
diff --git a/drivers/pci/devres.c b/drivers/pci/devres.c
index 3431a7df3e0d..73047316889e 100644
--- a/drivers/pci/devres.c
+++ b/drivers/pci/devres.c
@@ -40,7 +40,7 @@
* Legacy struct storing addresses to whole mapped BARs.
*/
struct pcim_iomap_devres {
- void __iomem *table[PCI_STD_NUM_BARS];
+ void __iomem *table[PCI_NUM_RESOURCES];
};
/* Used to restore the old INTx state on driver detach. */
@@ -577,7 +577,7 @@ static int pcim_add_mapping_to_legacy_table(struct pci_dev *pdev,
{
void __iomem **legacy_iomap_table;
- if (bar >= PCI_STD_NUM_BARS)
+ if (!pci_bar_index_is_valid(bar))
return -EINVAL;
legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);
@@ -622,7 +622,7 @@ static void pcim_remove_bar_from_legacy_table(struct pci_dev *pdev, int bar)
{
void __iomem **legacy_iomap_table;
- if (bar >= PCI_STD_NUM_BARS)
+ if (!pci_bar_index_is_valid(bar))
return;
legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);
@@ -655,6 +655,9 @@ void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
void __iomem *mapping;
struct pcim_addr_devres *res;
+ if (!pci_bar_index_is_valid(bar))
+ return NULL;
+
res = pcim_addr_devres_alloc(pdev);
if (!res)
return NULL;
@@ -722,6 +725,9 @@ void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
int ret;
struct pcim_addr_devres *res;
+ if (!pci_bar_index_is_valid(bar))
+ return IOMEM_ERR_PTR(-EINVAL);
+
res = pcim_addr_devres_alloc(pdev);
if (!res)
return IOMEM_ERR_PTR(-ENOMEM);
@@ -823,6 +829,9 @@ static int _pcim_request_region(struct pci_dev *pdev, int bar, const char *name,
int ret;
struct pcim_addr_devres *res;
+ if (!pci_bar_index_is_valid(bar))
+ return -EINVAL;
+
res = pcim_addr_devres_alloc(pdev);
if (!res)
return -ENOMEM;
@@ -991,6 +1000,9 @@ void __iomem *pcim_iomap_range(struct pci_dev *pdev, int bar,
void __iomem *mapping;
struct pcim_addr_devres *res;
+ if (!pci_bar_index_is_valid(bar))
+ return IOMEM_ERR_PTR(-EINVAL);
+
res = pcim_addr_devres_alloc(pdev);
if (!res)
return IOMEM_ERR_PTR(-ENOMEM);
diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c
index 7bd7892c5222..aae9a8a00406 100644
--- a/drivers/pci/doe.c
+++ b/drivers/pci/doe.c
@@ -14,15 +14,17 @@
#include <linux/bitfield.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>
+#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include "pci.h"
-#define PCI_DOE_PROTOCOL_DISCOVERY 0
+#define PCI_DOE_FEATURE_DISCOVERY 0
/* Timeout of 1 second from 6.30.2 Operation, PCI Spec r6.0 */
#define PCI_DOE_TIMEOUT HZ
@@ -43,22 +45,27 @@
*
* @pdev: PCI device this mailbox belongs to
* @cap_offset: Capability offset
- * @prots: Array of protocols supported (encoded as long values)
+ * @feats: Array of features supported (encoded as long values)
* @wq: Wait queue for work item
* @work_queue: Queue of pci_doe_work items
* @flags: Bit array of PCI_DOE_FLAG_* flags
+ * @sysfs_attrs: Array of sysfs device attributes
*/
struct pci_doe_mb {
struct pci_dev *pdev;
u16 cap_offset;
- struct xarray prots;
+ struct xarray feats;
wait_queue_head_t wq;
struct workqueue_struct *work_queue;
unsigned long flags;
+
+#ifdef CONFIG_SYSFS
+ struct device_attribute *sysfs_attrs;
+#endif
};
-struct pci_doe_protocol {
+struct pci_doe_feature {
u16 vid;
u8 type;
};
@@ -66,7 +73,7 @@ struct pci_doe_protocol {
/**
* struct pci_doe_task - represents a single query/response
*
- * @prot: DOE Protocol
+ * @feat: DOE Feature
* @request_pl: The request payload
* @request_pl_sz: Size of the request payload (bytes)
* @response_pl: The response payload
@@ -78,7 +85,7 @@ struct pci_doe_protocol {
* @doe_mb: Used internally by the mailbox
*/
struct pci_doe_task {
- struct pci_doe_protocol prot;
+ struct pci_doe_feature feat;
const __le32 *request_pl;
size_t request_pl_sz;
__le32 *response_pl;
@@ -92,6 +99,152 @@ struct pci_doe_task {
struct pci_doe_mb *doe_mb;
};
+#ifdef CONFIG_SYSFS
+static ssize_t doe_discovery_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "0001:00\n");
+}
+static DEVICE_ATTR_RO(doe_discovery);
+
+static struct attribute *pci_doe_sysfs_feature_attrs[] = {
+ &dev_attr_doe_discovery.attr,
+ NULL
+};
+
+static bool pci_doe_features_sysfs_group_visible(struct kobject *kobj)
+{
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
+
+ return !xa_empty(&pdev->doe_mbs);
+}
+DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(pci_doe_features_sysfs)
+
+const struct attribute_group pci_doe_sysfs_group = {
+ .name = "doe_features",
+ .attrs = pci_doe_sysfs_feature_attrs,
+ .is_visible = SYSFS_GROUP_VISIBLE(pci_doe_features_sysfs),
+};
+
+static ssize_t pci_doe_sysfs_feature_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n", attr->attr.name);
+}
+
+static void pci_doe_sysfs_feature_remove(struct pci_dev *pdev,
+ struct pci_doe_mb *doe_mb)
+{
+ struct device_attribute *attrs = doe_mb->sysfs_attrs;
+ struct device *dev = &pdev->dev;
+ unsigned long i;
+ void *entry;
+
+ if (!attrs)
+ return;
+
+ doe_mb->sysfs_attrs = NULL;
+ xa_for_each(&doe_mb->feats, i, entry) {
+ if (attrs[i].show)
+ sysfs_remove_file_from_group(&dev->kobj, &attrs[i].attr,
+ pci_doe_sysfs_group.name);
+ kfree(attrs[i].attr.name);
+ }
+ kfree(attrs);
+}
+
+static int pci_doe_sysfs_feature_populate(struct pci_dev *pdev,
+ struct pci_doe_mb *doe_mb)
+{
+ struct device *dev = &pdev->dev;
+ struct device_attribute *attrs;
+ unsigned long num_features = 0;
+ unsigned long vid, type;
+ unsigned long i;
+ void *entry;
+ int ret;
+
+ xa_for_each(&doe_mb->feats, i, entry)
+ num_features++;
+
+ attrs = kcalloc(num_features, sizeof(*attrs), GFP_KERNEL);
+ if (!attrs) {
+ pci_warn(pdev, "Failed allocating the device_attribute array\n");
+ return -ENOMEM;
+ }
+
+ doe_mb->sysfs_attrs = attrs;
+ xa_for_each(&doe_mb->feats, i, entry) {
+ sysfs_attr_init(&attrs[i].attr);
+ vid = xa_to_value(entry) >> 8;
+ type = xa_to_value(entry) & 0xFF;
+
+ if (vid == PCI_VENDOR_ID_PCI_SIG &&
+ type == PCI_DOE_FEATURE_DISCOVERY) {
+
+ /*
+ * DOE Discovery, manually displayed by
+ * `dev_attr_doe_discovery`
+ */
+ continue;
+ }
+
+ attrs[i].attr.name = kasprintf(GFP_KERNEL,
+ "%04lx:%02lx", vid, type);
+ if (!attrs[i].attr.name) {
+ ret = -ENOMEM;
+ pci_warn(pdev, "Failed allocating the attribute name\n");
+ goto fail;
+ }
+
+ attrs[i].attr.mode = 0444;
+ attrs[i].show = pci_doe_sysfs_feature_show;
+
+ ret = sysfs_add_file_to_group(&dev->kobj, &attrs[i].attr,
+ pci_doe_sysfs_group.name);
+ if (ret) {
+ attrs[i].show = NULL;
+ if (ret != -EEXIST) {
+ pci_warn(pdev, "Failed adding %s to sysfs group\n",
+ attrs[i].attr.name);
+ goto fail;
+ } else
+ kfree(attrs[i].attr.name);
+ }
+ }
+
+ return 0;
+
+fail:
+ pci_doe_sysfs_feature_remove(pdev, doe_mb);
+ return ret;
+}
+
+void pci_doe_sysfs_teardown(struct pci_dev *pdev)
+{
+ struct pci_doe_mb *doe_mb;
+ unsigned long index;
+
+ xa_for_each(&pdev->doe_mbs, index, doe_mb)
+ pci_doe_sysfs_feature_remove(pdev, doe_mb);
+}
+
+void pci_doe_sysfs_init(struct pci_dev *pdev)
+{
+ struct pci_doe_mb *doe_mb;
+ unsigned long index;
+ int ret;
+
+ xa_for_each(&pdev->doe_mbs, index, doe_mb) {
+ ret = pci_doe_sysfs_feature_populate(pdev, doe_mb);
+ if (ret)
+ return;
+ }
+}
+#endif
+
static int pci_doe_wait(struct pci_doe_mb *doe_mb, unsigned long timeout)
{
if (wait_event_timeout(doe_mb->wq,
@@ -183,8 +336,8 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
length = 0;
/* Write DOE Header */
- val = FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_VID, task->prot.vid) |
- FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, task->prot.type);
+ val = FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_VID, task->feat.vid) |
+ FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, task->feat.type);
pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, val);
pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH,
@@ -229,12 +382,12 @@ static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *tas
int i = 0;
u32 val;
- /* Read the first dword to get the protocol */
+ /* Read the first dword to get the feature */
pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
- if ((FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val) != task->prot.vid) ||
- (FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val) != task->prot.type)) {
- dev_err_ratelimited(&pdev->dev, "[%x] expected [VID, Protocol] = [%04x, %02x], got [%04x, %02x]\n",
- doe_mb->cap_offset, task->prot.vid, task->prot.type,
+ if ((FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val) != task->feat.vid) ||
+ (FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val) != task->feat.type)) {
+ dev_err_ratelimited(&pdev->dev, "[%x] expected [VID, Feature] = [%04x, %02x], got [%04x, %02x]\n",
+ doe_mb->cap_offset, task->feat.vid, task->feat.type,
FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val),
FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val));
return -EIO;
@@ -396,7 +549,7 @@ static void pci_doe_task_complete(struct pci_doe_task *task)
}
static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 capver, u8 *index, u16 *vid,
- u8 *protocol)
+ u8 *feature)
{
u32 request_pl = FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX,
*index) |
@@ -407,7 +560,7 @@ static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 capver, u8 *index, u1
u32 response_pl;
int rc;
- rc = pci_doe(doe_mb, PCI_VENDOR_ID_PCI_SIG, PCI_DOE_PROTOCOL_DISCOVERY,
+ rc = pci_doe(doe_mb, PCI_VENDOR_ID_PCI_SIG, PCI_DOE_FEATURE_DISCOVERY,
&request_pl_le, sizeof(request_pl_le),
&response_pl_le, sizeof(response_pl_le));
if (rc < 0)
@@ -418,7 +571,7 @@ static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 capver, u8 *index, u1
response_pl = le32_to_cpu(response_pl_le);
*vid = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID, response_pl);
- *protocol = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL,
+ *feature = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_TYPE,
response_pl);
*index = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_NEXT_INDEX,
response_pl);
@@ -426,12 +579,12 @@ static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 capver, u8 *index, u1
return 0;
}
-static void *pci_doe_xa_prot_entry(u16 vid, u8 prot)
+static void *pci_doe_xa_feat_entry(u16 vid, u8 type)
{
- return xa_mk_value((vid << 8) | prot);
+ return xa_mk_value((vid << 8) | type);
}
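The xarray entry above packs the 16-bit Vendor ID and 8-bit feature type into a single tagged value; the sysfs code later decodes it with a shift by 8 and a mask of 0xFF. A quick standalone check of that encode/decode (feat_entry() is just the payload that xa_mk_value() would wrap):

#include <stdio.h>
#include <stdint.h>

static unsigned long feat_entry(uint16_t vid, uint8_t type)
{
        return ((unsigned long)vid << 8) | type;
}

int main(void)
{
        unsigned long e = feat_entry(0x0001, 0x00);     /* PCI-SIG DOE Discovery */

        printf("entry=0x%lx vid=0x%04lx type=0x%02lx\n",
               e, e >> 8, e & 0xFF);
        return 0;
}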
-static int pci_doe_cache_protocols(struct pci_doe_mb *doe_mb)
+static int pci_doe_cache_features(struct pci_doe_mb *doe_mb)
{
u8 index = 0;
u8 xa_idx = 0;
@@ -442,19 +595,19 @@ static int pci_doe_cache_protocols(struct pci_doe_mb *doe_mb)
do {
int rc;
u16 vid;
- u8 prot;
+ u8 type;
rc = pci_doe_discovery(doe_mb, PCI_EXT_CAP_VER(hdr), &index,
- &vid, &prot);
+ &vid, &type);
if (rc)
return rc;
pci_dbg(doe_mb->pdev,
- "[%x] Found protocol %d vid: %x prot: %x\n",
- doe_mb->cap_offset, xa_idx, vid, prot);
+ "[%x] Found feature %d vid: %x type: %x\n",
+ doe_mb->cap_offset, xa_idx, vid, type);
- rc = xa_insert(&doe_mb->prots, xa_idx++,
- pci_doe_xa_prot_entry(vid, prot), GFP_KERNEL);
+ rc = xa_insert(&doe_mb->feats, xa_idx++,
+ pci_doe_xa_feat_entry(vid, type), GFP_KERNEL);
if (rc)
return rc;
} while (index);
@@ -478,7 +631,7 @@ static void pci_doe_cancel_tasks(struct pci_doe_mb *doe_mb)
* @pdev: PCI device to create the DOE mailbox for
* @cap_offset: Offset of the DOE mailbox
*
- * Create a single mailbox object to manage the mailbox protocol at the
+ * Create a single mailbox object to manage the mailbox feature at the
* cap_offset specified.
*
* RETURNS: created mailbox object on success
@@ -497,7 +650,7 @@ static struct pci_doe_mb *pci_doe_create_mb(struct pci_dev *pdev,
doe_mb->pdev = pdev;
doe_mb->cap_offset = cap_offset;
init_waitqueue_head(&doe_mb->wq);
- xa_init(&doe_mb->prots);
+ xa_init(&doe_mb->feats);
doe_mb->work_queue = alloc_ordered_workqueue("%s %s DOE [%x]", 0,
dev_bus_name(&pdev->dev),
@@ -520,11 +673,11 @@ static struct pci_doe_mb *pci_doe_create_mb(struct pci_dev *pdev,
/*
* The state machine and the mailbox should be in sync now;
- * Use the mailbox to query protocols.
+ * Use the mailbox to query features.
*/
- rc = pci_doe_cache_protocols(doe_mb);
+ rc = pci_doe_cache_features(doe_mb);
if (rc) {
- pci_err(pdev, "[%x] failed to cache protocols : %d\n",
+ pci_err(pdev, "[%x] failed to cache features : %d\n",
doe_mb->cap_offset, rc);
goto err_cancel;
}
@@ -533,7 +686,7 @@ static struct pci_doe_mb *pci_doe_create_mb(struct pci_dev *pdev,
err_cancel:
pci_doe_cancel_tasks(doe_mb);
- xa_destroy(&doe_mb->prots);
+ xa_destroy(&doe_mb->feats);
err_destroy_wq:
destroy_workqueue(doe_mb->work_queue);
err_free:
@@ -551,31 +704,31 @@ err_free:
static void pci_doe_destroy_mb(struct pci_doe_mb *doe_mb)
{
pci_doe_cancel_tasks(doe_mb);
- xa_destroy(&doe_mb->prots);
+ xa_destroy(&doe_mb->feats);
destroy_workqueue(doe_mb->work_queue);
kfree(doe_mb);
}
/**
- * pci_doe_supports_prot() - Return if the DOE instance supports the given
- * protocol
+ * pci_doe_supports_feat() - Return if the DOE instance supports the given
+ * feature
* @doe_mb: DOE mailbox capability to query
- * @vid: Protocol Vendor ID
- * @type: Protocol type
+ * @vid: Feature Vendor ID
+ * @type: Feature type
*
- * RETURNS: True if the DOE mailbox supports the protocol specified
+ * RETURNS: True if the DOE mailbox supports the feature specified
*/
-static bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type)
+static bool pci_doe_supports_feat(struct pci_doe_mb *doe_mb, u16 vid, u8 type)
{
unsigned long index;
void *entry;
- /* The discovery protocol must always be supported */
- if (vid == PCI_VENDOR_ID_PCI_SIG && type == PCI_DOE_PROTOCOL_DISCOVERY)
+ /* The discovery feature must always be supported */
+ if (vid == PCI_VENDOR_ID_PCI_SIG && type == PCI_DOE_FEATURE_DISCOVERY)
return true;
- xa_for_each(&doe_mb->prots, index, entry)
- if (entry == pci_doe_xa_prot_entry(vid, type))
+ xa_for_each(&doe_mb->feats, index, entry)
+ if (entry == pci_doe_xa_feat_entry(vid, type))
return true;
return false;
@@ -603,7 +756,7 @@ static bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type)
static int pci_doe_submit_task(struct pci_doe_mb *doe_mb,
struct pci_doe_task *task)
{
- if (!pci_doe_supports_prot(doe_mb, task->prot.vid, task->prot.type))
+ if (!pci_doe_supports_feat(doe_mb, task->feat.vid, task->feat.type))
return -EINVAL;
if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags))
@@ -649,8 +802,8 @@ int pci_doe(struct pci_doe_mb *doe_mb, u16 vendor, u8 type,
{
DECLARE_COMPLETION_ONSTACK(c);
struct pci_doe_task task = {
- .prot.vid = vendor,
- .prot.type = type,
+ .feat.vid = vendor,
+ .feat.type = type,
.request_pl = request,
.request_pl_sz = request_sz,
.response_pl = response,
@@ -677,7 +830,7 @@ EXPORT_SYMBOL_GPL(pci_doe);
* @vendor: Vendor ID
* @type: Data Object Type
*
- * Find first DOE mailbox of a PCI device which supports the given protocol.
+ * Find first DOE mailbox of a PCI device which supports the given feature.
*
* RETURNS: Pointer to the DOE mailbox or NULL if none was found.
*/
@@ -688,7 +841,7 @@ struct pci_doe_mb *pci_find_doe_mailbox(struct pci_dev *pdev, u16 vendor,
unsigned long index;
xa_for_each(&pdev->doe_mbs, index, doe_mb)
- if (pci_doe_supports_prot(doe_mb, vendor, type))
+ if (pci_doe_supports_feat(doe_mb, vendor, type))
return doe_mb;
return NULL;
diff --git a/drivers/pci/endpoint/Kconfig b/drivers/pci/endpoint/Kconfig
index 17bbdc9bbde0..1c5d82eb57d4 100644
--- a/drivers/pci/endpoint/Kconfig
+++ b/drivers/pci/endpoint/Kconfig
@@ -26,7 +26,7 @@ config PCI_ENDPOINT_CONFIGFS
help
This will enable the configfs entry that can be used to
configure the endpoint function and used to bind the
- function with a endpoint controller.
+ function with an endpoint controller.
source "drivers/pci/endpoint/functions/Kconfig"
diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
index 54286a40bdfb..6643a88c7a0c 100644
--- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
+++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
@@ -125,7 +125,7 @@ static const struct pci_epf_mhi_ep_info sm8450_info = {
static struct pci_epf_header sa8775p_header = {
.vendorid = PCI_VENDOR_ID_QCOM,
- .deviceid = 0x0306, /* FIXME: Update deviceid for sa8775p EP */
+ .deviceid = 0x0116,
.baseclass_code = PCI_CLASS_OTHERS,
.interrupt_pin = PCI_INTERRUPT_INTA,
};
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index b94e205ae10b..50eb4106369f 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -45,6 +45,9 @@
#define TIMER_RESOLUTION 1
#define CAP_UNALIGNED_ACCESS BIT(0)
+#define CAP_MSI BIT(1)
+#define CAP_MSIX BIT(2)
+#define CAP_INTX BIT(3)
static struct workqueue_struct *kpcitest_workqueue;
@@ -66,17 +69,17 @@ struct pci_epf_test {
};
struct pci_epf_test_reg {
- u32 magic;
- u32 command;
- u32 status;
- u64 src_addr;
- u64 dst_addr;
- u32 size;
- u32 checksum;
- u32 irq_type;
- u32 irq_number;
- u32 flags;
- u32 caps;
+ __le32 magic;
+ __le32 command;
+ __le32 status;
+ __le64 src_addr;
+ __le64 dst_addr;
+ __le32 size;
+ __le32 checksum;
+ __le32 irq_type;
+ __le32 irq_number;
+ __le32 flags;
+ __le32 caps;
} __packed;
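Switching the register struct to __le32/__le64 makes the on-BAR layout explicitly little-endian, so every access in the rest of this patch goes through le32_to_cpu()/cpu_to_le32(). A hedged user-space sketch of the same idea, with hand-rolled converters standing in for the kernel helpers:

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for le32_to_cpu()/cpu_to_le32() over a little-endian byte layout. */
static uint32_t le32_to_host(const uint8_t b[4])
{
        return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
               ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

static void host_to_le32(uint32_t v, uint8_t b[4])
{
        b[0] = v;
        b[1] = v >> 8;
        b[2] = v >> 16;
        b[3] = v >> 24;
}

int main(void)
{
        uint8_t raw[4];

        host_to_le32(0x12345678, raw);
        printf("bytes: %02x %02x %02x %02x -> 0x%08x\n",
               raw[0], raw[1], raw[2], raw[3], le32_to_host(raw));
        return 0;
}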
static struct pci_epf_header test_header = {
@@ -324,13 +327,17 @@ static void pci_epf_test_copy(struct pci_epf_test *epf_test,
struct pci_epc *epc = epf->epc;
struct device *dev = &epf->dev;
struct pci_epc_map src_map, dst_map;
- u64 src_addr = reg->src_addr;
- u64 dst_addr = reg->dst_addr;
- size_t copy_size = reg->size;
+ u64 src_addr = le64_to_cpu(reg->src_addr);
+ u64 dst_addr = le64_to_cpu(reg->dst_addr);
+ size_t orig_size, copy_size;
ssize_t map_size = 0;
+ u32 flags = le32_to_cpu(reg->flags);
+ u32 status = 0;
void *copy_buf = NULL, *buf;
- if (reg->flags & FLAG_USE_DMA) {
+ orig_size = copy_size = le32_to_cpu(reg->size);
+
+ if (flags & FLAG_USE_DMA) {
if (!dma_has_cap(DMA_MEMCPY, epf_test->dma_chan_tx->device->cap_mask)) {
dev_err(dev, "DMA controller doesn't support MEMCPY\n");
ret = -EINVAL;
@@ -350,7 +357,7 @@ static void pci_epf_test_copy(struct pci_epf_test *epf_test,
src_addr, copy_size, &src_map);
if (ret) {
dev_err(dev, "Failed to map source address\n");
- reg->status = STATUS_SRC_ADDR_INVALID;
+ status = STATUS_SRC_ADDR_INVALID;
goto free_buf;
}
@@ -358,7 +365,7 @@ static void pci_epf_test_copy(struct pci_epf_test *epf_test,
dst_addr, copy_size, &dst_map);
if (ret) {
dev_err(dev, "Failed to map destination address\n");
- reg->status = STATUS_DST_ADDR_INVALID;
+ status = STATUS_DST_ADDR_INVALID;
pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no,
&src_map);
goto free_buf;
@@ -367,7 +374,7 @@ static void pci_epf_test_copy(struct pci_epf_test *epf_test,
map_size = min_t(size_t, dst_map.pci_size, src_map.pci_size);
ktime_get_ts64(&start);
- if (reg->flags & FLAG_USE_DMA) {
+ if (flags & FLAG_USE_DMA) {
ret = pci_epf_test_data_transfer(epf_test,
dst_map.phys_addr, src_map.phys_addr,
map_size, 0, DMA_MEM_TO_MEM);
@@ -391,8 +398,8 @@ static void pci_epf_test_copy(struct pci_epf_test *epf_test,
map_size = 0;
}
- pci_epf_test_print_rate(epf_test, "COPY", reg->size, &start,
- &end, reg->flags & FLAG_USE_DMA);
+ pci_epf_test_print_rate(epf_test, "COPY", orig_size, &start, &end,
+ flags & FLAG_USE_DMA);
unmap:
if (map_size) {
@@ -405,9 +412,10 @@ free_buf:
set_status:
if (!ret)
- reg->status |= STATUS_COPY_SUCCESS;
+ status |= STATUS_COPY_SUCCESS;
else
- reg->status |= STATUS_COPY_FAIL;
+ status |= STATUS_COPY_FAIL;
+ reg->status = cpu_to_le32(status);
}
static void pci_epf_test_read(struct pci_epf_test *epf_test,
@@ -423,9 +431,14 @@ static void pci_epf_test_read(struct pci_epf_test *epf_test,
struct pci_epc *epc = epf->epc;
struct device *dev = &epf->dev;
struct device *dma_dev = epf->epc->dev.parent;
- u64 src_addr = reg->src_addr;
- size_t src_size = reg->size;
+ u64 src_addr = le64_to_cpu(reg->src_addr);
+ size_t orig_size, src_size;
ssize_t map_size = 0;
+ u32 flags = le32_to_cpu(reg->flags);
+ u32 checksum = le32_to_cpu(reg->checksum);
+ u32 status = 0;
+
+ orig_size = src_size = le32_to_cpu(reg->size);
src_buf = kzalloc(src_size, GFP_KERNEL);
if (!src_buf) {
@@ -439,12 +452,12 @@ static void pci_epf_test_read(struct pci_epf_test *epf_test,
src_addr, src_size, &map);
if (ret) {
dev_err(dev, "Failed to map address\n");
- reg->status = STATUS_SRC_ADDR_INVALID;
+ status = STATUS_SRC_ADDR_INVALID;
goto free_buf;
}
map_size = map.pci_size;
- if (reg->flags & FLAG_USE_DMA) {
+ if (flags & FLAG_USE_DMA) {
dst_phys_addr = dma_map_single(dma_dev, buf, map_size,
DMA_FROM_DEVICE);
if (dma_mapping_error(dma_dev, dst_phys_addr)) {
@@ -481,11 +494,11 @@ static void pci_epf_test_read(struct pci_epf_test *epf_test,
map_size = 0;
}
- pci_epf_test_print_rate(epf_test, "READ", reg->size, &start,
- &end, reg->flags & FLAG_USE_DMA);
+ pci_epf_test_print_rate(epf_test, "READ", orig_size, &start, &end,
+ flags & FLAG_USE_DMA);
- crc32 = crc32_le(~0, src_buf, reg->size);
- if (crc32 != reg->checksum)
+ crc32 = crc32_le(~0, src_buf, orig_size);
+ if (crc32 != checksum)
ret = -EIO;
unmap:
@@ -497,9 +510,10 @@ free_buf:
set_status:
if (!ret)
- reg->status |= STATUS_READ_SUCCESS;
+ status |= STATUS_READ_SUCCESS;
else
- reg->status |= STATUS_READ_FAIL;
+ status |= STATUS_READ_FAIL;
+ reg->status = cpu_to_le32(status);
}
static void pci_epf_test_write(struct pci_epf_test *epf_test,
@@ -514,9 +528,13 @@ static void pci_epf_test_write(struct pci_epf_test *epf_test,
struct pci_epc *epc = epf->epc;
struct device *dev = &epf->dev;
struct device *dma_dev = epf->epc->dev.parent;
- u64 dst_addr = reg->dst_addr;
- size_t dst_size = reg->size;
+ u64 dst_addr = le64_to_cpu(reg->dst_addr);
+ size_t orig_size, dst_size;
ssize_t map_size = 0;
+ u32 flags = le32_to_cpu(reg->flags);
+ u32 status = 0;
+
+ orig_size = dst_size = le32_to_cpu(reg->size);
dst_buf = kzalloc(dst_size, GFP_KERNEL);
if (!dst_buf) {
@@ -524,7 +542,7 @@ static void pci_epf_test_write(struct pci_epf_test *epf_test,
goto set_status;
}
get_random_bytes(dst_buf, dst_size);
- reg->checksum = crc32_le(~0, dst_buf, dst_size);
+ reg->checksum = cpu_to_le32(crc32_le(~0, dst_buf, dst_size));
buf = dst_buf;
while (dst_size) {
@@ -532,12 +550,12 @@ static void pci_epf_test_write(struct pci_epf_test *epf_test,
dst_addr, dst_size, &map);
if (ret) {
dev_err(dev, "Failed to map address\n");
- reg->status = STATUS_DST_ADDR_INVALID;
+ status = STATUS_DST_ADDR_INVALID;
goto free_buf;
}
map_size = map.pci_size;
- if (reg->flags & FLAG_USE_DMA) {
+ if (flags & FLAG_USE_DMA) {
src_phys_addr = dma_map_single(dma_dev, buf, map_size,
DMA_TO_DEVICE);
if (dma_mapping_error(dma_dev, src_phys_addr)) {
@@ -576,8 +594,8 @@ static void pci_epf_test_write(struct pci_epf_test *epf_test,
map_size = 0;
}
- pci_epf_test_print_rate(epf_test, "WRITE", reg->size, &start,
- &end, reg->flags & FLAG_USE_DMA);
+ pci_epf_test_print_rate(epf_test, "WRITE", orig_size, &start, &end,
+ flags & FLAG_USE_DMA);
/*
* wait 1ms in order for the write to complete. Without this delay L3

@@ -594,9 +612,10 @@ free_buf:
set_status:
if (!ret)
- reg->status |= STATUS_WRITE_SUCCESS;
+ status |= STATUS_WRITE_SUCCESS;
else
- reg->status |= STATUS_WRITE_FAIL;
+ status |= STATUS_WRITE_FAIL;
+ reg->status = cpu_to_le32(status);
}
static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
@@ -605,39 +624,42 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
struct pci_epf *epf = epf_test->epf;
struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
- u32 status = reg->status | STATUS_IRQ_RAISED;
+ u32 status = le32_to_cpu(reg->status);
+ u32 irq_number = le32_to_cpu(reg->irq_number);
+ u32 irq_type = le32_to_cpu(reg->irq_type);
int count;
/*
* Set the status before raising the IRQ to ensure that the host sees
* the updated value when it gets the IRQ.
*/
- WRITE_ONCE(reg->status, status);
+ status |= STATUS_IRQ_RAISED;
+ WRITE_ONCE(reg->status, cpu_to_le32(status));
- switch (reg->irq_type) {
+ switch (irq_type) {
case IRQ_TYPE_INTX:
pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
PCI_IRQ_INTX, 0);
break;
case IRQ_TYPE_MSI:
count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
- if (reg->irq_number > count || count <= 0) {
+ if (irq_number > count || count <= 0) {
dev_err(dev, "Invalid MSI IRQ number %d / %d\n",
- reg->irq_number, count);
+ irq_number, count);
return;
}
pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
- PCI_IRQ_MSI, reg->irq_number);
+ PCI_IRQ_MSI, irq_number);
break;
case IRQ_TYPE_MSIX:
count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
- if (reg->irq_number > count || count <= 0) {
- dev_err(dev, "Invalid MSIX IRQ number %d / %d\n",
- reg->irq_number, count);
+ if (irq_number > count || count <= 0) {
+ dev_err(dev, "Invalid MSI-X IRQ number %d / %d\n",
+ irq_number, count);
return;
}
pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
- PCI_IRQ_MSIX, reg->irq_number);
+ PCI_IRQ_MSIX, irq_number);
break;
default:
dev_err(dev, "Failed to raise IRQ, unknown type\n");
@@ -654,21 +676,22 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
struct device *dev = &epf->dev;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
+ u32 irq_type = le32_to_cpu(reg->irq_type);
- command = READ_ONCE(reg->command);
+ command = le32_to_cpu(READ_ONCE(reg->command));
if (!command)
goto reset_handler;
WRITE_ONCE(reg->command, 0);
WRITE_ONCE(reg->status, 0);
- if ((READ_ONCE(reg->flags) & FLAG_USE_DMA) &&
+ if ((le32_to_cpu(READ_ONCE(reg->flags)) & FLAG_USE_DMA) &&
!epf_test->dma_supported) {
dev_err(dev, "Cannot transfer data using DMA\n");
goto reset_handler;
}
- if (reg->irq_type > IRQ_TYPE_MSIX) {
+ if (irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Failed to detect IRQ type\n");
goto reset_handler;
}
@@ -718,6 +741,7 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
if (ret) {
pci_epf_free_space(epf, epf_test->reg[bar], bar,
PRIMARY_INTERFACE);
+ epf_test->reg[bar] = NULL;
dev_err(dev, "Failed to set BAR%d\n", bar);
if (bar == test_reg_bar)
return ret;
@@ -753,6 +777,15 @@ static void pci_epf_test_set_capabilities(struct pci_epf *epf)
if (epc->ops->align_addr)
caps |= CAP_UNALIGNED_ACCESS;
+ if (epf_test->epc_features->msi_capable)
+ caps |= CAP_MSI;
+
+ if (epf_test->epc_features->msix_capable)
+ caps |= CAP_MSIX;
+
+ if (epf_test->epc_features->intx_capable)
+ caps |= CAP_INTX;
+
reg->caps = cpu_to_le32(caps);
}
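With the three new bits, the caps register advertises which interrupt types the endpoint controller supports. A host-side consumer could decode it as below; the bit positions mirror the CAP_* defines added earlier in this patch, while the decoding tool itself is hypothetical:

#include <stdio.h>
#include <stdint.h>

#define CAP_UNALIGNED_ACCESS    (1u << 0)
#define CAP_MSI                 (1u << 1)
#define CAP_MSIX                (1u << 2)
#define CAP_INTX                (1u << 3)

int main(void)
{
        uint32_t caps = CAP_UNALIGNED_ACCESS | CAP_MSI; /* example value */

        printf("unaligned:%d msi:%d msi-x:%d intx:%d\n",
               !!(caps & CAP_UNALIGNED_ACCESS), !!(caps & CAP_MSI),
               !!(caps & CAP_MSIX), !!(caps & CAP_INTX));
        return 0;
}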
@@ -909,6 +942,7 @@ static void pci_epf_test_free_space(struct pci_epf *epf)
pci_epf_free_space(epf, epf_test->reg[bar], bar,
PRIMARY_INTERFACE);
+ epf_test->reg[bar] = NULL;
}
}
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index 9e9ca5f8e8f8..beabea00af91 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -25,13 +25,6 @@ static void devm_pci_epc_release(struct device *dev, void *res)
pci_epc_destroy(epc);
}
-static int devm_pci_epc_match(struct device *dev, void *res, void *match_data)
-{
- struct pci_epc **epc = res;
-
- return *epc == match_data;
-}
-
/**
* pci_epc_put() - release the PCI endpoint controller
* @epc: epc returned by pci_epc_get()
@@ -609,6 +602,10 @@ int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
if (!epc_features)
return -EINVAL;
+ if (epc_features->bar[bar].type == BAR_RESIZABLE &&
+ (epf_bar->size < SZ_1M || (u64)epf_bar->size > (SZ_128G * 1024)))
+ return -EINVAL;
+
if (epc_features->bar[bar].type == BAR_FIXED &&
(epc_features->bar[bar].fixed_size != epf_bar->size))
return -EINVAL;
@@ -635,6 +632,33 @@ int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
EXPORT_SYMBOL_GPL(pci_epc_set_bar);
/**
+ * pci_epc_bar_size_to_rebar_cap() - convert a size to the representation used
+ * by the Resizable BAR Capability Register
+ * @size: the size to convert
+ * @cap: where to store the result
+ *
+ * Returns 0 on success and a negative error code in case of error.
+ */
+int pci_epc_bar_size_to_rebar_cap(size_t size, u32 *cap)
+{
+ /*
+ * As per PCIe r6.0, sec 7.8.6.2, min size for a resizable BAR is 1 MB,
+ * thus disallow a requested BAR size smaller than 1 MB.
+ * Disallow a requested BAR size larger than 128 TB.
+ */
+ if (size < SZ_1M || (u64)size > (SZ_128G * 1024))
+ return -EINVAL;
+
+ *cap = ilog2(size) - ilog2(SZ_1M);
+
+ /* Sizes in REBAR_CAP start at BIT(4). */
+ *cap = BIT(*cap + 4);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_epc_bar_size_to_rebar_cap);
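A worked example of the conversion pci_epc_bar_size_to_rebar_cap() performs: 1 MB maps to BIT(4) and each doubling of the size moves one bit up, so 1 GB lands on BIT(14) and the 128 TB maximum on BIT(31). A standalone check of that arithmetic (ilog2_u64() stands in for the kernel's ilog2()):

#include <stdio.h>
#include <stdint.h>

static int ilog2_u64(uint64_t v)
{
        int n = -1;

        while (v) {
                v >>= 1;
                n++;
        }
        return n;
}

static int size_to_rebar_cap(uint64_t size, uint32_t *cap)
{
        if (size < (1ULL << 20) || size > (1ULL << 47)) /* 1 MB .. 128 TB */
                return -1;
        *cap = 1u << (ilog2_u64(size) - 20 + 4);        /* sizes start at BIT(4) */
        return 0;
}

int main(void)
{
        uint32_t cap;

        if (!size_to_rebar_cap(1ULL << 20, &cap))       /* 1 MB */
                printf("1MB -> 0x%08x\n", cap);         /* BIT(4)  = 0x00000010 */
        if (!size_to_rebar_cap(1ULL << 30, &cap))       /* 1 GB */
                printf("1GB -> 0x%08x\n", cap);         /* BIT(14) = 0x00004000 */
        return 0;
}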
+
+/**
* pci_epc_write_header() - write standard configuration header
* @epc: the EPC device to which the configuration header should be written
* @func_no: the physical endpoint function number in the EPC device
@@ -931,24 +955,6 @@ void pci_epc_destroy(struct pci_epc *epc)
}
EXPORT_SYMBOL_GPL(pci_epc_destroy);
-/**
- * devm_pci_epc_destroy() - destroy the EPC device
- * @dev: device that wants to destroy the EPC
- * @epc: the EPC device that has to be destroyed
- *
- * Invoke to destroy the devres associated with this
- * pci_epc and destroy the EPC device.
- */
-void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
-{
- int r;
-
- r = devres_release(dev, devm_pci_epc_release, devm_pci_epc_match,
- epc);
- dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
-}
-EXPORT_SYMBOL_GPL(devm_pci_epc_destroy);
-
static void pci_epc_release(struct device *dev)
{
kfree(to_pci_epc(dev));
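
[Editor's note] The new pci_epc_bar_size_to_rebar_cap() above maps a BAR size to a single bit of the Resizable BAR capability register: bit 4 stands for 1 MB and each higher bit doubles the size up to bit 31 for 128 TB. A self-contained user-space sketch of the same arithmetic (not the exported kernel helper) makes the mapping concrete:

#include <stdint.h>
#include <stdio.h>

static int demo_size_to_rebar_cap(uint64_t size, uint32_t *cap)
{
	const uint64_t sz_1m = 1ULL << 20;
	const uint64_t sz_128t = 1ULL << 47;

	if (size < sz_1m || size > sz_128t)
		return -1;

	/* ilog2(size) - ilog2(1 MB), then shift into bits [31:4] */
	*cap = 1U << ((63 - __builtin_clzll(size)) - 20 + 4);
	return 0;
}

int main(void)
{
	uint32_t cap;

	demo_size_to_rebar_cap(1ULL << 22, &cap);	/* 4 MB */
	printf("4 MB   -> 0x%08x (BIT(6))\n", cap);	/* 0x00000040 */
	demo_size_to_rebar_cap(1ULL << 47, &cap);	/* 128 TB */
	printf("128 TB -> 0x%08x (BIT(31))\n", cap);	/* 0x80000000 */
	return 0;
}
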
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 50bc2892a36c..394395c7f8de 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -274,6 +274,10 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
if (size < 128)
size = 128;
+ /* According to PCIe base spec, min size for a resizable BAR is 1 MB. */
+ if (epc_features->bar[bar].type == BAR_RESIZABLE && size < SZ_1M)
+ size = SZ_1M;
+
if (epc_features->bar[bar].type == BAR_FIXED && bar_fixed_size) {
if (size > bar_fixed_size) {
dev_err(&epf->dev,
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index 123c4c7c2ab5..3207860b52e4 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -97,7 +97,7 @@ config HOTPLUG_PCI_CPCI_ZT5550
tristate "Ziatech ZT5550 CompactPCI Hotplug driver"
depends on HOTPLUG_PCI_CPCI && X86
help
- Say Y here if you have an Performance Technologies (formerly Intel,
+ Say Y here if you have a Performance Technologies (formerly Intel,
formerly just Ziatech) Ziatech ZT5550 CompactPCI system card.
To compile this driver as a module, choose M here: the
diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
index 03fa39ab0c88..a31d6b62f523 100644
--- a/drivers/pci/hotplug/cpci_hotplug.h
+++ b/drivers/pci/hotplug/cpci_hotplug.h
@@ -44,8 +44,6 @@ struct cpci_hp_controller_ops {
int (*enable_irq)(void);
int (*disable_irq)(void);
int (*check_irq)(void *dev_id);
- u8 (*get_power)(struct slot *slot);
- int (*set_power)(struct slot *slot, int value);
};
struct cpci_hp_controller {
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index d0559d2faf50..dd93e53ea7c2 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -71,13 +71,10 @@ static int
enable_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = to_slot(hotplug_slot);
- int retval = 0;
dbg("%s - physical_slot = %s", __func__, slot_name(slot));
- if (controller->ops->set_power)
- retval = controller->ops->set_power(slot, 1);
- return retval;
+ return 0;
}
static int
@@ -109,12 +106,6 @@ disable_slot(struct hotplug_slot *hotplug_slot)
}
cpci_led_on(slot);
- if (controller->ops->set_power) {
- retval = controller->ops->set_power(slot, 0);
- if (retval)
- goto disable_error;
- }
-
slot->adapter_status = 0;
if (slot->extracting) {
@@ -129,11 +120,7 @@ disable_error:
static u8
cpci_get_power_status(struct slot *slot)
{
- u8 power = 1;
-
- if (controller->ops->get_power)
- power = controller->ops->get_power(slot);
- return power;
+ return 1;
}
static int
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 36236ac88fd5..d30f1316c98e 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -14,18 +14,16 @@
* Scott Murray <scottm@somanetworks.com>
*/
-#include <linux/module.h> /* try_module_get & module_put */
+#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
-#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/uaccess.h>
@@ -42,20 +40,14 @@
/* local variables */
static bool debug;
-static LIST_HEAD(pci_hotplug_slot_list);
-static DEFINE_MUTEX(pci_hp_mutex);
-
/* Weee, fun with macros... */
#define GET_STATUS(name, type) \
static int get_##name(struct hotplug_slot *slot, type *value) \
{ \
const struct hotplug_slot_ops *ops = slot->ops; \
int retval = 0; \
- if (!try_module_get(slot->owner)) \
- return -ENODEV; \
if (ops->get_##name) \
retval = ops->get_##name(slot, value); \
- module_put(slot->owner); \
return retval; \
}
@@ -88,10 +80,6 @@ static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf,
power = (u8)(lpower & 0xff);
dbg("power = %d\n", power);
- if (!try_module_get(slot->owner)) {
- retval = -ENODEV;
- goto exit;
- }
switch (power) {
case 0:
if (slot->ops->disable_slot)
@@ -107,9 +95,7 @@ static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf,
err("Illegal value specified for power\n");
retval = -EINVAL;
}
- module_put(slot->owner);
-exit:
if (retval)
return retval;
return count;
@@ -146,15 +132,9 @@ static ssize_t attention_write_file(struct pci_slot *pci_slot, const char *buf,
attention = (u8)(lattention & 0xff);
dbg(" - attention = %d\n", attention);
- if (!try_module_get(slot->owner)) {
- retval = -ENODEV;
- goto exit;
- }
if (ops->set_attention_status)
retval = ops->set_attention_status(slot, attention);
- module_put(slot->owner);
-exit:
if (retval)
return retval;
return count;
@@ -212,15 +192,9 @@ static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf,
test = (u32)(ltest & 0xffffffff);
dbg("test = %d\n", test);
- if (!try_module_get(slot->owner)) {
- retval = -ENODEV;
- goto exit;
- }
if (slot->ops->hardware_test)
retval = slot->ops->hardware_test(slot, test);
- module_put(slot->owner);
-exit:
if (retval)
return retval;
return count;
@@ -231,12 +205,8 @@ static struct pci_slot_attribute hotplug_slot_attr_test = {
.store = test_write_file
};
-static bool has_power_file(struct pci_slot *pci_slot)
+static bool has_power_file(struct hotplug_slot *slot)
{
- struct hotplug_slot *slot = pci_slot->hotplug;
-
- if ((!slot) || (!slot->ops))
- return false;
if ((slot->ops->enable_slot) ||
(slot->ops->disable_slot) ||
(slot->ops->get_power_status))
@@ -244,87 +214,79 @@ static bool has_power_file(struct pci_slot *pci_slot)
return false;
}
-static bool has_attention_file(struct pci_slot *pci_slot)
+static bool has_attention_file(struct hotplug_slot *slot)
{
- struct hotplug_slot *slot = pci_slot->hotplug;
-
- if ((!slot) || (!slot->ops))
- return false;
if ((slot->ops->set_attention_status) ||
(slot->ops->get_attention_status))
return true;
return false;
}
-static bool has_latch_file(struct pci_slot *pci_slot)
+static bool has_latch_file(struct hotplug_slot *slot)
{
- struct hotplug_slot *slot = pci_slot->hotplug;
-
- if ((!slot) || (!slot->ops))
- return false;
if (slot->ops->get_latch_status)
return true;
return false;
}
-static bool has_adapter_file(struct pci_slot *pci_slot)
+static bool has_adapter_file(struct hotplug_slot *slot)
{
- struct hotplug_slot *slot = pci_slot->hotplug;
-
- if ((!slot) || (!slot->ops))
- return false;
if (slot->ops->get_adapter_status)
return true;
return false;
}
-static bool has_test_file(struct pci_slot *pci_slot)
+static bool has_test_file(struct hotplug_slot *slot)
{
- struct hotplug_slot *slot = pci_slot->hotplug;
-
- if ((!slot) || (!slot->ops))
- return false;
if (slot->ops->hardware_test)
return true;
return false;
}
-static int fs_add_slot(struct pci_slot *pci_slot)
+static int fs_add_slot(struct hotplug_slot *slot, struct pci_slot *pci_slot)
{
+ struct kobject *kobj;
int retval = 0;
/* Create symbolic link to the hotplug driver module */
- pci_hp_create_module_link(pci_slot);
+ kobj = kset_find_obj(module_kset, slot->mod_name);
+ if (kobj) {
+ retval = sysfs_create_link(&pci_slot->kobj, kobj, "module");
+ if (retval)
+ dev_err(&pci_slot->bus->dev,
+ "Error creating sysfs link (%d)\n", retval);
+ kobject_put(kobj);
+ }
- if (has_power_file(pci_slot)) {
+ if (has_power_file(slot)) {
retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_power.attr);
if (retval)
goto exit_power;
}
- if (has_attention_file(pci_slot)) {
+ if (has_attention_file(slot)) {
retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_attention.attr);
if (retval)
goto exit_attention;
}
- if (has_latch_file(pci_slot)) {
+ if (has_latch_file(slot)) {
retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_latch.attr);
if (retval)
goto exit_latch;
}
- if (has_adapter_file(pci_slot)) {
+ if (has_adapter_file(slot)) {
retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_presence.attr);
if (retval)
goto exit_adapter;
}
- if (has_test_file(pci_slot)) {
+ if (has_test_file(slot)) {
retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_test.attr);
if (retval)
@@ -334,56 +296,45 @@ static int fs_add_slot(struct pci_slot *pci_slot)
goto exit;
exit_test:
- if (has_adapter_file(pci_slot))
+ if (has_adapter_file(slot))
sysfs_remove_file(&pci_slot->kobj,
&hotplug_slot_attr_presence.attr);
exit_adapter:
- if (has_latch_file(pci_slot))
+ if (has_latch_file(slot))
sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_latch.attr);
exit_latch:
- if (has_attention_file(pci_slot))
+ if (has_attention_file(slot))
sysfs_remove_file(&pci_slot->kobj,
&hotplug_slot_attr_attention.attr);
exit_attention:
- if (has_power_file(pci_slot))
+ if (has_power_file(slot))
sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_power.attr);
exit_power:
- pci_hp_remove_module_link(pci_slot);
+ sysfs_remove_link(&pci_slot->kobj, "module");
exit:
return retval;
}
-static void fs_remove_slot(struct pci_slot *pci_slot)
+static void fs_remove_slot(struct hotplug_slot *slot, struct pci_slot *pci_slot)
{
- if (has_power_file(pci_slot))
+ if (has_power_file(slot))
sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_power.attr);
- if (has_attention_file(pci_slot))
+ if (has_attention_file(slot))
sysfs_remove_file(&pci_slot->kobj,
&hotplug_slot_attr_attention.attr);
- if (has_latch_file(pci_slot))
+ if (has_latch_file(slot))
sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_latch.attr);
- if (has_adapter_file(pci_slot))
+ if (has_adapter_file(slot))
sysfs_remove_file(&pci_slot->kobj,
&hotplug_slot_attr_presence.attr);
- if (has_test_file(pci_slot))
+ if (has_test_file(slot))
sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_test.attr);
- pci_hp_remove_module_link(pci_slot);
-}
-
-static struct hotplug_slot *get_slot_from_name(const char *name)
-{
- struct hotplug_slot *slot;
-
- list_for_each_entry(slot, &pci_hotplug_slot_list, slot_list) {
- if (strcmp(hotplug_slot_name(slot), name) == 0)
- return slot;
- }
- return NULL;
+ sysfs_remove_link(&pci_slot->kobj, "module");
}
/**
@@ -476,18 +427,19 @@ EXPORT_SYMBOL_GPL(__pci_hp_initialize);
*/
int pci_hp_add(struct hotplug_slot *slot)
{
- struct pci_slot *pci_slot = slot->pci_slot;
+ struct pci_slot *pci_slot;
int result;
- result = fs_add_slot(pci_slot);
+ if (WARN_ON(!slot))
+ return -EINVAL;
+
+ pci_slot = slot->pci_slot;
+
+ result = fs_add_slot(slot, pci_slot);
if (result)
return result;
kobject_uevent(&pci_slot->kobj, KOBJ_ADD);
- mutex_lock(&pci_hp_mutex);
- list_add(&slot->slot_list, &pci_hotplug_slot_list);
- mutex_unlock(&pci_hp_mutex);
- dbg("Added slot %s to the list\n", hotplug_slot_name(slot));
return 0;
}
EXPORT_SYMBOL_GPL(pci_hp_add);
@@ -514,22 +466,10 @@ EXPORT_SYMBOL_GPL(pci_hp_deregister);
*/
void pci_hp_del(struct hotplug_slot *slot)
{
- struct hotplug_slot *temp;
-
if (WARN_ON(!slot))
return;
- mutex_lock(&pci_hp_mutex);
- temp = get_slot_from_name(hotplug_slot_name(slot));
- if (WARN_ON(temp != slot)) {
- mutex_unlock(&pci_hp_mutex);
- return;
- }
-
- list_del(&slot->slot_list);
- mutex_unlock(&pci_hp_mutex);
- dbg("Removed slot %s from the list\n", hotplug_slot_name(slot));
- fs_remove_slot(slot->pci_slot);
+ fs_remove_slot(slot, slot->pci_slot);
}
EXPORT_SYMBOL_GPL(pci_hp_del);
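
[Editor's note] With the rewrite above, which sysfs files a slot gets is driven purely by which hotplug_slot_ops callbacks are populated. A hedged sketch of a minimal registration (names are invented): only get_adapter_status is set, so fs_add_slot() creates just the "adapter" attribute and skips "power", "attention", "latch" and "test".

#include <linux/pci.h>
#include <linux/pci_hotplug.h>

static int demo_get_adapter_status(struct hotplug_slot *slot, u8 *value)
{
	*value = 1;		/* pretend a card is always present */
	return 0;
}

static const struct hotplug_slot_ops demo_hp_ops = {
	.get_adapter_status = demo_get_adapter_status,
};

static struct hotplug_slot demo_slot = {
	.ops = &demo_hp_ops,
};

static int demo_register(struct pci_bus *bus)
{
	/* Creates the pci_slot and its sysfs files via fs_add_slot() */
	return pci_hp_register(&demo_slot, bus, 0, "demo-slot");
}
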
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index ff458e692fed..997841c69893 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -286,9 +286,12 @@ static int pciehp_suspend(struct pcie_device *dev)
static bool pciehp_device_replaced(struct controller *ctrl)
{
- struct pci_dev *pdev __free(pci_dev_put);
+ struct pci_dev *pdev __free(pci_dev_put) = NULL;
u32 reg;
+ if (pci_dev_is_disconnected(ctrl->pcie->port))
+ return false;
+
pdev = pci_get_slot(ctrl->pcie->port->subordinate, PCI_DEVFN(0, 0));
if (!pdev)
return true;
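
[Editor's note] The hunk above also initializes the __free(pci_dev_put) pointer at declaration. That matters because a scoped-cleanup pointer's release handler runs whenever the variable goes out of scope, including on early returns taken before the pointer is assigned. A hedged sketch of the pattern (names invented):

#include <linux/cleanup.h>
#include <linux/pci.h>

static bool demo_slot_occupied(struct pci_bus *subordinate)
{
	struct pci_dev *pdev __free(pci_dev_put) = NULL;

	pdev = pci_get_slot(subordinate, PCI_DEVFN(0, 0));
	if (!pdev)
		return false;

	return true;		/* pci_dev_put() runs automatically here */
}
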
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index bb5a8d9f03ad..8a09fb6083e2 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -292,7 +292,7 @@ int pciehp_check_link_status(struct controller *ctrl)
{
struct pci_dev *pdev = ctrl_dev(ctrl);
bool found;
- u16 lnk_status;
+ u16 lnk_status, linksta2;
if (!pcie_wait_for_link(pdev, true)) {
ctrl_info(ctrl, "Slot(%s): No link\n", slot_name(ctrl));
@@ -319,7 +319,8 @@ int pciehp_check_link_status(struct controller *ctrl)
return -1;
}
- __pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
+ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA2, &linksta2);
+ __pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status, linksta2);
if (!found) {
ctrl_info(ctrl, "Slot(%s): No device found\n",
@@ -430,7 +431,7 @@ void pciehp_get_latch_status(struct controller *ctrl, u8 *status)
* removed immediately after the check so the caller may need to take
* this into account.
*
- * It the hotplug controller itself is not available anymore returns
+ * If the hotplug controller itself is not available anymore returns
* %-ENODEV.
*/
int pciehp_card_present(struct controller *ctrl)
@@ -842,7 +843,9 @@ void pcie_enable_interrupt(struct controller *ctrl)
{
u16 mask;
- mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
+ mask = PCI_EXP_SLTCTL_DLLSCE;
+ if (!pciehp_poll_mode)
+ mask |= PCI_EXP_SLTCTL_HPIE;
pcie_write_cmd(ctrl, mask, mask);
}
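
[Editor's note] The Link Status 2 read added above is what feeds the new flit_mode bookkeeping further down in this series. A short sketch of the same check in isolation, using the PCI_EXP_LNKSTA2_FLIT bit referenced by this diff:

#include <linux/pci.h>

static bool demo_link_in_flit_mode(struct pci_dev *pdev)
{
	u16 lnksta2 = 0;

	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA2, &lnksta2);
	return lnksta2 & PCI_EXP_LNKSTA2_FLIT;
}
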
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index f0e2d2d54d71..a425530e0939 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -33,24 +33,8 @@ extern bool shpchp_poll_mode;
extern int shpchp_poll_time;
extern bool shpchp_debug;
-#define dbg(format, arg...) \
-do { \
- if (shpchp_debug) \
- printk(KERN_DEBUG "%s: " format, MY_NAME, ## arg); \
-} while (0)
-#define err(format, arg...) \
- printk(KERN_ERR "%s: " format, MY_NAME, ## arg)
-#define info(format, arg...) \
- printk(KERN_INFO "%s: " format, MY_NAME, ## arg)
-#define warn(format, arg...) \
- printk(KERN_WARNING "%s: " format, MY_NAME, ## arg)
-
#define ctrl_dbg(ctrl, format, arg...) \
- do { \
- if (shpchp_debug) \
- pci_printk(KERN_DEBUG, ctrl->pci_dev, \
- format, ## arg); \
- } while (0)
+ pci_dbg(ctrl->pci_dev, format, ## arg)
#define ctrl_err(ctrl, format, arg...) \
pci_err(ctrl->pci_dev, format, ## arg)
#define ctrl_info(ctrl, format, arg...) \
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index a92e28b72908..0c341453afc6 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -22,7 +22,6 @@
#include "shpchp.h"
/* Global variables */
-bool shpchp_debug;
bool shpchp_poll_mode;
int shpchp_poll_time;
@@ -33,10 +32,8 @@ int shpchp_poll_time;
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-module_param(shpchp_debug, bool, 0644);
module_param(shpchp_poll_mode, bool, 0644);
module_param(shpchp_poll_time, int, 0644);
-MODULE_PARM_DESC(shpchp_debug, "Debugging mode enabled or not");
MODULE_PARM_DESC(shpchp_poll_mode, "Using polling mechanism for hot-plug events or not");
MODULE_PARM_DESC(shpchp_poll_time, "Polling mechanism frequency, in seconds");
@@ -324,20 +321,12 @@ static struct pci_driver shpc_driver = {
static int __init shpcd_init(void)
{
- int retval;
-
- retval = pci_register_driver(&shpc_driver);
- dbg("%s: pci_register_driver = %d\n", __func__, retval);
- info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
-
- return retval;
+ return pci_register_driver(&shpc_driver);
}
static void __exit shpcd_cleanup(void)
{
- dbg("unload_shpchpd()\n");
pci_unregister_driver(&shpc_driver);
- info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
}
module_init(shpcd_init);
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 012b9e3fe5b0..bfbec7c1a6b1 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -675,7 +675,7 @@ static int shpc_get_cur_bus_speed(struct controller *ctrl)
out:
bus->cur_bus_speed = bus_speed;
- dbg("Current bus speed = %d\n", bus_speed);
+ ctrl_dbg(ctrl, "Current bus speed = %d\n", bus_speed);
return retval;
}
diff --git a/drivers/pci/iomap.c b/drivers/pci/iomap.c
index 9fb7cacc15cd..fe706ed946df 100644
--- a/drivers/pci/iomap.c
+++ b/drivers/pci/iomap.c
@@ -9,6 +9,8 @@
#include <linux/export.h>
+#include "pci.h" /* for pci_bar_index_is_valid() */
+
/**
* pci_iomap_range - create a virtual mapping cookie for a PCI BAR
* @dev: PCI device that owns the BAR
@@ -33,12 +35,19 @@ void __iomem *pci_iomap_range(struct pci_dev *dev,
unsigned long offset,
unsigned long maxlen)
{
- resource_size_t start = pci_resource_start(dev, bar);
- resource_size_t len = pci_resource_len(dev, bar);
- unsigned long flags = pci_resource_flags(dev, bar);
+ resource_size_t start, len;
+ unsigned long flags;
+
+ if (!pci_bar_index_is_valid(bar))
+ return NULL;
+
+ start = pci_resource_start(dev, bar);
+ len = pci_resource_len(dev, bar);
+ flags = pci_resource_flags(dev, bar);
if (len <= offset || !start)
return NULL;
+
len -= offset;
start += offset;
if (maxlen && len > maxlen)
@@ -77,16 +86,20 @@ void __iomem *pci_iomap_wc_range(struct pci_dev *dev,
unsigned long offset,
unsigned long maxlen)
{
- resource_size_t start = pci_resource_start(dev, bar);
- resource_size_t len = pci_resource_len(dev, bar);
- unsigned long flags = pci_resource_flags(dev, bar);
+ resource_size_t start, len;
+ unsigned long flags;
-
- if (flags & IORESOURCE_IO)
+ if (!pci_bar_index_is_valid(bar))
return NULL;
+ start = pci_resource_start(dev, bar);
+ len = pci_resource_len(dev, bar);
+ flags = pci_resource_flags(dev, bar);
+
if (len <= offset || !start)
return NULL;
+ if (flags & IORESOURCE_IO)
+ return NULL;
len -= offset;
start += offset;
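
[Editor's note] After the bounds check added above, an out-of-range BAR index simply makes pci_iomap_range() return NULL, which callers must already handle. A hedged usage sketch (driver names invented; the caller is responsible for pci_iounmap() when done):

#include <linux/pci.h>
#include <linux/sizes.h>

static void __iomem *demo_map_ctrl_regs(struct pci_dev *pdev)
{
	/* Map the first 4 KB of BAR 0; NULL on a bad BAR or mapping failure */
	return pci_iomap_range(pdev, 0, 0, SZ_4K);
}
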
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 9e4770cdd4d5..10693b5d7eb6 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -285,23 +285,16 @@ const struct attribute_group sriov_vf_dev_attr_group = {
.is_visible = sriov_vf_attrs_are_visible,
};
-int pci_iov_add_virtfn(struct pci_dev *dev, int id)
+static struct pci_dev *pci_iov_scan_device(struct pci_dev *dev, int id,
+ struct pci_bus *bus)
{
- int i;
- int rc = -ENOMEM;
- u64 size;
- struct pci_dev *virtfn;
- struct resource *res;
struct pci_sriov *iov = dev->sriov;
- struct pci_bus *bus;
-
- bus = virtfn_add_bus(dev->bus, pci_iov_virtfn_bus(dev, id));
- if (!bus)
- goto failed;
+ struct pci_dev *virtfn;
+ int rc;
virtfn = pci_alloc_dev(bus);
if (!virtfn)
- goto failed0;
+ return ERR_PTR(-ENOMEM);
virtfn->devfn = pci_iov_virtfn_devfn(dev, id);
virtfn->vendor = dev->vendor;
@@ -314,8 +307,35 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
pci_read_vf_config_common(virtfn);
rc = pci_setup_device(virtfn);
- if (rc)
- goto failed1;
+ if (rc) {
+ pci_dev_put(dev);
+ pci_bus_put(virtfn->bus);
+ kfree(virtfn);
+ return ERR_PTR(rc);
+ }
+
+ return virtfn;
+}
+
+int pci_iov_add_virtfn(struct pci_dev *dev, int id)
+{
+ struct pci_bus *bus;
+ struct pci_dev *virtfn;
+ struct resource *res;
+ int rc, i;
+ u64 size;
+
+ bus = virtfn_add_bus(dev->bus, pci_iov_virtfn_bus(dev, id));
+ if (!bus) {
+ rc = -ENOMEM;
+ goto failed;
+ }
+
+ virtfn = pci_iov_scan_device(dev, id, bus);
+ if (IS_ERR(virtfn)) {
+ rc = PTR_ERR(virtfn);
+ goto failed0;
+ }
virtfn->dev.parent = dev->dev.parent;
virtfn->multifunction = 0;
@@ -952,7 +972,7 @@ void pci_iov_remove(struct pci_dev *dev)
void pci_iov_update_resource(struct pci_dev *dev, int resno)
{
struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL;
- struct resource *res = dev->resource + resno;
+ struct resource *res = pci_resource_n(dev, resno);
int vf_bar = resno - PCI_IOV_RESOURCES;
struct pci_bus_region region;
u16 cmd;
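
[Editor's note] The refactor above splits VF scanning into pci_iov_scan_device(), which hands back either a pci_dev or an ERR_PTR-encoded errno. A hedged sketch of the calling convention, for readers less familiar with the ERR_PTR idiom:

#include <linux/err.h>
#include <linux/pci.h>

static int demo_use_scan_result(struct pci_dev *virtfn)
{
	if (IS_ERR(virtfn))
		return PTR_ERR(virtfn);	/* decode the negative errno */

	/* ... continue setting up the virtual function ... */
	return 0;
}
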
diff --git a/drivers/pci/msi/api.c b/drivers/pci/msi/api.c
index b956ce591f96..17ec6332cb1d 100644
--- a/drivers/pci/msi/api.c
+++ b/drivers/pci/msi/api.c
@@ -162,7 +162,7 @@ struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
EXPORT_SYMBOL_GPL(pci_msix_alloc_irq_at);
/**
- * pci_msix_free_irq - Free an interrupt on a PCI/MSIX interrupt domain
+ * pci_msix_free_irq - Free an interrupt on a PCI/MSI-X interrupt domain
*
* @dev: The PCI device to operate on
* @map: A struct msi_map describing the interrupt to free
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index 2f647cac4cae..6569ba3577fe 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -10,12 +10,12 @@
#include <linux/err.h>
#include <linux/export.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include "../pci.h"
#include "msi.h"
int pci_msi_enable = 1;
-int pci_msi_ignore_mask;
/**
* pci_msi_supported - check whether MSI may be enabled on a device
@@ -295,8 +295,7 @@ static int msi_setup_msi_desc(struct pci_dev *dev, int nvec,
/* Lies, damned lies, and MSIs */
if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING)
control |= PCI_MSI_FLAGS_MASKBIT;
- /* Respect XEN's mask disabling */
- if (pci_msi_ignore_mask)
+ if (pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY))
control &= ~PCI_MSI_FLAGS_MASKBIT;
desc.nvec_used = nvec;
@@ -609,12 +608,13 @@ void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc)
desc->pci.msi_attrib.is_64 = 1;
desc->pci.msi_attrib.default_irq = dev->irq;
desc->pci.mask_base = dev->msix_base;
- desc->pci.msi_attrib.can_mask = !pci_msi_ignore_mask &&
- !desc->pci.msi_attrib.is_virtual;
- if (desc->pci.msi_attrib.can_mask) {
+
+ if (!pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY) &&
+ !desc->pci.msi_attrib.is_virtual) {
void __iomem *addr = pci_msix_desc_addr(desc);
+ desc->pci.msi_attrib.can_mask = 1;
desc->pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
}
@@ -659,9 +659,6 @@ static void msix_mask_all(void __iomem *base, int tsize)
u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
int i;
- if (pci_msi_ignore_mask)
- return;
-
for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
@@ -744,15 +741,17 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
/* Disable INTX */
pci_intx_for_msi(dev, 0);
- /*
- * Ensure that all table entries are masked to prevent
- * stale entries from firing in a crash kernel.
- *
- * Done late to deal with a broken Marvell NVME device
- * which takes the MSI-X mask bits into account even
- * when MSI-X is disabled, which prevents MSI delivery.
- */
- msix_mask_all(dev->msix_base, tsize);
+ if (!pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY)) {
+ /*
+ * Ensure that all table entries are masked to prevent
+ * stale entries from firing in a crash kernel.
+ *
+ * Done late to deal with a broken Marvell NVME device
+ * which takes the MSI-X mask bits into account even
+ * when MSI-X is disabled, which prevents MSI delivery.
+ */
+ msix_mask_all(dev->msix_base, tsize);
+ }
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
pcibios_free_irq(dev);
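
[Editor's note] The hunks above replace the global pci_msi_ignore_mask with a per-device query of the MSI parent domain. A hedged sketch of that check as it would sit inside drivers/pci/msi/ (which sees the internal declarations); the helper name is invented:

#include <linux/msi.h>
#include <linux/pci.h>
#include "msi.h"	/* PCI/MSI internal header: pci_msi_domain_supports() */

static bool demo_can_mask_vectors(struct pci_dev *dev)
{
	/* True unless the parent MSI domain forbids touching mask bits */
	return !pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY);
}
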
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 7a806f5c0d20..ab7a8252bf41 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -455,9 +455,9 @@ failed:
* @out_irq: structure of_phandle_args filled by this function
*
* This function resolves the PCI interrupt for a given PCI device. If a
- * device-node exists for a given pci_dev, it will use normal OF tree
+ * device node exists for a given pci_dev, it will use normal OF tree
* walking. If not, it will implement standard swizzling and walk up the
- * PCI tree until an device-node is found, at which point it will finish
+ * PCI tree until a device node is found, at which point it will finish
* resolving using the OF tree walking.
*/
static int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
@@ -517,13 +517,16 @@ static int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *
}
/*
- * Ok, we have found a parent with a device-node, hand over to
+ * Ok, we have found a parent with a device node, hand over to
* the OF parsing code.
+ *
* We build a unit address from the linux device to be used for
* resolution. Note that we use the linux bus number which may
* not match your firmware bus numbering.
+ *
* Fortunately, in most cases, interrupt-map-mask doesn't
* include the bus number as part of the matching.
+ *
* You should still be careful about that though if you intend
* to rely on this function (you ship a firmware that doesn't
* create device nodes for all PCI devices).
@@ -653,8 +656,8 @@ void of_pci_remove_node(struct pci_dev *pdev)
np = pci_device_to_OF_node(pdev);
if (!np || !of_node_check_flag(np, OF_DYNAMIC))
return;
- pdev->dev.of_node = NULL;
+ device_remove_of_node(&pdev->dev);
of_changeset_revert(np->data);
of_changeset_destroy(np->data);
of_node_put(np);
@@ -711,11 +714,18 @@ void of_pci_make_dev_node(struct pci_dev *pdev)
goto out_free_node;
np->data = cset;
- pdev->dev.of_node = np;
+
+ ret = device_add_of_node(&pdev->dev, np);
+ if (ret)
+ goto out_revert_cset;
+
kfree(name);
return;
+out_revert_cset:
+ np->data = NULL;
+ of_changeset_revert(cset);
out_free_node:
of_node_put(np);
out_destroy_cset:
@@ -724,7 +734,112 @@ out_destroy_cset:
out_free_name:
kfree(name);
}
-#endif
+
+void of_pci_remove_host_bridge_node(struct pci_host_bridge *bridge)
+{
+ struct device_node *np;
+
+ np = pci_bus_to_OF_node(bridge->bus);
+ if (!np || !of_node_check_flag(np, OF_DYNAMIC))
+ return;
+
+ device_remove_of_node(&bridge->bus->dev);
+ device_remove_of_node(&bridge->dev);
+ of_changeset_revert(np->data);
+ of_changeset_destroy(np->data);
+ of_node_put(np);
+}
+
+void of_pci_make_host_bridge_node(struct pci_host_bridge *bridge)
+{
+ struct device_node *np = NULL;
+ struct of_changeset *cset;
+ const char *name;
+ int ret;
+
+ /*
+ * If there is already a device tree node linked to the PCI bus handled
+ * by this bridge (i.e. the PCI root bus), nothing to do.
+ */
+ if (pci_bus_to_OF_node(bridge->bus))
+ return;
+
+ /*
+ * The root bus has no node. Check that the host bridge has no node
+ * too
+ */
+ if (bridge->dev.of_node) {
+ dev_err(&bridge->dev, "PCI host bridge of_node already set");
+ return;
+ }
+
+ /* Check if there is a DT root node to attach the created node */
+ if (!of_root) {
+ pr_err("of_root node is NULL, cannot create PCI host bridge node\n");
+ return;
+ }
+
+ name = kasprintf(GFP_KERNEL, "pci@%x,%x", pci_domain_nr(bridge->bus),
+ bridge->bus->number);
+ if (!name)
+ return;
+
+ cset = kmalloc(sizeof(*cset), GFP_KERNEL);
+ if (!cset)
+ goto out_free_name;
+ of_changeset_init(cset);
+
+ np = of_changeset_create_node(cset, of_root, name);
+ if (!np)
+ goto out_destroy_cset;
+
+ ret = of_pci_add_host_bridge_properties(bridge, cset, np);
+ if (ret)
+ goto out_free_node;
+
+ /*
+ * This of_node will be added to an existing device. The of_node parent
+ * is the root OF node and so this node will be handled by the platform
+ * bus. Avoid any new device creation.
+ */
+ of_node_set_flag(np, OF_POPULATED);
+ np->fwnode.dev = &bridge->dev;
+ fwnode_dev_initialized(&np->fwnode, true);
+
+ ret = of_changeset_apply(cset);
+ if (ret)
+ goto out_free_node;
+
+ np->data = cset;
+
+ /* Add the of_node to host bridge and the root bus */
+ ret = device_add_of_node(&bridge->dev, np);
+ if (ret)
+ goto out_revert_cset;
+
+ ret = device_add_of_node(&bridge->bus->dev, np);
+ if (ret)
+ goto out_remove_bridge_dev_of_node;
+
+ kfree(name);
+
+ return;
+
+out_remove_bridge_dev_of_node:
+ device_remove_of_node(&bridge->dev);
+out_revert_cset:
+ np->data = NULL;
+ of_changeset_revert(cset);
+out_free_node:
+ of_node_put(np);
+out_destroy_cset:
+ of_changeset_destroy(cset);
+ kfree(cset);
+out_free_name:
+ kfree(name);
+}
+
+#endif /* CONFIG_PCI_DYNAMIC_OF_NODES */
/**
* of_pci_supply_present() - Check if the power supply is present for the PCI
diff --git a/drivers/pci/of_property.c b/drivers/pci/of_property.c
index 58fbafac7c6a..506fcd507113 100644
--- a/drivers/pci/of_property.c
+++ b/drivers/pci/of_property.c
@@ -54,9 +54,13 @@ enum of_pci_prop_compatible {
static void of_pci_set_address(struct pci_dev *pdev, u32 *prop, u64 addr,
u32 reg_num, u32 flags, bool reloc)
{
- prop[0] = FIELD_PREP(OF_PCI_ADDR_FIELD_BUS, pdev->bus->number) |
- FIELD_PREP(OF_PCI_ADDR_FIELD_DEV, PCI_SLOT(pdev->devfn)) |
- FIELD_PREP(OF_PCI_ADDR_FIELD_FUNC, PCI_FUNC(pdev->devfn));
+ if (pdev) {
+ prop[0] = FIELD_PREP(OF_PCI_ADDR_FIELD_BUS, pdev->bus->number) |
+ FIELD_PREP(OF_PCI_ADDR_FIELD_DEV, PCI_SLOT(pdev->devfn)) |
+ FIELD_PREP(OF_PCI_ADDR_FIELD_FUNC, PCI_FUNC(pdev->devfn));
+ } else
+ prop[0] = 0;
+
prop[0] |= flags | reg_num;
if (!reloc) {
prop[0] |= OF_PCI_ADDR_FIELD_NONRELOC;
@@ -65,7 +69,7 @@ static void of_pci_set_address(struct pci_dev *pdev, u32 *prop, u64 addr,
}
}
-static int of_pci_get_addr_flags(struct resource *res, u32 *flags)
+static int of_pci_get_addr_flags(const struct resource *res, u32 *flags)
{
u32 ss;
@@ -390,3 +394,106 @@ int of_pci_add_properties(struct pci_dev *pdev, struct of_changeset *ocs,
return 0;
}
+
+static bool of_pci_is_range_resource(const struct resource *res, u32 *flags)
+{
+ if (!(resource_type(res) & IORESOURCE_MEM) &&
+ !(resource_type(res) & IORESOURCE_MEM_64))
+ return false;
+
+ if (of_pci_get_addr_flags(res, flags))
+ return false;
+
+ return true;
+}
+
+static int of_pci_host_bridge_prop_ranges(struct pci_host_bridge *bridge,
+ struct of_changeset *ocs,
+ struct device_node *np)
+{
+ struct resource_entry *window;
+ unsigned int ranges_sz = 0;
+ unsigned int n_range = 0;
+ struct resource *res;
+ int n_addr_cells;
+ u32 *ranges;
+ u64 val64;
+ u32 flags;
+ int ret;
+
+ n_addr_cells = of_n_addr_cells(np);
+ if (n_addr_cells <= 0 || n_addr_cells > 2)
+ return -EINVAL;
+
+ resource_list_for_each_entry(window, &bridge->windows) {
+ res = window->res;
+ if (!of_pci_is_range_resource(res, &flags))
+ continue;
+ n_range++;
+ }
+
+ if (!n_range)
+ return 0;
+
+ ranges = kcalloc(n_range,
+ (OF_PCI_ADDRESS_CELLS + OF_PCI_SIZE_CELLS +
+ n_addr_cells) * sizeof(*ranges),
+ GFP_KERNEL);
+ if (!ranges)
+ return -ENOMEM;
+
+ resource_list_for_each_entry(window, &bridge->windows) {
+ res = window->res;
+ if (!of_pci_is_range_resource(res, &flags))
+ continue;
+
+ /* PCI bus address */
+ val64 = res->start;
+ of_pci_set_address(NULL, &ranges[ranges_sz],
+ val64 - window->offset, 0, flags, false);
+ ranges_sz += OF_PCI_ADDRESS_CELLS;
+
+ /* Host bus address */
+ if (n_addr_cells == 2)
+ ranges[ranges_sz++] = upper_32_bits(val64);
+ ranges[ranges_sz++] = lower_32_bits(val64);
+
+ /* Size */
+ val64 = resource_size(res);
+ ranges[ranges_sz] = upper_32_bits(val64);
+ ranges[ranges_sz + 1] = lower_32_bits(val64);
+ ranges_sz += OF_PCI_SIZE_CELLS;
+ }
+
+ ret = of_changeset_add_prop_u32_array(ocs, np, "ranges", ranges,
+ ranges_sz);
+ kfree(ranges);
+ return ret;
+}
+
+int of_pci_add_host_bridge_properties(struct pci_host_bridge *bridge,
+ struct of_changeset *ocs,
+ struct device_node *np)
+{
+ int ret;
+
+ ret = of_changeset_add_prop_string(ocs, np, "device_type", "pci");
+ if (ret)
+ return ret;
+
+ ret = of_changeset_add_prop_u32(ocs, np, "#address-cells",
+ OF_PCI_ADDRESS_CELLS);
+ if (ret)
+ return ret;
+
+ ret = of_changeset_add_prop_u32(ocs, np, "#size-cells",
+ OF_PCI_SIZE_CELLS);
+ if (ret)
+ return ret;
+
+ ret = of_pci_host_bridge_prop_ranges(bridge, ocs, np);
+ if (ret)
+ return ret;
+
+ return 0;
+}
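
[Editor's note] The new of_pci_host_bridge_prop_ranges() above emits one "ranges" entry per bridge window: three PCI address cells (flags word plus a 64-bit PCI address), the parent's #address-cells worth of CPU address, and two size cells. A hedged illustration of a single entry for a parent with #address-cells = 2; all addresses are invented, and the 0x82000000 flags word is the 32-bit non-prefetchable memory space code with the non-relocatable bit, as of_pci_set_address(NULL, ...) produces it.

#include <linux/types.h>

static const u32 demo_ranges_entry[] = {
	0x82000000, 0x00000000, 0x40000000,	/* PCI: flags, addr hi, addr lo */
	0x00000000, 0x60000000,			/* CPU address (2 cells) */
	0x00000000, 0x10000000,			/* size (2 cells): 256 MB */
};
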
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index f57ea36d125d..c8bd71a739f7 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -812,8 +812,7 @@ static int pci_pm_suspend(struct device *dev)
* suspend callbacks can cope with runtime-suspended devices, it is
* better to resume the device from runtime suspend here.
*/
- if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
- pci_dev_need_resume(pci_dev)) {
+ if (!dev_pm_smart_suspend(dev) || pci_dev_need_resume(pci_dev)) {
pm_runtime_resume(dev);
pci_dev->state_saved = false;
} else {
@@ -1151,8 +1150,7 @@ static int pci_pm_poweroff(struct device *dev)
}
/* The reason to do that is the same as in pci_pm_suspend(). */
- if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
- pci_dev_need_resume(pci_dev)) {
+ if (!dev_pm_smart_suspend(dev) || pci_dev_need_resume(pci_dev)) {
pm_runtime_resume(dev);
pci_dev->state_saved = false;
} else {
@@ -1653,7 +1651,8 @@ static int pci_dma_configure(struct device *dev)
pci_put_host_bridge_device(bridge);
- if (!ret && !driver->driver_managed_dma) {
+ /* @driver may not be valid when we're called from the IOMMU layer */
+ if (!ret && dev->driver && !driver->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index b46ce1a2c554..c6cda56ca52c 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1257,6 +1257,10 @@ static int pci_create_resource_files(struct pci_dev *pdev)
int i;
int retval;
+ /* Skip devices with non-mappable BARs */
+ if (pdev->non_mappable_bars)
+ return 0;
+
/* Expose the PCI resources from this device as files */
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
@@ -1556,7 +1560,7 @@ static ssize_t __resource_resize_store(struct device *dev, int n,
return -EINVAL;
device_lock(dev);
- if (dev->driver) {
+ if (dev->driver || pci_num_vf(pdev)) {
ret = -EBUSY;
goto unlock;
}
@@ -1578,7 +1582,7 @@ static ssize_t __resource_resize_store(struct device *dev, int n,
pci_remove_resource_files(pdev);
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
if (pci_resource_len(pdev, i) &&
pci_resource_flags(pdev, i) == flags)
pci_release_resource(pdev, i);
@@ -1805,5 +1809,8 @@ const struct attribute_group *pci_dev_attr_groups[] = {
#ifdef CONFIG_PCIEASPM
&aspm_ctrl_attr_group,
#endif
+#ifdef CONFIG_PCI_DOE
+ &pci_doe_sysfs_group,
+#endif
NULL,
};
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 869d204a70a3..4d7c9f64ea24 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -954,8 +954,10 @@ struct pci_acs {
};
static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps,
- const char *p, u16 mask, u16 flags)
+ const char *p, const u16 acs_mask, const u16 acs_flags)
{
+ u16 flags = acs_flags;
+ u16 mask = acs_mask;
char *delimit;
int ret = 0;
@@ -963,7 +965,7 @@ static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps,
return;
while (*p) {
- if (!mask) {
+ if (!acs_mask) {
/* Check for ACS flags */
delimit = strstr(p, "@");
if (delimit) {
@@ -971,6 +973,8 @@ static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps,
u32 shift = 0;
end = delimit - p - 1;
+ mask = 0;
+ flags = 0;
while (end > -1) {
if (*(p + end) == '0') {
@@ -1027,10 +1031,14 @@ static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps,
pci_dbg(dev, "ACS mask = %#06x\n", mask);
pci_dbg(dev, "ACS flags = %#06x\n", flags);
+ pci_dbg(dev, "ACS control = %#06x\n", caps->ctrl);
+ pci_dbg(dev, "ACS fw_ctrl = %#06x\n", caps->fw_ctrl);
- /* If mask is 0 then we copy the bit from the firmware setting. */
- caps->ctrl = (caps->ctrl & ~mask) | (caps->fw_ctrl & mask);
- caps->ctrl |= flags;
+ /*
+ * For mask bits that are 0, copy them from the firmware setting
+ * and apply flags for all the mask bits that are 1.
+ */
+ caps->ctrl = (caps->fw_ctrl & ~mask) | (flags & mask);
pci_info(dev, "Configured ACS to %#06x\n", caps->ctrl);
}
@@ -1871,7 +1879,7 @@ static void pci_restore_rebar_state(struct pci_dev *pdev)
unsigned int pos, nbars, i;
u32 ctrl;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
+ pos = pdev->rebar_cap;
if (!pos)
return;
@@ -1884,7 +1892,7 @@ static void pci_restore_rebar_state(struct pci_dev *pdev)
pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
- res = pdev->resource + bar_idx;
+ res = pci_resource_n(pdev, bar_idx);
size = pci_rebar_bytes_to_size(resource_size(res));
ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
ctrl |= FIELD_PREP(PCI_REBAR_CTRL_BAR_SIZE, size);
@@ -3023,7 +3031,7 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
* @bridge: Bridge to check
*
* This function checks if it is possible to move the bridge to D3.
- * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
+ * Currently we only allow D3 for some PCIe ports and for Thunderbolt.
*/
bool pci_bridge_d3_possible(struct pci_dev *bridge)
{
@@ -3067,10 +3075,10 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge)
return false;
/*
- * It should be safe to put PCIe ports from 2015 or newer
- * to D3.
+ * Out of caution, we only allow PCIe ports from 2015 or newer
+ * into D3 on x86.
*/
- if (dmi_get_bios_year() >= 2015)
+ if (!IS_ENABLED(CONFIG_X86) || dmi_get_bios_year() >= 2015)
return true;
break;
}
@@ -3718,6 +3726,11 @@ void pci_acs_init(struct pci_dev *dev)
pci_enable_acs(dev);
}
+void pci_rebar_init(struct pci_dev *pdev)
+{
+ pdev->rebar_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
+}
+
/**
* pci_rebar_find_pos - find position of resize ctrl reg for BAR
* @pdev: PCI device
@@ -3732,7 +3745,7 @@ static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
unsigned int pos, nbars, i;
u32 ctrl;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
+ pos = pdev->rebar_cap;
if (!pos)
return -ENOTSUPP;
@@ -3757,7 +3770,7 @@ static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
* @bar: BAR to query
*
* Get the possible sizes of a resizable BAR as bitmask defined in the spec
- * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
+ * (bit 0=1MB, bit 31=128TB). Returns 0 if BAR isn't resizable.
*/
u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
{
@@ -3805,7 +3818,7 @@ int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
* pci_rebar_set_size - set a new size for a BAR
* @pdev: PCI device
* @bar: BAR to set size to
- * @size: new size as defined in the spec (0=1MB, 19=512GB)
+ * @size: new size as defined in the spec (0=1MB, 31=128TB)
*
* Set the new size of a BAR as defined in the spec.
* Returns zero if resizing was successful, error code otherwise.
@@ -3921,6 +3934,9 @@ EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
*/
void pci_release_region(struct pci_dev *pdev, int bar)
{
+ if (!pci_bar_index_is_valid(bar))
+ return;
+
/*
* This is done for backwards compatibility, because the old PCI devres
* API had a mode in which the function became managed if it had been
@@ -3965,6 +3981,9 @@ EXPORT_SYMBOL(pci_release_region);
static int __pci_request_region(struct pci_dev *pdev, int bar,
const char *name, int exclusive)
{
+ if (!pci_bar_index_is_valid(bar))
+ return -EINVAL;
+
if (pci_is_managed(pdev)) {
if (exclusive == IORESOURCE_EXCLUSIVE)
return pcim_request_region_exclusive(pdev, bar, name);
@@ -4766,7 +4785,7 @@ static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
/*
* PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within 20ms,
- * after which we should expect an link active if the reset was
+ * after which we should expect the link to be active if the reset was
* successful. If so, software must wait a minimum 100ms before sending
* configuration requests to devices downstream this port.
*
@@ -5230,6 +5249,7 @@ const struct pci_reset_fn_method pci_reset_fn_methods[] = {
int __pci_reset_function_locked(struct pci_dev *dev)
{
int i, m, rc;
+ const struct pci_reset_fn_method *method;
might_sleep();
@@ -5246,9 +5266,13 @@ int __pci_reset_function_locked(struct pci_dev *dev)
if (!m)
return -ENOTTY;
- rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_DO_RESET);
+ method = &pci_reset_fn_methods[m];
+ pci_dbg(dev, "reset via %s\n", method->name);
+ rc = method->reset_fn(dev, PCI_RESET_DO_RESET);
if (!rc)
return 0;
+
+ pci_dbg(dev, "%s failed with %d\n", method->name, rc);
if (rc != -ENOTTY)
return rc;
}
@@ -5405,6 +5429,8 @@ static bool pci_bus_resettable(struct pci_bus *bus)
return false;
list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (!pci_reset_supported(dev))
+ return false;
if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
(dev->subordinate && !pci_bus_resettable(dev->subordinate)))
return false;
@@ -5481,6 +5507,8 @@ static bool pci_slot_resettable(struct pci_slot *slot)
list_for_each_entry(dev, &slot->bus->devices, bus_list) {
if (!dev->slot || dev->slot != slot)
continue;
+ if (!pci_reset_supported(dev))
+ return false;
if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
(dev->subordinate && !pci_bus_resettable(dev->subordinate)))
return false;
@@ -6190,21 +6218,25 @@ void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
enum pci_bus_speed speed, speed_cap;
struct pci_dev *limiting_dev = NULL;
u32 bw_avail, bw_cap;
+ char *flit_mode = "";
bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
+ if (dev->bus && dev->bus->flit_mode)
+ flit_mode = ", in Flit mode";
+
if (bw_avail >= bw_cap && verbose)
- pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
+ pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)%s\n",
bw_cap / 1000, bw_cap % 1000,
- pci_speed_string(speed_cap), width_cap);
+ pci_speed_string(speed_cap), width_cap, flit_mode);
else if (bw_avail < bw_cap)
- pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
+ pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)%s\n",
bw_avail / 1000, bw_avail % 1000,
pci_speed_string(speed), width,
limiting_dev ? pci_name(limiting_dev) : "<unknown>",
bw_cap / 1000, bw_cap % 1000,
- pci_speed_string(speed_cap), width_cap);
+ pci_speed_string(speed_cap), width_cap, flit_mode);
}
/**
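
[Editor's note] The corrected ACS merge in __pci_config_acs() above keeps the firmware value for every bit outside the user-supplied mask and applies the requested flags only inside it. A tiny worked example with arbitrary values: fw_ctrl = 0x001d, mask = 0x0003, flags = 0x0002 gives (0x001d & ~0x0003) | (0x0002 & 0x0003) = 0x001c | 0x0002 = 0x001e.

#include <linux/types.h>

static u16 demo_merge_acs_ctrl(u16 fw_ctrl, u16 mask, u16 flags)
{
	return (fw_ctrl & ~mask) | (flags & mask);
}
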
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 01e51db8d285..b81e99cd4b62 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -167,6 +167,22 @@ static inline void pci_wakeup_event(struct pci_dev *dev)
pm_wakeup_event(&dev->dev, 100);
}
+/**
+ * pci_bar_index_is_valid - Check whether a BAR index is within valid range
+ * @bar: BAR index
+ *
+ * Protects against overflowing &struct pci_dev.resource array.
+ *
+ * Return: true for valid index, false otherwise.
+ */
+static inline bool pci_bar_index_is_valid(int bar)
+{
+ if (bar >= 0 && bar < PCI_NUM_RESOURCES)
+ return true;
+
+ return false;
+}
+
static inline bool pci_has_subordinate(struct pci_dev *pci_dev)
{
return !!(pci_dev->subordinate);
@@ -253,6 +269,7 @@ extern const struct attribute_group *pci_dev_groups[];
extern const struct attribute_group *pci_dev_attr_groups[];
extern const struct attribute_group *pcibus_groups[];
extern const struct attribute_group *pci_bus_groups[];
+extern const struct attribute_group pci_doe_sysfs_group;
#else
static inline int pci_create_sysfs_dev_files(struct pci_dev *pdev) { return 0; }
static inline void pci_remove_sysfs_dev_files(struct pci_dev *pdev) { }
@@ -266,6 +283,8 @@ extern unsigned long pci_hotplug_io_size;
extern unsigned long pci_hotplug_mmio_size;
extern unsigned long pci_hotplug_mmio_pref_size;
extern unsigned long pci_hotplug_bus_size;
+extern unsigned long pci_cardbus_io_size;
+extern unsigned long pci_cardbus_mem_size;
/**
* pci_match_one_device - Tell if a PCI device structure has a matching
@@ -309,6 +328,10 @@ enum pci_bar_type {
struct device *pci_get_host_bridge_device(struct pci_dev *dev);
void pci_put_host_bridge_device(struct device *dev);
+unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge);
+int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type);
+int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
+
int pci_configure_extended_tags(struct pci_dev *dev, void *ign);
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
int rrs_timeout);
@@ -333,6 +356,29 @@ void pci_walk_bus_locked(struct pci_bus *top,
void *userdata);
const char *pci_resource_name(struct pci_dev *dev, unsigned int i);
+bool pci_resource_is_optional(const struct pci_dev *dev, int resno);
+
+/**
+ * pci_resource_num - Reverse lookup resource number from device resources
+ * @dev: PCI device
+ * @res: Resource to lookup index for (MUST be a @dev's resource)
+ *
+ * Perform reverse lookup to determine the resource number for @res within
+ * @dev resource array. NOTE: The caller is responsible for ensuring @res is
+ * among @dev's resources!
+ *
+ * Returns: resource number.
+ */
+static inline int pci_resource_num(const struct pci_dev *dev,
+ const struct resource *res)
+{
+ int resno = res - &dev->resource[0];
+
+ /* Passing a resource that is not among dev's resources? */
+ WARN_ON_ONCE(resno >= PCI_NUM_RESOURCES);
+
+ return resno;
+}
void pci_reassigndev_resource_alignment(struct pci_dev *dev);
void pci_disable_bridge_window(struct pci_dev *dev);
@@ -406,9 +452,10 @@ const char *pci_speed_string(enum pci_bus_speed speed);
void __pcie_print_link_status(struct pci_dev *dev, bool verbose);
void pcie_report_downtraining(struct pci_dev *dev);
-static inline void __pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
+static inline void __pcie_update_link_speed(struct pci_bus *bus, u16 linksta, u16 linksta2)
{
bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
+ bus->flit_mode = (linksta2 & PCI_EXP_LNKSTA2_FLIT) ? 1 : 0;
}
void pcie_update_link_speed(struct pci_bus *bus);
@@ -456,6 +503,14 @@ static inline void pci_npem_create(struct pci_dev *dev) { }
static inline void pci_npem_remove(struct pci_dev *dev) { }
#endif
+#if defined(CONFIG_PCI_DOE) && defined(CONFIG_SYSFS)
+void pci_doe_sysfs_init(struct pci_dev *pci_dev);
+void pci_doe_sysfs_teardown(struct pci_dev *pdev);
+#else
+static inline void pci_doe_sysfs_init(struct pci_dev *pdev) { }
+static inline void pci_doe_sysfs_teardown(struct pci_dev *pdev) { }
+#endif
+
/**
* pci_dev_set_io_state - Set the new error state if possible.
*
@@ -553,7 +608,8 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
int pcie_read_tlp_log(struct pci_dev *dev, int where, int where2,
- unsigned int tlp_len, struct pcie_tlp_log *log);
+ unsigned int tlp_len, bool flit,
+ struct pcie_tlp_log *log);
unsigned int aer_tlp_log_len(struct pci_dev *dev, u32 aercc);
void pcie_print_tlp_log(const struct pci_dev *dev,
const struct pcie_tlp_log *log, const char *pfx);
@@ -632,6 +688,10 @@ void pci_iov_update_resource(struct pci_dev *dev, int resno);
resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
void pci_restore_iov_state(struct pci_dev *dev);
int pci_iov_bus_range(struct pci_bus *bus);
+static inline bool pci_resource_is_iov(int resno)
+{
+ return resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END;
+}
extern const struct attribute_group sriov_pf_dev_attr_group;
extern const struct attribute_group sriov_vf_dev_attr_group;
#else
@@ -641,12 +701,21 @@ static inline int pci_iov_init(struct pci_dev *dev)
}
static inline void pci_iov_release(struct pci_dev *dev) { }
static inline void pci_iov_remove(struct pci_dev *dev) { }
+static inline void pci_iov_update_resource(struct pci_dev *dev, int resno) { }
+static inline resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev,
+ int resno)
+{
+ return 0;
+}
static inline void pci_restore_iov_state(struct pci_dev *dev) { }
static inline int pci_iov_bus_range(struct pci_bus *bus)
{
return 0;
}
-
+static inline bool pci_resource_is_iov(int resno)
+{
+ return false;
+}
#endif /* CONFIG_PCI_IOV */
#ifdef CONFIG_PCIE_TPH
@@ -680,12 +749,10 @@ unsigned long pci_cardbus_resource_alignment(struct resource *);
static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
struct resource *res)
{
-#ifdef CONFIG_PCI_IOV
- int resno = res - dev->resource;
+ int resno = pci_resource_num(dev, res);
- if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
+ if (pci_resource_is_iov(resno))
return pci_sriov_resource_alignment(dev, resno);
-#endif
if (dev->class >> 8 == PCI_CLASS_BRIDGE_CARDBUS)
return pci_cardbus_resource_alignment(res);
return resource_alignment(res);
@@ -799,6 +866,7 @@ static inline int acpi_get_rc_resources(struct device *dev, const char *hid,
}
#endif
+void pci_rebar_init(struct pci_dev *pdev);
int pci_rebar_get_current_size(struct pci_dev *pdev, int bar);
int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size);
static inline u64 pci_rebar_size_to_bytes(int size)
@@ -876,9 +944,16 @@ void of_pci_make_dev_node(struct pci_dev *pdev);
void of_pci_remove_node(struct pci_dev *pdev);
int of_pci_add_properties(struct pci_dev *pdev, struct of_changeset *ocs,
struct device_node *np);
+void of_pci_make_host_bridge_node(struct pci_host_bridge *bridge);
+void of_pci_remove_host_bridge_node(struct pci_host_bridge *bridge);
+int of_pci_add_host_bridge_properties(struct pci_host_bridge *bridge,
+ struct of_changeset *ocs,
+ struct device_node *np);
#else
static inline void of_pci_make_dev_node(struct pci_dev *pdev) { }
static inline void of_pci_remove_node(struct pci_dev *pdev) { }
+static inline void of_pci_make_host_bridge_node(struct pci_host_bridge *bridge) { }
+static inline void of_pci_remove_host_bridge_node(struct pci_host_bridge *bridge) { }
#endif
#ifdef CONFIG_PCIEAER
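
[Editor's note] The new pci_resource_num() above is the reverse of pci_resource_n(): index to resource and back. A hedged sketch of the round trip, written as PCI-core-internal code since pci_resource_num() lives in drivers/pci/pci.h:

#include <linux/pci.h>
#include "pci.h"	/* PCI core internal header: pci_resource_num() */

static void demo_resource_roundtrip(struct pci_dev *pdev)
{
	struct resource *res = pci_resource_n(pdev, 0);	/* BAR 0 */
	int resno = pci_resource_num(pdev, res);

	WARN_ON(resno != 0);
}
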
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 508474e17183..a1cf8c7ef628 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -2,7 +2,7 @@
/*
* Implement the AER root port service driver. The driver registers an IRQ
* handler. When a root port triggers an AER interrupt, the IRQ handler
- * collects root port status and schedules work.
+ * collects Root Port status and schedules work.
*
* Copyright (C) 2006 Intel Corp.
* Tom Long Nguyen (tom.l.nguyen@intel.com)
@@ -17,6 +17,7 @@
#include <linux/bitops.h>
#include <linux/cper.h>
+#include <linux/dev_printk.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/sched.h>
@@ -35,6 +36,9 @@
#include "../pci.h"
#include "portdrv.h"
+#define aer_printk(level, pdev, fmt, arg...) \
+ dev_printk(level, &(pdev)->dev, fmt, ##arg)
+
#define AER_ERROR_SOURCES_MAX 128
#define AER_MAX_TYPEOF_COR_ERRS 16 /* as per PCI_ERR_COR_STATUS */
@@ -56,9 +60,9 @@ struct aer_stats {
/*
* Fields for all AER capable devices. They indicate the errors
* "as seen by this device". Note that this may mean that if an
- * end point is causing problems, the AER counters may increment
- * at its link partner (e.g. root port) because the errors will be
- * "seen" by the link partner and not the problematic end point
+ * Endpoint is causing problems, the AER counters may increment
+ * at its link partner (e.g. Root Port) because the errors will be
+ * "seen" by the link partner and not the problematic Endpoint
* itself (which may report all counters as 0 as it never saw any
* problems).
*/
@@ -76,10 +80,10 @@ struct aer_stats {
u64 dev_total_nonfatal_errs;
/*
- * Fields for Root ports & root complex event collectors only, these
+ * Fields for Root Ports & Root Complex Event Collectors only; these
* indicate the total number of ERR_COR, ERR_FATAL, and ERR_NONFATAL
- * messages received by the root port / event collector, INCLUDING the
- * ones that are generated internally (by the rootport itself)
+ * messages received by the Root Port / Event Collector, INCLUDING the
+ * ones that are generated internally (by the Root Port itself)
*/
u64 rootport_total_cor_errs;
u64 rootport_total_fatal_errs;
@@ -138,7 +142,7 @@ static const char * const ecrc_policy_str[] = {
* enable_ecrc_checking - enable PCIe ECRC checking for a device
* @dev: the PCI device
*
- * Returns 0 on success, or negative on failure.
+ * Return: 0 on success, or negative on failure.
*/
static int enable_ecrc_checking(struct pci_dev *dev)
{
@@ -159,10 +163,10 @@ static int enable_ecrc_checking(struct pci_dev *dev)
}
/**
- * disable_ecrc_checking - disables PCIe ECRC checking for a device
+ * disable_ecrc_checking - disable PCIe ECRC checking for a device
* @dev: the PCI device
*
- * Returns 0 on success, or negative on failure.
+ * Return: 0 on success, or negative on failure.
*/
static int disable_ecrc_checking(struct pci_dev *dev)
{
@@ -283,10 +287,10 @@ void pci_aer_clear_fatal_status(struct pci_dev *dev)
* pci_aer_raw_clear_status - Clear AER error registers.
* @dev: the PCI device
*
- * Clearing AER error status registers unconditionally, regardless of
+ * Clear AER error status registers unconditionally, regardless of
* whether they're owned by firmware or the OS.
*
- * Returns 0 on success, or negative on failure.
+ * Return: 0 on success, or negative on failure.
*/
int pci_aer_raw_clear_status(struct pci_dev *dev)
{
@@ -378,8 +382,8 @@ void pci_aer_init(struct pci_dev *dev)
/*
* We save/restore PCI_ERR_UNCOR_MASK, PCI_ERR_UNCOR_SEVER,
* PCI_ERR_COR_MASK, and PCI_ERR_CAP. Root and Root Complex Event
- * Collectors also implement PCI_ERR_ROOT_COMMAND (PCIe r5.0, sec
- * 7.8.4).
+ * Collectors also implement PCI_ERR_ROOT_COMMAND (PCIe r6.0, sec
+ * 7.8.4.9).
*/
n = pcie_cap_has_rtctl(dev) ? 5 : 4;
pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_ERR, sizeof(u32) * n);
@@ -686,7 +690,7 @@ static void __aer_print_error(struct pci_dev *dev,
if (!errmsg)
errmsg = "Unknown Error Bit";
- pci_printk(level, dev, " [%2d] %-22s%s\n", i, errmsg,
+ aer_printk(level, dev, " [%2d] %-22s%s\n", i, errmsg,
info->first_error == i ? " (First)" : "");
}
pci_dev_aer_stats_incr(dev, info);
@@ -709,11 +713,11 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
level = (info->severity == AER_CORRECTABLE) ? KERN_WARNING : KERN_ERR;
- pci_printk(level, dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n",
+ aer_printk(level, dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n",
aer_error_severity_string[info->severity],
aer_error_layer[layer], aer_agent_string[agent]);
- pci_printk(level, dev, " device [%04x:%04x] error status/mask=%08x/%08x\n",
+ aer_printk(level, dev, " device [%04x:%04x] error status/mask=%08x/%08x\n",
dev->vendor, dev->device, info->status, info->mask);
__aer_print_error(dev, info);
@@ -825,8 +829,8 @@ static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
u16 reg16;
/*
- * When bus id is equal to 0, it might be a bad id
- * reported by root port.
+ * When bus ID is equal to 0, it might be a bad ID
+ * reported by Root Port.
*/
if ((PCI_BUS_NUM(e_info->id) != 0) &&
!(dev->bus->bus_flags & PCI_BUS_FLAGS_NO_AERSID)) {
@@ -834,15 +838,15 @@ static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
if (e_info->id == pci_dev_id(dev))
return true;
- /* Continue id comparing if there is no multiple error */
+ /* Continue ID comparing if there is no multiple error */
if (!e_info->multi_error_valid)
return false;
}
/*
* When either
- * 1) bus id is equal to 0. Some ports might lose the bus
- * id of error source id;
+ * 1) bus ID is equal to 0. Some ports might lose the bus
+ * ID of error source id;
* 2) bus flag PCI_BUS_FLAGS_NO_AERSID is set
* 3) There are multiple errors and prior ID comparing fails;
* We check AER status registers to find possible reporter.
@@ -894,9 +898,9 @@ static int find_device_iter(struct pci_dev *dev, void *data)
/**
* find_source_device - search through device hierarchy for source device
* @parent: pointer to Root Port pci_dev data structure
- * @e_info: including detailed error information such like id
+ * @e_info: including detailed error information such as ID
*
- * Return true if found.
+ * Return: true if found.
*
* Invoked by DPC when error is detected at the Root Port.
* Caller of this function must set id, severity, and multi_error_valid of
@@ -938,9 +942,9 @@ static bool find_source_device(struct pci_dev *parent,
/**
* pci_aer_unmask_internal_errors - unmask internal errors
- * @dev: pointer to the pcie_dev data structure
+ * @dev: pointer to the pci_dev data structure
*
- * Unmasks internal errors in the Uncorrectable and Correctable Error
+ * Unmask internal errors in the Uncorrectable and Correctable Error
* Mask registers.
*
* Note: AER must be enabled and supported by the device which must be
@@ -1003,7 +1007,7 @@ static int cxl_rch_handle_error_iter(struct pci_dev *dev, void *data)
if (!is_cxl_mem_dev(dev) || !cxl_error_is_native(dev))
return 0;
- /* protect dev->driver */
+ /* Protect dev->driver */
device_lock(&dev->dev);
err_handler = dev->driver ? dev->driver->err_handler : NULL;
@@ -1195,10 +1199,10 @@ EXPORT_SYMBOL_GPL(aer_recover_queue);
/**
* aer_get_device_error_info - read error status from dev and store it to info
- * @dev: pointer to the device expected to have a error record
+ * @dev: pointer to the device expected to have an error record
* @info: pointer to structure to store the error record
*
- * Return 1 on success, 0 on error.
+ * Return: 1 on success, 0 on error.
*
* Note that @info is reused among all error devices. Clear fields properly.
*/
@@ -1245,6 +1249,7 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
pcie_read_tlp_log(dev, aer + PCI_ERR_HEADER_LOG,
aer + PCI_ERR_PREFIX_LOG,
aer_tlp_log_len(dev, aercc),
+ aercc & PCI_ERR_CAP_TLP_LOG_FLIT,
&info->tlp);
}
}
@@ -1256,7 +1261,7 @@ static inline void aer_process_err_devices(struct aer_err_info *e_info)
{
int i;
- /* Report all before handle them, not to lost records by reset etc. */
+ /* Report all before handling them, to not lose records by reset etc. */
for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
if (aer_get_device_error_info(e_info->dev[i], e_info))
aer_print_error(e_info->dev[i], e_info);
@@ -1268,8 +1273,8 @@ static inline void aer_process_err_devices(struct aer_err_info *e_info)
}
/**
- * aer_isr_one_error - consume an error detected by root port
- * @rpc: pointer to the root port which holds an error
+ * aer_isr_one_error - consume an error detected by Root Port
+ * @rpc: pointer to the Root Port which holds an error
* @e_src: pointer to an error source
*/
static void aer_isr_one_error(struct aer_rpc *rpc,
@@ -1319,11 +1324,11 @@ static void aer_isr_one_error(struct aer_rpc *rpc,
}
/**
- * aer_isr - consume errors detected by root port
+ * aer_isr - consume errors detected by Root Port
* @irq: IRQ assigned to Root Port
* @context: pointer to Root Port data structure
*
- * Invoked, as DPC, when root port records new detected error
+ * Invoked, as DPC, when Root Port records new detected error
*/
static irqreturn_t aer_isr(int irq, void *context)
{
@@ -1383,7 +1388,7 @@ static void aer_disable_irq(struct pci_dev *pdev)
int aer = pdev->aer_cap;
u32 reg32;
- /* Disable Root's interrupt in response to error messages */
+ /* Disable Root Port's interrupt in response to error messages */
pci_read_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, &reg32);
reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
pci_write_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, reg32);
@@ -1583,9 +1588,9 @@ static struct pcie_port_service_driver aerdriver = {
};
/**
- * pcie_aer_init - register AER root service driver
+ * pcie_aer_init - register AER service driver
*
- * Invoked when AER root service driver is loaded.
+ * Invoked when AER service driver is loaded.
*/
int __init pcie_aer_init(void)
{
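The is_error_source() logic above trusts the Requester ID logged by the Root Port only when its bus number is non-zero, and otherwise falls back to reading the AER status registers. A minimal user-space sketch of that ID comparison, using the standard Requester ID layout (bus in bits 15:8, device/function in bits 7:0); the values in main() are hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Standard PCI Requester ID layout: bus[15:8], devfn[7:0]. */
static inline uint8_t rid_bus(uint16_t rid) { return rid >> 8; }

/* Mirror of the check in is_error_source(): a bus number of 0 may be a
 * bogus ID reported by the Root Port, so only trust an exact match when
 * the logged bus number is non-zero. */
static bool id_matches(uint16_t logged_rid, uint8_t bus, uint8_t devfn)
{
	if (rid_bus(logged_rid) == 0)
		return false;	/* fall back to the AER status registers */
	return logged_rid == (uint16_t)((bus << 8) | devfn);
}

int main(void)
{
	/* Hypothetical error source 03:00.1 -> Requester ID 0x0301. */
	printf("%d\n", id_matches(0x0301, 0x03, 0x01));	/* 1 */
	printf("%d\n", id_matches(0x0001, 0x00, 0x01));	/* 0: bus 0, untrusted */
	return 0;
}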
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index da3e7edcf49d..29fcb0689a91 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -1270,16 +1270,16 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
parent_link = link->parent;
/*
- * link->downstream is a pointer to the pci_dev of function 0. If
- * we remove that function, the pci_dev is about to be deallocated,
- * so we can't use link->downstream again. Free the link state to
- * avoid this.
+ * Free the parent link state, no later than function 0 (i.e.
+ * link->downstream) being removed.
*
- * If we're removing a non-0 function, it's possible we could
- * retain the link state, but PCIe r6.0, sec 7.5.3.7, recommends
- * programming the same ASPM Control value for all functions of
- * multi-function devices, so disable ASPM for all of them.
+ * Do not free the link state any earlier. If function 0 is a
+ * switch upstream port, this link state is parent_link to all
+ * subordinate ones.
*/
+ if (pdev != link->downstream)
+ goto out;
+
pcie_config_aspm_link(link, 0);
list_del(&link->sibling);
free_link_state(link);
@@ -1290,6 +1290,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
pcie_config_aspm_path(parent_link);
}
+ out:
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
}
diff --git a/drivers/pci/pcie/bwctrl.c b/drivers/pci/pcie/bwctrl.c
index 0a5e7efbce2c..d8d2aa85a229 100644
--- a/drivers/pci/pcie/bwctrl.c
+++ b/drivers/pci/pcie/bwctrl.c
@@ -113,7 +113,7 @@ static u16 pcie_bwctrl_select_speed(struct pci_dev *port, enum pci_bus_speed spe
up_read(&pci_bus_sem);
}
if (!supported_speeds)
- return PCI_EXP_LNKCAP2_SLS_2_5GB;
+ supported_speeds = PCI_EXP_LNKCAP2_SLS_2_5GB;
return pcie_supported_speeds2target_speed(supported_speeds & desired_speeds);
}
@@ -294,6 +294,10 @@ static int pcie_bwnotif_probe(struct pcie_device *srv)
struct pci_dev *port = srv->port;
int ret;
+ /* Can happen if we run out of bus numbers during enumeration. */
+ if (!port->subordinate)
+ return -ENODEV;
+
struct pcie_bwctrl_data *data = devm_kzalloc(&srv->device,
sizeof(*data), GFP_KERNEL);
if (!data)
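The one-line bwctrl change above matters because the 2.5GT/s fallback is now fed through the same "supported & desired" masking and speed conversion as a real Supported Link Speeds value, instead of being returned as a raw LNKCAP2 bit. A small sketch of that masking; the SLS_* constants are local stand-ins assumed to follow the LNKCAP2 bit encoding:

#include <stdint.h>
#include <stdio.h>

/* Supported Link Speeds Vector bits, LNKCAP2-style (assumed encoding). */
#define SLS_2_5GB 0x02
#define SLS_5_0GB 0x04
#define SLS_8_0GB 0x08

static uint16_t select_speed(uint16_t supported, uint16_t desired)
{
	if (!supported)			/* no LNKCAP2 data: assume 2.5GT/s only */
		supported = SLS_2_5GB;	/* fall through so 'desired' is still honoured */
	return supported & desired;
}

int main(void)
{
	/* With the old early return, 'desired' would have been ignored here. */
	printf("%#x\n", select_speed(0, SLS_2_5GB | SLS_5_0GB));	/* 0x2 */
	printf("%#x\n", select_speed(SLS_2_5GB | SLS_8_0GB, SLS_8_0GB));	/* 0x8 */
	return 0;
}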
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index 242cabd5eeeb..df42f15c9829 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -219,7 +219,9 @@ static void dpc_process_rp_pio_error(struct pci_dev *pdev)
goto clear_status;
pcie_read_tlp_log(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG,
cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG,
- dpc_tlp_log_len(pdev), &tlp_log);
+ dpc_tlp_log_len(pdev),
+ pdev->subordinate->flit_mode,
+ &tlp_log);
pcie_print_tlp_log(pdev, &tlp_log, dev_fmt(""));
if (pdev->dpc_rp_log_size < PCIE_STD_NUM_TLP_HEADERLOG + 1)
@@ -398,11 +400,21 @@ void pci_dpc_init(struct pci_dev *pdev)
/* Quirks may set dpc_rp_log_size if device or firmware is buggy */
if (!pdev->dpc_rp_log_size) {
+ u16 flags;
+ int ret;
+
+ ret = pcie_capability_read_word(pdev, PCI_EXP_FLAGS, &flags);
+ if (ret)
+ return;
+
pdev->dpc_rp_log_size =
FIELD_GET(PCI_EXP_DPC_RP_PIO_LOG_SIZE, cap);
+ if (FIELD_GET(PCI_EXP_FLAGS_FLIT, flags))
+ pdev->dpc_rp_log_size += FIELD_GET(PCI_EXP_DPC_RP_PIO_LOG_SIZE4,
+ cap) << 4;
+
if (pdev->dpc_rp_log_size < PCIE_STD_NUM_TLP_HEADERLOG ||
- pdev->dpc_rp_log_size > PCIE_STD_NUM_TLP_HEADERLOG + 1 +
- PCIE_STD_MAX_TLP_PREFIXLOG) {
+ pdev->dpc_rp_log_size > PCIE_STD_MAX_TLP_HEADERLOG + 1) {
pci_err(pdev, "RP PIO log size %u is invalid\n",
pdev->dpc_rp_log_size);
pdev->dpc_rp_log_size = 0;
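The dpc.c hunk above widens dpc_rp_log_size when the Flit mode flag is set in the PCIe capability Flags register (read via PCI_EXP_FLAGS in the hunk): a second 4-bit field contributes the upper bits of the size. A user-space sketch of that arithmetic; the mask and shift values below are placeholders for illustration, not the real PCI_EXP_DPC_RP_PIO_LOG_SIZE/LOG_SIZE4 definitions:

#include <stdint.h>
#include <stdio.h>

/* Placeholder field positions -- illustration only, not the uapi values. */
#define RP_PIO_LOG_SIZE_MASK	0x0f00u	/* low 4 bits of the log size */
#define RP_PIO_LOG_SIZE_SHIFT	8
#define RP_PIO_LOG_SIZE4_MASK	0xf000u	/* extra high 4 bits (Flit mode) */
#define RP_PIO_LOG_SIZE4_SHIFT	12

static unsigned int rp_pio_log_size(uint32_t cap, int flit_mode)
{
	unsigned int size = (cap & RP_PIO_LOG_SIZE_MASK) >> RP_PIO_LOG_SIZE_SHIFT;

	/* In Flit mode the second field supplies bits [7:4] of the size. */
	if (flit_mode)
		size += ((cap & RP_PIO_LOG_SIZE4_MASK) >> RP_PIO_LOG_SIZE4_SHIFT) << 4;
	return size;
}

int main(void)
{
	printf("%u\n", rp_pio_log_size(0x0500, 0));	/* 5 DWORDs */
	printf("%u\n", rp_pio_log_size(0x1500, 1));	/* 5 + (1 << 4) = 21 DWORDs */
	return 0;
}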
diff --git a/drivers/pci/pcie/portdrv.c b/drivers/pci/pcie/portdrv.c
index 02e73099bad0..e8318fd5f6ed 100644
--- a/drivers/pci/pcie/portdrv.c
+++ b/drivers/pci/pcie/portdrv.c
@@ -228,10 +228,12 @@ static int get_port_device_capability(struct pci_dev *dev)
/*
* Disable hot-plug interrupts in case they have been enabled
- * by the BIOS and the hot-plug service driver is not loaded.
+ * by the BIOS and the hot-plug service driver won't be loaded
+ * to handle them.
*/
- pcie_capability_clear_word(dev, PCI_EXP_SLTCTL,
- PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
+ if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
+ pcie_capability_clear_word(dev, PCI_EXP_SLTCTL,
+ PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
}
#ifdef CONFIG_PCIEAER
diff --git a/drivers/pci/pcie/tlp.c b/drivers/pci/pcie/tlp.c
index 0860b5da837f..890d5391d7f5 100644
--- a/drivers/pci/pcie/tlp.c
+++ b/drivers/pci/pcie/tlp.c
@@ -7,6 +7,7 @@
#include <linux/aer.h>
#include <linux/array_size.h>
+#include <linux/bitfield.h>
#include <linux/pci.h>
#include <linux/string.h>
@@ -21,6 +22,9 @@
*/
unsigned int aer_tlp_log_len(struct pci_dev *dev, u32 aercc)
{
+ if (aercc & PCI_ERR_CAP_TLP_LOG_FLIT)
+ return FIELD_GET(PCI_ERR_CAP_TLP_LOG_SIZE, aercc);
+
return PCIE_STD_NUM_TLP_HEADERLOG +
((aercc & PCI_ERR_CAP_PREFIX_LOG_PRESENT) ?
dev->eetlp_prefix_max : 0);
@@ -49,6 +53,7 @@ unsigned int dpc_tlp_log_len(struct pci_dev *dev)
* @where: PCI Config offset of TLP Header Log
* @where2: PCI Config offset of TLP Prefix Log
* @tlp_len: TLP Log length (Header Log + TLP Prefix Log in DWORDs)
+ * @flit: TLP Logged in Flit mode
* @log: TLP Log structure to fill
*
* Fill @log from TLP Header Log registers, e.g., AER or DPC.
@@ -56,28 +61,34 @@ unsigned int dpc_tlp_log_len(struct pci_dev *dev)
* Return: 0 on success and filled TLP Log structure, <0 on error.
*/
int pcie_read_tlp_log(struct pci_dev *dev, int where, int where2,
- unsigned int tlp_len, struct pcie_tlp_log *log)
+ unsigned int tlp_len, bool flit, struct pcie_tlp_log *log)
{
unsigned int i;
int off, ret;
- u32 *to;
+
+ if (tlp_len > ARRAY_SIZE(log->dw))
+ tlp_len = ARRAY_SIZE(log->dw);
memset(log, 0, sizeof(*log));
for (i = 0; i < tlp_len; i++) {
- if (i < PCIE_STD_NUM_TLP_HEADERLOG) {
+ if (i < PCIE_STD_NUM_TLP_HEADERLOG)
off = where + i * 4;
- to = &log->dw[i];
- } else {
+ else
off = where2 + (i - PCIE_STD_NUM_TLP_HEADERLOG) * 4;
- to = &log->prefix[i - PCIE_STD_NUM_TLP_HEADERLOG];
- }
- ret = pci_read_config_dword(dev, off, to);
+ ret = pci_read_config_dword(dev, off, &log->dw[i]);
if (ret)
return pcibios_err_to_errno(ret);
}
+ /*
+ * Hard-code non-Flit mode to 4 DWORDs, for now. The exact length
+ * can only be known if the TLP is parsed.
+ */
+ log->header_len = flit ? tlp_len : 4;
+ log->flit = flit;
+
return 0;
}
@@ -94,22 +105,31 @@ int pcie_read_tlp_log(struct pci_dev *dev, int where, int where2,
void pcie_print_tlp_log(const struct pci_dev *dev,
const struct pcie_tlp_log *log, const char *pfx)
{
- char buf[11 * (PCIE_STD_NUM_TLP_HEADERLOG + ARRAY_SIZE(log->prefix)) +
- sizeof(EE_PREFIX_STR)];
+ /* EE_PREFIX_STR fits the extended DW space needed for the Flit mode */
+ char buf[11 * PCIE_STD_MAX_TLP_HEADERLOG + 1];
unsigned int i;
int len;
len = scnprintf(buf, sizeof(buf), "%#010x %#010x %#010x %#010x",
log->dw[0], log->dw[1], log->dw[2], log->dw[3]);
- if (log->prefix[0])
- len += scnprintf(buf + len, sizeof(buf) - len, EE_PREFIX_STR);
- for (i = 0; i < ARRAY_SIZE(log->prefix); i++) {
- if (!log->prefix[i])
- break;
- len += scnprintf(buf + len, sizeof(buf) - len,
- " %#010x", log->prefix[i]);
+ if (log->flit) {
+ for (i = PCIE_STD_NUM_TLP_HEADERLOG; i < log->header_len; i++) {
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ " %#010x", log->dw[i]);
+ }
+ } else {
+ if (log->prefix[0])
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ EE_PREFIX_STR);
+ for (i = 0; i < ARRAY_SIZE(log->prefix); i++) {
+ if (!log->prefix[i])
+ break;
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ " %#010x", log->prefix[i]);
+ }
}
- pci_err(dev, "%sTLP Header: %s\n", pfx, buf);
+ pci_err(dev, "%sTLP Header%s: %s\n", pfx,
+ log->flit ? " (Flit)" : "", buf);
}
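With the change above, pcie_read_tlp_log() stores every DWORD in log->dw[] and records whether the log was captured in Flit mode, and pcie_print_tlp_log() sizes its buffer at 11 bytes per DWORD (" %#010x" prints 11 characters). A self-contained sketch of the Flit-mode formatting; the struct and the MAX_HEADERLOG limit are local stand-ins, and the non-Flit prefix handling is omitted:

#include <stdio.h>

#define STD_HEADERLOG	4	/* classic TLP Header Log is 4 DWORDs */
#define MAX_HEADERLOG	10	/* stand-in for the Flit-mode maximum */

struct tlp_log {		/* local stand-in, not the kernel struct */
	unsigned int dw[MAX_HEADERLOG];
	unsigned int header_len;
	int flit;
};

static void print_tlp_log(const struct tlp_log *log)
{
	/* 11 chars per " %#010x" DWORD plus the terminating NUL. */
	char buf[11 * MAX_HEADERLOG + 1];
	unsigned int i, n = log->flit ? log->header_len : STD_HEADERLOG;
	int len = 0;

	for (i = 0; i < n; i++)
		len += snprintf(buf + len, sizeof(buf) - len, "%s%#010x",
				i ? " " : "", log->dw[i]);
	printf("TLP Header%s: %s\n", log->flit ? " (Flit)" : "", buf);
}

int main(void)
{
	struct tlp_log log = {
		.dw = { 0x4a004001, 0x0000000f, 0x01000000, 0, 0xdeadbeef },
		.header_len = 5, .flit = 1,
	};
	print_tlp_log(&log);
	return 0;
}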
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 246744d8d268..364fa2a514f8 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -9,6 +9,8 @@
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -789,10 +791,11 @@ EXPORT_SYMBOL_GPL(pci_speed_string);
void pcie_update_link_speed(struct pci_bus *bus)
{
struct pci_dev *bridge = bus->self;
- u16 linksta;
+ u16 linksta, linksta2;
pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
- __pcie_update_link_speed(bus, linksta);
+ pcie_capability_read_word(bridge, PCI_EXP_LNKSTA2, &linksta2);
+ __pcie_update_link_speed(bus, linksta, linksta2);
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);
@@ -954,6 +957,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
resource_size_t offset, next_offset;
LIST_HEAD(resources);
struct resource *res, *next_res;
+ bool bus_registered = false;
char addr[64], *fmt;
const char *name;
int err;
@@ -996,10 +1000,9 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
/* Temporarily move resources off the list */
list_splice_init(&bridge->windows, &resources);
err = device_add(&bridge->dev);
- if (err) {
- put_device(&bridge->dev);
+ if (err)
goto free;
- }
+
bus->bridge = get_device(&bridge->dev);
device_enable_async_suspend(bus->bridge);
pci_set_bus_of_node(bus);
@@ -1018,6 +1021,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
name = dev_name(&bus->dev);
err = device_register(&bus->dev);
+ bus_registered = true;
if (err)
goto unregister;
@@ -1095,6 +1099,8 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
dev_info(&bus->dev, "root bus resource %pR%s\n", res, addr);
}
+ of_pci_make_host_bridge_node(bridge);
+
down_write(&pci_bus_sem);
list_add_tail(&bus->node, &pci_root_buses);
up_write(&pci_bus_sem);
@@ -1104,12 +1110,15 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
unregister:
put_device(&bridge->dev);
device_del(&bridge->dev);
-
free:
#ifdef CONFIG_PCI_DOMAINS_GENERIC
pci_bus_release_domain_nr(parent, bus->domain_nr);
#endif
- kfree(bus);
+ if (bus_registered)
+ put_device(&bus->dev);
+ else
+ kfree(bus);
+
return err;
}
@@ -1218,7 +1227,10 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
add_dev:
pci_set_bus_msi_domain(child);
ret = device_register(&child->dev);
- WARN_ON(ret < 0);
+ if (WARN_ON(ret < 0)) {
+ put_device(&child->dev);
+ return NULL;
+ }
pcibios_add_bus(child);
@@ -1374,8 +1386,6 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
- pci_enable_rrs_sv(dev);
-
if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
!is_cardbus && !broken) {
unsigned int cmax, buses;
@@ -1616,6 +1626,11 @@ void set_pcie_port_type(struct pci_dev *pdev)
pdev->pcie_cap = pos;
pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
pdev->pcie_flags_reg = reg16;
+
+ type = pci_pcie_type(pdev);
+ if (type == PCI_EXP_TYPE_ROOT_PORT)
+ pci_enable_rrs_sv(pdev);
+
pci_read_config_dword(pdev, pos + PCI_EXP_DEVCAP, &pdev->devcap);
pdev->pcie_mpss = FIELD_GET(PCI_EXP_DEVCAP_PAYLOAD, pdev->devcap);
@@ -1632,7 +1647,6 @@ void set_pcie_port_type(struct pci_dev *pdev)
* correctly so detect impossible configurations here and correct
* the port type accordingly.
*/
- type = pci_pcie_type(pdev);
if (type == PCI_EXP_TYPE_DOWNSTREAM) {
/*
* If pdev claims to be downstream port but the parent
@@ -2494,6 +2508,36 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
}
EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
+static struct platform_device *pci_pwrctrl_create_device(struct pci_bus *bus, int devfn)
+{
+ struct pci_host_bridge *host = pci_find_host_bridge(bus);
+ struct platform_device *pdev;
+ struct device_node *np;
+
+ np = of_pci_find_child_device(dev_of_node(&bus->dev), devfn);
+ if (!np || of_find_device_by_node(np))
+ return NULL;
+
+ /*
+	 * First check whether the pwrctrl device really needs to be created.
+	 * It is only needed if at least one power supply is defined in the
+	 * devicetree node of the device.
+ */
+ if (!of_pci_supply_present(np)) {
+ pr_debug("PCI/pwrctrl: Skipping OF node: %s\n", np->name);
+ return NULL;
+ }
+
+ /* Now create the pwrctrl device */
+ pdev = of_platform_device_create(np, NULL, &host->dev);
+ if (!pdev) {
+ pr_err("PCI/pwrctrl: Failed to create pwrctrl device for node: %s\n", np->name);
+ return NULL;
+ }
+
+ return pdev;
+}
+
/*
* Read the config data for a PCI device, sanity-check it,
* and fill in the dev structure.
@@ -2503,6 +2547,15 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
struct pci_dev *dev;
u32 l;
+ /*
+ * Create pwrctrl device (if required) for the PCI device to handle the
+ * power state. If the pwrctrl device is created, then skip scanning
+ * further as the pwrctrl core will rescan the bus after powering on
+ * the device.
+ */
+ if (pci_pwrctrl_create_device(bus, devfn))
+ return NULL;
+
if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
return NULL;
@@ -2565,6 +2618,7 @@ static void pci_init_capabilities(struct pci_dev *dev)
pci_rcec_init(dev); /* Root Complex Event Collector */
pci_doe_init(dev); /* Data Object Exchange */
pci_tph_init(dev); /* TLP Processing Hints */
+ pci_rebar_init(dev); /* Resizable BAR */
pcie_report_downtraining(dev);
pci_init_reset_methods(dev);
@@ -2662,6 +2716,8 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
WARN_ON(ret < 0);
pci_npem_create(dev);
+
+ pci_doe_sysfs_init(dev);
}
struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
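pci_pwrctrl_create_device() above creates a pwrctrl platform device only when the matching devicetree node declares a power supply, and pci_scan_device() then stops scanning that function because the pwrctrl core rescans the bus after power-up. A user-space sketch of that decision flow; both predicates are hypothetical stand-ins for the OF helpers used in the hunk:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for of_pci_find_child_device()/of_pci_supply_present(). */
static bool dt_node_exists(int devfn)		{ return devfn == 0x08; }	/* device 1, fn 0 */
static bool dt_node_has_supply(int devfn)	{ return devfn == 0x08; }

/* Returns true when a pwrctrl device was created and scanning must stop:
 * the pwrctrl core will rescan the bus once the supplies are enabled. */
static bool maybe_create_pwrctrl(int devfn)
{
	if (!dt_node_exists(devfn))
		return false;		/* nothing described in DT: scan normally */
	if (!dt_node_has_supply(devfn))
		return false;		/* no regulator to manage: scan normally */
	printf("creating pwrctrl device for devfn %#x\n", devfn);
	return true;
}

int main(void)
{
	printf("skip scan: %d\n", maybe_create_pwrctrl(0x08));	/* 1 */
	printf("skip scan: %d\n", maybe_create_pwrctrl(0x00));	/* 0 */
	return 0;
}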
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index f967709082d6..9348a0fb8084 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -251,6 +251,10 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
security_locked_down(LOCKDOWN_PCI_ACCESS))
return -EPERM;
+ /* Skip devices with non-mappable BARs */
+ if (dev->non_mappable_bars)
+ return -EINVAL;
+
if (fpriv->mmap_state == pci_mmap_io) {
if (!arch_can_pci_mmap_io())
return -EINVAL;
diff --git a/drivers/pci/pwrctrl/Kconfig b/drivers/pci/pwrctrl/Kconfig
index 54589bb2403b..990cab67d413 100644
--- a/drivers/pci/pwrctrl/Kconfig
+++ b/drivers/pci/pwrctrl/Kconfig
@@ -10,3 +10,14 @@ config PCI_PWRCTL_PWRSEQ
tristate
select POWER_SEQUENCING
select PCI_PWRCTL
+
+config PCI_PWRCTL_SLOT
+ tristate "PCI Power Control driver for PCI slots"
+ select PCI_PWRCTL
+ help
+ Say Y here to enable the PCI Power Control driver to control the power
+ state of PCI slots.
+
+ This is a generic driver that controls the power state of different
+ PCI slots. The voltage regulators powering the rails of the PCI slots
+ are expected to be defined in the devicetree node of the PCI bridge.
diff --git a/drivers/pci/pwrctrl/Makefile b/drivers/pci/pwrctrl/Makefile
index 75c7ce531c7e..ddfb12c5aadf 100644
--- a/drivers/pci/pwrctrl/Makefile
+++ b/drivers/pci/pwrctrl/Makefile
@@ -4,3 +4,6 @@ obj-$(CONFIG_PCI_PWRCTL) += pci-pwrctrl-core.o
pci-pwrctrl-core-y := core.o
obj-$(CONFIG_PCI_PWRCTL_PWRSEQ) += pci-pwrctrl-pwrseq.o
+
+obj-$(CONFIG_PCI_PWRCTL_SLOT) += pci-pwrctl-slot.o
+pci-pwrctl-slot-y := slot.o
diff --git a/drivers/pci/pwrctrl/core.c b/drivers/pci/pwrctrl/core.c
index 2fb174db91e5..9cc7e2b7f2b5 100644
--- a/drivers/pci/pwrctrl/core.c
+++ b/drivers/pci/pwrctrl/core.c
@@ -44,7 +44,7 @@ static void rescan_work_func(struct work_struct *work)
struct pci_pwrctrl, work);
pci_lock_rescan_remove();
- pci_rescan_bus(to_pci_dev(pwrctrl->dev->parent)->bus);
+ pci_rescan_bus(to_pci_host_bridge(pwrctrl->dev->parent)->bus);
pci_unlock_rescan_remove();
}
diff --git a/drivers/pci/pwrctrl/slot.c b/drivers/pci/pwrctrl/slot.c
new file mode 100644
index 000000000000..18becc144913
--- /dev/null
+++ b/drivers/pci/pwrctrl/slot.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pci-pwrctrl.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+struct pci_pwrctrl_slot_data {
+ struct pci_pwrctrl ctx;
+ struct regulator_bulk_data *supplies;
+ int num_supplies;
+};
+
+static void devm_pci_pwrctrl_slot_power_off(void *data)
+{
+ struct pci_pwrctrl_slot_data *slot = data;
+
+ regulator_bulk_disable(slot->num_supplies, slot->supplies);
+ regulator_bulk_free(slot->num_supplies, slot->supplies);
+}
+
+static int pci_pwrctrl_slot_probe(struct platform_device *pdev)
+{
+ struct pci_pwrctrl_slot_data *slot;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ slot = devm_kzalloc(dev, sizeof(*slot), GFP_KERNEL);
+ if (!slot)
+ return -ENOMEM;
+
+ ret = of_regulator_bulk_get_all(dev, dev_of_node(dev),
+ &slot->supplies);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Failed to get slot regulators\n");
+ return ret;
+ }
+
+ slot->num_supplies = ret;
+ ret = regulator_bulk_enable(slot->num_supplies, slot->supplies);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Failed to enable slot regulators\n");
+ goto err_regulator_free;
+ }
+
+ ret = devm_add_action_or_reset(dev, devm_pci_pwrctrl_slot_power_off,
+ slot);
+ if (ret)
+ goto err_regulator_disable;
+
+ pci_pwrctrl_init(&slot->ctx, dev);
+
+ ret = devm_pci_pwrctrl_device_set_ready(dev, &slot->ctx);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register pwrctrl driver\n");
+
+ return 0;
+
+err_regulator_disable:
+ regulator_bulk_disable(slot->num_supplies, slot->supplies);
+err_regulator_free:
+ regulator_bulk_free(slot->num_supplies, slot->supplies);
+
+ return ret;
+}
+
+static const struct of_device_id pci_pwrctrl_slot_of_match[] = {
+ {
+ .compatible = "pciclass,0604",
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pci_pwrctrl_slot_of_match);
+
+static struct platform_driver pci_pwrctrl_slot_driver = {
+ .driver = {
+ .name = "pci-pwrctrl-slot",
+ .of_match_table = pci_pwrctrl_slot_of_match,
+ },
+ .probe = pci_pwrctrl_slot_probe,
+};
+module_platform_driver(pci_pwrctrl_slot_driver);
+
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("Generic PCI Power Control driver for PCI Slots");
+MODULE_LICENSE("GPL");
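The new slot.c probe follows an acquire/enable/register-teardown pattern: bulk-get the slot regulators, enable them, register a devm teardown action, and unwind in reverse order if anything before that registration fails. A plain-C sketch of that unwind ordering with stub steps (no kernel or regulator API involved); in the real driver the devm action takes over the disable/free once it is registered:

#include <stdio.h>

static int get_supplies(void)		{ puts("get supplies");		return 0; }
static int enable_supplies(void)	{ puts("enable supplies");	return 0; }
static void disable_supplies(void)	{ puts("disable supplies"); }
static void free_supplies(void)		{ puts("free supplies"); }
static int register_ready(void)		{ puts("register pwrctrl");	return -1; /* force failure */ }

static int probe(void)
{
	int ret;

	ret = get_supplies();
	if (ret)
		return ret;
	ret = enable_supplies();
	if (ret)
		goto err_free;
	ret = register_ready();
	if (ret)
		goto err_disable;	/* mirrors the slot.c error labels */
	return 0;

err_disable:
	disable_supplies();
err_free:
	free_supplies();
	return ret;
}

int main(void)
{
	printf("probe -> %d\n", probe());
	return 0;
}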
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 82b21e34c545..8d610c17e0f2 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -621,7 +621,7 @@ static void quirk_io(struct pci_dev *dev, int pos, unsigned int size,
{
u32 region;
struct pci_bus_region bus_region;
- struct resource *res = dev->resource + pos;
+ struct resource *res = pci_resource_n(dev, pos);
const char *res_name = pci_resource_name(dev, pos);
pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region);
@@ -671,7 +671,7 @@ static void quirk_io_region(struct pci_dev *dev, int port,
{
u16 region;
struct pci_bus_region bus_region;
- struct resource *res = dev->resource + nr;
+ struct resource *res = pci_resource_n(dev, nr);
pci_read_config_word(dev, port, &region);
region &= ~(size - 1);
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index efc37fcb73e2..445afdfa6498 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -41,7 +41,6 @@ static void pci_stop_dev(struct pci_dev *dev)
if (!pci_dev_test_and_clear_added(dev))
return;
- pci_pwrctrl_unregister(&dev->dev);
device_release_driver(&dev->dev);
pci_proc_detach_device(dev);
pci_remove_sysfs_dev_files(dev);
@@ -53,6 +52,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
if (pci_dev_test_and_set_removed(dev))
return;
+ pci_doe_sysfs_teardown(dev);
pci_npem_remove(dev);
device_del(&dev->dev);
@@ -64,6 +64,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
pci_doe_destroy(dev);
pcie_aspm_exit_link_state(dev);
pci_bridge_d3_update(dev);
+ pci_pwrctrl_unregister(&dev->dev);
pci_free_resources(dev);
put_device(&dev->dev);
}
@@ -163,6 +164,8 @@ void pci_stop_root_bus(struct pci_bus *bus)
&bus->devices, bus_list)
pci_stop_bus_device(child);
+ of_pci_remove_host_bridge_node(host_bridge);
+
/* stop the host bridge */
device_release_driver(&host_bridge->dev);
}
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 5e00cecf1f1a..54d6f4fa3ce1 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -127,12 +127,42 @@ static resource_size_t get_res_add_align(struct list_head *head,
return dev_res ? dev_res->min_align : 0;
}
+static void restore_dev_resource(struct pci_dev_resource *dev_res)
+{
+ struct resource *res = dev_res->res;
+
+ res->start = dev_res->start;
+ res->end = dev_res->end;
+ res->flags = dev_res->flags;
+}
+
+static bool pdev_resources_assignable(struct pci_dev *dev)
+{
+ u16 class = dev->class >> 8, command;
+
+ /* Don't touch classless devices or host bridges or IOAPICs */
+ if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
+ return false;
+
+ /* Don't touch IOAPIC devices already enabled by firmware */
+ if (class == PCI_CLASS_SYSTEM_PIC) {
+ pci_read_config_word(dev, PCI_COMMAND, &command);
+ if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
+ return false;
+ }
+
+ return true;
+}
+
/* Sort resources by alignment */
static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
{
struct resource *r;
int i;
+ if (!pdev_resources_assignable(dev))
+ return;
+
pci_dev_for_each_resource(dev, r, i) {
const char *r_name = pci_resource_name(dev, i);
struct pci_dev_resource *dev_res, *tmp;
@@ -176,23 +206,16 @@ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
}
}
-static void __dev_sort_resources(struct pci_dev *dev, struct list_head *head)
+bool pci_resource_is_optional(const struct pci_dev *dev, int resno)
{
- u16 class = dev->class >> 8;
-
- /* Don't touch classless devices or host bridges or IOAPICs */
- if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
- return;
+ const struct resource *res = pci_resource_n(dev, resno);
- /* Don't touch IOAPIC devices already enabled by firmware */
- if (class == PCI_CLASS_SYSTEM_PIC) {
- u16 command;
- pci_read_config_word(dev, PCI_COMMAND, &command);
- if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
- return;
- }
+ if (pci_resource_is_iov(resno))
+ return true;
+ if (resno == PCI_ROM_RESOURCE && !(res->flags & IORESOURCE_ROM_ENABLE))
+ return true;
- pdev_sort_resources(dev, head);
+ return false;
}
static inline void reset_resource(struct resource *res)
@@ -216,10 +239,11 @@ static inline void reset_resource(struct resource *res)
static void reassign_resources_sorted(struct list_head *realloc_head,
struct list_head *head)
{
- struct resource *res;
- const char *res_name;
struct pci_dev_resource *add_res, *tmp;
struct pci_dev_resource *dev_res;
+ struct pci_dev *dev;
+ struct resource *res;
+ const char *res_name;
resource_size_t add_size, align;
int idx;
@@ -227,9 +251,15 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
bool found_match = false;
res = add_res->res;
+ dev = add_res->dev;
+ idx = pci_resource_num(dev, res);
- /* Skip resource that has been reset */
- if (!res->flags)
+ /*
+		 * Skip a resource that failed the earlier assignment and is
+		 * not optional, as it would just fail again.
+ */
+ if (!res->parent && resource_size(res) &&
+ !pci_resource_is_optional(dev, idx))
goto out;
/* Skip this resource if not found in head list */
@@ -242,20 +272,22 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
if (!found_match) /* Just skip */
continue;
- idx = res - &add_res->dev->resource[0];
- res_name = pci_resource_name(add_res->dev, idx);
+ res_name = pci_resource_name(dev, idx);
add_size = add_res->add_size;
align = add_res->min_align;
- if (!resource_size(res)) {
- resource_set_range(res, align, add_size);
- if (pci_assign_resource(add_res->dev, idx))
- reset_resource(res);
- } else {
+ if (!res->parent) {
+ resource_set_range(res, align,
+ resource_size(res) + add_size);
+ if (pci_assign_resource(dev, idx)) {
+ pci_dbg(dev,
+ "%s %pR: ignoring failure in optional allocation\n",
+ res_name, res);
+ }
+ } else if (add_size > 0) {
res->flags |= add_res->flags &
(IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
- if (pci_reassign_resource(add_res->dev, idx,
- add_size, align))
- pci_info(add_res->dev, "%s %pR: failed to add %llx\n",
+ if (pci_reassign_resource(dev, idx, add_size, align))
+ pci_info(dev, "%s %pR: failed to add optional %llx\n",
res_name, res,
(unsigned long long) add_size);
}
@@ -271,36 +303,39 @@ out:
* @head: Head of the list tracking requests for resources
* @fail_head: Head of the list tracking requests that could not be
* allocated
+ * @optional: Also assign optional resources
*
* Satisfy resource requests of each element in the list. Add requests that
* could not be satisfied to the failed_list.
*/
static void assign_requested_resources_sorted(struct list_head *head,
- struct list_head *fail_head)
+ struct list_head *fail_head,
+ bool optional)
{
- struct resource *res;
struct pci_dev_resource *dev_res;
+ struct resource *res;
+ struct pci_dev *dev;
+ bool optional_res;
int idx;
list_for_each_entry(dev_res, head, list) {
res = dev_res->res;
- idx = res - &dev_res->dev->resource[0];
- if (resource_size(res) &&
- pci_assign_resource(dev_res->dev, idx)) {
+ dev = dev_res->dev;
+ idx = pci_resource_num(dev, res);
+ optional_res = pci_resource_is_optional(dev, idx);
+
+ if (!resource_size(res))
+ continue;
+
+ if (!optional && optional_res)
+ continue;
+
+ if (pci_assign_resource(dev, idx)) {
if (fail_head) {
- /*
- * If the failed resource is a ROM BAR and
- * it will be enabled later, don't add it
- * to the list.
- */
- if (!((idx == PCI_ROM_RESOURCE) &&
- (!(res->flags & IORESOURCE_ROM_ENABLE))))
- add_to_list(fail_head,
- dev_res->dev, res,
- 0 /* don't care */,
- 0 /* don't care */);
+ add_to_list(fail_head, dev, res,
+ 0 /* don't care */,
+ 0 /* don't care */);
}
- reset_resource(res);
}
}
}
@@ -345,6 +380,20 @@ static bool pci_need_to_release(unsigned long mask, struct resource *res)
return false; /* Should not get here */
}
+/* Return: @true if assignment of a required resource failed. */
+static bool pci_required_resource_failed(struct list_head *fail_head)
+{
+ struct pci_dev_resource *fail_res;
+
+ list_for_each_entry(fail_res, fail_head, list) {
+ int idx = pci_resource_num(fail_res->dev, fail_res->res);
+
+ if (!pci_resource_is_optional(fail_res->dev, idx))
+ return true;
+ }
+ return false;
+}
+
static void __assign_resources_sorted(struct list_head *head,
struct list_head *realloc_head,
struct list_head *fail_head)
@@ -354,9 +403,11 @@ static void __assign_resources_sorted(struct list_head *head,
* adjacent, so later reassign can not reallocate them one by one in
* parent resource window.
*
- * Try to assign requested + add_size at beginning. If could do that,
- * could get out early. If could not do that, we still try to assign
- * requested at first, then try to reassign add_size for some resources.
+	 * Try to assign the required and any optional resources at the
+	 * beginning (add_size included). If all required resources were
+	 * successfully assigned, get out early. Otherwise, assign the
+	 * required resources first, then try to reassign some optional
+	 * resources.
*
* Separate three resource type checking if we need to release
* assigned resource after requested + add_size try.
@@ -372,27 +423,36 @@ static void __assign_resources_sorted(struct list_head *head,
*/
LIST_HEAD(save_head);
LIST_HEAD(local_fail_head);
+ LIST_HEAD(dummy_head);
struct pci_dev_resource *save_res;
struct pci_dev_resource *dev_res, *tmp_res, *dev_res2;
+ struct resource *res;
+ struct pci_dev *dev;
+ const char *res_name;
+ int idx;
unsigned long fail_type;
resource_size_t add_align, align;
+ if (!realloc_head)
+ realloc_head = &dummy_head;
+
/* Check if optional add_size is there */
- if (!realloc_head || list_empty(realloc_head))
- goto requested_and_reassign;
+ if (list_empty(realloc_head))
+ goto assign;
/* Save original start, end, flags etc at first */
list_for_each_entry(dev_res, head, list) {
if (add_to_list(&save_head, dev_res->dev, dev_res->res, 0, 0)) {
free_list(&save_head);
- goto requested_and_reassign;
+ goto assign;
}
}
/* Update res in head list with add_size in realloc_head list */
list_for_each_entry_safe(dev_res, tmp_res, head, list) {
- dev_res->res->end += get_res_add_size(realloc_head,
- dev_res->res);
+ res = dev_res->res;
+
+ res->end += get_res_add_size(realloc_head, res);
/*
* There are two kinds of additional resources in the list:
@@ -400,10 +460,10 @@ static void __assign_resources_sorted(struct list_head *head,
* 2. SR-IOV resource -- IORESOURCE_SIZEALIGN
* Here just fix the additional alignment for bridge
*/
- if (!(dev_res->res->flags & IORESOURCE_STARTALIGN))
+ if (!(res->flags & IORESOURCE_STARTALIGN))
continue;
- add_align = get_res_add_align(realloc_head, dev_res->res);
+ add_align = get_res_add_align(realloc_head, res);
/*
* The "head" list is sorted by alignment so resources with
@@ -412,11 +472,8 @@ static void __assign_resources_sorted(struct list_head *head,
* need to reorder the list by alignment to make it
* consistent.
*/
- if (add_align > dev_res->res->start) {
- resource_size_t r_size = resource_size(dev_res->res);
-
- dev_res->res->start = add_align;
- dev_res->res->end = add_align + r_size - 1;
+ if (add_align > res->start) {
+ resource_set_range(res, add_align, resource_size(res));
list_for_each_entry(dev_res2, head, list) {
align = pci_resource_alignment(dev_res2->dev,
@@ -431,54 +488,94 @@ static void __assign_resources_sorted(struct list_head *head,
}
- /* Try updated head list with add_size added */
- assign_requested_resources_sorted(head, &local_fail_head);
+assign:
+ assign_requested_resources_sorted(head, &local_fail_head, true);
- /* All assigned with add_size? */
+ /* All non-optional resources assigned? */
if (list_empty(&local_fail_head)) {
/* Remove head list from realloc_head list */
list_for_each_entry(dev_res, head, list)
remove_from_list(realloc_head, dev_res->res);
free_list(&save_head);
- free_list(head);
- return;
+ goto out;
+ }
+
+	/* If only optional resources failed and realloc_head is empty, nothing more to do. */
+ if (!pci_required_resource_failed(&local_fail_head) &&
+ list_empty(realloc_head)) {
+ list_for_each_entry(save_res, &save_head, list) {
+ struct resource *res = save_res->res;
+
+ if (res->parent)
+ continue;
+
+ restore_dev_resource(save_res);
+ }
+ free_list(&local_fail_head);
+ free_list(&save_head);
+ goto out;
}
/* Check failed type */
fail_type = pci_fail_res_type_mask(&local_fail_head);
/* Remove not need to be released assigned res from head list etc */
- list_for_each_entry_safe(dev_res, tmp_res, head, list)
- if (dev_res->res->parent &&
- !pci_need_to_release(fail_type, dev_res->res)) {
+ list_for_each_entry_safe(dev_res, tmp_res, head, list) {
+ res = dev_res->res;
+
+ if (res->parent && !pci_need_to_release(fail_type, res)) {
/* Remove it from realloc_head list */
- remove_from_list(realloc_head, dev_res->res);
- remove_from_list(&save_head, dev_res->res);
+ remove_from_list(realloc_head, res);
+ remove_from_list(&save_head, res);
list_del(&dev_res->list);
kfree(dev_res);
}
+ }
free_list(&local_fail_head);
/* Release assigned resource */
- list_for_each_entry(dev_res, head, list)
- if (dev_res->res->parent)
- release_resource(dev_res->res);
- /* Restore start/end/flags from saved list */
- list_for_each_entry(save_res, &save_head, list) {
- struct resource *res = save_res->res;
+ list_for_each_entry(dev_res, head, list) {
+ res = dev_res->res;
+ dev = dev_res->dev;
+
+ if (!res->parent)
+ continue;
+
+ idx = pci_resource_num(dev, res);
+ res_name = pci_resource_name(dev, idx);
+ pci_dbg(dev, "%s %pR: releasing\n", res_name, res);
- res->start = save_res->start;
- res->end = save_res->end;
- res->flags = save_res->flags;
+ release_resource(res);
}
+ /* Restore start/end/flags from saved list */
+ list_for_each_entry(save_res, &save_head, list)
+ restore_dev_resource(save_res);
free_list(&save_head);
-requested_and_reassign:
/* Satisfy the must-have resource requests */
- assign_requested_resources_sorted(head, fail_head);
+ assign_requested_resources_sorted(head, NULL, false);
/* Try to satisfy any additional optional resource requests */
- if (realloc_head)
+ if (!list_empty(realloc_head))
reassign_resources_sorted(realloc_head, head);
+
+out:
+ /* Reset any failed resource, cannot use fail_head as it can be NULL. */
+ list_for_each_entry(dev_res, head, list) {
+ res = dev_res->res;
+ dev = dev_res->dev;
+
+ if (res->parent)
+ continue;
+
+ if (fail_head) {
+ add_to_list(fail_head, dev, res,
+ 0 /* don't care */,
+ 0 /* don't care */);
+ }
+
+ reset_resource(res);
+ }
+
free_list(head);
}
@@ -488,7 +585,7 @@ static void pdev_assign_resources_sorted(struct pci_dev *dev,
{
LIST_HEAD(head);
- __dev_sort_resources(dev, &head);
+ pdev_sort_resources(dev, &head);
__assign_resources_sorted(&head, add_head, fail_head);
}
@@ -501,7 +598,7 @@ static void pbus_assign_resources_sorted(const struct pci_bus *bus,
LIST_HEAD(head);
list_for_each_entry(dev, &bus->devices, bus_list)
- __dev_sort_resources(dev, &head);
+ pdev_sort_resources(dev, &head);
__assign_resources_sorted(&head, realloc_head, fail_head);
}
@@ -694,7 +791,7 @@ void __weak pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
{
}
-void pci_setup_bridge(struct pci_bus *bus)
+static void pci_setup_bridge(struct pci_bus *bus)
{
unsigned long type = IORESOURCE_IO | IORESOURCE_MEM |
IORESOURCE_PREFETCH;
@@ -814,11 +911,9 @@ static resource_size_t calculate_iosize(resource_size_t size,
size = (size & 0xff) + ((size & ~0xffUL) << 2);
#endif
size = size + size1;
- if (size < old_size)
- size = old_size;
- size = ALIGN(max(size, add_size) + children_add_size, align);
- return size;
+ size = max(size, add_size) + children_add_size;
+ return ALIGN(max(size, old_size), align);
}
static resource_size_t calculate_memsize(resource_size_t size,
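The calculate_iosize() rework above adds the optional add_size and children_add_size first and only then clamps the result to the window's current size and aligns it. A tiny numeric sketch of the new ordering, ignoring the ISA-related size1 term:

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static unsigned long iosize(unsigned long size, unsigned long old_size,
			    unsigned long add_size,
			    unsigned long children_add_size,
			    unsigned long align)
{
	/* New ordering from the hunk above: grow first, clamp and align last. */
	size = (size > add_size ? size : add_size) + children_add_size;
	return ALIGN_UP(size > old_size ? size : old_size, align);
}

int main(void)
{
	/* 0x300 of requests + 0x100 optional, 4K-aligned window, old size 0x1000. */
	printf("%#lx\n", iosize(0x300, 0x1000, 0x100, 0, 0x1000));	/* 0x1000 */
	printf("%#lx\n", iosize(0x1300, 0x1000, 0x100, 0, 0x1000));	/* 0x2000 */
	return 0;
}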
@@ -843,9 +938,9 @@ resource_size_t __weak pcibios_window_alignment(struct pci_bus *bus,
return 1;
}
-#define PCI_P2P_DEFAULT_MEM_ALIGN 0x100000 /* 1MiB */
-#define PCI_P2P_DEFAULT_IO_ALIGN 0x1000 /* 4KiB */
-#define PCI_P2P_DEFAULT_IO_ALIGN_1K 0x400 /* 1KiB */
+#define PCI_P2P_DEFAULT_MEM_ALIGN SZ_1M
+#define PCI_P2P_DEFAULT_IO_ALIGN SZ_4K
+#define PCI_P2P_DEFAULT_IO_ALIGN_1K SZ_1K
static resource_size_t window_alignment(struct pci_bus *bus, unsigned long type)
{
@@ -910,7 +1005,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
continue;
r_size = resource_size(r);
- if (r_size < 0x400)
+ if (r_size < SZ_1K)
/* Might be re-aligned for ISA */
size += r_size;
else
@@ -927,9 +1022,14 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
size0 = calculate_iosize(size, min_size, size1, 0, 0,
resource_size(b_res), min_align);
- size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? size0 :
- calculate_iosize(size, min_size, size1, add_size, children_add_size,
- resource_size(b_res), min_align);
+
+ size1 = size0;
+ if (realloc_head && (add_size > 0 || children_add_size > 0)) {
+ size1 = calculate_iosize(size, min_size, size1, add_size,
+ children_add_size, resource_size(b_res),
+ min_align);
+ }
+
if (!size0 && !size1) {
if (bus->self && (b_res->start || b_res->end))
pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n",
@@ -1058,8 +1158,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
struct list_head *realloc_head)
{
struct pci_dev *dev;
- resource_size_t min_align, win_align, align, size, size0, size1;
- resource_size_t aligns[24]; /* Alignments from 1MB to 8TB */
+ resource_size_t min_align, win_align, align, size, size0, size1 = 0;
+ resource_size_t aligns[28]; /* Alignments from 1MB to 128TB */
int order, max_order;
struct resource *b_res = find_bus_resource_of_type(bus,
mask | IORESOURCE_PREFETCH, type);
@@ -1092,17 +1192,15 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
(r->flags & mask) != type3))
continue;
r_size = resource_size(r);
-#ifdef CONFIG_PCI_IOV
+
/* Put SRIOV requested res to the optional list */
- if (realloc_head && i >= PCI_IOV_RESOURCES &&
- i <= PCI_IOV_RESOURCE_END) {
+ if (realloc_head && pci_resource_is_optional(dev, i)) {
add_align = max(pci_resource_alignment(dev, r), add_align);
- r->end = r->start - 1;
- add_to_list(realloc_head, dev, r, r_size, 0 /* Don't care */);
+ add_to_list(realloc_head, dev, r, 0, 0 /* Don't care */);
children_add_size += r_size;
continue;
}
-#endif
+
/*
* aligns[0] is for 1MB (since bridge memory
* windows are always at least 1MB aligned), so
@@ -1141,22 +1239,35 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
min_align = calculate_mem_align(aligns, max_order);
min_align = max(min_align, win_align);
size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), min_align);
- add_align = max(min_align, add_align);
if (bus->self && size0 &&
!pbus_upstream_space_available(bus, mask | IORESOURCE_PREFETCH, type,
- size0, add_align)) {
+ size0, min_align)) {
min_align = 1ULL << (max_order + __ffs(SZ_1M));
min_align = max(min_align, win_align);
size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), win_align);
- add_align = win_align;
pci_info(bus->self, "bridge window %pR to %pR requires relaxed alignment rules\n",
b_res, &bus->busn_res);
}
- size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? size0 :
- calculate_memsize(size, min_size, add_size, children_add_size,
- resource_size(b_res), add_align);
+ if (realloc_head && (add_size > 0 || children_add_size > 0)) {
+ add_align = max(min_align, add_align);
+ size1 = calculate_memsize(size, min_size, add_size, children_add_size,
+ resource_size(b_res), add_align);
+
+ if (bus->self && size1 &&
+ !pbus_upstream_space_available(bus, mask | IORESOURCE_PREFETCH, type,
+ size1, add_align)) {
+ min_align = 1ULL << (max_order + __ffs(SZ_1M));
+ min_align = max(min_align, win_align);
+ size1 = calculate_memsize(size, min_size, add_size, children_add_size,
+ resource_size(b_res), win_align);
+ pci_info(bus->self,
+ "bridge window %pR to %pR requires relaxed alignment rules\n",
+ b_res, &bus->busn_res);
+ }
+ }
+
if (!size0 && !size1) {
if (bus->self && (b_res->start || b_res->end))
pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n",
@@ -1164,8 +1275,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
b_res->flags = 0;
return 0;
}
- b_res->start = min_align;
- b_res->end = size0 + min_align - 1;
+
+ resource_set_range(b_res, min_align, size0);
b_res->flags |= IORESOURCE_STARTALIGN;
if (bus->self && size1 > size0 && realloc_head) {
add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align);
@@ -1640,8 +1751,7 @@ static void pci_bridge_release_resources(struct pci_bus *bus,
pci_info(dev, "resource %d %pR released\n",
PCI_BRIDGE_RESOURCES + idx, r);
/* Keep the old size */
- r->end = resource_size(r) - 1;
- r->start = 0;
+ resource_set_range(r, 0, resource_size(r));
r->flags = 0;
/* Avoiding touch the one without PREF */
@@ -1880,7 +1990,7 @@ static void remove_dev_resources(struct pci_dev *dev, struct resource *io,
* Make sure prefetchable memory is reduced from
* the correct resource. Specifically we put 32-bit
* prefetchable memory in non-prefetchable window
- * if there is an 64-bit prefetchable window.
+ * if there is a 64-bit prefetchable window.
*
* See comments in __pci_bus_size_bridges() for
* more information.
@@ -2102,13 +2212,49 @@ pci_root_bus_distribute_available_resources(struct pci_bus *bus,
* in case of root bus.
*/
if (bridge && pci_bridge_resources_not_assigned(dev))
- pci_bridge_distribute_available_resources(bridge,
- add_list);
+ pci_bridge_distribute_available_resources(dev, add_list);
else
pci_root_bus_distribute_available_resources(b, add_list);
}
}
+static void pci_prepare_next_assign_round(struct list_head *fail_head,
+ int tried_times,
+ enum release_type rel_type)
+{
+ struct pci_dev_resource *fail_res;
+
+ pr_info("PCI: No. %d try to assign unassigned res\n", tried_times + 1);
+
+ /*
+ * Try to release leaf bridge's resources that aren't big
+ * enough to contain child device resources.
+ */
+ list_for_each_entry(fail_res, fail_head, list) {
+ pci_bus_release_bridge_resources(fail_res->dev->bus,
+ fail_res->flags & PCI_RES_TYPE_MASK,
+ rel_type);
+ }
+
+ /* Restore size and flags */
+ list_for_each_entry(fail_res, fail_head, list) {
+ struct resource *res = fail_res->res;
+ struct pci_dev *dev = fail_res->dev;
+ int idx = pci_resource_num(dev, res);
+
+ restore_dev_resource(fail_res);
+
+ if (!pci_is_bridge(dev))
+ continue;
+
+ if (idx >= PCI_BRIDGE_RESOURCES &&
+ idx <= PCI_BRIDGE_RESOURCE_END)
+ res->flags = 0;
+ }
+
+ free_list(fail_head);
+}
+
/*
* First try will not touch PCI bridge res.
* Second and later try will clear small leaf bridge res.
@@ -2122,7 +2268,6 @@ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
int tried_times = 0;
enum release_type rel_type = leaf_only;
LIST_HEAD(fail_head);
- struct pci_dev_resource *fail_res;
int pci_try_num = 1;
enum enable_type enable_local;
@@ -2136,78 +2281,50 @@ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
max_depth, pci_try_num);
}
-again:
- /*
- * Last try will use add_list, otherwise will try good to have as must
- * have, so can realloc parent bridge resource
- */
- if (tried_times + 1 == pci_try_num)
- add_list = &realloc_head;
- /*
- * Depth first, calculate sizes and alignments of all subordinate buses.
- */
- __pci_bus_size_bridges(bus, add_list);
-
- pci_root_bus_distribute_available_resources(bus, add_list);
-
- /* Depth last, allocate resources and update the hardware. */
- __pci_bus_assign_resources(bus, add_list, &fail_head);
- if (add_list)
- BUG_ON(!list_empty(add_list));
- tried_times++;
-
- /* Any device complain? */
- if (list_empty(&fail_head))
- goto dump;
-
- if (tried_times >= pci_try_num) {
- if (enable_local == undefined)
- dev_info(&bus->dev, "Some PCI device resources are unassigned, try booting with pci=realloc\n");
- else if (enable_local == auto_enabled)
- dev_info(&bus->dev, "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n");
-
- free_list(&fail_head);
- goto dump;
- }
+ while (1) {
+ /*
+		 * The last try will use add_list; otherwise, treat nice-to-have
+		 * resources as must-have so the parent bridge resource can be
+		 * reallocated.
+ */
+ if (tried_times + 1 == pci_try_num)
+ add_list = &realloc_head;
+ /*
+ * Depth first, calculate sizes and alignments of all
+ * subordinate buses.
+ */
+ __pci_bus_size_bridges(bus, add_list);
- dev_info(&bus->dev, "No. %d try to assign unassigned res\n",
- tried_times + 1);
+ pci_root_bus_distribute_available_resources(bus, add_list);
- /* Third times and later will not check if it is leaf */
- if ((tried_times + 1) > 2)
- rel_type = whole_subtree;
+ /* Depth last, allocate resources and update the hardware. */
+ __pci_bus_assign_resources(bus, add_list, &fail_head);
+ if (add_list)
+ BUG_ON(!list_empty(add_list));
+ tried_times++;
- /*
- * Try to release leaf bridge's resources that doesn't fit resource of
- * child device under that bridge.
- */
- list_for_each_entry(fail_res, &fail_head, list)
- pci_bus_release_bridge_resources(fail_res->dev->bus,
- fail_res->flags & PCI_RES_TYPE_MASK,
- rel_type);
+ /* Any device complain? */
+ if (list_empty(&fail_head))
+ break;
- /* Restore size and flags */
- list_for_each_entry(fail_res, &fail_head, list) {
- struct resource *res = fail_res->res;
- int idx;
+ if (tried_times >= pci_try_num) {
+ if (enable_local == undefined) {
+ dev_info(&bus->dev,
+ "Some PCI device resources are unassigned, try booting with pci=realloc\n");
+ } else if (enable_local == auto_enabled) {
+ dev_info(&bus->dev,
+ "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n");
+ }
+ free_list(&fail_head);
+ break;
+ }
- res->start = fail_res->start;
- res->end = fail_res->end;
- res->flags = fail_res->flags;
+		/* The third time and later will not check if it is a leaf */
+ if (tried_times + 1 > 2)
+ rel_type = whole_subtree;
- if (pci_is_bridge(fail_res->dev)) {
- idx = res - &fail_res->dev->resource[0];
- if (idx >= PCI_BRIDGE_RESOURCES &&
- idx <= PCI_BRIDGE_RESOURCE_END)
- res->flags = 0;
- }
+ pci_prepare_next_assign_round(&fail_head, tried_times, rel_type);
}
- free_list(&fail_head);
- goto again;
-
-dump:
- /* Dump the resource on buses */
pci_bus_dump_resources(bus);
}
@@ -2229,71 +2346,40 @@ void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
struct pci_bus *parent = bridge->subordinate;
/* List of resources that want additional resources */
LIST_HEAD(add_list);
-
int tried_times = 0;
LIST_HEAD(fail_head);
- struct pci_dev_resource *fail_res;
- int retval;
-
-again:
- __pci_bus_size_bridges(parent, &add_list);
-
- /*
- * Distribute remaining resources (if any) equally between hotplug
- * bridges below. This makes it possible to extend the hierarchy
- * later without running out of resources.
- */
- pci_bridge_distribute_available_resources(bridge, &add_list);
-
- __pci_bridge_assign_resources(bridge, &add_list, &fail_head);
- BUG_ON(!list_empty(&add_list));
- tried_times++;
-
- if (list_empty(&fail_head))
- goto enable_all;
-
- if (tried_times >= 2) {
- /* Still fail, don't need to try more */
- free_list(&fail_head);
- goto enable_all;
- }
+ int ret;
- printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
- tried_times + 1);
+ while (1) {
+ __pci_bus_size_bridges(parent, &add_list);
- /*
- * Try to release leaf bridge's resources that aren't big enough
- * to contain child device resources.
- */
- list_for_each_entry(fail_res, &fail_head, list)
- pci_bus_release_bridge_resources(fail_res->dev->bus,
- fail_res->flags & PCI_RES_TYPE_MASK,
- whole_subtree);
+ /*
+ * Distribute remaining resources (if any) equally between
+ * hotplug bridges below. This makes it possible to extend
+ * the hierarchy later without running out of resources.
+ */
+ pci_bridge_distribute_available_resources(bridge, &add_list);
- /* Restore size and flags */
- list_for_each_entry(fail_res, &fail_head, list) {
- struct resource *res = fail_res->res;
- int idx;
+ __pci_bridge_assign_resources(bridge, &add_list, &fail_head);
+ BUG_ON(!list_empty(&add_list));
+ tried_times++;
- res->start = fail_res->start;
- res->end = fail_res->end;
- res->flags = fail_res->flags;
+ if (list_empty(&fail_head))
+ break;
- if (pci_is_bridge(fail_res->dev)) {
- idx = res - &fail_res->dev->resource[0];
- if (idx >= PCI_BRIDGE_RESOURCES &&
- idx <= PCI_BRIDGE_RESOURCE_END)
- res->flags = 0;
+ if (tried_times >= 2) {
+ /* Still fail, don't need to try more */
+ free_list(&fail_head);
+ break;
}
- }
- free_list(&fail_head);
- goto again;
+ pci_prepare_next_assign_round(&fail_head, tried_times,
+ whole_subtree);
+ }
-enable_all:
- retval = pci_reenable_device(bridge);
- if (retval)
- pci_err(bridge, "Error reenabling bridge (%d)\n", retval);
+ ret = pci_reenable_device(bridge);
+ if (ret)
+ pci_err(bridge, "Error reenabling bridge (%d)\n", ret);
pci_set_master(bridge);
}
EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
@@ -2373,13 +2459,8 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
cleanup:
/* Restore size and flags */
- list_for_each_entry(dev_res, &failed, list) {
- struct resource *res = dev_res->res;
-
- res->start = dev_res->start;
- res->end = dev_res->end;
- res->flags = dev_res->flags;
- }
+ list_for_each_entry(dev_res, &failed, list)
+ restore_dev_resource(dev_res);
free_list(&failed);
/* Revert to the old configuration */
@@ -2387,11 +2468,9 @@ cleanup:
struct resource *res = dev_res->res;
bridge = dev_res->dev;
- i = res - bridge->resource;
+ i = pci_resource_num(bridge, res);
- res->start = dev_res->start;
- res->end = dev_res->end;
- res->flags = dev_res->flags;
+ restore_dev_resource(dev_res);
pci_claim_resource(bridge, i);
pci_setup_bridge(bridge->subordinate);
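Throughout the setup-bus rework, resources are classified as optional (SR-IOV BARs, and ROM BARs whose enable bit is clear) and required resources are assigned before optional ones are retried. A small sketch of that classification and ordering; the index and flag constants are local stand-ins, and the real __assign_resources_sorted() first tries everything together before falling back to this required-then-optional split:

#include <stdbool.h>
#include <stdio.h>

/* Local stand-ins for the kernel's resource numbering and ROM flag. */
#define ROM_RESOURCE	6
#define IOV_FIRST	7
#define IOV_LAST	12
#define ROM_ENABLE	0x1

struct res { int idx; unsigned int flags; unsigned long size; bool assigned; };

static bool is_optional(const struct res *r)
{
	if (r->idx >= IOV_FIRST && r->idx <= IOV_LAST)
		return true;					/* SR-IOV BAR */
	if (r->idx == ROM_RESOURCE && !(r->flags & ROM_ENABLE))
		return true;					/* disabled ROM BAR */
	return false;
}

/* Pass 1: required only.  Pass 2: whatever is still unassigned. */
static void assign_pass(struct res *r, int n, bool optional_too)
{
	for (int i = 0; i < n; i++)
		if (!r[i].assigned && r[i].size &&
		    (optional_too || !is_optional(&r[i])))
			r[i].assigned = true;	/* pretend assignment succeeds */
}

int main(void)
{
	struct res r[] = {
		{ 0, 0, 0x1000, false },	/* BAR 0: required */
		{ 6, 0, 0x8000, false },	/* ROM, not enabled: optional */
		{ 7, 0, 0x4000, false },	/* IOV BAR: optional */
	};
	assign_pass(r, 3, false);
	printf("after required pass: %d %d %d\n",
	       r[0].assigned, r[1].assigned, r[2].assigned);	/* 1 0 0 */
	assign_pass(r, 3, true);
	printf("after optional pass: %d %d %d\n",
	       r[0].assigned, r[1].assigned, r[2].assigned);	/* 1 1 1 */
	return 0;
}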
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index ca14576bf2bf..c6657cdd06f6 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -29,7 +29,7 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
u16 cmd;
u32 new, check, mask;
int reg;
- struct resource *res = dev->resource + resno;
+ struct resource *res = pci_resource_n(dev, resno);
const char *res_name = pci_resource_name(dev, resno);
/* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
@@ -127,10 +127,8 @@ void pci_update_resource(struct pci_dev *dev, int resno)
{
if (resno <= PCI_ROM_RESOURCE)
pci_std_update_resource(dev, resno);
-#ifdef CONFIG_PCI_IOV
- else if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
+ else if (pci_resource_is_iov(resno))
pci_iov_update_resource(dev, resno);
-#endif
}
int pci_claim_resource(struct pci_dev *dev, int resource)
@@ -262,7 +260,7 @@ resource_size_t __weak pcibios_align_resource(void *data,
static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
int resno, resource_size_t size, resource_size_t align)
{
- struct resource *res = dev->resource + resno;
+ struct resource *res = pci_resource_n(dev, resno);
resource_size_t min;
int ret;
@@ -325,7 +323,7 @@ static int _pci_assign_resource(struct pci_dev *dev, int resno,
int pci_assign_resource(struct pci_dev *dev, int resno)
{
- struct resource *res = dev->resource + resno;
+ struct resource *res = pci_resource_n(dev, resno);
const char *res_name = pci_resource_name(dev, resno);
resource_size_t align, size;
int ret;
@@ -372,7 +370,7 @@ EXPORT_SYMBOL(pci_assign_resource);
int pci_reassign_resource(struct pci_dev *dev, int resno,
resource_size_t addsize, resource_size_t min_align)
{
- struct resource *res = dev->resource + resno;
+ struct resource *res = pci_resource_n(dev, resno);
const char *res_name = pci_resource_name(dev, resno);
unsigned long flags;
resource_size_t new_size;
@@ -389,7 +387,6 @@ int pci_reassign_resource(struct pci_dev *dev, int resno,
return -EINVAL;
}
- /* already aligned with min_align */
new_size = resource_size(res) + addsize;
ret = _pci_assign_resource(dev, resno, new_size, min_align);
if (ret) {
@@ -411,14 +408,14 @@ int pci_reassign_resource(struct pci_dev *dev, int resno,
void pci_release_resource(struct pci_dev *dev, int resno)
{
- struct resource *res = dev->resource + resno;
+ struct resource *res = pci_resource_n(dev, resno);
const char *res_name = pci_resource_name(dev, resno);
- pci_info(dev, "%s %pR: releasing\n", res_name, res);
-
if (!res->parent)
return;
+ pci_info(dev, "%s %pR: releasing\n", res_name, res);
+
release_resource(res);
res->end = resource_size(res) - 1;
res->start = 0;
@@ -428,7 +425,7 @@ EXPORT_SYMBOL(pci_release_resource);
int pci_resize_resource(struct pci_dev *dev, int resno, int size)
{
- struct resource *res = dev->resource + resno;
+ struct resource *res = pci_resource_n(dev, resno);
struct pci_host_bridge *host;
int old, ret;
u32 sizes;
@@ -497,8 +494,7 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
continue;
- if ((i == PCI_ROM_RESOURCE) &&
- (!(r->flags & IORESOURCE_ROM_ENABLE)))
+ if (pci_resource_is_optional(dev, i))
continue;
if (r->flags & IORESOURCE_UNSET) {
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 36b44be0489d..50fb3eb595fe 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -7,7 +7,6 @@
#include <linux/kobject.h>
#include <linux/slab.h>
-#include <linux/module.h>
#include <linux/pci.h>
#include <linux/err.h>
#include "pci.h"
@@ -325,49 +324,6 @@ void pci_destroy_slot(struct pci_slot *slot)
}
EXPORT_SYMBOL_GPL(pci_destroy_slot);
-#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
-#include <linux/pci_hotplug.h>
-/**
- * pci_hp_create_module_link - create symbolic link to hotplug driver module
- * @pci_slot: struct pci_slot
- *
- * Helper function for pci_hotplug_core.c to create symbolic link to
- * the hotplug driver module.
- */
-void pci_hp_create_module_link(struct pci_slot *pci_slot)
-{
- struct hotplug_slot *slot = pci_slot->hotplug;
- struct kobject *kobj = NULL;
- int ret;
-
- if (!slot || !slot->ops)
- return;
- kobj = kset_find_obj(module_kset, slot->mod_name);
- if (!kobj)
- return;
- ret = sysfs_create_link(&pci_slot->kobj, kobj, "module");
- if (ret)
- dev_err(&pci_slot->bus->dev, "Error creating sysfs link (%d)\n",
- ret);
- kobject_put(kobj);
-}
-EXPORT_SYMBOL_GPL(pci_hp_create_module_link);
-
-/**
- * pci_hp_remove_module_link - remove symbolic link to the hotplug driver
- * module.
- * @pci_slot: struct pci_slot
- *
- * Helper function for pci_hotplug_core.c to remove symbolic link to
- * the hotplug driver module.
- */
-void pci_hp_remove_module_link(struct pci_slot *pci_slot)
-{
- sysfs_remove_link(&pci_slot->kobj, "module");
-}
-EXPORT_SYMBOL_GPL(pci_hp_remove_module_link);
-#endif
-
static int pci_slot_init(void)
{
struct kset *pci_bus_kset;
diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c
index 06fd317529fc..df9a28ba69dc 100644
--- a/drivers/perf/apple_m1_cpu_pmu.c
+++ b/drivers/perf/apple_m1_cpu_pmu.c
@@ -12,6 +12,7 @@
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
+#include <linux/perf/arm_pmuv3.h>
#include <linux/platform_device.h>
#include <asm/apple_m1_pmu.h>
@@ -120,6 +121,8 @@ enum m1_pmu_events {
*/
M1_PMU_CFG_COUNT_USER = BIT(8),
M1_PMU_CFG_COUNT_KERNEL = BIT(9),
+ M1_PMU_CFG_COUNT_HOST = BIT(10),
+ M1_PMU_CFG_COUNT_GUEST = BIT(11),
};
/*
@@ -172,6 +175,17 @@ static const unsigned m1_pmu_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_BRANCH_MISSES] = M1_PMU_PERFCTR_BRANCH_MISPRED_NONSPEC,
};
+#define M1_PMUV3_EVENT_MAP(pmuv3_event, m1_event) \
+ [ARMV8_PMUV3_PERFCTR_##pmuv3_event] = M1_PMU_PERFCTR_##m1_event
+
+static const u16 m1_pmu_pmceid_map[ARMV8_PMUV3_MAX_COMMON_EVENTS] = {
+ [0 ... ARMV8_PMUV3_MAX_COMMON_EVENTS - 1] = HW_OP_UNSUPPORTED,
+ M1_PMUV3_EVENT_MAP(INST_RETIRED, INST_ALL),
+ M1_PMUV3_EVENT_MAP(CPU_CYCLES, CORE_ACTIVE_CYCLE),
+ M1_PMUV3_EVENT_MAP(BR_RETIRED, INST_BRANCH),
+ M1_PMUV3_EVENT_MAP(BR_MIS_PRED_RETIRED, BRANCH_MISPRED_NONSPEC),
+};
+
/* sysfs definitions */
static ssize_t m1_pmu_events_sysfs_show(struct device *dev,
struct device_attribute *attr,
@@ -327,11 +341,10 @@ static void m1_pmu_disable_counter_interrupt(unsigned int index)
__m1_pmu_enable_counter_interrupt(index, false);
}
-static void m1_pmu_configure_counter(unsigned int index, u8 event,
- bool user, bool kernel)
+static void __m1_pmu_configure_event_filter(unsigned int index, bool user,
+ bool kernel, bool host)
{
- u64 val, user_bit, kernel_bit;
- int shift;
+ u64 clear, set, user_bit, kernel_bit;
switch (index) {
case 0 ... 7:
@@ -346,19 +359,27 @@ static void m1_pmu_configure_counter(unsigned int index, u8 event,
BUG();
}
- val = read_sysreg_s(SYS_IMP_APL_PMCR1_EL1);
-
+ clear = set = 0;
if (user)
- val |= user_bit;
+ set |= user_bit;
else
- val &= ~user_bit;
+ clear |= user_bit;
if (kernel)
- val |= kernel_bit;
+ set |= kernel_bit;
else
- val &= ~kernel_bit;
+ clear |= kernel_bit;
+
+ if (host)
+ sysreg_clear_set_s(SYS_IMP_APL_PMCR1_EL1, clear, set);
+ else if (is_kernel_in_hyp_mode())
+ sysreg_clear_set_s(SYS_IMP_APL_PMCR1_EL12, clear, set);
+}
- write_sysreg_s(val, SYS_IMP_APL_PMCR1_EL1);
+static void __m1_pmu_configure_eventsel(unsigned int index, u8 event)
+{
+ u64 clear = 0, set = 0;
+ int shift;
/*
* Counters 0 and 1 have fixed events. For anything else,
@@ -371,21 +392,32 @@ static void m1_pmu_configure_counter(unsigned int index, u8 event,
break;
case 2 ... 5:
shift = (index - 2) * 8;
- val = read_sysreg_s(SYS_IMP_APL_PMESR0_EL1);
- val &= ~((u64)0xff << shift);
- val |= (u64)event << shift;
- write_sysreg_s(val, SYS_IMP_APL_PMESR0_EL1);
+ clear |= (u64)0xff << shift;
+ set |= (u64)event << shift;
+ sysreg_clear_set_s(SYS_IMP_APL_PMESR0_EL1, clear, set);
break;
case 6 ... 9:
shift = (index - 6) * 8;
- val = read_sysreg_s(SYS_IMP_APL_PMESR1_EL1);
- val &= ~((u64)0xff << shift);
- val |= (u64)event << shift;
- write_sysreg_s(val, SYS_IMP_APL_PMESR1_EL1);
+ clear |= (u64)0xff << shift;
+ set |= (u64)event << shift;
+ sysreg_clear_set_s(SYS_IMP_APL_PMESR1_EL1, clear, set);
break;
}
}
+static void m1_pmu_configure_counter(unsigned int index, unsigned long config_base)
+{
+ bool kernel = config_base & M1_PMU_CFG_COUNT_KERNEL;
+ bool guest = config_base & M1_PMU_CFG_COUNT_GUEST;
+ bool host = config_base & M1_PMU_CFG_COUNT_HOST;
+ bool user = config_base & M1_PMU_CFG_COUNT_USER;
+ u8 evt = config_base & M1_PMU_CFG_EVENT;
+
+ __m1_pmu_configure_event_filter(index, user && host, kernel && host, true);
+ __m1_pmu_configure_event_filter(index, user && guest, kernel && guest, false);
+ __m1_pmu_configure_eventsel(index, evt);
+}
+
/* arm_pmu backend */
static void m1_pmu_enable_event(struct perf_event *event)
{
@@ -396,11 +428,7 @@ static void m1_pmu_enable_event(struct perf_event *event)
user = event->hw.config_base & M1_PMU_CFG_COUNT_USER;
kernel = event->hw.config_base & M1_PMU_CFG_COUNT_KERNEL;
- m1_pmu_disable_counter_interrupt(event->hw.idx);
- m1_pmu_disable_counter(event->hw.idx);
- isb();
-
- m1_pmu_configure_counter(event->hw.idx, evt, user, kernel);
+ m1_pmu_configure_counter(event->hw.idx, event->hw.config_base);
m1_pmu_enable_counter(event->hw.idx);
m1_pmu_enable_counter_interrupt(event->hw.idx);
isb();
@@ -538,6 +566,26 @@ static int m2_pmu_map_event(struct perf_event *event)
return armpmu_map_event(event, &m1_pmu_perf_map, NULL, M1_PMU_CFG_EVENT);
}
+static int m1_pmu_map_pmuv3_event(unsigned int eventsel)
+{
+ u16 m1_event = HW_OP_UNSUPPORTED;
+
+ if (eventsel < ARMV8_PMUV3_MAX_COMMON_EVENTS)
+ m1_event = m1_pmu_pmceid_map[eventsel];
+
+ return m1_event == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : m1_event;
+}
+
+static void m1_pmu_init_pmceid(struct arm_pmu *pmu)
+{
+ unsigned int event;
+
+ for (event = 0; event < ARMV8_PMUV3_MAX_COMMON_EVENTS; event++) {
+ if (m1_pmu_map_pmuv3_event(event) >= 0)
+ set_bit(event, pmu->pmceid_bitmap);
+ }
+}
+
static void m1_pmu_reset(void *info)
{
int i;
@@ -558,7 +606,7 @@ static int m1_pmu_set_event_filter(struct hw_perf_event *event,
{
unsigned long config_base = 0;
- if (!attr->exclude_guest) {
+ if (!attr->exclude_guest && !is_kernel_in_hyp_mode()) {
pr_debug("ARM performance counters do not support mode exclusion\n");
return -EOPNOTSUPP;
}
@@ -566,6 +614,10 @@ static int m1_pmu_set_event_filter(struct hw_perf_event *event,
config_base |= M1_PMU_CFG_COUNT_KERNEL;
if (!attr->exclude_user)
config_base |= M1_PMU_CFG_COUNT_USER;
+ if (!attr->exclude_host)
+ config_base |= M1_PMU_CFG_COUNT_HOST;
+ if (!attr->exclude_guest)
+ config_base |= M1_PMU_CFG_COUNT_GUEST;
event->config_base = config_base;
@@ -594,6 +646,9 @@ static int m1_pmu_init(struct arm_pmu *cpu_pmu, u32 flags)
cpu_pmu->reset = m1_pmu_reset;
cpu_pmu->set_event_filter = m1_pmu_set_event_filter;
+ cpu_pmu->map_pmuv3_event = m1_pmu_map_pmuv3_event;
+ m1_pmu_init_pmceid(cpu_pmu);
+
bitmap_set(cpu_pmu->cntr_mask, 0, M1_PMU_NR_COUNTERS);
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = &m1_pmu_events_attr_group;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = &m1_pmu_format_attr_group;
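The new m1_pmu_pmceid_map above uses a GNU range designated initializer to default every slot to HW_OP_UNSUPPORTED and then overrides only the common PMUv3 events the hardware can count; m1_pmu_map_pmuv3_event() turns a miss into -EOPNOTSUPP. A minimal, self-contained sketch of that table-plus-lookup shape (table size and event numbers below are made up for illustration):

#include <linux/errno.h>
#include <linux/types.h>

#define EX_MAX_COMMON_EVENTS	8	/* hypothetical table size */
#define EX_UNSUPPORTED		0xffff

/* Range designator fills the table with "unsupported", entries override it. */
static const u16 ex_event_map[EX_MAX_COMMON_EVENTS] = {
	[0 ... EX_MAX_COMMON_EVENTS - 1] = EX_UNSUPPORTED,
	[1] = 0x8c,	/* made-up mapping: arch event 1 -> impl event 0x8c */
	[4] = 0x02,
};

static int ex_map_event(unsigned int eventsel)
{
	u16 impl = EX_UNSUPPORTED;

	if (eventsel < EX_MAX_COMMON_EVENTS)
		impl = ex_event_map[eventsel];

	return impl == EX_UNSUPPORTED ? -EOPNOTSUPP : impl;
}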
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index d5fcea3d4328..1a0d0e1a2263 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -1273,9 +1273,8 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
/* No overflow interrupt? Have to use a timer instead. */
if (!ccn->irq) {
dev_info(ccn->dev, "No access to interrupts, using timer.\n");
- hrtimer_init(&ccn->dt.hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- ccn->dt.hrtimer.function = arm_ccn_pmu_timer_handler;
+ hrtimer_setup(&ccn->dt.hrtimer, arm_ccn_pmu_timer_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
/* Pick one CPU which we will use to collect data from CCN... */
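This hunk (and the matching conversions later in marvell_cn10k_ddr_pmu.c and thunderx2_pmu.c) folds the old hrtimer_init() plus manual .function assignment into a single hrtimer_setup() call. A small sketch of the new-style setup, with a made-up polling callback and period:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer ex_poll_timer;	/* hypothetical example timer */

static enum hrtimer_restart ex_poll_fn(struct hrtimer *t)
{
	/* ...read and accumulate counters here... */
	hrtimer_forward_now(t, ms_to_ktime(100));
	return HRTIMER_RESTART;
}

static void ex_poll_start(void)
{
	/* Replaces: hrtimer_init(&t, ...); t.function = ex_poll_fn; */
	hrtimer_setup(&ex_poll_timer, ex_poll_fn, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);
	hrtimer_start(&ex_poll_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
}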
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index ef959e66db7c..d4fe30ff225b 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -802,8 +802,6 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
CMN_EVENT_ATTR(_model, ccha_##_name, CMN_TYPE_CCHA, _event)
#define CMN_EVENT_CCLA(_name, _event) \
CMN_EVENT_ATTR(CMN_ANY, ccla_##_name, CMN_TYPE_CCLA, _event)
-#define CMN_EVENT_CCLA_RNI(_name, _event) \
- CMN_EVENT_ATTR(CMN_ANY, ccla_rni_##_name, CMN_TYPE_CCLA_RNI, _event)
#define CMN_EVENT_HNS(_name, _event) \
CMN_EVENT_ATTR(CMN_ANY, hns_##_name, CMN_TYPE_HNS, _event)
@@ -1798,6 +1796,9 @@ static int arm_cmn_event_init(struct perf_event *event)
} else if (type == CMN_TYPE_XP &&
(cmn->part == PART_CMN700 || cmn->part == PART_CMN_S3)) {
hw->wide_sel = true;
+ } else if (type == CMN_TYPE_RND) {
+ /* Secretly permit this as an alias for "rnid" events */
+ type = CMN_TYPE_RNI;
}
/* This is sufficiently annoying to recalculate, so cache it */
diff --git a/drivers/perf/arm_cspmu/ampere_cspmu.c b/drivers/perf/arm_cspmu/ampere_cspmu.c
index f72f5689923c..b8ca69fd9d1d 100644
--- a/drivers/perf/arm_cspmu/ampere_cspmu.c
+++ b/drivers/perf/arm_cspmu/ampere_cspmu.c
@@ -10,10 +10,10 @@
#include "arm_cspmu.h"
-#define PMAUXR0 0xD80
-#define PMAUXR1 0xD84
-#define PMAUXR2 0xD88
-#define PMAUXR3 0xD8C
+#define PMAUXR0 PMIMPDEF
+#define PMAUXR1 (PMIMPDEF + 0x4)
+#define PMAUXR2 (PMIMPDEF + 0x8)
+#define PMAUXR3 (PMIMPDEF + 0xC)
#define to_ampere_cspmu_ctx(cspmu) ((struct ampere_cspmu_ctx *)(cspmu->impl.ctx))
@@ -132,32 +132,20 @@ ampere_cspmu_get_name(const struct arm_cspmu *cspmu)
return ctx->name;
}
-static u32 ampere_cspmu_event_filter(const struct perf_event *event)
+static void ampere_cspmu_set_cc_filter(struct arm_cspmu *cspmu,
+ const struct perf_event *event)
{
/*
- * PMEVFILTR or PMCCFILTR aren't used in Ampere SoC PMU but are marked
- * as RES0. Make sure, PMCCFILTR is written zero.
+ * PMCCFILTR is RES0, so this is just a dummy callback to override
+ * the default implementation and avoid writing to it.
*/
- return 0;
}
static void ampere_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
- struct hw_perf_event *hwc,
- u32 filter)
+ const struct perf_event *event)
{
- struct perf_event *event;
- unsigned int idx;
u32 threshold, rank, bank;
- /*
- * At this point, all the events have the same filter settings.
- * Therefore, take the first event and use its configuration.
- */
- idx = find_first_bit(cspmu->hw_events.used_ctrs,
- cspmu->cycle_counter_logical_idx);
-
- event = cspmu->hw_events.events[idx];
-
threshold = get_threshold(event);
rank = get_rank(event);
bank = get_bank(event);
@@ -233,7 +221,7 @@ static int ampere_cspmu_init_ops(struct arm_cspmu *cspmu)
cspmu->impl.ctx = ctx;
- impl_ops->event_filter = ampere_cspmu_event_filter;
+ impl_ops->set_cc_filter = ampere_cspmu_set_cc_filter;
impl_ops->set_ev_filter = ampere_cspmu_set_ev_filter;
impl_ops->validate_event = ampere_cspmu_validate_event;
impl_ops->get_name = ampere_cspmu_get_name;
diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c
index 81e8b97e9353..efa9b229e701 100644
--- a/drivers/perf/arm_cspmu/arm_cspmu.c
+++ b/drivers/perf/arm_cspmu/arm_cspmu.c
@@ -40,51 +40,6 @@
ARM_CSPMU_EXT_ATTR(_name, arm_cspmu_cpumask_show, \
(unsigned long)_config)
-/*
- * CoreSight PMU Arch register offsets.
- */
-#define PMEVCNTR_LO 0x0
-#define PMEVCNTR_HI 0x4
-#define PMEVTYPER 0x400
-#define PMCCFILTR 0x47C
-#define PMEVFILTR 0xA00
-#define PMCNTENSET 0xC00
-#define PMCNTENCLR 0xC20
-#define PMINTENSET 0xC40
-#define PMINTENCLR 0xC60
-#define PMOVSCLR 0xC80
-#define PMOVSSET 0xCC0
-#define PMCFGR 0xE00
-#define PMCR 0xE04
-#define PMIIDR 0xE08
-
-/* PMCFGR register field */
-#define PMCFGR_NCG GENMASK(31, 28)
-#define PMCFGR_HDBG BIT(24)
-#define PMCFGR_TRO BIT(23)
-#define PMCFGR_SS BIT(22)
-#define PMCFGR_FZO BIT(21)
-#define PMCFGR_MSI BIT(20)
-#define PMCFGR_UEN BIT(19)
-#define PMCFGR_NA BIT(17)
-#define PMCFGR_EX BIT(16)
-#define PMCFGR_CCD BIT(15)
-#define PMCFGR_CC BIT(14)
-#define PMCFGR_SIZE GENMASK(13, 8)
-#define PMCFGR_N GENMASK(7, 0)
-
-/* PMCR register field */
-#define PMCR_TRO BIT(11)
-#define PMCR_HDBG BIT(10)
-#define PMCR_FZO BIT(9)
-#define PMCR_NA BIT(8)
-#define PMCR_DP BIT(5)
-#define PMCR_X BIT(4)
-#define PMCR_D BIT(3)
-#define PMCR_C BIT(2)
-#define PMCR_P BIT(1)
-#define PMCR_E BIT(0)
-
/* Each SET/CLR register supports up to 32 counters. */
#define ARM_CSPMU_SET_CLR_COUNTER_SHIFT 5
#define ARM_CSPMU_SET_CLR_COUNTER_NUM \
@@ -111,7 +66,9 @@ static unsigned long arm_cspmu_cpuhp_state;
static DEFINE_MUTEX(arm_cspmu_lock);
static void arm_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
- struct hw_perf_event *hwc, u32 filter);
+ const struct perf_event *event);
+static void arm_cspmu_set_cc_filter(struct arm_cspmu *cspmu,
+ const struct perf_event *event);
static struct acpi_apmt_node *arm_cspmu_apmt_node(struct device *dev)
{
@@ -226,6 +183,7 @@ arm_cspmu_event_attr_is_visible(struct kobject *kobj,
static struct attribute *arm_cspmu_format_attrs[] = {
ARM_CSPMU_FORMAT_EVENT_ATTR,
ARM_CSPMU_FORMAT_FILTER_ATTR,
+ ARM_CSPMU_FORMAT_FILTER2_ATTR,
NULL,
};
@@ -250,11 +208,6 @@ static bool arm_cspmu_is_cycle_counter_event(const struct perf_event *event)
return (event->attr.config == ARM_CSPMU_EVT_CYCLES_DEFAULT);
}
-static u32 arm_cspmu_event_filter(const struct perf_event *event)
-{
- return event->attr.config1 & ARM_CSPMU_FILTER_MASK;
-}
-
static ssize_t arm_cspmu_identifier_show(struct device *dev,
struct device_attribute *attr,
char *page)
@@ -416,7 +369,7 @@ static int arm_cspmu_init_impl_ops(struct arm_cspmu *cspmu)
DEFAULT_IMPL_OP(get_name),
DEFAULT_IMPL_OP(is_cycle_counter_event),
DEFAULT_IMPL_OP(event_type),
- DEFAULT_IMPL_OP(event_filter),
+ DEFAULT_IMPL_OP(set_cc_filter),
DEFAULT_IMPL_OP(set_ev_filter),
DEFAULT_IMPL_OP(event_attr_is_visible),
};
@@ -812,26 +765,28 @@ static inline void arm_cspmu_set_event(struct arm_cspmu *cspmu,
}
static void arm_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
- struct hw_perf_event *hwc,
- u32 filter)
+ const struct perf_event *event)
{
- u32 offset = PMEVFILTR + (4 * hwc->idx);
+ u32 filter = event->attr.config1 & ARM_CSPMU_FILTER_MASK;
+ u32 filter2 = event->attr.config2 & ARM_CSPMU_FILTER_MASK;
+ u32 offset = 4 * event->hw.idx;
- writel(filter, cspmu->base0 + offset);
+ writel(filter, cspmu->base0 + PMEVFILTR + offset);
+ writel(filter2, cspmu->base0 + PMEVFILT2R + offset);
}
-static inline void arm_cspmu_set_cc_filter(struct arm_cspmu *cspmu, u32 filter)
+static void arm_cspmu_set_cc_filter(struct arm_cspmu *cspmu,
+ const struct perf_event *event)
{
- u32 offset = PMCCFILTR;
+ u32 filter = event->attr.config1 & ARM_CSPMU_FILTER_MASK;
- writel(filter, cspmu->base0 + offset);
+ writel(filter, cspmu->base0 + PMCCFILTR);
}
static void arm_cspmu_start(struct perf_event *event, int pmu_flags)
{
struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- u32 filter;
/* We always reprogram the counter */
if (pmu_flags & PERF_EF_RELOAD)
@@ -839,13 +794,11 @@ static void arm_cspmu_start(struct perf_event *event, int pmu_flags)
arm_cspmu_set_event_period(event);
- filter = cspmu->impl.ops.event_filter(event);
-
if (event->hw.extra_reg.idx == cspmu->cycle_counter_logical_idx) {
- arm_cspmu_set_cc_filter(cspmu, filter);
+ cspmu->impl.ops.set_cc_filter(cspmu, event);
} else {
arm_cspmu_set_event(cspmu, hwc);
- cspmu->impl.ops.set_ev_filter(cspmu, hwc, filter);
+ cspmu->impl.ops.set_ev_filter(cspmu, event);
}
hwc->state = 0;
diff --git a/drivers/perf/arm_cspmu/arm_cspmu.h b/drivers/perf/arm_cspmu/arm_cspmu.h
index 2621f3111148..19684b76bd96 100644
--- a/drivers/perf/arm_cspmu/arm_cspmu.h
+++ b/drivers/perf/arm_cspmu/arm_cspmu.h
@@ -47,6 +47,8 @@
/* Default filter format */
#define ARM_CSPMU_FORMAT_FILTER_ATTR \
ARM_CSPMU_FORMAT_ATTR(filter, "config1:0-31")
+#define ARM_CSPMU_FORMAT_FILTER2_ATTR \
+ ARM_CSPMU_FORMAT_ATTR(filter2, "config2:0-31")
/*
* This is the default event number for cycle count, if supported, since the
@@ -65,6 +67,53 @@
/* The cycle counter, if implemented, is located at counter[31]. */
#define ARM_CSPMU_CYCLE_CNTR_IDX 31
+/*
+ * CoreSight PMU Arch register offsets.
+ */
+#define PMEVCNTR_LO 0x0
+#define PMEVCNTR_HI 0x4
+#define PMEVTYPER 0x400
+#define PMCCFILTR 0x47C
+#define PMEVFILT2R 0x800
+#define PMEVFILTR 0xA00
+#define PMCNTENSET 0xC00
+#define PMCNTENCLR 0xC20
+#define PMINTENSET 0xC40
+#define PMINTENCLR 0xC60
+#define PMOVSCLR 0xC80
+#define PMOVSSET 0xCC0
+#define PMIMPDEF 0xD80
+#define PMCFGR 0xE00
+#define PMCR 0xE04
+#define PMIIDR 0xE08
+
+/* PMCFGR register field */
+#define PMCFGR_NCG GENMASK(31, 28)
+#define PMCFGR_HDBG BIT(24)
+#define PMCFGR_TRO BIT(23)
+#define PMCFGR_SS BIT(22)
+#define PMCFGR_FZO BIT(21)
+#define PMCFGR_MSI BIT(20)
+#define PMCFGR_UEN BIT(19)
+#define PMCFGR_NA BIT(17)
+#define PMCFGR_EX BIT(16)
+#define PMCFGR_CCD BIT(15)
+#define PMCFGR_CC BIT(14)
+#define PMCFGR_SIZE GENMASK(13, 8)
+#define PMCFGR_N GENMASK(7, 0)
+
+/* PMCR register field */
+#define PMCR_TRO BIT(11)
+#define PMCR_HDBG BIT(10)
+#define PMCR_FZO BIT(9)
+#define PMCR_NA BIT(8)
+#define PMCR_DP BIT(5)
+#define PMCR_X BIT(4)
+#define PMCR_D BIT(3)
+#define PMCR_C BIT(2)
+#define PMCR_P BIT(1)
+#define PMCR_E BIT(0)
+
/* PMIIDR register field */
#define ARM_CSPMU_PMIIDR_IMPLEMENTER GENMASK(11, 0)
#define ARM_CSPMU_PMIIDR_PRODUCTID GENMASK(31, 20)
@@ -103,11 +152,11 @@ struct arm_cspmu_impl_ops {
bool (*is_cycle_counter_event)(const struct perf_event *event);
/* Decode event type/id from configs */
u32 (*event_type)(const struct perf_event *event);
- /* Decode filter value from configs */
- u32 (*event_filter)(const struct perf_event *event);
- /* Set event filter */
+ /* Set event filters */
+ void (*set_cc_filter)(struct arm_cspmu *cspmu,
+ const struct perf_event *event);
void (*set_ev_filter)(struct arm_cspmu *cspmu,
- struct hw_perf_event *hwc, u32 filter);
+ const struct perf_event *event);
/* Implementation specific event validation */
int (*validate_event)(struct arm_cspmu *cspmu,
struct perf_event *event);
diff --git a/drivers/perf/arm_cspmu/nvidia_cspmu.c b/drivers/perf/arm_cspmu/nvidia_cspmu.c
index 8116c7846a46..dc6d4e3e2a1b 100644
--- a/drivers/perf/arm_cspmu/nvidia_cspmu.c
+++ b/drivers/perf/arm_cspmu/nvidia_cspmu.c
@@ -6,6 +6,7 @@
/* Support for NVIDIA specific attributes. */
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/topology.h>
@@ -183,6 +184,24 @@ static u32 nv_cspmu_event_filter(const struct perf_event *event)
return filter_val;
}
+static void nv_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
+ const struct perf_event *event)
+{
+ u32 filter = nv_cspmu_event_filter(event);
+ u32 offset = PMEVFILTR + (4 * event->hw.idx);
+
+ writel(filter, cspmu->base0 + offset);
+}
+
+static void nv_cspmu_set_cc_filter(struct arm_cspmu *cspmu,
+ const struct perf_event *event)
+{
+ u32 filter = nv_cspmu_event_filter(event);
+
+ writel(filter, cspmu->base0 + PMCCFILTR);
+}
+
enum nv_cspmu_name_fmt {
NAME_FMT_GENERIC,
NAME_FMT_SOCKET
@@ -322,7 +341,8 @@ static int nv_cspmu_init_ops(struct arm_cspmu *cspmu)
cspmu->impl.ctx = ctx;
/* NVIDIA specific callbacks. */
- impl_ops->event_filter = nv_cspmu_event_filter;
+ impl_ops->set_cc_filter = nv_cspmu_set_cc_filter;
+ impl_ops->set_ev_filter = nv_cspmu_set_ev_filter;
impl_ops->get_event_attrs = nv_cspmu_get_event_attrs;
impl_ops->get_format_attrs = nv_cspmu_get_format_attrs;
impl_ops->get_name = nv_cspmu_get_name;
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 398cce3d76fc..2f33e69a8caf 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -342,12 +342,10 @@ armpmu_add(struct perf_event *event, int flags)
if (idx < 0)
return idx;
- /*
- * If there is an event in the counter we are going to use then make
- * sure it is disabled.
- */
+ /* The newly-allocated counter should be empty */
+ WARN_ON_ONCE(hw_events->events[idx]);
+
event->hw.idx = idx;
- armpmu->disable(event);
hw_events->events[idx] = event;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index 0e360feb3432..e506d59654e7 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -795,11 +795,6 @@ static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
static void armv8pmu_enable_event(struct perf_event *event)
{
- /*
- * Enable counter and interrupt, and set the counter to count
- * the event that we're interested in.
- */
- armv8pmu_disable_event_counter(event);
armv8pmu_write_event_type(event);
armv8pmu_enable_event_irq(event);
armv8pmu_enable_event_counter(event);
@@ -825,10 +820,10 @@ static void armv8pmu_start(struct arm_pmu *cpu_pmu)
else
armv8pmu_disable_user_access();
+ kvm_vcpu_pmu_resync_el0();
+
/* Enable all counters */
armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
-
- kvm_vcpu_pmu_resync_el0();
}
static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
@@ -1369,6 +1364,7 @@ PMUV3_INIT_SIMPLE(armv8_neoverse_v1)
PMUV3_INIT_SIMPLE(armv8_neoverse_v2)
PMUV3_INIT_SIMPLE(armv8_neoverse_v3)
PMUV3_INIT_SIMPLE(armv8_neoverse_v3ae)
+PMUV3_INIT_SIMPLE(armv8_rainier)
PMUV3_INIT_SIMPLE(armv8_nvidia_carmel)
PMUV3_INIT_SIMPLE(armv8_nvidia_denver)
@@ -1416,6 +1412,7 @@ static const struct of_device_id armv8_pmu_of_device_ids[] = {
{.compatible = "arm,neoverse-v2-pmu", .data = armv8_neoverse_v2_pmu_init},
{.compatible = "arm,neoverse-v3-pmu", .data = armv8_neoverse_v3_pmu_init},
{.compatible = "arm,neoverse-v3ae-pmu", .data = armv8_neoverse_v3ae_pmu_init},
+ {.compatible = "arm,rainier-pmu", .data = armv8_rainier_pmu_init},
{.compatible = "cavium,thunder-pmu", .data = armv8_cavium_thunder_pmu_init},
{.compatible = "brcm,vulcan-pmu", .data = armv8_brcm_vulcan_pmu_init},
{.compatible = "nvidia,carmel-pmu", .data = armv8_nvidia_carmel_pmu_init},
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index f5e6878db9d6..3efed8839a4e 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -50,7 +50,7 @@ static_assert((PERF_EVENT_FLAG_ARCH & SPE_PMU_HW_FLAGS_CX) == SPE_PMU_HW_FLAGS_C
static void set_spe_event_has_cx(struct perf_event *event)
{
- if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && !perf_allow_kernel(&event->attr))
+ if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && !perf_allow_kernel())
event->hw.flags |= SPE_PMU_HW_FLAGS_CX;
}
@@ -765,7 +765,7 @@ static int arm_spe_pmu_event_init(struct perf_event *event)
set_spe_event_has_cx(event);
reg = arm_spe_event_to_pmscr(event);
if (reg & (PMSCR_EL1_PA | PMSCR_EL1_PCT))
- return perf_allow_kernel(&event->attr);
+ return perf_allow_kernel();
return 0;
}
diff --git a/drivers/perf/arm_v7_pmu.c b/drivers/perf/arm_v7_pmu.c
index 420cadd108e7..17831e1920bd 100644
--- a/drivers/perf/arm_v7_pmu.c
+++ b/drivers/perf/arm_v7_pmu.c
@@ -858,16 +858,6 @@ static void armv7pmu_enable_event(struct perf_event *event)
}
/*
- * Enable counter and interrupt, and set the counter to count
- * the event that we're interested in.
- */
-
- /*
- * Disable counter
- */
- armv7_pmnc_disable_counter(idx);
-
- /*
* Set event (if destined for PMNx counters)
* We only need to set the event for the cycle counter if we
* have the ability to perform event filtering.
@@ -875,14 +865,7 @@ static void armv7pmu_enable_event(struct perf_event *event)
if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
armv7_pmnc_write_evtsel(idx, hwc->config_base);
- /*
- * Enable interrupt for this counter
- */
armv7_pmnc_enable_intens(idx);
-
- /*
- * Enable counter
- */
armv7_pmnc_enable_counter(idx);
}
@@ -898,18 +881,7 @@ static void armv7pmu_disable_event(struct perf_event *event)
return;
}
- /*
- * Disable counter and interrupt
- */
-
- /*
- * Disable counter
- */
armv7_pmnc_disable_counter(idx);
-
- /*
- * Disable interrupt for this counter
- */
armv7_pmnc_disable_intens(idx);
}
@@ -1477,14 +1449,6 @@ static void krait_pmu_enable_event(struct perf_event *event)
int idx = hwc->idx;
/*
- * Enable counter and interrupt, and set the counter to count
- * the event that we're interested in.
- */
-
- /* Disable counter */
- armv7_pmnc_disable_counter(idx);
-
- /*
* Set event (if destined for PMNx counters)
* We set the event for the cycle counter because we
* have the ability to perform event filtering.
@@ -1494,10 +1458,7 @@ static void krait_pmu_enable_event(struct perf_event *event)
else
armv7_pmnc_write_evtsel(idx, hwc->config_base);
- /* Enable interrupt for this counter */
armv7_pmnc_enable_intens(idx);
-
- /* Enable counter */
armv7_pmnc_enable_counter(idx);
}
@@ -1798,14 +1759,6 @@ static void scorpion_pmu_enable_event(struct perf_event *event)
int idx = hwc->idx;
/*
- * Enable counter and interrupt, and set the counter to count
- * the event that we're interested in.
- */
-
- /* Disable counter */
- armv7_pmnc_disable_counter(idx);
-
- /*
* Set event (if destined for PMNx counters)
* We don't set the event for the cycle counter because we
* don't have the ability to perform event filtering.
@@ -1815,10 +1768,7 @@ static void scorpion_pmu_enable_event(struct perf_event *event)
else if (idx != ARMV7_IDX_CYCLE_COUNTER)
armv7_pmnc_write_evtsel(idx, hwc->config_base);
- /* Enable interrupt for this counter */
armv7_pmnc_enable_intens(idx);
-
- /* Enable counter */
armv7_pmnc_enable_counter(idx);
}
diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c
index cccecae9823f..146ff57813fb 100644
--- a/drivers/perf/dwc_pcie_pmu.c
+++ b/drivers/perf/dwc_pcie_pmu.c
@@ -13,6 +13,7 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/pcie-dwc.h>
#include <linux/perf_event.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
@@ -99,26 +100,6 @@ struct dwc_pcie_dev_info {
struct list_head dev_node;
};
-struct dwc_pcie_pmu_vsec_id {
- u16 vendor_id;
- u16 vsec_id;
- u8 vsec_rev;
-};
-
-/*
- * VSEC IDs are allocated by the vendor, so a given ID may mean different
- * things to different vendors. See PCIe r6.0, sec 7.9.5.2.
- */
-static const struct dwc_pcie_pmu_vsec_id dwc_pcie_pmu_vsec_ids[] = {
- { .vendor_id = PCI_VENDOR_ID_ALIBABA,
- .vsec_id = 0x02, .vsec_rev = 0x4 },
- { .vendor_id = PCI_VENDOR_ID_AMPERE,
- .vsec_id = 0x02, .vsec_rev = 0x4 },
- { .vendor_id = PCI_VENDOR_ID_QCOM,
- .vsec_id = 0x02, .vsec_rev = 0x4 },
- {} /* terminator */
-};
-
static ssize_t cpumask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -529,14 +510,14 @@ static void dwc_pcie_unregister_pmu(void *data)
static u16 dwc_pcie_des_cap(struct pci_dev *pdev)
{
- const struct dwc_pcie_pmu_vsec_id *vid;
+ const struct dwc_pcie_vsec_id *vid;
u16 vsec;
u32 val;
if (!pci_is_pcie(pdev) || !(pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT))
return 0;
- for (vid = dwc_pcie_pmu_vsec_ids; vid->vendor_id; vid++) {
+ for (vid = dwc_pcie_rasdes_vsec_ids; vid->vendor_id; vid++) {
vsec = pci_find_vsec_capability(pdev, vid->vendor_id,
vid->vsec_id);
if (vsec) {
@@ -565,15 +546,15 @@ static int dwc_pcie_register_dev(struct pci_dev *pdev)
u32 sbdf;
sbdf = (pci_domain_nr(pdev->bus) << 16) | PCI_DEVID(pdev->bus->number, pdev->devfn);
- plat_dev = platform_device_register_data(NULL, "dwc_pcie_pmu", sbdf,
- pdev, sizeof(*pdev));
-
+ plat_dev = platform_device_register_simple("dwc_pcie_pmu", sbdf, NULL, 0);
if (IS_ERR(plat_dev))
return PTR_ERR(plat_dev);
dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
- if (!dev_info)
+ if (!dev_info) {
+ platform_device_unregister(plat_dev);
return -ENOMEM;
+ }
/* Cache platform device to handle pci device hotplug */
dev_info->plat_dev = plat_dev;
@@ -614,18 +595,26 @@ static struct notifier_block dwc_pcie_pmu_nb = {
static int dwc_pcie_pmu_probe(struct platform_device *plat_dev)
{
- struct pci_dev *pdev = plat_dev->dev.platform_data;
+ struct pci_dev *pdev;
struct dwc_pcie_pmu *pcie_pmu;
char *name;
u32 sbdf;
u16 vsec;
int ret;
+ sbdf = plat_dev->id;
+ pdev = pci_get_domain_bus_and_slot(sbdf >> 16, PCI_BUS_NUM(sbdf & 0xffff),
+ sbdf & 0xff);
+ if (!pdev) {
+ pr_err("No pdev found for the sbdf 0x%x\n", sbdf);
+ return -ENODEV;
+ }
+
vsec = dwc_pcie_des_cap(pdev);
if (!vsec)
return -ENODEV;
- sbdf = plat_dev->id;
+ pci_dev_put(pdev);
name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", sbdf);
if (!name)
return -ENOMEM;
@@ -640,7 +629,7 @@ static int dwc_pcie_pmu_probe(struct platform_device *plat_dev)
pcie_pmu->on_cpu = -1;
pcie_pmu->pmu = (struct pmu){
.name = name,
- .parent = &pdev->dev,
+ .parent = &plat_dev->dev,
.module = THIS_MODULE,
.attr_groups = dwc_pcie_attr_groups,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
@@ -730,6 +719,15 @@ static struct platform_driver dwc_pcie_pmu_driver = {
.driver = {.name = "dwc_pcie_pmu",},
};
+static void dwc_pcie_cleanup_devices(void)
+{
+ struct dwc_pcie_dev_info *dev_info, *tmp;
+
+ list_for_each_entry_safe(dev_info, tmp, &dwc_pcie_dev_info_head, dev_node) {
+ dwc_pcie_unregister_dev(dev_info);
+ }
+}
+
static int __init dwc_pcie_pmu_init(void)
{
struct pci_dev *pdev = NULL;
@@ -742,7 +740,7 @@ static int __init dwc_pcie_pmu_init(void)
ret = dwc_pcie_register_dev(pdev);
if (ret) {
pci_dev_put(pdev);
- return ret;
+ goto err_cleanup;
}
}
@@ -751,35 +749,35 @@ static int __init dwc_pcie_pmu_init(void)
dwc_pcie_pmu_online_cpu,
dwc_pcie_pmu_offline_cpu);
if (ret < 0)
- return ret;
+ goto err_cleanup;
dwc_pcie_pmu_hp_state = ret;
ret = platform_driver_register(&dwc_pcie_pmu_driver);
if (ret)
- goto platform_driver_register_err;
+ goto err_remove_cpuhp;
ret = bus_register_notifier(&pci_bus_type, &dwc_pcie_pmu_nb);
if (ret)
- goto platform_driver_register_err;
+ goto err_unregister_driver;
notify = true;
return 0;
-platform_driver_register_err:
+err_unregister_driver:
+ platform_driver_unregister(&dwc_pcie_pmu_driver);
+err_remove_cpuhp:
cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state);
-
+err_cleanup:
+ dwc_pcie_cleanup_devices();
return ret;
}
static void __exit dwc_pcie_pmu_exit(void)
{
- struct dwc_pcie_dev_info *dev_info, *tmp;
-
if (notify)
bus_unregister_notifier(&pci_bus_type, &dwc_pcie_pmu_nb);
- list_for_each_entry_safe(dev_info, tmp, &dwc_pcie_dev_info_head, dev_node)
- dwc_pcie_unregister_dev(dev_info);
+ dwc_pcie_cleanup_devices();
platform_driver_unregister(&dwc_pcie_pmu_driver);
cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state);
}
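With the platform_data pointer gone, the root port is now identified solely by the platform device id, which packs domain/bus/devfn into one u32. A small sketch of that round trip (helper names are illustrative; the encode side mirrors dwc_pcie_register_dev() and the decode side mirrors the new probe path):

#include <linux/pci.h>

static u32 ex_encode_sbdf(struct pci_dev *pdev)
{
	return (pci_domain_nr(pdev->bus) << 16) |
	       PCI_DEVID(pdev->bus->number, pdev->devfn);
}

static struct pci_dev *ex_decode_sbdf(u32 sbdf)
{
	/* Caller must pci_dev_put() the returned device when finished. */
	return pci_get_domain_bus_and_slot(sbdf >> 16,
					   PCI_BUS_NUM(sbdf & 0xffff),
					   sbdf & 0xff);
}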
diff --git a/drivers/perf/marvell_cn10k_ddr_pmu.c b/drivers/perf/marvell_cn10k_ddr_pmu.c
index 039feded9152..72ac17efd846 100644
--- a/drivers/perf/marvell_cn10k_ddr_pmu.c
+++ b/drivers/perf/marvell_cn10k_ddr_pmu.c
@@ -1064,8 +1064,8 @@ static int cn10k_ddr_perf_probe(struct platform_device *pdev)
if (!name)
return -ENOMEM;
- hrtimer_init(&ddr_pmu->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ddr_pmu->hrtimer.function = cn10k_ddr_pmu_timer_handler;
+ hrtimer_setup(&ddr_pmu->hrtimer, cn10k_ddr_pmu_timer_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
cpuhp_state_add_instance_nocalls(
CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
index cadd60221b8f..6ed4707bd6bb 100644
--- a/drivers/perf/thunderx2_pmu.c
+++ b/drivers/perf/thunderx2_pmu.c
@@ -752,9 +752,8 @@ static int tx2_uncore_pmu_add_dev(struct tx2_uncore_pmu *tx2_pmu)
tx2_pmu->cpu = cpu;
if (tx2_pmu->hrtimer_callback) {
- hrtimer_init(&tx2_pmu->hrtimer,
- CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- tx2_pmu->hrtimer.function = tx2_pmu->hrtimer_callback;
+ hrtimer_setup(&tx2_pmu->hrtimer, tx2_pmu->hrtimer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
ret = tx2_uncore_pmu_register(tx2_pmu);
diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
index 152344e4f7e4..fd0e0cd1c1cf 100644
--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
+++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
@@ -177,9 +177,7 @@ static void phy_mdm6600_cmd(struct phy_mdm6600 *ddata, int val)
values[0] = val;
- gpiod_set_array_value_cansleep(PHY_MDM6600_NR_CMD_LINES,
- ddata->cmd_gpios->desc,
- ddata->cmd_gpios->info, values);
+ gpiod_multi_set_value_cansleep(ddata->cmd_gpios, values);
}
/**
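gpiod_multi_set_value_cansleep() takes the whole gpio_descs bundle instead of the count/desc/info triplet the old call spelled out. Assuming it is the thin convenience wrapper its name suggests (an assumption, not the gpiolib source), it reduces to roughly:

#include <linux/gpio/consumer.h>

/* Assumption: a thin wrapper over the array setter, shown for illustration. */
static inline int ex_multi_set_cansleep(struct gpio_descs *descs,
					unsigned long *value_bitmap)
{
	return gpiod_set_array_value_cansleep(descs->ndescs, descs->desc,
					      descs->info, value_bitmap);
}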
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 95a8e2b9a614..464cc9aca157 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -49,6 +49,20 @@ config PINCTRL_AMD
Requires ACPI/FDT device enumeration code to set up a platform
device.
+config PINCTRL_AMDISP
+ tristate "AMDISP GPIO pin control"
+ depends on DRM_AMD_ISP || COMPILE_TEST
+ depends on HAS_IOMEM
+ select GPIOLIB
+ select PINCONF
+ select GENERIC_PINCONF
+ help
+	  The driver provides memory-mapped GPIO pin control on AMD
+	  platforms with ISP support. All pins are output-only.
+
+	  Requires the AMDGPU driver to add an MFD device for enumeration
+	  and set up as a platform device.
+
config PINCTRL_APPLE_GPIO
tristate "Apple SoC GPIO pin controller driver"
depends on ARCH_APPLE
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index fba1c56624c0..ac27e88677d1 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_GENERIC_PINCONF) += pinconf-generic.o
obj-$(CONFIG_OF) += devicetree.o
obj-$(CONFIG_PINCTRL_AMD) += pinctrl-amd.o
+obj-$(CONFIG_PINCTRL_AMDISP) += pinctrl-amdisp.o
obj-$(CONFIG_PINCTRL_APPLE_GPIO) += pinctrl-apple-gpio.o
obj-$(CONFIG_PINCTRL_ARTPEC6) += pinctrl-artpec6.o
obj-$(CONFIG_PINCTRL_AS3722) += pinctrl-as3722.o
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
index 73dbf29c002f..9ea20fde3a24 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
@@ -52,6 +52,12 @@
#define BCM281XX_HDMI_PIN_REG_MODE_MASK 0x0010
#define BCM281XX_HDMI_PIN_REG_MODE_SHIFT 4
+/* BCM21664 access lock registers */
+#define BCM21664_WR_ACCESS_OFFSET 0x07F0
+#define BCM21664_WR_ACCESS_PASSWORD 0xA5A501
+#define BCM21664_ACCESS_LOCK_OFFSET(lock) (0x0780 + (lock * 4))
+#define BCM21664_ACCESS_LOCK_COUNT 5
+
/*
* bcm281xx_pin_type - types of pin register
*/
@@ -72,24 +78,45 @@ static enum bcm281xx_pin_type hdmi_pin = BCM281XX_PIN_TYPE_HDMI;
struct bcm281xx_pin_function {
const char *name;
const char * const *groups;
- const unsigned ngroups;
+ const unsigned int ngroups;
};
/*
- * bcm281xx_pinctrl_data - Broadcom-specific pinctrl data
- * @reg_base - base of pinctrl registers
+ * Device types (used in bcm281xx_pinctrl_desc to distinguish the
+ * two supported controller variants)
*/
-struct bcm281xx_pinctrl_data {
- void __iomem *reg_base;
+enum bcm281xx_pinctrl_type {
+ BCM281XX_PINCTRL_TYPE,
+ BCM21664_PINCTRL_TYPE,
+};
+
+/*
+ * bcm281xx_pinctrl_info - description of a pinctrl device supported
+ * by this driver, intended to be used as a provider of OF match data.
+ */
+struct bcm281xx_pinctrl_info {
+ enum bcm281xx_pinctrl_type device_type;
/* List of all pins */
const struct pinctrl_pin_desc *pins;
- const unsigned npins;
+ unsigned int npins;
const struct bcm281xx_pin_function *functions;
- const unsigned nfunctions;
+ unsigned int nfunctions;
+
+ const struct regmap_config *regmap_config;
+};
+
+/*
+ * bcm281xx_pinctrl_data - Broadcom-specific pinctrl data
+ * @reg_base - base of pinctrl registers
+ */
+struct bcm281xx_pinctrl_data {
+ struct device *dev;
+ void __iomem *reg_base;
struct regmap *regmap;
+ const struct bcm281xx_pinctrl_info *info;
};
/*
@@ -933,22 +960,598 @@ static const struct bcm281xx_pin_function bcm281xx_functions[] = {
BCM281XX_PIN_FUNCTION(alt4),
};
-static struct bcm281xx_pinctrl_data bcm281xx_pinctrl = {
+static const struct regmap_config bcm281xx_pinctrl_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = BCM281XX_PIN_VC_CAM3_SDA * 4,
+};
+
+static const struct bcm281xx_pinctrl_info bcm281xx_pinctrl = {
+ .device_type = BCM281XX_PINCTRL_TYPE,
+
.pins = bcm281xx_pinctrl_pins,
.npins = ARRAY_SIZE(bcm281xx_pinctrl_pins),
.functions = bcm281xx_functions,
.nfunctions = ARRAY_SIZE(bcm281xx_functions),
+
+ .regmap_config = &bcm281xx_pinctrl_regmap_config,
+};
+
+/* BCM21664 data */
+#define BCM21664_PIN_ADCSYN 0
+#define BCM21664_PIN_BATRM 1
+#define BCM21664_PIN_BSC1CLK 2
+#define BCM21664_PIN_BSC1DAT 3
+#define BCM21664_PIN_CAMCS0 4
+#define BCM21664_PIN_CAMCS1 5
+#define BCM21664_PIN_CLK32K 6
+#define BCM21664_PIN_CLK_CX8 7
+#define BCM21664_PIN_DCLK1 8
+#define BCM21664_PIN_DCLK4 9
+#define BCM21664_PIN_DCLKREQ1 10
+#define BCM21664_PIN_DCLKREQ4 11
+#define BCM21664_PIN_DMIC0CLK 12
+#define BCM21664_PIN_DMIC0DQ 13
+#define BCM21664_PIN_DSI0TE 14
+#define BCM21664_PIN_GPIO00 15
+#define BCM21664_PIN_GPIO01 16
+#define BCM21664_PIN_GPIO02 17
+#define BCM21664_PIN_GPIO03 18
+#define BCM21664_PIN_GPIO04 19
+#define BCM21664_PIN_GPIO05 20
+#define BCM21664_PIN_GPIO06 21
+#define BCM21664_PIN_GPIO07 22
+#define BCM21664_PIN_GPIO08 23
+#define BCM21664_PIN_GPIO09 24
+#define BCM21664_PIN_GPIO10 25
+#define BCM21664_PIN_GPIO11 26
+#define BCM21664_PIN_GPIO12 27
+#define BCM21664_PIN_GPIO13 28
+#define BCM21664_PIN_GPIO14 29
+#define BCM21664_PIN_GPIO15 30
+#define BCM21664_PIN_GPIO16 31
+#define BCM21664_PIN_GPIO17 32
+#define BCM21664_PIN_GPIO18 33
+#define BCM21664_PIN_GPIO19 34
+#define BCM21664_PIN_GPIO20 35
+#define BCM21664_PIN_GPIO21 36
+#define BCM21664_PIN_GPIO22 37
+#define BCM21664_PIN_GPIO23 38
+#define BCM21664_PIN_GPIO24 39
+#define BCM21664_PIN_GPIO25 40
+#define BCM21664_PIN_GPIO26 41
+#define BCM21664_PIN_GPIO27 42
+#define BCM21664_PIN_GPIO28 43
+#define BCM21664_PIN_GPIO32 44
+#define BCM21664_PIN_GPIO33 45
+#define BCM21664_PIN_GPIO34 46
+#define BCM21664_PIN_GPS_CALREQ 47
+#define BCM21664_PIN_GPS_HOSTREQ 48
+#define BCM21664_PIN_GPS_PABLANK 49
+#define BCM21664_PIN_GPS_TMARK 50
+#define BCM21664_PIN_ICUSBDM 51
+#define BCM21664_PIN_ICUSBDP 52
+#define BCM21664_PIN_LCDCS0 53
+#define BCM21664_PIN_LCDRES 54
+#define BCM21664_PIN_LCDSCL 55
+#define BCM21664_PIN_LCDSDA 56
+#define BCM21664_PIN_LCDTE 57
+#define BCM21664_PIN_MDMGPIO00 58
+#define BCM21664_PIN_MDMGPIO01 59
+#define BCM21664_PIN_MDMGPIO02 60
+#define BCM21664_PIN_MDMGPIO03 61
+#define BCM21664_PIN_MDMGPIO04 62
+#define BCM21664_PIN_MDMGPIO05 63
+#define BCM21664_PIN_MDMGPIO06 64
+#define BCM21664_PIN_MDMGPIO07 65
+#define BCM21664_PIN_MDMGPIO08 66
+#define BCM21664_PIN_MMC0CK 67
+#define BCM21664_PIN_MMC0CMD 68
+#define BCM21664_PIN_MMC0DAT0 69
+#define BCM21664_PIN_MMC0DAT1 70
+#define BCM21664_PIN_MMC0DAT2 71
+#define BCM21664_PIN_MMC0DAT3 72
+#define BCM21664_PIN_MMC0DAT4 73
+#define BCM21664_PIN_MMC0DAT5 74
+#define BCM21664_PIN_MMC0DAT6 75
+#define BCM21664_PIN_MMC0DAT7 76
+#define BCM21664_PIN_MMC0RST 77
+#define BCM21664_PIN_MMC1CK 78
+#define BCM21664_PIN_MMC1CMD 79
+#define BCM21664_PIN_MMC1DAT0 80
+#define BCM21664_PIN_MMC1DAT1 81
+#define BCM21664_PIN_MMC1DAT2 82
+#define BCM21664_PIN_MMC1DAT3 83
+#define BCM21664_PIN_MMC1DAT4 84
+#define BCM21664_PIN_MMC1DAT5 85
+#define BCM21664_PIN_MMC1DAT6 86
+#define BCM21664_PIN_MMC1DAT7 87
+#define BCM21664_PIN_MMC1RST 88
+#define BCM21664_PIN_PC1 89
+#define BCM21664_PIN_PC2 90
+#define BCM21664_PIN_PMBSCCLK 91
+#define BCM21664_PIN_PMBSCDAT 92
+#define BCM21664_PIN_PMUINT 93
+#define BCM21664_PIN_RESETN 94
+#define BCM21664_PIN_RFST2G_MTSLOTEN3G 95
+#define BCM21664_PIN_RTXDATA2G_TXDATA3G1 96
+#define BCM21664_PIN_RTXEN2G_TXDATA3G2 97
+#define BCM21664_PIN_RXDATA3G0 98
+#define BCM21664_PIN_RXDATA3G1 99
+#define BCM21664_PIN_RXDATA3G2 100
+#define BCM21664_PIN_SDCK 101
+#define BCM21664_PIN_SDCMD 102
+#define BCM21664_PIN_SDDAT0 103
+#define BCM21664_PIN_SDDAT1 104
+#define BCM21664_PIN_SDDAT2 105
+#define BCM21664_PIN_SDDAT3 106
+#define BCM21664_PIN_SIMCLK 107
+#define BCM21664_PIN_SIMDAT 108
+#define BCM21664_PIN_SIMDET 109
+#define BCM21664_PIN_SIMRST 110
+#define BCM21664_PIN_GPIO93 111
+#define BCM21664_PIN_GPIO94 112
+#define BCM21664_PIN_SPI0CLK 113
+#define BCM21664_PIN_SPI0FSS 114
+#define BCM21664_PIN_SPI0RXD 115
+#define BCM21664_PIN_SPI0TXD 116
+#define BCM21664_PIN_SRI_C 117
+#define BCM21664_PIN_SRI_D 118
+#define BCM21664_PIN_SRI_E 119
+#define BCM21664_PIN_SSPCK 120
+#define BCM21664_PIN_SSPDI 121
+#define BCM21664_PIN_SSPDO 122
+#define BCM21664_PIN_SSPSYN 123
+#define BCM21664_PIN_STAT1 124
+#define BCM21664_PIN_STAT2 125
+#define BCM21664_PIN_SWCLKTCK 126
+#define BCM21664_PIN_SWDIOTMS 127
+#define BCM21664_PIN_SYSCLKEN 128
+#define BCM21664_PIN_TDI 129
+#define BCM21664_PIN_TDO 130
+#define BCM21664_PIN_TESTMODE 131
+#define BCM21664_PIN_TRACECLK 132
+#define BCM21664_PIN_TRACEDT00 133
+#define BCM21664_PIN_TRACEDT01 134
+#define BCM21664_PIN_TRACEDT02 135
+#define BCM21664_PIN_TRACEDT03 136
+#define BCM21664_PIN_TRACEDT04 137
+#define BCM21664_PIN_TRACEDT05 138
+#define BCM21664_PIN_TRACEDT06 139
+#define BCM21664_PIN_TRACEDT07 140
+#define BCM21664_PIN_TRSTB 141
+#define BCM21664_PIN_TXDATA3G0 142
+#define BCM21664_PIN_UBCTSN 143
+#define BCM21664_PIN_UBRTSN 144
+#define BCM21664_PIN_UBRX 145
+#define BCM21664_PIN_UBTX 146
+#define BCM21664_PIN_TRACEDT08 147
+#define BCM21664_PIN_TRACEDT09 148
+#define BCM21664_PIN_TRACEDT10 149
+#define BCM21664_PIN_TRACEDT11 150
+#define BCM21664_PIN_TRACEDT12 151
+#define BCM21664_PIN_TRACEDT13 152
+#define BCM21664_PIN_TRACEDT14 153
+#define BCM21664_PIN_TRACEDT15 154
+
+static const struct pinctrl_pin_desc bcm21664_pinctrl_pins[] = {
+ BCM281XX_PIN_DESC(BCM21664_PIN_ADCSYN, "adcsyn", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_BATRM, "batrm", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_BSC1CLK, "bsc1clk", i2c),
+ BCM281XX_PIN_DESC(BCM21664_PIN_BSC1DAT, "bsc1dat", i2c),
+ BCM281XX_PIN_DESC(BCM21664_PIN_CAMCS0, "camcs0", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_CAMCS1, "camcs1", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_CLK32K, "clk32k", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_CLK_CX8, "clk_cx8", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_DCLK1, "dclk1", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_DCLK4, "dclk4", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_DCLKREQ1, "dclkreq1", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_DCLKREQ4, "dclkreq4", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_DMIC0CLK, "dmic0clk", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_DMIC0DQ, "dmic0dq", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_DSI0TE, "dsi0te", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO00, "gpio00", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO01, "gpio01", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO02, "gpio02", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO03, "gpio03", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO04, "gpio04", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO05, "gpio05", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO06, "gpio06", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO07, "gpio07", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO08, "gpio08", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO09, "gpio09", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO10, "gpio10", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO11, "gpio11", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO12, "gpio12", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO13, "gpio13", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO14, "gpio14", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO15, "gpio15", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO16, "gpio16", i2c),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO17, "gpio17", i2c),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO18, "gpio18", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO19, "gpio19", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO20, "gpio20", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO21, "gpio21", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO22, "gpio22", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO23, "gpio23", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO24, "gpio24", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO25, "gpio25", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO26, "gpio26", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO27, "gpio27", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO28, "gpio28", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO32, "gpio32", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO33, "gpio33", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO34, "gpio34", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPS_CALREQ, "gps_calreq", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPS_HOSTREQ, "gps_hostreq", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPS_PABLANK, "gps_pablank", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPS_TMARK, "gps_tmark", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_ICUSBDM, "icusbdm", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_ICUSBDP, "icusbdp", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_LCDCS0, "lcdcs0", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_LCDRES, "lcdres", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_LCDSCL, "lcdscl", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_LCDSDA, "lcdsda", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_LCDTE, "lcdte", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MDMGPIO00, "mdmgpio00", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MDMGPIO01, "mdmgpio01", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MDMGPIO02, "mdmgpio02", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MDMGPIO03, "mdmgpio03", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MDMGPIO04, "mdmgpio04", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MDMGPIO05, "mdmgpio05", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MDMGPIO06, "mdmgpio06", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MDMGPIO07, "mdmgpio07", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MDMGPIO08, "mdmgpio08", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MMC0CK, "mmc0ck", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MMC0CMD, "mmc0cmd", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MMC0DAT0, "mmc0dat0", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MMC0DAT1, "mmc0dat1", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MMC0DAT2, "mmc0dat2", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MMC0DAT3, "mmc0dat3", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MMC0DAT4, "mmc0dat4", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MMC0DAT5, "mmc0dat5", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MMC0DAT6, "mmc0dat6", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MMC0DAT7, "mmc0dat7", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MMC0RST, "mmc0rst", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MMC1CK, "mmc1ck", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MMC1CMD, "mmc1cmd", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MMC1DAT0, "mmc1dat0", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MMC1DAT1, "mmc1dat1", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MMC1DAT2, "mmc1dat2", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MMC1DAT3, "mmc1dat3", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MMC1DAT4, "mmc1dat4", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MMC1DAT5, "mmc1dat5", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MMC1DAT6, "mmc1dat6", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MMC1DAT7, "mmc1dat7", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_MMC1RST, "mmc1rst", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_PC1, "pc1", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_PC2, "pc2", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_PMBSCCLK, "pmbscclk", i2c),
+ BCM281XX_PIN_DESC(BCM21664_PIN_PMBSCDAT, "pmbscdat", i2c),
+ BCM281XX_PIN_DESC(BCM21664_PIN_PMUINT, "pmuint", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_RESETN, "resetn", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_RFST2G_MTSLOTEN3G, "rfst2g_mtsloten3g", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_RTXDATA2G_TXDATA3G1, "rtxdata2g_txdata3g1", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_RTXEN2G_TXDATA3G2, "rtxen2g_txdata3g2", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_RXDATA3G0, "rxdata3g0", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_RXDATA3G1, "rxdata3g1", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_RXDATA3G2, "rxdata3g2", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_SDCK, "sdck", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_SDCMD, "sdcmd", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_SDDAT0, "sddat0", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_SDDAT1, "sddat1", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_SDDAT2, "sddat2", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_SDDAT3, "sddat3", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_SIMCLK, "simclk", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_SIMDAT, "simdat", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_SIMDET, "simdet", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_SIMRST, "simrst", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO93, "gpio93", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_GPIO94, "gpio94", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_SPI0CLK, "spi0clk", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_SPI0FSS, "spi0fss", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_SPI0RXD, "spi0rxd", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_SPI0TXD, "spi0txd", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_SRI_C, "sri_c", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_SRI_D, "sri_d", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_SRI_E, "sri_e", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_SSPCK, "sspck", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_SSPDI, "sspdi", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_SSPDO, "sspdo", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_SSPSYN, "sspsyn", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_STAT1, "stat1", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_STAT2, "stat2", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_SWCLKTCK, "swclktck", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_SWDIOTMS, "swdiotms", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_SYSCLKEN, "sysclken", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_TDI, "tdi", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_TDO, "tdo", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_TESTMODE, "testmode", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_TRACECLK, "traceclk", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_TRACEDT00, "tracedt00", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_TRACEDT01, "tracedt01", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_TRACEDT02, "tracedt02", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_TRACEDT03, "tracedt03", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_TRACEDT04, "tracedt04", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_TRACEDT05, "tracedt05", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_TRACEDT06, "tracedt06", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_TRACEDT07, "tracedt07", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_TRSTB, "trstb", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_TXDATA3G0, "txdata3g0", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_UBCTSN, "ubctsn", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_UBRTSN, "ubrtsn", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_UBRX, "ubrx", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_UBTX, "ubtx", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_TRACEDT08, "tracedt08", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_TRACEDT09, "tracedt09", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_TRACEDT10, "tracedt10", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_TRACEDT11, "tracedt11", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_TRACEDT12, "tracedt12", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_TRACEDT13, "tracedt13", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_TRACEDT14, "tracedt14", std),
+ BCM281XX_PIN_DESC(BCM21664_PIN_TRACEDT15, "tracedt15", std),
};
+static const char * const bcm21664_alt_groups[] = {
+ "adcsyn",
+ "batrm",
+ "bsc1clk",
+ "bsc1dat",
+ "camcs0",
+ "camcs1",
+ "clk32k",
+ "clk_cx8",
+ "dclk1",
+ "dclk4",
+ "dclkreq1",
+ "dclkreq4",
+ "dmic0clk",
+ "dmic0dq",
+ "dsi0te",
+ "gpio00",
+ "gpio01",
+ "gpio02",
+ "gpio03",
+ "gpio04",
+ "gpio05",
+ "gpio06",
+ "gpio07",
+ "gpio08",
+ "gpio09",
+ "gpio10",
+ "gpio11",
+ "gpio12",
+ "gpio13",
+ "gpio14",
+ "gpio15",
+ "gpio16",
+ "gpio17",
+ "gpio18",
+ "gpio19",
+ "gpio20",
+ "gpio21",
+ "gpio22",
+ "gpio23",
+ "gpio24",
+ "gpio25",
+ "gpio26",
+ "gpio27",
+ "gpio28",
+ "gpio32",
+ "gpio33",
+ "gpio34",
+ "gps_calreq",
+ "gps_hostreq",
+ "gps_pablank",
+ "gps_tmark",
+ "icusbdm",
+ "icusbdp",
+ "lcdcs0",
+ "lcdres",
+ "lcdscl",
+ "lcdsda",
+ "lcdte",
+ "mdmgpio00",
+ "mdmgpio01",
+ "mdmgpio02",
+ "mdmgpio03",
+ "mdmgpio04",
+ "mdmgpio05",
+ "mdmgpio06",
+ "mdmgpio07",
+ "mdmgpio08",
+ "mmc0ck",
+ "mmc0cmd",
+ "mmc0dat0",
+ "mmc0dat1",
+ "mmc0dat2",
+ "mmc0dat3",
+ "mmc0dat4",
+ "mmc0dat5",
+ "mmc0dat6",
+ "mmc0dat7",
+ "mmc0rst",
+ "mmc1ck",
+ "mmc1cmd",
+ "mmc1dat0",
+ "mmc1dat1",
+ "mmc1dat2",
+ "mmc1dat3",
+ "mmc1dat4",
+ "mmc1dat5",
+ "mmc1dat6",
+ "mmc1dat7",
+ "mmc1rst",
+ "pc1",
+ "pc2",
+ "pmbscclk",
+ "pmbscdat",
+ "pmuint",
+ "resetn",
+ "rfst2g_mtsloten3g",
+ "rtxdata2g_txdata3g1",
+ "rtxen2g_txdata3g2",
+ "rxdata3g0",
+ "rxdata3g1",
+ "rxdata3g2",
+ "sdck",
+ "sdcmd",
+ "sddat0",
+ "sddat1",
+ "sddat2",
+ "sddat3",
+ "simclk",
+ "simdat",
+ "simdet",
+ "simrst",
+ "gpio93",
+ "gpio94",
+ "spi0clk",
+ "spi0fss",
+ "spi0rxd",
+ "spi0txd",
+ "sri_c",
+ "sri_d",
+ "sri_e",
+ "sspck",
+ "sspdi",
+ "sspdo",
+ "sspsyn",
+ "stat1",
+ "stat2",
+ "swclktck",
+ "swdiotms",
+ "sysclken",
+ "tdi",
+ "tdo",
+ "testmode",
+ "traceclk",
+ "tracedt00",
+ "tracedt01",
+ "tracedt02",
+ "tracedt03",
+ "tracedt04",
+ "tracedt05",
+ "tracedt06",
+ "tracedt07",
+ "trstb",
+ "txdata3g0",
+ "ubctsn",
+ "ubrtsn",
+ "ubrx",
+ "ubtx",
+ "tracedt08",
+ "tracedt09",
+ "tracedt10",
+ "tracedt11",
+ "tracedt12",
+ "tracedt13",
+ "tracedt14",
+ "tracedt15",
+};
+
+#define BCM21664_PIN_FUNCTION(fcn_name) \
+{ \
+ .name = #fcn_name, \
+ .groups = bcm21664_alt_groups, \
+ .ngroups = ARRAY_SIZE(bcm21664_alt_groups), \
+}
+
+static const struct bcm281xx_pin_function bcm21664_functions[] = {
+ BCM21664_PIN_FUNCTION(alt1),
+ BCM21664_PIN_FUNCTION(alt2),
+ BCM21664_PIN_FUNCTION(alt3),
+ BCM21664_PIN_FUNCTION(alt4),
+ BCM21664_PIN_FUNCTION(alt5),
+ BCM21664_PIN_FUNCTION(alt6),
+};
+
+static const struct regmap_config bcm21664_pinctrl_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = BCM21664_WR_ACCESS_OFFSET,
+};
+
+static const struct bcm281xx_pinctrl_info bcm21664_pinctrl = {
+ .device_type = BCM21664_PINCTRL_TYPE,
+
+ .pins = bcm21664_pinctrl_pins,
+ .npins = ARRAY_SIZE(bcm21664_pinctrl_pins),
+ .functions = bcm21664_functions,
+ .nfunctions = ARRAY_SIZE(bcm21664_functions),
+
+ .regmap_config = &bcm21664_pinctrl_regmap_config,
+};
+
+/* BCM21664 pinctrl access lock handlers */
+static int bcm21664_pinctrl_lock_all(struct bcm281xx_pinctrl_data *pdata)
+{
+ int i, rc;
+
+ for (i = 0; i < BCM21664_ACCESS_LOCK_COUNT; i++) {
+ rc = regmap_write(pdata->regmap, BCM21664_WR_ACCESS_OFFSET,
+ BCM21664_WR_ACCESS_PASSWORD);
+ if (rc) {
+ dev_err(pdata->dev, "Failed to enable write access: %d\n",
+ rc);
+ return rc;
+ }
+ rc = regmap_write(pdata->regmap, BCM21664_ACCESS_LOCK_OFFSET(i),
+ 0xffffffff);
+ if (rc) {
+ dev_err(pdata->dev, "Failed to write access lock: %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int bcm21664_pinctrl_set_pin_lock(struct bcm281xx_pinctrl_data *pdata,
+ unsigned int pin, bool lock)
+{
+ unsigned int access_lock = pin / 32;
+ int rc;
+
+ dev_dbg(pdata->dev,
+ "%s(): %s pin %s (%d)\n",
+ __func__, lock ? "Lock" : "Unlock", pdata->info->pins[pin].name,
+ pin);
+
+ rc = regmap_write(pdata->regmap, BCM21664_WR_ACCESS_OFFSET,
+ BCM21664_WR_ACCESS_PASSWORD);
+ if (rc) {
+ dev_err(pdata->dev, "Failed to enable write access: %d\n",
+ rc);
+ return rc;
+ }
+
+ rc = regmap_update_bits(pdata->regmap,
+ BCM21664_ACCESS_LOCK_OFFSET(access_lock),
+ BIT(pin % 32),
+ (int)lock << (pin % 32));
+
+ if (rc) {
+ dev_err(pdata->dev, "Failed to %s pin: %d\n",
+ lock ? "lock" : "unlock", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
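The two helpers above are used further down in bcm281xx_pinmux_set(): on BCM21664 the function-select update is bracketed by an unlock/relock of the pin's access-lock bit. Condensed and illustrative only (error handling trimmed, and assuming the caller passes the function number already shifted into the FSEL field), the flow is roughly:

/* Condensed sketch of the BCM21664 mux write path, not the driver's code. */
static int ex_bcm21664_mux_write(struct bcm281xx_pinctrl_data *pdata,
				 unsigned int pin, u32 fsel_val)
{
	int rc;

	/* Clear the pin's lock bit (password write + lock-register update). */
	rc = bcm21664_pinctrl_set_pin_lock(pdata, pin, false);
	if (rc)
		return rc;

	/* fsel_val is assumed to be pre-shifted into the FSEL field. */
	rc = regmap_update_bits(pdata->regmap, 4 * pin,
				BCM281XX_PIN_REG_F_SEL_MASK, fsel_val);

	/* Relock regardless of whether the update succeeded. */
	bcm21664_pinctrl_set_pin_lock(pdata, pin, true);

	return rc;
}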
static inline enum bcm281xx_pin_type pin_type_get(struct pinctrl_dev *pctldev,
- unsigned pin)
+ unsigned int pin)
{
struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
- if (pin >= pdata->npins)
+ if (pin >= pdata->info->npins)
return BCM281XX_PIN_TYPE_UNKNOWN;
- return *(enum bcm281xx_pin_type *)(pdata->pins[pin].drv_data);
+ return *(enum bcm281xx_pin_type *)(pdata->info->pins[pin].drv_data);
}
#define BCM281XX_PIN_SHIFT(type, param) \
@@ -970,36 +1573,29 @@ static inline void bcm281xx_pin_update(u32 *reg_val, u32 *reg_mask,
*reg_mask |= param_mask;
}
-static const struct regmap_config bcm281xx_pinctrl_regmap_config = {
- .reg_bits = 32,
- .reg_stride = 4,
- .val_bits = 32,
- .max_register = BCM281XX_PIN_VC_CAM3_SDA,
-};
-
static int bcm281xx_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
{
struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
- return pdata->npins;
+ return pdata->info->npins;
}
static const char *bcm281xx_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
- unsigned group)
+ unsigned int group)
{
struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
- return pdata->pins[group].name;
+ return pdata->info->pins[group].name;
}
static int bcm281xx_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
- unsigned group,
+ unsigned int group,
const unsigned **pins,
- unsigned *num_pins)
+ unsigned int *num_pins)
{
struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
- *pins = &pdata->pins[group].number;
+ *pins = &pdata->info->pins[group].number;
*num_pins = 1;
return 0;
@@ -1007,7 +1603,7 @@ static int bcm281xx_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
static void bcm281xx_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev,
struct seq_file *s,
- unsigned offset)
+ unsigned int offset)
{
seq_printf(s, " %s", dev_name(pctldev->dev));
}
@@ -1025,43 +1621,53 @@ static int bcm281xx_pinctrl_get_fcns_count(struct pinctrl_dev *pctldev)
{
struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
- return pdata->nfunctions;
+ return pdata->info->nfunctions;
}
static const char *bcm281xx_pinctrl_get_fcn_name(struct pinctrl_dev *pctldev,
- unsigned function)
+ unsigned int function)
{
struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
- return pdata->functions[function].name;
+ return pdata->info->functions[function].name;
}
static int bcm281xx_pinctrl_get_fcn_groups(struct pinctrl_dev *pctldev,
- unsigned function,
+ unsigned int function,
const char * const **groups,
- unsigned * const num_groups)
+ unsigned int * const num_groups)
{
struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
- *groups = pdata->functions[function].groups;
- *num_groups = pdata->functions[function].ngroups;
+ *groups = pdata->info->functions[function].groups;
+ *num_groups = pdata->info->functions[function].ngroups;
return 0;
}
static int bcm281xx_pinmux_set(struct pinctrl_dev *pctldev,
- unsigned function,
- unsigned group)
+ unsigned int function,
+ unsigned int group)
{
struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
- const struct bcm281xx_pin_function *f = &pdata->functions[function];
- u32 offset = 4 * pdata->pins[group].number;
+ const struct bcm281xx_pin_function *f = &pdata->info->functions[function];
+ enum bcm281xx_pinctrl_type device_type = pdata->info->device_type;
+ unsigned int pin = pdata->info->pins[group].number;
+ u32 offset = 4 * pin;
int rc = 0;
dev_dbg(pctldev->dev,
"%s(): Enable function %s (%d) of pin %s (%d) @offset 0x%x.\n",
- __func__, f->name, function, pdata->pins[group].name,
- pdata->pins[group].number, offset);
+ __func__, f->name, function, pdata->info->pins[group].name,
+ pin, offset);
+
+ if (device_type == BCM21664_PINCTRL_TYPE) {
+ rc = bcm21664_pinctrl_set_pin_lock(pdata, pin, false);
+ if (rc) {
+ /* Error is printed in bcm21664_pinctrl_set_pin_lock */
+ return rc;
+ }
+ }
rc = regmap_update_bits(pdata->regmap, offset,
BCM281XX_PIN_REG_F_SEL_MASK,
@@ -1069,7 +1675,15 @@ static int bcm281xx_pinmux_set(struct pinctrl_dev *pctldev,
if (rc)
dev_err(pctldev->dev,
"Error updating register for pin %s (%d).\n",
- pdata->pins[group].name, pdata->pins[group].number);
+ pdata->info->pins[group].name, pin);
+
+ if (device_type == BCM21664_PINCTRL_TYPE) {
+ rc = bcm21664_pinctrl_set_pin_lock(pdata, pin, true);
+ if (rc) {
+ /* Error is printed in bcm21664_pinctrl_set_pin_lock */
+ return rc;
+ }
+ }
return rc;
}
@@ -1082,7 +1696,7 @@ static const struct pinmux_ops bcm281xx_pinctrl_pinmux_ops = {
};
static int bcm281xx_pinctrl_pin_config_get(struct pinctrl_dev *pctldev,
- unsigned pin,
+ unsigned int pin,
unsigned long *config)
{
return -ENOTSUPP;
@@ -1091,9 +1705,9 @@ static int bcm281xx_pinctrl_pin_config_get(struct pinctrl_dev *pctldev,
/* Goes through the configs and update register val/mask */
static int bcm281xx_std_pin_update(struct pinctrl_dev *pctldev,
- unsigned pin,
+ unsigned int pin,
unsigned long *configs,
- unsigned num_configs,
+ unsigned int num_configs,
u32 *val,
u32 *mask)
{
@@ -1168,7 +1782,7 @@ static int bcm281xx_std_pin_update(struct pinctrl_dev *pctldev,
"Invalid Drive Strength value (%d) for "
"pin %s (%d). Valid values are "
"(2..16) mA, even numbers only.\n",
- arg, pdata->pins[pin].name, pin);
+ arg, pdata->info->pins[pin].name, pin);
return -EINVAL;
}
bcm281xx_pin_update(val, mask, (arg/2)-1,
@@ -1179,7 +1793,7 @@ static int bcm281xx_std_pin_update(struct pinctrl_dev *pctldev,
default:
dev_err(pctldev->dev,
"Unrecognized pin config %d for pin %s (%d).\n",
- param, pdata->pins[pin].name, pin);
+ param, pdata->info->pins[pin].name, pin);
return -EINVAL;
} /* switch config */
@@ -1207,9 +1821,9 @@ static const u16 bcm281xx_pullup_map[] = {
/* Goes through the configs and update register val/mask */
static int bcm281xx_i2c_pin_update(struct pinctrl_dev *pctldev,
- unsigned pin,
+ unsigned int pin,
unsigned long *configs,
- unsigned num_configs,
+ unsigned int num_configs,
u32 *val,
u32 *mask)
{
@@ -1233,7 +1847,7 @@ static int bcm281xx_i2c_pin_update(struct pinctrl_dev *pctldev,
"Invalid pull-up value (%d) for pin %s "
"(%d). Valid values are 568, 720, 831, "
"1080, 1200, 1800, 2700 Ohms.\n",
- arg, pdata->pins[pin].name, pin);
+ arg, pdata->info->pins[pin].name, pin);
return -EINVAL;
}
@@ -1266,7 +1880,69 @@ static int bcm281xx_i2c_pin_update(struct pinctrl_dev *pctldev,
default:
dev_err(pctldev->dev,
"Unrecognized pin config %d for pin %s (%d).\n",
- param, pdata->pins[pin].name, pin);
+ param, pdata->info->pins[pin].name, pin);
+ return -EINVAL;
+
+ } /* switch config */
+ } /* for each config */
+
+ return 0;
+}
+
+/* Goes through the configs and updates register val/mask */
+static int bcm21664_i2c_pin_update(struct pinctrl_dev *pctldev,
+ unsigned int pin,
+ unsigned long *configs,
+ unsigned int num_configs,
+ u32 *val,
+ u32 *mask)
+{
+ struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+ int i;
+ enum pin_config_param param;
+ u32 arg;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ /*
+ * BCM21664 I2C pins use the same config bits as standard pins,
+ * but only pull up/none, slew rate and input enable/disable
+ * options are supported.
+ */
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ bcm281xx_pin_update(val, mask, 1,
+ BCM281XX_PIN_SHIFT(STD, PULL_UP),
+ BCM281XX_PIN_MASK(STD, PULL_UP));
+ break;
+
+ case PIN_CONFIG_BIAS_DISABLE:
+ bcm281xx_pin_update(val, mask, 0,
+ BCM281XX_PIN_SHIFT(STD, PULL_UP),
+ BCM281XX_PIN_MASK(STD, PULL_UP));
+ break;
+
+ case PIN_CONFIG_SLEW_RATE:
+ arg = (arg >= 1 ? 1 : 0);
+ bcm281xx_pin_update(val, mask, arg,
+ BCM281XX_PIN_SHIFT(STD, SLEW),
+ BCM281XX_PIN_MASK(STD, SLEW));
+ break;
+
+ case PIN_CONFIG_INPUT_ENABLE:
+ /* inverted since register is for input _disable_ */
+ arg = (arg >= 1 ? 0 : 1);
+ bcm281xx_pin_update(val, mask, arg,
+ BCM281XX_PIN_SHIFT(STD, INPUT_DIS),
+ BCM281XX_PIN_MASK(STD, INPUT_DIS));
+ break;
+
+ default:
+ dev_err(pctldev->dev,
+ "Unrecognized pin config %d for pin %s (%d).\n",
+ param, pdata->info->pins[pin].name, pin);
return -EINVAL;
} /* switch config */
@@ -1277,9 +1953,9 @@ static int bcm281xx_i2c_pin_update(struct pinctrl_dev *pctldev,
/* Goes through the configs and update register val/mask */
static int bcm281xx_hdmi_pin_update(struct pinctrl_dev *pctldev,
- unsigned pin,
+ unsigned int pin,
unsigned long *configs,
- unsigned num_configs,
+ unsigned int num_configs,
u32 *val,
u32 *mask)
{
@@ -1311,7 +1987,7 @@ static int bcm281xx_hdmi_pin_update(struct pinctrl_dev *pctldev,
default:
dev_err(pctldev->dev,
"Unrecognized pin config %d for pin %s (%d).\n",
- param, pdata->pins[pin].name, pin);
+ param, pdata->info->pins[pin].name, pin);
return -EINVAL;
} /* switch config */
@@ -1321,11 +1997,12 @@ static int bcm281xx_hdmi_pin_update(struct pinctrl_dev *pctldev,
}
static int bcm281xx_pinctrl_pin_config_set(struct pinctrl_dev *pctldev,
- unsigned pin,
+ unsigned int pin,
unsigned long *configs,
- unsigned num_configs)
+ unsigned int num_configs)
{
struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+ enum bcm281xx_pinctrl_type device_type = pdata->info->device_type;
enum bcm281xx_pin_type pin_type;
u32 offset = 4 * pin;
u32 cfg_val, cfg_mask;
@@ -1343,8 +2020,12 @@ static int bcm281xx_pinctrl_pin_config_set(struct pinctrl_dev *pctldev,
break;
case BCM281XX_PIN_TYPE_I2C:
- rc = bcm281xx_i2c_pin_update(pctldev, pin, configs,
- num_configs, &cfg_val, &cfg_mask);
+ if (device_type == BCM21664_PINCTRL_TYPE)
+ rc = bcm21664_i2c_pin_update(pctldev, pin, configs,
+ num_configs, &cfg_val, &cfg_mask);
+ else
+ rc = bcm281xx_i2c_pin_update(pctldev, pin, configs,
+ num_configs, &cfg_val, &cfg_mask);
break;
case BCM281XX_PIN_TYPE_HDMI:
@@ -1354,7 +2035,7 @@ static int bcm281xx_pinctrl_pin_config_set(struct pinctrl_dev *pctldev,
default:
dev_err(pctldev->dev, "Unknown pin type for pin %s (%d).\n",
- pdata->pins[pin].name, pin);
+ pdata->info->pins[pin].name, pin);
return -EINVAL;
} /* switch pin type */
@@ -1364,16 +2045,32 @@ static int bcm281xx_pinctrl_pin_config_set(struct pinctrl_dev *pctldev,
dev_dbg(pctldev->dev,
"%s(): Set pin %s (%d) with config 0x%x, mask 0x%x\n",
- __func__, pdata->pins[pin].name, pin, cfg_val, cfg_mask);
+ __func__, pdata->info->pins[pin].name, pin, cfg_val, cfg_mask);
+
+ if (device_type == BCM21664_PINCTRL_TYPE) {
+ rc = bcm21664_pinctrl_set_pin_lock(pdata, pin, false);
+ if (rc) {
+ /* Error is printed in bcm21664_pinctrl_set_pin_lock */
+ return rc;
+ }
+ }
rc = regmap_update_bits(pdata->regmap, offset, cfg_mask, cfg_val);
if (rc) {
dev_err(pctldev->dev,
"Error updating register for pin %s (%d).\n",
- pdata->pins[pin].name, pin);
+ pdata->info->pins[pin].name, pin);
return rc;
}
+ if (device_type == BCM21664_PINCTRL_TYPE) {
+ rc = bcm21664_pinctrl_set_pin_lock(pdata, pin, true);
+ if (rc) {
+ /* Error is printed in bcm21664_pinctrl_set_pin_lock */
+ return rc;
+ }
+ }
+
return 0;
}
@@ -1390,10 +2087,23 @@ static struct pinctrl_desc bcm281xx_pinctrl_desc = {
.owner = THIS_MODULE,
};
+static struct bcm281xx_pinctrl_data bcm281xx_pinctrl_pdata;
+
static int __init bcm281xx_pinctrl_probe(struct platform_device *pdev)
{
- struct bcm281xx_pinctrl_data *pdata = &bcm281xx_pinctrl;
+ struct bcm281xx_pinctrl_data *pdata = &bcm281xx_pinctrl_pdata;
struct pinctrl_dev *pctl;
+ int rc;
+
+ /* Set device pointer in platform data */
+ pdata->dev = &pdev->dev;
+
+ /* Get the data to use from OF match */
+ pdata->info = of_device_get_match_data(&pdev->dev);
+ if (!pdata->info) {
+ dev_err(&pdev->dev, "Failed to get data from OF match\n");
+ return -ENODEV;
+ }
/* So far We can assume there is only 1 bank of registers */
pdata->reg_base = devm_platform_ioremap_resource(pdev, 0);
@@ -1404,15 +2114,27 @@ static int __init bcm281xx_pinctrl_probe(struct platform_device *pdev)
/* Initialize the dynamic part of pinctrl_desc */
pdata->regmap = devm_regmap_init_mmio(&pdev->dev, pdata->reg_base,
- &bcm281xx_pinctrl_regmap_config);
+ pdata->info->regmap_config);
if (IS_ERR(pdata->regmap)) {
dev_err(&pdev->dev, "Regmap MMIO init failed.\n");
return -ENODEV;
}
bcm281xx_pinctrl_desc.name = dev_name(&pdev->dev);
- bcm281xx_pinctrl_desc.pins = bcm281xx_pinctrl.pins;
- bcm281xx_pinctrl_desc.npins = bcm281xx_pinctrl.npins;
+ bcm281xx_pinctrl_desc.pins = pdata->info->pins;
+ bcm281xx_pinctrl_desc.npins = pdata->info->npins;
+
+ /*
+ * For BCM21664, lock all pins by default; they will be unlocked
+ * as needed
+ */
+ if (pdata->info->device_type == BCM21664_PINCTRL_TYPE) {
+ rc = bcm21664_pinctrl_lock_all(pdata);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to lock all pins\n");
+ return rc;
+ }
+ }
pctl = devm_pinctrl_register(&pdev->dev, &bcm281xx_pinctrl_desc, pdata);
if (IS_ERR(pctl)) {
@@ -1426,7 +2148,8 @@ static int __init bcm281xx_pinctrl_probe(struct platform_device *pdev)
}
static const struct of_device_id bcm281xx_pinctrl_of_match[] = {
- { .compatible = "brcm,bcm11351-pinctrl", },
+ { .compatible = "brcm,bcm11351-pinctrl", .data = &bcm281xx_pinctrl },
+ { .compatible = "brcm,bcm21664-pinctrl", .data = &bcm21664_pinctrl },
{ },
};
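
The match data entries above point at per-variant description structures that probe and the pinctrl callbacks reach through pdata->info (pins, npins, functions, nfunctions, device_type, regmap_config). A hedged sketch of the shape such an info structure could take, with field names inferred from the pdata->info accesses in this diff rather than copied from the driver:

/* Hypothetical per-variant data, as referenced through pdata->info above. */
struct bcm281xx_pinctrl_info_sketch {
        enum bcm281xx_pinctrl_type device_type;
        const struct regmap_config *regmap_config;
        const struct pinctrl_pin_desc *pins;
        unsigned int npins;
        const struct bcm281xx_pin_function *functions;
        unsigned int nfunctions;
};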
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index cc1fe0555e19..eaeec096bc9a 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -346,14 +346,14 @@ static int bcm2835_gpio_get_direction(struct gpio_chip *chip, unsigned int offse
struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
enum bcm2835_fsel fsel = bcm2835_pinctrl_fsel_get(pc, offset);
- /* Alternative function doesn't clearly provide a direction */
- if (fsel > BCM2835_FSEL_GPIO_OUT)
- return -EINVAL;
-
- if (fsel == BCM2835_FSEL_GPIO_IN)
- return GPIO_LINE_DIRECTION_IN;
+ if (fsel == BCM2835_FSEL_GPIO_OUT)
+ return GPIO_LINE_DIRECTION_OUT;
- return GPIO_LINE_DIRECTION_OUT;
+ /*
+ * Alternative function doesn't clearly provide a direction. Default
+ * to INPUT.
+ */
+ return GPIO_LINE_DIRECTION_IN;
}
static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index 6a94ecd6a8de..0b7f74beb6a6 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -143,10 +143,14 @@ static int dt_to_map_one_config(struct pinctrl *p,
pctldev = get_pinctrl_dev_from_of_node(np_pctldev);
if (pctldev)
break;
- /* Do not defer probing of hogs (circular loop) */
+ /*
+ * Do not defer probing of hogs (circular loop)
+ *
+ * Return 1 to let the caller catch the case.
+ */
if (np_pctldev == p->dev->of_node) {
of_node_put(np_pctldev);
- return -ENODEV;
+ return 1;
}
}
of_node_put(np_pctldev);
@@ -265,6 +269,8 @@ int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev)
ret = dt_to_map_one_config(p, pctldev, statename,
np_config);
of_node_put(np_config);
+ if (ret == 1)
+ continue;
if (ret < 0)
goto err;
}
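
dt_to_map_one_config() now reports "this is a hog, skip it" with a positive return value instead of -ENODEV, and pinctrl_dt_to_map() treats 1 as a reason to continue rather than an error. The same sentinel idiom in isolation, with hypothetical helper names:

/* Sketch of the positive-return "skip" sentinel (hypothetical names). */
static int parse_one_entry(int idx)
{
        if (idx & 1)
                return 1;       /* not applicable here: skip, not an error */
        return 0;               /* handled */
}

static int parse_all_entries(int n)
{
        int i, ret;

        for (i = 0; i < n; i++) {
                ret = parse_one_entry(i);
                if (ret == 1)
                        continue;       /* skip (the hog case above) */
                if (ret < 0)
                        return ret;     /* real error */
        }
        return 0;
}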
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index 14c26c023590..248c2e558ff3 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -39,7 +39,6 @@ config PINCTRL_INTEL
config PINCTRL_INTEL_PLATFORM
tristate "Intel pinctrl and GPIO platform driver"
- depends on ACPI
select PINCTRL_INTEL
help
This pinctrl driver provides an interface that allows configuring
@@ -141,7 +140,6 @@ config PINCTRL_METEORLAKE
config PINCTRL_METEORPOINT
tristate "Intel Meteor Point pinctrl and GPIO driver"
- depends on ACPI
select PINCTRL_INTEL
help
Meteor Point is the PCH of Intel Meteor Lake. This pinctrl driver
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 7340dc20349c..969137c4cb06 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1355,6 +1355,8 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
void __iomem *reg;
unsigned long pending;
+ chained_irq_enter(chip, desc);
+
/* check from GPIO controller which pin triggered the interrupt */
for (base = 0; base < vg->chip.ngpio; base += 32) {
reg = byt_gpio_reg(vg, base, BYT_INT_STAT_REG);
@@ -1369,7 +1371,8 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
for_each_set_bit(pin, &pending, 32)
generic_handle_domain_irq(vg->chip.irq.domain, base + pin);
}
- chip->irq_eoi(data);
+
+ chained_irq_exit(chip, desc);
}
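
Both this handler and the Lynx Point one further down are converted from a bare chip->irq_eoi() call to the standard chained-handler bracket. A minimal sketch of that pattern for a generic GPIO demultiplexing handler; the driver data and status read are hypothetical placeholders:

/* Generic shape of a chained GPIO interrupt handler. */
static void example_gpio_irq_handler(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct example_gpio *g = irq_desc_get_handler_data(desc);      /* hypothetical */
        unsigned long pending;
        unsigned int bit;

        chained_irq_enter(chip, desc);          /* ack/mask the parent IRQ */

        pending = example_read_pending(g);      /* hypothetical status read */
        for_each_set_bit(bit, &pending, 32)
                generic_handle_domain_irq(g->domain, bit);

        chained_irq_exit(chip, desc);           /* eoi/unmask the parent IRQ */
}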
static bool byt_direct_irq_sanity_check(struct intel_pinctrl *vg, int pin, u32 conf0)
@@ -1557,16 +1560,14 @@ static int byt_set_soc_data(struct intel_pinctrl *vg,
vg->soc = soc;
vg->ncommunities = vg->soc->ncommunities;
- vg->communities = devm_kcalloc(vg->dev, vg->ncommunities,
- sizeof(*vg->communities), GFP_KERNEL);
+ vg->communities = devm_kmemdup_array(vg->dev, vg->soc->communities, vg->ncommunities,
+ sizeof(*vg->soc->communities), GFP_KERNEL);
if (!vg->communities)
return -ENOMEM;
for (i = 0; i < vg->soc->ncommunities; i++) {
struct intel_community *comm = vg->communities + i;
- *comm = vg->soc->communities[i];
-
comm->pad_regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(comm->pad_regs))
return PTR_ERR(comm->pad_regs);
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index c673e262e1db..69b18ce0f685 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1631,9 +1631,8 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
pctrl->soc = soc_data;
pctrl->ncommunities = pctrl->soc->ncommunities;
- pctrl->communities = devm_kmemdup(dev, pctrl->soc->communities,
- pctrl->ncommunities * sizeof(*pctrl->communities),
- GFP_KERNEL);
+ pctrl->communities = devm_kmemdup_array(dev, pctrl->soc->communities, pctrl->ncommunities,
+ sizeof(*pctrl->soc->communities), GFP_KERNEL);
if (!pctrl->communities)
return -ENOMEM;
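
Here and in the Bay Trail and Intel core probes, a devm_kcalloc() plus per-element copy is collapsed into devm_kmemdup_array(), which duplicates the source array in one call. A short before/after sketch, assuming the devm_kmemdup_array(dev, src, count, element_size, gfp) argument order used above:

/* Before: allocate, then copy element by element. */
dst = devm_kcalloc(dev, n, sizeof(*dst), GFP_KERNEL);
if (!dst)
        return -ENOMEM;
for (i = 0; i < n; i++)
        dst[i] = src[i];

/* After: duplicate the whole array in one call. */
dst = devm_kmemdup_array(dev, src, n, sizeof(*src), GFP_KERNEL);
if (!dst)
        return -ENOMEM;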
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 527e4b87ae52..d889c7c878e2 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -1543,7 +1543,6 @@ static int intel_pinctrl_probe_pwm(struct intel_pinctrl *pctrl,
.clk_rate = 19200000,
.npwm = 1,
.base_unit_bits = 22,
- .bypass = true,
};
struct pwm_chip *chip;
@@ -1577,8 +1576,8 @@ int intel_pinctrl_probe(struct platform_device *pdev,
* to the registers.
*/
pctrl->ncommunities = pctrl->soc->ncommunities;
- pctrl->communities = devm_kcalloc(dev, pctrl->ncommunities,
- sizeof(*pctrl->communities), GFP_KERNEL);
+ pctrl->communities = devm_kmemdup_array(dev, pctrl->soc->communities, pctrl->ncommunities,
+ sizeof(*pctrl->soc->communities), GFP_KERNEL);
if (!pctrl->communities)
return -ENOMEM;
@@ -1588,8 +1587,6 @@ int intel_pinctrl_probe(struct platform_device *pdev,
u32 offset;
u32 value;
- *community = pctrl->soc->communities[i];
-
regs = devm_platform_ioremap_resource(pdev, community->barno);
if (IS_ERR(regs))
return PTR_ERR(regs);
@@ -1941,3 +1938,4 @@ MODULE_AUTHOR("Mathias Nyman <mathias.nyman@linux.intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("Intel pinctrl/GPIO core driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS("PWM_LPSS");
diff --git a/drivers/pinctrl/intel/pinctrl-lynxpoint.c b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
index cc5ede17c383..ac5459a4c63e 100644
--- a/drivers/pinctrl/intel/pinctrl-lynxpoint.c
+++ b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
@@ -549,6 +549,8 @@ static void lp_gpio_irq_handler(struct irq_desc *desc)
unsigned long pending;
u32 base, pin;
+ chained_irq_enter(chip, desc);
+
/* check from GPIO controller which pin triggered the interrupt */
for (base = 0; base < lg->chip.ngpio; base += 32) {
reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
@@ -560,7 +562,8 @@ static void lp_gpio_irq_handler(struct irq_desc *desc)
for_each_set_bit(pin, &pending, 32)
generic_handle_domain_irq(lg->chip.irq.domain, base + pin);
}
- chip->irq_eoi(data);
+
+ chained_irq_exit(chip, desc);
}
static void lp_irq_ack(struct irq_data *d)
diff --git a/drivers/pinctrl/intel/pinctrl-tangier.c b/drivers/pinctrl/intel/pinctrl-tangier.c
index d3baf0f4eea0..ac61e632b487 100644
--- a/drivers/pinctrl/intel/pinctrl-tangier.c
+++ b/drivers/pinctrl/intel/pinctrl-tangier.c
@@ -524,7 +524,6 @@ static int tng_pinctrl_probe(struct platform_device *pdev,
struct device *dev = &pdev->dev;
struct tng_family *families;
struct tng_pinctrl *tp;
- size_t families_len;
void __iomem *regs;
unsigned int i;
@@ -543,8 +542,8 @@ static int tng_pinctrl_probe(struct platform_device *pdev,
* Make a copy of the families which we can use to hold pointers
* to the registers.
*/
- families_len = size_mul(sizeof(*families), tp->nfamilies);
- families = devm_kmemdup(dev, tp->families, families_len, GFP_KERNEL);
+ families = devm_kmemdup_array(dev, tp->families, tp->nfamilies,
+ sizeof(*tp->families), GFP_KERNEL);
if (!families)
return -ENOMEM;
diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c
index 27f0a54e12bf..ced4ee509b5b 100644
--- a/drivers/pinctrl/mediatek/mtk-eint.c
+++ b/drivers/pinctrl/mediatek/mtk-eint.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2014-2018 MediaTek Inc.
+// Copyright (c) 2014-2025 MediaTek Inc.
/*
* Library for MediaTek External Interrupt Support
*
* Author: Maoguang Meng <maoguang.meng@mediatek.com>
* Sean Wang <sean.wang@mediatek.com>
+ * Hao Chang <ot_chhao.chang@mediatek.com>
+ * Qingliang Li <qingliang.li@mediatek.com>
*
*/
@@ -20,6 +22,7 @@
#include <linux/platform_device.h>
#include "mtk-eint.h"
+#include "pinctrl-mtk-common-v2.h"
#define MTK_EINT_EDGE_SENSITIVE 0
#define MTK_EINT_LEVEL_SENSITIVE 1
@@ -68,13 +71,11 @@ static void __iomem *mtk_eint_get_offset(struct mtk_eint *eint,
unsigned int eint_num,
unsigned int offset)
{
- unsigned int eint_base = 0;
+ unsigned int idx = eint->pins[eint_num].index;
+ unsigned int inst = eint->pins[eint_num].instance;
void __iomem *reg;
- if (eint_num >= eint->hw->ap_num)
- eint_base = eint->hw->ap_num;
-
- reg = eint->base + offset + ((eint_num - eint_base) / 32) * 4;
+ reg = eint->base[inst] + offset + (idx / 32 * 4);
return reg;
}
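
With several EINT register bases, a pin is now addressed by its (instance, index) pair rather than a flat eint number: the instance picks the base, and the index picks the 32-bit port register and bit within it. A standalone sketch of the same arithmetic, using hypothetical types:

#include <stdint.h>

struct eint_pin_ref {
        unsigned int instance;  /* which register base */
        unsigned int index;     /* bit position within that base */
};

/*
 * Byte address of the port register holding this pin, for a given
 * register block offset (e.g. stat, mask_set).
 */
static uintptr_t eint_reg_addr(const uintptr_t *bases, struct eint_pin_ref p,
                               unsigned int reg_offset)
{
        return bases[p.instance] + reg_offset + (p.index / 32) * 4;
}

static uint32_t eint_bit_mask(struct eint_pin_ref p)
{
        return 1u << (p.index % 32);
}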
@@ -83,7 +84,7 @@ static unsigned int mtk_eint_can_en_debounce(struct mtk_eint *eint,
unsigned int eint_num)
{
unsigned int sens;
- unsigned int bit = BIT(eint_num % 32);
+ unsigned int bit = BIT(eint->pins[eint_num].index % 32);
void __iomem *reg = mtk_eint_get_offset(eint, eint_num,
eint->regs->sens);
@@ -92,7 +93,7 @@ static unsigned int mtk_eint_can_en_debounce(struct mtk_eint *eint,
else
sens = MTK_EINT_EDGE_SENSITIVE;
- if (eint_num < eint->hw->db_cnt && sens != MTK_EINT_EDGE_SENSITIVE)
+ if (eint->pins[eint_num].debounce && sens != MTK_EINT_EDGE_SENSITIVE)
return 1;
else
return 0;
@@ -102,9 +103,9 @@ static int mtk_eint_flip_edge(struct mtk_eint *eint, int hwirq)
{
int start_level, curr_level;
unsigned int reg_offset;
- u32 mask = BIT(hwirq & 0x1f);
- u32 port = (hwirq >> 5) & eint->hw->port_mask;
- void __iomem *reg = eint->base + (port << 2);
+ unsigned int mask = BIT(eint->pins[hwirq].index & 0x1f);
+ unsigned int port = (eint->pins[hwirq].index >> 5) & eint->hw->port_mask;
+ void __iomem *reg = eint->base[eint->pins[hwirq].instance] + (port << 2);
curr_level = eint->gpio_xlate->get_gpio_state(eint->pctl, hwirq);
@@ -126,11 +127,13 @@ static int mtk_eint_flip_edge(struct mtk_eint *eint, int hwirq)
static void mtk_eint_mask(struct irq_data *d)
{
struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
- u32 mask = BIT(d->hwirq & 0x1f);
+ unsigned int idx = eint->pins[d->hwirq].index;
+ unsigned int inst = eint->pins[d->hwirq].instance;
+ unsigned int mask = BIT(idx & 0x1f);
void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
eint->regs->mask_set);
- eint->cur_mask[d->hwirq >> 5] &= ~mask;
+ eint->cur_mask[inst][idx >> 5] &= ~mask;
writel(mask, reg);
}
@@ -138,22 +141,24 @@ static void mtk_eint_mask(struct irq_data *d)
static void mtk_eint_unmask(struct irq_data *d)
{
struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
- u32 mask = BIT(d->hwirq & 0x1f);
+ unsigned int idx = eint->pins[d->hwirq].index;
+ unsigned int inst = eint->pins[d->hwirq].instance;
+ unsigned int mask = BIT(idx & 0x1f);
void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
eint->regs->mask_clr);
- eint->cur_mask[d->hwirq >> 5] |= mask;
+ eint->cur_mask[inst][idx >> 5] |= mask;
writel(mask, reg);
- if (eint->dual_edge[d->hwirq])
+ if (eint->pins[d->hwirq].dual_edge)
mtk_eint_flip_edge(eint, d->hwirq);
}
static unsigned int mtk_eint_get_mask(struct mtk_eint *eint,
unsigned int eint_num)
{
- unsigned int bit = BIT(eint_num % 32);
+ unsigned int bit = BIT(eint->pins[eint_num].index % 32);
void __iomem *reg = mtk_eint_get_offset(eint, eint_num,
eint->regs->mask);
@@ -163,7 +168,7 @@ static unsigned int mtk_eint_get_mask(struct mtk_eint *eint,
static void mtk_eint_ack(struct irq_data *d)
{
struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
- u32 mask = BIT(d->hwirq & 0x1f);
+ unsigned int mask = BIT(eint->pins[d->hwirq].index & 0x1f);
void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
eint->regs->ack);
@@ -174,7 +179,7 @@ static int mtk_eint_set_type(struct irq_data *d, unsigned int type)
{
struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
bool masked;
- u32 mask = BIT(d->hwirq & 0x1f);
+ unsigned int mask = BIT(eint->pins[d->hwirq].index & 0x1f);
void __iomem *reg;
if (((type & IRQ_TYPE_EDGE_BOTH) && (type & IRQ_TYPE_LEVEL_MASK)) ||
@@ -186,9 +191,9 @@ static int mtk_eint_set_type(struct irq_data *d, unsigned int type)
}
if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
- eint->dual_edge[d->hwirq] = 1;
+ eint->pins[d->hwirq].dual_edge = 1;
else
- eint->dual_edge[d->hwirq] = 0;
+ eint->pins[d->hwirq].dual_edge = 0;
if (!mtk_eint_get_mask(eint, d->hwirq)) {
mtk_eint_mask(d);
@@ -223,27 +228,32 @@ static int mtk_eint_set_type(struct irq_data *d, unsigned int type)
static int mtk_eint_irq_set_wake(struct irq_data *d, unsigned int on)
{
struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
- int shift = d->hwirq & 0x1f;
- int reg = d->hwirq >> 5;
+ unsigned int idx = eint->pins[d->hwirq].index;
+ unsigned int inst = eint->pins[d->hwirq].instance;
+ unsigned int shift = idx & 0x1f;
+ unsigned int port = idx >> 5;
if (on)
- eint->wake_mask[reg] |= BIT(shift);
+ eint->wake_mask[inst][port] |= BIT(shift);
else
- eint->wake_mask[reg] &= ~BIT(shift);
+ eint->wake_mask[inst][port] &= ~BIT(shift);
return 0;
}
static void mtk_eint_chip_write_mask(const struct mtk_eint *eint,
- void __iomem *base, u32 *buf)
+ void __iomem *base, unsigned int **buf)
{
- int port;
+ int inst, port, port_num;
void __iomem *reg;
- for (port = 0; port < eint->hw->ports; port++) {
- reg = base + (port << 2);
- writel_relaxed(~buf[port], reg + eint->regs->mask_set);
- writel_relaxed(buf[port], reg + eint->regs->mask_clr);
+ for (inst = 0; inst < eint->nbase; inst++) {
+ port_num = DIV_ROUND_UP(eint->base_pin_num[inst], 32);
+ for (port = 0; port < port_num; port++) {
+ reg = eint->base[inst] + (port << 2);
+ writel_relaxed(~buf[inst][port], reg + eint->regs->mask_set);
+ writel_relaxed(buf[inst][port], reg + eint->regs->mask_clr);
+ }
}
}
@@ -303,15 +313,18 @@ static struct irq_chip mtk_eint_irq_chip = {
static unsigned int mtk_eint_hw_init(struct mtk_eint *eint)
{
- void __iomem *dom_en = eint->base + eint->regs->dom_en;
- void __iomem *mask_set = eint->base + eint->regs->mask_set;
- unsigned int i;
-
- for (i = 0; i < eint->hw->ap_num; i += 32) {
- writel(0xffffffff, dom_en);
- writel(0xffffffff, mask_set);
- dom_en += 4;
- mask_set += 4;
+ void __iomem *dom_reg, *mask_reg;
+ unsigned int i, j;
+
+ for (i = 0; i < eint->nbase; i++) {
+ dom_reg = eint->base[i] + eint->regs->dom_en;
+ mask_reg = eint->base[i] + eint->regs->mask_set;
+ for (j = 0; j < eint->base_pin_num[i]; j += 32) {
+ writel(0xffffffff, dom_reg);
+ writel(0xffffffff, mask_reg);
+ dom_reg += 4;
+ mask_reg += 4;
+ }
}
return 0;
@@ -322,14 +335,16 @@ mtk_eint_debounce_process(struct mtk_eint *eint, int index)
{
unsigned int rst, ctrl_offset;
unsigned int bit, dbnc;
+ unsigned int inst = eint->pins[index].instance;
+ unsigned int idx = eint->pins[index].index;
- ctrl_offset = (index / 4) * 4 + eint->regs->dbnc_ctrl;
- dbnc = readl(eint->base + ctrl_offset);
- bit = MTK_EINT_DBNC_SET_EN << ((index % 4) * 8);
+ ctrl_offset = (idx / 4) * 4 + eint->regs->dbnc_ctrl;
+ dbnc = readl(eint->base[inst] + ctrl_offset);
+ bit = MTK_EINT_DBNC_SET_EN << ((idx % 4) * 8);
if ((bit & dbnc) > 0) {
- ctrl_offset = (index / 4) * 4 + eint->regs->dbnc_set;
- rst = MTK_EINT_DBNC_RST_BIT << ((index % 4) * 8);
- writel(rst, eint->base + ctrl_offset);
+ ctrl_offset = (idx / 4) * 4 + eint->regs->dbnc_set;
+ rst = MTK_EINT_DBNC_RST_BIT << ((idx % 4) * 8);
+ writel(rst, eint->base[inst] + ctrl_offset);
}
}
@@ -337,65 +352,68 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct mtk_eint *eint = irq_desc_get_handler_data(desc);
- unsigned int status, eint_num;
- int offset, mask_offset, index;
- void __iomem *reg = mtk_eint_get_offset(eint, 0, eint->regs->stat);
+ unsigned int i, j, port, status, shift, mask, eint_num;
+ void __iomem *reg;
int dual_edge, start_level, curr_level;
chained_irq_enter(chip, desc);
- for (eint_num = 0; eint_num < eint->hw->ap_num; eint_num += 32,
- reg += 4) {
- status = readl(reg);
- while (status) {
- offset = __ffs(status);
- mask_offset = eint_num >> 5;
- index = eint_num + offset;
- status &= ~BIT(offset);
-
- /*
- * If we get an interrupt on pin that was only required
- * for wake (but no real interrupt requested), mask the
- * interrupt (as would mtk_eint_resume do anyway later
- * in the resume sequence).
- */
- if (eint->wake_mask[mask_offset] & BIT(offset) &&
- !(eint->cur_mask[mask_offset] & BIT(offset))) {
- writel_relaxed(BIT(offset), reg -
- eint->regs->stat +
- eint->regs->mask_set);
- }
-
- dual_edge = eint->dual_edge[index];
- if (dual_edge) {
- /*
- * Clear soft-irq in case we raised it last
- * time.
- */
- writel(BIT(offset), reg - eint->regs->stat +
- eint->regs->soft_clr);
-
- start_level =
- eint->gpio_xlate->get_gpio_state(eint->pctl,
- index);
- }
-
- generic_handle_domain_irq(eint->domain, index);
-
- if (dual_edge) {
- curr_level = mtk_eint_flip_edge(eint, index);
+ for (i = 0; i < eint->nbase; i++) {
+ for (j = 0; j < eint->base_pin_num[i]; j += 32) {
+ port = j >> 5;
+ status = readl(eint->base[i] + port * 4 + eint->regs->stat);
+ while (status) {
+ shift = __ffs(status);
+ status &= ~BIT(shift);
+ mask = BIT(shift);
+ eint_num = eint->pin_list[i][shift + j];
/*
- * If level changed, we might lost one edge
- * interrupt, raised it through soft-irq.
+ * If we get an interrupt on pin that was only required
+ * for wake (but no real interrupt requested), mask the
+ * interrupt (as would mtk_eint_resume do anyway later
+ * in the resume sequence).
*/
- if (start_level != curr_level)
- writel(BIT(offset), reg -
- eint->regs->stat +
- eint->regs->soft_set);
+ if (eint->wake_mask[i][port] & mask &&
+ !(eint->cur_mask[i][port] & mask)) {
+ reg = mtk_eint_get_offset(eint, eint_num,
+ eint->regs->mask_set);
+ writel_relaxed(mask, reg);
+ }
+
+ dual_edge = eint->pins[eint_num].dual_edge;
+ if (dual_edge) {
+ /*
+ * Clear soft-irq in case we raised it last
+ * time.
+ */
+ reg = mtk_eint_get_offset(eint, eint_num,
+ eint->regs->soft_clr);
+ writel(mask, reg);
+
+ start_level =
+ eint->gpio_xlate->get_gpio_state(eint->pctl,
+ eint_num);
+ }
+
+ generic_handle_domain_irq(eint->domain, eint_num);
+
+ if (dual_edge) {
+ curr_level = mtk_eint_flip_edge(eint, eint_num);
+
+ /*
+ * If the level changed, we might have lost one edge
+ * interrupt; raise it through soft-irq.
+ */
+ if (start_level != curr_level) {
+ reg = mtk_eint_get_offset(eint, eint_num,
+ eint->regs->soft_set);
+ writel(mask, reg);
+ }
+ }
+
+ if (eint->pins[eint_num].debounce)
+ mtk_eint_debounce_process(eint, eint_num);
}
-
- if (index < eint->hw->db_cnt)
- mtk_eint_debounce_process(eint, index);
}
}
chained_irq_exit(chip, desc);
@@ -423,6 +441,8 @@ int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_num,
int virq, eint_offset;
unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask,
dbnc;
+ unsigned int inst = eint->pins[eint_num].instance;
+ unsigned int idx = eint->pins[eint_num].index;
struct irq_data *d;
if (!eint->hw->db_time)
@@ -432,8 +452,8 @@ int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_num,
eint_offset = (eint_num % 4) * 8;
d = irq_get_irq_data(virq);
- set_offset = (eint_num / 4) * 4 + eint->regs->dbnc_set;
- clr_offset = (eint_num / 4) * 4 + eint->regs->dbnc_clr;
+ set_offset = (idx / 4) * 4 + eint->regs->dbnc_set;
+ clr_offset = (idx / 4) * 4 + eint->regs->dbnc_clr;
if (!mtk_eint_can_en_debounce(eint, eint_num))
return -EINVAL;
@@ -454,12 +474,12 @@ int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_num,
}
clr_bit = 0xff << eint_offset;
- writel(clr_bit, eint->base + clr_offset);
+ writel(clr_bit, eint->base[inst] + clr_offset);
bit = ((dbnc << MTK_EINT_DBNC_SET_DBNC_BITS) | MTK_EINT_DBNC_SET_EN) <<
eint_offset;
rst = MTK_EINT_DBNC_RST_BIT << eint_offset;
- writel(rst | bit, eint->base + set_offset);
+ writel(rst | bit, eint->base[inst] + set_offset);
/*
* Delay a while (more than 2T) to wait for hw debounce counter reset
@@ -487,32 +507,69 @@ EXPORT_SYMBOL_GPL(mtk_eint_find_irq);
int mtk_eint_do_init(struct mtk_eint *eint)
{
- int i;
+ unsigned int size, i, port, inst = 0;
+ struct mtk_pinctrl *hw = (struct mtk_pinctrl *)eint->pctl;
/* If clients don't assign a specific regs, let's use generic one */
if (!eint->regs)
eint->regs = &mtk_generic_eint_regs;
- eint->wake_mask = devm_kcalloc(eint->dev, eint->hw->ports,
- sizeof(*eint->wake_mask), GFP_KERNEL);
- if (!eint->wake_mask)
+ eint->base_pin_num = devm_kmalloc_array(eint->dev, eint->nbase, sizeof(u16),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!eint->base_pin_num)
return -ENOMEM;
- eint->cur_mask = devm_kcalloc(eint->dev, eint->hw->ports,
- sizeof(*eint->cur_mask), GFP_KERNEL);
- if (!eint->cur_mask)
- return -ENOMEM;
+ if (eint->nbase == 1) {
+ size = eint->hw->ap_num * sizeof(struct mtk_eint_pin);
+ eint->pins = devm_kmalloc(eint->dev, size, GFP_KERNEL);
+ if (!eint->pins)
+ goto err_pins;
+
+ eint->base_pin_num[inst] = eint->hw->ap_num;
+ for (i = 0; i < eint->hw->ap_num; i++) {
+ eint->pins[i].instance = inst;
+ eint->pins[i].index = i;
+ eint->pins[i].debounce = (i < eint->hw->db_cnt) ? 1 : 0;
+ }
+ }
- eint->dual_edge = devm_kcalloc(eint->dev, eint->hw->ap_num,
- sizeof(int), GFP_KERNEL);
- if (!eint->dual_edge)
- return -ENOMEM;
+ if (hw && hw->soc && hw->soc->eint_pin) {
+ eint->pins = hw->soc->eint_pin;
+ for (i = 0; i < eint->hw->ap_num; i++) {
+ inst = eint->pins[i].instance;
+ if (inst >= eint->nbase)
+ continue;
+ eint->base_pin_num[inst]++;
+ }
+ }
+
+ eint->pin_list = devm_kmalloc(eint->dev, eint->nbase * sizeof(u16 *), GFP_KERNEL);
+ if (!eint->pin_list)
+ goto err_pin_list;
+
+ eint->wake_mask = devm_kmalloc(eint->dev, eint->nbase * sizeof(u32 *), GFP_KERNEL);
+ if (!eint->wake_mask)
+ goto err_wake_mask;
+
+ eint->cur_mask = devm_kmalloc(eint->dev, eint->nbase * sizeof(u32 *), GFP_KERNEL);
+ if (!eint->cur_mask)
+ goto err_cur_mask;
+
+ for (i = 0; i < eint->nbase; i++) {
+ eint->pin_list[i] = devm_kzalloc(eint->dev, eint->base_pin_num[i] * sizeof(u16),
+ GFP_KERNEL);
+ port = DIV_ROUND_UP(eint->base_pin_num[i], 32);
+ eint->wake_mask[i] = devm_kzalloc(eint->dev, port * sizeof(u32), GFP_KERNEL);
+ eint->cur_mask[i] = devm_kzalloc(eint->dev, port * sizeof(u32), GFP_KERNEL);
+ if (!eint->pin_list[i] || !eint->wake_mask[i] || !eint->cur_mask[i])
+ goto err_eint;
+ }
eint->domain = irq_domain_add_linear(eint->dev->of_node,
eint->hw->ap_num,
&irq_domain_simple_ops, NULL);
if (!eint->domain)
- return -ENOMEM;
+ goto err_eint;
if (eint->hw->db_time) {
for (i = 0; i < MTK_EINT_DBNC_MAX; i++)
@@ -523,8 +580,11 @@ int mtk_eint_do_init(struct mtk_eint *eint)
mtk_eint_hw_init(eint);
for (i = 0; i < eint->hw->ap_num; i++) {
+ inst = eint->pins[i].instance;
+ if (inst >= eint->nbase)
+ continue;
+ eint->pin_list[inst][eint->pins[i].index] = i;
int virq = irq_create_mapping(eint->domain, i);
-
irq_set_chip_and_handler(virq, &mtk_eint_irq_chip,
handle_level_irq);
irq_set_chip_data(virq, eint);
@@ -534,6 +594,27 @@ int mtk_eint_do_init(struct mtk_eint *eint)
eint);
return 0;
+
+err_eint:
+ for (i = 0; i < eint->nbase; i++) {
+ if (eint->cur_mask[i])
+ devm_kfree(eint->dev, eint->cur_mask[i]);
+ if (eint->wake_mask[i])
+ devm_kfree(eint->dev, eint->wake_mask[i]);
+ if (eint->pin_list[i])
+ devm_kfree(eint->dev, eint->pin_list[i]);
+ }
+ devm_kfree(eint->dev, eint->cur_mask);
+err_cur_mask:
+ devm_kfree(eint->dev, eint->wake_mask);
+err_wake_mask:
+ devm_kfree(eint->dev, eint->pin_list);
+err_pin_list:
+ if (eint->nbase == 1)
+ devm_kfree(eint->dev, eint->pins);
+err_pins:
+ devm_kfree(eint->dev, eint->base_pin_num);
+ return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mtk_eint_do_init);
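
wake_mask and cur_mask become per-instance arrays of u32 words, one word per 32 pins of each register base, indexed as mask[instance][index >> 5]. A brief sketch of the indexing used by the mask/unmask and set_wake paths above:

/* Two-level mask indexing, as used in the rework above. */
static void example_set_mask_bit(u32 **mask, const struct mtk_eint_pin *pin,
                                 bool on)
{
        unsigned int inst = pin->instance;
        unsigned int port = pin->index >> 5;    /* which u32 word */
        u32 bit = BIT(pin->index & 0x1f);       /* which bit within it */

        if (on)
                mask[inst][port] |= bit;
        else
                mask[inst][port] &= ~bit;
}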
diff --git a/drivers/pinctrl/mediatek/mtk-eint.h b/drivers/pinctrl/mediatek/mtk-eint.h
index 6139b16cd225..f7f58cca0d5e 100644
--- a/drivers/pinctrl/mediatek/mtk-eint.h
+++ b/drivers/pinctrl/mediatek/mtk-eint.h
@@ -1,10 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (C) 2014-2018 MediaTek Inc.
+ * Copyright (C) 2014-2025 MediaTek Inc.
*
* Author: Maoguang Meng <maoguang.meng@mediatek.com>
* Sean Wang <sean.wang@mediatek.com>
- *
+ * Hao Chang <ot_chhao.chang@mediatek.com>
+ * Qingliang Li <qingliang.li@mediatek.com>
*/
#ifndef __MTK_EINT_H
#define __MTK_EINT_H
@@ -40,6 +41,14 @@ struct mtk_eint_hw {
const unsigned int *db_time;
};
+struct mtk_eint_pin {
+ u16 number;
+ u8 instance;
+ u8 index;
+ bool debounce;
+ bool dual_edge;
+};
+
extern const unsigned int debounce_time_mt2701[];
extern const unsigned int debounce_time_mt6765[];
extern const unsigned int debounce_time_mt6795[];
@@ -56,17 +65,21 @@ struct mtk_eint_xt {
struct mtk_eint {
struct device *dev;
- void __iomem *base;
+ void __iomem **base;
+ u8 nbase;
+ u16 *base_pin_num;
struct irq_domain *domain;
int irq;
int *dual_edge;
- u32 *wake_mask;
- u32 *cur_mask;
+ u16 **pin_list;
+ u32 **wake_mask;
+ u32 **cur_mask;
/* Used to fit into various EINT device */
const struct mtk_eint_hw *hw;
const struct mtk_eint_regs *regs;
+ struct mtk_eint_pin *pins;
u16 num_db_time;
/* Used to fit into various pinctrl device */
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
index 00e95682b9f8..d1556b75d9ef 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of_address.h>
#include <linux/of_irq.h>
#include "mtk-eint.h"
@@ -367,7 +368,7 @@ static const struct mtk_eint_xt mtk_eint_xt = {
int mtk_build_eint(struct mtk_pinctrl *hw, struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- int ret;
+ int ret, i, j, count_reg_names;
if (!IS_ENABLED(CONFIG_EINT_MTK))
return 0;
@@ -379,10 +380,24 @@ int mtk_build_eint(struct mtk_pinctrl *hw, struct platform_device *pdev)
if (!hw->eint)
return -ENOMEM;
- hw->eint->base = devm_platform_ioremap_resource_byname(pdev, "eint");
- if (IS_ERR(hw->eint->base)) {
- ret = PTR_ERR(hw->eint->base);
- goto err_free_eint;
+ count_reg_names = of_property_count_strings(np, "reg-names");
+ if (count_reg_names < hw->soc->nbase_names)
+ return -EINVAL;
+
+ hw->eint->nbase = count_reg_names - hw->soc->nbase_names;
+ hw->eint->base = devm_kmalloc_array(&pdev->dev, hw->eint->nbase,
+ sizeof(*hw->eint->base), GFP_KERNEL | __GFP_ZERO);
+ if (!hw->eint->base) {
+ ret = -ENOMEM;
+ goto err_free_base;
+ }
+
+ for (i = hw->soc->nbase_names, j = 0; i < count_reg_names; i++, j++) {
+ hw->eint->base[j] = of_iomap(np, i);
+ if (IS_ERR(hw->eint->base[j])) {
+ ret = PTR_ERR(hw->eint->base[j]);
+ goto err_free_eint;
+ }
}
hw->eint->irq = irq_of_parse_and_map(np, 0);
@@ -401,9 +416,19 @@ int mtk_build_eint(struct mtk_pinctrl *hw, struct platform_device *pdev)
hw->eint->pctl = hw;
hw->eint->gpio_xlate = &mtk_eint_xt;
- return mtk_eint_do_init(hw->eint);
+ ret = mtk_eint_do_init(hw->eint);
+ if (ret)
+ goto err_free_eint;
+
+ return 0;
err_free_eint:
+ for (j = 0; j < hw->eint->nbase; j++) {
+ if (hw->eint->base[j])
+ iounmap(hw->eint->base[j]);
+ }
+ devm_kfree(hw->dev, hw->eint->base);
+err_free_base:
devm_kfree(hw->dev, hw->eint);
hw->eint = NULL;
return ret;
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
index 9c271dc2b521..36d2898037dd 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
@@ -242,6 +242,7 @@ struct mtk_pin_soc {
unsigned int nfuncs;
const struct mtk_eint_regs *eint_regs;
const struct mtk_eint_hw *eint_hw;
+ struct mtk_eint_pin *eint_pin;
/* Specific parameters per SoC */
u8 gpio_m;
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.h b/drivers/pinctrl/mediatek/pinctrl-paris.h
index 948ce126aa0c..d8c1822662fc 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.h
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.h
@@ -49,6 +49,13 @@
__VA_ARGS__, { } }, \
}
+#define MTK_EINT_PIN(_number, _instance, _index, _debounce) { \
+ .number = _number, \
+ .instance = _instance, \
+ .index = _index, \
+ .debounce = _debounce, \
+ }
+
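
MTK_EINT_PIN() initializes the number/instance/index/debounce fields of struct mtk_eint_pin; dual_edge stays zero and is set at runtime by mtk_eint_set_type(). A hypothetical per-SoC table built with it could look like:

/* Hypothetical example table; the pin-to-instance mapping is made up. */
static struct mtk_eint_pin example_soc_eint_pins[] = {
        MTK_EINT_PIN(0, 0, 0, true),    /* pin 0: base 0, bit 0, debounce */
        MTK_EINT_PIN(1, 0, 1, true),
        MTK_EINT_PIN(2, 1, 0, false),   /* pin 2: second base, no debounce */
};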
#define PINCTRL_PIN_GROUP(_name_, id) \
{ \
.grp = PINCTRL_PINGROUP(_name_,id##_pins, ARRAY_SIZE(id##_pins)), \
diff --git a/drivers/pinctrl/meson/Kconfig b/drivers/pinctrl/meson/Kconfig
index cc397896762c..90639bc171f6 100644
--- a/drivers/pinctrl/meson/Kconfig
+++ b/drivers/pinctrl/meson/Kconfig
@@ -67,6 +67,17 @@ config PINCTRL_MESON_S4
select PINCTRL_MESON_AXG_PMX
default y
+config PINCTRL_AMLOGIC_A4
+ bool "Amlogic A4 SoC pinctrl driver"
+ depends on ARM64
+ default y
+ help
+ This is the driver for the pin controller found on Amlogic SoCs.
+
+ This driver simplifies adding support for new Amlogic SoCs: a new
+ SoC only needs a corresponding dts file; no additional binding
+ header files or C files are required.
+
config PINCTRL_AMLOGIC_C3
tristate "Amlogic C3 SoC pinctrl driver"
depends on ARM64
diff --git a/drivers/pinctrl/meson/Makefile b/drivers/pinctrl/meson/Makefile
index 9e538b9ffb9b..c92a65a83344 100644
--- a/drivers/pinctrl/meson/Makefile
+++ b/drivers/pinctrl/meson/Makefile
@@ -10,5 +10,6 @@ obj-$(CONFIG_PINCTRL_MESON_AXG) += pinctrl-meson-axg.o
obj-$(CONFIG_PINCTRL_MESON_G12A) += pinctrl-meson-g12a.o
obj-$(CONFIG_PINCTRL_MESON_A1) += pinctrl-meson-a1.o
obj-$(CONFIG_PINCTRL_MESON_S4) += pinctrl-meson-s4.o
+obj-$(CONFIG_PINCTRL_AMLOGIC_A4) += pinctrl-amlogic-a4.o
obj-$(CONFIG_PINCTRL_AMLOGIC_C3) += pinctrl-amlogic-c3.o
obj-$(CONFIG_PINCTRL_AMLOGIC_T7) += pinctrl-amlogic-t7.o
diff --git a/drivers/pinctrl/meson/pinctrl-amlogic-a4.c b/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
new file mode 100644
index 000000000000..ee7bbc72f9b3
--- /dev/null
+++ b/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
@@ -0,0 +1,1053 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/*
+ * Copyright (c) 2024 Amlogic, Inc. All rights reserved.
+ * Author: Xianwei Zhao <xianwei.zhao@amlogic.com>
+ */
+
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string_helpers.h>
+
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <dt-bindings/pinctrl/amlogic,pinctrl.h>
+
+#include "../core.h"
+#include "../pinconf.h"
+
+#define gpio_chip_to_bank(chip) \
+ container_of(chip, struct aml_gpio_bank, gpio_chip)
+
+#define AML_REG_PULLEN 0
+#define AML_REG_PULL 1
+#define AML_REG_DIR 2
+#define AML_REG_OUT 3
+#define AML_REG_IN 4
+#define AML_REG_DS 5
+#define AML_NUM_REG 6
+
+enum aml_pinconf_drv {
+ PINCONF_DRV_500UA,
+ PINCONF_DRV_2500UA,
+ PINCONF_DRV_3000UA,
+ PINCONF_DRV_4000UA,
+};
+
+struct aml_pio_control {
+ u32 gpio_offset;
+ u32 reg_offset[AML_NUM_REG];
+ u32 bit_offset[AML_NUM_REG];
+};
+
+struct aml_reg_bit {
+ u32 bank_id;
+ u32 reg_offs[AML_NUM_REG];
+ u32 bit_offs[AML_NUM_REG];
+};
+
+struct aml_pctl_data {
+ unsigned int number;
+ struct aml_reg_bit rb_offs[];
+};
+
+struct aml_pmx_func {
+ const char *name;
+ const char **groups;
+ unsigned int ngroups;
+};
+
+struct aml_pctl_group {
+ const char *name;
+ unsigned int npins;
+ unsigned int *pins;
+ unsigned int *func;
+};
+
+struct aml_gpio_bank {
+ struct gpio_chip gpio_chip;
+ struct aml_pio_control pc;
+ u32 bank_id;
+ unsigned int pin_base;
+ struct regmap *reg_mux;
+ struct regmap *reg_gpio;
+ struct regmap *reg_ds;
+};
+
+struct aml_pinctrl {
+ struct device *dev;
+ struct pinctrl_dev *pctl;
+ struct aml_gpio_bank *banks;
+ int nbanks;
+ struct aml_pmx_func *functions;
+ int nfunctions;
+ struct aml_pctl_group *groups;
+ int ngroups;
+
+ const struct aml_pctl_data *data;
+};
+
+static const unsigned int aml_bit_strides[AML_NUM_REG] = {
+ 1, 1, 1, 1, 1, 2
+};
+
+static const unsigned int aml_def_regoffs[AML_NUM_REG] = {
+ 3, 4, 2, 1, 0, 7
+};
+
+static const char *aml_bank_name[31] = {
+"GPIOA", "GPIOB", "GPIOC", "GPIOD", "GPIOE", "GPIOF", "GPIOG",
+"GPIOH", "GPIOI", "GPIOJ", "GPIOK", "GPIOL", "GPIOM", "GPION",
+"GPIOO", "GPIOP", "GPIOQ", "GPIOR", "GPIOS", "GPIOT", "GPIOU",
+"GPIOV", "GPIOW", "GPIOX", "GPIOY", "GPIOZ", "GPIODV", "GPIOAO",
+"GPIOCC", "TEST_N", "ANALOG"
+};
+
+static int aml_pmx_calc_reg_and_offset(struct pinctrl_gpio_range *range,
+ unsigned int pin, unsigned int *reg,
+ unsigned int *offset)
+{
+ unsigned int shift;
+
+ shift = (pin - range->pin_base) << 2;
+ *reg = (shift / 32) * 4;
+ *offset = shift % 32;
+
+ return 0;
+}
+
+static int aml_pctl_set_function(struct aml_pinctrl *info,
+ struct pinctrl_gpio_range *range,
+ int pin_id, int func)
+{
+ struct aml_gpio_bank *bank = gpio_chip_to_bank(range->gc);
+ int reg;
+ int offset;
+
+ if (!bank->reg_mux)
+ return 0;
+
+ aml_pmx_calc_reg_and_offset(range, pin_id, &reg, &offset);
+ return regmap_update_bits(bank->reg_mux, reg,
+ 0xf << offset, (func & 0xf) << offset);
+}
+
+static int aml_pmx_get_funcs_count(struct pinctrl_dev *pctldev)
+{
+ struct aml_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+
+ return info->nfunctions;
+}
+
+static const char *aml_pmx_get_fname(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct aml_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+
+ return info->functions[selector].name;
+}
+
+static int aml_pmx_get_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **grps,
+ unsigned * const ngrps)
+{
+ struct aml_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+
+ *grps = info->functions[selector].groups;
+ *ngrps = info->functions[selector].ngroups;
+
+ return 0;
+}
+
+static int aml_pmx_set_mux(struct pinctrl_dev *pctldev, unsigned int fselector,
+ unsigned int group_id)
+{
+ struct aml_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+ struct aml_pctl_group *group = &info->groups[group_id];
+ struct pinctrl_gpio_range *range;
+ int i;
+
+ for (i = 0; i < group->npins; i++) {
+ range = pinctrl_find_gpio_range_from_pin(pctldev, group->pins[i]);
+ aml_pctl_set_function(info, range, group->pins[i], group->func[i]);
+ }
+
+ return 0;
+}
+
+static int aml_pmx_request_gpio(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin)
+{
+ struct aml_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+
+ return aml_pctl_set_function(info, range, pin, 0);
+}
+
+static const struct pinmux_ops aml_pmx_ops = {
+ .set_mux = aml_pmx_set_mux,
+ .get_functions_count = aml_pmx_get_funcs_count,
+ .get_function_name = aml_pmx_get_fname,
+ .get_function_groups = aml_pmx_get_groups,
+ .gpio_request_enable = aml_pmx_request_gpio,
+};
+
+static int aml_calc_reg_and_bit(struct pinctrl_gpio_range *range,
+ unsigned int pin,
+ unsigned int reg_type,
+ unsigned int *reg, unsigned int *bit)
+{
+ struct aml_gpio_bank *bank = gpio_chip_to_bank(range->gc);
+
+ *bit = (pin - range->pin_base) * aml_bit_strides[reg_type]
+ + bank->pc.bit_offset[reg_type];
+ *reg = (bank->pc.reg_offset[reg_type] + (*bit / 32)) * 4;
+ *bit &= 0x1f;
+
+ return 0;
+}
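
A worked example of the calculation above: most register types use one bit per pin, while drive-strength (AML_REG_DS) uses two, since aml_bit_strides[AML_REG_DS] is 2. For the pin 20 positions into its range, with bit_offset 0 and reg_offset R for AML_REG_DS, the raw bit index is 40, so the access lands at byte offset (R + 1) * 4, bit 8 of that register. The same arithmetic as a standalone sketch:

/* Standalone sketch of the reg/bit calculation with a per-type stride. */
static void calc_reg_and_bit(unsigned int pin_in_range, unsigned int stride,
                             unsigned int reg_offset, unsigned int bit_offset,
                             unsigned int *reg, unsigned int *bit)
{
        *bit = pin_in_range * stride + bit_offset;
        *reg = (reg_offset + *bit / 32) * 4;    /* byte offset of the 32-bit reg */
        *bit &= 0x1f;                           /* bit within that register */
}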
+
+static int aml_pinconf_get_pull(struct aml_pinctrl *info, unsigned int pin)
+{
+ struct pinctrl_gpio_range *range =
+ pinctrl_find_gpio_range_from_pin(info->pctl, pin);
+ struct aml_gpio_bank *bank = gpio_chip_to_bank(range->gc);
+ unsigned int reg, bit, val;
+ int ret, conf;
+
+ aml_calc_reg_and_bit(range, pin, AML_REG_PULLEN, &reg, &bit);
+
+ ret = regmap_read(bank->reg_gpio, reg, &val);
+ if (ret)
+ return ret;
+
+ if (!(val & BIT(bit))) {
+ conf = PIN_CONFIG_BIAS_DISABLE;
+ } else {
+ aml_calc_reg_and_bit(range, pin, AML_REG_PULL, &reg, &bit);
+
+ ret = regmap_read(bank->reg_gpio, reg, &val);
+ if (ret)
+ return ret;
+
+ if (val & BIT(bit))
+ conf = PIN_CONFIG_BIAS_PULL_UP;
+ else
+ conf = PIN_CONFIG_BIAS_PULL_DOWN;
+ }
+
+ return conf;
+}
+
+static int aml_pinconf_get_drive_strength(struct aml_pinctrl *info,
+ unsigned int pin,
+ u16 *drive_strength_ua)
+{
+ struct pinctrl_gpio_range *range =
+ pinctrl_find_gpio_range_from_pin(info->pctl, pin);
+ struct aml_gpio_bank *bank = gpio_chip_to_bank(range->gc);
+ unsigned int reg, bit;
+ unsigned int val;
+ int ret;
+
+ if (!bank->reg_ds)
+ return -EOPNOTSUPP;
+
+ aml_calc_reg_and_bit(range, pin, AML_REG_DS, &reg, &bit);
+ ret = regmap_read(bank->reg_ds, reg, &val);
+ if (ret)
+ return ret;
+
+ switch ((val >> bit) & 0x3) {
+ case PINCONF_DRV_500UA:
+ *drive_strength_ua = 500;
+ break;
+ case PINCONF_DRV_2500UA:
+ *drive_strength_ua = 2500;
+ break;
+ case PINCONF_DRV_3000UA:
+ *drive_strength_ua = 3000;
+ break;
+ case PINCONF_DRV_4000UA:
+ *drive_strength_ua = 4000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int aml_pinconf_get_gpio_bit(struct aml_pinctrl *info,
+ unsigned int pin,
+ unsigned int reg_type)
+{
+ struct pinctrl_gpio_range *range =
+ pinctrl_find_gpio_range_from_pin(info->pctl, pin);
+ struct aml_gpio_bank *bank = gpio_chip_to_bank(range->gc);
+ unsigned int reg, bit, val;
+ int ret;
+
+ aml_calc_reg_and_bit(range, pin, reg_type, &reg, &bit);
+ ret = regmap_read(bank->reg_gpio, reg, &val);
+ if (ret)
+ return ret;
+
+ return BIT(bit) & val ? 1 : 0;
+}
+
+static int aml_pinconf_get_output(struct aml_pinctrl *info,
+ unsigned int pin)
+{
+ int ret = aml_pinconf_get_gpio_bit(info, pin, AML_REG_DIR);
+
+ if (ret < 0)
+ return ret;
+
+ return !ret;
+}
+
+static int aml_pinconf_get_drive(struct aml_pinctrl *info,
+ unsigned int pin)
+{
+ return aml_pinconf_get_gpio_bit(info, pin, AML_REG_OUT);
+}
+
+static int aml_pinconf_get(struct pinctrl_dev *pcdev, unsigned int pin,
+ unsigned long *config)
+{
+ struct aml_pinctrl *info = pinctrl_dev_get_drvdata(pcdev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ u16 arg;
+ int ret;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (aml_pinconf_get_pull(info, pin) == param)
+ arg = 1;
+ else
+ return -EINVAL;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH_UA:
+ ret = aml_pinconf_get_drive_strength(info, pin, &arg);
+ if (ret)
+ return ret;
+ break;
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ ret = aml_pinconf_get_output(info, pin);
+ if (ret <= 0)
+ return -EINVAL;
+ arg = 1;
+ break;
+ case PIN_CONFIG_OUTPUT:
+ ret = aml_pinconf_get_output(info, pin);
+ if (ret <= 0)
+ return -EINVAL;
+
+ ret = aml_pinconf_get_drive(info, pin);
+ if (ret < 0)
+ return -EINVAL;
+
+ arg = ret;
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+ dev_dbg(info->dev, "pinconf for pin %u is %lu\n", pin, *config);
+
+ return 0;
+}
+
+static int aml_pinconf_disable_bias(struct aml_pinctrl *info,
+ unsigned int pin)
+{
+ struct pinctrl_gpio_range *range =
+ pinctrl_find_gpio_range_from_pin(info->pctl, pin);
+ struct aml_gpio_bank *bank = gpio_chip_to_bank(range->gc);
+ unsigned int reg, bit = 0;
+
+ aml_calc_reg_and_bit(range, pin, AML_REG_PULLEN, &reg, &bit);
+
+ return regmap_update_bits(bank->reg_gpio, reg, BIT(bit), 0);
+}
+
+static int aml_pinconf_enable_bias(struct aml_pinctrl *info, unsigned int pin,
+ bool pull_up)
+{
+ struct pinctrl_gpio_range *range =
+ pinctrl_find_gpio_range_from_pin(info->pctl, pin);
+ struct aml_gpio_bank *bank = gpio_chip_to_bank(range->gc);
+ unsigned int reg, bit, val = 0;
+ int ret;
+
+ aml_calc_reg_and_bit(range, pin, AML_REG_PULL, &reg, &bit);
+ if (pull_up)
+ val = BIT(bit);
+
+ ret = regmap_update_bits(bank->reg_gpio, reg, BIT(bit), val);
+ if (ret)
+ return ret;
+
+ aml_calc_reg_and_bit(range, pin, AML_REG_PULLEN, &reg, &bit);
+ return regmap_update_bits(bank->reg_gpio, reg, BIT(bit), BIT(bit));
+}
+
+static int aml_pinconf_set_drive_strength(struct aml_pinctrl *info,
+ unsigned int pin,
+ u16 drive_strength_ua)
+{
+ struct pinctrl_gpio_range *range =
+ pinctrl_find_gpio_range_from_pin(info->pctl, pin);
+ struct aml_gpio_bank *bank = gpio_chip_to_bank(range->gc);
+ unsigned int reg, bit, ds_val;
+
+ if (!bank->reg_ds) {
+ dev_err(info->dev, "drive-strength not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ aml_calc_reg_and_bit(range, pin, AML_REG_DS, &reg, &bit);
+
+ if (drive_strength_ua <= 500) {
+ ds_val = PINCONF_DRV_500UA;
+ } else if (drive_strength_ua <= 2500) {
+ ds_val = PINCONF_DRV_2500UA;
+ } else if (drive_strength_ua <= 3000) {
+ ds_val = PINCONF_DRV_3000UA;
+ } else if (drive_strength_ua <= 4000) {
+ ds_val = PINCONF_DRV_4000UA;
+ } else {
+ dev_warn_once(info->dev,
+ "pin %u: invalid drive-strength : %d , default to 4mA\n",
+ pin, drive_strength_ua);
+ ds_val = PINCONF_DRV_4000UA;
+ }
+
+ return regmap_update_bits(bank->reg_ds, reg, 0x3 << bit, ds_val << bit);
+}
+
+static int aml_pinconf_set_gpio_bit(struct aml_pinctrl *info,
+ unsigned int pin,
+ unsigned int reg_type,
+ bool arg)
+{
+ struct pinctrl_gpio_range *range =
+ pinctrl_find_gpio_range_from_pin(info->pctl, pin);
+ struct aml_gpio_bank *bank = gpio_chip_to_bank(range->gc);
+ unsigned int reg, bit;
+
+ aml_calc_reg_and_bit(range, pin, reg_type, &reg, &bit);
+ return regmap_update_bits(bank->reg_gpio, reg, BIT(bit),
+ arg ? BIT(bit) : 0);
+}
+
+static int aml_pinconf_set_output(struct aml_pinctrl *info,
+ unsigned int pin,
+ bool out)
+{
+ return aml_pinconf_set_gpio_bit(info, pin, AML_REG_DIR, !out);
+}
+
+static int aml_pinconf_set_drive(struct aml_pinctrl *info,
+ unsigned int pin,
+ bool high)
+{
+ return aml_pinconf_set_gpio_bit(info, pin, AML_REG_OUT, high);
+}
+
+static int aml_pinconf_set_output_drive(struct aml_pinctrl *info,
+ unsigned int pin,
+ bool high)
+{
+ int ret;
+
+ ret = aml_pinconf_set_output(info, pin, true);
+ if (ret)
+ return ret;
+
+ return aml_pinconf_set_drive(info, pin, high);
+}
+
+static int aml_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct aml_pinctrl *info = pinctrl_dev_get_drvdata(pcdev);
+ enum pin_config_param param;
+ unsigned int arg = 0;
+ int i, ret;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_DRIVE_STRENGTH_UA:
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ case PIN_CONFIG_OUTPUT:
+ arg = pinconf_to_config_argument(configs[i]);
+ break;
+
+ default:
+ break;
+ }
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ ret = aml_pinconf_disable_bias(info, pin);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ret = aml_pinconf_enable_bias(info, pin, true);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ret = aml_pinconf_enable_bias(info, pin, false);
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH_UA:
+ ret = aml_pinconf_set_drive_strength(info, pin, arg);
+ break;
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ ret = aml_pinconf_set_output(info, pin, arg);
+ break;
+ case PIN_CONFIG_OUTPUT:
+ ret = aml_pinconf_set_output_drive(info, pin, arg);
+ break;
+ default:
+ ret = -ENOTSUPP;
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int aml_pinconf_group_set(struct pinctrl_dev *pcdev,
+ unsigned int num_group,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct aml_pinctrl *info = pinctrl_dev_get_drvdata(pcdev);
+ int i;
+
+ for (i = 0; i < info->groups[num_group].npins; i++) {
+ aml_pinconf_set(pcdev, info->groups[num_group].pins[i], configs,
+ num_configs);
+ }
+
+ return 0;
+}
+
+static int aml_pinconf_group_get(struct pinctrl_dev *pcdev,
+ unsigned int group, unsigned long *config)
+{
+ return -EOPNOTSUPP;
+}
+
+static const struct pinconf_ops aml_pinconf_ops = {
+ .pin_config_get = aml_pinconf_get,
+ .pin_config_set = aml_pinconf_set,
+ .pin_config_group_get = aml_pinconf_group_get,
+ .pin_config_group_set = aml_pinconf_group_set,
+ .is_generic = true,
+};
+
+static int aml_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct aml_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+
+ return info->ngroups;
+}
+
+static const char *aml_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct aml_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+
+ return info->groups[selector].name;
+}
+
+static int aml_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector, const unsigned int **pins,
+ unsigned int *npins)
+{
+ struct aml_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+
+ if (selector >= info->ngroups)
+ return -EINVAL;
+
+ *pins = info->groups[selector].pins;
+ *npins = info->groups[selector].npins;
+
+ return 0;
+}
+
+static inline const struct aml_pctl_group *
+ aml_pctl_find_group_by_name(const struct aml_pinctrl *info,
+ const char *name)
+{
+ int i;
+
+ for (i = 0; i < info->ngroups; i++) {
+ if (!strcmp(info->groups[i].name, name))
+ return &info->groups[i];
+ }
+
+ return NULL;
+}
+
+static void aml_pin_dbg_show(struct pinctrl_dev *pcdev, struct seq_file *s,
+ unsigned int offset)
+{
+ seq_printf(s, " %s", dev_name(pcdev->dev));
+}
+
+static const struct pinctrl_ops aml_pctrl_ops = {
+ .get_groups_count = aml_get_groups_count,
+ .get_group_name = aml_get_group_name,
+ .get_group_pins = aml_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pinmux,
+ .dt_free_map = pinconf_generic_dt_free_map,
+ .pin_dbg_show = aml_pin_dbg_show,
+};
+
+static int aml_pctl_parse_functions(struct device_node *np,
+ struct aml_pinctrl *info, u32 index,
+ int *grp_index)
+{
+ struct device *dev = info->dev;
+ struct aml_pmx_func *func;
+ struct aml_pctl_group *grp;
+ int ret, i;
+
+ func = &info->functions[index];
+ func->name = np->name;
+ func->ngroups = of_get_child_count(np);
+ if (func->ngroups == 0)
+ return dev_err_probe(dev, -EINVAL, "No groups defined\n");
+
+ func->groups = devm_kcalloc(dev, func->ngroups, sizeof(*func->groups), GFP_KERNEL);
+ if (!func->groups)
+ return -ENOMEM;
+
+ i = 0;
+ for_each_child_of_node_scoped(np, child) {
+ func->groups[i++] = child->name;
+ grp = &info->groups[*grp_index];
+ grp->name = child->name;
+ *grp_index += 1;
+ ret = pinconf_generic_parse_dt_pinmux(child, dev, &grp->pins,
+ &grp->func, &grp->npins);
+ if (ret) {
+ dev_err(dev, "function :%s, groups:%s fail\n", func->name, child->name);
+ return ret;
+ }
+ }
+ dev_dbg(dev, "Function[%d\t name:%s,\tgroups:%d]\n", index, func->name, func->ngroups);
+
+ return 0;
+}
+
+static u32 aml_bank_pins(struct device_node *np)
+{
+ struct of_phandle_args of_args;
+
+ if (of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3,
+ 0, &of_args))
+ return 0;
+ else
+ return of_args.args[2];
+}
+
+static int aml_bank_number(struct device_node *np)
+{
+ struct of_phandle_args of_args;
+
+ if (of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3,
+ 0, &of_args))
+ return -EINVAL;
+ else
+ return of_args.args[1] >> 8;
+}
+
+static unsigned int aml_count_pins(struct device_node *np)
+{
+ struct device_node *child;
+ unsigned int pins = 0;
+
+ for_each_child_of_node(np, child) {
+ if (of_property_read_bool(child, "gpio-controller"))
+ pins += aml_bank_pins(child);
+ }
+
+ return pins;
+}
+
+/*
+ * A pinctrl device node contains two kinds of child nodes: GPIO bank
+ * nodes, which have a gpio-controller property, and function nodes,
+ * which contain one or more pin groups. Each pin group carries a
+ * pinmux property (the global pin index in the pinctrl device plus the
+ * mux value for the mux register) and pin configuration properties.
+ */
+static void aml_pctl_dt_child_count(struct aml_pinctrl *info,
+ struct device_node *np)
+{
+ struct device_node *child;
+
+ for_each_child_of_node(np, child) {
+ if (of_property_read_bool(child, "gpio-controller")) {
+ info->nbanks++;
+ } else {
+ info->nfunctions++;
+ info->ngroups += of_get_child_count(child);
+ }
+ }
+}
+
+static struct regmap *aml_map_resource(struct device *dev, unsigned int id,
+ struct device_node *node, char *name)
+{
+ struct resource res;
+ void __iomem *base;
+ int i;
+
+ struct regmap_config aml_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ };
+
+ i = of_property_match_string(node, "reg-names", name);
+ if (i < 0)
+ return NULL;
+ if (of_address_to_resource(node, i, &res))
+ return NULL;
+ base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(base))
+ return ERR_CAST(base);
+
+ aml_regmap_config.max_register = resource_size(&res) - 4;
+ aml_regmap_config.name = devm_kasprintf(dev, GFP_KERNEL,
+ "%s-%s", aml_bank_name[id], name);
+ if (!aml_regmap_config.name)
+ return ERR_PTR(-ENOMEM);
+
+ return devm_regmap_init_mmio(dev, base, &aml_regmap_config);
+}
+
+static inline int aml_gpio_calc_reg_and_bit(struct aml_gpio_bank *bank,
+ unsigned int reg_type,
+ unsigned int gpio,
+ unsigned int *reg,
+ unsigned int *bit)
+{
+ *bit = gpio * aml_bit_strides[reg_type] + bank->pc.bit_offset[reg_type];
+ *reg = (bank->pc.reg_offset[reg_type] + (*bit / 32)) * 4;
+ *bit &= 0x1f;
+
+ return 0;
+}
+
+static int aml_gpio_get_direction(struct gpio_chip *chip, unsigned int gpio)
+{
+ struct aml_gpio_bank *bank = gpiochip_get_data(chip);
+ unsigned int bit, reg, val;
+ int ret;
+
+ aml_gpio_calc_reg_and_bit(bank, AML_REG_DIR, gpio, &reg, &bit);
+
+ ret = regmap_read(bank->reg_gpio, reg, &val);
+ if (ret)
+ return ret;
+
+ return BIT(bit) & val ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT;
+}
+
+static int aml_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio)
+{
+ struct aml_gpio_bank *bank = gpiochip_get_data(chip);
+ unsigned int bit, reg;
+
+ aml_gpio_calc_reg_and_bit(bank, AML_REG_DIR, gpio, &reg, &bit);
+
+ return regmap_update_bits(bank->reg_gpio, reg, BIT(bit), BIT(bit));
+}
+
+static int aml_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio,
+ int value)
+{
+ struct aml_gpio_bank *bank = gpiochip_get_data(chip);
+ unsigned int bit, reg;
+ int ret;
+
+ aml_gpio_calc_reg_and_bit(bank, AML_REG_DIR, gpio, &reg, &bit);
+ ret = regmap_update_bits(bank->reg_gpio, reg, BIT(bit), 0);
+ if (ret < 0)
+ return ret;
+
+ aml_gpio_calc_reg_and_bit(bank, AML_REG_OUT, gpio, &reg, &bit);
+
+ return regmap_update_bits(bank->reg_gpio, reg, BIT(bit),
+ value ? BIT(bit) : 0);
+}
+
+static void aml_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
+{
+ struct aml_gpio_bank *bank = gpiochip_get_data(chip);
+ unsigned int bit, reg;
+
+ aml_gpio_calc_reg_and_bit(bank, AML_REG_OUT, gpio, &reg, &bit);
+
+ regmap_update_bits(bank->reg_gpio, reg, BIT(bit),
+ value ? BIT(bit) : 0);
+}
+
+static int aml_gpio_get(struct gpio_chip *chip, unsigned int gpio)
+{
+ struct aml_gpio_bank *bank = gpiochip_get_data(chip);
+ unsigned int reg, bit, val;
+
+ aml_gpio_calc_reg_and_bit(bank, AML_REG_IN, gpio, &reg, &bit);
+ regmap_read(bank->reg_gpio, reg, &val);
+
+ return !!(val & BIT(bit));
+}
+
+static const struct gpio_chip aml_gpio_template = {
+ .request = gpiochip_generic_request,
+ .free = gpiochip_generic_free,
+ .set_config = gpiochip_generic_config,
+ .set = aml_gpio_set,
+ .get = aml_gpio_get,
+ .direction_input = aml_gpio_direction_input,
+ .direction_output = aml_gpio_direction_output,
+ .get_direction = aml_gpio_get_direction,
+ .can_sleep = false,
+};
+
+static void init_bank_register_bit(struct aml_pinctrl *info,
+ struct aml_gpio_bank *bank)
+{
+ const struct aml_pctl_data *data = info->data;
+ const struct aml_reg_bit *aml_rb;
+ bool def_offs = true;
+ int i;
+
+ if (data) {
+ for (i = 0; i < data->number; i++) {
+ aml_rb = &data->rb_offs[i];
+ if (bank->bank_id == aml_rb->bank_id) {
+ def_offs = false;
+ break;
+ }
+ }
+ }
+
+ if (def_offs) {
+ for (i = 0; i < AML_NUM_REG; i++) {
+ bank->pc.reg_offset[i] = aml_def_regoffs[i];
+ bank->pc.bit_offset[i] = 0;
+ }
+ } else {
+ for (i = 0; i < AML_NUM_REG; i++) {
+ bank->pc.reg_offset[i] = aml_rb->reg_offs[i];
+ bank->pc.bit_offset[i] = aml_rb->bit_offs[i];
+ }
+ }
+}
+
+static int aml_gpiolib_register_bank(struct aml_pinctrl *info,
+ int bank_nr, struct device_node *np)
+{
+ struct aml_gpio_bank *bank = &info->banks[bank_nr];
+ struct device *dev = info->dev;
+ int ret = 0;
+
+ ret = aml_bank_number(np);
+ if (ret < 0) {
+ dev_err(dev, "get num=%d bank identity fail\n", bank_nr);
+ return -EINVAL;
+ }
+ bank->bank_id = ret;
+
+ bank->reg_mux = aml_map_resource(dev, bank->bank_id, np, "mux");
+ if (IS_ERR_OR_NULL(bank->reg_mux)) {
+ if (bank->bank_id == AMLOGIC_GPIO_TEST_N ||
+ bank->bank_id == AMLOGIC_GPIO_ANALOG)
+ bank->reg_mux = NULL;
+ else
+ return dev_err_probe(dev, bank->reg_mux ? PTR_ERR(bank->reg_mux) : -ENOENT,
+ "mux registers not found\n");
+ }
+
+ bank->reg_gpio = aml_map_resource(dev, bank->bank_id, np, "gpio");
+ if (IS_ERR_OR_NULL(bank->reg_gpio))
+ return dev_err_probe(dev, bank->reg_gpio ? PTR_ERR(bank->reg_gpio) : -ENOENT,
+ "gpio registers not found\n");
+
+ bank->reg_ds = aml_map_resource(dev, bank->bank_id, np, "ds");
+ if (IS_ERR_OR_NULL(bank->reg_ds)) {
+ dev_dbg(info->dev, "ds registers not found - skipping\n");
+ bank->reg_ds = bank->reg_gpio;
+ }
+
+ bank->gpio_chip = aml_gpio_template;
+ bank->gpio_chip.base = -1;
+ bank->gpio_chip.ngpio = aml_bank_pins(np);
+ bank->gpio_chip.fwnode = of_fwnode_handle(np);
+ bank->gpio_chip.parent = dev;
+
+ init_bank_register_bit(info, bank);
+ bank->gpio_chip.label = aml_bank_name[bank->bank_id];
+
+ bank->pin_base = bank->bank_id << 8;
+
+ return 0;
+}
+
+static int aml_pctl_probe_dt(struct platform_device *pdev,
+ struct pinctrl_desc *pctl_desc,
+ struct aml_pinctrl *info)
+{
+ struct device *dev = &pdev->dev;
+ struct pinctrl_pin_desc *pdesc;
+ struct device_node *np = dev->of_node;
+ int grp_index = 0;
+ int i = 0, j = 0, k = 0, bank;
+ int ret = 0;
+
+ aml_pctl_dt_child_count(info, np);
+ if (!info->nbanks)
+ return dev_err_probe(dev, -EINVAL, "you need at least one gpio bank\n");
+
+ dev_dbg(dev, "nbanks = %d\n", info->nbanks);
+ dev_dbg(dev, "nfunctions = %d\n", info->nfunctions);
+ dev_dbg(dev, "ngroups = %d\n", info->ngroups);
+
+ info->functions = devm_kcalloc(dev, info->nfunctions, sizeof(*info->functions), GFP_KERNEL);
+
+ info->groups = devm_kcalloc(dev, info->ngroups, sizeof(*info->groups), GFP_KERNEL);
+
+ info->banks = devm_kcalloc(dev, info->nbanks, sizeof(*info->banks), GFP_KERNEL);
+
+ if (!info->functions || !info->groups || !info->banks)
+ return -ENOMEM;
+
+ info->data = (struct aml_pctl_data *)of_device_get_match_data(dev);
+
+ pctl_desc->npins = aml_count_pins(np);
+
+ pdesc = devm_kcalloc(dev, pctl_desc->npins, sizeof(*pdesc), GFP_KERNEL);
+ if (!pdesc)
+ return -ENOMEM;
+
+ pctl_desc->pins = pdesc;
+
+ bank = 0;
+ for_each_child_of_node_scoped(np, child) {
+ if (of_property_read_bool(child, "gpio-controller")) {
+ const char *bank_name = NULL;
+ char **pin_names;
+
+ ret = aml_gpiolib_register_bank(info, bank, child);
+ if (ret)
+ return ret;
+
+ k = info->banks[bank].pin_base;
+ bank_name = info->banks[bank].gpio_chip.label;
+
+ pin_names = devm_kasprintf_strarray(dev, bank_name,
+ info->banks[bank].gpio_chip.ngpio);
+ if (IS_ERR(pin_names))
+ return PTR_ERR(pin_names);
+
+ for (j = 0; j < info->banks[bank].gpio_chip.ngpio; j++, k++) {
+ pdesc->number = k;
+ pdesc->name = pin_names[j];
+ pdesc++;
+ }
+ bank++;
+ } else {
+ ret = aml_pctl_parse_functions(child, info,
+ i++, &grp_index);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int aml_pctl_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct aml_pinctrl *info;
+ struct pinctrl_desc *pctl_desc;
+ int ret, i;
+
+ pctl_desc = devm_kzalloc(dev, sizeof(*pctl_desc), GFP_KERNEL);
+ if (!pctl_desc)
+ return -ENOMEM;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = dev;
+ platform_set_drvdata(pdev, info);
+ ret = aml_pctl_probe_dt(pdev, pctl_desc, info);
+ if (ret)
+ return ret;
+
+ pctl_desc->owner = THIS_MODULE;
+ pctl_desc->pctlops = &aml_pctrl_ops;
+ pctl_desc->pmxops = &aml_pmx_ops;
+ pctl_desc->confops = &aml_pinconf_ops;
+ pctl_desc->name = dev_name(dev);
+
+ info->pctl = devm_pinctrl_register(dev, pctl_desc, info);
+ if (IS_ERR(info->pctl))
+ return dev_err_probe(dev, PTR_ERR(info->pctl), "Failed pinctrl registration\n");
+
+ for (i = 0; i < info->nbanks; i++) {
+ ret = gpiochip_add_data(&info->banks[i].gpio_chip, &info->banks[i]);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add gpiochip(%d)!\n", i);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id aml_pctl_of_match[] = {
+ { .compatible = "amlogic,pinctrl-a4", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, aml_pctl_of_match);
+
+static struct platform_driver aml_pctl_driver = {
+ .driver = {
+ .name = "amlogic-pinctrl",
+ .of_match_table = aml_pctl_of_match,
+ },
+ .probe = aml_pctl_probe,
+};
+module_platform_driver(aml_pctl_driver);
+
+MODULE_AUTHOR("Xianwei Zhao <xianwei.zhao@amlogic.com>");
+MODULE_DESCRIPTION("Pin controller and GPIO driver for Amlogic SoC");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/pinctrl/nuvoton/pinctrl-ma35.c b/drivers/pinctrl/nuvoton/pinctrl-ma35.c
index 59c4e7c6cdde..06ae1fe8b8c5 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-ma35.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-ma35.c
@@ -98,12 +98,6 @@ static const u32 ds_3300mv_tbl[] = {
17100, 25600, 34100, 42800, 48000, 56000, 77000, 82000,
};
-struct ma35_pin_func {
- const char *name;
- const char **groups;
- u32 ngroups;
-};
-
struct ma35_pin_setting {
u32 offset;
u32 shift;
@@ -112,13 +106,6 @@ struct ma35_pin_setting {
unsigned int nconfigs;
};
-struct ma35_pin_group {
- const char *name;
- unsigned int npins;
- unsigned int *pins;
- struct ma35_pin_setting *settings;
-};
-
struct ma35_pin_bank {
void __iomem *reg_base;
struct clk *clk;
@@ -147,9 +134,9 @@ struct ma35_pinctrl {
struct pinctrl_dev *pctl;
const struct ma35_pinctrl_soc_info *info;
struct regmap *regmap;
- struct ma35_pin_group *groups;
+ struct group_desc *groups;
unsigned int ngroups;
- struct ma35_pin_func *functions;
+ struct pinfunction *functions;
unsigned int nfunctions;
};
@@ -166,7 +153,7 @@ static const char *ma35_get_group_name(struct pinctrl_dev *pctldev, unsigned int
{
struct ma35_pinctrl *npctl = pinctrl_dev_get_drvdata(pctldev);
- return npctl->groups[selector].name;
+ return npctl->groups[selector].grp.name;
}
static int ma35_get_group_pins(struct pinctrl_dev *pctldev, unsigned int selector,
@@ -177,19 +164,19 @@ static int ma35_get_group_pins(struct pinctrl_dev *pctldev, unsigned int selecto
if (selector >= npctl->ngroups)
return -EINVAL;
- *pins = npctl->groups[selector].pins;
- *npins = npctl->groups[selector].npins;
+ *pins = npctl->groups[selector].grp.pins;
+ *npins = npctl->groups[selector].grp.npins;
return 0;
}
-static struct ma35_pin_group *ma35_pinctrl_find_group_by_name(
- const struct ma35_pinctrl *npctl, const char *name)
+static struct group_desc *
+ma35_pinctrl_find_group_by_name(const struct ma35_pinctrl *npctl, const char *name)
{
int i;
for (i = 0; i < npctl->ngroups; i++) {
- if (!strcmp(npctl->groups[i].name, name))
+ if (!strcmp(npctl->groups[i].grp.name, name))
return &npctl->groups[i];
}
return NULL;
@@ -201,9 +188,10 @@ static int ma35_pinctrl_dt_node_to_map_func(struct pinctrl_dev *pctldev,
unsigned int *num_maps)
{
struct ma35_pinctrl *npctl = pinctrl_dev_get_drvdata(pctldev);
- struct ma35_pin_group *grp;
+ struct ma35_pin_setting *setting;
struct pinctrl_map *new_map;
struct device_node *parent;
+ struct group_desc *grp;
int map_num = 1;
int i;
@@ -217,7 +205,7 @@ static int ma35_pinctrl_dt_node_to_map_func(struct pinctrl_dev *pctldev,
return -EINVAL;
}
- map_num += grp->npins;
+ map_num += grp->grp.npins;
new_map = kcalloc(map_num, sizeof(*new_map), GFP_KERNEL);
if (!new_map)
return -ENOMEM;
@@ -229,17 +217,19 @@ static int ma35_pinctrl_dt_node_to_map_func(struct pinctrl_dev *pctldev,
if (!parent)
return -EINVAL;
+ setting = grp->data;
+
new_map[0].type = PIN_MAP_TYPE_MUX_GROUP;
new_map[0].data.mux.function = parent->name;
new_map[0].data.mux.group = np->name;
of_node_put(parent);
new_map++;
- for (i = 0; i < grp->npins; i++) {
+ for (i = 0; i < grp->grp.npins; i++) {
new_map[i].type = PIN_MAP_TYPE_CONFIGS_PIN;
- new_map[i].data.configs.group_or_pin = pin_get_name(pctldev, grp->pins[i]);
- new_map[i].data.configs.configs = grp->settings[i].configs;
- new_map[i].data.configs.num_configs = grp->settings[i].nconfigs;
+ new_map[i].data.configs.group_or_pin = pin_get_name(pctldev, grp->grp.pins[i]);
+ new_map[i].data.configs.configs = setting[i].configs;
+ new_map[i].data.configs.num_configs = setting[i].nconfigs;
}
dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n",
(*map)->data.mux.function, (*map)->data.mux.group, map_num);
@@ -287,14 +277,14 @@ static int ma35_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int selecto
unsigned int group)
{
struct ma35_pinctrl *npctl = pinctrl_dev_get_drvdata(pctldev);
- struct ma35_pin_group *grp = &npctl->groups[group];
- struct ma35_pin_setting *setting = grp->settings;
+ struct group_desc *grp = &npctl->groups[group];
+ struct ma35_pin_setting *setting = grp->data;
u32 i, regval;
dev_dbg(npctl->dev, "enable function %s group %s\n",
- npctl->functions[selector].name, npctl->groups[group].name);
+ npctl->functions[selector].name, grp->grp.name);
- for (i = 0; i < grp->npins; i++) {
+ for (i = 0; i < grp->grp.npins; i++) {
regmap_read(npctl->regmap, setting->offset, &regval);
regval &= ~GENMASK(setting->shift + MA35_MFP_BITS_PER_PORT - 1,
setting->shift);
@@ -529,7 +519,6 @@ static int ma35_gpiolib_register(struct platform_device *pdev, struct ma35_pinct
bank->irqtype = 0;
bank->irqinten = 0;
bank->chip.label = bank->name;
- bank->chip.of_gpio_n_cells = 2;
bank->chip.parent = &pdev->dev;
bank->chip.request = ma35_gpio_core_to_request;
bank->chip.direction_input = ma35_gpio_core_direction_in;
@@ -986,22 +975,22 @@ static const struct pinconf_ops ma35_pinconf_ops = {
.is_generic = true,
};
-static int ma35_pinctrl_parse_groups(struct device_node *np, struct ma35_pin_group *grp,
+static int ma35_pinctrl_parse_groups(struct fwnode_handle *fwnode, struct group_desc *grp,
struct ma35_pinctrl *npctl, u32 index)
{
+ struct device_node *np = to_of_node(fwnode);
struct ma35_pin_setting *pin;
unsigned long *configs;
unsigned int nconfigs;
+ unsigned int *pins;
int i, j, count, ret;
u32 *elems;
- grp->name = np->name;
-
ret = pinconf_generic_parse_dt_config(np, NULL, &configs, &nconfigs);
if (ret)
return ret;
- count = of_property_count_elems_of_size(np, "nuvoton,pins", sizeof(u32));
+ count = fwnode_property_count_u32(fwnode, "nuvoton,pins");
if (!count || count % 3)
return -EINVAL;
@@ -1009,21 +998,22 @@ static int ma35_pinctrl_parse_groups(struct device_node *np, struct ma35_pin_gro
if (!elems)
return -ENOMEM;
- ret = of_property_read_u32_array(np, "nuvoton,pins", elems, count);
+ grp->grp.name = np->name;
+
+ ret = fwnode_property_read_u32_array(fwnode, "nuvoton,pins", elems, count);
if (ret)
return -EINVAL;
+ grp->grp.npins = count / 3;
- grp->npins = count / 3;
-
- grp->pins = devm_kcalloc(npctl->dev, grp->npins, sizeof(*grp->pins), GFP_KERNEL);
- if (!grp->pins)
+ pins = devm_kcalloc(npctl->dev, grp->grp.npins, sizeof(*pins), GFP_KERNEL);
+ if (!pins)
return -ENOMEM;
+ grp->grp.pins = pins;
- grp->settings = devm_kcalloc(npctl->dev, grp->npins, sizeof(*grp->settings), GFP_KERNEL);
- if (!grp->settings)
+ pin = devm_kcalloc(npctl->dev, grp->grp.npins, sizeof(*pin), GFP_KERNEL);
+ if (!pin)
return -ENOMEM;
-
- pin = grp->settings;
+ grp->data = pin;
for (i = 0, j = 0; i < count; i += 3, j++) {
pin->offset = elems[i] * MA35_MFP_REG_SZ_PER_BANK + MA35_MFP_REG_BASE;
@@ -1031,19 +1021,21 @@ static int ma35_pinctrl_parse_groups(struct device_node *np, struct ma35_pin_gro
pin->muxval = elems[i + 2];
pin->configs = configs;
pin->nconfigs = nconfigs;
- grp->pins[j] = npctl->info->get_pin_num(pin->offset, pin->shift);
+ pins[j] = npctl->info->get_pin_num(pin->offset, pin->shift);
pin++;
}
return 0;
}
-static int ma35_pinctrl_parse_functions(struct device_node *np, struct ma35_pinctrl *npctl,
+static int ma35_pinctrl_parse_functions(struct fwnode_handle *fwnode, struct ma35_pinctrl *npctl,
u32 index)
{
- struct device_node *child;
- struct ma35_pin_func *func;
- struct ma35_pin_group *grp;
+ struct device_node *np = to_of_node(fwnode);
+ struct fwnode_handle *child;
+ struct pinfunction *func;
+ struct group_desc *grp;
static u32 grp_index;
+ const char **groups;
u32 ret, i = 0;
dev_dbg(npctl->dev, "parse function(%d): %s\n", index, np->name);
@@ -1055,31 +1047,34 @@ static int ma35_pinctrl_parse_functions(struct device_node *np, struct ma35_pinc
if (func->ngroups <= 0)
return 0;
- func->groups = devm_kcalloc(npctl->dev, func->ngroups, sizeof(char *), GFP_KERNEL);
- if (!func->groups)
+ groups = devm_kcalloc(npctl->dev, func->ngroups, sizeof(*groups), GFP_KERNEL);
+ if (!groups)
return -ENOMEM;
- for_each_child_of_node(np, child) {
- func->groups[i] = child->name;
+ fwnode_for_each_child_node(fwnode, child) {
+ struct device_node *node = to_of_node(child);
+
+ groups[i] = node->name;
grp = &npctl->groups[grp_index++];
ret = ma35_pinctrl_parse_groups(child, grp, npctl, i++);
if (ret) {
- of_node_put(child);
+ fwnode_handle_put(child);
return ret;
}
}
+
+ func->groups = groups;
return 0;
}
static int ma35_pinctrl_probe_dt(struct platform_device *pdev, struct ma35_pinctrl *npctl)
{
+ struct device *dev = &pdev->dev;
struct fwnode_handle *child;
u32 idx = 0;
int ret;
- device_for_each_child_node(&pdev->dev, child) {
- if (fwnode_property_present(child, "gpio-controller"))
- continue;
+ for_each_gpiochip_node(dev, child) {
npctl->nfunctions++;
npctl->ngroups += of_get_child_count(to_of_node(child));
}
@@ -1097,11 +1092,8 @@ static int ma35_pinctrl_probe_dt(struct platform_device *pdev, struct ma35_pinct
if (!npctl->groups)
return -ENOMEM;
- device_for_each_child_node(&pdev->dev, child) {
- if (fwnode_property_present(child, "gpio-controller"))
- continue;
-
- ret = ma35_pinctrl_parse_functions(to_of_node(child), npctl, idx++);
+ for_each_gpiochip_node(dev, child) {
+ ret = ma35_pinctrl_parse_functions(child, npctl, idx++);
if (ret) {
fwnode_handle_put(child);
dev_err(&pdev->dev, "failed to parse function\n");
@@ -1146,7 +1138,7 @@ int ma35_pinctrl_probe(struct platform_device *pdev, const struct ma35_pinctrl_s
npctl->info = info;
npctl->dev = &pdev->dev;
- npctl->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "nuvoton,sys");
+ npctl->regmap = syscon_regmap_lookup_by_phandle(dev_of_node(dev), "nuvoton,sys");
if (IS_ERR(npctl->regmap))
return dev_err_probe(&pdev->dev, PTR_ERR(npctl->regmap),
"No syscfg phandle specified\n");
diff --git a/drivers/pinctrl/nuvoton/pinctrl-ma35d1.c b/drivers/pinctrl/nuvoton/pinctrl-ma35d1.c
index 8bb9a5a35954..eafa06ca0879 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-ma35d1.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-ma35d1.c
@@ -9,7 +9,6 @@
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
index 62a46d824b46..dfd32feb3428 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
@@ -7,10 +7,8 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
@@ -504,17 +502,6 @@ static const int lkgpo2_pins[] = { 9 };
static const int nprd_smi_pins[] = { 190 };
-/*
- * pin: name, number
- * group: name, npins, pins
- * function: name, ngroups, groups
- */
-struct npcm7xx_group {
- const char *name;
- const unsigned int *pins;
- int npins;
-};
-
#define NPCM7XX_GRPS \
NPCM7XX_GRP(smb0), \
NPCM7XX_GRP(smb0b), \
@@ -642,22 +629,14 @@ enum {
#undef NPCM7XX_GRP
};
-static struct npcm7xx_group npcm7xx_groups[] = {
-#define NPCM7XX_GRP(x) { .name = #x, .pins = x ## _pins, \
- .npins = ARRAY_SIZE(x ## _pins) }
+static struct pingroup npcm7xx_groups[] = {
+#define NPCM7XX_GRP(x) PINCTRL_PINGROUP(#x, x ## _pins, ARRAY_SIZE(x ## _pins))
NPCM7XX_GRPS
#undef NPCM7XX_GRP
};
#define NPCM7XX_SFUNC(a) NPCM7XX_FUNC(a, #a)
#define NPCM7XX_FUNC(a, b...) static const char *a ## _grp[] = { b }
-#define NPCM7XX_MKFUNC(nm) { .name = #nm, .ngroups = ARRAY_SIZE(nm ## _grp), \
- .groups = nm ## _grp }
-struct npcm7xx_func {
- const char *name;
- const unsigned int ngroups;
- const char *const *groups;
-};
NPCM7XX_SFUNC(smb0);
NPCM7XX_SFUNC(smb0b);
@@ -776,7 +755,8 @@ NPCM7XX_SFUNC(lkgpo2);
NPCM7XX_SFUNC(nprd_smi);
/* Function names */
-static struct npcm7xx_func npcm7xx_funcs[] = {
+static struct pinfunction npcm7xx_funcs[] = {
+#define NPCM7XX_MKFUNC(nm) PINCTRL_PINFUNCTION(#nm, nm ## _grp, ARRAY_SIZE(nm ## _grp))
NPCM7XX_MKFUNC(smb0),
NPCM7XX_MKFUNC(smb0b),
NPCM7XX_MKFUNC(smb0c),
@@ -892,6 +872,7 @@ static struct npcm7xx_func npcm7xx_funcs[] = {
NPCM7XX_MKFUNC(lkgpo1),
NPCM7XX_MKFUNC(lkgpo2),
NPCM7XX_MKFUNC(nprd_smi),
+#undef NPCM7XX_MKFUNC
};
#define NPCM7XX_PINCFG(a, b, c, d, e, f, g, h, i, j, k) \
@@ -1849,22 +1830,13 @@ static struct pinctrl_desc npcm7xx_pinctrl_desc = {
static int npcm7xx_gpio_of(struct npcm7xx_pinctrl *pctrl)
{
int ret = -ENXIO;
- struct resource res;
struct device *dev = pctrl->dev;
struct fwnode_reference_args args;
struct fwnode_handle *child;
int id = 0;
for_each_gpiochip_node(dev, child) {
- struct device_node *np = to_of_node(child);
-
- ret = of_address_to_resource(np, 0, &res);
- if (ret < 0) {
- dev_err(dev, "Resource fail for GPIO bank %u\n", id);
- return ret;
- }
-
- pctrl->gpio_bank[id].base = ioremap(res.start, resource_size(&res));
+ pctrl->gpio_bank[id].base = fwnode_iomap(child, 0);
if (!pctrl->gpio_bank[id].base)
return -EINVAL;
@@ -1886,7 +1858,7 @@ static int npcm7xx_gpio_of(struct npcm7xx_pinctrl *pctrl)
return ret;
}
- ret = irq_of_parse_and_map(np, 0);
+ ret = fwnode_irq_get(child, 0);
if (!ret) {
dev_err(dev, "No IRQ for GPIO bank %u\n", id);
return -EINVAL;
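
The npcm7xx hunk above replaces hand-rolled group and function structs with the generic pingroup/pinfunction types and their initializer macros. The sketch below only illustrates the shape of that conversion: the macro bodies are simplified approximations of the kernel's PINCTRL_PINGROUP()/PINCTRL_PINFUNCTION() helpers, and the pin numbers are arbitrary sample values.

#include <stdio.h>

struct pingroup {
        const char *name;
        const unsigned int *pins;
        unsigned int npins;
};

struct pinfunction {
        const char *name;
        const char * const *groups;
        unsigned int ngroups;
};

#define ARRAY_SIZE(a)   (sizeof(a) / sizeof((a)[0]))
/* simplified approximations of the generic kernel macros */
#define PINCTRL_PINGROUP(_name, _pins, _npins) \
        { .name = _name, .pins = _pins, .npins = _npins }
#define PINCTRL_PINFUNCTION(_name, _groups, _ngroups) \
        { .name = _name, .groups = _groups, .ngroups = _ngroups }

static const unsigned int smb0_pins[] = { 115, 114 };   /* sample values */
static const char * const smb0_grp[] = { "smb0" };

static const struct pingroup groups[] = {
        PINCTRL_PINGROUP("smb0", smb0_pins, ARRAY_SIZE(smb0_pins)),
};

static const struct pinfunction funcs[] = {
        PINCTRL_PINFUNCTION("smb0", smb0_grp, ARRAY_SIZE(smb0_grp)),
};

int main(void)
{
        printf("group %s: %u pins; function %s: %u group(s)\n",
               groups[0].name, groups[0].npins, funcs[0].name, funcs[0].ngroups);
        return 0;
}
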
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
index 471f644c5eef..be3db8ab406c 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
@@ -587,17 +587,6 @@ static const int hgpio5_pins[] = { 25 };
static const int hgpio6_pins[] = { 59 };
static const int hgpio7_pins[] = { 60 };
-/*
- * pin: name, number
- * group: name, npins, pins
- * function: name, ngroups, groups
- */
-struct npcm8xx_pingroup {
- const char *name;
- const unsigned int *pins;
- int npins;
-};
-
#define NPCM8XX_GRPS \
NPCM8XX_GRP(gpi36), \
NPCM8XX_GRP(gpi35), \
@@ -829,22 +818,14 @@ enum {
#undef NPCM8XX_GRP
};
-static struct npcm8xx_pingroup npcm8xx_pingroups[] = {
-#define NPCM8XX_GRP(x) { .name = #x, .pins = x ## _pins, \
- .npins = ARRAY_SIZE(x ## _pins) }
+static struct pingroup npcm8xx_pingroups[] = {
+#define NPCM8XX_GRP(x) PINCTRL_PINGROUP(#x, x ## _pins, ARRAY_SIZE(x ## _pins))
NPCM8XX_GRPS
#undef NPCM8XX_GRP
};
#define NPCM8XX_SFUNC(a) NPCM8XX_FUNC(a, #a)
#define NPCM8XX_FUNC(a, b...) static const char *a ## _grp[] = { b }
-#define NPCM8XX_MKFUNC(nm) { .name = #nm, .ngroups = ARRAY_SIZE(nm ## _grp), \
- .groups = nm ## _grp }
-struct npcm8xx_func {
- const char *name;
- const unsigned int ngroups;
- const char *const *groups;
-};
NPCM8XX_SFUNC(gpi36);
NPCM8XX_SFUNC(gpi35);
@@ -1067,7 +1048,8 @@ NPCM8XX_SFUNC(hgpio6);
NPCM8XX_SFUNC(hgpio7);
/* Function names */
-static struct npcm8xx_func npcm8xx_funcs[] = {
+static struct pinfunction npcm8xx_funcs[] = {
+#define NPCM8XX_MKFUNC(nm) PINCTRL_PINFUNCTION(#nm, nm ## _grp, ARRAY_SIZE(nm ## _grp))
NPCM8XX_MKFUNC(gpi36),
NPCM8XX_MKFUNC(gpi35),
NPCM8XX_MKFUNC(tp_jtag3),
@@ -1287,15 +1269,18 @@ static struct npcm8xx_func npcm8xx_funcs[] = {
NPCM8XX_MKFUNC(hgpio5),
NPCM8XX_MKFUNC(hgpio6),
NPCM8XX_MKFUNC(hgpio7),
+#undef NPCM8XX_MKFUNC
};
#define NPCM8XX_PINCFG(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q) \
- [a] { .fn0 = fn_ ## b, .reg0 = NPCM8XX_GCR_ ## c, .bit0 = d, \
+ [a] = { \
+ .flag = q, \
+ .fn0 = fn_ ## b, .reg0 = NPCM8XX_GCR_ ## c, .bit0 = d, \
.fn1 = fn_ ## e, .reg1 = NPCM8XX_GCR_ ## f, .bit1 = g, \
.fn2 = fn_ ## h, .reg2 = NPCM8XX_GCR_ ## i, .bit2 = j, \
.fn3 = fn_ ## k, .reg3 = NPCM8XX_GCR_ ## l, .bit3 = m, \
.fn4 = fn_ ## n, .reg4 = NPCM8XX_GCR_ ## o, .bit4 = p, \
- .flag = q }
+ }
/* Drive strength controlled by NPCM8XX_GP_N_ODSC */
#define DRIVE_STRENGTH_LO_SHIFT 8
@@ -2361,8 +2346,8 @@ static int npcm8xx_gpio_fw(struct npcm8xx_pinctrl *pctrl)
return dev_err_probe(dev, ret, "gpio-ranges fail for GPIO bank %u\n", id);
ret = fwnode_irq_get(child, 0);
- if (!ret)
- return dev_err_probe(dev, ret, "No IRQ for GPIO bank %u\n", id);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to retrieve IRQ for bank %u\n", id);
pctrl->gpio_bank[id].irq = ret;
pctrl->gpio_bank[id].irq_chip = npcmgpio_irqchip;
@@ -2374,6 +2359,9 @@ static int npcm8xx_gpio_fw(struct npcm8xx_pinctrl *pctrl)
pctrl->gpio_bank[id].gc.parent = dev;
pctrl->gpio_bank[id].gc.fwnode = child;
pctrl->gpio_bank[id].gc.label = devm_kasprintf(dev, GFP_KERNEL, "%pfw", child);
+ if (pctrl->gpio_bank[id].gc.label == NULL)
+ return -ENOMEM;
+
pctrl->gpio_bank[id].gc.dbg_show = npcmgpio_dbg_show;
pctrl->gpio_bank[id].direction_input = pctrl->gpio_bank[id].gc.direction_input;
pctrl->gpio_bank[id].gc.direction_input = npcmgpio_direction_input;
@@ -2436,7 +2424,7 @@ static int npcm8xx_pinctrl_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pctrl);
pctrl->gcr_regmap =
- syscon_regmap_lookup_by_phandle(dev->of_node, "nuvoton,sysgcr");
+ syscon_regmap_lookup_by_phandle(dev_of_node(dev), "nuvoton,sysgcr");
if (IS_ERR(pctrl->gcr_regmap))
return dev_err_probe(dev, PTR_ERR(pctrl->gcr_regmap),
"Failed to find nuvoton,sysgcr property\n");
diff --git a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
index cdad4ef11a2f..4264ca749175 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
@@ -10,7 +10,6 @@
// block, shared between all GPIO banks
#include <linux/device.h>
-#include <linux/fwnode.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -18,6 +17,7 @@
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/pinctrl/pinconf.h>
@@ -482,13 +482,6 @@ static const struct pingroup wpcm450_groups[] = {
#define WPCM450_SFUNC(a) WPCM450_FUNC(a, #a)
#define WPCM450_FUNC(a, b...) static const char *a ## _grp[] = { b }
-#define WPCM450_MKFUNC(nm) { .name = #nm, .ngroups = ARRAY_SIZE(nm ## _grp), \
- .groups = nm ## _grp }
-struct wpcm450_func {
- const char *name;
- const unsigned int ngroups;
- const char *const *groups;
-};
WPCM450_SFUNC(smb3);
WPCM450_SFUNC(smb4);
@@ -555,7 +548,8 @@ WPCM450_FUNC(gpio, WPCM450_GRPS);
#undef WPCM450_GRP
/* Function names */
-static struct wpcm450_func wpcm450_funcs[] = {
+static struct pinfunction wpcm450_funcs[] = {
+#define WPCM450_MKFUNC(nm) PINCTRL_PINFUNCTION(#nm, nm ## _grp, ARRAY_SIZE(nm ## _grp))
WPCM450_MKFUNC(smb3),
WPCM450_MKFUNC(smb4),
WPCM450_MKFUNC(smb5),
@@ -616,6 +610,7 @@ static struct wpcm450_func wpcm450_funcs[] = {
WPCM450_MKFUNC(hg6),
WPCM450_MKFUNC(hg7),
WPCM450_MKFUNC(gpio),
+#undef WPCM450_MKFUNC
};
#define WPCM450_PINCFG(a, b, c, d, e, f, g) \
@@ -1033,7 +1028,7 @@ static int wpcm450_gpio_register(struct platform_device *pdev,
return dev_err_probe(dev, PTR_ERR(pctrl->gpio_base),
"Resource fail for GPIO controller\n");
- device_for_each_child_node(dev, child) {
+ for_each_gpiochip_node(dev, child) {
void __iomem *dat = NULL;
void __iomem *set = NULL;
void __iomem *dirout = NULL;
@@ -1044,9 +1039,6 @@ static int wpcm450_gpio_register(struct platform_device *pdev,
u32 reg;
int i;
- if (!fwnode_property_read_bool(child, "gpio-controller"))
- continue;
-
ret = fwnode_property_read_u32(child, "reg", &reg);
if (ret < 0)
return ret;
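
The wpcm450 and npcm hunks above swap an open-coded child walk plus gpio-controller check for the for_each_gpiochip_node() helper. The loop below shows the filtering that helper factors out, using a plain struct as an illustrative stand-in for the kernel's fwnode API.

#include <stdio.h>
#include <stdbool.h>

struct fake_node {
        const char *name;
        bool gpio_controller;
};

int main(void)
{
        static const struct fake_node children[] = {
                { "gpio@0", true },
                { "uart0-pins", false },
                { "gpio@1", true },
        };

        for (unsigned int i = 0; i < 3; i++) {
                const struct fake_node *child = &children[i];

                /* skip children without a gpio-controller property */
                if (!child->gpio_controller)
                        continue;
                printf("registering bank for %s\n", child->name);
        }
        return 0;
}
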
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 42547f64453e..d67838afb085 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -234,6 +234,67 @@ static void parse_dt_cfg(struct device_node *np,
}
/**
+ * pinconf_generic_parse_dt_pinmux()
+ * parse the pinmux properties into generic pin mux values.
+ * @np: node containing the pinmux properties
+ * @dev: pinctrl core device
+ * @pid: array with pin identity entries
+ * @pmux: array with pin mux value entries
+ * @npins: number of pins
+ *
+ * The pinmux property encodes the mux value in bits [0,7] and the pin identity in bits [8,31].
+ */
+int pinconf_generic_parse_dt_pinmux(struct device_node *np, struct device *dev,
+ unsigned int **pid, unsigned int **pmux,
+ unsigned int *npins)
+{
+ unsigned int *pid_t;
+ unsigned int *pmux_t;
+ struct property *prop;
+ unsigned int npins_t, i;
+ u32 value;
+ int ret;
+
+ prop = of_find_property(np, "pinmux", NULL);
+ if (!prop) {
+ dev_info(dev, "Missing pinmux property\n");
+ return -ENOENT;
+ }
+
+ if (!pid || !pmux || !npins) {
+ dev_err(dev, "parameters error\n");
+ return -EINVAL;
+ }
+
+ npins_t = prop->length / sizeof(u32);
+ pid_t = devm_kcalloc(dev, npins_t, sizeof(*pid_t), GFP_KERNEL);
+ pmux_t = devm_kcalloc(dev, npins_t, sizeof(*pmux_t), GFP_KERNEL);
+ if (!pid_t || !pmux_t) {
+ dev_err(dev, "kalloc memory fail\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < npins_t; i++) {
+ ret = of_property_read_u32_index(np, "pinmux", i, &value);
+ if (ret) {
+ dev_err(dev, "get pinmux value fail\n");
+ goto exit;
+ }
+ pmux_t[i] = value & 0xff;
+ pid_t[i] = (value >> 8) & 0xffffff;
+ }
+ *pid = pid_t;
+ *pmux = pmux_t;
+ *npins = npins_t;
+
+ return 0;
+exit:
+ devm_kfree(dev, pid_t);
+ devm_kfree(dev, pmux_t);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pinconf_generic_parse_dt_pinmux);
+
+/**
* pinconf_generic_parse_dt_config()
* parse the config properties into generic pinconfig values.
* @np: node containing the pinconfig properties
@@ -295,6 +356,75 @@ out:
}
EXPORT_SYMBOL_GPL(pinconf_generic_parse_dt_config);
+int pinconf_generic_dt_node_to_map_pinmux(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned int *num_maps)
+{
+ struct device *dev = pctldev->dev;
+ struct device_node *pnode;
+ unsigned long *configs = NULL;
+ unsigned int num_configs = 0;
+ struct property *prop;
+ unsigned int reserved_maps;
+ int reserve;
+ int ret;
+
+ prop = of_find_property(np, "pinmux", NULL);
+ if (!prop) {
+ dev_info(dev, "Missing pinmux property\n");
+ return -ENOENT;
+ }
+
+ pnode = of_get_parent(np);
+ if (!pnode) {
+ dev_info(dev, "Missing function node\n");
+ return -EINVAL;
+ }
+
+ reserved_maps = 0;
+ *map = NULL;
+ *num_maps = 0;
+
+ ret = pinconf_generic_parse_dt_config(np, pctldev, &configs,
+ &num_configs);
+ if (ret < 0) {
+ dev_err(dev, "%pOF: could not parse node property\n", np);
+ return ret;
+ }
+
+ reserve = 1;
+ if (num_configs)
+ reserve++;
+
+ ret = pinctrl_utils_reserve_map(pctldev, map, &reserved_maps,
+ num_maps, reserve);
+ if (ret < 0)
+ goto exit;
+
+ ret = pinctrl_utils_add_map_mux(pctldev, map,
+ &reserved_maps, num_maps, np->name,
+ pnode->name);
+ if (ret < 0)
+ goto exit;
+
+ if (num_configs) {
+ ret = pinctrl_utils_add_map_configs(pctldev, map, &reserved_maps,
+ num_maps, np->name, configs,
+ num_configs, PIN_MAP_TYPE_CONFIGS_GROUP);
+ if (ret < 0)
+ goto exit;
+ }
+
+exit:
+ kfree(configs);
+ if (ret)
+ pinctrl_utils_free_map(pctldev, *map, *num_maps);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pinconf_generic_dt_node_to_map_pinmux);
+
int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
struct device_node *np, struct pinctrl_map **map,
unsigned int *reserved_maps, unsigned int *num_maps,
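
The cell layout documented for pinconf_generic_parse_dt_pinmux() above (mux value in bits [0,7], pin identity in bits [8,31]) is easy to see with a standalone decode; the sample cell values below are invented, only the bit extraction mirrors the helper.

#include <stdio.h>

int main(void)
{
        /* e.g. a group node could carry: pinmux = <0x0d02 0x0e02>; */
        static const unsigned int pinmux[] = { (13 << 8) | 2, (14 << 8) | 2 };

        for (unsigned int i = 0; i < 2; i++) {
                unsigned int mux = pinmux[i] & 0xff;            /* bits [0,7]  */
                unsigned int pin = (pinmux[i] >> 8) & 0xffffff; /* bits [8,31] */

                printf("cell 0x%08x -> pin %u, mux %u\n", pinmux[i], pin, mux);
        }
        return 0;
}
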
diff --git a/drivers/pinctrl/pinconf.h b/drivers/pinctrl/pinconf.h
index a14c950bc700..a171195b3615 100644
--- a/drivers/pinctrl/pinconf.h
+++ b/drivers/pinctrl/pinconf.h
@@ -138,4 +138,8 @@ int pinconf_generic_parse_dt_config(struct device_node *np,
struct pinctrl_dev *pctldev,
unsigned long **configs,
unsigned int *nconfigs);
+
+int pinconf_generic_parse_dt_pinmux(struct device_node *np, struct device *dev,
+ unsigned int **pid, unsigned int **pmux,
+ unsigned int *npins);
#endif
diff --git a/drivers/pinctrl/pinctrl-amdisp.c b/drivers/pinctrl/pinctrl-amdisp.c
new file mode 100644
index 000000000000..9256ed67bb20
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-amdisp.c
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * AMD ISP Pinctrl Driver
+ *
+ * Copyright (C) 2025 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-amdisp.h"
+
+#define DRV_NAME "amdisp-pinctrl"
+#define GPIO_CONTROL_PIN 4
+#define GPIO_OFFSET_0 0x0
+#define GPIO_OFFSET_1 0x4
+#define GPIO_OFFSET_2 0x50
+
+static const u32 gpio_offset[] = {
+ GPIO_OFFSET_0,
+ GPIO_OFFSET_1,
+ GPIO_OFFSET_2
+};
+
+struct amdisp_pinctrl_data {
+ const struct pinctrl_pin_desc *pins;
+ unsigned int npins;
+ const struct amdisp_function *functions;
+ unsigned int nfunctions;
+ const struct amdisp_pingroup *groups;
+ unsigned int ngroups;
+};
+
+static const struct amdisp_pinctrl_data amdisp_pinctrl_data = {
+ .pins = amdisp_pins,
+ .npins = ARRAY_SIZE(amdisp_pins),
+ .functions = amdisp_functions,
+ .nfunctions = ARRAY_SIZE(amdisp_functions),
+ .groups = amdisp_groups,
+ .ngroups = ARRAY_SIZE(amdisp_groups),
+};
+
+struct amdisp_pinctrl {
+ struct device *dev;
+ struct pinctrl_dev *pctrl;
+ struct pinctrl_desc desc;
+ struct pinctrl_gpio_range gpio_range;
+ struct gpio_chip gc;
+ const struct amdisp_pinctrl_data *data;
+ void __iomem *gpiobase;
+ raw_spinlock_t lock;
+};
+
+static int amdisp_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct amdisp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->data->ngroups;
+}
+
+static const char *amdisp_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int group)
+{
+ struct amdisp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->data->groups[group].name;
+}
+
+static int amdisp_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int group,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct amdisp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = pctrl->data->groups[group].pins;
+ *num_pins = pctrl->data->groups[group].npins;
+ return 0;
+}
+
+static const struct pinctrl_ops amdisp_pinctrl_ops = {
+ .get_groups_count = amdisp_get_groups_count,
+ .get_group_name = amdisp_get_group_name,
+ .get_group_pins = amdisp_get_group_pins,
+};
+
+static int amdisp_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
+{
+ /* amdisp gpio only has output mode */
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static int amdisp_gpio_direction_input(struct gpio_chip *gc, unsigned int gpio)
+{
+ return -EOPNOTSUPP;
+}
+
+static int amdisp_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio,
+ int value)
+{
+ /* Nothing to do, amdisp gpio only has output mode */
+ return 0;
+}
+
+static int amdisp_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ unsigned long flags;
+ u32 pin_reg;
+ struct amdisp_pinctrl *pctrl = gpiochip_get_data(gc);
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+ pin_reg = readl(pctrl->gpiobase + gpio_offset[gpio]);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return !!(pin_reg & BIT(GPIO_CONTROL_PIN));
+}
+
+static void amdisp_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
+{
+ unsigned long flags;
+ u32 pin_reg;
+ struct amdisp_pinctrl *pctrl = gpiochip_get_data(gc);
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+ pin_reg = readl(pctrl->gpiobase + gpio_offset[gpio]);
+ if (value)
+ pin_reg |= BIT(GPIO_CONTROL_PIN);
+ else
+ pin_reg &= ~BIT(GPIO_CONTROL_PIN);
+ writel(pin_reg, pctrl->gpiobase + gpio_offset[gpio]);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static int amdisp_gpiochip_add(struct platform_device *pdev,
+ struct amdisp_pinctrl *pctrl)
+{
+ struct gpio_chip *gc = &pctrl->gc;
+ struct pinctrl_gpio_range *grange = &pctrl->gpio_range;
+ int ret;
+
+ gc->label = dev_name(pctrl->dev);
+ gc->parent = &pdev->dev;
+ gc->names = amdisp_range_pins_name;
+ gc->request = gpiochip_generic_request;
+ gc->free = gpiochip_generic_free;
+ gc->get_direction = amdisp_gpio_get_direction;
+ gc->direction_input = amdisp_gpio_direction_input;
+ gc->direction_output = amdisp_gpio_direction_output;
+ gc->get = amdisp_gpio_get;
+ gc->set = amdisp_gpio_set;
+ gc->base = -1;
+ gc->ngpio = ARRAY_SIZE(amdisp_range_pins);
+
+ grange->id = 0;
+ grange->pin_base = 0;
+ grange->base = 0;
+ grange->pins = amdisp_range_pins;
+ grange->npins = ARRAY_SIZE(amdisp_range_pins);
+ grange->name = gc->label;
+ grange->gc = gc;
+
+ ret = devm_gpiochip_add_data(&pdev->dev, gc, pctrl);
+ if (ret)
+ return ret;
+
+ pinctrl_add_gpio_range(pctrl->pctrl, grange);
+
+ return 0;
+}
+
+static int amdisp_pinctrl_probe(struct platform_device *pdev)
+{
+ struct amdisp_pinctrl *pctrl;
+ struct resource *res;
+ int ret;
+
+ pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ pdev->dev.init_name = DRV_NAME;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ pctrl->gpiobase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pctrl->gpiobase))
+ return PTR_ERR(pctrl->gpiobase);
+
+ platform_set_drvdata(pdev, pctrl);
+
+ pctrl->dev = &pdev->dev;
+ pctrl->data = &amdisp_pinctrl_data;
+ pctrl->desc.owner = THIS_MODULE;
+ pctrl->desc.pctlops = &amdisp_pinctrl_ops;
+ pctrl->desc.pmxops = NULL;
+ pctrl->desc.name = dev_name(&pdev->dev);
+ pctrl->desc.pins = pctrl->data->pins;
+ pctrl->desc.npins = pctrl->data->npins;
+ ret = devm_pinctrl_register_and_init(&pdev->dev, &pctrl->desc,
+ pctrl, &pctrl->pctrl);
+ if (ret)
+ return ret;
+
+ ret = pinctrl_enable(pctrl->pctrl);
+ if (ret)
+ return ret;
+
+ ret = amdisp_gpiochip_add(pdev, pctrl);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static struct platform_driver amdisp_pinctrl_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .probe = amdisp_pinctrl_probe,
+};
+module_platform_driver(amdisp_pinctrl_driver);
+
+MODULE_AUTHOR("Benjamin Chan <benjamin.chan@amd.com>");
+MODULE_AUTHOR("Pratap Nirujogi <pratap.nirujogi@amd.com>");
+MODULE_DESCRIPTION("AMDISP pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/pinctrl/pinctrl-amdisp.h b/drivers/pinctrl/pinctrl-amdisp.h
new file mode 100644
index 000000000000..9e3597a03227
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-amdisp.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * AMD ISP Pinctrl Driver
+ *
+ * Copyright (C) 2025 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ */
+
+static const struct pinctrl_pin_desc amdisp_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"), /* sensor0 control */
+ PINCTRL_PIN(1, "GPIO_1"), /* sensor1 control */
+ PINCTRL_PIN(2, "GPIO_2"), /* sensor2 control */
+};
+
+#define AMDISP_GPIO_PINS(pin) \
+static const unsigned int gpio##pin##_pins[] = { pin }
+AMDISP_GPIO_PINS(0);
+AMDISP_GPIO_PINS(1);
+AMDISP_GPIO_PINS(2);
+
+static const unsigned int amdisp_range_pins[] = {
+ 0, 1, 2
+};
+
+static const char * const amdisp_range_pins_name[] = {
+ "gpio0", "gpio1", "gpio2"
+};
+
+enum amdisp_functions {
+ mux_gpio,
+ mux_NA
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2"
+};
+
+/**
+ * struct amdisp_function - a pinmux function
+ * @name: Name of the pinmux function.
+ * @groups: List of pingroups for this function.
+ * @ngroups: Number of entries in @groups.
+ */
+struct amdisp_function {
+ const char *name;
+ const char * const *groups;
+ unsigned int ngroups;
+};
+
+#define FUNCTION(fname) \
+ [mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+static const struct amdisp_function amdisp_functions[] = {
+ FUNCTION(gpio),
+};
+
+/**
+ * struct amdisp_pingroup - a pinmux group
+ * @name: Name of the pinmux group.
+ * @pins: List of pins for this group.
+ * @npins: Number of entries in @pins.
+ * @funcs: List of functions belonging to this group.
+ * @nfuncs: Number of entries in @funcs.
+ * @offset: Group offset in amdisp pinmux groups.
+ */
+struct amdisp_pingroup {
+ const char *name;
+ const unsigned int *pins;
+ unsigned int npins;
+ unsigned int *funcs;
+ unsigned int nfuncs;
+ unsigned int offset;
+};
+
+#define PINGROUP(id, f0) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ mux_##f0, \
+ }, \
+ .nfuncs = 1, \
+ .offset = id, \
+ }
+
+static const struct amdisp_pingroup amdisp_groups[] = {
+ PINGROUP(0, gpio),
+ PINGROUP(1, gpio),
+ PINGROUP(2, gpio),
+};
diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c
index d73004b4a45e..3cfbcaee9e65 100644
--- a/drivers/pinctrl/pinctrl-cy8c95x0.c
+++ b/drivers/pinctrl/pinctrl-cy8c95x0.c
@@ -40,6 +40,7 @@
/* Port Select configures the port */
#define CY8C95X0_PORTSEL 0x18
+
/* Port settings, write PORTSEL first */
#define CY8C95X0_INTMASK 0x19
#define CY8C95X0_SELPWM 0x1A
@@ -53,6 +54,9 @@
#define CY8C95X0_DRV_PP_FAST 0x21
#define CY8C95X0_DRV_PP_SLOW 0x22
#define CY8C95X0_DRV_HIZ 0x23
+
+/* Internal device configuration */
+#define CY8C95X0_ENABLE_WDE 0x2D
#define CY8C95X0_DEVID 0x2E
#define CY8C95X0_WATCHDOG 0x2F
#define CY8C95X0_COMMAND 0x30
@@ -137,7 +141,7 @@ static const struct dmi_system_id cy8c95x0_dmi_acpi_irq_info[] = {
* @irq_trig_low: I/O bits affected by a low voltage level
* @irq_trig_high: I/O bits affected by a high voltage level
* @push_pull: I/O bits configured as push pull driver
- * @shiftmask: Mask used to compensate for Gport2 width
+ * @map: Mask used to compensate for Gport2 width
* @nport: Number of Gports in this chip
* @gpio_chip: gpiolib chip
* @driver_data: private driver data
@@ -158,7 +162,7 @@ struct cy8c95x0_pinctrl {
DECLARE_BITMAP(irq_trig_low, MAX_LINE);
DECLARE_BITMAP(irq_trig_high, MAX_LINE);
DECLARE_BITMAP(push_pull, MAX_LINE);
- DECLARE_BITMAP(shiftmask, MAX_LINE);
+ DECLARE_BITMAP(map, MAX_LINE);
unsigned int nport;
struct gpio_chip gpio_chip;
unsigned long driver_data;
@@ -310,9 +314,6 @@ static const char * const cy8c95x0_groups[] = {
"gp77",
};
-static int cy8c95x0_pinmux_direction(struct cy8c95x0_pinctrl *chip,
- unsigned int pin, bool input);
-
static inline u8 cypress_get_port(struct cy8c95x0_pinctrl *chip, unsigned int pin)
{
/* Account for GPORT2 which only has 4 bits */
@@ -477,20 +478,14 @@ static const struct regmap_config cy8c9520_i2c_regmap = {
#endif
};
-static inline int cy8c95x0_regmap_update_bits_base(struct cy8c95x0_pinctrl *chip,
- unsigned int reg,
- unsigned int port,
- unsigned int mask,
- unsigned int val,
- bool *change, bool async,
- bool force)
+/* Caller should never modify PORTSEL directly */
+static int cy8c95x0_regmap_update_bits_base(struct cy8c95x0_pinctrl *chip,
+ unsigned int reg, unsigned int port,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force)
{
int ret, off, i;
- /* Caller should never modify PORTSEL directly */
- if (reg == CY8C95X0_PORTSEL)
- return -EINVAL;
-
/* Registers behind the PORTSEL mux have their own range in regmap */
if (cy8c95x0_muxed_register(reg)) {
off = CY8C95X0_MUX_REGMAP_TO_OFFSET(reg, port);
@@ -507,7 +502,8 @@ static inline int cy8c95x0_regmap_update_bits_base(struct cy8c95x0_pinctrl *chip
if (ret < 0)
return ret;
- /* Mimic what hardware does and update the cache when a WC bit is written.
+ /*
+ * Mimic what hardware does and update the cache when a WC bit is written.
* Allows to mark the registers as non-volatile and reduces I/O cycles.
*/
if (cy8c95x0_wc_register(reg) && (mask & val)) {
@@ -523,7 +519,7 @@ static inline int cy8c95x0_regmap_update_bits_base(struct cy8c95x0_pinctrl *chip
regcache_cache_only(chip->regmap, false);
}
- return ret;
+ return 0;
}
/**
@@ -575,12 +571,13 @@ static int cy8c95x0_regmap_update_bits(struct cy8c95x0_pinctrl *chip, unsigned i
}
/**
- * cy8c95x0_regmap_read() - reads a register using the regmap cache
+ * cy8c95x0_regmap_read_bits() - reads a register using the regmap cache
* @chip: The pinctrl to work on
* @reg: The register to read from. Can be direct access or muxed register.
* @port: The port to be used for muxed registers or quick path direct access
* registers. Otherwise unused.
- * @read_val: Value read from hardware or cache
+ * @mask: Bitmask to apply
+ * @val: Value read from hardware or cache
*
* This function handles the register reads from the direct access registers and
* the muxed registers while caching all register accesses, internally handling
@@ -590,10 +587,12 @@ static int cy8c95x0_regmap_update_bits(struct cy8c95x0_pinctrl *chip, unsigned i
*
* Return: 0 for successful request, else a corresponding error value
*/
-static int cy8c95x0_regmap_read(struct cy8c95x0_pinctrl *chip, unsigned int reg,
- unsigned int port, unsigned int *read_val)
+static int cy8c95x0_regmap_read_bits(struct cy8c95x0_pinctrl *chip, unsigned int reg,
+ unsigned int port, unsigned int mask, unsigned int *val)
{
- int off, ret;
+ unsigned int off;
+ unsigned int tmp;
+ int ret;
/* Registers behind the PORTSEL mux have their own range in regmap */
if (cy8c95x0_muxed_register(reg)) {
@@ -605,11 +604,14 @@ static int cy8c95x0_regmap_read(struct cy8c95x0_pinctrl *chip, unsigned int reg,
else
off = reg;
}
- guard(mutex)(&chip->i2c_lock);
- ret = regmap_read(chip->regmap, off, read_val);
+ scoped_guard(mutex, &chip->i2c_lock)
+ ret = regmap_read(chip->regmap, off, &tmp);
+ if (ret)
+ return ret;
- return ret;
+ *val = tmp & mask;
+ return 0;
}
static int cy8c95x0_write_regs_mask(struct cy8c95x0_pinctrl *chip, int reg,
@@ -617,26 +619,18 @@ static int cy8c95x0_write_regs_mask(struct cy8c95x0_pinctrl *chip, int reg,
{
DECLARE_BITMAP(tmask, MAX_LINE);
DECLARE_BITMAP(tval, MAX_LINE);
+ unsigned long bits, offset;
int write_val;
- u8 bits;
int ret;
/* Add the 4 bit gap of Gport2 */
- bitmap_andnot(tmask, mask, chip->shiftmask, MAX_LINE);
- bitmap_shift_left(tmask, tmask, 4, MAX_LINE);
- bitmap_replace(tmask, tmask, mask, chip->shiftmask, BANK_SZ * 3);
-
- bitmap_andnot(tval, val, chip->shiftmask, MAX_LINE);
- bitmap_shift_left(tval, tval, 4, MAX_LINE);
- bitmap_replace(tval, tval, val, chip->shiftmask, BANK_SZ * 3);
-
- for (unsigned int i = 0; i < chip->nport; i++) {
- /* Skip over unused banks */
- bits = bitmap_get_value8(tmask, i * BANK_SZ);
- if (!bits)
- continue;
+ bitmap_scatter(tmask, mask, chip->map, MAX_LINE);
+ bitmap_scatter(tval, val, chip->map, MAX_LINE);
+
+ for_each_set_clump8(offset, bits, tmask, chip->tpin) {
+ unsigned int i = offset / 8;
- write_val = bitmap_get_value8(tval, i * BANK_SZ);
+ write_val = bitmap_get_value8(tval, offset);
ret = cy8c95x0_regmap_update_bits(chip, reg, i, bits, write_val);
if (ret < 0) {
@@ -653,40 +647,54 @@ static int cy8c95x0_read_regs_mask(struct cy8c95x0_pinctrl *chip, int reg,
{
DECLARE_BITMAP(tmask, MAX_LINE);
DECLARE_BITMAP(tval, MAX_LINE);
- DECLARE_BITMAP(tmp, MAX_LINE);
- int read_val;
- u8 bits;
+ unsigned long bits, offset;
+ unsigned int read_val;
int ret;
/* Add the 4 bit gap of Gport2 */
- bitmap_andnot(tmask, mask, chip->shiftmask, MAX_LINE);
- bitmap_shift_left(tmask, tmask, 4, MAX_LINE);
- bitmap_replace(tmask, tmask, mask, chip->shiftmask, BANK_SZ * 3);
-
- bitmap_andnot(tval, val, chip->shiftmask, MAX_LINE);
- bitmap_shift_left(tval, tval, 4, MAX_LINE);
- bitmap_replace(tval, tval, val, chip->shiftmask, BANK_SZ * 3);
-
- for (unsigned int i = 0; i < chip->nport; i++) {
- /* Skip over unused banks */
- bits = bitmap_get_value8(tmask, i * BANK_SZ);
- if (!bits)
- continue;
+ bitmap_scatter(tmask, mask, chip->map, MAX_LINE);
+ bitmap_scatter(tval, val, chip->map, MAX_LINE);
+
+ for_each_set_clump8(offset, bits, tmask, chip->tpin) {
+ unsigned int i = offset / 8;
- ret = cy8c95x0_regmap_read(chip, reg, i, &read_val);
+ ret = cy8c95x0_regmap_read_bits(chip, reg, i, bits, &read_val);
if (ret < 0) {
dev_err(chip->dev, "failed reading register %d, port %u: err %d\n", reg, i, ret);
return ret;
}
- read_val &= bits;
- read_val |= bitmap_get_value8(tval, i * BANK_SZ) & ~bits;
- bitmap_set_value8(tval, read_val, i * BANK_SZ);
+ read_val |= bitmap_get_value8(tval, offset) & ~bits;
+ bitmap_set_value8(tval, read_val, offset);
}
/* Fill the 4 bit gap of Gport2 */
- bitmap_shift_right(tmp, tval, 4, MAX_LINE);
- bitmap_replace(val, tmp, tval, chip->shiftmask, MAX_LINE);
+ bitmap_gather(val, tval, chip->map, MAX_LINE);
+
+ return 0;
+}
+
+static int cy8c95x0_pinmux_direction(struct cy8c95x0_pinctrl *chip, unsigned int pin, bool input)
+{
+ u8 port = cypress_get_port(chip, pin);
+ u8 bit = cypress_get_pin_mask(chip, pin);
+ int ret;
+
+ ret = cy8c95x0_regmap_write_bits(chip, CY8C95X0_DIRECTION, port, bit, input ? bit : 0);
+ if (ret)
+ return ret;
+
+ /*
+ * Disable driving the pin by forcing it to HighZ. Only setting
+ * the direction register isn't sufficient in Push-Pull mode.
+ */
+ if (input && test_bit(pin, chip->push_pull)) {
+ ret = cy8c95x0_regmap_write_bits(chip, CY8C95X0_DRV_HIZ, port, bit, bit);
+ if (ret)
+ return ret;
+
+ __clear_bit(pin, chip->push_pull);
+ }
return 0;
}
@@ -717,10 +725,10 @@ static int cy8c95x0_gpio_get_value(struct gpio_chip *gc, unsigned int off)
struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
u8 port = cypress_get_port(chip, off);
u8 bit = cypress_get_pin_mask(chip, off);
- u32 reg_val;
+ unsigned int reg_val;
int ret;
- ret = cy8c95x0_regmap_read(chip, CY8C95X0_INPUT, port, &reg_val);
+ ret = cy8c95x0_regmap_read_bits(chip, CY8C95X0_INPUT, port, bit, &reg_val);
if (ret < 0) {
/*
* NOTE:
@@ -731,7 +739,7 @@ static int cy8c95x0_gpio_get_value(struct gpio_chip *gc, unsigned int off)
return 0;
}
- return !!(reg_val & bit);
+ return reg_val ? 1 : 0;
}
static void cy8c95x0_gpio_set_value(struct gpio_chip *gc, unsigned int off,
@@ -749,14 +757,14 @@ static int cy8c95x0_gpio_get_direction(struct gpio_chip *gc, unsigned int off)
struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
u8 port = cypress_get_port(chip, off);
u8 bit = cypress_get_pin_mask(chip, off);
- u32 reg_val;
+ unsigned int reg_val;
int ret;
- ret = cy8c95x0_regmap_read(chip, CY8C95X0_DIRECTION, port, &reg_val);
+ ret = cy8c95x0_regmap_read_bits(chip, CY8C95X0_DIRECTION, port, bit, &reg_val);
if (ret < 0)
return ret;
- if (reg_val & bit)
+ if (reg_val)
return GPIO_LINE_DIRECTION_IN;
return GPIO_LINE_DIRECTION_OUT;
@@ -769,8 +777,8 @@ static int cy8c95x0_gpio_get_pincfg(struct cy8c95x0_pinctrl *chip,
enum pin_config_param param = pinconf_to_config_param(*config);
u8 port = cypress_get_port(chip, off);
u8 bit = cypress_get_pin_mask(chip, off);
+ unsigned int reg_val;
unsigned int reg;
- u32 reg_val;
u16 arg = 0;
int ret;
@@ -827,16 +835,16 @@ static int cy8c95x0_gpio_get_pincfg(struct cy8c95x0_pinctrl *chip,
* Writing 1 to one of the drive mode registers will automatically
* clear conflicting set bits in the other drive mode registers.
*/
- ret = cy8c95x0_regmap_read(chip, reg, port, &reg_val);
+ ret = cy8c95x0_regmap_read_bits(chip, reg, port, bit, &reg_val);
if (ret < 0)
return ret;
- if (reg_val & bit)
+ if (reg_val)
arg = 1;
if (param == PIN_CONFIG_OUTPUT_ENABLE)
arg = !arg;
- *config = pinconf_to_config_packed(param, (u16)arg);
+ *config = pinconf_to_config_packed(param, arg);
return 0;
}
@@ -1095,7 +1103,7 @@ static irqreturn_t cy8c95x0_irq_handler(int irq, void *devid)
if (!ret)
return IRQ_RETVAL(0);
- ret = 0;
+ ret = false;
for_each_set_bit(level, pending, MAX_LINE) {
/* Already accounted for 4bit gap in GPort2 */
nested_irq = irq_find_mapping(gc->irq.domain, level);
@@ -1114,7 +1122,7 @@ static irqreturn_t cy8c95x0_irq_handler(int irq, void *devid)
else
handle_nested_irq(nested_irq);
- ret = 1;
+ ret = true;
}
return IRQ_RETVAL(ret);
@@ -1248,32 +1256,6 @@ static int cy8c95x0_gpio_request_enable(struct pinctrl_dev *pctldev,
return cy8c95x0_set_mode(chip, pin, false);
}
-static int cy8c95x0_pinmux_direction(struct cy8c95x0_pinctrl *chip,
- unsigned int pin, bool input)
-{
- u8 port = cypress_get_port(chip, pin);
- u8 bit = cypress_get_pin_mask(chip, pin);
- int ret;
-
- ret = cy8c95x0_regmap_write_bits(chip, CY8C95X0_DIRECTION, port, bit, input ? bit : 0);
- if (ret)
- return ret;
-
- /*
- * Disable driving the pin by forcing it to HighZ. Only setting
- * the direction register isn't sufficient in Push-Pull mode.
- */
- if (input && test_bit(pin, chip->push_pull)) {
- ret = cy8c95x0_regmap_write_bits(chip, CY8C95X0_DRV_HIZ, port, bit, bit);
- if (ret)
- return ret;
-
- __clear_bit(pin, chip->push_pull);
- }
-
- return 0;
-}
-
static int cy8c95x0_gpio_set_direction(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned int pin, bool input)
@@ -1305,7 +1287,7 @@ static int cy8c95x0_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long *configs, unsigned int num_configs)
{
struct cy8c95x0_pinctrl *chip = pinctrl_dev_get_drvdata(pctldev);
- int ret = 0;
+ int ret;
int i;
for (i = 0; i < num_configs; i++) {
@@ -1314,7 +1296,7 @@ static int cy8c95x0_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
return ret;
}
- return ret;
+ return 0;
}
static const struct pinconf_ops cy8c95x0_pinconf_ops = {
@@ -1486,8 +1468,11 @@ static int cy8c95x0_probe(struct i2c_client *client)
return PTR_ERR(chip->regmap);
bitmap_zero(chip->push_pull, MAX_LINE);
- bitmap_zero(chip->shiftmask, MAX_LINE);
- bitmap_set(chip->shiftmask, 0, 20);
+
+ /* Setup HW pins mapping */
+ bitmap_fill(chip->map, MAX_LINE);
+ bitmap_clear(chip->map, 20, 4);
+
mutex_init(&chip->i2c_lock);
if (dmi_first_match(cy8c95x0_dmi_acpi_irq_info)) {
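
The cy8c95x0 hunks above replace the shiftmask plus bitmap_shift/bitmap_replace juggling with bitmap_scatter()/bitmap_gather() over a map that clears hardware bits 20..23, the 4-bit hole of Gport2. The sketch below reimplements the scatter/gather idea on a 64-bit word purely for illustration; it is not the kernel's bitmap API.

#include <stdio.h>
#include <stdint.h>

/* place the i-th bit of src at the position of the i-th set bit of mask */
static uint64_t scatter64(uint64_t src, uint64_t mask)
{
        uint64_t dst = 0;
        unsigned int i = 0;

        for (unsigned int bit = 0; bit < 64; bit++)
                if (mask & (1ull << bit))
                        dst |= ((src >> i++) & 1ull) << bit;
        return dst;
}

/* inverse: collect the bits of src found at the set positions of mask */
static uint64_t gather64(uint64_t src, uint64_t mask)
{
        uint64_t dst = 0;
        unsigned int i = 0;

        for (unsigned int bit = 0; bit < 64; bit++)
                if (mask & (1ull << bit))
                        dst |= ((src >> bit) & 1ull) << i++;
        return dst;
}

int main(void)
{
        uint64_t map = ~0ull & ~(0xfull << 20);         /* all pins minus hw bits 20..23 */
        uint64_t pins = (1ull << 20) | (1ull << 21);    /* dense pin numbers 20 and 21 */
        uint64_t hw = scatter64(pins, map);             /* land on hw bits 24 and 25 */

        printf("dense 0x%llx -> hw 0x%llx -> dense 0x%llx\n",
               (unsigned long long)pins, (unsigned long long)hw,
               (unsigned long long)gather64(hw, map));
        return 0;
}
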
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index bc7ee54e062b..a9e48eac15f6 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -3,7 +3,7 @@
* Ingenic SoCs pinctrl driver
*
* Copyright (c) 2017 Paul Cercueil <paul@crapouillou.net>
- * Copyright (c) 2017, 2019 Paul Boddie <paul@boddie.org.uk>
+ * Copyright (c) 2017, 2019, 2020, 2023 Paul Boddie <paul@boddie.org.uk>
* Copyright (c) 2019, 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
*/
@@ -58,6 +58,8 @@
#define JZ4770_GPIO_FLAG 0x50
#define JZ4770_GPIO_PEN 0x70
+#define X1600_GPIO_PU 0x80
+
#define X1830_GPIO_PEL 0x110
#define X1830_GPIO_PEH 0x120
#define X1830_GPIO_SR 0x150
@@ -112,6 +114,7 @@ enum jz_version {
ID_JZ4780,
ID_X1000,
ID_X1500,
+ ID_X1600,
ID_X1830,
ID_X2000,
ID_X2100,
@@ -162,6 +165,7 @@ static const unsigned long enabled_socs =
IS_ENABLED(CONFIG_MACH_JZ4780) << ID_JZ4780 |
IS_ENABLED(CONFIG_MACH_X1000) << ID_X1000 |
IS_ENABLED(CONFIG_MACH_X1500) << ID_X1500 |
+ IS_ENABLED(CONFIG_MACH_X1600) << ID_X1600 |
IS_ENABLED(CONFIG_MACH_X1830) << ID_X1830 |
IS_ENABLED(CONFIG_MACH_X2000) << ID_X2000 |
IS_ENABLED(CONFIG_MACH_X2100) << ID_X2100;
@@ -206,6 +210,14 @@ static int jz4730_nand_cs5_pins[] = { 0x57, };
static int jz4730_pwm_pwm0_pins[] = { 0x5e, };
static int jz4730_pwm_pwm1_pins[] = { 0x5f, };
+static int jz4730_mii_pins[] = { 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76,
+ 0x77, 0x78, 0x19, 0x7a, 0x1b, 0x7c, };
+
+static int jz4730_i2s_mclk_pins[] = { 0x44, };
+static int jz4730_i2s_acreset_pins[] = { 0x45, };
+static int jz4730_i2s_data_pins[] = { 0x46, 0x47, };
+static int jz4730_i2s_clock_pins[] = { 0x4d, 0x4e, };
+
static u8 jz4730_lcd_8bit_funcs[] = { 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, };
static const struct group_desc jz4730_groups[] = {
@@ -227,6 +239,12 @@ static const struct group_desc jz4730_groups[] = {
INGENIC_PIN_GROUP("nand-cs5", jz4730_nand_cs5, 1),
INGENIC_PIN_GROUP("pwm0", jz4730_pwm_pwm0, 1),
INGENIC_PIN_GROUP("pwm1", jz4730_pwm_pwm1, 1),
+ INGENIC_PIN_GROUP("mii", jz4730_mii, 1),
+ INGENIC_PIN_GROUP("i2s-mclk-out", jz4730_i2s_mclk, 1),
+ INGENIC_PIN_GROUP("i2s-acreset", jz4730_i2s_acreset, 1),
+ INGENIC_PIN_GROUP("i2s-data", jz4730_i2s_data, 1),
+ INGENIC_PIN_GROUP("i2s-master", jz4730_i2s_clock, 1),
+ INGENIC_PIN_GROUP("i2s-slave", jz4730_i2s_clock, 2),
};
static const char *jz4730_mmc_groups[] = { "mmc-1bit", "mmc-4bit", };
@@ -242,6 +260,8 @@ static const char *jz4730_nand_groups[] = {
};
static const char *jz4730_pwm0_groups[] = { "pwm0", };
static const char *jz4730_pwm1_groups[] = { "pwm1", };
+static const char *jz4730_mii_groups[] = { "mii", };
+static const char *jz4730_i2s_groups[] = { "i2s-data", "i2s-master", "i2s-slave", };
static const struct function_desc jz4730_functions[] = {
INGENIC_PIN_FUNCTION("mmc", jz4730_mmc),
@@ -253,6 +273,8 @@ static const struct function_desc jz4730_functions[] = {
INGENIC_PIN_FUNCTION("nand", jz4730_nand),
INGENIC_PIN_FUNCTION("pwm0", jz4730_pwm0),
INGENIC_PIN_FUNCTION("pwm1", jz4730_pwm1),
+ INGENIC_PIN_FUNCTION("mii", jz4730_mii),
+ INGENIC_PIN_FUNCTION("i2s", jz4730_i2s),
};
static const struct ingenic_chip_info jz4730_chip_info = {
@@ -2351,6 +2373,233 @@ static const struct ingenic_chip_info x1500_chip_info = {
.access_table = &x1000_access_table,
};
+static const u32 x1600_pull_ups[4] = {
+ 0xffffffff, 0xdffbf7bf, 0x987e0000, 0x0000003f,
+};
+
+static const u32 x1600_pull_downs[4] = {
+ 0x00000000, 0x00000000, 0x07000007, 0x00000000,
+};
+
+static int x1600_uart0_data_pins[] = { 0x27, 0x28, };
+static int x1600_uart0_hwflow_pins[] = { 0x29, 0x2a, };
+static int x1600_uart1_data_pins[] = { 0x23, 0x22, };
+static int x1600_uart1_hwflow_pins[] = { 0x25, 0x24, };
+static int x1600_uart2_data_a_pins[] = { 0x1f, 0x1e, };
+static int x1600_uart2_data_b_pins[] = { 0x21, 0x20, };
+static int x1600_uart3_data_b_pins[] = { 0x25, 0x24, };
+static int x1600_uart3_data_d_pins[] = { 0x65, 0x64, };
+static int x1600_sfc_pins[] = { 0x53, 0x54, 0x55, 0x56, 0x51, 0x52, 0x24, };
+static int x1600_ssi_dt_a_pins[] = { 0x1e, };
+static int x1600_ssi_dt_b_pins[] = { 0x2d, };
+static int x1600_ssi_dr_a_pins[] = { 0x1d, };
+static int x1600_ssi_dr_b_pins[] = { 0x2e, };
+static int x1600_ssi_clk_a_pins[] = { 0x1f, };
+static int x1600_ssi_clk_b_pins[] = { 0x2c, };
+static int x1600_ssi_ce0_a_pins[] = { 0x1c, };
+static int x1600_ssi_ce0_b_pins[] = { 0x31, };
+static int x1600_ssi_ce1_a_pins[] = { 0x22, };
+static int x1600_ssi_ce1_b_pins[] = { 0x30, };
+static int x1600_mmc0_1bit_b_pins[] = { 0x2c, 0x2d, 0x2e, };
+static int x1600_mmc0_4bit_b_pins[] = { 0x2f, 0x30, 0x31, };
+static int x1600_mmc0_1bit_c_pins[] = { 0x51, 0x53, 0x54, };
+static int x1600_mmc0_4bit_c_pins[] = { 0x56, 0x55, 0x52, };
+static int x1600_mmc1_1bit_pins[] = { 0x60, 0x61, 0x62, };
+static int x1600_mmc1_4bit_pins[] = { 0x63, 0x64, 0x65, };
+static int x1600_i2c0_a_pins[] = { 0x1d, 0x1c, };
+static int x1600_i2c0_b_pins[] = { 0x3f, 0x3e, };
+static int x1600_i2c1_b_15_pins[] = { 0x30, 0x2f, };
+static int x1600_i2c1_b_19_pins[] = { 0x34, 0x33, };
+static int x1600_i2s_data_tx_pins[] = { 0x39, };
+static int x1600_i2s_data_rx_pins[] = { 0x35, };
+static int x1600_i2s_clk_rx_pins[] = { 0x37, 0x38, };
+static int x1600_i2s_clk_tx_pins[] = { 0x3b, 0x3c, };
+static int x1600_i2s_sysclk_pins[] = { 0x36, 0x3a, };
+
+static int x1600_cim_pins[] = {
+ 0x14, 0x16, 0x15, 0x18, 0x13,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+};
+
+static int x1600_slcd_8bit_pins[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x17, 0x19, 0x1a, 0x1b,
+};
+
+static int x1600_slcd_16bit_pins[] = {
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+};
+
+static int x1600_lcd_16bit_pins[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x18, 0x19, 0x1a, 0x1b,
+};
+
+static int x1600_lcd_18bit_pins[] = {
+ 0x10, 0x11,
+};
+
+static int x1600_lcd_24bit_pins[] = {
+ 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+};
+
+static int x1600_pwm_pwm0_pins[] = { 0x40, };
+static int x1600_pwm_pwm1_pins[] = { 0x41, };
+static int x1600_pwm_pwm2_pins[] = { 0x42, };
+static int x1600_pwm_pwm3_pins[] = { 0x58, };
+static int x1600_pwm_pwm4_pins[] = { 0x59, };
+static int x1600_pwm_pwm5_b_pins[] = { 0x33, };
+static int x1600_pwm_pwm5_c_pins[] = { 0x5a, };
+static int x1600_pwm_pwm6_b9_pins[] = { 0x29, };
+static int x1600_pwm_pwm6_b20_pins[] = { 0x34, };
+static int x1600_pwm_pwm7_b10_pins[] = { 0x2a, };
+static int x1600_pwm_pwm7_b21_pins[] = { 0x35, };
+
+static int x1600_mac_pins[] = {
+ 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c,
+};
+
+static int x1600_sfc_funcs[] = { 0, 0, 0, 0, 0, 0, 2, };
+
+static const struct group_desc x1600_groups[] = {
+ INGENIC_PIN_GROUP("uart0-data", x1600_uart0_data, 0),
+ INGENIC_PIN_GROUP("uart0-hwflow", x1600_uart0_hwflow, 0),
+ INGENIC_PIN_GROUP("uart1-data", x1600_uart1_data, 1),
+ INGENIC_PIN_GROUP("uart1-hwflow", x1600_uart1_hwflow, 1),
+ INGENIC_PIN_GROUP("uart2-data-a", x1600_uart2_data_a, 2),
+ INGENIC_PIN_GROUP("uart2-data-b", x1600_uart2_data_b, 1),
+ INGENIC_PIN_GROUP("uart3-data-b", x1600_uart3_data_b, 0),
+ INGENIC_PIN_GROUP("uart3-data-d", x1600_uart3_data_d, 2),
+ INGENIC_PIN_GROUP_FUNCS("sfc", x1600_sfc, x1600_sfc_funcs),
+ INGENIC_PIN_GROUP("ssi-dt-a", x1600_ssi_dt_a, 0),
+ INGENIC_PIN_GROUP("ssi-dt-b", x1600_ssi_dt_b, 1),
+ INGENIC_PIN_GROUP("ssi-dr-a", x1600_ssi_dr_a, 0),
+ INGENIC_PIN_GROUP("ssi-dr-b", x1600_ssi_dr_b, 1),
+ INGENIC_PIN_GROUP("ssi-clk-a", x1600_ssi_clk_a, 0),
+ INGENIC_PIN_GROUP("ssi-clk-b", x1600_ssi_clk_b, 1),
+ INGENIC_PIN_GROUP("ssi-ce0-a", x1600_ssi_ce0_a, 0),
+ INGENIC_PIN_GROUP("ssi-ce0-b", x1600_ssi_ce0_b, 1),
+ INGENIC_PIN_GROUP("ssi-ce1-a", x1600_ssi_ce1_a, 2),
+ INGENIC_PIN_GROUP("ssi-ce1-b", x1600_ssi_ce1_b, 1),
+ INGENIC_PIN_GROUP("mmc0-1bit-b", x1600_mmc0_1bit_b, 0),
+ INGENIC_PIN_GROUP("mmc0-4bit-b", x1600_mmc0_4bit_b, 0),
+ INGENIC_PIN_GROUP("mmc0-1bit-c", x1600_mmc0_1bit_c, 1),
+ INGENIC_PIN_GROUP("mmc0-4bit-c", x1600_mmc0_4bit_c, 1),
+ INGENIC_PIN_GROUP("mmc1-1bit", x1600_mmc1_1bit, 0),
+ INGENIC_PIN_GROUP("mmc1-4bit", x1600_mmc1_4bit, 0),
+ INGENIC_PIN_GROUP("i2c0-data-a", x1600_i2c0_a, 2),
+ INGENIC_PIN_GROUP("i2c0-data-b", x1600_i2c0_b, 0),
+ INGENIC_PIN_GROUP("i2c1-data-b-15", x1600_i2c1_b_15, 2),
+ INGENIC_PIN_GROUP("i2c1-data-b-19", x1600_i2c1_b_19, 0),
+ INGENIC_PIN_GROUP("i2s-data-tx", x1600_i2s_data_tx, 0),
+ INGENIC_PIN_GROUP("i2s-data-rx", x1600_i2s_data_rx, 0),
+ INGENIC_PIN_GROUP("i2s-clk-rx", x1600_i2s_clk_rx, 0),
+ INGENIC_PIN_GROUP("i2s-clk-tx", x1600_i2s_clk_tx, 0),
+ INGENIC_PIN_GROUP("i2s-sysclk", x1600_i2s_sysclk, 0),
+ INGENIC_PIN_GROUP("cim-data", x1600_cim, 2),
+ INGENIC_PIN_GROUP("slcd-8bit", x1600_slcd_8bit, 1),
+ INGENIC_PIN_GROUP("slcd-16bit", x1600_slcd_16bit, 1),
+ INGENIC_PIN_GROUP("lcd-16bit", x1600_lcd_16bit, 0),
+ INGENIC_PIN_GROUP("lcd-18bit", x1600_lcd_18bit, 0),
+ INGENIC_PIN_GROUP("lcd-24bit", x1600_lcd_24bit, 0),
+ INGENIC_PIN_GROUP("pwm0", x1600_pwm_pwm0, 0),
+ INGENIC_PIN_GROUP("pwm1", x1600_pwm_pwm1, 0),
+ INGENIC_PIN_GROUP("pwm2", x1600_pwm_pwm2, 0),
+ INGENIC_PIN_GROUP("pwm3", x1600_pwm_pwm3, 1),
+ INGENIC_PIN_GROUP("pwm4", x1600_pwm_pwm4, 1),
+ INGENIC_PIN_GROUP("pwm5-b", x1600_pwm_pwm5_b, 2),
+ INGENIC_PIN_GROUP("pwm5-c", x1600_pwm_pwm5_c, 1),
+ INGENIC_PIN_GROUP("pwm6-b9", x1600_pwm_pwm6_b9, 1),
+ INGENIC_PIN_GROUP("pwm6-b20", x1600_pwm_pwm6_b20, 2),
+ INGENIC_PIN_GROUP("pwm7-b10", x1600_pwm_pwm7_b10, 1),
+ INGENIC_PIN_GROUP("pwm7-b21", x1600_pwm_pwm7_b21, 2),
+ INGENIC_PIN_GROUP("mac", x1600_mac, 1),
+};
+
+static const char * const x1600_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
+static const char * const x1600_uart1_groups[] = { "uart1-data", "uart1-hwflow", };
+static const char * const x1600_uart2_groups[] = { "uart2-data-a", "uart2-data-b", };
+static const char * const x1600_uart3_groups[] = { "uart3-data-b", "uart3-data-d", };
+
+static const char * const x1600_sfc_groups[] = { "sfc", };
+
+static const char * const x1600_ssi_groups[] = {
+ "ssi-dt-a", "ssi-dt-b",
+ "ssi-dr-a", "ssi-dr-b",
+ "ssi-clk-a", "ssi-clk-b",
+ "ssi-ce0-a", "ssi-ce0-b",
+ "ssi-ce1-a", "ssi-ce1-b",
+};
+
+static const char * const x1600_mmc0_groups[] = { "mmc0-1bit-b", "mmc0-4bit-b",
+ "mmc0-1bit-c", "mmc0-4bit-c",
+};
+
+static const char * const x1600_mmc1_groups[] = { "mmc1-1bit", "mmc1-4bit", };
+
+static const char * const x1600_i2c0_groups[] = { "i2c0-data-a", "i2c0-data-b", };
+static const char * const x1600_i2c1_groups[] = { "i2c1-data-b-15", "i2c1-data-b-19", };
+
+static const char * const x1600_i2s_groups[] = {
+ "i2s-data-tx", "i2s-data-rx", "i2s-clk-rx", "i2s-clk-tx", "i2s-sysclk",
+};
+
+static const char * const x1600_cim_groups[] = { "cim-data", };
+
+static const char * const x1600_lcd_groups[] = { "slcd-8bit", "slcd-16bit",
+ "lcd-16bit", "lcd-18bit", "lcd-24bit", "lcd-no-pins",
+};
+
+static const char * const x1600_pwm0_groups[] = { "pwm0", };
+static const char * const x1600_pwm1_groups[] = { "pwm1", };
+static const char * const x1600_pwm2_groups[] = { "pwm2", };
+static const char * const x1600_pwm3_groups[] = { "pwm3", };
+static const char * const x1600_pwm4_groups[] = { "pwm4", };
+static const char * const x1600_pwm5_groups[] = { "pwm5-b", "pwm5-c", };
+static const char * const x1600_pwm6_groups[] = { "pwm6-b9", "pwm6-b20", };
+static const char * const x1600_pwm7_groups[] = { "pwm7-b10", "pwm7-b21", };
+
+static const char * const x1600_mac_groups[] = { "mac", };
+
+static const struct function_desc x1600_functions[] = {
+ INGENIC_PIN_FUNCTION("uart0", x1600_uart0),
+ INGENIC_PIN_FUNCTION("uart1", x1600_uart1),
+ INGENIC_PIN_FUNCTION("uart2", x1600_uart2),
+ INGENIC_PIN_FUNCTION("uart3", x1600_uart3),
+ INGENIC_PIN_FUNCTION("sfc", x1600_sfc),
+ INGENIC_PIN_FUNCTION("ssi", x1600_ssi),
+ INGENIC_PIN_FUNCTION("mmc0", x1600_mmc0),
+ INGENIC_PIN_FUNCTION("mmc1", x1600_mmc1),
+ INGENIC_PIN_FUNCTION("i2c0", x1600_i2c0),
+ INGENIC_PIN_FUNCTION("i2c1", x1600_i2c1),
+ INGENIC_PIN_FUNCTION("i2s", x1600_i2s),
+ INGENIC_PIN_FUNCTION("cim", x1600_cim),
+ INGENIC_PIN_FUNCTION("lcd", x1600_lcd),
+ INGENIC_PIN_FUNCTION("pwm0", x1600_pwm0),
+ INGENIC_PIN_FUNCTION("pwm1", x1600_pwm1),
+ INGENIC_PIN_FUNCTION("pwm2", x1600_pwm2),
+ INGENIC_PIN_FUNCTION("pwm3", x1600_pwm3),
+ INGENIC_PIN_FUNCTION("pwm4", x1600_pwm4),
+ INGENIC_PIN_FUNCTION("pwm5", x1600_pwm5),
+ INGENIC_PIN_FUNCTION("pwm6", x1600_pwm6),
+ INGENIC_PIN_FUNCTION("pwm7", x1600_pwm7),
+ INGENIC_PIN_FUNCTION("mac", x1600_mac),
+};
+
+static const struct ingenic_chip_info x1600_chip_info = {
+ .num_chips = 4,
+ .reg_offset = 0x100,
+ .version = ID_X1600,
+ .groups = x1600_groups,
+ .num_groups = ARRAY_SIZE(x1600_groups),
+ .functions = x1600_functions,
+ .num_functions = ARRAY_SIZE(x1600_functions),
+ .pull_ups = x1600_pull_ups,
+ .pull_downs = x1600_pull_downs,
+ .access_table = &x1000_access_table,
+};
+
static const u32 x1830_pull_ups[4] = {
0x5fdfffc0, 0xffffefff, 0x1ffffbff, 0x0fcff3fc,
};
@@ -3860,7 +4109,9 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
pulldown = (bias == GPIO_PULL_DOWN) && (jzpc->info->pull_downs[offt] & BIT(idx));
} else {
- if (is_soc_or_above(jzpc, ID_JZ4770))
+ if (is_soc_or_above(jzpc, ID_X1600))
+ pull = ingenic_get_pin_config(jzpc, pin, X1600_GPIO_PU);
+ else if (is_soc_or_above(jzpc, ID_JZ4770))
pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
else if (is_soc_or_above(jzpc, ID_JZ4740))
pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
@@ -3959,6 +4210,8 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
REG_SET(X1830_GPIO_PEH), bias << idxh);
}
+ } else if (is_soc_or_above(jzpc, ID_X1600)) {
+ ingenic_config_pin(jzpc, pin, X1600_GPIO_PU, bias);
} else if (is_soc_or_above(jzpc, ID_JZ4770)) {
ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PEN, !bias);
} else if (is_soc_or_above(jzpc, ID_JZ4740)) {
@@ -4150,6 +4403,7 @@ static const struct of_device_id ingenic_gpio_of_matches[] __initconst = {
{ .compatible = "ingenic,jz4775-gpio" },
{ .compatible = "ingenic,jz4780-gpio" },
{ .compatible = "ingenic,x1000-gpio" },
+ { .compatible = "ingenic,x1600-gpio" },
{ .compatible = "ingenic,x1830-gpio" },
{ .compatible = "ingenic,x2000-gpio" },
{ .compatible = "ingenic,x2100-gpio" },
@@ -4398,6 +4652,10 @@ static const struct of_device_id ingenic_pinctrl_of_matches[] = {
.data = IF_ENABLED(CONFIG_MACH_X1500, &x1500_chip_info)
},
{
+ .compatible = "ingenic,x1600-pinctrl",
+ .data = IF_ENABLED(CONFIG_MACH_X1600, &x1600_chip_info)
+ },
+ {
.compatible = "ingenic,x1830-pinctrl",
.data = IF_ENABLED(CONFIG_MACH_X1830, &x1830_chip_info)
},
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index b96e6368a956..4d1f41488017 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -382,6 +382,7 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
{
struct mcp23s08 *mcp = data;
int intcap, intcon, intf, i, gpio, gpio_orig, intcap_mask, defval, gpinten;
+ bool need_unmask = false;
unsigned long int enabled_interrupts;
unsigned int child_irq;
bool intf_set, intcap_changed, gpio_bit_changed,
@@ -396,9 +397,6 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
goto unlock;
}
- if (mcp_read(mcp, MCP_INTCAP, &intcap))
- goto unlock;
-
if (mcp_read(mcp, MCP_INTCON, &intcon))
goto unlock;
@@ -408,6 +406,16 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
if (mcp_read(mcp, MCP_DEFVAL, &defval))
goto unlock;
+ /* Mask level interrupts to avoid their immediate reactivation after clearing */
+ if (intcon) {
+ need_unmask = true;
+ if (mcp_write(mcp, MCP_GPINTEN, gpinten & ~intcon))
+ goto unlock;
+ }
+
+ if (mcp_read(mcp, MCP_INTCAP, &intcap))
+ goto unlock;
+
/* This clears the interrupt(configurable on S18) */
if (mcp_read(mcp, MCP_GPIO, &gpio))
goto unlock;
@@ -470,9 +478,18 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
}
}
+ if (need_unmask) {
+ mutex_lock(&mcp->lock);
+ goto unlock;
+ }
+
return IRQ_HANDLED;
unlock:
+ if (need_unmask)
+ if (mcp_write(mcp, MCP_GPINTEN, gpinten))
+ dev_err(mcp->chip.parent, "can't unmask GPINTEN\n");
+
mutex_unlock(&mcp->lock);
return IRQ_HANDLED;
}
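
The mcp23s08 change above reorders the handler so that level-triggered sources are masked in GPINTEN before INTCAP/GPIO are read, preventing a still-asserted line from re-latching the interrupt the instant it is acknowledged; the saved enable mask is restored on the exit path. The sketch below only illustrates that mask/ack/dispatch/unmask ordering in isolation; the expander struct, register indices and accessor callbacks are hypothetical stand-ins, not the mcp23s08 register map or driver API.

/* Illustrative sketch of the mask -> ack -> dispatch -> unmask ordering for a
 * level-capable interrupt expander. All names here are placeholders.
 */
#include <linux/types.h>

struct expander {
	int (*read)(struct expander *chip, unsigned int reg, u32 *val);
	int (*write)(struct expander *chip, unsigned int reg, u32 val);
};

enum { REG_INTEN, REG_INTCON, REG_INTCAP, REG_PORT };

static int expander_handle_irq(struct expander *chip)
{
	u32 inten, intcon, cap, port;
	bool masked = false;
	int ret;

	ret = chip->read(chip, REG_INTEN, &inten);	/* currently enabled sources */
	if (ret)
		return ret;
	ret = chip->read(chip, REG_INTCON, &intcon);	/* level (1) vs. edge (0) per pin */
	if (ret)
		return ret;

	/* Mask level sources so they cannot re-latch right after the ack */
	if (intcon) {
		ret = chip->write(chip, REG_INTEN, inten & ~intcon);
		if (ret)
			return ret;
		masked = true;
	}

	/* Reading the capture and port registers acknowledges the interrupt */
	ret = chip->read(chip, REG_INTCAP, &cap);
	if (!ret)
		ret = chip->read(chip, REG_PORT, &port);

	/* ... decode cap/port and dispatch nested child interrupts here ... */

	/* Restore the original enable mask only if it was changed */
	if (masked)
		chip->write(chip, REG_INTEN, inten);

	return ret;
}
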
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 53408344927a..8c50e0091b32 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -1393,12 +1393,6 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl)
dev_err(pctl->dev, "Failed to retrieve IRQ for bank %u\n", i);
goto err;
}
- if (!ret) {
- fwnode_handle_put(child);
- dev_err(pctl->dev, "No IRQ for bank %u\n", i);
- ret = -EINVAL;
- goto err;
- }
irq = ret;
bank = &pctl->gpio_banks[i];
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 15145882950f..930c454e0cec 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -2003,6 +2003,115 @@ static int rk3399_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
return 0;
}
+#define RK3528_DRV_BITS_PER_PIN 8
+#define RK3528_DRV_PINS_PER_REG 2
+#define RK3528_DRV_GPIO0_OFFSET 0x100
+#define RK3528_DRV_GPIO1_OFFSET 0x20120
+#define RK3528_DRV_GPIO2_OFFSET 0x30160
+#define RK3528_DRV_GPIO3_OFFSET 0x20190
+#define RK3528_DRV_GPIO4_OFFSET 0x101C0
+
+static int rk3528_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+
+ if (bank->bank_num == 0)
+ *reg = RK3528_DRV_GPIO0_OFFSET;
+ else if (bank->bank_num == 1)
+ *reg = RK3528_DRV_GPIO1_OFFSET;
+ else if (bank->bank_num == 2)
+ *reg = RK3528_DRV_GPIO2_OFFSET;
+ else if (bank->bank_num == 3)
+ *reg = RK3528_DRV_GPIO3_OFFSET;
+ else if (bank->bank_num == 4)
+ *reg = RK3528_DRV_GPIO4_OFFSET;
+ else
+ dev_err(info->dev, "unsupported bank_num %d\n", bank->bank_num);
+
+ *reg += ((pin_num / RK3528_DRV_PINS_PER_REG) * 4);
+ *bit = pin_num % RK3528_DRV_PINS_PER_REG;
+ *bit *= RK3528_DRV_BITS_PER_PIN;
+
+ return 0;
+}
+
+#define RK3528_PULL_BITS_PER_PIN 2
+#define RK3528_PULL_PINS_PER_REG 8
+#define RK3528_PULL_GPIO0_OFFSET 0x200
+#define RK3528_PULL_GPIO1_OFFSET 0x20210
+#define RK3528_PULL_GPIO2_OFFSET 0x30220
+#define RK3528_PULL_GPIO3_OFFSET 0x20230
+#define RK3528_PULL_GPIO4_OFFSET 0x10240
+
+static int rk3528_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+
+ if (bank->bank_num == 0)
+ *reg = RK3528_PULL_GPIO0_OFFSET;
+ else if (bank->bank_num == 1)
+ *reg = RK3528_PULL_GPIO1_OFFSET;
+ else if (bank->bank_num == 2)
+ *reg = RK3528_PULL_GPIO2_OFFSET;
+ else if (bank->bank_num == 3)
+ *reg = RK3528_PULL_GPIO3_OFFSET;
+ else if (bank->bank_num == 4)
+ *reg = RK3528_PULL_GPIO4_OFFSET;
+ else
+ dev_err(info->dev, "unsupported bank_num %d\n", bank->bank_num);
+
+ *reg += ((pin_num / RK3528_PULL_PINS_PER_REG) * 4);
+ *bit = pin_num % RK3528_PULL_PINS_PER_REG;
+ *bit *= RK3528_PULL_BITS_PER_PIN;
+
+ return 0;
+}
+
+#define RK3528_SMT_BITS_PER_PIN 1
+#define RK3528_SMT_PINS_PER_REG 8
+#define RK3528_SMT_GPIO0_OFFSET 0x400
+#define RK3528_SMT_GPIO1_OFFSET 0x20410
+#define RK3528_SMT_GPIO2_OFFSET 0x30420
+#define RK3528_SMT_GPIO3_OFFSET 0x20430
+#define RK3528_SMT_GPIO4_OFFSET 0x10440
+
+static int rk3528_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num,
+ struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+
+ if (bank->bank_num == 0)
+ *reg = RK3528_SMT_GPIO0_OFFSET;
+ else if (bank->bank_num == 1)
+ *reg = RK3528_SMT_GPIO1_OFFSET;
+ else if (bank->bank_num == 2)
+ *reg = RK3528_SMT_GPIO2_OFFSET;
+ else if (bank->bank_num == 3)
+ *reg = RK3528_SMT_GPIO3_OFFSET;
+ else if (bank->bank_num == 4)
+ *reg = RK3528_SMT_GPIO4_OFFSET;
+ else
+ dev_err(info->dev, "unsupported bank_num %d\n", bank->bank_num);
+
+ *reg += ((pin_num / RK3528_SMT_PINS_PER_REG) * 4);
+ *bit = pin_num % RK3528_SMT_PINS_PER_REG;
+ *bit *= RK3528_SMT_BITS_PER_PIN;
+
+ return 0;
+}
+
#define RK3562_DRV_BITS_PER_PIN 8
#define RK3562_DRV_PINS_PER_REG 2
#define RK3562_DRV_GPIO0_OFFSET 0x20070
@@ -2640,7 +2749,8 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank,
rmask_bits = RK3588_DRV_BITS_PER_PIN;
ret = strength;
goto config;
- } else if (ctrl->type == RK3562 ||
+ } else if (ctrl->type == RK3528 ||
+ ctrl->type == RK3562 ||
ctrl->type == RK3568) {
rmask_bits = RK3568_DRV_BITS_PER_PIN;
ret = (1 << (strength + 1)) - 1;
@@ -2785,6 +2895,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
case RK3328:
case RK3368:
case RK3399:
+ case RK3528:
case RK3562:
case RK3568:
case RK3576:
@@ -2846,6 +2957,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
case RK3328:
case RK3368:
case RK3399:
+ case RK3528:
case RK3562:
case RK3568:
case RK3576:
@@ -3115,6 +3227,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
case RK3328:
case RK3368:
case RK3399:
+ case RK3528:
case RK3562:
case RK3568:
case RK3576:
@@ -4237,6 +4350,49 @@ static struct rockchip_pin_ctrl rk3399_pin_ctrl = {
.drv_calc_reg = rk3399_calc_drv_reg_and_bit,
};
+static struct rockchip_pin_bank rk3528_pin_banks[] = {
+ PIN_BANK_IOMUX_FLAGS_OFFSET(0, 32, "gpio0",
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ 0, 0, 0, 0),
+ PIN_BANK_IOMUX_FLAGS_OFFSET(1, 32, "gpio1",
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ 0x20020, 0x20028, 0x20030, 0x20038),
+ PIN_BANK_IOMUX_FLAGS_OFFSET(2, 32, "gpio2",
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ 0x30040, 0, 0, 0),
+ PIN_BANK_IOMUX_FLAGS_OFFSET(3, 32, "gpio3",
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ 0x20060, 0x20068, 0x20070, 0),
+ PIN_BANK_IOMUX_FLAGS_OFFSET(4, 32, "gpio4",
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ 0x10080, 0x10088, 0x10090, 0x10098),
+};
+
+static struct rockchip_pin_ctrl rk3528_pin_ctrl = {
+ .pin_banks = rk3528_pin_banks,
+ .nr_banks = ARRAY_SIZE(rk3528_pin_banks),
+ .label = "RK3528-GPIO",
+ .type = RK3528,
+ .pull_calc_reg = rk3528_calc_pull_reg_and_bit,
+ .drv_calc_reg = rk3528_calc_drv_reg_and_bit,
+ .schmitt_calc_reg = rk3528_calc_schmitt_reg_and_bit,
+};
+
static struct rockchip_pin_bank rk3562_pin_banks[] = {
PIN_BANK_IOMUX_FLAGS_OFFSET(0, 32, "gpio0",
IOMUX_WIDTH_4BIT,
@@ -4404,6 +4560,8 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = {
.data = &rk3368_pin_ctrl },
{ .compatible = "rockchip,rk3399-pinctrl",
.data = &rk3399_pin_ctrl },
+ { .compatible = "rockchip,rk3528-pinctrl",
+ .data = &rk3528_pin_ctrl },
{ .compatible = "rockchip,rk3562-pinctrl",
.data = &rk3562_pin_ctrl },
{ .compatible = "rockchip,rk3568-pinctrl",
diff --git a/drivers/pinctrl/pinctrl-rockchip.h b/drivers/pinctrl/pinctrl-rockchip.h
index 87a20cec8e21..35cd38079d1e 100644
--- a/drivers/pinctrl/pinctrl-rockchip.h
+++ b/drivers/pinctrl/pinctrl-rockchip.h
@@ -196,6 +196,7 @@ enum rockchip_pinctrl_type {
RK3328,
RK3368,
RK3399,
+ RK3528,
RK3562,
RK3568,
RK3576,
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
index 9e34b92ff5f2..9fd7a8fb2bc4 100644
--- a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
+++ b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
@@ -281,9 +281,8 @@ static int pxa2xx_build_functions(struct pxa_pinctrl *pctl)
for (df = pctl->ppins[i].functions; df->name; df++)
if (!pxa2xx_find_function(pctl, df->name, functions))
(functions + pctl->nfuncs++)->name = df->name;
- pctl->functions = devm_kmemdup(pctl->dev, functions,
- pctl->nfuncs * sizeof(*functions),
- GFP_KERNEL);
+ pctl->functions = devm_kmemdup_array(pctl->dev, functions, pctl->nfuncs,
+ sizeof(*functions), GFP_KERNEL);
if (!pctl->functions)
return -ENOMEM;
@@ -314,7 +313,8 @@ static int pxa2xx_build_groups(struct pxa_pinctrl *pctl)
pctl->ppins[j].pin.name;
func = pctl->functions + i;
func->ngroups = ngroups;
- func->groups = devm_kmemdup(pctl->dev, gtmp, ngroups * sizeof(*gtmp), GFP_KERNEL);
+ func->groups = devm_kmemdup_array(pctl->dev, gtmp, ngroups,
+ sizeof(*gtmp), GFP_KERNEL);
if (!func->groups)
return -ENOMEM;
}
diff --git a/drivers/pinctrl/qcom/Kconfig.msm b/drivers/pinctrl/qcom/Kconfig.msm
index 35f47660a56b..0bb44c9a4c06 100644
--- a/drivers/pinctrl/qcom/Kconfig.msm
+++ b/drivers/pinctrl/qcom/Kconfig.msm
@@ -138,10 +138,10 @@ config PINCTRL_MSM8916
Qualcomm TLMM block found on the Qualcomm 8916 platform.
config PINCTRL_MSM8917
- tristate "Qualcomm 8917 pin controller driver"
+ tristate "Qualcomm 8917/8937 pin controller driver"
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
- Qualcomm TLMM block found on the Qualcomm MSM8917 platform.
+ Qualcomm TLMM block found on the Qualcomm MSM8917 and MSM8937 platforms.
config PINCTRL_MSM8953
tristate "Qualcomm 8953 pin controller driver"
@@ -437,4 +437,14 @@ config PINCTRL_X1E80100
Say Y here to compile statically, or M here to compile it as a module.
If unsure, say N.
+config PINCTRL_TLMM_TEST
+ tristate "Qualcomm TLMM test driver"
+ depends on ARM64 || COMPILE_TEST
+ depends on KUNIT
+ help
+ This driver provides test cases for the interrupt capabilities of the
+ TLMM driver (pinctrl-msm). Specify a floating GPIO to use for testing
+ via the module parameter "gpio", then execute the KUnit suite.
+ If unsure, say N.
+
endif
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 5c4100925cf9..954f5291cc37 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -71,3 +71,4 @@ obj-$(CONFIG_PINCTRL_SM8750) += pinctrl-sm8750.o
obj-$(CONFIG_PINCTRL_SC8280XP_LPASS_LPI) += pinctrl-sc8280xp-lpass-lpi.o
obj-$(CONFIG_PINCTRL_LPASS_LPI) += pinctrl-lpass-lpi.o
obj-$(CONFIG_PINCTRL_X1E80100) += pinctrl-x1e80100.o
+obj-$(CONFIG_PINCTRL_TLMM_TEST) += tlmm-test.o
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 47daa47153c9..82f0cc43bbf4 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -1045,8 +1045,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
const struct msm_pingroup *g;
u32 intr_target_mask = GENMASK(2, 0);
unsigned long flags;
- bool was_enabled;
- u32 val;
+ u32 val, oldval;
if (msm_gpio_needs_dual_edge_parent_workaround(d, type)) {
set_bit(d->hwirq, pctrl->dual_edge_irqs);
@@ -1108,8 +1107,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
* internal circuitry of TLMM, toggling the RAW_STATUS
* could cause the INTR_STATUS to be set for EDGE interrupts.
*/
- val = msm_readl_intr_cfg(pctrl, g);
- was_enabled = val & BIT(g->intr_raw_status_bit);
+ val = oldval = msm_readl_intr_cfg(pctrl, g);
val |= BIT(g->intr_raw_status_bit);
if (g->intr_detection_width == 2) {
val &= ~(3 << g->intr_detection_bit);
@@ -1162,9 +1160,11 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
/*
* The first time we set RAW_STATUS_EN it could trigger an interrupt.
* Clear the interrupt. This is safe because we have
- * IRQCHIP_SET_TYPE_MASKED.
+ * IRQCHIP_SET_TYPE_MASKED. When changing the interrupt type, we could
+ * also still have a non-matching interrupt latched, so clear whenever
+ * making changes to the interrupt configuration.
*/
- if (!was_enabled)
+ if (val != oldval)
msm_ack_intr_status(pctrl, g);
if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8917.c b/drivers/pinctrl/qcom/pinctrl-msm8917.c
index cff137bb3b23..350636807b07 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8917.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8917.c
@@ -539,6 +539,7 @@ enum msm8917_functions {
msm_mux_webcam_standby,
msm_mux_wsa_io,
msm_mux_wsa_irq,
+ msm_mux_wsa_reset,
msm_mux__,
};
@@ -1123,6 +1124,10 @@ static const char * const wsa_io_groups[] = {
"gpio94", "gpio95",
};
+static const char * const wsa_reset_groups[] = {
+ "gpio96",
+};
+
static const char * const blsp_spi8_groups[] = {
"gpio96", "gpio97", "gpio98", "gpio99",
};
@@ -1378,6 +1383,7 @@ static const struct pinfunction msm8917_functions[] = {
MSM_PIN_FUNCTION(webcam_standby),
MSM_PIN_FUNCTION(wsa_io),
MSM_PIN_FUNCTION(wsa_irq),
+ MSM_PIN_FUNCTION(wsa_reset),
};
static const struct msm_pingroup msm8917_groups[] = {
@@ -1616,5 +1622,5 @@ static void __exit msm8917_pinctrl_exit(void)
}
module_exit(msm8917_pinctrl_exit);
-MODULE_DESCRIPTION("Qualcomm msm8917 pinctrl driver");
+MODULE_DESCRIPTION("Qualcomm msm8917/msm8937 pinctrl driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/qcom/pinctrl-sa8775p.c b/drivers/pinctrl/qcom/pinctrl-sa8775p.c
index 8fdea25d8d67..a5b38221aea8 100644
--- a/drivers/pinctrl/qcom/pinctrl-sa8775p.c
+++ b/drivers/pinctrl/qcom/pinctrl-sa8775p.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022,2025, Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2023, Linaro Limited
*/
@@ -467,6 +467,7 @@ enum sa8775p_functions {
msm_mux_edp2_lcd,
msm_mux_edp3_hot,
msm_mux_edp3_lcd,
+ msm_mux_egpio,
msm_mux_emac0_mcg0,
msm_mux_emac0_mcg1,
msm_mux_emac0_mcg2,
@@ -744,6 +745,13 @@ static const char * const edp3_lcd_groups[] = {
"gpio49",
};
+static const char *const egpio_groups[] = {
+ "gpio126", "gpio127", "gpio128", "gpio129", "gpio130", "gpio131",
+ "gpio132", "gpio133", "gpio134", "gpio135", "gpio136", "gpio137",
+ "gpio138", "gpio139", "gpio140", "gpio141", "gpio142", "gpio143",
+ "gpio144", "gpio145", "gpio146", "gpio147", "gpio148",
+};
+
static const char * const emac0_mcg0_groups[] = {
"gpio12",
};
@@ -1209,6 +1217,7 @@ static const struct pinfunction sa8775p_functions[] = {
MSM_PIN_FUNCTION(edp2_lcd),
MSM_PIN_FUNCTION(edp3_hot),
MSM_PIN_FUNCTION(edp3_lcd),
+ MSM_PIN_FUNCTION(egpio),
MSM_PIN_FUNCTION(emac0_mcg0),
MSM_PIN_FUNCTION(emac0_mcg1),
MSM_PIN_FUNCTION(emac0_mcg2),
@@ -1454,29 +1463,29 @@ static const struct msm_pingroup sa8775p_groups[] = {
[123] = PINGROUP(123, hs2_mi2s, phase_flag, _, _, _, _, _, _, _),
[124] = PINGROUP(124, hs2_mi2s, phase_flag, _, _, _, _, _, _, _),
[125] = PINGROUP(125, hs2_mi2s, phase_flag, _, _, _, _, _, _, _),
- [126] = PINGROUP(126, _, _, _, _, _, _, _, _, _),
- [127] = PINGROUP(127, _, _, _, _, _, _, _, _, _),
- [128] = PINGROUP(128, _, _, _, _, _, _, _, _, _),
- [129] = PINGROUP(129, _, _, _, _, _, _, _, _, _),
- [130] = PINGROUP(130, _, _, _, _, _, _, _, _, _),
- [131] = PINGROUP(131, _, _, _, _, _, _, _, _, _),
- [132] = PINGROUP(132, _, _, _, _, _, _, _, _, _),
- [133] = PINGROUP(133, _, _, _, _, _, _, _, _, _),
- [134] = PINGROUP(134, _, _, _, _, _, _, _, _, _),
- [135] = PINGROUP(135, _, _, _, _, _, _, _, _, _),
- [136] = PINGROUP(136, _, _, _, _, _, _, _, _, _),
- [137] = PINGROUP(137, _, _, _, _, _, _, _, _, _),
- [138] = PINGROUP(138, _, _, _, _, _, _, _, _, _),
- [139] = PINGROUP(139, _, _, _, _, _, _, _, _, _),
- [140] = PINGROUP(140, _, _, _, _, _, _, _, _, _),
- [141] = PINGROUP(141, _, _, _, _, _, _, _, _, _),
- [142] = PINGROUP(142, _, _, _, _, _, _, _, _, _),
- [143] = PINGROUP(143, _, _, _, _, _, _, _, _, _),
- [144] = PINGROUP(144, dbg_out, _, _, _, _, _, _, _, _),
- [145] = PINGROUP(145, _, _, _, _, _, _, _, _, _),
- [146] = PINGROUP(146, _, _, _, _, _, _, _, _, _),
- [147] = PINGROUP(147, _, _, _, _, _, _, _, _, _),
- [148] = PINGROUP(148, _, _, _, _, _, _, _, _, _),
+ [126] = PINGROUP(126, _, _, _, _, _, _, _, _, egpio),
+ [127] = PINGROUP(127, _, _, _, _, _, _, _, _, egpio),
+ [128] = PINGROUP(128, _, _, _, _, _, _, _, _, egpio),
+ [129] = PINGROUP(129, _, _, _, _, _, _, _, _, egpio),
+ [130] = PINGROUP(130, _, _, _, _, _, _, _, _, egpio),
+ [131] = PINGROUP(131, _, _, _, _, _, _, _, _, egpio),
+ [132] = PINGROUP(132, _, _, _, _, _, _, _, _, egpio),
+ [133] = PINGROUP(133, _, _, _, _, _, _, _, _, egpio),
+ [134] = PINGROUP(134, _, _, _, _, _, _, _, _, egpio),
+ [135] = PINGROUP(135, _, _, _, _, _, _, _, _, egpio),
+ [136] = PINGROUP(136, _, _, _, _, _, _, _, _, egpio),
+ [137] = PINGROUP(137, _, _, _, _, _, _, _, _, egpio),
+ [138] = PINGROUP(138, _, _, _, _, _, _, _, _, egpio),
+ [139] = PINGROUP(139, _, _, _, _, _, _, _, _, egpio),
+ [140] = PINGROUP(140, _, _, _, _, _, _, _, _, egpio),
+ [141] = PINGROUP(141, _, _, _, _, _, _, _, _, egpio),
+ [142] = PINGROUP(142, _, _, _, _, _, _, _, _, egpio),
+ [143] = PINGROUP(143, _, _, _, _, _, _, _, _, egpio),
+ [144] = PINGROUP(144, dbg_out, _, _, _, _, _, _, _, egpio),
+ [145] = PINGROUP(145, _, _, _, _, _, _, _, _, egpio),
+ [146] = PINGROUP(146, _, _, _, _, _, _, _, _, egpio),
+ [147] = PINGROUP(147, _, _, _, _, _, _, _, _, egpio),
+ [148] = PINGROUP(148, _, _, _, _, _, _, _, _, egpio),
[149] = UFS_RESET(ufs_reset, 0x1a2000),
[150] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x199000, 15, 0),
[151] = SDC_QDSD_PINGROUP(sdc1_clk, 0x199000, 13, 6),
@@ -1511,6 +1520,7 @@ static const struct msm_pinctrl_soc_data sa8775p_pinctrl = {
.ngpios = 150,
.wakeirq_map = sa8775p_pdc_map,
.nwakeirq_map = ARRAY_SIZE(sa8775p_pdc_map),
+ .egpio_func = 9,
};
static int sa8775p_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/qcom/tlmm-test.c b/drivers/pinctrl/qcom/tlmm-test.c
new file mode 100644
index 000000000000..fd02bf3a76cb
--- /dev/null
+++ b/drivers/pinctrl/qcom/tlmm-test.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "tlmm-test: " fmt
+
+#include <kunit/test.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+
+/*
+ * This TLMM test module validates that the TLMM driver (pinctrl-msm) delivers
+ * the expected number of interrupts in response to changes in the GPIO state.
+ *
+ * To achieve this without external equipment, the test takes a module parameter
+ * "gpio", for which the tester is expected to specify an unused, non-connected
+ * pin. The GPIO state is then driven by adjusting the bias of the pin, at
+ * suitable times through the different test cases.
+ *
+ * Upon execution, the test initialization will find the TLMM node (subject to
+ * tlmm_of_match[] allow listing) and create the necessary references
+ * dynamically, rather than relying on e.g. Devicetree and phandles.
+ */
+
+#define MSM_PULL_MASK GENMASK(2, 0)
+#define MSM_PULL_DOWN 1
+#define MSM_PULL_UP 3
+#define TLMM_REG_SIZE 0x1000
+
+static int tlmm_test_gpio = -1;
+module_param_named(gpio, tlmm_test_gpio, int, 0600);
+
+static struct {
+ void __iomem *base;
+ void __iomem *reg;
+ int irq;
+
+ u32 low_val;
+ u32 high_val;
+} tlmm_suite;
+
+/**
+ * struct tlmm_test_priv - Per-test context
+ * @intr_count: number of times hard handler was hit with TLMM_TEST_COUNT op set
+ * @thread_count: number of times thread handler was hit with TLMM_TEST_COUNT op set
+ * @intr_op: operations to be performed by the hard IRQ handler
+ * @intr_op_remain: number of times the TLMM_TEST_THEN_* operations should be
+ * performed by the hard IRQ handler
+ * @thread_op: operations to be performed by the threaded IRQ handler
+ * @thread_op_remain: number of times the TLMM_TEST_THEN_* operations should
+ * be performed by the threaded IRQ handler
+ */
+struct tlmm_test_priv {
+ atomic_t intr_count;
+ atomic_t thread_count;
+
+ unsigned int intr_op;
+ atomic_t intr_op_remain;
+
+ unsigned int thread_op;
+ atomic_t thread_op_remain;
+};
+
+/* Operation masks for @intr_op and @thread_op */
+#define TLMM_TEST_COUNT BIT(0)
+#define TLMM_TEST_OUTPUT_LOW BIT(1)
+#define TLMM_TEST_OUTPUT_HIGH BIT(2)
+#define TLMM_TEST_THEN_HIGH BIT(3)
+#define TLMM_TEST_THEN_LOW BIT(4)
+#define TLMM_TEST_WAKE_THREAD BIT(5)
+
+static void tlmm_output_low(void)
+{
+ writel(tlmm_suite.low_val, tlmm_suite.reg);
+ readl(tlmm_suite.reg);
+}
+
+static void tlmm_output_high(void)
+{
+ writel(tlmm_suite.high_val, tlmm_suite.reg);
+ readl(tlmm_suite.reg);
+}
+
+static irqreturn_t tlmm_test_intr_fn(int irq, void *dev_id)
+{
+ struct tlmm_test_priv *priv = dev_id;
+
+ if (priv->intr_op & TLMM_TEST_COUNT)
+ atomic_inc(&priv->intr_count);
+
+ if (priv->intr_op & TLMM_TEST_OUTPUT_LOW)
+ tlmm_output_low();
+ if (priv->intr_op & TLMM_TEST_OUTPUT_HIGH)
+ tlmm_output_high();
+
+ if (atomic_dec_if_positive(&priv->intr_op_remain) > 0) {
+ udelay(1);
+
+ if (priv->intr_op & TLMM_TEST_THEN_LOW)
+ tlmm_output_low();
+ if (priv->intr_op & TLMM_TEST_THEN_HIGH)
+ tlmm_output_high();
+ }
+
+ return priv->intr_op & TLMM_TEST_WAKE_THREAD ? IRQ_WAKE_THREAD : IRQ_HANDLED;
+}
+
+static irqreturn_t tlmm_test_intr_thread_fn(int irq, void *dev_id)
+{
+ struct tlmm_test_priv *priv = dev_id;
+
+ if (priv->thread_op & TLMM_TEST_COUNT)
+ atomic_inc(&priv->thread_count);
+
+ if (priv->thread_op & TLMM_TEST_OUTPUT_LOW)
+ tlmm_output_low();
+ if (priv->thread_op & TLMM_TEST_OUTPUT_HIGH)
+ tlmm_output_high();
+
+ if (atomic_dec_if_positive(&priv->thread_op_remain) > 0) {
+ udelay(1);
+ if (priv->thread_op & TLMM_TEST_THEN_LOW)
+ tlmm_output_low();
+ if (priv->thread_op & TLMM_TEST_THEN_HIGH)
+ tlmm_output_high();
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void tlmm_test_request_hard_irq(struct kunit *test, unsigned long irqflags)
+{
+ struct tlmm_test_priv *priv = test->priv;
+ int ret;
+
+ ret = request_irq(tlmm_suite.irq, tlmm_test_intr_fn, irqflags, test->name, priv);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+}
+
+static void tlmm_test_request_threaded_irq(struct kunit *test, unsigned long irqflags)
+{
+ struct tlmm_test_priv *priv = test->priv;
+ int ret;
+
+ ret = request_threaded_irq(tlmm_suite.irq,
+ tlmm_test_intr_fn, tlmm_test_intr_thread_fn,
+ irqflags, test->name, priv);
+
+ KUNIT_EXPECT_EQ(test, ret, 0);
+}
+
+static void tlmm_test_silent(struct kunit *test, unsigned long irqflags)
+{
+ struct tlmm_test_priv *priv = test->priv;
+
+ priv->intr_op = TLMM_TEST_COUNT;
+
+ /* GPIO line at non-triggering level */
+ if (irqflags == IRQF_TRIGGER_LOW || irqflags == IRQF_TRIGGER_FALLING)
+ tlmm_output_high();
+ else
+ tlmm_output_low();
+
+ tlmm_test_request_hard_irq(test, irqflags);
+ msleep(100);
+ free_irq(tlmm_suite.irq, priv);
+
+ KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 0);
+}
+
+/*
+ * Test that no RISING interrupts are triggered on a silent pin
+ */
+static void tlmm_test_silent_rising(struct kunit *test)
+{
+ tlmm_test_silent(test, IRQF_TRIGGER_RISING);
+}
+
+/*
+ * Test that no FALLING interrupts are triggered on a silent pin
+ */
+static void tlmm_test_silent_falling(struct kunit *test)
+{
+ tlmm_test_silent(test, IRQF_TRIGGER_FALLING);
+}
+
+/*
+ * Test that no LOW interrupts are triggered on a silent pin
+ */
+static void tlmm_test_silent_low(struct kunit *test)
+{
+ tlmm_test_silent(test, IRQF_TRIGGER_LOW);
+}
+
+/*
+ * Test that no HIGH interrupts are triggered on a silent pin
+ */
+static void tlmm_test_silent_high(struct kunit *test)
+{
+ tlmm_test_silent(test, IRQF_TRIGGER_HIGH);
+}
+
+/*
+ * Square wave with 10 high pulses, assert that we get 10 rising interrupts
+ */
+static void tlmm_test_rising(struct kunit *test)
+{
+ struct tlmm_test_priv *priv = test->priv;
+ int i;
+
+ priv->intr_op = TLMM_TEST_COUNT;
+
+ tlmm_output_low();
+
+ tlmm_test_request_hard_irq(test, IRQF_TRIGGER_RISING);
+ for (i = 0; i < 10; i++) {
+ tlmm_output_low();
+ msleep(20);
+ tlmm_output_high();
+ msleep(20);
+ }
+
+ free_irq(tlmm_suite.irq, priv);
+
+ KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
+}
+
+/*
+ * Square wave with 10 low pulses, assert that we get 10 falling interrupts
+ */
+static void tlmm_test_falling(struct kunit *test)
+{
+ struct tlmm_test_priv *priv = test->priv;
+ int i;
+
+ priv->intr_op = TLMM_TEST_COUNT;
+
+ tlmm_output_high();
+
+ tlmm_test_request_hard_irq(test, IRQF_TRIGGER_FALLING);
+ for (i = 0; i < 10; i++) {
+ tlmm_output_high();
+ msleep(20);
+ tlmm_output_low();
+ msleep(20);
+ }
+ free_irq(tlmm_suite.irq, priv);
+
+ KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
+}
+
+/*
+ * Drive line low 10 times, handler drives it high to "clear the interrupt
+ * source", assert we get 10 interrupts
+ */
+static void tlmm_test_low(struct kunit *test)
+{
+ struct tlmm_test_priv *priv = test->priv;
+ int i;
+
+ priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_OUTPUT_HIGH;
+ atomic_set(&priv->intr_op_remain, 9);
+
+ tlmm_output_high();
+
+ tlmm_test_request_hard_irq(test, IRQF_TRIGGER_LOW);
+ for (i = 0; i < 10; i++) {
+ msleep(20);
+ tlmm_output_low();
+ }
+ msleep(100);
+ free_irq(tlmm_suite.irq, priv);
+
+ KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
+}
+
+/*
+ * Drive line high 10 times, handler drives it low to "clear the interrupt
+ * source", assert we get 10 interrupts
+ */
+static void tlmm_test_high(struct kunit *test)
+{
+ struct tlmm_test_priv *priv = test->priv;
+ int i;
+
+ priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_OUTPUT_LOW;
+ atomic_set(&priv->intr_op_remain, 9);
+
+ tlmm_output_low();
+
+ tlmm_test_request_hard_irq(test, IRQF_TRIGGER_HIGH);
+ for (i = 0; i < 10; i++) {
+ msleep(20);
+ tlmm_output_high();
+ }
+ msleep(100);
+ free_irq(tlmm_suite.irq, priv);
+
+ KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
+}
+
+/*
+ * Handler drives GPIO high to "clear the interrupt source", then low to
+ * simulate a new interrupt, repeated 10 times, assert we get 10 interrupts
+ */
+static void tlmm_test_falling_in_handler(struct kunit *test)
+{
+ struct tlmm_test_priv *priv = test->priv;
+
+ priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_OUTPUT_HIGH | TLMM_TEST_THEN_LOW;
+ atomic_set(&priv->intr_op_remain, 10);
+
+ tlmm_output_high();
+
+ tlmm_test_request_hard_irq(test, IRQF_TRIGGER_FALLING);
+ msleep(20);
+ tlmm_output_low();
+ msleep(100);
+ free_irq(tlmm_suite.irq, priv);
+
+ KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
+}
+
+/*
+ * Handler drives GPIO low to "clear the interrupt source", then high to
+ * simulate a new interrupt, repeated 10 times, assert we get 10 interrupts
+ */
+static void tlmm_test_rising_in_handler(struct kunit *test)
+{
+ struct tlmm_test_priv *priv = test->priv;
+
+ priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_OUTPUT_LOW | TLMM_TEST_THEN_HIGH;
+ atomic_set(&priv->intr_op_remain, 10);
+
+ tlmm_output_low();
+
+ tlmm_test_request_hard_irq(test, IRQF_TRIGGER_RISING);
+ msleep(20);
+ tlmm_output_high();
+ msleep(100);
+ free_irq(tlmm_suite.irq, priv);
+
+ KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
+}
+
+/*
+ * Square wave with 10 high pulses, assert that we get 10 rising hard and
+ * 10 threaded interrupts
+ */
+static void tlmm_test_thread_rising(struct kunit *test)
+{
+ struct tlmm_test_priv *priv = test->priv;
+ int i;
+
+ priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_WAKE_THREAD;
+ priv->thread_op = TLMM_TEST_COUNT;
+
+ tlmm_output_low();
+
+ tlmm_test_request_threaded_irq(test, IRQF_TRIGGER_RISING);
+ for (i = 0; i < 10; i++) {
+ tlmm_output_low();
+ msleep(20);
+ tlmm_output_high();
+ msleep(20);
+ }
+ free_irq(tlmm_suite.irq, priv);
+
+ KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
+ KUNIT_ASSERT_EQ(test, atomic_read(&priv->thread_count), 10);
+}
+
+/*
+ * Square wave with 10 low pulses, assert that we get 10 falling hard and
+ * 10 threaded interrupts
+ */
+static void tlmm_test_thread_falling(struct kunit *test)
+{
+ struct tlmm_test_priv *priv = test->priv;
+ int i;
+
+ priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_WAKE_THREAD;
+ priv->thread_op = TLMM_TEST_COUNT;
+
+ tlmm_output_high();
+
+ tlmm_test_request_threaded_irq(test, IRQF_TRIGGER_FALLING);
+ for (i = 0; i < 10; i++) {
+ tlmm_output_high();
+ msleep(20);
+ tlmm_output_low();
+ msleep(20);
+ }
+ free_irq(tlmm_suite.irq, priv);
+
+ KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
+ KUNIT_ASSERT_EQ(test, atomic_read(&priv->thread_count), 10);
+}
+
+/*
+ * Drive line high 10 times, threaded handler drives it low to "clear the
+ * interrupt source", assert we get 10 interrupts
+ */
+static void tlmm_test_thread_high(struct kunit *test)
+{
+ struct tlmm_test_priv *priv = test->priv;
+ int i;
+
+ priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_WAKE_THREAD;
+ priv->thread_op = TLMM_TEST_COUNT | TLMM_TEST_OUTPUT_LOW;
+
+ tlmm_output_low();
+
+ tlmm_test_request_threaded_irq(test, IRQF_TRIGGER_HIGH | IRQF_ONESHOT);
+ for (i = 0; i < 10; i++) {
+ tlmm_output_high();
+ msleep(20);
+ }
+ free_irq(tlmm_suite.irq, priv);
+
+ KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
+ KUNIT_ASSERT_EQ(test, atomic_read(&priv->thread_count), 10);
+}
+
+/*
+ * Drive line low 10 times, threaded handler drives it high to "clear the
+ * interrupt source", assert we get 10 interrupts
+ */
+static void tlmm_test_thread_low(struct kunit *test)
+{
+ struct tlmm_test_priv *priv = test->priv;
+ int i;
+
+ priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_WAKE_THREAD;
+ priv->thread_op = TLMM_TEST_COUNT | TLMM_TEST_OUTPUT_HIGH;
+
+ tlmm_output_high();
+
+ tlmm_test_request_threaded_irq(test, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
+ for (i = 0; i < 10; i++) {
+ tlmm_output_low();
+ msleep(20);
+ }
+ free_irq(tlmm_suite.irq, priv);
+
+ KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
+ KUNIT_ASSERT_EQ(test, atomic_read(&priv->thread_count), 10);
+}
+
+/*
+ * Handler drives GPIO low to "clear the interrupt source", then high in the
+ * threaded handler to simulate a new interrupt, repeated 10 times, assert we
+ * get 10 interrupts
+ */
+static void tlmm_test_thread_rising_in_handler(struct kunit *test)
+{
+ struct tlmm_test_priv *priv = test->priv;
+
+ priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_OUTPUT_LOW | TLMM_TEST_WAKE_THREAD;
+ priv->thread_op = TLMM_TEST_COUNT | TLMM_TEST_THEN_HIGH;
+ atomic_set(&priv->thread_op_remain, 10);
+
+ tlmm_output_low();
+
+ tlmm_test_request_threaded_irq(test, IRQF_TRIGGER_RISING);
+ msleep(20);
+ tlmm_output_high();
+ msleep(100);
+ free_irq(tlmm_suite.irq, priv);
+
+ KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
+ KUNIT_ASSERT_EQ(test, atomic_read(&priv->thread_count), 10);
+}
+
+/*
+ * Handler drives GPIO high to "clear the interrupt source", then low in the
+ * threaded handler to simulate a new interrupt, repeated 10 times, assert we
+ * get 10 interrupts
+ */
+static void tlmm_test_thread_falling_in_handler(struct kunit *test)
+{
+ struct tlmm_test_priv *priv = test->priv;
+
+ priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_OUTPUT_HIGH | TLMM_TEST_WAKE_THREAD;
+ priv->thread_op = TLMM_TEST_COUNT | TLMM_TEST_THEN_LOW;
+ atomic_set(&priv->thread_op_remain, 10);
+
+ tlmm_output_high();
+
+ tlmm_test_request_threaded_irq(test, IRQF_TRIGGER_FALLING);
+ msleep(20);
+ tlmm_output_low();
+ msleep(100);
+ free_irq(tlmm_suite.irq, priv);
+
+ KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
+ KUNIT_ASSERT_EQ(test, atomic_read(&priv->thread_count), 10);
+}
+
+/*
+ * Validate that an edge interrupt occurring while the IRQ is disabled is
+ * delivered once the interrupt is re-enabled.
+ */
+static void tlmm_test_rising_while_disabled(struct kunit *test)
+{
+ struct tlmm_test_priv *priv = test->priv;
+ unsigned int after_edge;
+ unsigned int before_edge;
+
+ priv->intr_op = TLMM_TEST_COUNT;
+ atomic_set(&priv->thread_op_remain, 10);
+
+ tlmm_output_low();
+
+ tlmm_test_request_hard_irq(test, IRQF_TRIGGER_RISING);
+ msleep(20);
+
+ disable_irq(tlmm_suite.irq);
+ before_edge = atomic_read(&priv->intr_count);
+
+ tlmm_output_high();
+ msleep(20);
+ after_edge = atomic_read(&priv->intr_count);
+
+ msleep(20);
+ enable_irq(tlmm_suite.irq);
+ msleep(20);
+
+ free_irq(tlmm_suite.irq, priv);
+
+ KUNIT_ASSERT_EQ(test, before_edge, 0);
+ KUNIT_ASSERT_EQ(test, after_edge, 0);
+ KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 1);
+}
+
+static int tlmm_test_init(struct kunit *test)
+{
+ struct tlmm_test_priv *priv;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+
+ atomic_set(&priv->intr_count, 0);
+ atomic_set(&priv->thread_count, 0);
+
+ atomic_set(&priv->intr_op_remain, 0);
+ atomic_set(&priv->thread_op_remain, 0);
+
+ test->priv = priv;
+
+ return 0;
+}
+
+/*
+ * NOTE: When adding compatibles to this list, ensure that TLMM_REG_SIZE and
+ * pull configuration values are supported and correct.
+ */
+static const struct of_device_id tlmm_of_match[] = {
+ { .compatible = "qcom,sc8280xp-tlmm" },
+ { .compatible = "qcom,x1e80100-tlmm" },
+ {}
+};
+
+static int tlmm_test_init_suite(struct kunit_suite *suite)
+{
+ struct of_phandle_args args = {};
+ struct resource res;
+ int ret;
+ u32 val;
+
+ if (tlmm_test_gpio < 0) {
+ pr_err("use the tlmm-test.gpio module parameter to specify which GPIO to use\n");
+ return -EINVAL;
+ }
+
+ struct device_node *tlmm __free(device_node) = of_find_matching_node(NULL, tlmm_of_match);
+ if (!tlmm) {
+ pr_err("failed to find tlmm node\n");
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(tlmm, 0, &res);
+ if (ret < 0)
+ return ret;
+
+ tlmm_suite.base = ioremap(res.start, resource_size(&res));
+ if (!tlmm_suite.base)
+ return -ENOMEM;
+
+ args.np = tlmm;
+ args.args_count = 2;
+ args.args[0] = tlmm_test_gpio;
+ args.args[1] = 0;
+
+ tlmm_suite.irq = irq_create_of_mapping(&args);
+ if (!tlmm_suite.irq) {
+ pr_err("failed to map TLMM irq %d\n", args.args[0]);
+ goto err_unmap;
+ }
+
+ tlmm_suite.reg = tlmm_suite.base + tlmm_test_gpio * TLMM_REG_SIZE;
+ val = readl(tlmm_suite.reg) & ~MSM_PULL_MASK;
+ tlmm_suite.low_val = val | MSM_PULL_DOWN;
+ tlmm_suite.high_val = val | MSM_PULL_UP;
+
+ return 0;
+
+err_unmap:
+ iounmap(tlmm_suite.base);
+ tlmm_suite.base = NULL;
+ return -EINVAL;
+}
+
+static void tlmm_test_exit_suite(struct kunit_suite *suite)
+{
+ irq_dispose_mapping(tlmm_suite.irq);
+ iounmap(tlmm_suite.base);
+
+ tlmm_suite.base = NULL;
+ tlmm_suite.irq = -1;
+}
+
+static struct kunit_case tlmm_test_cases[] = {
+ KUNIT_CASE(tlmm_test_silent_rising),
+ KUNIT_CASE(tlmm_test_silent_falling),
+ KUNIT_CASE(tlmm_test_silent_low),
+ KUNIT_CASE(tlmm_test_silent_high),
+ KUNIT_CASE(tlmm_test_rising),
+ KUNIT_CASE(tlmm_test_falling),
+ KUNIT_CASE(tlmm_test_high),
+ KUNIT_CASE(tlmm_test_low),
+ KUNIT_CASE(tlmm_test_rising_in_handler),
+ KUNIT_CASE(tlmm_test_falling_in_handler),
+ KUNIT_CASE(tlmm_test_thread_rising),
+ KUNIT_CASE(tlmm_test_thread_falling),
+ KUNIT_CASE(tlmm_test_thread_high),
+ KUNIT_CASE(tlmm_test_thread_low),
+ KUNIT_CASE(tlmm_test_thread_rising_in_handler),
+ KUNIT_CASE(tlmm_test_thread_falling_in_handler),
+ KUNIT_CASE(tlmm_test_rising_while_disabled),
+ {}
+};
+
+static struct kunit_suite tlmm_test_suite = {
+ .name = "tlmm-test",
+ .init = tlmm_test_init,
+ .suite_init = tlmm_test_init_suite,
+ .suite_exit = tlmm_test_exit_suite,
+ .test_cases = tlmm_test_cases,
+};
+
+kunit_test_suites(&tlmm_test_suite);
+
+MODULE_DESCRIPTION("Qualcomm TLMM test");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/renesas/pinctrl-rza2.c b/drivers/pinctrl/renesas/pinctrl-rza2.c
index dd1f8c29d3e7..3b5812963850 100644
--- a/drivers/pinctrl/renesas/pinctrl-rza2.c
+++ b/drivers/pinctrl/renesas/pinctrl-rza2.c
@@ -246,6 +246,9 @@ static int rza2_gpio_register(struct rza2_pinctrl_priv *priv)
int ret;
chip.label = devm_kasprintf(priv->dev, GFP_KERNEL, "%pOFn", np);
+ if (!chip.label)
+ return -ENOMEM;
+
chip.parent = priv->dev;
chip.ngpio = priv->npins;
@@ -256,6 +259,8 @@ static int rza2_gpio_register(struct rza2_pinctrl_priv *priv)
return ret;
}
+ of_node_put(of_args.np);
+
if ((of_args.args[0] != 0) ||
(of_args.args[1] != 0) ||
(of_args.args[2] != priv->npins)) {
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index ce4a07a3df49..c72e250f4a15 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -318,6 +318,7 @@ struct rzg2l_pinctrl_pin_settings {
* @pmc: PMC registers cache
* @pfc: PFC registers cache
* @iolh: IOLH registers cache
+ * @pupd: PUPD registers cache
* @ien: IEN registers cache
* @sd_ch: SD_CH registers cache
* @eth_poc: ET_POC registers cache
@@ -331,6 +332,7 @@ struct rzg2l_pinctrl_reg_cache {
u32 *pfc;
u32 *iolh[2];
u32 *ien[2];
+ u32 *pupd[2];
u8 sd_ch[2];
u8 eth_poc[2];
u8 eth_mode;
@@ -2712,6 +2714,11 @@ static int rzg2l_pinctrl_reg_cache_alloc(struct rzg2l_pinctrl *pctrl)
if (!cache->ien[i])
return -ENOMEM;
+ cache->pupd[i] = devm_kcalloc(pctrl->dev, nports, sizeof(*cache->pupd[i]),
+ GFP_KERNEL);
+ if (!cache->pupd[i])
+ return -ENOMEM;
+
/* Allocate dedicated cache. */
dedicated_cache->iolh[i] = devm_kcalloc(pctrl->dev, n_dedicated_pins,
sizeof(*dedicated_cache->iolh[i]),
@@ -2756,6 +2763,8 @@ static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl)
if (ret)
return dev_err_probe(pctrl->dev, ret, "Unable to parse gpio-ranges\n");
+ of_node_put(of_args.np);
+
if (of_args.args[0] != 0 || of_args.args[1] != 0 ||
of_args.args[2] != pctrl->data->n_port_pins)
return dev_err_probe(pctrl->dev, -EINVAL,
@@ -2953,7 +2962,7 @@ static void rzg2l_pinctrl_pm_setup_regs(struct rzg2l_pinctrl *pctrl, bool suspen
struct rzg2l_pinctrl_reg_cache *cache = pctrl->cache;
for (u32 port = 0; port < nports; port++) {
- bool has_iolh, has_ien;
+ bool has_iolh, has_ien, has_pupd;
u32 off, caps;
u8 pincnt;
u64 cfg;
@@ -2965,6 +2974,7 @@ static void rzg2l_pinctrl_pm_setup_regs(struct rzg2l_pinctrl *pctrl, bool suspen
caps = FIELD_GET(PIN_CFG_MASK, cfg);
has_iolh = !!(caps & (PIN_CFG_IOLH_A | PIN_CFG_IOLH_B | PIN_CFG_IOLH_C));
has_ien = !!(caps & PIN_CFG_IEN);
+ has_pupd = !!(caps & PIN_CFG_PUPD);
if (suspend)
RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + PFC(off), cache->pfc[port]);
@@ -2983,6 +2993,15 @@ static void rzg2l_pinctrl_pm_setup_regs(struct rzg2l_pinctrl *pctrl, bool suspen
}
}
+ if (has_pupd) {
+ RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + PUPD(off),
+ cache->pupd[0][port]);
+ if (pincnt >= 4) {
+ RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + PUPD(off) + 4,
+ cache->pupd[1][port]);
+ }
+ }
+
RZG2L_PCTRL_REG_ACCESS16(suspend, pctrl->base + PM(off), cache->pm[port]);
RZG2L_PCTRL_REG_ACCESS8(suspend, pctrl->base + P(off), cache->p[port]);
@@ -3386,6 +3405,7 @@ static struct platform_driver rzg2l_pinctrl_driver = {
.name = DRV_NAME,
.of_match_table = of_match_ptr(rzg2l_pinctrl_of_table),
.pm = pm_sleep_ptr(&rzg2l_pinctrl_pm_ops),
+ .suppress_bind_attrs = true,
},
.probe = rzg2l_pinctrl_probe,
};
diff --git a/drivers/pinctrl/renesas/pinctrl-rzv2m.c b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
index 4062c56619f5..8c7169db4fcc 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzv2m.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
@@ -940,6 +940,8 @@ static int rzv2m_gpio_register(struct rzv2m_pinctrl *pctrl)
return ret;
}
+ of_node_put(of_args.np);
+
if (of_args.args[0] != 0 || of_args.args[1] != 0 ||
of_args.args[2] != pctrl->data->n_port_pins) {
dev_err(pctrl->dev, "gpio-ranges does not match selected SOC\n");
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
index 3ea7106ce5ea..dd07720e32cc 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
@@ -41,6 +41,15 @@ static const struct samsung_pin_bank_type exynos5433_bank_type_alive = {
};
/*
+ * Bank type for alive type. Bit fields:
+ * CON: 4, DAT: 1, PUD: 2, DRV: 3
+ */
+static const struct samsung_pin_bank_type exynos7870_bank_type_alive = {
+ .fld_width = { 4, 1, 2, 3, },
+ .reg_offset = { 0x00, 0x04, 0x08, 0x0c, },
+};
+
+/*
* Bank type for non-alive type. Bit fields:
* CON: 4, DAT: 1, PUD: 4, DRV: 4, CONPDN: 2, PUDPDN: 4
*/
@@ -70,6 +79,174 @@ static const struct samsung_pin_bank_type exynos8895_bank_type_off = {
/* Pad retention control code for accessing PMU regmap */
static atomic_t exynos_shared_retention_refcnt;
+/* pin banks of exynos2200 pin-controller - ALIVE */
+static const struct samsung_pin_bank_data exynos2200_pin_banks0[] __initconst = {
+ EXYNOS850_PIN_BANK_EINTW(8, 0x0, "gpa0", 0x00),
+ EXYNOS850_PIN_BANK_EINTW(8, 0x20, "gpa1", 0x04),
+ EXYNOS850_PIN_BANK_EINTW(8, 0x40, "gpa2", 0x08),
+ EXYNOS850_PIN_BANK_EINTW(8, 0x60, "gpa3", 0x0c),
+ EXYNOS850_PIN_BANK_EINTW(2, 0x80, "gpa4", 0x10),
+ EXYNOS_PIN_BANK_EINTN(4, 0xa0, "gpq0"),
+ EXYNOS_PIN_BANK_EINTN(2, 0xc0, "gpq1"),
+ EXYNOS_PIN_BANK_EINTN(2, 0xe0, "gpq2"),
+};
+
+/* pin banks of exynos2200 pin-controller - CMGP */
+static const struct samsung_pin_bank_data exynos2200_pin_banks1[] __initconst = {
+ EXYNOS850_PIN_BANK_EINTW(2, 0x0, "gpm0", 0x00),
+ EXYNOS850_PIN_BANK_EINTW(2, 0x20, "gpm1", 0x04),
+ EXYNOS850_PIN_BANK_EINTW(2, 0x40, "gpm2", 0x08),
+ EXYNOS850_PIN_BANK_EINTW(2, 0x60, "gpm3", 0x0c),
+ EXYNOS850_PIN_BANK_EINTW(2, 0x80, "gpm4", 0x10),
+ EXYNOS850_PIN_BANK_EINTW(2, 0xa0, "gpm5", 0x14),
+ EXYNOS850_PIN_BANK_EINTW(2, 0xc0, "gpm6", 0x18),
+ EXYNOS850_PIN_BANK_EINTW(2, 0xe0, "gpm7", 0x1c),
+ EXYNOS850_PIN_BANK_EINTW(2, 0x100, "gpm8", 0x20),
+ EXYNOS850_PIN_BANK_EINTW(2, 0x120, "gpm9", 0x24),
+ EXYNOS850_PIN_BANK_EINTW(2, 0x140, "gpm10", 0x28),
+ EXYNOS850_PIN_BANK_EINTW(2, 0x160, "gpm11", 0x2c),
+ EXYNOS850_PIN_BANK_EINTW(2, 0x180, "gpm12", 0x30),
+ EXYNOS850_PIN_BANK_EINTW(2, 0x1a0, "gpm13", 0x34),
+ EXYNOS850_PIN_BANK_EINTW(1, 0x1c0, "gpm14", 0x38),
+ EXYNOS850_PIN_BANK_EINTW(1, 0x1e0, "gpm15", 0x3c),
+ EXYNOS850_PIN_BANK_EINTW(1, 0x200, "gpm16", 0x40),
+ EXYNOS850_PIN_BANK_EINTW(1, 0x220, "gpm17", 0x44),
+ EXYNOS850_PIN_BANK_EINTW(1, 0x240, "gpm20", 0x48),
+ EXYNOS850_PIN_BANK_EINTW(1, 0x260, "gpm21", 0x4c),
+ EXYNOS850_PIN_BANK_EINTW(1, 0x280, "gpm22", 0x50),
+ EXYNOS850_PIN_BANK_EINTW(1, 0x2a0, "gpm23", 0x54),
+ EXYNOS850_PIN_BANK_EINTW(1, 0x2c0, "gpm24", 0x58),
+};
+
+/* pin banks of exynos2200 pin-controller - HSI1 */
+static const struct samsung_pin_bank_data exynos2200_pin_banks2[] __initconst = {
+ EXYNOS850_PIN_BANK_EINTG(4, 0x0, "gpf0", 0x00),
+};
+
+/* pin banks of exynos2200 pin-controller - UFS */
+static const struct samsung_pin_bank_data exynos2200_pin_banks3[] __initconst = {
+ EXYNOS850_PIN_BANK_EINTG(7, 0x0, "gpf1", 0x00),
+};
+
+/* pin banks of exynos2200 pin-controller - HSI1UFS */
+static const struct samsung_pin_bank_data exynos2200_pin_banks4[] __initconst = {
+ EXYNOS850_PIN_BANK_EINTG(2, 0x0, "gpf2", 0x00),
+};
+
+/* pin banks of exynos2200 pin-controller - PERIC0 */
+static const struct samsung_pin_bank_data exynos2200_pin_banks5[] __initconst = {
+ EXYNOS850_PIN_BANK_EINTG(4, 0x0, "gpb0", 0x00),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x20, "gpb1", 0x04),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x40, "gpb2", 0x08),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x60, "gpb3", 0x0c),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x80, "gpp4", 0x10),
+ EXYNOS850_PIN_BANK_EINTG(2, 0xa0, "gpc0", 0x14),
+ EXYNOS850_PIN_BANK_EINTG(2, 0xc0, "gpc1", 0x18),
+ EXYNOS850_PIN_BANK_EINTG(2, 0xe0, "gpc2", 0x1c),
+ EXYNOS850_PIN_BANK_EINTG(7, 0x100, "gpg1", 0x20),
+ EXYNOS850_PIN_BANK_EINTG(2, 0x120, "gpg2", 0x24),
+};
+
+/* pin banks of exynos2200 pin-controller - PERIC1 */
+static const struct samsung_pin_bank_data exynos2200_pin_banks6[] __initconst = {
+ EXYNOS850_PIN_BANK_EINTG(4, 0x0, "gpp7", 0x00),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x20, "gpp8", 0x04),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x40, "gpp9", 0x08),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x60, "gpp10", 0x0c),
+};
+
+/* pin banks of exynos2200 pin-controller - PERIC2 */
+static const struct samsung_pin_bank_data exynos2200_pin_banks7[] __initconst = {
+ EXYNOS850_PIN_BANK_EINTG(4, 0x0, "gpp0", 0x00),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x20, "gpp1", 0x04),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x40, "gpp2", 0x08),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x60, "gpp3", 0x0c),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x80, "gpp5", 0x10),
+ EXYNOS850_PIN_BANK_EINTG(4, 0xa0, "gpp6", 0x14),
+ EXYNOS850_PIN_BANK_EINTG(4, 0xc0, "gpp11", 0x18),
+ EXYNOS850_PIN_BANK_EINTG(2, 0xe0, "gpc3", 0x1c),
+ EXYNOS850_PIN_BANK_EINTG(2, 0x100, "gpc4", 0x20),
+ EXYNOS850_PIN_BANK_EINTG(2, 0x120, "gpc5", 0x24),
+ EXYNOS850_PIN_BANK_EINTG(2, 0x140, "gpc6", 0x28),
+ EXYNOS850_PIN_BANK_EINTG(2, 0x160, "gpc7", 0x2c),
+ EXYNOS850_PIN_BANK_EINTG(2, 0x180, "gpc8", 0x30),
+ EXYNOS850_PIN_BANK_EINTG(2, 0x1a0, "gpc9", 0x34),
+ EXYNOS850_PIN_BANK_EINTG(5, 0x1c0, "gpg0", 0x38),
+};
+
+/* pin banks of exynos2200 pin-controller - VTS */
+static const struct samsung_pin_bank_data exynos2200_pin_banks8[] __initconst = {
+ EXYNOS850_PIN_BANK_EINTG(7, 0x0, "gpv0", 0x00),
+};
+
+static const struct samsung_pin_ctrl exynos2200_pin_ctrl[] = {
+ {
+ /* pin-controller instance 0 ALIVE data */
+ .pin_banks = exynos2200_pin_banks0,
+ .nr_banks = ARRAY_SIZE(exynos2200_pin_banks0),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .eint_wkup_init = exynos_eint_wkup_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 1 CMGP data */
+ .pin_banks = exynos2200_pin_banks1,
+ .nr_banks = ARRAY_SIZE(exynos2200_pin_banks1),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .eint_wkup_init = exynos_eint_wkup_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 2 HSI1 data */
+ .pin_banks = exynos2200_pin_banks2,
+ .nr_banks = ARRAY_SIZE(exynos2200_pin_banks2),
+ }, {
+ /* pin-controller instance 3 UFS data */
+ .pin_banks = exynos2200_pin_banks3,
+ .nr_banks = ARRAY_SIZE(exynos2200_pin_banks3),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 4 HSI1UFS data */
+ .pin_banks = exynos2200_pin_banks4,
+ .nr_banks = ARRAY_SIZE(exynos2200_pin_banks4),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 5 PERIC0 data */
+ .pin_banks = exynos2200_pin_banks5,
+ .nr_banks = ARRAY_SIZE(exynos2200_pin_banks5),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 6 PERIC1 data */
+ .pin_banks = exynos2200_pin_banks6,
+ .nr_banks = ARRAY_SIZE(exynos2200_pin_banks6),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 7 PERIC2 data */
+ .pin_banks = exynos2200_pin_banks7,
+ .nr_banks = ARRAY_SIZE(exynos2200_pin_banks7),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 8 VTS data */
+ .pin_banks = exynos2200_pin_banks8,
+ .nr_banks = ARRAY_SIZE(exynos2200_pin_banks8),
+ },
+};
+
+const struct samsung_pinctrl_of_match_data exynos2200_of_data __initconst = {
+ .ctrl = exynos2200_pin_ctrl,
+ .num_ctrl = ARRAY_SIZE(exynos2200_pin_ctrl),
+};
+
/* pin banks of exynos5433 pin-controller - ALIVE */
static const struct samsung_pin_bank_data exynos5433_pin_banks0[] __initconst = {
/* Must start with EINTG banks, ordered by EINT group number. */
@@ -450,6 +627,136 @@ const struct samsung_pinctrl_of_match_data exynos7_of_data __initconst = {
.num_ctrl = ARRAY_SIZE(exynos7_pin_ctrl),
};
+/* pin banks of exynos7870 pin-controller 0 (ALIVE) */
+static const struct samsung_pin_bank_data exynos7870_pin_banks0[] __initconst = {
+ EXYNOS7870_PIN_BANK_EINTN(6, 0x000, "etc0"),
+ EXYNOS7870_PIN_BANK_EINTN(3, 0x020, "etc1"),
+ EXYNOS7870_PIN_BANK_EINTW(8, 0x040, "gpa0", 0x00),
+ EXYNOS7870_PIN_BANK_EINTW(8, 0x060, "gpa1", 0x04),
+ EXYNOS7870_PIN_BANK_EINTW(8, 0x080, "gpa2", 0x08),
+ EXYNOS7870_PIN_BANK_EINTN(2, 0x0c0, "gpq0"),
+};
+
+/* pin banks of exynos7870 pin-controller 1 (DISPAUD) */
+static const struct samsung_pin_bank_data exynos7870_pin_banks1[] __initconst = {
+ EXYNOS8895_PIN_BANK_EINTG(4, 0x000, "gpz0", 0x00),
+ EXYNOS8895_PIN_BANK_EINTG(6, 0x020, "gpz1", 0x04),
+ EXYNOS8895_PIN_BANK_EINTG(4, 0x040, "gpz2", 0x08),
+};
+
+/* pin banks of exynos7870 pin-controller 2 (ESE) */
+static const struct samsung_pin_bank_data exynos7870_pin_banks2[] __initconst = {
+ EXYNOS8895_PIN_BANK_EINTG(5, 0x000, "gpc7", 0x00),
+};
+
+/* pin banks of exynos7870 pin-controller 3 (FSYS) */
+static const struct samsung_pin_bank_data exynos7870_pin_banks3[] __initconst = {
+ EXYNOS8895_PIN_BANK_EINTG(3, 0x000, "gpr0", 0x00),
+ EXYNOS8895_PIN_BANK_EINTG(8, 0x020, "gpr1", 0x04),
+ EXYNOS8895_PIN_BANK_EINTG(2, 0x040, "gpr2", 0x08),
+ EXYNOS8895_PIN_BANK_EINTG(4, 0x060, "gpr3", 0x0c),
+ EXYNOS8895_PIN_BANK_EINTG(6, 0x080, "gpr4", 0x10),
+};
+
+/* pin banks of exynos7870 pin-controller 4 (MIF) */
+static const struct samsung_pin_bank_data exynos7870_pin_banks4[] __initconst = {
+ EXYNOS8895_PIN_BANK_EINTG(2, 0x000, "gpm0", 0x00),
+};
+
+/* pin banks of exynos7870 pin-controller 5 (NFC) */
+static const struct samsung_pin_bank_data exynos7870_pin_banks5[] __initconst = {
+ EXYNOS8895_PIN_BANK_EINTG(4, 0x000, "gpc2", 0x00),
+};
+
+/* pin banks of exynos7870 pin-controller 6 (TOP) */
+static const struct samsung_pin_bank_data exynos7870_pin_banks6[] __initconst = {
+ EXYNOS8895_PIN_BANK_EINTG(4, 0x000, "gpb0", 0x00),
+ EXYNOS8895_PIN_BANK_EINTG(3, 0x020, "gpc0", 0x04),
+ EXYNOS8895_PIN_BANK_EINTG(4, 0x040, "gpc1", 0x08),
+ EXYNOS8895_PIN_BANK_EINTG(4, 0x060, "gpc4", 0x0c),
+ EXYNOS8895_PIN_BANK_EINTG(2, 0x080, "gpc5", 0x10),
+ EXYNOS8895_PIN_BANK_EINTG(4, 0x0a0, "gpc6", 0x14),
+ EXYNOS8895_PIN_BANK_EINTG(2, 0x0c0, "gpc8", 0x18),
+ EXYNOS8895_PIN_BANK_EINTG(2, 0x0e0, "gpc9", 0x1c),
+ EXYNOS8895_PIN_BANK_EINTG(7, 0x100, "gpd1", 0x20),
+ EXYNOS8895_PIN_BANK_EINTG(6, 0x120, "gpd2", 0x24),
+ EXYNOS8895_PIN_BANK_EINTG(8, 0x140, "gpd3", 0x28),
+ EXYNOS8895_PIN_BANK_EINTG(7, 0x160, "gpd4", 0x2c),
+ EXYNOS8895_PIN_BANK_EINTG(3, 0x1a0, "gpe0", 0x34),
+ EXYNOS8895_PIN_BANK_EINTG(4, 0x1c0, "gpf0", 0x38),
+ EXYNOS8895_PIN_BANK_EINTG(2, 0x1e0, "gpf1", 0x3c),
+ EXYNOS8895_PIN_BANK_EINTG(2, 0x200, "gpf2", 0x40),
+ EXYNOS8895_PIN_BANK_EINTG(4, 0x220, "gpf3", 0x44),
+ EXYNOS8895_PIN_BANK_EINTG(5, 0x240, "gpf4", 0x48),
+};
+
+/* pin banks of exynos7870 pin-controller 7 (TOUCH) */
+static const struct samsung_pin_bank_data exynos7870_pin_banks7[] __initconst = {
+ EXYNOS8895_PIN_BANK_EINTG(3, 0x000, "gpc3", 0x00),
+};
+
+static const struct samsung_pin_ctrl exynos7870_pin_ctrl[] __initconst = {
+ {
+ /* pin-controller instance 0 Alive data */
+ .pin_banks = exynos7870_pin_banks0,
+ .nr_banks = ARRAY_SIZE(exynos7870_pin_banks0),
+ .eint_wkup_init = exynos_eint_wkup_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 1 DISPAUD data */
+ .pin_banks = exynos7870_pin_banks1,
+ .nr_banks = ARRAY_SIZE(exynos7870_pin_banks1),
+ }, {
+ /* pin-controller instance 2 ESE data */
+ .pin_banks = exynos7870_pin_banks2,
+ .nr_banks = ARRAY_SIZE(exynos7870_pin_banks2),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 3 FSYS data */
+ .pin_banks = exynos7870_pin_banks3,
+ .nr_banks = ARRAY_SIZE(exynos7870_pin_banks3),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 4 MIF data */
+ .pin_banks = exynos7870_pin_banks4,
+ .nr_banks = ARRAY_SIZE(exynos7870_pin_banks4),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 5 NFC data */
+ .pin_banks = exynos7870_pin_banks5,
+ .nr_banks = ARRAY_SIZE(exynos7870_pin_banks5),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 6 TOP data */
+ .pin_banks = exynos7870_pin_banks6,
+ .nr_banks = ARRAY_SIZE(exynos7870_pin_banks6),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 7 TOUCH data */
+ .pin_banks = exynos7870_pin_banks7,
+ .nr_banks = ARRAY_SIZE(exynos7870_pin_banks7),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ },
+};
+
+const struct samsung_pinctrl_of_match_data exynos7870_of_data __initconst = {
+ .ctrl = exynos7870_pin_ctrl,
+ .num_ctrl = ARRAY_SIZE(exynos7870_pin_ctrl),
+};
+
/* pin banks of exynos7885 pin-controller 0 (ALIVE) */
static const struct samsung_pin_bank_data exynos7885_pin_banks0[] __initconst = {
EXYNOS_PIN_BANK_EINTN(3, 0x000, "etc0"),
@@ -1370,83 +1677,83 @@ const struct samsung_pinctrl_of_match_data fsd_of_data __initconst = {
/* pin banks of gs101 pin-controller (ALIVE) */
static const struct samsung_pin_bank_data gs101_pin_alive[] = {
- EXYNOS850_PIN_BANK_EINTW(8, 0x0, "gpa0", 0x00),
- EXYNOS850_PIN_BANK_EINTW(7, 0x20, "gpa1", 0x04),
- EXYNOS850_PIN_BANK_EINTW(5, 0x40, "gpa2", 0x08),
- EXYNOS850_PIN_BANK_EINTW(4, 0x60, "gpa3", 0x0c),
- EXYNOS850_PIN_BANK_EINTW(4, 0x80, "gpa4", 0x10),
- EXYNOS850_PIN_BANK_EINTW(7, 0xa0, "gpa5", 0x14),
- EXYNOS850_PIN_BANK_EINTW(8, 0xc0, "gpa9", 0x18),
- EXYNOS850_PIN_BANK_EINTW(2, 0xe0, "gpa10", 0x1c),
+ GS101_PIN_BANK_EINTW(8, 0x0, "gpa0", 0x00, 0x00),
+ GS101_PIN_BANK_EINTW(7, 0x20, "gpa1", 0x04, 0x08),
+ GS101_PIN_BANK_EINTW(5, 0x40, "gpa2", 0x08, 0x10),
+ GS101_PIN_BANK_EINTW(4, 0x60, "gpa3", 0x0c, 0x18),
+ GS101_PIN_BANK_EINTW(4, 0x80, "gpa4", 0x10, 0x1c),
+ GS101_PIN_BANK_EINTW(7, 0xa0, "gpa5", 0x14, 0x20),
+ GS101_PIN_BANK_EINTW(8, 0xc0, "gpa9", 0x18, 0x28),
+ GS101_PIN_BANK_EINTW(2, 0xe0, "gpa10", 0x1c, 0x30),
};
/* pin banks of gs101 pin-controller (FAR_ALIVE) */
static const struct samsung_pin_bank_data gs101_pin_far_alive[] = {
- EXYNOS850_PIN_BANK_EINTW(8, 0x0, "gpa6", 0x00),
- EXYNOS850_PIN_BANK_EINTW(4, 0x20, "gpa7", 0x04),
- EXYNOS850_PIN_BANK_EINTW(8, 0x40, "gpa8", 0x08),
- EXYNOS850_PIN_BANK_EINTW(2, 0x60, "gpa11", 0x0c),
+ GS101_PIN_BANK_EINTW(8, 0x0, "gpa6", 0x00, 0x00),
+ GS101_PIN_BANK_EINTW(4, 0x20, "gpa7", 0x04, 0x08),
+ GS101_PIN_BANK_EINTW(8, 0x40, "gpa8", 0x08, 0x0c),
+ GS101_PIN_BANK_EINTW(2, 0x60, "gpa11", 0x0c, 0x14),
};
/* pin banks of gs101 pin-controller (GSACORE) */
static const struct samsung_pin_bank_data gs101_pin_gsacore[] = {
- EXYNOS850_PIN_BANK_EINTG(2, 0x0, "gps0", 0x00),
- EXYNOS850_PIN_BANK_EINTG(8, 0x20, "gps1", 0x04),
- EXYNOS850_PIN_BANK_EINTG(3, 0x40, "gps2", 0x08),
+ GS101_PIN_BANK_EINTG(2, 0x0, "gps0", 0x00, 0x00),
+ GS101_PIN_BANK_EINTG(8, 0x20, "gps1", 0x04, 0x04),
+ GS101_PIN_BANK_EINTG(3, 0x40, "gps2", 0x08, 0x0c),
};
/* pin banks of gs101 pin-controller (GSACTRL) */
static const struct samsung_pin_bank_data gs101_pin_gsactrl[] = {
- EXYNOS850_PIN_BANK_EINTW(6, 0x0, "gps3", 0x00),
+ GS101_PIN_BANK_EINTW(6, 0x0, "gps3", 0x00, 0x00),
};
/* pin banks of gs101 pin-controller (PERIC0) */
static const struct samsung_pin_bank_data gs101_pin_peric0[] = {
- EXYNOS850_PIN_BANK_EINTG(5, 0x0, "gpp0", 0x00),
- EXYNOS850_PIN_BANK_EINTG(4, 0x20, "gpp1", 0x04),
- EXYNOS850_PIN_BANK_EINTG(4, 0x40, "gpp2", 0x08),
- EXYNOS850_PIN_BANK_EINTG(2, 0x60, "gpp3", 0x0c),
- EXYNOS850_PIN_BANK_EINTG(4, 0x80, "gpp4", 0x10),
- EXYNOS850_PIN_BANK_EINTG(2, 0xa0, "gpp5", 0x14),
- EXYNOS850_PIN_BANK_EINTG(4, 0xc0, "gpp6", 0x18),
- EXYNOS850_PIN_BANK_EINTG(2, 0xe0, "gpp7", 0x1c),
- EXYNOS850_PIN_BANK_EINTG(4, 0x100, "gpp8", 0x20),
- EXYNOS850_PIN_BANK_EINTG(2, 0x120, "gpp9", 0x24),
- EXYNOS850_PIN_BANK_EINTG(4, 0x140, "gpp10", 0x28),
- EXYNOS850_PIN_BANK_EINTG(2, 0x160, "gpp11", 0x2c),
- EXYNOS850_PIN_BANK_EINTG(4, 0x180, "gpp12", 0x30),
- EXYNOS850_PIN_BANK_EINTG(2, 0x1a0, "gpp13", 0x34),
- EXYNOS850_PIN_BANK_EINTG(4, 0x1c0, "gpp14", 0x38),
- EXYNOS850_PIN_BANK_EINTG(2, 0x1e0, "gpp15", 0x3c),
- EXYNOS850_PIN_BANK_EINTG(4, 0x200, "gpp16", 0x40),
- EXYNOS850_PIN_BANK_EINTG(2, 0x220, "gpp17", 0x44),
- EXYNOS850_PIN_BANK_EINTG(4, 0x240, "gpp18", 0x48),
- EXYNOS850_PIN_BANK_EINTG(4, 0x260, "gpp19", 0x4c),
+ GS101_PIN_BANK_EINTG(5, 0x0, "gpp0", 0x00, 0x00),
+ GS101_PIN_BANK_EINTG(4, 0x20, "gpp1", 0x04, 0x08),
+ GS101_PIN_BANK_EINTG(4, 0x40, "gpp2", 0x08, 0x0c),
+ GS101_PIN_BANK_EINTG(2, 0x60, "gpp3", 0x0c, 0x10),
+ GS101_PIN_BANK_EINTG(4, 0x80, "gpp4", 0x10, 0x14),
+ GS101_PIN_BANK_EINTG(2, 0xa0, "gpp5", 0x14, 0x18),
+ GS101_PIN_BANK_EINTG(4, 0xc0, "gpp6", 0x18, 0x1c),
+ GS101_PIN_BANK_EINTG(2, 0xe0, "gpp7", 0x1c, 0x20),
+ GS101_PIN_BANK_EINTG(4, 0x100, "gpp8", 0x20, 0x24),
+ GS101_PIN_BANK_EINTG(2, 0x120, "gpp9", 0x24, 0x28),
+ GS101_PIN_BANK_EINTG(4, 0x140, "gpp10", 0x28, 0x2c),
+ GS101_PIN_BANK_EINTG(2, 0x160, "gpp11", 0x2c, 0x30),
+ GS101_PIN_BANK_EINTG(4, 0x180, "gpp12", 0x30, 0x34),
+ GS101_PIN_BANK_EINTG(2, 0x1a0, "gpp13", 0x34, 0x38),
+ GS101_PIN_BANK_EINTG(4, 0x1c0, "gpp14", 0x38, 0x3c),
+ GS101_PIN_BANK_EINTG(2, 0x1e0, "gpp15", 0x3c, 0x40),
+ GS101_PIN_BANK_EINTG(4, 0x200, "gpp16", 0x40, 0x44),
+ GS101_PIN_BANK_EINTG(2, 0x220, "gpp17", 0x44, 0x48),
+ GS101_PIN_BANK_EINTG(4, 0x240, "gpp18", 0x48, 0x4c),
+ GS101_PIN_BANK_EINTG(4, 0x260, "gpp19", 0x4c, 0x50),
};
/* pin banks of gs101 pin-controller (PERIC1) */
static const struct samsung_pin_bank_data gs101_pin_peric1[] = {
- EXYNOS850_PIN_BANK_EINTG(8, 0x0, "gpp20", 0x00),
- EXYNOS850_PIN_BANK_EINTG(4, 0x20, "gpp21", 0x04),
- EXYNOS850_PIN_BANK_EINTG(2, 0x40, "gpp22", 0x08),
- EXYNOS850_PIN_BANK_EINTG(8, 0x60, "gpp23", 0x0c),
- EXYNOS850_PIN_BANK_EINTG(4, 0x80, "gpp24", 0x10),
- EXYNOS850_PIN_BANK_EINTG(4, 0xa0, "gpp25", 0x14),
- EXYNOS850_PIN_BANK_EINTG(5, 0xc0, "gpp26", 0x18),
- EXYNOS850_PIN_BANK_EINTG(4, 0xe0, "gpp27", 0x1c),
+ GS101_PIN_BANK_EINTG(8, 0x0, "gpp20", 0x00, 0x00),
+ GS101_PIN_BANK_EINTG(4, 0x20, "gpp21", 0x04, 0x08),
+ GS101_PIN_BANK_EINTG(2, 0x40, "gpp22", 0x08, 0x0c),
+ GS101_PIN_BANK_EINTG(8, 0x60, "gpp23", 0x0c, 0x10),
+ GS101_PIN_BANK_EINTG(4, 0x80, "gpp24", 0x10, 0x18),
+ GS101_PIN_BANK_EINTG(4, 0xa0, "gpp25", 0x14, 0x1c),
+ GS101_PIN_BANK_EINTG(5, 0xc0, "gpp26", 0x18, 0x20),
+ GS101_PIN_BANK_EINTG(4, 0xe0, "gpp27", 0x1c, 0x28),
};
/* pin banks of gs101 pin-controller (HSI1) */
static const struct samsung_pin_bank_data gs101_pin_hsi1[] = {
- EXYNOS850_PIN_BANK_EINTG(6, 0x0, "gph0", 0x00),
- EXYNOS850_PIN_BANK_EINTG(7, 0x20, "gph1", 0x04),
+ GS101_PIN_BANK_EINTG(6, 0x0, "gph0", 0x00, 0x00),
+ GS101_PIN_BANK_EINTG(7, 0x20, "gph1", 0x04, 0x08),
};
/* pin banks of gs101 pin-controller (HSI2) */
static const struct samsung_pin_bank_data gs101_pin_hsi2[] = {
- EXYNOS850_PIN_BANK_EINTG(6, 0x0, "gph2", 0x00),
- EXYNOS850_PIN_BANK_EINTG(2, 0x20, "gph3", 0x04),
- EXYNOS850_PIN_BANK_EINTG(6, 0x40, "gph4", 0x08),
+ GS101_PIN_BANK_EINTG(6, 0x0, "gph2", 0x00, 0x00),
+ GS101_PIN_BANK_EINTG(2, 0x20, "gph3", 0x04, 0x08),
+ GS101_PIN_BANK_EINTG(6, 0x40, "gph4", 0x08, 0x0c),
};
static const struct samsung_pin_ctrl gs101_pin_ctrl[] __initconst = {
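Editor's note on the new exynos2200/exynos7870 controller tables above: the per-instance callbacks in a samsung_pin_ctrl entry are optional. Instances whose banks can wake the system set .eint_wkup_init, GPIO-interrupt-capable ones set .eint_gpio_init, and blocks without EINT support (for example the exynos2200 HSI1 and VTS instances) leave both unset. The sketch below is only an illustration of that optional-hook pattern, not the driver's actual probe path; all fake_* names and the stand-in struct are hypothetical.

/*
 * Illustrative sketch: optional per-instance init hooks are called only
 * when they are set, which is why entries such as HSI1/VTS above can
 * omit them.  Stand-in types only; not kernel code.
 */
#include <stddef.h>
#include <stdio.h>

struct fake_ctrl {
	const char *label;
	int (*eint_gpio_init)(const char *label);
	int (*eint_wkup_init)(const char *label);
};

static int fake_gpio_init(const char *label)
{
	printf("%s: GPIO EINT init\n", label);
	return 0;
}

static int fake_wkup_init(const char *label)
{
	printf("%s: wake-up EINT init\n", label);
	return 0;
}

static const struct fake_ctrl ctrls[] = {
	{ .label = "ALIVE",  .eint_gpio_init = fake_gpio_init,
			     .eint_wkup_init = fake_wkup_init },
	{ .label = "PERIC0", .eint_gpio_init = fake_gpio_init },
	{ .label = "VTS" },	/* no EINT support: both hooks stay NULL */
};

int main(void)
{
	for (size_t i = 0; i < sizeof(ctrls) / sizeof(ctrls[0]); i++) {
		if (ctrls[i].eint_gpio_init)
			ctrls[i].eint_gpio_init(ctrls[i].label);
		if (ctrls[i].eint_wkup_init)
			ctrls[i].eint_wkup_init(ctrls[i].label);
	}
	return 0;
}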
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
index 7b7ff7ffeb56..b483270ddc53 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.h
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
@@ -112,6 +112,25 @@
.pctl_res_idx = pctl_idx, \
} \
+#define EXYNOS7870_PIN_BANK_EINTN(pins, reg, id) \
+ { \
+ .type = &exynos7870_bank_type_alive, \
+ .pctl_offset = reg, \
+ .nr_pins = pins, \
+ .eint_type = EINT_TYPE_NONE, \
+ .name = id \
+ }
+
+#define EXYNOS7870_PIN_BANK_EINTW(pins, reg, id, offs) \
+ { \
+ .type = &exynos7870_bank_type_alive, \
+ .pctl_offset = reg, \
+ .nr_pins = pins, \
+ .eint_type = EINT_TYPE_WKUP, \
+ .eint_offset = offs, \
+ .name = id \
+ }
+
#define EXYNOS850_PIN_BANK_EINTN(pins, reg, id) \
{ \
.type = &exynos850_bank_type_alive, \
@@ -175,6 +194,28 @@
.name = id \
}
+#define GS101_PIN_BANK_EINTG(pins, reg, id, offs, fltcon_offs) \
+ { \
+ .type = &exynos850_bank_type_off, \
+ .pctl_offset = reg, \
+ .nr_pins = pins, \
+ .eint_type = EINT_TYPE_GPIO, \
+ .eint_offset = offs, \
+ .eint_fltcon_offset = fltcon_offs, \
+ .name = id \
+ }
+
+#define GS101_PIN_BANK_EINTW(pins, reg, id, offs, fltcon_offs) \
+ { \
+ .type = &exynos850_bank_type_alive, \
+ .pctl_offset = reg, \
+ .nr_pins = pins, \
+ .eint_type = EINT_TYPE_WKUP, \
+ .eint_offset = offs, \
+ .eint_fltcon_offset = fltcon_offs, \
+ .name = id \
+ }
+
/**
* struct exynos_weint_data: irq specific data for all the wakeup interrupts
* generated by the external wakeup interrupt controller.
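Editor's note on the macro additions in this header: the bank macros expand to designated initializers for samsung_pin_bank_data, and the GS101 variants take one extra argument, fltcon_offs, which lands in .eint_fltcon_offset (the EXYNOS7870 variants, by contrast, fix the alive bank type and carry no filter offset). The standalone sketch below shows that expansion with a trimmed stand-in struct; fake_bank_data and FAKE_GS101_BANK are hypothetical names, and the real samsung_pin_bank_data has more members (notably .type and .eint_type) than modelled here.

/* Illustrative only: a trimmed stand-in for samsung_pin_bank_data. */
#include <stdio.h>

struct fake_bank_data {
	unsigned int pctl_offset;	 /* bank offset in the pin controller */
	unsigned int nr_pins;		 /* pins in the bank                  */
	unsigned int eint_offset;	 /* EINT control offset for the bank  */
	unsigned int eint_fltcon_offset; /* new: EINT filter config offset    */
	const char *name;
};

/* Same shape as GS101_PIN_BANK_EINTG, minus .type and .eint_type. */
#define FAKE_GS101_BANK(pins, reg, id, offs, fltcon_offs)	\
	{							\
		.pctl_offset = (reg),				\
		.nr_pins = (pins),				\
		.eint_offset = (offs),				\
		.eint_fltcon_offset = (fltcon_offs),		\
		.name = (id),					\
	}

static const struct fake_bank_data hsi1[] = {
	/* Mirrors gs101_pin_hsi1[] above: gph0/gph1 with explicit fltcon. */
	FAKE_GS101_BANK(6, 0x0,  "gph0", 0x00, 0x00),
	FAKE_GS101_BANK(7, 0x20, "gph1", 0x04, 0x08),
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(hsi1) / sizeof(hsi1[0]); i++)
		printf("%s: pctl=0x%02x eint=0x%02x fltcon=0x%02x\n",
		       hsi1[i].name, hsi1[i].pctl_offset,
		       hsi1[i].eint_offset, hsi1[i].eint_fltcon_offset);
	return 0;
}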
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index cfced7afd4ca..2896eb2de2c0 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -1230,6 +1230,7 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
bank->eint_con_offset = bdata->eint_con_offset;
bank->eint_mask_offset = bdata->eint_mask_offset;
bank->eint_pend_offset = bdata->eint_pend_offset;
+ bank->eint_fltcon_offset = bdata->eint_fltcon_offset;
bank->name = bdata->name;
raw_spin_lock_init(&bank->slock);
@@ -1469,10 +1470,14 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = {
#ifdef CONFIG_PINCTRL_EXYNOS_ARM64
{ .compatible = "google,gs101-pinctrl",
.data = &gs101_of_data },
+ { .compatible = "samsung,exynos2200-pinctrl",
+ .data = &exynos2200_of_data },
{ .compatible = "samsung,exynos5433-pinctrl",
.data = &exynos5433_of_data },
{ .compatible = "samsung,exynos7-pinctrl",
.data = &exynos7_of_data },
+ { .compatible = "samsung,exynos7870-pinctrl",
+ .data = &exynos7870_of_data },
{ .compatible = "samsung,exynos7885-pinctrl",
.data = &exynos7885_of_data },
{ .compatible = "samsung,exynos850-pinctrl",
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index bb0689d52ea0..3cf758df7d69 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -144,6 +144,7 @@ struct samsung_pin_bank_type {
* @eint_con_offset: ExynosAuto SoC-specific EINT control register offset of bank.
* @eint_mask_offset: ExynosAuto SoC-specific EINT mask register offset of bank.
* @eint_pend_offset: ExynosAuto SoC-specific EINT pend register offset of bank.
+ * @eint_fltcon_offset: GS101 SoC-specific EINT filter config register offset.
* @name: name to be prefixed for each pin in this pin bank.
*/
struct samsung_pin_bank_data {
@@ -158,6 +159,7 @@ struct samsung_pin_bank_data {
u32 eint_con_offset;
u32 eint_mask_offset;
u32 eint_pend_offset;
+ u32 eint_fltcon_offset;
const char *name;
};
@@ -175,6 +177,7 @@ struct samsung_pin_bank_data {
* @eint_con_offset: ExynosAuto SoC-specific EINT register or interrupt offset of bank.
* @eint_mask_offset: ExynosAuto SoC-specific EINT mask register offset of bank.
* @eint_pend_offset: ExynosAuto SoC-specific EINT pend register offset of bank.
+ * @eint_fltcon_offset: GS101 SoC-specific EINT filter config register offset.
* @name: name to be prefixed for each pin in this pin bank.
* @id: id of the bank, propagated to the pin range.
* @pin_base: starting pin number of the bank.
@@ -201,6 +204,7 @@ struct samsung_pin_bank {
u32 eint_con_offset;
u32 eint_mask_offset;
u32 eint_pend_offset;
+ u32 eint_fltcon_offset;
const char *name;
u32 id;
@@ -373,6 +377,7 @@ struct samsung_pmx_func {
};
/* list of all exported SoC specific data */
+extern const struct samsung_pinctrl_of_match_data exynos2200_of_data;
extern const struct samsung_pinctrl_of_match_data exynos3250_of_data;
extern const struct samsung_pinctrl_of_match_data exynos4210_of_data;
extern const struct samsung_pinctrl_of_match_data exynos4x12_of_data;
@@ -382,6 +387,7 @@ extern const struct samsung_pinctrl_of_match_data exynos5410_of_data;
extern const struct samsung_pinctrl_of_match_data exynos5420_of_data;
extern const struct samsung_pinctrl_of_match_data exynos5433_of_data;
extern const struct samsung_pinctrl_of_match_data exynos7_of_data;
+extern const struct samsung_pinctrl_of_match_data exynos7870_of_data;
extern const struct samsung_pinctrl_of_match_data exynos7885_of_data;
extern const struct samsung_pinctrl_of_match_data exynos850_of_data;
extern const struct samsung_pinctrl_of_match_data exynos8895_of_data;
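Editor's note: the new eint_fltcon_offset field follows the existing pattern of mirroring each per-bank constant from the __initconst samsung_pin_bank_data into the runtime samsung_pin_bank at probe time (see the pinctrl-samsung.c hunk above). A minimal stand-in sketch of that copy step follows; the fake_* types and function are hypothetical and model only the fields relevant here.

/* Stand-in sketch of the probe-time copy from const bank data to the
 * runtime bank structure; only a few fields are modelled. */
#include <stdio.h>

struct fake_bank_data { unsigned int eint_offset, eint_fltcon_offset; const char *name; };
struct fake_bank      { unsigned int eint_offset, eint_fltcon_offset; const char *name; };

static void fake_copy_bank(struct fake_bank *bank, const struct fake_bank_data *bdata)
{
	bank->eint_offset        = bdata->eint_offset;
	bank->eint_fltcon_offset = bdata->eint_fltcon_offset;	/* new field */
	bank->name               = bdata->name;
}

int main(void)
{
	static const struct fake_bank_data bdata = {
		.eint_offset = 0x04, .eint_fltcon_offset = 0x08, .name = "gpa1",
	};
	struct fake_bank bank;

	fake_copy_bank(&bank, &bdata);
	printf("%s: eint=0x%02x fltcon=0x%02x\n", bank.name,
	       bank.eint_offset, bank.eint_fltcon_offset);
	return 0;
}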
diff --git a/drivers/pinctrl/sophgo/Kconfig b/drivers/pinctrl/sophgo/Kconfig
index c05f909a8838..9b0a3a4753b1 100644
--- a/drivers/pinctrl/sophgo/Kconfig
+++ b/drivers/pinctrl/sophgo/Kconfig
@@ -3,17 +3,21 @@
# Sophgo SoC PINCTRL drivers
#
-config PINCTRL_SOPHGO_CV18XX
- bool
+config PINCTRL_SOPHGO_COMMON
+ tristate
select GENERIC_PINCTRL_GROUPS
select GENERIC_PINMUX_FUNCTIONS
select GENERIC_PINCONF
+config PINCTRL_SOPHGO_CV18XX_OPS
+ bool
+
config PINCTRL_SOPHGO_CV1800B
tristate "Sophgo CV1800B SoC Pinctrl driver"
depends on ARCH_SOPHGO || COMPILE_TEST
depends on OF
- select PINCTRL_SOPHGO_CV18XX
+ select PINCTRL_SOPHGO_COMMON
+ select PINCTRL_SOPHGO_CV18XX_OPS
help
Say Y to select the pinctrl driver for CV1800B SoC.
This pin controller allows selecting the mux function for
@@ -24,7 +28,8 @@ config PINCTRL_SOPHGO_CV1812H
tristate "Sophgo CV1812H SoC Pinctrl driver"
depends on ARCH_SOPHGO || COMPILE_TEST
depends on OF
- select PINCTRL_SOPHGO_CV18XX
+ select PINCTRL_SOPHGO_COMMON
+ select PINCTRL_SOPHGO_CV18XX_OPS
help
Say Y to select the pinctrl driver for CV1812H SoC.
This pin controller allows selecting the mux function for
@@ -35,7 +40,8 @@ config PINCTRL_SOPHGO_SG2000
tristate "Sophgo SG2000 SoC Pinctrl driver"
depends on ARCH_SOPHGO || COMPILE_TEST
depends on OF
- select PINCTRL_SOPHGO_CV18XX
+ select PINCTRL_SOPHGO_COMMON
+ select PINCTRL_SOPHGO_CV18XX_OPS
help
Say Y to select the pinctrl driver for SG2000 SoC.
This pin controller allows selecting the mux function for
@@ -46,9 +52,37 @@ config PINCTRL_SOPHGO_SG2002
tristate "Sophgo SG2002 SoC Pinctrl driver"
depends on ARCH_SOPHGO || COMPILE_TEST
depends on OF
- select PINCTRL_SOPHGO_CV18XX
+ select PINCTRL_SOPHGO_COMMON
+ select PINCTRL_SOPHGO_CV18XX_OPS
help
Say Y to select the pinctrl driver for SG2002 SoC.
This pin controller allows selecting the mux function for
each pin. This driver can also be built as a module called
pinctrl-sg2002.
+
+config PINCTRL_SOPHGO_SG2042_OPS
+ bool
+
+config PINCTRL_SOPHGO_SG2042
+ tristate "Sophgo SG2042 SoC Pinctrl driver"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ depends on OF
+ select PINCTRL_SOPHGO_COMMON
+ select PINCTRL_SOPHGO_SG2042_OPS
+ help
+ Say Y to select the pinctrl driver for SG2042 SoC.
+ This pin controller allows selecting the mux function for
+ each pin. This driver can also be built as a module called
+ pinctrl-sg2042.
+
+config PINCTRL_SOPHGO_SG2044
+ tristate "Sophgo SG2044 SoC Pinctrl driver"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ depends on OF
+ select PINCTRL_SOPHGO_COMMON
+ select PINCTRL_SOPHGO_SG2042_OPS
+ help
+ Say Y to select the pinctrl driver for SG2044 SoC.
+ This pin controller allows selecting the mux function for
+ each pin. This driver can also be built as a module called
+ pinctrl-sg2044.
diff --git a/drivers/pinctrl/sophgo/Makefile b/drivers/pinctrl/sophgo/Makefile
index 4113a5c9191b..479c65e6dbc0 100644
--- a/drivers/pinctrl/sophgo/Makefile
+++ b/drivers/pinctrl/sophgo/Makefile
@@ -1,7 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_PINCTRL_SOPHGO_CV18XX) += pinctrl-cv18xx.o
+obj-$(CONFIG_PINCTRL_SOPHGO_COMMON) += pinctrl-sophgo.o
+pinctrl-sophgo-objs += pinctrl-sophgo-common.o
+pinctrl-sophgo-$(CONFIG_PINCTRL_SOPHGO_CV18XX_OPS) += pinctrl-cv18xx.o
+pinctrl-sophgo-$(CONFIG_PINCTRL_SOPHGO_SG2042_OPS) += pinctrl-sg2042-ops.o
+
obj-$(CONFIG_PINCTRL_SOPHGO_CV1800B) += pinctrl-cv1800b.o
obj-$(CONFIG_PINCTRL_SOPHGO_CV1812H) += pinctrl-cv1812h.o
obj-$(CONFIG_PINCTRL_SOPHGO_SG2000) += pinctrl-sg2000.o
obj-$(CONFIG_PINCTRL_SOPHGO_SG2002) += pinctrl-sg2002.o
+obj-$(CONFIG_PINCTRL_SOPHGO_SG2042) += pinctrl-sg2042.o
+obj-$(CONFIG_PINCTRL_SOPHGO_SG2044) += pinctrl-sg2044.o
diff --git a/drivers/pinctrl/sophgo/pinctrl-cv1800b.c b/drivers/pinctrl/sophgo/pinctrl-cv1800b.c
index 3322906689e7..82db60212477 100644
--- a/drivers/pinctrl/sophgo/pinctrl-cv1800b.c
+++ b/drivers/pinctrl/sophgo/pinctrl-cv1800b.c
@@ -34,8 +34,9 @@ static const char *const cv1800b_power_domain_desc[] = {
[VDDIO_SD0_SPI] = "VDDIO_SD0_SPI",
};
-static int cv1800b_get_pull_up(struct cv1800_pin *pin, const u32 *psmap)
+static int cv1800b_get_pull_up(const struct sophgo_pin *sp, const u32 *psmap)
{
+ const struct cv1800_pin *pin = sophgo_to_cv1800_pin(sp);
u32 pstate = psmap[pin->power_domain];
enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
@@ -54,8 +55,9 @@ static int cv1800b_get_pull_up(struct cv1800_pin *pin, const u32 *psmap)
return -ENOTSUPP;
}
-static int cv1800b_get_pull_down(struct cv1800_pin *pin, const u32 *psmap)
+static int cv1800b_get_pull_down(const struct sophgo_pin *sp, const u32 *psmap)
{
+ const struct cv1800_pin *pin = sophgo_to_cv1800_pin(sp);
u32 pstate = psmap[pin->power_domain];
enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
@@ -108,9 +110,10 @@ static const u32 cv1800b_eth_oc_map[] = {
17800
};
-static int cv1800b_get_oc_map(struct cv1800_pin *pin, const u32 *psmap,
+static int cv1800b_get_oc_map(const struct sophgo_pin *sp, const u32 *psmap,
const u32 **map)
{
+ const struct cv1800_pin *pin = sophgo_to_cv1800_pin(sp);
enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
u32 pstate = psmap[pin->power_domain];
@@ -153,9 +156,10 @@ static const u32 cv1800b_18od33_3v3_schmitt_map[] = {
1100000
};
-static int cv1800b_get_schmitt_map(struct cv1800_pin *pin, const u32 *psmap,
+static int cv1800b_get_schmitt_map(const struct sophgo_pin *sp, const u32 *psmap,
const u32 **map)
{
+ const struct cv1800_pin *pin = sophgo_to_cv1800_pin(sp);
enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
u32 pstate = psmap[pin->power_domain];
@@ -177,11 +181,11 @@ static int cv1800b_get_schmitt_map(struct cv1800_pin *pin, const u32 *psmap,
return -ENOTSUPP;
}
-static const struct cv1800_vddio_cfg_ops cv1800b_vddio_cfg_ops = {
+static const struct sophgo_vddio_cfg_ops cv1800b_vddio_cfg_ops = {
.get_pull_up = cv1800b_get_pull_up,
.get_pull_down = cv1800b_get_pull_down,
.get_oc_map = cv1800b_get_oc_map,
- .get_schmitt_map = cv1800b_get_schmitt_map,
+ .get_schmitt_map = cv1800b_get_schmitt_map,
};
static const struct pinctrl_pin_desc cv1800b_pins[] = {
@@ -433,13 +437,18 @@ static const struct cv1800_pin cv1800b_pin_data[ARRAY_SIZE(cv1800b_pins)] = {
CV1800_PINCONF_AREA_SYS, 0x120, 5),
};
-static const struct cv1800_pinctrl_data cv1800b_pindata = {
+static const struct sophgo_pinctrl_data cv1800b_pindata = {
.pins = cv1800b_pins,
.pindata = cv1800b_pin_data,
.pdnames = cv1800b_power_domain_desc,
.vddio_ops = &cv1800b_vddio_cfg_ops,
+ .cfg_ops = &cv1800_cfg_ops,
+ .pctl_ops = &cv1800_pctrl_ops,
+ .pmx_ops = &cv1800_pmx_ops,
+ .pconf_ops = &cv1800_pconf_ops,
.npins = ARRAY_SIZE(cv1800b_pins),
- .npd = ARRAY_SIZE(cv1800b_power_domain_desc),
+ .npds = ARRAY_SIZE(cv1800b_power_domain_desc),
+ .pinsize = sizeof(struct cv1800_pin),
};
static const struct of_device_id cv1800b_pinctrl_ids[] = {
@@ -449,7 +458,7 @@ static const struct of_device_id cv1800b_pinctrl_ids[] = {
MODULE_DEVICE_TABLE(of, cv1800b_pinctrl_ids);
static struct platform_driver cv1800b_pinctrl_driver = {
- .probe = cv1800_pinctrl_probe,
+ .probe = sophgo_pinctrl_probe,
.driver = {
.name = "cv1800b-pinctrl",
.suppress_bind_attrs = true,
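Editor's note on the cv1800b changes above: the per-SoC callbacks now receive a generic struct sophgo_pin and recover the family-specific struct cv1800_pin with sophgo_to_cv1800_pin(), a container_of() wrapper defined in the pinctrl-cv18xx.h hunk further down. The standalone sketch below illustrates that embedded-base-struct pattern with hypothetical fake_* types; it is not the driver's code.

/*
 * Illustration of the pattern behind sophgo_to_cv1800_pin(): generic code
 * passes around the embedded member, and family code recovers the outer
 * struct with a container_of()-style pointer adjustment.
 */
#include <stddef.h>
#include <stdio.h>

struct fake_sophgo_pin {
	unsigned int id;
	unsigned int flags;
};

struct fake_cv1800_pin {
	struct fake_sophgo_pin pin;	/* generic part handed to common code */
	unsigned int power_domain;
};

#define fake_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define fake_to_cv1800_pin(_pin) \
	fake_container_of(_pin, struct fake_cv1800_pin, pin)

int main(void)
{
	struct fake_cv1800_pin p = {
		.pin = { .id = 42, .flags = 0 },
		.power_domain = 3,
	};
	struct fake_sophgo_pin *sp = &p.pin;	/* what the generic layer sees */
	struct fake_cv1800_pin *cv = fake_to_cv1800_pin(sp);

	printf("pin %u lives in power domain %u\n", cv->pin.id, cv->power_domain);
	return 0;
}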
diff --git a/drivers/pinctrl/sophgo/pinctrl-cv1812h.c b/drivers/pinctrl/sophgo/pinctrl-cv1812h.c
index 5632290b46fa..881a4d7ef589 100644
--- a/drivers/pinctrl/sophgo/pinctrl-cv1812h.c
+++ b/drivers/pinctrl/sophgo/pinctrl-cv1812h.c
@@ -40,8 +40,9 @@ static const char *const cv1812h_power_domain_desc[] = {
[VDDIO_VIVO] = "VDDIO_VIVO",
};
-static int cv1812h_get_pull_up(struct cv1800_pin *pin, const u32 *psmap)
+static int cv1812h_get_pull_up(const struct sophgo_pin *sp, const u32 *psmap)
{
+ const struct cv1800_pin *pin = sophgo_to_cv1800_pin(sp);
u32 pstate = psmap[pin->power_domain];
enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
@@ -60,8 +61,9 @@ static int cv1812h_get_pull_up(struct cv1800_pin *pin, const u32 *psmap)
return -ENOTSUPP;
}
-static int cv1812h_get_pull_down(struct cv1800_pin *pin, const u32 *psmap)
+static int cv1812h_get_pull_down(const struct sophgo_pin *sp, const u32 *psmap)
{
+ const struct cv1800_pin *pin = sophgo_to_cv1800_pin(sp);
u32 pstate = psmap[pin->power_domain];
enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
@@ -114,9 +116,10 @@ static const u32 cv1812h_eth_oc_map[] = {
17800
};
-static int cv1812h_get_oc_map(struct cv1800_pin *pin, const u32 *psmap,
+static int cv1812h_get_oc_map(const struct sophgo_pin *sp, const u32 *psmap,
const u32 **map)
{
+ const struct cv1800_pin *pin = sophgo_to_cv1800_pin(sp);
enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
u32 pstate = psmap[pin->power_domain];
@@ -159,9 +162,10 @@ static const u32 cv1812h_18od33_3v3_schmitt_map[] = {
1100000
};
-static int cv1812h_get_schmitt_map(struct cv1800_pin *pin, const u32 *psmap,
+static int cv1812h_get_schmitt_map(const struct sophgo_pin *sp, const u32 *psmap,
const u32 **map)
{
+ const struct cv1800_pin *pin = sophgo_to_cv1800_pin(sp);
enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
u32 pstate = psmap[pin->power_domain];
@@ -183,11 +187,11 @@ static int cv1812h_get_schmitt_map(struct cv1800_pin *pin, const u32 *psmap,
return -ENOTSUPP;
}
-static const struct cv1800_vddio_cfg_ops cv1812h_vddio_cfg_ops = {
+static const struct sophgo_vddio_cfg_ops cv1812h_vddio_cfg_ops = {
.get_pull_up = cv1812h_get_pull_up,
.get_pull_down = cv1812h_get_pull_down,
.get_oc_map = cv1812h_get_oc_map,
- .get_schmitt_map = cv1812h_get_schmitt_map,
+ .get_schmitt_map = cv1812h_get_schmitt_map,
};
static const struct pinctrl_pin_desc cv1812h_pins[] = {
@@ -742,13 +746,18 @@ static const struct cv1800_pin cv1812h_pin_data[ARRAY_SIZE(cv1812h_pins)] = {
CV1800_PINCONF_AREA_RTC, 0x028),
};
-static const struct cv1800_pinctrl_data cv1812h_pindata = {
+static const struct sophgo_pinctrl_data cv1812h_pindata = {
.pins = cv1812h_pins,
.pindata = cv1812h_pin_data,
.pdnames = cv1812h_power_domain_desc,
.vddio_ops = &cv1812h_vddio_cfg_ops,
+ .cfg_ops = &cv1800_cfg_ops,
+ .pctl_ops = &cv1800_pctrl_ops,
+ .pmx_ops = &cv1800_pmx_ops,
+ .pconf_ops = &cv1800_pconf_ops,
.npins = ARRAY_SIZE(cv1812h_pins),
- .npd = ARRAY_SIZE(cv1812h_power_domain_desc),
+ .npds = ARRAY_SIZE(cv1812h_power_domain_desc),
+ .pinsize = sizeof(struct cv1800_pin),
};
static const struct of_device_id cv1812h_pinctrl_ids[] = {
@@ -758,7 +767,7 @@ static const struct of_device_id cv1812h_pinctrl_ids[] = {
MODULE_DEVICE_TABLE(of, cv1812h_pinctrl_ids);
static struct platform_driver cv1812h_pinctrl_driver = {
- .probe = cv1800_pinctrl_probe,
+ .probe = sophgo_pinctrl_probe,
.driver = {
.name = "cv1812h-pinctrl",
.suppress_bind_attrs = true,
diff --git a/drivers/pinctrl/sophgo/pinctrl-cv18xx.c b/drivers/pinctrl/sophgo/pinctrl-cv18xx.c
index 57f2674e75d6..c3a2dcf71f2a 100644
--- a/drivers/pinctrl/sophgo/pinctrl-cv18xx.c
+++ b/drivers/pinctrl/sophgo/pinctrl-cv18xx.c
@@ -15,8 +15,6 @@
#include <linux/seq_file.h>
#include <linux/spinlock.h>
-#include <linux/pinctrl/consumer.h>
-#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinctrl.h>
@@ -24,35 +22,15 @@
#include <dt-bindings/pinctrl/pinctrl-cv18xx.h>
-#include "../core.h"
#include "../pinctrl-utils.h"
-#include "../pinconf.h"
#include "../pinmux.h"
#include "pinctrl-cv18xx.h"
-struct cv1800_pinctrl {
- struct device *dev;
- struct pinctrl_dev *pctl_dev;
- const struct cv1800_pinctrl_data *data;
- struct pinctrl_desc pdesc;
+struct cv1800_priv {
u32 *power_cfg;
-
- struct mutex mutex;
- raw_spinlock_t lock;
-
void __iomem *regs[2];
};
-struct cv1800_pin_mux_config {
- struct cv1800_pin *pin;
- u32 config;
-};
-
-static unsigned int cv1800_dt_get_pin(u32 value)
-{
- return value & GENMASK(15, 0);
-}
-
static unsigned int cv1800_dt_get_pin_mux(u32 value)
{
return (value >> 16) & GENMASK(7, 0);
@@ -66,40 +44,28 @@ static unsigned int cv1800_dt_get_pin_mux2(u32 value)
#define cv1800_pinctrl_get_component_addr(pctrl, _comp) \
((pctrl)->regs[(_comp)->area] + (_comp)->offset)
-static int cv1800_cmp_pin(const void *key, const void *pivot)
-{
- const struct cv1800_pin *pin = pivot;
- int pin_id = (long)key;
- int pivid = pin->pin;
-
- return pin_id - pivid;
-}
-
-static int cv1800_set_power_cfg(struct cv1800_pinctrl *pctrl,
+static int cv1800_set_power_cfg(struct sophgo_pinctrl *pctrl,
u8 domain, u32 cfg)
{
- if (domain >= pctrl->data->npd)
+ struct cv1800_priv *priv = pctrl->priv_ctrl;
+
+ if (domain >= pctrl->data->npds)
return -ENOTSUPP;
- if (pctrl->power_cfg[domain] && pctrl->power_cfg[domain] != cfg)
+ if (priv->power_cfg[domain] && priv->power_cfg[domain] != cfg)
return -EINVAL;
- pctrl->power_cfg[domain] = cfg;
+ priv->power_cfg[domain] = cfg;
return 0;
}
-static int cv1800_get_power_cfg(struct cv1800_pinctrl *pctrl,
+static int cv1800_get_power_cfg(struct sophgo_pinctrl *pctrl,
u8 domain)
{
- return pctrl->power_cfg[domain];
-}
+ struct cv1800_priv *priv = pctrl->priv_ctrl;
-static struct cv1800_pin *cv1800_get_pin(struct cv1800_pinctrl *pctrl,
- unsigned long pin)
-{
- return bsearch((void *)pin, pctrl->data->pindata, pctrl->data->npins,
- sizeof(struct cv1800_pin), cv1800_cmp_pin);
+ return priv->power_cfg[domain];
}
#define PIN_BGA_ID_OFFSET 8
@@ -112,7 +78,7 @@ static const char *const io_type_desc[] = {
"ETH"
};
-static const char *cv1800_get_power_cfg_desc(struct cv1800_pinctrl *pctrl,
+static const char *cv1800_get_power_cfg_desc(struct sophgo_pinctrl *pctrl,
u8 domain)
{
return pctrl->data->pdnames[domain];
@@ -121,53 +87,57 @@ static const char *cv1800_get_power_cfg_desc(struct cv1800_pinctrl *pctrl,
static void cv1800_pctrl_dbg_show(struct pinctrl_dev *pctldev,
struct seq_file *seq, unsigned int pin_id)
{
- struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- struct cv1800_pin *pin = cv1800_get_pin(pctrl, pin_id);
+ struct sophgo_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct cv1800_priv *priv = pctrl->priv_ctrl;
+ const struct sophgo_pin *sp = sophgo_get_pin(pctrl, pin_id);
+ const struct cv1800_pin *pin = sophgo_to_cv1800_pin(sp);
enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+ u32 pin_hwid = pin->pin.id;
u32 value;
void __iomem *reg;
- if (pin->pin >> PIN_BGA_ID_OFFSET)
+ if (pin_hwid >> PIN_BGA_ID_OFFSET)
seq_printf(seq, "pos: %c%u ",
- 'A' + (pin->pin >> PIN_BGA_ID_OFFSET) - 1,
- pin->pin & PIN_BGA_ID_MASK);
+ 'A' + (pin_hwid >> PIN_BGA_ID_OFFSET) - 1,
+ pin_hwid & PIN_BGA_ID_MASK);
else
- seq_printf(seq, "pos: %u ", pin->pin);
+ seq_printf(seq, "pos: %u ", pin_hwid);
seq_printf(seq, "power-domain: %s ",
cv1800_get_power_cfg_desc(pctrl, pin->power_domain));
seq_printf(seq, "type: %s ", io_type_desc[type]);
- reg = cv1800_pinctrl_get_component_addr(pctrl, &pin->mux);
+ reg = cv1800_pinctrl_get_component_addr(priv, &pin->mux);
value = readl(reg);
seq_printf(seq, "mux: 0x%08x ", value);
- if (pin->flags & CV1800_PIN_HAVE_MUX2) {
- reg = cv1800_pinctrl_get_component_addr(pctrl, &pin->mux2);
+ if (pin->pin.flags & CV1800_PIN_HAVE_MUX2) {
+ reg = cv1800_pinctrl_get_component_addr(priv, &pin->mux2);
value = readl(reg);
seq_printf(seq, "mux2: 0x%08x ", value);
}
if (type == IO_TYPE_1V8_ONLY || type == IO_TYPE_1V8_OR_3V3) {
- reg = cv1800_pinctrl_get_component_addr(pctrl, &pin->conf);
+ reg = cv1800_pinctrl_get_component_addr(priv, &pin->conf);
value = readl(reg);
seq_printf(seq, "conf: 0x%08x ", value);
}
}
-static int cv1800_verify_pinmux_config(const struct cv1800_pin_mux_config *config)
+static int cv1800_verify_pinmux_config(const struct sophgo_pin_mux_config *config)
{
+ struct cv1800_pin *pin = sophgo_to_cv1800_pin(config->pin);
unsigned int mux = cv1800_dt_get_pin_mux(config->config);
unsigned int mux2 = cv1800_dt_get_pin_mux2(config->config);
- if (mux > config->pin->mux.max)
+ if (mux > pin->mux.max)
return -EINVAL;
- if (config->pin->flags & CV1800_PIN_HAVE_MUX2) {
- if (mux != config->pin->mux2.pfunc)
+ if (pin->pin.flags & CV1800_PIN_HAVE_MUX2) {
+ if (mux != pin->mux2.pfunc)
return -EINVAL;
- if (mux2 > config->pin->mux2.max)
+ if (mux2 > pin->mux2.max)
return -EINVAL;
} else {
if (mux2 != PIN_MUX_INVALD)
@@ -177,9 +147,10 @@ static int cv1800_verify_pinmux_config(const struct cv1800_pin_mux_config *confi
return 0;
}
-static int cv1800_verify_pin_group(const struct cv1800_pin_mux_config *mux,
- unsigned long npins)
+static int cv1800_verify_pin_group(const struct sophgo_pin_mux_config *mux,
+ unsigned int npins)
{
+ struct cv1800_pin *pin;
enum cv1800_pin_io_type type;
u8 power_domain;
int i;
@@ -187,232 +158,78 @@ static int cv1800_verify_pin_group(const struct cv1800_pin_mux_config *mux,
if (npins == 1)
return 0;
- type = cv1800_pin_io_type(mux[0].pin);
- power_domain = mux[0].pin->power_domain;
+ pin = sophgo_to_cv1800_pin(mux[0].pin);
+ type = cv1800_pin_io_type(pin);
+ power_domain = pin->power_domain;
for (i = 0; i < npins; i++) {
- if (type != cv1800_pin_io_type(mux[i].pin) ||
- power_domain != mux[i].pin->power_domain)
+ pin = sophgo_to_cv1800_pin(mux[i].pin);
+
+ if (type != cv1800_pin_io_type(pin) ||
+ power_domain != pin->power_domain)
return -ENOTSUPP;
}
return 0;
}
-static int cv1800_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
- struct device_node *np,
- struct pinctrl_map **maps,
- unsigned int *num_maps)
+static int cv1800_dt_node_to_map_post(struct device_node *cur,
+ struct sophgo_pinctrl *pctrl,
+ struct sophgo_pin_mux_config *pinmuxs,
+ unsigned int npins)
{
- struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- struct device *dev = pctrl->dev;
- struct device_node *child;
- struct pinctrl_map *map;
- const char **grpnames;
- const char *grpname;
- int ngroups = 0;
- int nmaps = 0;
+ const struct cv1800_pin *pin = sophgo_to_cv1800_pin(pinmuxs[0].pin);
+ u32 power;
int ret;
- for_each_available_child_of_node(np, child)
- ngroups += 1;
-
- grpnames = devm_kcalloc(dev, ngroups, sizeof(*grpnames), GFP_KERNEL);
- if (!grpnames)
- return -ENOMEM;
-
- map = kcalloc(ngroups * 2, sizeof(*map), GFP_KERNEL);
- if (!map)
- return -ENOMEM;
-
- ngroups = 0;
- mutex_lock(&pctrl->mutex);
- for_each_available_child_of_node(np, child) {
- int npins = of_property_count_u32_elems(child, "pinmux");
- unsigned int *pins;
- struct cv1800_pin_mux_config *pinmuxs;
- u32 config, power;
- int i;
-
- if (npins < 1) {
- dev_err(dev, "invalid pinctrl group %pOFn.%pOFn\n",
- np, child);
- ret = -EINVAL;
- goto dt_failed;
- }
-
- grpname = devm_kasprintf(dev, GFP_KERNEL, "%pOFn.%pOFn",
- np, child);
- if (!grpname) {
- ret = -ENOMEM;
- goto dt_failed;
- }
-
- grpnames[ngroups++] = grpname;
-
- pins = devm_kcalloc(dev, npins, sizeof(*pins), GFP_KERNEL);
- if (!pins) {
- ret = -ENOMEM;
- goto dt_failed;
- }
-
- pinmuxs = devm_kcalloc(dev, npins, sizeof(*pinmuxs), GFP_KERNEL);
- if (!pinmuxs) {
- ret = -ENOMEM;
- goto dt_failed;
- }
-
- for (i = 0; i < npins; i++) {
- ret = of_property_read_u32_index(child, "pinmux",
- i, &config);
- if (ret)
- goto dt_failed;
-
- pins[i] = cv1800_dt_get_pin(config);
- pinmuxs[i].config = config;
- pinmuxs[i].pin = cv1800_get_pin(pctrl, pins[i]);
-
- if (!pinmuxs[i].pin) {
- dev_err(dev, "failed to get pin %d\n", pins[i]);
- ret = -ENODEV;
- goto dt_failed;
- }
-
- ret = cv1800_verify_pinmux_config(&pinmuxs[i]);
- if (ret) {
- dev_err(dev, "group %s pin %d is invalid\n",
- grpname, i);
- goto dt_failed;
- }
- }
-
- ret = cv1800_verify_pin_group(pinmuxs, npins);
- if (ret) {
- dev_err(dev, "group %s is invalid\n", grpname);
- goto dt_failed;
- }
-
- ret = of_property_read_u32(child, "power-source", &power);
- if (ret)
- goto dt_failed;
-
- if (!(power == PIN_POWER_STATE_3V3 || power == PIN_POWER_STATE_1V8)) {
- dev_err(dev, "group %s have unsupported power: %u\n",
- grpname, power);
- ret = -ENOTSUPP;
- goto dt_failed;
- }
-
- ret = cv1800_set_power_cfg(pctrl, pinmuxs[0].pin->power_domain,
- power);
- if (ret)
- goto dt_failed;
-
- map[nmaps].type = PIN_MAP_TYPE_MUX_GROUP;
- map[nmaps].data.mux.function = np->name;
- map[nmaps].data.mux.group = grpname;
- nmaps += 1;
-
- ret = pinconf_generic_parse_dt_config(child, pctldev,
- &map[nmaps].data.configs.configs,
- &map[nmaps].data.configs.num_configs);
- if (ret) {
- dev_err(dev, "failed to parse pin config of group %s: %d\n",
- grpname, ret);
- goto dt_failed;
- }
-
- ret = pinctrl_generic_add_group(pctldev, grpname,
- pins, npins, pinmuxs);
- if (ret < 0) {
- dev_err(dev, "failed to add group %s: %d\n", grpname, ret);
- goto dt_failed;
- }
-
- /* don't create a map if there are no pinconf settings */
- if (map[nmaps].data.configs.num_configs == 0)
- continue;
-
- map[nmaps].type = PIN_MAP_TYPE_CONFIGS_GROUP;
- map[nmaps].data.configs.group_or_pin = grpname;
- nmaps += 1;
- }
-
- ret = pinmux_generic_add_function(pctldev, np->name,
- grpnames, ngroups, NULL);
- if (ret < 0) {
- dev_err(dev, "error adding function %s: %d\n", np->name, ret);
- goto function_failed;
- }
-
- *maps = map;
- *num_maps = nmaps;
- mutex_unlock(&pctrl->mutex);
+ ret = of_property_read_u32(cur, "power-source", &power);
+ if (ret)
+ return ret;
- return 0;
+ if (!(power == PIN_POWER_STATE_3V3 || power == PIN_POWER_STATE_1V8))
+ return -ENOTSUPP;
-dt_failed:
- of_node_put(child);
-function_failed:
- pinctrl_utils_free_map(pctldev, map, nmaps);
- mutex_unlock(&pctrl->mutex);
- return ret;
+ return cv1800_set_power_cfg(pctrl, pin->power_domain, power);
}
-static const struct pinctrl_ops cv1800_pctrl_ops = {
+const struct pinctrl_ops cv1800_pctrl_ops = {
.get_groups_count = pinctrl_generic_get_group_count,
.get_group_name = pinctrl_generic_get_group_name,
.get_group_pins = pinctrl_generic_get_group_pins,
.pin_dbg_show = cv1800_pctrl_dbg_show,
- .dt_node_to_map = cv1800_pctrl_dt_node_to_map,
+ .dt_node_to_map = sophgo_pctrl_dt_node_to_map,
.dt_free_map = pinctrl_utils_free_map,
};
+EXPORT_SYMBOL_GPL(cv1800_pctrl_ops);
-static int cv1800_pmx_set_mux(struct pinctrl_dev *pctldev,
- unsigned int fsel, unsigned int gsel)
+static void cv1800_set_pinmux_config(struct sophgo_pinctrl *pctrl,
+ const struct sophgo_pin *sp, u32 config)
{
- struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- const struct group_desc *group;
- const struct cv1800_pin_mux_config *configs;
- unsigned int i;
-
- group = pinctrl_generic_get_group(pctldev, gsel);
- if (!group)
- return -EINVAL;
-
- configs = group->data;
-
- for (i = 0; i < group->grp.npins; i++) {
- const struct cv1800_pin *pin = configs[i].pin;
- u32 value = configs[i].config;
- void __iomem *reg_mux;
- void __iomem *reg_mux2;
- unsigned long flags;
- u32 mux;
- u32 mux2;
-
- reg_mux = cv1800_pinctrl_get_component_addr(pctrl, &pin->mux);
- reg_mux2 = cv1800_pinctrl_get_component_addr(pctrl, &pin->mux2);
- mux = cv1800_dt_get_pin_mux(value);
- mux2 = cv1800_dt_get_pin_mux2(value);
-
- raw_spin_lock_irqsave(&pctrl->lock, flags);
- writel_relaxed(mux, reg_mux);
- if (mux2 != PIN_MUX_INVALD)
- writel_relaxed(mux2, reg_mux2);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
- }
-
- return 0;
+ const struct cv1800_pin *pin = sophgo_to_cv1800_pin(sp);
+ struct cv1800_priv *priv = pctrl->priv_ctrl;
+ void __iomem *reg_mux;
+ void __iomem *reg_mux2;
+ u32 mux;
+ u32 mux2;
+
+ reg_mux = cv1800_pinctrl_get_component_addr(priv, &pin->mux);
+ reg_mux2 = cv1800_pinctrl_get_component_addr(priv, &pin->mux2);
+ mux = cv1800_dt_get_pin_mux(config);
+ mux2 = cv1800_dt_get_pin_mux2(config);
+
+ writel_relaxed(mux, reg_mux);
+ if (mux2 != PIN_MUX_INVALD)
+ writel_relaxed(mux2, reg_mux2);
}
-static const struct pinmux_ops cv1800_pmx_ops = {
+const struct pinmux_ops cv1800_pmx_ops = {
.get_functions_count = pinmux_generic_get_function_count,
.get_function_name = pinmux_generic_get_function_name,
.get_function_groups = pinmux_generic_get_function_groups,
- .set_mux = cv1800_pmx_set_mux,
+ .set_mux = sophgo_pmx_set_mux,
.strict = true,
};
+EXPORT_SYMBOL_GPL(cv1800_pmx_ops);
#define PIN_IO_PULLUP BIT(2)
#define PIN_IO_PULLDOWN BIT(3)
@@ -421,94 +238,14 @@ static const struct pinmux_ops cv1800_pmx_ops = {
#define PIN_IO_BUS_HOLD BIT(10)
#define PIN_IO_OUT_FAST_SLEW BIT(11)
-static u32 cv1800_pull_down_typical_resistor(struct cv1800_pinctrl *pctrl,
- struct cv1800_pin *pin)
-{
- return pctrl->data->vddio_ops->get_pull_down(pin, pctrl->power_cfg);
-}
-
-static u32 cv1800_pull_up_typical_resistor(struct cv1800_pinctrl *pctrl,
- struct cv1800_pin *pin)
-{
- return pctrl->data->vddio_ops->get_pull_up(pin, pctrl->power_cfg);
-}
-
-static int cv1800_pinctrl_oc2reg(struct cv1800_pinctrl *pctrl,
- struct cv1800_pin *pin, u32 target)
-{
- const u32 *map;
- int i, len;
-
- len = pctrl->data->vddio_ops->get_oc_map(pin, pctrl->power_cfg, &map);
- if (len < 0)
- return len;
-
- for (i = 0; i < len; i++) {
- if (map[i] >= target)
- return i;
- }
-
- return -EINVAL;
-}
-
-static int cv1800_pinctrl_reg2oc(struct cv1800_pinctrl *pctrl,
- struct cv1800_pin *pin, u32 reg)
-{
- const u32 *map;
- int len;
-
- len = pctrl->data->vddio_ops->get_oc_map(pin, pctrl->power_cfg, &map);
- if (len < 0)
- return len;
-
- if (reg >= len)
- return -EINVAL;
-
- return map[reg];
-}
-
-static int cv1800_pinctrl_schmitt2reg(struct cv1800_pinctrl *pctrl,
- struct cv1800_pin *pin, u32 target)
-{
- const u32 *map;
- int i, len;
-
- len = pctrl->data->vddio_ops->get_schmitt_map(pin, pctrl->power_cfg,
- &map);
- if (len < 0)
- return len;
-
- for (i = 0; i < len; i++) {
- if (map[i] == target)
- return i;
- }
-
- return -EINVAL;
-}
-
-static int cv1800_pinctrl_reg2schmitt(struct cv1800_pinctrl *pctrl,
- struct cv1800_pin *pin, u32 reg)
-{
- const u32 *map;
- int len;
-
- len = pctrl->data->vddio_ops->get_schmitt_map(pin, pctrl->power_cfg,
- &map);
- if (len < 0)
- return len;
-
- if (reg >= len)
- return -EINVAL;
-
- return map[reg];
-}
-
static int cv1800_pconf_get(struct pinctrl_dev *pctldev,
unsigned int pin_id, unsigned long *config)
{
- struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct sophgo_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct cv1800_priv *priv = pctrl->priv_ctrl;
int param = pinconf_to_config_param(*config);
- struct cv1800_pin *pin = cv1800_get_pin(pctrl, pin_id);
+ const struct sophgo_pin *sp = sophgo_get_pin(pctrl, pin_id);
+ const struct cv1800_pin *pin = sophgo_to_cv1800_pin(sp);
enum cv1800_pin_io_type type;
u32 value;
u32 arg;
@@ -522,28 +259,28 @@ static int cv1800_pconf_get(struct pinctrl_dev *pctldev,
if (type == IO_TYPE_ETH || type == IO_TYPE_AUDIO)
return -ENOTSUPP;
- value = readl(cv1800_pinctrl_get_component_addr(pctrl, &pin->conf));
+ value = readl(cv1800_pinctrl_get_component_addr(priv, &pin->conf));
switch (param) {
case PIN_CONFIG_BIAS_PULL_DOWN:
enabled = FIELD_GET(PIN_IO_PULLDOWN, value);
- arg = cv1800_pull_down_typical_resistor(pctrl, pin);
+ arg = sophgo_pinctrl_typical_pull_down(pctrl, sp, priv->power_cfg);
break;
case PIN_CONFIG_BIAS_PULL_UP:
enabled = FIELD_GET(PIN_IO_PULLUP, value);
- arg = cv1800_pull_up_typical_resistor(pctrl, pin);
+ arg = sophgo_pinctrl_typical_pull_up(pctrl, sp, priv->power_cfg);
break;
case PIN_CONFIG_DRIVE_STRENGTH_UA:
enabled = true;
arg = FIELD_GET(PIN_IO_DRIVE, value);
- ret = cv1800_pinctrl_reg2oc(pctrl, pin, arg);
+ ret = sophgo_pinctrl_reg2oc(pctrl, sp, priv->power_cfg, arg);
if (ret < 0)
return ret;
arg = ret;
break;
case PIN_CONFIG_INPUT_SCHMITT_UV:
arg = FIELD_GET(PIN_IO_SCHMITT, value);
- ret = cv1800_pinctrl_reg2schmitt(pctrl, pin, arg);
+ ret = sophgo_pinctrl_reg2schmitt(pctrl, sp, priv->power_cfg, arg);
if (ret < 0)
return ret;
arg = ret;
@@ -570,14 +307,16 @@ static int cv1800_pconf_get(struct pinctrl_dev *pctldev,
return enabled ? 0 : -EINVAL;
}
-static int cv1800_pinconf_compute_config(struct cv1800_pinctrl *pctrl,
- struct cv1800_pin *pin,
+static int cv1800_pinconf_compute_config(struct sophgo_pinctrl *pctrl,
+ const struct sophgo_pin *sp,
unsigned long *configs,
unsigned int num_configs,
- u32 *value)
+ u32 *value, u32 *mask)
{
+ struct cv1800_priv *priv = pctrl->priv_ctrl;
+ const struct cv1800_pin *pin = sophgo_to_cv1800_pin(sp);
int i;
- u32 v = 0;
+ u32 v = 0, m = 0;
enum cv1800_pin_io_type type;
int ret;
@@ -596,24 +335,30 @@ static int cv1800_pinconf_compute_config(struct cv1800_pinctrl *pctrl,
case PIN_CONFIG_BIAS_PULL_DOWN:
v &= ~PIN_IO_PULLDOWN;
v |= FIELD_PREP(PIN_IO_PULLDOWN, arg);
+ m |= PIN_IO_PULLDOWN;
break;
case PIN_CONFIG_BIAS_PULL_UP:
v &= ~PIN_IO_PULLUP;
v |= FIELD_PREP(PIN_IO_PULLUP, arg);
+ m |= PIN_IO_PULLUP;
break;
case PIN_CONFIG_DRIVE_STRENGTH_UA:
- ret = cv1800_pinctrl_oc2reg(pctrl, pin, arg);
+ ret = sophgo_pinctrl_oc2reg(pctrl, sp,
+ priv->power_cfg, arg);
if (ret < 0)
return ret;
v &= ~PIN_IO_DRIVE;
v |= FIELD_PREP(PIN_IO_DRIVE, ret);
+ m |= PIN_IO_DRIVE;
break;
case PIN_CONFIG_INPUT_SCHMITT_UV:
- ret = cv1800_pinctrl_schmitt2reg(pctrl, pin, arg);
+ ret = sophgo_pinctrl_schmitt2reg(pctrl, sp,
+ priv->power_cfg, arg);
if (ret < 0)
return ret;
v &= ~PIN_IO_SCHMITT;
v |= FIELD_PREP(PIN_IO_SCHMITT, ret);
+ m |= PIN_IO_SCHMITT;
break;
case PIN_CONFIG_POWER_SOURCE:
/* Ignore power source as it is always fixed */
@@ -621,10 +366,12 @@ static int cv1800_pinconf_compute_config(struct cv1800_pinctrl *pctrl,
case PIN_CONFIG_SLEW_RATE:
v &= ~PIN_IO_OUT_FAST_SLEW;
v |= FIELD_PREP(PIN_IO_OUT_FAST_SLEW, arg);
+ m |= PIN_IO_OUT_FAST_SLEW;
break;
case PIN_CONFIG_BIAS_BUS_HOLD:
v &= ~PIN_IO_BUS_HOLD;
v |= FIELD_PREP(PIN_IO_BUS_HOLD, arg);
+ m |= PIN_IO_BUS_HOLD;
break;
default:
return -ENOTSUPP;
@@ -632,134 +379,73 @@ static int cv1800_pinconf_compute_config(struct cv1800_pinctrl *pctrl,
}
*value = v;
+ *mask = m;
return 0;
}
-static int cv1800_pin_set_config(struct cv1800_pinctrl *pctrl,
- unsigned int pin_id,
- u32 value)
+static int cv1800_set_pinconf_config(struct sophgo_pinctrl *pctrl,
+ const struct sophgo_pin *sp,
+ u32 value, u32 mask)
{
- struct cv1800_pin *pin = cv1800_get_pin(pctrl, pin_id);
- unsigned long flags;
+ struct cv1800_priv *priv = pctrl->priv_ctrl;
+ struct cv1800_pin *pin = sophgo_to_cv1800_pin(sp);
void __iomem *addr;
+ u32 reg;
- if (!pin)
- return -EINVAL;
-
- addr = cv1800_pinctrl_get_component_addr(pctrl, &pin->conf);
+ addr = cv1800_pinctrl_get_component_addr(priv, &pin->conf);
- raw_spin_lock_irqsave(&pctrl->lock, flags);
- writel(value, addr);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ reg = readl(addr);
+ reg &= ~mask;
+ reg |= value;
+ writel(reg, addr);
return 0;
}
-static int cv1800_pconf_set(struct pinctrl_dev *pctldev,
- unsigned int pin_id, unsigned long *configs,
- unsigned int num_configs)
-{
- struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- struct cv1800_pin *pin = cv1800_get_pin(pctrl, pin_id);
- u32 value;
-
- if (!pin)
- return -ENODEV;
-
- if (cv1800_pinconf_compute_config(pctrl, pin,
- configs, num_configs, &value))
- return -ENOTSUPP;
-
- return cv1800_pin_set_config(pctrl, pin_id, value);
-}
-
-static int cv1800_pconf_group_set(struct pinctrl_dev *pctldev,
- unsigned int gsel,
- unsigned long *configs,
- unsigned int num_configs)
-{
- struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- const struct group_desc *group;
- const struct cv1800_pin_mux_config *pinmuxs;
- u32 value;
- int i;
-
- group = pinctrl_generic_get_group(pctldev, gsel);
- if (!group)
- return -EINVAL;
-
- pinmuxs = group->data;
-
- if (cv1800_pinconf_compute_config(pctrl, pinmuxs[0].pin,
- configs, num_configs, &value))
- return -ENOTSUPP;
-
- for (i = 0; i < group->grp.npins; i++)
- cv1800_pin_set_config(pctrl, group->grp.pins[i], value);
-
- return 0;
-}
-
-static const struct pinconf_ops cv1800_pconf_ops = {
+const struct pinconf_ops cv1800_pconf_ops = {
.pin_config_get = cv1800_pconf_get,
- .pin_config_set = cv1800_pconf_set,
- .pin_config_group_set = cv1800_pconf_group_set,
+ .pin_config_set = sophgo_pconf_set,
+ .pin_config_group_set = sophgo_pconf_group_set,
.is_generic = true,
};
+EXPORT_SYMBOL_GPL(cv1800_pconf_ops);
-int cv1800_pinctrl_probe(struct platform_device *pdev)
+static int cv1800_pinctrl_init(struct platform_device *pdev,
+ struct sophgo_pinctrl *pctrl)
{
- struct device *dev = &pdev->dev;
- struct cv1800_pinctrl *pctrl;
- const struct cv1800_pinctrl_data *pctrl_data;
- int ret;
-
- pctrl_data = device_get_match_data(dev);
- if (!pctrl_data)
- return -ENODEV;
+ const struct sophgo_pinctrl_data *pctrl_data = pctrl->data;
+ struct cv1800_priv *priv;
- if (pctrl_data->npins == 0 || pctrl_data->npd == 0)
- return dev_err_probe(dev, -EINVAL, "invalid pin data\n");
-
- pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL);
- if (!pctrl)
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct cv1800_priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- pctrl->power_cfg = devm_kcalloc(dev, pctrl_data->npd,
- sizeof(u32), GFP_KERNEL);
- if (!pctrl->power_cfg)
+ priv->power_cfg = devm_kcalloc(&pdev->dev, pctrl_data->npds,
+ sizeof(u32), GFP_KERNEL);
+ if (!priv->power_cfg)
return -ENOMEM;
- pctrl->regs[0] = devm_platform_ioremap_resource_byname(pdev, "sys");
- if (IS_ERR(pctrl->regs[0]))
- return PTR_ERR(pctrl->regs[0]);
-
- pctrl->regs[1] = devm_platform_ioremap_resource_byname(pdev, "rtc");
- if (IS_ERR(pctrl->regs[1]))
- return PTR_ERR(pctrl->regs[1]);
-
- pctrl->pdesc.name = dev_name(dev);
- pctrl->pdesc.pins = pctrl_data->pins;
- pctrl->pdesc.npins = pctrl_data->npins;
- pctrl->pdesc.pctlops = &cv1800_pctrl_ops;
- pctrl->pdesc.pmxops = &cv1800_pmx_ops;
- pctrl->pdesc.confops = &cv1800_pconf_ops;
- pctrl->pdesc.owner = THIS_MODULE;
-
- pctrl->data = pctrl_data;
- pctrl->dev = dev;
- raw_spin_lock_init(&pctrl->lock);
- mutex_init(&pctrl->mutex);
+ priv->regs[0] = devm_platform_ioremap_resource_byname(pdev, "sys");
+ if (IS_ERR(priv->regs[0]))
+ return PTR_ERR(priv->regs[0]);
- platform_set_drvdata(pdev, pctrl);
+ priv->regs[1] = devm_platform_ioremap_resource_byname(pdev, "rtc");
+ if (IS_ERR(priv->regs[1]))
+ return PTR_ERR(priv->regs[1]);
- ret = devm_pinctrl_register_and_init(dev, &pctrl->pdesc,
- pctrl, &pctrl->pctl_dev);
- if (ret)
- return dev_err_probe(dev, ret,
- "fail to register pinctrl driver\n");
+ pctrl->priv_ctrl = priv;
- return pinctrl_enable(pctrl->pctl_dev);
+ return 0;
}
-EXPORT_SYMBOL_GPL(cv1800_pinctrl_probe);
+
+const struct sophgo_cfg_ops cv1800_cfg_ops = {
+ .pctrl_init = cv1800_pinctrl_init,
+ .verify_pinmux_config = cv1800_verify_pinmux_config,
+ .verify_pin_group = cv1800_verify_pin_group,
+ .dt_node_to_map_post = cv1800_dt_node_to_map_post,
+ .compute_pinconf_config = cv1800_pinconf_compute_config,
+ .set_pinconf_config = cv1800_set_pinconf_config,
+ .set_pinmux_config = cv1800_set_pinmux_config,
+};
+EXPORT_SYMBOL_GPL(cv1800_cfg_ops);
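Editor's note: the dt_node_to_map, set_mux and pinconf paths removed from this file now go through the shared pinctrl-sophgo-common code (sophgo_pctrl_dt_node_to_map, sophgo_pmx_set_mux, sophgo_pconf_set), which is not part of the hunks shown here. The sketch below is therefore only a guess at the dispatch shape, assuming the common layer walks the group's pins and calls back through the family's cfg_ops; every fake_* name is a stand-in, not an existing symbol.

/*
 * Hedged sketch of the presumed common-layer dispatch: the shared code
 * owns iteration (and, in the real driver, locking) and defers per-pin
 * register work to the family ops.  This is an assumption about
 * pinctrl-sophgo-common.c, which is not in this diff.
 */
#include <stdio.h>

struct fake_pin { unsigned int id; };

struct fake_cfg_ops {
	void (*set_pinmux_config)(const struct fake_pin *pin, unsigned int cfg);
};

static void fake_cv1800_set_pinmux(const struct fake_pin *pin, unsigned int cfg)
{
	/* The real callback writes the mux/mux2 registers; here we trace it. */
	printf("pin %u <- mux config 0x%x\n", pin->id, cfg);
}

static const struct fake_cfg_ops fake_cv1800_ops = {
	.set_pinmux_config = fake_cv1800_set_pinmux,
};

/* Shape of the common set_mux path: loop over the group, defer per-pin work. */
static void fake_common_set_mux(const struct fake_cfg_ops *ops,
				const struct fake_pin *pins,
				const unsigned int *configs, unsigned int npins)
{
	for (unsigned int i = 0; i < npins; i++)
		ops->set_pinmux_config(&pins[i], configs[i]);
}

int main(void)
{
	const struct fake_pin grp[] = { { 3 }, { 4 } };
	const unsigned int cfgs[] = { 0x00030000, 0x00010000 };

	fake_common_set_mux(&fake_cv1800_ops, grp, cfgs, 2);
	return 0;
}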
diff --git a/drivers/pinctrl/sophgo/pinctrl-cv18xx.h b/drivers/pinctrl/sophgo/pinctrl-cv18xx.h
index 1a9998abb3b7..759c0e604acf 100644
--- a/drivers/pinctrl/sophgo/pinctrl-cv18xx.h
+++ b/drivers/pinctrl/sophgo/pinctrl-cv18xx.h
@@ -8,13 +8,14 @@
#include <linux/bits.h>
#include <linux/bitfield.h>
-#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinconf.h>
+#include "pinctrl-sophgo.h"
+
enum cv1800_pin_io_type {
IO_TYPE_1V8_ONLY = 0,
IO_TYPE_1V8_OR_3V3 = 1,
@@ -49,58 +50,37 @@ struct cv1800_pinconf {
#define CV1800_PIN_FLAG_IO_TYPE(type) \
FIELD_PREP_CONST(CV1800_PIN_IO_TYPE, type)
struct cv1800_pin {
- u16 pin;
- u16 flags;
+ struct sophgo_pin pin;
u8 power_domain;
struct cv1800_pinmux mux;
struct cv1800_pinmux2 mux2;
struct cv1800_pinconf conf;
};
+#define sophgo_to_cv1800_pin(_pin) \
+ container_of((_pin), struct cv1800_pin, pin)
+
#define PIN_POWER_STATE_1V8 1800
#define PIN_POWER_STATE_3V3 3300
-/**
- * struct cv1800_vddio_cfg_ops - pin vddio operations
- *
- * @get_pull_up: get resistor for pull up;
- * @get_pull_down: get resistor for pull down.
- * @get_oc_map: get mapping for typical low level output current value to
- * register value map.
- * @get_schmitt_map: get mapping for register value to typical schmitt
- * threshold.
- */
-struct cv1800_vddio_cfg_ops {
- int (*get_pull_up)(struct cv1800_pin *pin, const u32 *psmap);
- int (*get_pull_down)(struct cv1800_pin *pin, const u32 *psmap);
- int (*get_oc_map)(struct cv1800_pin *pin, const u32 *psmap,
- const u32 **map);
- int (*get_schmitt_map)(struct cv1800_pin *pin, const u32 *psmap,
- const u32 **map);
-};
-
-struct cv1800_pinctrl_data {
- const struct pinctrl_pin_desc *pins;
- const struct cv1800_pin *pindata;
- const char * const *pdnames;
- const struct cv1800_vddio_cfg_ops *vddio_ops;
- u16 npins;
- u16 npd;
-};
-
-static inline enum cv1800_pin_io_type cv1800_pin_io_type(struct cv1800_pin *pin)
+static inline enum cv1800_pin_io_type cv1800_pin_io_type(const struct cv1800_pin *pin)
{
- return FIELD_GET(CV1800_PIN_IO_TYPE, pin->flags);
+ return FIELD_GET(CV1800_PIN_IO_TYPE, pin->pin.flags);
};
-int cv1800_pinctrl_probe(struct platform_device *pdev);
+extern const struct pinctrl_ops cv1800_pctrl_ops;
+extern const struct pinmux_ops cv1800_pmx_ops;
+extern const struct pinconf_ops cv1800_pconf_ops;
+extern const struct sophgo_cfg_ops cv1800_cfg_ops;
#define CV1800_FUNC_PIN(_id, _power_domain, _type, \
_mux_area, _mux_offset, _mux_func_max) \
{ \
- .pin = (_id), \
+ .pin = { \
+ .id = (_id), \
+ .flags = CV1800_PIN_FLAG_IO_TYPE(_type), \
+ }, \
.power_domain = (_power_domain), \
- .flags = CV1800_PIN_FLAG_IO_TYPE(_type), \
.mux = { \
.area = (_mux_area), \
.offset = (_mux_offset), \
@@ -112,9 +92,11 @@ int cv1800_pinctrl_probe(struct platform_device *pdev);
_mux_area, _mux_offset, _mux_func_max, \
_conf_area, _conf_offset) \
{ \
- .pin = (_id), \
+ .pin = { \
+ .id = (_id), \
+ .flags = CV1800_PIN_FLAG_IO_TYPE(_type), \
+ }, \
.power_domain = (_power_domain), \
- .flags = CV1800_PIN_FLAG_IO_TYPE(_type), \
.mux = { \
.area = (_mux_area), \
.offset = (_mux_offset), \
@@ -132,10 +114,12 @@ int cv1800_pinctrl_probe(struct platform_device *pdev);
_mux2_func_max, \
_conf_area, _conf_offset) \
{ \
- .pin = (_id), \
+ .pin = { \
+ .id = (_id), \
+ .flags = CV1800_PIN_FLAG_IO_TYPE(_type) | \
+ CV1800_PIN_HAVE_MUX2, \
+ }, \
.power_domain = (_power_domain), \
- .flags = CV1800_PIN_FLAG_IO_TYPE(_type) | \
- CV1800_PIN_HAVE_MUX2, \
.mux = { \
.area = (_mux_area), \
.offset = (_mux_offset), \
diff --git a/drivers/pinctrl/sophgo/pinctrl-sg2000.c b/drivers/pinctrl/sophgo/pinctrl-sg2000.c
index 63c05b4dd68f..0e303d2fa104 100644
--- a/drivers/pinctrl/sophgo/pinctrl-sg2000.c
+++ b/drivers/pinctrl/sophgo/pinctrl-sg2000.c
@@ -40,8 +40,9 @@ static const char *const sg2000_power_domain_desc[] = {
[VDDIO_VIVO] = "VDDIO_VIVO",
};
-static int sg2000_get_pull_up(struct cv1800_pin *pin, const u32 *psmap)
+static int sg2000_get_pull_up(const struct sophgo_pin *sp, const u32 *psmap)
{
+ const struct cv1800_pin *pin = sophgo_to_cv1800_pin(sp);
u32 pstate = psmap[pin->power_domain];
enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
@@ -60,8 +61,9 @@ static int sg2000_get_pull_up(struct cv1800_pin *pin, const u32 *psmap)
return -ENOTSUPP;
}
-static int sg2000_get_pull_down(struct cv1800_pin *pin, const u32 *psmap)
+static int sg2000_get_pull_down(const struct sophgo_pin *sp, const u32 *psmap)
{
+ const struct cv1800_pin *pin = sophgo_to_cv1800_pin(sp);
u32 pstate = psmap[pin->power_domain];
enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
@@ -114,9 +116,10 @@ static const u32 sg2000_eth_oc_map[] = {
17800
};
-static int sg2000_get_oc_map(struct cv1800_pin *pin, const u32 *psmap,
+static int sg2000_get_oc_map(const struct sophgo_pin *sp, const u32 *psmap,
const u32 **map)
{
+ const struct cv1800_pin *pin = sophgo_to_cv1800_pin(sp);
enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
u32 pstate = psmap[pin->power_domain];
@@ -159,9 +162,10 @@ static const u32 sg2000_18od33_3v3_schmitt_map[] = {
1100000
};
-static int sg2000_get_schmitt_map(struct cv1800_pin *pin, const u32 *psmap,
+static int sg2000_get_schmitt_map(const struct sophgo_pin *sp, const u32 *psmap,
const u32 **map)
{
+ const struct cv1800_pin *pin = sophgo_to_cv1800_pin(sp);
enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
u32 pstate = psmap[pin->power_domain];
@@ -183,11 +187,11 @@ static int sg2000_get_schmitt_map(struct cv1800_pin *pin, const u32 *psmap,
return -ENOTSUPP;
}
-static const struct cv1800_vddio_cfg_ops sg2000_vddio_cfg_ops = {
+static const struct sophgo_vddio_cfg_ops sg2000_vddio_cfg_ops = {
.get_pull_up = sg2000_get_pull_up,
.get_pull_down = sg2000_get_pull_down,
.get_oc_map = sg2000_get_oc_map,
- .get_schmitt_map = sg2000_get_schmitt_map,
+ .get_schmitt_map = sg2000_get_schmitt_map,
};
static const struct pinctrl_pin_desc sg2000_pins[] = {
@@ -742,13 +746,18 @@ static const struct cv1800_pin sg2000_pin_data[ARRAY_SIZE(sg2000_pins)] = {
CV1800_PINCONF_AREA_RTC, 0x028),
};
-static const struct cv1800_pinctrl_data sg2000_pindata = {
+static const struct sophgo_pinctrl_data sg2000_pindata = {
.pins = sg2000_pins,
.pindata = sg2000_pin_data,
.pdnames = sg2000_power_domain_desc,
.vddio_ops = &sg2000_vddio_cfg_ops,
+ .cfg_ops = &cv1800_cfg_ops,
+ .pctl_ops = &cv1800_pctrl_ops,
+ .pmx_ops = &cv1800_pmx_ops,
+ .pconf_ops = &cv1800_pconf_ops,
.npins = ARRAY_SIZE(sg2000_pins),
- .npd = ARRAY_SIZE(sg2000_power_domain_desc),
+ .npds = ARRAY_SIZE(sg2000_power_domain_desc),
+ .pinsize = sizeof(struct cv1800_pin),
};
static const struct of_device_id sg2000_pinctrl_ids[] = {
@@ -758,7 +767,7 @@ static const struct of_device_id sg2000_pinctrl_ids[] = {
MODULE_DEVICE_TABLE(of, sg2000_pinctrl_ids);
static struct platform_driver sg2000_pinctrl_driver = {
- .probe = cv1800_pinctrl_probe,
+ .probe = sophgo_pinctrl_probe,
.driver = {
.name = "sg2000-pinctrl",
.suppress_bind_attrs = true,
diff --git a/drivers/pinctrl/sophgo/pinctrl-sg2002.c b/drivers/pinctrl/sophgo/pinctrl-sg2002.c
index 5c49208dcb59..2443fff1bc00 100644
--- a/drivers/pinctrl/sophgo/pinctrl-sg2002.c
+++ b/drivers/pinctrl/sophgo/pinctrl-sg2002.c
@@ -34,8 +34,9 @@ static const char *const sg2002_power_domain_desc[] = {
[VDDIO_SD1] = "VDDIO_SD1",
};
-static int sg2002_get_pull_up(struct cv1800_pin *pin, const u32 *psmap)
+static int sg2002_get_pull_up(const struct sophgo_pin *sp, const u32 *psmap)
{
+ const struct cv1800_pin *pin = sophgo_to_cv1800_pin(sp);
u32 pstate = psmap[pin->power_domain];
enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
@@ -54,8 +55,9 @@ static int sg2002_get_pull_up(struct cv1800_pin *pin, const u32 *psmap)
return -ENOTSUPP;
}
-static int sg2002_get_pull_down(struct cv1800_pin *pin, const u32 *psmap)
+static int sg2002_get_pull_down(const struct sophgo_pin *sp, const u32 *psmap)
{
+ const struct cv1800_pin *pin = sophgo_to_cv1800_pin(sp);
u32 pstate = psmap[pin->power_domain];
enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
@@ -108,9 +110,10 @@ static const u32 sg2002_eth_oc_map[] = {
17800
};
-static int sg2002_get_oc_map(struct cv1800_pin *pin, const u32 *psmap,
+static int sg2002_get_oc_map(const struct sophgo_pin *sp, const u32 *psmap,
const u32 **map)
{
+ const struct cv1800_pin *pin = sophgo_to_cv1800_pin(sp);
enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
u32 pstate = psmap[pin->power_domain];
@@ -153,9 +156,10 @@ static const u32 sg2002_18od33_3v3_schmitt_map[] = {
1100000
};
-static int sg2002_get_schmitt_map(struct cv1800_pin *pin, const u32 *psmap,
+static int sg2002_get_schmitt_map(const struct sophgo_pin *sp, const u32 *psmap,
const u32 **map)
{
+ const struct cv1800_pin *pin = sophgo_to_cv1800_pin(sp);
enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
u32 pstate = psmap[pin->power_domain];
@@ -177,11 +181,11 @@ static int sg2002_get_schmitt_map(struct cv1800_pin *pin, const u32 *psmap,
return -ENOTSUPP;
}
-static const struct cv1800_vddio_cfg_ops sg2002_vddio_cfg_ops = {
+static const struct sophgo_vddio_cfg_ops sg2002_vddio_cfg_ops = {
.get_pull_up = sg2002_get_pull_up,
.get_pull_down = sg2002_get_pull_down,
.get_oc_map = sg2002_get_oc_map,
- .get_schmitt_map = sg2002_get_schmitt_map,
+ .get_schmitt_map = sg2002_get_schmitt_map,
};
static const struct pinctrl_pin_desc sg2002_pins[] = {
@@ -513,13 +517,18 @@ static const struct cv1800_pin sg2002_pin_data[ARRAY_SIZE(sg2002_pins)] = {
CV1800_PINCONF_AREA_SYS, 0xc84),
};
-static const struct cv1800_pinctrl_data sg2002_pindata = {
+static const struct sophgo_pinctrl_data sg2002_pindata = {
.pins = sg2002_pins,
.pindata = sg2002_pin_data,
.pdnames = sg2002_power_domain_desc,
.vddio_ops = &sg2002_vddio_cfg_ops,
+ .cfg_ops = &cv1800_cfg_ops,
+ .pctl_ops = &cv1800_pctrl_ops,
+ .pmx_ops = &cv1800_pmx_ops,
+ .pconf_ops = &cv1800_pconf_ops,
.npins = ARRAY_SIZE(sg2002_pins),
- .npd = ARRAY_SIZE(sg2002_power_domain_desc),
+ .npds = ARRAY_SIZE(sg2002_power_domain_desc),
+ .pinsize = sizeof(struct cv1800_pin),
};
static const struct of_device_id sg2002_pinctrl_ids[] = {
@@ -529,7 +538,7 @@ static const struct of_device_id sg2002_pinctrl_ids[] = {
MODULE_DEVICE_TABLE(of, sg2002_pinctrl_ids);
static struct platform_driver sg2002_pinctrl_driver = {
- .probe = cv1800_pinctrl_probe,
+ .probe = sophgo_pinctrl_probe,
.driver = {
.name = "sg2002-pinctrl",
.suppress_bind_attrs = true,
diff --git a/drivers/pinctrl/sophgo/pinctrl-sg2042-ops.c b/drivers/pinctrl/sophgo/pinctrl-sg2042-ops.c
new file mode 100644
index 000000000000..0526aa3f8438
--- /dev/null
+++ b/drivers/pinctrl/sophgo/pinctrl-sg2042-ops.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo SG2042 SoC pinctrl driver.
+ *
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "../pinctrl-utils.h"
+#include "../pinmux.h"
+
+#include "pinctrl-sg2042.h"
+
+#define PIN_IO_PULL_ONE_ENABLE BIT(0)
+#define PIN_IO_PULL_DIR_UP (BIT(1) | PIN_IO_PULL_ONE_ENABLE)
+#define PIN_IO_PULL_DIR_DOWN (0 | PIN_IO_PULL_ONE_ENABLE)
+#define PIN_IO_PULL_ONE_MASK GENMASK(1, 0)
+
+#define PIN_IO_PULL_UP BIT(2)
+#define PIN_IO_PULL_UP_DOWN BIT(3)
+#define PIN_IO_PULL_UP_MASK GENMASK(3, 2)
+
+#define PIN_IO_MUX GENMASK(5, 4)
+#define PIN_IO_DRIVE GENMASK(9, 6)
+#define PIN_IO_SCHMITT_ENABLE BIT(10)
+#define PIN_IO_OUTPUT_ENABLE BIT(11)
+
+struct sg2042_priv {
+ void __iomem *regs;
+};
+
+static u8 sg2042_dt_get_pin_mux(u32 value)
+{
+ return value >> 16;
+}
+
+static inline u32 sg2042_get_pin_reg(struct sophgo_pinctrl *pctrl,
+ const struct sophgo_pin *sp)
+{
+ struct sg2042_priv *priv = pctrl->priv_ctrl;
+ const struct sg2042_pin *pin = sophgo_to_sg2042_pin(sp);
+ void __iomem *reg = priv->regs + pin->offset;
+
+ if (sp->flags & PIN_FLAG_WRITE_HIGH)
+ return readl(reg) >> 16;
+ else
+ return readl(reg) & 0xffff;
+}
+
+static int sg2042_set_pin_reg(struct sophgo_pinctrl *pctrl,
+ const struct sophgo_pin *sp,
+ u32 value, u32 mask)
+{
+ struct sg2042_priv *priv = pctrl->priv_ctrl;
+ const struct sg2042_pin *pin = sophgo_to_sg2042_pin(sp);
+ void __iomem *reg = priv->regs + pin->offset;
+ u32 v = readl(reg);
+
+ if (sp->flags & PIN_FLAG_WRITE_HIGH) {
+ v &= ~(mask << 16);
+ v |= value << 16;
+ } else {
+ v &= ~mask;
+ v |= value;
+ }
+
+ writel(v, reg);
+
+ return 0;
+}
+
+static void sg2042_pctrl_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *seq, unsigned int pin_id)
+{
+ struct sophgo_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct sophgo_pin *sp = sophgo_get_pin(pctrl, pin_id);
+ u32 value, mux;
+
+ value = sg2042_get_pin_reg(pctrl, sp);
+ mux = FIELD_GET(PIN_IO_MUX, value);
+ seq_printf(seq, "mux:%u reg:0x%04x ", mux, value);
+}
+
+const struct pinctrl_ops sg2042_pctrl_ops = {
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
+ .pin_dbg_show = sg2042_pctrl_dbg_show,
+ .dt_node_to_map = sophgo_pctrl_dt_node_to_map,
+ .dt_free_map = pinctrl_utils_free_map,
+};
+EXPORT_SYMBOL_GPL(sg2042_pctrl_ops);
+
+static void sg2042_set_pinmux_config(struct sophgo_pinctrl *pctrl,
+ const struct sophgo_pin *sp, u32 config)
+{
+ u32 mux = sg2042_dt_get_pin_mux(config);
+
+ if (!(sp->flags & PIN_FLAG_NO_PINMUX))
+ sg2042_set_pin_reg(pctrl, sp, mux, PIN_IO_MUX);
+}
+
+const struct pinmux_ops sg2042_pmx_ops = {
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
+ .set_mux = sophgo_pmx_set_mux,
+ .strict = true,
+};
+EXPORT_SYMBOL_GPL(sg2042_pmx_ops);
+
+static int sg2042_pconf_get(struct pinctrl_dev *pctldev,
+ unsigned int pin_id, unsigned long *config)
+{
+ struct sophgo_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ int param = pinconf_to_config_param(*config);
+ const struct sophgo_pin *sp = sophgo_get_pin(pctrl, pin_id);
+ u32 value;
+ u32 arg;
+ bool enabled;
+ int ret;
+
+ if (!sp)
+ return -EINVAL;
+
+ value = sg2042_get_pin_reg(pctrl, sp);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (sp->flags & PIN_FLAG_ONLY_ONE_PULL)
+ arg = FIELD_GET(PIN_IO_PULL_ONE_ENABLE, value);
+ else
+ arg = FIELD_GET(PIN_IO_PULL_UP_MASK, value);
+ enabled = arg == 0;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (sp->flags & PIN_FLAG_ONLY_ONE_PULL) {
+ arg = FIELD_GET(PIN_IO_PULL_ONE_MASK, value);
+ enabled = arg == PIN_IO_PULL_DIR_DOWN;
+ } else {
+ enabled = FIELD_GET(PIN_IO_PULL_UP_DOWN, value) != 0;
+ }
+ arg = sophgo_pinctrl_typical_pull_down(pctrl, sp, NULL);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (sp->flags & PIN_FLAG_ONLY_ONE_PULL) {
+ arg = FIELD_GET(PIN_IO_PULL_ONE_MASK, value);
+ enabled = arg == PIN_IO_PULL_DIR_UP;
+ } else {
+ enabled = FIELD_GET(PIN_IO_PULL_UP, value) != 0;
+ }
+ arg = sophgo_pinctrl_typical_pull_up(pctrl, sp, NULL);
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH_UA:
+ enabled = FIELD_GET(PIN_IO_OUTPUT_ENABLE, value) != 0;
+ arg = FIELD_GET(PIN_IO_DRIVE, value);
+ ret = sophgo_pinctrl_reg2oc(pctrl, sp, NULL, arg);
+ if (ret < 0)
+ return ret;
+ arg = ret;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ arg = FIELD_GET(PIN_IO_SCHMITT_ENABLE, value);
+ enabled = arg != 0;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return enabled ? 0 : -EINVAL;
+}
+
+static int sg2042_pinconf_compute_config(struct sophgo_pinctrl *pctrl,
+ const struct sophgo_pin *sp,
+ unsigned long *configs,
+ unsigned int num_configs,
+ u32 *value, u32 *mask)
+{
+ int i;
+ u16 v = 0, m = 0;
+ int ret;
+
+ if (!sp)
+ return -EINVAL;
+
+ for (i = 0; i < num_configs; i++) {
+ int param = pinconf_to_config_param(configs[i]);
+ u32 arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (sp->flags & PIN_FLAG_ONLY_ONE_PULL) {
+ v &= ~PIN_IO_PULL_ONE_ENABLE;
+ m |= PIN_IO_PULL_ONE_ENABLE;
+ } else {
+ v &= ~PIN_IO_PULL_UP_MASK;
+ m |= PIN_IO_PULL_UP_MASK;
+ }
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (sp->flags & PIN_FLAG_ONLY_ONE_PULL) {
+ v &= ~PIN_IO_PULL_ONE_MASK;
+ v |= PIN_IO_PULL_DIR_DOWN;
+ m |= PIN_IO_PULL_ONE_MASK;
+ } else {
+ v |= PIN_IO_PULL_UP_DOWN;
+ m |= PIN_IO_PULL_UP_DOWN;
+ }
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (sp->flags & PIN_FLAG_ONLY_ONE_PULL) {
+ v &= ~PIN_IO_PULL_ONE_MASK;
+ v |= PIN_IO_PULL_DIR_UP;
+ m |= PIN_IO_PULL_ONE_MASK;
+ } else {
+ v |= PIN_IO_PULL_UP;
+ m |= PIN_IO_PULL_UP;
+ }
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH_UA:
+ v &= ~(PIN_IO_DRIVE | PIN_IO_OUTPUT_ENABLE);
+ if (arg != 0) {
+ ret = sophgo_pinctrl_oc2reg(pctrl, sp, NULL, arg);
+ if (ret < 0)
+ return ret;
+ if (!(sp->flags & PIN_FLAG_NO_OEX_EN))
+ v |= PIN_IO_OUTPUT_ENABLE;
+ v |= FIELD_PREP(PIN_IO_DRIVE, ret);
+ }
+ m |= PIN_IO_DRIVE | PIN_IO_OUTPUT_ENABLE;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ v |= PIN_IO_SCHMITT_ENABLE;
+ m |= PIN_IO_SCHMITT_ENABLE;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+ }
+
+ *value = v;
+ *mask = m;
+
+ return 0;
+}
+
+const struct pinconf_ops sg2042_pconf_ops = {
+ .pin_config_get = sg2042_pconf_get,
+ .pin_config_set = sophgo_pconf_set,
+ .pin_config_group_set = sophgo_pconf_group_set,
+ .is_generic = true,
+};
+EXPORT_SYMBOL_GPL(sg2042_pconf_ops);
+
+static int sophgo_pinctrl_init(struct platform_device *pdev,
+ struct sophgo_pinctrl *pctrl)
+{
+ struct sg2042_priv *priv;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->regs))
+ return PTR_ERR(priv->regs);
+
+ pctrl->priv_ctrl = priv;
+
+ return 0;
+}
+
+const struct sophgo_cfg_ops sg2042_cfg_ops = {
+ .pctrl_init = sophgo_pinctrl_init,
+ .compute_pinconf_config = sg2042_pinconf_compute_config,
+ .set_pinconf_config = sg2042_set_pin_reg,
+ .set_pinmux_config = sg2042_set_pinmux_config,
+};
+EXPORT_SYMBOL_GPL(sg2042_cfg_ops);
diff --git a/drivers/pinctrl/sophgo/pinctrl-sg2042.c b/drivers/pinctrl/sophgo/pinctrl-sg2042.c
new file mode 100644
index 000000000000..185305ac897d
--- /dev/null
+++ b/drivers/pinctrl/sophgo/pinctrl-sg2042.c
@@ -0,0 +1,655 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo SG2042 SoC pinctrl driver.
+ *
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include <dt-bindings/pinctrl/pinctrl-sg2042.h>
+
+#include "pinctrl-sg2042.h"
+
+static int sg2042_get_pull_up(const struct sophgo_pin *sp, const u32 *psmap)
+{
+ return 35000;
+}
+
+static int sg2042_get_pull_down(const struct sophgo_pin *sp, const u32 *psmap)
+{
+ return 28000;
+}
+
+static const u32 sg2042_oc_map[] = {
+ 5400, 8100, 10700, 13400,
+ 16100, 18800, 21400, 24100,
+ 26800, 29400, 32100, 34800,
+ 37400, 40100, 42800, 45400
+};
+
+static int sg2042_get_oc_map(const struct sophgo_pin *sp, const u32 *psmap,
+ const u32 **map)
+{
+ *map = sg2042_oc_map;
+ return ARRAY_SIZE(sg2042_oc_map);
+}
+
+static const struct sophgo_vddio_cfg_ops sg2042_vddio_cfg_ops = {
+ .get_pull_up = sg2042_get_pull_up,
+ .get_pull_down = sg2042_get_pull_down,
+ .get_oc_map = sg2042_get_oc_map,
+};
+
+static const struct pinctrl_pin_desc sg2042_pins[] = {
+ PINCTRL_PIN(PIN_LPC_LCLK, "lpc_lclk"),
+ PINCTRL_PIN(PIN_LPC_LFRAME, "lpc_lframe"),
+ PINCTRL_PIN(PIN_LPC_LAD0, "lpc_lad0"),
+ PINCTRL_PIN(PIN_LPC_LAD1, "lpc_lad1"),
+ PINCTRL_PIN(PIN_LPC_LAD2, "lpc_lad2"),
+ PINCTRL_PIN(PIN_LPC_LAD3, "lpc_lad3"),
+ PINCTRL_PIN(PIN_LPC_LDRQ0, "lpc_ldrq0"),
+ PINCTRL_PIN(PIN_LPC_LDRQ1, "lpc_ldrq1"),
+ PINCTRL_PIN(PIN_LPC_SERIRQ, "lpc_serirq"),
+ PINCTRL_PIN(PIN_LPC_CLKRUN, "lpc_clkrun"),
+ PINCTRL_PIN(PIN_LPC_LPME, "lpc_lpme"),
+ PINCTRL_PIN(PIN_LPC_LPCPD, "lpc_lpcpd"),
+ PINCTRL_PIN(PIN_LPC_LSMI, "lpc_lsmi"),
+ PINCTRL_PIN(PIN_PCIE0_L0_RESET, "pcie0_l0_reset"),
+ PINCTRL_PIN(PIN_PCIE0_L1_RESET, "pcie0_l1_reset"),
+ PINCTRL_PIN(PIN_PCIE0_L0_WAKEUP, "pcie0_l0_wakeup"),
+ PINCTRL_PIN(PIN_PCIE0_L1_WAKEUP, "pcie0_l1_wakeup"),
+ PINCTRL_PIN(PIN_PCIE0_L0_CLKREQ_IN, "pcie0_l0_clkreq_in"),
+ PINCTRL_PIN(PIN_PCIE0_L1_CLKREQ_IN, "pcie0_l1_clkreq_in"),
+ PINCTRL_PIN(PIN_PCIE1_L0_RESET, "pcie1_l0_reset"),
+ PINCTRL_PIN(PIN_PCIE1_L1_RESET, "pcie1_l1_reset"),
+ PINCTRL_PIN(PIN_PCIE1_L0_WAKEUP, "pcie1_l0_wakeup"),
+ PINCTRL_PIN(PIN_PCIE1_L1_WAKEUP, "pcie1_l1_wakeup"),
+ PINCTRL_PIN(PIN_PCIE1_L0_CLKREQ_IN, "pcie1_l0_clkreq_in"),
+ PINCTRL_PIN(PIN_PCIE1_L1_CLKREQ_IN, "pcie1_l1_clkreq_in"),
+ PINCTRL_PIN(PIN_SPIF0_CLK_SEL1, "spif0_clk_sel1"),
+ PINCTRL_PIN(PIN_SPIF0_CLK_SEL0, "spif0_clk_sel0"),
+ PINCTRL_PIN(PIN_SPIF0_WP, "spif0_wp"),
+ PINCTRL_PIN(PIN_SPIF0_HOLD, "spif0_hold"),
+ PINCTRL_PIN(PIN_SPIF0_SDI, "spif0_sdi"),
+ PINCTRL_PIN(PIN_SPIF0_CS, "spif0_cs"),
+ PINCTRL_PIN(PIN_SPIF0_SCK, "spif0_sck"),
+ PINCTRL_PIN(PIN_SPIF0_SDO, "spif0_sdo"),
+ PINCTRL_PIN(PIN_SPIF1_CLK_SEL1, "spif1_clk_sel1"),
+ PINCTRL_PIN(PIN_SPIF1_CLK_SEL0, "spif1_clk_sel0"),
+ PINCTRL_PIN(PIN_SPIF1_WP, "spif1_wp"),
+ PINCTRL_PIN(PIN_SPIF1_HOLD, "spif1_hold"),
+ PINCTRL_PIN(PIN_SPIF1_SDI, "spif1_sdi"),
+ PINCTRL_PIN(PIN_SPIF1_CS, "spif1_cs"),
+ PINCTRL_PIN(PIN_SPIF1_SCK, "spif1_sck"),
+ PINCTRL_PIN(PIN_SPIF1_SDO, "spif1_sdo"),
+ PINCTRL_PIN(PIN_EMMC_WP, "emmc_wp"),
+ PINCTRL_PIN(PIN_EMMC_CD, "emmc_cd"),
+ PINCTRL_PIN(PIN_EMMC_RST, "emmc_rst"),
+ PINCTRL_PIN(PIN_EMMC_PWR_EN, "emmc_pwr_en"),
+ PINCTRL_PIN(PIN_SDIO_CD, "sdio_cd"),
+ PINCTRL_PIN(PIN_SDIO_WP, "sdio_wp"),
+ PINCTRL_PIN(PIN_SDIO_RST, "sdio_rst"),
+ PINCTRL_PIN(PIN_SDIO_PWR_EN, "sdio_pwr_en"),
+ PINCTRL_PIN(PIN_RGMII0_TXD0, "rgmii0_txd0"),
+ PINCTRL_PIN(PIN_RGMII0_TXD1, "rgmii0_txd1"),
+ PINCTRL_PIN(PIN_RGMII0_TXD2, "rgmii0_txd2"),
+ PINCTRL_PIN(PIN_RGMII0_TXD3, "rgmii0_txd3"),
+ PINCTRL_PIN(PIN_RGMII0_TXCTRL, "rgmii0_txctrl"),
+ PINCTRL_PIN(PIN_RGMII0_RXD0, "rgmii0_rxd0"),
+ PINCTRL_PIN(PIN_RGMII0_RXD1, "rgmii0_rxd1"),
+ PINCTRL_PIN(PIN_RGMII0_RXD2, "rgmii0_rxd2"),
+ PINCTRL_PIN(PIN_RGMII0_RXD3, "rgmii0_rxd3"),
+ PINCTRL_PIN(PIN_RGMII0_RXCTRL, "rgmii0_rxctrl"),
+ PINCTRL_PIN(PIN_RGMII0_TXC, "rgmii0_txc"),
+ PINCTRL_PIN(PIN_RGMII0_RXC, "rgmii0_rxc"),
+ PINCTRL_PIN(PIN_RGMII0_REFCLKO, "rgmii0_refclko"),
+ PINCTRL_PIN(PIN_RGMII0_IRQ, "rgmii0_irq"),
+ PINCTRL_PIN(PIN_RGMII0_MDC, "rgmii0_mdc"),
+ PINCTRL_PIN(PIN_RGMII0_MDIO, "rgmii0_mdio"),
+ PINCTRL_PIN(PIN_PWM0, "pwm0"),
+ PINCTRL_PIN(PIN_PWM1, "pwm1"),
+ PINCTRL_PIN(PIN_PWM2, "pwm2"),
+ PINCTRL_PIN(PIN_PWM3, "pwm3"),
+ PINCTRL_PIN(PIN_FAN0, "fan0"),
+ PINCTRL_PIN(PIN_FAN1, "fan1"),
+ PINCTRL_PIN(PIN_FAN2, "fan2"),
+ PINCTRL_PIN(PIN_FAN3, "fan3"),
+ PINCTRL_PIN(PIN_IIC0_SDA, "iic0_sda"),
+ PINCTRL_PIN(PIN_IIC0_SCL, "iic0_scl"),
+ PINCTRL_PIN(PIN_IIC1_SDA, "iic1_sda"),
+ PINCTRL_PIN(PIN_IIC1_SCL, "iic1_scl"),
+ PINCTRL_PIN(PIN_IIC2_SDA, "iic2_sda"),
+ PINCTRL_PIN(PIN_IIC2_SCL, "iic2_scl"),
+ PINCTRL_PIN(PIN_IIC3_SDA, "iic3_sda"),
+ PINCTRL_PIN(PIN_IIC3_SCL, "iic3_scl"),
+ PINCTRL_PIN(PIN_UART0_TX, "uart0_tx"),
+ PINCTRL_PIN(PIN_UART0_RX, "uart0_rx"),
+ PINCTRL_PIN(PIN_UART0_RTS, "uart0_rts"),
+ PINCTRL_PIN(PIN_UART0_CTS, "uart0_cts"),
+ PINCTRL_PIN(PIN_UART1_TX, "uart1_tx"),
+ PINCTRL_PIN(PIN_UART1_RX, "uart1_rx"),
+ PINCTRL_PIN(PIN_UART1_RTS, "uart1_rts"),
+ PINCTRL_PIN(PIN_UART1_CTS, "uart1_cts"),
+ PINCTRL_PIN(PIN_UART2_TX, "uart2_tx"),
+ PINCTRL_PIN(PIN_UART2_RX, "uart2_rx"),
+ PINCTRL_PIN(PIN_UART2_RTS, "uart2_rts"),
+ PINCTRL_PIN(PIN_UART2_CTS, "uart2_cts"),
+ PINCTRL_PIN(PIN_UART3_TX, "uart3_tx"),
+ PINCTRL_PIN(PIN_UART3_RX, "uart3_rx"),
+ PINCTRL_PIN(PIN_UART3_RTS, "uart3_rts"),
+ PINCTRL_PIN(PIN_UART3_CTS, "uart3_cts"),
+ PINCTRL_PIN(PIN_SPI0_CS0, "spi0_cs0"),
+ PINCTRL_PIN(PIN_SPI0_CS1, "spi0_cs1"),
+ PINCTRL_PIN(PIN_SPI0_SDI, "spi0_sdi"),
+ PINCTRL_PIN(PIN_SPI0_SDO, "spi0_sdo"),
+ PINCTRL_PIN(PIN_SPI0_SCK, "spi0_sck"),
+ PINCTRL_PIN(PIN_SPI1_CS0, "spi1_cs0"),
+ PINCTRL_PIN(PIN_SPI1_CS1, "spi1_cs1"),
+ PINCTRL_PIN(PIN_SPI1_SDI, "spi1_sdi"),
+ PINCTRL_PIN(PIN_SPI1_SDO, "spi1_sdo"),
+ PINCTRL_PIN(PIN_SPI1_SCK, "spi1_sck"),
+ PINCTRL_PIN(PIN_JTAG0_TDO, "jtag0_tdo"),
+ PINCTRL_PIN(PIN_JTAG0_TCK, "jtag0_tck"),
+ PINCTRL_PIN(PIN_JTAG0_TDI, "jtag0_tdi"),
+ PINCTRL_PIN(PIN_JTAG0_TMS, "jtag0_tms"),
+ PINCTRL_PIN(PIN_JTAG0_TRST, "jtag0_trst"),
+ PINCTRL_PIN(PIN_JTAG0_SRST, "jtag0_srst"),
+ PINCTRL_PIN(PIN_JTAG1_TDO, "jtag1_tdo"),
+ PINCTRL_PIN(PIN_JTAG1_TCK, "jtag1_tck"),
+ PINCTRL_PIN(PIN_JTAG1_TDI, "jtag1_tdi"),
+ PINCTRL_PIN(PIN_JTAG1_TMS, "jtag1_tms"),
+ PINCTRL_PIN(PIN_JTAG1_TRST, "jtag1_trst"),
+ PINCTRL_PIN(PIN_JTAG1_SRST, "jtag1_srst"),
+ PINCTRL_PIN(PIN_JTAG2_TDO, "jtag2_tdo"),
+ PINCTRL_PIN(PIN_JTAG2_TCK, "jtag2_tck"),
+ PINCTRL_PIN(PIN_JTAG2_TDI, "jtag2_tdi"),
+ PINCTRL_PIN(PIN_JTAG2_TMS, "jtag2_tms"),
+ PINCTRL_PIN(PIN_JTAG2_TRST, "jtag2_trst"),
+ PINCTRL_PIN(PIN_JTAG2_SRST, "jtag2_srst"),
+ PINCTRL_PIN(PIN_GPIO0, "gpio0"),
+ PINCTRL_PIN(PIN_GPIO1, "gpio1"),
+ PINCTRL_PIN(PIN_GPIO2, "gpio2"),
+ PINCTRL_PIN(PIN_GPIO3, "gpio3"),
+ PINCTRL_PIN(PIN_GPIO4, "gpio4"),
+ PINCTRL_PIN(PIN_GPIO5, "gpio5"),
+ PINCTRL_PIN(PIN_GPIO6, "gpio6"),
+ PINCTRL_PIN(PIN_GPIO7, "gpio7"),
+ PINCTRL_PIN(PIN_GPIO8, "gpio8"),
+ PINCTRL_PIN(PIN_GPIO9, "gpio9"),
+ PINCTRL_PIN(PIN_GPIO10, "gpio10"),
+ PINCTRL_PIN(PIN_GPIO11, "gpio11"),
+ PINCTRL_PIN(PIN_GPIO12, "gpio12"),
+ PINCTRL_PIN(PIN_GPIO13, "gpio13"),
+ PINCTRL_PIN(PIN_GPIO14, "gpio14"),
+ PINCTRL_PIN(PIN_GPIO15, "gpio15"),
+ PINCTRL_PIN(PIN_GPIO16, "gpio16"),
+ PINCTRL_PIN(PIN_GPIO17, "gpio17"),
+ PINCTRL_PIN(PIN_GPIO18, "gpio18"),
+ PINCTRL_PIN(PIN_GPIO19, "gpio19"),
+ PINCTRL_PIN(PIN_GPIO20, "gpio20"),
+ PINCTRL_PIN(PIN_GPIO21, "gpio21"),
+ PINCTRL_PIN(PIN_GPIO22, "gpio22"),
+ PINCTRL_PIN(PIN_GPIO23, "gpio23"),
+ PINCTRL_PIN(PIN_GPIO24, "gpio24"),
+ PINCTRL_PIN(PIN_GPIO25, "gpio25"),
+ PINCTRL_PIN(PIN_GPIO26, "gpio26"),
+ PINCTRL_PIN(PIN_GPIO27, "gpio27"),
+ PINCTRL_PIN(PIN_GPIO28, "gpio28"),
+ PINCTRL_PIN(PIN_GPIO29, "gpio29"),
+ PINCTRL_PIN(PIN_GPIO30, "gpio30"),
+ PINCTRL_PIN(PIN_GPIO31, "gpio31"),
+ PINCTRL_PIN(PIN_MODE_SEL0, "mode_sel0"),
+ PINCTRL_PIN(PIN_MODE_SEL1, "mode_sel1"),
+ PINCTRL_PIN(PIN_MODE_SEL2, "mode_sel2"),
+ PINCTRL_PIN(PIN_BOOT_SEL0, "boot_sel0"),
+ PINCTRL_PIN(PIN_BOOT_SEL1, "boot_sel1"),
+ PINCTRL_PIN(PIN_BOOT_SEL2, "boot_sel2"),
+ PINCTRL_PIN(PIN_BOOT_SEL3, "boot_sel3"),
+ PINCTRL_PIN(PIN_BOOT_SEL4, "boot_sel4"),
+ PINCTRL_PIN(PIN_BOOT_SEL5, "boot_sel5"),
+ PINCTRL_PIN(PIN_BOOT_SEL6, "boot_sel6"),
+ PINCTRL_PIN(PIN_BOOT_SEL7, "boot_sel7"),
+ PINCTRL_PIN(PIN_MULTI_SCKT, "multi_sckt"),
+ PINCTRL_PIN(PIN_SCKT_ID0, "sckt_id0"),
+ PINCTRL_PIN(PIN_SCKT_ID1, "sckt_id1"),
+ PINCTRL_PIN(PIN_PLL_CLK_IN_MAIN, "pll_clk_in_main"),
+ PINCTRL_PIN(PIN_PLL_CLK_IN_DDR_L, "pll_clk_in_ddr_l"),
+ PINCTRL_PIN(PIN_PLL_CLK_IN_DDR_R, "pll_clk_in_ddr_r"),
+ PINCTRL_PIN(PIN_XTAL_32K, "xtal_32k"),
+ PINCTRL_PIN(PIN_SYS_RST, "sys_rst"),
+ PINCTRL_PIN(PIN_PWR_BUTTON, "pwr_button"),
+ PINCTRL_PIN(PIN_TEST_EN, "test_en"),
+ PINCTRL_PIN(PIN_TEST_MODE_MBIST, "test_mode_mbist"),
+ PINCTRL_PIN(PIN_TEST_MODE_SCAN, "test_mode_scan"),
+ PINCTRL_PIN(PIN_TEST_MODE_BSD, "test_mode_bsd"),
+ PINCTRL_PIN(PIN_BISR_BYP, "bisr_byp"),
+};
+
+static const struct sg2042_pin sg2042_pin_data[ARRAY_SIZE(sg2042_pins)] = {
+ SG2042_GENERAL_PIN(PIN_LPC_LCLK, 0x000,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_LPC_LFRAME, 0x000,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_LPC_LAD0, 0x004,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_LPC_LAD1, 0x004,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_LPC_LAD2, 0x008,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_LPC_LAD3, 0x008,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_LPC_LDRQ0, 0x00c,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_LPC_LDRQ1, 0x00c,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_LPC_SERIRQ, 0x010,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_LPC_CLKRUN, 0x010,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_LPC_LPME, 0x014,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_LPC_LPCPD, 0x014,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_LPC_LSMI, 0x018,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_PCIE0_L0_RESET, 0x018,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_PCIE0_L1_RESET, 0x01c,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_PCIE0_L0_WAKEUP, 0x01c,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_PCIE0_L1_WAKEUP, 0x020,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_PCIE0_L0_CLKREQ_IN, 0x020,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_PCIE0_L1_CLKREQ_IN, 0x024,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_PCIE1_L0_RESET, 0x024,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_PCIE1_L1_RESET, 0x028,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_PCIE1_L0_WAKEUP, 0x028,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_PCIE1_L1_WAKEUP, 0x02c,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_PCIE1_L0_CLKREQ_IN, 0x02c,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_PCIE1_L1_CLKREQ_IN, 0x030,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SPIF0_CLK_SEL1, 0x030,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SPIF0_CLK_SEL0, 0x034,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SPIF0_WP, 0x034,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SPIF0_HOLD, 0x038,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SPIF0_SDI, 0x038,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SPIF0_CS, 0x03c,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SPIF0_SCK, 0x03c,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SPIF0_SDO, 0x040,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SPIF1_CLK_SEL1, 0x040,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SPIF1_CLK_SEL0, 0x044,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SPIF1_WP, 0x044,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SPIF1_HOLD, 0x048,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SPIF1_SDI, 0x048,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SPIF1_CS, 0x04c,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SPIF1_SCK, 0x04c,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SPIF1_SDO, 0x050,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_EMMC_WP, 0x050,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_EMMC_CD, 0x054,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_EMMC_RST, 0x054,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_EMMC_PWR_EN, 0x058,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SDIO_CD, 0x058,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SDIO_WP, 0x05c,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SDIO_RST, 0x05c,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SDIO_PWR_EN, 0x060,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_RGMII0_TXD0, 0x060,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_RGMII0_TXD1, 0x064,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_RGMII0_TXD2, 0x064,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_RGMII0_TXD3, 0x068,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_RGMII0_TXCTRL, 0x068,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_RGMII0_RXD0, 0x06c,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_RGMII0_RXD1, 0x06c,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_RGMII0_RXD2, 0x070,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_RGMII0_RXD3, 0x070,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_RGMII0_RXCTRL, 0x074,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_RGMII0_TXC, 0x074,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_RGMII0_RXC, 0x078,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_RGMII0_REFCLKO, 0x078,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_RGMII0_IRQ, 0x07c,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_RGMII0_MDC, 0x07c,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_RGMII0_MDIO, 0x080,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_PWM0, 0x080,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_PWM1, 0x084,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_PWM2, 0x084,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_PWM3, 0x088,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_FAN0, 0x088,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_FAN1, 0x08c,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_FAN2, 0x08c,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_FAN3, 0x090,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_IIC0_SDA, 0x090,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_IIC0_SCL, 0x094,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_IIC1_SDA, 0x094,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_IIC1_SCL, 0x098,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_IIC2_SDA, 0x098,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_IIC2_SCL, 0x09c,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_IIC3_SDA, 0x09c,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_IIC3_SCL, 0x0a0,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_UART0_TX, 0x0a0,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_UART0_RX, 0x0a4,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_UART0_RTS, 0x0a4,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_UART0_CTS, 0x0a8,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_UART1_TX, 0x0a8,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_UART1_RX, 0x0ac,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_UART1_RTS, 0x0ac,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_UART1_CTS, 0x0b0,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_UART2_TX, 0x0b0,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_UART2_RX, 0x0b4,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_UART2_RTS, 0x0b4,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_UART2_CTS, 0x0b8,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_UART3_TX, 0x0b8,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_UART3_RX, 0x0bc,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_UART3_RTS, 0x0bc,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_UART3_CTS, 0x0c0,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SPI0_CS0, 0x0c0,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SPI0_CS1, 0x0c4,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SPI0_SDI, 0x0c4,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SPI0_SDO, 0x0c8,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SPI0_SCK, 0x0c8,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SPI1_CS0, 0x0cc,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SPI1_CS1, 0x0cc,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SPI1_SDI, 0x0d0,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SPI1_SDO, 0x0d0,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_SPI1_SCK, 0x0d4,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_JTAG0_TDO, 0x0d4,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_JTAG0_TCK, 0x0d8,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_JTAG0_TDI, 0x0d8,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_JTAG0_TMS, 0x0dc,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_JTAG0_TRST, 0x0dc,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_JTAG0_SRST, 0x0e0,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_JTAG1_TDO, 0x0e0,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_JTAG1_TCK, 0x0e4,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_JTAG1_TDI, 0x0e4,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_JTAG1_TMS, 0x0e8,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_JTAG1_TRST, 0x0e8,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_JTAG1_SRST, 0x0ec,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_JTAG2_TDO, 0x0ec,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_JTAG2_TCK, 0x0f0,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_JTAG2_TDI, 0x0f0,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_JTAG2_TMS, 0x0f4,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_JTAG2_TRST, 0x0f4,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_JTAG2_SRST, 0x0f8,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO0, 0x0f8,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO1, 0x0fc,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO2, 0x0fc,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO3, 0x100,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO4, 0x100,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO5, 0x104,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO6, 0x104,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO7, 0x108,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO8, 0x108,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO9, 0x10c,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO10, 0x10c,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO11, 0x110,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO12, 0x110,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO13, 0x114,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO14, 0x114,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO15, 0x118,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO16, 0x118,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO17, 0x11c,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO18, 0x11c,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO19, 0x120,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO20, 0x120,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO21, 0x124,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO22, 0x124,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO23, 0x128,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO24, 0x128,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO25, 0x12c,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO26, 0x12c,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO27, 0x130,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO28, 0x130,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO29, 0x134,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO30, 0x134,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_GPIO31, 0x138,
+ PIN_FLAG_ONLY_ONE_PULL),
+ SG2042_GENERAL_PIN(PIN_MODE_SEL0, 0x138,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL |
+ PIN_FLAG_NO_PINMUX | PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_MODE_SEL1, 0x13c,
+ PIN_FLAG_ONLY_ONE_PULL | PIN_FLAG_NO_PINMUX |
+ PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_MODE_SEL2, 0x13c,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL |
+ PIN_FLAG_NO_PINMUX | PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_BOOT_SEL0, 0x140,
+ PIN_FLAG_ONLY_ONE_PULL | PIN_FLAG_NO_PINMUX |
+ PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_BOOT_SEL1, 0x140,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL |
+ PIN_FLAG_NO_PINMUX | PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_BOOT_SEL2, 0x144,
+ PIN_FLAG_ONLY_ONE_PULL | PIN_FLAG_NO_PINMUX |
+ PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_BOOT_SEL3, 0x144,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL |
+ PIN_FLAG_NO_PINMUX | PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_BOOT_SEL4, 0x148,
+ PIN_FLAG_ONLY_ONE_PULL | PIN_FLAG_NO_PINMUX |
+ PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_BOOT_SEL5, 0x148,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL |
+ PIN_FLAG_NO_PINMUX | PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_BOOT_SEL6, 0x14c,
+ PIN_FLAG_ONLY_ONE_PULL | PIN_FLAG_NO_PINMUX |
+ PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_BOOT_SEL7, 0x14c,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL |
+ PIN_FLAG_NO_PINMUX | PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_MULTI_SCKT, 0x150,
+ PIN_FLAG_ONLY_ONE_PULL | PIN_FLAG_NO_PINMUX |
+ PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_SCKT_ID0, 0x150,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL |
+ PIN_FLAG_NO_PINMUX | PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_SCKT_ID1, 0x154,
+ PIN_FLAG_ONLY_ONE_PULL | PIN_FLAG_NO_PINMUX |
+ PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_PLL_CLK_IN_MAIN, 0x154,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL |
+ PIN_FLAG_NO_PINMUX | PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_PLL_CLK_IN_DDR_L, 0x158,
+ PIN_FLAG_ONLY_ONE_PULL | PIN_FLAG_NO_PINMUX |
+ PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_PLL_CLK_IN_DDR_R, 0x158,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL |
+ PIN_FLAG_NO_PINMUX | PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_XTAL_32K, 0x15c,
+ PIN_FLAG_ONLY_ONE_PULL | PIN_FLAG_NO_PINMUX |
+ PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_SYS_RST, 0x15c,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL |
+ PIN_FLAG_NO_PINMUX | PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_PWR_BUTTON, 0x160,
+ PIN_FLAG_ONLY_ONE_PULL | PIN_FLAG_NO_PINMUX |
+ PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_TEST_EN, 0x160,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL |
+ PIN_FLAG_NO_PINMUX | PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_TEST_MODE_MBIST, 0x164,
+ PIN_FLAG_ONLY_ONE_PULL | PIN_FLAG_NO_PINMUX |
+ PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_TEST_MODE_SCAN, 0x164,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL |
+ PIN_FLAG_NO_PINMUX | PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_TEST_MODE_BSD, 0x168,
+ PIN_FLAG_ONLY_ONE_PULL | PIN_FLAG_NO_PINMUX |
+ PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_BISR_BYP, 0x168,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_ONLY_ONE_PULL |
+ PIN_FLAG_NO_PINMUX | PIN_FLAG_NO_OEX_EN),
+};
+
+static const struct sophgo_pinctrl_data sg2042_pindata = {
+ .pins = sg2042_pins,
+ .pindata = sg2042_pin_data,
+ .vddio_ops = &sg2042_vddio_cfg_ops,
+ .cfg_ops = &sg2042_cfg_ops,
+ .pctl_ops = &sg2042_pctrl_ops,
+ .pmx_ops = &sg2042_pmx_ops,
+ .pconf_ops = &sg2042_pconf_ops,
+ .npins = ARRAY_SIZE(sg2042_pins),
+ .pinsize = sizeof(struct sg2042_pin),
+};
+
+static const struct of_device_id sg2042_pinctrl_ids[] = {
+ { .compatible = "sophgo,sg2042-pinctrl", .data = &sg2042_pindata },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sg2042_pinctrl_ids);
+
+static struct platform_driver sg2042_pinctrl_driver = {
+ .probe = sophgo_pinctrl_probe,
+ .driver = {
+ .name = "sg2042-pinctrl",
+ .suppress_bind_attrs = true,
+ .of_match_table = sg2042_pinctrl_ids,
+ },
+};
+module_platform_driver(sg2042_pinctrl_driver);
+
+MODULE_DESCRIPTION("Pinctrl driver for the SG2002 series SoC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/sophgo/pinctrl-sg2042.h b/drivers/pinctrl/sophgo/pinctrl-sg2042.h
new file mode 100644
index 000000000000..d481973fcf97
--- /dev/null
+++ b/drivers/pinctrl/sophgo/pinctrl-sg2042.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ */
+
+#ifndef _PINCTRL_SOPHGO_SG2042_H
+#define _PINCTRL_SOPHGO_SG2042_H
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf.h>
+
+#include "pinctrl-sophgo.h"
+
+#define PIN_FLAG_DEFAULT 0
+#define PIN_FLAG_WRITE_HIGH BIT(0)
+#define PIN_FLAG_ONLY_ONE_PULL BIT(1)
+#define PIN_FLAG_NO_PINMUX BIT(2)
+#define PIN_FLAG_NO_OEX_EN BIT(3)
+#define PIN_FLAG_IS_ETH BIT(4)
+
+struct sg2042_pin {
+ struct sophgo_pin pin;
+ u16 offset;
+};
+
+#define sophgo_to_sg2042_pin(_pin) \
+ container_of((_pin), struct sg2042_pin, pin)
+
+extern const struct pinctrl_ops sg2042_pctrl_ops;
+extern const struct pinmux_ops sg2042_pmx_ops;
+extern const struct pinconf_ops sg2042_pconf_ops;
+extern const struct sophgo_cfg_ops sg2042_cfg_ops;
+
+#define SG2042_GENERAL_PIN(_id, _offset, _flag) \
+ { \
+ .pin = { \
+ .id = (_id), \
+ .flags = (_flag), \
+ }, \
+ .offset = (_offset), \
+ }
+
+#endif
diff --git a/drivers/pinctrl/sophgo/pinctrl-sg2044.c b/drivers/pinctrl/sophgo/pinctrl-sg2044.c
new file mode 100644
index 000000000000..b0c46d8954ca
--- /dev/null
+++ b/drivers/pinctrl/sophgo/pinctrl-sg2044.c
@@ -0,0 +1,718 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo SG2044 SoC pinctrl driver.
+ *
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include <dt-bindings/pinctrl/pinctrl-sg2044.h>
+
+#include "pinctrl-sg2042.h"
+
+static int sg2044_get_pull_up(const struct sophgo_pin *sp, const u32 *psmap)
+{
+ return 19500;
+}
+
+static int sg2044_get_pull_down(const struct sophgo_pin *sp, const u32 *psmap)
+{
+ return 23200;
+}
+
+static const u32 sg2044_oc_map[] = {
+ 3200, 6400, 9600, 12700,
+ 15900, 19100, 22200, 25300,
+ 29500, 32700, 35900, 39000,
+ 42000, 45200, 48300, 51400
+};
+
+static int sg2044_get_oc_map(const struct sophgo_pin *sp, const u32 *psmap,
+ const u32 **map)
+{
+ *map = sg2044_oc_map;
+ return ARRAY_SIZE(sg2044_oc_map);
+}
+
+static const struct sophgo_vddio_cfg_ops sg2044_vddio_cfg_ops = {
+ .get_pull_up = sg2044_get_pull_up,
+ .get_pull_down = sg2044_get_pull_down,
+ .get_oc_map = sg2044_get_oc_map,
+};
+
+static const struct pinctrl_pin_desc sg2044_pins[] = {
+ PINCTRL_PIN(PIN_IIC0_SMBSUS_IN, "iic0_smbsus_in"),
+ PINCTRL_PIN(PIN_IIC0_SMBSUS_OUT, "iic0_smbsus_out"),
+ PINCTRL_PIN(PIN_IIC0_SMBALERT, "iic0_smbalert"),
+ PINCTRL_PIN(PIN_IIC1_SMBSUS_IN, "iic1_smbsus_in"),
+ PINCTRL_PIN(PIN_IIC1_SMBSUS_OUT, "iic1_smbsus_out"),
+ PINCTRL_PIN(PIN_IIC1_SMBALERT, "iic1_smbalert"),
+ PINCTRL_PIN(PIN_IIC2_SMBSUS_IN, "iic2_smbsus_in"),
+ PINCTRL_PIN(PIN_IIC2_SMBSUS_OUT, "iic2_smbsus_out"),
+ PINCTRL_PIN(PIN_IIC2_SMBALERT, "iic2_smbalert"),
+ PINCTRL_PIN(PIN_IIC3_SMBSUS_IN, "iic3_smbsus_in"),
+ PINCTRL_PIN(PIN_IIC3_SMBSUS_OUT, "iic3_smbsus_out"),
+ PINCTRL_PIN(PIN_IIC3_SMBALERT, "iic3_smbalert"),
+ PINCTRL_PIN(PIN_PCIE0_L0_RESET, "pcie0_l0_reset"),
+ PINCTRL_PIN(PIN_PCIE0_L1_RESET, "pcie0_l1_reset"),
+ PINCTRL_PIN(PIN_PCIE0_L0_WAKEUP, "pcie0_l0_wakeup"),
+ PINCTRL_PIN(PIN_PCIE0_L1_WAKEUP, "pcie0_l1_wakeup"),
+ PINCTRL_PIN(PIN_PCIE0_L0_CLKREQ_IN, "pcie0_l0_clkreq_in"),
+ PINCTRL_PIN(PIN_PCIE0_L1_CLKREQ_IN, "pcie0_l1_clkreq_in"),
+ PINCTRL_PIN(PIN_PCIE1_L0_RESET, "pcie1_l0_reset"),
+ PINCTRL_PIN(PIN_PCIE1_L1_RESET, "pcie1_l1_reset"),
+ PINCTRL_PIN(PIN_PCIE1_L0_WAKEUP, "pcie1_l0_wakeup"),
+ PINCTRL_PIN(PIN_PCIE1_L1_WAKEUP, "pcie1_l1_wakeup"),
+ PINCTRL_PIN(PIN_PCIE1_L0_CLKREQ_IN, "pcie1_l0_clkreq_in"),
+ PINCTRL_PIN(PIN_PCIE1_L1_CLKREQ_IN, "pcie1_l1_clkreq_in"),
+ PINCTRL_PIN(PIN_PCIE2_L0_RESET, "pcie2_l0_reset"),
+ PINCTRL_PIN(PIN_PCIE2_L1_RESET, "pcie2_l1_reset"),
+ PINCTRL_PIN(PIN_PCIE2_L0_WAKEUP, "pcie2_l0_wakeup"),
+ PINCTRL_PIN(PIN_PCIE2_L1_WAKEUP, "pcie2_l1_wakeup"),
+ PINCTRL_PIN(PIN_PCIE2_L0_CLKREQ_IN, "pcie2_l0_clkreq_in"),
+ PINCTRL_PIN(PIN_PCIE2_L1_CLKREQ_IN, "pcie2_l1_clkreq_in"),
+ PINCTRL_PIN(PIN_PCIE3_L0_RESET, "pcie3_l0_reset"),
+ PINCTRL_PIN(PIN_PCIE3_L1_RESET, "pcie3_l1_reset"),
+ PINCTRL_PIN(PIN_PCIE3_L0_WAKEUP, "pcie3_l0_wakeup"),
+ PINCTRL_PIN(PIN_PCIE3_L1_WAKEUP, "pcie3_l1_wakeup"),
+ PINCTRL_PIN(PIN_PCIE3_L0_CLKREQ_IN, "pcie3_l0_clkreq_in"),
+ PINCTRL_PIN(PIN_PCIE3_L1_CLKREQ_IN, "pcie3_l1_clkreq_in"),
+ PINCTRL_PIN(PIN_PCIE4_L0_RESET, "pcie4_l0_reset"),
+ PINCTRL_PIN(PIN_PCIE4_L1_RESET, "pcie4_l1_reset"),
+ PINCTRL_PIN(PIN_PCIE4_L0_WAKEUP, "pcie4_l0_wakeup"),
+ PINCTRL_PIN(PIN_PCIE4_L1_WAKEUP, "pcie4_l1_wakeup"),
+ PINCTRL_PIN(PIN_PCIE4_L0_CLKREQ_IN, "pcie4_l0_clkreq_in"),
+ PINCTRL_PIN(PIN_PCIE4_L1_CLKREQ_IN, "pcie4_l1_clkreq_in"),
+ PINCTRL_PIN(PIN_SPIF0_CLK_SEL1, "spif0_clk_sel1"),
+ PINCTRL_PIN(PIN_SPIF0_CLK_SEL0, "spif0_clk_sel0"),
+ PINCTRL_PIN(PIN_SPIF0_WP, "spif0_wp"),
+ PINCTRL_PIN(PIN_SPIF0_HOLD, "spif0_hold"),
+ PINCTRL_PIN(PIN_SPIF0_SDI, "spif0_sdi"),
+ PINCTRL_PIN(PIN_SPIF0_CS, "spif0_cs"),
+ PINCTRL_PIN(PIN_SPIF0_SCK, "spif0_sck"),
+ PINCTRL_PIN(PIN_SPIF0_SDO, "spif0_sdo"),
+ PINCTRL_PIN(PIN_SPIF1_CLK_SEL1, "spif1_clk_sel1"),
+ PINCTRL_PIN(PIN_SPIF1_CLK_SEL0, "spif1_clk_sel0"),
+ PINCTRL_PIN(PIN_SPIF1_WP, "spif1_wp"),
+ PINCTRL_PIN(PIN_SPIF1_HOLD, "spif1_hold"),
+ PINCTRL_PIN(PIN_SPIF1_SDI, "spif1_sdi"),
+ PINCTRL_PIN(PIN_SPIF1_CS, "spif1_cs"),
+ PINCTRL_PIN(PIN_SPIF1_SCK, "spif1_sck"),
+ PINCTRL_PIN(PIN_SPIF1_SDO, "spif1_sdo"),
+ PINCTRL_PIN(PIN_EMMC_WP, "emmc_wp"),
+ PINCTRL_PIN(PIN_EMMC_CD, "emmc_cd"),
+ PINCTRL_PIN(PIN_EMMC_RST, "emmc_rst"),
+ PINCTRL_PIN(PIN_EMMC_PWR_EN, "emmc_pwr_en"),
+ PINCTRL_PIN(PIN_SDIO_CD, "sdio_cd"),
+ PINCTRL_PIN(PIN_SDIO_WP, "sdio_wp"),
+ PINCTRL_PIN(PIN_SDIO_RST, "sdio_rst"),
+ PINCTRL_PIN(PIN_SDIO_PWR_EN, "sdio_pwr_en"),
+ PINCTRL_PIN(PIN_RGMII0_TXD0, "rgmii0_txd0"),
+ PINCTRL_PIN(PIN_RGMII0_TXD1, "rgmii0_txd1"),
+ PINCTRL_PIN(PIN_RGMII0_TXD2, "rgmii0_txd2"),
+ PINCTRL_PIN(PIN_RGMII0_TXD3, "rgmii0_txd3"),
+ PINCTRL_PIN(PIN_RGMII0_TXCTRL, "rgmii0_txctrl"),
+ PINCTRL_PIN(PIN_RGMII0_RXD0, "rgmii0_rxd0"),
+ PINCTRL_PIN(PIN_RGMII0_RXD1, "rgmii0_rxd1"),
+ PINCTRL_PIN(PIN_RGMII0_RXD2, "rgmii0_rxd2"),
+ PINCTRL_PIN(PIN_RGMII0_RXD3, "rgmii0_rxd3"),
+ PINCTRL_PIN(PIN_RGMII0_RXCTRL, "rgmii0_rxctrl"),
+ PINCTRL_PIN(PIN_RGMII0_TXC, "rgmii0_txc"),
+ PINCTRL_PIN(PIN_RGMII0_RXC, "rgmii0_rxc"),
+ PINCTRL_PIN(PIN_RGMII0_REFCLKO, "rgmii0_refclko"),
+ PINCTRL_PIN(PIN_RGMII0_IRQ, "rgmii0_irq"),
+ PINCTRL_PIN(PIN_RGMII0_MDC, "rgmii0_mdc"),
+ PINCTRL_PIN(PIN_RGMII0_MDIO, "rgmii0_mdio"),
+ PINCTRL_PIN(PIN_PWM0, "pwm0"),
+ PINCTRL_PIN(PIN_PWM1, "pwm1"),
+ PINCTRL_PIN(PIN_PWM2, "pwm2"),
+ PINCTRL_PIN(PIN_PWM3, "pwm3"),
+ PINCTRL_PIN(PIN_FAN0, "fan0"),
+ PINCTRL_PIN(PIN_FAN1, "fan1"),
+ PINCTRL_PIN(PIN_FAN2, "fan2"),
+ PINCTRL_PIN(PIN_FAN3, "fan3"),
+ PINCTRL_PIN(PIN_IIC0_SDA, "iic0_sda"),
+ PINCTRL_PIN(PIN_IIC0_SCL, "iic0_scl"),
+ PINCTRL_PIN(PIN_IIC1_SDA, "iic1_sda"),
+ PINCTRL_PIN(PIN_IIC1_SCL, "iic1_scl"),
+ PINCTRL_PIN(PIN_IIC2_SDA, "iic2_sda"),
+ PINCTRL_PIN(PIN_IIC2_SCL, "iic2_scl"),
+ PINCTRL_PIN(PIN_IIC3_SDA, "iic3_sda"),
+ PINCTRL_PIN(PIN_IIC3_SCL, "iic3_scl"),
+ PINCTRL_PIN(PIN_UART0_TX, "uart0_tx"),
+ PINCTRL_PIN(PIN_UART0_RX, "uart0_rx"),
+ PINCTRL_PIN(PIN_UART0_RTS, "uart0_rts"),
+ PINCTRL_PIN(PIN_UART0_CTS, "uart0_cts"),
+ PINCTRL_PIN(PIN_UART1_TX, "uart1_tx"),
+ PINCTRL_PIN(PIN_UART1_RX, "uart1_rx"),
+ PINCTRL_PIN(PIN_UART1_RTS, "uart1_rts"),
+ PINCTRL_PIN(PIN_UART1_CTS, "uart1_cts"),
+ PINCTRL_PIN(PIN_UART2_TX, "uart2_tx"),
+ PINCTRL_PIN(PIN_UART2_RX, "uart2_rx"),
+ PINCTRL_PIN(PIN_UART2_RTS, "uart2_rts"),
+ PINCTRL_PIN(PIN_UART2_CTS, "uart2_cts"),
+ PINCTRL_PIN(PIN_UART3_TX, "uart3_tx"),
+ PINCTRL_PIN(PIN_UART3_RX, "uart3_rx"),
+ PINCTRL_PIN(PIN_UART3_RTS, "uart3_rts"),
+ PINCTRL_PIN(PIN_UART3_CTS, "uart3_cts"),
+ PINCTRL_PIN(PIN_SPI0_CS0, "spi0_cs0"),
+ PINCTRL_PIN(PIN_SPI0_CS1, "spi0_cs1"),
+ PINCTRL_PIN(PIN_SPI0_SDI, "spi0_sdi"),
+ PINCTRL_PIN(PIN_SPI0_SDO, "spi0_sdo"),
+ PINCTRL_PIN(PIN_SPI0_SCK, "spi0_sck"),
+ PINCTRL_PIN(PIN_SPI1_CS0, "spi1_cs0"),
+ PINCTRL_PIN(PIN_SPI1_CS1, "spi1_cs1"),
+ PINCTRL_PIN(PIN_SPI1_SDI, "spi1_sdi"),
+ PINCTRL_PIN(PIN_SPI1_SDO, "spi1_sdo"),
+ PINCTRL_PIN(PIN_SPI1_SCK, "spi1_sck"),
+ PINCTRL_PIN(PIN_JTAG0_TDO, "jtag0_tdo"),
+ PINCTRL_PIN(PIN_JTAG0_TCK, "jtag0_tck"),
+ PINCTRL_PIN(PIN_JTAG0_TDI, "jtag0_tdi"),
+ PINCTRL_PIN(PIN_JTAG0_TMS, "jtag0_tms"),
+ PINCTRL_PIN(PIN_JTAG0_TRST, "jtag0_trst"),
+ PINCTRL_PIN(PIN_JTAG0_SRST, "jtag0_srst"),
+ PINCTRL_PIN(PIN_JTAG1_TDO, "jtag1_tdo"),
+ PINCTRL_PIN(PIN_JTAG1_TCK, "jtag1_tck"),
+ PINCTRL_PIN(PIN_JTAG1_TDI, "jtag1_tdi"),
+ PINCTRL_PIN(PIN_JTAG1_TMS, "jtag1_tms"),
+ PINCTRL_PIN(PIN_JTAG1_TRST, "jtag1_trst"),
+ PINCTRL_PIN(PIN_JTAG1_SRST, "jtag1_srst"),
+ PINCTRL_PIN(PIN_JTAG2_TDO, "jtag2_tdo"),
+ PINCTRL_PIN(PIN_JTAG2_TCK, "jtag2_tck"),
+ PINCTRL_PIN(PIN_JTAG2_TDI, "jtag2_tdi"),
+ PINCTRL_PIN(PIN_JTAG2_TMS, "jtag2_tms"),
+ PINCTRL_PIN(PIN_JTAG2_TRST, "jtag2_trst"),
+ PINCTRL_PIN(PIN_JTAG2_SRST, "jtag2_srst"),
+ PINCTRL_PIN(PIN_JTAG3_TDO, "jtag3_tdo"),
+ PINCTRL_PIN(PIN_JTAG3_TCK, "jtag3_tck"),
+ PINCTRL_PIN(PIN_JTAG3_TDI, "jtag3_tdi"),
+ PINCTRL_PIN(PIN_JTAG3_TMS, "jtag3_tms"),
+ PINCTRL_PIN(PIN_JTAG3_TRST, "jtag3_trst"),
+ PINCTRL_PIN(PIN_JTAG3_SRST, "jtag3_srst"),
+ PINCTRL_PIN(PIN_GPIO0, "gpio0"),
+ PINCTRL_PIN(PIN_GPIO1, "gpio1"),
+ PINCTRL_PIN(PIN_GPIO2, "gpio2"),
+ PINCTRL_PIN(PIN_GPIO3, "gpio3"),
+ PINCTRL_PIN(PIN_GPIO4, "gpio4"),
+ PINCTRL_PIN(PIN_GPIO5, "gpio5"),
+ PINCTRL_PIN(PIN_GPIO6, "gpio6"),
+ PINCTRL_PIN(PIN_GPIO7, "gpio7"),
+ PINCTRL_PIN(PIN_GPIO8, "gpio8"),
+ PINCTRL_PIN(PIN_GPIO9, "gpio9"),
+ PINCTRL_PIN(PIN_GPIO10, "gpio10"),
+ PINCTRL_PIN(PIN_GPIO11, "gpio11"),
+ PINCTRL_PIN(PIN_GPIO12, "gpio12"),
+ PINCTRL_PIN(PIN_GPIO13, "gpio13"),
+ PINCTRL_PIN(PIN_GPIO14, "gpio14"),
+ PINCTRL_PIN(PIN_GPIO15, "gpio15"),
+ PINCTRL_PIN(PIN_GPIO16, "gpio16"),
+ PINCTRL_PIN(PIN_GPIO17, "gpio17"),
+ PINCTRL_PIN(PIN_GPIO18, "gpio18"),
+ PINCTRL_PIN(PIN_GPIO19, "gpio19"),
+ PINCTRL_PIN(PIN_GPIO20, "gpio20"),
+ PINCTRL_PIN(PIN_GPIO21, "gpio21"),
+ PINCTRL_PIN(PIN_GPIO22, "gpio22"),
+ PINCTRL_PIN(PIN_GPIO23, "gpio23"),
+ PINCTRL_PIN(PIN_GPIO24, "gpio24"),
+ PINCTRL_PIN(PIN_GPIO25, "gpio25"),
+ PINCTRL_PIN(PIN_GPIO26, "gpio26"),
+ PINCTRL_PIN(PIN_GPIO27, "gpio27"),
+ PINCTRL_PIN(PIN_GPIO28, "gpio28"),
+ PINCTRL_PIN(PIN_GPIO29, "gpio29"),
+ PINCTRL_PIN(PIN_GPIO30, "gpio30"),
+ PINCTRL_PIN(PIN_GPIO31, "gpio31"),
+ PINCTRL_PIN(PIN_MODE_SEL0, "mode_sel0"),
+ PINCTRL_PIN(PIN_MODE_SEL1, "mode_sel1"),
+ PINCTRL_PIN(PIN_MODE_SEL2, "mode_sel2"),
+ PINCTRL_PIN(PIN_BOOT_SEL0, "boot_sel0"),
+ PINCTRL_PIN(PIN_BOOT_SEL1, "boot_sel1"),
+ PINCTRL_PIN(PIN_BOOT_SEL2, "boot_sel2"),
+ PINCTRL_PIN(PIN_BOOT_SEL3, "boot_sel3"),
+ PINCTRL_PIN(PIN_BOOT_SEL4, "boot_sel4"),
+ PINCTRL_PIN(PIN_BOOT_SEL5, "boot_sel5"),
+ PINCTRL_PIN(PIN_BOOT_SEL6, "boot_sel6"),
+ PINCTRL_PIN(PIN_BOOT_SEL7, "boot_sel7"),
+ PINCTRL_PIN(PIN_MULTI_SCKT, "multi_sckt"),
+ PINCTRL_PIN(PIN_SCKT_ID0, "sckt_id0"),
+ PINCTRL_PIN(PIN_SCKT_ID1, "sckt_id1"),
+ PINCTRL_PIN(PIN_PLL_CLK_IN_MAIN, "pll_clk_in_main"),
+ PINCTRL_PIN(PIN_PLL_CLK_IN_DDR_0, "pll_clk_in_ddr_0"),
+ PINCTRL_PIN(PIN_PLL_CLK_IN_DDR_1, "pll_clk_in_ddr_1"),
+ PINCTRL_PIN(PIN_PLL_CLK_IN_DDR_2, "pll_clk_in_ddr_2"),
+ PINCTRL_PIN(PIN_PLL_CLK_IN_DDR_3, "pll_clk_in_ddr_3"),
+ PINCTRL_PIN(PIN_XTAL_32K, "xtal_32k"),
+ PINCTRL_PIN(PIN_SYS_RST, "sys_rst"),
+ PINCTRL_PIN(PIN_PWR_BUTTON, "pwr_button"),
+ PINCTRL_PIN(PIN_TEST_EN, "test_en"),
+ PINCTRL_PIN(PIN_TEST_MODE_MBIST, "test_mode_mbist"),
+ PINCTRL_PIN(PIN_TEST_MODE_SCAN, "test_mode_scan"),
+ PINCTRL_PIN(PIN_TEST_MODE_BSD, "test_mode_bsd"),
+ PINCTRL_PIN(PIN_BISR_BYP, "bisr_byp"),
+};
+
+static const struct sg2042_pin sg2044_pin_data[ARRAY_SIZE(sg2044_pins)] = {
+ SG2042_GENERAL_PIN(PIN_IIC0_SMBSUS_IN, 0x000,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_IIC0_SMBSUS_OUT, 0x000,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_IIC0_SMBALERT, 0x004,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_IIC1_SMBSUS_IN, 0x004,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_IIC1_SMBSUS_OUT, 0x008,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_IIC1_SMBALERT, 0x008,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_IIC2_SMBSUS_IN, 0x00c,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_IIC2_SMBSUS_OUT, 0x00c,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_IIC2_SMBALERT, 0x010,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_IIC3_SMBSUS_IN, 0x010,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_IIC3_SMBSUS_OUT, 0x014,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_IIC3_SMBALERT, 0x014,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_PCIE0_L0_RESET, 0x018,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_PCIE0_L1_RESET, 0x018,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_PCIE0_L0_WAKEUP, 0x01c,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_PCIE0_L1_WAKEUP, 0x01c,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_PCIE0_L0_CLKREQ_IN, 0x020,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_PCIE0_L1_CLKREQ_IN, 0x020,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_PCIE1_L0_RESET, 0x024,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_PCIE1_L1_RESET, 0x024,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_PCIE1_L0_WAKEUP, 0x028,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_PCIE1_L1_WAKEUP, 0x028,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_PCIE1_L0_CLKREQ_IN, 0x02c,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_PCIE1_L1_CLKREQ_IN, 0x02c,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_PCIE2_L0_RESET, 0x030,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_PCIE2_L1_RESET, 0x030,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_PCIE2_L0_WAKEUP, 0x034,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_PCIE2_L1_WAKEUP, 0x034,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_PCIE2_L0_CLKREQ_IN, 0x038,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_PCIE2_L1_CLKREQ_IN, 0x038,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_PCIE3_L0_RESET, 0x03c,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_PCIE3_L1_RESET, 0x03c,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_PCIE3_L0_WAKEUP, 0x040,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_PCIE3_L1_WAKEUP, 0x040,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_PCIE3_L0_CLKREQ_IN, 0x044,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_PCIE3_L1_CLKREQ_IN, 0x044,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_PCIE4_L0_RESET, 0x048,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_PCIE4_L1_RESET, 0x048,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_PCIE4_L0_WAKEUP, 0x04c,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_PCIE4_L1_WAKEUP, 0x04c,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_PCIE4_L0_CLKREQ_IN, 0x050,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_PCIE4_L1_CLKREQ_IN, 0x050,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_SPIF0_CLK_SEL1, 0x054,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_SPIF0_CLK_SEL0, 0x054,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_SPIF0_WP, 0x058,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_SPIF0_HOLD, 0x058,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_SPIF0_SDI, 0x05c,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_SPIF0_CS, 0x05c,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_SPIF0_SCK, 0x060,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_SPIF0_SDO, 0x060,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_SPIF1_CLK_SEL1, 0x064,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_SPIF1_CLK_SEL0, 0x064,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_SPIF1_WP, 0x068,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_SPIF1_HOLD, 0x068,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_SPIF1_SDI, 0x06c,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_SPIF1_CS, 0x06c,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_SPIF1_SCK, 0x070,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_SPIF1_SDO, 0x070,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_EMMC_WP, 0x074,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_EMMC_CD, 0x074,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_EMMC_RST, 0x078,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_EMMC_PWR_EN, 0x078,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_SDIO_CD, 0x07c,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_SDIO_WP, 0x07c,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_SDIO_RST, 0x080,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_SDIO_PWR_EN, 0x080,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_RGMII0_TXD0, 0x084,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_RGMII0_TXD1, 0x084,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_RGMII0_TXD2, 0x088,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_RGMII0_TXD3, 0x088,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_RGMII0_TXCTRL, 0x08c,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_RGMII0_RXD0, 0x08c,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_RGMII0_RXD1, 0x090,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_RGMII0_RXD2, 0x090,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_RGMII0_RXD3, 0x094,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_RGMII0_RXCTRL, 0x094,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_RGMII0_TXC, 0x098,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_RGMII0_RXC, 0x098,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_RGMII0_REFCLKO, 0x09c,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_RGMII0_IRQ, 0x09c,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_RGMII0_MDC, 0x0a0,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_RGMII0_MDIO, 0x0a0,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_PWM0, 0x0a4,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_PWM1, 0x0a4,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_PWM2, 0x0a8,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_PWM3, 0x0a8,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_FAN0, 0x0ac,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_FAN1, 0x0ac,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_FAN2, 0x0b0,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_FAN3, 0x0b0,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_IIC0_SDA, 0x0b4,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_IIC0_SCL, 0x0b4,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_IIC1_SDA, 0x0b8,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_IIC1_SCL, 0x0b8,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_IIC2_SDA, 0x0bc,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_IIC2_SCL, 0x0bc,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_IIC3_SDA, 0x0c0,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_IIC3_SCL, 0x0c0,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_UART0_TX, 0x0c4,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_UART0_RX, 0x0c4,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_UART0_RTS, 0x0c8,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_UART0_CTS, 0x0c8,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_UART1_TX, 0x0cc,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_UART1_RX, 0x0cc,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_UART1_RTS, 0x0d0,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_UART1_CTS, 0x0d0,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_UART2_TX, 0x0d4,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_UART2_RX, 0x0d4,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_UART2_RTS, 0x0d8,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_UART2_CTS, 0x0d8,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_UART3_TX, 0x0dc,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_UART3_RX, 0x0dc,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_UART3_RTS, 0x0e0,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_UART3_CTS, 0x0e0,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_SPI0_CS0, 0x0e4,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_SPI0_CS1, 0x0e4,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_SPI0_SDI, 0x0e8,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_SPI0_SDO, 0x0e8,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_SPI0_SCK, 0x0ec,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_SPI1_CS0, 0x0ec,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_SPI1_CS1, 0x0f0,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_SPI1_SDI, 0x0f0,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_SPI1_SDO, 0x0f4,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_SPI1_SCK, 0x0f4,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_JTAG0_TDO, 0x0f8,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_JTAG0_TCK, 0x0f8,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_JTAG0_TDI, 0x0fc,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_JTAG0_TMS, 0x0fc,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_JTAG0_TRST, 0x100,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_JTAG0_SRST, 0x100,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_JTAG1_TDO, 0x104,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_JTAG1_TCK, 0x104,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_JTAG1_TDI, 0x108,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_JTAG1_TMS, 0x108,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_JTAG1_TRST, 0x10c,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_JTAG1_SRST, 0x10c,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_JTAG2_TDO, 0x110,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_JTAG2_TCK, 0x110,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_JTAG2_TDI, 0x114,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_JTAG2_TMS, 0x114,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_JTAG2_TRST, 0x118,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_JTAG2_SRST, 0x118,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_JTAG3_TDO, 0x11c,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_JTAG3_TCK, 0x11c,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_JTAG3_TDI, 0x120,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_JTAG3_TMS, 0x120,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_JTAG3_TRST, 0x124,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_JTAG3_SRST, 0x124,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_GPIO0, 0x128,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_GPIO1, 0x128,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_GPIO2, 0x12c,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_GPIO3, 0x12c,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_GPIO4, 0x130,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_GPIO5, 0x130,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_GPIO6, 0x134,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_GPIO7, 0x134,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_GPIO8, 0x138,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_GPIO9, 0x138,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_GPIO10, 0x13c,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_GPIO11, 0x13c,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_GPIO12, 0x140,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_GPIO13, 0x140,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_GPIO14, 0x144,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_GPIO15, 0x144,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_GPIO16, 0x148,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_GPIO17, 0x148,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_GPIO18, 0x14c,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_GPIO19, 0x14c,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_GPIO20, 0x150,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_GPIO21, 0x150,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_GPIO22, 0x154,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_GPIO23, 0x154,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_GPIO24, 0x158,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_GPIO25, 0x158,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_GPIO26, 0x15c,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_GPIO27, 0x15c,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_GPIO28, 0x160,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_GPIO29, 0x160,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_GPIO30, 0x164,
+ PIN_FLAG_DEFAULT),
+ SG2042_GENERAL_PIN(PIN_GPIO31, 0x164,
+ PIN_FLAG_WRITE_HIGH),
+ SG2042_GENERAL_PIN(PIN_MODE_SEL0, 0x168,
+ PIN_FLAG_NO_PINMUX | PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_MODE_SEL1, 0x168,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_NO_PINMUX |
+ PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_MODE_SEL2, 0x16c,
+ PIN_FLAG_NO_PINMUX | PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_BOOT_SEL0, 0x16c,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_NO_PINMUX |
+ PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_BOOT_SEL1, 0x170,
+ PIN_FLAG_NO_PINMUX | PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_BOOT_SEL2, 0x170,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_NO_PINMUX |
+ PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_BOOT_SEL3, 0x174,
+ PIN_FLAG_NO_PINMUX | PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_BOOT_SEL4, 0x174,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_NO_PINMUX |
+ PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_BOOT_SEL5, 0x178,
+ PIN_FLAG_NO_PINMUX | PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_BOOT_SEL6, 0x178,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_NO_PINMUX |
+ PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_BOOT_SEL7, 0x17c,
+ PIN_FLAG_NO_PINMUX | PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_MULTI_SCKT, 0x17c,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_NO_PINMUX |
+ PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_SCKT_ID0, 0x180,
+ PIN_FLAG_NO_PINMUX | PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_SCKT_ID1, 0x180,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_NO_PINMUX |
+ PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_PLL_CLK_IN_MAIN, 0x184,
+ PIN_FLAG_NO_PINMUX | PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_PLL_CLK_IN_DDR_0, 0x184,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_NO_PINMUX |
+ PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_PLL_CLK_IN_DDR_1, 0x188,
+ PIN_FLAG_NO_PINMUX | PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_PLL_CLK_IN_DDR_2, 0x188,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_NO_PINMUX |
+ PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_PLL_CLK_IN_DDR_3, 0x18c,
+ PIN_FLAG_NO_PINMUX | PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_XTAL_32K, 0x18c,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_NO_PINMUX |
+ PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_SYS_RST, 0x190,
+ PIN_FLAG_NO_PINMUX | PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_PWR_BUTTON, 0x190,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_NO_PINMUX |
+ PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_TEST_EN, 0x194,
+ PIN_FLAG_NO_PINMUX | PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_TEST_MODE_MBIST, 0x194,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_NO_PINMUX |
+ PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_TEST_MODE_SCAN, 0x198,
+ PIN_FLAG_NO_PINMUX | PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_TEST_MODE_BSD, 0x198,
+ PIN_FLAG_WRITE_HIGH | PIN_FLAG_NO_PINMUX |
+ PIN_FLAG_NO_OEX_EN),
+ SG2042_GENERAL_PIN(PIN_BISR_BYP, 0x19c,
+ PIN_FLAG_NO_PINMUX | PIN_FLAG_NO_OEX_EN),
+};
+
+static const struct sophgo_pinctrl_data sg2044_pindata = {
+ .pins = sg2044_pins,
+ .pindata = sg2044_pin_data,
+ .vddio_ops = &sg2044_vddio_cfg_ops,
+ .cfg_ops = &sg2042_cfg_ops,
+ .pctl_ops = &sg2042_pctrl_ops,
+ .pmx_ops = &sg2042_pmx_ops,
+ .pconf_ops = &sg2042_pconf_ops,
+ .npins = ARRAY_SIZE(sg2044_pins),
+ .pinsize = sizeof(struct sg2042_pin),
+};
+
+static const struct of_device_id sg2044_pinctrl_ids[] = {
+ { .compatible = "sophgo,sg2044-pinctrl", .data = &sg2044_pindata },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sg2044_pinctrl_ids);
+
+static struct platform_driver sg2044_pinctrl_driver = {
+ .probe = sophgo_pinctrl_probe,
+ .driver = {
+ .name = "sg2044-pinctrl",
+ .suppress_bind_attrs = true,
+ .of_match_table = sg2044_pinctrl_ids,
+ },
+};
+module_platform_driver(sg2044_pinctrl_driver);
+
+MODULE_DESCRIPTION("Pinctrl driver for the SG2002 series SoC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/sophgo/pinctrl-sophgo-common.c b/drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
new file mode 100644
index 000000000000..7f1fd68db19e
--- /dev/null
+++ b/drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common pinctrl ops for Sophgo SoCs.
+ *
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ */
+
+#include <linux/bsearch.h>
+#include <linux/cleanup.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "../pinctrl-utils.h"
+#include "../pinconf.h"
+#include "../pinmux.h"
+
+#include "pinctrl-sophgo.h"
+
+static u16 sophgo_dt_get_pin(u32 value)
+{
+ return value;
+}
+
+static int sophgo_cmp_pin(const void *key, const void *pivot)
+{
+ const struct sophgo_pin *pin = pivot;
+ int pin_id = (long)key;
+ int pivid = pin->id;
+
+ return pin_id - pivid;
+}
+
+const struct sophgo_pin *sophgo_get_pin(struct sophgo_pinctrl *pctrl,
+ unsigned long pin_id)
+{
+ return bsearch((void *)pin_id, pctrl->data->pindata, pctrl->data->npins,
+ pctrl->data->pinsize, sophgo_cmp_pin);
+}
+
+static int sophgo_verify_pinmux_config(struct sophgo_pinctrl *pctrl,
+ const struct sophgo_pin_mux_config *config)
+{
+ if (pctrl->data->cfg_ops->verify_pinmux_config)
+ return pctrl->data->cfg_ops->verify_pinmux_config(config);
+ return 0;
+}
+
+static int sophgo_verify_pin_group(struct sophgo_pinctrl *pctrl,
+ const struct sophgo_pin_mux_config *config,
+ unsigned int npins)
+{
+ if (pctrl->data->cfg_ops->verify_pin_group)
+ return pctrl->data->cfg_ops->verify_pin_group(config, npins);
+ return 0;
+}
+
+static int sophgo_dt_node_to_map_post(struct device_node *cur,
+ struct sophgo_pinctrl *pctrl,
+ struct sophgo_pin_mux_config *config,
+ unsigned int npins)
+{
+ if (pctrl->data->cfg_ops->dt_node_to_map_post)
+ return pctrl->data->cfg_ops->dt_node_to_map_post(cur, pctrl,
+ config, npins);
+ return 0;
+}
+
+int sophgo_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node *np,
+ struct pinctrl_map **maps, unsigned int *num_maps)
+{
+ struct sophgo_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct device *dev = pctrl->dev;
+ struct device_node *child;
+ struct pinctrl_map *map;
+ const char **grpnames;
+ const char *grpname;
+ int ngroups = 0;
+ int nmaps = 0;
+ int ret;
+
+ for_each_available_child_of_node(np, child)
+ ngroups += 1;
+
+ grpnames = devm_kcalloc(dev, ngroups, sizeof(*grpnames), GFP_KERNEL);
+ if (!grpnames)
+ return -ENOMEM;
+
+ map = kcalloc(ngroups * 2, sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ ngroups = 0;
+ guard(mutex)(&pctrl->mutex);
+ for_each_available_child_of_node(np, child) {
+ int npins = of_property_count_u32_elems(child, "pinmux");
+ unsigned int *pins;
+ struct sophgo_pin_mux_config *pinmuxs;
+ u32 config;
+ int i;
+
+ if (npins < 1) {
+ dev_err(dev, "invalid pinctrl group %pOFn.%pOFn\n",
+ np, child);
+ ret = -EINVAL;
+ goto dt_failed;
+ }
+
+ grpname = devm_kasprintf(dev, GFP_KERNEL, "%pOFn.%pOFn",
+ np, child);
+ if (!grpname) {
+ ret = -ENOMEM;
+ goto dt_failed;
+ }
+
+ grpnames[ngroups++] = grpname;
+
+ pins = devm_kcalloc(dev, npins, sizeof(*pins), GFP_KERNEL);
+ if (!pins) {
+ ret = -ENOMEM;
+ goto dt_failed;
+ }
+
+ pinmuxs = devm_kcalloc(dev, npins, sizeof(*pinmuxs), GFP_KERNEL);
+ if (!pinmuxs) {
+ ret = -ENOMEM;
+ goto dt_failed;
+ }
+
+ for (i = 0; i < npins; i++) {
+ ret = of_property_read_u32_index(child, "pinmux",
+ i, &config);
+ if (ret)
+ goto dt_failed;
+
+ pins[i] = sophgo_dt_get_pin(config);
+ pinmuxs[i].config = config;
+ pinmuxs[i].pin = sophgo_get_pin(pctrl, pins[i]);
+
+ if (!pinmuxs[i].pin) {
+ dev_err(dev, "failed to get pin %d\n", pins[i]);
+ ret = -ENODEV;
+ goto dt_failed;
+ }
+
+ ret = sophgo_verify_pinmux_config(pctrl, &pinmuxs[i]);
+ if (ret) {
+ dev_err(dev, "group %s pin %d is invalid\n",
+ grpname, i);
+ goto dt_failed;
+ }
+ }
+
+ ret = sophgo_verify_pin_group(pctrl, pinmuxs, npins);
+ if (ret) {
+ dev_err(dev, "group %s is invalid\n", grpname);
+ goto dt_failed;
+ }
+
+ ret = sophgo_dt_node_to_map_post(child, pctrl, pinmuxs, npins);
+ if (ret)
+ goto dt_failed;
+
+ map[nmaps].type = PIN_MAP_TYPE_MUX_GROUP;
+ map[nmaps].data.mux.function = np->name;
+ map[nmaps].data.mux.group = grpname;
+ nmaps += 1;
+
+ ret = pinconf_generic_parse_dt_config(child, pctldev,
+ &map[nmaps].data.configs.configs,
+ &map[nmaps].data.configs.num_configs);
+ if (ret) {
+ dev_err(dev, "failed to parse pin config of group %s: %d\n",
+ grpname, ret);
+ goto dt_failed;
+ }
+
+ ret = pinctrl_generic_add_group(pctldev, grpname,
+ pins, npins, pinmuxs);
+ if (ret < 0) {
+ dev_err(dev, "failed to add group %s: %d\n", grpname, ret);
+ goto dt_failed;
+ }
+
+ /* don't create a map if there are no pinconf settings */
+ if (map[nmaps].data.configs.num_configs == 0)
+ continue;
+
+ map[nmaps].type = PIN_MAP_TYPE_CONFIGS_GROUP;
+ map[nmaps].data.configs.group_or_pin = grpname;
+ nmaps += 1;
+ }
+
+ ret = pinmux_generic_add_function(pctldev, np->name,
+ grpnames, ngroups, NULL);
+ if (ret < 0) {
+ dev_err(dev, "error adding function %s: %d\n", np->name, ret);
+ goto function_failed;
+ }
+
+ *maps = map;
+ *num_maps = nmaps;
+
+ return 0;
+
+dt_failed:
+ of_node_put(child);
+function_failed:
+ pinctrl_utils_free_map(pctldev, map, nmaps);
+ return ret;
+}
+
+int sophgo_pmx_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int fsel, unsigned int gsel)
+{
+ struct sophgo_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct group_desc *group;
+ const struct sophgo_pin_mux_config *configs;
+ unsigned int i;
+
+ group = pinctrl_generic_get_group(pctldev, gsel);
+ if (!group)
+ return -EINVAL;
+
+ configs = group->data;
+
+ for (i = 0; i < group->grp.npins; i++) {
+ const struct sophgo_pin *pin = configs[i].pin;
+ u32 value = configs[i].config;
+
+ guard(raw_spinlock_irqsave)(&pctrl->lock);
+
+ pctrl->data->cfg_ops->set_pinmux_config(pctrl, pin, value);
+ }
+
+ return 0;
+}
+
+static int sophgo_pin_set_config(struct sophgo_pinctrl *pctrl,
+ unsigned int pin_id,
+ u32 value, u32 mask)
+{
+ const struct sophgo_pin *pin = sophgo_get_pin(pctrl, pin_id);
+
+ if (!pin)
+ return -EINVAL;
+
+ guard(raw_spinlock_irqsave)(&pctrl->lock);
+
+ return pctrl->data->cfg_ops->set_pinconf_config(pctrl, pin, value, mask);
+}
+
+int sophgo_pconf_set(struct pinctrl_dev *pctldev, unsigned int pin_id,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct sophgo_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct sophgo_pin *pin = sophgo_get_pin(pctrl, pin_id);
+ u32 value, mask;
+
+ if (!pin)
+ return -ENODEV;
+
+ if (pctrl->data->cfg_ops->compute_pinconf_config(pctrl, pin,
+ configs, num_configs,
+ &value, &mask))
+ return -ENOTSUPP;
+
+ return sophgo_pin_set_config(pctrl, pin_id, value, mask);
+}
+
+int sophgo_pconf_group_set(struct pinctrl_dev *pctldev, unsigned int gsel,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct sophgo_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct group_desc *group;
+ const struct sophgo_pin_mux_config *pinmuxs;
+ u32 value, mask;
+ int i;
+
+ group = pinctrl_generic_get_group(pctldev, gsel);
+ if (!group)
+ return -EINVAL;
+
+ pinmuxs = group->data;
+
+ if (pctrl->data->cfg_ops->compute_pinconf_config(pctrl, pinmuxs[0].pin,
+ configs, num_configs,
+ &value, &mask))
+ return -ENOTSUPP;
+
+ for (i = 0; i < group->grp.npins; i++)
+ sophgo_pin_set_config(pctrl, group->grp.pins[i], value, mask);
+
+ return 0;
+}
+
+u32 sophgo_pinctrl_typical_pull_down(struct sophgo_pinctrl *pctrl,
+ const struct sophgo_pin *pin,
+ const u32 *power_cfg)
+{
+ return pctrl->data->vddio_ops->get_pull_down(pin, power_cfg);
+}
+
+u32 sophgo_pinctrl_typical_pull_up(struct sophgo_pinctrl *pctrl,
+ const struct sophgo_pin *pin,
+ const u32 *power_cfg)
+{
+ return pctrl->data->vddio_ops->get_pull_up(pin, power_cfg);
+}
+
+int sophgo_pinctrl_oc2reg(struct sophgo_pinctrl *pctrl,
+ const struct sophgo_pin *pin,
+ const u32 *power_cfg, u32 target)
+{
+ const u32 *map;
+ int i, len;
+
+ if (!pctrl->data->vddio_ops->get_oc_map)
+ return -ENOTSUPP;
+
+ len = pctrl->data->vddio_ops->get_oc_map(pin, power_cfg, &map);
+ if (len < 0)
+ return len;
+
+ for (i = 0; i < len; i++) {
+ if (map[i] >= target)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+int sophgo_pinctrl_reg2oc(struct sophgo_pinctrl *pctrl,
+ const struct sophgo_pin *pin,
+ const u32 *power_cfg, u32 reg)
+{
+ const u32 *map;
+ int len;
+
+ if (!pctrl->data->vddio_ops->get_oc_map)
+ return -ENOTSUPP;
+
+ len = pctrl->data->vddio_ops->get_oc_map(pin, power_cfg, &map);
+ if (len < 0)
+ return len;
+
+ if (reg >= len)
+ return -EINVAL;
+
+ return map[reg];
+}
+
+int sophgo_pinctrl_schmitt2reg(struct sophgo_pinctrl *pctrl,
+ const struct sophgo_pin *pin,
+ const u32 *power_cfg, u32 target)
+{
+ const u32 *map;
+ int i, len;
+
+ if (!pctrl->data->vddio_ops->get_schmitt_map)
+ return -ENOTSUPP;
+
+ len = pctrl->data->vddio_ops->get_schmitt_map(pin, power_cfg, &map);
+ if (len < 0)
+ return len;
+
+ for (i = 0; i < len; i++) {
+ if (map[i] == target)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+int sophgo_pinctrl_reg2schmitt(struct sophgo_pinctrl *pctrl,
+ const struct sophgo_pin *pin,
+ const u32 *power_cfg, u32 reg)
+{
+ const u32 *map;
+ int len;
+
+ if (!pctrl->data->vddio_ops->get_schmitt_map)
+ return -ENOTSUPP;
+
+ len = pctrl->data->vddio_ops->get_schmitt_map(pin, power_cfg, &map);
+ if (len < 0)
+ return len;
+
+ if (reg >= len)
+ return -EINVAL;
+
+ return map[reg];
+}
+
+int sophgo_pinctrl_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sophgo_pinctrl *pctrl;
+ const struct sophgo_pinctrl_data *pctrl_data;
+ int ret;
+
+ pctrl_data = device_get_match_data(dev);
+ if (!pctrl_data)
+ return -ENODEV;
+
+ if (pctrl_data->npins == 0)
+ return dev_err_probe(dev, -EINVAL, "invalid pin data\n");
+
+ pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ pctrl->pdesc.name = dev_name(dev);
+ pctrl->pdesc.pins = pctrl_data->pins;
+ pctrl->pdesc.npins = pctrl_data->npins;
+ pctrl->pdesc.pctlops = pctrl_data->pctl_ops;
+ pctrl->pdesc.pmxops = pctrl_data->pmx_ops;
+ pctrl->pdesc.confops = pctrl_data->pconf_ops;
+ pctrl->pdesc.owner = THIS_MODULE;
+
+ pctrl->data = pctrl_data;
+ pctrl->dev = dev;
+ raw_spin_lock_init(&pctrl->lock);
+ mutex_init(&pctrl->mutex);
+
+ ret = pctrl->data->cfg_ops->pctrl_init(pdev, pctrl);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, pctrl);
+
+ ret = devm_pinctrl_register_and_init(dev, &pctrl->pdesc,
+ pctrl, &pctrl->pctrl_dev);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "fail to register pinctrl driver\n");
+
+ return pinctrl_enable(pctrl->pctrl_dev);
+}
+EXPORT_SYMBOL_GPL(sophgo_pinctrl_probe);
+
+MODULE_DESCRIPTION("Common pinctrl helper function for the Sophgo SoC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/sophgo/pinctrl-sophgo.h b/drivers/pinctrl/sophgo/pinctrl-sophgo.h
new file mode 100644
index 000000000000..4cd9b5484894
--- /dev/null
+++ b/drivers/pinctrl/sophgo/pinctrl-sophgo.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ */
+
+#ifndef _PINCTRL_SOPHGO_H
+#define _PINCTRL_SOPHGO_H
+
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#include "../core.h"
+
+struct sophgo_pinctrl;
+
+struct sophgo_pin {
+ u16 id;
+ u16 flags;
+};
+
+struct sophgo_pin_mux_config {
+ const struct sophgo_pin *pin;
+ u32 config;
+};
+
+/**
+ * struct sophgo_cfg_ops - pin configuration operations
+ *
+ * @pctrl_init: SoC-specific init callback
+ * @verify_pinmux_config: verify the pinmux config for a pin
+ * @verify_pin_group: verify the whole pinmux group
+ * @dt_node_to_map_post: post init for the pinmux config map
+ * @compute_pinconf_config: compute pinconf config
+ * @set_pinconf_config: set pinconf config (the caller holds lock)
+ * @set_pinmux_config: set mux config (the caller holds lock)
+ */
+struct sophgo_cfg_ops {
+ int (*pctrl_init)(struct platform_device *pdev,
+ struct sophgo_pinctrl *pctrl);
+ int (*verify_pinmux_config)(const struct sophgo_pin_mux_config *config);
+ int (*verify_pin_group)(const struct sophgo_pin_mux_config *pinmuxs,
+ unsigned int npins);
+ int (*dt_node_to_map_post)(struct device_node *cur,
+ struct sophgo_pinctrl *pctrl,
+ struct sophgo_pin_mux_config *pinmuxs,
+ unsigned int npins);
+ int (*compute_pinconf_config)(struct sophgo_pinctrl *pctrl,
+ const struct sophgo_pin *sp,
+ unsigned long *configs,
+ unsigned int num_configs,
+ u32 *value, u32 *mask);
+ int (*set_pinconf_config)(struct sophgo_pinctrl *pctrl,
+ const struct sophgo_pin *sp,
+ u32 value, u32 mask);
+ void (*set_pinmux_config)(struct sophgo_pinctrl *pctrl,
+ const struct sophgo_pin *sp, u32 config);
+};
+
+/**
+ * struct sophgo_vddio_cfg_ops - pin vddio operations
+ *
+ * @get_pull_up: get the typical pull-up resistor value.
+ * @get_pull_down: get the typical pull-down resistor value.
+ * @get_oc_map: get the map from typical low-level output current values to
+ * register values.
+ * @get_schmitt_map: get the map from register values to typical Schmitt
+ * trigger thresholds.
+ */
+struct sophgo_vddio_cfg_ops {
+ int (*get_pull_up)(const struct sophgo_pin *pin, const u32 *psmap);
+ int (*get_pull_down)(const struct sophgo_pin *pin, const u32 *psmap);
+ int (*get_oc_map)(const struct sophgo_pin *pin, const u32 *psmap,
+ const u32 **map);
+ int (*get_schmitt_map)(const struct sophgo_pin *pin, const u32 *psmap,
+ const u32 **map);
+};
+
+struct sophgo_pinctrl_data {
+ const struct pinctrl_pin_desc *pins;
+ const void *pindata;
+ const char * const *pdnames;
+ const struct sophgo_vddio_cfg_ops *vddio_ops;
+ const struct sophgo_cfg_ops *cfg_ops;
+ const struct pinctrl_ops *pctl_ops;
+ const struct pinmux_ops *pmx_ops;
+ const struct pinconf_ops *pconf_ops;
+ u16 npins;
+ u16 npds;
+ u16 pinsize;
+};
+
+struct sophgo_pinctrl {
+ struct device *dev;
+ struct pinctrl_dev *pctrl_dev;
+ const struct sophgo_pinctrl_data *data;
+ struct pinctrl_desc pdesc;
+
+ struct mutex mutex;
+ raw_spinlock_t lock;
+ void *priv_ctrl;
+};
+
+const struct sophgo_pin *sophgo_get_pin(struct sophgo_pinctrl *pctrl,
+ unsigned long pin_id);
+int sophgo_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node *np,
+ struct pinctrl_map **maps, unsigned int *num_maps);
+int sophgo_pmx_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int fsel, unsigned int gsel);
+int sophgo_pconf_set(struct pinctrl_dev *pctldev, unsigned int pin_id,
+ unsigned long *configs, unsigned int num_configs);
+int sophgo_pconf_group_set(struct pinctrl_dev *pctldev, unsigned int gsel,
+ unsigned long *configs, unsigned int num_configs);
+u32 sophgo_pinctrl_typical_pull_down(struct sophgo_pinctrl *pctrl,
+ const struct sophgo_pin *pin,
+ const u32 *power_cfg);
+u32 sophgo_pinctrl_typical_pull_up(struct sophgo_pinctrl *pctrl,
+ const struct sophgo_pin *pin,
+ const u32 *power_cfg);
+int sophgo_pinctrl_oc2reg(struct sophgo_pinctrl *pctrl,
+ const struct sophgo_pin *pin,
+ const u32 *power_cfg, u32 target);
+int sophgo_pinctrl_reg2oc(struct sophgo_pinctrl *pctrl,
+ const struct sophgo_pin *pin,
+ const u32 *power_cfg, u32 reg);
+int sophgo_pinctrl_schmitt2reg(struct sophgo_pinctrl *pctrl,
+ const struct sophgo_pin *pin,
+ const u32 *power_cfg, u32 target);
+int sophgo_pinctrl_reg2schmitt(struct sophgo_pinctrl *pctrl,
+ const struct sophgo_pin *pin,
+ const u32 *power_cfg, u32 reg);
+int sophgo_pinctrl_probe(struct platform_device *pdev);
+
+#endif /* _PINCTRL_SOPHGO_H */
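
As a quick illustration of the rounding done by the output-current helpers declared above: sophgo_pinctrl_oc2reg() returns the index of the first map entry that is at least the requested value, and sophgo_pinctrl_reg2oc() is the inverse table lookup. The map below is hypothetical (values in microamps), standing in for whatever a SoC's ->get_oc_map() callback reports:

static const u32 example_oc_map[] = { 4100, 8200, 12300 };	/* hypothetical, uA */

/*
 * sophgo_pinctrl_oc2reg(pctrl, pin, power_cfg, 8000)  -> 1       (first entry >= 8000)
 * sophgo_pinctrl_oc2reg(pctrl, pin, power_cfg, 20000) -> -EINVAL (no entry large enough)
 * sophgo_pinctrl_reg2oc(pctrl, pin, power_cfg, 2)     -> 12300
 * sophgo_pinctrl_reg2oc(pctrl, pin, power_cfg, 3)     -> -EINVAL (past the end of the map)
 */
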
diff --git a/drivers/pinctrl/spacemit/Kconfig b/drivers/pinctrl/spacemit/Kconfig
index 168f8a5ffbb9..d6f6017fd097 100644
--- a/drivers/pinctrl/spacemit/Kconfig
+++ b/drivers/pinctrl/spacemit/Kconfig
@@ -4,9 +4,10 @@
#
config PINCTRL_SPACEMIT_K1
- tristate "SpacemiT K1 SoC Pinctrl driver"
+ bool "SpacemiT K1 SoC Pinctrl driver"
depends on ARCH_SPACEMIT || COMPILE_TEST
depends on OF
+ default ARCH_SPACEMIT
select GENERIC_PINCTRL_GROUPS
select GENERIC_PINMUX_FUNCTIONS
select GENERIC_PINCONF
diff --git a/drivers/pinctrl/spacemit/pinctrl-k1.c b/drivers/pinctrl/spacemit/pinctrl-k1.c
index a32579d73613..67e867b04a02 100644
--- a/drivers/pinctrl/spacemit/pinctrl-k1.c
+++ b/drivers/pinctrl/spacemit/pinctrl-k1.c
@@ -9,6 +9,7 @@
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
@@ -749,7 +750,10 @@ static int spacemit_pinctrl_probe(struct platform_device *pdev)
pctrl->data = pctrl_data;
pctrl->dev = dev;
raw_spin_lock_init(&pctrl->lock);
- mutex_init(&pctrl->mutex);
+
+ ret = devm_mutex_init(dev, &pctrl->mutex);
+ if (ret)
+ return ret;
platform_set_drvdata(pdev, pctrl);
@@ -1044,7 +1048,7 @@ static struct platform_driver k1_pinctrl_driver = {
.of_match_table = k1_pinctrl_ids,
},
};
-module_platform_driver(k1_pinctrl_driver);
+builtin_platform_driver(k1_pinctrl_driver);
MODULE_AUTHOR("Yixun Lan <dlan@gentoo.org>");
MODULE_DESCRIPTION("Pinctrl driver for the SpacemiT K1 SoC");
diff --git a/drivers/pinctrl/sunxi/Kconfig b/drivers/pinctrl/sunxi/Kconfig
index a78fdbbdfc0c..dc62eba96348 100644
--- a/drivers/pinctrl/sunxi/Kconfig
+++ b/drivers/pinctrl/sunxi/Kconfig
@@ -131,4 +131,14 @@ config PINCTRL_SUN50I_H616_R
default ARM64 && ARCH_SUNXI
select PINCTRL_SUNXI
+config PINCTRL_SUN55I_A523
+ bool "Support for the Allwinner A523 PIO"
+ default ARM64 && ARCH_SUNXI
+ select PINCTRL_SUNXI
+
+config PINCTRL_SUN55I_A523_R
+ bool "Support for the Allwinner A523 R-PIO"
+ default ARM64 && ARCH_SUNXI
+ select PINCTRL_SUNXI
+
endif
diff --git a/drivers/pinctrl/sunxi/Makefile b/drivers/pinctrl/sunxi/Makefile
index 2ff5a55927ad..951b3f1e4b4f 100644
--- a/drivers/pinctrl/sunxi/Makefile
+++ b/drivers/pinctrl/sunxi/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
# Core
obj-y += pinctrl-sunxi.o
+obj-y += pinctrl-sunxi-dt.o
# SoC Drivers
obj-$(CONFIG_PINCTRL_SUNIV_F1C100S) += pinctrl-suniv-f1c100s.o
@@ -26,5 +27,7 @@ obj-$(CONFIG_PINCTRL_SUN50I_H6) += pinctrl-sun50i-h6.o
obj-$(CONFIG_PINCTRL_SUN50I_H6_R) += pinctrl-sun50i-h6-r.o
obj-$(CONFIG_PINCTRL_SUN50I_H616) += pinctrl-sun50i-h616.o
obj-$(CONFIG_PINCTRL_SUN50I_H616_R) += pinctrl-sun50i-h616-r.o
+obj-$(CONFIG_PINCTRL_SUN55I_A523) += pinctrl-sun55i-a523.o
+obj-$(CONFIG_PINCTRL_SUN55I_A523_R) += pinctrl-sun55i-a523-r.o
obj-$(CONFIG_PINCTRL_SUN9I_A80) += pinctrl-sun9i-a80.o
obj-$(CONFIG_PINCTRL_SUN9I_A80_R) += pinctrl-sun9i-a80-r.o
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun20i-d1.c b/drivers/pinctrl/sunxi/pinctrl-sun20i-d1.c
index 8e2aab542fcf..8efe35b77af4 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun20i-d1.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun20i-d1.c
@@ -820,15 +820,13 @@ static const struct sunxi_pinctrl_desc d1_pinctrl_data = {
static int d1_pinctrl_probe(struct platform_device *pdev)
{
- unsigned long variant = (unsigned long)of_device_get_match_data(&pdev->dev);
-
- return sunxi_pinctrl_init_with_variant(pdev, &d1_pinctrl_data, variant);
+ return sunxi_pinctrl_init_with_flags(pdev, &d1_pinctrl_data,
+ SUNXI_PINCTRL_NEW_REG_LAYOUT);
}
static const struct of_device_id d1_pinctrl_match[] = {
{
.compatible = "allwinner,sun20i-d1-pinctrl",
- .data = (void *)PINCTRL_SUN20I_D1
},
{}
};
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
index fa47fe36ee5b..b2e82bf927b3 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
@@ -17,6 +17,10 @@
#include "pinctrl-sunxi.h"
+#define PINCTRL_SUN4I_A10 BIT(0)
+#define PINCTRL_SUN7I_A20 BIT(1)
+#define PINCTRL_SUN8I_R40 BIT(2)
+
static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 0),
SUNXI_FUNCTION(0x0, "gpio_in"),
@@ -1295,8 +1299,8 @@ static int sun4i_a10_pinctrl_probe(struct platform_device *pdev)
{
unsigned long variant = (unsigned long)of_device_get_match_data(&pdev->dev);
- return sunxi_pinctrl_init_with_variant(pdev, &sun4i_a10_pinctrl_data,
- variant);
+ return sunxi_pinctrl_init_with_flags(pdev, &sun4i_a10_pinctrl_data,
+ variant);
}
static const struct of_device_id sun4i_a10_pinctrl_match[] = {
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun55i-a523-r.c b/drivers/pinctrl/sunxi/pinctrl-sun55i-a523-r.c
new file mode 100644
index 000000000000..69cd2b4ebd7d
--- /dev/null
+++ b/drivers/pinctrl/sunxi/pinctrl-sun55i-a523-r.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Allwinner A523 SoC r-pinctrl driver.
+ *
+ * Copyright (C) 2024 Arm Ltd.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-sunxi.h"
+
+static const u8 a523_r_nr_bank_pins[SUNXI_PINCTRL_MAX_BANKS] =
+/* PL PM */
+ { 14, 6 };
+
+static const unsigned int a523_r_irq_bank_map[] = { 0, 1 };
+
+static const u8 a523_r_irq_bank_muxes[SUNXI_PINCTRL_MAX_BANKS] =
+/* PL PM */
+ { 14, 14 };
+
+static struct sunxi_pinctrl_desc a523_r_pinctrl_data = {
+ .irq_banks = ARRAY_SIZE(a523_r_irq_bank_map),
+ .irq_bank_map = a523_r_irq_bank_map,
+ .irq_read_needs_mux = true,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_SEL,
+ .pin_base = PL_BASE,
+};
+
+static int a523_r_pinctrl_probe(struct platform_device *pdev)
+{
+ return sunxi_pinctrl_dt_table_init(pdev, a523_r_nr_bank_pins,
+ a523_r_irq_bank_muxes,
+ &a523_r_pinctrl_data,
+ SUNXI_PINCTRL_NEW_REG_LAYOUT);
+}
+
+static const struct of_device_id a523_r_pinctrl_match[] = {
+ { .compatible = "allwinner,sun55i-a523-r-pinctrl", },
+ {}
+};
+
+static struct platform_driver a523_r_pinctrl_driver = {
+ .probe = a523_r_pinctrl_probe,
+ .driver = {
+ .name = "sun55i-a523-r-pinctrl",
+ .of_match_table = a523_r_pinctrl_match,
+ },
+};
+builtin_platform_driver(a523_r_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun55i-a523.c b/drivers/pinctrl/sunxi/pinctrl-sun55i-a523.c
new file mode 100644
index 000000000000..7d2308c37d29
--- /dev/null
+++ b/drivers/pinctrl/sunxi/pinctrl-sun55i-a523.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Allwinner A523 SoC pinctrl driver.
+ *
+ * Copyright (C) 2023 Arm Ltd.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-sunxi.h"
+
+static const u8 a523_nr_bank_pins[SUNXI_PINCTRL_MAX_BANKS] =
+/* PA PB PC PD PE PF PG PH PI PJ PK */
+ { 0, 15, 17, 24, 16, 7, 15, 20, 17, 28, 24 };
+
+static const unsigned int a523_irq_bank_map[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
+
+static const u8 a523_irq_bank_muxes[SUNXI_PINCTRL_MAX_BANKS] =
+/* PA PB PC PD PE PF PG PH PI PJ PK */
+ { 0, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14};
+
+static struct sunxi_pinctrl_desc a523_pinctrl_data = {
+ .irq_banks = ARRAY_SIZE(a523_irq_bank_map),
+ .irq_bank_map = a523_irq_bank_map,
+ .irq_read_needs_mux = true,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_SEL,
+};
+
+static int a523_pinctrl_probe(struct platform_device *pdev)
+{
+ return sunxi_pinctrl_dt_table_init(pdev, a523_nr_bank_pins,
+ a523_irq_bank_muxes,
+ &a523_pinctrl_data,
+ SUNXI_PINCTRL_NEW_REG_LAYOUT |
+ SUNXI_PINCTRL_ELEVEN_BANKS);
+}
+
+static const struct of_device_id a523_pinctrl_match[] = {
+ { .compatible = "allwinner,sun55i-a523-pinctrl", },
+ {}
+};
+
+static struct platform_driver a523_pinctrl_driver = {
+ .probe = a523_pinctrl_probe,
+ .driver = {
+ .name = "sun55i-a523-pinctrl",
+ .of_match_table = a523_pinctrl_match,
+ },
+};
+builtin_platform_driver(a523_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun5i.c b/drivers/pinctrl/sunxi/pinctrl-sun5i.c
index 06ecb121c827..6eef314c9377 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun5i.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun5i.c
@@ -16,6 +16,10 @@
#include "pinctrl-sunxi.h"
+#define PINCTRL_SUN5I_A10S BIT(0)
+#define PINCTRL_SUN5I_A13 BIT(1)
+#define PINCTRL_SUN5I_GR8 BIT(2)
+
static const struct sunxi_desc_pin sun5i_pins[] = {
SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 0),
PINCTRL_SUN5I_A10S,
@@ -719,8 +723,8 @@ static int sun5i_pinctrl_probe(struct platform_device *pdev)
{
unsigned long variant = (unsigned long)of_device_get_match_data(&pdev->dev);
- return sunxi_pinctrl_init_with_variant(pdev, &sun5i_pinctrl_data,
- variant);
+ return sunxi_pinctrl_init_with_flags(pdev, &sun5i_pinctrl_data,
+ variant);
}
static const struct of_device_id sun5i_pinctrl_match[] = {
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
index 82ac064931df..8d8c92ce41cf 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
@@ -17,6 +17,9 @@
#include "pinctrl-sunxi.h"
+#define PINCTRL_SUN6I_A31 BIT(0)
+#define PINCTRL_SUN6I_A31S BIT(1)
+
static const struct sunxi_desc_pin sun6i_a31_pins[] = {
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 0),
SUNXI_FUNCTION(0x0, "gpio_in"),
@@ -972,9 +975,8 @@ static int sun6i_a31_pinctrl_probe(struct platform_device *pdev)
unsigned long variant =
(unsigned long)of_device_get_match_data(&pdev->dev);
- return sunxi_pinctrl_init_with_variant(pdev,
- &sun6i_a31_pinctrl_data,
- variant);
+ return sunxi_pinctrl_init_with_flags(pdev, &sun6i_a31_pinctrl_data,
+ variant);
}
static const struct of_device_id sun6i_a31_pinctrl_match[] = {
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-v3s.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-v3s.c
index 49c9a0b6a0eb..696d7dd8d87b 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-v3s.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-v3s.c
@@ -22,6 +22,9 @@
#include "pinctrl-sunxi.h"
+#define PINCTRL_SUN8I_V3 BIT(0)
+#define PINCTRL_SUN8I_V3S BIT(1)
+
static const struct sunxi_desc_pin sun8i_v3s_pins[] = {
/* Hole */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 0),
@@ -552,8 +555,8 @@ static int sun8i_v3s_pinctrl_probe(struct platform_device *pdev)
{
unsigned long variant = (unsigned long)of_device_get_match_data(&pdev->dev);
- return sunxi_pinctrl_init_with_variant(pdev, &sun8i_v3s_pinctrl_data,
- variant);
+ return sunxi_pinctrl_init_with_flags(pdev, &sun8i_v3s_pinctrl_data,
+ variant);
}
static const struct of_device_id sun8i_v3s_pinctrl_match[] = {
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c b/drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
new file mode 100644
index 000000000000..1833078f6877
--- /dev/null
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
@@ -0,0 +1,374 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017-2025 Arm Ltd.
+ *
+ * Generic DT driven Allwinner pinctrl driver routines.
+ * Builds the pin tables from minimal driver information and pin groups
+ * described in the DT, then hands those tables off to the traditional
+ * sunxi pinctrl driver.
+ * sunxi_pinctrl_init() expects a table like the one shown below, previously
+ * spelled out in a per-SoC .c file. This code generates that table, like so:
+ *
+ * SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 1), // code iterates over every implemented
+ * // pin, based on pins_per_bank[] array passed in
+ *
+ * SUNXI_FUNCTION(0x0, "gpio_in"), // always added, for every pin
+ * SUNXI_FUNCTION(0x1, "gpio_out"), // always added, for every pin
+ *
+ * SUNXI_FUNCTION(0x2, "mmc0"), // based on pingroup found in DT:
+ * // mmc0-pins {
+ * // pins = "PF0", "PF1", ...
+ * // function = "mmc0";
+ * // allwinner,pinmux = <2>;
+ *
+ * SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 1)), // derived from the irq_bank_muxes[]
+ * // array passed in
+ */
+
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "pinctrl-sunxi.h"
+
+#define INVALID_MUX 0xff
+
+/*
+ * Return the "index"th element from the "allwinner,pinmux" property. If the
+ * property does not hold enough entries, return the last one instead.
+ * For almost every group the pinmux value is actually the same, so this
+ * allows listing just one value in the property.
+ */
+static u8 sunxi_pinctrl_dt_read_pinmux(const struct device_node *node,
+ int index)
+{
+ int ret, num_elems;
+ u32 value;
+
+ num_elems = of_property_count_u32_elems(node, "allwinner,pinmux");
+ if (num_elems <= 0)
+ return INVALID_MUX;
+
+ if (index >= num_elems)
+ index = num_elems - 1;
+
+ ret = of_property_read_u32_index(node, "allwinner,pinmux", index,
+ &value);
+ if (ret)
+ return INVALID_MUX;
+
+ return value;
+}
+
+/*
+ * Allocate a table with a sunxi_desc_pin structure for every pin needed.
+ * Fills in the respective pin names ("PA0") and their pin numbers.
+ * Returns the pins array. We cannot use the pins member in *desc yet, as it
+ * is marked const and we still need to modify the array.
+ */
+static struct sunxi_desc_pin *init_pins_table(struct device *dev,
+ const u8 *pins_per_bank,
+ struct sunxi_pinctrl_desc *desc)
+{
+ struct sunxi_desc_pin *pins, *cur_pin;
+ int name_size = 0;
+ int port_base = desc->pin_base / PINS_PER_BANK;
+ char *pin_names, *cur_name;
+ int i, j;
+
+ /*
+ * Find the total number of pins.
+ * Also work out how much memory we need to store all the pin names.
+ */
+ for (i = 0; i < SUNXI_PINCTRL_MAX_BANKS; i++) {
+ desc->npins += pins_per_bank[i];
+ if (pins_per_bank[i] < 10) {
+ /* 4 bytes for "PXy\0" */
+ name_size += pins_per_bank[i] * 4;
+ } else {
+ /* 4 bytes for each "PXy\0" */
+ name_size += 10 * 4;
+
+ /* 5 bytes for each "PXyy\0" */
+ name_size += (pins_per_bank[i] - 10) * 5;
+ }
+ }
+
+ if (desc->npins == 0) {
+ dev_err(dev, "no ports defined\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ pins = devm_kzalloc(dev, desc->npins * sizeof(*pins), GFP_KERNEL);
+ if (!pins)
+ return ERR_PTR(-ENOMEM);
+
+ /* Allocate memory to store the name for every pin. */
+ pin_names = devm_kmalloc(dev, name_size, GFP_KERNEL);
+ if (!pin_names)
+ return ERR_PTR(-ENOMEM);
+
+ /* Fill the pins array with the name and the number for each pin. */
+ cur_name = pin_names;
+ cur_pin = pins;
+ for (i = 0; i < SUNXI_PINCTRL_MAX_BANKS; i++) {
+ for (j = 0; j < pins_per_bank[i]; j++, cur_pin++) {
+ int nchars = sprintf(cur_name, "P%c%d",
+ port_base + 'A' + i, j);
+
+ cur_pin->pin.number = (port_base + i) * PINS_PER_BANK + j;
+ cur_pin->pin.name = cur_name;
+ cur_name += nchars + 1;
+ }
+ }
+
+ return pins;
+}
+
+/*
+ * Work out the number of functions for each pin. This will visit every
+ * child node of the pinctrl DT node to find all advertised functions.
+ * Provide memory to hold the per-function information and assign it to
+ * the pin table.
+ * Fill in the GPIO in/out functions already (that every pin has), also add
+ * an "irq" function at the end, for those pins in IRQ-capable ports.
+ * We do not fill in the extra functions (those described in DT nodes) yet.
+ * We (ab)use the "variant" member in each pin to keep track of the number of
+ * extra functions needed. At the end this will get reset to 2, so that we
+ * can add the extra functions later, after the two GPIO functions.
+ */
+static int prepare_function_table(struct device *dev, struct device_node *pnode,
+ struct sunxi_desc_pin *pins, int npins,
+ const u8 *irq_bank_muxes)
+{
+ struct device_node *node;
+ struct property *prop;
+ struct sunxi_desc_function *func;
+ int num_funcs, irq_bank, last_bank, i;
+
+ /*
+ * We need at least three functions per pin:
+ * - one for GPIO in
+ * - one for GPIO out
+ * - one for the sentinel signalling the end of the list
+ */
+ num_funcs = 3 * npins;
+
+ /*
+ * Add a function for each pin in a bank supporting interrupts.
+ * We temporarily (ab)use the variant field to store the number of
+ * functions per pin. This will be cleaned back to 0 before we hand
+ * over the whole structure to the generic sunxi pinctrl setup code.
+ */
+ for (i = 0; i < npins; i++) {
+ struct sunxi_desc_pin *pin = &pins[i];
+ int bank = pin->pin.number / PINS_PER_BANK;
+
+ if (irq_bank_muxes[bank]) {
+ pin->variant++;
+ num_funcs++;
+ }
+ }
+
+ /*
+ * Go over each pin group (every child of the pinctrl DT node) and
+ * add the number of special functions each pin has. Also update the
+ * total number of functions required.
+ * We might slightly overshoot here in case of double definitions.
+ */
+ for_each_child_of_node(pnode, node) {
+ const char *name;
+
+ of_property_for_each_string(node, "pins", prop, name) {
+ for (i = 0; i < npins; i++) {
+ if (strcmp(pins[i].pin.name, name))
+ continue;
+
+ pins[i].variant++;
+ num_funcs++;
+ break;
+ }
+ }
+ }
+
+ /*
+ * Allocate the memory needed for the functions in one table.
+ * Each pin's functions pointer will later point into this table.
+ */
+ func = devm_kzalloc(dev, num_funcs * sizeof(*func), GFP_KERNEL);
+ if (!func)
+ return -ENOMEM;
+
+ /*
+ * Assign each pin its part of the functions memory and fill in the GPIO
+ * functions, the IRQ function and a sentinel. The extra functions will be
+ * filled in later.
+ */
+ irq_bank = 0;
+ last_bank = 0;
+ for (i = 0; i < npins; i++) {
+ struct sunxi_desc_pin *pin = &pins[i];
+ int bank = pin->pin.number / PINS_PER_BANK;
+ int lastfunc = pin->variant + 1;
+ int irq_mux = irq_bank_muxes[bank];
+
+ func[0].name = "gpio_in";
+ func[0].muxval = 0;
+ func[1].name = "gpio_out";
+ func[1].muxval = 1;
+
+ if (irq_mux) {
+ if (bank > last_bank)
+ irq_bank++;
+ func[lastfunc].muxval = irq_mux;
+ func[lastfunc].irqbank = irq_bank;
+ func[lastfunc].irqnum = pin->pin.number % PINS_PER_BANK;
+ func[lastfunc].name = "irq";
+ }
+
+ if (bank > last_bank)
+ last_bank = bank;
+
+ pin->functions = func;
+
+ /* Skip over the other needed functions and the sentinel. */
+ func += pin->variant + 3;
+
+ /*
+ * Reset the value for filling in the remaining functions
+ * behind the GPIOs later.
+ */
+ pin->variant = 2;
+ }
+
+ return 0;
+}
+
+/*
+ * Iterate over all pins in a single group and add the function name and its
+ * mux value to the respective pin.
+ * The "variant" member is again used to temporarily track the number of
+ * already added functions.
+ */
+static void fill_pin_function(struct device *dev, struct device_node *node,
+ struct sunxi_desc_pin *pins, int npins)
+{
+ const char *name, *funcname;
+ struct sunxi_desc_function *func;
+ struct property *prop;
+ int pin, i, index;
+ u8 muxval;
+
+ if (of_property_read_string(node, "function", &funcname)) {
+ dev_warn(dev, "missing \"function\" property\n");
+ return;
+ }
+
+ index = 0;
+ of_property_for_each_string(node, "pins", prop, name) {
+ /* Find the index of this pin in our table. */
+ for (pin = 0; pin < npins; pin++)
+ if (!strcmp(pins[pin].pin.name, name))
+ break;
+ if (pin == npins) {
+ dev_warn(dev, "%s: cannot find pin %s\n",
+ of_node_full_name(node), name);
+ index++;
+ continue;
+ }
+
+ /* Read the associated mux value. */
+ muxval = sunxi_pinctrl_dt_read_pinmux(node, index);
+ if (muxval == INVALID_MUX) {
+ dev_warn(dev, "%s: invalid mux value for pin %s\n",
+ of_node_full_name(node), name);
+ index++;
+ continue;
+ }
+
+ /*
+ * Check for double definitions by comparing the to-be-added
+ * function with already assigned ones.
+ * Ignore identical pairs (function name and mux value the
+ * same), but warn about conflicting assignments.
+ */
+ for (i = 2; i < pins[pin].variant; i++) {
+ func = &pins[pin].functions[i];
+
+ /* Skip over totally unrelated functions. */
+ if (strcmp(func->name, funcname) &&
+ func->muxval != muxval)
+ continue;
+
+ /* Ignore (but skip below) any identical functions. */
+ if (!strcmp(func->name, funcname) &&
+ muxval == func->muxval)
+ break;
+
+ dev_warn(dev,
+ "pin %s: function %s redefined to mux %d\n",
+ name, funcname, muxval);
+ break;
+ }
+
+ /* Skip any pins with that function already assigned. */
+ if (i < pins[pin].variant) {
+ index++;
+ continue;
+ }
+
+ /* Assign function and muxval to the next free slot. */
+ func = &pins[pin].functions[pins[pin].variant];
+ func->muxval = muxval;
+ func->name = funcname;
+
+ pins[pin].variant++;
+ index++;
+ }
+}
+
+/*
+ * Initialise the pinctrl table by building it from driver-provided
+ * information: the number of pins per bank, the IRQ-capable banks and their
+ * IRQ mux value.
+ * Then iterate over all pinctrl DT node children to enter the function name
+ * and mux values for each mentioned pin.
+ * At the end hand over this structure to the actual sunxi pinctrl driver.
+ */
+int sunxi_pinctrl_dt_table_init(struct platform_device *pdev,
+ const u8 *pins_per_bank,
+ const u8 *irq_bank_muxes,
+ struct sunxi_pinctrl_desc *desc,
+ unsigned long flags)
+{
+ struct device_node *pnode = pdev->dev.of_node, *node;
+ struct sunxi_desc_pin *pins;
+ int ret, i;
+
+ pins = init_pins_table(&pdev->dev, pins_per_bank, desc);
+ if (IS_ERR(pins))
+ return PTR_ERR(pins);
+
+ ret = prepare_function_table(&pdev->dev, pnode, pins, desc->npins,
+ irq_bank_muxes);
+ if (ret)
+ return ret;
+
+ /*
+ * Now iterate over all groups and add the respective function name
+ * and mux values to each pin listed within.
+ */
+ for_each_child_of_node(pnode, node)
+ fill_pin_function(&pdev->dev, node, pins, desc->npins);
+
+ /* Clear the temporary storage. */
+ for (i = 0; i < desc->npins; i++)
+ pins[i].variant = 0;
+
+ desc->pins = pins;
+
+ return sunxi_pinctrl_init_with_flags(pdev, desc, flags);
+}
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index bde67ee31417..f1c5a991cf7b 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -58,13 +58,29 @@ static struct irq_chip sunxi_pinctrl_level_irq_chip;
* The following functions calculate the register and the bit offset to access.
* They take a pin number which is relative to the start of the current device.
*/
+
+/*
+ * When using the extended register layout, Bank K does not fit into the
+ * space used for the other banks. Instead it lives at offset 0x500.
+ */
+static u32 sunxi_bank_offset(const struct sunxi_pinctrl *pctl, u32 pin)
+{
+ u32 offset = 0;
+
+ if (pin >= PK_BASE) {
+ pin -= PK_BASE;
+ offset = PIO_BANK_K_OFFSET;
+ }
+
+ return offset + (pin / PINS_PER_BANK) * pctl->bank_mem_size;
+}
+
static void sunxi_mux_reg(const struct sunxi_pinctrl *pctl,
u32 pin, u32 *reg, u32 *shift, u32 *mask)
{
- u32 bank = pin / PINS_PER_BANK;
u32 offset = pin % PINS_PER_BANK * MUX_FIELD_WIDTH;
- *reg = bank * pctl->bank_mem_size + MUX_REGS_OFFSET +
+ *reg = sunxi_bank_offset(pctl, pin) + MUX_REGS_OFFSET +
offset / BITS_PER_TYPE(u32) * sizeof(u32);
*shift = offset % BITS_PER_TYPE(u32);
*mask = (BIT(MUX_FIELD_WIDTH) - 1) << *shift;
@@ -73,10 +89,9 @@ static void sunxi_mux_reg(const struct sunxi_pinctrl *pctl,
static void sunxi_data_reg(const struct sunxi_pinctrl *pctl,
u32 pin, u32 *reg, u32 *shift, u32 *mask)
{
- u32 bank = pin / PINS_PER_BANK;
u32 offset = pin % PINS_PER_BANK * DATA_FIELD_WIDTH;
- *reg = bank * pctl->bank_mem_size + DATA_REGS_OFFSET +
+ *reg = sunxi_bank_offset(pctl, pin) + DATA_REGS_OFFSET +
offset / BITS_PER_TYPE(u32) * sizeof(u32);
*shift = offset % BITS_PER_TYPE(u32);
*mask = (BIT(DATA_FIELD_WIDTH) - 1) << *shift;
@@ -85,10 +100,9 @@ static void sunxi_data_reg(const struct sunxi_pinctrl *pctl,
static void sunxi_dlevel_reg(const struct sunxi_pinctrl *pctl,
u32 pin, u32 *reg, u32 *shift, u32 *mask)
{
- u32 bank = pin / PINS_PER_BANK;
u32 offset = pin % PINS_PER_BANK * pctl->dlevel_field_width;
- *reg = bank * pctl->bank_mem_size + DLEVEL_REGS_OFFSET +
+ *reg = sunxi_bank_offset(pctl, pin) + DLEVEL_REGS_OFFSET +
offset / BITS_PER_TYPE(u32) * sizeof(u32);
*shift = offset % BITS_PER_TYPE(u32);
*mask = (BIT(pctl->dlevel_field_width) - 1) << *shift;
@@ -97,10 +111,9 @@ static void sunxi_dlevel_reg(const struct sunxi_pinctrl *pctl,
static void sunxi_pull_reg(const struct sunxi_pinctrl *pctl,
u32 pin, u32 *reg, u32 *shift, u32 *mask)
{
- u32 bank = pin / PINS_PER_BANK;
u32 offset = pin % PINS_PER_BANK * PULL_FIELD_WIDTH;
- *reg = bank * pctl->bank_mem_size + pctl->pull_regs_offset +
+ *reg = sunxi_bank_offset(pctl, pin) + pctl->pull_regs_offset +
offset / BITS_PER_TYPE(u32) * sizeof(u32);
*shift = offset % BITS_PER_TYPE(u32);
*mask = (BIT(PULL_FIELD_WIDTH) - 1) << *shift;
@@ -723,9 +736,11 @@ static int sunxi_pinctrl_set_io_bias_cfg(struct sunxi_pinctrl *pctl,
val = uV > 1800000 && uV <= 2500000 ? BIT(bank) : 0;
raw_spin_lock_irqsave(&pctl->lock, flags);
- reg = readl(pctl->membase + PIO_POW_MOD_CTL_REG);
+ reg = readl(pctl->membase + pctl->pow_mod_sel_offset +
+ PIO_POW_MOD_CTL_OFS);
reg &= ~BIT(bank);
- writel(reg | val, pctl->membase + PIO_POW_MOD_CTL_REG);
+ writel(reg | val, pctl->membase + pctl->pow_mod_sel_offset +
+ PIO_POW_MOD_CTL_OFS);
raw_spin_unlock_irqrestore(&pctl->lock, flags);
fallthrough;
@@ -733,9 +748,10 @@ static int sunxi_pinctrl_set_io_bias_cfg(struct sunxi_pinctrl *pctl,
val = uV <= 1800000 ? 1 : 0;
raw_spin_lock_irqsave(&pctl->lock, flags);
- reg = readl(pctl->membase + PIO_POW_MOD_SEL_REG);
+ reg = readl(pctl->membase + pctl->pow_mod_sel_offset);
reg &= ~(1 << bank);
- writel(reg | val << bank, pctl->membase + PIO_POW_MOD_SEL_REG);
+ writel(reg | val << bank,
+ pctl->membase + pctl->pow_mod_sel_offset);
raw_spin_unlock_irqrestore(&pctl->lock, flags);
return 0;
default:
@@ -1472,9 +1488,9 @@ static int sunxi_pinctrl_setup_debounce(struct sunxi_pinctrl *pctl,
return 0;
}
-int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
- const struct sunxi_pinctrl_desc *desc,
- unsigned long variant)
+int sunxi_pinctrl_init_with_flags(struct platform_device *pdev,
+ const struct sunxi_pinctrl_desc *desc,
+ unsigned long flags)
{
struct device_node *node = pdev->dev.of_node;
struct pinctrl_desc *pctrl_desc;
@@ -1497,8 +1513,8 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
pctl->dev = &pdev->dev;
pctl->desc = desc;
- pctl->variant = variant;
- if (pctl->variant >= PINCTRL_SUN20I_D1) {
+ pctl->variant = flags & SUNXI_PINCTRL_VARIANT_MASK;
+ if (flags & SUNXI_PINCTRL_NEW_REG_LAYOUT) {
pctl->bank_mem_size = D1_BANK_MEM_SIZE;
pctl->pull_regs_offset = D1_PULL_REGS_OFFSET;
pctl->dlevel_field_width = D1_DLEVEL_FIELD_WIDTH;
@@ -1507,6 +1523,10 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
pctl->pull_regs_offset = PULL_REGS_OFFSET;
pctl->dlevel_field_width = DLEVEL_FIELD_WIDTH;
}
+ if (flags & SUNXI_PINCTRL_ELEVEN_BANKS)
+ pctl->pow_mod_sel_offset = PIO_11B_POW_MOD_SEL_REG;
+ else
+ pctl->pow_mod_sel_offset = PIO_POW_MOD_SEL_REG;
pctl->irq_array = devm_kcalloc(&pdev->dev,
IRQ_PER_BANK * pctl->desc->irq_banks,
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index a87a2f944d60..ad26e4de16a8 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -25,10 +25,15 @@
#define PG_BASE 192
#define PH_BASE 224
#define PI_BASE 256
+#define PJ_BASE 288
+#define PK_BASE 320
#define PL_BASE 352
#define PM_BASE 384
#define PN_BASE 416
+/* maximum number of banks per controller (PA -> PK) */
+#define SUNXI_PINCTRL_MAX_BANKS 11
+
#define SUNXI_PINCTRL_PIN(bank, pin) \
PINCTRL_PIN(P ## bank ## _BASE + (pin), "P" #bank #pin)
@@ -82,21 +87,16 @@
#define SUN4I_FUNC_INPUT 0
#define SUN4I_FUNC_IRQ 6
-#define PINCTRL_SUN5I_A10S BIT(1)
-#define PINCTRL_SUN5I_A13 BIT(2)
-#define PINCTRL_SUN5I_GR8 BIT(3)
-#define PINCTRL_SUN6I_A31 BIT(4)
-#define PINCTRL_SUN6I_A31S BIT(5)
-#define PINCTRL_SUN4I_A10 BIT(6)
-#define PINCTRL_SUN7I_A20 BIT(7)
-#define PINCTRL_SUN8I_R40 BIT(8)
-#define PINCTRL_SUN8I_V3 BIT(9)
-#define PINCTRL_SUN8I_V3S BIT(10)
-/* Variants below here have an updated register layout. */
-#define PINCTRL_SUN20I_D1 BIT(11)
-
-#define PIO_POW_MOD_SEL_REG 0x340
-#define PIO_POW_MOD_CTL_REG 0x344
+#define SUNXI_PINCTRL_VARIANT_MASK GENMASK(7, 0)
+#define SUNXI_PINCTRL_NEW_REG_LAYOUT BIT(8)
+#define SUNXI_PINCTRL_PORTF_SWITCH BIT(9)
+#define SUNXI_PINCTRL_ELEVEN_BANKS BIT(10)
+
+#define PIO_POW_MOD_SEL_REG 0x340
+#define PIO_11B_POW_MOD_SEL_REG 0x380
+#define PIO_POW_MOD_CTL_OFS 0x004
+
+#define PIO_BANK_K_OFFSET 0x500
enum sunxi_desc_bias_voltage {
BIAS_VOLTAGE_NONE,
@@ -164,7 +164,7 @@ struct sunxi_pinctrl {
struct gpio_chip *chip;
const struct sunxi_pinctrl_desc *desc;
struct device *dev;
- struct sunxi_pinctrl_regulator regulators[9];
+ struct sunxi_pinctrl_regulator regulators[11];
struct irq_domain *domain;
struct sunxi_pinctrl_function *functions;
unsigned nfunctions;
@@ -178,6 +178,7 @@ struct sunxi_pinctrl {
u32 bank_mem_size;
u32 pull_regs_offset;
u32 dlevel_field_width;
+ u32 pow_mod_sel_offset;
};
#define SUNXI_PIN(_pin, ...) \
@@ -299,11 +300,17 @@ static inline u32 sunxi_grp_config_reg(u16 pin)
return GRP_CFG_REG + bank * 0x4;
}
-int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
- const struct sunxi_pinctrl_desc *desc,
- unsigned long variant);
+int sunxi_pinctrl_init_with_flags(struct platform_device *pdev,
+ const struct sunxi_pinctrl_desc *desc,
+ unsigned long flags);
#define sunxi_pinctrl_init(_dev, _desc) \
- sunxi_pinctrl_init_with_variant(_dev, _desc, 0)
+ sunxi_pinctrl_init_with_flags(_dev, _desc, 0)
+
+int sunxi_pinctrl_dt_table_init(struct platform_device *pdev,
+ const u8 *pins_per_bank,
+ const u8 *irq_bank_muxes,
+ struct sunxi_pinctrl_desc *desc,
+ unsigned long flags);
#endif /* __PINCTRL_SUNXI_H */
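
To illustrate the reworked entry point above: a SoC driver now ORs an optional variant value into the low byte of a single flags word together with the layout bits, instead of passing a bare variant BIT(). Below is a minimal sketch of such a caller, assuming a hypothetical descriptor and an arbitrary flag combination; neither the driver name nor the descriptor contents are taken from this series.

#include <linux/module.h>
#include <linux/platform_device.h>

#include "pinctrl-sunxi.h"

/* Hypothetical descriptor; a real driver fills in its pin and IRQ tables. */
static const struct sunxi_pinctrl_desc demo_pinctrl_data = {
	.irq_banks = 1,
};

static int demo_pinctrl_probe(struct platform_device *pdev)
{
	/*
	 * The low byte of the flags word may carry a variant value
	 * (SUNXI_PINCTRL_VARIANT_MASK); the higher bits select the register
	 * layout and the bank count handled in sunxi_pinctrl_init_with_flags().
	 */
	return sunxi_pinctrl_init_with_flags(pdev, &demo_pinctrl_data,
					     SUNXI_PINCTRL_NEW_REG_LAYOUT |
					     SUNXI_PINCTRL_ELEVEN_BANKS);
}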
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index c83e5a65e680..11ecbd6a9b2a 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -96,6 +96,7 @@ static const struct cfg_param {
{"nvidia,slew-rate-falling", TEGRA_PINCONF_PARAM_SLEW_RATE_FALLING},
{"nvidia,slew-rate-rising", TEGRA_PINCONF_PARAM_SLEW_RATE_RISING},
{"nvidia,drive-type", TEGRA_PINCONF_PARAM_DRIVE_TYPE},
+ {"nvidia,gpio-mode", TEGRA_PINCONF_PARAM_GPIO_MODE},
};
static int tegra_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
@@ -270,13 +271,16 @@ static int tegra_pinctrl_set_mux(struct pinctrl_dev *pctldev,
val = pmx_readl(pmx, g->mux_bank, g->mux_reg);
val &= ~(0x3 << g->mux_bit);
val |= i << g->mux_bit;
+ /* Set the SFIO/GPIO selection to SFIO when under pinmux control */
+ if (pmx->soc->sfsel_in_mux)
+ val |= (1 << g->sfsel_bit);
pmx_writel(pmx, val, g->mux_bank, g->mux_reg);
return 0;
}
-static const struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *pctldev,
- unsigned int offset)
+static int tegra_pinctrl_get_group_index(struct pinctrl_dev *pctldev,
+ unsigned int offset)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
unsigned int group, num_pins, j;
@@ -289,12 +293,35 @@ static const struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *
continue;
for (j = 0; j < num_pins; j++) {
if (offset == pins[j])
- return &pmx->soc->groups[group];
+ return group;
}
}
- dev_err(pctldev->dev, "Pingroup not found for pin %u\n", offset);
- return NULL;
+ return -EINVAL;
+}
+
+static const struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *pctldev,
+ unsigned int offset,
+ int group_index)
+{
+ struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ if (group_index < 0 || group_index >= pmx->soc->ngroups)
+ return NULL;
+
+ return &pmx->soc->groups[group_index];
+}
+
+static struct tegra_pingroup_config *tegra_pinctrl_get_group_config(struct pinctrl_dev *pctldev,
+ unsigned int offset,
+ int group_index)
+{
+ struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ if (group_index < 0)
+ return NULL;
+
+ return &pmx->pingroup_configs[group_index];
}
static int tegra_pinctrl_gpio_request_enable(struct pinctrl_dev *pctldev,
@@ -303,12 +330,15 @@ static int tegra_pinctrl_gpio_request_enable(struct pinctrl_dev *pctldev,
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
const struct tegra_pingroup *group;
+ struct tegra_pingroup_config *config;
+ int group_index;
u32 value;
if (!pmx->soc->sfsel_in_mux)
return 0;
- group = tegra_pinctrl_get_group(pctldev, offset);
+ group_index = tegra_pinctrl_get_group_index(pctldev, offset);
+ group = tegra_pinctrl_get_group(pctldev, offset, group_index);
if (!group)
return -EINVAL;
@@ -316,7 +346,11 @@ static int tegra_pinctrl_gpio_request_enable(struct pinctrl_dev *pctldev,
if (group->mux_reg < 0 || group->sfsel_bit < 0)
return -EINVAL;
+ config = tegra_pinctrl_get_group_config(pctldev, offset, group_index);
+ if (!config)
+ return -EINVAL;
value = pmx_readl(pmx, group->mux_bank, group->mux_reg);
+ config->is_sfsel = (value & BIT(group->sfsel_bit)) != 0;
value &= ~BIT(group->sfsel_bit);
pmx_writel(pmx, value, group->mux_bank, group->mux_reg);
@@ -329,12 +363,15 @@ static void tegra_pinctrl_gpio_disable_free(struct pinctrl_dev *pctldev,
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
const struct tegra_pingroup *group;
+ struct tegra_pingroup_config *config;
+ int group_index;
u32 value;
if (!pmx->soc->sfsel_in_mux)
return;
- group = tegra_pinctrl_get_group(pctldev, offset);
+ group_index = tegra_pinctrl_get_group_index(pctldev, offset);
+ group = tegra_pinctrl_get_group(pctldev, offset, group_index);
if (!group)
return;
@@ -342,8 +379,12 @@ static void tegra_pinctrl_gpio_disable_free(struct pinctrl_dev *pctldev,
if (group->mux_reg < 0 || group->sfsel_bit < 0)
return;
+ config = tegra_pinctrl_get_group_config(pctldev, offset, group_index);
+ if (!config)
+ return;
value = pmx_readl(pmx, group->mux_bank, group->mux_reg);
- value |= BIT(group->sfsel_bit);
+ if (config->is_sfsel)
+ value |= BIT(group->sfsel_bit);
pmx_writel(pmx, value, group->mux_bank, group->mux_reg);
}
@@ -468,6 +509,16 @@ static int tegra_pinconf_reg(struct tegra_pmx *pmx,
*bit = g->drvtype_bit;
*width = 2;
break;
+ case TEGRA_PINCONF_PARAM_GPIO_MODE:
+ if (pmx->soc->sfsel_in_mux) {
+ *bank = g->mux_bank;
+ *reg = g->mux_reg;
+ *bit = g->sfsel_bit;
+ *width = 1;
+ } else {
+ *reg = -EINVAL;
+ }
+ break;
default:
dev_err(pmx->dev, "Invalid config param %04x\n", param);
return -ENOTSUPP;
@@ -788,6 +839,12 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
pmx->dev = &pdev->dev;
pmx->soc = soc_data;
+ pmx->pingroup_configs = devm_kcalloc(&pdev->dev,
+ pmx->soc->ngroups, sizeof(*pmx->pingroup_configs),
+ GFP_KERNEL);
+ if (!pmx->pingroup_configs)
+ return -ENOMEM;
+
/*
* Each mux group will appear in 4 functions' list of groups.
* This over-allocates slightly, since not all groups are mux groups.
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.h b/drivers/pinctrl/tegra/pinctrl-tegra.h
index b3289bdf727d..bc7b70913b89 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.h
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.h
@@ -8,6 +8,10 @@
#ifndef __PINMUX_TEGRA_H__
#define __PINMUX_TEGRA_H__
+struct tegra_pingroup_config {
+ bool is_sfsel;
+};
+
struct tegra_pmx {
struct device *dev;
struct pinctrl_dev *pctl;
@@ -21,6 +25,8 @@ struct tegra_pmx {
int nbanks;
void __iomem **regs;
u32 *backup_regs;
+ /* Array of size soc->ngroups */
+ struct tegra_pingroup_config *pingroup_configs;
};
enum tegra_pinconf_param {
@@ -54,6 +60,8 @@ enum tegra_pinconf_param {
TEGRA_PINCONF_PARAM_SLEW_RATE_RISING,
/* argument: Integer, range is HW-dependant */
TEGRA_PINCONF_PARAM_DRIVE_TYPE,
+ /* argument: Boolean */
+ TEGRA_PINCONF_PARAM_GPIO_MODE,
};
enum tegra_pinconf_pull {
@@ -176,16 +184,22 @@ struct tegra_pingroup {
/**
* struct tegra_pinctrl_soc_data - Tegra pin controller driver configuration
- * @ngpios: The number of GPIO pins the pin controller HW affects.
- * @pins: An array describing all pins the pin controller affects.
- * All pins which are also GPIOs must be listed first within the
- * array, and be numbered identically to the GPIO controller's
- * numbering.
- * @npins: The numbmer of entries in @pins.
- * @functions: An array describing all mux functions the SoC supports.
- * @nfunctions: The numbmer of entries in @functions.
- * @groups: An array describing all pin groups the pin SoC supports.
- * @ngroups: The numbmer of entries in @groups.
+ * @ngpios: The number of GPIO pins the pin controller HW affects.
+ * @gpio_compatible: Device-tree GPIO compatible string.
+ * @pins: An array describing all pins the pin controller affects.
+ * All pins which are also GPIOs must be listed first within the
+ * array, and be numbered identically to the GPIO controller's
+ * numbering.
+ * @npins: The number of entries in @pins.
+ * @functions: An array describing all mux functions the SoC supports.
+ * @nfunctions: The number of entries in @functions.
+ * @groups: An array describing all pin groups the pin SoC supports.
+ * @ngroups: The number of entries in @groups.
+ * @hsm_in_mux: High-speed mode field. Only applicable to devices with one pin per group.
+ * @schmitt_in_mux: Schmitt trigger field. Only applicable to devices with one pin per group.
+ * @drvtype_in_mux: Drivetype field. Only applicable to devices with one pin per group.
+ * @sfsel_in_mux: Special function selection field.
+ * Only applicable to devices with one pin per group.
*/
struct tegra_pinctrl_soc_data {
unsigned ngpios;
diff --git a/drivers/platform/arm64/Kconfig b/drivers/platform/arm64/Kconfig
index f88395ea3376..0abe5377891b 100644
--- a/drivers/platform/arm64/Kconfig
+++ b/drivers/platform/arm64/Kconfig
@@ -33,6 +33,27 @@ config EC_ACER_ASPIRE1
laptop where this information is not properly exposed via the
standard ACPI devices.
+config EC_HUAWEI_GAOKUN
+ tristate "Huawei Matebook E Go Embedded Controller driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on I2C
+ depends on INPUT
+ depends on HWMON
+ select AUXILIARY_BUS
+
+ help
+ Say Y here to enable the EC driver for the Huawei Matebook E Go
+ which is a sc8280xp-based 2-in-1 tablet. The driver handles battery
+ (information, charge control) and USB Type-C DP HPD events as well
+ as some misc functions like the lid sensor and temperature sensors,
+ etc.
+
+ This driver provides battery and AC status support for this tablet,
+ where this information is not properly exposed via the standard
+ ACPI devices.
+
+ Say M or Y here to include this support.
+
config EC_LENOVO_YOGA_C630
tristate "Lenovo Yoga C630 Embedded Controller driver"
depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/platform/arm64/Makefile b/drivers/platform/arm64/Makefile
index b2ae9114fdd8..46a99eba3264 100644
--- a/drivers/platform/arm64/Makefile
+++ b/drivers/platform/arm64/Makefile
@@ -6,4 +6,5 @@
#
obj-$(CONFIG_EC_ACER_ASPIRE1) += acer-aspire1-ec.o
+obj-$(CONFIG_EC_HUAWEI_GAOKUN) += huawei-gaokun-ec.o
obj-$(CONFIG_EC_LENOVO_YOGA_C630) += lenovo-yoga-c630.o
diff --git a/drivers/platform/arm64/huawei-gaokun-ec.c b/drivers/platform/arm64/huawei-gaokun-ec.c
new file mode 100644
index 000000000000..97c2607f8d9f
--- /dev/null
+++ b/drivers/platform/arm64/huawei-gaokun-ec.c
@@ -0,0 +1,825 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * huawei-gaokun-ec - An EC driver for HUAWEI Matebook E Go
+ *
+ * Copyright (C) 2024-2025 Pengyu Luo <mitltlatltl@gmail.com>
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/cleanup.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/notifier.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_data/huawei-gaokun-ec.h>
+
+#define EC_EVENT 0x06
+
+/* These commands can also be found in the ACPI specification, section 12.3 */
+#define EC_READ 0x80
+#define EC_WRITE 0x81
+#define EC_BURST 0x82
+#define EC_QUERY 0x84
+
+#define EC_FN_LOCK_ON 0x5A
+#define EC_FN_LOCK_OFF 0x55
+#define EC_FN_LOCK_READ 0x6B
+#define EC_FN_LOCK_WRITE 0x6C
+
+#define EC_EVENT_LID 0x81
+
+#define EC_LID_STATE 0x80
+#define EC_LID_OPEN BIT(1)
+
+#define EC_TEMP_REG 0x61
+
+#define EC_STANDBY_REG 0xB2
+#define EC_STANDBY_ENTER 0xDB
+#define EC_STANDBY_EXIT 0xEB
+
+enum gaokun_ec_smart_charge_cmd {
+ SMART_CHARGE_DATA_WRITE = 0xE3,
+ SMART_CHARGE_DATA_READ,
+ SMART_CHARGE_ENABLE_WRITE,
+ SMART_CHARGE_ENABLE_READ,
+};
+
+enum gaokun_ec_ucsi_cmd {
+ UCSI_REG_WRITE = 0xD2,
+ UCSI_REG_READ,
+ UCSI_DATA_WRITE,
+ UCSI_DATA_READ,
+};
+
+#define UCSI_REG_SIZE 7
+
+/*
+ * For tx, command sequences are arranged as
+ * {master_cmd, slave_cmd, data_len, data_seq}
+ */
+#define REQ_HDR_SIZE 3
+#define INPUT_SIZE_OFFSET 2
+#define REQ_LEN(req) (REQ_HDR_SIZE + (req)[INPUT_SIZE_OFFSET])
+
+/*
+ * For rx, data sequences are arranged as
+ * {status, data_len(unreliable), data_seq}
+ */
+#define RESP_HDR_SIZE 2
+
+#define MKREQ(REG0, REG1, SIZE, ...) \
+{ \
+ REG0, REG1, SIZE, \
+ /* ## will remove comma when SIZE is 0 */ \
+ ## __VA_ARGS__, \
+ /* make sure len(pkt[3:]) >= SIZE */ \
+ [3 + (SIZE)] = 0, \
+}
+
+#define MKRESP(SIZE) \
+{ \
+ [RESP_HDR_SIZE + (SIZE) - 1] = 0, \
+}
+
+/* Possible size 1, 4, 20, 24. Most of the time, the size is 1. */
+static inline void refill_req(u8 *dest, const u8 *src, size_t size)
+{
+ memcpy(dest + REQ_HDR_SIZE, src, size);
+}
+
+static inline void refill_req_byte(u8 *dest, const u8 *src)
+{
+ dest[REQ_HDR_SIZE] = *src;
+}
+
+/* Possible size 1, 2, 4, 7, 20. Most of the time, the size is 1. */
+static inline void extr_resp(u8 *dest, const u8 *src, size_t size)
+{
+ memcpy(dest, src + RESP_HDR_SIZE, size);
+}
+
+static inline void extr_resp_byte(u8 *dest, const u8 *src)
+{
+ *dest = src[RESP_HDR_SIZE];
+}
+
+static inline void *extr_resp_shallow(const u8 *src)
+{
+ return (void *)(src + RESP_HDR_SIZE);
+}
+
+struct gaokun_ec {
+ struct i2c_client *client;
+ struct mutex lock; /* EC transaction lock */
+ struct blocking_notifier_head notifier_list;
+ struct device *hwmon_dev;
+ struct input_dev *idev;
+ bool suspended;
+};
+
+static int gaokun_ec_request(struct gaokun_ec *ec, const u8 *req,
+ size_t resp_len, u8 *resp)
+{
+ struct i2c_client *client = ec->client;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = client->flags,
+ .len = REQ_LEN(req),
+ .buf = (void *)req,
+ }, {
+ .addr = client->addr,
+ .flags = client->flags | I2C_M_RD,
+ .len = resp_len,
+ .buf = resp,
+ },
+ };
+ int ret;
+
+ guard(mutex)(&ec->lock);
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs)) {
+ dev_err(&client->dev, "I2C transfer error %d\n", ret);
+ goto out_after_break;
+ }
+
+ ret = *resp;
+ if (ret)
+ dev_err(&client->dev, "EC transaction error %d\n", ret);
+
+out_after_break:
+ usleep_range(2000, 2500); /* give the EC a break between transactions, as ACPI does */
+
+ return ret;
+}
+
+/* -------------------------------------------------------------------------- */
+/* Common API */
+
+/**
+ * gaokun_ec_read - Read from EC
+ * @ec: The gaokun_ec structure
+ * @req: The sequence to request
+ * @resp_len: The size to read
+ * @resp: The buffer to store response sequence
+ *
+ * This function is used to read data after writing a magic sequence to EC.
+ * All EC operations depend on this function.
+ *
+ * Huawei uses magic sequences everywhere to implement various functions. All
+ * these sequences are passed to ECCD (an ACPI method which is quite similar
+ * to gaokun_ec_request); there is no good abstraction to generalize these
+ * sequences, so just wrap them for now. Almost all magic sequences are kept
+ * in this file.
+ *
+ * Return: 0 on success or negative error code.
+ */
+int gaokun_ec_read(struct gaokun_ec *ec, const u8 *req,
+ size_t resp_len, u8 *resp)
+{
+ return gaokun_ec_request(ec, req, resp_len, resp);
+}
+EXPORT_SYMBOL_GPL(gaokun_ec_read);
+
+/**
+ * gaokun_ec_write - Write to EC
+ * @ec: The gaokun_ec structure
+ * @req: The sequence to request
+ *
+ * This function is not much different from gaokun_ec_read. Use it when the
+ * caller only cares about the write status and no actual data is returned.
+ *
+ * Return: 0 on success or negative error code.
+ */
+int gaokun_ec_write(struct gaokun_ec *ec, const u8 *req)
+{
+ u8 ec_resp[] = MKRESP(0);
+
+ return gaokun_ec_request(ec, req, sizeof(ec_resp), ec_resp);
+}
+EXPORT_SYMBOL_GPL(gaokun_ec_write);
+
+int gaokun_ec_read_byte(struct gaokun_ec *ec, const u8 *req, u8 *byte)
+{
+ int ret;
+ u8 ec_resp[] = MKRESP(sizeof(*byte));
+
+ ret = gaokun_ec_read(ec, req, sizeof(ec_resp), ec_resp);
+ extr_resp_byte(byte, ec_resp);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gaokun_ec_read_byte);
+
+/**
+ * gaokun_ec_register_notify - Register a notifier callback for EC events.
+ * @ec: The gaokun_ec structure
+ * @nb: Notifier block pointer to register
+ *
+ * Return: 0 on success or negative error code.
+ */
+int gaokun_ec_register_notify(struct gaokun_ec *ec, struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&ec->notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(gaokun_ec_register_notify);
+
+/**
+ * gaokun_ec_unregister_notify - Unregister notifier callback for EC events.
+ * @ec: The gaokun_ec structure
+ * @nb: Notifier block pointer to unregister
+ *
+ * Unregister a notifier callback that was previously registered with
+ * gaokun_ec_register_notify().
+ */
+void gaokun_ec_unregister_notify(struct gaokun_ec *ec, struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&ec->notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(gaokun_ec_unregister_notify);
+
+/* -------------------------------------------------------------------------- */
+/* API for PSY */
+
+/**
+ * gaokun_ec_psy_multi_read - Read contiguous registers
+ * @ec: The gaokun_ec structure
+ * @reg: The start register
+ * @resp_len: The number of registers to be read
+ * @resp: The buffer to store response sequence
+ *
+ * Return: 0 on success or negative error code.
+ */
+int gaokun_ec_psy_multi_read(struct gaokun_ec *ec, u8 reg,
+ size_t resp_len, u8 *resp)
+{
+ u8 ec_req[] = MKREQ(0x02, EC_READ, 1, 0);
+ u8 ec_resp[] = MKRESP(1);
+ int i, ret;
+
+ for (i = 0; i < resp_len; ++i, reg++) {
+ refill_req_byte(ec_req, &reg);
+ ret = gaokun_ec_read(ec, ec_req, sizeof(ec_resp), ec_resp);
+ if (ret)
+ return ret;
+ extr_resp_byte(&resp[i], ec_resp);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gaokun_ec_psy_multi_read);
+
+/* Smart charge */
+
+/**
+ * gaokun_ec_psy_get_smart_charge - Get smart charge data from EC
+ * @ec: The gaokun_ec structure
+ * @resp: The buffer to store response sequence (mode, delay, start, end)
+ *
+ * Return: 0 on success or negative error code.
+ */
+int gaokun_ec_psy_get_smart_charge(struct gaokun_ec *ec,
+ u8 resp[GAOKUN_SMART_CHARGE_DATA_SIZE])
+{
+ /* GBCM */
+ u8 ec_req[] = MKREQ(0x02, SMART_CHARGE_DATA_READ, 0);
+ u8 ec_resp[] = MKRESP(GAOKUN_SMART_CHARGE_DATA_SIZE);
+ int ret;
+
+ ret = gaokun_ec_read(ec, ec_req, sizeof(ec_resp), ec_resp);
+ if (ret)
+ return ret;
+
+ extr_resp(resp, ec_resp, GAOKUN_SMART_CHARGE_DATA_SIZE);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gaokun_ec_psy_get_smart_charge);
+
+static inline bool validate_battery_threshold_range(u8 start, u8 end)
+{
+ return end != 0 && start <= end && end <= 100;
+}
+
+/**
+ * gaokun_ec_psy_set_smart_charge - Set smart charge data
+ * @ec: The gaokun_ec structure
+ * @req: The sequence to request (mode, delay, start, end)
+ *
+ * Return: 0 on success or negative error code.
+ */
+int gaokun_ec_psy_set_smart_charge(struct gaokun_ec *ec,
+ const u8 req[GAOKUN_SMART_CHARGE_DATA_SIZE])
+{
+ /* SBCM */
+ u8 ec_req[] = MKREQ(0x02, SMART_CHARGE_DATA_WRITE,
+ GAOKUN_SMART_CHARGE_DATA_SIZE);
+
+ if (!validate_battery_threshold_range(req[2], req[3]))
+ return -EINVAL;
+
+ refill_req(ec_req, req, GAOKUN_SMART_CHARGE_DATA_SIZE);
+
+ return gaokun_ec_write(ec, ec_req);
+}
+EXPORT_SYMBOL_GPL(gaokun_ec_psy_set_smart_charge);
+
+/* Smart charge enable */
+
+/**
+ * gaokun_ec_psy_get_smart_charge_enable - Get smart charge state
+ * @ec: The gaokun_ec structure
+ * @on: The state
+ *
+ * Return: 0 on success or negative error code.
+ */
+int gaokun_ec_psy_get_smart_charge_enable(struct gaokun_ec *ec, bool *on)
+{
+ /* GBAC */
+ u8 ec_req[] = MKREQ(0x02, SMART_CHARGE_ENABLE_READ, 0);
+ u8 state;
+ int ret;
+
+ ret = gaokun_ec_read_byte(ec, ec_req, &state);
+ if (ret)
+ return ret;
+
+ *on = !!state;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gaokun_ec_psy_get_smart_charge_enable);
+
+/**
+ * gaokun_ec_psy_set_smart_charge_enable - Set smart charge state
+ * @ec: The gaokun_ec structure
+ * @on: The state
+ *
+ * Return: 0 on success or negative error code.
+ */
+int gaokun_ec_psy_set_smart_charge_enable(struct gaokun_ec *ec, bool on)
+{
+ /* SBAC */
+ u8 ec_req[] = MKREQ(0x02, SMART_CHARGE_ENABLE_WRITE, 1, on);
+
+ return gaokun_ec_write(ec, ec_req);
+}
+EXPORT_SYMBOL_GPL(gaokun_ec_psy_set_smart_charge_enable);
+
+/* -------------------------------------------------------------------------- */
+/* API for UCSI */
+
+/**
+ * gaokun_ec_ucsi_read - Read UCSI data from EC
+ * @ec: The gaokun_ec structure
+ * @resp: The buffer to store response sequence
+ *
+ * Read CCI and MSGI (used by UCSI subdriver).
+ *
+ * Return: 0 on success or negative error code.
+ */
+int gaokun_ec_ucsi_read(struct gaokun_ec *ec,
+ u8 resp[GAOKUN_UCSI_READ_SIZE])
+{
+ u8 ec_req[] = MKREQ(0x03, UCSI_DATA_READ, 0);
+ u8 ec_resp[] = MKRESP(GAOKUN_UCSI_READ_SIZE);
+ int ret;
+
+ ret = gaokun_ec_read(ec, ec_req, sizeof(ec_resp), ec_resp);
+ if (ret)
+ return ret;
+
+ extr_resp(resp, ec_resp, GAOKUN_UCSI_READ_SIZE);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gaokun_ec_ucsi_read);
+
+/**
+ * gaokun_ec_ucsi_write - Write UCSI data to EC
+ * @ec: The gaokun_ec structure
+ * @req: The sequence to request
+ *
+ * Write CTRL and MSGO (used by UCSI subdriver).
+ *
+ * Return: 0 on success or negative error code.
+ */
+int gaokun_ec_ucsi_write(struct gaokun_ec *ec,
+ const u8 req[GAOKUN_UCSI_WRITE_SIZE])
+{
+ u8 ec_req[] = MKREQ(0x03, UCSI_DATA_WRITE, GAOKUN_UCSI_WRITE_SIZE);
+
+ refill_req(ec_req, req, GAOKUN_UCSI_WRITE_SIZE);
+
+ return gaokun_ec_write(ec, ec_req);
+}
+EXPORT_SYMBOL_GPL(gaokun_ec_ucsi_write);
+
+/**
+ * gaokun_ec_ucsi_get_reg - Get UCSI register from EC
+ * @ec: The gaokun_ec structure
+ * @ureg: The gaokun ucsi register
+ *
+ * Get UCSI register data (used by UCSI subdriver).
+ *
+ * Return: 0 on success or negative error code.
+ */
+int gaokun_ec_ucsi_get_reg(struct gaokun_ec *ec, struct gaokun_ucsi_reg *ureg)
+{
+ u8 ec_req[] = MKREQ(0x03, UCSI_REG_READ, 0);
+ u8 ec_resp[] = MKRESP(UCSI_REG_SIZE);
+ int ret;
+
+ ret = gaokun_ec_read(ec, ec_req, sizeof(ec_resp), ec_resp);
+ if (ret)
+ return ret;
+
+ extr_resp((u8 *)ureg, ec_resp, UCSI_REG_SIZE);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gaokun_ec_ucsi_get_reg);
+
+/**
+ * gaokun_ec_ucsi_pan_ack - Ack pin assignment notifications from EC
+ * @ec: The gaokun_ec structure
+ * @port_id: The port id receiving and handling the notifications
+ *
+ * Ack pin assignment notifications (used by UCSI subdriver).
+ *
+ * Return: 0 on success or negative error code.
+ */
+int gaokun_ec_ucsi_pan_ack(struct gaokun_ec *ec, int port_id)
+{
+ u8 ec_req[] = MKREQ(0x03, UCSI_REG_WRITE, 1);
+ u8 data = 1 << port_id;
+
+ if (port_id == GAOKUN_UCSI_NO_PORT_UPDATE)
+ data = 0;
+
+ refill_req_byte(ec_req, &data);
+
+ return gaokun_ec_write(ec, ec_req);
+}
+EXPORT_SYMBOL_GPL(gaokun_ec_ucsi_pan_ack);
+
+/* -------------------------------------------------------------------------- */
+/* EC Sysfs */
+
+/* Fn lock */
+static int gaokun_ec_get_fn_lock(struct gaokun_ec *ec, bool *on)
+{
+ /* GFRS */
+ u8 ec_req[] = MKREQ(0x02, EC_FN_LOCK_READ, 0);
+ int ret;
+ u8 state;
+
+ ret = gaokun_ec_read_byte(ec, ec_req, &state);
+ if (ret)
+ return ret;
+
+ if (state == EC_FN_LOCK_ON)
+ *on = true;
+ else if (state == EC_FN_LOCK_OFF)
+ *on = false;
+ else
+ return -EIO;
+
+ return 0;
+}
+
+static int gaokun_ec_set_fn_lock(struct gaokun_ec *ec, bool on)
+{
+ /* SFRS */
+ u8 ec_req[] = MKREQ(0x02, EC_FN_LOCK_WRITE, 1,
+ on ? EC_FN_LOCK_ON : EC_FN_LOCK_OFF);
+
+ return gaokun_ec_write(ec, ec_req);
+}
+
+static ssize_t fn_lock_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct gaokun_ec *ec = dev_get_drvdata(dev);
+ bool on;
+ int ret;
+
+ ret = gaokun_ec_get_fn_lock(ec, &on);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", on);
+}
+
+static ssize_t fn_lock_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct gaokun_ec *ec = dev_get_drvdata(dev);
+ bool on;
+ int ret;
+
+ if (kstrtobool(buf, &on))
+ return -EINVAL;
+
+ ret = gaokun_ec_set_fn_lock(ec, on);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(fn_lock);
+
+static struct attribute *gaokun_ec_attrs[] = {
+ &dev_attr_fn_lock.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(gaokun_ec);
+
+/* -------------------------------------------------------------------------- */
+/* Thermal Zone HwMon */
+
+/* Range from 0 to 0x2C, partially valid */
+static const u8 temp_reg[] = {
+ 0x05, 0x07, 0x08, 0x0E, 0x0F, 0x12, 0x15, 0x1E,
+ 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26,
+ 0x27, 0x28, 0x29, 0x2A
+};
+
+static int gaokun_ec_get_temp(struct gaokun_ec *ec, u8 idx, long *temp)
+{
+ /* GTMP */
+ u8 ec_req[] = MKREQ(0x02, EC_TEMP_REG, 1, temp_reg[idx]);
+ u8 ec_resp[] = MKRESP(sizeof(__le16));
+ __le16 *tmp;
+ int ret;
+
+ ret = gaokun_ec_read(ec, ec_req, sizeof(ec_resp), ec_resp);
+ if (ret)
+ return ret;
+
+ tmp = (__le16 *)extr_resp_shallow(ec_resp);
+ *temp = le16_to_cpu(*tmp) * 100; /* convert to HwMon's unit */
+
+ return 0;
+}
+
+static umode_t
+gaokun_ec_hwmon_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ return type == hwmon_temp ? 0444 : 0;
+}
+
+static int
+gaokun_ec_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct gaokun_ec *ec = dev_get_drvdata(dev);
+
+ if (type == hwmon_temp)
+ return gaokun_ec_get_temp(ec, channel, val);
+
+ return -EINVAL;
+}
+
+static const struct hwmon_ops gaokun_ec_hwmon_ops = {
+ .is_visible = gaokun_ec_hwmon_is_visible,
+ .read = gaokun_ec_hwmon_read,
+};
+
+static u32 gaokun_ec_temp_config[] = {
+ [0 ... ARRAY_SIZE(temp_reg) - 1] = HWMON_T_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info gaokun_ec_temp = {
+ .type = hwmon_temp,
+ .config = gaokun_ec_temp_config,
+};
+
+static const struct hwmon_channel_info * const gaokun_ec_hwmon_info[] = {
+ &gaokun_ec_temp,
+ NULL
+};
+
+static const struct hwmon_chip_info gaokun_ec_hwmon_chip_info = {
+ .ops = &gaokun_ec_hwmon_ops,
+ .info = gaokun_ec_hwmon_info,
+};
+
+/* -------------------------------------------------------------------------- */
+/* Modern Standby */
+
+static int gaokun_ec_suspend(struct device *dev)
+{
+ struct gaokun_ec *ec = dev_get_drvdata(dev);
+ u8 ec_req[] = MKREQ(0x02, EC_STANDBY_REG, 1, EC_STANDBY_ENTER);
+ int ret;
+
+ if (ec->suspended)
+ return 0;
+
+ ret = gaokun_ec_write(ec, ec_req);
+ if (ret)
+ return ret;
+
+ ec->suspended = true;
+
+ return 0;
+}
+
+static int gaokun_ec_resume(struct device *dev)
+{
+ struct gaokun_ec *ec = dev_get_drvdata(dev);
+ u8 ec_req[] = MKREQ(0x02, EC_STANDBY_REG, 1, EC_STANDBY_EXIT);
+ int ret;
+ int i;
+
+ if (!ec->suspended)
+ return 0;
+
+ for (i = 0; i < 3; ++i) {
+ ret = gaokun_ec_write(ec, ec_req);
+ if (ret == 0)
+ break;
+
+ msleep(100); /* the EC needs some time to resume */
+ }
+
+ ec->suspended = false;
+
+ return 0;
+}
+
+static void gaokun_aux_release(struct device *dev)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+
+ kfree(adev);
+}
+
+static void gaokun_aux_remove(void *data)
+{
+ struct auxiliary_device *adev = data;
+
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+}
+
+static int gaokun_aux_init(struct device *parent, const char *name,
+ struct gaokun_ec *ec)
+{
+ struct auxiliary_device *adev;
+ int ret;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+
+ adev->name = name;
+ adev->id = 0;
+ adev->dev.parent = parent;
+ adev->dev.release = gaokun_aux_release;
+ adev->dev.platform_data = ec;
+ /* Allow aux devices to access parent's DT nodes directly */
+ device_set_of_node_from_dev(&adev->dev, parent);
+
+ ret = auxiliary_device_init(adev);
+ if (ret) {
+ kfree(adev);
+ return ret;
+ }
+
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+ auxiliary_device_uninit(adev);
+ return ret;
+ }
+
+ return devm_add_action_or_reset(parent, gaokun_aux_remove, adev);
+}
+
+/* -------------------------------------------------------------------------- */
+/* EC */
+
+static irqreturn_t gaokun_ec_irq_handler(int irq, void *data)
+{
+ struct gaokun_ec *ec = data;
+ u8 ec_req[] = MKREQ(EC_EVENT, EC_QUERY, 0);
+ u8 status, id;
+ int ret;
+
+ ret = gaokun_ec_read_byte(ec, ec_req, &id);
+ if (ret)
+ return IRQ_HANDLED;
+
+ switch (id) {
+ case 0x0: /* No event */
+ break;
+
+ case EC_EVENT_LID:
+ gaokun_ec_psy_read_byte(ec, EC_LID_STATE, &status);
+ status &= EC_LID_OPEN;
+ input_report_switch(ec->idev, SW_LID, !status);
+ input_sync(ec->idev);
+ break;
+
+ default:
+ blocking_notifier_call_chain(&ec->notifier_list, id, ec);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int gaokun_ec_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct gaokun_ec *ec;
+ int ret;
+
+ ec = devm_kzalloc(dev, sizeof(*ec), GFP_KERNEL);
+ if (!ec)
+ return -ENOMEM;
+
+ ret = devm_mutex_init(dev, &ec->lock);
+ if (ret)
+ return ret;
+
+ ec->client = client;
+ i2c_set_clientdata(client, ec);
+ BLOCKING_INIT_NOTIFIER_HEAD(&ec->notifier_list);
+
+ /* Lid switch */
+ ec->idev = devm_input_allocate_device(dev);
+ if (!ec->idev)
+ return -ENOMEM;
+
+ ec->idev->name = "LID";
+ ec->idev->phys = "gaokun-ec/input0";
+ input_set_capability(ec->idev, EV_SW, SW_LID);
+
+ ret = input_register_device(ec->idev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register input device\n");
+
+ ret = gaokun_aux_init(dev, GAOKUN_DEV_PSY, ec);
+ if (ret)
+ return ret;
+
+ ret = gaokun_aux_init(dev, GAOKUN_DEV_UCSI, ec);
+ if (ret)
+ return ret;
+
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ gaokun_ec_irq_handler, IRQF_ONESHOT,
+ dev_name(dev), ec);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request IRQ\n");
+
+ ec->hwmon_dev = devm_hwmon_device_register_with_info(dev, "gaokun_ec_hwmon",
+ ec, &gaokun_ec_hwmon_chip_info, NULL);
+ if (IS_ERR(ec->hwmon_dev))
+ return dev_err_probe(dev, PTR_ERR(ec->hwmon_dev),
+ "Failed to register hwmon device\n");
+
+ return 0;
+}
+
+static const struct i2c_device_id gaokun_ec_id[] = {
+ { "gaokun-ec", },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, gaokun_ec_id);
+
+static const struct of_device_id gaokun_ec_of_match[] = {
+ { .compatible = "huawei,gaokun3-ec", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gaokun_ec_of_match);
+
+static const struct dev_pm_ops gaokun_ec_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(gaokun_ec_suspend, gaokun_ec_resume)
+};
+
+static struct i2c_driver gaokun_ec_driver = {
+ .driver = {
+ .name = "gaokun-ec",
+ .of_match_table = gaokun_ec_of_match,
+ .pm = &gaokun_ec_pm_ops,
+ .dev_groups = gaokun_ec_groups,
+ },
+ .probe = gaokun_ec_probe,
+ .id_table = gaokun_ec_id,
+};
+module_i2c_driver(gaokun_ec_driver);
+
+MODULE_DESCRIPTION("HUAWEI Matebook E Go EC driver");
+MODULE_AUTHOR("Pengyu Luo <mitltlatltl@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index 5a2f1d98b350..be319949b941 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -30,6 +30,7 @@
#define DRV_NAME "cros_ec_lpcs"
#define ACPI_DRV_NAME "GOOG0004"
+#define FRMW_ACPI_DRV_NAME "FRMWC004"
/* True if ACPI device is present */
static bool cros_ec_lpc_acpi_device_found;
@@ -514,7 +515,7 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
acpi_status status;
struct cros_ec_device *ec_dev;
struct cros_ec_lpc *ec_lpc;
- struct lpc_driver_data *driver_data;
+ const struct lpc_driver_data *driver_data;
u8 buf[2] = {};
int irq, ret;
u32 quirks;
@@ -526,6 +527,9 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
ec_lpc->mmio_memory_base = EC_LPC_ADDR_MEMMAP;
driver_data = platform_get_drvdata(pdev);
+ if (!driver_data)
+ driver_data = acpi_device_get_match_data(dev);
+
if (driver_data) {
quirks = driver_data->quirks;
@@ -696,12 +700,6 @@ static void cros_ec_lpc_remove(struct platform_device *pdev)
cros_ec_unregister(ec_dev);
}
-static const struct acpi_device_id cros_ec_lpc_acpi_device_ids[] = {
- { ACPI_DRV_NAME, 0 },
- { }
-};
-MODULE_DEVICE_TABLE(acpi, cros_ec_lpc_acpi_device_ids);
-
static const struct lpc_driver_data framework_laptop_npcx_lpc_driver_data __initconst = {
.quirks = CROS_EC_LPC_QUIRK_REMAP_MEMORY,
.quirk_mmio_memory_base = 0xE00,
@@ -713,6 +711,13 @@ static const struct lpc_driver_data framework_laptop_mec_lpc_driver_data __initc
.quirk_aml_mutex_name = "ECMT",
};
+static const struct acpi_device_id cros_ec_lpc_acpi_device_ids[] = {
+ { ACPI_DRV_NAME, 0 },
+ { FRMW_ACPI_DRV_NAME, (kernel_ulong_t)&framework_laptop_npcx_lpc_driver_data },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cros_ec_lpc_acpi_device_ids);
+
static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = {
{
/*
@@ -866,7 +871,8 @@ static int __init cros_ec_lpc_init(void)
int ret;
const struct dmi_system_id *dmi_match;
- cros_ec_lpc_acpi_device_found = !!cros_ec_lpc_get_device(ACPI_DRV_NAME);
+ cros_ec_lpc_acpi_device_found = !!cros_ec_lpc_get_device(ACPI_DRV_NAME) ||
+ !!cros_ec_lpc_get_device(FRMW_ACPI_DRV_NAME);
dmi_match = dmi_first_match(cros_ec_lpc_dmi_table);
diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c
index bc1a5ba09528..f22e9523da3e 100644
--- a/drivers/platform/chrome/cros_ec_sysfs.c
+++ b/drivers/platform/chrome/cros_ec_sysfs.c
@@ -296,18 +296,81 @@ static ssize_t kb_wake_angle_store(struct device *dev,
return count;
}
+static ssize_t usbpdmuxinfo_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
+ ssize_t count = 0;
+ struct ec_response_usb_pd_ports resp_pd_ports;
+ int ret;
+ int i;
+
+ ret = cros_ec_cmd(ec->ec_dev, 0, EC_CMD_USB_PD_PORTS, NULL, 0,
+ &resp_pd_ports, sizeof(resp_pd_ports));
+ if (ret < 0)
+ return -EIO;
+
+ for (i = 0; i < resp_pd_ports.num_ports; i++) {
+ struct ec_response_usb_pd_mux_info resp_mux;
+ struct ec_params_usb_pd_mux_info req = {
+ .port = i,
+ };
+
+ ret = cros_ec_cmd(ec->ec_dev, 0, EC_CMD_USB_PD_MUX_INFO,
+ &req, sizeof(req), &resp_mux, sizeof(resp_mux));
+
+ if (ret >= 0) {
+ count += sysfs_emit_at(buf, count, "Port %d:", i);
+ count += sysfs_emit_at(buf, count, " USB=%d",
+ !!(resp_mux.flags & USB_PD_MUX_USB_ENABLED));
+ count += sysfs_emit_at(buf, count, " DP=%d",
+ !!(resp_mux.flags & USB_PD_MUX_DP_ENABLED));
+ count += sysfs_emit_at(buf, count, " POLARITY=%s",
+ (resp_mux.flags & USB_PD_MUX_POLARITY_INVERTED) ?
+ "INVERTED" : "NORMAL");
+ count += sysfs_emit_at(buf, count, " HPD_IRQ=%d",
+ !!(resp_mux.flags & USB_PD_MUX_HPD_IRQ));
+ count += sysfs_emit_at(buf, count, " HPD_LVL=%d",
+ !!(resp_mux.flags & USB_PD_MUX_HPD_LVL));
+ count += sysfs_emit_at(buf, count, " SAFE=%d",
+ !!(resp_mux.flags & USB_PD_MUX_SAFE_MODE));
+ count += sysfs_emit_at(buf, count, " TBT=%d",
+ !!(resp_mux.flags & USB_PD_MUX_TBT_COMPAT_ENABLED));
+ count += sysfs_emit_at(buf, count, " USB4=%d\n",
+ !!(resp_mux.flags & USB_PD_MUX_USB4_ENABLED));
+ }
+ }
+
+ return count ? : -EIO;
+}
+
+static ssize_t ap_mode_entry_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
+ const bool ap_driven_altmode = cros_ec_check_features(
+ ec, EC_FEATURE_TYPEC_REQUIRE_AP_MODE_ENTRY);
+
+ return sysfs_emit(buf, "%s\n", ap_driven_altmode ? "yes" : "no");
+}
+
/* Module initialization */
static DEVICE_ATTR_RW(reboot);
static DEVICE_ATTR_RO(version);
static DEVICE_ATTR_RO(flashinfo);
static DEVICE_ATTR_RW(kb_wake_angle);
+static DEVICE_ATTR_RO(usbpdmuxinfo);
+static DEVICE_ATTR_RO(ap_mode_entry);
static struct attribute *__ec_attrs[] = {
&dev_attr_kb_wake_angle.attr,
&dev_attr_reboot.attr,
&dev_attr_version.attr,
&dev_attr_flashinfo.attr,
+ &dev_attr_usbpdmuxinfo.attr,
+ &dev_attr_ap_mode_entry.attr,
NULL,
};
@@ -320,6 +383,14 @@ static umode_t cros_ec_ctrl_visible(struct kobject *kobj,
if (a == &dev_attr_kb_wake_angle.attr && !ec->has_kb_wake_angle)
return 0;
+ if (a == &dev_attr_usbpdmuxinfo.attr ||
+ a == &dev_attr_ap_mode_entry.attr) {
+ struct cros_ec_platform *ec_platform = dev_get_platdata(ec->dev);
+
+ if (strcmp(ec_platform->ec_name, CROS_EC_DEV_NAME))
+ return 0;
+ }
+
return a->mode;
}
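
The new usbpdmuxinfo attribute emits one line per PD port, built from the sysfs_emit_at() calls above; on a hypothetical two-port system a read would be shaped like the following (values are illustrative only):

Port 0: USB=1 DP=0 POLARITY=NORMAL HPD_IRQ=0 HPD_LVL=0 SAFE=0 TBT=0 USB4=0
Port 1: USB=0 DP=1 POLARITY=INVERTED HPD_IRQ=0 HPD_LVL=1 SAFE=0 TBT=0 USB4=0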
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index 6ee182101bc9..d2228720991f 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -42,6 +42,24 @@ static void cros_typec_role_switch_quirk(struct fwnode_handle *fwnode)
#endif
}
+static int cros_typec_enter_usb_mode(struct typec_port *tc_port, enum usb_mode mode)
+{
+ struct cros_typec_port *port = typec_get_drvdata(tc_port);
+ struct ec_params_typec_control req = {
+ .port = port->port_num,
+ .command = (mode == USB_MODE_USB4) ?
+ TYPEC_CONTROL_COMMAND_ENTER_MODE : TYPEC_CONTROL_COMMAND_EXIT_MODES,
+ .mode_to_enter = CROS_EC_ALTMODE_USB4
+ };
+
+ return cros_ec_cmd(port->typec_data->ec, 0, EC_CMD_TYPEC_CONTROL,
+ &req, sizeof(req), NULL, 0);
+}
+
+static const struct typec_operations cros_typec_usb_mode_ops = {
+ .enter_usb_mode = cros_typec_enter_usb_mode
+};
+
static int cros_typec_parse_port_props(struct typec_capability *cap,
struct fwnode_handle *fwnode,
struct device *dev)
@@ -84,6 +102,13 @@ static int cros_typec_parse_port_props(struct typec_capability *cap,
cap->prefer_role = ret;
}
+ if (fwnode_property_present(fwnode, "usb2-port"))
+ cap->usb_capability |= USB_CAPABILITY_USB2;
+ if (fwnode_property_present(fwnode, "usb3-port"))
+ cap->usb_capability |= USB_CAPABILITY_USB3;
+ if (fwnode_property_present(fwnode, "usb4-port"))
+ cap->usb_capability |= USB_CAPABILITY_USB4;
+
cros_typec_role_switch_quirk(fwnode);
cap->fwnode = fwnode;
@@ -379,6 +404,9 @@ static int cros_typec_init_ports(struct cros_typec_data *typec)
if (ret < 0)
goto unregister_ports;
+ cap->driver_data = cros_port;
+ cap->ops = &cros_typec_usb_mode_ops;
+
cros_port->port = typec_register_port(dev, cap);
if (IS_ERR(cros_port->port)) {
ret = PTR_ERR(cros_port->port);
diff --git a/drivers/platform/chrome/cros_ec_typec.h b/drivers/platform/chrome/cros_ec_typec.h
index 9fd5342bb0ad..f9c31f04c102 100644
--- a/drivers/platform/chrome/cros_ec_typec.h
+++ b/drivers/platform/chrome/cros_ec_typec.h
@@ -18,6 +18,7 @@
enum {
CROS_EC_ALTMODE_DP = 0,
CROS_EC_ALTMODE_TBT,
+ CROS_EC_ALTMODE_USB4,
CROS_EC_ALTMODE_MAX,
};
diff --git a/drivers/platform/mellanox/Kconfig b/drivers/platform/mellanox/Kconfig
index f7dfa0e785fd..aa760f064a17 100644
--- a/drivers/platform/mellanox/Kconfig
+++ b/drivers/platform/mellanox/Kconfig
@@ -14,6 +14,19 @@ menuconfig MELLANOX_PLATFORM
if MELLANOX_PLATFORM
+config MLX_PLATFORM
+ tristate "Mellanox Technologies platform support"
+ depends on ACPI && I2C && PCI
+ select REGMAP
+ help
+ This option enables system support for the Mellanox Technologies
+ platform. The Mellanox systems provide data center networking
+ solutions based on Virtual Protocol Interconnect (VPI) technology
+ enable seamless connectivity to 56/100Gb/s InfiniBand or 10/40/56GbE
+ connection.
+
+ If you have a Mellanox system, say Y or M here.
+
config MLXREG_HOTPLUG
tristate "Mellanox platform hotplug driver support"
depends on HWMON
diff --git a/drivers/platform/mellanox/Makefile b/drivers/platform/mellanox/Makefile
index 04703c0416b1..ba56485cbe8c 100644
--- a/drivers/platform/mellanox/Makefile
+++ b/drivers/platform/mellanox/Makefile
@@ -3,6 +3,7 @@
# Makefile for linux/drivers/platform/mellanox
# Mellanox Platform-Specific Drivers
#
+obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o
obj-$(CONFIG_MLXBF_BOOTCTL) += mlxbf-bootctl.o
obj-$(CONFIG_MLXBF_PMC) += mlxbf-pmc.o
obj-$(CONFIG_MLXBF_TMFIFO) += mlxbf-tmfifo.o
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/mellanox/mlx-platform.c
index 9c7f30a47f1f..08b0430a2899 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/mellanox/mlx-platform.c
@@ -145,7 +145,7 @@
#define MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET 0xd1
#define MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET 0xd2
#define MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET 0xd3
-#define MLXPLAT_CPLD_LPC_REG_DBG_CTRL_OFFSET 0xd9
+#define MLXPLAT_CPLD_LPC_REG_CPLD6_MVER_OFFSET 0xd9
#define MLXPLAT_CPLD_LPC_REG_I2C_CH1_OFFSET 0xdb
#define MLXPLAT_CPLD_LPC_REG_I2C_CH2_OFFSET 0xda
#define MLXPLAT_CPLD_LPC_REG_I2C_CH3_OFFSET 0xdc
@@ -2247,7 +2247,7 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_modular_data = {
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
-/* Platform hotplug for NVLink blade systems family data */
+/* Platform hotplug for NVLink blade systems family data */
static struct mlxreg_core_data mlxplat_mlxcpld_global_wp_items_data[] = {
{
.label = "global_wp_grant",
@@ -2279,7 +2279,7 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_chassis_blade_data = {
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
-/* Platform hotplug for switch systems family data */
+/* Platform hotplug for switch systems family data */
static struct mlxreg_core_data mlxplat_mlxcpld_erot_ap_items_data[] = {
{
.label = "erot1_ap",
@@ -2387,7 +2387,7 @@ static struct mlxreg_core_hotplug_notifier mlxplat_mlxcpld_l1_switch_pwr_events_
.user_handler = mlxplat_mlxcpld_l1_switch_pwr_events_handler,
};
-/* Platform hotplug for l1 switch systems family data */
+/* Platform hotplug for l1 switch systems family data */
static struct mlxreg_core_data mlxplat_mlxcpld_l1_switch_pwr_events_items_data[] = {
{
.label = "power_button",
@@ -4401,7 +4401,7 @@ static struct mlxreg_core_platform_data mlxplat_modular_regs_io_data = {
.counter = ARRAY_SIZE(mlxplat_mlxcpld_modular_regs_io_data),
};
-/* Platform register access for chassis blade systems family data */
+/* Platform register access for chassis blade systems family data */
static struct mlxreg_core_data mlxplat_mlxcpld_chassis_blade_regs_io_data[] = {
{
.label = "cpld1_version",
@@ -5050,7 +5050,6 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET:
- case MLXPLAT_CPLD_LPC_REG_DBG_CTRL_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH3_OFFSET:
@@ -5186,7 +5185,7 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET:
- case MLXPLAT_CPLD_LPC_REG_DBG_CTRL_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD6_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH3_OFFSET:
@@ -5343,7 +5342,7 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET:
- case MLXPLAT_CPLD_LPC_REG_DBG_CTRL_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD6_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH3_OFFSET:
@@ -6573,7 +6572,7 @@ static int mlxplat_probe(struct platform_device *pdev)
}
/* Set default registers. */
- for (i = 0; i < mlxplat_regmap_config->num_reg_defaults; i++) {
+ for (i = 0; i < mlxplat_regmap_config->num_reg_defaults; i++) {
err = regmap_write(priv->regmap,
mlxplat_regmap_config->reg_defaults[i].reg,
mlxplat_regmap_config->reg_defaults[i].def);
diff --git a/drivers/platform/mellanox/mlxbf-bootctl.c b/drivers/platform/mellanox/mlxbf-bootctl.c
index 9cae07348d5e..b95dcb8d483c 100644
--- a/drivers/platform/mellanox/mlxbf-bootctl.c
+++ b/drivers/platform/mellanox/mlxbf-bootctl.c
@@ -91,6 +91,7 @@ static const char * const mlxbf_rsh_log_level[] = {
static DEFINE_MUTEX(icm_ops_lock);
static DEFINE_MUTEX(os_up_lock);
static DEFINE_MUTEX(mfg_ops_lock);
+static DEFINE_MUTEX(rtc_ops_lock);
/*
* Objects are stored within the MFG partition per type.
@@ -489,6 +490,23 @@ static ssize_t large_icm_store(struct device *dev,
return res.a0 ? -EPERM : count;
}
+static ssize_t rtc_battery_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct arm_smccc_res res;
+
+ mutex_lock(&rtc_ops_lock);
+ arm_smccc_smc(MLNX_HANDLE_GET_RTC_LOW_BATT, 0, 0, 0, 0,
+ 0, 0, 0, &res);
+ mutex_unlock(&rtc_ops_lock);
+
+ if (res.a0)
+ return -EPERM;
+
+ return sysfs_emit(buf, "0x%lx\n", res.a1);
+}
+
static ssize_t os_up_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -906,6 +924,7 @@ static DEVICE_ATTR_RW(sn);
static DEVICE_ATTR_RW(uuid);
static DEVICE_ATTR_RW(rev);
static DEVICE_ATTR_WO(mfg_lock);
+static DEVICE_ATTR_RO(rtc_battery);
static struct attribute *mlxbf_bootctl_attrs[] = {
&dev_attr_post_reset_wdog.attr,
@@ -925,6 +944,7 @@ static struct attribute *mlxbf_bootctl_attrs[] = {
&dev_attr_uuid.attr,
&dev_attr_rev.attr,
&dev_attr_mfg_lock.attr,
+ &dev_attr_rtc_battery.attr,
NULL
};
diff --git a/drivers/platform/mellanox/mlxbf-bootctl.h b/drivers/platform/mellanox/mlxbf-bootctl.h
index 1299750a8661..90bbbdc65879 100644
--- a/drivers/platform/mellanox/mlxbf-bootctl.h
+++ b/drivers/platform/mellanox/mlxbf-bootctl.h
@@ -103,6 +103,11 @@
*/
#define MLNX_HANDLE_OS_UP 0x82000014
+/*
+ * SMC function ID to get and clear the RTC low voltage bit
+ */
+#define MLNX_HANDLE_GET_RTC_LOW_BATT 0x82000023
+
/* SMC function IDs for SiP Service queries */
#define MLXBF_BOOTCTL_SIP_SVC_CALL_COUNT 0x8200ff00
#define MLXBF_BOOTCTL_SIP_SVC_UID 0x8200ff01
diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
index d4f32ad66530..a594d5fcfcfd 100644
--- a/drivers/platform/surface/surface_aggregator_registry.c
+++ b/drivers/platform/surface/surface_aggregator_registry.c
@@ -371,7 +371,7 @@ static const struct software_node *ssam_node_group_sp8[] = {
NULL,
};
-/* Devices for Surface Pro 9 (Intel/x86) and 10 */
+/* Devices for Surface Pro 9, 10 and 11 (Intel/x86) */
static const struct software_node *ssam_node_group_sp9[] = {
&ssam_node_root,
&ssam_node_hub_kip,
@@ -430,6 +430,9 @@ static const struct acpi_device_id ssam_platform_hub_acpi_match[] = {
/* Surface Pro 10 */
{ "MSHW0510", (unsigned long)ssam_node_group_sp9 },
+ /* Surface Pro 11 */
+ { "MSHW0583", (unsigned long)ssam_node_group_sp9 },
+
/* Surface Book 2 */
{ "MSHW0107", (unsigned long)ssam_node_group_gen5 },
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 0258dd879d64..43407e76476b 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -475,6 +475,17 @@ config IDEAPAD_LAPTOP
This is a driver for Lenovo IdeaPad netbooks contains drivers for
rfkill switch, hotkey, fan control and backlight control.
+config LENOVO_WMI_HOTKEY_UTILITIES
+ tristate "Lenovo Hotkey Utility WMI extras driver"
+ depends on ACPI_WMI
+ select NEW_LEDS
+ select LEDS_CLASS
+ imply IDEAPAD_LAPTOP
+ help
+ This driver provides WMI support for Lenovo customized hotkey functions,
+ such as LED control for audio/mic mute events on Ideapad, YOGA, XiaoXin,
+ Gaming, ThinkBook and so on.
+
config LENOVO_YMC
tristate "Lenovo Yoga Tablet Mode Control"
depends on ACPI_WMI
@@ -778,6 +789,23 @@ config BARCO_P50_GPIO
To compile this driver as a module, choose M here: the module
will be called barco-p50-gpio.
+config SAMSUNG_GALAXYBOOK
+ tristate "Samsung Galaxy Book driver"
+ depends on ACPI
+ depends on ACPI_BATTERY
+ depends on INPUT
+ depends on LEDS_CLASS
+ depends on SERIO_I8042
+ select ACPI_PLATFORM_PROFILE
+ select FW_ATTR_CLASS
+ help
+ This is a driver for Samsung Galaxy Book series notebooks. It adds
+ support for the keyboard backlight control, performance mode control,
+ function keys, and various firmware attributes.
+
+ For more information about this driver, see
+ <file:Documentation/admin-guide/laptops/samsung-galaxybook.rst>.
+
config SAMSUNG_LAPTOP
tristate "Samsung Laptop driver"
depends on RFKILL || RFKILL = n
@@ -1012,19 +1040,6 @@ config SERIAL_MULTI_INSTANTIATE
To compile this driver as a module, choose M here: the module
will be called serial-multi-instantiate.
-config MLX_PLATFORM
- tristate "Mellanox Technologies platform support"
- depends on ACPI && I2C && PCI
- select REGMAP
- help
- This option enables system support for the Mellanox Technologies
- platform. The Mellanox systems provide data center networking
- solutions based on Virtual Protocol Interconnect (VPI) technology
- enable seamless connectivity to 56/100Gb/s InfiniBand or 10/40/56GbE
- connection.
-
- If you have a Mellanox system, say Y or M here.
-
config TOUCHSCREEN_DMI
bool "DMI based touchscreen configuration info"
depends on ACPI && DMI && I2C=y && TOUCHSCREEN_SILEAD
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index e1b142947067..650dfbebb6c8 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_UV_SYSFS) += uv_sysfs.o
# IBM Thinkpad and Lenovo
obj-$(CONFIG_IBM_RTL) += ibm_rtl.o
obj-$(CONFIG_IDEAPAD_LAPTOP) += ideapad-laptop.o
+obj-$(CONFIG_LENOVO_WMI_HOTKEY_UTILITIES) += lenovo-wmi-hotkey-utilities.o
obj-$(CONFIG_LENOVO_YMC) += lenovo-ymc.o
obj-$(CONFIG_SENSORS_HDAPS) += hdaps.o
obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
@@ -95,8 +96,9 @@ obj-$(CONFIG_PCENGINES_APU2) += pcengines-apuv2.o
obj-$(CONFIG_BARCO_P50_GPIO) += barco-p50-gpio.o
# Samsung
-obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o
-obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o
+obj-$(CONFIG_SAMSUNG_GALAXYBOOK) += samsung-galaxybook.o
+obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o
+obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o
# Toshiba
obj-$(CONFIG_TOSHIBA_BT_RFKILL) += toshiba_bluetooth.o
@@ -122,7 +124,6 @@ obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o
# Platform drivers
obj-$(CONFIG_FW_ATTR_CLASS) += firmware_attributes_class.o
obj-$(CONFIG_SERIAL_MULTI_INSTANTIATE) += serial-multi-instantiate.o
-obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o
obj-$(CONFIG_TOUCHSCREEN_DMI) += touchscreen_dmi.o
obj-$(CONFIG_WIRELESS_HOTKEY) += wireless-hotkey.o
obj-$(CONFIG_X86_ANDROID_TABLETS) += x86-android-tablets/
diff --git a/drivers/platform/x86/amd/Makefile b/drivers/platform/x86/amd/Makefile
index 56f62fc9c97b..c6c40bdcbded 100644
--- a/drivers/platform/x86/amd/Makefile
+++ b/drivers/platform/x86/amd/Makefile
@@ -5,7 +5,7 @@
#
obj-$(CONFIG_AMD_3D_VCACHE) += amd_3d_vcache.o
-amd_3d_vcache-objs := x3d_vcache.o
+amd_3d_vcache-y := x3d_vcache.o
obj-$(CONFIG_AMD_PMC) += pmc/
obj-$(CONFIG_AMD_HSMP) += hsmp/
obj-$(CONFIG_AMD_PMF) += pmf/
diff --git a/drivers/platform/x86/amd/hsmp/Kconfig b/drivers/platform/x86/amd/hsmp/Kconfig
index 7d10d4462a45..d6f7a62d55b5 100644
--- a/drivers/platform/x86/amd/hsmp/Kconfig
+++ b/drivers/platform/x86/amd/hsmp/Kconfig
@@ -7,7 +7,7 @@ config AMD_HSMP
tristate
menu "AMD HSMP Driver"
- depends on AMD_NB || COMPILE_TEST
+ depends on AMD_NODE || COMPILE_TEST
config AMD_HSMP_ACPI
tristate "AMD HSMP ACPI device driver"
diff --git a/drivers/platform/x86/amd/hsmp/Makefile b/drivers/platform/x86/amd/hsmp/Makefile
index 3175d8885e87..0759bbcd13f6 100644
--- a/drivers/platform/x86/amd/hsmp/Makefile
+++ b/drivers/platform/x86/amd/hsmp/Makefile
@@ -5,8 +5,8 @@
#
obj-$(CONFIG_AMD_HSMP) += hsmp_common.o
-hsmp_common-objs := hsmp.o
+hsmp_common-y := hsmp.o
obj-$(CONFIG_AMD_HSMP_PLAT) += amd_hsmp.o
-amd_hsmp-objs := plat.o
+amd_hsmp-y := plat.o
obj-$(CONFIG_AMD_HSMP_ACPI) += hsmp_acpi.o
-hsmp_acpi-objs := acpi.o
+hsmp_acpi-y := acpi.o
diff --git a/drivers/platform/x86/amd/hsmp/acpi.c b/drivers/platform/x86/amd/hsmp/acpi.c
index 444b43be35a2..c1eccb3c80c5 100644
--- a/drivers/platform/x86/amd/hsmp/acpi.c
+++ b/drivers/platform/x86/amd/hsmp/acpi.c
@@ -10,7 +10,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/amd_hsmp.h>
-#include <asm/amd_nb.h>
#include <linux/acpi.h>
#include <linux/device.h>
@@ -24,6 +23,8 @@
#include <uapi/asm-generic/errno-base.h>
+#include <asm/amd_node.h>
+
#include "hsmp.h"
#define DRIVER_NAME "amd_hsmp"
@@ -321,8 +322,8 @@ static int hsmp_acpi_probe(struct platform_device *pdev)
return -ENOMEM;
if (!hsmp_pdev->is_probed) {
- hsmp_pdev->num_sockets = amd_nb_num();
- if (hsmp_pdev->num_sockets == 0 || hsmp_pdev->num_sockets > MAX_AMD_SOCKETS)
+ hsmp_pdev->num_sockets = amd_num_nodes();
+ if (hsmp_pdev->num_sockets == 0 || hsmp_pdev->num_sockets > MAX_AMD_NUM_NODES)
return -ENODEV;
hsmp_pdev->sock = devm_kcalloc(&pdev->dev, hsmp_pdev->num_sockets,
diff --git a/drivers/platform/x86/amd/hsmp/hsmp.c b/drivers/platform/x86/amd/hsmp/hsmp.c
index 03164e30b3a5..a3ac09a90de4 100644
--- a/drivers/platform/x86/amd/hsmp/hsmp.c
+++ b/drivers/platform/x86/amd/hsmp/hsmp.c
@@ -8,7 +8,6 @@
*/
#include <asm/amd_hsmp.h>
-#include <asm/amd_nb.h>
#include <linux/acpi.h>
#include <linux/delay.h>
diff --git a/drivers/platform/x86/amd/hsmp/hsmp.h b/drivers/platform/x86/amd/hsmp/hsmp.h
index e852f0a947e4..af8b21f821d6 100644
--- a/drivers/platform/x86/amd/hsmp/hsmp.h
+++ b/drivers/platform/x86/amd/hsmp/hsmp.h
@@ -21,8 +21,6 @@
#define HSMP_ATTR_GRP_NAME_SIZE 10
-#define MAX_AMD_SOCKETS 8
-
#define HSMP_CDEV_NAME "hsmp_cdev"
#define HSMP_DEVNODE_NAME "hsmp"
@@ -41,7 +39,6 @@ struct hsmp_socket {
void __iomem *virt_base_addr;
struct semaphore hsmp_sem;
char name[HSMP_ATTR_GRP_NAME_SIZE];
- struct pci_dev *root;
struct device *dev;
u16 sock_ind;
int (*amd_hsmp_rdwr)(struct hsmp_socket *sock, u32 off, u32 *val, bool rw);
diff --git a/drivers/platform/x86/amd/hsmp/plat.c b/drivers/platform/x86/amd/hsmp/plat.c
index 02ca85762b68..b9782a078dbd 100644
--- a/drivers/platform/x86/amd/hsmp/plat.c
+++ b/drivers/platform/x86/amd/hsmp/plat.c
@@ -10,14 +10,16 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/amd_hsmp.h>
-#include <asm/amd_nb.h>
+#include <linux/build_bug.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>
+#include <asm/amd_node.h>
+
#include "hsmp.h"
#define DRIVER_NAME "amd_hsmp"
@@ -34,28 +36,12 @@
#define SMN_HSMP_MSG_RESP 0x0010980
#define SMN_HSMP_MSG_DATA 0x00109E0
-#define HSMP_INDEX_REG 0xc4
-#define HSMP_DATA_REG 0xc8
-
static struct hsmp_plat_device *hsmp_pdev;
static int amd_hsmp_pci_rdwr(struct hsmp_socket *sock, u32 offset,
u32 *value, bool write)
{
- int ret;
-
- if (!sock->root)
- return -ENODEV;
-
- ret = pci_write_config_dword(sock->root, HSMP_INDEX_REG,
- sock->mbinfo.base_addr + offset);
- if (ret)
- return ret;
-
- ret = (write ? pci_write_config_dword(sock->root, HSMP_DATA_REG, *value)
- : pci_read_config_dword(sock->root, HSMP_DATA_REG, value));
-
- return ret;
+ return amd_smn_hsmp_rdwr(sock->sock_ind, sock->mbinfo.base_addr + offset, value, write);
}
static ssize_t hsmp_metric_tbl_plat_read(struct file *filp, struct kobject *kobj,
@@ -95,7 +81,12 @@ static umode_t hsmp_is_sock_attr_visible(struct kobject *kobj,
* Static array of 8 + 1(for NULL) elements is created below
* to create sysfs groups for sockets.
* is_bin_visible function is used to show / hide the necessary groups.
+ *
+ * Validate the maximum number against MAX_AMD_NUM_NODES. If this changes,
+ * then the attributes and groups below must be adjusted.
*/
+static_assert(MAX_AMD_NUM_NODES == 8);
+
#define HSMP_BIN_ATTR(index, _list) \
static const struct bin_attribute attr##index = { \
.attr = { .name = HSMP_METRICS_TABLE_NAME, .mode = 0444}, \
@@ -159,10 +150,7 @@ static int init_platform_device(struct device *dev)
int ret, i;
for (i = 0; i < hsmp_pdev->num_sockets; i++) {
- if (!node_to_amd_nb(i))
- return -ENODEV;
sock = &hsmp_pdev->sock[i];
- sock->root = node_to_amd_nb(i)->root;
sock->sock_ind = i;
sock->dev = dev;
sock->mbinfo.base_addr = SMN_HSMP_BASE;
@@ -305,11 +293,11 @@ static int __init hsmp_plt_init(void)
return -ENOMEM;
/*
- * amd_nb_num() returns number of SMN/DF interfaces present in the system
+ * amd_num_nodes() returns number of SMN/DF interfaces present in the system
* if we have N SMN/DF interfaces that ideally means N sockets
*/
- hsmp_pdev->num_sockets = amd_nb_num();
- if (hsmp_pdev->num_sockets == 0 || hsmp_pdev->num_sockets > MAX_AMD_SOCKETS)
+ hsmp_pdev->num_sockets = amd_num_nodes();
+ if (hsmp_pdev->num_sockets == 0 || hsmp_pdev->num_sockets > MAX_AMD_NUM_NODES)
return ret;
ret = platform_driver_register(&amd_hsmp_driver);
diff --git a/drivers/platform/x86/amd/pmc/Makefile b/drivers/platform/x86/amd/pmc/Makefile
index 255d94ddf999..bb6905c4cae9 100644
--- a/drivers/platform/x86/amd/pmc/Makefile
+++ b/drivers/platform/x86/amd/pmc/Makefile
@@ -4,6 +4,6 @@
# AMD Power Management Controller Driver
#
-amd-pmc-objs := pmc.o pmc-quirks.o mp1_stb.o
-obj-$(CONFIG_AMD_PMC) += amd-pmc.o
-amd-pmc-$(CONFIG_AMD_MP2_STB) += mp2_stb.o
+obj-$(CONFIG_AMD_PMC) += amd-pmc.o
+amd-pmc-y := pmc.o pmc-quirks.o mp1_stb.o
+amd-pmc-$(CONFIG_AMD_MP2_STB) += mp2_stb.o
diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
index e6124498b195..d789d6cab794 100644
--- a/drivers/platform/x86/amd/pmc/pmc.c
+++ b/drivers/platform/x86/amd/pmc/pmc.c
@@ -32,70 +32,6 @@
#include "pmc.h"
-/* SMU communication registers */
-#define AMD_PMC_REGISTER_RESPONSE 0x980
-#define AMD_PMC_REGISTER_ARGUMENT 0x9BC
-
-/* PMC Scratch Registers */
-#define AMD_PMC_SCRATCH_REG_CZN 0x94
-#define AMD_PMC_SCRATCH_REG_YC 0xD14
-#define AMD_PMC_SCRATCH_REG_1AH 0xF14
-
-/* STB Registers */
-#define AMD_PMC_STB_S2IDLE_PREPARE 0xC6000001
-#define AMD_PMC_STB_S2IDLE_RESTORE 0xC6000002
-#define AMD_PMC_STB_S2IDLE_CHECK 0xC6000003
-
-/* Base address of SMU for mapping physical address to virtual address */
-#define AMD_PMC_MAPPING_SIZE 0x01000
-#define AMD_PMC_BASE_ADDR_OFFSET 0x10000
-#define AMD_PMC_BASE_ADDR_LO 0x13B102E8
-#define AMD_PMC_BASE_ADDR_HI 0x13B102EC
-#define AMD_PMC_BASE_ADDR_LO_MASK GENMASK(15, 0)
-#define AMD_PMC_BASE_ADDR_HI_MASK GENMASK(31, 20)
-
-/* SMU Response Codes */
-#define AMD_PMC_RESULT_OK 0x01
-#define AMD_PMC_RESULT_CMD_REJECT_BUSY 0xFC
-#define AMD_PMC_RESULT_CMD_REJECT_PREREQ 0xFD
-#define AMD_PMC_RESULT_CMD_UNKNOWN 0xFE
-#define AMD_PMC_RESULT_FAILED 0xFF
-
-/* FCH SSC Registers */
-#define FCH_S0I3_ENTRY_TIME_L_OFFSET 0x30
-#define FCH_S0I3_ENTRY_TIME_H_OFFSET 0x34
-#define FCH_S0I3_EXIT_TIME_L_OFFSET 0x38
-#define FCH_S0I3_EXIT_TIME_H_OFFSET 0x3C
-#define FCH_SSC_MAPPING_SIZE 0x800
-#define FCH_BASE_PHY_ADDR_LOW 0xFED81100
-#define FCH_BASE_PHY_ADDR_HIGH 0x00000000
-
-/* SMU Message Definations */
-#define SMU_MSG_GETSMUVERSION 0x02
-#define SMU_MSG_LOG_GETDRAM_ADDR_HI 0x04
-#define SMU_MSG_LOG_GETDRAM_ADDR_LO 0x05
-#define SMU_MSG_LOG_START 0x06
-#define SMU_MSG_LOG_RESET 0x07
-#define SMU_MSG_LOG_DUMP_DATA 0x08
-#define SMU_MSG_GET_SUP_CONSTRAINTS 0x09
-
-#define PMC_MSG_DELAY_MIN_US 50
-#define RESPONSE_REGISTER_LOOP_MAX 20000
-
-#define DELAY_MIN_US 2000
-#define DELAY_MAX_US 3000
-
-enum amd_pmc_def {
- MSG_TEST = 0x01,
- MSG_OS_HINT_PCO,
- MSG_OS_HINT_RN,
-};
-
-struct amd_pmc_bit_map {
- const char *name;
- u32 bit_mask;
-};
-
static const struct amd_pmc_bit_map soc15_ip_blk_v2[] = {
{"DISPLAY", BIT(0)},
{"CPU", BIT(1)},
@@ -165,23 +101,6 @@ static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u3
iowrite32(val, dev->regbase + reg_offset);
}
-struct smu_metrics {
- u32 table_version;
- u32 hint_count;
- u32 s0i3_last_entry_status;
- u32 timein_s0i2;
- u64 timeentering_s0i3_lastcapture;
- u64 timeentering_s0i3_totaltime;
- u64 timeto_resume_to_os_lastcapture;
- u64 timeto_resume_to_os_totaltime;
- u64 timein_s0i3_lastcapture;
- u64 timein_s0i3_totaltime;
- u64 timein_swdrips_lastcapture;
- u64 timein_swdrips_totaltime;
- u64 timecondition_notmet_lastcapture[32];
- u64 timecondition_notmet_totaltime[32];
-} __packed;
-
static void amd_pmc_get_ip_info(struct amd_pmc_dev *dev)
{
switch (dev->cpu_id) {
@@ -247,11 +166,12 @@ static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev)
static int get_metrics_table(struct amd_pmc_dev *pdev, struct smu_metrics *table)
{
- if (!pdev->smu_virt_addr) {
- int ret = amd_pmc_setup_smu_logging(pdev);
+ int rc;
- if (ret)
- return ret;
+ if (!pdev->smu_virt_addr) {
+ rc = amd_pmc_setup_smu_logging(pdev);
+ if (rc)
+ return rc;
}
if (pdev->cpu_id == AMD_CPU_ID_PCO)
@@ -300,10 +220,10 @@ static ssize_t smu_fw_version_show(struct device *d, struct device_attribute *at
char *buf)
{
struct amd_pmc_dev *dev = dev_get_drvdata(d);
+ int rc;
if (!dev->major) {
- int rc = amd_pmc_get_smu_version(dev);
-
+ rc = amd_pmc_get_smu_version(dev);
if (rc)
return rc;
}
@@ -314,10 +234,10 @@ static ssize_t smu_program_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct amd_pmc_dev *dev = dev_get_drvdata(d);
+ int rc;
if (!dev->major) {
- int rc = amd_pmc_get_smu_version(dev);
-
+ rc = amd_pmc_get_smu_version(dev);
if (rc)
return rc;
}
@@ -778,14 +698,14 @@ static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = {
static int amd_pmc_suspend_handler(struct device *dev)
{
struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
+ int rc;
/*
* Must be called only from the same set of dev_pm_ops handlers
* as i8042_pm_suspend() is called: currently just from .suspend.
*/
if (pdev->disable_8042_wakeup && !disable_workarounds) {
- int rc = amd_pmc_wa_irq1(pdev);
-
+ rc = amd_pmc_wa_irq1(pdev);
if (rc) {
dev_err(pdev->dev, "failed to adjust keyboard wakeup: %d\n", rc);
return rc;
@@ -808,6 +728,7 @@ static const struct pci_device_id pmc_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PCO) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RV) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_SP) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_SHP) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_ROOT) },
{ }
@@ -823,7 +744,6 @@ static int amd_pmc_probe(struct platform_device *pdev)
u32 val;
dev->dev = &pdev->dev;
-
rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
if (!rdev || !pci_match_id(pmc_pci_ids, rdev)) {
err = -ENODEV;
@@ -831,8 +751,7 @@ static int amd_pmc_probe(struct platform_device *pdev)
}
dev->cpu_id = rdev->device;
-
- if (dev->cpu_id == AMD_CPU_ID_SP) {
+ if (dev->cpu_id == AMD_CPU_ID_SP || dev->cpu_id == AMD_CPU_ID_SHP) {
dev_warn_once(dev->dev, "S0i3 is not supported on this hardware\n");
err = -ENODEV;
goto err_pci_dev_put;
@@ -847,7 +766,6 @@ static int amd_pmc_probe(struct platform_device *pdev)
}
base_addr_lo = val & AMD_PMC_BASE_ADDR_HI_MASK;
-
err = amd_smn_read(0, AMD_PMC_BASE_ADDR_HI, &val);
if (err) {
dev_err(dev->dev, "error reading 0x%x\n", AMD_PMC_BASE_ADDR_HI);
@@ -865,7 +783,9 @@ static int amd_pmc_probe(struct platform_device *pdev)
goto err_pci_dev_put;
}
- mutex_init(&dev->lock);
+ err = devm_mutex_init(dev->dev, &dev->lock);
+ if (err)
+ goto err_pci_dev_put;
/* Get num of IP blocks within the SoC */
amd_pmc_get_ip_info(dev);
@@ -904,7 +824,6 @@ static void amd_pmc_remove(struct platform_device *pdev)
pci_dev_put(dev->rdev);
if (IS_ENABLED(CONFIG_AMD_MP2_STB))
amd_mp2_stb_deinit(dev);
- mutex_destroy(&dev->lock);
}
static const struct acpi_device_id amd_pmc_acpi_ids[] = {
diff --git a/drivers/platform/x86/amd/pmc/pmc.h b/drivers/platform/x86/amd/pmc/pmc.h
index f43f0253b0f5..62f3e51020fd 100644
--- a/drivers/platform/x86/amd/pmc/pmc.h
+++ b/drivers/platform/x86/amd/pmc/pmc.h
@@ -14,6 +14,59 @@
#include <linux/types.h>
#include <linux/mutex.h>
+/* SMU communication registers */
+#define AMD_PMC_REGISTER_RESPONSE 0x980
+#define AMD_PMC_REGISTER_ARGUMENT 0x9BC
+
+/* PMC Scratch Registers */
+#define AMD_PMC_SCRATCH_REG_CZN 0x94
+#define AMD_PMC_SCRATCH_REG_YC 0xD14
+#define AMD_PMC_SCRATCH_REG_1AH 0xF14
+
+/* STB Registers */
+#define AMD_PMC_STB_S2IDLE_PREPARE 0xC6000001
+#define AMD_PMC_STB_S2IDLE_RESTORE 0xC6000002
+#define AMD_PMC_STB_S2IDLE_CHECK 0xC6000003
+
+/* Base address of SMU for mapping physical address to virtual address */
+#define AMD_PMC_MAPPING_SIZE 0x01000
+#define AMD_PMC_BASE_ADDR_OFFSET 0x10000
+#define AMD_PMC_BASE_ADDR_LO 0x13B102E8
+#define AMD_PMC_BASE_ADDR_HI 0x13B102EC
+#define AMD_PMC_BASE_ADDR_LO_MASK GENMASK(15, 0)
+#define AMD_PMC_BASE_ADDR_HI_MASK GENMASK(31, 20)
+
+/* SMU Response Codes */
+#define AMD_PMC_RESULT_OK 0x01
+#define AMD_PMC_RESULT_CMD_REJECT_BUSY 0xFC
+#define AMD_PMC_RESULT_CMD_REJECT_PREREQ 0xFD
+#define AMD_PMC_RESULT_CMD_UNKNOWN 0xFE
+#define AMD_PMC_RESULT_FAILED 0xFF
+
+/* FCH SSC Registers */
+#define FCH_S0I3_ENTRY_TIME_L_OFFSET 0x30
+#define FCH_S0I3_ENTRY_TIME_H_OFFSET 0x34
+#define FCH_S0I3_EXIT_TIME_L_OFFSET 0x38
+#define FCH_S0I3_EXIT_TIME_H_OFFSET 0x3C
+#define FCH_SSC_MAPPING_SIZE 0x800
+#define FCH_BASE_PHY_ADDR_LOW 0xFED81100
+#define FCH_BASE_PHY_ADDR_HIGH 0x00000000
+
+/* SMU Message Definitions */
+#define SMU_MSG_GETSMUVERSION 0x02
+#define SMU_MSG_LOG_GETDRAM_ADDR_HI 0x04
+#define SMU_MSG_LOG_GETDRAM_ADDR_LO 0x05
+#define SMU_MSG_LOG_START 0x06
+#define SMU_MSG_LOG_RESET 0x07
+#define SMU_MSG_LOG_DUMP_DATA 0x08
+#define SMU_MSG_GET_SUP_CONSTRAINTS 0x09
+
+#define PMC_MSG_DELAY_MIN_US 50
+#define RESPONSE_REGISTER_LOOP_MAX 20000
+
+#define DELAY_MIN_US 2000
+#define DELAY_MAX_US 3000
+
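As a rough sanity check on the constants above: the two base-address masks select the low 16 bits and the top 12 bits respectively, and if the message-polling constants are combined as delay times loop count (as is typical for a poll-timeout loop), the worst-case wait works out to about one second. A minimal standalone sketch, with GENMASK32() as a local stand-in for the kernel's GENMASK():

/*
 * Standalone sketch, not driver code: shows which bits the base-address
 * masks cover and the implied worst-case SMU mailbox polling budget.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l)	((uint32_t)(((1ULL << ((h) - (l) + 1)) - 1) << (l)))

int main(void)
{
	uint32_t lo_mask = GENMASK32(15, 0);	/* AMD_PMC_BASE_ADDR_LO_MASK */
	uint32_t hi_mask = GENMASK32(31, 20);	/* AMD_PMC_BASE_ADDR_HI_MASK */

	printf("LO mask: 0x%08x\n", (unsigned int)lo_mask);	/* 0x0000ffff */
	printf("HI mask: 0x%08x\n", (unsigned int)hi_mask);	/* 0xfff00000 */

	/* 20000 polls spaced at least 50 us apart: roughly 1 s upper bound */
	printf("poll budget: %u us\n", 50u * 20000u);
	return 0;
}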
enum s2d_msg_port {
MSG_PORT_PMC,
MSG_PORT_S2D,
@@ -65,6 +118,34 @@ struct amd_pmc_dev {
struct stb_arg stb_arg;
};
+struct amd_pmc_bit_map {
+ const char *name;
+ u32 bit_mask;
+};
+
+struct smu_metrics {
+ u32 table_version;
+ u32 hint_count;
+ u32 s0i3_last_entry_status;
+ u32 timein_s0i2;
+ u64 timeentering_s0i3_lastcapture;
+ u64 timeentering_s0i3_totaltime;
+ u64 timeto_resume_to_os_lastcapture;
+ u64 timeto_resume_to_os_totaltime;
+ u64 timein_s0i3_lastcapture;
+ u64 timein_s0i3_totaltime;
+ u64 timein_swdrips_lastcapture;
+ u64 timein_swdrips_totaltime;
+ u64 timecondition_notmet_lastcapture[32];
+ u64 timecondition_notmet_totaltime[32];
+} __packed;
+
+enum amd_pmc_def {
+ MSG_TEST = 0x01,
+ MSG_OS_HINT_PCO,
+ MSG_OS_HINT_RN,
+};
+
void amd_pmc_process_restore_quirks(struct amd_pmc_dev *dev);
void amd_pmc_quirks_init(struct amd_pmc_dev *dev);
void amd_mp2_stb_init(struct amd_pmc_dev *dev);
@@ -79,6 +160,7 @@ void amd_mp2_stb_deinit(struct amd_pmc_dev *dev);
#define AMD_CPU_ID_CB 0x14D8
#define AMD_CPU_ID_PS 0x14E8
#define AMD_CPU_ID_SP 0x14A4
+#define AMD_CPU_ID_SHP 0x153A
#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507
#define PCI_DEVICE_ID_AMD_1AH_M60H_ROOT 0x1122
#define PCI_DEVICE_ID_AMD_MP2_STB 0x172c
diff --git a/drivers/platform/x86/amd/pmf/Makefile b/drivers/platform/x86/amd/pmf/Makefile
index 6b26e48ce8ad..5978464e0eb7 100644
--- a/drivers/platform/x86/amd/pmf/Makefile
+++ b/drivers/platform/x86/amd/pmf/Makefile
@@ -4,7 +4,7 @@
# AMD Platform Management Framework
#
-obj-$(CONFIG_AMD_PMF) += amd-pmf.o
-amd-pmf-objs := core.o acpi.o sps.o \
- auto-mode.o cnqf.o \
- tee-if.o spc.o
+obj-$(CONFIG_AMD_PMF) += amd-pmf.o
+amd-pmf-y := core.o acpi.o sps.o \
+ auto-mode.o cnqf.o \
+ tee-if.o spc.o
diff --git a/drivers/platform/x86/amd/pmf/acpi.c b/drivers/platform/x86/amd/pmf/acpi.c
index dd5780a1d06e..f75f7ecd8cd9 100644
--- a/drivers/platform/x86/amd/pmf/acpi.c
+++ b/drivers/platform/x86/amd/pmf/acpi.c
@@ -220,7 +220,7 @@ static void apmf_sbios_heartbeat_notify(struct work_struct *work)
if (!info)
return;
- schedule_delayed_work(&dev->heart_beat, msecs_to_jiffies(dev->hb_interval * 1000));
+ schedule_delayed_work(&dev->heart_beat, secs_to_jiffies(dev->hb_interval));
kfree(info);
}
diff --git a/drivers/platform/x86/amd/pmf/spc.c b/drivers/platform/x86/amd/pmf/spc.c
index f34f3130c330..1d90f9382024 100644
--- a/drivers/platform/x86/amd/pmf/spc.c
+++ b/drivers/platform/x86/amd/pmf/spc.c
@@ -219,12 +219,14 @@ static int amd_pmf_get_slider_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_
switch (dev->current_profile) {
case PLATFORM_PROFILE_PERFORMANCE:
+ case PLATFORM_PROFILE_BALANCED_PERFORMANCE:
val = TA_BEST_PERFORMANCE;
break;
case PLATFORM_PROFILE_BALANCED:
val = TA_BETTER_PERFORMANCE;
break;
case PLATFORM_PROFILE_LOW_POWER:
+ case PLATFORM_PROFILE_QUIET:
val = TA_BEST_BATTERY;
break;
default:
diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c
index ceaff1ebb7b9..a1e43873a07b 100644
--- a/drivers/platform/x86/amd/pmf/tee-if.c
+++ b/drivers/platform/x86/amd/pmf/tee-if.c
@@ -510,18 +510,18 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
ret = amd_pmf_set_dram_addr(dev, true);
if (ret)
- goto error;
+ goto err_cancel_work;
dev->policy_base = devm_ioremap_resource(dev->dev, dev->res);
if (IS_ERR(dev->policy_base)) {
ret = PTR_ERR(dev->policy_base);
- goto error;
+ goto err_free_dram_buf;
}
dev->policy_buf = kzalloc(dev->policy_sz, GFP_KERNEL);
if (!dev->policy_buf) {
ret = -ENOMEM;
- goto error;
+ goto err_free_dram_buf;
}
memcpy_fromio(dev->policy_buf, dev->policy_base, dev->policy_sz);
@@ -531,13 +531,13 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
dev->prev_data = kzalloc(sizeof(*dev->prev_data), GFP_KERNEL);
if (!dev->prev_data) {
ret = -ENOMEM;
- goto error;
+ goto err_free_policy;
}
for (i = 0; i < ARRAY_SIZE(amd_pmf_ta_uuid); i++) {
ret = amd_pmf_tee_init(dev, &amd_pmf_ta_uuid[i]);
if (ret)
- return ret;
+ goto err_free_prev_data;
ret = amd_pmf_start_policy_engine(dev);
switch (ret) {
@@ -550,27 +550,41 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
status = false;
break;
default:
- goto error;
+ ret = -EINVAL;
+ amd_pmf_tee_deinit(dev);
+ goto err_free_prev_data;
}
if (status)
break;
}
- if (!status && !pb_side_load)
- goto error;
+ if (!status && !pb_side_load) {
+ ret = -EINVAL;
+ goto err_free_prev_data;
+ }
if (pb_side_load)
amd_pmf_open_pb(dev, dev->dbgfs_dir);
ret = amd_pmf_register_input_device(dev);
if (ret)
- goto error;
+ goto err_pmf_remove_pb;
return 0;
-error:
- amd_pmf_deinit_smart_pc(dev);
+err_pmf_remove_pb:
+ if (pb_side_load && dev->esbin)
+ amd_pmf_remove_pb(dev);
+ amd_pmf_tee_deinit(dev);
+err_free_prev_data:
+ kfree(dev->prev_data);
+err_free_policy:
+ kfree(dev->policy_buf);
+err_free_dram_buf:
+ kfree(dev->buf);
+err_cancel_work:
+ cancel_delayed_work_sync(&dev->pb_work);
return ret;
}
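The reworked error path above frees resources in reverse order of acquisition via dedicated labels instead of one catch-all teardown, so a failure partway through never touches objects that were never allocated. A minimal, generic sketch of the same goto-unwind pattern (the names here are illustrative, not PMF symbols):

#include <stdlib.h>
#include <stdio.h>

/* Illustrative only: the "unwind in reverse acquisition order" goto style. */
static int setup_two_buffers(void **a, void **b)
{
	*a = malloc(64);
	if (!*a)
		return -1;

	*b = malloc(64);
	if (!*b)
		goto err_free_a;	/* undo only what already succeeded */

	return 0;

err_free_a:
	free(*a);
	*a = NULL;
	return -1;
}

int main(void)
{
	void *a, *b;

	if (setup_two_buffers(&a, &b))
		return 1;

	free(b);
	free(a);
	printf("setup and teardown ok\n");
	return 0;
}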
diff --git a/drivers/platform/x86/asus-tf103c-dock.c b/drivers/platform/x86/asus-tf103c-dock.c
index ca4670d0dc67..f09a3fc6524a 100644
--- a/drivers/platform/x86/asus-tf103c-dock.c
+++ b/drivers/platform/x86/asus-tf103c-dock.c
@@ -856,7 +856,7 @@ static int tf103c_dock_probe(struct i2c_client *client)
/* 5. Setup irqchip for touchpad IRQ pass-through */
dock->tp_irqchip.name = KBUILD_MODNAME;
- dock->tp_irq_domain = irq_domain_add_linear(NULL, 1, &irq_domain_simple_ops, NULL);
+ dock->tp_irq_domain = irq_domain_create_linear(NULL, 1, &irq_domain_simple_ops, NULL);
if (!dock->tp_irq_domain)
return -ENOMEM;
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 58754bc5b5b1..abbebd4bfb15 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -69,7 +69,6 @@
#include <linux/hwmon-sysfs.h>
#include <linux/power_supply.h>
#include <linux/sysfs.h>
-#include <linux/fb.h>
#include <acpi/video.h>
/* ======= */
diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
index d09060aedd3f..f8a0dffcaab7 100644
--- a/drivers/platform/x86/dell/Kconfig
+++ b/drivers/platform/x86/dell/Kconfig
@@ -18,15 +18,35 @@ config ALIENWARE_WMI
tristate "Alienware Special feature control"
default m
depends on ACPI
+ depends on ACPI_WMI
+ depends on DMI
depends on LEDS_CLASS
depends on NEW_LEDS
- depends on ACPI_WMI
+ help
+ This is a driver for controlling Alienware WMI driven features.
+
+ On legacy devices, it exposes an interface for controlling the AlienFX
+ zones on Alienware machines that don't contain a dedicated
+ AlienFX USB MCU such as the X51 and X51-R2.
+
+ On newer devices, it exposes the AWCC thermal control interface through
+ known Kernel APIs.
+
+config ALIENWARE_WMI_LEGACY
+ bool "Alienware Legacy WMI device driver"
+ default y
+ depends on ALIENWARE_WMI
+ help
+ Legacy Alienware WMI driver with AlienFX LED control capabilities.
+
+config ALIENWARE_WMI_WMAX
+ bool "Alienware WMAX WMI device driver"
+ default y
+ depends on ALIENWARE_WMI
select ACPI_PLATFORM_PROFILE
help
- This is a driver for controlling Alienware BIOS driven
- features. It exposes an interface for controlling the AlienFX
- zones on Alienware machines that don't contain a dedicated AlienFX
- USB MCU such as the X51 and X51-R2.
+ Alienware WMI driver with AlienFX LED, HDMI, amplifier, deep sleep and
+ AWCC thermal control capabilities.
config DCDBAS
tristate "Dell Systems Management Base Driver"
diff --git a/drivers/platform/x86/dell/Makefile b/drivers/platform/x86/dell/Makefile
index bb3cbd470a46..c7501c25e627 100644
--- a/drivers/platform/x86/dell/Makefile
+++ b/drivers/platform/x86/dell/Makefile
@@ -4,24 +4,27 @@
# Dell x86 Platform-Specific Drivers
#
-obj-$(CONFIG_ALIENWARE_WMI) += alienware-wmi.o
-obj-$(CONFIG_DCDBAS) += dcdbas.o
-obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o
-obj-$(CONFIG_DELL_RBTN) += dell-rbtn.o
-obj-$(CONFIG_DELL_RBU) += dell_rbu.o
-obj-$(CONFIG_DELL_PC) += dell-pc.o
-obj-$(CONFIG_DELL_SMBIOS) += dell-smbios.o
-dell-smbios-objs := dell-smbios-base.o
-dell-smbios-$(CONFIG_DELL_SMBIOS_WMI) += dell-smbios-wmi.o
-dell-smbios-$(CONFIG_DELL_SMBIOS_SMM) += dell-smbios-smm.o
-obj-$(CONFIG_DELL_SMO8800) += dell-smo8800.o
-obj-$(CONFIG_DELL_SMO8800) += dell-lis3lv02d.o
-obj-$(CONFIG_DELL_UART_BACKLIGHT) += dell-uart-backlight.o
-obj-$(CONFIG_DELL_WMI) += dell-wmi.o
-dell-wmi-objs := dell-wmi-base.o
-dell-wmi-$(CONFIG_DELL_WMI_PRIVACY) += dell-wmi-privacy.o
-obj-$(CONFIG_DELL_WMI_AIO) += dell-wmi-aio.o
-obj-$(CONFIG_DELL_WMI_DESCRIPTOR) += dell-wmi-descriptor.o
-obj-$(CONFIG_DELL_WMI_DDV) += dell-wmi-ddv.o
-obj-$(CONFIG_DELL_WMI_LED) += dell-wmi-led.o
-obj-$(CONFIG_DELL_WMI_SYSMAN) += dell-wmi-sysman/
+obj-$(CONFIG_ALIENWARE_WMI) += alienware-wmi.o
+alienware-wmi-y := alienware-wmi-base.o
+alienware-wmi-$(CONFIG_ALIENWARE_WMI_LEGACY) += alienware-wmi-legacy.o
+alienware-wmi-$(CONFIG_ALIENWARE_WMI_WMAX) += alienware-wmi-wmax.o
+obj-$(CONFIG_DCDBAS) += dcdbas.o
+obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o
+obj-$(CONFIG_DELL_RBTN) += dell-rbtn.o
+obj-$(CONFIG_DELL_RBU) += dell_rbu.o
+obj-$(CONFIG_DELL_PC) += dell-pc.o
+obj-$(CONFIG_DELL_SMBIOS) += dell-smbios.o
+dell-smbios-y := dell-smbios-base.o
+dell-smbios-$(CONFIG_DELL_SMBIOS_WMI) += dell-smbios-wmi.o
+dell-smbios-$(CONFIG_DELL_SMBIOS_SMM) += dell-smbios-smm.o
+obj-$(CONFIG_DELL_SMO8800) += dell-smo8800.o
+obj-$(CONFIG_DELL_SMO8800) += dell-lis3lv02d.o
+obj-$(CONFIG_DELL_UART_BACKLIGHT) += dell-uart-backlight.o
+obj-$(CONFIG_DELL_WMI) += dell-wmi.o
+dell-wmi-y := dell-wmi-base.o
+dell-wmi-$(CONFIG_DELL_WMI_PRIVACY) += dell-wmi-privacy.o
+obj-$(CONFIG_DELL_WMI_AIO) += dell-wmi-aio.o
+obj-$(CONFIG_DELL_WMI_DESCRIPTOR) += dell-wmi-descriptor.o
+obj-$(CONFIG_DELL_WMI_DDV) += dell-wmi-ddv.o
+obj-$(CONFIG_DELL_WMI_LED) += dell-wmi-led.o
+obj-$(CONFIG_DELL_WMI_SYSMAN) += dell-wmi-sysman/
diff --git a/drivers/platform/x86/dell/alienware-wmi-base.c b/drivers/platform/x86/dell/alienware-wmi-base.c
new file mode 100644
index 000000000000..64562b92314f
--- /dev/null
+++ b/drivers/platform/x86/dell/alienware-wmi-base.c
@@ -0,0 +1,491 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Alienware special feature control
+ *
+ * Copyright (C) 2014 Dell Inc <Dell.Client.Kernel@dell.com>
+ * Copyright (C) 2025 Kurt Borja <kuurtb@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/cleanup.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/dmi.h>
+#include <linux/leds.h>
+#include "alienware-wmi.h"
+
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@outlook.com>");
+MODULE_AUTHOR("Kurt Borja <kuurtb@gmail.com>");
+MODULE_DESCRIPTION("Alienware special feature control");
+MODULE_LICENSE("GPL");
+
+struct alienfx_quirks *alienfx;
+
+static struct alienfx_quirks quirk_inspiron5675 = {
+ .num_zones = 2,
+ .hdmi_mux = false,
+ .amplifier = false,
+ .deepslp = false,
+};
+
+static struct alienfx_quirks quirk_unknown = {
+ .num_zones = 2,
+ .hdmi_mux = false,
+ .amplifier = false,
+ .deepslp = false,
+};
+
+static struct alienfx_quirks quirk_x51_r1_r2 = {
+ .num_zones = 3,
+ .hdmi_mux = false,
+ .amplifier = false,
+ .deepslp = false,
+};
+
+static struct alienfx_quirks quirk_x51_r3 = {
+ .num_zones = 4,
+ .hdmi_mux = false,
+ .amplifier = true,
+ .deepslp = false,
+};
+
+static struct alienfx_quirks quirk_asm100 = {
+ .num_zones = 2,
+ .hdmi_mux = true,
+ .amplifier = false,
+ .deepslp = false,
+};
+
+static struct alienfx_quirks quirk_asm200 = {
+ .num_zones = 2,
+ .hdmi_mux = true,
+ .amplifier = false,
+ .deepslp = true,
+};
+
+static struct alienfx_quirks quirk_asm201 = {
+ .num_zones = 2,
+ .hdmi_mux = true,
+ .amplifier = true,
+ .deepslp = true,
+};
+
+static int __init dmi_matched(const struct dmi_system_id *dmi)
+{
+ alienfx = dmi->driver_data;
+ return 1;
+}
+
+static const struct dmi_system_id alienware_quirks[] __initconst = {
+ {
+ .callback = dmi_matched,
+ .ident = "Alienware ASM100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ASM100"),
+ },
+ .driver_data = &quirk_asm100,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Alienware ASM200",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ASM200"),
+ },
+ .driver_data = &quirk_asm200,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Alienware ASM201",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ASM201"),
+ },
+ .driver_data = &quirk_asm201,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Alienware X51 R1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51"),
+ },
+ .driver_data = &quirk_x51_r1_r2,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Alienware X51 R2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51 R2"),
+ },
+ .driver_data = &quirk_x51_r1_r2,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Alienware X51 R3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51 R3"),
+ },
+ .driver_data = &quirk_x51_r3,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Inc. Inspiron 5675",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5675"),
+ },
+ .driver_data = &quirk_inspiron5675,
+ },
+ {}
+};
+
+u8 alienware_interface;
+
+int alienware_wmi_command(struct wmi_device *wdev, u32 method_id,
+ void *in_args, size_t in_size, u32 *out_data)
+{
+ struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct acpi_buffer in = {in_size, in_args};
+ acpi_status ret;
+
+ ret = wmidev_evaluate_method(wdev, 0, method_id, &in, out_data ? &out : NULL);
+ if (ACPI_FAILURE(ret))
+ return -EIO;
+
+ union acpi_object *obj __free(kfree) = out.pointer;
+
+ if (out_data) {
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ *out_data = (u32)obj->integer.value;
+ else
+ return -ENOMSG;
+ }
+
+ return 0;
+}
+
+/*
+ * Helpers used for zone control
+ */
+static int parse_rgb(const char *buf, struct color_platform *colors)
+{
+ long unsigned int rgb;
+ int ret;
+ union color_union {
+ struct color_platform cp;
+ int package;
+ } repackager;
+
+ ret = kstrtoul(buf, 16, &rgb);
+ if (ret)
+ return ret;
+
+ /* RGB triplet notation is 24-bit hexadecimal */
+ if (rgb > 0xFFFFFF)
+ return -EINVAL;
+
+ repackager.package = rgb & 0x0f0f0f0f;
+ pr_debug("alienware-wmi: r: %d g:%d b: %d\n",
+ repackager.cp.red, repackager.cp.green, repackager.cp.blue);
+ *colors = repackager.cp;
+ return 0;
+}
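A user-space model of parse_rgb() above: the explicit shifts reproduce what the union repackaging does on a little-endian host, including the driver's 0x0f0f0f0f masking that keeps only the low nibble of each channel. This is a sketch for illustration, not driver code:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Same member order as the driver's struct color_platform. */
struct color_platform {
	uint8_t blue;
	uint8_t green;
	uint8_t red;
};

int main(void)
{
	const char *buf = "070a0f";		/* example RGB triplet */
	unsigned long rgb = strtoul(buf, NULL, 16);
	struct color_platform cp;

	if (rgb > 0xFFFFFF)
		return 1;

	rgb &= 0x0f0f0f0f;			/* driver keeps the low nibble per byte */
	cp.red   = (rgb >> 16) & 0xff;		/* high byte of the triplet */
	cp.green = (rgb >> 8) & 0xff;
	cp.blue  = rgb & 0xff;			/* low byte, what .blue receives */

	printf("r: %u g: %u b: %u\n", cp.red, cp.green, cp.blue);
	return 0;
}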
+
+/*
+ * Individual RGB zone control
+ */
+static ssize_t zone_show(struct device *dev, struct device_attribute *attr,
+ char *buf, u8 location)
+{
+ struct alienfx_priv *priv = dev_get_drvdata(dev);
+ struct color_platform *colors = &priv->colors[location];
+
+ return sprintf(buf, "red: %d, green: %d, blue: %d\n",
+ colors->red, colors->green, colors->blue);
+
+}
+
+static ssize_t zone_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count, u8 location)
+{
+ struct alienfx_priv *priv = dev_get_drvdata(dev);
+ struct color_platform *colors = &priv->colors[location];
+ struct alienfx_platdata *pdata = dev_get_platdata(dev);
+ int ret;
+
+ ret = parse_rgb(buf, colors);
+ if (ret)
+ return ret;
+
+ ret = pdata->ops.upd_led(priv, pdata->wdev, location);
+
+ return ret ? ret : count;
+}
+
+static ssize_t zone00_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return zone_show(dev, attr, buf, 0);
+}
+
+static ssize_t zone00_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return zone_store(dev, attr, buf, count, 0);
+}
+
+static DEVICE_ATTR_RW(zone00);
+
+static ssize_t zone01_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return zone_show(dev, attr, buf, 1);
+}
+
+static ssize_t zone01_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return zone_store(dev, attr, buf, count, 1);
+}
+
+static DEVICE_ATTR_RW(zone01);
+
+static ssize_t zone02_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return zone_show(dev, attr, buf, 2);
+}
+
+static ssize_t zone02_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return zone_store(dev, attr, buf, count, 2);
+}
+
+static DEVICE_ATTR_RW(zone02);
+
+static ssize_t zone03_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return zone_show(dev, attr, buf, 3);
+}
+
+static ssize_t zone03_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return zone_store(dev, attr, buf, count, 3);
+}
+
+static DEVICE_ATTR_RW(zone03);
+
+/*
+ * Lighting control state device attribute (Global)
+ */
+static ssize_t lighting_control_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct alienfx_priv *priv = dev_get_drvdata(dev);
+
+ if (priv->lighting_control_state == LEGACY_BOOTING)
+ return sysfs_emit(buf, "[booting] running suspend\n");
+ else if (priv->lighting_control_state == LEGACY_SUSPEND)
+ return sysfs_emit(buf, "booting running [suspend]\n");
+
+ return sysfs_emit(buf, "booting [running] suspend\n");
+}
+
+static ssize_t lighting_control_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct alienfx_priv *priv = dev_get_drvdata(dev);
+ u8 val;
+
+ if (strcmp(buf, "booting\n") == 0)
+ val = LEGACY_BOOTING;
+ else if (strcmp(buf, "suspend\n") == 0)
+ val = LEGACY_SUSPEND;
+ else if (alienware_interface == LEGACY)
+ val = LEGACY_RUNNING;
+ else
+ val = WMAX_RUNNING;
+
+ priv->lighting_control_state = val;
+ pr_debug("alienware-wmi: updated control state to %d\n",
+ priv->lighting_control_state);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(lighting_control_state);
+
+static umode_t zone_attr_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ if (n < alienfx->num_zones + 1)
+ return attr->mode;
+
+ return 0;
+}
+
+static bool zone_group_visible(struct kobject *kobj)
+{
+ return alienfx->num_zones > 0;
+}
+DEFINE_SYSFS_GROUP_VISIBLE(zone);
+
+static struct attribute *zone_attrs[] = {
+ &dev_attr_lighting_control_state.attr,
+ &dev_attr_zone00.attr,
+ &dev_attr_zone01.attr,
+ &dev_attr_zone02.attr,
+ &dev_attr_zone03.attr,
+ NULL
+};
+
+static struct attribute_group zone_attribute_group = {
+ .name = "rgb_zones",
+ .is_visible = SYSFS_GROUP_VISIBLE(zone),
+ .attrs = zone_attrs,
+};
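From user space the zones appear as files in the "rgb_zones" group of the platform device. A minimal write to one zone could look like the following; the sysfs path is an assumption based on the driver and group names above and may differ per system:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical path: "alienware-wmi" platform device + "rgb_zones" group. */
	const char *path = "/sys/bus/platform/devices/alienware-wmi/rgb_zones/zone00";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* 24-bit RGB triplet, as parse_rgb() expects. */
	if (write(fd, "0f0005\n", 7) < 0)
		perror("write");

	close(fd);
	return 0;
}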
+
+/*
+ * LED Brightness (Global)
+ */
+static void global_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct alienfx_priv *priv = container_of(led_cdev, struct alienfx_priv,
+ global_led);
+ struct alienfx_platdata *pdata = dev_get_platdata(&priv->pdev->dev);
+ int ret;
+
+ priv->global_brightness = brightness;
+
+ ret = pdata->ops.upd_brightness(priv, pdata->wdev, brightness);
+ if (ret)
+ pr_err("LED brightness update failed\n");
+}
+
+static enum led_brightness global_led_get(struct led_classdev *led_cdev)
+{
+ struct alienfx_priv *priv = container_of(led_cdev, struct alienfx_priv,
+ global_led);
+
+ return priv->global_brightness;
+}
+
+/*
+ * Platform Driver
+ */
+static int alienfx_probe(struct platform_device *pdev)
+{
+ struct alienfx_priv *priv;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ if (alienware_interface == WMAX)
+ priv->lighting_control_state = WMAX_RUNNING;
+ else
+ priv->lighting_control_state = LEGACY_RUNNING;
+
+ priv->pdev = pdev;
+ priv->global_led.name = "alienware::global_brightness";
+ priv->global_led.brightness_set = global_led_set;
+ priv->global_led.brightness_get = global_led_get;
+ priv->global_led.max_brightness = 0x0F;
+ priv->global_brightness = priv->global_led.max_brightness;
+ platform_set_drvdata(pdev, priv);
+
+ return devm_led_classdev_register(&pdev->dev, &priv->global_led);
+}
+
+static const struct attribute_group *alienfx_groups[] = {
+ &zone_attribute_group,
+ WMAX_DEV_GROUPS
+ NULL
+};
+
+static struct platform_driver platform_driver = {
+ .driver = {
+ .name = "alienware-wmi",
+ .dev_groups = alienfx_groups,
+ },
+ .probe = alienfx_probe,
+};
+
+static void alienware_alienfx_remove(void *data)
+{
+ struct platform_device *pdev = data;
+
+ platform_device_unregister(pdev);
+}
+
+int alienware_alienfx_setup(struct alienfx_platdata *pdata)
+{
+ struct device *dev = &pdata->wdev->dev;
+ struct platform_device *pdev;
+ int ret;
+
+ pdev = platform_device_register_data(NULL, "alienware-wmi",
+ PLATFORM_DEVID_NONE, pdata,
+ sizeof(*pdata));
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ dev_set_drvdata(dev, pdev);
+ ret = devm_add_action_or_reset(dev, alienware_alienfx_remove, pdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int __init alienware_wmi_init(void)
+{
+ int ret;
+
+ dmi_check_system(alienware_quirks);
+ if (!alienfx)
+ alienfx = &quirk_unknown;
+
+ ret = platform_driver_register(&platform_driver);
+ if (ret < 0)
+ return ret;
+
+ if (wmi_has_guid(WMAX_CONTROL_GUID)) {
+ alienware_interface = WMAX;
+ ret = alienware_wmax_wmi_init();
+ } else {
+ alienware_interface = LEGACY;
+ ret = alienware_legacy_wmi_init();
+ }
+
+ if (ret < 0)
+ platform_driver_unregister(&platform_driver);
+
+ return ret;
+}
+
+module_init(alienware_wmi_init);
+
+static void __exit alienware_wmi_exit(void)
+{
+ if (alienware_interface == WMAX)
+ alienware_wmax_wmi_exit();
+ else
+ alienware_legacy_wmi_exit();
+
+ platform_driver_unregister(&platform_driver);
+}
+
+module_exit(alienware_wmi_exit);
diff --git a/drivers/platform/x86/dell/alienware-wmi-legacy.c b/drivers/platform/x86/dell/alienware-wmi-legacy.c
new file mode 100644
index 000000000000..4a84a2fe918b
--- /dev/null
+++ b/drivers/platform/x86/dell/alienware-wmi-legacy.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Alienware LEGACY WMI device driver
+ *
+ * Copyright (C) 2025 Kurt Borja <kuurtb@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/wmi.h>
+#include "alienware-wmi.h"
+
+struct legacy_led_args {
+ struct color_platform colors;
+ u8 brightness;
+ u8 state;
+} __packed;
+
+
+/*
+ * Legacy WMI driver
+ */
+static int legacy_wmi_update_led(struct alienfx_priv *priv,
+ struct wmi_device *wdev, u8 location)
+{
+ struct legacy_led_args legacy_args = {
+ .colors = priv->colors[location],
+ .brightness = priv->global_brightness,
+ .state = 0,
+ };
+ struct acpi_buffer input;
+ acpi_status status;
+
+ if (legacy_args.state != LEGACY_RUNNING) {
+ legacy_args.state = priv->lighting_control_state;
+
+ input.length = sizeof(legacy_args);
+ input.pointer = &legacy_args;
+
+ status = wmi_evaluate_method(LEGACY_POWER_CONTROL_GUID, 0,
+ location + 1, &input, NULL);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ return 0;
+ }
+
+ return alienware_wmi_command(wdev, location + 1, &legacy_args,
+ sizeof(legacy_args), NULL);
+}
+
+static int legacy_wmi_update_brightness(struct alienfx_priv *priv,
+ struct wmi_device *wdev, u8 brightness)
+{
+ return legacy_wmi_update_led(priv, wdev, 0);
+}
+
+static int legacy_wmi_probe(struct wmi_device *wdev, const void *context)
+{
+ struct alienfx_platdata pdata = {
+ .wdev = wdev,
+ .ops = {
+ .upd_led = legacy_wmi_update_led,
+ .upd_brightness = legacy_wmi_update_brightness,
+ },
+ };
+
+ return alienware_alienfx_setup(&pdata);
+}
+
+static const struct wmi_device_id alienware_legacy_device_id_table[] = {
+ { LEGACY_CONTROL_GUID, NULL },
+ { },
+};
+MODULE_DEVICE_TABLE(wmi, alienware_legacy_device_id_table);
+
+static struct wmi_driver alienware_legacy_wmi_driver = {
+ .driver = {
+ .name = "alienware-wmi-alienfx",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .id_table = alienware_legacy_device_id_table,
+ .probe = legacy_wmi_probe,
+ .no_singleton = true,
+};
+
+int __init alienware_legacy_wmi_init(void)
+{
+ return wmi_driver_register(&alienware_legacy_wmi_driver);
+}
+
+void __exit alienware_legacy_wmi_exit(void)
+{
+ wmi_driver_unregister(&alienware_legacy_wmi_driver);
+}
diff --git a/drivers/platform/x86/dell/alienware-wmi-wmax.c b/drivers/platform/x86/dell/alienware-wmi-wmax.c
new file mode 100644
index 000000000000..3d3014b5adf0
--- /dev/null
+++ b/drivers/platform/x86/dell/alienware-wmi-wmax.c
@@ -0,0 +1,768 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Alienware WMAX WMI device driver
+ *
+ * Copyright (C) 2014 Dell Inc <Dell.Client.Kernel@dell.com>
+ * Copyright (C) 2025 Kurt Borja <kuurtb@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/dmi.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_profile.h>
+#include <linux/wmi.h>
+#include "alienware-wmi.h"
+
+#define WMAX_METHOD_HDMI_SOURCE 0x1
+#define WMAX_METHOD_HDMI_STATUS 0x2
+#define WMAX_METHOD_HDMI_CABLE 0x5
+#define WMAX_METHOD_AMPLIFIER_CABLE 0x6
+#define WMAX_METHOD_DEEP_SLEEP_CONTROL 0x0B
+#define WMAX_METHOD_DEEP_SLEEP_STATUS 0x0C
+#define WMAX_METHOD_BRIGHTNESS 0x3
+#define WMAX_METHOD_ZONE_CONTROL 0x4
+#define WMAX_METHOD_THERMAL_INFORMATION 0x14
+#define WMAX_METHOD_THERMAL_CONTROL 0x15
+#define WMAX_METHOD_GAME_SHIFT_STATUS 0x25
+
+#define WMAX_THERMAL_MODE_GMODE 0xAB
+
+#define WMAX_FAILURE_CODE 0xFFFFFFFF
+#define WMAX_THERMAL_TABLE_MASK GENMASK(7, 4)
+#define WMAX_THERMAL_MODE_MASK GENMASK(3, 0)
+#define WMAX_SENSOR_ID_MASK BIT(8)
+
+static bool force_platform_profile;
+module_param_unsafe(force_platform_profile, bool, 0);
+MODULE_PARM_DESC(force_platform_profile, "Forces auto-detecting thermal profiles without checking if WMI thermal backend is available");
+
+static bool force_gmode;
+module_param_unsafe(force_gmode, bool, 0);
+MODULE_PARM_DESC(force_gmode, "Forces G-Mode when performance profile is selected");
+
+struct awcc_quirks {
+ bool pprof;
+ bool gmode;
+};
+
+static struct awcc_quirks g_series_quirks = {
+ .pprof = true,
+ .gmode = true,
+};
+
+static struct awcc_quirks generic_quirks = {
+ .pprof = true,
+ .gmode = false,
+};
+
+static struct awcc_quirks empty_quirks;
+
+static const struct dmi_system_id awcc_dmi_table[] __initconst = {
+ {
+ .ident = "Alienware m16 R1 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m16 R1 AMD"),
+ },
+ .driver_data = &generic_quirks,
+ },
+ {
+ .ident = "Alienware m17 R5",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m17 R5 AMD"),
+ },
+ .driver_data = &generic_quirks,
+ },
+ {
+ .ident = "Alienware m18 R2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m18 R2"),
+ },
+ .driver_data = &generic_quirks,
+ },
+ {
+ .ident = "Alienware x15 R1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware x15 R1"),
+ },
+ .driver_data = &generic_quirks,
+ },
+ {
+ .ident = "Alienware x17 R2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware x17 R2"),
+ },
+ .driver_data = &generic_quirks,
+ },
+ {
+ .ident = "Dell Inc. G15 5510",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5510"),
+ },
+ .driver_data = &g_series_quirks,
+ },
+ {
+ .ident = "Dell Inc. G15 5511",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5511"),
+ },
+ .driver_data = &g_series_quirks,
+ },
+ {
+ .ident = "Dell Inc. G15 5515",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5515"),
+ },
+ .driver_data = &g_series_quirks,
+ },
+ {
+ .ident = "Dell Inc. G3 3500",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G3 3500"),
+ },
+ .driver_data = &g_series_quirks,
+ },
+ {
+ .ident = "Dell Inc. G3 3590",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G3 3590"),
+ },
+ .driver_data = &g_series_quirks,
+ },
+ {
+ .ident = "Dell Inc. G5 5500",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G5 5500"),
+ },
+ .driver_data = &g_series_quirks,
+ },
+};
+
+enum WMAX_THERMAL_INFORMATION_OPERATIONS {
+ WMAX_OPERATION_SYS_DESCRIPTION = 0x02,
+ WMAX_OPERATION_LIST_IDS = 0x03,
+ WMAX_OPERATION_CURRENT_PROFILE = 0x0B,
+};
+
+enum WMAX_THERMAL_CONTROL_OPERATIONS {
+ WMAX_OPERATION_ACTIVATE_PROFILE = 0x01,
+};
+
+enum WMAX_GAME_SHIFT_STATUS_OPERATIONS {
+ WMAX_OPERATION_TOGGLE_GAME_SHIFT = 0x01,
+ WMAX_OPERATION_GET_GAME_SHIFT_STATUS = 0x02,
+};
+
+enum WMAX_THERMAL_TABLES {
+ WMAX_THERMAL_TABLE_BASIC = 0x90,
+ WMAX_THERMAL_TABLE_USTT = 0xA0,
+};
+
+enum wmax_thermal_mode {
+ THERMAL_MODE_USTT_BALANCED,
+ THERMAL_MODE_USTT_BALANCED_PERFORMANCE,
+ THERMAL_MODE_USTT_COOL,
+ THERMAL_MODE_USTT_QUIET,
+ THERMAL_MODE_USTT_PERFORMANCE,
+ THERMAL_MODE_USTT_LOW_POWER,
+ THERMAL_MODE_BASIC_QUIET,
+ THERMAL_MODE_BASIC_BALANCED,
+ THERMAL_MODE_BASIC_BALANCED_PERFORMANCE,
+ THERMAL_MODE_BASIC_PERFORMANCE,
+ THERMAL_MODE_LAST,
+};
+
+struct wmax_led_args {
+ u32 led_mask;
+ struct color_platform colors;
+ u8 state;
+} __packed;
+
+struct wmax_brightness_args {
+ u32 led_mask;
+ u32 percentage;
+};
+
+struct wmax_basic_args {
+ u8 arg;
+};
+
+struct wmax_u32_args {
+ u8 operation;
+ u8 arg1;
+ u8 arg2;
+ u8 arg3;
+};
+
+struct awcc_priv {
+ struct wmi_device *wdev;
+ struct device *ppdev;
+ enum wmax_thermal_mode supported_thermal_profiles[PLATFORM_PROFILE_LAST];
+};
+
+static const enum platform_profile_option wmax_mode_to_platform_profile[THERMAL_MODE_LAST] = {
+ [THERMAL_MODE_USTT_BALANCED] = PLATFORM_PROFILE_BALANCED,
+ [THERMAL_MODE_USTT_BALANCED_PERFORMANCE] = PLATFORM_PROFILE_BALANCED_PERFORMANCE,
+ [THERMAL_MODE_USTT_COOL] = PLATFORM_PROFILE_COOL,
+ [THERMAL_MODE_USTT_QUIET] = PLATFORM_PROFILE_QUIET,
+ [THERMAL_MODE_USTT_PERFORMANCE] = PLATFORM_PROFILE_PERFORMANCE,
+ [THERMAL_MODE_USTT_LOW_POWER] = PLATFORM_PROFILE_LOW_POWER,
+ [THERMAL_MODE_BASIC_QUIET] = PLATFORM_PROFILE_QUIET,
+ [THERMAL_MODE_BASIC_BALANCED] = PLATFORM_PROFILE_BALANCED,
+ [THERMAL_MODE_BASIC_BALANCED_PERFORMANCE] = PLATFORM_PROFILE_BALANCED_PERFORMANCE,
+ [THERMAL_MODE_BASIC_PERFORMANCE] = PLATFORM_PROFILE_PERFORMANCE,
+};
+
+static struct awcc_quirks *awcc;
+
+/*
+ * The HDMI mux sysfs node indicates the status of the HDMI input mux.
+ * It can toggle between standard system GPU output and HDMI input.
+ */
+static ssize_t cable_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct alienfx_platdata *pdata = dev_get_platdata(dev);
+ struct wmax_basic_args in_args = {
+ .arg = 0,
+ };
+ u32 out_data;
+ int ret;
+
+ ret = alienware_wmi_command(pdata->wdev, WMAX_METHOD_HDMI_CABLE,
+ &in_args, sizeof(in_args), &out_data);
+ if (!ret) {
+ if (out_data == 0)
+ return sysfs_emit(buf, "[unconnected] connected unknown\n");
+ else if (out_data == 1)
+ return sysfs_emit(buf, "unconnected [connected] unknown\n");
+ }
+
+ pr_err("alienware-wmi: unknown HDMI cable status: %d\n", ret);
+ return sysfs_emit(buf, "unconnected connected [unknown]\n");
+}
+
+static ssize_t source_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct alienfx_platdata *pdata = dev_get_platdata(dev);
+ struct wmax_basic_args in_args = {
+ .arg = 0,
+ };
+ u32 out_data;
+ int ret;
+
+ ret = alienware_wmi_command(pdata->wdev, WMAX_METHOD_HDMI_STATUS,
+ &in_args, sizeof(in_args), &out_data);
+ if (!ret) {
+ if (out_data == 1)
+ return sysfs_emit(buf, "[input] gpu unknown\n");
+ else if (out_data == 2)
+ return sysfs_emit(buf, "input [gpu] unknown\n");
+ }
+
+ pr_err("alienware-wmi: unknown HDMI source status: %u\n", ret);
+ return sysfs_emit(buf, "input gpu [unknown]\n");
+}
+
+static ssize_t source_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct alienfx_platdata *pdata = dev_get_platdata(dev);
+ struct wmax_basic_args args;
+ int ret;
+
+ if (strcmp(buf, "gpu\n") == 0)
+ args.arg = 1;
+ else if (strcmp(buf, "input\n") == 0)
+ args.arg = 2;
+ else
+ args.arg = 3;
+ pr_debug("alienware-wmi: setting hdmi to %d : %s", args.arg, buf);
+
+ ret = alienware_wmi_command(pdata->wdev, WMAX_METHOD_HDMI_SOURCE, &args,
+ sizeof(args), NULL);
+ if (ret < 0)
+ pr_err("alienware-wmi: HDMI toggle failed: results: %u\n", ret);
+
+ return count;
+}
+
+static DEVICE_ATTR_RO(cable);
+static DEVICE_ATTR_RW(source);
+
+static bool hdmi_group_visible(struct kobject *kobj)
+{
+ return alienware_interface == WMAX && alienfx->hdmi_mux;
+}
+DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(hdmi);
+
+static struct attribute *hdmi_attrs[] = {
+ &dev_attr_cable.attr,
+ &dev_attr_source.attr,
+ NULL,
+};
+
+const struct attribute_group wmax_hdmi_attribute_group = {
+ .name = "hdmi",
+ .is_visible = SYSFS_GROUP_VISIBLE(hdmi),
+ .attrs = hdmi_attrs,
+};
+
+/*
+ * Alienware GFX amplifier support
+ * - Currently supports reading cable status
+ * - Leaving expansion room to possibly support dock/undock events later
+ */
+static ssize_t status_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct alienfx_platdata *pdata = dev_get_platdata(dev);
+ struct wmax_basic_args in_args = {
+ .arg = 0,
+ };
+ u32 out_data;
+ int ret;
+
+ ret = alienware_wmi_command(pdata->wdev, WMAX_METHOD_AMPLIFIER_CABLE,
+ &in_args, sizeof(in_args), &out_data);
+ if (!ret) {
+ if (out_data == 0)
+ return sysfs_emit(buf, "[unconnected] connected unknown\n");
+ else if (out_data == 1)
+ return sysfs_emit(buf, "unconnected [connected] unknown\n");
+ }
+
+ pr_err("alienware-wmi: unknown amplifier cable status: %d\n", ret);
+ return sysfs_emit(buf, "unconnected connected [unknown]\n");
+}
+
+static DEVICE_ATTR_RO(status);
+
+static bool amplifier_group_visible(struct kobject *kobj)
+{
+ return alienware_interface == WMAX && alienfx->amplifier;
+}
+DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(amplifier);
+
+static struct attribute *amplifier_attrs[] = {
+ &dev_attr_status.attr,
+ NULL,
+};
+
+const struct attribute_group wmax_amplifier_attribute_group = {
+ .name = "amplifier",
+ .is_visible = SYSFS_GROUP_VISIBLE(amplifier),
+ .attrs = amplifier_attrs,
+};
+
+/*
+ * Deep Sleep Control support
+ * - Modifies BIOS setting for deep sleep control allowing extra wakeup events
+ */
+static ssize_t deepsleep_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct alienfx_platdata *pdata = dev_get_platdata(dev);
+ struct wmax_basic_args in_args = {
+ .arg = 0,
+ };
+ u32 out_data;
+ int ret;
+
+ ret = alienware_wmi_command(pdata->wdev, WMAX_METHOD_DEEP_SLEEP_STATUS,
+ &in_args, sizeof(in_args), &out_data);
+ if (!ret) {
+ if (out_data == 0)
+ return sysfs_emit(buf, "[disabled] s5 s5_s4\n");
+ else if (out_data == 1)
+ return sysfs_emit(buf, "disabled [s5] s5_s4\n");
+ else if (out_data == 2)
+ return sysfs_emit(buf, "disabled s5 [s5_s4]\n");
+ }
+
+ pr_err("alienware-wmi: unknown deep sleep status: %d\n", ret);
+ return sysfs_emit(buf, "disabled s5 s5_s4 [unknown]\n");
+}
+
+static ssize_t deepsleep_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct alienfx_platdata *pdata = dev_get_platdata(dev);
+ struct wmax_basic_args args;
+ int ret;
+
+ if (strcmp(buf, "disabled\n") == 0)
+ args.arg = 0;
+ else if (strcmp(buf, "s5\n") == 0)
+ args.arg = 1;
+ else
+ args.arg = 2;
+ pr_debug("alienware-wmi: setting deep sleep to %d : %s", args.arg, buf);
+
+ ret = alienware_wmi_command(pdata->wdev, WMAX_METHOD_DEEP_SLEEP_CONTROL,
+ &args, sizeof(args), NULL);
+ if (ret < 0)
+ pr_err("alienware-wmi: deep sleep control failed: results: %u\n", ret);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(deepsleep);
+
+static bool deepsleep_group_visible(struct kobject *kobj)
+{
+ return alienware_interface == WMAX && alienfx->deepslp;
+}
+DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(deepsleep);
+
+static struct attribute *deepsleep_attrs[] = {
+ &dev_attr_deepsleep.attr,
+ NULL,
+};
+
+const struct attribute_group wmax_deepsleep_attribute_group = {
+ .name = "deepsleep",
+ .is_visible = SYSFS_GROUP_VISIBLE(deepsleep),
+ .attrs = deepsleep_attrs,
+};
+
+/*
+ * Thermal Profile control
+ * - Provides thermal profile control through the Platform Profile API
+ */
+static bool is_wmax_thermal_code(u32 code)
+{
+ if (code & WMAX_SENSOR_ID_MASK)
+ return false;
+
+ if ((code & WMAX_THERMAL_MODE_MASK) >= THERMAL_MODE_LAST)
+ return false;
+
+ if ((code & WMAX_THERMAL_TABLE_MASK) == WMAX_THERMAL_TABLE_BASIC &&
+ (code & WMAX_THERMAL_MODE_MASK) >= THERMAL_MODE_BASIC_QUIET)
+ return true;
+
+ if ((code & WMAX_THERMAL_TABLE_MASK) == WMAX_THERMAL_TABLE_USTT &&
+ (code & WMAX_THERMAL_MODE_MASK) <= THERMAL_MODE_USTT_LOW_POWER)
+ return true;
+
+ return false;
+}
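Putting numbers on the check above: basic-table codes 0x96-0x99 and USTT codes 0xA0-0xA5 pass, while anything with BIT(8) set or a mode nibble of 10 or more is rejected (including 0xAB, the G-Mode code, which thermal_profile_get() handles separately). A small standalone harness with the constants re-declared locally:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Local copies of the relevant constants, for illustration only. */
#define TABLE_MASK		0xF0u	/* GENMASK(7, 4) */
#define MODE_MASK		0x0Fu	/* GENMASK(3, 0) */
#define SENSOR_ID		0x100u	/* BIT(8) */
#define TABLE_BASIC		0x90u
#define TABLE_USTT		0xA0u
#define MODE_BASIC_QUIET	6u	/* THERMAL_MODE_BASIC_QUIET */
#define MODE_USTT_LOWPWR	5u	/* THERMAL_MODE_USTT_LOW_POWER */
#define MODE_LAST		10u	/* THERMAL_MODE_LAST */

static bool is_wmax_thermal_code(uint32_t code)
{
	if (code & SENSOR_ID)
		return false;
	if ((code & MODE_MASK) >= MODE_LAST)
		return false;
	if ((code & TABLE_MASK) == TABLE_BASIC && (code & MODE_MASK) >= MODE_BASIC_QUIET)
		return true;
	if ((code & TABLE_MASK) == TABLE_USTT && (code & MODE_MASK) <= MODE_USTT_LOWPWR)
		return true;
	return false;
}

int main(void)
{
	uint32_t samples[] = { 0x97, 0xA3, 0xA0, 0x92, 0xAB, 0x1A0 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("0x%03x -> %s\n", (unsigned int)samples[i],
		       is_wmax_thermal_code(samples[i]) ? "valid" : "rejected");
	return 0;
}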
+
+static int wmax_thermal_information(struct wmi_device *wdev, u8 operation,
+ u8 arg, u32 *out_data)
+{
+ struct wmax_u32_args in_args = {
+ .operation = operation,
+ .arg1 = arg,
+ .arg2 = 0,
+ .arg3 = 0,
+ };
+ int ret;
+
+ ret = alienware_wmi_command(wdev, WMAX_METHOD_THERMAL_INFORMATION,
+ &in_args, sizeof(in_args), out_data);
+ if (ret < 0)
+ return ret;
+
+ if (*out_data == WMAX_FAILURE_CODE)
+ return -EBADRQC;
+
+ return 0;
+}
+
+static int wmax_thermal_control(struct wmi_device *wdev, u8 profile)
+{
+ struct wmax_u32_args in_args = {
+ .operation = WMAX_OPERATION_ACTIVATE_PROFILE,
+ .arg1 = profile,
+ .arg2 = 0,
+ .arg3 = 0,
+ };
+ u32 out_data;
+ int ret;
+
+ ret = alienware_wmi_command(wdev, WMAX_METHOD_THERMAL_CONTROL,
+ &in_args, sizeof(in_args), &out_data);
+ if (ret)
+ return ret;
+
+ if (out_data == WMAX_FAILURE_CODE)
+ return -EBADRQC;
+
+ return 0;
+}
+
+static int wmax_game_shift_status(struct wmi_device *wdev, u8 operation,
+ u32 *out_data)
+{
+ struct wmax_u32_args in_args = {
+ .operation = operation,
+ .arg1 = 0,
+ .arg2 = 0,
+ .arg3 = 0,
+ };
+ int ret;
+
+ ret = alienware_wmi_command(wdev, WMAX_METHOD_GAME_SHIFT_STATUS,
+ &in_args, sizeof(in_args), out_data);
+ if (ret < 0)
+ return ret;
+
+ if (*out_data == WMAX_FAILURE_CODE)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int thermal_profile_get(struct device *dev,
+ enum platform_profile_option *profile)
+{
+ struct awcc_priv *priv = dev_get_drvdata(dev);
+ u32 out_data;
+ int ret;
+
+ ret = wmax_thermal_information(priv->wdev, WMAX_OPERATION_CURRENT_PROFILE,
+ 0, &out_data);
+
+ if (ret < 0)
+ return ret;
+
+ if (out_data == WMAX_THERMAL_MODE_GMODE) {
+ *profile = PLATFORM_PROFILE_PERFORMANCE;
+ return 0;
+ }
+
+ if (!is_wmax_thermal_code(out_data))
+ return -ENODATA;
+
+ out_data &= WMAX_THERMAL_MODE_MASK;
+ *profile = wmax_mode_to_platform_profile[out_data];
+
+ return 0;
+}
+
+static int thermal_profile_set(struct device *dev,
+ enum platform_profile_option profile)
+{
+ struct awcc_priv *priv = dev_get_drvdata(dev);
+
+ if (awcc->gmode) {
+ u32 gmode_status;
+ int ret;
+
+ ret = wmax_game_shift_status(priv->wdev,
+ WMAX_OPERATION_GET_GAME_SHIFT_STATUS,
+ &gmode_status);
+
+ if (ret < 0)
+ return ret;
+
+ if ((profile == PLATFORM_PROFILE_PERFORMANCE && !gmode_status) ||
+ (profile != PLATFORM_PROFILE_PERFORMANCE && gmode_status)) {
+ ret = wmax_game_shift_status(priv->wdev,
+ WMAX_OPERATION_TOGGLE_GAME_SHIFT,
+ &gmode_status);
+
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return wmax_thermal_control(priv->wdev,
+ priv->supported_thermal_profiles[profile]);
+}
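The G-Mode branch above toggles Game Shift only when its current state disagrees with the requested profile: on for performance, off for everything else. A standalone model of that decision, with purely illustrative values:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative model: should Game Shift be toggled for this request? */
static bool needs_toggle(bool want_performance, bool gmode_on)
{
	return (want_performance && !gmode_on) || (!want_performance && gmode_on);
}

int main(void)
{
	printf("perf requested, gmode off -> toggle: %d\n", needs_toggle(true, false));
	printf("perf requested, gmode on  -> toggle: %d\n", needs_toggle(true, true));
	printf("other profile,  gmode on  -> toggle: %d\n", needs_toggle(false, true));
	return 0;
}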
+
+static int thermal_profile_probe(void *drvdata, unsigned long *choices)
+{
+ enum platform_profile_option profile;
+ struct awcc_priv *priv = drvdata;
+ enum wmax_thermal_mode mode;
+ u8 sys_desc[4];
+ u32 first_mode;
+ u32 out_data;
+ int ret;
+
+ ret = wmax_thermal_information(priv->wdev, WMAX_OPERATION_SYS_DESCRIPTION,
+ 0, (u32 *) &sys_desc);
+ if (ret < 0)
+ return ret;
+
+ first_mode = sys_desc[0] + sys_desc[1];
+
+ for (u32 i = 0; i < sys_desc[3]; i++) {
+ ret = wmax_thermal_information(priv->wdev, WMAX_OPERATION_LIST_IDS,
+ i + first_mode, &out_data);
+
+ if (ret == -EIO)
+ return ret;
+
+ if (ret == -EBADRQC)
+ break;
+
+ if (!is_wmax_thermal_code(out_data))
+ continue;
+
+ mode = out_data & WMAX_THERMAL_MODE_MASK;
+ profile = wmax_mode_to_platform_profile[mode];
+ priv->supported_thermal_profiles[profile] = out_data;
+
+ set_bit(profile, choices);
+ }
+
+ if (bitmap_empty(choices, PLATFORM_PROFILE_LAST))
+ return -ENODEV;
+
+ if (awcc->gmode) {
+ priv->supported_thermal_profiles[PLATFORM_PROFILE_PERFORMANCE] =
+ WMAX_THERMAL_MODE_GMODE;
+
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+ }
+
+ return 0;
+}
+
+static const struct platform_profile_ops awcc_platform_profile_ops = {
+ .probe = thermal_profile_probe,
+ .profile_get = thermal_profile_get,
+ .profile_set = thermal_profile_set,
+};
+
+static int awcc_platform_profile_init(struct wmi_device *wdev)
+{
+ struct awcc_priv *priv = dev_get_drvdata(&wdev->dev);
+
+ priv->ppdev = devm_platform_profile_register(&wdev->dev, "alienware-wmi",
+ priv, &awcc_platform_profile_ops);
+
+ return PTR_ERR_OR_ZERO(priv->ppdev);
+}
+
+static int alienware_awcc_setup(struct wmi_device *wdev)
+{
+ struct awcc_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&wdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->wdev = wdev;
+ dev_set_drvdata(&wdev->dev, priv);
+
+ if (awcc->pprof) {
+ ret = awcc_platform_profile_init(wdev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * WMAX WMI driver
+ */
+static int wmax_wmi_update_led(struct alienfx_priv *priv,
+ struct wmi_device *wdev, u8 location)
+{
+ struct wmax_led_args in_args = {
+ .led_mask = 1 << location,
+ .colors = priv->colors[location],
+ .state = priv->lighting_control_state,
+ };
+
+ return alienware_wmi_command(wdev, WMAX_METHOD_ZONE_CONTROL, &in_args,
+ sizeof(in_args), NULL);
+}
+
+static int wmax_wmi_update_brightness(struct alienfx_priv *priv,
+ struct wmi_device *wdev, u8 brightness)
+{
+ struct wmax_brightness_args in_args = {
+ .led_mask = 0xFF,
+ .percentage = brightness,
+ };
+
+ return alienware_wmi_command(wdev, WMAX_METHOD_BRIGHTNESS, &in_args,
+ sizeof(in_args), NULL);
+}
+
+static int wmax_wmi_probe(struct wmi_device *wdev, const void *context)
+{
+ struct alienfx_platdata pdata = {
+ .wdev = wdev,
+ .ops = {
+ .upd_led = wmax_wmi_update_led,
+ .upd_brightness = wmax_wmi_update_brightness,
+ },
+ };
+ int ret;
+
+ if (awcc)
+ ret = alienware_awcc_setup(wdev);
+ else
+ ret = alienware_alienfx_setup(&pdata);
+
+ return ret;
+}
+
+static const struct wmi_device_id alienware_wmax_device_id_table[] = {
+ { WMAX_CONTROL_GUID, NULL },
+ { },
+};
+MODULE_DEVICE_TABLE(wmi, alienware_wmax_device_id_table);
+
+static struct wmi_driver alienware_wmax_wmi_driver = {
+ .driver = {
+ .name = "alienware-wmi-wmax",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .id_table = alienware_wmax_device_id_table,
+ .probe = wmax_wmi_probe,
+ .no_singleton = true,
+};
+
+int __init alienware_wmax_wmi_init(void)
+{
+ const struct dmi_system_id *id;
+
+ id = dmi_first_match(awcc_dmi_table);
+ if (id)
+ awcc = id->driver_data;
+
+ if (force_platform_profile) {
+ if (!awcc)
+ awcc = &empty_quirks;
+
+ awcc->pprof = true;
+ }
+
+ if (force_gmode) {
+ if (awcc)
+ awcc->gmode = true;
+ else
+ pr_warn("force_gmode requires platform profile support\n");
+ }
+
+ return wmi_driver_register(&alienware_wmax_wmi_driver);
+}
+
+void __exit alienware_wmax_wmi_exit(void)
+{
+ wmi_driver_unregister(&alienware_wmax_wmi_driver);
+}
diff --git a/drivers/platform/x86/dell/alienware-wmi.c b/drivers/platform/x86/dell/alienware-wmi.c
deleted file mode 100644
index e252e0cf47ef..000000000000
--- a/drivers/platform/x86/dell/alienware-wmi.c
+++ /dev/null
@@ -1,1249 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Alienware AlienFX control
- *
- * Copyright (C) 2014 Dell Inc <Dell.Client.Kernel@dell.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/acpi.h>
-#include <linux/bitfield.h>
-#include <linux/bits.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/platform_profile.h>
-#include <linux/dmi.h>
-#include <linux/leds.h>
-
-#define LEGACY_CONTROL_GUID "A90597CE-A997-11DA-B012-B622A1EF5492"
-#define LEGACY_POWER_CONTROL_GUID "A80593CE-A997-11DA-B012-B622A1EF5492"
-#define WMAX_CONTROL_GUID "A70591CE-A997-11DA-B012-B622A1EF5492"
-
-#define WMAX_METHOD_HDMI_SOURCE 0x1
-#define WMAX_METHOD_HDMI_STATUS 0x2
-#define WMAX_METHOD_BRIGHTNESS 0x3
-#define WMAX_METHOD_ZONE_CONTROL 0x4
-#define WMAX_METHOD_HDMI_CABLE 0x5
-#define WMAX_METHOD_AMPLIFIER_CABLE 0x6
-#define WMAX_METHOD_DEEP_SLEEP_CONTROL 0x0B
-#define WMAX_METHOD_DEEP_SLEEP_STATUS 0x0C
-#define WMAX_METHOD_THERMAL_INFORMATION 0x14
-#define WMAX_METHOD_THERMAL_CONTROL 0x15
-#define WMAX_METHOD_GAME_SHIFT_STATUS 0x25
-
-#define WMAX_THERMAL_MODE_GMODE 0xAB
-
-#define WMAX_FAILURE_CODE 0xFFFFFFFF
-
-MODULE_AUTHOR("Mario Limonciello <mario.limonciello@outlook.com>");
-MODULE_DESCRIPTION("Alienware special feature control");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("wmi:" LEGACY_CONTROL_GUID);
-MODULE_ALIAS("wmi:" WMAX_CONTROL_GUID);
-
-static bool force_platform_profile;
-module_param_unsafe(force_platform_profile, bool, 0);
-MODULE_PARM_DESC(force_platform_profile, "Forces auto-detecting thermal profiles without checking if WMI thermal backend is available");
-
-static bool force_gmode;
-module_param_unsafe(force_gmode, bool, 0);
-MODULE_PARM_DESC(force_gmode, "Forces G-Mode when performance profile is selected");
-
-enum INTERFACE_FLAGS {
- LEGACY,
- WMAX,
-};
-
-enum LEGACY_CONTROL_STATES {
- LEGACY_RUNNING = 1,
- LEGACY_BOOTING = 0,
- LEGACY_SUSPEND = 3,
-};
-
-enum WMAX_CONTROL_STATES {
- WMAX_RUNNING = 0xFF,
- WMAX_BOOTING = 0,
- WMAX_SUSPEND = 3,
-};
-
-enum WMAX_THERMAL_INFORMATION_OPERATIONS {
- WMAX_OPERATION_SYS_DESCRIPTION = 0x02,
- WMAX_OPERATION_LIST_IDS = 0x03,
- WMAX_OPERATION_CURRENT_PROFILE = 0x0B,
-};
-
-enum WMAX_THERMAL_CONTROL_OPERATIONS {
- WMAX_OPERATION_ACTIVATE_PROFILE = 0x01,
-};
-
-enum WMAX_GAME_SHIFT_STATUS_OPERATIONS {
- WMAX_OPERATION_TOGGLE_GAME_SHIFT = 0x01,
- WMAX_OPERATION_GET_GAME_SHIFT_STATUS = 0x02,
-};
-
-enum WMAX_THERMAL_TABLES {
- WMAX_THERMAL_TABLE_BASIC = 0x90,
- WMAX_THERMAL_TABLE_USTT = 0xA0,
-};
-
-enum wmax_thermal_mode {
- THERMAL_MODE_USTT_BALANCED,
- THERMAL_MODE_USTT_BALANCED_PERFORMANCE,
- THERMAL_MODE_USTT_COOL,
- THERMAL_MODE_USTT_QUIET,
- THERMAL_MODE_USTT_PERFORMANCE,
- THERMAL_MODE_USTT_LOW_POWER,
- THERMAL_MODE_BASIC_QUIET,
- THERMAL_MODE_BASIC_BALANCED,
- THERMAL_MODE_BASIC_BALANCED_PERFORMANCE,
- THERMAL_MODE_BASIC_PERFORMANCE,
- THERMAL_MODE_LAST,
-};
-
-static const enum platform_profile_option wmax_mode_to_platform_profile[THERMAL_MODE_LAST] = {
- [THERMAL_MODE_USTT_BALANCED] = PLATFORM_PROFILE_BALANCED,
- [THERMAL_MODE_USTT_BALANCED_PERFORMANCE] = PLATFORM_PROFILE_BALANCED_PERFORMANCE,
- [THERMAL_MODE_USTT_COOL] = PLATFORM_PROFILE_COOL,
- [THERMAL_MODE_USTT_QUIET] = PLATFORM_PROFILE_QUIET,
- [THERMAL_MODE_USTT_PERFORMANCE] = PLATFORM_PROFILE_PERFORMANCE,
- [THERMAL_MODE_USTT_LOW_POWER] = PLATFORM_PROFILE_LOW_POWER,
- [THERMAL_MODE_BASIC_QUIET] = PLATFORM_PROFILE_QUIET,
- [THERMAL_MODE_BASIC_BALANCED] = PLATFORM_PROFILE_BALANCED,
- [THERMAL_MODE_BASIC_BALANCED_PERFORMANCE] = PLATFORM_PROFILE_BALANCED_PERFORMANCE,
- [THERMAL_MODE_BASIC_PERFORMANCE] = PLATFORM_PROFILE_PERFORMANCE,
-};
-
-struct quirk_entry {
- u8 num_zones;
- u8 hdmi_mux;
- u8 amplifier;
- u8 deepslp;
- bool thermal;
- bool gmode;
-};
-
-static struct quirk_entry *quirks;
-
-
-static struct quirk_entry quirk_inspiron5675 = {
- .num_zones = 2,
- .hdmi_mux = 0,
- .amplifier = 0,
- .deepslp = 0,
- .thermal = false,
- .gmode = false,
-};
-
-static struct quirk_entry quirk_unknown = {
- .num_zones = 2,
- .hdmi_mux = 0,
- .amplifier = 0,
- .deepslp = 0,
- .thermal = false,
- .gmode = false,
-};
-
-static struct quirk_entry quirk_x51_r1_r2 = {
- .num_zones = 3,
- .hdmi_mux = 0,
- .amplifier = 0,
- .deepslp = 0,
- .thermal = false,
- .gmode = false,
-};
-
-static struct quirk_entry quirk_x51_r3 = {
- .num_zones = 4,
- .hdmi_mux = 0,
- .amplifier = 1,
- .deepslp = 0,
- .thermal = false,
- .gmode = false,
-};
-
-static struct quirk_entry quirk_asm100 = {
- .num_zones = 2,
- .hdmi_mux = 1,
- .amplifier = 0,
- .deepslp = 0,
- .thermal = false,
- .gmode = false,
-};
-
-static struct quirk_entry quirk_asm200 = {
- .num_zones = 2,
- .hdmi_mux = 1,
- .amplifier = 0,
- .deepslp = 1,
- .thermal = false,
- .gmode = false,
-};
-
-static struct quirk_entry quirk_asm201 = {
- .num_zones = 2,
- .hdmi_mux = 1,
- .amplifier = 1,
- .deepslp = 1,
- .thermal = false,
- .gmode = false,
-};
-
-static struct quirk_entry quirk_g_series = {
- .num_zones = 0,
- .hdmi_mux = 0,
- .amplifier = 0,
- .deepslp = 0,
- .thermal = true,
- .gmode = true,
-};
-
-static struct quirk_entry quirk_x_series = {
- .num_zones = 0,
- .hdmi_mux = 0,
- .amplifier = 0,
- .deepslp = 0,
- .thermal = true,
- .gmode = false,
-};
-
-static int __init dmi_matched(const struct dmi_system_id *dmi)
-{
- quirks = dmi->driver_data;
- return 1;
-}
-
-static const struct dmi_system_id alienware_quirks[] __initconst = {
- {
- .callback = dmi_matched,
- .ident = "Alienware ASM100",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "ASM100"),
- },
- .driver_data = &quirk_asm100,
- },
- {
- .callback = dmi_matched,
- .ident = "Alienware ASM200",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "ASM200"),
- },
- .driver_data = &quirk_asm200,
- },
- {
- .callback = dmi_matched,
- .ident = "Alienware ASM201",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "ASM201"),
- },
- .driver_data = &quirk_asm201,
- },
- {
- .callback = dmi_matched,
- .ident = "Alienware m16 R1 AMD",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m16 R1 AMD"),
- },
- .driver_data = &quirk_x_series,
- },
- {
- .callback = dmi_matched,
- .ident = "Alienware m17 R5",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m17 R5 AMD"),
- },
- .driver_data = &quirk_x_series,
- },
- {
- .callback = dmi_matched,
- .ident = "Alienware m18 R2",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m18 R2"),
- },
- .driver_data = &quirk_x_series,
- },
- {
- .callback = dmi_matched,
- .ident = "Alienware x15 R1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Alienware x15 R1"),
- },
- .driver_data = &quirk_x_series,
- },
- {
- .callback = dmi_matched,
- .ident = "Alienware x17 R2",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Alienware x17 R2"),
- },
- .driver_data = &quirk_x_series,
- },
- {
- .callback = dmi_matched,
- .ident = "Alienware X51 R1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51"),
- },
- .driver_data = &quirk_x51_r1_r2,
- },
- {
- .callback = dmi_matched,
- .ident = "Alienware X51 R2",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51 R2"),
- },
- .driver_data = &quirk_x51_r1_r2,
- },
- {
- .callback = dmi_matched,
- .ident = "Alienware X51 R3",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51 R3"),
- },
- .driver_data = &quirk_x51_r3,
- },
- {
- .callback = dmi_matched,
- .ident = "Dell Inc. G15 5510",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5510"),
- },
- .driver_data = &quirk_g_series,
- },
- {
- .callback = dmi_matched,
- .ident = "Dell Inc. G15 5511",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5511"),
- },
- .driver_data = &quirk_g_series,
- },
- {
- .callback = dmi_matched,
- .ident = "Dell Inc. G15 5515",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5515"),
- },
- .driver_data = &quirk_g_series,
- },
- {
- .callback = dmi_matched,
- .ident = "Dell Inc. G3 3500",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "G3 3500"),
- },
- .driver_data = &quirk_g_series,
- },
- {
- .callback = dmi_matched,
- .ident = "Dell Inc. G3 3590",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "G3 3590"),
- },
- .driver_data = &quirk_g_series,
- },
- {
- .callback = dmi_matched,
- .ident = "Dell Inc. G5 5500",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "G5 5500"),
- },
- .driver_data = &quirk_g_series,
- },
- {
- .callback = dmi_matched,
- .ident = "Dell Inc. Inspiron 5675",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5675"),
- },
- .driver_data = &quirk_inspiron5675,
- },
- {}
-};
-
-struct color_platform {
- u8 blue;
- u8 green;
- u8 red;
-} __packed;
-
-struct wmax_brightness_args {
- u32 led_mask;
- u32 percentage;
-};
-
-struct wmax_basic_args {
- u8 arg;
-};
-
-struct legacy_led_args {
- struct color_platform colors;
- u8 brightness;
- u8 state;
-} __packed;
-
-struct wmax_led_args {
- u32 led_mask;
- struct color_platform colors;
- u8 state;
-} __packed;
-
-struct wmax_u32_args {
- u8 operation;
- u8 arg1;
- u8 arg2;
- u8 arg3;
-};
-
-static struct platform_device *platform_device;
-static struct color_platform colors[4];
-static enum wmax_thermal_mode supported_thermal_profiles[PLATFORM_PROFILE_LAST];
-
-static u8 interface;
-static u8 lighting_control_state;
-static u8 global_brightness;
-
-/*
- * Helpers used for zone control
- */
-static int parse_rgb(const char *buf, struct color_platform *colors)
-{
- long unsigned int rgb;
- int ret;
- union color_union {
- struct color_platform cp;
- int package;
- } repackager;
-
- ret = kstrtoul(buf, 16, &rgb);
- if (ret)
- return ret;
-
- /* RGB triplet notation is 24-bit hexadecimal */
- if (rgb > 0xFFFFFF)
- return -EINVAL;
-
- repackager.package = rgb & 0x0f0f0f0f;
- pr_debug("alienware-wmi: r: %d g:%d b: %d\n",
- repackager.cp.red, repackager.cp.green, repackager.cp.blue);
- *colors = repackager.cp;
- return 0;
-}
-
-/*
- * Individual RGB zone control
- */
-static int alienware_update_led(u8 location)
-{
- int method_id;
- acpi_status status;
- char *guid;
- struct acpi_buffer input;
- struct legacy_led_args legacy_args;
- struct wmax_led_args wmax_basic_args;
- if (interface == WMAX) {
- wmax_basic_args.led_mask = 1 << location;
- wmax_basic_args.colors = colors[location];
- wmax_basic_args.state = lighting_control_state;
- guid = WMAX_CONTROL_GUID;
- method_id = WMAX_METHOD_ZONE_CONTROL;
-
- input.length = sizeof(wmax_basic_args);
- input.pointer = &wmax_basic_args;
- } else {
- legacy_args.colors = colors[location];
- legacy_args.brightness = global_brightness;
- legacy_args.state = 0;
- if (lighting_control_state == LEGACY_BOOTING ||
- lighting_control_state == LEGACY_SUSPEND) {
- guid = LEGACY_POWER_CONTROL_GUID;
- legacy_args.state = lighting_control_state;
- } else
- guid = LEGACY_CONTROL_GUID;
- method_id = location + 1;
-
- input.length = sizeof(legacy_args);
- input.pointer = &legacy_args;
- }
- pr_debug("alienware-wmi: guid %s method %d\n", guid, method_id);
-
- status = wmi_evaluate_method(guid, 0, method_id, &input, NULL);
- if (ACPI_FAILURE(status))
- pr_err("alienware-wmi: zone set failure: %u\n", status);
- return ACPI_FAILURE(status);
-}
-
-static ssize_t zone_show(struct device *dev, struct device_attribute *attr,
- char *buf, u8 location)
-{
- return sprintf(buf, "red: %d, green: %d, blue: %d\n",
- colors[location].red, colors[location].green,
- colors[location].blue);
-
-}
-
-static ssize_t zone_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count, u8 location)
-{
- int ret;
-
- ret = parse_rgb(buf, &colors[location]);
- if (ret)
- return ret;
-
- ret = alienware_update_led(location);
-
- return ret ? ret : count;
-}
-
-static ssize_t zone00_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- return zone_show(dev, attr, buf, 0);
-}
-
-static ssize_t zone00_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return zone_store(dev, attr, buf, count, 0);
-}
-
-static DEVICE_ATTR_RW(zone00);
-
-static ssize_t zone01_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- return zone_show(dev, attr, buf, 1);
-}
-
-static ssize_t zone01_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return zone_store(dev, attr, buf, count, 1);
-}
-
-static DEVICE_ATTR_RW(zone01);
-
-static ssize_t zone02_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- return zone_show(dev, attr, buf, 2);
-}
-
-static ssize_t zone02_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return zone_store(dev, attr, buf, count, 2);
-}
-
-static DEVICE_ATTR_RW(zone02);
-
-static ssize_t zone03_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- return zone_show(dev, attr, buf, 3);
-}
-
-static ssize_t zone03_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return zone_store(dev, attr, buf, count, 3);
-}
-
-static DEVICE_ATTR_RW(zone03);
-
-/*
- * Lighting control state device attribute (Global)
- */
-static ssize_t lighting_control_state_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- if (lighting_control_state == LEGACY_BOOTING)
- return sysfs_emit(buf, "[booting] running suspend\n");
- else if (lighting_control_state == LEGACY_SUSPEND)
- return sysfs_emit(buf, "booting running [suspend]\n");
-
- return sysfs_emit(buf, "booting [running] suspend\n");
-}
-
-static ssize_t lighting_control_state_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- u8 val;
-
- if (strcmp(buf, "booting\n") == 0)
- val = LEGACY_BOOTING;
- else if (strcmp(buf, "suspend\n") == 0)
- val = LEGACY_SUSPEND;
- else if (interface == LEGACY)
- val = LEGACY_RUNNING;
- else
- val = WMAX_RUNNING;
-
- lighting_control_state = val;
- pr_debug("alienware-wmi: updated control state to %d\n",
- lighting_control_state);
-
- return count;
-}
-
-static DEVICE_ATTR_RW(lighting_control_state);
-
-static umode_t zone_attr_visible(struct kobject *kobj,
- struct attribute *attr, int n)
-{
- if (n < quirks->num_zones + 1)
- return attr->mode;
-
- return 0;
-}
-
-static bool zone_group_visible(struct kobject *kobj)
-{
- return quirks->num_zones > 0;
-}
-DEFINE_SYSFS_GROUP_VISIBLE(zone);
-
-static struct attribute *zone_attrs[] = {
- &dev_attr_lighting_control_state.attr,
- &dev_attr_zone00.attr,
- &dev_attr_zone01.attr,
- &dev_attr_zone02.attr,
- &dev_attr_zone03.attr,
- NULL
-};
-
-static struct attribute_group zone_attribute_group = {
- .name = "rgb_zones",
- .is_visible = SYSFS_GROUP_VISIBLE(zone),
- .attrs = zone_attrs,
-};
-
-/*
- * LED Brightness (Global)
- */
-static int wmax_brightness(int brightness)
-{
- acpi_status status;
- struct acpi_buffer input;
- struct wmax_brightness_args args = {
- .led_mask = 0xFF,
- .percentage = brightness,
- };
- input.length = sizeof(args);
- input.pointer = &args;
- status = wmi_evaluate_method(WMAX_CONTROL_GUID, 0,
- WMAX_METHOD_BRIGHTNESS, &input, NULL);
- if (ACPI_FAILURE(status))
- pr_err("alienware-wmi: brightness set failure: %u\n", status);
- return ACPI_FAILURE(status);
-}
-
-static void global_led_set(struct led_classdev *led_cdev,
- enum led_brightness brightness)
-{
- int ret;
- global_brightness = brightness;
- if (interface == WMAX)
- ret = wmax_brightness(brightness);
- else
- ret = alienware_update_led(0);
- if (ret)
- pr_err("LED brightness update failed\n");
-}
-
-static enum led_brightness global_led_get(struct led_classdev *led_cdev)
-{
- return global_brightness;
-}
-
-static struct led_classdev global_led = {
- .brightness_set = global_led_set,
- .brightness_get = global_led_get,
- .name = "alienware::global_brightness",
-};
-
-static int alienware_zone_init(struct platform_device *dev)
-{
- if (interface == WMAX) {
- lighting_control_state = WMAX_RUNNING;
- } else if (interface == LEGACY) {
- lighting_control_state = LEGACY_RUNNING;
- }
- global_led.max_brightness = 0x0F;
- global_brightness = global_led.max_brightness;
-
- return led_classdev_register(&dev->dev, &global_led);
-}
-
-static void alienware_zone_exit(struct platform_device *dev)
-{
- if (!quirks->num_zones)
- return;
-
- led_classdev_unregister(&global_led);
-}
-
-static acpi_status alienware_wmax_command(void *in_args, size_t in_size,
- u32 command, u32 *out_data)
-{
- acpi_status status;
- union acpi_object *obj;
- struct acpi_buffer input;
- struct acpi_buffer output;
-
- input.length = in_size;
- input.pointer = in_args;
- if (out_data) {
- output.length = ACPI_ALLOCATE_BUFFER;
- output.pointer = NULL;
- status = wmi_evaluate_method(WMAX_CONTROL_GUID, 0,
- command, &input, &output);
- if (ACPI_SUCCESS(status)) {
- obj = (union acpi_object *)output.pointer;
- if (obj && obj->type == ACPI_TYPE_INTEGER)
- *out_data = (u32)obj->integer.value;
- }
- kfree(output.pointer);
- } else {
- status = wmi_evaluate_method(WMAX_CONTROL_GUID, 0,
- command, &input, NULL);
- }
- return status;
-}
-
-/*
- * The HDMI mux sysfs node indicates the status of the HDMI input mux.
- * It can toggle between standard system GPU output and HDMI input.
- */
-static ssize_t cable_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct wmax_basic_args in_args = {
- .arg = 0,
- };
- acpi_status status;
- u32 out_data;
-
- status =
- alienware_wmax_command(&in_args, sizeof(in_args),
- WMAX_METHOD_HDMI_CABLE, &out_data);
- if (ACPI_SUCCESS(status)) {
- if (out_data == 0)
- return sysfs_emit(buf, "[unconnected] connected unknown\n");
- else if (out_data == 1)
- return sysfs_emit(buf, "unconnected [connected] unknown\n");
- }
- pr_err("alienware-wmi: unknown HDMI cable status: %d\n", status);
- return sysfs_emit(buf, "unconnected connected [unknown]\n");
-}
-
-static ssize_t source_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct wmax_basic_args in_args = {
- .arg = 0,
- };
- acpi_status status;
- u32 out_data;
-
- status =
- alienware_wmax_command(&in_args, sizeof(in_args),
- WMAX_METHOD_HDMI_STATUS, &out_data);
-
- if (ACPI_SUCCESS(status)) {
- if (out_data == 1)
- return sysfs_emit(buf, "[input] gpu unknown\n");
- else if (out_data == 2)
- return sysfs_emit(buf, "input [gpu] unknown\n");
- }
- pr_err("alienware-wmi: unknown HDMI source status: %u\n", status);
- return sysfs_emit(buf, "input gpu [unknown]\n");
-}
-
-static ssize_t source_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct wmax_basic_args args;
- acpi_status status;
-
- if (strcmp(buf, "gpu\n") == 0)
- args.arg = 1;
- else if (strcmp(buf, "input\n") == 0)
- args.arg = 2;
- else
- args.arg = 3;
- pr_debug("alienware-wmi: setting hdmi to %d : %s", args.arg, buf);
-
- status = alienware_wmax_command(&args, sizeof(args),
- WMAX_METHOD_HDMI_SOURCE, NULL);
-
- if (ACPI_FAILURE(status))
- pr_err("alienware-wmi: HDMI toggle failed: results: %u\n",
- status);
- return count;
-}
-
-static DEVICE_ATTR_RO(cable);
-static DEVICE_ATTR_RW(source);
-
-static bool hdmi_group_visible(struct kobject *kobj)
-{
- return quirks->hdmi_mux;
-}
-DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(hdmi);
-
-static struct attribute *hdmi_attrs[] = {
- &dev_attr_cable.attr,
- &dev_attr_source.attr,
- NULL,
-};
-
-static const struct attribute_group hdmi_attribute_group = {
- .name = "hdmi",
- .is_visible = SYSFS_GROUP_VISIBLE(hdmi),
- .attrs = hdmi_attrs,
-};
-
-/*
- * Alienware GFX amplifier support
- * - Currently supports reading cable status
- * - Leaving expansion room to possibly support dock/undock events later
- */
-static ssize_t status_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct wmax_basic_args in_args = {
- .arg = 0,
- };
- acpi_status status;
- u32 out_data;
-
- status =
- alienware_wmax_command(&in_args, sizeof(in_args),
- WMAX_METHOD_AMPLIFIER_CABLE, &out_data);
- if (ACPI_SUCCESS(status)) {
- if (out_data == 0)
- return sysfs_emit(buf, "[unconnected] connected unknown\n");
- else if (out_data == 1)
- return sysfs_emit(buf, "unconnected [connected] unknown\n");
- }
- pr_err("alienware-wmi: unknown amplifier cable status: %d\n", status);
- return sysfs_emit(buf, "unconnected connected [unknown]\n");
-}
-
-static DEVICE_ATTR_RO(status);
-
-static bool amplifier_group_visible(struct kobject *kobj)
-{
- return quirks->amplifier;
-}
-DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(amplifier);
-
-static struct attribute *amplifier_attrs[] = {
- &dev_attr_status.attr,
- NULL,
-};
-
-static const struct attribute_group amplifier_attribute_group = {
- .name = "amplifier",
- .is_visible = SYSFS_GROUP_VISIBLE(amplifier),
- .attrs = amplifier_attrs,
-};
-
-/*
- * Deep Sleep Control support
- * - Modifies BIOS setting for deep sleep control allowing extra wakeup events
- */
-static ssize_t deepsleep_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct wmax_basic_args in_args = {
- .arg = 0,
- };
- acpi_status status;
- u32 out_data;
-
- status = alienware_wmax_command(&in_args, sizeof(in_args),
- WMAX_METHOD_DEEP_SLEEP_STATUS, &out_data);
- if (ACPI_SUCCESS(status)) {
- if (out_data == 0)
- return sysfs_emit(buf, "[disabled] s5 s5_s4\n");
- else if (out_data == 1)
- return sysfs_emit(buf, "disabled [s5] s5_s4\n");
- else if (out_data == 2)
- return sysfs_emit(buf, "disabled s5 [s5_s4]\n");
- }
- pr_err("alienware-wmi: unknown deep sleep status: %d\n", status);
- return sysfs_emit(buf, "disabled s5 s5_s4 [unknown]\n");
-}
-
-static ssize_t deepsleep_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct wmax_basic_args args;
- acpi_status status;
-
- if (strcmp(buf, "disabled\n") == 0)
- args.arg = 0;
- else if (strcmp(buf, "s5\n") == 0)
- args.arg = 1;
- else
- args.arg = 2;
- pr_debug("alienware-wmi: setting deep sleep to %d : %s", args.arg, buf);
-
- status = alienware_wmax_command(&args, sizeof(args),
- WMAX_METHOD_DEEP_SLEEP_CONTROL, NULL);
-
- if (ACPI_FAILURE(status))
- pr_err("alienware-wmi: deep sleep control failed: results: %u\n",
- status);
- return count;
-}
-
-static DEVICE_ATTR_RW(deepsleep);
-
-static bool deepsleep_group_visible(struct kobject *kobj)
-{
- return quirks->deepslp;
-}
-DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(deepsleep);
-
-static struct attribute *deepsleep_attrs[] = {
- &dev_attr_deepsleep.attr,
- NULL,
-};
-
-static const struct attribute_group deepsleep_attribute_group = {
- .name = "deepsleep",
- .is_visible = SYSFS_GROUP_VISIBLE(deepsleep),
- .attrs = deepsleep_attrs,
-};
-
-/*
- * Thermal Profile control
- * - Provides thermal profile control through the Platform Profile API
- */
-#define WMAX_THERMAL_TABLE_MASK GENMASK(7, 4)
-#define WMAX_THERMAL_MODE_MASK GENMASK(3, 0)
-#define WMAX_SENSOR_ID_MASK BIT(8)
-
-static bool is_wmax_thermal_code(u32 code)
-{
- if (code & WMAX_SENSOR_ID_MASK)
- return false;
-
- if ((code & WMAX_THERMAL_MODE_MASK) >= THERMAL_MODE_LAST)
- return false;
-
- if ((code & WMAX_THERMAL_TABLE_MASK) == WMAX_THERMAL_TABLE_BASIC &&
- (code & WMAX_THERMAL_MODE_MASK) >= THERMAL_MODE_BASIC_QUIET)
- return true;
-
- if ((code & WMAX_THERMAL_TABLE_MASK) == WMAX_THERMAL_TABLE_USTT &&
- (code & WMAX_THERMAL_MODE_MASK) <= THERMAL_MODE_USTT_LOW_POWER)
- return true;
-
- return false;
-}
-
-static int wmax_thermal_information(u8 operation, u8 arg, u32 *out_data)
-{
- struct wmax_u32_args in_args = {
- .operation = operation,
- .arg1 = arg,
- .arg2 = 0,
- .arg3 = 0,
- };
- acpi_status status;
-
- status = alienware_wmax_command(&in_args, sizeof(in_args),
- WMAX_METHOD_THERMAL_INFORMATION,
- out_data);
-
- if (ACPI_FAILURE(status))
- return -EIO;
-
- if (*out_data == WMAX_FAILURE_CODE)
- return -EBADRQC;
-
- return 0;
-}
-
-static int wmax_thermal_control(u8 profile)
-{
- struct wmax_u32_args in_args = {
- .operation = WMAX_OPERATION_ACTIVATE_PROFILE,
- .arg1 = profile,
- .arg2 = 0,
- .arg3 = 0,
- };
- acpi_status status;
- u32 out_data;
-
- status = alienware_wmax_command(&in_args, sizeof(in_args),
- WMAX_METHOD_THERMAL_CONTROL,
- &out_data);
-
- if (ACPI_FAILURE(status))
- return -EIO;
-
- if (out_data == WMAX_FAILURE_CODE)
- return -EBADRQC;
-
- return 0;
-}
-
-static int wmax_game_shift_status(u8 operation, u32 *out_data)
-{
- struct wmax_u32_args in_args = {
- .operation = operation,
- .arg1 = 0,
- .arg2 = 0,
- .arg3 = 0,
- };
- acpi_status status;
-
- status = alienware_wmax_command(&in_args, sizeof(in_args),
- WMAX_METHOD_GAME_SHIFT_STATUS,
- out_data);
-
- if (ACPI_FAILURE(status))
- return -EIO;
-
- if (*out_data == WMAX_FAILURE_CODE)
- return -EOPNOTSUPP;
-
- return 0;
-}
-
-static int thermal_profile_get(struct device *dev,
- enum platform_profile_option *profile)
-{
- u32 out_data;
- int ret;
-
- ret = wmax_thermal_information(WMAX_OPERATION_CURRENT_PROFILE,
- 0, &out_data);
-
- if (ret < 0)
- return ret;
-
- if (out_data == WMAX_THERMAL_MODE_GMODE) {
- *profile = PLATFORM_PROFILE_PERFORMANCE;
- return 0;
- }
-
- if (!is_wmax_thermal_code(out_data))
- return -ENODATA;
-
- out_data &= WMAX_THERMAL_MODE_MASK;
- *profile = wmax_mode_to_platform_profile[out_data];
-
- return 0;
-}
-
-static int thermal_profile_set(struct device *dev,
- enum platform_profile_option profile)
-{
- if (quirks->gmode) {
- u32 gmode_status;
- int ret;
-
- ret = wmax_game_shift_status(WMAX_OPERATION_GET_GAME_SHIFT_STATUS,
- &gmode_status);
-
- if (ret < 0)
- return ret;
-
- if ((profile == PLATFORM_PROFILE_PERFORMANCE && !gmode_status) ||
- (profile != PLATFORM_PROFILE_PERFORMANCE && gmode_status)) {
- ret = wmax_game_shift_status(WMAX_OPERATION_TOGGLE_GAME_SHIFT,
- &gmode_status);
-
- if (ret < 0)
- return ret;
- }
- }
-
- return wmax_thermal_control(supported_thermal_profiles[profile]);
-}
-
-static int thermal_profile_probe(void *drvdata, unsigned long *choices)
-{
- enum platform_profile_option profile;
- enum wmax_thermal_mode mode;
- u8 sys_desc[4];
- u32 first_mode;
- u32 out_data;
- int ret;
-
- ret = wmax_thermal_information(WMAX_OPERATION_SYS_DESCRIPTION,
- 0, (u32 *) &sys_desc);
- if (ret < 0)
- return ret;
-
- first_mode = sys_desc[0] + sys_desc[1];
-
- for (u32 i = 0; i < sys_desc[3]; i++) {
- ret = wmax_thermal_information(WMAX_OPERATION_LIST_IDS,
- i + first_mode, &out_data);
-
- if (ret == -EIO)
- return ret;
-
- if (ret == -EBADRQC)
- break;
-
- if (!is_wmax_thermal_code(out_data))
- continue;
-
- mode = out_data & WMAX_THERMAL_MODE_MASK;
- profile = wmax_mode_to_platform_profile[mode];
- supported_thermal_profiles[profile] = out_data;
-
- set_bit(profile, choices);
- }
-
- if (bitmap_empty(choices, PLATFORM_PROFILE_LAST))
- return -ENODEV;
-
- if (quirks->gmode) {
- supported_thermal_profiles[PLATFORM_PROFILE_PERFORMANCE] =
- WMAX_THERMAL_MODE_GMODE;
-
- set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
- }
-
- return 0;
-}
-
-static const struct platform_profile_ops awcc_platform_profile_ops = {
- .probe = thermal_profile_probe,
- .profile_get = thermal_profile_get,
- .profile_set = thermal_profile_set,
-};
-
-static int create_thermal_profile(struct platform_device *platform_device)
-{
- struct device *ppdev;
-
- ppdev = devm_platform_profile_register(&platform_device->dev, "alienware-wmi",
- NULL, &awcc_platform_profile_ops);
-
- return PTR_ERR_OR_ZERO(ppdev);
-}
-
-/*
- * Platform Driver
- */
-static const struct attribute_group *alienfx_groups[] = {
- &zone_attribute_group,
- &hdmi_attribute_group,
- &amplifier_attribute_group,
- &deepsleep_attribute_group,
- NULL
-};
-
-static struct platform_driver platform_driver = {
- .driver = {
- .name = "alienware-wmi",
- .dev_groups = alienfx_groups,
- },
-};
-
-static int __init alienware_wmi_init(void)
-{
- int ret;
-
- if (wmi_has_guid(LEGACY_CONTROL_GUID))
- interface = LEGACY;
- else if (wmi_has_guid(WMAX_CONTROL_GUID))
- interface = WMAX;
- else {
- pr_warn("alienware-wmi: No known WMI GUID found\n");
- return -ENODEV;
- }
-
- dmi_check_system(alienware_quirks);
- if (quirks == NULL)
- quirks = &quirk_unknown;
-
- if (force_platform_profile)
- quirks->thermal = true;
-
- if (force_gmode) {
- if (quirks->thermal)
- quirks->gmode = true;
- else
- pr_warn("force_gmode requires platform profile support\n");
- }
-
- ret = platform_driver_register(&platform_driver);
- if (ret)
- goto fail_platform_driver;
- platform_device = platform_device_alloc("alienware-wmi", PLATFORM_DEVID_NONE);
- if (!platform_device) {
- ret = -ENOMEM;
- goto fail_platform_device1;
- }
- ret = platform_device_add(platform_device);
- if (ret)
- goto fail_platform_device2;
-
- if (quirks->thermal) {
- ret = create_thermal_profile(platform_device);
- if (ret)
- goto fail_prep_thermal_profile;
- }
-
- if (quirks->num_zones > 0) {
- ret = alienware_zone_init(platform_device);
- if (ret)
- goto fail_prep_zones;
- }
-
- return 0;
-
-fail_prep_zones:
- alienware_zone_exit(platform_device);
-fail_prep_thermal_profile:
- platform_device_del(platform_device);
-fail_platform_device2:
- platform_device_put(platform_device);
-fail_platform_device1:
- platform_driver_unregister(&platform_driver);
-fail_platform_driver:
- return ret;
-}
-
-module_init(alienware_wmi_init);
-
-static void __exit alienware_wmi_exit(void)
-{
- alienware_zone_exit(platform_device);
- platform_device_unregister(platform_device);
- platform_driver_unregister(&platform_driver);
-}
-
-module_exit(alienware_wmi_exit);
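For readers skimming the removed thermal code above: a WMAX thermal code is a packed bitfield in which bits 7:4 select the thermal table, bits 3:0 select the mode within that table, and bit 8 marks sensor IDs rather than profile codes. A minimal decoding sketch using the same masks follows; FIELD_GET() comes from <linux/bitfield.h>, and the helper name is illustrative, not driver code.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/printk.h>

#define WMAX_THERMAL_TABLE_MASK	GENMASK(7, 4)
#define WMAX_THERMAL_MODE_MASK	GENMASK(3, 0)
#define WMAX_SENSOR_ID_MASK	BIT(8)

/* Illustrative decoder: splits a thermal code into its table and mode fields */
static void wmax_decode_thermal_code(u32 code)
{
	if (code & WMAX_SENSOR_ID_MASK)
		return;		/* sensor ID, not a thermal profile code */

	pr_debug("table %lu mode %lu\n",
		 FIELD_GET(WMAX_THERMAL_TABLE_MASK, code),
		 FIELD_GET(WMAX_THERMAL_MODE_MASK, code));
}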
diff --git a/drivers/platform/x86/dell/alienware-wmi.h b/drivers/platform/x86/dell/alienware-wmi.h
new file mode 100644
index 000000000000..68d4242211ae
--- /dev/null
+++ b/drivers/platform/x86/dell/alienware-wmi.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Alienware WMI special features driver
+ *
+ * Copyright (C) 2014 Dell Inc <Dell.Client.Kernel@dell.com>
+ * Copyright (C) 2024 Kurt Borja <kuurtb@gmail.com>
+ */
+
+#ifndef _ALIENWARE_WMI_H_
+#define _ALIENWARE_WMI_H_
+
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+#include <linux/wmi.h>
+
+#define LEGACY_CONTROL_GUID "A90597CE-A997-11DA-B012-B622A1EF5492"
+#define LEGACY_POWER_CONTROL_GUID "A80593CE-A997-11DA-B012-B622A1EF5492"
+#define WMAX_CONTROL_GUID "A70591CE-A997-11DA-B012-B622A1EF5492"
+
+enum INTERFACE_FLAGS {
+ LEGACY,
+ WMAX,
+};
+
+enum LEGACY_CONTROL_STATES {
+ LEGACY_RUNNING = 1,
+ LEGACY_BOOTING = 0,
+ LEGACY_SUSPEND = 3,
+};
+
+enum WMAX_CONTROL_STATES {
+ WMAX_RUNNING = 0xFF,
+ WMAX_BOOTING = 0,
+ WMAX_SUSPEND = 3,
+};
+
+struct alienfx_quirks {
+ u8 num_zones;
+ bool hdmi_mux;
+ bool amplifier;
+ bool deepslp;
+};
+
+struct color_platform {
+ u8 blue;
+ u8 green;
+ u8 red;
+} __packed;
+
+struct alienfx_priv {
+ struct platform_device *pdev;
+ struct led_classdev global_led;
+ struct color_platform colors[4];
+ u8 global_brightness;
+ u8 lighting_control_state;
+};
+
+struct alienfx_ops {
+ int (*upd_led)(struct alienfx_priv *priv, struct wmi_device *wdev,
+ u8 location);
+ int (*upd_brightness)(struct alienfx_priv *priv, struct wmi_device *wdev,
+ u8 brightness);
+};
+
+struct alienfx_platdata {
+ struct wmi_device *wdev;
+ struct alienfx_ops ops;
+};
+
+extern u8 alienware_interface;
+extern struct alienfx_quirks *alienfx;
+
+int alienware_wmi_command(struct wmi_device *wdev, u32 method_id,
+ void *in_args, size_t in_size, u32 *out_data);
+
+int alienware_alienfx_setup(struct alienfx_platdata *pdata);
+
+#if IS_ENABLED(CONFIG_ALIENWARE_WMI_LEGACY)
+int __init alienware_legacy_wmi_init(void);
+void __exit alienware_legacy_wmi_exit(void);
+#else
+static inline int alienware_legacy_wmi_init(void)
+{
+ return -ENODEV;
+}
+
+static inline void alienware_legacy_wmi_exit(void)
+{
+}
+#endif
+
+#if IS_ENABLED(CONFIG_ALIENWARE_WMI_WMAX)
+extern const struct attribute_group wmax_hdmi_attribute_group;
+extern const struct attribute_group wmax_amplifier_attribute_group;
+extern const struct attribute_group wmax_deepsleep_attribute_group;
+
+#define WMAX_DEV_GROUPS &wmax_hdmi_attribute_group, \
+ &wmax_amplifier_attribute_group, \
+ &wmax_deepsleep_attribute_group,
+
+int __init alienware_wmax_wmi_init(void);
+void __exit alienware_wmax_wmi_exit(void);
+#else
+#define WMAX_DEV_GROUPS
+
+static inline int alienware_wmax_wmi_init(void)
+{
+ return -ENODEV;
+}
+
+
+static inline void alienware_wmax_wmi_exit(void)
+{
+}
+#endif
+
+#endif
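WMAX_DEV_GROUPS above is meant to be spliced directly into an attribute-group list: it expands to a comma-separated set of group pointers when CONFIG_ALIENWARE_WMI_WMAX is enabled and to nothing otherwise, so the surrounding array simply shrinks. A hypothetical consumer (the array name echoes the removed driver; the real user is not part of this hunk):

static const struct attribute_group *alienfx_groups[] = {
	WMAX_DEV_GROUPS		/* empty when the WMAX interface is not built */
	NULL
};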
diff --git a/drivers/platform/x86/dell/dell-uart-backlight.c b/drivers/platform/x86/dell/dell-uart-backlight.c
index 50002ef13d5d..8f868f845350 100644
--- a/drivers/platform/x86/dell/dell-uart-backlight.c
+++ b/drivers/platform/x86/dell/dell-uart-backlight.c
@@ -325,7 +325,7 @@ static int dell_uart_bl_serdev_probe(struct serdev_device *serdev)
return PTR_ERR_OR_ZERO(dell_bl->bl);
}
-struct serdev_device_driver dell_uart_bl_serdev_driver = {
+static struct serdev_device_driver dell_uart_bl_serdev_driver = {
.probe = dell_uart_bl_serdev_probe,
.driver = {
.name = KBUILD_MODNAME,
diff --git a/drivers/platform/x86/dell/dell-wmi-ddv.c b/drivers/platform/x86/dell/dell-wmi-ddv.c
index e75cd6e1efe6..f27739da380f 100644
--- a/drivers/platform/x86/dell/dell-wmi-ddv.c
+++ b/drivers/platform/x86/dell/dell-wmi-ddv.c
@@ -104,7 +104,6 @@ struct dell_wmi_ddv_sensors {
struct dell_wmi_ddv_data {
struct acpi_battery_hook hook;
- struct device_attribute temp_attr;
struct device_attribute eppid_attr;
struct dell_wmi_ddv_sensors fans;
struct dell_wmi_ddv_sensors temps;
@@ -651,24 +650,6 @@ static int dell_wmi_ddv_battery_index(struct acpi_device *acpi_dev, u32 *index)
return kstrtou32(uid_str, 10, index);
}
-static ssize_t temp_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct dell_wmi_ddv_data *data = container_of(attr, struct dell_wmi_ddv_data, temp_attr);
- u32 index, value;
- int ret;
-
- ret = dell_wmi_ddv_battery_index(to_acpi_device(dev->parent), &index);
- if (ret < 0)
- return ret;
-
- ret = dell_wmi_ddv_query_integer(data->wdev, DELL_DDV_BATTERY_TEMPERATURE, index, &value);
- if (ret < 0)
- return ret;
-
- /* Use 2731 instead of 2731.5 to avoid unnecessary rounding */
- return sysfs_emit(buf, "%d\n", value - 2731);
-}
-
static ssize_t eppid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct dell_wmi_ddv_data *data = container_of(attr, struct dell_wmi_ddv_data, eppid_attr);
@@ -695,6 +676,46 @@ static ssize_t eppid_show(struct device *dev, struct device_attribute *attr, cha
return ret;
}
+static int dell_wmi_ddv_get_property(struct power_supply *psy, const struct power_supply_ext *ext,
+ void *drvdata, enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct dell_wmi_ddv_data *data = drvdata;
+ u32 index, value;
+ int ret;
+
+ ret = dell_wmi_ddv_battery_index(to_acpi_device(psy->dev.parent), &index);
+ if (ret < 0)
+ return ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_TEMP:
+ ret = dell_wmi_ddv_query_integer(data->wdev, DELL_DDV_BATTERY_TEMPERATURE, index,
+ &value);
+ if (ret < 0)
+ return ret;
+
+ /* Use 2732 instead of 2731.5 to avoid unnecessary rounding and to emulate
+ * the behaviour of the OEM application which seems to round down the result.
+ */
+ val->intval = value - 2732;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const enum power_supply_property dell_wmi_ddv_properties[] = {
+ POWER_SUPPLY_PROP_TEMP,
+};
+
+static const struct power_supply_ext dell_wmi_ddv_extension = {
+ .name = DRIVER_NAME,
+ .properties = dell_wmi_ddv_properties,
+ .num_properties = ARRAY_SIZE(dell_wmi_ddv_properties),
+ .get_property = dell_wmi_ddv_get_property,
+};
+
static int dell_wmi_ddv_add_battery(struct power_supply *battery, struct acpi_battery_hook *hook)
{
struct dell_wmi_ddv_data *data = container_of(hook, struct dell_wmi_ddv_data, hook);
@@ -706,13 +727,14 @@ static int dell_wmi_ddv_add_battery(struct power_supply *battery, struct acpi_ba
if (ret < 0)
return 0;
- ret = device_create_file(&battery->dev, &data->temp_attr);
+ ret = device_create_file(&battery->dev, &data->eppid_attr);
if (ret < 0)
return ret;
- ret = device_create_file(&battery->dev, &data->eppid_attr);
+ ret = power_supply_register_extension(battery, &dell_wmi_ddv_extension, &data->wdev->dev,
+ data);
if (ret < 0) {
- device_remove_file(&battery->dev, &data->temp_attr);
+ device_remove_file(&battery->dev, &data->eppid_attr);
return ret;
}
@@ -724,38 +746,24 @@ static int dell_wmi_ddv_remove_battery(struct power_supply *battery, struct acpi
{
struct dell_wmi_ddv_data *data = container_of(hook, struct dell_wmi_ddv_data, hook);
- device_remove_file(&battery->dev, &data->temp_attr);
device_remove_file(&battery->dev, &data->eppid_attr);
+ power_supply_unregister_extension(battery, &dell_wmi_ddv_extension);
return 0;
}
-static void dell_wmi_ddv_battery_remove(void *data)
-{
- struct acpi_battery_hook *hook = data;
-
- battery_hook_unregister(hook);
-}
-
static int dell_wmi_ddv_battery_add(struct dell_wmi_ddv_data *data)
{
data->hook.name = "Dell DDV Battery Extension";
data->hook.add_battery = dell_wmi_ddv_add_battery;
data->hook.remove_battery = dell_wmi_ddv_remove_battery;
- sysfs_attr_init(&data->temp_attr.attr);
- data->temp_attr.attr.name = "temp";
- data->temp_attr.attr.mode = 0444;
- data->temp_attr.show = temp_show;
-
sysfs_attr_init(&data->eppid_attr.attr);
data->eppid_attr.attr.name = "eppid";
data->eppid_attr.attr.mode = 0444;
data->eppid_attr.show = eppid_show;
- battery_hook_register(&data->hook);
-
- return devm_add_action_or_reset(&data->wdev->dev, dell_wmi_ddv_battery_remove, &data->hook);
+ return devm_battery_hook_register(&data->wdev->dev, &data->hook);
}
static int dell_wmi_ddv_buffer_read(struct seq_file *seq, enum dell_ddv_method method)
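A note on the temperature handling introduced above: DELL_DDV_BATTERY_TEMPERATURE reports tenths of a kelvin, while POWER_SUPPLY_PROP_TEMP is expressed in tenths of a degree Celsius, so subtracting 2732 (273.15 K x 10, rounded down the way the OEM application rounds) performs the unit conversion. For example, a raw reading of 3032 yields 3032 - 2732 = 300, i.e. 30.0 °C.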
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/Makefile b/drivers/platform/x86/dell/dell-wmi-sysman/Makefile
index 825fb2fbeea8..0a6df449e222 100644
--- a/drivers/platform/x86/dell/dell-wmi-sysman/Makefile
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_DELL_WMI_SYSMAN) += dell-wmi-sysman.o
-dell-wmi-sysman-objs := sysman.o \
+dell-wmi-sysman-y := sysman.o \
enum-attributes.o \
int-attributes.o \
string-attributes.o \
diff --git a/drivers/platform/x86/hp/hp-bioscfg/Makefile b/drivers/platform/x86/hp/hp-bioscfg/Makefile
index 67be0d917753..7d23649b34dc 100644
--- a/drivers/platform/x86/hp/hp-bioscfg/Makefile
+++ b/drivers/platform/x86/hp/hp-bioscfg/Makefile
@@ -1,6 +1,6 @@
obj-$(CONFIG_HP_BIOSCFG) := hp-bioscfg.o
-hp-bioscfg-objs := bioscfg.o \
+hp-bioscfg-y := bioscfg.o \
biosattr-interface.o \
enum-attributes.o \
int-attributes.o \
diff --git a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
index 0b277b7e37dd..13237890fc92 100644
--- a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
+++ b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
@@ -388,16 +388,13 @@ union acpi_object *hp_get_wmiobj_pointer(int instance_id, const char *guid_strin
*/
int hp_get_instance_count(const char *guid_string)
{
- union acpi_object *wmi_obj = NULL;
- int i = 0;
+ int ret;
- do {
- kfree(wmi_obj);
- wmi_obj = hp_get_wmiobj_pointer(i, guid_string);
- i++;
- } while (wmi_obj);
+ ret = wmi_instance_count(guid_string);
+ if (ret < 0)
+ return 0;
- return i - 1;
+ return ret;
}
/**
@@ -448,7 +445,7 @@ int hp_convert_hexstr_to_str(const char *input, u32 input_len, char **str, int *
return -ENOMEM;
for (i = 0; i < input_len; i += 5) {
- strncpy(tmp, input + i, strlen(tmp));
+ strscpy(tmp, input + i);
if (kstrtol(tmp, 16, &ch) == 0) {
// escape char
if (ch == '\\' ||
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 30bd366d7b58..17a09b7784ed 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -854,6 +854,7 @@ static const struct attribute_group ideapad_attribute_group = {
.is_visible = ideapad_is_visible,
.attrs = ideapad_attributes
};
+__ATTRIBUTE_GROUPS(ideapad_attribute);
/*
* DYTC Platform profile
@@ -1245,21 +1246,6 @@ static void ideapad_unregister_rfkill(struct ideapad_private *priv, int dev)
}
/*
- * Platform device
- */
-static int ideapad_sysfs_init(struct ideapad_private *priv)
-{
- return device_add_group(&priv->platform_device->dev,
- &ideapad_attribute_group);
-}
-
-static void ideapad_sysfs_exit(struct ideapad_private *priv)
-{
- device_remove_group(&priv->platform_device->dev,
- &ideapad_attribute_group);
-}
-
-/*
* input device
*/
#define IDEAPAD_WMI_KEY 0x100
@@ -2175,10 +2161,6 @@ static int ideapad_acpi_add(struct platform_device *pdev)
ideapad_check_features(priv);
- err = ideapad_sysfs_init(priv);
- if (err)
- return err;
-
ideapad_debugfs_init(priv);
err = ideapad_input_init(priv);
@@ -2265,7 +2247,6 @@ backlight_failed:
input_failed:
ideapad_debugfs_exit(priv);
- ideapad_sysfs_exit(priv);
return err;
}
@@ -2293,7 +2274,6 @@ static void ideapad_acpi_remove(struct platform_device *pdev)
ideapad_kbd_bl_exit(priv);
ideapad_input_exit(priv);
ideapad_debugfs_exit(priv);
- ideapad_sysfs_exit(priv);
}
#ifdef CONFIG_PM_SLEEP
@@ -2325,6 +2305,7 @@ static struct platform_driver ideapad_acpi_driver = {
.name = "ideapad_acpi",
.pm = &ideapad_pm,
.acpi_match_table = ACPI_PTR(ideapad_device_ids),
+ .dev_groups = ideapad_attribute_groups,
},
};
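For context on the sysfs change above: __ATTRIBUTE_GROUPS(ideapad_attribute) is the <linux/sysfs.h> helper that generates the NULL-terminated ideapad_attribute_groups array consumed by .dev_groups further down. Roughly, it expands to the following (illustrative expansion, not patch code):

static const struct attribute_group *ideapad_attribute_groups[] = {
	&ideapad_attribute_group,
	NULL,
};

With the group list attached via .dev_groups, the driver core creates and removes the attributes around probe and remove, which is why the manual ideapad_sysfs_init()/ideapad_sysfs_exit() calls can be dropped.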
diff --git a/drivers/platform/x86/intel/ifs/Makefile b/drivers/platform/x86/intel/ifs/Makefile
index 30f035ef5581..c3e417bce9b6 100644
--- a/drivers/platform/x86/intel/ifs/Makefile
+++ b/drivers/platform/x86/intel/ifs/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_INTEL_IFS) += intel_ifs.o
-intel_ifs-objs := core.o load.o runtest.o sysfs.o
+intel_ifs-y := core.o load.o runtest.o sysfs.o
diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c
index 092252eb95a8..30ff8f3ea1f5 100644
--- a/drivers/platform/x86/intel/int3472/discrete.c
+++ b/drivers/platform/x86/intel/int3472/discrete.c
@@ -56,7 +56,7 @@ static void skl_int3472_log_sensor_module_name(struct int3472_discrete_device *i
static int skl_int3472_fill_gpiod_lookup(struct gpiod_lookup *table_entry,
struct acpi_resource_gpio *agpio,
- const char *func, unsigned long gpio_flags)
+ const char *con_id, unsigned long gpio_flags)
{
char *path = agpio->resource_source.string_ptr;
struct acpi_device *adev;
@@ -71,14 +71,14 @@ static int skl_int3472_fill_gpiod_lookup(struct gpiod_lookup *table_entry,
if (!adev)
return -ENODEV;
- *table_entry = GPIO_LOOKUP(acpi_dev_name(adev), agpio->pin_table[0], func, gpio_flags);
+ *table_entry = GPIO_LOOKUP(acpi_dev_name(adev), agpio->pin_table[0], con_id, gpio_flags);
return 0;
}
static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int3472,
struct acpi_resource_gpio *agpio,
- const char *func, unsigned long gpio_flags)
+ const char *con_id, unsigned long gpio_flags)
{
int ret;
@@ -88,7 +88,7 @@ static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int347
}
ret = skl_int3472_fill_gpiod_lookup(&int3472->gpios.table[int3472->n_sensor_gpios],
- agpio, func, gpio_flags);
+ agpio, con_id, gpio_flags);
if (ret)
return ret;
@@ -101,7 +101,7 @@ static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int347
static struct gpio_desc *
skl_int3472_gpiod_get_from_temp_lookup(struct int3472_discrete_device *int3472,
struct acpi_resource_gpio *agpio,
- const char *func, unsigned long gpio_flags)
+ const char *con_id, unsigned long gpio_flags)
{
struct gpio_desc *desc;
int ret;
@@ -112,12 +112,12 @@ skl_int3472_gpiod_get_from_temp_lookup(struct int3472_discrete_device *int3472,
return ERR_PTR(-ENOMEM);
lookup->dev_id = dev_name(int3472->dev);
- ret = skl_int3472_fill_gpiod_lookup(&lookup->table[0], agpio, func, gpio_flags);
+ ret = skl_int3472_fill_gpiod_lookup(&lookup->table[0], agpio, con_id, gpio_flags);
if (ret)
return ERR_PTR(ret);
gpiod_add_lookup_table(lookup);
- desc = devm_gpiod_get(int3472->dev, func, GPIOD_OUT_LOW);
+ desc = devm_gpiod_get(int3472->dev, con_id, GPIOD_OUT_LOW);
gpiod_remove_lookup_table(lookup);
return desc;
@@ -129,7 +129,7 @@ skl_int3472_gpiod_get_from_temp_lookup(struct int3472_discrete_device *int3472,
* @hid: The ACPI HID of the device without the instance number e.g. INT347E
* @type_from: The GPIO type from ACPI ?SDT
* @type_to: The assigned GPIO type, typically same as @type_from
- * @func: The function, e.g. "enable"
+ * @con_id: The name of the GPIO for the device
* @polarity_low: GPIO_ACTIVE_LOW true if the @polarity_low is true,
* GPIO_ACTIVE_HIGH otherwise
*/
@@ -138,15 +138,15 @@ struct int3472_gpio_map {
u8 type_from;
u8 type_to;
bool polarity_low;
- const char *func;
+ const char *con_id;
};
static const struct int3472_gpio_map int3472_gpio_map[] = {
{ "INT347E", INT3472_GPIO_TYPE_RESET, INT3472_GPIO_TYPE_RESET, false, "enable" },
};
-static void int3472_get_func_and_polarity(struct acpi_device *adev, u8 *type,
- const char **func, unsigned long *gpio_flags)
+static void int3472_get_con_id_and_polarity(struct acpi_device *adev, u8 *type,
+ const char **con_id, unsigned long *gpio_flags)
{
unsigned int i;
@@ -165,33 +165,33 @@ static void int3472_get_func_and_polarity(struct acpi_device *adev, u8 *type,
*type = int3472_gpio_map[i].type_to;
*gpio_flags = int3472_gpio_map[i].polarity_low ?
GPIO_ACTIVE_LOW : GPIO_ACTIVE_HIGH;
- *func = int3472_gpio_map[i].func;
+ *con_id = int3472_gpio_map[i].con_id;
return;
}
switch (*type) {
case INT3472_GPIO_TYPE_RESET:
- *func = "reset";
+ *con_id = "reset";
*gpio_flags = GPIO_ACTIVE_LOW;
break;
case INT3472_GPIO_TYPE_POWERDOWN:
- *func = "powerdown";
+ *con_id = "powerdown";
*gpio_flags = GPIO_ACTIVE_LOW;
break;
case INT3472_GPIO_TYPE_CLK_ENABLE:
- *func = "clk-enable";
+ *con_id = "clk-enable";
*gpio_flags = GPIO_ACTIVE_HIGH;
break;
case INT3472_GPIO_TYPE_PRIVACY_LED:
- *func = "privacy-led";
+ *con_id = "privacy-led";
*gpio_flags = GPIO_ACTIVE_HIGH;
break;
case INT3472_GPIO_TYPE_POWER_ENABLE:
- *func = "power-enable";
+ *con_id = "power-enable";
*gpio_flags = GPIO_ACTIVE_HIGH;
break;
default:
- *func = "unknown";
+ *con_id = "unknown";
*gpio_flags = GPIO_ACTIVE_HIGH;
break;
}
@@ -238,7 +238,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
union acpi_object *obj;
struct gpio_desc *gpio;
const char *err_msg;
- const char *func;
+ const char *con_id;
unsigned long gpio_flags;
int ret;
@@ -262,26 +262,26 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
type = FIELD_GET(INT3472_GPIO_DSM_TYPE, obj->integer.value);
- int3472_get_func_and_polarity(int3472->sensor, &type, &func, &gpio_flags);
+ int3472_get_con_id_and_polarity(int3472->sensor, &type, &con_id, &gpio_flags);
pin = FIELD_GET(INT3472_GPIO_DSM_PIN, obj->integer.value);
/* Pin field is not really used under Windows and wraps around at 8 bits */
if (pin != (agpio->pin_table[0] & 0xff))
dev_dbg(int3472->dev, FW_BUG "%s %s pin number mismatch _DSM %d resource %d\n",
- func, agpio->resource_source.string_ptr, pin, agpio->pin_table[0]);
+ con_id, agpio->resource_source.string_ptr, pin, agpio->pin_table[0]);
active_value = FIELD_GET(INT3472_GPIO_DSM_SENSOR_ON_VAL, obj->integer.value);
if (!active_value)
gpio_flags ^= GPIO_ACTIVE_LOW;
- dev_dbg(int3472->dev, "%s %s pin %d active-%s\n", func,
+ dev_dbg(int3472->dev, "%s %s pin %d active-%s\n", con_id,
agpio->resource_source.string_ptr, agpio->pin_table[0],
str_high_low(gpio_flags == GPIO_ACTIVE_HIGH));
switch (type) {
case INT3472_GPIO_TYPE_RESET:
case INT3472_GPIO_TYPE_POWERDOWN:
- ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, func, gpio_flags);
+ ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, con_id, gpio_flags);
if (ret)
err_msg = "Failed to map GPIO pin to sensor\n";
@@ -289,7 +289,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
case INT3472_GPIO_TYPE_CLK_ENABLE:
case INT3472_GPIO_TYPE_PRIVACY_LED:
case INT3472_GPIO_TYPE_POWER_ENABLE:
- gpio = skl_int3472_gpiod_get_from_temp_lookup(int3472, agpio, func, gpio_flags);
+ gpio = skl_int3472_gpiod_get_from_temp_lookup(int3472, agpio, con_id, gpio_flags);
if (IS_ERR(gpio)) {
ret = PTR_ERR(gpio);
err_msg = "Failed to get GPIO\n";
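On the func -> con_id rename: the string stored in a gpiod lookup entry is the connection ID that the gpiod_get() family matches against, so the new name follows gpiolib's own terminology. A minimal sketch of that pairing (device names and pin number are made up for illustration):

static struct gpiod_lookup_table sensor_gpios = {
	.dev_id = "i2c-INT347E:00",	/* hypothetical consumer device */
	.table = {
		GPIO_LOOKUP("INT3472:01", 2, "reset", GPIO_ACTIVE_LOW),
		{ }
	},
};

The consumer then requests the descriptor with the same con_id, e.g. devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH).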
diff --git a/drivers/platform/x86/intel/pmc/Makefile b/drivers/platform/x86/intel/pmc/Makefile
index 389e5419dadf..b148b40d09f5 100644
--- a/drivers/platform/x86/intel/pmc/Makefile
+++ b/drivers/platform/x86/intel/pmc/Makefile
@@ -4,7 +4,7 @@
#
intel_pmc_core-y := core.o core_ssram.o spt.o cnp.o \
- icl.o tgl.o adl.o mtl.o arl.o lnl.o
+ icl.o tgl.o adl.o mtl.o arl.o lnl.o ptl.o
obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core.o
intel_pmc_core_pltdrv-y := pltdrv.o
obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core_pltdrv.o
diff --git a/drivers/platform/x86/intel/pmc/adl.c b/drivers/platform/x86/intel/pmc/adl.c
index e7878558fd90..9e7dfd6e3310 100644
--- a/drivers/platform/x86/intel/pmc/adl.c
+++ b/drivers/platform/x86/intel/pmc/adl.c
@@ -11,7 +11,7 @@
#include "core.h"
/* Alder Lake: PGD PFET Enable Ack Status Register(s) bitmap */
-const struct pmc_bit_map adl_pfear_map[] = {
+static const struct pmc_bit_map adl_pfear_map[] = {
{"SPI/eSPI", BIT(2)},
{"XHCI", BIT(3)},
{"SPA", BIT(4)},
@@ -54,7 +54,7 @@ const struct pmc_bit_map adl_pfear_map[] = {
{}
};
-const struct pmc_bit_map *ext_adl_pfear_map[] = {
+static const struct pmc_bit_map *ext_adl_pfear_map[] = {
/*
* Check intel_pmc_core_ids[] users of cnp_reg_map for
* a list of core SoCs using this.
@@ -63,7 +63,7 @@ const struct pmc_bit_map *ext_adl_pfear_map[] = {
NULL
};
-const struct pmc_bit_map adl_ltr_show_map[] = {
+static const struct pmc_bit_map adl_ltr_show_map[] = {
{"SOUTHPORT_A", CNP_PMC_LTR_SPA},
{"SOUTHPORT_B", CNP_PMC_LTR_SPB},
{"SATA", CNP_PMC_LTR_SATA},
@@ -100,7 +100,7 @@ const struct pmc_bit_map adl_ltr_show_map[] = {
{}
};
-const struct pmc_bit_map adl_clocksource_status_map[] = {
+static const struct pmc_bit_map adl_clocksource_status_map[] = {
{"CLKPART1_OFF_STS", BIT(0)},
{"CLKPART2_OFF_STS", BIT(1)},
{"CLKPART3_OFF_STS", BIT(2)},
@@ -128,7 +128,7 @@ const struct pmc_bit_map adl_clocksource_status_map[] = {
{}
};
-const struct pmc_bit_map adl_power_gating_status_0_map[] = {
+static const struct pmc_bit_map adl_power_gating_status_0_map[] = {
{"PMC_PGD0_PG_STS", BIT(0)},
{"DMI_PGD0_PG_STS", BIT(1)},
{"ESPISPI_PGD0_PG_STS", BIT(2)},
@@ -158,7 +158,7 @@ const struct pmc_bit_map adl_power_gating_status_0_map[] = {
{}
};
-const struct pmc_bit_map adl_power_gating_status_1_map[] = {
+static const struct pmc_bit_map adl_power_gating_status_1_map[] = {
{"USBR0_PGD0_PG_STS", BIT(0)},
{"SMT1_PGD0_PG_STS", BIT(2)},
{"CSMERTC_PGD0_PG_STS", BIT(6)},
@@ -170,14 +170,14 @@ const struct pmc_bit_map adl_power_gating_status_1_map[] = {
{}
};
-const struct pmc_bit_map adl_power_gating_status_2_map[] = {
+static const struct pmc_bit_map adl_power_gating_status_2_map[] = {
{"THC0_PGD0_PG_STS", BIT(7)},
{"THC1_PGD0_PG_STS", BIT(8)},
{"SPF_PGD0_PG_STS", BIT(14)},
{}
};
-const struct pmc_bit_map adl_d3_status_0_map[] = {
+static const struct pmc_bit_map adl_d3_status_0_map[] = {
{"ISH_D3_STS", BIT(2)},
{"LPSS_D3_STS", BIT(3)},
{"XDCI_D3_STS", BIT(4)},
@@ -193,13 +193,13 @@ const struct pmc_bit_map adl_d3_status_0_map[] = {
{}
};
-const struct pmc_bit_map adl_d3_status_1_map[] = {
+static const struct pmc_bit_map adl_d3_status_1_map[] = {
{"GBE_D3_STS", BIT(19)},
{"CNVI_D3_STS", BIT(27)},
{}
};
-const struct pmc_bit_map adl_d3_status_2_map[] = {
+static const struct pmc_bit_map adl_d3_status_2_map[] = {
{"CSMERTC_D3_STS", BIT(1)},
{"CSE_D3_STS", BIT(4)},
{"KVMCC_D3_STS", BIT(5)},
@@ -210,20 +210,20 @@ const struct pmc_bit_map adl_d3_status_2_map[] = {
{}
};
-const struct pmc_bit_map adl_d3_status_3_map[] = {
+static const struct pmc_bit_map adl_d3_status_3_map[] = {
{"THC0_D3_STS", BIT(14)},
{"THC1_D3_STS", BIT(15)},
{}
};
-const struct pmc_bit_map adl_vnn_req_status_0_map[] = {
+static const struct pmc_bit_map adl_vnn_req_status_0_map[] = {
{"ISH_VNN_REQ_STS", BIT(2)},
{"ESPISPI_VNN_REQ_STS", BIT(18)},
{"DSP_VNN_REQ_STS", BIT(19)},
{}
};
-const struct pmc_bit_map adl_vnn_req_status_1_map[] = {
+static const struct pmc_bit_map adl_vnn_req_status_1_map[] = {
{"NPK_VNN_REQ_STS", BIT(4)},
{"EXI_VNN_REQ_STS", BIT(9)},
{"GBE_VNN_REQ_STS", BIT(19)},
@@ -232,7 +232,7 @@ const struct pmc_bit_map adl_vnn_req_status_1_map[] = {
{}
};
-const struct pmc_bit_map adl_vnn_req_status_2_map[] = {
+static const struct pmc_bit_map adl_vnn_req_status_2_map[] = {
{"CSMERTC_VNN_REQ_STS", BIT(1)},
{"CSE_VNN_REQ_STS", BIT(4)},
{"SMT1_VNN_REQ_STS", BIT(8)},
@@ -245,12 +245,12 @@ const struct pmc_bit_map adl_vnn_req_status_2_map[] = {
{}
};
-const struct pmc_bit_map adl_vnn_req_status_3_map[] = {
+static const struct pmc_bit_map adl_vnn_req_status_3_map[] = {
{"GPIOCOM5_VNN_REQ_STS", BIT(11)},
{}
};
-const struct pmc_bit_map adl_vnn_misc_status_map[] = {
+static const struct pmc_bit_map adl_vnn_misc_status_map[] = {
{"CPU_C10_REQ_STS", BIT(0)},
{"PCIe_LPM_En_REQ_STS", BIT(3)},
{"ITH_REQ_STS", BIT(5)},
@@ -265,7 +265,7 @@ const struct pmc_bit_map adl_vnn_misc_status_map[] = {
{}
};
-const struct pmc_bit_map *adl_lpm_maps[] = {
+static const struct pmc_bit_map *adl_lpm_maps[] = {
adl_clocksource_status_map,
adl_power_gating_status_0_map,
adl_power_gating_status_1_map,
@@ -311,20 +311,8 @@ const struct pmc_reg_map adl_reg_map = {
.pson_residency_counter_step = TGL_PSON_RES_COUNTER_STEP,
};
-int adl_core_init(struct pmc_dev *pmcdev)
-{
- struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
- int ret;
-
- pmcdev->suspend = cnl_suspend;
- pmcdev->resume = cnl_resume;
-
- pmc->map = &adl_reg_map;
- ret = get_primary_reg_base(pmc);
- if (ret)
- return ret;
-
- pmc_core_get_low_power_modes(pmcdev);
-
- return 0;
-}
+struct pmc_dev_info adl_pmc_dev = {
+ .map = &adl_reg_map,
+ .suspend = cnl_suspend,
+ .resume = cnl_resume,
+};
diff --git a/drivers/platform/x86/intel/pmc/arl.c b/drivers/platform/x86/intel/pmc/arl.c
index 05dec4f5019f..320993bd6d31 100644
--- a/drivers/platform/x86/intel/pmc/arl.c
+++ b/drivers/platform/x86/intel/pmc/arl.c
@@ -16,10 +16,11 @@
#define IOEP_LPM_REQ_GUID 0x5077612
#define SOCS_LPM_REQ_GUID 0x8478657
#define PCHS_LPM_REQ_GUID 0x9684572
+#define SOCM_LPM_REQ_GUID 0x2625030
static const u8 ARL_LPM_REG_INDEX[] = {0, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20};
-const struct pmc_bit_map arl_socs_ltr_show_map[] = {
+static const struct pmc_bit_map arl_socs_ltr_show_map[] = {
{"SOUTHPORT_A", CNP_PMC_LTR_SPA},
{"SOUTHPORT_B", CNP_PMC_LTR_SPB},
{"SATA", CNP_PMC_LTR_SATA},
@@ -59,7 +60,7 @@ const struct pmc_bit_map arl_socs_ltr_show_map[] = {
{}
};
-const struct pmc_bit_map arl_socs_clocksource_status_map[] = {
+static const struct pmc_bit_map arl_socs_clocksource_status_map[] = {
{"AON2_OFF_STS", BIT(0)},
{"AON3_OFF_STS", BIT(1)},
{"AON4_OFF_STS", BIT(2)},
@@ -87,7 +88,7 @@ const struct pmc_bit_map arl_socs_clocksource_status_map[] = {
{}
};
-const struct pmc_bit_map arl_socs_power_gating_status_0_map[] = {
+static const struct pmc_bit_map arl_socs_power_gating_status_0_map[] = {
{"PMC_PGD0_PG_STS", BIT(0)},
{"DMI_PGD0_PG_STS", BIT(1)},
{"ESPISPI_PGD0_PG_STS", BIT(2)},
@@ -123,7 +124,7 @@ const struct pmc_bit_map arl_socs_power_gating_status_0_map[] = {
{}
};
-const struct pmc_bit_map arl_socs_power_gating_status_1_map[] = {
+static const struct pmc_bit_map arl_socs_power_gating_status_1_map[] = {
{"USBR0_PGD0_PG_STS", BIT(0)},
{"SUSRAM_PGD0_PG_STS", BIT(1)},
{"SMT1_PGD0_PG_STS", BIT(2)},
@@ -159,7 +160,7 @@ const struct pmc_bit_map arl_socs_power_gating_status_1_map[] = {
{}
};
-const struct pmc_bit_map arl_socs_power_gating_status_2_map[] = {
+static const struct pmc_bit_map arl_socs_power_gating_status_2_map[] = {
{"PSF8_PGD0_PG_STS", BIT(0)},
{"FIA_PGD0_PG_STS", BIT(1)},
{"SOC_D2D_PGD3_PG_STS", BIT(2)},
@@ -187,7 +188,7 @@ const struct pmc_bit_map arl_socs_power_gating_status_2_map[] = {
{}
};
-const struct pmc_bit_map arl_socs_d3_status_2_map[] = {
+static const struct pmc_bit_map arl_socs_d3_status_2_map[] = {
{"CSMERTC_D3_STS", BIT(1)},
{"SUSRAM_D3_STS", BIT(2)},
{"CSE_D3_STS", BIT(4)},
@@ -206,7 +207,7 @@ const struct pmc_bit_map arl_socs_d3_status_2_map[] = {
{}
};
-const struct pmc_bit_map arl_socs_d3_status_3_map[] = {
+static const struct pmc_bit_map arl_socs_d3_status_3_map[] = {
{"GBETSN_D3_STS", BIT(13)},
{"THC0_D3_STS", BIT(14)},
{"THC1_D3_STS", BIT(15)},
@@ -214,13 +215,13 @@ const struct pmc_bit_map arl_socs_d3_status_3_map[] = {
{}
};
-const struct pmc_bit_map arl_socs_vnn_req_status_3_map[] = {
+static const struct pmc_bit_map arl_socs_vnn_req_status_3_map[] = {
{"DTS0_VNN_REQ_STS", BIT(7)},
{"GPIOCOM5_VNN_REQ_STS", BIT(11)},
{}
};
-const struct pmc_bit_map *arl_socs_lpm_maps[] = {
+static const struct pmc_bit_map *arl_socs_lpm_maps[] = {
arl_socs_clocksource_status_map,
arl_socs_power_gating_status_0_map,
arl_socs_power_gating_status_1_map,
@@ -238,7 +239,7 @@ const struct pmc_bit_map *arl_socs_lpm_maps[] = {
NULL
};
-const struct pmc_bit_map arl_socs_pfear_map[] = {
+static const struct pmc_bit_map arl_socs_pfear_map[] = {
{"RSVD64", BIT(0)},
{"RSVD65", BIT(1)},
{"RSVD66", BIT(2)},
@@ -249,13 +250,13 @@ const struct pmc_bit_map arl_socs_pfear_map[] = {
{}
};
-const struct pmc_bit_map *ext_arl_socs_pfear_map[] = {
+static const struct pmc_bit_map *ext_arl_socs_pfear_map[] = {
mtl_socm_pfear_map,
arl_socs_pfear_map,
NULL
};
-const struct pmc_reg_map arl_socs_reg_map = {
+static const struct pmc_reg_map arl_socs_reg_map = {
.pfear_sts = ext_arl_socs_pfear_map,
.ppfear_buckets = ARL_SOCS_PPFEAR_NUM_ENTRIES,
.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
@@ -283,7 +284,7 @@ const struct pmc_reg_map arl_socs_reg_map = {
.pson_residency_counter_step = TGL_PSON_RES_COUNTER_STEP,
};
-const struct pmc_bit_map arl_pchs_ltr_show_map[] = {
+static const struct pmc_bit_map arl_pchs_ltr_show_map[] = {
{"SOUTHPORT_A", CNP_PMC_LTR_SPA},
{"SOUTHPORT_B", CNP_PMC_LTR_SPB},
{"SATA", CNP_PMC_LTR_SATA},
@@ -323,7 +324,7 @@ const struct pmc_bit_map arl_pchs_ltr_show_map[] = {
{}
};
-const struct pmc_bit_map arl_pchs_clocksource_status_map[] = {
+static const struct pmc_bit_map arl_pchs_clocksource_status_map[] = {
{"AON2_OFF_STS", BIT(0)},
{"AON3_OFF_STS", BIT(1)},
{"AON4_OFF_STS", BIT(2)},
@@ -358,7 +359,7 @@ const struct pmc_bit_map arl_pchs_clocksource_status_map[] = {
{}
};
-const struct pmc_bit_map arl_pchs_power_gating_status_0_map[] = {
+static const struct pmc_bit_map arl_pchs_power_gating_status_0_map[] = {
{"PMC_PGD0_PG_STS", BIT(0)},
{"DMI_PGD0_PG_STS", BIT(1)},
{"ESPISPI_PGD0_PG_STS", BIT(2)},
@@ -394,7 +395,7 @@ const struct pmc_bit_map arl_pchs_power_gating_status_0_map[] = {
{}
};
-const struct pmc_bit_map arl_pchs_power_gating_status_1_map[] = {
+static const struct pmc_bit_map arl_pchs_power_gating_status_1_map[] = {
{"USBR0_PGD0_PG_STS", BIT(0)},
{"SUSRAM_PGD0_PG_STS", BIT(1)},
{"SMT1_PGD0_PG_STS", BIT(2)},
@@ -430,7 +431,7 @@ const struct pmc_bit_map arl_pchs_power_gating_status_1_map[] = {
{}
};
-const struct pmc_bit_map arl_pchs_power_gating_status_2_map[] = {
+static const struct pmc_bit_map arl_pchs_power_gating_status_2_map[] = {
{"U3FPW2_PGD0_PG_STS", BIT(0)},
{"FIA_PGD0_PG_STS", BIT(1)},
{"FIACPCB_X_PGD0_PG_STS", BIT(2)},
@@ -457,7 +458,7 @@ const struct pmc_bit_map arl_pchs_power_gating_status_2_map[] = {
{}
};
-const struct pmc_bit_map arl_pchs_d3_status_0_map[] = {
+static const struct pmc_bit_map arl_pchs_d3_status_0_map[] = {
{"SPF_D3_STS", BIT(0)},
{"LPSS_D3_STS", BIT(3)},
{"XDCI_D3_STS", BIT(4)},
@@ -474,7 +475,7 @@ const struct pmc_bit_map arl_pchs_d3_status_0_map[] = {
{}
};
-const struct pmc_bit_map arl_pchs_d3_status_1_map[] = {
+static const struct pmc_bit_map arl_pchs_d3_status_1_map[] = {
{"GBETSN1_D3_STS", BIT(14)},
{"GBE_D3_STS", BIT(19)},
{"ITSS_D3_STS", BIT(23)},
@@ -483,7 +484,7 @@ const struct pmc_bit_map arl_pchs_d3_status_1_map[] = {
{}
};
-const struct pmc_bit_map arl_pchs_d3_status_2_map[] = {
+static const struct pmc_bit_map arl_pchs_d3_status_2_map[] = {
{"CSMERTC_D3_STS", BIT(1)},
{"SUSRAM_D3_STS", BIT(2)},
{"CSE_D3_STS", BIT(4)},
@@ -504,7 +505,7 @@ const struct pmc_bit_map arl_pchs_d3_status_2_map[] = {
{}
};
-const struct pmc_bit_map arl_pchs_d3_status_3_map[] = {
+static const struct pmc_bit_map arl_pchs_d3_status_3_map[] = {
{"ESE_D3_STS", BIT(3)},
{"GBETSN_D3_STS", BIT(13)},
{"THC0_D3_STS", BIT(14)},
@@ -513,13 +514,13 @@ const struct pmc_bit_map arl_pchs_d3_status_3_map[] = {
{}
};
-const struct pmc_bit_map arl_pchs_vnn_req_status_0_map[] = {
+static const struct pmc_bit_map arl_pchs_vnn_req_status_0_map[] = {
{"FIA_VNN_REQ_STS", BIT(17)},
{"ESPISPI_VNN_REQ_STS", BIT(18)},
{}
};
-const struct pmc_bit_map arl_pchs_vnn_req_status_1_map[] = {
+static const struct pmc_bit_map arl_pchs_vnn_req_status_1_map[] = {
{"NPK_VNN_REQ_STS", BIT(4)},
{"DFXAGG_VNN_REQ_STS", BIT(8)},
{"EXI_VNN_REQ_STS", BIT(9)},
@@ -530,7 +531,7 @@ const struct pmc_bit_map arl_pchs_vnn_req_status_1_map[] = {
{}
};
-const struct pmc_bit_map arl_pchs_vnn_req_status_2_map[] = {
+static const struct pmc_bit_map arl_pchs_vnn_req_status_2_map[] = {
{"FIA2_VNN_REQ_STS", BIT(0)},
{"CSMERTC_VNN_REQ_STS", BIT(1)},
{"CSE_VNN_REQ_STS", BIT(4)},
@@ -548,7 +549,7 @@ const struct pmc_bit_map arl_pchs_vnn_req_status_2_map[] = {
{}
};
-const struct pmc_bit_map arl_pchs_vnn_req_status_3_map[] = {
+static const struct pmc_bit_map arl_pchs_vnn_req_status_3_map[] = {
{"ESE_VNN_REQ_STS", BIT(3)},
{"DTS0_VNN_REQ_STS", BIT(7)},
{"GPIOCOM5_VNN_REQ_STS", BIT(11)},
@@ -556,7 +557,7 @@ const struct pmc_bit_map arl_pchs_vnn_req_status_3_map[] = {
{}
};
-const struct pmc_bit_map arl_pchs_vnn_misc_status_map[] = {
+static const struct pmc_bit_map arl_pchs_vnn_misc_status_map[] = {
{"CPU_C10_REQ_STS", BIT(0)},
{"TS_OFF_REQ_STS", BIT(1)},
{"PNDE_MET_REQ_STS", BIT(2)},
@@ -586,7 +587,7 @@ const struct pmc_bit_map arl_pchs_vnn_misc_status_map[] = {
{}
};
-const struct pmc_bit_map arl_pchs_signal_status_map[] = {
+static const struct pmc_bit_map arl_pchs_signal_status_map[] = {
{"LSX_Wake0_STS", BIT(0)},
{"LSX_Wake1_STS", BIT(1)},
{"LSX_Wake2_STS", BIT(2)},
@@ -606,7 +607,7 @@ const struct pmc_bit_map arl_pchs_signal_status_map[] = {
{}
};
-const struct pmc_bit_map *arl_pchs_lpm_maps[] = {
+static const struct pmc_bit_map *arl_pchs_lpm_maps[] = {
arl_pchs_clocksource_status_map,
arl_pchs_power_gating_status_0_map,
arl_pchs_power_gating_status_1_map,
@@ -624,7 +625,7 @@ const struct pmc_bit_map *arl_pchs_lpm_maps[] = {
NULL
};
-const struct pmc_reg_map arl_pchs_reg_map = {
+static const struct pmc_reg_map arl_pchs_reg_map = {
.pfear_sts = ext_arl_socs_pfear_map,
.ppfear_buckets = ARL_SOCS_PPFEAR_NUM_ENTRIES,
.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
@@ -650,6 +651,7 @@ const struct pmc_reg_map arl_pchs_reg_map = {
.etr3_offset = ETR3_OFFSET,
};
+#define PMC_DEVID_SOCM 0x777f
#define PMC_DEVID_SOCS 0xae7f
#define PMC_DEVID_IOEP 0x7ecf
#define PMC_DEVID_PCHS 0x7f27
@@ -669,11 +671,17 @@ static struct pmc_info arl_pmc_info_list[] = {
.devid = PMC_DEVID_PCHS,
.map = &arl_pchs_reg_map,
},
+ {
+ .guid = SOCM_LPM_REQ_GUID,
+ .devid = PMC_DEVID_SOCM,
+ .map = &mtl_socm_reg_map,
+ },
{}
};
#define ARL_NPU_PCI_DEV 0xad1d
#define ARL_GNA_PCI_DEV 0xae4c
+#define ARL_H_GNA_PCI_DEV 0x774c
/*
* Set power state of select devices that do not have drivers to D3
* so that they do not block Package C entry.
@@ -684,6 +692,12 @@ static void arl_d3_fixup(void)
pmc_core_set_device_d3(ARL_GNA_PCI_DEV);
}
+static void arl_h_d3_fixup(void)
+{
+ pmc_core_set_device_d3(ARL_NPU_PCI_DEV);
+ pmc_core_set_device_d3(ARL_H_GNA_PCI_DEV);
+}
+
static int arl_resume(struct pmc_dev *pmcdev)
{
arl_d3_fixup();
@@ -691,40 +705,41 @@ static int arl_resume(struct pmc_dev *pmcdev)
return cnl_resume(pmcdev);
}
-int arl_core_init(struct pmc_dev *pmcdev)
+static int arl_h_resume(struct pmc_dev *pmcdev)
{
- struct pmc *pmc = pmcdev->pmcs[PMC_IDX_SOC];
- int ret;
- int func = 0;
- bool ssram_init = true;
+ arl_h_d3_fixup();
+
+ return cnl_resume(pmcdev);
+}
+static int arl_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
+{
arl_d3_fixup();
- pmcdev->suspend = cnl_suspend;
- pmcdev->resume = arl_resume;
- pmcdev->regmap_list = arl_pmc_info_list;
+ return generic_core_init(pmcdev, pmc_dev_info);
+}
- /*
- * If ssram init fails use legacy method to at least get the
- * primary PMC
- */
- ret = pmc_core_ssram_init(pmcdev, func);
- if (ret) {
- ssram_init = false;
- pmc->map = &arl_socs_reg_map;
-
- ret = get_primary_reg_base(pmc);
- if (ret)
- return ret;
- }
-
- pmc_core_get_low_power_modes(pmcdev);
- pmc_core_punit_pmt_init(pmcdev, ARL_PMT_DMU_GUID);
-
- if (ssram_init) {
- ret = pmc_core_ssram_get_lpm_reqs(pmcdev);
- if (ret)
- return ret;
- }
-
- return 0;
+static int arl_h_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
+{
+ arl_h_d3_fixup();
+ return generic_core_init(pmcdev, pmc_dev_info);
}
+
+struct pmc_dev_info arl_pmc_dev = {
+ .pci_func = 0,
+ .dmu_guid = ARL_PMT_DMU_GUID,
+ .regmap_list = arl_pmc_info_list,
+ .map = &arl_socs_reg_map,
+ .suspend = cnl_suspend,
+ .resume = arl_resume,
+ .init = arl_core_init,
+};
+
+struct pmc_dev_info arl_h_pmc_dev = {
+ .pci_func = 2,
+ .dmu_guid = ARL_PMT_DMU_GUID,
+ .regmap_list = arl_pmc_info_list,
+ .map = &mtl_socm_reg_map,
+ .suspend = cnl_suspend,
+ .resume = arl_h_resume,
+ .init = arl_h_core_init,
+};
diff --git a/drivers/platform/x86/intel/pmc/cnp.c b/drivers/platform/x86/intel/pmc/cnp.c
index fc5193fdf8a8..2c5af158bbe2 100644
--- a/drivers/platform/x86/intel/pmc/cnp.c
+++ b/drivers/platform/x86/intel/pmc/cnp.c
@@ -88,7 +88,7 @@ const struct pmc_bit_map cnp_pfear_map[] = {
{}
};
-const struct pmc_bit_map *ext_cnp_pfear_map[] = {
+static const struct pmc_bit_map *ext_cnp_pfear_map[] = {
/*
* Check intel_pmc_core_ids[] users of cnp_reg_map for
* a list of core SoCs using this.
@@ -97,7 +97,7 @@ const struct pmc_bit_map *ext_cnp_pfear_map[] = {
NULL
};
-const struct pmc_bit_map cnp_slps0_dbg0_map[] = {
+static const struct pmc_bit_map cnp_slps0_dbg0_map[] = {
{"AUDIO_D3", BIT(0)},
{"OTG_D3", BIT(1)},
{"XHCI_D3", BIT(2)},
@@ -110,7 +110,7 @@ const struct pmc_bit_map cnp_slps0_dbg0_map[] = {
{}
};
-const struct pmc_bit_map cnp_slps0_dbg1_map[] = {
+static const struct pmc_bit_map cnp_slps0_dbg1_map[] = {
{"SDIO_PLL_OFF", BIT(0)},
{"USB2_PLL_OFF", BIT(1)},
{"AUDIO_PLL_OFF", BIT(2)},
@@ -127,7 +127,7 @@ const struct pmc_bit_map cnp_slps0_dbg1_map[] = {
{}
};
-const struct pmc_bit_map cnp_slps0_dbg2_map[] = {
+static const struct pmc_bit_map cnp_slps0_dbg2_map[] = {
{"MPHY_CORE_GATED", BIT(0)},
{"CSME_GATED", BIT(1)},
{"USB2_SUS_GATED", BIT(2)},
@@ -274,20 +274,9 @@ int cnl_resume(struct pmc_dev *pmcdev)
return pmc_core_resume_common(pmcdev);
}
-int cnp_core_init(struct pmc_dev *pmcdev)
-{
- struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
- int ret;
-
- pmcdev->suspend = cnl_suspend;
- pmcdev->resume = cnl_resume;
-
- pmc->map = &cnp_reg_map;
- ret = get_primary_reg_base(pmc);
- if (ret)
- return ret;
-
- pmc_core_get_low_power_modes(pmcdev);
+struct pmc_dev_info cnp_pmc_dev = {
+ .map = &cnp_reg_map,
+ .suspend = cnl_suspend,
+ .resume = cnl_resume,
+};
- return 0;
-}
diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
index 1ee0fb5f8250..7a1d11f2914f 100644
--- a/drivers/platform/x86/intel/pmc/core.c
+++ b/drivers/platform/x86/intel/pmc/core.c
@@ -1345,40 +1345,80 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
}
}
+/*
+ * When supported, ssram init is used to discover all available PMCs.
+ * If ssram init fails, this function falls back to the legacy method to at
+ * least get the primary PMC.
+ */
+int generic_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
+{
+ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ bool ssram;
+ int ret;
+
+ pmcdev->suspend = pmc_dev_info->suspend;
+ pmcdev->resume = pmc_dev_info->resume;
+
+ ssram = pmc_dev_info->regmap_list != NULL;
+ if (ssram) {
+ pmcdev->regmap_list = pmc_dev_info->regmap_list;
+ ret = pmc_core_ssram_init(pmcdev, pmc_dev_info->pci_func);
+ if (ret) {
+ dev_warn(&pmcdev->pdev->dev,
+ "ssram init failed, %d, using legacy init\n", ret);
+ ssram = false;
+ }
+ }
+
+ if (!ssram) {
+ pmc->map = pmc_dev_info->map;
+ ret = get_primary_reg_base(pmc);
+ if (ret)
+ return ret;
+ }
+
+ pmc_core_get_low_power_modes(pmcdev);
+ if (pmc_dev_info->dmu_guid)
+ pmc_core_punit_pmt_init(pmcdev, pmc_dev_info->dmu_guid);
+
+ if (ssram)
+ return pmc_core_ssram_get_lpm_reqs(pmcdev);
+
+ return 0;
+}
+
static const struct x86_cpu_id intel_pmc_core_ids[] = {
- X86_MATCH_VFM(INTEL_SKYLAKE_L, spt_core_init),
- X86_MATCH_VFM(INTEL_SKYLAKE, spt_core_init),
- X86_MATCH_VFM(INTEL_KABYLAKE_L, spt_core_init),
- X86_MATCH_VFM(INTEL_KABYLAKE, spt_core_init),
- X86_MATCH_VFM(INTEL_CANNONLAKE_L, cnp_core_init),
- X86_MATCH_VFM(INTEL_ICELAKE_L, icl_core_init),
- X86_MATCH_VFM(INTEL_ICELAKE_NNPI, icl_core_init),
- X86_MATCH_VFM(INTEL_COMETLAKE, cnp_core_init),
- X86_MATCH_VFM(INTEL_COMETLAKE_L, cnp_core_init),
- X86_MATCH_VFM(INTEL_TIGERLAKE_L, tgl_l_core_init),
- X86_MATCH_VFM(INTEL_TIGERLAKE, tgl_core_init),
- X86_MATCH_VFM(INTEL_ATOM_TREMONT, tgl_l_core_init),
- X86_MATCH_VFM(INTEL_ATOM_TREMONT_L, icl_core_init),
- X86_MATCH_VFM(INTEL_ROCKETLAKE, tgl_core_init),
- X86_MATCH_VFM(INTEL_ALDERLAKE_L, tgl_l_core_init),
- X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, tgl_l_core_init),
- X86_MATCH_VFM(INTEL_ALDERLAKE, adl_core_init),
- X86_MATCH_VFM(INTEL_RAPTORLAKE_P, tgl_l_core_init),
- X86_MATCH_VFM(INTEL_RAPTORLAKE, adl_core_init),
- X86_MATCH_VFM(INTEL_RAPTORLAKE_S, adl_core_init),
- X86_MATCH_VFM(INTEL_METEORLAKE_L, mtl_core_init),
- X86_MATCH_VFM(INTEL_ARROWLAKE, arl_core_init),
- X86_MATCH_VFM(INTEL_LUNARLAKE_M, lnl_core_init),
+ X86_MATCH_VFM(INTEL_SKYLAKE_L, &spt_pmc_dev),
+ X86_MATCH_VFM(INTEL_SKYLAKE, &spt_pmc_dev),
+ X86_MATCH_VFM(INTEL_KABYLAKE_L, &spt_pmc_dev),
+ X86_MATCH_VFM(INTEL_KABYLAKE, &spt_pmc_dev),
+ X86_MATCH_VFM(INTEL_CANNONLAKE_L, &cnp_pmc_dev),
+ X86_MATCH_VFM(INTEL_ICELAKE_L, &icl_pmc_dev),
+ X86_MATCH_VFM(INTEL_ICELAKE_NNPI, &icl_pmc_dev),
+ X86_MATCH_VFM(INTEL_COMETLAKE, &cnp_pmc_dev),
+ X86_MATCH_VFM(INTEL_COMETLAKE_L, &cnp_pmc_dev),
+ X86_MATCH_VFM(INTEL_TIGERLAKE_L, &tgl_l_pmc_dev),
+ X86_MATCH_VFM(INTEL_TIGERLAKE, &tgl_pmc_dev),
+ X86_MATCH_VFM(INTEL_ATOM_TREMONT, &tgl_l_pmc_dev),
+ X86_MATCH_VFM(INTEL_ATOM_TREMONT_L, &icl_pmc_dev),
+ X86_MATCH_VFM(INTEL_ROCKETLAKE, &tgl_pmc_dev),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, &tgl_l_pmc_dev),
+ X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &tgl_l_pmc_dev),
+ X86_MATCH_VFM(INTEL_ALDERLAKE, &adl_pmc_dev),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, &tgl_l_pmc_dev),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, &adl_pmc_dev),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_S, &adl_pmc_dev),
+ X86_MATCH_VFM(INTEL_METEORLAKE_L, &mtl_pmc_dev),
+ X86_MATCH_VFM(INTEL_ARROWLAKE, &arl_pmc_dev),
+ X86_MATCH_VFM(INTEL_ARROWLAKE_H, &arl_h_pmc_dev),
+ X86_MATCH_VFM(INTEL_ARROWLAKE_U, &arl_h_pmc_dev),
+ X86_MATCH_VFM(INTEL_LUNARLAKE_M, &lnl_pmc_dev),
+ X86_MATCH_VFM(INTEL_PANTHERLAKE_L, &ptl_pmc_dev),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids);
-static const struct pci_device_id pmc_pci_ids[] = {
- { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID) },
- { }
-};
-
/*
* This quirk can be used on those platforms where
* the platform BIOS enforces 24Mhz crystal to shutdown
@@ -1452,7 +1492,7 @@ static int pmc_core_probe(struct platform_device *pdev)
static bool device_initialized;
struct pmc_dev *pmcdev;
const struct x86_cpu_id *cpu_id;
- int (*core_init)(struct pmc_dev *pmcdev);
+ struct pmc_dev_info *pmc_dev_info;
struct pmc *primary_pmc;
int ret;
@@ -1472,7 +1512,7 @@ static int pmc_core_probe(struct platform_device *pdev)
if (!cpu_id)
return -ENODEV;
- core_init = (int (*)(struct pmc_dev *))cpu_id->driver_data;
+ pmc_dev_info = (struct pmc_dev_info *)cpu_id->driver_data;
/* Primary PMC */
primary_pmc = devm_kzalloc(&pdev->dev, sizeof(*primary_pmc), GFP_KERNEL);
@@ -1489,16 +1529,13 @@ static int pmc_core_probe(struct platform_device *pdev)
if (!pmcdev->pkgc_res_cnt)
return -ENOMEM;
- /*
- * Coffee Lake has CPU ID of Kaby Lake and Cannon Lake PCH. So here
- * Sunrisepoint PCH regmap can't be used. Use Cannon Lake PCH regmap
- * in this case.
- */
- if (core_init == spt_core_init && !pci_dev_present(pmc_pci_ids))
- core_init = cnp_core_init;
-
mutex_init(&pmcdev->lock);
- ret = core_init(pmcdev);
+
+ if (pmc_dev_info->init)
+ ret = pmc_dev_info->init(pmcdev, pmc_dev_info);
+ else
+ ret = generic_core_init(pmcdev, pmc_dev_info);
+
if (ret) {
pmc_core_clean_structure(pdev);
return ret;
diff --git a/drivers/platform/x86/intel/pmc/core.h b/drivers/platform/x86/intel/pmc/core.h
index b9d3291d0bf2..945a1c440cca 100644
--- a/drivers/platform/x86/intel/pmc/core.h
+++ b/drivers/platform/x86/intel/pmc/core.h
@@ -285,6 +285,14 @@ enum ppfear_regs {
#define LNL_PPFEAR_NUM_ENTRIES 12
#define LNL_S0IX_BLOCKER_OFFSET 0x2004
+/* Panther Lake Power Management Controller register offsets */
+#define PTL_LPM_NUM_MAPS 14
+#define PTL_PMC_LTR_SATA2 0x1B90
+#define PTL_PMC_LTR_PMC 0x1BA8
+#define PTL_PMC_LTR_CUR_ASLT 0x1C28
+#define PTL_PMC_LTR_CUR_PLT 0x1C2C
+#define PTL_PCD_PMC_MMIO_REG_LEN 0x31A8
+
extern const char *pmc_lpm_modes[];
struct pmc_bit_map {
@@ -430,178 +438,77 @@ struct pmc_dev {
enum pmc_index {
PMC_IDX_MAIN,
- PMC_IDX_SOC = PMC_IDX_MAIN,
PMC_IDX_IOE,
PMC_IDX_PCH,
PMC_IDX_MAX
};
+/**
+ * struct pmc_dev_info - Structure to keep PMC device info
+ * @pci_func: Function number of the primary PMC
+ * @dmu_guid: Die Management Unit GUID
+ * @regmap_list: Pointer to a list of pmc_info structures that could be
+ * available for the platform. When set, this field implies
+ * SSRAM support.
+ * @map: Pointer to a pmc_reg_map struct that contains platform
+ * specific attributes of the primary PMC
+ * @suspend: Function to perform platform specific suspend
+ * @resume: Function to perform platform specific resume
+ * @init: Function to perform platform specific init action
+ */
+struct pmc_dev_info {
+ u8 pci_func;
+ u32 dmu_guid;
+ struct pmc_info *regmap_list;
+ const struct pmc_reg_map *map;
+ void (*suspend)(struct pmc_dev *pmcdev);
+ int (*resume)(struct pmc_dev *pmcdev);
+ int (*init)(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info);
+};
+
extern const struct pmc_bit_map msr_map[];
-extern const struct pmc_bit_map spt_pll_map[];
-extern const struct pmc_bit_map spt_mphy_map[];
-extern const struct pmc_bit_map spt_pfear_map[];
-extern const struct pmc_bit_map *ext_spt_pfear_map[];
-extern const struct pmc_bit_map spt_ltr_show_map[];
-extern const struct pmc_reg_map spt_reg_map;
extern const struct pmc_bit_map cnp_pfear_map[];
-extern const struct pmc_bit_map *ext_cnp_pfear_map[];
-extern const struct pmc_bit_map cnp_slps0_dbg0_map[];
-extern const struct pmc_bit_map cnp_slps0_dbg1_map[];
-extern const struct pmc_bit_map cnp_slps0_dbg2_map[];
extern const struct pmc_bit_map *cnp_slps0_dbg_maps[];
extern const struct pmc_bit_map cnp_ltr_show_map[];
extern const struct pmc_reg_map cnp_reg_map;
-extern const struct pmc_bit_map icl_pfear_map[];
-extern const struct pmc_bit_map *ext_icl_pfear_map[];
-extern const struct pmc_reg_map icl_reg_map;
-extern const struct pmc_bit_map tgl_pfear_map[];
-extern const struct pmc_bit_map *ext_tgl_pfear_map[];
-extern const struct pmc_bit_map tgl_clocksource_status_map[];
-extern const struct pmc_bit_map tgl_power_gating_status_map[];
-extern const struct pmc_bit_map tgl_d3_status_map[];
-extern const struct pmc_bit_map tgl_vnn_req_status_map[];
-extern const struct pmc_bit_map tgl_vnn_misc_status_map[];
extern const struct pmc_bit_map tgl_signal_status_map[];
-extern const struct pmc_bit_map *tgl_lpm_maps[];
-extern const struct pmc_reg_map tgl_reg_map;
-extern const struct pmc_reg_map tgl_h_reg_map;
-extern const struct pmc_bit_map adl_pfear_map[];
-extern const struct pmc_bit_map *ext_adl_pfear_map[];
-extern const struct pmc_bit_map adl_ltr_show_map[];
-extern const struct pmc_bit_map adl_clocksource_status_map[];
-extern const struct pmc_bit_map adl_power_gating_status_0_map[];
-extern const struct pmc_bit_map adl_power_gating_status_1_map[];
-extern const struct pmc_bit_map adl_power_gating_status_2_map[];
-extern const struct pmc_bit_map adl_d3_status_0_map[];
-extern const struct pmc_bit_map adl_d3_status_1_map[];
-extern const struct pmc_bit_map adl_d3_status_2_map[];
-extern const struct pmc_bit_map adl_d3_status_3_map[];
-extern const struct pmc_bit_map adl_vnn_req_status_0_map[];
-extern const struct pmc_bit_map adl_vnn_req_status_1_map[];
-extern const struct pmc_bit_map adl_vnn_req_status_2_map[];
-extern const struct pmc_bit_map adl_vnn_req_status_3_map[];
-extern const struct pmc_bit_map adl_vnn_misc_status_map[];
-extern const struct pmc_bit_map *adl_lpm_maps[];
extern const struct pmc_reg_map adl_reg_map;
extern const struct pmc_bit_map mtl_socm_pfear_map[];
-extern const struct pmc_bit_map *ext_mtl_socm_pfear_map[];
-extern const struct pmc_bit_map mtl_socm_ltr_show_map[];
-extern const struct pmc_bit_map mtl_socm_clocksource_status_map[];
-extern const struct pmc_bit_map mtl_socm_power_gating_status_0_map[];
-extern const struct pmc_bit_map mtl_socm_power_gating_status_1_map[];
-extern const struct pmc_bit_map mtl_socm_power_gating_status_2_map[];
extern const struct pmc_bit_map mtl_socm_d3_status_0_map[];
extern const struct pmc_bit_map mtl_socm_d3_status_1_map[];
-extern const struct pmc_bit_map mtl_socm_d3_status_2_map[];
-extern const struct pmc_bit_map mtl_socm_d3_status_3_map[];
extern const struct pmc_bit_map mtl_socm_vnn_req_status_0_map[];
extern const struct pmc_bit_map mtl_socm_vnn_req_status_1_map[];
extern const struct pmc_bit_map mtl_socm_vnn_req_status_2_map[];
-extern const struct pmc_bit_map mtl_socm_vnn_req_status_3_map[];
extern const struct pmc_bit_map mtl_socm_vnn_misc_status_map[];
extern const struct pmc_bit_map mtl_socm_signal_status_map[];
-extern const struct pmc_bit_map *mtl_socm_lpm_maps[];
extern const struct pmc_reg_map mtl_socm_reg_map;
-extern const struct pmc_bit_map mtl_ioep_pfear_map[];
-extern const struct pmc_bit_map *ext_mtl_ioep_pfear_map[];
-extern const struct pmc_bit_map mtl_ioep_ltr_show_map[];
-extern const struct pmc_bit_map mtl_ioep_clocksource_status_map[];
-extern const struct pmc_bit_map mtl_ioep_power_gating_status_0_map[];
-extern const struct pmc_bit_map mtl_ioep_power_gating_status_1_map[];
-extern const struct pmc_bit_map mtl_ioep_power_gating_status_2_map[];
-extern const struct pmc_bit_map mtl_ioep_d3_status_0_map[];
-extern const struct pmc_bit_map mtl_ioep_d3_status_1_map[];
-extern const struct pmc_bit_map mtl_ioep_d3_status_2_map[];
-extern const struct pmc_bit_map mtl_ioep_d3_status_3_map[];
-extern const struct pmc_bit_map mtl_ioep_vnn_req_status_0_map[];
-extern const struct pmc_bit_map mtl_ioep_vnn_req_status_1_map[];
-extern const struct pmc_bit_map mtl_ioep_vnn_req_status_2_map[];
-extern const struct pmc_bit_map mtl_ioep_vnn_req_status_3_map[];
-extern const struct pmc_bit_map mtl_ioep_vnn_misc_status_map[];
-extern const struct pmc_bit_map *mtl_ioep_lpm_maps[];
extern const struct pmc_reg_map mtl_ioep_reg_map;
-extern const struct pmc_bit_map mtl_ioem_pfear_map[];
-extern const struct pmc_bit_map *ext_mtl_ioem_pfear_map[];
-extern const struct pmc_bit_map mtl_ioem_power_gating_status_1_map[];
-extern const struct pmc_bit_map mtl_ioem_vnn_req_status_1_map[];
-extern const struct pmc_bit_map *mtl_ioem_lpm_maps[];
-extern const struct pmc_reg_map mtl_ioem_reg_map;
-extern const struct pmc_reg_map lnl_socm_reg_map;
-
-/* LNL */
-extern const struct pmc_bit_map lnl_ltr_show_map[];
-extern const struct pmc_bit_map lnl_clocksource_status_map[];
-extern const struct pmc_bit_map lnl_power_gating_status_0_map[];
-extern const struct pmc_bit_map lnl_power_gating_status_1_map[];
-extern const struct pmc_bit_map lnl_power_gating_status_2_map[];
-extern const struct pmc_bit_map lnl_d3_status_0_map[];
-extern const struct pmc_bit_map lnl_d3_status_1_map[];
-extern const struct pmc_bit_map lnl_d3_status_2_map[];
-extern const struct pmc_bit_map lnl_d3_status_3_map[];
-extern const struct pmc_bit_map lnl_vnn_req_status_0_map[];
-extern const struct pmc_bit_map lnl_vnn_req_status_1_map[];
-extern const struct pmc_bit_map lnl_vnn_req_status_2_map[];
-extern const struct pmc_bit_map lnl_vnn_req_status_3_map[];
-extern const struct pmc_bit_map lnl_vnn_misc_status_map[];
-extern const struct pmc_bit_map *lnl_lpm_maps[];
-extern const struct pmc_bit_map *lnl_blk_maps[];
-extern const struct pmc_bit_map lnl_pfear_map[];
-extern const struct pmc_bit_map *ext_lnl_pfear_map[];
-extern const struct pmc_bit_map lnl_signal_status_map[];
-
-/* ARL */
-extern const struct pmc_bit_map arl_socs_ltr_show_map[];
-extern const struct pmc_bit_map arl_socs_clocksource_status_map[];
-extern const struct pmc_bit_map arl_socs_power_gating_status_0_map[];
-extern const struct pmc_bit_map arl_socs_power_gating_status_1_map[];
-extern const struct pmc_bit_map arl_socs_power_gating_status_2_map[];
-extern const struct pmc_bit_map arl_socs_d3_status_2_map[];
-extern const struct pmc_bit_map arl_socs_d3_status_3_map[];
-extern const struct pmc_bit_map arl_socs_vnn_req_status_3_map[];
-extern const struct pmc_bit_map *arl_socs_lpm_maps[];
-extern const struct pmc_bit_map arl_socs_pfear_map[];
-extern const struct pmc_bit_map *ext_arl_socs_pfear_map[];
-extern const struct pmc_reg_map arl_socs_reg_map;
-extern const struct pmc_bit_map arl_pchs_ltr_show_map[];
-extern const struct pmc_bit_map arl_pchs_clocksource_status_map[];
-extern const struct pmc_bit_map arl_pchs_power_gating_status_0_map[];
-extern const struct pmc_bit_map arl_pchs_power_gating_status_1_map[];
-extern const struct pmc_bit_map arl_pchs_power_gating_status_2_map[];
-extern const struct pmc_bit_map arl_pchs_d3_status_0_map[];
-extern const struct pmc_bit_map arl_pchs_d3_status_1_map[];
-extern const struct pmc_bit_map arl_pchs_d3_status_2_map[];
-extern const struct pmc_bit_map arl_pchs_d3_status_3_map[];
-extern const struct pmc_bit_map arl_pchs_vnn_req_status_0_map[];
-extern const struct pmc_bit_map arl_pchs_vnn_req_status_1_map[];
-extern const struct pmc_bit_map arl_pchs_vnn_req_status_2_map[];
-extern const struct pmc_bit_map arl_pchs_vnn_req_status_3_map[];
-extern const struct pmc_bit_map arl_pchs_vnn_misc_status_map[];
-extern const struct pmc_bit_map arl_pchs_signal_status_map[];
-extern const struct pmc_bit_map *arl_pchs_lpm_maps[];
-extern const struct pmc_reg_map arl_pchs_reg_map;
-
-extern void pmc_core_get_tgl_lpm_reqs(struct platform_device *pdev);
-extern int pmc_core_ssram_get_lpm_reqs(struct pmc_dev *pmcdev);
+
+void pmc_core_get_tgl_lpm_reqs(struct platform_device *pdev);
+int pmc_core_ssram_get_lpm_reqs(struct pmc_dev *pmcdev);
int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore);
int pmc_core_resume_common(struct pmc_dev *pmcdev);
int get_primary_reg_base(struct pmc *pmc);
-extern void pmc_core_get_low_power_modes(struct pmc_dev *pmcdev);
-extern void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 guid);
-extern void pmc_core_set_device_d3(unsigned int device);
-
-extern int pmc_core_ssram_init(struct pmc_dev *pmcdev, int func);
-
-int spt_core_init(struct pmc_dev *pmcdev);
-int cnp_core_init(struct pmc_dev *pmcdev);
-int icl_core_init(struct pmc_dev *pmcdev);
-int tgl_core_init(struct pmc_dev *pmcdev);
-int tgl_l_core_init(struct pmc_dev *pmcdev);
-int tgl_core_generic_init(struct pmc_dev *pmcdev, int pch_tp);
-int adl_core_init(struct pmc_dev *pmcdev);
-int mtl_core_init(struct pmc_dev *pmcdev);
-int arl_core_init(struct pmc_dev *pmcdev);
-int lnl_core_init(struct pmc_dev *pmcdev);
+void pmc_core_get_low_power_modes(struct pmc_dev *pmcdev);
+void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 guid);
+void pmc_core_set_device_d3(unsigned int device);
+
+int pmc_core_ssram_init(struct pmc_dev *pmcdev, int func);
+
+int generic_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info);
+
+extern struct pmc_dev_info spt_pmc_dev;
+extern struct pmc_dev_info cnp_pmc_dev;
+extern struct pmc_dev_info icl_pmc_dev;
+extern struct pmc_dev_info tgl_l_pmc_dev;
+extern struct pmc_dev_info tgl_pmc_dev;
+extern struct pmc_dev_info adl_pmc_dev;
+extern struct pmc_dev_info mtl_pmc_dev;
+extern struct pmc_dev_info arl_pmc_dev;
+extern struct pmc_dev_info arl_h_pmc_dev;
+extern struct pmc_dev_info lnl_pmc_dev;
+extern struct pmc_dev_info ptl_pmc_dev;
void cnl_suspend(struct pmc_dev *pmcdev);
int cnl_resume(struct pmc_dev *pmcdev);
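
For orientation, here is a minimal sketch of what a future platform addition would look like under the pmc_dev_info scheme introduced above. The names xyz.c, xyz_reg_map, xyz_pmc_dev and INTEL_XYZ are placeholders for illustration only and are not part of this patch; the sketch assumes the declarations in core.h shown in this diff. With no .init callback and no .regmap_list, pmc_core_probe() would fall back to generic_core_init(), which would take the legacy (non-SSRAM) path.

/* Hypothetical xyz.c -- illustration only, not part of this patch */
#include "core.h"

static const struct pmc_reg_map xyz_reg_map = {
	/* platform register offsets and bit maps would go here */
	.msr_sts = msr_map,
};

/*
 * No .init and no .regmap_list: pmc_core_probe() calls generic_core_init(),
 * which maps the primary PMC via get_primary_reg_base().
 */
struct pmc_dev_info xyz_pmc_dev = {
	.map = &xyz_reg_map,
	.suspend = cnl_suspend,
	.resume = cnl_resume,
};

/*
 * core.h would gain "extern struct pmc_dev_info xyz_pmc_dev;" and core.c a
 * dispatch entry in intel_pmc_core_ids[] (INTEL_XYZ is a placeholder VFM):
 *
 *	X86_MATCH_VFM(INTEL_XYZ, &xyz_pmc_dev),
 */
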
diff --git a/drivers/platform/x86/intel/pmc/icl.c b/drivers/platform/x86/intel/pmc/icl.c
index 71b0fd6cb7d8..db7ed15bf863 100644
--- a/drivers/platform/x86/intel/pmc/icl.c
+++ b/drivers/platform/x86/intel/pmc/icl.c
@@ -10,7 +10,7 @@
#include "core.h"
-const struct pmc_bit_map icl_pfear_map[] = {
+static const struct pmc_bit_map icl_pfear_map[] = {
{"RES_65", BIT(0)},
{"RES_66", BIT(1)},
{"RES_67", BIT(2)},
@@ -22,7 +22,7 @@ const struct pmc_bit_map icl_pfear_map[] = {
{}
};
-const struct pmc_bit_map *ext_icl_pfear_map[] = {
+static const struct pmc_bit_map *ext_icl_pfear_map[] = {
/*
* Check intel_pmc_core_ids[] users of icl_reg_map for
* a list of core SoCs using this.
@@ -32,7 +32,7 @@ const struct pmc_bit_map *ext_icl_pfear_map[] = {
NULL
};
-const struct pmc_reg_map icl_reg_map = {
+static const struct pmc_reg_map icl_reg_map = {
.pfear_sts = ext_icl_pfear_map,
.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
.slp_s0_res_counter_step = ICL_PMC_SLP_S0_RES_COUNTER_STEP,
@@ -50,18 +50,6 @@ const struct pmc_reg_map icl_reg_map = {
.etr3_offset = ETR3_OFFSET,
};
-int icl_core_init(struct pmc_dev *pmcdev)
-{
- struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
- int ret;
-
- pmc->map = &icl_reg_map;
-
- ret = get_primary_reg_base(pmc);
- if (ret)
- return ret;
-
- pmc_core_get_low_power_modes(pmcdev);
-
- return ret;
-}
+struct pmc_dev_info icl_pmc_dev = {
+ .map = &icl_reg_map,
+};
diff --git a/drivers/platform/x86/intel/pmc/lnl.c b/drivers/platform/x86/intel/pmc/lnl.c
index be029f12cdf4..da513c234714 100644
--- a/drivers/platform/x86/intel/pmc/lnl.c
+++ b/drivers/platform/x86/intel/pmc/lnl.c
@@ -13,7 +13,7 @@
#include "core.h"
-const struct pmc_bit_map lnl_ltr_show_map[] = {
+static const struct pmc_bit_map lnl_ltr_show_map[] = {
{"SOUTHPORT_A", CNP_PMC_LTR_SPA},
{"SOUTHPORT_B", CNP_PMC_LTR_SPB},
{"SATA", CNP_PMC_LTR_SATA},
@@ -55,7 +55,7 @@ const struct pmc_bit_map lnl_ltr_show_map[] = {
{}
};
-const struct pmc_bit_map lnl_power_gating_status_0_map[] = {
+static const struct pmc_bit_map lnl_power_gating_status_0_map[] = {
{"PMC_PGD0_PG_STS", BIT(0), 0},
{"FUSE_OSSE_PGD0_PG_STS", BIT(1), 0},
{"ESPISPI_PGD0_PG_STS", BIT(2), 0},
@@ -91,7 +91,7 @@ const struct pmc_bit_map lnl_power_gating_status_0_map[] = {
{}
};
-const struct pmc_bit_map lnl_power_gating_status_1_map[] = {
+static const struct pmc_bit_map lnl_power_gating_status_1_map[] = {
{"USBR0_PGD0_PG_STS", BIT(0), 1},
{"SUSRAM_PGD0_PG_STS", BIT(1), 1},
{"SMT1_PGD0_PG_STS", BIT(2), 1},
@@ -127,7 +127,7 @@ const struct pmc_bit_map lnl_power_gating_status_1_map[] = {
{}
};
-const struct pmc_bit_map lnl_power_gating_status_2_map[] = {
+static const struct pmc_bit_map lnl_power_gating_status_2_map[] = {
{"PSF8_PGD0_PG_STS", BIT(0), 0},
{"SBR16B2_PGD0_PG_STS", BIT(1), 0},
{"D2D_IPU_PGD0_PG_STS", BIT(2), 1},
@@ -163,7 +163,7 @@ const struct pmc_bit_map lnl_power_gating_status_2_map[] = {
{}
};
-const struct pmc_bit_map lnl_d3_status_0_map[] = {
+static const struct pmc_bit_map lnl_d3_status_0_map[] = {
{"LPSS_D3_STS", BIT(3), 1},
{"XDCI_D3_STS", BIT(4), 1},
{"XHCI_D3_STS", BIT(5), 1},
@@ -175,7 +175,7 @@ const struct pmc_bit_map lnl_d3_status_0_map[] = {
{}
};
-const struct pmc_bit_map lnl_d3_status_1_map[] = {
+static const struct pmc_bit_map lnl_d3_status_1_map[] = {
{"OSSE_SMT1_D3_STS", BIT(7), 0},
{"GBE_D3_STS", BIT(19), 0},
{"ITSS_D3_STS", BIT(23), 0},
@@ -185,7 +185,7 @@ const struct pmc_bit_map lnl_d3_status_1_map[] = {
{}
};
-const struct pmc_bit_map lnl_d3_status_2_map[] = {
+static const struct pmc_bit_map lnl_d3_status_2_map[] = {
{"ESE_D3_STS", BIT(0), 0},
{"CSMERTC_D3_STS", BIT(1), 0},
{"SUSRAM_D3_STS", BIT(2), 0},
@@ -205,7 +205,7 @@ const struct pmc_bit_map lnl_d3_status_2_map[] = {
{}
};
-const struct pmc_bit_map lnl_d3_status_3_map[] = {
+static const struct pmc_bit_map lnl_d3_status_3_map[] = {
{"THC0_D3_STS", BIT(14), 1},
{"THC1_D3_STS", BIT(15), 1},
{"OSSE_SMT3_D3_STS", BIT(21), 0},
@@ -213,14 +213,14 @@ const struct pmc_bit_map lnl_d3_status_3_map[] = {
{}
};
-const struct pmc_bit_map lnl_vnn_req_status_0_map[] = {
+static const struct pmc_bit_map lnl_vnn_req_status_0_map[] = {
{"LPSS_VNN_REQ_STS", BIT(3), 1},
{"OSSE_VNN_REQ_STS", BIT(15), 1},
{"ESPISPI_VNN_REQ_STS", BIT(18), 1},
{}
};
-const struct pmc_bit_map lnl_vnn_req_status_1_map[] = {
+static const struct pmc_bit_map lnl_vnn_req_status_1_map[] = {
{"NPK_VNN_REQ_STS", BIT(4), 1},
{"OSSE_SMT1_VNN_REQ_STS", BIT(7), 1},
{"DFXAGG_VNN_REQ_STS", BIT(8), 0},
@@ -232,7 +232,7 @@ const struct pmc_bit_map lnl_vnn_req_status_1_map[] = {
{}
};
-const struct pmc_bit_map lnl_vnn_req_status_2_map[] = {
+static const struct pmc_bit_map lnl_vnn_req_status_2_map[] = {
{"eSE_VNN_REQ_STS", BIT(0), 1},
{"CSMERTC_VNN_REQ_STS", BIT(1), 1},
{"CSE_VNN_REQ_STS", BIT(4), 1},
@@ -249,14 +249,14 @@ const struct pmc_bit_map lnl_vnn_req_status_2_map[] = {
{}
};
-const struct pmc_bit_map lnl_vnn_req_status_3_map[] = {
+static const struct pmc_bit_map lnl_vnn_req_status_3_map[] = {
{"DISP_SHIM_VNN_REQ_STS", BIT(2), 0},
{"DTS0_VNN_REQ_STS", BIT(7), 0},
{"GPIOCOM5_VNN_REQ_STS", BIT(11), 2},
{}
};
-const struct pmc_bit_map lnl_vnn_misc_status_map[] = {
+static const struct pmc_bit_map lnl_vnn_misc_status_map[] = {
{"CPU_C10_REQ_STS", BIT(0), 0},
{"TS_OFF_REQ_STS", BIT(1), 0},
{"PNDE_MET_REQ_STS", BIT(2), 1},
@@ -292,7 +292,7 @@ const struct pmc_bit_map lnl_vnn_misc_status_map[] = {
{}
};
-const struct pmc_bit_map lnl_clocksource_status_map[] = {
+static const struct pmc_bit_map lnl_clocksource_status_map[] = {
{"AON2_OFF_STS", BIT(0), 0},
{"AON3_OFF_STS", BIT(1), 1},
{"AON4_OFF_STS", BIT(2), 1},
@@ -317,7 +317,7 @@ const struct pmc_bit_map lnl_clocksource_status_map[] = {
{}
};
-const struct pmc_bit_map lnl_signal_status_map[] = {
+static const struct pmc_bit_map lnl_signal_status_map[] = {
{"LSX_Wake0_STS", BIT(0), 0},
{"LSX_Wake1_STS", BIT(1), 0},
{"LSX_Wake2_STS", BIT(2), 0},
@@ -337,7 +337,7 @@ const struct pmc_bit_map lnl_signal_status_map[] = {
{}
};
-const struct pmc_bit_map lnl_rsc_status_map[] = {
+static const struct pmc_bit_map lnl_rsc_status_map[] = {
{"Memory", 0, 1},
{"PSF0", 0, 1},
{"PSF4", 0, 1},
@@ -349,7 +349,7 @@ const struct pmc_bit_map lnl_rsc_status_map[] = {
{}
};
-const struct pmc_bit_map *lnl_lpm_maps[] = {
+static const struct pmc_bit_map *lnl_lpm_maps[] = {
lnl_clocksource_status_map,
lnl_power_gating_status_0_map,
lnl_power_gating_status_1_map,
@@ -367,7 +367,7 @@ const struct pmc_bit_map *lnl_lpm_maps[] = {
NULL
};
-const struct pmc_bit_map *lnl_blk_maps[] = {
+static const struct pmc_bit_map *lnl_blk_maps[] = {
lnl_power_gating_status_0_map,
lnl_power_gating_status_1_map,
lnl_power_gating_status_2_map,
@@ -386,7 +386,7 @@ const struct pmc_bit_map *lnl_blk_maps[] = {
NULL
};
-const struct pmc_bit_map lnl_pfear_map[] = {
+static const struct pmc_bit_map lnl_pfear_map[] = {
{"PMC_0", BIT(0)},
{"FUSE_OSSE", BIT(1)},
{"ESPISPI", BIT(2)},
@@ -498,12 +498,12 @@ const struct pmc_bit_map lnl_pfear_map[] = {
{}
};
-const struct pmc_bit_map *ext_lnl_pfear_map[] = {
+static const struct pmc_bit_map *ext_lnl_pfear_map[] = {
lnl_pfear_map,
NULL
};
-const struct pmc_reg_map lnl_socm_reg_map = {
+static const struct pmc_reg_map lnl_socm_reg_map = {
.pfear_sts = ext_lnl_pfear_map,
.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
.slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP,
@@ -550,22 +550,15 @@ static int lnl_resume(struct pmc_dev *pmcdev)
return cnl_resume(pmcdev);
}
-int lnl_core_init(struct pmc_dev *pmcdev)
+static int lnl_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
{
- int ret;
- struct pmc *pmc = pmcdev->pmcs[PMC_IDX_SOC];
-
lnl_d3_fixup();
-
- pmcdev->suspend = cnl_suspend;
- pmcdev->resume = lnl_resume;
-
- pmc->map = &lnl_socm_reg_map;
- ret = get_primary_reg_base(pmc);
- if (ret)
- return ret;
-
- pmc_core_get_low_power_modes(pmcdev);
-
- return 0;
+ return generic_core_init(pmcdev, pmc_dev_info);
}
+
+struct pmc_dev_info lnl_pmc_dev = {
+ .map = &lnl_socm_reg_map,
+ .suspend = cnl_suspend,
+ .resume = lnl_resume,
+ .init = lnl_core_init,
+};
diff --git a/drivers/platform/x86/intel/pmc/mtl.c b/drivers/platform/x86/intel/pmc/mtl.c
index 02949fed76e9..8862829694a7 100644
--- a/drivers/platform/x86/intel/pmc/mtl.c
+++ b/drivers/platform/x86/intel/pmc/mtl.c
@@ -102,12 +102,12 @@ const struct pmc_bit_map mtl_socm_pfear_map[] = {
{}
};
-const struct pmc_bit_map *ext_mtl_socm_pfear_map[] = {
+static const struct pmc_bit_map *ext_mtl_socm_pfear_map[] = {
mtl_socm_pfear_map,
NULL
};
-const struct pmc_bit_map mtl_socm_ltr_show_map[] = {
+static const struct pmc_bit_map mtl_socm_ltr_show_map[] = {
{"SOUTHPORT_A", CNP_PMC_LTR_SPA},
{"SOUTHPORT_B", CNP_PMC_LTR_SPB},
{"SATA", CNP_PMC_LTR_SATA},
@@ -141,7 +141,7 @@ const struct pmc_bit_map mtl_socm_ltr_show_map[] = {
{}
};
-const struct pmc_bit_map mtl_socm_clocksource_status_map[] = {
+static const struct pmc_bit_map mtl_socm_clocksource_status_map[] = {
{"AON2_OFF_STS", BIT(0)},
{"AON3_OFF_STS", BIT(1)},
{"AON4_OFF_STS", BIT(2)},
@@ -167,7 +167,7 @@ const struct pmc_bit_map mtl_socm_clocksource_status_map[] = {
{}
};
-const struct pmc_bit_map mtl_socm_power_gating_status_0_map[] = {
+static const struct pmc_bit_map mtl_socm_power_gating_status_0_map[] = {
{"PMC_PGD0_PG_STS", BIT(0)},
{"DMI_PGD0_PG_STS", BIT(1)},
{"ESPISPI_PGD0_PG_STS", BIT(2)},
@@ -203,7 +203,7 @@ const struct pmc_bit_map mtl_socm_power_gating_status_0_map[] = {
{}
};
-const struct pmc_bit_map mtl_socm_power_gating_status_1_map[] = {
+static const struct pmc_bit_map mtl_socm_power_gating_status_1_map[] = {
{"USBR0_PGD0_PG_STS", BIT(0)},
{"SUSRAM_PGD0_PG_STS", BIT(1)},
{"SMT1_PGD0_PG_STS", BIT(2)},
@@ -239,7 +239,7 @@ const struct pmc_bit_map mtl_socm_power_gating_status_1_map[] = {
{}
};
-const struct pmc_bit_map mtl_socm_power_gating_status_2_map[] = {
+static const struct pmc_bit_map mtl_socm_power_gating_status_2_map[] = {
{"PSF8_PGD0_PG_STS", BIT(0)},
{"FIA_PGD0_PG_STS", BIT(1)},
{"SOC_D2D_PGD1_PG_STS", BIT(2)},
@@ -291,7 +291,7 @@ const struct pmc_bit_map mtl_socm_d3_status_1_map[] = {
{}
};
-const struct pmc_bit_map mtl_socm_d3_status_2_map[] = {
+static const struct pmc_bit_map mtl_socm_d3_status_2_map[] = {
{"GNA_D3_STS", BIT(0)},
{"CSMERTC_D3_STS", BIT(1)},
{"SUSRAM_D3_STS", BIT(2)},
@@ -310,7 +310,7 @@ const struct pmc_bit_map mtl_socm_d3_status_2_map[] = {
{}
};
-const struct pmc_bit_map mtl_socm_d3_status_3_map[] = {
+static const struct pmc_bit_map mtl_socm_d3_status_3_map[] = {
{"ESE_D3_STS", BIT(2)},
{"GBETSN_D3_STS", BIT(13)},
{"THC0_D3_STS", BIT(14)},
@@ -353,7 +353,7 @@ const struct pmc_bit_map mtl_socm_vnn_req_status_2_map[] = {
{}
};
-const struct pmc_bit_map mtl_socm_vnn_req_status_3_map[] = {
+static const struct pmc_bit_map mtl_socm_vnn_req_status_3_map[] = {
{"ESE_VNN_REQ_STS", BIT(2)},
{"DTS0_VNN_REQ_STS", BIT(7)},
{"GPIOCOM5_VNN_REQ_STS", BIT(11)},
@@ -432,7 +432,7 @@ const struct pmc_bit_map mtl_socm_signal_status_map[] = {
{}
};
-const struct pmc_bit_map *mtl_socm_lpm_maps[] = {
+static const struct pmc_bit_map *mtl_socm_lpm_maps[] = {
mtl_socm_clocksource_status_map,
mtl_socm_power_gating_status_0_map,
mtl_socm_power_gating_status_1_map,
@@ -476,7 +476,7 @@ const struct pmc_reg_map mtl_socm_reg_map = {
.lpm_reg_index = MTL_LPM_REG_INDEX,
};
-const struct pmc_bit_map mtl_ioep_pfear_map[] = {
+static const struct pmc_bit_map mtl_ioep_pfear_map[] = {
{"PMC_0", BIT(0)},
{"OPI", BIT(1)},
{"TCSS", BIT(2)},
@@ -563,12 +563,12 @@ const struct pmc_bit_map mtl_ioep_pfear_map[] = {
{}
};
-const struct pmc_bit_map *ext_mtl_ioep_pfear_map[] = {
+static const struct pmc_bit_map *ext_mtl_ioep_pfear_map[] = {
mtl_ioep_pfear_map,
NULL
};
-const struct pmc_bit_map mtl_ioep_ltr_show_map[] = {
+static const struct pmc_bit_map mtl_ioep_ltr_show_map[] = {
{"SOUTHPORT_A", CNP_PMC_LTR_SPA},
{"SOUTHPORT_B", CNP_PMC_LTR_SPB},
{"SATA", CNP_PMC_LTR_SATA},
@@ -600,7 +600,7 @@ const struct pmc_bit_map mtl_ioep_ltr_show_map[] = {
{}
};
-const struct pmc_bit_map mtl_ioep_clocksource_status_map[] = {
+static const struct pmc_bit_map mtl_ioep_clocksource_status_map[] = {
{"AON2_OFF_STS", BIT(0)},
{"AON3_OFF_STS", BIT(1)},
{"AON4_OFF_STS", BIT(2)},
@@ -623,7 +623,7 @@ const struct pmc_bit_map mtl_ioep_clocksource_status_map[] = {
{}
};
-const struct pmc_bit_map mtl_ioep_power_gating_status_0_map[] = {
+static const struct pmc_bit_map mtl_ioep_power_gating_status_0_map[] = {
{"PMC_PGD0_PG_STS", BIT(0)},
{"DMI_PGD0_PG_STS", BIT(1)},
{"TCSS_PGD0_PG_STS", BIT(2)},
@@ -650,7 +650,7 @@ const struct pmc_bit_map mtl_ioep_power_gating_status_0_map[] = {
{}
};
-const struct pmc_bit_map mtl_ioep_power_gating_status_1_map[] = {
+static const struct pmc_bit_map mtl_ioep_power_gating_status_1_map[] = {
{"PSF9_PGD0_PG_STS", BIT(0)},
{"MPFPW4_PGD0_PG_STS", BIT(1)},
{"SBR0_PGD0_PG_STS", BIT(8)},
@@ -668,7 +668,7 @@ const struct pmc_bit_map mtl_ioep_power_gating_status_1_map[] = {
{}
};
-const struct pmc_bit_map mtl_ioep_power_gating_status_2_map[] = {
+static const struct pmc_bit_map mtl_ioep_power_gating_status_2_map[] = {
{"FIA_PGD0_PG_STS", BIT(1)},
{"FIA_P_PGD0_PG_STS", BIT(3)},
{"TAM_PGD0_PG_STS", BIT(4)},
@@ -680,7 +680,7 @@ const struct pmc_bit_map mtl_ioep_power_gating_status_2_map[] = {
{}
};
-const struct pmc_bit_map mtl_ioep_d3_status_0_map[] = {
+static const struct pmc_bit_map mtl_ioep_d3_status_0_map[] = {
{"SPF_D3_STS", BIT(0)},
{"SPA_D3_STS", BIT(12)},
{"SPB_D3_STS", BIT(13)},
@@ -691,43 +691,43 @@ const struct pmc_bit_map mtl_ioep_d3_status_0_map[] = {
{}
};
-const struct pmc_bit_map mtl_ioep_d3_status_1_map[] = {
+static const struct pmc_bit_map mtl_ioep_d3_status_1_map[] = {
{"GBETSN1_D3_STS", BIT(14)},
{"P2S_D3_STS", BIT(24)},
{}
};
-const struct pmc_bit_map mtl_ioep_d3_status_2_map[] = {
+static const struct pmc_bit_map mtl_ioep_d3_status_2_map[] = {
{}
};
-const struct pmc_bit_map mtl_ioep_d3_status_3_map[] = {
+static const struct pmc_bit_map mtl_ioep_d3_status_3_map[] = {
{"GBETSN_D3_STS", BIT(13)},
{"ACE_D3_STS", BIT(23)},
{}
};
-const struct pmc_bit_map mtl_ioep_vnn_req_status_0_map[] = {
+static const struct pmc_bit_map mtl_ioep_vnn_req_status_0_map[] = {
{"FIA_VNN_REQ_STS", BIT(17)},
{}
};
-const struct pmc_bit_map mtl_ioep_vnn_req_status_1_map[] = {
+static const struct pmc_bit_map mtl_ioep_vnn_req_status_1_map[] = {
{"DFXAGG_VNN_REQ_STS", BIT(8)},
{}
};
-const struct pmc_bit_map mtl_ioep_vnn_req_status_2_map[] = {
+static const struct pmc_bit_map mtl_ioep_vnn_req_status_2_map[] = {
{}
};
-const struct pmc_bit_map mtl_ioep_vnn_req_status_3_map[] = {
+static const struct pmc_bit_map mtl_ioep_vnn_req_status_3_map[] = {
{"DTS0_VNN_REQ_STS", BIT(7)},
{"DISP_VNN_REQ_STS", BIT(19)},
{}
};
-const struct pmc_bit_map mtl_ioep_vnn_misc_status_map[] = {
+static const struct pmc_bit_map mtl_ioep_vnn_misc_status_map[] = {
{"CPU_C10_REQ_STS", BIT(0)},
{"TS_OFF_REQ_STS", BIT(1)},
{"PNDE_MET_REQ_STS", BIT(2)},
@@ -762,7 +762,7 @@ const struct pmc_bit_map mtl_ioep_vnn_misc_status_map[] = {
{}
};
-const struct pmc_bit_map *mtl_ioep_lpm_maps[] = {
+static const struct pmc_bit_map *mtl_ioep_lpm_maps[] = {
mtl_ioep_clocksource_status_map,
mtl_ioep_power_gating_status_0_map,
mtl_ioep_power_gating_status_1_map,
@@ -800,7 +800,7 @@ const struct pmc_reg_map mtl_ioep_reg_map = {
.lpm_reg_index = MTL_LPM_REG_INDEX,
};
-const struct pmc_bit_map mtl_ioem_pfear_map[] = {
+static const struct pmc_bit_map mtl_ioem_pfear_map[] = {
{"PMC_0", BIT(0)},
{"OPI", BIT(1)},
{"TCSS", BIT(2)},
@@ -887,12 +887,12 @@ const struct pmc_bit_map mtl_ioem_pfear_map[] = {
{}
};
-const struct pmc_bit_map *ext_mtl_ioem_pfear_map[] = {
+static const struct pmc_bit_map *ext_mtl_ioem_pfear_map[] = {
mtl_ioem_pfear_map,
NULL
};
-const struct pmc_bit_map mtl_ioem_power_gating_status_1_map[] = {
+static const struct pmc_bit_map mtl_ioem_power_gating_status_1_map[] = {
{"PSF9_PGD0_PG_STS", BIT(0)},
{"MPFPW4_PGD0_PG_STS", BIT(1)},
{"SBR0_PGD0_PG_STS", BIT(8)},
@@ -909,7 +909,7 @@ const struct pmc_bit_map mtl_ioem_power_gating_status_1_map[] = {
{}
};
-const struct pmc_bit_map *mtl_ioem_lpm_maps[] = {
+static const struct pmc_bit_map *mtl_ioem_lpm_maps[] = {
mtl_ioep_clocksource_status_map,
mtl_ioep_power_gating_status_0_map,
mtl_ioem_power_gating_status_1_map,
@@ -927,7 +927,7 @@ const struct pmc_bit_map *mtl_ioem_lpm_maps[] = {
NULL
};
-const struct pmc_reg_map mtl_ioem_reg_map = {
+static const struct pmc_reg_map mtl_ioem_reg_map = {
.regmap_length = MTL_IOE_PMC_MMIO_REG_LEN,
.pfear_sts = ext_mtl_ioem_pfear_map,
.ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
@@ -990,39 +990,18 @@ static int mtl_resume(struct pmc_dev *pmcdev)
return cnl_resume(pmcdev);
}
-int mtl_core_init(struct pmc_dev *pmcdev)
+static int mtl_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
{
- struct pmc *pmc = pmcdev->pmcs[PMC_IDX_SOC];
- int ret;
- int func = 2;
- bool ssram_init = true;
-
mtl_d3_fixup();
-
- pmcdev->suspend = cnl_suspend;
- pmcdev->resume = mtl_resume;
- pmcdev->regmap_list = mtl_pmc_info_list;
-
- /*
- * If ssram init fails use legacy method to at least get the
- * primary PMC
- */
- ret = pmc_core_ssram_init(pmcdev, func);
- if (ret) {
- ssram_init = false;
- dev_warn(&pmcdev->pdev->dev,
- "ssram init failed, %d, using legacy init\n", ret);
- pmc->map = &mtl_socm_reg_map;
- ret = get_primary_reg_base(pmc);
- if (ret)
- return ret;
- }
-
- pmc_core_get_low_power_modes(pmcdev);
- pmc_core_punit_pmt_init(pmcdev, MTL_PMT_DMU_GUID);
-
- if (ssram_init)
- return pmc_core_ssram_get_lpm_reqs(pmcdev);
-
- return 0;
+ return generic_core_init(pmcdev, pmc_dev_info);
}
+
+struct pmc_dev_info mtl_pmc_dev = {
+ .pci_func = 2,
+ .dmu_guid = MTL_PMT_DMU_GUID,
+ .regmap_list = mtl_pmc_info_list,
+ .map = &mtl_socm_reg_map,
+ .suspend = cnl_suspend,
+ .resume = mtl_resume,
+ .init = mtl_core_init,
+};
diff --git a/drivers/platform/x86/intel/pmc/ptl.c b/drivers/platform/x86/intel/pmc/ptl.c
new file mode 100644
index 000000000000..394515af60d6
--- /dev/null
+++ b/drivers/platform/x86/intel/pmc/ptl.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file contains platform specific structure definitions
+ * and the init function used by the Panther Lake PCH.
+ *
+ * Copyright (c) 2025, Intel Corporation.
+ */
+
+#include <linux/pci.h>
+
+#include "core.h"
+
+static const struct pmc_bit_map ptl_pcdp_pfear_map[] = {
+ {"PMC_0", BIT(0)},
+ {"FUSE_OSSE", BIT(1)},
+ {"ESPISPI", BIT(2)},
+ {"XHCI", BIT(3)},
+ {"SPA", BIT(4)},
+ {"SPB", BIT(5)},
+ {"MPFPW2", BIT(6)},
+ {"GBE", BIT(7)},
+
+ {"SBR16B20", BIT(0)},
+ {"SBR8B20", BIT(1)},
+ {"SBR16B21", BIT(2)},
+ {"DBG_SBR16B", BIT(3)},
+ {"OSSE_HOTHAM", BIT(4)},
+ {"D2D_DISP_1", BIT(5)},
+ {"LPSS", BIT(6)},
+ {"LPC", BIT(7)},
+
+ {"SMB", BIT(0)},
+ {"ISH", BIT(1)},
+ {"SBR16B2", BIT(2)},
+ {"NPK_0", BIT(3)},
+ {"D2D_NOC_1", BIT(4)},
+ {"SBR8B2", BIT(5)},
+ {"FUSE", BIT(6)},
+ {"SBR16B0", BIT(7)},
+
+ {"PSF0", BIT(0)},
+ {"XDCI", BIT(1)},
+ {"EXI", BIT(2)},
+ {"CSE", BIT(3)},
+ {"KVMCC", BIT(4)},
+ {"PMT", BIT(5)},
+ {"CLINK", BIT(6)},
+ {"PTIO", BIT(7)},
+
+ {"USBR0", BIT(0)},
+ {"SUSRAM", BIT(1)},
+ {"SMT1", BIT(2)},
+ {"MPFPW1", BIT(3)},
+ {"SMS2", BIT(4)},
+ {"SMS1", BIT(5)},
+ {"CSMERTC", BIT(6)},
+ {"CSMEPSF", BIT(7)},
+
+ {"D2D_NOC_0", BIT(0)},
+ {"ESE", BIT(1)},
+ {"P2SB8B", BIT(2)},
+ {"SBR16B7", BIT(3)},
+ {"SBR16B3", BIT(4)},
+ {"OSSE_SMT1", BIT(5)},
+ {"D2D_DISP", BIT(6)},
+ {"DBG_SBR", BIT(7)},
+
+ {"U3FPW1", BIT(0)},
+ {"FIA_X", BIT(1)},
+ {"PSF4", BIT(2)},
+ {"CNVI", BIT(3)},
+ {"UFSX2", BIT(4)},
+ {"ENDBG", BIT(5)},
+ {"DBC", BIT(6)},
+ {"FIA_PG", BIT(7)},
+
+ {"D2D_IPU", BIT(0)},
+ {"NPK1", BIT(1)},
+ {"FIACPCB_X", BIT(2)},
+ {"SBR8B4", BIT(3)},
+ {"DBG_PSF", BIT(4)},
+ {"PSF6", BIT(5)},
+ {"UFSPW1", BIT(6)},
+ {"FIA_U", BIT(7)},
+
+ {"PSF8", BIT(0)},
+ {"SBR16B4", BIT(1)},
+ {"SBR16B5", BIT(2)},
+ {"FIACPCB_U", BIT(3)},
+ {"TAM", BIT(4)},
+ {"D2D_NOC_2", BIT(5)},
+ {"TBTLSX", BIT(6)},
+ {"THC0", BIT(7)},
+
+ {"THC1", BIT(0)},
+ {"PMC_1", BIT(1)},
+ {"SBR8B1", BIT(2)},
+ {"TCSS", BIT(3)},
+ {"DISP_PGA", BIT(4)},
+ {"SBR16B1", BIT(5)},
+ {"SBRG", BIT(6)},
+ {"PSF5", BIT(7)},
+
+ {"P2SB16B", BIT(0)},
+ {"ACE_0", BIT(1)},
+ {"ACE_1", BIT(2)},
+ {"ACE_2", BIT(3)},
+ {"ACE_3", BIT(4)},
+ {"ACE_4", BIT(5)},
+ {"ACE_5", BIT(6)},
+ {"ACE_6", BIT(7)},
+
+ {"ACE_7", BIT(0)},
+ {"ACE_8", BIT(1)},
+ {"ACE_9", BIT(2)},
+ {"ACE_10", BIT(3)},
+ {"FIACPCB_PG", BIT(4)},
+ {"SBR16B6", BIT(5)},
+ {"OSSE", BIT(6)},
+ {"SBR8B0", BIT(7)},
+ {}
+};
+
+static const struct pmc_bit_map *ext_ptl_pcdp_pfear_map[] = {
+ ptl_pcdp_pfear_map,
+ NULL
+};
+
+static const struct pmc_bit_map ptl_pcdp_ltr_show_map[] = {
+ {"SOUTHPORT_A", CNP_PMC_LTR_SPA},
+ {"SOUTHPORT_B", CNP_PMC_LTR_SPB},
+ {"SATA", CNP_PMC_LTR_SATA},
+ {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE},
+ {"XHCI", CNP_PMC_LTR_XHCI},
+ {"SOUTHPORT_F", ADL_PMC_LTR_SPF},
+ {"ME", CNP_PMC_LTR_ME},
+ {"SATA1", CNP_PMC_LTR_EVA},
+ {"SOUTHPORT_C", CNP_PMC_LTR_SPC},
+ {"HD_AUDIO", CNP_PMC_LTR_AZ},
+ {"CNV", CNP_PMC_LTR_CNV},
+ {"LPSS", CNP_PMC_LTR_LPSS},
+ {"SOUTHPORT_D", CNP_PMC_LTR_SPD},
+ {"SOUTHPORT_E", CNP_PMC_LTR_SPE},
+ {"SATA2", PTL_PMC_LTR_SATA2},
+ {"ESPI", CNP_PMC_LTR_ESPI},
+ {"SCC", CNP_PMC_LTR_SCC},
+ {"ISH", CNP_PMC_LTR_ISH},
+ {"UFSX2", CNP_PMC_LTR_UFSX2},
+ {"EMMC", CNP_PMC_LTR_EMMC},
+ {"WIGIG", ICL_PMC_LTR_WIGIG},
+ {"THC0", TGL_PMC_LTR_THC0},
+ {"THC1", TGL_PMC_LTR_THC1},
+ {"SOUTHPORT_G", MTL_PMC_LTR_SPG},
+ {"ESE", MTL_PMC_LTR_ESE},
+ {"IOE_PMC", MTL_PMC_LTR_IOE_PMC},
+ {"DMI3", ARL_PMC_LTR_DMI3},
+ {"OSSE", LNL_PMC_LTR_OSSE},
+
+ /* Below two cannot be used for LTR_IGNORE */
+ {"CURRENT_PLATFORM", PTL_PMC_LTR_CUR_PLT},
+ {"AGGREGATED_SYSTEM", PTL_PMC_LTR_CUR_ASLT},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_clocksource_status_map[] = {
+ {"AON2_OFF_STS", BIT(0), 1},
+ {"AON3_OFF_STS", BIT(1), 0},
+ {"AON4_OFF_STS", BIT(2), 1},
+ {"AON5_OFF_STS", BIT(3), 1},
+ {"AON1_OFF_STS", BIT(4), 0},
+ {"XTAL_LVM_OFF_STS", BIT(5), 0},
+ {"MPFPW1_0_PLL_OFF_STS", BIT(6), 1},
+ {"USB3_PLL_OFF_STS", BIT(8), 1},
+ {"AON3_SPL_OFF_STS", BIT(9), 1},
+ {"MPFPW2_0_PLL_OFF_STS", BIT(12), 1},
+ {"XTAL_AGGR_OFF_STS", BIT(17), 1},
+ {"USB2_PLL_OFF_STS", BIT(18), 0},
+ {"SAF_PLL_OFF_STS", BIT(19), 1},
+ {"SE_TCSS_PLL_OFF_STS", BIT(20), 1},
+ {"DDI_PLL_OFF_STS", BIT(21), 1},
+ {"FILTER_PLL_OFF_STS", BIT(22), 1},
+ {"ACE_PLL_OFF_STS", BIT(24), 0},
+ {"FABRIC_PLL_OFF_STS", BIT(25), 1},
+ {"SOC_PLL_OFF_STS", BIT(26), 1},
+ {"REF_PLL_OFF_STS", BIT(28), 1},
+ {"IMG_PLL_OFF_STS", BIT(29), 1},
+ {"RTC_PLL_OFF_STS", BIT(31), 0},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_power_gating_status_0_map[] = {
+ {"PMC_PGD0_PG_STS", BIT(0), 0},
+ {"FUSE_OSSE_PGD0_PG_STS", BIT(1), 0},
+ {"ESPISPI_PGD0_PG_STS", BIT(2), 0},
+ {"XHCI_PGD0_PG_STS", BIT(3), 1},
+ {"SPA_PGD0_PG_STS", BIT(4), 1},
+ {"SPB_PGD0_PG_STS", BIT(5), 1},
+ {"MPFPW2_PGD0_PG_STS", BIT(6), 0},
+ {"GBE_PGD0_PG_STS", BIT(7), 1},
+ {"SBR16B20_PGD0_PG_STS", BIT(8), 0},
+ {"SBR8B20_PGD0_PG_STS", BIT(9), 0},
+ {"SBR16B21_PGD0_PG_STS", BIT(10), 0},
+ {"DBG_PGD0_PG_STS", BIT(11), 0},
+ {"OSSE_HOTHAM_PGD0_PG_STS", BIT(12), 1},
+ {"D2D_DISP_PGD1_PG_STS", BIT(13), 1},
+ {"LPSS_PGD0_PG_STS", BIT(14), 1},
+ {"LPC_PGD0_PG_STS", BIT(15), 0},
+ {"SMB_PGD0_PG_STS", BIT(16), 0},
+ {"ISH_PGD0_PG_STS", BIT(17), 0},
+ {"SBR16B2_PGD0_PG_STS", BIT(18), 0},
+ {"NPK_PGD0_PG_STS", BIT(19), 0},
+ {"D2D_NOC_PGD1_PG_STS", BIT(20), 1},
+ {"SBR8B2_PGD0_PG_STS", BIT(21), 0},
+ {"FUSE_PGD0_PG_STS", BIT(22), 0},
+ {"SBR16B0_PGD0_PG_STS", BIT(23), 0},
+ {"PSF0_PGD0_PG_STS", BIT(24), 0},
+ {"XDCI_PGD0_PG_STS", BIT(25), 1},
+ {"EXI_PGD0_PG_STS", BIT(26), 0},
+ {"CSE_PGD0_PG_STS", BIT(27), 1},
+ {"KVMCC_PGD0_PG_STS", BIT(28), 1},
+ {"PMT_PGD0_PG_STS", BIT(29), 1},
+ {"CLINK_PGD0_PG_STS", BIT(30), 1},
+ {"PTIO_PGD0_PG_STS", BIT(31), 1},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_power_gating_status_1_map[] = {
+ {"USBR0_PGD0_PG_STS", BIT(0), 1},
+ {"SUSRAM_PGD0_PG_STS", BIT(1), 1},
+ {"SMT1_PGD0_PG_STS", BIT(2), 1},
+ {"MPFPW1_PGD0_PG_STS", BIT(3), 0},
+ {"SMS2_PGD0_PG_STS", BIT(4), 1},
+ {"SMS1_PGD0_PG_STS", BIT(5), 1},
+ {"CSMERTC_PGD0_PG_STS", BIT(6), 0},
+ {"CSMEPSF_PGD0_PG_STS", BIT(7), 0},
+ {"D2D_NOC_PGD0_PG_STS", BIT(8), 0},
+ {"ESE_PGD0_PG_STS", BIT(9), 1},
+ {"P2SB8B_PGD0_PG_STS", BIT(10), 1},
+ {"SBR16B7_PGD0_PG_STS", BIT(11), 0},
+ {"SBR16B3_PGD0_PG_STS", BIT(12), 0},
+ {"OSSE_SMT1_PGD0_PG_STS", BIT(13), 1},
+ {"D2D_DISP_PGD0_PG_STS", BIT(14), 1},
+ {"DBG_SBR_PGD0_PG_STS", BIT(15), 0},
+ {"U3FPW1_PGD0_PG_STS", BIT(16), 0},
+ {"FIA_X_PGD0_PG_STS", BIT(17), 0},
+ {"PSF4_PGD0_PG_STS", BIT(18), 0},
+ {"CNVI_PGD0_PG_STS", BIT(19), 0},
+ {"UFSX2_PGD0_PG_STS", BIT(20), 1},
+ {"ENDBG_PGD0_PG_STS", BIT(21), 0},
+ {"DBC_PGD0_PG_STS", BIT(22), 0},
+ {"FIA_PG_PGD0_PG_STS", BIT(23), 0},
+ {"D2D_IPU_PGD0_PG_STS", BIT(24), 1},
+ {"NPK_PGD1_PG_STS", BIT(25), 0},
+ {"FIACPCB_X_PGD0_PG_STS", BIT(26), 0},
+ {"SBR8B4_PGD0_PG_STS", BIT(27), 0},
+ {"DBG_PSF_PGD0_PG_STS", BIT(28), 0},
+ {"PSF6_PGD0_PG_STS", BIT(29), 0},
+ {"UFSPW1_PGD0_PG_STS", BIT(30), 0},
+ {"FIA_U_PGD0_PG_STS", BIT(31), 0},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_power_gating_status_2_map[] = {
+ {"PSF8_PGD0_PG_STS", BIT(0), 0},
+ {"SBR16B4_PGD0_PG_STS", BIT(1), 0},
+ {"SBR16B5_PGD0_PG_STS", BIT(2), 0},
+ {"FIACPCB_U_PGD0_PG_STS", BIT(3), 0},
+ {"TAM_PGD0_PG_STS", BIT(4), 1},
+ {"D2D_NOC_PGD0_PG_STS", BIT(5), 1},
+ {"TBTLSX_PGD0_PG_STS", BIT(6), 1},
+ {"THC0_PGD0_PG_STS", BIT(7), 1},
+ {"THC1_PGD0_PG_STS", BIT(8), 1},
+ {"PMC_PGD1_PG_STS", BIT(9), 0},
+ {"SBR8B1_PGD0_PG_STS", BIT(10), 0},
+ {"TCSS_PGD0_PG_STS", BIT(11), 0},
+ {"DISP_PGA_PGD0_PG_STS", BIT(12), 0},
+ {"SBR16B1_PGD0_PG_STS", BIT(13), 0},
+ {"SBRG_PGD0_PG_STS", BIT(14), 0},
+ {"PSF5_PGD0_PG_STS", BIT(15), 0},
+ {"P2SB16B_PGD0_PG_STS", BIT(16), 1},
+ {"ACE_PGD0_PG_STS", BIT(17), 0},
+ {"ACE_PGD1_PG_STS", BIT(18), 0},
+ {"ACE_PGD2_PG_STS", BIT(19), 0},
+ {"ACE_PGD3_PG_STS", BIT(20), 0},
+ {"ACE_PGD4_PG_STS", BIT(21), 0},
+ {"ACE_PGD5_PG_STS", BIT(22), 0},
+ {"ACE_PGD6_PG_STS", BIT(23), 0},
+ {"ACE_PGD7_PG_STS", BIT(24), 0},
+ {"ACE_PGD8_PG_STS", BIT(25), 0},
+ {"ACE_PGD9_PG_STS", BIT(26), 0},
+ {"ACE_PGD10_PG_STS", BIT(27), 0},
+ {"FIACPCB_PG_PGD0_PG_STS", BIT(28), 0},
+ {"SBR16B6_PGD0_PG_STS", BIT(29), 0},
+ {"OSSE_PGD0_PG_STS", BIT(30), 1},
+ {"SBR8B0_PGD0_PG_STS", BIT(31), 0},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_d3_status_0_map[] = {
+ {"LPSS_D3_STS", BIT(3), 1},
+ {"XDCI_D3_STS", BIT(4), 1},
+ {"XHCI_D3_STS", BIT(5), 1},
+ {"OSSE_D3_STS", BIT(6), 0},
+ {"SPA_D3_STS", BIT(12), 0},
+ {"SPB_D3_STS", BIT(13), 0},
+ {"ESPISPI_D3_STS", BIT(18), 0},
+ {"PSTH_D3_STS", BIT(21), 0},
+ {"OSSE_SMT1_D3_STS", BIT(30), 0},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_d3_status_1_map[] = {
+ {"GBE_D3_STS", BIT(19), 0},
+ {"ITSS_D3_STS", BIT(23), 0},
+ {"CNVI_D3_STS", BIT(27), 0},
+ {"UFSX2_D3_STS", BIT(28), 1},
+ {"OSSE_HOTHAM_D3_STS", BIT(29), 0},
+ {"ESE_D3_STS", BIT(30), 0},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_d3_status_2_map[] = {
+ {"CSMERTC_D3_STS", BIT(1), 0},
+ {"SUSRAM_D3_STS", BIT(2), 0},
+ {"CSE_D3_STS", BIT(4), 0},
+ {"KVMCC_D3_STS", BIT(5), 0},
+ {"USBR0_D3_STS", BIT(6), 0},
+ {"ISH_D3_STS", BIT(7), 0},
+ {"SMT1_D3_STS", BIT(8), 0},
+ {"SMT2_D3_STS", BIT(9), 0},
+ {"SMT3_D3_STS", BIT(10), 0},
+ {"OSSE_SMT2_D3_STS", BIT(12), 0},
+ {"CLINK_D3_STS", BIT(14), 0},
+ {"PTIO_D3_STS", BIT(16), 0},
+ {"PMT_D3_STS", BIT(17), 0},
+ {"SMS1_D3_STS", BIT(18), 0},
+ {"SMS2_D3_STS", BIT(19), 0},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_d3_status_3_map[] = {
+ {"THC0_D3_STS", BIT(14), 1},
+ {"THC1_D3_STS", BIT(15), 1},
+ {"OSSE_SMT3_D3_STS", BIT(18), 0},
+ {"ACE_D3_STS", BIT(23), 0},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_vnn_req_status_0_map[] = {
+ {"LPSS_VNN_REQ_STS", BIT(3), 1},
+ {"OSSE_VNN_REQ_STS", BIT(6), 1},
+ {"ESPISPI_VNN_REQ_STS", BIT(18), 1},
+ {"OSSE_SMT1_VNN_REQ_STS", BIT(30), 1},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_vnn_req_status_1_map[] = {
+ {"NPK_VNN_REQ_STS", BIT(4), 1},
+ {"DFXAGG_VNN_REQ_STS", BIT(8), 0},
+ {"EXI_VNN_REQ_STS", BIT(9), 1},
+ {"P2D_VNN_REQ_STS", BIT(18), 1},
+ {"GBE_VNN_REQ_STS", BIT(19), 1},
+ {"SMB_VNN_REQ_STS", BIT(25), 1},
+ {"LPC_VNN_REQ_STS", BIT(26), 0},
+ {"ESE_VNN_REQ_STS", BIT(30), 1},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_vnn_req_status_2_map[] = {
+ {"CSMERTC_VNN_REQ_STS", BIT(1), 1},
+ {"CSE_VNN_REQ_STS", BIT(4), 1},
+ {"ISH_VNN_REQ_STS", BIT(7), 1},
+ {"SMT1_VNN_REQ_STS", BIT(8), 1},
+ {"CLINK_VNN_REQ_STS", BIT(14), 1},
+ {"SMS1_VNN_REQ_STS", BIT(18), 1},
+ {"SMS2_VNN_REQ_STS", BIT(19), 1},
+ {"GPIOCOM4_VNN_REQ_STS", BIT(20), 1},
+ {"GPIOCOM3_VNN_REQ_STS", BIT(21), 1},
+ {"GPIOCOM1_VNN_REQ_STS", BIT(23), 1},
+ {"GPIOCOM0_VNN_REQ_STS", BIT(24), 1},
+ {"DISP_SHIM_VNN_REQ_STS", BIT(26), 1},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_vnn_req_status_3_map[] = {
+ {"DTS0_VNN_REQ_STS", BIT(7), 0},
+ {"GPIOCOM5_VNN_REQ_STS", BIT(11), 1},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_vnn_misc_status_map[] = {
+ {"CPU_C10_REQ_STS", BIT(0), 0},
+ {"TS_OFF_REQ_STS", BIT(1), 0},
+ {"PNDE_MET_REQ_STS", BIT(2), 1},
+ {"PG5_PMA0_REQ_STS", BIT(3), 0},
+ {"FW_THROTTLE_ALLOWED_REQ_STS", BIT(4), 0},
+ {"VNN_SOC_REQ_STS", BIT(6), 1},
+ {"ISH_VNNAON_REQ_STS", BIT(7), 0},
+ {"D2D_NOC_CFI_QACTIVE_REQ_STS", BIT(8), 1},
+ {"D2D_NOC_GPSB_QACTIVE_REQ_STS", BIT(9), 1},
+ {"D2D_IPU_QACTIVE_REQ_STS", BIT(10), 1},
+ {"PLT_GREATER_REQ_STS", BIT(11), 1},
+ {"ALL_SBR_IDLE_REQ_STS", BIT(12), 0},
+ {"PMC_IDLE_FB_OCP_REQ_STS", BIT(13), 0},
+ {"PM_SYNC_STATES_REQ_STS", BIT(14), 0},
+ {"EA_REQ_STS", BIT(15), 0},
+ {"MPHY_CORE_OFF_REQ_STS", BIT(16), 0},
+ {"BRK_EV_EN_REQ_STS", BIT(17), 0},
+ {"AUTO_DEMO_EN_REQ_STS", BIT(18), 0},
+ {"ITSS_CLK_SRC_REQ_STS", BIT(19), 1},
+ {"ARC_IDLE_REQ_STS", BIT(21), 0},
+ {"PG5_PMA1_REQ_STS", BIT(22), 0},
+ {"FIA_DEEP_PM_REQ_STS", BIT(23), 0},
+ {"XDCI_ATTACHED_REQ_STS", BIT(24), 1},
+ {"ARC_INTERRUPT_WAKE_REQ_STS", BIT(25), 0},
+ {"D2D_DISP_DDI_QACTIVE_REQ_STS", BIT(26), 1},
+ {"PRE_WAKE0_REQ_STS", BIT(27), 1},
+ {"PRE_WAKE1_REQ_STS", BIT(28), 1},
+ {"PRE_WAKE2_REQ_STS", BIT(29), 1},
+ {"D2D_DISP_EDP_QACTIVE_REQ_STS", BIT(31), 1},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_signal_status_map[] = {
+ {"LSX_Wake0_STS", BIT(0), 0},
+ {"LSX_Wake1_STS", BIT(1), 0},
+ {"LSX_Wake2_STS", BIT(2), 0},
+ {"LSX_Wake3_STS", BIT(3), 0},
+ {"LSX_Wake4_STS", BIT(4), 0},
+ {"LSX_Wake5_STS", BIT(5), 0},
+ {"LSX_Wake6_STS", BIT(6), 0},
+ {"LSX_Wake7_STS", BIT(7), 0},
+ {"LPSS_Wake0_STS", BIT(8), 1},
+ {"LPSS_Wake1_STS", BIT(9), 1},
+ {"Int_Timer_SS_Wake0_STS", BIT(10), 1},
+ {"Int_Timer_SS_Wake1_STS", BIT(11), 1},
+ {"Int_Timer_SS_Wake2_STS", BIT(12), 1},
+ {"Int_Timer_SS_Wake3_STS", BIT(13), 1},
+ {"Int_Timer_SS_Wake4_STS", BIT(14), 1},
+ {"Int_Timer_SS_Wake5_STS", BIT(15), 1},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_rsc_status_map[] = {
+ {"Memory", 0, 1},
+ {"PSF0", 0, 1},
+ {"PSF4", 0, 1},
+ {"PSF5", 0, 1},
+ {"PSF6", 0, 1},
+ {"PSF8", 0, 1},
+ {"SAF_CFI_LINK", 0, 1},
+ {"SB", 0, 1},
+ {}
+};
+
+static const struct pmc_bit_map *ptl_pcdp_lpm_maps[] = {
+ ptl_pcdp_clocksource_status_map,
+ ptl_pcdp_power_gating_status_0_map,
+ ptl_pcdp_power_gating_status_1_map,
+ ptl_pcdp_power_gating_status_2_map,
+ ptl_pcdp_d3_status_0_map,
+ ptl_pcdp_d3_status_1_map,
+ ptl_pcdp_d3_status_2_map,
+ ptl_pcdp_d3_status_3_map,
+ ptl_pcdp_vnn_req_status_0_map,
+ ptl_pcdp_vnn_req_status_1_map,
+ ptl_pcdp_vnn_req_status_2_map,
+ ptl_pcdp_vnn_req_status_3_map,
+ ptl_pcdp_vnn_misc_status_map,
+ ptl_pcdp_signal_status_map,
+ NULL
+};
+
+static const struct pmc_bit_map *ptl_pcdp_blk_maps[] = {
+ ptl_pcdp_power_gating_status_0_map,
+ ptl_pcdp_power_gating_status_1_map,
+ ptl_pcdp_power_gating_status_2_map,
+ ptl_pcdp_rsc_status_map,
+ ptl_pcdp_vnn_req_status_0_map,
+ ptl_pcdp_vnn_req_status_1_map,
+ ptl_pcdp_vnn_req_status_2_map,
+ ptl_pcdp_vnn_req_status_3_map,
+ ptl_pcdp_d3_status_0_map,
+ ptl_pcdp_d3_status_1_map,
+ ptl_pcdp_d3_status_2_map,
+ ptl_pcdp_d3_status_3_map,
+ ptl_pcdp_clocksource_status_map,
+ ptl_pcdp_vnn_misc_status_map,
+ ptl_pcdp_signal_status_map,
+ NULL
+};
+
+static const struct pmc_reg_map ptl_pcdp_reg_map = {
+ .pfear_sts = ext_ptl_pcdp_pfear_map,
+ .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP,
+ .ltr_show_sts = ptl_pcdp_ltr_show_map,
+ .msr_sts = msr_map,
+ .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+ .regmap_length = PTL_PCD_PMC_MMIO_REG_LEN,
+ .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+ .ppfear_buckets = LNL_PPFEAR_NUM_ENTRIES,
+ .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
+ .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .lpm_num_maps = PTL_LPM_NUM_MAPS,
+ .ltr_ignore_max = LNL_NUM_IP_IGN_ALLOWED,
+ .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
+ .etr3_offset = ETR3_OFFSET,
+ .lpm_sts_latch_en_offset = MTL_LPM_STATUS_LATCH_EN_OFFSET,
+ .lpm_priority_offset = MTL_LPM_PRI_OFFSET,
+ .lpm_en_offset = MTL_LPM_EN_OFFSET,
+ .lpm_residency_offset = MTL_LPM_RESIDENCY_OFFSET,
+ .lpm_sts = ptl_pcdp_lpm_maps,
+ .lpm_status_offset = MTL_LPM_STATUS_OFFSET,
+ .lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET,
+ .s0ix_blocker_maps = ptl_pcdp_blk_maps,
+ .s0ix_blocker_offset = LNL_S0IX_BLOCKER_OFFSET,
+};
+
+#define PTL_NPU_PCI_DEV 0xb03e
+#define PTL_IPU_PCI_DEV 0xb05d
+
+/*
+ * Set power state of select devices that do not have drivers to D3
+ * so that they do not block Package C entry.
+ */
+static void ptl_d3_fixup(void)
+{
+ pmc_core_set_device_d3(PTL_IPU_PCI_DEV);
+ pmc_core_set_device_d3(PTL_NPU_PCI_DEV);
+}
+
+static int ptl_resume(struct pmc_dev *pmcdev)
+{
+ ptl_d3_fixup();
+ return cnl_resume(pmcdev);
+}
+
+static int ptl_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
+{
+ ptl_d3_fixup();
+ return generic_core_init(pmcdev, pmc_dev_info);
+}
+
+struct pmc_dev_info ptl_pmc_dev = {
+ .map = &ptl_pcdp_reg_map,
+ .suspend = cnl_suspend,
+ .resume = ptl_resume,
+ .init = ptl_core_init,
+};
diff --git a/drivers/platform/x86/intel/pmc/spt.c b/drivers/platform/x86/intel/pmc/spt.c
index ab993a69e33e..b50534aa2cba 100644
--- a/drivers/platform/x86/intel/pmc/spt.c
+++ b/drivers/platform/x86/intel/pmc/spt.c
@@ -8,9 +8,11 @@
*
*/
+#include <linux/pci.h>
+
#include "core.h"
-const struct pmc_bit_map spt_pll_map[] = {
+static const struct pmc_bit_map spt_pll_map[] = {
{"MIPI PLL", SPT_PMC_BIT_MPHY_CMN_LANE0},
{"GEN2 USB2PCIE2 PLL", SPT_PMC_BIT_MPHY_CMN_LANE1},
{"DMIPCIE3 PLL", SPT_PMC_BIT_MPHY_CMN_LANE2},
@@ -18,7 +20,7 @@ const struct pmc_bit_map spt_pll_map[] = {
{}
};
-const struct pmc_bit_map spt_mphy_map[] = {
+static const struct pmc_bit_map spt_mphy_map[] = {
{"MPHY CORE LANE 0", SPT_PMC_BIT_MPHY_LANE0},
{"MPHY CORE LANE 1", SPT_PMC_BIT_MPHY_LANE1},
{"MPHY CORE LANE 2", SPT_PMC_BIT_MPHY_LANE2},
@@ -38,7 +40,7 @@ const struct pmc_bit_map spt_mphy_map[] = {
{}
};
-const struct pmc_bit_map spt_pfear_map[] = {
+static const struct pmc_bit_map spt_pfear_map[] = {
{"PMC", SPT_PMC_BIT_PMC},
{"OPI-DMI", SPT_PMC_BIT_OPI},
{"SPI / eSPI", SPT_PMC_BIT_SPI},
@@ -82,7 +84,7 @@ const struct pmc_bit_map spt_pfear_map[] = {
{}
};
-const struct pmc_bit_map *ext_spt_pfear_map[] = {
+static const struct pmc_bit_map *ext_spt_pfear_map[] = {
/*
* Check intel_pmc_core_ids[] users of spt_reg_map for
* a list of core SoCs using this.
@@ -91,7 +93,7 @@ const struct pmc_bit_map *ext_spt_pfear_map[] = {
NULL
};
-const struct pmc_bit_map spt_ltr_show_map[] = {
+static const struct pmc_bit_map spt_ltr_show_map[] = {
{"SOUTHPORT_A", SPT_PMC_LTR_SPA},
{"SOUTHPORT_B", SPT_PMC_LTR_SPB},
{"SATA", SPT_PMC_LTR_SATA},
@@ -116,7 +118,7 @@ const struct pmc_bit_map spt_ltr_show_map[] = {
{}
};
-const struct pmc_reg_map spt_reg_map = {
+static const struct pmc_reg_map spt_reg_map = {
.pfear_sts = ext_spt_pfear_map,
.mphy_sts = spt_mphy_map,
.pll_sts = spt_pll_map,
@@ -134,18 +136,25 @@ const struct pmc_reg_map spt_reg_map = {
.pm_vric1_offset = SPT_PMC_VRIC1_OFFSET,
};
-int spt_core_init(struct pmc_dev *pmcdev)
-{
- struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
- int ret;
-
- pmc->map = &spt_reg_map;
-
- ret = get_primary_reg_base(pmc);
- if (ret)
- return ret;
+static const struct pci_device_id spt_pmc_pci_id[] = {
+ { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID) },
+ { }
+};
- pmc_core_get_low_power_modes(pmcdev);
+static int spt_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
+{
+ /*
+	 * Coffee Lake reports Kaby Lake's CPU ID but is paired with a Cannon
+	 * Lake PCH, so the Sunrisepoint PCH regmap can't be used here. Fall
+	 * back to the Cannon Lake PCH regmap in that case.
+ */
+ if (!pci_dev_present(spt_pmc_pci_id))
+ return generic_core_init(pmcdev, &cnp_pmc_dev);
- return ret;
+ return generic_core_init(pmcdev, pmc_dev_info);
}
+
+struct pmc_dev_info spt_pmc_dev = {
+ .map = &spt_reg_map,
+ .init = spt_core_init,
+};
diff --git a/drivers/platform/x86/intel/pmc/tgl.c b/drivers/platform/x86/intel/pmc/tgl.c
index e0580de18077..02e731ed3391 100644
--- a/drivers/platform/x86/intel/pmc/tgl.c
+++ b/drivers/platform/x86/intel/pmc/tgl.c
@@ -18,7 +18,7 @@ enum pch_type {
PCH_LP
};
-const struct pmc_bit_map tgl_pfear_map[] = {
+static const struct pmc_bit_map tgl_pfear_map[] = {
{"PSF9", BIT(0)},
{"RES_66", BIT(1)},
{"RES_67", BIT(2)},
@@ -29,7 +29,7 @@ const struct pmc_bit_map tgl_pfear_map[] = {
{}
};
-const struct pmc_bit_map *ext_tgl_pfear_map[] = {
+static const struct pmc_bit_map *ext_tgl_pfear_map[] = {
/*
* Check intel_pmc_core_ids[] users of tgl_reg_map for
* a list of core SoCs using this.
@@ -39,7 +39,7 @@ const struct pmc_bit_map *ext_tgl_pfear_map[] = {
NULL
};
-const struct pmc_bit_map tgl_clocksource_status_map[] = {
+static const struct pmc_bit_map tgl_clocksource_status_map[] = {
{"USB2PLL_OFF_STS", BIT(18)},
{"PCIe/USB3.1_Gen2PLL_OFF_STS", BIT(19)},
{"PCIe_Gen3PLL_OFF_STS", BIT(20)},
@@ -55,7 +55,7 @@ const struct pmc_bit_map tgl_clocksource_status_map[] = {
{}
};
-const struct pmc_bit_map tgl_power_gating_status_map[] = {
+static const struct pmc_bit_map tgl_power_gating_status_map[] = {
{"CSME_PG_STS", BIT(0)},
{"SATA_PG_STS", BIT(1)},
{"xHCI_PG_STS", BIT(2)},
@@ -83,7 +83,7 @@ const struct pmc_bit_map tgl_power_gating_status_map[] = {
{}
};
-const struct pmc_bit_map tgl_d3_status_map[] = {
+static const struct pmc_bit_map tgl_d3_status_map[] = {
{"ADSP_D3_STS", BIT(0)},
{"SATA_D3_STS", BIT(1)},
{"xHCI0_D3_STS", BIT(2)},
@@ -98,7 +98,7 @@ const struct pmc_bit_map tgl_d3_status_map[] = {
{}
};
-const struct pmc_bit_map tgl_vnn_req_status_map[] = {
+static const struct pmc_bit_map tgl_vnn_req_status_map[] = {
{"GPIO_COM0_VNN_REQ_STS", BIT(1)},
{"GPIO_COM1_VNN_REQ_STS", BIT(2)},
{"GPIO_COM2_VNN_REQ_STS", BIT(3)},
@@ -123,7 +123,7 @@ const struct pmc_bit_map tgl_vnn_req_status_map[] = {
{}
};
-const struct pmc_bit_map tgl_vnn_misc_status_map[] = {
+static const struct pmc_bit_map tgl_vnn_misc_status_map[] = {
{"CPU_C10_REQ_STS_0", BIT(0)},
{"PCIe_LPM_En_REQ_STS_3", BIT(3)},
{"ITH_REQ_STS_5", BIT(5)},
@@ -175,7 +175,7 @@ const struct pmc_bit_map tgl_signal_status_map[] = {
{}
};
-const struct pmc_bit_map *tgl_lpm_maps[] = {
+static const struct pmc_bit_map *tgl_lpm_maps[] = {
tgl_clocksource_status_map,
tgl_power_gating_status_map,
tgl_d3_status_map,
@@ -185,7 +185,7 @@ const struct pmc_bit_map *tgl_lpm_maps[] = {
NULL
};
-const struct pmc_reg_map tgl_reg_map = {
+static const struct pmc_reg_map tgl_reg_map = {
.pfear_sts = ext_tgl_pfear_map,
.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
.slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP,
@@ -210,7 +210,7 @@ const struct pmc_reg_map tgl_reg_map = {
.etr3_offset = ETR3_OFFSET,
};
-const struct pmc_reg_map tgl_h_reg_map = {
+static const struct pmc_reg_map tgl_h_reg_map = {
.pfear_sts = ext_tgl_pfear_map,
.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
.slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP,
@@ -285,35 +285,28 @@ free_acpi_obj:
ACPI_FREE(out_obj);
}
-int tgl_l_core_init(struct pmc_dev *pmcdev)
+static int tgl_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
{
- return tgl_core_generic_init(pmcdev, PCH_LP);
-}
-
-int tgl_core_init(struct pmc_dev *pmcdev)
-{
- return tgl_core_generic_init(pmcdev, PCH_H);
-}
-
-int tgl_core_generic_init(struct pmc_dev *pmcdev, int pch_tp)
-{
- struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
int ret;
- if (pch_tp == PCH_H)
- pmc->map = &tgl_h_reg_map;
- else
- pmc->map = &tgl_reg_map;
-
- pmcdev->suspend = cnl_suspend;
- pmcdev->resume = cnl_resume;
-
- ret = get_primary_reg_base(pmc);
+ ret = generic_core_init(pmcdev, pmc_dev_info);
if (ret)
return ret;
- pmc_core_get_low_power_modes(pmcdev);
pmc_core_get_tgl_lpm_reqs(pmcdev->pdev);
-
return 0;
}
+
+struct pmc_dev_info tgl_l_pmc_dev = {
+ .map = &tgl_reg_map,
+ .suspend = cnl_suspend,
+ .resume = cnl_resume,
+ .init = tgl_core_init,
+};
+
+struct pmc_dev_info tgl_pmc_dev = {
+ .map = &tgl_h_reg_map,
+ .suspend = cnl_suspend,
+ .resume = cnl_resume,
+ .init = tgl_core_init,
+};
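
All three PMC platform files above now only fill in a struct pmc_dev_info and leave the probe-time wiring to core.c, which is not shown in this diff. A minimal sketch of the dispatch these files appear to rely on, assuming generic_core_init() consumes the .map/.suspend/.resume members whenever no custom .init hook is provided (the function name below is hypothetical):

/*
 * Sketch of the assumed core.c dispatch (not part of this diff): prefer a
 * platform-specific .init hook and otherwise fall back to the generic path.
 */
static int pmc_core_init_platform(struct pmc_dev *pmcdev,
				  struct pmc_dev_info *pmc_dev_info)
{
	if (pmc_dev_info->init)
		return pmc_dev_info->init(pmcdev, pmc_dev_info);

	return generic_core_init(pmcdev, pmc_dev_info);
}
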
diff --git a/drivers/platform/x86/lenovo-wmi-hotkey-utilities.c b/drivers/platform/x86/lenovo-wmi-hotkey-utilities.c
new file mode 100644
index 000000000000..89153afd7015
--- /dev/null
+++ b/drivers/platform/x86/lenovo-wmi-hotkey-utilities.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lenovo Super Hotkey Utility WMI extras driver for IdeaPad laptops
+ *
+ * Copyright (C) 2025 Lenovo
+ */
+
+#include <linux/cleanup.h>
+#include <linux/dev_printk.h>
+#include <linux/device.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/wmi.h>
+
+/* Lenovo Super Hotkey WMI GUIDs */
+#define LUD_WMI_METHOD_GUID "CE6C0974-0407-4F50-88BA-4FC3B6559AD8"
+
+/* Lenovo Utility Data WMI method_id */
+#define WMI_LUD_GET_SUPPORT 1
+#define WMI_LUD_SET_FEATURE 2
+
+#define WMI_LUD_GET_MICMUTE_LED_VER 20
+#define WMI_LUD_GET_AUDIOMUTE_LED_VER 26
+
+#define WMI_LUD_SUPPORT_MICMUTE_LED_VER 25
+#define WMI_LUD_SUPPORT_AUDIOMUTE_LED_VER 27
+
+/* Input parameters to mute/unmute audio LED and Mic LED */
+struct wmi_led_args {
+ u8 id;
+ u8 subid;
+ u16 value;
+};
+
+/* Values of input parameters to SetFeature of audio LED and Mic LED */
+enum hotkey_set_feature {
+ MIC_MUTE_LED_ON = 1,
+ MIC_MUTE_LED_OFF = 2,
+ AUDIO_MUTE_LED_ON = 4,
+ AUDIO_MUTE_LED_OFF = 5,
+};
+
+#define LSH_ACPI_LED_MAX 2
+
+struct lenovo_super_hotkey_wmi_private {
+ struct led_classdev cdev[LSH_ACPI_LED_MAX];
+ struct wmi_device *led_wdev;
+};
+
+enum mute_led_type {
+ MIC_MUTE,
+ AUDIO_MUTE,
+};
+
+static int lsh_wmi_mute_led_set(enum mute_led_type led_type, struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+
+{
+ struct lenovo_super_hotkey_wmi_private *wpriv = container_of(led_cdev,
+ struct lenovo_super_hotkey_wmi_private, cdev[led_type]);
+ struct wmi_led_args led_arg = {0, 0, 0};
+ struct acpi_buffer input;
+ acpi_status status;
+
+ switch (led_type) {
+ case MIC_MUTE:
+ led_arg.id = brightness == LED_ON ? MIC_MUTE_LED_ON : MIC_MUTE_LED_OFF;
+ break;
+ case AUDIO_MUTE:
+ led_arg.id = brightness == LED_ON ? AUDIO_MUTE_LED_ON : AUDIO_MUTE_LED_OFF;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ input.length = sizeof(led_arg);
+ input.pointer = &led_arg;
+ status = wmidev_evaluate_method(wpriv->led_wdev, 0, WMI_LUD_SET_FEATURE, &input, NULL);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ return 0;
+}
+
+static int lsh_wmi_audiomute_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+
+{
+ return lsh_wmi_mute_led_set(AUDIO_MUTE, led_cdev, brightness);
+}
+
+static int lsh_wmi_micmute_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ return lsh_wmi_mute_led_set(MIC_MUTE, led_cdev, brightness);
+}
+
+static int lenovo_super_hotkey_wmi_led_init(enum mute_led_type led_type, struct device *dev)
+{
+ struct lenovo_super_hotkey_wmi_private *wpriv = dev_get_drvdata(dev);
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer input;
+ int led_version, err = 0;
+ unsigned int wmiarg;
+ acpi_status status;
+
+ switch (led_type) {
+ case MIC_MUTE:
+ wmiarg = WMI_LUD_GET_MICMUTE_LED_VER;
+ break;
+ case AUDIO_MUTE:
+ wmiarg = WMI_LUD_GET_AUDIOMUTE_LED_VER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ input.length = sizeof(wmiarg);
+ input.pointer = &wmiarg;
+ status = wmidev_evaluate_method(wpriv->led_wdev, 0, WMI_LUD_GET_SUPPORT, &input, &output);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ union acpi_object *obj __free(kfree) = output.pointer;
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ led_version = obj->integer.value;
+ else
+ return -EIO;
+
+ wpriv->cdev[led_type].max_brightness = LED_ON;
+ wpriv->cdev[led_type].flags = LED_CORE_SUSPENDRESUME;
+
+ switch (led_type) {
+ case MIC_MUTE:
+ if (led_version != WMI_LUD_SUPPORT_MICMUTE_LED_VER)
+ return -EIO;
+
+ wpriv->cdev[led_type].name = "platform::micmute";
+ wpriv->cdev[led_type].brightness_set_blocking = &lsh_wmi_micmute_led_set;
+ wpriv->cdev[led_type].default_trigger = "audio-micmute";
+ break;
+ case AUDIO_MUTE:
+ if (led_version != WMI_LUD_SUPPORT_AUDIOMUTE_LED_VER)
+ return -EIO;
+
+ wpriv->cdev[led_type].name = "platform::mute";
+ wpriv->cdev[led_type].brightness_set_blocking = &lsh_wmi_audiomute_led_set;
+ wpriv->cdev[led_type].default_trigger = "audio-mute";
+ break;
+ default:
+ dev_err(dev, "Unknown LED type %d\n", led_type);
+ return -EINVAL;
+ }
+
+ err = devm_led_classdev_register(dev, &wpriv->cdev[led_type]);
+ if (err < 0) {
+ dev_err(dev, "Could not register mute LED %d : %d\n", led_type, err);
+ return err;
+ }
+ return 0;
+}
+
+static int lenovo_super_hotkey_wmi_leds_setup(struct device *dev)
+{
+ int err;
+
+ err = lenovo_super_hotkey_wmi_led_init(MIC_MUTE, dev);
+ if (err)
+ return err;
+
+ err = lenovo_super_hotkey_wmi_led_init(AUDIO_MUTE, dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int lenovo_super_hotkey_wmi_probe(struct wmi_device *wdev, const void *context)
+{
+ struct lenovo_super_hotkey_wmi_private *wpriv;
+
+ wpriv = devm_kzalloc(&wdev->dev, sizeof(*wpriv), GFP_KERNEL);
+ if (!wpriv)
+ return -ENOMEM;
+
+ dev_set_drvdata(&wdev->dev, wpriv);
+ wpriv->led_wdev = wdev;
+ return lenovo_super_hotkey_wmi_leds_setup(&wdev->dev);
+}
+
+static const struct wmi_device_id lenovo_super_hotkey_wmi_id_table[] = {
+ { LUD_WMI_METHOD_GUID, NULL }, /* Utility data */
+ { }
+};
+
+MODULE_DEVICE_TABLE(wmi, lenovo_super_hotkey_wmi_id_table);
+
+static struct wmi_driver lenovo_wmi_hotkey_utilities_driver = {
+ .driver = {
+ .name = "lenovo_wmi_hotkey_utilities",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS
+ },
+ .id_table = lenovo_super_hotkey_wmi_id_table,
+ .probe = lenovo_super_hotkey_wmi_probe,
+ .no_singleton = true,
+};
+
+module_wmi_driver(lenovo_wmi_hotkey_utilities_driver);
+
+MODULE_AUTHOR("Jackie Dong <dongeg1@lenovo.com>");
+MODULE_DESCRIPTION("Lenovo Super Hotkey Utility WMI extras driver");
+MODULE_LICENSE("GPL");
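
Because the two LED class devices above set default_trigger to "audio-micmute" and "audio-mute", they normally follow the audio LED trigger rather than being driven directly from user space. For context only, a minimal sketch of how a sound/codec driver elsewhere in the tree typically feeds those triggers; none of this code is part of the patch:

#include <linux/leds.h>

/* Illustrative only: reporting mute state changes to the audio LED triggers. */
static void example_report_mute_state(bool mic_muted, bool speaker_muted)
{
	ledtrig_audio_set(LED_AUDIO_MICMUTE, mic_muted ? LED_ON : LED_OFF);
	ledtrig_audio_set(LED_AUDIO_MUTE, speaker_muted ? LED_ON : LED_OFF);
}
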
diff --git a/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c b/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
index a96b215cd2c5..25933cd018d1 100644
--- a/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
+++ b/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
@@ -219,7 +219,7 @@ static int yt2_1380_fc_serdev_probe(struct serdev_device *serdev)
return 0;
}
-struct serdev_device_driver yt2_1380_fc_serdev_driver = {
+static struct serdev_device_driver yt2_1380_fc_serdev_driver = {
.probe = yt2_1380_fc_serdev_probe,
.driver = {
.name = KBUILD_MODNAME,
diff --git a/drivers/platform/x86/samsung-galaxybook.c b/drivers/platform/x86/samsung-galaxybook.c
new file mode 100644
index 000000000000..5878a351993e
--- /dev/null
+++ b/drivers/platform/x86/samsung-galaxybook.c
@@ -0,0 +1,1425 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Samsung Galaxy Book driver
+ *
+ * Copyright (c) 2025 Joshua Grisham <josh@joshuagrisham.com>
+ *
+ * With contributions to the SCAI ACPI device interface:
+ * Copyright (c) 2024 Giulio Girardi <giulio.girardi@protechgroup.it>
+ *
+ * Implementation inspired by existing x86 platform drivers.
+ * Thank you to the authors!
+ */
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/i8042.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/platform_profile.h>
+#include <linux/serio.h>
+#include <linux/sysfs.h>
+#include <linux/uuid.h>
+#include <linux/workqueue.h>
+#include <acpi/battery.h>
+#include "firmware_attributes_class.h"
+
+#define DRIVER_NAME "samsung-galaxybook"
+
+struct samsung_galaxybook {
+ struct platform_device *platform;
+ struct acpi_device *acpi;
+
+ struct device *fw_attrs_dev;
+ struct kset *fw_attrs_kset;
+	/* serializes firmware attribute updates across multiple threads */
+ struct mutex fw_attr_lock;
+
+ bool has_kbd_backlight;
+ bool has_block_recording;
+ bool has_performance_mode;
+
+ struct led_classdev kbd_backlight;
+ struct work_struct kbd_backlight_hotkey_work;
+	/* serializes brightness updates from the hotkey handler and other threads */
+ struct mutex kbd_backlight_lock;
+
+ void *i8042_filter_ptr;
+
+ struct work_struct block_recording_hotkey_work;
+ struct input_dev *camera_lens_cover_switch;
+
+ struct acpi_battery_hook battery_hook;
+
+ u8 profile_performance_modes[PLATFORM_PROFILE_LAST];
+};
+
+enum galaxybook_fw_attr_id {
+ GB_ATTR_POWER_ON_LID_OPEN,
+ GB_ATTR_USB_CHARGING,
+ GB_ATTR_BLOCK_RECORDING,
+};
+
+static const char * const galaxybook_fw_attr_name[] = {
+ [GB_ATTR_POWER_ON_LID_OPEN] = "power_on_lid_open",
+ [GB_ATTR_USB_CHARGING] = "usb_charging",
+ [GB_ATTR_BLOCK_RECORDING] = "block_recording",
+};
+
+static const char * const galaxybook_fw_attr_desc[] = {
+ [GB_ATTR_POWER_ON_LID_OPEN] = "Power On Lid Open",
+ [GB_ATTR_USB_CHARGING] = "USB Charging",
+ [GB_ATTR_BLOCK_RECORDING] = "Block Recording",
+};
+
+#define GB_ATTR_LANGUAGE_CODE "en_US.UTF-8"
+
+struct galaxybook_fw_attr {
+ struct samsung_galaxybook *galaxybook;
+ enum galaxybook_fw_attr_id fw_attr_id;
+ struct attribute_group attr_group;
+ struct kobj_attribute display_name;
+ struct kobj_attribute current_value;
+ int (*get_value)(struct samsung_galaxybook *galaxybook, bool *value);
+ int (*set_value)(struct samsung_galaxybook *galaxybook, const bool value);
+};
+
+struct sawb {
+ u16 safn;
+ u16 sasb;
+ u8 rflg;
+ union {
+ struct {
+ u8 gunm;
+ u8 guds[250];
+ } __packed;
+ struct {
+ u8 caid[16];
+ u8 fncn;
+ u8 subn;
+ u8 iob0;
+ u8 iob1;
+ u8 iob2;
+ u8 iob3;
+ u8 iob4;
+ u8 iob5;
+ u8 iob6;
+ u8 iob7;
+ u8 iob8;
+ u8 iob9;
+ } __packed;
+ struct {
+ u8 iob_prefix[18];
+ u8 iobs[10];
+ } __packed;
+ } __packed;
+} __packed;
+
+#define GB_SAWB_LEN_SETTINGS 0x15
+#define GB_SAWB_LEN_PERFORMANCE_MODE 0x100
+
+#define GB_SAFN 0x5843
+
+#define GB_SASB_KBD_BACKLIGHT 0x78
+#define GB_SASB_POWER_MANAGEMENT 0x7a
+#define GB_SASB_USB_CHARGING_GET 0x67
+#define GB_SASB_USB_CHARGING_SET 0x68
+#define GB_SASB_NOTIFICATIONS 0x86
+#define GB_SASB_BLOCK_RECORDING 0x8a
+#define GB_SASB_PERFORMANCE_MODE 0x91
+
+#define GB_SAWB_RFLG_POS 4
+#define GB_SAWB_GB_GUNM_POS 5
+
+#define GB_RFLG_SUCCESS 0xaa
+#define GB_GUNM_FAIL 0xff
+
+#define GB_GUNM_FEATURE_ENABLE 0xbb
+#define GB_GUNM_FEATURE_ENABLE_SUCCESS 0xdd
+#define GB_GUDS_FEATURE_ENABLE 0xaa
+#define GB_GUDS_FEATURE_ENABLE_SUCCESS 0xcc
+
+#define GB_GUNM_GET 0x81
+#define GB_GUNM_SET 0x82
+
+#define GB_GUNM_POWER_MANAGEMENT 0x82
+
+#define GB_GUNM_USB_CHARGING_GET 0x80
+#define GB_GUNM_USB_CHARGING_ON 0x81
+#define GB_GUNM_USB_CHARGING_OFF 0x80
+#define GB_GUDS_POWER_ON_LID_OPEN 0xa3
+#define GB_GUDS_POWER_ON_LID_OPEN_GET 0x81
+#define GB_GUDS_POWER_ON_LID_OPEN_SET 0x80
+#define GB_GUDS_BATTERY_CHARGE_CONTROL 0xe9
+#define GB_GUDS_BATTERY_CHARGE_CONTROL_GET 0x91
+#define GB_GUDS_BATTERY_CHARGE_CONTROL_SET 0x90
+#define GB_GUNM_ACPI_NOTIFY_ENABLE 0x80
+#define GB_GUDS_ACPI_NOTIFY_ENABLE 0x02
+
+#define GB_BLOCK_RECORDING_ON 0x0
+#define GB_BLOCK_RECORDING_OFF 0x1
+
+#define GB_FNCN_PERFORMANCE_MODE 0x51
+#define GB_SUBN_PERFORMANCE_MODE_LIST 0x01
+#define GB_SUBN_PERFORMANCE_MODE_GET 0x02
+#define GB_SUBN_PERFORMANCE_MODE_SET 0x03
+
+/* guid 8246028d-8bca-4a55-ba0f-6f1e6b921b8f */
+static const guid_t performance_mode_guid =
+ GUID_INIT(0x8246028d, 0x8bca, 0x4a55, 0xba, 0x0f, 0x6f, 0x1e, 0x6b, 0x92, 0x1b, 0x8f);
+#define GB_PERFORMANCE_MODE_GUID performance_mode_guid
+
+#define GB_PERFORMANCE_MODE_FANOFF 0xb
+#define GB_PERFORMANCE_MODE_LOWNOISE 0xa
+#define GB_PERFORMANCE_MODE_OPTIMIZED 0x0
+#define GB_PERFORMANCE_MODE_OPTIMIZED_V2 0x2
+#define GB_PERFORMANCE_MODE_PERFORMANCE 0x1
+#define GB_PERFORMANCE_MODE_PERFORMANCE_V2 0x15
+#define GB_PERFORMANCE_MODE_ULTRA 0x16
+#define GB_PERFORMANCE_MODE_IGNORE1 0x14
+#define GB_PERFORMANCE_MODE_IGNORE2 0xc
+
+#define GB_ACPI_METHOD_ENABLE "SDLS"
+#define GB_ACPI_METHOD_ENABLE_ON 1
+#define GB_ACPI_METHOD_ENABLE_OFF 0
+#define GB_ACPI_METHOD_SETTINGS "CSFI"
+#define GB_ACPI_METHOD_PERFORMANCE_MODE "CSXI"
+
+#define GB_KBD_BACKLIGHT_MAX_BRIGHTNESS 3
+
+#define GB_ACPI_NOTIFY_BATTERY_STATE_CHANGED 0x61
+#define GB_ACPI_NOTIFY_DEVICE_ON_TABLE 0x6c
+#define GB_ACPI_NOTIFY_DEVICE_OFF_TABLE 0x6d
+#define GB_ACPI_NOTIFY_HOTKEY_PERFORMANCE_MODE 0x70
+
+#define GB_KEY_KBD_BACKLIGHT_KEYDOWN 0x2c
+#define GB_KEY_KBD_BACKLIGHT_KEYUP 0xac
+#define GB_KEY_BLOCK_RECORDING_KEYDOWN 0x1f
+#define GB_KEY_BLOCK_RECORDING_KEYUP 0x9f
+#define GB_KEY_BATTERY_NOTIFY_KEYUP 0xf
+#define GB_KEY_BATTERY_NOTIFY_KEYDOWN 0x8f
+
+/*
+ * Init functions for optional features return GB_NOT_SUPPORTED when the
+ * feature turns out to be unsupported on a particular device. Positive
+ * EOPNOTSUPP is used as the underlying value, rather than a negative errno,
+ * so this return code can be told apart from genuine errors propagated up
+ * from lower-level calls.
+ */
+#define GB_NOT_SUPPORTED EOPNOTSUPP /* Galaxy Book feature not supported */
+
+/*
+ * ACPI method handling
+ */
+
+static int galaxybook_acpi_method(struct samsung_galaxybook *galaxybook, acpi_string method,
+ struct sawb *buf, size_t len)
+{
+ struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object in_obj, *out_obj;
+ struct acpi_object_list input;
+ acpi_status status;
+ int err;
+
+ in_obj.type = ACPI_TYPE_BUFFER;
+ in_obj.buffer.length = len;
+ in_obj.buffer.pointer = (u8 *)buf;
+
+ input.count = 1;
+ input.pointer = &in_obj;
+
+ status = acpi_evaluate_object_typed(galaxybook->acpi->handle, method, &input, &output,
+ ACPI_TYPE_BUFFER);
+
+ if (ACPI_FAILURE(status)) {
+ dev_err(&galaxybook->acpi->dev, "failed to execute method %s; got %s\n",
+ method, acpi_format_exception(status));
+ return -EIO;
+ }
+
+ out_obj = output.pointer;
+
+ if (out_obj->buffer.length != len || out_obj->buffer.length < GB_SAWB_GB_GUNM_POS + 1) {
+ dev_err(&galaxybook->acpi->dev,
+ "failed to execute %s; response length mismatch\n",
+ method);
+ err = -EPROTO;
+ goto out_free;
+ }
+ if (out_obj->buffer.pointer[GB_SAWB_RFLG_POS] != GB_RFLG_SUCCESS) {
+ dev_err(&galaxybook->acpi->dev,
+ "failed to execute %s; device did not respond with success code 0x%x\n",
+ method, GB_RFLG_SUCCESS);
+ err = -ENXIO;
+ goto out_free;
+ }
+ if (out_obj->buffer.pointer[GB_SAWB_GB_GUNM_POS] == GB_GUNM_FAIL) {
+ dev_err(&galaxybook->acpi->dev,
+ "failed to execute %s; device responded with failure code 0x%x\n",
+ method, GB_GUNM_FAIL);
+ err = -ENXIO;
+ goto out_free;
+ }
+
+ memcpy(buf, out_obj->buffer.pointer, len);
+ err = 0;
+
+out_free:
+ kfree(out_obj);
+ return err;
+}
+
+static int galaxybook_enable_acpi_feature(struct samsung_galaxybook *galaxybook, const u16 sasb)
+{
+ struct sawb buf = {};
+ int err;
+
+ buf.safn = GB_SAFN;
+ buf.sasb = sasb;
+ buf.gunm = GB_GUNM_FEATURE_ENABLE;
+ buf.guds[0] = GB_GUDS_FEATURE_ENABLE;
+
+ err = galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_SETTINGS,
+ &buf, GB_SAWB_LEN_SETTINGS);
+ if (err)
+ return err;
+
+ if (buf.gunm != GB_GUNM_FEATURE_ENABLE_SUCCESS &&
+ buf.guds[0] != GB_GUDS_FEATURE_ENABLE_SUCCESS)
+ return -ENODEV;
+
+ return 0;
+}
+
+/*
+ * Keyboard Backlight
+ */
+
+static int kbd_backlight_acpi_get(struct samsung_galaxybook *galaxybook,
+ enum led_brightness *brightness)
+{
+ struct sawb buf = {};
+ int err;
+
+ buf.safn = GB_SAFN;
+ buf.sasb = GB_SASB_KBD_BACKLIGHT;
+ buf.gunm = GB_GUNM_GET;
+
+ err = galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_SETTINGS,
+ &buf, GB_SAWB_LEN_SETTINGS);
+ if (err)
+ return err;
+
+ *brightness = buf.gunm;
+
+ return 0;
+}
+
+static int kbd_backlight_acpi_set(struct samsung_galaxybook *galaxybook,
+ const enum led_brightness brightness)
+{
+ struct sawb buf = {};
+
+ buf.safn = GB_SAFN;
+ buf.sasb = GB_SASB_KBD_BACKLIGHT;
+ buf.gunm = GB_GUNM_SET;
+
+ buf.guds[0] = brightness;
+
+ return galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_SETTINGS,
+ &buf, GB_SAWB_LEN_SETTINGS);
+}
+
+static enum led_brightness kbd_backlight_show(struct led_classdev *led)
+{
+ struct samsung_galaxybook *galaxybook =
+ container_of(led, struct samsung_galaxybook, kbd_backlight);
+ enum led_brightness brightness;
+ int err;
+
+ err = kbd_backlight_acpi_get(galaxybook, &brightness);
+ if (err)
+ return err;
+
+ return brightness;
+}
+
+static int kbd_backlight_store(struct led_classdev *led,
+ const enum led_brightness brightness)
+{
+ struct samsung_galaxybook *galaxybook =
+ container_of_const(led, struct samsung_galaxybook, kbd_backlight);
+
+ return kbd_backlight_acpi_set(galaxybook, brightness);
+}
+
+static int galaxybook_kbd_backlight_init(struct samsung_galaxybook *galaxybook)
+{
+ struct led_init_data init_data = {};
+ enum led_brightness brightness;
+ int err;
+
+ err = devm_mutex_init(&galaxybook->platform->dev, &galaxybook->kbd_backlight_lock);
+ if (err)
+ return err;
+
+ err = galaxybook_enable_acpi_feature(galaxybook, GB_SASB_KBD_BACKLIGHT);
+ if (err) {
+ dev_dbg(&galaxybook->platform->dev,
+ "failed to enable kbd_backlight feature, error %d\n", err);
+ return GB_NOT_SUPPORTED;
+ }
+
+ err = kbd_backlight_acpi_get(galaxybook, &brightness);
+ if (err) {
+ dev_dbg(&galaxybook->platform->dev,
+ "failed to get initial kbd_backlight brightness, error %d\n", err);
+ return GB_NOT_SUPPORTED;
+ }
+
+ init_data.devicename = DRIVER_NAME;
+ init_data.default_label = ":" LED_FUNCTION_KBD_BACKLIGHT;
+ init_data.devname_mandatory = true;
+
+ galaxybook->kbd_backlight.brightness_get = kbd_backlight_show;
+ galaxybook->kbd_backlight.brightness_set_blocking = kbd_backlight_store;
+ galaxybook->kbd_backlight.flags = LED_BRIGHT_HW_CHANGED;
+ galaxybook->kbd_backlight.max_brightness = GB_KBD_BACKLIGHT_MAX_BRIGHTNESS;
+
+ return devm_led_classdev_register_ext(&galaxybook->platform->dev,
+ &galaxybook->kbd_backlight, &init_data);
+}
+
+/*
+ * Battery Extension (adds charge_control_end_threshold to the battery device)
+ */
+
+static int charge_control_end_threshold_acpi_get(struct samsung_galaxybook *galaxybook, u8 *value)
+{
+ struct sawb buf = {};
+ int err;
+
+ buf.safn = GB_SAFN;
+ buf.sasb = GB_SASB_POWER_MANAGEMENT;
+ buf.gunm = GB_GUNM_POWER_MANAGEMENT;
+ buf.guds[0] = GB_GUDS_BATTERY_CHARGE_CONTROL;
+ buf.guds[1] = GB_GUDS_BATTERY_CHARGE_CONTROL_GET;
+
+ err = galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_SETTINGS,
+ &buf, GB_SAWB_LEN_SETTINGS);
+ if (err)
+ return err;
+
+ *value = buf.guds[1];
+
+ return 0;
+}
+
+static int charge_control_end_threshold_acpi_set(struct samsung_galaxybook *galaxybook, u8 value)
+{
+ struct sawb buf = {};
+
+ buf.safn = GB_SAFN;
+ buf.sasb = GB_SASB_POWER_MANAGEMENT;
+ buf.gunm = GB_GUNM_POWER_MANAGEMENT;
+ buf.guds[0] = GB_GUDS_BATTERY_CHARGE_CONTROL;
+ buf.guds[1] = GB_GUDS_BATTERY_CHARGE_CONTROL_SET;
+ buf.guds[2] = value;
+
+ return galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_SETTINGS,
+ &buf, GB_SAWB_LEN_SETTINGS);
+}
+
+static int galaxybook_battery_ext_property_get(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *ext_data,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct samsung_galaxybook *galaxybook = ext_data;
+ int err;
+
+ if (psp != POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD)
+ return -EINVAL;
+
+ err = charge_control_end_threshold_acpi_get(galaxybook, (u8 *)&val->intval);
+ if (err)
+ return err;
+
+ /*
+ * device stores "no end threshold" as 0 instead of 100;
+ * if device has 0, report 100
+ */
+ if (val->intval == 0)
+ val->intval = 100;
+
+ return 0;
+}
+
+static int galaxybook_battery_ext_property_set(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *ext_data,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct samsung_galaxybook *galaxybook = ext_data;
+ u8 value;
+
+ if (psp != POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD)
+ return -EINVAL;
+
+ value = val->intval;
+
+ if (value < 1 || value > 100)
+ return -EINVAL;
+
+ /*
+ * device stores "no end threshold" as 0 instead of 100;
+ * if setting to 100, send 0
+ */
+ if (value == 100)
+ value = 0;
+
+ return charge_control_end_threshold_acpi_set(galaxybook, value);
+}
+
+static int galaxybook_battery_ext_property_is_writeable(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *ext_data,
+ enum power_supply_property psp)
+{
+ if (psp == POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD)
+ return true;
+
+ return false;
+}
+
+static const enum power_supply_property galaxybook_battery_properties[] = {
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD,
+};
+
+static const struct power_supply_ext galaxybook_battery_ext = {
+ .name = DRIVER_NAME,
+ .properties = galaxybook_battery_properties,
+ .num_properties = ARRAY_SIZE(galaxybook_battery_properties),
+ .get_property = galaxybook_battery_ext_property_get,
+ .set_property = galaxybook_battery_ext_property_set,
+ .property_is_writeable = galaxybook_battery_ext_property_is_writeable,
+};
+
+static int galaxybook_battery_add(struct power_supply *battery, struct acpi_battery_hook *hook)
+{
+ struct samsung_galaxybook *galaxybook =
+ container_of(hook, struct samsung_galaxybook, battery_hook);
+
+ return power_supply_register_extension(battery, &galaxybook_battery_ext,
+ &battery->dev, galaxybook);
+}
+
+static int galaxybook_battery_remove(struct power_supply *battery, struct acpi_battery_hook *hook)
+{
+ power_supply_unregister_extension(battery, &galaxybook_battery_ext);
+ return 0;
+}
+
+static int galaxybook_battery_threshold_init(struct samsung_galaxybook *galaxybook)
+{
+ u8 value;
+ int err;
+
+ err = charge_control_end_threshold_acpi_get(galaxybook, &value);
+ if (err) {
+ dev_dbg(&galaxybook->platform->dev,
+ "failed to get initial battery charge end threshold, error %d\n", err);
+ return 0;
+ }
+
+ galaxybook->battery_hook.add_battery = galaxybook_battery_add;
+ galaxybook->battery_hook.remove_battery = galaxybook_battery_remove;
+ galaxybook->battery_hook.name = "Samsung Galaxy Book Battery Extension";
+
+ return devm_battery_hook_register(&galaxybook->platform->dev, &galaxybook->battery_hook);
+}
+
+/*
+ * Platform Profile / Performance mode
+ */
+
+static int performance_mode_acpi_get(struct samsung_galaxybook *galaxybook, u8 *performance_mode)
+{
+ struct sawb buf = {};
+ int err;
+
+ buf.safn = GB_SAFN;
+ buf.sasb = GB_SASB_PERFORMANCE_MODE;
+ export_guid(buf.caid, &GB_PERFORMANCE_MODE_GUID);
+ buf.fncn = GB_FNCN_PERFORMANCE_MODE;
+ buf.subn = GB_SUBN_PERFORMANCE_MODE_GET;
+
+ err = galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_PERFORMANCE_MODE,
+ &buf, GB_SAWB_LEN_PERFORMANCE_MODE);
+ if (err)
+ return err;
+
+ *performance_mode = buf.iob0;
+
+ return 0;
+}
+
+static int performance_mode_acpi_set(struct samsung_galaxybook *galaxybook,
+ const u8 performance_mode)
+{
+ struct sawb buf = {};
+
+ buf.safn = GB_SAFN;
+ buf.sasb = GB_SASB_PERFORMANCE_MODE;
+ export_guid(buf.caid, &GB_PERFORMANCE_MODE_GUID);
+ buf.fncn = GB_FNCN_PERFORMANCE_MODE;
+ buf.subn = GB_SUBN_PERFORMANCE_MODE_SET;
+ buf.iob0 = performance_mode;
+
+ return galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_PERFORMANCE_MODE,
+ &buf, GB_SAWB_LEN_PERFORMANCE_MODE);
+}
+
+static int get_performance_mode_profile(struct samsung_galaxybook *galaxybook,
+ const u8 performance_mode,
+ enum platform_profile_option *profile)
+{
+ switch (performance_mode) {
+ case GB_PERFORMANCE_MODE_FANOFF:
+ *profile = PLATFORM_PROFILE_LOW_POWER;
+ break;
+ case GB_PERFORMANCE_MODE_LOWNOISE:
+ *profile = PLATFORM_PROFILE_QUIET;
+ break;
+ case GB_PERFORMANCE_MODE_OPTIMIZED:
+ case GB_PERFORMANCE_MODE_OPTIMIZED_V2:
+ *profile = PLATFORM_PROFILE_BALANCED;
+ break;
+ case GB_PERFORMANCE_MODE_PERFORMANCE:
+ case GB_PERFORMANCE_MODE_PERFORMANCE_V2:
+ case GB_PERFORMANCE_MODE_ULTRA:
+ *profile = PLATFORM_PROFILE_PERFORMANCE;
+ break;
+ case GB_PERFORMANCE_MODE_IGNORE1:
+ case GB_PERFORMANCE_MODE_IGNORE2:
+ return -EOPNOTSUPP;
+ default:
+ dev_warn(&galaxybook->platform->dev,
+ "unrecognized performance mode 0x%x\n", performance_mode);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int galaxybook_platform_profile_get(struct device *dev,
+ enum platform_profile_option *profile)
+{
+ struct samsung_galaxybook *galaxybook = dev_get_drvdata(dev);
+ u8 performance_mode;
+ int err;
+
+ err = performance_mode_acpi_get(galaxybook, &performance_mode);
+ if (err)
+ return err;
+
+ return get_performance_mode_profile(galaxybook, performance_mode, profile);
+}
+
+static int galaxybook_platform_profile_set(struct device *dev,
+ enum platform_profile_option profile)
+{
+ struct samsung_galaxybook *galaxybook = dev_get_drvdata(dev);
+
+ return performance_mode_acpi_set(galaxybook,
+ galaxybook->profile_performance_modes[profile]);
+}
+
+static int galaxybook_platform_profile_probe(void *drvdata, unsigned long *choices)
+{
+ struct samsung_galaxybook *galaxybook = drvdata;
+ u8 *perfmodes = galaxybook->profile_performance_modes;
+ enum platform_profile_option profile;
+ struct sawb buf = {};
+ unsigned int i;
+ int err;
+
+ buf.safn = GB_SAFN;
+ buf.sasb = GB_SASB_PERFORMANCE_MODE;
+ export_guid(buf.caid, &GB_PERFORMANCE_MODE_GUID);
+ buf.fncn = GB_FNCN_PERFORMANCE_MODE;
+ buf.subn = GB_SUBN_PERFORMANCE_MODE_LIST;
+
+ err = galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_PERFORMANCE_MODE,
+ &buf, GB_SAWB_LEN_PERFORMANCE_MODE);
+ if (err) {
+ dev_dbg(&galaxybook->platform->dev,
+ "failed to get supported performance modes, error %d\n", err);
+ return err;
+ }
+
+ /* set initial default profile performance mode values */
+ perfmodes[PLATFORM_PROFILE_LOW_POWER] = GB_PERFORMANCE_MODE_FANOFF;
+ perfmodes[PLATFORM_PROFILE_QUIET] = GB_PERFORMANCE_MODE_LOWNOISE;
+ perfmodes[PLATFORM_PROFILE_BALANCED] = GB_PERFORMANCE_MODE_OPTIMIZED;
+ perfmodes[PLATFORM_PROFILE_PERFORMANCE] = GB_PERFORMANCE_MODE_PERFORMANCE;
+
+ /*
+	 * iob0 holds the number of performance modes supported by the device;
+	 * the mode values themselves follow as a list (iob1-iobX). Loop over
+	 * the supported values and enable the platform_profile choice each one
+	 * maps to, overriding "legacy" values along the way whenever a
+	 * non-legacy value exists.
+ */
+ for (i = 1; i <= buf.iob0; i++) {
+ err = get_performance_mode_profile(galaxybook, buf.iobs[i], &profile);
+ if (err) {
+ dev_dbg(&galaxybook->platform->dev,
+ "ignoring unmapped performance mode 0x%x\n", buf.iobs[i]);
+ continue;
+ }
+ switch (buf.iobs[i]) {
+ case GB_PERFORMANCE_MODE_OPTIMIZED_V2:
+ perfmodes[profile] = GB_PERFORMANCE_MODE_OPTIMIZED_V2;
+ break;
+ case GB_PERFORMANCE_MODE_PERFORMANCE_V2:
+ /* only update if not already overwritten by Ultra */
+ if (perfmodes[profile] != GB_PERFORMANCE_MODE_ULTRA)
+ perfmodes[profile] = GB_PERFORMANCE_MODE_PERFORMANCE_V2;
+ break;
+ case GB_PERFORMANCE_MODE_ULTRA:
+ perfmodes[profile] = GB_PERFORMANCE_MODE_ULTRA;
+ break;
+ default:
+ break;
+ }
+ set_bit(profile, choices);
+ dev_dbg(&galaxybook->platform->dev,
+ "setting platform profile %d to use performance mode 0x%x\n",
+ profile, perfmodes[profile]);
+ }
+
+ /* initialize performance_mode using balanced's mapped value */
+ if (test_bit(PLATFORM_PROFILE_BALANCED, choices))
+ return performance_mode_acpi_set(galaxybook, perfmodes[PLATFORM_PROFILE_BALANCED]);
+
+ return 0;
+}
+
+static const struct platform_profile_ops galaxybook_platform_profile_ops = {
+ .probe = galaxybook_platform_profile_probe,
+ .profile_get = galaxybook_platform_profile_get,
+ .profile_set = galaxybook_platform_profile_set,
+};
+
+static int galaxybook_platform_profile_init(struct samsung_galaxybook *galaxybook)
+{
+ struct device *platform_profile_dev;
+ u8 performance_mode;
+ int err;
+
+ err = performance_mode_acpi_get(galaxybook, &performance_mode);
+ if (err) {
+ dev_dbg(&galaxybook->platform->dev,
+ "failed to get initial performance mode, error %d\n", err);
+ return GB_NOT_SUPPORTED;
+ }
+
+ platform_profile_dev = devm_platform_profile_register(&galaxybook->platform->dev,
+ DRIVER_NAME, galaxybook,
+ &galaxybook_platform_profile_ops);
+
+ return PTR_ERR_OR_ZERO(platform_profile_dev);
+}
+
+/*
+ * Firmware Attributes
+ */
+
+/* Power on lid open (device should power on when lid is opened) */
+
+static int power_on_lid_open_acpi_get(struct samsung_galaxybook *galaxybook, bool *value)
+{
+ struct sawb buf = {};
+ int err;
+
+ buf.safn = GB_SAFN;
+ buf.sasb = GB_SASB_POWER_MANAGEMENT;
+ buf.gunm = GB_GUNM_POWER_MANAGEMENT;
+ buf.guds[0] = GB_GUDS_POWER_ON_LID_OPEN;
+ buf.guds[1] = GB_GUDS_POWER_ON_LID_OPEN_GET;
+
+ err = galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_SETTINGS,
+ &buf, GB_SAWB_LEN_SETTINGS);
+ if (err)
+ return err;
+
+ *value = buf.guds[1];
+
+ return 0;
+}
+
+static int power_on_lid_open_acpi_set(struct samsung_galaxybook *galaxybook, const bool value)
+{
+ struct sawb buf = {};
+
+ lockdep_assert_held(&galaxybook->fw_attr_lock);
+
+ buf.safn = GB_SAFN;
+ buf.sasb = GB_SASB_POWER_MANAGEMENT;
+ buf.gunm = GB_GUNM_POWER_MANAGEMENT;
+ buf.guds[0] = GB_GUDS_POWER_ON_LID_OPEN;
+ buf.guds[1] = GB_GUDS_POWER_ON_LID_OPEN_SET;
+ buf.guds[2] = value ? 1 : 0;
+
+ return galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_SETTINGS,
+ &buf, GB_SAWB_LEN_SETTINGS);
+}
+
+/* USB Charging (USB ports can provide power when device is powered off) */
+
+static int usb_charging_acpi_get(struct samsung_galaxybook *galaxybook, bool *value)
+{
+ struct sawb buf = {};
+ int err;
+
+ buf.safn = GB_SAFN;
+ buf.sasb = GB_SASB_USB_CHARGING_GET;
+ buf.gunm = GB_GUNM_USB_CHARGING_GET;
+
+ err = galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_SETTINGS,
+ &buf, GB_SAWB_LEN_SETTINGS);
+ if (err)
+ return err;
+
+ *value = buf.gunm == 1;
+
+ return 0;
+}
+
+static int usb_charging_acpi_set(struct samsung_galaxybook *galaxybook, const bool value)
+{
+ struct sawb buf = {};
+
+ lockdep_assert_held(&galaxybook->fw_attr_lock);
+
+ buf.safn = GB_SAFN;
+ buf.sasb = GB_SASB_USB_CHARGING_SET;
+ buf.gunm = value ? GB_GUNM_USB_CHARGING_ON : GB_GUNM_USB_CHARGING_OFF;
+
+ return galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_SETTINGS,
+ &buf, GB_SAWB_LEN_SETTINGS);
+}
+
+/* Block recording (blocks access to camera and microphone) */
+
+static int block_recording_acpi_get(struct samsung_galaxybook *galaxybook, bool *value)
+{
+ struct sawb buf = {};
+ int err;
+
+ buf.safn = GB_SAFN;
+ buf.sasb = GB_SASB_BLOCK_RECORDING;
+ buf.gunm = GB_GUNM_GET;
+
+ err = galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_SETTINGS,
+ &buf, GB_SAWB_LEN_SETTINGS);
+ if (err)
+ return err;
+
+ *value = buf.gunm == GB_BLOCK_RECORDING_ON;
+
+ return 0;
+}
+
+static int block_recording_acpi_set(struct samsung_galaxybook *galaxybook, const bool value)
+{
+ struct sawb buf = {};
+ int err;
+
+ lockdep_assert_held(&galaxybook->fw_attr_lock);
+
+ buf.safn = GB_SAFN;
+ buf.sasb = GB_SASB_BLOCK_RECORDING;
+ buf.gunm = GB_GUNM_SET;
+ buf.guds[0] = value ? GB_BLOCK_RECORDING_ON : GB_BLOCK_RECORDING_OFF;
+
+ err = galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_SETTINGS,
+ &buf, GB_SAWB_LEN_SETTINGS);
+ if (err)
+ return err;
+
+ input_report_switch(galaxybook->camera_lens_cover_switch,
+ SW_CAMERA_LENS_COVER, value ? 1 : 0);
+ input_sync(galaxybook->camera_lens_cover_switch);
+
+ return 0;
+}
+
+static int galaxybook_block_recording_init(struct samsung_galaxybook *galaxybook)
+{
+ bool value;
+ int err;
+
+ err = galaxybook_enable_acpi_feature(galaxybook, GB_SASB_BLOCK_RECORDING);
+ if (err) {
+ dev_dbg(&galaxybook->platform->dev,
+ "failed to initialize block_recording, error %d\n", err);
+ return GB_NOT_SUPPORTED;
+ }
+
+ guard(mutex)(&galaxybook->fw_attr_lock);
+
+ err = block_recording_acpi_get(galaxybook, &value);
+ if (err) {
+ dev_dbg(&galaxybook->platform->dev,
+ "failed to get initial block_recording state, error %d\n", err);
+ return GB_NOT_SUPPORTED;
+ }
+
+ galaxybook->camera_lens_cover_switch =
+ devm_input_allocate_device(&galaxybook->platform->dev);
+ if (!galaxybook->camera_lens_cover_switch)
+ return -ENOMEM;
+
+ galaxybook->camera_lens_cover_switch->name = "Samsung Galaxy Book Camera Lens Cover";
+ galaxybook->camera_lens_cover_switch->phys = DRIVER_NAME "/input0";
+ galaxybook->camera_lens_cover_switch->id.bustype = BUS_HOST;
+
+ input_set_capability(galaxybook->camera_lens_cover_switch, EV_SW, SW_CAMERA_LENS_COVER);
+
+ err = input_register_device(galaxybook->camera_lens_cover_switch);
+ if (err)
+ return err;
+
+ input_report_switch(galaxybook->camera_lens_cover_switch,
+ SW_CAMERA_LENS_COVER, value ? 1 : 0);
+ input_sync(galaxybook->camera_lens_cover_switch);
+
+ return 0;
+}
+
+/* Firmware Attributes setup */
+
+static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "enumeration\n");
+}
+
+static struct kobj_attribute fw_attr_type = __ATTR_RO(type);
+
+static ssize_t default_value_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "0\n");
+}
+
+static struct kobj_attribute fw_attr_default_value = __ATTR_RO(default_value);
+
+static ssize_t possible_values_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "0;1\n");
+}
+
+static struct kobj_attribute fw_attr_possible_values = __ATTR_RO(possible_values);
+
+static ssize_t display_name_language_code_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n", GB_ATTR_LANGUAGE_CODE);
+}
+
+static struct kobj_attribute fw_attr_display_name_language_code =
+ __ATTR_RO(display_name_language_code);
+
+static ssize_t display_name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct galaxybook_fw_attr *fw_attr =
+ container_of(attr, struct galaxybook_fw_attr, display_name);
+
+ return sysfs_emit(buf, "%s\n", galaxybook_fw_attr_desc[fw_attr->fw_attr_id]);
+}
+
+static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct galaxybook_fw_attr *fw_attr =
+ container_of(attr, struct galaxybook_fw_attr, current_value);
+ bool value;
+ int err;
+
+ err = fw_attr->get_value(fw_attr->galaxybook, &value);
+ if (err)
+ return err;
+
+ return sysfs_emit(buf, "%u\n", value);
+}
+
+static ssize_t current_value_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct galaxybook_fw_attr *fw_attr =
+ container_of(attr, struct galaxybook_fw_attr, current_value);
+ struct samsung_galaxybook *galaxybook = fw_attr->galaxybook;
+ bool value;
+ int err;
+
+ if (!count)
+ return -EINVAL;
+
+ err = kstrtobool(buf, &value);
+ if (err)
+ return err;
+
+ guard(mutex)(&galaxybook->fw_attr_lock);
+
+ err = fw_attr->set_value(galaxybook, value);
+ if (err)
+ return err;
+
+ return count;
+}
+
+#define NUM_FW_ATTR_ENUM_ATTRS 6
+
+static int galaxybook_fw_attr_init(struct samsung_galaxybook *galaxybook,
+ const enum galaxybook_fw_attr_id fw_attr_id,
+ int (*get_value)(struct samsung_galaxybook *galaxybook,
+ bool *value),
+ int (*set_value)(struct samsung_galaxybook *galaxybook,
+ const bool value))
+{
+ struct galaxybook_fw_attr *fw_attr;
+ struct attribute **attrs;
+
+ fw_attr = devm_kzalloc(&galaxybook->platform->dev, sizeof(*fw_attr), GFP_KERNEL);
+ if (!fw_attr)
+ return -ENOMEM;
+
+ attrs = devm_kcalloc(&galaxybook->platform->dev, NUM_FW_ATTR_ENUM_ATTRS + 1,
+ sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
+
+ attrs[0] = &fw_attr_type.attr;
+ attrs[1] = &fw_attr_default_value.attr;
+ attrs[2] = &fw_attr_possible_values.attr;
+ attrs[3] = &fw_attr_display_name_language_code.attr;
+
+ sysfs_attr_init(&fw_attr->display_name.attr);
+ fw_attr->display_name.attr.name = "display_name";
+ fw_attr->display_name.attr.mode = 0444;
+ fw_attr->display_name.show = display_name_show;
+ attrs[4] = &fw_attr->display_name.attr;
+
+ sysfs_attr_init(&fw_attr->current_value.attr);
+ fw_attr->current_value.attr.name = "current_value";
+ fw_attr->current_value.attr.mode = 0644;
+ fw_attr->current_value.show = current_value_show;
+ fw_attr->current_value.store = current_value_store;
+ attrs[5] = &fw_attr->current_value.attr;
+
+ attrs[6] = NULL;
+
+ fw_attr->galaxybook = galaxybook;
+ fw_attr->fw_attr_id = fw_attr_id;
+ fw_attr->attr_group.name = galaxybook_fw_attr_name[fw_attr_id];
+ fw_attr->attr_group.attrs = attrs;
+ fw_attr->get_value = get_value;
+ fw_attr->set_value = set_value;
+
+ return sysfs_create_group(&galaxybook->fw_attrs_kset->kobj, &fw_attr->attr_group);
+}
+
+static void galaxybook_kset_unregister(void *data)
+{
+ struct kset *kset = data;
+
+ kset_unregister(kset);
+}
+
+static void galaxybook_fw_attrs_dev_unregister(void *data)
+{
+ struct device *fw_attrs_dev = data;
+
+ device_unregister(fw_attrs_dev);
+}
+
+static int galaxybook_fw_attrs_init(struct samsung_galaxybook *galaxybook)
+{
+ bool value;
+ int err;
+
+ err = devm_mutex_init(&galaxybook->platform->dev, &galaxybook->fw_attr_lock);
+ if (err)
+ return err;
+
+ galaxybook->fw_attrs_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0),
+ NULL, "%s", DRIVER_NAME);
+ if (IS_ERR(galaxybook->fw_attrs_dev))
+ return PTR_ERR(galaxybook->fw_attrs_dev);
+
+ err = devm_add_action_or_reset(&galaxybook->platform->dev,
+ galaxybook_fw_attrs_dev_unregister,
+ galaxybook->fw_attrs_dev);
+ if (err)
+ return err;
+
+ galaxybook->fw_attrs_kset = kset_create_and_add("attributes", NULL,
+ &galaxybook->fw_attrs_dev->kobj);
+ if (!galaxybook->fw_attrs_kset)
+ return -ENOMEM;
+ err = devm_add_action_or_reset(&galaxybook->platform->dev,
+ galaxybook_kset_unregister, galaxybook->fw_attrs_kset);
+ if (err)
+ return err;
+
+ err = power_on_lid_open_acpi_get(galaxybook, &value);
+ if (!err) {
+ err = galaxybook_fw_attr_init(galaxybook,
+ GB_ATTR_POWER_ON_LID_OPEN,
+ &power_on_lid_open_acpi_get,
+ &power_on_lid_open_acpi_set);
+ if (err)
+ return err;
+ }
+
+ err = usb_charging_acpi_get(galaxybook, &value);
+ if (!err) {
+ err = galaxybook_fw_attr_init(galaxybook,
+ GB_ATTR_USB_CHARGING,
+ &usb_charging_acpi_get,
+ &usb_charging_acpi_set);
+ if (err)
+ return err;
+ }
+
+ err = galaxybook_block_recording_init(galaxybook);
+ if (err == GB_NOT_SUPPORTED)
+ return 0;
+ else if (err)
+ return err;
+
+ galaxybook->has_block_recording = true;
+
+ return galaxybook_fw_attr_init(galaxybook,
+ GB_ATTR_BLOCK_RECORDING,
+ &block_recording_acpi_get,
+ &block_recording_acpi_set);
+}
+
+/*
+ * Hotkeys and notifications
+ */
+
+static void galaxybook_kbd_backlight_hotkey_work(struct work_struct *work)
+{
+ struct samsung_galaxybook *galaxybook =
+ from_work(galaxybook, work, kbd_backlight_hotkey_work);
+ int brightness;
+ int err;
+
+ guard(mutex)(&galaxybook->kbd_backlight_lock);
+
+ brightness = galaxybook->kbd_backlight.brightness;
+ if (brightness < galaxybook->kbd_backlight.max_brightness)
+ brightness++;
+ else
+ brightness = 0;
+
+ err = led_set_brightness_sync(&galaxybook->kbd_backlight, brightness);
+ if (err) {
+ dev_err(&galaxybook->platform->dev,
+ "failed to set kbd_backlight brightness, error %d\n", err);
+ return;
+ }
+
+ led_classdev_notify_brightness_hw_changed(&galaxybook->kbd_backlight, brightness);
+}
+
+static void galaxybook_block_recording_hotkey_work(struct work_struct *work)
+{
+ struct samsung_galaxybook *galaxybook =
+ from_work(galaxybook, work, block_recording_hotkey_work);
+ bool value;
+ int err;
+
+ guard(mutex)(&galaxybook->fw_attr_lock);
+
+ err = block_recording_acpi_get(galaxybook, &value);
+ if (err) {
+ dev_err(&galaxybook->platform->dev,
+ "failed to get block_recording, error %d\n", err);
+ return;
+ }
+
+ err = block_recording_acpi_set(galaxybook, !value);
+ if (err)
+ dev_err(&galaxybook->platform->dev,
+ "failed to set block_recording, error %d\n", err);
+}
+
+static bool galaxybook_i8042_filter(unsigned char data, unsigned char str, struct serio *port,
+ void *context)
+{
+ struct samsung_galaxybook *galaxybook = context;
+ static bool extended;
+
+ if (str & I8042_STR_AUXDATA)
+ return false;
+
+ if (data == 0xe0) {
+ extended = true;
+ return true;
+ } else if (extended) {
+ extended = false;
+ switch (data) {
+ case GB_KEY_KBD_BACKLIGHT_KEYDOWN:
+ return true;
+ case GB_KEY_KBD_BACKLIGHT_KEYUP:
+ if (galaxybook->has_kbd_backlight)
+ schedule_work(&galaxybook->kbd_backlight_hotkey_work);
+ return true;
+
+ case GB_KEY_BLOCK_RECORDING_KEYDOWN:
+ return true;
+ case GB_KEY_BLOCK_RECORDING_KEYUP:
+ if (galaxybook->has_block_recording)
+ schedule_work(&galaxybook->block_recording_hotkey_work);
+ return true;
+
+ /* battery notification already sent to battery + SCAI device */
+ case GB_KEY_BATTERY_NOTIFY_KEYUP:
+ case GB_KEY_BATTERY_NOTIFY_KEYDOWN:
+ return true;
+
+ default:
+ /*
+ * Report the previously filtered e0 before continuing
+ * with the next non-filtered byte.
+ */
+ serio_interrupt(port, 0xe0, 0);
+ return false;
+ }
+ }
+
+ return false;
+}
+
+static void galaxybook_i8042_filter_remove(void *data)
+{
+ struct samsung_galaxybook *galaxybook = data;
+
+ i8042_remove_filter(galaxybook_i8042_filter);
+ cancel_work_sync(&galaxybook->kbd_backlight_hotkey_work);
+ cancel_work_sync(&galaxybook->block_recording_hotkey_work);
+}
+
+static int galaxybook_i8042_filter_install(struct samsung_galaxybook *galaxybook)
+{
+ int err;
+
+ if (!galaxybook->has_kbd_backlight && !galaxybook->has_block_recording)
+ return 0;
+
+ INIT_WORK(&galaxybook->kbd_backlight_hotkey_work,
+ galaxybook_kbd_backlight_hotkey_work);
+ INIT_WORK(&galaxybook->block_recording_hotkey_work,
+ galaxybook_block_recording_hotkey_work);
+
+ err = i8042_install_filter(galaxybook_i8042_filter, galaxybook);
+ if (err)
+ return err;
+
+ return devm_add_action_or_reset(&galaxybook->platform->dev,
+ galaxybook_i8042_filter_remove, galaxybook);
+}
+
+/*
+ * ACPI device setup
+ */
+
+static void galaxybook_acpi_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct samsung_galaxybook *galaxybook = data;
+
+ switch (event) {
+ case GB_ACPI_NOTIFY_BATTERY_STATE_CHANGED:
+ case GB_ACPI_NOTIFY_DEVICE_ON_TABLE:
+ case GB_ACPI_NOTIFY_DEVICE_OFF_TABLE:
+ break;
+ case GB_ACPI_NOTIFY_HOTKEY_PERFORMANCE_MODE:
+ if (galaxybook->has_performance_mode)
+ platform_profile_cycle();
+ break;
+ default:
+ dev_warn(&galaxybook->platform->dev,
+ "unknown ACPI notification event: 0x%x\n", event);
+ }
+
+ acpi_bus_generate_netlink_event(DRIVER_NAME, dev_name(&galaxybook->platform->dev),
+ event, 1);
+}
+
+static int galaxybook_enable_acpi_notify(struct samsung_galaxybook *galaxybook)
+{
+ struct sawb buf = {};
+ int err;
+
+ err = galaxybook_enable_acpi_feature(galaxybook, GB_SASB_NOTIFICATIONS);
+ if (err)
+ return err;
+
+ buf.safn = GB_SAFN;
+ buf.sasb = GB_SASB_NOTIFICATIONS;
+ buf.gunm = GB_GUNM_ACPI_NOTIFY_ENABLE;
+ buf.guds[0] = GB_GUDS_ACPI_NOTIFY_ENABLE;
+
+ return galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_SETTINGS,
+ &buf, GB_SAWB_LEN_SETTINGS);
+}
+
+static void galaxybook_acpi_remove_notify_handler(void *data)
+{
+ struct samsung_galaxybook *galaxybook = data;
+
+ acpi_remove_notify_handler(galaxybook->acpi->handle, ACPI_ALL_NOTIFY,
+ galaxybook_acpi_notify);
+}
+
+static void galaxybook_acpi_disable(void *data)
+{
+ struct samsung_galaxybook *galaxybook = data;
+
+ acpi_execute_simple_method(galaxybook->acpi->handle,
+ GB_ACPI_METHOD_ENABLE, GB_ACPI_METHOD_ENABLE_OFF);
+}
+
+static int galaxybook_acpi_init(struct samsung_galaxybook *galaxybook)
+{
+ acpi_status status;
+ int err;
+
+ status = acpi_execute_simple_method(galaxybook->acpi->handle, GB_ACPI_METHOD_ENABLE,
+ GB_ACPI_METHOD_ENABLE_ON);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ err = devm_add_action_or_reset(&galaxybook->platform->dev,
+ galaxybook_acpi_disable, galaxybook);
+ if (err)
+ return err;
+
+ status = acpi_install_notify_handler(galaxybook->acpi->handle, ACPI_ALL_NOTIFY,
+ galaxybook_acpi_notify, galaxybook);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ err = devm_add_action_or_reset(&galaxybook->platform->dev,
+ galaxybook_acpi_remove_notify_handler, galaxybook);
+ if (err)
+ return err;
+
+ err = galaxybook_enable_acpi_notify(galaxybook);
+ if (err)
+ dev_dbg(&galaxybook->platform->dev, "failed to enable ACPI notifications; "
+ "some hotkeys will not be supported\n");
+
+ err = galaxybook_enable_acpi_feature(galaxybook, GB_SASB_POWER_MANAGEMENT);
+ if (err)
+ dev_dbg(&galaxybook->platform->dev,
+ "failed to initialize ACPI power management features; "
+ "many features of this driver will not be available\n");
+
+ return 0;
+}
+
+/*
+ * Platform driver
+ */
+
+static int galaxybook_probe(struct platform_device *pdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ struct samsung_galaxybook *galaxybook;
+ int err;
+
+ if (!adev)
+ return -ENODEV;
+
+ galaxybook = devm_kzalloc(&pdev->dev, sizeof(*galaxybook), GFP_KERNEL);
+ if (!galaxybook)
+ return -ENOMEM;
+
+ galaxybook->platform = pdev;
+ galaxybook->acpi = adev;
+
+ /*
+ * Features must be enabled and initialized in the following order to
+ * avoid failures seen on certain devices:
+ * - GB_SASB_POWER_MANAGEMENT (including performance mode)
+ * - GB_SASB_KBD_BACKLIGHT
+ * - GB_SASB_BLOCK_RECORDING (as part of fw_attrs init)
+ */
+
+ err = galaxybook_acpi_init(galaxybook);
+ if (err)
+ return dev_err_probe(&galaxybook->platform->dev, err,
+ "failed to initialize ACPI device\n");
+
+ err = galaxybook_platform_profile_init(galaxybook);
+ if (!err)
+ galaxybook->has_performance_mode = true;
+ else if (err != GB_NOT_SUPPORTED)
+ return dev_err_probe(&galaxybook->platform->dev, err,
+ "failed to initialize platform profile\n");
+
+ err = galaxybook_battery_threshold_init(galaxybook);
+ if (err)
+ return dev_err_probe(&galaxybook->platform->dev, err,
+ "failed to initialize battery threshold\n");
+
+ err = galaxybook_kbd_backlight_init(galaxybook);
+ if (!err)
+ galaxybook->has_kbd_backlight = true;
+ else if (err != GB_NOT_SUPPORTED)
+ return dev_err_probe(&galaxybook->platform->dev, err,
+ "failed to initialize kbd_backlight\n");
+
+ err = galaxybook_fw_attrs_init(galaxybook);
+ if (err)
+ return dev_err_probe(&galaxybook->platform->dev, err,
+ "failed to initialize firmware-attributes\n");
+
+ err = galaxybook_i8042_filter_install(galaxybook);
+ if (err)
+ return dev_err_probe(&galaxybook->platform->dev, err,
+ "failed to initialize i8042_filter\n");
+
+ return 0;
+}
+
+static const struct acpi_device_id galaxybook_device_ids[] = {
+ { "SAM0427" },
+ { "SAM0428" },
+ { "SAM0429" },
+ { "SAM0430" },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, galaxybook_device_ids);
+
+static struct platform_driver galaxybook_platform_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .acpi_match_table = galaxybook_device_ids,
+ },
+ .probe = galaxybook_probe,
+};
+module_platform_driver(galaxybook_platform_driver);
+
+MODULE_AUTHOR("Joshua Grisham <josh@joshuagrisham.com>");
+MODULE_DESCRIPTION("Samsung Galaxy Book driver");
+MODULE_LICENSE("GPL");
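
The struct sawb defined near the top of samsung-galaxybook.c overlays three packed request layouts on one buffer, which is what makes the fixed GB_SAWB_RFLG_POS and GB_SAWB_GB_GUNM_POS byte offsets valid for every call made through galaxybook_acpi_method(). A small compile-time sanity sketch of that assumption (not part of the patch):

#include <linux/build_bug.h>
#include <linux/stddef.h>

/* rflg sits right after the two u16 header words (safn, sasb)... */
static_assert(offsetof(struct sawb, rflg) == GB_SAWB_RFLG_POS);
/* ...and the union (gunm / caid / iob_prefix) starts one byte later. */
static_assert(offsetof(struct sawb, gunm) == GB_SAWB_GB_GUNM_POS);
static_assert(offsetof(struct sawb, caid) == GB_SAWB_GB_GUNM_POS);
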
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
index 323316ac6783..0fc275e461be 100644
--- a/drivers/platform/x86/think-lmi.c
+++ b/drivers/platform/x86/think-lmi.c
@@ -262,16 +262,11 @@ static int tlmi_simple_call(const char *guid, const char *arg)
return 0;
}
-/* Extract output string from WMI return buffer */
-static int tlmi_extract_output_string(const struct acpi_buffer *output,
- char **string)
+/* Extract output string from WMI return value */
+static int tlmi_extract_output_string(union acpi_object *obj, char **string)
{
- const union acpi_object *obj;
char *s;
- obj = output->pointer;
- if (!obj)
- return -ENOMEM;
if (obj->type != ACPI_TYPE_STRING || !obj->string.pointer)
return -EIO;
@@ -349,20 +344,18 @@ static int tlmi_opcode_setting(char *setting, const char *value)
return ret;
}
-static int tlmi_setting(int item, char **value, const char *guid_string)
+static int tlmi_setting(struct wmi_device *wdev, int item, char **value)
{
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- acpi_status status;
+ union acpi_object *obj;
int ret;
- status = wmi_query_block(guid_string, item, &output);
- if (ACPI_FAILURE(status)) {
- kfree(output.pointer);
+ obj = wmidev_block_query(wdev, item);
+ if (!obj)
return -EIO;
- }
- ret = tlmi_extract_output_string(&output, value);
- kfree(output.pointer);
+ ret = tlmi_extract_output_string(obj, value);
+ kfree(obj);
+
return ret;
}
@@ -370,19 +363,22 @@ static int tlmi_get_bios_selections(const char *item, char **value)
{
const struct acpi_buffer input = { strlen(item), (char *)item };
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
acpi_status status;
int ret;
status = wmi_evaluate_method(LENOVO_GET_BIOS_SELECTIONS_GUID,
0, 0, &input, &output);
-
- if (ACPI_FAILURE(status)) {
- kfree(output.pointer);
+ if (ACPI_FAILURE(status))
return -EIO;
- }
- ret = tlmi_extract_output_string(&output, value);
- kfree(output.pointer);
+ obj = output.pointer;
+ if (!obj)
+ return -ENODATA;
+
+ ret = tlmi_extract_output_string(obj, value);
+ kfree(obj);
+
return ret;
}
@@ -993,7 +989,7 @@ static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *a
char *item, *value;
int ret;
- ret = tlmi_setting(setting->index, &item, LENOVO_BIOS_SETTING_GUID);
+ ret = tlmi_setting(setting->wdev, setting->index, &item);
if (ret)
return ret;
@@ -1586,7 +1582,7 @@ static struct tlmi_pwd_setting *tlmi_create_auth(const char *pwd_type,
return new_pwd;
}
-static int tlmi_analyze(void)
+static int tlmi_analyze(struct wmi_device *wdev)
{
int i, ret;
@@ -1623,7 +1619,7 @@ static int tlmi_analyze(void)
char *item = NULL;
tlmi_priv.setting[i] = NULL;
- ret = tlmi_setting(i, &item, LENOVO_BIOS_SETTING_GUID);
+ ret = tlmi_setting(wdev, i, &item);
if (ret)
break;
if (!item)
@@ -1646,6 +1642,7 @@ static int tlmi_analyze(void)
kfree(item);
goto fail_clear_attr;
}
+ setting->wdev = wdev;
setting->index = i;
strscpy(setting->display_name, item);
/* If BIOS selections supported, load those */
@@ -1664,7 +1661,7 @@ static int tlmi_analyze(void)
*/
char *optitem, *optstart, *optend;
- if (!tlmi_setting(setting->index, &optitem, LENOVO_BIOS_SETTING_GUID)) {
+ if (!tlmi_setting(setting->wdev, setting->index, &optitem)) {
optstart = strstr(optitem, "[Optional:");
if (optstart) {
optstart += strlen("[Optional:");
@@ -1789,7 +1786,7 @@ static int tlmi_probe(struct wmi_device *wdev, const void *context)
{
int ret;
- ret = tlmi_analyze();
+ ret = tlmi_analyze(wdev);
if (ret)
return ret;
diff --git a/drivers/platform/x86/think-lmi.h b/drivers/platform/x86/think-lmi.h
index f267d8b46957..a80452482227 100644
--- a/drivers/platform/x86/think-lmi.h
+++ b/drivers/platform/x86/think-lmi.h
@@ -4,6 +4,7 @@
#define _THINK_LMI_H_
#include <linux/types.h>
+#include <linux/wmi.h>
#define TLMI_SETTINGS_COUNT 256
#define TLMI_SETTINGS_MAXLEN 512
@@ -87,6 +88,7 @@ struct tlmi_pwd_setting {
/* Attribute setting details */
struct tlmi_attr_setting {
struct kobject kobj;
+ struct wmi_device *wdev;
int index;
char display_name[TLMI_SETTINGS_MAXLEN];
char *possible_values;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 1cc91173e012..0384cf311878 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -39,7 +39,6 @@
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dmi.h>
-#include <linux/fb.h>
#include <linux/freezer.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
@@ -368,9 +367,6 @@ static struct {
u32 beep_needs_two_args:1;
u32 mixer_no_level_control:1;
u32 battery_force_primary:1;
- u32 input_device_registered:1;
- u32 platform_drv_registered:1;
- u32 sensors_pdrv_registered:1;
u32 hotkey_poll_active:1;
u32 has_adaptive_kbd:1;
u32 kbd_lang:1;
@@ -8513,7 +8509,7 @@ static void fan_watchdog_reset(void)
if (fan_watchdog_maxinterval > 0 &&
tpacpi_lifecycle != TPACPI_LIFE_EXITING)
mod_delayed_work(tpacpi_wq, &fan_watchdog_task,
- msecs_to_jiffies(fan_watchdog_maxinterval * 1000));
+ secs_to_jiffies(fan_watchdog_maxinterval));
else
cancel_delayed_work(&fan_watchdog_task);
}
@@ -11817,36 +11813,18 @@ MODULE_PARM_DESC(profile_force, "Force profile mode. -1=off, 1=MMC, 2=PSC");
static void thinkpad_acpi_module_exit(void)
{
- struct ibm_struct *ibm, *itmp;
-
tpacpi_lifecycle = TPACPI_LIFE_EXITING;
- if (tpacpi_hwmon)
- hwmon_device_unregister(tpacpi_hwmon);
- if (tp_features.sensors_pdrv_registered)
+ if (tpacpi_sensors_pdev) {
platform_driver_unregister(&tpacpi_hwmon_pdriver);
- if (tp_features.platform_drv_registered)
- platform_driver_unregister(&tpacpi_pdriver);
-
- list_for_each_entry_safe_reverse(ibm, itmp,
- &tpacpi_all_drivers,
- all_drivers) {
- ibm_exit(ibm);
+ platform_device_unregister(tpacpi_sensors_pdev);
}
- dbg_printk(TPACPI_DBG_INIT, "finished subdriver exit path...\n");
-
- if (tpacpi_inputdev) {
- if (tp_features.input_device_registered)
- input_unregister_device(tpacpi_inputdev);
- else
- input_free_device(tpacpi_inputdev);
+ if (tpacpi_pdev) {
+ platform_driver_unregister(&tpacpi_pdriver);
+ platform_device_unregister(tpacpi_pdev);
}
- if (tpacpi_sensors_pdev)
- platform_device_unregister(tpacpi_sensors_pdev);
- if (tpacpi_pdev)
- platform_device_unregister(tpacpi_pdev);
if (proc_dir)
remove_proc_entry(TPACPI_PROC_DIR, acpi_root_dir);
if (tpacpi_wq)
@@ -11858,11 +11836,76 @@ static void thinkpad_acpi_module_exit(void)
kfree(thinkpad_id.nummodel_str);
}
+static void tpacpi_subdrivers_release(void *data)
+{
+ struct ibm_struct *ibm, *itmp;
+
+ list_for_each_entry_safe_reverse(ibm, itmp, &tpacpi_all_drivers, all_drivers)
+ ibm_exit(ibm);
+
+ dbg_printk(TPACPI_DBG_INIT, "finished subdriver exit path...\n");
+}
+
+static int __init tpacpi_pdriver_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = devm_mutex_init(&pdev->dev, &tpacpi_inputdev_send_mutex);
+ if (ret)
+ return ret;
+
+ tpacpi_inputdev = devm_input_allocate_device(&pdev->dev);
+ if (!tpacpi_inputdev)
+ return -ENOMEM;
+
+ tpacpi_inputdev->name = "ThinkPad Extra Buttons";
+ tpacpi_inputdev->phys = TPACPI_DRVR_NAME "/input0";
+ tpacpi_inputdev->id.bustype = BUS_HOST;
+ tpacpi_inputdev->id.vendor = thinkpad_id.vendor;
+ tpacpi_inputdev->id.product = TPACPI_HKEY_INPUT_PRODUCT;
+ tpacpi_inputdev->id.version = TPACPI_HKEY_INPUT_VERSION;
+ tpacpi_inputdev->dev.parent = &tpacpi_pdev->dev;
+
+ /* Init subdriver dependencies */
+ tpacpi_detect_brightness_capabilities();
+
+ /* Init subdrivers */
+ for (unsigned int i = 0; i < ARRAY_SIZE(ibms_init); i++) {
+ ret = ibm_init(&ibms_init[i]);
+ if (ret >= 0 && *ibms_init[i].param)
+ ret = ibms_init[i].data->write(ibms_init[i].param);
+ if (ret < 0) {
+ tpacpi_subdrivers_release(NULL);
+ return ret;
+ }
+ }
+
+ ret = devm_add_action_or_reset(&pdev->dev, tpacpi_subdrivers_release, NULL);
+ if (ret)
+ return ret;
+
+ ret = input_register_device(tpacpi_inputdev);
+ if (ret < 0)
+ pr_err("unable to register input device\n");
+
+ return ret;
+}
+
+static int __init tpacpi_hwmon_pdriver_probe(struct platform_device *pdev)
+{
+ tpacpi_hwmon = devm_hwmon_device_register_with_groups(
+ &tpacpi_sensors_pdev->dev, TPACPI_NAME, NULL, tpacpi_hwmon_groups);
+
+ if (IS_ERR(tpacpi_hwmon))
+ pr_err("unable to register hwmon device\n");
+
+ return PTR_ERR_OR_ZERO(tpacpi_hwmon);
+}
static int __init thinkpad_acpi_module_init(void)
{
const struct dmi_system_id *dmi_id;
- int ret, i;
+ int ret;
acpi_object_type obj_type;
tpacpi_lifecycle = TPACPI_LIFE_INIT;
@@ -11922,93 +11965,29 @@ static int __init thinkpad_acpi_module_init(void)
tp_features.quirks = dmi_id->driver_data;
/* Device initialization */
- tpacpi_pdev = platform_device_register_simple(TPACPI_DRVR_NAME, PLATFORM_DEVID_NONE,
- NULL, 0);
+ tpacpi_pdev = platform_create_bundle(&tpacpi_pdriver, tpacpi_pdriver_probe,
+ NULL, 0, NULL, 0);
if (IS_ERR(tpacpi_pdev)) {
ret = PTR_ERR(tpacpi_pdev);
tpacpi_pdev = NULL;
- pr_err("unable to register platform device\n");
+ pr_err("unable to register platform device/driver bundle\n");
thinkpad_acpi_module_exit();
return ret;
}
- tpacpi_sensors_pdev = platform_device_register_simple(
- TPACPI_HWMON_DRVR_NAME,
- PLATFORM_DEVID_NONE, NULL, 0);
+
+ tpacpi_sensors_pdev = platform_create_bundle(&tpacpi_hwmon_pdriver,
+ tpacpi_hwmon_pdriver_probe,
+ NULL, 0, NULL, 0);
if (IS_ERR(tpacpi_sensors_pdev)) {
ret = PTR_ERR(tpacpi_sensors_pdev);
tpacpi_sensors_pdev = NULL;
- pr_err("unable to register hwmon platform device\n");
+ pr_err("unable to register hwmon platform device/driver bundle\n");
thinkpad_acpi_module_exit();
return ret;
}
- mutex_init(&tpacpi_inputdev_send_mutex);
- tpacpi_inputdev = input_allocate_device();
- if (!tpacpi_inputdev) {
- thinkpad_acpi_module_exit();
- return -ENOMEM;
- } else {
- /* Prepare input device, but don't register */
- tpacpi_inputdev->name = "ThinkPad Extra Buttons";
- tpacpi_inputdev->phys = TPACPI_DRVR_NAME "/input0";
- tpacpi_inputdev->id.bustype = BUS_HOST;
- tpacpi_inputdev->id.vendor = thinkpad_id.vendor;
- tpacpi_inputdev->id.product = TPACPI_HKEY_INPUT_PRODUCT;
- tpacpi_inputdev->id.version = TPACPI_HKEY_INPUT_VERSION;
- tpacpi_inputdev->dev.parent = &tpacpi_pdev->dev;
- }
-
- /* Init subdriver dependencies */
- tpacpi_detect_brightness_capabilities();
-
- /* Init subdrivers */
- for (i = 0; i < ARRAY_SIZE(ibms_init); i++) {
- ret = ibm_init(&ibms_init[i]);
- if (ret >= 0 && *ibms_init[i].param)
- ret = ibms_init[i].data->write(ibms_init[i].param);
- if (ret < 0) {
- thinkpad_acpi_module_exit();
- return ret;
- }
- }
-
tpacpi_lifecycle = TPACPI_LIFE_RUNNING;
- ret = platform_driver_register(&tpacpi_pdriver);
- if (ret) {
- pr_err("unable to register main platform driver\n");
- thinkpad_acpi_module_exit();
- return ret;
- }
- tp_features.platform_drv_registered = 1;
-
- ret = platform_driver_register(&tpacpi_hwmon_pdriver);
- if (ret) {
- pr_err("unable to register hwmon platform driver\n");
- thinkpad_acpi_module_exit();
- return ret;
- }
- tp_features.sensors_pdrv_registered = 1;
-
- tpacpi_hwmon = hwmon_device_register_with_groups(
- &tpacpi_sensors_pdev->dev, TPACPI_NAME, NULL, tpacpi_hwmon_groups);
- if (IS_ERR(tpacpi_hwmon)) {
- ret = PTR_ERR(tpacpi_hwmon);
- tpacpi_hwmon = NULL;
- pr_err("unable to register hwmon device\n");
- thinkpad_acpi_module_exit();
- return ret;
- }
-
- ret = input_register_device(tpacpi_inputdev);
- if (ret < 0) {
- pr_err("unable to register input device\n");
- thinkpad_acpi_module_exit();
- return ret;
- } else {
- tp_features.input_device_registered = 1;
- }
-
return 0;
}
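For reference, platform_create_bundle() as used above registers the platform driver via platform_driver_probe() and creates the matching platform device in one call. A minimal sketch of the pattern with illustrative names, not the thinkpad_acpi code itself:

/* Minimal sketch of the platform_create_bundle() pattern; names are illustrative. */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_device *example_pdev;

static int __init example_probe(struct platform_device *pdev)
{
        dev_info(&pdev->dev, "probed\n");
        return 0;
}

static struct platform_driver example_pdriver = {
        .driver = {
                .name = "example-bundle",
        },
};

static int __init example_init(void)
{
        /* Registers the driver, creates the device and runs example_probe(). */
        example_pdev = platform_create_bundle(&example_pdriver, example_probe,
                                              NULL, 0, NULL, 0);
        return PTR_ERR_OR_ZERO(example_pdev);
}
module_init(example_init);

static void __exit example_exit(void)
{
        platform_device_unregister(example_pdev);
        platform_driver_unregister(&example_pdriver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");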
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 646370bd6b03..e46453750d5f 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -123,24 +123,6 @@ static const void *find_guid_context(struct wmi_block *wblock,
return NULL;
}
-static acpi_status wmi_method_enable(struct wmi_block *wblock, bool enable)
-{
- struct guid_block *block;
- char method[5];
- acpi_status status;
- acpi_handle handle;
-
- block = &wblock->gblock;
- handle = wblock->acpi_device->handle;
-
- snprintf(method, 5, "WE%02X", block->notify_id);
- status = acpi_execute_simple_method(handle, method, enable);
- if (status == AE_NOT_FOUND)
- return AE_OK;
-
- return status;
-}
-
#define WMI_ACPI_METHOD_NAME_SIZE 5
static inline void get_acpi_method_name(const struct wmi_block *wblock,
@@ -184,6 +166,44 @@ static int wmidev_match_guid(struct device *dev, const void *data)
static const struct bus_type wmi_bus_type;
+static const struct device_type wmi_type_event;
+
+static const struct device_type wmi_type_method;
+
+static int wmi_device_enable(struct wmi_device *wdev, bool enable)
+{
+ struct wmi_block *wblock = container_of(wdev, struct wmi_block, dev);
+ char method[WMI_ACPI_METHOD_NAME_SIZE];
+ acpi_handle handle;
+ acpi_status status;
+
+ if (!(wblock->gblock.flags & ACPI_WMI_EXPENSIVE))
+ return 0;
+
+ if (wblock->dev.dev.type == &wmi_type_method)
+ return 0;
+
+ if (wblock->dev.dev.type == &wmi_type_event)
+ snprintf(method, sizeof(method), "WE%02X", wblock->gblock.notify_id);
+ else
+ get_acpi_method_name(wblock, 'C', method);
+
+ /*
+ * Not all WMI devices marked as expensive actually implement the
+ * necessary ACPI method. Silently ignore a missing method to match
+ * the behaviour of the Windows driver.
+ */
+ status = acpi_get_handle(wblock->acpi_device->handle, method, &handle);
+ if (ACPI_FAILURE(status))
+ return 0;
+
+ status = acpi_execute_simple_method(handle, NULL, enable);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ return 0;
+}
+
static struct wmi_device *wmi_find_device_by_guid(const char *guid_string)
{
struct device *dev;
@@ -337,10 +357,8 @@ static acpi_status __query_block(struct wmi_block *wblock, u8 instance,
{
struct guid_block *block;
acpi_handle handle;
- acpi_status status, wc_status = AE_ERROR;
struct acpi_object_list input;
union acpi_object wq_params[1];
- char wc_method[WMI_ACPI_METHOD_NAME_SIZE];
char method[WMI_ACPI_METHOD_NAME_SIZE];
if (!out)
@@ -364,40 +382,9 @@ static acpi_status __query_block(struct wmi_block *wblock, u8 instance,
if (instance == 0 && test_bit(WMI_READ_TAKES_NO_ARGS, &wblock->flags))
input.count = 0;
- /*
- * If ACPI_WMI_EXPENSIVE, call the relevant WCxx method first to
- * enable collection.
- */
- if (block->flags & ACPI_WMI_EXPENSIVE) {
- get_acpi_method_name(wblock, 'C', wc_method);
-
- /*
- * Some GUIDs break the specification by declaring themselves
- * expensive, but have no corresponding WCxx method. So we
- * should not fail if this happens.
- */
- wc_status = acpi_execute_simple_method(handle, wc_method, 1);
- }
-
get_acpi_method_name(wblock, 'Q', method);
- status = acpi_evaluate_object(handle, method, &input, out);
-
- /*
- * If ACPI_WMI_EXPENSIVE, call the relevant WCxx method, even if
- * the WQxx method failed - we should disable collection anyway.
- */
- if ((block->flags & ACPI_WMI_EXPENSIVE) && ACPI_SUCCESS(wc_status)) {
- /*
- * Ignore whether this WCxx call succeeds or not since
- * the previously executed WQxx method call might have
- * succeeded, and returning the failing status code
- * of this call would throw away the result of the WQxx
- * call, potentially leaking memory.
- */
- acpi_execute_simple_method(handle, wc_method, 0);
- }
- return status;
+ return acpi_evaluate_object(handle, method, &input, out);
}
/**
@@ -421,9 +408,15 @@ acpi_status wmi_query_block(const char *guid_string, u8 instance,
if (IS_ERR(wdev))
return AE_ERROR;
+ if (wmi_device_enable(wdev, true) < 0)
+ dev_warn(&wdev->dev, "Failed to enable device\n");
+
wblock = container_of(wdev, struct wmi_block, dev);
status = __query_block(wblock, instance, out);
+ if (wmi_device_enable(wdev, false) < 0)
+ dev_warn(&wdev->dev, "Failed to disable device\n");
+
wmi_device_put(wdev);
return status;
@@ -470,7 +463,14 @@ acpi_status wmi_set_block(const char *guid_string, u8 instance, const struct acp
if (IS_ERR(wdev))
return AE_ERROR;
+ if (wmi_device_enable(wdev, true) < 0)
+ dev_warn(&wdev->dev, "Failed to enable device\n");
+
status = wmidev_block_set(wdev, instance, in);
+
+ if (wmi_device_enable(wdev, false) < 0)
+ dev_warn(&wdev->dev, "Failed to disable device\n");
+
wmi_device_put(wdev);
return status;
@@ -551,7 +551,7 @@ acpi_status wmi_install_notify_handler(const char *guid,
wblock->handler = handler;
wblock->handler_data = data;
- if (ACPI_FAILURE(wmi_method_enable(wblock, true)))
+ if (wmi_device_enable(wdev, true) < 0)
dev_warn(&wblock->dev.dev, "Failed to enable device\n");
status = AE_OK;
@@ -588,7 +588,7 @@ acpi_status wmi_remove_notify_handler(const char *guid)
if (!wblock->handler) {
status = AE_NULL_ENTRY;
} else {
- if (ACPI_FAILURE(wmi_method_enable(wblock, false)))
+ if (wmi_device_enable(wdev, false) < 0)
dev_warn(&wblock->dev.dev, "Failed to disable device\n");
wblock->handler = NULL;
@@ -821,11 +821,19 @@ static int wmi_dev_match(struct device *dev, const struct device_driver *driver)
return 0;
}
+static void wmi_dev_disable(void *data)
+{
+ struct device *dev = data;
+
+ if (wmi_device_enable(to_wmi_device(dev), false) < 0)
+ dev_warn(dev, "Failed to disable device\n");
+}
+
static int wmi_dev_probe(struct device *dev)
{
struct wmi_block *wblock = dev_to_wblock(dev);
struct wmi_driver *wdriver = to_wmi_driver(dev->driver);
- int ret = 0;
+ int ret;
/* Some older WMI drivers will break if instantiated multiple times,
* so they are blocked from probing WMI devices with a duplicated GUID.
@@ -844,18 +852,22 @@ static int wmi_dev_probe(struct device *dev)
return -ENODEV;
}
- if (ACPI_FAILURE(wmi_method_enable(wblock, true)))
+ if (wmi_device_enable(to_wmi_device(dev), true) < 0)
dev_warn(dev, "failed to enable device -- probing anyway\n");
+ /*
+ * Make sure that all other devres-managed resources are released first,
+ * since some of them might still need to access the underlying WMI device.
+ */
+ ret = devm_add_action_or_reset(dev, wmi_dev_disable, dev);
+ if (ret < 0)
+ return ret;
+
if (wdriver->probe) {
ret = wdriver->probe(to_wmi_device(dev),
find_guid_context(wblock, wdriver));
- if (ret) {
- if (ACPI_FAILURE(wmi_method_enable(wblock, false)))
- dev_warn(dev, "Failed to disable device\n");
-
+ if (ret)
return ret;
- }
}
down_write(&wblock->notify_lock);
@@ -876,9 +888,6 @@ static void wmi_dev_remove(struct device *dev)
if (wdriver->remove)
wdriver->remove(to_wmi_device(dev));
-
- if (ACPI_FAILURE(wmi_method_enable(wblock, false)))
- dev_warn(dev, "failed to disable device\n");
}
static void wmi_dev_shutdown(struct device *dev)
@@ -902,7 +911,11 @@ static void wmi_dev_shutdown(struct device *dev)
if (wdriver->shutdown)
wdriver->shutdown(to_wmi_device(dev));
- if (ACPI_FAILURE(wmi_method_enable(wblock, false)))
+ /*
+ * We still need to disable the WMI device here since devres-managed
+ * actions like wmi_dev_disable() are not executed during shutdown.
+ */
+ if (wmi_device_enable(to_wmi_device(dev), false) < 0)
dev_warn(dev, "Failed to disable device\n");
}
}
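The wmi_dev_probe() change above replaces the explicit disable in wmi_dev_remove() with a devres action; devm_add_action_or_reset() runs its callback when the device is unbound, after devres resources registered later in probe have been released. A minimal sketch of the same pattern with illustrative names:

/* Minimal sketch of the devres action pattern; names are illustrative. */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static void example_hw_disable(void *data)
{
        struct device *dev = data;

        dev_info(dev, "disabled on unbind\n");
}

static int example_probe(struct platform_device *pdev)
{
        int ret;

        /* Enable the hardware here ... */

        /*
         * Register the matching disable as a devres action: it runs when the
         * device is unbound (or if this call itself fails), after devres
         * resources registered later in probe have been released.
         */
        ret = devm_add_action_or_reset(&pdev->dev, example_hw_disable, &pdev->dev);
        if (ret)
                return ret;

        return 0;
}

static struct platform_driver example_driver = {
        .probe = example_probe,
        .driver = {
                .name = "example-devres",
        },
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");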
diff --git a/drivers/platform/x86/x86-android-tablets/Kconfig b/drivers/platform/x86/x86-android-tablets/Kconfig
index a67bddc43007..193da15ee01c 100644
--- a/drivers/platform/x86/x86-android-tablets/Kconfig
+++ b/drivers/platform/x86/x86-android-tablets/Kconfig
@@ -10,6 +10,7 @@ config X86_ANDROID_TABLETS
depends on ACPI && EFI && PCI
select NEW_LEDS
select LEDS_CLASS
+ select POWER_SUPPLY
help
X86 tablets which ship with Android as (part of) the factory image
typically have various problems with their DSDTs. The factory kernels
diff --git a/drivers/pmdomain/Kconfig b/drivers/pmdomain/Kconfig
index 23c64851a5b0..91f04ace35d4 100644
--- a/drivers/pmdomain/Kconfig
+++ b/drivers/pmdomain/Kconfig
@@ -16,6 +16,7 @@ source "drivers/pmdomain/st/Kconfig"
source "drivers/pmdomain/starfive/Kconfig"
source "drivers/pmdomain/sunxi/Kconfig"
source "drivers/pmdomain/tegra/Kconfig"
+source "drivers/pmdomain/thead/Kconfig"
source "drivers/pmdomain/ti/Kconfig"
source "drivers/pmdomain/xilinx/Kconfig"
diff --git a/drivers/pmdomain/Makefile b/drivers/pmdomain/Makefile
index a68ece2f4c68..7030f44a49df 100644
--- a/drivers/pmdomain/Makefile
+++ b/drivers/pmdomain/Makefile
@@ -14,6 +14,7 @@ obj-y += st/
obj-y += starfive/
obj-y += sunxi/
obj-y += tegra/
+obj-y += thead/
obj-y += ti/
obj-y += xilinx/
obj-y += core.o governor.o
diff --git a/drivers/pmdomain/amlogic/meson-secure-pwrc.c b/drivers/pmdomain/amlogic/meson-secure-pwrc.c
index 42ce41a2fe3a..ff76ea36835e 100644
--- a/drivers/pmdomain/amlogic/meson-secure-pwrc.c
+++ b/drivers/pmdomain/amlogic/meson-secure-pwrc.c
@@ -221,7 +221,7 @@ static const struct meson_secure_pwrc_domain_desc t7_pwrc_domains[] = {
SEC_PD(T7_VI_CLK2, 0),
/* ETH is for ethernet online wakeup, and should be always on */
SEC_PD(T7_ETH, GENPD_FLAG_ALWAYS_ON),
- SEC_PD(T7_ISP, 0),
+ TOP_PD(T7_ISP, 0, PWRC_T7_MIPI_ISP_ID),
SEC_PD(T7_MIPI_ISP, 0),
TOP_PD(T7_GDC, 0, PWRC_T7_NIC3_ID),
TOP_PD(T7_DEWARP, 0, PWRC_T7_NIC3_ID),
diff --git a/drivers/pmdomain/arm/scmi_pm_domain.c b/drivers/pmdomain/arm/scmi_pm_domain.c
index 86b531e15b85..2a213c218126 100644
--- a/drivers/pmdomain/arm/scmi_pm_domain.c
+++ b/drivers/pmdomain/arm/scmi_pm_domain.c
@@ -24,8 +24,7 @@ struct scmi_pm_domain {
static int scmi_pd_power(struct generic_pm_domain *domain, bool power_on)
{
- int ret;
- u32 state, ret_state;
+ u32 state;
struct scmi_pm_domain *pd = to_scmi_pd(domain);
if (power_on)
@@ -33,13 +32,7 @@ static int scmi_pd_power(struct generic_pm_domain *domain, bool power_on)
else
state = SCMI_POWER_STATE_GENERIC_OFF;
- ret = power_ops->state_set(pd->ph, pd->domain, state);
- if (!ret)
- ret = power_ops->state_get(pd->ph, pd->domain, &ret_state);
- if (!ret && state != ret_state)
- return -EIO;
-
- return ret;
+ return power_ops->state_set(pd->ph, pd->domain, state);
}
static int scmi_pd_power_on(struct generic_pm_domain *domain)
diff --git a/drivers/pmdomain/bcm/bcm2835-power.c b/drivers/pmdomain/bcm/bcm2835-power.c
index d2f0233cb620..d3cd816979ac 100644
--- a/drivers/pmdomain/bcm/bcm2835-power.c
+++ b/drivers/pmdomain/bcm/bcm2835-power.c
@@ -520,6 +520,7 @@ bcm2835_init_power_domain(struct bcm2835_power *power,
}
dom->base.name = name;
+ dom->base.flags = GENPD_FLAG_ACTIVE_WAKEUP;
dom->base.power_on = bcm2835_power_pd_power_on;
dom->base.power_off = bcm2835_power_pd_power_off;
diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
index 6c94137865c9..9b2f28b34bb5 100644
--- a/drivers/pmdomain/core.c
+++ b/drivers/pmdomain/core.c
@@ -697,6 +697,37 @@ bool dev_pm_genpd_get_hwmode(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_get_hwmode);
+/**
+ * dev_pm_genpd_rpm_always_on() - Control whether the PM domain can be powered off.
+ *
+ * @dev: Device for which the PM domain may need to stay on.
+ * @on: Value to set or unset for the condition.
+ *
+ * For some use cases a consumer driver requires its device to remain powered
+ * on from the PM domain perspective during runtime. This function allows that
+ * behaviour to be dynamically controlled for a device attached to a genpd.
+ *
+ * It is assumed that the callers guarantee that the genpd is not detached
+ * while this routine is being called.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+int dev_pm_genpd_rpm_always_on(struct device *dev, bool on)
+{
+ struct generic_pm_domain *genpd;
+
+ genpd = dev_to_genpd_safe(dev);
+ if (!genpd)
+ return -ENODEV;
+
+ genpd_lock(genpd);
+ dev_gpd_data(dev)->rpm_always_on = on;
+ genpd_unlock(genpd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_genpd_rpm_always_on);
+
static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
unsigned int state_idx = genpd->state_idx;
@@ -868,6 +899,10 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
if (!pm_runtime_suspended(pdd->dev) ||
irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
not_suspended++;
+
+ /* The device may need its PM domain to stay powered on. */
+ if (to_gpd_data(pdd)->rpm_always_on)
+ return -EBUSY;
}
if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
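A minimal sketch of how a consumer driver might use the new dev_pm_genpd_rpm_always_on() helper, assuming its declaration is exported via <linux/pm_domain.h> (the header change is not shown here) and using an illustrative driver name:

/* Minimal usage sketch for dev_pm_genpd_rpm_always_on(); names are illustrative. */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

static int example_consumer_probe(struct platform_device *pdev)
{
        int ret;

        /* Keep the parent PM domain powered even while runtime suspended. */
        ret = dev_pm_genpd_rpm_always_on(&pdev->dev, true);
        if (ret)
                return dev_err_probe(&pdev->dev, ret, "no PM domain attached\n");

        ret = devm_pm_runtime_enable(&pdev->dev);
        if (ret)
                return ret;

        /* Passing false later allows the domain to be powered off again. */
        return 0;
}

static struct platform_driver example_consumer_driver = {
        .probe = example_consumer_probe,
        .driver = {
                .name = "example-genpd-consumer",
        },
};
module_platform_driver(example_consumer_driver);

MODULE_LICENSE("GPL");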
diff --git a/drivers/pmdomain/imx/gpcv2.c b/drivers/pmdomain/imx/gpcv2.c
index 958d34d4821b..105fcaf13a34 100644
--- a/drivers/pmdomain/imx/gpcv2.c
+++ b/drivers/pmdomain/imx/gpcv2.c
@@ -1361,7 +1361,7 @@ static int imx_pgc_domain_probe(struct platform_device *pdev)
}
if (IS_ENABLED(CONFIG_LOCKDEP) &&
- of_property_read_bool(domain->dev->of_node, "power-domains"))
+ of_property_present(domain->dev->of_node, "power-domains"))
lockdep_set_subclass(&domain->genpd.mlock, 1);
ret = of_genpd_add_provider_simple(domain->dev->of_node,
diff --git a/drivers/pmdomain/renesas/rcar-sysc.c b/drivers/pmdomain/renesas/rcar-sysc.c
index b99326917330..dce1a6d37e80 100644
--- a/drivers/pmdomain/renesas/rcar-sysc.c
+++ b/drivers/pmdomain/renesas/rcar-sysc.c
@@ -434,8 +434,6 @@ static int __init rcar_sysc_pd_init(void)
}
error = of_genpd_add_provider_onecell(np, &domains->onecell_data);
- if (!error)
- fwnode_dev_initialized(of_fwnode_handle(np), true);
out_put:
of_node_put(np);
diff --git a/drivers/pmdomain/rockchip/Kconfig b/drivers/pmdomain/rockchip/Kconfig
index b0d70f1a8439..218d43186e5b 100644
--- a/drivers/pmdomain/rockchip/Kconfig
+++ b/drivers/pmdomain/rockchip/Kconfig
@@ -4,6 +4,8 @@ if ARCH_ROCKCHIP || COMPILE_TEST
config ROCKCHIP_PM_DOMAINS
bool "Rockchip generic power domain"
depends on PM
+ depends on HAVE_ARM_SMCCC_DISCOVERY
+ depends on REGULATOR
select PM_GENERIC_DOMAINS
help
Say y here to enable power domain support.
diff --git a/drivers/pmdomain/rockchip/pm-domains.c b/drivers/pmdomain/rockchip/pm-domains.c
index cb0f93800138..03bcf79a461f 100644
--- a/drivers/pmdomain/rockchip/pm-domains.c
+++ b/drivers/pmdomain/rockchip/pm-domains.c
@@ -5,6 +5,7 @@
* Copyright (c) 2015 ROCKCHIP, Co. Ltd.
*/
+#include <linux/arm-smccc.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/err.h>
@@ -18,8 +19,10 @@
#include <linux/of_clk.h>
#include <linux/clk.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/mfd/syscon.h>
#include <soc/rockchip/pm_domains.h>
+#include <soc/rockchip/rockchip_sip.h>
#include <dt-bindings/power/px30-power.h>
#include <dt-bindings/power/rockchip,rv1126-power.h>
#include <dt-bindings/power/rk3036-power.h>
@@ -44,6 +47,7 @@ struct rockchip_domain_info {
int idle_mask;
int ack_mask;
bool active_wakeup;
+ bool need_regulator;
int pwr_w_mask;
int req_w_mask;
int clk_ungate_mask;
@@ -92,6 +96,8 @@ struct rockchip_pm_domain {
u32 *qos_save_regs[MAX_QOS_REGS_NUM];
int num_clks;
struct clk_bulk_data *clks;
+ struct device_node *node;
+ struct regulator *supply;
};
struct rockchip_pmu {
@@ -129,7 +135,7 @@ struct rockchip_pmu {
.active_wakeup = wakeup, \
}
-#define DOMAIN_M_O_R(_name, p_offset, pwr, status, m_offset, m_status, r_status, r_offset, req, idle, ack, wakeup) \
+#define DOMAIN_M_O_R(_name, p_offset, pwr, status, m_offset, m_status, r_status, r_offset, req, idle, ack, wakeup, regulator) \
{ \
.name = _name, \
.pwr_offset = p_offset, \
@@ -145,6 +151,7 @@ struct rockchip_pmu {
.idle_mask = (idle), \
.ack_mask = (ack), \
.active_wakeup = wakeup, \
+ .need_regulator = regulator, \
}
#define DOMAIN_M_O_R_G(_name, p_offset, pwr, status, m_offset, m_status, r_status, r_offset, req, idle, ack, g_mask, wakeup) \
@@ -303,8 +310,8 @@ void rockchip_pmu_unblock(void)
}
EXPORT_SYMBOL_GPL(rockchip_pmu_unblock);
-#define DOMAIN_RK3588(name, p_offset, pwr, status, m_offset, m_status, r_status, r_offset, req, idle, wakeup) \
- DOMAIN_M_O_R(name, p_offset, pwr, status, m_offset, m_status, r_status, r_offset, req, idle, idle, wakeup)
+#define DOMAIN_RK3588(name, p_offset, pwr, status, m_offset, m_status, r_status, r_offset, req, idle, wakeup, regulator) \
+ DOMAIN_M_O_R(name, p_offset, pwr, status, m_offset, m_status, r_status, r_offset, req, idle, idle, wakeup, regulator)
static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
{
@@ -533,16 +540,18 @@ error:
return ret;
}
-static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
- bool on)
+static int rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
+ bool on)
{
struct rockchip_pmu *pmu = pd->pmu;
struct generic_pm_domain *genpd = &pd->genpd;
u32 pd_pwr_offset = pd->info->pwr_offset;
bool is_on, is_mem_on = false;
+ struct arm_smccc_res res;
+ int ret;
if (pd->info->pwr_mask == 0)
- return;
+ return 0;
if (on && pd->info->mem_status_mask)
is_mem_on = rockchip_pmu_domain_is_mem_on(pd);
@@ -557,16 +566,28 @@ static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
wmb();
- if (is_mem_on && rockchip_pmu_domain_mem_reset(pd))
- return;
+ if (is_mem_on) {
+ ret = rockchip_pmu_domain_mem_reset(pd);
+ if (ret)
+ return ret;
+ }
- if (readx_poll_timeout_atomic(rockchip_pmu_domain_is_on, pd, is_on,
- is_on == on, 0, 10000)) {
- dev_err(pmu->dev,
- "failed to set domain '%s', val=%d\n",
- genpd->name, is_on);
- return;
+
+ ret = readx_poll_timeout_atomic(rockchip_pmu_domain_is_on, pd, is_on,
+ is_on == on, 0, 10000);
+ if (ret) {
+ dev_err(pmu->dev, "failed to set domain '%s' %s, val=%d\n",
+ genpd->name, on ? "on" : "off", is_on);
+ return ret;
}
+
+ /* Inform firmware to keep this pd on or off */
+ if (arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE)
+ arm_smccc_smc(ROCKCHIP_SIP_SUSPEND_MODE, ROCKCHIP_SLEEP_PD_CONFIG,
+ pmu->info->pwr_offset + pd_pwr_offset,
+ pd->info->pwr_mask, on, 0, 0, 0, &res);
+
+ return 0;
}
static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
@@ -574,54 +595,99 @@ static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
struct rockchip_pmu *pmu = pd->pmu;
int ret;
- mutex_lock(&pmu->mutex);
+ guard(mutex)(&pmu->mutex);
- if (rockchip_pmu_domain_is_on(pd) != power_on) {
- ret = clk_bulk_enable(pd->num_clks, pd->clks);
- if (ret < 0) {
- dev_err(pmu->dev, "failed to enable clocks\n");
- mutex_unlock(&pmu->mutex);
- return ret;
- }
+ if (rockchip_pmu_domain_is_on(pd) == power_on)
+ return 0;
- rockchip_pmu_ungate_clk(pd, true);
+ ret = clk_bulk_enable(pd->num_clks, pd->clks);
+ if (ret < 0) {
+ dev_err(pmu->dev, "failed to enable clocks\n");
+ return ret;
+ }
- if (!power_on) {
- rockchip_pmu_save_qos(pd);
+ rockchip_pmu_ungate_clk(pd, true);
- /* if powering down, idle request to NIU first */
- rockchip_pmu_set_idle_request(pd, true);
- }
+ if (!power_on) {
+ rockchip_pmu_save_qos(pd);
- rockchip_do_pmu_set_power_domain(pd, power_on);
+ /* if powering down, idle request to NIU first */
+ ret = rockchip_pmu_set_idle_request(pd, true);
+ if (ret < 0)
+ goto out;
+ }
- if (power_on) {
- /* if powering up, leave idle mode */
- rockchip_pmu_set_idle_request(pd, false);
+ ret = rockchip_do_pmu_set_power_domain(pd, power_on);
+ if (ret < 0)
+ goto out;
- rockchip_pmu_restore_qos(pd);
- }
+ if (power_on) {
+ /* if powering up, leave idle mode */
+ ret = rockchip_pmu_set_idle_request(pd, false);
+ if (ret < 0)
+ goto out;
- rockchip_pmu_ungate_clk(pd, false);
- clk_bulk_disable(pd->num_clks, pd->clks);
+ rockchip_pmu_restore_qos(pd);
}
- mutex_unlock(&pmu->mutex);
- return 0;
+out:
+ rockchip_pmu_ungate_clk(pd, false);
+ clk_bulk_disable(pd->num_clks, pd->clks);
+
+ return ret;
+}
+
+static int rockchip_pd_regulator_disable(struct rockchip_pm_domain *pd)
+{
+ return IS_ERR_OR_NULL(pd->supply) ? 0 : regulator_disable(pd->supply);
+}
+
+static int rockchip_pd_regulator_enable(struct rockchip_pm_domain *pd)
+{
+ struct rockchip_pmu *pmu = pd->pmu;
+
+ if (!pd->info->need_regulator)
+ return 0;
+
+ if (IS_ERR_OR_NULL(pd->supply)) {
+ pd->supply = devm_of_regulator_get(pmu->dev, pd->node, "domain");
+
+ if (IS_ERR(pd->supply))
+ return PTR_ERR(pd->supply);
+ }
+
+ return regulator_enable(pd->supply);
}
static int rockchip_pd_power_on(struct generic_pm_domain *domain)
{
struct rockchip_pm_domain *pd = to_rockchip_pd(domain);
+ int ret;
+
+ ret = rockchip_pd_regulator_enable(pd);
+ if (ret) {
+ dev_err(pd->pmu->dev, "Failed to enable supply: %d\n", ret);
+ return ret;
+ }
- return rockchip_pd_power(pd, true);
+ ret = rockchip_pd_power(pd, true);
+ if (ret)
+ rockchip_pd_regulator_disable(pd);
+
+ return ret;
}
static int rockchip_pd_power_off(struct generic_pm_domain *domain)
{
struct rockchip_pm_domain *pd = to_rockchip_pd(domain);
+ int ret;
- return rockchip_pd_power(pd, false);
+ ret = rockchip_pd_power(pd, false);
+ if (ret)
+ return ret;
+
+ rockchip_pd_regulator_disable(pd);
+ return ret;
}
static int rockchip_pd_attach_dev(struct generic_pm_domain *genpd,
@@ -702,6 +768,7 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
pd->info = pd_info;
pd->pmu = pmu;
+ pd->node = node;
pd->num_clks = of_clk_get_parent_count(node);
if (pd->num_clks > 0) {
@@ -1165,35 +1232,35 @@ static const struct rockchip_domain_info rk3576_pm_domains[] = {
};
static const struct rockchip_domain_info rk3588_pm_domains[] = {
- [RK3588_PD_GPU] = DOMAIN_RK3588("gpu", 0x0, BIT(0), 0, 0x0, 0, BIT(1), 0x0, BIT(0), BIT(0), false),
- [RK3588_PD_NPU] = DOMAIN_RK3588("npu", 0x0, BIT(1), BIT(1), 0x0, 0, 0, 0x0, 0, 0, false),
- [RK3588_PD_VCODEC] = DOMAIN_RK3588("vcodec", 0x0, BIT(2), BIT(2), 0x0, 0, 0, 0x0, 0, 0, false),
- [RK3588_PD_NPUTOP] = DOMAIN_RK3588("nputop", 0x0, BIT(3), 0, 0x0, BIT(11), BIT(2), 0x0, BIT(1), BIT(1), false),
- [RK3588_PD_NPU1] = DOMAIN_RK3588("npu1", 0x0, BIT(4), 0, 0x0, BIT(12), BIT(3), 0x0, BIT(2), BIT(2), false),
- [RK3588_PD_NPU2] = DOMAIN_RK3588("npu2", 0x0, BIT(5), 0, 0x0, BIT(13), BIT(4), 0x0, BIT(3), BIT(3), false),
- [RK3588_PD_VENC0] = DOMAIN_RK3588("venc0", 0x0, BIT(6), 0, 0x0, BIT(14), BIT(5), 0x0, BIT(4), BIT(4), false),
- [RK3588_PD_VENC1] = DOMAIN_RK3588("venc1", 0x0, BIT(7), 0, 0x0, BIT(15), BIT(6), 0x0, BIT(5), BIT(5), false),
- [RK3588_PD_RKVDEC0] = DOMAIN_RK3588("rkvdec0", 0x0, BIT(8), 0, 0x0, BIT(16), BIT(7), 0x0, BIT(6), BIT(6), false),
- [RK3588_PD_RKVDEC1] = DOMAIN_RK3588("rkvdec1", 0x0, BIT(9), 0, 0x0, BIT(17), BIT(8), 0x0, BIT(7), BIT(7), false),
- [RK3588_PD_VDPU] = DOMAIN_RK3588("vdpu", 0x0, BIT(10), 0, 0x0, BIT(18), BIT(9), 0x0, BIT(8), BIT(8), false),
- [RK3588_PD_RGA30] = DOMAIN_RK3588("rga30", 0x0, BIT(11), 0, 0x0, BIT(19), BIT(10), 0x0, 0, 0, false),
- [RK3588_PD_AV1] = DOMAIN_RK3588("av1", 0x0, BIT(12), 0, 0x0, BIT(20), BIT(11), 0x0, BIT(9), BIT(9), false),
- [RK3588_PD_VI] = DOMAIN_RK3588("vi", 0x0, BIT(13), 0, 0x0, BIT(21), BIT(12), 0x0, BIT(10), BIT(10), false),
- [RK3588_PD_FEC] = DOMAIN_RK3588("fec", 0x0, BIT(14), 0, 0x0, BIT(22), BIT(13), 0x0, 0, 0, false),
- [RK3588_PD_ISP1] = DOMAIN_RK3588("isp1", 0x0, BIT(15), 0, 0x0, BIT(23), BIT(14), 0x0, BIT(11), BIT(11), false),
- [RK3588_PD_RGA31] = DOMAIN_RK3588("rga31", 0x4, BIT(0), 0, 0x0, BIT(24), BIT(15), 0x0, BIT(12), BIT(12), false),
- [RK3588_PD_VOP] = DOMAIN_RK3588("vop", 0x4, BIT(1), 0, 0x0, BIT(25), BIT(16), 0x0, BIT(13) | BIT(14), BIT(13) | BIT(14), false),
- [RK3588_PD_VO0] = DOMAIN_RK3588("vo0", 0x4, BIT(2), 0, 0x0, BIT(26), BIT(17), 0x0, BIT(15), BIT(15), false),
- [RK3588_PD_VO1] = DOMAIN_RK3588("vo1", 0x4, BIT(3), 0, 0x0, BIT(27), BIT(18), 0x4, BIT(0), BIT(16), false),
- [RK3588_PD_AUDIO] = DOMAIN_RK3588("audio", 0x4, BIT(4), 0, 0x0, BIT(28), BIT(19), 0x4, BIT(1), BIT(17), false),
- [RK3588_PD_PHP] = DOMAIN_RK3588("php", 0x4, BIT(5), 0, 0x0, BIT(29), BIT(20), 0x4, BIT(5), BIT(21), false),
- [RK3588_PD_GMAC] = DOMAIN_RK3588("gmac", 0x4, BIT(6), 0, 0x0, BIT(30), BIT(21), 0x0, 0, 0, false),
- [RK3588_PD_PCIE] = DOMAIN_RK3588("pcie", 0x4, BIT(7), 0, 0x0, BIT(31), BIT(22), 0x0, 0, 0, true),
- [RK3588_PD_NVM] = DOMAIN_RK3588("nvm", 0x4, BIT(8), BIT(24), 0x4, 0, 0, 0x4, BIT(2), BIT(18), false),
- [RK3588_PD_NVM0] = DOMAIN_RK3588("nvm0", 0x4, BIT(9), 0, 0x4, BIT(1), BIT(23), 0x0, 0, 0, false),
- [RK3588_PD_SDIO] = DOMAIN_RK3588("sdio", 0x4, BIT(10), 0, 0x4, BIT(2), BIT(24), 0x4, BIT(3), BIT(19), false),
- [RK3588_PD_USB] = DOMAIN_RK3588("usb", 0x4, BIT(11), 0, 0x4, BIT(3), BIT(25), 0x4, BIT(4), BIT(20), true),
- [RK3588_PD_SDMMC] = DOMAIN_RK3588("sdmmc", 0x4, BIT(13), 0, 0x4, BIT(5), BIT(26), 0x0, 0, 0, false),
+ [RK3588_PD_GPU] = DOMAIN_RK3588("gpu", 0x0, BIT(0), 0, 0x0, 0, BIT(1), 0x0, BIT(0), BIT(0), false, true),
+ [RK3588_PD_NPU] = DOMAIN_RK3588("npu", 0x0, BIT(1), BIT(1), 0x0, 0, 0, 0x0, 0, 0, false, true),
+ [RK3588_PD_VCODEC] = DOMAIN_RK3588("vcodec", 0x0, BIT(2), BIT(2), 0x0, 0, 0, 0x0, 0, 0, false, false),
+ [RK3588_PD_NPUTOP] = DOMAIN_RK3588("nputop", 0x0, BIT(3), 0, 0x0, BIT(11), BIT(2), 0x0, BIT(1), BIT(1), false, false),
+ [RK3588_PD_NPU1] = DOMAIN_RK3588("npu1", 0x0, BIT(4), 0, 0x0, BIT(12), BIT(3), 0x0, BIT(2), BIT(2), false, false),
+ [RK3588_PD_NPU2] = DOMAIN_RK3588("npu2", 0x0, BIT(5), 0, 0x0, BIT(13), BIT(4), 0x0, BIT(3), BIT(3), false, false),
+ [RK3588_PD_VENC0] = DOMAIN_RK3588("venc0", 0x0, BIT(6), 0, 0x0, BIT(14), BIT(5), 0x0, BIT(4), BIT(4), false, false),
+ [RK3588_PD_VENC1] = DOMAIN_RK3588("venc1", 0x0, BIT(7), 0, 0x0, BIT(15), BIT(6), 0x0, BIT(5), BIT(5), false, false),
+ [RK3588_PD_RKVDEC0] = DOMAIN_RK3588("rkvdec0", 0x0, BIT(8), 0, 0x0, BIT(16), BIT(7), 0x0, BIT(6), BIT(6), false, false),
+ [RK3588_PD_RKVDEC1] = DOMAIN_RK3588("rkvdec1", 0x0, BIT(9), 0, 0x0, BIT(17), BIT(8), 0x0, BIT(7), BIT(7), false, false),
+ [RK3588_PD_VDPU] = DOMAIN_RK3588("vdpu", 0x0, BIT(10), 0, 0x0, BIT(18), BIT(9), 0x0, BIT(8), BIT(8), false, false),
+ [RK3588_PD_RGA30] = DOMAIN_RK3588("rga30", 0x0, BIT(11), 0, 0x0, BIT(19), BIT(10), 0x0, 0, 0, false, false),
+ [RK3588_PD_AV1] = DOMAIN_RK3588("av1", 0x0, BIT(12), 0, 0x0, BIT(20), BIT(11), 0x0, BIT(9), BIT(9), false, false),
+ [RK3588_PD_VI] = DOMAIN_RK3588("vi", 0x0, BIT(13), 0, 0x0, BIT(21), BIT(12), 0x0, BIT(10), BIT(10), false, false),
+ [RK3588_PD_FEC] = DOMAIN_RK3588("fec", 0x0, BIT(14), 0, 0x0, BIT(22), BIT(13), 0x0, 0, 0, false, false),
+ [RK3588_PD_ISP1] = DOMAIN_RK3588("isp1", 0x0, BIT(15), 0, 0x0, BIT(23), BIT(14), 0x0, BIT(11), BIT(11), false, false),
+ [RK3588_PD_RGA31] = DOMAIN_RK3588("rga31", 0x4, BIT(0), 0, 0x0, BIT(24), BIT(15), 0x0, BIT(12), BIT(12), false, false),
+ [RK3588_PD_VOP] = DOMAIN_RK3588("vop", 0x4, BIT(1), 0, 0x0, BIT(25), BIT(16), 0x0, BIT(13) | BIT(14), BIT(13) | BIT(14), false, false),
+ [RK3588_PD_VO0] = DOMAIN_RK3588("vo0", 0x4, BIT(2), 0, 0x0, BIT(26), BIT(17), 0x0, BIT(15), BIT(15), false, false),
+ [RK3588_PD_VO1] = DOMAIN_RK3588("vo1", 0x4, BIT(3), 0, 0x0, BIT(27), BIT(18), 0x4, BIT(0), BIT(16), false, false),
+ [RK3588_PD_AUDIO] = DOMAIN_RK3588("audio", 0x4, BIT(4), 0, 0x0, BIT(28), BIT(19), 0x4, BIT(1), BIT(17), false, false),
+ [RK3588_PD_PHP] = DOMAIN_RK3588("php", 0x4, BIT(5), 0, 0x0, BIT(29), BIT(20), 0x4, BIT(5), BIT(21), false, false),
+ [RK3588_PD_GMAC] = DOMAIN_RK3588("gmac", 0x4, BIT(6), 0, 0x0, BIT(30), BIT(21), 0x0, 0, 0, false, false),
+ [RK3588_PD_PCIE] = DOMAIN_RK3588("pcie", 0x4, BIT(7), 0, 0x0, BIT(31), BIT(22), 0x0, 0, 0, true, false),
+ [RK3588_PD_NVM] = DOMAIN_RK3588("nvm", 0x4, BIT(8), BIT(24), 0x4, 0, 0, 0x4, BIT(2), BIT(18), false, false),
+ [RK3588_PD_NVM0] = DOMAIN_RK3588("nvm0", 0x4, BIT(9), 0, 0x4, BIT(1), BIT(23), 0x0, 0, 0, false, false),
+ [RK3588_PD_SDIO] = DOMAIN_RK3588("sdio", 0x4, BIT(10), 0, 0x4, BIT(2), BIT(24), 0x4, BIT(3), BIT(19), false, false),
+ [RK3588_PD_USB] = DOMAIN_RK3588("usb", 0x4, BIT(11), 0, 0x4, BIT(3), BIT(25), 0x4, BIT(4), BIT(20), true, false),
+ [RK3588_PD_SDMMC] = DOMAIN_RK3588("sdmmc", 0x4, BIT(13), 0, 0x4, BIT(5), BIT(26), 0x0, 0, 0, false, false),
};
static const struct rockchip_pmu_info px30_pmu = {
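The reworked rockchip_pd_power() above uses guard(mutex) from <linux/cleanup.h>, which unlocks automatically when the enclosing scope is left, so the early returns need no explicit mutex_unlock(). A minimal sketch of the idiom with illustrative names:

/* Minimal sketch of scope-based locking with guard(mutex); names are illustrative. */
#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);
static int example_state;

static int example_update(int new_state)
{
        /* The lock is dropped automatically on every return path. */
        guard(mutex)(&example_lock);

        if (example_state == new_state)
                return 0;

        example_state = new_state;
        return 1;
}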
diff --git a/drivers/pmdomain/sunxi/sun20i-ppu.c b/drivers/pmdomain/sunxi/sun20i-ppu.c
index 8700f9dd5f75..9f002748d224 100644
--- a/drivers/pmdomain/sunxi/sun20i-ppu.c
+++ b/drivers/pmdomain/sunxi/sun20i-ppu.c
@@ -182,11 +182,26 @@ static const struct sun20i_ppu_desc sun20i_d1_ppu_desc = {
.num_domains = ARRAY_SIZE(sun20i_d1_ppu_pd_names),
};
+static const char *const sun8i_v853_ppu_pd_names[] = {
+ "RISCV",
+ "NPU",
+ "VE",
+};
+
+static const struct sun20i_ppu_desc sun8i_v853_ppu_desc = {
+ .names = sun8i_v853_ppu_pd_names,
+ .num_domains = ARRAY_SIZE(sun8i_v853_ppu_pd_names),
+};
+
static const struct of_device_id sun20i_ppu_of_match[] = {
{
.compatible = "allwinner,sun20i-d1-ppu",
.data = &sun20i_d1_ppu_desc,
},
+ {
+ .compatible = "allwinner,sun8i-v853-ppu",
+ .data = &sun8i_v853_ppu_desc,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, sun20i_ppu_of_match);
diff --git a/drivers/pmdomain/thead/Kconfig b/drivers/pmdomain/thead/Kconfig
new file mode 100644
index 000000000000..7d52f8374b07
--- /dev/null
+++ b/drivers/pmdomain/thead/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config TH1520_PM_DOMAINS
+ tristate "Support TH1520 Power Domains"
+ depends on TH1520_AON_PROTOCOL
+ select REGMAP_MMIO
+ help
+ This driver enables power domain management for the T-HEAD
+ TH1520 SoC. On this SoC there are a number of power domains,
+ which can be managed independently. For example, the GPU, NPU,
+ and DPU reside in their own power domains which can be
+ turned on/off.
diff --git a/drivers/pmdomain/thead/Makefile b/drivers/pmdomain/thead/Makefile
new file mode 100644
index 000000000000..adfdf5479c68
--- /dev/null
+++ b/drivers/pmdomain/thead/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_TH1520_PM_DOMAINS) += th1520-pm-domains.o
diff --git a/drivers/pmdomain/thead/th1520-pm-domains.c b/drivers/pmdomain/thead/th1520-pm-domains.c
new file mode 100644
index 000000000000..f702e20306f4
--- /dev/null
+++ b/drivers/pmdomain/thead/th1520-pm-domains.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Alibaba Group Holding Limited.
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd.
+ * Author: Michal Wilczynski <m.wilczynski@samsung.com>
+ */
+
+#include <linux/firmware/thead/thead,th1520-aon.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+
+#include <dt-bindings/power/thead,th1520-power.h>
+
+struct th1520_power_domain {
+ struct th1520_aon_chan *aon_chan;
+ struct generic_pm_domain genpd;
+ u32 rsrc;
+};
+
+struct th1520_power_info {
+ const char *name;
+ u32 rsrc;
+ bool disabled;
+};
+
+/*
+ * The AUDIO power domain is marked as disabled to prevent the driver from
+ * managing its power state. Direct AON firmware calls to control this power
+ * island trigger a firmware bug causing system instability. Until this
+ * firmware issue is resolved, the AUDIO power domain must remain disabled
+ * to avoid crashes.
+ */
+static const struct th1520_power_info th1520_pd_ranges[] = {
+ [TH1520_AUDIO_PD] = { "audio", TH1520_AON_AUDIO_PD, true },
+ [TH1520_VDEC_PD] = { "vdec", TH1520_AON_VDEC_PD, false },
+ [TH1520_NPU_PD] = { "npu", TH1520_AON_NPU_PD, false },
+ [TH1520_VENC_PD] = { "venc", TH1520_AON_VENC_PD, false },
+ [TH1520_GPU_PD] = { "gpu", TH1520_AON_GPU_PD, false },
+ [TH1520_DSP0_PD] = { "dsp0", TH1520_AON_DSP0_PD, false },
+ [TH1520_DSP1_PD] = { "dsp1", TH1520_AON_DSP1_PD, false }
+};
+
+static inline struct th1520_power_domain *
+to_th1520_power_domain(struct generic_pm_domain *genpd)
+{
+ return container_of(genpd, struct th1520_power_domain, genpd);
+}
+
+static int th1520_pd_power_on(struct generic_pm_domain *domain)
+{
+ struct th1520_power_domain *pd = to_th1520_power_domain(domain);
+
+ return th1520_aon_power_update(pd->aon_chan, pd->rsrc, true);
+}
+
+static int th1520_pd_power_off(struct generic_pm_domain *domain)
+{
+ struct th1520_power_domain *pd = to_th1520_power_domain(domain);
+
+ return th1520_aon_power_update(pd->aon_chan, pd->rsrc, false);
+}
+
+static struct generic_pm_domain *th1520_pd_xlate(const struct of_phandle_args *spec,
+ void *data)
+{
+ struct generic_pm_domain *domain = ERR_PTR(-ENOENT);
+ struct genpd_onecell_data *pd_data = data;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(th1520_pd_ranges); i++) {
+ struct th1520_power_domain *pd;
+
+ if (th1520_pd_ranges[i].disabled)
+ continue;
+
+ pd = to_th1520_power_domain(pd_data->domains[i]);
+ if (pd->rsrc == spec->args[0]) {
+ domain = &pd->genpd;
+ break;
+ }
+ }
+
+ return domain;
+}
+
+static struct th1520_power_domain *
+th1520_add_pm_domain(struct device *dev, const struct th1520_power_info *pi)
+{
+ struct th1520_power_domain *pd;
+ int ret;
+
+ pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ pd->rsrc = pi->rsrc;
+ pd->genpd.power_on = th1520_pd_power_on;
+ pd->genpd.power_off = th1520_pd_power_off;
+ pd->genpd.name = pi->name;
+
+ ret = pm_genpd_init(&pd->genpd, NULL, true);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return pd;
+}
+
+static void th1520_pd_init_all_off(struct generic_pm_domain **domains,
+ struct device *dev)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(th1520_pd_ranges); i++) {
+ struct th1520_power_domain *pd;
+
+ if (th1520_pd_ranges[i].disabled)
+ continue;
+
+ pd = to_th1520_power_domain(domains[i]);
+
+ ret = th1520_aon_power_update(pd->aon_chan, pd->rsrc, false);
+ if (ret)
+ dev_err(dev,
+ "Failed to initially power down power domain %s\n",
+ pd->genpd.name);
+ }
+}
+
+static int th1520_pd_probe(struct platform_device *pdev)
+{
+ struct generic_pm_domain **domains;
+ struct genpd_onecell_data *pd_data;
+ struct th1520_aon_chan *aon_chan;
+ struct device *dev = &pdev->dev;
+ int i, ret;
+
+ aon_chan = th1520_aon_init(dev);
+ if (IS_ERR(aon_chan))
+ return dev_err_probe(dev, PTR_ERR(aon_chan),
+ "Failed to get AON channel\n");
+
+ domains = devm_kcalloc(dev, ARRAY_SIZE(th1520_pd_ranges),
+ sizeof(*domains), GFP_KERNEL);
+ if (!domains) {
+ ret = -ENOMEM;
+ goto err_clean_aon;
+ }
+
+ pd_data = devm_kzalloc(dev, sizeof(*pd_data), GFP_KERNEL);
+ if (!pd_data) {
+ ret = -ENOMEM;
+ goto err_clean_aon;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(th1520_pd_ranges); i++) {
+ struct th1520_power_domain *pd;
+
+ if (th1520_pd_ranges[i].disabled)
+ continue;
+
+ pd = th1520_add_pm_domain(dev, &th1520_pd_ranges[i]);
+ if (IS_ERR(pd)) {
+ ret = PTR_ERR(pd);
+ goto err_clean_genpd;
+ }
+
+ pd->aon_chan = aon_chan;
+ domains[i] = &pd->genpd;
+ dev_dbg(dev, "added power domain %s\n", pd->genpd.name);
+ }
+
+ pd_data->domains = domains;
+ pd_data->num_domains = ARRAY_SIZE(th1520_pd_ranges);
+ pd_data->xlate = th1520_pd_xlate;
+
+ /*
+ * Initialize all power domains to off to ensure they start in a
+ * low-power state. This allows device drivers to manage power
+ * domains by turning them on or off as needed.
+ */
+ th1520_pd_init_all_off(domains, dev);
+
+ ret = of_genpd_add_provider_onecell(dev->of_node, pd_data);
+ if (ret)
+ goto err_clean_genpd;
+
+ return 0;
+
+err_clean_genpd:
+ for (i--; i >= 0; i--)
+ pm_genpd_remove(domains[i]);
+err_clean_aon:
+ th1520_aon_deinit(aon_chan);
+
+ return ret;
+}
+
+static const struct of_device_id th1520_pd_match[] = {
+ { .compatible = "thead,th1520-aon" },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, th1520_pd_match);
+
+static struct platform_driver th1520_pd_driver = {
+ .driver = {
+ .name = "th1520-pd",
+ .of_match_table = th1520_pd_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = th1520_pd_probe,
+};
+module_platform_driver(th1520_pd_driver);
+
+MODULE_AUTHOR("Michal Wilczynski <m.wilczynski@samsung.com>");
+MODULE_DESCRIPTION("T-HEAD TH1520 SoC power domain controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pmdomain/ti/omap_prm.c b/drivers/pmdomain/ti/omap_prm.c
index b8ceb3c2b81c..79d165331d8c 100644
--- a/drivers/pmdomain/ti/omap_prm.c
+++ b/drivers/pmdomain/ti/omap_prm.c
@@ -613,7 +613,7 @@ static int omap_prm_domain_attach_clock(struct device *dev,
if (!of_device_is_compatible(np, "simple-pm-bus"))
return 0;
- if (!of_property_read_bool(np, "clocks"))
+ if (!of_property_present(np, "clocks"))
return 0;
error = pm_clk_create(dev);
diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h
index 4e80273dfb1e..b342570d0236 100644
--- a/drivers/pnp/base.h
+++ b/drivers/pnp/base.h
@@ -9,7 +9,6 @@ extern const struct attribute_group *pnp_dev_groups[];
extern const struct bus_type pnp_bus_type;
int pnp_register_protocol(struct pnp_protocol *protocol);
-void pnp_unregister_protocol(struct pnp_protocol *protocol);
#define PNP_EISA_ID_MASK 0x7fffffff
void pnp_eisa_id_to_string(u32 id, char *str);
@@ -21,9 +20,7 @@ int pnp_add_device(struct pnp_dev *dev);
struct pnp_id *pnp_add_id(struct pnp_dev *dev, const char *id);
int pnp_add_card(struct pnp_card *card);
-void pnp_remove_card(struct pnp_card *card);
int pnp_add_card_device(struct pnp_card *card, struct pnp_dev *dev);
-void pnp_remove_card_device(struct pnp_dev *dev);
struct pnp_port {
resource_size_t min; /* min base number */
@@ -138,7 +135,6 @@ void pnp_init_resources(struct pnp_dev *dev);
void pnp_fixup_device(struct pnp_dev *dev);
void pnp_free_options(struct pnp_dev *dev);
int __pnp_add_device(struct pnp_dev *dev);
-void __pnp_remove_device(struct pnp_dev *dev);
int pnp_check_port(struct pnp_dev *dev, struct resource *res);
int pnp_check_mem(struct pnp_dev *dev, struct resource *res);
diff --git a/drivers/pnp/card.c b/drivers/pnp/card.c
index 9610a9f08ff4..c7596dc24fbd 100644
--- a/drivers/pnp/card.c
+++ b/drivers/pnp/card.c
@@ -270,25 +270,6 @@ int pnp_add_card(struct pnp_card *card)
}
/**
- * pnp_remove_card - removes a PnP card from the PnP Layer
- * @card: pointer to the card to remove
- */
-void pnp_remove_card(struct pnp_card *card)
-{
- struct list_head *pos, *temp;
-
- device_unregister(&card->dev);
- mutex_lock(&pnp_lock);
- list_del(&card->global_list);
- list_del(&card->protocol_list);
- mutex_unlock(&pnp_lock);
- list_for_each_safe(pos, temp, &card->devices) {
- struct pnp_dev *dev = card_to_pnp_dev(pos);
- pnp_remove_card_device(dev);
- }
-}
-
-/**
* pnp_add_card_device - adds a device to the specified card
* @card: pointer to the card to add to
* @dev: pointer to the device to add
@@ -307,19 +288,6 @@ int pnp_add_card_device(struct pnp_card *card, struct pnp_dev *dev)
}
/**
- * pnp_remove_card_device- removes a device from the specified card
- * @dev: pointer to the device to remove
- */
-void pnp_remove_card_device(struct pnp_dev *dev)
-{
- mutex_lock(&pnp_lock);
- dev->card = NULL;
- list_del(&dev->card_list);
- mutex_unlock(&pnp_lock);
- __pnp_remove_device(dev);
-}
-
-/**
* pnp_request_card_device - Searches for a PnP device under the specified card
* @clink: pointer to the card link, cannot be NULL
* @id: pointer to a PnP ID structure that explains the rules for finding the device
diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c
index 6a60c5d83383..ac48db6dcfe3 100644
--- a/drivers/pnp/core.c
+++ b/drivers/pnp/core.c
@@ -78,16 +78,6 @@ int pnp_register_protocol(struct pnp_protocol *protocol)
return ret;
}
-/**
- * pnp_unregister_protocol - removes a pnp protocol from the pnp layer
- * @protocol: pointer to the corresponding pnp_protocol structure
- */
-void pnp_unregister_protocol(struct pnp_protocol *protocol)
-{
- pnp_remove_protocol(protocol);
- device_unregister(&protocol->dev);
-}
-
static void pnp_free_ids(struct pnp_dev *dev)
{
struct pnp_id *id;
@@ -220,12 +210,6 @@ int pnp_add_device(struct pnp_dev *dev)
return 0;
}
-void __pnp_remove_device(struct pnp_dev *dev)
-{
- pnp_delist_device(dev);
- device_unregister(&dev->dev);
-}
-
static int __init pnp_init(void)
{
return bus_register(&pnp_bus_type);
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index c2801bd6384d..e9fe08ee3812 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -327,6 +327,7 @@ static const struct of_device_id at91_pmc_ids[] = {
{ .compatible = "microchip,sam9x60-pmc" },
{ .compatible = "microchip,sama7g5-pmc" },
{ .compatible = "microchip,sam9x7-pmc" },
+ { .compatible = "microchip,sama7d65-pmc" },
{ /* Sentinel. */ }
};
diff --git a/drivers/power/reset/ltc2952-poweroff.c b/drivers/power/reset/ltc2952-poweroff.c
index 1a6fc8d38e20..90c664d344d0 100644
--- a/drivers/power/reset/ltc2952-poweroff.c
+++ b/drivers/power/reset/ltc2952-poweroff.c
@@ -162,11 +162,11 @@ static void ltc2952_poweroff_default(struct ltc2952_poweroff *data)
data->wde_interval = 300L * NSEC_PER_MSEC;
data->trigger_delay = ktime_set(2, 500L * NSEC_PER_MSEC);
- hrtimer_init(&data->timer_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- data->timer_trigger.function = ltc2952_poweroff_timer_trigger;
+ hrtimer_setup(&data->timer_trigger, ltc2952_poweroff_timer_trigger, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
- hrtimer_init(&data->timer_wde, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- data->timer_wde.function = ltc2952_poweroff_timer_wde;
+ hrtimer_setup(&data->timer_wde, ltc2952_poweroff_timer_wde, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
static int ltc2952_poweroff_init(struct platform_device *pdev)
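hrtimer_setup(), as used in the conversion above, combines hrtimer_init() and the assignment of the callback into a single call. A minimal sketch with an illustrative timer and a one-second period:

/* Minimal sketch of hrtimer_setup(); the timer and period are illustrative. */
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer example_timer;

static enum hrtimer_restart example_timer_fn(struct hrtimer *timer)
{
        /* Re-arm one second from now. */
        hrtimer_forward_now(timer, ms_to_ktime(1000));
        return HRTIMER_RESTART;
}

static void example_timer_init(void)
{
        /* Replaces hrtimer_init() plus a separate .function assignment. */
        hrtimer_setup(&example_timer, example_timer_fn, CLOCK_MONOTONIC,
                      HRTIMER_MODE_REL);
        hrtimer_start(&example_timer, ms_to_ktime(1000), HRTIMER_MODE_REL);
}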
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 7b18358f194a..8dbd39afa43c 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -449,12 +449,6 @@ config CHARGER_88PM860X
help
Say Y here to enable charger for Marvell 88PM860x chip.
-config CHARGER_PCF50633
- tristate "NXP PCF50633 MBC"
- depends on MFD_PCF50633
- help
- Say Y to include support for NXP PCF50633 Main Battery Charger.
-
config BATTERY_RX51
tristate "Nokia RX-51 (N900) battery driver"
depends on TWL4030_MADC
@@ -583,6 +577,12 @@ config CHARGER_MAX77693
help
Say Y to enable support for the Maxim MAX77693 battery charger.
+config CHARGER_MAX77705
+ tristate "Maxim MAX77705 battery charger driver"
+ depends on MFD_MAX77705
+ help
+ Say Y to enable support for the Maxim MAX77705 battery charger.
+
config CHARGER_MAX77976
tristate "Maxim MAX77976 battery charger driver"
depends on I2C
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index b55cc48a4c86..61677be328b0 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -62,7 +62,6 @@ obj-$(CONFIG_CHARGER_RT9467) += rt9467-charger.o
obj-$(CONFIG_CHARGER_RT9471) += rt9471.o
obj-$(CONFIG_BATTERY_TWL4030_MADC) += twl4030_madc_battery.o
obj-$(CONFIG_CHARGER_88PM860X) += 88pm860x_charger.o
-obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
obj-$(CONFIG_BATTERY_RX51) += rx51_battery.o
obj-$(CONFIG_AB8500_BM) += ab8500_bmdata.o ab8500_charger.o ab8500_fg.o ab8500_btemp.o ab8500_chargalg.o
obj-$(CONFIG_CHARGER_CPCAP) += cpcap-charger.o
@@ -80,6 +79,7 @@ obj-$(CONFIG_CHARGER_MAX14577) += max14577_charger.o
obj-$(CONFIG_CHARGER_DETECTOR_MAX14656) += max14656_charger_detector.o
obj-$(CONFIG_CHARGER_MAX77650) += max77650-charger.o
obj-$(CONFIG_CHARGER_MAX77693) += max77693_charger.o
+obj-$(CONFIG_CHARGER_MAX77705) += max77705_charger.o
obj-$(CONFIG_CHARGER_MAX77976) += max77976_charger.o
obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c
index 7a8d1feb8e90..dc6c8b0dd1cf 100644
--- a/drivers/power/supply/ab8500_chargalg.c
+++ b/drivers/power/supply/ab8500_chargalg.c
@@ -1787,13 +1787,12 @@ static int ab8500_chargalg_probe(struct platform_device *pdev)
psy_cfg.drv_data = di;
/* Initialize safety timer */
- hrtimer_init(&di->safety_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- di->safety_timer.function = ab8500_chargalg_safety_timer_expired;
+ hrtimer_setup(&di->safety_timer, ab8500_chargalg_safety_timer_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/* Initialize maintenance timer */
- hrtimer_init(&di->maintenance_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- di->maintenance_timer.function =
- ab8500_chargalg_maintenance_timer_expired;
+ hrtimer_setup(&di->maintenance_timer, ab8500_chargalg_maintenance_timer_expired,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
/* Init work for chargalg */
INIT_DEFERRABLE_WORK(&di->chargalg_periodic_work,
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index 1042d37424f5..5f4537766e5b 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -3494,11 +3494,11 @@ static int ab8500_charger_probe(struct platform_device *pdev)
di->invalid_charger_detect_state = 0;
/* AC and USB supply config */
- ac_psy_cfg.of_node = np;
+ ac_psy_cfg.fwnode = dev_fwnode(dev);
ac_psy_cfg.supplied_to = supply_interface;
ac_psy_cfg.num_supplicants = ARRAY_SIZE(supply_interface);
ac_psy_cfg.drv_data = &di->ac_chg;
- usb_psy_cfg.of_node = np;
+ usb_psy_cfg.fwnode = dev_fwnode(dev);
usb_psy_cfg.supplied_to = supply_interface;
usb_psy_cfg.num_supplicants = ARRAY_SIZE(supply_interface);
usb_psy_cfg.drv_data = &di->usb_chg;
diff --git a/drivers/power/supply/acer_a500_battery.c b/drivers/power/supply/acer_a500_battery.c
index 39d85b11a13c..daf01dc8025b 100644
--- a/drivers/power/supply/acer_a500_battery.c
+++ b/drivers/power/supply/acer_a500_battery.c
@@ -17,6 +17,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
+#include <linux/property.h>
enum {
REG_CAPACITY,
@@ -231,7 +232,7 @@ static int a500_battery_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, bat);
- psy_cfg.of_node = pdev->dev.parent->of_node;
+ psy_cfg.fwnode = dev_fwnode(pdev->dev.parent);
psy_cfg.drv_data = bat;
psy_cfg.no_wakeup_source = true;
diff --git a/drivers/power/supply/act8945a_charger.c b/drivers/power/supply/act8945a_charger.c
index b2b82f97a471..3901a02f326a 100644
--- a/drivers/power/supply/act8945a_charger.c
+++ b/drivers/power/supply/act8945a_charger.c
@@ -614,7 +614,7 @@ static int act8945a_charger_probe(struct platform_device *pdev)
if (ret)
return -EINVAL;
- psy_cfg.of_node = pdev->dev.of_node;
+ psy_cfg.fwnode = dev_fwnode(&pdev->dev);
psy_cfg.drv_data = charger;
charger->psy = devm_power_supply_register(&pdev->dev,
diff --git a/drivers/power/supply/axp20x_ac_power.c b/drivers/power/supply/axp20x_ac_power.c
index e5733cb9e19e..5f6ea416fa30 100644
--- a/drivers/power/supply/axp20x_ac_power.c
+++ b/drivers/power/supply/axp20x_ac_power.c
@@ -364,7 +364,7 @@ static int axp20x_ac_power_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, power);
- psy_cfg.of_node = pdev->dev.of_node;
+ psy_cfg.fwnode = dev_fwnode(&pdev->dev);
psy_cfg.drv_data = power;
power->supply = devm_power_supply_register(&pdev->dev,
diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c
index 3c3158f31a48..50ca8e110085 100644
--- a/drivers/power/supply/axp20x_battery.c
+++ b/drivers/power/supply/axp20x_battery.c
@@ -89,6 +89,8 @@
#define AXP717_BAT_CC_MIN_UA 0
#define AXP717_BAT_CC_MAX_UA 3008000
+#define AXP717_TS_PIN_DISABLE BIT(4)
+
struct axp20x_batt_ps;
struct axp_data {
@@ -117,6 +119,7 @@ struct axp20x_batt_ps {
/* Maximum constant charge current */
unsigned int max_ccc;
const struct axp_data *data;
+ bool ts_disable;
};
static int axp20x_battery_get_max_voltage(struct axp20x_batt_ps *axp20x_batt,
@@ -984,6 +987,24 @@ static void axp717_set_battery_info(struct platform_device *pdev,
int ccc = info->constant_charge_current_max_ua;
int val;
+ axp_batt->ts_disable = (device_property_read_bool(axp_batt->dev,
+ "x-powers,no-thermistor"));
+
+ /*
+ * Under rare conditions an incorrectly programmed efuse for
+ * the temp sensor on the PMIC may trigger a fault condition.
+ * Allow users to state via the "x-powers,no-thermistor" property
+ * that the TS pin is not used, to work around this problem. Note
+ * that this requires the battery to be correctly described in the
+ * device tree with a monitored battery node.
+ */
+ if (axp_batt->ts_disable) {
+ regmap_update_bits(axp_batt->regmap,
+ AXP717_TS_PIN_CFG,
+ AXP717_TS_PIN_DISABLE,
+ AXP717_TS_PIN_DISABLE);
+ }
+
if (vmin > 0 && axp717_set_voltage_min_design(axp_batt, vmin))
dev_err(&pdev->dev,
"couldn't set voltage_min_design\n");
@@ -1090,7 +1111,7 @@ static int axp20x_power_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, axp20x_batt);
psy_cfg.drv_data = axp20x_batt;
- psy_cfg.of_node = pdev->dev.of_node;
+ psy_cfg.fwnode = dev_fwnode(&pdev->dev);
axp20x_batt->data = (struct axp_data *)of_device_get_match_data(dev);
diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c
index 9722912268fe..e75d1e377ac1 100644
--- a/drivers/power/supply/axp20x_usb_power.c
+++ b/drivers/power/supply/axp20x_usb_power.c
@@ -492,7 +492,7 @@ static int axp717_usb_power_set_input_current_limit(struct axp20x_usb_power *pow
if (power->max_input_cur && (intval > power->max_input_cur)) {
dev_warn(power->dev,
- "reqested current %d clamped to max current %d\n",
+ "requested current %d clamped to max current %d\n",
intval, power->max_input_cur);
intval = power->max_input_cur;
}
@@ -1011,7 +1011,7 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
return ret;
}
- psy_cfg.of_node = pdev->dev.of_node;
+ psy_cfg.fwnode = dev_fwnode(&pdev->dev);
psy_cfg.drv_data = power;
power->supply = devm_power_supply_register(&pdev->dev,
diff --git a/drivers/power/supply/bd99954-charger.c b/drivers/power/supply/bd99954-charger.c
index 54bf88262510..d03a70cf8406 100644
--- a/drivers/power/supply/bd99954-charger.c
+++ b/drivers/power/supply/bd99954-charger.c
@@ -156,7 +156,7 @@ static const struct regmap_config bd9995x_regmap_config = {
.reg_stride = 1,
.max_register = 3 * 0x100,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.ranges = regmap_range_cfg,
.num_ranges = ARRAY_SIZE(regmap_range_cfg),
@@ -982,7 +982,7 @@ static int bd9995x_probe(struct i2c_client *client)
bd->client = client;
bd->dev = dev;
psy_cfg.drv_data = bd;
- psy_cfg.of_node = dev->of_node;
+ psy_cfg.fwnode = dev_fwnode(dev);
mutex_init(&bd->lock);
diff --git a/drivers/power/supply/bq2415x_charger.c b/drivers/power/supply/bq2415x_charger.c
index 22f6a3b71632..9e3b9181ee76 100644
--- a/drivers/power/supply/bq2415x_charger.c
+++ b/drivers/power/supply/bq2415x_charger.c
@@ -1497,7 +1497,7 @@ static int bq2415x_power_supply_init(struct bq2415x_device *bq)
char revstr[8];
struct power_supply_config psy_cfg = {
.drv_data = bq,
- .of_node = bq->dev->of_node,
+ .fwnode = dev_fwnode(bq->dev),
.attr_grp = bq2415x_sysfs_groups,
};
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index b4ba01744368..f0d97ab45bd8 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -2117,7 +2117,7 @@ static int bq24190_probe(struct i2c_client *client)
#endif
charger_cfg.drv_data = bdi;
- charger_cfg.of_node = dev->of_node;
+ charger_cfg.fwnode = dev_fwnode(dev);
charger_cfg.supplied_to = bq24190_charger_supplied_to;
charger_cfg.num_supplicants = ARRAY_SIZE(bq24190_charger_supplied_to);
bdi->charger = power_supply_register(dev, &bq24190_charger_desc,
diff --git a/drivers/power/supply/bq24257_charger.c b/drivers/power/supply/bq24257_charger.c
index 1416586f2459..766eecb35694 100644
--- a/drivers/power/supply/bq24257_charger.c
+++ b/drivers/power/supply/bq24257_charger.c
@@ -113,7 +113,7 @@ static const struct regmap_config bq24257_regmap_config = {
.val_bits = 8,
.max_register = BQ24257_REG_7,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.volatile_reg = bq24257_is_volatile_reg,
};
diff --git a/drivers/power/supply/bq24735-charger.c b/drivers/power/supply/bq24735-charger.c
index 73a7fc867b03..637e0da65f87 100644
--- a/drivers/power/supply/bq24735-charger.c
+++ b/drivers/power/supply/bq24735-charger.c
@@ -402,7 +402,7 @@ static int bq24735_charger_probe(struct i2c_client *client)
psy_cfg.supplied_to = charger->pdata->supplied_to;
psy_cfg.num_supplicants = charger->pdata->num_supplicants;
- psy_cfg.of_node = client->dev.of_node;
+ psy_cfg.fwnode = dev_fwnode(&client->dev);
psy_cfg.drv_data = charger;
i2c_set_clientdata(client, charger);
diff --git a/drivers/power/supply/bq2515x_charger.c b/drivers/power/supply/bq2515x_charger.c
index a3424f67f2b1..437bff5bc420 100644
--- a/drivers/power/supply/bq2515x_charger.c
+++ b/drivers/power/supply/bq2515x_charger.c
@@ -1060,7 +1060,7 @@ static const struct regmap_config bq25150_regmap_config = {
.max_register = BQ2515X_DEVICE_ID,
.reg_defaults = bq25150_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(bq25150_reg_defaults),
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.volatile_reg = bq2515x_volatile_register,
};
@@ -1071,7 +1071,7 @@ static const struct regmap_config bq25155_regmap_config = {
.max_register = BQ2515X_DEVICE_ID,
.reg_defaults = bq25155_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(bq25155_reg_defaults),
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.volatile_reg = bq2515x_volatile_register,
};
@@ -1102,7 +1102,7 @@ static int bq2515x_probe(struct i2c_client *client)
i2c_set_clientdata(client, bq2515x);
charger_cfg.drv_data = bq2515x;
- charger_cfg.of_node = dev->of_node;
+ charger_cfg.fwnode = dev_fwnode(dev);
ret = bq2515x_read_properties(bq2515x);
if (ret) {
diff --git a/drivers/power/supply/bq256xx_charger.c b/drivers/power/supply/bq256xx_charger.c
index 5514d1896bb8..9f9b6019f8e1 100644
--- a/drivers/power/supply/bq256xx_charger.c
+++ b/drivers/power/supply/bq256xx_charger.c
@@ -1657,7 +1657,7 @@ static int bq256xx_parse_dt(struct bq256xx_device *bq,
int ret = 0;
psy_cfg->drv_data = bq;
- psy_cfg->of_node = dev->of_node;
+ psy_cfg->fwnode = dev_fwnode(dev);
ret = device_property_read_u32(bq->dev, "ti,watchdog-timeout-ms",
&bq->watchdog_timer);
diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c
index 2f5ceaf00b94..868e86e1749b 100644
--- a/drivers/power/supply/bq25890_charger.c
+++ b/drivers/power/supply/bq25890_charger.c
@@ -164,7 +164,7 @@ static const struct regmap_config bq25890_regmap_config = {
.val_bits = 8,
.max_register = 0x14,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.wr_table = &bq25890_writeable_regs,
.volatile_table = &bq25890_volatile_regs,
diff --git a/drivers/power/supply/bq25980_charger.c b/drivers/power/supply/bq25980_charger.c
index 0c5e2938bb36..4ff76e3dddf6 100644
--- a/drivers/power/supply/bq25980_charger.c
+++ b/drivers/power/supply/bq25980_charger.c
@@ -932,7 +932,7 @@ static const struct regmap_config bq25980_regmap_config = {
.max_register = BQ25980_CHRGR_CTRL_6,
.reg_defaults = bq25980_reg_defs,
.num_reg_defaults = ARRAY_SIZE(bq25980_reg_defs),
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.volatile_reg = bq25980_is_volatile_reg,
};
@@ -943,7 +943,7 @@ static const struct regmap_config bq25975_regmap_config = {
.max_register = BQ25980_CHRGR_CTRL_6,
.reg_defaults = bq25975_reg_defs,
.num_reg_defaults = ARRAY_SIZE(bq25975_reg_defs),
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.volatile_reg = bq25980_is_volatile_reg,
};
@@ -954,7 +954,7 @@ static const struct regmap_config bq25960_regmap_config = {
.max_register = BQ25980_CHRGR_CTRL_6,
.reg_defaults = bq25960_reg_defs,
.num_reg_defaults = ARRAY_SIZE(bq25960_reg_defs),
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.volatile_reg = bq25980_is_volatile_reg,
};
@@ -1057,7 +1057,7 @@ static int bq25980_power_supply_init(struct bq25980_device *bq,
struct device *dev)
{
struct power_supply_config psy_cfg = { .drv_data = bq,
- .of_node = dev->of_node, };
+ .fwnode = dev_fwnode(dev), };
psy_cfg.supplied_to = bq25980_charger_supplied_to;
psy_cfg.num_supplicants = ARRAY_SIZE(bq25980_charger_supplied_to);
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 90a5bccfc6b9..2f31d750a4c1 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -124,6 +124,7 @@ enum bq27xxx_reg_index {
BQ27XXX_DM_DATA, /* Block Data */
BQ27XXX_DM_CKSUM, /* Block Data Checksum */
BQ27XXX_REG_SEDVF, /* End-of-discharge Voltage */
+ BQ27XXX_REG_PKCFG, /* Pack Configuration */
BQ27XXX_REG_MAX, /* sentinel */
};
@@ -161,6 +162,7 @@ static u8
[BQ27XXX_DM_DATA] = INVALID_REG_ADDR,
[BQ27XXX_DM_CKSUM] = INVALID_REG_ADDR,
[BQ27XXX_REG_SEDVF] = 0x77,
+ [BQ27XXX_REG_PKCFG] = 0x7C,
},
bq27010_regs[BQ27XXX_REG_MAX] = {
[BQ27XXX_REG_CTRL] = 0x00,
@@ -187,6 +189,7 @@ static u8
[BQ27XXX_DM_DATA] = INVALID_REG_ADDR,
[BQ27XXX_DM_CKSUM] = INVALID_REG_ADDR,
[BQ27XXX_REG_SEDVF] = 0x77,
+ [BQ27XXX_REG_PKCFG] = 0x7C,
},
bq2750x_regs[BQ27XXX_REG_MAX] = {
[BQ27XXX_REG_CTRL] = 0x00,
@@ -583,6 +586,7 @@ static enum power_supply_property bq27000_props[] = {
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_MANUFACTURER,
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
};
static enum power_supply_property bq27010_props[] = {
@@ -604,6 +608,7 @@ static enum power_supply_property bq27010_props[] = {
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_MANUFACTURER,
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
};
#define bq2750x_props bq27510g3_props
@@ -1918,7 +1923,6 @@ static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di)
cache.flags = -1; /* read error */
if (cache.flags >= 0) {
cache.capacity = bq27xxx_battery_read_soc(di);
- di->cache.flags = cache.flags;
/*
* On gauges with signed current reporting the current must be
@@ -2045,6 +2049,35 @@ static int bq27xxx_battery_voltage(struct bq27xxx_device_info *di,
}
/*
+ * Return the design maximum battery Voltage in microvolts, or < 0 if something
+ * fails. The programmed value of the maximum battery voltage is determined by
+ * QV0 and QV1 (bits 5 and 6) in the Pack Configuration register.
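+ * For example, QV1:QV0 = 0b11 evaluates to 3968000 + 48000 * 3 = 4112000 uV.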
+ */
+static int bq27xxx_battery_read_dmax_volt(struct bq27xxx_device_info *di,
+ union power_supply_propval *val)
+{
+ int reg_val, qv;
+
+ if (di->voltage_max_design > 0) {
+ val->intval = di->voltage_max_design;
+ return 0;
+ }
+
+ reg_val = bq27xxx_read(di, BQ27XXX_REG_PKCFG, true);
+ if (reg_val < 0) {
+ dev_err(di->dev, "error reading design max voltage\n");
+ return reg_val;
+ }
+
+ qv = (reg_val >> 5) & 0x3;
+ val->intval = 3968000 + 48000 * qv;
+
+ di->voltage_max_design = val->intval;
+
+ return 0;
+}
+
+/*
* Return the design minimum battery Voltage in microvolts
* Or < 0 if something fails.
*/
@@ -2158,6 +2191,9 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
ret = bq27xxx_battery_read_dmin_volt(di, val);
break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ ret = bq27xxx_battery_read_dmax_volt(di, val);
+ break;
case POWER_SUPPLY_PROP_CYCLE_COUNT:
ret = bq27xxx_battery_read_cyct(di, val);
break;
@@ -2199,7 +2235,7 @@ int bq27xxx_battery_setup(struct bq27xxx_device_info *di)
{
struct power_supply_desc *psy_desc;
struct power_supply_config psy_cfg = {
- .of_node = di->dev->of_node,
+ .fwnode = dev_fwnode(di->dev),
.drv_data = di,
.no_wakeup_source = true,
};
diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
index 813037c00ded..8106d1edcbc2 100644
--- a/drivers/power/supply/cpcap-battery.c
+++ b/drivers/power/supply/cpcap-battery.c
@@ -1130,7 +1130,7 @@ static int cpcap_battery_probe(struct platform_device *pdev)
if (error)
return error;
- psy_cfg.of_node = pdev->dev.of_node;
+ psy_cfg.fwnode = dev_fwnode(&pdev->dev);
psy_cfg.drv_data = ddata;
ddata->psy = devm_power_supply_register(ddata->dev,
diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
index 6625d539d9ae..13300dc60baf 100644
--- a/drivers/power/supply/cpcap-charger.c
+++ b/drivers/power/supply/cpcap-charger.c
@@ -902,7 +902,7 @@ static int cpcap_charger_probe(struct platform_device *pdev)
atomic_set(&ddata->active, 1);
- psy_cfg.of_node = pdev->dev.of_node;
+ psy_cfg.fwnode = dev_fwnode(&pdev->dev);
psy_cfg.drv_data = ddata;
psy_cfg.supplied_to = cpcap_charger_supplied_to;
psy_cfg.num_supplicants = ARRAY_SIZE(cpcap_charger_supplied_to);
diff --git a/drivers/power/supply/ds2760_battery.c b/drivers/power/supply/ds2760_battery.c
index 83bdec5a2bda..5badf58c6edb 100644
--- a/drivers/power/supply/ds2760_battery.c
+++ b/drivers/power/supply/ds2760_battery.c
@@ -112,7 +112,6 @@ struct ds2760_device_info {
struct power_supply_desc bat_desc;
struct workqueue_struct *monitor_wqueue;
struct delayed_work monitor_work;
- struct delayed_work set_charged_work;
struct notifier_block pm_notifier;
};
@@ -489,50 +488,6 @@ static void ds2760_battery_external_power_changed(struct power_supply *psy)
}
-static void ds2760_battery_set_charged_work(struct work_struct *work)
-{
- char bias;
- struct ds2760_device_info *di = container_of(work,
- struct ds2760_device_info, set_charged_work.work);
-
- dev_dbg(di->dev, "%s\n", __func__);
-
- ds2760_battery_read_status(di);
-
- /* When we get notified by external circuitry that the battery is
- * considered fully charged now, we know that there is no current
- * flow any more. However, the ds2760's internal current meter is
- * too inaccurate to rely on - spec say something ~15% failure.
- * Hence, we use the current offset bias register to compensate
- * that error.
- */
-
- if (!power_supply_am_i_supplied(di->bat))
- return;
-
- bias = (signed char) di->current_raw +
- (signed char) di->raw[DS2760_CURRENT_OFFSET_BIAS];
-
- dev_dbg(di->dev, "%s: bias = %d\n", __func__, bias);
-
- w1_ds2760_write(di->dev, &bias, DS2760_CURRENT_OFFSET_BIAS, 1);
- w1_ds2760_store_eeprom(di->dev, DS2760_EEPROM_BLOCK1);
- w1_ds2760_recall_eeprom(di->dev, DS2760_EEPROM_BLOCK1);
-
- /* Write to the di->raw[] buffer directly - the CURRENT_OFFSET_BIAS
- * value won't be read back by ds2760_battery_read_status() */
- di->raw[DS2760_CURRENT_OFFSET_BIAS] = bias;
-}
-
-static void ds2760_battery_set_charged(struct power_supply *psy)
-{
- struct ds2760_device_info *di = power_supply_get_drvdata(psy);
-
- /* postpone the actual work by 20 secs. This is for debouncing GPIO
- * signals and to let the current value settle. See AN4188. */
- mod_delayed_work(di->monitor_wqueue, &di->set_charged_work, HZ * 20);
-}
-
static int ds2760_battery_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -692,17 +647,15 @@ static int w1_ds2760_add_slave(struct w1_slave *sl)
di->bat_desc.set_property = ds2760_battery_set_property;
di->bat_desc.property_is_writeable =
ds2760_battery_property_is_writeable;
- di->bat_desc.set_charged = ds2760_battery_set_charged;
di->bat_desc.external_power_changed =
ds2760_battery_external_power_changed;
psy_cfg.drv_data = di;
+ psy_cfg.fwnode = dev_fwnode(dev);
if (dev->of_node) {
u32 tmp;
- psy_cfg.of_node = dev->of_node;
-
if (!of_property_read_bool(dev->of_node, "maxim,pmod-enabled"))
pmod_enabled = true;
@@ -747,8 +700,6 @@ static int w1_ds2760_add_slave(struct w1_slave *sl)
}
INIT_DELAYED_WORK(&di->monitor_work, ds2760_battery_work);
- INIT_DELAYED_WORK(&di->set_charged_work,
- ds2760_battery_set_charged_work);
di->monitor_wqueue = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
if (!di->monitor_wqueue) {
retval = -ESRCH;
@@ -774,7 +725,6 @@ static void w1_ds2760_remove_slave(struct w1_slave *sl)
unregister_pm_notifier(&di->pm_notifier);
cancel_delayed_work_sync(&di->monitor_work);
- cancel_delayed_work_sync(&di->set_charged_work);
destroy_workqueue(di->monitor_wqueue);
}
diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c
index d5d215f5ad8b..f5f2566b3a32 100644
--- a/drivers/power/supply/generic-adc-battery.c
+++ b/drivers/power/supply/generic-adc-battery.c
@@ -166,7 +166,7 @@ static int gab_probe(struct platform_device *pdev)
if (!adc_bat)
return -ENOMEM;
- psy_cfg.of_node = pdev->dev.of_node;
+ psy_cfg.fwnode = dev_fwnode(&pdev->dev);
psy_cfg.drv_data = adc_bat;
psy_desc = &adc_bat->psy_desc;
psy_desc->name = dev_name(&pdev->dev);
diff --git a/drivers/power/supply/gpio-charger.c b/drivers/power/supply/gpio-charger.c
index 46d18ce6a739..1dfd5b0cb30d 100644
--- a/drivers/power/supply/gpio-charger.c
+++ b/drivers/power/supply/gpio-charger.c
@@ -333,7 +333,7 @@ static int gpio_charger_probe(struct platform_device *pdev)
charger_desc->property_is_writeable =
gpio_charger_property_is_writeable;
- psy_cfg.of_node = dev->of_node;
+ psy_cfg.fwnode = dev_fwnode(dev);
psy_cfg.drv_data = gpio_charger;
if (pdata) {
diff --git a/drivers/power/supply/ingenic-battery.c b/drivers/power/supply/ingenic-battery.c
index 0a40f425c277..b111c7ce2be3 100644
--- a/drivers/power/supply/ingenic-battery.c
+++ b/drivers/power/supply/ingenic-battery.c
@@ -146,7 +146,7 @@ static int ingenic_battery_probe(struct platform_device *pdev)
desc->num_properties = ARRAY_SIZE(ingenic_battery_properties);
desc->get_property = ingenic_battery_get_property;
psy_cfg.drv_data = bat;
- psy_cfg.of_node = dev->of_node;
+ psy_cfg.fwnode = dev_fwnode(dev);
bat->battery = devm_power_supply_register(dev, desc, &psy_cfg);
if (IS_ERR(bat->battery))
diff --git a/drivers/power/supply/ip5xxx_power.c b/drivers/power/supply/ip5xxx_power.c
index c448e0ac0dfa..a031eadb49dd 100644
--- a/drivers/power/supply/ip5xxx_power.c
+++ b/drivers/power/supply/ip5xxx_power.c
@@ -848,7 +848,7 @@ static int ip5xxx_power_probe(struct i2c_client *client)
fields = (const struct ip5xxx_regfield_config *)of_id->data;
ip5xxx_setup_regs(dev, ip5xxx, fields);
- psy_cfg.of_node = dev->of_node;
+ psy_cfg.fwnode = dev_fwnode(dev);
psy_cfg.drv_data = ip5xxx;
psy = devm_power_supply_register(dev, &ip5xxx_battery_desc, &psy_cfg);
diff --git a/drivers/power/supply/lego_ev3_battery.c b/drivers/power/supply/lego_ev3_battery.c
index 9085de0ae1b2..28454de05761 100644
--- a/drivers/power/supply/lego_ev3_battery.c
+++ b/drivers/power/supply/lego_ev3_battery.c
@@ -23,6 +23,7 @@
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
+#include <linux/property.h>
struct lego_ev3_battery {
struct iio_channel *iio_v;
@@ -198,7 +199,7 @@ static int lego_ev3_battery_probe(struct platform_device *pdev)
batt->v_min = 48000000;
}
- psy_cfg.of_node = pdev->dev.of_node;
+ psy_cfg.fwnode = dev_fwnode(&pdev->dev);
psy_cfg.drv_data = batt;
batt->psy = devm_power_supply_register(dev, &lego_ev3_battery_desc,
diff --git a/drivers/power/supply/lt3651-charger.c b/drivers/power/supply/lt3651-charger.c
index 8de500ffad95..ebfbdbcb7683 100644
--- a/drivers/power/supply/lt3651-charger.c
+++ b/drivers/power/supply/lt3651-charger.c
@@ -131,7 +131,7 @@ static int lt3651_charger_probe(struct platform_device *pdev)
charger_desc->properties = lt3651_charger_properties;
charger_desc->num_properties = ARRAY_SIZE(lt3651_charger_properties);
charger_desc->get_property = lt3651_charger_get_property;
- psy_cfg.of_node = pdev->dev.of_node;
+ psy_cfg.fwnode = dev_fwnode(&pdev->dev);
psy_cfg.drv_data = lt3651_charger;
lt3651_charger->charger = devm_power_supply_register(&pdev->dev,
diff --git a/drivers/power/supply/ltc4162-l-charger.c b/drivers/power/supply/ltc4162-l-charger.c
index 23eb426295db..9dd74fa9552d 100644
--- a/drivers/power/supply/ltc4162-l-charger.c
+++ b/drivers/power/supply/ltc4162-l-charger.c
@@ -1119,7 +1119,7 @@ static const struct regmap_config ltc4162l_regmap_config = {
.writeable_reg = ltc4162l_is_writeable_reg,
.volatile_reg = ltc4162l_is_volatile_reg,
.max_register = LTC4162L_INPUT_UNDERVOLTAGE_DAC,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static void ltc4162l_clear_interrupts(struct ltc4162l_info *info)
@@ -1185,7 +1185,7 @@ static int ltc4162l_probe(struct i2c_client *client)
if (!device_property_read_u32(dev, "lltc,cell-count", &value))
info->cell_count = value;
- ltc4162l_config.of_node = dev->of_node;
+ ltc4162l_config.fwnode = dev_fwnode(dev);
ltc4162l_config.drv_data = info;
ltc4162l_config.attr_grp = ltc4162l_attr_groups;
diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
index 655b3f25dbd7..acea176101fa 100644
--- a/drivers/power/supply/max17042_battery.c
+++ b/drivers/power/supply/max17042_battery.c
@@ -1066,7 +1066,7 @@ static int max17042_probe(struct i2c_client *client, struct device *dev, int irq
dev_set_drvdata(dev, chip);
psy_cfg.drv_data = chip;
- psy_cfg.of_node = dev->of_node;
+ psy_cfg.fwnode = dev_fwnode(dev);
/* When current is not measured,
* CURRENT_NOW and CURRENT_AVG properties should be invisible. */
diff --git a/drivers/power/supply/max1720x_battery.c b/drivers/power/supply/max1720x_battery.c
index 11580e414713..ea3912fd1de8 100644
--- a/drivers/power/supply/max1720x_battery.c
+++ b/drivers/power/supply/max1720x_battery.c
@@ -29,6 +29,11 @@
/* ModelGauge m5 */
#define MAX172XX_STATUS 0x00 /* Status */
#define MAX172XX_STATUS_BAT_ABSENT BIT(3) /* Battery absent */
+#define MAX172XX_STATUS_IMX BIT(6) /* Maximum Current Alert Threshold Exceeded */
+#define MAX172XX_STATUS_VMN BIT(8) /* Minimum Voltage Alert Threshold Exceeded */
+#define MAX172XX_STATUS_TMN BIT(9) /* Minimum Temperature Alert Threshold Exceeded */
+#define MAX172XX_STATUS_VMX BIT(12) /* Maximum Voltage Alert Threshold Exceeded */
+#define MAX172XX_STATUS_TMX BIT(13) /* Maximum Temperature Alert Threshold Exceeded */
#define MAX172XX_REPCAP 0x05 /* Average capacity */
#define MAX172XX_REPSOC 0x06 /* Percentage of charge */
#define MAX172XX_TEMP 0x08 /* Temperature */
@@ -114,7 +119,7 @@ static const struct regmap_config max1720x_regmap_cfg = {
.val_format_endian = REGMAP_ENDIAN_LITTLE,
.rd_table = &max1720x_readable_regs,
.volatile_table = &max1720x_volatile_regs,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct regmap_range max1720x_nvmem_allow[] = {
@@ -250,6 +255,7 @@ static const struct nvmem_cell_info max1720x_nvmem_cells[] = {
};
static const enum power_supply_property max1720x_battery_props[] = {
+ POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
@@ -302,7 +308,7 @@ static int max172xx_temperature_to_ps(unsigned int reg)
/*
* Calculating current registers resolution:
*
- * RSense stored in 10^-5 Ohm, so mesaurment voltage must be
+ * RSense stored in 10^-5 Ohm, so measurement voltage must be
* in 10^-11 Volts for get current in uA.
* 16 bit current reg fullscale +/-51.2mV is 102400 uV.
* So: 102400 / 65535 * 10^5 = 156252
@@ -314,6 +320,43 @@ static int max172xx_current_to_voltage(unsigned int reg)
return val * 156252;
}
+static int max172xx_battery_health(struct max1720x_device_info *info,
+ unsigned int *health)
+{
+ unsigned int status;
+ int ret;
+
+ ret = regmap_read(info->regmap, MAX172XX_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ if (status & MAX172XX_STATUS_VMN)
+ *health = POWER_SUPPLY_HEALTH_DEAD;
+ else if (status & MAX172XX_STATUS_VMX)
+ *health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ else if (status & MAX172XX_STATUS_TMN)
+ *health = POWER_SUPPLY_HEALTH_COLD;
+ else if (status & MAX172XX_STATUS_TMX)
+ *health = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else if (status & MAX172XX_STATUS_IMX)
+ *health = POWER_SUPPLY_HEALTH_OVERCURRENT;
+ else
+ *health = POWER_SUPPLY_HEALTH_GOOD;
+
+ /* Clear events which are not self-clearing to detect next events */
+ if (status > 0 && status != MAX172XX_STATUS_IMX) {
+ ret = regmap_set_bits(info->regmap, MAX172XX_STATUS,
+ MAX172XX_STATUS_VMN |
+ MAX172XX_STATUS_VMX |
+ MAX172XX_STATUS_TMN |
+ MAX172XX_STATUS_TMX);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
static int max1720x_battery_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -323,6 +366,10 @@ static int max1720x_battery_get_property(struct power_supply *psy,
int ret = 0;
switch (psp) {
+ case POWER_SUPPLY_PROP_HEALTH:
+ ret = max172xx_battery_health(info, &reg_val);
+ val->intval = reg_val;
+ break;
case POWER_SUPPLY_PROP_PRESENT:
/*
* POWER_SUPPLY_PROP_PRESENT will always readable via
diff --git a/drivers/power/supply/max77650-charger.c b/drivers/power/supply/max77650-charger.c
index 5f58c0c24b4d..4ae43668992e 100644
--- a/drivers/power/supply/max77650-charger.c
+++ b/drivers/power/supply/max77650-charger.c
@@ -298,7 +298,7 @@ static int max77650_charger_probe(struct platform_device *pdev)
chg->dev = dev;
- pscfg.of_node = dev->of_node;
+ pscfg.fwnode = dev_fwnode(dev);
pscfg.drv_data = chg;
chg_irq = platform_get_irq_byname(pdev, "CHG");
diff --git a/drivers/power/supply/max77693_charger.c b/drivers/power/supply/max77693_charger.c
index cdea35c0d1de..027d6a539b65 100644
--- a/drivers/power/supply/max77693_charger.c
+++ b/drivers/power/supply/max77693_charger.c
@@ -608,7 +608,7 @@ static int max77693_set_charge_input_threshold_volt(struct max77693_charger *chg
case 4700000:
case 4800000:
case 4900000:
- data = (uvolt - 4700000) / 100000;
+ data = ((uvolt - 4700000) / 100000) + 1;
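+ /* i.e. 4.7 V -> 1, 4.8 V -> 2, 4.9 V -> 3 */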
break;
default:
dev_err(chg->dev, "Wrong value for charge input voltage regulation threshold\n");
diff --git a/drivers/power/supply/max77705_charger.c b/drivers/power/supply/max77705_charger.c
new file mode 100644
index 000000000000..eec5e9ef795e
--- /dev/null
+++ b/drivers/power/supply/max77705_charger.c
@@ -0,0 +1,581 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Based on max77650-charger.c
+ *
+ * Copyright (C) 2025 Dzmitry Sankouski <dsankouski@gmail.com>
+ *
+ * Battery charger driver for MAXIM 77705 charger/power-supply.
+ */
+
+#include <linux/devm-helpers.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/max77693-common.h>
+#include <linux/mfd/max77705-private.h>
+#include <linux/power/max77705_charger.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+
+static const char *max77705_charger_model = "max77705";
+static const char *max77705_charger_manufacturer = "Maxim Integrated";
+
+static const struct regmap_config max77705_chg_regmap_config = {
+ .reg_base = MAX77705_CHG_REG_BASE,
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX77705_CHG_REG_SAFEOUT_CTRL,
+};
+
+static enum power_supply_property max77705_charger_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+};
+
+static int max77705_chgin_irq(void *irq_drv_data)
+{
+ struct max77705_charger_data *charger = irq_drv_data;
+
+ queue_work(charger->wqueue, &charger->chgin_work);
+
+ return 0;
+}
+
+static const struct regmap_irq max77705_charger_irqs[] = {
+ { .mask = MAX77705_BYP_IM, },
+ { .mask = MAX77705_INP_LIMIT_IM, },
+ { .mask = MAX77705_BATP_IM, },
+ { .mask = MAX77705_BAT_IM, },
+ { .mask = MAX77705_CHG_IM, },
+ { .mask = MAX77705_WCIN_IM, },
+ { .mask = MAX77705_CHGIN_IM, },
+ { .mask = MAX77705_AICL_IM, },
+};
+
+static struct regmap_irq_chip max77705_charger_irq_chip = {
+ .name = "max77705-charger",
+ .status_base = MAX77705_CHG_REG_INT,
+ .mask_base = MAX77705_CHG_REG_INT_MASK,
+ .handle_post_irq = max77705_chgin_irq,
+ .num_regs = 1,
+ .irqs = max77705_charger_irqs,
+ .num_irqs = ARRAY_SIZE(max77705_charger_irqs),
+};
+
+static int max77705_charger_enable(struct max77705_charger_data *chg)
+{
+ int rv;
+
+ rv = regmap_update_bits(chg->regmap, MAX77705_CHG_REG_CNFG_09,
+ MAX77705_CHG_EN_MASK, MAX77705_CHG_EN_MASK);
+ if (rv)
+ dev_err(chg->dev, "unable to enable the charger: %d\n", rv);
+
+ return rv;
+}
+
+static void max77705_charger_disable(void *data)
+{
+ struct max77705_charger_data *chg = data;
+ int rv;
+
+ rv = regmap_update_bits(chg->regmap,
+ MAX77705_CHG_REG_CNFG_09,
+ MAX77705_CHG_EN_MASK,
+ MAX77705_CHG_DISABLE);
+ if (rv)
+ dev_err(chg->dev, "unable to disable the charger: %d\n", rv);
+}
+
+static int max77705_get_online(struct regmap *regmap, int *val)
+{
+ unsigned int data;
+ int ret;
+
+ ret = regmap_read(regmap, MAX77705_CHG_REG_INT_OK, &data);
+ if (ret < 0)
+ return ret;
+
+ *val = !!(data & MAX77705_CHGIN_OK);
+
+ return 0;
+}
+
+static int max77705_check_battery(struct max77705_charger_data *charger, int *val)
+{
+ unsigned int reg_data;
+ unsigned int reg_data2;
+ struct regmap *regmap = charger->regmap;
+
+ regmap_read(regmap, MAX77705_CHG_REG_INT_OK, &reg_data);
+
+ dev_dbg(charger->dev, "CHG_INT_OK(0x%x)\n", reg_data);
+
+ regmap_read(regmap, MAX77705_CHG_REG_DETAILS_00, &reg_data2);
+
+ dev_dbg(charger->dev, "CHG_DETAILS00(0x%x)\n", reg_data2);
+
+ if ((reg_data & MAX77705_BATP_OK) || !(reg_data2 & MAX77705_BATP_DTLS))
+ *val = true;
+ else
+ *val = false;
+
+ return 0;
+}
+
+static int max77705_get_charge_type(struct max77705_charger_data *charger, int *val)
+{
+ struct regmap *regmap = charger->regmap;
+ unsigned int reg_data;
+
+ regmap_read(regmap, MAX77705_CHG_REG_CNFG_09, &reg_data);
+ if (!MAX77705_CHARGER_CHG_CHARGING(reg_data)) {
+ *val = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ return 0;
+ }
+
+ regmap_read(regmap, MAX77705_CHG_REG_DETAILS_01, &reg_data);
+ reg_data &= MAX77705_CHG_DTLS;
+
+ switch (reg_data) {
+ case 0x0:
+ case MAX77705_CHARGER_CONSTANT_CURRENT:
+ case MAX77705_CHARGER_CONSTANT_VOLTAGE:
+ *val = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ return 0;
+ default:
+ *val = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ return 0;
+ }
+
+ return 0;
+}
+
+static int max77705_get_status(struct max77705_charger_data *charger, int *val)
+{
+ struct regmap *regmap = charger->regmap;
+ unsigned int reg_data;
+
+ regmap_read(regmap, MAX77705_CHG_REG_CNFG_09, &reg_data);
+ if (!MAX77705_CHARGER_CHG_CHARGING(reg_data)) {
+ *val = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ return 0;
+ }
+
+ regmap_read(regmap, MAX77705_CHG_REG_DETAILS_01, &reg_data);
+ reg_data &= MAX77705_CHG_DTLS;
+
+ switch (reg_data) {
+ case 0x0:
+ case MAX77705_CHARGER_CONSTANT_CURRENT:
+ case MAX77705_CHARGER_CONSTANT_VOLTAGE:
+ *val = POWER_SUPPLY_STATUS_CHARGING;
+ return 0;
+ case MAX77705_CHARGER_END_OF_CHARGE:
+ case MAX77705_CHARGER_DONE:
+ *val = POWER_SUPPLY_STATUS_FULL;
+ return 0;
+ /* These values are hard-coded as in the vendor kernel because */
+ /* their actual meaning could not be determined. */
+ case 0x05:
+ case 0x06:
+ case 0x07:
+ *val = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ return 0;
+ case 0x08:
+ case 0xA:
+ case 0xB:
+ *val = POWER_SUPPLY_STATUS_DISCHARGING;
+ return 0;
+ default:
+ *val = POWER_SUPPLY_STATUS_UNKNOWN;
+ return 0;
+ }
+
+ return 0;
+}
+
+static int max77705_get_vbus_state(struct regmap *regmap, int *value)
+{
+ int ret;
+ unsigned int charge_dtls;
+
+ ret = regmap_read(regmap, MAX77705_CHG_REG_DETAILS_00, &charge_dtls);
+ if (ret)
+ return ret;
+
+ charge_dtls = ((charge_dtls & MAX77705_CHGIN_DTLS) >>
+ MAX77705_CHGIN_DTLS_SHIFT);
+
+ switch (charge_dtls) {
+ case 0x00:
+ *value = POWER_SUPPLY_HEALTH_UNDERVOLTAGE;
+ break;
+ case 0x01:
+ *value = POWER_SUPPLY_HEALTH_UNDERVOLTAGE;
+ break;
+ case 0x02:
+ *value = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ break;
+ case 0x03:
+ *value = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ default:
+ return 0;
+ }
+ return 0;
+}
+
+static int max77705_get_battery_health(struct max77705_charger_data *charger,
+ int *value)
+{
+ struct regmap *regmap = charger->regmap;
+ unsigned int bat_dtls;
+
+ regmap_read(regmap, MAX77705_CHG_REG_DETAILS_01, &bat_dtls);
+ bat_dtls = ((bat_dtls & MAX77705_BAT_DTLS) >> MAX77705_BAT_DTLS_SHIFT);
+
+ switch (bat_dtls) {
+ case MAX77705_BATTERY_NOBAT:
+ dev_dbg(charger->dev, "%s: No battery and the charger is suspended\n",
+ __func__);
+ *value = POWER_SUPPLY_HEALTH_NO_BATTERY;
+ break;
+ case MAX77705_BATTERY_PREQUALIFICATION:
+ dev_dbg(charger->dev, "%s: battery is okay but its voltage is low(~VPQLB)\n",
+ __func__);
+ break;
+ case MAX77705_BATTERY_DEAD:
+ dev_dbg(charger->dev, "%s: battery dead\n", __func__);
+ *value = POWER_SUPPLY_HEALTH_DEAD;
+ break;
+ case MAX77705_BATTERY_GOOD:
+ case MAX77705_BATTERY_LOWVOLTAGE:
+ *value = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case MAX77705_BATTERY_OVERVOLTAGE:
+ dev_dbg(charger->dev, "%s: battery ovp\n", __func__);
+ *value = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ break;
+ default:
+ dev_dbg(charger->dev, "%s: battery unknown\n", __func__);
+ *value = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ break;
+ }
+
+ return 0;
+}
+
+static int max77705_get_health(struct max77705_charger_data *charger, int *val)
+{
+ struct regmap *regmap = charger->regmap;
+ int ret, is_online = 0;
+
+ ret = max77705_get_online(regmap, &is_online);
+ if (ret)
+ return ret;
+ if (is_online) {
+ ret = max77705_get_vbus_state(regmap, val);
+ if (ret || (*val != POWER_SUPPLY_HEALTH_GOOD))
+ return ret;
+ }
+ return max77705_get_battery_health(charger, val);
+}
+
+static int max77705_get_input_current(struct max77705_charger_data *charger,
+ int *val)
+{
+ unsigned int reg_data;
+ int get_current = 0;
+ struct regmap *regmap = charger->regmap;
+
+ regmap_read(regmap, MAX77705_CHG_REG_CNFG_09, &reg_data);
+
+ reg_data &= MAX77705_CHG_CHGIN_LIM_MASK;
+
+ if (reg_data <= 3)
+ get_current = MAX77705_CURRENT_CHGIN_MIN;
+ else if (reg_data >= MAX77705_CHG_CHGIN_LIM_MASK)
+ get_current = MAX77705_CURRENT_CHGIN_MAX;
+ else
+ get_current = (reg_data + 1) * MAX77705_CURRENT_CHGIN_STEP;
+
+ *val = get_current;
+
+ return 0;
+}
+
+static int max77705_get_charge_current(struct max77705_charger_data *charger,
+ int *val)
+{
+ unsigned int reg_data;
+ struct regmap *regmap = charger->regmap;
+
+ regmap_read(regmap, MAX77705_CHG_REG_CNFG_02, &reg_data);
+ reg_data &= MAX77705_CHG_CC;
+
+ *val = reg_data <= 0x2 ? MAX77705_CURRENT_CHGIN_MIN : reg_data * MAX77705_CURRENT_CHG_STEP;
+
+ return 0;
+}
+
+static int max77705_set_float_voltage(struct max77705_charger_data *charger,
+ int float_voltage)
+{
+ int float_voltage_mv;
+ unsigned int reg_data = 0;
+ struct regmap *regmap = charger->regmap;
+
+ float_voltage_mv = float_voltage / 1000;
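+ /* Register encoding example: 4350 mV -> ((4350 - 4200) / 10) + 0x04 = 0x13, */
+ /* which max77705_get_float_voltage() decodes back to 4350 mV. */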
+ reg_data = float_voltage_mv <= 4000 ? 0x0 :
+ float_voltage_mv >= 4500 ? 0x23 :
+ (float_voltage_mv <= 4200) ? (float_voltage_mv - 4000) / 50 :
+ (((float_voltage_mv - 4200) / 10) + 0x04);
+
+ return regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_04,
+ MAX77705_CHG_CV_PRM_MASK,
+ (reg_data << MAX77705_CHG_CV_PRM_SHIFT));
+}
+
+static int max77705_get_float_voltage(struct max77705_charger_data *charger,
+ int *val)
+{
+ unsigned int reg_data = 0;
+ int voltage_mv;
+ struct regmap *regmap = charger->regmap;
+
+ regmap_read(regmap, MAX77705_CHG_REG_CNFG_04, &reg_data);
+ reg_data &= MAX77705_CHG_PRM_MASK;
+ voltage_mv = reg_data <= 0x04 ? reg_data * 50 + 4000 :
+ (reg_data - 4) * 10 + 4200;
+ *val = voltage_mv * 1000;
+
+ return 0;
+}
+
+static int max77705_chg_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct max77705_charger_data *charger = power_supply_get_drvdata(psy);
+ struct regmap *regmap = charger->regmap;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ return max77705_get_online(regmap, &val->intval);
+ case POWER_SUPPLY_PROP_PRESENT:
+ return max77705_check_battery(charger, &val->intval);
+ case POWER_SUPPLY_PROP_STATUS:
+ return max77705_get_status(charger, &val->intval);
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ return max77705_get_charge_type(charger, &val->intval);
+ case POWER_SUPPLY_PROP_HEALTH:
+ return max77705_get_health(charger, &val->intval);
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return max77705_get_input_current(charger, &val->intval);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ return max77705_get_charge_current(charger, &val->intval);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ return max77705_get_float_voltage(charger, &val->intval);
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = charger->bat_info->voltage_max_design_uv;
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = max77705_charger_model;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = max77705_charger_manufacturer;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static const struct power_supply_desc max77705_charger_psy_desc = {
+ .name = "max77705-charger",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .properties = max77705_charger_props,
+ .num_properties = ARRAY_SIZE(max77705_charger_props),
+ .get_property = max77705_chg_get_property,
+};
+
+static void max77705_chgin_isr_work(struct work_struct *work)
+{
+ struct max77705_charger_data *charger =
+ container_of(work, struct max77705_charger_data, chgin_work);
+
+ power_supply_changed(charger->psy_chg);
+}
+
+static void max77705_charger_initialize(struct max77705_charger_data *chg)
+{
+ u8 reg_data;
+ struct power_supply_battery_info *info;
+ struct regmap *regmap = chg->regmap;
+
+ if (power_supply_get_battery_info(chg->psy_chg, &info) < 0)
+ return;
+
+ chg->bat_info = info;
+
+ /* unlock charger setting protect */
+ /* slowest LX slope */
+ reg_data = MAX77705_CHGPROT_MASK | MAX77705_SLOWEST_LX_SLOPE;
+ regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_06, reg_data,
+ reg_data);
+
+ /* fast charge timer disable */
+ /* restart threshold disable */
+ /* pre-qual charge disable */
+ reg_data = (MAX77705_FCHGTIME_DISABLE << MAX77705_FCHGTIME_SHIFT) |
+ (MAX77705_CHG_RSTRT_DISABLE << MAX77705_CHG_RSTRT_SHIFT) |
+ (MAX77705_CHG_PQEN_DISABLE << MAX77705_PQEN_SHIFT);
+ regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_01,
+ (MAX77705_FCHGTIME_MASK |
+ MAX77705_CHG_RSTRT_MASK |
+ MAX77705_PQEN_MASK),
+ reg_data);
+
+ /* OTG off(UNO on), boost off */
+ regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_00,
+ MAX77705_OTG_CTRL, 0);
+
+ /* charge current 450mA(default) */
+ /* otg current limit 900mA */
+ regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_02,
+ MAX77705_OTG_ILIM_MASK,
+ MAX77705_OTG_ILIM_900 << MAX77705_OTG_ILIM_SHIFT);
+
+ /* BAT to SYS OCP 4.80A */
+ regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_05,
+ MAX77705_REG_B2SOVRC_MASK,
+ MAX77705_B2SOVRC_4_8A << MAX77705_REG_B2SOVRC_SHIFT);
+ /* top off current 150mA */
+ /* top off timer 30min */
+ reg_data = (MAX77705_TO_ITH_150MA << MAX77705_TO_ITH_SHIFT) |
+ (MAX77705_TO_TIME_30M << MAX77705_TO_TIME_SHIFT) |
+ (MAX77705_SYS_TRACK_DISABLE << MAX77705_SYS_TRACK_DIS_SHIFT);
+ regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_03,
+ (MAX77705_TO_ITH_MASK |
+ MAX77705_TO_TIME_MASK |
+ MAX77705_SYS_TRACK_DIS_MASK), reg_data);
+
+ /* cv voltage 4.2V or 4.35V */
+ /* MINVSYS 3.6V(default) */
+ if (info->voltage_max_design_uv < 0) {
+ dev_warn(chg->dev, "missing battery:voltage-max-design-microvolt\n");
+ max77705_set_float_voltage(chg, 4200000);
+ } else {
+ max77705_set_float_voltage(chg, info->voltage_max_design_uv);
+ }
+
+ regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_12,
+ MAX77705_VCHGIN_REG_MASK, MAX77705_VCHGIN_4_5);
+ regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_12,
+ MAX77705_WCIN_REG_MASK, MAX77705_WCIN_4_5);
+
+ /* Watchdog timer */
+ regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_00,
+ MAX77705_WDTEN_MASK, 0);
+
+ /* Active Discharge Enable */
+ regmap_update_bits(regmap, MAX77705_PMIC_REG_MAINCTRL1, 1, 1);
+
+ /* VBYPSET=5.0V */
+ regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_11, MAX77705_VBYPSET_MASK, 0);
+
+ /* Switching Frequency : 1.5MHz */
+ regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_08, MAX77705_REG_FSW_MASK,
+ (MAX77705_CHG_FSW_1_5MHz << MAX77705_REG_FSW_SHIFT));
+
+ /* Auto skip mode */
+ regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_12, MAX77705_REG_DISKIP_MASK,
+ (MAX77705_AUTO_SKIP << MAX77705_REG_DISKIP_SHIFT));
+}
+
+static int max77705_charger_probe(struct i2c_client *i2c)
+{
+ struct power_supply_config pscfg = {};
+ struct max77705_charger_data *chg;
+ struct device *dev;
+ struct regmap_irq_chip_data *irq_data;
+ int ret;
+
+ dev = &i2c->dev;
+
+ chg = devm_kzalloc(dev, sizeof(*chg), GFP_KERNEL);
+ if (!chg)
+ return -ENOMEM;
+
+ chg->dev = dev;
+ i2c_set_clientdata(i2c, chg);
+
+ chg->regmap = devm_regmap_init_i2c(i2c, &max77705_chg_regmap_config);
+ if (IS_ERR(chg->regmap))
+ return PTR_ERR(chg->regmap);
+
+ ret = regmap_update_bits(chg->regmap,
+ MAX77705_CHG_REG_INT_MASK,
+ MAX77705_CHGIN_IM, 0);
+ if (ret)
+ return ret;
+
+ pscfg.fwnode = dev_fwnode(dev);
+ pscfg.drv_data = chg;
+
+ chg->psy_chg = devm_power_supply_register(dev, &max77705_charger_psy_desc, &pscfg);
+ if (IS_ERR(chg->psy_chg))
+ return PTR_ERR(chg->psy_chg);
+
+ max77705_charger_irq_chip.irq_drv_data = chg;
+ ret = devm_regmap_add_irq_chip(chg->dev, chg->regmap, i2c->irq,
+ IRQF_ONESHOT | IRQF_SHARED, 0,
+ &max77705_charger_irq_chip,
+ &irq_data);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add irq chip\n");
+
+ chg->wqueue = create_singlethread_workqueue(dev_name(dev));
+ if (!chg->wqueue)
+ return dev_err_probe(dev, -ENOMEM, "failed to create workqueue\n");
+
+ ret = devm_work_autocancel(dev, &chg->chgin_work, max77705_chgin_isr_work);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to initialize interrupt work\n");
+
+ max77705_charger_initialize(chg);
+
+ ret = max77705_charger_enable(chg);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to enable charge\n");
+
+ return devm_add_action_or_reset(dev, max77705_charger_disable, chg);
+}
+
+static const struct of_device_id max77705_charger_of_match[] = {
+ { .compatible = "maxim,max77705-charger" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max77705_charger_of_match);
+
+static struct i2c_driver max77705_charger_driver = {
+ .driver = {
+ .name = "max77705-charger",
+ .of_match_table = max77705_charger_of_match,
+ },
+ .probe = max77705_charger_probe,
+};
+module_i2c_driver(max77705_charger_driver);
+
+MODULE_AUTHOR("Dzmitry Sankouski <dsankouski@gmail.com>");
+MODULE_DESCRIPTION("Maxim MAX77705 charger driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/max8903_charger.c b/drivers/power/supply/max8903_charger.c
index e65d0141f260..45fbaad6c647 100644
--- a/drivers/power/supply/max8903_charger.c
+++ b/drivers/power/supply/max8903_charger.c
@@ -349,7 +349,7 @@ static int max8903_probe(struct platform_device *pdev)
data->psy_desc.properties = max8903_charger_props;
data->psy_desc.num_properties = ARRAY_SIZE(max8903_charger_props);
- psy_cfg.of_node = dev->of_node;
+ psy_cfg.fwnode = dev_fwnode(dev);
psy_cfg.drv_data = data;
data->psy = devm_power_supply_register(dev, &data->psy_desc, &psy_cfg);
diff --git a/drivers/power/supply/mm8013.c b/drivers/power/supply/mm8013.c
index 4adf2acc2779..93c50cff31bc 100644
--- a/drivers/power/supply/mm8013.c
+++ b/drivers/power/supply/mm8013.c
@@ -274,7 +274,7 @@ static int mm8013_probe(struct i2c_client *client)
return dev_err_probe(dev, ret, "MM8013 not found\n");
psy_cfg.drv_data = chip;
- psy_cfg.of_node = dev->of_node;
+ psy_cfg.fwnode = dev_fwnode(dev);
psy = devm_power_supply_register(dev, &mm8013_desc, &psy_cfg);
if (IS_ERR(psy))
diff --git a/drivers/power/supply/mt6360_charger.c b/drivers/power/supply/mt6360_charger.c
index e99e55148976..77747eb51667 100644
--- a/drivers/power/supply/mt6360_charger.c
+++ b/drivers/power/supply/mt6360_charger.c
@@ -810,7 +810,7 @@ static int mt6360_charger_probe(struct platform_device *pdev)
memcpy(&mci->psy_desc, &mt6360_charger_desc, sizeof(mci->psy_desc));
mci->psy_desc.name = dev_name(&pdev->dev);
charger_cfg.drv_data = mci;
- charger_cfg.of_node = pdev->dev.of_node;
+ charger_cfg.fwnode = dev_fwnode(&pdev->dev);
mci->psy = devm_power_supply_register(&pdev->dev,
&mci->psy_desc, &charger_cfg);
if (IS_ERR(mci->psy))
diff --git a/drivers/power/supply/mt6370-charger.c b/drivers/power/supply/mt6370-charger.c
index ad8793bf997e..98579998b300 100644
--- a/drivers/power/supply/mt6370-charger.c
+++ b/drivers/power/supply/mt6370-charger.c
@@ -752,7 +752,7 @@ static int mt6370_chg_init_psy(struct mt6370_priv *priv)
{
struct power_supply_config cfg = {
.drv_data = priv,
- .of_node = dev_of_node(priv->dev),
+ .fwnode = dev_fwnode(priv->dev),
};
priv->psy = devm_power_supply_register(priv->dev, &mt6370_chg_psy_desc,
@@ -772,7 +772,6 @@ static void mt6370_chg_destroy_wq(void *data)
{
struct workqueue_struct *wq = data;
- flush_workqueue(wq);
destroy_workqueue(wq);
}
diff --git a/drivers/power/supply/olpc_battery.c b/drivers/power/supply/olpc_battery.c
index 849f63e89ba0..b9b607822676 100644
--- a/drivers/power/supply/olpc_battery.c
+++ b/drivers/power/supply/olpc_battery.c
@@ -674,7 +674,7 @@ static int olpc_battery_probe(struct platform_device *pdev)
/* Ignore the status. It doesn't actually matter */
- ac_psy_cfg.of_node = pdev->dev.of_node;
+ ac_psy_cfg.fwnode = dev_fwnode(&pdev->dev);
ac_psy_cfg.drv_data = data;
data->olpc_ac = devm_power_supply_register(&pdev->dev, &olpc_ac_desc,
@@ -692,7 +692,7 @@ static int olpc_battery_probe(struct platform_device *pdev)
olpc_bat_desc.num_properties = ARRAY_SIZE(olpc_xo1_bat_props);
}
- bat_psy_cfg.of_node = pdev->dev.of_node;
+ bat_psy_cfg.fwnode = dev_fwnode(&pdev->dev);
bat_psy_cfg.drv_data = data;
bat_psy_cfg.attr_grp = olpc_bat_sysfs_groups;
diff --git a/drivers/power/supply/pcf50633-charger.c b/drivers/power/supply/pcf50633-charger.c
deleted file mode 100644
index 0136bc87b105..000000000000
--- a/drivers/power/supply/pcf50633-charger.c
+++ /dev/null
@@ -1,466 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* NXP PCF50633 Main Battery Charger Driver
- *
- * (C) 2006-2008 by Openmoko, Inc.
- * Author: Balaji Rao <balajirrao@openmoko.org>
- * All rights reserved.
- *
- * Broken down from monstrous PCF50633 driver mainly by
- * Harald Welte, Andy Green and Werner Almesberger
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/sysfs.h>
-#include <linux/platform_device.h>
-#include <linux/power_supply.h>
-
-#include <linux/mfd/pcf50633/core.h>
-#include <linux/mfd/pcf50633/mbc.h>
-
-struct pcf50633_mbc {
- struct pcf50633 *pcf;
-
- int adapter_online;
- int usb_online;
-
- struct power_supply *usb;
- struct power_supply *adapter;
- struct power_supply *ac;
-};
-
-int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma)
-{
- struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev);
- int ret = 0;
- u8 bits;
- u8 mbcs2, chgmod;
- unsigned int mbcc5;
-
- if (ma >= 1000) {
- bits = PCF50633_MBCC7_USB_1000mA;
- ma = 1000;
- } else if (ma >= 500) {
- bits = PCF50633_MBCC7_USB_500mA;
- ma = 500;
- } else if (ma >= 100) {
- bits = PCF50633_MBCC7_USB_100mA;
- ma = 100;
- } else {
- bits = PCF50633_MBCC7_USB_SUSPEND;
- ma = 0;
- }
-
- ret = pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC7,
- PCF50633_MBCC7_USB_MASK, bits);
- if (ret)
- dev_err(pcf->dev, "error setting usb curlim to %d mA\n", ma);
- else
- dev_info(pcf->dev, "usb curlim to %d mA\n", ma);
-
- /*
- * We limit the charging current to be the USB current limit.
- * The reason is that on pcf50633, when it enters PMU Standby mode,
- * which it does when the device goes "off", the USB current limit
- * reverts to the variant default. In at least one common case, that
- * default is 500mA. By setting the charging current to be the same
- * as the USB limit we set here before PMU standby, we enforce it only
- * using the correct amount of current even when the USB current limit
- * gets reset to the wrong thing
- */
-
- if (mbc->pcf->pdata->charger_reference_current_ma) {
- mbcc5 = (ma << 8) / mbc->pcf->pdata->charger_reference_current_ma;
- if (mbcc5 > 255)
- mbcc5 = 255;
- pcf50633_reg_write(mbc->pcf, PCF50633_REG_MBCC5, mbcc5);
- }
-
- mbcs2 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS2);
- chgmod = (mbcs2 & PCF50633_MBCS2_MBC_MASK);
-
- /* If chgmod == BATFULL, setting chgena has no effect.
- * Datasheet says we need to set resume instead but when autoresume is
- * used resume doesn't work. Clear and set chgena instead.
- */
- if (chgmod != PCF50633_MBCS2_MBC_BAT_FULL)
- pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC1,
- PCF50633_MBCC1_CHGENA, PCF50633_MBCC1_CHGENA);
- else {
- pcf50633_reg_clear_bits(pcf, PCF50633_REG_MBCC1,
- PCF50633_MBCC1_CHGENA);
- pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC1,
- PCF50633_MBCC1_CHGENA, PCF50633_MBCC1_CHGENA);
- }
-
- power_supply_changed(mbc->usb);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(pcf50633_mbc_usb_curlim_set);
-
-int pcf50633_mbc_get_status(struct pcf50633 *pcf)
-{
- struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev);
- int status = 0;
- u8 chgmod;
-
- if (!mbc)
- return 0;
-
- chgmod = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS2)
- & PCF50633_MBCS2_MBC_MASK;
-
- if (mbc->usb_online)
- status |= PCF50633_MBC_USB_ONLINE;
- if (chgmod == PCF50633_MBCS2_MBC_USB_PRE ||
- chgmod == PCF50633_MBCS2_MBC_USB_PRE_WAIT ||
- chgmod == PCF50633_MBCS2_MBC_USB_FAST ||
- chgmod == PCF50633_MBCS2_MBC_USB_FAST_WAIT)
- status |= PCF50633_MBC_USB_ACTIVE;
- if (mbc->adapter_online)
- status |= PCF50633_MBC_ADAPTER_ONLINE;
- if (chgmod == PCF50633_MBCS2_MBC_ADP_PRE ||
- chgmod == PCF50633_MBCS2_MBC_ADP_PRE_WAIT ||
- chgmod == PCF50633_MBCS2_MBC_ADP_FAST ||
- chgmod == PCF50633_MBCS2_MBC_ADP_FAST_WAIT)
- status |= PCF50633_MBC_ADAPTER_ACTIVE;
-
- return status;
-}
-EXPORT_SYMBOL_GPL(pcf50633_mbc_get_status);
-
-int pcf50633_mbc_get_usb_online_status(struct pcf50633 *pcf)
-{
- struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev);
-
- if (!mbc)
- return 0;
-
- return mbc->usb_online;
-}
-EXPORT_SYMBOL_GPL(pcf50633_mbc_get_usb_online_status);
-
-static ssize_t
-show_chgmode(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct pcf50633_mbc *mbc = dev_get_drvdata(dev);
-
- u8 mbcs2 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS2);
- u8 chgmod = (mbcs2 & PCF50633_MBCS2_MBC_MASK);
-
- return sysfs_emit(buf, "%d\n", chgmod);
-}
-static DEVICE_ATTR(chgmode, S_IRUGO, show_chgmode, NULL);
-
-static ssize_t
-show_usblim(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct pcf50633_mbc *mbc = dev_get_drvdata(dev);
- u8 usblim = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCC7) &
- PCF50633_MBCC7_USB_MASK;
- unsigned int ma;
-
- if (usblim == PCF50633_MBCC7_USB_1000mA)
- ma = 1000;
- else if (usblim == PCF50633_MBCC7_USB_500mA)
- ma = 500;
- else if (usblim == PCF50633_MBCC7_USB_100mA)
- ma = 100;
- else
- ma = 0;
-
- return sysfs_emit(buf, "%u\n", ma);
-}
-
-static ssize_t set_usblim(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct pcf50633_mbc *mbc = dev_get_drvdata(dev);
- unsigned long ma;
- int ret;
-
- ret = kstrtoul(buf, 10, &ma);
- if (ret)
- return ret;
-
- pcf50633_mbc_usb_curlim_set(mbc->pcf, ma);
-
- return count;
-}
-
-static DEVICE_ATTR(usb_curlim, S_IRUGO | S_IWUSR, show_usblim, set_usblim);
-
-static ssize_t
-show_chglim(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct pcf50633_mbc *mbc = dev_get_drvdata(dev);
- u8 mbcc5 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCC5);
- unsigned int ma;
-
- if (!mbc->pcf->pdata->charger_reference_current_ma)
- return -ENODEV;
-
- ma = (mbc->pcf->pdata->charger_reference_current_ma * mbcc5) >> 8;
-
- return sysfs_emit(buf, "%u\n", ma);
-}
-
-static ssize_t set_chglim(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct pcf50633_mbc *mbc = dev_get_drvdata(dev);
- unsigned long ma;
- unsigned int mbcc5;
- int ret;
-
- if (!mbc->pcf->pdata->charger_reference_current_ma)
- return -ENODEV;
-
- ret = kstrtoul(buf, 10, &ma);
- if (ret)
- return ret;
-
- mbcc5 = (ma << 8) / mbc->pcf->pdata->charger_reference_current_ma;
- if (mbcc5 > 255)
- mbcc5 = 255;
- pcf50633_reg_write(mbc->pcf, PCF50633_REG_MBCC5, mbcc5);
-
- return count;
-}
-
-/*
- * This attribute allows to change MBC charging limit on the fly
- * independently of usb current limit. It also gets set automatically every
- * time usb current limit is changed.
- */
-static DEVICE_ATTR(chg_curlim, S_IRUGO | S_IWUSR, show_chglim, set_chglim);
-
-static struct attribute *pcf50633_mbc_sysfs_attrs[] = {
- &dev_attr_chgmode.attr,
- &dev_attr_usb_curlim.attr,
- &dev_attr_chg_curlim.attr,
- NULL,
-};
-
-ATTRIBUTE_GROUPS(pcf50633_mbc_sysfs);
-
-static void
-pcf50633_mbc_irq_handler(int irq, void *data)
-{
- struct pcf50633_mbc *mbc = data;
-
- /* USB */
- if (irq == PCF50633_IRQ_USBINS) {
- mbc->usb_online = 1;
- } else if (irq == PCF50633_IRQ_USBREM) {
- mbc->usb_online = 0;
- pcf50633_mbc_usb_curlim_set(mbc->pcf, 0);
- }
-
- /* Adapter */
- if (irq == PCF50633_IRQ_ADPINS)
- mbc->adapter_online = 1;
- else if (irq == PCF50633_IRQ_ADPREM)
- mbc->adapter_online = 0;
-
- power_supply_changed(mbc->ac);
- power_supply_changed(mbc->usb);
- power_supply_changed(mbc->adapter);
-
- if (mbc->pcf->pdata->mbc_event_callback)
- mbc->pcf->pdata->mbc_event_callback(mbc->pcf, irq);
-}
-
-static int adapter_get_property(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
-{
- struct pcf50633_mbc *mbc = power_supply_get_drvdata(psy);
- int ret = 0;
-
- switch (psp) {
- case POWER_SUPPLY_PROP_ONLINE:
- val->intval = mbc->adapter_online;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
-
-static int usb_get_property(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
-{
- struct pcf50633_mbc *mbc = power_supply_get_drvdata(psy);
- int ret = 0;
- u8 usblim = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCC7) &
- PCF50633_MBCC7_USB_MASK;
-
- switch (psp) {
- case POWER_SUPPLY_PROP_ONLINE:
- val->intval = mbc->usb_online &&
- (usblim <= PCF50633_MBCC7_USB_500mA);
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
-
-static int ac_get_property(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
-{
- struct pcf50633_mbc *mbc = power_supply_get_drvdata(psy);
- int ret = 0;
- u8 usblim = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCC7) &
- PCF50633_MBCC7_USB_MASK;
-
- switch (psp) {
- case POWER_SUPPLY_PROP_ONLINE:
- val->intval = mbc->usb_online &&
- (usblim == PCF50633_MBCC7_USB_1000mA);
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
-
-static enum power_supply_property power_props[] = {
- POWER_SUPPLY_PROP_ONLINE,
-};
-
-static const u8 mbc_irq_handlers[] = {
- PCF50633_IRQ_ADPINS,
- PCF50633_IRQ_ADPREM,
- PCF50633_IRQ_USBINS,
- PCF50633_IRQ_USBREM,
- PCF50633_IRQ_BATFULL,
- PCF50633_IRQ_CHGHALT,
- PCF50633_IRQ_THLIMON,
- PCF50633_IRQ_THLIMOFF,
- PCF50633_IRQ_USBLIMON,
- PCF50633_IRQ_USBLIMOFF,
- PCF50633_IRQ_LOWSYS,
- PCF50633_IRQ_LOWBAT,
-};
-
-static const struct power_supply_desc pcf50633_mbc_adapter_desc = {
- .name = "adapter",
- .type = POWER_SUPPLY_TYPE_MAINS,
- .properties = power_props,
- .num_properties = ARRAY_SIZE(power_props),
- .get_property = &adapter_get_property,
-};
-
-static const struct power_supply_desc pcf50633_mbc_usb_desc = {
- .name = "usb",
- .type = POWER_SUPPLY_TYPE_USB,
- .properties = power_props,
- .num_properties = ARRAY_SIZE(power_props),
- .get_property = usb_get_property,
-};
-
-static const struct power_supply_desc pcf50633_mbc_ac_desc = {
- .name = "ac",
- .type = POWER_SUPPLY_TYPE_MAINS,
- .properties = power_props,
- .num_properties = ARRAY_SIZE(power_props),
- .get_property = ac_get_property,
-};
-
-static int pcf50633_mbc_probe(struct platform_device *pdev)
-{
- struct power_supply_config psy_cfg = {};
- struct power_supply_config usb_psy_cfg;
- struct pcf50633_mbc *mbc;
- int i;
- u8 mbcs1;
-
- mbc = devm_kzalloc(&pdev->dev, sizeof(*mbc), GFP_KERNEL);
- if (!mbc)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, mbc);
- mbc->pcf = dev_to_pcf50633(pdev->dev.parent);
-
- /* Set up IRQ handlers */
- for (i = 0; i < ARRAY_SIZE(mbc_irq_handlers); i++)
- pcf50633_register_irq(mbc->pcf, mbc_irq_handlers[i],
- pcf50633_mbc_irq_handler, mbc);
-
- psy_cfg.supplied_to = mbc->pcf->pdata->batteries;
- psy_cfg.num_supplicants = mbc->pcf->pdata->num_batteries;
- psy_cfg.drv_data = mbc;
-
- /* Create power supplies */
- mbc->adapter = devm_power_supply_register(&pdev->dev,
- &pcf50633_mbc_adapter_desc,
- &psy_cfg);
- if (IS_ERR(mbc->adapter)) {
- dev_err(mbc->pcf->dev, "failed to register adapter\n");
- return PTR_ERR(mbc->adapter);
- }
-
- usb_psy_cfg = psy_cfg;
- usb_psy_cfg.attr_grp = pcf50633_mbc_sysfs_groups;
-
- mbc->usb = devm_power_supply_register(&pdev->dev,
- &pcf50633_mbc_usb_desc,
- &usb_psy_cfg);
- if (IS_ERR(mbc->usb)) {
- dev_err(mbc->pcf->dev, "failed to register usb\n");
- return PTR_ERR(mbc->usb);
- }
-
- mbc->ac = devm_power_supply_register(&pdev->dev,
- &pcf50633_mbc_ac_desc,
- &psy_cfg);
- if (IS_ERR(mbc->ac)) {
- dev_err(mbc->pcf->dev, "failed to register ac\n");
- return PTR_ERR(mbc->ac);
- }
-
- mbcs1 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS1);
- if (mbcs1 & PCF50633_MBCS1_USBPRES)
- pcf50633_mbc_irq_handler(PCF50633_IRQ_USBINS, mbc);
- if (mbcs1 & PCF50633_MBCS1_ADAPTPRES)
- pcf50633_mbc_irq_handler(PCF50633_IRQ_ADPINS, mbc);
-
- return 0;
-}
-
-static void pcf50633_mbc_remove(struct platform_device *pdev)
-{
- struct pcf50633_mbc *mbc = platform_get_drvdata(pdev);
- int i;
-
- /* Remove IRQ handlers */
- for (i = 0; i < ARRAY_SIZE(mbc_irq_handlers); i++)
- pcf50633_free_irq(mbc->pcf, mbc_irq_handlers[i]);
-}
-
-static struct platform_driver pcf50633_mbc_driver = {
- .driver = {
- .name = "pcf50633-mbc",
- },
- .probe = pcf50633_mbc_probe,
- .remove = pcf50633_mbc_remove,
-};
-
-module_platform_driver(pcf50633_mbc_driver);
-
-MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
-MODULE_DESCRIPTION("PCF50633 mbc driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:pcf50633-mbc");
diff --git a/drivers/power/supply/pm8916_bms_vm.c b/drivers/power/supply/pm8916_bms_vm.c
index 5d0dd842509c..5120be086e6f 100644
--- a/drivers/power/supply/pm8916_bms_vm.c
+++ b/drivers/power/supply/pm8916_bms_vm.c
@@ -210,7 +210,7 @@ static int pm8916_bms_vm_battery_probe(struct platform_device *pdev)
bat->vbat_now = bat->last_ocv;
psy_cfg.drv_data = bat;
- psy_cfg.of_node = dev->of_node;
+ psy_cfg.fwnode = dev_fwnode(dev);
bat->battery = devm_power_supply_register(dev, &pm8916_bms_vm_battery_psy_desc, &psy_cfg);
if (IS_ERR(bat->battery))
diff --git a/drivers/power/supply/pm8916_lbc.c b/drivers/power/supply/pm8916_lbc.c
index 6d92e98cbecc..c74b75b1b267 100644
--- a/drivers/power/supply/pm8916_lbc.c
+++ b/drivers/power/supply/pm8916_lbc.c
@@ -322,7 +322,7 @@ static int pm8916_lbc_charger_probe(struct platform_device *pdev)
dev_err_probe(dev, ret, "Error while parsing device tree\n");
psy_cfg.drv_data = chg;
- psy_cfg.of_node = dev->of_node;
+ psy_cfg.fwnode = dev_fwnode(dev);
chg->charger = devm_power_supply_register(dev, &pm8916_lbc_charger_psy_desc, &psy_cfg);
if (IS_ERR(chg->charger))
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 76c340b38015..33a5bfce4604 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -200,11 +200,11 @@ static int __power_supply_populate_supplied_from(struct power_supply *epsy,
int i = 0;
do {
- np = of_parse_phandle(psy->of_node, "power-supplies", i++);
+ np = of_parse_phandle(psy->dev.of_node, "power-supplies", i++);
if (!np)
break;
- if (np == epsy->of_node) {
+ if (np == epsy->dev.of_node) {
dev_dbg(&psy->dev, "%s: Found supply : %s\n",
psy->desc->name, epsy->desc->name);
psy->supplied_from[i-1] = (char *)epsy->desc->name;
@@ -235,7 +235,7 @@ static int __power_supply_find_supply_from_node(struct power_supply *epsy,
struct device_node *np = data;
/* returning non-zero breaks out of power_supply_for_each_psy loop */
- if (epsy->of_node == np)
+ if (epsy->dev.of_node == np)
return 1;
return 0;
@@ -270,13 +270,13 @@ static int power_supply_check_supplies(struct power_supply *psy)
return 0;
/* No device node found, nothing to do */
- if (!psy->of_node)
+ if (!psy->dev.of_node)
return 0;
do {
int ret;
- np = of_parse_phandle(psy->of_node, "power-supplies", cnt++);
+ np = of_parse_phandle(psy->dev.of_node, "power-supplies", cnt++);
if (!np)
break;
@@ -449,19 +449,6 @@ int power_supply_get_property_from_supplier(struct power_supply *psy,
}
EXPORT_SYMBOL_GPL(power_supply_get_property_from_supplier);
-int power_supply_set_battery_charged(struct power_supply *psy)
-{
- if (atomic_read(&psy->use_cnt) >= 0 &&
- psy->desc->type == POWER_SUPPLY_TYPE_BATTERY &&
- psy->desc->set_charged) {
- psy->desc->set_charged(psy);
- return 0;
- }
-
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(power_supply_set_battery_charged);
-
static int power_supply_match_device_by_name(struct device *dev, const void *data)
{
const char *name = data;
@@ -606,8 +593,8 @@ int power_supply_get_battery_info(struct power_supply *psy,
const __be32 *list;
u32 min_max[2];
- if (psy->of_node) {
- battery_np = of_parse_phandle(psy->of_node, "monitored-battery", 0);
+ if (psy->dev.of_node) {
+ battery_np = of_parse_phandle(psy->dev.of_node, "monitored-battery", 0);
if (!battery_np)
return -ENODEV;
@@ -1544,9 +1531,8 @@ __power_supply_register(struct device *parent,
if (cfg) {
dev->groups = cfg->attr_grp;
psy->drv_data = cfg->drv_data;
- psy->of_node =
+ dev->of_node =
cfg->fwnode ? to_of_node(cfg->fwnode) : cfg->of_node;
- dev->of_node = psy->of_node;
psy->supplied_to = cfg->supplied_to;
psy->num_supplicants = cfg->num_supplicants;
}
diff --git a/drivers/power/supply/qcom_battmgr.c b/drivers/power/supply/qcom_battmgr.c
index 47d29271ddf4..fe27676fbc7c 100644
--- a/drivers/power/supply/qcom_battmgr.c
+++ b/drivers/power/supply/qcom_battmgr.c
@@ -8,6 +8,7 @@
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/power_supply.h>
+#include <linux/property.h>
#include <linux/soc/qcom/pdr.h>
#include <linux/soc/qcom/pmic_glink.h>
#include <linux/math.h>
@@ -1336,10 +1337,10 @@ static int qcom_battmgr_probe(struct auxiliary_device *adev,
battmgr->dev = dev;
psy_cfg.drv_data = battmgr;
- psy_cfg.of_node = adev->dev.of_node;
+ psy_cfg.fwnode = dev_fwnode(&adev->dev);
psy_cfg_supply.drv_data = battmgr;
- psy_cfg_supply.of_node = adev->dev.of_node;
+ psy_cfg_supply.fwnode = dev_fwnode(&adev->dev);
psy_cfg_supply.supplied_to = qcom_battmgr_battery;
psy_cfg_supply.num_supplicants = 1;
diff --git a/drivers/power/supply/qcom_pmi8998_charger.c b/drivers/power/supply/qcom_pmi8998_charger.c
index 3b4132376649..74a8d8ed8d9f 100644
--- a/drivers/power/supply/qcom_pmi8998_charger.c
+++ b/drivers/power/supply/qcom_pmi8998_charger.c
@@ -964,7 +964,7 @@ static int smb2_probe(struct platform_device *pdev)
return rc;
supply_config.drv_data = chip;
- supply_config.of_node = pdev->dev.of_node;
+ supply_config.fwnode = dev_fwnode(&pdev->dev);
desc = devm_kzalloc(chip->dev, sizeof(smb2_psy_desc), GFP_KERNEL);
if (!desc)
diff --git a/drivers/power/supply/qcom_smbb.c b/drivers/power/supply/qcom_smbb.c
index a79563f6ff7a..28afe758a2da 100644
--- a/drivers/power/supply/qcom_smbb.c
+++ b/drivers/power/supply/qcom_smbb.c
@@ -880,7 +880,7 @@ static int smbb_charger_probe(struct platform_device *pdev)
}
bat_cfg.drv_data = chg;
- bat_cfg.of_node = pdev->dev.of_node;
+ bat_cfg.fwnode = dev_fwnode(&pdev->dev);
chg->bat_psy = devm_power_supply_register(&pdev->dev,
&bat_psy_desc,
&bat_cfg);
diff --git a/drivers/power/supply/rk817_charger.c b/drivers/power/supply/rk817_charger.c
index e5f35d083c23..945c7720c4ae 100644
--- a/drivers/power/supply/rk817_charger.c
+++ b/drivers/power/supply/rk817_charger.c
@@ -1088,7 +1088,7 @@ static int rk817_charger_probe(struct platform_device *pdev)
rk817_bat_calib_vol(charger);
pscfg.drv_data = charger;
- pscfg.of_node = node;
+ pscfg.fwnode = node ? &node->fwnode : NULL;
/*
* Get sample resistor value. Note only values of 10000 or 20000
diff --git a/drivers/power/supply/rt5033_battery.c b/drivers/power/supply/rt5033_battery.c
index 7a27b262fb84..b2674adfa30b 100644
--- a/drivers/power/supply/rt5033_battery.c
+++ b/drivers/power/supply/rt5033_battery.c
@@ -160,7 +160,7 @@ static int rt5033_battery_probe(struct i2c_client *client)
}
i2c_set_clientdata(client, battery);
- psy_cfg.of_node = client->dev.of_node;
+ psy_cfg.fwnode = dev_fwnode(&client->dev);
psy_cfg.drv_data = battery;
battery->psy = devm_power_supply_register(&client->dev,
diff --git a/drivers/power/supply/rt5033_charger.c b/drivers/power/supply/rt5033_charger.c
index d19c7e80a92a..2fdc58439707 100644
--- a/drivers/power/supply/rt5033_charger.c
+++ b/drivers/power/supply/rt5033_charger.c
@@ -16,6 +16,7 @@
#include <linux/power_supply.h>
#include <linux/regmap.h>
#include <linux/mfd/rt5033-private.h>
+#include <linux/property.h>
struct rt5033_charger_data {
unsigned int pre_uamp;
@@ -675,7 +676,7 @@ static int rt5033_charger_probe(struct platform_device *pdev)
charger->regmap = dev_get_regmap(pdev->dev.parent, NULL);
mutex_init(&charger->lock);
- psy_cfg.of_node = pdev->dev.of_node;
+ psy_cfg.fwnode = dev_fwnode(&pdev->dev);
psy_cfg.drv_data = charger;
charger->psy = devm_power_supply_register(charger->dev,
diff --git a/drivers/power/supply/rt9455_charger.c b/drivers/power/supply/rt9455_charger.c
index 64a23e3d7bb0..1ffe7f02932f 100644
--- a/drivers/power/supply/rt9455_charger.c
+++ b/drivers/power/supply/rt9455_charger.c
@@ -1579,7 +1579,7 @@ static const struct regmap_config rt9455_regmap_config = {
.writeable_reg = rt9455_is_writeable_reg,
.volatile_reg = rt9455_is_volatile_reg,
.max_register = RT9455_REG_MASK3,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static int rt9455_probe(struct i2c_client *client)
@@ -1658,7 +1658,7 @@ static int rt9455_probe(struct i2c_client *client)
INIT_DEFERRABLE_WORK(&info->batt_presence_work,
rt9455_batt_presence_work_callback);
- rt9455_charger_config.of_node = dev->of_node;
+ rt9455_charger_config.fwnode = dev_fwnode(dev);
rt9455_charger_config.drv_data = info;
rt9455_charger_config.supplied_to = rt9455_charger_supplied_to;
rt9455_charger_config.num_supplicants =
diff --git a/drivers/power/supply/rt9467-charger.c b/drivers/power/supply/rt9467-charger.c
index 235169c85c5d..e9aba9ad393c 100644
--- a/drivers/power/supply/rt9467-charger.c
+++ b/drivers/power/supply/rt9467-charger.c
@@ -826,7 +826,7 @@ static int rt9467_register_psy(struct rt9467_chg_data *data)
{
struct power_supply_config cfg = {
.drv_data = data,
- .of_node = dev_of_node(data->dev),
+ .fwnode = dev_fwnode(data->dev),
.attr_grp = rt9467_sysfs_groups,
};
diff --git a/drivers/power/supply/rt9471.c b/drivers/power/supply/rt9471.c
index 67b86ac91a21..bd966abb4df5 100644
--- a/drivers/power/supply/rt9471.c
+++ b/drivers/power/supply/rt9471.c
@@ -723,7 +723,7 @@ static int rt9471_register_psy(struct rt9471_chip *chip)
char *psy_name;
cfg.drv_data = chip;
- cfg.of_node = dev->of_node;
+ cfg.fwnode = dev_fwnode(dev);
cfg.attr_grp = rt9471_sysfs_groups;
psy_name = devm_kasprintf(dev, GFP_KERNEL, "rt9471-%s", dev_name(dev));
diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c
index 6f3d0413b1c1..943c82ee978f 100644
--- a/drivers/power/supply/sbs-battery.c
+++ b/drivers/power/supply/sbs-battery.c
@@ -1138,7 +1138,7 @@ static int sbs_probe(struct i2c_client *client)
chip->flags = (uintptr_t)i2c_get_match_data(client);
chip->client = client;
- psy_cfg.of_node = client->dev.of_node;
+ psy_cfg.fwnode = dev_fwnode(&client->dev);
psy_cfg.drv_data = chip;
chip->last_state = POWER_SUPPLY_STATUS_UNKNOWN;
sbs_invalidate_cached_props(chip);
diff --git a/drivers/power/supply/sbs-charger.c b/drivers/power/supply/sbs-charger.c
index ab3f095d90ea..27764123b929 100644
--- a/drivers/power/supply/sbs-charger.c
+++ b/drivers/power/supply/sbs-charger.c
@@ -173,7 +173,7 @@ static int sbs_probe(struct i2c_client *client)
return -ENOMEM;
chip->client = client;
- psy_cfg.of_node = client->dev.of_node;
+ psy_cfg.fwnode = dev_fwnode(&client->dev);
psy_cfg.drv_data = chip;
i2c_set_clientdata(client, chip);
diff --git a/drivers/power/supply/sbs-manager.c b/drivers/power/supply/sbs-manager.c
index 7d2f39f19acb..869729dfcd66 100644
--- a/drivers/power/supply/sbs-manager.c
+++ b/drivers/power/supply/sbs-manager.c
@@ -379,7 +379,7 @@ static int sbsm_probe(struct i2c_client *client)
return ret;
psy_cfg.drv_data = data;
- psy_cfg.of_node = dev->of_node;
+ psy_cfg.fwnode = dev_fwnode(dev);
data->psy = devm_power_supply_register(dev, psy_desc, &psy_cfg);
if (IS_ERR(data->psy))
return dev_err_probe(dev, PTR_ERR(data->psy),
diff --git a/drivers/power/supply/sc2731_charger.c b/drivers/power/supply/sc2731_charger.c
index 50d5157af927..58b86fd78771 100644
--- a/drivers/power/supply/sc2731_charger.c
+++ b/drivers/power/supply/sc2731_charger.c
@@ -480,7 +480,7 @@ static int sc2731_charger_probe(struct platform_device *pdev)
}
charger_cfg.drv_data = info;
- charger_cfg.of_node = np;
+ charger_cfg.fwnode = dev_fwnode(&pdev->dev);
info->psy_usb = devm_power_supply_register(&pdev->dev,
&sc2731_charger_desc,
&charger_cfg);
diff --git a/drivers/power/supply/sc27xx_fuel_gauge.c b/drivers/power/supply/sc27xx_fuel_gauge.c
index f36edc2ba708..a7ed9de8a289 100644
--- a/drivers/power/supply/sc27xx_fuel_gauge.c
+++ b/drivers/power/supply/sc27xx_fuel_gauge.c
@@ -1014,9 +1014,8 @@ static int sc27xx_fgu_hw_init(struct sc27xx_fgu_data *data)
if (!table)
return -EINVAL;
- data->cap_table = devm_kmemdup(data->dev, table,
- data->table_len * sizeof(*table),
- GFP_KERNEL);
+ data->cap_table = devm_kmemdup_array(data->dev, table, data->table_len,
+ sizeof(*table), GFP_KERNEL);
if (!data->cap_table) {
power_supply_put_battery_info(data->battery, info);
return -ENOMEM;
@@ -1141,7 +1140,6 @@ disable_fgu:
static int sc27xx_fgu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
struct power_supply_config fgu_cfg = { };
struct sc27xx_fgu_data *data;
int ret, irq;
@@ -1205,7 +1203,7 @@ static int sc27xx_fgu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
fgu_cfg.drv_data = data;
- fgu_cfg.of_node = np;
+ fgu_cfg.fwnode = dev_fwnode(dev);
data->battery = devm_power_supply_register(dev, &sc27xx_fgu_desc,
&fgu_cfg);
if (IS_ERR(data->battery)) {
diff --git a/drivers/power/supply/smb347-charger.c b/drivers/power/supply/smb347-charger.c
index c8392933ee28..8b95f7e8712f 100644
--- a/drivers/power/supply/smb347-charger.c
+++ b/drivers/power/supply/smb347-charger.c
@@ -1488,7 +1488,7 @@ static const struct regmap_config smb347_regmap = {
.max_register = SMB347_MAX_REGISTER,
.volatile_reg = smb347_volatile_reg,
.readable_reg = smb347_readable_reg,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct regulator_ops smb347_usb_vbus_regulator_ops = {
@@ -1553,7 +1553,7 @@ static int smb347_probe(struct i2c_client *client)
return PTR_ERR(smb->regmap);
mains_usb_cfg.drv_data = smb;
- mains_usb_cfg.of_node = dev->of_node;
+ mains_usb_cfg.fwnode = dev_fwnode(dev);
if (smb->use_mains) {
smb->mains = devm_power_supply_register(dev, &smb347_mains_desc,
&mains_usb_cfg);
diff --git a/drivers/power/supply/tps65090-charger.c b/drivers/power/supply/tps65090-charger.c
index d65193e410a6..d010f013af8c 100644
--- a/drivers/power/supply/tps65090-charger.c
+++ b/drivers/power/supply/tps65090-charger.c
@@ -259,7 +259,7 @@ static int tps65090_charger_probe(struct platform_device *pdev)
psy_cfg.supplied_to = pdata->supplied_to;
psy_cfg.num_supplicants = pdata->num_supplicants;
- psy_cfg.of_node = pdev->dev.of_node;
+ psy_cfg.fwnode = dev_fwnode(&pdev->dev);
psy_cfg.drv_data = cdata;
cdata->ac = devm_power_supply_register(&pdev->dev, &tps65090_charger_desc,
diff --git a/drivers/power/supply/tps65217_charger.c b/drivers/power/supply/tps65217_charger.c
index 6fff44e1ecac..6af17ce0b204 100644
--- a/drivers/power/supply/tps65217_charger.c
+++ b/drivers/power/supply/tps65217_charger.c
@@ -198,7 +198,7 @@ static int tps65217_charger_probe(struct platform_device *pdev)
charger->tps = tps;
charger->dev = &pdev->dev;
- cfg.of_node = pdev->dev.of_node;
+ cfg.fwnode = dev_fwnode(&pdev->dev);
cfg.drv_data = charger;
charger->psy = devm_power_supply_register(&pdev->dev,
diff --git a/drivers/power/supply/ucs1002_power.c b/drivers/power/supply/ucs1002_power.c
index 7382bec6a43c..d32a7633f9e7 100644
--- a/drivers/power/supply/ucs1002_power.c
+++ b/drivers/power/supply/ucs1002_power.c
@@ -560,7 +560,7 @@ static int ucs1002_probe(struct i2c_client *client)
irq_a_det = of_irq_get_byname(dev->of_node, "a_det");
irq_alert = of_irq_get_byname(dev->of_node, "alert");
- charger_config.of_node = dev->of_node;
+ charger_config.fwnode = dev_fwnode(dev);
charger_config.drv_data = info;
ret = regmap_read(info->regmap, UCS1002_REG_PRODUCT_ID, &regval);
diff --git a/drivers/powercap/Kconfig b/drivers/powercap/Kconfig
index 69ef8d081c98..03c4c796d993 100644
--- a/drivers/powercap/Kconfig
+++ b/drivers/powercap/Kconfig
@@ -82,7 +82,7 @@ config DTPM
config DTPM_CPU
bool "Add CPU power capping based on the energy model"
- depends on DTPM && ENERGY_MODEL
+ depends on DTPM && ENERGY_MODEL && SMP
help
This enables support for CPU power limitation based on
energy model.
diff --git a/drivers/powercap/idle_inject.c b/drivers/powercap/idle_inject.c
index 04c212953ded..5ad7cc438068 100644
--- a/drivers/powercap/idle_inject.c
+++ b/drivers/powercap/idle_inject.c
@@ -339,8 +339,7 @@ struct idle_inject_device *idle_inject_register_full(struct cpumask *cpumask,
return NULL;
cpumask_copy(to_cpumask(ii_dev->cpumask), cpumask);
- hrtimer_init(&ii_dev->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ii_dev->timer.function = idle_inject_timer_fn;
+ hrtimer_setup(&ii_dev->timer, idle_inject_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ii_dev->latency_us = UINT_MAX;
ii_dev->update = update;
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 77d75e1f14a9..5ab3feb29686 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -1274,7 +1274,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, &rapl_defaults_byt),
X86_MATCH_VFM(INTEL_ATOM_AIRMONT, &rapl_defaults_cht),
X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, &rapl_defaults_tng),
- X86_MATCH_VFM(INTEL_ATOM_AIRMONT_MID, &rapl_defaults_ann),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID2, &rapl_defaults_ann),
X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &rapl_defaults_core),
X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &rapl_defaults_core),
X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &rapl_defaults_core),
@@ -2064,8 +2064,7 @@ int rapl_package_add_pmu(struct rapl_package *rp)
raw_spin_lock_init(&data->lock);
INIT_LIST_HEAD(&data->active_list);
data->timer_interval = ms_to_ktime(rapl_pmu.timer_ms);
- hrtimer_init(&data->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- data->hrtimer.function = rapl_hrtimer_handle;
+ hrtimer_setup(&data->hrtimer, rapl_hrtimer_handle, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
return rapl_pmu_update(rp);
}
diff --git a/drivers/pps/generators/pps_gen_parport.c b/drivers/pps/generators/pps_gen_parport.c
index d46eed159495..f5eeb4dd01ad 100644
--- a/drivers/pps/generators/pps_gen_parport.c
+++ b/drivers/pps/generators/pps_gen_parport.c
@@ -208,8 +208,7 @@ static void parport_attach(struct parport *port)
calibrate_port(&device);
- hrtimer_init(&device.timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
- device.timer.function = hrtimer_event;
+ hrtimer_setup(&device.timer, hrtimer_event, CLOCK_REALTIME, HRTIMER_MODE_ABS);
hrtimer_start(&device.timer, next_intr_time(&device), HRTIMER_MODE_ABS);
return;
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index bf6468c56419..4380e6ddb849 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -205,6 +205,10 @@ long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
case PTP_EXTTS_REQUEST:
case PTP_EXTTS_REQUEST2:
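+ /* These requests change hardware state, so require a descriptor opened for writing */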
+ if ((pccontext->fp->f_mode & FMODE_WRITE) == 0) {
+ err = -EACCES;
+ break;
+ }
memset(&req, 0, sizeof(req));
if (copy_from_user(&req.extts, (void __user *)arg,
@@ -246,6 +250,10 @@ long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
case PTP_PEROUT_REQUEST:
case PTP_PEROUT_REQUEST2:
+ if ((pccontext->fp->f_mode & FMODE_WRITE) == 0) {
+ err = -EACCES;
+ break;
+ }
memset(&req, 0, sizeof(req));
if (copy_from_user(&req.perout, (void __user *)arg,
@@ -314,6 +322,10 @@ long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
case PTP_ENABLE_PPS:
case PTP_ENABLE_PPS2:
+ if ((pccontext->fp->f_mode & FMODE_WRITE) == 0) {
+ err = -EACCES;
+ break;
+ }
memset(&req, 0, sizeof(req));
if (!capable(CAP_SYS_TIME))
@@ -456,6 +468,10 @@ long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
case PTP_PIN_SETFUNC:
case PTP_PIN_SETFUNC2:
+ if ((pccontext->fp->f_mode & FMODE_WRITE) == 0) {
+ err = -EACCES;
+ break;
+ }
if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
err = -EFAULT;
break;
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index b651087f426f..b25635c5c745 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -2090,6 +2090,10 @@ ptp_ocp_signal_from_perout(struct ptp_ocp *bp, int gen,
{
struct ptp_ocp_signal s = { };
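+ /* Only the duty-cycle and phase flags are supported for periodic output */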
+ if (req->flags & ~(PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE))
+ return -EOPNOTSUPP;
+
s.polarity = bp->signal[gen].polarity;
s.period = ktime_set(req->period.sec, req->period.nsec);
if (!s.period)
@@ -3959,9 +3963,6 @@ _signal_summary_show(struct seq_file *s, struct ptp_ocp *bp, int nr)
bool on;
u32 val;
- if (!signal)
- return;
-
on = signal->running;
sprintf(label, "GEN%d", nr + 1);
seq_printf(s, "%7s: %s, period:%llu duty:%d%% phase:%llu pol:%d",
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 0915c1e7df16..4731d5b90d7e 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -567,7 +567,7 @@ config PWM_SIFIVE
tristate "SiFive PWM support"
depends on OF
depends on COMMON_CLK && HAS_IOMEM
- depends on RISCV || COMPILE_TEST
+ depends on ARCH_SIFIVE || COMPILE_TEST
help
Generic PWM framework driver for SiFive SoCs.
@@ -584,6 +584,16 @@ config PWM_SL28CPLD
To compile this driver as a module, choose M here: the module
will be called pwm-sl28cpld.
+config PWM_SOPHGO_SG2042
+ tristate "Sophgo SG2042 PWM support"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ help
+ PWM driver for the PWM controller on the Sophgo SG2042 SoC. The PWM
+ controller supports outputting 4 channels of PWM waveforms.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-sophgo-sg2042.
+
config PWM_SPEAR
tristate "STMicroelectronics SPEAr PWM support"
depends on PLAT_SPEAR || COMPILE_TEST
@@ -636,7 +646,7 @@ config PWM_STM32_LP
will be called pwm-stm32-lp.
config PWM_STMPE
- bool "STMPE expander PWM export"
+ tristate "STMPE expander PWM export"
depends on MFD_STMPE
help
This enables support for the PWMs found in the STMPE I/O
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 9081e0c0e9e0..539e0def3f82 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_PWM_RZ_MTU3) += pwm-rz-mtu3.o
obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o
obj-$(CONFIG_PWM_SIFIVE) += pwm-sifive.o
obj-$(CONFIG_PWM_SL28CPLD) += pwm-sl28cpld.o
+obj-$(CONFIG_PWM_SOPHGO_SG2042) += pwm-sophgo-sg2042.o
obj-$(CONFIG_PWM_SPEAR) += pwm-spear.o
obj-$(CONFIG_PWM_SPRD) += pwm-sprd.o
obj-$(CONFIG_PWM_STI) += pwm-sti.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index ccd54c089bab..a40c511e0096 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -1000,11 +1000,27 @@ of_pwm_xlate_with_flags(struct pwm_chip *chip, const struct of_phandle_args *arg
}
EXPORT_SYMBOL_GPL(of_pwm_xlate_with_flags);
+/*
+ * This callback is used for PXA PWM chips that only have a single PWM line.
+ * For such chips you could argue that passing the line number (i.e. the first
+ * parameter in the common case) is useless as it's always zero. So compared to
+ * the default xlate function of_pwm_xlate_with_flags(), the first parameter is
+ * the default period and the second the flags.
+ *
+ * Note that if #pwm-cells = <3>, the semantic is the same as for
+ * of_pwm_xlate_with_flags() to allow converting the affected driver to
+ * #pwm-cells = <3> without breaking the legacy binding.
+ *
+ * Don't use for new drivers.
+ */
struct pwm_device *
of_pwm_single_xlate(struct pwm_chip *chip, const struct of_phandle_args *args)
{
struct pwm_device *pwm;
+ if (args->args_count >= 3)
+ return of_pwm_xlate_with_flags(chip, args);
+
pwm = pwm_request_from_chip(chip, 0, NULL);
if (IS_ERR(pwm))
return pwm;
@@ -1716,8 +1732,7 @@ static struct pwm_device *of_pwm_get(struct device *dev, struct device_node *np,
return ERR_PTR(index);
}
- err = of_parse_phandle_with_args(np, "pwms", "#pwm-cells", index,
- &args);
+ err = of_parse_phandle_with_args_map(np, "pwms", "pwm", index, &args);
if (err) {
pr_err("%s(): can't parse \"pwms\" property\n", __func__);
return ERR_PTR(err);
diff --git a/drivers/pwm/pwm-clps711x.c b/drivers/pwm/pwm-clps711x.c
index c950e1dbd2b8..04559a9de718 100644
--- a/drivers/pwm/pwm-clps711x.c
+++ b/drivers/pwm/pwm-clps711x.c
@@ -98,7 +98,7 @@ static int clps711x_pwm_probe(struct platform_device *pdev)
return devm_pwmchip_add(&pdev->dev, chip);
}
-static const struct of_device_id __maybe_unused clps711x_pwm_dt_ids[] = {
+static const struct of_device_id clps711x_pwm_dt_ids[] = {
{ .compatible = "cirrus,ep7209-pwm", },
{ }
};
@@ -107,7 +107,7 @@ MODULE_DEVICE_TABLE(of, clps711x_pwm_dt_ids);
static struct platform_driver clps711x_pwm_driver = {
.driver = {
.name = "clps711x-pwm",
- .of_match_table = of_match_ptr(clps711x_pwm_dt_ids),
+ .of_match_table = clps711x_pwm_dt_ids,
},
.probe = clps711x_pwm_probe,
};
diff --git a/drivers/pwm/pwm-gpio.c b/drivers/pwm/pwm-gpio.c
index 9f8884ac7504..5f4edeb394a9 100644
--- a/drivers/pwm/pwm-gpio.c
+++ b/drivers/pwm/pwm-gpio.c
@@ -207,13 +207,12 @@ static int pwm_gpio_probe(struct platform_device *pdev)
chip->ops = &pwm_gpio_ops;
chip->atomic = true;
- hrtimer_init(&gpwm->gpio_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_setup(&gpwm->gpio_timer, pwm_gpio_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
ret = devm_add_action_or_reset(dev, pwm_gpio_disable_hrtimer, gpwm);
if (ret)
return ret;
- gpwm->gpio_timer.function = pwm_gpio_timer;
-
ret = pwmchip_add(chip);
if (ret < 0)
return dev_err_probe(dev, ret, "could not add pwmchip\n");
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index 3b99feb3bb49..c976ff1c8ed9 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -10,6 +10,8 @@
* Author: Alan Cox <alan@linux.intel.com>
*/
+#define DEFAULT_SYMBOL_NAMESPACE "PWM_LPSS"
+
#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -17,10 +19,9 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/pwm.h>
#include <linux/time.h>
-#define DEFAULT_SYMBOL_NAMESPACE "PWM_LPSS"
-
#include "pwm-lpss.h"
#define PWM 0x00000000
diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h
index b5267ab5193b..60792181401e 100644
--- a/drivers/pwm/pwm-lpss.h
+++ b/drivers/pwm/pwm-lpss.h
@@ -10,7 +10,6 @@
#ifndef __PWM_LPSS_H
#define __PWM_LPSS_H
-#include <linux/pwm.h>
#include <linux/types.h>
#include <linux/platform_data/x86/pwm-lpss.h>
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index 1298b29183e5..5162f3991644 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -8,7 +8,6 @@
* based on the pwm-twl-led.c driver
*/
-#include <linux/acpi.h>
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/module.h>
@@ -639,21 +638,17 @@ static const struct i2c_device_id pca9685_id[] = {
};
MODULE_DEVICE_TABLE(i2c, pca9685_id);
-#ifdef CONFIG_ACPI
static const struct acpi_device_id pca9685_acpi_ids[] = {
{ "INT3492", 0 },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(acpi, pca9685_acpi_ids);
-#endif
-#ifdef CONFIG_OF
static const struct of_device_id pca9685_dt_ids[] = {
{ .compatible = "nxp,pca9685-pwm", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pca9685_dt_ids);
-#endif
static const struct dev_pm_ops pca9685_pwm_pm = {
SET_RUNTIME_PM_OPS(pca9685_pwm_runtime_suspend,
@@ -663,8 +658,8 @@ static const struct dev_pm_ops pca9685_pwm_pm = {
static struct i2c_driver pca9685_i2c_driver = {
.driver = {
.name = "pca9685-pwm",
- .acpi_match_table = ACPI_PTR(pca9685_acpi_ids),
- .of_match_table = of_match_ptr(pca9685_dt_ids),
+ .acpi_match_table = pca9685_acpi_ids,
+ .of_match_table = pca9685_dt_ids,
.pm = &pca9685_pwm_pm,
},
.probe = pca9685_pwm_probe,
diff --git a/drivers/pwm/pwm-sophgo-sg2042.c b/drivers/pwm/pwm-sophgo-sg2042.c
new file mode 100644
index 000000000000..ff4639d849ce
--- /dev/null
+++ b/drivers/pwm/pwm-sophgo-sg2042.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo SG2042 PWM Controller Driver
+ *
+ * Copyright (C) 2024 Sophgo Technology Inc.
+ * Copyright (C) 2024 Chen Wang <unicorn_wang@outlook.com>
+ *
+ * Limitations:
+ * - After reset, the output of the PWM channel is always high.
+ * The value of HLPERIOD/PERIOD is 0.
+ * - When HLPERIOD or PERIOD is reconfigured, PWM will start to
+ * output waveforms with the new configuration after completing
+ * the running period.
+ * - When PERIOD and HLPERIOD are set to 0, the PWM wave output will
+ *   be stopped and the output is pulled high.
+ * See the datasheet [1] for more details.
+ * [1]:https://github.com/sophgo/sophgo-doc/tree/main/SG2042/TRM
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/reset.h>
+
+/*
+ * Offset RegisterName
+ * 0x0000 HLPERIOD0
+ * 0x0004 PERIOD0
+ * 0x0008 HLPERIOD1
+ * 0x000C PERIOD1
+ * 0x0010 HLPERIOD2
+ * 0x0014 PERIOD2
+ * 0x0018 HLPERIOD3
+ * 0x001C PERIOD3
+ * Four groups and every group is composed of HLPERIOD & PERIOD
+ */
+#define SG2042_PWM_HLPERIOD(chan) ((chan) * 8 + 0)
+#define SG2042_PWM_PERIOD(chan) ((chan) * 8 + 4)
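+/* e.g. chan = 2: HLPERIOD2 at offset 0x0010, PERIOD2 at offset 0x0014 */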
+
+#define SG2042_PWM_CHANNELNUM 4
+
+/**
+ * struct sg2042_pwm_ddata - private driver data
+ * @base: base address of mapped PWM registers
+ * @clk_rate_hz: rate of the base clock in Hz
+ */
+struct sg2042_pwm_ddata {
+ void __iomem *base;
+ unsigned long clk_rate_hz;
+};
+
+/*
+ * period_ticks: PERIOD
+ * hlperiod_ticks: HLPERIOD
+ */
+static void pwm_sg2042_config(struct sg2042_pwm_ddata *ddata, unsigned int chan,
+ u32 period_ticks, u32 hlperiod_ticks)
+{
+ void __iomem *base = ddata->base;
+
+ writel(period_ticks, base + SG2042_PWM_PERIOD(chan));
+ writel(hlperiod_ticks, base + SG2042_PWM_HLPERIOD(chan));
+}
+
+static int pwm_sg2042_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct sg2042_pwm_ddata *ddata = pwmchip_get_drvdata(chip);
+ u32 hlperiod_ticks;
+ u32 period_ticks;
+
+ if (state->polarity == PWM_POLARITY_INVERSED)
+ return -EINVAL;
+
+ if (!state->enabled) {
+ pwm_sg2042_config(ddata, pwm->hwpwm, 0, 0);
+ return 0;
+ }
+
+ /*
+ * Duration of High level (duty_cycle) = HLPERIOD x Period_of_input_clk
+ * Duration of One Cycle (period) = PERIOD x Period_of_input_clk
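+ * Both results are clamped to U32_MAX below, as PERIOD/HLPERIOD are 32-bit registers.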
+ */
+ period_ticks = min(mul_u64_u64_div_u64(ddata->clk_rate_hz, state->period, NSEC_PER_SEC), U32_MAX);
+ hlperiod_ticks = min(mul_u64_u64_div_u64(ddata->clk_rate_hz, state->duty_cycle, NSEC_PER_SEC), U32_MAX);
+
+ dev_dbg(pwmchip_parent(chip), "chan[%u]: PERIOD=%u, HLPERIOD=%u\n",
+ pwm->hwpwm, period_ticks, hlperiod_ticks);
+
+ pwm_sg2042_config(ddata, pwm->hwpwm, period_ticks, hlperiod_ticks);
+
+ return 0;
+}
+
+static int pwm_sg2042_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct sg2042_pwm_ddata *ddata = pwmchip_get_drvdata(chip);
+ unsigned int chan = pwm->hwpwm;
+ u32 hlperiod_ticks;
+ u32 period_ticks;
+
+ period_ticks = readl(ddata->base + SG2042_PWM_PERIOD(chan));
+ hlperiod_ticks = readl(ddata->base + SG2042_PWM_HLPERIOD(chan));
+
+ if (!period_ticks) {
+ state->enabled = false;
+ return 0;
+ }
+
+ if (hlperiod_ticks > period_ticks)
+ hlperiod_ticks = period_ticks;
+
+ state->enabled = true;
+ state->period = DIV_ROUND_UP_ULL((u64)period_ticks * NSEC_PER_SEC, ddata->clk_rate_hz);
+ state->duty_cycle = DIV_ROUND_UP_ULL((u64)hlperiod_ticks * NSEC_PER_SEC, ddata->clk_rate_hz);
+ state->polarity = PWM_POLARITY_NORMAL;
+
+ return 0;
+}
+
+static const struct pwm_ops pwm_sg2042_ops = {
+ .apply = pwm_sg2042_apply,
+ .get_state = pwm_sg2042_get_state,
+};
+
+static const struct of_device_id sg2042_pwm_ids[] = {
+ { .compatible = "sophgo,sg2042-pwm" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sg2042_pwm_ids);
+
+static int pwm_sg2042_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sg2042_pwm_ddata *ddata;
+ struct reset_control *rst;
+ struct pwm_chip *chip;
+ struct clk *clk;
+ int ret;
+
+ chip = devm_pwmchip_alloc(dev, SG2042_PWM_CHANNELNUM, sizeof(*ddata));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ ddata = pwmchip_get_drvdata(chip);
+
+ ddata->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ddata->base))
+ return PTR_ERR(ddata->base);
+
+ clk = devm_clk_get_enabled(dev, "apb");
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "Failed to get base clk\n");
+
+ ret = devm_clk_rate_exclusive_get(dev, clk);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get exclusive rate\n");
+
+ ddata->clk_rate_hz = clk_get_rate(clk);
+ /* period = PERIOD * NSEC_PER_SEC / clk_rate_hz */
+ if (!ddata->clk_rate_hz || ddata->clk_rate_hz > NSEC_PER_SEC)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid clock rate: %lu\n", ddata->clk_rate_hz);
+
+ rst = devm_reset_control_get_optional_shared_deasserted(dev, NULL);
+ if (IS_ERR(rst))
+ return dev_err_probe(dev, PTR_ERR(rst), "Failed to get reset\n");
+
+ chip->ops = &pwm_sg2042_ops;
+ chip->atomic = true;
+
+ ret = devm_pwmchip_add(dev, chip);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to register PWM chip\n");
+
+ return 0;
+}
+
+static struct platform_driver pwm_sg2042_driver = {
+ .driver = {
+ .name = "sg2042-pwm",
+ .of_match_table = sg2042_pwm_ids,
+ },
+ .probe = pwm_sg2042_probe,
+};
+module_platform_driver(pwm_sg2042_driver);
+
+MODULE_AUTHOR("Chen Wang");
+MODULE_DESCRIPTION("Sophgo SG2042 PWM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-stmpe.c b/drivers/pwm/pwm-stmpe.c
index bb91062d5f1d..73f12843999a 100644
--- a/drivers/pwm/pwm-stmpe.c
+++ b/drivers/pwm/pwm-stmpe.c
@@ -326,12 +326,33 @@ static int __init stmpe_pwm_probe(struct platform_device *pdev)
return ret;
}
+ platform_set_drvdata(pdev, chip);
+
return 0;
}
-static struct platform_driver stmpe_pwm_driver = {
+static void __exit stmpe_pwm_remove(struct platform_device *pdev)
+{
+ struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
+
+ pwmchip_remove(chip);
+ stmpe_disable(stmpe, STMPE_BLOCK_PWM);
+}
+
+/*
+ * stmpe_pwm_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver stmpe_pwm_driver __refdata = {
.driver = {
.name = "stmpe-pwm",
},
+ .remove = __exit_p(stmpe_pwm_remove),
};
-builtin_platform_driver_probe(stmpe_pwm_driver, stmpe_pwm_probe);
+module_platform_driver_probe(stmpe_pwm_driver, stmpe_pwm_probe);
+
+MODULE_DESCRIPTION("STMPE expander PWM");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 39297f7d8177..05e32d764028 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -981,6 +981,13 @@ config REGULATOR_PCA9450
Say y here to support the NXP PCA9450A/PCA9450B/PCA9450C PMIC
regulator driver.
+config REGULATOR_PF9453
+ tristate "NXP PF9453 regulator driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say y here to support the NXP PF9453 PMIC regulator driver.
+
config REGULATOR_PCAP
tristate "Motorola PCAP2 regulator driver"
depends on EZX_PCAP
@@ -988,13 +995,6 @@ config REGULATOR_PCAP
This driver provides support for the voltage regulators of the
PCAP2 PMIC.
-config REGULATOR_PCF50633
- tristate "NXP PCF50633 regulator driver"
- depends on MFD_PCF50633
- help
- Say Y here to support the voltage regulators and converters
- on PCF50633
-
config REGULATOR_PF8X00
tristate "NXP PF8100/PF8121A/PF8200 regulator driver"
depends on I2C && OF
@@ -1330,10 +1330,10 @@ config REGULATOR_S2MPA01
via I2C bus. S2MPA01 has 10 Bucks and 26 LDO outputs.
config REGULATOR_S2MPS11
- tristate "Samsung S2MPS11/13/14/15/S2MPU02 voltage regulator"
+ tristate "Samsung S2MPS11/13/14/15/S2MPU02/05 voltage regulator"
depends on MFD_SEC_CORE || COMPILE_TEST
help
- This driver supports a Samsung S2MPS11/13/14/15/S2MPU02 voltage
+ This driver supports a Samsung S2MPS11/13/14/15/S2MPU02/05 voltage
output regulator via I2C bus. The chip is comprised of high efficient
Buck converters including Dual-Phase Buck converter, Buck-Boost
converter, various LDOs.
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 3d5a803dce8a..524e026c0273 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -123,6 +123,7 @@ obj-$(CONFIG_REGULATOR_QCOM_SPMI) += qcom_spmi-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_USB_VBUS) += qcom_usb_vbus-regulator.o
obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
obj-$(CONFIG_REGULATOR_PCA9450) += pca9450-regulator.o
+obj-$(CONFIG_REGULATOR_PF9453) += pf9453-regulator.o
obj-$(CONFIG_REGULATOR_PF8X00) += pf8x00-regulator.o
obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o
obj-$(CONFIG_REGULATOR_PV88060) += pv88060-regulator.o
@@ -132,7 +133,6 @@ obj-$(CONFIG_REGULATOR_PWM) += pwm-regulator.o
obj-$(CONFIG_REGULATOR_TPS51632) += tps51632-regulator.o
obj-$(CONFIG_REGULATOR_PBIAS) += pbias-regulator.o
obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
-obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
obj-$(CONFIG_REGULATOR_RAA215300) += raa215300.o
obj-$(CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY) += rpi-panel-attiny-regulator.o
obj-$(CONFIG_REGULATOR_RC5T583) += rc5t583-regulator.o
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
index 40f7dba42b5a..eb2a666a45cb 100644
--- a/drivers/regulator/ad5398.c
+++ b/drivers/regulator/ad5398.c
@@ -14,8 +14,9 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
-#define AD5398_CURRENT_EN_MASK 0x8000
+#define AD5398_SW_POWER_DOWN BIT(15)
struct ad5398_chip_info {
struct i2c_client *client;
@@ -113,7 +114,7 @@ static int ad5398_set_current_limit(struct regulator_dev *rdev, int min_uA, int
/* prepare register data */
selector = (selector << chip->current_offset) & chip->current_mask;
- data = (unsigned short)selector | (data & AD5398_CURRENT_EN_MASK);
+ data = (unsigned short)selector | (data & AD5398_SW_POWER_DOWN);
/* write the new current value back as well as enable bit */
ret = ad5398_write_reg(client, data);
@@ -132,10 +133,10 @@ static int ad5398_is_enabled(struct regulator_dev *rdev)
if (ret < 0)
return ret;
- if (data & AD5398_CURRENT_EN_MASK)
- return 1;
- else
+ if (data & AD5398_SW_POWER_DOWN)
return 0;
+ else
+ return 1;
}
static int ad5398_enable(struct regulator_dev *rdev)
@@ -149,10 +150,10 @@ static int ad5398_enable(struct regulator_dev *rdev)
if (ret < 0)
return ret;
- if (data & AD5398_CURRENT_EN_MASK)
+ if (!(data & AD5398_SW_POWER_DOWN))
return 0;
- data |= AD5398_CURRENT_EN_MASK;
+ data &= ~AD5398_SW_POWER_DOWN;
ret = ad5398_write_reg(client, data);
@@ -170,10 +171,10 @@ static int ad5398_disable(struct regulator_dev *rdev)
if (ret < 0)
return ret;
- if (!(data & AD5398_CURRENT_EN_MASK))
+ if (data & AD5398_SW_POWER_DOWN)
return 0;
- data &= ~AD5398_CURRENT_EN_MASK;
+ data |= AD5398_SW_POWER_DOWN;
ret = ad5398_write_reg(client, data);
@@ -221,15 +222,20 @@ static int ad5398_probe(struct i2c_client *client)
const struct ad5398_current_data_format *df =
(struct ad5398_current_data_format *)id->driver_data;
- if (!init_data)
- return -EINVAL;
-
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
config.dev = &client->dev;
+ if (client->dev.of_node)
+ init_data = of_get_regulator_init_data(&client->dev,
+ client->dev.of_node,
+ &ad5398_reg);
+ if (!init_data)
+ return -EINVAL;
+
config.init_data = init_data;
+ config.of_node = client->dev.of_node;
config.driver_data = chip;
chip->client = client;
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index dca99cfb7cbb..da891415efc0 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -371,8 +371,8 @@
.ops = &axp20x_ops, \
}
-#define AXP_DESC_DELAY(_family, _id, _match, _supply, _min, _max, _step, _vreg, \
- _vmask, _ereg, _emask, _ramp_delay) \
+#define AXP_DESC(_family, _id, _match, _supply, _min, _max, _step, _vreg, \
+ _vmask, _ereg, _emask) \
[_family##_##_id] = { \
.name = (_match), \
.supply_name = (_supply), \
@@ -388,15 +388,9 @@
.vsel_mask = (_vmask), \
.enable_reg = (_ereg), \
.enable_mask = (_emask), \
- .ramp_delay = (_ramp_delay), \
.ops = &axp20x_ops, \
}
-#define AXP_DESC(_family, _id, _match, _supply, _min, _max, _step, _vreg, \
- _vmask, _ereg, _emask) \
- AXP_DESC_DELAY(_family, _id, _match, _supply, _min, _max, _step, _vreg, \
- _vmask, _ereg, _emask, 0)
-
#define AXP_DESC_SW(_family, _id, _match, _supply, _ereg, _emask) \
[_family##_##_id] = { \
.name = (_match), \
@@ -805,9 +799,9 @@ static const struct regulator_desc axp717_regulators[] = {
axp717_dcdc3_ranges, AXP717_DCDC3_NUM_VOLTAGES,
AXP717_DCDC3_CONTROL, AXP717_DCDC_V_OUT_MASK,
AXP717_DCDC_OUTPUT_CONTROL, BIT(2), 640),
- AXP_DESC_DELAY(AXP717, DCDC4, "dcdc4", "vin4", 1000, 3700, 100,
+ AXP_DESC(AXP717, DCDC4, "dcdc4", "vin4", 1000, 3700, 100,
AXP717_DCDC4_CONTROL, AXP717_DCDC_V_OUT_MASK,
- AXP717_DCDC_OUTPUT_CONTROL, BIT(3), 6400),
+ AXP717_DCDC_OUTPUT_CONTROL, BIT(3)),
AXP_DESC(AXP717, ALDO1, "aldo1", "aldoin", 500, 3500, 100,
AXP717_ALDO1_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO0_OUTPUT_CONTROL, BIT(0)),
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 4ddf0efead68..00a7f3617cd8 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1830,12 +1830,49 @@ static const struct file_operations constraint_flags_fops = {
#define REG_STR_SIZE 64
+static void link_and_create_debugfs(struct regulator *regulator, struct regulator_dev *rdev,
+ struct device *dev)
+{
+ int err = 0;
+
+ if (dev) {
+ regulator->dev = dev;
+
+ /* Add a link to the device sysfs entry */
+ err = sysfs_create_link_nowarn(&rdev->dev.kobj, &dev->kobj,
+ regulator->supply_name);
+ if (err) {
+ rdev_dbg(rdev, "could not add device link %s: %pe\n",
+ dev->kobj.name, ERR_PTR(err));
+ /* non-fatal */
+ }
+ }
+
+ if (err != -EEXIST) {
+ regulator->debugfs = debugfs_create_dir(regulator->supply_name, rdev->debugfs);
+ if (IS_ERR(regulator->debugfs)) {
+ rdev_dbg(rdev, "Failed to create debugfs directory\n");
+ regulator->debugfs = NULL;
+ }
+ }
+
+ if (regulator->debugfs) {
+ debugfs_create_u32("uA_load", 0444, regulator->debugfs,
+ &regulator->uA_load);
+ debugfs_create_u32("min_uV", 0444, regulator->debugfs,
+ &regulator->voltage[PM_SUSPEND_ON].min_uV);
+ debugfs_create_u32("max_uV", 0444, regulator->debugfs,
+ &regulator->voltage[PM_SUSPEND_ON].max_uV);
+ debugfs_create_file("constraint_flags", 0444, regulator->debugfs,
+ regulator, &constraint_flags_fops);
+ }
+}
+
static struct regulator *create_regulator(struct regulator_dev *rdev,
struct device *dev,
const char *supply_name)
{
struct regulator *regulator;
- int err = 0;
lockdep_assert_held_once(&rdev->mutex.base);
@@ -1868,38 +1905,6 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
list_add(&regulator->list, &rdev->consumer_list);
- if (dev) {
- regulator->dev = dev;
-
- /* Add a link to the device sysfs entry */
- err = sysfs_create_link_nowarn(&rdev->dev.kobj, &dev->kobj,
- supply_name);
- if (err) {
- rdev_dbg(rdev, "could not add device link %s: %pe\n",
- dev->kobj.name, ERR_PTR(err));
- /* non-fatal */
- }
- }
-
- if (err != -EEXIST) {
- regulator->debugfs = debugfs_create_dir(supply_name, rdev->debugfs);
- if (IS_ERR(regulator->debugfs)) {
- rdev_dbg(rdev, "Failed to create debugfs directory\n");
- regulator->debugfs = NULL;
- }
- }
-
- if (regulator->debugfs) {
- debugfs_create_u32("uA_load", 0444, regulator->debugfs,
- &regulator->uA_load);
- debugfs_create_u32("min_uV", 0444, regulator->debugfs,
- &regulator->voltage[PM_SUSPEND_ON].min_uV);
- debugfs_create_u32("max_uV", 0444, regulator->debugfs,
- &regulator->voltage[PM_SUSPEND_ON].max_uV);
- debugfs_create_file("constraint_flags", 0444, regulator->debugfs,
- regulator, &constraint_flags_fops);
- }
-
/*
* Check now if the regulator is an always on regulator - if
* it is then we don't need to do nearly so much work for
@@ -2069,6 +2074,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
if (have_full_constraints()) {
r = dummy_regulator_rdev;
+ if (!r) {
+ ret = -EPROBE_DEFER;
+ goto out;
+ }
get_device(&r->dev);
} else {
dev_err(dev, "Failed to resolve %s-supply for %s\n",
@@ -2086,6 +2095,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
goto out;
}
r = dummy_regulator_rdev;
+ if (!r) {
+ ret = -EPROBE_DEFER;
+ goto out;
+ }
get_device(&r->dev);
}
@@ -2133,6 +2146,9 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
regulator_unlock_two(rdev, r, &ww_ctx);
+ /* rdev->supply was created in set_supply() */
+ link_and_create_debugfs(rdev->supply, r, &rdev->dev);
+
/*
* In set_machine_constraints() we may have turned this regulator on
* but we couldn't propagate to the supply if it hadn't been resolved
@@ -2211,8 +2227,10 @@ struct regulator *_regulator_get_common(struct regulator_dev *rdev, struct devic
* enabled, even if it isn't hooked up, and just
* provide a dummy.
*/
- dev_warn(dev, "supply %s not found, using dummy regulator\n", id);
rdev = dummy_regulator_rdev;
+ if (!rdev)
+ return ERR_PTR(-EPROBE_DEFER);
+ dev_warn(dev, "supply %s not found, using dummy regulator\n", id);
get_device(&rdev->dev);
break;
@@ -2271,6 +2289,8 @@ struct regulator *_regulator_get_common(struct regulator_dev *rdev, struct devic
return regulator;
}
+ link_and_create_debugfs(regulator, rdev, dev);
+
rdev->open_count++;
if (get_type == EXCLUSIVE_GET) {
rdev->exclusive = 1;
diff --git a/drivers/regulator/cros-ec-regulator.c b/drivers/regulator/cros-ec-regulator.c
index fb9643ed7a49..fb0767b33a36 100644
--- a/drivers/regulator/cros-ec-regulator.c
+++ b/drivers/regulator/cros-ec-regulator.c
@@ -138,8 +138,8 @@ static int cros_ec_regulator_init_info(struct device *dev,
data->num_voltages =
min_t(u16, ARRAY_SIZE(resp.voltages_mv), resp.num_voltages);
data->voltages_mV =
- devm_kmemdup(dev, resp.voltages_mv,
- sizeof(u16) * data->num_voltages, GFP_KERNEL);
+ devm_kmemdup_array(dev, resp.voltages_mv, data->num_voltages,
+ sizeof(resp.voltages_mv[0]), GFP_KERNEL);
if (!data->voltages_mV)
return -ENOMEM;
diff --git a/drivers/regulator/devres.c b/drivers/regulator/devres.c
index 36164aec30e8..2cf03042fddf 100644
--- a/drivers/regulator/devres.c
+++ b/drivers/regulator/devres.c
@@ -332,9 +332,8 @@ int devm_regulator_bulk_get_const(struct device *dev, int num_consumers,
const struct regulator_bulk_data *in_consumers,
struct regulator_bulk_data **out_consumers)
{
- *out_consumers = devm_kmemdup(dev, in_consumers,
- num_consumers * sizeof(*in_consumers),
- GFP_KERNEL);
+ *out_consumers = devm_kmemdup_array(dev, in_consumers, num_consumers,
+ sizeof(*in_consumers), GFP_KERNEL);
if (*out_consumers == NULL)
return -ENOMEM;
@@ -772,6 +771,23 @@ static struct regulator *_devm_of_regulator_get(struct device *dev, struct devic
}
/**
+ * devm_of_regulator_get - Resource managed of_regulator_get()
+ * @dev: device used for dev_printk() messages and resource lifetime management
+ * @node: device node for regulator "consumer"
+ * @id: supply name or regulator ID.
+ *
+ * Managed of_regulator_get(). Regulators returned from this
+ * function are automatically regulator_put() on driver detach. See
+ * of_regulator_get() for more information.
+ */
+struct regulator *devm_of_regulator_get(struct device *dev, struct device_node *node,
+ const char *id)
+{
+ return _devm_of_regulator_get(dev, node, id, NORMAL_GET);
+}
+EXPORT_SYMBOL_GPL(devm_of_regulator_get);
+
+/**
* devm_of_regulator_get_optional - Resource managed of_regulator_get_optional()
* @dev: device used for dev_printk() messages and resource lifetime management
* @node: device node for regulator "consumer"
diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c
index 5b9b9e4e762d..9f59889129ab 100644
--- a/drivers/regulator/dummy.c
+++ b/drivers/regulator/dummy.c
@@ -60,7 +60,7 @@ static struct platform_driver dummy_regulator_driver = {
.probe = dummy_regulator_probe,
.driver = {
.name = "reg-dummy",
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .probe_type = PROBE_FORCE_SYNCHRONOUS,
},
};
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 011088c57891..32e88cada47a 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -698,6 +698,27 @@ struct regulator *_of_regulator_get(struct device *dev, struct device_node *node
}
/**
+ * of_regulator_get - get regulator via device tree lookup
+ * @dev: device used for dev_printk() messages
+ * @node: device node for regulator "consumer"
+ * @id: Supply name
+ *
+ * Return: pointer to struct regulator corresponding to the regulator producer,
+ * or PTR_ERR() encoded error number.
+ *
+ * This is intended for use by consumers that want to get a regulator
+ * supply directly from a device node. This will _not_ consider supply
+ * aliases. See regulator_dev_lookup().
+ */
+struct regulator *of_regulator_get(struct device *dev,
+ struct device_node *node,
+ const char *id)
+{
+ return _of_regulator_get(dev, node, id, NORMAL_GET);
+}
+EXPORT_SYMBOL_GPL(of_regulator_get);
+
+/**
* of_regulator_get_optional - get optional regulator via device tree lookup
* @dev: device used for dev_printk() messages
* @node: device node for regulator "consumer"
diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c
index faa6b79c27d7..a56f3ab754fa 100644
--- a/drivers/regulator/pca9450-regulator.c
+++ b/drivers/regulator/pca9450-regulator.c
@@ -36,6 +36,7 @@ struct pca9450 {
enum pca9450_chip_type type;
unsigned int rcnt;
int irq;
+ bool sd_vsel_fixed_low;
};
static const struct regmap_range pca9450_status_range = {
@@ -98,6 +99,61 @@ static const struct regulator_ops pca9450_ldo_regulator_ops = {
.get_voltage_sel = regulator_get_voltage_sel_regmap,
};
+static unsigned int pca9450_ldo5_get_reg_voltage_sel(struct regulator_dev *rdev)
+{
+ struct pca9450 *pca9450 = rdev_get_drvdata(rdev);
+
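+ /* SD_VSEL low (or forced low) selects LDO5CTRL_L; otherwise the default vsel_reg (LDO5CTRL_H) is used */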
+ if (pca9450->sd_vsel_fixed_low)
+ return PCA9450_REG_LDO5CTRL_L;
+
+ if (pca9450->sd_vsel_gpio && !gpiod_get_value(pca9450->sd_vsel_gpio))
+ return PCA9450_REG_LDO5CTRL_L;
+
+ return rdev->desc->vsel_reg;
+}
+
+static int pca9450_ldo5_get_voltage_sel_regmap(struct regulator_dev *rdev)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, pca9450_ldo5_get_reg_voltage_sel(rdev), &val);
+ if (ret != 0)
+ return ret;
+
+ val &= rdev->desc->vsel_mask;
+ val >>= ffs(rdev->desc->vsel_mask) - 1;
+
+ return val;
+}
+
+static int pca9450_ldo5_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned int sel)
+{
+ int ret;
+
+ sel <<= ffs(rdev->desc->vsel_mask) - 1;
+
+ ret = regmap_update_bits(rdev->regmap, pca9450_ldo5_get_reg_voltage_sel(rdev),
+ rdev->desc->vsel_mask, sel);
+ if (ret)
+ return ret;
+
+ if (rdev->desc->apply_bit)
+ ret = regmap_update_bits(rdev->regmap, rdev->desc->apply_reg,
+ rdev->desc->apply_bit,
+ rdev->desc->apply_bit);
+ return ret;
+}
+
+static const struct regulator_ops pca9450_ldo5_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = pca9450_ldo5_set_voltage_sel_regmap,
+ .get_voltage_sel = pca9450_ldo5_get_voltage_sel_regmap,
+};
+
/*
* BUCK1/2/3
* 0.60 to 2.1875V (12.5mV step)
@@ -453,14 +509,14 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = {
.of_match = of_match_ptr("LDO5"),
.regulators_node = of_match_ptr("regulators"),
.id = PCA9450_LDO5,
- .ops = &pca9450_ldo_regulator_ops,
+ .ops = &pca9450_ldo5_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = PCA9450_LDO5_VOLTAGE_NUM,
.linear_ranges = pca9450_ldo5_volts,
.n_linear_ranges = ARRAY_SIZE(pca9450_ldo5_volts),
.vsel_reg = PCA9450_REG_LDO5CTRL_H,
.vsel_mask = LDO5HOUT_MASK,
- .enable_reg = PCA9450_REG_LDO5CTRL_H,
+ .enable_reg = PCA9450_REG_LDO5CTRL_L,
.enable_mask = LDO5H_EN_MASK,
.owner = THIS_MODULE,
},
@@ -667,14 +723,14 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = {
.of_match = of_match_ptr("LDO5"),
.regulators_node = of_match_ptr("regulators"),
.id = PCA9450_LDO5,
- .ops = &pca9450_ldo_regulator_ops,
+ .ops = &pca9450_ldo5_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = PCA9450_LDO5_VOLTAGE_NUM,
.linear_ranges = pca9450_ldo5_volts,
.n_linear_ranges = ARRAY_SIZE(pca9450_ldo5_volts),
.vsel_reg = PCA9450_REG_LDO5CTRL_H,
.vsel_mask = LDO5HOUT_MASK,
- .enable_reg = PCA9450_REG_LDO5CTRL_H,
+ .enable_reg = PCA9450_REG_LDO5CTRL_L,
.enable_mask = LDO5H_EN_MASK,
.owner = THIS_MODULE,
},
@@ -857,14 +913,14 @@ static const struct pca9450_regulator_desc pca9451a_regulators[] = {
.of_match = of_match_ptr("LDO5"),
.regulators_node = of_match_ptr("regulators"),
.id = PCA9450_LDO5,
- .ops = &pca9450_ldo_regulator_ops,
+ .ops = &pca9450_ldo5_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = PCA9450_LDO5_VOLTAGE_NUM,
.linear_ranges = pca9450_ldo5_volts,
.n_linear_ranges = ARRAY_SIZE(pca9450_ldo5_volts),
.vsel_reg = PCA9450_REG_LDO5CTRL_H,
.vsel_mask = LDO5HOUT_MASK,
- .enable_reg = PCA9450_REG_LDO5CTRL_H,
+ .enable_reg = PCA9450_REG_LDO5CTRL_L,
.enable_mask = LDO5H_EN_MASK,
.owner = THIS_MODULE,
},
@@ -915,6 +971,7 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
of_device_get_match_data(&i2c->dev);
const struct pca9450_regulator_desc *regulator_desc;
struct regulator_config config = { };
+ struct regulator_dev *ldo5;
struct pca9450 *pca9450;
unsigned int device_id, i;
unsigned int reset_ctrl;
@@ -980,11 +1037,15 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
config.regmap = pca9450->regmap;
config.dev = pca9450->dev;
+ config.driver_data = pca9450;
rdev = devm_regulator_register(pca9450->dev, desc, &config);
if (IS_ERR(rdev))
return dev_err_probe(pca9450->dev, PTR_ERR(rdev),
"Failed to register regulator(%s)\n", desc->name);
+
+ if (!strcmp(desc->name, "ldo5"))
+ ldo5 = rdev;
}
if (pca9450->irq) {
@@ -1032,15 +1093,19 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
}
/*
- * The driver uses the LDO5CTRL_H register to control the LDO5 regulator.
- * This is only valid if the SD_VSEL input of the PMIC is high. Let's
- * check if the pin is available as GPIO and set it to high.
+ * For LDO5 we need to be able to check the status of the SD_VSEL input in
+ * order to know which control register is used. Most boards connect SD_VSEL
+ * to the VSELECT signal, so we can use the GPIO that is internally routed
+ * to this signal (if SION bit is set in IOMUX).
*/
- pca9450->sd_vsel_gpio = gpiod_get_optional(pca9450->dev, "sd-vsel", GPIOD_OUT_HIGH);
+ pca9450->sd_vsel_gpio = gpiod_get_optional(&ldo5->dev, "sd-vsel", GPIOD_IN);
+ if (IS_ERR(pca9450->sd_vsel_gpio)) {
+ dev_err(&i2c->dev, "Failed to get SD_VSEL GPIO\n");
+ return PTR_ERR(pca9450->sd_vsel_gpio);
+ }
- if (IS_ERR(pca9450->sd_vsel_gpio))
- return dev_err_probe(&i2c->dev, PTR_ERR(pca9450->sd_vsel_gpio),
- "Failed to get SD_VSEL GPIO\n");
+ pca9450->sd_vsel_fixed_low =
+ of_property_read_bool(ldo5->dev.of_node, "nxp,sd-vsel-fixed-low");
dev_info(&i2c->dev, "%s probed.\n",
type == PCA9450_TYPE_PCA9450A ? "pca9450a" :
diff --git a/drivers/regulator/pcf50633-regulator.c b/drivers/regulator/pcf50633-regulator.c
deleted file mode 100644
index 9f08a62c800e..000000000000
--- a/drivers/regulator/pcf50633-regulator.c
+++ /dev/null
@@ -1,124 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* NXP PCF50633 PMIC Driver
- *
- * (C) 2006-2008 by Openmoko, Inc.
- * Author: Balaji Rao <balajirrao@openmoko.org>
- * All rights reserved.
- *
- * Broken down from monstrous PCF50633 driver mainly by
- * Harald Welte and Andy Green and Werner Almesberger
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/platform_device.h>
-
-#include <linux/mfd/pcf50633/core.h>
-#include <linux/mfd/pcf50633/pmic.h>
-
-#define PCF50633_REGULATOR(_name, _id, _min_uV, _uV_step, _min_sel, _n) \
- { \
- .name = _name, \
- .id = PCF50633_REGULATOR_##_id, \
- .ops = &pcf50633_regulator_ops, \
- .n_voltages = _n, \
- .min_uV = _min_uV, \
- .uV_step = _uV_step, \
- .linear_min_sel = _min_sel, \
- .type = REGULATOR_VOLTAGE, \
- .owner = THIS_MODULE, \
- .vsel_reg = PCF50633_REG_##_id##OUT, \
- .vsel_mask = 0xff, \
- .enable_reg = PCF50633_REG_##_id##OUT + 1, \
- .enable_mask = PCF50633_REGULATOR_ON, \
- }
-
-static const struct regulator_ops pcf50633_regulator_ops = {
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .list_voltage = regulator_list_voltage_linear,
- .map_voltage = regulator_map_voltage_linear,
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
-};
-
-static const struct regulator_desc regulators[] = {
- [PCF50633_REGULATOR_AUTO] =
- PCF50633_REGULATOR("auto", AUTO, 1800000, 25000, 0x2f, 128),
- [PCF50633_REGULATOR_DOWN1] =
- PCF50633_REGULATOR("down1", DOWN1, 625000, 25000, 0, 96),
- [PCF50633_REGULATOR_DOWN2] =
- PCF50633_REGULATOR("down2", DOWN2, 625000, 25000, 0, 96),
- [PCF50633_REGULATOR_LDO1] =
- PCF50633_REGULATOR("ldo1", LDO1, 900000, 100000, 0, 28),
- [PCF50633_REGULATOR_LDO2] =
- PCF50633_REGULATOR("ldo2", LDO2, 900000, 100000, 0, 28),
- [PCF50633_REGULATOR_LDO3] =
- PCF50633_REGULATOR("ldo3", LDO3, 900000, 100000, 0, 28),
- [PCF50633_REGULATOR_LDO4] =
- PCF50633_REGULATOR("ldo4", LDO4, 900000, 100000, 0, 28),
- [PCF50633_REGULATOR_LDO5] =
- PCF50633_REGULATOR("ldo5", LDO5, 900000, 100000, 0, 28),
- [PCF50633_REGULATOR_LDO6] =
- PCF50633_REGULATOR("ldo6", LDO6, 900000, 100000, 0, 28),
- [PCF50633_REGULATOR_HCLDO] =
- PCF50633_REGULATOR("hcldo", HCLDO, 900000, 100000, 0, 28),
- [PCF50633_REGULATOR_MEMLDO] =
- PCF50633_REGULATOR("memldo", MEMLDO, 900000, 100000, 0, 28),
-};
-
-static int pcf50633_regulator_probe(struct platform_device *pdev)
-{
- struct regulator_dev *rdev;
- struct pcf50633 *pcf;
- struct regulator_config config = { };
-
- /* Already set by core driver */
- pcf = dev_to_pcf50633(pdev->dev.parent);
-
- config.dev = &pdev->dev;
- config.init_data = dev_get_platdata(&pdev->dev);
- config.driver_data = pcf;
- config.regmap = pcf->regmap;
-
- rdev = devm_regulator_register(&pdev->dev, &regulators[pdev->id],
- &config);
- if (IS_ERR(rdev))
- return PTR_ERR(rdev);
-
- platform_set_drvdata(pdev, rdev);
-
- if (pcf->pdata->regulator_registered)
- pcf->pdata->regulator_registered(pcf, pdev->id);
-
- return 0;
-}
-
-static struct platform_driver pcf50633_regulator_driver = {
- .driver = {
- .name = "pcf50633-regulator",
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
- },
- .probe = pcf50633_regulator_probe,
-};
-
-static int __init pcf50633_regulator_init(void)
-{
- return platform_driver_register(&pcf50633_regulator_driver);
-}
-subsys_initcall(pcf50633_regulator_init);
-
-static void __exit pcf50633_regulator_exit(void)
-{
- platform_driver_unregister(&pcf50633_regulator_driver);
-}
-module_exit(pcf50633_regulator_exit);
-
-MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
-MODULE_DESCRIPTION("PCF50633 regulator driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:pcf50633-regulator");
diff --git a/drivers/regulator/pf9453-regulator.c b/drivers/regulator/pf9453-regulator.c
new file mode 100644
index 000000000000..ed6bf0f6c4fe
--- /dev/null
+++ b/drivers/regulator/pf9453-regulator.c
@@ -0,0 +1,879 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2024 NXP.
+ * NXP PF9453 pmic driver
+ */
+
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+struct pf9453_dvs_config {
+ unsigned int run_reg; /* dvs0 */
+ unsigned int run_mask;
+ unsigned int standby_reg; /* dvs1 */
+ unsigned int standby_mask;
+};
+
+struct pf9453_regulator_desc {
+ struct regulator_desc desc;
+ const struct pf9453_dvs_config dvs;
+};
+
+struct pf9453 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct gpio_desc *sd_vsel_gpio;
+ int irq;
+};
+
+enum {
+ PF9453_BUCK1 = 0,
+ PF9453_BUCK2,
+ PF9453_BUCK3,
+ PF9453_BUCK4,
+ PF9453_LDO1,
+ PF9453_LDO2,
+ PF9453_LDOSNVS,
+ PF9453_REGULATOR_CNT
+};
+
+enum {
+ PF9453_DVS_LEVEL_RUN = 0,
+ PF9453_DVS_LEVEL_STANDBY,
+ PF9453_DVS_LEVEL_DPSTANDBY,
+ PF9453_DVS_LEVEL_MAX
+};
+
+#define PF9453_BUCK1_VOLTAGE_NUM 0x80
+#define PF9453_BUCK2_VOLTAGE_NUM 0x80
+#define PF9453_BUCK3_VOLTAGE_NUM 0x80
+#define PF9453_BUCK4_VOLTAGE_NUM 0x80
+
+#define PF9453_LDO1_VOLTAGE_NUM 0x65
+#define PF9453_LDO2_VOLTAGE_NUM 0x3b
+#define PF9453_LDOSNVS_VOLTAGE_NUM 0x59
+
+enum {
+ PF9453_REG_DEV_ID = 0x00,
+ PF9453_REG_OTP_VER = 0x01,
+ PF9453_REG_INT1 = 0x02,
+ PF9453_REG_INT1_MASK = 0x03,
+ PF9453_REG_INT1_STATUS = 0x04,
+ PF9453_REG_VRFLT1_INT = 0x05,
+ PF9453_REG_VRFLT1_MASK = 0x06,
+ PF9453_REG_PWRON_STAT = 0x07,
+ PF9453_REG_RESET_CTRL = 0x08,
+ PF9453_REG_SW_RST = 0x09,
+ PF9453_REG_PWR_CTRL = 0x0a,
+ PF9453_REG_CONFIG1 = 0x0b,
+ PF9453_REG_CONFIG2 = 0x0c,
+ PF9453_REG_32K_CONFIG = 0x0d,
+ PF9453_REG_BUCK1CTRL = 0x10,
+ PF9453_REG_BUCK1OUT = 0x11,
+ PF9453_REG_BUCK2CTRL = 0x14,
+ PF9453_REG_BUCK2OUT = 0x15,
+ PF9453_REG_BUCK2OUT_STBY = 0x1d,
+ PF9453_REG_BUCK2OUT_MAX_LIMIT = 0x1f,
+ PF9453_REG_BUCK2OUT_MIN_LIMIT = 0x20,
+ PF9453_REG_BUCK3CTRL = 0x21,
+ PF9453_REG_BUCK3OUT = 0x22,
+ PF9453_REG_BUCK4CTRL = 0x2e,
+ PF9453_REG_BUCK4OUT = 0x2f,
+ PF9453_REG_LDO1OUT_L = 0x36,
+ PF9453_REG_LDO1CFG = 0x37,
+ PF9453_REG_LDO1OUT_H = 0x38,
+ PF9453_REG_LDOSNVS_CFG1 = 0x39,
+ PF9453_REG_LDOSNVS_CFG2 = 0x3a,
+ PF9453_REG_LDO2CFG = 0x3b,
+ PF9453_REG_LDO2OUT = 0x3c,
+ PF9453_REG_BUCK_POK = 0x3d,
+ PF9453_REG_LSW_CTRL1 = 0x40,
+ PF9453_REG_LSW_CTRL2 = 0x41,
+ PF9453_REG_LOCK = 0x4e,
+ PF9453_MAX_REG
+};
+
+#define PF9453_UNLOCK_KEY 0x5c
+#define PF9453_LOCK_KEY 0x0
+
+/* PF9453 BUCK ENMODE bits */
+#define BUCK_ENMODE_OFF 0x00
+#define BUCK_ENMODE_ONREQ 0x01
+#define BUCK_ENMODE_ONREQ_STBY 0x02
+#define BUCK_ENMODE_ONREQ_STBY_DPSTBY 0x03
+
+/* PF9453 LDO ENMODE bits */
+#define LDO_ENMODE_OFF 0x00
+#define LDO_ENMODE_ONREQ 0x01
+#define LDO_ENMODE_ONREQ_STBY 0x02
+#define LDO_ENMODE_ONREQ_STBY_DPSTBY 0x03
+
+/* PF9453_REG_BUCK1_CTRL bits */
+#define BUCK1_LPMODE 0x30
+#define BUCK1_AD 0x08
+#define BUCK1_FPWM 0x04
+#define BUCK1_ENMODE_MASK GENMASK(1, 0)
+
+/* PF9453_REG_BUCK2_CTRL bits */
+#define BUCK2_RAMP_MASK GENMASK(7, 6)
+#define BUCK2_RAMP_25MV 0x0
+#define BUCK2_RAMP_12P5MV 0x1
+#define BUCK2_RAMP_6P25MV 0x2
+#define BUCK2_RAMP_3P125MV 0x3
+#define BUCK2_LPMODE 0x30
+#define BUCK2_AD 0x08
+#define BUCK2_FPWM 0x04
+#define BUCK2_ENMODE_MASK GENMASK(1, 0)
+
+/* PF9453_REG_BUCK3_CTRL bits */
+#define BUCK3_LPMODE 0x30
+#define BUCK3_AD 0x08
+#define BUCK3_FPWM 0x04
+#define BUCK3_ENMODE_MASK GENMASK(1, 0)
+
+/* PF9453_REG_BUCK4_CTRL bits */
+#define BUCK4_LPMODE 0x30
+#define BUCK4_AD 0x08
+#define BUCK4_FPWM 0x04
+#define BUCK4_ENMODE_MASK GENMASK(1, 0)
+
+/* PF9453_REG_BUCK123_PRESET_EN bit */
+#define BUCK123_PRESET_EN 0x80
+
+/* PF9453_BUCK1OUT bits */
+#define BUCK1OUT_MASK GENMASK(6, 0)
+
+/* PF9453_BUCK2OUT bits */
+#define BUCK2OUT_MASK GENMASK(6, 0)
+#define BUCK2OUT_STBY_MASK GENMASK(6, 0)
+
+/* PF9453_REG_BUCK3OUT bits */
+#define BUCK3OUT_MASK GENMASK(6, 0)
+
+/* PF9453_REG_BUCK4OUT bits */
+#define BUCK4OUT_MASK GENMASK(6, 0)
+
+/* PF9453_REG_LDO1_VOLT bits */
+#define LDO1_EN_MASK GENMASK(1, 0)
+#define LDO1OUT_MASK GENMASK(6, 0)
+
+/* PF9453_REG_LDO2_VOLT bits */
+#define LDO2_EN_MASK GENMASK(1, 0)
+#define LDO2OUT_MASK GENMASK(6, 0)
+
+/* PF9453_REG_LDOSNVS_VOLT bits */
+#define LDOSNVS_EN_MASK GENMASK(0, 0)
+#define LDOSNVSCFG1_MASK GENMASK(6, 0)
+
+/* PF9453_REG_IRQ bits */
+#define IRQ_RSVD 0x80
+#define IRQ_RSTB 0x40
+#define IRQ_ONKEY 0x20
+#define IRQ_RESETKEY 0x10
+#define IRQ_VR_FLT1 0x08
+#define IRQ_LOWVSYS 0x04
+#define IRQ_THERM_100 0x02
+#define IRQ_THERM_80 0x01
+
+/* PF9453_REG_RESET_CTRL bits */
+#define WDOG_B_CFG_MASK GENMASK(7, 6)
+#define WDOG_B_CFG_NONE 0x00
+#define WDOG_B_CFG_WARM 0x40
+#define WDOG_B_CFG_COLD 0x80
+
+/* PF9453_REG_CONFIG2 bits */
+#define I2C_LT_MASK GENMASK(1, 0)
+#define I2C_LT_FORCE_DISABLE 0x00
+#define I2C_LT_ON_STANDBY_RUN 0x01
+#define I2C_LT_ON_RUN 0x02
+#define I2C_LT_FORCE_ENABLE 0x03
+
+static const struct regmap_range pf9453_status_range = {
+ .range_min = PF9453_REG_INT1,
+ .range_max = PF9453_REG_PWRON_STAT,
+};
+
+static const struct regmap_access_table pf9453_volatile_regs = {
+ .yes_ranges = &pf9453_status_range,
+ .n_yes_ranges = 1,
+};
+
+static const struct regmap_config pf9453_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .volatile_table = &pf9453_volatile_regs,
+ .max_register = PF9453_MAX_REG - 1,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+/*
+ * BUCK2
+ * BUCK2RAM[1:0] BUCK2 DVS ramp rate setting
+ * 00: 25mV/1usec
+ * 01: 25mV/2usec
+ * 10: 25mV/4usec
+ * 11: 25mV/8usec
+ */
+static const unsigned int pf9453_dvs_buck_ramp_table[] = {
+ 25000, 12500, 6250, 3125
+};
+
+static bool is_reg_protect(uint reg)
+{
+ switch (reg) {
+ case PF9453_REG_BUCK1OUT:
+ case PF9453_REG_BUCK2OUT:
+ case PF9453_REG_BUCK3OUT:
+ case PF9453_REG_BUCK4OUT:
+ case PF9453_REG_LDO1OUT_L:
+ case PF9453_REG_LDO1OUT_H:
+ case PF9453_REG_LDO2OUT:
+ case PF9453_REG_LDOSNVS_CFG1:
+ case PF9453_REG_BUCK2OUT_MAX_LIMIT:
+ case PF9453_REG_BUCK2OUT_MIN_LIMIT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int pf9453_pmic_write(struct pf9453 *pf9453, unsigned int reg, u8 mask, unsigned int val)
+{
+ int ret = -EINVAL;
+ u8 data, key;
+ u32 rxBuf;
+
+ /* If not updating entire register, perform a read-mod-write */
+ data = val;
+ key = PF9453_UNLOCK_KEY;
+
+ if (mask != 0xffU) {
+ /* Read data */
+ ret = regmap_read(pf9453->regmap, reg, &rxBuf);
+ if (ret) {
+ dev_err(pf9453->dev, "Read reg=%0x error!\n", reg);
+ return ret;
+ }
+ data = (val & mask) | (rxBuf & (~mask));
+ }
+
+ if (reg < PF9453_MAX_REG) {
+ if (is_reg_protect(reg)) {
+ ret = regmap_raw_write(pf9453->regmap, PF9453_REG_LOCK, &key, 1U);
+ if (ret) {
+ dev_err(pf9453->dev, "Write reg=%0x error!\n", reg);
+ return ret;
+ }
+
+ ret = regmap_raw_write(pf9453->regmap, reg, &data, 1U);
+ if (ret) {
+ dev_err(pf9453->dev, "Write reg=%0x error!\n", reg);
+ return ret;
+ }
+
+ key = PF9453_LOCK_KEY;
+ ret = regmap_raw_write(pf9453->regmap, PF9453_REG_LOCK, &key, 1U);
+ if (ret) {
+ dev_err(pf9453->dev, "Write reg=%0x error!\n", reg);
+ return ret;
+ }
+ } else {
+ ret = regmap_raw_write(pf9453->regmap, reg, &data, 1U);
+ if (ret) {
+ dev_err(pf9453->dev, "Write reg=%0x error!\n", reg);
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
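For orientation, here is a minimal usage sketch of the helper above (illustrative only, not part of the patch; the function name is hypothetical and it assumes an already-probed struct pf9453):

/* Hypothetical example: put BUCK1 under PMIC_ON_REQ control. */
static int example_buck1_onreq(struct pf9453 *pf9453)
{
	/*
	 * mask != 0xff, so pf9453_pmic_write() first reads BUCK1CTRL and
	 * merges: data = (BUCK_ENMODE_ONREQ & BUCK1_ENMODE_MASK) |
	 *                (old & ~BUCK1_ENMODE_MASK).
	 * BUCK1CTRL is not listed in is_reg_protect(), so no UNLOCK/LOCK
	 * key writes are issued around the raw write.
	 */
	return pf9453_pmic_write(pf9453, PF9453_REG_BUCK1CTRL,
				 BUCK1_ENMODE_MASK, BUCK_ENMODE_ONREQ);
}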
+
+/**
+ * pf9453_regulator_enable_regmap() - enable() helper for regmap users
+ *
+ * @rdev: regulator to operate on
+ *
+ * Regulators that use regmap for their register I/O can set the
+ * enable_reg and enable_mask fields in their descriptor and then use
+ * this as their enable() operation, saving some code.
+ */
+static int pf9453_regulator_enable_regmap(struct regulator_dev *rdev)
+{
+ struct pf9453 *pf9453 = dev_get_drvdata(rdev->dev.parent);
+ unsigned int val;
+
+ if (rdev->desc->enable_is_inverted) {
+ val = rdev->desc->disable_val;
+ } else {
+ val = rdev->desc->enable_val;
+ if (!val)
+ val = rdev->desc->enable_mask;
+ }
+
+ return pf9453_pmic_write(pf9453, rdev->desc->enable_reg, rdev->desc->enable_mask, val);
+}
+
+/**
+ * pf9453_regulator_disable_regmap() - disable() helper for regmap users
+ *
+ * @rdev: regulator to operate on
+ *
+ * Regulators that use regmap for their register I/O can set the
+ * enable_reg and enable_mask fields in their descriptor and then use
+ * this as their disable() operation, saving some code.
+ */
+static int pf9453_regulator_disable_regmap(struct regulator_dev *rdev)
+{
+ struct pf9453 *pf9453 = dev_get_drvdata(rdev->dev.parent);
+ unsigned int val;
+
+ if (rdev->desc->enable_is_inverted) {
+ val = rdev->desc->enable_val;
+ if (!val)
+ val = rdev->desc->enable_mask;
+ } else {
+ val = rdev->desc->disable_val;
+ }
+
+ return pf9453_pmic_write(pf9453, rdev->desc->enable_reg, rdev->desc->enable_mask, val);
+}
+
+/**
+ * pf9453_regulator_set_voltage_sel_regmap() - set_voltage_sel() helper for regmap users
+ *
+ * @rdev: regulator to operate on
+ * @sel: Selector to set
+ *
+ * Regulators that use regmap for their register I/O can set the
+ * vsel_reg and vsel_mask fields in their descriptor and then use this
+ * as their set_voltage_sel operation, saving some code.
+ */
+static int pf9453_regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned int sel)
+{
+ struct pf9453 *pf9453 = dev_get_drvdata(rdev->dev.parent);
+ int ret;
+
+ sel <<= ffs(rdev->desc->vsel_mask) - 1;
+ ret = pf9453_pmic_write(pf9453, rdev->desc->vsel_reg, rdev->desc->vsel_mask, sel);
+ if (ret)
+ return ret;
+
+ if (rdev->desc->apply_bit)
+ ret = pf9453_pmic_write(pf9453, rdev->desc->apply_reg,
+ rdev->desc->apply_bit, rdev->desc->apply_bit);
+ return ret;
+}
+
+static int find_closest_bigger(unsigned int target, const unsigned int *table,
+ unsigned int num_sel, unsigned int *sel)
+{
+ unsigned int s, tmp, max, maxsel = 0;
+ bool found = false;
+
+ max = table[0];
+
+ for (s = 0; s < num_sel; s++) {
+ if (table[s] > max) {
+ max = table[s];
+ maxsel = s;
+ }
+ if (table[s] >= target) {
+ if (!found || table[s] - target < tmp - target) {
+ tmp = table[s];
+ *sel = s;
+ found = true;
+ if (tmp == target)
+ break;
+ }
+ }
+ }
+
+ if (!found) {
+ *sel = maxsel;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * pf9453_regulator_set_ramp_delay_regmap() - set_ramp_delay() helper for regmap users
+ *
+ * @rdev: regulator to operate on
+ * @ramp_delay: desired ramp rate, in uV/us
+ *
+ * Regulators that use regmap for their register I/O can set the ramp_reg
+ * and ramp_mask fields in their descriptor and then use this as their
+ * set_ramp_delay operation, saving some code.
+ */
+static int pf9453_regulator_set_ramp_delay_regmap(struct regulator_dev *rdev, int ramp_delay)
+{
+ struct pf9453 *pf9453 = dev_get_drvdata(rdev->dev.parent);
+ unsigned int sel;
+ int ret;
+
+ if (WARN_ON(!rdev->desc->n_ramp_values || !rdev->desc->ramp_delay_table))
+ return -EINVAL;
+
+ ret = find_closest_bigger(ramp_delay, rdev->desc->ramp_delay_table,
+ rdev->desc->n_ramp_values, &sel);
+
+ if (ret) {
+ dev_warn(rdev_get_dev(rdev),
+ "Can't set ramp-delay %u, setting %u\n", ramp_delay,
+ rdev->desc->ramp_delay_table[sel]);
+ }
+
+ sel <<= ffs(rdev->desc->ramp_mask) - 1;
+
+ return pf9453_pmic_write(pf9453, rdev->desc->ramp_reg,
+ rdev->desc->ramp_mask, sel);
+}
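A worked example of the selector math above (the requested value is illustrative, not from the patch): a consumer asking for a 10000 uV/us ramp on BUCK2 ends up with register value 0x40.

/*
 * ramp_delay = 10000, ramp_delay_table = { 25000, 12500, 6250, 3125 }
 * find_closest_bigger() -> smallest entry >= 10000 is 12500, sel = 1
 * sel <<= ffs(BUCK2_RAMP_MASK) - 1   ->   1 << 6 = 0x40
 * pf9453_pmic_write(pf9453, PF9453_REG_BUCK2CTRL, BUCK2_RAMP_MASK, 0x40)
 * i.e. BUCK2RAM is programmed for 25mV/2usec.
 */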
+
+static const struct regulator_ops pf9453_dvs_buck_regulator_ops = {
+ .enable = pf9453_regulator_enable_regmap,
+ .disable = pf9453_regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = pf9453_regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = pf9453_regulator_set_ramp_delay_regmap,
+};
+
+static const struct regulator_ops pf9453_buck_regulator_ops = {
+ .enable = pf9453_regulator_enable_regmap,
+ .disable = pf9453_regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = pf9453_regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+};
+
+static const struct regulator_ops pf9453_ldo_regulator_ops = {
+ .enable = pf9453_regulator_enable_regmap,
+ .disable = pf9453_regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = pf9453_regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+/*
+ * BUCK1/3/4
+ * 0.60 to 3.775V (25mV step)
+ */
+static const struct linear_range pf9453_buck134_volts[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0x00, 0x7F, 25000),
+};
+
+/*
+ * BUCK2
+ * 0.60 to 2.1875V (12.5mV step)
+ */
+static const struct linear_range pf9453_buck2_volts[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0x00, 0x7F, 12500),
+};
+
+/*
+ * LDO1
+ * 0.8 to 3.3V (25mV step)
+ */
+static const struct linear_range pf9453_ldo1_volts[] = {
+ REGULATOR_LINEAR_RANGE(800000, 0x00, 0x64, 25000),
+};
+
+/*
+ * LDO2
+ * 0.5 to 1.95V (25mV step)
+ */
+static const struct linear_range pf9453_ldo2_volts[] = {
+ REGULATOR_LINEAR_RANGE(500000, 0x00, 0x3A, 25000),
+};
+
+/*
+ * LDOSNVS
+ * 1.2 to 3.4V (25mV step)
+ */
+static const struct linear_range pf9453_ldosnvs_volts[] = {
+ REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x58, 25000),
+};
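As a quick sanity check of these ranges (arithmetic only, not part of the patch), regulator_list_voltage_linear_range() yields min_uV + sel * step, which reproduces the limits quoted in the comments:

/*
 * BUCK1/3/4: sel 0x7f -> 600000 + 127 * 25000 = 3775000 uV (3.775 V)
 * BUCK2:     sel 0x7f -> 600000 + 127 * 12500 = 2187500 uV (2.1875 V)
 * LDO1:      sel 0x64 -> 800000 + 100 * 25000 = 3300000 uV (3.3 V)
 * LDO2:      sel 0x3a -> 500000 +  58 * 25000 = 1950000 uV (1.95 V)
 * LDOSNVS:   sel 0x58 -> 1200000 + 88 * 25000 = 3400000 uV (3.4 V)
 */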
+
+static int buck_set_dvs(const struct regulator_desc *desc,
+ struct device_node *np, struct pf9453 *pf9453,
+ char *prop, unsigned int reg, unsigned int mask)
+{
+ int ret, i;
+ u32 uv;
+
+ ret = of_property_read_u32(np, prop, &uv);
+ if (ret == -EINVAL)
+ return 0;
+ else if (ret)
+ return ret;
+
+ for (i = 0; i < desc->n_voltages; i++) {
+ ret = regulator_desc_list_voltage_linear_range(desc, i);
+ if (ret < 0)
+ continue;
+ if (ret == uv) {
+ i <<= ffs(desc->vsel_mask) - 1;
+ ret = pf9453_pmic_write(pf9453, reg, mask, i);
+ break;
+ }
+ }
+
+ if (ret == 0) {
+ struct pf9453_regulator_desc *regulator = container_of(desc,
+ struct pf9453_regulator_desc, desc);
+
+ /* Enable DVS control through PMIC_STBY_REQ for this BUCK */
+ ret = pf9453_pmic_write(pf9453, regulator->desc.enable_reg,
+ BUCK2_LPMODE, BUCK2_LPMODE);
+ }
+ return ret;
+}
+
+static int pf9453_set_dvs_levels(struct device_node *np, const struct regulator_desc *desc,
+ struct regulator_config *cfg)
+{
+ struct pf9453_regulator_desc *data = container_of(desc, struct pf9453_regulator_desc, desc);
+ struct pf9453 *pf9453 = dev_get_drvdata(cfg->dev);
+ const struct pf9453_dvs_config *dvs = &data->dvs;
+ unsigned int reg, mask;
+ int i, ret = 0;
+ char *prop;
+
+ for (i = 0; i < PF9453_DVS_LEVEL_MAX; i++) {
+ switch (i) {
+ case PF9453_DVS_LEVEL_RUN:
+ prop = "nxp,dvs-run-voltage";
+ reg = dvs->run_reg;
+ mask = dvs->run_mask;
+ break;
+ case PF9453_DVS_LEVEL_DPSTANDBY:
+ case PF9453_DVS_LEVEL_STANDBY:
+ prop = "nxp,dvs-standby-voltage";
+ reg = dvs->standby_reg;
+ mask = dvs->standby_mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = buck_set_dvs(desc, np, pf9453, prop, reg, mask);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
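To make the lookup concrete, here is how a hypothetical BUCK2 node with nxp,dvs-run-voltage = <700000> would be resolved (example values, not from the patch):

/*
 * buck_set_dvs() walks list_voltage() until it matches 700000 uV:
 *   selector i = (700000 - 600000) / 12500 = 8
 *   i <<= ffs(BUCK2OUT_MASK) - 1  ->  8 << 0 = 8
 *   pf9453_pmic_write(pf9453, PF9453_REG_BUCK2OUT, BUCK2OUT_MASK, 8)
 * and on success BUCK2_LPMODE is set in BUCK2CTRL so that DVS is
 * controlled through PMIC_STBY_REQ.
 */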
+
+static const struct pf9453_regulator_desc pf9453_regulators[] = {
+ {
+ .desc = {
+ .name = "buck1",
+ .of_match = of_match_ptr("BUCK1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PF9453_BUCK1,
+ .ops = &pf9453_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PF9453_BUCK1_VOLTAGE_NUM,
+ .linear_ranges = pf9453_buck134_volts,
+ .n_linear_ranges = ARRAY_SIZE(pf9453_buck134_volts),
+ .vsel_reg = PF9453_REG_BUCK1OUT,
+ .vsel_mask = BUCK1OUT_MASK,
+ .enable_reg = PF9453_REG_BUCK1CTRL,
+ .enable_mask = BUCK1_ENMODE_MASK,
+ .enable_val = BUCK_ENMODE_ONREQ,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck2",
+ .of_match = of_match_ptr("BUCK2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PF9453_BUCK2,
+ .ops = &pf9453_dvs_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PF9453_BUCK2_VOLTAGE_NUM,
+ .linear_ranges = pf9453_buck2_volts,
+ .n_linear_ranges = ARRAY_SIZE(pf9453_buck2_volts),
+ .vsel_reg = PF9453_REG_BUCK2OUT,
+ .vsel_mask = BUCK2OUT_MASK,
+ .enable_reg = PF9453_REG_BUCK2CTRL,
+ .enable_mask = BUCK2_ENMODE_MASK,
+ .enable_val = BUCK_ENMODE_ONREQ,
+ .ramp_reg = PF9453_REG_BUCK2CTRL,
+ .ramp_mask = BUCK2_RAMP_MASK,
+ .ramp_delay_table = pf9453_dvs_buck_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(pf9453_dvs_buck_ramp_table),
+ .owner = THIS_MODULE,
+ .of_parse_cb = pf9453_set_dvs_levels,
+ },
+ .dvs = {
+ .run_reg = PF9453_REG_BUCK2OUT,
+ .run_mask = BUCK2OUT_MASK,
+ .standby_reg = PF9453_REG_BUCK2OUT_STBY,
+ .standby_mask = BUCK2OUT_STBY_MASK,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck3",
+ .of_match = of_match_ptr("BUCK3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PF9453_BUCK3,
+ .ops = &pf9453_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PF9453_BUCK3_VOLTAGE_NUM,
+ .linear_ranges = pf9453_buck134_volts,
+ .n_linear_ranges = ARRAY_SIZE(pf9453_buck134_volts),
+ .vsel_reg = PF9453_REG_BUCK3OUT,
+ .vsel_mask = BUCK3OUT_MASK,
+ .enable_reg = PF9453_REG_BUCK3CTRL,
+ .enable_mask = BUCK3_ENMODE_MASK,
+ .enable_val = BUCK_ENMODE_ONREQ,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck4",
+ .of_match = of_match_ptr("BUCK4"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PF9453_BUCK4,
+ .ops = &pf9453_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PF9453_BUCK4_VOLTAGE_NUM,
+ .linear_ranges = pf9453_buck134_volts,
+ .n_linear_ranges = ARRAY_SIZE(pf9453_buck134_volts),
+ .vsel_reg = PF9453_REG_BUCK4OUT,
+ .vsel_mask = BUCK4OUT_MASK,
+ .enable_reg = PF9453_REG_BUCK4CTRL,
+ .enable_mask = BUCK4_ENMODE_MASK,
+ .enable_val = BUCK_ENMODE_ONREQ,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldo1",
+ .of_match = of_match_ptr("LDO1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PF9453_LDO1,
+ .ops = &pf9453_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PF9453_LDO1_VOLTAGE_NUM,
+ .linear_ranges = pf9453_ldo1_volts,
+ .n_linear_ranges = ARRAY_SIZE(pf9453_ldo1_volts),
+ .vsel_reg = PF9453_REG_LDO1OUT_H,
+ .vsel_mask = LDO1OUT_MASK,
+ .enable_reg = PF9453_REG_LDO1CFG,
+ .enable_mask = LDO1_EN_MASK,
+ .enable_val = LDO_ENMODE_ONREQ,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldo2",
+ .of_match = of_match_ptr("LDO2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PF9453_LDO2,
+ .ops = &pf9453_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PF9453_LDO2_VOLTAGE_NUM,
+ .linear_ranges = pf9453_ldo2_volts,
+ .n_linear_ranges = ARRAY_SIZE(pf9453_ldo2_volts),
+ .vsel_reg = PF9453_REG_LDO2OUT,
+ .vsel_mask = LDO2OUT_MASK,
+ .enable_reg = PF9453_REG_LDO2CFG,
+ .enable_mask = LDO2_EN_MASK,
+ .enable_val = LDO_ENMODE_ONREQ,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldosnvs",
+ .of_match = of_match_ptr("LDO-SNVS"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PF9453_LDOSNVS,
+ .ops = &pf9453_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PF9453_LDOSNVS_VOLTAGE_NUM,
+ .linear_ranges = pf9453_ldosnvs_volts,
+ .n_linear_ranges = ARRAY_SIZE(pf9453_ldosnvs_volts),
+ .vsel_reg = PF9453_REG_LDOSNVS_CFG1,
+ .vsel_mask = LDOSNVSCFG1_MASK,
+ .enable_reg = PF9453_REG_LDOSNVS_CFG2,
+ .enable_mask = LDOSNVS_EN_MASK,
+ .owner = THIS_MODULE,
+ },
+ },
+ { }
+};
+
+static irqreturn_t pf9453_irq_handler(int irq, void *data)
+{
+ struct pf9453 *pf9453 = data;
+ struct regmap *regmap = pf9453->regmap;
+ unsigned int status;
+ int ret;
+
+ ret = regmap_read(regmap, PF9453_REG_INT1, &status);
+ if (ret < 0) {
+ dev_err(pf9453->dev, "Failed to read INT1(%d)\n", ret);
+ return IRQ_NONE;
+ }
+
+ if (status & IRQ_RSTB)
+ dev_warn(pf9453->dev, "IRQ_RSTB interrupt.\n");
+
+ if (status & IRQ_ONKEY)
+ dev_warn(pf9453->dev, "IRQ_ONKEY interrupt.\n");
+
+ if (status & IRQ_VR_FLT1)
+ dev_warn(pf9453->dev, "VRFLT1 interrupt.\n");
+
+ if (status & IRQ_RESETKEY)
+ dev_warn(pf9453->dev, "IRQ_RESETKEY interrupt.\n");
+
+ if (status & IRQ_LOWVSYS)
+ dev_warn(pf9453->dev, "LOWVSYS interrupt.\n");
+
+ if (status & IRQ_THERM_100)
+ dev_warn(pf9453->dev, "IRQ_THERM_100 interrupt.\n");
+
+ if (status & IRQ_THERM_80)
+ dev_warn(pf9453->dev, "IRQ_THERM_80 interrupt.\n");
+
+ return IRQ_HANDLED;
+}
+
+static int pf9453_i2c_probe(struct i2c_client *i2c)
+{
+ const struct pf9453_regulator_desc *regulator_desc = of_device_get_match_data(&i2c->dev);
+ struct regulator_config config = { };
+ unsigned int reset_ctrl;
+ unsigned int device_id;
+ struct pf9453 *pf9453;
+ int ret;
+
+ if (!i2c->irq)
+ return dev_err_probe(&i2c->dev, -EINVAL, "No IRQ configured?\n");
+
+ pf9453 = devm_kzalloc(&i2c->dev, sizeof(struct pf9453), GFP_KERNEL);
+ if (!pf9453)
+ return -ENOMEM;
+
+ pf9453->regmap = devm_regmap_init_i2c(i2c, &pf9453_regmap_config);
+ if (IS_ERR(pf9453->regmap))
+ return dev_err_probe(&i2c->dev, PTR_ERR(pf9453->regmap),
+ "regmap initialization failed\n");
+
+ pf9453->irq = i2c->irq;
+ pf9453->dev = &i2c->dev;
+
+ dev_set_drvdata(&i2c->dev, pf9453);
+
+ ret = regmap_read(pf9453->regmap, PF9453_REG_DEV_ID, &device_id);
+ if (ret)
+ return dev_err_probe(&i2c->dev, ret, "Failed to read device ID\n");
+
+ /* Check that the board and DT match the right PMIC */
+ if ((device_id >> 4) != 0xb)
+ return dev_err_probe(&i2c->dev, -EINVAL, "Device ID (%x) mismatch\n",
+ device_id >> 4);
+
+ while (regulator_desc->desc.name) {
+ const struct regulator_desc *desc;
+ struct regulator_dev *rdev;
+
+ desc = &regulator_desc->desc;
+
+ config.regmap = pf9453->regmap;
+ config.dev = pf9453->dev;
+
+ rdev = devm_regulator_register(pf9453->dev, desc, &config);
+ if (IS_ERR(rdev))
+ return dev_err_probe(pf9453->dev, PTR_ERR(rdev),
+ "Failed to register regulator(%s)\n", desc->name);
+
+ regulator_desc++;
+ }
+
+ ret = devm_request_threaded_irq(pf9453->dev, pf9453->irq, NULL, pf9453_irq_handler,
+ (IRQF_TRIGGER_FALLING | IRQF_ONESHOT),
+ "pf9453-irq", pf9453);
+ if (ret)
+ return dev_err_probe(pf9453->dev, ret, "Failed to request IRQ: %d\n", pf9453->irq);
+
+ /* Unmask all interrupts except PWRON/WDOG/RSVD */
+ ret = pf9453_pmic_write(pf9453, PF9453_REG_INT1_MASK,
+ IRQ_ONKEY | IRQ_RESETKEY | IRQ_RSTB | IRQ_VR_FLT1
+ | IRQ_LOWVSYS | IRQ_THERM_100 | IRQ_THERM_80, IRQ_RSVD);
+ if (ret)
+ return dev_err_probe(&i2c->dev, ret, "Failed to unmask IRQs\n");
+
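The mask/value pair in that call is easy to misread; expanded through pf9453_pmic_write()'s read-modify-write it works out as follows (worked example, not additional patch content):

/*
 * mask = ONKEY|RESETKEY|RSTB|VR_FLT1|LOWVSYS|THERM_100|THERM_80 = 0x7f
 * val  = IRQ_RSVD = 0x80
 * data = (0x80 & 0x7f) | (old & ~0x7f) = old & 0x80
 * so bits 0-6 of INT1_MASK are cleared (those interrupts become
 * unmasked, per the comment above) while the reserved bit 7 keeps
 * whatever value it already had.
 */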
+ if (of_property_read_bool(i2c->dev.of_node, "nxp,wdog_b-warm-reset"))
+ reset_ctrl = WDOG_B_CFG_WARM;
+ else
+ reset_ctrl = WDOG_B_CFG_COLD;
+
+ /* Set reset behavior on assertion of WDOG_B signal */
+ ret = pf9453_pmic_write(pf9453, PF9453_REG_RESET_CTRL, WDOG_B_CFG_MASK, reset_ctrl);
+ if (ret)
+ return dev_err_probe(&i2c->dev, ret, "Failed to set WDOG_B reset behavior\n");
+
+ /*
+ * The driver uses the LDO1OUT_H register to control the LDO1 regulator.
+ * This is only valid if the SD_VSEL input of the PMIC is high. Let's
+ * check if the pin is available as GPIO and set it to high.
+ */
+ pf9453->sd_vsel_gpio = gpiod_get_optional(pf9453->dev, "sd-vsel", GPIOD_OUT_HIGH);
+
+ if (IS_ERR(pf9453->sd_vsel_gpio))
+ return dev_err_probe(&i2c->dev, PTR_ERR(pf9453->sd_vsel_gpio),
+ "Failed to get SD_VSEL GPIO\n");
+
+ return 0;
+}
+
+static const struct of_device_id pf9453_of_match[] = {
+ {
+ .compatible = "nxp,pf9453",
+ .data = pf9453_regulators,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pf9453_of_match);
+
+static struct i2c_driver pf9453_i2c_driver = {
+ .driver = {
+ .name = "nxp-pf9453",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .of_match_table = pf9453_of_match,
+ },
+ .probe = pf9453_i2c_probe,
+};
+
+module_i2c_driver(pf9453_i2c_driver);
+
+MODULE_AUTHOR("Joy Zou <joy.zou@nxp.com>");
+MODULE_DESCRIPTION("NXP PF9453 Power Management IC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/rtq2208-regulator.c b/drivers/regulator/rtq2208-regulator.c
index 5925fa7a9a06..9cde7181b0f0 100644
--- a/drivers/regulator/rtq2208-regulator.c
+++ b/drivers/regulator/rtq2208-regulator.c
@@ -27,6 +27,11 @@
#define RTQ2208_REG_LDO1_CFG 0xB1
#define RTQ2208_REG_LDO2_CFG 0xC1
#define RTQ2208_REG_LDO_DVS_CTRL 0xD0
+#define RTQ2208_REG_HIDDEN_BUCKPH 0x55
+#define RTQ2208_REG_HIDDEN_LDOCFG0 0x8F
+#define RTQ2208_REG_HIDDEN_LDOCFG1 0x96
+#define RTQ2208_REG_HIDDEN0 0xFE
+#define RTQ2208_REG_HIDDEN1 0xFF
/* Mask */
#define RTQ2208_BUCK_NR_MTP_SEL_MASK GENMASK(7, 0)
@@ -45,6 +50,11 @@
#define RTQ2208_LDO1_VOSEL_SD_MASK BIT(5)
#define RTQ2208_LDO2_DISCHG_EN_MASK BIT(6)
#define RTQ2208_LDO2_VOSEL_SD_MASK BIT(7)
+#define RTQ2208_MASK_BUCKPH_GROUP1 GENMASK(6, 4)
+#define RTQ2208_MASK_BUCKPH_GROUP2 GENMASK(2, 0)
+#define RTQ2208_MASK_LDO2_OPT0 BIT(7)
+#define RTQ2208_MASK_LDO2_OPT1 BIT(6)
+#define RTQ2208_MASK_LDO1_FIXED BIT(6)
/* Size */
#define RTQ2208_VOUT_MAXNUM 256
@@ -245,11 +255,6 @@ static const unsigned int rtq2208_ldo_volt_table[] = {
3300000,
};
-static struct of_regulator_match rtq2208_ldo_match[] = {
- {.name = "ldo2", },
- {.name = "ldo1", },
-};
-
static unsigned int rtq2208_of_map_mode(unsigned int mode)
{
switch (mode) {
@@ -344,59 +349,6 @@ static irqreturn_t rtq2208_irq_handler(int irqno, void *devid)
return IRQ_HANDLED;
}
-static int rtq2208_of_get_ldo_dvs_ability(struct device *dev)
-{
- struct device_node *np;
- struct of_regulator_match *match;
- struct regulator_desc *desc;
- struct regulator_init_data *init_data;
- u32 fixed_uV;
- int ret, i;
-
- if (!dev->of_node)
- return -ENODEV;
-
- np = of_get_child_by_name(dev->of_node, "regulators");
- if (!np)
- np = dev->of_node;
-
- ret = of_regulator_match(dev, np, rtq2208_ldo_match, ARRAY_SIZE(rtq2208_ldo_match));
-
- of_node_put(np);
-
- if (ret < 0)
- return ret;
-
- for (i = 0; i < ARRAY_SIZE(rtq2208_ldo_match); i++) {
- match = rtq2208_ldo_match + i;
- init_data = match->init_data;
- desc = (struct regulator_desc *)match->desc;
-
- if (!init_data || !desc)
- continue;
-
- /* specify working fixed voltage if the propery exists */
- ret = of_property_read_u32(match->of_node, "richtek,fixed-microvolt", &fixed_uV);
-
- if (!ret) {
- if (fixed_uV != init_data->constraints.min_uV ||
- fixed_uV != init_data->constraints.max_uV)
- return -EINVAL;
- desc->n_voltages = 1;
- desc->fixed_uV = fixed_uV;
- desc->fixed_uV = init_data->constraints.min_uV;
- desc->ops = &rtq2208_regulator_ldo_fix_ops;
- } else {
- desc->n_voltages = ARRAY_SIZE(rtq2208_ldo_volt_table);
- desc->volt_table = rtq2208_ldo_volt_table;
- desc->ops = &rtq2208_regulator_ldo_adj_ops;
- }
- }
-
- return 0;
-}
-
-
#define BUCK_INFO(_name, _id) \
{ \
.name = _name, \
@@ -424,9 +376,11 @@ static const struct linear_range rtq2208_vout_range[] = {
REGULATOR_LINEAR_RANGE(1310000, 181, 255, 10000),
};
-static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, int mtp_sel, int idx)
+static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, int mtp_sel, int idx,
+ unsigned int ldo1_fixed, unsigned int ldo2_fixed)
{
struct regulator_desc *desc;
+ unsigned int fixed_uV;
static const struct {
char *name;
int base;
@@ -462,7 +416,8 @@ static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, in
rdesc->mode_mask = RTQ2208_BUCK_NRMODE_MASK;
- if (idx >= RTQ2208_BUCK_B && idx <= RTQ2208_BUCK_E) {
+ switch (idx) {
+ case RTQ2208_BUCK_B ... RTQ2208_BUCK_E:
/* init buck desc */
desc->ops = &rtq2208_regulator_buck_ops;
desc->vsel_reg = curr_info->base + VSEL_SHIFT(mtp_sel);
@@ -480,7 +435,19 @@ static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, in
rdesc->suspend_config_reg = BUCK_RG_SHIFT(curr_info->base, 4);
rdesc->suspend_enable_mask = RTQ2208_BUCK_EN_STR_MASK;
rdesc->suspend_mode_mask = RTQ2208_BUCK_STRMODE_MASK;
- } else {
+ break;
+ default:
+ fixed_uV = idx == RTQ2208_LDO2 ? ldo2_fixed : ldo1_fixed;
+ if (fixed_uV) {
+ desc->n_voltages = 1;
+ desc->fixed_uV = fixed_uV;
+ desc->ops = &rtq2208_regulator_ldo_fix_ops;
+ } else {
+ desc->n_voltages = ARRAY_SIZE(rtq2208_ldo_volt_table);
+ desc->volt_table = rtq2208_ldo_volt_table;
+ desc->ops = &rtq2208_regulator_ldo_adj_ops;
+ }
+
/* init ldo desc */
desc->active_discharge_reg = RTQ2208_REG_LDO_DVS_CTRL;
desc->active_discharge_on = curr_info->dis_on;
@@ -490,13 +457,15 @@ static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, in
rdesc->suspend_config_reg = curr_info->base;
rdesc->suspend_enable_mask = RTQ2208_LDO_EN_STR_MASK;
+ break;
}
}
static int rtq2208_parse_regulator_dt_data(int n_regulator, const unsigned int *regulator_idx_table,
- struct rtq2208_regulator_desc *rdesc[RTQ2208_LDO_MAX], struct device *dev)
+ struct rtq2208_regulator_desc *rdesc[RTQ2208_LDO_MAX], struct device *dev,
+ unsigned int ldo1_fixed, unsigned int ldo2_fixed)
{
- int mtp_sel, i, idx, ret;
+ int mtp_sel, i, idx;
/* get mtp_sel0 or mtp_sel1 */
mtp_sel = device_property_read_bool(dev, "richtek,mtp-sel-high");
@@ -508,43 +477,101 @@ static int rtq2208_parse_regulator_dt_data(int n_regulator, const unsigned int *
if (!rdesc[i])
return -ENOMEM;
- rtq2208_init_regulator_desc(rdesc[i], mtp_sel, idx);
-
- /* init ldo dvs ability */
- if (idx >= RTQ2208_LDO2)
- rtq2208_ldo_match[idx - RTQ2208_LDO2].desc = &rdesc[i]->desc;
+ rtq2208_init_regulator_desc(rdesc[i], mtp_sel, idx, ldo1_fixed, ldo2_fixed);
}
- /* init ldo fixed_uV */
- ret = rtq2208_of_get_ldo_dvs_ability(dev);
- if (ret)
- return dev_err_probe(dev, ret, "Failed to get ldo fixed_uV\n");
-
return 0;
}
-/** different slave address corresponds different used bucks
- * slave address 0x10: BUCK[BCA FGE]
- * slave address 0x20: BUCK[BC FGHE]
- * slave address 0x40: BUCK[C G]
- */
-static int rtq2208_regulator_check(int slave_addr, int *num,
- int *regulator_idx_table, unsigned int *buck_masks)
+static int rtq2208_regulator_check(struct device *dev, int *num, int *regulator_idx_table,
+ unsigned int *buck_masks, unsigned int *ldo1_fixed_uV,
+ unsigned int *ldo2_fixed_uV)
{
- static bool rtq2208_used_table[3][RTQ2208_LDO_MAX] = {
- /* BUCK[BCA FGE], LDO[12] */
- {1, 1, 0, 1, 1, 1, 0, 1, 1, 1},
- /* BUCK[BC FGHE], LDO[12]*/
- {1, 1, 0, 0, 1, 1, 1, 1, 1, 1},
- /* BUCK[C G], LDO[12] */
- {0, 1, 0, 0, 0, 1, 0, 0, 1, 1},
- };
- int i, idx = ffs(slave_addr >> 4) - 1;
+ struct regmap *regmap = dev_get_regmap(dev, NULL);
+ bool rtq2208_used_table[RTQ2208_LDO_MAX] = {0};
+ u8 entry_key[] = { 0x69, 0x01 };
+ unsigned int buck_phase, ldo_cfg0, ldo_cfg1;
+ int i, ret;
u8 mask;
+ ret = regmap_raw_write(regmap, RTQ2208_REG_HIDDEN0, entry_key, ARRAY_SIZE(entry_key));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enter hidden page\n");
+
+ ret = regmap_read(regmap, RTQ2208_REG_HIDDEN_BUCKPH, &buck_phase);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read buck phase configuration\n");
+
+ ret = regmap_read(regmap, RTQ2208_REG_HIDDEN_LDOCFG0, &ldo_cfg0);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read ldo cfg0\n");
+
+ ret = regmap_read(regmap, RTQ2208_REG_HIDDEN_LDOCFG1, &ldo_cfg1);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read ldo cfg1\n");
+
+ ret = regmap_write(regmap, RTQ2208_REG_HIDDEN1, 0x00);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to exit hidden page\n");
+
+ dev_info(dev, "BUCK Phase 0x%x\n", buck_phase);
+ /*
+ * Use buck phase configuration to assign used table mask
+ * GROUP1 GROUP2
+ * 0 -> 2P + 2P BC FG
+ * 1 -> 2P + 1P + 1P BCA FGE
+ * 2 -> 1P + 1P + 1P + 1P BCDA FGHE
+ * 3 -> 3P + 1P BC FG
+ * others -> 4P C G
+ */
+ switch (FIELD_GET(RTQ2208_MASK_BUCKPH_GROUP1, buck_phase)) {
+ case 2:
+ rtq2208_used_table[RTQ2208_BUCK_D] = true;
+ fallthrough;
+ case 1:
+ rtq2208_used_table[RTQ2208_BUCK_A] = true;
+ fallthrough;
+ case 0:
+ case 3:
+ rtq2208_used_table[RTQ2208_BUCK_B] = true;
+ fallthrough;
+ default:
+ rtq2208_used_table[RTQ2208_BUCK_C] = true;
+ break;
+ }
+
+ switch (FIELD_GET(RTQ2208_MASK_BUCKPH_GROUP2, buck_phase)) {
+ case 2:
+ rtq2208_used_table[RTQ2208_BUCK_F] = true;
+ fallthrough;
+ case 1:
+ rtq2208_used_table[RTQ2208_BUCK_E] = true;
+ fallthrough;
+ case 0:
+ case 3:
+ rtq2208_used_table[RTQ2208_BUCK_H] = true;
+ fallthrough;
+ default:
+ rtq2208_used_table[RTQ2208_BUCK_G] = true;
+ break;
+ }
+
+ *ldo1_fixed_uV = FIELD_GET(RTQ2208_MASK_LDO1_FIXED, ldo_cfg1) ? 1200000 : 0;
+
+ if (!FIELD_GET(RTQ2208_MASK_LDO2_OPT0, ldo_cfg0) &&
+ !FIELD_GET(RTQ2208_MASK_LDO2_OPT1, ldo_cfg1))
+ *ldo2_fixed_uV = 0;
+ else if (FIELD_GET(RTQ2208_MASK_LDO2_OPT1, ldo_cfg1))
+ *ldo2_fixed_uV = 900000;
+ else
+ *ldo2_fixed_uV = 1200000;
+
+ /* By default, LDO1 & LDO2 are always used */
+ rtq2208_used_table[RTQ2208_LDO1] = rtq2208_used_table[RTQ2208_LDO2] = true;
+
for (i = 0; i < RTQ2208_LDO_MAX; i++) {
- if (!rtq2208_used_table[idx][i])
+ if (!rtq2208_used_table[i])
continue;
regulator_idx_table[(*num)++] = i;
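A worked decode of the two switches above (the buck_phase value is chosen for illustration, not read from real hardware):

/*
 * Suppose the hidden-page read returns buck_phase = 0x12:
 *   FIELD_GET(RTQ2208_MASK_BUCKPH_GROUP1, 0x12) = 1
 *     -> case 1 marks BUCK_A, then falls through to BUCK_B and BUCK_C
 *   FIELD_GET(RTQ2208_MASK_BUCKPH_GROUP2, 0x12) = 2
 *     -> case 2 marks BUCK_F, then falls through to BUCK_E, BUCK_H, BUCK_G
 * Together with the always-used LDO1/LDO2, the used set is bucks
 * A/B/C/E/F/G/H plus both LDOs, so n_regulator becomes 9.
 */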
@@ -559,7 +586,7 @@ static int rtq2208_regulator_check(int slave_addr, int *num,
static const struct regmap_config rtq2208_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = 0xEF,
+ .max_register = 0xFF,
};
static int rtq2208_probe(struct i2c_client *i2c)
@@ -573,6 +600,7 @@ static int rtq2208_probe(struct i2c_client *i2c)
int i, ret = 0, idx, n_regulator = 0;
unsigned int regulator_idx_table[RTQ2208_LDO_MAX],
buck_masks[RTQ2208_BUCK_NUM_IRQ_REGS] = {0x33, 0x33, 0x33, 0x33, 0x33};
+ unsigned int ldo1_fixed_uV, ldo2_fixed_uV;
rdev_map = devm_kzalloc(dev, sizeof(struct rtq2208_rdev_map), GFP_KERNEL);
if (!rdev_map)
@@ -583,7 +611,8 @@ static int rtq2208_probe(struct i2c_client *i2c)
return dev_err_probe(dev, PTR_ERR(regmap), "Failed to allocate regmap\n");
/* get needed regulator */
- ret = rtq2208_regulator_check(i2c->addr, &n_regulator, regulator_idx_table, buck_masks);
+ ret = rtq2208_regulator_check(dev, &n_regulator, regulator_idx_table, buck_masks,
+ &ldo1_fixed_uV, &ldo2_fixed_uV);
if (ret)
return dev_err_probe(dev, ret, "Failed to check used regulators\n");
@@ -593,7 +622,8 @@ static int rtq2208_probe(struct i2c_client *i2c)
cfg.dev = dev;
/* init regulator desc */
- ret = rtq2208_parse_regulator_dt_data(n_regulator, regulator_idx_table, rdesc, dev);
+ ret = rtq2208_parse_regulator_dt_data(n_regulator, regulator_idx_table, rdesc, dev,
+ ldo1_fixed_uV, ldo2_fixed_uV);
if (ret)
return ret;
diff --git a/drivers/regulator/rtq6752-regulator.c b/drivers/regulator/rtq6752-regulator.c
index d35d844eff3b..618904ede72c 100644
--- a/drivers/regulator/rtq6752-regulator.c
+++ b/drivers/regulator/rtq6752-regulator.c
@@ -105,7 +105,7 @@ static int rtq6752_get_error_flags(struct regulator_dev *rdev,
unsigned int *flags)
{
unsigned int val, events = 0;
- const unsigned int fault_mask[] = {
+ static const unsigned int fault_mask[] = {
RTQ6752_PAVDDF_MASK, RTQ6752_NAVDDF_MASK };
int rid = rdev_get_id(rdev), ret;
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 7dcf92af8f15..04ae9c6150bd 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -21,6 +21,7 @@
#include <linux/mfd/samsung/s2mps14.h>
#include <linux/mfd/samsung/s2mps15.h>
#include <linux/mfd/samsung/s2mpu02.h>
+#include <linux/mfd/samsung/s2mpu05.h>
/* The highest number of possible regulators for supported devices. */
#define S2MPS_REGULATOR_MAX S2MPS13_REGULATOR_MAX
@@ -254,6 +255,9 @@ static int s2mps11_regulator_enable(struct regulator_dev *rdev)
else
val = rdev->desc->enable_mask;
break;
+ case S2MPU05:
+ val = rdev->desc->enable_mask;
+ break;
default:
return -EINVAL;
}
@@ -1118,6 +1122,86 @@ static const struct regulator_desc s2mpu02_regulators[] = {
regulator_desc_s2mpu02_buck7(7),
};
+#define regulator_desc_s2mpu05_ldo_reg(num, min, step, reg) { \
+ .name = "ldo"#num, \
+ .id = S2MPU05_LDO##num, \
+ .ops = &s2mpu02_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = min, \
+ .uV_step = step, \
+ .n_voltages = S2MPU05_LDO_N_VOLTAGES, \
+ .vsel_reg = reg, \
+ .vsel_mask = S2MPU05_LDO_VSEL_MASK, \
+ .enable_reg = reg, \
+ .enable_mask = S2MPU05_ENABLE_MASK, \
+ .enable_time = S2MPU05_ENABLE_TIME_LDO \
+}
+
+#define regulator_desc_s2mpu05_ldo(num, reg, min, step) \
+ regulator_desc_s2mpu05_ldo_reg(num, min, step, S2MPU05_REG_L##num##reg)
+
+#define regulator_desc_s2mpu05_ldo1(num, reg) \
+ regulator_desc_s2mpu05_ldo(num, reg, S2MPU05_LDO_MIN1, S2MPU05_LDO_STEP1)
+
+#define regulator_desc_s2mpu05_ldo2(num, reg) \
+ regulator_desc_s2mpu05_ldo(num, reg, S2MPU05_LDO_MIN1, S2MPU05_LDO_STEP2)
+
+#define regulator_desc_s2mpu05_ldo3(num, reg) \
+ regulator_desc_s2mpu05_ldo(num, reg, S2MPU05_LDO_MIN2, S2MPU05_LDO_STEP2)
+
+#define regulator_desc_s2mpu05_ldo4(num, reg) \
+ regulator_desc_s2mpu05_ldo(num, reg, S2MPU05_LDO_MIN3, S2MPU05_LDO_STEP2)
+
+#define regulator_desc_s2mpu05_buck(num, which) { \
+ .name = "buck"#num, \
+ .id = S2MPU05_BUCK##num, \
+ .ops = &s2mpu02_buck_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPU05_BUCK_MIN##which, \
+ .uV_step = S2MPU05_BUCK_STEP##which, \
+ .n_voltages = S2MPU05_BUCK_N_VOLTAGES, \
+ .vsel_reg = S2MPU05_REG_B##num##CTRL2, \
+ .vsel_mask = S2MPU05_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPU05_REG_B##num##CTRL1, \
+ .enable_mask = S2MPU05_ENABLE_MASK, \
+ .enable_time = S2MPU05_ENABLE_TIME_BUCK##num \
+}
+
+#define regulator_desc_s2mpu05_buck123(num) regulator_desc_s2mpu05_buck(num, 1)
+#define regulator_desc_s2mpu05_buck45(num) regulator_desc_s2mpu05_buck(num, 2)
+
+static const struct regulator_desc s2mpu05_regulators[] = {
+ regulator_desc_s2mpu05_ldo4(1, CTRL),
+ regulator_desc_s2mpu05_ldo3(2, CTRL),
+ regulator_desc_s2mpu05_ldo2(3, CTRL),
+ regulator_desc_s2mpu05_ldo1(4, CTRL),
+ regulator_desc_s2mpu05_ldo1(5, CTRL),
+ regulator_desc_s2mpu05_ldo1(6, CTRL),
+ regulator_desc_s2mpu05_ldo2(7, CTRL),
+ regulator_desc_s2mpu05_ldo3(8, CTRL),
+ regulator_desc_s2mpu05_ldo4(9, CTRL1),
+ regulator_desc_s2mpu05_ldo4(10, CTRL),
+ /* LDOs 11-24 are used for CP. They aren't documented. */
+ regulator_desc_s2mpu05_ldo2(25, CTRL),
+ regulator_desc_s2mpu05_ldo3(26, CTRL),
+ regulator_desc_s2mpu05_ldo2(27, CTRL),
+ regulator_desc_s2mpu05_ldo3(28, CTRL),
+ regulator_desc_s2mpu05_ldo3(29, CTRL),
+ regulator_desc_s2mpu05_ldo2(30, CTRL),
+ regulator_desc_s2mpu05_ldo3(31, CTRL),
+ regulator_desc_s2mpu05_ldo3(32, CTRL),
+ regulator_desc_s2mpu05_ldo3(33, CTRL),
+ regulator_desc_s2mpu05_ldo3(34, CTRL),
+ regulator_desc_s2mpu05_ldo3(35, CTRL),
+ regulator_desc_s2mpu05_buck123(1),
+ regulator_desc_s2mpu05_buck123(2),
+ regulator_desc_s2mpu05_buck123(3),
+ regulator_desc_s2mpu05_buck45(4),
+ regulator_desc_s2mpu05_buck45(5),
+};
+
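For reference, a macro-expansion sketch of one entry above (no new code, just how the token pasting resolves):

/*
 * regulator_desc_s2mpu05_ldo1(4, CTRL)
 *   -> regulator_desc_s2mpu05_ldo(4, CTRL, S2MPU05_LDO_MIN1, S2MPU05_LDO_STEP1)
 *   -> regulator_desc_s2mpu05_ldo_reg(4, S2MPU05_LDO_MIN1, S2MPU05_LDO_STEP1,
 *                                     S2MPU05_REG_L4CTRL)
 * i.e. a descriptor named "ldo4" with id S2MPU05_LDO4 whose voltage
 * selector and enable bit both live in S2MPU05_REG_L4CTRL.
 */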
static int s2mps11_pmic_probe(struct platform_device *pdev)
{
struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
@@ -1159,6 +1243,11 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
regulators = s2mpu02_regulators;
BUILD_BUG_ON(S2MPS_REGULATOR_MAX < ARRAY_SIZE(s2mpu02_regulators));
break;
+ case S2MPU05:
+ rdev_num = ARRAY_SIZE(s2mpu05_regulators);
+ regulators = s2mpu05_regulators;
+ BUILD_BUG_ON(S2MPS_REGULATOR_MAX < ARRAY_SIZE(s2mpu05_regulators));
+ break;
default:
dev_err(&pdev->dev, "Invalid device type: %u\n",
s2mps11->dev_type);
@@ -1228,6 +1317,7 @@ static const struct platform_device_id s2mps11_pmic_id[] = {
{ "s2mps14-regulator", S2MPS14X},
{ "s2mps15-regulator", S2MPS15X},
{ "s2mpu02-regulator", S2MPU02},
+ { "s2mpu05-regulator", S2MPU05},
{ },
};
MODULE_DEVICE_TABLE(platform, s2mps11_pmic_id);
@@ -1245,5 +1335,5 @@ module_platform_driver(s2mps11_pmic_driver);
/* Module information */
MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
-MODULE_DESCRIPTION("Samsung S2MPS11/S2MPS14/S2MPS15/S2MPU02 Regulator Driver");
+MODULE_DESCRIPTION("Samsung S2MPS11/14/15/S2MPU02/05 Regulator Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c
index ea5024919c2f..90cb1fc13e71 100644
--- a/drivers/remoteproc/imx_dsp_rproc.c
+++ b/drivers/remoteproc/imx_dsp_rproc.c
@@ -19,6 +19,7 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include "imx_rproc.h"
@@ -95,6 +96,7 @@ enum imx_dsp_rp_mbox_messages {
/**
* struct imx_dsp_rproc - DSP remote processor state
* @regmap: regmap handler
+ * @run_stall: reset control handle used for Run/Stall operation
* @rproc: rproc handler
* @dsp_dcfg: device configuration pointer
* @clks: clocks needed by this device
@@ -111,6 +113,7 @@ enum imx_dsp_rp_mbox_messages {
*/
struct imx_dsp_rproc {
struct regmap *regmap;
+ struct reset_control *run_stall;
struct rproc *rproc;
const struct imx_dsp_rproc_dcfg *dsp_dcfg;
struct clk_bulk_data clks[DSP_RPROC_CLK_MAX];
@@ -192,9 +195,7 @@ static int imx8mp_dsp_reset(struct imx_dsp_rproc *priv)
/* Keep reset asserted for 10 cycles */
usleep_range(1, 2);
- regmap_update_bits(priv->regmap, IMX8M_AudioDSP_REG2,
- IMX8M_AudioDSP_REG2_RUNSTALL,
- IMX8M_AudioDSP_REG2_RUNSTALL);
+ reset_control_assert(priv->run_stall);
/* Take the DSP out of reset and keep stalled for FW loading */
pwrctl = readl(dap + IMX8M_DAP_PWRCTL);
@@ -231,13 +232,9 @@ static int imx8ulp_dsp_reset(struct imx_dsp_rproc *priv)
/* Specific configuration for i.MX8MP */
static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8mp = {
- .src_reg = IMX8M_AudioDSP_REG2,
- .src_mask = IMX8M_AudioDSP_REG2_RUNSTALL,
- .src_start = 0,
- .src_stop = IMX8M_AudioDSP_REG2_RUNSTALL,
.att = imx_dsp_rproc_att_imx8mp,
.att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8mp),
- .method = IMX_RPROC_MMIO,
+ .method = IMX_RPROC_RESET_CONTROLLER,
};
static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8mp = {
@@ -329,6 +326,9 @@ static int imx_dsp_rproc_start(struct rproc *rproc)
true,
rproc->bootaddr);
break;
+ case IMX_RPROC_RESET_CONTROLLER:
+ ret = reset_control_deassert(priv->run_stall);
+ break;
default:
return -EOPNOTSUPP;
}
@@ -369,6 +369,9 @@ static int imx_dsp_rproc_stop(struct rproc *rproc)
false,
rproc->bootaddr);
break;
+ case IMX_RPROC_RESET_CONTROLLER:
+ ret = reset_control_assert(priv->run_stall);
+ break;
default:
return -EOPNOTSUPP;
}
@@ -995,6 +998,13 @@ static int imx_dsp_rproc_detect_mode(struct imx_dsp_rproc *priv)
priv->regmap = regmap;
break;
+ case IMX_RPROC_RESET_CONTROLLER:
+ priv->run_stall = devm_reset_control_get_exclusive(dev, "runstall");
+ if (IS_ERR(priv->run_stall)) {
+ dev_err(dev, "Failed to get DSP runstall reset control\n");
+ return PTR_ERR(priv->run_stall);
+ }
+ break;
default:
ret = -EOPNOTSUPP;
break;
diff --git a/drivers/remoteproc/imx_rproc.h b/drivers/remoteproc/imx_rproc.h
index 17a7d051c531..cfd38d37e146 100644
--- a/drivers/remoteproc/imx_rproc.h
+++ b/drivers/remoteproc/imx_rproc.h
@@ -24,6 +24,8 @@ enum imx_rproc_method {
IMX_RPROC_SMC,
/* Through System Control Unit API */
IMX_RPROC_SCU_API,
+ /* Through Reset Controller API */
+ IMX_RPROC_RESET_CONTROLLER,
};
/* dcfg flags */
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 5f463937cbbf..9c7182b3b038 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -724,6 +724,7 @@ out:
* @rproc: remote processor to apply the address translation for
* @da: device address to translate
* @len: length of the memory buffer
+ * @is_iomem: pointer filled in to indicate if @da is iomapped memory
*
* Custom function implementing the rproc .da_to_va ops to provide address
* translation (device address to kernel virtual address) for internal RAMs
diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
index 1656574b7317..4a4eb9c0b133 100644
--- a/drivers/remoteproc/pru_rproc.c
+++ b/drivers/remoteproc/pru_rproc.c
@@ -563,7 +563,7 @@ static int pru_handle_intrmap(struct rproc *rproc)
return -ENODEV;
}
- fwspec.fwnode = of_node_to_fwnode(irq_parent);
+ fwspec.fwnode = of_fwnode_handle(irq_parent);
fwspec.param_count = 3;
for (i = 0; i < pru->evt_count; i++) {
fwspec.param[0] = rsc->pru_intc_map[i].event;
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index e78bd986dc3f..0c0199fb0e68 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -134,6 +134,11 @@
#define BOOT_FSM_TIMEOUT 10000
#define BHS_CHECK_MAX_LOOPS 200
+/* External power block headswitch */
+#define EXTERNAL_BHS_ON BIT(0)
+#define EXTERNAL_BHS_STATUS BIT(4)
+#define EXTERNAL_BHS_TIMEOUT_US 50
+
struct reg_info {
struct regulator *reg;
int uV;
@@ -161,6 +166,7 @@ struct rproc_hexagon_res {
bool has_mba_logs;
bool has_spare_reg;
bool has_qaccept_regs;
+ bool has_ext_bhs_reg;
bool has_ext_cntl_regs;
bool has_vq6;
};
@@ -180,6 +186,7 @@ struct q6v5 {
u32 halt_nc;
u32 halt_vq6;
u32 conn_box;
+ u32 ext_bhs;
u32 qaccept_mdm;
u32 qaccept_cx;
@@ -237,6 +244,7 @@ struct q6v5 {
bool has_mba_logs;
bool has_spare_reg;
bool has_qaccept_regs;
+ bool has_ext_bhs_reg;
bool has_ext_cntl_regs;
bool has_vq6;
u64 mpss_perm;
@@ -246,8 +254,10 @@ struct q6v5 {
};
enum {
+ MSS_MSM8226,
MSS_MSM8909,
MSS_MSM8916,
+ MSS_MSM8926,
MSS_MSM8953,
MSS_MSM8974,
MSS_MSM8996,
@@ -415,6 +425,34 @@ static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
}
}
+static int q6v5_external_bhs_enable(struct q6v5 *qproc)
+{
+ u32 val;
+ int ret = 0;
+
+ /*
+ * Enable external power block headswitch and wait for it to
+ * stabilize
+ */
+ regmap_set_bits(qproc->conn_map, qproc->ext_bhs, EXTERNAL_BHS_ON);
+
+ ret = regmap_read_poll_timeout(qproc->conn_map, qproc->ext_bhs,
+ val, val & EXTERNAL_BHS_STATUS,
+ 1, EXTERNAL_BHS_TIMEOUT_US);
+
+ if (ret) {
+ dev_err(qproc->dev, "External BHS timed out\n");
+ ret = -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
+static void q6v5_external_bhs_disable(struct q6v5 *qproc)
+{
+ regmap_clear_bits(qproc->conn_map, qproc->ext_bhs, EXTERNAL_BHS_ON);
+}
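For readers unfamiliar with the helper, the poll in q6v5_external_bhs_enable() behaves roughly as sketched below (simplified; the real regmap_read_poll_timeout() also propagates regmap_read() errors and re-checks once after the timeout):

/*
 * repeat {
 *     regmap_read(qproc->conn_map, qproc->ext_bhs, &val);
 *     if (val & EXTERNAL_BHS_STATUS)                      -> return 0
 *     if more than EXTERNAL_BHS_TIMEOUT_US (50 us) passed -> -ETIMEDOUT
 *     sleep ~1 us between reads
 * }
 */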
+
static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, u64 *current_perm,
bool local, bool remote, phys_addr_t addr,
size_t size)
@@ -1112,11 +1150,17 @@ static int q6v5_mba_load(struct q6v5 *qproc)
goto disable_proxy_clk;
}
+ if (qproc->has_ext_bhs_reg) {
+ ret = q6v5_external_bhs_enable(qproc);
+ if (ret < 0)
+ goto disable_vdd;
+ }
+
ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
qproc->reset_clk_count);
if (ret) {
dev_err(qproc->dev, "failed to enable reset clocks\n");
- goto disable_vdd;
+ goto disable_ext_bhs;
}
ret = q6v5_reset_deassert(qproc);
@@ -1214,6 +1258,9 @@ assert_reset:
disable_reset_clks:
q6v5_clk_disable(qproc->dev, qproc->reset_clks,
qproc->reset_clk_count);
+disable_ext_bhs:
+ if (qproc->has_ext_bhs_reg)
+ q6v5_external_bhs_disable(qproc);
disable_vdd:
q6v5_regulator_disable(qproc, qproc->active_regs,
qproc->active_reg_count);
@@ -1281,6 +1328,8 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
qproc->reset_clk_count);
q6v5_clk_disable(qproc->dev, qproc->active_clks,
qproc->active_clk_count);
+ if (qproc->has_ext_bhs_reg)
+ q6v5_external_bhs_disable(qproc);
q6v5_regulator_disable(qproc, qproc->active_regs,
qproc->active_reg_count);
@@ -1750,6 +1799,23 @@ static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
qproc->qaccept_axi = args.args[2];
}
+ if (qproc->has_ext_bhs_reg) {
+ ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
+ "qcom,ext-bhs-reg",
+ 1, 0, &args);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to parse ext-bhs-reg index 0\n");
+ return -EINVAL;
+ }
+
+ qproc->conn_map = syscon_node_to_regmap(args.np);
+ of_node_put(args.np);
+ if (IS_ERR(qproc->conn_map))
+ return PTR_ERR(qproc->conn_map);
+
+ qproc->ext_bhs = args.args[0];
+ }
+
if (qproc->has_ext_cntl_regs) {
ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
"qcom,ext-regs",
@@ -1831,6 +1897,13 @@ static int q6v5_pds_attach(struct device *dev, struct device **devs,
while (pd_names[num_pds])
num_pds++;
+ /* Handle single power domain */
+ if (num_pds == 1 && dev->pm_domain) {
+ devs[0] = dev;
+ pm_runtime_enable(dev);
+ return 1;
+ }
+
for (i = 0; i < num_pds; i++) {
devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
if (IS_ERR_OR_NULL(devs[i])) {
@@ -1851,8 +1924,15 @@ unroll_attach:
static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
size_t pd_count)
{
+ struct device *dev = qproc->dev;
int i;
+ /* Handle single power domain */
+ if (pd_count == 1 && dev->pm_domain) {
+ pm_runtime_disable(dev);
+ return;
+ }
+
for (i = 0; i < pd_count; i++)
dev_pm_domain_detach(pds[i], false);
}
@@ -2007,6 +2087,7 @@ static int q6v5_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, qproc);
qproc->has_qaccept_regs = desc->has_qaccept_regs;
+ qproc->has_ext_bhs_reg = desc->has_ext_bhs_reg;
qproc->has_ext_cntl_regs = desc->has_ext_cntl_regs;
qproc->has_vq6 = desc->has_vq6;
qproc->has_spare_reg = desc->has_spare_reg;
@@ -2160,6 +2241,7 @@ static const struct rproc_hexagon_res sc7180_mss = {
.has_mba_logs = true,
.has_spare_reg = true,
.has_qaccept_regs = false,
+ .has_ext_bhs_reg = false,
.has_ext_cntl_regs = false,
.has_vq6 = false,
.version = MSS_SC7180,
@@ -2188,6 +2270,7 @@ static const struct rproc_hexagon_res sc7280_mss = {
.has_mba_logs = true,
.has_spare_reg = false,
.has_qaccept_regs = true,
+ .has_ext_bhs_reg = false,
.has_ext_cntl_regs = true,
.has_vq6 = true,
.version = MSS_SC7280,
@@ -2219,6 +2302,7 @@ static const struct rproc_hexagon_res sdm660_mss = {
.has_mba_logs = false,
.has_spare_reg = false,
.has_qaccept_regs = false,
+ .has_ext_bhs_reg = false,
.has_ext_cntl_regs = false,
.has_vq6 = false,
.version = MSS_SDM660,
@@ -2254,6 +2338,7 @@ static const struct rproc_hexagon_res sdm845_mss = {
.has_mba_logs = false,
.has_spare_reg = false,
.has_qaccept_regs = false,
+ .has_ext_bhs_reg = false,
.has_ext_cntl_regs = false,
.has_vq6 = false,
.version = MSS_SDM845,
@@ -2285,6 +2370,7 @@ static const struct rproc_hexagon_res msm8998_mss = {
.has_mba_logs = false,
.has_spare_reg = false,
.has_qaccept_regs = false,
+ .has_ext_bhs_reg = false,
.has_ext_cntl_regs = false,
.has_vq6 = false,
.version = MSS_MSM8998,
@@ -2323,6 +2409,7 @@ static const struct rproc_hexagon_res msm8996_mss = {
.has_mba_logs = false,
.has_spare_reg = false,
.has_qaccept_regs = false,
+ .has_ext_bhs_reg = false,
.has_ext_cntl_regs = false,
.has_vq6 = false,
.version = MSS_MSM8996,
@@ -2357,6 +2444,7 @@ static const struct rproc_hexagon_res msm8909_mss = {
.has_mba_logs = false,
.has_spare_reg = false,
.has_qaccept_regs = false,
+ .has_ext_bhs_reg = false,
.has_ext_cntl_regs = false,
.has_vq6 = false,
.version = MSS_MSM8909,
@@ -2402,6 +2490,7 @@ static const struct rproc_hexagon_res msm8916_mss = {
.has_mba_logs = false,
.has_spare_reg = false,
.has_qaccept_regs = false,
+ .has_ext_bhs_reg = false,
.has_ext_cntl_regs = false,
.has_vq6 = false,
.version = MSS_MSM8916,
@@ -2437,6 +2526,7 @@ static const struct rproc_hexagon_res msm8953_mss = {
.has_mba_logs = false,
.has_spare_reg = false,
.has_qaccept_regs = false,
+ .has_ext_bhs_reg = false,
.has_ext_cntl_regs = false,
.has_vq6 = false,
.version = MSS_MSM8953,
@@ -2449,17 +2539,101 @@ static const struct rproc_hexagon_res msm8974_mss = {
.supply = "pll",
.uA = 100000,
},
+ {
+ .supply = "mx",
+ .uV = 1050000,
+ },
{}
},
.fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
{
+ .supply = "cx",
+ .uA = 100000,
+ },
+ {}
+ },
+ .active_supply = (struct qcom_mss_reg_res[]) {
+ {
+ .supply = "mss",
+ .uV = 1050000,
+ .uA = 100000,
+ },
+ {}
+ },
+ .proxy_clk_names = (char*[]){
+ "xo",
+ NULL
+ },
+ .active_clk_names = (char*[]){
+ "iface",
+ "bus",
+ "mem",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "cx",
+ NULL
+ },
+ .need_mem_protection = false,
+ .has_alt_reset = false,
+ .has_mba_logs = false,
+ .has_spare_reg = false,
+ .has_qaccept_regs = false,
+ .has_ext_bhs_reg = false,
+ .has_ext_cntl_regs = false,
+ .has_vq6 = false,
+ .version = MSS_MSM8974,
+};
+
+static const struct rproc_hexagon_res msm8226_mss = {
+ .hexagon_mba_image = "mba.b00",
+ .proxy_supply = (struct qcom_mss_reg_res[]) {
+ {
+ .supply = "pll",
+ .uA = 100000,
+ },
+ {
.supply = "mx",
.uV = 1050000,
},
+ {}
+ },
+ .proxy_clk_names = (char*[]){
+ "xo",
+ NULL
+ },
+ .active_clk_names = (char*[]){
+ "iface",
+ "bus",
+ "mem",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "cx",
+ NULL
+ },
+ .need_mem_protection = false,
+ .has_alt_reset = false,
+ .has_mba_logs = false,
+ .has_spare_reg = false,
+ .has_qaccept_regs = false,
+ .has_ext_bhs_reg = true,
+ .has_ext_cntl_regs = false,
+ .has_vq6 = false,
+ .version = MSS_MSM8226,
+};
+
+static const struct rproc_hexagon_res msm8926_mss = {
+ .hexagon_mba_image = "mba.b00",
+ .proxy_supply = (struct qcom_mss_reg_res[]) {
{
- .supply = "cx",
+ .supply = "pll",
.uA = 100000,
},
+ {
+ .supply = "mx",
+ .uV = 1050000,
+ },
{}
},
.active_supply = (struct qcom_mss_reg_res[]) {
@@ -2481,7 +2655,6 @@ static const struct rproc_hexagon_res msm8974_mss = {
NULL
},
.proxy_pd_names = (char*[]){
- "mx",
"cx",
NULL
},
@@ -2490,15 +2663,18 @@ static const struct rproc_hexagon_res msm8974_mss = {
.has_mba_logs = false,
.has_spare_reg = false,
.has_qaccept_regs = false,
+ .has_ext_bhs_reg = false,
.has_ext_cntl_regs = false,
.has_vq6 = false,
- .version = MSS_MSM8974,
+ .version = MSS_MSM8926,
};
static const struct of_device_id q6v5_of_match[] = {
{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
+ { .compatible = "qcom,msm8226-mss-pil", .data = &msm8226_mss},
{ .compatible = "qcom,msm8909-mss-pil", .data = &msm8909_mss},
{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
+ { .compatible = "qcom,msm8926-mss-pil", .data = &msm8926_mss},
{ .compatible = "qcom,msm8953-mss-pil", .data = &msm8953_mss},
{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index 97c4bdd9222a..b306f223127c 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -501,16 +501,16 @@ static int adsp_pds_attach(struct device *dev, struct device **devs,
if (!pd_names)
return 0;
+ while (pd_names[num_pds])
+ num_pds++;
+
/* Handle single power domain */
- if (dev->pm_domain) {
+ if (num_pds == 1 && dev->pm_domain) {
devs[0] = dev;
pm_runtime_enable(dev);
return 1;
}
- while (pd_names[num_pds])
- num_pds++;
-
for (i = 0; i < num_pds; i++) {
devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
if (IS_ERR_OR_NULL(devs[i])) {
@@ -535,7 +535,7 @@ static void adsp_pds_detach(struct qcom_adsp *adsp, struct device **pds,
int i;
/* Handle single power domain */
- if (dev->pm_domain && pd_count) {
+ if (pd_count == 1 && dev->pm_domain) {
pm_runtime_disable(dev);
return;
}
@@ -1348,6 +1348,7 @@ static const struct adsp_data sc7280_wpss_resource = {
.crash_reason_smem = 626,
.firmware_name = "wpss.mdt",
.pas_id = 6,
+ .minidump_id = 4,
.auto_boot = false,
.proxy_pd_names = (char*[]){
"cx",
@@ -1409,8 +1410,32 @@ static const struct adsp_data sm8650_mpss_resource = {
.region_assign_vmid = QCOM_SCM_VMID_MSS_MSA,
};
+static const struct adsp_data sm8750_mpss_resource = {
+ .crash_reason_smem = 421,
+ .firmware_name = "modem.mdt",
+ .dtb_firmware_name = "modem_dtb.mdt",
+ .pas_id = 4,
+ .dtb_pas_id = 0x26,
+ .minidump_id = 3,
+ .auto_boot = false,
+ .decrypt_shutdown = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mss",
+ NULL
+ },
+ .load_state = "modem",
+ .ssr_name = "mpss",
+ .sysmon_name = "modem",
+ .ssctl_id = 0x12,
+ .smem_host_id = 1,
+ .region_assign_idx = 2,
+ .region_assign_count = 2,
+ .region_assign_vmid = QCOM_SCM_VMID_MSS_MSA,
+};
+
static const struct of_device_id adsp_of_match[] = {
- { .compatible = "qcom,msm8226-adsp-pil", .data = &adsp_resource_init},
+ { .compatible = "qcom,msm8226-adsp-pil", .data = &msm8996_adsp_resource},
{ .compatible = "qcom,msm8953-adsp-pil", .data = &msm8996_adsp_resource},
{ .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init},
{ .compatible = "qcom,msm8996-adsp-pil", .data = &msm8996_adsp_resource},
@@ -1474,6 +1499,7 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,sm8650-adsp-pas", .data = &sm8550_adsp_resource},
{ .compatible = "qcom,sm8650-cdsp-pas", .data = &sm8650_cdsp_resource},
{ .compatible = "qcom,sm8650-mpss-pas", .data = &sm8650_mpss_resource},
+ { .compatible = "qcom,sm8750-mpss-pas", .data = &sm8750_mpss_resource},
{ .compatible = "qcom,x1e80100-adsp-pas", .data = &x1e80100_adsp_resource},
{ .compatible = "qcom,x1e80100-cdsp-pas", .data = &x1e80100_cdsp_resource},
{ },
diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c
index c24e4a882873..660ac6fc4082 100644
--- a/drivers/remoteproc/qcom_sysmon.c
+++ b/drivers/remoteproc/qcom_sysmon.c
@@ -619,7 +619,7 @@ static irqreturn_t sysmon_shutdown_interrupt(int irq, void *data)
* @name: name of this subdev, to use in SSR
* @ssctl_instance: instance id of the ssctl QMI service
*
- * Return: A new qcom_sysmon object, or NULL on failure
+ * Return: A new qcom_sysmon object, or an error pointer on failure
*/
struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
const char *name,
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index 5b5664603eed..775b056d795a 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -117,10 +117,10 @@ static const struct wcnss_data pronto_v1_data = {
.pmu_offset = 0x1004,
.spare_offset = 0x1088,
- .pd_names = { "mx", "cx" },
+ .pd_names = { "cx", "mx" },
.vregs = (struct wcnss_vreg_info[]) {
- { "vddmx", 950000, 1150000, 0 },
{ "vddcx", .super_turbo = true},
+ { "vddmx", 950000, 1150000, 0 },
{ "vddpx", 1800000, 1800000, 0 },
},
.num_pd_vregs = 2,
@@ -131,10 +131,10 @@ static const struct wcnss_data pronto_v2_data = {
.pmu_offset = 0x1004,
.spare_offset = 0x1088,
- .pd_names = { "mx", "cx" },
+ .pd_names = { "cx", "mx" },
.vregs = (struct wcnss_vreg_info[]) {
- { "vddmx", 1287500, 1287500, 0 },
{ "vddcx", .super_turbo = true },
+ { "vddmx", 1287500, 1287500, 0 },
{ "vddpx", 1800000, 1800000, 0 },
},
.num_pd_vregs = 2,
@@ -397,8 +397,17 @@ static irqreturn_t wcnss_stop_ack_interrupt(int irq, void *dev)
static int wcnss_init_pds(struct qcom_wcnss *wcnss,
const char * const pd_names[WCNSS_MAX_PDS])
{
+ struct device *dev = wcnss->dev;
int i, ret;
+ /* Handle single power domain */
+ if (dev->pm_domain) {
+ wcnss->pds[0] = dev;
+ wcnss->num_pds = 1;
+ pm_runtime_enable(dev);
+ return 0;
+ }
+
for (i = 0; i < WCNSS_MAX_PDS; i++) {
if (!pd_names[i])
break;
@@ -418,8 +427,15 @@ static int wcnss_init_pds(struct qcom_wcnss *wcnss,
static void wcnss_release_pds(struct qcom_wcnss *wcnss)
{
+ struct device *dev = wcnss->dev;
int i;
+ /* Handle single power domain */
+ if (wcnss->num_pds == 1 && dev->pm_domain) {
+ pm_runtime_disable(dev);
+ return;
+ }
+
for (i = 0; i < wcnss->num_pds; i++)
dev_pm_domain_detach(wcnss->pds[i], false);
}
@@ -437,10 +453,13 @@ static int wcnss_init_regulators(struct qcom_wcnss *wcnss,
* the regulators for the power domains. For old device trees we need to
* reserve extra space to manage them through the regulator interface.
*/
- if (wcnss->num_pds)
- info += num_pd_vregs;
- else
+ if (wcnss->num_pds) {
+ info += wcnss->num_pds;
+ /* Handle single power domain case */
+ num_vregs += num_pd_vregs - wcnss->num_pds;
+ } else {
num_vregs += num_pd_vregs;
+ }
bulk = devm_kcalloc(wcnss->dev,
num_vregs, sizeof(struct regulator_bulk_data),
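Worked example for the regulator bookkeeping above, taking pronto_v2 and assuming .num_vregs = 1 for the remaining vddpx entry (that field is not shown in this hunk): when both cx and mx attach as power domains, info skips the first two vreg entries (vddcx, vddmx) and only vddpx is requested. On a single-power-domain platform wcnss->num_pds is 1, so info skips only vddcx and num_vregs becomes 1 + (2 - 1) = 2, keeping vddmx managed through the regulator interface alongside vddpx. This is also why pd_names and the vreg tables are reordered earlier in this file: the first num_pds vreg entries have to line up with the attached power domains.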
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index c2cf0d277729..b21eedefff87 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -2025,6 +2025,7 @@ int rproc_shutdown(struct rproc *rproc)
kfree(rproc->cached_table);
rproc->cached_table = NULL;
rproc->table_ptr = NULL;
+ rproc->table_sz = 0;
out:
mutex_unlock(&rproc->lock);
return ret;
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 5b3abb6db248..99f6f9784e68 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -96,6 +96,13 @@ config RESET_HSDK
help
This enables the reset controller driver for HSDK board.
+config RESET_IMX_SCU
+ tristate "i.MX8Q Reset Driver"
+ depends on IMX_SCU && HAVE_ARM_SMCCC
+ depends on (ARM64 && ARCH_MXC) || COMPILE_TEST
+ help
+ This enables the reset controller driver for i.MX8QM/i.MX8QXP
+
config RESET_IMX7
tristate "i.MX7/8 Reset Driver"
depends on HAS_IOMEM
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 677c4d1e2632..31f9904d13f9 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_RESET_BRCMSTB_RESCAL) += reset-brcmstb-rescal.o
obj-$(CONFIG_RESET_EYEQ) += reset-eyeq.o
obj-$(CONFIG_RESET_GPIO) += reset-gpio.o
obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o
+obj-$(CONFIG_RESET_IMX_SCU) += reset-imx-scu.o
obj-$(CONFIG_RESET_IMX7) += reset-imx7.o
obj-$(CONFIG_RESET_IMX8MP_AUDIOMIX) += reset-imx8mp-audiomix.o
obj-$(CONFIG_RESET_INTEL_GW) += reset-intel-gw.o
diff --git a/drivers/reset/reset-imx-scu.c b/drivers/reset/reset-imx-scu.c
new file mode 100644
index 000000000000..919fc29f944c
--- /dev/null
+++ b/drivers/reset/reset-imx-scu.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2025 NXP
+ * Frank Li <Frank.Li@nxp.com>
+ */
+#include <linux/firmware/imx/svc/misc.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/firmware/imx/rsrc.h>
+
+struct imx_scu_reset {
+ struct reset_controller_dev rc;
+ struct imx_sc_ipc *ipc_handle;
+};
+
+static struct imx_scu_reset *to_imx_scu(struct reset_controller_dev *rc)
+{
+ return container_of(rc, struct imx_scu_reset, rc);
+}
+
+struct imx_scu_id_map {
+ u32 resource_id;
+ u32 command_id;
+};
+
+static const struct imx_scu_id_map imx_scu_id_map[] = {
+ { IMX_SC_R_CSI_0, IMX_SC_C_MIPI_RESET },
+ { IMX_SC_R_CSI_1, IMX_SC_C_MIPI_RESET },
+};
+
+static int imx_scu_reset_assert(struct reset_controller_dev *rc, unsigned long id)
+{
+ struct imx_scu_reset *priv = to_imx_scu(rc);
+
+ return imx_sc_misc_set_control(priv->ipc_handle, imx_scu_id_map[id].resource_id,
+ imx_scu_id_map[id].command_id, true);
+}
+
+static const struct reset_control_ops imx_scu_reset_ops = {
+ .assert = imx_scu_reset_assert,
+};
+
+static int imx_scu_xlate(struct reset_controller_dev *rc, const struct of_phandle_args *reset_spec)
+{
+ int i;
+
+ for (i = 0; i < rc->nr_resets; i++)
+ if (reset_spec->args[0] == imx_scu_id_map[i].resource_id)
+ return i;
+
+ return -EINVAL;
+}
+
+static int imx_scu_reset_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct imx_scu_reset *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, &priv->rc);
+
+ ret = imx_scu_get_handle(&priv->ipc_handle);
+ if (ret)
+ return dev_err_probe(dev, ret, "sc_misc_MIPI get ipc handle failed!\n");
+
+ priv->rc.ops = &imx_scu_reset_ops;
+ priv->rc.owner = THIS_MODULE;
+ priv->rc.of_node = dev->of_node;
+ priv->rc.of_reset_n_cells = 1;
+ priv->rc.of_xlate = imx_scu_xlate;
+ priv->rc.nr_resets = ARRAY_SIZE(imx_scu_id_map);
+
+ return devm_reset_controller_register(dev, &priv->rc);
+}
+
+static const struct of_device_id imx_scu_reset_ids[] = {
+ { .compatible = "fsl,imx-scu-reset", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, imx_scu_reset_ids);
+
+static struct platform_driver imx_scu_reset_driver = {
+ .probe = imx_scu_reset_probe,
+ .driver = {
+ .name = "scu-reset",
+ .of_match_table = imx_scu_reset_ids,
+ },
+};
+module_platform_driver(imx_scu_reset_driver);
+
+MODULE_AUTHOR("Frank Li <Frank.Li@nxp.com>");
+MODULE_DESCRIPTION("i.MX scu reset driver");
+MODULE_LICENSE("GPL");
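Consumers do not address this controller by a raw index: the of_xlate hook maps the SCU resource id carried in the reset specifier (for example IMX_SC_R_CSI_0) to a row of imx_scu_id_map. A hedged consumer-side sketch with a hypothetical CSI driver; only .assert is implemented by the controller, so the sketch stops there:

#include <linux/platform_device.h>
#include <linux/reset.h>

/* Device tree (illustrative): resets = <&scu_reset IMX_SC_R_CSI_0>; */
static int example_csi_probe(struct platform_device *pdev)
{
        struct reset_control *rst;

        rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
        if (IS_ERR(rst))
                return PTR_ERR(rst);

        /* Ends up in imx_sc_misc_set_control(..., IMX_SC_C_MIPI_RESET, true) */
        return reset_control_assert(rst);
}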
diff --git a/drivers/reset/reset-imx8mp-audiomix.c b/drivers/reset/reset-imx8mp-audiomix.c
index 6e3f3069f727..6b357adfe646 100644
--- a/drivers/reset/reset-imx8mp-audiomix.c
+++ b/drivers/reset/reset-imx8mp-audiomix.c
@@ -3,6 +3,8 @@
* Copyright 2024 NXP
*/
+#include <dt-bindings/reset/imx8mp-reset-audiomix.h>
+
#include <linux/auxiliary_bus.h>
#include <linux/device.h>
#include <linux/io.h>
@@ -11,8 +13,36 @@
#include <linux/of_address.h>
#include <linux/reset-controller.h>
-#define EARC 0x200
-#define EARC_RESET_MASK 0x3
+#define IMX8MP_AUDIOMIX_EARC_RESET_OFFSET 0x200
+#define IMX8MP_AUDIOMIX_EARC_RESET_MASK BIT(1)
+#define IMX8MP_AUDIOMIX_EARC_PHY_RESET_MASK BIT(2)
+
+#define IMX8MP_AUDIOMIX_DSP_RUNSTALL_OFFSET 0x108
+#define IMX8MP_AUDIOMIX_DSP_RUNSTALL_MASK BIT(5)
+
+struct imx8mp_reset_map {
+ unsigned int offset;
+ unsigned int mask;
+ bool active_low;
+};
+
+static const struct imx8mp_reset_map reset_map[] = {
+ [IMX8MP_AUDIOMIX_EARC_RESET] = {
+ .offset = IMX8MP_AUDIOMIX_EARC_RESET_OFFSET,
+ .mask = IMX8MP_AUDIOMIX_EARC_RESET_MASK,
+ .active_low = true,
+ },
+ [IMX8MP_AUDIOMIX_EARC_PHY_RESET] = {
+ .offset = IMX8MP_AUDIOMIX_EARC_RESET_OFFSET,
+ .mask = IMX8MP_AUDIOMIX_EARC_PHY_RESET_MASK,
+ .active_low = true,
+ },
+ [IMX8MP_AUDIOMIX_DSP_RUNSTALL] = {
+ .offset = IMX8MP_AUDIOMIX_DSP_RUNSTALL_OFFSET,
+ .mask = IMX8MP_AUDIOMIX_DSP_RUNSTALL_MASK,
+ .active_low = false,
+ },
+};
struct imx8mp_audiomix_reset {
struct reset_controller_dev rcdev;
@@ -25,38 +55,42 @@ static struct imx8mp_audiomix_reset *to_imx8mp_audiomix_reset(struct reset_contr
return container_of(rcdev, struct imx8mp_audiomix_reset, rcdev);
}
-static int imx8mp_audiomix_reset_assert(struct reset_controller_dev *rcdev,
- unsigned long id)
+static int imx8mp_audiomix_update(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
{
struct imx8mp_audiomix_reset *priv = to_imx8mp_audiomix_reset(rcdev);
void __iomem *reg_addr = priv->base;
- unsigned int mask, reg;
- unsigned long flags;
+ unsigned int mask, offset, active_low;
+ unsigned long reg, flags;
+
+ mask = reset_map[id].mask;
+ offset = reset_map[id].offset;
+ active_low = reset_map[id].active_low;
- mask = BIT(id);
spin_lock_irqsave(&priv->lock, flags);
- reg = readl(reg_addr + EARC);
- writel(reg & ~mask, reg_addr + EARC);
+
+ reg = readl(reg_addr + offset);
+ if (active_low ^ assert)
+ reg |= mask;
+ else
+ reg &= ~mask;
+ writel(reg, reg_addr + offset);
+
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
+static int imx8mp_audiomix_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return imx8mp_audiomix_update(rcdev, id, true);
+}
+
static int imx8mp_audiomix_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
- struct imx8mp_audiomix_reset *priv = to_imx8mp_audiomix_reset(rcdev);
- void __iomem *reg_addr = priv->base;
- unsigned int mask, reg;
- unsigned long flags;
-
- mask = BIT(id);
- spin_lock_irqsave(&priv->lock, flags);
- reg = readl(reg_addr + EARC);
- writel(reg | mask, reg_addr + EARC);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
+ return imx8mp_audiomix_update(rcdev, id, false);
}
static const struct reset_control_ops imx8mp_audiomix_reset_ops = {
@@ -78,7 +112,7 @@ static int imx8mp_audiomix_reset_probe(struct auxiliary_device *adev,
spin_lock_init(&priv->lock);
priv->rcdev.owner = THIS_MODULE;
- priv->rcdev.nr_resets = fls(EARC_RESET_MASK);
+ priv->rcdev.nr_resets = ARRAY_SIZE(reset_map);
priv->rcdev.ops = &imx8mp_audiomix_reset_ops;
priv->rcdev.of_node = dev->parent->of_node;
priv->rcdev.dev = dev;
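The polarity handling in imx8mp_audiomix_update() condenses to a single XOR: the bit ends up set whenever active_low ^ assert is true. A small sketch of the same rule, with the resulting truth table as a comment (hypothetical helper, not from the driver):

#include <linux/types.h>

/*
 *   line type                      assert      deassert
 *   active-low reset (EARC)        clear bit   set bit
 *   active-high stall (RunStall)   set bit     clear bit
 */
static inline unsigned int example_apply(unsigned int reg, unsigned int mask,
                                         bool active_low, bool assert)
{
        return (active_low ^ assert) ? (reg | mask) : (reg & ~mask);
}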
diff --git a/drivers/reset/reset-microchip-sparx5.c b/drivers/reset/reset-microchip-sparx5.c
index aa5464be7053..6d3e75b33260 100644
--- a/drivers/reset/reset-microchip-sparx5.c
+++ b/drivers/reset/reset-microchip-sparx5.c
@@ -8,6 +8,7 @@
*/
#include <linux/mfd/syscon.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
@@ -72,14 +73,22 @@ static struct regmap *mchp_lan966x_syscon_to_regmap(struct device *dev,
struct device_node *syscon_np)
{
struct regmap_config regmap_config = mchp_lan966x_syscon_regmap_config;
- resource_size_t size;
+ struct resource res;
void __iomem *base;
+ int err;
+
+ err = of_address_to_resource(syscon_np, 0, &res);
+ if (err)
+ return ERR_PTR(err);
- base = devm_of_iomap(dev, syscon_np, 0, &size);
- if (IS_ERR(base))
- return ERR_CAST(base);
+ /* It is not possible to use devm_of_iomap because this resource is
+ * shared with other drivers.
+ */
+ base = devm_ioremap(dev, res.start, resource_size(&res));
+ if (!base)
+ return ERR_PTR(-ENOMEM);
- regmap_config.max_register = size - 4;
+ regmap_config.max_register = resource_size(&res) - 4;
return devm_regmap_init_mmio(dev, base, &regmap_config);
}
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index e31fa0ad127e..b88cd4fb295b 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -240,8 +240,7 @@ static struct rtc_device *rtc_allocate_device(void)
/* Init uie timer */
rtc_timer_init(&rtc->uie_rtctimer, rtc_uie_update_irq, rtc);
/* Init pie timer */
- hrtimer_init(&rtc->pie_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- rtc->pie_timer.function = rtc_pie_update_irq;
+ hrtimer_setup(&rtc->pie_timer, rtc_pie_update_irq, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rtc->pie_enabled = 0;
set_bit(RTC_FEATURE_ALARM, rtc->features);
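hrtimer_setup() folds the old two-step pattern (hrtimer_init() followed by assigning timer->function) into one call, which is what this hunk and the s390 AP bus hunk further down both switch to. A minimal sketch of the new pattern in a hypothetical driver:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer example_timer;

static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
{
        /* periodic work would go here */
        return HRTIMER_NORESTART;
}

static void example_timer_start(void)
{
        /* callback is bound at setup time instead of via timer->function */
        hrtimer_setup(&example_timer, example_timer_fn, CLOCK_MONOTONIC,
                      HRTIMER_MODE_REL);
        hrtimer_start(&example_timer, ms_to_ktime(10), HRTIMER_MODE_REL);
}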
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 3ed642f4f00d..31bfb49588c2 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -21,6 +21,7 @@
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
+#include <asm/machine.h>
#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
@@ -3382,7 +3383,7 @@ int dasd_device_is_ro(struct dasd_device *device)
struct diag210 diag_data;
int rc;
- if (!MACHINE_IS_VM)
+ if (!machine_is_vm())
return 0;
ccw_device_get_id(device->cdev, &dev_id);
memset(&diag_data, 0, sizeof(diag_data));
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 71d8fb86139d..3bb69069dfc6 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/slab.h>
+#include <asm/machine.h>
#include <asm/debug.h>
#include <linux/uaccess.h>
#include <asm/ipl.h>
@@ -234,7 +235,7 @@ static int __init dasd_parse_keyword(char *keyword)
return 0;
}
if (strncmp("nopav", keyword, length) == 0) {
- if (MACHINE_IS_VM)
+ if (machine_is_vm())
pr_info("'nopav' is not supported on z/VM\n");
else {
dasd_nopav = 1;
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 26812abddef1..56f1af8a7ddd 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -18,6 +18,7 @@
#include <linux/init.h>
#include <linux/jiffies.h>
#include <asm/asm-extable.h>
+#include <asm/machine.h>
#include <asm/dasd.h>
#include <asm/debug.h>
#include <asm/diag.h>
@@ -75,7 +76,7 @@ static inline int __dia250(void *iob, int cmd)
} addr_type;
exception = 1;
- asm volatile(
+ asm_inline volatile(
" diag %[rx],%[cmd],0x250\n"
"0: lhi %[exc],0\n"
"1:\n"
@@ -654,7 +655,7 @@ static struct dasd_discipline dasd_diag_discipline = {
static int __init
dasd_diag_init(void)
{
- if (!MACHINE_IS_VM) {
+ if (!machine_is_vm()) {
pr_info("Discipline %s cannot be used without z/VM\n",
dasd_diag_discipline.name);
return -ENODEV;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 1ebe589b5185..88fa17aea2ec 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -23,6 +23,7 @@
#include <linux/io.h>
#include <asm/css_chars.h>
+#include <asm/machine.h>
#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
@@ -1953,7 +1954,7 @@ static int dasd_eckd_validate_server(struct dasd_device *device,
if (private->uid.type == UA_BASE_PAV_ALIAS ||
private->uid.type == UA_HYPER_PAV_ALIAS)
return 0;
- if (dasd_nopav || MACHINE_IS_VM)
+ if (dasd_nopav || machine_is_vm())
enable_pav = 0;
else
enable_pav = 1;
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index c763c50d1454..6a61c0a595d9 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -23,6 +23,7 @@
#include <linux/reboot.h>
#include <linux/serial.h> /* ASYNC_* flags */
#include <linux/slab.h>
+#include <asm/machine.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <linux/io.h>
@@ -907,7 +908,7 @@ static int __init con3215_init(void)
return -ENODEV;
/* Set the console mode for VM */
- if (MACHINE_IS_VM) {
+ if (machine_is_vm()) {
cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
cpcmd("TERM AUTOCR OFF", NULL, 0, NULL);
}
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index ae1b9aa3a2b5..1a3190848670 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -23,6 +23,7 @@
#include <linux/memblock.h>
#include <linux/compat.h>
+#include <asm/machine.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/ebcdic.h>
@@ -2156,7 +2157,7 @@ con3270_init(void)
return -ENODEV;
/* Set the console mode for VM */
- if (MACHINE_IS_VM) {
+ if (machine_is_vm()) {
cpcmd("TERM CONMODE 3270", NULL, 0, NULL);
cpcmd("TERM AUTOCR OFF", NULL, 0, NULL);
}
diff --git a/drivers/s390/char/diag_ftp.c b/drivers/s390/char/diag_ftp.c
index 9418a9270d03..711f6982438e 100644
--- a/drivers/s390/char/diag_ftp.c
+++ b/drivers/s390/char/diag_ftp.c
@@ -106,7 +106,7 @@ static int diag_ftp_2c4(struct diag_ftp_ldfpl *fpl,
int rc;
diag_stat_inc(DIAG_STAT_X2C4);
- asm volatile(
+ asm_inline volatile(
" diag %[addr],%[cmd],0x2c4\n"
"0: j 2f\n"
"1: la %[rc],%[err]\n"
diff --git a/drivers/s390/char/hmcdrv_ftp.c b/drivers/s390/char/hmcdrv_ftp.c
index 02b6f394aec2..4e3c7ec6749b 100644
--- a/drivers/s390/char/hmcdrv_ftp.c
+++ b/drivers/s390/char/hmcdrv_ftp.c
@@ -17,6 +17,8 @@
#include <linux/ctype.h>
#include <linux/crc16.h>
+#include <asm/machine.h>
+
#include "hmcdrv_ftp.h"
#include "hmcdrv_cache.h"
#include "sclp_ftp.h"
@@ -308,9 +310,9 @@ int hmcdrv_ftp_startup(void)
mutex_lock(&hmcdrv_ftp_mutex); /* block transfers while start-up */
if (hmcdrv_ftp_refcnt == 0) {
- if (MACHINE_IS_VM)
+ if (machine_is_vm())
hmcdrv_ftp_funcs = &hmcdrv_ftp_zvm;
- else if (MACHINE_IS_LPAR || MACHINE_IS_KVM)
+ else if (machine_is_lpar() || machine_is_kvm())
hmcdrv_ftp_funcs = &hmcdrv_ftp_lpar;
else
rc = -EOPNOTSUPP;
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 7207a7f5842a..2d9886651d9b 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <net/iucv/iucv.h>
#include <linux/uaccess.h>
+#include <asm/machine.h>
#include <asm/ebcdic.h>
#include <asm/extmem.h>
@@ -456,7 +457,7 @@ static int __init mon_init(void)
{
int rc;
- if (!MACHINE_IS_VM) {
+ if (!machine_is_vm()) {
pr_err("The z/VM *MONITOR record device driver cannot be "
"loaded without z/VM\n");
return -ENODEV;
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index bc5193d81f9c..0fab1f025a94 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <asm/machine.h>
#include <asm/ebcdic.h>
#include <asm/appldata.h>
#include <asm/monwriter.h>
@@ -293,7 +294,7 @@ static struct miscdevice mon_dev = {
static int __init mon_init(void)
{
- if (!MACHINE_IS_VM)
+ if (!machine_is_vm())
return -ENODEV;
/*
* misc_register() has to be the last action in module_init(), because
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index ba3d7114b34f..d2ce7f80ae8d 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -17,6 +17,7 @@
#include <linux/types.h>
#include <linux/wait.h>
+#include <asm/machine.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/ebcdic.h>
@@ -618,7 +619,7 @@ static void raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
if (rq->rc) {
/* Reset command failed. */
rp->state = RAW3270_STATE_INIT;
- } else if (MACHINE_IS_VM) {
+ } else if (machine_is_vm()) {
raw3270_size_device_vm(rp);
raw3270_size_device_done(rp);
} else {
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 07a6e8a7f05a..b31a680e0871 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include <linux/list.h>
#include <asm/asm-extable.h>
+#include <asm/machine.h>
#include <asm/sclp.h>
#include <asm/ebcdic.h>
#include <asm/asm.h>
@@ -317,7 +318,7 @@ static inline int sclp_service_call(sclp_cmdw_t command, void *sccb)
int cc, exception;
exception = 1;
- asm volatile(
+ asm_inline volatile(
"0: .insn rre,0xb2200000,%[cmd],%[sccb]\n" /* servc */
"1: lhi %[exc],0\n"
"2:\n"
@@ -342,21 +343,21 @@ static inline int sclp_service_call(sclp_cmdw_t command, void *sccb)
static inline unsigned char
sclp_ascebc(unsigned char ch)
{
- return (MACHINE_IS_VM) ? _ascebc[ch] : _ascebc_500[ch];
+ return (machine_is_vm()) ? _ascebc[ch] : _ascebc_500[ch];
}
/* translate string from EBCDIC to ASCII */
static inline void
sclp_ebcasc_str(char *str, int nr)
{
- (MACHINE_IS_VM) ? EBCASC(str, nr) : EBCASC_500(str, nr);
+ (machine_is_vm()) ? EBCASC(str, nr) : EBCASC_500(str, nr);
}
/* translate string from ASCII to EBCDIC */
static inline void
sclp_ascebc_str(char *str, int nr)
{
- (MACHINE_IS_VM) ? ASCEBC(str, nr) : ASCEBC_500(str, nr);
+ (machine_is_vm()) ? ASCEBC(str, nr) : ASCEBC_500(str, nr);
}
static inline struct gds_vector *
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index f905a6643a0f..9fcdce9bb35f 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -8,6 +8,7 @@
#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#include <linux/cpufeature.h>
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
@@ -428,7 +429,7 @@ static void __init add_memory_merged(u16 rn)
goto skip_add;
for (addr = start; addr < start + size; addr += block_size)
add_memory(0, addr, block_size,
- MACHINE_HAS_EDAT1 ?
+ cpu_has_edat1() ?
MHP_MEMMAP_ON_MEMORY | MHP_OFFLINE_INACCESSIBLE : MHP_NONE);
skip_add:
first_rn = rn;
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index e5d947c763ea..6a030ba38bf3 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -264,6 +264,19 @@ static struct console sclp_console =
};
/*
+ * Release allocated pages.
+ */
+static void __init __sclp_console_free_pages(void)
+{
+ struct list_head *page, *p;
+
+ list_for_each_safe(page, p, &sclp_con_pages) {
+ list_del(page);
+ free_page((unsigned long)page);
+ }
+}
+
+/*
* called by console_init() in drivers/char/tty_io.c at boot-time.
*/
static int __init
@@ -282,6 +295,10 @@ sclp_console_init(void)
/* Allocate pages for output buffering */
for (i = 0; i < sclp_console_pages; i++) {
page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!page) {
+ __sclp_console_free_pages();
+ return -ENOMEM;
+ }
list_add_tail(page, &sclp_con_pages);
}
sclp_conbuf = NULL;
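The new __sclp_console_free_pages() leans on an idiom this driver already uses: each free output page doubles as its own list node, i.e. a struct list_head sits at the start of the page, so teardown is list_del() plus free_page() on the node's own address. A minimal standalone sketch of that idiom with hypothetical names:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/list.h>

static LIST_HEAD(example_pages);

static int example_alloc_pages(int count)
{
        struct list_head *page, *p;
        int i;

        for (i = 0; i < count; i++) {
                /* the page's first bytes serve as its list_head */
                page = (void *)get_zeroed_page(GFP_KERNEL);
                if (!page)
                        goto free;
                list_add_tail(page, &example_pages);
        }
        return 0;

free:
        list_for_each_safe(page, p, &example_pages) {
                list_del(page);
                free_page((unsigned long)page);
        }
        return -ENOMEM;
}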
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index d9d6edaf8de8..93b2d20d720c 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -50,10 +50,6 @@ static void __init sclp_early_facilities_detect(void)
sclp.has_aeni = !!(sccb->fac118 & 0x20);
sclp.has_aisi = !!(sccb->fac118 & 0x10);
sclp.has_zpci_lsi = !!(sccb->fac118 & 0x01);
- if (sccb->fac85 & 0x02)
- get_lowcore()->machine_flags |= MACHINE_FLAG_ESOP;
- if (sccb->fac91 & 0x40)
- get_lowcore()->machine_flags |= MACHINE_FLAG_TLB_GUEST;
sclp.has_diag204_bif = !!(sccb->fac98 & 0x80);
sclp.has_diag310 = !!(sccb->fac91 & 0x80);
if (sccb->cpuoff > 134) {
@@ -78,7 +74,7 @@ static void __init sclp_early_facilities_detect(void)
sclp.hamax = U64_MAX;
if (!sccb->hcpua) {
- if (MACHINE_IS_VM)
+ if (machine_is_vm())
sclp.max_cores = 64;
else
sclp.max_cores = sccb->ncpurl;
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c
index 5a5383cceb6f..b5bd40f13c75 100644
--- a/drivers/s390/char/sclp_early_core.c
+++ b/drivers/s390/char/sclp_early_core.c
@@ -13,6 +13,7 @@
#include <asm/sections.h>
#include <asm/physmem_info.h>
#include <asm/facility.h>
+#include <asm/machine.h>
#include "sclp.h"
#include "sclp_rw.h"
@@ -335,6 +336,18 @@ int __init sclp_early_get_hsa_size(unsigned long *hsa_size)
return 0;
}
+void __init sclp_early_detect_machine_features(void)
+{
+ struct read_info_sccb *sccb = &sclp_info_sccb;
+
+ if (!sclp_info_sccb_valid)
+ return;
+ if (sccb->fac85 & 0x02)
+ set_machine_feature(MFEATURE_ESOP);
+ if (sccb->fac91 & 0x40)
+ set_machine_feature(MFEATURE_TLB_GUEST);
+}
+
#define SCLP_STORAGE_INFO_FACILITY 0x0000400000000000UL
void __weak __init add_physmem_online_range(u64 start, u64 end) {}
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 892c18d2f87e..0a92d08830e7 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -490,6 +490,17 @@ static const struct tty_operations sclp_ops = {
.flush_buffer = sclp_tty_flush_buffer,
};
+/* Release allocated pages. */
+static void __init __sclp_tty_free_pages(void)
+{
+ struct list_head *page, *p;
+
+ list_for_each_safe(page, p, &sclp_tty_pages) {
+ list_del(page);
+ free_page((unsigned long)page);
+ }
+}
+
static int __init
sclp_tty_init(void)
{
@@ -499,7 +510,7 @@ sclp_tty_init(void)
int rc;
/* z/VM multiplexes the line mode output on the 32xx screen */
- if (MACHINE_IS_VM && !CONSOLE_IS_SCLP)
+ if (machine_is_vm() && !CONSOLE_IS_SCLP)
return 0;
if (!sclp.has_linemode)
return 0;
@@ -516,6 +527,7 @@ sclp_tty_init(void)
for (i = 0; i < MAX_KMEM_PAGES; i++) {
page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (page == NULL) {
+ __sclp_tty_free_pages();
tty_driver_kref_put(driver);
return -ENOMEM;
}
@@ -524,7 +536,7 @@ sclp_tty_init(void)
timer_setup(&sclp_tty_timer, sclp_tty_timeout, 0);
sclp_ttybuf = NULL;
sclp_tty_buffer_count = 0;
- if (MACHINE_IS_VM) {
+ if (machine_is_vm()) {
/* case input lines to lowercase */
sclp_tty_tolower = 1;
}
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index c6d58335beb4..3a695c5bf77f 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -23,6 +23,7 @@
#include <linux/mutex.h>
#include <linux/cma.h>
#include <linux/mm.h>
+#include <asm/machine.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
#include <asm/vmcp.h>
@@ -52,7 +53,7 @@ early_param("vmcp_cma", early_parse_vmcp_cma);
void __init vmcp_cma_reserve(void)
{
- if (!MACHINE_IS_VM)
+ if (!machine_is_vm())
return;
cma_declare_contiguous(0, vmcp_cma_size, 0, 0, 0, false, "vmcp", &vmcp_cma);
}
@@ -254,7 +255,7 @@ static int __init vmcp_init(void)
{
int ret;
- if (!MACHINE_IS_VM)
+ if (!machine_is_vm())
return 0;
vmcp_debug = debug_register("vmcp", 1, 1, 240);
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index b2d93a6e36c4..dac85294d2f5 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -23,6 +23,7 @@
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
+#include <asm/machine.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
@@ -809,7 +810,7 @@ static int __init vmlogrdr_init(void)
int i;
dev_t dev;
- if (! MACHINE_IS_VM) {
+ if (!machine_is_vm()) {
pr_err("not running under VM, driver not loaded.\n");
return -ENODEV;
}
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 90ba7a2b9cb4..0fd918769a4b 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -18,6 +18,7 @@
#include <linux/kobject.h>
#include <linux/uaccess.h>
+#include <asm/machine.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/debug.h>
@@ -1009,7 +1010,7 @@ static int __init ur_init(void)
int rc;
dev_t dev;
- if (!MACHINE_IS_VM) {
+ if (!machine_is_vm()) {
pr_err("The %s cannot be loaded without z/VM\n",
ur_banner);
return -ENODEV;
diff --git a/drivers/s390/cio/crw.c b/drivers/s390/cio/crw.c
index 4916dd0a7eb1..3285ce636c5c 100644
--- a/drivers/s390/cio/crw.c
+++ b/drivers/s390/cio/crw.c
@@ -77,9 +77,8 @@ repeat:
if (unlikely(chain > 1)) {
struct crw tmp_crw;
- printk(KERN_WARNING"%s: Code does not support more "
- "than two chained crws; please report to "
- "linux390@de.ibm.com!\n", __func__);
+ printk(KERN_WARNING "%s: Code does not support more than two chained crws\n",
+ __func__);
ccode = stcrw(&tmp_crw);
printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, "
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index a512eac83485..d0f65d97dd4a 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -12,6 +12,7 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/errno.h>
+#include <asm/machine.h>
#include <asm/ccwdev.h>
#include <asm/setup.h>
#include <asm/cio.h>
@@ -175,7 +176,7 @@ static void snsid_callback(struct ccw_device *cdev, void *data, int rc)
struct senseid *senseid = &cdev->private->dma_area->senseid;
int vm = 0;
- if (rc && MACHINE_IS_VM) {
+ if (rc && machine_is_vm()) {
/* Try diag 0x210 fallback on z/VM. */
snsid_init(cdev);
if (diag210_get_dev_info(cdev) == 0) {
diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c
index 5ff1e51cddf3..a540045b64a6 100644
--- a/drivers/s390/cio/ioasm.c
+++ b/drivers/s390/cio/ioasm.c
@@ -22,7 +22,7 @@ static inline int __stsch(struct subchannel_id schid, struct schib *addr)
int ccode, exception;
exception = 1;
- asm volatile(
+ asm_inline volatile(
" lgr 1,%[r1]\n"
" stsch %[addr]\n"
"0: lhi %[exc],0\n"
@@ -52,7 +52,7 @@ static inline int __msch(struct subchannel_id schid, struct schib *addr)
int ccode, exception;
exception = 1;
- asm volatile(
+ asm_inline volatile(
" lgr 1,%[r1]\n"
" msch %[addr]\n"
"0: lhi %[exc],0\n"
@@ -106,7 +106,7 @@ static inline int __ssch(struct subchannel_id schid, union orb *addr)
int ccode, exception;
exception = 1;
- asm volatile(
+ asm_inline volatile(
" lgr 1,%[r1]\n"
" ssch %[addr]\n"
"0: lhi %[exc],0\n"
@@ -178,7 +178,7 @@ int chsc(void *chsc_area)
int cc, exception;
exception = 1;
- asm volatile(
+ asm_inline volatile(
" .insn rre,0xb25f0000,%[chsc_area],0\n"
"0: lhi %[exc],0\n"
"1:\n"
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 914dde041675..6ff5c9cfb7ed 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -171,7 +171,7 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
return -ENODEV;
}
- parent = kzalloc(struct_size(parent, mdev_types, 1), GFP_KERNEL);
+ parent = kzalloc(sizeof(*parent), GFP_KERNEL);
if (!parent)
return -ENOMEM;
@@ -186,10 +186,10 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
parent->mdev_type.sysfs_name = "io";
parent->mdev_type.pretty_name = "I/O subchannel (Non-QDIO)";
- parent->mdev_types[0] = &parent->mdev_type;
+ parent->mdev_types = &parent->mdev_type;
ret = mdev_register_parent(&parent->parent, &sch->dev,
&vfio_ccw_mdev_driver,
- parent->mdev_types, 1);
+ &parent->mdev_types, 1);
if (ret)
goto out_unreg;
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
index b62bbc5c6376..0501d4bbcdbd 100644
--- a/drivers/s390/cio/vfio_ccw_private.h
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -79,7 +79,7 @@ struct vfio_ccw_parent {
struct mdev_parent parent;
struct mdev_type mdev_type;
- struct mdev_type *mdev_types[];
+ struct mdev_type *mdev_types;
};
/**
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 26e1ea1940ec..1564cd7e3f59 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -26,6 +26,7 @@
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
+#include <asm/machine.h>
#include <asm/airq.h>
#include <asm/tpi.h>
#include <linux/atomic.h>
@@ -2324,10 +2325,9 @@ static inline int __init ap_async_init(void)
* Setup the high resolution poll timer.
* If we are running under z/VM adjust polling to z/VM polling rate.
*/
- if (MACHINE_IS_VM)
+ if (machine_is_vm())
poll_high_timeout = 1500000;
- hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- ap_poll_timer.function = ap_poll_timeout;
+ hrtimer_setup(&ap_poll_timer, ap_poll_timeout, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
queue_work(system_long_wq, &ap_scan_bus_work);
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index a52c2690933f..bc8669b5c304 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -650,13 +650,22 @@ static void vfio_ap_matrix_init(struct ap_config_info *info,
matrix->adm_max = info->apxa ? info->nd : 15;
}
+static void signal_guest_ap_cfg_changed(struct ap_matrix_mdev *matrix_mdev)
+{
+ if (matrix_mdev->cfg_chg_trigger)
+ eventfd_signal(matrix_mdev->cfg_chg_trigger);
+}
+
static void vfio_ap_mdev_update_guest_apcb(struct ap_matrix_mdev *matrix_mdev)
{
- if (matrix_mdev->kvm)
+ if (matrix_mdev->kvm) {
kvm_arch_crypto_set_masks(matrix_mdev->kvm,
matrix_mdev->shadow_apcb.apm,
matrix_mdev->shadow_apcb.aqm,
matrix_mdev->shadow_apcb.adm);
+
+ signal_guest_ap_cfg_changed(matrix_mdev);
+ }
}
static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev)
@@ -792,6 +801,7 @@ static int vfio_ap_mdev_probe(struct mdev_device *mdev)
if (ret)
goto err_put_vdev;
matrix_mdev->req_trigger = NULL;
+ matrix_mdev->cfg_chg_trigger = NULL;
dev_set_drvdata(&mdev->dev, matrix_mdev);
mutex_lock(&matrix_dev->mdevs_lock);
list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
@@ -2046,6 +2056,13 @@ static void vfio_ap_mdev_request(struct vfio_device *vdev, unsigned int count)
matrix_mdev = container_of(vdev, struct ap_matrix_mdev, vdev);
+ get_update_locks_for_mdev(matrix_mdev);
+
+ if (matrix_mdev->kvm) {
+ kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
+ signal_guest_ap_cfg_changed(matrix_mdev);
+ }
+
if (matrix_mdev->req_trigger) {
if (!(count % 10))
dev_notice_ratelimited(dev,
@@ -2057,6 +2074,8 @@ static void vfio_ap_mdev_request(struct vfio_device *vdev, unsigned int count)
dev_notice(dev,
"No device request registered, blocked until released by user\n");
}
+
+ release_update_locks_for_mdev(matrix_mdev);
}
static int vfio_ap_mdev_get_device_info(unsigned long arg)
@@ -2097,6 +2116,10 @@ static ssize_t vfio_ap_get_irq_info(unsigned long arg)
info.count = 1;
info.flags = VFIO_IRQ_INFO_EVENTFD;
break;
+ case VFIO_AP_CFG_CHG_IRQ_INDEX:
+ info.count = 1;
+ info.flags = VFIO_IRQ_INFO_EVENTFD;
+ break;
default:
return -EINVAL;
}
@@ -2160,6 +2183,39 @@ static int vfio_ap_set_request_irq(struct ap_matrix_mdev *matrix_mdev,
return 0;
}
+static int vfio_ap_set_cfg_change_irq(struct ap_matrix_mdev *matrix_mdev, unsigned long arg)
+{
+ s32 fd;
+ void __user *data;
+ unsigned long minsz;
+ struct eventfd_ctx *cfg_chg_trigger;
+
+ minsz = offsetofend(struct vfio_irq_set, count);
+ data = (void __user *)(arg + minsz);
+
+ if (get_user(fd, (s32 __user *)data))
+ return -EFAULT;
+
+ if (fd == -1) {
+ if (matrix_mdev->cfg_chg_trigger)
+ eventfd_ctx_put(matrix_mdev->cfg_chg_trigger);
+ matrix_mdev->cfg_chg_trigger = NULL;
+ } else if (fd >= 0) {
+ cfg_chg_trigger = eventfd_ctx_fdget(fd);
+ if (IS_ERR(cfg_chg_trigger))
+ return PTR_ERR(cfg_chg_trigger);
+
+ if (matrix_mdev->cfg_chg_trigger)
+ eventfd_ctx_put(matrix_mdev->cfg_chg_trigger);
+
+ matrix_mdev->cfg_chg_trigger = cfg_chg_trigger;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int vfio_ap_set_irqs(struct ap_matrix_mdev *matrix_mdev,
unsigned long arg)
{
@@ -2175,6 +2231,8 @@ static int vfio_ap_set_irqs(struct ap_matrix_mdev *matrix_mdev,
switch (irq_set.index) {
case VFIO_AP_REQ_IRQ_INDEX:
return vfio_ap_set_request_irq(matrix_mdev, arg);
+ case VFIO_AP_CFG_CHG_IRQ_INDEX:
+ return vfio_ap_set_cfg_change_irq(matrix_mdev, arg);
default:
return -EINVAL;
}
@@ -2199,8 +2257,8 @@ static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev,
ret = vfio_ap_mdev_reset_queues(matrix_mdev);
break;
case VFIO_DEVICE_GET_IRQ_INFO:
- ret = vfio_ap_get_irq_info(arg);
- break;
+ ret = vfio_ap_get_irq_info(arg);
+ break;
case VFIO_DEVICE_SET_IRQS:
ret = vfio_ap_set_irqs(matrix_mdev, arg);
break;
@@ -2316,10 +2374,10 @@ int vfio_ap_mdev_register(void)
matrix_dev->mdev_type.sysfs_name = VFIO_AP_MDEV_TYPE_HWVIRT;
matrix_dev->mdev_type.pretty_name = VFIO_AP_MDEV_NAME_HWVIRT;
- matrix_dev->mdev_types[0] = &matrix_dev->mdev_type;
+ matrix_dev->mdev_types = &matrix_dev->mdev_type;
ret = mdev_register_parent(&matrix_dev->parent, &matrix_dev->device,
&vfio_ap_matrix_driver,
- matrix_dev->mdev_types, 1);
+ &matrix_dev->mdev_types, 1);
if (ret)
goto err_driver;
return 0;
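On the userspace side, the new VFIO_AP_CFG_CHG_IRQ_INDEX is consumed through the standard VFIO_DEVICE_SET_IRQS eventfd flow, mirroring the existing request IRQ. A hedged userspace sketch (not from this patch; it assumes the accompanying UAPI header update and follows the usual vfio_irq_set convention of carrying the fd in data[]):

#include <linux/vfio.h>
#include <stdint.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>

static int register_ap_cfg_chg_eventfd(int device_fd)
{
        char buf[sizeof(struct vfio_irq_set) + sizeof(int32_t)];
        struct vfio_irq_set *set = (struct vfio_irq_set *)buf;
        int32_t efd = eventfd(0, EFD_CLOEXEC);

        if (efd < 0)
                return -1;

        memset(buf, 0, sizeof(buf));
        set->argsz = sizeof(buf);
        set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
        set->index = VFIO_AP_CFG_CHG_IRQ_INDEX;
        set->start = 0;
        set->count = 1;
        memcpy(set->data, &efd, sizeof(efd));

        if (ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set) < 0)
                return -1;

        return efd;     /* poll()/read() this fd to observe AP config changes */
}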
diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
index 437a161c8659..9bff666b0b35 100644
--- a/drivers/s390/crypto/vfio_ap_private.h
+++ b/drivers/s390/crypto/vfio_ap_private.h
@@ -53,7 +53,7 @@ struct ap_matrix_dev {
struct mutex guests_lock; /* serializes access to each KVM guest */
struct mdev_parent parent;
struct mdev_type mdev_type;
- struct mdev_type *mdev_types[1];
+ struct mdev_type *mdev_types;
};
extern struct ap_matrix_dev *matrix_dev;
@@ -105,6 +105,7 @@ struct ap_queue_table {
* @mdev: the mediated device
* @qtable: table of queues (struct vfio_ap_queue) assigned to the mdev
* @req_trigger eventfd ctx for signaling userspace to return a device
+ * @cfg_chg_trigger eventfd ctx to signal AP config changed to userspace
* @apm_add: bitmap of APIDs added to the host's AP configuration
* @aqm_add: bitmap of APQIs added to the host's AP configuration
* @adm_add: bitmap of control domain numbers added to the host's AP
@@ -120,6 +121,7 @@ struct ap_matrix_mdev {
struct mdev_device *mdev;
struct ap_queue_table qtable;
struct eventfd_ctx *req_trigger;
+ struct eventfd_ctx *cfg_chg_trigger;
DECLARE_BITMAP(apm_add, AP_DEVICES);
DECLARE_BITMAP(aqm_add, AP_DOMAINS);
DECLARE_BITMAP(adm_add, AP_DOMAINS);
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index c61e6427384c..9eb9e3c49f81 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -2,15 +2,6 @@
menu "S/390 network device drivers"
depends on NETDEVICES && S390
-config LCS
- def_tristate m
- prompt "Lan Channel Station Interface"
- depends on CCW && NETDEVICES && ETHERNET
- help
- Select this option if you want to use LCS networking on IBM System z.
- To compile as a module, choose M. The module name is lcs.
- If you do not use LCS, choose N.
-
config CTCM
def_tristate m
prompt "CTC and MPC SNA device support"
@@ -98,7 +89,7 @@ config QETH_OSX
config CCWGROUP
tristate
- default (LCS || CTCM || QETH || SMC)
+ default (CTCM || QETH || SMC)
config ISM
tristate "Support for ISM vPCI Adapter"
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index bc55ec316adb..b5aaba290127 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_CTCM) += ctcm.o fsm.o
obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
obj-$(CONFIG_SMSGIUCV_EVENT) += smsgiucv_app.o
-obj-$(CONFIG_LCS) += lcs.o
qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o qeth_ethtool.o
obj-$(CONFIG_QETH) += qeth.o
qeth_l2-y += qeth_l2_main.o qeth_l2_sys.o
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index 2f34761e6413..60ed70a39d2c 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -20,7 +20,6 @@
MODULE_DESCRIPTION("ISM driver for s390");
MODULE_LICENSE("GPL");
-#define PCI_DEVICE_ID_IBM_ISM 0x04ED
#define DRV_NAME "ism"
static const struct pci_device_id ism_device_table[] = {
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
deleted file mode 100644
index 88db8378325a..000000000000
--- a/drivers/s390/net/lcs.c
+++ /dev/null
@@ -1,2385 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Linux for S/390 LAN channel station device driver
- *
- * Copyright IBM Corp. 1999, 2009
- * Author(s): Original Code written by
- * DJ Barrow <djbarrow@de.ibm.com,barrow_dj@yahoo.com>
- * Rewritten by
- * Frank Pavlic <fpavlic@de.ibm.com> and
- * Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-
-#define KMSG_COMPONENT "lcs"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
-#include <linux/module.h>
-#include <linux/if.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/in.h>
-#include <linux/igmp.h>
-#include <linux/delay.h>
-#include <linux/kthread.h>
-#include <linux/slab.h>
-#include <net/arp.h>
-#include <net/ip.h>
-
-#include <asm/debug.h>
-#include <asm/idals.h>
-#include <asm/timex.h>
-#include <linux/device.h>
-#include <asm/ccwgroup.h>
-
-#include "lcs.h"
-
-
-/*
- * initialization string for output
- */
-
-static char version[] __initdata = "LCS driver";
-
-/*
- * the root device for lcs group devices
- */
-static struct device *lcs_root_dev;
-
-/*
- * Some prototypes.
- */
-static void lcs_tasklet(unsigned long);
-static void lcs_start_kernel_thread(struct work_struct *);
-static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *);
-#ifdef CONFIG_IP_MULTICAST
-static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *);
-#endif /* CONFIG_IP_MULTICAST */
-static int lcs_recovery(void *ptr);
-
-/*
- * Debug Facility Stuff
- */
-static char debug_buffer[255];
-static debug_info_t *lcs_dbf_setup;
-static debug_info_t *lcs_dbf_trace;
-
-/*
- * LCS Debug Facility functions
- */
-static void
-lcs_unregister_debug_facility(void)
-{
- debug_unregister(lcs_dbf_setup);
- debug_unregister(lcs_dbf_trace);
-}
-
-static int
-lcs_register_debug_facility(void)
-{
- lcs_dbf_setup = debug_register("lcs_setup", 2, 1, 8);
- lcs_dbf_trace = debug_register("lcs_trace", 4, 1, 8);
- if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) {
- pr_err("Not enough memory for debug facility.\n");
- lcs_unregister_debug_facility();
- return -ENOMEM;
- }
- debug_register_view(lcs_dbf_setup, &debug_hex_ascii_view);
- debug_set_level(lcs_dbf_setup, 2);
- debug_register_view(lcs_dbf_trace, &debug_hex_ascii_view);
- debug_set_level(lcs_dbf_trace, 2);
- return 0;
-}
-
-/*
- * Allocate io buffers.
- */
-static int
-lcs_alloc_channel(struct lcs_channel *channel)
-{
- int cnt;
-
- LCS_DBF_TEXT(2, setup, "ichalloc");
- for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
- /* alloc memory fo iobuffer */
- channel->iob[cnt].data =
- kzalloc(LCS_IOBUFFERSIZE, GFP_DMA | GFP_KERNEL);
- if (channel->iob[cnt].data == NULL)
- break;
- channel->iob[cnt].state = LCS_BUF_STATE_EMPTY;
- }
- if (cnt < LCS_NUM_BUFFS) {
- /* Not all io buffers could be allocated. */
- LCS_DBF_TEXT(2, setup, "echalloc");
- while (cnt-- > 0)
- kfree(channel->iob[cnt].data);
- return -ENOMEM;
- }
- return 0;
-}
-
-/*
- * Free io buffers.
- */
-static void
-lcs_free_channel(struct lcs_channel *channel)
-{
- int cnt;
-
- LCS_DBF_TEXT(2, setup, "ichfree");
- for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
- kfree(channel->iob[cnt].data);
- channel->iob[cnt].data = NULL;
- }
-}
-
-/*
- * Cleanup channel.
- */
-static void
-lcs_cleanup_channel(struct lcs_channel *channel)
-{
- LCS_DBF_TEXT(3, setup, "cleanch");
- /* Kill write channel tasklets. */
- tasklet_kill(&channel->irq_tasklet);
- /* Free channel buffers. */
- lcs_free_channel(channel);
-}
-
-/*
- * LCS free memory for card and channels.
- */
-static void
-lcs_free_card(struct lcs_card *card)
-{
- LCS_DBF_TEXT(2, setup, "remcard");
- LCS_DBF_HEX(2, setup, &card, sizeof(void*));
- kfree(card);
-}
-
-/*
- * LCS alloc memory for card and channels
- */
-static struct lcs_card *
-lcs_alloc_card(void)
-{
- struct lcs_card *card;
- int rc;
-
- LCS_DBF_TEXT(2, setup, "alloclcs");
-
- card = kzalloc(sizeof(struct lcs_card), GFP_KERNEL | GFP_DMA);
- if (card == NULL)
- return NULL;
- card->lan_type = LCS_FRAME_TYPE_AUTO;
- card->pkt_seq = 0;
- card->lancmd_timeout = LCS_LANCMD_TIMEOUT_DEFAULT;
- /* Allocate io buffers for the read channel. */
- rc = lcs_alloc_channel(&card->read);
- if (rc){
- LCS_DBF_TEXT(2, setup, "iccwerr");
- lcs_free_card(card);
- return NULL;
- }
- /* Allocate io buffers for the write channel. */
- rc = lcs_alloc_channel(&card->write);
- if (rc) {
- LCS_DBF_TEXT(2, setup, "iccwerr");
- lcs_cleanup_channel(&card->read);
- lcs_free_card(card);
- return NULL;
- }
-
-#ifdef CONFIG_IP_MULTICAST
- INIT_LIST_HEAD(&card->ipm_list);
-#endif
- LCS_DBF_HEX(2, setup, &card, sizeof(void*));
- return card;
-}
-
-/*
- * Setup read channel.
- */
-static void
-lcs_setup_read_ccws(struct lcs_card *card)
-{
- int cnt;
-
- LCS_DBF_TEXT(2, setup, "ireadccw");
- /* Setup read ccws. */
- memset(card->read.ccws, 0, sizeof (struct ccw1) * (LCS_NUM_BUFFS + 1));
- for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
- card->read.ccws[cnt].cmd_code = LCS_CCW_READ;
- card->read.ccws[cnt].count = LCS_IOBUFFERSIZE;
- card->read.ccws[cnt].flags =
- CCW_FLAG_CC | CCW_FLAG_SLI | CCW_FLAG_PCI;
- /*
- * Note: we have allocated the buffer with GFP_DMA, so
- * we do not need to do set_normalized_cda.
- */
- card->read.ccws[cnt].cda =
- virt_to_dma32(card->read.iob[cnt].data);
- ((struct lcs_header *)
- card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET;
- card->read.iob[cnt].callback = lcs_get_frames_cb;
- card->read.iob[cnt].state = LCS_BUF_STATE_READY;
- card->read.iob[cnt].count = LCS_IOBUFFERSIZE;
- }
- card->read.ccws[0].flags &= ~CCW_FLAG_PCI;
- card->read.ccws[LCS_NUM_BUFFS - 1].flags &= ~CCW_FLAG_PCI;
- card->read.ccws[LCS_NUM_BUFFS - 1].flags |= CCW_FLAG_SUSPEND;
- /* Last ccw is a tic (transfer in channel). */
- card->read.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
- card->read.ccws[LCS_NUM_BUFFS].cda = virt_to_dma32(card->read.ccws);
- /* Setg initial state of the read channel. */
- card->read.state = LCS_CH_STATE_INIT;
-
- card->read.io_idx = 0;
- card->read.buf_idx = 0;
-}
-
-static void
-lcs_setup_read(struct lcs_card *card)
-{
- LCS_DBF_TEXT(3, setup, "initread");
-
- lcs_setup_read_ccws(card);
- /* Initialize read channel tasklet. */
- card->read.irq_tasklet.data = (unsigned long) &card->read;
- card->read.irq_tasklet.func = lcs_tasklet;
- /* Initialize waitqueue. */
- init_waitqueue_head(&card->read.wait_q);
-}
-
-/*
- * Setup write channel.
- */
-static void
-lcs_setup_write_ccws(struct lcs_card *card)
-{
- int cnt;
-
- LCS_DBF_TEXT(3, setup, "iwritccw");
- /* Setup write ccws. */
- memset(card->write.ccws, 0, sizeof(struct ccw1) * (LCS_NUM_BUFFS + 1));
- for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
- card->write.ccws[cnt].cmd_code = LCS_CCW_WRITE;
- card->write.ccws[cnt].count = 0;
- card->write.ccws[cnt].flags =
- CCW_FLAG_SUSPEND | CCW_FLAG_CC | CCW_FLAG_SLI;
- /*
- * Note: we have allocated the buffer with GFP_DMA, so
- * we do not need to do set_normalized_cda.
- */
- card->write.ccws[cnt].cda =
- virt_to_dma32(card->write.iob[cnt].data);
- }
- /* Last ccw is a tic (transfer in channel). */
- card->write.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
- card->write.ccws[LCS_NUM_BUFFS].cda = virt_to_dma32(card->write.ccws);
- /* Set initial state of the write channel. */
- card->read.state = LCS_CH_STATE_INIT;
-
- card->write.io_idx = 0;
- card->write.buf_idx = 0;
-}
-
-static void
-lcs_setup_write(struct lcs_card *card)
-{
- LCS_DBF_TEXT(3, setup, "initwrit");
-
- lcs_setup_write_ccws(card);
- /* Initialize write channel tasklet. */
- card->write.irq_tasklet.data = (unsigned long) &card->write;
- card->write.irq_tasklet.func = lcs_tasklet;
- /* Initialize waitqueue. */
- init_waitqueue_head(&card->write.wait_q);
-}
-
-static void
-lcs_set_allowed_threads(struct lcs_card *card, unsigned long threads)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&card->mask_lock, flags);
- card->thread_allowed_mask = threads;
- spin_unlock_irqrestore(&card->mask_lock, flags);
- wake_up(&card->wait_q);
-}
-static int lcs_threads_running(struct lcs_card *card, unsigned long threads)
-{
- unsigned long flags;
- int rc = 0;
-
- spin_lock_irqsave(&card->mask_lock, flags);
- rc = (card->thread_running_mask & threads);
- spin_unlock_irqrestore(&card->mask_lock, flags);
- return rc;
-}
-
-static int
-lcs_wait_for_threads(struct lcs_card *card, unsigned long threads)
-{
- return wait_event_interruptible(card->wait_q,
- lcs_threads_running(card, threads) == 0);
-}
-
-static int lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&card->mask_lock, flags);
- if ( !(card->thread_allowed_mask & thread) ||
- (card->thread_start_mask & thread) ) {
- spin_unlock_irqrestore(&card->mask_lock, flags);
- return -EPERM;
- }
- card->thread_start_mask |= thread;
- spin_unlock_irqrestore(&card->mask_lock, flags);
- return 0;
-}
-
-static void
-lcs_clear_thread_running_bit(struct lcs_card *card, unsigned long thread)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&card->mask_lock, flags);
- card->thread_running_mask &= ~thread;
- spin_unlock_irqrestore(&card->mask_lock, flags);
- wake_up(&card->wait_q);
-}
-
-static int __lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
-{
- unsigned long flags;
- int rc = 0;
-
- spin_lock_irqsave(&card->mask_lock, flags);
- if (card->thread_start_mask & thread){
- if ((card->thread_allowed_mask & thread) &&
- !(card->thread_running_mask & thread)){
- rc = 1;
- card->thread_start_mask &= ~thread;
- card->thread_running_mask |= thread;
- } else
- rc = -EPERM;
- }
- spin_unlock_irqrestore(&card->mask_lock, flags);
- return rc;
-}
-
-static int
-lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
-{
- int rc = 0;
- wait_event(card->wait_q,
- (rc = __lcs_do_run_thread(card, thread)) >= 0);
- return rc;
-}
-
-static int
-lcs_do_start_thread(struct lcs_card *card, unsigned long thread)
-{
- unsigned long flags;
- int rc = 0;
-
- spin_lock_irqsave(&card->mask_lock, flags);
- LCS_DBF_TEXT_(4, trace, " %02x%02x%02x",
- (u8) card->thread_start_mask,
- (u8) card->thread_allowed_mask,
- (u8) card->thread_running_mask);
- rc = (card->thread_start_mask & thread);
- spin_unlock_irqrestore(&card->mask_lock, flags);
- return rc;
-}
-
-/*
- * Initialize channels,card and state machines.
- */
-static void
-lcs_setup_card(struct lcs_card *card)
-{
- LCS_DBF_TEXT(2, setup, "initcard");
- LCS_DBF_HEX(2, setup, &card, sizeof(void*));
-
- lcs_setup_read(card);
- lcs_setup_write(card);
- /* Set cards initial state. */
- card->state = DEV_STATE_DOWN;
- card->tx_buffer = NULL;
- card->tx_emitted = 0;
-
- init_waitqueue_head(&card->wait_q);
- spin_lock_init(&card->lock);
- spin_lock_init(&card->ipm_lock);
- spin_lock_init(&card->mask_lock);
-#ifdef CONFIG_IP_MULTICAST
- INIT_LIST_HEAD(&card->ipm_list);
-#endif
- INIT_LIST_HEAD(&card->lancmd_waiters);
-}
-
-static void lcs_clear_multicast_list(struct lcs_card *card)
-{
-#ifdef CONFIG_IP_MULTICAST
- struct lcs_ipm_list *ipm;
- unsigned long flags;
-
- /* Free multicast list. */
- LCS_DBF_TEXT(3, setup, "clmclist");
- spin_lock_irqsave(&card->ipm_lock, flags);
- while (!list_empty(&card->ipm_list)){
- ipm = list_entry(card->ipm_list.next,
- struct lcs_ipm_list, list);
- list_del(&ipm->list);
- if (ipm->ipm_state != LCS_IPM_STATE_SET_REQUIRED){
- spin_unlock_irqrestore(&card->ipm_lock, flags);
- lcs_send_delipm(card, ipm);
- spin_lock_irqsave(&card->ipm_lock, flags);
- }
- kfree(ipm);
- }
- spin_unlock_irqrestore(&card->ipm_lock, flags);
-#endif
-}
-
-/*
- * Cleanup channels,card and state machines.
- */
-static void
-lcs_cleanup_card(struct lcs_card *card)
-{
-
- LCS_DBF_TEXT(3, setup, "cleancrd");
- LCS_DBF_HEX(2,setup,&card,sizeof(void*));
-
- if (card->dev != NULL)
- free_netdev(card->dev);
- /* Cleanup channels. */
- lcs_cleanup_channel(&card->write);
- lcs_cleanup_channel(&card->read);
-}
-
-/*
- * Start channel.
- */
-static int
-lcs_start_channel(struct lcs_channel *channel)
-{
- unsigned long flags;
- int rc;
-
- LCS_DBF_TEXT_(4, trace,"ssch%s", dev_name(&channel->ccwdev->dev));
- spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
- rc = ccw_device_start(channel->ccwdev,
- channel->ccws + channel->io_idx, 0, 0,
- DOIO_DENY_PREFETCH | DOIO_ALLOW_SUSPEND);
- if (rc == 0)
- channel->state = LCS_CH_STATE_RUNNING;
- spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
- if (rc) {
- LCS_DBF_TEXT_(4,trace,"essh%s",
- dev_name(&channel->ccwdev->dev));
- dev_err(&channel->ccwdev->dev,
- "Starting an LCS device resulted in an error,"
- " rc=%d!\n", rc);
- }
- return rc;
-}
-
-static int
-lcs_clear_channel(struct lcs_channel *channel)
-{
- unsigned long flags;
- int rc;
-
- LCS_DBF_TEXT(4,trace,"clearch");
- LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev));
- spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
- rc = ccw_device_clear(channel->ccwdev, 0);
- spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
- if (rc) {
- LCS_DBF_TEXT_(4, trace, "ecsc%s",
- dev_name(&channel->ccwdev->dev));
- return rc;
- }
- wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_CLEARED));
- channel->state = LCS_CH_STATE_STOPPED;
- return rc;
-}
-
-
-/*
- * Stop channel.
- */
-static int
-lcs_stop_channel(struct lcs_channel *channel)
-{
- unsigned long flags;
- int rc;
-
- if (channel->state == LCS_CH_STATE_STOPPED)
- return 0;
- LCS_DBF_TEXT(4,trace,"haltsch");
- LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev));
- channel->state = LCS_CH_STATE_INIT;
- spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
- rc = ccw_device_halt(channel->ccwdev, 0);
- spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
- if (rc) {
- LCS_DBF_TEXT_(4, trace, "ehsc%s",
- dev_name(&channel->ccwdev->dev));
- return rc;
- }
- /* Asynchronous halt initialted. Wait for its completion. */
- wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_HALTED));
- lcs_clear_channel(channel);
- return 0;
-}
-
-/*
- * start read and write channel
- */
-static int
-lcs_start_channels(struct lcs_card *card)
-{
- int rc;
-
- LCS_DBF_TEXT(2, trace, "chstart");
- /* start read channel */
- rc = lcs_start_channel(&card->read);
- if (rc)
- return rc;
- /* start write channel */
- rc = lcs_start_channel(&card->write);
- if (rc)
- lcs_stop_channel(&card->read);
- return rc;
-}
-
-/*
- * stop read and write channel
- */
-static int
-lcs_stop_channels(struct lcs_card *card)
-{
- LCS_DBF_TEXT(2, trace, "chhalt");
- lcs_stop_channel(&card->read);
- lcs_stop_channel(&card->write);
- return 0;
-}
-
-/*
- * Get empty buffer.
- */
-static struct lcs_buffer *
-__lcs_get_buffer(struct lcs_channel *channel)
-{
- int index;
-
- LCS_DBF_TEXT(5, trace, "_getbuff");
- index = channel->io_idx;
- do {
- if (channel->iob[index].state == LCS_BUF_STATE_EMPTY) {
- channel->iob[index].state = LCS_BUF_STATE_LOCKED;
- return channel->iob + index;
- }
- index = (index + 1) & (LCS_NUM_BUFFS - 1);
- } while (index != channel->io_idx);
- return NULL;
-}
-
-static struct lcs_buffer *
-lcs_get_buffer(struct lcs_channel *channel)
-{
- struct lcs_buffer *buffer;
- unsigned long flags;
-
- LCS_DBF_TEXT(5, trace, "getbuff");
- spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
- buffer = __lcs_get_buffer(channel);
- spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
- return buffer;
-}
-
-/*
- * Resume channel program if the channel is suspended.
- */
-static int
-__lcs_resume_channel(struct lcs_channel *channel)
-{
- int rc;
-
- if (channel->state != LCS_CH_STATE_SUSPENDED)
- return 0;
- if (channel->ccws[channel->io_idx].flags & CCW_FLAG_SUSPEND)
- return 0;
- LCS_DBF_TEXT_(5, trace, "rsch%s", dev_name(&channel->ccwdev->dev));
- rc = ccw_device_resume(channel->ccwdev);
- if (rc) {
- LCS_DBF_TEXT_(4, trace, "ersc%s",
- dev_name(&channel->ccwdev->dev));
- dev_err(&channel->ccwdev->dev,
- "Sending data from the LCS device to the LAN failed"
- " with rc=%d\n",rc);
- } else
- channel->state = LCS_CH_STATE_RUNNING;
- return rc;
-
-}
-
-/*
- * Make a buffer ready for processing.
- */
-static void __lcs_ready_buffer_bits(struct lcs_channel *channel, int index)
-{
- int prev, next;
-
- LCS_DBF_TEXT(5, trace, "rdybits");
- prev = (index - 1) & (LCS_NUM_BUFFS - 1);
- next = (index + 1) & (LCS_NUM_BUFFS - 1);
- /* Check if we may clear the suspend bit of this buffer. */
- if (channel->ccws[next].flags & CCW_FLAG_SUSPEND) {
- /* Check if we have to set the PCI bit. */
- if (!(channel->ccws[prev].flags & CCW_FLAG_SUSPEND))
- /* Suspend bit of the previous buffer is not set. */
- channel->ccws[index].flags |= CCW_FLAG_PCI;
- /* Suspend bit of the next buffer is set. */
- channel->ccws[index].flags &= ~CCW_FLAG_SUSPEND;
- }
-}
-
-static int
-lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
-{
- unsigned long flags;
- int index, rc;
-
- LCS_DBF_TEXT(5, trace, "rdybuff");
- BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED &&
- buffer->state != LCS_BUF_STATE_PROCESSED);
- spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
- buffer->state = LCS_BUF_STATE_READY;
- index = buffer - channel->iob;
- /* Set length. */
- channel->ccws[index].count = buffer->count;
- /* Check relevant PCI/suspend bits. */
- __lcs_ready_buffer_bits(channel, index);
- rc = __lcs_resume_channel(channel);
- spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
- return rc;
-}
-
-/*
- * Mark the buffer as processed. Take care of the suspend bit
- * of the previous buffer. This function is called from
- * interrupt context, so the lock must not be taken.
- */
-static int
-__lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
-{
- int index, prev, next;
-
- LCS_DBF_TEXT(5, trace, "prcsbuff");
- BUG_ON(buffer->state != LCS_BUF_STATE_READY);
- buffer->state = LCS_BUF_STATE_PROCESSED;
- index = buffer - channel->iob;
- prev = (index - 1) & (LCS_NUM_BUFFS - 1);
- next = (index + 1) & (LCS_NUM_BUFFS - 1);
- /* Set the suspend bit and clear the PCI bit of this buffer. */
- channel->ccws[index].flags |= CCW_FLAG_SUSPEND;
- channel->ccws[index].flags &= ~CCW_FLAG_PCI;
- /* Check the suspend bit of the previous buffer. */
- if (channel->iob[prev].state == LCS_BUF_STATE_READY) {
- /*
- * Previous buffer is in state ready. It might have
- * happened in lcs_ready_buffer that the suspend bit
- * has not been cleared to avoid an endless loop.
- * Do it now.
- */
- __lcs_ready_buffer_bits(channel, prev);
- }
- /* Clear PCI bit of next buffer. */
- channel->ccws[next].flags &= ~CCW_FLAG_PCI;
- return __lcs_resume_channel(channel);
-}
-
-/*
- * Put a processed buffer back to state empty.
- */
-static void
-lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
-{
- unsigned long flags;
-
- LCS_DBF_TEXT(5, trace, "relbuff");
- BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED &&
- buffer->state != LCS_BUF_STATE_PROCESSED);
- spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
- buffer->state = LCS_BUF_STATE_EMPTY;
- spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
-}
-
-/*
- * Get buffer for a lan command.
- */
-static struct lcs_buffer *
-lcs_get_lancmd(struct lcs_card *card, int count)
-{
- struct lcs_buffer *buffer;
- struct lcs_cmd *cmd;
-
- LCS_DBF_TEXT(4, trace, "getlncmd");
- /* Get buffer and wait if none is available. */
- wait_event(card->write.wait_q,
- ((buffer = lcs_get_buffer(&card->write)) != NULL));
- count += sizeof(struct lcs_header);
- *(__u16 *)(buffer->data + count) = 0;
- buffer->count = count + sizeof(__u16);
- buffer->callback = lcs_release_buffer;
- cmd = (struct lcs_cmd *) buffer->data;
- cmd->offset = count;
- cmd->type = LCS_FRAME_TYPE_CONTROL;
- cmd->slot = 0;
- return buffer;
-}
-
-
-static void
-lcs_get_reply(struct lcs_reply *reply)
-{
- refcount_inc(&reply->refcnt);
-}
-
-static void
-lcs_put_reply(struct lcs_reply *reply)
-{
- if (refcount_dec_and_test(&reply->refcnt))
- kfree(reply);
-}
-
-static struct lcs_reply *
-lcs_alloc_reply(struct lcs_cmd *cmd)
-{
- struct lcs_reply *reply;
-
- LCS_DBF_TEXT(4, trace, "getreply");
-
- reply = kzalloc(sizeof(struct lcs_reply), GFP_ATOMIC);
- if (!reply)
- return NULL;
- refcount_set(&reply->refcnt, 1);
- reply->sequence_no = cmd->sequence_no;
- reply->received = 0;
- reply->rc = 0;
- init_waitqueue_head(&reply->wait_q);
-
- return reply;
-}
-
-/*
- * Notifier function for lancmd replies. Called from read irq.
- */
-static void
-lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd)
-{
- struct list_head *l, *n;
- struct lcs_reply *reply;
-
- LCS_DBF_TEXT(4, trace, "notiwait");
- spin_lock(&card->lock);
- list_for_each_safe(l, n, &card->lancmd_waiters) {
- reply = list_entry(l, struct lcs_reply, list);
- if (reply->sequence_no == cmd->sequence_no) {
- lcs_get_reply(reply);
- list_del_init(&reply->list);
- if (reply->callback != NULL)
- reply->callback(card, cmd);
- reply->received = 1;
- reply->rc = cmd->return_code;
- wake_up(&reply->wait_q);
- lcs_put_reply(reply);
- break;
- }
- }
- spin_unlock(&card->lock);
-}
-
-/*
- * Emit buffer of a lan command.
- */
-static void
-lcs_lancmd_timeout(struct timer_list *t)
-{
- struct lcs_reply *reply = from_timer(reply, t, timer);
- struct lcs_reply *list_reply, *r;
- unsigned long flags;
-
- LCS_DBF_TEXT(4, trace, "timeout");
- spin_lock_irqsave(&reply->card->lock, flags);
- list_for_each_entry_safe(list_reply, r,
- &reply->card->lancmd_waiters,list) {
- if (reply == list_reply) {
- lcs_get_reply(reply);
- list_del_init(&reply->list);
- spin_unlock_irqrestore(&reply->card->lock, flags);
- reply->received = 1;
- reply->rc = -ETIME;
- wake_up(&reply->wait_q);
- lcs_put_reply(reply);
- return;
- }
- }
- spin_unlock_irqrestore(&reply->card->lock, flags);
-}
-
-static int
-lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer,
- void (*reply_callback)(struct lcs_card *, struct lcs_cmd *))
-{
- struct lcs_reply *reply;
- struct lcs_cmd *cmd;
- unsigned long flags;
- int rc;
-
- LCS_DBF_TEXT(4, trace, "sendcmd");
- cmd = (struct lcs_cmd *) buffer->data;
- cmd->return_code = 0;
- cmd->sequence_no = card->sequence_no++;
- reply = lcs_alloc_reply(cmd);
- if (!reply)
- return -ENOMEM;
- reply->callback = reply_callback;
- reply->card = card;
- spin_lock_irqsave(&card->lock, flags);
- list_add_tail(&reply->list, &card->lancmd_waiters);
- spin_unlock_irqrestore(&card->lock, flags);
-
- buffer->callback = lcs_release_buffer;
- rc = lcs_ready_buffer(&card->write, buffer);
- if (rc)
- return rc;
- timer_setup(&reply->timer, lcs_lancmd_timeout, 0);
- mod_timer(&reply->timer, jiffies + HZ * card->lancmd_timeout);
- wait_event(reply->wait_q, reply->received);
- del_timer_sync(&reply->timer);
- LCS_DBF_TEXT_(4, trace, "rc:%d",reply->rc);
- rc = reply->rc;
- lcs_put_reply(reply);
- return rc ? -EIO : 0;
-}
-
-/*
- * LCS startup command
- */
-static int
-lcs_send_startup(struct lcs_card *card, __u8 initiator)
-{
- struct lcs_buffer *buffer;
- struct lcs_cmd *cmd;
-
- LCS_DBF_TEXT(2, trace, "startup");
- buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
- cmd = (struct lcs_cmd *) buffer->data;
- cmd->cmd_code = LCS_CMD_STARTUP;
- cmd->initiator = initiator;
- cmd->cmd.lcs_startup.buff_size = LCS_IOBUFFERSIZE;
- return lcs_send_lancmd(card, buffer, NULL);
-}
-
-/*
- * LCS shutdown command
- */
-static int
-lcs_send_shutdown(struct lcs_card *card)
-{
- struct lcs_buffer *buffer;
- struct lcs_cmd *cmd;
-
- LCS_DBF_TEXT(2, trace, "shutdown");
- buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
- cmd = (struct lcs_cmd *) buffer->data;
- cmd->cmd_code = LCS_CMD_SHUTDOWN;
- cmd->initiator = LCS_INITIATOR_TCPIP;
- return lcs_send_lancmd(card, buffer, NULL);
-}
-
-/*
- * LCS lanstat command
- */
-static void
-__lcs_lanstat_cb(struct lcs_card *card, struct lcs_cmd *cmd)
-{
- LCS_DBF_TEXT(2, trace, "statcb");
- memcpy(card->mac, cmd->cmd.lcs_lanstat_cmd.mac_addr, LCS_MAC_LENGTH);
-}
-
-static int
-lcs_send_lanstat(struct lcs_card *card)
-{
- struct lcs_buffer *buffer;
- struct lcs_cmd *cmd;
-
- LCS_DBF_TEXT(2,trace, "cmdstat");
- buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
- cmd = (struct lcs_cmd *) buffer->data;
- /* Setup lanstat command. */
- cmd->cmd_code = LCS_CMD_LANSTAT;
- cmd->initiator = LCS_INITIATOR_TCPIP;
- cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
- cmd->cmd.lcs_std_cmd.portno = card->portno;
- return lcs_send_lancmd(card, buffer, __lcs_lanstat_cb);
-}
-
-/*
- * send stoplan command
- */
-static int
-lcs_send_stoplan(struct lcs_card *card, __u8 initiator)
-{
- struct lcs_buffer *buffer;
- struct lcs_cmd *cmd;
-
- LCS_DBF_TEXT(2, trace, "cmdstpln");
- buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
- cmd = (struct lcs_cmd *) buffer->data;
- cmd->cmd_code = LCS_CMD_STOPLAN;
- cmd->initiator = initiator;
- cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
- cmd->cmd.lcs_std_cmd.portno = card->portno;
- return lcs_send_lancmd(card, buffer, NULL);
-}
-
-/*
- * send startlan command
- */
-static void
-__lcs_send_startlan_cb(struct lcs_card *card, struct lcs_cmd *cmd)
-{
- LCS_DBF_TEXT(2, trace, "srtlancb");
- card->lan_type = cmd->cmd.lcs_std_cmd.lan_type;
- card->portno = cmd->cmd.lcs_std_cmd.portno;
-}
-
-static int
-lcs_send_startlan(struct lcs_card *card, __u8 initiator)
-{
- struct lcs_buffer *buffer;
- struct lcs_cmd *cmd;
-
- LCS_DBF_TEXT(2, trace, "cmdstaln");
- buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
- cmd = (struct lcs_cmd *) buffer->data;
- cmd->cmd_code = LCS_CMD_STARTLAN;
- cmd->initiator = initiator;
- cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
- cmd->cmd.lcs_std_cmd.portno = card->portno;
- return lcs_send_lancmd(card, buffer, __lcs_send_startlan_cb);
-}
-
-#ifdef CONFIG_IP_MULTICAST
-/*
- * send setipm command (Multicast)
- */
-static int
-lcs_send_setipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list)
-{
- struct lcs_buffer *buffer;
- struct lcs_cmd *cmd;
-
- LCS_DBF_TEXT(2, trace, "cmdsetim");
- buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE);
- cmd = (struct lcs_cmd *) buffer->data;
- cmd->cmd_code = LCS_CMD_SETIPM;
- cmd->initiator = LCS_INITIATOR_TCPIP;
- cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
- cmd->cmd.lcs_qipassist.portno = card->portno;
- cmd->cmd.lcs_qipassist.version = 4;
- cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
- memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair,
- &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair));
- LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr);
- return lcs_send_lancmd(card, buffer, NULL);
-}
-
-/*
- * send delipm command (Multicast)
- */
-static int
-lcs_send_delipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list)
-{
- struct lcs_buffer *buffer;
- struct lcs_cmd *cmd;
-
- LCS_DBF_TEXT(2, trace, "cmddelim");
- buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE);
- cmd = (struct lcs_cmd *) buffer->data;
- cmd->cmd_code = LCS_CMD_DELIPM;
- cmd->initiator = LCS_INITIATOR_TCPIP;
- cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
- cmd->cmd.lcs_qipassist.portno = card->portno;
- cmd->cmd.lcs_qipassist.version = 4;
- cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
- memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair,
- &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair));
- LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr);
- return lcs_send_lancmd(card, buffer, NULL);
-}
-
-/*
- * check if multicast is supported by LCS
- */
-static void
-__lcs_check_multicast_cb(struct lcs_card *card, struct lcs_cmd *cmd)
-{
- LCS_DBF_TEXT(2, trace, "chkmccb");
- card->ip_assists_supported =
- cmd->cmd.lcs_qipassist.ip_assists_supported;
- card->ip_assists_enabled =
- cmd->cmd.lcs_qipassist.ip_assists_enabled;
-}
-
-static int
-lcs_check_multicast_support(struct lcs_card *card)
-{
- struct lcs_buffer *buffer;
- struct lcs_cmd *cmd;
- int rc;
-
- LCS_DBF_TEXT(2, trace, "cmdqipa");
- /* Send query ipassist. */
- buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
- cmd = (struct lcs_cmd *) buffer->data;
- cmd->cmd_code = LCS_CMD_QIPASSIST;
- cmd->initiator = LCS_INITIATOR_TCPIP;
- cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
- cmd->cmd.lcs_qipassist.portno = card->portno;
- cmd->cmd.lcs_qipassist.version = 4;
- cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
- rc = lcs_send_lancmd(card, buffer, __lcs_check_multicast_cb);
- if (rc != 0) {
- pr_err("Query IPAssist failed. Assuming unsupported!\n");
- return -EOPNOTSUPP;
- }
- if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT)
- return 0;
- return -EOPNOTSUPP;
-}
-
-/*
- * set or del multicast address on LCS card
- */
-static void
-lcs_fix_multicast_list(struct lcs_card *card)
-{
- struct list_head failed_list;
- struct lcs_ipm_list *ipm, *tmp;
- unsigned long flags;
- int rc;
-
- LCS_DBF_TEXT(4,trace, "fixipm");
- INIT_LIST_HEAD(&failed_list);
- spin_lock_irqsave(&card->ipm_lock, flags);
-list_modified:
- list_for_each_entry_safe(ipm, tmp, &card->ipm_list, list){
- switch (ipm->ipm_state) {
- case LCS_IPM_STATE_SET_REQUIRED:
- /* del from ipm_list so no one else can tamper with
- * this entry */
- list_del_init(&ipm->list);
- spin_unlock_irqrestore(&card->ipm_lock, flags);
- rc = lcs_send_setipm(card, ipm);
- spin_lock_irqsave(&card->ipm_lock, flags);
- if (rc) {
- pr_info("Adding multicast address failed."
- " Table possibly full!\n");
- /* store ipm in failed list -> will be added
- * to ipm_list again, so a retry will be done
- * during the next call of this function */
- list_add_tail(&ipm->list, &failed_list);
- } else {
- ipm->ipm_state = LCS_IPM_STATE_ON_CARD;
- /* re-insert into ipm_list */
- list_add_tail(&ipm->list, &card->ipm_list);
- }
- goto list_modified;
- case LCS_IPM_STATE_DEL_REQUIRED:
- list_del(&ipm->list);
- spin_unlock_irqrestore(&card->ipm_lock, flags);
- lcs_send_delipm(card, ipm);
- spin_lock_irqsave(&card->ipm_lock, flags);
- kfree(ipm);
- goto list_modified;
- case LCS_IPM_STATE_ON_CARD:
- break;
- }
- }
- /* re-insert all entries from the failed_list into ipm_list */
- list_for_each_entry_safe(ipm, tmp, &failed_list, list)
- list_move_tail(&ipm->list, &card->ipm_list);
-
- spin_unlock_irqrestore(&card->ipm_lock, flags);
-}
-
-/*
- * get mac address for the relevant Multicast address
- */
-static void
-lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev)
-{
- LCS_DBF_TEXT(4,trace, "getmac");
- ip_eth_mc_map(ipm, mac);
-}
-
-/*
- * function called by net device to handle multicast address relevant things
- */
-static void lcs_remove_mc_addresses(struct lcs_card *card,
- struct in_device *in4_dev)
-{
- struct ip_mc_list *im4;
- struct list_head *l;
- struct lcs_ipm_list *ipm;
- unsigned long flags;
- char buf[MAX_ADDR_LEN];
-
- LCS_DBF_TEXT(4, trace, "remmclst");
- spin_lock_irqsave(&card->ipm_lock, flags);
- list_for_each(l, &card->ipm_list) {
- ipm = list_entry(l, struct lcs_ipm_list, list);
- for (im4 = rcu_dereference(in4_dev->mc_list);
- im4 != NULL; im4 = rcu_dereference(im4->next_rcu)) {
- lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
- if ( (ipm->ipm.ip_addr == im4->multiaddr) &&
- (memcmp(buf, &ipm->ipm.mac_addr,
- LCS_MAC_LENGTH) == 0) )
- break;
- }
- if (im4 == NULL)
- ipm->ipm_state = LCS_IPM_STATE_DEL_REQUIRED;
- }
- spin_unlock_irqrestore(&card->ipm_lock, flags);
-}
-
-static struct lcs_ipm_list *lcs_check_addr_entry(struct lcs_card *card,
- struct ip_mc_list *im4,
- char *buf)
-{
- struct lcs_ipm_list *tmp, *ipm = NULL;
- struct list_head *l;
- unsigned long flags;
-
- LCS_DBF_TEXT(4, trace, "chkmcent");
- spin_lock_irqsave(&card->ipm_lock, flags);
- list_for_each(l, &card->ipm_list) {
- tmp = list_entry(l, struct lcs_ipm_list, list);
- if ( (tmp->ipm.ip_addr == im4->multiaddr) &&
- (memcmp(buf, &tmp->ipm.mac_addr,
- LCS_MAC_LENGTH) == 0) ) {
- ipm = tmp;
- break;
- }
- }
- spin_unlock_irqrestore(&card->ipm_lock, flags);
- return ipm;
-}
-
-static void lcs_set_mc_addresses(struct lcs_card *card,
- struct in_device *in4_dev)
-{
-
- struct ip_mc_list *im4;
- struct lcs_ipm_list *ipm;
- char buf[MAX_ADDR_LEN];
- unsigned long flags;
-
- LCS_DBF_TEXT(4, trace, "setmclst");
- for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
- im4 = rcu_dereference(im4->next_rcu)) {
- lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
- ipm = lcs_check_addr_entry(card, im4, buf);
- if (ipm != NULL)
- continue; /* Address already in list. */
- ipm = kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC);
- if (ipm == NULL) {
- pr_info("Not enough memory to add"
- " new multicast entry!\n");
- break;
- }
- memcpy(&ipm->ipm.mac_addr, buf, LCS_MAC_LENGTH);
- ipm->ipm.ip_addr = im4->multiaddr;
- ipm->ipm_state = LCS_IPM_STATE_SET_REQUIRED;
- spin_lock_irqsave(&card->ipm_lock, flags);
- LCS_DBF_HEX(2,trace,&ipm->ipm.ip_addr,4);
- list_add(&ipm->list, &card->ipm_list);
- spin_unlock_irqrestore(&card->ipm_lock, flags);
- }
-}
-
-static int
-lcs_register_mc_addresses(void *data)
-{
- struct lcs_card *card;
- struct in_device *in4_dev;
-
- card = (struct lcs_card *) data;
-
- if (!lcs_do_run_thread(card, LCS_SET_MC_THREAD))
- return 0;
- LCS_DBF_TEXT(4, trace, "regmulti");
-
- in4_dev = in_dev_get(card->dev);
- if (in4_dev == NULL)
- goto out;
- rcu_read_lock();
- lcs_remove_mc_addresses(card,in4_dev);
- lcs_set_mc_addresses(card, in4_dev);
- rcu_read_unlock();
- in_dev_put(in4_dev);
-
- netif_carrier_off(card->dev);
- netif_tx_disable(card->dev);
- wait_event(card->write.wait_q,
- (card->write.state != LCS_CH_STATE_RUNNING));
- lcs_fix_multicast_list(card);
- if (card->state == DEV_STATE_UP) {
- netif_carrier_on(card->dev);
- netif_wake_queue(card->dev);
- }
-out:
- lcs_clear_thread_running_bit(card, LCS_SET_MC_THREAD);
- return 0;
-}
-#endif /* CONFIG_IP_MULTICAST */
-
-/*
- * function called by net device to
- * handle multicast address relevant things
- */
-static void
-lcs_set_multicast_list(struct net_device *dev)
-{
-#ifdef CONFIG_IP_MULTICAST
- struct lcs_card *card;
-
- LCS_DBF_TEXT(4, trace, "setmulti");
- card = (struct lcs_card *) dev->ml_priv;
-
- if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD))
- schedule_work(&card->kernel_thread_starter);
-#endif /* CONFIG_IP_MULTICAST */
-}
-
-static long
-lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb)
-{
- if (!IS_ERR(irb))
- return 0;
-
- switch (PTR_ERR(irb)) {
- case -EIO:
- dev_warn(&cdev->dev,
- "An I/O-error occurred on the LCS device\n");
- LCS_DBF_TEXT(2, trace, "ckirberr");
- LCS_DBF_TEXT_(2, trace, " rc%d", -EIO);
- break;
- case -ETIMEDOUT:
- dev_warn(&cdev->dev,
- "A command timed out on the LCS device\n");
- LCS_DBF_TEXT(2, trace, "ckirberr");
- LCS_DBF_TEXT_(2, trace, " rc%d", -ETIMEDOUT);
- break;
- default:
- dev_warn(&cdev->dev,
- "An error occurred on the LCS device, rc=%ld\n",
- PTR_ERR(irb));
- LCS_DBF_TEXT(2, trace, "ckirberr");
- LCS_DBF_TEXT(2, trace, " rc???");
- }
- return PTR_ERR(irb);
-}
-
-static int
-lcs_get_problem(struct ccw_device *cdev, struct irb *irb)
-{
- int dstat, cstat;
- char *sense;
-
- sense = (char *) irb->ecw;
- cstat = irb->scsw.cmd.cstat;
- dstat = irb->scsw.cmd.dstat;
-
- if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
- SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
- SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
- LCS_DBF_TEXT(2, trace, "CGENCHK");
- return 1;
- }
- if (dstat & DEV_STAT_UNIT_CHECK) {
- if (sense[LCS_SENSE_BYTE_1] &
- LCS_SENSE_RESETTING_EVENT) {
- LCS_DBF_TEXT(2, trace, "REVIND");
- return 1;
- }
- if (sense[LCS_SENSE_BYTE_0] &
- LCS_SENSE_CMD_REJECT) {
- LCS_DBF_TEXT(2, trace, "CMDREJ");
- return 0;
- }
- if ((!sense[LCS_SENSE_BYTE_0]) &&
- (!sense[LCS_SENSE_BYTE_1]) &&
- (!sense[LCS_SENSE_BYTE_2]) &&
- (!sense[LCS_SENSE_BYTE_3])) {
- LCS_DBF_TEXT(2, trace, "ZEROSEN");
- return 0;
- }
- LCS_DBF_TEXT(2, trace, "DGENCHK");
- return 1;
- }
- return 0;
-}
-
-static void
-lcs_schedule_recovery(struct lcs_card *card)
-{
- LCS_DBF_TEXT(2, trace, "startrec");
- if (!lcs_set_thread_start_bit(card, LCS_RECOVERY_THREAD))
- schedule_work(&card->kernel_thread_starter);
-}
-
-/*
- * IRQ Handler for LCS channels
- */
-static void
-lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
-{
- struct lcs_card *card;
- struct lcs_channel *channel;
- int rc, index;
- int cstat, dstat;
-
- if (lcs_check_irb_error(cdev, irb))
- return;
-
- card = CARD_FROM_DEV(cdev);
- if (card->read.ccwdev == cdev)
- channel = &card->read;
- else
- channel = &card->write;
-
- cstat = irb->scsw.cmd.cstat;
- dstat = irb->scsw.cmd.dstat;
- LCS_DBF_TEXT_(5, trace, "Rint%s", dev_name(&cdev->dev));
- LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.cstat,
- irb->scsw.cmd.dstat);
- LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.fctl,
- irb->scsw.cmd.actl);
-
- /* Check for channel and device errors presented */
- rc = lcs_get_problem(cdev, irb);
- if (rc || (dstat & DEV_STAT_UNIT_EXCEP)) {
- dev_warn(&cdev->dev,
- "The LCS device stopped because of an error,"
- " dstat=0x%X, cstat=0x%X \n",
- dstat, cstat);
- if (rc) {
- channel->state = LCS_CH_STATE_ERROR;
- }
- }
- if (channel->state == LCS_CH_STATE_ERROR) {
- lcs_schedule_recovery(card);
- wake_up(&card->wait_q);
- return;
- }
- /* How far in the ccw chain have we processed? */
- if ((channel->state != LCS_CH_STATE_INIT) &&
- (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
- (irb->scsw.cmd.cpa != 0)) {
- index = (struct ccw1 *)dma32_to_virt(irb->scsw.cmd.cpa)
- - channel->ccws;
- if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) ||
- (irb->scsw.cmd.cstat & SCHN_STAT_PCI))
- /* Bloody io subsystem tells us lies about cpa... */
- index = (index - 1) & (LCS_NUM_BUFFS - 1);
- while (channel->io_idx != index) {
- __lcs_processed_buffer(channel,
- channel->iob + channel->io_idx);
- channel->io_idx =
- (channel->io_idx + 1) & (LCS_NUM_BUFFS - 1);
- }
- }
-
- if ((irb->scsw.cmd.dstat & DEV_STAT_DEV_END) ||
- (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) ||
- (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK))
- /* Mark channel as stopped. */
- channel->state = LCS_CH_STATE_STOPPED;
- else if (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED)
- /* CCW execution stopped on a suspend bit. */
- channel->state = LCS_CH_STATE_SUSPENDED;
- if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
- if (irb->scsw.cmd.cc != 0) {
- ccw_device_halt(channel->ccwdev, 0);
- return;
- }
- /* The channel has been stopped by halt_IO. */
- channel->state = LCS_CH_STATE_HALTED;
- }
- if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
- channel->state = LCS_CH_STATE_CLEARED;
- /* Do the rest in the tasklet. */
- tasklet_schedule(&channel->irq_tasklet);
-}
-
-/*
- * Tasklet for IRQ handler
- */
-static void
-lcs_tasklet(unsigned long data)
-{
- unsigned long flags;
- struct lcs_channel *channel;
- struct lcs_buffer *iob;
- int buf_idx;
-
- channel = (struct lcs_channel *) data;
- LCS_DBF_TEXT_(5, trace, "tlet%s", dev_name(&channel->ccwdev->dev));
-
- /* Check for processed buffers. */
- iob = channel->iob;
- buf_idx = channel->buf_idx;
- while (iob[buf_idx].state == LCS_BUF_STATE_PROCESSED) {
- /* Do the callback thing. */
- if (iob[buf_idx].callback != NULL)
- iob[buf_idx].callback(channel, iob + buf_idx);
- buf_idx = (buf_idx + 1) & (LCS_NUM_BUFFS - 1);
- }
- channel->buf_idx = buf_idx;
-
- if (channel->state == LCS_CH_STATE_STOPPED)
- lcs_start_channel(channel);
- spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
- if (channel->state == LCS_CH_STATE_SUSPENDED &&
- channel->iob[channel->io_idx].state == LCS_BUF_STATE_READY)
- __lcs_resume_channel(channel);
- spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
-
- /* Something happened on the channel. Wake up waiters. */
- wake_up(&channel->wait_q);
-}
-
-/*
- * Finish current tx buffer and make it ready for transmit.
- */
-static void
-__lcs_emit_txbuffer(struct lcs_card *card)
-{
- LCS_DBF_TEXT(5, trace, "emittx");
- *(__u16 *)(card->tx_buffer->data + card->tx_buffer->count) = 0;
- card->tx_buffer->count += 2;
- lcs_ready_buffer(&card->write, card->tx_buffer);
- card->tx_buffer = NULL;
- card->tx_emitted++;
-}
-
-/*
- * Callback for finished tx buffers.
- */
-static void
-lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
-{
- struct lcs_card *card;
-
- LCS_DBF_TEXT(5, trace, "txbuffcb");
- /* Put buffer back to pool. */
- lcs_release_buffer(channel, buffer);
- card = container_of(channel, struct lcs_card, write);
- if (netif_queue_stopped(card->dev) && netif_carrier_ok(card->dev))
- netif_wake_queue(card->dev);
- spin_lock(&card->lock);
- card->tx_emitted--;
- if (card->tx_emitted <= 0 && card->tx_buffer != NULL)
- /*
- * Last running tx buffer has finished. Submit partially
- * filled current buffer.
- */
- __lcs_emit_txbuffer(card);
- spin_unlock(&card->lock);
-}
-
-/*
- * Packet transmit function called by network stack
- */
-static netdev_tx_t __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
- struct net_device *dev)
-{
- struct lcs_header *header;
- int rc = NETDEV_TX_OK;
-
- LCS_DBF_TEXT(5, trace, "hardxmit");
- if (skb == NULL) {
- card->stats.tx_dropped++;
- card->stats.tx_errors++;
- return NETDEV_TX_OK;
- }
- if (card->state != DEV_STATE_UP) {
- dev_kfree_skb(skb);
- card->stats.tx_dropped++;
- card->stats.tx_errors++;
- card->stats.tx_carrier_errors++;
- return NETDEV_TX_OK;
- }
- if (skb->protocol == htons(ETH_P_IPV6)) {
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
- }
- netif_stop_queue(card->dev);
- spin_lock(&card->lock);
- if (card->tx_buffer != NULL &&
- card->tx_buffer->count + sizeof(struct lcs_header) +
- skb->len + sizeof(u16) > LCS_IOBUFFERSIZE)
- /* skb too big for current tx buffer. */
- __lcs_emit_txbuffer(card);
- if (card->tx_buffer == NULL) {
- /* Get new tx buffer */
- card->tx_buffer = lcs_get_buffer(&card->write);
- if (card->tx_buffer == NULL) {
- card->stats.tx_dropped++;
- rc = NETDEV_TX_BUSY;
- goto out;
- }
- card->tx_buffer->callback = lcs_txbuffer_cb;
- card->tx_buffer->count = 0;
- }
- header = (struct lcs_header *)
- (card->tx_buffer->data + card->tx_buffer->count);
- card->tx_buffer->count += skb->len + sizeof(struct lcs_header);
- header->offset = card->tx_buffer->count;
- header->type = card->lan_type;
- header->slot = card->portno;
- skb_copy_from_linear_data(skb, header + 1, skb->len);
- spin_unlock(&card->lock);
- card->stats.tx_bytes += skb->len;
- card->stats.tx_packets++;
- dev_kfree_skb(skb);
- netif_wake_queue(card->dev);
- spin_lock(&card->lock);
- if (card->tx_emitted <= 0 && card->tx_buffer != NULL)
- /* If this is the first tx buffer emit it immediately. */
- __lcs_emit_txbuffer(card);
-out:
- spin_unlock(&card->lock);
- return rc;
-}
-
-static netdev_tx_t lcs_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct lcs_card *card;
- int rc;
-
- LCS_DBF_TEXT(5, trace, "pktxmit");
- card = (struct lcs_card *) dev->ml_priv;
- rc = __lcs_start_xmit(card, skb, dev);
- return rc;
-}
-
-/*
- * send startlan and lanstat command to make LCS device ready
- */
-static int
-lcs_startlan_auto(struct lcs_card *card)
-{
- int rc;
-
- LCS_DBF_TEXT(2, trace, "strtauto");
- card->lan_type = LCS_FRAME_TYPE_ENET;
- rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
- if (rc == 0)
- return 0;
-
- return -EIO;
-}
-
-static int
-lcs_startlan(struct lcs_card *card)
-{
- int rc, i;
-
- LCS_DBF_TEXT(2, trace, "startlan");
- rc = 0;
- if (card->portno != LCS_INVALID_PORT_NO) {
- if (card->lan_type == LCS_FRAME_TYPE_AUTO)
- rc = lcs_startlan_auto(card);
- else
- rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
- } else {
- for (i = 0; i <= 16; i++) {
- card->portno = i;
- if (card->lan_type != LCS_FRAME_TYPE_AUTO)
- rc = lcs_send_startlan(card,
- LCS_INITIATOR_TCPIP);
- else
- /* autodetecting lan type */
- rc = lcs_startlan_auto(card);
- if (rc == 0)
- break;
- }
- }
- if (rc == 0)
- return lcs_send_lanstat(card);
- return rc;
-}
-
-/*
- * LCS detect function
- * setup channels and make them I/O ready
- */
-static int
-lcs_detect(struct lcs_card *card)
-{
- int rc = 0;
-
- LCS_DBF_TEXT(2, setup, "lcsdetct");
- /* start/reset card */
- if (card->dev)
- netif_stop_queue(card->dev);
- rc = lcs_stop_channels(card);
- if (rc == 0) {
- rc = lcs_start_channels(card);
- if (rc == 0) {
- rc = lcs_send_startup(card, LCS_INITIATOR_TCPIP);
- if (rc == 0)
- rc = lcs_startlan(card);
- }
- }
- if (rc == 0) {
- card->state = DEV_STATE_UP;
- } else {
- card->state = DEV_STATE_DOWN;
- card->write.state = LCS_CH_STATE_INIT;
- card->read.state = LCS_CH_STATE_INIT;
- }
- return rc;
-}
-
-/*
- * LCS Stop card
- */
-static int
-lcs_stopcard(struct lcs_card *card)
-{
- int rc;
-
- LCS_DBF_TEXT(3, setup, "stopcard");
-
- if (card->read.state != LCS_CH_STATE_STOPPED &&
- card->write.state != LCS_CH_STATE_STOPPED &&
- card->read.state != LCS_CH_STATE_ERROR &&
- card->write.state != LCS_CH_STATE_ERROR &&
- card->state == DEV_STATE_UP) {
- lcs_clear_multicast_list(card);
- rc = lcs_send_stoplan(card,LCS_INITIATOR_TCPIP);
- rc = lcs_send_shutdown(card);
- }
- rc = lcs_stop_channels(card);
- card->state = DEV_STATE_DOWN;
-
- return rc;
-}
-
-/*
- * Kernel Thread helper functions for LGW initiated commands
- */
-static void
-lcs_start_kernel_thread(struct work_struct *work)
-{
- struct lcs_card *card = container_of(work, struct lcs_card, kernel_thread_starter);
- LCS_DBF_TEXT(5, trace, "krnthrd");
- if (lcs_do_start_thread(card, LCS_RECOVERY_THREAD))
- kthread_run(lcs_recovery, card, "lcs_recover");
-#ifdef CONFIG_IP_MULTICAST
- if (lcs_do_start_thread(card, LCS_SET_MC_THREAD))
- kthread_run(lcs_register_mc_addresses, card, "regipm");
-#endif
-}
-
-/*
- * Process control frames.
- */
-static void
-lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd)
-{
- LCS_DBF_TEXT(5, trace, "getctrl");
- if (cmd->initiator == LCS_INITIATOR_LGW) {
- switch(cmd->cmd_code) {
- case LCS_CMD_STARTUP:
- case LCS_CMD_STARTLAN:
- lcs_schedule_recovery(card);
- break;
- case LCS_CMD_STOPLAN:
- if (card->dev) {
- pr_warn("Stoplan for %s initiated by LGW\n",
- card->dev->name);
- netif_carrier_off(card->dev);
- }
- break;
- default:
- LCS_DBF_TEXT(5, trace, "noLGWcmd");
- break;
- }
- } else
- lcs_notify_lancmd_waiters(card, cmd);
-}
-
-/*
- * Unpack network packet.
- */
-static void
-lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len)
-{
- struct sk_buff *skb;
-
- LCS_DBF_TEXT(5, trace, "getskb");
- if (card->dev == NULL ||
- card->state != DEV_STATE_UP)
- /* The card isn't up. Ignore the packet. */
- return;
-
- skb = dev_alloc_skb(skb_len);
- if (skb == NULL) {
- dev_err(&card->dev->dev,
- " Allocating a socket buffer to interface %s failed\n",
- card->dev->name);
- card->stats.rx_dropped++;
- return;
- }
- skb_put_data(skb, skb_data, skb_len);
- skb->protocol = card->lan_type_trans(skb, card->dev);
- card->stats.rx_bytes += skb_len;
- card->stats.rx_packets++;
- if (skb->protocol == htons(ETH_P_802_2))
- *((__u32 *)skb->cb) = ++card->pkt_seq;
- netif_rx(skb);
-}
-
-/*
- * LCS main routine to get packets and lancmd replies from the buffers
- */
-static void
-lcs_get_frames_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
-{
- struct lcs_card *card;
- struct lcs_header *lcs_hdr;
- __u16 offset;
-
- LCS_DBF_TEXT(5, trace, "lcsgtpkt");
- lcs_hdr = (struct lcs_header *) buffer->data;
- if (lcs_hdr->offset == LCS_ILLEGAL_OFFSET) {
- LCS_DBF_TEXT(4, trace, "-eiogpkt");
- return;
- }
- card = container_of(channel, struct lcs_card, read);
- offset = 0;
- while (lcs_hdr->offset != 0) {
- if (lcs_hdr->offset <= 0 ||
- lcs_hdr->offset > LCS_IOBUFFERSIZE ||
- lcs_hdr->offset < offset) {
- /* Offset invalid. */
- card->stats.rx_length_errors++;
- card->stats.rx_errors++;
- return;
- }
- if (lcs_hdr->type == LCS_FRAME_TYPE_CONTROL)
- lcs_get_control(card, (struct lcs_cmd *) lcs_hdr);
- else if (lcs_hdr->type == LCS_FRAME_TYPE_ENET)
- lcs_get_skb(card, (char *)(lcs_hdr + 1),
- lcs_hdr->offset - offset -
- sizeof(struct lcs_header));
- else
- dev_info_once(&card->dev->dev,
- "Unknown frame type %d\n",
- lcs_hdr->type);
- offset = lcs_hdr->offset;
- lcs_hdr->offset = LCS_ILLEGAL_OFFSET;
- lcs_hdr = (struct lcs_header *) (buffer->data + offset);
- }
- /* The buffer is now empty. Make it ready again. */
- lcs_ready_buffer(&card->read, buffer);
-}
-
-/*
- * get network statistics for ifconfig and other user programs
- */
-static struct net_device_stats *
-lcs_getstats(struct net_device *dev)
-{
- struct lcs_card *card;
-
- LCS_DBF_TEXT(4, trace, "netstats");
- card = (struct lcs_card *) dev->ml_priv;
- return &card->stats;
-}
-
-/*
- * stop lcs device
- * This function will be called by user doing ifconfig xxx down
- */
-static int
-lcs_stop_device(struct net_device *dev)
-{
- struct lcs_card *card;
- int rc;
-
- LCS_DBF_TEXT(2, trace, "stopdev");
- card = (struct lcs_card *) dev->ml_priv;
- netif_carrier_off(dev);
- netif_tx_disable(dev);
- dev->flags &= ~IFF_UP;
- wait_event(card->write.wait_q,
- (card->write.state != LCS_CH_STATE_RUNNING));
- rc = lcs_stopcard(card);
- if (rc)
- dev_err(&card->dev->dev,
- " Shutting down the LCS device failed\n");
- return rc;
-}
-
-/*
- * start lcs device and make it runnable
- * This function will be called by user doing ifconfig xxx up
- */
-static int
-lcs_open_device(struct net_device *dev)
-{
- struct lcs_card *card;
- int rc;
-
- LCS_DBF_TEXT(2, trace, "opendev");
- card = (struct lcs_card *) dev->ml_priv;
- /* initialize statistics */
- rc = lcs_detect(card);
- if (rc) {
- pr_err("Error in opening device!\n");
-
- } else {
- dev->flags |= IFF_UP;
- netif_carrier_on(dev);
- netif_wake_queue(dev);
- card->state = DEV_STATE_UP;
- }
- return rc;
-}
-
-/*
- * show function for portno called by cat or similar things
- */
-static ssize_t
-lcs_portno_show (struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct lcs_card *card;
-
- card = dev_get_drvdata(dev);
-
- if (!card)
- return 0;
-
- return sysfs_emit(buf, "%d\n", card->portno);
-}
-
-/*
- * store the value which is piped to file portno
- */
-static ssize_t
-lcs_portno_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
- struct lcs_card *card;
- int rc;
- s16 value;
-
- card = dev_get_drvdata(dev);
-
- if (!card)
- return 0;
-
- rc = kstrtos16(buf, 0, &value);
- if (rc)
- return -EINVAL;
- /* TODO: sanity checks */
- card->portno = value;
- if (card->dev)
- card->dev->dev_port = card->portno;
-
- return count;
-
-}
-
-static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store);
-
-static const char *lcs_type[] = {
- "not a channel",
- "2216 parallel",
- "2216 channel",
- "OSA LCS card",
- "unknown channel type",
- "unsupported channel type",
-};
-
-static ssize_t
-lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct ccwgroup_device *cgdev;
-
- cgdev = to_ccwgroupdev(dev);
- if (!cgdev)
- return -ENODEV;
-
- return sysfs_emit(buf, "%s\n",
- lcs_type[cgdev->cdev[0]->id.driver_info]);
-}
-
-static DEVICE_ATTR(type, 0444, lcs_type_show, NULL);
-
-static ssize_t
-lcs_timeout_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct lcs_card *card;
-
- card = dev_get_drvdata(dev);
-
- return card ? sysfs_emit(buf, "%u\n", card->lancmd_timeout) : 0;
-}
-
-static ssize_t
-lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
- struct lcs_card *card;
- unsigned int value;
- int rc;
-
- card = dev_get_drvdata(dev);
-
- if (!card)
- return 0;
-
- rc = kstrtouint(buf, 0, &value);
- if (rc)
- return -EINVAL;
- /* TODO: sanity checks */
- card->lancmd_timeout = value;
-
- return count;
-
-}
-
-static DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store);
-
-static ssize_t
-lcs_dev_recover_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct lcs_card *card = dev_get_drvdata(dev);
- char *tmp;
- int i;
-
- if (!card)
- return -EINVAL;
- if (card->state != DEV_STATE_UP)
- return -EPERM;
- i = simple_strtoul(buf, &tmp, 16);
- if (i == 1)
- lcs_schedule_recovery(card);
- return count;
-}
-
-static DEVICE_ATTR(recover, 0200, NULL, lcs_dev_recover_store);
-
-static struct attribute * lcs_attrs[] = {
- &dev_attr_portno.attr,
- &dev_attr_type.attr,
- &dev_attr_lancmd_timeout.attr,
- &dev_attr_recover.attr,
- NULL,
-};
-static struct attribute_group lcs_attr_group = {
- .attrs = lcs_attrs,
-};
-static const struct attribute_group *lcs_attr_groups[] = {
- &lcs_attr_group,
- NULL,
-};
-static const struct device_type lcs_devtype = {
- .name = "lcs",
- .groups = lcs_attr_groups,
-};
-
-/*
- * lcs_probe_device is called on establishing a new ccwgroup_device.
- */
-static int
-lcs_probe_device(struct ccwgroup_device *ccwgdev)
-{
- struct lcs_card *card;
-
- if (!get_device(&ccwgdev->dev))
- return -ENODEV;
-
- LCS_DBF_TEXT(2, setup, "add_dev");
- card = lcs_alloc_card();
- if (!card) {
- LCS_DBF_TEXT_(2, setup, " rc%d", -ENOMEM);
- put_device(&ccwgdev->dev);
- return -ENOMEM;
- }
- dev_set_drvdata(&ccwgdev->dev, card);
- ccwgdev->cdev[0]->handler = lcs_irq;
- ccwgdev->cdev[1]->handler = lcs_irq;
- card->gdev = ccwgdev;
- INIT_WORK(&card->kernel_thread_starter, lcs_start_kernel_thread);
- card->thread_start_mask = 0;
- card->thread_allowed_mask = 0;
- card->thread_running_mask = 0;
- ccwgdev->dev.type = &lcs_devtype;
-
- return 0;
-}
-
-static int
-lcs_register_netdev(struct ccwgroup_device *ccwgdev)
-{
- struct lcs_card *card;
-
- LCS_DBF_TEXT(2, setup, "regnetdv");
- card = dev_get_drvdata(&ccwgdev->dev);
- if (card->dev->reg_state != NETREG_UNINITIALIZED)
- return 0;
- SET_NETDEV_DEV(card->dev, &ccwgdev->dev);
- return register_netdev(card->dev);
-}
-
-/*
- * lcs_new_device will be called by setting the group device online.
- */
-static const struct net_device_ops lcs_netdev_ops = {
- .ndo_open = lcs_open_device,
- .ndo_stop = lcs_stop_device,
- .ndo_get_stats = lcs_getstats,
- .ndo_start_xmit = lcs_start_xmit,
-};
-
-static const struct net_device_ops lcs_mc_netdev_ops = {
- .ndo_open = lcs_open_device,
- .ndo_stop = lcs_stop_device,
- .ndo_get_stats = lcs_getstats,
- .ndo_start_xmit = lcs_start_xmit,
- .ndo_set_rx_mode = lcs_set_multicast_list,
-};
-
-static int
-lcs_new_device(struct ccwgroup_device *ccwgdev)
-{
- struct lcs_card *card;
- struct net_device *dev=NULL;
- enum lcs_dev_states recover_state;
- int rc;
-
- card = dev_get_drvdata(&ccwgdev->dev);
- if (!card)
- return -ENODEV;
-
- LCS_DBF_TEXT(2, setup, "newdev");
- LCS_DBF_HEX(3, setup, &card, sizeof(void*));
- card->read.ccwdev = ccwgdev->cdev[0];
- card->write.ccwdev = ccwgdev->cdev[1];
-
- recover_state = card->state;
- rc = ccw_device_set_online(card->read.ccwdev);
- if (rc)
- goto out_err;
- rc = ccw_device_set_online(card->write.ccwdev);
- if (rc)
- goto out_werr;
-
- LCS_DBF_TEXT(3, setup, "lcsnewdv");
-
- lcs_setup_card(card);
- rc = lcs_detect(card);
- if (rc) {
- LCS_DBF_TEXT(2, setup, "dtctfail");
- dev_err(&ccwgdev->dev,
- "Detecting a network adapter for LCS devices"
- " failed with rc=%d (0x%x)\n", rc, rc);
- lcs_stopcard(card);
- goto out;
- }
- if (card->dev) {
- LCS_DBF_TEXT(2, setup, "samedev");
- LCS_DBF_HEX(3, setup, &card, sizeof(void*));
- goto netdev_out;
- }
- switch (card->lan_type) {
- case LCS_FRAME_TYPE_ENET:
- card->lan_type_trans = eth_type_trans;
- dev = alloc_etherdev(0);
- break;
- default:
- LCS_DBF_TEXT(3, setup, "errinit");
- pr_err(" Initialization failed\n");
- goto out;
- }
- if (!dev)
- goto out;
- card->dev = dev;
- card->dev->ml_priv = card;
- card->dev->netdev_ops = &lcs_netdev_ops;
- card->dev->dev_port = card->portno;
- eth_hw_addr_set(card->dev, card->mac);
-#ifdef CONFIG_IP_MULTICAST
- if (!lcs_check_multicast_support(card))
- card->dev->netdev_ops = &lcs_mc_netdev_ops;
-#endif
-netdev_out:
- lcs_set_allowed_threads(card,0xffffffff);
- if (recover_state == DEV_STATE_RECOVER) {
- lcs_set_multicast_list(card->dev);
- card->dev->flags |= IFF_UP;
- netif_carrier_on(card->dev);
- netif_wake_queue(card->dev);
- card->state = DEV_STATE_UP;
- } else {
- lcs_stopcard(card);
- }
-
- if (lcs_register_netdev(ccwgdev) != 0)
- goto out;
-
- /* Print out supported assists: IPv6 */
- pr_info("LCS device %s %s IPv6 support\n", card->dev->name,
- (card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ?
- "with" : "without");
- /* Print out supported assist: Multicast */
- pr_info("LCS device %s %s Multicast support\n", card->dev->name,
- (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ?
- "with" : "without");
- return 0;
-out:
-
- ccw_device_set_offline(card->write.ccwdev);
-out_werr:
- ccw_device_set_offline(card->read.ccwdev);
-out_err:
- return -ENODEV;
-}
-
-/*
- * lcs_shutdown_device, called when setting the group device offline.
- */
-static int
-__lcs_shutdown_device(struct ccwgroup_device *ccwgdev, int recovery_mode)
-{
- struct lcs_card *card;
- enum lcs_dev_states recover_state;
- int ret = 0, ret2 = 0, ret3 = 0;
-
- LCS_DBF_TEXT(3, setup, "shtdndev");
- card = dev_get_drvdata(&ccwgdev->dev);
- if (!card)
- return -ENODEV;
- if (recovery_mode == 0) {
- lcs_set_allowed_threads(card, 0);
- if (lcs_wait_for_threads(card, LCS_SET_MC_THREAD))
- return -ERESTARTSYS;
- }
- LCS_DBF_HEX(3, setup, &card, sizeof(void*));
- recover_state = card->state;
-
- ret = lcs_stop_device(card->dev);
- ret2 = ccw_device_set_offline(card->read.ccwdev);
- ret3 = ccw_device_set_offline(card->write.ccwdev);
- if (!ret)
- ret = (ret2) ? ret2 : ret3;
- if (ret)
- LCS_DBF_TEXT_(3, setup, "1err:%d", ret);
- if (recover_state == DEV_STATE_UP) {
- card->state = DEV_STATE_RECOVER;
- }
- return 0;
-}
-
-static int
-lcs_shutdown_device(struct ccwgroup_device *ccwgdev)
-{
- return __lcs_shutdown_device(ccwgdev, 0);
-}
-
-/*
- * drive lcs recovery after startup and startlan initiated by Lan Gateway
- */
-static int
-lcs_recovery(void *ptr)
-{
- struct lcs_card *card;
- struct ccwgroup_device *gdev;
- int rc;
-
- card = (struct lcs_card *) ptr;
-
- LCS_DBF_TEXT(4, trace, "recover1");
- if (!lcs_do_run_thread(card, LCS_RECOVERY_THREAD))
- return 0;
- LCS_DBF_TEXT(4, trace, "recover2");
- gdev = card->gdev;
- dev_warn(&gdev->dev,
- "A recovery process has been started for the LCS device\n");
- rc = __lcs_shutdown_device(gdev, 1);
- rc = lcs_new_device(gdev);
- if (!rc)
- pr_info("Device %s successfully recovered!\n",
- card->dev->name);
- else
- pr_info("Device %s could not be recovered!\n",
- card->dev->name);
- lcs_clear_thread_running_bit(card, LCS_RECOVERY_THREAD);
- return 0;
-}
-
-/*
- * lcs_remove_device, free buffers and card
- */
-static void
-lcs_remove_device(struct ccwgroup_device *ccwgdev)
-{
- struct lcs_card *card;
-
- card = dev_get_drvdata(&ccwgdev->dev);
- if (!card)
- return;
-
- LCS_DBF_TEXT(3, setup, "remdev");
- LCS_DBF_HEX(3, setup, &card, sizeof(void*));
- if (ccwgdev->state == CCWGROUP_ONLINE) {
- lcs_shutdown_device(ccwgdev);
- }
- if (card->dev)
- unregister_netdev(card->dev);
- lcs_cleanup_card(card);
- lcs_free_card(card);
- dev_set_drvdata(&ccwgdev->dev, NULL);
- put_device(&ccwgdev->dev);
-}
-
-static struct ccw_device_id lcs_ids[] = {
- {CCW_DEVICE(0x3088, 0x08), .driver_info = lcs_channel_type_parallel},
- {CCW_DEVICE(0x3088, 0x1f), .driver_info = lcs_channel_type_2216},
- {CCW_DEVICE(0x3088, 0x60), .driver_info = lcs_channel_type_osa2},
- {},
-};
-MODULE_DEVICE_TABLE(ccw, lcs_ids);
-
-static struct ccw_driver lcs_ccw_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "lcs",
- },
- .ids = lcs_ids,
- .probe = ccwgroup_probe_ccwdev,
- .remove = ccwgroup_remove_ccwdev,
- .int_class = IRQIO_LCS,
-};
-
-/*
- * LCS ccwgroup driver registration
- */
-static struct ccwgroup_driver lcs_group_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "lcs",
- },
- .ccw_driver = &lcs_ccw_driver,
- .setup = lcs_probe_device,
- .remove = lcs_remove_device,
- .set_online = lcs_new_device,
- .set_offline = lcs_shutdown_device,
-};
-
-static ssize_t group_store(struct device_driver *ddrv, const char *buf,
- size_t count)
-{
- int err;
- err = ccwgroup_create_dev(lcs_root_dev, &lcs_group_driver, 2, buf);
- return err ? err : count;
-}
-static DRIVER_ATTR_WO(group);
-
-static struct attribute *lcs_drv_attrs[] = {
- &driver_attr_group.attr,
- NULL,
-};
-static struct attribute_group lcs_drv_attr_group = {
- .attrs = lcs_drv_attrs,
-};
-static const struct attribute_group *lcs_drv_attr_groups[] = {
- &lcs_drv_attr_group,
- NULL,
-};
-
-/*
- * LCS Module/Kernel initialization function
- */
-static int
-__init lcs_init_module(void)
-{
- int rc;
-
- pr_info("Loading %s\n", version);
- rc = lcs_register_debug_facility();
- LCS_DBF_TEXT(0, setup, "lcsinit");
- if (rc)
- goto out_err;
- lcs_root_dev = root_device_register("lcs");
- rc = PTR_ERR_OR_ZERO(lcs_root_dev);
- if (rc)
- goto register_err;
- rc = ccw_driver_register(&lcs_ccw_driver);
- if (rc)
- goto ccw_err;
- lcs_group_driver.driver.groups = lcs_drv_attr_groups;
- rc = ccwgroup_driver_register(&lcs_group_driver);
- if (rc)
- goto ccwgroup_err;
- return 0;
-
-ccwgroup_err:
- ccw_driver_unregister(&lcs_ccw_driver);
-ccw_err:
- root_device_unregister(lcs_root_dev);
-register_err:
- lcs_unregister_debug_facility();
-out_err:
- pr_err("Initializing the lcs device driver failed\n");
- return rc;
-}
-
-
-/*
- * LCS module cleanup function
- */
-static void
-__exit lcs_cleanup_module(void)
-{
- pr_info("Terminating lcs module.\n");
- LCS_DBF_TEXT(0, trace, "cleanup");
- ccwgroup_driver_unregister(&lcs_group_driver);
- ccw_driver_unregister(&lcs_ccw_driver);
- root_device_unregister(lcs_root_dev);
- lcs_unregister_debug_facility();
-}
-
-module_init(lcs_init_module);
-module_exit(lcs_cleanup_module);
-
-MODULE_AUTHOR("Frank Pavlic <fpavlic@de.ibm.com>");
-MODULE_DESCRIPTION("S/390 LAN channel station device driver");
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
deleted file mode 100644
index a2699b70b050..000000000000
--- a/drivers/s390/net/lcs.h
+++ /dev/null
@@ -1,342 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*lcs.h*/
-
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/workqueue.h>
-#include <linux/refcount.h>
-#include <asm/ccwdev.h>
-
-#define LCS_DBF_TEXT(level, name, text) \
- do { \
- debug_text_event(lcs_dbf_##name, level, text); \
- } while (0)
-
-#define LCS_DBF_HEX(level,name,addr,len) \
-do { \
- debug_event(lcs_dbf_##name,level,(void*)(addr),len); \
-} while (0)
-
-#define LCS_DBF_TEXT_(level,name,text...) \
- do { \
- if (debug_level_enabled(lcs_dbf_##name, level)) { \
- scnprintf(debug_buffer, sizeof(debug_buffer), text); \
- debug_text_event(lcs_dbf_##name, level, debug_buffer); \
- } \
- } while (0)
-
-/**
- * sysfs related stuff
- */
-#define CARD_FROM_DEV(cdev) \
- (struct lcs_card *) dev_get_drvdata( \
- &((struct ccwgroup_device *)dev_get_drvdata(&cdev->dev))->dev);
-
-/**
- * Enum for classifying detected devices.
- */
-enum lcs_channel_types {
- /* Device is not a channel */
- lcs_channel_type_none,
-
- /* Device is a 2216 channel */
- lcs_channel_type_parallel,
-
- /* Device is a 2216 channel */
- lcs_channel_type_2216,
-
- /* Device is a OSA2 card */
- lcs_channel_type_osa2
-};
-
-/**
- * CCW commands used in this driver
- */
-#define LCS_CCW_WRITE 0x01
-#define LCS_CCW_READ 0x02
-#define LCS_CCW_TRANSFER 0x08
-
-/**
- * LCS device status primitives
- */
-#define LCS_CMD_STARTLAN 0x01
-#define LCS_CMD_STOPLAN 0x02
-#define LCS_CMD_LANSTAT 0x04
-#define LCS_CMD_STARTUP 0x07
-#define LCS_CMD_SHUTDOWN 0x08
-#define LCS_CMD_QIPASSIST 0xb2
-#define LCS_CMD_SETIPM 0xb4
-#define LCS_CMD_DELIPM 0xb5
-
-#define LCS_INITIATOR_TCPIP 0x00
-#define LCS_INITIATOR_LGW 0x01
-#define LCS_STD_CMD_SIZE 16
-#define LCS_MULTICAST_CMD_SIZE 404
-
-/**
- * LCS IPASSIST MASKS, only used when multicast is switched on
- */
-/* Not supported by LCS */
-#define LCS_IPASS_ARP_PROCESSING 0x0001
-#define LCS_IPASS_IN_CHECKSUM_SUPPORT 0x0002
-#define LCS_IPASS_OUT_CHECKSUM_SUPPORT 0x0004
-#define LCS_IPASS_IP_FRAG_REASSEMBLY 0x0008
-#define LCS_IPASS_IP_FILTERING 0x0010
-/* Supported by lcs 3172 */
-#define LCS_IPASS_IPV6_SUPPORT 0x0020
-#define LCS_IPASS_MULTICAST_SUPPORT 0x0040
-
-/**
- * LCS sense byte definitions
- */
-#define LCS_SENSE_BYTE_0 0
-#define LCS_SENSE_BYTE_1 1
-#define LCS_SENSE_BYTE_2 2
-#define LCS_SENSE_BYTE_3 3
-#define LCS_SENSE_INTERFACE_DISCONNECT 0x01
-#define LCS_SENSE_EQUIPMENT_CHECK 0x10
-#define LCS_SENSE_BUS_OUT_CHECK 0x20
-#define LCS_SENSE_INTERVENTION_REQUIRED 0x40
-#define LCS_SENSE_CMD_REJECT 0x80
-#define LCS_SENSE_RESETTING_EVENT 0x80
-#define LCS_SENSE_DEVICE_ONLINE 0x20
-
-/**
- * LCS packet type definitions
- */
-#define LCS_FRAME_TYPE_CONTROL 0
-#define LCS_FRAME_TYPE_ENET 1
-#define LCS_FRAME_TYPE_TR 2
-#define LCS_FRAME_TYPE_FDDI 7
-#define LCS_FRAME_TYPE_AUTO -1
-
-/**
- * some more definitions, we will sort them later
- */
-#define LCS_ILLEGAL_OFFSET 0xffff
-#define LCS_IOBUFFERSIZE 0x5000
-#define LCS_NUM_BUFFS 32 /* needs to be power of 2 */
-#define LCS_MAC_LENGTH 6
-#define LCS_INVALID_PORT_NO -1
-#define LCS_LANCMD_TIMEOUT_DEFAULT 5
-
-/**
- * Multicast state
- */
-#define LCS_IPM_STATE_SET_REQUIRED 0
-#define LCS_IPM_STATE_DEL_REQUIRED 1
-#define LCS_IPM_STATE_ON_CARD 2
-
-/**
- * LCS IP Assist declarations
- * seems to be only used for multicast
- */
-#define LCS_IPASS_ARP_PROCESSING 0x0001
-#define LCS_IPASS_INBOUND_CSUM_SUPP 0x0002
-#define LCS_IPASS_OUTBOUND_CSUM_SUPP 0x0004
-#define LCS_IPASS_IP_FRAG_REASSEMBLY 0x0008
-#define LCS_IPASS_IP_FILTERING 0x0010
-#define LCS_IPASS_IPV6_SUPPORT 0x0020
-#define LCS_IPASS_MULTICAST_SUPPORT 0x0040
-
-/**
- * LCS Buffer states
- */
-enum lcs_buffer_states {
- LCS_BUF_STATE_EMPTY, /* buffer is empty */
- LCS_BUF_STATE_LOCKED, /* buffer is locked, don't touch */
- LCS_BUF_STATE_READY, /* buffer is ready for read/write */
- LCS_BUF_STATE_PROCESSED,
-};
-
-/**
- * LCS Channel State Machine declarations
- */
-enum lcs_channel_states {
- LCS_CH_STATE_INIT,
- LCS_CH_STATE_HALTED,
- LCS_CH_STATE_STOPPED,
- LCS_CH_STATE_RUNNING,
- LCS_CH_STATE_SUSPENDED,
- LCS_CH_STATE_CLEARED,
- LCS_CH_STATE_ERROR,
-};
-
-/**
- * LCS device state machine
- */
-enum lcs_dev_states {
- DEV_STATE_DOWN,
- DEV_STATE_UP,
- DEV_STATE_RECOVER,
-};
-
-enum lcs_threads {
- LCS_SET_MC_THREAD = 1,
- LCS_RECOVERY_THREAD = 2,
-};
-
-/**
- * LCS struct declarations
- */
-struct lcs_header {
- __u16 offset;
- __u8 type;
- __u8 slot;
-} __attribute__ ((packed));
-
-struct lcs_ip_mac_pair {
- __be32 ip_addr;
- __u8 mac_addr[LCS_MAC_LENGTH];
- __u8 reserved[2];
-} __attribute__ ((packed));
-
-struct lcs_ipm_list {
- struct list_head list;
- struct lcs_ip_mac_pair ipm;
- __u8 ipm_state;
-};
-
-struct lcs_cmd {
- __u16 offset;
- __u8 type;
- __u8 slot;
- __u8 cmd_code;
- __u8 initiator;
- __u16 sequence_no;
- __u16 return_code;
- union {
- struct {
- __u8 lan_type;
- __u8 portno;
- __u16 parameter_count;
- __u8 operator_flags[3];
- __u8 reserved[3];
- } lcs_std_cmd;
- struct {
- __u16 unused1;
- __u16 buff_size;
- __u8 unused2[6];
- } lcs_startup;
- struct {
- __u8 lan_type;
- __u8 portno;
- __u8 unused[10];
- __u8 mac_addr[LCS_MAC_LENGTH];
- __u32 num_packets_deblocked;
- __u32 num_packets_blocked;
- __u32 num_packets_tx_on_lan;
- __u32 num_tx_errors_detected;
- __u32 num_tx_packets_disgarded;
- __u32 num_packets_rx_from_lan;
- __u32 num_rx_errors_detected;
- __u32 num_rx_discarded_nobuffs_avail;
- __u32 num_rx_packets_too_large;
- } lcs_lanstat_cmd;
-#ifdef CONFIG_IP_MULTICAST
- struct {
- __u8 lan_type;
- __u8 portno;
- __u16 num_ip_pairs;
- __u16 ip_assists_supported;
- __u16 ip_assists_enabled;
- __u16 version;
- struct {
- struct lcs_ip_mac_pair
- ip_mac_pair[32];
- __u32 response_data;
- } lcs_ipass_ctlmsg __attribute ((packed));
- } lcs_qipassist __attribute__ ((packed));
-#endif /*CONFIG_IP_MULTICAST */
- } cmd __attribute__ ((packed));
-} __attribute__ ((packed));
-
-/**
- * Forward declarations.
- */
-struct lcs_card;
-struct lcs_channel;
-
-/**
- * Definition of an lcs buffer.
- */
-struct lcs_buffer {
- enum lcs_buffer_states state;
- void *data;
- int count;
- /* Callback for completion notification. */
- void (*callback)(struct lcs_channel *, struct lcs_buffer *);
-};
-
-struct lcs_reply {
- struct list_head list;
- __u16 sequence_no;
- refcount_t refcnt;
- /* Callback for completion notification. */
- void (*callback)(struct lcs_card *, struct lcs_cmd *);
- wait_queue_head_t wait_q;
- struct lcs_card *card;
- struct timer_list timer;
- int received;
- int rc;
-};
-
-/**
- * Definition of an lcs channel
- */
-struct lcs_channel {
- enum lcs_channel_states state;
- struct ccw_device *ccwdev;
- struct ccw1 ccws[LCS_NUM_BUFFS + 1];
- wait_queue_head_t wait_q;
- struct tasklet_struct irq_tasklet;
- struct lcs_buffer iob[LCS_NUM_BUFFS];
- int io_idx;
- int buf_idx;
-};
-
-
-/**
- * definition of the lcs card
- */
-struct lcs_card {
- spinlock_t lock;
- spinlock_t ipm_lock;
- enum lcs_dev_states state;
- struct net_device *dev;
- struct net_device_stats stats;
- __be16 (*lan_type_trans)(struct sk_buff *skb,
- struct net_device *dev);
- struct ccwgroup_device *gdev;
- struct lcs_channel read;
- struct lcs_channel write;
- struct lcs_buffer *tx_buffer;
- int tx_emitted;
- struct list_head lancmd_waiters;
- int lancmd_timeout;
-
- struct work_struct kernel_thread_starter;
- spinlock_t mask_lock;
- unsigned long thread_start_mask;
- unsigned long thread_running_mask;
- unsigned long thread_allowed_mask;
- wait_queue_head_t wait_q;
-
-#ifdef CONFIG_IP_MULTICAST
- struct list_head ipm_list;
-#endif
- __u8 mac[LCS_MAC_LENGTH];
- __u16 ip_assists_supported;
- __u16 ip_assists_enabled;
- __s8 lan_type;
- __u32 pkt_seq;
- __u16 sequence_no;
- __s16 portno;
- /* Some info copied from probeinfo */
- u8 device_forced;
- u8 max_port_no;
- u8 hint_port_no;
- s16 port_protocol_no;
-} __attribute__ ((aligned(8)));
-
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 75910c0bcc2b..777404d66e0c 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -22,6 +22,7 @@
#include <linux/hash.h>
#include <linux/hashtable.h>
#include <net/switchdev.h>
+#include <asm/machine.h>
#include <asm/chsc.h>
#include <asm/css_chars.h>
#include <asm/setup.h>
@@ -299,7 +300,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
QETH_CARD_TEXT(card, 2, "l2reqmac");
- if (MACHINE_IS_VM) {
+ if (machine_is_vm()) {
rc = qeth_vm_request_mac(card);
if (!rc)
goto out;
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index c84ec2fbf99b..c68ba8dbc014 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/slab.h>
#include <net/iucv/iucv.h>
+#include <asm/machine.h>
#include <asm/cpcmd.h>
#include <asm/ebcdic.h>
#include "smsgiucv.h"
@@ -138,7 +139,7 @@ static int __init smsg_init(void)
{
int rc;
- if (!MACHINE_IS_VM) {
+ if (!machine_is_vm()) {
rc = -EPROTONOSUPPORT;
goto out;
}
diff --git a/drivers/s390/net/smsgiucv_app.c b/drivers/s390/net/smsgiucv_app.c
index cc44cbb0028b..4bd4d6bfc126 100644
--- a/drivers/s390/net/smsgiucv_app.c
+++ b/drivers/s390/net/smsgiucv_app.c
@@ -23,6 +23,7 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <net/iucv/iucv.h>
+#include <asm/machine.h>
#include "smsgiucv.h"
/* prefix used for SMSG registration */
@@ -153,7 +154,7 @@ static int __init smsgiucv_app_init(void)
struct device_driver *smsgiucv_drv;
int rc;
- if (!MACHINE_IS_VM)
+ if (!machine_is_vm())
return -ENODEV;
smsgiucv_drv = driver_find(SMSGIUCV_DRV_NAME, &iucv_bus);
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index ab2f35bc294d..22074e81bd38 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -41,7 +41,7 @@
#define ZFCP_BUS_ID_SIZE 20
-MODULE_AUTHOR("IBM Deutschland Entwicklung GmbH - linux390@de.ibm.com");
+MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("FCP HBA driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 37c24ffea65c..5a3c670aec27 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -303,9 +303,9 @@ if SCSI_LOWLEVEL && SCSI
config ISCSI_TCP
tristate "iSCSI Initiator over TCP/IP"
depends on SCSI && INET
+ select CRC32
select CRYPTO
select CRYPTO_MD5
- select CRYPTO_CRC32C
select SCSI_ISCSI_ATTRS
help
The iSCSI Driver provides a host with the ability to access storage
@@ -336,7 +336,6 @@ source "drivers/scsi/cxgbi/Kconfig"
source "drivers/scsi/bnx2i/Kconfig"
source "drivers/scsi/bnx2fc/Kconfig"
source "drivers/scsi/be2iscsi/Kconfig"
-source "drivers/scsi/cxlflash/Kconfig"
config SGIWD93_SCSI
tristate "SGI WD93C93 SCSI Driver"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 1313ddf2fd1a..16de3e41f94c 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -96,7 +96,6 @@ obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/
obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o
obj-$(CONFIG_SCSI_DC395x) += dc395x.o
obj-$(CONFIG_SCSI_AM53C974) += esp_scsi.o am53c974.o
-obj-$(CONFIG_CXLFLASH) += cxlflash/
obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
obj-$(CONFIG_MEGARAID_SAS) += megaraid/
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index abf6a82b74af..0be719f38377 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -3221,8 +3221,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
break;
}
fallthrough;
- case RESERVE:
- case RELEASE:
+ case RESERVE_6:
+ case RELEASE_6:
case REZERO_UNIT:
case REASSIGN_BLOCKS:
case SEEK_10:
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 91170a67cc91..4b12e6dd8f07 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -2029,7 +2029,7 @@ static void aac_pci_resume(struct pci_dev *pdev)
dev_err(&pdev->dev, "aacraid: PCI error - resume\n");
}
-static struct pci_error_handlers aac_pci_err_handler = {
+static const struct pci_error_handlers aac_pci_err_handler = {
.error_detected = aac_pci_error_detected,
.mmio_enabled = aac_pci_mmio_enabled,
.slot_reset = aac_pci_slot_reset,
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index e50a3dbf9de3..ef21b85cf014 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -591,7 +591,7 @@ datadir_t acornscsi_datadirection(int command)
case CHANGE_DEFINITION: case COMPARE: case COPY:
case COPY_VERIFY: case LOG_SELECT: case MODE_SELECT:
case MODE_SELECT_10: case SEND_DIAGNOSTIC: case WRITE_BUFFER:
- case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE:
+ case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE_6:
case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW:
case WRITE_6: case WRITE_10: case WRITE_VERIFY:
case UPDATE_BLOCK: case WRITE_LONG: case WRITE_SAME:
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 76a1e373386e..a8b399ed98fc 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -5776,7 +5776,7 @@ static void beiscsi_remove(struct pci_dev *pcidev)
}
-static struct pci_error_handlers beiscsi_eeh_handlers = {
+static const struct pci_error_handlers beiscsi_eeh_handlers = {
.error_detected = beiscsi_eeh_err_detected,
.slot_reset = beiscsi_eeh_reset,
.resume = beiscsi_eeh_resume,
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 6aa1d3a7e24b..f015c53de0d4 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1642,7 +1642,7 @@ MODULE_DEVICE_TABLE(pci, bfad_id_table);
/*
* PCI error recovery handlers.
*/
-static struct pci_error_handlers bfad_err_handler = {
+static const struct pci_error_handlers bfad_err_handler = {
.error_detected = bfad_pci_error_detected,
.slot_reset = bfad_pci_slot_reset,
.mmio_enabled = bfad_pci_mmio_enabled,
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 9a3f2ed050bd..79c8dafdd49e 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -1162,7 +1162,7 @@ err_resume_exit:
dev_err(&pdev->dev, "resume of device failed: %d\n", rv);
}
-static struct pci_error_handlers csio_err_handler = {
+static const struct pci_error_handlers csio_err_handler = {
.error_detected = csio_pci_error_detected,
.slot_reset = csio_pci_slot_reset,
.resume = csio_pci_resume,
diff --git a/drivers/scsi/cxlflash/Kconfig b/drivers/scsi/cxlflash/Kconfig
deleted file mode 100644
index c424d36e89a6..000000000000
--- a/drivers/scsi/cxlflash/Kconfig
+++ /dev/null
@@ -1,15 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# IBM CXL-attached Flash Accelerator SCSI Driver
-#
-
-config CXLFLASH
- tristate "Support for IBM CAPI Flash (DEPRECATED)"
- depends on PCI && SCSI && (CXL || OCXL) && EEH
- select IRQ_POLL
- help
- The cxlflash driver is deprecated and will be removed in a future
- kernel release.
-
- Allows CAPI Accelerated IO to Flash
- If unsure, say N.
diff --git a/drivers/scsi/cxlflash/Makefile b/drivers/scsi/cxlflash/Makefile
deleted file mode 100644
index fd2f0dd9daf9..000000000000
--- a/drivers/scsi/cxlflash/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_CXLFLASH) += cxlflash.o
-cxlflash-y += main.o superpipe.o lunmgt.o vlun.o
-cxlflash-$(CONFIG_CXL) += cxl_hw.o
-cxlflash-$(CONFIG_OCXL) += ocxl_hw.o
diff --git a/drivers/scsi/cxlflash/backend.h b/drivers/scsi/cxlflash/backend.h
deleted file mode 100644
index 181e0445ed42..000000000000
--- a/drivers/scsi/cxlflash/backend.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CXL Flash Device Driver
- *
- * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2018 IBM Corporation
- */
-
-#ifndef _CXLFLASH_BACKEND_H
-#define _CXLFLASH_BACKEND_H
-
-extern const struct cxlflash_backend_ops cxlflash_cxl_ops;
-extern const struct cxlflash_backend_ops cxlflash_ocxl_ops;
-
-struct cxlflash_backend_ops {
- struct module *module;
- void __iomem * (*psa_map)(void *ctx_cookie);
- void (*psa_unmap)(void __iomem *addr);
- int (*process_element)(void *ctx_cookie);
- int (*map_afu_irq)(void *ctx_cookie, int num, irq_handler_t handler,
- void *cookie, char *name);
- void (*unmap_afu_irq)(void *ctx_cookie, int num, void *cookie);
- u64 (*get_irq_objhndl)(void *ctx_cookie, int irq);
- int (*start_context)(void *ctx_cookie);
- int (*stop_context)(void *ctx_cookie);
- int (*afu_reset)(void *ctx_cookie);
- void (*set_master)(void *ctx_cookie);
- void * (*get_context)(struct pci_dev *dev, void *afu_cookie);
- void * (*dev_context_init)(struct pci_dev *dev, void *afu_cookie);
- int (*release_context)(void *ctx_cookie);
- void (*perst_reloads_same_image)(void *afu_cookie, bool image);
- ssize_t (*read_adapter_vpd)(struct pci_dev *dev, void *buf,
- size_t count);
- int (*allocate_afu_irqs)(void *ctx_cookie, int num);
- void (*free_afu_irqs)(void *ctx_cookie);
- void * (*create_afu)(struct pci_dev *dev);
- void (*destroy_afu)(void *afu_cookie);
- struct file * (*get_fd)(void *ctx_cookie, struct file_operations *fops,
- int *fd);
- void * (*fops_get_context)(struct file *file);
- int (*start_work)(void *ctx_cookie, u64 irqs);
- int (*fd_mmap)(struct file *file, struct vm_area_struct *vm);
- int (*fd_release)(struct inode *inode, struct file *file);
-};
-
-#endif /* _CXLFLASH_BACKEND_H */
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
deleted file mode 100644
index de6229e27b48..000000000000
--- a/drivers/scsi/cxlflash/common.h
+++ /dev/null
@@ -1,340 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#ifndef _CXLFLASH_COMMON_H
-#define _CXLFLASH_COMMON_H
-
-#include <linux/async.h>
-#include <linux/cdev.h>
-#include <linux/irq_poll.h>
-#include <linux/list.h>
-#include <linux/rwsem.h>
-#include <linux/types.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-
-#include "backend.h"
-
-extern const struct file_operations cxlflash_cxl_fops;
-
-#define MAX_CONTEXT CXLFLASH_MAX_CONTEXT /* num contexts per afu */
-#define MAX_FC_PORTS CXLFLASH_MAX_FC_PORTS /* max ports per AFU */
-#define LEGACY_FC_PORTS 2 /* legacy ports per AFU */
-
-#define CHAN2PORTBANK(_x) ((_x) >> ilog2(CXLFLASH_NUM_FC_PORTS_PER_BANK))
-#define CHAN2BANKPORT(_x) ((_x) & (CXLFLASH_NUM_FC_PORTS_PER_BANK - 1))
-
-#define CHAN2PORTMASK(_x) (1 << (_x)) /* channel to port mask */
-#define PORTMASK2CHAN(_x) (ilog2((_x))) /* port mask to channel */
-#define PORTNUM2CHAN(_x) ((_x) - 1) /* port number to channel */
-
-#define CXLFLASH_BLOCK_SIZE 4096 /* 4K blocks */
-#define CXLFLASH_MAX_XFER_SIZE 16777216 /* 16MB transfer */
-#define CXLFLASH_MAX_SECTORS (CXLFLASH_MAX_XFER_SIZE/512) /* SCSI wants
- * max_sectors
- * in units of
- * 512 byte
- * sectors
- */
-
-#define MAX_RHT_PER_CONTEXT (PAGE_SIZE / sizeof(struct sisl_rht_entry))
-
-/* AFU command retry limit */
-#define MC_RETRY_CNT 5 /* Sufficient for SCSI and certain AFU errors */
-
-/* Command management definitions */
-#define CXLFLASH_MAX_CMDS 256
-#define CXLFLASH_MAX_CMDS_PER_LUN CXLFLASH_MAX_CMDS
-
-/* RRQ for master issued cmds */
-#define NUM_RRQ_ENTRY CXLFLASH_MAX_CMDS
-
-/* SQ for master issued cmds */
-#define NUM_SQ_ENTRY CXLFLASH_MAX_CMDS
-
-/* Hardware queue definitions */
-#define CXLFLASH_DEF_HWQS 1
-#define CXLFLASH_MAX_HWQS 8
-#define PRIMARY_HWQ 0
-
-
-static inline void check_sizes(void)
-{
- BUILD_BUG_ON_NOT_POWER_OF_2(CXLFLASH_NUM_FC_PORTS_PER_BANK);
- BUILD_BUG_ON_NOT_POWER_OF_2(CXLFLASH_MAX_CMDS);
-}
-
-/* AFU defines a fixed size of 4K for command buffers (borrow 4K page define) */
-#define CMD_BUFSIZE SIZE_4K
-
-enum cxlflash_lr_state {
- LINK_RESET_INVALID,
- LINK_RESET_REQUIRED,
- LINK_RESET_COMPLETE
-};
-
-enum cxlflash_init_state {
- INIT_STATE_NONE,
- INIT_STATE_PCI,
- INIT_STATE_AFU,
- INIT_STATE_SCSI,
- INIT_STATE_CDEV
-};
-
-enum cxlflash_state {
- STATE_PROBING, /* Initial state during probe */
- STATE_PROBED, /* Temporary state, probe completed but EEH occurred */
- STATE_NORMAL, /* Normal running state, everything good */
- STATE_RESET, /* Reset state, trying to reset/recover */
- STATE_FAILTERM /* Failed/terminating state, error out users/threads */
-};
-
-enum cxlflash_hwq_mode {
- HWQ_MODE_RR, /* Roundrobin (default) */
- HWQ_MODE_TAG, /* Distribute based on block MQ tag */
- HWQ_MODE_CPU, /* CPU affinity */
- MAX_HWQ_MODE
-};
-
-/*
- * Each context has its own set of resource handles that is visible
- * only from that context.
- */
-
-struct cxlflash_cfg {
- struct afu *afu;
-
- const struct cxlflash_backend_ops *ops;
- struct pci_dev *dev;
- struct pci_device_id *dev_id;
- struct Scsi_Host *host;
- int num_fc_ports;
- struct cdev cdev;
- struct device *chardev;
-
- ulong cxlflash_regs_pci;
-
- struct work_struct work_q;
- enum cxlflash_init_state init_state;
- enum cxlflash_lr_state lr_state;
- int lr_port;
- atomic_t scan_host_needed;
-
- void *afu_cookie;
-
- atomic_t recovery_threads;
- struct mutex ctx_recovery_mutex;
- struct mutex ctx_tbl_list_mutex;
- struct rw_semaphore ioctl_rwsem;
- struct ctx_info *ctx_tbl[MAX_CONTEXT];
- struct list_head ctx_err_recovery; /* contexts w/ recovery pending */
- struct file_operations cxl_fops;
-
- /* Parameters that are LUN table related */
- int last_lun_index[MAX_FC_PORTS];
- int promote_lun_index;
- struct list_head lluns; /* list of llun_info structs */
-
- wait_queue_head_t tmf_waitq;
- spinlock_t tmf_slock;
- bool tmf_active;
- bool ws_unmap; /* Write-same unmap supported */
- wait_queue_head_t reset_waitq;
- enum cxlflash_state state;
- async_cookie_t async_reset_cookie;
-};
-
-struct afu_cmd {
- struct sisl_ioarcb rcb; /* IOARCB (cache line aligned) */
- struct sisl_ioasa sa; /* IOASA must follow IOARCB */
- struct afu *parent;
- struct scsi_cmnd *scp;
- struct completion cevent;
- struct list_head queue;
- u32 hwq_index;
-
- u8 cmd_tmf:1,
- cmd_aborted:1;
-
- struct list_head list; /* Pending commands link */
-
- /* As per the SISLITE spec the IOARCB EA has to be 16-byte aligned.
- * However for performance reasons the IOARCB/IOASA should be
- * cache line aligned.
- */
-} __aligned(cache_line_size());
-
-static inline struct afu_cmd *sc_to_afuc(struct scsi_cmnd *sc)
-{
- return PTR_ALIGN(scsi_cmd_priv(sc), __alignof__(struct afu_cmd));
-}
-
-static inline struct afu_cmd *sc_to_afuci(struct scsi_cmnd *sc)
-{
- struct afu_cmd *afuc = sc_to_afuc(sc);
-
- INIT_LIST_HEAD(&afuc->queue);
- return afuc;
-}
-
-static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc)
-{
- struct afu_cmd *afuc = sc_to_afuc(sc);
-
- memset(afuc, 0, sizeof(*afuc));
- return sc_to_afuci(sc);
-}
-
-struct hwq {
- /* Stuff requiring alignment go first. */
- struct sisl_ioarcb sq[NUM_SQ_ENTRY]; /* 16K SQ */
- u64 rrq_entry[NUM_RRQ_ENTRY]; /* 2K RRQ */
-
- /* Beware of alignment till here. Preferably introduce new
- * fields after this point
- */
- struct afu *afu;
- void *ctx_cookie;
- struct sisl_host_map __iomem *host_map; /* MC host map */
- struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */
- ctx_hndl_t ctx_hndl; /* master's context handle */
- u32 index; /* Index of this hwq */
- int num_irqs; /* Number of interrupts requested for context */
- struct list_head pending_cmds; /* Commands pending completion */
-
- atomic_t hsq_credits;
- spinlock_t hsq_slock; /* Hardware send queue lock */
- struct sisl_ioarcb *hsq_start;
- struct sisl_ioarcb *hsq_end;
- struct sisl_ioarcb *hsq_curr;
- spinlock_t hrrq_slock;
- u64 *hrrq_start;
- u64 *hrrq_end;
- u64 *hrrq_curr;
- bool toggle;
- bool hrrq_online;
-
- s64 room;
-
- struct irq_poll irqpoll;
-} __aligned(cache_line_size());
-
-struct afu {
- struct hwq hwqs[CXLFLASH_MAX_HWQS];
- int (*send_cmd)(struct afu *afu, struct afu_cmd *cmd);
- int (*context_reset)(struct hwq *hwq);
-
- /* AFU HW */
- struct cxlflash_afu_map __iomem *afu_map; /* entire MMIO map */
-
- atomic_t cmds_active; /* Number of currently active AFU commands */
- struct mutex sync_active; /* Mutex to serialize AFU commands */
- u64 hb;
- u32 internal_lun; /* User-desired LUN mode for this AFU */
-
- u32 num_hwqs; /* Number of hardware queues */
- u32 desired_hwqs; /* Desired h/w queues, effective on AFU reset */
- enum cxlflash_hwq_mode hwq_mode; /* Steering mode for h/w queues */
- u32 hwq_rr_count; /* Count to distribute traffic for roundrobin */
-
- char version[16];
- u64 interface_version;
-
- u32 irqpoll_weight;
- struct cxlflash_cfg *parent; /* Pointer back to parent cxlflash_cfg */
-};
-
-static inline struct hwq *get_hwq(struct afu *afu, u32 index)
-{
- WARN_ON(index >= CXLFLASH_MAX_HWQS);
-
- return &afu->hwqs[index];
-}
-
-static inline bool afu_is_irqpoll_enabled(struct afu *afu)
-{
- return !!afu->irqpoll_weight;
-}
-
-static inline bool afu_has_cap(struct afu *afu, u64 cap)
-{
- u64 afu_cap = afu->interface_version >> SISL_INTVER_CAP_SHIFT;
-
- return afu_cap & cap;
-}
-
-static inline bool afu_is_ocxl_lisn(struct afu *afu)
-{
- return afu_has_cap(afu, SISL_INTVER_CAP_OCXL_LISN);
-}
-
-static inline bool afu_is_afu_debug(struct afu *afu)
-{
- return afu_has_cap(afu, SISL_INTVER_CAP_AFU_DEBUG);
-}
-
-static inline bool afu_is_lun_provision(struct afu *afu)
-{
- return afu_has_cap(afu, SISL_INTVER_CAP_LUN_PROVISION);
-}
-
-static inline bool afu_is_sq_cmd_mode(struct afu *afu)
-{
- return afu_has_cap(afu, SISL_INTVER_CAP_SQ_CMD_MODE);
-}
-
-static inline bool afu_is_ioarrin_cmd_mode(struct afu *afu)
-{
- return afu_has_cap(afu, SISL_INTVER_CAP_IOARRIN_CMD_MODE);
-}
-
-static inline u64 lun_to_lunid(u64 lun)
-{
- __be64 lun_id;
-
- int_to_scsilun(lun, (struct scsi_lun *)&lun_id);
- return be64_to_cpu(lun_id);
-}
-
-static inline struct fc_port_bank __iomem *get_fc_port_bank(
- struct cxlflash_cfg *cfg, int i)
-{
- struct afu *afu = cfg->afu;
-
- return &afu->afu_map->global.bank[CHAN2PORTBANK(i)];
-}
-
-static inline __be64 __iomem *get_fc_port_regs(struct cxlflash_cfg *cfg, int i)
-{
- struct fc_port_bank __iomem *fcpb = get_fc_port_bank(cfg, i);
-
- return &fcpb->fc_port_regs[CHAN2BANKPORT(i)][0];
-}
-
-static inline __be64 __iomem *get_fc_port_luns(struct cxlflash_cfg *cfg, int i)
-{
- struct fc_port_bank __iomem *fcpb = get_fc_port_bank(cfg, i);
-
- return &fcpb->fc_port_luns[CHAN2BANKPORT(i)][0];
-}
-
-int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t c, res_hndl_t r, u8 mode);
-void cxlflash_list_init(void);
-void cxlflash_term_global_luns(void);
-void cxlflash_free_errpage(void);
-int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd,
- void __user *arg);
-void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg);
-int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg);
-void cxlflash_term_local_luns(struct cxlflash_cfg *cfg);
-void cxlflash_restore_luntable(struct cxlflash_cfg *cfg);
-
-#endif /* ifndef _CXLFLASH_COMMON_H */
diff --git a/drivers/scsi/cxlflash/cxl_hw.c b/drivers/scsi/cxlflash/cxl_hw.c
deleted file mode 100644
index b814130f3f5c..000000000000
--- a/drivers/scsi/cxlflash/cxl_hw.c
+++ /dev/null
@@ -1,177 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CXL Flash Device Driver
- *
- * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2018 IBM Corporation
- */
-
-#include <misc/cxl.h>
-
-#include "backend.h"
-
-/*
- * The following routines map the cxlflash backend operations to existing CXL
- * kernel API function and are largely simple shims that provide an abstraction
- * for converting generic context and AFU cookies into cxl_context or cxl_afu
- * pointers.
- */
-
-static void __iomem *cxlflash_psa_map(void *ctx_cookie)
-{
- return cxl_psa_map(ctx_cookie);
-}
-
-static void cxlflash_psa_unmap(void __iomem *addr)
-{
- cxl_psa_unmap(addr);
-}
-
-static int cxlflash_process_element(void *ctx_cookie)
-{
- return cxl_process_element(ctx_cookie);
-}
-
-static int cxlflash_map_afu_irq(void *ctx_cookie, int num,
- irq_handler_t handler, void *cookie, char *name)
-{
- return cxl_map_afu_irq(ctx_cookie, num, handler, cookie, name);
-}
-
-static void cxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
-{
- cxl_unmap_afu_irq(ctx_cookie, num, cookie);
-}
-
-static u64 cxlflash_get_irq_objhndl(void *ctx_cookie, int irq)
-{
- /* Dummy fop for cxl */
- return 0;
-}
-
-static int cxlflash_start_context(void *ctx_cookie)
-{
- return cxl_start_context(ctx_cookie, 0, NULL);
-}
-
-static int cxlflash_stop_context(void *ctx_cookie)
-{
- return cxl_stop_context(ctx_cookie);
-}
-
-static int cxlflash_afu_reset(void *ctx_cookie)
-{
- return cxl_afu_reset(ctx_cookie);
-}
-
-static void cxlflash_set_master(void *ctx_cookie)
-{
- cxl_set_master(ctx_cookie);
-}
-
-static void *cxlflash_get_context(struct pci_dev *dev, void *afu_cookie)
-{
- return cxl_get_context(dev);
-}
-
-static void *cxlflash_dev_context_init(struct pci_dev *dev, void *afu_cookie)
-{
- return cxl_dev_context_init(dev);
-}
-
-static int cxlflash_release_context(void *ctx_cookie)
-{
- return cxl_release_context(ctx_cookie);
-}
-
-static void cxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
-{
- cxl_perst_reloads_same_image(afu_cookie, image);
-}
-
-static ssize_t cxlflash_read_adapter_vpd(struct pci_dev *dev,
- void *buf, size_t count)
-{
- return cxl_read_adapter_vpd(dev, buf, count);
-}
-
-static int cxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
-{
- return cxl_allocate_afu_irqs(ctx_cookie, num);
-}
-
-static void cxlflash_free_afu_irqs(void *ctx_cookie)
-{
- cxl_free_afu_irqs(ctx_cookie);
-}
-
-static void *cxlflash_create_afu(struct pci_dev *dev)
-{
- return cxl_pci_to_afu(dev);
-}
-
-static void cxlflash_destroy_afu(void *afu)
-{
- /* Dummy fop for cxl */
-}
-
-static struct file *cxlflash_get_fd(void *ctx_cookie,
- struct file_operations *fops, int *fd)
-{
- return cxl_get_fd(ctx_cookie, fops, fd);
-}
-
-static void *cxlflash_fops_get_context(struct file *file)
-{
- return cxl_fops_get_context(file);
-}
-
-static int cxlflash_start_work(void *ctx_cookie, u64 irqs)
-{
- struct cxl_ioctl_start_work work = { 0 };
-
- work.num_interrupts = irqs;
- work.flags = CXL_START_WORK_NUM_IRQS;
-
- return cxl_start_work(ctx_cookie, &work);
-}
-
-static int cxlflash_fd_mmap(struct file *file, struct vm_area_struct *vm)
-{
- return cxl_fd_mmap(file, vm);
-}
-
-static int cxlflash_fd_release(struct inode *inode, struct file *file)
-{
- return cxl_fd_release(inode, file);
-}
-
-const struct cxlflash_backend_ops cxlflash_cxl_ops = {
- .module = THIS_MODULE,
- .psa_map = cxlflash_psa_map,
- .psa_unmap = cxlflash_psa_unmap,
- .process_element = cxlflash_process_element,
- .map_afu_irq = cxlflash_map_afu_irq,
- .unmap_afu_irq = cxlflash_unmap_afu_irq,
- .get_irq_objhndl = cxlflash_get_irq_objhndl,
- .start_context = cxlflash_start_context,
- .stop_context = cxlflash_stop_context,
- .afu_reset = cxlflash_afu_reset,
- .set_master = cxlflash_set_master,
- .get_context = cxlflash_get_context,
- .dev_context_init = cxlflash_dev_context_init,
- .release_context = cxlflash_release_context,
- .perst_reloads_same_image = cxlflash_perst_reloads_same_image,
- .read_adapter_vpd = cxlflash_read_adapter_vpd,
- .allocate_afu_irqs = cxlflash_allocate_afu_irqs,
- .free_afu_irqs = cxlflash_free_afu_irqs,
- .create_afu = cxlflash_create_afu,
- .destroy_afu = cxlflash_destroy_afu,
- .get_fd = cxlflash_get_fd,
- .fops_get_context = cxlflash_fops_get_context,
- .start_work = cxlflash_start_work,
- .fd_mmap = cxlflash_fd_mmap,
- .fd_release = cxlflash_fd_release,
-};
diff --git a/drivers/scsi/cxlflash/lunmgt.c b/drivers/scsi/cxlflash/lunmgt.c
deleted file mode 100644
index 962c797fda07..000000000000
--- a/drivers/scsi/cxlflash/lunmgt.c
+++ /dev/null
@@ -1,278 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#include <linux/unaligned.h>
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-
-#include <scsi/scsi_host.h>
-#include <uapi/scsi/cxlflash_ioctl.h>
-
-#include "sislite.h"
-#include "common.h"
-#include "vlun.h"
-#include "superpipe.h"
-
-/**
- * create_local() - allocate and initialize a local LUN information structure
- * @sdev: SCSI device associated with LUN.
- * @wwid: World Wide Node Name for LUN.
- *
- * Return: Allocated local llun_info structure on success, NULL on failure
- */
-static struct llun_info *create_local(struct scsi_device *sdev, u8 *wwid)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = NULL;
-
- lli = kzalloc(sizeof(*lli), GFP_KERNEL);
- if (unlikely(!lli)) {
- dev_err(dev, "%s: could not allocate lli\n", __func__);
- goto out;
- }
-
- lli->sdev = sdev;
- lli->host_no = sdev->host->host_no;
- lli->in_table = false;
-
- memcpy(lli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN);
-out:
- return lli;
-}
-
-/**
- * create_global() - allocate and initialize a global LUN information structure
- * @sdev: SCSI device associated with LUN.
- * @wwid: World Wide Node Name for LUN.
- *
- * Return: Allocated global glun_info structure on success, NULL on failure
- */
-static struct glun_info *create_global(struct scsi_device *sdev, u8 *wwid)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct glun_info *gli = NULL;
-
- gli = kzalloc(sizeof(*gli), GFP_KERNEL);
- if (unlikely(!gli)) {
- dev_err(dev, "%s: could not allocate gli\n", __func__);
- goto out;
- }
-
- mutex_init(&gli->mutex);
- memcpy(gli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN);
-out:
- return gli;
-}
-
-/**
- * lookup_local() - find a local LUN information structure by WWID
- * @cfg: Internal structure associated with the host.
- * @wwid: WWID associated with LUN.
- *
- * Return: Found local lun_info structure on success, NULL on failure
- */
-static struct llun_info *lookup_local(struct cxlflash_cfg *cfg, u8 *wwid)
-{
- struct llun_info *lli, *temp;
-
- list_for_each_entry_safe(lli, temp, &cfg->lluns, list)
- if (!memcmp(lli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN))
- return lli;
-
- return NULL;
-}
-
-/**
- * lookup_global() - find a global LUN information structure by WWID
- * @wwid: WWID associated with LUN.
- *
- * Return: Found global lun_info structure on success, NULL on failure
- */
-static struct glun_info *lookup_global(u8 *wwid)
-{
- struct glun_info *gli, *temp;
-
- list_for_each_entry_safe(gli, temp, &global.gluns, list)
- if (!memcmp(gli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN))
- return gli;
-
- return NULL;
-}
-
-/**
- * find_and_create_lun() - find or create a local LUN information structure
- * @sdev: SCSI device associated with LUN.
- * @wwid: WWID associated with LUN.
- *
- * The LUN is kept both in a local list (per adapter) and in a global list
- * (across all adapters). Certain attributes of the LUN are local to the
- * adapter (such as index, port selection mask, etc.).
- *
- * The block allocation map is shared across all adapters (i.e. associated
- * wih the global list). Since different attributes are associated with
- * the per adapter and global entries, allocate two separate structures for each
- * LUN (one local, one global).
- *
- * Keep a pointer back from the local to the global entry.
- *
- * This routine assumes the caller holds the global mutex.
- *
- * Return: Found/Allocated local lun_info structure on success, NULL on failure
- */
-static struct llun_info *find_and_create_lun(struct scsi_device *sdev, u8 *wwid)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = NULL;
- struct glun_info *gli = NULL;
-
- if (unlikely(!wwid))
- goto out;
-
- lli = lookup_local(cfg, wwid);
- if (lli)
- goto out;
-
- lli = create_local(sdev, wwid);
- if (unlikely(!lli))
- goto out;
-
- gli = lookup_global(wwid);
- if (gli) {
- lli->parent = gli;
- list_add(&lli->list, &cfg->lluns);
- goto out;
- }
-
- gli = create_global(sdev, wwid);
- if (unlikely(!gli)) {
- kfree(lli);
- lli = NULL;
- goto out;
- }
-
- lli->parent = gli;
- list_add(&lli->list, &cfg->lluns);
-
- list_add(&gli->list, &global.gluns);
-
-out:
- dev_dbg(dev, "%s: returning lli=%p, gli=%p\n", __func__, lli, gli);
- return lli;
-}
-
-/**
- * cxlflash_term_local_luns() - Delete all entries from local LUN list, free.
- * @cfg: Internal structure associated with the host.
- */
-void cxlflash_term_local_luns(struct cxlflash_cfg *cfg)
-{
- struct llun_info *lli, *temp;
-
- mutex_lock(&global.mutex);
- list_for_each_entry_safe(lli, temp, &cfg->lluns, list) {
- list_del(&lli->list);
- kfree(lli);
- }
- mutex_unlock(&global.mutex);
-}
-
-/**
- * cxlflash_list_init() - initializes the global LUN list
- */
-void cxlflash_list_init(void)
-{
- INIT_LIST_HEAD(&global.gluns);
- mutex_init(&global.mutex);
- global.err_page = NULL;
-}
-
-/**
- * cxlflash_term_global_luns() - frees resources associated with global LUN list
- */
-void cxlflash_term_global_luns(void)
-{
- struct glun_info *gli, *temp;
-
- mutex_lock(&global.mutex);
- list_for_each_entry_safe(gli, temp, &global.gluns, list) {
- list_del(&gli->list);
- cxlflash_ba_terminate(&gli->blka.ba_lun);
- kfree(gli);
- }
- mutex_unlock(&global.mutex);
-}
-
-/**
- * cxlflash_manage_lun() - handles LUN management activities
- * @sdev: SCSI device associated with LUN.
- * @arg: Manage ioctl data structure.
- *
- * This routine is used to notify the driver about a LUN's WWID and associate
- * SCSI devices (sdev) with a global LUN instance. Additionally it serves to
- * change a LUN's operating mode: legacy or superpipe.
- *
- * Return: 0 on success, -errno on failure
- */
-int cxlflash_manage_lun(struct scsi_device *sdev, void *arg)
-{
- struct dk_cxlflash_manage_lun *manage = arg;
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = NULL;
- int rc = 0;
- u64 flags = manage->hdr.flags;
- u32 chan = sdev->channel;
-
- mutex_lock(&global.mutex);
- lli = find_and_create_lun(sdev, manage->wwid);
- dev_dbg(dev, "%s: WWID=%016llx%016llx, flags=%016llx lli=%p\n",
- __func__, get_unaligned_be64(&manage->wwid[0]),
- get_unaligned_be64(&manage->wwid[8]), manage->hdr.flags, lli);
- if (unlikely(!lli)) {
- rc = -ENOMEM;
- goto out;
- }
-
- if (flags & DK_CXLFLASH_MANAGE_LUN_ENABLE_SUPERPIPE) {
- /*
- * Update port selection mask based upon channel, store off LUN
- * in unpacked, AFU-friendly format, and hang LUN reference in
- * the sdev.
- */
- lli->port_sel |= CHAN2PORTMASK(chan);
- lli->lun_id[chan] = lun_to_lunid(sdev->lun);
- sdev->hostdata = lli;
- } else if (flags & DK_CXLFLASH_MANAGE_LUN_DISABLE_SUPERPIPE) {
- if (lli->parent->mode != MODE_NONE)
- rc = -EBUSY;
- else {
- /*
- * Clean up local LUN for this port and reset table
- * tracking when no more references exist.
- */
- sdev->hostdata = NULL;
- lli->port_sel &= ~CHAN2PORTMASK(chan);
- if (lli->port_sel == 0U)
- lli->in_table = false;
- }
- }
-
- dev_dbg(dev, "%s: port_sel=%08x chan=%u lun_id=%016llx\n",
- __func__, lli->port_sel, chan, lli->lun_id[chan]);
-
-out:
- mutex_unlock(&global.mutex);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
deleted file mode 100644
index ae626e389c8b..000000000000
--- a/drivers/scsi/cxlflash/main.c
+++ /dev/null
@@ -1,3970 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#include <linux/delay.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include <linux/unaligned.h>
-
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_host.h>
-#include <uapi/scsi/cxlflash_ioctl.h>
-
-#include "main.h"
-#include "sislite.h"
-#include "common.h"
-
-MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
-MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
-MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
-MODULE_LICENSE("GPL");
-
-static char *cxlflash_devnode(const struct device *dev, umode_t *mode);
-static const struct class cxlflash_class = {
- .name = "cxlflash",
- .devnode = cxlflash_devnode,
-};
-
-static u32 cxlflash_major;
-static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
-
-/**
- * process_cmd_err() - command error handler
- * @cmd: AFU command that experienced the error.
- * @scp: SCSI command associated with the AFU command in error.
- *
- * Translates error bits from AFU command to SCSI command results.
- */
-static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
-{
- struct afu *afu = cmd->parent;
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- struct sisl_ioasa *ioasa;
- u32 resid;
-
- ioasa = &(cmd->sa);
-
- if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
- resid = ioasa->resid;
- scsi_set_resid(scp, resid);
- dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
- __func__, cmd, scp, resid);
- }
-
- if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
- dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p\n",
- __func__, cmd, scp);
- scp->result = (DID_ERROR << 16);
- }
-
- dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
- "afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
- ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
- ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);
-
- if (ioasa->rc.scsi_rc) {
- /* We have a SCSI status */
- if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
- memcpy(scp->sense_buffer, ioasa->sense_data,
- SISL_SENSE_DATA_LEN);
- scp->result = ioasa->rc.scsi_rc;
- } else
- scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
- }
-
- /*
- * We encountered an error. Set scp->result based on nature
- * of error.
- */
- if (ioasa->rc.fc_rc) {
- /* We have an FC status */
- switch (ioasa->rc.fc_rc) {
- case SISL_FC_RC_LINKDOWN:
- scp->result = (DID_REQUEUE << 16);
- break;
- case SISL_FC_RC_RESID:
- /* This indicates an FCP resid underrun */
- if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
- /* If the SISL_RC_FLAGS_OVERRUN flag was set,
- * then we will handle this error else where.
- * If not then we must handle it here.
- * This is probably an AFU bug.
- */
- scp->result = (DID_ERROR << 16);
- }
- break;
- case SISL_FC_RC_RESIDERR:
- /* Resid mismatch between adapter and device */
- case SISL_FC_RC_TGTABORT:
- case SISL_FC_RC_ABORTOK:
- case SISL_FC_RC_ABORTFAIL:
- case SISL_FC_RC_NOLOGI:
- case SISL_FC_RC_ABORTPEND:
- case SISL_FC_RC_WRABORTPEND:
- case SISL_FC_RC_NOEXP:
- case SISL_FC_RC_INUSE:
- scp->result = (DID_ERROR << 16);
- break;
- }
- }
-
- if (ioasa->rc.afu_rc) {
- /* We have an AFU error */
- switch (ioasa->rc.afu_rc) {
- case SISL_AFU_RC_NO_CHANNELS:
- scp->result = (DID_NO_CONNECT << 16);
- break;
- case SISL_AFU_RC_DATA_DMA_ERR:
- switch (ioasa->afu_extra) {
- case SISL_AFU_DMA_ERR_PAGE_IN:
- /* Retry */
- scp->result = (DID_IMM_RETRY << 16);
- break;
- case SISL_AFU_DMA_ERR_INVALID_EA:
- default:
- scp->result = (DID_ERROR << 16);
- }
- break;
- case SISL_AFU_RC_OUT_OF_DATA_BUFS:
- /* Retry */
- scp->result = (DID_ERROR << 16);
- break;
- default:
- scp->result = (DID_ERROR << 16);
- }
- }
-}
-
-/**
- * cmd_complete() - command completion handler
- * @cmd: AFU command that has completed.
- *
- * For SCSI commands this routine prepares and submits commands that have
- * either completed or timed out to the SCSI stack. For internal commands
- * (TMF or AFU), this routine simply notifies the originator that the
- * command has completed.
- */
-static void cmd_complete(struct afu_cmd *cmd)
-{
- struct scsi_cmnd *scp;
- ulong lock_flags;
- struct afu *afu = cmd->parent;
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
-
- spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
- list_del(&cmd->list);
- spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
-
- if (cmd->scp) {
- scp = cmd->scp;
- if (unlikely(cmd->sa.ioasc))
- process_cmd_err(cmd, scp);
- else
- scp->result = (DID_OK << 16);
-
- dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
- __func__, scp, scp->result, cmd->sa.ioasc);
- scsi_done(scp);
- } else if (cmd->cmd_tmf) {
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- cfg->tmf_active = false;
- wake_up_all_locked(&cfg->tmf_waitq);
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
- } else
- complete(&cmd->cevent);
-}
-
-/**
- * flush_pending_cmds() - flush all pending commands on this hardware queue
- * @hwq: Hardware queue to flush.
- *
- * The hardware send queue lock associated with this hardware queue must be
- * held when calling this routine.
- */
-static void flush_pending_cmds(struct hwq *hwq)
-{
- struct cxlflash_cfg *cfg = hwq->afu->parent;
- struct afu_cmd *cmd, *tmp;
- struct scsi_cmnd *scp;
- ulong lock_flags;
-
- list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
- /* Bypass command when on a doneq, cmd_complete() will handle */
- if (!list_empty(&cmd->queue))
- continue;
-
- list_del(&cmd->list);
-
- if (cmd->scp) {
- scp = cmd->scp;
- scp->result = (DID_IMM_RETRY << 16);
- scsi_done(scp);
- } else {
- cmd->cmd_aborted = true;
-
- if (cmd->cmd_tmf) {
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- cfg->tmf_active = false;
- wake_up_all_locked(&cfg->tmf_waitq);
- spin_unlock_irqrestore(&cfg->tmf_slock,
- lock_flags);
- } else
- complete(&cmd->cevent);
- }
- }
-}
-
-/**
- * context_reset() - reset context via specified register
- * @hwq: Hardware queue owning the context to be reset.
- * @reset_reg: MMIO register to perform reset.
- *
- * When the reset is successful, the SISLite specification guarantees that
- * the AFU has aborted all currently pending I/O. Accordingly, these commands
- * must be flushed.
- *
- * Return: 0 on success, -errno on failure
- */
-static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
-{
- struct cxlflash_cfg *cfg = hwq->afu->parent;
- struct device *dev = &cfg->dev->dev;
- int rc = -ETIMEDOUT;
- int nretry = 0;
- u64 val = 0x1;
- ulong lock_flags;
-
- dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);
-
- spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
-
- writeq_be(val, reset_reg);
- do {
- val = readq_be(reset_reg);
- if ((val & 0x1) == 0x0) {
- rc = 0;
- break;
- }
-
- /* Double delay each time */
- udelay(1 << nretry);
- } while (nretry++ < MC_ROOM_RETRY_CNT);
-
- if (!rc)
- flush_pending_cmds(hwq);
-
- spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
-
- dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
- __func__, rc, val, nretry);
- return rc;
-}
-
-/**
- * context_reset_ioarrin() - reset context via IOARRIN register
- * @hwq: Hardware queue owning the context to be reset.
- *
- * Return: 0 on success, -errno on failure
- */
-static int context_reset_ioarrin(struct hwq *hwq)
-{
- return context_reset(hwq, &hwq->host_map->ioarrin);
-}
-
-/**
- * context_reset_sq() - reset context via SQ_CONTEXT_RESET register
- * @hwq: Hardware queue owning the context to be reset.
- *
- * Return: 0 on success, -errno on failure
- */
-static int context_reset_sq(struct hwq *hwq)
-{
- return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
-}
-
-/**
- * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
- * @afu: AFU associated with the host.
- * @cmd: AFU command to send.
- *
- * Return:
- * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
- */
-static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
- int rc = 0;
- s64 room;
- ulong lock_flags;
-
- /*
- * To avoid the performance penalty of MMIO, spread the update of
- * 'room' over multiple commands.
- */
- spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
- if (--hwq->room < 0) {
- room = readq_be(&hwq->host_map->cmd_room);
- if (room <= 0) {
- dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
- "0x%02X, room=0x%016llX\n",
- __func__, cmd->rcb.cdb[0], room);
- hwq->room = 0;
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
- hwq->room = room - 1;
- }
-
- list_add(&cmd->list, &hwq->pending_cmds);
- writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
-out:
- spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
- dev_dbg_ratelimited(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n",
- __func__, cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
- return rc;
-}
-
-/**
- * send_cmd_sq() - sends an AFU command via SQ ring
- * @afu: AFU associated with the host.
- * @cmd: AFU command to send.
- *
- * Return:
- * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
- */
-static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
- int rc = 0;
- int newval;
- ulong lock_flags;
-
- newval = atomic_dec_if_positive(&hwq->hsq_credits);
- if (newval <= 0) {
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
-
- cmd->rcb.ioasa = &cmd->sa;
-
- spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
-
- *hwq->hsq_curr = cmd->rcb;
- if (hwq->hsq_curr < hwq->hsq_end)
- hwq->hsq_curr++;
- else
- hwq->hsq_curr = hwq->hsq_start;
-
- list_add(&cmd->list, &hwq->pending_cmds);
- writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);
-
- spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
-out:
- dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
- "head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
- cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
- readq_be(&hwq->host_map->sq_head),
- readq_be(&hwq->host_map->sq_tail));
- return rc;
-}
-
-/**
- * wait_resp() - polls for a response or timeout to a sent AFU command
- * @afu: AFU associated with the host.
- * @cmd: AFU command that was sent.
- *
- * Return: 0 on success, -errno on failure
- */
-static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
- ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
-
- timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
- if (!timeout)
- rc = -ETIMEDOUT;
-
- if (cmd->cmd_aborted)
- rc = -EAGAIN;
-
- if (unlikely(cmd->sa.ioasc != 0)) {
- dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
- __func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
- rc = -EIO;
- }
-
- return rc;
-}
-
-/**
- * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
- * @host: SCSI host associated with device.
- * @scp: SCSI command to send.
- * @afu: SCSI command to send.
- *
- * Hashes a command based upon the hardware queue mode.
- *
- * Return: Trusted index of target hardware queue
- */
-static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
- struct afu *afu)
-{
- u32 tag;
- u32 hwq = 0;
-
- if (afu->num_hwqs == 1)
- return 0;
-
- switch (afu->hwq_mode) {
- case HWQ_MODE_RR:
- hwq = afu->hwq_rr_count++ % afu->num_hwqs;
- break;
- case HWQ_MODE_TAG:
- tag = blk_mq_unique_tag(scsi_cmd_to_rq(scp));
- hwq = blk_mq_unique_tag_to_hwq(tag);
- break;
- case HWQ_MODE_CPU:
- hwq = smp_processor_id() % afu->num_hwqs;
- break;
- default:
- WARN_ON_ONCE(1);
- }
-
- return hwq;
-}
-
-/**
- * send_tmf() - sends a Task Management Function (TMF)
- * @cfg: Internal structure associated with the host.
- * @sdev: SCSI device destined for TMF.
- * @tmfcmd: TMF command to send.
- *
- * Return:
- * 0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure
- */
-static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
- u64 tmfcmd)
-{
- struct afu *afu = cfg->afu;
- struct afu_cmd *cmd = NULL;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
- bool needs_deletion = false;
- char *buf = NULL;
- ulong lock_flags;
- int rc = 0;
- ulong to;
-
- buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
- if (unlikely(!buf)) {
- dev_err(dev, "%s: no memory for command\n", __func__);
- rc = -ENOMEM;
- goto out;
- }
-
- cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
- INIT_LIST_HEAD(&cmd->queue);
-
- /* When Task Management Function is active do not send another */
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- if (cfg->tmf_active)
- wait_event_interruptible_lock_irq(cfg->tmf_waitq,
- !cfg->tmf_active,
- cfg->tmf_slock);
- cfg->tmf_active = true;
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
-
- cmd->parent = afu;
- cmd->cmd_tmf = true;
- cmd->hwq_index = hwq->index;
-
- cmd->rcb.ctx_id = hwq->ctx_hndl;
- cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
- cmd->rcb.port_sel = CHAN2PORTMASK(sdev->channel);
- cmd->rcb.lun_id = lun_to_lunid(sdev->lun);
- cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
- SISL_REQ_FLAGS_SUP_UNDERRUN |
- SISL_REQ_FLAGS_TMF_CMD);
- memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));
-
- rc = afu->send_cmd(afu, cmd);
- if (unlikely(rc)) {
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- cfg->tmf_active = false;
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
- goto out;
- }
-
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- to = msecs_to_jiffies(5000);
- to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
- !cfg->tmf_active,
- cfg->tmf_slock,
- to);
- if (!to) {
- dev_err(dev, "%s: TMF timed out\n", __func__);
- rc = -ETIMEDOUT;
- needs_deletion = true;
- } else if (cmd->cmd_aborted) {
- dev_err(dev, "%s: TMF aborted\n", __func__);
- rc = -EAGAIN;
- } else if (cmd->sa.ioasc) {
- dev_err(dev, "%s: TMF failed ioasc=%08x\n",
- __func__, cmd->sa.ioasc);
- rc = -EIO;
- }
- cfg->tmf_active = false;
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
-
- if (needs_deletion) {
- spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
- list_del(&cmd->list);
- spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
- }
-out:
- kfree(buf);
- return rc;
-}
-
-/**
- * cxlflash_driver_info() - information handler for this host driver
- * @host: SCSI host associated with device.
- *
- * Return: A string describing the device.
- */
-static const char *cxlflash_driver_info(struct Scsi_Host *host)
-{
- return CXLFLASH_ADAPTER_NAME;
-}
-
-/**
- * cxlflash_queuecommand() - sends a mid-layer request
- * @host: SCSI host associated with device.
- * @scp: SCSI command to send.
- *
- * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
- */
-static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
-{
- struct cxlflash_cfg *cfg = shost_priv(host);
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct afu_cmd *cmd = sc_to_afuci(scp);
- struct scatterlist *sg = scsi_sglist(scp);
- int hwq_index = cmd_to_target_hwq(host, scp, afu);
- struct hwq *hwq = get_hwq(afu, hwq_index);
- u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
- ulong lock_flags;
- int rc = 0;
-
- dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
- "cdb=(%08x-%08x-%08x-%08x)\n",
- __func__, scp, host->host_no, scp->device->channel,
- scp->device->id, scp->device->lun,
- get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
-
- /*
- * If a Task Management Function is active, wait for it to complete
- * before continuing with regular commands.
- */
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- if (cfg->tmf_active) {
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
-
- switch (cfg->state) {
- case STATE_PROBING:
- case STATE_PROBED:
- case STATE_RESET:
- dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- case STATE_FAILTERM:
- dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
- scp->result = (DID_NO_CONNECT << 16);
- scsi_done(scp);
- rc = 0;
- goto out;
- default:
- atomic_inc(&afu->cmds_active);
- break;
- }
-
- if (likely(sg)) {
- cmd->rcb.data_len = sg->length;
- cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
- }
-
- cmd->scp = scp;
- cmd->parent = afu;
- cmd->hwq_index = hwq_index;
-
- cmd->sa.ioasc = 0;
- cmd->rcb.ctx_id = hwq->ctx_hndl;
- cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
- cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
- cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
-
- if (scp->sc_data_direction == DMA_TO_DEVICE)
- req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
-
- cmd->rcb.req_flags = req_flags;
- memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
-
- rc = afu->send_cmd(afu, cmd);
- atomic_dec(&afu->cmds_active);
-out:
- return rc;
-}
-
-/**
- * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
- * @cfg: Internal structure associated with the host.
- */
-static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
-{
- struct pci_dev *pdev = cfg->dev;
-
- if (pci_channel_offline(pdev))
- wait_event_timeout(cfg->reset_waitq,
- !pci_channel_offline(pdev),
- CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
-}
-
-/**
- * free_mem() - free memory associated with the AFU
- * @cfg: Internal structure associated with the host.
- */
-static void free_mem(struct cxlflash_cfg *cfg)
-{
- struct afu *afu = cfg->afu;
-
- if (cfg->afu) {
- free_pages((ulong)afu, get_order(sizeof(struct afu)));
- cfg->afu = NULL;
- }
-}
-
-/**
- * cxlflash_reset_sync() - synchronizing point for asynchronous resets
- * @cfg: Internal structure associated with the host.
- */
-static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
-{
- if (cfg->async_reset_cookie == 0)
- return;
-
- /* Wait until all async calls prior to this cookie have completed */
- async_synchronize_cookie(cfg->async_reset_cookie + 1);
- cfg->async_reset_cookie = 0;
-}
-
-/**
- * stop_afu() - stops the AFU command timers and unmaps the MMIO space
- * @cfg: Internal structure associated with the host.
- *
- * Safe to call with AFU in a partially allocated/initialized state.
- *
- * Cancels scheduled worker threads, waits for any active internal AFU
- * commands to timeout, disables IRQ polling and then unmaps the MMIO space.
- */
-static void stop_afu(struct cxlflash_cfg *cfg)
-{
- struct afu *afu = cfg->afu;
- struct hwq *hwq;
- int i;
-
- cancel_work_sync(&cfg->work_q);
- if (!current_is_async())
- cxlflash_reset_sync(cfg);
-
- if (likely(afu)) {
- while (atomic_read(&afu->cmds_active))
- ssleep(1);
-
- if (afu_is_irqpoll_enabled(afu)) {
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- irq_poll_disable(&hwq->irqpoll);
- }
- }
-
- if (likely(afu->afu_map)) {
- cfg->ops->psa_unmap(afu->afu_map);
- afu->afu_map = NULL;
- }
- }
-}
-
-/**
- * term_intr() - disables all AFU interrupts
- * @cfg: Internal structure associated with the host.
- * @level: Depth of allocation, where to begin waterfall tear down.
- * @index: Index of the hardware queue.
- *
- * Safe to call with AFU/MC in partially allocated/initialized state.
- */
-static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
- u32 index)
-{
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq;
-
- if (!afu) {
- dev_err(dev, "%s: returning with NULL afu\n", __func__);
- return;
- }
-
- hwq = get_hwq(afu, index);
-
- if (!hwq->ctx_cookie) {
- dev_err(dev, "%s: returning with NULL MC\n", __func__);
- return;
- }
-
- switch (level) {
- case UNMAP_THREE:
- /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
- if (index == PRIMARY_HWQ)
- cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
- fallthrough;
- case UNMAP_TWO:
- cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
- fallthrough;
- case UNMAP_ONE:
- cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
- fallthrough;
- case FREE_IRQ:
- cfg->ops->free_afu_irqs(hwq->ctx_cookie);
- fallthrough;
- case UNDO_NOOP:
- /* No action required */
- break;
- }
-}
-
-/**
- * term_mc() - terminates the master context
- * @cfg: Internal structure associated with the host.
- * @index: Index of the hardware queue.
- *
- * Safe to call with AFU/MC in partially allocated/initialized state.
- */
-static void term_mc(struct cxlflash_cfg *cfg, u32 index)
-{
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq;
- ulong lock_flags;
-
- if (!afu) {
- dev_err(dev, "%s: returning with NULL afu\n", __func__);
- return;
- }
-
- hwq = get_hwq(afu, index);
-
- if (!hwq->ctx_cookie) {
- dev_err(dev, "%s: returning with NULL MC\n", __func__);
- return;
- }
-
- WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
- if (index != PRIMARY_HWQ)
- WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
- hwq->ctx_cookie = NULL;
-
- spin_lock_irqsave(&hwq->hrrq_slock, lock_flags);
- hwq->hrrq_online = false;
- spin_unlock_irqrestore(&hwq->hrrq_slock, lock_flags);
-
- spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
- flush_pending_cmds(hwq);
- spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
-}
-
-/**
- * term_afu() - terminates the AFU
- * @cfg: Internal structure associated with the host.
- *
- * Safe to call with AFU/MC in partially allocated/initialized state.
- */
-static void term_afu(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
- int k;
-
- /*
- * Tear down is carefully orchestrated to ensure
- * no interrupts can come in when the problem state
- * area is unmapped.
- *
- * 1) Disable all AFU interrupts for each master
- * 2) Unmap the problem state area
- * 3) Stop each master context
- */
- for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
- term_intr(cfg, UNMAP_THREE, k);
-
- stop_afu(cfg);
-
- for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
- term_mc(cfg, k);
-
- dev_dbg(dev, "%s: returning\n", __func__);
-}
-
-/**
- * notify_shutdown() - notifies device of pending shutdown
- * @cfg: Internal structure associated with the host.
- * @wait: Whether to wait for shutdown processing to complete.
- *
- * This function will notify the AFU that the adapter is being shutdown
- * and will wait for shutdown processing to complete if wait is true.
- * This notification should flush pending I/Os to the device and halt
- * further I/Os until the next AFU reset is issued and device restarted.
- */
-static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
-{
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct dev_dependent_vals *ddv;
- __be64 __iomem *fc_port_regs;
- u64 reg, status;
- int i, retry_cnt = 0;
-
- ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
- if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
- return;
-
- if (!afu || !afu->afu_map) {
- dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
- return;
- }
-
- /* Notify AFU */
- for (i = 0; i < cfg->num_fc_ports; i++) {
- fc_port_regs = get_fc_port_regs(cfg, i);
-
- reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
- reg |= SISL_FC_SHUTDOWN_NORMAL;
- writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
- }
-
- if (!wait)
- return;
-
- /* Wait up to 1.5 seconds for shutdown processing to complete */
- for (i = 0; i < cfg->num_fc_ports; i++) {
- fc_port_regs = get_fc_port_regs(cfg, i);
- retry_cnt = 0;
-
- while (true) {
- status = readq_be(&fc_port_regs[FC_STATUS / 8]);
- if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
- break;
- if (++retry_cnt >= MC_RETRY_CNT) {
- dev_dbg(dev, "%s: port %d shutdown processing "
- "not yet completed\n", __func__, i);
- break;
- }
- msleep(100 * retry_cnt);
- }
- }
-}
-
-/**
- * cxlflash_get_minor() - gets the first available minor number
- *
- * Return: Unique minor number that can be used to create the character device.
- */
-static int cxlflash_get_minor(void)
-{
- int minor;
- long bit;
-
- bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
- if (bit >= CXLFLASH_MAX_ADAPTERS)
- return -1;
-
- minor = bit & MINORMASK;
- set_bit(minor, cxlflash_minor);
- return minor;
-}
-
-/**
- * cxlflash_put_minor() - releases the minor number
- * @minor: Minor number that is no longer needed.
- */
-static void cxlflash_put_minor(int minor)
-{
- clear_bit(minor, cxlflash_minor);
-}
-
-/**
- * cxlflash_release_chrdev() - release the character device for the host
- * @cfg: Internal structure associated with the host.
- */
-static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg)
-{
- device_unregister(cfg->chardev);
- cfg->chardev = NULL;
- cdev_del(&cfg->cdev);
- cxlflash_put_minor(MINOR(cfg->cdev.dev));
-}
-
-/**
- * cxlflash_remove() - PCI entry point to tear down host
- * @pdev: PCI device associated with the host.
- *
- * Safe to use as a cleanup in partially allocated/initialized state. Note that
- * the reset_waitq is flushed as part of the stop/termination of user contexts.
- */
-static void cxlflash_remove(struct pci_dev *pdev)
-{
- struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
- struct device *dev = &pdev->dev;
- ulong lock_flags;
-
- if (!pci_is_enabled(pdev)) {
- dev_dbg(dev, "%s: Device is disabled\n", __func__);
- return;
- }
-
- /* Yield to running recovery threads before continuing with remove */
- wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
- cfg->state != STATE_PROBING);
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- if (cfg->tmf_active)
- wait_event_interruptible_lock_irq(cfg->tmf_waitq,
- !cfg->tmf_active,
- cfg->tmf_slock);
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
-
- /* Notify AFU and wait for shutdown processing to complete */
- notify_shutdown(cfg, true);
-
- cfg->state = STATE_FAILTERM;
- cxlflash_stop_term_user_contexts(cfg);
-
- switch (cfg->init_state) {
- case INIT_STATE_CDEV:
- cxlflash_release_chrdev(cfg);
- fallthrough;
- case INIT_STATE_SCSI:
- cxlflash_term_local_luns(cfg);
- scsi_remove_host(cfg->host);
- fallthrough;
- case INIT_STATE_AFU:
- term_afu(cfg);
- fallthrough;
- case INIT_STATE_PCI:
- cfg->ops->destroy_afu(cfg->afu_cookie);
- pci_disable_device(pdev);
- fallthrough;
- case INIT_STATE_NONE:
- free_mem(cfg);
- scsi_host_put(cfg->host);
- break;
- }
-
- dev_dbg(dev, "%s: returning\n", __func__);
-}
-
-/**
- * alloc_mem() - allocates the AFU and its command pool
- * @cfg: Internal structure associated with the host.
- *
- * A partially allocated state remains on failure.
- *
- * Return:
- * 0 on success
- * -ENOMEM on failure to allocate memory
- */
-static int alloc_mem(struct cxlflash_cfg *cfg)
-{
- int rc = 0;
- struct device *dev = &cfg->dev->dev;
-
- /* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
- cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(sizeof(struct afu)));
- if (unlikely(!cfg->afu)) {
- dev_err(dev, "%s: cannot get %d free pages\n",
- __func__, get_order(sizeof(struct afu)));
- rc = -ENOMEM;
- goto out;
- }
- cfg->afu->parent = cfg;
- cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
- cfg->afu->afu_map = NULL;
-out:
- return rc;
-}
-
-/**
- * init_pci() - initializes the host as a PCI device
- * @cfg: Internal structure associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_pci(struct cxlflash_cfg *cfg)
-{
- struct pci_dev *pdev = cfg->dev;
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
-
- rc = pci_enable_device(pdev);
- if (rc || pci_channel_offline(pdev)) {
- if (pci_channel_offline(pdev)) {
- cxlflash_wait_for_pci_err_recovery(cfg);
- rc = pci_enable_device(pdev);
- }
-
- if (rc) {
- dev_err(dev, "%s: Cannot enable adapter\n", __func__);
- cxlflash_wait_for_pci_err_recovery(cfg);
- goto out;
- }
- }
-
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * init_scsi() - adds the host to the SCSI stack and kicks off host scan
- * @cfg: Internal structure associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_scsi(struct cxlflash_cfg *cfg)
-{
- struct pci_dev *pdev = cfg->dev;
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
-
- rc = scsi_add_host(cfg->host, &pdev->dev);
- if (rc) {
- dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
- goto out;
- }
-
- scsi_scan_host(cfg->host);
-
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * set_port_online() - transitions the specified host FC port to online state
- * @fc_regs: Top of MMIO region defined for specified port.
- *
- * The provided MMIO region must be mapped prior to call. Online state means
- * that the FC link layer has synced, completed the handshaking process, and
- * is ready for login to start.
- */
-static void set_port_online(__be64 __iomem *fc_regs)
-{
- u64 cmdcfg;
-
- cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
- cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE); /* clear OFF_LINE */
- cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE); /* set ON_LINE */
- writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
-}
-
-/**
- * set_port_offline() - transitions the specified host FC port to offline state
- * @fc_regs: Top of MMIO region defined for specified port.
- *
- * The provided MMIO region must be mapped prior to call.
- */
-static void set_port_offline(__be64 __iomem *fc_regs)
-{
- u64 cmdcfg;
-
- cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
- cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE); /* clear ON_LINE */
- cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE); /* set OFF_LINE */
- writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
-}
-
-/**
- * wait_port_online() - waits for the specified host FC port come online
- * @fc_regs: Top of MMIO region defined for specified port.
- * @delay_us: Number of microseconds to delay between reading port status.
- * @nretry: Number of cycles to retry reading port status.
- *
- * The provided MMIO region must be mapped prior to call. This will timeout
- * when the cable is not plugged in.
- *
- * Return:
- * TRUE (1) when the specified port is online
- * FALSE (0) when the specified port fails to come online after timeout
- */
-static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
-{
- u64 status;
-
- WARN_ON(delay_us < 1000);
-
- do {
- msleep(delay_us / 1000);
- status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
- if (status == U64_MAX)
- nretry /= 2;
- } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
- nretry--);
-
- return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
-}
-
-/**
- * wait_port_offline() - waits for the specified host FC port go offline
- * @fc_regs: Top of MMIO region defined for specified port.
- * @delay_us: Number of microseconds to delay between reading port status.
- * @nretry: Number of cycles to retry reading port status.
- *
- * The provided MMIO region must be mapped prior to call.
- *
- * Return:
- * TRUE (1) when the specified port is offline
- * FALSE (0) when the specified port fails to go offline after timeout
- */
-static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
-{
- u64 status;
-
- WARN_ON(delay_us < 1000);
-
- do {
- msleep(delay_us / 1000);
- status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
- if (status == U64_MAX)
- nretry /= 2;
- } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
- nretry--);
-
- return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
-}
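
The two wait helpers above boil down to a simple poll loop: sleep between status reads, shrink the remaining retry budget when a read returns all-ones (a hint that the MMIO path is dead), and give up once the budget runs out. Below is a minimal user-space sketch of that pattern; read_status(), STATUS_MASK and STATUS_ONLINE are stand-ins for illustration, not the driver's symbols.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define STATUS_MASK   0x7ULL
#define STATUS_ONLINE 0x1ULL

/* Stand-in for the MMIO status read; the driver uses readq_be() instead. */
static uint64_t read_status(void)
{
        return STATUS_ONLINE;
}

/* Poll until the masked status matches, halving the budget on dead reads. */
static bool wait_status(uint64_t wanted, uint32_t delay_us, uint32_t nretry)
{
        uint64_t status;

        do {
                usleep(delay_us);
                status = read_status();
                if (status == UINT64_MAX)       /* all-ones: path looks dead */
                        nretry /= 2;
        } while ((status & STATUS_MASK) != wanted && nretry--);

        return (status & STATUS_MASK) == wanted;
}

int main(void)
{
        printf("online: %d\n", wait_status(STATUS_ONLINE, 1000, 10));
        return 0;
}
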
-
-/**
- * afu_set_wwpn() - configures the WWPN for the specified host FC port
- * @afu: AFU associated with the host that owns the specified FC port.
- * @port: Port number being configured.
- * @fc_regs: Top of MMIO region defined for specified port.
- * @wwpn: The world-wide-port-number previously discovered for port.
- *
- * The provided MMIO region must be mapped prior to call. As part of the
- * sequence to configure the WWPN, the port is toggled offline and then back
- * online. This toggling action can cause this routine to delay up to a few
- * seconds. When configured to use the internal LUN feature of the AFU, a
- * failure to come online is overridden.
- */
-static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
- u64 wwpn)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
-
- set_port_offline(fc_regs);
- if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
- FC_PORT_STATUS_RETRY_CNT)) {
- dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
- __func__, port);
- }
-
- writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
-
- set_port_online(fc_regs);
- if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
- FC_PORT_STATUS_RETRY_CNT)) {
- dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
- __func__, port);
- }
-}
-
-/**
- * afu_link_reset() - resets the specified host FC port
- * @afu: AFU associated with the host that owns the specified FC port.
- * @port: Port number being configured.
- * @fc_regs: Top of MMIO region defined for specified port.
- *
- * The provided MMIO region must be mapped prior to call. The sequence to
- * reset the port involves toggling it offline and then back online. This
- * action can cause this routine to delay up to a few seconds. An effort
- * is made to maintain the link with the device by switching the host to
- * use the alternate port exclusively while the reset takes place. A
- * failure of the port to come back online is logged but not treated as
- * fatal.
- */
-static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- u64 port_sel;
-
- /* first switch the AFU to the other links, if any */
- port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
- port_sel &= ~(1ULL << port);
- writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
- cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
-
- set_port_offline(fc_regs);
- if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
- FC_PORT_STATUS_RETRY_CNT))
- dev_err(dev, "%s: wait on port %d to go offline timed out\n",
- __func__, port);
-
- set_port_online(fc_regs);
- if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
- FC_PORT_STATUS_RETRY_CNT))
- dev_err(dev, "%s: wait on port %d to go online timed out\n",
- __func__, port);
-
- /* switch back to include this port */
- port_sel |= (1ULL << port);
- writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
- cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
-
- dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
-}
-
-/**
- * afu_err_intr_init() - clears and initializes the AFU for error interrupts
- * @afu: AFU associated with the host.
- */
-static void afu_err_intr_init(struct afu *afu)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- __be64 __iomem *fc_port_regs;
- int i;
- struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
- u64 reg;
-
- /* global async interrupts: AFU clears afu_ctrl on context exit
- * if async interrupts were sent to that context. This prevents
- * the AFU from sending further async interrupts when there is
- * nobody to receive them.
- */
-
- /* mask all */
- writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
- /* set LISN# to send and point to primary master context */
- reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);
-
- if (afu->internal_lun)
- reg |= 1; /* Bit 63 indicates local lun */
- writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
- /* clear all */
- writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
- /* unmask bits that are of interest */
- /* note: afu can send an interrupt after this step */
- writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
- /* clear again in case a bit came on after previous clear but before */
- /* unmask */
- writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
-
- /* Clear/Set internal lun bits */
- fc_port_regs = get_fc_port_regs(cfg, 0);
- reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
- reg &= SISL_FC_INTERNAL_MASK;
- if (afu->internal_lun)
- reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
- writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
-
- /* now clear FC errors */
- for (i = 0; i < cfg->num_fc_ports; i++) {
- fc_port_regs = get_fc_port_regs(cfg, i);
-
- writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
- writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
- }
-
- /* sync interrupts for master's IOARRIN write */
- /* note that unlike asyncs, there can be no pending sync interrupts */
- /* at this time (this is a fresh context and master has not written */
- /* IOARRIN yet), so there is nothing to clear. */
-
- /* set LISN#, it is always sent to the context that wrote IOARRIN */
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- reg = readq_be(&hwq->host_map->ctx_ctrl);
- WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0);
- reg |= SISL_MSI_SYNC_ERROR;
- writeq_be(reg, &hwq->host_map->ctx_ctrl);
- writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
- }
-}
-
-/**
- * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
- * @irq: Interrupt number.
- * @data: Private data provided at interrupt registration, the AFU.
- *
- * Return: Always return IRQ_HANDLED.
- */
-static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
-{
- struct hwq *hwq = (struct hwq *)data;
- struct cxlflash_cfg *cfg = hwq->afu->parent;
- struct device *dev = &cfg->dev->dev;
- u64 reg;
- u64 reg_unmasked;
-
- reg = readq_be(&hwq->host_map->intr_status);
- reg_unmasked = (reg & SISL_ISTATUS_UNMASK);
-
- if (reg_unmasked == 0UL) {
- dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
- __func__, reg);
- goto cxlflash_sync_err_irq_exit;
- }
-
- dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
- __func__, reg);
-
- writeq_be(reg_unmasked, &hwq->host_map->intr_clear);
-
-cxlflash_sync_err_irq_exit:
- return IRQ_HANDLED;
-}
-
-/**
- * process_hrrq() - process the read-response queue
- * @hwq: HWQ associated with the host.
- * @doneq: Queue of commands harvested from the RRQ.
- * @budget: Threshold of RRQ entries to process.
- *
- * This routine must be called with the RRQ spin lock held and interrupts
- * disabled.
- *
- * Return: The number of entries processed.
- */
-static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget)
-{
- struct afu *afu = hwq->afu;
- struct afu_cmd *cmd;
- struct sisl_ioasa *ioasa;
- struct sisl_ioarcb *ioarcb;
- bool toggle = hwq->toggle;
- int num_hrrq = 0;
- u64 entry,
- *hrrq_start = hwq->hrrq_start,
- *hrrq_end = hwq->hrrq_end,
- *hrrq_curr = hwq->hrrq_curr;
-
- /* Process ready RRQ entries up to the specified budget (if any) */
- while (true) {
- entry = *hrrq_curr;
-
- if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
- break;
-
- entry &= ~SISL_RESP_HANDLE_T_BIT;
-
- if (afu_is_sq_cmd_mode(afu)) {
- ioasa = (struct sisl_ioasa *)entry;
- cmd = container_of(ioasa, struct afu_cmd, sa);
- } else {
- ioarcb = (struct sisl_ioarcb *)entry;
- cmd = container_of(ioarcb, struct afu_cmd, rcb);
- }
-
- list_add_tail(&cmd->queue, doneq);
-
- /* Advance to next entry or wrap and flip the toggle bit */
- if (hrrq_curr < hrrq_end)
- hrrq_curr++;
- else {
- hrrq_curr = hrrq_start;
- toggle ^= SISL_RESP_HANDLE_T_BIT;
- }
-
- atomic_inc(&hwq->hsq_credits);
- num_hrrq++;
-
- if (budget > 0 && num_hrrq >= budget)
- break;
- }
-
- hwq->hrrq_curr = hrrq_curr;
- hwq->toggle = toggle;
-
- return num_hrrq;
-}
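
process_hrrq() consumes a ring that carries no explicit empty marker; instead each entry holds a toggle bit that the producer flips on every wrap, so an entry is ready only while its toggle matches the value the consumer expects. A stripped-down, single-threaded sketch of that convention follows (the producer here is an ordinary function standing in for the AFU, and the entry layout is invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define RING_ENTRIES    4
#define T_BIT           0x1ULL

static uint64_t ring[RING_ENTRIES];             /* zeroed, so nothing is "ready" */
static unsigned int prod_idx, cons_idx;
static uint64_t prod_toggle = T_BIT, cons_toggle = T_BIT;

/* Producer: publish a value (low bit reserved for the toggle). */
static void produce(uint64_t val)
{
        ring[prod_idx] = (val << 1) | prod_toggle;
        if (++prod_idx == RING_ENTRIES) {
                prod_idx = 0;
                prod_toggle ^= T_BIT;           /* flip on wrap */
        }
}

/* Consumer: drain every entry whose toggle matches the expected value. */
static int consume(void)
{
        int n = 0;

        while ((ring[cons_idx] & T_BIT) == cons_toggle) {
                printf("got %llu\n",
                       (unsigned long long)(ring[cons_idx] >> 1));
                if (++cons_idx == RING_ENTRIES) {
                        cons_idx = 0;
                        cons_toggle ^= T_BIT;   /* flip on wrap */
                }
                n++;
        }
        return n;
}

int main(void)
{
        for (uint64_t i = 1; i <= 3; i++)
                produce(i);
        printf("first pass: %d entries\n", consume());
        for (uint64_t i = 4; i <= 6; i++)
                produce(i);
        printf("second pass (across wrap): %d entries\n", consume());
        return 0;
}

The benefit of the toggle convention is that neither side ever has to write an "empty" value back into the ring, which keeps the hardware's writes strictly append-only.
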
-
-/**
- * process_cmd_doneq() - process a queue of harvested RRQ commands
- * @doneq: Queue of completed commands.
- *
- * Note that upon return the queue can no longer be trusted.
- */
-static void process_cmd_doneq(struct list_head *doneq)
-{
- struct afu_cmd *cmd, *tmp;
-
- WARN_ON(list_empty(doneq));
-
- list_for_each_entry_safe(cmd, tmp, doneq, queue)
- cmd_complete(cmd);
-}
-
-/**
- * cxlflash_irqpoll() - processes a budgeted number of RRQ entries when polling
- * @irqpoll: IRQ poll structure associated with queue to poll.
- * @budget: Threshold of RRQ entries to process per poll.
- *
- * Return: The number of entries processed.
- */
-static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
-{
- struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll);
- unsigned long hrrq_flags;
- LIST_HEAD(doneq);
- int num_entries = 0;
-
- spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
-
- num_entries = process_hrrq(hwq, &doneq, budget);
- if (num_entries < budget)
- irq_poll_complete(irqpoll);
-
- spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
-
- process_cmd_doneq(&doneq);
- return num_entries;
-}
-
-/**
- * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
- * @irq: Interrupt number.
- * @data: Private data provided at interrupt registration, the AFU.
- *
- * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
- */
-static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
-{
- struct hwq *hwq = (struct hwq *)data;
- struct afu *afu = hwq->afu;
- unsigned long hrrq_flags;
- LIST_HEAD(doneq);
- int num_entries = 0;
-
- spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
-
- /* Silently drop spurious interrupts when queue is not online */
- if (!hwq->hrrq_online) {
- spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
- return IRQ_HANDLED;
- }
-
- if (afu_is_irqpoll_enabled(afu)) {
- irq_poll_sched(&hwq->irqpoll);
- spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
- return IRQ_HANDLED;
- }
-
- num_entries = process_hrrq(hwq, &doneq, -1);
- spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
-
- if (num_entries == 0)
- return IRQ_NONE;
-
- process_cmd_doneq(&doneq);
- return IRQ_HANDLED;
-}
-
-/*
- * Asynchronous interrupt information table
- *
- * NOTE:
- * - Order matters here as this array is indexed by bit position.
- *
- * - The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro
- * as complex and complains due to a lack of parentheses/braces.
- */
-#define ASTATUS_FC(_a, _b, _c, _d) \
- { SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }
-
-#define BUILD_SISL_ASTATUS_FC_PORT(_a) \
- ASTATUS_FC(_a, LINK_UP, "link up", 0), \
- ASTATUS_FC(_a, LINK_DN, "link down", 0), \
- ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST), \
- ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR), \
- ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
- ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET), \
- ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0), \
- ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET)
-
-static const struct asyc_intr_info ainfo[] = {
- BUILD_SISL_ASTATUS_FC_PORT(1),
- BUILD_SISL_ASTATUS_FC_PORT(0),
- BUILD_SISL_ASTATUS_FC_PORT(3),
- BUILD_SISL_ASTATUS_FC_PORT(2)
-};
-
-/**
- * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
- * @irq: Interrupt number.
- * @data: Private data provided at interrupt registration, the AFU.
- *
- * Return: Always return IRQ_HANDLED.
- */
-static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
-{
- struct hwq *hwq = (struct hwq *)data;
- struct afu *afu = hwq->afu;
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- const struct asyc_intr_info *info;
- struct sisl_global_map __iomem *global = &afu->afu_map->global;
- __be64 __iomem *fc_port_regs;
- u64 reg_unmasked;
- u64 reg;
- u64 bit;
- u8 port;
-
- reg = readq_be(&global->regs.aintr_status);
- reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
-
- if (unlikely(reg_unmasked == 0)) {
- dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
- __func__, reg);
- goto out;
- }
-
- /* FYI, it is 'okay' to clear AFU status before FC_ERROR */
- writeq_be(reg_unmasked, &global->regs.aintr_clear);
-
- /* Check each bit that is on */
- for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) {
- if (unlikely(bit >= ARRAY_SIZE(ainfo))) {
- WARN_ON_ONCE(1);
- continue;
- }
-
- info = &ainfo[bit];
- if (unlikely(info->status != 1ULL << bit)) {
- WARN_ON_ONCE(1);
- continue;
- }
-
- port = info->port;
- fc_port_regs = get_fc_port_regs(cfg, port);
-
- dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
- __func__, port, info->desc,
- readq_be(&fc_port_regs[FC_STATUS / 8]));
-
- /*
- * Do link reset first, some OTHER errors will set FC_ERROR
- * again if cleared before or w/o a reset
- */
- if (info->action & LINK_RESET) {
- dev_err(dev, "%s: FC Port %d: resetting link\n",
- __func__, port);
- cfg->lr_state = LINK_RESET_REQUIRED;
- cfg->lr_port = port;
- schedule_work(&cfg->work_q);
- }
-
- if (info->action & CLR_FC_ERROR) {
- reg = readq_be(&fc_port_regs[FC_ERROR / 8]);
-
- /*
- * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
- * should be the same and tracing one is sufficient.
- */
-
- dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
- __func__, port, reg);
-
- writeq_be(reg, &fc_port_regs[FC_ERROR / 8]);
- writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
- }
-
- if (info->action & SCAN_HOST) {
- atomic_inc(&cfg->scan_host_needed);
- schedule_work(&cfg->work_q);
- }
- }
-
-out:
- return IRQ_HANDLED;
-}
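
The async handler walks the unmasked status word bit by bit and uses each bit position to index a per-bit description table, sanity-checking that the entry really describes that bit. Below is a sketch of the same walk using a count-trailing-zeros builtin (GCC/Clang) in place of for_each_set_bit(); the table contents are invented for illustration.

#include <stdint.h>
#include <stdio.h>

struct intr_info {
        uint64_t status;        /* bit value this entry describes */
        const char *desc;
};

static const struct intr_info ainfo[] = {
        { 1ULL << 0, "link up" },
        { 1ULL << 1, "link down" },
        { 1ULL << 2, "login failed" },
};

static void handle_async(uint64_t unmasked)
{
        while (unmasked) {
                unsigned int bit = (unsigned int)__builtin_ctzll(unmasked);

                unmasked &= unmasked - 1;       /* clear lowest set bit */
                if (bit >= sizeof(ainfo) / sizeof(ainfo[0]))
                        continue;               /* unknown bit, skip it */
                if (ainfo[bit].status != (1ULL << bit))
                        continue;               /* table/bit mismatch */
                printf("bit %u: %s\n", bit, ainfo[bit].desc);
        }
}

int main(void)
{
        handle_async((1ULL << 0) | (1ULL << 2) | (1ULL << 7));
        return 0;
}
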
-
-/**
- * read_vpd() - obtains the WWPNs from VPD
- * @cfg: Internal structure associated with the host.
- * @wwpn: Array of size MAX_FC_PORTS to pass back WWPNs
- *
- * Return: 0 on success, -errno on failure
- */
-static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
-{
- struct device *dev = &cfg->dev->dev;
- struct pci_dev *pdev = cfg->dev;
- int i, k, rc = 0;
- unsigned int kw_size;
- ssize_t vpd_size;
- char vpd_data[CXLFLASH_VPD_LEN];
- char tmp_buf[WWPN_BUF_LEN] = { 0 };
- const struct dev_dependent_vals *ddv = (struct dev_dependent_vals *)
- cfg->dev_id->driver_data;
- const bool wwpn_vpd_required = ddv->flags & CXLFLASH_WWPN_VPD_REQUIRED;
- const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
-
- /* Get the VPD data from the device */
- vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
- if (unlikely(vpd_size <= 0)) {
- dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
- __func__, vpd_size);
- rc = -ENODEV;
- goto out;
- }
-
- /*
- * Find the offset of the WWPN tag within the read only
- * VPD data and validate the found field (partials are
- * no good to us). Convert the ASCII data to an integer
- * value. Note that we must copy to a temporary buffer
- * because the conversion service requires that the ASCII
- * string be terminated.
- *
- * Allow for WWPN not being found for all devices, setting
- * the returned WWPN to zero when not found. Notify with a
- * log error for cards that should have had WWPN keywords
- * in the VPD - cards requiring WWPN will not have their
- * ports programmed and operate in an undefined state.
- */
- for (k = 0; k < cfg->num_fc_ports; k++) {
- i = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
- wwpn_vpd_tags[k], &kw_size);
- if (i == -ENOENT) {
- if (wwpn_vpd_required)
- dev_err(dev, "%s: Port %d WWPN not found\n",
- __func__, k);
- wwpn[k] = 0ULL;
- continue;
- }
-
- if (i < 0 || kw_size != WWPN_LEN) {
- dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
- __func__, k);
- rc = -ENODEV;
- goto out;
- }
-
- memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
- rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
- if (unlikely(rc)) {
- dev_err(dev, "%s: WWPN conversion failed for port %d\n",
- __func__, k);
- rc = -ENODEV;
- goto out;
- }
-
- dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
- }
-
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
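
The conversion step above copies the fixed-width WWPN keyword into a scratch buffer because the string-to-integer helper needs a NUL terminator that the raw VPD field lacks. A user-space sketch of the same idea, using strtoull() in place of kstrtoul(); the 16-character field width and the sample WWPN value are illustrative assumptions.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define WWPN_FIELD_LEN 16       /* 16 hex digits, not NUL terminated in VPD */

/* Convert a fixed-width ASCII-hex field into a 64-bit value. */
static int parse_wwpn(const char *field, uint64_t *wwpn)
{
        char tmp[WWPN_FIELD_LEN + 1];
        char *end;

        memcpy(tmp, field, WWPN_FIELD_LEN);
        tmp[WWPN_FIELD_LEN] = '\0';     /* terminator the raw field lacks */

        *wwpn = strtoull(tmp, &end, 16);
        return (end == tmp + WWPN_FIELD_LEN) ? 0 : -1;
}

int main(void)
{
        /* Raw VPD bytes: the WWPN field immediately followed by other data. */
        const char vpd[] = "500507605e8358d3V6...";
        uint64_t wwpn;

        if (!parse_wwpn(vpd, &wwpn))
                printf("wwpn=%016llx\n", (unsigned long long)wwpn);
        return 0;
}
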
-
-/**
- * init_pcr() - initialize the provisioning and control registers
- * @cfg: Internal structure associated with the host.
- *
- * Also sets up fast access to the mapped registers and initializes AFU
- * command fields that never change.
- */
-static void init_pcr(struct cxlflash_cfg *cfg)
-{
- struct afu *afu = cfg->afu;
- struct sisl_ctrl_map __iomem *ctrl_map;
- struct hwq *hwq;
- void *cookie;
- int i;
-
- for (i = 0; i < MAX_CONTEXT; i++) {
- ctrl_map = &afu->afu_map->ctrls[i].ctrl;
- /* Disrupt any clients that could be running */
- /* e.g. clients that survived a master restart */
- writeq_be(0, &ctrl_map->rht_start);
- writeq_be(0, &ctrl_map->rht_cnt_id);
- writeq_be(0, &ctrl_map->ctx_cap);
- }
-
- /* Copy frequently used fields into hwq */
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
- cookie = hwq->ctx_cookie;
-
- hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie);
- hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
- hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;
-
- /* Program the Endian Control for the master context */
- writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl);
- }
-}
-
-/**
- * init_global() - initialize AFU global registers
- * @cfg: Internal structure associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_global(struct cxlflash_cfg *cfg)
-{
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq;
- struct sisl_host_map __iomem *hmap;
- __be64 __iomem *fc_port_regs;
- u64 wwpn[MAX_FC_PORTS]; /* wwpn of AFU ports */
- int i = 0, num_ports = 0;
- int rc = 0;
- int j;
- void *ctx;
- u64 reg;
-
- rc = read_vpd(cfg, &wwpn[0]);
- if (rc) {
- dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
- goto out;
- }
-
- /* Set up RRQ and SQ in HWQ for master issued cmds */
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
- hmap = hwq->host_map;
-
- writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
- writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);
- hwq->hrrq_online = true;
-
- if (afu_is_sq_cmd_mode(afu)) {
- writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
- writeq_be((u64)hwq->hsq_end, &hmap->sq_end);
- }
- }
-
- /* AFU configuration */
- reg = readq_be(&afu->afu_map->global.regs.afu_config);
- reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
- /* enable all auto retry options and control endianness */
- /* leave others at default: */
- /* CTX_CAP write protected, mbox_r does not clear on read and */
- /* checker on if dual afu */
- writeq_be(reg, &afu->afu_map->global.regs.afu_config);
-
- /* Global port select: select either port */
- if (afu->internal_lun) {
- /* Only use port 0 */
- writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
- num_ports = 0;
- } else {
- writeq_be(PORT_MASK(cfg->num_fc_ports),
- &afu->afu_map->global.regs.afu_port_sel);
- num_ports = cfg->num_fc_ports;
- }
-
- for (i = 0; i < num_ports; i++) {
- fc_port_regs = get_fc_port_regs(cfg, i);
-
- /* Unmask all errors (but they are still masked at AFU) */
- writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]);
- /* Clear CRC error cnt & set a threshold */
- (void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]);
- writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]);
-
- /* Set WWPNs. If already programmed, wwpn[i] is 0 */
- if (wwpn[i] != 0)
- afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]);
- /* Programming WWPN back to back causes additional
- * offline/online transitions and a PLOGI
- */
- msleep(100);
- }
-
- if (afu_is_ocxl_lisn(afu)) {
- /* Set up the LISN effective address for each master */
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
- ctx = hwq->ctx_cookie;
-
- for (j = 0; j < hwq->num_irqs; j++) {
- reg = cfg->ops->get_irq_objhndl(ctx, j);
- writeq_be(reg, &hwq->ctrl_map->lisn_ea[j]);
- }
-
- reg = hwq->ctx_hndl;
- writeq_be(SISL_LISN_PASID(reg, reg),
- &hwq->ctrl_map->lisn_pasid[0]);
- writeq_be(SISL_LISN_PASID(0UL, reg),
- &hwq->ctrl_map->lisn_pasid[1]);
- }
- }
-
- /* Set up master's own CTX_CAP to allow real mode, host translation */
- /* tables, afu cmds and read/write GSCSI cmds. */
- /* First, unlock ctx_cap write by reading mbox */
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- (void)readq_be(&hwq->ctrl_map->mbox_r); /* unlock ctx_cap */
- writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
- SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
- SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
- &hwq->ctrl_map->ctx_cap);
- }
-
- /*
- * Determine write-same unmap support for host by evaluating the unmap
- * sector support bit of the context control register associated with
- * the primary hardware queue. Note that while this status is reflected
- * in a context register, the outcome can be assumed to be host-wide.
- */
- hwq = get_hwq(afu, PRIMARY_HWQ);
- reg = readq_be(&hwq->host_map->ctx_ctrl);
- if (reg & SISL_CTX_CTRL_UNMAP_SECTOR)
- cfg->ws_unmap = true;
-
- /* Initialize heartbeat */
- afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
-out:
- return rc;
-}
-
-/**
- * start_afu() - initializes and starts the AFU
- * @cfg: Internal structure associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int start_afu(struct cxlflash_cfg *cfg)
-{
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq;
- int rc = 0;
- int i;
-
- init_pcr(cfg);
-
- /* Initialize each HWQ */
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- /* After an AFU reset, RRQ entries are stale, clear them */
- memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry));
-
- /* Initialize RRQ pointers */
- hwq->hrrq_start = &hwq->rrq_entry[0];
- hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
- hwq->hrrq_curr = hwq->hrrq_start;
- hwq->toggle = 1;
-
- /* Initialize spin locks */
- spin_lock_init(&hwq->hrrq_slock);
- spin_lock_init(&hwq->hsq_slock);
-
- /* Initialize SQ */
- if (afu_is_sq_cmd_mode(afu)) {
- memset(&hwq->sq, 0, sizeof(hwq->sq));
- hwq->hsq_start = &hwq->sq[0];
- hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
- hwq->hsq_curr = hwq->hsq_start;
-
- atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
- }
-
- /* Initialize IRQ poll */
- if (afu_is_irqpoll_enabled(afu))
- irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight,
- cxlflash_irqpoll);
-
- }
-
- rc = init_global(cfg);
-
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * init_intr() - setup interrupt handlers for the master context
- * @cfg: Internal structure associated with the host.
- * @hwq: Hardware queue to initialize.
- *
- * Return: UNDO_NOOP when all interrupts are mapped successfully, otherwise
- * the undo level required to unwind the partially completed setup
- */
-static enum undo_level init_intr(struct cxlflash_cfg *cfg,
- struct hwq *hwq)
-{
- struct device *dev = &cfg->dev->dev;
- void *ctx = hwq->ctx_cookie;
- int rc = 0;
- enum undo_level level = UNDO_NOOP;
- bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
- int num_irqs = hwq->num_irqs;
-
- rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
- if (unlikely(rc)) {
- dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
- __func__, rc);
- level = UNDO_NOOP;
- goto out;
- }
-
- rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
- "SISL_MSI_SYNC_ERROR");
- if (unlikely(rc <= 0)) {
- dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
- level = FREE_IRQ;
- goto out;
- }
-
- rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
- "SISL_MSI_RRQ_UPDATED");
- if (unlikely(rc <= 0)) {
- dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
- level = UNMAP_ONE;
- goto out;
- }
-
- /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
- if (!is_primary_hwq)
- goto out;
-
- rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
- "SISL_MSI_ASYNC_ERROR");
- if (unlikely(rc <= 0)) {
- dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
- level = UNMAP_TWO;
- goto out;
- }
-out:
- return level;
-}
-
-/**
- * init_mc() - create and register as the master context
- * @cfg: Internal structure associated with the host.
- * @index: HWQ Index of the master context.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_mc(struct cxlflash_cfg *cfg, u32 index)
-{
- void *ctx;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq = get_hwq(cfg->afu, index);
- int rc = 0;
- int num_irqs;
- enum undo_level level;
-
- hwq->afu = cfg->afu;
- hwq->index = index;
- INIT_LIST_HEAD(&hwq->pending_cmds);
-
- if (index == PRIMARY_HWQ) {
- ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
- num_irqs = 3;
- } else {
- ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
- num_irqs = 2;
- }
- if (IS_ERR_OR_NULL(ctx)) {
- rc = -ENOMEM;
- goto err1;
- }
-
- WARN_ON(hwq->ctx_cookie);
- hwq->ctx_cookie = ctx;
- hwq->num_irqs = num_irqs;
-
- /* Set it up as a master with the CXL */
- cfg->ops->set_master(ctx);
-
- /* Reset AFU when initializing primary context */
- if (index == PRIMARY_HWQ) {
- rc = cfg->ops->afu_reset(ctx);
- if (unlikely(rc)) {
- dev_err(dev, "%s: AFU reset failed rc=%d\n",
- __func__, rc);
- goto err1;
- }
- }
-
- level = init_intr(cfg, hwq);
- if (unlikely(level)) {
- dev_err(dev, "%s: interrupt init failed rc=%d\n", __func__, rc);
- goto err2;
- }
-
- /* Finally, activate the context by starting it */
- rc = cfg->ops->start_context(hwq->ctx_cookie);
- if (unlikely(rc)) {
- dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
- level = UNMAP_THREE;
- goto err2;
- }
-
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-err2:
- term_intr(cfg, level, index);
- if (index != PRIMARY_HWQ)
- cfg->ops->release_context(ctx);
-err1:
- hwq->ctx_cookie = NULL;
- goto out;
-}
-
-/**
- * get_num_afu_ports() - determines and configures the number of AFU ports
- * @cfg: Internal structure associated with the host.
- *
- * This routine determines the number of AFU ports by converting the global
- * port selection mask. The converted value is only valid following an AFU
- * reset (explicit or power-on). This routine must be invoked shortly after
- * mapping as other routines are dependent on the number of ports during the
- * initialization sequence.
- *
- * To support legacy AFUs that might not have reflected an initial global
- * port mask (value read is 0), default to the number of ports originally
- * supported by the cxlflash driver (2) before hardware with other port
- * offerings was introduced.
- */
-static void get_num_afu_ports(struct cxlflash_cfg *cfg)
-{
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- u64 port_mask;
- int num_fc_ports = LEGACY_FC_PORTS;
-
- port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel);
- if (port_mask != 0ULL)
- num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS);
-
- dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n",
- __func__, port_mask, num_fc_ports);
-
- cfg->num_fc_ports = num_fc_ports;
- cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports);
-}
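
The mask-to-count conversion above amounts to "position of the highest set bit plus one, clamped to the maximum, with a legacy default when the mask reads back as zero". Here is a small sketch under those assumptions; LEGACY_PORTS and MAX_PORTS are stand-ins for the driver's LEGACY_FC_PORTS and MAX_FC_PORTS.

#include <stdint.h>
#include <stdio.h>

#define LEGACY_PORTS 2
#define MAX_PORTS    4

/* Highest set bit + 1 gives the number of ports the mask covers. */
static int ports_from_mask(uint64_t mask)
{
        int n = 0;

        if (!mask)
                return LEGACY_PORTS;    /* pre-mask AFUs read back 0 */

        while (mask) {
                mask >>= 1;
                n++;
        }
        return n < MAX_PORTS ? n : MAX_PORTS;
}

int main(void)
{
        printf("%d %d %d\n", ports_from_mask(0x0),
               ports_from_mask(0x3), ports_from_mask(0xf));
        return 0;
}
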
-
-/**
- * init_afu() - setup as master context and start AFU
- * @cfg: Internal structure associated with the host.
- *
- * This routine is a higher level of control for configuring the
- * AFU on probe and reset paths.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_afu(struct cxlflash_cfg *cfg)
-{
- u64 reg;
- int rc = 0;
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq;
- int i;
-
- cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);
-
- mutex_init(&afu->sync_active);
- afu->num_hwqs = afu->desired_hwqs;
- for (i = 0; i < afu->num_hwqs; i++) {
- rc = init_mc(cfg, i);
- if (rc) {
- dev_err(dev, "%s: init_mc failed rc=%d index=%d\n",
- __func__, rc, i);
- goto err1;
- }
- }
-
- /* Map the entire MMIO space of the AFU using the first context */
- hwq = get_hwq(afu, PRIMARY_HWQ);
- afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie);
- if (!afu->afu_map) {
- dev_err(dev, "%s: psa_map failed\n", __func__);
- rc = -ENOMEM;
- goto err1;
- }
-
- /* No byte reverse on reading afu_version or string will be backwards */
- reg = readq(&afu->afu_map->global.regs.afu_version);
- memcpy(afu->version, &reg, sizeof(reg));
- afu->interface_version =
- readq_be(&afu->afu_map->global.regs.interface_version);
- if ((afu->interface_version + 1) == 0) {
- dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
- "interface version %016llx\n", afu->version,
- afu->interface_version);
- rc = -EINVAL;
- goto err1;
- }
-
- if (afu_is_sq_cmd_mode(afu)) {
- afu->send_cmd = send_cmd_sq;
- afu->context_reset = context_reset_sq;
- } else {
- afu->send_cmd = send_cmd_ioarrin;
- afu->context_reset = context_reset_ioarrin;
- }
-
- dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
- afu->version, afu->interface_version);
-
- get_num_afu_ports(cfg);
-
- rc = start_afu(cfg);
- if (rc) {
- dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
- goto err1;
- }
-
- afu_err_intr_init(cfg->afu);
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- hwq->room = readq_be(&hwq->host_map->cmd_room);
- }
-
- /* Restore the LUN mappings */
- cxlflash_restore_luntable(cfg);
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-
-err1:
- for (i = afu->num_hwqs - 1; i >= 0; i--) {
- term_intr(cfg, UNMAP_THREE, i);
- term_mc(cfg, i);
- }
- goto out;
-}
-
-/**
- * afu_reset() - resets the AFU
- * @cfg: Internal structure associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int afu_reset(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
-
- /* Stop the context before the reset. Since the context is
- * no longer available, restart it after the reset is complete.
- */
- term_afu(cfg);
-
- rc = init_afu(cfg);
-
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * drain_ioctls() - wait until all currently executing ioctls have completed
- * @cfg: Internal structure associated with the host.
- *
- * Obtain write access to read/write semaphore that wraps ioctl
- * handling to 'drain' ioctls currently executing.
- */
-static void drain_ioctls(struct cxlflash_cfg *cfg)
-{
- down_write(&cfg->ioctl_rwsem);
- up_write(&cfg->ioctl_rwsem);
-}
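
drain_ioctls() leans on reader/writer semantics: the write acquisition cannot succeed until every in-flight reader (each ioctl holds the semaphore for read) has dropped it, so acquire-then-release behaves as a drain barrier. The same idiom sketched in user space with a pthread rwlock rather than the kernel rwsem (build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t ioctl_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Each "ioctl" holds the lock for read while it runs. */
static void *fake_ioctl(void *arg)
{
        (void)arg;
        pthread_rwlock_rdlock(&ioctl_lock);
        /* ... ioctl body ... */
        pthread_rwlock_unlock(&ioctl_lock);
        return NULL;
}

/* Waits for all current readers, then immediately lets new ones in. */
static void drain(void)
{
        pthread_rwlock_wrlock(&ioctl_lock);
        pthread_rwlock_unlock(&ioctl_lock);
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, fake_ioctl, NULL);
        pthread_join(t, NULL);
        drain();
        printf("drained\n");
        return 0;
}

Nothing is protected by the write-side critical section itself; the whole point is to wait out readers that were already inside.
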
-
-/**
- * cxlflash_async_reset_host() - asynchronous host reset handler
- * @data: Private data provided while scheduling reset.
- * @cookie: Cookie that can be used for checkpointing.
- */
-static void cxlflash_async_reset_host(void *data, async_cookie_t cookie)
-{
- struct cxlflash_cfg *cfg = data;
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
-
- if (cfg->state != STATE_RESET) {
- dev_dbg(dev, "%s: Not performing a reset, state=%d\n",
- __func__, cfg->state);
- goto out;
- }
-
- drain_ioctls(cfg);
- cxlflash_mark_contexts_error(cfg);
- rc = afu_reset(cfg);
- if (rc)
- cfg->state = STATE_FAILTERM;
- else
- cfg->state = STATE_NORMAL;
- wake_up_all(&cfg->reset_waitq);
-
-out:
- scsi_unblock_requests(cfg->host);
-}
-
-/**
- * cxlflash_schedule_async_reset() - schedule an asynchronous host reset
- * @cfg: Internal structure associated with the host.
- */
-static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
-
- if (cfg->state != STATE_NORMAL) {
- dev_dbg(dev, "%s: Not performing reset state=%d\n",
- __func__, cfg->state);
- return;
- }
-
- cfg->state = STATE_RESET;
- scsi_block_requests(cfg->host);
- cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host,
- cfg);
-}
-
-/**
- * send_afu_cmd() - builds and sends an internal AFU command
- * @afu: AFU associated with the host.
- * @rcb: Pre-populated IOARCB describing command to send.
- *
- * The AFU can only take one internal AFU command at a time. This limitation is
- * enforced by using a mutex to provide exclusive access to the AFU during the
- * operation. This design point requires that calling threads not run in
- * interrupt context, as they may sleep when concurrent AFU operations are
- * in flight.
- *
- * The command status is optionally passed back to the caller when the caller
- * populates the IOASA field of the IOARCB with a pointer to an IOASA structure.
- *
- * Return:
- * 0 on success, -errno on failure
- */
-static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- struct afu_cmd *cmd = NULL;
- struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
- ulong lock_flags;
- char *buf = NULL;
- int rc = 0;
- int nretry = 0;
-
- if (cfg->state != STATE_NORMAL) {
- dev_dbg(dev, "%s: Sync not required state=%u\n",
- __func__, cfg->state);
- return 0;
- }
-
- mutex_lock(&afu->sync_active);
- atomic_inc(&afu->cmds_active);
- buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
- if (unlikely(!buf)) {
- dev_err(dev, "%s: no memory for command\n", __func__);
- rc = -ENOMEM;
- goto out;
- }
-
- cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
-
-retry:
- memset(cmd, 0, sizeof(*cmd));
- memcpy(&cmd->rcb, rcb, sizeof(*rcb));
- INIT_LIST_HEAD(&cmd->queue);
- init_completion(&cmd->cevent);
- cmd->parent = afu;
- cmd->hwq_index = hwq->index;
- cmd->rcb.ctx_id = hwq->ctx_hndl;
-
- dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n",
- __func__, afu, cmd, cmd->rcb.cdb[0], nretry);
-
- rc = afu->send_cmd(afu, cmd);
- if (unlikely(rc)) {
- rc = -ENOBUFS;
- goto out;
- }
-
- rc = wait_resp(afu, cmd);
- switch (rc) {
- case -ETIMEDOUT:
- rc = afu->context_reset(hwq);
- if (rc) {
- /* Delete the command from pending_cmds list */
- spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
- list_del(&cmd->list);
- spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
-
- cxlflash_schedule_async_reset(cfg);
- break;
- }
- fallthrough; /* to retry */
- case -EAGAIN:
- if (++nretry < 2)
- goto retry;
- fallthrough; /* to exit */
- default:
- break;
- }
-
- if (rcb->ioasa)
- *rcb->ioasa = cmd->sa;
-out:
- atomic_dec(&afu->cmds_active);
- mutex_unlock(&afu->sync_active);
- kfree(buf);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * cxlflash_afu_sync() - builds and sends an AFU sync command
- * @afu: AFU associated with the host.
- * @ctx: Identifies context requesting sync.
- * @res: Identifies resource requesting sync.
- * @mode: Type of sync to issue (lightweight, heavyweight, global).
- *
- * AFU sync operations are only necessary and allowed when the device is
- * operating normally. When not operating normally, sync requests can occur as
- * part of cleaning up resources associated with an adapter prior to removal.
- * In this scenario, these requests are simply ignored (safe due to the AFU
- * going away).
- *
- * Return:
- * 0 on success, -errno on failure
- */
-int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- struct sisl_ioarcb rcb = { 0 };
-
- dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n",
- __func__, afu, ctx, res, mode);
-
- rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
- rcb.msi = SISL_MSI_RRQ_UPDATED;
- rcb.timeout = MC_AFU_SYNC_TIMEOUT;
-
- rcb.cdb[0] = SISL_AFU_CMD_SYNC;
- rcb.cdb[1] = mode;
- put_unaligned_be16(ctx, &rcb.cdb[2]);
- put_unaligned_be32(res, &rcb.cdb[4]);
-
- return send_afu_cmd(afu, &rcb);
-}
-
-/**
- * cxlflash_eh_abort_handler() - abort a SCSI command
- * @scp: SCSI command to abort.
- *
- * CXL Flash devices do not support a single command abort. Reset the context
- * as per SISLite specification. Flush any pending commands in the hardware
- * queue before the reset.
- *
- * Return: SUCCESS/FAILED as defined in scsi/scsi.h
- */
-static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp)
-{
- int rc = FAILED;
- struct Scsi_Host *host = scp->device->host;
- struct cxlflash_cfg *cfg = shost_priv(host);
- struct afu_cmd *cmd = sc_to_afuc(scp);
- struct device *dev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
- struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
-
- dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
- "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
- scp->device->channel, scp->device->id, scp->device->lun,
- get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
-
- /* When the state is not normal, another reset/reload is in progress.
- * Return failed and the mid-layer will invoke host reset handler.
- */
- if (cfg->state != STATE_NORMAL) {
- dev_dbg(dev, "%s: Invalid state for abort, state=%d\n",
- __func__, cfg->state);
- goto out;
- }
-
- rc = afu->context_reset(hwq);
- if (unlikely(rc))
- goto out;
-
- rc = SUCCESS;
-
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * cxlflash_eh_device_reset_handler() - reset a single LUN
- * @scp: SCSI command to send.
- *
- * Return:
- * SUCCESS as defined in scsi/scsi.h
- * FAILED as defined in scsi/scsi.h
- */
-static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
-{
- int rc = SUCCESS;
- struct scsi_device *sdev = scp->device;
- struct Scsi_Host *host = sdev->host;
- struct cxlflash_cfg *cfg = shost_priv(host);
- struct device *dev = &cfg->dev->dev;
- int rcr = 0;
-
- dev_dbg(dev, "%s: %d/%d/%d/%llu\n", __func__,
- host->host_no, sdev->channel, sdev->id, sdev->lun);
-retry:
- switch (cfg->state) {
- case STATE_NORMAL:
- rcr = send_tmf(cfg, sdev, TMF_LUN_RESET);
- if (unlikely(rcr))
- rc = FAILED;
- break;
- case STATE_RESET:
- wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
- goto retry;
- default:
- rc = FAILED;
- break;
- }
-
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * cxlflash_eh_host_reset_handler() - reset the host adapter
- * @scp: SCSI command from stack identifying host.
- *
- * Following a reset, the state is evaluated again in case an EEH occurred
- * during the reset. In such a scenario, the host reset will either yield
- * until the EEH recovery is complete or return success or failure based
- * upon the current device state.
- *
- * Return:
- * SUCCESS as defined in scsi/scsi.h
- * FAILED as defined in scsi/scsi.h
- */
-static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
-{
- int rc = SUCCESS;
- int rcr = 0;
- struct Scsi_Host *host = scp->device->host;
- struct cxlflash_cfg *cfg = shost_priv(host);
- struct device *dev = &cfg->dev->dev;
-
- dev_dbg(dev, "%s: %d\n", __func__, host->host_no);
-
- switch (cfg->state) {
- case STATE_NORMAL:
- cfg->state = STATE_RESET;
- drain_ioctls(cfg);
- cxlflash_mark_contexts_error(cfg);
- rcr = afu_reset(cfg);
- if (rcr) {
- rc = FAILED;
- cfg->state = STATE_FAILTERM;
- } else
- cfg->state = STATE_NORMAL;
- wake_up_all(&cfg->reset_waitq);
- ssleep(1);
- fallthrough;
- case STATE_RESET:
- wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
- if (cfg->state == STATE_NORMAL)
- break;
- fallthrough;
- default:
- rc = FAILED;
- break;
- }
-
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * cxlflash_change_queue_depth() - change the queue depth for the device
- * @sdev: SCSI device destined for queue depth change.
- * @qdepth: Requested queue depth value to set.
- *
- * The requested queue depth is capped to the maximum supported value.
- *
- * Return: The actual queue depth set.
- */
-static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
-{
-
- if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
- qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
-
- scsi_change_queue_depth(sdev, qdepth);
- return sdev->queue_depth;
-}
-
-/**
- * cxlflash_show_port_status() - queries and presents the current port status
- * @port: Desired port for status reporting.
- * @cfg: Internal structure associated with the host.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf or -EINVAL.
- */
-static ssize_t cxlflash_show_port_status(u32 port,
- struct cxlflash_cfg *cfg,
- char *buf)
-{
- struct device *dev = &cfg->dev->dev;
- char *disp_status;
- u64 status;
- __be64 __iomem *fc_port_regs;
-
- WARN_ON(port >= MAX_FC_PORTS);
-
- if (port >= cfg->num_fc_ports) {
- dev_info(dev, "%s: Port %d not supported on this card.\n",
- __func__, port);
- return -EINVAL;
- }
-
- fc_port_regs = get_fc_port_regs(cfg, port);
- status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]);
- status &= FC_MTIP_STATUS_MASK;
-
- if (status == FC_MTIP_STATUS_ONLINE)
- disp_status = "online";
- else if (status == FC_MTIP_STATUS_OFFLINE)
- disp_status = "offline";
- else
- disp_status = "unknown";
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
-}
-
-/**
- * port0_show() - queries and presents the current status of port 0
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port0_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_status(0, cfg, buf);
-}
-
-/**
- * port1_show() - queries and presents the current status of port 1
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port1_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_status(1, cfg, buf);
-}
-
-/**
- * port2_show() - queries and presents the current status of port 2
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port2_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_status(2, cfg, buf);
-}
-
-/**
- * port3_show() - queries and presents the current status of port 3
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port3_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_status(3, cfg, buf);
-}
-
-/**
- * lun_mode_show() - presents the current LUN mode of the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the LUN mode.
- * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t lun_mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct afu *afu = cfg->afu;
-
- return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
-}
-
-/**
- * lun_mode_store() - sets the LUN mode of the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the LUN mode.
- * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
- * @count: Length of data residing in @buf.
- *
- * The CXL Flash AFU supports a dummy LUN mode where the external
- * links and storage are not required. Space on the FPGA is used
- * to create 1 or 2 small LUNs which are presented to the system
- * as if they were a normal storage device. This feature is useful
- * during development and also provides manufacturing with a way
- * to test the AFU without an actual device.
- *
- * 0 = external LUN[s] (default)
- * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
- * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
- * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
- * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
- *
- * Return: The number of bytes consumed from @buf.
- */
-static ssize_t lun_mode_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct cxlflash_cfg *cfg = shost_priv(shost);
- struct afu *afu = cfg->afu;
- int rc;
- u32 lun_mode;
-
- rc = kstrtouint(buf, 10, &lun_mode);
- if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
- afu->internal_lun = lun_mode;
-
- /*
- * When configured for internal LUN, there is only one channel,
- * channel number 0, else there will be one less than the number
- * of fc ports for this card.
- */
- if (afu->internal_lun)
- shost->max_channel = 0;
- else
- shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);
-
- afu_reset(cfg);
- scsi_scan_host(cfg->host);
- }
-
- return count;
-}
-
-/**
- * ioctl_version_show() - presents the current ioctl version of the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the ioctl version.
- * @buf: Buffer of length PAGE_SIZE to report back the ioctl version.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t ioctl_version_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- ssize_t bytes = 0;
-
- bytes = scnprintf(buf, PAGE_SIZE,
- "disk: %u\n", DK_CXLFLASH_VERSION_0);
- bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
- "host: %u\n", HT_CXLFLASH_VERSION_0);
-
- return bytes;
-}
-
-/**
- * cxlflash_show_port_lun_table() - queries and presents the port LUN table
- * @port: Desired port for status reporting.
- * @cfg: Internal structure associated with the host.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf or -EINVAL.
- */
-static ssize_t cxlflash_show_port_lun_table(u32 port,
- struct cxlflash_cfg *cfg,
- char *buf)
-{
- struct device *dev = &cfg->dev->dev;
- __be64 __iomem *fc_port_luns;
- int i;
- ssize_t bytes = 0;
-
- WARN_ON(port >= MAX_FC_PORTS);
-
- if (port >= cfg->num_fc_ports) {
- dev_info(dev, "%s: Port %d not supported on this card.\n",
- __func__, port);
- return -EINVAL;
- }
-
- fc_port_luns = get_fc_port_luns(cfg, port);
-
- for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
- bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
- "%03d: %016llx\n",
- i, readq_be(&fc_port_luns[i]));
- return bytes;
-}
-
-/**
- * port0_lun_table_show() - presents the current LUN table of port 0
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port0_lun_table_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_lun_table(0, cfg, buf);
-}
-
-/**
- * port1_lun_table_show() - presents the current LUN table of port 1
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port1_lun_table_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_lun_table(1, cfg, buf);
-}
-
-/**
- * port2_lun_table_show() - presents the current LUN table of port 2
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port2_lun_table_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_lun_table(2, cfg, buf);
-}
-
-/**
- * port3_lun_table_show() - presents the current LUN table of port 3
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port3_lun_table_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_lun_table(3, cfg, buf);
-}
-
-/**
- * irqpoll_weight_show() - presents the current IRQ poll weight for the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the IRQ poll weight.
- * @buf: Buffer of length PAGE_SIZE to report back the current IRQ poll
- * weight in ASCII.
- *
- * An IRQ poll weight of 0 indicates polling is disabled.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t irqpoll_weight_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct afu *afu = cfg->afu;
-
- return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
-}
-
-/**
- * irqpoll_weight_store() - sets the current IRQ poll weight for the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the IRQ poll weight.
- * @buf: Buffer of length PAGE_SIZE containing the desired IRQ poll
- * weight in ASCII.
- * @count: Length of data residing in @buf.
- *
- * An IRQ poll weight of 0 indicates polling is disabled.
- *
- * Return: The number of bytes consumed from @buf, or -errno on failure.
- */
-static ssize_t irqpoll_weight_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct device *cfgdev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
- struct hwq *hwq;
- u32 weight;
- int rc, i;
-
- rc = kstrtouint(buf, 10, &weight);
- if (rc)
- return -EINVAL;
-
- if (weight > 256) {
- dev_info(cfgdev,
- "Invalid IRQ poll weight. It must be 256 or less.\n");
- return -EINVAL;
- }
-
- if (weight == afu->irqpoll_weight) {
- dev_info(cfgdev,
- "Current IRQ poll weight has the same weight.\n");
- return -EINVAL;
- }
-
- if (afu_is_irqpoll_enabled(afu)) {
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- irq_poll_disable(&hwq->irqpoll);
- }
- }
-
- afu->irqpoll_weight = weight;
-
- if (weight > 0) {
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll);
- }
- }
-
- return count;
-}
-
-/**
- * num_hwqs_show() - presents the number of hardware queues for the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the number of hardware queues.
- * @buf: Buffer of length PAGE_SIZE to report back the number of hardware
- * queues in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t num_hwqs_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct afu *afu = cfg->afu;
-
- return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
-}
-
-/**
- * num_hwqs_store() - sets the number of hardware queues for the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the number of hardware queues.
- * @buf: Buffer of length PAGE_SIZE containing the number of hardware
- * queues in ASCII.
- * @count: Length of data residing in @buf.
- *
- * n > 0: num_hwqs = n
- * n = 0: num_hwqs = num_online_cpus()
- * n < 0: num_hwqs = num_online_cpus() / abs(n)
- *
- * Return: The number of bytes consumed from @buf.
- */
-static ssize_t num_hwqs_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct afu *afu = cfg->afu;
- int rc;
- int nhwqs, num_hwqs;
-
- rc = kstrtoint(buf, 10, &nhwqs);
- if (rc)
- return -EINVAL;
-
- if (nhwqs >= 1)
- num_hwqs = nhwqs;
- else if (nhwqs == 0)
- num_hwqs = num_online_cpus();
- else
- num_hwqs = num_online_cpus() / abs(nhwqs);
-
- afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS);
- WARN_ON_ONCE(afu->desired_hwqs == 0);
-
-retry:
- switch (cfg->state) {
- case STATE_NORMAL:
- cfg->state = STATE_RESET;
- drain_ioctls(cfg);
- cxlflash_mark_contexts_error(cfg);
- rc = afu_reset(cfg);
- if (rc)
- cfg->state = STATE_FAILTERM;
- else
- cfg->state = STATE_NORMAL;
- wake_up_all(&cfg->reset_waitq);
- break;
- case STATE_RESET:
- wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
- if (cfg->state == STATE_NORMAL)
- goto retry;
- fallthrough;
- default:
- /* Ideally should not happen */
- dev_err(dev, "%s: Device is not ready, state=%d\n",
- __func__, cfg->state);
- break;
- }
-
- return count;
-}
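
The store above maps the written integer onto a queue count: positive values are taken literally, zero means one queue per online CPU, and a negative value divides the CPU count by its magnitude, with the result clamped to the supported maximum. A user-space sketch of just that mapping; MAX_HWQS is an assumed stand-in for CXLFLASH_MAX_HWQS, and the sketch clamps to at least one queue where the driver merely warns.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define MAX_HWQS 8      /* stand-in for CXLFLASH_MAX_HWQS */

/* n > 0: n queues; n == 0: one per online CPU; n < 0: CPUs / |n| */
static int resolve_hwqs(int n, int ncpus)
{
        int hwqs;

        if (n > 0)
                hwqs = n;
        else if (n == 0)
                hwqs = ncpus;
        else
                hwqs = ncpus / abs(n);

        if (hwqs > MAX_HWQS)
                hwqs = MAX_HWQS;
        return hwqs > 0 ? hwqs : 1;     /* the driver only warns on 0 */
}

int main(void)
{
        int ncpus = (int)sysconf(_SC_NPROCESSORS_ONLN);

        printf("%d %d %d\n", resolve_hwqs(3, ncpus),
               resolve_hwqs(0, ncpus), resolve_hwqs(-2, ncpus));
        return 0;
}
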
-
-static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };
-
-/**
- * hwq_mode_show() - presents the HWQ steering mode for the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the HWQ steering mode.
- * @buf: Buffer of length PAGE_SIZE to report back the HWQ steering mode
- * as a character string.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t hwq_mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct afu *afu = cfg->afu;
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
-}
-
-/**
- * hwq_mode_store() - sets the HWQ steering mode for the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the HWQ steering mode.
- * @buf: Buffer of length PAGE_SIZE containing the HWQ steering mode
- * as a character string.
- * @count: Length of data residing in @buf.
- *
- * rr = Round-Robin
- * tag = Block MQ Tagging
- * cpu = CPU Affinity
- *
- * Return: The number of bytes consumed from @buf, or -errno on failure.
- */
-static ssize_t hwq_mode_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct cxlflash_cfg *cfg = shost_priv(shost);
- struct device *cfgdev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
- int i;
- u32 mode = MAX_HWQ_MODE;
-
- for (i = 0; i < MAX_HWQ_MODE; i++) {
- if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) {
- mode = i;
- break;
- }
- }
-
- if (mode >= MAX_HWQ_MODE) {
- dev_info(cfgdev, "Invalid HWQ steering mode.\n");
- return -EINVAL;
- }
-
- afu->hwq_mode = mode;
-
- return count;
-}
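
The store above resolves the written string by scanning a small name table and taking the index of the first entry whose name is a prefix of the input, which conveniently ignores the trailing newline sysfs appends. A minimal sketch of that lookup; the enum values and table mirror the names used above but are otherwise illustrative.

#include <stdio.h>
#include <string.h>

enum hwq_mode { HWQ_RR, HWQ_TAG, HWQ_CPU, HWQ_MODE_MAX };

static const char *hwq_mode_name[HWQ_MODE_MAX] = { "rr", "tag", "cpu" };

/* Return the matching mode, or HWQ_MODE_MAX when nothing matches. */
static enum hwq_mode parse_hwq_mode(const char *buf)
{
        for (int i = 0; i < HWQ_MODE_MAX; i++)
                if (!strncmp(hwq_mode_name[i], buf,
                             strlen(hwq_mode_name[i])))
                        return (enum hwq_mode)i;
        return HWQ_MODE_MAX;
}

int main(void)
{
        printf("%d %d %d\n", parse_hwq_mode("tag\n"),
               parse_hwq_mode("cpu"), parse_hwq_mode("bogus"));
        return 0;
}
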
-
-/**
- * mode_show() - presents the current mode of the device
- * @dev: Generic device associated with the device.
- * @attr: Device attribute representing the device mode.
- * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
-
- return scnprintf(buf, PAGE_SIZE, "%s\n",
- sdev->hostdata ? "superpipe" : "legacy");
-}
-
-/*
- * Host attributes
- */
-static DEVICE_ATTR_RO(port0);
-static DEVICE_ATTR_RO(port1);
-static DEVICE_ATTR_RO(port2);
-static DEVICE_ATTR_RO(port3);
-static DEVICE_ATTR_RW(lun_mode);
-static DEVICE_ATTR_RO(ioctl_version);
-static DEVICE_ATTR_RO(port0_lun_table);
-static DEVICE_ATTR_RO(port1_lun_table);
-static DEVICE_ATTR_RO(port2_lun_table);
-static DEVICE_ATTR_RO(port3_lun_table);
-static DEVICE_ATTR_RW(irqpoll_weight);
-static DEVICE_ATTR_RW(num_hwqs);
-static DEVICE_ATTR_RW(hwq_mode);
-
-static struct attribute *cxlflash_host_attrs[] = {
- &dev_attr_port0.attr,
- &dev_attr_port1.attr,
- &dev_attr_port2.attr,
- &dev_attr_port3.attr,
- &dev_attr_lun_mode.attr,
- &dev_attr_ioctl_version.attr,
- &dev_attr_port0_lun_table.attr,
- &dev_attr_port1_lun_table.attr,
- &dev_attr_port2_lun_table.attr,
- &dev_attr_port3_lun_table.attr,
- &dev_attr_irqpoll_weight.attr,
- &dev_attr_num_hwqs.attr,
- &dev_attr_hwq_mode.attr,
- NULL
-};
-
-ATTRIBUTE_GROUPS(cxlflash_host);
-
-/*
- * Device attributes
- */
-static DEVICE_ATTR_RO(mode);
-
-static struct attribute *cxlflash_dev_attrs[] = {
- &dev_attr_mode.attr,
- NULL
-};
-
-ATTRIBUTE_GROUPS(cxlflash_dev);
-
-/*
- * Host template
- */
-static struct scsi_host_template driver_template = {
- .module = THIS_MODULE,
- .name = CXLFLASH_ADAPTER_NAME,
- .info = cxlflash_driver_info,
- .ioctl = cxlflash_ioctl,
- .proc_name = CXLFLASH_NAME,
- .queuecommand = cxlflash_queuecommand,
- .eh_abort_handler = cxlflash_eh_abort_handler,
- .eh_device_reset_handler = cxlflash_eh_device_reset_handler,
- .eh_host_reset_handler = cxlflash_eh_host_reset_handler,
- .change_queue_depth = cxlflash_change_queue_depth,
- .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
- .can_queue = CXLFLASH_MAX_CMDS,
- .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
- .this_id = -1,
- .sg_tablesize = 1, /* No scatter gather support */
- .max_sectors = CXLFLASH_MAX_SECTORS,
- .shost_groups = cxlflash_host_groups,
- .sdev_groups = cxlflash_dev_groups,
-};
-
-/*
- * Device dependent values
- */
-static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
- CXLFLASH_WWPN_VPD_REQUIRED };
-static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
- CXLFLASH_NOTIFY_SHUTDOWN };
-static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
- (CXLFLASH_NOTIFY_SHUTDOWN |
- CXLFLASH_OCXL_DEV) };
-
-/*
- * PCI device binding table
- */
-static const struct pci_device_id cxlflash_pci_table[] = {
- {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
- {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
- {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
- {}
-};
-
-MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
-
-/**
- * cxlflash_worker_thread() - work thread handler for the AFU
- * @work: Work structure contained within cxlflash associated with host.
- *
- * Handles the following events:
- * - Link reset, which cannot be performed in interrupt context because it
- * may block for up to a few seconds
- * - Rescan the host
- */
-static void cxlflash_worker_thread(struct work_struct *work)
-{
- struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
- work_q);
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- __be64 __iomem *fc_port_regs;
- int port;
- ulong lock_flags;
-
- /* Avoid MMIO if the device has failed */
-
- if (cfg->state != STATE_NORMAL)
- return;
-
- spin_lock_irqsave(cfg->host->host_lock, lock_flags);
-
- if (cfg->lr_state == LINK_RESET_REQUIRED) {
- port = cfg->lr_port;
- if (port < 0)
- dev_err(dev, "%s: invalid port index %d\n",
- __func__, port);
- else {
- spin_unlock_irqrestore(cfg->host->host_lock,
- lock_flags);
-
- /* The reset can block... */
- fc_port_regs = get_fc_port_regs(cfg, port);
- afu_link_reset(afu, port, fc_port_regs);
- spin_lock_irqsave(cfg->host->host_lock, lock_flags);
- }
-
- cfg->lr_state = LINK_RESET_COMPLETE;
- }
-
- spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
-
- if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
- scsi_scan_host(cfg->host);
-}
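
cxlflash_worker_thread() collapses queued rescan requests with atomic_dec_if_positive(), which decrements only while the counter is positive and signals whether it did. A rough userspace analogue of that primitive using C11 atomics, shown only to illustrate the compare-and-swap idea (the kernel helper itself is not reproduced here):

#include <stdatomic.h>
#include <stdio.h>

/*
 * Decrement *v only if it is currently positive.
 * Returns the decremented value, so a result >= 0 means the
 * decrement actually happened.
 */
static int dec_if_positive(atomic_int *v)
{
        int old = atomic_load(v);
        int new;

        do {
                new = old - 1;
                if (new < 0)
                        break;          /* already zero, leave it alone */
        } while (!atomic_compare_exchange_weak(v, &old, new));

        return new;
}

int main(void)
{
        atomic_int scan_needed = 1;

        if (dec_if_positive(&scan_needed) >= 0)
                puts("rescan once");
        if (dec_if_positive(&scan_needed) >= 0)
                puts("never printed");
        return 0;
}
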
-
-/**
- * cxlflash_chr_open() - character device open handler
- * @inode: Device inode associated with this character device.
- * @file: File pointer for this device.
- *
- * Only users with admin privileges are allowed to open the character device.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_chr_open(struct inode *inode, struct file *file)
-{
- struct cxlflash_cfg *cfg;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev);
- file->private_data = cfg;
-
- return 0;
-}
-
-/**
- * decode_hioctl() - translates encoded host ioctl to easily identifiable string
- * @cmd: The host ioctl command to decode.
- *
- * Return: A string identifying the decoded host ioctl.
- */
-static char *decode_hioctl(unsigned int cmd)
-{
- switch (cmd) {
- case HT_CXLFLASH_LUN_PROVISION:
- return __stringify_1(HT_CXLFLASH_LUN_PROVISION);
- }
-
- return "UNKNOWN";
-}
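
decode_hioctl() uses the single-level __stringify_1() deliberately: stringifying without a second expansion step yields the macro's name, whereas the usual two-level form yields its value. A standalone sketch of the difference, with the macros redefined locally and a placeholder value that is not the driver's real ioctl encoding:

#include <stdio.h>

#define __stringify_1(x)        #x
#define __stringify(x)          __stringify_1(x)

#define HT_CXLFLASH_LUN_PROVISION       0xBF01  /* placeholder value */

int main(void)
{
        /* Single level: the argument is not expanded, the name is kept. */
        puts(__stringify_1(HT_CXLFLASH_LUN_PROVISION)); /* HT_CXLFLASH_LUN_PROVISION */
        /* Two levels: the argument is expanded first. */
        puts(__stringify(HT_CXLFLASH_LUN_PROVISION));   /* 0xBF01 */
        return 0;
}
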
-
-/**
- * cxlflash_lun_provision() - host LUN provisioning handler
- * @cfg: Internal structure associated with the host.
- * @arg: Kernel copy of userspace ioctl data structure.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_lun_provision(struct cxlflash_cfg *cfg, void *arg)
-{
- struct ht_cxlflash_lun_provision *lunprov = arg;
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct sisl_ioarcb rcb;
- struct sisl_ioasa asa;
- __be64 __iomem *fc_port_regs;
- u16 port = lunprov->port;
- u16 scmd = lunprov->hdr.subcmd;
- u16 type;
- u64 reg;
- u64 size;
- u64 lun_id;
- int rc = 0;
-
- if (!afu_is_lun_provision(afu)) {
- rc = -ENOTSUPP;
- goto out;
- }
-
- if (port >= cfg->num_fc_ports) {
- rc = -EINVAL;
- goto out;
- }
-
- switch (scmd) {
- case HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN:
- type = SISL_AFU_LUN_PROVISION_CREATE;
- size = lunprov->size;
- lun_id = 0;
- break;
- case HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN:
- type = SISL_AFU_LUN_PROVISION_DELETE;
- size = 0;
- lun_id = lunprov->lun_id;
- break;
- case HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT:
- fc_port_regs = get_fc_port_regs(cfg, port);
-
- reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]);
- lunprov->max_num_luns = reg;
- reg = readq_be(&fc_port_regs[FC_CUR_NUM_LUNS / 8]);
- lunprov->cur_num_luns = reg;
- reg = readq_be(&fc_port_regs[FC_MAX_CAP_PORT / 8]);
- lunprov->max_cap_port = reg;
- reg = readq_be(&fc_port_regs[FC_CUR_CAP_PORT / 8]);
- lunprov->cur_cap_port = reg;
-
- goto out;
- default:
- rc = -EINVAL;
- goto out;
- }
-
- memset(&rcb, 0, sizeof(rcb));
- memset(&asa, 0, sizeof(asa));
- rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
- rcb.lun_id = lun_id;
- rcb.msi = SISL_MSI_RRQ_UPDATED;
- rcb.timeout = MC_LUN_PROV_TIMEOUT;
- rcb.ioasa = &asa;
-
- rcb.cdb[0] = SISL_AFU_CMD_LUN_PROVISION;
- rcb.cdb[1] = type;
- rcb.cdb[2] = port;
- put_unaligned_be64(size, &rcb.cdb[8]);
-
- rc = send_afu_cmd(afu, &rcb);
- if (rc) {
- dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
- __func__, rc, asa.ioasc, asa.afu_extra);
- goto out;
- }
-
- if (scmd == HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN) {
- lunprov->lun_id = (u64)asa.lunid_hi << 32 | asa.lunid_lo;
- memcpy(lunprov->wwid, asa.wwid, sizeof(lunprov->wwid));
- }
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * cxlflash_afu_debug() - host AFU debug handler
- * @cfg: Internal structure associated with the host.
- * @arg: Kernel copy of userspace ioctl data structure.
- *
- * For debug requests requiring a data buffer, always provide an aligned
- * (cache line) buffer to the AFU to appease any alignment requirements.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_afu_debug(struct cxlflash_cfg *cfg, void *arg)
-{
- struct ht_cxlflash_afu_debug *afu_dbg = arg;
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct sisl_ioarcb rcb;
- struct sisl_ioasa asa;
- char *buf = NULL;
- char *kbuf = NULL;
- void __user *ubuf = (__force void __user *)afu_dbg->data_ea;
- u16 req_flags = SISL_REQ_FLAGS_AFU_CMD;
- u32 ulen = afu_dbg->data_len;
- bool is_write = afu_dbg->hdr.flags & HT_CXLFLASH_HOST_WRITE;
- int rc = 0;
-
- if (!afu_is_afu_debug(afu)) {
- rc = -ENOTSUPP;
- goto out;
- }
-
- if (ulen) {
- req_flags |= SISL_REQ_FLAGS_SUP_UNDERRUN;
-
- if (ulen > HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN) {
- rc = -EINVAL;
- goto out;
- }
-
- buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL);
- if (unlikely(!buf)) {
- rc = -ENOMEM;
- goto out;
- }
-
- kbuf = PTR_ALIGN(buf, cache_line_size());
-
- if (is_write) {
- req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
-
- if (copy_from_user(kbuf, ubuf, ulen)) {
- rc = -EFAULT;
- goto out;
- }
- }
- }
-
- memset(&rcb, 0, sizeof(rcb));
- memset(&asa, 0, sizeof(asa));
-
- rcb.req_flags = req_flags;
- rcb.msi = SISL_MSI_RRQ_UPDATED;
- rcb.timeout = MC_AFU_DEBUG_TIMEOUT;
- rcb.ioasa = &asa;
-
- if (ulen) {
- rcb.data_len = ulen;
- rcb.data_ea = (uintptr_t)kbuf;
- }
-
- rcb.cdb[0] = SISL_AFU_CMD_DEBUG;
- memcpy(&rcb.cdb[4], afu_dbg->afu_subcmd,
- HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN);
-
- rc = send_afu_cmd(afu, &rcb);
- if (rc) {
- dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
- __func__, rc, asa.ioasc, asa.afu_extra);
- goto out;
- }
-
- if (ulen && !is_write) {
- if (copy_to_user(ubuf, kbuf, ulen))
- rc = -EFAULT;
- }
-out:
- kfree(buf);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
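
cxlflash_afu_debug() obtains a cache-line-aligned buffer by over-allocating by (alignment - 1) bytes, rounding the pointer up with PTR_ALIGN(), and still freeing the original pointer. A minimal userspace sketch of the same over-allocate-and-align idea; ALIGN_UP() and the 128-byte alignment here are stand-ins, not kernel APIs:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Round @p up to the next multiple of power-of-two @a. */
#define ALIGN_UP(p, a)  ((void *)(((uintptr_t)(p) + (a) - 1) & ~(uintptr_t)((a) - 1)))

int main(void)
{
        size_t len = 1000, align = 128;
        void *raw, *aligned;

        /* Over-allocate so an aligned region of @len bytes always fits. */
        raw = malloc(len + align - 1);
        if (!raw)
                return 1;

        aligned = ALIGN_UP(raw, align);
        printf("raw=%p aligned=%p (offset %zu)\n",
               raw, aligned, (size_t)((char *)aligned - (char *)raw));

        free(raw);      /* always free the original, unaligned pointer */
        return 0;
}
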
-
-/**
- * cxlflash_chr_ioctl() - character device IOCTL handler
- * @file: File pointer for this device.
- * @cmd: IOCTL command.
- * @arg: Userspace ioctl data structure.
- *
- * A read/write semaphore is used to implement a 'drain' of currently
- * running ioctls. The read semaphore is taken at the beginning of each
- * ioctl thread and released upon concluding execution. Additionally the
- * semaphore should be released and then reacquired in any ioctl execution
- * path which will wait for an event to occur that is outside the scope of
- * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
- * a thread simply needs to acquire the write semaphore.
- *
- * Return: 0 on success, -errno on failure
- */
-static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- typedef int (*hioctl) (struct cxlflash_cfg *, void *);
-
- struct cxlflash_cfg *cfg = file->private_data;
- struct device *dev = &cfg->dev->dev;
- char buf[sizeof(union cxlflash_ht_ioctls)];
- void __user *uarg = (void __user *)arg;
- struct ht_cxlflash_hdr *hdr;
- size_t size = 0;
- bool known_ioctl = false;
- int idx = 0;
- int rc = 0;
- hioctl do_ioctl = NULL;
-
- static const struct {
- size_t size;
- hioctl ioctl;
- } ioctl_tbl[] = { /* NOTE: order matters here */
- { sizeof(struct ht_cxlflash_lun_provision), cxlflash_lun_provision },
- { sizeof(struct ht_cxlflash_afu_debug), cxlflash_afu_debug },
- };
-
- /* Hold read semaphore so we can drain if needed */
- down_read(&cfg->ioctl_rwsem);
-
- dev_dbg(dev, "%s: cmd=%u idx=%d tbl_size=%lu\n",
- __func__, cmd, idx, sizeof(ioctl_tbl));
-
- switch (cmd) {
- case HT_CXLFLASH_LUN_PROVISION:
- case HT_CXLFLASH_AFU_DEBUG:
- known_ioctl = true;
- idx = _IOC_NR(cmd) - _IOC_NR(HT_CXLFLASH_LUN_PROVISION);
- size = ioctl_tbl[idx].size;
- do_ioctl = ioctl_tbl[idx].ioctl;
-
- if (likely(do_ioctl))
- break;
-
- fallthrough;
- default:
- rc = -EINVAL;
- goto out;
- }
-
- if (unlikely(copy_from_user(&buf, uarg, size))) {
- dev_err(dev, "%s: copy_from_user() fail "
- "size=%lu cmd=%d (%s) uarg=%p\n",
- __func__, size, cmd, decode_hioctl(cmd), uarg);
- rc = -EFAULT;
- goto out;
- }
-
- hdr = (struct ht_cxlflash_hdr *)&buf;
- if (hdr->version != HT_CXLFLASH_VERSION_0) {
- dev_dbg(dev, "%s: Version %u not supported for %s\n",
- __func__, hdr->version, decode_hioctl(cmd));
- rc = -EINVAL;
- goto out;
- }
-
- if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->return_flags) {
- dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
- rc = -EINVAL;
- goto out;
- }
-
- rc = do_ioctl(cfg, (void *)&buf);
- if (likely(!rc))
- if (unlikely(copy_to_user(uarg, &buf, size))) {
- dev_err(dev, "%s: copy_to_user() fail "
- "size=%lu cmd=%d (%s) uarg=%p\n",
- __func__, size, cmd, decode_hioctl(cmd), uarg);
- rc = -EFAULT;
- }
-
- /* fall through to exit */
-
-out:
- up_read(&cfg->ioctl_rwsem);
- if (unlikely(rc && known_ioctl))
- dev_err(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
- __func__, decode_hioctl(cmd), cmd, rc);
- else
- dev_dbg(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
- __func__, decode_hioctl(cmd), cmd, rc);
- return rc;
-}
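
cxlflash_chr_ioctl() converts the command into a table index by subtracting the _IOC_NR() of the first command in the contiguous range, which is why the entries in ioctl_tbl[] must stay in command-number order. A small Linux userspace sketch of that decoding, using invented demo commands rather than the driver's real ones:

#include <stdio.h>
#include <linux/ioctl.h>

struct demo_a { int x; };
struct demo_b { int y; };

/* Two commands with consecutive numbers, like the host ioctls above. */
#define DEMO_BASE       0xBF
#define DEMO_CMD_A      _IOWR(DEMO_BASE, 0, struct demo_a)
#define DEMO_CMD_B      _IOWR(DEMO_BASE, 1, struct demo_b)

int main(void)
{
        unsigned int cmds[] = { DEMO_CMD_A, DEMO_CMD_B };

        for (unsigned int i = 0; i < 2; i++) {
                /* Index into a table ordered by command number. */
                int idx = _IOC_NR(cmds[i]) - _IOC_NR(DEMO_CMD_A);

                printf("cmd %#x -> idx %d\n", cmds[i], idx);    /* 0, then 1 */
        }
        return 0;
}
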
-
-/*
- * Character device file operations
- */
-static const struct file_operations cxlflash_chr_fops = {
- .owner = THIS_MODULE,
- .open = cxlflash_chr_open,
- .unlocked_ioctl = cxlflash_chr_ioctl,
- .compat_ioctl = compat_ptr_ioctl,
-};
-
-/**
- * init_chrdev() - initialize the character device for the host
- * @cfg: Internal structure associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_chrdev(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
- struct device *char_dev;
- dev_t devno;
- int minor;
- int rc = 0;
-
- minor = cxlflash_get_minor();
- if (unlikely(minor < 0)) {
- dev_err(dev, "%s: Exhausted allowed adapters\n", __func__);
- rc = -ENOSPC;
- goto out;
- }
-
- devno = MKDEV(cxlflash_major, minor);
- cdev_init(&cfg->cdev, &cxlflash_chr_fops);
-
- rc = cdev_add(&cfg->cdev, devno, 1);
- if (rc) {
- dev_err(dev, "%s: cdev_add failed rc=%d\n", __func__, rc);
- goto err1;
- }
-
- char_dev = device_create(&cxlflash_class, NULL, devno,
- NULL, "cxlflash%d", minor);
- if (IS_ERR(char_dev)) {
- rc = PTR_ERR(char_dev);
- dev_err(dev, "%s: device_create failed rc=%d\n",
- __func__, rc);
- goto err2;
- }
-
- cfg->chardev = char_dev;
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-err2:
- cdev_del(&cfg->cdev);
-err1:
- cxlflash_put_minor(minor);
- goto out;
-}
-
-/**
- * cxlflash_probe() - PCI entry point to add host
- * @pdev: PCI device associated with the host.
- * @dev_id: PCI device id associated with device.
- *
- * The device will initially start out in a 'probing' state and
- * transition to the 'normal' state at the end of a successful
- * probe. Should an EEH event occur during probe, the notification
- * thread (error_detected()) will wait until the probe handler
- * is nearly complete. At that time, the device will be moved to
- * a 'probed' state and the EEH thread woken up to drive the slot
- * reset and recovery (device moves to 'normal' state). Meanwhile,
- * the probe will be allowed to exit successfully.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_probe(struct pci_dev *pdev,
- const struct pci_device_id *dev_id)
-{
- struct Scsi_Host *host;
- struct cxlflash_cfg *cfg = NULL;
- struct device *dev = &pdev->dev;
- struct dev_dependent_vals *ddv;
- int rc = 0;
- int k;
-
- dev_err_once(&pdev->dev, "DEPRECATION: cxlflash is deprecated and will be removed in a future kernel release\n");
-
- dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
- __func__, pdev->irq);
-
- ddv = (struct dev_dependent_vals *)dev_id->driver_data;
- driver_template.max_sectors = ddv->max_sectors;
-
- host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
- if (!host) {
- dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
- rc = -ENOMEM;
- goto out;
- }
-
- host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
- host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
- host->unique_id = host->host_no;
- host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
-
- cfg = shost_priv(host);
- cfg->state = STATE_PROBING;
- cfg->host = host;
- rc = alloc_mem(cfg);
- if (rc) {
- dev_err(dev, "%s: alloc_mem failed\n", __func__);
- rc = -ENOMEM;
- scsi_host_put(cfg->host);
- goto out;
- }
-
- cfg->init_state = INIT_STATE_NONE;
- cfg->dev = pdev;
- cfg->cxl_fops = cxlflash_cxl_fops;
- cfg->ops = cxlflash_assign_ops(ddv);
- WARN_ON_ONCE(!cfg->ops);
-
- /*
- * Promoted LUNs move to the top of the LUN table. The rest stay on
- * the bottom half. The bottom half grows from the end (index = 255),
- * whereas the top half grows from the beginning (index = 0).
- *
- * Initialize the last LUN index for all possible ports.
- */
- cfg->promote_lun_index = 0;
-
- for (k = 0; k < MAX_FC_PORTS; k++)
- cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;
-
- cfg->dev_id = (struct pci_device_id *)dev_id;
-
- init_waitqueue_head(&cfg->tmf_waitq);
- init_waitqueue_head(&cfg->reset_waitq);
-
- INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
- cfg->lr_state = LINK_RESET_INVALID;
- cfg->lr_port = -1;
- spin_lock_init(&cfg->tmf_slock);
- mutex_init(&cfg->ctx_tbl_list_mutex);
- mutex_init(&cfg->ctx_recovery_mutex);
- init_rwsem(&cfg->ioctl_rwsem);
- INIT_LIST_HEAD(&cfg->ctx_err_recovery);
- INIT_LIST_HEAD(&cfg->lluns);
-
- pci_set_drvdata(pdev, cfg);
-
- rc = init_pci(cfg);
- if (rc) {
- dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
- goto out_remove;
- }
- cfg->init_state = INIT_STATE_PCI;
-
- cfg->afu_cookie = cfg->ops->create_afu(pdev);
- if (unlikely(!cfg->afu_cookie)) {
- dev_err(dev, "%s: create_afu failed\n", __func__);
- rc = -ENOMEM;
- goto out_remove;
- }
-
- rc = init_afu(cfg);
- if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
- dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
- goto out_remove;
- }
- cfg->init_state = INIT_STATE_AFU;
-
- rc = init_scsi(cfg);
- if (rc) {
- dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
- goto out_remove;
- }
- cfg->init_state = INIT_STATE_SCSI;
-
- rc = init_chrdev(cfg);
- if (rc) {
- dev_err(dev, "%s: init_chrdev failed rc=%d\n", __func__, rc);
- goto out_remove;
- }
- cfg->init_state = INIT_STATE_CDEV;
-
- if (wq_has_sleeper(&cfg->reset_waitq)) {
- cfg->state = STATE_PROBED;
- wake_up_all(&cfg->reset_waitq);
- } else
- cfg->state = STATE_NORMAL;
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-
-out_remove:
- cfg->state = STATE_PROBED;
- cxlflash_remove(pdev);
- goto out;
-}
-
-/**
- * cxlflash_pci_error_detected() - called when a PCI error is detected
- * @pdev: PCI device struct.
- * @state: PCI channel state.
- *
- * When an EEH occurs during an active reset, wait until the reset is
- * complete and then take action based upon the device state.
- *
- * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
- */
-static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- int rc = 0;
- struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
- struct device *dev = &cfg->dev->dev;
-
- dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
-
- switch (state) {
- case pci_channel_io_frozen:
- wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
- cfg->state != STATE_PROBING);
- if (cfg->state == STATE_FAILTERM)
- return PCI_ERS_RESULT_DISCONNECT;
-
- cfg->state = STATE_RESET;
- scsi_block_requests(cfg->host);
- drain_ioctls(cfg);
- rc = cxlflash_mark_contexts_error(cfg);
- if (unlikely(rc))
- dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
- __func__, rc);
- term_afu(cfg);
- return PCI_ERS_RESULT_NEED_RESET;
- case pci_channel_io_perm_failure:
- cfg->state = STATE_FAILTERM;
- wake_up_all(&cfg->reset_waitq);
- scsi_unblock_requests(cfg->host);
- return PCI_ERS_RESULT_DISCONNECT;
- default:
- break;
- }
- return PCI_ERS_RESULT_NEED_RESET;
-}
-
-/**
- * cxlflash_pci_slot_reset() - called when PCI slot has been reset
- * @pdev: PCI device struct.
- *
- * This routine is called by the pci error recovery code after the PCI
- * slot has been reset, just before we should resume normal operations.
- *
- * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
- */
-static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
-{
- int rc = 0;
- struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
- struct device *dev = &cfg->dev->dev;
-
- dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
-
- rc = init_afu(cfg);
- if (unlikely(rc)) {
- dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-/**
- * cxlflash_pci_resume() - called when normal operation can resume
- * @pdev: PCI device struct
- */
-static void cxlflash_pci_resume(struct pci_dev *pdev)
-{
- struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
- struct device *dev = &cfg->dev->dev;
-
- dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
-
- cfg->state = STATE_NORMAL;
- wake_up_all(&cfg->reset_waitq);
- scsi_unblock_requests(cfg->host);
-}
-
-/**
- * cxlflash_devnode() - provides devtmpfs for devices in the cxlflash class
- * @dev: Character device.
- * @mode: Mode that can be used to verify access.
- *
- * Return: Allocated string describing the devtmpfs structure.
- */
-static char *cxlflash_devnode(const struct device *dev, umode_t *mode)
-{
- return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev));
-}
-
-/**
- * cxlflash_class_init() - create character device class
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_class_init(void)
-{
- dev_t devno;
- int rc = 0;
-
- rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash");
- if (unlikely(rc)) {
- pr_err("%s: alloc_chrdev_region failed rc=%d\n", __func__, rc);
- goto out;
- }
-
- cxlflash_major = MAJOR(devno);
-
- rc = class_register(&cxlflash_class);
- if (rc) {
- pr_err("%s: class_create failed rc=%d\n", __func__, rc);
- goto err;
- }
-
-out:
- pr_debug("%s: returning rc=%d\n", __func__, rc);
- return rc;
-err:
- unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
- goto out;
-}
-
-/**
- * cxlflash_class_exit() - destroy character device class
- */
-static void cxlflash_class_exit(void)
-{
- dev_t devno = MKDEV(cxlflash_major, 0);
-
- class_unregister(&cxlflash_class);
- unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
-}
-
-static const struct pci_error_handlers cxlflash_err_handler = {
- .error_detected = cxlflash_pci_error_detected,
- .slot_reset = cxlflash_pci_slot_reset,
- .resume = cxlflash_pci_resume,
-};
-
-/*
- * PCI device structure
- */
-static struct pci_driver cxlflash_driver = {
- .name = CXLFLASH_NAME,
- .id_table = cxlflash_pci_table,
- .probe = cxlflash_probe,
- .remove = cxlflash_remove,
- .shutdown = cxlflash_remove,
- .err_handler = &cxlflash_err_handler,
-};
-
-/**
- * init_cxlflash() - module entry point
- *
- * Return: 0 on success, -errno on failure
- */
-static int __init init_cxlflash(void)
-{
- int rc;
-
- check_sizes();
- cxlflash_list_init();
- rc = cxlflash_class_init();
- if (unlikely(rc))
- goto out;
-
- rc = pci_register_driver(&cxlflash_driver);
- if (unlikely(rc))
- goto err;
-out:
- pr_debug("%s: returning rc=%d\n", __func__, rc);
- return rc;
-err:
- cxlflash_class_exit();
- goto out;
-}
-
-/**
- * exit_cxlflash() - module exit point
- */
-static void __exit exit_cxlflash(void)
-{
- cxlflash_term_global_luns();
- cxlflash_free_errpage();
-
- pci_unregister_driver(&cxlflash_driver);
- cxlflash_class_exit();
-}
-
-module_init(init_cxlflash);
-module_exit(exit_cxlflash);
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
deleted file mode 100644
index 0bfb98effce0..000000000000
--- a/drivers/scsi/cxlflash/main.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#ifndef _CXLFLASH_MAIN_H
-#define _CXLFLASH_MAIN_H
-
-#include <linux/list.h>
-#include <linux/types.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_device.h>
-
-#include "backend.h"
-
-#define CXLFLASH_NAME "cxlflash"
-#define CXLFLASH_ADAPTER_NAME "IBM POWER CXL Flash Adapter"
-#define CXLFLASH_MAX_ADAPTERS 32
-
-#define PCI_DEVICE_ID_IBM_CORSA 0x04F0
-#define PCI_DEVICE_ID_IBM_FLASH_GT 0x0600
-#define PCI_DEVICE_ID_IBM_BRIARD 0x0624
-
-/* Since there is only one target, make it 0 */
-#define CXLFLASH_TARGET 0
-#define CXLFLASH_MAX_CDB_LEN 16
-
-/* Really only one target per bus since the Texan is directly attached */
-#define CXLFLASH_MAX_NUM_TARGETS_PER_BUS 1
-#define CXLFLASH_MAX_NUM_LUNS_PER_TARGET 65536
-
-#define CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT (120 * HZ)
-
-/* FC defines */
-#define FC_MTIP_CMDCONFIG 0x010
-#define FC_MTIP_STATUS 0x018
-#define FC_MAX_NUM_LUNS 0x080 /* Max LUNs host can provision for port */
-#define FC_CUR_NUM_LUNS 0x088 /* Cur number LUNs provisioned for port */
-#define FC_MAX_CAP_PORT 0x090 /* Max capacity all LUNs for port (4K blocks) */
-#define FC_CUR_CAP_PORT 0x098 /* Cur capacity all LUNs for port (4K blocks) */
-
-#define FC_PNAME 0x300
-#define FC_CONFIG 0x320
-#define FC_CONFIG2 0x328
-#define FC_STATUS 0x330
-#define FC_ERROR 0x380
-#define FC_ERRCAP 0x388
-#define FC_ERRMSK 0x390
-#define FC_CNT_CRCERR 0x538
-#define FC_CRC_THRESH 0x580
-
-#define FC_MTIP_CMDCONFIG_ONLINE 0x20ULL
-#define FC_MTIP_CMDCONFIG_OFFLINE 0x40ULL
-
-#define FC_MTIP_STATUS_MASK 0x30ULL
-#define FC_MTIP_STATUS_ONLINE 0x20ULL
-#define FC_MTIP_STATUS_OFFLINE 0x10ULL
-
-/* TIMEOUT and RETRY definitions */
-
-/* AFU command timeout values */
-#define MC_AFU_SYNC_TIMEOUT 5 /* 5 secs */
-#define MC_LUN_PROV_TIMEOUT 5 /* 5 secs */
-#define MC_AFU_DEBUG_TIMEOUT 5 /* 5 secs */
-
-/* AFU command room retry limit */
-#define MC_ROOM_RETRY_CNT 10
-
-/* FC CRC clear periodic timer */
-#define MC_CRC_THRESH 100 /* threshold in 5 mins */
-
-#define FC_PORT_STATUS_RETRY_CNT 100 /* 100 100ms retries = 10 seconds */
-#define FC_PORT_STATUS_RETRY_INTERVAL_US 100000 /* microseconds */
-
-/* VPD defines */
-#define CXLFLASH_VPD_LEN 256
-#define WWPN_LEN 16
-#define WWPN_BUF_LEN (WWPN_LEN + 1)
-
-enum undo_level {
- UNDO_NOOP = 0,
- FREE_IRQ,
- UNMAP_ONE,
- UNMAP_TWO,
- UNMAP_THREE
-};
-
-struct dev_dependent_vals {
- u64 max_sectors;
- u64 flags;
-#define CXLFLASH_NOTIFY_SHUTDOWN 0x0000000000000001ULL
-#define CXLFLASH_WWPN_VPD_REQUIRED 0x0000000000000002ULL
-#define CXLFLASH_OCXL_DEV 0x0000000000000004ULL
-};
-
-static inline const struct cxlflash_backend_ops *
-cxlflash_assign_ops(struct dev_dependent_vals *ddv)
-{
- const struct cxlflash_backend_ops *ops = NULL;
-
-#ifdef CONFIG_OCXL_BASE
- if (ddv->flags & CXLFLASH_OCXL_DEV)
- ops = &cxlflash_ocxl_ops;
-#endif
-
-#ifdef CONFIG_CXL_BASE
- if (!(ddv->flags & CXLFLASH_OCXL_DEV))
- ops = &cxlflash_cxl_ops;
-#endif
-
- return ops;
-}
-
-struct asyc_intr_info {
- u64 status;
- char *desc;
- u8 port;
- u8 action;
-#define CLR_FC_ERROR 0x01
-#define LINK_RESET 0x02
-#define SCAN_HOST 0x04
-};
-
-#endif /* _CXLFLASH_MAIN_H */
diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c
deleted file mode 100644
index 6542818e595a..000000000000
--- a/drivers/scsi/cxlflash/ocxl_hw.c
+++ /dev/null
@@ -1,1399 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CXL Flash Device Driver
- *
- * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2018 IBM Corporation
- */
-
-#include <linux/file.h>
-#include <linux/idr.h>
-#include <linux/module.h>
-#include <linux/mount.h>
-#include <linux/pseudo_fs.h>
-#include <linux/poll.h>
-#include <linux/sched/signal.h>
-#include <linux/interrupt.h>
-#include <linux/irqdomain.h>
-#include <asm/xive.h>
-#include <misc/ocxl.h>
-
-#include <uapi/misc/cxl.h>
-
-#include "backend.h"
-#include "ocxl_hw.h"
-
-/*
- * Pseudo-filesystem to allocate inodes.
- */
-
-#define OCXLFLASH_FS_MAGIC 0x1697698f
-
-static int ocxlflash_fs_cnt;
-static struct vfsmount *ocxlflash_vfs_mount;
-
-static int ocxlflash_fs_init_fs_context(struct fs_context *fc)
-{
- return init_pseudo(fc, OCXLFLASH_FS_MAGIC) ? 0 : -ENOMEM;
-}
-
-static struct file_system_type ocxlflash_fs_type = {
- .name = "ocxlflash",
- .owner = THIS_MODULE,
- .init_fs_context = ocxlflash_fs_init_fs_context,
- .kill_sb = kill_anon_super,
-};
-
-/*
- * ocxlflash_release_mapping() - release the memory mapping
- * @ctx: Context whose mapping is to be released.
- */
-static void ocxlflash_release_mapping(struct ocxlflash_context *ctx)
-{
- if (ctx->mapping)
- simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
- ctx->mapping = NULL;
-}
-
-/*
- * ocxlflash_getfile() - allocate pseudo filesystem, inode, and the file
- * @dev: Generic device of the host.
- * @name: Name of the pseudo filesystem.
- * @fops: File operations.
- * @priv: Private data.
- * @flags: Flags for the file.
- *
- * Return: pointer to the file on success, ERR_PTR on failure
- */
-static struct file *ocxlflash_getfile(struct device *dev, const char *name,
- const struct file_operations *fops,
- void *priv, int flags)
-{
- struct file *file;
- struct inode *inode;
- int rc;
-
- if (fops->owner && !try_module_get(fops->owner)) {
- dev_err(dev, "%s: Owner does not exist\n", __func__);
- rc = -ENOENT;
- goto err1;
- }
-
- rc = simple_pin_fs(&ocxlflash_fs_type, &ocxlflash_vfs_mount,
- &ocxlflash_fs_cnt);
- if (unlikely(rc < 0)) {
- dev_err(dev, "%s: Cannot mount ocxlflash pseudofs rc=%d\n",
- __func__, rc);
- goto err2;
- }
-
- inode = alloc_anon_inode(ocxlflash_vfs_mount->mnt_sb);
- if (IS_ERR(inode)) {
- rc = PTR_ERR(inode);
- dev_err(dev, "%s: alloc_anon_inode failed rc=%d\n",
- __func__, rc);
- goto err3;
- }
-
- file = alloc_file_pseudo(inode, ocxlflash_vfs_mount, name,
- flags & (O_ACCMODE | O_NONBLOCK), fops);
- if (IS_ERR(file)) {
- rc = PTR_ERR(file);
- dev_err(dev, "%s: alloc_file failed rc=%d\n",
- __func__, rc);
- goto err4;
- }
-
- file->private_data = priv;
-out:
- return file;
-err4:
- iput(inode);
-err3:
- simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
-err2:
- module_put(fops->owner);
-err1:
- file = ERR_PTR(rc);
- goto out;
-}
-
-/**
- * ocxlflash_psa_map() - map the process specific MMIO space
- * @ctx_cookie: Adapter context for which the mapping needs to be done.
- *
- * Return: MMIO pointer of the mapped region
- */
-static void __iomem *ocxlflash_psa_map(void *ctx_cookie)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
- struct device *dev = ctx->hw_afu->dev;
-
- mutex_lock(&ctx->state_mutex);
- if (ctx->state != STARTED) {
- dev_err(dev, "%s: Context not started, state=%d\n", __func__,
- ctx->state);
- mutex_unlock(&ctx->state_mutex);
- return NULL;
- }
- mutex_unlock(&ctx->state_mutex);
-
- return ioremap(ctx->psn_phys, ctx->psn_size);
-}
-
-/**
- * ocxlflash_psa_unmap() - unmap the process specific MMIO space
- * @addr: MMIO pointer to unmap.
- */
-static void ocxlflash_psa_unmap(void __iomem *addr)
-{
- iounmap(addr);
-}
-
-/**
- * ocxlflash_process_element() - get process element of the adapter context
- * @ctx_cookie: Adapter context associated with the process element.
- *
- * Return: process element of the adapter context
- */
-static int ocxlflash_process_element(void *ctx_cookie)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
-
- return ctx->pe;
-}
-
-/**
- * afu_map_irq() - map the interrupt of the adapter context
- * @flags: Flags.
- * @ctx: Adapter context.
- * @num: Per-context AFU interrupt number.
- * @handler: Interrupt handler to register.
- * @cookie: Interrupt handler private data.
- * @name: Name of the interrupt.
- *
- * Return: 0 on success, -errno on failure
- */
-static int afu_map_irq(u64 flags, struct ocxlflash_context *ctx, int num,
- irq_handler_t handler, void *cookie, char *name)
-{
- struct ocxl_hw_afu *afu = ctx->hw_afu;
- struct device *dev = afu->dev;
- struct ocxlflash_irqs *irq;
- struct xive_irq_data *xd;
- u32 virq;
- int rc = 0;
-
- if (num < 0 || num >= ctx->num_irqs) {
- dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
- rc = -ENOENT;
- goto out;
- }
-
- irq = &ctx->irqs[num];
- virq = irq_create_mapping(NULL, irq->hwirq);
- if (unlikely(!virq)) {
- dev_err(dev, "%s: irq_create_mapping failed\n", __func__);
- rc = -ENOMEM;
- goto out;
- }
-
- rc = request_irq(virq, handler, 0, name, cookie);
- if (unlikely(rc)) {
- dev_err(dev, "%s: request_irq failed rc=%d\n", __func__, rc);
- goto err1;
- }
-
- xd = irq_get_handler_data(virq);
- if (unlikely(!xd)) {
- dev_err(dev, "%s: Can't get interrupt data\n", __func__);
- rc = -ENXIO;
- goto err2;
- }
-
- irq->virq = virq;
- irq->vtrig = xd->trig_mmio;
-out:
- return rc;
-err2:
- free_irq(virq, cookie);
-err1:
- irq_dispose_mapping(virq);
- goto out;
-}
-
-/**
- * ocxlflash_map_afu_irq() - map the interrupt of the adapter context
- * @ctx_cookie: Adapter context.
- * @num: Per-context AFU interrupt number.
- * @handler: Interrupt handler to register.
- * @cookie: Interrupt handler private data.
- * @name: Name of the interrupt.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_map_afu_irq(void *ctx_cookie, int num,
- irq_handler_t handler, void *cookie,
- char *name)
-{
- return afu_map_irq(0, ctx_cookie, num, handler, cookie, name);
-}
-
-/**
- * afu_unmap_irq() - unmap the interrupt
- * @flags: Flags.
- * @ctx: Adapter context.
- * @num: Per-context AFU interrupt number.
- * @cookie: Interrupt handler private data.
- */
-static void afu_unmap_irq(u64 flags, struct ocxlflash_context *ctx, int num,
- void *cookie)
-{
- struct ocxl_hw_afu *afu = ctx->hw_afu;
- struct device *dev = afu->dev;
- struct ocxlflash_irqs *irq;
-
- if (num < 0 || num >= ctx->num_irqs) {
- dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
- return;
- }
-
- irq = &ctx->irqs[num];
-
- if (irq_find_mapping(NULL, irq->hwirq)) {
- free_irq(irq->virq, cookie);
- irq_dispose_mapping(irq->virq);
- }
-
- memset(irq, 0, sizeof(*irq));
-}
-
-/**
- * ocxlflash_unmap_afu_irq() - unmap the interrupt
- * @ctx_cookie: Adapter context.
- * @num: Per-context AFU interrupt number.
- * @cookie: Interrupt handler private data.
- */
-static void ocxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
-{
- return afu_unmap_irq(0, ctx_cookie, num, cookie);
-}
-
-/**
- * ocxlflash_get_irq_objhndl() - get the object handle for an interrupt
- * @ctx_cookie: Context associated with the interrupt.
- * @irq: Interrupt number.
- *
- * Return: effective address of the mapped region
- */
-static u64 ocxlflash_get_irq_objhndl(void *ctx_cookie, int irq)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
-
- if (irq < 0 || irq >= ctx->num_irqs)
- return 0;
-
- return (__force u64)ctx->irqs[irq].vtrig;
-}
-
-/**
- * ocxlflash_xsl_fault() - callback when translation error is triggered
- * @data: Private data provided at callback registration, the context.
- * @addr: Address that triggered the error.
- * @dsisr: Value of dsisr register.
- */
-static void ocxlflash_xsl_fault(void *data, u64 addr, u64 dsisr)
-{
- struct ocxlflash_context *ctx = data;
-
- spin_lock(&ctx->slock);
- ctx->fault_addr = addr;
- ctx->fault_dsisr = dsisr;
- ctx->pending_fault = true;
- spin_unlock(&ctx->slock);
-
- wake_up_all(&ctx->wq);
-}
-
-/**
- * start_context() - local routine to start a context
- * @ctx: Adapter context to be started.
- *
- * Assign the context specific MMIO space, add and enable the PE.
- *
- * Return: 0 on success, -errno on failure
- */
-static int start_context(struct ocxlflash_context *ctx)
-{
- struct ocxl_hw_afu *afu = ctx->hw_afu;
- struct ocxl_afu_config *acfg = &afu->acfg;
- void *link_token = afu->link_token;
- struct pci_dev *pdev = afu->pdev;
- struct device *dev = afu->dev;
- bool master = ctx->master;
- struct mm_struct *mm;
- int rc = 0;
- u32 pid;
-
- mutex_lock(&ctx->state_mutex);
- if (ctx->state != OPENED) {
- dev_err(dev, "%s: Context state invalid, state=%d\n",
- __func__, ctx->state);
- rc = -EINVAL;
- goto out;
- }
-
- if (master) {
- ctx->psn_size = acfg->global_mmio_size;
- ctx->psn_phys = afu->gmmio_phys;
- } else {
- ctx->psn_size = acfg->pp_mmio_stride;
- ctx->psn_phys = afu->ppmmio_phys + (ctx->pe * ctx->psn_size);
- }
-
- /* pid and mm not set for master contexts */
- if (master) {
- pid = 0;
- mm = NULL;
- } else {
- pid = current->mm->context.id;
- mm = current->mm;
- }
-
- rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0,
- pci_dev_id(pdev), mm, ocxlflash_xsl_fault,
- ctx);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_link_add_pe failed rc=%d\n",
- __func__, rc);
- goto out;
- }
-
- ctx->state = STARTED;
-out:
- mutex_unlock(&ctx->state_mutex);
- return rc;
-}
-
-/**
- * ocxlflash_start_context() - start a kernel context
- * @ctx_cookie: Adapter context to be started.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_start_context(void *ctx_cookie)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
-
- return start_context(ctx);
-}
-
-/**
- * ocxlflash_stop_context() - stop a context
- * @ctx_cookie: Adapter context to be stopped.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_stop_context(void *ctx_cookie)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
- struct ocxl_hw_afu *afu = ctx->hw_afu;
- struct ocxl_afu_config *acfg = &afu->acfg;
- struct pci_dev *pdev = afu->pdev;
- struct device *dev = afu->dev;
- enum ocxlflash_ctx_state state;
- int rc = 0;
-
- mutex_lock(&ctx->state_mutex);
- state = ctx->state;
- ctx->state = CLOSED;
- mutex_unlock(&ctx->state_mutex);
- if (state != STARTED)
- goto out;
-
- rc = ocxl_config_terminate_pasid(pdev, acfg->dvsec_afu_control_pos,
- ctx->pe);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_config_terminate_pasid failed rc=%d\n",
- __func__, rc);
- /* If EBUSY, PE could be referenced in future by the AFU */
- if (rc == -EBUSY)
- goto out;
- }
-
- rc = ocxl_link_remove_pe(afu->link_token, ctx->pe);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_link_remove_pe failed rc=%d\n",
- __func__, rc);
- goto out;
- }
-out:
- return rc;
-}
-
-/**
- * ocxlflash_afu_reset() - reset the AFU
- * @ctx_cookie: Adapter context.
- */
-static int ocxlflash_afu_reset(void *ctx_cookie)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
- struct device *dev = ctx->hw_afu->dev;
-
- /* Pending implementation from OCXL transport services */
- dev_err_once(dev, "%s: afu_reset() fop not supported\n", __func__);
-
- /* Silently return success until it is implemented */
- return 0;
-}
-
-/**
- * ocxlflash_set_master() - sets the context as master
- * @ctx_cookie: Adapter context to set as master.
- */
-static void ocxlflash_set_master(void *ctx_cookie)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
-
- ctx->master = true;
-}
-
-/**
- * ocxlflash_get_context() - obtains the context associated with the host
- * @pdev: PCI device associated with the host.
- * @afu_cookie: Hardware AFU associated with the host.
- *
- * Return: returns the pointer to host adapter context
- */
-static void *ocxlflash_get_context(struct pci_dev *pdev, void *afu_cookie)
-{
- struct ocxl_hw_afu *afu = afu_cookie;
-
- return afu->ocxl_ctx;
-}
-
-/**
- * ocxlflash_dev_context_init() - allocate and initialize an adapter context
- * @pdev: PCI device associated with the host.
- * @afu_cookie: Hardware AFU associated with the host.
- *
- * Return: returns the adapter context on success, ERR_PTR on failure
- */
-static void *ocxlflash_dev_context_init(struct pci_dev *pdev, void *afu_cookie)
-{
- struct ocxl_hw_afu *afu = afu_cookie;
- struct device *dev = afu->dev;
- struct ocxlflash_context *ctx;
- int rc;
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (unlikely(!ctx)) {
- dev_err(dev, "%s: Context allocation failed\n", __func__);
- rc = -ENOMEM;
- goto err1;
- }
-
- idr_preload(GFP_KERNEL);
- rc = idr_alloc(&afu->idr, ctx, 0, afu->max_pasid, GFP_NOWAIT);
- idr_preload_end();
- if (unlikely(rc < 0)) {
- dev_err(dev, "%s: idr_alloc failed rc=%d\n", __func__, rc);
- goto err2;
- }
-
- spin_lock_init(&ctx->slock);
- init_waitqueue_head(&ctx->wq);
- mutex_init(&ctx->state_mutex);
-
- ctx->state = OPENED;
- ctx->pe = rc;
- ctx->master = false;
- ctx->mapping = NULL;
- ctx->hw_afu = afu;
- ctx->irq_bitmap = 0;
- ctx->pending_irq = false;
- ctx->pending_fault = false;
-out:
- return ctx;
-err2:
- kfree(ctx);
-err1:
- ctx = ERR_PTR(rc);
- goto out;
-}
-
-/**
- * ocxlflash_release_context() - releases an adapter context
- * @ctx_cookie: Adapter context to be released.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_release_context(void *ctx_cookie)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
- struct device *dev;
- int rc = 0;
-
- if (!ctx)
- goto out;
-
- dev = ctx->hw_afu->dev;
- mutex_lock(&ctx->state_mutex);
- if (ctx->state >= STARTED) {
- dev_err(dev, "%s: Context in use, state=%d\n", __func__,
- ctx->state);
- mutex_unlock(&ctx->state_mutex);
- rc = -EBUSY;
- goto out;
- }
- mutex_unlock(&ctx->state_mutex);
-
- idr_remove(&ctx->hw_afu->idr, ctx->pe);
- ocxlflash_release_mapping(ctx);
- kfree(ctx);
-out:
- return rc;
-}
-
-/**
- * ocxlflash_perst_reloads_same_image() - sets the image reload policy
- * @afu_cookie: Hardware AFU associated with the host.
- * @image: Whether to load the same image on PERST.
- */
-static void ocxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
-{
- struct ocxl_hw_afu *afu = afu_cookie;
-
- afu->perst_same_image = image;
-}
-
-/**
- * ocxlflash_read_adapter_vpd() - reads the adapter VPD
- * @pdev: PCI device associated with the host.
- * @buf: Buffer to get the VPD data.
- * @count: Size of buffer (maximum bytes that can be read).
- *
- * Return: size of VPD on success, -errno on failure
- */
-static ssize_t ocxlflash_read_adapter_vpd(struct pci_dev *pdev, void *buf,
- size_t count)
-{
- return pci_read_vpd(pdev, 0, count, buf);
-}
-
-/**
- * free_afu_irqs() - internal service to free interrupts
- * @ctx: Adapter context.
- */
-static void free_afu_irqs(struct ocxlflash_context *ctx)
-{
- struct ocxl_hw_afu *afu = ctx->hw_afu;
- struct device *dev = afu->dev;
- int i;
-
- if (!ctx->irqs) {
- dev_err(dev, "%s: Interrupts not allocated\n", __func__);
- return;
- }
-
- for (i = ctx->num_irqs; i >= 0; i--)
- ocxl_link_free_irq(afu->link_token, ctx->irqs[i].hwirq);
-
- kfree(ctx->irqs);
- ctx->irqs = NULL;
-}
-
-/**
- * alloc_afu_irqs() - internal service to allocate interrupts
- * @ctx: Context associated with the request.
- * @num: Number of interrupts requested.
- *
- * Return: 0 on success, -errno on failure
- */
-static int alloc_afu_irqs(struct ocxlflash_context *ctx, int num)
-{
- struct ocxl_hw_afu *afu = ctx->hw_afu;
- struct device *dev = afu->dev;
- struct ocxlflash_irqs *irqs;
- int rc = 0;
- int hwirq;
- int i;
-
- if (ctx->irqs) {
- dev_err(dev, "%s: Interrupts already allocated\n", __func__);
- rc = -EEXIST;
- goto out;
- }
-
- if (num > OCXL_MAX_IRQS) {
- dev_err(dev, "%s: Too many interrupts num=%d\n", __func__, num);
- rc = -EINVAL;
- goto out;
- }
-
- irqs = kcalloc(num, sizeof(*irqs), GFP_KERNEL);
- if (unlikely(!irqs)) {
- dev_err(dev, "%s: Context irqs allocation failed\n", __func__);
- rc = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < num; i++) {
- rc = ocxl_link_irq_alloc(afu->link_token, &hwirq);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_link_irq_alloc failed rc=%d\n",
- __func__, rc);
- goto err;
- }
-
- irqs[i].hwirq = hwirq;
- }
-
- ctx->irqs = irqs;
- ctx->num_irqs = num;
-out:
- return rc;
-err:
- for (i = i-1; i >= 0; i--)
- ocxl_link_free_irq(afu->link_token, irqs[i].hwirq);
- kfree(irqs);
- goto out;
-}
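
alloc_afu_irqs() follows the usual allocate-N-or-unwind pattern: when the i-th allocation fails, entries i-1 down to 0 are released before the error is returned. A generic standalone sketch of that rollback loop; the resource type and the injected failure are invented for illustration:

#include <stdio.h>
#include <stdlib.h>

#define NUM_RES 4

static void *acquire(int i)
{
        if (i == 2)             /* simulate a failure part way through */
                return NULL;
        return malloc(16);
}

int main(void)
{
        void *res[NUM_RES] = { NULL };
        int i;

        for (i = 0; i < NUM_RES; i++) {
                res[i] = acquire(i);
                if (!res[i])
                        goto err;
        }
        puts("all resources acquired");
        return 0;
err:
        fprintf(stderr, "acquire %d failed, rolling back\n", i);
        /* Unwind only what was successfully acquired: i-1 .. 0 */
        for (i = i - 1; i >= 0; i--)
                free(res[i]);
        return 1;
}
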
-
-/**
- * ocxlflash_allocate_afu_irqs() - allocates the requested number of interrupts
- * @ctx_cookie: Context associated with the request.
- * @num: Number of interrupts requested.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
-{
- return alloc_afu_irqs(ctx_cookie, num);
-}
-
-/**
- * ocxlflash_free_afu_irqs() - frees the interrupts of an adapter context
- * @ctx_cookie: Adapter context.
- */
-static void ocxlflash_free_afu_irqs(void *ctx_cookie)
-{
- free_afu_irqs(ctx_cookie);
-}
-
-/**
- * ocxlflash_unconfig_afu() - unconfigure the AFU
- * @afu: AFU associated with the host.
- */
-static void ocxlflash_unconfig_afu(struct ocxl_hw_afu *afu)
-{
- if (afu->gmmio_virt) {
- iounmap(afu->gmmio_virt);
- afu->gmmio_virt = NULL;
- }
-}
-
-/**
- * ocxlflash_destroy_afu() - destroy the AFU structure
- * @afu_cookie: AFU to be freed.
- */
-static void ocxlflash_destroy_afu(void *afu_cookie)
-{
- struct ocxl_hw_afu *afu = afu_cookie;
- int pos;
-
- if (!afu)
- return;
-
- ocxlflash_release_context(afu->ocxl_ctx);
- idr_destroy(&afu->idr);
-
- /* Disable the AFU */
- pos = afu->acfg.dvsec_afu_control_pos;
- ocxl_config_set_afu_state(afu->pdev, pos, 0);
-
- ocxlflash_unconfig_afu(afu);
- kfree(afu);
-}
-
-/**
- * ocxlflash_config_fn() - configure the host function
- * @pdev: PCI device associated with the host.
- * @afu: AFU associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_config_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
-{
- struct ocxl_fn_config *fcfg = &afu->fcfg;
- struct device *dev = &pdev->dev;
- u16 base, enabled, supported;
- int rc = 0;
-
- /* Read DVSEC config of the function */
- rc = ocxl_config_read_function(pdev, fcfg);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_config_read_function failed rc=%d\n",
- __func__, rc);
- goto out;
- }
-
- /* Check if function has AFUs defined, only 1 per function supported */
- if (fcfg->max_afu_index >= 0) {
- afu->is_present = true;
- if (fcfg->max_afu_index != 0)
- dev_warn(dev, "%s: Unexpected AFU index value %d\n",
- __func__, fcfg->max_afu_index);
- }
-
- rc = ocxl_config_get_actag_info(pdev, &base, &enabled, &supported);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_config_get_actag_info failed rc=%d\n",
- __func__, rc);
- goto out;
- }
-
- afu->fn_actag_base = base;
- afu->fn_actag_enabled = enabled;
-
- ocxl_config_set_actag(pdev, fcfg->dvsec_function_pos, base, enabled);
- dev_dbg(dev, "%s: Function acTag range base=%u enabled=%u\n",
- __func__, base, enabled);
-
- rc = ocxl_link_setup(pdev, 0, &afu->link_token);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_link_setup failed rc=%d\n",
- __func__, rc);
- goto out;
- }
-
- rc = ocxl_config_set_TL(pdev, fcfg->dvsec_tl_pos);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_config_set_TL failed rc=%d\n",
- __func__, rc);
- goto err;
- }
-out:
- return rc;
-err:
- ocxl_link_release(pdev, afu->link_token);
- goto out;
-}
-
-/**
- * ocxlflash_unconfig_fn() - unconfigure the host function
- * @pdev: PCI device associated with the host.
- * @afu: AFU associated with the host.
- */
-static void ocxlflash_unconfig_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
-{
- ocxl_link_release(pdev, afu->link_token);
-}
-
-/**
- * ocxlflash_map_mmio() - map the AFU MMIO space
- * @afu: AFU associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_map_mmio(struct ocxl_hw_afu *afu)
-{
- struct ocxl_afu_config *acfg = &afu->acfg;
- struct pci_dev *pdev = afu->pdev;
- struct device *dev = afu->dev;
- phys_addr_t gmmio, ppmmio;
- int rc = 0;
-
- rc = pci_request_region(pdev, acfg->global_mmio_bar, "ocxlflash");
- if (unlikely(rc)) {
- dev_err(dev, "%s: pci_request_region for global failed rc=%d\n",
- __func__, rc);
- goto out;
- }
- gmmio = pci_resource_start(pdev, acfg->global_mmio_bar);
- gmmio += acfg->global_mmio_offset;
-
- rc = pci_request_region(pdev, acfg->pp_mmio_bar, "ocxlflash");
- if (unlikely(rc)) {
- dev_err(dev, "%s: pci_request_region for pp bar failed rc=%d\n",
- __func__, rc);
- goto err1;
- }
- ppmmio = pci_resource_start(pdev, acfg->pp_mmio_bar);
- ppmmio += acfg->pp_mmio_offset;
-
- afu->gmmio_virt = ioremap(gmmio, acfg->global_mmio_size);
- if (unlikely(!afu->gmmio_virt)) {
- dev_err(dev, "%s: MMIO mapping failed\n", __func__);
- rc = -ENOMEM;
- goto err2;
- }
-
- afu->gmmio_phys = gmmio;
- afu->ppmmio_phys = ppmmio;
-out:
- return rc;
-err2:
- pci_release_region(pdev, acfg->pp_mmio_bar);
-err1:
- pci_release_region(pdev, acfg->global_mmio_bar);
- goto out;
-}
-
-/**
- * ocxlflash_config_afu() - configure the host AFU
- * @pdev: PCI device associated with the host.
- * @afu: AFU associated with the host.
- *
- * Must be called _after_ host function configuration.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_config_afu(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
-{
- struct ocxl_afu_config *acfg = &afu->acfg;
- struct ocxl_fn_config *fcfg = &afu->fcfg;
- struct device *dev = &pdev->dev;
- int count;
- int base;
- int pos;
- int rc = 0;
-
- /* This HW AFU function does not have any AFUs defined */
- if (!afu->is_present)
- goto out;
-
- /* Read AFU config at index 0 */
- rc = ocxl_config_read_afu(pdev, fcfg, acfg, 0);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_config_read_afu failed rc=%d\n",
- __func__, rc);
- goto out;
- }
-
- /* Only one AFU per function is supported, so actag_base is same */
- base = afu->fn_actag_base;
- count = min_t(int, acfg->actag_supported, afu->fn_actag_enabled);
- pos = acfg->dvsec_afu_control_pos;
-
- ocxl_config_set_afu_actag(pdev, pos, base, count);
- dev_dbg(dev, "%s: acTag base=%d enabled=%d\n", __func__, base, count);
- afu->afu_actag_base = base;
- afu->afu_actag_enabled = count;
- afu->max_pasid = 1 << acfg->pasid_supported_log;
-
- ocxl_config_set_afu_pasid(pdev, pos, 0, acfg->pasid_supported_log);
-
- rc = ocxlflash_map_mmio(afu);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxlflash_map_mmio failed rc=%d\n",
- __func__, rc);
- goto out;
- }
-
- /* Enable the AFU */
- ocxl_config_set_afu_state(pdev, acfg->dvsec_afu_control_pos, 1);
-out:
- return rc;
-}
-
-/**
- * ocxlflash_create_afu() - create the AFU for OCXL
- * @pdev: PCI device associated with the host.
- *
- * Return: AFU on success, NULL on failure
- */
-static void *ocxlflash_create_afu(struct pci_dev *pdev)
-{
- struct device *dev = &pdev->dev;
- struct ocxlflash_context *ctx;
- struct ocxl_hw_afu *afu;
- int rc;
-
- afu = kzalloc(sizeof(*afu), GFP_KERNEL);
- if (unlikely(!afu)) {
- dev_err(dev, "%s: HW AFU allocation failed\n", __func__);
- goto out;
- }
-
- afu->pdev = pdev;
- afu->dev = dev;
- idr_init(&afu->idr);
-
- rc = ocxlflash_config_fn(pdev, afu);
- if (unlikely(rc)) {
- dev_err(dev, "%s: Function configuration failed rc=%d\n",
- __func__, rc);
- goto err1;
- }
-
- rc = ocxlflash_config_afu(pdev, afu);
- if (unlikely(rc)) {
- dev_err(dev, "%s: AFU configuration failed rc=%d\n",
- __func__, rc);
- goto err2;
- }
-
- ctx = ocxlflash_dev_context_init(pdev, afu);
- if (IS_ERR(ctx)) {
- rc = PTR_ERR(ctx);
- dev_err(dev, "%s: ocxlflash_dev_context_init failed rc=%d\n",
- __func__, rc);
- goto err3;
- }
-
- afu->ocxl_ctx = ctx;
-out:
- return afu;
-err3:
- ocxlflash_unconfig_afu(afu);
-err2:
- ocxlflash_unconfig_fn(pdev, afu);
-err1:
- idr_destroy(&afu->idr);
- kfree(afu);
- afu = NULL;
- goto out;
-}
-
-/**
- * ctx_event_pending() - check for any event pending on the context
- * @ctx: Context to be checked.
- *
- * Return: true if there is an event pending, false if none pending
- */
-static inline bool ctx_event_pending(struct ocxlflash_context *ctx)
-{
- if (ctx->pending_irq || ctx->pending_fault)
- return true;
-
- return false;
-}
-
-/**
- * afu_poll() - poll the AFU for events on the context
- * @file: File associated with the adapter context.
- * @poll: Poll structure from the user.
- *
- * Return: poll mask
- */
-static unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
-{
- struct ocxlflash_context *ctx = file->private_data;
- struct device *dev = ctx->hw_afu->dev;
- ulong lock_flags;
- int mask = 0;
-
- poll_wait(file, &ctx->wq, poll);
-
- spin_lock_irqsave(&ctx->slock, lock_flags);
- if (ctx_event_pending(ctx))
- mask |= POLLIN | POLLRDNORM;
- else if (ctx->state == CLOSED)
- mask |= POLLERR;
- spin_unlock_irqrestore(&ctx->slock, lock_flags);
-
- dev_dbg(dev, "%s: Poll wait completed for pe %i mask %i\n",
- __func__, ctx->pe, mask);
-
- return mask;
-}
-
-/**
- * afu_read() - perform a read on the context for any event
- * @file: File associated with the adapter context.
- * @buf: Buffer to receive the data.
- * @count: Size of buffer (maximum bytes that can be read).
- * @off: Offset.
- *
- * Return: size of the data read on success, -errno on failure
- */
-static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
- loff_t *off)
-{
- struct ocxlflash_context *ctx = file->private_data;
- struct device *dev = ctx->hw_afu->dev;
- struct cxl_event event;
- ulong lock_flags;
- ssize_t esize;
- ssize_t rc;
- int bit;
- DEFINE_WAIT(event_wait);
-
- if (*off != 0) {
- dev_err(dev, "%s: Non-zero offset not supported, off=%lld\n",
- __func__, *off);
- rc = -EINVAL;
- goto out;
- }
-
- spin_lock_irqsave(&ctx->slock, lock_flags);
-
- for (;;) {
- prepare_to_wait(&ctx->wq, &event_wait, TASK_INTERRUPTIBLE);
-
- if (ctx_event_pending(ctx) || (ctx->state == CLOSED))
- break;
-
- if (file->f_flags & O_NONBLOCK) {
- dev_err(dev, "%s: File cannot be blocked on I/O\n",
- __func__);
- rc = -EAGAIN;
- goto err;
- }
-
- if (signal_pending(current)) {
- dev_err(dev, "%s: Signal pending on the process\n",
- __func__);
- rc = -ERESTARTSYS;
- goto err;
- }
-
- spin_unlock_irqrestore(&ctx->slock, lock_flags);
- schedule();
- spin_lock_irqsave(&ctx->slock, lock_flags);
- }
-
- finish_wait(&ctx->wq, &event_wait);
-
- memset(&event, 0, sizeof(event));
- event.header.process_element = ctx->pe;
- event.header.size = sizeof(struct cxl_event_header);
- if (ctx->pending_irq) {
- esize = sizeof(struct cxl_event_afu_interrupt);
- event.header.size += esize;
- event.header.type = CXL_EVENT_AFU_INTERRUPT;
-
- bit = find_first_bit(&ctx->irq_bitmap, ctx->num_irqs);
- clear_bit(bit, &ctx->irq_bitmap);
- event.irq.irq = bit + 1;
- if (bitmap_empty(&ctx->irq_bitmap, ctx->num_irqs))
- ctx->pending_irq = false;
- } else if (ctx->pending_fault) {
- event.header.size += sizeof(struct cxl_event_data_storage);
- event.header.type = CXL_EVENT_DATA_STORAGE;
- event.fault.addr = ctx->fault_addr;
- event.fault.dsisr = ctx->fault_dsisr;
- ctx->pending_fault = false;
- }
-
- spin_unlock_irqrestore(&ctx->slock, lock_flags);
-
- if (copy_to_user(buf, &event, event.header.size)) {
- dev_err(dev, "%s: copy_to_user failed\n", __func__);
- rc = -EFAULT;
- goto out;
- }
-
- rc = event.header.size;
-out:
- return rc;
-err:
- finish_wait(&ctx->wq, &event_wait);
- spin_unlock_irqrestore(&ctx->slock, lock_flags);
- goto out;
-}
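
afu_read() drains pending AFU interrupts from a bitmap: the lowest set bit is found, cleared, and reported to userspace as a 1-based interrupt number. A tiny standalone sketch of that bookkeeping, using a GCC/Clang builtin in place of the kernel bitmap helpers:

#include <stdio.h>

int main(void)
{
        unsigned long pending = 0;

        /* Interrupt indices 0 and 3 fire (what the handler's set_bit() records). */
        pending |= 1UL << 0;
        pending |= 1UL << 3;

        while (pending) {
                int bit = __builtin_ctzl(pending);      /* lowest pending index */

                pending &= ~(1UL << bit);               /* clear_bit() equivalent */
                printf("deliver AFU interrupt %d\n", bit + 1);  /* 1-based for userspace */
        }
        return 0;
}
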
-
-/**
- * afu_release() - release and free the context
- * @inode: File inode pointer.
- * @file: File associated with the context.
- *
- * Return: 0 on success, -errno on failure
- */
-static int afu_release(struct inode *inode, struct file *file)
-{
- struct ocxlflash_context *ctx = file->private_data;
- int i;
-
- /* Unmap and free the interrupts associated with the context */
- for (i = ctx->num_irqs; i >= 0; i--)
- afu_unmap_irq(0, ctx, i, ctx);
- free_afu_irqs(ctx);
-
- return ocxlflash_release_context(ctx);
-}
-
-/**
- * ocxlflash_mmap_fault() - mmap fault handler
- * @vmf: VM fault associated with current fault.
- *
- * Return: 0 on success, -errno on failure
- */
-static vm_fault_t ocxlflash_mmap_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct ocxlflash_context *ctx = vma->vm_file->private_data;
- struct device *dev = ctx->hw_afu->dev;
- u64 mmio_area, offset;
-
- offset = vmf->pgoff << PAGE_SHIFT;
- if (offset >= ctx->psn_size)
- return VM_FAULT_SIGBUS;
-
- mutex_lock(&ctx->state_mutex);
- if (ctx->state != STARTED) {
- dev_err(dev, "%s: Context not started, state=%d\n",
- __func__, ctx->state);
- mutex_unlock(&ctx->state_mutex);
- return VM_FAULT_SIGBUS;
- }
- mutex_unlock(&ctx->state_mutex);
-
- mmio_area = ctx->psn_phys;
- mmio_area += offset;
-
- return vmf_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
-}
-
-static const struct vm_operations_struct ocxlflash_vmops = {
- .fault = ocxlflash_mmap_fault,
-};
-
-/**
- * afu_mmap() - map the fault handler operations
- * @file: File associated with the context.
- * @vma: VM area associated with mapping.
- *
- * Return: 0 on success, -errno on failure
- */
-static int afu_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct ocxlflash_context *ctx = file->private_data;
-
- if ((vma_pages(vma) + vma->vm_pgoff) >
- (ctx->psn_size >> PAGE_SHIFT))
- return -EINVAL;
-
- vm_flags_set(vma, VM_IO | VM_PFNMAP);
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_ops = &ocxlflash_vmops;
- return 0;
-}
-
-static const struct file_operations ocxl_afu_fops = {
- .owner = THIS_MODULE,
- .poll = afu_poll,
- .read = afu_read,
- .release = afu_release,
- .mmap = afu_mmap,
-};
-
-#define PATCH_FOPS(NAME) \
- do { if (!fops->NAME) fops->NAME = ocxl_afu_fops.NAME; } while (0)
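
PATCH_FOPS() above fills in only the callbacks a caller left NULL, falling back to the driver's defaults. A stripped-down userspace version of the same pointer-patching idea, with invented ops names:

#include <stdio.h>

struct demo_ops {
        void (*open)(void);
        void (*close)(void);
};

static void default_open(void)  { puts("default open");  }
static void default_close(void) { puts("default close"); }

static const struct demo_ops default_ops = {
        .open   = default_open,
        .close  = default_close,
};

/* Fill in a callback only if the caller did not supply one. */
#define PATCH_OP(ops, NAME) \
        do { if (!(ops)->NAME) (ops)->NAME = default_ops.NAME; } while (0)

int main(void)
{
        struct demo_ops ops = { .close = default_close };       /* caller set only .close */

        PATCH_OP(&ops, open);   /* NULL, falls back to default_open */
        PATCH_OP(&ops, close);  /* already set, left untouched */

        ops.open();
        ops.close();
        return 0;
}
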
-
-/**
- * ocxlflash_get_fd() - get file descriptor for an adapter context
- * @ctx_cookie: Adapter context.
- * @fops: File operations to be associated.
- * @fd: File descriptor to be returned back.
- *
- * Return: pointer to the file on success, ERR_PTR on failure
- */
-static struct file *ocxlflash_get_fd(void *ctx_cookie,
- struct file_operations *fops, int *fd)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
- struct device *dev = ctx->hw_afu->dev;
- struct file *file;
- int flags, fdtmp;
- int rc = 0;
- char *name = NULL;
-
- /* Only allow one fd per context */
- if (ctx->mapping) {
- dev_err(dev, "%s: Context is already mapped to an fd\n",
- __func__);
- rc = -EEXIST;
- goto err1;
- }
-
- flags = O_RDWR | O_CLOEXEC;
-
- /* This code is similar to anon_inode_getfd() */
- rc = get_unused_fd_flags(flags);
- if (unlikely(rc < 0)) {
- dev_err(dev, "%s: get_unused_fd_flags failed rc=%d\n",
- __func__, rc);
- goto err1;
- }
- fdtmp = rc;
-
- /* Patch the file ops that are not defined */
- if (fops) {
- PATCH_FOPS(poll);
- PATCH_FOPS(read);
- PATCH_FOPS(release);
- PATCH_FOPS(mmap);
- } else /* Use default ops */
- fops = (struct file_operations *)&ocxl_afu_fops;
-
- name = kasprintf(GFP_KERNEL, "ocxlflash:%d", ctx->pe);
- file = ocxlflash_getfile(dev, name, fops, ctx, flags);
- kfree(name);
- if (IS_ERR(file)) {
- rc = PTR_ERR(file);
- dev_err(dev, "%s: ocxlflash_getfile failed rc=%d\n",
- __func__, rc);
- goto err2;
- }
-
- ctx->mapping = file->f_mapping;
- *fd = fdtmp;
-out:
- return file;
-err2:
- put_unused_fd(fdtmp);
-err1:
- file = ERR_PTR(rc);
- goto out;
-}
-
-/**
- * ocxlflash_fops_get_context() - get the context associated with the file
- * @file: File associated with the adapter context.
- *
- * Return: pointer to the context
- */
-static void *ocxlflash_fops_get_context(struct file *file)
-{
- return file->private_data;
-}
-
-/**
- * ocxlflash_afu_irq() - interrupt handler for user contexts
- * @irq: Interrupt number.
- * @data: Private data provided at interrupt registration, the context.
- *
- * Return: Always return IRQ_HANDLED.
- */
-static irqreturn_t ocxlflash_afu_irq(int irq, void *data)
-{
- struct ocxlflash_context *ctx = data;
- struct device *dev = ctx->hw_afu->dev;
- int i;
-
- dev_dbg(dev, "%s: Interrupt raised for pe %i virq %i\n",
- __func__, ctx->pe, irq);
-
- for (i = 0; i < ctx->num_irqs; i++) {
- if (ctx->irqs[i].virq == irq)
- break;
- }
- if (unlikely(i >= ctx->num_irqs)) {
- dev_err(dev, "%s: Received AFU IRQ out of range\n", __func__);
- goto out;
- }
-
- spin_lock(&ctx->slock);
- set_bit(i - 1, &ctx->irq_bitmap);
- ctx->pending_irq = true;
- spin_unlock(&ctx->slock);
-
- wake_up_all(&ctx->wq);
-out:
- return IRQ_HANDLED;
-}
-
-/**
- * ocxlflash_start_work() - start a user context
- * @ctx_cookie: Context to be started.
- * @num_irqs: Number of interrupts requested.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_start_work(void *ctx_cookie, u64 num_irqs)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
- struct ocxl_hw_afu *afu = ctx->hw_afu;
- struct device *dev = afu->dev;
- char *name;
- int rc = 0;
- int i;
-
- rc = alloc_afu_irqs(ctx, num_irqs);
- if (unlikely(rc < 0)) {
- dev_err(dev, "%s: alloc_afu_irqs failed rc=%d\n", __func__, rc);
- goto out;
- }
-
- for (i = 0; i < num_irqs; i++) {
- name = kasprintf(GFP_KERNEL, "ocxlflash-%s-pe%i-%i",
- dev_name(dev), ctx->pe, i);
- rc = afu_map_irq(0, ctx, i, ocxlflash_afu_irq, ctx, name);
- kfree(name);
- if (unlikely(rc < 0)) {
- dev_err(dev, "%s: afu_map_irq failed rc=%d\n",
- __func__, rc);
- goto err;
- }
- }
-
- rc = start_context(ctx);
- if (unlikely(rc)) {
- dev_err(dev, "%s: start_context failed rc=%d\n", __func__, rc);
- goto err;
- }
-out:
- return rc;
-err:
- for (i = i-1; i >= 0; i--)
- afu_unmap_irq(0, ctx, i, ctx);
- free_afu_irqs(ctx);
- goto out;
-};
-
-/**
- * ocxlflash_fd_mmap() - mmap handler for adapter file descriptor
- * @file: File installed with adapter file descriptor.
- * @vma: VM area associated with mapping.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_fd_mmap(struct file *file, struct vm_area_struct *vma)
-{
- return afu_mmap(file, vma);
-}
-
-/**
- * ocxlflash_fd_release() - release the context associated with the file
- * @inode: File inode pointer.
- * @file: File associated with the adapter context.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_fd_release(struct inode *inode, struct file *file)
-{
- return afu_release(inode, file);
-}
-
-/* Backend ops to ocxlflash services */
-const struct cxlflash_backend_ops cxlflash_ocxl_ops = {
- .module = THIS_MODULE,
- .psa_map = ocxlflash_psa_map,
- .psa_unmap = ocxlflash_psa_unmap,
- .process_element = ocxlflash_process_element,
- .map_afu_irq = ocxlflash_map_afu_irq,
- .unmap_afu_irq = ocxlflash_unmap_afu_irq,
- .get_irq_objhndl = ocxlflash_get_irq_objhndl,
- .start_context = ocxlflash_start_context,
- .stop_context = ocxlflash_stop_context,
- .afu_reset = ocxlflash_afu_reset,
- .set_master = ocxlflash_set_master,
- .get_context = ocxlflash_get_context,
- .dev_context_init = ocxlflash_dev_context_init,
- .release_context = ocxlflash_release_context,
- .perst_reloads_same_image = ocxlflash_perst_reloads_same_image,
- .read_adapter_vpd = ocxlflash_read_adapter_vpd,
- .allocate_afu_irqs = ocxlflash_allocate_afu_irqs,
- .free_afu_irqs = ocxlflash_free_afu_irqs,
- .create_afu = ocxlflash_create_afu,
- .destroy_afu = ocxlflash_destroy_afu,
- .get_fd = ocxlflash_get_fd,
- .fops_get_context = ocxlflash_fops_get_context,
- .start_work = ocxlflash_start_work,
- .fd_mmap = ocxlflash_fd_mmap,
- .fd_release = ocxlflash_fd_release,
-};
diff --git a/drivers/scsi/cxlflash/ocxl_hw.h b/drivers/scsi/cxlflash/ocxl_hw.h
deleted file mode 100644
index f2fe88816bea..000000000000
--- a/drivers/scsi/cxlflash/ocxl_hw.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CXL Flash Device Driver
- *
- * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2018 IBM Corporation
- */
-
-#define OCXL_MAX_IRQS 4 /* Max interrupts per process */
-
-struct ocxlflash_irqs {
- int hwirq;
- u32 virq;
- void __iomem *vtrig;
-};
-
-/* OCXL hardware AFU associated with the host */
-struct ocxl_hw_afu {
- struct ocxlflash_context *ocxl_ctx; /* Host context */
- struct pci_dev *pdev; /* PCI device */
- struct device *dev; /* Generic device */
- bool perst_same_image; /* Same image loaded on perst */
-
- struct ocxl_fn_config fcfg; /* DVSEC config of the function */
- struct ocxl_afu_config acfg; /* AFU configuration data */
-
- int fn_actag_base; /* Function acTag base */
- int fn_actag_enabled; /* Function acTag number enabled */
- int afu_actag_base; /* AFU acTag base */
- int afu_actag_enabled; /* AFU acTag number enabled */
-
- phys_addr_t ppmmio_phys; /* Per process MMIO space */
- phys_addr_t gmmio_phys; /* Global AFU MMIO space */
- void __iomem *gmmio_virt; /* Global MMIO map */
-
- void *link_token; /* Link token for the SPA */
- struct idr idr; /* IDR to manage contexts */
- int max_pasid; /* Maximum number of contexts */
- bool is_present; /* Function has AFUs defined */
-};
-
-enum ocxlflash_ctx_state {
- CLOSED,
- OPENED,
- STARTED
-};
-
-struct ocxlflash_context {
- struct ocxl_hw_afu *hw_afu; /* HW AFU back pointer */
- struct address_space *mapping; /* Mapping for pseudo filesystem */
- bool master; /* Whether this is a master context */
- int pe; /* Process element */
-
- phys_addr_t psn_phys; /* Process mapping */
- u64 psn_size; /* Process mapping size */
-
- spinlock_t slock; /* Protects irq/fault/event updates */
- wait_queue_head_t wq; /* Wait queue for poll and interrupts */
- struct mutex state_mutex; /* Mutex to update context state */
- enum ocxlflash_ctx_state state; /* Context state */
-
- struct ocxlflash_irqs *irqs; /* Pointer to array of structures */
- int num_irqs; /* Number of interrupts */
- bool pending_irq; /* Pending interrupt on the context */
- ulong irq_bitmap; /* Bits indicating pending irq num */
-
- u64 fault_addr; /* Address that triggered the fault */
- u64 fault_dsisr; /* Value of dsisr register at fault */
- bool pending_fault; /* Pending translation fault */
-};
diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
deleted file mode 100644
index ab315c59505b..000000000000
--- a/drivers/scsi/cxlflash/sislite.h
+++ /dev/null
@@ -1,560 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#ifndef _SISLITE_H
-#define _SISLITE_H
-
-#include <linux/types.h>
-
-typedef u16 ctx_hndl_t;
-typedef u32 res_hndl_t;
-
-#define SIZE_4K 4096
-#define SIZE_64K 65536
-
-/*
- * IOARCB: 64 bytes, min 16 byte alignment required, host native endianness
- * except for SCSI CDB which remains big endian per SCSI standards.
- */
-struct sisl_ioarcb {
- u16 ctx_id; /* ctx_hndl_t */
- u16 req_flags;
-#define SISL_REQ_FLAGS_RES_HNDL 0x8000U /* bit 0 (MSB) */
-#define SISL_REQ_FLAGS_PORT_LUN_ID 0x0000U
-
-#define SISL_REQ_FLAGS_SUP_UNDERRUN 0x4000U /* bit 1 */
-
-#define SISL_REQ_FLAGS_TIMEOUT_SECS 0x0000U /* bits 8,9 */
-#define SISL_REQ_FLAGS_TIMEOUT_MSECS 0x0040U
-#define SISL_REQ_FLAGS_TIMEOUT_USECS 0x0080U
-#define SISL_REQ_FLAGS_TIMEOUT_CYCLES 0x00C0U
-
-#define SISL_REQ_FLAGS_TMF_CMD 0x0004u /* bit 13 */
-
-#define SISL_REQ_FLAGS_AFU_CMD 0x0002U /* bit 14 */
-
-#define SISL_REQ_FLAGS_HOST_WRITE 0x0001U /* bit 15 (LSB) */
-#define SISL_REQ_FLAGS_HOST_READ 0x0000U
-
- union {
- u32 res_hndl; /* res_hndl_t */
- u32 port_sel; /* this is a selection mask:
- * 0x1 -> port#0 can be selected,
- * 0x2 -> port#1 can be selected.
- * Can be bitwise ORed.
- */
- };
- u64 lun_id;
- u32 data_len; /* 4K for read/write */
- u32 ioadl_len;
- union {
- u64 data_ea; /* min 16 byte aligned */
- u64 ioadl_ea;
- };
- u8 msi; /* LISN to send on RRQ write */
-#define SISL_MSI_CXL_PFAULT 0 /* reserved for CXL page faults */
-#define SISL_MSI_SYNC_ERROR 1 /* recommended for AFU sync error */
-#define SISL_MSI_RRQ_UPDATED 2 /* recommended for IO completion */
-#define SISL_MSI_ASYNC_ERROR 3 /* master only - for AFU async error */
-
- u8 rrq; /* 0 for a single RRQ */
- u16 timeout; /* in units specified by req_flags */
- u32 rsvd1;
- u8 cdb[16]; /* must be in big endian */
-#define SISL_AFU_CMD_SYNC 0xC0 /* AFU sync command */
-#define SISL_AFU_CMD_LUN_PROVISION 0xD0 /* AFU LUN provision command */
-#define SISL_AFU_CMD_DEBUG 0xE0 /* AFU debug command */
-
-#define SISL_AFU_LUN_PROVISION_CREATE 0x00 /* LUN provision create type */
-#define SISL_AFU_LUN_PROVISION_DELETE 0x01 /* LUN provision delete type */
-
- union {
- u64 reserved; /* Reserved for IOARRIN mode */
- struct sisl_ioasa *ioasa; /* IOASA EA for SQ Mode */
- };
-} __packed;
-
-struct sisl_rc {
- u8 flags;
-#define SISL_RC_FLAGS_SENSE_VALID 0x80U
-#define SISL_RC_FLAGS_FCP_RSP_CODE_VALID 0x40U
-#define SISL_RC_FLAGS_OVERRUN 0x20U
-#define SISL_RC_FLAGS_UNDERRUN 0x10U
-
- u8 afu_rc;
-#define SISL_AFU_RC_RHT_INVALID 0x01U /* user error */
-#define SISL_AFU_RC_RHT_UNALIGNED 0x02U /* should never happen */
-#define SISL_AFU_RC_RHT_OUT_OF_BOUNDS 0x03u /* user error */
-#define SISL_AFU_RC_RHT_DMA_ERR 0x04u /* see afu_extra
- * may retry if afu_retry is off
- * possible on master exit
- */
-#define SISL_AFU_RC_RHT_RW_PERM 0x05u /* no RW perms, user error */
-#define SISL_AFU_RC_LXT_UNALIGNED 0x12U /* should never happen */
-#define SISL_AFU_RC_LXT_OUT_OF_BOUNDS 0x13u /* user error */
-#define SISL_AFU_RC_LXT_DMA_ERR 0x14u /* see afu_extra
- * may retry if afu_retry is off
- * possible on master exit
- */
-#define SISL_AFU_RC_LXT_RW_PERM 0x15u /* no RW perms, user error */
-
-#define SISL_AFU_RC_NOT_XLATE_HOST 0x1au /* possible if master exited */
-
- /* NO_CHANNELS means the FC ports selected by dest_port in
- * IOARCB or in the LXT entry are down when the AFU tried to select
-	 * an FC port. If the port went down on an active IO, it will set
-	 * fc_rc to 0x54(NOLOGI) or 0x57(LINKDOWN) instead.
- */
-#define SISL_AFU_RC_NO_CHANNELS 0x20U /* see afu_extra, may retry */
-#define SISL_AFU_RC_CAP_VIOLATION 0x21U /* either user error or
- * afu reset/master restart
- */
-#define SISL_AFU_RC_OUT_OF_DATA_BUFS 0x30U /* always retry */
-#define SISL_AFU_RC_DATA_DMA_ERR 0x31U /* see afu_extra
- * may retry if afu_retry is off
- */
-
- u8 scsi_rc; /* SCSI status byte, retry as appropriate */
-#define SISL_SCSI_RC_CHECK 0x02U
-#define SISL_SCSI_RC_BUSY 0x08u
-
- u8 fc_rc; /* retry */
- /*
- * We should only see fc_rc=0x57 (LINKDOWN) or 0x54(NOLOGI) for
- * commands that are in flight when a link goes down or is logged out.
- * If the link is down or logged out before AFU selects the port, either
- * it will choose the other port or we will get afu_rc=0x20 (no_channel)
- * if there is no valid port to use.
- *
- * ABORTPEND/ABORTOK/ABORTFAIL/TGTABORT can be retried, typically these
- * would happen if a frame is dropped and something times out.
- * NOLOGI or LINKDOWN can be retried if the other port is up.
- * RESIDERR can be retried as well.
- *
- * ABORTFAIL might indicate that lots of frames are getting CRC errors.
-	 * So it may be retried once, and the link reset if it happens again.
- * The link can also be reset on the CRC error threshold interrupt.
- */
-#define SISL_FC_RC_ABORTPEND 0x52 /* exchange timeout or abort request */
-#define SISL_FC_RC_WRABORTPEND 0x53 /* due to write XFER_RDY invalid */
-#define SISL_FC_RC_NOLOGI 0x54 /* port not logged in, in-flight cmds */
-#define SISL_FC_RC_NOEXP 0x55 /* FC protocol error or HW bug */
-#define SISL_FC_RC_INUSE 0x56 /* tag already in use, HW bug */
-#define SISL_FC_RC_LINKDOWN 0x57 /* link down, in-flight cmds */
-#define SISL_FC_RC_ABORTOK 0x58 /* pending abort completed w/success */
-#define SISL_FC_RC_ABORTFAIL 0x59 /* pending abort completed w/fail */
-#define SISL_FC_RC_RESID 0x5A /* ioasa underrun/overrun flags set */
-#define SISL_FC_RC_RESIDERR 0x5B /* actual data len does not match SCSI
- * reported len, possibly due to dropped
- * frames
- */
-#define SISL_FC_RC_TGTABORT 0x5C /* command aborted by target */
-};
-
-#define SISL_SENSE_DATA_LEN 20 /* Sense data length */
-#define SISL_WWID_DATA_LEN 16 /* WWID data length */
-
-/*
- * IOASA: 64 bytes & must follow IOARCB, min 16 byte alignment required,
- * host native endianness
- */
-struct sisl_ioasa {
- union {
- struct sisl_rc rc;
- u32 ioasc;
-#define SISL_IOASC_GOOD_COMPLETION 0x00000000U
- };
-
- union {
- u32 resid;
- u32 lunid_hi;
- };
-
- u8 port;
- u8 afu_extra;
- /* when afu_rc=0x04, 0x14, 0x31 (_xxx_DMA_ERR):
-	 * afu_extra contains PSL response code. Useful codes are:
- */
-#define SISL_AFU_DMA_ERR_PAGE_IN 0x0A /* AFU_retry_on_pagein Action
- * Enabled N/A
- * Disabled retry
- */
-#define SISL_AFU_DMA_ERR_INVALID_EA 0x0B /* this is a hard error
- * afu_rc Implies
- * 0x04, 0x14 master exit.
- * 0x31 user error.
- */
-	/* when afu_rc=0x20 (no channels):
- * afu_extra bits [4:5]: available portmask, [6:7]: requested portmask.
- */
-#define SISL_AFU_NO_CLANNELS_AMASK(afu_extra) (((afu_extra) & 0x0C) >> 2)
-#define SISL_AFU_NO_CLANNELS_RMASK(afu_extra) ((afu_extra) & 0x03)
-
- u8 scsi_extra;
- u8 fc_extra;
-
- union {
- u8 sense_data[SISL_SENSE_DATA_LEN];
- struct {
- u32 lunid_lo;
- u8 wwid[SISL_WWID_DATA_LEN];
- };
- };
-
- /* These fields are defined by the SISlite architecture for the
- * host to use as they see fit for their implementation.
- */
- union {
- u64 host_use[4];
- u8 host_use_b[32];
- };
-} __packed;
-
-#define SISL_RESP_HANDLE_T_BIT 0x1ULL /* Toggle bit */
-
-/* MMIO space is required to support only 64-bit access */
-
-/*
- * This AFU has two mechanisms to deal with endian-ness.
- * One is a global configuration (in the afu_config) register
- * below that specifies the endian-ness of the host.
- * The other is a per context (i.e. application) specification
- * controlled by the endian_ctrl field here. Since the master
- * context is one such application, the master context's
- * endian-ness is set to be the same as the host's.
- *
- * As per the SISlite spec, the MMIO registers are always
- * big endian.
- */
-#define SISL_ENDIAN_CTRL_BE 0x8000000000000080ULL
-#define SISL_ENDIAN_CTRL_LE 0x0000000000000000ULL
-
-#ifdef __BIG_ENDIAN
-#define SISL_ENDIAN_CTRL SISL_ENDIAN_CTRL_BE
-#else
-#define SISL_ENDIAN_CTRL SISL_ENDIAN_CTRL_LE
-#endif
-
-/* per context host transport MMIO */
-struct sisl_host_map {
- __be64 endian_ctrl; /* Per context Endian Control. The AFU will
- * operate on whatever the context is of the
- * host application.
- */
-
- __be64 intr_status; /* this sends LISN# programmed in ctx_ctrl.
- * Only recovery in a PERM_ERR is a context
- * exit since there is no way to tell which
- * command caused the error.
- */
-#define SISL_ISTATUS_PERM_ERR_LISN_3_EA 0x0400ULL /* b53, user error */
-#define SISL_ISTATUS_PERM_ERR_LISN_2_EA 0x0200ULL /* b54, user error */
-#define SISL_ISTATUS_PERM_ERR_LISN_1_EA 0x0100ULL /* b55, user error */
-#define SISL_ISTATUS_PERM_ERR_LISN_3_PASID 0x0080ULL /* b56, user error */
-#define SISL_ISTATUS_PERM_ERR_LISN_2_PASID 0x0040ULL /* b57, user error */
-#define SISL_ISTATUS_PERM_ERR_LISN_1_PASID 0x0020ULL /* b58, user error */
-#define SISL_ISTATUS_PERM_ERR_CMDROOM 0x0010ULL /* b59, user error */
-#define SISL_ISTATUS_PERM_ERR_RCB_READ 0x0008ULL /* b60, user error */
-#define SISL_ISTATUS_PERM_ERR_SA_WRITE 0x0004ULL /* b61, user error */
-#define SISL_ISTATUS_PERM_ERR_RRQ_WRITE 0x0002ULL /* b62, user error */
- /* Page in wait accessing RCB/IOASA/RRQ is reported in b63.
- * Same error in data/LXT/RHT access is reported via IOASA.
- */
-#define SISL_ISTATUS_TEMP_ERR_PAGEIN 0x0001ULL /* b63, can only be
- * generated when AFU
- * auto retry is
- * disabled. If user
- * can determine the
- * command that caused
- * the error, it can
- * be retried.
- */
-#define SISL_ISTATUS_UNMASK (0x07FFULL) /* 1 means unmasked */
-#define SISL_ISTATUS_MASK ~(SISL_ISTATUS_UNMASK) /* 1 means masked */
-
- __be64 intr_clear;
- __be64 intr_mask;
- __be64 ioarrin; /* only write what cmd_room permits */
- __be64 rrq_start; /* start & end are both inclusive */
- __be64 rrq_end; /* write sequence: start followed by end */
- __be64 cmd_room;
- __be64 ctx_ctrl; /* least significant byte or b56:63 is LISN# */
-#define SISL_CTX_CTRL_UNMAP_SECTOR 0x8000000000000000ULL /* b0 */
-#define SISL_CTX_CTRL_LISN_MASK (0xFFULL)
- __be64 mbox_w; /* restricted use */
- __be64 sq_start; /* Submission Queue (R/W): write sequence and */
- __be64 sq_end; /* inclusion semantics are the same as RRQ */
- __be64 sq_head; /* Submission Queue Head (R): for debugging */
- __be64 sq_tail; /* Submission Queue TAIL (R/W): next IOARCB */
- __be64 sq_ctx_reset; /* Submission Queue Context Reset (R/W) */
-};
-
-/* per context provisioning & control MMIO */
-struct sisl_ctrl_map {
- __be64 rht_start;
- __be64 rht_cnt_id;
- /* both cnt & ctx_id args must be ULL */
-#define SISL_RHT_CNT_ID(cnt, ctx_id) (((cnt) << 48) | ((ctx_id) << 32))
-
- __be64 ctx_cap; /* afu_rc below is when the capability is violated */
-#define SISL_CTX_CAP_PROXY_ISSUE 0x8000000000000000ULL /* afu_rc 0x21 */
-#define SISL_CTX_CAP_REAL_MODE 0x4000000000000000ULL /* afu_rc 0x21 */
-#define SISL_CTX_CAP_HOST_XLATE 0x2000000000000000ULL /* afu_rc 0x1a */
-#define SISL_CTX_CAP_PROXY_TARGET 0x1000000000000000ULL /* afu_rc 0x21 */
-#define SISL_CTX_CAP_AFU_CMD 0x0000000000000008ULL /* afu_rc 0x21 */
-#define SISL_CTX_CAP_GSCSI_CMD 0x0000000000000004ULL /* afu_rc 0x21 */
-#define SISL_CTX_CAP_WRITE_CMD 0x0000000000000002ULL /* afu_rc 0x21 */
-#define SISL_CTX_CAP_READ_CMD 0x0000000000000001ULL /* afu_rc 0x21 */
- __be64 mbox_r;
- __be64 lisn_pasid[2];
- /* pasid _a arg must be ULL */
-#define SISL_LISN_PASID(_a, _b) (((_a) << 32) | (_b))
- __be64 lisn_ea[3];
-};
-
-/* single copy global regs */
-struct sisl_global_regs {
- __be64 aintr_status;
- /*
- * In cxlflash, FC port/link are arranged in port pairs, each
- * gets a byte of status:
- *
- * *_OTHER: other err, FC_ERRCAP[31:20]
- * *_LOGO: target sent FLOGI/PLOGI/LOGO while logged in
- * *_CRC_T: CRC threshold exceeded
- * *_LOGI_R: login state machine timed out and retrying
- * *_LOGI_F: login failed, FC_ERROR[19:0]
- * *_LOGI_S: login succeeded
- * *_LINK_DN: link online to offline
- * *_LINK_UP: link offline to online
- */
-#define SISL_ASTATUS_FC2_OTHER 0x80000000ULL /* b32 */
-#define SISL_ASTATUS_FC2_LOGO 0x40000000ULL /* b33 */
-#define SISL_ASTATUS_FC2_CRC_T 0x20000000ULL /* b34 */
-#define SISL_ASTATUS_FC2_LOGI_R 0x10000000ULL /* b35 */
-#define SISL_ASTATUS_FC2_LOGI_F 0x08000000ULL /* b36 */
-#define SISL_ASTATUS_FC2_LOGI_S 0x04000000ULL /* b37 */
-#define SISL_ASTATUS_FC2_LINK_DN 0x02000000ULL /* b38 */
-#define SISL_ASTATUS_FC2_LINK_UP 0x01000000ULL /* b39 */
-
-#define SISL_ASTATUS_FC3_OTHER 0x00800000ULL /* b40 */
-#define SISL_ASTATUS_FC3_LOGO 0x00400000ULL /* b41 */
-#define SISL_ASTATUS_FC3_CRC_T 0x00200000ULL /* b42 */
-#define SISL_ASTATUS_FC3_LOGI_R 0x00100000ULL /* b43 */
-#define SISL_ASTATUS_FC3_LOGI_F 0x00080000ULL /* b44 */
-#define SISL_ASTATUS_FC3_LOGI_S 0x00040000ULL /* b45 */
-#define SISL_ASTATUS_FC3_LINK_DN 0x00020000ULL /* b46 */
-#define SISL_ASTATUS_FC3_LINK_UP 0x00010000ULL /* b47 */
-
-#define SISL_ASTATUS_FC0_OTHER 0x00008000ULL /* b48 */
-#define SISL_ASTATUS_FC0_LOGO 0x00004000ULL /* b49 */
-#define SISL_ASTATUS_FC0_CRC_T 0x00002000ULL /* b50 */
-#define SISL_ASTATUS_FC0_LOGI_R 0x00001000ULL /* b51 */
-#define SISL_ASTATUS_FC0_LOGI_F 0x00000800ULL /* b52 */
-#define SISL_ASTATUS_FC0_LOGI_S 0x00000400ULL /* b53 */
-#define SISL_ASTATUS_FC0_LINK_DN 0x00000200ULL /* b54 */
-#define SISL_ASTATUS_FC0_LINK_UP 0x00000100ULL /* b55 */
-
-#define SISL_ASTATUS_FC1_OTHER 0x00000080ULL /* b56 */
-#define SISL_ASTATUS_FC1_LOGO 0x00000040ULL /* b57 */
-#define SISL_ASTATUS_FC1_CRC_T 0x00000020ULL /* b58 */
-#define SISL_ASTATUS_FC1_LOGI_R 0x00000010ULL /* b59 */
-#define SISL_ASTATUS_FC1_LOGI_F 0x00000008ULL /* b60 */
-#define SISL_ASTATUS_FC1_LOGI_S 0x00000004ULL /* b61 */
-#define SISL_ASTATUS_FC1_LINK_DN 0x00000002ULL /* b62 */
-#define SISL_ASTATUS_FC1_LINK_UP 0x00000001ULL /* b63 */
-
-#define SISL_FC_INTERNAL_UNMASK 0x0000000300000000ULL /* 1 means unmasked */
-#define SISL_FC_INTERNAL_MASK ~(SISL_FC_INTERNAL_UNMASK)
-#define SISL_FC_INTERNAL_SHIFT 32
-
-#define SISL_FC_SHUTDOWN_NORMAL 0x0000000000000010ULL
-#define SISL_FC_SHUTDOWN_ABRUPT 0x0000000000000020ULL
-
-#define SISL_STATUS_SHUTDOWN_ACTIVE 0x0000000000000010ULL
-#define SISL_STATUS_SHUTDOWN_COMPLETE 0x0000000000000020ULL
-
-#define SISL_ASTATUS_UNMASK 0xFFFFFFFFULL /* 1 means unmasked */
-#define SISL_ASTATUS_MASK ~(SISL_ASTATUS_UNMASK) /* 1 means masked */
-
- __be64 aintr_clear;
- __be64 aintr_mask;
- __be64 afu_ctrl;
- __be64 afu_hb;
- __be64 afu_scratch_pad;
- __be64 afu_port_sel;
-#define SISL_AFUCONF_AR_IOARCB 0x4000ULL
-#define SISL_AFUCONF_AR_LXT 0x2000ULL
-#define SISL_AFUCONF_AR_RHT 0x1000ULL
-#define SISL_AFUCONF_AR_DATA 0x0800ULL
-#define SISL_AFUCONF_AR_RSRC 0x0400ULL
-#define SISL_AFUCONF_AR_IOASA 0x0200ULL
-#define SISL_AFUCONF_AR_RRQ 0x0100ULL
-/* Aggregate all Auto Retry Bits */
-#define SISL_AFUCONF_AR_ALL (SISL_AFUCONF_AR_IOARCB|SISL_AFUCONF_AR_LXT| \
- SISL_AFUCONF_AR_RHT|SISL_AFUCONF_AR_DATA| \
- SISL_AFUCONF_AR_RSRC|SISL_AFUCONF_AR_IOASA| \
- SISL_AFUCONF_AR_RRQ)
-#ifdef __BIG_ENDIAN
-#define SISL_AFUCONF_ENDIAN 0x0000ULL
-#else
-#define SISL_AFUCONF_ENDIAN 0x0020ULL
-#endif
-#define SISL_AFUCONF_MBOX_CLR_READ 0x0010ULL
- __be64 afu_config;
- __be64 rsvd[0xf8];
- __le64 afu_version;
- __be64 interface_version;
-#define SISL_INTVER_CAP_SHIFT 16
-#define SISL_INTVER_MAJ_SHIFT 8
-#define SISL_INTVER_CAP_MASK 0xFFFFFFFF00000000ULL
-#define SISL_INTVER_MAJ_MASK 0x00000000FFFF0000ULL
-#define SISL_INTVER_MIN_MASK 0x000000000000FFFFULL
-#define SISL_INTVER_CAP_IOARRIN_CMD_MODE 0x800000000000ULL
-#define SISL_INTVER_CAP_SQ_CMD_MODE 0x400000000000ULL
-#define SISL_INTVER_CAP_RESERVED_CMD_MODE_A 0x200000000000ULL
-#define SISL_INTVER_CAP_RESERVED_CMD_MODE_B 0x100000000000ULL
-#define SISL_INTVER_CAP_LUN_PROVISION 0x080000000000ULL
-#define SISL_INTVER_CAP_AFU_DEBUG 0x040000000000ULL
-#define SISL_INTVER_CAP_OCXL_LISN 0x020000000000ULL
-};
-
-#define CXLFLASH_NUM_FC_PORTS_PER_BANK 2 /* fixed # of ports per bank */
-#define CXLFLASH_MAX_FC_BANKS 2 /* max # of banks supported */
-#define CXLFLASH_MAX_FC_PORTS (CXLFLASH_NUM_FC_PORTS_PER_BANK * \
- CXLFLASH_MAX_FC_BANKS)
-#define CXLFLASH_MAX_CONTEXT 512 /* number of contexts per AFU */
-#define CXLFLASH_NUM_VLUNS 512 /* number of vluns per AFU/port */
-#define CXLFLASH_NUM_REGS 512 /* number of registers per port */
-
-struct fc_port_bank {
- __be64 fc_port_regs[CXLFLASH_NUM_FC_PORTS_PER_BANK][CXLFLASH_NUM_REGS];
- __be64 fc_port_luns[CXLFLASH_NUM_FC_PORTS_PER_BANK][CXLFLASH_NUM_VLUNS];
-};
-
-struct sisl_global_map {
- union {
- struct sisl_global_regs regs;
- char page0[SIZE_4K]; /* page 0 */
- };
-
- char page1[SIZE_4K]; /* page 1 */
-
- struct fc_port_bank bank[CXLFLASH_MAX_FC_BANKS]; /* pages 2 - 9 */
-
- /* pages 10 - 15 are reserved */
-
-};
-
-/*
- * CXL Flash Memory Map
- *
- * +-------------------------------+
- * | 512 * 64 KB User MMIO |
- * | (per context) |
- * | User Accessible |
- * +-------------------------------+
- * | 512 * 128 B per context |
- * | Provisioning and Control |
- * | Trusted Process accessible |
- * +-------------------------------+
- * | 64 KB Global |
- * | Trusted Process accessible |
- * +-------------------------------+
- */
-struct cxlflash_afu_map {
- union {
- struct sisl_host_map host;
- char harea[SIZE_64K]; /* 64KB each */
- } hosts[CXLFLASH_MAX_CONTEXT];
-
- union {
- struct sisl_ctrl_map ctrl;
- char carea[cache_line_size()]; /* 128B each */
- } ctrls[CXLFLASH_MAX_CONTEXT];
-
- union {
- struct sisl_global_map global;
- char garea[SIZE_64K]; /* 64KB single block */
- };
-};
-
-/*
- * LXT - LBA Translation Table
- * LXT control blocks
- */
-struct sisl_lxt_entry {
- u64 rlba_base; /* bits 0:47 is base
- * b48:55 is lun index
- * b58:59 is write & read perms
- * (if no perm, afu_rc=0x15)
- * b60:63 is port_sel mask
- */
-};
-
-/*
- * RHT - Resource Handle Table
- * Per the SISlite spec, RHT entries are to be 16-byte aligned
- */
-struct sisl_rht_entry {
- struct sisl_lxt_entry *lxt_start;
- u32 lxt_cnt;
- u16 rsvd;
- u8 fp; /* format & perm nibbles.
- * (if no perm, afu_rc=0x05)
- */
- u8 nmask;
-} __packed __aligned(16);
-
-struct sisl_rht_entry_f1 {
- u64 lun_id;
- union {
- struct {
- u8 valid;
- u8 rsvd[5];
- u8 fp;
- u8 port_sel;
- };
-
- u64 dw;
- };
-} __packed __aligned(16);
-
-/* make the fp byte */
-#define SISL_RHT_FP(fmt, perm) (((fmt) << 4) | (perm))
-
-/* make the fp byte for a clone from a source fp and clone flags;
- * flags must be only 2 LSB bits.
- */
-#define SISL_RHT_FP_CLONE(src_fp, cln_flags) ((src_fp) & (0xFC | (cln_flags)))
-
-#define RHT_PERM_READ 0x01U
-#define RHT_PERM_WRITE 0x02U
-#define RHT_PERM_RW (RHT_PERM_READ | RHT_PERM_WRITE)
-
-/* extract the perm bits from a fp */
-#define SISL_RHT_PERM(fp) ((fp) & RHT_PERM_RW)
-
-#define PORT0 0x01U
-#define PORT1 0x02U
-#define PORT2 0x04U
-#define PORT3 0x08U
-#define PORT_MASK(_n) ((1 << (_n)) - 1)
-
-/* AFU Sync Mode byte */
-#define AFU_LW_SYNC 0x0U
-#define AFU_HW_SYNC 0x1U
-#define AFU_GSYNC 0x2U
-
-/* Special Task Management Function CDB */
-#define TMF_LUN_RESET 0x1U
-#define TMF_CLEAR_ACA 0x2U
-
-#endif /* _SISLITE_H */
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
deleted file mode 100644
index 97631f48e19d..000000000000
--- a/drivers/scsi/cxlflash/superpipe.c
+++ /dev/null
@@ -1,2218 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#include <linux/delay.h>
-#include <linux/file.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/syscalls.h>
-#include <linux/unaligned.h>
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_eh.h>
-#include <uapi/scsi/cxlflash_ioctl.h>
-
-#include "sislite.h"
-#include "common.h"
-#include "vlun.h"
-#include "superpipe.h"
-
-struct cxlflash_global global;
-
-/**
- * marshal_rele_to_resize() - translate release to resize structure
- * @release: Source structure from which to translate/copy.
- * @resize: Destination structure for the translate/copy.
- */
-static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
- struct dk_cxlflash_resize *resize)
-{
- resize->hdr = release->hdr;
- resize->context_id = release->context_id;
- resize->rsrc_handle = release->rsrc_handle;
-}
-
-/**
- * marshal_det_to_rele() - translate detach to release structure
- * @detach: Destination structure for the translate/copy.
- * @release: Source structure from which to translate/copy.
- */
-static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
- struct dk_cxlflash_release *release)
-{
- release->hdr = detach->hdr;
- release->context_id = detach->context_id;
-}
-
-/**
- * marshal_udir_to_rele() - translate udirect to release structure
- * @udirect: Source structure from which to translate/copy.
- * @release: Destination structure for the translate/copy.
- */
-static void marshal_udir_to_rele(struct dk_cxlflash_udirect *udirect,
- struct dk_cxlflash_release *release)
-{
- release->hdr = udirect->hdr;
- release->context_id = udirect->context_id;
- release->rsrc_handle = udirect->rsrc_handle;
-}
-
-/**
- * cxlflash_free_errpage() - frees resources associated with global error page
- */
-void cxlflash_free_errpage(void)
-{
-
- mutex_lock(&global.mutex);
- if (global.err_page) {
- __free_page(global.err_page);
- global.err_page = NULL;
- }
- mutex_unlock(&global.mutex);
-}
-
-/**
- * cxlflash_stop_term_user_contexts() - stops/terminates known user contexts
- * @cfg: Internal structure associated with the host.
- *
- * When the host needs to go down, all users must be quiesced and their
- * memory freed. This is accomplished by putting the contexts in an error
- * state, which will notify the user and let them 'drive' the tear down.
- * Meanwhile, this routine camps until all user contexts have been removed.
- *
- * Note that the main loop in this routine will always execute at least once
- * to flush the reset_waitq.
- */
-void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
- int i, found = true;
-
- cxlflash_mark_contexts_error(cfg);
-
- while (true) {
- for (i = 0; i < MAX_CONTEXT; i++)
- if (cfg->ctx_tbl[i]) {
- found = true;
- break;
- }
-
- if (!found && list_empty(&cfg->ctx_err_recovery))
- return;
-
- dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
- __func__);
- wake_up_all(&cfg->reset_waitq);
- ssleep(1);
- found = false;
- }
-}
-
-/**
- * find_error_context() - locates a context by cookie on the error recovery list
- * @cfg: Internal structure associated with the host.
- * @rctxid: Desired context by id.
- * @file: Desired context by file.
- *
- * Return: Found context on success, NULL on failure
- */
-static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid,
- struct file *file)
-{
- struct ctx_info *ctxi;
-
- list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list)
- if ((ctxi->ctxid == rctxid) || (ctxi->file == file))
- return ctxi;
-
- return NULL;
-}
-
-/**
- * get_context() - obtains a validated and locked context reference
- * @cfg: Internal structure associated with the host.
- * @rctxid: Desired context (raw, un-decoded format).
- * @arg: LUN information or file associated with request.
- * @ctx_ctrl: Control information to 'steer' desired lookup.
- *
- * NOTE: despite the name pid, in Linux, current->pid actually refers
- * to the lightweight process id (tid) and can change if the process is
- * multithreaded. The tgid remains constant for the process and only changes
- * when the process forks. For all intents and purposes, think of tgid
- * as a pid in the traditional sense.
- *
- * Return: Validated context on success, NULL on failure
- */
-struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
- void *arg, enum ctx_ctrl ctx_ctrl)
-{
- struct device *dev = &cfg->dev->dev;
- struct ctx_info *ctxi = NULL;
- struct lun_access *lun_access = NULL;
- struct file *file = NULL;
- struct llun_info *lli = arg;
- u64 ctxid = DECODE_CTXID(rctxid);
- int rc;
- pid_t pid = task_tgid_nr(current), ctxpid = 0;
-
- if (ctx_ctrl & CTX_CTRL_FILE) {
- lli = NULL;
- file = (struct file *)arg;
- }
-
- if (ctx_ctrl & CTX_CTRL_CLONE)
- pid = task_ppid_nr(current);
-
- if (likely(ctxid < MAX_CONTEXT)) {
- while (true) {
- mutex_lock(&cfg->ctx_tbl_list_mutex);
- ctxi = cfg->ctx_tbl[ctxid];
- if (ctxi)
- if ((file && (ctxi->file != file)) ||
- (!file && (ctxi->ctxid != rctxid)))
- ctxi = NULL;
-
- if ((ctx_ctrl & CTX_CTRL_ERR) ||
- (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK)))
- ctxi = find_error_context(cfg, rctxid, file);
- if (!ctxi) {
- mutex_unlock(&cfg->ctx_tbl_list_mutex);
- goto out;
- }
-
- /*
- * Need to acquire ownership of the context while still
- * under the table/list lock to serialize with a remove
- * thread. Use the 'try' to avoid stalling the
- * table/list lock for a single context.
- *
- * Note that the lock order is:
- *
- * cfg->ctx_tbl_list_mutex -> ctxi->mutex
- *
- * Therefore release ctx_tbl_list_mutex before retrying.
- */
- rc = mutex_trylock(&ctxi->mutex);
- mutex_unlock(&cfg->ctx_tbl_list_mutex);
- if (rc)
- break; /* got the context's lock! */
- }
-
- if (ctxi->unavail)
- goto denied;
-
- ctxpid = ctxi->pid;
- if (likely(!(ctx_ctrl & CTX_CTRL_NOPID)))
- if (pid != ctxpid)
- goto denied;
-
- if (lli) {
- list_for_each_entry(lun_access, &ctxi->luns, list)
- if (lun_access->lli == lli)
- goto out;
- goto denied;
- }
- }
-
-out:
- dev_dbg(dev, "%s: rctxid=%016llx ctxinfo=%p ctxpid=%u pid=%u "
- "ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid,
- ctx_ctrl);
-
- return ctxi;
-
-denied:
- mutex_unlock(&ctxi->mutex);
- ctxi = NULL;
- goto out;
-}
-
-/**
- * put_context() - release a context that was retrieved from get_context()
- * @ctxi: Context to release.
- *
- * For now, releasing the context equates to unlocking its mutex.
- */
-void put_context(struct ctx_info *ctxi)
-{
- mutex_unlock(&ctxi->mutex);
-}
-
-/**
- * afu_attach() - attach a context to the AFU
- * @cfg: Internal structure associated with the host.
- * @ctxi: Context to attach.
- *
- * Upon setting the context capabilities, they must be confirmed with
- * a read back operation as the context might have been closed since
- * the mailbox was unlocked. When this occurs, registration is failed.
- *
- * Return: 0 on success, -errno on failure
- */
-static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
-{
- struct device *dev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
- struct sisl_ctrl_map __iomem *ctrl_map = ctxi->ctrl_map;
- int rc = 0;
- struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
- u64 val;
- int i;
-
- /* Unlock cap and restrict user to read/write cmds in translated mode */
- readq_be(&ctrl_map->mbox_r);
- val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD);
- writeq_be(val, &ctrl_map->ctx_cap);
- val = readq_be(&ctrl_map->ctx_cap);
- if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) {
- dev_err(dev, "%s: ctx may be closed val=%016llx\n",
- __func__, val);
- rc = -EAGAIN;
- goto out;
- }
-
- if (afu_is_ocxl_lisn(afu)) {
- /* Set up the LISN effective address for each interrupt */
- for (i = 0; i < ctxi->irqs; i++) {
- val = cfg->ops->get_irq_objhndl(ctxi->ctx, i);
- writeq_be(val, &ctrl_map->lisn_ea[i]);
- }
-
- /* Use primary HWQ PASID as identifier for all interrupts */
- val = hwq->ctx_hndl;
- writeq_be(SISL_LISN_PASID(val, val), &ctrl_map->lisn_pasid[0]);
- writeq_be(SISL_LISN_PASID(0UL, val), &ctrl_map->lisn_pasid[1]);
- }
-
- /* Set up MMIO registers pointing to the RHT */
- writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
- val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(hwq->ctx_hndl));
- writeq_be(val, &ctrl_map->rht_cnt_id);
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * read_cap16() - issues a SCSI READ_CAP16 command
- * @sdev: SCSI device associated with LUN.
- * @lli: LUN destined for capacity request.
- *
- * The READ_CAP16 can take quite a while to complete. Should an EEH occur while
- * in scsi_execute_cmd(), the EEH handler will attempt to recover. As part of
- * the recovery, the handler drains all currently running ioctls, waiting until
- * they have completed before proceeding with a reset. As this routine is used
- * on the ioctl path, this can create a condition where the EEH handler becomes
- * stuck, infinitely waiting for this ioctl thread. To avoid this behavior,
- * temporarily unmark this thread as an ioctl thread by releasing the ioctl
- * read semaphore. This will allow the EEH handler to proceed with a recovery
- * while this thread is still running. Once the scsi_execute_cmd() returns,
- * reacquire the ioctl read semaphore and check the adapter state in case it
- * changed while inside of scsi_execute_cmd(). The state check will wait if the
- * adapter is still being recovered or return a failure if the recovery failed.
- * In the event that the adapter reset failed, simply return the failure as the
- * ioctl would be unable to continue.
- *
- * Note that the above puts a requirement on this routine to only be called on
- * an ioctl thread.
- *
- * Return: 0 on success, -errno on failure
- */
-static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct glun_info *gli = lli->parent;
- struct scsi_sense_hdr sshdr;
- const struct scsi_exec_args exec_args = {
- .sshdr = &sshdr,
- };
- u8 *cmd_buf = NULL;
- u8 *scsi_cmd = NULL;
- int rc = 0;
- int result = 0;
- int retry_cnt = 0;
- u32 to = CMD_TIMEOUT * HZ;
-
-retry:
- cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
- scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
- if (unlikely(!cmd_buf || !scsi_cmd)) {
- rc = -ENOMEM;
- goto out;
- }
-
- scsi_cmd[0] = SERVICE_ACTION_IN_16; /* read cap(16) */
- scsi_cmd[1] = SAI_READ_CAPACITY_16; /* service action */
- put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]);
-
- dev_dbg(dev, "%s: %ssending cmd(%02x)\n", __func__,
- retry_cnt ? "re" : "", scsi_cmd[0]);
-
- /* Drop the ioctl read semaphore across lengthy call */
- up_read(&cfg->ioctl_rwsem);
- result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, cmd_buf,
- CMD_BUFSIZE, to, CMD_RETRIES, &exec_args);
- down_read(&cfg->ioctl_rwsem);
- rc = check_state(cfg);
- if (rc) {
- dev_err(dev, "%s: Failed state result=%08x\n",
- __func__, result);
- rc = -ENODEV;
- goto out;
- }
-
- if (result > 0 && scsi_sense_valid(&sshdr)) {
- if (result & SAM_STAT_CHECK_CONDITION) {
- switch (sshdr.sense_key) {
- case NO_SENSE:
- case RECOVERED_ERROR:
- case NOT_READY:
- result &= ~SAM_STAT_CHECK_CONDITION;
- break;
- case UNIT_ATTENTION:
- switch (sshdr.asc) {
- case 0x29: /* Power on Reset or Device Reset */
- fallthrough;
- case 0x2A: /* Device capacity changed */
- case 0x3F: /* Report LUNs changed */
- /* Retry the command once more */
- if (retry_cnt++ < 1) {
- kfree(cmd_buf);
- kfree(scsi_cmd);
- goto retry;
- }
- }
- break;
- default:
- break;
- }
- }
- }
-
- if (result) {
- dev_err(dev, "%s: command failed, result=%08x\n",
- __func__, result);
- rc = -EIO;
- goto out;
- }
-
- /*
- * Read cap was successful, grab values from the buffer;
- * note that we don't need to worry about unaligned access
- * as the buffer is allocated on an aligned boundary.
- */
- mutex_lock(&gli->mutex);
- gli->max_lba = be64_to_cpu(*((__be64 *)&cmd_buf[0]));
- gli->blk_len = be32_to_cpu(*((__be32 *)&cmd_buf[8]));
- mutex_unlock(&gli->mutex);
-
-out:
- kfree(cmd_buf);
- kfree(scsi_cmd);
-
- dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n",
- __func__, gli->max_lba, gli->blk_len, rc);
- return rc;
-}
-
-/**
- * get_rhte() - obtains validated resource handle table entry reference
- * @ctxi: Context owning the resource handle.
- * @rhndl: Resource handle associated with entry.
- * @lli: LUN associated with request.
- *
- * Return: Validated RHTE on success, NULL on failure
- */
-struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
- struct llun_info *lli)
-{
- struct cxlflash_cfg *cfg = ctxi->cfg;
- struct device *dev = &cfg->dev->dev;
- struct sisl_rht_entry *rhte = NULL;
-
- if (unlikely(!ctxi->rht_start)) {
- dev_dbg(dev, "%s: Context does not have allocated RHT\n",
- __func__);
- goto out;
- }
-
- if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) {
- dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
- __func__, rhndl);
- goto out;
- }
-
- if (unlikely(ctxi->rht_lun[rhndl] != lli)) {
- dev_dbg(dev, "%s: Bad resource handle LUN rhndl=%d\n",
- __func__, rhndl);
- goto out;
- }
-
- rhte = &ctxi->rht_start[rhndl];
- if (unlikely(rhte->nmask == 0)) {
- dev_dbg(dev, "%s: Unopened resource handle rhndl=%d\n",
- __func__, rhndl);
- rhte = NULL;
- goto out;
- }
-
-out:
- return rhte;
-}
-
-/**
- * rhte_checkout() - obtains free/empty resource handle table entry
- * @ctxi: Context owning the resource handle.
- * @lli: LUN associated with request.
- *
- * Return: Free RHTE on success, NULL on failure
- */
-struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
- struct llun_info *lli)
-{
- struct cxlflash_cfg *cfg = ctxi->cfg;
- struct device *dev = &cfg->dev->dev;
- struct sisl_rht_entry *rhte = NULL;
- int i;
-
- /* Find a free RHT entry */
- for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
- if (ctxi->rht_start[i].nmask == 0) {
- rhte = &ctxi->rht_start[i];
- ctxi->rht_out++;
- break;
- }
-
- if (likely(rhte))
- ctxi->rht_lun[i] = lli;
-
- dev_dbg(dev, "%s: returning rhte=%p index=%d\n", __func__, rhte, i);
- return rhte;
-}
-
-/**
- * rhte_checkin() - releases a resource handle table entry
- * @ctxi: Context owning the resource handle.
- * @rhte: RHTE to release.
- */
-void rhte_checkin(struct ctx_info *ctxi,
- struct sisl_rht_entry *rhte)
-{
- u32 rsrc_handle = rhte - ctxi->rht_start;
-
- rhte->nmask = 0;
- rhte->fp = 0;
- ctxi->rht_out--;
- ctxi->rht_lun[rsrc_handle] = NULL;
- ctxi->rht_needs_ws[rsrc_handle] = false;
-}
-
-/**
- * rht_format1() - populates a RHTE for format 1
- * @rhte: RHTE to populate.
- * @lun_id: LUN ID of LUN associated with RHTE.
- * @perm: Desired permissions for RHTE.
- * @port_sel: Port selection mask
- */
-static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm,
- u32 port_sel)
-{
- /*
- * Populate the Format 1 RHT entry for direct access (physical
- * LUN) using the synchronization sequence defined in the
- * SISLite specification.
- */
- struct sisl_rht_entry_f1 dummy = { 0 };
- struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;
-
- memset(rhte_f1, 0, sizeof(*rhte_f1));
- rhte_f1->fp = SISL_RHT_FP(1U, 0);
- dma_wmb(); /* Make setting of format bit visible */
-
- rhte_f1->lun_id = lun_id;
- dma_wmb(); /* Make setting of LUN id visible */
-
- /*
- * Use a dummy RHT Format 1 entry to build the second dword
- * of the entry that must be populated in a single write when
- * enabled (valid bit set to TRUE).
- */
- dummy.valid = 0x80;
- dummy.fp = SISL_RHT_FP(1U, perm);
- dummy.port_sel = port_sel;
- rhte_f1->dw = dummy.dw;
-
- dma_wmb(); /* Make remaining RHT entry fields visible */
-}
-
-/**
- * cxlflash_lun_attach() - attaches a user to a LUN and manages the LUN's mode
- * @gli: LUN to attach.
- * @mode: Desired mode of the LUN.
- * @locked: Mutex status on current thread.
- *
- * Return: 0 on success, -errno on failure
- */
-int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked)
-{
- int rc = 0;
-
- if (!locked)
- mutex_lock(&gli->mutex);
-
- if (gli->mode == MODE_NONE)
- gli->mode = mode;
- else if (gli->mode != mode) {
- pr_debug("%s: gli_mode=%d requested_mode=%d\n",
- __func__, gli->mode, mode);
- rc = -EINVAL;
- goto out;
- }
-
- gli->users++;
- WARN_ON(gli->users <= 0);
-out:
- pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n",
- __func__, rc, gli->mode, gli->users);
- if (!locked)
- mutex_unlock(&gli->mutex);
- return rc;
-}
-
-/**
- * cxlflash_lun_detach() - detaches a user from a LUN and resets the LUN's mode
- * @gli: LUN to detach.
- *
- * When resetting the mode, terminate block allocation resources as they
- * are no longer required (service is safe to call even when block allocation
- * resources were not present - such as when transitioning from physical mode).
- * These resources will be reallocated when needed (subsequent transition to
- * virtual mode).
- */
-void cxlflash_lun_detach(struct glun_info *gli)
-{
- mutex_lock(&gli->mutex);
- WARN_ON(gli->mode == MODE_NONE);
- if (--gli->users == 0) {
- gli->mode = MODE_NONE;
- cxlflash_ba_terminate(&gli->blka.ba_lun);
- }
- pr_debug("%s: gli->users=%u\n", __func__, gli->users);
- WARN_ON(gli->users < 0);
- mutex_unlock(&gli->mutex);
-}
-
-/**
- * _cxlflash_disk_release() - releases the specified resource entry
- * @sdev: SCSI device associated with LUN.
- * @ctxi: Context owning resources.
- * @release: Release ioctl data structure.
- *
- * For LUNs in virtual mode, the virtual LUN associated with the specified
- * resource handle is resized to 0 prior to releasing the RHTE. Note that the
- * AFU sync should _not_ be performed when the context is sitting on the error
- * recovery list. A context on the error recovery list is not known to the AFU
- * due to reset. When the context is recovered, it will be reattached and made
- * known again to the AFU.
- *
- * Return: 0 on success, -errno on failure
- */
-int _cxlflash_disk_release(struct scsi_device *sdev,
- struct ctx_info *ctxi,
- struct dk_cxlflash_release *release)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct afu *afu = cfg->afu;
- bool put_ctx = false;
-
- struct dk_cxlflash_resize size;
- res_hndl_t rhndl = release->rsrc_handle;
-
- int rc = 0;
- int rcr = 0;
- u64 ctxid = DECODE_CTXID(release->context_id),
- rctxid = release->context_id;
-
- struct sisl_rht_entry *rhte;
- struct sisl_rht_entry_f1 *rhte_f1;
-
- dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu gli->mode=%u gli->users=%u\n",
- __func__, ctxid, release->rsrc_handle, gli->mode, gli->users);
-
- if (!ctxi) {
- ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
- __func__, ctxid);
- rc = -EINVAL;
- goto out;
- }
-
- put_ctx = true;
- }
-
- rhte = get_rhte(ctxi, rhndl, lli);
- if (unlikely(!rhte)) {
- dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
- __func__, rhndl);
- rc = -EINVAL;
- goto out;
- }
-
- /*
- * Resize to 0 for virtual LUNS by setting the size
- * to 0. This will clear LXT_START and LXT_CNT fields
- * in the RHT entry and properly sync with the AFU.
- *
- * Afterwards we clear the remaining fields.
- */
- switch (gli->mode) {
- case MODE_VIRTUAL:
- marshal_rele_to_resize(release, &size);
- size.req_size = 0;
- rc = _cxlflash_vlun_resize(sdev, ctxi, &size);
- if (rc) {
- dev_dbg(dev, "%s: resize failed rc %d\n", __func__, rc);
- goto out;
- }
-
- break;
- case MODE_PHYSICAL:
- /*
- * Clear the Format 1 RHT entry for direct access
- * (physical LUN) using the synchronization sequence
- * defined in the SISLite specification.
- */
- rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;
-
- rhte_f1->valid = 0;
- dma_wmb(); /* Make revocation of RHT entry visible */
-
- rhte_f1->lun_id = 0;
- dma_wmb(); /* Make clearing of LUN id visible */
-
- rhte_f1->dw = 0;
- dma_wmb(); /* Make RHT entry bottom-half clearing visible */
-
- if (!ctxi->err_recovery_active) {
- rcr = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
- if (unlikely(rcr))
- dev_dbg(dev, "%s: AFU sync failed rc=%d\n",
- __func__, rcr);
- }
- break;
- default:
- WARN(1, "Unsupported LUN mode!");
- goto out;
- }
-
- rhte_checkin(ctxi, rhte);
- cxlflash_lun_detach(gli);
-
-out:
- if (put_ctx)
- put_context(ctxi);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-int cxlflash_disk_release(struct scsi_device *sdev, void *release)
-{
- return _cxlflash_disk_release(sdev, NULL, release);
-}
-
-/**
- * destroy_context() - releases a context
- * @cfg: Internal structure associated with the host.
- * @ctxi: Context to release.
- *
- * This routine is safe to be called with a non-initialized context.
- * Also note that the routine conditionally checks for the existence
- * of the context control map before clearing the RHT registers and
- * context capabilities because it is possible to destroy a context
- * while the context is in the error state (previous mapping was
- * removed [so there is no need to worry about clearing] and context
- * is waiting for a new mapping).
- */
-static void destroy_context(struct cxlflash_cfg *cfg,
- struct ctx_info *ctxi)
-{
- struct afu *afu = cfg->afu;
-
- if (ctxi->initialized) {
- WARN_ON(!list_empty(&ctxi->luns));
-
- /* Clear RHT registers and drop all capabilities for context */
- if (afu->afu_map && ctxi->ctrl_map) {
- writeq_be(0, &ctxi->ctrl_map->rht_start);
- writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
- writeq_be(0, &ctxi->ctrl_map->ctx_cap);
- }
- }
-
- /* Free memory associated with context */
- free_page((ulong)ctxi->rht_start);
- kfree(ctxi->rht_needs_ws);
- kfree(ctxi->rht_lun);
- kfree(ctxi);
-}
-
-/**
- * create_context() - allocates and initializes a context
- * @cfg: Internal structure associated with the host.
- *
- * Return: Allocated context on success, NULL on failure
- */
-static struct ctx_info *create_context(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
- struct ctx_info *ctxi = NULL;
- struct llun_info **lli = NULL;
- u8 *ws = NULL;
- struct sisl_rht_entry *rhte;
-
- ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL);
- lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL);
- ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL);
- if (unlikely(!ctxi || !lli || !ws)) {
- dev_err(dev, "%s: Unable to allocate context\n", __func__);
- goto err;
- }
-
- rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL);
- if (unlikely(!rhte)) {
- dev_err(dev, "%s: Unable to allocate RHT\n", __func__);
- goto err;
- }
-
- ctxi->rht_lun = lli;
- ctxi->rht_needs_ws = ws;
- ctxi->rht_start = rhte;
-out:
- return ctxi;
-
-err:
- kfree(ws);
- kfree(lli);
- kfree(ctxi);
- ctxi = NULL;
- goto out;
-}
-
-/**
- * init_context() - initializes a previously allocated context
- * @ctxi: Previously allocated context
- * @cfg: Internal structure associated with the host.
- * @ctx: Previously obtained context cookie.
- * @ctxid: Previously obtained process element associated with CXL context.
- * @file: Previously obtained file associated with CXL context.
- * @perms: User-specified permissions.
- * @irqs: User-specified number of interrupts.
- */
-static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
- void *ctx, int ctxid, struct file *file, u32 perms,
- u64 irqs)
-{
- struct afu *afu = cfg->afu;
-
- ctxi->rht_perms = perms;
- ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
- ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
- ctxi->irqs = irqs;
- ctxi->pid = task_tgid_nr(current); /* tgid = pid */
- ctxi->ctx = ctx;
- ctxi->cfg = cfg;
- ctxi->file = file;
- ctxi->initialized = true;
- mutex_init(&ctxi->mutex);
- kref_init(&ctxi->kref);
- INIT_LIST_HEAD(&ctxi->luns);
- INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */
-}
-
-/**
- * remove_context() - context kref release handler
- * @kref: Kernel reference associated with context to be removed.
- *
- * When a context no longer has any references it can safely be removed
- * from global access and destroyed. Note that it is assumed the thread
- * relinquishing access to the context holds its mutex.
- */
-static void remove_context(struct kref *kref)
-{
- struct ctx_info *ctxi = container_of(kref, struct ctx_info, kref);
- struct cxlflash_cfg *cfg = ctxi->cfg;
- u64 ctxid = DECODE_CTXID(ctxi->ctxid);
-
- /* Remove context from table/error list */
- WARN_ON(!mutex_is_locked(&ctxi->mutex));
- ctxi->unavail = true;
- mutex_unlock(&ctxi->mutex);
- mutex_lock(&cfg->ctx_tbl_list_mutex);
- mutex_lock(&ctxi->mutex);
-
- if (!list_empty(&ctxi->list))
- list_del(&ctxi->list);
- cfg->ctx_tbl[ctxid] = NULL;
- mutex_unlock(&cfg->ctx_tbl_list_mutex);
- mutex_unlock(&ctxi->mutex);
-
- /* Context now completely uncoupled/unreachable */
- destroy_context(cfg, ctxi);
-}
-
-/**
- * _cxlflash_disk_detach() - detaches a LUN from a context
- * @sdev: SCSI device associated with LUN.
- * @ctxi: Context owning resources.
- * @detach: Detach ioctl data structure.
- *
- * As part of the detach, all per-context resources associated with the LUN
- * are cleaned up. When detaching the last LUN for a context, the context
- * itself is cleaned up and released.
- *
- * Return: 0 on success, -errno on failure
- */
-static int _cxlflash_disk_detach(struct scsi_device *sdev,
- struct ctx_info *ctxi,
- struct dk_cxlflash_detach *detach)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct lun_access *lun_access, *t;
- struct dk_cxlflash_release rel;
- bool put_ctx = false;
-
- int i;
- int rc = 0;
- u64 ctxid = DECODE_CTXID(detach->context_id),
- rctxid = detach->context_id;
-
- dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid);
-
- if (!ctxi) {
- ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
- __func__, ctxid);
- rc = -EINVAL;
- goto out;
- }
-
- put_ctx = true;
- }
-
- /* Cleanup outstanding resources tied to this LUN */
- if (ctxi->rht_out) {
- marshal_det_to_rele(detach, &rel);
- for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
- if (ctxi->rht_lun[i] == lli) {
- rel.rsrc_handle = i;
- _cxlflash_disk_release(sdev, ctxi, &rel);
- }
-
- /* No need to loop further if we're done */
- if (ctxi->rht_out == 0)
- break;
- }
- }
-
- /* Take our LUN out of context, free the node */
- list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
- if (lun_access->lli == lli) {
- list_del(&lun_access->list);
- kfree(lun_access);
- lun_access = NULL;
- break;
- }
-
- /*
- * Release the context reference and the sdev reference that
- * bound this LUN to the context.
- */
- if (kref_put(&ctxi->kref, remove_context))
- put_ctx = false;
- scsi_device_put(sdev);
-out:
- if (put_ctx)
- put_context(ctxi);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-static int cxlflash_disk_detach(struct scsi_device *sdev, void *detach)
-{
- return _cxlflash_disk_detach(sdev, NULL, detach);
-}
-
-/**
- * cxlflash_cxl_release() - release handler for adapter file descriptor
- * @inode: File-system inode associated with fd.
- * @file: File installed with adapter file descriptor.
- *
- * This routine is the release handler for the fops registered with
- * the CXL services on an initial attach for a context. It is called
- * when a close (explicitly by the user or as part of a process tear
- * down) is performed on the adapter file descriptor returned to the
- * user. The user should be aware that explicitly performing a close is
- * considered catastrophic and subsequent usage of the superpipe API
- * with previously saved off tokens will fail.
- *
- * This routine derives the context reference and calls detach for
- * each LUN associated with the context. The final detach operation
- * causes the context itself to be freed. With the exception of when the
- * CXL process element (context id) lookup fails (a case that should
- * theoretically never occur), every call into this routine results
- * in a complete freeing of a context.
- *
- * Detaching the LUN is typically an ioctl() operation and the underlying
- * code assumes that ioctl_rwsem has been acquired as a reader. To support
- * that design point, the semaphore is acquired and released around detach.
- *
- * Return: 0 on success
- */
-static int cxlflash_cxl_release(struct inode *inode, struct file *file)
-{
- struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
- cxl_fops);
- void *ctx = cfg->ops->fops_get_context(file);
- struct device *dev = &cfg->dev->dev;
- struct ctx_info *ctxi = NULL;
- struct dk_cxlflash_detach detach = { { 0 }, 0 };
- struct lun_access *lun_access, *t;
- enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
- int ctxid;
-
- ctxid = cfg->ops->process_element(ctx);
- if (unlikely(ctxid < 0)) {
- dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
- __func__, ctx, ctxid);
- goto out;
- }
-
- ctxi = get_context(cfg, ctxid, file, ctrl);
- if (unlikely(!ctxi)) {
- ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE);
- if (!ctxi) {
- dev_dbg(dev, "%s: ctxid=%d already free\n",
- __func__, ctxid);
- goto out_release;
- }
-
- dev_dbg(dev, "%s: Another process owns ctxid=%d\n",
- __func__, ctxid);
- put_context(ctxi);
- goto out;
- }
-
- dev_dbg(dev, "%s: close for ctxid=%d\n", __func__, ctxid);
-
- down_read(&cfg->ioctl_rwsem);
- detach.context_id = ctxi->ctxid;
- list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
- _cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
- up_read(&cfg->ioctl_rwsem);
-out_release:
- cfg->ops->fd_release(inode, file);
-out:
- dev_dbg(dev, "%s: returning\n", __func__);
- return 0;
-}
-
-/**
- * unmap_context() - clears a previously established mapping
- * @ctxi: Context owning the mapping.
- *
- * This routine is used to switch between the error notification page
- * (dummy page of all 1's) and the real mapping (established by the CXL
- * fault handler).
- */
-static void unmap_context(struct ctx_info *ctxi)
-{
- unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1);
-}
-
-/**
- * get_err_page() - obtains and allocates the error notification page
- * @cfg: Internal structure associated with the host.
- *
- * Return: error notification page on success, NULL on failure
- */
-static struct page *get_err_page(struct cxlflash_cfg *cfg)
-{
- struct page *err_page = global.err_page;
- struct device *dev = &cfg->dev->dev;
-
- if (unlikely(!err_page)) {
- err_page = alloc_page(GFP_KERNEL);
- if (unlikely(!err_page)) {
- dev_err(dev, "%s: Unable to allocate err_page\n",
- __func__);
- goto out;
- }
-
- memset(page_address(err_page), -1, PAGE_SIZE);
-
- /* Serialize update w/ other threads to avoid a leak */
- mutex_lock(&global.mutex);
- if (likely(!global.err_page))
- global.err_page = err_page;
- else {
- __free_page(err_page);
- err_page = global.err_page;
- }
- mutex_unlock(&global.mutex);
- }
-
-out:
- dev_dbg(dev, "%s: returning err_page=%p\n", __func__, err_page);
- return err_page;
-}
-
-/**
- * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor
- * @vmf: VM fault associated with current fault.
- *
- * To support error notification via MMIO, faults are 'caught' by this routine
- * that was inserted before passing back the adapter file descriptor on attach.
- * When a fault occurs, this routine evaluates if error recovery is active and
- * if so, installs the error page to 'notify' the user about the error state.
- * During normal operation, the fault is simply handled by the original fault
- * handler that was installed by CXL services as part of initializing the
- * adapter file descriptor. The VMA's page protection bits are toggled to
- * indicate cached/not-cached depending on the memory backing the fault.
- *
- * Return: 0 on success, VM_FAULT_SIGBUS on failure
- */
-static vm_fault_t cxlflash_mmap_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct file *file = vma->vm_file;
- struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
- cxl_fops);
- void *ctx = cfg->ops->fops_get_context(file);
- struct device *dev = &cfg->dev->dev;
- struct ctx_info *ctxi = NULL;
- struct page *err_page = NULL;
- enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
- vm_fault_t rc = 0;
- int ctxid;
-
- ctxid = cfg->ops->process_element(ctx);
- if (unlikely(ctxid < 0)) {
- dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
- __func__, ctx, ctxid);
- goto err;
- }
-
- ctxi = get_context(cfg, ctxid, file, ctrl);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
- goto err;
- }
-
- dev_dbg(dev, "%s: fault for context %d\n", __func__, ctxid);
-
- if (likely(!ctxi->err_recovery_active)) {
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- rc = ctxi->cxl_mmap_vmops->fault(vmf);
- } else {
- dev_dbg(dev, "%s: err recovery active, use err_page\n",
- __func__);
-
- err_page = get_err_page(cfg);
- if (unlikely(!err_page)) {
- dev_err(dev, "%s: Could not get err_page\n", __func__);
- rc = VM_FAULT_RETRY;
- goto out;
- }
-
- get_page(err_page);
- vmf->page = err_page;
- vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
- }
-
-out:
- if (likely(ctxi))
- put_context(ctxi);
- dev_dbg(dev, "%s: returning rc=%x\n", __func__, rc);
- return rc;
-
-err:
- rc = VM_FAULT_SIGBUS;
- goto out;
-}
-
-/*
- * Local MMAP vmops to 'catch' faults
- */
-static const struct vm_operations_struct cxlflash_mmap_vmops = {
- .fault = cxlflash_mmap_fault,
-};
-
-/**
- * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor
- * @file: File installed with adapter file descriptor.
- * @vma: VM area associated with mapping.
- *
- * Installs local mmap vmops to 'catch' faults for error notification support.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
- cxl_fops);
- void *ctx = cfg->ops->fops_get_context(file);
- struct device *dev = &cfg->dev->dev;
- struct ctx_info *ctxi = NULL;
- enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
- int ctxid;
- int rc = 0;
-
- ctxid = cfg->ops->process_element(ctx);
- if (unlikely(ctxid < 0)) {
- dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
- __func__, ctx, ctxid);
- rc = -EIO;
- goto out;
- }
-
- ctxi = get_context(cfg, ctxid, file, ctrl);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
- rc = -EIO;
- goto out;
- }
-
- dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);
-
- rc = cfg->ops->fd_mmap(file, vma);
- if (likely(!rc)) {
- /* Insert ourself in the mmap fault handler path */
- ctxi->cxl_mmap_vmops = vma->vm_ops;
- vma->vm_ops = &cxlflash_mmap_vmops;
- }
-
-out:
- if (likely(ctxi))
- put_context(ctxi);
- return rc;
-}
-
-const struct file_operations cxlflash_cxl_fops = {
- .owner = THIS_MODULE,
- .mmap = cxlflash_cxl_mmap,
- .release = cxlflash_cxl_release,
-};
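Together, cxlflash_cxl_mmap() and cxlflash_mmap_fault() give user space a passive error indicator: while error recovery is active, the mapping is backed by the all-1's error page. A minimal user-space sketch of how a consumer might test for that condition follows; the function name and mapping length are illustrative, and the fd is assumed to be the adapter file descriptor returned on attach.

/* Hypothetical user-space check: returns true if the mapped MMIO area is
 * currently backed by the error notification page (all bytes 0xFF).
 */
#include <stdbool.h>
#include <stdint.h>
#include <sys/mman.h>

static bool adapter_mmio_in_error(int adap_fd, size_t map_len)
{
	volatile uint64_t *mmio;
	bool err;

	mmio = mmap(NULL, map_len, PROT_READ, MAP_SHARED, adap_fd, 0);
	if (mmio == MAP_FAILED)
		return true;		/* treat a failed mapping as an error state */

	err = (mmio[0] == ~0ULL);	/* error page reads back as all 1's */
	munmap((void *)mmio, map_len);
	return err;
}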
-
-/**
- * cxlflash_mark_contexts_error() - move contexts to error state and list
- * @cfg: Internal structure associated with the host.
- *
- * A context is only moved over to the error list when there are no outstanding
- * references to it. This ensures that a running operation has completed.
- *
- * Return: 0 on success, -errno on failure
- */
-int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg)
-{
- int i, rc = 0;
- struct ctx_info *ctxi = NULL;
-
- mutex_lock(&cfg->ctx_tbl_list_mutex);
-
- for (i = 0; i < MAX_CONTEXT; i++) {
- ctxi = cfg->ctx_tbl[i];
- if (ctxi) {
- mutex_lock(&ctxi->mutex);
- cfg->ctx_tbl[i] = NULL;
- list_add(&ctxi->list, &cfg->ctx_err_recovery);
- ctxi->err_recovery_active = true;
- ctxi->ctrl_map = NULL;
- unmap_context(ctxi);
- mutex_unlock(&ctxi->mutex);
- }
- }
-
- mutex_unlock(&cfg->ctx_tbl_list_mutex);
- return rc;
-}
-
-/*
- * Dummy NULL fops
- */
-static const struct file_operations null_fops = {
- .owner = THIS_MODULE,
-};
-
-/**
- * check_state() - checks and responds to the current adapter state
- * @cfg: Internal structure associated with the host.
- *
- * This routine can block and should only be used in process context.
- * It assumes that the caller is an ioctl thread and is holding the ioctl
- * read semaphore. This is temporarily let up across the wait to allow
- * for draining actively running ioctls. Also note that when waking up
- * from waiting in reset, the state is unknown and must be checked again
- * before proceeding.
- *
- * Return: 0 on success, -errno on failure
- */
-int check_state(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
-
-retry:
- switch (cfg->state) {
- case STATE_RESET:
- dev_dbg(dev, "%s: Reset state, going to wait...\n", __func__);
- up_read(&cfg->ioctl_rwsem);
- rc = wait_event_interruptible(cfg->reset_waitq,
- cfg->state != STATE_RESET);
- down_read(&cfg->ioctl_rwsem);
- if (unlikely(rc))
- break;
- goto retry;
- case STATE_FAILTERM:
- dev_dbg(dev, "%s: Failed/Terminating\n", __func__);
- rc = -ENODEV;
- break;
- default:
- break;
- }
-
- return rc;
-}
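The calling convention spelled out above (ioctl thread, read side of ioctl_rwsem held, state rechecked after any wait) can be summarized with a small sketch; the function below is hypothetical and only illustrates the expected caller pattern.

/* Hypothetical caller: check_state() expects to run on an ioctl thread
 * that already holds the read side of cfg->ioctl_rwsem.
 */
static int example_ioctl_body(struct cxlflash_cfg *cfg)
{
	int rc;

	down_read(&cfg->ioctl_rwsem);
	rc = check_state(cfg);	/* may drop and retake the semaphore while waiting */
	if (!rc) {
		/* ... ioctl work; adapter known not to be failed/terminating ... */
	}
	up_read(&cfg->ioctl_rwsem);
	return rc;
}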
-
-/**
- * cxlflash_disk_attach() - attach a LUN to a context
- * @sdev: SCSI device associated with LUN.
- * @arg: Attach ioctl data structure.
- *
- * Creates a context and attaches LUN to it. A LUN can only be attached
- * one time to a context (subsequent attaches for the same context/LUN pair
- * are not supported). Additional LUNs can be attached to a context by
- * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_disk_attach(struct scsi_device *sdev, void *arg)
-{
- struct dk_cxlflash_attach *attach = arg;
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct ctx_info *ctxi = NULL;
- struct lun_access *lun_access = NULL;
- int rc = 0;
- u32 perms;
- int ctxid = -1;
- u64 irqs = attach->num_interrupts;
- u64 flags = 0UL;
- u64 rctxid = 0UL;
- struct file *file = NULL;
-
- void *ctx = NULL;
-
- int fd = -1;
-
- if (irqs > 4) {
- dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
- __func__, irqs);
- rc = -EINVAL;
- goto out;
- }
-
- if (gli->max_lba == 0) {
- dev_dbg(dev, "%s: No capacity info for LUN=%016llx\n",
- __func__, lli->lun_id[sdev->channel]);
- rc = read_cap16(sdev, lli);
- if (rc) {
- dev_err(dev, "%s: Invalid device rc=%d\n",
- __func__, rc);
- rc = -ENODEV;
- goto out;
- }
- dev_dbg(dev, "%s: LBA = %016llx\n", __func__, gli->max_lba);
- dev_dbg(dev, "%s: BLK_LEN = %08x\n", __func__, gli->blk_len);
- }
-
- if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) {
- rctxid = attach->context_id;
- ctxi = get_context(cfg, rctxid, NULL, 0);
- if (!ctxi) {
- dev_dbg(dev, "%s: Bad context rctxid=%016llx\n",
- __func__, rctxid);
- rc = -EINVAL;
- goto out;
- }
-
- list_for_each_entry(lun_access, &ctxi->luns, list)
- if (lun_access->lli == lli) {
- dev_dbg(dev, "%s: Already attached\n",
- __func__);
- rc = -EINVAL;
- goto out;
- }
- }
-
- rc = scsi_device_get(sdev);
- if (unlikely(rc)) {
- dev_err(dev, "%s: Unable to get sdev reference\n", __func__);
- goto out;
- }
-
- lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
- if (unlikely(!lun_access)) {
- dev_err(dev, "%s: Unable to allocate lun_access\n", __func__);
- rc = -ENOMEM;
- goto err;
- }
-
- lun_access->lli = lli;
- lun_access->sdev = sdev;
-
- /* Non-NULL context indicates reuse (another context reference) */
- if (ctxi) {
- dev_dbg(dev, "%s: Reusing context for LUN rctxid=%016llx\n",
- __func__, rctxid);
- kref_get(&ctxi->kref);
- list_add(&lun_access->list, &ctxi->luns);
- goto out_attach;
- }
-
- ctxi = create_context(cfg);
- if (unlikely(!ctxi)) {
- dev_err(dev, "%s: Failed to create context ctxid=%d\n",
- __func__, ctxid);
- rc = -ENOMEM;
- goto err;
- }
-
- ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
- if (IS_ERR_OR_NULL(ctx)) {
- dev_err(dev, "%s: Could not initialize context %p\n",
- __func__, ctx);
- rc = -ENODEV;
- goto err;
- }
-
- rc = cfg->ops->start_work(ctx, irqs);
- if (unlikely(rc)) {
- dev_dbg(dev, "%s: Could not start context rc=%d\n",
- __func__, rc);
- goto err;
- }
-
- ctxid = cfg->ops->process_element(ctx);
- if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
- dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
- rc = -EPERM;
- goto err;
- }
-
- file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
- if (unlikely(fd < 0)) {
- rc = -ENODEV;
- dev_err(dev, "%s: Could not get file descriptor\n", __func__);
- goto err;
- }
-
- /* Translate read/write O_* flags from fcntl.h to AFU permission bits */
- perms = SISL_RHT_PERM(attach->hdr.flags + 1);
-
- /* Context mutex is locked upon return */
- init_context(ctxi, cfg, ctx, ctxid, file, perms, irqs);
-
- rc = afu_attach(cfg, ctxi);
- if (unlikely(rc)) {
- dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
- goto err;
- }
-
- /*
- * No error paths after this point. Once the fd is installed it's
- * visible to user space and can't be undone safely on this thread.
- * There is no need to worry about a deadlock here because no one
- * knows about us yet; we can be the only one holding our mutex.
- */
- list_add(&lun_access->list, &ctxi->luns);
- mutex_lock(&cfg->ctx_tbl_list_mutex);
- mutex_lock(&ctxi->mutex);
- cfg->ctx_tbl[ctxid] = ctxi;
- mutex_unlock(&cfg->ctx_tbl_list_mutex);
- fd_install(fd, file);
-
-out_attach:
- if (fd != -1)
- flags |= DK_CXLFLASH_APP_CLOSE_ADAP_FD;
- if (afu_is_sq_cmd_mode(afu))
- flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;
-
- attach->hdr.return_flags = flags;
- attach->context_id = ctxi->ctxid;
- attach->block_size = gli->blk_len;
- attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
- attach->last_lba = gli->max_lba;
- attach->max_xfer = sdev->host->max_sectors * MAX_SECTOR_UNIT;
- attach->max_xfer /= gli->blk_len;
-
-out:
- attach->adap_fd = fd;
-
- if (ctxi)
- put_context(ctxi);
-
- dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n",
- __func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
- return rc;
-
-err:
- /* Cleanup CXL context; okay to 'stop' even if it was not started */
- if (!IS_ERR_OR_NULL(ctx)) {
- cfg->ops->stop_context(ctx);
- cfg->ops->release_context(ctx);
- ctx = NULL;
- }
-
- /*
- * Here, we're overriding the fops with a dummy all-NULL fops because
- * fput() calls the release fop, which will cause us to mistakenly
- * call into the CXL code. Rather than try to add yet more complexity
- * to that routine (cxlflash_cxl_release) we should try to fix the
- * issue here.
- */
- if (fd > 0) {
- file->f_op = &null_fops;
- fput(file);
- put_unused_fd(fd);
- fd = -1;
- file = NULL;
- }
-
- /* Cleanup our context */
- if (ctxi) {
- destroy_context(cfg, ctxi);
- ctxi = NULL;
- }
-
- kfree(lun_access);
- scsi_device_put(sdev);
- goto out;
-}
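From user space, the attach path above is driven through the DK_CXLFLASH_ATTACH ioctl on the SCSI device node. The sketch below is illustrative only: the device path, interrupt count, and header include path are assumptions, and the access-mode handling simply mirrors the "Translate read/write O_* flags" comment in the function.

/* Hypothetical user-space attach sketch. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/cxlflash_ioctl.h>	/* installed uapi header (assumed path) */

static int attach_example(const char *dev_path)
{
	struct dk_cxlflash_attach attach;
	int fd = open(dev_path, O_RDWR);

	if (fd < 0)
		return -1;

	memset(&attach, 0, sizeof(attach));
	attach.hdr.version = DK_CXLFLASH_VERSION_0;
	attach.hdr.flags = O_RDWR;	/* access mode, translated to AFU perms */
	attach.num_interrupts = 4;	/* the driver rejects more than 4 */

	if (ioctl(fd, DK_CXLFLASH_ATTACH, &attach)) {
		close(fd);
		return -1;
	}

	/*
	 * attach.context_id, attach.adap_fd, attach.block_size and
	 * attach.last_lba now describe the attached context and LUN.
	 */
	return attach.adap_fd;
}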
-
-/**
- * recover_context() - recovers a context in error
- * @cfg: Internal structure associated with the host.
- * @ctxi: Context to release.
- * @adap_fd: Adapter file descriptor associated with new/recovered context.
- *
- * Re-establishes the state for a context-in-error.
- *
- * Return: 0 on success, -errno on failure
- */
-static int recover_context(struct cxlflash_cfg *cfg,
- struct ctx_info *ctxi,
- int *adap_fd)
-{
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
- int fd = -1;
- int ctxid = -1;
- struct file *file;
- void *ctx;
- struct afu *afu = cfg->afu;
-
- ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
- if (IS_ERR_OR_NULL(ctx)) {
- dev_err(dev, "%s: Could not initialize context %p\n",
- __func__, ctx);
- rc = -ENODEV;
- goto out;
- }
-
- rc = cfg->ops->start_work(ctx, ctxi->irqs);
- if (unlikely(rc)) {
- dev_dbg(dev, "%s: Could not start context rc=%d\n",
- __func__, rc);
- goto err1;
- }
-
- ctxid = cfg->ops->process_element(ctx);
- if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
- dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
- rc = -EPERM;
- goto err2;
- }
-
- file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
- if (unlikely(fd < 0)) {
- rc = -ENODEV;
- dev_err(dev, "%s: Could not get file descriptor\n", __func__);
- goto err2;
- }
-
- /* Update with new MMIO area based on updated context id */
- ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
-
- rc = afu_attach(cfg, ctxi);
- if (rc) {
- dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
- goto err3;
- }
-
- /*
- * No error paths after this point. Once the fd is installed it's
- * visible to user space and can't be undone safely on this thread.
- */
- ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
- ctxi->ctx = ctx;
- ctxi->file = file;
-
- /*
- * Put context back in table (note the reinit of the context list);
- * we must first drop the context's mutex and then acquire it in
- * order with the table/list mutex to avoid a deadlock - safe to do
- * here because no one can find us at this moment in time.
- */
- mutex_unlock(&ctxi->mutex);
- mutex_lock(&cfg->ctx_tbl_list_mutex);
- mutex_lock(&ctxi->mutex);
- list_del_init(&ctxi->list);
- cfg->ctx_tbl[ctxid] = ctxi;
- mutex_unlock(&cfg->ctx_tbl_list_mutex);
- fd_install(fd, file);
- *adap_fd = fd;
-out:
- dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n",
- __func__, ctxid, fd, rc);
- return rc;
-
-err3:
- fput(file);
- put_unused_fd(fd);
-err2:
- cfg->ops->stop_context(ctx);
-err1:
- cfg->ops->release_context(ctx);
- goto out;
-}
-
-/**
- * cxlflash_afu_recover() - initiates AFU recovery
- * @sdev: SCSI device associated with LUN.
- * @arg: Recover ioctl data structure.
- *
- * Only a single recovery is allowed at a time to avoid exhausting CXL
- * resources (leading to recovery failure) in the event that we're up
- * against the maximum number of contexts limit. For similar reasons,
- * a context recovery is retried if there are multiple recoveries taking
- * place at the same time and the failure was due to CXL services being
- * unable to keep up.
- *
- * As this routine is called on ioctl context, it holds the ioctl r/w
- * semaphore that is used to drain ioctls in recovery scenarios. The
- * implementation to achieve the pacing described above (a local mutex)
- * requires that the ioctl r/w semaphore be dropped and reacquired to
- * avoid a 3-way deadlock when multiple process recoveries operate in
- * parallel.
- *
- * Because a user can detect an error condition before the kernel, it is
- * quite possible for this routine to act as the kernel's EEH detection
- * source (MMIO read of mbox_r). Because of this, there is a window of
- * time where an EEH might have been detected but not yet 'serviced'
- * (callback invoked, causing the device to enter reset state). To avoid
- * looping in this routine during that window, a 1 second sleep is in place
- * between the time the MMIO failure is detected and the time a wait on the
- * reset wait queue is attempted via check_state().
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_afu_recover(struct scsi_device *sdev, void *arg)
-{
- struct dk_cxlflash_recover_afu *recover = arg;
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct afu *afu = cfg->afu;
- struct ctx_info *ctxi = NULL;
- struct mutex *mutex = &cfg->ctx_recovery_mutex;
- struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
- u64 flags;
- u64 ctxid = DECODE_CTXID(recover->context_id),
- rctxid = recover->context_id;
- long reg;
- bool locked = true;
- int lretry = 20; /* up to 2 seconds */
- int new_adap_fd = -1;
- int rc = 0;
-
- atomic_inc(&cfg->recovery_threads);
- up_read(&cfg->ioctl_rwsem);
- rc = mutex_lock_interruptible(mutex);
- down_read(&cfg->ioctl_rwsem);
- if (rc) {
- locked = false;
- goto out;
- }
-
- rc = check_state(cfg);
- if (rc) {
- dev_err(dev, "%s: Failed state rc=%d\n", __func__, rc);
- rc = -ENODEV;
- goto out;
- }
-
- dev_dbg(dev, "%s: reason=%016llx rctxid=%016llx\n",
- __func__, recover->reason, rctxid);
-
-retry:
- /* Ensure that this process is attached to the context */
- ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
- rc = -EINVAL;
- goto out;
- }
-
- if (ctxi->err_recovery_active) {
-retry_recover:
- rc = recover_context(cfg, ctxi, &new_adap_fd);
- if (unlikely(rc)) {
- dev_err(dev, "%s: Recovery failed ctxid=%llu rc=%d\n",
- __func__, ctxid, rc);
- if ((rc == -ENODEV) &&
- ((atomic_read(&cfg->recovery_threads) > 1) ||
- (lretry--))) {
- dev_dbg(dev, "%s: Going to try again\n",
- __func__);
- mutex_unlock(mutex);
- msleep(100);
- rc = mutex_lock_interruptible(mutex);
- if (rc) {
- locked = false;
- goto out;
- }
- goto retry_recover;
- }
-
- goto out;
- }
-
- ctxi->err_recovery_active = false;
-
- flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD |
- DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
- if (afu_is_sq_cmd_mode(afu))
- flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;
-
- recover->hdr.return_flags = flags;
- recover->context_id = ctxi->ctxid;
- recover->adap_fd = new_adap_fd;
- recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
- goto out;
- }
-
- /* Test if in error state */
- reg = readq_be(&hwq->ctrl_map->mbox_r);
- if (reg == -1) {
- dev_dbg(dev, "%s: MMIO fail, wait for recovery.\n", __func__);
-
- /*
- * Before checking the state, put back the context obtained with
- * get_context() as it is no longer needed and sleep for a short
- * period of time (see prolog notes).
- */
- put_context(ctxi);
- ctxi = NULL;
- ssleep(1);
- rc = check_state(cfg);
- if (unlikely(rc))
- goto out;
- goto retry;
- }
-
- dev_dbg(dev, "%s: MMIO working, no recovery required\n", __func__);
-out:
- if (likely(ctxi))
- put_context(ctxi);
- if (locked)
- mutex_unlock(mutex);
- atomic_dec_if_positive(&cfg->recovery_threads);
- return rc;
-}
-
-/**
- * process_sense() - evaluates and processes sense data
- * @sdev: SCSI device associated with LUN.
- * @verify: Verify ioctl data structure.
- *
- * Return: 0 on success, -errno on failure
- */
-static int process_sense(struct scsi_device *sdev,
- struct dk_cxlflash_verify *verify)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- u64 prev_lba = gli->max_lba;
- struct scsi_sense_hdr sshdr = { 0 };
- int rc = 0;
-
- rc = scsi_normalize_sense((const u8 *)&verify->sense_data,
- DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr);
- if (!rc) {
- dev_err(dev, "%s: Failed to normalize sense data\n", __func__);
- rc = -EINVAL;
- goto out;
- }
-
- switch (sshdr.sense_key) {
- case NO_SENSE:
- case RECOVERED_ERROR:
- case NOT_READY:
- break;
- case UNIT_ATTENTION:
- switch (sshdr.asc) {
- case 0x29: /* Power on Reset or Device Reset */
- fallthrough;
- case 0x2A: /* Device settings/capacity changed */
- rc = read_cap16(sdev, lli);
- if (rc) {
- rc = -ENODEV;
- break;
- }
- if (prev_lba != gli->max_lba)
- dev_dbg(dev, "%s: Capacity changed old=%lld "
- "new=%lld\n", __func__, prev_lba,
- gli->max_lba);
- break;
- case 0x3F: /* Report LUNs changed, Rescan. */
- scsi_scan_host(cfg->host);
- break;
- default:
- rc = -EIO;
- break;
- }
- break;
- default:
- rc = -EIO;
- break;
- }
-out:
- dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n", __func__,
- sshdr.sense_key, sshdr.asc, sshdr.ascq, rc);
- return rc;
-}
-
-/**
- * cxlflash_disk_verify() - verifies a LUN is the same and handle size changes
- * @sdev: SCSI device associated with LUN.
- * @arg: Verify ioctl data structure.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_disk_verify(struct scsi_device *sdev, void *arg)
-{
- struct dk_cxlflash_verify *verify = arg;
- int rc = 0;
- struct ctx_info *ctxi = NULL;
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct sisl_rht_entry *rhte = NULL;
- res_hndl_t rhndl = verify->rsrc_handle;
- u64 ctxid = DECODE_CTXID(verify->context_id),
- rctxid = verify->context_id;
- u64 last_lba = 0;
-
- dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llx, hint=%016llx, "
- "flags=%016llx\n", __func__, ctxid, verify->rsrc_handle,
- verify->hint, verify->hdr.flags);
-
- ctxi = get_context(cfg, rctxid, lli, 0);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
- rc = -EINVAL;
- goto out;
- }
-
- rhte = get_rhte(ctxi, rhndl, lli);
- if (unlikely(!rhte)) {
- dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
- __func__, rhndl);
- rc = -EINVAL;
- goto out;
- }
-
- /*
- * Look at the hint/sense to see if it requires us to redrive
- * inquiry (i.e. the Unit attention is due to the WWN changing).
- */
- if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) {
- /* Can't hold mutex across process_sense/read_cap16,
- * since we could have an intervening EEH event.
- */
- ctxi->unavail = true;
- mutex_unlock(&ctxi->mutex);
- rc = process_sense(sdev, verify);
- if (unlikely(rc)) {
- dev_err(dev, "%s: Failed to validate sense data (%d)\n",
- __func__, rc);
- mutex_lock(&ctxi->mutex);
- ctxi->unavail = false;
- goto out;
- }
- mutex_lock(&ctxi->mutex);
- ctxi->unavail = false;
- }
-
- switch (gli->mode) {
- case MODE_PHYSICAL:
- last_lba = gli->max_lba;
- break;
- case MODE_VIRTUAL:
- /* Cast lxt_cnt to u64 for multiply to be treated as 64bit op */
- last_lba = ((u64)rhte->lxt_cnt * MC_CHUNK_SIZE * gli->blk_len);
- last_lba /= CXLFLASH_BLOCK_SIZE;
- last_lba--;
- break;
- default:
- WARN(1, "Unsupported LUN mode!");
- }
-
- verify->last_lba = last_lba;
-
-out:
- if (likely(ctxi))
- put_context(ctxi);
- dev_dbg(dev, "%s: returning rc=%d llba=%llx\n",
- __func__, rc, verify->last_lba);
- return rc;
-}
-
-/**
- * decode_ioctl() - translates an encoded ioctl to an easily identifiable string
- * @cmd: The ioctl command to decode.
- *
- * Return: A string identifying the decoded ioctl.
- */
-static char *decode_ioctl(unsigned int cmd)
-{
- switch (cmd) {
- case DK_CXLFLASH_ATTACH:
- return __stringify_1(DK_CXLFLASH_ATTACH);
- case DK_CXLFLASH_USER_DIRECT:
- return __stringify_1(DK_CXLFLASH_USER_DIRECT);
- case DK_CXLFLASH_USER_VIRTUAL:
- return __stringify_1(DK_CXLFLASH_USER_VIRTUAL);
- case DK_CXLFLASH_VLUN_RESIZE:
- return __stringify_1(DK_CXLFLASH_VLUN_RESIZE);
- case DK_CXLFLASH_RELEASE:
- return __stringify_1(DK_CXLFLASH_RELEASE);
- case DK_CXLFLASH_DETACH:
- return __stringify_1(DK_CXLFLASH_DETACH);
- case DK_CXLFLASH_VERIFY:
- return __stringify_1(DK_CXLFLASH_VERIFY);
- case DK_CXLFLASH_VLUN_CLONE:
- return __stringify_1(DK_CXLFLASH_VLUN_CLONE);
- case DK_CXLFLASH_RECOVER_AFU:
- return __stringify_1(DK_CXLFLASH_RECOVER_AFU);
- case DK_CXLFLASH_MANAGE_LUN:
- return __stringify_1(DK_CXLFLASH_MANAGE_LUN);
- }
-
- return "UNKNOWN";
-}
-
-/**
- * cxlflash_disk_direct_open() - opens a direct (physical) disk
- * @sdev: SCSI device associated with LUN.
- * @arg: UDirect ioctl data structure.
- *
- * On successful return, the user is informed of the resource handle
- * to be used to identify the direct lun and the size (in blocks) of
- * the direct lun in last LBA format.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct dk_cxlflash_release rel = { { 0 }, 0 };
-
- struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg;
-
- u64 ctxid = DECODE_CTXID(pphys->context_id),
- rctxid = pphys->context_id;
- u64 lun_size = 0;
- u64 last_lba = 0;
- u64 rsrc_handle = -1;
- u32 port = CHAN2PORTMASK(sdev->channel);
-
- int rc = 0;
-
- struct ctx_info *ctxi = NULL;
- struct sisl_rht_entry *rhte = NULL;
-
- dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);
-
- rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
- if (unlikely(rc)) {
- dev_dbg(dev, "%s: Failed attach to LUN (PHYSICAL)\n", __func__);
- goto out;
- }
-
- ctxi = get_context(cfg, rctxid, lli, 0);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
- rc = -EINVAL;
- goto err1;
- }
-
- rhte = rhte_checkout(ctxi, lli);
- if (unlikely(!rhte)) {
- dev_dbg(dev, "%s: Too many opens ctxid=%lld\n",
- __func__, ctxid);
- rc = -EMFILE; /* too many opens */
- goto err1;
- }
-
- rsrc_handle = (rhte - ctxi->rht_start);
-
- rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port);
-
- last_lba = gli->max_lba;
- pphys->hdr.return_flags = 0;
- pphys->last_lba = last_lba;
- pphys->rsrc_handle = rsrc_handle;
-
- rc = cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);
- if (unlikely(rc)) {
- dev_dbg(dev, "%s: AFU sync failed rc=%d\n", __func__, rc);
- goto err2;
- }
-
-out:
- if (likely(ctxi))
- put_context(ctxi);
- dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
- __func__, rsrc_handle, rc, last_lba);
- return rc;
-
-err2:
- marshal_udir_to_rele(pphys, &rel);
- _cxlflash_disk_release(sdev, ctxi, &rel);
- goto out;
-err1:
- cxlflash_lun_detach(gli);
- goto out;
-}
-
-/**
- * ioctl_common() - common IOCTL handler for driver
- * @sdev: SCSI device associated with LUN.
- * @cmd: IOCTL command.
- *
- * Handles common fencing operations that are valid for multiple ioctls. Always
- * allow through ioctls that are cleanup oriented in nature, even when operating
- * in a failed/terminating state.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ioctl_common(struct scsi_device *sdev, unsigned int cmd)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- int rc = 0;
-
- if (unlikely(!lli)) {
- dev_dbg(dev, "%s: Unknown LUN\n", __func__);
- rc = -EINVAL;
- goto out;
- }
-
- rc = check_state(cfg);
- if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) {
- switch (cmd) {
- case DK_CXLFLASH_VLUN_RESIZE:
- case DK_CXLFLASH_RELEASE:
- case DK_CXLFLASH_DETACH:
- dev_dbg(dev, "%s: Command override rc=%d\n",
- __func__, rc);
- rc = 0;
- break;
- }
- }
-out:
- return rc;
-}
-
-/**
- * cxlflash_ioctl() - IOCTL handler for driver
- * @sdev: SCSI device associated with LUN.
- * @cmd: IOCTL command.
- * @arg: Userspace ioctl data structure.
- *
- * A read/write semaphore is used to implement a 'drain' of currently
- * running ioctls. The read semaphore is taken at the beginning of each
- * ioctl thread and released upon concluding execution. Additionally the
- * semaphore should be released and then reacquired in any ioctl execution
- * path which will wait for an event to occur that is outside the scope of
- * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
- * a thread simply needs to acquire the write semaphore.
- *
- * Return: 0 on success, -errno on failure
- */
-int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
-{
- typedef int (*sioctl) (struct scsi_device *, void *);
-
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
- struct dk_cxlflash_hdr *hdr;
- char buf[sizeof(union cxlflash_ioctls)];
- size_t size = 0;
- bool known_ioctl = false;
- int idx;
- int rc = 0;
- struct Scsi_Host *shost = sdev->host;
- sioctl do_ioctl = NULL;
-
- static const struct {
- size_t size;
- sioctl ioctl;
- } ioctl_tbl[] = { /* NOTE: order matters here */
- {sizeof(struct dk_cxlflash_attach), cxlflash_disk_attach},
- {sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open},
- {sizeof(struct dk_cxlflash_release), cxlflash_disk_release},
- {sizeof(struct dk_cxlflash_detach), cxlflash_disk_detach},
- {sizeof(struct dk_cxlflash_verify), cxlflash_disk_verify},
- {sizeof(struct dk_cxlflash_recover_afu), cxlflash_afu_recover},
- {sizeof(struct dk_cxlflash_manage_lun), cxlflash_manage_lun},
- {sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open},
- {sizeof(struct dk_cxlflash_resize), cxlflash_vlun_resize},
- {sizeof(struct dk_cxlflash_clone), cxlflash_disk_clone},
- };
-
- /* Hold read semaphore so we can drain if needed */
- down_read(&cfg->ioctl_rwsem);
-
- /* Restrict command set to physical support only for internal LUN */
- if (afu->internal_lun)
- switch (cmd) {
- case DK_CXLFLASH_RELEASE:
- case DK_CXLFLASH_USER_VIRTUAL:
- case DK_CXLFLASH_VLUN_RESIZE:
- case DK_CXLFLASH_VLUN_CLONE:
- dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n",
- __func__, decode_ioctl(cmd), afu->internal_lun);
- rc = -EINVAL;
- goto cxlflash_ioctl_exit;
- }
-
- switch (cmd) {
- case DK_CXLFLASH_ATTACH:
- case DK_CXLFLASH_USER_DIRECT:
- case DK_CXLFLASH_RELEASE:
- case DK_CXLFLASH_DETACH:
- case DK_CXLFLASH_VERIFY:
- case DK_CXLFLASH_RECOVER_AFU:
- case DK_CXLFLASH_USER_VIRTUAL:
- case DK_CXLFLASH_VLUN_RESIZE:
- case DK_CXLFLASH_VLUN_CLONE:
- dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n",
- __func__, decode_ioctl(cmd), cmd, shost->host_no,
- sdev->channel, sdev->id, sdev->lun);
- rc = ioctl_common(sdev, cmd);
- if (unlikely(rc))
- goto cxlflash_ioctl_exit;
-
- fallthrough;
-
- case DK_CXLFLASH_MANAGE_LUN:
- known_ioctl = true;
- idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH);
- size = ioctl_tbl[idx].size;
- do_ioctl = ioctl_tbl[idx].ioctl;
-
- if (likely(do_ioctl))
- break;
-
- fallthrough;
- default:
- rc = -EINVAL;
- goto cxlflash_ioctl_exit;
- }
-
- if (unlikely(copy_from_user(&buf, arg, size))) {
- dev_err(dev, "%s: copy_from_user() fail size=%lu cmd=%u (%s) arg=%p\n",
- __func__, size, cmd, decode_ioctl(cmd), arg);
- rc = -EFAULT;
- goto cxlflash_ioctl_exit;
- }
-
- hdr = (struct dk_cxlflash_hdr *)&buf;
- if (hdr->version != DK_CXLFLASH_VERSION_0) {
- dev_dbg(dev, "%s: Version %u not supported for %s\n",
- __func__, hdr->version, decode_ioctl(cmd));
- rc = -EINVAL;
- goto cxlflash_ioctl_exit;
- }
-
- if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) {
- dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
- rc = -EINVAL;
- goto cxlflash_ioctl_exit;
- }
-
- rc = do_ioctl(sdev, (void *)&buf);
- if (likely(!rc))
- if (unlikely(copy_to_user(arg, &buf, size))) {
- dev_err(dev, "%s: copy_to_user() fail size=%lu cmd=%u (%s) arg=%p\n",
- __func__, size, cmd, decode_ioctl(cmd), arg);
- rc = -EFAULT;
- }
-
- /* fall through to exit */
-
-cxlflash_ioctl_exit:
- up_read(&cfg->ioctl_rwsem);
- if (unlikely(rc && known_ioctl))
- dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
- "returned rc %d\n", __func__,
- decode_ioctl(cmd), cmd, shost->host_no,
- sdev->channel, sdev->id, sdev->lun, rc);
- else
- dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
- "returned rc %d\n", __func__, decode_ioctl(cmd),
- cmd, shost->host_no, sdev->channel, sdev->id,
- sdev->lun, rc);
- return rc;
-}
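The drain behaviour described in the prologue has a correspondingly small writer side; a minimal sketch (the function name is hypothetical) is:

/* Hypothetical drain sketch: taking the write side of ioctl_rwsem blocks
 * until every in-flight ioctl (each holding the read side) has finished or
 * parked in check_state().
 */
static void drain_ioctls_example(struct cxlflash_cfg *cfg)
{
	down_write(&cfg->ioctl_rwsem);
	/* ... reset/recovery work runs here with ioctls quiesced ... */
	up_write(&cfg->ioctl_rwsem);
}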
diff --git a/drivers/scsi/cxlflash/superpipe.h b/drivers/scsi/cxlflash/superpipe.h
deleted file mode 100644
index fe8c975d13d7..000000000000
--- a/drivers/scsi/cxlflash/superpipe.h
+++ /dev/null
@@ -1,150 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#ifndef _CXLFLASH_SUPERPIPE_H
-#define _CXLFLASH_SUPERPIPE_H
-
-extern struct cxlflash_global global;
-
-/*
- * Terminology: use afu (and not adapter) to refer to the HW.
- * Adapter is the entire slot and includes PSL out of which
- * only the AFU is visible to user space.
- */
-
-/* Chunk size parms: note sislite minimum chunk size is
- * 0x10000 LBAs, corresponding to an NMASK of 16.
- */
-#define MC_CHUNK_SIZE (1 << MC_RHT_NMASK) /* in LBAs */
-
-#define CMD_TIMEOUT 30 /* 30 secs */
-#define CMD_RETRIES 5 /* 5 retries for scsi_execute */
-
-#define MAX_SECTOR_UNIT 512 /* max_sector is in 512 byte multiples */
-
-enum lun_mode {
- MODE_NONE = 0,
- MODE_VIRTUAL,
- MODE_PHYSICAL
-};
-
-/* Global (entire driver, spans adapters) lun_info structure */
-struct glun_info {
- u64 max_lba; /* from read cap(16) */
- u32 blk_len; /* from read cap(16) */
- enum lun_mode mode; /* NONE, VIRTUAL, PHYSICAL */
- int users; /* Number of users w/ references to LUN */
-
- u8 wwid[16];
-
- struct mutex mutex;
-
- struct blka blka;
- struct list_head list;
-};
-
-/* Local (per-adapter) lun_info structure */
-struct llun_info {
- u64 lun_id[MAX_FC_PORTS]; /* from REPORT_LUNS */
- u32 lun_index; /* Index in the LUN table */
- u32 host_no; /* host_no from Scsi_host */
- u32 port_sel; /* What port to use for this LUN */
- bool in_table; /* Whether a LUN table entry was created */
-
- u8 wwid[16]; /* Keep a duplicate copy here? */
-
- struct glun_info *parent; /* Pointer to entry in global LUN structure */
- struct scsi_device *sdev;
- struct list_head list;
-};
-
-struct lun_access {
- struct llun_info *lli;
- struct scsi_device *sdev;
- struct list_head list;
-};
-
-enum ctx_ctrl {
- CTX_CTRL_CLONE = (1 << 1),
- CTX_CTRL_ERR = (1 << 2),
- CTX_CTRL_ERR_FALLBACK = (1 << 3),
- CTX_CTRL_NOPID = (1 << 4),
- CTX_CTRL_FILE = (1 << 5)
-};
-
-#define ENCODE_CTXID(_ctx, _id) (((((u64)_ctx) & 0xFFFFFFFF0ULL) << 28) | _id)
-#define DECODE_CTXID(_val) (_val & 0xFFFFFFFF)
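The two defines above pack a validation cookie: the low 32 bits carry the hardware context id, while bits of the ctx_info pointer are folded into the upper half so that a stale or forged id is unlikely to match a live context. A hypothetical helper (not part of the original header) makes the round trip explicit:

static inline bool ctxid_cookie_roundtrips(void *ctxi, u32 ctxid)
{
	/* Pointer bits land in the upper word; DECODE_CTXID strips them off. */
	return DECODE_CTXID(ENCODE_CTXID(ctxi, ctxid)) == ctxid;
}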
-
-struct ctx_info {
- struct sisl_ctrl_map __iomem *ctrl_map; /* initialized at startup */
- struct sisl_rht_entry *rht_start; /* 1 page (req'd for alignment),
- * alloc/free on attach/detach
- */
- u32 rht_out; /* Number of checked out RHT entries */
- u32 rht_perms; /* User-defined permissions for RHT entries */
- struct llun_info **rht_lun; /* Mapping of RHT entries to LUNs */
- u8 *rht_needs_ws; /* User-desired write-same function per RHTE */
-
- u64 ctxid;
- u64 irqs; /* Number of interrupts requested for context */
- pid_t pid;
- bool initialized;
- bool unavail;
- bool err_recovery_active;
- struct mutex mutex; /* Context protection */
- struct kref kref;
- void *ctx;
- struct cxlflash_cfg *cfg;
- struct list_head luns; /* LUNs attached to this context */
- const struct vm_operations_struct *cxl_mmap_vmops;
- struct file *file;
- struct list_head list; /* Link contexts in error recovery */
-};
-
-struct cxlflash_global {
- struct mutex mutex;
- struct list_head gluns;/* list of glun_info structs */
- struct page *err_page; /* One page of all 0xFF for error notification */
-};
-
-int cxlflash_vlun_resize(struct scsi_device *sdev, void *resize);
-int _cxlflash_vlun_resize(struct scsi_device *sdev, struct ctx_info *ctxi,
- struct dk_cxlflash_resize *resize);
-
-int cxlflash_disk_release(struct scsi_device *sdev,
- void *release);
-int _cxlflash_disk_release(struct scsi_device *sdev, struct ctx_info *ctxi,
- struct dk_cxlflash_release *release);
-
-int cxlflash_disk_clone(struct scsi_device *sdev, void *arg);
-
-int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg);
-
-int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked);
-void cxlflash_lun_detach(struct glun_info *gli);
-
-struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxit, void *arg,
- enum ctx_ctrl ctrl);
-void put_context(struct ctx_info *ctxi);
-
-struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
- struct llun_info *lli);
-
-struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
- struct llun_info *lli);
-void rhte_checkin(struct ctx_info *ctxi, struct sisl_rht_entry *rhte);
-
-void cxlflash_ba_terminate(struct ba_lun *ba_lun);
-
-int cxlflash_manage_lun(struct scsi_device *sdev, void *manage);
-
-int check_state(struct cxlflash_cfg *cfg);
-
-#endif /* ifndef _CXLFLASH_SUPERPIPE_H */
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
deleted file mode 100644
index 32e807703377..000000000000
--- a/drivers/scsi/cxlflash/vlun.c
+++ /dev/null
@@ -1,1336 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/syscalls.h>
-#include <linux/unaligned.h>
-#include <asm/bitsperlong.h>
-
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_host.h>
-#include <uapi/scsi/cxlflash_ioctl.h>
-
-#include "sislite.h"
-#include "common.h"
-#include "vlun.h"
-#include "superpipe.h"
-
-/**
- * marshal_virt_to_resize() - translate uvirtual to resize structure
- * @virt: Source structure from which to translate/copy.
- * @resize: Destination structure for the translate/copy.
- */
-static void marshal_virt_to_resize(struct dk_cxlflash_uvirtual *virt,
- struct dk_cxlflash_resize *resize)
-{
- resize->hdr = virt->hdr;
- resize->context_id = virt->context_id;
- resize->rsrc_handle = virt->rsrc_handle;
- resize->req_size = virt->lun_size;
- resize->last_lba = virt->last_lba;
-}
-
-/**
- * marshal_clone_to_rele() - translate clone to release structure
- * @clone: Source structure from which to translate/copy.
- * @release: Destination structure for the translate/copy.
- */
-static void marshal_clone_to_rele(struct dk_cxlflash_clone *clone,
- struct dk_cxlflash_release *release)
-{
- release->hdr = clone->hdr;
- release->context_id = clone->context_id_dst;
-}
-
-/**
- * ba_init() - initializes a block allocator
- * @ba_lun: Block allocator to initialize.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ba_init(struct ba_lun *ba_lun)
-{
- struct ba_lun_info *bali = NULL;
- int lun_size_au = 0, i = 0;
- int last_word_underflow = 0;
- u64 *lam;
-
- pr_debug("%s: Initializing LUN: lun_id=%016llx "
- "ba_lun->lsize=%lx ba_lun->au_size=%lX\n",
- __func__, ba_lun->lun_id, ba_lun->lsize, ba_lun->au_size);
-
- /* Calculate bit map size */
- lun_size_au = ba_lun->lsize / ba_lun->au_size;
- if (lun_size_au == 0) {
- pr_debug("%s: Requested LUN size of 0!\n", __func__);
- return -EINVAL;
- }
-
- /* Allocate lun information container */
- bali = kzalloc(sizeof(struct ba_lun_info), GFP_KERNEL);
- if (unlikely(!bali)) {
- pr_err("%s: Failed to allocate lun_info lun_id=%016llx\n",
- __func__, ba_lun->lun_id);
- return -ENOMEM;
- }
-
- bali->total_aus = lun_size_au;
- bali->lun_bmap_size = lun_size_au / BITS_PER_LONG;
-
- if (lun_size_au % BITS_PER_LONG)
- bali->lun_bmap_size++;
-
- /* Allocate bitmap space */
- bali->lun_alloc_map = kzalloc((bali->lun_bmap_size * sizeof(u64)),
- GFP_KERNEL);
- if (unlikely(!bali->lun_alloc_map)) {
- pr_err("%s: Failed to allocate lun allocation map: "
- "lun_id=%016llx\n", __func__, ba_lun->lun_id);
- kfree(bali);
- return -ENOMEM;
- }
-
- /* Initialize the bit map size and set all bits to '1' */
- bali->free_aun_cnt = lun_size_au;
-
- for (i = 0; i < bali->lun_bmap_size; i++)
- bali->lun_alloc_map[i] = 0xFFFFFFFFFFFFFFFFULL;
-
- /* If the last word is not fully utilized, mark extra bits as allocated */
- last_word_underflow = (bali->lun_bmap_size * BITS_PER_LONG);
- last_word_underflow -= bali->free_aun_cnt;
- if (last_word_underflow > 0) {
- lam = &bali->lun_alloc_map[bali->lun_bmap_size - 1];
- for (i = (HIBIT - last_word_underflow + 1);
- i < BITS_PER_LONG;
- i++)
- clear_bit(i, (ulong *)lam);
- }
-
- /* Initialize high elevator index, low/curr already at 0 from kzalloc */
- bali->free_high_idx = bali->lun_bmap_size;
-
- /* Allocate clone map */
- bali->aun_clone_map = kzalloc((bali->total_aus * sizeof(u8)),
- GFP_KERNEL);
- if (unlikely(!bali->aun_clone_map)) {
- pr_err("%s: Failed to allocate clone map: lun_id=%016llx\n",
- __func__, ba_lun->lun_id);
- kfree(bali->lun_alloc_map);
- kfree(bali);
- return -ENOMEM;
- }
-
- /* Pass the allocated LUN info as a handle to the user */
- ba_lun->ba_lun_handle = bali;
-
- pr_debug("%s: Successfully initialized the LUN: "
- "lun_id=%016llx bitmap size=%x, free_aun_cnt=%llx\n",
- __func__, ba_lun->lun_id, bali->lun_bmap_size,
- bali->free_aun_cnt);
- return 0;
-}
-
-/**
- * find_free_range() - locates a free bit within the block allocator
- * @low: First word in block allocator to start search.
- * @high: Last word in block allocator to search.
- * @bali: LUN information structure owning the block allocator to search.
- * @bit_word: Passes back the word in the block allocator owning the free bit.
- *
- * Return: The bit position within the passed back word, -1 on failure
- */
-static int find_free_range(u32 low,
- u32 high,
- struct ba_lun_info *bali, int *bit_word)
-{
- int i;
- u64 bit_pos = -1;
- ulong *lam, num_bits;
-
- for (i = low; i < high; i++)
- if (bali->lun_alloc_map[i] != 0) {
- lam = (ulong *)&bali->lun_alloc_map[i];
- num_bits = (sizeof(*lam) * BITS_PER_BYTE);
- bit_pos = find_first_bit(lam, num_bits);
-
- pr_devel("%s: Found free bit %llu in LUN "
- "map entry %016llx at bitmap index = %d\n",
- __func__, bit_pos, bali->lun_alloc_map[i], i);
-
- *bit_word = i;
- bali->free_aun_cnt--;
- clear_bit(bit_pos, lam);
- break;
- }
-
- return bit_pos;
-}
-
-/**
- * ba_alloc() - allocates a block from the block allocator
- * @ba_lun: Block allocator from which to allocate a block.
- *
- * Return: The allocated block, -1 on failure
- */
-static u64 ba_alloc(struct ba_lun *ba_lun)
-{
- u64 bit_pos = -1;
- int bit_word = 0;
- struct ba_lun_info *bali = NULL;
-
- bali = ba_lun->ba_lun_handle;
-
- pr_debug("%s: Received block allocation request: "
- "lun_id=%016llx free_aun_cnt=%llx\n",
- __func__, ba_lun->lun_id, bali->free_aun_cnt);
-
- if (bali->free_aun_cnt == 0) {
- pr_debug("%s: No space left on LUN: lun_id=%016llx\n",
- __func__, ba_lun->lun_id);
- return -1ULL;
- }
-
- /* Search to find a free entry, curr->high then low->curr */
- bit_pos = find_free_range(bali->free_curr_idx,
- bali->free_high_idx, bali, &bit_word);
- if (bit_pos == -1) {
- bit_pos = find_free_range(bali->free_low_idx,
- bali->free_curr_idx,
- bali, &bit_word);
- if (bit_pos == -1) {
- pr_debug("%s: Could not find an allocation unit on LUN:"
- " lun_id=%016llx\n", __func__, ba_lun->lun_id);
- return -1ULL;
- }
- }
-
- /* Update the free_curr_idx */
- if (bit_pos == HIBIT)
- bali->free_curr_idx = bit_word + 1;
- else
- bali->free_curr_idx = bit_word;
-
- pr_debug("%s: Allocating AU number=%llx lun_id=%016llx "
- "free_aun_cnt=%llx\n", __func__,
- ((bit_word * BITS_PER_LONG) + bit_pos), ba_lun->lun_id,
- bali->free_aun_cnt);
-
- return (u64) ((bit_word * BITS_PER_LONG) + bit_pos);
-}
-
-/**
- * validate_alloc() - validates the specified block has been allocated
- * @bali: LUN info owning the block allocator.
- * @aun: Block to validate.
- *
- * Return: 0 on success, -1 on failure
- */
-static int validate_alloc(struct ba_lun_info *bali, u64 aun)
-{
- int idx = 0, bit_pos = 0;
-
- idx = aun / BITS_PER_LONG;
- bit_pos = aun % BITS_PER_LONG;
-
- if (test_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx]))
- return -1;
-
- return 0;
-}
-
-/**
- * ba_free() - frees a block from the block allocator
- * @ba_lun: Block allocator from which to allocate a block.
- * @to_free: Block to free.
- *
- * Return: 0 on success, -1 on failure
- */
-static int ba_free(struct ba_lun *ba_lun, u64 to_free)
-{
- int idx = 0, bit_pos = 0;
- struct ba_lun_info *bali = NULL;
-
- bali = ba_lun->ba_lun_handle;
-
- if (validate_alloc(bali, to_free)) {
- pr_debug("%s: AUN %llx is not allocated on lun_id=%016llx\n",
- __func__, to_free, ba_lun->lun_id);
- return -1;
- }
-
- pr_debug("%s: Received a request to free AU=%llx lun_id=%016llx "
- "free_aun_cnt=%llx\n", __func__, to_free, ba_lun->lun_id,
- bali->free_aun_cnt);
-
- if (bali->aun_clone_map[to_free] > 0) {
- pr_debug("%s: AUN %llx lun_id=%016llx cloned. Clone count=%x\n",
- __func__, to_free, ba_lun->lun_id,
- bali->aun_clone_map[to_free]);
- bali->aun_clone_map[to_free]--;
- return 0;
- }
-
- idx = to_free / BITS_PER_LONG;
- bit_pos = to_free % BITS_PER_LONG;
-
- set_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx]);
- bali->free_aun_cnt++;
-
- if (idx < bali->free_low_idx)
- bali->free_low_idx = idx;
- else if (idx > bali->free_high_idx)
- bali->free_high_idx = idx;
-
- pr_debug("%s: Successfully freed AU bit_pos=%x bit map index=%x "
- "lun_id=%016llx free_aun_cnt=%llx\n", __func__, bit_pos, idx,
- ba_lun->lun_id, bali->free_aun_cnt);
-
- return 0;
-}
-
-/**
- * ba_clone() - Clone a chunk of the block allocation table
- * @ba_lun: Block allocator from which to allocate a block.
- * @to_clone: Block to clone.
- *
- * Return: 0 on success, -1 on failure
- */
-static int ba_clone(struct ba_lun *ba_lun, u64 to_clone)
-{
- struct ba_lun_info *bali = ba_lun->ba_lun_handle;
-
- if (validate_alloc(bali, to_clone)) {
- pr_debug("%s: AUN=%llx not allocated on lun_id=%016llx\n",
- __func__, to_clone, ba_lun->lun_id);
- return -1;
- }
-
- pr_debug("%s: Received a request to clone AUN %llx on lun_id=%016llx\n",
- __func__, to_clone, ba_lun->lun_id);
-
- if (bali->aun_clone_map[to_clone] == MAX_AUN_CLONE_CNT) {
- pr_debug("%s: AUN %llx on lun_id=%016llx hit max clones already\n",
- __func__, to_clone, ba_lun->lun_id);
- return -1;
- }
-
- bali->aun_clone_map[to_clone]++;
-
- return 0;
-}
-
-/**
- * ba_space() - returns the amount of free space left in the block allocator
- * @ba_lun: Block allocator.
- *
- * Return: Amount of free space in block allocator
- */
-static u64 ba_space(struct ba_lun *ba_lun)
-{
- struct ba_lun_info *bali = ba_lun->ba_lun_handle;
-
- return bali->free_aun_cnt;
-}
-
-/**
- * cxlflash_ba_terminate() - frees resources associated with the block allocator
- * @ba_lun: Block allocator.
- *
- * Safe to call in a partially allocated state.
- */
-void cxlflash_ba_terminate(struct ba_lun *ba_lun)
-{
- struct ba_lun_info *bali = ba_lun->ba_lun_handle;
-
- if (bali) {
- kfree(bali->aun_clone_map);
- kfree(bali->lun_alloc_map);
- kfree(bali);
- ba_lun->ba_lun_handle = NULL;
- }
-}
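For reference, the allocator above is consumed in the order ba_init() -> ba_alloc()/ba_clone()/ba_free() -> cxlflash_ba_terminate(). A minimal usage sketch, assuming a populated struct ba_lun and a caller that serializes access (as init_vlun() and grow_lxt() do via blka->mutex), might look like:

/* Hypothetical usage sketch of the block allocator API above. */
static int ba_usage_example(struct ba_lun *ba_lun)
{
	u64 aun;
	int rc;

	rc = ba_init(ba_lun);
	if (rc)
		return rc;

	aun = ba_alloc(ba_lun);		/* -1ULL means no free allocation units */
	if (aun != -1ULL) {
		ba_clone(ba_lun, aun);	/* bump the clone count for this AU */
		ba_free(ba_lun, aun);	/* only drops the clone count */
		ba_free(ba_lun, aun);	/* now returns the AU to the bitmap */
	}

	pr_debug("free AUs remaining: %llu\n", ba_space(ba_lun));
	cxlflash_ba_terminate(ba_lun);
	return 0;
}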
-
-/**
- * init_vlun() - initializes a LUN for virtual use
- * @lli: LUN information structure that owns the block allocator.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_vlun(struct llun_info *lli)
-{
- int rc = 0;
- struct glun_info *gli = lli->parent;
- struct blka *blka = &gli->blka;
-
- memset(blka, 0, sizeof(*blka));
- mutex_init(&blka->mutex);
-
- /* LUN IDs are unique per port, save the index instead */
- blka->ba_lun.lun_id = lli->lun_index;
- blka->ba_lun.lsize = gli->max_lba + 1;
- blka->ba_lun.lba_size = gli->blk_len;
-
- blka->ba_lun.au_size = MC_CHUNK_SIZE;
- blka->nchunk = blka->ba_lun.lsize / MC_CHUNK_SIZE;
-
- rc = ba_init(&blka->ba_lun);
- if (unlikely(rc))
- pr_debug("%s: cannot init block_alloc, rc=%d\n", __func__, rc);
-
- pr_debug("%s: returning rc=%d lli=%p\n", __func__, rc, lli);
- return rc;
-}
-
-/**
- * write_same16() - sends a SCSI WRITE_SAME16 (0) command to specified LUN
- * @sdev: SCSI device associated with LUN.
- * @lba: Logical block address to start write same.
- * @nblks: Number of logical blocks to write same.
- *
- * The SCSI WRITE_SAME16 can take quite a while to complete. Should an EEH occur
- * while in scsi_execute_cmd(), the EEH handler will attempt to recover. As
- * part of the recovery, the handler drains all currently running ioctls,
- * waiting until they have completed before proceeding with a reset. As this
- * routine is used on the ioctl path, this can create a condition where the
- * EEH handler becomes stuck, infinitely waiting for this ioctl thread. To
- * avoid this behavior, temporarily unmark this thread as an ioctl thread by
- * releasing the ioctl read semaphore. This will allow the EEH handler to
- * proceed with a recovery while this thread is still running. Once the
- * scsi_execute_cmd() returns, reacquire the ioctl read semaphore and check the
- * adapter state in case it changed while inside of scsi_execute_cmd(). The
- * state check will wait if the adapter is still being recovered or return a
- * failure if the recovery failed. In the event that the adapter reset failed,
- * simply return the failure as the ioctl would be unable to continue.
- *
- * Note that the above puts a requirement on this routine to only be called on
- * an ioctl thread.
- *
- * Return: 0 on success, -errno on failure
- */
-static int write_same16(struct scsi_device *sdev,
- u64 lba,
- u32 nblks)
-{
- u8 *cmd_buf = NULL;
- u8 *scsi_cmd = NULL;
- int rc = 0;
- int result = 0;
- u64 offset = lba;
- int left = nblks;
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- const u32 s = ilog2(sdev->sector_size) - 9;
- const u32 to = sdev->request_queue->rq_timeout;
- const u32 ws_limit =
- sdev->request_queue->limits.max_write_zeroes_sectors >> s;
-
- cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
- scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
- if (unlikely(!cmd_buf || !scsi_cmd)) {
- rc = -ENOMEM;
- goto out;
- }
-
- while (left > 0) {
-
- scsi_cmd[0] = WRITE_SAME_16;
- scsi_cmd[1] = cfg->ws_unmap ? 0x8 : 0;
- put_unaligned_be64(offset, &scsi_cmd[2]);
- put_unaligned_be32(ws_limit < left ? ws_limit : left,
- &scsi_cmd[10]);
-
- /* Drop the ioctl read semaphore across lengthy call */
- up_read(&cfg->ioctl_rwsem);
- result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_OUT,
- cmd_buf, CMD_BUFSIZE, to,
- CMD_RETRIES, NULL);
- down_read(&cfg->ioctl_rwsem);
- rc = check_state(cfg);
- if (rc) {
- dev_err(dev, "%s: Failed state result=%08x\n",
- __func__, result);
- rc = -ENODEV;
- goto out;
- }
-
- if (result) {
- dev_err_ratelimited(dev, "%s: command failed for "
- "offset=%lld result=%08x\n",
- __func__, offset, result);
- rc = -EIO;
- goto out;
- }
- left -= ws_limit;
- offset += ws_limit;
- }
-
-out:
- kfree(cmd_buf);
- kfree(scsi_cmd);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * grow_lxt() - expands the translation table associated with the specified RHTE
- * @afu: AFU associated with the host.
- * @sdev: SCSI device associated with LUN.
- * @ctxid: Context ID of context owning the RHTE.
- * @rhndl: Resource handle associated with the RHTE.
- * @rhte: Resource handle entry (RHTE).
- * @new_size: Number of translation entries associated with RHTE.
- *
- * By design, this routine employs a 'best attempt' allocation and will
- * truncate the requested size down if there is not sufficient space in
- * the block allocator to satisfy the request but there does exist some
- * amount of space. The user is made aware of this by returning the size
- * allocated.
- *
- * Return: 0 on success, -errno on failure
- */
-static int grow_lxt(struct afu *afu,
- struct scsi_device *sdev,
- ctx_hndl_t ctxid,
- res_hndl_t rhndl,
- struct sisl_rht_entry *rhte,
- u64 *new_size)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct sisl_lxt_entry *lxt = NULL, *lxt_old = NULL;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct blka *blka = &gli->blka;
- u32 av_size;
- u32 ngrps, ngrps_old;
- u64 aun; /* chunk# allocated by block allocator */
- u64 delta = *new_size - rhte->lxt_cnt;
- u64 my_new_size;
- int i, rc = 0;
-
- /*
- * Check what is available in the block allocator before re-allocating the
- * LXT array. This is done up front under the mutex, which must not be
- * released until after allocation is complete.
- */
- mutex_lock(&blka->mutex);
- av_size = ba_space(&blka->ba_lun);
- if (unlikely(av_size <= 0)) {
- dev_dbg(dev, "%s: ba_space error av_size=%d\n",
- __func__, av_size);
- mutex_unlock(&blka->mutex);
- rc = -ENOSPC;
- goto out;
- }
-
- if (av_size < delta)
- delta = av_size;
-
- lxt_old = rhte->lxt_start;
- ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt);
- ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt + delta);
-
- if (ngrps != ngrps_old) {
- /* reallocate to fit new size */
- lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
- GFP_KERNEL);
- if (unlikely(!lxt)) {
- mutex_unlock(&blka->mutex);
- rc = -ENOMEM;
- goto out;
- }
-
- /* copy over all old entries */
- memcpy(lxt, lxt_old, (sizeof(*lxt) * rhte->lxt_cnt));
- } else
- lxt = lxt_old;
-
- /* nothing can fail from now on */
- my_new_size = rhte->lxt_cnt + delta;
-
- /* add new entries to the end */
- for (i = rhte->lxt_cnt; i < my_new_size; i++) {
- /*
- * Due to the earlier check of available space, ba_alloc
- * cannot fail here. If it did fail due to an internal error,
- * leave a rlba_base of -1u, which will likely be an
- * invalid LUN (too large).
- */
- aun = ba_alloc(&blka->ba_lun);
- if ((aun == -1ULL) || (aun >= blka->nchunk))
- dev_dbg(dev, "%s: ba_alloc error allocated chunk=%llu "
- "max=%llu\n", __func__, aun, blka->nchunk - 1);
-
- /* select both ports, use r/w perms from RHT */
- lxt[i].rlba_base = ((aun << MC_CHUNK_SHIFT) |
- (lli->lun_index << LXT_LUNIDX_SHIFT) |
- (RHT_PERM_RW << LXT_PERM_SHIFT |
- lli->port_sel));
- }
-
- mutex_unlock(&blka->mutex);
-
- /*
- * The following sequence is prescribed in the SISlite spec
- * for syncing up with the AFU when adding LXT entries.
- */
- dma_wmb(); /* Make LXT updates visible */
-
- rhte->lxt_start = lxt;
- dma_wmb(); /* Make RHT entry's LXT table update visible */
-
- rhte->lxt_cnt = my_new_size;
- dma_wmb(); /* Make RHT entry's LXT table size update visible */
-
- rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
- if (unlikely(rc))
- rc = -EAGAIN;
-
- /* free old lxt if reallocated */
- if (lxt != lxt_old)
- kfree(lxt_old);
- *new_size = my_new_size;
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * shrink_lxt() - reduces translation table associated with the specified RHTE
- * @afu: AFU associated with the host.
- * @sdev: SCSI device associated with LUN.
- * @rhndl: Resource handle associated with the RHTE.
- * @rhte: Resource handle entry (RHTE).
- * @ctxi: Context owning resources.
- * @new_size: Number of translation entries associated with RHTE.
- *
- * Return: 0 on success, -errno on failure
- */
-static int shrink_lxt(struct afu *afu,
- struct scsi_device *sdev,
- res_hndl_t rhndl,
- struct sisl_rht_entry *rhte,
- struct ctx_info *ctxi,
- u64 *new_size)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct sisl_lxt_entry *lxt, *lxt_old;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct blka *blka = &gli->blka;
- ctx_hndl_t ctxid = DECODE_CTXID(ctxi->ctxid);
- bool needs_ws = ctxi->rht_needs_ws[rhndl];
- bool needs_sync = !ctxi->err_recovery_active;
- u32 ngrps, ngrps_old;
- u64 aun; /* chunk# allocated by block allocator */
- u64 delta = rhte->lxt_cnt - *new_size;
- u64 my_new_size;
- int i, rc = 0;
-
- lxt_old = rhte->lxt_start;
- ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt);
- ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt - delta);
-
- if (ngrps != ngrps_old) {
- /* Reallocate to fit new size unless new size is 0 */
- if (ngrps) {
- lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
- GFP_KERNEL);
- if (unlikely(!lxt)) {
- rc = -ENOMEM;
- goto out;
- }
-
- /* Copy over old entries that will remain */
- memcpy(lxt, lxt_old,
- (sizeof(*lxt) * (rhte->lxt_cnt - delta)));
- } else
- lxt = NULL;
- } else
- lxt = lxt_old;
-
- /* Nothing can fail from now on */
- my_new_size = rhte->lxt_cnt - delta;
-
- /*
- * The following sequence is prescribed in the SISlite spec
- * for syncing up with the AFU when removing LXT entries.
- */
- rhte->lxt_cnt = my_new_size;
- dma_wmb(); /* Make RHT entry's LXT table size update visible */
-
- rhte->lxt_start = lxt;
- dma_wmb(); /* Make RHT entry's LXT table update visible */
-
- if (needs_sync) {
- rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
- if (unlikely(rc))
- rc = -EAGAIN;
- }
-
- if (needs_ws) {
- /*
- * Mark the context as unavailable, so that we can release
- * the mutex safely.
- */
- ctxi->unavail = true;
- mutex_unlock(&ctxi->mutex);
- }
-
- /* Free LBAs allocated to freed chunks */
- mutex_lock(&blka->mutex);
- for (i = delta - 1; i >= 0; i--) {
- aun = lxt_old[my_new_size + i].rlba_base >> MC_CHUNK_SHIFT;
- if (needs_ws)
- write_same16(sdev, aun, MC_CHUNK_SIZE);
- ba_free(&blka->ba_lun, aun);
- }
- mutex_unlock(&blka->mutex);
-
- if (needs_ws) {
- /* Make the context visible again */
- mutex_lock(&ctxi->mutex);
- ctxi->unavail = false;
- }
-
- /* Free old lxt if reallocated */
- if (lxt != lxt_old)
- kfree(lxt_old);
- *new_size = my_new_size;
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
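Note the ordering the SISlite sequences impose: grow_lxt() publishes the table pointer before raising lxt_cnt, while shrink_lxt() lowers lxt_cnt before swapping the pointer, so the AFU never sees a count larger than the number of valid entries behind whichever pointer it observes. The sketch below illustrates the same publish order with C11 atomics; it is only an analogy for the barrier placement, not the driver's dma_wmb()-based code:

#include <stdatomic.h>
#include <stddef.h>

struct table {
	_Atomic(long *) entries;   /* what the consumer dereferences */
	_Atomic size_t  count;     /* how many entries are valid     */
};

/* Grow: make the entries visible first, then advertise the larger count. */
void publish_grow(struct table *t, long *bigger, size_t new_count)
{
	atomic_store_explicit(&t->entries, bigger, memory_order_release);
	atomic_store_explicit(&t->count, new_count, memory_order_release);
}

/* Shrink: advertise the smaller count first, then retire the old entries. */
void publish_shrink(struct table *t, long *smaller, size_t new_count)
{
	atomic_store_explicit(&t->count, new_count, memory_order_release);
	atomic_store_explicit(&t->entries, smaller, memory_order_release);
}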
-
-/**
- * _cxlflash_vlun_resize() - changes the size of a virtual LUN
- * @sdev: SCSI device associated with LUN owning virtual LUN.
- * @ctxi: Context owning resources.
- * @resize: Resize ioctl data structure.
- *
- * On successful return, the user is informed of the new size (in blocks)
- * of the virtual LUN in last LBA format. When the size of the virtual
- * LUN is zero, the last LBA is reflected as -1. See comment in the
- * prologue for _cxlflash_disk_release() regarding AFU syncs and contexts
- * on the error recovery list.
- *
- * Return: 0 on success, -errno on failure
- */
-int _cxlflash_vlun_resize(struct scsi_device *sdev,
- struct ctx_info *ctxi,
- struct dk_cxlflash_resize *resize)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct afu *afu = cfg->afu;
- bool put_ctx = false;
-
- res_hndl_t rhndl = resize->rsrc_handle;
- u64 new_size;
- u64 nsectors;
- u64 ctxid = DECODE_CTXID(resize->context_id),
- rctxid = resize->context_id;
-
- struct sisl_rht_entry *rhte;
-
- int rc = 0;
-
- /*
- * The requested size (req_size) is always assumed to be in 4k blocks,
- * so we have to convert it here from 4k to chunk size.
- */
- nsectors = (resize->req_size * CXLFLASH_BLOCK_SIZE) / gli->blk_len;
- new_size = DIV_ROUND_UP(nsectors, MC_CHUNK_SIZE);
-
- dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu req_size=%llu new_size=%llu\n",
- __func__, ctxid, resize->rsrc_handle, resize->req_size,
- new_size);
-
- if (unlikely(gli->mode != MODE_VIRTUAL)) {
- dev_dbg(dev, "%s: LUN mode does not support resize mode=%d\n",
- __func__, gli->mode);
- rc = -EINVAL;
- goto out;
-
- }
-
- if (!ctxi) {
- ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
- __func__, ctxid);
- rc = -EINVAL;
- goto out;
- }
-
- put_ctx = true;
- }
-
- rhte = get_rhte(ctxi, rhndl, lli);
- if (unlikely(!rhte)) {
- dev_dbg(dev, "%s: Bad resource handle rhndl=%u\n",
- __func__, rhndl);
- rc = -EINVAL;
- goto out;
- }
-
- if (new_size > rhte->lxt_cnt)
- rc = grow_lxt(afu, sdev, ctxid, rhndl, rhte, &new_size);
- else if (new_size < rhte->lxt_cnt)
- rc = shrink_lxt(afu, sdev, rhndl, rhte, ctxi, &new_size);
- else {
- /*
- * Rare case where there is already sufficient space, just
- * need to perform a translation sync with the AFU. This
- * scenario likely follows a previous sync failure during
- * a resize operation. Accordingly, perform the heavyweight
- * form of translation sync as it is unknown which type of
- * resize failed previously.
- */
- rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
- if (unlikely(rc)) {
- rc = -EAGAIN;
- goto out;
- }
- }
-
- resize->hdr.return_flags = 0;
- resize->last_lba = (new_size * MC_CHUNK_SIZE * gli->blk_len);
- resize->last_lba /= CXLFLASH_BLOCK_SIZE;
- resize->last_lba--;
-
-out:
- if (put_ctx)
- put_context(ctxi);
- dev_dbg(dev, "%s: resized to %llu returning rc=%d\n",
- __func__, resize->last_lba, rc);
- return rc;
-}
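As a worked example of the size conversion above, assume 4 KB virtual blocks (CXLFLASH_BLOCK_SIZE), a backing LUN with 512-byte blocks (gli->blk_len) and chunks of 1 << MC_CHUNK_SHIFT = 65536 LBAs (the MC_CHUNK_SIZE value is assumed here from the shift in vlun.h). A request for a single 4 KB block then rounds up to one full chunk and reports a last LBA of 8191:

#include <stdio.h>
#include <stdint.h>

#define CXLFLASH_BLOCK_SIZE 4096ULL                    /* virtual LUN block size        */
#define MC_CHUNK_SHIFT      16
#define MC_CHUNK_SIZE       (1ULL << MC_CHUNK_SHIFT)   /* LBAs per chunk (assumed)      */
#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t req_size = 1;      /* user asks for one 4 KB block  */
	uint64_t blk_len  = 512;    /* backing LUN uses 512 B blocks */

	uint64_t nsectors = req_size * CXLFLASH_BLOCK_SIZE / blk_len;   /* 8       */
	uint64_t new_size = DIV_ROUND_UP(nsectors, MC_CHUNK_SIZE);      /* 1 chunk */
	uint64_t last_lba = new_size * MC_CHUNK_SIZE * blk_len
			    / CXLFLASH_BLOCK_SIZE - 1;                  /* 8191    */

	printf("chunks=%llu last_lba=%llu\n",
	       (unsigned long long)new_size, (unsigned long long)last_lba);
	return 0;
}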
-
-int cxlflash_vlun_resize(struct scsi_device *sdev, void *resize)
-{
- return _cxlflash_vlun_resize(sdev, NULL, resize);
-}
-
-/**
- * cxlflash_restore_luntable() - Restore LUN table to prior state
- * @cfg: Internal structure associated with the host.
- */
-void cxlflash_restore_luntable(struct cxlflash_cfg *cfg)
-{
- struct llun_info *lli, *temp;
- u32 lind;
- int k;
- struct device *dev = &cfg->dev->dev;
- __be64 __iomem *fc_port_luns;
-
- mutex_lock(&global.mutex);
-
- list_for_each_entry_safe(lli, temp, &cfg->lluns, list) {
- if (!lli->in_table)
- continue;
-
- lind = lli->lun_index;
- dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n", __func__, lind);
-
- for (k = 0; k < cfg->num_fc_ports; k++)
- if (lli->port_sel & (1 << k)) {
- fc_port_luns = get_fc_port_luns(cfg, k);
- writeq_be(lli->lun_id[k], &fc_port_luns[lind]);
- dev_dbg(dev, "\t%d=%llx\n", k, lli->lun_id[k]);
- }
- }
-
- mutex_unlock(&global.mutex);
-}
-
-/**
- * get_num_ports() - compute number of ports from port selection mask
- * @psm: Port selection mask.
- *
- * Return: Population count of port selection mask
- */
-static inline u8 get_num_ports(u32 psm)
-{
- static const u8 bits[16] = { 0, 1, 1, 2, 1, 2, 2, 3,
- 1, 2, 2, 3, 2, 3, 3, 4 };
-
- return bits[psm & 0xf];
-}
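get_num_ports() is a 4-bit population count done with a lookup table. A standalone check of the table against the compiler builtin (the builtin appears here only for verification; the driver keeps the table since psm is at most 4 bits wide):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t get_num_ports(uint32_t psm)
{
	static const uint8_t bits[16] = { 0, 1, 1, 2, 1, 2, 2, 3,
					  1, 2, 2, 3, 2, 3, 3, 4 };

	return bits[psm & 0xf];
}

int main(void)
{
	for (uint32_t psm = 0; psm < 16; psm++)
		assert(get_num_ports(psm) == __builtin_popcount(psm));

	printf("port mask 0x5 selects %u ports\n", get_num_ports(0x5)); /* 2 */
	return 0;
}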
-
-/**
- * init_luntable() - write an entry in the LUN table
- * @cfg: Internal structure associated with the host.
- * @lli: Per adapter LUN information structure.
- *
- * On successful return, a LUN table entry is created:
- * - at the top for LUNs visible on multiple ports.
- * - at the bottom for LUNs visible only on one port.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli)
-{
- u32 chan;
- u32 lind;
- u32 nports;
- int rc = 0;
- int k;
- struct device *dev = &cfg->dev->dev;
- __be64 __iomem *fc_port_luns;
-
- mutex_lock(&global.mutex);
-
- if (lli->in_table)
- goto out;
-
- nports = get_num_ports(lli->port_sel);
- if (nports == 0 || nports > cfg->num_fc_ports) {
- WARN(1, "Unsupported port configuration nports=%u", nports);
- rc = -EIO;
- goto out;
- }
-
- if (nports > 1) {
- /*
- * When a LUN is visible from multiple ports, we will put
- * it in the top half of the LUN table.
- */
- for (k = 0; k < cfg->num_fc_ports; k++) {
- if (!(lli->port_sel & (1 << k)))
- continue;
-
- if (cfg->promote_lun_index == cfg->last_lun_index[k]) {
- rc = -ENOSPC;
- goto out;
- }
- }
-
- lind = lli->lun_index = cfg->promote_lun_index;
- dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n", __func__, lind);
-
- for (k = 0; k < cfg->num_fc_ports; k++) {
- if (!(lli->port_sel & (1 << k)))
- continue;
-
- fc_port_luns = get_fc_port_luns(cfg, k);
- writeq_be(lli->lun_id[k], &fc_port_luns[lind]);
- dev_dbg(dev, "\t%d=%llx\n", k, lli->lun_id[k]);
- }
-
- cfg->promote_lun_index++;
- } else {
- /*
- * When a LUN is visible from only one port, we will put
- * it in the bottom half of the LUN table.
- */
- chan = PORTMASK2CHAN(lli->port_sel);
- if (cfg->promote_lun_index == cfg->last_lun_index[chan]) {
- rc = -ENOSPC;
- goto out;
- }
-
- lind = lli->lun_index = cfg->last_lun_index[chan];
- fc_port_luns = get_fc_port_luns(cfg, chan);
- writeq_be(lli->lun_id[chan], &fc_port_luns[lind]);
- cfg->last_lun_index[chan]--;
- dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n\t%d=%llx\n",
- __func__, lind, chan, lli->lun_id[chan]);
- }
-
- lli->in_table = true;
-out:
- mutex_unlock(&global.mutex);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * cxlflash_disk_virtual_open() - open a virtual disk of specified size
- * @sdev: SCSI device associated with LUN owning virtual LUN.
- * @arg: UVirtual ioctl data structure.
- *
- * On successful return, the user is informed of the resource handle
- * to be used to identify the virtual LUN and the size (in blocks) of
- * the virtual LUN in last LBA format. When the size of the virtual LUN
- * is zero, the last LBA is reflected as -1.
- *
- * Return: 0 on success, -errno on failure
- */
-int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
-
- struct dk_cxlflash_uvirtual *virt = (struct dk_cxlflash_uvirtual *)arg;
- struct dk_cxlflash_resize resize;
-
- u64 ctxid = DECODE_CTXID(virt->context_id),
- rctxid = virt->context_id;
- u64 lun_size = virt->lun_size;
- u64 last_lba = 0;
- u64 rsrc_handle = -1;
-
- int rc = 0;
-
- struct ctx_info *ctxi = NULL;
- struct sisl_rht_entry *rhte = NULL;
-
- dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);
-
- /* Setup the LUNs block allocator on first call */
- mutex_lock(&gli->mutex);
- if (gli->mode == MODE_NONE) {
- rc = init_vlun(lli);
- if (rc) {
- dev_err(dev, "%s: init_vlun failed rc=%d\n",
- __func__, rc);
- rc = -ENOMEM;
- goto err0;
- }
- }
-
- rc = cxlflash_lun_attach(gli, MODE_VIRTUAL, true);
- if (unlikely(rc)) {
- dev_err(dev, "%s: Failed attach to LUN (VIRTUAL)\n", __func__);
- goto err0;
- }
- mutex_unlock(&gli->mutex);
-
- rc = init_luntable(cfg, lli);
- if (rc) {
- dev_err(dev, "%s: init_luntable failed rc=%d\n", __func__, rc);
- goto err1;
- }
-
- ctxi = get_context(cfg, rctxid, lli, 0);
- if (unlikely(!ctxi)) {
- dev_err(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
- rc = -EINVAL;
- goto err1;
- }
-
- rhte = rhte_checkout(ctxi, lli);
- if (unlikely(!rhte)) {
- dev_err(dev, "%s: too many opens ctxid=%llu\n",
- __func__, ctxid);
- rc = -EMFILE; /* too many opens */
- goto err1;
- }
-
- rsrc_handle = (rhte - ctxi->rht_start);
-
- /* Populate RHT format 0 */
- rhte->nmask = MC_RHT_NMASK;
- rhte->fp = SISL_RHT_FP(0U, ctxi->rht_perms);
-
- /* Resize even if requested size is 0 */
- marshal_virt_to_resize(virt, &resize);
- resize.rsrc_handle = rsrc_handle;
- rc = _cxlflash_vlun_resize(sdev, ctxi, &resize);
- if (rc) {
- dev_err(dev, "%s: resize failed rc=%d\n", __func__, rc);
- goto err2;
- }
- last_lba = resize.last_lba;
-
- if (virt->hdr.flags & DK_CXLFLASH_UVIRTUAL_NEED_WRITE_SAME)
- ctxi->rht_needs_ws[rsrc_handle] = true;
-
- virt->hdr.return_flags = 0;
- virt->last_lba = last_lba;
- virt->rsrc_handle = rsrc_handle;
-
- if (get_num_ports(lli->port_sel) > 1)
- virt->hdr.return_flags |= DK_CXLFLASH_ALL_PORTS_ACTIVE;
-out:
- if (likely(ctxi))
- put_context(ctxi);
- dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
- __func__, rsrc_handle, rc, last_lba);
- return rc;
-
-err2:
- rhte_checkin(ctxi, rhte);
-err1:
- cxlflash_lun_detach(gli);
- goto out;
-err0:
- /* Special common cleanup prior to successful LUN attach */
- cxlflash_ba_terminate(&gli->blka.ba_lun);
- mutex_unlock(&gli->mutex);
- goto out;
-}
-
-/**
- * clone_lxt() - copies translation tables from source to destination RHTE
- * @afu: AFU associated with the host.
- * @blka: Block allocator associated with LUN.
- * @ctxid: Context ID of context owning the RHTE.
- * @rhndl: Resource handle associated with the RHTE.
- * @rhte: Destination resource handle entry (RHTE).
- * @rhte_src: Source resource handle entry (RHTE).
- *
- * Return: 0 on success, -errno on failure
- */
-static int clone_lxt(struct afu *afu,
- struct blka *blka,
- ctx_hndl_t ctxid,
- res_hndl_t rhndl,
- struct sisl_rht_entry *rhte,
- struct sisl_rht_entry *rhte_src)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- struct sisl_lxt_entry *lxt = NULL;
- bool locked = false;
- u32 ngrps;
- u64 aun; /* chunk# allocated by block allocator */
- int j;
- int i = 0;
- int rc = 0;
-
- ngrps = LXT_NUM_GROUPS(rhte_src->lxt_cnt);
-
- if (ngrps) {
- /* allocate new LXTs for clone */
- lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
- GFP_KERNEL);
- if (unlikely(!lxt)) {
- rc = -ENOMEM;
- goto out;
- }
-
- /* copy over */
- memcpy(lxt, rhte_src->lxt_start,
- (sizeof(*lxt) * rhte_src->lxt_cnt));
-
- /* Clone the LBAs in the block allocator via ref_cnt. Note that
- * the block allocator mutex must be held until it is established
- * that this routine will complete without the need for cleanup.
- */
- mutex_lock(&blka->mutex);
- locked = true;
- for (i = 0; i < rhte_src->lxt_cnt; i++) {
- aun = (lxt[i].rlba_base >> MC_CHUNK_SHIFT);
- if (ba_clone(&blka->ba_lun, aun) == -1ULL) {
- rc = -EIO;
- goto err;
- }
- }
- }
-
- /*
- * The following sequence is prescribed in the SISlite spec
- * for syncing up with the AFU when adding LXT entries.
- */
- dma_wmb(); /* Make LXT updates visible */
-
- rhte->lxt_start = lxt;
- dma_wmb(); /* Make RHT entry's LXT table update visible */
-
- rhte->lxt_cnt = rhte_src->lxt_cnt;
- dma_wmb(); /* Make RHT entry's LXT table size update visible */
-
- rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
- if (unlikely(rc)) {
- rc = -EAGAIN;
- goto err2;
- }
-
-out:
- if (locked)
- mutex_unlock(&blka->mutex);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-err2:
- /* Reset the RHTE */
- rhte->lxt_cnt = 0;
- dma_wmb();
- rhte->lxt_start = NULL;
- dma_wmb();
-err:
- /* free the clones already made */
- for (j = 0; j < i; j++) {
- aun = (lxt[j].rlba_base >> MC_CHUNK_SHIFT);
- ba_free(&blka->ba_lun, aun);
- }
- kfree(lxt);
- goto out;
-}
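clone_lxt() follows a take-references-then-roll-back shape: it bumps the block allocator reference count for every chunk in the source table and, if any ba_clone() fails, releases only the references already taken before freeing the copied table. A toy, self-contained version of that rollback pattern (the refcount array and the failure injection are invented for illustration):

#include <stddef.h>

#define NCHUNKS 64

static unsigned int refcnt[NCHUNKS];      /* stand-in for the block allocator */

static int toy_clone(unsigned int chunk)
{
	if (chunk >= NCHUNKS)
		return -1;                /* injected failure path */
	refcnt[chunk]++;
	return 0;
}

static void toy_release(unsigned int chunk)
{
	refcnt[chunk]--;
}

/* Clone every chunk referenced by src[0..cnt); on failure undo what was taken. */
static int clone_chunks(const unsigned int *src, size_t cnt)
{
	size_t i, j;

	for (i = 0; i < cnt; i++) {
		if (toy_clone(src[i]) != 0) {
			for (j = 0; j < i; j++)   /* roll back partial work */
				toy_release(src[j]);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	unsigned int src[] = { 3, 7, 99 };        /* 99 is out of range -> fails */

	return (clone_chunks(src, 3) == -1 &&
		refcnt[3] == 0 && refcnt[7] == 0) ? 0 : 1;
}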
-
-/**
- * cxlflash_disk_clone() - clone a context by making snapshot of another
- * @sdev: SCSI device associated with LUN owning virtual LUN.
- * @arg: Clone ioctl data structure.
- *
- * This routine effectively performs cxlflash_disk_open operation for each
- * in-use virtual resource in the source context. Note that the destination
- * context must be in pristine state and cannot have any resource handles
- * open at the time of the clone.
- *
- * Return: 0 on success, -errno on failure
- */
-int cxlflash_disk_clone(struct scsi_device *sdev, void *arg)
-{
- struct dk_cxlflash_clone *clone = arg;
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct blka *blka = &gli->blka;
- struct afu *afu = cfg->afu;
- struct dk_cxlflash_release release = { { 0 }, 0 };
-
- struct ctx_info *ctxi_src = NULL,
- *ctxi_dst = NULL;
- struct lun_access *lun_access_src, *lun_access_dst;
- u32 perms;
- u64 ctxid_src = DECODE_CTXID(clone->context_id_src),
- ctxid_dst = DECODE_CTXID(clone->context_id_dst),
- rctxid_src = clone->context_id_src,
- rctxid_dst = clone->context_id_dst;
- int i, j;
- int rc = 0;
- bool found;
- LIST_HEAD(sidecar);
-
- dev_dbg(dev, "%s: ctxid_src=%llu ctxid_dst=%llu\n",
- __func__, ctxid_src, ctxid_dst);
-
- /* Do not clone yourself */
- if (unlikely(rctxid_src == rctxid_dst)) {
- rc = -EINVAL;
- goto out;
- }
-
- if (unlikely(gli->mode != MODE_VIRTUAL)) {
- rc = -EINVAL;
- dev_dbg(dev, "%s: Only supported on virtual LUNs mode=%u\n",
- __func__, gli->mode);
- goto out;
- }
-
- ctxi_src = get_context(cfg, rctxid_src, lli, CTX_CTRL_CLONE);
- ctxi_dst = get_context(cfg, rctxid_dst, lli, 0);
- if (unlikely(!ctxi_src || !ctxi_dst)) {
- dev_dbg(dev, "%s: Bad context ctxid_src=%llu ctxid_dst=%llu\n",
- __func__, ctxid_src, ctxid_dst);
- rc = -EINVAL;
- goto out;
- }
-
- /* Verify there is no open resource handle in the destination context */
- for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
- if (ctxi_dst->rht_start[i].nmask != 0) {
- rc = -EINVAL;
- goto out;
- }
-
- /* Clone LUN access list */
- list_for_each_entry(lun_access_src, &ctxi_src->luns, list) {
- found = false;
- list_for_each_entry(lun_access_dst, &ctxi_dst->luns, list)
- if (lun_access_dst->sdev == lun_access_src->sdev) {
- found = true;
- break;
- }
-
- if (!found) {
- lun_access_dst = kzalloc(sizeof(*lun_access_dst),
- GFP_KERNEL);
- if (unlikely(!lun_access_dst)) {
- dev_err(dev, "%s: lun_access allocation fail\n",
- __func__);
- rc = -ENOMEM;
- goto out;
- }
-
- *lun_access_dst = *lun_access_src;
- list_add(&lun_access_dst->list, &sidecar);
- }
- }
-
- if (unlikely(!ctxi_src->rht_out)) {
- dev_dbg(dev, "%s: Nothing to clone\n", __func__);
- goto out_success;
- }
-
- /* User specified permission on attach */
- perms = ctxi_dst->rht_perms;
-
- /*
- * Copy over checked-out RHT (and their associated LXT) entries by
- * hand, stopping after we've copied all outstanding entries and
- * cleaning up if the clone fails.
- *
- * Note: This loop is equivalent to performing cxlflash_disk_open and
- * cxlflash_vlun_resize. As such, LUN accounting needs to be taken into
- * account by attaching after each successful RHT entry clone. In the
- * event that a clone failure is experienced, the LUN detach is handled
- * via the cleanup performed by _cxlflash_disk_release.
- */
- for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
- if (ctxi_src->rht_out == ctxi_dst->rht_out)
- break;
- if (ctxi_src->rht_start[i].nmask == 0)
- continue;
-
- /* Consume a destination RHT entry */
- ctxi_dst->rht_out++;
- ctxi_dst->rht_start[i].nmask = ctxi_src->rht_start[i].nmask;
- ctxi_dst->rht_start[i].fp =
- SISL_RHT_FP_CLONE(ctxi_src->rht_start[i].fp, perms);
- ctxi_dst->rht_lun[i] = ctxi_src->rht_lun[i];
-
- rc = clone_lxt(afu, blka, ctxid_dst, i,
- &ctxi_dst->rht_start[i],
- &ctxi_src->rht_start[i]);
- if (rc) {
- marshal_clone_to_rele(clone, &release);
- for (j = 0; j < i; j++) {
- release.rsrc_handle = j;
- _cxlflash_disk_release(sdev, ctxi_dst,
- &release);
- }
-
- /* Put back the one we failed on */
- rhte_checkin(ctxi_dst, &ctxi_dst->rht_start[i]);
- goto err;
- }
-
- cxlflash_lun_attach(gli, gli->mode, false);
- }
-
-out_success:
- list_splice(&sidecar, &ctxi_dst->luns);
-
- /* fall through */
-out:
- if (ctxi_src)
- put_context(ctxi_src);
- if (ctxi_dst)
- put_context(ctxi_dst);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-
-err:
- list_for_each_entry_safe(lun_access_src, lun_access_dst, &sidecar, list)
- kfree(lun_access_src);
- goto out;
-}
diff --git a/drivers/scsi/cxlflash/vlun.h b/drivers/scsi/cxlflash/vlun.h
deleted file mode 100644
index 68e3ea52fe80..000000000000
--- a/drivers/scsi/cxlflash/vlun.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#ifndef _CXLFLASH_VLUN_H
-#define _CXLFLASH_VLUN_H
-
-/* RHT - Resource Handle Table */
-#define MC_RHT_NMASK 16 /* in bits */
-#define MC_CHUNK_SHIFT MC_RHT_NMASK /* shift to go from LBA to chunk# */
-
-#define HIBIT (BITS_PER_LONG - 1)
-
-#define MAX_AUN_CLONE_CNT 0xFF
-
-/*
- * LXT - LBA Translation Table
- *
- * +-------+-------+-------+-------+-------+-------+-------+---+---+
- * | RLBA_BASE |LUN_IDX| P |SEL|
- * +-------+-------+-------+-------+-------+-------+-------+---+---+
- *
- * The LXT Entry contains the physical LBA where the chunk starts (RLBA_BASE).
- * The AFU ORs the low-order bits from the virtual LBA (the offset into the
- * chunk) with RLBA_BASE. The result is the physical LBA to be sent to storage.
- * The LXT Entry also contains an index into a LUN table and a bitmask of which
- * outgoing (FC) ports can be selected. The port select bit-mask is ANDed
- * with a global port select bit-mask maintained by the driver.
- * In addition, it has permission bits that are ANDed with the
- * RHT permissions to arrive at the final permissions for the chunk.
- *
- * LXT tables are allocated dynamically in groups. This is done to avoid
- * a malloc/free overhead each time the LXT has to grow or shrink.
- *
- * Based on the current lxt_cnt (used), it is always possible to know
- * how many are allocated (used+free). The number of allocated entries is
- * not stored anywhere.
- *
- * The LXT table is re-allocated whenever it needs to cross into another group.
- */
-#define LXT_GROUP_SIZE 8
-#define LXT_NUM_GROUPS(lxt_cnt) (((lxt_cnt) + 7)/8) /* alloc'ed groups */
-#define LXT_LUNIDX_SHIFT 8 /* LXT entry, shift for LUN index */
-#define LXT_PERM_SHIFT 4 /* LXT entry, shift for permission bits */
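The entry layout described above matches the packing grow_lxt() performs: chunk number in the high bits (shifted by MC_CHUNK_SHIFT), then the LUN table index, the permission bits and the port-select mask. A small sketch of packing one entry (MC_CHUNK_SHIFT and the shifts come from the defines above; the RHT_PERM_RW value of 0x3 and the sample numbers are assumptions):

#include <stdint.h>
#include <stdio.h>

#define MC_CHUNK_SHIFT   16   /* from MC_RHT_NMASK above            */
#define LXT_LUNIDX_SHIFT 8
#define LXT_PERM_SHIFT   4
#define RHT_PERM_RW      0x3  /* assumed read|write permission bits */

int main(void)
{
	uint64_t aun      = 42;   /* chunk number handed out by the block allocator */
	uint64_t lun_idx  = 5;    /* slot in the per-port LUN table                 */
	uint64_t port_sel = 0x3;  /* both FC ports selected                         */

	uint64_t rlba_base = (aun << MC_CHUNK_SHIFT) |
			     (lun_idx << LXT_LUNIDX_SHIFT) |
			     (RHT_PERM_RW << LXT_PERM_SHIFT) |
			     port_sel;

	printf("rlba_base = 0x%llx\n", (unsigned long long)rlba_base);  /* 0x2a0533 */
	return 0;
}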
-
-struct ba_lun_info {
- u64 *lun_alloc_map;
- u32 lun_bmap_size;
- u32 total_aus;
- u64 free_aun_cnt;
-
- /* indices to be used for elevator lookup of free map */
- u32 free_low_idx;
- u32 free_curr_idx;
- u32 free_high_idx;
-
- u8 *aun_clone_map;
-};
-
-struct ba_lun {
- u64 lun_id;
- u64 wwpn;
- size_t lsize; /* LUN size in number of LBAs */
- size_t lba_size; /* LBA size in number of bytes */
- size_t au_size; /* Allocation Unit size in number of LBAs */
- struct ba_lun_info *ba_lun_handle;
-};
-
-/* Block Allocator */
-struct blka {
- struct ba_lun ba_lun;
- u64 nchunk; /* number of chunks */
- struct mutex mutex;
-};
-
-#endif /* ifndef _CXLFLASH_VLUN_H */
diff --git a/drivers/scsi/elx/efct/efct_driver.c b/drivers/scsi/elx/efct/efct_driver.c
index 8469c156ab33..59f277593785 100644
--- a/drivers/scsi/elx/efct/efct_driver.c
+++ b/drivers/scsi/elx/efct/efct_driver.c
@@ -735,7 +735,7 @@ efct_pci_io_resume(struct pci_dev *pdev)
MODULE_DEVICE_TABLE(pci, efct_pci_table);
-static struct pci_error_handlers efct_pci_err_handler = {
+static const struct pci_error_handlers efct_pci_err_handler = {
.error_detected = efct_pci_io_error_detected,
.slot_reset = efct_pci_io_slot_reset,
.resume = efct_pci_io_resume,
diff --git a/drivers/scsi/fnic/fdls_disc.c b/drivers/scsi/fnic/fdls_disc.c
index 11211c469583..4c6bbf417a9a 100644
--- a/drivers/scsi/fnic/fdls_disc.c
+++ b/drivers/scsi/fnic/fdls_disc.c
@@ -308,43 +308,29 @@ void fdls_schedule_oxid_free_retry_work(struct work_struct *work)
struct fnic *fnic = iport->fnic;
struct reclaim_entry_s *reclaim_entry;
unsigned long delay_j = msecs_to_jiffies(OXID_RECLAIM_TOV(iport));
+ unsigned long flags;
int idx;
- spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
-
for_each_set_bit(idx, oxid_pool->pending_schedule_free, FNIC_OXID_POOL_SZ) {
FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"Schedule oxid free. oxid idx: %d\n", idx);
- spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
- reclaim_entry = (struct reclaim_entry_s *)
- kzalloc(sizeof(struct reclaim_entry_s), GFP_KERNEL);
- spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
-
+ reclaim_entry = kzalloc(sizeof(*reclaim_entry), GFP_KERNEL);
if (!reclaim_entry) {
- FNIC_FCS_DBG(KERN_WARNING, fnic->host, fnic->fnic_num,
- "Failed to allocate memory for reclaim struct for oxid idx: 0x%x\n",
- idx);
-
schedule_delayed_work(&oxid_pool->schedule_oxid_free_retry,
msecs_to_jiffies(SCHEDULE_OXID_FREE_RETRY_TIME));
- spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
return;
}
- if (test_and_clear_bit(idx, oxid_pool->pending_schedule_free)) {
- reclaim_entry->oxid_idx = idx;
- reclaim_entry->expires = round_jiffies(jiffies + delay_j);
- list_add_tail(&reclaim_entry->links, &oxid_pool->oxid_reclaim_list);
- schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j);
- } else {
- /* unlikely scenario, free the allocated memory and continue */
- kfree(reclaim_entry);
- }
-}
-
- spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ clear_bit(idx, oxid_pool->pending_schedule_free);
+ reclaim_entry->oxid_idx = idx;
+ reclaim_entry->expires = round_jiffies(jiffies + delay_j);
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ list_add_tail(&reclaim_entry->links, &oxid_pool->oxid_reclaim_list);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j);
+ }
}
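The rework above moves the kzalloc(GFP_KERNEL), which may sleep, out from under fnic_lock and shrinks the locked region to the list insertion itself. A generic userspace sketch of the same allocate-before-lock shape using pthreads (the entry type and list are stand-ins, not the fnic structures):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

struct entry {
	struct entry *next;
	int idx;
};

static struct entry *head;

/* Allocate outside the lock (allocation may block), lock only for the insert. */
int queue_entry(int idx)
{
	struct entry *e = calloc(1, sizeof(*e));

	if (!e)
		return -1;             /* caller may retry later */

	e->idx = idx;

	pthread_mutex_lock(&list_lock);
	e->next = head;                /* short critical section: list update only */
	head = e;
	pthread_mutex_unlock(&list_lock);
	return 0;
}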
static bool fdls_is_oxid_fabric_req(uint16_t oxid)
@@ -1567,9 +1553,9 @@ void fdls_send_fabric_logo(struct fnic_iport_s *iport)
iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED;
- FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
- "0x%x: FDLS send fabric LOGO with oxid: 0x%x",
- iport->fcid, oxid);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send fabric LOGO with oxid: 0x%x",
+ iport->fcid, oxid);
fnic_send_fcoe_frame(iport, frame, frame_size);
@@ -1898,7 +1884,6 @@ static void fdls_fdmi_register_hba(struct fnic_iport_s *iport)
if (fnic->subsys_desc_len >= FNIC_FDMI_MODEL_LEN)
fnic->subsys_desc_len = FNIC_FDMI_MODEL_LEN - 1;
strscpy_pad(data, fnic->subsys_desc, FNIC_FDMI_MODEL_LEN);
- data[FNIC_FDMI_MODEL_LEN - 1] = 0;
fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MODEL, FNIC_FDMI_MODEL_LEN,
data, &attr_off_bytes);
@@ -2061,7 +2046,6 @@ static void fdls_fdmi_register_pa(struct fnic_iport_s *iport)
snprintf(tmp_data, FNIC_FDMI_OS_NAME_LEN - 1, "host%d",
fnic->host->host_no);
strscpy_pad(data, tmp_data, FNIC_FDMI_OS_NAME_LEN);
- data[FNIC_FDMI_OS_NAME_LEN - 1] = 0;
fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_OS_NAME,
FNIC_FDMI_OS_NAME_LEN, data, &attr_off_bytes);
@@ -2071,7 +2055,6 @@ static void fdls_fdmi_register_pa(struct fnic_iport_s *iport)
sprintf(fc_host_system_hostname(fnic->host), "%s", utsname()->nodename);
strscpy_pad(data, fc_host_system_hostname(fnic->host),
FNIC_FDMI_HN_LEN);
- data[FNIC_FDMI_HN_LEN - 1] = 0;
fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_HOST_NAME,
FNIC_FDMI_HN_LEN, data, &attr_off_bytes);
@@ -4659,13 +4642,13 @@ fnic_fdls_validate_and_get_frame_type(struct fnic_iport_s *iport,
d_id = ntoh24(fchdr->fh_d_id);
/* some common validation */
- if (fdls_get_state(fabric) > FDLS_STATE_FABRIC_FLOGI) {
- if ((iport->fcid != d_id) || (!FNIC_FC_FRAME_CS_CTL(fchdr))) {
- FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
- "invalid frame received. Dropping frame");
- return -1;
- }
+ if (fdls_get_state(fabric) > FDLS_STATE_FABRIC_FLOGI) {
+ if (iport->fcid != d_id || (!FNIC_FC_FRAME_CS_CTL(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "invalid frame received. Dropping frame");
+ return -1;
}
+ }
/* BLS ABTS response */
if ((fchdr->fh_r_ctl == FC_RCTL_BA_ACC)
@@ -4682,7 +4665,7 @@ fnic_fdls_validate_and_get_frame_type(struct fnic_iport_s *iport,
"Received unexpected ABTS RSP(oxid:0x%x) from 0x%x. Dropping frame",
oxid, s_id);
return -1;
- }
+ }
return FNIC_FABRIC_BLS_ABTS_RSP;
} else if (fdls_is_oxid_fdmi_req(oxid)) {
return FNIC_FDMI_BLS_ABTS_RSP;
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 0b20ac8c3f46..3dd06376e97b 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -1365,10 +1365,9 @@ static void __exit fnic_cleanup_module(void)
if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON)
destroy_workqueue(reset_fnic_work_queue);
- if (fnic_fip_queue) {
- flush_workqueue(fnic_fip_queue);
+ if (fnic_fip_queue)
destroy_workqueue(fnic_fip_queue);
- }
+
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
kmem_cache_destroy(fnic_io_req_cache);
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 2d438d722d0b..e17f5d8226bf 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -633,8 +633,7 @@ extern struct dentry *hisi_sas_debugfs_dir;
extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba);
extern int hisi_sas_alloc(struct hisi_hba *hisi_hba);
extern void hisi_sas_free(struct hisi_hba *hisi_hba);
-extern u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis,
- int direction);
+extern u8 hisi_sas_get_ata_protocol(struct sas_task *task);
extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port);
extern void hisi_sas_sata_done(struct sas_task *task,
struct hisi_sas_slot *slot);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index da4a2ed8ee86..3596414d970b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -21,8 +21,32 @@ struct hisi_sas_internal_abort_data {
bool rst_ha_timeout; /* reset the HA for timeout */
};
-u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
+static u8 hisi_sas_get_ata_protocol_from_tf(struct ata_queued_cmd *qc)
{
+ if (!qc)
+ return HISI_SAS_SATA_PROTOCOL_PIO;
+
+ switch (qc->tf.protocol) {
+ case ATA_PROT_NODATA:
+ return HISI_SAS_SATA_PROTOCOL_NONDATA;
+ case ATA_PROT_PIO:
+ return HISI_SAS_SATA_PROTOCOL_PIO;
+ case ATA_PROT_DMA:
+ return HISI_SAS_SATA_PROTOCOL_DMA;
+ case ATA_PROT_NCQ_NODATA:
+ case ATA_PROT_NCQ:
+ return HISI_SAS_SATA_PROTOCOL_FPDMA;
+ default:
+ return HISI_SAS_SATA_PROTOCOL_PIO;
+ }
+}
+
+u8 hisi_sas_get_ata_protocol(struct sas_task *task)
+{
+ struct host_to_dev_fis *fis = &task->ata_task.fis;
+ struct ata_queued_cmd *qc = task->uldd_task;
+ int direction = task->data_dir;
+
switch (fis->command) {
case ATA_CMD_FPDMA_WRITE:
case ATA_CMD_FPDMA_READ:
@@ -93,7 +117,7 @@ u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
if (direction == DMA_NONE)
return HISI_SAS_SATA_PROTOCOL_NONDATA;
- return HISI_SAS_SATA_PROTOCOL_PIO;
+ return hisi_sas_get_ata_protocol_from_tf(qc);
}
}
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index bb78e53c66e2..6621d633b2cc 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1806,7 +1806,7 @@ static struct platform_driver hisi_sas_v1_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = sas_v1_of_match,
- .acpi_match_table = ACPI_PTR(sas_v1_acpi_match),
+ .acpi_match_table = sas_v1_acpi_match,
},
};
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 71cd5b4450c2..04ee02797ca3 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -2538,9 +2538,7 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
(task->ata_task.fis.control & ATA_SRST))
dw1 |= 1 << CMD_HDR_RESET_OFF;
- dw1 |= (hisi_sas_get_ata_protocol(
- &task->ata_task.fis, task->data_dir))
- << CMD_HDR_FRAME_TYPE_OFF;
+ dw1 |= (hisi_sas_get_ata_protocol(task)) << CMD_HDR_FRAME_TYPE_OFF;
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
hdr->dw1 = cpu_to_le32(dw1);
@@ -3653,7 +3651,7 @@ static struct platform_driver hisi_sas_v2_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = sas_v2_of_match,
- .acpi_match_table = ACPI_PTR(sas_v2_acpi_match),
+ .acpi_match_table = sas_v2_acpi_match,
},
};
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 48b95d9a7927..095bbf80c34e 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -1456,9 +1456,7 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
(task->ata_task.fis.control & ATA_SRST))
dw1 |= 1 << CMD_HDR_RESET_OFF;
- dw1 |= (hisi_sas_get_ata_protocol(
- &task->ata_task.fis, task->data_dir))
- << CMD_HDR_FRAME_TYPE_OFF;
+ dw1 |= (hisi_sas_get_ata_protocol(task)) << CMD_HDR_FRAME_TYPE_OFF;
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
if (FIS_CMD_IS_UNCONSTRAINED(task->ata_task.fis))
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 84d8de07b7ae..c73a71ac3c29 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -453,17 +453,13 @@ static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int status, len;
+ int status;
struct ctlr_info *h;
struct Scsi_Host *shost = class_to_shost(dev);
- char tmpbuf[10];
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
- len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
- strncpy(tmpbuf, buf, len);
- tmpbuf[len] = '\0';
- if (sscanf(tmpbuf, "%d", &status) != 1)
+ if (kstrtoint(buf, 10, &status))
return -EINVAL;
h = shost_to_hba(shost);
h->acciopath_status = !!status;
@@ -477,17 +473,13 @@ static ssize_t host_store_raid_offload_debug(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int debug_level, len;
+ int debug_level;
struct ctlr_info *h;
struct Scsi_Host *shost = class_to_shost(dev);
- char tmpbuf[10];
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
- len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
- strncpy(tmpbuf, buf, len);
- tmpbuf[len] = '\0';
- if (sscanf(tmpbuf, "%d", &debug_level) != 1)
+ if (kstrtoint(buf, 10, &debug_level))
return -EINVAL;
if (debug_level < 0)
debug_level = 0;
@@ -7238,8 +7230,7 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
static void init_driver_version(char *driver_version, int len)
{
- memset(driver_version, 0, len);
- strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
+ strscpy_pad(driver_version, HPSA " " HPSA_DRIVER_VERSION, len);
}
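strscpy_pad() both guarantees NUL termination (which strncpy() does not when the source fills the buffer) and zero-fills the rest of the destination, which is what init_driver_version() relies on. A rough userspace approximation of those semantics, purely to illustrate the behavior; the kernel helper itself returns -E2BIG on truncation, mimicked with -1 here:

#include <stdio.h>
#include <string.h>

/* Rough userspace model of strscpy_pad(): copy, always terminate, zero the rest. */
static long strscpy_pad_like(char *dst, const char *src, size_t size)
{
	size_t len;
	long ret;

	if (size == 0)
		return -1;

	len = strnlen(src, size);
	ret = (long)len;
	if (len == size) {          /* source does not fit: truncate */
		len = size - 1;
		ret = -1;
	}
	memcpy(dst, src, len);
	memset(dst + len, 0, size - len);
	return ret;
}

int main(void)
{
	char buf[8];

	strscpy_pad_like(buf, "hpsa sample-version", sizeof(buf));
	printf("%s\n", buf);        /* "hpsa sa" - truncated but terminated */
	return 0;
}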
static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 16d085d56e9d..9e42230e42b8 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -2922,9 +2922,7 @@ static long ibmvscsis_alloctimer(struct scsi_info *vscsi)
struct timer_cb *p_timer;
p_timer = &vscsi->rsp_q_timer;
- hrtimer_init(&p_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-
- p_timer->timer.function = ibmvscsis_service_wait_q;
+ hrtimer_setup(&p_timer->timer, ibmvscsis_service_wait_q, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
p_timer->started = false;
p_timer->timer_pops = 0;
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index cce6c6b409ad..94adb6ac02a4 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -3631,8 +3631,8 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
break;
- case RESERVE:
- case RELEASE:
+ case RESERVE_6:
+ case RELEASE_6:
scb->scsi_cmd->result = DID_OK << 16;
break;
@@ -3899,8 +3899,8 @@ ips_chkstatus(ips_ha_t * ha, IPS_STATUS * pstatus)
case WRITE_6:
case READ_10:
case WRITE_10:
- case RESERVE:
- case RELEASE:
+ case RESERVE_6:
+ case RELEASE_6:
break;
case MODE_SENSE:
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 73085d2f5c43..acf0c2038d20 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -91,31 +91,31 @@ MODULE_DEVICE_TABLE(pci, isci_id_table);
/* linux isci specific settings */
-unsigned char no_outbound_task_to = 2;
+static unsigned char no_outbound_task_to = 2;
module_param(no_outbound_task_to, byte, 0);
MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)");
-u16 ssp_max_occ_to = 20;
+static u16 ssp_max_occ_to = 20;
module_param(ssp_max_occ_to, ushort, 0);
MODULE_PARM_DESC(ssp_max_occ_to, "SSP Max occupancy timeout (100us incr)");
-u16 stp_max_occ_to = 5;
+static u16 stp_max_occ_to = 5;
module_param(stp_max_occ_to, ushort, 0);
MODULE_PARM_DESC(stp_max_occ_to, "STP Max occupancy timeout (100us incr)");
-u16 ssp_inactive_to = 5;
+static u16 ssp_inactive_to = 5;
module_param(ssp_inactive_to, ushort, 0);
MODULE_PARM_DESC(ssp_inactive_to, "SSP inactivity timeout (100us incr)");
-u16 stp_inactive_to = 5;
+static u16 stp_inactive_to = 5;
module_param(stp_inactive_to, ushort, 0);
MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)");
-unsigned char phy_gen = SCIC_SDS_PARM_GEN2_SPEED;
+static unsigned char phy_gen = SCIC_SDS_PARM_GEN2_SPEED;
module_param(phy_gen, byte, 0);
MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
-unsigned char max_concurr_spinup;
+static unsigned char max_concurr_spinup;
module_param(max_concurr_spinup, byte, 0);
MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
index 4e6b1decbca7..f6a8fe206415 100644
--- a/drivers/scsi/isci/isci.h
+++ b/drivers/scsi/isci/isci.h
@@ -473,13 +473,6 @@ static inline void sci_swab32_cpy(void *_dest, void *_src, ssize_t word_cnt)
dest[word_cnt] = swab32(src[word_cnt]);
}
-extern unsigned char no_outbound_task_to;
-extern u16 ssp_max_occ_to;
-extern u16 stp_max_occ_to;
-extern u16 ssp_inactive_to;
-extern u16 stp_inactive_to;
-extern unsigned char phy_gen;
-extern unsigned char max_concurr_spinup;
extern uint cable_selection_override;
irqreturn_t isci_msix_isr(int vec, void *data);
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index 27ae45332704..561ae3f2cbbd 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -198,7 +198,7 @@ enum sci_status sci_remote_device_reset(
 * device. When there is no active IO for the device it is in this
* state.
*
- * @SCI_STP_DEV_CMD: This is the command state for for the STP remote
+ * @SCI_STP_DEV_CMD: This is the command state for the STP remote
* device. This state is entered when the device is processing a
* non-NCQ command. The device object will fail any new start IO
* requests until this command is complete.
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index e81f60985193..7b4fe0e6afb2 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -17,7 +17,6 @@
* Zhenyu Wang
*/
-#include <crypto/hash.h>
#include <linux/types.h>
#include <linux/inet.h>
#include <linux/slab.h>
@@ -468,8 +467,7 @@ static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr,
* sufficient room.
*/
if (conn->hdrdgst_en) {
- iscsi_tcp_dgst_header(tcp_sw_conn->tx_hash, hdr, hdrlen,
- hdr + hdrlen);
+ iscsi_tcp_dgst_header(hdr, hdrlen, hdr + hdrlen);
hdrlen += ISCSI_DIGEST_SIZE;
}
@@ -494,7 +492,7 @@ iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
- struct ahash_request *tx_hash = NULL;
+ u32 *tx_crcp = NULL;
unsigned int hdr_spec_len;
ISCSI_SW_TCP_DBG(conn, "offset=%d, datalen=%d %s\n", offset, len,
@@ -507,11 +505,10 @@ iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
if (conn->datadgst_en)
- tx_hash = tcp_sw_conn->tx_hash;
+ tx_crcp = &tcp_sw_conn->tx_crc;
return iscsi_segment_seek_sg(&tcp_sw_conn->out.data_segment,
- sg, count, offset, len,
- NULL, tx_hash);
+ sg, count, offset, len, NULL, tx_crcp);
}
static void
@@ -520,7 +517,7 @@ iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
- struct ahash_request *tx_hash = NULL;
+ u32 *tx_crcp = NULL;
unsigned int hdr_spec_len;
ISCSI_SW_TCP_DBG(conn, "datalen=%zd %s\n", len, conn->datadgst_en ?
@@ -532,10 +529,10 @@ iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
if (conn->datadgst_en)
- tx_hash = tcp_sw_conn->tx_hash;
+ tx_crcp = &tcp_sw_conn->tx_crc;
iscsi_segment_init_linear(&tcp_sw_conn->out.data_segment,
- data, len, NULL, tx_hash);
+ data, len, NULL, tx_crcp);
}
static int iscsi_sw_tcp_pdu_init(struct iscsi_task *task,
@@ -583,7 +580,6 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn;
struct iscsi_tcp_conn *tcp_conn;
struct iscsi_sw_tcp_conn *tcp_sw_conn;
- struct crypto_ahash *tfm;
cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*tcp_sw_conn),
conn_idx);
@@ -596,37 +592,9 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
tcp_sw_conn->queue_recv = iscsi_recv_from_iscsi_q;
mutex_init(&tcp_sw_conn->sock_lock);
-
- tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(tfm))
- goto free_conn;
-
- tcp_sw_conn->tx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!tcp_sw_conn->tx_hash)
- goto free_tfm;
- ahash_request_set_callback(tcp_sw_conn->tx_hash, 0, NULL, NULL);
-
- tcp_sw_conn->rx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!tcp_sw_conn->rx_hash)
- goto free_tx_hash;
- ahash_request_set_callback(tcp_sw_conn->rx_hash, 0, NULL, NULL);
-
- tcp_conn->rx_hash = tcp_sw_conn->rx_hash;
+ tcp_conn->rx_crcp = &tcp_sw_conn->rx_crc;
return cls_conn;
-
-free_tx_hash:
- ahash_request_free(tcp_sw_conn->tx_hash);
-free_tfm:
- crypto_free_ahash(tfm);
-free_conn:
- iscsi_conn_printk(KERN_ERR, conn,
- "Could not create connection due to crc32c "
- "loading error. Make sure the crc32c "
- "module is built as a module or into the "
- "kernel\n");
- iscsi_tcp_conn_teardown(cls_conn);
- return NULL;
}
static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
@@ -664,20 +632,8 @@ static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
struct iscsi_conn *conn = cls_conn->dd_data;
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
iscsi_sw_tcp_release_conn(conn);
-
- ahash_request_free(tcp_sw_conn->rx_hash);
- if (tcp_sw_conn->tx_hash) {
- struct crypto_ahash *tfm;
-
- tfm = crypto_ahash_reqtfm(tcp_sw_conn->tx_hash);
- ahash_request_free(tcp_sw_conn->tx_hash);
- crypto_free_ahash(tfm);
- }
-
iscsi_tcp_conn_teardown(cls_conn);
}
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 89a6fc552f0b..c3e5d9fa6add 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -41,8 +41,8 @@ struct iscsi_sw_tcp_conn {
void (*old_write_space)(struct sock *);
/* data and header digests */
- struct ahash_request *tx_hash; /* CRC32C (Tx) */
- struct ahash_request *rx_hash; /* CRC32C (Rx) */
+ u32 tx_crc; /* CRC32C (Tx) */
+ u32 rx_crc; /* CRC32C (Rx) */
/* MIB custom statistics */
uint32_t sendpage_failures_cnt;
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index c182aa83f2c9..e90805ba868f 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -15,7 +15,7 @@
* Zhenyu Wang
*/
-#include <crypto/hash.h>
+#include <linux/crc32c.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/inet.h>
@@ -168,7 +168,7 @@ iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest)
segment->size = ISCSI_DIGEST_SIZE;
segment->copied = 0;
segment->sg = NULL;
- segment->hash = NULL;
+ segment->crcp = NULL;
}
/**
@@ -191,29 +191,27 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
struct iscsi_segment *segment, int recv,
unsigned copied)
{
- struct scatterlist sg;
unsigned int pad;
ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "copied %u %u size %u %s\n",
segment->copied, copied, segment->size,
recv ? "recv" : "xmit");
- if (segment->hash && copied) {
- /*
- * If a segment is kmapd we must unmap it before sending
- * to the crypto layer since that will try to kmap it again.
- */
- iscsi_tcp_segment_unmap(segment);
-
- if (!segment->data) {
- sg_init_table(&sg, 1);
- sg_set_page(&sg, sg_page(segment->sg), copied,
- segment->copied + segment->sg_offset +
- segment->sg->offset);
- } else
- sg_init_one(&sg, segment->data + segment->copied,
- copied);
- ahash_request_set_crypt(segment->hash, &sg, NULL, copied);
- crypto_ahash_update(segment->hash);
+ if (segment->crcp && copied) {
+ if (segment->data) {
+ *segment->crcp = crc32c(*segment->crcp,
+ segment->data + segment->copied,
+ copied);
+ } else {
+ const void *data;
+
+ data = kmap_local_page(sg_page(segment->sg));
+ *segment->crcp = crc32c(*segment->crcp,
+ data + segment->copied +
+ segment->sg_offset +
+ segment->sg->offset,
+ copied);
+ kunmap_local(data);
+ }
}
segment->copied += copied;
@@ -258,10 +256,8 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
* Set us up for transferring the data digest. hdr digest
* is completely handled in hdr done function.
*/
- if (segment->hash) {
- ahash_request_set_crypt(segment->hash, NULL,
- segment->digest, 0);
- crypto_ahash_final(segment->hash);
+ if (segment->crcp) {
+ put_unaligned_le32(~*segment->crcp, segment->digest);
iscsi_tcp_segment_splice_digest(segment,
recv ? segment->recv_digest : segment->digest);
return 0;
@@ -282,8 +278,7 @@ EXPORT_SYMBOL_GPL(iscsi_tcp_segment_done);
* given buffer, and returns the number of bytes
* consumed, which can actually be less than @len.
*
- * If hash digest is enabled, the function will update the
- * hash while copying.
+ * If CRC is enabled, the function will update the CRC while copying.
* Combining these two operations doesn't buy us a lot (yet),
* but in the future we could implement combined copy+crc,
 * just the way we do for network layer checksums.
@@ -311,14 +306,10 @@ iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
}
inline void
-iscsi_tcp_dgst_header(struct ahash_request *hash, const void *hdr,
- size_t hdrlen, unsigned char digest[ISCSI_DIGEST_SIZE])
+iscsi_tcp_dgst_header(const void *hdr, size_t hdrlen,
+ unsigned char digest[ISCSI_DIGEST_SIZE])
{
- struct scatterlist sg;
-
- sg_init_one(&sg, hdr, hdrlen);
- ahash_request_set_crypt(hash, &sg, digest, hdrlen);
- crypto_ahash_digest(hash);
+ put_unaligned_le32(~crc32c(~0, hdr, hdrlen), digest);
}
EXPORT_SYMBOL_GPL(iscsi_tcp_dgst_header);
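With the ahash path gone, the header digest is computed directly: seed the CRC32C with all-ones, feed the header bytes, invert at the end and store the result little-endian. A standalone bit-at-a-time CRC32C that reproduces the same convention (the kernel uses its optimized lib/crc32c implementation; this reflected-polynomial loop is only for illustration):

#include <stdint.h>
#include <stdio.h>

/* Reflected CRC32C (Castagnoli) update, no final inversion - like lib/crc32c. */
static uint32_t crc32c_update(uint32_t crc, const void *buf, size_t len)
{
	const unsigned char *p = buf;

	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0x82F63B78U & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	const char hdr[] = "123456789";              /* standard CRC check input   */
	uint32_t crc = ~crc32c_update(~0U, hdr, 9);  /* seed ~0, invert at the end */
	unsigned char digest[4];

	/* Digest goes on the wire least-significant byte first. */
	for (int i = 0; i < 4; i++)
		digest[i] = (crc >> (8 * i)) & 0xff;

	printf("crc32c=0x%08x digest=%02x %02x %02x %02x\n",   /* 0xe3069283 */
	       crc, digest[0], digest[1], digest[2], digest[3]);
	return 0;
}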
@@ -343,24 +334,23 @@ iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
*/
static inline void
__iscsi_segment_init(struct iscsi_segment *segment, size_t size,
- iscsi_segment_done_fn_t *done, struct ahash_request *hash)
+ iscsi_segment_done_fn_t *done, u32 *crcp)
{
memset(segment, 0, sizeof(*segment));
segment->total_size = size;
segment->done = done;
- if (hash) {
- segment->hash = hash;
- crypto_ahash_init(hash);
+ if (crcp) {
+ segment->crcp = crcp;
+ *crcp = ~0;
}
}
inline void
iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
- size_t size, iscsi_segment_done_fn_t *done,
- struct ahash_request *hash)
+ size_t size, iscsi_segment_done_fn_t *done, u32 *crcp)
{
- __iscsi_segment_init(segment, size, done, hash);
+ __iscsi_segment_init(segment, size, done, crcp);
segment->data = data;
segment->size = size;
}
@@ -370,13 +360,12 @@ inline int
iscsi_segment_seek_sg(struct iscsi_segment *segment,
struct scatterlist *sg_list, unsigned int sg_count,
unsigned int offset, size_t size,
- iscsi_segment_done_fn_t *done,
- struct ahash_request *hash)
+ iscsi_segment_done_fn_t *done, u32 *crcp)
{
struct scatterlist *sg;
unsigned int i;
- __iscsi_segment_init(segment, size, done, hash);
+ __iscsi_segment_init(segment, size, done, crcp);
for_each_sg(sg_list, sg, sg_count, i) {
if (offset < sg->length) {
iscsi_tcp_segment_init_sg(segment, sg, offset);
@@ -393,7 +382,7 @@ EXPORT_SYMBOL_GPL(iscsi_segment_seek_sg);
* iscsi_tcp_hdr_recv_prep - prep segment for hdr reception
* @tcp_conn: iscsi connection to prep for
*
- * This function always passes NULL for the hash argument, because when this
+ * This function always passes NULL for the crcp argument, because when this
* function is called we do not yet know the final size of the header and want
* to delay the digest processing until we know that.
*/
@@ -434,15 +423,15 @@ static void
iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
{
struct iscsi_conn *conn = tcp_conn->iscsi_conn;
- struct ahash_request *rx_hash = NULL;
+ u32 *rx_crcp = NULL;
if (conn->datadgst_en &&
!(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
- rx_hash = tcp_conn->rx_hash;
+ rx_crcp = tcp_conn->rx_crcp;
iscsi_segment_init_linear(&tcp_conn->in.segment,
conn->data, tcp_conn->in.datalen,
- iscsi_tcp_data_recv_done, rx_hash);
+ iscsi_tcp_data_recv_done, rx_crcp);
}
/**
@@ -730,7 +719,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
if (tcp_conn->in.datalen) {
struct iscsi_tcp_task *tcp_task = task->dd_data;
- struct ahash_request *rx_hash = NULL;
+ u32 *rx_crcp = NULL;
struct scsi_data_buffer *sdb = &task->sc->sdb;
/*
@@ -743,7 +732,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
*/
if (conn->datadgst_en &&
!(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
- rx_hash = tcp_conn->rx_hash;
+ rx_crcp = tcp_conn->rx_crcp;
ISCSI_DBG_TCP(conn, "iscsi_tcp_begin_data_in( "
"offset=%d, datalen=%d)\n",
@@ -756,7 +745,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
tcp_task->data_offset,
tcp_conn->in.datalen,
iscsi_tcp_process_data_in,
- rx_hash);
+ rx_crcp);
spin_unlock(&conn->session->back_lock);
return rc;
}
@@ -878,7 +867,7 @@ iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
return 0;
}
- iscsi_tcp_dgst_header(tcp_conn->rx_hash, hdr,
+ iscsi_tcp_dgst_header(hdr,
segment->total_copied - ISCSI_DIGEST_SIZE,
segment->digest);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index e5a9c5a323f8..fe4fb67eb50c 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -74,8 +74,7 @@ struct lpfc_sli2_slim;
* queue depths when there are driver resource error or Firmware
* resource error.
*/
-/* 1 Second */
-#define QUEUE_RAMP_DOWN_INTERVAL (msecs_to_jiffies(1000 * 1))
+#define QUEUE_RAMP_DOWN_INTERVAL (secs_to_jiffies(1))
/* Number of exchanges reserved for discovery to complete */
#define LPFC_DISC_IOCB_BUFF_COUNT 20
@@ -1715,18 +1714,12 @@ lpfc_phba_elsring(struct lpfc_hba *phba)
* Note: If no valid cpu found, then nr_cpu_ids is returned.
*
**/
-static inline unsigned int
+static __always_inline unsigned int
lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
{
- unsigned int cpu_it;
-
- for_each_cpu_wrap(cpu_it, mask, start) {
- if (cpu_online(cpu_it))
- break;
- }
-
- return cpu_it;
+ return cpumask_next_and_wrap(start, mask, cpu_online_mask);
}
+
/**
* lpfc_next_present_cpu - Finds next present CPU after n
* @n: the cpu prior to search
@@ -1734,16 +1727,9 @@ lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
* Note: If no next present cpu, then fallback to first present cpu.
*
**/
-static inline unsigned int lpfc_next_present_cpu(int n)
+static __always_inline unsigned int lpfc_next_present_cpu(int n)
{
- unsigned int cpu;
-
- cpu = cpumask_next(n, cpu_present_mask);
-
- if (cpu >= nr_cpu_ids)
- cpu = cpumask_first(cpu_present_mask);
-
- return cpu;
+ return cpumask_next_wrap(n, cpu_present_mask);
}
/**
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 1d7db49a8fe4..e08b48b1b655 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -8045,8 +8045,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if (test_bit(FC_DISC_TMO, &vport->fc_flag)) {
tmo = ((phba->fc_ratov * 3) + 3);
mod_timer(&vport->fc_disctmo,
- jiffies +
- msecs_to_jiffies(1000 * tmo));
+ jiffies + secs_to_jiffies(tmo));
}
return 0;
}
@@ -8081,7 +8080,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if (test_bit(FC_DISC_TMO, &vport->fc_flag)) {
tmo = ((phba->fc_ratov * 3) + 3);
mod_timer(&vport->fc_disctmo,
- jiffies + msecs_to_jiffies(1000 * tmo));
+ jiffies + secs_to_jiffies(tmo));
}
if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
!test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag)) {
@@ -9511,7 +9510,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
if (!list_empty(&pring->txcmplq))
if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
mod_timer(&vport->els_tmofunc,
- jiffies + msecs_to_jiffies(1000 * timeout));
+ jiffies + secs_to_jiffies(timeout));
}
/**
@@ -9569,18 +9568,16 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
mbx_tmo_err = test_bit(MBX_TMO_ERR, &phba->bit_flags);
/* First we need to issue aborts to outstanding cmds on txcmpl */
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
+ if (piocb->vport != vport)
+ continue;
+
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2243 iotag = 0x%x cmd_flag = 0x%x "
- "ulp_command = 0x%x this_vport %x "
- "sli_flag = 0x%x\n",
+ "ulp_command = 0x%x sli_flag = 0x%x\n",
piocb->iotag, piocb->cmd_flag,
get_job_cmnd(phba, piocb),
- (piocb->vport == vport),
phba->sli.sli_flag);
- if (piocb->vport != vport)
- continue;
-
if ((phba->sli.sli_flag & LPFC_SLI_ACTIVE) && !mbx_tmo_err) {
if (piocb->cmd_flag & LPFC_IO_LIBDFC)
continue;
@@ -10899,7 +10896,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
"3334 Delay fc port discovery for %d secs\n",
phba->fc_ratov);
mod_timer(&vport->delayed_disc_tmo,
- jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
+ jiffies + secs_to_jiffies(phba->fc_ratov));
return;
}
@@ -11156,7 +11153,7 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba)
if (!ndlp)
return;
- mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
+ mod_timer(&ndlp->nlp_delayfunc, jiffies + secs_to_jiffies(1));
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
phba->pport->port_state = LPFC_FLOGI;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 36e66df36a18..8ca590e8469b 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -228,10 +228,16 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
return;
- /* check for recovered fabric node */
- if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
- ndlp->nlp_DID == Fabric_DID)
+ /* Ignore callback for a mismatched (stale) rport */
+ if (ndlp->rport != rport) {
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_NODE,
+ "6788 fc rport mismatch: d_id x%06x ndlp x%px "
+ "fc rport x%px node rport x%px state x%x "
+ "refcnt %u\n",
+ ndlp->nlp_DID, ndlp, rport, ndlp->rport,
+ ndlp->nlp_state, kref_read(&ndlp->kref));
return;
+ }
if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -3518,7 +3524,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
if (phba->fc_topology &&
phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "3314 Toplogy changed was 0x%x is 0x%x\n",
+ "3314 Topology changed was 0x%x is 0x%x\n",
phba->fc_topology,
bf_get(lpfc_mbx_read_top_topology, la));
phba->fc_topology_changed = 1;
@@ -4973,7 +4979,7 @@ lpfc_set_disctmo(struct lpfc_vport *vport)
tmo, vport->port_state, vport->fc_flag);
}
- mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
+ mod_timer(&vport->fc_disctmo, jiffies + secs_to_jiffies(tmo));
set_bit(FC_DISC_TMO, &vport->fc_flag);
/* Start Discovery Timer state <hba_state> */
@@ -5564,6 +5570,7 @@ static struct lpfc_nodelist *
__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *np = NULL;
uint32_t data1;
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
@@ -5578,14 +5585,20 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1, ndlp->nlp_rpi,
ndlp->active_rrqs_xri_bitmap);
- return ndlp;
+
+ /* Check for new or potentially stale node */
+ if (ndlp->nlp_state != NLP_STE_UNUSED_NODE)
+ return ndlp;
+ np = ndlp;
}
}
- /* FIND node did <did> NOT FOUND */
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "0932 FIND node did x%x NOT FOUND.\n", did);
- return NULL;
+ if (!np)
+ /* FIND node did <did> NOT FOUND */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0932 FIND node did x%x NOT FOUND.\n", did);
+
+ return np;
}
struct lpfc_nodelist *
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index bcadf11414c8..7238608ca49f 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -595,7 +595,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
/* Set up ring-0 (ELS) timer */
timeout = phba->fc_ratov * 2;
mod_timer(&vport->els_tmofunc,
- jiffies + msecs_to_jiffies(1000 * timeout));
+ jiffies + secs_to_jiffies(timeout));
/* Set up heart beat (HB) timer */
mod_timer(&phba->hb_tmofunc,
jiffies + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL));
@@ -604,7 +604,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
phba->last_completion_time = jiffies;
/* Set up error attention (ERATT) polling timer */
mod_timer(&phba->eratt_poll,
- jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
+ jiffies + secs_to_jiffies(phba->eratt_poll_interval));
if (test_bit(LINK_DISABLED, &phba->hba_flag)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -3361,8 +3361,8 @@ lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
/* Determine how long we might wait for the active mailbox
* command to be gracefully completed by firmware.
*/
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
- phba->sli.mbox_active) * 1000) + jiffies;
+ timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba,
+ phba->sli.mbox_active)) + jiffies;
}
spin_unlock_irqrestore(&phba->hbalock, iflag);
@@ -6909,7 +6909,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
* re-instantiate the Vlink using FDISC.
*/
mod_timer(&ndlp->nlp_delayfunc,
- jiffies + msecs_to_jiffies(1000));
+ jiffies + secs_to_jiffies(1));
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
vport->port_state = LPFC_FDISC;
@@ -7952,11 +7952,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
/* CMF congestion timer */
- hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- phba->cmf_timer.function = lpfc_cmf_timer;
+ hrtimer_setup(&phba->cmf_timer, lpfc_cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
/* CMF 1 minute stats collection timer */
- hrtimer_init(&phba->cmf_stats_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- phba->cmf_stats_timer.function = lpfc_cmf_stats_timer;
+ hrtimer_setup(&phba->cmf_stats_timer, lpfc_cmf_stats_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/*
* Control structure for handling external multi-buffer mailbox
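The CMF timer hunk above follows the tree-wide move from hrtimer_init() plus a separate .function assignment to hrtimer_setup(), which takes the callback at initialization time. A self-contained sketch under that assumption, with made-up demo_* names:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static enum hrtimer_restart demo_cmf_cb(struct hrtimer *t)
{
        /* Timer work would run here; one-shot in this sketch. */
        return HRTIMER_NORESTART;
}

static void demo_start_cmf_timer(struct hrtimer *t)
{
        /* One call now covers hrtimer_init() plus the ".function =" step. */
        hrtimer_setup(t, demo_cmf_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer_start(t, ms_to_ktime(100), HRTIMER_MODE_REL);
}

Binding the callback inside the setup helper means the timer can never be started before .function is assigned, which is the motivation usually given for this conversion.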
@@ -12873,7 +12872,7 @@ lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
if (offline) {
/* Find next online CPU on original mask */
- cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
+ cpu_next = cpumask_next_wrap(cpu, orig_mask);
cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
/* Found a valid CPU */
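The cpumask_next_wrap() call above switches to the two-argument form, which wraps around the mask once starting after the given CPU. A short sketch of how the new form is typically used, with hypothetical names:

#include <linux/cpumask.h>

/* Pick the CPU after 'cpu' in 'mask', wrapping to the start if needed. */
static unsigned int demo_pick_next_cpu(unsigned int cpu,
                                       const struct cpumask *mask)
{
        unsigned int next = cpumask_next_wrap(cpu, mask);

        /* A result >= nr_cpu_ids means the mask had no set bits at all. */
        return next < nr_cpu_ids ? next : cpu;
}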
@@ -13170,6 +13169,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
eqhdl = lpfc_get_eq_hdl(0);
rc = pci_irq_vector(phba->pcidev, 0);
if (rc < 0) {
+ free_irq(phba->pcidev->irq, phba);
pci_free_irq_vectors(phba->pcidev);
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0496 MSI pci_irq_vec failed (%d)\n", rc);
@@ -13250,6 +13250,7 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
eqhdl = lpfc_get_eq_hdl(0);
retval = pci_irq_vector(phba->pcidev, 0);
if (retval < 0) {
+ free_irq(phba->pcidev->irq, phba);
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0502 INTR pci_irq_vec failed (%d)\n",
retval);
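Both hunks above add a free_irq() on the path where pci_irq_vector() fails after the interrupt has already been requested, so the vectors are not released while a handler is still registered against them. A hedged sketch of that unwinding order, with hypothetical demo_* names:

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t demo_isr(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int demo_enable_msi(struct pci_dev *pdev, void *ctx)
{
        int rc;

        rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
        if (rc < 0)
                return rc;

        rc = request_irq(pdev->irq, demo_isr, 0, "demo", ctx);
        if (rc)
                goto free_vectors;

        rc = pci_irq_vector(pdev, 0);
        if (rc < 0)
                goto free_irq;  /* undo request_irq() before freeing vectors */

        return 0;

free_irq:
        free_irq(pdev->irq, ctx);
free_vectors:
        pci_free_irq_vectors(pdev);
        return rc;
}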
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 055ed632c14d..f0158fc00f78 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5645,9 +5645,8 @@ wait_for_cmpl:
* cmd_flag is set to LPFC_DRIVER_ABORTED before we wait
* for abort to complete.
*/
- wait_event_timeout(waitq,
- (lpfc_cmd->pCmd != cmnd),
- msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
+ wait_event_timeout(waitq, (lpfc_cmd->pCmd != cmnd),
+ secs_to_jiffies(2*vport->cfg_devloss_tmo));
spin_lock(&lpfc_cmd->buf_lock);
@@ -5911,7 +5910,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct fc_rport *rport)
* If target is not in a MAPPED state, delay until
* target is rediscovered or devloss timeout expires.
*/
- later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+ later = secs_to_jiffies(2 * vport->cfg_devloss_tmo) + jiffies;
while (time_after(later, jiffies)) {
if (!pnode)
return FAILED;
@@ -5957,7 +5956,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
lpfc_sli_abort_taskmgmt(vport,
&phba->sli.sli3_ring[LPFC_FCP_RING],
tgt_id, lun_id, context);
- later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+ later = secs_to_jiffies(2 * vport->cfg_devloss_tmo) + jiffies;
while (time_after(later, jiffies) && cnt) {
schedule_timeout_uninterruptible(msecs_to_jiffies(20));
cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
@@ -6137,8 +6136,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
wait_event_timeout(waitq,
!test_bit(NLP_WAIT_FOR_LOGO,
&pnode->save_flags),
- msecs_to_jiffies(dev_loss_tmo *
- 1000));
+ secs_to_jiffies(dev_loss_tmo));
if (test_and_clear_bit(NLP_WAIT_FOR_LOGO,
&pnode->save_flags))
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 3fd9723cd271..4e0d48fcb204 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1025,7 +1025,7 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
LIST_HEAD(send_rrq);
clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
- next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
+ next_time = jiffies + secs_to_jiffies(phba->fc_ratov + 1);
spin_lock_irqsave(&phba->rrq_list_lock, iflags);
list_for_each_entry_safe(rrq, nextrrq,
&phba->active_rrq_list, list) {
@@ -1208,8 +1208,7 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
else
rrq->send_rrq = 0;
rrq->xritag = xritag;
- rrq->rrq_stop_time = jiffies +
- msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
+ rrq->rrq_stop_time = jiffies + secs_to_jiffies(phba->fc_ratov + 1);
rrq->nlp_DID = ndlp->nlp_DID;
rrq->vport = ndlp->vport;
rrq->rxid = rxid;
@@ -1736,8 +1735,7 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
BUG_ON(!piocb->vport);
if (!test_bit(FC_UNLOADING, &piocb->vport->load_flag))
mod_timer(&piocb->vport->els_tmofunc,
- jiffies +
- msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
+ jiffies + secs_to_jiffies(phba->fc_ratov << 1));
}
return 0;
@@ -2923,6 +2921,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+ } else {
+ clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
}
/* The unreg_login mailbox is complete and had a
@@ -3956,8 +3956,7 @@ void lpfc_poll_eratt(struct timer_list *t)
else
/* Restart the timer for next eratt poll */
mod_timer(&phba->eratt_poll,
- jiffies +
- msecs_to_jiffies(1000 * phba->eratt_poll_interval));
+ jiffies + secs_to_jiffies(phba->eratt_poll_interval));
return;
}
@@ -9008,7 +9007,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* Start the ELS watchdog timer */
mod_timer(&vport->els_tmofunc,
- jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
+ jiffies + secs_to_jiffies(phba->fc_ratov * 2));
/* Start heart beat timer */
mod_timer(&phba->hb_tmofunc,
@@ -9027,7 +9026,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* Start error attention (ERATT) polling timer */
mod_timer(&phba->eratt_poll,
- jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
+ jiffies + secs_to_jiffies(phba->eratt_poll_interval));
/*
* The port is ready, set the host's link state to LINK_DOWN
@@ -9504,8 +9503,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
goto out_not_finished;
}
/* timeout active mbox command */
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
- 1000);
+ timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox));
mod_timer(&psli->mbox_tmo, jiffies + timeout);
}
@@ -9629,8 +9627,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
drvr_flag);
goto out_not_finished;
}
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
- 1000) + jiffies;
+ timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox)) + jiffies;
i = 0;
/* Wait for command to complete */
while (((word0 & OWN_CHIP) == OWN_CHIP) ||
@@ -9756,9 +9753,8 @@ lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
* command to be gracefully completed by firmware.
*/
if (phba->sli.mbox_active)
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
- phba->sli.mbox_active) *
- 1000) + jiffies;
+ timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba,
+ phba->sli.mbox_active)) + jiffies;
spin_unlock_irq(&phba->hbalock);
/* Make sure the mailbox is really active */
@@ -9881,8 +9877,7 @@ lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
}
}
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
- * 1000) + jiffies;
+ timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)) + jiffies;
do {
bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
@@ -10230,7 +10225,7 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
/* Start timer for the mbox_tmo and log some mailbox post messages */
mod_timer(&psli->mbox_tmo, (jiffies +
- msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
+ secs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq))));
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
@@ -13159,7 +13154,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
SLI_IOCB_RET_IOCB);
if (retval == IOCB_SUCCESS) {
- timeout_req = msecs_to_jiffies(timeout * 1000);
+ timeout_req = secs_to_jiffies(timeout);
timeleft = wait_event_timeout(done_q,
lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
timeout_req);
@@ -13275,8 +13270,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
/* now issue the command */
retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
- wait_for_completion_timeout(&mbox_done,
- msecs_to_jiffies(timeout * 1000));
+ wait_for_completion_timeout(&mbox_done, secs_to_jiffies(timeout));
spin_lock_irqsave(&phba->hbalock, flag);
pmboxq->ctx_u.mbox_wait = NULL;
@@ -13336,9 +13330,8 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
* command to be gracefully completed by firmware.
*/
if (phba->sli.mbox_active)
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
- phba->sli.mbox_active) *
- 1000) + jiffies;
+ timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba,
+ phba->sli.mbox_active)) + jiffies;
spin_unlock_irq(&phba->hbalock);
/* Enable softirqs again, done with phba->hbalock */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c35f7225058e..638b50f35287 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.4.0.7"
+#define LPFC_DRIVER_VERSION "14.4.0.8"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
@@ -32,6 +32,6 @@
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright (C) 2017-2024 Broadcom. All Rights " \
+#define LPFC_COPYRIGHT "Copyright (C) 2017-2025 Broadcom. All Rights " \
"Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \
"and/or its subsidiaries."
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 3d70cc517573..cc56a7334319 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -246,7 +246,7 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
* fabric RA_TOV value and dev_loss tmo. The driver's
* devloss_tmo is 10 giving this loop a 3x multiplier minimally.
*/
- wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000);
+ wait_time_max = secs_to_jiffies((phba->fc_ratov * 3) + 3);
wait_time_max += jiffies;
start_time = jiffies;
while (time_before(jiffies, wait_time_max)) {
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index adab151663dd..2006094af418 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -855,8 +855,8 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
return scb;
#if MEGA_HAVE_CLUSTERING
- case RESERVE:
- case RELEASE:
+ case RESERVE_6:
+ case RELEASE_6:
/*
* Do we support clustering and is the support enabled
@@ -875,7 +875,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
}
scb->raw_mbox[0] = MEGA_CLUSTER_CMD;
- scb->raw_mbox[2] = ( *cmd->cmnd == RESERVE ) ?
+ scb->raw_mbox[2] = *cmd->cmnd == RESERVE_6 ?
MEGA_RESERVE_LD : MEGA_RELEASE_LD;
scb->raw_mbox[3] = ldrv_num;
@@ -1618,8 +1618,8 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
* failed or the input parameter is invalid
*/
if( status == 1 &&
- (cmd->cmnd[0] == RESERVE ||
- cmd->cmnd[0] == RELEASE) ) {
+ (cmd->cmnd[0] == RESERVE_6 ||
+ cmd->cmnd[0] == RELEASE_6) ) {
cmd->result |= (DID_ERROR << 16) |
SAM_STAT_RESERVATION_CONFLICT;
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 60cc3372991f..3ba837b3093f 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -1725,8 +1725,8 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
return scb;
- case RESERVE:
- case RELEASE:
+ case RESERVE_6:
+ case RELEASE_6:
/*
* Do we support clustering and is the support enabled
*/
@@ -1748,7 +1748,7 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
scb->dev_channel = 0xFF;
scb->dev_target = target;
ccb->raw_mbox[0] = CLUSTER_CMD;
- ccb->raw_mbox[2] = (scp->cmnd[0] == RESERVE) ?
+ ccb->raw_mbox[2] = scp->cmnd[0] == RESERVE_6 ?
RESERVE_LD : RELEASE_LD;
ccb->raw_mbox[3] = target;
@@ -2334,8 +2334,8 @@ megaraid_mbox_dpc(unsigned long devp)
* Error code returned is 1 if Reserve or Release
* failed or the input parameter is invalid
*/
- if (status == 1 && (scp->cmnd[0] == RESERVE ||
- scp->cmnd[0] == RELEASE)) {
+ if (status == 1 && (scp->cmnd[0] == RESERVE_6 ||
+ scp->cmnd[0] == RELEASE_6)) {
scp->result = DID_ERROR << 16 |
SAM_STAT_RESERVATION_CONFLICT;
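The RESERVE/RELEASE edits in both megaraid drivers track the rename of the 6-byte reservation opcodes to RESERVE_6/RELEASE_6; the opcode values themselves (0x16/0x17) are unchanged, so this is a spelling-only conversion. A minimal sketch of testing a CDB with the new names (the helper name is made up):

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_proto.h>

/* True when the command is a 6-byte RESERVE or RELEASE CDB. */
static bool demo_is_reserve_release_6(struct scsi_cmnd *cmd)
{
        switch (cmd->cmnd[0]) {
        case RESERVE_6:         /* 0x16, previously spelled RESERVE */
        case RELEASE_6:         /* 0x17, previously spelled RELEASE */
                return true;
        default:
                return false;
        }
}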
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index d85f990aec88..28c75865967a 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -93,7 +93,7 @@ static unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
module_param(scmd_timeout, int, 0444);
MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
-int perf_mode = -1;
+static int perf_mode = -1;
module_param(perf_mode, int, 0444);
MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t"
"0 - balanced: High iops and low latency queues are allocated &\n\t\t"
@@ -105,15 +105,15 @@ MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:
"default mode is 'balanced'"
);
-int event_log_level = MFI_EVT_CLASS_CRITICAL;
+static int event_log_level = MFI_EVT_CLASS_CRITICAL;
module_param(event_log_level, int, 0644);
MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level- range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)");
-unsigned int enable_sdev_max_qd;
+static unsigned int enable_sdev_max_qd;
module_param(enable_sdev_max_qd, int, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
-int poll_queues;
+static int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues to be use for io_uring poll mode.\n\t\t"
"This parameter is effective only if host_tagset_enable=1 &\n\t\t"
@@ -122,7 +122,7 @@ MODULE_PARM_DESC(poll_queues, "Number of queues to be use for io_uring poll mode
"High iops queues are not allocated &\n\t\t"
);
-int host_tagset_enable = 1;
+static int host_tagset_enable = 1;
module_param(host_tagset_enable, int, 0444);
MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)");
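Marking these module parameters static works because module_param() does not need the backing variable to have external linkage; the change only stops exporting symbols that nothing outside this file references. A tiny sketch with a made-up parameter:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* File-local parameter; still settable via demo_perf_mode= at module load. */
static int demo_perf_mode = -1;
module_param(demo_perf_mode, int, 0444);
MODULE_PARM_DESC(demo_perf_mode, "demo performance mode, -1 selects the default");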
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
index 00cd18edfad6..96401eb7e231 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
@@ -19,6 +19,7 @@
#define MPI3_CONFIG_PAGETYPE_PCIE_SWITCH (0x31)
#define MPI3_CONFIG_PAGETYPE_PCIE_LINK (0x33)
#define MPI3_CONFIG_PAGEATTR_MASK (0xf0)
+#define MPI3_CONFIG_PAGEATTR_SHIFT (4)
#define MPI3_CONFIG_PAGEATTR_READ_ONLY (0x00)
#define MPI3_CONFIG_PAGEATTR_CHANGEABLE (0x10)
#define MPI3_CONFIG_PAGEATTR_PERSISTENT (0x20)
@@ -29,10 +30,13 @@
#define MPI3_CONFIG_ACTION_READ_PERSISTENT (0x04)
#define MPI3_CONFIG_ACTION_WRITE_PERSISTENT (0x05)
#define MPI3_DEVICE_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_DEVICE_PGAD_FORM_SHIFT (28)
#define MPI3_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
#define MPI3_DEVICE_PGAD_FORM_HANDLE (0x20000000)
#define MPI3_DEVICE_PGAD_HANDLE_MASK (0x0000ffff)
+#define MPI3_DEVICE_PGAD_HANDLE_SHIFT (0)
#define MPI3_SAS_EXPAND_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_SAS_EXPAND_PGAD_FORM_SHIFT (28)
#define MPI3_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
#define MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM (0x10000000)
#define MPI3_SAS_EXPAND_PGAD_FORM_HANDLE (0x20000000)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_image.h b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
index 2c6e548cbd0f..8d824107a678 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_image.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
@@ -66,7 +66,12 @@ struct mpi3_component_image_header {
#define MPI3_IMAGE_HEADER_SIGNATURE1_SMM (0x204d4d53)
#define MPI3_IMAGE_HEADER_SIGNATURE1_PSW (0x20575350)
#define MPI3_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546)
+#define MPI3_IMAGE_HEADER_FLAGS_SIGNED_UEFI_MASK (0x00000300)
+#define MPI3_IMAGE_HEADER_FLAGS_SIGNED_UEFI_SHIFT (8)
+#define MPI3_IMAGE_HEADER_FLAGS_CERT_CHAIN_FORMAT_MASK (0x000000c0)
+#define MPI3_IMAGE_HEADER_FLAGS_CERT_CHAIN_FORMAT_SHIFT (6)
#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_MASK (0x00000030)
+#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_SHIFT (4)
#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_CDI (0x00000000)
#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_DI (0x00000010)
#define MPI3_IMAGE_HEADER_FLAGS_SIGNED_NVDATA (0x00000008)
@@ -214,11 +219,13 @@ struct mpi3_encrypted_hash_entry {
#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH_1_OF_2 (0x04)
#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH_2_OF_2 (0x05)
#define MPI3_HASH_ALGORITHM_VERSION_MASK (0xe0)
+#define MPI3_HASH_ALGORITHM_VERSION_SHIFT (5)
#define MPI3_HASH_ALGORITHM_VERSION_NONE (0x00)
#define MPI3_HASH_ALGORITHM_VERSION_SHA1 (0x20)
#define MPI3_HASH_ALGORITHM_VERSION_SHA2 (0x40)
#define MPI3_HASH_ALGORITHM_VERSION_SHA3 (0x60)
#define MPI3_HASH_ALGORITHM_SIZE_MASK (0x1f)
+#define MPI3_HASH_ALGORITHM_SIZE_SHIFT (0)
#define MPI3_HASH_ALGORITHM_SIZE_UNUSED (0x00)
#define MPI3_HASH_ALGORITHM_SIZE_SHA256 (0x01)
#define MPI3_HASH_ALGORITHM_SIZE_SHA512 (0x02)
@@ -236,6 +243,7 @@ struct mpi3_encrypted_hash_entry {
#define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_65 (0x0c)
#define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_44 (0x0d)
#define MPI3_ENCRYPTED_HASH_ENTRY_FLAGS_PAIRED_KEY_MASK (0x0f)
+#define MPI3_ENCRYPTED_HASH_ENTRY_FLAGS_PAIRED_KEY_SHIFT (0)
#ifndef MPI3_ENCRYPTED_HASH_ENTRY_MAX
#define MPI3_ENCRYPTED_HASH_ENTRY_MAX (1)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_init.h b/drivers/scsi/mpi3mr/mpi/mpi30_init.h
index af86d12c8e49..bbef5bac92ed 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_init.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_init.h
@@ -38,23 +38,31 @@ struct mpi3_scsi_io_request {
#define MPI3_SCSIIO_MSGFLAGS_METASGL_VALID (0x80)
#define MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE (0x40)
#define MPI3_SCSIIO_FLAGS_LARGE_CDB (0x60000000)
+#define MPI3_SCSIIO_FLAGS_LARGE_CDB_MASK (0x60000000)
+#define MPI3_SCSIIO_FLAGS_LARGE_CDB_SHIFT (29)
+#define MPI3_SCSIIO_FLAGS_IOC_USE_ONLY_27_MASK (0x18000000)
+#define MPI3_SCSIIO_FLAGS_IOC_USE_ONLY_27_SHIFT (27)
#define MPI3_SCSIIO_FLAGS_CDB_16_OR_LESS (0x00000000)
#define MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16 (0x20000000)
#define MPI3_SCSIIO_FLAGS_CDB_IN_SEPARATE_BUFFER (0x40000000)
#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_MASK (0x07000000)
+#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SHIFT (24)
+#define MPI3_SCSIIO_FLAGS_DATADIRECTION_MASK (0x000c0000)
+#define MPI3_SCSIIO_FLAGS_DATADIRECTION_SHIFT (18)
#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ (0x00000000)
#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_HEADOFQ (0x01000000)
#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ORDEREDQ (0x02000000)
#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ACAQ (0x04000000)
#define MPI3_SCSIIO_FLAGS_CMDPRI_MASK (0x00f00000)
#define MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT (20)
-#define MPI3_SCSIIO_FLAGS_DATADIRECTION_MASK (0x000c0000)
#define MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER (0x00000000)
#define MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE (0x00040000)
#define MPI3_SCSIIO_FLAGS_DATADIRECTION_READ (0x00080000)
#define MPI3_SCSIIO_FLAGS_DMAOPERATION_MASK (0x00030000)
+#define MPI3_SCSIIO_FLAGS_DMAOPERATION_SHIFT (16)
#define MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI (0x00010000)
#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_MASK (0x000000f0)
+#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_SHIFT (4)
#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING (0x00000010)
#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE (0x00000020)
#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_PROD_SPECIFIC (0x00000080)
@@ -99,6 +107,7 @@ struct mpi3_scsi_io_reply {
#define MPI3_SCSI_STATUS_ACA_ACTIVE (0x30)
#define MPI3_SCSI_STATUS_TASK_ABORTED (0x40)
#define MPI3_SCSI_STATE_SENSE_MASK (0x03)
+#define MPI3_SCSI_STATE_SENSE_SHIFT (0)
#define MPI3_SCSI_STATE_SENSE_VALID (0x00)
#define MPI3_SCSI_STATE_SENSE_FAILED (0x01)
#define MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY (0x02)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
index c374867f9ba0..b42933fcd423 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
@@ -30,6 +30,7 @@ struct mpi3_ioc_init_request {
#define MPI3_IOCINIT_MSGFLAGS_WRITESAMEDIVERT_SUPPORTED (0x08)
#define MPI3_IOCINIT_MSGFLAGS_SCSIIOSTATUSREPLY_SUPPORTED (0x04)
#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_MASK (0x03)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_SHIFT (0)
#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_NOT_USED (0x00)
#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_SEPARATED (0x01)
#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_INLINE (0x02)
@@ -40,6 +41,7 @@ struct mpi3_ioc_init_request {
#define MPI3_WHOINIT_MANUFACTURER (0x04)
#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_MASK (0x00000003)
+#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_SHIFT (0)
#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_NO_GUIDANCE (0x00000000)
#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_NO_SPECIAL (0x00000001)
#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_REPORT_AS_HDD (0x00000002)
@@ -111,9 +113,11 @@ struct mpi3_ioc_facts_data {
__le32 diag_tty_size;
};
#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_MASK (0x80000000)
+#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_SHIFT (31)
#define MPI3_IOCFACTS_CAPABILITY_SUPERVISOR_IOC (0x00000000)
#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC (0x80000000)
#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_MASK (0x00000600)
+#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_SHIFT (9)
#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_FIXED_THRESHOLD (0x00000000)
#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_OUTSTANDING_IO (0x00000200)
#define MPI3_IOCFACTS_CAPABILITY_COMPLETE_RESET_SUPPORTED (0x00000100)
@@ -134,6 +138,7 @@ struct mpi3_ioc_facts_data {
#define MPI3_IOCFACTS_EXCEPT_SAS_DISABLED (0x1000)
#define MPI3_IOCFACTS_EXCEPT_SAFE_MODE (0x0800)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_MASK (0x0700)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_SHIFT (8)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_NONE (0x0000)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_MGMT (0x0100)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_MGMT (0x0200)
@@ -149,6 +154,7 @@ struct mpi3_ioc_facts_data {
#define MPI3_IOCFACTS_EXCEPT_BLOCKING_BOOT_EVENT (0x0004)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_SELFTEST_FAILURE (0x0002)
#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x0001)
+#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_SHIFT (0)
#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_PRIMARY (0x0000)
#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_SECONDARY (0x0001)
#define MPI3_IOCFACTS_PROTOCOL_SAS (0x0010)
@@ -161,10 +167,12 @@ struct mpi3_ioc_facts_data {
#define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK (0x0000ff00)
#define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT (8)
#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK (0x00000030)
+#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_SHIFT (4)
#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_NOT_STARTED (0x00000000)
#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_IN_PROGRESS (0x00000010)
#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_COMPLETE (0x00000020)
#define MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK (0x0000000f)
+#define MPI3_IOCFACTS_FLAGS_PERSONALITY_SHIFT (0)
#define MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA (0x00000000)
#define MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR (0x00000002)
#define MPI3_IOCFACTS_IO_THROTTLE_DATA_LENGTH_NOT_REQUIRED (0x0000)
@@ -204,6 +212,7 @@ struct mpi3_create_request_queue_request {
};
#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_MASK (0x80)
+#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SHIFT (7)
#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80)
#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00)
#define MPI3_CREATE_REQUEST_QUEUE_SIZE_MINIMUM (2)
@@ -237,10 +246,12 @@ struct mpi3_create_reply_queue_request {
};
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_MASK (0x80)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_SHIFT (7)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_COALESCE_DISABLE (0x02)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_MASK (0x01)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_SHIFT (0)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_DISABLE (0x00)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE (0x01)
#define MPI3_CREATE_REPLY_QUEUE_SIZE_MINIMUM (2)
@@ -326,9 +337,11 @@ struct mpi3_event_notification_reply {
};
#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK (0x01)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_SHIFT (0)
#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED (0x01)
#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_NOT_REQUIRED (0x00)
#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_MASK (0x02)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_SHIFT (1)
#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_ORIGINAL (0x00)
#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_REPLAY (0x02)
struct mpi3_event_data_gpio_interrupt {
@@ -487,6 +500,7 @@ struct mpi3_event_sas_topo_phy_entry {
#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_NO_EXIST (0x40)
#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_VACANT (0x80)
#define MPI3_EVENT_SAS_TOPO_PHY_RC_MASK (0x0f)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_SHIFT (0)
#define MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING (0x02)
#define MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED (0x03)
#define MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE (0x04)
@@ -566,6 +580,7 @@ struct mpi3_event_pcie_topo_port_entry {
#define MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING (0x05)
#define MPI3_EVENT_PCIE_TOPO_PS_RESPONDING (0x06)
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_MASK (0xf0)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_SHIFT (4)
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_UNKNOWN (0x00)
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_1 (0x10)
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_2 (0x20)
@@ -573,6 +588,7 @@ struct mpi3_event_pcie_topo_port_entry {
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_8 (0x40)
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_16 (0x50)
#define MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK (0x0f)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_SHIFT (0)
#define MPI3_EVENT_PCIE_TOPO_PI_RATE_UNKNOWN (0x00)
#define MPI3_EVENT_PCIE_TOPO_PI_RATE_DISABLED (0x01)
#define MPI3_EVENT_PCIE_TOPO_PI_RATE_2_5 (0x02)
@@ -881,6 +897,7 @@ struct mpi3_pel_req_action_acknowledge {
};
#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_MASK (0x03)
+#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_SHIFT (0)
#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_NO_GUIDANCE (0x00)
#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_CONTINUE_OP (0x01)
#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_TRANSITION_TO_FAULT (0x02)
@@ -924,6 +941,7 @@ struct mpi3_ci_download_request {
#define MPI3_CI_DOWNLOAD_MSGFLAGS_FORCE_FMC_ENABLE (0x40)
#define MPI3_CI_DOWNLOAD_MSGFLAGS_SIGNED_NVDATA (0x20)
#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_MASK (0x03)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_SHIFT (0)
#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_FAST (0x00)
#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_MEDIUM (0x01)
#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_SLOW (0x02)
@@ -953,6 +971,7 @@ struct mpi3_ci_download_reply {
#define MPI3_CI_DOWNLOAD_FLAGS_OFFLINE_ACTIVATION_REQUIRED (0x20)
#define MPI3_CI_DOWNLOAD_FLAGS_KEY_UPDATE_PENDING (0x10)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_MASK (0x0e)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_SHIFT (1)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_NOT_NEEDED (0x00)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_AWAITING (0x02)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_ONLINE_PENDING (0x04)
@@ -976,9 +995,11 @@ struct mpi3_ci_upload_request {
};
#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_MASK (0x01)
+#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_SHIFT (0)
#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY (0x00)
#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_SECONDARY (0x01)
#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_MASK (0x02)
+#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_SHIFT (1)
#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_FLASH (0x00)
#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_EXECUTABLE (0x02)
#define MPI3_CTRL_OP_FORCE_FULL_DISCOVERY (0x01)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_tool.h b/drivers/scsi/mpi3mr/mpi/mpi30_tool.h
index 3b960893870f..50a65b16a818 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_tool.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_tool.h
@@ -9,6 +9,7 @@
#define MPI3_DIAG_BUFFER_TYPE_FW (0x02)
#define MPI3_DIAG_BUFFER_ACTION_RELEASE (0x01)
+#define MPI3_DIAG_BUFFER_POST_MSGFLAGS_SEGMENTED (0x01)
struct mpi3_diag_buffer_post_request {
__le16 host_tag;
u8 ioc_use_only02;
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
index b2ab25a1cfeb..5c522e2531c3 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
@@ -18,7 +18,7 @@ union mpi3_version_union {
#define MPI3_VERSION_MAJOR (3)
#define MPI3_VERSION_MINOR (0)
-#define MPI3_VERSION_UNIT (34)
+#define MPI3_VERSION_UNIT (35)
#define MPI3_VERSION_DEV (0)
#define MPI3_DEVHANDLE_INVALID (0xffff)
struct mpi3_sysif_oper_queue_indexes {
@@ -80,6 +80,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT (20)
#define MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ (0x000f0000)
#define MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT (16)
+#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_SHIFT (14)
#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_MASK (0x0000c000)
#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NO (0x00000000)
#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL (0x00004000)
@@ -97,6 +98,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_IOC_STATUS_READY (0x00000001)
#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_OFFSET (0x00000024)
#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REQ_MASK (0x0fff)
+#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REQ_SHIFT (0)
#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_OFFSET (0x00000026)
#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_MASK (0x0fff0000)
#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_SHIFT (16)
@@ -106,6 +108,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_ADMIN_REPLY_Q_ADDR_HIGH_OFFSET (0x00000034)
#define MPI3_SYSIF_COALESCE_CONTROL_OFFSET (0x00000040)
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_MASK (0xc0000000)
+#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_SHIFT (30)
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_NO_CHANGE (0x00000000)
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_DISABLE (0x40000000)
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_ENABLE (0xc0000000)
@@ -124,6 +127,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(N) (MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET + (((N) - 1) * 8))
#define MPI3_SYSIF_WRITE_SEQUENCE_OFFSET (0x00001c04)
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_MASK (0x0000000f)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_SHIFT (0)
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH (0x0)
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST (0xf)
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND (0x4)
@@ -133,6 +137,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH (0xd)
#define MPI3_SYSIF_HOST_DIAG_OFFSET (0x00001c08)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_MASK (0x00000700)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SHIFT (8)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_NO_RESET (0x00000000)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET (0x00000100)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_HOST_CONTROL_BOOT_RESET (0x00000200)
@@ -151,6 +156,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_FAULT_FUNC_AREA_SHIFT (24)
#define MPI3_SYSIF_FAULT_FUNC_AREA_MPI_DEFINED (0x00000000)
#define MPI3_SYSIF_FAULT_CODE_MASK (0x0000ffff)
+#define MPI3_SYSIF_FAULT_CODE_SHIFT (0)
#define MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET (0x0000f000)
#define MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET (0x0000f001)
#define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS (0x0000f002)
@@ -176,17 +182,20 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_DIAG_RW_ADDRESS_HIGH_OFFSET (0x00001c5c)
#define MPI3_SYSIF_DIAG_RW_CONTROL_OFFSET (0x00001c60)
#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_MASK (0x00000030)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_SHIFT (4)
#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_1BYTE (0x00000000)
#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_2BYTES (0x00000010)
#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_4BYTES (0x00000020)
#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_8BYTES (0x00000030)
#define MPI3_SYSIF_DIAG_RW_CONTROL_RESET (0x00000004)
#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_MASK (0x00000002)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_SHIFT (1)
#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_READ (0x00000000)
#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_WRITE (0x00000002)
#define MPI3_SYSIF_DIAG_RW_CONTROL_START (0x00000001)
#define MPI3_SYSIF_DIAG_RW_STATUS_OFFSET (0x00001c62)
#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_MASK (0x0000000e)
+#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_SHIFT (1)
#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_SUCCESS (0x00000000)
#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_INV_ADDR (0x00000002)
#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_ACC_ERR (0x00000004)
@@ -207,7 +216,9 @@ struct mpi3_default_reply_descriptor {
};
#define MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK (0x0001)
+#define MPI3_REPLY_DESCRIPT_FLAGS_PHASE_SHIFT (0)
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK (0xf000)
+#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SHIFT (12)
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY (0x0000)
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS (0x1000)
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_TARGET_COMMAND_BUFFER (0x2000)
@@ -301,6 +312,7 @@ union mpi3_sge_union {
};
#define MPI3_SGE_FLAGS_ELEMENT_TYPE_MASK (0xf0)
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_SHIFT (4)
#define MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE (0x00)
#define MPI3_SGE_FLAGS_ELEMENT_TYPE_BIT_BUCKET (0x10)
#define MPI3_SGE_FLAGS_ELEMENT_TYPE_CHAIN (0x20)
@@ -309,6 +321,7 @@ union mpi3_sge_union {
#define MPI3_SGE_FLAGS_END_OF_LIST (0x08)
#define MPI3_SGE_FLAGS_END_OF_BUFFER (0x04)
#define MPI3_SGE_FLAGS_DLAS_MASK (0x03)
+#define MPI3_SGE_FLAGS_DLAS_SHIFT (0)
#define MPI3_SGE_FLAGS_DLAS_SYSTEM (0x00)
#define MPI3_SGE_FLAGS_DLAS_IOC_UDP (0x01)
#define MPI3_SGE_FLAGS_DLAS_IOC_CTL (0x02)
@@ -322,15 +335,18 @@ union mpi3_sge_union {
#define MPI3_EEDPFLAGS_CHK_APP_TAG (0x0200)
#define MPI3_EEDPFLAGS_CHK_GUARD (0x0100)
#define MPI3_EEDPFLAGS_ESC_MODE_MASK (0x00c0)
+#define MPI3_EEDPFLAGS_ESC_MODE_SHIFT (6)
#define MPI3_EEDPFLAGS_ESC_MODE_DO_NOT_DISABLE (0x0040)
#define MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE (0x0080)
#define MPI3_EEDPFLAGS_ESC_MODE_APPTAG_REFTAG_DISABLE (0x00c0)
#define MPI3_EEDPFLAGS_HOST_GUARD_MASK (0x0030)
+#define MPI3_EEDPFLAGS_HOST_GUARD_SHIFT (4)
#define MPI3_EEDPFLAGS_HOST_GUARD_T10_CRC (0x0000)
#define MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM (0x0010)
#define MPI3_EEDPFLAGS_HOST_GUARD_OEM_SPECIFIC (0x0020)
#define MPI3_EEDPFLAGS_PT_REF_TAG (0x0008)
#define MPI3_EEDPFLAGS_EEDP_OP_MASK (0x0007)
+#define MPI3_EEDPFLAGS_EEDP_OP_SHIFT (0)
#define MPI3_EEDPFLAGS_EEDP_OP_CHECK (0x0001)
#define MPI3_EEDPFLAGS_EEDP_OP_STRIP (0x0002)
#define MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE (0x0003)
@@ -403,6 +419,7 @@ struct mpi3_default_reply {
#define MPI3_IOCSTATUS_LOG_INFO_AVAIL_MASK (0x8000)
#define MPI3_IOCSTATUS_LOG_INFO_AVAILABLE (0x8000)
#define MPI3_IOCSTATUS_STATUS_MASK (0x7fff)
+#define MPI3_IOCSTATUS_STATUS_SHIFT (0)
#define MPI3_IOCSTATUS_SUCCESS (0x0000)
#define MPI3_IOCSTATUS_INVALID_FUNCTION (0x0001)
#define MPI3_IOCSTATUS_BUSY (0x0002)
@@ -469,4 +486,5 @@ struct mpi3_default_reply {
#define MPI3_IOCLOGINFO_TYPE_NONE (0x0)
#define MPI3_IOCLOGINFO_TYPE_SAS (0x3)
#define MPI3_IOCLOGINFO_LOG_DATA_MASK (0x0fffffff)
+#define MPI3_IOCLOGINFO_LOG_DATA_SHIFT (0)
#endif
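The header additions above pair every existing _MASK with a matching _SHIFT, so callers can extract fields with a plain mask-and-shift instead of hard-coding the shift amount; these MPI headers are shared with non-Linux consumers, which presumably is why they avoid kernel-only helpers such as FIELD_GET(). A sketch using one of the pairs added here (the function name and include path are illustrative):

#include <linux/types.h>
#include "mpi/mpi30_ioc.h"

/* Extract the security-key mode field from the IOCFacts exceptions word. */
static u16 demo_security_key_mode(u16 ioc_exceptions)
{
        return (ioc_exceptions & MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_MASK) >>
                MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_SHIFT;
}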
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 0d72b5f1b69d..9bbc7cb98ca3 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -56,8 +56,8 @@ extern struct list_head mrioc_list;
extern int prot_mask;
extern atomic64_t event_counter;
-#define MPI3MR_DRIVER_VERSION "8.12.0.3.50"
-#define MPI3MR_DRIVER_RELDATE "11-November-2024"
+#define MPI3MR_DRIVER_VERSION "8.13.0.5.50"
+#define MPI3MR_DRIVER_RELDATE "20-February-2025"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
@@ -80,13 +80,14 @@ extern atomic64_t event_counter;
/* Admin queue management definitions */
#define MPI3MR_ADMIN_REQ_Q_SIZE (2 * MPI3MR_PAGE_SIZE_4K)
-#define MPI3MR_ADMIN_REPLY_Q_SIZE (4 * MPI3MR_PAGE_SIZE_4K)
+#define MPI3MR_ADMIN_REPLY_Q_SIZE (8 * MPI3MR_PAGE_SIZE_4K)
#define MPI3MR_ADMIN_REQ_FRAME_SZ 128
#define MPI3MR_ADMIN_REPLY_FRAME_SZ 16
/* Operational queue management definitions */
#define MPI3MR_OP_REQ_Q_QD 512
#define MPI3MR_OP_REP_Q_QD 1024
+#define MPI3MR_OP_REP_Q_QD2K 2048
#define MPI3MR_OP_REP_Q_QD4K 4096
#define MPI3MR_OP_REQ_Q_SEG_SIZE 4096
#define MPI3MR_OP_REP_Q_SEG_SIZE 4096
@@ -328,6 +329,7 @@ enum mpi3mr_reset_reason {
#define MPI3MR_RESET_REASON_OSTYPE_SHIFT 28
#define MPI3MR_RESET_REASON_IOCNUM_SHIFT 20
+
/* Queue type definitions */
enum queue_type {
MPI3MR_DEFAULT_QUEUE = 0,
@@ -387,6 +389,7 @@ struct mpi3mr_ioc_facts {
u16 max_msix_vectors;
u8 personality;
u8 dma_mask;
+ bool max_req_limit;
u8 protocol_flags;
u8 sge_mod_mask;
u8 sge_mod_value;
@@ -456,6 +459,8 @@ struct op_req_qinfo {
* @enable_irq_poll: Flag to indicate polling is enabled
* @in_use: Queue is handled by poll/ISR
* @qtype: Type of queue (types defined in enum queue_type)
+ * @qfull_watermark: Watermark defined in reply queue to avoid
+ * reply queue full
*/
struct op_reply_qinfo {
u16 ci;
@@ -471,6 +476,7 @@ struct op_reply_qinfo {
bool enable_irq_poll;
atomic_t in_use;
enum queue_type qtype;
+ u16 qfull_watermark;
};
/**
@@ -928,6 +934,8 @@ struct trigger_event_data {
* @size: Buffer size
* @addr: Virtual address
* @dma_addr: Buffer DMA address
+ * @is_segmented: The buffer is segmented or not
+ * @disabled_after_reset: The buffer is disabled after reset
*/
struct diag_buffer_desc {
u8 type;
@@ -937,6 +945,8 @@ struct diag_buffer_desc {
u32 size;
void *addr;
dma_addr_t dma_addr;
+ bool is_segmented;
+ bool disabled_after_reset;
};
/**
@@ -1022,6 +1032,8 @@ struct scmd_priv {
* @admin_reply_base: Admin reply queue base virtual address
* @admin_reply_dma: Admin reply queue base dma address
* @admin_reply_q_in_use: Queue is handled by poll/ISR
+ * @admin_pend_isr: Count of unprocessed admin ISR/poll calls
+ * due to another thread processing replies
* @ready_timeout: Controller ready timeout
* @intr_info: Interrupt cookie pointer
* @intr_info_count: Number of interrupt cookies
@@ -1090,6 +1102,7 @@ struct scmd_priv {
* @ts_update_interval: Timestamp update interval
* @reset_in_progress: Reset in progress flag
* @unrecoverable: Controller unrecoverable flag
+ * @io_admin_reset_sync: Manage state of I/O ops during an admin reset process
* @prev_reset_result: Result of previous reset
* @reset_mutex: Controller reset mutex
* @reset_waitq: Controller reset wait queue
@@ -1153,6 +1166,12 @@ struct scmd_priv {
* @snapdump_trigger_active: Snapdump trigger active flag
* @pci_err_recovery: PCI error recovery in progress
* @block_on_pci_err: Block IO during PCI error recovery
+ * @reply_qfull_count: Occurrences of reply queue full avoidance kicking in
+ * @prevent_reply_qfull: Enable reply queue full prevention
+ * @seg_tb_support: Segmented trace buffer support
+ * @num_tb_segs: Number of segments in the trace buffer
+ * @trace_buf_pool: DMA pool for segmented trace buffer segments
+ * @trace_buf: Trace buffer segments memory descriptor
*/
struct mpi3mr_ioc {
struct list_head list;
@@ -1189,6 +1208,7 @@ struct mpi3mr_ioc {
void *admin_reply_base;
dma_addr_t admin_reply_dma;
atomic_t admin_reply_q_in_use;
+ atomic_t admin_pend_isr;
u32 ready_timeout;
@@ -1276,6 +1296,7 @@ struct mpi3mr_ioc {
u16 ts_update_interval;
u8 reset_in_progress;
u8 unrecoverable;
+ u8 io_admin_reset_sync;
int prev_reset_result;
struct mutex reset_mutex;
wait_queue_head_t reset_waitq;
@@ -1351,6 +1372,13 @@ struct mpi3mr_ioc {
bool fw_release_trigger_active;
bool pci_err_recovery;
bool block_on_pci_err;
+ atomic_t reply_qfull_count;
+ bool prevent_reply_qfull;
+ bool seg_tb_support;
+ u32 num_tb_segs;
+ struct dma_pool *trace_buf_pool;
+ struct segments *trace_buf;
+
};
/**
diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
index 7589f48aebc8..f36663613950 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
@@ -12,23 +12,98 @@
#include <uapi/scsi/scsi_bsg_mpi3mr.h>
/**
- * mpi3mr_alloc_trace_buffer: Allocate trace buffer
+ * mpi3mr_alloc_trace_buffer: Allocate segmented trace buffer
* @mrioc: Adapter instance reference
* @trace_size: Trace buffer size
*
- * Allocate trace buffer
+ * Allocate either segmented memory pools or contiguous buffer
+ * based on the controller capability for the host trace
+ * buffer.
+ *
* Return: 0 on success, non-zero on failure.
*/
static int mpi3mr_alloc_trace_buffer(struct mpi3mr_ioc *mrioc, u32 trace_size)
{
struct diag_buffer_desc *diag_buffer = &mrioc->diag_buffers[0];
+ int i, sz;
+ u64 *diag_buffer_list = NULL;
+ dma_addr_t diag_buffer_list_dma;
+ u32 seg_count;
+
+ if (mrioc->seg_tb_support) {
+ seg_count = (trace_size) / MPI3MR_PAGE_SIZE_4K;
+ trace_size = seg_count * MPI3MR_PAGE_SIZE_4K;
+
+ diag_buffer_list = dma_alloc_coherent(&mrioc->pdev->dev,
+ sizeof(u64) * seg_count,
+ &diag_buffer_list_dma, GFP_KERNEL);
+ if (!diag_buffer_list)
+ return -1;
+
+ mrioc->num_tb_segs = seg_count;
+
+ sz = sizeof(struct segments) * seg_count;
+ mrioc->trace_buf = kzalloc(sz, GFP_KERNEL);
+ if (!mrioc->trace_buf)
+ goto trace_buf_failed;
+
+ mrioc->trace_buf_pool = dma_pool_create("trace_buf pool",
+ &mrioc->pdev->dev, MPI3MR_PAGE_SIZE_4K, MPI3MR_PAGE_SIZE_4K,
+ 0);
+ if (!mrioc->trace_buf_pool) {
+ ioc_err(mrioc, "trace buf pool: dma_pool_create failed\n");
+ goto trace_buf_pool_failed;
+ }
- diag_buffer->addr = dma_alloc_coherent(&mrioc->pdev->dev,
- trace_size, &diag_buffer->dma_addr, GFP_KERNEL);
- if (diag_buffer->addr) {
- dprint_init(mrioc, "trace diag buffer is allocated successfully\n");
+ for (i = 0; i < seg_count; i++) {
+ mrioc->trace_buf[i].segment =
+ dma_pool_zalloc(mrioc->trace_buf_pool, GFP_KERNEL,
+ &mrioc->trace_buf[i].segment_dma);
+ diag_buffer_list[i] =
+ (u64) mrioc->trace_buf[i].segment_dma;
+ if (!diag_buffer_list[i])
+ goto tb_seg_alloc_failed;
+ }
+
+ diag_buffer->addr = diag_buffer_list;
+ diag_buffer->dma_addr = diag_buffer_list_dma;
+ diag_buffer->is_segmented = true;
+
+ dprint_init(mrioc, "segmented trace diag buffer\n"
+ "is allocated successfully seg_count:%d\n", seg_count);
return 0;
+ } else {
+ diag_buffer->addr = dma_alloc_coherent(&mrioc->pdev->dev,
+ trace_size, &diag_buffer->dma_addr, GFP_KERNEL);
+ if (diag_buffer->addr) {
+ dprint_init(mrioc, "trace diag buffer is allocated successfully\n");
+ return 0;
+ }
+ return -1;
}
+
+tb_seg_alloc_failed:
+ if (mrioc->trace_buf_pool) {
+ for (i = 0; i < mrioc->num_tb_segs; i++) {
+ if (mrioc->trace_buf[i].segment) {
+ dma_pool_free(mrioc->trace_buf_pool,
+ mrioc->trace_buf[i].segment,
+ mrioc->trace_buf[i].segment_dma);
+ mrioc->trace_buf[i].segment = NULL;
+ }
+ mrioc->trace_buf[i].segment = NULL;
+ }
+ dma_pool_destroy(mrioc->trace_buf_pool);
+ mrioc->trace_buf_pool = NULL;
+ }
+trace_buf_pool_failed:
+ kfree(mrioc->trace_buf);
+ mrioc->trace_buf = NULL;
+trace_buf_failed:
+ if (diag_buffer_list)
+ dma_free_coherent(&mrioc->pdev->dev,
+ sizeof(u64) * mrioc->num_tb_segs,
+ diag_buffer_list, diag_buffer_list_dma);
return -1;
}
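When the controller advertises segmented trace buffer support, the allocation above switches from one large dma_alloc_coherent() region to many 4K segments carved out of a dma_pool, and the segment DMA addresses are collected into a u64 list that is handed to the firmware via the segmented diag buffer post. A reduced sketch of the per-segment pool pattern, with made-up demo_* names:

#include <linux/dmapool.h>
#include <linux/sizes.h>

/* Allocate one zeroed, 4K-aligned trace segment from a per-device pool. */
static void *demo_alloc_trace_seg(struct device *dev, struct dma_pool **pool,
                                  dma_addr_t *dma)
{
        if (!*pool) {
                *pool = dma_pool_create("demo trace pool", dev, SZ_4K, SZ_4K, 0);
                if (!*pool)
                        return NULL;
        }
        return dma_pool_zalloc(*pool, GFP_KERNEL, dma);
}

The driver repeats this per segment, stores each returned dma_addr_t in the diag_buffer_list array, and unwinds through dma_pool_free()/dma_pool_destroy() if any segment allocation fails, as the error labels above show.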
@@ -100,8 +175,9 @@ retry_trace:
dprint_init(mrioc,
"trying to allocate trace diag buffer of size = %dKB\n",
trace_size / 1024);
- if (get_order(trace_size) > MAX_PAGE_ORDER ||
+ if ((!mrioc->seg_tb_support && (get_order(trace_size) > MAX_PAGE_ORDER)) ||
mpi3mr_alloc_trace_buffer(mrioc, trace_size)) {
+
retry = true;
trace_size -= trace_dec_size;
dprint_init(mrioc, "trace diag buffer allocation failed\n"
@@ -161,6 +237,12 @@ int mpi3mr_issue_diag_buf_post(struct mpi3mr_ioc *mrioc,
u8 prev_status;
int retval = 0;
+ if (diag_buffer->disabled_after_reset) {
+ dprint_bsg_err(mrioc, "%s: skipping diag buffer posting\n"
+ "as it is disabled after reset\n", __func__);
+ return -1;
+ }
+
memset(&diag_buf_post_req, 0, sizeof(diag_buf_post_req));
mutex_lock(&mrioc->init_cmds.mutex);
if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
@@ -177,8 +259,12 @@ int mpi3mr_issue_diag_buf_post(struct mpi3mr_ioc *mrioc,
diag_buf_post_req.address = le64_to_cpu(diag_buffer->dma_addr);
diag_buf_post_req.length = le32_to_cpu(diag_buffer->size);
- dprint_bsg_info(mrioc, "%s: posting diag buffer type %d\n", __func__,
- diag_buffer->type);
+ if (diag_buffer->is_segmented)
+ diag_buf_post_req.msg_flags |= MPI3_DIAG_BUFFER_POST_MSGFLAGS_SEGMENTED;
+
+ dprint_bsg_info(mrioc, "%s: posting diag buffer type %d segmented:%d\n", __func__,
+ diag_buffer->type, diag_buffer->is_segmented);
+
prev_status = diag_buffer->status;
diag_buffer->status = MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED;
init_completion(&mrioc->init_cmds.done);
@@ -2339,6 +2425,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job)
}
if (!mrioc->ioctl_sges_allocated) {
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
dprint_bsg_err(mrioc, "%s: DMA memory was not allocated\n",
__func__);
return -ENOMEM;
@@ -3061,6 +3148,29 @@ reply_queue_count_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(reply_queue_count);
/**
+ * reply_qfull_count_show - Show reply qfull count
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Retrieves the current value of the reply_qfull_count from the mrioc structure and
+ * formats it as a string for display.
+ *
+ * Return: sysfs_emit() return
+ */
+static ssize_t
+reply_qfull_count_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+
+ return sysfs_emit(buf, "%u\n", atomic_read(&mrioc->reply_qfull_count));
+}
+
+static DEVICE_ATTR_RO(reply_qfull_count);
+
+/**
* logging_level_show - Show controller debug level
* @dev: class device
* @attr: Device attributes
@@ -3152,6 +3262,7 @@ static struct attribute *mpi3mr_host_attrs[] = {
&dev_attr_fw_queue_depth.attr,
&dev_attr_op_req_q_count.attr,
&dev_attr_reply_queue_count.attr,
+ &dev_attr_reply_qfull_count.attr,
&dev_attr_logging_level.attr,
&dev_attr_adp_state.attr,
NULL,
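The new reply_qfull_count attribute is a standard read-only sysfs counter: a *_show() helper formats the atomic with sysfs_emit() and DEVICE_ATTR_RO() generates the matching struct device_attribute. A stripped-down sketch with a hypothetical counter:

#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/sysfs.h>

static atomic_t demo_qfull_count = ATOMIC_INIT(0);

/* DEVICE_ATTR_RO(demo_qfull_count) expects this exact _show name. */
static ssize_t demo_qfull_count_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", atomic_read(&demo_qfull_count));
}
static DEVICE_ATTR_RO(demo_qfull_count);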
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index 5ed31fe57474..3fcb1ad3b070 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -17,7 +17,7 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
struct mpi3_ioc_facts_data *facts_data);
static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
struct mpi3mr_drv_cmd *drv_cmd);
-
+static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc);
static int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");
@@ -446,8 +446,10 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
u16 threshold_comps = 0;
struct mpi3_default_reply_descriptor *reply_desc;
- if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1))
+ if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1)) {
+ atomic_inc(&mrioc->admin_pend_isr);
return 0;
+ }
reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
admin_reply_ci;
@@ -459,7 +461,7 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
}
do {
- if (mrioc->unrecoverable)
+ if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
break;
mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
@@ -554,7 +556,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
}
do {
- if (mrioc->unrecoverable)
+ if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
break;
req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
@@ -1302,7 +1304,7 @@ static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
(ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
retval = 0;
- ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
+ ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%08x)/(0x%08x)\n",
(!retval) ? "successful" : "failed", ioc_status, ioc_config);
return retval;
}
@@ -1355,6 +1357,19 @@ mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
"\tcontroller while sas transport support is enabled at the\n"
"\tdriver, please reboot the system or reload the driver\n");
+ if (mrioc->seg_tb_support) {
+ if (!(mrioc->facts.ioc_capabilities &
+ MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED)) {
+ ioc_err(mrioc,
+ "critical error: previously enabled segmented trace\n"
+ " buffer capability is disabled after reset. Please\n"
+ " update the firmware or reboot the system or\n"
+ " reload the driver to enable trace diag buffer\n");
+ mrioc->diag_buffers[0].disabled_after_reset = true;
+ } else
+ mrioc->diag_buffers[0].disabled_after_reset = false;
+ }
+
if (mrioc->facts.max_devhandle > mrioc->dev_handle_bitmap_bits) {
removepend_bitmap = bitmap_zalloc(mrioc->facts.max_devhandle,
GFP_KERNEL);
@@ -1717,7 +1732,7 @@ static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
ioc_status = readl(&mrioc->sysif_regs->ioc_status);
ioc_info(mrioc,
- "ioc_status/ioc_onfig after %s reset is (0x%x)/(0x%x)\n",
+ "ioc_status/ioc_config after %s reset is (0x%08x)/(0x%08x)\n",
(!retval)?"successful":"failed", ioc_status,
ioc_config);
if (retval)
@@ -2104,15 +2119,22 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
}
reply_qid = qidx + 1;
- op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
- if ((mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
- !mrioc->pdev->revision)
- op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
+
+ if (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) {
+ if (mrioc->pdev->revision)
+ op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
+ else
+ op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
+ } else
+ op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD2K;
+
op_reply_q->ci = 0;
op_reply_q->ephase = 1;
atomic_set(&op_reply_q->pend_ios, 0);
atomic_set(&op_reply_q->in_use, 0);
op_reply_q->enable_irq_poll = false;
+ op_reply_q->qfull_watermark =
+ op_reply_q->num_replies - (MPI3MR_THRESHOLD_REPLY_COUNT * 2);
if (!op_reply_q->q_segments) {
retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
@@ -2416,8 +2438,10 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
void *segment_base_addr;
u16 req_sz = mrioc->facts.op_req_sz;
struct segments *segments = op_req_q->q_segments;
+ struct op_reply_qinfo *op_reply_q = NULL;
reply_qidx = op_req_q->reply_qid - 1;
+ op_reply_q = mrioc->op_reply_qinfo + reply_qidx;
if (mrioc->unrecoverable)
return -EFAULT;
@@ -2448,6 +2472,15 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
goto out;
}
+ /* Reply queue is nearing to get full, push back IOs to SML */
+ if ((mrioc->prevent_reply_qfull == true) &&
+ (atomic_read(&op_reply_q->pend_ios) >
+ (op_reply_q->qfull_watermark))) {
+ atomic_inc(&mrioc->reply_qfull_count);
+ retval = -EAGAIN;
+ goto out;
+ }
+
segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
req_entry = (u8 *)segment_base_addr +
((pi % op_req_q->segment_qd) * req_sz);
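The new check above implements soft back-pressure: when the outstanding I/O count on the paired reply queue climbs past a watermark (queue depth minus two threshold batches, per the qfull_watermark setup earlier in this patch), the request is failed with -EAGAIN so it can be pushed back to the SCSI midlayer for a retry, as the in-code comment says. A hedged sketch of the check with made-up names and a made-up threshold:

#include <linux/atomic.h>
#include <linux/errno.h>

#define DEMO_THRESHOLD_REPLY_COUNT      64

/* Return -EAGAIN when the reply queue is close to full, 0 otherwise. */
static int demo_reply_q_can_post(unsigned int q_depth, atomic_t *pend_ios)
{
        unsigned int watermark = q_depth - 2 * DEMO_THRESHOLD_REPLY_COUNT;

        if (atomic_read(pend_ios) > watermark)
                return -EAGAIN;
        return 0;
}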
@@ -2726,7 +2759,16 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
return;
}
- if (mrioc->ts_update_counter++ >= mrioc->ts_update_interval) {
+ if (atomic_read(&mrioc->admin_pend_isr)) {
+ ioc_err(mrioc, "Unprocessed admin ISR instance found\n"
+ "flush admin replies\n");
+ mpi3mr_process_admin_reply_q(mrioc);
+ }
+
+ if (!(mrioc->facts.ioc_capabilities &
+ MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC) &&
+ (mrioc->ts_update_counter++ >= mrioc->ts_update_interval)) {
+
mrioc->ts_update_counter = 0;
mpi3mr_sync_timestamp(mrioc);
}
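The watchdog now also drains the admin reply queue whenever admin_pend_isr is non-zero, i.e. whenever an interrupt arrived while another context held admin_reply_q_in_use and therefore skipped processing. A reduced sketch of that try-lock-and-defer handshake, with hypothetical demo_* names:

#include <linux/atomic.h>

static atomic_t demo_q_in_use = ATOMIC_INIT(0);
static atomic_t demo_pend_isr = ATOMIC_INIT(0);

static void demo_process_replies(void)
{
        /* Someone else is already draining: remember that we were skipped. */
        if (!atomic_add_unless(&demo_q_in_use, 1, 1)) {
                atomic_inc(&demo_pend_isr);
                return;
        }
        atomic_set(&demo_pend_isr, 0);
        /* ... consume reply descriptors here ... */
        atomic_dec(&demo_q_in_use);
}

static void demo_watchdog(void)
{
        /* An ISR was skipped since the last pass: flush from process context. */
        if (atomic_read(&demo_pend_isr))
                demo_process_replies();
}

The same hunk also limits the periodic timestamp sync to supervisor IOCs by checking MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC before bumping ts_update_counter.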
@@ -3091,6 +3133,9 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
mrioc->facts.dma_mask = (facts_flags &
MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
+ mrioc->facts.dma_mask = (facts_flags &
+ MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
+ MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
mrioc->facts.protocol_flags = facts_data->protocol_flags;
mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
@@ -4214,6 +4259,13 @@ retry_init:
mrioc->shost->transportt = mpi3mr_transport_template;
}
+ if (mrioc->facts.max_req_limit)
+ mrioc->prevent_reply_qfull = true;
+
+ if (mrioc->facts.ioc_capabilities &
+ MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED)
+ mrioc->seg_tb_support = true;
+
mrioc->reply_sz = mrioc->facts.reply_sz;
retval = mpi3mr_check_reset_dma_mask(mrioc);
@@ -4370,6 +4422,7 @@ retry_init:
goto out_failed_noretry;
}
+ mrioc->io_admin_reset_sync = 0;
if (is_resume || mrioc->block_on_pci_err) {
dprint_reset(mrioc, "setting up single ISR\n");
retval = mpi3mr_setup_isr(mrioc, 1);
@@ -4671,7 +4724,7 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
*/
void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
{
- u16 i;
+ u16 i, j;
struct mpi3mr_intr_info *intr_info;
struct diag_buffer_desc *diag_buffer;
@@ -4806,6 +4859,26 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
diag_buffer = &mrioc->diag_buffers[i];
+ if ((i == 0) && mrioc->seg_tb_support) {
+ if (mrioc->trace_buf_pool) {
+ for (j = 0; j < mrioc->num_tb_segs; j++) {
+ if (mrioc->trace_buf[j].segment) {
+ dma_pool_free(mrioc->trace_buf_pool,
+ mrioc->trace_buf[j].segment,
+ mrioc->trace_buf[j].segment_dma);
+ mrioc->trace_buf[j].segment = NULL;
+ }
+
+ mrioc->trace_buf[j].segment = NULL;
+ }
+ dma_pool_destroy(mrioc->trace_buf_pool);
+ mrioc->trace_buf_pool = NULL;
+ }
+
+ kfree(mrioc->trace_buf);
+ mrioc->trace_buf = NULL;
+ diag_buffer->size = sizeof(u64) * mrioc->num_tb_segs;
+ }
if (diag_buffer->addr) {
dma_free_coherent(&mrioc->pdev->dev,
diag_buffer->size, diag_buffer->addr,
@@ -4883,7 +4956,7 @@ static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
}
ioc_info(mrioc,
- "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n",
+ "Base IOC Sts/Config after %s shutdown is (0x%08x)/(0x%08x)\n",
(!retval) ? "successful" : "failed", ioc_status,
ioc_config);
}
@@ -5229,6 +5302,55 @@ cleanup_drv_cmd:
}
/**
+ * mpi3mr_check_op_admin_proc - Check if admin and IO reply queues are idle
+ * @mrioc: Adapter instance reference
+ *
+ * Check whether any of the operational reply queues
+ * or the admin reply queue is currently being processed.
+ * If any queue is in use, this function waits for up to
+ * 10 seconds for it to become idle.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc)
+{
+
+ u16 timeout = 10 * 10; /* 10 seconds, polled in 100 ms steps */
+ u16 elapsed_time = 0;
+ bool op_admin_in_use = false;
+
+ do {
+ op_admin_in_use = false;
+
+ /* Check admin_reply queue first to exit early */
+ if (atomic_read(&mrioc->admin_reply_q_in_use) == 1)
+ op_admin_in_use = true;
+ else {
+ /* Check op_reply queues */
+ int i;
+
+ for (i = 0; i < mrioc->num_queues; i++) {
+ if (atomic_read(&mrioc->op_reply_qinfo[i].in_use) == 1) {
+ op_admin_in_use = true;
+ break;
+ }
+ }
+ }
+
+ if (!op_admin_in_use)
+ break;
+
+ msleep(100);
+
+ } while (++elapsed_time < timeout);
+
+ if (op_admin_in_use)
+ return 1;
+
+ return 0;
+}
+
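/*
 * Editor's sketch (not part of this patch): the poll above only works if the
 * reply-processing paths claim each queue with the in_use flag and stop
 * consuming replies once io_admin_reset_sync is set. The helper below is a
 * minimal illustration of that assumed handshake; the function name and body
 * are hypothetical, only the in_use and io_admin_reset_sync fields come from
 * the driver.
 */
static void example_drain_one_op_reply_q(struct mpi3mr_ioc *mrioc,
	struct op_reply_qinfo *op_reply_q)
{
	/* Back off if another context already owns this queue */
	if (atomic_cmpxchg(&op_reply_q->in_use, 0, 1))
		return;

	while (!mrioc->io_admin_reset_sync) {
		/* ... consume one reply descriptor, complete the request ... */
		break; /* placeholder so the sketch terminates */
	}

	/* Dropping in_use lets mpi3mr_check_op_admin_proc() make progress */
	atomic_set(&op_reply_q->in_use, 0);
}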
+/**
* mpi3mr_soft_reset_handler - Reset the controller
* @mrioc: Adapter instance reference
* @reset_reason: Reset reason code
@@ -5308,6 +5430,7 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);
mpi3mr_ioc_disable_intr(mrioc);
+ mrioc->io_admin_reset_sync = 1;
if (snapdump) {
mpi3mr_set_diagsave(mrioc);
@@ -5335,6 +5458,16 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
goto out;
}
+
+ retval = mpi3mr_check_op_admin_proc(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Soft reset failed due to an Admin or I/O queue polling\n"
+ "thread still processing replies even after a 10 second\n"
+ "timeout. Marking the controller as unrecoverable!\n");
+
+ goto out;
+ }
+
if (mrioc->num_io_throttle_group !=
mrioc->facts.max_io_throttle_group) {
ioc_err(mrioc,
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index b9a51d3f2024..c186b892150f 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -3839,6 +3839,18 @@ int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
if (scmd) {
+ if (tm_type == MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
+ cmd_priv = scsi_cmd_priv(scmd);
+ if (!cmd_priv)
+ goto out_unlock;
+
+ struct op_req_qinfo *op_req_q;
+
+ op_req_q = &mrioc->req_qinfo[cmd_priv->req_q_idx];
+ tm_req.task_host_tag = cpu_to_le16(cmd_priv->host_tag);
+ tm_req.task_request_queue_id =
+ cpu_to_le16(op_req_q->qid);
+ }
sdev = scmd->device;
sdev_priv_data = sdev->hostdata;
scsi_tgt_priv_data = ((sdev_priv_data) ?
@@ -4388,6 +4400,92 @@ out:
}
/**
+ * mpi3mr_eh_abort - Callback function for abort error handling
+ * @scmd: SCSI command reference
+ *
+ * Issues an Abort Task task management request if the command is
+ * in LLD scope, verifies whether it was aborted successfully, and
+ * returns the status accordingly.
+ *
+ * Return: SUCCESS if the abort was successful, otherwise FAILED
+ */
+static int mpi3mr_eh_abort(struct scsi_cmnd *scmd)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
+ struct mpi3mr_stgt_priv_data *stgt_priv_data;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ struct scmd_priv *cmd_priv;
+ u16 dev_handle, timeout = MPI3MR_ABORTTM_TIMEOUT;
+ u8 resp_code = 0;
+ int retval = FAILED, ret = 0;
+ struct request *rq = scsi_cmd_to_rq(scmd);
+ unsigned long scmd_age_ms = jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc);
+ unsigned long scmd_age_sec = scmd_age_ms / 1000;
+
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: attempting abort task for scmd(%p)\n", mrioc->name, scmd);
+
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: scmd(0x%p) is outstanding for %lus %lums, timeout %us, retries %d, allowed %d\n",
+ mrioc->name, scmd, scmd_age_sec, scmd_age_ms % 1000, rq->timeout / HZ,
+ scmd->retries, scmd->allowed);
+
+ scsi_print_command(scmd);
+
+ sdev_priv_data = scmd->device->hostdata;
+ if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: Device not available, Skip issuing abort task\n",
+ mrioc->name);
+ retval = SUCCESS;
+ goto out;
+ }
+
+ stgt_priv_data = sdev_priv_data->tgt_priv_data;
+ dev_handle = stgt_priv_data->dev_handle;
+
+ cmd_priv = scsi_cmd_priv(scmd);
+ if (!cmd_priv->in_lld_scope ||
+ cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: scmd (0x%p) not in LLD scope, Skip issuing Abort Task\n",
+ mrioc->name, scmd);
+ retval = SUCCESS;
+ goto out;
+ }
+
+ if (stgt_priv_data->dev_removed) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: Device (handle = 0x%04x) removed, Skip issuing Abort Task\n",
+ mrioc->name, dev_handle);
+ retval = FAILED;
+ goto out;
+ }
+
+ ret = mpi3mr_issue_tm(mrioc, MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
+ dev_handle, sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
+ timeout, &mrioc->host_tm_cmds, &resp_code, scmd);
+
+ if (ret)
+ goto out;
+
+ if (cmd_priv->in_lld_scope) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: Abort task failed. scmd (0x%p) was not terminated\n",
+ mrioc->name, scmd);
+ goto out;
+ }
+
+ retval = SUCCESS;
+out:
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: Abort Task %s for scmd (0x%p)\n", mrioc->name,
+ ((retval == SUCCESS) ? "SUCCEEDED" : "FAILED"), scmd);
+
+ return retval;
+}
+
+/**
* mpi3mr_scan_start - Scan start callback handler
* @shost: SCSI host reference
*
@@ -5069,6 +5167,7 @@ static const struct scsi_host_template mpi3mr_driver_template = {
.scan_finished = mpi3mr_scan_finished,
.scan_start = mpi3mr_scan_start,
.change_queue_depth = mpi3mr_change_queue_depth,
+ .eh_abort_handler = mpi3mr_eh_abort,
.eh_device_reset_handler = mpi3mr_eh_dev_reset,
.eh_target_reset_handler = mpi3mr_eh_target_reset,
.eh_bus_reset_handler = mpi3mr_eh_bus_reset,
@@ -5803,7 +5902,7 @@ static const struct pci_device_id mpi3mr_pci_id_table[] = {
};
MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table);
-static struct pci_error_handlers mpi3mr_err_handler = {
+static const struct pci_error_handlers mpi3mr_err_handler = {
.error_detected = mpi3mr_pcierr_error_detected,
.mmio_enabled = mpi3mr_pcierr_mmio_enabled,
.slot_reset = mpi3mr_pcierr_slot_reset,
diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
index 0ba9e6a6a13c..c8d6ced5640e 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
@@ -105,10 +105,10 @@ struct rep_manu_reply {
u8 reserved0[2];
u8 sas_format;
u8 reserved2[3];
- u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
- u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
- u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
- u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN] __nonstring;
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN] __nonstring;
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN] __nonstring;
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN] __nonstring;
u16 component_id;
u8 component_revision_id;
u8 reserved3;
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index 6de35b32223c..b181b113fc80 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -125,6 +125,12 @@
* 06-24-19 02.00.55 Bumped MPI2_HEADER_VERSION_UNIT
* 08-01-19 02.00.56 Bumped MPI2_HEADER_VERSION_UNIT
* 10-02-19 02.00.57 Bumped MPI2_HEADER_VERSION_UNIT
+ * 07-20-20 02.00.58 Bumped MPI2_HEADER_VERSION_UNIT
+ * 03-30-21 02.00.59 Bumped MPI2_HEADER_VERSION_UNIT
+ * 06-03-22 02.00.60 Bumped MPI2_HEADER_VERSION_UNIT
+ * 09-20-23 02.00.61 Bumped MPI2_HEADER_VERSION_UNIT
+ * 09-13-24 02.00.62 Bumped MPI2_HEADER_VERSION_UNIT
+ * Added MPI2_FUNCTION_MCTP_PASSTHROUGH
* --------------------------------------------------------------------------
*/
@@ -165,7 +171,7 @@
/* Unit and Dev versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x39)
+#define MPI2_HEADER_VERSION_UNIT (0x3E)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -669,6 +675,7 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION {
#define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30)
#define MPI2_FUNCTION_SEND_HOST_MESSAGE (0x31)
#define MPI2_FUNCTION_NVME_ENCAPSULATED (0x33)
+#define MPI2_FUNCTION_MCTP_PASSTHROUGH (0x34)
#define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0)
#define MPI2_FUNCTION_MAX_PRODUCT_SPECIFIC (0xFF)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 587f7d248219..02bf26ca976e 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -251,6 +251,7 @@
* 12-17-18 02.00.47 Swap locations of Slotx2 and Slotx4 in ManPage 7.
* 08-01-19 02.00.49 Add MPI26_MANPAGE7_FLAG_X2_X4_SLOT_INFO_VALID
* Add MPI26_IOUNITPAGE1_NVME_WRCACHE_SHIFT
+ * 09-13-24 02.00.50 Added PCIe 32 GT/s link rate
*/
#ifndef MPI2_CNFG_H
@@ -606,7 +607,7 @@ typedef struct _MPI2_CONFIG_REPLY {
typedef struct _MPI2_CONFIG_PAGE_MAN_0 {
MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
- U8 ChipName[16]; /*0x04 */
+ U8 ChipName[16] __nonstring; /*0x04 */
U8 ChipRevision[8]; /*0x14 */
U8 BoardName[16]; /*0x1C */
U8 BoardAssembly[16]; /*0x2C */
@@ -1121,6 +1122,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
#define MPI2_IOUNITPAGE7_PCIE_SPEED_5_0_GBPS (0x01)
#define MPI2_IOUNITPAGE7_PCIE_SPEED_8_0_GBPS (0x02)
#define MPI2_IOUNITPAGE7_PCIE_SPEED_16_0_GBPS (0x03)
+#define MPI2_IOUNITPAGE7_PCIE_SPEED_32_0_GBPS (0x04)
/*defines for IO Unit Page 7 ProcessorState field */
#define MPI2_IOUNITPAGE7_PSTATE_MASK_SECOND (0x0000000F)
@@ -2301,6 +2303,7 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 {
#define MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION (0x0001)
/*values for SAS IO Unit Page 1 AdditionalControlFlags */
+#define MPI2_SASIOUNIT1_ACONTROL_PROD_SPECIFIC_1 (0x8000)
#define MPI2_SASIOUNIT1_ACONTROL_DA_PERSIST_CONNECT (0x0100)
#define MPI2_SASIOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080)
#define MPI2_SASIOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION (0x0040)
@@ -3591,6 +3594,7 @@ typedef struct _MPI2_CONFIG_PAGE_EXT_MAN_PS {
#define MPI26_PCIE_NEG_LINK_RATE_5_0 (0x03)
#define MPI26_PCIE_NEG_LINK_RATE_8_0 (0x04)
#define MPI26_PCIE_NEG_LINK_RATE_16_0 (0x05)
+#define MPI26_PCIE_NEG_LINK_RATE_32_0 (0x06)
/****************************************************************************
@@ -3700,6 +3704,7 @@ typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_1 {
#define MPI26_PCIEIOUNIT1_MAX_RATE_5_0 (0x30)
#define MPI26_PCIEIOUNIT1_MAX_RATE_8_0 (0x40)
#define MPI26_PCIEIOUNIT1_MAX_RATE_16_0 (0x50)
+#define MPI26_PCIEIOUNIT1_MAX_RATE_32_0 (0x60)
/*values for PCIe IO Unit Page 1 DMDReportPCIe */
#define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_MASK (0x80)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index d92852591134..1a279c6e1a9f 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -179,6 +179,7 @@
* Added MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED
* Added MPI2_FW_DOWNLOAD_ITYPE_COREDUMP
* Added MPI2_FW_UPLOAD_ITYPE_COREDUMP
+ * 9-13-24 02.00.39 Added MPI26_MCTP_PASSTHROUGH messages
* --------------------------------------------------------------------------
*/
@@ -382,6 +383,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY {
/*ProductID field uses MPI2_FW_HEADER_PID_ */
/*IOCCapabilities */
+#define MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU (0x00800000)
#define MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED (0x00200000)
#define MPI26_IOCFACTS_CAPABILITY_PCIE_SRIOV (0x00100000)
#define MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ (0x00080000)
@@ -1798,5 +1800,57 @@ typedef struct _MPI26_IOUNIT_CONTROL_REPLY {
Mpi26IoUnitControlReply_t,
*pMpi26IoUnitControlReply_t;
+/****************************************************************************
+ * MCTP Passthrough messages (MPI v2.6 and later only.)
+ ****************************************************************************/
+
+/* MCTP Passthrough Request Message */
+typedef struct _MPI26_MCTP_PASSTHROUGH_REQUEST {
+ U8 MsgContext; /* 0x00 */
+ U8 Reserved1[2]; /* 0x01 */
+ U8 Function; /* 0x03 */
+ U8 Reserved2[3]; /* 0x04 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U32 Reserved4; /* 0x0C */
+ U8 Flags; /* 0x10 */
+ U8 Reserved5[3]; /* 0x11 */
+ U32 Reserved6; /* 0x14 */
+ U32 H2DLength; /* 0x18 */
+ U32 D2HLength; /* 0x1C */
+ MPI25_SGE_IO_UNION H2DSGL; /* 0x20 */
+ MPI25_SGE_IO_UNION D2HSGL; /* 0x30 */
+} MPI26_MCTP_PASSTHROUGH_REQUEST,
+ *PTR_MPI26_MCTP_PASSTHROUGH_REQUEST,
+ Mpi26MctpPassthroughRequest_t,
+ *pMpi26MctpPassthroughRequest_t;
+
+/* values for the MsgContext field */
+#define MPI26_MCTP_MSG_CONEXT_UNUSED (0x00)
+
+/* values for the Flags field */
+#define MPI26_MCTP_FLAGS_MSG_FORMAT_MPT (0x01)
+
+/* MCTP Passthrough Reply Message */
+typedef struct _MPI26_MCTP_PASSTHROUGH_REPLY {
+ U8 MsgContext; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U8 Reserved2[3]; /* 0x04 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U16 Reserved4; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U32 ResponseDataLength; /* 0x14 */
+} MPI26_MCTP_PASSTHROUGH_REPLY,
+ *PTR_MPI26_MCTP_PASSTHROUGH_REPLY,
+ Mpi26MctpPassthroughReply_t,
+ *pMpi26MctpPassthroughReply_t;
#endif
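/*
 * Editor's sketch (not part of this patch): filling in the new request frame
 * defined above. Only the MsgContext, Function, Flags and length fields are
 * shown; the H2DSGL/D2HSGL entries are expected to be built by the driver's
 * SGE helpers. The function name is hypothetical.
 */
static inline void example_init_mctp_passthru_req(
	Mpi26MctpPassthroughRequest_t *req, U32 h2d_len, U32 d2h_len)
{
	memset(req, 0, sizeof(*req));
	req->MsgContext = MPI26_MCTP_MSG_CONEXT_UNUSED;
	req->Function = MPI2_FUNCTION_MCTP_PASSTHROUGH;
	req->Flags = MPI26_MCTP_FLAGS_MSG_FORMAT_MPT;
	req->H2DLength = h2d_len;	/* bytes going host to device */
	req->D2HLength = d2h_len;	/* bytes expected device to host */
}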
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index dc43cfa83088..bd3efa5b46c7 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -1202,6 +1202,11 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
ioc->sge_size;
func_str = "nvme_encapsulated";
break;
+ case MPI2_FUNCTION_MCTP_PASSTHROUGH:
+ frame_sz = sizeof(Mpi26MctpPassthroughRequest_t) +
+ ioc->sge_size;
+ func_str = "mctp_passthru";
+ break;
default:
frame_sz = 32;
func_str = "unknown";
@@ -4874,6 +4879,12 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
i++;
}
+ if (ioc->facts.IOCCapabilities &
+ MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU) {
+ pr_cont("%sMCTP Passthru", i ? "," : "");
+ i++;
+ }
+
iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
pr_cont("%sNCQ", i ? "," : "");
@@ -8018,7 +8029,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
mutex_lock(&ioc->hostdiag_unlock_mutex);
if (mpt3sas_base_unlock_and_get_host_diagnostic(ioc, &host_diagnostic))
- goto out;
+ goto unlock;
hcb_size = ioc->base_readl(&ioc->chip->HCBSize);
drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
@@ -8038,7 +8049,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_info(ioc,
"Invalid host diagnostic register value\n");
_base_dump_reg_set(ioc);
- goto out;
+ goto unlock;
}
if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
break;
@@ -8074,17 +8085,19 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
__func__, ioc_state);
_base_dump_reg_set(ioc);
- goto out;
+ goto fail;
}
pci_cfg_access_unlock(ioc->pdev);
ioc_info(ioc, "diag reset: SUCCESS\n");
return 0;
- out:
+unlock:
+ mutex_unlock(&ioc->hostdiag_unlock_mutex);
+
+fail:
pci_cfg_access_unlock(ioc->pdev);
ioc_err(ioc, "diag reset: FAILED\n");
- mutex_unlock(&ioc->hostdiag_unlock_mutex);
return -EFAULT;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index d8d1a64b4764..939141cde3ca 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -77,8 +77,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "51.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 51
+#define MPT3SAS_DRIVER_VERSION "52.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 52
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 00
#define MPT3SAS_RELEASE_VERSION 00
@@ -1858,9 +1858,6 @@ int mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc,
int mpt3sas_config_get_manufacturing_pg1(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage1_t *config_page);
-int mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc,
- Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page,
- u16 sz);
int mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply,
struct Mpi2ManufacturingPage10_t *config_page);
@@ -1887,9 +1884,6 @@ int mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
int mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage0_t *config_page,
u32 form, u32 handle);
-int mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc,
- Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page,
- u32 form, u32 handle);
int mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage0_t *config_page,
u32 form, u32 handle);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index 2e88f456fc34..45ac853e1289 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -577,44 +577,6 @@ mpt3sas_config_get_manufacturing_pg1(struct MPT3SAS_ADAPTER *ioc,
}
/**
- * mpt3sas_config_get_manufacturing_pg7 - obtain manufacturing page 7
- * @ioc: per adapter object
- * @mpi_reply: reply mf payload returned from firmware
- * @config_page: contents of the config page
- * @sz: size of buffer passed in config_page
- * Context: sleep.
- *
- * Return: 0 for success, non-zero for failure.
- */
-int
-mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc,
- Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page,
- u16 sz)
-{
- Mpi2ConfigRequest_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
- mpi_request.Function = MPI2_FUNCTION_CONFIG;
- mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
- mpi_request.Header.PageNumber = 7;
- mpi_request.Header.PageVersion = MPI2_MANUFACTURING7_PAGEVERSION;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = _config_request(ioc, &mpi_request, mpi_reply,
- MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
-
- mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = _config_request(ioc, &mpi_request, mpi_reply,
- MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sz);
- out:
- return r;
-}
-
-/**
* mpt3sas_config_get_manufacturing_pg10 - obtain manufacturing page 10
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
@@ -1214,47 +1176,6 @@ mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
}
/**
- * mpt3sas_config_get_sas_device_pg1 - obtain sas device page 1
- * @ioc: per adapter object
- * @mpi_reply: reply mf payload returned from firmware
- * @config_page: contents of the config page
- * @form: GET_NEXT_HANDLE or HANDLE
- * @handle: device handle
- * Context: sleep.
- *
- * Return: 0 for success, non-zero for failure.
- */
-int
-mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc,
- Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page,
- u32 form, u32 handle)
-{
- Mpi2ConfigRequest_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
- mpi_request.Function = MPI2_FUNCTION_CONFIG;
- mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
- mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE;
- mpi_request.Header.PageVersion = MPI2_SASDEVICE1_PAGEVERSION;
- mpi_request.Header.PageNumber = 1;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = _config_request(ioc, &mpi_request, mpi_reply,
- MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
-
- mpi_request.PageAddress = cpu_to_le32(form | handle);
- mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = _config_request(ioc, &mpi_request, mpi_reply,
- MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sizeof(*config_page));
- out:
- return r;
-}
-
-/**
* mpt3sas_config_get_pcie_device_pg0 - obtain pcie device page 0
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 87784c96249a..063b10dd8251 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -186,6 +186,9 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
case MPI2_FUNCTION_NVME_ENCAPSULATED:
desc = "nvme_encapsulated";
break;
+ case MPI2_FUNCTION_MCTP_PASSTHROUGH:
+ desc = "mctp_passthrough";
+ break;
}
if (!desc)
@@ -653,6 +656,40 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
}
/**
+ * _ctl_send_mctp_passthru_req - Send an MCTP passthru request
+ * @ioc: per adapter object
+ * @mctp_passthru_req: MPI MCTP passthru request from caller
+ * @psge: pointer to the H2DSGL
+ * @data_out_dma: DMA buffer for H2D SGL
+ * @data_out_sz: H2D length
+ * @data_in_dma: DMA buffer for D2H SGL
+ * @data_in_sz: D2H length
+ * @smid: SMID to submit the request
+ *
+ */
+static void
+_ctl_send_mctp_passthru_req(
+ struct MPT3SAS_ADAPTER *ioc,
+ Mpi26MctpPassthroughRequest_t *mctp_passthru_req, void *psge,
+ dma_addr_t data_out_dma, int data_out_sz,
+ dma_addr_t data_in_dma, int data_in_sz,
+ u16 smid)
+{
+ mctp_passthru_req->H2DLength = data_out_sz;
+ mctp_passthru_req->D2HLength = data_in_sz;
+
+ /* Build the H2D SGL from the data out buffer */
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, 0, 0);
+
+ psge += ioc->sge_size_ieee;
+
+ /* Build the D2H SGL for the data in buffer */
+ ioc->build_sg(ioc, psge, 0, 0, data_in_dma, data_in_sz);
+
+ ioc->put_smid_default(ioc, smid);
+}
+
+/**
* _ctl_do_mpt_command - main handler for MPT3COMMAND opcode
* @ioc: per adapter object
* @karg: (struct mpt3_ioctl_command)
@@ -679,6 +716,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
size_t data_in_sz = 0;
long ret;
u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
+ int tm_ret;
issue_reset = 0;
@@ -792,6 +830,23 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
init_completion(&ioc->ctl_cmds.done);
switch (mpi_request->Function) {
+ case MPI2_FUNCTION_MCTP_PASSTHROUGH:
+ {
+ Mpi26MctpPassthroughRequest_t *mctp_passthru_req =
+ (Mpi26MctpPassthroughRequest_t *)request;
+
+ if (!(ioc->facts.IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU)) {
+ ioc_err(ioc, "%s: MCTP Passthrough request not supported\n",
+ __func__);
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ _ctl_send_mctp_passthru_req(ioc, mctp_passthru_req, psge, data_out_dma,
+ data_out_sz, data_in_dma, data_in_sz, smid);
+ break;
+ }
case MPI2_FUNCTION_NVME_ENCAPSULATED:
{
nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request;
@@ -1120,18 +1175,25 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
if (pcie_device && (!ioc->tm_custom_handling) &&
(!(mpt3sas_scsih_is_pcie_scsi_device(
pcie_device->device_info))))
- mpt3sas_scsih_issue_locked_tm(ioc,
+ tm_ret = mpt3sas_scsih_issue_locked_tm(ioc,
le16_to_cpu(mpi_request->FunctionDependent1),
0, 0, 0,
MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
0, pcie_device->reset_timeout,
MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE);
else
- mpt3sas_scsih_issue_locked_tm(ioc,
+ tm_ret = mpt3sas_scsih_issue_locked_tm(ioc,
le16_to_cpu(mpi_request->FunctionDependent1),
0, 0, 0,
MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET);
+
+ if (tm_ret != SUCCESS) {
+ ioc_info(ioc,
+ "target reset failed, issue hard reset: handle (0x%04x)\n",
+ le16_to_cpu(mpi_request->FunctionDependent1));
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ }
} else
mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
}
@@ -1200,6 +1262,8 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
}
karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
+ karg.driver_capability |= MPT3_IOCTL_IOCINFO_DRIVER_CAP_MCTP_PASSTHRU;
+
if (copy_to_user(arg, &karg, sizeof(karg))) {
pr_err("failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
@@ -2787,6 +2851,217 @@ out_unlock_pciaccess:
}
/**
+ * _ctl_get_mpt_mctp_passthru_adapter - Traverse the IOC list and return the IOC at
+ * dev_index position that supports MCTP passthru
+ * @dev_index: position in the mpt3sas_ioc_list to search for
+ *
+ * Return: pointer to the IOC on success, NULL if no matching device is found
+ */
+static struct MPT3SAS_ADAPTER *
+_ctl_get_mpt_mctp_passthru_adapter(int dev_index)
+{
+ struct MPT3SAS_ADAPTER *ioc = NULL;
+ int count = 0;
+
+ spin_lock(&gioc_lock);
+ /* Walk the IOC list and return the dev_index'th IOC that supports MCTP passthru */
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
+ if (ioc->facts.IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU) {
+ if (count == dev_index) {
+ spin_unlock(&gioc_lock);
+ return ioc;
+ }
+ count++;
+ }
+ }
+ spin_unlock(&gioc_lock);
+
+ return NULL;
+}
+
+/**
+ * mpt3sas_get_device_count - Retrieve the count of MCTP passthrough
+ * capable devices managed by the driver.
+ *
+ * Returns number of devices that support MCTP passthrough.
+ */
+int
+mpt3sas_get_device_count(void)
+{
+ int count = 0;
+ struct MPT3SAS_ADAPTER *ioc = NULL;
+
+ spin_lock(&gioc_lock);
+ /* Traverse ioc list and return number of IOC that support MCTP passthru */
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
+ if (ioc->facts.IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU)
+ count++;
+
+ spin_unlock(&gioc_lock);
+
+ return count;
+}
+EXPORT_SYMBOL(mpt3sas_get_device_count);
+
+/**
+ * mpt3sas_send_mctp_passthru_req - Send an MPI MCTP passthrough command to
+ * firmware
+ * @command: The MPI MCTP passthrough command to send to firmware
+ *
+ * Returns 0 on success, anything else is an error.
+ */
+int mpt3sas_send_mctp_passthru_req(struct mpt3_passthru_command *command)
+{
+ struct MPT3SAS_ADAPTER *ioc;
+ MPI2RequestHeader_t *mpi_request = NULL, *request;
+ MPI2DefaultReply_t *mpi_reply;
+ Mpi26MctpPassthroughRequest_t *mctp_passthru_req;
+ u16 smid;
+ unsigned long timeout;
+ u8 issue_reset = 0;
+ u32 sz;
+ void *psge;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma = 0;
+ size_t data_out_sz = 0;
+ void *data_in = NULL;
+ dma_addr_t data_in_dma = 0;
+ size_t data_in_sz = 0;
+ long ret;
+
+ /* Retrieve ioc from dev_index */
+ ioc = _ctl_get_mpt_mctp_passthru_adapter(command->dev_index);
+ if (!ioc)
+ return -ENODEV;
+
+ mutex_lock(&ioc->pci_access_mutex);
+ if (ioc->shost_recovery ||
+ ioc->pci_error_recovery || ioc->is_driver_loading ||
+ ioc->remove_host) {
+ ret = -EAGAIN;
+ goto unlock_pci_access;
+ }
+
+ /* Lock the ctl_cmds mutex to ensure a single ctl cmd is pending */
+ if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) {
+ ret = -ERESTARTSYS;
+ goto unlock_pci_access;
+ }
+
+ if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
+ ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
+ ret = -EAGAIN;
+ goto unlock_ctl_cmds;
+ }
+
+ ret = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
+ if (ret)
+ goto unlock_ctl_cmds;
+
+ mpi_request = (MPI2RequestHeader_t *)command->mpi_request;
+ if (mpi_request->Function != MPI2_FUNCTION_MCTP_PASSTHROUGH) {
+ ioc_err(ioc, "%s: Invalid request received, Function 0x%x\n",
+ __func__, mpi_request->Function);
+ ret = -EINVAL;
+ goto unlock_ctl_cmds;
+ }
+
+ /* Use first reserved smid for passthrough commands */
+ smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
+ ret = 0;
+ ioc->ctl_cmds.status = MPT3_CMD_PENDING;
+ memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
+ request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(request, 0, ioc->request_sz);
+ memcpy(request, command->mpi_request, sizeof(Mpi26MctpPassthroughRequest_t));
+ ioc->ctl_cmds.smid = smid;
+ data_out_sz = command->data_out_size;
+ data_in_sz = command->data_in_size;
+
+ /* obtain dma-able memory for data transfer */
+ if (data_out_sz) /* WRITE */ {
+ data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz,
+ &data_out_dma, GFP_ATOMIC);
+ if (!data_out) {
+ ret = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ memcpy(data_out, command->data_out_buf_ptr, data_out_sz);
+
+ }
+
+ if (data_in_sz) /* READ */ {
+ data_in = dma_alloc_coherent(&ioc->pdev->dev, data_in_sz,
+ &data_in_dma, GFP_ATOMIC);
+ if (!data_in) {
+ ret = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ }
+
+ psge = &((Mpi26MctpPassthroughRequest_t *)request)->H2DSGL;
+
+ init_completion(&ioc->ctl_cmds.done);
+
+ mctp_passthru_req = (Mpi26MctpPassthroughRequest_t *)request;
+
+ _ctl_send_mctp_passthru_req(ioc, mctp_passthru_req, psge, data_out_dma,
+ data_out_sz, data_in_dma, data_in_sz, smid);
+
+ timeout = command->timeout;
+ if (timeout < MPT3_IOCTL_DEFAULT_TIMEOUT)
+ timeout = MPT3_IOCTL_DEFAULT_TIMEOUT;
+
+ wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ);
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->ctl_cmds.status, mpi_request,
+ sizeof(Mpi26MctpPassthroughRequest_t) / 4, issue_reset);
+ goto issue_host_reset;
+ }
+
+ mpi_reply = ioc->ctl_cmds.reply;
+
+ /* copy out xdata to user */
+ if (data_in_sz)
+ memcpy(command->data_in_buf_ptr, data_in, data_in_sz);
+
+ /* copy out reply message frame to user */
+ if (command->max_reply_bytes) {
+ sz = min_t(u32, command->max_reply_bytes, ioc->reply_sz);
+ memcpy(command->reply_frame_buf_ptr, ioc->ctl_cmds.reply, sz);
+ }
+
+issue_host_reset:
+ if (issue_reset) {
+ ret = -ENODATA;
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ }
+
+out:
+ /* free memory associated with sg buffers */
+ if (data_in)
+ dma_free_coherent(&ioc->pdev->dev, data_in_sz, data_in,
+ data_in_dma);
+
+ if (data_out)
+ dma_free_coherent(&ioc->pdev->dev, data_out_sz, data_out,
+ data_out_dma);
+
+ ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
+
+unlock_ctl_cmds:
+ mutex_unlock(&ioc->ctl_cmds.mutex);
+
+unlock_pci_access:
+ mutex_unlock(&ioc->pci_access_mutex);
+ return ret;
+
+}
+EXPORT_SYMBOL(mpt3sas_send_mctp_passthru_req);
+
+/**
* _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked)
* @file: (struct file)
* @cmd: ioctl opcode
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
index 171709e91006..483e0549c02f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -160,6 +160,9 @@ struct mpt3_ioctl_pci_info {
#define MPT3_IOCTL_INTERFACE_SAS35 (0x07)
#define MPT2_IOCTL_VERSION_LENGTH (32)
+/* Bits set for mpt3_ioctl_iocinfo.driver_cap */
+#define MPT3_IOCTL_IOCINFO_DRIVER_CAP_MCTP_PASSTHRU 0x1
+
/**
* struct mpt3_ioctl_iocinfo - generic controller info
* @hdr - generic header
@@ -175,6 +178,7 @@ struct mpt3_ioctl_pci_info {
* @driver_version - driver version - 32 ASCII characters
* @rsvd1 - reserved
* @scsi_id - scsi id of adapter 0
+ * @driver_capability - driver capabilities
* @rsvd2 - reserved
* @pci_information - pci info (2nd revision)
*/
@@ -192,7 +196,8 @@ struct mpt3_ioctl_iocinfo {
uint8_t driver_version[MPT2_IOCTL_VERSION_LENGTH];
uint8_t rsvd1;
uint8_t scsi_id;
- uint16_t rsvd2;
+ uint8_t driver_capability;
+ uint8_t rsvd2;
struct mpt3_ioctl_pci_info pci_information;
};
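/*
 * Editor's sketch (not part of this patch): an application that has obtained
 * a filled struct mpt3_ioctl_iocinfo can test the new capability bit before
 * attempting MCTP passthrough. The helper name is hypothetical.
 */
static inline int example_ioc_supports_mctp_passthru(
	const struct mpt3_ioctl_iocinfo *info)
{
	return (info->driver_capability &
		MPT3_IOCTL_IOCINFO_DRIVER_CAP_MCTP_PASSTHRU) != 0;
}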
@@ -458,4 +463,46 @@ struct mpt3_enable_diag_sbr_reload {
struct mpt3_ioctl_header hdr;
};
+/**
+ * struct mpt3_passthru_command - generic mpt firmware passthru command
+ * @dev_index - device index
+ * @timeout - command timeout in seconds. (if zero then use driver default
+ * value).
+ * @reply_frame_buf_ptr - MPI reply location
+ * @data_in_buf_ptr - destination for read
+ * @data_out_buf_ptr - data source for write
+ * @max_reply_bytes - maximum number of reply bytes to be sent to app.
+ * @data_in_size - number of bytes for data transfer in (read)
+ * @data_out_size - number of bytes for data transfer out (write)
+ * @mpi_request - request frame
+ */
+struct mpt3_passthru_command {
+ u8 dev_index;
+ uint32_t timeout;
+ void *reply_frame_buf_ptr;
+ void *data_in_buf_ptr;
+ void *data_out_buf_ptr;
+ uint32_t max_reply_bytes;
+ uint32_t data_in_size;
+ uint32_t data_out_size;
+ Mpi26MctpPassthroughRequest_t *mpi_request;
+};
+
+/*
+ * mpt3sas_get_device_count - Retrieve the count of MCTP passthrough
+ * capable devices managed by the driver.
+ *
+ * Returns number of devices that support MCTP passthrough.
+ */
+int mpt3sas_get_device_count(void);
+
+/*
+ * mpt3sas_send_mctp_passthru_req - Send an MPI MCTP passthrough command to
+ * firmware
+ * @command: The MPI MCTP passthrough command to send to firmware
+ *
+ * Returns 0 on success, anything else is an error.
+ */
+int mpt3sas_send_mctp_passthru_req(struct mpt3_passthru_command *command);
+
#endif /* MPT3SAS_CTL_H_INCLUDED */
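/*
 * Editor's sketch (not part of this patch): how an in-kernel MCTP client
 * might drive the two exported entry points declared above. The buffers,
 * sizes and the function name are hypothetical; a timeout of 0 falls back
 * to the driver default, and leaving reply_frame_buf_ptr/max_reply_bytes
 * unset simply omits the MPI reply copy-out.
 */
static int example_mctp_passthru_once(u8 dev_index,
	Mpi26MctpPassthroughRequest_t *mpi_request,
	void *out_buf, u32 out_len, void *in_buf, u32 in_len)
{
	struct mpt3_passthru_command cmd = {
		.dev_index = dev_index,
		.timeout = 0,
		.mpi_request = mpi_request,
		.data_out_buf_ptr = out_buf,	/* host-to-device payload */
		.data_out_size = out_len,
		.data_in_buf_ptr = in_buf,	/* device-to-host response */
		.data_in_size = in_len,
	};

	if (dev_index >= mpt3sas_get_device_count())
		return -ENODEV;

	return mpt3sas_send_mctp_passthru_req(&cmd);
}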
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index a456e5ec74d8..508861e88d9f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -2703,7 +2703,7 @@ scsih_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
ssp_target = 1;
if (sas_device->device_info &
MPI2_SAS_DEVICE_INFO_SEP) {
- sdev_printk(KERN_WARNING, sdev,
+ sdev_printk(KERN_INFO, sdev,
"set ignore_delay_remove for handle(0x%04x)\n",
sas_device_priv_data->sas_target->handle);
sas_device_priv_data->ignore_delay_remove = 1;
@@ -12710,7 +12710,7 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
};
MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
-static struct pci_error_handlers _mpt3sas_err_handler = {
+static const struct pci_error_handlers _mpt3sas_err_handler = {
.error_detected = scsih_pci_error_detected,
.mmio_enabled = scsih_pci_mmio_enabled,
.slot_reset = scsih_pci_slot_reset,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index d84413b77d84..dc74ebc6405a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -328,10 +328,10 @@ struct rep_manu_reply {
u8 reserved0[2];
u8 sas_format;
u8 reserved2[3];
- u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
- u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
- u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
- u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN] __nonstring;
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN] __nonstring;
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN] __nonstring;
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN] __nonstring;
u16 component_id;
u8 component_revision_id;
u8 reserved3;
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 1444b1f1c4c8..c4592de4fefc 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -151,16 +151,6 @@ static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
}
-void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
-{
- u32 no;
- for_each_phy(phy_mask, phy_mask, no) {
- if (!(phy_mask & 1))
- continue;
- MVS_CHIP_DISP->phy_reset(mvi, no, hard);
- }
-}
-
int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata)
{
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 19b01f7c4767..09ce3f2241f2 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -425,7 +425,6 @@ struct mvs_task_exec_info {
void mvs_get_sas_addr(void *buf, u32 buflen);
void mvs_iounmap(void __iomem *regs);
int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex);
-void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard);
int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata);
void mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo,
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index c9539897048a..e87885cc701c 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -2876,7 +2876,7 @@ MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);
static enum cpuhp_state qedi_cpuhp_state;
-static struct pci_error_handlers qedi_err_handler = {
+static const struct pci_error_handlers qedi_err_handler = {
.error_detected = qedi_io_error_detected,
};
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
index 4f63aff333db..3a2bd953a976 100644
--- a/drivers/scsi/qla2xxx/qla_mr.h
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -282,8 +282,8 @@ struct register_host_info {
#define QLAFX00_TGT_NODE_LIST_SIZE (sizeof(uint32_t) * 32)
struct config_info_data {
- uint8_t model_num[16];
- uint8_t model_description[80];
+ uint8_t model_num[16] __nonstring;
+ uint8_t model_description[80] __nonstring;
uint8_t reserved0[160];
uint8_t symbolic_name[64];
uint8_t serial_num[32];
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 6d16546e1729..9e7a407ba1b9 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -2136,8 +2136,8 @@ qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data)
* @flash_id: Flash ID
*
* This function polls the device until bit 7 of what is read matches data
- * bit 7 or until data bit 5 becomes a 1. If that hapens, the flash ROM timed
- * out (a fatal error). The flash book recommeds reading bit 7 again after
+ * bit 7 or until data bit 5 becomes a 1. If that happens, the flash ROM timed
+ * out (a fatal error). The flash book recommends reading bit 7 again after
* reading bit 5 as a 1.
*
* Returns 0 on success, else non-zero.
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index a77e0499b738..53daf923ad8e 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -510,22 +510,34 @@ void scsi_attach_vpd(struct scsi_device *sdev)
return;
for (i = 4; i < vpd_buf->len; i++) {
- if (vpd_buf->data[i] == 0x0)
+ switch (vpd_buf->data[i]) {
+ case 0x0:
scsi_update_vpd_page(sdev, 0x0, &sdev->vpd_pg0);
- if (vpd_buf->data[i] == 0x80)
+ break;
+ case 0x80:
scsi_update_vpd_page(sdev, 0x80, &sdev->vpd_pg80);
- if (vpd_buf->data[i] == 0x83)
+ break;
+ case 0x83:
scsi_update_vpd_page(sdev, 0x83, &sdev->vpd_pg83);
- if (vpd_buf->data[i] == 0x89)
+ break;
+ case 0x89:
scsi_update_vpd_page(sdev, 0x89, &sdev->vpd_pg89);
- if (vpd_buf->data[i] == 0xb0)
+ break;
+ case 0xb0:
scsi_update_vpd_page(sdev, 0xb0, &sdev->vpd_pgb0);
- if (vpd_buf->data[i] == 0xb1)
+ break;
+ case 0xb1:
scsi_update_vpd_page(sdev, 0xb1, &sdev->vpd_pgb1);
- if (vpd_buf->data[i] == 0xb2)
+ break;
+ case 0xb2:
scsi_update_vpd_page(sdev, 0xb2, &sdev->vpd_pgb2);
- if (vpd_buf->data[i] == 0xb7)
+ break;
+ case 0xb7:
scsi_update_vpd_page(sdev, 0xb7, &sdev->vpd_pgb7);
+ break;
+ default:
+ break;
+ }
}
kfree(vpd_buf);
}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 5ceaa4665e5d..f0eec4708ddd 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -71,6 +71,10 @@ static const char *sdebug_version_date = "20210520";
#define NO_ADDITIONAL_SENSE 0x0
#define OVERLAP_ATOMIC_COMMAND_ASC 0x0
#define OVERLAP_ATOMIC_COMMAND_ASCQ 0x23
+#define FILEMARK_DETECTED_ASCQ 0x1
+#define EOP_EOM_DETECTED_ASCQ 0x2
+#define BEGINNING_OF_P_M_DETECTED_ASCQ 0x4
+#define EOD_DETECTED_ASCQ 0x5
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
@@ -80,8 +84,10 @@ static const char *sdebug_version_date = "20210520";
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
+#define UA_READY_ASC 0x28
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
+#define TOO_MANY_IN_PARTITION_ASC 0x3b
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
@@ -173,6 +179,37 @@ static const char *sdebug_version_date = "20210520";
#define DEF_ZBC_MAX_OPEN_ZONES 8
#define DEF_ZBC_NR_CONV_ZONES 1
+/* Default parameters for tape drives */
+#define TAPE_DEF_DENSITY 0x0
+#define TAPE_BAD_DENSITY 0x65
+#define TAPE_DEF_BLKSIZE 0
+#define TAPE_MIN_BLKSIZE 512
+#define TAPE_MAX_BLKSIZE 1048576
+#define TAPE_EW 20
+#define TAPE_MAX_PARTITIONS 2
+#define TAPE_UNITS 10000
+#define TAPE_PARTITION_1_UNITS 1000
+
+/* The tape block data definitions */
+#define TAPE_BLOCK_FM_FLAG ((u32)0x1 << 30)
+#define TAPE_BLOCK_EOD_FLAG ((u32)0x2 << 30)
+#define TAPE_BLOCK_MARK_MASK ((u32)0x3 << 30)
+#define TAPE_BLOCK_SIZE_MASK (~TAPE_BLOCK_MARK_MASK)
+#define TAPE_BLOCK_MARK(a) (a & TAPE_BLOCK_MARK_MASK)
+#define TAPE_BLOCK_SIZE(a) (a & TAPE_BLOCK_SIZE_MASK)
+#define IS_TAPE_BLOCK_FM(a) ((a & TAPE_BLOCK_FM_FLAG) != 0)
+#define IS_TAPE_BLOCK_EOD(a) ((a & TAPE_BLOCK_EOD_FLAG) != 0)
+
+struct tape_block {
+ u32 fl_size;
+ unsigned char data[4];
+};
+
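/*
 * Editor's sketch (not part of this patch): fl_size packs the mark type into
 * the top two bits and the stored block length into the low 30 bits, so a
 * simulated block can be decoded with the macros above. The helper name is
 * hypothetical.
 */
static inline bool example_tape_block_is_data(const struct tape_block *blp,
	u32 *length)
{
	if (TAPE_BLOCK_MARK(blp->fl_size)) {	/* filemark or EOD marker */
		*length = 0;
		return false;
	}
	*length = TAPE_BLOCK_SIZE(blp->fl_size);	/* low 30 bits */
	return true;
}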
+/* Flags for sense data */
+#define SENSE_FLAG_FILEMARK 0x80
+#define SENSE_FLAG_EOM 0x40
+#define SENSE_FLAG_ILI 0x20
+
#define SDEBUG_LUN_0_VAL 0
/* bit mask values for sdebug_opts */
@@ -216,7 +253,8 @@ static const char *sdebug_version_date = "20210520";
#define SDEBUG_UA_LUNS_CHANGED 5
#define SDEBUG_UA_MICROCODE_CHANGED 6 /* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
-#define SDEBUG_NUM_UAS 8
+#define SDEBUG_UA_NOT_READY_TO_READY 8
+#define SDEBUG_NUM_UAS 9
/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
* sector on read commands: */
@@ -262,11 +300,6 @@ static const char *sdebug_version_date = "20210520";
#define SDEB_XA_NOT_IN_USE XA_MARK_1
-static struct kmem_cache *queued_cmd_cache;
-
-#define TO_QUEUED_CMD(scmd) ((void *)(scmd)->host_scribble)
-#define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
-
/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
ZBC_ZTYPE_CNV = 0x1,
@@ -363,6 +396,19 @@ struct sdebug_dev_info {
ktime_t create_ts; /* time since bootup that this device was created */
struct sdeb_zone_state *zstate;
+ /* For tapes */
+ unsigned int tape_blksize;
+ unsigned int tape_density;
+ unsigned char tape_partition;
+ unsigned char tape_nbr_partitions;
+ unsigned char tape_pending_nbr_partitions;
+ unsigned int tape_pending_part_0_size;
+ unsigned int tape_pending_part_1_size;
+ unsigned char tape_dce;
+ unsigned int tape_location[TAPE_MAX_PARTITIONS];
+ unsigned int tape_eop[TAPE_MAX_PARTITIONS];
+ struct tape_block *tape_blocks[TAPE_MAX_PARTITIONS];
+
struct dentry *debugfs_entry;
struct spinlock list_lock;
struct list_head inject_err_list;
@@ -409,24 +455,9 @@ struct sdebug_defer {
enum sdeb_defer_type defer_t;
};
-struct sdebug_device_access_info {
- bool atomic_write;
- u64 lba;
- u32 num;
- struct scsi_cmnd *self;
-};
-
-struct sdebug_queued_cmd {
- /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
- * instance indicates this slot is in use.
- */
- struct sdebug_defer sd_dp;
- struct scsi_cmnd *scmd;
- struct sdebug_device_access_info *i;
-};
-
struct sdebug_scsi_cmd {
spinlock_t lock;
+ struct sdebug_defer sd_dp;
};
static atomic_t sdebug_cmnd_count; /* number of incoming commands */
@@ -483,22 +514,27 @@ enum sdeb_opcode_index {
SDEB_I_ZONE_OUT = 30, /* 0x94+SA; includes no data xfer */
SDEB_I_ZONE_IN = 31, /* 0x95+SA; all have data-in */
SDEB_I_ATOMIC_WRITE_16 = 32,
- SDEB_I_LAST_ELEM_P1 = 33, /* keep this last (previous + 1) */
+ SDEB_I_READ_BLOCK_LIMITS = 33,
+ SDEB_I_LOCATE = 34,
+ SDEB_I_WRITE_FILEMARKS = 35,
+ SDEB_I_SPACE = 36,
+ SDEB_I_FORMAT_MEDIUM = 37,
+ SDEB_I_LAST_ELEM_P1 = 38, /* keep this last (previous + 1) */
};
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
- 0, 0, 0, 0,
+ SDEB_I_FORMAT_MEDIUM, SDEB_I_READ_BLOCK_LIMITS, 0, 0,
SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
- 0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
- SDEB_I_RELEASE,
+ SDEB_I_WRITE_FILEMARKS, SDEB_I_SPACE, SDEB_I_INQUIRY, 0, 0,
+ SDEB_I_MODE_SELECT, SDEB_I_RESERVE, SDEB_I_RELEASE,
0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
- SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
+ SDEB_I_READ, 0, SDEB_I_WRITE, SDEB_I_LOCATE, 0, 0, 0, SDEB_I_VERIFY,
0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
@@ -573,6 +609,12 @@ static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_read_blklimits(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_locate(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_write_filemarks(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_space(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_rewind(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_format_medium(struct scsi_cmnd *, struct sdebug_dev_info *);
static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
@@ -581,8 +623,6 @@ static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);
-static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);
-
/*
* The following are overflow arrays for cdbs that "hit" the same index in
* the opcode_info_arr array. The most time sensitive (or commonly used) cdb
@@ -773,7 +813,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 20 */
{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
{6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
+ {0, 0x1, 0, 0, resp_rewind, NULL,
{6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
@@ -800,6 +840,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
resp_pre_fetch, pre_fetch_iarr,
{10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} }, /* PRE-FETCH (10) */
+ /* READ POSITION (10) */
/* 30 */
{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
@@ -810,11 +851,23 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
{16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
-/* 31 */
+/* 32 */
{0, 0x0, 0x0, F_D_OUT | FF_MEDIA_IO,
resp_atomic_write, NULL, /* ATOMIC WRITE 16 */
{16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} },
+ {0, 0x05, 0, F_D_IN, resp_read_blklimits, NULL, /* READ BLOCK LIMITS (6) */
+ {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {0, 0x2b, 0, F_D_UNKN, resp_locate, NULL, /* LOCATE (10) */
+ {10, 0x07, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xc7, 0, 0,
+ 0, 0, 0, 0} },
+ {0, 0x10, 0, F_D_IN, resp_write_filemarks, NULL, /* WRITE FILEMARKS (6) */
+ {6, 0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {0, 0x11, 0, F_D_IN, resp_space, NULL, /* SPACE (6) */
+ {6, 0x07, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {0, 0x4, 0, 0, resp_format_medium, NULL, /* FORMAT MEDIUM (6) */
+ {6, 0x3, 0x7, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+/* 38 */
/* sentinel */
{0xff, 0, 0, 0, NULL, NULL, /* terminating element */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
@@ -1331,6 +1384,30 @@ static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
my_name, key, asc, asq);
}
+/* Sense data that has information fields for tapes */
+static void mk_sense_info_tape(struct scsi_cmnd *scp, int key, int asc, int asq,
+ unsigned int information, unsigned char tape_flags)
+{
+ if (!scp->sense_buffer) {
+ sdev_printk(KERN_ERR, scp->device,
+ "%s: sense_buffer is NULL\n", __func__);
+ return;
+ }
+ memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+
+ scsi_build_sense(scp, /* sdebug_dsense */ 0, key, asc, asq);
+ /* only fixed format so far */
+
+ scp->sense_buffer[0] |= 0x80; /* valid */
+ scp->sense_buffer[2] |= tape_flags;
+ put_unaligned_be32(information, &scp->sense_buffer[3]);
+
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
+ my_name, key, asc, asq);
+}
+
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
@@ -1493,6 +1570,12 @@ static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
if (sdebug_verbose)
cp = "reported luns data has changed";
break;
+ case SDEBUG_UA_NOT_READY_TO_READY:
+ mk_sense_buffer(scp, UNIT_ATTENTION, UA_READY_ASC,
+ 0);
+ if (sdebug_verbose)
+ cp = "not ready to ready transition/media change";
+ break;
default:
pr_warn("unexpected unit attention code=%d\n", k);
if (sdebug_verbose)
@@ -2196,6 +2279,14 @@ static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
changing = (stopped_state != want_stop);
if (changing)
atomic_xchg(&devip->stopped, want_stop);
+ if (sdebug_ptype == TYPE_TAPE && !want_stop) {
+ int i;
+
+ set_bit(SDEBUG_UA_NOT_READY_TO_READY, devip->uas_bm); /* not legal! */
+ for (i = 0; i < TAPE_MAX_PARTITIONS; i++)
+ devip->tape_location[i] = 0;
+ devip->tape_partition = 0;
+ }
if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
return SDEG_RES_IMMED_MASK;
else
@@ -2728,6 +2819,76 @@ static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
return sizeof(sas_sha_m_pg);
}
+static unsigned char partition_pg[] = {0x11, 12, 1, 0, 0x24, 3, 9, 0,
+ 0xff, 0xff, 0x00, 0x00};
+
+static int resp_partition_m_pg(unsigned char *p, int pcontrol, int target)
+{ /* Partition page for mode_sense (tape) */
+ memcpy(p, partition_pg, sizeof(partition_pg));
+ if (pcontrol == 1)
+ memset(p + 2, 0, sizeof(partition_pg) - 2);
+ return sizeof(partition_pg);
+}
+
+static int process_medium_part_m_pg(struct sdebug_dev_info *devip,
+ unsigned char *new, int pg_len)
+{
+ int new_nbr, p0_size, p1_size;
+
+ if ((new[4] & 0x80) != 0) { /* FDP */
+ partition_pg[4] |= 0x80;
+ devip->tape_pending_nbr_partitions = TAPE_MAX_PARTITIONS;
+ devip->tape_pending_part_0_size = TAPE_UNITS - TAPE_PARTITION_1_UNITS;
+ devip->tape_pending_part_1_size = TAPE_PARTITION_1_UNITS;
+ } else {
+ new_nbr = new[3] + 1;
+ if (new_nbr > TAPE_MAX_PARTITIONS)
+ return 3;
+ if ((new[4] & 0x40) != 0) { /* SDP */
+ p1_size = TAPE_PARTITION_1_UNITS;
+ p0_size = TAPE_UNITS - p1_size;
+ if (p0_size < 100)
+ return 4;
+ } else if ((new[4] & 0x20) != 0) {
+ if (new_nbr > 1) {
+ p0_size = get_unaligned_be16(new + 8);
+ p1_size = get_unaligned_be16(new + 10);
+ if (p1_size == 0xFFFF)
+ p1_size = TAPE_UNITS - p0_size;
+ else if (p0_size == 0xFFFF)
+ p0_size = TAPE_UNITS - p1_size;
+ if (p0_size < 100 || p1_size < 100)
+ return 8;
+ } else {
+ p0_size = TAPE_UNITS;
+ p1_size = 0;
+ }
+ } else
+ return 6;
+ devip->tape_pending_nbr_partitions = new_nbr;
+ devip->tape_pending_part_0_size = p0_size;
+ devip->tape_pending_part_1_size = p1_size;
+ partition_pg[3] = new_nbr;
+ devip->tape_pending_nbr_partitions = new_nbr;
+ }
+
+ return 0;
+}
+
+static int resp_compression_m_pg(unsigned char *p, int pcontrol, int target,
+ unsigned char dce)
+{ /* Compression page for mode_sense (tape) */
+ unsigned char compression_pg[] = {0x0f, 14, 0x40, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 00, 00};
+
+ memcpy(p, compression_pg, sizeof(compression_pg));
+ if (dce)
+ p[2] |= 0x80;
+ if (pcontrol == 1)
+ memset(p + 2, 0, sizeof(compression_pg) - 2);
+ return sizeof(compression_pg);
+}
+
/* PAGE_SIZE is more than necessary but provides room for future expansion. */
#define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE
@@ -2742,7 +2903,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
unsigned char *ap;
unsigned char *arr __free(kfree);
unsigned char *cmd = scp->cmnd;
- bool dbd, llbaa, msense_6, is_disk, is_zbc;
+ bool dbd, llbaa, msense_6, is_disk, is_zbc, is_tape;
arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
if (!arr)
@@ -2755,7 +2916,8 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
is_disk = (sdebug_ptype == TYPE_DISK);
is_zbc = devip->zoned;
- if ((is_disk || is_zbc) && !dbd)
+ is_tape = (sdebug_ptype == TYPE_TAPE);
+ if ((is_disk || is_zbc || is_tape) && !dbd)
bd_len = llbaa ? 16 : 8;
else
bd_len = 0;
@@ -2793,15 +2955,25 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
put_unaligned_be32(0xffffffff, ap + 0);
else
put_unaligned_be32(sdebug_capacity, ap + 0);
- put_unaligned_be16(sdebug_sector_size, ap + 6);
+ if (is_tape) {
+ ap[0] = devip->tape_density;
+ put_unaligned_be16(devip->tape_blksize, ap + 6);
+ } else
+ put_unaligned_be16(sdebug_sector_size, ap + 6);
offset += bd_len;
ap = arr + offset;
} else if (16 == bd_len) {
+ if (is_tape) {
+ mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, 4);
+ return check_condition_result;
+ }
put_unaligned_be64((u64)sdebug_capacity, ap + 0);
put_unaligned_be32(sdebug_sector_size, ap + 12);
offset += bd_len;
ap = arr + offset;
}
+ if (cmd[2] == 0)
+ goto only_bd; /* Only block descriptor requested */
/*
* N.B. If len>0 before resp_*_pg() call, then form of that call should be:
@@ -2857,6 +3029,18 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
}
offset += len;
break;
+ case 0xf: /* Compression Mode Page (tape) */
+ if (!is_tape)
+ goto bad_pcode;
+ len = resp_compression_m_pg(ap, pcontrol, target, devip->tape_dce);
+ offset += len;
+ break;
+ case 0x11: /* Partition Mode Page (tape) */
+ if (!is_tape)
+ goto bad_pcode;
+ len = resp_partition_m_pg(ap, pcontrol, target);
+ offset += len;
+ break;
case 0x19: /* if spc==1 then sas phy, control+discover */
if (subpcode > 0x2 && subpcode < 0xff)
goto bad_subpcode;
@@ -2902,6 +3086,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
default:
goto bad_pcode;
}
+only_bd:
if (msense_6)
arr[0] = offset - 1;
else
@@ -2945,8 +3130,34 @@ static int resp_mode_select(struct scsi_cmnd *scp,
__func__, param_len, res);
md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
- off = bd_len + (mselect6 ? 4 : 8);
- if (md_len > 2 || off >= res) {
+ off = (mselect6 ? 4 : 8);
+ if (sdebug_ptype == TYPE_TAPE) {
+ int blksize;
+
+ if (bd_len != 8) {
+ mk_sense_invalid_fld(scp, SDEB_IN_DATA,
+ mselect6 ? 3 : 6, -1);
+ return check_condition_result;
+ }
+ if (arr[off] == TAPE_BAD_DENSITY) {
+ mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
+ return check_condition_result;
+ }
+ blksize = get_unaligned_be16(arr + off + 6);
+ if (blksize != 0 &&
+ (blksize < TAPE_MIN_BLKSIZE ||
+ blksize > TAPE_MAX_BLKSIZE ||
+ (blksize % 4) != 0)) {
+ mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, -1);
+ return check_condition_result;
+ }
+ devip->tape_density = arr[off];
+ devip->tape_blksize = blksize;
+ }
+ off += bd_len;
+ if (off >= res)
+ return 0; /* No page written, just descriptors */
+ if (md_len > 2) {
mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
return check_condition_result;
}
@@ -2984,6 +3195,25 @@ static int resp_mode_select(struct scsi_cmnd *scp,
goto set_mode_changed_ua;
}
break;
+ case 0xf: /* Compression mode page */
+ if (sdebug_ptype != TYPE_TAPE)
+ goto bad_pcode;
+ if ((arr[off + 2] & 0x40) != 0) {
+ devip->tape_dce = (arr[off + 2] & 0x80) != 0;
+ return 0;
+ }
+ break;
+ case 0x11: /* Medium Partition Mode Page (tape) */
+ if (sdebug_ptype == TYPE_TAPE) {
+ int fld;
+
+ fld = process_medium_part_m_pg(devip, &arr[off], pg_len);
+ if (fld == 0)
+ return 0;
+ mk_sense_invalid_fld(scp, SDEB_IN_DATA, fld, -1);
+ return check_condition_result;
+ }
+ break;
case 0x1c: /* Informational Exceptions Mode page */
if (iec_m_pg[1] == arr[off + 1]) {
memcpy(iec_m_pg + 2, arr + off + 2,
@@ -2999,6 +3229,10 @@ static int resp_mode_select(struct scsi_cmnd *scp,
set_mode_changed_ua:
set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
return 0;
+
+bad_pcode:
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
+ return check_condition_result;
}
static int resp_temp_l_pg(unsigned char *arr)
@@ -3138,6 +3372,265 @@ static int resp_log_sense(struct scsi_cmnd *scp,
min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
}
+enum {SDEBUG_READ_BLOCK_LIMITS_ARR_SZ = 6};
+static int resp_read_blklimits(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ unsigned char arr[SDEBUG_READ_BLOCK_LIMITS_ARR_SZ];
+
+ arr[0] = 4;
+ put_unaligned_be24(TAPE_MAX_BLKSIZE, arr + 1);
+ put_unaligned_be16(TAPE_MIN_BLKSIZE, arr + 4);
+ return fill_from_dev_buffer(scp, arr, SDEBUG_READ_BLOCK_LIMITS_ARR_SZ);
+}
+
+static int resp_locate(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ unsigned char *cmd = scp->cmnd;
+ unsigned int i, pos;
+ struct tape_block *blp;
+ int partition;
+
+ if ((cmd[1] & 0x02) != 0) {
+ if (cmd[8] >= devip->tape_nbr_partitions) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, -1);
+ return check_condition_result;
+ }
+ devip->tape_partition = cmd[8];
+ }
+ pos = get_unaligned_be32(cmd + 3);
+ partition = devip->tape_partition;
+
+ for (i = 0, blp = devip->tape_blocks[partition];
+ i < pos && i < devip->tape_eop[partition]; i++, blp++)
+ if (IS_TAPE_BLOCK_EOD(blp->fl_size))
+ break;
+ if (i < pos) {
+ devip->tape_location[partition] = i;
+ mk_sense_buffer(scp, BLANK_CHECK, 0x05, 0);
+ return check_condition_result;
+ }
+ devip->tape_location[partition] = pos;
+
+ return 0;
+}
+
+static int resp_write_filemarks(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ unsigned char *cmd = scp->cmnd;
+ unsigned int i, count, pos;
+ u32 data;
+ int partition = devip->tape_partition;
+
+	if ((cmd[1] & 0xfe) != 0) {	/* probably Write Setmarks, which are not supported in SCSI-3 and later */
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
+ return check_condition_result;
+ }
+ count = get_unaligned_be24(cmd + 2);
+ data = TAPE_BLOCK_FM_FLAG;
+ for (i = 0, pos = devip->tape_location[partition]; i < count; i++, pos++) {
+ if (pos >= devip->tape_eop[partition] - 1) { /* don't overwrite EOD */
+ devip->tape_location[partition] = devip->tape_eop[partition] - 1;
+ mk_sense_info_tape(scp, VOLUME_OVERFLOW, NO_ADDITIONAL_SENSE,
+ EOP_EOM_DETECTED_ASCQ, count, SENSE_FLAG_EOM);
+ return check_condition_result;
+ }
+ (devip->tape_blocks[partition] + pos)->fl_size = data;
+ }
+ (devip->tape_blocks[partition] + pos)->fl_size =
+ TAPE_BLOCK_EOD_FLAG;
+ devip->tape_location[partition] = pos;
+
+ return 0;
+}
+
+static int resp_space(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ unsigned char *cmd = scp->cmnd, code;
+ int i = 0, pos, count;
+ struct tape_block *blp;
+ int partition = devip->tape_partition;
+
+ count = get_unaligned_be24(cmd + 2);
+ if ((count & 0x800000) != 0) /* extend negative to 32-bit count */
+ count |= 0xff000000;
+ code = cmd[1] & 0x0f;
+
+ pos = devip->tape_location[partition];
+ if (code == 0) { /* blocks */
+ if (count < 0) {
+ count = (-count);
+ pos -= 1;
+ for (i = 0, blp = devip->tape_blocks[partition] + pos; i < count;
+ i++) {
+ if (pos < 0)
+ goto is_bop;
+ else if (IS_TAPE_BLOCK_FM(blp->fl_size))
+ goto is_fm;
+ if (i > 0) {
+ pos--;
+ blp--;
+ }
+ }
+ } else if (count > 0) {
+ for (i = 0, blp = devip->tape_blocks[partition] + pos; i < count;
+ i++, pos++, blp++) {
+ if (IS_TAPE_BLOCK_EOD(blp->fl_size))
+ goto is_eod;
+ if (IS_TAPE_BLOCK_FM(blp->fl_size)) {
+ pos += 1;
+ goto is_fm;
+ }
+ if (pos >= devip->tape_eop[partition])
+ goto is_eop;
+ }
+ }
+ } else if (code == 1) { /* filemarks */
+ if (count < 0) {
+ count = (-count);
+ if (pos == 0)
+ goto is_bop;
+ else {
+ for (i = 0, blp = devip->tape_blocks[partition] + pos;
+ i < count && pos >= 0; i++, pos--, blp--) {
+ for (pos--, blp-- ; !IS_TAPE_BLOCK_FM(blp->fl_size) &&
+ pos >= 0; pos--, blp--)
+ ; /* empty */
+ if (pos < 0)
+ goto is_bop;
+ }
+ }
+ pos += 1;
+ } else if (count > 0) {
+ for (i = 0, blp = devip->tape_blocks[partition] + pos;
+ i < count; i++, pos++, blp++) {
+ for ( ; !IS_TAPE_BLOCK_FM(blp->fl_size) &&
+ !IS_TAPE_BLOCK_EOD(blp->fl_size) &&
+ pos < devip->tape_eop[partition];
+ pos++, blp++)
+ ; /* empty */
+ if (IS_TAPE_BLOCK_EOD(blp->fl_size))
+ goto is_eod;
+ if (pos >= devip->tape_eop[partition])
+ goto is_eop;
+ }
+ }
+ } else if (code == 3) { /* EOD */
+ for (blp = devip->tape_blocks[partition] + pos;
+ !IS_TAPE_BLOCK_EOD(blp->fl_size) && pos < devip->tape_eop[partition];
+ pos++, blp++)
+ ; /* empty */
+ if (pos >= devip->tape_eop[partition])
+ goto is_eop;
+ } else {
+ /* sequential filemarks not supported */
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, -1);
+ return check_condition_result;
+ }
+ devip->tape_location[partition] = pos;
+ return 0;
+
+is_fm:
+ devip->tape_location[partition] = pos;
+ mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
+ FILEMARK_DETECTED_ASCQ, count - i,
+ SENSE_FLAG_FILEMARK);
+ return check_condition_result;
+
+is_eod:
+ devip->tape_location[partition] = pos;
+ mk_sense_info_tape(scp, BLANK_CHECK, NO_ADDITIONAL_SENSE,
+ EOD_DETECTED_ASCQ, count - i,
+ 0);
+ return check_condition_result;
+
+is_bop:
+ devip->tape_location[partition] = 0;
+ mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
+ BEGINNING_OF_P_M_DETECTED_ASCQ, count - i,
+ SENSE_FLAG_EOM);
+ return check_condition_result;
+
+is_eop:
+ devip->tape_location[partition] = devip->tape_eop[partition] - 1;
+ mk_sense_info_tape(scp, MEDIUM_ERROR, NO_ADDITIONAL_SENSE,
+ EOP_EOM_DETECTED_ASCQ, (unsigned int)i,
+ SENSE_FLAG_EOM);
+ return check_condition_result;
+}
+
+static int resp_rewind(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ devip->tape_location[devip->tape_partition] = 0;
+
+ return 0;
+}
+
+static int partition_tape(struct sdebug_dev_info *devip, int nbr_partitions,
+ int part_0_size, int part_1_size)
+{
+ int i;
+
+ if (part_0_size + part_1_size > TAPE_UNITS)
+ return -1;
+ devip->tape_eop[0] = part_0_size;
+ devip->tape_blocks[0]->fl_size = TAPE_BLOCK_EOD_FLAG;
+ devip->tape_eop[1] = part_1_size;
+ devip->tape_blocks[1] = devip->tape_blocks[0] +
+ devip->tape_eop[0];
+ devip->tape_blocks[1]->fl_size = TAPE_BLOCK_EOD_FLAG;
+
+ for (i = 0 ; i < TAPE_MAX_PARTITIONS; i++)
+ devip->tape_location[i] = 0;
+
+ devip->tape_nbr_partitions = nbr_partitions;
+ devip->tape_partition = 0;
+
+ partition_pg[3] = nbr_partitions - 1;
+ put_unaligned_be16(devip->tape_eop[0], partition_pg + 8);
+ put_unaligned_be16(devip->tape_eop[1], partition_pg + 10);
+
+ return nbr_partitions;
+}
+
+static int resp_format_medium(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ int res = 0;
+ unsigned char *cmd = scp->cmnd;
+
+ if (sdebug_ptype != TYPE_TAPE) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 0, -1);
+ return check_condition_result;
+ }
+ if (cmd[2] > 2) {
+		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
+ return check_condition_result;
+ }
+ if (cmd[2] != 0) {
+ if (devip->tape_pending_nbr_partitions > 0) {
+ res = partition_tape(devip,
+ devip->tape_pending_nbr_partitions,
+ devip->tape_pending_part_0_size,
+ devip->tape_pending_part_1_size);
+ } else
+ res = partition_tape(devip, devip->tape_nbr_partitions,
+ devip->tape_eop[0], devip->tape_eop[1]);
+ } else
+ res = partition_tape(devip, 1, TAPE_UNITS, 0);
+ if (res < 0)
+ return -EINVAL;
+
+ devip->tape_pending_nbr_partitions = -1;
+
+ return 0;
+}
+
static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
{
return devip->nr_zones != 0;
@@ -3871,6 +4364,98 @@ static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
return ret;
}
+static int resp_read_tape(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ u32 i, num, transfer, size;
+ u8 *cmd = scp->cmnd;
+ struct scsi_data_buffer *sdb = &scp->sdb;
+ int partition = devip->tape_partition;
+ u32 pos = devip->tape_location[partition];
+ struct tape_block *blp;
+ bool fixed, sili;
+
+ if (cmd[0] != READ_6) { /* Only Read(6) supported */
+ mk_sense_invalid_opcode(scp);
+ return illegal_condition_result;
+ }
+ fixed = (cmd[1] & 0x1) != 0;
+ sili = (cmd[1] & 0x2) != 0;
+ if (fixed && sili) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
+ return check_condition_result;
+ }
+
+ transfer = get_unaligned_be24(cmd + 2);
+ if (fixed) {
+ num = transfer;
+ size = devip->tape_blksize;
+ } else {
+ if (transfer < TAPE_MIN_BLKSIZE ||
+ transfer > TAPE_MAX_BLKSIZE) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
+ return check_condition_result;
+ }
+ num = 1;
+ size = transfer;
+ }
+
+ for (i = 0, blp = devip->tape_blocks[partition] + pos;
+ i < num && pos < devip->tape_eop[partition];
+ i++, pos++, blp++) {
+ devip->tape_location[partition] = pos + 1;
+ if (IS_TAPE_BLOCK_FM(blp->fl_size)) {
+ mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
+ FILEMARK_DETECTED_ASCQ, fixed ? num - i : size,
+ SENSE_FLAG_FILEMARK);
+ scsi_set_resid(scp, (num - i) * size);
+ return check_condition_result;
+ }
+ /* Assume no REW */
+ if (IS_TAPE_BLOCK_EOD(blp->fl_size)) {
+ mk_sense_info_tape(scp, BLANK_CHECK, NO_ADDITIONAL_SENSE,
+ EOD_DETECTED_ASCQ, fixed ? num - i : size,
+ 0);
+ devip->tape_location[partition] = pos;
+ scsi_set_resid(scp, (num - i) * size);
+ return check_condition_result;
+ }
+ sg_zero_buffer(sdb->table.sgl, sdb->table.nents,
+ size, i * size);
+ sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
+ &(blp->data), 4, i * size, false);
+ if (fixed) {
+ if (blp->fl_size != devip->tape_blksize) {
+ scsi_set_resid(scp, (num - i) * size);
+ mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
+ 0, num - i,
+ SENSE_FLAG_ILI);
+ return check_condition_result;
+ }
+ } else {
+ if (blp->fl_size != size) {
+ if (blp->fl_size < size)
+ scsi_set_resid(scp, size - blp->fl_size);
+ if (!sili) {
+ mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
+ 0, size - blp->fl_size,
+ SENSE_FLAG_ILI);
+ return check_condition_result;
+ }
+ }
+ }
+ }
+ if (pos >= devip->tape_eop[partition]) {
+ mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
+ EOP_EOM_DETECTED_ASCQ, fixed ? num - i : size,
+ SENSE_FLAG_EOM);
+ devip->tape_location[partition] = pos - 1;
+ return check_condition_result;
+ }
+ devip->tape_location[partition] = pos;
+
+ return 0;
+}
+
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
bool check_prot;
@@ -3882,6 +4467,9 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
u8 *cmd = scp->cmnd;
bool meta_data_locked = false;
+ if (sdebug_ptype == TYPE_TAPE)
+ return resp_read_tape(scp, devip);
+
switch (cmd[0]) {
case READ_16:
ei_lba = 0;
@@ -4178,6 +4766,67 @@ static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
}
}
+static int resp_write_tape(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ u32 i, num, transfer, size, written = 0;
+ u8 *cmd = scp->cmnd;
+ struct scsi_data_buffer *sdb = &scp->sdb;
+ int partition = devip->tape_partition;
+ int pos = devip->tape_location[partition];
+ struct tape_block *blp;
+ bool fixed, ew;
+
+ if (cmd[0] != WRITE_6) { /* Only Write(6) supported */
+ mk_sense_invalid_opcode(scp);
+ return illegal_condition_result;
+ }
+
+ fixed = (cmd[1] & 1) != 0;
+ transfer = get_unaligned_be24(cmd + 2);
+ if (fixed) {
+ num = transfer;
+ size = devip->tape_blksize;
+ } else {
+ if (transfer < TAPE_MIN_BLKSIZE ||
+ transfer > TAPE_MAX_BLKSIZE) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
+ return check_condition_result;
+ }
+ num = 1;
+ size = transfer;
+ }
+
+	scsi_set_resid(scp, num * size);
+ for (i = 0, blp = devip->tape_blocks[partition] + pos, ew = false;
+ i < num && pos < devip->tape_eop[partition] - 1; i++, pos++, blp++) {
+ blp->fl_size = size;
+ sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
+ &(blp->data), 4, i * size, true);
+ written += size;
+		scsi_set_resid(scp, num * size - written);
+ ew |= (pos == devip->tape_eop[partition] - TAPE_EW);
+ }
+
+ devip->tape_location[partition] = pos;
+ blp->fl_size = TAPE_BLOCK_EOD_FLAG;
+ if (pos >= devip->tape_eop[partition] - 1) {
+ mk_sense_info_tape(scp, VOLUME_OVERFLOW,
+ NO_ADDITIONAL_SENSE, EOP_EOM_DETECTED_ASCQ,
+ fixed ? num - i : transfer,
+ SENSE_FLAG_EOM);
+ return check_condition_result;
+ }
+ if (ew) { /* early warning */
+ mk_sense_info_tape(scp, NO_SENSE,
+ NO_ADDITIONAL_SENSE, EOP_EOM_DETECTED_ASCQ,
+ fixed ? num - i : transfer,
+ SENSE_FLAG_EOM);
+ return check_condition_result;
+ }
+
+ return 0;
+}
+
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
bool check_prot;
@@ -4190,6 +4839,9 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
u8 *cmd = scp->cmnd;
bool meta_data_locked = false;
+ if (sdebug_ptype == TYPE_TAPE)
+ return resp_write_tape(scp, devip);
+
switch (cmd[0]) {
case WRITE_16:
ei_lba = 0;
@@ -4918,7 +5570,10 @@ static int resp_sync_cache(struct scsi_cmnd *scp,
* a GOOD status otherwise. Model a disk with a big cache and yield
* CONDITION MET. Actually tries to bring range in main memory into the
* cache associated with the CPU(s).
+ *
+ * The opcode 0x34 is also used for READ POSITION by tape devices.
*/
+enum {SDEBUG_READ_POSITION_ARR_SZ = 20};
static int resp_pre_fetch(struct scsi_cmnd *scp,
struct sdebug_dev_info *devip)
{
@@ -4930,6 +5585,31 @@ static int resp_pre_fetch(struct scsi_cmnd *scp,
struct sdeb_store_info *sip = devip2sip(devip, true);
u8 *fsp = sip->storep;
+ if (sdebug_ptype == TYPE_TAPE) {
+ if (cmd[0] == PRE_FETCH) { /* READ POSITION (10) */
+ int all_length;
+ unsigned char arr[20];
+ unsigned int pos;
+
+ all_length = get_unaligned_be16(cmd + 7);
+ if ((cmd[1] & 0xfe) != 0 ||
+ all_length != 0) { /* only short form */
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB,
+ all_length ? 7 : 1, 0);
+ return check_condition_result;
+ }
+ memset(arr, 0, SDEBUG_READ_POSITION_ARR_SZ);
+ arr[1] = devip->tape_partition;
+ pos = devip->tape_location[devip->tape_partition];
+ put_unaligned_be32(pos, arr + 4);
+ put_unaligned_be32(pos, arr + 8);
+ return fill_from_dev_buffer(scp, arr,
+ SDEBUG_READ_POSITION_ARR_SZ);
+ }
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+
if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
lba = get_unaligned_be32(cmd + 2);
nblks = get_unaligned_be16(cmd + 7);
@@ -5638,10 +6318,10 @@ static u32 get_tag(struct scsi_cmnd *cmnd)
/* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
- struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
+ struct sdebug_scsi_cmd *sdsc = container_of(sd_dp,
+ typeof(*sdsc), sd_dp);
+ struct scsi_cmnd *scp = (struct scsi_cmnd *)sdsc - 1;
unsigned long flags;
- struct scsi_cmnd *scp = sqcp->scmd;
- struct sdebug_scsi_cmd *sdsc;
bool aborted;
if (sdebug_statistics) {
@@ -5652,27 +6332,23 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
if (!scp) {
pr_err("scmd=NULL\n");
- goto out;
+ return;
}
- sdsc = scsi_cmd_priv(scp);
spin_lock_irqsave(&sdsc->lock, flags);
aborted = sd_dp->aborted;
if (unlikely(aborted))
sd_dp->aborted = false;
- ASSIGN_QUEUED_CMD(scp, NULL);
spin_unlock_irqrestore(&sdsc->lock, flags);
if (aborted) {
pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
blk_abort_request(scsi_cmd_to_rq(scp));
- goto out;
+ return;
}
scsi_done(scp); /* callback to mid level */
-out:
- sdebug_free_queued_cmd(sqcp);
}
/* When high resolution timer goes off this function is called. */
@@ -5835,6 +6511,10 @@ static struct sdebug_dev_info *sdebug_device_create(
} else {
devip->zoned = false;
}
+ if (sdebug_ptype == TYPE_TAPE) {
+ devip->tape_density = TAPE_DEF_DENSITY;
+ devip->tape_blksize = TAPE_DEF_BLKSIZE;
+ }
devip->create_ts = ktime_get_boottime();
atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
spin_lock_init(&devip->list_lock);
@@ -5905,6 +6585,21 @@ static int scsi_debug_sdev_configure(struct scsi_device *sdp,
if (devip == NULL)
return 1; /* no resources, will be marked offline */
}
+ if (sdebug_ptype == TYPE_TAPE) {
+ if (!devip->tape_blocks[0]) {
+ devip->tape_blocks[0] =
+ kcalloc(TAPE_UNITS, sizeof(struct tape_block),
+ GFP_KERNEL);
+ if (!devip->tape_blocks[0])
+ return 1;
+ }
+ devip->tape_pending_nbr_partitions = -1;
+ if (partition_tape(devip, 1, TAPE_UNITS, 0) < 0) {
+ kfree(devip->tape_blocks[0]);
+ devip->tape_blocks[0] = NULL;
+ return 1;
+ }
+ }
sdp->hostdata = devip;
if (sdebug_no_uld)
sdp->no_uld_attach = 1;
@@ -5950,31 +6645,41 @@ static void scsi_debug_sdev_destroy(struct scsi_device *sdp)
debugfs_remove(devip->debugfs_entry);
+ if (sdebug_ptype == TYPE_TAPE) {
+ kfree(devip->tape_blocks[0]);
+ devip->tape_blocks[0] = NULL;
+ }
+
/* make this slot available for re-use */
devip->used = false;
sdp->hostdata = NULL;
}
-/* Returns true if we require the queued memory to be freed by the caller. */
-static bool stop_qc_helper(struct sdebug_defer *sd_dp,
- enum sdeb_defer_type defer_t)
+/* Returns true if the deferred completion was cancelled or its callback is not running. */
+static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
{
+ struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
+ struct sdebug_defer *sd_dp = &sdsc->sd_dp;
+ enum sdeb_defer_type defer_t = READ_ONCE(sd_dp->defer_t);
+
+ lockdep_assert_held(&sdsc->lock);
+
if (defer_t == SDEB_DEFER_HRT) {
int res = hrtimer_try_to_cancel(&sd_dp->hrt);
switch (res) {
- case 0: /* Not active, it must have already run */
case -1: /* -1 It's executing the CB */
return false;
+ case 0: /* Not active, it must have already run */
case 1: /* Was active, we've now cancelled */
default:
return true;
}
} else if (defer_t == SDEB_DEFER_WQ) {
/* Cancel if pending */
- if (cancel_work_sync(&sd_dp->ew.work))
+ if (cancel_work(&sd_dp->ew.work))
return true;
- /* Was not pending, so it must have run */
+ /* callback may be running, so return false */
return false;
} else if (defer_t == SDEB_DEFER_POLL) {
return true;
@@ -5983,28 +6688,6 @@ static bool stop_qc_helper(struct sdebug_defer *sd_dp,
return false;
}
-
-static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
-{
- enum sdeb_defer_type l_defer_t;
- struct sdebug_defer *sd_dp;
- struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
- struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);
-
- lockdep_assert_held(&sdsc->lock);
-
- if (!sqcp)
- return false;
- sd_dp = &sqcp->sd_dp;
- l_defer_t = READ_ONCE(sd_dp->defer_t);
- ASSIGN_QUEUED_CMD(cmnd, NULL);
-
- if (stop_qc_helper(sd_dp, l_defer_t))
- sdebug_free_queued_cmd(sqcp);
-
- return true;
-}
-
/*
* Called from scsi_debug_abort() only, which is for timed-out cmd.
*/
@@ -6076,7 +6759,7 @@ static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
{
- bool ok = scsi_debug_abort_cmnd(SCpnt);
+ bool aborted = scsi_debug_abort_cmnd(SCpnt);
u8 *cmd = SCpnt->cmnd;
u8 opcode = cmd[0];
@@ -6085,7 +6768,8 @@ static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, SCpnt->device,
"%s: command%s found\n", __func__,
- ok ? "" : " not");
+ aborted ? "" : " not");
+
if (sdebug_fail_abort(SCpnt)) {
scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
@@ -6093,6 +6777,9 @@ static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
return FAILED;
}
+	if (!aborted)
+ return FAILED;
+
return SUCCESS;
}
@@ -6144,6 +6831,22 @@ static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
return 0;
}
+static void scsi_tape_reset_clear(struct sdebug_dev_info *devip)
+{
+ if (sdebug_ptype == TYPE_TAPE) {
+ int i;
+
+ devip->tape_blksize = TAPE_DEF_BLKSIZE;
+ devip->tape_density = TAPE_DEF_DENSITY;
+ devip->tape_partition = 0;
+ devip->tape_dce = 0;
+ for (i = 0; i < TAPE_MAX_PARTITIONS; i++)
+ devip->tape_location[i] = 0;
+ devip->tape_pending_nbr_partitions = -1;
+ /* Don't reset partitioning? */
+ }
+}
+
static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
{
struct scsi_device *sdp = SCpnt->device;
@@ -6157,8 +6860,10 @@ static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
scsi_debug_stop_all_queued(sdp);
- if (devip)
+ if (devip) {
set_bit(SDEBUG_UA_POR, devip->uas_bm);
+ scsi_tape_reset_clear(devip);
+ }
if (sdebug_fail_lun_reset(SCpnt)) {
scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
@@ -6196,6 +6901,7 @@ static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
if (devip->target == sdp->id) {
set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
+ scsi_tape_reset_clear(devip);
++k;
}
}
@@ -6227,6 +6933,7 @@ static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
+ scsi_tape_reset_clear(devip);
++k;
}
@@ -6250,6 +6957,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
list_for_each_entry(devip, &sdbg_host->dev_info_list,
dev_list) {
set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
+ scsi_tape_reset_clear(devip);
++k;
}
}
@@ -6366,33 +7074,6 @@ static bool inject_on_this_cmd(void)
#define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
-
-void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
-{
- if (sqcp)
- kmem_cache_free(queued_cmd_cache, sqcp);
-}
-
-static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
-{
- struct sdebug_queued_cmd *sqcp;
- struct sdebug_defer *sd_dp;
-
- sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
- if (!sqcp)
- return NULL;
-
- sd_dp = &sqcp->sd_dp;
-
- hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
- sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
- INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
-
- sqcp->scmd = scmd;
-
- return sqcp;
-}
-
/* Complete the processing of the thread that queued a SCSI command to this
* driver. It either completes the command by calling cmnd_done() or
* schedules a hr timer or work queue then returns 0. Returns
@@ -6409,7 +7090,6 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
unsigned long flags;
u64 ns_from_boot = 0;
- struct sdebug_queued_cmd *sqcp;
struct scsi_device *sdp;
struct sdebug_defer *sd_dp;
@@ -6441,12 +7121,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
}
}
- sqcp = sdebug_alloc_queued_cmd(cmnd);
- if (!sqcp) {
- pr_err("%s no alloc\n", __func__);
- return SCSI_MLQUEUE_HOST_BUSY;
- }
- sd_dp = &sqcp->sd_dp;
+ sd_dp = &sdsc->sd_dp;
if (polled || (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS))
ns_from_boot = ktime_get_boottime_ns();
@@ -6494,7 +7169,6 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (kt <= d) { /* elapsed duration >= kt */
/* call scsi_done() from this thread */
- sdebug_free_queued_cmd(sqcp);
scsi_done(cmnd);
return 0;
}
@@ -6507,13 +7181,11 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (polled) {
spin_lock_irqsave(&sdsc->lock, flags);
sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
- ASSIGN_QUEUED_CMD(cmnd, sqcp);
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
spin_unlock_irqrestore(&sdsc->lock, flags);
} else {
/* schedule the invocation of scsi_done() for a later time */
spin_lock_irqsave(&sdsc->lock, flags);
- ASSIGN_QUEUED_CMD(cmnd, sqcp);
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
/*
@@ -6537,13 +7209,11 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
sd_dp->issuing_cpu = raw_smp_processor_id();
if (polled) {
spin_lock_irqsave(&sdsc->lock, flags);
- ASSIGN_QUEUED_CMD(cmnd, sqcp);
sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
spin_unlock_irqrestore(&sdsc->lock, flags);
} else {
spin_lock_irqsave(&sdsc->lock, flags);
- ASSIGN_QUEUED_CMD(cmnd, sqcp);
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
schedule_work(&sd_dp->ew.work);
spin_unlock_irqrestore(&sdsc->lock, flags);
@@ -6835,7 +7505,7 @@ static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
&data);
if (f >= 0) {
- seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
+ seq_printf(m, " BUSY: %s: %d,%d\n",
"first,last bits", f, l);
}
}
@@ -7910,12 +8580,6 @@ static int __init scsi_debug_init(void)
hosts_to_add = sdebug_add_host;
sdebug_add_host = 0;
- queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
- if (!queued_cmd_cache) {
- ret = -ENOMEM;
- goto driver_unreg;
- }
-
sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
if (IS_ERR_OR_NULL(sdebug_debugfs_root))
pr_info("%s: failed to create initial debugfs directory\n", __func__);
@@ -7942,8 +8606,6 @@ static int __init scsi_debug_init(void)
return 0;
-driver_unreg:
- driver_unregister(&sdebug_driverfs_driver);
bus_unreg:
bus_unregister(&pseudo_lld_bus);
dev_unreg:
@@ -7959,7 +8621,6 @@ static void __exit scsi_debug_exit(void)
for (; k; k--)
sdebug_do_remove_host(true);
- kmem_cache_destroy(queued_cmd_cache);
driver_unregister(&sdebug_driverfs_driver);
bus_unregister(&pseudo_lld_bus);
root_device_unregister(pseudo_primary);
@@ -8343,7 +9004,6 @@ static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
struct sdebug_defer *sd_dp;
u32 unique_tag = blk_mq_unique_tag(rq);
u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
- struct sdebug_queued_cmd *sqcp;
unsigned long flags;
int queue_num = data->queue_num;
ktime_t time;
@@ -8359,13 +9019,7 @@ static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
time = ktime_get_boottime();
spin_lock_irqsave(&sdsc->lock, flags);
- sqcp = TO_QUEUED_CMD(cmd);
- if (!sqcp) {
- spin_unlock_irqrestore(&sdsc->lock, flags);
- return true;
- }
-
- sd_dp = &sqcp->sd_dp;
+ sd_dp = &sdsc->sd_dp;
if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
spin_unlock_irqrestore(&sdsc->lock, flags);
return true;
@@ -8375,8 +9029,6 @@ static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
spin_unlock_irqrestore(&sdsc->lock, flags);
return true;
}
-
- ASSIGN_QUEUED_CMD(cmd, NULL);
spin_unlock_irqrestore(&sdsc->lock, flags);
if (sdebug_statistics) {
@@ -8385,8 +9037,6 @@ static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
atomic_inc(&sdebug_miss_cpus);
}
- sdebug_free_queued_cmd(sqcp);
-
scsi_done(cmd); /* callback to mid level */
(*data->num_entries)++;
return true;
@@ -8701,8 +9351,12 @@ err_out:
static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
+ struct sdebug_defer *sd_dp = &sdsc->sd_dp;
spin_lock_init(&sdsc->lock);
+ hrtimer_setup(&sd_dp->hrt, sdebug_q_cmd_hrt_complete, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
return 0;
}
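
The SPACE(6) handling in resp_space() above sign-extends the 24-bit count field by hand before deciding whether to space forward or backward. A minimal standalone sketch of that conversion (the helper name and test values are illustrative, not part of the driver):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: extend a 24-bit two's-complement field to a signed
     * 32-bit value, as resp_space() does with
     * "if ((count & 0x800000) != 0) count |= 0xff000000;". */
    static int32_t sign_extend24(uint32_t raw24)
    {
    	raw24 &= 0xffffff;		/* keep only the 24-bit field */
    	if (raw24 & 0x800000)		/* sign bit of the 24-bit field */
    		raw24 |= 0xff000000;	/* propagate it through bits 31:24 */
    	return (int32_t)raw24;
    }

    int main(void)
    {
    	printf("%d\n", (int)sign_extend24(0x000005));	/* prints 5  */
    	printf("%d\n", (int)sign_extend24(0xfffffb));	/* prints -5 */
    	return 0;
    }
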
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 815e7d63f3e2..376b8897ab90 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -547,6 +547,18 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
scsi_report_sense(sdev, &sshdr);
+ if (sshdr.sense_key == UNIT_ATTENTION) {
+ /*
+	 * Increment the Power on/Reset and New Media counters so that all
+	 * ULDs interested in these events can see that they have happened,
+	 * even if another user of the device consumes the sense data.
+ */
+ if (sshdr.asc == 0x28)
+ scmd->device->ua_new_media_ctr++;
+ else if (sshdr.asc == 0x29)
+ scmd->device->ua_por_ctr++;
+ }
+
if (scsi_sense_is_deferred(&sshdr))
return NEEDS_RETRY;
@@ -711,6 +723,13 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
return SUCCESS;
case COMPLETED:
+ /*
+ * A command using command duration limits (CDL) with a
+ * descriptor set with policy 0xD may be completed with success
+ * and the sense data DATA CURRENTLY UNAVAILABLE, indicating
+ * that the command was in fact aborted because it exceeded its
+ * duration limit. Never retry these commands.
+ */
if (sshdr.asc == 0x55 && sshdr.ascq == 0x0a) {
set_scsi_ml_byte(scmd, SCSIML_STAT_DL_TIMEOUT);
req->cmd_flags |= REQ_FAILFAST_DEV;
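
The scsi_error.c hunk above only bumps per-device counters when a Power on/Reset or New Media unit attention is decoded; interested upper-level drivers (as st.c does further below) keep a snapshot and compare it later. A small standalone sketch of that snapshot-and-compare pattern, with made-up structure and function names:

    #include <stdio.h>

    struct dev_counters {
    	unsigned int por_ctr;		/* power-on/reset unit attentions seen */
    };

    struct uld_state {
    	unsigned int saved_por_ctr;	/* snapshot taken at probe time */
    };

    /* Producer side: called whenever a POR unit attention is decoded. */
    static void note_por_event(struct dev_counters *dc)
    {
    	dc->por_ctr++;
    }

    /* Consumer side: returns 1 if a reset happened since the last check,
     * even if another command consumed the actual sense data. */
    static int por_seen_since_last_check(struct uld_state *st,
    				     const struct dev_counters *dc)
    {
    	if (st->saved_por_ctr == dc->por_ctr)
    		return 0;
    	st->saved_por_ctr = dc->por_ctr;
    	return 1;
    }

    int main(void)
    {
    	struct dev_counters dc = { 0 };
    	struct uld_state st = { .saved_por_ctr = dc.por_ctr };

    	note_por_event(&dc);
    	printf("%d\n", por_seen_since_last_check(&st, &dc));	/* 1 */
    	printf("%d\n", por_seen_since_last_check(&st, &dc));	/* 0 */
    	return 0;
    }
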
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f1cfe0bb89b2..0d29470e86b0 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1149,7 +1149,7 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
* Next, walk the list, and fill in the addresses and sizes of
* each segment.
*/
- count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg);
+ count = __blk_rq_map_sg(rq, cmd->sdb.table.sgl, &last_sg);
if (blk_rq_bytes(rq) & rq->q->limits.dma_pad_mask) {
unsigned int pad_len =
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 96d7e1a9a7c7..4833b8fe251b 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -151,8 +151,9 @@ int scsi_complete_async_scans(void)
struct async_scan_data *data;
do {
- if (list_empty(&scanning_hosts))
- return 0;
+ scoped_guard(spinlock, &async_scan_lock)
+ if (list_empty(&scanning_hosts))
+ return 0;
/* If we can't get memory immediately, that's OK. Just
* sleep a little. Even if we never get memory, the async
* scans will finish eventually.
diff --git a/drivers/scsi/scsi_sysctl.c b/drivers/scsi/scsi_sysctl.c
index be4aef0f4f99..055a03a83ad6 100644
--- a/drivers/scsi/scsi_sysctl.c
+++ b/drivers/scsi/scsi_sysctl.c
@@ -17,7 +17,9 @@ static const struct ctl_table scsi_table[] = {
.data = &scsi_logging_level,
.maxlen = sizeof(scsi_logging_level),
.mode = 0644,
- .proc_handler = proc_dointvec },
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_INT_MAX },
};
static struct ctl_table_header *scsi_table_header;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index ebbd50ec0cda..74a6830b7ed8 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -163,9 +163,11 @@ static const char *st_formats[] = {
static int debugging = DEBUG;
+/* Setting these non-zero may prevent resets from being recognized */
#define MAX_RETRIES 0
#define MAX_WRITE_RETRIES 0
#define MAX_READY_RETRIES 0
+
#define NO_TAPE NOT_READY
#define ST_TIMEOUT (900 * HZ)
@@ -357,10 +359,18 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
{
int result = SRpnt->result;
u8 scode;
+ unsigned int ctr;
DEB(const char *stp;)
char *name = STp->name;
struct st_cmdstatus *cmdstatp;
+ ctr = scsi_get_ua_por_ctr(STp->device);
+ if (ctr != STp->por_ctr) {
+ STp->por_ctr = ctr;
+ STp->pos_unknown = 1; /* ASC => power on / reset */
+ st_printk(KERN_WARNING, STp, "Power on/reset recognized.");
+ }
+
if (!result)
return 0;
@@ -413,10 +423,11 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
if (cmdstatp->have_sense &&
cmdstatp->sense_hdr.asc == 0 && cmdstatp->sense_hdr.ascq == 0x17)
STp->cleaning_req = 1; /* ASC and ASCQ => cleaning requested */
- if (cmdstatp->have_sense && scode == UNIT_ATTENTION && cmdstatp->sense_hdr.asc == 0x29)
+ if (cmdstatp->have_sense && scode == UNIT_ATTENTION &&
+ cmdstatp->sense_hdr.asc == 0x29 && !STp->pos_unknown) {
STp->pos_unknown = 1; /* ASC => power on / reset */
-
- STp->pos_unknown |= STp->device->was_reset;
+ st_printk(KERN_WARNING, STp, "Power on/reset recognized.");
+ }
if (cmdstatp->have_sense &&
scode == RECOVERED_ERROR
@@ -952,7 +963,6 @@ static void reset_state(struct scsi_tape *STp)
STp->partition = find_partition(STp);
if (STp->partition < 0)
STp->partition = 0;
- STp->new_partition = STp->partition;
}
}
@@ -969,6 +979,7 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
{
int attentions, waits, max_wait, scode;
int retval = CHKRES_READY, new_session = 0;
+ unsigned int ctr;
unsigned char cmd[MAX_COMMAND_SIZE];
struct st_request *SRpnt = NULL;
struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
@@ -1025,6 +1036,13 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
}
}
+ ctr = scsi_get_ua_new_media_ctr(STp->device);
+ if (ctr != STp->new_media_ctr) {
+ STp->new_media_ctr = ctr;
+ new_session = 1;
+ DEBC_printk(STp, "New tape session.");
+ }
+
retval = (STp->buffer)->syscall_result;
if (!retval)
retval = new_session ? CHKRES_NEW_SESSION : CHKRES_READY;
@@ -2897,7 +2915,6 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
timeout = STp->long_timeout * 8;
DEBC_printk(STp, "Erasing tape.\n");
- fileno = blkno = at_sm = 0;
break;
case MTSETBLK: /* Set block length */
case MTSETDENSITY: /* Set tape density */
@@ -2930,14 +2947,17 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
if (cmd_in == MTSETDENSITY) {
(STp->buffer)->b_data[4] = arg;
STp->density_changed = 1; /* At least we tried ;-) */
+ STp->changed_density = arg;
} else if (cmd_in == SET_DENS_AND_BLK)
(STp->buffer)->b_data[4] = arg >> 24;
else
(STp->buffer)->b_data[4] = STp->density;
if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) {
ltmp = arg & MT_ST_BLKSIZE_MASK;
- if (cmd_in == MTSETBLK)
+ if (cmd_in == MTSETBLK) {
STp->blksize_changed = 1; /* At least we tried ;-) */
+ STp->changed_blksize = arg;
+ }
} else
ltmp = STp->block_size;
(STp->buffer)->b_data[9] = (ltmp >> 16);
@@ -3084,7 +3104,9 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
cmd_in == MTSETDRVBUFFER ||
cmd_in == SET_DENS_AND_BLK) {
if (cmdstatp->sense_hdr.sense_key == ILLEGAL_REQUEST &&
- !(STp->use_pf & PF_TESTED)) {
+ cmdstatp->sense_hdr.asc == 0x24 &&
+ (STp->device)->scsi_level <= SCSI_2 &&
+ !(STp->use_pf & PF_TESTED)) {
/* Try the other possible state of Page Format if not
already tried */
STp->use_pf = (STp->use_pf ^ USE_PF) | PF_TESTED;
@@ -3636,9 +3658,23 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
retval = (-EIO);
goto out;
}
- reset_state(STp);
- /* remove this when the midlevel properly clears was_reset */
- STp->device->was_reset = 0;
+ reset_state(STp); /* Clears pos_unknown */
+
+ /* Fix the device settings after reset, ignore errors */
+ if (mtc.mt_op == MTREW || mtc.mt_op == MTSEEK ||
+ mtc.mt_op == MTEOM) {
+ if (STp->can_partitions) {
+ /* STp->new_partition contains the
+ * latest partition set
+ */
+ STp->partition = 0;
+ switch_partition(STp);
+ }
+ if (STp->density_changed)
+ st_int_ioctl(STp, MTSETDENSITY, STp->changed_density);
+ if (STp->blksize_changed)
+ st_int_ioctl(STp, MTSETBLK, STp->changed_blksize);
+ }
}
if (mtc.mt_op != MTNOP && mtc.mt_op != MTSETBLK &&
@@ -4122,7 +4158,7 @@ static void validate_options(void)
*/
static int __init st_setup(char *str)
{
- int i, len, ints[5];
+ int i, len, ints[ARRAY_SIZE(parms) + 1];
char *stp;
stp = get_options(str, ARRAY_SIZE(ints), ints);
@@ -4384,6 +4420,9 @@ static int st_probe(struct device *dev)
goto out_idr_remove;
}
+ tpnt->new_media_ctr = scsi_get_ua_new_media_ctr(SDp);
+ tpnt->por_ctr = scsi_get_ua_por_ctr(SDp);
+
dev_set_drvdata(dev, tpnt);
@@ -4665,6 +4704,24 @@ options_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR_RO(options);
+/**
+ * position_lost_in_reset_show - Show whether the tape position was lost in a reset.
+ * Value 1 indicates that reads, writes, etc. are blocked because a device reset
+ * has occurred and no operation positioning the tape has been issued since.
+ * @dev: struct device
+ * @attr: attribute structure
+ * @buf: buffer to return formatted data in
+ */
+static ssize_t position_lost_in_reset_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct st_modedef *STm = dev_get_drvdata(dev);
+ struct scsi_tape *STp = STm->tape;
+
+ return sprintf(buf, "%d", STp->pos_unknown);
+}
+static DEVICE_ATTR_RO(position_lost_in_reset);
+
/* Support for tape stats */
/**
@@ -4849,6 +4906,7 @@ static struct attribute *st_dev_attrs[] = {
&dev_attr_default_density.attr,
&dev_attr_default_compression.attr,
&dev_attr_options.attr,
+ &dev_attr_position_lost_in_reset.attr,
NULL,
};
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index 1aaaf5369a40..0d7c4b8c2c8a 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -165,6 +165,7 @@ struct scsi_tape {
unsigned char compression_changed;
unsigned char drv_buffer;
unsigned char density;
+ unsigned char changed_density;
unsigned char door_locked;
unsigned char autorew_dev; /* auto-rewind device */
unsigned char rew_at_close; /* rewind necessary at close */
@@ -172,11 +173,16 @@ struct scsi_tape {
unsigned char cleaning_req; /* cleaning requested? */
unsigned char first_tur; /* first TEST UNIT READY */
int block_size;
+ int changed_blksize;
int min_block;
int max_block;
int recover_count; /* From tape opening */
int recover_reg; /* From last status call */
+ /* The saved values of midlevel counters */
+ unsigned int new_media_ctr;
+ unsigned int por_ctr;
+
#if DEBUG
unsigned char write_pending;
int nbr_finished;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index a8614e54544e..35db061ae3ec 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -776,7 +776,7 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns)
if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
vstor_packet->status != 0) {
- dev_err(dev, "Failed to create sub-channel: op=%d, sts=%d\n",
+ dev_err(dev, "Failed to create sub-channel: op=%d, host=0x%x\n",
vstor_packet->operation, vstor_packet->status);
return;
}
@@ -1183,7 +1183,7 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device,
STORVSC_LOGGING_WARN : STORVSC_LOGGING_ERROR;
storvsc_log_ratelimited(device, loglevel,
- "tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x hv 0x%x\n",
+ "tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x host 0x%x\n",
scsi_cmd_to_rq(request->cmd)->tag,
stor_pkt->vm_srb.cdb[0],
vstor_packet->vm_srb.scsi_status,
diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c
index fd72d9088bdc..64ed7d64458a 100644
--- a/drivers/sh/clk/cpg.c
+++ b/drivers/sh/clk/cpg.c
@@ -26,6 +26,19 @@ static unsigned int sh_clk_read(struct clk *clk)
return ioread32(clk->mapped_reg);
}
+static unsigned int sh_clk_read_status(struct clk *clk)
+{
+ void __iomem *mapped_status = (phys_addr_t)clk->status_reg -
+ (phys_addr_t)clk->enable_reg + clk->mapped_reg;
+
+ if (clk->flags & CLK_ENABLE_REG_8BIT)
+ return ioread8(mapped_status);
+ else if (clk->flags & CLK_ENABLE_REG_16BIT)
+ return ioread16(mapped_status);
+
+ return ioread32(mapped_status);
+}
+
static void sh_clk_write(int value, struct clk *clk)
{
if (clk->flags & CLK_ENABLE_REG_8BIT)
@@ -40,20 +53,10 @@ static int sh_clk_mstp_enable(struct clk *clk)
{
sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
if (clk->status_reg) {
- unsigned int (*read)(const void __iomem *addr);
int i;
- void __iomem *mapped_status = (phys_addr_t)clk->status_reg -
- (phys_addr_t)clk->enable_reg + clk->mapped_reg;
-
- if (clk->flags & CLK_ENABLE_REG_8BIT)
- read = ioread8;
- else if (clk->flags & CLK_ENABLE_REG_16BIT)
- read = ioread16;
- else
- read = ioread32;
for (i = 1000;
- (read(mapped_status) & (1 << clk->enable_bit)) && i;
+ (sh_clk_read_status(clk) & (1 << clk->enable_bit)) && i;
i--)
cpu_relax();
if (!i) {
diff --git a/drivers/soc/apple/rtkit-internal.h b/drivers/soc/apple/rtkit-internal.h
index 27c9fa745fd5..b8d5244678f0 100644
--- a/drivers/soc/apple/rtkit-internal.h
+++ b/drivers/soc/apple/rtkit-internal.h
@@ -44,6 +44,7 @@ struct apple_rtkit {
struct apple_rtkit_shmem ioreport_buffer;
struct apple_rtkit_shmem crashlog_buffer;
+ struct apple_rtkit_shmem oslog_buffer;
struct apple_rtkit_shmem syslog_buffer;
char *syslog_msg_buffer;
diff --git a/drivers/soc/apple/rtkit.c b/drivers/soc/apple/rtkit.c
index e6d940292c9f..5fffd0f003dc 100644
--- a/drivers/soc/apple/rtkit.c
+++ b/drivers/soc/apple/rtkit.c
@@ -12,6 +12,7 @@ enum {
APPLE_RTKIT_PWR_STATE_IDLE = 0x201, /* sleeping, retain state */
APPLE_RTKIT_PWR_STATE_QUIESCED = 0x10, /* running but no communication */
APPLE_RTKIT_PWR_STATE_ON = 0x20, /* normal operating state */
+ APPLE_RTKIT_PWR_STATE_INIT = 0x220, /* init after starting the coproc */
};
enum {
@@ -66,8 +67,9 @@ enum {
#define APPLE_RTKIT_SYSLOG_MSG_SIZE GENMASK_ULL(31, 24)
#define APPLE_RTKIT_OSLOG_TYPE GENMASK_ULL(63, 56)
-#define APPLE_RTKIT_OSLOG_INIT 1
-#define APPLE_RTKIT_OSLOG_ACK 3
+#define APPLE_RTKIT_OSLOG_BUFFER_REQUEST 1
+#define APPLE_RTKIT_OSLOG_SIZE GENMASK_ULL(55, 36)
+#define APPLE_RTKIT_OSLOG_IOVA GENMASK_ULL(35, 0)
#define APPLE_RTKIT_MIN_SUPPORTED_VERSION 11
#define APPLE_RTKIT_MAX_SUPPORTED_VERSION 12
@@ -97,12 +99,19 @@ bool apple_rtkit_is_crashed(struct apple_rtkit *rtk)
}
EXPORT_SYMBOL_GPL(apple_rtkit_is_crashed);
-static void apple_rtkit_management_send(struct apple_rtkit *rtk, u8 type,
+static int apple_rtkit_management_send(struct apple_rtkit *rtk, u8 type,
u64 msg)
{
+ int ret;
+
msg &= ~APPLE_RTKIT_MGMT_TYPE;
msg |= FIELD_PREP(APPLE_RTKIT_MGMT_TYPE, type);
- apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_MGMT, msg, NULL, false);
+ ret = apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_MGMT, msg, NULL, false);
+
+ if (ret)
+ dev_err(rtk->dev, "RTKit: Failed to send management message: %d\n", ret);
+
+ return ret;
}
static void apple_rtkit_management_rx_hello(struct apple_rtkit *rtk, u64 msg)
@@ -251,15 +260,21 @@ static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk,
struct apple_rtkit_shmem *buffer,
u8 ep, u64 msg)
{
- size_t n_4kpages = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg);
u64 reply;
int err;
+ /* The different size vs. IOVA shifts look odd but are indeed correct this way */
+ if (ep == APPLE_RTKIT_EP_OSLOG) {
+ buffer->size = FIELD_GET(APPLE_RTKIT_OSLOG_SIZE, msg);
+ buffer->iova = FIELD_GET(APPLE_RTKIT_OSLOG_IOVA, msg) << 12;
+ } else {
+ buffer->size = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg) << 12;
+ buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg);
+ }
+
buffer->buffer = NULL;
buffer->iomem = NULL;
buffer->is_mapped = false;
- buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg);
- buffer->size = n_4kpages << 12;
dev_dbg(rtk->dev, "RTKit: buffer request for 0x%zx bytes at %pad\n",
buffer->size, &buffer->iova);
@@ -284,17 +299,30 @@ static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk,
}
if (!buffer->is_mapped) {
- reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE,
- APPLE_RTKIT_BUFFER_REQUEST);
- reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE, n_4kpages);
- reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA,
- buffer->iova);
+ /* oslog uses different fields and needs a shifted IOVA instead of size */
+ if (ep == APPLE_RTKIT_EP_OSLOG) {
+ reply = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE,
+ APPLE_RTKIT_OSLOG_BUFFER_REQUEST);
+ reply |= FIELD_PREP(APPLE_RTKIT_OSLOG_SIZE, buffer->size);
+ reply |= FIELD_PREP(APPLE_RTKIT_OSLOG_IOVA,
+ buffer->iova >> 12);
+ } else {
+ reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE,
+ APPLE_RTKIT_BUFFER_REQUEST);
+ reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE,
+ buffer->size >> 12);
+ reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA,
+ buffer->iova);
+ }
apple_rtkit_send_message(rtk, ep, reply, NULL, false);
}
return 0;
error:
+ dev_err(rtk->dev, "RTKit: failed buffer request for 0x%zx bytes (%d)\n",
+ buffer->size, err);
+
buffer->buffer = NULL;
buffer->iomem = NULL;
buffer->iova = 0;
@@ -360,7 +388,6 @@ static void apple_rtkit_crashlog_rx(struct apple_rtkit *rtk, u64 msg)
apple_rtkit_memcpy(rtk, bfr, &rtk->crashlog_buffer, 0,
rtk->crashlog_buffer.size);
apple_rtkit_crashlog_dump(rtk, bfr, rtk->crashlog_buffer.size);
- kfree(bfr);
} else {
dev_err(rtk->dev,
"RTKit: Couldn't allocate crashlog shadow buffer\n");
@@ -368,7 +395,9 @@ static void apple_rtkit_crashlog_rx(struct apple_rtkit *rtk, u64 msg)
rtk->crashed = true;
if (rtk->ops->crashed)
- rtk->ops->crashed(rtk->cookie);
+ rtk->ops->crashed(rtk->cookie, bfr, rtk->crashlog_buffer.size);
+
+ kfree(bfr);
}
static void apple_rtkit_ioreport_rx(struct apple_rtkit *rtk, u64 msg)
@@ -448,7 +477,7 @@ static void apple_rtkit_syslog_rx_log(struct apple_rtkit *rtk, u64 msg)
log_context[sizeof(log_context) - 1] = 0;
- msglen = rtk->syslog_msg_size - 1;
+ msglen = strnlen(rtk->syslog_msg_buffer, rtk->syslog_msg_size - 1);
while (msglen > 0 &&
should_crop_syslog_char(rtk->syslog_msg_buffer[msglen - 1]))
msglen--;
@@ -482,25 +511,18 @@ static void apple_rtkit_syslog_rx(struct apple_rtkit *rtk, u64 msg)
}
}
-static void apple_rtkit_oslog_rx_init(struct apple_rtkit *rtk, u64 msg)
-{
- u64 ack;
-
- dev_dbg(rtk->dev, "RTKit: oslog init: msg: 0x%llx\n", msg);
- ack = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE, APPLE_RTKIT_OSLOG_ACK);
- apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_OSLOG, ack, NULL, false);
-}
-
static void apple_rtkit_oslog_rx(struct apple_rtkit *rtk, u64 msg)
{
u8 type = FIELD_GET(APPLE_RTKIT_OSLOG_TYPE, msg);
switch (type) {
- case APPLE_RTKIT_OSLOG_INIT:
- apple_rtkit_oslog_rx_init(rtk, msg);
+ case APPLE_RTKIT_OSLOG_BUFFER_REQUEST:
+ apple_rtkit_common_rx_get_buffer(rtk, &rtk->oslog_buffer,
+ APPLE_RTKIT_EP_OSLOG, msg);
break;
default:
- dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n", msg);
+ dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n",
+ msg);
}
}
@@ -588,11 +610,18 @@ int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
.msg1 = ep,
};
- if (rtk->crashed)
+ if (rtk->crashed) {
+ dev_warn(rtk->dev,
+ "RTKit: Device is crashed, cannot send message\n");
return -EINVAL;
+ }
+
if (ep >= APPLE_RTKIT_APP_ENDPOINT_START &&
- !apple_rtkit_is_running(rtk))
+ !apple_rtkit_is_running(rtk)) {
+ dev_warn(rtk->dev,
+ "RTKit: Endpoint 0x%02x is not running, cannot send message\n", ep);
return -EINVAL;
+ }
/*
* The message will be sent with a MMIO write. We need the barrier
@@ -667,7 +696,7 @@ struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie,
rtk->mbox->rx = apple_rtkit_rx;
rtk->mbox->cookie = rtk;
- rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_MEM_RECLAIM,
+ rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_HIGHPRI | WQ_MEM_RECLAIM,
dev_name(rtk->dev));
if (!rtk->wq) {
ret = -ENOMEM;
@@ -710,6 +739,7 @@ int apple_rtkit_reinit(struct apple_rtkit *rtk)
apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer);
+ apple_rtkit_free_buffer(rtk, &rtk->oslog_buffer);
apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer);
kfree(rtk->syslog_msg_buffer);
@@ -742,8 +772,10 @@ static int apple_rtkit_set_ap_power_state(struct apple_rtkit *rtk,
reinit_completion(&rtk->ap_pwr_ack_completion);
msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, state);
- apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_AP_PWR_STATE,
- msg);
+ ret = apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_AP_PWR_STATE,
+ msg);
+ if (ret)
+ return ret;
ret = apple_rtkit_wait_for_completion(&rtk->ap_pwr_ack_completion);
if (ret)
@@ -763,8 +795,10 @@ static int apple_rtkit_set_iop_power_state(struct apple_rtkit *rtk,
reinit_completion(&rtk->iop_pwr_ack_completion);
msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, state);
- apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
- msg);
+ ret = apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
+ msg);
+ if (ret)
+ return ret;
ret = apple_rtkit_wait_for_completion(&rtk->iop_pwr_ack_completion);
if (ret)
@@ -865,6 +899,7 @@ EXPORT_SYMBOL_GPL(apple_rtkit_quiesce);
int apple_rtkit_wake(struct apple_rtkit *rtk)
{
u64 msg;
+ int ret;
if (apple_rtkit_is_running(rtk))
return -EINVAL;
@@ -875,9 +910,11 @@ int apple_rtkit_wake(struct apple_rtkit *rtk)
* Use open-coded apple_rtkit_set_iop_power_state since apple_rtkit_boot
* will wait for the completion anyway.
*/
- msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, APPLE_RTKIT_PWR_STATE_ON);
- apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
- msg);
+ msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, APPLE_RTKIT_PWR_STATE_INIT);
+ ret = apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
+ msg);
+ if (ret)
+ return ret;
return apple_rtkit_boot(rtk);
}
@@ -890,6 +927,7 @@ void apple_rtkit_free(struct apple_rtkit *rtk)
apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer);
+ apple_rtkit_free_buffer(rtk, &rtk->oslog_buffer);
apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer);
kfree(rtk->syslog_msg_buffer);
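
For reference, the oslog buffer request handled in the rtkit.c changes above packs the message as: type in bits 63:56, buffer size in bytes in bits 55:36, and the IOVA as a 4 KiB page number in bits 35:0. A standalone sketch of that decode using plain shifts instead of the kernel's FIELD_GET(), with illustrative names and a made-up sample value:

    #include <stdint.h>
    #include <stdio.h>

    struct oslog_req {
    	uint8_t type;
    	uint64_t size;		/* bytes */
    	uint64_t iova;		/* bytes (page number << 12) */
    };

    static struct oslog_req decode_oslog_req(uint64_t msg)
    {
    	struct oslog_req r;

    	r.type = (uint8_t)(msg >> 56);			/* bits 63:56 */
    	r.size = (msg >> 36) & 0xfffffULL;		/* bits 55:36, 20-bit size */
    	r.iova = (msg & 0xfffffffffULL) << 12;		/* bits 35:0, page number */
    	return r;
    }

    int main(void)
    {
    	/* type 1 (buffer request), 64 KiB buffer at IOVA 0x800000 */
    	uint64_t msg = (1ULL << 56) | (0x10000ULL << 36) | (0x800000ULL >> 12);
    	struct oslog_req r = decode_oslog_req(msg);

    	printf("type=%u size=0x%llx iova=0x%llx\n", (unsigned)r.type,
    	       (unsigned long long)r.size, (unsigned long long)r.iova);
    	return 0;
    }
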
diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c
index 298b542dd1c0..09347bccdb1d 100644
--- a/drivers/soc/atmel/soc.c
+++ b/drivers/soc/atmel/soc.c
@@ -246,6 +246,9 @@ static const struct at91_soc socs[] __initconst = {
"samv70q19", "samv7"),
#endif
#ifdef CONFIG_SOC_SAMA7
+ AT91_SOC(SAMA7D65_CIDR_MATCH, AT91_CIDR_MASK_SAMA7G5,
+ AT91_CIDR_VERSION_MASK_SAMA7G5, SAMA7D65_EXID_MATCH,
+ "sama7d65", "sama7d6"),
AT91_SOC(SAMA7G5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
AT91_CIDR_VERSION_MASK_SAMA7G5, SAMA7G51_EXID_MATCH,
"sama7g51", "sama7g5"),
@@ -305,6 +308,7 @@ static int __init at91_get_cidr_exid_from_chipid(u32 *cidr, u32 *exid)
void __iomem *regs;
static const struct of_device_id chipids[] = {
{ .compatible = "atmel,sama5d2-chipid" },
+ { .compatible = "microchip,sama7d65-chipid" },
{ .compatible = "microchip,sama7g5-chipid" },
{ },
};
@@ -393,6 +397,7 @@ static const struct of_device_id at91_soc_allowed_list[] __initconst = {
{ .compatible = "atmel,at91sam9", },
{ .compatible = "atmel,sama5", },
{ .compatible = "atmel,samv7", },
+ { .compatible = "microchip,sama7d65", },
{ .compatible = "microchip,sama7g5", },
{ }
};
diff --git a/drivers/soc/atmel/soc.h b/drivers/soc/atmel/soc.h
index 2c78e54255f7..66a74017d9a3 100644
--- a/drivers/soc/atmel/soc.h
+++ b/drivers/soc/atmel/soc.h
@@ -45,6 +45,7 @@ at91_soc_init(const struct at91_soc *socs);
#define AT91SAM9N12_CIDR_MATCH 0x019a07a0
#define SAM9X60_CIDR_MATCH 0x019b35a0
#define SAM9X7_CIDR_MATCH 0x09750020
+#define SAMA7D65_CIDR_MATCH 0x00262100
#define SAMA7G5_CIDR_MATCH 0x00162100
#define AT91SAM9M11_EXID_MATCH 0x00000001
@@ -75,6 +76,8 @@ at91_soc_init(const struct at91_soc *socs);
#define SAM9X75_D5M_EXID_MATCH 0x00000010
#define SAM9X75_EXID_MATCH 0x00000000
+#define SAMA7D65_EXID_MATCH 0x00000080
+
#define SAMA7G51_EXID_MATCH 0x3
#define SAMA7G52_EXID_MATCH 0x2
#define SAMA7G53_EXID_MATCH 0x1
diff --git a/drivers/soc/hisilicon/kunpeng_hccs.c b/drivers/soc/hisilicon/kunpeng_hccs.c
index 8aa8dec14911..444a8f59b7da 100644
--- a/drivers/soc/hisilicon/kunpeng_hccs.c
+++ b/drivers/soc/hisilicon/kunpeng_hccs.c
@@ -1539,8 +1539,8 @@ static ssize_t used_types_show(struct kobject *kobj,
u16 i;
for (i = 0; i < hdev->used_type_num - 1; i++)
- len += sysfs_emit(&buf[len], "%s ", hdev->type_name_maps[i].name);
- len += sysfs_emit(&buf[len], "%s\n", hdev->type_name_maps[i].name);
+ len += sysfs_emit_at(buf, len, "%s ", hdev->type_name_maps[i].name);
+ len += sysfs_emit_at(buf, len, "%s\n", hdev->type_name_maps[i].name);
return len;
}
diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c
index 8ac7658e3d52..3ed8161d7d28 100644
--- a/drivers/soc/imx/soc-imx8m.c
+++ b/drivers/soc/imx/soc-imx8m.c
@@ -192,9 +192,20 @@ static __maybe_unused const struct of_device_id imx8_soc_match[] = {
devm_kasprintf((dev), GFP_KERNEL, "%d.%d", ((soc_rev) >> 4) & 0xf, (soc_rev) & 0xf) : \
"unknown"
+static void imx8m_unregister_soc(void *data)
+{
+ soc_device_unregister(data);
+}
+
+static void imx8m_unregister_cpufreq(void *data)
+{
+ platform_device_unregister(data);
+}
+
static int imx8m_soc_probe(struct platform_device *pdev)
{
struct soc_device_attribute *soc_dev_attr;
+ struct platform_device *cpufreq_dev;
const struct imx8_soc_data *data;
struct device *dev = &pdev->dev;
const struct of_device_id *id;
@@ -239,11 +250,22 @@ static int imx8m_soc_probe(struct platform_device *pdev)
if (IS_ERR(soc_dev))
return PTR_ERR(soc_dev);
+ ret = devm_add_action(dev, imx8m_unregister_soc, soc_dev);
+ if (ret)
+ return ret;
+
pr_info("SoC: %s revision %s\n", soc_dev_attr->soc_id,
soc_dev_attr->revision);
- if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT))
- platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0);
+ if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT)) {
+ cpufreq_dev = platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0);
+ if (IS_ERR(cpufreq_dev))
+ return dev_err_probe(dev, PTR_ERR(cpufreq_dev),
+ "Failed to register imx-cpufreq-dev device\n");
+ ret = devm_add_action(dev, imx8m_unregister_cpufreq, cpufreq_dev);
+ if (ret)
+ return ret;
+ }
return 0;
}
diff --git a/drivers/soc/mediatek/mt8167-mmsys.h b/drivers/soc/mediatek/mt8167-mmsys.h
index f7a35b3656bb..c468926561b4 100644
--- a/drivers/soc/mediatek/mt8167-mmsys.h
+++ b/drivers/soc/mediatek/mt8167-mmsys.h
@@ -14,22 +14,21 @@
#define MT8167_DSI0_SEL_IN_RDMA0 0x1
static const struct mtk_mmsys_routes mt8167_mmsys_routing_table[] = {
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
- MT8167_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, OVL0_MOUT_EN_COLOR0,
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_RDMA0,
- MT8167_DISP_REG_CONFIG_DISP_DITHER_MOUT_EN, MT8167_DITHER_MOUT_EN_RDMA0
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
- MT8167_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, COLOR0_SEL_IN_OVL0
- }, {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI0,
- MT8167_DISP_REG_CONFIG_DISP_DSI0_SEL_IN, MT8167_DSI0_SEL_IN_RDMA0
- }, {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI0,
- MT8167_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN, MT8167_RDMA0_SOUT_DSI0
- },
+ MMSYS_ROUTE(OVL0, COLOR0,
+ MT8167_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, OVL0_MOUT_EN_COLOR0,
+ OVL0_MOUT_EN_COLOR0),
+ MMSYS_ROUTE(DITHER0, RDMA0,
+ MT8167_DISP_REG_CONFIG_DISP_DITHER_MOUT_EN, MT8167_DITHER_MOUT_EN_RDMA0,
+ MT8167_DITHER_MOUT_EN_RDMA0),
+ MMSYS_ROUTE(OVL0, COLOR0,
+ MT8167_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, COLOR0_SEL_IN_OVL0,
+ COLOR0_SEL_IN_OVL0),
+ MMSYS_ROUTE(RDMA0, DSI0,
+ MT8167_DISP_REG_CONFIG_DISP_DSI0_SEL_IN, MT8167_DSI0_SEL_IN_RDMA0,
+ MT8167_DSI0_SEL_IN_RDMA0),
+ MMSYS_ROUTE(RDMA0, DSI0,
+ MT8167_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN, MT8167_RDMA0_SOUT_DSI0,
+ MT8167_RDMA0_SOUT_DSI0),
};
#endif /* __SOC_MEDIATEK_MT8167_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8173-mmsys.h b/drivers/soc/mediatek/mt8173-mmsys.h
index 9d24e381271e..957876d7c166 100644
--- a/drivers/soc/mediatek/mt8173-mmsys.h
+++ b/drivers/soc/mediatek/mt8173-mmsys.h
@@ -33,63 +33,48 @@
#define MT8173_RDMA0_SOUT_COLOR0 BIT(0)
static const struct mtk_mmsys_routes mt8173_mmsys_routing_table[] = {
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
- MT8173_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN,
- MT8173_OVL0_MOUT_EN_COLOR0, MT8173_OVL0_MOUT_EN_COLOR0
- }, {
- DDP_COMPONENT_OD0, DDP_COMPONENT_RDMA0,
- MT8173_DISP_REG_CONFIG_DISP_OD_MOUT_EN,
- MT8173_OD0_MOUT_EN_RDMA0, MT8173_OD0_MOUT_EN_RDMA0
- }, {
- DDP_COMPONENT_UFOE, DDP_COMPONENT_DSI0,
- MT8173_DISP_REG_CONFIG_DISP_UFOE_MOUT_EN,
- MT8173_UFOE_MOUT_EN_DSI0, MT8173_UFOE_MOUT_EN_DSI0
- }, {
- DDP_COMPONENT_COLOR0, DDP_COMPONENT_AAL0,
- MT8173_DISP_REG_CONFIG_DISP_COLOR0_SOUT_SEL_IN,
- MT8173_COLOR0_SOUT_MERGE, 0 /* SOUT to AAL */
- }, {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_UFOE,
- MT8173_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN,
- MT8173_RDMA0_SOUT_COLOR0, 0 /* SOUT to UFOE */
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
- MT8173_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN,
- MT8173_COLOR0_SEL_IN_OVL0, MT8173_COLOR0_SEL_IN_OVL0
- }, {
- DDP_COMPONENT_AAL0, DDP_COMPONENT_COLOR0,
- MT8173_DISP_REG_CONFIG_DISP_AAL_SEL_IN,
- MT8173_AAL_SEL_IN_MERGE, 0 /* SEL_IN from COLOR0 */
- }, {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_UFOE,
- MT8173_DISP_REG_CONFIG_DISP_UFOE_SEL_IN,
- MT8173_UFOE_SEL_IN_RDMA0, 0 /* SEL_IN from RDMA0 */
- }, {
- DDP_COMPONENT_UFOE, DDP_COMPONENT_DSI0,
- MT8173_DISP_REG_CONFIG_DSI0_SEL_IN,
- MT8173_DSI0_SEL_IN_UFOE, 0, /* SEL_IN from UFOE */
- }, {
- DDP_COMPONENT_OVL1, DDP_COMPONENT_COLOR1,
- MT8173_DISP_REG_CONFIG_DISP_OVL1_MOUT_EN,
- MT8173_OVL1_MOUT_EN_COLOR1, MT8173_OVL1_MOUT_EN_COLOR1
- }, {
- DDP_COMPONENT_GAMMA, DDP_COMPONENT_RDMA1,
- MT8173_DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN,
- MT8173_GAMMA_MOUT_EN_RDMA1, MT8173_GAMMA_MOUT_EN_RDMA1
- }, {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8173_DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN,
- RDMA1_SOUT_MASK, RDMA1_SOUT_DPI0
- }, {
- DDP_COMPONENT_OVL1, DDP_COMPONENT_COLOR1,
- MT8173_DISP_REG_CONFIG_DISP_COLOR1_SEL_IN,
- COLOR1_SEL_IN_OVL1, COLOR1_SEL_IN_OVL1
- }, {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8173_DISP_REG_CONFIG_DPI_SEL_IN,
- MT8173_DPI0_SEL_IN_MASK, MT8173_DPI0_SEL_IN_RDMA1
- }
+ MMSYS_ROUTE(OVL0, COLOR0,
+ MT8173_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, MT8173_OVL0_MOUT_EN_COLOR0,
+ MT8173_OVL0_MOUT_EN_COLOR0),
+ MMSYS_ROUTE(OD0, RDMA0,
+ MT8173_DISP_REG_CONFIG_DISP_OD_MOUT_EN, MT8173_OD0_MOUT_EN_RDMA0,
+ MT8173_OD0_MOUT_EN_RDMA0),
+ MMSYS_ROUTE(UFOE, DSI0,
+ MT8173_DISP_REG_CONFIG_DISP_UFOE_MOUT_EN, MT8173_UFOE_MOUT_EN_DSI0,
+ MT8173_UFOE_MOUT_EN_DSI0),
+ MMSYS_ROUTE(COLOR0, AAL0,
+ MT8173_DISP_REG_CONFIG_DISP_COLOR0_SOUT_SEL_IN, MT8173_COLOR0_SOUT_MERGE,
+ 0 /* SOUT to AAL */),
+ MMSYS_ROUTE(RDMA0, UFOE,
+ MT8173_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN, MT8173_RDMA0_SOUT_COLOR0,
+ 0 /* SOUT to UFOE */),
+ MMSYS_ROUTE(OVL0, COLOR0,
+ MT8173_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, MT8173_COLOR0_SEL_IN_OVL0,
+ MT8173_COLOR0_SEL_IN_OVL0),
+ MMSYS_ROUTE(AAL0, COLOR0,
+ MT8173_DISP_REG_CONFIG_DISP_AAL_SEL_IN, MT8173_AAL_SEL_IN_MERGE,
+ 0 /* SEL_IN from COLOR0 */),
+ MMSYS_ROUTE(RDMA0, UFOE,
+ MT8173_DISP_REG_CONFIG_DISP_UFOE_SEL_IN, MT8173_UFOE_SEL_IN_RDMA0,
+ 0 /* SEL_IN from RDMA0 */),
+ MMSYS_ROUTE(UFOE, DSI0,
+ MT8173_DISP_REG_CONFIG_DSI0_SEL_IN, MT8173_DSI0_SEL_IN_UFOE,
+ 0 /* SEL_IN from UFOE */),
+ MMSYS_ROUTE(OVL1, COLOR1,
+ MT8173_DISP_REG_CONFIG_DISP_OVL1_MOUT_EN, MT8173_OVL1_MOUT_EN_COLOR1,
+ MT8173_OVL1_MOUT_EN_COLOR1),
+ MMSYS_ROUTE(GAMMA, RDMA1,
+ MT8173_DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN, MT8173_GAMMA_MOUT_EN_RDMA1,
+ MT8173_GAMMA_MOUT_EN_RDMA1),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8173_DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK,
+ RDMA1_SOUT_DPI0),
+ MMSYS_ROUTE(OVL1, COLOR1,
+ MT8173_DISP_REG_CONFIG_DISP_COLOR1_SEL_IN, COLOR1_SEL_IN_OVL1,
+ COLOR1_SEL_IN_OVL1),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8173_DISP_REG_CONFIG_DPI_SEL_IN, MT8173_DPI0_SEL_IN_MASK,
+ MT8173_DPI0_SEL_IN_RDMA1),
};
#endif /* __SOC_MEDIATEK_MT8173_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8183-mmsys.h b/drivers/soc/mediatek/mt8183-mmsys.h
index ff6be1703469..123384958c4b 100644
--- a/drivers/soc/mediatek/mt8183-mmsys.h
+++ b/drivers/soc/mediatek/mt8183-mmsys.h
@@ -28,35 +28,27 @@
#define MT8183_MMSYS_SW0_RST_B 0x140
static const struct mtk_mmsys_routes mmsys_mt8183_routing_table[] = {
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL_2L0,
- MT8183_DISP_OVL0_MOUT_EN, MT8183_OVL0_MOUT_EN_OVL0_2L,
- MT8183_OVL0_MOUT_EN_OVL0_2L
- }, {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
- MT8183_DISP_OVL0_2L_MOUT_EN, MT8183_OVL0_2L_MOUT_EN_DISP_PATH0,
- MT8183_OVL0_2L_MOUT_EN_DISP_PATH0
- }, {
- DDP_COMPONENT_OVL_2L1, DDP_COMPONENT_RDMA1,
- MT8183_DISP_OVL1_2L_MOUT_EN, MT8183_OVL1_2L_MOUT_EN_RDMA1,
- MT8183_OVL1_2L_MOUT_EN_RDMA1
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8183_DISP_DITHER0_MOUT_EN, MT8183_DITHER0_MOUT_IN_DSI0,
- MT8183_DITHER0_MOUT_IN_DSI0
- }, {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
- MT8183_DISP_PATH0_SEL_IN, MT8183_DISP_PATH0_SEL_IN_OVL0_2L,
- MT8183_DISP_PATH0_SEL_IN_OVL0_2L
- }, {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8183_DISP_DPI0_SEL_IN, MT8183_DPI0_SEL_IN_RDMA1,
- MT8183_DPI0_SEL_IN_RDMA1
- }, {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
- MT8183_DISP_RDMA0_SOUT_SEL_IN, MT8183_RDMA0_SOUT_COLOR0,
- MT8183_RDMA0_SOUT_COLOR0
- }
+ MMSYS_ROUTE(OVL0, OVL_2L0,
+ MT8183_DISP_OVL0_MOUT_EN, MT8183_OVL0_MOUT_EN_OVL0_2L,
+ MT8183_OVL0_MOUT_EN_OVL0_2L),
+ MMSYS_ROUTE(OVL_2L0, RDMA0,
+ MT8183_DISP_OVL0_2L_MOUT_EN, MT8183_OVL0_2L_MOUT_EN_DISP_PATH0,
+ MT8183_OVL0_2L_MOUT_EN_DISP_PATH0),
+ MMSYS_ROUTE(OVL_2L1, RDMA1,
+ MT8183_DISP_OVL1_2L_MOUT_EN, MT8183_OVL1_2L_MOUT_EN_RDMA1,
+ MT8183_OVL1_2L_MOUT_EN_RDMA1),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8183_DISP_DITHER0_MOUT_EN, MT8183_DITHER0_MOUT_IN_DSI0,
+ MT8183_DITHER0_MOUT_IN_DSI0),
+ MMSYS_ROUTE(OVL_2L0, RDMA0,
+ MT8183_DISP_PATH0_SEL_IN, MT8183_DISP_PATH0_SEL_IN_OVL0_2L,
+ MT8183_DISP_PATH0_SEL_IN_OVL0_2L),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8183_DISP_DPI0_SEL_IN, MT8183_DPI0_SEL_IN_RDMA1,
+ MT8183_DPI0_SEL_IN_RDMA1),
+ MMSYS_ROUTE(RDMA0, COLOR0,
+ MT8183_DISP_RDMA0_SOUT_SEL_IN, MT8183_RDMA0_SOUT_COLOR0,
+ MT8183_RDMA0_SOUT_COLOR0),
};
#endif /* __SOC_MEDIATEK_MT8183_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8186-mmsys.h b/drivers/soc/mediatek/mt8186-mmsys.h
index 279d4138525b..354664be72bd 100644
--- a/drivers/soc/mediatek/mt8186-mmsys.h
+++ b/drivers/soc/mediatek/mt8186-mmsys.h
@@ -63,61 +63,39 @@
#define MT8186_MMSYS_SW0_RST_B 0x160
static const struct mtk_mmsys_routes mmsys_mt8186_routing_table[] = {
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8186_DISP_OVL0_MOUT_EN, MT8186_OVL0_MOUT_EN_MASK,
- MT8186_OVL0_MOUT_TO_RDMA0
- },
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8186_DISP_RDMA0_SEL_IN, MT8186_RDMA0_SEL_IN_MASK,
- MT8186_RDMA0_FROM_OVL0
- },
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8186_MMSYS_OVL_CON, MT8186_MMSYS_OVL0_CON_MASK,
- MT8186_OVL0_GO_BLEND
- },
- {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
- MT8186_DISP_RDMA0_SOUT_SEL, MT8186_RDMA0_SOUT_SEL_MASK,
- MT8186_RDMA0_SOUT_TO_COLOR0
- },
- {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8186_DISP_DITHER0_MOUT_EN, MT8186_DITHER0_MOUT_EN_MASK,
- MT8186_DITHER0_MOUT_TO_DSI0,
- },
- {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8186_DISP_DSI0_SEL_IN, MT8186_DSI0_SEL_IN_MASK,
- MT8186_DSI0_FROM_DITHER0
- },
- {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA1,
- MT8186_DISP_OVL0_2L_MOUT_EN, MT8186_OVL0_2L_MOUT_EN_MASK,
- MT8186_OVL0_2L_MOUT_TO_RDMA1
- },
- {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA1,
- MT8186_DISP_RDMA1_SEL_IN, MT8186_RDMA1_SEL_IN_MASK,
- MT8186_RDMA1_FROM_OVL0_2L
- },
- {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA1,
- MT8186_MMSYS_OVL_CON, MT8186_MMSYS_OVL0_2L_CON_MASK,
- MT8186_OVL0_2L_GO_BLEND
- },
- {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8186_DISP_RDMA1_MOUT_EN, MT8186_RDMA1_MOUT_EN_MASK,
- MT8186_RDMA1_MOUT_TO_DPI0_SEL
- },
- {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8186_DISP_DPI0_SEL_IN, MT8186_DPI0_SEL_IN_MASK,
- MT8186_DPI0_FROM_RDMA1
- },
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8186_DISP_OVL0_MOUT_EN, MT8186_OVL0_MOUT_EN_MASK,
+ MT8186_OVL0_MOUT_TO_RDMA0),
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8186_DISP_RDMA0_SEL_IN, MT8186_RDMA0_SEL_IN_MASK,
+ MT8186_RDMA0_FROM_OVL0),
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8186_MMSYS_OVL_CON, MT8186_MMSYS_OVL0_CON_MASK,
+ MT8186_OVL0_GO_BLEND),
+ MMSYS_ROUTE(RDMA0, COLOR0,
+ MT8186_DISP_RDMA0_SOUT_SEL, MT8186_RDMA0_SOUT_SEL_MASK,
+ MT8186_RDMA0_SOUT_TO_COLOR0),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8186_DISP_DITHER0_MOUT_EN, MT8186_DITHER0_MOUT_EN_MASK,
+ MT8186_DITHER0_MOUT_TO_DSI0),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8186_DISP_DSI0_SEL_IN, MT8186_DSI0_SEL_IN_MASK,
+ MT8186_DSI0_FROM_DITHER0),
+ MMSYS_ROUTE(OVL_2L0, RDMA1,
+ MT8186_DISP_OVL0_2L_MOUT_EN, MT8186_OVL0_2L_MOUT_EN_MASK,
+ MT8186_OVL0_2L_MOUT_TO_RDMA1),
+ MMSYS_ROUTE(OVL_2L0, RDMA1,
+ MT8186_DISP_RDMA1_SEL_IN, MT8186_RDMA1_SEL_IN_MASK,
+ MT8186_RDMA1_FROM_OVL0_2L),
+ MMSYS_ROUTE(OVL_2L0, RDMA1,
+ MT8186_MMSYS_OVL_CON, MT8186_MMSYS_OVL0_2L_CON_MASK,
+ MT8186_OVL0_2L_GO_BLEND),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8186_DISP_RDMA1_MOUT_EN, MT8186_RDMA1_MOUT_EN_MASK,
+ MT8186_RDMA1_MOUT_TO_DPI0_SEL),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8186_DISP_DPI0_SEL_IN, MT8186_DPI0_SEL_IN_MASK,
+ MT8186_DPI0_FROM_RDMA1),
};
#endif /* __SOC_MEDIATEK_MT8186_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8188-mmsys.h b/drivers/soc/mediatek/mt8188-mmsys.h
index 6bebf1a69fc0..99080afead7e 100644
--- a/drivers/soc/mediatek/mt8188-mmsys.h
+++ b/drivers/soc/mediatek/mt8188-mmsys.h
@@ -202,158 +202,126 @@ static const u8 mmsys_mt8188_vdo1_rst_tb[] = {
};
static const struct mtk_mmsys_routes mmsys_mt8188_routing_table[] = {
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8188_VDO0_OVL_MOUT_EN, MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0,
- MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_WDMA0,
- MT8188_VDO0_OVL_MOUT_EN, MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0,
- MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8188_VDO0_DISP_RDMA_SEL, MT8188_SEL_IN_DISP_RDMA0_FROM_MASK,
- MT8188_SEL_IN_DISP_RDMA0_FROM_DISP_OVL0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8188_VDO0_DSI0_SEL_IN, MT8188_SEL_IN_DSI0_FROM_MASK,
- MT8188_SEL_IN_DSI0_FROM_DISP_DITHER0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_MERGE0,
- MT8188_VDO0_VPP_MERGE_SEL, MT8188_SEL_IN_VPP_MERGE_FROM_MASK,
- MT8188_SEL_IN_VPP_MERGE_FROM_DITHER0_OUT
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSC0,
- MT8188_VDO0_DSC_WARP_SEL,
- MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_MASK,
- MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_DISP_DITHER0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DP_INTF0,
- MT8188_VDO0_DP_INTF0_SEL_IN, MT8188_SEL_IN_DP_INTF0_FROM_MASK,
- MT8188_SEL_IN_DP_INTF0_FROM_DISP_DITHER0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0,
- MT8188_VDO0_VPP_MERGE_SEL, MT8188_SEL_IN_VPP_MERGE_FROM_MASK,
- MT8188_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0,
- MT8188_VDO0_DSI0_SEL_IN, MT8188_SEL_IN_DSI0_FROM_MASK,
- MT8188_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT
- }, {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
- MT8188_VDO0_DISP_RDMA_SEL, MT8188_SOUT_DISP_RDMA0_TO_MASK,
- MT8188_SOUT_DISP_RDMA0_TO_DISP_COLOR0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8188_VDO0_DISP_DITHER0_SEL_OUT,
- MT8188_SOUT_DISP_DITHER0_TO_MASK,
- MT8188_SOUT_DISP_DITHER0_TO_DSI0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DP_INTF0,
- MT8188_VDO0_DISP_DITHER0_SEL_OUT,
- MT8188_SOUT_DISP_DITHER0_TO_MASK,
- MT8188_SOUT_DISP_DITHER0_TO_DP_INTF0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF0,
- MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
- MT8188_SOUT_VPP_MERGE_TO_DP_INTF0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI0,
- MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
- MT8188_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_WDMA0,
- MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
- MT8188_SOUT_VPP_MERGE_TO_DISP_WDMA0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC0,
- MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
- MT8188_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0,
- MT8188_VDO0_DSC_WARP_SEL, MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK,
- MT8188_SOUT_DSC_WRAP0_OUT_TO_DSI0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0,
- MT8188_VDO0_DSC_WARP_SEL, MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK,
- MT8188_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE
- },
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8188_VDO0_OVL_MOUT_EN, MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0,
+ MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0),
+ MMSYS_ROUTE(OVL0, WDMA0,
+ MT8188_VDO0_OVL_MOUT_EN, MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0,
+ MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0),
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8188_VDO0_DISP_RDMA_SEL, MT8188_SEL_IN_DISP_RDMA0_FROM_MASK,
+ MT8188_SEL_IN_DISP_RDMA0_FROM_DISP_OVL0),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8188_VDO0_DSI0_SEL_IN, MT8188_SEL_IN_DSI0_FROM_MASK,
+ MT8188_SEL_IN_DSI0_FROM_DISP_DITHER0),
+ MMSYS_ROUTE(DITHER0, MERGE0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8188_SEL_IN_DP_INTF0_FROM_DISP_DITHER0),
+ MMSYS_ROUTE(DITHER0, DSC0,
+ MT8188_VDO0_DSC_WARP_SEL, MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_MASK,
+ MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_DISP_DITHER0),
+ MMSYS_ROUTE(DITHER0, DP_INTF0,
+ MT8188_VDO0_DP_INTF0_SEL_IN, MT8188_SEL_IN_DP_INTF0_FROM_MASK,
+ MT8188_SEL_IN_DP_INTF0_FROM_DISP_DITHER0),
+ MMSYS_ROUTE(DSC0, MERGE0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8188_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT),
+ MMSYS_ROUTE(MERGE0, DP_INTF0,
+ MT8188_VDO0_DP_INTF0_SEL_IN, MT8188_SEL_IN_DP_INTF0_FROM_MASK,
+ MT8188_SEL_IN_DP_INTF0_FROM_VPP_MERGE),
+ MMSYS_ROUTE(DSC0, DSI0,
+ MT8188_VDO0_DSI0_SEL_IN, MT8188_SEL_IN_DSI0_FROM_MASK,
+ MT8188_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT),
+ MMSYS_ROUTE(RDMA0, COLOR0,
+ MT8188_VDO0_DISP_RDMA_SEL, GENMASK(1, 0),
+ MT8188_SOUT_DISP_RDMA0_TO_DISP_COLOR0),
+ MMSYS_ROUTE(DITHER0, DSC0,
+ MT8188_VDO0_DISP_DITHER0_SEL_OUT, MT8188_SOUT_DISP_DITHER0_TO_MASK,
+ MT8188_SOUT_DISP_DITHER0_TO_DSC_WRAP0_IN),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8188_VDO0_DISP_DITHER0_SEL_OUT, MT8188_SOUT_DISP_DITHER0_TO_MASK,
+ MT8188_SOUT_DISP_DITHER0_TO_DSI0),
+ MMSYS_ROUTE(DITHER0, MERGE0,
+ MT8188_VDO0_DISP_DITHER0_SEL_OUT, MT8188_SOUT_DISP_DITHER0_TO_MASK,
+ MT8188_SOUT_DISP_DITHER0_TO_VPP_MERGE0),
+ MMSYS_ROUTE(DITHER0, DP_INTF0,
+ MT8188_VDO0_DISP_DITHER0_SEL_OUT, MT8188_SOUT_DISP_DITHER0_TO_MASK,
+ MT8188_SOUT_DISP_DITHER0_TO_DP_INTF0),
+ MMSYS_ROUTE(MERGE0, DP_INTF0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
+ MT8188_SOUT_VPP_MERGE_TO_DP_INTF0),
+ MMSYS_ROUTE(MERGE0, DPI0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
+ MT8188_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0),
+ MMSYS_ROUTE(MERGE0, WDMA0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
+ MT8188_SOUT_VPP_MERGE_TO_DISP_WDMA0),
+ MMSYS_ROUTE(MERGE0, DSC0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
+ MT8188_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN),
+ MMSYS_ROUTE(DSC0, DSI0,
+ MT8188_VDO0_DSC_WARP_SEL, MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8188_SOUT_DSC_WRAP0_OUT_TO_DSI0),
+ MMSYS_ROUTE(DSC0, MERGE0,
+ MT8188_VDO0_DSC_WARP_SEL, MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8188_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE),
};
static const struct mtk_mmsys_routes mmsys_mt8188_vdo1_routing_table[] = {
- {
- DDP_COMPONENT_MDP_RDMA0, DDP_COMPONENT_MERGE1,
- MT8188_VDO1_VPP_MERGE0_P0_SEL_IN, GENMASK(0, 0),
- MT8188_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0
- }, {
- DDP_COMPONENT_MDP_RDMA1, DDP_COMPONENT_MERGE1,
- MT8188_VDO1_VPP_MERGE0_P1_SEL_IN, GENMASK(0, 0),
- MT8188_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1
- }, {
- DDP_COMPONENT_MDP_RDMA2, DDP_COMPONENT_MERGE2,
- MT8188_VDO1_VPP_MERGE1_P0_SEL_IN, GENMASK(0, 0),
- MT8188_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2
- }, {
- DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER,
- MT8188_VDO1_MERGE0_ASYNC_SOUT_SEL, GENMASK(1, 0),
- MT8188_SOUT_TO_MIXER_IN1_SEL
- }, {
- DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER,
- MT8188_VDO1_MERGE1_ASYNC_SOUT_SEL, GENMASK(1, 0),
- MT8188_SOUT_TO_MIXER_IN2_SEL
- }, {
- DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER,
- MT8188_VDO1_MERGE2_ASYNC_SOUT_SEL, GENMASK(1, 0),
- MT8188_SOUT_TO_MIXER_IN3_SEL
- }, {
- DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER,
- MT8188_VDO1_MERGE3_ASYNC_SOUT_SEL, GENMASK(1, 0),
- MT8188_SOUT_TO_MIXER_IN4_SEL
- }, {
- DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
- MT8188_VDO1_MIXER_OUT_SOUT_SEL, GENMASK(0, 0),
- MT8188_MIXER_SOUT_TO_MERGE4_ASYNC_SEL
- }, {
- DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER,
- MT8188_VDO1_MIXER_IN1_SEL_IN, GENMASK(0, 0),
- MT8188_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT
- }, {
- DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER,
- MT8188_VDO1_MIXER_IN2_SEL_IN, GENMASK(0, 0),
- MT8188_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT
- }, {
- DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER,
- MT8188_VDO1_MIXER_IN3_SEL_IN, GENMASK(0, 0),
- MT8188_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT
- }, {
- DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER,
- MT8188_VDO1_MIXER_IN4_SEL_IN, GENMASK(0, 0),
- MT8188_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT
- }, {
- DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
- MT8188_VDO1_MIXER_SOUT_SEL_IN, GENMASK(2, 0),
- MT8188_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER
- }, {
- DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
- MT8188_VDO1_MERGE4_ASYNC_SEL_IN, GENMASK(2, 0),
- MT8188_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1,
- MT8188_VDO1_DISP_DPI1_SEL_IN, GENMASK(1, 0),
- MT8188_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1,
- MT8188_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
- MT8188_MERGE4_SOUT_TO_DPI1_SEL
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1,
- MT8188_VDO1_DISP_DP_INTF0_SEL_IN, GENMASK(1, 0),
- MT8188_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1,
- MT8188_VDO1_MERGE4_SOUT_SEL, GENMASK(3, 0),
- MT8188_MERGE4_SOUT_TO_DP_INTF0_SEL
- }
+ MMSYS_ROUTE(MDP_RDMA0, MERGE1,
+ MT8188_VDO1_VPP_MERGE0_P0_SEL_IN, GENMASK(0, 0),
+ MT8188_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0),
+ MMSYS_ROUTE(MDP_RDMA1, MERGE1,
+ MT8188_VDO1_VPP_MERGE0_P1_SEL_IN, GENMASK(0, 0),
+ MT8188_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1),
+ MMSYS_ROUTE(MDP_RDMA2, MERGE2,
+ MT8188_VDO1_VPP_MERGE1_P0_SEL_IN, GENMASK(0, 0),
+ MT8188_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2),
+ MMSYS_ROUTE(MERGE1, ETHDR_MIXER,
+ MT8188_VDO1_MERGE0_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8188_SOUT_TO_MIXER_IN1_SEL),
+ MMSYS_ROUTE(MERGE2, ETHDR_MIXER,
+ MT8188_VDO1_MERGE1_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8188_SOUT_TO_MIXER_IN2_SEL),
+ MMSYS_ROUTE(MERGE3, ETHDR_MIXER,
+ MT8188_VDO1_MERGE2_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8188_SOUT_TO_MIXER_IN3_SEL),
+ MMSYS_ROUTE(MERGE4, ETHDR_MIXER,
+ MT8188_VDO1_MERGE3_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8188_SOUT_TO_MIXER_IN4_SEL),
+ MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
+ MT8188_VDO1_MIXER_OUT_SOUT_SEL, GENMASK(0, 0),
+ MT8188_MIXER_SOUT_TO_MERGE4_ASYNC_SEL),
+ MMSYS_ROUTE(MERGE1, ETHDR_MIXER,
+ MT8188_VDO1_MIXER_IN1_SEL_IN, GENMASK(0, 0),
+ MT8188_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT),
+ MMSYS_ROUTE(MERGE2, ETHDR_MIXER,
+ MT8188_VDO1_MIXER_IN2_SEL_IN, GENMASK(0, 0),
+ MT8188_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT),
+ MMSYS_ROUTE(MERGE3, ETHDR_MIXER,
+ MT8188_VDO1_MIXER_IN3_SEL_IN, GENMASK(0, 0),
+ MT8188_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT),
+ MMSYS_ROUTE(MERGE4, ETHDR_MIXER,
+ MT8188_VDO1_MIXER_IN4_SEL_IN, GENMASK(0, 0),
+ MT8188_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT),
+ MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
+ MT8188_VDO1_MIXER_SOUT_SEL_IN, GENMASK(2, 0),
+ MT8188_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER),
+ MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
+ MT8188_VDO1_MERGE4_ASYNC_SEL_IN, GENMASK(2, 0),
+ MT8188_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT),
+ MMSYS_ROUTE(MERGE5, DPI1,
+ MT8188_VDO1_DISP_DPI1_SEL_IN, GENMASK(1, 0),
+ MT8188_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT),
+ MMSYS_ROUTE(MERGE5, DPI1,
+ MT8188_VDO1_MERGE4_SOUT_SEL, GENMASK(3, 0),
+ MT8188_MERGE4_SOUT_TO_DPI1_SEL),
+ MMSYS_ROUTE(MERGE5, DP_INTF1,
+ MT8188_VDO1_DISP_DP_INTF0_SEL_IN, GENMASK(1, 0),
+ MT8188_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT),
+ MMSYS_ROUTE(MERGE5, DP_INTF1,
+ MT8188_VDO1_MERGE4_SOUT_SEL, GENMASK(3, 0),
+ MT8188_MERGE4_SOUT_TO_DP_INTF0_SEL),
};
#endif /* __SOC_MEDIATEK_MT8188_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8192-mmsys.h b/drivers/soc/mediatek/mt8192-mmsys.h
index a016d80b4bc1..7cafa2455fd0 100644
--- a/drivers/soc/mediatek/mt8192-mmsys.h
+++ b/drivers/soc/mediatek/mt8192-mmsys.h
@@ -31,47 +31,36 @@
#define MT8192_DSI0_SEL_IN_DITHER0 0x1
static const struct mtk_mmsys_routes mmsys_mt8192_routing_table[] = {
- {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
- MT8192_DISP_OVL0_2L_MOUT_EN, MT8192_OVL0_MOUT_EN_DISP_RDMA0,
- MT8192_OVL0_MOUT_EN_DISP_RDMA0
- }, {
- DDP_COMPONENT_OVL_2L2, DDP_COMPONENT_RDMA4,
- MT8192_DISP_OVL2_2L_MOUT_EN, MT8192_OVL2_2L_MOUT_EN_RDMA4,
- MT8192_OVL2_2L_MOUT_EN_RDMA4
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8192_DISP_DITHER0_MOUT_EN, MT8192_DITHER0_MOUT_IN_DSI0,
- MT8192_DITHER0_MOUT_IN_DSI0
- }, {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
- MT8192_DISP_RDMA0_SEL_IN, MT8192_RDMA0_SEL_IN_OVL0_2L,
- MT8192_RDMA0_SEL_IN_OVL0_2L
- }, {
- DDP_COMPONENT_CCORR, DDP_COMPONENT_AAL0,
- MT8192_DISP_AAL0_SEL_IN, MT8192_AAL0_SEL_IN_CCORR0,
- MT8192_AAL0_SEL_IN_CCORR0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8192_DISP_DSI0_SEL_IN, MT8192_DSI0_SEL_IN_DITHER0,
- MT8192_DSI0_SEL_IN_DITHER0
- }, {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
- MT8192_DISP_RDMA0_SOUT_SEL, MT8192_RDMA0_SOUT_COLOR0,
- MT8192_RDMA0_SOUT_COLOR0
- }, {
- DDP_COMPONENT_CCORR, DDP_COMPONENT_AAL0,
- MT8192_DISP_CCORR0_SOUT_SEL, MT8192_CCORR0_SOUT_AAL0,
- MT8192_CCORR0_SOUT_AAL0
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL_2L0,
- MT8192_MMSYS_OVL_MOUT_EN, MT8192_DISP_OVL0_GO_BG,
- MT8192_DISP_OVL0_GO_BG
- }, {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
- MT8192_MMSYS_OVL_MOUT_EN, MT8192_DISP_OVL0_2L_GO_BLEND,
- MT8192_DISP_OVL0_2L_GO_BLEND
- }
+ MMSYS_ROUTE(OVL_2L0, RDMA0,
+ MT8192_DISP_OVL0_2L_MOUT_EN, MT8192_OVL0_MOUT_EN_DISP_RDMA0,
+ MT8192_OVL0_MOUT_EN_DISP_RDMA0),
+ MMSYS_ROUTE(OVL_2L2, RDMA4,
+ MT8192_DISP_OVL2_2L_MOUT_EN, MT8192_OVL2_2L_MOUT_EN_RDMA4,
+ MT8192_OVL2_2L_MOUT_EN_RDMA4),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8192_DISP_DITHER0_MOUT_EN, MT8192_DITHER0_MOUT_IN_DSI0,
+ MT8192_DITHER0_MOUT_IN_DSI0),
+ MMSYS_ROUTE(OVL_2L0, RDMA0,
+ MT8192_DISP_RDMA0_SEL_IN, MT8192_RDMA0_SEL_IN_OVL0_2L,
+ MT8192_RDMA0_SEL_IN_OVL0_2L),
+ MMSYS_ROUTE(CCORR, AAL0,
+ MT8192_DISP_AAL0_SEL_IN, MT8192_AAL0_SEL_IN_CCORR0,
+ MT8192_AAL0_SEL_IN_CCORR0),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8192_DISP_DSI0_SEL_IN, MT8192_DSI0_SEL_IN_DITHER0,
+ MT8192_DSI0_SEL_IN_DITHER0),
+ MMSYS_ROUTE(RDMA0, COLOR0,
+ MT8192_DISP_RDMA0_SOUT_SEL, MT8192_RDMA0_SOUT_COLOR0,
+ MT8192_RDMA0_SOUT_COLOR0),
+ MMSYS_ROUTE(CCORR, AAL0,
+ MT8192_DISP_CCORR0_SOUT_SEL, MT8192_CCORR0_SOUT_AAL0,
+ MT8192_CCORR0_SOUT_AAL0),
+ MMSYS_ROUTE(OVL0, OVL_2L0,
+ MT8192_MMSYS_OVL_MOUT_EN, MT8192_DISP_OVL0_GO_BG,
+ MT8192_DISP_OVL0_GO_BG),
+ MMSYS_ROUTE(OVL_2L0, RDMA0,
+ MT8192_MMSYS_OVL_MOUT_EN, MT8192_DISP_OVL0_2L_GO_BLEND,
+ MT8192_DISP_OVL0_2L_GO_BLEND),
};
#endif /* __SOC_MEDIATEK_MT8192_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8195-mmsys.h b/drivers/soc/mediatek/mt8195-mmsys.h
index 9be2df2832a4..f69929a2a4d4 100644
--- a/drivers/soc/mediatek/mt8195-mmsys.h
+++ b/drivers/soc/mediatek/mt8195-mmsys.h
@@ -160,370 +160,278 @@
#define MT8195_SVPP3_MDP_RSZ BIT(5)
static const struct mtk_mmsys_routes mmsys_mt8195_routing_table[] = {
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0,
- MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_WDMA0,
- MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0,
- MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL1,
- MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1,
- MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1
- }, {
- DDP_COMPONENT_OVL1, DDP_COMPONENT_RDMA1,
- MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1,
- MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1
- }, {
- DDP_COMPONENT_OVL1, DDP_COMPONENT_WDMA1,
- MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1,
- MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1
- }, {
- DDP_COMPONENT_OVL1, DDP_COMPONENT_OVL0,
- MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0,
- MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
- MT8195_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
- MT8195_SEL_IN_VPP_MERGE_FROM_DISP_DITHER1
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
- MT8195_SEL_IN_VPP_MERGE_FROM_VDO1_VIRTUAL0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSC0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP0_IN_FROM_DISP_DITHER0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP0_IN_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSC1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_IN_FROM_DISP_DITHER1
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_IN_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
- MT8195_SEL_IN_DP_INTF0_FROM_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
- MT8195_SEL_IN_DP_INTF0_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
- MT8195_SEL_IN_DP_INTF0_FROM_VDO1_VIRTUAL0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI0_FROM_MASK,
- MT8195_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI0_FROM_MASK,
- MT8195_SEL_IN_DSI0_FROM_DISP_DITHER0
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DSI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI1_FROM_MASK,
- MT8195_SEL_IN_DSI1_FROM_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI1_FROM_MASK,
- MT8195_SEL_IN_DSI1_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_OVL1, DDP_COMPONENT_WDMA1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA1_FROM_MASK,
- MT8195_SEL_IN_DISP_WDMA1_FROM_DISP_OVL1
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_WDMA1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA1_FROM_MASK,
- MT8195_SEL_IN_DISP_WDMA1_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DSI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_WDMA0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA0_FROM_MASK,
- MT8195_SEL_IN_DISP_WDMA0_FROM_DISP_OVL0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSC0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER0_TO_MASK,
- MT8195_SOUT_DISP_DITHER0_TO_DSC_WRAP0_IN
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER0_TO_MASK,
- MT8195_SOUT_DISP_DITHER0_TO_DSI0
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSC1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
- MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
- MT8195_SOUT_DISP_DITHER1_TO_VPP_MERGE
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSI1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
- MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
- MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
- MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
- MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
- MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK,
- MT8195_SOUT_VDO1_VIRTUAL0_TO_VPP_MERGE
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK,
- MT8195_SOUT_VDO1_VIRTUAL0_TO_DP_INTF0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSI1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
- MT8195_SOUT_VPP_MERGE_TO_DSI1
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
- MT8195_SOUT_VPP_MERGE_TO_DP_INTF0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
- MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
- MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
- MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_WDMA1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
- MT8195_SOUT_VPP_MERGE_TO_DISP_WDMA1
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
- MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN_MASK,
- MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP0_OUT_TO_DSI0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DSI1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP1_OUT_TO_DSI1
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP1_OUT_TO_DP_INTF0
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP1_OUT_TO_VPP_MERGE
- }
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0,
+ MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0),
+ MMSYS_ROUTE(OVL0, WDMA0,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0,
+ MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0),
+ MMSYS_ROUTE(OVL0, OVL1,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1,
+ MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1),
+ MMSYS_ROUTE(OVL1, RDMA1,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1,
+ MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1),
+ MMSYS_ROUTE(OVL1, WDMA1,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1,
+ MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1),
+ MMSYS_ROUTE(OVL1, OVL0,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0,
+ MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0),
+ MMSYS_ROUTE(DSC0, MERGE0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8195_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT),
+ MMSYS_ROUTE(DITHER1, MERGE0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8195_SEL_IN_VPP_MERGE_FROM_DISP_DITHER1),
+ MMSYS_ROUTE(MERGE5, MERGE0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8195_SEL_IN_VPP_MERGE_FROM_VDO1_VIRTUAL0),
+ MMSYS_ROUTE(DITHER0, DSC0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP0_IN_FROM_DISP_DITHER0),
+ MMSYS_ROUTE(MERGE0, DSC0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP0_IN_FROM_VPP_MERGE),
+ MMSYS_ROUTE(DITHER1, DSC1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_IN_FROM_DISP_DITHER1),
+ MMSYS_ROUTE(MERGE0, DSC1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_IN_FROM_VPP_MERGE),
+ MMSYS_ROUTE(MERGE0, DP_INTF1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE),
+ MMSYS_ROUTE(MERGE0, DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE),
+ MMSYS_ROUTE(MERGE0, DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE),
+ MMSYS_ROUTE(DSC1, DP_INTF1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(DSC1, DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(DSC1, DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(DSC0, DP_INTF1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT),
+ MMSYS_ROUTE(DSC0, DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT),
+ MMSYS_ROUTE(DSC0, DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT),
+ MMSYS_ROUTE(DSC1, DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
+ MT8195_SEL_IN_DP_INTF0_FROM_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(MERGE0, DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
+ MT8195_SEL_IN_DP_INTF0_FROM_VPP_MERGE),
+ MMSYS_ROUTE(MERGE5, DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
+ MT8195_SEL_IN_DP_INTF0_FROM_VDO1_VIRTUAL0),
+ MMSYS_ROUTE(DSC0, DSI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI0_FROM_MASK,
+ MT8195_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI0_FROM_MASK,
+ MT8195_SEL_IN_DSI0_FROM_DISP_DITHER0),
+ MMSYS_ROUTE(DSC1, DSI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI1_FROM_MASK,
+ MT8195_SEL_IN_DSI1_FROM_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(MERGE0, DSI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI1_FROM_MASK,
+ MT8195_SEL_IN_DSI1_FROM_VPP_MERGE),
+ MMSYS_ROUTE(OVL1, WDMA1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA1_FROM_MASK,
+ MT8195_SEL_IN_DISP_WDMA1_FROM_DISP_OVL1),
+ MMSYS_ROUTE(MERGE0, WDMA1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA1_FROM_MASK,
+ MT8195_SEL_IN_DISP_WDMA1_FROM_VPP_MERGE),
+ MMSYS_ROUTE(DSC1, DSI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DSC1, DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DSC1, DP_INTF1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DSC1, DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DSC1, DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DSC1, MERGE0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DITHER1, DSI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1),
+ MMSYS_ROUTE(DITHER1, DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1),
+ MMSYS_ROUTE(DITHER1, DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1),
+ MMSYS_ROUTE(DITHER1, DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1),
+ MMSYS_ROUTE(OVL0, WDMA0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA0_FROM_MASK,
+ MT8195_SEL_IN_DISP_WDMA0_FROM_DISP_OVL0),
+ MMSYS_ROUTE(DITHER0, DSC0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER0_TO_MASK,
+ MT8195_SOUT_DISP_DITHER0_TO_DSC_WRAP0_IN),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER0_TO_MASK,
+ MT8195_SOUT_DISP_DITHER0_TO_DSI0),
+ MMSYS_ROUTE(DITHER1, DSC1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DITHER1, MERGE0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_VPP_MERGE),
+ MMSYS_ROUTE(DITHER1, DSI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(DITHER1, DP_INTF0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(DITHER1, DP_INTF1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(DITHER1, DPI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(DITHER1, DPI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(MERGE5, MERGE0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK,
+ MT8195_SOUT_VDO1_VIRTUAL0_TO_VPP_MERGE),
+ MMSYS_ROUTE(MERGE5, DP_INTF0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK,
+ MT8195_SOUT_VDO1_VIRTUAL0_TO_DP_INTF0),
+ MMSYS_ROUTE(MERGE0, DSI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DSI1),
+ MMSYS_ROUTE(MERGE0, DP_INTF0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DP_INTF0),
+ MMSYS_ROUTE(MERGE0, DP_INTF1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0),
+ MMSYS_ROUTE(MERGE0, DPI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0),
+ MMSYS_ROUTE(MERGE0, DPI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0),
+ MMSYS_ROUTE(MERGE0, WDMA1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DISP_WDMA1),
+ MMSYS_ROUTE(MERGE0, DSC0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN),
+ MMSYS_ROUTE(MERGE0, DSC1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DSC0, DSI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_DSI0),
+ MMSYS_ROUTE(DSC0, DP_INTF1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0),
+ MMSYS_ROUTE(DSC0, DPI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0),
+ MMSYS_ROUTE(DSC0, DPI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0),
+ MMSYS_ROUTE(DSC0, MERGE0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE),
+ MMSYS_ROUTE(DSC1, DSI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_DSI1),
+ MMSYS_ROUTE(DSC1, DP_INTF0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_DP_INTF0),
+ MMSYS_ROUTE(DSC1, DP_INTF1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0),
+ MMSYS_ROUTE(DSC1, DPI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0),
+ MMSYS_ROUTE(DSC1, DPI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0),
+ MMSYS_ROUTE(DSC1, MERGE0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_VPP_MERGE),
};
static const struct mtk_mmsys_routes mmsys_mt8195_vdo1_routing_table[] = {
- {
- DDP_COMPONENT_MDP_RDMA0, DDP_COMPONENT_MERGE1,
- MT8195_VDO1_VPP_MERGE0_P0_SEL_IN, GENMASK(0, 0),
- MT8195_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0
- }, {
- DDP_COMPONENT_MDP_RDMA1, DDP_COMPONENT_MERGE1,
- MT8195_VDO1_VPP_MERGE0_P1_SEL_IN, GENMASK(0, 0),
- MT8195_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1
- }, {
- DDP_COMPONENT_MDP_RDMA2, DDP_COMPONENT_MERGE2,
- MT8195_VDO1_VPP_MERGE1_P0_SEL_IN, GENMASK(0, 0),
- MT8195_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2
- }, {
- DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MERGE0_ASYNC_SOUT_SEL, GENMASK(1, 0),
- MT8195_SOUT_TO_MIXER_IN1_SEL
- }, {
- DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MERGE1_ASYNC_SOUT_SEL, GENMASK(1, 0),
- MT8195_SOUT_TO_MIXER_IN2_SEL
- }, {
- DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MERGE2_ASYNC_SOUT_SEL, GENMASK(1, 0),
- MT8195_SOUT_TO_MIXER_IN3_SEL
- }, {
- DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MERGE3_ASYNC_SOUT_SEL, GENMASK(1, 0),
- MT8195_SOUT_TO_MIXER_IN4_SEL
- }, {
- DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
- MT8195_VDO1_MIXER_OUT_SOUT_SEL, GENMASK(0, 0),
- MT8195_MIXER_SOUT_TO_MERGE4_ASYNC_SEL
- }, {
- DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MIXER_IN1_SEL_IN, GENMASK(0, 0),
- MT8195_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT
- }, {
- DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MIXER_IN2_SEL_IN, GENMASK(0, 0),
- MT8195_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT
- }, {
- DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MIXER_IN3_SEL_IN, GENMASK(0, 0),
- MT8195_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT
- }, {
- DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MIXER_IN4_SEL_IN, GENMASK(0, 0),
- MT8195_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT
- }, {
- DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
- MT8195_VDO1_MIXER_SOUT_SEL_IN, GENMASK(2, 0),
- MT8195_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER
- }, {
- DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
- MT8195_VDO1_MERGE4_ASYNC_SEL_IN, GENMASK(2, 0),
- MT8195_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1,
- MT8195_VDO1_DISP_DPI1_SEL_IN, GENMASK(1, 0),
- MT8195_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1,
- MT8195_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
- MT8195_MERGE4_SOUT_TO_DPI1_SEL
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO1_DISP_DP_INTF0_SEL_IN, GENMASK(1, 0),
- MT8195_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
- MT8195_MERGE4_SOUT_TO_DP_INTF0_SEL
- }
+ MMSYS_ROUTE(MDP_RDMA0, MERGE1,
+ MT8195_VDO1_VPP_MERGE0_P0_SEL_IN, GENMASK(0, 0),
+ MT8195_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0),
+ MMSYS_ROUTE(MDP_RDMA1, MERGE1,
+ MT8195_VDO1_VPP_MERGE0_P1_SEL_IN, GENMASK(0, 0),
+ MT8195_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1),
+ MMSYS_ROUTE(MDP_RDMA2, MERGE2,
+ MT8195_VDO1_VPP_MERGE1_P0_SEL_IN, GENMASK(0, 0),
+ MT8195_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2),
+ MMSYS_ROUTE(MERGE1, ETHDR_MIXER,
+ MT8195_VDO1_MERGE0_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8195_SOUT_TO_MIXER_IN1_SEL),
+ MMSYS_ROUTE(MERGE2, ETHDR_MIXER,
+ MT8195_VDO1_MERGE1_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8195_SOUT_TO_MIXER_IN2_SEL),
+ MMSYS_ROUTE(MERGE3, ETHDR_MIXER,
+ MT8195_VDO1_MERGE2_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8195_SOUT_TO_MIXER_IN3_SEL),
+ MMSYS_ROUTE(MERGE4, ETHDR_MIXER,
+ MT8195_VDO1_MERGE3_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8195_SOUT_TO_MIXER_IN4_SEL),
+ MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
+ MT8195_VDO1_MIXER_OUT_SOUT_SEL, GENMASK(0, 0),
+ MT8195_MIXER_SOUT_TO_MERGE4_ASYNC_SEL),
+ MMSYS_ROUTE(MERGE1, ETHDR_MIXER,
+ MT8195_VDO1_MIXER_IN1_SEL_IN, GENMASK(0, 0),
+ MT8195_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT),
+ MMSYS_ROUTE(MERGE2, ETHDR_MIXER,
+ MT8195_VDO1_MIXER_IN2_SEL_IN, GENMASK(0, 0),
+ MT8195_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT),
+ MMSYS_ROUTE(MERGE3, ETHDR_MIXER,
+ MT8195_VDO1_MIXER_IN3_SEL_IN, GENMASK(0, 0),
+ MT8195_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT),
+ MMSYS_ROUTE(MERGE4, ETHDR_MIXER,
+ MT8195_VDO1_MIXER_IN4_SEL_IN, GENMASK(0, 0),
+ MT8195_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT),
+ MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
+ MT8195_VDO1_MIXER_SOUT_SEL_IN, GENMASK(2, 0),
+ MT8195_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER),
+ MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
+ MT8195_VDO1_MERGE4_ASYNC_SEL_IN, GENMASK(2, 0),
+ MT8195_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT),
+ MMSYS_ROUTE(MERGE5, DPI1,
+ MT8195_VDO1_DISP_DPI1_SEL_IN, GENMASK(1, 0),
+ MT8195_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT),
+ MMSYS_ROUTE(MERGE5, DPI1,
+ MT8195_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
+ MT8195_MERGE4_SOUT_TO_DPI1_SEL),
+ MMSYS_ROUTE(MERGE5, DP_INTF1,
+ MT8195_VDO1_DISP_DP_INTF0_SEL_IN, GENMASK(1, 0),
+ MT8195_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT),
+ MMSYS_ROUTE(MERGE5, DP_INTF1,
+ MT8195_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
+ MT8195_MERGE4_SOUT_TO_DP_INTF0_SEL),
};
#endif /* __SOC_MEDIATEK_MT8195_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8365-mmsys.h b/drivers/soc/mediatek/mt8365-mmsys.h
index 7abaf048d91e..533a3fd0923b 100644
--- a/drivers/soc/mediatek/mt8365-mmsys.h
+++ b/drivers/soc/mediatek/mt8365-mmsys.h
@@ -14,8 +14,9 @@
#define MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN 0xfd8
#define MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00 0xfdc
+#define MT8365_DISP_MS_IN_OUT_MASK GENMASK(3, 0)
#define MT8365_RDMA0_SOUT_COLOR0 0x1
-#define MT8365_DITHER_MOUT_EN_DSI0 0x1
+#define MT8365_DITHER_MOUT_EN_DSI0 BIT(0)
#define MT8365_DSI0_SEL_IN_DITHER 0x1
#define MT8365_RDMA0_SEL_IN_OVL0 0x0
#define MT8365_RDMA0_RSZ0_SEL_IN_RDMA0 0x0
@@ -27,56 +28,37 @@
#define MT8365_DPI0_SEL_IN_RDMA1 0x0
static const struct mtk_mmsys_routes mt8365_mmsys_routing_table[] = {
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8365_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN,
- MT8365_OVL0_MOUT_PATH0_SEL, MT8365_OVL0_MOUT_PATH0_SEL
- },
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8365_DISP_REG_CONFIG_DISP_RDMA0_SEL_IN,
- MT8365_RDMA0_SEL_IN_OVL0, MT8365_RDMA0_SEL_IN_OVL0
- },
- {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
- MT8365_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL,
- MT8365_RDMA0_SOUT_COLOR0, MT8365_RDMA0_SOUT_COLOR0
- },
- {
- DDP_COMPONENT_COLOR0, DDP_COMPONENT_CCORR,
- MT8365_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN,
- MT8365_DISP_COLOR_SEL_IN_COLOR0,MT8365_DISP_COLOR_SEL_IN_COLOR0
- },
- {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8365_DISP_REG_CONFIG_DISP_DITHER0_MOUT_EN,
- MT8365_DITHER_MOUT_EN_DSI0, MT8365_DITHER_MOUT_EN_DSI0
- },
- {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8365_DISP_REG_CONFIG_DISP_DSI0_SEL_IN,
- MT8365_DSI0_SEL_IN_DITHER, MT8365_DSI0_SEL_IN_DITHER
- },
- {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
- MT8365_DISP_REG_CONFIG_DISP_RDMA0_RSZ0_SEL_IN,
- MT8365_RDMA0_RSZ0_SEL_IN_RDMA0, MT8365_RDMA0_RSZ0_SEL_IN_RDMA0
- },
- {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00,
- MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK, MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK
- },
- {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN,
- MT8365_DPI0_SEL_IN_RDMA1, MT8365_DPI0_SEL_IN_RDMA1
- },
- {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8365_DISP_REG_CONFIG_DISP_RDMA1_SOUT_SEL,
- MT8365_RDMA1_SOUT_DPI0, MT8365_RDMA1_SOUT_DPI0
- },
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8365_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_OVL0_MOUT_PATH0_SEL),
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8365_DISP_REG_CONFIG_DISP_RDMA0_SEL_IN,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_RDMA0_SEL_IN_OVL0),
+ MMSYS_ROUTE(RDMA0, COLOR0,
+ MT8365_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_RDMA0_SOUT_COLOR0),
+ MMSYS_ROUTE(COLOR0, CCORR,
+ MT8365_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_DISP_COLOR_SEL_IN_COLOR0),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8365_DISP_REG_CONFIG_DISP_DITHER0_MOUT_EN,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_DITHER_MOUT_EN_DSI0),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8365_DISP_REG_CONFIG_DISP_DSI0_SEL_IN,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_DSI0_SEL_IN_DITHER),
+ MMSYS_ROUTE(RDMA0, COLOR0,
+ MT8365_DISP_REG_CONFIG_DISP_RDMA0_RSZ0_SEL_IN,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_RDMA0_RSZ0_SEL_IN_RDMA0),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00,
+ MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK,
+ MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_DPI0_SEL_IN_RDMA1),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8365_DISP_REG_CONFIG_DISP_RDMA1_SOUT_SEL,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_RDMA1_SOUT_DPI0),
};
#endif /* __SOC_MEDIATEK_MT8365_MMSYS_H */
diff --git a/drivers/soc/mediatek/mtk-mmsys.h b/drivers/soc/mediatek/mtk-mmsys.h
index d370192737ca..fe628d5f5198 100644
--- a/drivers/soc/mediatek/mtk-mmsys.h
+++ b/drivers/soc/mediatek/mtk-mmsys.h
@@ -80,6 +80,20 @@
#define MMSYS_RST_NR(bank, bit) (((bank) * 32) + (bit))
+/*
+ * This macro adds a compile time check to make sure that the in/out
+ * selection bit(s) fit in the register mask, similar to bitfield
+ * macros, but this does not transform the value.
+ */
+#define MMSYS_ROUTE(from, to, reg_addr, reg_mask, selection) \
+ { DDP_COMPONENT_##from, DDP_COMPONENT_##to, reg_addr, reg_mask, \
+ (__BUILD_BUG_ON_ZERO_MSG((reg_mask) == 0, "Invalid mask") + \
+ __BUILD_BUG_ON_ZERO_MSG(~(reg_mask) & (selection), \
+ #selection " does not fit in " \
+ #reg_mask) + \
+ (selection)) \
+ }
+
struct mtk_mmsys_routes {
u32 from_comp;
u32 to_comp;
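A quick illustration of the compile-time check that the new MMSYS_ROUTE() macro performs: the user-space sketch below mirrors the idea with made-up CHECK_ZERO()/ROUTE_SEL() helpers standing in for __BUILD_BUG_ON_ZERO_MSG(); like the kernel helper, it leans on GNU C accepting an otherwise empty struct, so build it with gcc -std=gnu11.

/* route_check.c - user-space sketch of the MMSYS_ROUTE() mask check */
#include <stdio.h>

/* Evaluates to 0 at runtime; fails the build when 'e' is a non-zero constant. */
#define CHECK_ZERO(e, msg) \
	((int)sizeof(struct { _Static_assert(!(e), msg); }))

/* Mirror of the selection expression inside MMSYS_ROUTE(). */
#define ROUTE_SEL(mask, sel)						\
	(CHECK_ZERO((mask) == 0, "Invalid mask") +			\
	 CHECK_ZERO(~(mask) & (sel), "selection does not fit in mask") + \
	 (sel))

int main(void)
{
	/* BIT(0) fits inside GENMASK(3, 0), so this compiles and prints 0x1. */
	printf("selection = %#x\n", ROUTE_SEL(0xfu, 0x1u));

	/* ROUTE_SEL(0xfu, 0x10u) would instead stop the build with the message above. */
	return 0;
}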
diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c
index 5250c1d702eb..aaa965d4b050 100644
--- a/drivers/soc/mediatek/mtk-mutex.c
+++ b/drivers/soc/mediatek/mtk-mutex.c
@@ -155,6 +155,7 @@
#define MT8188_MUTEX_MOD_DISP1_VPP_MERGE3 23
#define MT8188_MUTEX_MOD_DISP1_VPP_MERGE4 24
#define MT8188_MUTEX_MOD_DISP1_DISP_MIXER 30
+#define MT8188_MUTEX_MOD_DISP1_DPI1 38
#define MT8188_MUTEX_MOD_DISP1_DP_INTF1 39
#define MT8195_MUTEX_MOD_DISP_OVL0 0
@@ -289,6 +290,7 @@
#define MT8188_MUTEX_SOF_DSI0 1
#define MT8188_MUTEX_SOF_DP_INTF0 3
#define MT8188_MUTEX_SOF_DP_INTF1 4
+#define MT8188_MUTEX_SOF_DPI1 5
#define MT8195_MUTEX_SOF_DSI0 1
#define MT8195_MUTEX_SOF_DSI1 2
#define MT8195_MUTEX_SOF_DP_INTF0 3
@@ -301,6 +303,7 @@
#define MT8188_MUTEX_EOF_DSI0 (MT8188_MUTEX_SOF_DSI0 << 7)
#define MT8188_MUTEX_EOF_DP_INTF0 (MT8188_MUTEX_SOF_DP_INTF0 << 7)
#define MT8188_MUTEX_EOF_DP_INTF1 (MT8188_MUTEX_SOF_DP_INTF1 << 7)
+#define MT8188_MUTEX_EOF_DPI1 (MT8188_MUTEX_SOF_DPI1 << 7)
#define MT8195_MUTEX_EOF_DSI0 (MT8195_MUTEX_SOF_DSI0 << 7)
#define MT8195_MUTEX_EOF_DSI1 (MT8195_MUTEX_SOF_DSI1 << 7)
#define MT8195_MUTEX_EOF_DP_INTF0 (MT8195_MUTEX_SOF_DP_INTF0 << 7)
@@ -472,6 +475,7 @@ static const u8 mt8188_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_PWM0] = MT8188_MUTEX_MOD2_DISP_PWM0,
[DDP_COMPONENT_DP_INTF0] = MT8188_MUTEX_MOD_DISP_DP_INTF0,
[DDP_COMPONENT_DP_INTF1] = MT8188_MUTEX_MOD_DISP1_DP_INTF1,
+ [DDP_COMPONENT_DPI1] = MT8188_MUTEX_MOD_DISP1_DPI1,
[DDP_COMPONENT_ETHDR_MIXER] = MT8188_MUTEX_MOD_DISP1_DISP_MIXER,
[DDP_COMPONENT_MDP_RDMA0] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA0,
[DDP_COMPONENT_MDP_RDMA1] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA1,
@@ -686,6 +690,8 @@ static const u16 mt8188_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] =
MT8188_MUTEX_SOF_DSI0 | MT8188_MUTEX_EOF_DSI0,
+ [MUTEX_SOF_DPI1] =
+ MT8188_MUTEX_SOF_DPI1 | MT8188_MUTEX_EOF_DPI1,
[MUTEX_SOF_DP_INTF0] =
MT8188_MUTEX_SOF_DP_INTF0 | MT8188_MUTEX_EOF_DP_INTF0,
[MUTEX_SOF_DP_INTF1] =
diff --git a/drivers/soc/mediatek/mtk-socinfo.c b/drivers/soc/mediatek/mtk-socinfo.c
index 123b12cd2543..c697a0398d91 100644
--- a/drivers/soc/mediatek/mtk-socinfo.c
+++ b/drivers/soc/mediatek/mtk-socinfo.c
@@ -56,29 +56,39 @@ static struct socinfo_data socinfo_data_table[] = {
MTK_SOCINFO_ENTRY("MT8195", "MT8195GV/EHZA", "Kompanio 1200", 0x81950304, CELL_NOT_USED),
MTK_SOCINFO_ENTRY("MT8195", "MT8195TV/EZA", "Kompanio 1380", 0x81950400, CELL_NOT_USED),
MTK_SOCINFO_ENTRY("MT8195", "MT8195TV/EHZA", "Kompanio 1380", 0x81950404, CELL_NOT_USED),
+ MTK_SOCINFO_ENTRY("MT8370", "MT8370AV/AZA", "Genio 510", 0x83700000, 0x00000081),
+ MTK_SOCINFO_ENTRY("MT8390", "MT8390AV/AZA", "Genio 700", 0x83900000, 0x00000080),
MTK_SOCINFO_ENTRY("MT8395", "MT8395AV/ZA", "Genio 1200", 0x83950100, CELL_NOT_USED),
+ MTK_SOCINFO_ENTRY("MT8395", "MT8395AV/ZA", "Genio 1200", 0x83950800, CELL_NOT_USED),
};
static int mtk_socinfo_create_socinfo_node(struct mtk_socinfo *mtk_socinfop)
{
struct soc_device_attribute *attrs;
- static char machine[30] = {0};
+ struct socinfo_data *data = mtk_socinfop->socinfo_data;
static const char *soc_manufacturer = "MediaTek";
attrs = devm_kzalloc(mtk_socinfop->dev, sizeof(*attrs), GFP_KERNEL);
if (!attrs)
return -ENOMEM;
- snprintf(machine, sizeof(machine), "%s (%s)", mtk_socinfop->socinfo_data->marketing_name,
- mtk_socinfop->socinfo_data->soc_name);
- attrs->family = soc_manufacturer;
- attrs->machine = machine;
+ if (data->marketing_name != NULL && data->marketing_name[0] != '\0')
+ attrs->family = devm_kasprintf(mtk_socinfop->dev, GFP_KERNEL, "MediaTek %s",
+ data->marketing_name);
+ else
+ attrs->family = soc_manufacturer;
+
+ attrs->soc_id = data->soc_name;
+ /*
+ * The "machine" field will be populated automatically with the model
+ * name from board DTS (if available).
+ */
mtk_socinfop->soc_dev = soc_device_register(attrs);
if (IS_ERR(mtk_socinfop->soc_dev))
return PTR_ERR(mtk_socinfop->soc_dev);
- dev_info(mtk_socinfop->dev, "%s %s SoC detected.\n", soc_manufacturer, attrs->machine);
+ dev_info(mtk_socinfop->dev, "%s (%s) SoC detected.\n", attrs->family, attrs->soc_id);
return 0;
}
diff --git a/drivers/soc/qcom/ice.c b/drivers/soc/qcom/ice.c
index 393d2d1d275f..2310afa77b76 100644
--- a/drivers/soc/qcom/ice.c
+++ b/drivers/soc/qcom/ice.c
@@ -11,6 +11,7 @@
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -261,7 +262,7 @@ static struct qcom_ice *qcom_ice_create(struct device *dev,
* Return: ICE pointer on success, NULL if there is no ICE data provided by the
* consumer or ERR_PTR() on error.
*/
-struct qcom_ice *of_qcom_ice_get(struct device *dev)
+static struct qcom_ice *of_qcom_ice_get(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct qcom_ice *ice;
@@ -322,7 +323,53 @@ struct qcom_ice *of_qcom_ice_get(struct device *dev)
return ice;
}
-EXPORT_SYMBOL_GPL(of_qcom_ice_get);
+
+static void qcom_ice_put(const struct qcom_ice *ice)
+{
+ struct platform_device *pdev = to_platform_device(ice->dev);
+
+ if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "ice"))
+ platform_device_put(pdev);
+}
+
+static void devm_of_qcom_ice_put(struct device *dev, void *res)
+{
+ qcom_ice_put(*(struct qcom_ice **)res);
+}
+
+/**
+ * devm_of_qcom_ice_get() - Devres managed helper to get an ICE instance from
+ * a DT node.
+ * @dev: device pointer for the consumer device.
+ *
+ * This function provides an ICE instance either by creating one for the
+ * consumer device, when its DT node supplies the 'ice' reg range and the
+ * 'ice' clock (legacy DT style), or, when the consumer references an ICE
+ * node through the 'qcom,ice' phandle property, by returning the instance
+ * that was already created for that node.
+ *
+ * Return: ICE pointer on success, NULL if there is no ICE data provided by the
+ * consumer or ERR_PTR() on error.
+ */
+struct qcom_ice *devm_of_qcom_ice_get(struct device *dev)
+{
+ struct qcom_ice *ice, **dr;
+
+ dr = devres_alloc(devm_of_qcom_ice_put, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ ice = of_qcom_ice_get(dev);
+ if (!IS_ERR_OR_NULL(ice)) {
+ *dr = ice;
+ devres_add(dev, dr);
+ } else {
+ devres_free(dr);
+ }
+
+ return ice;
+}
+EXPORT_SYMBOL_GPL(devm_of_qcom_ice_get);
static int qcom_ice_probe(struct platform_device *pdev)
{
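For illustration only: a minimal sketch of how a hypothetical consumer (for example a storage host controller) could pick up an ICE instance through the new devres-managed getter. The struct, function and field names below are made up; only devm_of_qcom_ice_get() and its NULL/ERR_PTR contract come from the change above, and the declaration is assumed to live in <soc/qcom/ice.h> next to of_qcom_ice_get().

#include <linux/device.h>
#include <linux/err.h>
#include <soc/qcom/ice.h>

/* Hypothetical consumer state; only the 'ice' member matters here. */
struct my_host {
	struct device *dev;
	struct qcom_ice *ice;
};

static int my_host_ice_init(struct my_host *host)
{
	/* Lifetime is tied to the consumer device; no explicit put is needed. */
	host->ice = devm_of_qcom_ice_get(host->dev);
	if (IS_ERR(host->ice))
		return PTR_ERR(host->ice);

	if (!host->ice)
		dev_dbg(host->dev, "no ICE described for this consumer\n");

	return 0;
}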
diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
index 328b6153b2be..71be378d2e43 100644
--- a/drivers/soc/qcom/pdr_interface.c
+++ b/drivers/soc/qcom/pdr_interface.c
@@ -75,7 +75,6 @@ static int pdr_locator_new_server(struct qmi_handle *qmi,
{
struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
locator_hdl);
- struct pdr_service *pds;
mutex_lock(&pdr->lock);
/* Create a local client port for QMI communication */
@@ -87,12 +86,7 @@ static int pdr_locator_new_server(struct qmi_handle *qmi,
mutex_unlock(&pdr->lock);
/* Service pending lookup requests */
- mutex_lock(&pdr->list_lock);
- list_for_each_entry(pds, &pdr->lookups, node) {
- if (pds->need_locator_lookup)
- schedule_work(&pdr->locator_work);
- }
- mutex_unlock(&pdr->list_lock);
+ schedule_work(&pdr->locator_work);
return 0;
}
diff --git a/drivers/soc/qcom/pdr_internal.h b/drivers/soc/qcom/pdr_internal.h
index 8d17f7fb79e7..039508c1bbf7 100644
--- a/drivers/soc/qcom/pdr_internal.h
+++ b/drivers/soc/qcom/pdr_internal.h
@@ -91,7 +91,6 @@ struct servreg_loc_pfr_resp {
struct qmi_response_type_v01 rsp;
};
-extern const struct qmi_elem_info servreg_location_entry_ei[];
extern const struct qmi_elem_info servreg_get_domain_list_req_ei[];
extern const struct qmi_elem_info servreg_get_domain_list_resp_ei[];
extern const struct qmi_elem_info servreg_register_listener_req_ei[];
diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c
index 052c292eeda6..cde19cdfd3c7 100644
--- a/drivers/soc/qcom/pmic_glink.c
+++ b/drivers/soc/qcom/pmic_glink.c
@@ -233,7 +233,7 @@ static void pmic_glink_pdr_callback(int state, char *svc_path, void *priv)
static int pmic_glink_rpmsg_probe(struct rpmsg_device *rpdev)
{
- struct pmic_glink *pg = __pmic_glink;
+ struct pmic_glink *pg;
guard(mutex)(&__pmic_glink_lock);
pg = __pmic_glink;
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
index 0320ad3b9148..a543ab9bee6c 100644
--- a/drivers/soc/qcom/qcom_aoss.c
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <linux/thermal.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/soc/qcom/qcom_aoss.h>
#define CREATE_TRACE_POINTS
@@ -358,7 +359,7 @@ static int qmp_cdev_set_cur_state(struct thermal_cooling_device *cdev,
return 0;
ret = qmp_send(qmp_cdev->qmp, "{class: volt_flr, event:zero_temp, res:%s, value:%s}",
- qmp_cdev->name, cdev_state ? "on" : "off");
+ qmp_cdev->name, str_on_off(cdev_state));
if (!ret)
qmp_cdev->state = cdev_state;
diff --git a/drivers/soc/qcom/qcom_pd_mapper.c b/drivers/soc/qcom/qcom_pd_mapper.c
index 154ca5beb471..1d1c438be3e7 100644
--- a/drivers/soc/qcom/qcom_pd_mapper.c
+++ b/drivers/soc/qcom/qcom_pd_mapper.c
@@ -429,6 +429,16 @@ static const struct qcom_pdm_domain_data *sc8280xp_domains[] = {
NULL,
};
+/* Unlike SDM660, SDM630/636 lack CDSP */
+static const struct qcom_pdm_domain_data *sdm630_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &adsp_sensor_pd,
+ &mpss_root_pd,
+ &mpss_wlan_pd,
+ NULL,
+};
+
static const struct qcom_pdm_domain_data *sdm660_domains[] = {
&adsp_audio_pd,
&adsp_root_pd,
@@ -546,6 +556,8 @@ static const struct of_device_id qcom_pdm_domains[] __maybe_unused = {
{ .compatible = "qcom,sc7280", .data = sc7280_domains, },
{ .compatible = "qcom,sc8180x", .data = sc8180x_domains, },
{ .compatible = "qcom,sc8280xp", .data = sc8280xp_domains, },
+ { .compatible = "qcom,sdm630", .data = sdm630_domains, },
+ { .compatible = "qcom,sdm636", .data = sdm630_domains, },
{ .compatible = "qcom,sda660", .data = sdm660_domains, },
{ .compatible = "qcom,sdm660", .data = sdm660_domains, },
{ .compatible = "qcom,sdm670", .data = sdm670_domains, },
diff --git a/drivers/soc/qcom/qcom_pdr_msg.c b/drivers/soc/qcom/qcom_pdr_msg.c
index bf3e4a47165e..ca98932140d8 100644
--- a/drivers/soc/qcom/qcom_pdr_msg.c
+++ b/drivers/soc/qcom/qcom_pdr_msg.c
@@ -8,7 +8,7 @@
#include "pdr_internal.h"
-const struct qmi_elem_info servreg_location_entry_ei[] = {
+static const struct qmi_elem_info servreg_location_entry_ei[] = {
{
.data_type = QMI_STRING,
.elem_len = SERVREG_NAME_LENGTH + 1,
@@ -47,7 +47,6 @@ const struct qmi_elem_info servreg_location_entry_ei[] = {
},
{}
};
-EXPORT_SYMBOL_GPL(servreg_location_entry_ei);
const struct qmi_elem_info servreg_get_domain_list_req_ei[] = {
{
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 6d2e135eed89..49648cf28bd2 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -334,6 +334,7 @@ config ARCH_R9A07G054
config ARCH_R9A08G045
bool "ARM64 Platform support for RZ/G3S"
select ARCH_RZG2L
+ select SYSC_R9A08G045
help
This enables support for the Renesas RZ/G3S SoC variants.
@@ -347,12 +348,14 @@ config ARCH_R9A09G011
config ARCH_R9A09G047
bool "ARM64 Platform support for RZ/G3E"
+ select SYS_R9A09G047
help
This enables support for the Renesas RZ/G3E SoC variants.
config ARCH_R9A09G057
bool "ARM64 Platform support for RZ/V2H(P)"
select RENESAS_RZV2H_ICU
+ select SYS_R9A09G057
help
This enables support for the Renesas RZ/V2H(P) SoC variants.
@@ -383,4 +386,19 @@ config PWC_RZV2M
config RST_RCAR
bool "Reset Controller support for R-Car" if COMPILE_TEST
+config SYSC_RZ
+ bool "System controller for RZ SoCs" if COMPILE_TEST
+
+config SYSC_R9A08G045
+ bool "Renesas RZ/G3S System controller support" if COMPILE_TEST
+ select SYSC_RZ
+
+config SYS_R9A09G047
+ bool "Renesas RZ/G3E System controller support" if COMPILE_TEST
+ select SYSC_RZ
+
+config SYS_R9A09G057
+ bool "Renesas RZ/V2H System controller support" if COMPILE_TEST
+ select SYSC_RZ
+
endif # SOC_RENESAS
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index 734f8f8cefa4..81d4c5726e4c 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -6,7 +6,11 @@ obj-$(CONFIG_SOC_RENESAS) += renesas-soc.o
ifdef CONFIG_SMP
obj-$(CONFIG_ARCH_R9A06G032) += r9a06g032-smp.o
endif
+obj-$(CONFIG_SYSC_R9A08G045) += r9a08g045-sysc.o
+obj-$(CONFIG_SYS_R9A09G047) += r9a09g047-sys.o
+obj-$(CONFIG_SYS_R9A09G057) += r9a09g057-sys.o
# Family
obj-$(CONFIG_PWC_RZV2M) += pwc-rzv2m.o
obj-$(CONFIG_RST_RCAR) += rcar-rst.o
+obj-$(CONFIG_SYSC_RZ) += rz-sysc.o
diff --git a/drivers/soc/renesas/r9a08g045-sysc.c b/drivers/soc/renesas/r9a08g045-sysc.c
new file mode 100644
index 000000000000..f4db1431e036
--- /dev/null
+++ b/drivers/soc/renesas/r9a08g045-sysc.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RZ/G3S System controller driver
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+#include <linux/bits.h>
+#include <linux/init.h>
+
+#include "rz-sysc.h"
+
+static const struct rz_sysc_soc_id_init_data rzg3s_sysc_soc_id_init_data __initconst = {
+ .family = "RZ/G3S",
+ .id = 0x85e0447,
+ .devid_offset = 0xa04,
+ .revision_mask = GENMASK(31, 28),
+ .specific_id_mask = GENMASK(27, 0),
+};
+
+const struct rz_sysc_init_data rzg3s_sysc_init_data __initconst = {
+ .soc_id_init_data = &rzg3s_sysc_soc_id_init_data,
+};
diff --git a/drivers/soc/renesas/r9a09g047-sys.c b/drivers/soc/renesas/r9a09g047-sys.c
new file mode 100644
index 000000000000..cd2eb7782cfe
--- /dev/null
+++ b/drivers/soc/renesas/r9a09g047-sys.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RZ/G3E System controller (SYS) driver
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include "rz-sysc.h"
+
+/* Register Offsets */
+#define SYS_LSI_MODE 0x300
+/*
+ * BOOTPLLCA[1:0]
+ * [0,0] => 1.1GHZ
+ * [0,1] => 1.5GHZ
+ * [1,0] => 1.6GHZ
+ * [1,1] => 1.7GHZ
+ */
+#define SYS_LSI_MODE_STAT_BOOTPLLCA55 GENMASK(12, 11)
+#define SYS_LSI_MODE_CA55_1_7GHZ 0x3
+
+#define SYS_LSI_PRR 0x308
+#define SYS_LSI_PRR_CA55_DIS BIT(8)
+#define SYS_LSI_PRR_NPU_DIS BIT(1)
+
+static void rzg3e_sys_print_id(struct device *dev,
+ void __iomem *sysc_base,
+ struct soc_device_attribute *soc_dev_attr)
+{
+ bool is_quad_core, npu_enabled;
+ u32 prr_val, mode_val;
+
+ prr_val = readl(sysc_base + SYS_LSI_PRR);
+ mode_val = readl(sysc_base + SYS_LSI_MODE);
+
+ /* Check CPU and NPU configuration */
+ is_quad_core = !(prr_val & SYS_LSI_PRR_CA55_DIS);
+ npu_enabled = !(prr_val & SYS_LSI_PRR_NPU_DIS);
+
+ dev_info(dev, "Detected Renesas %s Core %s %s Rev %s%s\n",
+ is_quad_core ? "Quad" : "Dual", soc_dev_attr->family,
+ soc_dev_attr->soc_id, soc_dev_attr->revision,
+ npu_enabled ? " with Ethos-U55" : "");
+
+ /* Check CA55 PLL configuration */
+ if (FIELD_GET(SYS_LSI_MODE_STAT_BOOTPLLCA55, mode_val) != SYS_LSI_MODE_CA55_1_7GHZ)
+ dev_warn(dev, "CA55 PLL is not set to 1.7GHz\n");
+}
+
+static const struct rz_sysc_soc_id_init_data rzg3e_sys_soc_id_init_data __initconst = {
+ .family = "RZ/G3E",
+ .id = 0x8679447,
+ .devid_offset = 0x304,
+ .revision_mask = GENMASK(31, 28),
+ .specific_id_mask = GENMASK(27, 0),
+ .print_id = rzg3e_sys_print_id,
+};
+
+const struct rz_sysc_init_data rzg3e_sys_init_data = {
+ .soc_id_init_data = &rzg3e_sys_soc_id_init_data,
+};
diff --git a/drivers/soc/renesas/r9a09g057-sys.c b/drivers/soc/renesas/r9a09g057-sys.c
new file mode 100644
index 000000000000..4c21cc29edbc
--- /dev/null
+++ b/drivers/soc/renesas/r9a09g057-sys.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RZ/V2H System controller (SYS) driver
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include "rz-sysc.h"
+
+/* Register Offsets */
+#define SYS_LSI_MODE 0x300
+/*
+ * BOOTPLLCA[1:0]
+ * [0,0] => 1.1GHZ
+ * [0,1] => 1.5GHZ
+ * [1,0] => 1.6GHZ
+ * [1,1] => 1.7GHZ
+ */
+#define SYS_LSI_MODE_STAT_BOOTPLLCA55 GENMASK(12, 11)
+#define SYS_LSI_MODE_CA55_1_7GHZ 0x3
+
+#define SYS_LSI_PRR 0x308
+#define SYS_LSI_PRR_GPU_DIS BIT(0)
+#define SYS_LSI_PRR_ISP_DIS BIT(4)
+
+static void rzv2h_sys_print_id(struct device *dev,
+ void __iomem *sysc_base,
+ struct soc_device_attribute *soc_dev_attr)
+{
+ bool gpu_enabled, isp_enabled;
+ u32 prr_val, mode_val;
+
+ prr_val = readl(sysc_base + SYS_LSI_PRR);
+ mode_val = readl(sysc_base + SYS_LSI_MODE);
+
+ /* Check GPU and ISP configuration */
+ gpu_enabled = !(prr_val & SYS_LSI_PRR_GPU_DIS);
+ isp_enabled = !(prr_val & SYS_LSI_PRR_ISP_DIS);
+
+ dev_info(dev, "Detected Renesas %s %s Rev %s%s%s\n",
+ soc_dev_attr->family, soc_dev_attr->soc_id, soc_dev_attr->revision,
+ gpu_enabled ? " with GE3D (Mali-G31)" : "",
+ isp_enabled ? " with ISP (Mali-C55)" : "");
+
+ /* Check CA55 PLL configuration */
+ if (FIELD_GET(SYS_LSI_MODE_STAT_BOOTPLLCA55, mode_val) != SYS_LSI_MODE_CA55_1_7GHZ)
+ dev_warn(dev, "CA55 PLL is not set to 1.7GHz\n");
+}
+
+static const struct rz_sysc_soc_id_init_data rzv2h_sys_soc_id_init_data __initconst = {
+ .family = "RZ/V2H",
+ .id = 0x847a447,
+ .devid_offset = 0x304,
+ .revision_mask = GENMASK(31, 28),
+ .specific_id_mask = GENMASK(27, 0),
+ .print_id = rzv2h_sys_print_id,
+};
+
+const struct rz_sysc_init_data rzv2h_sys_init_data = {
+ .soc_id_init_data = &rzv2h_sys_soc_id_init_data,
+};
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 172d59e6fbcf..df2b38417b80 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -71,14 +71,6 @@ static const struct renesas_family fam_rzg2ul __initconst __maybe_unused = {
.name = "RZ/G2UL",
};
-static const struct renesas_family fam_rzg3s __initconst __maybe_unused = {
- .name = "RZ/G3S",
-};
-
-static const struct renesas_family fam_rzv2h __initconst __maybe_unused = {
- .name = "RZ/V2H",
-};
-
static const struct renesas_family fam_rzv2l __initconst __maybe_unused = {
.name = "RZ/V2L",
};
@@ -176,16 +168,6 @@ static const struct renesas_soc soc_rz_g2ul __initconst __maybe_unused = {
.id = 0x8450447,
};
-static const struct renesas_soc soc_rz_g3s __initconst __maybe_unused = {
- .family = &fam_rzg3s,
- .id = 0x85e0447,
-};
-
-static const struct renesas_soc soc_rz_v2h __initconst __maybe_unused = {
- .family = &fam_rzv2h,
- .id = 0x847a447,
-};
-
static const struct renesas_soc soc_rz_v2l __initconst __maybe_unused = {
.family = &fam_rzv2l,
.id = 0x8447447,
@@ -289,7 +271,6 @@ static const struct renesas_soc soc_shmobile_ag5 __initconst __maybe_unused = {
.id = 0x37,
};
-
static const struct of_device_id renesas_socs[] __initconst __maybe_unused = {
#ifdef CONFIG_ARCH_R7S72100
{ .compatible = "renesas,r7s72100", .data = &soc_rz_a1h },
@@ -410,15 +391,9 @@ static const struct of_device_id renesas_socs[] __initconst __maybe_unused = {
#ifdef CONFIG_ARCH_R9A07G054
{ .compatible = "renesas,r9a07g054", .data = &soc_rz_v2l },
#endif
-#ifdef CONFIG_ARCH_R9A08G045
- { .compatible = "renesas,r9a08g045", .data = &soc_rz_g3s },
-#endif
#ifdef CONFIG_ARCH_R9A09G011
{ .compatible = "renesas,r9a09g011", .data = &soc_rz_v2m },
#endif
-#ifdef CONFIG_ARCH_R9A09G057
- { .compatible = "renesas,r9a09g057", .data = &soc_rz_v2h },
-#endif
#ifdef CONFIG_ARCH_SH73A0
{ .compatible = "renesas,sh73a0", .data = &soc_shmobile_ag5 },
#endif
@@ -444,11 +419,6 @@ static const struct renesas_id id_rzg2l __initconst = {
.mask = 0xfffffff,
};
-static const struct renesas_id id_rzv2h __initconst = {
- .offset = 0x304,
- .mask = 0xfffffff,
-};
-
static const struct renesas_id id_rzv2m __initconst = {
.offset = 0x104,
.mask = 0xff,
@@ -466,7 +436,6 @@ static const struct of_device_id renesas_ids[] __initconst = {
{ .compatible = "renesas,r9a07g054-sysc", .data = &id_rzg2l },
{ .compatible = "renesas,r9a08g045-sysc", .data = &id_rzg2l },
{ .compatible = "renesas,r9a09g011-sys", .data = &id_rzv2m },
- { .compatible = "renesas,r9a09g057-sys", .data = &id_rzv2h },
{ .compatible = "renesas,prr", .data = &id_prr },
{ /* sentinel */ }
};
@@ -531,7 +500,7 @@ static int __init renesas_soc_init(void)
eslo = product & 0xf;
soc_dev_attr->revision = kasprintf(GFP_KERNEL, "ES%u.%u",
eshi, eslo);
- } else if (id == &id_rzg2l || id == &id_rzv2h) {
+ } else if (id == &id_rzg2l) {
eshi = ((product >> 28) & 0x0f);
soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%u",
eshi);
diff --git a/drivers/soc/renesas/rz-sysc.c b/drivers/soc/renesas/rz-sysc.c
new file mode 100644
index 000000000000..14db508f669f
--- /dev/null
+++ b/drivers/soc/renesas/rz-sysc.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RZ System controller driver
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sys_soc.h>
+
+#include "rz-sysc.h"
+
+#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
+
+/**
+ * struct rz_sysc - RZ SYSC private data structure
+ * @base: SYSC base address
+ * @dev: SYSC device pointer
+ */
+struct rz_sysc {
+ void __iomem *base;
+ struct device *dev;
+};
+
+static int rz_sysc_soc_init(struct rz_sysc *sysc, const struct of_device_id *match)
+{
+ const struct rz_sysc_init_data *sysc_data = match->data;
+ const struct rz_sysc_soc_id_init_data *soc_data = sysc_data->soc_id_init_data;
+ struct soc_device_attribute *soc_dev_attr;
+ const char *soc_id_start, *soc_id_end;
+ u32 val, revision, specific_id;
+ struct soc_device *soc_dev;
+ char soc_id[32] = {0};
+ size_t size;
+
+ soc_id_start = strchr(match->compatible, ',') + 1;
+ soc_id_end = strchr(match->compatible, '-');
+ size = soc_id_end - soc_id_start + 1;
+ if (size > 32)
+ size = sizeof(soc_id);
+ strscpy(soc_id, soc_id_start, size);
+
+ soc_dev_attr = devm_kzalloc(sysc->dev, sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ soc_dev_attr->family = devm_kstrdup(sysc->dev, soc_data->family, GFP_KERNEL);
+ if (!soc_dev_attr->family)
+ return -ENOMEM;
+
+ soc_dev_attr->soc_id = devm_kstrdup(sysc->dev, soc_id, GFP_KERNEL);
+ if (!soc_dev_attr->soc_id)
+ return -ENOMEM;
+
+ val = readl(sysc->base + soc_data->devid_offset);
+ revision = field_get(soc_data->revision_mask, val);
+ specific_id = field_get(soc_data->specific_id_mask, val);
+ soc_dev_attr->revision = devm_kasprintf(sysc->dev, GFP_KERNEL, "%u", revision);
+ if (!soc_dev_attr->revision)
+ return -ENOMEM;
+
+ if (soc_data->id && specific_id != soc_data->id) {
+ dev_warn(sysc->dev, "SoC mismatch (product = 0x%x)\n", specific_id);
+ return -ENODEV;
+ }
+
+ /* Try to call SoC-specific device identification */
+ if (soc_data->print_id) {
+ soc_data->print_id(sysc->dev, sysc->base, soc_dev_attr);
+ } else {
+ dev_info(sysc->dev, "Detected Renesas %s %s Rev %s\n",
+ soc_dev_attr->family, soc_dev_attr->soc_id, soc_dev_attr->revision);
+ }
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev))
+ return PTR_ERR(soc_dev);
+
+ return 0;
+}
+
+static const struct of_device_id rz_sysc_match[] = {
+#ifdef CONFIG_SYSC_R9A08G045
+ { .compatible = "renesas,r9a08g045-sysc", .data = &rzg3s_sysc_init_data },
+#endif
+#ifdef CONFIG_SYS_R9A09G047
+ { .compatible = "renesas,r9a09g047-sys", .data = &rzg3e_sys_init_data },
+#endif
+#ifdef CONFIG_SYS_R9A09G057
+ { .compatible = "renesas,r9a09g057-sys", .data = &rzv2h_sys_init_data },
+#endif
+ { }
+};
+MODULE_DEVICE_TABLE(of, rz_sysc_match);
+
+static int rz_sysc_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct device *dev = &pdev->dev;
+ struct rz_sysc *sysc;
+
+ match = of_match_node(rz_sysc_match, dev->of_node);
+ if (!match)
+ return -ENODEV;
+
+ sysc = devm_kzalloc(dev, sizeof(*sysc), GFP_KERNEL);
+ if (!sysc)
+ return -ENOMEM;
+
+ sysc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(sysc->base))
+ return PTR_ERR(sysc->base);
+
+ sysc->dev = dev;
+ return rz_sysc_soc_init(sysc, match);
+}
+
+static struct platform_driver rz_sysc_driver = {
+ .driver = {
+ .name = "renesas-rz-sysc",
+ .suppress_bind_attrs = true,
+ .of_match_table = rz_sysc_match
+ },
+ .probe = rz_sysc_probe
+};
+
+static int __init rz_sysc_init(void)
+{
+ return platform_driver_register(&rz_sysc_driver);
+}
+subsys_initcall(rz_sysc_init);
+
+MODULE_DESCRIPTION("Renesas RZ System Controller Driver");
+MODULE_AUTHOR("Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/renesas/rz-sysc.h b/drivers/soc/renesas/rz-sysc.h
new file mode 100644
index 000000000000..aa83948c5117
--- /dev/null
+++ b/drivers/soc/renesas/rz-sysc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Renesas RZ System Controller
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+#ifndef __SOC_RENESAS_RZ_SYSC_H__
+#define __SOC_RENESAS_RZ_SYSC_H__
+
+#include <linux/device.h>
+#include <linux/sys_soc.h>
+#include <linux/types.h>
+
+/**
+ * struct rz_syc_soc_id_init_data - RZ SYSC SoC identification initialization data
+ * @family: RZ SoC family
+ * @id: RZ SoC expected ID
+ * @devid_offset: SYSC SoC ID register offset
+ * @revision_mask: SYSC SoC ID revision mask
+ * @specific_id_mask: SYSC SoC ID specific ID mask
+ * @print_id: print SoC-specific extended device identification
+ */
+struct rz_sysc_soc_id_init_data {
+ const char * const family;
+ u32 id;
+ u32 devid_offset;
+ u32 revision_mask;
+ u32 specific_id_mask;
+ void (*print_id)(struct device *dev, void __iomem *sysc_base,
+ struct soc_device_attribute *soc_dev_attr);
+};
+
+/**
+ * struct rz_sysc_init_data - RZ SYSC initialization data
+ * @soc_id_init_data: RZ SYSC SoC ID initialization data
+ */
+struct rz_sysc_init_data {
+ const struct rz_sysc_soc_id_init_data *soc_id_init_data;
+};
+
+extern const struct rz_sysc_init_data rzg3e_sys_init_data;
+extern const struct rz_sysc_init_data rzg3s_sysc_init_data;
+extern const struct rz_sysc_init_data rzv2h_sys_init_data;
+
+#endif /* __SOC_RENESAS_RZ_SYSC_H__ */
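For illustration only: under the interface defined in rz-sysc.h above, a per-SoC data file for a hypothetical future part would look roughly like this. The family name, ID and register offset are placeholders, and the data would still need to be hooked up in rz_sysc_match, the Makefile and Kconfig, as the RZ/G3S, RZ/G3E and RZ/V2H additions in this series do.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical example only - not a real Renesas part. */
#include <linux/bits.h>
#include <linux/init.h>

#include "rz-sysc.h"

static const struct rz_sysc_soc_id_init_data rzxyz_sysc_soc_id_init_data __initconst = {
	.family = "RZ/XYZ",		/* placeholder family name */
	.id = 0x1234447,		/* placeholder expected SoC ID */
	.devid_offset = 0x304,		/* placeholder DEVID register offset */
	.revision_mask = GENMASK(31, 28),
	.specific_id_mask = GENMASK(27, 0),
	/* .print_id is optional; rz_sysc_soc_init() falls back to a generic dev_info() */
};

const struct rz_sysc_init_data rzxyz_sysc_init_data __initconst = {
	.soc_id_init_data = &rzxyz_sysc_soc_id_init_data,
};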
diff --git a/drivers/soc/samsung/exynos-asv.c b/drivers/soc/samsung/exynos-asv.c
index 97006cc3b946..8e681f519526 100644
--- a/drivers/soc/samsung/exynos-asv.c
+++ b/drivers/soc/samsung/exynos-asv.c
@@ -9,6 +9,7 @@
* Samsung Exynos SoC Adaptive Supply Voltage support
*/
+#include <linux/array_size.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/energy_model.h>
diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c
index e37dde1fb588..c86f1058ceed 100644
--- a/drivers/soc/samsung/exynos-chipid.c
+++ b/drivers/soc/samsung/exynos-chipid.c
@@ -12,6 +12,7 @@
* Samsung Exynos SoC Adaptive Supply Voltage and Chip ID support
*/
+#include <linux/array_size.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/mfd/syscon.h>
@@ -55,7 +56,9 @@ static const struct exynos_soc_id {
{ "EXYNOS5440", 0xE5440000 },
{ "EXYNOS5800", 0xE5422000 },
{ "EXYNOS7420", 0xE7420000 },
+ { "EXYNOS7870", 0xE7870000 },
/* Compatible with: samsung,exynos850-chipid */
+ { "EXYNOS2200", 0xE9925000 },
{ "EXYNOS7885", 0xE7885000 },
{ "EXYNOS850", 0xE3830000 },
{ "EXYNOS8895", 0xE8895000 },
@@ -134,6 +137,8 @@ static int exynos_chipid_probe(struct platform_device *pdev)
soc_dev_attr->revision = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"%x", soc_info.revision);
+ if (!soc_dev_attr->revision)
+ return -ENOMEM;
soc_dev_attr->soc_id = product_id_to_soc_id(soc_info.product_id);
if (!soc_dev_attr->soc_id) {
pr_err("Unknown SoC\n");
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index dd5256e5aae1..c40313886a01 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -5,6 +5,7 @@
//
// Exynos - CPU PMU(Power Management Unit) support
+#include <linux/array_size.h>
#include <linux/arm-smccc.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/soc/samsung/exynos-usi.c b/drivers/soc/samsung/exynos-usi.c
index 114352695ac2..c5661ac19f7b 100644
--- a/drivers/soc/samsung/exynos-usi.c
+++ b/drivers/soc/samsung/exynos-usi.c
@@ -6,6 +6,7 @@
* Samsung Exynos USI driver (Universal Serial Interface).
*/
+#include <linux/array_size.h>
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
@@ -16,6 +17,18 @@
#include <dt-bindings/soc/samsung,exynos-usi.h>
+/* USIv1: System Register: SW_CONF register bits */
+#define USI_V1_SW_CONF_NONE 0x0
+#define USI_V1_SW_CONF_I2C0 0x1
+#define USI_V1_SW_CONF_I2C1 0x2
+#define USI_V1_SW_CONF_I2C0_1 0x3
+#define USI_V1_SW_CONF_SPI 0x4
+#define USI_V1_SW_CONF_UART 0x8
+#define USI_V1_SW_CONF_UART_I2C1 0xa
+#define USI_V1_SW_CONF_MASK (USI_V1_SW_CONF_I2C0 | USI_V1_SW_CONF_I2C1 | \
+ USI_V1_SW_CONF_I2C0_1 | USI_V1_SW_CONF_SPI | \
+ USI_V1_SW_CONF_UART | USI_V1_SW_CONF_UART_I2C1)
+
/* USIv2: System Register: SW_CONF register bits */
#define USI_V2_SW_CONF_NONE 0x0
#define USI_V2_SW_CONF_UART BIT(0)
@@ -34,7 +47,8 @@
#define USI_OPTION_CLKSTOP_ON BIT(2)
enum exynos_usi_ver {
- USI_VER2 = 2,
+ USI_VER1 = 0,
+ USI_VER2,
};
struct exynos_usi_variant {
@@ -66,19 +80,39 @@ struct exynos_usi_mode {
unsigned int val; /* mode register value */
};
-static const struct exynos_usi_mode exynos_usi_modes[] = {
- [USI_V2_NONE] = { .name = "none", .val = USI_V2_SW_CONF_NONE },
- [USI_V2_UART] = { .name = "uart", .val = USI_V2_SW_CONF_UART },
- [USI_V2_SPI] = { .name = "spi", .val = USI_V2_SW_CONF_SPI },
- [USI_V2_I2C] = { .name = "i2c", .val = USI_V2_SW_CONF_I2C },
+#define USI_MODES_MAX (USI_MODE_UART_I2C1 + 1)
+static const struct exynos_usi_mode exynos_usi_modes[][USI_MODES_MAX] = {
+ [USI_VER1] = {
+ [USI_MODE_NONE] = { .name = "none", .val = USI_V1_SW_CONF_NONE },
+ [USI_MODE_UART] = { .name = "uart", .val = USI_V1_SW_CONF_UART },
+ [USI_MODE_SPI] = { .name = "spi", .val = USI_V1_SW_CONF_SPI },
+ [USI_MODE_I2C] = { .name = "i2c", .val = USI_V1_SW_CONF_I2C0 },
+ [USI_MODE_I2C1] = { .name = "i2c1", .val = USI_V1_SW_CONF_I2C1 },
+ [USI_MODE_I2C0_1] = { .name = "i2c0_1", .val = USI_V1_SW_CONF_I2C0_1 },
+ [USI_MODE_UART_I2C1] = { .name = "uart_i2c1", .val = USI_V1_SW_CONF_UART_I2C1 },
+ }, [USI_VER2] = {
+ [USI_MODE_NONE] = { .name = "none", .val = USI_V2_SW_CONF_NONE },
+ [USI_MODE_UART] = { .name = "uart", .val = USI_V2_SW_CONF_UART },
+ [USI_MODE_SPI] = { .name = "spi", .val = USI_V2_SW_CONF_SPI },
+ [USI_MODE_I2C] = { .name = "i2c", .val = USI_V2_SW_CONF_I2C },
+ },
};
static const char * const exynos850_usi_clk_names[] = { "pclk", "ipclk" };
static const struct exynos_usi_variant exynos850_usi_data = {
.ver = USI_VER2,
.sw_conf_mask = USI_V2_SW_CONF_MASK,
- .min_mode = USI_V2_NONE,
- .max_mode = USI_V2_I2C,
+ .min_mode = USI_MODE_NONE,
+ .max_mode = USI_MODE_I2C,
+ .num_clks = ARRAY_SIZE(exynos850_usi_clk_names),
+ .clk_names = exynos850_usi_clk_names,
+};
+
+static const struct exynos_usi_variant exynos8895_usi_data = {
+ .ver = USI_VER1,
+ .sw_conf_mask = USI_V1_SW_CONF_MASK,
+ .min_mode = USI_MODE_NONE,
+ .max_mode = USI_MODE_UART_I2C1,
.num_clks = ARRAY_SIZE(exynos850_usi_clk_names),
.clk_names = exynos850_usi_clk_names,
};
@@ -87,6 +121,9 @@ static const struct of_device_id exynos_usi_dt_match[] = {
{
.compatible = "samsung,exynos850-usi",
.data = &exynos850_usi_data,
+ }, {
+ .compatible = "samsung,exynos8895-usi",
+ .data = &exynos8895_usi_data,
},
{ } /* sentinel */
};
@@ -109,14 +146,15 @@ static int exynos_usi_set_sw_conf(struct exynos_usi *usi, size_t mode)
if (mode < usi->data->min_mode || mode > usi->data->max_mode)
return -EINVAL;
- val = exynos_usi_modes[mode].val;
+ val = exynos_usi_modes[usi->data->ver][mode].val;
ret = regmap_update_bits(usi->sysreg, usi->sw_conf,
usi->data->sw_conf_mask, val);
if (ret)
return ret;
usi->mode = mode;
- dev_dbg(usi->dev, "protocol: %s\n", exynos_usi_modes[usi->mode].name);
+ dev_dbg(usi->dev, "protocol: %s\n",
+ exynos_usi_modes[usi->data->ver][usi->mode].name);
return 0;
}
@@ -168,10 +206,42 @@ static int exynos_usi_configure(struct exynos_usi *usi)
if (ret)
return ret;
- if (usi->data->ver == USI_VER2)
- return exynos_usi_enable(usi);
+ if (usi->data->ver == USI_VER1)
+ ret = clk_bulk_prepare_enable(usi->data->num_clks,
+ usi->clks);
+ else if (usi->data->ver == USI_VER2)
+ ret = exynos_usi_enable(usi);
- return 0;
+ return ret;
+}
+
+static void exynos_usi_unconfigure(void *data)
+{
+ struct exynos_usi *usi = data;
+ u32 val;
+ int ret;
+
+ if (usi->data->ver == USI_VER1) {
+ clk_bulk_disable_unprepare(usi->data->num_clks, usi->clks);
+ return;
+ }
+
+ ret = clk_bulk_prepare_enable(usi->data->num_clks, usi->clks);
+ if (ret)
+ return;
+
+ /* Make sure that we've stopped providing the clock to USI IP */
+ val = readl(usi->regs + USI_OPTION);
+ val &= ~USI_OPTION_CLKREQ_ON;
+ val |= USI_OPTION_CLKSTOP_ON;
+ writel(val, usi->regs + USI_OPTION);
+
+ /* Set USI block state to reset */
+ val = readl(usi->regs + USI_CON);
+ val |= USI_CON_RESET;
+ writel(val, usi->regs + USI_CON);
+
+ clk_bulk_disable_unprepare(usi->data->num_clks, usi->clks);
}
static int exynos_usi_parse_dt(struct device_node *np, struct exynos_usi *usi)
@@ -186,15 +256,11 @@ static int exynos_usi_parse_dt(struct device_node *np, struct exynos_usi *usi)
return -EINVAL;
usi->mode = mode;
- usi->sysreg = syscon_regmap_lookup_by_phandle(np, "samsung,sysreg");
+ usi->sysreg = syscon_regmap_lookup_by_phandle_args(np, "samsung,sysreg",
+ 1, &usi->sw_conf);
if (IS_ERR(usi->sysreg))
return PTR_ERR(usi->sysreg);
- ret = of_property_read_u32_index(np, "samsung,sysreg", 1,
- &usi->sw_conf);
- if (ret)
- return ret;
-
usi->clkreq_on = of_property_read_bool(np, "samsung,clkreq-on");
return 0;
@@ -255,6 +321,10 @@ static int exynos_usi_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = devm_add_action_or_reset(&pdev->dev, exynos_usi_unconfigure, usi);
+ if (ret)
+ return ret;
+
/* Make it possible to embed protocol nodes into USI np */
return of_platform_populate(np, NULL, NULL, dev);
}
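For illustration only: the new USI_V1_SW_CONF_MASK is built by OR-ing the individual USIv1 mode values defined above, which collapses to the low four bits of SW_CONF; a quick compile-time check, using only values from this patch.

#include <linux/build_bug.h>

/* 0x1 | 0x2 | 0x3 | 0x4 | 0x8 | 0xa == 0xf, i.e. SW_CONF[3:0] */
static_assert((0x1 | 0x2 | 0x3 | 0x4 | 0x8 | 0xa) == 0xf);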
diff --git a/drivers/soc/samsung/exynos3250-pmu.c b/drivers/soc/samsung/exynos3250-pmu.c
index 30f230ed1769..4bad12a99542 100644
--- a/drivers/soc/samsung/exynos3250-pmu.c
+++ b/drivers/soc/samsung/exynos3250-pmu.c
@@ -5,6 +5,7 @@
//
// Exynos3250 - CPU PMU (Power Management Unit) support
+#include <linux/array_size.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/samsung/exynos5250-pmu.c b/drivers/soc/samsung/exynos5250-pmu.c
index 7a2d50be6b4a..2ae5c3e1b07a 100644
--- a/drivers/soc/samsung/exynos5250-pmu.c
+++ b/drivers/soc/samsung/exynos5250-pmu.c
@@ -5,6 +5,7 @@
//
// Exynos5250 - CPU PMU (Power Management Unit) support
+#include <linux/array_size.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/samsung/exynos5420-pmu.c b/drivers/soc/samsung/exynos5420-pmu.c
index 6fedcd78cb45..58a2209795f7 100644
--- a/drivers/soc/samsung/exynos5420-pmu.c
+++ b/drivers/soc/samsung/exynos5420-pmu.c
@@ -5,6 +5,7 @@
//
// Exynos5420 - CPU PMU (Power Management Unit) support
+#include <linux/array_size.h>
#include <linux/pm.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index a08c377933c5..51b9d852bb6a 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -47,6 +47,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/syscore_ops.h>
#include <soc/tegra/common.h>
@@ -1181,7 +1182,7 @@ static int powergate_show(struct seq_file *s, void *data)
continue;
seq_printf(s, " %9s %7s\n", pmc->soc->powergates[i],
- status ? "yes" : "no");
+ str_yes_no(status));
}
return 0;
diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c
index 4fb0f0a24828..704039eb3c07 100644
--- a/drivers/soc/ti/k3-socinfo.c
+++ b/drivers/soc/ti/k3-socinfo.c
@@ -105,6 +105,12 @@ err_unknown_variant:
return -ENODEV;
}
+static const struct regmap_config k3_chipinfo_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
static int k3_chipinfo_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
@@ -112,13 +118,18 @@ static int k3_chipinfo_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct soc_device *soc_dev;
struct regmap *regmap;
+ void __iomem *base;
u32 partno_id;
u32 variant;
u32 jtag_id;
u32 mfg;
int ret;
- regmap = device_node_to_regmap(node);
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = regmap_init_mmio(dev, base, &k3_chipinfo_regmap_cfg);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index 0f45e3404756..295a46dc2be7 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -156,6 +156,7 @@ struct qcom_swrm_port_config {
u8 word_length;
u8 blk_group_count;
u8 lane_control;
+ u8 ch_mask;
};
/*
@@ -1048,9 +1049,13 @@ static int qcom_swrm_port_enable(struct sdw_bus *bus,
{
u32 reg = SWRM_DP_PORT_CTRL_BANK(enable_ch->port_num, bank);
struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
+ struct qcom_swrm_port_config *pcfg;
u32 val;
+ pcfg = &ctrl->pconfig[enable_ch->port_num];
ctrl->reg_read(ctrl, reg, &val);
+ if (pcfg->ch_mask != SWR_INVALID_PARAM && pcfg->ch_mask != 0)
+ enable_ch->ch_mask = pcfg->ch_mask;
if (enable_ch->enable)
val |= (enable_ch->ch_mask << SWRM_DP_PORT_CTRL_EN_CHAN_SHFT);
@@ -1270,6 +1275,26 @@ static void *qcom_swrm_get_sdw_stream(struct snd_soc_dai *dai, int direction)
return ctrl->sruntime[dai->id];
}
+static int qcom_swrm_set_channel_map(struct snd_soc_dai *dai,
+ unsigned int tx_num, const unsigned int *tx_slot,
+ unsigned int rx_num, const unsigned int *rx_slot)
+{
+ struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dai->dev);
+ int i;
+
+ if (tx_slot) {
+ for (i = 0; i < tx_num; i++)
+ ctrl->pconfig[i].ch_mask = tx_slot[i];
+ }
+
+ if (rx_slot) {
+ for (i = 0; i < rx_num; i++)
+ ctrl->pconfig[i].ch_mask = rx_slot[i];
+ }
+
+ return 0;
+}
+
static int qcom_swrm_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
@@ -1306,6 +1331,7 @@ static const struct snd_soc_dai_ops qcom_swrm_pdm_dai_ops = {
.shutdown = qcom_swrm_shutdown,
.set_stream = qcom_swrm_set_sdw_stream,
.get_stream = qcom_swrm_get_sdw_stream,
+ .set_channel_map = qcom_swrm_set_channel_map,
};
static const struct snd_soc_component_driver qcom_swrm_dai_component = {
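For illustration only: a sketch of how machine or codec code might feed per-port channel masks into the new .set_channel_map op via the generic ASoC helper. The DAI pointer and slot values are hypothetical, and whether snd_soc_dai_set_channel_map() takes const slot arrays depends on the kernel version; the masks simply land in ctrl->pconfig[i].ch_mask so that qcom_swrm_port_enable() can override enable_ch->ch_mask.

#include <linux/kernel.h>
#include <sound/soc.h>
#include <sound/soc-dai.h>

/* Hypothetical: program explicit channel masks for the first two RX entries. */
static int my_machine_map_sdw_channels(struct snd_soc_dai *sdw_dai)
{
	unsigned int rx_slot[] = { 0x1, 0x3 };	/* stored in ctrl->pconfig[0..1].ch_mask */

	return snd_soc_dai_set_channel_map(sdw_dai, 0, NULL,
					   ARRAY_SIZE(rx_slot), rx_slot);
}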
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index ea8a31032927..f40c282d4d63 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -55,6 +55,9 @@ config SPI_MEM
This extension is meant to simplify interaction with SPI memories
by providing a high-level interface to send memory-like commands.
+config SPI_OFFLOAD
+ bool
+
comment "SPI Master Controller Drivers"
config SPI_AIROHA_SNFI
@@ -176,6 +179,7 @@ config SPI_AU1550
config SPI_AXI_SPI_ENGINE
tristate "Analog Devices AXI SPI Engine controller"
depends on HAS_IOMEM
+ select SPI_OFFLOAD
help
This enables support for the Analog Devices AXI SPI Engine SPI controller.
It is part of the SPI Engine framework that is used in some Analog Devices
@@ -932,6 +936,15 @@ config SPI_QCOM_QSPI
help
QSPI(Quad SPI) driver for Qualcomm QSPI controller.
+config SPI_QPIC_SNAND
+ bool "QPIC SNAND controller"
+ depends on ARCH_QCOM || COMPILE_TEST
+ select MTD
+ help
+ QPIC_SNAND (QPIC SPI NAND) driver for the Qualcomm QPIC controller.
+ The QPIC controller supports both parallel NAND and serial NAND.
+ This option enables the serial NAND driver for the QPIC controller.
+
config SPI_QUP
tristate "Qualcomm SPI controller with QUP interface"
depends on ARCH_QCOM || COMPILE_TEST
@@ -1021,6 +1034,15 @@ config SPI_SN_F_OSPI
for connecting an SPI Flash memory over up to 8-bit wide bus.
It supports indirect access mode only.
+config SPI_SG2044_NOR
+ tristate "SG2044 SPI NOR Controller"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ help
+ This enables support for the SG2044 SPI NOR controller,
+ which supports Dual/Quad read and write operations while
+ also supporting 3Byte address devices and 4Byte address
+ devices.
+
config SPI_SPRD
tristate "Spreadtrum SPI controller"
depends on ARCH_SPRD || COMPILE_TEST
@@ -1045,6 +1067,16 @@ config SPI_STM32
is not available, the driver automatically falls back to
PIO mode.
+config SPI_STM32_OSPI
+ tristate "STMicroelectronics STM32 OCTO SPI controller"
+ depends on ARCH_STM32 || COMPILE_TEST
+ depends on OF
+ depends on SPI_MEM
+ help
+ This enables support for the Octo SPI controller in master mode.
+ This driver does not support generic SPI; it only implements the
+ spi-mem interface.
+
config SPI_STM32_QSPI
tristate "STMicroelectronics STM32 QUAD SPI controller"
depends on ARCH_STM32 || COMPILE_TEST
@@ -1317,4 +1349,16 @@ endif # SPI_SLAVE
config SPI_DYNAMIC
def_bool ACPI || OF_DYNAMIC || SPI_SLAVE
+if SPI_OFFLOAD
+
+comment "SPI Offload triggers"
+
+config SPI_OFFLOAD_TRIGGER_PWM
+ tristate "SPI offload trigger using PWM"
+ depends on PWM
+ help
+ Generic SPI offload trigger implemented using PWM output.
+
+endif # SPI_OFFLOAD
+
endif # SPI
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 9db7554c1864..c3a1a47b3bf4 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -10,6 +10,7 @@ ccflags-$(CONFIG_SPI_DEBUG) := -DDEBUG
obj-$(CONFIG_SPI_MASTER) += spi.o
obj-$(CONFIG_SPI_MEM) += spi-mem.o
obj-$(CONFIG_SPI_MUX) += spi-mux.o
+obj-$(CONFIG_SPI_OFFLOAD) += spi-offload.o
obj-$(CONFIG_SPI_SPIDEV) += spidev.o
obj-$(CONFIG_SPI_LOOPBACK_TEST) += spi-loopback-test.o
@@ -116,6 +117,7 @@ obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o
obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
obj-$(CONFIG_SPI_QCOM_GENI) += spi-geni-qcom.o
obj-$(CONFIG_SPI_QCOM_QSPI) += spi-qcom-qspi.o
+obj-$(CONFIG_SPI_QPIC_SNAND) += spi-qpic-snand.o
obj-$(CONFIG_SPI_QUP) += spi-qup.o
obj-$(CONFIG_SPI_ROCKCHIP) += spi-rockchip.o
obj-$(CONFIG_SPI_ROCKCHIP_SFC) += spi-rockchip-sfc.o
@@ -134,9 +136,11 @@ obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
obj-$(CONFIG_SPI_SIFIVE) += spi-sifive.o
obj-$(CONFIG_SPI_SLAVE_MT27XX) += spi-slave-mt27xx.o
obj-$(CONFIG_SPI_SN_F_OSPI) += spi-sn-f-ospi.o
+obj-$(CONFIG_SPI_SG2044_NOR) += spi-sg2044-nor.o
obj-$(CONFIG_SPI_SPRD) += spi-sprd.o
obj-$(CONFIG_SPI_SPRD_ADI) += spi-sprd-adi.o
obj-$(CONFIG_SPI_STM32) += spi-stm32.o
+obj-$(CONFIG_SPI_STM32_OSPI) += spi-stm32-ospi.o
obj-$(CONFIG_SPI_STM32_QSPI) += spi-stm32-qspi.o
obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o
obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o
@@ -163,3 +167,6 @@ obj-$(CONFIG_SPI_AMD) += spi-amd.o
# SPI slave protocol handlers
obj-$(CONFIG_SPI_SLAVE_TIME) += spi-slave-time.o
obj-$(CONFIG_SPI_SLAVE_SYSTEM_CONTROL) += spi-slave-system-control.o
+
+# SPI offload triggers
+obj-$(CONFIG_SPI_OFFLOAD_TRIGGER_PWM) += spi-offload-trigger-pwm.o
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index d8c9be64d006..244ac0106862 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -930,11 +930,8 @@ static int atmel_qspi_sama7g5_transfer(struct spi_mem *mem,
/* Release the chip-select. */
ret = atmel_qspi_reg_sync(aq);
- if (ret) {
- pm_runtime_mark_last_busy(&aq->pdev->dev);
- pm_runtime_put_autosuspend(&aq->pdev->dev);
+ if (ret)
return ret;
- }
atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
return atmel_qspi_wait_for_completion(aq, QSPI_SR_CSRA);
diff --git a/drivers/spi/spi-aspeed-smc.c b/drivers/spi/spi-aspeed-smc.c
index e9beae95dded..62a11142bd63 100644
--- a/drivers/spi/spi-aspeed-smc.c
+++ b/drivers/spi/spi-aspeed-smc.c
@@ -303,13 +303,6 @@ static int do_aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o
u32 ctl_val;
int ret = 0;
- dev_dbg(aspi->dev,
- "CE%d %s OP %#x mode:%d.%d.%d.%d naddr:%#x ndummies:%#x len:%#x",
- chip->cs, op->data.dir == SPI_MEM_DATA_IN ? "read" : "write",
- op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
- op->dummy.buswidth, op->data.buswidth,
- op->addr.nbytes, op->dummy.nbytes, op->data.nbytes);
-
addr_mode = readl(aspi->regs + CE_CTRL_REG);
addr_mode_backup = addr_mode;
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 7c252126b33e..da9840957778 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -2,11 +2,15 @@
/*
* SPI-Engine SPI controller driver
* Copyright 2015 Analog Devices Inc.
+ * Copyright 2024 BayLibre, SAS
* Author: Lars-Peter Clausen <lars@metafoo.de>
*/
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
+#include <linux/dmaengine.h>
#include <linux/fpga/adi-axi-common.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -14,9 +18,11 @@
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
+#include <linux/spi/offload/provider.h>
#include <linux/spi/spi.h>
#include <trace/events/spi.h>
+#define SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH 0x10
#define SPI_ENGINE_REG_RESET 0x40
#define SPI_ENGINE_REG_INT_ENABLE 0x80
@@ -24,6 +30,7 @@
#define SPI_ENGINE_REG_INT_SOURCE 0x88
#define SPI_ENGINE_REG_SYNC_ID 0xc0
+#define SPI_ENGINE_REG_OFFLOAD_SYNC_ID 0xc4
#define SPI_ENGINE_REG_CMD_FIFO_ROOM 0xd0
#define SPI_ENGINE_REG_SDO_FIFO_ROOM 0xd4
@@ -34,10 +41,24 @@
#define SPI_ENGINE_REG_SDI_DATA_FIFO 0xe8
#define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK 0xec
+#define SPI_ENGINE_MAX_NUM_OFFLOADS 32
+
+#define SPI_ENGINE_REG_OFFLOAD_CTRL(x) (0x100 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
+#define SPI_ENGINE_REG_OFFLOAD_STATUS(x) (0x104 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
+#define SPI_ENGINE_REG_OFFLOAD_RESET(x) (0x108 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
+#define SPI_ENGINE_REG_OFFLOAD_CMD_FIFO(x) (0x110 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
+#define SPI_ENGINE_REG_OFFLOAD_SDO_FIFO(x) (0x114 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
+
+#define SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_SDO GENMASK(15, 8)
+#define SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_CMD GENMASK(7, 0)
+
#define SPI_ENGINE_INT_CMD_ALMOST_EMPTY BIT(0)
#define SPI_ENGINE_INT_SDO_ALMOST_EMPTY BIT(1)
#define SPI_ENGINE_INT_SDI_ALMOST_FULL BIT(2)
#define SPI_ENGINE_INT_SYNC BIT(3)
+#define SPI_ENGINE_INT_OFFLOAD_SYNC BIT(4)
+
+#define SPI_ENGINE_OFFLOAD_CTRL_ENABLE BIT(0)
#define SPI_ENGINE_CONFIG_CPHA BIT(0)
#define SPI_ENGINE_CONFIG_CPOL BIT(1)
@@ -79,6 +100,10 @@
#define SPI_ENGINE_CMD_CS_INV(flags) \
SPI_ENGINE_CMD(SPI_ENGINE_INST_CS_INV, 0, (flags))
+/* default sizes - can be changed when SPI Engine firmware is compiled */
+#define SPI_ENGINE_OFFLOAD_CMD_FIFO_SIZE 16
+#define SPI_ENGINE_OFFLOAD_SDO_FIFO_SIZE 16
+
struct spi_engine_program {
unsigned int length;
uint16_t instructions[] __counted_by(length);
@@ -106,6 +131,17 @@ struct spi_engine_message_state {
uint8_t *rx_buf;
};
+enum {
+ SPI_ENGINE_OFFLOAD_FLAG_ASSIGNED,
+ SPI_ENGINE_OFFLOAD_FLAG_PREPARED,
+};
+
+struct spi_engine_offload {
+ struct spi_engine *spi_engine;
+ unsigned long flags;
+ unsigned int offload_num;
+};
+
struct spi_engine {
struct clk *clk;
struct clk *ref_clk;
@@ -118,6 +154,11 @@ struct spi_engine {
unsigned int int_enable;
/* shadows hardware CS inversion flag state */
u8 cs_inv;
+
+ unsigned int offload_ctrl_mem_size;
+ unsigned int offload_sdo_mem_size;
+ struct spi_offload *offload;
+ u32 offload_caps;
};
static void spi_engine_program_add_cmd(struct spi_engine_program *p,
@@ -163,9 +204,9 @@ static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
unsigned int n = min(len, 256U);
unsigned int flags = 0;
- if (xfer->tx_buf)
+ if (xfer->tx_buf || (xfer->offload_flags & SPI_OFFLOAD_XFER_TX_STREAM))
flags |= SPI_ENGINE_TRANSFER_WRITE;
- if (xfer->rx_buf)
+ if (xfer->rx_buf || (xfer->offload_flags & SPI_OFFLOAD_XFER_RX_STREAM))
flags |= SPI_ENGINE_TRANSFER_READ;
spi_engine_program_add_cmd(p, dry,
@@ -217,16 +258,24 @@ static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
*
* NB: This is separate from spi_engine_compile_message() because the latter
* is called twice and would otherwise result in double-evaluation.
+ *
+ * Returns 0 on success, -EINVAL on failure.
*/
-static void spi_engine_precompile_message(struct spi_message *msg)
+static int spi_engine_precompile_message(struct spi_message *msg)
{
unsigned int clk_div, max_hz = msg->spi->controller->max_speed_hz;
struct spi_transfer *xfer;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /* If we have an offload transfer, we can't rx to buffer */
+ if (msg->offload && xfer->rx_buf)
+ return -EINVAL;
+
clk_div = DIV_ROUND_UP(max_hz, xfer->speed_hz);
xfer->effective_speed_hz = max_hz / min(clk_div, 256U);
}
+
+ return 0;
}
static void spi_engine_compile_message(struct spi_message *msg, bool dry,
@@ -521,11 +570,105 @@ static irqreturn_t spi_engine_irq(int irq, void *devid)
return IRQ_HANDLED;
}
+static int spi_engine_offload_prepare(struct spi_message *msg)
+{
+ struct spi_controller *host = msg->spi->controller;
+ struct spi_engine *spi_engine = spi_controller_get_devdata(host);
+ struct spi_engine_program *p = msg->opt_state;
+ struct spi_engine_offload *priv = msg->offload->priv;
+ struct spi_transfer *xfer;
+ void __iomem *cmd_addr;
+ void __iomem *sdo_addr;
+ size_t tx_word_count = 0;
+ unsigned int i;
+
+ if (p->length > spi_engine->offload_ctrl_mem_size)
+ return -EINVAL;
+
+ /* count total number of tx words in message */
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /* no support for reading to rx_buf */
+ if (xfer->rx_buf)
+ return -EINVAL;
+
+ if (!xfer->tx_buf)
+ continue;
+
+ if (xfer->bits_per_word <= 8)
+ tx_word_count += xfer->len;
+ else if (xfer->bits_per_word <= 16)
+ tx_word_count += xfer->len / 2;
+ else
+ tx_word_count += xfer->len / 4;
+ }
+
+ if (tx_word_count && !(spi_engine->offload_caps & SPI_OFFLOAD_CAP_TX_STATIC_DATA))
+ return -EINVAL;
+
+ if (tx_word_count > spi_engine->offload_sdo_mem_size)
+ return -EINVAL;
+
+ /*
+ * This protects against calling spi_optimize_message() with an offload
+ * that has already been prepared with a different message.
+ */
+ if (test_and_set_bit_lock(SPI_ENGINE_OFFLOAD_FLAG_PREPARED, &priv->flags))
+ return -EBUSY;
+
+ cmd_addr = spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_CMD_FIFO(priv->offload_num);
+ sdo_addr = spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_SDO_FIFO(priv->offload_num);
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (!xfer->tx_buf)
+ continue;
+
+ if (xfer->bits_per_word <= 8) {
+ const u8 *buf = xfer->tx_buf;
+
+ for (i = 0; i < xfer->len; i++)
+ writel_relaxed(buf[i], sdo_addr);
+ } else if (xfer->bits_per_word <= 16) {
+ const u16 *buf = xfer->tx_buf;
+
+ for (i = 0; i < xfer->len / 2; i++)
+ writel_relaxed(buf[i], sdo_addr);
+ } else {
+ const u32 *buf = xfer->tx_buf;
+
+ for (i = 0; i < xfer->len / 4; i++)
+ writel_relaxed(buf[i], sdo_addr);
+ }
+ }
+
+ for (i = 0; i < p->length; i++)
+ writel_relaxed(p->instructions[i], cmd_addr);
+
+ return 0;
+}
+
+static void spi_engine_offload_unprepare(struct spi_offload *offload)
+{
+ struct spi_engine_offload *priv = offload->priv;
+ struct spi_engine *spi_engine = priv->spi_engine;
+
+ writel_relaxed(1, spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_RESET(priv->offload_num));
+ writel_relaxed(0, spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_RESET(priv->offload_num));
+
+ clear_bit_unlock(SPI_ENGINE_OFFLOAD_FLAG_PREPARED, &priv->flags);
+}
+
static int spi_engine_optimize_message(struct spi_message *msg)
{
struct spi_engine_program p_dry, *p;
+ int ret;
- spi_engine_precompile_message(msg);
+ ret = spi_engine_precompile_message(msg);
+ if (ret)
+ return ret;
p_dry.length = 0;
spi_engine_compile_message(msg, true, &p_dry);
@@ -537,20 +680,61 @@ static int spi_engine_optimize_message(struct spi_message *msg)
spi_engine_compile_message(msg, false, p);
spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(
- AXI_SPI_ENGINE_CUR_MSG_SYNC_ID));
+ msg->offload ? 0 : AXI_SPI_ENGINE_CUR_MSG_SYNC_ID));
msg->opt_state = p;
+ if (msg->offload) {
+ ret = spi_engine_offload_prepare(msg);
+ if (ret) {
+ msg->opt_state = NULL;
+ kfree(p);
+ return ret;
+ }
+ }
+
return 0;
}
static int spi_engine_unoptimize_message(struct spi_message *msg)
{
+ if (msg->offload)
+ spi_engine_offload_unprepare(msg->offload);
+
kfree(msg->opt_state);
return 0;
}
+static struct spi_offload
+*spi_engine_get_offload(struct spi_device *spi,
+ const struct spi_offload_config *config)
+{
+ struct spi_controller *host = spi->controller;
+ struct spi_engine *spi_engine = spi_controller_get_devdata(host);
+ struct spi_engine_offload *priv;
+
+ if (!spi_engine->offload)
+ return ERR_PTR(-ENODEV);
+
+ if (config->capability_flags & ~spi_engine->offload_caps)
+ return ERR_PTR(-EINVAL);
+
+ priv = spi_engine->offload->priv;
+
+ if (test_and_set_bit_lock(SPI_ENGINE_OFFLOAD_FLAG_ASSIGNED, &priv->flags))
+ return ERR_PTR(-EBUSY);
+
+ return spi_engine->offload;
+}
+
+static void spi_engine_put_offload(struct spi_offload *offload)
+{
+ struct spi_engine_offload *priv = offload->priv;
+
+ clear_bit_unlock(SPI_ENGINE_OFFLOAD_FLAG_ASSIGNED, &priv->flags);
+}
+
static int spi_engine_setup(struct spi_device *device)
{
struct spi_controller *host = device->controller;
@@ -583,6 +767,12 @@ static int spi_engine_transfer_one_message(struct spi_controller *host,
unsigned int int_enable = 0;
unsigned long flags;
+ if (msg->offload) {
+ dev_err(&host->dev, "Single transfer offload not supported\n");
+ msg->status = -EOPNOTSUPP;
+ goto out;
+ }
+
/* reinitialize message state for this transfer */
memset(st, 0, sizeof(*st));
st->cmd_buf = p->instructions;
@@ -632,11 +822,68 @@ static int spi_engine_transfer_one_message(struct spi_controller *host,
trace_spi_transfer_stop(msg, xfer);
}
+out:
spi_finalize_current_message(host);
return msg->status;
}
+static int spi_engine_trigger_enable(struct spi_offload *offload)
+{
+ struct spi_engine_offload *priv = offload->priv;
+ struct spi_engine *spi_engine = priv->spi_engine;
+ unsigned int reg;
+
+ reg = readl_relaxed(spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
+ reg |= SPI_ENGINE_OFFLOAD_CTRL_ENABLE;
+ writel_relaxed(reg, spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
+ return 0;
+}
+
+static void spi_engine_trigger_disable(struct spi_offload *offload)
+{
+ struct spi_engine_offload *priv = offload->priv;
+ struct spi_engine *spi_engine = priv->spi_engine;
+ unsigned int reg;
+
+ reg = readl_relaxed(spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
+ reg &= ~SPI_ENGINE_OFFLOAD_CTRL_ENABLE;
+ writel_relaxed(reg, spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
+}
+
+static struct dma_chan
+*spi_engine_tx_stream_request_dma_chan(struct spi_offload *offload)
+{
+ struct spi_engine_offload *priv = offload->priv;
+ char name[16];
+
+ snprintf(name, sizeof(name), "offload%u-tx", priv->offload_num);
+
+ return dma_request_chan(offload->provider_dev, name);
+}
+
+static struct dma_chan
+*spi_engine_rx_stream_request_dma_chan(struct spi_offload *offload)
+{
+ struct spi_engine_offload *priv = offload->priv;
+ char name[16];
+
+ snprintf(name, sizeof(name), "offload%u-rx", priv->offload_num);
+
+ return dma_request_chan(offload->provider_dev, name);
+}
+
+static const struct spi_offload_ops spi_engine_offload_ops = {
+ .trigger_enable = spi_engine_trigger_enable,
+ .trigger_disable = spi_engine_trigger_disable,
+ .tx_stream_request_dma_chan = spi_engine_tx_stream_request_dma_chan,
+ .rx_stream_request_dma_chan = spi_engine_rx_stream_request_dma_chan,
+};
+
static void spi_engine_release_hw(void *p)
{
struct spi_engine *spi_engine = p;
@@ -651,8 +898,7 @@ static int spi_engine_probe(struct platform_device *pdev)
struct spi_engine *spi_engine;
struct spi_controller *host;
unsigned int version;
- int irq;
- int ret;
+ int irq, ret;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -667,6 +913,46 @@ static int spi_engine_probe(struct platform_device *pdev)
spin_lock_init(&spi_engine->lock);
init_completion(&spi_engine->msg_complete);
+ /*
+ * REVISIT: for now, all SPI Engines only have one offload. In the
+ * future, this should be read from a memory mapped register to
+ * determine the number of offloads enabled at HDL compile time. For
+ * now, we can tell if an offload is present if there is a trigger
+ * source wired up to it.
+ */
+ if (device_property_present(&pdev->dev, "trigger-sources")) {
+ struct spi_engine_offload *priv;
+
+ spi_engine->offload =
+ devm_spi_offload_alloc(&pdev->dev,
+ sizeof(struct spi_engine_offload));
+ if (IS_ERR(spi_engine->offload))
+ return PTR_ERR(spi_engine->offload);
+
+ priv = spi_engine->offload->priv;
+ priv->spi_engine = spi_engine;
+ priv->offload_num = 0;
+
+ spi_engine->offload->ops = &spi_engine_offload_ops;
+ spi_engine->offload_caps = SPI_OFFLOAD_CAP_TRIGGER;
+
+ if (device_property_match_string(&pdev->dev, "dma-names", "offload0-rx") >= 0) {
+ spi_engine->offload_caps |= SPI_OFFLOAD_CAP_RX_STREAM_DMA;
+ spi_engine->offload->xfer_flags |= SPI_OFFLOAD_XFER_RX_STREAM;
+ }
+
+ if (device_property_match_string(&pdev->dev, "dma-names", "offload0-tx") >= 0) {
+ spi_engine->offload_caps |= SPI_OFFLOAD_CAP_TX_STREAM_DMA;
+ spi_engine->offload->xfer_flags |= SPI_OFFLOAD_XFER_TX_STREAM;
+ } else {
+ /*
+ * The HDL compile option that enables the TX DMA stream also
+ * disables the SDO memory, so we can't do both at the same time.
+ */
+ spi_engine->offload_caps |= SPI_OFFLOAD_CAP_TX_STATIC_DATA;
+ }
+ }
+
spi_engine->clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
if (IS_ERR(spi_engine->clk))
return PTR_ERR(spi_engine->clk);
@@ -688,6 +974,19 @@ static int spi_engine_probe(struct platform_device *pdev)
return -ENODEV;
}
+ if (ADI_AXI_PCORE_VER_MINOR(version) >= 1) {
+ unsigned int sizes = readl(spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH);
+
+ spi_engine->offload_ctrl_mem_size = 1 <<
+ FIELD_GET(SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_CMD, sizes);
+ spi_engine->offload_sdo_mem_size = 1 <<
+ FIELD_GET(SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_SDO, sizes);
+ } else {
+ spi_engine->offload_ctrl_mem_size = SPI_ENGINE_OFFLOAD_CMD_FIFO_SIZE;
+ spi_engine->offload_sdo_mem_size = SPI_ENGINE_OFFLOAD_SDO_FIFO_SIZE;
+ }
+
writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
@@ -709,6 +1008,8 @@ static int spi_engine_probe(struct platform_device *pdev)
host->transfer_one_message = spi_engine_transfer_one_message;
host->optimize_message = spi_engine_optimize_message;
host->unoptimize_message = spi_engine_unoptimize_message;
+ host->get_offload = spi_engine_get_offload;
+ host->put_offload = spi_engine_put_offload;
host->num_chipselect = 8;
/* Some features depend on the IP core version. */
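For illustration only: a worked decode of the new SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH register using the field masks added above. The sample value 0x0705 is made up (CMD width 5 -> 32 command-FIFO entries, SDO width 7 -> 128 SDO entries); on cores older than minor version 1 the driver falls back to the 16-entry defaults instead.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_SDO	GENMASK(15, 8)
#define SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_CMD	GENMASK(7, 0)

/* e.g. sizes == 0x0705: cmd_mem = 1 << 5 = 32, sdo_mem = 1 << 7 = 128 */
static void decode_offload_mem_sizes(u32 sizes, unsigned int *cmd_mem,
				     unsigned int *sdo_mem)
{
	*cmd_mem = 1 << FIELD_GET(SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_CMD, sizes);
	*sdo_mem = 1 << FIELD_GET(SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_SDO, sizes);
}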
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 0cd37a7436d5..559fbdfbd9f7 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -1658,6 +1658,12 @@ static int cqspi_request_mmap_dma(struct cqspi_st *cqspi)
int ret = PTR_ERR(cqspi->rx_chan);
cqspi->rx_chan = NULL;
+ if (ret == -ENODEV) {
+ /* DMA support is not mandatory */
+ dev_info(&cqspi->pdev->dev, "No Rx DMA available\n");
+ return 0;
+ }
+
return dev_err_probe(&cqspi->pdev->dev, ret, "No Rx DMA available\n");
}
init_completion(&cqspi->rx_dma_complete);
@@ -2067,7 +2073,7 @@ static const struct cqspi_driver_platdata k2g_qspi = {
static const struct cqspi_driver_platdata am654_ospi = {
.hwcaps_mask = CQSPI_SUPPORTS_OCTAL | CQSPI_SUPPORTS_QUAD,
- .quirks = CQSPI_NEEDS_WR_DELAY,
+ .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_NEEDS_WR_DELAY,
};
static const struct cqspi_driver_platdata intel_lgm_qspi = {
diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c
index fc9e33be1e0e..e01c63d23b64 100644
--- a/drivers/spi/spi-fsi.c
+++ b/drivers/spi/spi-fsi.c
@@ -479,6 +479,19 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
shift = SPI_FSI_SEQUENCE_SHIFT_IN(next->len);
fsi_spi_sequence_add(&seq, shift);
+ } else if (next->tx_buf) {
+ if ((next->len + transfer->len) > (SPI_FSI_MAX_TX_SIZE + 8)) {
+ rc = -EINVAL;
+ goto error;
+ }
+
+ len = next->len;
+ while (len > 8) {
+ fsi_spi_sequence_add(&seq,
+ SPI_FSI_SEQUENCE_SHIFT_OUT(8));
+ len -= 8;
+ }
+ fsi_spi_sequence_add(&seq, SPI_FSI_SEQUENCE_SHIFT_OUT(len));
} else {
next = NULL;
}
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 40f5c8fdba76..5e3818445234 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -572,7 +572,7 @@ static int fsl_lpspi_calculate_timeout(struct fsl_lpspi_data *fsl_lpspi,
timeout += 1;
/* Double calculated timeout */
- return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
+ return secs_to_jiffies(2 * timeout);
}
static int fsl_lpspi_dma_transfer(struct spi_controller *controller,
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 4f192e013cd6..405deb6677c1 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -39,36 +39,8 @@ struct spi_gpio {
/*----------------------------------------------------------------------*/
-/*
- * Because the overhead of going through four GPIO procedure calls
- * per transferred bit can make performance a problem, this code
- * is set up so that you can use it in either of two ways:
- *
- * - The slow generic way: set up platform_data to hold the GPIO
- * numbers used for MISO/MOSI/SCK, and issue procedure calls for
- * each of them. This driver can handle several such busses.
- *
- * - The quicker inlined way: only helps with platform GPIO code
- * that inlines operations for constant GPIOs. This can give
- * you tight (fast!) inner loops, but each such bus needs a
- * new driver. You'll define a new C file, with Makefile and
- * Kconfig support; the C code can be a total of six lines:
- *
- * #define DRIVER_NAME "myboard_spi2"
- * #define SPI_MISO_GPIO 119
- * #define SPI_MOSI_GPIO 120
- * #define SPI_SCK_GPIO 121
- * #define SPI_N_CHIPSEL 4
- * #include "spi-gpio.c"
- */
-
-#ifndef DRIVER_NAME
#define DRIVER_NAME "spi_gpio"
-#define GENERIC_BITBANG /* vs tight inlines */
-
-#endif
-
/*----------------------------------------------------------------------*/
static inline struct spi_gpio *__pure
@@ -341,16 +313,14 @@ static int spi_gpio_probe_pdata(struct platform_device *pdev,
struct spi_gpio *spi_gpio = spi_controller_get_devdata(host);
int i;
-#ifdef GENERIC_BITBANG
- if (!pdata || !pdata->num_chipselect)
+ if (!pdata)
return -ENODEV;
-#endif
- /*
- * The host needs to think there is a chipselect even if not
- * connected
- */
- host->num_chipselect = pdata->num_chipselect ?: 1;
+ /* It's just one always-selected device, fine to continue */
+ if (!pdata->num_chipselect)
+ return 0;
+
+ host->num_chipselect = pdata->num_chipselect;
spi_gpio->cs_gpios = devm_kcalloc(dev, host->num_chipselect,
sizeof(*spi_gpio->cs_gpios),
GFP_KERNEL);
@@ -445,8 +415,6 @@ static int spi_gpio_probe(struct platform_device *pdev)
return devm_spi_register_controller(&pdev->dev, host);
}
-MODULE_ALIAS("platform:" DRIVER_NAME);
-
static const struct of_device_id spi_gpio_dt_ids[] = {
{ .compatible = "spi-gpio" },
{}
@@ -465,3 +433,4 @@ module_platform_driver(spi_gpio_driver);
MODULE_DESCRIPTION("SPI host driver using generic bitbanged GPIO ");
MODULE_AUTHOR("David Brownell");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index eeb7d082c247..832d6e9009eb 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1449,7 +1449,7 @@ static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size)
timeout += 1;
/* Double calculated timeout */
- return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
+ return secs_to_jiffies(2 * timeout);
}
static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index a9f0f47f4759..a31a1db07aa4 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -377,6 +377,17 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
/* Make sure the operation frequency is correct before going further */
spi_mem_adjust_op_freq(mem, (struct spi_mem_op *)op);
+ dev_vdbg(&mem->spi->dev, "[cmd: 0x%02x][%dB addr: %#8llx][%2dB dummy][%4dB data %s] %d%c-%d%c-%d%c-%d%c @ %uHz\n",
+ op->cmd.opcode,
+ op->addr.nbytes, (op->addr.nbytes ? op->addr.val : 0),
+ op->dummy.nbytes,
+ op->data.nbytes, (op->data.nbytes ? (op->data.dir == SPI_MEM_DATA_IN ? " read" : "write") : " "),
+ op->cmd.buswidth, op->cmd.dtr ? 'D' : 'S',
+ op->addr.buswidth, op->addr.dtr ? 'D' : 'S',
+ op->dummy.buswidth, op->dummy.dtr ? 'D' : 'S',
+ op->data.buswidth, op->data.dtr ? 'D' : 'S',
+ op->max_freq ? op->max_freq : mem->spi->max_speed_hz);
+
ret = spi_mem_check_op(op);
if (ret)
return ret;
diff --git a/drivers/spi/spi-microchip-core.c b/drivers/spi/spi-microchip-core.c
index 5b6af55855ef..62ba0bd9cbb7 100644
--- a/drivers/spi/spi-microchip-core.c
+++ b/drivers/spi/spi-microchip-core.c
@@ -70,8 +70,7 @@
#define INT_RX_CHANNEL_OVERFLOW BIT(2)
#define INT_TX_CHANNEL_UNDERRUN BIT(3)
-#define INT_ENABLE_MASK (CONTROL_RX_DATA_INT | CONTROL_TX_DATA_INT | \
- CONTROL_RX_OVER_INT | CONTROL_TX_UNDER_INT)
+#define INT_ENABLE_MASK (CONTROL_RX_OVER_INT | CONTROL_TX_UNDER_INT)
#define REG_CONTROL (0x00)
#define REG_FRAME_SIZE (0x04)
@@ -133,10 +132,15 @@ static inline void mchp_corespi_disable(struct mchp_corespi *spi)
mchp_corespi_write(spi, REG_CONTROL, control);
}
-static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi)
+static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi, int fifo_max)
{
- while (spi->rx_len >= spi->n_bytes && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_RXFIFO_EMPTY)) {
- u32 data = mchp_corespi_read(spi, REG_RX_DATA);
+ for (int i = 0; i < fifo_max; i++) {
+ u32 data;
+
+ while (mchp_corespi_read(spi, REG_STATUS) & STATUS_RXFIFO_EMPTY)
+ ;
+
+ data = mchp_corespi_read(spi, REG_RX_DATA);
spi->rx_len -= spi->n_bytes;
@@ -211,11 +215,10 @@ static inline void mchp_corespi_set_xfer_size(struct mchp_corespi *spi, int len)
mchp_corespi_write(spi, REG_FRAMESUP, len);
}
-static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi)
+static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi, int fifo_max)
{
- int fifo_max, i = 0;
+ int i = 0;
- fifo_max = DIV_ROUND_UP(min(spi->tx_len, FIFO_DEPTH), spi->n_bytes);
mchp_corespi_set_xfer_size(spi, fifo_max);
while ((i < fifo_max) && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_TXFIFO_FULL)) {
@@ -413,19 +416,6 @@ static irqreturn_t mchp_corespi_interrupt(int irq, void *dev_id)
if (intfield == 0)
return IRQ_NONE;
- if (intfield & INT_TXDONE)
- mchp_corespi_write(spi, REG_INT_CLEAR, INT_TXDONE);
-
- if (intfield & INT_RXRDY) {
- mchp_corespi_write(spi, REG_INT_CLEAR, INT_RXRDY);
-
- if (spi->rx_len)
- mchp_corespi_read_fifo(spi);
- }
-
- if (!spi->rx_len && !spi->tx_len)
- finalise = true;
-
if (intfield & INT_RX_CHANNEL_OVERFLOW) {
mchp_corespi_write(spi, REG_INT_CLEAR, INT_RX_CHANNEL_OVERFLOW);
finalise = true;
@@ -512,9 +502,14 @@ static int mchp_corespi_transfer_one(struct spi_controller *host,
mchp_corespi_write(spi, REG_SLAVE_SELECT, spi->pending_slave_select);
- while (spi->tx_len)
- mchp_corespi_write_fifo(spi);
+ while (spi->tx_len) {
+ int fifo_max = DIV_ROUND_UP(min(spi->tx_len, FIFO_DEPTH), spi->n_bytes);
+
+ mchp_corespi_write_fifo(spi, fifo_max);
+ mchp_corespi_read_fifo(spi, fifo_max);
+ }
+ spi_finalize_current_transfer(host);
return 1;
}
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 197bf2dbe5de..4b0a1c0db041 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -20,6 +20,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/dma-mapping.h>
+#include <linux/pm_qos.h>
#define SPI_CFG0_REG 0x0000
#define SPI_CFG1_REG 0x0004
@@ -147,6 +148,7 @@ struct mtk_spi_compatible {
* @tx_sgl_len: Size of TX DMA transfer
* @rx_sgl_len: Size of RX DMA transfer
* @dev_comp: Device data structure
+ * @qos_request: QoS request
* @spi_clk_hz: Current SPI clock in Hz
* @spimem_done: SPI-MEM operation completion
* @use_spimem: Enables SPI-MEM
@@ -166,6 +168,7 @@ struct mtk_spi {
struct scatterlist *tx_sgl, *rx_sgl;
u32 tx_sgl_len, rx_sgl_len;
const struct mtk_spi_compatible *dev_comp;
+ struct pm_qos_request qos_request;
u32 spi_clk_hz;
struct completion spimem_done;
bool use_spimem;
@@ -356,6 +359,7 @@ static int mtk_spi_hw_init(struct spi_controller *host,
struct mtk_chip_config *chip_config = spi->controller_data;
struct mtk_spi *mdata = spi_controller_get_devdata(host);
+ cpu_latency_qos_update_request(&mdata->qos_request, 500);
cpha = spi->mode & SPI_CPHA ? 1 : 0;
cpol = spi->mode & SPI_CPOL ? 1 : 0;
@@ -459,6 +463,15 @@ static int mtk_spi_prepare_message(struct spi_controller *host,
return mtk_spi_hw_init(host, msg->spi);
}
+static int mtk_spi_unprepare_message(struct spi_controller *host,
+ struct spi_message *message)
+{
+ struct mtk_spi *mdata = spi_controller_get_devdata(host);
+
+ cpu_latency_qos_update_request(&mdata->qos_request, PM_QOS_DEFAULT_VALUE);
+ return 0;
+}
+
static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
{
u32 reg_val;
@@ -1143,6 +1156,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
host->set_cs = mtk_spi_set_cs;
host->prepare_message = mtk_spi_prepare_message;
+ host->unprepare_message = mtk_spi_unprepare_message;
host->transfer_one = mtk_spi_transfer_one;
host->can_dma = mtk_spi_can_dma;
host->setup = mtk_spi_setup;
@@ -1249,6 +1263,8 @@ static int mtk_spi_probe(struct platform_device *pdev)
clk_disable_unprepare(mdata->spi_hclk);
}
+ cpu_latency_qos_add_request(&mdata->qos_request, PM_QOS_DEFAULT_VALUE);
+
if (mdata->dev_comp->need_pad_sel) {
if (mdata->pad_num != host->num_chipselect)
return dev_err_probe(dev, -EINVAL,
@@ -1292,6 +1308,7 @@ static void mtk_spi_remove(struct platform_device *pdev)
struct mtk_spi *mdata = spi_controller_get_devdata(host);
int ret;
+ cpu_latency_qos_remove_request(&mdata->qos_request);
if (mdata->use_spimem && !completion_done(&mdata->spimem_done))
complete(&mdata->spimem_done);
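Condensing the PM QoS hunks above into one place, the request's lifecycle looks like this (editorial sketch; the helper name is illustrative, the calls are the ones used by the driver above):

#include <linux/pm_qos.h>

/* Sketch only: mirrors how mtk_spi uses mdata->qos_request above. */
static void mtk_spi_qos_lifecycle_sketch(struct pm_qos_request *req)
{
	/* probe(): register the request with no constraint */
	cpu_latency_qos_add_request(req, PM_QOS_DEFAULT_VALUE);

	/* prepare_message(): cap CPU exit latency at 500 us during transfers */
	cpu_latency_qos_update_request(req, 500);

	/* unprepare_message(): relax the constraint between messages */
	cpu_latency_qos_update_request(req, PM_QOS_DEFAULT_VALUE);

	/* remove(): drop the request */
	cpu_latency_qos_remove_request(req);
}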
diff --git a/drivers/spi/spi-mtk-snfi.c b/drivers/spi/spi-mtk-snfi.c
index fdbea9dffb62..e82ee6dcf498 100644
--- a/drivers/spi/spi-mtk-snfi.c
+++ b/drivers/spi/spi-mtk-snfi.c
@@ -1284,9 +1284,6 @@ static int mtk_snand_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->controller);
- dev_dbg(ms->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u", op->cmd.opcode,
- op->addr.val, op->addr.buswidth, op->addr.nbytes,
- op->data.buswidth, op->data.nbytes);
if (mtk_snand_is_page_ops(op)) {
if (op->data.dir == SPI_MEM_DATA_IN)
return mtk_snand_read_page_cache(ms, op);
diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c
index c02c4204442f..0eb35c4e3987 100644
--- a/drivers/spi/spi-mux.c
+++ b/drivers/spi/spi-mux.c
@@ -68,9 +68,7 @@ static int spi_mux_select(struct spi_device *spi)
priv->current_cs = spi_get_chipselect(spi, 0);
- spi_setup(priv->spi);
-
- return 0;
+ return spi_setup(priv->spi);
}
static int spi_mux_setup(struct spi_device *spi)
diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c
index 958bab27a081..67cc1d86de42 100644
--- a/drivers/spi/spi-npcm-fiu.c
+++ b/drivers/spi/spi-npcm-fiu.c
@@ -550,11 +550,6 @@ static int npcm_fiu_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
int ret = 0;
u8 *buf;
- dev_dbg(fiu->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
- op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
- op->dummy.buswidth, op->data.buswidth, op->addr.val,
- op->data.nbytes);
-
if (fiu->spix_mode || op->addr.nbytes > 4)
return -EOPNOTSUPP;
diff --git a/drivers/spi/spi-offload-trigger-pwm.c b/drivers/spi/spi-offload-trigger-pwm.c
new file mode 100644
index 000000000000..805ed41560df
--- /dev/null
+++ b/drivers/spi/spi-offload-trigger-pwm.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Analog Devices Inc.
+ * Copyright (C) 2024 BayLibre, SAS
+ *
+ * Generic PWM trigger for SPI offload.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/math.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/pwm.h>
+#include <linux/spi/offload/provider.h>
+#include <linux/spi/offload/types.h>
+#include <linux/time.h>
+#include <linux/types.h>
+
+struct spi_offload_trigger_pwm_state {
+ struct device *dev;
+ struct pwm_device *pwm;
+};
+
+static bool spi_offload_trigger_pwm_match(struct spi_offload_trigger *trigger,
+ enum spi_offload_trigger_type type,
+ u64 *args, u32 nargs)
+{
+ if (nargs)
+ return false;
+
+ return type == SPI_OFFLOAD_TRIGGER_PERIODIC;
+}
+
+static int spi_offload_trigger_pwm_validate(struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config)
+{
+ struct spi_offload_trigger_pwm_state *st = spi_offload_trigger_get_priv(trigger);
+ struct spi_offload_trigger_periodic *periodic = &config->periodic;
+ struct pwm_waveform wf = { };
+ int ret;
+
+ if (config->type != SPI_OFFLOAD_TRIGGER_PERIODIC)
+ return -EINVAL;
+
+ if (!periodic->frequency_hz)
+ return -EINVAL;
+
+ wf.period_length_ns = DIV_ROUND_UP_ULL(NSEC_PER_SEC, periodic->frequency_hz);
+ /* REVISIT: 50% duty-cycle for now - may add config parameter later */
+ wf.duty_length_ns = wf.period_length_ns / 2;
+
+ ret = pwm_round_waveform_might_sleep(st->pwm, &wf);
+ if (ret < 0)
+ return ret;
+
+ periodic->frequency_hz = DIV_ROUND_UP_ULL(NSEC_PER_SEC, wf.period_length_ns);
+
+ return 0;
+}
+
+static int spi_offload_trigger_pwm_enable(struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config)
+{
+ struct spi_offload_trigger_pwm_state *st = spi_offload_trigger_get_priv(trigger);
+ struct spi_offload_trigger_periodic *periodic = &config->periodic;
+ struct pwm_waveform wf = { };
+
+ if (config->type != SPI_OFFLOAD_TRIGGER_PERIODIC)
+ return -EINVAL;
+
+ if (!periodic->frequency_hz)
+ return -EINVAL;
+
+ wf.period_length_ns = DIV_ROUND_UP_ULL(NSEC_PER_SEC, periodic->frequency_hz);
+ /* REVISIT: 50% duty-cycle for now - may add config parameter later */
+ wf.duty_length_ns = wf.period_length_ns / 2;
+
+ return pwm_set_waveform_might_sleep(st->pwm, &wf, false);
+}
+
+static void spi_offload_trigger_pwm_disable(struct spi_offload_trigger *trigger)
+{
+ struct spi_offload_trigger_pwm_state *st = spi_offload_trigger_get_priv(trigger);
+ struct pwm_waveform wf;
+ int ret;
+
+ ret = pwm_get_waveform_might_sleep(st->pwm, &wf);
+ if (ret < 0) {
+ dev_err(st->dev, "failed to get waveform: %d\n", ret);
+ return;
+ }
+
+ wf.duty_length_ns = 0;
+
+ ret = pwm_set_waveform_might_sleep(st->pwm, &wf, false);
+ if (ret < 0)
+ dev_err(st->dev, "failed to disable PWM: %d\n", ret);
+}
+
+static const struct spi_offload_trigger_ops spi_offload_trigger_pwm_ops = {
+ .match = spi_offload_trigger_pwm_match,
+ .validate = spi_offload_trigger_pwm_validate,
+ .enable = spi_offload_trigger_pwm_enable,
+ .disable = spi_offload_trigger_pwm_disable,
+};
+
+static void spi_offload_trigger_pwm_release(void *data)
+{
+ pwm_disable(data);
+}
+
+static int spi_offload_trigger_pwm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_offload_trigger_info info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &spi_offload_trigger_pwm_ops,
+ };
+ struct spi_offload_trigger_pwm_state *st;
+ struct pwm_state state;
+ int ret;
+
+ st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ info.priv = st;
+ st->dev = dev;
+
+ st->pwm = devm_pwm_get(dev, NULL);
+ if (IS_ERR(st->pwm))
+ return dev_err_probe(dev, PTR_ERR(st->pwm), "failed to get PWM\n");
+
+ /* init with duty_cycle = 0, output enabled to ensure trigger off */
+ pwm_init_state(st->pwm, &state);
+ state.enabled = true;
+
+ ret = pwm_apply_might_sleep(st->pwm, &state);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to apply PWM state\n");
+
+ ret = devm_add_action_or_reset(dev, spi_offload_trigger_pwm_release, st->pwm);
+ if (ret)
+ return ret;
+
+ return devm_spi_offload_trigger_register(dev, &info);
+}
+
+static const struct of_device_id spi_offload_trigger_pwm_of_match_table[] = {
+ { .compatible = "pwm-trigger" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, spi_offload_trigger_pwm_of_match_table);
+
+static struct platform_driver spi_offload_trigger_pwm_driver = {
+ .driver = {
+ .name = "pwm-trigger",
+ .of_match_table = spi_offload_trigger_pwm_of_match_table,
+ },
+ .probe = spi_offload_trigger_pwm_probe,
+};
+module_platform_driver(spi_offload_trigger_pwm_driver);
+
+MODULE_AUTHOR("David Lechner <dlechner@baylibre.com>");
+MODULE_DESCRIPTION("Generic PWM trigger");
+MODULE_LICENSE("GPL");
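A numeric walk-through of the validate path above (editorial sketch; the rounded period is hypothetical):

/*
 * periodic->frequency_hz = 400000 (400 kHz)
 *   wf.period_length_ns = DIV_ROUND_UP_ULL(NSEC_PER_SEC, 400000) = 2500 ns
 *   wf.duty_length_ns   = 2500 / 2                               = 1250 ns
 * If pwm_round_waveform_might_sleep() rounded the period up to, say, 2560 ns,
 * the frequency reported back to the caller would be
 *   DIV_ROUND_UP_ULL(NSEC_PER_SEC, 2560) = 390625 Hz
 * which is why callers are expected to re-check the config after validation.
 */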
diff --git a/drivers/spi/spi-offload.c b/drivers/spi/spi-offload.c
new file mode 100644
index 000000000000..6bad042fe437
--- /dev/null
+++ b/drivers/spi/spi-offload.c
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Analog Devices Inc.
+ * Copyright (C) 2024 BayLibre, SAS
+ */
+
+/*
+ * SPI Offloading support.
+ *
+ * Some SPI controllers support offloading of SPI transfers. Essentially, this
+ * is the ability for a SPI controller to perform SPI transfers with minimal
+ * or even no CPU intervention, e.g. via a specialized SPI controller with a
+ * hardware trigger or via a conventional SPI controller using a non-Linux MCU
+ * processor core to offload the work.
+ */
+
+#define DEFAULT_SYMBOL_NAMESPACE "SPI_OFFLOAD"
+
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/export.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/property.h>
+#include <linux/spi/offload/consumer.h>
+#include <linux/spi/offload/provider.h>
+#include <linux/spi/offload/types.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+struct spi_controller_and_offload {
+ struct spi_controller *controller;
+ struct spi_offload *offload;
+};
+
+struct spi_offload_trigger {
+ struct list_head list;
+ struct kref ref;
+ struct fwnode_handle *fwnode;
+ /* synchronizes calling ops and driver registration */
+ struct mutex lock;
+ /*
+ * If the provider goes away while the consumer still has a reference,
+ * ops and priv will be set to NULL and all calls will fail with -ENODEV.
+ */
+ const struct spi_offload_trigger_ops *ops;
+ void *priv;
+};
+
+static LIST_HEAD(spi_offload_triggers);
+static DEFINE_MUTEX(spi_offload_triggers_lock);
+
+/**
+ * devm_spi_offload_alloc() - Allocate offload instance
+ * @dev: Device for devm purposes and assigned to &struct spi_offload.provider_dev
+ * @priv_size: Size of private data to allocate
+ *
+ * Offload providers should use this to allocate offload instances.
+ *
+ * Return: Pointer to new offload instance or error on failure.
+ */
+struct spi_offload *devm_spi_offload_alloc(struct device *dev,
+ size_t priv_size)
+{
+ struct spi_offload *offload;
+ void *priv;
+
+ offload = devm_kzalloc(dev, sizeof(*offload), GFP_KERNEL);
+ if (!offload)
+ return ERR_PTR(-ENOMEM);
+
+ priv = devm_kzalloc(dev, priv_size, GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ offload->provider_dev = dev;
+ offload->priv = priv;
+
+ return offload;
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_alloc);
+
+static void spi_offload_put(void *data)
+{
+ struct spi_controller_and_offload *resource = data;
+
+ resource->controller->put_offload(resource->offload);
+ kfree(resource);
+}
+
+/**
+ * devm_spi_offload_get() - Get an offload instance
+ * @dev: Device for devm purposes
+ * @spi: SPI device to use for the transfers
+ * @config: Offload configuration
+ *
+ * Peripheral drivers call this function to get an offload instance that meets
+ * the requirements specified in @config. If no suitable offload instance is
+ * available, -ENODEV is returned.
+ *
+ * Return: Offload instance or error on failure.
+ */
+struct spi_offload *devm_spi_offload_get(struct device *dev,
+ struct spi_device *spi,
+ const struct spi_offload_config *config)
+{
+ struct spi_controller_and_offload *resource;
+ struct spi_offload *offload;
+ int ret;
+
+ if (!spi || !config)
+ return ERR_PTR(-EINVAL);
+
+ if (!spi->controller->get_offload)
+ return ERR_PTR(-ENODEV);
+
+ resource = kzalloc(sizeof(*resource), GFP_KERNEL);
+ if (!resource)
+ return ERR_PTR(-ENOMEM);
+
+ offload = spi->controller->get_offload(spi, config);
+ if (IS_ERR(offload)) {
+ kfree(resource);
+ return offload;
+ }
+
+ resource->controller = spi->controller;
+ resource->offload = offload;
+
+ ret = devm_add_action_or_reset(dev, spi_offload_put, resource);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return offload;
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_get);
+
+static void spi_offload_trigger_free(struct kref *ref)
+{
+ struct spi_offload_trigger *trigger =
+ container_of(ref, struct spi_offload_trigger, ref);
+
+ mutex_destroy(&trigger->lock);
+ fwnode_handle_put(trigger->fwnode);
+ kfree(trigger);
+}
+
+static void spi_offload_trigger_put(void *data)
+{
+ struct spi_offload_trigger *trigger = data;
+
+ scoped_guard(mutex, &trigger->lock)
+ if (trigger->ops && trigger->ops->release)
+ trigger->ops->release(trigger);
+
+ kref_put(&trigger->ref, spi_offload_trigger_free);
+}
+
+static struct spi_offload_trigger
+*spi_offload_trigger_get(enum spi_offload_trigger_type type,
+ struct fwnode_reference_args *args)
+{
+ struct spi_offload_trigger *trigger;
+ bool match = false;
+ int ret;
+
+ guard(mutex)(&spi_offload_triggers_lock);
+
+ list_for_each_entry(trigger, &spi_offload_triggers, list) {
+ if (trigger->fwnode != args->fwnode)
+ continue;
+
+ match = trigger->ops->match(trigger, type, args->args, args->nargs);
+ if (match)
+ break;
+ }
+
+ if (!match)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ guard(mutex)(&trigger->lock);
+
+ if (!trigger->ops)
+ return ERR_PTR(-ENODEV);
+
+ if (trigger->ops->request) {
+ ret = trigger->ops->request(trigger, type, args->args, args->nargs);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ kref_get(&trigger->ref);
+
+ return trigger;
+}
+
+/**
+ * devm_spi_offload_trigger_get() - Get an offload trigger instance
+ * @dev: Device for devm purposes.
+ * @offload: Offload instance connected to a trigger.
+ * @type: Trigger type to get.
+ *
+ * Return: Offload trigger instance or error on failure.
+ */
+struct spi_offload_trigger
+*devm_spi_offload_trigger_get(struct device *dev,
+ struct spi_offload *offload,
+ enum spi_offload_trigger_type type)
+{
+ struct spi_offload_trigger *trigger;
+ struct fwnode_reference_args args;
+ int ret;
+
+ ret = fwnode_property_get_reference_args(dev_fwnode(offload->provider_dev),
+ "trigger-sources",
+ "#trigger-source-cells", 0, 0,
+ &args);
+ if (ret)
+ return ERR_PTR(ret);
+
+ trigger = spi_offload_trigger_get(type, &args);
+ fwnode_handle_put(args.fwnode);
+ if (IS_ERR(trigger))
+ return trigger;
+
+ ret = devm_add_action_or_reset(dev, spi_offload_trigger_put, trigger);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return trigger;
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_trigger_get);
+
+/**
+ * spi_offload_trigger_validate - Validate the requested trigger
+ * @trigger: Offload trigger instance
+ * @config: Trigger config to validate
+ *
+ * On success, @config may be modified to reflect what the hardware can do.
+ * For example, the frequency of a periodic trigger may be adjusted to the
+ * nearest supported value.
+ *
+ * Callers will likely need to do additional validation of the modified trigger
+ * parameters.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int spi_offload_trigger_validate(struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config)
+{
+ guard(mutex)(&trigger->lock);
+
+ if (!trigger->ops)
+ return -ENODEV;
+
+ if (!trigger->ops->validate)
+ return -EOPNOTSUPP;
+
+ return trigger->ops->validate(trigger, config);
+}
+EXPORT_SYMBOL_GPL(spi_offload_trigger_validate);
+
+/**
+ * spi_offload_trigger_enable - enables trigger for offload
+ * @offload: Offload instance
+ * @trigger: Offload trigger instance
+ * @config: Trigger config to use when enabling the trigger
+ *
+ * There must be a prepared offload message (i.e. spi_optimize_message() was
+ * called with this offload assigned to the message). This will also reserve
+ * the bus for exclusive use by the offload
+ * instance until the trigger is disabled. Any other attempts to send a
+ * transfer or lock the bus will fail with -EBUSY during this time.
+ *
+ * Calls must be balanced with spi_offload_trigger_disable().
+ *
+ * Context: can sleep
+ * Return: 0 on success, else a negative error code.
+ */
+int spi_offload_trigger_enable(struct spi_offload *offload,
+ struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config)
+{
+ int ret;
+
+ guard(mutex)(&trigger->lock);
+
+ if (!trigger->ops)
+ return -ENODEV;
+
+ if (offload->ops && offload->ops->trigger_enable) {
+ ret = offload->ops->trigger_enable(offload);
+ if (ret)
+ return ret;
+ }
+
+ if (trigger->ops->enable) {
+ ret = trigger->ops->enable(trigger, config);
+ if (ret) {
+ if (offload->ops->trigger_disable)
+ offload->ops->trigger_disable(offload);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_offload_trigger_enable);
+
+/**
+ * spi_offload_trigger_disable - disables hardware trigger for offload
+ * @offload: Offload instance
+ * @trigger: Offload trigger instance
+ *
+ * Disables the hardware trigger for the offload instance and releases the
+ * bus for use by other clients.
+ *
+ * Context: can sleep
+ */
+void spi_offload_trigger_disable(struct spi_offload *offload,
+ struct spi_offload_trigger *trigger)
+{
+ if (offload->ops && offload->ops->trigger_disable)
+ offload->ops->trigger_disable(offload);
+
+ guard(mutex)(&trigger->lock);
+
+ if (!trigger->ops)
+ return;
+
+ if (trigger->ops->disable)
+ trigger->ops->disable(trigger);
+}
+EXPORT_SYMBOL_GPL(spi_offload_trigger_disable);
+
+static void spi_offload_release_dma_chan(void *chan)
+{
+ dma_release_channel(chan);
+}
+
+/**
+ * devm_spi_offload_tx_stream_request_dma_chan - Get the DMA channel info for the TX stream
+ * @dev: Device for devm purposes.
+ * @offload: Offload instance
+ *
+ * This is the DMA channel that will provide data to transfers that use the
+ * %SPI_OFFLOAD_XFER_TX_STREAM offload flag.
+ *
+ * Return: Pointer to DMA channel info, or negative error code
+ */
+struct dma_chan
+*devm_spi_offload_tx_stream_request_dma_chan(struct device *dev,
+ struct spi_offload *offload)
+{
+ struct dma_chan *chan;
+ int ret;
+
+ if (!offload->ops || !offload->ops->tx_stream_request_dma_chan)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ chan = offload->ops->tx_stream_request_dma_chan(offload);
+ if (IS_ERR(chan))
+ return chan;
+
+ ret = devm_add_action_or_reset(dev, spi_offload_release_dma_chan, chan);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_tx_stream_request_dma_chan);
+
+/**
+ * devm_spi_offload_rx_stream_request_dma_chan - Get the DMA channel info for the RX stream
+ * @dev: Device for devm purposes.
+ * @offload: Offload instance
+ *
+ * This is the DMA channel that will receive data from transfers that use the
+ * %SPI_OFFLOAD_XFER_RX_STREAM offload flag.
+ *
+ * Return: Pointer to DMA channel info, or negative error code
+ */
+struct dma_chan
+*devm_spi_offload_rx_stream_request_dma_chan(struct device *dev,
+ struct spi_offload *offload)
+{
+ struct dma_chan *chan;
+ int ret;
+
+ if (!offload->ops || !offload->ops->rx_stream_request_dma_chan)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ chan = offload->ops->rx_stream_request_dma_chan(offload);
+ if (IS_ERR(chan))
+ return chan;
+
+ ret = devm_add_action_or_reset(dev, spi_offload_release_dma_chan, chan);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_rx_stream_request_dma_chan);
+
+/* Trigger providers */
+
+static void spi_offload_trigger_unregister(void *data)
+{
+ struct spi_offload_trigger *trigger = data;
+
+ scoped_guard(mutex, &spi_offload_triggers_lock)
+ list_del(&trigger->list);
+
+ scoped_guard(mutex, &trigger->lock) {
+ trigger->priv = NULL;
+ trigger->ops = NULL;
+ }
+
+ kref_put(&trigger->ref, spi_offload_trigger_free);
+}
+
+/**
+ * devm_spi_offload_trigger_register() - Allocate and register an offload trigger
+ * @dev: Device for devm purposes.
+ * @info: Provider-specific trigger info.
+ *
+ * Return: 0 on success, else a negative error code.
+ */
+int devm_spi_offload_trigger_register(struct device *dev,
+ struct spi_offload_trigger_info *info)
+{
+ struct spi_offload_trigger *trigger;
+
+ if (!info->fwnode || !info->ops)
+ return -EINVAL;
+
+ trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
+ if (!trigger)
+ return -ENOMEM;
+
+ kref_init(&trigger->ref);
+ mutex_init(&trigger->lock);
+ trigger->fwnode = fwnode_handle_get(info->fwnode);
+ trigger->ops = info->ops;
+ trigger->priv = info->priv;
+
+ scoped_guard(mutex, &spi_offload_triggers_lock)
+ list_add_tail(&trigger->list, &spi_offload_triggers);
+
+ return devm_add_action_or_reset(dev, spi_offload_trigger_unregister, trigger);
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_trigger_register);
+
+/**
+ * spi_offload_trigger_get_priv() - Get the private data for the trigger
+ *
+ * @trigger: Offload trigger instance.
+ *
+ * Return: Private data for the trigger.
+ */
+void *spi_offload_trigger_get_priv(struct spi_offload_trigger *trigger)
+{
+ return trigger->priv;
+}
+EXPORT_SYMBOL_GPL(spi_offload_trigger_get_priv);
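To show how the consumer API above fits together, here is a hedged probe-time sketch for a hypothetical peripheral driver; the device specifics, requested capabilities, and trigger rate are placeholders, and only functions exported by spi-offload.c above are called:

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/spi/offload/consumer.h>
#include <linux/spi/offload/types.h>
#include <linux/spi/spi.h>

/* Editorial sketch, not an in-tree driver. */
static int example_offload_consumer_probe(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	/* fill in the capabilities the peripheral needs (see offload/types.h) */
	struct spi_offload_config config = { };
	struct spi_offload_trigger_config trig_config = {
		.type = SPI_OFFLOAD_TRIGGER_PERIODIC,
		.periodic.frequency_hz = 1000000,	/* example rate */
	};
	struct spi_offload *offload;
	struct spi_offload_trigger *trigger;
	struct dma_chan *rx_chan;
	int ret;

	offload = devm_spi_offload_get(dev, spi, &config);
	if (IS_ERR(offload))
		return PTR_ERR(offload);

	trigger = devm_spi_offload_trigger_get(dev, offload,
					       SPI_OFFLOAD_TRIGGER_PERIODIC);
	if (IS_ERR(trigger))
		return PTR_ERR(trigger);

	/* Let the provider round the rate, then re-check trig_config. */
	ret = spi_offload_trigger_validate(trigger, &trig_config);
	if (ret)
		return ret;

	/* Data produced by the offload arrives on this DMA channel. */
	rx_chan = devm_spi_offload_rx_stream_request_dma_chan(dev, offload);
	if (IS_ERR(rx_chan))
		return PTR_ERR(rx_chan);

	/*
	 * Once a message has been optimized with this offload assigned to it
	 * (see the spi_offload_trigger_enable() kernel-doc above), capture is
	 * started and stopped with balanced calls:
	 *
	 *	spi_offload_trigger_enable(offload, trigger, &trig_config);
	 *	...
	 *	spi_offload_trigger_disable(offload, trigger);
	 */
	return 0;
}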
diff --git a/drivers/spi/spi-qpic-snand.c b/drivers/spi/spi-qpic-snand.c
new file mode 100644
index 000000000000..fbba7741a9bf
--- /dev/null
+++ b/drivers/spi/spi-qpic-snand.c
@@ -0,0 +1,1633 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Authors:
+ * Md Sadre Alam <quic_mdalam@quicinc.com>
+ * Sricharan R <quic_srichara@quicinc.com>
+ * Varadarajan Narayanan <quic_varada@quicinc.com>
+ */
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/qcom_adm.h>
+#include <linux/dma/qcom_bam_dma.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mtd/nand-qpic-common.h>
+#include <linux/mtd/spinand.h>
+#include <linux/bitfield.h>
+
+#define NAND_FLASH_SPI_CFG 0xc0
+#define NAND_NUM_ADDR_CYCLES 0xc4
+#define NAND_BUSY_CHECK_WAIT_CNT 0xc8
+#define NAND_FLASH_FEATURES 0xf64
+
+/* QSPI NAND config reg bits */
+#define LOAD_CLK_CNTR_INIT_EN BIT(28)
+#define CLK_CNTR_INIT_VAL_VEC 0x924
+#define CLK_CNTR_INIT_VAL_VEC_MASK GENMASK(27, 16)
+#define FEA_STATUS_DEV_ADDR 0xc0
+#define FEA_STATUS_DEV_ADDR_MASK GENMASK(15, 8)
+#define SPI_CFG BIT(0)
+#define SPI_NUM_ADDR 0xDA4DB
+#define SPI_WAIT_CNT 0x10
+#define QPIC_QSPI_NUM_CS 1
+#define SPI_TRANSFER_MODE_x1 BIT(29)
+#define SPI_TRANSFER_MODE_x4 (3 << 29)
+#define SPI_WP BIT(28)
+#define SPI_HOLD BIT(27)
+#define QPIC_SET_FEATURE BIT(31)
+
+#define SPINAND_RESET 0xff
+#define SPINAND_READID 0x9f
+#define SPINAND_GET_FEATURE 0x0f
+#define SPINAND_SET_FEATURE 0x1f
+#define SPINAND_READ 0x13
+#define SPINAND_ERASE 0xd8
+#define SPINAND_WRITE_EN 0x06
+#define SPINAND_PROGRAM_EXECUTE 0x10
+#define SPINAND_PROGRAM_LOAD 0x84
+
+#define ACC_FEATURE 0xe
+#define BAD_BLOCK_MARKER_SIZE 0x2
+#define OOB_BUF_SIZE 128
+#define ecceng_to_qspi(eng) container_of(eng, struct qpic_spi_nand, ecc_eng)
+
+struct qpic_snand_op {
+ u32 cmd_reg;
+ u32 addr1_reg;
+ u32 addr2_reg;
+};
+
+struct snandc_read_status {
+ __le32 snandc_flash;
+ __le32 snandc_buffer;
+ __le32 snandc_erased_cw;
+};
+
+/*
+ * struct qcom_ecc_stats - ECC statistics gathered for a page operation
+ * @corrected: total number of corrected bitflips
+ * @bitflips: maximum number of bitflips in any single codeword
+ * @failed: number of uncorrectable ECC errors
+ */
+struct qcom_ecc_stats {
+ u32 corrected;
+ u32 bitflips;
+ u32 failed;
+};
+
+struct qpic_ecc {
+ struct device *dev;
+ int ecc_bytes_hw;
+ int spare_bytes;
+ int bbm_size;
+ int ecc_mode;
+ int bytes;
+ int steps;
+ int step_size;
+ int strength;
+ int cw_size;
+ int cw_data;
+ u32 cfg0;
+ u32 cfg1;
+ u32 cfg0_raw;
+ u32 cfg1_raw;
+ u32 ecc_buf_cfg;
+ u32 ecc_bch_cfg;
+ u32 clrflashstatus;
+ u32 clrreadstatus;
+ bool bch_enabled;
+};
+
+struct qpic_spi_nand {
+ struct qcom_nand_controller *snandc;
+ struct spi_controller *ctlr;
+ struct mtd_info *mtd;
+ struct clk *iomacro_clk;
+ struct qpic_ecc *ecc;
+ struct qcom_ecc_stats ecc_stats;
+ struct nand_ecc_engine ecc_eng;
+ u8 *data_buf;
+ u8 *oob_buf;
+ u32 wlen;
+ __le32 addr1;
+ __le32 addr2;
+ __le32 cmd;
+ u32 num_cw;
+ bool oob_rw;
+ bool page_rw;
+ bool raw_rw;
+};
+
+static void qcom_spi_set_read_loc_first(struct qcom_nand_controller *snandc,
+ int reg, int cw_offset, int read_size,
+ int is_last_read_loc)
+{
+ __le32 locreg_val;
+ u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
+ ((read_size) << READ_LOCATION_SIZE) | ((is_last_read_loc)
+ << READ_LOCATION_LAST));
+
+ locreg_val = cpu_to_le32(val);
+
+ if (reg == NAND_READ_LOCATION_0)
+ snandc->regs->read_location0 = locreg_val;
+ else if (reg == NAND_READ_LOCATION_1)
+ snandc->regs->read_location1 = locreg_val;
+ else if (reg == NAND_READ_LOCATION_2)
+ snandc->regs->read_location2 = locreg_val;
+ else if (reg == NAND_READ_LOCATION_3)
+ snandc->regs->read_location3 = locreg_val;
+}
+
+static void qcom_spi_set_read_loc_last(struct qcom_nand_controller *snandc,
+ int reg, int cw_offset, int read_size,
+ int is_last_read_loc)
+{
+ __le32 locreg_val;
+ u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
+ ((read_size) << READ_LOCATION_SIZE) | ((is_last_read_loc)
+ << READ_LOCATION_LAST));
+
+ locreg_val = cpu_to_le32(val);
+
+ if (reg == NAND_READ_LOCATION_LAST_CW_0)
+ snandc->regs->read_location_last0 = locreg_val;
+ else if (reg == NAND_READ_LOCATION_LAST_CW_1)
+ snandc->regs->read_location_last1 = locreg_val;
+ else if (reg == NAND_READ_LOCATION_LAST_CW_2)
+ snandc->regs->read_location_last2 = locreg_val;
+ else if (reg == NAND_READ_LOCATION_LAST_CW_3)
+ snandc->regs->read_location_last3 = locreg_val;
+}
+
+static struct qcom_nand_controller *nand_to_qcom_snand(struct nand_device *nand)
+{
+ struct nand_ecc_engine *eng = nand->ecc.engine;
+ struct qpic_spi_nand *qspi = ecceng_to_qspi(eng);
+
+ return qspi->snandc;
+}
+
+static int qcom_spi_init(struct qcom_nand_controller *snandc)
+{
+ u32 snand_cfg_val = 0x0;
+ int ret;
+
+ snand_cfg_val = FIELD_PREP(CLK_CNTR_INIT_VAL_VEC_MASK, CLK_CNTR_INIT_VAL_VEC) |
+ FIELD_PREP(LOAD_CLK_CNTR_INIT_EN, 0) |
+ FIELD_PREP(FEA_STATUS_DEV_ADDR_MASK, FEA_STATUS_DEV_ADDR) |
+ FIELD_PREP(SPI_CFG, 0);
+
+ snandc->regs->spi_cfg = cpu_to_le32(snand_cfg_val);
+ snandc->regs->num_addr_cycle = cpu_to_le32(SPI_NUM_ADDR);
+ snandc->regs->busy_wait_cnt = cpu_to_le32(SPI_WAIT_CNT);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->spi_cfg, NAND_FLASH_SPI_CFG, 1, 0);
+
+ snand_cfg_val &= ~LOAD_CLK_CNTR_INIT_EN;
+ snandc->regs->spi_cfg = cpu_to_le32(snand_cfg_val);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->spi_cfg, NAND_FLASH_SPI_CFG, 1, 0);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->num_addr_cycle, NAND_NUM_ADDR_CYCLES, 1, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->busy_wait_cnt, NAND_BUSY_CHECK_WAIT_CNT, 1,
+ NAND_BAM_NEXT_SGL);
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure in submitting spi init descriptor\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int qcom_spi_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
+ struct qpic_ecc *qecc = snandc->qspi->ecc;
+
+ if (section > 1)
+ return -ERANGE;
+
+ oobregion->length = qecc->ecc_bytes_hw + qecc->spare_bytes;
+ oobregion->offset = mtd->oobsize - oobregion->length;
+
+ return 0;
+}
+
+static int qcom_spi_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
+ struct qpic_ecc *qecc = snandc->qspi->ecc;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = qecc->steps * 4;
+ oobregion->offset = ((qecc->steps - 1) * qecc->bytes) + qecc->bbm_size;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops qcom_spi_ooblayout = {
+ .ecc = qcom_spi_ooblayout_ecc,
+ .free = qcom_spi_ooblayout_free,
+};
+
+static int qcom_spi_ecc_init_ctx_pipelined(struct nand_device *nand)
+{
+ struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
+ struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ int cwperpage, bad_block_byte;
+ struct qpic_ecc *ecc_cfg;
+
+ cwperpage = mtd->writesize / NANDC_STEP_SIZE;
+ snandc->qspi->num_cw = cwperpage;
+
+ ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL);
+ if (!ecc_cfg)
+ return -ENOMEM;
+ snandc->qspi->oob_buf = kzalloc(mtd->writesize + mtd->oobsize,
+ GFP_KERNEL);
+ if (!snandc->qspi->oob_buf) {
+ kfree(ecc_cfg);
+ return -ENOMEM;
+ }
+
+ memset(snandc->qspi->oob_buf, 0xff, mtd->writesize + mtd->oobsize);
+
+ nand->ecc.ctx.priv = ecc_cfg;
+ snandc->qspi->mtd = mtd;
+
+ ecc_cfg->ecc_bytes_hw = 7;
+ ecc_cfg->spare_bytes = 4;
+ ecc_cfg->bbm_size = 1;
+ ecc_cfg->bch_enabled = true;
+ ecc_cfg->bytes = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes + ecc_cfg->bbm_size;
+
+ ecc_cfg->steps = 4;
+ ecc_cfg->strength = 4;
+ ecc_cfg->step_size = 512;
+ ecc_cfg->cw_data = 516;
+ ecc_cfg->cw_size = ecc_cfg->cw_data + ecc_cfg->bytes;
+ bad_block_byte = mtd->writesize - ecc_cfg->cw_size * (cwperpage - 1) + 1;
+
+ mtd_set_ooblayout(mtd, &qcom_spi_ooblayout);
+
+ ecc_cfg->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, ecc_cfg->cw_data) |
+ FIELD_PREP(DISABLE_STATUS_AFTER_WRITE, 1) |
+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 3) |
+ FIELD_PREP(ECC_PARITY_SIZE_BYTES_RS, ecc_cfg->ecc_bytes_hw) |
+ FIELD_PREP(STATUS_BFR_READ, 0) |
+ FIELD_PREP(SET_RD_MODE_AFTER_STATUS, 1) |
+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, ecc_cfg->spare_bytes);
+
+ ecc_cfg->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 0) |
+ FIELD_PREP(CS_ACTIVE_BSY, 0) |
+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, bad_block_byte) |
+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 0) |
+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 20) |
+ FIELD_PREP(WIDE_FLASH, 0) |
+ FIELD_PREP(ENABLE_BCH_ECC, ecc_cfg->bch_enabled);
+
+ ecc_cfg->cfg0_raw = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 3) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, ecc_cfg->cw_size) |
+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
+
+ ecc_cfg->cfg1_raw = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 0) |
+ FIELD_PREP(CS_ACTIVE_BSY, 0) |
+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 20) |
+ FIELD_PREP(WIDE_FLASH, 0) |
+ FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
+
+ ecc_cfg->ecc_bch_cfg = FIELD_PREP(ECC_CFG_ECC_DISABLE, !ecc_cfg->bch_enabled) |
+ FIELD_PREP(ECC_SW_RESET, 0) |
+ FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, ecc_cfg->cw_data) |
+ FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) |
+ FIELD_PREP(ECC_MODE_MASK, 0) |
+ FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, ecc_cfg->ecc_bytes_hw);
+
+ ecc_cfg->ecc_buf_cfg = 0x203 << NUM_STEPS;
+ ecc_cfg->clrflashstatus = FS_READY_BSY_N;
+ ecc_cfg->clrreadstatus = 0xc0;
+
+ conf->step_size = ecc_cfg->step_size;
+ conf->strength = ecc_cfg->strength;
+
+ snandc->regs->erased_cw_detect_cfg_clr = cpu_to_le32(CLR_ERASED_PAGE_DET);
+ snandc->regs->erased_cw_detect_cfg_set = cpu_to_le32(SET_ERASED_PAGE_DET);
+
+ dev_dbg(snandc->dev, "ECC strength: %u bits per %u bytes\n",
+ ecc_cfg->strength, ecc_cfg->step_size);
+
+ return 0;
+}
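/*
 * Editorial worked example of the geometry configured above, for a typical
 * 2048-byte page (values follow directly from the assignments in this
 * function; the page size is an assumption for illustration):
 *   cwperpage      = 2048 / 512 (NANDC_STEP_SIZE)      = 4 codewords
 *   bytes          = 7 (ECC) + 4 (spare) + 1 (BBM)     = 12
 *   cw_size        = 516 (cw_data) + 12                = 528
 *   bad_block_byte = 2048 - 528 * (4 - 1) + 1          = 465
 * so the last codeword starts at column 3 * 528 = 1584 and the controller
 * sees the bad block marker 465 bytes into the page.
 */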
+
+static void qcom_spi_ecc_cleanup_ctx_pipelined(struct nand_device *nand)
+{
+ struct qpic_ecc *ecc_cfg = nand_to_ecc_ctx(nand);
+
+ kfree(ecc_cfg);
+}
+
+static int qcom_spi_ecc_prepare_io_req_pipelined(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
+ struct qpic_ecc *ecc_cfg = nand_to_ecc_ctx(nand);
+
+ snandc->qspi->ecc = ecc_cfg;
+ snandc->qspi->raw_rw = false;
+ snandc->qspi->oob_rw = false;
+ snandc->qspi->page_rw = false;
+
+ if (req->datalen)
+ snandc->qspi->page_rw = true;
+
+ if (req->ooblen)
+ snandc->qspi->oob_rw = true;
+
+ if (req->mode == MTD_OPS_RAW)
+ snandc->qspi->raw_rw = true;
+
+ return 0;
+}
+
+static int qcom_spi_ecc_finish_io_req_pipelined(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+
+ if (req->mode == MTD_OPS_RAW || req->type != NAND_PAGE_READ)
+ return 0;
+
+ if (snandc->qspi->ecc_stats.failed)
+ mtd->ecc_stats.failed += snandc->qspi->ecc_stats.failed;
+ else
+ mtd->ecc_stats.corrected += snandc->qspi->ecc_stats.corrected;
+
+ if (snandc->qspi->ecc_stats.failed)
+ return -EBADMSG;
+ else
+ return snandc->qspi->ecc_stats.bitflips;
+}
+
+static struct nand_ecc_engine_ops qcom_spi_ecc_engine_ops_pipelined = {
+ .init_ctx = qcom_spi_ecc_init_ctx_pipelined,
+ .cleanup_ctx = qcom_spi_ecc_cleanup_ctx_pipelined,
+ .prepare_io_req = qcom_spi_ecc_prepare_io_req_pipelined,
+ .finish_io_req = qcom_spi_ecc_finish_io_req_pipelined,
+};
+
+/* helper to configure location register values */
+static void qcom_spi_set_read_loc(struct qcom_nand_controller *snandc, int cw, int reg,
+ int cw_offset, int read_size, int is_last_read_loc)
+{
+ int reg_base = NAND_READ_LOCATION_0;
+ int num_cw = snandc->qspi->num_cw;
+
+ if (cw == (num_cw - 1))
+ reg_base = NAND_READ_LOCATION_LAST_CW_0;
+
+ reg_base += reg * 4;
+
+ if (cw == (num_cw - 1))
+ return qcom_spi_set_read_loc_last(snandc, reg_base, cw_offset,
+ read_size, is_last_read_loc);
+ else
+ return qcom_spi_set_read_loc_first(snandc, reg_base, cw_offset,
+ read_size, is_last_read_loc);
+}
+
+static void
+qcom_spi_config_cw_read(struct qcom_nand_controller *snandc, bool use_ecc, int cw)
+{
+ __le32 *reg = &snandc->regs->read_location0;
+ int num_cw = snandc->qspi->num_cw;
+
+ qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
+ if (cw == (num_cw - 1)) {
+ reg = &snandc->regs->read_location_last0;
+ qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_LAST_CW_0, 4,
+ NAND_BAM_NEXT_SGL);
+ }
+
+ qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 2, 0);
+ qcom_read_reg_dma(snandc, NAND_ERASED_CW_DETECT_STATUS, 1,
+ NAND_BAM_NEXT_SGL);
+}
+
+static int qcom_spi_block_erase(struct qcom_nand_controller *snandc)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ int ret;
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->addr0 = snandc->qspi->addr1;
+ snandc->regs->addr1 = snandc->qspi->addr2;
+ snandc->regs->cfg0 = cpu_to_le32(ecc_cfg->cfg0_raw & ~(7 << CW_PER_PAGE));
+ snandc->regs->cfg1 = cpu_to_le32(ecc_cfg->cfg1_raw);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure to erase block\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qcom_spi_config_single_cw_page_read(struct qcom_nand_controller *snandc,
+ bool use_ecc, int cw)
+{
+ __le32 *reg = &snandc->regs->read_location0;
+ int num_cw = snandc->qspi->num_cw;
+
+ qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
+ NAND_ERASED_CW_DETECT_CFG, 1, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
+ NAND_ERASED_CW_DETECT_CFG, 1,
+ NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+
+ if (cw == (num_cw - 1)) {
+ reg = &snandc->regs->read_location_last0;
+ qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_LAST_CW_0, 4, NAND_BAM_NEXT_SGL);
+ }
+ qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1, 0);
+}
+
+static int qcom_spi_read_last_cw(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ struct mtd_info *mtd = snandc->qspi->mtd;
+ int size, ret = 0;
+ int col, bbpos;
+ u32 cfg0, cfg1, ecc_bch_cfg;
+ u32 num_cw = snandc->qspi->num_cw;
+
+ qcom_clear_bam_transaction(snandc);
+ qcom_clear_read_regs(snandc);
+
+ size = ecc_cfg->cw_size;
+ col = ecc_cfg->cw_size * (num_cw - 1);
+
+ memset(snandc->data_buffer, 0xff, size);
+ snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
+ snandc->regs->addr1 = snandc->qspi->addr2;
+
+ cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) |
+ 0 << CW_PER_PAGE;
+ cfg1 = ecc_cfg->cfg1_raw;
+ ecc_bch_cfg = ECC_CFG_ECC_DISABLE;
+
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->cfg0 = cpu_to_le32(cfg0);
+ snandc->regs->cfg1 = cpu_to_le32(cfg1);
+ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
+ snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
+ snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ qcom_spi_set_read_loc(snandc, num_cw - 1, 0, 0, ecc_cfg->cw_size, 1);
+
+ qcom_spi_config_single_cw_page_read(snandc, false, num_cw - 1);
+
+ qcom_read_data_dma(snandc, FLASH_BUF_ACC, snandc->data_buffer, size, 0);
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failed to read last cw\n");
+ return ret;
+ }
+
+ qcom_nandc_dev_to_mem(snandc, true);
+ u32 flash = le32_to_cpu(snandc->reg_read_buf[0]);
+
+ if (flash & (FS_OP_ERR | FS_MPU_ERR))
+ return -EIO;
+
+ bbpos = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);
+
+ if (snandc->data_buffer[bbpos] == 0xff)
+ snandc->data_buffer[bbpos + 1] = 0xff;
+ if (snandc->data_buffer[bbpos] != 0xff)
+ snandc->data_buffer[bbpos + 1] = snandc->data_buffer[bbpos];
+
+ memcpy(op->data.buf.in, snandc->data_buffer + bbpos, op->data.nbytes);
+
+ return ret;
+}
+
+static int qcom_spi_check_error(struct qcom_nand_controller *snandc, u8 *data_buf, u8 *oob_buf)
+{
+ struct snandc_read_status *buf;
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ int i, num_cw = snandc->qspi->num_cw;
+ bool flash_op_err = false, erased;
+ unsigned int max_bitflips = 0;
+ unsigned int uncorrectable_cws = 0;
+
+ snandc->qspi->ecc_stats.failed = 0;
+ snandc->qspi->ecc_stats.corrected = 0;
+
+ qcom_nandc_dev_to_mem(snandc, true);
+ buf = (struct snandc_read_status *)snandc->reg_read_buf;
+
+ for (i = 0; i < num_cw; i++, buf++) {
+ u32 flash, buffer, erased_cw;
+ int data_len, oob_len;
+
+ if (i == (num_cw - 1)) {
+ data_len = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
+ oob_len = num_cw << 2;
+ } else {
+ data_len = ecc_cfg->cw_data;
+ oob_len = 0;
+ }
+
+ flash = le32_to_cpu(buf->snandc_flash);
+ buffer = le32_to_cpu(buf->snandc_buffer);
+ erased_cw = le32_to_cpu(buf->snandc_erased_cw);
+
+ if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
+ if (ecc_cfg->bch_enabled)
+ erased = (erased_cw & ERASED_CW) == ERASED_CW;
+ else
+ erased = false;
+
+ if (!erased)
+ uncorrectable_cws |= BIT(i);
+
+ } else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
+ flash_op_err = true;
+ } else {
+ unsigned int stat;
+
+ stat = buffer & BS_CORRECTABLE_ERR_MSK;
+ snandc->qspi->ecc_stats.corrected += stat;
+ max_bitflips = max(max_bitflips, stat);
+ }
+
+ if (data_buf)
+ data_buf += data_len;
+ if (oob_buf)
+ oob_buf += oob_len + ecc_cfg->bytes;
+ }
+
+ if (flash_op_err)
+ return -EIO;
+
+ if (!uncorrectable_cws)
+ snandc->qspi->ecc_stats.bitflips = max_bitflips;
+ else
+ snandc->qspi->ecc_stats.failed++;
+
+ return 0;
+}
+
+static int qcom_spi_check_raw_flash_errors(struct qcom_nand_controller *snandc, int cw_cnt)
+{
+ int i;
+
+ qcom_nandc_dev_to_mem(snandc, true);
+
+ for (i = 0; i < cw_cnt; i++) {
+ u32 flash = le32_to_cpu(snandc->reg_read_buf[i]);
+
+ if (flash & (FS_OP_ERR | FS_MPU_ERR))
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int qcom_spi_read_cw_raw(struct qcom_nand_controller *snandc, u8 *data_buf,
+ u8 *oob_buf, int cw)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ struct mtd_info *mtd = snandc->qspi->mtd;
+ int data_size1, data_size2, oob_size1, oob_size2;
+ int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;
+ int raw_cw = cw;
+ u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;
+ int col;
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+ raw_cw = num_cw - 1;
+
+ cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) |
+ 0 << CW_PER_PAGE;
+ cfg1 = ecc_cfg->cfg1_raw;
+ ecc_bch_cfg = ECC_CFG_ECC_DISABLE;
+
+ col = ecc_cfg->cw_size * cw;
+
+ snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
+ snandc->regs->addr1 = snandc->qspi->addr2;
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->cfg0 = cpu_to_le32(cfg0);
+ snandc->regs->cfg1 = cpu_to_le32(cfg1);
+ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
+ snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
+ snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ qcom_spi_set_read_loc(snandc, raw_cw, 0, 0, ecc_cfg->cw_size, 1);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
+ NAND_ERASED_CW_DETECT_CFG, 1, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
+ NAND_ERASED_CW_DETECT_CFG, 1,
+ NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+
+ data_size1 = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);
+ oob_size1 = ecc_cfg->bbm_size;
+
+ if (cw == (num_cw - 1)) {
+ data_size2 = NANDC_STEP_SIZE - data_size1 -
+ ((num_cw - 1) * 4);
+ oob_size2 = (num_cw * 4) + ecc_cfg->ecc_bytes_hw +
+ ecc_cfg->spare_bytes;
+ } else {
+ data_size2 = ecc_cfg->cw_data - data_size1;
+ oob_size2 = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
+ }
+
+ qcom_spi_set_read_loc(snandc, cw, 0, read_loc, data_size1, 0);
+ read_loc += data_size1;
+
+ qcom_spi_set_read_loc(snandc, cw, 1, read_loc, oob_size1, 0);
+ read_loc += oob_size1;
+
+ qcom_spi_set_read_loc(snandc, cw, 2, read_loc, data_size2, 0);
+ read_loc += data_size2;
+
+ qcom_spi_set_read_loc(snandc, cw, 3, read_loc, oob_size2, 1);
+
+ qcom_spi_config_cw_read(snandc, false, raw_cw);
+
+ qcom_read_data_dma(snandc, reg_off, data_buf, data_size1, 0);
+ reg_off += data_size1;
+
+ qcom_read_data_dma(snandc, reg_off, oob_buf, oob_size1, 0);
+ reg_off += oob_size1;
+
+ qcom_read_data_dma(snandc, reg_off, data_buf + data_size1, data_size2, 0);
+ reg_off += data_size2;
+
+ qcom_read_data_dma(snandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure to read raw cw %d\n", cw);
+ return ret;
+ }
+
+ return qcom_spi_check_raw_flash_errors(snandc, 1);
+}
+
+static int qcom_spi_read_page_raw(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ u8 *data_buf = NULL, *oob_buf = NULL;
+ int ret, cw;
+ u32 num_cw = snandc->qspi->num_cw;
+
+ if (snandc->qspi->page_rw)
+ data_buf = op->data.buf.in;
+
+ oob_buf = snandc->qspi->oob_buf;
+ memset(oob_buf, 0xff, OOB_BUF_SIZE);
+
+ for (cw = 0; cw < num_cw; cw++) {
+ ret = qcom_spi_read_cw_raw(snandc, data_buf, oob_buf, cw);
+ if (ret)
+ return ret;
+
+ if (data_buf)
+ data_buf += ecc_cfg->cw_data;
+ if (oob_buf)
+ oob_buf += ecc_cfg->bytes;
+ }
+
+ return 0;
+}
+
+static int qcom_spi_read_page_ecc(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ u8 *data_buf = NULL, *data_buf_start, *oob_buf = NULL, *oob_buf_start;
+ int ret, i;
+ u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;
+
+ data_buf = op->data.buf.in;
+ data_buf_start = data_buf;
+
+ oob_buf = snandc->qspi->oob_buf;
+ oob_buf_start = oob_buf;
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+
+ cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
+ (num_cw - 1) << CW_PER_PAGE;
+ cfg1 = ecc_cfg->cfg1;
+ ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
+
+ snandc->regs->addr0 = snandc->qspi->addr1;
+ snandc->regs->addr1 = snandc->qspi->addr2;
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->cfg0 = cpu_to_le32(cfg0);
+ snandc->regs->cfg1 = cpu_to_le32(cfg1);
+ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
+ snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
+ snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1);
+
+ qcom_clear_bam_transaction(snandc);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
+ NAND_ERASED_CW_DETECT_CFG, 1, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
+ NAND_ERASED_CW_DETECT_CFG, 1,
+ NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+
+ for (i = 0; i < num_cw; i++) {
+ int data_size, oob_size;
+
+ if (i == (num_cw - 1)) {
+ data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
+ oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
+ ecc_cfg->spare_bytes;
+ } else {
+ data_size = ecc_cfg->cw_data;
+ oob_size = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
+ }
+
+ if (data_buf && oob_buf) {
+ qcom_spi_set_read_loc(snandc, i, 0, 0, data_size, 0);
+ qcom_spi_set_read_loc(snandc, i, 1, data_size, oob_size, 1);
+ } else if (data_buf) {
+ qcom_spi_set_read_loc(snandc, i, 0, 0, data_size, 1);
+ } else {
+ qcom_spi_set_read_loc(snandc, i, 0, data_size, oob_size, 1);
+ }
+
+ qcom_spi_config_cw_read(snandc, true, i);
+
+ if (data_buf)
+ qcom_read_data_dma(snandc, FLASH_BUF_ACC, data_buf,
+ data_size, 0);
+ if (oob_buf) {
+ int j;
+
+ for (j = 0; j < ecc_cfg->bbm_size; j++)
+ *oob_buf++ = 0xff;
+
+ qcom_read_data_dma(snandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
+ }
+
+ if (data_buf)
+ data_buf += data_size;
+ if (oob_buf)
+ oob_buf += oob_size;
+ }
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure to read page\n");
+ return ret;
+ }
+
+ return qcom_spi_check_error(snandc, data_buf_start, oob_buf_start);
+}
+
+static int qcom_spi_read_page_oob(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ u8 *data_buf = NULL, *data_buf_start, *oob_buf = NULL, *oob_buf_start;
+ int ret, i;
+ u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;
+
+ oob_buf = op->data.buf.in;
+ oob_buf_start = oob_buf;
+
+ data_buf_start = data_buf;
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+
+ cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
+ (num_cw - 1) << CW_PER_PAGE;
+ cfg1 = ecc_cfg->cfg1;
+ ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
+
+ snandc->regs->addr0 = snandc->qspi->addr1;
+ snandc->regs->addr1 = snandc->qspi->addr2;
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->cfg0 = cpu_to_le32(cfg0);
+ snandc->regs->cfg1 = cpu_to_le32(cfg1);
+ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
+ snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
+ snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
+ NAND_ERASED_CW_DETECT_CFG, 1, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
+ NAND_ERASED_CW_DETECT_CFG, 1,
+ NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+
+ for (i = 0; i < num_cw; i++) {
+ int data_size, oob_size;
+
+ if (i == (num_cw - 1)) {
+ data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
+ oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
+ ecc_cfg->spare_bytes;
+ } else {
+ data_size = ecc_cfg->cw_data;
+ oob_size = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
+ }
+
+ qcom_spi_set_read_loc(snandc, i, 0, data_size, oob_size, 1);
+
+ qcom_spi_config_cw_read(snandc, true, i);
+
+ if (oob_buf) {
+ int j;
+
+ for (j = 0; j < ecc_cfg->bbm_size; j++)
+ *oob_buf++ = 0xff;
+
+ qcom_read_data_dma(snandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
+ }
+
+ if (oob_buf)
+ oob_buf += oob_size;
+ }
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure to read oob\n");
+ return ret;
+ }
+
+ return qcom_spi_check_error(snandc, data_buf_start, oob_buf_start);
+}
+
+static int qcom_spi_read_page(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ if (snandc->qspi->page_rw && snandc->qspi->raw_rw)
+ return qcom_spi_read_page_raw(snandc, op);
+
+ if (snandc->qspi->page_rw)
+ return qcom_spi_read_page_ecc(snandc, op);
+
+ if (snandc->qspi->oob_rw && snandc->qspi->raw_rw)
+ return qcom_spi_read_last_cw(snandc, op);
+
+ if (snandc->qspi->oob_rw)
+ return qcom_spi_read_page_oob(snandc, op);
+
+ return 0;
+}
+
+static void qcom_spi_config_page_write(struct qcom_nand_controller *snandc)
+{
+ qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG,
+ 1, NAND_BAM_NEXT_SGL);
+}
+
+static void qcom_spi_config_cw_write(struct qcom_nand_controller *snandc)
+{
+ qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->clrreadstatus, NAND_READ_STATUS, 1,
+ NAND_BAM_NEXT_SGL);
+}
+
+static int qcom_spi_program_raw(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ struct mtd_info *mtd = snandc->qspi->mtd;
+ u8 *data_buf = NULL, *oob_buf = NULL;
+ int i, ret;
+ int num_cw = snandc->qspi->num_cw;
+ u32 cfg0, cfg1, ecc_bch_cfg;
+
+ cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) |
+ (num_cw - 1) << CW_PER_PAGE;
+ cfg1 = ecc_cfg->cfg1_raw;
+ ecc_bch_cfg = ECC_CFG_ECC_DISABLE;
+
+ data_buf = snandc->qspi->data_buf;
+
+ oob_buf = snandc->qspi->oob_buf;
+ memset(oob_buf, 0xff, OOB_BUF_SIZE);
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+
+ snandc->regs->addr0 = snandc->qspi->addr1;
+ snandc->regs->addr1 = snandc->qspi->addr2;
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->cfg0 = cpu_to_le32(cfg0);
+ snandc->regs->cfg1 = cpu_to_le32(cfg1);
+ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
+ snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
+ snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ qcom_spi_config_page_write(snandc);
+
+ for (i = 0; i < num_cw; i++) {
+ int data_size1, data_size2, oob_size1, oob_size2;
+ int reg_off = FLASH_BUF_ACC;
+
+ data_size1 = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);
+ oob_size1 = ecc_cfg->bbm_size;
+
+ if (i == (num_cw - 1)) {
+ data_size2 = NANDC_STEP_SIZE - data_size1 -
+ ((num_cw - 1) << 2);
+ oob_size2 = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
+ ecc_cfg->spare_bytes;
+ } else {
+ data_size2 = ecc_cfg->cw_data - data_size1;
+ oob_size2 = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
+ }
+
+ qcom_write_data_dma(snandc, reg_off, data_buf, data_size1,
+ NAND_BAM_NO_EOT);
+ reg_off += data_size1;
+ data_buf += data_size1;
+
+ qcom_write_data_dma(snandc, reg_off, oob_buf, oob_size1,
+ NAND_BAM_NO_EOT);
+ oob_buf += oob_size1;
+ reg_off += oob_size1;
+
+ qcom_write_data_dma(snandc, reg_off, data_buf, data_size2,
+ NAND_BAM_NO_EOT);
+ reg_off += data_size2;
+ data_buf += data_size2;
+
+ qcom_write_data_dma(snandc, reg_off, oob_buf, oob_size2, 0);
+ oob_buf += oob_size2;
+
+ qcom_spi_config_cw_write(snandc);
+ }
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure to write raw page\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_spi_program_ecc(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ u8 *data_buf = NULL, *oob_buf = NULL;
+ int i, ret;
+ int num_cw = snandc->qspi->num_cw;
+ u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg;
+
+ cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
+ (num_cw - 1) << CW_PER_PAGE;
+ cfg1 = ecc_cfg->cfg1;
+ ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
+ ecc_buf_cfg = ecc_cfg->ecc_buf_cfg;
+
+ if (snandc->qspi->data_buf)
+ data_buf = snandc->qspi->data_buf;
+
+ oob_buf = snandc->qspi->oob_buf;
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+
+ snandc->regs->addr0 = snandc->qspi->addr1;
+ snandc->regs->addr1 = snandc->qspi->addr2;
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->cfg0 = cpu_to_le32(cfg0);
+ snandc->regs->cfg1 = cpu_to_le32(cfg1);
+ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
+ snandc->regs->ecc_buf_cfg = cpu_to_le32(ecc_buf_cfg);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ qcom_spi_config_page_write(snandc);
+
+ for (i = 0; i < num_cw; i++) {
+ int data_size, oob_size;
+
+ if (i == (num_cw - 1)) {
+ data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
+ oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
+ ecc_cfg->spare_bytes;
+ } else {
+ data_size = ecc_cfg->cw_data;
+ oob_size = ecc_cfg->bytes;
+ }
+
+ if (data_buf)
+ qcom_write_data_dma(snandc, FLASH_BUF_ACC, data_buf, data_size,
+ i == (num_cw - 1) ? NAND_BAM_NO_EOT : 0);
+
+ if (i == (num_cw - 1)) {
+ if (oob_buf) {
+ oob_buf += ecc_cfg->bbm_size;
+ qcom_write_data_dma(snandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
+ }
+ }
+
+ qcom_spi_config_cw_write(snandc);
+
+ if (data_buf)
+ data_buf += data_size;
+ if (oob_buf)
+ oob_buf += oob_size;
+ }
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure to write page\n");
+ return ret;
+ }
+
+ return 0;
+}
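+
+/*
+ * Worked example for the codeword split above (illustrative sizes, not part
+ * of the original patch): with a 2048-byte page, NANDC_STEP_SIZE = 512 and
+ * num_cw = 4, the first three codewords carry cw_data = 516 bytes each and
+ * the last one carries 512 - ((4 - 1) << 2) = 500 bytes, so that
+ * 3 * 516 + 500 = 2048; the remaining (4 << 2) = 16 bytes plus the hardware
+ * ECC and spare bytes travel with the last codeword's OOB.
+ */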
+
+static int qcom_spi_program_oob(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ u8 *oob_buf = NULL;
+ int ret, col, data_size, oob_size;
+ int num_cw = snandc->qspi->num_cw;
+ u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg;
+
+ cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
+ (num_cw - 1) << CW_PER_PAGE;
+ cfg1 = ecc_cfg->cfg1;
+ ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
+ ecc_buf_cfg = ecc_cfg->ecc_buf_cfg;
+
+ col = ecc_cfg->cw_size * (num_cw - 1);
+
+ oob_buf = snandc->qspi->data_buf;
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+ snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
+ snandc->regs->addr1 = snandc->qspi->addr2;
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->cfg0 = cpu_to_le32(cfg0);
+ snandc->regs->cfg1 = cpu_to_le32(cfg1);
+ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
+ snandc->regs->ecc_buf_cfg = cpu_to_le32(ecc_buf_cfg);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ /* calculate the data and oob size for the last codeword/step */
+ data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
+ oob_size = snandc->qspi->mtd->oobavail;
+
+ memset(snandc->data_buffer, 0xff, ecc_cfg->cw_data);
+ /* write the new OOB content into the last codeword */
+ mtd_ooblayout_get_databytes(snandc->qspi->mtd, snandc->data_buffer + data_size,
+ oob_buf, 0, snandc->qspi->mtd->oobavail);
+ qcom_spi_config_page_write(snandc);
+ qcom_write_data_dma(snandc, FLASH_BUF_ACC, snandc->data_buffer, data_size + oob_size, 0);
+ qcom_spi_config_cw_write(snandc);
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure to write oob\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_spi_program_execute(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ if (snandc->qspi->page_rw && snandc->qspi->raw_rw)
+ return qcom_spi_program_raw(snandc, op);
+
+ if (snandc->qspi->page_rw)
+ return qcom_spi_program_ecc(snandc, op);
+
+ if (snandc->qspi->oob_rw)
+ return qcom_spi_program_oob(snandc, op);
+
+ return 0;
+}
+
+static int qcom_spi_cmd_mapping(struct qcom_nand_controller *snandc, u32 opcode, u32 *cmd)
+{
+ switch (opcode) {
+ case SPINAND_RESET:
+ *cmd = (SPI_WP | SPI_HOLD | SPI_TRANSFER_MODE_x1 | OP_RESET_DEVICE);
+ break;
+ case SPINAND_READID:
+ *cmd = (SPI_WP | SPI_HOLD | SPI_TRANSFER_MODE_x1 | OP_FETCH_ID);
+ break;
+ case SPINAND_GET_FEATURE:
+ *cmd = (SPI_TRANSFER_MODE_x1 | SPI_WP | SPI_HOLD | ACC_FEATURE);
+ break;
+ case SPINAND_SET_FEATURE:
+ *cmd = (SPI_TRANSFER_MODE_x1 | SPI_WP | SPI_HOLD | ACC_FEATURE |
+ QPIC_SET_FEATURE);
+ break;
+ case SPINAND_READ:
+ if (snandc->qspi->raw_rw) {
+ *cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 |
+ SPI_WP | SPI_HOLD | OP_PAGE_READ);
+ } else {
+ *cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 |
+ SPI_WP | SPI_HOLD | OP_PAGE_READ_WITH_ECC);
+ }
+
+ break;
+ case SPINAND_ERASE:
+ *cmd = OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE | SPI_WP |
+ SPI_HOLD | SPI_TRANSFER_MODE_x1;
+ break;
+ case SPINAND_WRITE_EN:
+ *cmd = SPINAND_WRITE_EN;
+ break;
+ case SPINAND_PROGRAM_EXECUTE:
+ *cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 |
+ SPI_WP | SPI_HOLD | OP_PROGRAM_PAGE);
+ break;
+ case SPINAND_PROGRAM_LOAD:
+ *cmd = SPINAND_PROGRAM_LOAD;
+ break;
+ default:
+ dev_err(snandc->dev, "Opcode not supported: %u\n", opcode);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int qcom_spi_write_page(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ int ret;
+ u32 cmd;
+
+ ret = qcom_spi_cmd_mapping(snandc, op->cmd.opcode, &cmd);
+ if (ret < 0)
+ return ret;
+
+ if (op->cmd.opcode == SPINAND_PROGRAM_LOAD)
+ snandc->qspi->data_buf = (u8 *)op->data.buf.out;
+
+ return 0;
+}
+
+static int qcom_spi_send_cmdaddr(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_snand_op s_op = {};
+ u32 cmd;
+ int ret, opcode;
+
+ ret = qcom_spi_cmd_mapping(snandc, op->cmd.opcode, &cmd);
+ if (ret < 0)
+ return ret;
+
+ s_op.cmd_reg = cmd;
+ s_op.addr1_reg = op->addr.val;
+ s_op.addr2_reg = 0;
+
+ opcode = op->cmd.opcode;
+
+ switch (opcode) {
+ case SPINAND_WRITE_EN:
+ return 0;
+ case SPINAND_PROGRAM_EXECUTE:
+ s_op.addr1_reg = op->addr.val << 16;
+ s_op.addr2_reg = op->addr.val >> 16 & 0xff;
+ snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg);
+ snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg);
+ snandc->qspi->cmd = cpu_to_le32(cmd);
+ return qcom_spi_program_execute(snandc, op);
+ case SPINAND_READ:
+ s_op.addr1_reg = (op->addr.val << 16);
+ s_op.addr2_reg = op->addr.val >> 16 & 0xff;
+ snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg);
+ snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg);
+ snandc->qspi->cmd = cpu_to_le32(cmd);
+ return 0;
+ case SPINAND_ERASE:
+ s_op.addr2_reg = (op->addr.val >> 16) & 0xffff;
+ s_op.addr1_reg = op->addr.val;
+ snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg << 16);
+ snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg);
+ snandc->qspi->cmd = cpu_to_le32(cmd);
+ qcom_spi_block_erase(snandc);
+ return 0;
+ default:
+ break;
+ }
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+
+ snandc->regs->cmd = cpu_to_le32(s_op.cmd_reg);
+ snandc->regs->exec = cpu_to_le32(1);
+ snandc->regs->addr0 = cpu_to_le32(s_op.addr1_reg);
+ snandc->regs->addr1 = cpu_to_le32(s_op.addr2_reg);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ ret = qcom_submit_descs(snandc);
+ if (ret)
+ dev_err(snandc->dev, "failure in submitting cmd descriptor\n");
+
+ return ret;
+}
+
+static int qcom_spi_io_op(struct qcom_nand_controller *snandc, const struct spi_mem_op *op)
+{
+ int ret, val, opcode;
+ bool copy = false, copy_ftr = false;
+
+ ret = qcom_spi_send_cmdaddr(snandc, op);
+ if (ret)
+ return ret;
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+ opcode = op->cmd.opcode;
+
+ switch (opcode) {
+ case SPINAND_READID:
+ snandc->buf_count = 4;
+ qcom_read_reg_dma(snandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
+ copy = true;
+ break;
+ case SPINAND_GET_FEATURE:
+ snandc->buf_count = 4;
+ qcom_read_reg_dma(snandc, NAND_FLASH_FEATURES, 1, NAND_BAM_NEXT_SGL);
+ copy_ftr = true;
+ break;
+ case SPINAND_SET_FEATURE:
+ snandc->regs->flash_feature = cpu_to_le32(*(u32 *)op->data.buf.out);
+ qcom_write_reg_dma(snandc, &snandc->regs->flash_feature,
+ NAND_FLASH_FEATURES, 1, NAND_BAM_NEXT_SGL);
+ break;
+ case SPINAND_PROGRAM_EXECUTE:
+ case SPINAND_WRITE_EN:
+ case SPINAND_RESET:
+ case SPINAND_ERASE:
+ case SPINAND_READ:
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = qcom_submit_descs(snandc);
+ if (ret)
+ dev_err(snandc->dev, "failure in submitting descriptor for:%d\n", opcode);
+
+ if (copy) {
+ qcom_nandc_dev_to_mem(snandc, true);
+ memcpy(op->data.buf.in, snandc->reg_read_buf, snandc->buf_count);
+ }
+
+ if (copy_ftr) {
+ qcom_nandc_dev_to_mem(snandc, true);
+ val = le32_to_cpu(*(__le32 *)snandc->reg_read_buf);
+ val >>= 8;
+ memcpy(op->data.buf.in, &val, snandc->buf_count);
+ }
+
+ return ret;
+}
+
+static bool qcom_spi_is_page_op(const struct spi_mem_op *op)
+{
+ if (op->addr.buswidth != 1 && op->addr.buswidth != 2 && op->addr.buswidth != 4)
+ return false;
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ if (op->addr.buswidth == 4 && op->data.buswidth == 4)
+ return true;
+
+ if (op->addr.nbytes == 2 && op->addr.buswidth == 1)
+ return true;
+
+ } else if (op->data.dir == SPI_MEM_DATA_OUT) {
+ if (op->data.buswidth == 4)
+ return true;
+ if (op->addr.nbytes == 2 && op->addr.buswidth == 1)
+ return true;
+ }
+
+ return false;
+}
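+
+/*
+ * Illustrative sketch (not part of the original patch): examples of SPI NAND
+ * "read from cache" ops that qcom_spi_is_page_op() classifies as page ops,
+ * expressed with the generic spi-mem helpers. Opcodes, dummy lengths and the
+ * 'buf' pointer are assumptions for the example only.
+ *
+ *	// quad I/O read from cache: 2-byte column address and data on 4 lines
+ *	struct spi_mem_op rd_x4 = SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1),
+ *					     SPI_MEM_OP_ADDR(2, 0, 4),
+ *					     SPI_MEM_OP_DUMMY(2, 4),
+ *					     SPI_MEM_OP_DATA_IN(2048, buf, 4));
+ *
+ *	// plain x1 read from cache: 2-byte column address on a single line
+ *	struct spi_mem_op rd_x1 = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
+ *					     SPI_MEM_OP_ADDR(2, 0, 1),
+ *					     SPI_MEM_OP_DUMMY(1, 1),
+ *					     SPI_MEM_OP_DATA_IN(2048, buf, 1));
+ */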
+
+static bool qcom_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ if (!spi_mem_default_supports_op(mem, op))
+ return false;
+
+ if (op->cmd.nbytes != 1 || op->cmd.buswidth != 1)
+ return false;
+
+ if (qcom_spi_is_page_op(op))
+ return true;
+
+ return ((!op->addr.nbytes || op->addr.buswidth == 1) &&
+ (!op->dummy.nbytes || op->dummy.buswidth == 1) &&
+ (!op->data.nbytes || op->data.buswidth == 1));
+}
+
+static int qcom_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct qcom_nand_controller *snandc = spi_controller_get_devdata(mem->spi->controller);
+
+ dev_dbg(snandc->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u", op->cmd.opcode,
+ op->addr.val, op->addr.buswidth, op->addr.nbytes,
+ op->data.buswidth, op->data.nbytes);
+
+ if (qcom_spi_is_page_op(op)) {
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ return qcom_spi_read_page(snandc, op);
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ return qcom_spi_write_page(snandc, op);
+ } else {
+ return qcom_spi_io_op(snandc, op);
+ }
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops qcom_spi_mem_ops = {
+ .supports_op = qcom_spi_supports_op,
+ .exec_op = qcom_spi_exec_op,
+};
+
+static const struct spi_controller_mem_caps qcom_spi_mem_caps = {
+ .ecc = true,
+};
+
+static int qcom_spi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_controller *ctlr;
+ struct qcom_nand_controller *snandc;
+ struct qpic_spi_nand *qspi;
+ struct qpic_ecc *ecc;
+ struct resource *res;
+ const void *dev_data;
+ int ret;
+
+ ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
+ if (!ecc)
+ return -ENOMEM;
+
+ qspi = devm_kzalloc(dev, sizeof(*qspi), GFP_KERNEL);
+ if (!qspi)
+ return -ENOMEM;
+
+ ctlr = __devm_spi_alloc_controller(dev, sizeof(*snandc), false);
+ if (!ctlr)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ctlr);
+
+ snandc = spi_controller_get_devdata(ctlr);
+ qspi->snandc = snandc;
+
+ snandc->dev = dev;
+ snandc->qspi = qspi;
+ snandc->qspi->ctlr = ctlr;
+ snandc->qspi->ecc = ecc;
+
+ dev_data = of_device_get_match_data(dev);
+ if (!dev_data) {
+ dev_err(&pdev->dev, "failed to get device data\n");
+ return -ENODEV;
+ }
+
+ snandc->props = dev_data;
+ snandc->dev = &pdev->dev;
+
+ snandc->core_clk = devm_clk_get(dev, "core");
+ if (IS_ERR(snandc->core_clk))
+ return PTR_ERR(snandc->core_clk);
+
+ snandc->aon_clk = devm_clk_get(dev, "aon");
+ if (IS_ERR(snandc->aon_clk))
+ return PTR_ERR(snandc->aon_clk);
+
+ snandc->qspi->iomacro_clk = devm_clk_get(dev, "iom");
+ if (IS_ERR(snandc->qspi->iomacro_clk))
+ return PTR_ERR(snandc->qspi->iomacro_clk);
+
+ snandc->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(snandc->base))
+ return PTR_ERR(snandc->base);
+
+ snandc->base_phys = res->start;
+ snandc->base_dma = dma_map_resource(dev, res->start, resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+ if (dma_mapping_error(dev, snandc->base_dma))
+ return -ENXIO;
+
+ ret = clk_prepare_enable(snandc->core_clk);
+ if (ret)
+ goto err_dis_core_clk;
+
+ ret = clk_prepare_enable(snandc->aon_clk);
+ if (ret)
+ goto err_dis_aon_clk;
+
+ ret = clk_prepare_enable(snandc->qspi->iomacro_clk);
+ if (ret)
+ goto err_dis_iom_clk;
+
+ ret = qcom_nandc_alloc(snandc);
+ if (ret)
+ goto err_snand_alloc;
+
+ ret = qcom_spi_init(snandc);
+ if (ret)
+ goto err_spi_init;
+
+ /* setup ECC engine */
+ snandc->qspi->ecc_eng.dev = &pdev->dev;
+ snandc->qspi->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
+ snandc->qspi->ecc_eng.ops = &qcom_spi_ecc_engine_ops_pipelined;
+ snandc->qspi->ecc_eng.priv = snandc;
+
+ ret = nand_ecc_register_on_host_hw_engine(&snandc->qspi->ecc_eng);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register ecc engine:%d\n", ret);
+ goto err_spi_init;
+ }
+
+ ctlr->num_chipselect = QPIC_QSPI_NUM_CS;
+ ctlr->mem_ops = &qcom_spi_mem_ops;
+ ctlr->mem_caps = &qcom_spi_mem_caps;
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->mode_bits = SPI_TX_DUAL | SPI_RX_DUAL |
+ SPI_TX_QUAD | SPI_RX_QUAD;
+
+ ret = spi_register_controller(ctlr);
+ if (ret) {
+ dev_err(&pdev->dev, "spi_register_controller failed.\n");
+ goto err_spi_init;
+ }
+
+ return 0;
+
+err_spi_init:
+ qcom_nandc_unalloc(snandc);
+err_snand_alloc:
+ clk_disable_unprepare(snandc->qspi->iomacro_clk);
+err_dis_iom_clk:
+ clk_disable_unprepare(snandc->aon_clk);
+err_dis_aon_clk:
+ clk_disable_unprepare(snandc->core_clk);
+err_dis_core_clk:
+ dma_unmap_resource(dev, res->start, resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+ return ret;
+}
+
+static void qcom_spi_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr = platform_get_drvdata(pdev);
+ struct qcom_nand_controller *snandc = spi_controller_get_devdata(ctlr);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ spi_unregister_controller(ctlr);
+
+ qcom_nandc_unalloc(snandc);
+
+ clk_disable_unprepare(snandc->aon_clk);
+ clk_disable_unprepare(snandc->core_clk);
+ clk_disable_unprepare(snandc->qspi->iomacro_clk);
+
+ dma_unmap_resource(&pdev->dev, snandc->base_dma, resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+}
+
+static const struct qcom_nandc_props ipq9574_snandc_props = {
+ .dev_cmd_reg_start = 0x7000,
+ .supports_bam = true,
+};
+
+static const struct of_device_id qcom_snandc_of_match[] = {
+ {
+ .compatible = "qcom,ipq9574-snand",
+ .data = &ipq9574_snandc_props,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_snandc_of_match);
+
+static struct platform_driver qcom_spi_driver = {
+ .driver = {
+ .name = "qcom_snand",
+ .of_match_table = qcom_snandc_of_match,
+ },
+ .probe = qcom_spi_probe,
+ .remove = qcom_spi_remove,
+};
+module_platform_driver(qcom_spi_driver);
+
+MODULE_DESCRIPTION("SPI driver for QPIC QSPI cores");
+MODULE_AUTHOR("Md Sadre Alam <quic_mdalam@quicinc.com>");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/spi/spi-realtek-rtl-snand.c b/drivers/spi/spi-realtek-rtl-snand.c
index cd0484041147..741cf2af3e91 100644
--- a/drivers/spi/spi-realtek-rtl-snand.c
+++ b/drivers/spi/spi-realtek-rtl-snand.c
@@ -364,7 +364,6 @@ static int rtl_snand_probe(struct platform_device *pdev)
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .cache_type = REGCACHE_NONE,
};
int irq, ret;
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 389275dbc003..9c47f5741c5f 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -139,7 +139,9 @@ struct s3c64xx_spi_dma_data {
* struct s3c64xx_spi_port_config - SPI Controller hardware info
* @fifo_lvl_mask: [DEPRECATED] use @{rx, tx}_fifomask instead.
* @rx_lvl_offset: [DEPRECATED] use @{rx,tx}_fifomask instead.
- * @fifo_depth: depth of the FIFO.
+ * @fifo_depth: depth of the FIFOs. Used by compatibles where all the instances
+ * of the IP define the same FIFO depth. It has higher precedence
+ * than the FIFO depth specified via DT.
* @rx_fifomask: SPI_STATUS.RX_FIFO_LVL mask. Shifted mask defining the field's
* length and position.
* @tx_fifomask: SPI_STATUS.TX_FIFO_LVL mask. Shifted mask defining the field's
diff --git a/drivers/spi/spi-sg2044-nor.c b/drivers/spi/spi-sg2044-nor.c
new file mode 100644
index 000000000000..a59aa3fc55d2
--- /dev/null
+++ b/drivers/spi/spi-sg2044-nor.c
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SG2044 SPI NOR controller driver
+ *
+ * Copyright (c) 2025 Longbin Li <looong.bin@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi-mem.h>
+
+/* Hardware register definitions */
+#define SPIFMC_CTRL 0x00
+#define SPIFMC_CTRL_CPHA BIT(12)
+#define SPIFMC_CTRL_CPOL BIT(13)
+#define SPIFMC_CTRL_HOLD_OL BIT(14)
+#define SPIFMC_CTRL_WP_OL BIT(15)
+#define SPIFMC_CTRL_LSBF BIT(20)
+#define SPIFMC_CTRL_SRST BIT(21)
+#define SPIFMC_CTRL_SCK_DIV_SHIFT 0
+#define SPIFMC_CTRL_FRAME_LEN_SHIFT 16
+#define SPIFMC_CTRL_SCK_DIV_MASK 0x7FF
+
+#define SPIFMC_CE_CTRL 0x04
+#define SPIFMC_CE_CTRL_CEMANUAL BIT(0)
+#define SPIFMC_CE_CTRL_CEMANUAL_EN BIT(1)
+
+#define SPIFMC_DLY_CTRL 0x08
+#define SPIFMC_CTRL_FM_INTVL_MASK 0x000f
+#define SPIFMC_CTRL_FM_INTVL BIT(0)
+#define SPIFMC_CTRL_CET_MASK 0x0f00
+#define SPIFMC_CTRL_CET BIT(8)
+
+#define SPIFMC_DMMR 0x0c
+
+#define SPIFMC_TRAN_CSR 0x10
+#define SPIFMC_TRAN_CSR_TRAN_MODE_MASK GENMASK(1, 0)
+#define SPIFMC_TRAN_CSR_TRAN_MODE_RX BIT(0)
+#define SPIFMC_TRAN_CSR_TRAN_MODE_TX BIT(1)
+#define SPIFMC_TRAN_CSR_FAST_MODE BIT(3)
+#define SPIFMC_TRAN_CSR_BUS_WIDTH_1_BIT (0x00 << 4)
+#define SPIFMC_TRAN_CSR_BUS_WIDTH_2_BIT (0x01 << 4)
+#define SPIFMC_TRAN_CSR_BUS_WIDTH_4_BIT (0x02 << 4)
+#define SPIFMC_TRAN_CSR_DMA_EN BIT(6)
+#define SPIFMC_TRAN_CSR_MISO_LEVEL BIT(7)
+#define SPIFMC_TRAN_CSR_ADDR_BYTES_MASK GENMASK(10, 8)
+#define SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT 8
+#define SPIFMC_TRAN_CSR_WITH_CMD BIT(11)
+#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_MASK GENMASK(13, 12)
+#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_1_BYTE (0x00 << 12)
+#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_2_BYTE (0x01 << 12)
+#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_4_BYTE (0x02 << 12)
+#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_8_BYTE (0x03 << 12)
+#define SPIFMC_TRAN_CSR_GO_BUSY BIT(15)
+#define SPIFMC_TRAN_CSR_ADDR4B_SHIFT 20
+#define SPIFMC_TRAN_CSR_CMD4B_SHIFT 21
+
+#define SPIFMC_TRAN_NUM 0x14
+#define SPIFMC_FIFO_PORT 0x18
+#define SPIFMC_FIFO_PT 0x20
+
+#define SPIFMC_INT_STS 0x28
+#define SPIFMC_INT_TRAN_DONE BIT(0)
+#define SPIFMC_INT_RD_FIFO BIT(2)
+#define SPIFMC_INT_WR_FIFO BIT(3)
+#define SPIFMC_INT_RX_FRAME BIT(4)
+#define SPIFMC_INT_TX_FRAME BIT(5)
+
+#define SPIFMC_INT_EN 0x2c
+#define SPIFMC_INT_TRAN_DONE_EN BIT(0)
+#define SPIFMC_INT_RD_FIFO_EN BIT(2)
+#define SPIFMC_INT_WR_FIFO_EN BIT(3)
+#define SPIFMC_INT_RX_FRAME_EN BIT(4)
+#define SPIFMC_INT_TX_FRAME_EN BIT(5)
+
+#define SPIFMC_OPT 0x030
+#define SPIFMC_OPT_DISABLE_FIFO_FLUSH BIT(1)
+
+#define SPIFMC_MAX_FIFO_DEPTH 8
+
+#define SPIFMC_MAX_READ_SIZE 0x10000
+
+struct sg2044_spifmc {
+ struct spi_controller *ctrl;
+ void __iomem *io_base;
+ struct device *dev;
+ struct mutex lock;
+ struct clk *clk;
+};
+
+static int sg2044_spifmc_wait_int(struct sg2044_spifmc *spifmc, u8 int_type)
+{
+ u32 stat;
+
+ return readl_poll_timeout(spifmc->io_base + SPIFMC_INT_STS, stat,
+ (stat & int_type), 0, 1000000);
+}
+
+static int sg2044_spifmc_wait_xfer_size(struct sg2044_spifmc *spifmc,
+ int xfer_size)
+{
+ u8 stat;
+
+ return readl_poll_timeout(spifmc->io_base + SPIFMC_FIFO_PT, stat,
+ ((stat & 0xf) == xfer_size), 1, 1000000);
+}
+
+static u32 sg2044_spifmc_init_reg(struct sg2044_spifmc *spifmc)
+{
+ u32 reg;
+
+ reg = readl(spifmc->io_base + SPIFMC_TRAN_CSR);
+ reg &= ~(SPIFMC_TRAN_CSR_TRAN_MODE_MASK |
+ SPIFMC_TRAN_CSR_FAST_MODE |
+ SPIFMC_TRAN_CSR_BUS_WIDTH_2_BIT |
+ SPIFMC_TRAN_CSR_BUS_WIDTH_4_BIT |
+ SPIFMC_TRAN_CSR_DMA_EN |
+ SPIFMC_TRAN_CSR_ADDR_BYTES_MASK |
+ SPIFMC_TRAN_CSR_WITH_CMD |
+ SPIFMC_TRAN_CSR_FIFO_TRG_LVL_MASK);
+
+ writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR);
+
+ return reg;
+}
+
+static ssize_t sg2044_spifmc_read_64k(struct sg2044_spifmc *spifmc,
+ const struct spi_mem_op *op, loff_t from,
+ size_t len, u_char *buf)
+{
+ int xfer_size, offset;
+ u32 reg;
+ int ret;
+ int i;
+
+ reg = sg2044_spifmc_init_reg(spifmc);
+ reg |= (op->addr.nbytes + op->dummy.nbytes) << SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT;
+ reg |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_8_BYTE;
+ reg |= SPIFMC_TRAN_CSR_WITH_CMD;
+ reg |= SPIFMC_TRAN_CSR_TRAN_MODE_RX;
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+ writeb(op->cmd.opcode, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ for (i = op->addr.nbytes - 1; i >= 0; i--)
+ writeb((from >> i * 8) & 0xff, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ for (i = 0; i < op->dummy.nbytes; i++)
+ writeb(0xff, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ writel(len, spifmc->io_base + SPIFMC_TRAN_NUM);
+ writel(0, spifmc->io_base + SPIFMC_INT_STS);
+ reg |= SPIFMC_TRAN_CSR_GO_BUSY;
+ writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR);
+
+ ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_RD_FIFO);
+ if (ret < 0)
+ return ret;
+
+ offset = 0;
+ while (offset < len) {
+ xfer_size = min_t(size_t, SPIFMC_MAX_FIFO_DEPTH, len - offset);
+
+ ret = sg2044_spifmc_wait_xfer_size(spifmc, xfer_size);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < xfer_size; i++)
+ buf[i + offset] = readb(spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ offset += xfer_size;
+ }
+
+ ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_TRAN_DONE);
+ if (ret < 0)
+ return ret;
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+
+ return len;
+}
+
+static ssize_t sg2044_spifmc_read(struct sg2044_spifmc *spifmc,
+ const struct spi_mem_op *op)
+{
+ size_t xfer_size;
+ size_t offset;
+ loff_t from = op->addr.val;
+ size_t len = op->data.nbytes;
+ int ret;
+ u8 *din = op->data.buf.in;
+
+ offset = 0;
+ while (offset < len) {
+ xfer_size = min_t(size_t, SPIFMC_MAX_READ_SIZE, len - offset);
+
+ ret = sg2044_spifmc_read_64k(spifmc, op, from, xfer_size, din);
+ if (ret < 0)
+ return ret;
+
+ offset += xfer_size;
+ din += xfer_size;
+ from += xfer_size;
+ }
+
+ return 0;
+}
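+
+/*
+ * Worked example (illustrative numbers): a 160 KiB (0x28000-byte) read is
+ * split by sg2044_spifmc_read() into 64 KiB + 64 KiB + 32 KiB chunks, since
+ * SPIFMC_MAX_READ_SIZE is 0x10000; each chunk is then drained from the FIFO
+ * by sg2044_spifmc_read_64k() in bursts of at most SPIFMC_MAX_FIFO_DEPTH (8)
+ * bytes.
+ */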
+
+static ssize_t sg2044_spifmc_write(struct sg2044_spifmc *spifmc,
+ const struct spi_mem_op *op)
+{
+ size_t xfer_size;
+ const u8 *dout = op->data.buf.out;
+ int i, offset;
+ int ret;
+ u32 reg;
+
+ reg = sg2044_spifmc_init_reg(spifmc);
+ reg |= (op->addr.nbytes + op->dummy.nbytes) << SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT;
+ reg |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_8_BYTE;
+ reg |= SPIFMC_TRAN_CSR_WITH_CMD;
+ reg |= SPIFMC_TRAN_CSR_TRAN_MODE_TX;
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+ writeb(op->cmd.opcode, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ for (i = op->addr.nbytes - 1; i >= 0; i--)
+ writeb((op->addr.val >> i * 8) & 0xff, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ for (i = 0; i < op->dummy.nbytes; i++)
+ writeb(0xff, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ writel(0, spifmc->io_base + SPIFMC_INT_STS);
+ writel(op->data.nbytes, spifmc->io_base + SPIFMC_TRAN_NUM);
+ reg |= SPIFMC_TRAN_CSR_GO_BUSY;
+ writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR);
+
+ ret = sg2044_spifmc_wait_xfer_size(spifmc, 0);
+ if (ret < 0)
+ return ret;
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+
+ offset = 0;
+ while (offset < op->data.nbytes) {
+ xfer_size = min_t(size_t, SPIFMC_MAX_FIFO_DEPTH, op->data.nbytes - offset);
+
+ ret = sg2044_spifmc_wait_xfer_size(spifmc, 0);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < xfer_size; i++)
+ writeb(dout[i + offset], spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ offset += xfer_size;
+ }
+
+ ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_TRAN_DONE);
+ if (ret < 0)
+ return ret;
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+
+ return 0;
+}
+
+static ssize_t sg2044_spifmc_tran_cmd(struct sg2044_spifmc *spifmc,
+ const struct spi_mem_op *op)
+{
+ int i, ret;
+ u32 reg;
+
+ reg = sg2044_spifmc_init_reg(spifmc);
+ reg |= (op->addr.nbytes + op->dummy.nbytes) << SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT;
+ reg |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_1_BYTE;
+ reg |= SPIFMC_TRAN_CSR_WITH_CMD;
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+ writeb(op->cmd.opcode, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ for (i = op->addr.nbytes - 1; i >= 0; i--)
+ writeb((op->addr.val >> i * 8) & 0xff, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ for (i = 0; i < op->dummy.nbytes; i++)
+ writeb(0xff, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ writel(0, spifmc->io_base + SPIFMC_INT_STS);
+ reg |= SPIFMC_TRAN_CSR_GO_BUSY;
+ writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR);
+
+ ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_TRAN_DONE);
+ if (ret < 0)
+ return ret;
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+
+ return 0;
+}
+
+static void sg2044_spifmc_trans(struct sg2044_spifmc *spifmc,
+ const struct spi_mem_op *op)
+{
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ sg2044_spifmc_read(spifmc, op);
+ else if (op->data.dir == SPI_MEM_DATA_OUT)
+ sg2044_spifmc_write(spifmc, op);
+ else
+ sg2044_spifmc_tran_cmd(spifmc, op);
+}
+
+static ssize_t sg2044_spifmc_trans_reg(struct sg2044_spifmc *spifmc,
+ const struct spi_mem_op *op)
+{
+ const u8 *dout = NULL;
+ u8 *din = NULL;
+ size_t len = op->data.nbytes;
+ int ret, i;
+ u32 reg;
+
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ din = op->data.buf.in;
+ else
+ dout = op->data.buf.out;
+
+ reg = sg2044_spifmc_init_reg(spifmc);
+ reg |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_1_BYTE;
+ reg |= SPIFMC_TRAN_CSR_WITH_CMD;
+
+ if (din) {
+ reg |= SPIFMC_TRAN_CSR_BUS_WIDTH_1_BIT;
+ reg |= SPIFMC_TRAN_CSR_TRAN_MODE_RX;
+ reg |= SPIFMC_TRAN_CSR_TRAN_MODE_TX;
+
+ writel(SPIFMC_OPT_DISABLE_FIFO_FLUSH, spifmc->io_base + SPIFMC_OPT);
+ } else {
+ /*
+ * When writing values to the Status Register, configure the
+ * TRAN_CSR register the same way as sg2044_spifmc_read_reg does.
+ */
+ if (op->cmd.opcode == 0x01) {
+ reg |= SPIFMC_TRAN_CSR_TRAN_MODE_RX;
+ reg |= SPIFMC_TRAN_CSR_TRAN_MODE_TX;
+ writel(len, spifmc->io_base + SPIFMC_TRAN_NUM);
+ }
+ }
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+ writeb(op->cmd.opcode, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ for (i = 0; i < len; i++) {
+ if (din)
+ writeb(0xff, spifmc->io_base + SPIFMC_FIFO_PORT);
+ else
+ writeb(dout[i], spifmc->io_base + SPIFMC_FIFO_PORT);
+ }
+
+ writel(0, spifmc->io_base + SPIFMC_INT_STS);
+ writel(len, spifmc->io_base + SPIFMC_TRAN_NUM);
+ reg |= SPIFMC_TRAN_CSR_GO_BUSY;
+ writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR);
+
+ ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_TRAN_DONE);
+ if (ret < 0)
+ return ret;
+
+ if (din) {
+ while (len--)
+ *din++ = readb(spifmc->io_base + SPIFMC_FIFO_PORT);
+ }
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+
+ return 0;
+}
+
+static int sg2044_spifmc_exec_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct sg2044_spifmc *spifmc;
+
+ spifmc = spi_controller_get_devdata(mem->spi->controller);
+
+ mutex_lock(&spifmc->lock);
+
+ if (op->addr.nbytes == 0)
+ sg2044_spifmc_trans_reg(spifmc, op);
+ else
+ sg2044_spifmc_trans(spifmc, op);
+
+ mutex_unlock(&spifmc->lock);
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops sg2044_spifmc_mem_ops = {
+ .exec_op = sg2044_spifmc_exec_op,
+};
+
+static void sg2044_spifmc_init(struct sg2044_spifmc *spifmc)
+{
+ u32 tran_csr;
+ u32 reg;
+
+ writel(0, spifmc->io_base + SPIFMC_DMMR);
+
+ reg = readl(spifmc->io_base + SPIFMC_CTRL);
+ reg |= SPIFMC_CTRL_SRST;
+ reg &= ~(SPIFMC_CTRL_SCK_DIV_MASK);
+ reg |= 1;
+ writel(reg, spifmc->io_base + SPIFMC_CTRL);
+
+ writel(0, spifmc->io_base + SPIFMC_CE_CTRL);
+
+ tran_csr = readl(spifmc->io_base + SPIFMC_TRAN_CSR);
+ tran_csr |= (0 << SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT);
+ tran_csr |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_4_BYTE;
+ tran_csr |= SPIFMC_TRAN_CSR_WITH_CMD;
+ writel(tran_csr, spifmc->io_base + SPIFMC_TRAN_CSR);
+}
+
+static int sg2044_spifmc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_controller *ctrl;
+ struct sg2044_spifmc *spifmc;
+ int ret;
+
+ ctrl = devm_spi_alloc_host(&pdev->dev, sizeof(*spifmc));
+ if (!ctrl)
+ return -ENOMEM;
+
+ spifmc = spi_controller_get_devdata(ctrl);
+
+ spifmc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(spifmc->clk))
+ return dev_err_probe(dev, PTR_ERR(spifmc->clk), "Cannot get and enable AHB clock\n");
+
+ spifmc->dev = &pdev->dev;
+ spifmc->ctrl = ctrl;
+
+ spifmc->io_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(spifmc->io_base))
+ return PTR_ERR(spifmc->io_base);
+
+ ctrl->num_chipselect = 1;
+ ctrl->dev.of_node = pdev->dev.of_node;
+ ctrl->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctrl->auto_runtime_pm = false;
+ ctrl->mem_ops = &sg2044_spifmc_mem_ops;
+ ctrl->mode_bits = SPI_RX_DUAL | SPI_TX_DUAL | SPI_RX_QUAD | SPI_TX_QUAD;
+
+ ret = devm_mutex_init(dev, &spifmc->lock);
+ if (ret)
+ return ret;
+
+ sg2044_spifmc_init(spifmc);
+ sg2044_spifmc_init_reg(spifmc);
+
+ ret = devm_spi_register_controller(&pdev->dev, ctrl);
+ if (ret)
+ return dev_err_probe(dev, ret, "spi_register_controller failed\n");
+
+ return 0;
+}
+
+static const struct of_device_id sg2044_spifmc_match[] = {
+ { .compatible = "sophgo,sg2044-spifmc-nor" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sg2044_spifmc_match);
+
+static struct platform_driver sg2044_nor_driver = {
+ .driver = {
+ .name = "sg2044,spifmc-nor",
+ .of_match_table = sg2044_spifmc_match,
+ },
+ .probe = sg2044_spifmc_probe,
+};
+module_platform_driver(sg2044_nor_driver);
+
+MODULE_DESCRIPTION("SG2044 SPI NOR controller driver");
+MODULE_AUTHOR("Longbin Li <looong.bin@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-stm32-ospi.c b/drivers/spi/spi-stm32-ospi.c
new file mode 100644
index 000000000000..668022098b1e
--- /dev/null
+++ b/drivers/spi/spi-stm32-ospi.c
@@ -0,0 +1,1063 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/sizes.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/types.h>
+
+#define OSPI_CR 0x00
+#define CR_EN BIT(0)
+#define CR_ABORT BIT(1)
+#define CR_DMAEN BIT(2)
+#define CR_FTHRES_SHIFT 8
+#define CR_TEIE BIT(16)
+#define CR_TCIE BIT(17)
+#define CR_SMIE BIT(19)
+#define CR_APMS BIT(22)
+#define CR_CSSEL BIT(24)
+#define CR_FMODE_MASK GENMASK(29, 28)
+#define CR_FMODE_INDW (0U)
+#define CR_FMODE_INDR (1U)
+#define CR_FMODE_APM (2U)
+#define CR_FMODE_MM (3U)
+
+#define OSPI_DCR1 0x08
+#define DCR1_DLYBYP BIT(3)
+#define DCR1_DEVSIZE_MASK GENMASK(20, 16)
+#define DCR1_MTYP_MASK GENMASK(26, 24)
+#define DCR1_MTYP_MX_MODE 1
+#define DCR1_MTYP_HP_MEMMODE 4
+
+#define OSPI_DCR2 0x0c
+#define DCR2_PRESC_MASK GENMASK(7, 0)
+
+#define OSPI_SR 0x20
+#define SR_TEF BIT(0)
+#define SR_TCF BIT(1)
+#define SR_FTF BIT(2)
+#define SR_SMF BIT(3)
+#define SR_BUSY BIT(5)
+
+#define OSPI_FCR 0x24
+#define FCR_CTEF BIT(0)
+#define FCR_CTCF BIT(1)
+#define FCR_CSMF BIT(3)
+
+#define OSPI_DLR 0x40
+#define OSPI_AR 0x48
+#define OSPI_DR 0x50
+#define OSPI_PSMKR 0x80
+#define OSPI_PSMAR 0x88
+
+#define OSPI_CCR 0x100
+#define CCR_IMODE_MASK GENMASK(2, 0)
+#define CCR_IDTR BIT(3)
+#define CCR_ISIZE_MASK GENMASK(5, 4)
+#define CCR_ADMODE_MASK GENMASK(10, 8)
+#define CCR_ADMODE_8LINES 4
+#define CCR_ADDTR BIT(11)
+#define CCR_ADSIZE_MASK GENMASK(13, 12)
+#define CCR_ADSIZE_32BITS 3
+#define CCR_DMODE_MASK GENMASK(26, 24)
+#define CCR_DMODE_8LINES 4
+#define CCR_DQSE BIT(29)
+#define CCR_DDTR BIT(27)
+#define CCR_BUSWIDTH_0 0x0
+#define CCR_BUSWIDTH_1 0x1
+#define CCR_BUSWIDTH_2 0x2
+#define CCR_BUSWIDTH_4 0x3
+#define CCR_BUSWIDTH_8 0x4
+
+#define OSPI_TCR 0x108
+#define TCR_DCYC_MASK GENMASK(4, 0)
+#define TCR_DHQC BIT(28)
+#define TCR_SSHIFT BIT(30)
+
+#define OSPI_IR 0x110
+
+#define STM32_OSPI_MAX_MMAP_SZ SZ_256M
+#define STM32_OSPI_MAX_NORCHIP 2
+
+#define STM32_FIFO_TIMEOUT_US 30000
+#define STM32_ABT_TIMEOUT_US 100000
+#define STM32_COMP_TIMEOUT_MS 5000
+#define STM32_BUSY_TIMEOUT_US 100000
+
+#define STM32_AUTOSUSPEND_DELAY -1
+
+struct stm32_ospi {
+ struct device *dev;
+ struct spi_controller *ctrl;
+ struct clk *clk;
+ struct reset_control *rstc;
+
+ struct completion data_completion;
+ struct completion match_completion;
+
+ struct dma_chan *dma_chtx;
+ struct dma_chan *dma_chrx;
+ struct completion dma_completion;
+
+ void __iomem *regs_base;
+ void __iomem *mm_base;
+ phys_addr_t regs_phys_base;
+ resource_size_t mm_size;
+ u32 clk_rate;
+ u32 fmode;
+ u32 cr_reg;
+ u32 dcr_reg;
+ u32 flash_presc[STM32_OSPI_MAX_NORCHIP];
+ int irq;
+ unsigned long status_timeout;
+
+ /*
+ * Protects the device configuration, which can differ between
+ * accesses to the two flashes
+ */
+ struct mutex lock;
+};
+
+static void stm32_ospi_read_fifo(u8 *val, void __iomem *addr)
+{
+ *val = readb_relaxed(addr);
+}
+
+static void stm32_ospi_write_fifo(u8 *val, void __iomem *addr)
+{
+ writeb_relaxed(*val, addr);
+}
+
+static int stm32_ospi_abort(struct stm32_ospi *ospi)
+{
+ void __iomem *regs_base = ospi->regs_base;
+ u32 cr;
+ int timeout;
+
+ cr = readl_relaxed(regs_base + OSPI_CR) | CR_ABORT;
+ writel_relaxed(cr, regs_base + OSPI_CR);
+
+ /* wait clear of abort bit by hw */
+ timeout = readl_relaxed_poll_timeout_atomic(regs_base + OSPI_CR,
+ cr, !(cr & CR_ABORT), 1,
+ STM32_ABT_TIMEOUT_US);
+
+ if (timeout)
+ dev_err(ospi->dev, "%s abort timeout:%d\n", __func__, timeout);
+
+ return timeout;
+}
+
+static int stm32_ospi_poll(struct stm32_ospi *ospi, u8 *buf, u32 len, bool read)
+{
+ void __iomem *regs_base = ospi->regs_base;
+ void (*fifo)(u8 *val, void __iomem *addr);
+ u32 sr;
+ int ret;
+
+ if (read)
+ fifo = stm32_ospi_read_fifo;
+ else
+ fifo = stm32_ospi_write_fifo;
+
+ while (len--) {
+ ret = readl_relaxed_poll_timeout_atomic(regs_base + OSPI_SR,
+ sr, sr & SR_FTF, 1,
+ STM32_FIFO_TIMEOUT_US);
+ if (ret) {
+ dev_err(ospi->dev, "fifo timeout (len:%d stat:%#x)\n",
+ len, sr);
+ return ret;
+ }
+ fifo(buf++, regs_base + OSPI_DR);
+ }
+
+ return 0;
+}
+
+static int stm32_ospi_wait_nobusy(struct stm32_ospi *ospi)
+{
+ u32 sr;
+
+ return readl_relaxed_poll_timeout_atomic(ospi->regs_base + OSPI_SR,
+ sr, !(sr & SR_BUSY), 1,
+ STM32_BUSY_TIMEOUT_US);
+}
+
+static int stm32_ospi_wait_cmd(struct stm32_ospi *ospi)
+{
+ void __iomem *regs_base = ospi->regs_base;
+ u32 cr, sr;
+ int err = 0;
+
+ if ((readl_relaxed(regs_base + OSPI_SR) & SR_TCF) ||
+ ospi->fmode == CR_FMODE_APM)
+ goto out;
+
+ reinit_completion(&ospi->data_completion);
+ cr = readl_relaxed(regs_base + OSPI_CR);
+ writel_relaxed(cr | CR_TCIE | CR_TEIE, regs_base + OSPI_CR);
+
+ if (!wait_for_completion_timeout(&ospi->data_completion,
+ msecs_to_jiffies(STM32_COMP_TIMEOUT_MS)))
+ err = -ETIMEDOUT;
+
+ sr = readl_relaxed(regs_base + OSPI_SR);
+ if (sr & SR_TCF)
+ /* avoid false timeout */
+ err = 0;
+ if (sr & SR_TEF)
+ err = -EIO;
+
+out:
+ /* clear flags */
+ writel_relaxed(FCR_CTCF | FCR_CTEF, regs_base + OSPI_FCR);
+
+ if (!err)
+ err = stm32_ospi_wait_nobusy(ospi);
+
+ return err;
+}
+
+static void stm32_ospi_dma_callback(void *arg)
+{
+ struct completion *dma_completion = arg;
+
+ complete(dma_completion);
+}
+
+static irqreturn_t stm32_ospi_irq(int irq, void *dev_id)
+{
+ struct stm32_ospi *ospi = (struct stm32_ospi *)dev_id;
+ void __iomem *regs_base = ospi->regs_base;
+ u32 cr, sr;
+
+ cr = readl_relaxed(regs_base + OSPI_CR);
+ sr = readl_relaxed(regs_base + OSPI_SR);
+
+ if (cr & CR_SMIE && sr & SR_SMF) {
+ /* disable irq */
+ cr &= ~CR_SMIE;
+ writel_relaxed(cr, regs_base + OSPI_CR);
+ complete(&ospi->match_completion);
+
+ return IRQ_HANDLED;
+ }
+
+ if (sr & (SR_TEF | SR_TCF)) {
+ /* disable irq */
+ cr &= ~CR_TCIE & ~CR_TEIE;
+ writel_relaxed(cr, regs_base + OSPI_CR);
+ complete(&ospi->data_completion);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void stm32_ospi_dma_setup(struct stm32_ospi *ospi,
+ struct dma_slave_config *dma_cfg)
+{
+ if (dma_cfg && ospi->dma_chrx) {
+ if (dmaengine_slave_config(ospi->dma_chrx, dma_cfg)) {
+ dev_err(ospi->dev, "dma rx config failed\n");
+ dma_release_channel(ospi->dma_chrx);
+ ospi->dma_chrx = NULL;
+ }
+ }
+
+ if (dma_cfg && ospi->dma_chtx) {
+ if (dmaengine_slave_config(ospi->dma_chtx, dma_cfg)) {
+ dev_err(ospi->dev, "dma tx config failed\n");
+ dma_release_channel(ospi->dma_chtx);
+ ospi->dma_chtx = NULL;
+ }
+ }
+
+ init_completion(&ospi->dma_completion);
+}
+
+static int stm32_ospi_tx_mm(struct stm32_ospi *ospi,
+ const struct spi_mem_op *op)
+{
+ memcpy_fromio(op->data.buf.in, ospi->mm_base + op->addr.val,
+ op->data.nbytes);
+ return 0;
+}
+
+static int stm32_ospi_tx_dma(struct stm32_ospi *ospi,
+ const struct spi_mem_op *op)
+{
+ struct dma_async_tx_descriptor *desc;
+ void __iomem *regs_base = ospi->regs_base;
+ enum dma_transfer_direction dma_dir;
+ struct dma_chan *dma_ch;
+ struct sg_table sgt;
+ dma_cookie_t cookie;
+ u32 cr, t_out;
+ int err;
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ dma_dir = DMA_DEV_TO_MEM;
+ dma_ch = ospi->dma_chrx;
+ } else {
+ dma_dir = DMA_MEM_TO_DEV;
+ dma_ch = ospi->dma_chtx;
+ }
+
+ /*
+ * spi_map_buf() returns -EINVAL if the buffer is not DMA-able
+ * (DMA-able: in vmalloc | kmap | virt_addr_valid)
+ */
+ err = spi_controller_dma_map_mem_op_data(ospi->ctrl, op, &sgt);
+ if (err)
+ return err;
+
+ desc = dmaengine_prep_slave_sg(dma_ch, sgt.sgl, sgt.nents,
+ dma_dir, DMA_PREP_INTERRUPT);
+ if (!desc) {
+ err = -ENOMEM;
+ goto out_unmap;
+ }
+
+ cr = readl_relaxed(regs_base + OSPI_CR);
+
+ reinit_completion(&ospi->dma_completion);
+ desc->callback = stm32_ospi_dma_callback;
+ desc->callback_param = &ospi->dma_completion;
+ cookie = dmaengine_submit(desc);
+ err = dma_submit_error(cookie);
+ if (err)
+ goto out;
+
+ dma_async_issue_pending(dma_ch);
+
+ writel_relaxed(cr | CR_DMAEN, regs_base + OSPI_CR);
+
+ t_out = sgt.nents * STM32_COMP_TIMEOUT_MS;
+ if (!wait_for_completion_timeout(&ospi->dma_completion,
+ msecs_to_jiffies(t_out)))
+ err = -ETIMEDOUT;
+
+ if (err)
+ dmaengine_terminate_all(dma_ch);
+
+out:
+ writel_relaxed(cr & ~CR_DMAEN, regs_base + OSPI_CR);
+out_unmap:
+ spi_controller_dma_unmap_mem_op_data(ospi->ctrl, op, &sgt);
+
+ return err;
+}
+
+static int stm32_ospi_xfer(struct stm32_ospi *ospi, const struct spi_mem_op *op)
+{
+ u8 *buf;
+
+ if (!op->data.nbytes)
+ return 0;
+
+ if (ospi->fmode == CR_FMODE_MM)
+ return stm32_ospi_tx_mm(ospi, op);
+ else if (((op->data.dir == SPI_MEM_DATA_IN && ospi->dma_chrx) ||
+ (op->data.dir == SPI_MEM_DATA_OUT && ospi->dma_chtx)) &&
+ op->data.nbytes > 8)
+ if (!stm32_ospi_tx_dma(ospi, op))
+ return 0;
+
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ buf = op->data.buf.in;
+ else
+ buf = (u8 *)op->data.buf.out;
+
+ return stm32_ospi_poll(ospi, buf, op->data.nbytes,
+ op->data.dir == SPI_MEM_DATA_IN);
+}
+
+static int stm32_ospi_wait_poll_status(struct stm32_ospi *ospi,
+ const struct spi_mem_op *op)
+{
+ void __iomem *regs_base = ospi->regs_base;
+ u32 cr;
+
+ reinit_completion(&ospi->match_completion);
+ cr = readl_relaxed(regs_base + OSPI_CR);
+ writel_relaxed(cr | CR_SMIE, regs_base + OSPI_CR);
+
+ if (!wait_for_completion_timeout(&ospi->match_completion,
+ msecs_to_jiffies(ospi->status_timeout))) {
+ u32 sr = readl_relaxed(regs_base + OSPI_SR);
+
+ /* Avoid false timeout */
+ if (!(sr & SR_SMF))
+ return -ETIMEDOUT;
+ }
+
+ writel_relaxed(FCR_CSMF, regs_base + OSPI_FCR);
+
+ return 0;
+}
+
+static int stm32_ospi_get_mode(u8 buswidth)
+{
+ switch (buswidth) {
+ case 8:
+ return CCR_BUSWIDTH_8;
+ case 4:
+ return CCR_BUSWIDTH_4;
+ default:
+ return buswidth;
+ }
+}
+
+static int stm32_ospi_send(struct spi_device *spi, const struct spi_mem_op *op)
+{
+ struct stm32_ospi *ospi = spi_controller_get_devdata(spi->controller);
+ void __iomem *regs_base = ospi->regs_base;
+ u32 ccr, cr, dcr2, tcr;
+ int timeout, err = 0, err_poll_status = 0;
+ u8 cs = spi->chip_select[ffs(spi->cs_index_mask) - 1];
+
+ dev_dbg(ospi->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
+ op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
+ op->dummy.buswidth, op->data.buswidth,
+ op->addr.val, op->data.nbytes);
+
+ cr = readl_relaxed(ospi->regs_base + OSPI_CR);
+ cr &= ~CR_CSSEL;
+ cr |= FIELD_PREP(CR_CSSEL, cs);
+ cr &= ~CR_FMODE_MASK;
+ cr |= FIELD_PREP(CR_FMODE_MASK, ospi->fmode);
+ writel_relaxed(cr, regs_base + OSPI_CR);
+
+ if (op->data.nbytes)
+ writel_relaxed(op->data.nbytes - 1, regs_base + OSPI_DLR);
+
+ /* set prescaler */
+ dcr2 = readl_relaxed(regs_base + OSPI_DCR2);
+ dcr2 |= FIELD_PREP(DCR2_PRESC_MASK, ospi->flash_presc[cs]);
+ writel_relaxed(dcr2, regs_base + OSPI_DCR2);
+
+ ccr = FIELD_PREP(CCR_IMODE_MASK, stm32_ospi_get_mode(op->cmd.buswidth));
+
+ if (op->addr.nbytes) {
+ ccr |= FIELD_PREP(CCR_ADMODE_MASK,
+ stm32_ospi_get_mode(op->addr.buswidth));
+ ccr |= FIELD_PREP(CCR_ADSIZE_MASK, op->addr.nbytes - 1);
+ }
+
+ tcr = TCR_SSHIFT;
+ if (op->dummy.buswidth && op->dummy.nbytes) {
+ tcr |= FIELD_PREP(TCR_DCYC_MASK,
+ op->dummy.nbytes * 8 / op->dummy.buswidth);
+ }
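+ /*
+ * Example (illustrative figures): a fast-read op needing 8 dummy clock
+ * cycles on 4 data lines reaches the controller as dummy.nbytes = 4 and
+ * dummy.buswidth = 4, so the conversion above programs
+ * 4 * 8 / 4 = 8 cycles into TCR_DCYC.
+ */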
+ writel_relaxed(tcr, regs_base + OSPI_TCR);
+
+ if (op->data.nbytes) {
+ ccr |= FIELD_PREP(CCR_DMODE_MASK,
+ stm32_ospi_get_mode(op->data.buswidth));
+ }
+
+ writel_relaxed(ccr, regs_base + OSPI_CCR);
+
+ /* set instruction, must be set after ccr register update */
+ writel_relaxed(op->cmd.opcode, regs_base + OSPI_IR);
+
+ if (op->addr.nbytes && ospi->fmode != CR_FMODE_MM)
+ writel_relaxed(op->addr.val, regs_base + OSPI_AR);
+
+ if (ospi->fmode == CR_FMODE_APM)
+ err_poll_status = stm32_ospi_wait_poll_status(ospi, op);
+
+ err = stm32_ospi_xfer(ospi, op);
+
+ /*
+ * Abort in:
+ * -error case
+ * -memory-mapped read: prefetching must be stopped if we read the last
+ * byte of the device (device size - fifo size). As the device size is
+ * not known, prefetching is always stopped.
+ */
+ if (err || err_poll_status || ospi->fmode == CR_FMODE_MM)
+ goto abort;
+
+ /* Wait end of tx in indirect mode */
+ err = stm32_ospi_wait_cmd(ospi);
+ if (err)
+ goto abort;
+
+ return 0;
+
+abort:
+ timeout = stm32_ospi_abort(ospi);
+ writel_relaxed(FCR_CTCF | FCR_CSMF, regs_base + OSPI_FCR);
+
+ if (err || err_poll_status || timeout)
+ dev_err(ospi->dev, "%s err:%d err_poll_status:%d abort timeout:%d\n",
+ __func__, err, err_poll_status, timeout);
+
+ return err;
+}
+
+static int stm32_ospi_poll_status(struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ u16 mask, u16 match,
+ unsigned long initial_delay_us,
+ unsigned long polling_rate_us,
+ unsigned long timeout_ms)
+{
+ struct stm32_ospi *ospi = spi_controller_get_devdata(mem->spi->controller);
+ void __iomem *regs_base = ospi->regs_base;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&ospi->lock);
+
+ writel_relaxed(mask, regs_base + OSPI_PSMKR);
+ writel_relaxed(match, regs_base + OSPI_PSMAR);
+ ospi->fmode = CR_FMODE_APM;
+ ospi->status_timeout = timeout_ms;
+
+ ret = stm32_ospi_send(mem->spi, op);
+ mutex_unlock(&ospi->lock);
+
+ pm_runtime_mark_last_busy(ospi->dev);
+ pm_runtime_put_autosuspend(ospi->dev);
+
+ return ret;
+}
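+
+/*
+ * Usage note (illustrative, not from the original patch): the spi-mem core
+ * invokes .poll_status through spi_mem_poll_status(); for instance the SPI
+ * NAND core waits for a device to become ready by polling the status
+ * register with mask = STATUS_BUSY (BIT(0)) and match = 0, and the APM mode
+ * configured above performs that polling in hardware instead of looping over
+ * register reads.
+ */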
+
+static int stm32_ospi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct stm32_ospi *ospi = spi_controller_get_devdata(mem->spi->controller);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&ospi->lock);
+ if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes)
+ ospi->fmode = CR_FMODE_INDR;
+ else
+ ospi->fmode = CR_FMODE_INDW;
+
+ ret = stm32_ospi_send(mem->spi, op);
+ mutex_unlock(&ospi->lock);
+
+ pm_runtime_mark_last_busy(ospi->dev);
+ pm_runtime_put_autosuspend(ospi->dev);
+
+ return ret;
+}
+
+static int stm32_ospi_dirmap_create(struct spi_mem_dirmap_desc *desc)
+{
+ struct stm32_ospi *ospi = spi_controller_get_devdata(desc->mem->spi->controller);
+
+ if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_OUT)
+ return -EOPNOTSUPP;
+
+ /* Should never happen, as mm_base == NULL makes probe exit with an error */
+ if (!ospi->mm_base && desc->info.op_tmpl.data.dir == SPI_MEM_DATA_IN)
+ return -EOPNOTSUPP;
+
+ if (!ospi->mm_size)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static ssize_t stm32_ospi_dirmap_read(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, void *buf)
+{
+ struct stm32_ospi *ospi = spi_controller_get_devdata(desc->mem->spi->controller);
+ struct spi_mem_op op;
+ u32 addr_max;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&ospi->lock);
+ /*
+ * Make a local copy of desc op_tmpl and complete dirmap rdesc
+ * spi_mem_op template with offs, len and *buf in order to get
+ * all needed transfer information into struct spi_mem_op
+ */
+ memcpy(&op, &desc->info.op_tmpl, sizeof(struct spi_mem_op));
+ dev_dbg(ospi->dev, "%s len = 0x%zx offs = 0x%llx buf = 0x%p\n", __func__, len, offs, buf);
+
+ op.data.nbytes = len;
+ op.addr.val = desc->info.offset + offs;
+ op.data.buf.in = buf;
+
+ addr_max = op.addr.val + op.data.nbytes + 1;
+ if (addr_max < ospi->mm_size && op.addr.buswidth)
+ ospi->fmode = CR_FMODE_MM;
+ else
+ ospi->fmode = CR_FMODE_INDR;
+
+ ret = stm32_ospi_send(desc->mem->spi, &op);
+ mutex_unlock(&ospi->lock);
+
+ pm_runtime_mark_last_busy(ospi->dev);
+ pm_runtime_put_autosuspend(ospi->dev);
+
+ return ret ?: len;
+}
+
+static int stm32_ospi_transfer_one_message(struct spi_controller *ctrl,
+ struct spi_message *msg)
+{
+ struct stm32_ospi *ospi = spi_controller_get_devdata(ctrl);
+ struct spi_transfer *transfer;
+ struct spi_device *spi = msg->spi;
+ struct spi_mem_op op;
+ struct gpio_desc *cs_gpiod = spi->cs_gpiod[ffs(spi->cs_index_mask) - 1];
+ int ret = 0;
+
+ if (!cs_gpiod)
+ return -EOPNOTSUPP;
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&ospi->lock);
+
+ gpiod_set_value_cansleep(cs_gpiod, true);
+
+ list_for_each_entry(transfer, &msg->transfers, transfer_list) {
+ u8 dummy_bytes = 0;
+
+ memset(&op, 0, sizeof(op));
+
+ dev_dbg(ospi->dev, "tx_buf:%p tx_nbits:%d rx_buf:%p rx_nbits:%d len:%d dummy_data:%d\n",
+ transfer->tx_buf, transfer->tx_nbits,
+ transfer->rx_buf, transfer->rx_nbits,
+ transfer->len, transfer->dummy_data);
+
+ /*
+ * The OSPI hardware supports dummy-byte transfers.
+ * If the current transfer carries dummy bytes, merge it with the next
+ * transfer to take the OSPI block constraint into account
+ */
+ if (transfer->dummy_data) {
+ op.dummy.buswidth = transfer->tx_nbits;
+ op.dummy.nbytes = transfer->len;
+ dummy_bytes = transfer->len;
+
+ /* If this happens, the message was not correctly built */
+ if (list_is_last(&transfer->transfer_list, &msg->transfers)) {
+ ret = -EINVAL;
+ goto end_of_transfer;
+ }
+
+ transfer = list_next_entry(transfer, transfer_list);
+ }
+
+ op.data.nbytes = transfer->len;
+
+ if (transfer->rx_buf) {
+ ospi->fmode = CR_FMODE_INDR;
+ op.data.buswidth = transfer->rx_nbits;
+ op.data.dir = SPI_MEM_DATA_IN;
+ op.data.buf.in = transfer->rx_buf;
+ } else {
+ ospi->fmode = CR_FMODE_INDW;
+ op.data.buswidth = transfer->tx_nbits;
+ op.data.dir = SPI_MEM_DATA_OUT;
+ op.data.buf.out = transfer->tx_buf;
+ }
+
+ ret = stm32_ospi_send(spi, &op);
+ if (ret)
+ goto end_of_transfer;
+
+ msg->actual_length += transfer->len + dummy_bytes;
+ }
+
+end_of_transfer:
+ gpiod_set_value_cansleep(cs_gpiod, false);
+
+ mutex_unlock(&ospi->lock);
+
+ msg->status = ret;
+ spi_finalize_current_message(ctrl);
+
+ pm_runtime_mark_last_busy(ospi->dev);
+ pm_runtime_put_autosuspend(ospi->dev);
+
+ return ret;
+}
+
+static int stm32_ospi_setup(struct spi_device *spi)
+{
+ struct spi_controller *ctrl = spi->controller;
+ struct stm32_ospi *ospi = spi_controller_get_devdata(ctrl);
+ void __iomem *regs_base = ospi->regs_base;
+ int ret;
+ u8 cs = spi->chip_select[ffs(spi->cs_index_mask) - 1];
+
+ if (ctrl->busy)
+ return -EBUSY;
+
+ if (!spi->max_speed_hz)
+ return -EINVAL;
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ return ret;
+
+ ospi->flash_presc[cs] = DIV_ROUND_UP(ospi->clk_rate, spi->max_speed_hz) - 1;
+
+ mutex_lock(&ospi->lock);
+
+ ospi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_EN;
+ writel_relaxed(ospi->cr_reg, regs_base + OSPI_CR);
+
+ /* set dcr fsize to max address */
+ ospi->dcr_reg = DCR1_DEVSIZE_MASK | DCR1_DLYBYP;
+ writel_relaxed(ospi->dcr_reg, regs_base + OSPI_DCR1);
+
+ mutex_unlock(&ospi->lock);
+
+ pm_runtime_mark_last_busy(ospi->dev);
+ pm_runtime_put_autosuspend(ospi->dev);
+
+ return 0;
+}
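+
+/*
+ * Prescaler example (illustrative numbers): with a 200 MHz kernel clock and
+ * a flash limited to spi-max-frequency = 50 MHz, DIV_ROUND_UP(200, 50) - 1
+ * stores PRESC = 3 in flash_presc[], and the controller divides the clock by
+ * PRESC + 1 = 4, giving the expected 50 MHz on the bus.
+ */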
+
+/*
+ * No special host constraint, so use default spi_mem_default_supports_op
+ * to check supported mode.
+ */
+static const struct spi_controller_mem_ops stm32_ospi_mem_ops = {
+ .exec_op = stm32_ospi_exec_op,
+ .dirmap_create = stm32_ospi_dirmap_create,
+ .dirmap_read = stm32_ospi_dirmap_read,
+ .poll_status = stm32_ospi_poll_status,
+};
+
+static int stm32_ospi_get_resources(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct stm32_ospi *ospi = platform_get_drvdata(pdev);
+ struct resource *res;
+ struct reserved_mem *rmem = NULL;
+ struct device_node *node;
+ int ret;
+
+ ospi->regs_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(ospi->regs_base))
+ return PTR_ERR(ospi->regs_base);
+
+ ospi->regs_phys_base = res->start;
+
+ ospi->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ospi->clk))
+ return dev_err_probe(dev, PTR_ERR(ospi->clk),
+ "Can't get clock\n");
+
+ ospi->clk_rate = clk_get_rate(ospi->clk);
+ if (!ospi->clk_rate) {
+ dev_err(dev, "Invalid clock rate\n");
+ return -EINVAL;
+ }
+
+ ospi->irq = platform_get_irq(pdev, 0);
+ if (ospi->irq < 0)
+ return ospi->irq;
+
+ ret = devm_request_irq(dev, ospi->irq, stm32_ospi_irq, 0,
+ dev_name(dev), ospi);
+ if (ret) {
+ dev_err(dev, "Failed to request irq\n");
+ return ret;
+ }
+
+ ospi->rstc = devm_reset_control_array_get_optional_exclusive(dev);
+ if (IS_ERR(ospi->rstc))
+ return dev_err_probe(dev, PTR_ERR(ospi->rstc),
+ "Can't get reset\n");
+
+ ospi->dma_chrx = dma_request_chan(dev, "rx");
+ if (IS_ERR(ospi->dma_chrx)) {
+ ret = PTR_ERR(ospi->dma_chrx);
+ ospi->dma_chrx = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto err_dma;
+ }
+
+ ospi->dma_chtx = dma_request_chan(dev, "tx");
+ if (IS_ERR(ospi->dma_chtx)) {
+ ret = PTR_ERR(ospi->dma_chtx);
+ ospi->dma_chtx = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto err_dma;
+ }
+
+ node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (node)
+ rmem = of_reserved_mem_lookup(node);
+ of_node_put(node);
+
+ if (rmem) {
+ ospi->mm_size = rmem->size;
+ ospi->mm_base = devm_ioremap(dev, rmem->base, rmem->size);
+ if (!ospi->mm_base) {
+ dev_err(dev, "unable to map memory region: %pa+%pa\n",
+ &rmem->base, &rmem->size);
+ ret = -ENOMEM;
+ goto err_dma;
+ }
+
+ if (ospi->mm_size > STM32_OSPI_MAX_MMAP_SZ) {
+ dev_err(dev, "Memory map size outsize bounds\n");
+ ret = -EINVAL;
+ goto err_dma;
+ }
+ } else {
+ dev_info(dev, "No memory-map region found\n");
+ }
+
+ init_completion(&ospi->data_completion);
+ init_completion(&ospi->match_completion);
+
+ return 0;
+
+err_dma:
+ dev_info(dev, "Can't get all resources (%d)\n", ret);
+
+ if (ospi->dma_chtx)
+ dma_release_channel(ospi->dma_chtx);
+ if (ospi->dma_chrx)
+ dma_release_channel(ospi->dma_chrx);
+
+ return ret;
+};
+
+static int stm32_ospi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_controller *ctrl;
+ struct stm32_ospi *ospi;
+ struct dma_slave_config dma_cfg;
+ struct device_node *child;
+ int ret;
+ u8 spi_flash_count = 0;
+
+ /*
+ * Flash subnodes sanity check:
+ * 1 or 2 spi-nand/spi-nor flashes => supported
+ * All other flash node configuration => not supported
+ */
+ for_each_available_child_of_node(dev->of_node, child) {
+ if (of_device_is_compatible(child, "jedec,spi-nor") ||
+ of_device_is_compatible(child, "spi-nand"))
+ spi_flash_count++;
+ }
+
+ if (spi_flash_count == 0 || spi_flash_count > 2) {
+ dev_err(dev, "Incorrect DT flash node\n");
+ return -ENODEV;
+ }
+
+ ctrl = devm_spi_alloc_host(dev, sizeof(*ospi));
+ if (!ctrl)
+ return -ENOMEM;
+
+ ospi = spi_controller_get_devdata(ctrl);
+ ospi->ctrl = ctrl;
+
+ ospi->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ospi);
+
+ ret = stm32_ospi_get_resources(pdev);
+ if (ret)
+ return ret;
+
+ memset(&dma_cfg, 0, sizeof(dma_cfg));
+ dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma_cfg.src_addr = ospi->regs_phys_base + OSPI_DR;
+ dma_cfg.dst_addr = ospi->regs_phys_base + OSPI_DR;
+ dma_cfg.src_maxburst = 4;
+ dma_cfg.dst_maxburst = 4;
+ stm32_ospi_dma_setup(ospi, &dma_cfg);
+
+ mutex_init(&ospi->lock);
+
+ ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
+ SPI_TX_DUAL | SPI_TX_QUAD |
+ SPI_TX_OCTAL | SPI_RX_OCTAL;
+ ctrl->flags = SPI_CONTROLLER_HALF_DUPLEX;
+ ctrl->setup = stm32_ospi_setup;
+ ctrl->bus_num = -1;
+ ctrl->mem_ops = &stm32_ospi_mem_ops;
+ ctrl->use_gpio_descriptors = true;
+ ctrl->transfer_one_message = stm32_ospi_transfer_one_message;
+ ctrl->num_chipselect = STM32_OSPI_MAX_NORCHIP;
+ ctrl->dev.of_node = dev->of_node;
+
+ pm_runtime_enable(ospi->dev);
+ pm_runtime_set_autosuspend_delay(ospi->dev, STM32_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(ospi->dev);
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ goto err_pm_enable;
+
+ if (ospi->rstc) {
+ reset_control_assert(ospi->rstc);
+ udelay(2);
+ reset_control_deassert(ospi->rstc);
+ }
+
+ ret = spi_register_controller(ctrl);
+ if (ret) {
+ /* Disable ospi */
+ writel_relaxed(0, ospi->regs_base + OSPI_CR);
+ goto err_pm_resume;
+ }
+
+ pm_runtime_mark_last_busy(ospi->dev);
+ pm_runtime_put_autosuspend(ospi->dev);
+
+ return 0;
+
+err_pm_resume:
+ pm_runtime_put_sync_suspend(ospi->dev);
+
+err_pm_enable:
+ pm_runtime_force_suspend(ospi->dev);
+ mutex_destroy(&ospi->lock);
+
+ return ret;
+}
+
+static void stm32_ospi_remove(struct platform_device *pdev)
+{
+ struct stm32_ospi *ospi = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ return;
+
+ spi_unregister_controller(ospi->ctrl);
+ /* Disable ospi */
+ writel_relaxed(0, ospi->regs_base + OSPI_CR);
+ mutex_destroy(&ospi->lock);
+
+ if (ospi->dma_chtx)
+ dma_release_channel(ospi->dma_chtx);
+ if (ospi->dma_chrx)
+ dma_release_channel(ospi->dma_chrx);
+
+ pm_runtime_put_sync_suspend(ospi->dev);
+ pm_runtime_force_suspend(ospi->dev);
+}
+
+static int __maybe_unused stm32_ospi_suspend(struct device *dev)
+{
+ struct stm32_ospi *ospi = dev_get_drvdata(dev);
+
+ pinctrl_pm_select_sleep_state(dev);
+
+ return pm_runtime_force_suspend(ospi->dev);
+}
+
+static int __maybe_unused stm32_ospi_resume(struct device *dev)
+{
+ struct stm32_ospi *ospi = dev_get_drvdata(dev);
+ void __iomem *regs_base = ospi->regs_base;
+ int ret;
+
+ ret = pm_runtime_force_resume(ospi->dev);
+ if (ret < 0)
+ return ret;
+
+ pinctrl_pm_select_default_state(dev);
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ return ret;
+
+ writel_relaxed(ospi->cr_reg, regs_base + OSPI_CR);
+ writel_relaxed(ospi->dcr_reg, regs_base + OSPI_DCR1);
+ pm_runtime_mark_last_busy(ospi->dev);
+ pm_runtime_put_autosuspend(ospi->dev);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_ospi_runtime_suspend(struct device *dev)
+{
+ struct stm32_ospi *ospi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(ospi->clk);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_ospi_runtime_resume(struct device *dev)
+{
+ struct stm32_ospi *ospi = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(ospi->clk);
+}
+
+static const struct dev_pm_ops stm32_ospi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_ospi_suspend, stm32_ospi_resume)
+ SET_RUNTIME_PM_OPS(stm32_ospi_runtime_suspend,
+ stm32_ospi_runtime_resume, NULL)
+};
+
+static const struct of_device_id stm32_ospi_of_match[] = {
+ { .compatible = "st,stm32mp25-ospi" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_ospi_of_match);
+
+static struct platform_driver stm32_ospi_driver = {
+ .probe = stm32_ospi_probe,
+ .remove = stm32_ospi_remove,
+ .driver = {
+ .name = "stm32-ospi",
+ .pm = &stm32_ospi_pm_ops,
+ .of_match_table = stm32_ospi_of_match,
+ },
+};
+module_platform_driver(stm32_ospi_driver);
+
+MODULE_DESCRIPTION("STMicroelectronics STM32 OCTO SPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index 540b6948b24d..9691197bbf5a 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -362,11 +362,6 @@ static int stm32_qspi_send(struct spi_device *spi, const struct spi_mem_op *op)
u32 ccr, cr;
int timeout, err = 0, err_poll_status = 0;
- dev_dbg(qspi->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
- op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
- op->dummy.buswidth, op->data.buswidth,
- op->addr.val, op->data.nbytes);
-
cr = readl_relaxed(qspi->io_base + QSPI_CR);
cr &= ~CR_PRESC_MASK & ~CR_FSEL;
cr |= FIELD_PREP(CR_PRESC_MASK, flash->presc);
diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
index 2bd25c75f881..5232483c4a3a 100644
--- a/drivers/spi/spi-zynq-qspi.c
+++ b/drivers/spi/spi-zynq-qspi.c
@@ -540,10 +540,6 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
int err = 0, i;
u8 *tmpbuf;
- dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
- op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
- op->dummy.buswidth, op->data.buswidth);
-
zynq_qspi_chipselect(mem->spi, true);
zynq_qspi_config_op(xqspi, mem->spi, op);
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index d800d79f62a7..595b6dc10845 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -82,7 +82,6 @@
#define GQSPI_GENFIFO_RX 0x00020000
#define GQSPI_GENFIFO_STRIPE 0x00040000
#define GQSPI_GENFIFO_POLL 0x00080000
-#define GQSPI_GENFIFO_EXP_START 0x00000100
#define GQSPI_FIFO_CTRL_RST_RX_FIFO_MASK 0x00000004
#define GQSPI_FIFO_CTRL_RST_TX_FIFO_MASK 0x00000002
#define GQSPI_FIFO_CTRL_RST_GEN_FIFO_MASK 0x00000001
@@ -580,6 +579,8 @@ static int zynqmp_qspi_config_op(struct zynqmp_qspi *xqspi,
zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
zynqmp_qspi_set_tapdelay(xqspi, baud_rate_val);
}
+
+ dev_dbg(xqspi->dev, "config speed %u\n", req_speed_hz);
return 0;
}
@@ -670,69 +671,77 @@ static void zynqmp_qspi_readrxfifo(struct zynqmp_qspi *xqspi, u32 size)
static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits,
u32 genfifoentry)
{
- u32 transfer_len = 0;
+ u32 transfer_len, tempcount, exponent;
+ u8 imm_data;
- if (xqspi->txbuf) {
- genfifoentry &= ~GQSPI_GENFIFO_RX;
- genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
- genfifoentry |= GQSPI_GENFIFO_TX;
- transfer_len = xqspi->bytes_to_transfer;
- } else if (xqspi->rxbuf) {
- genfifoentry &= ~GQSPI_GENFIFO_TX;
- genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
+ genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
+ if (xqspi->rxbuf) {
genfifoentry |= GQSPI_GENFIFO_RX;
if (xqspi->mode == GQSPI_MODE_DMA)
transfer_len = xqspi->dma_rx_bytes;
else
transfer_len = xqspi->bytes_to_receive;
} else {
- /* Sending dummy circles here */
- genfifoentry &= ~(GQSPI_GENFIFO_TX | GQSPI_GENFIFO_RX);
- genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
transfer_len = xqspi->bytes_to_transfer;
}
+
+ if (xqspi->txbuf)
+ genfifoentry |= GQSPI_GENFIFO_TX;
+
genfifoentry |= zynqmp_qspi_selectspimode(xqspi, nbits);
xqspi->genfifoentry = genfifoentry;
-
- if ((transfer_len) < GQSPI_GENFIFO_IMM_DATA_MASK) {
- genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
- genfifoentry |= transfer_len;
- zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry);
- } else {
- int tempcount = transfer_len;
- u32 exponent = 8; /* 2^8 = 256 */
- u8 imm_data = tempcount & 0xFF;
-
- tempcount &= ~(tempcount & 0xFF);
- /* Immediate entry */
- if (tempcount != 0) {
- /* Exponent entries */
- genfifoentry |= GQSPI_GENFIFO_EXP;
- while (tempcount != 0) {
- if (tempcount & GQSPI_GENFIFO_EXP_START) {
- genfifoentry &=
- ~GQSPI_GENFIFO_IMM_DATA_MASK;
- genfifoentry |= exponent;
- zynqmp_gqspi_write(xqspi,
- GQSPI_GEN_FIFO_OFST,
- genfifoentry);
- }
- tempcount = tempcount >> 1;
- exponent++;
- }
- }
- if (imm_data != 0) {
- genfifoentry &= ~GQSPI_GENFIFO_EXP;
- genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
- genfifoentry |= (u8)(imm_data & 0xFF);
+ dev_dbg(xqspi->dev, "genfifo %05x transfer_len %u\n",
+ genfifoentry, transfer_len);
+
+ /* Exponent entries */
+ imm_data = transfer_len;
+ tempcount = transfer_len >> 8;
+ exponent = 8;
+ genfifoentry |= GQSPI_GENFIFO_EXP;
+ while (tempcount) {
+ if (tempcount & 1)
zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST,
- genfifoentry);
- }
- }
- if (xqspi->mode == GQSPI_MODE_IO && xqspi->rxbuf) {
- /* Dummy generic FIFO entry */
- zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0);
+ genfifoentry | exponent);
+ tempcount >>= 1;
+ exponent++;
}
+
+ /* Immediate entry */
+ genfifoentry &= ~GQSPI_GENFIFO_EXP;
+ if (imm_data)
+ zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST,
+ genfifoentry | imm_data);
+
+ /* Dummy generic FIFO entry */
+ if (xqspi->mode == GQSPI_MODE_IO && xqspi->rxbuf)
+ zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0);
+}
+
+/**
+ * zynqmp_qspi_disable_dma() - Disable DMA mode
+ * @xqspi: GQSPI instance
+ */
+static void zynqmp_qspi_disable_dma(struct zynqmp_qspi *xqspi)
+{
+ u32 config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
+
+ config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+ xqspi->mode = GQSPI_MODE_IO;
+}
+
+/**
+ * zynqmp_qspi_enable_dma() - Enable DMA mode
+ * @xqspi: GQSPI instance
+ */
+static void zynqmp_qspi_enable_dma(struct zynqmp_qspi *xqspi)
+{
+ u32 config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
+
+ config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
+ config_reg |= GQSPI_CFG_MODE_EN_DMA_MASK;
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+ xqspi->mode = GQSPI_MODE_DMA;
}
/**
@@ -744,7 +753,7 @@ static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits,
*/
static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi)
{
- u32 config_reg, genfifoentry;
+ u32 genfifoentry;
dma_unmap_single(xqspi->dev, xqspi->dma_addr,
xqspi->dma_rx_bytes, DMA_FROM_DEVICE);
@@ -758,9 +767,7 @@ static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi)
if (xqspi->bytes_to_receive > 0) {
/* Switch to IO mode,for remaining bytes to receive */
- config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
- config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
- zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+ zynqmp_qspi_disable_dma(xqspi);
/* Initiate the transfer of remaining bytes */
genfifoentry = xqspi->genfifoentry;
@@ -799,7 +806,6 @@ static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi)
static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
{
struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_id;
- irqreturn_t ret = IRQ_NONE;
u32 status, mask, dma_status = 0;
status = zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST);
@@ -814,27 +820,24 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
dma_status);
}
- if (mask & GQSPI_ISR_TXNOT_FULL_MASK) {
+ if (!mask && !dma_status)
+ return IRQ_NONE;
+
+ if (mask & GQSPI_ISR_TXNOT_FULL_MASK)
zynqmp_qspi_filltxfifo(xqspi, GQSPI_TX_FIFO_FILL);
- ret = IRQ_HANDLED;
- }
- if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK) {
+ if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK)
zynqmp_process_dma_irq(xqspi);
- ret = IRQ_HANDLED;
- } else if (!(mask & GQSPI_IER_RXEMPTY_MASK) &&
- (mask & GQSPI_IER_GENFIFOEMPTY_MASK)) {
+ else if (!(mask & GQSPI_IER_RXEMPTY_MASK) &&
+ (mask & GQSPI_IER_GENFIFOEMPTY_MASK))
zynqmp_qspi_readrxfifo(xqspi, GQSPI_RX_FIFO_FILL);
- ret = IRQ_HANDLED;
- }
if (xqspi->bytes_to_receive == 0 && xqspi->bytes_to_transfer == 0 &&
((status & GQSPI_IRQ_MASK) == GQSPI_IRQ_MASK)) {
zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_ISR_IDR_MASK);
complete(&xqspi->data_completion);
- ret = IRQ_HANDLED;
}
- return ret;
+ return IRQ_HANDLED;
}
/**
@@ -845,17 +848,14 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
*/
static int zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
{
- u32 rx_bytes, rx_rem, config_reg;
+ u32 rx_bytes, rx_rem;
dma_addr_t addr;
u64 dma_align = (u64)(uintptr_t)xqspi->rxbuf;
if (xqspi->bytes_to_receive < 8 ||
((dma_align & GQSPI_DMA_UNALIGN) != 0x0)) {
/* Setting to IO mode */
- config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
- config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
- zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
- xqspi->mode = GQSPI_MODE_IO;
+ zynqmp_qspi_disable_dma(xqspi);
xqspi->dma_rx_bytes = 0;
return 0;
}
@@ -878,14 +878,7 @@ static int zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_ADDR_MSB_OFST,
((u32)addr) & 0xfff);
- /* Enabling the DMA mode */
- config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
- config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
- config_reg |= GQSPI_CFG_MODE_EN_DMA_MASK;
- zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
-
- /* Switch to DMA mode */
- xqspi->mode = GQSPI_MODE_DMA;
+ zynqmp_qspi_enable_dma(xqspi);
/* Write the number of bytes to transfer */
zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_SIZE_OFST, rx_bytes);
@@ -905,18 +898,10 @@ static int zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
static void zynqmp_qspi_write_op(struct zynqmp_qspi *xqspi, u8 tx_nbits,
u32 genfifoentry)
{
- u32 config_reg;
-
zynqmp_qspi_fillgenfifo(xqspi, tx_nbits, genfifoentry);
zynqmp_qspi_filltxfifo(xqspi, GQSPI_TXD_DEPTH);
- if (xqspi->mode == GQSPI_MODE_DMA) {
- config_reg = zynqmp_gqspi_read(xqspi,
- GQSPI_CONFIG_OFST);
- config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
- zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
- config_reg);
- xqspi->mode = GQSPI_MODE_IO;
- }
+ if (xqspi->mode == GQSPI_MODE_DMA)
+ zynqmp_qspi_disable_dma(xqspi);
}
/**
@@ -1059,18 +1044,14 @@ static unsigned long zynqmp_qspi_timeout(struct zynqmp_qspi *xqspi, u8 bits,
static int zynqmp_qspi_exec_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
- struct zynqmp_qspi *xqspi = spi_controller_get_devdata
- (mem->spi->controller);
+ struct zynqmp_qspi *xqspi =
+ spi_controller_get_devdata(mem->spi->controller);
unsigned long timeout;
int err = 0, i;
u32 genfifoentry = 0;
u16 opcode = op->cmd.opcode;
u64 opaddr;
- dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
- op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
- op->dummy.buswidth, op->data.buswidth);
-
mutex_lock(&xqspi->op_lock);
zynqmp_qspi_config_op(xqspi, op);
zynqmp_qspi_chipselect(mem->spi, false);
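
The reworked zynqmp_qspi_fillgenfifo() above splits a transfer length into one GENFIFO exponent entry per set bit above bit 7 (each worth 2^exp bytes) plus a single immediate entry for the low 8 bits. A minimal userspace sketch of that decomposition, with the GENFIFO register writes replaced by printf() and no Xilinx register definitions assumed:

#include <stdio.h>
#include <stdint.h>

static void emit_genfifo_entries(uint32_t transfer_len)
{
	uint8_t imm_data = transfer_len & 0xff;	/* immediate entry, low 8 bits */
	uint32_t tempcount = transfer_len >> 8;	/* remaining length, in 256-byte units */
	uint32_t exponent = 8;			/* 2^8 = 256 */

	while (tempcount) {
		if (tempcount & 1)
			printf("EXP entry: 2^%u = %u bytes\n",
			       exponent, 1u << exponent);
		tempcount >>= 1;
		exponent++;
	}
	if (imm_data)
		printf("IMM entry: %u bytes\n", imm_data);
}

int main(void)
{
	emit_genfifo_entries(833);	/* 833 = 512 + 256 + 65 */
	return 0;
}

For 833 bytes this prints two exponent entries (512 and 256 bytes) and one immediate entry of 65 bytes, matching the total the driver pushes to the GENFIFO.
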
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index a7a4647717d4..90e27729ef6b 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -31,6 +31,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/sched/rt.h>
#include <linux/slab.h>
+#include <linux/spi/offload/types.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <uapi/linux/sched/types.h>
@@ -42,7 +43,7 @@ EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);
#include "internals.h"
-static DEFINE_IDR(spi_master_idr);
+static DEFINE_IDR(spi_controller_idr);
static void spidev_release(struct device *dev)
{
@@ -305,7 +306,7 @@ static const struct attribute_group spi_controller_statistics_group = {
.attrs = spi_controller_statistics_attrs,
};
-static const struct attribute_group *spi_master_groups[] = {
+static const struct attribute_group *spi_controller_groups[] = {
&spi_controller_statistics_group,
NULL,
};
@@ -1106,7 +1107,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
spi_toggle_csgpiod(spi, idx, enable, activate);
}
}
- /* Some SPI masters need both GPIO CS & slave_select */
+ /* Some SPI controllers need both GPIO CS & ->set_cs() */
if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
spi->controller->set_cs)
spi->controller->set_cs(spi, !enable);
@@ -1495,10 +1496,7 @@ static void _spi_transfer_delay_ns(u32 ns)
} else {
u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
- if (us <= 10)
- udelay(us);
- else
- usleep_range(us, us + DIV_ROUND_UP(us, 10));
+ fsleep(us);
}
}
@@ -2534,7 +2532,7 @@ err_out:
* @ctlr: Pointer to spi_controller device
*
* Registers an spi_device for each child node of controller node which
- * represents a valid SPI slave.
+ * represents a valid SPI target device.
*/
static void of_register_spi_devices(struct spi_controller *ctlr)
{
@@ -2819,7 +2817,7 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
if (!lookup.max_speed_hz &&
ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
device_match_acpi_handle(lookup.ctlr->dev.parent, parent_handle)) {
- /* Apple does not use _CRS but nested devices for SPI slaves */
+ /* Apple does not use _CRS but nested devices for SPI target devices */
acpi_spi_parse_apple_properties(adev, &lookup);
}
@@ -2911,7 +2909,7 @@ static void acpi_register_spi_devices(struct spi_controller *ctlr)
SPI_ACPI_ENUMERATE_MAX_DEPTH,
acpi_spi_add_device, NULL, ctlr, NULL);
if (ACPI_FAILURE(status))
- dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
+ dev_warn(&ctlr->dev, "failed to enumerate SPI target devices\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
@@ -2925,16 +2923,15 @@ static void spi_controller_release(struct device *dev)
kfree(ctlr);
}
-static const struct class spi_master_class = {
+static const struct class spi_controller_class = {
.name = "spi_master",
.dev_release = spi_controller_release,
- .dev_groups = spi_master_groups,
+ .dev_groups = spi_controller_groups,
};
#ifdef CONFIG_SPI_SLAVE
/**
- * spi_target_abort - abort the ongoing transfer request on an SPI slave
- * controller
+ * spi_target_abort - abort the ongoing transfer request on an SPI target controller
* @spi: device used for the current transfer
*/
int spi_target_abort(struct spi_device *spi)
@@ -2954,9 +2951,13 @@ static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
struct spi_controller *ctlr = container_of(dev, struct spi_controller,
dev);
struct device *child;
+ int ret;
child = device_find_any_child(&ctlr->dev);
- return sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
+ ret = sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
+ put_device(child);
+
+ return ret;
}
static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
@@ -2975,13 +2976,13 @@ static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
child = device_find_any_child(&ctlr->dev);
if (child) {
- /* Remove registered slave */
+ /* Remove registered target device */
device_unregister(child);
put_device(child);
}
if (strcmp(name, "(null)")) {
- /* Register new slave */
+ /* Register new target device */
spi = spi_alloc_device(ctlr);
if (!spi)
return -ENOMEM;
@@ -3000,40 +3001,40 @@ static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(slave);
-static struct attribute *spi_slave_attrs[] = {
+static struct attribute *spi_target_attrs[] = {
&dev_attr_slave.attr,
NULL,
};
-static const struct attribute_group spi_slave_group = {
- .attrs = spi_slave_attrs,
+static const struct attribute_group spi_target_group = {
+ .attrs = spi_target_attrs,
};
-static const struct attribute_group *spi_slave_groups[] = {
+static const struct attribute_group *spi_target_groups[] = {
&spi_controller_statistics_group,
- &spi_slave_group,
+ &spi_target_group,
NULL,
};
-static const struct class spi_slave_class = {
+static const struct class spi_target_class = {
.name = "spi_slave",
.dev_release = spi_controller_release,
- .dev_groups = spi_slave_groups,
+ .dev_groups = spi_target_groups,
};
#else
-extern struct class spi_slave_class; /* dummy */
+extern struct class spi_target_class; /* dummy */
#endif
/**
- * __spi_alloc_controller - allocate an SPI master or slave controller
+ * __spi_alloc_controller - allocate an SPI host or target controller
* @dev: the controller, possibly using the platform_bus
* @size: how much zeroed driver-private data to allocate; the pointer to this
* memory is in the driver_data field of the returned device, accessible
* with spi_controller_get_devdata(); the memory is cacheline aligned;
* drivers granting DMA access to portions of their private data need to
* round up @size using ALIGN(size, dma_get_cache_alignment()).
- * @slave: flag indicating whether to allocate an SPI master (false) or SPI
- * slave (true) controller
+ * @target: flag indicating whether to allocate an SPI host (false) or SPI target (true)
+ * controller
* Context: can sleep
*
* This call is used only by SPI controller drivers, which are the
@@ -3050,7 +3051,7 @@ extern struct class spi_slave_class; /* dummy */
* Return: the SPI controller structure on success, else NULL.
*/
struct spi_controller *__spi_alloc_controller(struct device *dev,
- unsigned int size, bool slave)
+ unsigned int size, bool target)
{
struct spi_controller *ctlr;
size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
@@ -3071,11 +3072,11 @@ struct spi_controller *__spi_alloc_controller(struct device *dev,
mutex_init(&ctlr->add_lock);
ctlr->bus_num = -1;
ctlr->num_chipselect = 1;
- ctlr->slave = slave;
- if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
- ctlr->dev.class = &spi_slave_class;
+ ctlr->target = target;
+ if (IS_ENABLED(CONFIG_SPI_SLAVE) && target)
+ ctlr->dev.class = &spi_target_class;
else
- ctlr->dev.class = &spi_master_class;
+ ctlr->dev.class = &spi_controller_class;
ctlr->dev.parent = dev;
pm_suspend_ignore_children(&ctlr->dev, true);
spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
@@ -3093,7 +3094,7 @@ static void devm_spi_release_controller(struct device *dev, void *ctlr)
* __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
* @dev: physical device of SPI controller
* @size: how much zeroed driver-private data to allocate
- * @slave: whether to allocate an SPI master (false) or SPI slave (true)
+ * @target: whether to allocate an SPI host (false) or SPI target (true) controller
* Context: can sleep
*
* Allocate an SPI controller and automatically release a reference on it
@@ -3106,7 +3107,7 @@ static void devm_spi_release_controller(struct device *dev, void *ctlr)
*/
struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
unsigned int size,
- bool slave)
+ bool target)
{
struct spi_controller **ptr, *ctlr;
@@ -3115,7 +3116,7 @@ struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
if (!ptr)
return NULL;
- ctlr = __spi_alloc_controller(dev, size, slave);
+ ctlr = __spi_alloc_controller(dev, size, target);
if (ctlr) {
ctlr->devm_allocated = true;
*ptr = ctlr;
@@ -3129,8 +3130,8 @@ struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
/**
- * spi_get_gpio_descs() - grab chip select GPIOs for the master
- * @ctlr: The SPI master to grab GPIO descriptors for
+ * spi_get_gpio_descs() - grab chip select GPIOs for the controller
+ * @ctlr: The SPI controller to grab GPIO descriptors for
*/
static int spi_get_gpio_descs(struct spi_controller *ctlr)
{
@@ -3228,7 +3229,7 @@ static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int e
int id;
mutex_lock(&board_lock);
- id = idr_alloc(&spi_master_idr, ctlr, start, end, GFP_KERNEL);
+ id = idr_alloc(&spi_controller_idr, ctlr, start, end, GFP_KERNEL);
mutex_unlock(&board_lock);
if (WARN(id < 0, "couldn't get idr"))
return id == -ENOSPC ? -EBUSY : id;
@@ -3377,7 +3378,7 @@ destroy_queue:
spi_destroy_queue(ctlr);
free_bus_id:
mutex_lock(&board_lock);
- idr_remove(&spi_master_idr, ctlr->bus_num);
+ idr_remove(&spi_controller_idr, ctlr->bus_num);
mutex_unlock(&board_lock);
return status;
}
@@ -3389,8 +3390,7 @@ static void devm_spi_unregister(struct device *dev, void *res)
}
/**
- * devm_spi_register_controller - register managed SPI host or target
- * controller
+ * devm_spi_register_controller - register managed SPI host or target controller
* @dev: device managing SPI controller
* @ctlr: initialized controller, originally from spi_alloc_host() or
* spi_alloc_target()
@@ -3430,7 +3430,7 @@ static int __unregister(struct device *dev, void *null)
}
/**
- * spi_unregister_controller - unregister SPI master or slave controller
+ * spi_unregister_controller - unregister SPI host or target controller
* @ctlr: the controller being unregistered
* Context: can sleep
*
@@ -3454,7 +3454,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
/* First make sure that this controller was ever added */
mutex_lock(&board_lock);
- found = idr_find(&spi_master_idr, id);
+ found = idr_find(&spi_controller_idr, id);
mutex_unlock(&board_lock);
if (ctlr->queued) {
if (spi_destroy_queue(ctlr))
@@ -3469,7 +3469,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
/* Free bus id */
mutex_lock(&board_lock);
if (found == ctlr)
- idr_remove(&spi_master_idr, id);
+ idr_remove(&spi_controller_idr, id);
mutex_unlock(&board_lock);
if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
@@ -4158,6 +4158,15 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
if (_spi_xfer_word_delay_update(xfer, spi))
return -EINVAL;
+
+ /* Make sure controller supports required offload features. */
+ if (xfer->offload_flags) {
+ if (!message->offload)
+ return -EINVAL;
+
+ if (xfer->offload_flags & ~message->offload->xfer_flags)
+ return -EINVAL;
+ }
}
message->status = -EINPROGRESS;
@@ -4613,7 +4622,7 @@ EXPORT_SYMBOL_GPL(spi_sync_locked);
/**
* spi_bus_lock - obtain a lock for exclusive SPI bus usage
- * @ctlr: SPI bus master that should be locked for exclusive bus access
+ * @ctlr: SPI bus controller that should be locked for exclusive bus access
* Context: can sleep
*
* This call may only be used from a context that may sleep. The sleep
@@ -4644,7 +4653,7 @@ EXPORT_SYMBOL_GPL(spi_bus_lock);
/**
* spi_bus_unlock - release the lock for exclusive SPI bus usage
- * @ctlr: SPI bus master that was locked for exclusive bus access
+ * @ctlr: SPI bus controller that was locked for exclusive bus access
* Context: can sleep
*
* This call may only be used from a context that may sleep. The sleep
@@ -4761,9 +4770,9 @@ static struct spi_controller *of_find_spi_controller_by_node(struct device_node
{
struct device *dev;
- dev = class_find_device_by_of_node(&spi_master_class, node);
+ dev = class_find_device_by_of_node(&spi_controller_class, node);
if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
- dev = class_find_device_by_of_node(&spi_slave_class, node);
+ dev = class_find_device_by_of_node(&spi_target_class, node);
if (!dev)
return NULL;
@@ -4843,10 +4852,10 @@ struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev
{
struct device *dev;
- dev = class_find_device(&spi_master_class, NULL, adev,
+ dev = class_find_device(&spi_controller_class, NULL, adev,
spi_acpi_controller_match);
if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
- dev = class_find_device(&spi_slave_class, NULL, adev,
+ dev = class_find_device(&spi_target_class, NULL, adev,
spi_acpi_controller_match);
if (!dev)
return NULL;
@@ -4916,12 +4925,12 @@ static int __init spi_init(void)
if (status < 0)
goto err1;
- status = class_register(&spi_master_class);
+ status = class_register(&spi_controller_class);
if (status < 0)
goto err2;
if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
- status = class_register(&spi_slave_class);
+ status = class_register(&spi_target_class);
if (status < 0)
goto err3;
}
@@ -4934,7 +4943,7 @@ static int __init spi_init(void)
return 0;
err3:
- class_unregister(&spi_master_class);
+ class_unregister(&spi_controller_class);
err2:
bus_unregister(&spi_bus_type);
err1:
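
The __spi_validate() hunk above rejects a transfer whose offload flags are not all advertised by the message's offload provider; the test is a plain "required bits must be a subset of supported bits" mask check. A small sketch with made-up flag values (XFER_FLAG_TX/RX are not real kernel symbols):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define XFER_FLAG_TX	0x1	/* hypothetical capability bits */
#define XFER_FLAG_RX	0x2

static bool offload_flags_ok(uint32_t required, uint32_t supported)
{
	/* any required bit missing from 'supported' fails, like the -EINVAL path */
	return (required & ~supported) == 0;
}

int main(void)
{
	printf("%d\n", offload_flags_ok(XFER_FLAG_TX, XFER_FLAG_TX | XFER_FLAG_RX));	/* 1 */
	printf("%d\n", offload_flags_ok(XFER_FLAG_RX, XFER_FLAG_TX));			/* 0 */
	return 0;
}
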
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 58ae4304fdab..6108959c28d9 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -706,6 +706,7 @@ static const struct spi_device_id spidev_spi_ids[] = {
{ .name = /* cisco */ "spi-petra" },
{ .name = /* dh */ "dhcom-board" },
{ .name = /* elgin */ "jg10309-01" },
+ { .name = /* gocontroll */ "moduline-module-slot" },
{ .name = /* lineartechnology */ "ltc2488" },
{ .name = /* lwn */ "bk4" },
{ .name = /* lwn */ "bk4-spi" },
@@ -737,6 +738,7 @@ static const struct of_device_id spidev_dt_ids[] = {
{ .compatible = "cisco,spi-petra", .data = &spidev_of_check },
{ .compatible = "dh,dhcom-board", .data = &spidev_of_check },
{ .compatible = "elgin,jg10309-01", .data = &spidev_of_check },
+ { .compatible = "gocontroll,moduline-module-slot", .data = &spidev_of_check},
{ .compatible = "lineartechnology,ltc2488", .data = &spidev_of_check },
{ .compatible = "lwn,bk4", .data = &spidev_of_check },
{ .compatible = "lwn,bk4-spi", .data = &spidev_of_check },
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 4cfa494243b9..da9c64152a60 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -337,9 +337,7 @@ static void fbtft_deferred_io(struct fb_info *info, struct list_head *pagereflis
list_for_each_entry(pageref, pagereflist, list) {
y_low = pageref->offset / info->fix.line_length;
y_high = (pageref->offset + PAGE_SIZE - 1) / info->fix.line_length;
- dev_dbg(info->device,
- "page->index=%lu y_low=%d y_high=%d\n",
- pageref->page->index, y_low, y_high);
+ dev_dbg(info->device, "y_low=%d y_high=%d\n", y_low, y_high);
if (y_high > info->var.yres - 1)
y_high = info->var.yres - 1;
if (y_low < dirty_lines_start)
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
index 049246774ced..6146555fe9cf 100644
--- a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
+++ b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
@@ -172,10 +172,10 @@ void atomisp_unregister_subdev(struct v4l2_subdev *subdev);
#define IS_BYT __IS_SOC(INTEL_ATOM_SILVERMONT)
#define IS_CHT __IS_SOC(INTEL_ATOM_AIRMONT)
#define IS_MRFD __IS_SOC(INTEL_ATOM_SILVERMONT_MID)
-#define IS_MOFD __IS_SOC(INTEL_ATOM_AIRMONT_MID)
+#define IS_MOFD __IS_SOC(INTEL_ATOM_SILVERMONT_MID2)
/* Both CHT and MOFD come with ISP2401 */
#define IS_ISP2401 __IS_SOCS(INTEL_ATOM_AIRMONT, \
- INTEL_ATOM_AIRMONT_MID)
+ INTEL_ATOM_SILVERMONT_MID2)
#endif /* ATOMISP_PLATFORM_H_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
index 1a960a01854f..1fb2ba819de3 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
@@ -34,7 +34,7 @@ static const char *CARD = "ATOM ISP"; /* max size 31 */
* FIXME: ISP should not know beforehand all CIDs supported by sensor.
* Instead, it needs to propagate to sensor unknown CIDs.
*/
-static struct v4l2_queryctrl ci_v4l2_controls[] = {
+static struct v4l2_query_ext_ctrl ci_v4l2_controls[] = {
{
.id = V4L2_CID_AUTO_WHITE_BALANCE,
.type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -1140,31 +1140,34 @@ static int atomisp_s_ctrl(struct file *file, void *fh,
/*
* To query the attributes of a control.
- * applications set the id field of a struct v4l2_queryctrl and call the
+ * applications set the id field of a struct v4l2_query_ext_ctrl and call the
* this ioctl with a pointer to this structure. The driver fills
* the rest of the structure.
*/
-static int atomisp_queryctl(struct file *file, void *fh,
- struct v4l2_queryctrl *qc)
+static int atomisp_query_ext_ctrl(struct file *file, void *fh,
+ struct v4l2_query_ext_ctrl *qc)
{
- int i, ret = -EINVAL;
+ int i;
+ /* TODO: implement V4L2_CTRL_FLAG_NEXT_CTRL */
if (qc->id & V4L2_CTRL_FLAG_NEXT_CTRL)
- return ret;
+ return -EINVAL;
for (i = 0; i < ctrls_num; i++) {
if (ci_v4l2_controls[i].id == qc->id) {
- memcpy(qc, &ci_v4l2_controls[i],
- sizeof(struct v4l2_queryctrl));
- qc->reserved[0] = 0;
- ret = 0;
- break;
+ *qc = ci_v4l2_controls[i];
+ qc->elems = 1;
+ qc->elem_size = 4;
+ return 0;
}
}
- if (ret != 0)
- qc->flags = V4L2_CTRL_FLAG_DISABLED;
- return ret;
+ /*
+ * This is probably not needed, but this flag has been set for
+ * many kernel versions. Leave it to avoid breaking any apps.
+ */
+ qc->flags = V4L2_CTRL_FLAG_DISABLED;
+ return -EINVAL;
}
static int atomisp_camera_g_ext_ctrls(struct file *file, void *fh,
@@ -1561,9 +1564,7 @@ const struct v4l2_ioctl_ops atomisp_ioctl_ops = {
.vidioc_enum_input = atomisp_enum_input,
.vidioc_g_input = atomisp_g_input,
.vidioc_s_input = atomisp_s_input,
- .vidioc_queryctrl = atomisp_queryctl,
- .vidioc_s_ctrl = atomisp_s_ctrl,
- .vidioc_g_ctrl = atomisp_g_ctrl,
+ .vidioc_query_ext_ctrl = atomisp_query_ext_ctrl,
.vidioc_s_ext_ctrls = atomisp_s_ext_ctrls,
.vidioc_g_ext_ctrls = atomisp_g_ext_ctrls,
.vidioc_enum_framesizes = atomisp_enum_framesizes,
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c
index a4e019e74e6b..a1bea8bd1a39 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c
@@ -230,7 +230,8 @@ void ia_css_sdis_clear_coefficients(
int
ia_css_get_dvs_statistics(
struct ia_css_dvs_statistics *host_stats,
- const struct ia_css_isp_dvs_statistics *isp_stats) {
+ const struct ia_css_isp_dvs_statistics *isp_stats)
+{
struct ia_css_isp_dvs_statistics_map *map;
int ret = 0;
@@ -240,13 +241,11 @@ ia_css_get_dvs_statistics(
assert(isp_stats);
map = ia_css_isp_dvs_statistics_map_allocate(isp_stats, NULL);
- if (map)
- {
+ if (map) {
hmm_load(isp_stats->data_ptr, map->data_ptr, isp_stats->size);
ia_css_translate_dvs_statistics(host_stats, map);
ia_css_isp_dvs_statistics_map_free(map);
- } else
- {
+ } else {
IA_CSS_ERROR("out of memory");
ret = -ENOMEM;
}
diff --git a/drivers/staging/media/imx/imx-media-vdic.c b/drivers/staging/media/imx/imx-media-vdic.c
index 09da4103a8db..86f2b30cb06c 100644
--- a/drivers/staging/media/imx/imx-media-vdic.c
+++ b/drivers/staging/media/imx/imx-media-vdic.c
@@ -180,60 +180,6 @@ out:
return ret;
}
-/*
- * This function is currently unused, but will be called when the
- * output/mem2mem device at the IDMAC input pad sends us a new
- * buffer. It kicks off the IDMAC read channels to bring in the
- * buffer fields from memory and begin the conversions.
- */
-static void __maybe_unused prepare_vdi_in_buffers(struct vdic_priv *priv,
- struct imx_media_buffer *curr)
-{
- dma_addr_t prev_phys, curr_phys, next_phys;
- struct imx_media_buffer *prev;
- struct vb2_buffer *curr_vb, *prev_vb;
- u32 fs = priv->field_size;
- u32 is = priv->in_stride;
-
- /* current input buffer is now previous */
- priv->prev_in_buf = priv->curr_in_buf;
- priv->curr_in_buf = curr;
- prev = priv->prev_in_buf ? priv->prev_in_buf : curr;
-
- prev_vb = &prev->vbuf.vb2_buf;
- curr_vb = &curr->vbuf.vb2_buf;
-
- switch (priv->fieldtype) {
- case V4L2_FIELD_SEQ_TB:
- case V4L2_FIELD_SEQ_BT:
- prev_phys = vb2_dma_contig_plane_dma_addr(prev_vb, 0) + fs;
- curr_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0);
- next_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0) + fs;
- break;
- case V4L2_FIELD_INTERLACED_TB:
- case V4L2_FIELD_INTERLACED_BT:
- case V4L2_FIELD_INTERLACED:
- prev_phys = vb2_dma_contig_plane_dma_addr(prev_vb, 0) + is;
- curr_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0);
- next_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0) + is;
- break;
- default:
- /*
- * can't get here, priv->fieldtype can only be one of
- * the above. This is to quiet smatch errors.
- */
- return;
- }
-
- ipu_cpmem_set_buffer(priv->vdi_in_ch_p, 0, prev_phys);
- ipu_cpmem_set_buffer(priv->vdi_in_ch, 0, curr_phys);
- ipu_cpmem_set_buffer(priv->vdi_in_ch_n, 0, next_phys);
-
- ipu_idmac_select_buffer(priv->vdi_in_ch_p, 0);
- ipu_idmac_select_buffer(priv->vdi_in_ch, 0);
- ipu_idmac_select_buffer(priv->vdi_in_ch_n, 0);
-}
-
static int setup_vdi_channel(struct vdic_priv *priv,
struct ipuv3_channel *channel,
dma_addr_t phys0, dma_addr_t phys1)
diff --git a/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h b/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
index 4aa2797f5e3c..8b85524beb59 100644
--- a/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
+++ b/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
@@ -322,7 +322,8 @@ struct ipu3_uapi_ae_config {
* 0: positive, 1: negative, default 0.
* @y_calc: Pre-processing that converts Bayer quad to RGB+Y values to be
* used for building histogram. Range [0, 32], default 8.
- * Rule:
+ *
+ * Rule:
* y_gen_rate_gr + y_gen_rate_r + y_gen_rate_b + y_gen_rate_gb = 32
* A single Y is calculated based on sum of Gr/R/B/Gb based on
* their contribution ratio.
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 16e3ded98c32..832588f21f91 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -212,7 +212,7 @@ int iscsi_target_check_login_request(
if ((login_req->max_version != login->version_max) ||
(login_req->min_version != login->version_min)) {
- pr_err("Login request changed Version Max/Nin"
+ pr_err("Login request changed Version Max/Min"
" unexpectedly to 0x%02x/0x%02x, protocol error\n",
login_req->max_version, login_req->min_version);
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
@@ -557,7 +557,7 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
* before initial PDU processing in iscsi_target_start_negotiation()
* has completed, go ahead and retry until it's cleared.
*
- * Otherwise if the TCP connection drops while this is occuring,
+ * Otherwise if the TCP connection drops while this is occurring,
* iscsi_target_start_negotiation() will detect the failure, call
* cancel_delayed_work_sync(&conn->login_work), and cleanup the
* remaining iscsi connection resources from iscsi_np process context.
@@ -1050,7 +1050,7 @@ static int iscsi_target_do_login(struct iscsit_conn *conn, struct iscsi_login *l
/*
* Check to make sure the TCP connection has not
* dropped asynchronously while session reinstatement
- * was occuring in this kthread context, before
+ * was occurring in this kthread context, before
* transitioning to full feature phase operation.
*/
if (iscsi_target_sk_check_close(conn))
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 761c511aea07..c7b7da629741 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -176,7 +176,7 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
memset(tl_cmd, 0, sizeof(*tl_cmd));
tl_cmd->sc = sc;
- tl_cmd->sc_cmd_tag = scsi_cmd_to_rq(sc)->tag;
+ tl_cmd->sc_cmd_tag = blk_mq_unique_tag(scsi_cmd_to_rq(sc));
tcm_loop_target_queue_cmd(tl_cmd);
return 0;
@@ -242,7 +242,8 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
- scsi_cmd_to_rq(sc)->tag, TMR_ABORT_TASK);
+ blk_mq_unique_tag(scsi_cmd_to_rq(sc)),
+ TMR_ABORT_TASK);
return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}
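
The tcm_loop changes above switch from the per-queue request tag to blk_mq_unique_tag(), which folds the hardware queue index into the upper 16 bits so tags stay unique across queues (the kernel provides blk_mq_unique_tag_to_hwq()/_to_tag() to split them again). A standalone sketch of that layout, with arbitrary example values:

#include <stdint.h>
#include <stdio.h>

#define UNIQUE_TAG_BITS	16
#define UNIQUE_TAG_MASK	((1u << UNIQUE_TAG_BITS) - 1)

static uint32_t unique_tag(uint16_t hwq, uint16_t tag)
{
	/* hw queue index in the upper half, per-queue tag in the lower half */
	return ((uint32_t)hwq << UNIQUE_TAG_BITS) | tag;
}

int main(void)
{
	uint32_t t = unique_tag(2, 37);

	printf("unique=%#x hwq=%u tag=%u\n",
	       t, t >> UNIQUE_TAG_BITS, t & UNIQUE_TAG_MASK);
	return 0;
}
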
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index c40217f44b1b..66804bf1ee32 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -123,7 +123,7 @@ static ssize_t target_core_item_dbroot_store(struct config_item *item,
goto unlock;
}
- read_bytes = snprintf(db_root_stage, DB_ROOT_LEN, "%s", page);
+ read_bytes = scnprintf(db_root_stage, DB_ROOT_LEN, "%s", page);
if (!read_bytes)
goto unlock;
@@ -143,7 +143,7 @@ static ssize_t target_core_item_dbroot_store(struct config_item *item,
}
filp_close(fp, NULL);
- strncpy(db_root, db_root_stage, read_bytes);
+ strscpy(db_root, db_root_stage);
pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
r = read_bytes;
@@ -3664,7 +3664,7 @@ static void target_init_dbroot(void)
}
filp_close(fp, NULL);
- strncpy(db_root, db_root_stage, DB_ROOT_LEN);
+ strscpy(db_root, db_root_stage);
pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
}
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index d1ae3df069a4..cc2da086f96e 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1078,8 +1078,8 @@ passthrough_parse_cdb(struct se_cmd *cmd,
if (!dev->dev_attrib.emulate_pr &&
((cdb[0] == PERSISTENT_RESERVE_IN) ||
(cdb[0] == PERSISTENT_RESERVE_OUT) ||
- (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
- (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
+ (cdb[0] == RELEASE_6 || cdb[0] == RELEASE_10) ||
+ (cdb[0] == RESERVE_6 || cdb[0] == RESERVE_10))) {
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
@@ -1101,7 +1101,7 @@ passthrough_parse_cdb(struct se_cmd *cmd,
return target_cmd_size_check(cmd, size);
}
- if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
+ if (cdb[0] == RELEASE_6 || cdb[0] == RELEASE_10) {
cmd->execute_cmd = target_scsi2_reservation_release;
if (cdb[0] == RELEASE_10)
size = get_unaligned_be16(&cdb[7]);
@@ -1109,7 +1109,7 @@ passthrough_parse_cdb(struct se_cmd *cmd,
size = cmd->data_length;
return target_cmd_size_check(cmd, size);
}
- if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
+ if (cdb[0] == RESERVE_6 || cdb[0] == RESERVE_10) {
cmd->execute_cmd = target_scsi2_reservation_reserve;
if (cdb[0] == RESERVE_10)
size = get_unaligned_be16(&cdb[7]);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index c8dc92a7d63e..73564efd11d2 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -167,18 +167,6 @@ static int iblock_configure_device(struct se_device *dev)
break;
}
- if (dev->dev_attrib.pi_prot_type) {
- struct bio_set *bs = &ib_dev->ibd_bio_set;
-
- if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
- pr_err("Unable to allocate bioset for PI\n");
- ret = -ENOMEM;
- goto out_blkdev_put;
- }
- pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
- &bs->bio_integrity_pool);
- }
-
dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
return 0;
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 4f4ad6af416c..34cf2c399b39 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -91,7 +91,7 @@ target_scsi2_reservation_check(struct se_cmd *cmd)
switch (cmd->t_task_cdb[0]) {
case INQUIRY:
- case RELEASE:
+ case RELEASE_6:
case RELEASE_10:
return 0;
default:
@@ -418,12 +418,12 @@ static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type,
return -EINVAL;
}
break;
- case RELEASE:
+ case RELEASE_6:
case RELEASE_10:
/* Handled by CRH=1 in target_scsi2_reservation_release() */
ret = 0;
break;
- case RESERVE:
+ case RESERVE_6:
case RESERVE_10:
/* Handled by CRH=1 in target_scsi2_reservation_reserve() */
ret = 0;
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index ea14a3835681..0a02492bef70 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -1674,9 +1674,9 @@ static bool tcm_is_pr_enabled(struct target_opcode_descriptor *descr,
return true;
switch (descr->opcode) {
- case RESERVE:
+ case RESERVE_6:
case RESERVE_10:
- case RELEASE:
+ case RELEASE_6:
case RELEASE_10:
/*
* The pr_ops which are used by the backend modules don't
@@ -1828,9 +1828,9 @@ static struct target_opcode_descriptor tcm_opcode_pro_register_move = {
static struct target_opcode_descriptor tcm_opcode_release = {
.support = SCSI_SUPPORT_FULL,
- .opcode = RELEASE,
+ .opcode = RELEASE_6,
.cdb_size = 6,
- .usage_bits = {RELEASE, 0x00, 0x00, 0x00,
+ .usage_bits = {RELEASE_6, 0x00, 0x00, 0x00,
0x00, SCSI_CONTROL_MASK},
.enabled = tcm_is_pr_enabled,
};
@@ -1847,9 +1847,9 @@ static struct target_opcode_descriptor tcm_opcode_release10 = {
static struct target_opcode_descriptor tcm_opcode_reserve = {
.support = SCSI_SUPPORT_FULL,
- .opcode = RESERVE,
+ .opcode = RESERVE_6,
.cdb_size = 6,
- .usage_bits = {RESERVE, 0x00, 0x00, 0x00,
+ .usage_bits = {RESERVE_6, 0x00, 0x00, 0x00,
0x00, SCSI_CONTROL_MASK},
.enabled = tcm_is_pr_enabled,
};
@@ -2151,8 +2151,10 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode)
if (descr->serv_action_valid)
return TCM_INVALID_CDB_FIELD;
- if (!descr->enabled || descr->enabled(descr, cmd))
+ if (!descr->enabled || descr->enabled(descr, cmd)) {
*opcode = descr;
+ return TCM_NO_SENSE;
+ }
break;
case 0x2:
/*
@@ -2166,8 +2168,10 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode)
if (descr->serv_action_valid &&
descr->service_action == requested_sa) {
if (!descr->enabled || descr->enabled(descr,
- cmd))
+ cmd)) {
*opcode = descr;
+ return TCM_NO_SENSE;
+ }
} else if (!descr->serv_action_valid)
return TCM_INVALID_CDB_FIELD;
break;
@@ -2180,13 +2184,15 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode)
*/
if (descr->service_action == requested_sa)
if (!descr->enabled || descr->enabled(descr,
- cmd))
+ cmd)) {
*opcode = descr;
+ return TCM_NO_SENSE;
+ }
break;
}
}
- return 0;
+ return TCM_NO_SENSE;
}
static sense_reason_t
@@ -2243,7 +2249,7 @@ spc_emulate_report_supp_op_codes(struct se_cmd *cmd)
response_length += spc_rsoc_encode_command_descriptor(
&buf[response_length], rctd, descr);
}
- put_unaligned_be32(response_length - 3, buf);
+ put_unaligned_be32(response_length - 4, buf);
} else {
response_length = spc_rsoc_encode_one_command_descriptor(
&buf[response_length], rctd, descr,
@@ -2267,9 +2273,9 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
unsigned char *cdb = cmd->t_task_cdb;
switch (cdb[0]) {
- case RESERVE:
+ case RESERVE_6:
case RESERVE_10:
- case RELEASE:
+ case RELEASE_6:
case RELEASE_10:
if (!dev->dev_attrib.emulate_pr)
return TCM_UNSUPPORTED_SCSI_OPCODE;
@@ -2313,7 +2319,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
*size = get_unaligned_be32(&cdb[5]);
cmd->execute_cmd = target_scsi3_emulate_pr_out;
break;
- case RELEASE:
+ case RELEASE_6:
case RELEASE_10:
if (cdb[0] == RELEASE_10)
*size = get_unaligned_be16(&cdb[7]);
@@ -2322,7 +2328,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
cmd->execute_cmd = target_scsi2_reservation_release;
break;
- case RESERVE:
+ case RESERVE_6:
case RESERVE_10:
/*
* The SPC-2 RESERVE does not contain a size in the SCSI CDB.
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index 7e918bd3f100..4307161533a7 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -412,8 +412,8 @@ static int hi3660_thermal_probe(struct hisi_thermal_data *data)
data->nr_sensors = 1;
- data->sensor = devm_kzalloc(dev, sizeof(*data->sensor) *
- data->nr_sensors, GFP_KERNEL);
+ data->sensor = devm_kcalloc(dev, data->nr_sensors,
+ sizeof(*data->sensor), GFP_KERNEL);
if (!data->sensor)
return -ENOMEM;
diff --git a/drivers/thermal/intel/int340x_thermal/int3402_thermal.c b/drivers/thermal/intel/int340x_thermal/int3402_thermal.c
index 543b03960e99..57b90005888a 100644
--- a/drivers/thermal/intel/int340x_thermal/int3402_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3402_thermal.c
@@ -45,6 +45,9 @@ static int int3402_thermal_probe(struct platform_device *pdev)
struct int3402_thermal_data *d;
int ret;
+ if (!adev)
+ return -ENODEV;
+
if (!acpi_has_method(adev->handle, "_TMP"))
return -ENODEV;
diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
index 8dca6a6aceca..3d9efe69d562 100644
--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
@@ -133,8 +133,8 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
if (ACPI_SUCCESS(status))
int34x_zone->aux_trip_nr = trip_cnt;
- zone_trips = kzalloc(sizeof(*zone_trips) * (trip_cnt + INT340X_THERMAL_MAX_TRIP_COUNT),
- GFP_KERNEL);
+ zone_trips = kcalloc(trip_cnt + INT340X_THERMAL_MAX_TRIP_COUNT,
+ sizeof(*zone_trips), GFP_KERNEL);
if (!zone_trips) {
ret = -ENOMEM;
goto err_trips_alloc;
@@ -143,7 +143,7 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
for (i = 0; i < trip_cnt; i++) {
zone_trips[i].type = THERMAL_TRIP_PASSIVE;
zone_trips[i].temperature = THERMAL_TEMP_INVALID;
- zone_trips[i].flags |= THERMAL_TRIP_FLAG_RW_TEMP;
+ zone_trips[i].flags = THERMAL_TRIP_FLAG_RW_TEMP;
zone_trips[i].priv = THERMAL_INT_TO_TRIP_PRIV(i);
}
diff --git a/drivers/thermal/intel/intel_tcc.c b/drivers/thermal/intel/intel_tcc.c
index 817421508d5c..b2a615aea7c1 100644
--- a/drivers/thermal/intel/intel_tcc.c
+++ b/drivers/thermal/intel/intel_tcc.c
@@ -106,7 +106,7 @@ static const struct x86_cpu_id intel_tcc_cpu_ids[] __initconst = {
X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_D, &temp_broadwell),
X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, &temp_broadwell),
X86_MATCH_VFM(INTEL_ATOM_AIRMONT, &temp_broadwell),
- X86_MATCH_VFM(INTEL_ATOM_AIRMONT_MID, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID2, &temp_broadwell),
X86_MATCH_VFM(INTEL_ATOM_AIRMONT_NP, &temp_broadwell),
X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &temp_goldmont),
X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &temp_goldmont),
diff --git a/drivers/thermal/k3_j72xx_bandgap.c b/drivers/thermal/k3_j72xx_bandgap.c
index 70de6dbf99c5..a36289e61315 100644
--- a/drivers/thermal/k3_j72xx_bandgap.c
+++ b/drivers/thermal/k3_j72xx_bandgap.c
@@ -460,13 +460,13 @@ static int k3_j72xx_bandgap_probe(struct platform_device *pdev)
goto err_alloc;
}
- ref_table = kzalloc(sizeof(*ref_table) * TABLE_SIZE, GFP_KERNEL);
+ ref_table = kcalloc(TABLE_SIZE, sizeof(*ref_table), GFP_KERNEL);
if (!ref_table) {
ret = -ENOMEM;
goto err_alloc;
}
- derived_table = devm_kzalloc(bgp->dev, sizeof(*derived_table) * TABLE_SIZE,
+ derived_table = devm_kcalloc(bgp->dev, TABLE_SIZE, sizeof(*derived_table),
GFP_KERNEL);
if (!derived_table) {
ret = -ENOMEM;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 2328ac0d8561..f96ca2710928 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1589,26 +1589,26 @@ thermal_zone_device_register_with_trips(const char *type,
tz->state = TZ_STATE_FLAG_INIT;
+ result = dev_set_name(&tz->device, "thermal_zone%d", tz->id);
+ if (result)
+ goto remove_id;
+
+ thermal_zone_device_init(tz);
+
+ result = thermal_zone_init_governor(tz);
+ if (result)
+ goto remove_id;
+
/* sys I/F */
/* Add nodes that are always present via .groups */
result = thermal_zone_create_device_groups(tz);
if (result)
goto remove_id;
- result = dev_set_name(&tz->device, "thermal_zone%d", tz->id);
- if (result) {
- thermal_zone_destroy_device_groups(tz);
- goto remove_id;
- }
- thermal_zone_device_init(tz);
result = device_register(&tz->device);
if (result)
goto release_device;
- result = thermal_zone_init_governor(tz);
- if (result)
- goto unregister;
-
if (!tz->tzp || !tz->tzp->no_hwmon) {
result = thermal_add_hwmon_sysfs(tz);
if (result)
diff --git a/drivers/thermal/thermal_debugfs.c b/drivers/thermal/thermal_debugfs.c
index c800504c3cfe..11d34f2a3d9f 100644
--- a/drivers/thermal/thermal_debugfs.c
+++ b/drivers/thermal/thermal_debugfs.c
@@ -319,7 +319,7 @@ static int cdev_tt_seq_show(struct seq_file *s, void *v)
int i = *(loff_t *)v;
if (!i)
- seq_puts(s, "Transition\tOccurences\n");
+ seq_puts(s, "Transition\tOccurrences\n");
list_for_each_entry(entry, &transitions[i], node) {
/*
@@ -876,7 +876,7 @@ void thermal_debug_tz_add(struct thermal_zone_device *tz)
tz_dbg->tz = tz;
- tz_dbg->trips_crossed = kzalloc(sizeof(int) * tz->num_trips, GFP_KERNEL);
+ tz_dbg->trips_crossed = kcalloc(tz->num_trips, sizeof(int), GFP_KERNEL);
if (!tz_dbg->trips_crossed) {
thermal_debugfs_remove_id(thermal_dbg);
return;
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index 5401f03d6b6c..8264d5c3bbb3 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -107,7 +107,7 @@ static struct thermal_trip *thermal_of_trips_init(struct device_node *np, int *n
if (!count)
return NULL;
- struct thermal_trip *tt __free(kfree) = kzalloc(sizeof(*tt) * count, GFP_KERNEL);
+ struct thermal_trip *tt __free(kfree) = kcalloc(count, sizeof(*tt), GFP_KERNEL);
if (!tt)
return ERR_PTR(-ENOMEM);
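
The thermal hunks above replace open-coded kzalloc(sizeof(elem) * count) calls with kcalloc() and its devm variant; the two-argument form rejects element-count/size products that would overflow instead of silently allocating a short buffer. A userspace sketch of the same guard (zalloc_array() is a made-up name):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *zalloc_array(size_t n, size_t size)
{
	/* refuse products that would wrap around, as kcalloc() does */
	if (size && n > SIZE_MAX / size)
		return NULL;
	return calloc(n, size);		/* zeroed, like kzalloc()/kcalloc() */
}

int main(void)
{
	void *ok = zalloc_array(16, sizeof(int));
	void *bad = zalloc_array(SIZE_MAX, sizeof(int));

	printf("ok=%p bad=%p\n", ok, bad);	/* bad is NULL */
	free(ok);
	return 0;
}
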
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index dc1f456736dc..cd15e84c47f4 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -312,7 +312,7 @@ static void tb_cfg_print_error(struct tb_ctl *ctl, enum tb_cfg_space space,
static __be32 tb_crc(const void *data, size_t len)
{
- return cpu_to_be32(~__crc32c_le(~0, data, len));
+ return cpu_to_be32(~crc32c(~0, data, len));
}
static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 9c1d65d26553..e66183a72cf9 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -211,7 +211,7 @@ static u8 tb_crc8(u8 *data, int len)
static u32 tb_crc32(void *data, size_t len)
{
- return ~__crc32c_le(~0, data, len);
+ return ~crc32c(~0, data, len);
}
#define TB_DROM_DATA_START 13
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 8229a6fbda5a..717b31d78728 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -1009,6 +1009,8 @@ static int tb_dp_dprx_start(struct tb_tunnel *tunnel)
*/
tb_tunnel_get(tunnel);
+ tunnel->dprx_started = true;
+
if (tunnel->callback) {
tunnel->dprx_timeout = dprx_timeout_to_ktime(dprx_timeout);
queue_delayed_work(tunnel->tb->wq, &tunnel->dprx_work, 0);
@@ -1021,9 +1023,12 @@ static int tb_dp_dprx_start(struct tb_tunnel *tunnel)
static void tb_dp_dprx_stop(struct tb_tunnel *tunnel)
{
- tunnel->dprx_canceled = true;
- cancel_delayed_work(&tunnel->dprx_work);
- tb_tunnel_put(tunnel);
+ if (tunnel->dprx_started) {
+ tunnel->dprx_started = false;
+ tunnel->dprx_canceled = true;
+ cancel_delayed_work(&tunnel->dprx_work);
+ tb_tunnel_put(tunnel);
+ }
}
static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index 7f6d3a18a41e..8a0a0cb21a89 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -63,6 +63,7 @@ enum tb_tunnel_state {
* @allocated_down: Allocated downstream bandwidth (only for USB3)
* @bw_mode: DP bandwidth allocation mode registers can be used to
* determine consumed and allocated bandwidth
+ * @dprx_started: DPRX negotiation was started (tb_dp_dprx_start() was called for it)
* @dprx_canceled: Was DPRX capabilities read poll canceled
* @dprx_timeout: If set DPRX capabilities read poll work will timeout after this passes
* @dprx_work: Worker that is scheduled to poll completion of DPRX capabilities read
@@ -100,6 +101,7 @@ struct tb_tunnel {
int allocated_up;
int allocated_down;
bool bw_mode;
+ bool dprx_started;
bool dprx_canceled;
ktime_t dprx_timeout;
struct delayed_work dprx_work;
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 63a494d36a1f..7fb81bbaee60 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -383,7 +383,24 @@ config NULL_TTY
available or desired.
In order to use this driver, you should redirect the console to this
- TTY, or boot the kernel with console=ttynull.
+ TTY, boot the kernel with console=ttynull, or enable
+ NULL_TTY_DEFAULT_CONSOLE.
+
+ If unsure, say N.
+
+config NULL_TTY_DEFAULT_CONSOLE
+ bool "Support for console on ttynull"
+ depends on NULL_TTY=y && !VT_CONSOLE
+ help
+ Say Y here if you want the NULL TTY to be used as a /dev/console
+ device by default.
+
+ For example, it might be useful to prevent a VT-less kernel from
+ writing the system log to a random device connected to the serial
+ port.
+
+ Another console driver might still be preferred via the command
+ line, SPCR, or the device tree.
If unsure, say N.
diff --git a/drivers/tty/hvc/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c
index ed4bf40278a7..4ca7472c38e0 100644
--- a/drivers/tty/hvc/hvc_iucv.c
+++ b/drivers/tty/hvc/hvc_iucv.c
@@ -24,6 +24,7 @@
#include <linux/tty.h>
#include <linux/wait.h>
#include <net/iucv/iucv.h>
+#include <asm/machine.h>
#include "hvc_console.h"
@@ -1240,7 +1241,7 @@ static int param_set_vmidfilter(const char *val, const struct kernel_param *kp)
{
int rc;
- if (!MACHINE_IS_VM || !hvc_iucv_devices)
+ if (!machine_is_vm() || !hvc_iucv_devices)
return -ENODEV;
if (!val)
@@ -1269,7 +1270,7 @@ static int param_get_vmidfilter(char *buffer, const struct kernel_param *kp)
size_t index, len;
void *start, *end;
- if (!MACHINE_IS_VM || !hvc_iucv_devices)
+ if (!machine_is_vm() || !hvc_iucv_devices)
return -ENODEV;
rc = 0;
@@ -1306,7 +1307,7 @@ static int __init hvc_iucv_init(void)
if (!hvc_iucv_devices)
return -ENODEV;
- if (!MACHINE_IS_VM) {
+ if (!machine_is_vm()) {
pr_notice("The z/VM IUCV HVC device driver cannot "
"be used without z/VM\n");
rc = -ENODEV;
diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
index d0b18358859e..742004d63c6f 100644
--- a/drivers/tty/serial/8250/8250_bcm7271.c
+++ b/drivers/tty/serial/8250/8250_bcm7271.c
@@ -1056,8 +1056,7 @@ static int brcmuart_probe(struct platform_device *pdev)
}
/* setup HR timer */
- hrtimer_init(&priv->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- priv->hrt.function = brcmuart_hrtimer_func;
+ hrtimer_setup(&priv->hrt, brcmuart_hrtimer_func, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
up.port.shutdown = brcmuart_shutdown;
up.port.startup = brcmuart_startup;
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 442967a6cd52..c57f44882abb 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -566,12 +566,10 @@ static int serial8250_em485_init(struct uart_8250_port *p)
if (!p->em485)
return -ENOMEM;
- hrtimer_init(&p->em485->stop_tx_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- hrtimer_init(&p->em485->start_tx_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- p->em485->stop_tx_timer.function = &serial8250_em485_handle_stop_tx;
- p->em485->start_tx_timer.function = &serial8250_em485_handle_start_tx;
+ hrtimer_setup(&p->em485->stop_tx_timer, &serial8250_em485_handle_stop_tx, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ hrtimer_setup(&p->em485->start_tx_timer, &serial8250_em485_handle_start_tx, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
p->em485->port = p;
p->em485->active_timer = NULL;
p->em485->tx_stopped = true;
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 04212c823a91..98f178bdbcbe 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2867,11 +2867,10 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
return -EINVAL;
}
}
-
- hrtimer_init(&uap->trigger_start_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer_init(&uap->trigger_stop_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- uap->trigger_start_tx.function = pl011_trigger_start_tx;
- uap->trigger_stop_tx.function = pl011_trigger_stop_tx;
+ hrtimer_setup(&uap->trigger_start_tx, pl011_trigger_start_tx, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ hrtimer_setup(&uap->trigger_stop_tx, pl011_trigger_stop_tx, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
if (ret)
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 9c59ec128bb4..9a1afe409b98 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2582,10 +2582,10 @@ static int imx_uart_probe(struct platform_device *pdev)
imx_uart_writel(sport, ucr3, UCR3);
}
- hrtimer_init(&sport->trigger_start_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer_init(&sport->trigger_stop_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- sport->trigger_start_tx.function = imx_trigger_start_tx;
- sport->trigger_stop_tx.function = imx_trigger_stop_tx;
+ hrtimer_setup(&sport->trigger_start_tx, imx_trigger_start_tx, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ hrtimer_setup(&sport->trigger_stop_tx, imx_trigger_stop_tx, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/*
* Allocate the IRQ(s) i.MX1 has three interrupts whereas later
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 92f7e752f862..d46650e578e5 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -2426,10 +2426,10 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
}
/*
- * Disable the console device before suspending.
+ * Suspend the console device before suspending the port.
*/
if (uart_console(uport))
- console_stop(uport->cons);
+ console_suspend(uport->cons);
uart_change_pm(state, UART_PM_STATE_OFF);
@@ -2484,7 +2484,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
uart_port_unlock_irq(uport);
}
if (console_suspend_enabled)
- console_start(uport->cons);
+ console_resume(uport->cons);
}
if (tty_port_suspended(port)) {
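For reference, the suspend/resume pairing in serial_core after this change, condensed from the two hunks above (a sketch only; the surrounding locking and port state handling are omitted):

	/* uart_suspend_port(): quiesce the console attached to this port */
	if (uart_console(uport))
		console_suspend(uport->cons);

	/* uart_resume_port(): re-enable it only if console suspend is allowed */
	if (console_suspend_enabled)
		console_resume(uport->cons);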
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index b1ea48f38248..b72c3bc19bfa 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1702,8 +1702,7 @@ static void sci_request_dma(struct uart_port *port)
dma += s->buf_len_rx;
}
- hrtimer_init(&s->rx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- s->rx_timer.function = sci_dma_rx_timer_fn;
+ hrtimer_setup(&s->rx_timer, sci_dma_rx_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
s->chan_rx_saved = s->chan_rx = chan;
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 92ec51870d1d..fe457bf1e15b 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -454,7 +454,7 @@ static void cdns_uart_handle_tx(void *dev_id)
if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED &&
(kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port))) {
- cdns_uart->tx_timer.function = &cdns_rs485_rx_callback;
+ hrtimer_update_function(&cdns_uart->tx_timer, cdns_rs485_rx_callback);
hrtimer_start(&cdns_uart->tx_timer,
ns_to_ktime(cdns_calc_after_tx_delay(cdns_uart)), HRTIMER_MODE_REL);
}
@@ -734,7 +734,7 @@ static void cdns_uart_start_tx(struct uart_port *port)
if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED) {
if (!cdns_uart->rs485_tx_started) {
- cdns_uart->tx_timer.function = &cdns_rs485_tx_callback;
+ hrtimer_update_function(&cdns_uart->tx_timer, cdns_rs485_tx_callback);
cdns_rs485_tx_setup(cdns_uart);
return hrtimer_start(&cdns_uart->tx_timer,
ms_to_ktime(port->rs485.delay_rts_before_send),
@@ -1626,8 +1626,8 @@ static int cdns_rs485_config(struct uart_port *port, struct ktermios *termios,
writel(val, port->membase + CDNS_UART_MODEMCR);
/* Timer setup */
- hrtimer_init(&cdns_uart->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- cdns_uart->tx_timer.function = &cdns_rs485_tx_callback;
+ hrtimer_setup(&cdns_uart->tx_timer, &cdns_rs485_tx_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/* Disable transmitter and make Rx setup*/
cdns_uart_stop_tx(port);
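The xilinx_uartps hunks show the companion helper for timers whose callback changes at run time: rather than writing tx_timer.function directly on an already-initialized timer, the driver now switches the callback through hrtimer_update_function() before re-arming it. A minimal sketch (the callback and delay names are placeholders):

	/* old style: poke the callback pointer, then start the timer */
	cdns_uart->tx_timer.function = &my_callback;
	hrtimer_start(&cdns_uart->tx_timer, delay, HRTIMER_MODE_REL);

	/* new style: swap the callback via the helper, then start the timer */
	hrtimer_update_function(&cdns_uart->tx_timer, my_callback);
	hrtimer_start(&cdns_uart->tx_timer, delay, HRTIMER_MODE_REL);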
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 3438269a5440..90b5ab60f5ae 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -458,6 +458,14 @@ static ssize_t pm_qos_enable_store(struct device *dev,
return count;
}
+static ssize_t critical_health_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", hba->critical_health_count);
+}
+
static DEVICE_ATTR_RW(rpm_lvl);
static DEVICE_ATTR_RO(rpm_target_dev_state);
static DEVICE_ATTR_RO(rpm_target_link_state);
@@ -470,6 +478,7 @@ static DEVICE_ATTR_RW(enable_wb_buf_flush);
static DEVICE_ATTR_RW(wb_flush_threshold);
static DEVICE_ATTR_RW(rtc_update_ms);
static DEVICE_ATTR_RW(pm_qos_enable);
+static DEVICE_ATTR_RO(critical_health);
static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_rpm_lvl.attr,
@@ -484,6 +493,7 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_wb_flush_threshold.attr,
&dev_attr_rtc_update_ms.attr,
&dev_attr_pm_qos_enable.attr,
+ &dev_attr_critical_health.attr,
NULL
};
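The new critical_health attribute is a bare counter; a later ufshcd.c hunk increments it and calls sysfs_notify() whenever the device raises the HEALTH_CRITICAL exception. A sketch of how userspace could wait on it, relying on the usual sysfs_notify()/poll() convention (the sysfs path shown is hypothetical and error handling is omitted):

	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* hypothetical path; the real one depends on the UFS host device */
		int fd = open("/sys/devices/platform/soc/feb00000.ufs/critical_health",
			      O_RDONLY);
		struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };
		char buf[16] = "";
		ssize_t n;

		pread(fd, buf, sizeof(buf) - 1, 0);	/* initial read arms the poll */
		poll(&pfd, 1, -1);			/* returns on sysfs_notify() */
		n = pread(fd, buf, sizeof(buf) - 1, 0);	/* re-read the counter */
		if (n > 0) {
			buf[n] = '\0';
			printf("critical health events: %s", buf);
		}
		close(fd);
		return 0;
	}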
diff --git a/drivers/ufs/core/ufs_trace.h b/drivers/ufs/core/ufs_trace.h
index 84deca2b841d..caa32e23ffa5 100644
--- a/drivers/ufs/core/ufs_trace.h
+++ b/drivers/ufs/core/ufs_trace.h
@@ -83,34 +83,34 @@ UFS_CMD_TRACE_TSF_TYPES
TRACE_EVENT(ufshcd_clk_gating,
- TP_PROTO(const char *dev_name, int state),
+ TP_PROTO(struct ufs_hba *hba, int state),
- TP_ARGS(dev_name, state),
+ TP_ARGS(hba, state),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__field(int, state)
),
TP_fast_assign(
- __assign_str(dev_name);
+ __entry->hba = hba;
__entry->state = state;
),
TP_printk("%s: gating state changed to %s",
- __get_str(dev_name),
+ dev_name(__entry->hba->dev),
__print_symbolic(__entry->state, UFSCHD_CLK_GATING_STATES))
);
TRACE_EVENT(ufshcd_clk_scaling,
- TP_PROTO(const char *dev_name, const char *state, const char *clk,
+ TP_PROTO(struct ufs_hba *hba, const char *state, const char *clk,
u32 prev_state, u32 curr_state),
- TP_ARGS(dev_name, state, clk, prev_state, curr_state),
+ TP_ARGS(hba, state, clk, prev_state, curr_state),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__string(state, state)
__string(clk, clk)
__field(u32, prev_state)
@@ -118,7 +118,7 @@ TRACE_EVENT(ufshcd_clk_scaling,
),
TP_fast_assign(
- __assign_str(dev_name);
+ __entry->hba = hba;
__assign_str(state);
__assign_str(clk);
__entry->prev_state = prev_state;
@@ -126,80 +126,80 @@ TRACE_EVENT(ufshcd_clk_scaling,
),
TP_printk("%s: %s %s from %u to %u Hz",
- __get_str(dev_name), __get_str(state), __get_str(clk),
+ dev_name(__entry->hba->dev), __get_str(state), __get_str(clk),
__entry->prev_state, __entry->curr_state)
);
TRACE_EVENT(ufshcd_auto_bkops_state,
- TP_PROTO(const char *dev_name, const char *state),
+ TP_PROTO(struct ufs_hba *hba, const char *state),
- TP_ARGS(dev_name, state),
+ TP_ARGS(hba, state),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__string(state, state)
),
TP_fast_assign(
- __assign_str(dev_name);
+ __entry->hba = hba;
__assign_str(state);
),
TP_printk("%s: auto bkops - %s",
- __get_str(dev_name), __get_str(state))
+ dev_name(__entry->hba->dev), __get_str(state))
);
DECLARE_EVENT_CLASS(ufshcd_profiling_template,
- TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+ TP_PROTO(struct ufs_hba *hba, const char *profile_info, s64 time_us,
int err),
- TP_ARGS(dev_name, profile_info, time_us, err),
+ TP_ARGS(hba, profile_info, time_us, err),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__string(profile_info, profile_info)
__field(s64, time_us)
__field(int, err)
),
TP_fast_assign(
- __assign_str(dev_name);
+ __entry->hba = hba;
__assign_str(profile_info);
__entry->time_us = time_us;
__entry->err = err;
),
TP_printk("%s: %s: took %lld usecs, err %d",
- __get_str(dev_name), __get_str(profile_info),
+ dev_name(__entry->hba->dev), __get_str(profile_info),
__entry->time_us, __entry->err)
);
DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_hibern8,
- TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+ TP_PROTO(struct ufs_hba *hba, const char *profile_info, s64 time_us,
int err),
- TP_ARGS(dev_name, profile_info, time_us, err));
+ TP_ARGS(hba, profile_info, time_us, err));
DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_gating,
- TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+ TP_PROTO(struct ufs_hba *hba, const char *profile_info, s64 time_us,
int err),
- TP_ARGS(dev_name, profile_info, time_us, err));
+ TP_ARGS(hba, profile_info, time_us, err));
DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_scaling,
- TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+ TP_PROTO(struct ufs_hba *hba, const char *profile_info, s64 time_us,
int err),
- TP_ARGS(dev_name, profile_info, time_us, err));
+ TP_ARGS(hba, profile_info, time_us, err));
DECLARE_EVENT_CLASS(ufshcd_template,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state),
+ TP_ARGS(hba, err, usecs, dev_state, link_state),
TP_STRUCT__entry(
__field(s64, usecs)
__field(int, err)
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__field(int, dev_state)
__field(int, link_state)
),
@@ -207,14 +207,14 @@ DECLARE_EVENT_CLASS(ufshcd_template,
TP_fast_assign(
__entry->usecs = usecs;
__entry->err = err;
- __assign_str(dev_name);
+ __entry->hba = hba;
__entry->dev_state = dev_state;
__entry->link_state = link_state;
),
TP_printk(
"%s: took %lld usecs, dev_state: %s, link_state: %s, err %d",
- __get_str(dev_name),
+ dev_name(__entry->hba->dev),
__entry->usecs,
__print_symbolic(__entry->dev_state, UFS_PWR_MODES),
__print_symbolic(__entry->link_state, UFS_LINK_STATES),
@@ -223,60 +223,62 @@ DECLARE_EVENT_CLASS(ufshcd_template,
);
DEFINE_EVENT(ufshcd_template, ufshcd_system_suspend,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_system_resume,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_runtime_suspend,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_runtime_resume,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_init,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_wl_suspend,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_wl_resume,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_suspend,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_resume,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
TRACE_EVENT(ufshcd_command,
- TP_PROTO(struct scsi_device *sdev, enum ufs_trace_str_t str_t,
+ TP_PROTO(struct scsi_device *sdev, struct ufs_hba *hba,
+ enum ufs_trace_str_t str_t,
unsigned int tag, u32 doorbell, u32 hwq_id, int transfer_len,
u32 intr, u64 lba, u8 opcode, u8 group_id),
- TP_ARGS(sdev, str_t, tag, doorbell, hwq_id, transfer_len, intr, lba,
+ TP_ARGS(sdev, hba, str_t, tag, doorbell, hwq_id, transfer_len, intr, lba,
opcode, group_id),
TP_STRUCT__entry(
__field(struct scsi_device *, sdev)
+ __field(struct ufs_hba *, hba)
__field(enum ufs_trace_str_t, str_t)
__field(unsigned int, tag)
__field(u32, doorbell)
@@ -290,6 +292,7 @@ TRACE_EVENT(ufshcd_command,
TP_fast_assign(
__entry->sdev = sdev;
+ __entry->hba = hba;
__entry->str_t = str_t;
__entry->tag = tag;
__entry->doorbell = doorbell;
@@ -312,13 +315,13 @@ TRACE_EVENT(ufshcd_command,
);
TRACE_EVENT(ufshcd_uic_command,
- TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, u32 cmd,
+ TP_PROTO(struct ufs_hba *hba, enum ufs_trace_str_t str_t, u32 cmd,
u32 arg1, u32 arg2, u32 arg3),
- TP_ARGS(dev_name, str_t, cmd, arg1, arg2, arg3),
+ TP_ARGS(hba, str_t, cmd, arg1, arg2, arg3),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__field(enum ufs_trace_str_t, str_t)
__field(u32, cmd)
__field(u32, arg1)
@@ -327,7 +330,7 @@ TRACE_EVENT(ufshcd_uic_command,
),
TP_fast_assign(
- __assign_str(dev_name);
+ __entry->hba = hba;
__entry->str_t = str_t;
__entry->cmd = cmd;
__entry->arg1 = arg1;
@@ -337,19 +340,19 @@ TRACE_EVENT(ufshcd_uic_command,
TP_printk(
"%s: %s: cmd: 0x%x, arg1: 0x%x, arg2: 0x%x, arg3: 0x%x",
- show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
+ show_ufs_cmd_trace_str(__entry->str_t), dev_name(__entry->hba->dev),
__entry->cmd, __entry->arg1, __entry->arg2, __entry->arg3
)
);
TRACE_EVENT(ufshcd_upiu,
- TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, void *hdr,
+ TP_PROTO(struct ufs_hba *hba, enum ufs_trace_str_t str_t, void *hdr,
void *tsf, enum ufs_trace_tsf_t tsf_t),
- TP_ARGS(dev_name, str_t, hdr, tsf, tsf_t),
+ TP_ARGS(hba, str_t, hdr, tsf, tsf_t),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__field(enum ufs_trace_str_t, str_t)
__array(unsigned char, hdr, 12)
__array(unsigned char, tsf, 16)
@@ -357,7 +360,7 @@ TRACE_EVENT(ufshcd_upiu,
),
TP_fast_assign(
- __assign_str(dev_name);
+ __entry->hba = hba;
__entry->str_t = str_t;
memcpy(__entry->hdr, hdr, sizeof(__entry->hdr));
memcpy(__entry->tsf, tsf, sizeof(__entry->tsf));
@@ -366,7 +369,7 @@ TRACE_EVENT(ufshcd_upiu,
TP_printk(
"%s: %s: HDR:%s, %s:%s",
- show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
+ show_ufs_cmd_trace_str(__entry->str_t), dev_name(__entry->hba->dev),
__print_hex(__entry->hdr, sizeof(__entry->hdr)),
show_ufs_cmd_trace_tsf(__entry->tsf_t),
__print_hex(__entry->tsf, sizeof(__entry->tsf))
@@ -375,22 +378,22 @@ TRACE_EVENT(ufshcd_upiu,
TRACE_EVENT(ufshcd_exception_event,
- TP_PROTO(const char *dev_name, u16 status),
+ TP_PROTO(struct ufs_hba *hba, u16 status),
- TP_ARGS(dev_name, status),
+ TP_ARGS(hba, status),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__field(u16, status)
),
TP_fast_assign(
- __assign_str(dev_name);
+ __entry->hba = hba;
__entry->status = status;
),
TP_printk("%s: status 0x%x",
- __get_str(dev_name), __entry->status
+ dev_name(__entry->hba->dev), __entry->status
)
);
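These events now record the struct ufs_hba pointer (in most cases replacing a copied device-name string), and dev_name() is only evaluated in TP_printk(), i.e. when the trace buffer is read, so the name is no longer copied into each record. Reduced to the three pieces the events share:

	__field(struct ufs_hba *, hba)		/* TP_STRUCT__entry: store the pointer */
	__entry->hba = hba;			/* TP_fast_assign: capture at event time */
	dev_name(__entry->hba->dev)		/* TP_printk: name resolved at read time */

The flip side of this layout is that the printed name reflects the device as it exists when the buffer is read, not when the event fired.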
diff --git a/drivers/ufs/core/ufshcd-crypto.c b/drivers/ufs/core/ufshcd-crypto.c
index 694ff7578fc1..9e63a9d3cb7e 100644
--- a/drivers/ufs/core/ufshcd-crypto.c
+++ b/drivers/ufs/core/ufshcd-crypto.c
@@ -72,11 +72,11 @@ static int ufshcd_crypto_keyslot_program(struct blk_crypto_profile *profile,
if (ccap_array[cap_idx].algorithm_id == UFS_CRYPTO_ALG_AES_XTS) {
/* In XTS mode, the blk_crypto_key's size is already doubled */
- memcpy(cfg.crypto_key, key->raw, key->size/2);
+ memcpy(cfg.crypto_key, key->bytes, key->size/2);
memcpy(cfg.crypto_key + UFS_CRYPTO_KEY_MAX_SIZE/2,
- key->raw + key->size/2, key->size/2);
+ key->bytes + key->size/2, key->size/2);
} else {
- memcpy(cfg.crypto_key, key->raw, key->size);
+ memcpy(cfg.crypto_key, key->bytes, key->size);
}
ufshcd_program_key(hba, &cfg, slot);
@@ -185,6 +185,7 @@ int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba)
hba->crypto_profile.ll_ops = ufshcd_crypto_ops;
/* UFS only supports 8 bytes for any DUN */
hba->crypto_profile.max_dun_bytes_supported = 8;
+ hba->crypto_profile.key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
hba->crypto_profile.dev = hba->dev;
/*
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index 786f20ef2238..10b4a19a70f1 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -117,11 +117,12 @@ static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
return ufshcd_readl(hba, REG_UFS_VERSION);
}
-static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
- bool up, enum ufs_notify_change_status status)
+static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba, bool up,
+ unsigned long target_freq,
+ enum ufs_notify_change_status status)
{
if (hba->vops && hba->vops->clk_scale_notify)
- return hba->vops->clk_scale_notify(hba, up, status);
+ return hba->vops->clk_scale_notify(hba, up, target_freq, status);
return 0;
}
@@ -159,9 +160,9 @@ static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
}
static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
+ enum ufs_notify_change_status status,
+ const struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
{
if (hba->vops && hba->vops->pwr_change_notify)
return hba->vops->pwr_change_notify(hba, status,
@@ -270,6 +271,14 @@ static inline int ufshcd_mcq_vops_config_esi(struct ufs_hba *hba)
return -EOPNOTSUPP;
}
+static inline u32 ufshcd_vops_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
+{
+ if (hba->vops && hba->vops->freq_to_gear_speed)
+ return hba->vops->freq_to_gear_speed(hba, freq);
+
+ return 0;
+}
+
extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];
/**
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 464f13da259a..0534390c2a35 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -369,7 +369,7 @@ static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
else
header = &hba->lrb[tag].ucd_rsp_ptr->header;
- trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
+ trace_ufshcd_upiu(hba, str_t, header, &rq->sc.cdb,
UFS_TSF_CDB);
}
@@ -380,7 +380,7 @@ static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
if (!trace_ufshcd_upiu_enabled())
return;
- trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
+ trace_ufshcd_upiu(hba, str_t, &rq_rsp->header,
&rq_rsp->qr, UFS_TSF_OSF);
}
@@ -393,12 +393,12 @@ static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
return;
if (str_t == UFS_TM_SEND)
- trace_ufshcd_upiu(dev_name(hba->dev), str_t,
+ trace_ufshcd_upiu(hba, str_t,
&descp->upiu_req.req_header,
&descp->upiu_req.input_param1,
UFS_TSF_TM_INPUT);
else
- trace_ufshcd_upiu(dev_name(hba->dev), str_t,
+ trace_ufshcd_upiu(hba, str_t,
&descp->upiu_rsp.rsp_header,
&descp->upiu_rsp.output_param1,
UFS_TSF_TM_OUTPUT);
@@ -418,7 +418,7 @@ static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
else
cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
- trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
+ trace_ufshcd_uic_command(hba, str_t, cmd,
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
@@ -473,7 +473,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
} else {
doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
}
- trace_ufshcd_command(cmd->device, str_t, tag, doorbell, hwq_id,
+ trace_ufshcd_command(cmd->device, hba, str_t, tag, doorbell, hwq_id,
transfer_len, intr, lba, opcode, group_id);
}
@@ -1063,7 +1063,7 @@ static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
clki->max_freq, ret);
break;
}
- trace_ufshcd_clk_scaling(dev_name(hba->dev),
+ trace_ufshcd_clk_scaling(hba,
"scaled up", clki->name,
clki->curr_freq,
clki->max_freq);
@@ -1081,7 +1081,7 @@ static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
clki->min_freq, ret);
break;
}
- trace_ufshcd_clk_scaling(dev_name(hba->dev),
+ trace_ufshcd_clk_scaling(hba,
"scaled down", clki->name,
clki->curr_freq,
clki->min_freq);
@@ -1122,7 +1122,7 @@ int ufshcd_opp_config_clks(struct device *dev, struct opp_table *opp_table,
return ret;
}
- trace_ufshcd_clk_scaling(dev_name(dev),
+ trace_ufshcd_clk_scaling(hba,
(scaling_down ? "scaled down" : "scaled up"),
clki->name, hba->clk_scaling.target_freq, freq);
}
@@ -1162,7 +1162,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
int ret = 0;
ktime_t start = ktime_get();
- ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
+ ret = ufshcd_vops_clk_scale_notify(hba, scale_up, freq, PRE_CHANGE);
if (ret)
goto out;
@@ -1173,7 +1173,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
if (ret)
goto out;
- ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+ ret = ufshcd_vops_clk_scale_notify(hba, scale_up, freq, POST_CHANGE);
if (ret) {
if (hba->use_pm_opp)
ufshcd_opp_set_rate(hba,
@@ -1186,7 +1186,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
ufshcd_pm_qos_update(hba, scale_up);
out:
- trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+ trace_ufshcd_profile_clk_scaling(hba,
(scale_up ? "up" : "down"),
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
return ret;
@@ -1313,16 +1313,26 @@ out:
/**
* ufshcd_scale_gear - scale up/down UFS gear
* @hba: per adapter instance
+ * @target_gear: target gear to scale to
* @scale_up: True for scaling up gear and false for scaling down
*
* Return: 0 for success; -EBUSY if scaling can't happen at this time;
* non-zero for any other errors.
*/
-static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
+static int ufshcd_scale_gear(struct ufs_hba *hba, u32 target_gear, bool scale_up)
{
int ret = 0;
struct ufs_pa_layer_attr new_pwr_info;
+ if (target_gear) {
+ new_pwr_info = hba->pwr_info;
+ new_pwr_info.gear_tx = target_gear;
+ new_pwr_info.gear_rx = target_gear;
+
+ goto config_pwr_mode;
+ }
+
+ /* Legacy gear scaling, in case vops_freq_to_gear_speed() is not implemented */
if (scale_up) {
memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info,
sizeof(struct ufs_pa_layer_attr));
@@ -1343,6 +1353,7 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
}
}
+config_pwr_mode:
/* check if the power mode needs to be changed or not? */
ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
if (ret)
@@ -1387,13 +1398,13 @@ out:
return ret;
}
-static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err)
{
up_write(&hba->clk_scaling_lock);
- /* Enable Write Booster if we have scaled up else disable it */
+ /* Enable Write Booster if the current gear requires it, else disable it */
if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
- ufshcd_wb_toggle(hba, scale_up);
+ ufshcd_wb_toggle(hba, hba->pwr_info.gear_rx >= hba->clk_scaling.wb_gear);
mutex_unlock(&hba->wb_mutex);
@@ -1413,15 +1424,19 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool sc
static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
bool scale_up)
{
+ u32 old_gear = hba->pwr_info.gear_rx;
+ u32 new_gear = 0;
int ret = 0;
+ new_gear = ufshcd_vops_freq_to_gear_speed(hba, freq);
+
ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
if (ret)
return ret;
/* scale down the gear before scaling down clocks */
if (!scale_up) {
- ret = ufshcd_scale_gear(hba, false);
+ ret = ufshcd_scale_gear(hba, new_gear, false);
if (ret)
goto out_unprepare;
}
@@ -1429,13 +1444,13 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
ret = ufshcd_scale_clks(hba, freq, scale_up);
if (ret) {
if (!scale_up)
- ufshcd_scale_gear(hba, true);
+ ufshcd_scale_gear(hba, old_gear, true);
goto out_unprepare;
}
/* scale up the gear after scaling up clocks */
if (scale_up) {
- ret = ufshcd_scale_gear(hba, true);
+ ret = ufshcd_scale_gear(hba, new_gear, true);
if (ret) {
ufshcd_scale_clks(hba, hba->devfreq->previous_freq,
false);
@@ -1444,7 +1459,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
}
out_unprepare:
- ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
+ ufshcd_clock_scaling_unprepare(hba, ret);
return ret;
}
@@ -1548,7 +1563,7 @@ static int ufshcd_devfreq_target(struct device *dev,
if (!ret)
hba->clk_scaling.target_freq = *freq;
- trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+ trace_ufshcd_profile_clk_scaling(hba,
(scale_up ? "up" : "down"),
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
@@ -1720,6 +1735,8 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_clk_info *clki;
+ unsigned long freq;
u32 value;
int err = 0;
@@ -1743,14 +1760,25 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
if (value) {
ufshcd_resume_clkscaling(hba);
- } else {
- ufshcd_suspend_clkscaling(hba);
- err = ufshcd_devfreq_scale(hba, ULONG_MAX, true);
- if (err)
- dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
- __func__, err);
+ goto out_rel;
}
+ clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
+ freq = clki->max_freq;
+
+ ufshcd_suspend_clkscaling(hba);
+
+ if (!ufshcd_is_devfreq_scaling_required(hba, freq, true))
+ goto out_rel;
+
+ err = ufshcd_devfreq_scale(hba, freq, true);
+ if (err)
+ dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
+ __func__, err);
+ else
+ hba->clk_scaling.target_freq = freq;
+
+out_rel:
ufshcd_release(hba);
ufshcd_rpm_put_sync(hba);
out:
@@ -1783,6 +1811,10 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
if (!hba->clk_scaling.min_gear)
hba->clk_scaling.min_gear = UFS_HS_G1;
+ if (!hba->clk_scaling.wb_gear)
+ /* Use intermediate gear speed HS_G3 as the default wb_gear */
+ hba->clk_scaling.wb_gear = UFS_HS_G3;
+
INIT_WORK(&hba->clk_scaling.suspend_work,
ufshcd_clk_scaling_suspend_work);
INIT_WORK(&hba->clk_scaling.resume_work,
@@ -1881,7 +1913,7 @@ start:
case REQ_CLKS_OFF:
if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
hba->clk_gating.state = CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
break;
}
@@ -1893,7 +1925,7 @@ start:
fallthrough;
case CLKS_OFF:
hba->clk_gating.state = REQ_CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
queue_work(hba->clk_gating.clk_gating_workq,
&hba->clk_gating.ungate_work);
@@ -1933,7 +1965,7 @@ static void ufshcd_gate_work(struct work_struct *work)
if (hba->clk_gating.is_suspended ||
hba->clk_gating.state != REQ_CLKS_OFF) {
hba->clk_gating.state = CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
return;
}
@@ -1955,7 +1987,7 @@ static void ufshcd_gate_work(struct work_struct *work)
hba->clk_gating.state = CLKS_ON;
dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
__func__, ret);
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
return;
}
@@ -1980,7 +2012,7 @@ static void ufshcd_gate_work(struct work_struct *work)
guard(spinlock_irqsave)(&hba->clk_gating.lock);
if (hba->clk_gating.state == REQ_CLKS_OFF) {
hba->clk_gating.state = CLKS_OFF;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
}
}
@@ -2006,7 +2038,7 @@ static void __ufshcd_release(struct ufs_hba *hba)
}
hba->clk_gating.state = REQ_CLKS_OFF;
- trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
+ trace_ufshcd_clk_gating(hba, hba->clk_gating.state);
queue_delayed_work(hba->clk_gating.clk_gating_workq,
&hba->clk_gating.gate_work,
msecs_to_jiffies(hba->clk_gating.delay_ms));
@@ -4005,7 +4037,7 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
*
* Return: 0 on success, non-zero value on failure.
*/
-static int ufshcd_dme_reset(struct ufs_hba *hba)
+int ufshcd_dme_reset(struct ufs_hba *hba)
{
struct uic_command uic_cmd = {
.command = UIC_CMD_DME_RESET,
@@ -4019,6 +4051,7 @@ static int ufshcd_dme_reset(struct ufs_hba *hba)
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_dme_reset);
int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
int agreed_gear,
@@ -4044,7 +4077,7 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
*
* Return: 0 on success, non-zero value on failure.
*/
-static int ufshcd_dme_enable(struct ufs_hba *hba)
+int ufshcd_dme_enable(struct ufs_hba *hba)
{
struct uic_command uic_cmd = {
.command = UIC_CMD_DME_ENABLE,
@@ -4058,6 +4091,7 @@ static int ufshcd_dme_enable(struct ufs_hba *hba)
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_dme_enable);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
{
@@ -4422,7 +4456,7 @@ int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
- trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
+ trace_ufshcd_profile_hibern8(hba, "enter",
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
if (ret)
@@ -4447,7 +4481,7 @@ int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
- trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
+ trace_ufshcd_profile_hibern8(hba, "exit",
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
if (ret) {
@@ -5808,7 +5842,7 @@ static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
}
hba->auto_bkops_enabled = true;
- trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
+ trace_ufshcd_auto_bkops_state(hba, "Enabled");
/* No need of URGENT_BKOPS exception from the device */
err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
@@ -5859,7 +5893,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
}
hba->auto_bkops_enabled = false;
- trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
+ trace_ufshcd_auto_bkops_state(hba, "Disabled");
hba->is_urgent_bkops_lvl_checked = false;
out:
return err;
@@ -6193,7 +6227,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
return;
}
- trace_ufshcd_exception_event(dev_name(hba->dev), status);
+ trace_ufshcd_exception_event(hba, status);
if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
ufshcd_bkops_exception_event_handler(hba);
@@ -6201,6 +6235,11 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
+ if (status & hba->ee_drv_mask & MASK_EE_HEALTH_CRITICAL) {
+ hba->critical_health_count++;
+ sysfs_notify(&hba->dev->kobj, NULL, "critical_health");
+ }
+
ufs_debugfs_exception_event(hba, status);
}
@@ -7652,7 +7691,7 @@ static void ufshcd_process_probe_result(struct ufs_hba *hba,
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
spin_unlock_irqrestore(hba->host->host_lock, flags);
- trace_ufshcd_init(dev_name(hba->dev), ret,
+ trace_ufshcd_init(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), probe_start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
}
@@ -8293,6 +8332,11 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
ufshcd_temp_notif_probe(hba, desc_buf);
+ if (dev_info->wspecversion >= 0x410) {
+ hba->critical_health_count = 0;
+ ufshcd_enable_ee(hba, MASK_EE_HEALTH_CRITICAL);
+ }
+
ufs_init_rtc(hba, desc_buf);
/*
@@ -9148,12 +9192,12 @@ out:
} else if (!ret && on && hba->clk_gating.is_initialized) {
scoped_guard(spinlock_irqsave, &hba->clk_gating.lock)
hba->clk_gating.state = CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
}
if (clk_state_changed)
- trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_profile_clk_gating(hba,
(on ? "on" : "off"),
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
return ret;
@@ -9853,7 +9897,7 @@ static int ufshcd_wl_runtime_suspend(struct device *dev)
if (ret)
dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
- trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
+ trace_ufshcd_wl_runtime_suspend(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
@@ -9873,7 +9917,7 @@ static int ufshcd_wl_runtime_resume(struct device *dev)
if (ret)
dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
- trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
+ trace_ufshcd_wl_runtime_resume(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
@@ -9905,7 +9949,7 @@ static int ufshcd_wl_suspend(struct device *dev)
out:
if (!ret)
hba->is_sys_suspended = true;
- trace_ufshcd_wl_suspend(dev_name(dev), ret,
+ trace_ufshcd_wl_suspend(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
@@ -9928,7 +9972,7 @@ static int ufshcd_wl_resume(struct device *dev)
if (ret)
dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
out:
- trace_ufshcd_wl_resume(dev_name(dev), ret,
+ trace_ufshcd_wl_resume(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
if (!ret)
@@ -9966,7 +10010,7 @@ static int ufshcd_suspend(struct ufs_hba *hba)
}
if (ufshcd_is_clkgating_allowed(hba)) {
hba->clk_gating.state = CLKS_OFF;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
}
@@ -10039,7 +10083,7 @@ int ufshcd_system_suspend(struct device *dev)
ret = ufshcd_suspend(hba);
out:
- trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
+ trace_ufshcd_system_suspend(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
return ret;
@@ -10067,7 +10111,7 @@ int ufshcd_system_resume(struct device *dev)
ret = ufshcd_resume(hba);
out:
- trace_ufshcd_system_resume(dev_name(hba->dev), ret,
+ trace_ufshcd_system_resume(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
@@ -10093,7 +10137,7 @@ int ufshcd_runtime_suspend(struct device *dev)
ret = ufshcd_suspend(hba);
- trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
+ trace_ufshcd_runtime_suspend(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
return ret;
@@ -10120,7 +10164,7 @@ int ufshcd_runtime_resume(struct device *dev)
ret = ufshcd_resume(hba);
- trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
+ trace_ufshcd_runtime_resume(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
return ret;
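Taken together, the ufshcd.c changes above and the ufs-qcom.c hunks below give the full gear-aware scaling flow: the devfreq target frequency is first translated into a gear via the new freq_to_gear_speed vop, the gear change is still ordered around the clock change (down before, up after), and Write Booster is then keyed off the resulting gear rather than the scale_up flag. A worked example under the Qcom frequency-to-gear table below and the default wb_gear of HS_G3 set in ufshcd_init_clk_scaling(), assuming WB toggling on scaling is enabled for the host and the scale succeeded:

	/* target_freq = 201500000 -> ufs_qcom_freq_to_gear_speed() -> UFS_HS_G3
	 *   gear_rx (HS_G3) >= wb_gear (HS_G3)  -> Write Booster stays enabled
	 *
	 * target_freq = 100000000 -> UFS_HS_G2
	 *   gear_rx (HS_G2) <  wb_gear (HS_G3)  -> Write Booster is disabled
	 */
	ufshcd_wb_toggle(hba, hba->pwr_info.gear_rx >= hba->clk_scaling.wb_gear);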
diff --git a/drivers/ufs/host/Kconfig b/drivers/ufs/host/Kconfig
index 580c8d0bd8bb..191fbd799ec5 100644
--- a/drivers/ufs/host/Kconfig
+++ b/drivers/ufs/host/Kconfig
@@ -142,3 +142,15 @@ config SCSI_UFS_SPRD
Select this if you have UFS controller on Unisoc chipset.
If unsure, say N.
+
+config SCSI_UFS_ROCKCHIP
+ tristate "Rockchip UFS host controller driver"
+ depends on SCSI_UFSHCD_PLATFORM && (ARCH_ROCKCHIP || COMPILE_TEST)
+ help
+ This selects the Rockchip-specific additions to the UFSHCD platform driver.
+ The UFS host on Rockchip needs some vendor-specific configuration before
+ accessing the hardware, including PHY configuration and vendor-specific
+ registers.
+
+ Select this if you have a UFS controller on a Rockchip chipset.
+ If unsure, say N.
diff --git a/drivers/ufs/host/Makefile b/drivers/ufs/host/Makefile
index 4573aead02eb..2f97feb5db3f 100644
--- a/drivers/ufs/host/Makefile
+++ b/drivers/ufs/host/Makefile
@@ -10,5 +10,6 @@ obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
obj-$(CONFIG_SCSI_UFS_MEDIATEK) += ufs-mediatek.o
obj-$(CONFIG_SCSI_UFS_RENESAS) += ufs-renesas.o
+obj-$(CONFIG_SCSI_UFS_ROCKCHIP) += ufs-rockchip.o
obj-$(CONFIG_SCSI_UFS_SPRD) += ufs-sprd.o
obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index 13dd5dfc03eb..d7539cda97da 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -321,7 +321,7 @@ static int exynosauto_ufs_pre_pwr_change(struct exynos_ufs *ufs,
}
static int exynosauto_ufs_post_pwr_change(struct exynos_ufs *ufs,
- struct ufs_pa_layer_attr *pwr)
+ const struct ufs_pa_layer_attr *pwr)
{
struct ufs_hba *hba = ufs->hba;
u32 enabled_vh;
@@ -396,7 +396,7 @@ static int exynos7_ufs_pre_pwr_change(struct exynos_ufs *ufs,
}
static int exynos7_ufs_post_pwr_change(struct exynos_ufs *ufs,
- struct ufs_pa_layer_attr *pwr)
+ const struct ufs_pa_layer_attr *pwr)
{
struct ufs_hba *hba = ufs->hba;
int lanes = max_t(u32, pwr->lane_rx, pwr->lane_tx);
@@ -813,7 +813,7 @@ static u32 exynos_ufs_get_hs_gear(struct ufs_hba *hba)
}
static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *dev_max_params,
+ const struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
@@ -865,7 +865,7 @@ out:
#define PWR_MODE_STR_LEN 64
static int exynos_ufs_post_pwr_mode(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *pwr_req)
+ const struct ufs_pa_layer_attr *pwr_req)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
struct phy *generic_phy = ufs->phy;
@@ -1320,6 +1320,7 @@ static void exynos_ufs_fmp_init(struct ufs_hba *hba, struct exynos_ufs *ufs)
return;
}
profile->max_dun_bytes_supported = AES_BLOCK_SIZE;
+ profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
profile->dev = hba->dev;
profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] =
DATA_UNIT_SIZE;
@@ -1366,7 +1367,7 @@ static int exynos_ufs_fmp_fill_prdt(struct ufs_hba *hba,
void *prdt, unsigned int num_segments)
{
struct fmp_sg_entry *fmp_prdt = prdt;
- const u8 *enckey = crypt_ctx->bc_key->raw;
+ const u8 *enckey = crypt_ctx->bc_key->bytes;
const u8 *twkey = enckey + AES_KEYSIZE_256;
u64 dun_lo = crypt_ctx->bc_dun[0];
u64 dun_hi = crypt_ctx->bc_dun[1];
@@ -1634,7 +1635,7 @@ static int exynos_ufs_link_startup_notify(struct ufs_hba *hba,
static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *dev_max_params,
+ const struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
int ret = 0;
diff --git a/drivers/ufs/host/ufs-exynos.h b/drivers/ufs/host/ufs-exynos.h
index 9670dc138d1e..aac517276189 100644
--- a/drivers/ufs/host/ufs-exynos.h
+++ b/drivers/ufs/host/ufs-exynos.h
@@ -188,7 +188,7 @@ struct exynos_ufs_drv_data {
int (*pre_pwr_change)(struct exynos_ufs *ufs,
struct ufs_pa_layer_attr *pwr);
int (*post_pwr_change)(struct exynos_ufs *ufs,
- struct ufs_pa_layer_attr *pwr);
+ const struct ufs_pa_layer_attr *pwr);
int (*pre_hce_enable)(struct exynos_ufs *ufs);
int (*post_hce_enable)(struct exynos_ufs *ufs);
};
diff --git a/drivers/ufs/host/ufs-hisi.c b/drivers/ufs/host/ufs-hisi.c
index 6e6569de74d8..6f2e6bf31225 100644
--- a/drivers/ufs/host/ufs-hisi.c
+++ b/drivers/ufs/host/ufs-hisi.c
@@ -361,9 +361,9 @@ static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
}
static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
+ enum ufs_notify_change_status status,
+ const struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
{
struct ufs_host_params host_params;
int ret = 0;
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index 135cd78109e2..182f58d0c9db 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -1081,8 +1081,8 @@ static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
}
static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
+ const struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct ufs_host_params host_params;
@@ -1134,9 +1134,9 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
}
static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status stage,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
+ enum ufs_notify_change_status stage,
+ const struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
{
int ret = 0;
@@ -1643,6 +1643,7 @@ static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
}
static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
+ unsigned long target_freq,
enum ufs_notify_change_status status)
{
if (!ufshcd_is_clkscaling_supported(hba))
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 23b9f6efa047..1b37449fbffc 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -15,6 +15,8 @@
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/time.h>
+#include <linux/unaligned.h>
+#include <linux/units.h>
#include <soc/qcom/ice.h>
@@ -97,7 +99,7 @@ static const struct __ufs_qcom_bw_table {
};
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
-static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up);
+static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq);
static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
{
@@ -105,6 +107,26 @@ static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
}
#ifdef CONFIG_SCSI_UFS_CRYPTO
+/**
+ * ufs_qcom_config_ice_allocator() - ICE core allocator configuration
+ *
+ * @host: pointer to qcom specific variant structure.
+ */
+static void ufs_qcom_config_ice_allocator(struct ufs_qcom_host *host)
+{
+ struct ufs_hba *hba = host->hba;
+ static const uint8_t val[4] = { NUM_RX_R1W0, NUM_TX_R0W1, NUM_RX_R1W1, NUM_TX_R1W1 };
+ u32 config;
+
+ if (!(host->caps & UFS_QCOM_CAP_ICE_CONFIG) ||
+ !(host->hba->caps & UFSHCD_CAP_CRYPTO))
+ return;
+
+ config = get_unaligned_le32(val);
+
+ ufshcd_writel(hba, ICE_ALLOCATOR_TYPE, REG_UFS_MEM_ICE_CONFIG);
+ ufshcd_writel(hba, config, REG_UFS_MEM_ICE_NUM_CORE);
+}
static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host)
{
@@ -125,7 +147,7 @@ static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
int err;
int i;
- ice = of_qcom_ice_get(dev);
+ ice = devm_of_qcom_ice_get(dev);
if (ice == ERR_PTR(-EOPNOTSUPP)) {
dev_warn(dev, "Disabling inline encryption support\n");
ice = NULL;
@@ -147,6 +169,7 @@ static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
profile->ll_ops = ufs_qcom_crypto_ops;
profile->max_dun_bytes_supported = 8;
+ profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
profile->dev = dev;
/*
@@ -202,7 +225,7 @@ static int ufs_qcom_ice_keyslot_program(struct blk_crypto_profile *profile,
err = qcom_ice_program_key(host->ice,
QCOM_ICE_CRYPTO_ALG_AES_XTS,
QCOM_ICE_CRYPTO_KEY_SIZE_256,
- key->raw,
+ key->bytes,
key->crypto_cfg.data_unit_size / 512,
slot);
ufshcd_release(hba);
@@ -248,6 +271,11 @@ static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host)
{
return 0;
}
+
+static void ufs_qcom_config_ice_allocator(struct ufs_qcom_host *host)
+{
+}
+
#endif
static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
@@ -496,6 +524,7 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
err = ufs_qcom_check_hibern8(hba);
ufs_qcom_enable_hw_clk_gating(hba);
ufs_qcom_ice_enable(host);
+ ufs_qcom_config_ice_allocator(host);
break;
default:
dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
@@ -509,16 +538,10 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
* ufs_qcom_cfg_timers - Configure ufs qcom cfg timers
*
* @hba: host controller instance
- * @gear: Current operating gear
- * @hs: current power mode
- * @rate: current operating rate (A or B)
- * @update_link_startup_timer: indicate if link_start ongoing
* @is_pre_scale_up: flag to check if pre scale up condition.
* Return: zero for success and non-zero in case of a failure.
*/
-static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
- u32 hs, u32 rate, bool update_link_startup_timer,
- bool is_pre_scale_up)
+static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct ufs_clk_info *clki;
@@ -534,11 +557,6 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
if (host->hw_ver.major < 4 && !ufshcd_is_intr_aggr_allowed(hba))
return 0;
- if (gear == 0) {
- dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
- return -EINVAL;
- }
-
list_for_each_entry(clki, &hba->clk_list_head, list) {
if (!strcmp(clki->name, "core_clk")) {
if (is_pre_scale_up)
@@ -574,14 +592,13 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
switch (status) {
case PRE_CHANGE:
- if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
- 0, true, false)) {
+ if (ufs_qcom_cfg_timers(hba, false)) {
dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
__func__);
return -EINVAL;
}
- err = ufs_qcom_set_core_clk_ctrl(hba, true);
+ err = ufs_qcom_set_core_clk_ctrl(hba, ULONG_MAX);
if (err)
dev_err(hba->dev, "cfg core clk ctrl failed\n");
/*
@@ -780,7 +797,7 @@ static int ufs_qcom_icc_update_bw(struct ufs_qcom_host *host)
static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *dev_max_params,
+ const struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -831,9 +848,7 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
}
break;
case POST_CHANGE:
- if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
- dev_req_params->pwr_rx,
- dev_req_params->hs_rate, false, false)) {
+ if (ufs_qcom_cfg_timers(hba, false)) {
dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
__func__);
/*
@@ -989,6 +1004,14 @@ static void ufs_qcom_set_host_params(struct ufs_hba *hba)
host_params->hs_tx_gear = host_params->hs_rx_gear = ufs_qcom_get_hs_gear(hba);
}
+static void ufs_qcom_set_host_caps(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ if (host->hw_ver.major >= 0x5)
+ host->caps |= UFS_QCOM_CAP_ICE_CONFIG;
+}
+
static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
@@ -997,6 +1020,8 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
hba->caps |= UFSHCD_CAP_WB_EN;
hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
+
+ ufs_qcom_set_host_caps(hba);
}
/**
@@ -1292,7 +1317,7 @@ static int ufs_qcom_set_clk_40ns_cycles(struct ufs_hba *hba,
return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), reg);
}
-static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up)
+static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct list_head *head = &hba->clk_list_head;
@@ -1306,10 +1331,11 @@ static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up)
!strcmp(clki->name, "core_clk_unipro")) {
if (!clki->max_freq)
cycles_in_1us = 150; /* default for backwards compatibility */
- else if (is_scale_up)
- cycles_in_1us = ceil(clki->max_freq, (1000 * 1000));
+ else if (freq == ULONG_MAX)
+ cycles_in_1us = ceil(clki->max_freq, HZ_PER_MHZ);
else
- cycles_in_1us = ceil(clk_get_rate(clki->clk), (1000 * 1000));
+ cycles_in_1us = ceil(freq, HZ_PER_MHZ);
+
break;
}
}
@@ -1346,20 +1372,17 @@ static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up)
return ufs_qcom_set_clk_40ns_cycles(hba, cycles_in_1us);
}
-static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
+static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba, unsigned long freq)
{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct ufs_pa_layer_attr *attr = &host->dev_req_params;
int ret;
- ret = ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
- attr->hs_rate, false, true);
+ ret = ufs_qcom_cfg_timers(hba, true);
if (ret) {
dev_err(hba->dev, "%s ufs cfg timer failed\n", __func__);
return ret;
}
/* set unipro core clock attributes and clear clock divider */
- return ufs_qcom_set_core_clk_ctrl(hba, true);
+ return ufs_qcom_set_core_clk_ctrl(hba, freq);
}
static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
@@ -1388,14 +1411,15 @@ static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
return err;
}
-static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
+static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba, unsigned long freq)
{
/* set unipro core clock attributes and clear clock divider */
- return ufs_qcom_set_core_clk_ctrl(hba, false);
+ return ufs_qcom_set_core_clk_ctrl(hba, freq);
}
-static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
- bool scale_up, enum ufs_notify_change_status status)
+static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
+ unsigned long target_freq,
+ enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int err;
@@ -1409,7 +1433,7 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
if (err)
return err;
if (scale_up)
- err = ufs_qcom_clk_scale_up_pre_change(hba);
+ err = ufs_qcom_clk_scale_up_pre_change(hba, target_freq);
else
err = ufs_qcom_clk_scale_down_pre_change(hba);
@@ -1421,7 +1445,7 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
if (scale_up)
err = ufs_qcom_clk_scale_up_post_change(hba);
else
- err = ufs_qcom_clk_scale_down_post_change(hba);
+ err = ufs_qcom_clk_scale_down_post_change(hba, target_freq);
if (err) {
@@ -1855,6 +1879,36 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
return ret;
}
+static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
+{
+ u32 gear = 0;
+
+ switch (freq) {
+ case 403000000:
+ gear = UFS_HS_G5;
+ break;
+ case 300000000:
+ gear = UFS_HS_G4;
+ break;
+ case 201500000:
+ gear = UFS_HS_G3;
+ break;
+ case 150000000:
+ case 100000000:
+ gear = UFS_HS_G2;
+ break;
+ case 75000000:
+ case 37500000:
+ gear = UFS_HS_G1;
+ break;
+ default:
+ dev_err(hba->dev, "%s: Unsupported clock freq : %lu\n", __func__, freq);
+ break;
+ }
+
+ return gear;
+}
+
/*
* struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
*
@@ -1883,6 +1937,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.op_runtime_config = ufs_qcom_op_runtime_config,
.get_outstanding_cqs = ufs_qcom_get_outstanding_cqs,
.config_esi = ufs_qcom_config_esi,
+ .freq_to_gear_speed = ufs_qcom_freq_to_gear_speed,
};
/**
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index 919f53682beb..d0e6ec9128e7 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -50,6 +50,9 @@ enum {
*/
UFS_AH8_CFG = 0xFC,
+ REG_UFS_MEM_ICE_CONFIG = 0x260C,
+ REG_UFS_MEM_ICE_NUM_CORE = 0x2664,
+
REG_UFS_CFG3 = 0x271C,
REG_UFS_DEBUG_SPARE_CFG = 0x284C,
@@ -110,6 +113,9 @@ enum {
/* bit definition for UFS_UFS_TEST_BUS_CTRL_n */
#define TEST_BUS_SUB_SEL_MASK GENMASK(4, 0) /* All XXX_SEL fields are 5 bits wide */
+/* QCOM UFS host capability bit: shared ICE configuration is supported */
+#define UFS_QCOM_CAP_ICE_CONFIG BIT(0)
+
#define REG_UFS_CFG2_CGC_EN_ALL (UAWM_HW_CGC_EN | UARM_HW_CGC_EN |\
TXUC_HW_CGC_EN | RXUC_HW_CGC_EN |\
DFC_HW_CGC_EN | TRLUT_HW_CGC_EN |\
@@ -135,6 +141,37 @@ enum {
#define UNIPRO_CORE_CLK_FREQ_201_5_MHZ 202
#define UNIPRO_CORE_CLK_FREQ_403_MHZ 403
+/* ICE allocator type used to share the AES engines between the TX and RX streams */
+#define ICE_ALLOCATOR_TYPE 2
+
+/*
+ * Number of cores allocated to the RX stream when a read data block is
+ * received and no write data block is in progress
+ */
+#define NUM_RX_R1W0 28
+
+/*
+ * Number of cores allocated to the TX stream when the device has asked to
+ * send a write data block and no read data block is in progress
+ */
+#define NUM_TX_R0W1 28
+
+/*
+ * Number of cores allocated to the RX stream when a read data block is
+ * received while a write data block is in progress,
+ * OR
+ * the device has asked to send a write data block while a read data block
+ * is in progress
+ */
+#define NUM_RX_R1W1 15
+
+/*
+ * Number of cores allocated to the TX stream (UFS write) when a read data
+ * block is received while a write data block is in progress,
+ * OR
+ * the device has asked to send a write data block while a read data block
+ * is in progress
+ */
+#define NUM_TX_R1W1 13
+
static inline void
ufs_qcom_get_controller_revision(struct ufs_hba *hba,
u8 *major, u16 *minor, u16 *step)
@@ -196,7 +233,7 @@ struct ufs_qcom_host {
#ifdef CONFIG_SCSI_UFS_CRYPTO
struct qcom_ice *ice;
#endif
-
+ u32 caps;
void __iomem *dev_ref_clk_ctrl_mmio;
bool is_dev_ref_clk_enabled;
struct ufs_hw_version hw_ver;
diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c
index 03cd82db751b..5bf7d0e77ad8 100644
--- a/drivers/ufs/host/ufs-renesas.c
+++ b/drivers/ufs/host/ufs-renesas.c
@@ -9,324 +9,408 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
+#include <linux/firmware.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/sys_soc.h>
#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
+#define EFUSE_CALIB_SIZE 8
+
struct ufs_renesas_priv {
+ const struct firmware *fw;
+ void (*pre_init)(struct ufs_hba *hba);
bool initialized; /* The hardware needs initialization once */
+ u8 calib[EFUSE_CALIB_SIZE];
};
-enum {
- SET_PHY_INDEX_LO = 0,
- SET_PHY_INDEX_HI,
- TIMER_INDEX,
- MAX_INDEX
-};
+#define UFS_RENESAS_FIRMWARE_NAME "r8a779f0_ufs.bin"
+MODULE_FIRMWARE(UFS_RENESAS_FIRMWARE_NAME);
-enum ufs_renesas_init_param_mode {
- MODE_RESTORE,
- MODE_SET,
- MODE_SAVE,
- MODE_POLL,
- MODE_WAIT,
- MODE_WRITE,
-};
+static void ufs_renesas_dbg_register_dump(struct ufs_hba *hba)
+{
+ ufshcd_dump_regs(hba, 0xc0, 0x40, "regs: 0xc0 + ");
+}
-#define PARAM_RESTORE(_reg, _index) \
- { .mode = MODE_RESTORE, .reg = _reg, .index = _index }
-#define PARAM_SET(_index, _set) \
- { .mode = MODE_SET, .index = _index, .u.set = _set }
-#define PARAM_SAVE(_reg, _mask, _index) \
- { .mode = MODE_SAVE, .reg = _reg, .mask = (u32)(_mask), \
- .index = _index }
-#define PARAM_POLL(_reg, _expected, _mask) \
- { .mode = MODE_POLL, .reg = _reg, .u.expected = _expected, \
- .mask = (u32)(_mask) }
-#define PARAM_WAIT(_delay_us) \
- { .mode = MODE_WAIT, .u.delay_us = _delay_us }
-
-#define PARAM_WRITE(_reg, _val) \
- { .mode = MODE_WRITE, .reg = _reg, .u.val = _val }
-
-#define PARAM_WRITE_D0_D4(_d0, _d4) \
- PARAM_WRITE(0xd0, _d0), PARAM_WRITE(0xd4, _d4)
-
-#define PARAM_WRITE_800_80C_POLL(_addr, _data_800) \
- PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \
- PARAM_WRITE_D0_D4(0x00000800, ((_data_800) << 16) | BIT(8) | (_addr)), \
- PARAM_WRITE(0xd0, 0x0000080c), \
- PARAM_POLL(0xd4, BIT(8), BIT(8))
-
-#define PARAM_RESTORE_800_80C_POLL(_index) \
- PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \
- PARAM_WRITE(0xd0, 0x00000800), \
- PARAM_RESTORE(0xd4, _index), \
- PARAM_WRITE(0xd0, 0x0000080c), \
- PARAM_POLL(0xd4, BIT(8), BIT(8))
-
-#define PARAM_WRITE_804_80C_POLL(_addr, _data_804) \
- PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \
- PARAM_WRITE_D0_D4(0x00000804, ((_data_804) << 16) | BIT(8) | (_addr)), \
- PARAM_WRITE(0xd0, 0x0000080c), \
- PARAM_POLL(0xd4, BIT(8), BIT(8))
-
-#define PARAM_WRITE_828_82C_POLL(_data_828) \
- PARAM_WRITE_D0_D4(0x0000082c, 0x0f000000), \
- PARAM_WRITE_D0_D4(0x00000828, _data_828), \
- PARAM_WRITE(0xd0, 0x0000082c), \
- PARAM_POLL(0xd4, _data_828, _data_828)
-
-#define PARAM_WRITE_PHY(_addr16, _data16) \
- PARAM_WRITE(0xf0, 1), \
- PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \
- PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \
- PARAM_WRITE_800_80C_POLL(0x18, (_data16) & 0xff), \
- PARAM_WRITE_800_80C_POLL(0x19, ((_data16) >> 8) & 0xff), \
- PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \
- PARAM_WRITE_828_82C_POLL(0x0f000000), \
- PARAM_WRITE(0xf0, 0)
-
-#define PARAM_SET_PHY(_addr16, _data16) \
- PARAM_WRITE(0xf0, 1), \
- PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \
- PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \
- PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \
- PARAM_WRITE_828_82C_POLL(0x0f000000), \
- PARAM_WRITE_804_80C_POLL(0x1a, 0), \
- PARAM_WRITE(0xd0, 0x00000808), \
- PARAM_SAVE(0xd4, 0xff, SET_PHY_INDEX_LO), \
- PARAM_WRITE_804_80C_POLL(0x1b, 0), \
- PARAM_WRITE(0xd0, 0x00000808), \
- PARAM_SAVE(0xd4, 0xff, SET_PHY_INDEX_HI), \
- PARAM_WRITE_828_82C_POLL(0x0f000000), \
- PARAM_WRITE(0xf0, 0), \
- PARAM_WRITE(0xf0, 1), \
- PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \
- PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \
- PARAM_SET(SET_PHY_INDEX_LO, ((_data16 & 0xff) << 16) | BIT(8) | 0x18), \
- PARAM_RESTORE_800_80C_POLL(SET_PHY_INDEX_LO), \
- PARAM_SET(SET_PHY_INDEX_HI, (((_data16 >> 8) & 0xff) << 16) | BIT(8) | 0x19), \
- PARAM_RESTORE_800_80C_POLL(SET_PHY_INDEX_HI), \
- PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \
- PARAM_WRITE_828_82C_POLL(0x0f000000), \
- PARAM_WRITE(0xf0, 0)
-
-#define PARAM_INDIRECT_WRITE(_gpio, _addr, _data_800) \
- PARAM_WRITE(0xf0, _gpio), \
- PARAM_WRITE_800_80C_POLL(_addr, _data_800), \
- PARAM_WRITE_828_82C_POLL(0x0f000000), \
- PARAM_WRITE(0xf0, 0)
-
-#define PARAM_INDIRECT_POLL(_gpio, _addr, _expected, _mask) \
- PARAM_WRITE(0xf0, _gpio), \
- PARAM_WRITE_800_80C_POLL(_addr, 0), \
- PARAM_WRITE(0xd0, 0x00000808), \
- PARAM_POLL(0xd4, _expected, _mask), \
- PARAM_WRITE(0xf0, 0)
-
-struct ufs_renesas_init_param {
- enum ufs_renesas_init_param_mode mode;
- u32 reg;
- union {
- u32 expected;
- u32 delay_us;
- u32 set;
- u32 val;
- } u;
- u32 mask;
- u32 index;
-};
+static void ufs_renesas_poll(struct ufs_hba *hba, u32 reg, u32 expected, u32 mask)
+{
+ int ret;
+ u32 val;
-/* This setting is for SERIES B */
-static const struct ufs_renesas_init_param ufs_param[] = {
- PARAM_WRITE(0xc0, 0x49425308),
- PARAM_WRITE_D0_D4(0x00000104, 0x00000002),
- PARAM_WAIT(1),
- PARAM_WRITE_D0_D4(0x00000828, 0x00000200),
- PARAM_WAIT(1),
- PARAM_WRITE_D0_D4(0x00000828, 0x00000000),
- PARAM_WRITE_D0_D4(0x00000104, 0x00000001),
- PARAM_WRITE_D0_D4(0x00000940, 0x00000001),
- PARAM_WAIT(1),
- PARAM_WRITE_D0_D4(0x00000940, 0x00000000),
-
- PARAM_WRITE(0xc0, 0x49425308),
- PARAM_WRITE(0xc0, 0x41584901),
-
- PARAM_WRITE_D0_D4(0x0000080c, 0x00000100),
- PARAM_WRITE_D0_D4(0x00000804, 0x00000000),
- PARAM_WRITE(0xd0, 0x0000080c),
- PARAM_POLL(0xd4, BIT(8), BIT(8)),
-
- PARAM_WRITE(REG_CONTROLLER_ENABLE, 0x00000001),
-
- PARAM_WRITE(0xd0, 0x00000804),
- PARAM_POLL(0xd4, BIT(8) | BIT(6) | BIT(0), BIT(8) | BIT(6) | BIT(0)),
-
- PARAM_WRITE(0xd0, 0x00000d00),
- PARAM_SAVE(0xd4, 0x0000ffff, TIMER_INDEX),
- PARAM_WRITE(0xd4, 0x00000000),
- PARAM_WRITE_D0_D4(0x0000082c, 0x0f000000),
- PARAM_WRITE_D0_D4(0x00000828, 0x08000000),
- PARAM_WRITE(0xd0, 0x0000082c),
- PARAM_POLL(0xd4, BIT(27), BIT(27)),
- PARAM_WRITE(0xd0, 0x00000d2c),
- PARAM_POLL(0xd4, BIT(0), BIT(0)),
+ ret = readl_poll_timeout_atomic(hba->mmio_base + reg,
+ val, (val & mask) == expected,
+ 10, 1000);
+ if (ret)
+ dev_err(hba->dev, "%s: poll failed %d (%08x, %08x, %08x)\n",
+ __func__, ret, val, mask, expected);
+}
- /* phy setup */
- PARAM_INDIRECT_WRITE(1, 0x01, 0x001f),
- PARAM_INDIRECT_WRITE(7, 0x5d, 0x0014),
- PARAM_INDIRECT_WRITE(7, 0x5e, 0x0014),
- PARAM_INDIRECT_WRITE(7, 0x0d, 0x0003),
- PARAM_INDIRECT_WRITE(7, 0x0e, 0x0007),
- PARAM_INDIRECT_WRITE(7, 0x5f, 0x0003),
- PARAM_INDIRECT_WRITE(7, 0x60, 0x0003),
- PARAM_INDIRECT_WRITE(7, 0x5b, 0x00a6),
- PARAM_INDIRECT_WRITE(7, 0x5c, 0x0003),
-
- PARAM_INDIRECT_POLL(7, 0x3c, 0, BIT(7)),
- PARAM_INDIRECT_POLL(7, 0x4c, 0, BIT(4)),
-
- PARAM_INDIRECT_WRITE(1, 0x32, 0x0080),
- PARAM_INDIRECT_WRITE(1, 0x1f, 0x0001),
- PARAM_INDIRECT_WRITE(0, 0x2c, 0x0001),
- PARAM_INDIRECT_WRITE(0, 0x32, 0x0087),
-
- PARAM_INDIRECT_WRITE(1, 0x4d, 0x0061),
- PARAM_INDIRECT_WRITE(4, 0x9b, 0x0009),
- PARAM_INDIRECT_WRITE(4, 0xa6, 0x0005),
- PARAM_INDIRECT_WRITE(4, 0xa5, 0x0058),
- PARAM_INDIRECT_WRITE(1, 0x39, 0x0027),
- PARAM_INDIRECT_WRITE(1, 0x47, 0x004c),
-
- PARAM_INDIRECT_WRITE(7, 0x0d, 0x0002),
- PARAM_INDIRECT_WRITE(7, 0x0e, 0x0007),
-
- PARAM_WRITE_PHY(0x0028, 0x0061),
- PARAM_WRITE_PHY(0x4014, 0x0061),
- PARAM_SET_PHY(0x401c, BIT(2)),
- PARAM_WRITE_PHY(0x4000, 0x0000),
- PARAM_WRITE_PHY(0x4001, 0x0000),
-
- PARAM_WRITE_PHY(0x10ae, 0x0001),
- PARAM_WRITE_PHY(0x10ad, 0x0000),
- PARAM_WRITE_PHY(0x10af, 0x0001),
- PARAM_WRITE_PHY(0x10b6, 0x0001),
- PARAM_WRITE_PHY(0x10ae, 0x0000),
-
- PARAM_WRITE_PHY(0x10ae, 0x0001),
- PARAM_WRITE_PHY(0x10ad, 0x0000),
- PARAM_WRITE_PHY(0x10af, 0x0002),
- PARAM_WRITE_PHY(0x10b6, 0x0001),
- PARAM_WRITE_PHY(0x10ae, 0x0000),
-
- PARAM_WRITE_PHY(0x10ae, 0x0001),
- PARAM_WRITE_PHY(0x10ad, 0x0080),
- PARAM_WRITE_PHY(0x10af, 0x0000),
- PARAM_WRITE_PHY(0x10b6, 0x0001),
- PARAM_WRITE_PHY(0x10ae, 0x0000),
-
- PARAM_WRITE_PHY(0x10ae, 0x0001),
- PARAM_WRITE_PHY(0x10ad, 0x0080),
- PARAM_WRITE_PHY(0x10af, 0x001a),
- PARAM_WRITE_PHY(0x10b6, 0x0001),
- PARAM_WRITE_PHY(0x10ae, 0x0000),
-
- PARAM_INDIRECT_WRITE(7, 0x70, 0x0016),
- PARAM_INDIRECT_WRITE(7, 0x71, 0x0016),
- PARAM_INDIRECT_WRITE(7, 0x72, 0x0014),
- PARAM_INDIRECT_WRITE(7, 0x73, 0x0014),
- PARAM_INDIRECT_WRITE(7, 0x74, 0x0000),
- PARAM_INDIRECT_WRITE(7, 0x75, 0x0000),
- PARAM_INDIRECT_WRITE(7, 0x76, 0x0010),
- PARAM_INDIRECT_WRITE(7, 0x77, 0x0010),
- PARAM_INDIRECT_WRITE(7, 0x78, 0x00ff),
- PARAM_INDIRECT_WRITE(7, 0x79, 0x0000),
-
- PARAM_INDIRECT_WRITE(7, 0x19, 0x0007),
-
- PARAM_INDIRECT_WRITE(7, 0x1a, 0x0007),
-
- PARAM_INDIRECT_WRITE(7, 0x24, 0x000c),
-
- PARAM_INDIRECT_WRITE(7, 0x25, 0x000c),
-
- PARAM_INDIRECT_WRITE(7, 0x62, 0x0000),
- PARAM_INDIRECT_WRITE(7, 0x63, 0x0000),
- PARAM_INDIRECT_WRITE(7, 0x5d, 0x0014),
- PARAM_INDIRECT_WRITE(7, 0x5e, 0x0017),
- PARAM_INDIRECT_WRITE(7, 0x5d, 0x0004),
- PARAM_INDIRECT_WRITE(7, 0x5e, 0x0017),
- PARAM_INDIRECT_POLL(7, 0x55, 0, BIT(6)),
- PARAM_INDIRECT_POLL(7, 0x41, 0, BIT(7)),
- /* end of phy setup */
+static u32 ufs_renesas_read(struct ufs_hba *hba, u32 reg)
+{
+ return ufshcd_readl(hba, reg);
+}
- PARAM_WRITE(0xf0, 0),
- PARAM_WRITE(0xd0, 0x00000d00),
- PARAM_RESTORE(0xd4, TIMER_INDEX),
-};
+static void ufs_renesas_write(struct ufs_hba *hba, u32 reg, u32 value)
+{
+ ufshcd_writel(hba, value, reg);
+}
-static void ufs_renesas_dbg_register_dump(struct ufs_hba *hba)
+static void ufs_renesas_write_d0_d4(struct ufs_hba *hba, u32 data_d0, u32 data_d4)
{
- ufshcd_dump_regs(hba, 0xc0, 0x40, "regs: 0xc0 + ");
+ ufs_renesas_write(hba, 0xd0, data_d0);
+ ufs_renesas_write(hba, 0xd4, data_d4);
}
-static void ufs_renesas_reg_control(struct ufs_hba *hba,
- const struct ufs_renesas_init_param *p)
+static void ufs_renesas_write_800_80c_poll(struct ufs_hba *hba, u32 addr,
+ u32 data_800)
{
- static u32 save[MAX_INDEX];
- int ret;
- u32 val;
+ ufs_renesas_write_d0_d4(hba, 0x0000080c, 0x00000100);
+ ufs_renesas_write_d0_d4(hba, 0x00000800, (data_800 << 16) | BIT(8) | addr);
+ ufs_renesas_write(hba, 0xd0, 0x0000080c);
+ ufs_renesas_poll(hba, 0xd4, BIT(8), BIT(8));
+}
+
+static void ufs_renesas_write_804_80c_poll(struct ufs_hba *hba, u32 addr, u32 data_804)
+{
+ ufs_renesas_write_d0_d4(hba, 0x0000080c, 0x00000100);
+ ufs_renesas_write_d0_d4(hba, 0x00000804, (data_804 << 16) | BIT(8) | addr);
+ ufs_renesas_write(hba, 0xd0, 0x0000080c);
+ ufs_renesas_poll(hba, 0xd4, BIT(8), BIT(8));
+}
+
+static void ufs_renesas_write_828_82c_poll(struct ufs_hba *hba, u32 data_828)
+{
+ ufs_renesas_write_d0_d4(hba, 0x0000082c, 0x0f000000);
+ ufs_renesas_write_d0_d4(hba, 0x00000828, data_828);
+ ufs_renesas_write(hba, 0xd0, 0x0000082c);
+ ufs_renesas_poll(hba, 0xd4, data_828, data_828);
+}
+
+static void ufs_renesas_write_phy(struct ufs_hba *hba, u32 addr16, u32 data16)
+{
+ ufs_renesas_write(hba, 0xf0, 1);
+ ufs_renesas_write_800_80c_poll(hba, 0x16, addr16 & 0xff);
+ ufs_renesas_write_800_80c_poll(hba, 0x17, (addr16 >> 8) & 0xff);
+ ufs_renesas_write_800_80c_poll(hba, 0x18, data16 & 0xff);
+ ufs_renesas_write_800_80c_poll(hba, 0x19, (data16 >> 8) & 0xff);
+ ufs_renesas_write_800_80c_poll(hba, 0x1c, 0x01);
+ ufs_renesas_write_828_82c_poll(hba, 0x0f000000);
+ ufs_renesas_write(hba, 0xf0, 0);
+}
+
+static void ufs_renesas_set_phy(struct ufs_hba *hba, u32 addr16, u32 data16)
+{
+ u32 low, high;
+
+ ufs_renesas_write(hba, 0xf0, 1);
+ ufs_renesas_write_800_80c_poll(hba, 0x16, addr16 & 0xff);
+ ufs_renesas_write_800_80c_poll(hba, 0x17, (addr16 >> 8) & 0xff);
+ ufs_renesas_write_800_80c_poll(hba, 0x1c, 0x01);
+ ufs_renesas_write_828_82c_poll(hba, 0x0f000000);
+ ufs_renesas_write_804_80c_poll(hba, 0x1a, 0);
+ ufs_renesas_write(hba, 0xd0, 0x00000808);
+ low = ufs_renesas_read(hba, 0xd4) & 0xff;
+ ufs_renesas_write_804_80c_poll(hba, 0x1b, 0);
+ ufs_renesas_write(hba, 0xd0, 0x00000808);
+ high = ufs_renesas_read(hba, 0xd4) & 0xff;
+ ufs_renesas_write_828_82c_poll(hba, 0x0f000000);
+ ufs_renesas_write(hba, 0xf0, 0);
+
+ data16 |= (high << 8) | low;
+ ufs_renesas_write_phy(hba, addr16, data16);
+}
+
+static void ufs_renesas_reset_indirect_write(struct ufs_hba *hba, int gpio,
+ u32 addr, u32 data)
+{
+ ufs_renesas_write(hba, 0xf0, gpio);
+ ufs_renesas_write_800_80c_poll(hba, addr, data);
+}
+
+static void ufs_renesas_reset_indirect_update(struct ufs_hba *hba)
+{
+ ufs_renesas_write_d0_d4(hba, 0x0000082c, 0x0f000000);
+ ufs_renesas_write_d0_d4(hba, 0x00000828, 0x0f000000);
+ ufs_renesas_write(hba, 0xd0, 0x0000082c);
+ ufs_renesas_poll(hba, 0xd4, BIT(27) | BIT(26) | BIT(24), BIT(27) | BIT(26) | BIT(24));
+ ufs_renesas_write(hba, 0xf0, 0);
+}
+
+static void ufs_renesas_indirect_write(struct ufs_hba *hba, u32 gpio, u32 addr,
+ u32 data_800)
+{
+ ufs_renesas_write(hba, 0xf0, gpio);
+ ufs_renesas_write_800_80c_poll(hba, addr, data_800);
+ ufs_renesas_write_828_82c_poll(hba, 0x0f000000);
+ ufs_renesas_write(hba, 0xf0, 0);
+}
+
+static void ufs_renesas_indirect_poll(struct ufs_hba *hba, u32 gpio, u32 addr,
+ u32 expected, u32 mask)
+{
+ ufs_renesas_write(hba, 0xf0, gpio);
+ ufs_renesas_write_800_80c_poll(hba, addr, 0);
+ ufs_renesas_write(hba, 0xd0, 0x00000808);
+ ufs_renesas_poll(hba, 0xd4, expected, mask);
+ ufs_renesas_write(hba, 0xf0, 0);
+}
+
+static void ufs_renesas_init_step1_to_3(struct ufs_hba *hba, bool init108)
+{
+ ufs_renesas_write(hba, 0xc0, 0x49425308);
+ ufs_renesas_write_d0_d4(hba, 0x00000104, 0x00000002);
+ if (init108)
+ ufs_renesas_write_d0_d4(hba, 0x00000108, 0x00000002);
+ udelay(1);
+ ufs_renesas_write_d0_d4(hba, 0x00000828, 0x00000200);
+ udelay(1);
+ ufs_renesas_write_d0_d4(hba, 0x00000828, 0x00000000);
+ ufs_renesas_write_d0_d4(hba, 0x00000104, 0x00000001);
+ if (init108)
+ ufs_renesas_write_d0_d4(hba, 0x00000108, 0x00000001);
+ ufs_renesas_write_d0_d4(hba, 0x00000940, 0x00000001);
+ udelay(1);
+ ufs_renesas_write_d0_d4(hba, 0x00000940, 0x00000000);
+
+ ufs_renesas_write(hba, 0xc0, 0x49425308);
+ ufs_renesas_write(hba, 0xc0, 0x41584901);
+}
+
+static void ufs_renesas_init_step4_to_6(struct ufs_hba *hba)
+{
+ ufs_renesas_write_d0_d4(hba, 0x0000080c, 0x00000100);
+ ufs_renesas_write_d0_d4(hba, 0x00000804, 0x00000000);
+ ufs_renesas_write(hba, 0xd0, 0x0000080c);
+ ufs_renesas_poll(hba, 0xd4, BIT(8), BIT(8));
- WARN_ON(p->index >= MAX_INDEX);
-
- switch (p->mode) {
- case MODE_RESTORE:
- ufshcd_writel(hba, save[p->index], p->reg);
- break;
- case MODE_SET:
- save[p->index] |= p->u.set;
- break;
- case MODE_SAVE:
- save[p->index] = ufshcd_readl(hba, p->reg) & p->mask;
- break;
- case MODE_POLL:
- ret = readl_poll_timeout_atomic(hba->mmio_base + p->reg,
- val,
- (val & p->mask) == p->u.expected,
- 10, 1000);
- if (ret)
- dev_err(hba->dev, "%s: poll failed %d (%08x, %08x, %08x)\n",
- __func__, ret, val, p->mask, p->u.expected);
- break;
- case MODE_WAIT:
- if (p->u.delay_us > 1000)
- mdelay(DIV_ROUND_UP(p->u.delay_us, 1000));
- else
- udelay(p->u.delay_us);
- break;
- case MODE_WRITE:
- ufshcd_writel(hba, p->u.val, p->reg);
- break;
- default:
- break;
+ ufs_renesas_write(hba, REG_CONTROLLER_ENABLE, 0x00000001);
+
+ ufs_renesas_write(hba, 0xd0, 0x00000804);
+ ufs_renesas_poll(hba, 0xd4, BIT(8) | BIT(6) | BIT(0), BIT(8) | BIT(6) | BIT(0));
+}
+
+static u32 ufs_renesas_init_disable_timer(struct ufs_hba *hba)
+{
+ u32 timer_val;
+
+ ufs_renesas_write(hba, 0xd0, 0x00000d00);
+ timer_val = ufs_renesas_read(hba, 0xd4) & 0x0000ffff;
+ ufs_renesas_write(hba, 0xd4, 0x00000000);
+ ufs_renesas_write_d0_d4(hba, 0x0000082c, 0x0f000000);
+ ufs_renesas_write_d0_d4(hba, 0x00000828, 0x08000000);
+ ufs_renesas_write(hba, 0xd0, 0x0000082c);
+ ufs_renesas_poll(hba, 0xd4, BIT(27), BIT(27));
+ ufs_renesas_write(hba, 0xd0, 0x00000d2c);
+ ufs_renesas_poll(hba, 0xd4, BIT(0), BIT(0));
+
+ return timer_val;
+}
+
+static void ufs_renesas_init_enable_timer(struct ufs_hba *hba, u32 timer_val)
+{
+ ufs_renesas_write(hba, 0xf0, 0);
+ ufs_renesas_write(hba, 0xd0, 0x00000d00);
+ ufs_renesas_write(hba, 0xd4, timer_val);
+}
+
+static void ufs_renesas_write_phy_10ad_10af(struct ufs_hba *hba,
+ u32 data_10ad, u32 data_10af)
+{
+ ufs_renesas_write_phy(hba, 0x10ae, 0x0001);
+ ufs_renesas_write_phy(hba, 0x10ad, data_10ad);
+ ufs_renesas_write_phy(hba, 0x10af, data_10af);
+ ufs_renesas_write_phy(hba, 0x10b6, 0x0001);
+ ufs_renesas_write_phy(hba, 0x10ae, 0x0000);
+}
+
+static void ufs_renesas_init_compensation_and_slicers(struct ufs_hba *hba)
+{
+ ufs_renesas_write_phy_10ad_10af(hba, 0x0000, 0x0001);
+ ufs_renesas_write_phy_10ad_10af(hba, 0x0000, 0x0002);
+ ufs_renesas_write_phy_10ad_10af(hba, 0x0080, 0x0000);
+ ufs_renesas_write_phy_10ad_10af(hba, 0x0080, 0x001a);
+}
+
+static void ufs_renesas_r8a779f0_es10_pre_init(struct ufs_hba *hba)
+{
+ u32 timer_val;
+
+ /* This setting is for SERIES B */
+ ufs_renesas_init_step1_to_3(hba, false);
+
+ ufs_renesas_init_step4_to_6(hba);
+
+ timer_val = ufs_renesas_init_disable_timer(hba);
+
+ /* phy setup */
+ ufs_renesas_indirect_write(hba, 1, 0x01, 0x001f);
+ ufs_renesas_indirect_write(hba, 7, 0x5d, 0x0014);
+ ufs_renesas_indirect_write(hba, 7, 0x5e, 0x0014);
+ ufs_renesas_indirect_write(hba, 7, 0x0d, 0x0003);
+ ufs_renesas_indirect_write(hba, 7, 0x0e, 0x0007);
+ ufs_renesas_indirect_write(hba, 7, 0x5f, 0x0003);
+ ufs_renesas_indirect_write(hba, 7, 0x60, 0x0003);
+ ufs_renesas_indirect_write(hba, 7, 0x5b, 0x00a6);
+ ufs_renesas_indirect_write(hba, 7, 0x5c, 0x0003);
+
+ ufs_renesas_indirect_poll(hba, 7, 0x3c, 0, BIT(7));
+ ufs_renesas_indirect_poll(hba, 7, 0x4c, 0, BIT(4));
+
+ ufs_renesas_indirect_write(hba, 1, 0x32, 0x0080);
+ ufs_renesas_indirect_write(hba, 1, 0x1f, 0x0001);
+ ufs_renesas_indirect_write(hba, 0, 0x2c, 0x0001);
+ ufs_renesas_indirect_write(hba, 0, 0x32, 0x0087);
+
+ ufs_renesas_indirect_write(hba, 1, 0x4d, 0x0061);
+ ufs_renesas_indirect_write(hba, 4, 0x9b, 0x0009);
+ ufs_renesas_indirect_write(hba, 4, 0xa6, 0x0005);
+ ufs_renesas_indirect_write(hba, 4, 0xa5, 0x0058);
+ ufs_renesas_indirect_write(hba, 1, 0x39, 0x0027);
+ ufs_renesas_indirect_write(hba, 1, 0x47, 0x004c);
+
+ ufs_renesas_indirect_write(hba, 7, 0x0d, 0x0002);
+ ufs_renesas_indirect_write(hba, 7, 0x0e, 0x0007);
+
+ ufs_renesas_write_phy(hba, 0x0028, 0x0061);
+ ufs_renesas_write_phy(hba, 0x4014, 0x0061);
+ ufs_renesas_set_phy(hba, 0x401c, BIT(2));
+ ufs_renesas_write_phy(hba, 0x4000, 0x0000);
+ ufs_renesas_write_phy(hba, 0x4001, 0x0000);
+
+ ufs_renesas_init_compensation_and_slicers(hba);
+
+ ufs_renesas_indirect_write(hba, 7, 0x70, 0x0016);
+ ufs_renesas_indirect_write(hba, 7, 0x71, 0x0016);
+ ufs_renesas_indirect_write(hba, 7, 0x72, 0x0014);
+ ufs_renesas_indirect_write(hba, 7, 0x73, 0x0014);
+ ufs_renesas_indirect_write(hba, 7, 0x74, 0x0000);
+ ufs_renesas_indirect_write(hba, 7, 0x75, 0x0000);
+ ufs_renesas_indirect_write(hba, 7, 0x76, 0x0010);
+ ufs_renesas_indirect_write(hba, 7, 0x77, 0x0010);
+ ufs_renesas_indirect_write(hba, 7, 0x78, 0x00ff);
+ ufs_renesas_indirect_write(hba, 7, 0x79, 0x0000);
+
+ ufs_renesas_indirect_write(hba, 7, 0x19, 0x0007);
+ ufs_renesas_indirect_write(hba, 7, 0x1a, 0x0007);
+ ufs_renesas_indirect_write(hba, 7, 0x24, 0x000c);
+ ufs_renesas_indirect_write(hba, 7, 0x25, 0x000c);
+ ufs_renesas_indirect_write(hba, 7, 0x62, 0x0000);
+ ufs_renesas_indirect_write(hba, 7, 0x63, 0x0000);
+ ufs_renesas_indirect_write(hba, 7, 0x5d, 0x0014);
+ ufs_renesas_indirect_write(hba, 7, 0x5e, 0x0017);
+ ufs_renesas_indirect_write(hba, 7, 0x5d, 0x0004);
+ ufs_renesas_indirect_write(hba, 7, 0x5e, 0x0017);
+ ufs_renesas_indirect_poll(hba, 7, 0x55, 0, BIT(6));
+ ufs_renesas_indirect_poll(hba, 7, 0x41, 0, BIT(7));
+ /* end of phy setup */
+
+ ufs_renesas_init_enable_timer(hba, timer_val);
+}
+
+static void ufs_renesas_r8a779f0_init_step3_add(struct ufs_hba *hba, bool assert)
+{
+ u32 val_2x = 0, val_3x = 0, val_4x = 0;
+
+ if (assert) {
+ val_2x = 0x0001;
+ val_3x = 0x0003;
+ val_4x = 0x0001;
}
+
+ ufs_renesas_reset_indirect_write(hba, 7, 0x20, val_2x);
+ ufs_renesas_reset_indirect_write(hba, 7, 0x4a, val_4x);
+ ufs_renesas_reset_indirect_write(hba, 7, 0x35, val_3x);
+ ufs_renesas_reset_indirect_update(hba);
+ ufs_renesas_reset_indirect_write(hba, 7, 0x21, val_2x);
+ ufs_renesas_reset_indirect_write(hba, 7, 0x4b, val_4x);
+ ufs_renesas_reset_indirect_write(hba, 7, 0x36, val_3x);
+ ufs_renesas_reset_indirect_update(hba);
}
-static void ufs_renesas_pre_init(struct ufs_hba *hba)
+static void ufs_renesas_r8a779f0_pre_init(struct ufs_hba *hba)
{
- const struct ufs_renesas_init_param *p = ufs_param;
- unsigned int i;
+ struct ufs_renesas_priv *priv = ufshcd_get_variant(hba);
+ u32 timer_val;
+ u32 data;
+ int i;
+
+ /* This setting is for SERIES B */
+ ufs_renesas_init_step1_to_3(hba, true);
+
+ ufs_renesas_r8a779f0_init_step3_add(hba, true);
+ ufs_renesas_reset_indirect_write(hba, 7, 0x5f, 0x0063);
+ ufs_renesas_reset_indirect_update(hba);
+ ufs_renesas_reset_indirect_write(hba, 7, 0x60, 0x0003);
+ ufs_renesas_reset_indirect_update(hba);
+ ufs_renesas_reset_indirect_write(hba, 7, 0x5b, 0x00a6);
+ ufs_renesas_reset_indirect_update(hba);
+ ufs_renesas_reset_indirect_write(hba, 7, 0x5c, 0x0003);
+ ufs_renesas_reset_indirect_update(hba);
+ ufs_renesas_r8a779f0_init_step3_add(hba, false);
+
+ ufs_renesas_init_step4_to_6(hba);
+
+ timer_val = ufs_renesas_init_disable_timer(hba);
+
+ ufs_renesas_indirect_write(hba, 1, 0x01, 0x001f);
+ ufs_renesas_indirect_write(hba, 7, 0x5d, 0x0014);
+ ufs_renesas_indirect_write(hba, 7, 0x5e, 0x0014);
+ ufs_renesas_indirect_write(hba, 7, 0x0d, 0x0007);
+ ufs_renesas_indirect_write(hba, 7, 0x0e, 0x0007);
+
+ ufs_renesas_indirect_poll(hba, 7, 0x3c, 0, BIT(7));
+ ufs_renesas_indirect_poll(hba, 7, 0x4c, 0, BIT(4));
+
+ ufs_renesas_indirect_write(hba, 1, 0x32, 0x0080);
+ ufs_renesas_indirect_write(hba, 1, 0x1f, 0x0001);
+ ufs_renesas_indirect_write(hba, 1, 0x2c, 0x0001);
+ ufs_renesas_indirect_write(hba, 1, 0x32, 0x0087);
+
+ ufs_renesas_indirect_write(hba, 1, 0x4d, priv->calib[2]);
+ ufs_renesas_indirect_write(hba, 1, 0x4e, priv->calib[3]);
+ ufs_renesas_indirect_write(hba, 1, 0x0d, 0x0006);
+ ufs_renesas_indirect_write(hba, 1, 0x0e, 0x0007);
+ ufs_renesas_write_phy(hba, 0x0028, priv->calib[3]);
+ ufs_renesas_write_phy(hba, 0x4014, priv->calib[3]);
+
+ ufs_renesas_set_phy(hba, 0x401c, BIT(2));
+
+ ufs_renesas_write_phy(hba, 0x4000, priv->calib[6]);
+ ufs_renesas_write_phy(hba, 0x4001, priv->calib[7]);
+
+ ufs_renesas_indirect_write(hba, 1, 0x14, 0x0001);
+
+ ufs_renesas_init_compensation_and_slicers(hba);
+
+ ufs_renesas_indirect_write(hba, 7, 0x79, 0x0000);
+ ufs_renesas_indirect_write(hba, 7, 0x24, 0x000c);
+ ufs_renesas_indirect_write(hba, 7, 0x25, 0x000c);
+ ufs_renesas_indirect_write(hba, 7, 0x62, 0x00c0);
+ ufs_renesas_indirect_write(hba, 7, 0x63, 0x0001);
+
+ for (i = 0; i < priv->fw->size / 2; i++) {
+ data = (priv->fw->data[i * 2 + 1] << 8) | priv->fw->data[i * 2];
+ ufs_renesas_write_phy(hba, 0xc000 + i, data);
+ }
- for (i = 0; i < ARRAY_SIZE(ufs_param); i++)
- ufs_renesas_reg_control(hba, &p[i]);
+ ufs_renesas_indirect_write(hba, 7, 0x0d, 0x0002);
+ ufs_renesas_indirect_write(hba, 7, 0x0e, 0x0007);
+
+ ufs_renesas_indirect_write(hba, 7, 0x5d, 0x0014);
+ ufs_renesas_indirect_write(hba, 7, 0x5e, 0x0017);
+ ufs_renesas_indirect_write(hba, 7, 0x5d, 0x0004);
+ ufs_renesas_indirect_write(hba, 7, 0x5e, 0x0017);
+ ufs_renesas_indirect_poll(hba, 7, 0x55, 0, BIT(6));
+ ufs_renesas_indirect_poll(hba, 7, 0x41, 0, BIT(7));
+
+ ufs_renesas_init_enable_timer(hba, timer_val);
}
static int ufs_renesas_hce_enable_notify(struct ufs_hba *hba,
@@ -338,7 +422,7 @@ static int ufs_renesas_hce_enable_notify(struct ufs_hba *hba,
return 0;
if (status == PRE_CHANGE)
- ufs_renesas_pre_init(hba);
+ priv->pre_init(hba);
priv->initialized = true;
@@ -356,20 +440,78 @@ static int ufs_renesas_setup_clocks(struct ufs_hba *hba, bool on,
return 0;
}
+static const struct soc_device_attribute ufs_fallback[] = {
+ { .soc_id = "r8a779f0", .revision = "ES1.[01]" },
+ { /* Sentinel */ }
+};
+
static int ufs_renesas_init(struct ufs_hba *hba)
{
+ const struct soc_device_attribute *attr;
+ struct nvmem_cell *cell = NULL;
+ struct device *dev = hba->dev;
struct ufs_renesas_priv *priv;
+ u8 *data = NULL;
+ size_t len;
+ int ret;
- priv = devm_kzalloc(hba->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
ufshcd_set_variant(hba, priv);
hba->quirks |= UFSHCD_QUIRK_HIBERN_FASTAUTO;
+ attr = soc_device_match(ufs_fallback);
+ if (attr)
+ goto fallback;
+
+ ret = request_firmware(&priv->fw, UFS_RENESAS_FIRMWARE_NAME, dev);
+ if (ret) {
+ dev_warn(dev, "Failed to load firmware\n");
+ goto fallback;
+ }
+
+ cell = nvmem_cell_get(dev, "calibration");
+ if (IS_ERR(cell)) {
+ dev_warn(dev, "No calibration data specified\n");
+ goto fallback;
+ }
+
+ data = nvmem_cell_read(cell, &len);
+ if (IS_ERR(data)) {
+ dev_warn(dev, "Failed to read calibration data: %pe\n", data);
+ goto fallback;
+ }
+
+ if (len != EFUSE_CALIB_SIZE) {
+ dev_warn(dev, "Invalid calibration data size %zu\n", len);
+ goto fallback;
+ }
+
+ memcpy(priv->calib, data, EFUSE_CALIB_SIZE);
+ priv->pre_init = ufs_renesas_r8a779f0_pre_init;
+ goto out;
+
+fallback:
+ dev_info(dev, "Using ES1.0 init code\n");
+ priv->pre_init = ufs_renesas_r8a779f0_es10_pre_init;
+
+out:
+ kfree(data);
+ if (!IS_ERR_OR_NULL(cell))
+ nvmem_cell_put(cell);
+
return 0;
}
+static void ufs_renesas_exit(struct ufs_hba *hba)
+{
+ struct ufs_renesas_priv *priv = ufshcd_get_variant(hba);
+
+ release_firmware(priv->fw);
+}
+
static int ufs_renesas_set_dma_mask(struct ufs_hba *hba)
{
return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
@@ -378,6 +520,7 @@ static int ufs_renesas_set_dma_mask(struct ufs_hba *hba)
static const struct ufs_hba_variant_ops ufs_renesas_vops = {
.name = "renesas",
.init = ufs_renesas_init,
+ .exit = ufs_renesas_exit,
.set_dma_mask = ufs_renesas_set_dma_mask,
.setup_clocks = ufs_renesas_setup_clocks,
.hce_enable_notify = ufs_renesas_hce_enable_notify,
diff --git a/drivers/ufs/host/ufs-rockchip.c b/drivers/ufs/host/ufs-rockchip.c
new file mode 100644
index 000000000000..8754085dd0cc
--- /dev/null
+++ b/drivers/ufs/host/ufs-rockchip.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Rockchip UFS Host Controller driver
+ *
+ * Copyright (C) 2025 Rockchip Electronics Co., Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_wakeup.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include <ufs/ufshcd.h>
+#include <ufs/unipro.h>
+#include "ufshcd-pltfrm.h"
+#include "ufs-rockchip.h"
+
+static int ufs_rockchip_hce_enable_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ int err = 0;
+
+ if (status == POST_CHANGE) {
+ err = ufshcd_dme_reset(hba);
+ if (err)
+ return err;
+
+ err = ufshcd_dme_enable(hba);
+ if (err)
+ return err;
+
+ return ufshcd_vops_phy_initialization(hba);
+ }
+
+ return 0;
+}
+
+static void ufs_rockchip_set_pm_lvl(struct ufs_hba *hba)
+{
+ hba->rpm_lvl = UFS_PM_LVL_5;
+ hba->spm_lvl = UFS_PM_LVL_5;
+}
+
+static int ufs_rockchip_rk3576_phy_init(struct ufs_hba *hba)
+{
+ struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(PA_LOCAL_TX_LCC_ENABLE, 0x0), 0x0);
+ /* enable the mphy DME_SET cfg */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(MPHY_CFG, 0x0), MPHY_CFG_ENABLE);
+ for (int i = 0; i < 2; i++) {
+ /* Configuration M - TX */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD, SEL_TX_LANE0 + i), 0x06);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD_EN, SEL_TX_LANE0 + i), 0x02);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_VALUE, SEL_TX_LANE0 + i), 0x44);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE1, SEL_TX_LANE0 + i), 0xe6);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE2, SEL_TX_LANE0 + i), 0x07);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_TASE_VALUE, SEL_TX_LANE0 + i), 0x93);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_BASE_NVALUE, SEL_TX_LANE0 + i), 0xc9);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_POWER_SAVING_CTRL, SEL_TX_LANE0 + i), 0x00);
+ /* Configuration M - RX */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD, SEL_RX_LANE0 + i), 0x06);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD_EN, SEL_RX_LANE0 + i), 0x00);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE, SEL_RX_LANE0 + i), 0x58);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_PVALUE1, SEL_RX_LANE0 + i), 0x8c);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_PVALUE2, SEL_RX_LANE0 + i), 0x02);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_OPTION, SEL_RX_LANE0 + i), 0xf6);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_POWER_SAVING_CTRL, SEL_RX_LANE0 + i), 0x69);
+ }
+
+ /* disable the mphy DME_SET cfg */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(MPHY_CFG, 0x0), MPHY_CFG_DISABLE);
+
+ ufs_sys_writel(host->mphy_base, 0x80, CMN_REG23);
+ ufs_sys_writel(host->mphy_base, 0xB5, TRSV0_REG14);
+ ufs_sys_writel(host->mphy_base, 0xB5, TRSV1_REG14);
+
+ ufs_sys_writel(host->mphy_base, 0x03, TRSV0_REG15);
+ ufs_sys_writel(host->mphy_base, 0x03, TRSV1_REG15);
+
+ ufs_sys_writel(host->mphy_base, 0x38, TRSV0_REG08);
+ ufs_sys_writel(host->mphy_base, 0x38, TRSV1_REG08);
+
+ ufs_sys_writel(host->mphy_base, 0x50, TRSV0_REG29);
+ ufs_sys_writel(host->mphy_base, 0x50, TRSV1_REG29);
+
+ ufs_sys_writel(host->mphy_base, 0x80, TRSV0_REG2E);
+ ufs_sys_writel(host->mphy_base, 0x80, TRSV1_REG2E);
+
+ ufs_sys_writel(host->mphy_base, 0x18, TRSV0_REG3C);
+ ufs_sys_writel(host->mphy_base, 0x18, TRSV1_REG3C);
+
+ ufs_sys_writel(host->mphy_base, 0x03, TRSV0_REG16);
+ ufs_sys_writel(host->mphy_base, 0x03, TRSV1_REG16);
+
+ ufs_sys_writel(host->mphy_base, 0x20, TRSV0_REG17);
+ ufs_sys_writel(host->mphy_base, 0x20, TRSV1_REG17);
+
+ ufs_sys_writel(host->mphy_base, 0xC0, TRSV0_REG18);
+ ufs_sys_writel(host->mphy_base, 0xC0, TRSV1_REG18);
+
+ ufs_sys_writel(host->mphy_base, 0x03, CMN_REG25);
+
+ ufs_sys_writel(host->mphy_base, 0x03, TRSV0_REG3D);
+ ufs_sys_writel(host->mphy_base, 0x03, TRSV1_REG3D);
+
+ ufs_sys_writel(host->mphy_base, 0xC0, CMN_REG23);
+ udelay(1);
+ ufs_sys_writel(host->mphy_base, 0x00, CMN_REG23);
+
+ usleep_range(200, 250);
+ /* start link up */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(MIB_T_DBG_CPORT_TX_ENDIAN, 0), 0x0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(MIB_T_DBG_CPORT_RX_ENDIAN, 0), 0x0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(N_DEVICEID, 0), 0x0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(N_DEVICEID_VALID, 0), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(T_PEERDEVICEID, 0), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(T_CONNECTIONSTATE, 0), 0x1);
+
+ return 0;
+}
+
+static int ufs_rockchip_common_init(struct ufs_hba *hba)
+{
+ struct device *dev = hba->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ufs_rockchip_host *host;
+ int err;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->ufs_sys_ctrl = devm_platform_ioremap_resource_byname(pdev, "hci_grf");
+ if (IS_ERR(host->ufs_sys_ctrl))
+ return dev_err_probe(dev, PTR_ERR(host->ufs_sys_ctrl),
+ "Failed to map HCI system control registers\n");
+
+ host->ufs_phy_ctrl = devm_platform_ioremap_resource_byname(pdev, "mphy_grf");
+ if (IS_ERR(host->ufs_phy_ctrl))
+ return dev_err_probe(dev, PTR_ERR(host->ufs_phy_ctrl),
+ "Failed to map mphy system control registers\n");
+
+ host->mphy_base = devm_platform_ioremap_resource_byname(pdev, "mphy");
+ if (IS_ERR(host->mphy_base))
+ return dev_err_probe(dev, PTR_ERR(host->mphy_base),
+ "Failed to map mphy base registers\n");
+
+ host->rst = devm_reset_control_array_get_exclusive(dev);
+ if (IS_ERR(host->rst))
+ return dev_err_probe(dev, PTR_ERR(host->rst),
+ "failed to get reset control\n");
+
+ reset_control_assert(host->rst);
+ udelay(1);
+ reset_control_deassert(host->rst);
+
+ host->ref_out_clk = devm_clk_get_enabled(dev, "ref_out");
+ if (IS_ERR(host->ref_out_clk))
+ return dev_err_probe(dev, PTR_ERR(host->ref_out_clk),
+ "ref_out clock unavailable\n");
+
+ host->rst_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(host->rst_gpio))
+ return dev_err_probe(dev, PTR_ERR(host->rst_gpio),
+ "failed to get reset gpio\n");
+
+ err = devm_clk_bulk_get_all_enabled(dev, &host->clks);
+ if (err < 0)
+ return dev_err_probe(dev, err, "failed to enable clocks\n");
+
+ host->hba = hba;
+
+ ufshcd_set_variant(hba, host);
+
+ return 0;
+}
+
+static int ufs_rockchip_rk3576_init(struct ufs_hba *hba)
+{
+ struct device *dev = hba->dev;
+ int ret;
+
+ hba->quirks = UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING;
+
+ /* Enable BKOPS when suspended */
+ hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
+ /* Enable putting device into deep sleep */
+ hba->caps |= UFSHCD_CAP_DEEPSLEEP;
+ /* Enable devfreq of UFS */
+ hba->caps |= UFSHCD_CAP_CLK_SCALING;
+ /* Enable WriteBooster */
+ hba->caps |= UFSHCD_CAP_WB_EN;
+
+ /* Set the default desired PM levels in case the user has not set them via sysfs */
+ ufs_rockchip_set_pm_lvl(hba);
+
+ ret = ufs_rockchip_common_init(hba);
+ if (ret)
+ return dev_err_probe(dev, ret, "ufs common init fail\n");
+
+ return 0;
+}
+
+static int ufs_rockchip_device_reset(struct ufs_hba *hba)
+{
+ struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
+
+ gpiod_set_value_cansleep(host->rst_gpio, 1);
+ usleep_range(20, 25);
+
+ gpiod_set_value_cansleep(host->rst_gpio, 0);
+ usleep_range(20, 25);
+
+ return 0;
+}
+
+static const struct ufs_hba_variant_ops ufs_hba_rk3576_vops = {
+ .name = "rk3576",
+ .init = ufs_rockchip_rk3576_init,
+ .device_reset = ufs_rockchip_device_reset,
+ .hce_enable_notify = ufs_rockchip_hce_enable_notify,
+ .phy_initialization = ufs_rockchip_rk3576_phy_init,
+};
+
+static const struct of_device_id ufs_rockchip_of_match[] = {
+ { .compatible = "rockchip,rk3576-ufshc", .data = &ufs_hba_rk3576_vops },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ufs_rockchip_of_match);
+
+static int ufs_rockchip_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct ufs_hba_variant_ops *vops;
+ int err;
+
+ vops = device_get_match_data(dev);
+ if (!vops)
+ return dev_err_probe(dev, -ENODATA, "ufs_hba_variant_ops not defined.\n");
+
+ err = ufshcd_pltfrm_init(pdev, vops);
+ if (err)
+ return dev_err_probe(dev, err, "ufshcd_pltfrm_init failed\n");
+
+ return 0;
+}
+
+static void ufs_rockchip_remove(struct platform_device *pdev)
+{
+ ufshcd_pltfrm_remove(pdev);
+}
+
+#ifdef CONFIG_PM
+static int ufs_rockchip_runtime_suspend(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
+
+ clk_disable_unprepare(host->ref_out_clk);
+
+ /* Do not power down the genpd if rpm_lvl is less than level 5 */
+ dev_pm_genpd_rpm_always_on(dev, hba->rpm_lvl < UFS_PM_LVL_5);
+
+ return ufshcd_runtime_suspend(dev);
+}
+
+static int ufs_rockchip_runtime_resume(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
+ int err;
+
+ err = clk_prepare_enable(host->ref_out_clk);
+ if (err) {
+ dev_err(hba->dev, "failed to enable ref_out clock %d\n", err);
+ return err;
+ }
+
+ reset_control_assert(host->rst);
+ udelay(1);
+ reset_control_deassert(host->rst);
+
+ return ufshcd_runtime_resume(dev);
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int ufs_rockchip_system_suspend(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
+ int err;
+
+ /*
+ * If spm_lvl is less than level 5, the host controller must stay
+ * powered on across system suspend. Call device_set_awake_path() so
+ * the PM core notifies the genpd provider of this requirement.
+ */
+ if (hba->spm_lvl < UFS_PM_LVL_5)
+ device_set_awake_path(dev);
+
+ err = ufshcd_system_suspend(dev);
+ if (err) {
+ dev_err(hba->dev, "UFSHCD system suspend failed %d\n", err);
+ return err;
+ }
+
+ clk_disable_unprepare(host->ref_out_clk);
+
+ return 0;
+}
+
+static int ufs_rockchip_system_resume(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
+ int err;
+
+ err = clk_prepare_enable(host->ref_out_clk);
+ if (err) {
+ dev_err(hba->dev, "failed to enable ref_out clock %d\n", err);
+ return err;
+ }
+
+ return ufshcd_system_resume(dev);
+}
+#endif
+
+static const struct dev_pm_ops ufs_rockchip_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ufs_rockchip_system_suspend, ufs_rockchip_system_resume)
+ SET_RUNTIME_PM_OPS(ufs_rockchip_runtime_suspend, ufs_rockchip_runtime_resume, NULL)
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
+};
+
+static struct platform_driver ufs_rockchip_pltform = {
+ .probe = ufs_rockchip_probe,
+ .remove = ufs_rockchip_remove,
+ .driver = {
+ .name = "ufshcd-rockchip",
+ .pm = &ufs_rockchip_pm_ops,
+ .of_match_table = ufs_rockchip_of_match,
+ },
+};
+module_platform_driver(ufs_rockchip_pltform);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Rockchip UFS Host Driver");
diff --git a/drivers/ufs/host/ufs-rockchip.h b/drivers/ufs/host/ufs-rockchip.h
new file mode 100644
index 000000000000..3ba6fb9f73ae
--- /dev/null
+++ b/drivers/ufs/host/ufs-rockchip.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Rockchip UFS Host Controller driver
+ *
+ * Copyright (C) 2025 Rockchip Electronics Co., Ltd.
+ */
+
+#ifndef _UFS_ROCKCHIP_H_
+#define _UFS_ROCKCHIP_H_
+
+#define SEL_TX_LANE0 0x0
+#define SEL_TX_LANE1 0x1
+#define SEL_TX_LANE2 0x2
+#define SEL_TX_LANE3 0x3
+#define SEL_RX_LANE0 0x4
+#define SEL_RX_LANE1 0x5
+#define SEL_RX_LANE2 0x6
+#define SEL_RX_LANE3 0x7
+
+#define VND_TX_CLK_PRD 0xAA
+#define VND_TX_CLK_PRD_EN 0xA9
+#define VND_TX_LINERESET_PVALUE2 0xAB
+#define VND_TX_LINERESET_PVALUE1 0xAC
+#define VND_TX_LINERESET_VALUE 0xAD
+#define VND_TX_BASE_NVALUE 0x93
+#define VND_TX_TASE_VALUE 0x94
+#define VND_TX_POWER_SAVING_CTRL 0x7F
+#define VND_RX_CLK_PRD 0x12
+#define VND_RX_CLK_PRD_EN 0x11
+#define VND_RX_LINERESET_PVALUE2 0x1B
+#define VND_RX_LINERESET_PVALUE1 0x1C
+#define VND_RX_LINERESET_VALUE 0x1D
+#define VND_RX_LINERESET_OPTION 0x25
+#define VND_RX_POWER_SAVING_CTRL 0x2F
+#define VND_RX_SAVE_DET_CTRL 0x1E
+
+#define CMN_REG23 0x8C
+#define CMN_REG25 0x94
+#define TRSV0_REG08 0xE0
+#define TRSV1_REG08 0x220
+#define TRSV0_REG14 0x110
+#define TRSV1_REG14 0x250
+#define TRSV0_REG15 0x134
+#define TRSV1_REG15 0x274
+#define TRSV0_REG16 0x128
+#define TRSV1_REG16 0x268
+#define TRSV0_REG17 0x12C
+#define TRSV1_REG17 0x26c
+#define TRSV0_REG18 0x120
+#define TRSV1_REG18 0x260
+#define TRSV0_REG29 0x164
+#define TRSV1_REG29 0x2A4
+#define TRSV0_REG2E 0x178
+#define TRSV1_REG2E 0x2B8
+#define TRSV0_REG3C 0x1B0
+#define TRSV1_REG3C 0x2F0
+#define TRSV0_REG3D 0x1B4
+#define TRSV1_REG3D 0x2F4
+
+#define MPHY_CFG 0x200
+#define MPHY_CFG_ENABLE 0x40
+#define MPHY_CFG_DISABLE 0x0
+
+#define MIB_T_DBG_CPORT_TX_ENDIAN 0xc022
+#define MIB_T_DBG_CPORT_RX_ENDIAN 0xc023
+
+struct ufs_rockchip_host {
+ struct ufs_hba *hba;
+ void __iomem *ufs_phy_ctrl;
+ void __iomem *ufs_sys_ctrl;
+ void __iomem *mphy_base;
+ struct gpio_desc *rst_gpio;
+ struct reset_control *rst;
+ struct clk *ref_out_clk;
+ struct clk_bulk_data *clks;
+ uint64_t caps;
+};
+
+#define ufs_sys_writel(base, val, reg) \
+ writel((val), (base) + (reg))
+#define ufs_sys_readl(base, reg) readl((base) + (reg))
+#define ufs_sys_set_bits(base, mask, reg) \
+ ufs_sys_writel( \
+ (base), ((mask) | (ufs_sys_readl((base), (reg)))), (reg))
+#define ufs_sys_ctrl_clr_bits(base, mask, reg) \
+ ufs_sys_writel((base), \
+ ((~(mask)) & (ufs_sys_readl((base), (reg)))), \
+ (reg))
+
+#endif /* _UFS_ROCKCHIP_H_ */
diff --git a/drivers/ufs/host/ufs-sprd.c b/drivers/ufs/host/ufs-sprd.c
index b1d532363f9d..65bd8fb96b99 100644
--- a/drivers/ufs/host/ufs-sprd.c
+++ b/drivers/ufs/host/ufs-sprd.c
@@ -160,9 +160,9 @@ static int ufs_sprd_common_init(struct ufs_hba *hba)
}
static int sprd_ufs_pwr_change_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
+ enum ufs_notify_change_status status,
+ const struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
{
struct ufs_sprd_host *host = ufshcd_get_variant(hba);
diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
index 9cfcaad23cf9..996387906aa1 100644
--- a/drivers/ufs/host/ufshcd-pci.c
+++ b/drivers/ufs/host/ufshcd-pci.c
@@ -157,7 +157,7 @@ static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *dev_max_params,
+ const struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
int err = 0;
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index c17516c29b63..a093544482d5 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -424,8 +424,7 @@ static enum hrtimer_restart ci_otg_hrtimer_func(struct hrtimer *t)
/* Initialize timers */
static int ci_otg_init_timers(struct ci_hdrc *ci)
{
- hrtimer_init(&ci->otg_fsm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- ci->otg_fsm_hrtimer.function = ci_otg_hrtimer_func;
+ hrtimer_setup(&ci->otg_fsm_hrtimer, ci_otg_hrtimer_func, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
return 0;
}
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index 238c6fd50e75..2a542a99ec44 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -1459,8 +1459,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
/* Initialize QH */
qh->hsotg = hsotg;
timer_setup(&qh->unreserve_timer, dwc2_unreserve_timer_fn, 0);
- hrtimer_init(&qh->wait_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- qh->wait_timer.function = &dwc2_wait_timer_fn;
+ hrtimer_setup(&qh->wait_timer, &dwc2_wait_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
qh->ep_type = ep_type;
qh->ep_is_in = ep_is_in;
diff --git a/drivers/usb/fotg210/fotg210-hcd.c b/drivers/usb/fotg210/fotg210-hcd.c
index 3d404d19a205..64c4965a160f 100644
--- a/drivers/usb/fotg210/fotg210-hcd.c
+++ b/drivers/usb/fotg210/fotg210-hcd.c
@@ -4901,8 +4901,7 @@ static int hcd_fotg210_init(struct usb_hcd *hcd)
*/
fotg210->need_io_watchdog = 1;
- hrtimer_init(&fotg210->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- fotg210->hrtimer.function = fotg210_hrtimer_func;
+ hrtimer_setup(&fotg210->hrtimer, fotg210_hrtimer_func, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
fotg210->next_hrtimer_event = FOTG210_HRTIMER_NO_EVENT;
hcc_params = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 2eae8fc2e0db..94d478b6bcd3 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -2142,8 +2142,8 @@ static int do_scsi_command(struct fsg_common *common)
* of Posix locks.
*/
case FORMAT_UNIT:
- case RELEASE:
- case RESERVE:
+ case RELEASE_6:
+ case RESERVE_6:
case SEND_DIAGNOSTIC:
default:
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index f60576a65ca6..58b0dd575af3 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1559,8 +1559,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
ncm->port.open = ncm_open;
ncm->port.close = ncm_close;
- hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
- ncm->task_timer.function = ncm_tx_timeout;
+ hrtimer_setup(&ncm->task_timer, ncm_tx_timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
DBG(cdev, "CDC Network: IN/%s OUT/%s NOTIFY/%s\n",
ncm->port.in_ep->name, ncm->port.out_ep->name,
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index bda08c5ba7c0..4f1b5db51dda 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -2479,8 +2479,7 @@ static DEVICE_ATTR_RO(urbs);
static int dummy_start_ss(struct dummy_hcd *dum_hcd)
{
- hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
- dum_hcd->timer.function = dummy_timer;
+ hrtimer_setup(&dum_hcd->timer, dummy_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
dum_hcd->rh_state = DUMMY_RH_RUNNING;
dum_hcd->stream_en_ep = 0;
INIT_LIST_HEAD(&dum_hcd->urbp_list);
@@ -2509,8 +2508,7 @@ static int dummy_start(struct usb_hcd *hcd)
return dummy_start_ss(dum_hcd);
spin_lock_init(&dum_hcd->dum->lock);
- hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
- dum_hcd->timer.function = dummy_timer;
+ hrtimer_setup(&dum_hcd->timer, dummy_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
dum_hcd->rh_state = DUMMY_RH_RUNNING;
INIT_LIST_HEAD(&dum_hcd->urbp_list);
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 6de79ac5e6a4..6d1d190c914d 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -466,8 +466,7 @@ static int ehci_init(struct usb_hcd *hcd)
*/
ehci->need_io_watchdog = 1;
- hrtimer_init(&ehci->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- ehci->hrtimer.function = ehci_hrtimer_func;
+ hrtimer_setup(&ehci->hrtimer, ehci_hrtimer_func, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;
hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index 9589243e8951..4cde3abb7006 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -760,8 +760,8 @@ cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
if (!controller)
goto kzalloc_fail;
- hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- controller->early_tx.function = cppi41_recheck_tx_req;
+ hrtimer_setup(&controller->early_tx, cppi41_recheck_tx_req, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
INIT_LIST_HEAD(&controller->early_tx_list);
controller->controller.channel_alloc = cppi41_dma_channel_allocate;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index e07c5e3eb18c..9b34e23b7091 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1079,6 +1079,20 @@ static const struct usb_device_id id_table_combined[] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
/* GMC devices */
{ USB_DEVICE(GMC_VID, GMC_Z216C_PID) },
+ /* Altera USB Blaster 3 */
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6022_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6025_PID, 2) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6026_PID, 2) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6026_PID, 3) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6029_PID, 2) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602A_PID, 2) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602A_PID, 3) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602C_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602D_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602D_PID, 2) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 2) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 3) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 5ee60ba2a73c..52be47d684ea 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1612,3 +1612,16 @@
*/
#define GMC_VID 0x1cd7
#define GMC_Z216C_PID 0x0217 /* GMC Z216C Adapter IR-USB */
+
+/*
+ * Altera USB Blaster 3 (http://www.altera.com).
+ */
+#define ALTERA_VID 0x09fb
+#define ALTERA_UB3_6022_PID 0x6022
+#define ALTERA_UB3_6025_PID 0x6025
+#define ALTERA_UB3_6026_PID 0x6026
+#define ALTERA_UB3_6029_PID 0x6029
+#define ALTERA_UB3_602A_PID 0x602a
+#define ALTERA_UB3_602C_PID 0x602c
+#define ALTERA_UB3_602D_PID 0x602d
+#define ALTERA_UB3_602E_PID 0x602e
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 58bd54e8c483..5cd26dac2069 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1368,13 +1368,13 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990A (PCIe) */
.driver_info = RSVD(0) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff), /* Telit FE990 (rmnet) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff), /* Telit FE990A (rmnet) */
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff), /* Telit FE990 (MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff), /* Telit FE990A (MBIM) */
.driver_info = NCTRL(0) | RSVD(1) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1082, 0xff), /* Telit FE990 (RNDIS) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1082, 0xff), /* Telit FE990A (RNDIS) */
.driver_info = NCTRL(2) | RSVD(3) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990 (ECM) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990A (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a0, 0xff), /* Telit FN20C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(3) },
@@ -1388,28 +1388,44 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10aa, 0xff), /* Telit FN920C04 (MBIM) */
.driver_info = NCTRL(3) | RSVD(4) | RSVD(5) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b0, 0xff, 0xff, 0x30), /* Telit FE990B (rmnet) */
+ .driver_info = NCTRL(5) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b0, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b0, 0xff, 0xff, 0x60) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b1, 0xff, 0xff, 0x30), /* Telit FE990B (MBIM) */
+ .driver_info = NCTRL(6) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b1, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b1, 0xff, 0xff, 0x60) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b2, 0xff, 0xff, 0x30), /* Telit FE990B (RNDIS) */
+ .driver_info = NCTRL(6) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b2, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b2, 0xff, 0xff, 0x60) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b3, 0xff, 0xff, 0x30), /* Telit FE990B (ECM) */
+ .driver_info = NCTRL(6) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b3, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b3, 0xff, 0xff, 0x60) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c0, 0xff), /* Telit FE910C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c4, 0xff), /* Telit FE910C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c8, 0xff), /* Telit FE910C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x60) }, /* Telit FN990B (rmnet) */
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x40) },
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x30),
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x30), /* Telit FN990B (rmnet) */
.driver_info = NCTRL(5) },
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x60) }, /* Telit FN990B (MBIM) */
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x40) },
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x30),
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x60) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x30), /* Telit FN990B (MBIM) */
.driver_info = NCTRL(6) },
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x60) }, /* Telit FN990B (RNDIS) */
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x40) },
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x30),
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x60) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d2, 0xff, 0xff, 0x30), /* Telit FN990B (RNDIS) */
.driver_info = NCTRL(6) },
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x60) }, /* Telit FN990B (ECM) */
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x40) },
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x30),
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d2, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d2, 0xff, 0xff, 0x60) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d3, 0xff, 0xff, 0x30), /* Telit FN990B (ECM) */
.driver_info = NCTRL(6) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d3, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d3, 0xff, 0xff, 0x60) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
diff --git a/drivers/usb/storage/debug.c b/drivers/usb/storage/debug.c
index 576be66ad962..dda610f689b7 100644
--- a/drivers/usb/storage/debug.c
+++ b/drivers/usb/storage/debug.c
@@ -58,8 +58,8 @@ void usb_stor_show_command(const struct us_data *us, struct scsi_cmnd *srb)
case INQUIRY: what = "INQUIRY"; break;
case RECOVER_BUFFERED_DATA: what = "RECOVER_BUFFERED_DATA"; break;
case MODE_SELECT: what = "MODE_SELECT"; break;
- case RESERVE: what = "RESERVE"; break;
- case RELEASE: what = "RELEASE"; break;
+ case RESERVE_6: what = "RESERVE"; break;
+ case RELEASE_6: what = "RELEASE"; break;
case COPY: what = "COPY"; break;
case ERASE: what = "ERASE"; break;
case MODE_SENSE: what = "MODE_SENSE"; break;
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 6bf1a22c785a..a99db4e025cd 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -5117,16 +5117,16 @@ static void run_state_machine(struct tcpm_port *port)
*/
if (port->vbus_never_low) {
port->vbus_never_low = false;
- tcpm_set_state(port, SNK_SOFT_RESET,
- port->timings.sink_wait_cap_time);
+ upcoming_state = SNK_SOFT_RESET;
} else {
if (!port->self_powered)
upcoming_state = SNK_WAIT_CAPABILITIES_TIMEOUT;
else
upcoming_state = hard_reset_state(port);
- tcpm_set_state(port, SNK_WAIT_CAPABILITIES_TIMEOUT,
- port->timings.sink_wait_cap_time);
}
+
+ tcpm_set_state(port, upcoming_state,
+ port->timings.sink_wait_cap_time);
break;
case SNK_WAIT_CAPABILITIES_TIMEOUT:
/*
@@ -7721,14 +7721,14 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
kthread_init_work(&port->event_work, tcpm_pd_event_handler);
kthread_init_work(&port->enable_frs, tcpm_enable_frs_work);
kthread_init_work(&port->send_discover_work, tcpm_send_discover_work);
- hrtimer_init(&port->state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->state_machine_timer.function = state_machine_timer_handler;
- hrtimer_init(&port->vdm_state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->vdm_state_machine_timer.function = vdm_state_machine_timer_handler;
- hrtimer_init(&port->enable_frs_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->enable_frs_timer.function = enable_frs_timer_handler;
- hrtimer_init(&port->send_discover_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->send_discover_timer.function = send_discover_timer_handler;
+ hrtimer_setup(&port->state_machine_timer, state_machine_timer_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ hrtimer_setup(&port->vdm_state_machine_timer, vdm_state_machine_timer_handler,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_setup(&port->enable_frs_timer, enable_frs_timer_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ hrtimer_setup(&port->send_discover_timer, send_discover_timer_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
spin_lock_init(&port->pd_event_lock);
diff --git a/drivers/vfio/group.c b/drivers/vfio/group.c
index 49559605177e..c321d442f0da 100644
--- a/drivers/vfio/group.c
+++ b/drivers/vfio/group.c
@@ -266,24 +266,12 @@ static struct file *vfio_device_open_file(struct vfio_device *device)
if (ret)
goto err_free;
- /*
- * We can't use anon_inode_getfd() because we need to modify
- * the f_mode flags directly to allow more than just ioctls
- */
- filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
- df, O_RDWR);
+ filep = anon_inode_getfile_fmode("[vfio-device]", &vfio_device_fops,
+ df, O_RDWR, FMODE_PREAD | FMODE_PWRITE);
if (IS_ERR(filep)) {
ret = PTR_ERR(filep);
goto err_close_device;
}
-
- /*
- * TODO: add an anon_inode interface to do this.
- * Appears to be missing by lack of need rather than
- * explicitly prevented. Now there's need.
- */
- filep->f_mode |= (FMODE_PREAD | FMODE_PWRITE);
-
/*
* Use the pseudo fs inode on the device to link all mmaps
* to the same address space, allowing us to unmap all vmas
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index bf50ffa10bde..c3bcb6911c53 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -7,10 +7,6 @@ config VFIO_PCI_CORE
select VFIO_VIRQFD
select IRQ_BYPASS_MANAGER
-config VFIO_PCI_MMAP
- def_bool y if !S390
- depends on VFIO_PCI_CORE
-
config VFIO_PCI_INTX
def_bool y if !S390
depends on VFIO_PCI_CORE
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 586e49efb81b..c8586d47704c 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -116,7 +116,7 @@ static void vfio_pci_probe_mmaps(struct vfio_pci_core_device *vdev)
res = &vdev->pdev->resource[bar];
- if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP))
+ if (vdev->pdev->non_mappable_bars)
goto no_mmap;
if (!(res->flags & IORESOURCE_MEM))
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 44c9ef1435a2..5df981920a94 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -39,6 +39,7 @@ source "drivers/gpu/vga/Kconfig"
source "drivers/gpu/host1x/Kconfig"
source "drivers/gpu/ipu-v3/Kconfig"
+source "drivers/gpu/nova-core/Kconfig"
source "drivers/gpu/drm/Kconfig"
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index 720b5ada7fe8..0a1db2824076 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -11,7 +11,6 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/fb.h>
#include <linux/i2c.h>
#include <linux/backlight.h>
#include <linux/mfd/88pm860x.h>
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 3614a5d29c71..d9374d208cee 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -70,7 +70,7 @@ config LCD_ILI9320
then say y to include a power driver for it.
config LCD_TDO24M
- tristate "Toppoly TDO24M and TDO35S LCD Panels support"
+ tristate "Toppoly TDO24M and TDO35S LCD Panels support"
depends on SPI_MASTER
help
If you have a Toppoly TDO24M/TDO35S series LCD panel, say y here to
@@ -290,6 +290,17 @@ config BACKLIGHT_APPLE
If you have an Intel-based Apple say Y to enable a driver for its
backlight.
+config BACKLIGHT_APPLE_DWI
+ tristate "Apple DWI 2-Wire Interface Backlight Driver"
+ depends on ARCH_APPLE || COMPILE_TEST
+ help
+ Say Y to enable the backlight driver for backlight controllers
+ attached via the Apple DWI 2-wire interface which is found in some
+ Apple iPhones, iPads and iPod touches.
+
+ To compile this driver as a module, choose M here: the module will
+ be called apple_dwi_bl.
+
config BACKLIGHT_QCOM_WLED
tristate "Qualcomm PMIC WLED Driver"
select REGMAP
@@ -359,13 +370,6 @@ config BACKLIGHT_88PM860X
help
Say Y to enable the backlight driver for Marvell 88PM8606.
-config BACKLIGHT_PCF50633
- tristate "Backlight driver for NXP PCF50633 MFD"
- depends on MFD_PCF50633
- help
- If you have a backlight driven by a NXP PCF50633 MFD, say Y here to
- enable its driver.
-
config BACKLIGHT_AAT2870
tristate "AnalogicTech AAT2870 Backlight"
depends on MFD_AAT2870_CORE
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 8fc98f760a8a..dfbb169bf6ea 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_BACKLIGHT_ADP5520) += adp5520_bl.o
obj-$(CONFIG_BACKLIGHT_ADP8860) += adp8860_bl.o
obj-$(CONFIG_BACKLIGHT_ADP8870) += adp8870_bl.o
obj-$(CONFIG_BACKLIGHT_APPLE) += apple_bl.o
+obj-$(CONFIG_BACKLIGHT_APPLE_DWI) += apple_dwi_bl.o
obj-$(CONFIG_BACKLIGHT_AS3711) += as3711_bl.o
obj-$(CONFIG_BACKLIGHT_BD6107) += bd6107.o
obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o
@@ -49,7 +50,6 @@ obj-$(CONFIG_BACKLIGHT_MP3309C) += mp3309c.o
obj-$(CONFIG_BACKLIGHT_MT6370) += mt6370-backlight.o
obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
obj-$(CONFIG_BACKLIGHT_PANDORA) += pandora_bl.o
-obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o
obj-$(CONFIG_BACKLIGHT_PWM) += pwm_bl.o
obj-$(CONFIG_BACKLIGHT_QCOM_WLED) += qcom-wled.o
obj-$(CONFIG_BACKLIGHT_RT4831) += rt4831-backlight.o
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index aa5c15e8db86..81c40d355aae 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -8,7 +8,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <linux/fb.h>
#include <linux/backlight.h>
#include <linux/mfd/adp5520.h>
#include <linux/slab.h>
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index f51ada4795e8..d4bbd7a7406b 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -11,7 +11,6 @@
#include <linux/pm.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
-#include <linux/fb.h>
#include <linux/backlight.h>
#include <linux/leds.h>
#include <linux/slab.h>
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index ad4bd4c8f441..e09e20492e7c 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -11,7 +11,6 @@
#include <linux/pm.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
-#include <linux/fb.h>
#include <linux/backlight.h>
#include <linux/leds.h>
#include <linux/workqueue.h>
diff --git a/drivers/video/backlight/apple_dwi_bl.c b/drivers/video/backlight/apple_dwi_bl.c
new file mode 100644
index 000000000000..93bd744972d6
--- /dev/null
+++ b/drivers/video/backlight/apple_dwi_bl.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Driver for backlight controllers attached via Apple DWI 2-wire interface
+ *
+ * Copyright (c) 2024 Nick Chan <towinchenmi@gmail.com>
+ */
+
+#include <linux/backlight.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define DWI_BL_CTL 0x0
+#define DWI_BL_CTL_SEND1 BIT(0)
+#define DWI_BL_CTL_SEND2 BIT(4)
+#define DWI_BL_CTL_SEND3 BIT(5)
+#define DWI_BL_CTL_LE_DATA BIT(6)
+/* Only used on Apple A9 and later */
+#define DWI_BL_CTL_SEND4 BIT(12)
+
+#define DWI_BL_CMD 0x4
+#define DWI_BL_CMD_TYPE GENMASK(31, 28)
+#define DWI_BL_CMD_TYPE_SET_BRIGHTNESS 0xa
+#define DWI_BL_CMD_DATA GENMASK(10, 0)
+
+#define DWI_BL_CTL_SEND (DWI_BL_CTL_SEND1 | \
+ DWI_BL_CTL_SEND2 | \
+ DWI_BL_CTL_SEND3 | \
+ DWI_BL_CTL_LE_DATA | \
+ DWI_BL_CTL_SEND4)
+
+#define DWI_BL_MAX_BRIGHTNESS 2047
+
+struct apple_dwi_bl {
+ void __iomem *base;
+};
+
+static int dwi_bl_update_status(struct backlight_device *bl)
+{
+ struct apple_dwi_bl *dwi_bl = bl_get_data(bl);
+
+ int brightness = backlight_get_brightness(bl);
+
+ u32 cmd = 0;
+
+ cmd |= FIELD_PREP(DWI_BL_CMD_DATA, brightness);
+ cmd |= FIELD_PREP(DWI_BL_CMD_TYPE, DWI_BL_CMD_TYPE_SET_BRIGHTNESS);
+
+ writel(cmd, dwi_bl->base + DWI_BL_CMD);
+ writel(DWI_BL_CTL_SEND, dwi_bl->base + DWI_BL_CTL);
+
+ return 0;
+}
+
+static int dwi_bl_get_brightness(struct backlight_device *bl)
+{
+ struct apple_dwi_bl *dwi_bl = bl_get_data(bl);
+
+ u32 cmd = readl(dwi_bl->base + DWI_BL_CMD);
+
+ return FIELD_GET(DWI_BL_CMD_DATA, cmd);
+}
+
+static const struct backlight_ops dwi_bl_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .get_brightness = dwi_bl_get_brightness,
+ .update_status = dwi_bl_update_status
+};
+
+static int dwi_bl_probe(struct platform_device *dev)
+{
+ struct apple_dwi_bl *dwi_bl;
+ struct backlight_device *bl;
+ struct backlight_properties props;
+ struct resource *res;
+
+ dwi_bl = devm_kzalloc(&dev->dev, sizeof(*dwi_bl), GFP_KERNEL);
+ if (!dwi_bl)
+ return -ENOMEM;
+
+ dwi_bl->base = devm_platform_get_and_ioremap_resource(dev, 0, &res);
+ if (IS_ERR(dwi_bl->base))
+ return PTR_ERR(dwi_bl->base);
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = DWI_BL_MAX_BRIGHTNESS;
+ props.scale = BACKLIGHT_SCALE_LINEAR;
+
+ bl = devm_backlight_device_register(&dev->dev, dev->name, &dev->dev,
+ dwi_bl, &dwi_bl_ops, &props);
+ if (IS_ERR(bl))
+ return PTR_ERR(bl);
+
+ platform_set_drvdata(dev, dwi_bl);
+
+ bl->props.brightness = dwi_bl_get_brightness(bl);
+
+ return 0;
+}
+
+static const struct of_device_id dwi_bl_of_match[] = {
+ { .compatible = "apple,dwi-bl" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, dwi_bl_of_match);
+
+static struct platform_driver dwi_bl_driver = {
+ .driver = {
+ .name = "apple-dwi-bl",
+ .of_match_table = dwi_bl_of_match
+ },
+ .probe = dwi_bl_probe,
+};
+
+module_platform_driver(dwi_bl_driver);
+
+MODULE_DESCRIPTION("Apple DWI Backlight Driver");
+MODULE_AUTHOR("Nick Chan <towinchenmi@gmail.com>");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/video/backlight/as3711_bl.c b/drivers/video/backlight/as3711_bl.c
index e6f66bb35ef5..9f89eb19894e 100644
--- a/drivers/video/backlight/as3711_bl.c
+++ b/drivers/video/backlight/as3711_bl.c
@@ -10,7 +10,6 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
-#include <linux/fb.h>
#include <linux/kernel.h>
#include <linux/mfd/as3711.h>
#include <linux/module.h>
diff --git a/drivers/video/backlight/bd6107.c b/drivers/video/backlight/bd6107.c
index 90764f83d2f1..74567af84e97 100644
--- a/drivers/video/backlight/bd6107.c
+++ b/drivers/video/backlight/bd6107.c
@@ -10,7 +10,6 @@
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/fb.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index 71f21bbc7a9f..81ff42bec0ad 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <linux/fb.h>
#include <linux/backlight.h>
#include <linux/mfd/da903x.h>
#include <linux/slab.h>
diff --git a/drivers/video/backlight/da9052_bl.c b/drivers/video/backlight/da9052_bl.c
index 5e13ef96b717..f41523d78121 100644
--- a/drivers/video/backlight/da9052_bl.c
+++ b/drivers/video/backlight/da9052_bl.c
@@ -9,7 +9,6 @@
#include <linux/backlight.h>
#include <linux/delay.h>
-#include <linux/fb.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/video/backlight/ep93xx_bl.c b/drivers/video/backlight/ep93xx_bl.c
index 2387009d452d..f59effc02352 100644
--- a/drivers/video/backlight/ep93xx_bl.c
+++ b/drivers/video/backlight/ep93xx_bl.c
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
-#include <linux/fb.h>
#include <linux/backlight.h>
#define EP93XX_MAX_COUNT 255
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index fa9a983533b2..d8c2e4ada384 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -15,7 +15,6 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
-#include <linux/fb.h>
#include <linux/backlight.h>
#include <cpu/dac.h>
diff --git a/drivers/video/backlight/led_bl.c b/drivers/video/backlight/led_bl.c
index ae34d1ecbfbe..d2db157b2c29 100644
--- a/drivers/video/backlight/led_bl.c
+++ b/drivers/video/backlight/led_bl.c
@@ -229,8 +229,11 @@ static void led_bl_remove(struct platform_device *pdev)
backlight_device_unregister(bl);
led_bl_power_off(priv);
- for (i = 0; i < priv->nb_leds; i++)
+ for (i = 0; i < priv->nb_leds; i++) {
+ mutex_lock(&priv->leds[i]->led_access);
led_sysfs_enable(priv->leds[i]);
+ mutex_unlock(&priv->leds[i]->led_access);
+ }
}
static const struct of_device_id led_bl_of_match[] = {
diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
index 346d3e29a843..1b493fb0516d 100644
--- a/drivers/video/backlight/locomolcd.c
+++ b/drivers/video/backlight/locomolcd.c
@@ -16,7 +16,6 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
-#include <linux/fb.h>
#include <linux/backlight.h>
#include <asm/hardware/locomo.h>
diff --git a/drivers/video/backlight/lv5207lp.c b/drivers/video/backlight/lv5207lp.c
index 5f60989fa70f..a205f004eab2 100644
--- a/drivers/video/backlight/lv5207lp.c
+++ b/drivers/video/backlight/lv5207lp.c
@@ -9,7 +9,6 @@
#include <linux/backlight.h>
#include <linux/err.h>
-#include <linux/fb.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/platform_data/lv5207lp.h>
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index e607ec6fd4bf..4ac20a59e007 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -9,7 +9,6 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
-#include <linux/fb.h>
#include <linux/i2c.h>
#include <linux/backlight.h>
#include <linux/mfd/max8925.h>
diff --git a/drivers/video/backlight/pcf50633-backlight.c b/drivers/video/backlight/pcf50633-backlight.c
deleted file mode 100644
index 157be2f366df..000000000000
--- a/drivers/video/backlight/pcf50633-backlight.c
+++ /dev/null
@@ -1,154 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
- * PCF50633 backlight device driver
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-
-#include <linux/backlight.h>
-
-#include <linux/mfd/pcf50633/core.h>
-#include <linux/mfd/pcf50633/backlight.h>
-
-struct pcf50633_bl {
- struct pcf50633 *pcf;
- struct backlight_device *bl;
-
- unsigned int brightness;
- unsigned int brightness_limit;
-};
-
-/*
- * pcf50633_bl_set_brightness_limit
- *
- * Update the brightness limit for the pc50633 backlight. The actual brightness
- * will not go above the limit. This is useful to limit power drain for example
- * on low battery.
- *
- * @dev: Pointer to a pcf50633 device
- * @limit: The brightness limit. Valid values are 0-63
- */
-int pcf50633_bl_set_brightness_limit(struct pcf50633 *pcf, unsigned int limit)
-{
- struct pcf50633_bl *pcf_bl = platform_get_drvdata(pcf->bl_pdev);
-
- if (!pcf_bl)
- return -ENODEV;
-
- pcf_bl->brightness_limit = limit & 0x3f;
- backlight_update_status(pcf_bl->bl);
-
- return 0;
-}
-
-static int pcf50633_bl_update_status(struct backlight_device *bl)
-{
- struct pcf50633_bl *pcf_bl = bl_get_data(bl);
- unsigned int new_brightness;
-
-
- if (bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK) ||
- bl->props.power != BACKLIGHT_POWER_ON)
- new_brightness = 0;
- else if (bl->props.brightness < pcf_bl->brightness_limit)
- new_brightness = bl->props.brightness;
- else
- new_brightness = pcf_bl->brightness_limit;
-
-
- if (pcf_bl->brightness == new_brightness)
- return 0;
-
- if (new_brightness) {
- pcf50633_reg_write(pcf_bl->pcf, PCF50633_REG_LEDOUT,
- new_brightness);
- if (!pcf_bl->brightness)
- pcf50633_reg_write(pcf_bl->pcf, PCF50633_REG_LEDENA, 1);
- } else {
- pcf50633_reg_write(pcf_bl->pcf, PCF50633_REG_LEDENA, 0);
- }
-
- pcf_bl->brightness = new_brightness;
-
- return 0;
-}
-
-static int pcf50633_bl_get_brightness(struct backlight_device *bl)
-{
- struct pcf50633_bl *pcf_bl = bl_get_data(bl);
-
- return pcf_bl->brightness;
-}
-
-static const struct backlight_ops pcf50633_bl_ops = {
- .get_brightness = pcf50633_bl_get_brightness,
- .update_status = pcf50633_bl_update_status,
- .options = BL_CORE_SUSPENDRESUME,
-};
-
-static int pcf50633_bl_probe(struct platform_device *pdev)
-{
- struct pcf50633_bl *pcf_bl;
- struct device *parent = pdev->dev.parent;
- struct pcf50633_platform_data *pcf50633_data = dev_get_platdata(parent);
- struct pcf50633_bl_platform_data *pdata = pcf50633_data->backlight_data;
- struct backlight_properties bl_props;
-
- pcf_bl = devm_kzalloc(&pdev->dev, sizeof(*pcf_bl), GFP_KERNEL);
- if (!pcf_bl)
- return -ENOMEM;
-
- memset(&bl_props, 0, sizeof(bl_props));
- bl_props.type = BACKLIGHT_RAW;
- bl_props.max_brightness = 0x3f;
- bl_props.power = BACKLIGHT_POWER_ON;
-
- if (pdata) {
- bl_props.brightness = pdata->default_brightness;
- pcf_bl->brightness_limit = pdata->default_brightness_limit;
- } else {
- bl_props.brightness = 0x3f;
- pcf_bl->brightness_limit = 0x3f;
- }
-
- pcf_bl->pcf = dev_to_pcf50633(pdev->dev.parent);
-
- pcf_bl->bl = devm_backlight_device_register(&pdev->dev, pdev->name,
- &pdev->dev, pcf_bl,
- &pcf50633_bl_ops, &bl_props);
-
- if (IS_ERR(pcf_bl->bl))
- return PTR_ERR(pcf_bl->bl);
-
- platform_set_drvdata(pdev, pcf_bl);
-
- pcf50633_reg_write(pcf_bl->pcf, PCF50633_REG_LEDDIM, pdata->ramp_time);
-
- /*
- * Should be different from bl_props.brightness, so we do not exit
- * update_status early the first time it's called
- */
- pcf_bl->brightness = pcf_bl->bl->props.brightness + 1;
-
- backlight_update_status(pcf_bl->bl);
-
- return 0;
-}
-
-static struct platform_driver pcf50633_bl_driver = {
- .probe = pcf50633_bl_probe,
- .driver = {
- .name = "pcf50633-backlight",
- },
-};
-
-module_platform_driver(pcf50633_bl_driver);
-
-MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
-MODULE_DESCRIPTION("PCF50633 backlight driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:pcf50633-backlight");
diff --git a/drivers/video/backlight/tps65217_bl.c b/drivers/video/backlight/tps65217_bl.c
index d96d713fe7db..8aa860350644 100644
--- a/drivers/video/backlight/tps65217_bl.c
+++ b/drivers/video/backlight/tps65217_bl.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/backlight.h>
#include <linux/err.h>
-#include <linux/fb.h>
#include <linux/mfd/tps65217.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/video/backlight/vgg2432a4.c b/drivers/video/backlight/vgg2432a4.c
index bfc1913e8b55..3005eba6c82c 100644
--- a/drivers/video/backlight/vgg2432a4.c
+++ b/drivers/video/backlight/vgg2432a4.c
@@ -10,7 +10,6 @@
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/fb.h>
#include <linux/init.h>
#include <linux/lcd.h>
#include <linux/module.h>
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
index c5aaee205bdf..49027f04a1ec 100644
--- a/drivers/video/backlight/wm831x_bl.c
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -9,7 +9,6 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
-#include <linux/fb.h>
#include <linux/backlight.h>
#include <linux/slab.h>
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index bc31db6ef7d2..12f54480f57f 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -24,7 +24,7 @@ config VGA_CONSOLE
Say Y.
config MDA_CONSOLE
- depends on !M68K && !PARISC && ISA
+ depends on VGA_CONSOLE && ISA
tristate "MDA text console (dual-headed)"
help
Say Y here if you have an old MDA or monochrome Hercules graphics
@@ -47,12 +47,11 @@ config SGI_NEWPORT_CONSOLE
card of your Indy. Most people say Y here.
config DUMMY_CONSOLE
- bool
- default y
+ def_bool VT || VGA_CONSOLE || FRAMEBUFFER_CONSOLE
config DUMMY_CONSOLE_COLUMNS
int "Initial number of console screen columns"
- depends on DUMMY_CONSOLE && !ARCH_FOOTBRIDGE
+ depends on DUMMY_CONSOLE && !(ARCH_FOOTBRIDGE && VGA_CONSOLE)
default 160 if PARISC
default 80
help
@@ -62,7 +61,7 @@ config DUMMY_CONSOLE_COLUMNS
config DUMMY_CONSOLE_ROWS
int "Initial number of console screen rows"
- depends on DUMMY_CONSOLE && !ARCH_FOOTBRIDGE
+ depends on DUMMY_CONSOLE && !(ARCH_FOOTBRIDGE && VGA_CONSOLE)
default 64 if PARISC
default 30 if ARM
default 25
diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
index 971355c2cd7e..e826cb7dd55d 100644
--- a/drivers/video/fbdev/aty/mach64_cursor.c
+++ b/drivers/video/fbdev/aty/mach64_cursor.c
@@ -6,7 +6,6 @@
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/string.h>
-#include "../core/fb_draw.h"
#include <asm/io.h>
@@ -57,6 +56,12 @@
* definitation and CUR_VERT_POSN must be saturated to zero.
*/
+/* compose pixels based on mask */
+static inline unsigned long comp(unsigned long set, unsigned long unset, unsigned long mask)
+{
+ return ((set ^ unset) & mask) ^ unset;
+}
+
/*
* Hardware Cursor support.
*/
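
Note on the comp() helper added above: it merges two words under a mask, taking bits from set where mask is 1 and from unset where mask is 0; ((set ^ unset) & mask) ^ unset is a branch-free form of (set & mask) | (unset & ~mask). A short userspace check of that identity:

	/* Userspace check of the comp() bit-merge identity. */
	#include <assert.h>
	#include <stdio.h>

	static unsigned long comp(unsigned long set, unsigned long unset, unsigned long mask)
	{
		return ((set ^ unset) & mask) ^ unset;
	}

	int main(void)
	{
		unsigned long set = 0xAAAAAAAAUL, unset = 0x55555555UL, mask = 0x00FF00FFUL;
		unsigned long merged = comp(set, unset, mask);

		/* same result as the naive select-by-mask expression */
		assert(merged == ((set & mask) | (unset & ~mask)));
		printf("0x%08lx\n", merged);	/* prints 0x55aa55aa */
		return 0;
	}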
diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c
index 840f22160763..6251a6b07b3a 100644
--- a/drivers/video/fbdev/au1100fb.c
+++ b/drivers/video/fbdev/au1100fb.c
@@ -137,13 +137,15 @@ static int au1100fb_fb_blank(int blank_mode, struct fb_info *fbi)
*/
int au1100fb_setmode(struct au1100fb_device *fbdev)
{
- struct fb_info *info = &fbdev->info;
+ struct fb_info *info;
u32 words;
int index;
if (!fbdev)
return -EINVAL;
+ info = &fbdev->info;
+
/* Update var-dependent FB info */
if (panel_is_active(fbdev->panel) || panel_is_color(fbdev->panel)) {
if (info->var.bits_per_pixel <= 8) {
diff --git a/drivers/video/fbdev/core/Kconfig b/drivers/video/fbdev/core/Kconfig
index d554d8c543d4..4abe12db7594 100644
--- a/drivers/video/fbdev/core/Kconfig
+++ b/drivers/video/fbdev/core/Kconfig
@@ -69,7 +69,7 @@ config FB_CFB_REV_PIXELS_IN_BYTE
bool
depends on FB_CORE
help
- Allow generic frame-buffer functions to work on displays with 1, 2
+ Allow I/O memory frame-buffer functions to work on displays with 1, 2
and 4 bits per pixel depths which has opposite order of pixels in
byte order to bytes in long order.
@@ -97,6 +97,14 @@ config FB_SYS_IMAGEBLIT
blitting. This is used by drivers that don't provide their own
(accelerated) version and the framebuffer is in system RAM.
+config FB_SYS_REV_PIXELS_IN_BYTE
+ bool
+ depends on FB_CORE
+ help
+ Allow virtual memory frame-buffer functions to work on displays with 1, 2
+ and 4 bits per pixel depths which has opposite order of pixels in
+ byte order to bytes in long order.
+
config FB_PROVIDE_GET_FB_UNMAPPED_AREA
bool
depends on FB
diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c
index 3ff1b2a8659e..f9475c14f733 100644
--- a/drivers/video/fbdev/core/bitblit.c
+++ b/drivers/video/fbdev/core/bitblit.c
@@ -59,12 +59,11 @@ static void bit_bmove(struct vc_data *vc, struct fb_info *info, int sy,
}
static void bit_clear(struct vc_data *vc, struct fb_info *info, int sy,
- int sx, int height, int width)
+ int sx, int height, int width, int fg, int bg)
{
- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
struct fb_fillrect region;
- region.color = attr_bgcol_ec(bgshift, vc, info);
+ region.color = bg;
region.dx = sx * vc->vc_font.width;
region.dy = sy * vc->vc_font.height;
region.width = width * vc->vc_font.width;
diff --git a/drivers/video/fbdev/core/cfbcopyarea.c b/drivers/video/fbdev/core/cfbcopyarea.c
index a271f57d9c6c..23fbf3a8df7c 100644
--- a/drivers/video/fbdev/core/cfbcopyarea.c
+++ b/drivers/video/fbdev/core/cfbcopyarea.c
@@ -1,440 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * Generic function for frame buffer with packed pixels of any depth.
- *
- * Copyright (C) 1999-2005 James Simmons <jsimmons@www.infradead.org>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- *
- * NOTES:
- *
- * This is for cfb packed pixels. Iplan and such are incorporated in the
- * drivers that need them.
- *
- * FIXME
- *
- * Also need to add code to deal with cards endians that are different than
- * the native cpu endians. I also need to deal with MSB position in the word.
- *
- * The two functions or copying forward and backward could be split up like
- * the ones for filling, i.e. in aligned and unaligned versions. This would
- * help moving some redundant computations and branches out of the loop, too.
+ * Copyright (C) 2025 Zsolt Kajtar (soci@c64.rulez.org)
*/
-
#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
#include <linux/fb.h>
+#include <linux/bitrev.h>
#include <asm/types.h>
-#include <asm/io.h>
-#include "fb_draw.h"
-
-#if BITS_PER_LONG == 32
-# define FB_WRITEL fb_writel
-# define FB_READL fb_readl
-#else
-# define FB_WRITEL fb_writeq
-# define FB_READL fb_readq
-#endif
-
- /*
- * Generic bitwise copy algorithm
- */
-
-static void
-bitcpy(struct fb_info *p, unsigned long __iomem *dst, unsigned dst_idx,
- const unsigned long __iomem *src, unsigned src_idx, int bits,
- unsigned n, u32 bswapmask)
-{
- unsigned long first, last;
- int const shift = dst_idx-src_idx;
-#if 0
- /*
- * If you suspect bug in this function, compare it with this simple
- * memmove implementation.
- */
- memmove((char *)dst + ((dst_idx & (bits - 1))) / 8,
- (char *)src + ((src_idx & (bits - 1))) / 8, n / 8);
- return;
+#ifdef CONFIG_FB_CFB_REV_PIXELS_IN_BYTE
+#define FB_REV_PIXELS_IN_BYTE
#endif
- first = fb_shifted_pixels_mask_long(p, dst_idx, bswapmask);
- last = ~fb_shifted_pixels_mask_long(p, (dst_idx+n) % bits, bswapmask);
-
- if (!shift) {
- // Same alignment for source and dest
-
- if (dst_idx+n <= bits) {
- // Single word
- if (last)
- first &= last;
- FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst);
- } else {
- // Multiple destination words
-
- // Leading bits
- if (first != ~0UL) {
- FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst);
- dst++;
- src++;
- n -= bits - dst_idx;
- }
-
- // Main chunk
- n /= bits;
- while (n >= 8) {
- FB_WRITEL(FB_READL(src++), dst++);
- FB_WRITEL(FB_READL(src++), dst++);
- FB_WRITEL(FB_READL(src++), dst++);
- FB_WRITEL(FB_READL(src++), dst++);
- FB_WRITEL(FB_READL(src++), dst++);
- FB_WRITEL(FB_READL(src++), dst++);
- FB_WRITEL(FB_READL(src++), dst++);
- FB_WRITEL(FB_READL(src++), dst++);
- n -= 8;
- }
- while (n--)
- FB_WRITEL(FB_READL(src++), dst++);
-
- // Trailing bits
- if (last)
- FB_WRITEL( comp( FB_READL(src), FB_READL(dst), last), dst);
- }
- } else {
- /* Different alignment for source and dest */
- unsigned long d0, d1;
- int m;
-
- int const left = shift & (bits - 1);
- int const right = -shift & (bits - 1);
-
- if (dst_idx+n <= bits) {
- // Single destination word
- if (last)
- first &= last;
- d0 = FB_READL(src);
- d0 = fb_rev_pixels_in_long(d0, bswapmask);
- if (shift > 0) {
- // Single source word
- d0 <<= left;
- } else if (src_idx+n <= bits) {
- // Single source word
- d0 >>= right;
- } else {
- // 2 source words
- d1 = FB_READL(src + 1);
- d1 = fb_rev_pixels_in_long(d1, bswapmask);
- d0 = d0 >> right | d1 << left;
- }
- d0 = fb_rev_pixels_in_long(d0, bswapmask);
- FB_WRITEL(comp(d0, FB_READL(dst), first), dst);
- } else {
- // Multiple destination words
- /** We must always remember the last value read, because in case
- SRC and DST overlap bitwise (e.g. when moving just one pixel in
- 1bpp), we always collect one full long for DST and that might
- overlap with the current long from SRC. We store this value in
- 'd0'. */
- d0 = FB_READL(src++);
- d0 = fb_rev_pixels_in_long(d0, bswapmask);
- // Leading bits
- if (shift > 0) {
- // Single source word
- d1 = d0;
- d0 <<= left;
- n -= bits - dst_idx;
- } else {
- // 2 source words
- d1 = FB_READL(src++);
- d1 = fb_rev_pixels_in_long(d1, bswapmask);
-
- d0 = d0 >> right | d1 << left;
- n -= bits - dst_idx;
- }
- d0 = fb_rev_pixels_in_long(d0, bswapmask);
- FB_WRITEL(comp(d0, FB_READL(dst), first), dst);
- d0 = d1;
- dst++;
-
- // Main chunk
- m = n % bits;
- n /= bits;
- while ((n >= 4) && !bswapmask) {
- d1 = FB_READL(src++);
- FB_WRITEL(d0 >> right | d1 << left, dst++);
- d0 = d1;
- d1 = FB_READL(src++);
- FB_WRITEL(d0 >> right | d1 << left, dst++);
- d0 = d1;
- d1 = FB_READL(src++);
- FB_WRITEL(d0 >> right | d1 << left, dst++);
- d0 = d1;
- d1 = FB_READL(src++);
- FB_WRITEL(d0 >> right | d1 << left, dst++);
- d0 = d1;
- n -= 4;
- }
- while (n--) {
- d1 = FB_READL(src++);
- d1 = fb_rev_pixels_in_long(d1, bswapmask);
- d0 = d0 >> right | d1 << left;
- d0 = fb_rev_pixels_in_long(d0, bswapmask);
- FB_WRITEL(d0, dst++);
- d0 = d1;
- }
-
- // Trailing bits
- if (m) {
- if (m <= bits - right) {
- // Single source word
- d0 >>= right;
- } else {
- // 2 source words
- d1 = FB_READL(src);
- d1 = fb_rev_pixels_in_long(d1,
- bswapmask);
- d0 = d0 >> right | d1 << left;
- }
- d0 = fb_rev_pixels_in_long(d0, bswapmask);
- FB_WRITEL(comp(d0, FB_READL(dst), last), dst);
- }
- }
- }
-}
-
- /*
- * Generic bitwise copy algorithm, operating backward
- */
-
-static void
-bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, unsigned dst_idx,
- const unsigned long __iomem *src, unsigned src_idx, int bits,
- unsigned n, u32 bswapmask)
-{
- unsigned long first, last;
- int shift;
-
-#if 0
- /*
- * If you suspect bug in this function, compare it with this simple
- * memmove implementation.
- */
- memmove((char *)dst + ((dst_idx & (bits - 1))) / 8,
- (char *)src + ((src_idx & (bits - 1))) / 8, n / 8);
- return;
-#endif
-
- dst += (dst_idx + n - 1) / bits;
- src += (src_idx + n - 1) / bits;
- dst_idx = (dst_idx + n - 1) % bits;
- src_idx = (src_idx + n - 1) % bits;
-
- shift = dst_idx-src_idx;
-
- first = ~fb_shifted_pixels_mask_long(p, (dst_idx + 1) % bits, bswapmask);
- last = fb_shifted_pixels_mask_long(p, (bits + dst_idx + 1 - n) % bits, bswapmask);
-
- if (!shift) {
- // Same alignment for source and dest
-
- if ((unsigned long)dst_idx+1 >= n) {
- // Single word
- if (first)
- last &= first;
- FB_WRITEL( comp( FB_READL(src), FB_READL(dst), last), dst);
- } else {
- // Multiple destination words
-
- // Leading bits
- if (first) {
- FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst);
- dst--;
- src--;
- n -= dst_idx+1;
- }
-
- // Main chunk
- n /= bits;
- while (n >= 8) {
- FB_WRITEL(FB_READL(src--), dst--);
- FB_WRITEL(FB_READL(src--), dst--);
- FB_WRITEL(FB_READL(src--), dst--);
- FB_WRITEL(FB_READL(src--), dst--);
- FB_WRITEL(FB_READL(src--), dst--);
- FB_WRITEL(FB_READL(src--), dst--);
- FB_WRITEL(FB_READL(src--), dst--);
- FB_WRITEL(FB_READL(src--), dst--);
- n -= 8;
- }
- while (n--)
- FB_WRITEL(FB_READL(src--), dst--);
-
- // Trailing bits
- if (last != -1UL)
- FB_WRITEL( comp( FB_READL(src), FB_READL(dst), last), dst);
- }
- } else {
- // Different alignment for source and dest
- unsigned long d0, d1;
- int m;
-
- int const left = shift & (bits-1);
- int const right = -shift & (bits-1);
-
- if ((unsigned long)dst_idx+1 >= n) {
- // Single destination word
- if (first)
- last &= first;
- d0 = FB_READL(src);
- if (shift < 0) {
- // Single source word
- d0 >>= right;
- } else if (1+(unsigned long)src_idx >= n) {
- // Single source word
- d0 <<= left;
- } else {
- // 2 source words
- d1 = FB_READL(src - 1);
- d1 = fb_rev_pixels_in_long(d1, bswapmask);
- d0 = d0 << left | d1 >> right;
- }
- d0 = fb_rev_pixels_in_long(d0, bswapmask);
- FB_WRITEL(comp(d0, FB_READL(dst), last), dst);
- } else {
- // Multiple destination words
- /** We must always remember the last value read, because in case
- SRC and DST overlap bitwise (e.g. when moving just one pixel in
- 1bpp), we always collect one full long for DST and that might
- overlap with the current long from SRC. We store this value in
- 'd0'. */
-
- d0 = FB_READL(src--);
- d0 = fb_rev_pixels_in_long(d0, bswapmask);
- // Leading bits
- if (shift < 0) {
- // Single source word
- d1 = d0;
- d0 >>= right;
- } else {
- // 2 source words
- d1 = FB_READL(src--);
- d1 = fb_rev_pixels_in_long(d1, bswapmask);
- d0 = d0 << left | d1 >> right;
- }
- d0 = fb_rev_pixels_in_long(d0, bswapmask);
- if (!first)
- FB_WRITEL(d0, dst);
- else
- FB_WRITEL(comp(d0, FB_READL(dst), first), dst);
- d0 = d1;
- dst--;
- n -= dst_idx+1;
-
- // Main chunk
- m = n % bits;
- n /= bits;
- while ((n >= 4) && !bswapmask) {
- d1 = FB_READL(src--);
- FB_WRITEL(d0 << left | d1 >> right, dst--);
- d0 = d1;
- d1 = FB_READL(src--);
- FB_WRITEL(d0 << left | d1 >> right, dst--);
- d0 = d1;
- d1 = FB_READL(src--);
- FB_WRITEL(d0 << left | d1 >> right, dst--);
- d0 = d1;
- d1 = FB_READL(src--);
- FB_WRITEL(d0 << left | d1 >> right, dst--);
- d0 = d1;
- n -= 4;
- }
- while (n--) {
- d1 = FB_READL(src--);
- d1 = fb_rev_pixels_in_long(d1, bswapmask);
- d0 = d0 << left | d1 >> right;
- d0 = fb_rev_pixels_in_long(d0, bswapmask);
- FB_WRITEL(d0, dst--);
- d0 = d1;
- }
-
- // Trailing bits
- if (m) {
- if (m <= bits - left) {
- // Single source word
- d0 <<= left;
- } else {
- // 2 source words
- d1 = FB_READL(src);
- d1 = fb_rev_pixels_in_long(d1,
- bswapmask);
- d0 = d0 << left | d1 >> right;
- }
- d0 = fb_rev_pixels_in_long(d0, bswapmask);
- FB_WRITEL(comp(d0, FB_READL(dst), last), dst);
- }
- }
- }
-}
+#include "cfbmem.h"
+#include "fb_copyarea.h"
void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
{
- u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy;
- u32 height = area->height, width = area->width;
- unsigned int const bits_per_line = p->fix.line_length * 8u;
- unsigned long __iomem *base = NULL;
- int bits = BITS_PER_LONG, bytes = bits >> 3;
- unsigned dst_idx = 0, src_idx = 0, rev_copy = 0;
- u32 bswapmask = fb_compute_bswapmask(p);
-
if (p->state != FBINFO_STATE_RUNNING)
return;
if (p->flags & FBINFO_VIRTFB)
- fb_warn_once(p, "Framebuffer is not in I/O address space.");
-
- /* if the beginning of the target area might overlap with the end of
- the source area, be have to copy the area reverse. */
- if ((dy == sy && dx > sx) || (dy > sy)) {
- dy += height;
- sy += height;
- rev_copy = 1;
- }
-
- // split the base of the framebuffer into a long-aligned address and the
- // index of the first bit
- base = (unsigned long __iomem *)((unsigned long)p->screen_base & ~(bytes-1));
- dst_idx = src_idx = 8*((unsigned long)p->screen_base & (bytes-1));
- // add offset of source and target area
- dst_idx += dy*bits_per_line + dx*p->var.bits_per_pixel;
- src_idx += sy*bits_per_line + sx*p->var.bits_per_pixel;
+ fb_warn_once(p, "%s: framebuffer is not in I/O address space.\n", __func__);
if (p->fbops->fb_sync)
p->fbops->fb_sync(p);
- if (rev_copy) {
- while (height--) {
- dst_idx -= bits_per_line;
- src_idx -= bits_per_line;
- bitcpy_rev(p, base + (dst_idx / bits), dst_idx % bits,
- base + (src_idx / bits), src_idx % bits, bits,
- width*p->var.bits_per_pixel, bswapmask);
- }
- } else {
- while (height--) {
- bitcpy(p, base + (dst_idx / bits), dst_idx % bits,
- base + (src_idx / bits), src_idx % bits, bits,
- width*p->var.bits_per_pixel, bswapmask);
- dst_idx += bits_per_line;
- src_idx += bits_per_line;
- }
- }
+ fb_copyarea(p, area);
}
-
EXPORT_SYMBOL(cfb_copyarea);
-MODULE_AUTHOR("James Simmons <jsimmons@users.sf.net>");
-MODULE_DESCRIPTION("Generic software accelerated copyarea");
+MODULE_AUTHOR("Zsolt Kajtar <soci@c64.rulez.org>");
+MODULE_DESCRIPTION("I/O memory packed pixel framebuffer area copy");
MODULE_LICENSE("GPL");
-
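
Note on the rewrite above: cfb_copyarea() is now a thin wrapper that keeps the FBINFO_STATE_RUNNING / FBINFO_VIRTFB checks and the fb_sync() call, then hands the work to the shared engine in fb_copyarea.h. That engine still rests on the edge-mask technique the removed bitcpy() used: whole words in the middle of a span are copied verbatim, while the partial words at either end are merged through a first/last mask so pixels outside the span survive. A runnable userspace sketch of the aligned case (same bit offset in source and destination), deliberately skipping the byte and pixel-order reversal the kernel code also handles:

	/* Userspace sketch of the aligned edge-mask copy: full words in the middle
	 * are copied as-is, partial words at both ends are merged through a mask
	 * so bits outside the span are untouched. Bit 0 is the LSB of word 0 here.
	 */
	#include <assert.h>
	#include <limits.h>
	#include <stdio.h>

	#define BITS (sizeof(unsigned long) * CHAR_BIT)

	static unsigned long merge(unsigned long set, unsigned long unset, unsigned long mask)
	{
		return (set & mask) | (unset & ~mask);
	}

	static void bitcpy_aligned(unsigned long *dst, const unsigned long *src,
				   unsigned int idx, unsigned int n)
	{
		unsigned int word = idx / BITS, bit = idx % BITS;
		unsigned long first = ~0UL << bit;
		unsigned long last = (bit + n) % BITS ? ~(~0UL << ((bit + n) % BITS)) : ~0UL;

		if (bit + n <= BITS) {				/* span fits in one word */
			dst[word] = merge(src[word], dst[word], first & last);
			return;
		}
		dst[word] = merge(src[word], dst[word], first);	/* leading partial word */
		for (word++, n -= BITS - bit; n >= BITS; n -= BITS, word++)
			dst[word] = src[word];			/* whole words */
		if (n)						/* trailing partial word */
			dst[word] = merge(src[word], dst[word], last);
	}

	int main(void)
	{
		unsigned long src[3] = { ~0UL, ~0UL, ~0UL };
		unsigned long dst[3] = { 0, 0, 0 };

		bitcpy_aligned(dst, src, 10, BITS + 20);	/* copy BITS+20 bits from bit 10 */
		assert(dst[0] == ~0UL << 10);			/* bits 10..end of word 0 set */
		assert(dst[1] == ~(~0UL << 30));		/* low 30 bits of word 1 set */
		assert(dst[2] == 0);				/* word 2 untouched */
		printf("ok\n");
		return 0;
	}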
diff --git a/drivers/video/fbdev/core/cfbfillrect.c b/drivers/video/fbdev/core/cfbfillrect.c
index cbaa4c9e2355..615de89256d5 100644
--- a/drivers/video/fbdev/core/cfbfillrect.c
+++ b/drivers/video/fbdev/core/cfbfillrect.c
@@ -1,374 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * Generic fillrect for frame buffers with packed pixels of any depth.
- *
- * Copyright (C) 2000 James Simmons (jsimmons@linux-fbdev.org)
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- *
- * NOTES:
- *
- * Also need to add code to deal with cards endians that are different than
- * the native cpu endians. I also need to deal with MSB position in the word.
- *
+ * Copyright (C) 2025 Zsolt Kajtar (soci@c64.rulez.org)
*/
#include <linux/module.h>
-#include <linux/string.h>
#include <linux/fb.h>
+#include <linux/bitrev.h>
#include <asm/types.h>
-#include "fb_draw.h"
-#if BITS_PER_LONG == 32
-# define FB_WRITEL fb_writel
-# define FB_READL fb_readl
-#else
-# define FB_WRITEL fb_writeq
-# define FB_READL fb_readq
+#ifdef CONFIG_FB_CFB_REV_PIXELS_IN_BYTE
+#define FB_REV_PIXELS_IN_BYTE
#endif
- /*
- * Aligned pattern fill using 32/64-bit memory accesses
- */
-
-static void
-bitfill_aligned(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
- unsigned long pat, unsigned n, int bits, u32 bswapmask)
-{
- unsigned long first, last;
-
- if (!n)
- return;
-
- first = fb_shifted_pixels_mask_long(p, dst_idx, bswapmask);
- last = ~fb_shifted_pixels_mask_long(p, (dst_idx+n) % bits, bswapmask);
-
- if (dst_idx+n <= bits) {
- // Single word
- if (last)
- first &= last;
- FB_WRITEL(comp(pat, FB_READL(dst), first), dst);
- } else {
- // Multiple destination words
-
- // Leading bits
- if (first!= ~0UL) {
- FB_WRITEL(comp(pat, FB_READL(dst), first), dst);
- dst++;
- n -= bits - dst_idx;
- }
-
- // Main chunk
- n /= bits;
- while (n >= 8) {
- FB_WRITEL(pat, dst++);
- FB_WRITEL(pat, dst++);
- FB_WRITEL(pat, dst++);
- FB_WRITEL(pat, dst++);
- FB_WRITEL(pat, dst++);
- FB_WRITEL(pat, dst++);
- FB_WRITEL(pat, dst++);
- FB_WRITEL(pat, dst++);
- n -= 8;
- }
- while (n--)
- FB_WRITEL(pat, dst++);
-
- // Trailing bits
- if (last)
- FB_WRITEL(comp(pat, FB_READL(dst), last), dst);
- }
-}
-
-
- /*
- * Unaligned generic pattern fill using 32/64-bit memory accesses
- * The pattern must have been expanded to a full 32/64-bit value
- * Left/right are the appropriate shifts to convert to the pattern to be
- * used for the next 32/64-bit word
- */
-
-static void
-bitfill_unaligned(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
- unsigned long pat, int left, int right, unsigned n, int bits)
-{
- unsigned long first, last;
-
- if (!n)
- return;
-
- first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
- last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
-
- if (dst_idx+n <= bits) {
- // Single word
- if (last)
- first &= last;
- FB_WRITEL(comp(pat, FB_READL(dst), first), dst);
- } else {
- // Multiple destination words
- // Leading bits
- if (first) {
- FB_WRITEL(comp(pat, FB_READL(dst), first), dst);
- dst++;
- pat = pat << left | pat >> right;
- n -= bits - dst_idx;
- }
-
- // Main chunk
- n /= bits;
- while (n >= 4) {
- FB_WRITEL(pat, dst++);
- pat = pat << left | pat >> right;
- FB_WRITEL(pat, dst++);
- pat = pat << left | pat >> right;
- FB_WRITEL(pat, dst++);
- pat = pat << left | pat >> right;
- FB_WRITEL(pat, dst++);
- pat = pat << left | pat >> right;
- n -= 4;
- }
- while (n--) {
- FB_WRITEL(pat, dst++);
- pat = pat << left | pat >> right;
- }
-
- // Trailing bits
- if (last)
- FB_WRITEL(comp(pat, FB_READL(dst), last), dst);
- }
-}
-
- /*
- * Aligned pattern invert using 32/64-bit memory accesses
- */
-static void
-bitfill_aligned_rev(struct fb_info *p, unsigned long __iomem *dst,
- int dst_idx, unsigned long pat, unsigned n, int bits,
- u32 bswapmask)
-{
- unsigned long val = pat, dat;
- unsigned long first, last;
-
- if (!n)
- return;
-
- first = fb_shifted_pixels_mask_long(p, dst_idx, bswapmask);
- last = ~fb_shifted_pixels_mask_long(p, (dst_idx+n) % bits, bswapmask);
-
- if (dst_idx+n <= bits) {
- // Single word
- if (last)
- first &= last;
- dat = FB_READL(dst);
- FB_WRITEL(comp(dat ^ val, dat, first), dst);
- } else {
- // Multiple destination words
- // Leading bits
- if (first!=0UL) {
- dat = FB_READL(dst);
- FB_WRITEL(comp(dat ^ val, dat, first), dst);
- dst++;
- n -= bits - dst_idx;
- }
-
- // Main chunk
- n /= bits;
- while (n >= 8) {
- FB_WRITEL(FB_READL(dst) ^ val, dst);
- dst++;
- FB_WRITEL(FB_READL(dst) ^ val, dst);
- dst++;
- FB_WRITEL(FB_READL(dst) ^ val, dst);
- dst++;
- FB_WRITEL(FB_READL(dst) ^ val, dst);
- dst++;
- FB_WRITEL(FB_READL(dst) ^ val, dst);
- dst++;
- FB_WRITEL(FB_READL(dst) ^ val, dst);
- dst++;
- FB_WRITEL(FB_READL(dst) ^ val, dst);
- dst++;
- FB_WRITEL(FB_READL(dst) ^ val, dst);
- dst++;
- n -= 8;
- }
- while (n--) {
- FB_WRITEL(FB_READL(dst) ^ val, dst);
- dst++;
- }
- // Trailing bits
- if (last) {
- dat = FB_READL(dst);
- FB_WRITEL(comp(dat ^ val, dat, last), dst);
- }
- }
-}
-
-
- /*
- * Unaligned generic pattern invert using 32/64-bit memory accesses
- * The pattern must have been expanded to a full 32/64-bit value
- * Left/right are the appropriate shifts to convert to the pattern to be
- * used for the next 32/64-bit word
- */
-
-static void
-bitfill_unaligned_rev(struct fb_info *p, unsigned long __iomem *dst,
- int dst_idx, unsigned long pat, int left, int right,
- unsigned n, int bits)
-{
- unsigned long first, last, dat;
-
- if (!n)
- return;
-
- first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
- last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
-
- if (dst_idx+n <= bits) {
- // Single word
- if (last)
- first &= last;
- dat = FB_READL(dst);
- FB_WRITEL(comp(dat ^ pat, dat, first), dst);
- } else {
- // Multiple destination words
-
- // Leading bits
- if (first != 0UL) {
- dat = FB_READL(dst);
- FB_WRITEL(comp(dat ^ pat, dat, first), dst);
- dst++;
- pat = pat << left | pat >> right;
- n -= bits - dst_idx;
- }
-
- // Main chunk
- n /= bits;
- while (n >= 4) {
- FB_WRITEL(FB_READL(dst) ^ pat, dst);
- dst++;
- pat = pat << left | pat >> right;
- FB_WRITEL(FB_READL(dst) ^ pat, dst);
- dst++;
- pat = pat << left | pat >> right;
- FB_WRITEL(FB_READL(dst) ^ pat, dst);
- dst++;
- pat = pat << left | pat >> right;
- FB_WRITEL(FB_READL(dst) ^ pat, dst);
- dst++;
- pat = pat << left | pat >> right;
- n -= 4;
- }
- while (n--) {
- FB_WRITEL(FB_READL(dst) ^ pat, dst);
- dst++;
- pat = pat << left | pat >> right;
- }
-
- // Trailing bits
- if (last) {
- dat = FB_READL(dst);
- FB_WRITEL(comp(dat ^ pat, dat, last), dst);
- }
- }
-}
+#include "cfbmem.h"
+#include "fb_fillrect.h"
void cfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
{
- unsigned long pat, pat2, fg;
- unsigned long width = rect->width, height = rect->height;
- int bits = BITS_PER_LONG, bytes = bits >> 3;
- u32 bpp = p->var.bits_per_pixel;
- unsigned long __iomem *dst;
- int dst_idx, left;
-
if (p->state != FBINFO_STATE_RUNNING)
return;
if (p->flags & FBINFO_VIRTFB)
- fb_warn_once(p, "Framebuffer is not in I/O address space.");
-
- if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
- p->fix.visual == FB_VISUAL_DIRECTCOLOR )
- fg = ((u32 *) (p->pseudo_palette))[rect->color];
- else
- fg = rect->color;
-
- pat = pixel_to_pat(bpp, fg);
+ fb_warn_once(p, "%s: framebuffer is not in I/O address space.\n", __func__);
- dst = (unsigned long __iomem *)((unsigned long)p->screen_base & ~(bytes-1));
- dst_idx = ((unsigned long)p->screen_base & (bytes - 1))*8;
- dst_idx += rect->dy*p->fix.line_length*8+rect->dx*bpp;
- /* FIXME For now we support 1-32 bpp only */
- left = bits % bpp;
if (p->fbops->fb_sync)
p->fbops->fb_sync(p);
- if (!left) {
- u32 bswapmask = fb_compute_bswapmask(p);
- void (*fill_op32)(struct fb_info *p,
- unsigned long __iomem *dst, int dst_idx,
- unsigned long pat, unsigned n, int bits,
- u32 bswapmask) = NULL;
- switch (rect->rop) {
- case ROP_XOR:
- fill_op32 = bitfill_aligned_rev;
- break;
- case ROP_COPY:
- fill_op32 = bitfill_aligned;
- break;
- default:
- printk( KERN_ERR "cfb_fillrect(): unknown rop, defaulting to ROP_COPY\n");
- fill_op32 = bitfill_aligned;
- break;
- }
- while (height--) {
- dst += dst_idx >> (ffs(bits) - 1);
- dst_idx &= (bits - 1);
- fill_op32(p, dst, dst_idx, pat, width*bpp, bits,
- bswapmask);
- dst_idx += p->fix.line_length*8;
- }
- } else {
- int right, r;
- void (*fill_op)(struct fb_info *p, unsigned long __iomem *dst,
- int dst_idx, unsigned long pat, int left,
- int right, unsigned n, int bits) = NULL;
-#ifdef __LITTLE_ENDIAN
- right = left;
- left = bpp - right;
-#else
- right = bpp - left;
-#endif
- switch (rect->rop) {
- case ROP_XOR:
- fill_op = bitfill_unaligned_rev;
- break;
- case ROP_COPY:
- fill_op = bitfill_unaligned;
- break;
- default:
- printk(KERN_ERR "cfb_fillrect(): unknown rop, defaulting to ROP_COPY\n");
- fill_op = bitfill_unaligned;
- break;
- }
- while (height--) {
- dst += dst_idx / bits;
- dst_idx &= (bits - 1);
- r = dst_idx % bpp;
- /* rotate pattern to the correct start position */
- pat2 = le_long_to_cpu(rolx(cpu_to_le_long(pat), r, bpp));
- fill_op(p, dst, dst_idx, pat2, left, right,
- width*bpp, bits);
- dst_idx += p->fix.line_length*8;
- }
- }
+ fb_fillrect(p, rect);
}
-
EXPORT_SYMBOL(cfb_fillrect);
-MODULE_AUTHOR("James Simmons <jsimmons@users.sf.net>");
-MODULE_DESCRIPTION("Generic software accelerated fill rectangle");
+MODULE_AUTHOR("Zsolt Kajtar <soci@c64.rulez.org>");
+MODULE_DESCRIPTION("I/O memory packed pixel framebuffer area fill");
MODULE_LICENSE("GPL");
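
Note on the rewrite above: cfb_fillrect() follows the same pattern, keeping the state checks and fb_sync() here and deferring the drawing to the shared fb_fillrect engine. The removed code first expanded the fill colour into a full machine word with pixel_to_pat() and then stored that pattern through the same edge masks. A userspace sketch of that expansion step for depths that divide the word size (the kernel helper also copes with awkward depths such as 24 bpp, which is skipped here):

	/* Userspace sketch of replicating one pixel value across a whole word,
	 * the pixel_to_pat() step behind the old cfb_fillrect(), for depths that
	 * divide the word size (1, 2, 4, 8, 16, 32 bpp on 32/64-bit longs).
	 */
	#include <limits.h>
	#include <stdio.h>

	#define BITS (sizeof(unsigned long) * CHAR_BIT)

	static unsigned long pixel_to_pattern(unsigned long pixel, unsigned int bpp)
	{
		unsigned long pat = pixel & (bpp == BITS ? ~0UL : (1UL << bpp) - 1);
		unsigned int filled;

		/* double the filled width until the whole word holds copies */
		for (filled = bpp; filled < BITS; filled *= 2)
			pat |= pat << filled;
		return pat;
	}

	int main(void)
	{
		/* prints 0x55555555 (or 0x5555555555555555 on 64-bit longs) */
		printf("0x%0*lx\n", (int)(BITS / 4), pixel_to_pattern(0x5, 4));
		return 0;
	}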
diff --git a/drivers/video/fbdev/core/cfbimgblt.c b/drivers/video/fbdev/core/cfbimgblt.c
index 7d1d2f1a627d..bcec4e32c0e7 100644
--- a/drivers/video/fbdev/core/cfbimgblt.c
+++ b/drivers/video/fbdev/core/cfbimgblt.c
@@ -1,369 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * Generic BitBLT function for frame buffer with packed pixels of any depth.
- *
- * Copyright (C) June 1999 James Simmons
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- *
- * NOTES:
- *
- * This function copys a image from system memory to video memory. The
- * image can be a bitmap where each 0 represents the background color and
- * each 1 represents the foreground color. Great for font handling. It can
- * also be a color image. This is determined by image_depth. The color image
- * must be laid out exactly in the same format as the framebuffer. Yes I know
- * their are cards with hardware that coverts images of various depths to the
- * framebuffer depth. But not every card has this. All images must be rounded
- * up to the nearest byte. For example a bitmap 12 bits wide must be two
- * bytes width.
- *
- * Tony:
- * Incorporate mask tables similar to fbcon-cfb*.c in 2.4 API. This speeds
- * up the code significantly.
- *
- * Code for depths not multiples of BITS_PER_LONG is still kludgy, which is
- * still processed a bit at a time.
- *
- * Also need to add code to deal with cards endians that are different than
- * the native cpu endians. I also need to deal with MSB position in the word.
+ * Copyright (C) 2025 Zsolt Kajtar (soci@c64.rulez.org)
*/
#include <linux/module.h>
-#include <linux/string.h>
#include <linux/fb.h>
+#include <linux/bitrev.h>
#include <asm/types.h>
-#include "fb_draw.h"
-#define DEBUG
-
-#ifdef DEBUG
-#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt,__func__,## args)
-#else
-#define DPRINTK(fmt, args...)
+#ifdef CONFIG_FB_CFB_REV_PIXELS_IN_BYTE
+#define FB_REV_PIXELS_IN_BYTE
#endif
-static const u32 cfb_tab8_be[] = {
- 0x00000000,0x000000ff,0x0000ff00,0x0000ffff,
- 0x00ff0000,0x00ff00ff,0x00ffff00,0x00ffffff,
- 0xff000000,0xff0000ff,0xff00ff00,0xff00ffff,
- 0xffff0000,0xffff00ff,0xffffff00,0xffffffff
-};
-
-static const u32 cfb_tab8_le[] = {
- 0x00000000,0xff000000,0x00ff0000,0xffff0000,
- 0x0000ff00,0xff00ff00,0x00ffff00,0xffffff00,
- 0x000000ff,0xff0000ff,0x00ff00ff,0xffff00ff,
- 0x0000ffff,0xff00ffff,0x00ffffff,0xffffffff
-};
-
-static const u32 cfb_tab16_be[] = {
- 0x00000000, 0x0000ffff, 0xffff0000, 0xffffffff
-};
-
-static const u32 cfb_tab16_le[] = {
- 0x00000000, 0xffff0000, 0x0000ffff, 0xffffffff
-};
-
-static const u32 cfb_tab32[] = {
- 0x00000000, 0xffffffff
-};
-
-#define FB_WRITEL fb_writel
-#define FB_READL fb_readl
-
-static inline void color_imageblit(const struct fb_image *image,
- struct fb_info *p, u8 __iomem *dst1,
- u32 start_index,
- u32 pitch_index)
-{
- /* Draw the penguin */
- u32 __iomem *dst, *dst2;
- u32 color = 0, val, shift;
- int i, n, bpp = p->var.bits_per_pixel;
- u32 null_bits = 32 - bpp;
- u32 *palette = (u32 *) p->pseudo_palette;
- const u8 *src = image->data;
- u32 bswapmask = fb_compute_bswapmask(p);
-
- dst2 = (u32 __iomem *) dst1;
- for (i = image->height; i--; ) {
- n = image->width;
- dst = (u32 __iomem *) dst1;
- shift = 0;
- val = 0;
-
- if (start_index) {
- u32 start_mask = ~fb_shifted_pixels_mask_u32(p,
- start_index, bswapmask);
- val = FB_READL(dst) & start_mask;
- shift = start_index;
- }
- while (n--) {
- if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
- p->fix.visual == FB_VISUAL_DIRECTCOLOR )
- color = palette[*src];
- else
- color = *src;
- color <<= FB_LEFT_POS(p, bpp);
- val |= FB_SHIFT_HIGH(p, color, shift ^ bswapmask);
- if (shift >= null_bits) {
- FB_WRITEL(val, dst++);
-
- val = (shift == null_bits) ? 0 :
- FB_SHIFT_LOW(p, color, 32 - shift);
- }
- shift += bpp;
- shift &= (32 - 1);
- src++;
- }
- if (shift) {
- u32 end_mask = fb_shifted_pixels_mask_u32(p, shift,
- bswapmask);
-
- FB_WRITEL((FB_READL(dst) & end_mask) | val, dst);
- }
- dst1 += p->fix.line_length;
- if (pitch_index) {
- dst2 += p->fix.line_length;
- dst1 = (u8 __iomem *)((long __force)dst2 & ~(sizeof(u32) - 1));
-
- start_index += pitch_index;
- start_index &= 32 - 1;
- }
- }
-}
-
-static inline void slow_imageblit(const struct fb_image *image, struct fb_info *p,
- u8 __iomem *dst1, u32 fgcolor,
- u32 bgcolor,
- u32 start_index,
- u32 pitch_index)
-{
- u32 shift, color = 0, bpp = p->var.bits_per_pixel;
- u32 __iomem *dst, *dst2;
- u32 val, pitch = p->fix.line_length;
- u32 null_bits = 32 - bpp;
- u32 spitch = (image->width+7)/8;
- const u8 *src = image->data, *s;
- u32 i, j, l;
- u32 bswapmask = fb_compute_bswapmask(p);
-
- dst2 = (u32 __iomem *) dst1;
- fgcolor <<= FB_LEFT_POS(p, bpp);
- bgcolor <<= FB_LEFT_POS(p, bpp);
-
- for (i = image->height; i--; ) {
- shift = val = 0;
- l = 8;
- j = image->width;
- dst = (u32 __iomem *) dst1;
- s = src;
-
- /* write leading bits */
- if (start_index) {
- u32 start_mask = ~fb_shifted_pixels_mask_u32(p,
- start_index, bswapmask);
- val = FB_READL(dst) & start_mask;
- shift = start_index;
- }
-
- while (j--) {
- l--;
- color = (*s & (1 << l)) ? fgcolor : bgcolor;
- val |= FB_SHIFT_HIGH(p, color, shift ^ bswapmask);
-
- /* Did the bitshift spill bits to the next long? */
- if (shift >= null_bits) {
- FB_WRITEL(val, dst++);
- val = (shift == null_bits) ? 0 :
- FB_SHIFT_LOW(p, color, 32 - shift);
- }
- shift += bpp;
- shift &= (32 - 1);
- if (!l) { l = 8; s++; }
- }
-
- /* write trailing bits */
- if (shift) {
- u32 end_mask = fb_shifted_pixels_mask_u32(p, shift,
- bswapmask);
-
- FB_WRITEL((FB_READL(dst) & end_mask) | val, dst);
- }
-
- dst1 += pitch;
- src += spitch;
- if (pitch_index) {
- dst2 += pitch;
- dst1 = (u8 __iomem *)((long __force)dst2 & ~(sizeof(u32) - 1));
- start_index += pitch_index;
- start_index &= 32 - 1;
- }
-
- }
-}
-
-/*
- * fast_imageblit - optimized monochrome color expansion
- *
- * Only if: bits_per_pixel == 8, 16, or 32
- * image->width is divisible by pixel/dword (ppw);
- * fix->line_legth is divisible by 4;
- * beginning and end of a scanline is dword aligned
- */
-static inline void fast_imageblit(const struct fb_image *image, struct fb_info *p,
- u8 __iomem *dst1, u32 fgcolor,
- u32 bgcolor)
-{
- u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel;
- u32 ppw = 32/bpp, spitch = (image->width + 7)/8;
- u32 bit_mask, eorx, shift;
- const char *s = image->data, *src;
- u32 __iomem *dst;
- const u32 *tab = NULL;
- size_t tablen;
- u32 colortab[16];
- int i, j, k;
-
- switch (bpp) {
- case 8:
- tab = fb_be_math(p) ? cfb_tab8_be : cfb_tab8_le;
- tablen = 16;
- break;
- case 16:
- tab = fb_be_math(p) ? cfb_tab16_be : cfb_tab16_le;
- tablen = 4;
- break;
- case 32:
- tab = cfb_tab32;
- tablen = 2;
- break;
- default:
- return;
- }
-
- for (i = ppw-1; i--; ) {
- fgx <<= bpp;
- bgx <<= bpp;
- fgx |= fgcolor;
- bgx |= bgcolor;
- }
-
- bit_mask = (1 << ppw) - 1;
- eorx = fgx ^ bgx;
- k = image->width/ppw;
-
- for (i = 0; i < tablen; ++i)
- colortab[i] = (tab[i] & eorx) ^ bgx;
-
- for (i = image->height; i--; ) {
- dst = (u32 __iomem *)dst1;
- shift = 8;
- src = s;
-
- /*
- * Manually unroll the per-line copying loop for better
- * performance. This works until we processed the last
- * completely filled source byte (inclusive).
- */
- switch (ppw) {
- case 4: /* 8 bpp */
- for (j = k; j >= 2; j -= 2, ++src) {
- FB_WRITEL(colortab[(*src >> 4) & bit_mask], dst++);
- FB_WRITEL(colortab[(*src >> 0) & bit_mask], dst++);
- }
- break;
- case 2: /* 16 bpp */
- for (j = k; j >= 4; j -= 4, ++src) {
- FB_WRITEL(colortab[(*src >> 6) & bit_mask], dst++);
- FB_WRITEL(colortab[(*src >> 4) & bit_mask], dst++);
- FB_WRITEL(colortab[(*src >> 2) & bit_mask], dst++);
- FB_WRITEL(colortab[(*src >> 0) & bit_mask], dst++);
- }
- break;
- case 1: /* 32 bpp */
- for (j = k; j >= 8; j -= 8, ++src) {
- FB_WRITEL(colortab[(*src >> 7) & bit_mask], dst++);
- FB_WRITEL(colortab[(*src >> 6) & bit_mask], dst++);
- FB_WRITEL(colortab[(*src >> 5) & bit_mask], dst++);
- FB_WRITEL(colortab[(*src >> 4) & bit_mask], dst++);
- FB_WRITEL(colortab[(*src >> 3) & bit_mask], dst++);
- FB_WRITEL(colortab[(*src >> 2) & bit_mask], dst++);
- FB_WRITEL(colortab[(*src >> 1) & bit_mask], dst++);
- FB_WRITEL(colortab[(*src >> 0) & bit_mask], dst++);
- }
- break;
- }
-
- /*
- * For image widths that are not a multiple of 8, there
- * are trailing pixels left on the current line. Print
- * them as well.
- */
- for (; j--; ) {
- shift -= ppw;
- FB_WRITEL(colortab[(*src >> shift) & bit_mask], dst++);
- if (!shift) {
- shift = 8;
- ++src;
- }
- }
-
- dst1 += p->fix.line_length;
- s += spitch;
- }
-}
+#include "cfbmem.h"
+#include "fb_imageblit.h"
void cfb_imageblit(struct fb_info *p, const struct fb_image *image)
{
- u32 fgcolor, bgcolor, start_index, bitstart, pitch_index = 0;
- u32 bpl = sizeof(u32), bpp = p->var.bits_per_pixel;
- u32 width = image->width;
- u32 dx = image->dx, dy = image->dy;
- u8 __iomem *dst1;
-
if (p->state != FBINFO_STATE_RUNNING)
return;
if (p->flags & FBINFO_VIRTFB)
- fb_warn_once(p, "Framebuffer is not in I/O address space.");
-
- bitstart = (dy * p->fix.line_length * 8) + (dx * bpp);
- start_index = bitstart & (32 - 1);
- pitch_index = (p->fix.line_length & (bpl - 1)) * 8;
-
- bitstart /= 8;
- bitstart &= ~(bpl - 1);
- dst1 = p->screen_base + bitstart;
+ fb_warn_once(p, "%s: framebuffer is not in I/O address space.\n", __func__);
if (p->fbops->fb_sync)
p->fbops->fb_sync(p);
- if (image->depth == 1) {
- if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
- p->fix.visual == FB_VISUAL_DIRECTCOLOR) {
- fgcolor = ((u32*)(p->pseudo_palette))[image->fg_color];
- bgcolor = ((u32*)(p->pseudo_palette))[image->bg_color];
- } else {
- fgcolor = image->fg_color;
- bgcolor = image->bg_color;
- }
-
- if (32 % bpp == 0 && !start_index && !pitch_index &&
- ((width & (32/bpp-1)) == 0) &&
- bpp >= 8 && bpp <= 32)
- fast_imageblit(image, p, dst1, fgcolor, bgcolor);
- else
- slow_imageblit(image, p, dst1, fgcolor, bgcolor,
- start_index, pitch_index);
- } else
- color_imageblit(image, p, dst1, start_index, pitch_index);
+ fb_imageblit(p, image);
}
-
EXPORT_SYMBOL(cfb_imageblit);
-MODULE_AUTHOR("James Simmons <jsimmons@users.sf.net>");
-MODULE_DESCRIPTION("Generic software accelerated imaging drawing");
+MODULE_AUTHOR("Zsolt Kajtar <soci@c64.rulez.org>");
+MODULE_DESCRIPTION("I/O memory packed pixel framebuffer image draw");
MODULE_LICENSE("GPL");
-
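
Note on the rewrite above: the removed fast_imageblit() expanded monochrome font bitmaps through a small lookup table. For 8 bpp, each 4 source bits select one of 16 precomputed 32-bit words, and (tab[i] & eorx) ^ bgx pre-mixes the foreground and background colours into those words, so the inner loop is a single table read and write per four pixels. A runnable userspace sketch of that table trick at 8 bpp, using the little-endian table from the removed code:

	/* Userspace sketch of the lookup-table colour expansion from fast_imageblit():
	 * each 4 source bits select one of 16 precomputed 32-bit words holding four
	 * pixels of fg/bg colour.
	 */
	#include <stdint.h>
	#include <stdio.h>

	/* little-endian variant of cfb_tab8: bit 3 of the nibble is the first pixel */
	static const uint32_t tab8_le[16] = {
		0x00000000, 0xff000000, 0x00ff0000, 0xffff0000,
		0x0000ff00, 0xff00ff00, 0x00ffff00, 0xffffff00,
		0x000000ff, 0xff0000ff, 0x00ff00ff, 0xffff00ff,
		0x0000ffff, 0xff00ffff, 0x00ffffff, 0xffffffff,
	};

	int main(void)
	{
		uint8_t fg = 0x0f, bg = 0xa0;		/* 8 bpp colour indices */
		uint32_t fgx = 0x01010101u * fg;	/* colour replicated to 4 pixels */
		uint32_t bgx = 0x01010101u * bg;
		uint32_t eorx = fgx ^ bgx;
		uint32_t colortab[16];
		int i;

		for (i = 0; i < 16; i++)		/* pre-mix fg/bg per bit pattern */
			colortab[i] = (tab8_le[i] & eorx) ^ bgx;

		/* source bits 1010 expand to pixels fg, bg, fg, bg in memory order:
		 * prints 0xa00fa00f, i.e. bytes 0x0f 0xa0 0x0f 0xa0 on little endian */
		printf("0x%08x\n", (unsigned int)colortab[0xa]);
		return 0;
	}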
diff --git a/drivers/video/fbdev/core/cfbmem.h b/drivers/video/fbdev/core/cfbmem.h
new file mode 100644
index 000000000000..ce2f5f751ea1
--- /dev/null
+++ b/drivers/video/fbdev/core/cfbmem.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * I/O memory framebuffer access for drawing routines
+ *
+ * Copyright (C) 2025 Zsolt Kajtar (soci@c64.rulez.org)
+ */
+
+/* keeps track of a bit address in framebuffer memory */
+struct fb_address {
+ void __iomem *address;
+ int bits;
+};
+
+/* initialize the bit address pointer to the beginning of the frame buffer */
+static inline struct fb_address fb_address_init(struct fb_info *p)
+{
+ void __iomem *base = p->screen_base;
+ struct fb_address ptr;
+
+ ptr.address = PTR_ALIGN_DOWN(base, BITS_PER_LONG / BITS_PER_BYTE);
+ ptr.bits = (base - ptr.address) * BITS_PER_BYTE;
+ return ptr;
+}
+
+/* framebuffer write access */
+static inline void fb_write_offset(unsigned long val, int offset, const struct fb_address *dst)
+{
+#if BITS_PER_LONG == 32
+ fb_writel(val, dst->address + offset * (BITS_PER_LONG / BITS_PER_BYTE));
+#else
+ fb_writeq(val, dst->address + offset * (BITS_PER_LONG / BITS_PER_BYTE));
+#endif
+}
+
+/* framebuffer read access */
+static inline unsigned long fb_read_offset(int offset, const struct fb_address *src)
+{
+#if BITS_PER_LONG == 32
+ return fb_readl(src->address + offset * (BITS_PER_LONG / BITS_PER_BYTE));
+#else
+ return fb_readq(src->address + offset * (BITS_PER_LONG / BITS_PER_BYTE));
+#endif
+}
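
Note on the new header above: cfbmem.h gives the shared drawing engine a word-granular view of an I/O-memory framebuffer. fb_address_init() rounds screen_base down to a long boundary and remembers the leftover bit offset, and fb_read_offset()/fb_write_offset() then access whole longs by index through fb_readl()/fb_writel() (or the 64-bit variants). A runnable userspace analogue with the same shape, where ordinary loads and stores stand in for the __iomem accessors:

	/* Userspace analogue of the cfbmem.h helpers: an aligned base pointer plus
	 * a bit offset, with word-indexed read/write over plain memory.
	 */
	#include <stdint.h>
	#include <stdio.h>

	struct fb_address {
		unsigned long *address;	/* long-aligned base */
		int bits;		/* bit offset of the real start within *address */
	};

	static struct fb_address fb_address_init(void *screen_base)
	{
		uintptr_t base = (uintptr_t)screen_base;
		uintptr_t aligned = base & ~(uintptr_t)(sizeof(unsigned long) - 1);
		struct fb_address ptr = {
			.address = (unsigned long *)aligned,
			.bits = (int)(base - aligned) * 8,
		};
		return ptr;
	}

	static void fb_write_offset(unsigned long val, int offset, const struct fb_address *dst)
	{
		dst->address[offset] = val;
	}

	static unsigned long fb_read_offset(int offset, const struct fb_address *src)
	{
		return src->address[offset];
	}

	int main(void)
	{
		unsigned long buf[4] = { 0 };
		/* pretend the framebuffer starts one byte into the buffer */
		struct fb_address fb = fb_address_init((char *)buf + 1);

		printf("bit offset: %d\n", fb.bits);	/* 8 */
		fb_write_offset(~0UL, 1, &fb);
		printf("word 1: %#lx\n", fb_read_offset(1, &fb));
		return 0;
	}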
diff --git a/drivers/video/fbdev/core/fb_copyarea.h b/drivers/video/fbdev/core/fb_copyarea.h
new file mode 100644
index 000000000000..53f1d5385c6e
--- /dev/null
+++ b/drivers/video/fbdev/core/fb_copyarea.h
@@ -0,0 +1,405 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Generic bit area copy and twister engine for packed pixel framebuffers
+ *
+ * Rewritten by:
+ * Copyright (C) 2025 Zsolt Kajtar (soci@c64.rulez.org)
+ *
+ * Based on previous work of:
+ * Copyright (C) 1999-2005 James Simmons <jsimmons@www.infradead.org>
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ * Antonino Daplas <adaplas@hotpop.com>
+ * Geert Uytterhoeven
+ * and others
+ *
+ * NOTES:
+ *
+ * Handles native and foreign byte order on both endians, standard and
+ * reverse pixel order in a byte (<8 BPP), word length of 32/64 bits,
+ * bits per pixel from 1 to the word length. Handles line lengths at byte
+ * granularity while maintaining aligned accesses.
+ *
+ * Optimized routines for word aligned copying and byte aligned copying
+ * on reverse pixel framebuffers.
+ */
+#include "fb_draw.h"
+
+/* used when no reversing is necessary */
+static inline unsigned long fb_no_reverse(unsigned long val, struct fb_reverse reverse)
+{
+ return val;
+}
+
+/* modifies the masked area in a word */
+static inline void fb_copy_offset_masked(unsigned long mask, int offset,
+ const struct fb_address *dst,
+ const struct fb_address *src)
+{
+ fb_modify_offset(fb_read_offset(offset, src), mask, offset, dst);
+}
+
+/* copies the whole word */
+static inline void fb_copy_offset(int offset, const struct fb_address *dst,
+ const struct fb_address *src)
+{
+ fb_write_offset(fb_read_offset(offset, src), offset, dst);
+}
+
+/* forward aligned copy */
+static inline void fb_copy_aligned_fwd(const struct fb_address *dst,
+ const struct fb_address *src,
+ int end, struct fb_reverse reverse)
+{
+ unsigned long first, last;
+
+ first = fb_pixel_mask(dst->bits, reverse);
+ last = ~fb_pixel_mask(end & (BITS_PER_LONG-1), reverse);
+
+ /* Same alignment for source and dest */
+ if (end <= BITS_PER_LONG) {
+ /* Single word */
+ last = last ? (last & first) : first;
+
+ /* Trailing bits */
+ if (last == ~0UL)
+ fb_copy_offset(0, dst, src);
+ else
+ fb_copy_offset_masked(last, 0, dst, src);
+ } else {
+ /* Multiple destination words */
+ int offset = first != ~0UL;
+
+ /* Leading bits */
+ if (offset)
+ fb_copy_offset_masked(first, 0, dst, src);
+
+ /* Main chunk */
+ end /= BITS_PER_LONG;
+ while (offset + 4 <= end) {
+ fb_copy_offset(offset + 0, dst, src);
+ fb_copy_offset(offset + 1, dst, src);
+ fb_copy_offset(offset + 2, dst, src);
+ fb_copy_offset(offset + 3, dst, src);
+ offset += 4;
+ }
+ while (offset < end)
+ fb_copy_offset(offset++, dst, src);
+
+ /* Trailing bits */
+ if (last)
+ fb_copy_offset_masked(last, offset, dst, src);
+ }
+}
+
+/* reverse aligned copy */
+static inline void fb_copy_aligned_rev(const struct fb_address *dst,
+ const struct fb_address *src,
+ int end, struct fb_reverse reverse)
+{
+ unsigned long first, last;
+
+ first = fb_pixel_mask(dst->bits, reverse);
+ last = ~fb_pixel_mask(end & (BITS_PER_LONG-1), reverse);
+
+ if (end <= BITS_PER_LONG) {
+ /* Single word */
+ if (last)
+ first &= last;
+ if (first == ~0UL)
+ fb_copy_offset(0, dst, src);
+ else
+ fb_copy_offset_masked(first, 0, dst, src);
+ } else {
+ /* Multiple destination words */
+ int offset = first != ~0UL;
+
+ /* Trailing bits */
+ end /= BITS_PER_LONG;
+
+ if (last)
+ fb_copy_offset_masked(last, end, dst, src);
+
+ /* Main chunk */
+ while (end >= offset + 4) {
+ fb_copy_offset(end - 1, dst, src);
+ fb_copy_offset(end - 2, dst, src);
+ fb_copy_offset(end - 3, dst, src);
+ fb_copy_offset(end - 4, dst, src);
+ end -= 4;
+ }
+ while (end > offset)
+ fb_copy_offset(--end, dst, src);
+
+ /* Leading bits */
+ if (offset)
+ fb_copy_offset_masked(first, 0, dst, src);
+ }
+}
+
+static inline void fb_copy_aligned(struct fb_address *dst, struct fb_address *src,
+ int width, u32 height, unsigned int bits_per_line,
+ struct fb_reverse reverse, bool rev_copy)
+{
+ if (rev_copy)
+ while (height--) {
+ fb_copy_aligned_rev(dst, src, width + dst->bits, reverse);
+ fb_address_backward(dst, bits_per_line);
+ fb_address_backward(src, bits_per_line);
+ }
+ else
+ while (height--) {
+ fb_copy_aligned_fwd(dst, src, width + dst->bits, reverse);
+ fb_address_forward(dst, bits_per_line);
+ fb_address_forward(src, bits_per_line);
+ }
+}
+
+static __always_inline void fb_copy_fwd(const struct fb_address *dst,
+ const struct fb_address *src, int width,
+ unsigned long (*reorder)(unsigned long val,
+ struct fb_reverse reverse),
+ struct fb_reverse reverse)
+{
+ unsigned long first, last;
+ unsigned long d0, d1;
+ int end = dst->bits + width;
+ int shift, left, right;
+
+ first = fb_pixel_mask(dst->bits, reverse);
+ last = ~fb_pixel_mask(end & (BITS_PER_LONG-1), reverse);
+
+ shift = dst->bits - src->bits;
+ right = shift & (BITS_PER_LONG - 1);
+ left = -shift & (BITS_PER_LONG - 1);
+
+ if (end <= BITS_PER_LONG) {
+ /* Single destination word */
+ last = last ? (last & first) : first;
+ if (shift < 0) {
+ d0 = fb_left(reorder(fb_read_offset(-1, src), reverse), left);
+ if (src->bits + width > BITS_PER_LONG)
+ d0 |= fb_right(reorder(fb_read_offset(0, src), reverse), right);
+
+ if (last == ~0UL)
+ fb_write_offset(reorder(d0, reverse), 0, dst);
+ else
+ fb_modify_offset(reorder(d0, reverse), last, 0, dst);
+ } else {
+ d0 = fb_right(reorder(fb_read_offset(0, src), reverse), right);
+ fb_modify_offset(reorder(d0, reverse), last, 0, dst);
+ }
+ } else {
+ /* Multiple destination words */
+ int offset = first != ~0UL;
+
+ /* Leading bits */
+ if (shift < 0)
+ d0 = reorder(fb_read_offset(-1, src), reverse);
+ else
+ d0 = 0;
+
+ /* 2 source words */
+ if (offset) {
+ d1 = reorder(fb_read_offset(0, src), reverse);
+ d0 = fb_left(d0, left) | fb_right(d1, right);
+ fb_modify_offset(reorder(d0, reverse), first, 0, dst);
+ d0 = d1;
+ }
+
+ /* Main chunk */
+ end /= BITS_PER_LONG;
+ if (reorder == fb_no_reverse)
+ while (offset + 4 <= end) {
+ d1 = fb_read_offset(offset + 0, src);
+ d0 = fb_left(d0, left) | fb_right(d1, right);
+ fb_write_offset(d0, offset + 0, dst);
+ d0 = d1;
+ d1 = fb_read_offset(offset + 1, src);
+ d0 = fb_left(d0, left) | fb_right(d1, right);
+ fb_write_offset(d0, offset + 1, dst);
+ d0 = d1;
+ d1 = fb_read_offset(offset + 2, src);
+ d0 = fb_left(d0, left) | fb_right(d1, right);
+ fb_write_offset(d0, offset + 2, dst);
+ d0 = d1;
+ d1 = fb_read_offset(offset + 3, src);
+ d0 = fb_left(d0, left) | fb_right(d1, right);
+ fb_write_offset(d0, offset + 3, dst);
+ d0 = d1;
+ offset += 4;
+ }
+
+ while (offset < end) {
+ d1 = reorder(fb_read_offset(offset, src), reverse);
+ d0 = fb_left(d0, left) | fb_right(d1, right);
+ fb_write_offset(reorder(d0, reverse), offset, dst);
+ d0 = d1;
+ offset++;
+ }
+
+ /* Trailing bits */
+ if (last) {
+ d0 = fb_left(d0, left);
+ if (src->bits + width
+ > offset * BITS_PER_LONG + ((shift < 0) ? BITS_PER_LONG : 0))
+ d0 |= fb_right(reorder(fb_read_offset(offset, src), reverse),
+ right);
+ fb_modify_offset(reorder(d0, reverse), last, offset, dst);
+ }
+ }
+}
+
+static __always_inline void fb_copy_rev(const struct fb_address *dst,
+ const struct fb_address *src, int end,
+ unsigned long (*reorder)(unsigned long val,
+ struct fb_reverse reverse),
+ struct fb_reverse reverse)
+{
+ unsigned long first, last;
+ unsigned long d0, d1;
+ int shift, left, right;
+
+ first = fb_pixel_mask(dst->bits, reverse);
+ last = ~fb_pixel_mask(end & (BITS_PER_LONG-1), reverse);
+
+ shift = dst->bits - src->bits;
+ right = shift & (BITS_PER_LONG-1);
+ left = -shift & (BITS_PER_LONG-1);
+
+ if (end <= BITS_PER_LONG) {
+ /* Single destination word */
+ if (last)
+ first &= last;
+
+ if (shift > 0) {
+ d0 = fb_right(reorder(fb_read_offset(1, src), reverse), right);
+ if (src->bits > left)
+ d0 |= fb_left(reorder(fb_read_offset(0, src), reverse), left);
+ fb_modify_offset(reorder(d0, reverse), first, 0, dst);
+ } else {
+ d0 = fb_left(reorder(fb_read_offset(0, src), reverse), left);
+ if (src->bits + end - dst->bits > BITS_PER_LONG)
+ d0 |= fb_right(reorder(fb_read_offset(1, src), reverse), right);
+ if (first == ~0UL)
+ fb_write_offset(reorder(d0, reverse), 0, dst);
+ else
+ fb_modify_offset(reorder(d0, reverse), first, 0, dst);
+ }
+ } else {
+ /* Multiple destination words */
+ int offset = first != ~0UL;
+
+ end /= BITS_PER_LONG;
+
+ /* 2 source words */
+ if (fb_right(~0UL, right) & last)
+ d0 = fb_right(reorder(fb_read_offset(end + 1, src), reverse), right);
+ else
+ d0 = 0;
+
+ /* Trailing bits */
+ d1 = reorder(fb_read_offset(end, src), reverse);
+ if (last)
+ fb_modify_offset(reorder(fb_left(d1, left) | d0, reverse),
+ last, end, dst);
+ d0 = d1;
+
+ /* Main chunk */
+ if (reorder == fb_no_reverse)
+ while (end >= offset + 4) {
+ d1 = fb_read_offset(end - 1, src);
+ d0 = fb_left(d1, left) | fb_right(d0, right);
+ fb_write_offset(d0, end - 1, dst);
+ d0 = d1;
+ d1 = fb_read_offset(end - 2, src);
+ d0 = fb_left(d1, left) | fb_right(d0, right);
+ fb_write_offset(d0, end - 2, dst);
+ d0 = d1;
+ d1 = fb_read_offset(end - 3, src);
+ d0 = fb_left(d1, left) | fb_right(d0, right);
+ fb_write_offset(d0, end - 3, dst);
+ d0 = d1;
+ d1 = fb_read_offset(end - 4, src);
+ d0 = fb_left(d1, left) | fb_right(d0, right);
+ fb_write_offset(d0, end - 4, dst);
+ d0 = d1;
+ end -= 4;
+ }
+
+ while (end > offset) {
+ end--;
+ d1 = reorder(fb_read_offset(end, src), reverse);
+ d0 = fb_left(d1, left) | fb_right(d0, right);
+ fb_write_offset(reorder(d0, reverse), end, dst);
+ d0 = d1;
+ }
+
+ /* Leading bits */
+ if (offset) {
+ d0 = fb_right(d0, right);
+ if (src->bits > left)
+ d0 |= fb_left(reorder(fb_read_offset(0, src), reverse), left);
+ fb_modify_offset(reorder(d0, reverse), first, 0, dst);
+ }
+ }
+}
+
+static __always_inline void fb_copy(struct fb_address *dst, struct fb_address *src,
+ int width, u32 height, unsigned int bits_per_line,
+ unsigned long (*reorder)(unsigned long val,
+ struct fb_reverse reverse),
+ struct fb_reverse reverse, bool rev_copy)
+{
+ if (rev_copy)
+ while (height--) {
+ int move = src->bits < dst->bits ? -1 : 0;
+
+ fb_address_move_long(src, move);
+ fb_copy_rev(dst, src, width + dst->bits, reorder, reverse);
+ fb_address_backward(dst, bits_per_line);
+ fb_address_backward(src, bits_per_line);
+ fb_address_move_long(src, -move);
+ }
+ else
+ while (height--) {
+ int move = src->bits > dst->bits ? 1 : 0;
+
+ fb_address_move_long(src, move);
+ fb_copy_fwd(dst, src, width, reorder, reverse);
+ fb_address_forward(dst, bits_per_line);
+ fb_address_forward(src, bits_per_line);
+ fb_address_move_long(src, -move);
+ }
+}
+
+static inline void fb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
+{
+ int bpp = p->var.bits_per_pixel;
+ u32 dy = area->dy;
+ u32 sy = area->sy;
+ u32 height = area->height;
+ int width = area->width * bpp;
+ unsigned int bits_per_line = BYTES_TO_BITS(p->fix.line_length);
+ struct fb_reverse reverse = fb_reverse_init(p);
+ struct fb_address dst = fb_address_init(p);
+ struct fb_address src = dst;
+ bool rev_copy = (dy > sy) || (dy == sy && area->dx > area->sx);
+
+ if (rev_copy) {
+ dy += height - 1;
+ sy += height - 1;
+ }
+ fb_address_forward(&dst, dy*bits_per_line + area->dx*bpp);
+ fb_address_forward(&src, sy*bits_per_line + area->sx*bpp);
+
+ if (src.bits == dst.bits)
+ fb_copy_aligned(&dst, &src, width, height, bits_per_line, reverse, rev_copy);
+ else if (!reverse.byte && (!reverse.pixel ||
+ !((src.bits ^ dst.bits) & (BITS_PER_BYTE-1)))) {
+ fb_copy(&dst, &src, width, height, bits_per_line,
+ fb_no_reverse, reverse, rev_copy);
+ } else
+ fb_copy(&dst, &src, width, height, bits_per_line,
+ fb_reverse_long, reverse, rev_copy);
+}
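
As an illustration of the single-word path in fb_copy_aligned_fwd() above, the
following standalone sketch (not part of the patch; it assumes 32-bit longs,
little endian and no byte or pixel reversal) reproduces the mask arithmetic for
a copy of 16 pixels at 1 bpp starting at destination bit 8, applying it with
the same compose operation as fb_comp():

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                /* dst->bits = 8, width = 16, end = 24 <= BITS_PER_LONG: single word */
                uint32_t first = ~0u << 8;         /* fb_pixel_mask(8)   = 0xffffff00 */
                uint32_t last  = ~(~0u << 24);     /* ~fb_pixel_mask(24) = 0x00ffffff */
                uint32_t mask  = last & first;     /* bits to rewrite    = 0x00ffff00 */
                uint32_t dst   = 0xaaaaaaaa, src = 0x55555555;

                /* fb_comp(): take src where mask is set, keep dst elsewhere */
                dst = ((src ^ dst) & mask) ^ dst;
                printf("%08x\n", dst);             /* prints aa5555aa */
                return 0;
        }

In the multi-word paths only the partial leading and trailing words go through
this masked read-modify-write; full words in between are copied directly.
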
diff --git a/drivers/video/fbdev/core/fb_draw.h b/drivers/video/fbdev/core/fb_draw.h
index e0d829873930..8eb13f7b3972 100644
--- a/drivers/video/fbdev/core/fb_draw.h
+++ b/drivers/video/fbdev/core/fb_draw.h
@@ -1,187 +1,163 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Various common functions used by the framebuffer drawing code
+ *
+ * Copyright (C) 2025 Zsolt Kajtar (soci@c64.rulez.org)
+ */
#ifndef _FB_DRAW_H
#define _FB_DRAW_H
-#include <asm/types.h>
-#include <linux/fb.h>
-#include <linux/bug.h>
+/* swap bytes in a long, independent of word size */
+#define swab_long _swab_long(BITS_PER_LONG)
+#define _swab_long(x) __swab_long(x)
+#define __swab_long(x) swab##x
- /*
- * Compose two values, using a bitmask as decision value
- * This is equivalent to (a & mask) | (b & ~mask)
- */
-
-static inline unsigned long
-comp(unsigned long a, unsigned long b, unsigned long mask)
+/* move the address pointer by the number of words */
+static inline void fb_address_move_long(struct fb_address *adr, int offset)
{
- return ((a ^ b) & mask) ^ b;
+ adr->address += offset * (BITS_PER_LONG / BITS_PER_BYTE);
}
- /*
- * Create a pattern with the given pixel's color
- */
+/* move the address pointer forward with the number of bits */
+static inline void fb_address_forward(struct fb_address *adr, unsigned int offset)
+{
+ unsigned int bits = (unsigned int)adr->bits + offset;
+
+ adr->bits = bits & (BITS_PER_LONG - 1u);
+ adr->address += (bits & ~(BITS_PER_LONG - 1u)) / BITS_PER_BYTE;
+}
-#if BITS_PER_LONG == 64
-static inline unsigned long
-pixel_to_pat( u32 bpp, u32 pixel)
+/* move the address pointer backwards by the given number of bits */
+static inline void fb_address_backward(struct fb_address *adr, unsigned int offset)
{
- switch (bpp) {
- case 1:
- return 0xfffffffffffffffful*pixel;
- case 2:
- return 0x5555555555555555ul*pixel;
- case 4:
- return 0x1111111111111111ul*pixel;
- case 8:
- return 0x0101010101010101ul*pixel;
- case 12:
- return 0x1001001001001001ul*pixel;
- case 16:
- return 0x0001000100010001ul*pixel;
- case 24:
- return 0x0001000001000001ul*pixel;
- case 32:
- return 0x0000000100000001ul*pixel;
- default:
- WARN(1, "pixel_to_pat(): unsupported pixelformat %d\n", bpp);
- return 0;
- }
+ int bits = adr->bits - (int)offset;
+
+ adr->bits = bits & (BITS_PER_LONG - 1);
+ if (bits < 0)
+ adr->address -= (adr->bits - bits) / BITS_PER_BYTE;
+ else
+ adr->address += (bits - adr->bits) / BITS_PER_BYTE;
}
-#else
-static inline unsigned long
-pixel_to_pat( u32 bpp, u32 pixel)
+
+/* compose pixels based on mask */
+static inline unsigned long fb_comp(unsigned long set, unsigned long unset, unsigned long mask)
{
- switch (bpp) {
- case 1:
- return 0xfffffffful*pixel;
- case 2:
- return 0x55555555ul*pixel;
- case 4:
- return 0x11111111ul*pixel;
- case 8:
- return 0x01010101ul*pixel;
- case 12:
- return 0x01001001ul*pixel;
- case 16:
- return 0x00010001ul*pixel;
- case 24:
- return 0x01000001ul*pixel;
- case 32:
- return 0x00000001ul*pixel;
- default:
- WARN(1, "pixel_to_pat(): unsupported pixelformat %d\n", bpp);
- return 0;
- }
+ return ((set ^ unset) & mask) ^ unset;
}
-#endif
-#ifdef CONFIG_FB_CFB_REV_PIXELS_IN_BYTE
-#if BITS_PER_LONG == 64
-#define REV_PIXELS_MASK1 0x5555555555555555ul
-#define REV_PIXELS_MASK2 0x3333333333333333ul
-#define REV_PIXELS_MASK4 0x0f0f0f0f0f0f0f0ful
-#else
-#define REV_PIXELS_MASK1 0x55555555ul
-#define REV_PIXELS_MASK2 0x33333333ul
-#define REV_PIXELS_MASK4 0x0f0f0f0ful
-#endif
+/* framebuffer read-modify-write access for replacing bits in the mask */
+static inline void fb_modify_offset(unsigned long val, unsigned long mask,
+ int offset, const struct fb_address *dst)
+{
+ fb_write_offset(fb_comp(val, fb_read_offset(offset, dst), mask), offset, dst);
+}
-static inline unsigned long fb_rev_pixels_in_long(unsigned long val,
- u32 bswapmask)
+/*
+ * get current palette, if applicable for visual
+ *
+ * The pseudo color table entries (and colors) are right justified and in the
+ * same byte order as they are expected to be placed into natively ordered
+ * framebuffer memory. What that means:
+ *
+ * Expected bytes in framebuffer memory (in native order):
+ * RR GG BB RR GG BB RR GG BB ...
+ *
+ * Pseudo palette entry on little endian arch:
+ * RR | GG << 8 | BB << 16
+ *
+ * Pseudo palette entry on a big endian arch:
+ * RR << 16 | GG << 8 | BB
+ */
+static inline const u32 *fb_palette(struct fb_info *info)
{
- if (bswapmask & 1)
- val = comp(val >> 1, val << 1, REV_PIXELS_MASK1);
- if (bswapmask & 2)
- val = comp(val >> 2, val << 2, REV_PIXELS_MASK2);
- if (bswapmask & 3)
- val = comp(val >> 4, val << 4, REV_PIXELS_MASK4);
- return val;
+ return (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR) ? info->pseudo_palette : NULL;
}
-static inline u32 fb_shifted_pixels_mask_u32(struct fb_info *p, u32 index,
- u32 bswapmask)
+/* move pixels right on screen when framebuffer is in native order */
+static inline unsigned long fb_right(unsigned long value, int index)
{
- u32 mask;
-
- if (!bswapmask) {
- mask = FB_SHIFT_HIGH(p, ~(u32)0, index);
- } else {
- mask = 0xff << FB_LEFT_POS(p, 8);
- mask = FB_SHIFT_LOW(p, mask, index & (bswapmask)) & mask;
- mask = FB_SHIFT_HIGH(p, mask, index & ~(bswapmask));
-#if defined(__i386__) || defined(__x86_64__)
- /* Shift argument is limited to 0 - 31 on x86 based CPU's */
- if(index + bswapmask < 32)
+#ifdef __LITTLE_ENDIAN
+ return value << index;
+#else
+ return value >> index;
#endif
- mask |= FB_SHIFT_HIGH(p, ~(u32)0,
- (index + bswapmask) & ~(bswapmask));
- }
- return mask;
}
-static inline unsigned long fb_shifted_pixels_mask_long(struct fb_info *p,
- u32 index,
- u32 bswapmask)
+/* move pixels left on screen when framebuffer is in native order */
+static inline unsigned long fb_left(unsigned long value, int index)
{
- unsigned long mask;
-
- if (!bswapmask) {
- mask = FB_SHIFT_HIGH(p, ~0UL, index);
- } else {
- mask = 0xff << FB_LEFT_POS(p, 8);
- mask = FB_SHIFT_LOW(p, mask, index & (bswapmask)) & mask;
- mask = FB_SHIFT_HIGH(p, mask, index & ~(bswapmask));
-#if defined(__i386__) || defined(__x86_64__)
- /* Shift argument is limited to 0 - 31 on x86 based CPU's */
- if(index + bswapmask < BITS_PER_LONG)
+#ifdef __LITTLE_ENDIAN
+ return value >> index;
+#else
+ return value << index;
#endif
- mask |= FB_SHIFT_HIGH(p, ~0UL,
- (index + bswapmask) & ~(bswapmask));
- }
- return mask;
}
+/* reversal options */
+struct fb_reverse {
+ bool byte, pixel;
+};
-static inline u32 fb_compute_bswapmask(struct fb_info *info)
+/* reverse bits of each byte in a long */
+static inline unsigned long fb_reverse_bits_long(unsigned long val)
{
- u32 bswapmask = 0;
- unsigned bpp = info->var.bits_per_pixel;
-
- if ((bpp < 8) && (info->var.nonstd & FB_NONSTD_REV_PIX_IN_B)) {
- /*
- * Reversed order of pixel layout in bytes
- * works only for 1, 2 and 4 bpp
- */
- bswapmask = 7 - bpp + 1;
- }
- return bswapmask;
+#if defined(CONFIG_HAVE_ARCH_BITREVERSE) && BITS_PER_LONG == 32
+ return bitrev8x4(val);
+#else
+ val = fb_comp(val >> 1, val << 1, ~0UL / 3);
+ val = fb_comp(val >> 2, val << 2, ~0UL / 5);
+ return fb_comp(val >> 4, val << 4, ~0UL / 17);
+#endif
}
-#else /* CONFIG_FB_CFB_REV_PIXELS_IN_BYTE */
-
-static inline unsigned long fb_rev_pixels_in_long(unsigned long val,
- u32 bswapmask)
+/* apply byte and bit reversals as necessary */
+static inline unsigned long fb_reverse_long(unsigned long val,
+ struct fb_reverse reverse)
{
- return val;
+ if (reverse.pixel)
+ val = fb_reverse_bits_long(val);
+ return reverse.byte ? swab_long(val) : val;
}
-#define fb_shifted_pixels_mask_u32(p, i, b) FB_SHIFT_HIGH((p), ~(u32)0, (i))
-#define fb_shifted_pixels_mask_long(p, i, b) FB_SHIFT_HIGH((p), ~0UL, (i))
-#define fb_compute_bswapmask(...) 0
-
-#endif /* CONFIG_FB_CFB_REV_PIXELS_IN_BYTE */
-
-#define cpu_to_le_long _cpu_to_le_long(BITS_PER_LONG)
-#define _cpu_to_le_long(x) __cpu_to_le_long(x)
-#define __cpu_to_le_long(x) cpu_to_le##x
+/* calculate a pixel mask for the given reversal */
+static inline unsigned long fb_pixel_mask(int index, struct fb_reverse reverse)
+{
+#ifdef FB_REV_PIXELS_IN_BYTE
+ if (reverse.byte)
+ return reverse.pixel ? fb_left(~0UL, index) : swab_long(fb_right(~0UL, index));
+ else
+ return reverse.pixel ? swab_long(fb_left(~0UL, index)) : fb_right(~0UL, index);
+#else
+ return reverse.byte ? swab_long(fb_right(~0UL, index)) : fb_right(~0UL, index);
+#endif
+}
-#define le_long_to_cpu _le_long_to_cpu(BITS_PER_LONG)
-#define _le_long_to_cpu(x) __le_long_to_cpu(x)
-#define __le_long_to_cpu(x) le##x##_to_cpu
-static inline unsigned long rolx(unsigned long word, unsigned int shift, unsigned int x)
+/*
+ * initialise reversals based on info
+ *
+ * Normally the first byte is the low byte on little endian and the high byte
+ * on big endian. If it's the other way around then that's reverse byte order.
+ *
+ * Normally the first pixel is the LSB on little endian and the MSB on big
+ * endian. If that's not the case, that's reverse pixel order.
+ */
+static inline struct fb_reverse fb_reverse_init(struct fb_info *info)
{
- return (word << shift) | (word >> (x - shift));
+ struct fb_reverse reverse;
+#ifdef __LITTLE_ENDIAN
+ reverse.byte = fb_be_math(info) != 0;
+#else
+ reverse.byte = fb_be_math(info) == 0;
+#endif
+#ifdef FB_REV_PIXELS_IN_BYTE
+ reverse.pixel = info->var.bits_per_pixel < BITS_PER_BYTE
+ && (info->var.nonstd & FB_NONSTD_REV_PIX_IN_B);
+#else
+ reverse.pixel = false;
+#endif
+ return reverse;
}
#endif /* FB_DRAW_H */
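
The swap-adjacent-groups trick that fb_reverse_bits_long() builds from
fb_comp() and the ~0UL/3, ~0UL/5 and ~0UL/17 constants can be checked in
isolation; this standalone sketch (not part of the patch, 32-bit values
assumed) reverses the bit order within each byte of a word:

        #include <stdio.h>
        #include <stdint.h>

        static uint32_t comp(uint32_t set, uint32_t unset, uint32_t mask)
        {
                return ((set ^ unset) & mask) ^ unset; /* set where mask is 1 */
        }

        static uint32_t reverse_bits_in_bytes(uint32_t v)
        {
                v = comp(v >> 1, v << 1, ~0u / 3);     /* swap neighbouring bits */
                v = comp(v >> 2, v << 2, ~0u / 5);     /* swap 2-bit pairs */
                return comp(v >> 4, v << 4, ~0u / 17); /* swap nibbles */
        }

        int main(void)
        {
                printf("%08x\n", reverse_bits_in_bytes(0x01020408)); /* 80402010 */
                return 0;
        }

On 32-bit builds with CONFIG_HAVE_ARCH_BITREVERSE the header uses bitrev8x4()
instead, which performs the same per-byte reversal.
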
diff --git a/drivers/video/fbdev/core/fb_fillrect.h b/drivers/video/fbdev/core/fb_fillrect.h
new file mode 100644
index 000000000000..66042e534de7
--- /dev/null
+++ b/drivers/video/fbdev/core/fb_fillrect.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Generic bit area filler and twister engine for packed pixel framebuffers
+ *
+ * Rewritten by:
+ * Copyright (C) 2025 Zsolt Kajtar (soci@c64.rulez.org)
+ *
+ * Based on earlier work of:
+ * Copyright (C) 2000 James Simmons (jsimmons@linux-fbdev.org)
+ * Michal Januszewski <spock@gentoo.org>
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ * Antonino A. Daplas <adaplas@gmail.com>
+ * Geert Uytterhoeven
+ * and others
+ *
+ * NOTES:
+ *
+ * Handles native and foreign byte order on both endians, standard and
+ * reverse pixel order in a byte (<8 BPP), word length of 32/64 bits,
+ * bits per pixel from 1 to the word length. Handles line lengths at byte
+ * granularity while maintaining aligned accesses.
+ *
+ * Optimized path for power of two bits per pixel modes.
+ */
+#include "fb_draw.h"
+
+/* inverts bits at a given offset */
+static inline void fb_invert_offset(unsigned long pat, int offset, const struct fb_address *dst)
+{
+ fb_write_offset(fb_read_offset(offset, dst) ^ pat, offset, dst);
+}
+
+/* state for pattern generator and whether swapping is necessary */
+struct fb_pattern {
+ unsigned long pixels;
+ int left, right;
+ struct fb_reverse reverse;
+};
+
+/* used to get the pattern in native order */
+static unsigned long fb_pattern_get(struct fb_pattern *pattern)
+{
+ return pattern->pixels;
+}
+
+/* used to get the pattern in reverse order */
+static unsigned long fb_pattern_get_reverse(struct fb_pattern *pattern)
+{
+ return swab_long(pattern->pixels);
+}
+
+/* next static pattern */
+static void fb_pattern_static(struct fb_pattern *pattern)
+{
+ /* nothing to do */
+}
+
+/* next rotating pattern */
+static void fb_pattern_rotate(struct fb_pattern *pattern)
+{
+ pattern->pixels = fb_left(pattern->pixels, pattern->left)
+ | fb_right(pattern->pixels, pattern->right);
+}
+
+#define FB_PAT(i) (((1UL<<(BITS_PER_LONG-1)/(i)*(i))/((1<<(i))-1)<<(i))|1)
+
+/* create the filling pattern from a given color */
+static unsigned long pixel_to_pat(int bpp, u32 color)
+{
+ static const unsigned long mulconst[BITS_PER_LONG/4] = {
+ 0, ~0UL, FB_PAT(2), FB_PAT(3),
+ FB_PAT(4), FB_PAT(5), FB_PAT(6), FB_PAT(7),
+#if BITS_PER_LONG == 64
+ FB_PAT(8), FB_PAT(9), FB_PAT(10), FB_PAT(11),
+ FB_PAT(12), FB_PAT(13), FB_PAT(14), FB_PAT(15),
+#endif
+ };
+ unsigned long pattern;
+
+ switch (bpp) {
+ case 0 ... BITS_PER_LONG/4-1:
+ pattern = mulconst[bpp] * color;
+ break;
+ case BITS_PER_LONG/4 ... BITS_PER_LONG/2-1:
+ pattern = color;
+ pattern = pattern | pattern << bpp;
+ pattern = pattern | pattern << bpp*2;
+ break;
+ case BITS_PER_LONG/2 ... BITS_PER_LONG-1:
+ pattern = color;
+ pattern = pattern | pattern << bpp;
+ break;
+ default:
+ pattern = color;
+ break;
+ }
+#ifndef __LITTLE_ENDIAN
+ pattern <<= (BITS_PER_LONG % bpp);
+ pattern |= pattern >> bpp;
+#endif
+ return pattern;
+}
+
+/* overwrite bits according to a pattern in a line */
+static __always_inline void bitfill(const struct fb_address *dst,
+ struct fb_pattern *pattern,
+ unsigned long (*get)(struct fb_pattern *pattern),
+ void (*rotate)(struct fb_pattern *pattern),
+ int end)
+{
+ unsigned long first, last;
+
+ end += dst->bits;
+ first = fb_pixel_mask(dst->bits, pattern->reverse);
+ last = ~fb_pixel_mask(end & (BITS_PER_LONG-1), pattern->reverse);
+
+ if (end <= BITS_PER_LONG) {
+ last = last ? (last & first) : first;
+ first = get(pattern);
+ if (last == ~0UL)
+ fb_write_offset(first, 0, dst);
+ else if (last)
+ fb_modify_offset(first, last, 0, dst);
+ } else {
+ int offset = first != ~0UL;
+
+ if (offset) {
+ fb_modify_offset(get(pattern), first, 0, dst);
+ rotate(pattern);
+ }
+ end /= BITS_PER_LONG;
+ for (; offset + 4 <= end; offset += 4) {
+ fb_write_offset(get(pattern), offset + 0, dst);
+ rotate(pattern);
+ fb_write_offset(get(pattern), offset + 1, dst);
+ rotate(pattern);
+ fb_write_offset(get(pattern), offset + 2, dst);
+ rotate(pattern);
+ fb_write_offset(get(pattern), offset + 3, dst);
+ rotate(pattern);
+ }
+ while (offset < end) {
+ fb_write_offset(get(pattern), offset++, dst);
+ rotate(pattern);
+ }
+
+ if (last)
+ fb_modify_offset(get(pattern), last, offset, dst);
+ }
+}
+
+/* inverts bits according to a pattern in a line */
+static __always_inline void bitinvert(const struct fb_address *dst,
+ struct fb_pattern *pattern,
+ unsigned long (*get)(struct fb_pattern *pattern),
+ void (*rotate)(struct fb_pattern *pattern),
+ int end)
+{
+ unsigned long first, last;
+ int offset;
+
+ end += dst->bits;
+ first = fb_pixel_mask(dst->bits, pattern->reverse);
+ last = ~fb_pixel_mask(end & (BITS_PER_LONG-1), pattern->reverse);
+
+ if (end <= BITS_PER_LONG) {
+ offset = 0;
+ last = last ? (last & first) : first;
+ } else {
+ offset = first != ~0UL;
+
+ if (offset) {
+ first &= get(pattern);
+ if (first)
+ fb_invert_offset(first, 0, dst);
+ rotate(pattern);
+ }
+
+ end /= BITS_PER_LONG;
+ for (; offset + 4 <= end; offset += 4) {
+ fb_invert_offset(get(pattern), offset + 0, dst);
+ rotate(pattern);
+ fb_invert_offset(get(pattern), offset + 1, dst);
+ rotate(pattern);
+ fb_invert_offset(get(pattern), offset + 2, dst);
+ rotate(pattern);
+ fb_invert_offset(get(pattern), offset + 3, dst);
+ rotate(pattern);
+ }
+ while (offset < end) {
+ fb_invert_offset(get(pattern), offset++, dst);
+ rotate(pattern);
+ }
+ }
+
+ last &= get(pattern);
+ if (last)
+ fb_invert_offset(last, offset, dst);
+}
+
+/* pattern doesn't change. 1, 2, 4, 8, 16, 32, 64 bpp */
+static inline void fb_fillrect_static(const struct fb_fillrect *rect, int bpp,
+ struct fb_address *dst, struct fb_pattern *pattern,
+ unsigned int bits_per_line)
+{
+ u32 height = rect->height;
+ int width = rect->width * bpp;
+
+ if (bpp > 8 && pattern->reverse.byte)
+ pattern->pixels = swab_long(pattern->pixels);
+
+ if (rect->rop == ROP_XOR)
+ while (height--) {
+ bitinvert(dst, pattern, fb_pattern_get, fb_pattern_static, width);
+ fb_address_forward(dst, bits_per_line);
+ }
+ else
+ while (height--) {
+ bitfill(dst, pattern, fb_pattern_get, fb_pattern_static, width);
+ fb_address_forward(dst, bits_per_line);
+ }
+}
+
+/* rotate pattern to the correct position */
+static inline unsigned long fb_rotate(unsigned long pattern, int shift, int bpp)
+{
+ shift %= bpp;
+ return fb_right(pattern, shift) | fb_left(pattern, bpp - shift);
+}
+
+/* rotating pattern, for example 24 bpp */
+static __always_inline void fb_fillrect_rotating(const struct fb_fillrect *rect,
+ int bpp, struct fb_address *dst,
+ struct fb_pattern *pattern,
+ unsigned long (*get)(struct fb_pattern *pattern),
+ unsigned int bits_per_line)
+{
+ unsigned long pat = pattern->pixels;
+ u32 height = rect->height;
+ int width = rect->width * bpp;
+
+ if (rect->rop == ROP_XOR)
+ while (height--) {
+ pattern->pixels = fb_rotate(pat, dst->bits, bpp);
+ bitinvert(dst, pattern, get, fb_pattern_rotate, width);
+ fb_address_forward(dst, bits_per_line);
+ }
+ else
+ while (height--) {
+ pattern->pixels = fb_rotate(pat, dst->bits, bpp);
+ bitfill(dst, pattern, get, fb_pattern_rotate, width);
+ fb_address_forward(dst, bits_per_line);
+ }
+}
+
+static inline void fb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
+{
+ int bpp = p->var.bits_per_pixel;
+ unsigned int bits_per_line = BYTES_TO_BITS(p->fix.line_length);
+ const u32 *palette = fb_palette(p);
+ struct fb_address dst = fb_address_init(p);
+ struct fb_pattern pattern;
+
+ fb_address_forward(&dst, rect->dy * bits_per_line + rect->dx * bpp);
+
+ pattern.pixels = pixel_to_pat(bpp, palette ? palette[rect->color] : rect->color);
+ pattern.reverse = fb_reverse_init(p);
+ pattern.left = BITS_PER_LONG % bpp;
+ if (pattern.left) {
+ pattern.right = bpp - pattern.left;
+ if (pattern.reverse.byte)
+ fb_fillrect_rotating(rect, bpp, &dst, &pattern,
+ fb_pattern_get_reverse, bits_per_line);
+ else
+ fb_fillrect_rotating(rect, bpp, &dst, &pattern,
+ fb_pattern_get, bits_per_line);
+ } else
+ fb_fillrect_static(rect, bpp, &dst, &pattern, bits_per_line);
+}
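
A quick check of the pattern replication done by pixel_to_pat() above: with
32-bit longs the precomputed multiplier FB_PAT(4) equals 0x11111111, so 4 bpp
uses a multiply, while 8 bpp takes the shift-and-or branch; both reproduce the
values of the old per-bpp lookup table. Standalone sketch (little endian
assumed, colors are arbitrary examples):

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint32_t pat4 = 0x11111111u * 0x5;  /* 4 bpp, color 0x5 -> 0x55555555 */
                uint32_t pat8 = 0xab;               /* 8 bpp, color 0xab */

                pat8 |= pat8 << 8;                  /* 0x0000abab */
                pat8 |= pat8 << 16;                 /* 0xabababab */
                printf("%08x %08x\n", pat4, pat8);
                return 0;
        }
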
diff --git a/drivers/video/fbdev/core/fb_imageblit.h b/drivers/video/fbdev/core/fb_imageblit.h
new file mode 100644
index 000000000000..3b2bb4946505
--- /dev/null
+++ b/drivers/video/fbdev/core/fb_imageblit.h
@@ -0,0 +1,495 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Generic bitmap / 8 bpp image bitstreamer for packed pixel framebuffers
+ *
+ * Rewritten by:
+ * Copyright (C) 2025 Zsolt Kajtar (soci@c64.rulez.org)
+ *
+ * Based on previous work of:
+ * Copyright (C) June 1999 James Simmons
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ * Antonino A. Daplas <adaplas@gmail.com>
+ * and others
+ *
+ * NOTES:
+ *
+ * Handles native and foreign byte order on both endians, standard and
+ * reverse pixel order in a byte (<8 BPP), word length of 32/64 bits,
+ * bits per pixel from 1 to the word length. Handles line lengths at byte
+ * granularity while maintaining aligned accesses.
+ *
+ * Optimized routines for word aligned 1, 2, 4 pixel per word for high
+ * bpp modes and 4 pixel at a time operation for low bpp.
+ *
+ * The color image is expected to be one byte per pixel, and values must not
+ * exceed the bit depth, or the size of the pseudo palette when one is used.
+ */
+#include "fb_draw.h"
+
+/* bitmap image iterator, one pixel at a time */
+struct fb_bitmap_iter {
+ const u8 *data;
+ unsigned long colors[2];
+ int width, i;
+};
+
+static bool fb_bitmap_image(void *iterator, unsigned long *pixels, int *bits)
+{
+ struct fb_bitmap_iter *iter = iterator;
+
+ if (iter->i < iter->width) {
+ int bit = ~iter->i & (BITS_PER_BYTE-1);
+ int byte = iter->i++ / BITS_PER_BYTE;
+
+ *pixels = iter->colors[(iter->data[byte] >> bit) & 1];
+ return true;
+ }
+ iter->data += BITS_TO_BYTES(iter->width);
+ iter->i = 0;
+ return false;
+}
+
+/* color image iterator, one pixel at a time */
+struct fb_color_iter {
+ const u8 *data;
+ const u32 *palette;
+ struct fb_reverse reverse;
+ int shift;
+ int width, i;
+};
+
+static bool fb_color_image(void *iterator, unsigned long *pixels, int *bits)
+{
+ struct fb_color_iter *iter = iterator;
+
+ if (iter->i < iter->width) {
+ unsigned long color = iter->data[iter->i++];
+
+ if (iter->palette)
+ color = iter->palette[color];
+ *pixels = color << iter->shift;
+ if (iter->reverse.pixel)
+ *pixels = fb_reverse_bits_long(*pixels);
+ return true;
+ }
+ iter->data += iter->width;
+ iter->i = 0;
+ return false;
+}
+
+/* bitmap image iterator, 4 pixels at a time */
+struct fb_bitmap4x_iter {
+ const u8 *data;
+ u32 fgxcolor, bgcolor;
+ int width, i;
+ const u32 *expand;
+ int bpp;
+ bool top;
+};
+
+static bool fb_bitmap4x_image(void *iterator, unsigned long *pixels, int *bits)
+{
+ struct fb_bitmap4x_iter *iter = iterator;
+ u8 data;
+
+ if (iter->i >= BITS_PER_BYTE/2) {
+ iter->i -= BITS_PER_BYTE/2;
+ iter->top = !iter->top;
+ if (iter->top)
+ data = *iter->data++ >> BITS_PER_BYTE/2;
+ else
+ data = iter->data[-1] & ((1 << BITS_PER_BYTE/2)-1);
+ } else if (iter->i != 0) {
+ *bits = iter->bpp * iter->i;
+ if (iter->top)
+ data = iter->data[-1] & ((1 << BITS_PER_BYTE/2)-1);
+ else
+ data = *iter->data++ >> BITS_PER_BYTE/2;
+#ifndef __LITTLE_ENDIAN
+ data >>= BITS_PER_BYTE/2 - iter->i;
+#endif
+ iter->i = 0;
+ } else {
+ *bits = iter->bpp * BITS_PER_BYTE/2;
+ iter->i = iter->width;
+ iter->top = false;
+ return false;
+ }
+ *pixels = (iter->fgxcolor & iter->expand[data]) ^ iter->bgcolor;
+#ifndef __LITTLE_ENDIAN
+ *pixels <<= BITS_PER_LONG - *bits;
+#endif
+ return true;
+}
+
+/* draw a line a group of pixels at a time */
+static __always_inline void fb_bitblit(bool (*get)(void *iter, unsigned long *pixels,
+ int *bits),
+ void *iter, int bits, struct fb_address *dst,
+ struct fb_reverse reverse)
+{
+ unsigned long pixels, val, mask, old;
+ int offset = 0;
+ int shift = dst->bits;
+
+ if (shift) {
+ old = fb_read_offset(0, dst);
+ val = fb_reverse_long(old, reverse);
+ val &= ~fb_right(~0UL, shift);
+ } else {
+ old = 0;
+ val = 0;
+ }
+
+ while (get(iter, &pixels, &bits)) {
+ val |= fb_right(pixels, shift);
+ shift += bits;
+
+ if (shift < BITS_PER_LONG)
+ continue;
+
+ val = fb_reverse_long(val, reverse);
+ fb_write_offset(val, offset++, dst);
+ shift &= BITS_PER_LONG - 1;
+ val = !shift ? 0 : fb_left(pixels, bits - shift);
+ }
+
+ if (shift) {
+ mask = ~fb_pixel_mask(shift, reverse);
+ val = fb_reverse_long(val, reverse);
+ if (offset || !dst->bits)
+ old = fb_read_offset(offset, dst);
+ fb_write_offset(fb_comp(val, old, mask), offset, dst);
+ }
+}
+
+/* draw a color image a pixel at a time */
+static inline void fb_color_imageblit(const struct fb_image *image, struct fb_address *dst,
+ unsigned int bits_per_line, const u32 *palette, int bpp,
+ struct fb_reverse reverse)
+{
+ struct fb_color_iter iter;
+ u32 height;
+
+ iter.data = (const u8 *)image->data;
+ iter.palette = palette;
+ iter.reverse = reverse;
+#ifdef __LITTLE_ENDIAN
+ if (reverse.pixel)
+ iter.shift = BITS_PER_BYTE - bpp;
+ else
+ iter.shift = 0;
+#else
+ if (reverse.pixel)
+ iter.shift = BITS_PER_LONG - BITS_PER_BYTE;
+ else
+ iter.shift = BITS_PER_LONG - bpp;
+#endif
+ iter.width = image->width;
+ iter.i = 0;
+
+ height = image->height;
+ while (height--) {
+ fb_bitblit(fb_color_image, &iter, bpp, dst, reverse);
+ fb_address_forward(dst, bits_per_line);
+ }
+}
+
+#ifdef __LITTLE_ENDIAN
+#define FB_GEN(a, b) (((a)/8+(((a)&4)<<((b)-2)) \
+ +(((a)&2)<<((b)*2-1))+(((a)&1u)<<((b)*3)))*((1<<(b))-1))
+#define FB_GEN1(a) ((a)/8+((a)&4)/2+((a)&2)*2+((a)&1)*8)
+#else
+#define FB_GEN(a, b) (((((a)/8)<<((b)*3))+(((a)&4)<<((b)*2-2)) \
+ +(((a)&2)<<(b-1))+((a)&1u))*((1<<(b))-1))
+#define FB_GEN1(a) (a)
+#endif
+
+#define FB_GENx(a) { FB_GEN(0, (a)), FB_GEN(1, (a)), FB_GEN(2, (a)), FB_GEN(3, (a)), \
+ FB_GEN(4, (a)), FB_GEN(5, (a)), FB_GEN(6, (a)), FB_GEN(7, (a)), \
+ FB_GEN(8, (a)), FB_GEN(9, (a)), FB_GEN(10, (a)), FB_GEN(11, (a)), \
+ FB_GEN(12, (a)), FB_GEN(13, (a)), FB_GEN(14, (a)), FB_GEN(15, (a)) }
+
+/* draw a 2 color image four pixels at a time (for 1-8 bpp only) */
+static inline void fb_bitmap4x_imageblit(const struct fb_image *image, struct fb_address *dst,
+ unsigned long fgcolor, unsigned long bgcolor, int bpp,
+ unsigned int bits_per_line, struct fb_reverse reverse)
+{
+ static const u32 mul[BITS_PER_BYTE] = {
+ 0xf, 0x55, 0x249, 0x1111, 0x8421, 0x41041, 0x204081, 0x01010101
+ };
+ static const u32 expand[BITS_PER_BYTE][1 << 4] = {
+ {
+ FB_GEN1(0), FB_GEN1(1), FB_GEN1(2), FB_GEN1(3),
+ FB_GEN1(4), FB_GEN1(5), FB_GEN1(6), FB_GEN1(7),
+ FB_GEN1(8), FB_GEN1(9), FB_GEN1(10), FB_GEN1(11),
+ FB_GEN1(12), FB_GEN1(13), FB_GEN1(14), FB_GEN1(15)
+ },
+ FB_GENx(2), FB_GENx(3), FB_GENx(4),
+ FB_GENx(5), FB_GENx(6), FB_GENx(7), FB_GENx(8),
+ };
+ struct fb_bitmap4x_iter iter;
+ u32 height;
+
+ iter.data = (const u8 *)image->data;
+ if (reverse.pixel) {
+ fgcolor = fb_reverse_bits_long(fgcolor << (BITS_PER_BYTE - bpp));
+ bgcolor = fb_reverse_bits_long(bgcolor << (BITS_PER_BYTE - bpp));
+ }
+ iter.fgxcolor = (fgcolor ^ bgcolor) * mul[bpp-1];
+ iter.bgcolor = bgcolor * mul[bpp-1];
+ iter.width = image->width;
+ iter.i = image->width;
+ iter.expand = expand[bpp-1];
+ iter.bpp = bpp;
+ iter.top = false;
+
+ height = image->height;
+ while (height--) {
+ fb_bitblit(fb_bitmap4x_image, &iter, bpp * BITS_PER_BYTE/2, dst, reverse);
+ fb_address_forward(dst, bits_per_line);
+ }
+}
+
+/* draw a bitmap image 1 pixel at a time (for >8 bpp) */
+static inline void fb_bitmap1x_imageblit(const struct fb_image *image, struct fb_address *dst,
+ unsigned long fgcolor, unsigned long bgcolor, int bpp,
+ unsigned int bits_per_line, struct fb_reverse reverse)
+{
+ struct fb_bitmap_iter iter;
+ u32 height;
+
+ iter.colors[0] = bgcolor;
+ iter.colors[1] = fgcolor;
+#ifndef __LITTLE_ENDIAN
+ iter.colors[0] <<= BITS_PER_LONG - bpp;
+ iter.colors[1] <<= BITS_PER_LONG - bpp;
+#endif
+ iter.data = (const u8 *)image->data;
+ iter.width = image->width;
+ iter.i = 0;
+
+ height = image->height;
+ while (height--) {
+ fb_bitblit(fb_bitmap_image, &iter, bpp, dst, reverse);
+ fb_address_forward(dst, bits_per_line);
+ }
+}
+
+/* one pixel per word, 64/32 bpp blitting */
+static inline void fb_bitmap_1ppw(const struct fb_image *image, struct fb_address *dst,
+ unsigned long fgcolor, unsigned long bgcolor,
+ int words_per_line, struct fb_reverse reverse)
+{
+ unsigned long tab[2];
+ const u8 *src = (u8 *)image->data;
+ int width = image->width;
+ int offset;
+ u32 height;
+
+ if (reverse.byte) {
+ tab[0] = swab_long(bgcolor);
+ tab[1] = swab_long(fgcolor);
+ } else {
+ tab[0] = bgcolor;
+ tab[1] = fgcolor;
+ }
+
+ height = image->height;
+ while (height--) {
+ for (offset = 0; offset + 8 <= width; offset += 8) {
+ unsigned int srcbyte = *src++;
+
+ fb_write_offset(tab[(srcbyte >> 7) & 1], offset + 0, dst);
+ fb_write_offset(tab[(srcbyte >> 6) & 1], offset + 1, dst);
+ fb_write_offset(tab[(srcbyte >> 5) & 1], offset + 2, dst);
+ fb_write_offset(tab[(srcbyte >> 4) & 1], offset + 3, dst);
+ fb_write_offset(tab[(srcbyte >> 3) & 1], offset + 4, dst);
+ fb_write_offset(tab[(srcbyte >> 2) & 1], offset + 5, dst);
+ fb_write_offset(tab[(srcbyte >> 1) & 1], offset + 6, dst);
+ fb_write_offset(tab[(srcbyte >> 0) & 1], offset + 7, dst);
+ }
+
+ if (offset < width) {
+ unsigned int srcbyte = *src++;
+
+ while (offset < width) {
+ fb_write_offset(tab[(srcbyte >> 7) & 1], offset, dst);
+ srcbyte <<= 1;
+ offset++;
+ }
+ }
+ fb_address_move_long(dst, words_per_line);
+ }
+}
+
+static inline unsigned long fb_pack(unsigned long left, unsigned long right, int bits)
+{
+#ifdef __LITTLE_ENDIAN
+ return left | right << bits;
+#else
+ return right | left << bits;
+#endif
+}
+
+/* aligned 32/16 bpp blitting */
+static inline void fb_bitmap_2ppw(const struct fb_image *image, struct fb_address *dst,
+ unsigned long fgcolor, unsigned long bgcolor,
+ int words_per_line, struct fb_reverse reverse)
+{
+ unsigned long tab[4];
+ const u8 *src = (u8 *)image->data;
+ int width = image->width / 2;
+ int offset;
+ u32 height;
+
+ tab[0] = fb_pack(bgcolor, bgcolor, BITS_PER_LONG/2);
+ tab[1] = fb_pack(bgcolor, fgcolor, BITS_PER_LONG/2);
+ tab[2] = fb_pack(fgcolor, bgcolor, BITS_PER_LONG/2);
+ tab[3] = fb_pack(fgcolor, fgcolor, BITS_PER_LONG/2);
+
+ if (reverse.byte) {
+ tab[0] = swab_long(tab[0]);
+ tab[1] = swab_long(tab[1]);
+ tab[2] = swab_long(tab[2]);
+ tab[3] = swab_long(tab[3]);
+ }
+
+ height = image->height;
+ while (height--) {
+ for (offset = 0; offset + 4 <= width; offset += 4) {
+ unsigned int srcbyte = *src++;
+
+ fb_write_offset(tab[(srcbyte >> 6) & 3], offset + 0, dst);
+ fb_write_offset(tab[(srcbyte >> 4) & 3], offset + 1, dst);
+ fb_write_offset(tab[(srcbyte >> 2) & 3], offset + 2, dst);
+ fb_write_offset(tab[(srcbyte >> 0) & 3], offset + 3, dst);
+ }
+
+ if (offset < width) {
+ unsigned int srcbyte = *src++;
+
+ while (offset < width) {
+ fb_write_offset(tab[(srcbyte >> 6) & 3], offset, dst);
+ srcbyte <<= 2;
+ offset++;
+ }
+ }
+ fb_address_move_long(dst, words_per_line);
+ }
+}
+
+#define FB_PATP(a, b) (((a)<<((b)*BITS_PER_LONG/4))*((1UL<<BITS_PER_LONG/4)-1UL))
+#define FB_PAT4(a) (FB_PATP((a)&1, 0)|FB_PATP(((a)&2)/2, 1)| \
+ FB_PATP(((a)&4)/4, 2)|FB_PATP(((a)&8)/8, 3))
+
+/* aligned 16/8 bpp blitting */
+static inline void fb_bitmap_4ppw(const struct fb_image *image, struct fb_address *dst,
+ unsigned long fgcolor, unsigned long bgcolor,
+ int words_per_line, struct fb_reverse reverse)
+{
+ static const unsigned long tab16_be[] = {
+ 0, FB_PAT4(1UL), FB_PAT4(2UL), FB_PAT4(3UL),
+ FB_PAT4(4UL), FB_PAT4(5UL), FB_PAT4(6UL), FB_PAT4(7UL),
+ FB_PAT4(8UL), FB_PAT4(9UL), FB_PAT4(10UL), FB_PAT4(11UL),
+ FB_PAT4(12UL), FB_PAT4(13UL), FB_PAT4(14UL), ~0UL
+ };
+ static const unsigned long tab16_le[] = {
+ 0, FB_PAT4(8UL), FB_PAT4(4UL), FB_PAT4(12UL),
+ FB_PAT4(2UL), FB_PAT4(10UL), FB_PAT4(6UL), FB_PAT4(14UL),
+ FB_PAT4(1UL), FB_PAT4(9UL), FB_PAT4(5UL), FB_PAT4(13UL),
+ FB_PAT4(3UL), FB_PAT4(11UL), FB_PAT4(7UL), ~0UL
+ };
+ const unsigned long *tab;
+ const u8 *src = (u8 *)image->data;
+ int width = image->width / 4;
+ int offset;
+ u32 height;
+
+ fgcolor = fgcolor | fgcolor << BITS_PER_LONG/4;
+ bgcolor = bgcolor | bgcolor << BITS_PER_LONG/4;
+ fgcolor = fgcolor | fgcolor << BITS_PER_LONG/2;
+ bgcolor = bgcolor | bgcolor << BITS_PER_LONG/2;
+ fgcolor ^= bgcolor;
+
+ if (BITS_PER_LONG/4 > BITS_PER_BYTE && reverse.byte) {
+ fgcolor = swab_long(fgcolor);
+ bgcolor = swab_long(bgcolor);
+ }
+
+#ifdef __LITTLE_ENDIAN
+ tab = reverse.byte ? tab16_be : tab16_le;
+#else
+ tab = reverse.byte ? tab16_le : tab16_be;
+#endif
+
+ height = image->height;
+ while (height--) {
+ for (offset = 0; offset + 2 <= width; offset += 2, src++) {
+ fb_write_offset((fgcolor & tab[*src >> 4]) ^ bgcolor, offset + 0, dst);
+ fb_write_offset((fgcolor & tab[*src & 0xf]) ^ bgcolor, offset + 1, dst);
+ }
+
+ if (offset < width)
+ fb_write_offset((fgcolor & tab[*src++ >> 4]) ^ bgcolor, offset, dst);
+
+ fb_address_move_long(dst, words_per_line);
+ }
+}
+
+static inline void fb_bitmap_imageblit(const struct fb_image *image, struct fb_address *dst,
+ unsigned int bits_per_line, const u32 *palette, int bpp,
+ struct fb_reverse reverse)
+{
+ unsigned long fgcolor, bgcolor;
+
+ if (palette) {
+ fgcolor = palette[image->fg_color];
+ bgcolor = palette[image->bg_color];
+ } else {
+ fgcolor = image->fg_color;
+ bgcolor = image->bg_color;
+ }
+
+ if (!dst->bits && !(bits_per_line & (BITS_PER_LONG-1))) {
+ if (bpp == BITS_PER_LONG && BITS_PER_LONG == 32) {
+ fb_bitmap_1ppw(image, dst, fgcolor, bgcolor,
+ bits_per_line / BITS_PER_LONG, reverse);
+ return;
+ }
+ if (bpp == BITS_PER_LONG/2 && !(image->width & 1)) {
+ fb_bitmap_2ppw(image, dst, fgcolor, bgcolor,
+ bits_per_line / BITS_PER_LONG, reverse);
+ return;
+ }
+ if (bpp == BITS_PER_LONG/4 && !(image->width & 3)) {
+ fb_bitmap_4ppw(image, dst, fgcolor, bgcolor,
+ bits_per_line / BITS_PER_LONG, reverse);
+ return;
+ }
+ }
+
+ if (bpp > 0 && bpp <= BITS_PER_BYTE)
+ fb_bitmap4x_imageblit(image, dst, fgcolor, bgcolor, bpp,
+ bits_per_line, reverse);
+ else if (bpp > BITS_PER_BYTE && bpp <= BITS_PER_LONG)
+ fb_bitmap1x_imageblit(image, dst, fgcolor, bgcolor, bpp,
+ bits_per_line, reverse);
+}
+
+static inline void fb_imageblit(struct fb_info *p, const struct fb_image *image)
+{
+ int bpp = p->var.bits_per_pixel;
+ unsigned int bits_per_line = BYTES_TO_BITS(p->fix.line_length);
+ struct fb_address dst = fb_address_init(p);
+ struct fb_reverse reverse = fb_reverse_init(p);
+ const u32 *palette = fb_palette(p);
+
+ fb_address_forward(&dst, image->dy * bits_per_line + image->dx * bpp);
+
+ if (image->depth == 1)
+ fb_bitmap_imageblit(image, &dst, bits_per_line, palette, bpp, reverse);
+ else
+ fb_color_imageblit(image, &dst, bits_per_line, palette, bpp, reverse);
+}
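
The one-pixel-per-word expansion in fb_bitmap_1ppw() above is easy to picture
with a concrete row: each source bit, most significant first, selects either
the foreground or the background word. Standalone sketch (32 bpp, native byte
order and arbitrary example colors assumed):

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint32_t tab[2] = { 0x00000000u /* bg */, 0x00ff00ffu /* fg */ };
                uint32_t dst[8];
                unsigned int srcbyte = 0xc3;        /* bitmap row 11000011 */
                int i;

                for (i = 0; i < 8; i++)             /* same selection as the unrolled loop */
                        dst[i] = tab[(srcbyte >> (7 - i)) & 1];

                for (i = 0; i < 8; i++)
                        printf("%08x%c", dst[i], i == 7 ? '\n' : ' ');
                return 0;
        }

With reverse byte order the table entries are byte-swapped once up front, so
the per-pixel loop stays the same.
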
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index e8b4e8c119b5..ac3c99ed92d1 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -160,7 +160,6 @@ static int info_idx = -1;
/* console rotation */
static int initial_rotation = -1;
-static int fbcon_has_sysfs;
static int margin_color;
static const struct consw fb_con;
@@ -1258,7 +1257,7 @@ static void __fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
-
+ int fg, bg;
struct fbcon_display *p = &fb_display[vc->vc_num];
u_int y_break;
@@ -1279,16 +1278,18 @@ static void __fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
fbcon_clear_margins(vc, 0);
}
+ fg = get_color(vc, info, vc->vc_video_erase_char, 1);
+ bg = get_color(vc, info, vc->vc_video_erase_char, 0);
/* Split blits that cross physical y_wrap boundary */
y_break = p->vrows - p->yscroll;
if (sy < y_break && sy + height - 1 >= y_break) {
u_int b = y_break - sy;
- ops->clear(vc, info, real_y(p, sy), sx, b, width);
+ ops->clear(vc, info, real_y(p, sy), sx, b, width, fg, bg);
ops->clear(vc, info, real_y(p, sy + b), sx, height - b,
- width);
+ width, fg, bg);
} else
- ops->clear(vc, info, real_y(p, sy), sx, height, width);
+ ops->clear(vc, info, real_y(p, sy), sx, height, width, fg, bg);
}
static void fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
@@ -3157,7 +3158,7 @@ static const struct consw fb_con = {
.con_debug_leave = fbcon_debug_leave,
};
-static ssize_t store_rotate(struct device *device,
+static ssize_t rotate_store(struct device *device,
struct device_attribute *attr, const char *buf,
size_t count)
{
@@ -3179,7 +3180,7 @@ err:
return count;
}
-static ssize_t store_rotate_all(struct device *device,
+static ssize_t rotate_all_store(struct device *device,
struct device_attribute *attr,const char *buf,
size_t count)
{
@@ -3201,7 +3202,7 @@ err:
return count;
}
-static ssize_t show_rotate(struct device *device,
+static ssize_t rotate_show(struct device *device,
struct device_attribute *attr,char *buf)
{
struct fb_info *info;
@@ -3220,7 +3221,7 @@ err:
return sysfs_emit(buf, "%d\n", rotate);
}
-static ssize_t show_cursor_blink(struct device *device,
+static ssize_t cursor_blink_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct fb_info *info;
@@ -3245,7 +3246,7 @@ err:
return sysfs_emit(buf, "%d\n", blink);
}
-static ssize_t store_cursor_blink(struct device *device,
+static ssize_t cursor_blink_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -3279,35 +3280,18 @@ err:
return count;
}
-static struct device_attribute device_attrs[] = {
- __ATTR(rotate, S_IRUGO|S_IWUSR, show_rotate, store_rotate),
- __ATTR(rotate_all, S_IWUSR, NULL, store_rotate_all),
- __ATTR(cursor_blink, S_IRUGO|S_IWUSR, show_cursor_blink,
- store_cursor_blink),
-};
-
-static int fbcon_init_device(void)
-{
- int i, error = 0;
-
- fbcon_has_sysfs = 1;
-
- for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
- error = device_create_file(fbcon_device, &device_attrs[i]);
+static DEVICE_ATTR_RW(cursor_blink);
+static DEVICE_ATTR_RW(rotate);
+static DEVICE_ATTR_WO(rotate_all);
- if (error)
- break;
- }
-
- if (error) {
- while (--i >= 0)
- device_remove_file(fbcon_device, &device_attrs[i]);
+static struct attribute *fbcon_device_attrs[] = {
+ &dev_attr_cursor_blink.attr,
+ &dev_attr_rotate.attr,
+ &dev_attr_rotate_all.attr,
+ NULL
+};
- fbcon_has_sysfs = 0;
- }
-
- return 0;
-}
+ATTRIBUTE_GROUPS(fbcon_device);
#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
static void fbcon_register_existing_fbs(struct work_struct *work)
@@ -3365,16 +3349,16 @@ void __init fb_console_init(void)
int i;
console_lock();
- fbcon_device = device_create(fb_class, NULL, MKDEV(0, 0), NULL,
- "fbcon");
+ fbcon_device = device_create_with_groups(fb_class, NULL,
+ MKDEV(0, 0), NULL,
+ fbcon_device_groups, "fbcon");
if (IS_ERR(fbcon_device)) {
printk(KERN_WARNING "Unable to create device "
"for fbcon; errno = %ld\n",
PTR_ERR(fbcon_device));
fbcon_device = NULL;
- } else
- fbcon_init_device();
+ }
for (i = 0; i < MAX_NR_CONSOLES; i++)
con2fb_map[i] = -1;
@@ -3385,18 +3369,6 @@ void __init fb_console_init(void)
#ifdef MODULE
-static void __exit fbcon_deinit_device(void)
-{
- int i;
-
- if (fbcon_has_sysfs) {
- for (i = 0; i < ARRAY_SIZE(device_attrs); i++)
- device_remove_file(fbcon_device, &device_attrs[i]);
-
- fbcon_has_sysfs = 0;
- }
-}
-
void __exit fb_console_exit(void)
{
#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
@@ -3409,7 +3381,6 @@ void __exit fb_console_exit(void)
#endif
console_lock();
- fbcon_deinit_device();
device_destroy(fb_class, MKDEV(0, 0));
do_unregister_con_driver(&fb_con);
diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h
index df70ea5ec5b3..4d97e6d8a16a 100644
--- a/drivers/video/fbdev/core/fbcon.h
+++ b/drivers/video/fbdev/core/fbcon.h
@@ -55,7 +55,7 @@ struct fbcon_ops {
void (*bmove)(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int dy, int dx, int height, int width);
void (*clear)(struct vc_data *vc, struct fb_info *info, int sy,
- int sx, int height, int width);
+ int sx, int height, int width, int fg, int bg);
void (*putcs)(struct vc_data *vc, struct fb_info *info,
const unsigned short *s, int count, int yy, int xx,
int fg, int bg);
@@ -116,42 +116,6 @@ static inline int mono_col(const struct fb_info *info)
return (~(0xfff << max_len)) & 0xff;
}
-static inline int attr_col_ec(int shift, struct vc_data *vc,
- struct fb_info *info, int is_fg)
-{
- int is_mono01;
- int col;
- int fg;
- int bg;
-
- if (!vc)
- return 0;
-
- if (vc->vc_can_do_color)
- return is_fg ? attr_fgcol(shift,vc->vc_video_erase_char)
- : attr_bgcol(shift,vc->vc_video_erase_char);
-
- if (!info)
- return 0;
-
- col = mono_col(info);
- is_mono01 = info->fix.visual == FB_VISUAL_MONO01;
-
- if (attr_reverse(vc->vc_video_erase_char)) {
- fg = is_mono01 ? col : 0;
- bg = is_mono01 ? 0 : col;
- }
- else {
- fg = is_mono01 ? 0 : col;
- bg = is_mono01 ? col : 0;
- }
-
- return is_fg ? fg : bg;
-}
-
-#define attr_bgcol_ec(bgshift, vc, info) attr_col_ec(bgshift, vc, info, 0)
-#define attr_fgcol_ec(fgshift, vc, info) attr_col_ec(fgshift, vc, info, 1)
-
/*
* Scroll Method
*/
diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c
index f9b794ff7d39..89ef4ba7e867 100644
--- a/drivers/video/fbdev/core/fbcon_ccw.c
+++ b/drivers/video/fbdev/core/fbcon_ccw.c
@@ -78,14 +78,13 @@ static void ccw_bmove(struct vc_data *vc, struct fb_info *info, int sy,
}
static void ccw_clear(struct vc_data *vc, struct fb_info *info, int sy,
- int sx, int height, int width)
+ int sx, int height, int width, int fg, int bg)
{
struct fbcon_ops *ops = info->fbcon_par;
struct fb_fillrect region;
- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
u32 vyres = GETVYRES(ops->p, info);
- region.color = attr_bgcol_ec(bgshift,vc,info);
+ region.color = bg;
region.dx = sy * vc->vc_font.height;
region.dy = vyres - ((sx + width) * vc->vc_font.width);
region.height = width * vc->vc_font.width;
diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c
index 903f6fc174e1..b9dac7940fb7 100644
--- a/drivers/video/fbdev/core/fbcon_cw.c
+++ b/drivers/video/fbdev/core/fbcon_cw.c
@@ -63,14 +63,13 @@ static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy,
}
static void cw_clear(struct vc_data *vc, struct fb_info *info, int sy,
- int sx, int height, int width)
+ int sx, int height, int width, int fg, int bg)
{
struct fbcon_ops *ops = info->fbcon_par;
struct fb_fillrect region;
- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
u32 vxres = GETVXRES(ops->p, info);
- region.color = attr_bgcol_ec(bgshift,vc,info);
+ region.color = bg;
region.dx = vxres - ((sy + height) * vc->vc_font.height);
region.dy = sx * vc->vc_font.width;
region.height = width * vc->vc_font.width;
diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c
index 594331936fd3..0af7913a2abd 100644
--- a/drivers/video/fbdev/core/fbcon_ud.c
+++ b/drivers/video/fbdev/core/fbcon_ud.c
@@ -64,15 +64,14 @@ static void ud_bmove(struct vc_data *vc, struct fb_info *info, int sy,
}
static void ud_clear(struct vc_data *vc, struct fb_info *info, int sy,
- int sx, int height, int width)
+ int sx, int height, int width, int fg, int bg)
{
struct fbcon_ops *ops = info->fbcon_par;
struct fb_fillrect region;
- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
u32 vyres = GETVYRES(ops->p, info);
u32 vxres = GETVXRES(ops->p, info);
- region.color = attr_bgcol_ec(bgshift,vc,info);
+ region.color = bg;
region.dy = vyres - ((sy + height) * vc->vc_font.height);
region.dx = vxres - ((sx + width) * vc->vc_font.width);
region.width = width * vc->vc_font.width;
diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
index 1b3c9958ef5c..06d75c767579 100644
--- a/drivers/video/fbdev/core/fbsysfs.c
+++ b/drivers/video/fbdev/core/fbsysfs.c
@@ -416,55 +416,64 @@ static ssize_t show_bl_curve(struct device *device,
/* When cmap is added back in it should be a binary attribute
* not a text one. Consideration should also be given to converting
* fbdev to use configfs instead of sysfs */
-static struct device_attribute device_attrs[] = {
- __ATTR(bits_per_pixel, S_IRUGO|S_IWUSR, show_bpp, store_bpp),
- __ATTR(blank, S_IRUGO|S_IWUSR, show_blank, store_blank),
- __ATTR(console, S_IRUGO|S_IWUSR, show_console, store_console),
- __ATTR(cursor, S_IRUGO|S_IWUSR, show_cursor, store_cursor),
- __ATTR(mode, S_IRUGO|S_IWUSR, show_mode, store_mode),
- __ATTR(modes, S_IRUGO|S_IWUSR, show_modes, store_modes),
- __ATTR(pan, S_IRUGO|S_IWUSR, show_pan, store_pan),
- __ATTR(virtual_size, S_IRUGO|S_IWUSR, show_virtual, store_virtual),
- __ATTR(name, S_IRUGO, show_name, NULL),
- __ATTR(stride, S_IRUGO, show_stride, NULL),
- __ATTR(rotate, S_IRUGO|S_IWUSR, show_rotate, store_rotate),
- __ATTR(state, S_IRUGO|S_IWUSR, show_fbstate, store_fbstate),
+static DEVICE_ATTR(bits_per_pixel, 0644, show_bpp, store_bpp);
+static DEVICE_ATTR(blank, 0644, show_blank, store_blank);
+static DEVICE_ATTR(console, 0644, show_console, store_console);
+static DEVICE_ATTR(cursor, 0644, show_cursor, store_cursor);
+static DEVICE_ATTR(mode, 0644, show_mode, store_mode);
+static DEVICE_ATTR(modes, 0644, show_modes, store_modes);
+static DEVICE_ATTR(pan, 0644, show_pan, store_pan);
+static DEVICE_ATTR(virtual_size, 0644, show_virtual, store_virtual);
+static DEVICE_ATTR(name, 0444, show_name, NULL);
+static DEVICE_ATTR(stride, 0444, show_stride, NULL);
+static DEVICE_ATTR(rotate, 0644, show_rotate, store_rotate);
+static DEVICE_ATTR(state, 0644, show_fbstate, store_fbstate);
#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
- __ATTR(bl_curve, S_IRUGO|S_IWUSR, show_bl_curve, store_bl_curve),
+static DEVICE_ATTR(bl_curve, 0644, show_bl_curve, store_bl_curve);
#endif
+
+static struct attribute *fb_device_attrs[] = {
+ &dev_attr_bits_per_pixel.attr,
+ &dev_attr_blank.attr,
+ &dev_attr_console.attr,
+ &dev_attr_cursor.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_modes.attr,
+ &dev_attr_pan.attr,
+ &dev_attr_virtual_size.attr,
+ &dev_attr_name.attr,
+ &dev_attr_stride.attr,
+ &dev_attr_rotate.attr,
+ &dev_attr_state.attr,
+#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
+ &dev_attr_bl_curve.attr,
+#endif
+ NULL,
+};
+
+static const struct attribute_group fb_device_attr_group = {
+ .attrs = fb_device_attrs,
};
static int fb_init_device(struct fb_info *fb_info)
{
- int i, error = 0;
+ int ret;
dev_set_drvdata(fb_info->dev, fb_info);
fb_info->class_flag |= FB_SYSFS_FLAG_ATTR;
- for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
- error = device_create_file(fb_info->dev, &device_attrs[i]);
-
- if (error)
- break;
- }
-
- if (error) {
- while (--i >= 0)
- device_remove_file(fb_info->dev, &device_attrs[i]);
+ ret = device_add_group(fb_info->dev, &fb_device_attr_group);
+ if (ret)
fb_info->class_flag &= ~FB_SYSFS_FLAG_ATTR;
- }
return 0;
}
static void fb_cleanup_device(struct fb_info *fb_info)
{
- unsigned int i;
-
if (fb_info->class_flag & FB_SYSFS_FLAG_ATTR) {
- for (i = 0; i < ARRAY_SIZE(device_attrs); i++)
- device_remove_file(fb_info->dev, &device_attrs[i]);
+ device_remove_group(fb_info->dev, &fb_device_attr_group);
fb_info->class_flag &= ~FB_SYSFS_FLAG_ATTR;
}
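
For reference, the sysfs pattern these fbcon and fbsysfs hunks convert to,
condensed into one self-contained unit. The attribute name "demo" and the
foo_dev pointer are illustrative only, not part of the patch:

        #include <linux/device.h>
        #include <linux/sysfs.h>

        static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
                                 char *buf)
        {
                return sysfs_emit(buf, "%d\n", 0);
        }

        static ssize_t demo_store(struct device *dev, struct device_attribute *attr,
                                  const char *buf, size_t count)
        {
                return count;
        }

        static DEVICE_ATTR_RW(demo);                /* expects demo_show/demo_store */

        static struct attribute *demo_attrs[] = {
                &dev_attr_demo.attr,
                NULL
        };

        static const struct attribute_group demo_attr_group = {
                .attrs = demo_attrs,
        };

        /* ret = device_add_group(foo_dev, &demo_attr_group); */

A single device_add_group()/device_remove_group() pair (or passing the groups
to device_create_with_groups() at creation time) then replaces the old
device_create_file() loops and their manual error unwinding.
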
diff --git a/drivers/video/fbdev/core/syscopyarea.c b/drivers/video/fbdev/core/syscopyarea.c
index 75e7001e8450..b634e2d21208 100644
--- a/drivers/video/fbdev/core/syscopyarea.c
+++ b/drivers/video/fbdev/core/syscopyarea.c
@@ -1,373 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * Generic Bit Block Transfer for frame buffers located in system RAM with
- * packed pixels of any depth.
- *
- * Based almost entirely from cfbcopyarea.c (which is based almost entirely
- * on Geert Uytterhoeven's copyarea routine)
- *
- * Copyright (C) 2007 Antonino Daplas <adaplas@pol.net>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- *
+ * Copyright (C) 2025 Zsolt Kajtar (soci@c64.rulez.org)
*/
#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
#include <linux/fb.h>
+#include <linux/bitrev.h>
#include <asm/types.h>
-#include <asm/io.h>
-#include "fb_draw.h"
- /*
- * Generic bitwise copy algorithm
- */
+#ifdef CONFIG_FB_SYS_REV_PIXELS_IN_BYTE
+#define FB_REV_PIXELS_IN_BYTE
+#endif
-static void
-bitcpy(struct fb_info *p, unsigned long *dst, unsigned dst_idx,
- const unsigned long *src, unsigned src_idx, int bits, unsigned n)
-{
- unsigned long first, last;
- int const shift = dst_idx-src_idx;
- int left, right;
-
- first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
- last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
-
- if (!shift) {
- /* Same alignment for source and dest */
- if (dst_idx+n <= bits) {
- /* Single word */
- if (last)
- first &= last;
- *dst = comp(*src, *dst, first);
- } else {
- /* Multiple destination words */
- /* Leading bits */
- if (first != ~0UL) {
- *dst = comp(*src, *dst, first);
- dst++;
- src++;
- n -= bits - dst_idx;
- }
-
- /* Main chunk */
- n /= bits;
- while (n >= 8) {
- *dst++ = *src++;
- *dst++ = *src++;
- *dst++ = *src++;
- *dst++ = *src++;
- *dst++ = *src++;
- *dst++ = *src++;
- *dst++ = *src++;
- *dst++ = *src++;
- n -= 8;
- }
- while (n--)
- *dst++ = *src++;
-
- /* Trailing bits */
- if (last)
- *dst = comp(*src, *dst, last);
- }
- } else {
- unsigned long d0, d1;
- int m;
-
- /* Different alignment for source and dest */
- right = shift & (bits - 1);
- left = -shift & (bits - 1);
-
- if (dst_idx+n <= bits) {
- /* Single destination word */
- if (last)
- first &= last;
- if (shift > 0) {
- /* Single source word */
- *dst = comp(*src << left, *dst, first);
- } else if (src_idx+n <= bits) {
- /* Single source word */
- *dst = comp(*src >> right, *dst, first);
- } else {
- /* 2 source words */
- d0 = *src++;
- d1 = *src;
- *dst = comp(d0 >> right | d1 << left, *dst,
- first);
- }
- } else {
- /* Multiple destination words */
- /** We must always remember the last value read,
- because in case SRC and DST overlap bitwise (e.g.
- when moving just one pixel in 1bpp), we always
- collect one full long for DST and that might
- overlap with the current long from SRC. We store
- this value in 'd0'. */
- d0 = *src++;
- /* Leading bits */
- if (shift > 0) {
- /* Single source word */
- *dst = comp(d0 << left, *dst, first);
- dst++;
- n -= bits - dst_idx;
- } else {
- /* 2 source words */
- d1 = *src++;
- *dst = comp(d0 >> right | d1 << left, *dst,
- first);
- d0 = d1;
- dst++;
- n -= bits - dst_idx;
- }
-
- /* Main chunk */
- m = n % bits;
- n /= bits;
- while (n >= 4) {
- d1 = *src++;
- *dst++ = d0 >> right | d1 << left;
- d0 = d1;
- d1 = *src++;
- *dst++ = d0 >> right | d1 << left;
- d0 = d1;
- d1 = *src++;
- *dst++ = d0 >> right | d1 << left;
- d0 = d1;
- d1 = *src++;
- *dst++ = d0 >> right | d1 << left;
- d0 = d1;
- n -= 4;
- }
- while (n--) {
- d1 = *src++;
- *dst++ = d0 >> right | d1 << left;
- d0 = d1;
- }
-
- /* Trailing bits */
- if (m) {
- if (m <= bits - right) {
- /* Single source word */
- d0 >>= right;
- } else {
- /* 2 source words */
- d1 = *src;
- d0 = d0 >> right | d1 << left;
- }
- *dst = comp(d0, *dst, last);
- }
- }
- }
-}
-
- /*
- * Generic bitwise copy algorithm, operating backward
- */
-
-static void
-bitcpy_rev(struct fb_info *p, unsigned long *dst, unsigned dst_idx,
- const unsigned long *src, unsigned src_idx, unsigned bits,
- unsigned n)
-{
- unsigned long first, last;
- int shift;
-
- dst += (dst_idx + n - 1) / bits;
- src += (src_idx + n - 1) / bits;
- dst_idx = (dst_idx + n - 1) % bits;
- src_idx = (src_idx + n - 1) % bits;
-
- shift = dst_idx-src_idx;
-
- first = ~FB_SHIFT_HIGH(p, ~0UL, (dst_idx + 1) % bits);
- last = FB_SHIFT_HIGH(p, ~0UL, (bits + dst_idx + 1 - n) % bits);
-
- if (!shift) {
- /* Same alignment for source and dest */
- if ((unsigned long)dst_idx+1 >= n) {
- /* Single word */
- if (first)
- last &= first;
- *dst = comp(*src, *dst, last);
- } else {
- /* Multiple destination words */
-
- /* Leading bits */
- if (first) {
- *dst = comp(*src, *dst, first);
- dst--;
- src--;
- n -= dst_idx+1;
- }
-
- /* Main chunk */
- n /= bits;
- while (n >= 8) {
- *dst-- = *src--;
- *dst-- = *src--;
- *dst-- = *src--;
- *dst-- = *src--;
- *dst-- = *src--;
- *dst-- = *src--;
- *dst-- = *src--;
- *dst-- = *src--;
- n -= 8;
- }
- while (n--)
- *dst-- = *src--;
- /* Trailing bits */
- if (last != -1UL)
- *dst = comp(*src, *dst, last);
- }
- } else {
- /* Different alignment for source and dest */
-
- int const left = shift & (bits-1);
- int const right = -shift & (bits-1);
-
- if ((unsigned long)dst_idx+1 >= n) {
- /* Single destination word */
- if (first)
- last &= first;
- if (shift < 0) {
- /* Single source word */
- *dst = comp(*src >> right, *dst, last);
- } else if (1+(unsigned long)src_idx >= n) {
- /* Single source word */
- *dst = comp(*src << left, *dst, last);
- } else {
- /* 2 source words */
- *dst = comp(*src << left | *(src-1) >> right,
- *dst, last);
- }
- } else {
- /* Multiple destination words */
- /** We must always remember the last value read,
- because in case SRC and DST overlap bitwise (e.g.
- when moving just one pixel in 1bpp), we always
- collect one full long for DST and that might
- overlap with the current long from SRC. We store
- this value in 'd0'. */
- unsigned long d0, d1;
- int m;
-
- d0 = *src--;
- /* Leading bits */
- if (shift < 0) {
- /* Single source word */
- d1 = d0;
- d0 >>= right;
- } else {
- /* 2 source words */
- d1 = *src--;
- d0 = d0 << left | d1 >> right;
- }
- if (!first)
- *dst = d0;
- else
- *dst = comp(d0, *dst, first);
- d0 = d1;
- dst--;
- n -= dst_idx+1;
-
- /* Main chunk */
- m = n % bits;
- n /= bits;
- while (n >= 4) {
- d1 = *src--;
- *dst-- = d0 << left | d1 >> right;
- d0 = d1;
- d1 = *src--;
- *dst-- = d0 << left | d1 >> right;
- d0 = d1;
- d1 = *src--;
- *dst-- = d0 << left | d1 >> right;
- d0 = d1;
- d1 = *src--;
- *dst-- = d0 << left | d1 >> right;
- d0 = d1;
- n -= 4;
- }
- while (n--) {
- d1 = *src--;
- *dst-- = d0 << left | d1 >> right;
- d0 = d1;
- }
-
- /* Trailing bits */
- if (m) {
- if (m <= bits - left) {
- /* Single source word */
- d0 <<= left;
- } else {
- /* 2 source words */
- d1 = *src;
- d0 = d0 << left | d1 >> right;
- }
- *dst = comp(d0, *dst, last);
- }
- }
- }
-}
+#include "sysmem.h"
+#include "fb_copyarea.h"
void sys_copyarea(struct fb_info *p, const struct fb_copyarea *area)
{
- u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy;
- u32 height = area->height, width = area->width;
- unsigned int const bits_per_line = p->fix.line_length * 8u;
- unsigned long *base = NULL;
- int bits = BITS_PER_LONG, bytes = bits >> 3;
- unsigned dst_idx = 0, src_idx = 0, rev_copy = 0;
-
- if (p->state != FBINFO_STATE_RUNNING)
- return;
-
if (!(p->flags & FBINFO_VIRTFB))
- fb_warn_once(p, "Framebuffer is not in virtual address space.");
-
- /* if the beginning of the target area might overlap with the end of
- the source area, be have to copy the area reverse. */
- if ((dy == sy && dx > sx) || (dy > sy)) {
- dy += height;
- sy += height;
- rev_copy = 1;
- }
+ fb_warn_once(p, "%s: framebuffer is not in virtual address space.\n", __func__);
- /* split the base of the framebuffer into a long-aligned address and
- the index of the first bit */
- base = (unsigned long *)((unsigned long)p->screen_base & ~(bytes-1));
- dst_idx = src_idx = 8*((unsigned long)p->screen_base & (bytes-1));
- /* add offset of source and target area */
- dst_idx += dy*bits_per_line + dx*p->var.bits_per_pixel;
- src_idx += sy*bits_per_line + sx*p->var.bits_per_pixel;
-
- if (p->fbops->fb_sync)
- p->fbops->fb_sync(p);
-
- if (rev_copy) {
- while (height--) {
- dst_idx -= bits_per_line;
- src_idx -= bits_per_line;
- bitcpy_rev(p, base + (dst_idx / bits), dst_idx % bits,
- base + (src_idx / bits), src_idx % bits, bits,
- width*p->var.bits_per_pixel);
- }
- } else {
- while (height--) {
- bitcpy(p, base + (dst_idx / bits), dst_idx % bits,
- base + (src_idx / bits), src_idx % bits, bits,
- width*p->var.bits_per_pixel);
- dst_idx += bits_per_line;
- src_idx += bits_per_line;
- }
- }
+ fb_copyarea(p, area);
}
-
EXPORT_SYMBOL(sys_copyarea);
-MODULE_AUTHOR("Antonino Daplas <adaplas@pol.net>");
-MODULE_DESCRIPTION("Generic copyarea (sys-to-sys)");
+MODULE_AUTHOR("Zsolt Kajtar <soci@c64.rulez.org>");
+MODULE_DESCRIPTION("Virtual memory packed pixel framebuffer area copy");
MODULE_LICENSE("GPL");
-
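Note: the bitcpy()/bitcpy_rev() routines removed above were built on the comp() mask-merge helper from fb_draw.h; the fb_copyarea.h template they are replaced with (added elsewhere in this series) keeps the same idea but goes through the sysmem.h accessors. A minimal userspace sketch of that primitive, assuming a 64-bit unsigned long; names here are illustrative, this is not the kernel code:

#include <stdio.h>

/* merge: take bits of 'a' where 'mask' is set, keep 'b' elsewhere;
 * equivalent to the ((a ^ b) & mask) ^ b form fb_draw.h used */
static unsigned long comp(unsigned long a, unsigned long b, unsigned long mask)
{
	return (a & mask) | (b & ~mask);
}

int main(void)
{
	unsigned long dst = 0x1111222233334444UL;
	unsigned long src = 0xaaaaaaaaaaaaaaaaUL;

	/* overwrite only the low 16 destination bits, as the partial-word
	 * leading/trailing steps of a copyarea would */
	dst = comp(src, dst, 0xffffUL);
	printf("%016lx\n", dst);	/* 111122223333aaaa */
	return 0;
}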
diff --git a/drivers/video/fbdev/core/sysfillrect.c b/drivers/video/fbdev/core/sysfillrect.c
index e49221a88ccc..372ca6a324c2 100644
--- a/drivers/video/fbdev/core/sysfillrect.c
+++ b/drivers/video/fbdev/core/sysfillrect.c
@@ -1,328 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * Generic fillrect for frame buffers in system RAM with packed pixels of
- * any depth.
- *
- * Based almost entirely from cfbfillrect.c (which is based almost entirely
- * on Geert Uytterhoeven's fillrect routine)
- *
- * Copyright (C) 2007 Antonino Daplas <adaplas@pol.net>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
+ * Copyright (C) 2025 Zsolt Kajtar (soci@c64.rulez.org)
*/
#include <linux/module.h>
-#include <linux/string.h>
#include <linux/fb.h>
+#include <linux/bitrev.h>
#include <asm/types.h>
-#include "fb_draw.h"
- /*
- * Aligned pattern fill using 32/64-bit memory accesses
- */
-
-static void
-bitfill_aligned(struct fb_info *p, unsigned long *dst, int dst_idx,
- unsigned long pat, unsigned n, int bits)
-{
- unsigned long first, last;
-
- if (!n)
- return;
-
- first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
- last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
-
- if (dst_idx+n <= bits) {
- /* Single word */
- if (last)
- first &= last;
- *dst = comp(pat, *dst, first);
- } else {
- /* Multiple destination words */
-
- /* Leading bits */
- if (first!= ~0UL) {
- *dst = comp(pat, *dst, first);
- dst++;
- n -= bits - dst_idx;
- }
-
- /* Main chunk */
- n /= bits;
- memset_l(dst, pat, n);
- dst += n;
-
- /* Trailing bits */
- if (last)
- *dst = comp(pat, *dst, last);
- }
-}
-
-
- /*
- * Unaligned generic pattern fill using 32/64-bit memory accesses
- * The pattern must have been expanded to a full 32/64-bit value
- * Left/right are the appropriate shifts to convert to the pattern to be
- * used for the next 32/64-bit word
- */
-
-static void
-bitfill_unaligned(struct fb_info *p, unsigned long *dst, int dst_idx,
- unsigned long pat, int left, int right, unsigned n, int bits)
-{
- unsigned long first, last;
-
- if (!n)
- return;
-
- first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
- last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
-
- if (dst_idx+n <= bits) {
- /* Single word */
- if (last)
- first &= last;
- *dst = comp(pat, *dst, first);
- } else {
- /* Multiple destination words */
- /* Leading bits */
- if (first) {
- *dst = comp(pat, *dst, first);
- dst++;
- pat = pat << left | pat >> right;
- n -= bits - dst_idx;
- }
-
- /* Main chunk */
- n /= bits;
- while (n >= 4) {
- *dst++ = pat;
- pat = pat << left | pat >> right;
- *dst++ = pat;
- pat = pat << left | pat >> right;
- *dst++ = pat;
- pat = pat << left | pat >> right;
- *dst++ = pat;
- pat = pat << left | pat >> right;
- n -= 4;
- }
- while (n--) {
- *dst++ = pat;
- pat = pat << left | pat >> right;
- }
-
- /* Trailing bits */
- if (last)
- *dst = comp(pat, *dst, last);
- }
-}
-
- /*
- * Aligned pattern invert using 32/64-bit memory accesses
- */
-static void
-bitfill_aligned_rev(struct fb_info *p, unsigned long *dst, int dst_idx,
- unsigned long pat, unsigned n, int bits)
-{
- unsigned long val = pat;
- unsigned long first, last;
-
- if (!n)
- return;
-
- first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
- last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
-
- if (dst_idx+n <= bits) {
- /* Single word */
- if (last)
- first &= last;
- *dst = comp(*dst ^ val, *dst, first);
- } else {
- /* Multiple destination words */
- /* Leading bits */
- if (first!=0UL) {
- *dst = comp(*dst ^ val, *dst, first);
- dst++;
- n -= bits - dst_idx;
- }
-
- /* Main chunk */
- n /= bits;
- while (n >= 8) {
- *dst++ ^= val;
- *dst++ ^= val;
- *dst++ ^= val;
- *dst++ ^= val;
- *dst++ ^= val;
- *dst++ ^= val;
- *dst++ ^= val;
- *dst++ ^= val;
- n -= 8;
- }
- while (n--)
- *dst++ ^= val;
- /* Trailing bits */
- if (last)
- *dst = comp(*dst ^ val, *dst, last);
- }
-}
-
-
- /*
- * Unaligned generic pattern invert using 32/64-bit memory accesses
- * The pattern must have been expanded to a full 32/64-bit value
- * Left/right are the appropriate shifts to convert to the pattern to be
- * used for the next 32/64-bit word
- */
-
-static void
-bitfill_unaligned_rev(struct fb_info *p, unsigned long *dst, int dst_idx,
- unsigned long pat, int left, int right, unsigned n,
- int bits)
-{
- unsigned long first, last;
-
- if (!n)
- return;
-
- first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
- last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
-
- if (dst_idx+n <= bits) {
- /* Single word */
- if (last)
- first &= last;
- *dst = comp(*dst ^ pat, *dst, first);
- } else {
- /* Multiple destination words */
-
- /* Leading bits */
- if (first != 0UL) {
- *dst = comp(*dst ^ pat, *dst, first);
- dst++;
- pat = pat << left | pat >> right;
- n -= bits - dst_idx;
- }
-
- /* Main chunk */
- n /= bits;
- while (n >= 4) {
- *dst++ ^= pat;
- pat = pat << left | pat >> right;
- *dst++ ^= pat;
- pat = pat << left | pat >> right;
- *dst++ ^= pat;
- pat = pat << left | pat >> right;
- *dst++ ^= pat;
- pat = pat << left | pat >> right;
- n -= 4;
- }
- while (n--) {
- *dst ^= pat;
- pat = pat << left | pat >> right;
- }
+#ifdef CONFIG_FB_SYS_REV_PIXELS_IN_BYTE
+#define FB_REV_PIXELS_IN_BYTE
+#endif
- /* Trailing bits */
- if (last)
- *dst = comp(*dst ^ pat, *dst, last);
- }
-}
+#include "sysmem.h"
+#include "fb_fillrect.h"
void sys_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
{
- unsigned long pat, pat2, fg;
- unsigned long width = rect->width, height = rect->height;
- int bits = BITS_PER_LONG, bytes = bits >> 3;
- u32 bpp = p->var.bits_per_pixel;
- unsigned long *dst;
- int dst_idx, left;
-
- if (p->state != FBINFO_STATE_RUNNING)
- return;
-
if (!(p->flags & FBINFO_VIRTFB))
- fb_warn_once(p, "Framebuffer is not in virtual address space.");
+ fb_warn_once(p, "%s: framebuffer is not in virtual address space.\n", __func__);
- if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
- p->fix.visual == FB_VISUAL_DIRECTCOLOR )
- fg = ((u32 *) (p->pseudo_palette))[rect->color];
- else
- fg = rect->color;
-
- pat = pixel_to_pat( bpp, fg);
-
- dst = (unsigned long *)((unsigned long)p->screen_base & ~(bytes-1));
- dst_idx = ((unsigned long)p->screen_base & (bytes - 1))*8;
- dst_idx += rect->dy*p->fix.line_length*8+rect->dx*bpp;
- /* FIXME For now we support 1-32 bpp only */
- left = bits % bpp;
- if (p->fbops->fb_sync)
- p->fbops->fb_sync(p);
- if (!left) {
- void (*fill_op32)(struct fb_info *p, unsigned long *dst,
- int dst_idx, unsigned long pat, unsigned n,
- int bits) = NULL;
-
- switch (rect->rop) {
- case ROP_XOR:
- fill_op32 = bitfill_aligned_rev;
- break;
- case ROP_COPY:
- fill_op32 = bitfill_aligned;
- break;
- default:
- printk( KERN_ERR "cfb_fillrect(): unknown rop, "
- "defaulting to ROP_COPY\n");
- fill_op32 = bitfill_aligned;
- break;
- }
- while (height--) {
- dst += dst_idx >> (ffs(bits) - 1);
- dst_idx &= (bits - 1);
- fill_op32(p, dst, dst_idx, pat, width*bpp, bits);
- dst_idx += p->fix.line_length*8;
- }
- } else {
- int right, r;
- void (*fill_op)(struct fb_info *p, unsigned long *dst,
- int dst_idx, unsigned long pat, int left,
- int right, unsigned n, int bits) = NULL;
-#ifdef __LITTLE_ENDIAN
- right = left;
- left = bpp - right;
-#else
- right = bpp - left;
-#endif
- switch (rect->rop) {
- case ROP_XOR:
- fill_op = bitfill_unaligned_rev;
- break;
- case ROP_COPY:
- fill_op = bitfill_unaligned;
- break;
- default:
- printk(KERN_ERR "sys_fillrect(): unknown rop, "
- "defaulting to ROP_COPY\n");
- fill_op = bitfill_unaligned;
- break;
- }
- while (height--) {
- dst += dst_idx / bits;
- dst_idx &= (bits - 1);
- r = dst_idx % bpp;
- /* rotate pattern to the correct start position */
- pat2 = le_long_to_cpu(rolx(cpu_to_le_long(pat), r, bpp));
- fill_op(p, dst, dst_idx, pat2, left, right,
- width*bpp, bits);
- dst_idx += p->fix.line_length*8;
- }
- }
+ fb_fillrect(p, rect);
}
-
EXPORT_SYMBOL(sys_fillrect);
-MODULE_AUTHOR("Antonino Daplas <adaplas@pol.net>");
-MODULE_DESCRIPTION("Generic fill rectangle (sys-to-sys)");
+MODULE_AUTHOR("Zsolt Kajtar <soci@c64.rulez.org>");
+MODULE_DESCRIPTION("Virtual memory packed pixel framebuffer area fill");
MODULE_LICENSE("GPL");
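Note: as with the copy path, the open-coded bitfill_*() routines above give way to a shared fb_fillrect.h template. Their leading/trailing handling amounts to building a "first" and "last" mask around an unaligned span; a small userspace sketch of the single-word case, assuming a 64-bit unsigned long and LSB-first bit order rather than the FB_SHIFT_HIGH abstraction (illustrative only):

#include <stdio.h>

/* fill n bits of *dst starting at bit index idx with bits from pat,
 * leaving the rest untouched (single-word case, LSB-first) */
static void fill_bits(unsigned long *dst, int idx, int n, unsigned long pat)
{
	unsigned long first = ~0UL << idx;		/* bits from idx up */
	unsigned long last = (idx + n < 64) ? ~(~0UL << (idx + n)) : ~0UL;
	unsigned long mask = first & last;

	*dst = (pat & mask) | (*dst & ~mask);
}

int main(void)
{
	unsigned long word = 0;

	fill_bits(&word, 4, 8, ~0UL);	/* set bits 4..11 */
	printf("%lx\n", word);		/* ff0 */
	return 0;
}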
diff --git a/drivers/video/fbdev/core/sysimgblt.c b/drivers/video/fbdev/core/sysimgblt.c
index 6949bbd51d92..c756cc658b7d 100644
--- a/drivers/video/fbdev/core/sysimgblt.c
+++ b/drivers/video/fbdev/core/sysimgblt.c
@@ -1,339 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * Generic 1-bit or 8-bit source to 1-32 bit destination expansion
- * for frame buffer located in system RAM with packed pixels of any depth.
- *
- * Based almost entirely on cfbimgblt.c
- *
- * Copyright (C) April 2007 Antonino Daplas <adaplas@pol.net>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
+ * Copyright (C) 2025 Zsolt Kajtar (soci@c64.rulez.org)
*/
#include <linux/module.h>
-#include <linux/string.h>
#include <linux/fb.h>
+#include <linux/bitrev.h>
#include <asm/types.h>
-#define DEBUG
-
-#ifdef DEBUG
-#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt,__func__,## args)
-#else
-#define DPRINTK(fmt, args...)
+#ifdef CONFIG_FB_SYS_REV_PIXELS_IN_BYTE
+#define FB_REV_PIXELS_IN_BYTE
#endif
-static const u32 cfb_tab8_be[] = {
- 0x00000000,0x000000ff,0x0000ff00,0x0000ffff,
- 0x00ff0000,0x00ff00ff,0x00ffff00,0x00ffffff,
- 0xff000000,0xff0000ff,0xff00ff00,0xff00ffff,
- 0xffff0000,0xffff00ff,0xffffff00,0xffffffff
-};
-
-static const u32 cfb_tab8_le[] = {
- 0x00000000,0xff000000,0x00ff0000,0xffff0000,
- 0x0000ff00,0xff00ff00,0x00ffff00,0xffffff00,
- 0x000000ff,0xff0000ff,0x00ff00ff,0xffff00ff,
- 0x0000ffff,0xff00ffff,0x00ffffff,0xffffffff
-};
-
-static const u32 cfb_tab16_be[] = {
- 0x00000000, 0x0000ffff, 0xffff0000, 0xffffffff
-};
-
-static const u32 cfb_tab16_le[] = {
- 0x00000000, 0xffff0000, 0x0000ffff, 0xffffffff
-};
-
-static const u32 cfb_tab32[] = {
- 0x00000000, 0xffffffff
-};
-
-static void color_imageblit(const struct fb_image *image, struct fb_info *p,
- void *dst1, u32 start_index, u32 pitch_index)
-{
- /* Draw the penguin */
- u32 *dst, *dst2;
- u32 color = 0, val, shift;
- int i, n, bpp = p->var.bits_per_pixel;
- u32 null_bits = 32 - bpp;
- u32 *palette = (u32 *) p->pseudo_palette;
- const u8 *src = image->data;
-
- dst2 = dst1;
- for (i = image->height; i--; ) {
- n = image->width;
- dst = dst1;
- shift = 0;
- val = 0;
-
- if (start_index) {
- u32 start_mask = ~(FB_SHIFT_HIGH(p, ~(u32)0,
- start_index));
- val = *dst & start_mask;
- shift = start_index;
- }
- while (n--) {
- if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
- p->fix.visual == FB_VISUAL_DIRECTCOLOR )
- color = palette[*src];
- else
- color = *src;
- color <<= FB_LEFT_POS(p, bpp);
- val |= FB_SHIFT_HIGH(p, color, shift);
- if (shift >= null_bits) {
- *dst++ = val;
-
- val = (shift == null_bits) ? 0 :
- FB_SHIFT_LOW(p, color, 32 - shift);
- }
- shift += bpp;
- shift &= (32 - 1);
- src++;
- }
- if (shift) {
- u32 end_mask = FB_SHIFT_HIGH(p, ~(u32)0, shift);
-
- *dst &= end_mask;
- *dst |= val;
- }
- dst1 += p->fix.line_length;
- if (pitch_index) {
- dst2 += p->fix.line_length;
- dst1 = (u8 *)((long)dst2 & ~(sizeof(u32) - 1));
-
- start_index += pitch_index;
- start_index &= 32 - 1;
- }
- }
-}
-
-static void slow_imageblit(const struct fb_image *image, struct fb_info *p,
- void *dst1, u32 fgcolor, u32 bgcolor,
- u32 start_index, u32 pitch_index)
-{
- u32 shift, color = 0, bpp = p->var.bits_per_pixel;
- u32 *dst, *dst2;
- u32 val, pitch = p->fix.line_length;
- u32 null_bits = 32 - bpp;
- u32 spitch = (image->width+7)/8;
- const u8 *src = image->data, *s;
- u32 i, j, l;
-
- dst2 = dst1;
- fgcolor <<= FB_LEFT_POS(p, bpp);
- bgcolor <<= FB_LEFT_POS(p, bpp);
-
- for (i = image->height; i--; ) {
- shift = val = 0;
- l = 8;
- j = image->width;
- dst = dst1;
- s = src;
-
- /* write leading bits */
- if (start_index) {
- u32 start_mask = ~(FB_SHIFT_HIGH(p, ~(u32)0,
- start_index));
- val = *dst & start_mask;
- shift = start_index;
- }
-
- while (j--) {
- l--;
- color = (*s & (1 << l)) ? fgcolor : bgcolor;
- val |= FB_SHIFT_HIGH(p, color, shift);
-
- /* Did the bitshift spill bits to the next long? */
- if (shift >= null_bits) {
- *dst++ = val;
- val = (shift == null_bits) ? 0 :
- FB_SHIFT_LOW(p, color, 32 - shift);
- }
- shift += bpp;
- shift &= (32 - 1);
- if (!l) { l = 8; s++; }
- }
-
- /* write trailing bits */
- if (shift) {
- u32 end_mask = FB_SHIFT_HIGH(p, ~(u32)0, shift);
-
- *dst &= end_mask;
- *dst |= val;
- }
-
- dst1 += pitch;
- src += spitch;
- if (pitch_index) {
- dst2 += pitch;
- dst1 = (u8 *)((long)dst2 & ~(sizeof(u32) - 1));
- start_index += pitch_index;
- start_index &= 32 - 1;
- }
-
- }
-}
-
-/*
- * fast_imageblit - optimized monochrome color expansion
- *
- * Only if: bits_per_pixel == 8, 16, or 32
- * image->width is divisible by pixel/dword (ppw);
- * fix->line_legth is divisible by 4;
- * beginning and end of a scanline is dword aligned
- */
-static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
- void *dst1, u32 fgcolor, u32 bgcolor)
-{
- u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel;
- u32 ppw = 32/bpp, spitch = (image->width + 7)/8;
- u32 bit_mask, eorx, shift;
- const u8 *s = image->data, *src;
- u32 *dst;
- const u32 *tab;
- size_t tablen;
- u32 colortab[16];
- int i, j, k;
-
- switch (bpp) {
- case 8:
- tab = fb_be_math(p) ? cfb_tab8_be : cfb_tab8_le;
- tablen = 16;
- break;
- case 16:
- tab = fb_be_math(p) ? cfb_tab16_be : cfb_tab16_le;
- tablen = 4;
- break;
- case 32:
- tab = cfb_tab32;
- tablen = 2;
- break;
- default:
- return;
- }
-
- for (i = ppw-1; i--; ) {
- fgx <<= bpp;
- bgx <<= bpp;
- fgx |= fgcolor;
- bgx |= bgcolor;
- }
-
- bit_mask = (1 << ppw) - 1;
- eorx = fgx ^ bgx;
- k = image->width/ppw;
-
- for (i = 0; i < tablen; ++i)
- colortab[i] = (tab[i] & eorx) ^ bgx;
-
- for (i = image->height; i--; ) {
- dst = dst1;
- shift = 8;
- src = s;
-
- /*
- * Manually unroll the per-line copying loop for better
- * performance. This works until we processed the last
- * completely filled source byte (inclusive).
- */
- switch (ppw) {
- case 4: /* 8 bpp */
- for (j = k; j >= 2; j -= 2, ++src) {
- *dst++ = colortab[(*src >> 4) & bit_mask];
- *dst++ = colortab[(*src >> 0) & bit_mask];
- }
- break;
- case 2: /* 16 bpp */
- for (j = k; j >= 4; j -= 4, ++src) {
- *dst++ = colortab[(*src >> 6) & bit_mask];
- *dst++ = colortab[(*src >> 4) & bit_mask];
- *dst++ = colortab[(*src >> 2) & bit_mask];
- *dst++ = colortab[(*src >> 0) & bit_mask];
- }
- break;
- case 1: /* 32 bpp */
- for (j = k; j >= 8; j -= 8, ++src) {
- *dst++ = colortab[(*src >> 7) & bit_mask];
- *dst++ = colortab[(*src >> 6) & bit_mask];
- *dst++ = colortab[(*src >> 5) & bit_mask];
- *dst++ = colortab[(*src >> 4) & bit_mask];
- *dst++ = colortab[(*src >> 3) & bit_mask];
- *dst++ = colortab[(*src >> 2) & bit_mask];
- *dst++ = colortab[(*src >> 1) & bit_mask];
- *dst++ = colortab[(*src >> 0) & bit_mask];
- }
- break;
- }
-
- /*
- * For image widths that are not a multiple of 8, there
- * are trailing pixels left on the current line. Print
- * them as well.
- */
- for (; j--; ) {
- shift -= ppw;
- *dst++ = colortab[(*src >> shift) & bit_mask];
- if (!shift) {
- shift = 8;
- ++src;
- }
- }
-
- dst1 += p->fix.line_length;
- s += spitch;
- }
-}
+#include "sysmem.h"
+#include "fb_imageblit.h"
void sys_imageblit(struct fb_info *p, const struct fb_image *image)
{
- u32 fgcolor, bgcolor, start_index, bitstart, pitch_index = 0;
- u32 bpl = sizeof(u32), bpp = p->var.bits_per_pixel;
- u32 width = image->width;
- u32 dx = image->dx, dy = image->dy;
- void *dst1;
-
- if (p->state != FBINFO_STATE_RUNNING)
- return;
-
if (!(p->flags & FBINFO_VIRTFB))
- fb_warn_once(p, "Framebuffer is not in virtual address space.");
-
- bitstart = (dy * p->fix.line_length * 8) + (dx * bpp);
- start_index = bitstart & (32 - 1);
- pitch_index = (p->fix.line_length & (bpl - 1)) * 8;
+ fb_warn_once(p, "%s: framebuffer is not in virtual address space.\n", __func__);
- bitstart /= 8;
- bitstart &= ~(bpl - 1);
- dst1 = (void __force *)p->screen_base + bitstart;
-
- if (p->fbops->fb_sync)
- p->fbops->fb_sync(p);
-
- if (image->depth == 1) {
- if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
- p->fix.visual == FB_VISUAL_DIRECTCOLOR) {
- fgcolor = ((u32*)(p->pseudo_palette))[image->fg_color];
- bgcolor = ((u32*)(p->pseudo_palette))[image->bg_color];
- } else {
- fgcolor = image->fg_color;
- bgcolor = image->bg_color;
- }
-
- if (32 % bpp == 0 && !start_index && !pitch_index &&
- ((width & (32/bpp-1)) == 0) &&
- bpp >= 8 && bpp <= 32)
- fast_imageblit(image, p, dst1, fgcolor, bgcolor);
- else
- slow_imageblit(image, p, dst1, fgcolor, bgcolor,
- start_index, pitch_index);
- } else
- color_imageblit(image, p, dst1, start_index, pitch_index);
+ fb_imageblit(p, image);
}
-
EXPORT_SYMBOL(sys_imageblit);
-MODULE_AUTHOR("Antonino Daplas <adaplas@pol.net>");
-MODULE_DESCRIPTION("1-bit/8-bit to 1-32 bit color expansion (sys-to-sys)");
+MODULE_AUTHOR("Zsolt Kajtar <soci@c64.rulez.org>");
+MODULE_DESCRIPTION("Virtual memory packed pixel framebuffer image draw");
MODULE_LICENSE("GPL");
-
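Note: the removed imageblit variants all perform the same core operation: expanding a 1bpp source bitmap into full-depth pixels from a foreground/background pair (the colortab trick in fast_imageblit() just emits four or eight pixels per lookup). A standalone sketch of that expansion for one source byte at 8 bpp; this is illustrative, not the fb_imageblit.h code the patch switches to:

#include <stdio.h>

/* expand one monochrome source byte (MSB first) into eight 8bpp pixels */
static void expand_byte(unsigned char src, unsigned char fg, unsigned char bg,
			unsigned char dst[8])
{
	int i;

	for (i = 0; i < 8; i++)
		dst[i] = (src & (0x80 >> i)) ? fg : bg;
}

int main(void)
{
	unsigned char row[8];
	int i;

	expand_byte(0xa5, 0x0f, 0x00, row);	/* 10100101 */
	for (i = 0; i < 8; i++)
		printf("%02x ", row[i]);
	printf("\n");				/* 0f 00 0f 00 00 0f 00 0f */
	return 0;
}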
diff --git a/drivers/video/fbdev/core/sysmem.h b/drivers/video/fbdev/core/sysmem.h
new file mode 100644
index 000000000000..033c31a96e78
--- /dev/null
+++ b/drivers/video/fbdev/core/sysmem.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Virtual memory framebuffer access for drawing routines
+ *
+ * Copyright (C) 2025 Zsolt Kajtar (soci@c64.rulez.org)
+ */
+
+/* keeps track of a bit address in framebuffer memory */
+struct fb_address {
+ void *address;
+ int bits;
+};
+
+/* initialize the bit address pointer to the beginning of the frame buffer */
+static inline struct fb_address fb_address_init(struct fb_info *p)
+{
+ void *base = p->screen_buffer;
+ struct fb_address ptr;
+
+ ptr.address = PTR_ALIGN_DOWN(base, BITS_PER_LONG / BITS_PER_BYTE);
+ ptr.bits = (base - ptr.address) * BITS_PER_BYTE;
+ return ptr;
+}
+
+/* framebuffer write access */
+static inline void fb_write_offset(unsigned long val, int offset, const struct fb_address *dst)
+{
+ unsigned long *mem = dst->address;
+
+ mem[offset] = val;
+}
+
+/* framebuffer read access */
+static inline unsigned long fb_read_offset(int offset, const struct fb_address *src)
+{
+ unsigned long *mem = src->address;
+
+ return mem[offset];
+}
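Note: fb_address_init() above splits an arbitrary screen_buffer pointer into a long-aligned base plus a bit offset, so the drawing templates can work in whole unsigned longs. The same split done in plain userspace C, using uintptr_t arithmetic instead of PTR_ALIGN_DOWN (names illustrative only):

#include <stdio.h>
#include <stdint.h>

struct bit_address {
	void *address;	/* aligned down to an unsigned long boundary */
	int bits;	/* bit offset of the original pointer within it */
};

static struct bit_address bit_address_init(void *base)
{
	uintptr_t p = (uintptr_t)base;
	uintptr_t aligned = p & ~(uintptr_t)(sizeof(unsigned long) - 1);
	struct bit_address ptr = {
		.address = (void *)aligned,
		.bits = (int)(p - aligned) * 8,
	};

	return ptr;
}

int main(void)
{
	unsigned long buf[2];
	struct bit_address a = bit_address_init((char *)buf + 3);

	printf("offset %d bits\n", a.bits);	/* 24 */
	return 0;
}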
diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c
index eff7ec4da167..d342b90c42b7 100644
--- a/drivers/video/fbdev/core/tileblit.c
+++ b/drivers/video/fbdev/core/tileblit.c
@@ -32,16 +32,14 @@ static void tile_bmove(struct vc_data *vc, struct fb_info *info, int sy,
}
static void tile_clear(struct vc_data *vc, struct fb_info *info, int sy,
- int sx, int height, int width)
+ int sx, int height, int width, int fg, int bg)
{
struct fb_tilerect rect;
- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
- int fgshift = (vc->vc_hi_font_mask) ? 9 : 8;
rect.index = vc->vc_video_erase_char &
((vc->vc_hi_font_mask) ? 0x1ff : 0xff);
- rect.fg = attr_fgcol_ec(fgshift, vc, info);
- rect.bg = attr_bgcol_ec(bgshift, vc, info);
+ rect.fg = fg;
+ rect.bg = bg;
rect.sx = sx;
rect.sy = sy;
rect.width = width;
@@ -76,7 +74,42 @@ static void tile_putcs(struct vc_data *vc, struct fb_info *info,
static void tile_clear_margins(struct vc_data *vc, struct fb_info *info,
int color, int bottom_only)
{
- return;
+ unsigned int cw = vc->vc_font.width;
+ unsigned int ch = vc->vc_font.height;
+ unsigned int rw = info->var.xres - (vc->vc_cols*cw);
+ unsigned int bh = info->var.yres - (vc->vc_rows*ch);
+ unsigned int rs = info->var.xres - rw;
+ unsigned int bs = info->var.yres - bh;
+ unsigned int vwt = info->var.xres_virtual / cw;
+ unsigned int vht = info->var.yres_virtual / ch;
+ struct fb_tilerect rect;
+
+ rect.index = vc->vc_video_erase_char &
+ ((vc->vc_hi_font_mask) ? 0x1ff : 0xff);
+ rect.fg = color;
+ rect.bg = color;
+
+ if ((int) rw > 0 && !bottom_only) {
+ rect.sx = (info->var.xoffset + rs + cw - 1) / cw;
+ rect.sy = 0;
+ rect.width = (rw + cw - 1) / cw;
+ rect.height = vht;
+ if (rect.width + rect.sx > vwt)
+ rect.width = vwt - rect.sx;
+ if (rect.sx < vwt)
+ info->tileops->fb_tilefill(info, &rect);
+ }
+
+ if ((int) bh > 0) {
+ rect.sx = info->var.xoffset / cw;
+ rect.sy = (info->var.yoffset + bs) / ch;
+ rect.width = rs / cw;
+ rect.height = (bh + ch - 1) / ch;
+ if (rect.height + rect.sy > vht)
+ rect.height = vht - rect.sy;
+ if (rect.sy < vht)
+ info->tileops->fb_tilefill(info, &rect);
+ }
}
static void tile_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
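Note: the new tile_clear_margins() converts the pixel-sized right and bottom margins into whole tile rectangles, rounding the start column and the width/height up so the margin is fully covered, then clamps against the virtual tile grid before calling fb_tilefill(). The rounding it relies on is plain ceiling division; a quick check of the right-margin case with made-up values:

#include <stdio.h>

#define CEIL_DIV(a, b) (((a) + (b) - 1) / (b))

int main(void)
{
	unsigned int xres = 1024, cw = 12, cols = xres / cw;	/* 85 columns */
	unsigned int rw = xres - cols * cw;			/* 4 px margin */
	unsigned int rs = xres - rw;				/* margin start */

	/* first tile column touching the margin, and tiles needed to cover it */
	printf("sx=%u width=%u\n", CEIL_DIV(rs, cw), CEIL_DIV(rw, cw));
	return 0;					/* sx=85 width=1 */
}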
diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
index 5ac8201c3533..b71d15794ce8 100644
--- a/drivers/video/fbdev/fsl-diu-fb.c
+++ b/drivers/video/fbdev/fsl-diu-fb.c
@@ -1827,6 +1827,7 @@ static void fsl_diu_remove(struct platform_device *pdev)
int i;
data = dev_get_drvdata(&pdev->dev);
+ device_remove_file(&pdev->dev, &data->dev_attr);
disable_lcdc(&data->fsl_diu_info[0]);
free_irq(data->irq, data->diu_reg);
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 7fdb5edd7e2e..75338ffc703f 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -282,6 +282,8 @@ static uint screen_depth;
static uint screen_fb_size;
static uint dio_fb_size; /* FB size for deferred IO */
+static void hvfb_putmem(struct fb_info *info);
+
/* Send message to Hyper-V host */
static inline int synthvid_send(struct hv_device *hdev,
struct synthvid_msg *msg)
@@ -863,6 +865,17 @@ static void hvfb_ops_damage_area(struct fb_info *info, u32 x, u32 y, u32 width,
}
/*
+ * fb_ops.fb_destroy is called by the last put_fb_info() call at the end
+ * of unregister_framebuffer() or fb_release(). Do any cleanup related to
+ * framebuffer here.
+ */
+static void hvfb_destroy(struct fb_info *info)
+{
+ hvfb_putmem(info);
+ framebuffer_release(info);
+}
+
+/*
* TODO: GEN1 codepaths allocate from system or DMA-able memory. Fix the
* driver to use the _SYSMEM_ or _DMAMEM_ helpers in these cases.
*/
@@ -877,6 +890,7 @@ static const struct fb_ops hvfb_ops = {
.fb_set_par = hvfb_set_par,
.fb_setcolreg = hvfb_setcolreg,
.fb_blank = hvfb_blank,
+ .fb_destroy = hvfb_destroy,
};
/* Get options from kernel paramenter "video=" */
@@ -952,7 +966,7 @@ static phys_addr_t hvfb_get_phymem(struct hv_device *hdev,
}
/* Release contiguous physical memory */
-static void hvfb_release_phymem(struct hv_device *hdev,
+static void hvfb_release_phymem(struct device *device,
phys_addr_t paddr, unsigned int size)
{
unsigned int order = get_order(size);
@@ -960,7 +974,7 @@ static void hvfb_release_phymem(struct hv_device *hdev,
if (order <= MAX_PAGE_ORDER)
__free_pages(pfn_to_page(paddr >> PAGE_SHIFT), order);
else
- dma_free_coherent(&hdev->device,
+ dma_free_coherent(device,
round_up(size, PAGE_SIZE),
phys_to_virt(paddr),
paddr);
@@ -989,6 +1003,7 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
base = pci_resource_start(pdev, 0);
size = pci_resource_len(pdev, 0);
+ aperture_remove_conflicting_devices(base, size, KBUILD_MODNAME);
/*
* For Gen 1 VM, we can directly use the contiguous memory
@@ -1010,11 +1025,21 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
goto getmem_done;
}
pr_info("Unable to allocate enough contiguous physical memory on Gen 1 VM. Using MMIO instead.\n");
+ } else {
+ aperture_remove_all_conflicting_devices(KBUILD_MODNAME);
}
/*
- * Cannot use the contiguous physical memory.
- * Allocate mmio space for framebuffer.
+ * Cannot use contiguous physical memory, so allocate MMIO space for
+ * the framebuffer. At this point in the function, conflicting devices
+ * that might have claimed the framebuffer MMIO space based on
+ * screen_info.lfb_base must have already been removed so that
+ * vmbus_allocate_mmio() does not allocate different MMIO space. If the
+ * kdump image were to be loaded using kexec_file_load(), the
+ * framebuffer location in the kdump image would be set from
+ * screen_info.lfb_base at the time that kdump is enabled. If the
+ * framebuffer has moved elsewhere, this could be the wrong location,
+ * causing kdump to hang when efifb (for example) loads.
*/
dio_fb_size =
screen_width * screen_height * screen_depth / 8;
@@ -1051,11 +1076,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
info->screen_size = dio_fb_size;
getmem_done:
- if (base && size)
- aperture_remove_conflicting_devices(base, size, KBUILD_MODNAME);
- else
- aperture_remove_all_conflicting_devices(KBUILD_MODNAME);
-
if (!gen2vm)
pci_dev_put(pdev);
@@ -1074,16 +1094,16 @@ err1:
}
/* Release the framebuffer */
-static void hvfb_putmem(struct hv_device *hdev, struct fb_info *info)
+static void hvfb_putmem(struct fb_info *info)
{
struct hvfb_par *par = info->par;
if (par->need_docopy) {
vfree(par->dio_vp);
- iounmap(info->screen_base);
+ iounmap(par->mmio_vp);
vmbus_free_mmio(par->mem->start, screen_fb_size);
} else {
- hvfb_release_phymem(hdev, info->fix.smem_start,
+ hvfb_release_phymem(info->device, info->fix.smem_start,
screen_fb_size);
}
@@ -1172,7 +1192,7 @@ static int hvfb_probe(struct hv_device *hdev,
if (ret)
goto error;
- ret = register_framebuffer(info);
+ ret = devm_register_framebuffer(&hdev->device, info);
if (ret) {
pr_err("Unable to register framebuffer\n");
goto error;
@@ -1197,7 +1217,7 @@ static int hvfb_probe(struct hv_device *hdev,
error:
fb_deferred_io_cleanup(info);
- hvfb_putmem(hdev, info);
+ hvfb_putmem(info);
error2:
vmbus_close(hdev->channel);
error1:
@@ -1220,14 +1240,10 @@ static void hvfb_remove(struct hv_device *hdev)
fb_deferred_io_cleanup(info);
- unregister_framebuffer(info);
cancel_delayed_work_sync(&par->dwork);
vmbus_close(hdev->channel);
hv_set_drvdata(hdev, NULL);
-
- hvfb_putmem(hdev, info);
- framebuffer_release(info);
}
static int hvfb_suspend(struct hv_device *hdev)
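Note: the hyperv_fb hunks move the memory teardown from hvfb_remove() into an fb_ops.fb_destroy callback, which the fbdev core invokes from the final put_fb_info(); freeing in remove() could otherwise pull the framebuffer out from under a still-open /dev/fb handle. A hedged kernel-style sketch of that general shape, with hypothetical driver names and helpers (not the hyperv_fb code):

static void mydrv_fb_destroy(struct fb_info *info)
{
	/* last reference is gone: now it is safe to free backing storage */
	mydrv_free_framebuffer(info);		/* hypothetical helper */
	framebuffer_release(info);
}

static const struct fb_ops mydrv_fb_ops = {
	.owner      = THIS_MODULE,
	.fb_destroy = mydrv_fb_destroy,
	/* ... drawing and ioctl callbacks elided ... */
};

static void mydrv_remove(struct platform_device *pdev)
{
	struct fb_info *info = platform_get_drvdata(pdev);

	/* detach only; with devm_register_framebuffer() even this is implicit */
	unregister_framebuffer(info);
}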
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
index ccb96a5be07e..1dc70c96d813 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
@@ -2659,13 +2659,8 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
row_inc = 0;
pix_inc = 0;
- if (plane == OMAP_DSS_WB) {
- frame_width = out_width;
- frame_height = out_height;
- } else {
- frame_width = in_width;
- frame_height = height;
- }
+ frame_width = in_width;
+ frame_height = height;
if (rotation_type == OMAP_DSS_ROT_TILER)
calc_tiler_rotation_offset(screen_width, frame_width,
@@ -2738,9 +2733,13 @@ int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
bool mem_to_mem)
{
int r;
- enum omap_overlay_caps caps = dss_feat_get_overlay_caps(plane);
+ enum omap_overlay_caps caps;
enum omap_channel channel;
+ if (plane == OMAP_DSS_WB)
+ return -EINVAL;
+
+ caps = dss_feat_get_overlay_caps(plane);
channel = dispc_ovl_get_channel_out(plane);
DSSDBG("dispc_ovl_setup %d, pa %pad, pa_uv %pad, sw %d, %d,%d, %dx%d ->"
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index 4aa84853e31a..ee6da5084242 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -2233,32 +2233,27 @@ static int pxafb_probe(struct platform_device *dev)
{
struct pxafb_info *fbi;
struct pxafb_mach_info *inf, *pdata;
- int i, irq, ret;
+ int irq, ret;
dev_dbg(&dev->dev, "pxafb_probe\n");
ret = -ENOMEM;
pdata = dev_get_platdata(&dev->dev);
- inf = devm_kmalloc(&dev->dev, sizeof(*inf), GFP_KERNEL);
- if (!inf)
- goto failed;
-
if (pdata) {
- *inf = *pdata;
- inf->modes =
- devm_kmalloc_array(&dev->dev, pdata->num_modes,
- sizeof(inf->modes[0]), GFP_KERNEL);
+ inf = devm_kmemdup(&dev->dev, pdata, sizeof(*pdata), GFP_KERNEL);
+ if (!inf)
+ goto failed;
+
+ inf->modes = devm_kmemdup_array(&dev->dev, pdata->modes, pdata->num_modes,
+ sizeof(*pdata->modes), GFP_KERNEL);
if (!inf->modes)
goto failed;
- for (i = 0; i < inf->num_modes; i++)
- inf->modes[i] = pdata->modes[i];
} else {
inf = of_pxafb_of_mach_info(&dev->dev);
+ if (IS_ERR_OR_NULL(inf))
+ goto failed;
}
- if (IS_ERR_OR_NULL(inf))
- goto failed;
-
ret = pxafb_parse_options(&dev->dev, g_options, inf);
if (ret < 0)
goto failed;
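Note: the pxafb probe conversion replaces devm_kmalloc() plus an element-by-element copy with devm_kmemdup() and devm_kmemdup_array(), which duplicate the platform data in one call and tie the copies' lifetime to the device. A hedged sketch of the idiom, with a hypothetical demo_pdata carrying .modes and .num_modes:

static int demo_probe(struct platform_device *pdev)
{
	struct demo_pdata *pdata = dev_get_platdata(&pdev->dev);
	struct demo_pdata *inf;

	inf = devm_kmemdup(&pdev->dev, pdata, sizeof(*pdata), GFP_KERNEL);
	if (!inf)
		return -ENOMEM;

	inf->modes = devm_kmemdup_array(&pdev->dev, pdata->modes,
					pdata->num_modes, sizeof(*pdata->modes),
					GFP_KERNEL);
	if (!inf->modes)
		return -ENOMEM;

	/* no matching kfree() needed: both copies are device-managed */
	return 0;
}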
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index 4715dcb59811..dd950e4ab5ce 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -1338,16 +1338,19 @@ overlay_rop3_store(struct device *dev, struct device_attribute *attr,
return count;
}
-static const struct device_attribute overlay_sysfs_attrs[] = {
- __ATTR(ovl_alpha, S_IRUGO|S_IWUSR,
- overlay_alpha_show, overlay_alpha_store),
- __ATTR(ovl_mode, S_IRUGO|S_IWUSR,
- overlay_mode_show, overlay_mode_store),
- __ATTR(ovl_position, S_IRUGO|S_IWUSR,
- overlay_position_show, overlay_position_store),
- __ATTR(ovl_rop3, S_IRUGO|S_IWUSR,
- overlay_rop3_show, overlay_rop3_store),
+static DEVICE_ATTR_RW(overlay_alpha);
+static DEVICE_ATTR_RW(overlay_mode);
+static DEVICE_ATTR_RW(overlay_position);
+static DEVICE_ATTR_RW(overlay_rop3);
+
+static struct attribute *overlay_sysfs_attrs[] = {
+ &dev_attr_overlay_alpha.attr,
+ &dev_attr_overlay_mode.attr,
+ &dev_attr_overlay_position.attr,
+ &dev_attr_overlay_rop3.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(overlay_sysfs);
static const struct fb_fix_screeninfo sh_mobile_lcdc_overlay_fix = {
.id = "SH Mobile LCDC",
@@ -1516,7 +1519,6 @@ sh_mobile_lcdc_overlay_fb_register(struct sh_mobile_lcdc_overlay *ovl)
{
struct sh_mobile_lcdc_priv *lcdc = ovl->channel->lcdc;
struct fb_info *info = ovl->info;
- unsigned int i;
int ret;
if (info == NULL)
@@ -1530,12 +1532,6 @@ sh_mobile_lcdc_overlay_fb_register(struct sh_mobile_lcdc_overlay *ovl)
dev_name(lcdc->dev), ovl->index, info->var.xres,
info->var.yres, info->var.bits_per_pixel);
- for (i = 0; i < ARRAY_SIZE(overlay_sysfs_attrs); ++i) {
- ret = device_create_file(info->dev, &overlay_sysfs_attrs[i]);
- if (ret < 0)
- return ret;
- }
-
return 0;
}
@@ -2641,6 +2637,7 @@ err1:
static struct platform_driver sh_mobile_lcdc_driver = {
.driver = {
.name = "sh_mobile_lcdc_fb",
+ .dev_groups = overlay_sysfs_groups,
.pm = &sh_mobile_lcdc_dev_pm_ops,
},
.probe = sh_mobile_lcdc_probe,
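Note: the sh_mobile_lcdc change swaps hand-rolled __ATTR() entries plus device_create_file() loops for DEVICE_ATTR_RW()/ATTRIBUTE_GROUPS() and the driver core's dev_groups hook, so the files are created and removed automatically. DEVICE_ATTR_RW(foo) expects foo_show()/foo_store() and names the sysfs file after foo, so as a side effect the visible names change from ovl_* to overlay_*. The general pattern, sketched with a hypothetical attribute:

static ssize_t demo_value_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", 42);
}

static ssize_t demo_value_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	return count;
}
static DEVICE_ATTR_RW(demo_value);	/* needs demo_value_show/_store */

static struct attribute *demo_attrs[] = {
	&dev_attr_demo_value.attr,
	NULL,
};
ATTRIBUTE_GROUPS(demo);			/* provides demo_groups */

static struct platform_driver demo_driver = {
	/* .probe/.remove elided */
	.driver = {
		.name       = "demo",
		.dev_groups = demo_groups,	/* core handles create/remove */
	},
};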
diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c
index 7734377b2d87..ed6f4f43e2d5 100644
--- a/drivers/video/fbdev/sm501fb.c
+++ b/drivers/video/fbdev/sm501fb.c
@@ -327,6 +327,13 @@ static int sm501fb_check_var(struct fb_var_screeninfo *var,
if (var->xres_virtual > 4096 || var->yres_virtual > 2048)
return -EINVAL;
+ /* geometry sanity checks */
+ if (var->xres + var->xoffset > var->xres_virtual)
+ return -EINVAL;
+
+ if (var->yres + var->yoffset > var->yres_virtual)
+ return -EINVAL;
+
/* can cope with 8,16 or 32bpp */
if (var->bits_per_pixel <= 8)
diff --git a/drivers/video/fbdev/wmt_ge_rops.c b/drivers/video/fbdev/wmt_ge_rops.c
index 69106299ab47..92fbb3f3a0d3 100644
--- a/drivers/video/fbdev/wmt_ge_rops.c
+++ b/drivers/video/fbdev/wmt_ge_rops.c
@@ -12,7 +12,6 @@
#include <linux/io.h>
#include <linux/platform_device.h>
-#include "core/fb_draw.h"
#include "wmt_ge_rops.h"
#define GE_COMMAND_OFF 0x00
@@ -41,6 +40,33 @@
static void __iomem *regbase;
+/* from the spec it seems more like depth than bits per pixel */
+static inline unsigned long pixel_to_pat(u32 depth, u32 pixel, struct fb_info *p)
+{
+ switch (depth) {
+ case 1:
+ return ~0ul*pixel;
+ case 2:
+ return ~0ul/3*pixel;
+ case 4:
+ return ~0ul/15*pixel;
+ case 8:
+ return ~0ul/255*pixel;
+ case 12:
+ case 15:
+ case 16:
+ return ~0ul/0xffff*pixel;
+ case 18:
+ case 24:
+ return 0x1000001ul*pixel;
+ case 32:
+ return pixel;
+ default:
+ fb_warn_once(p, "%s: unsupported pixelformat %d\n", __func__, depth);
+ return 0;
+ }
+}
+
void wmt_ge_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
{
unsigned long fg, pat;
@@ -54,7 +80,7 @@ void wmt_ge_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
else
fg = rect->color;
- pat = pixel_to_pat(p->var.bits_per_pixel, fg);
+ pat = pixel_to_pat(p->var.bits_per_pixel, fg, p);
if (p->fbops->fb_sync)
p->fbops->fb_sync(p);
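Note: the local pixel_to_pat() added here replicates one pixel value across a full unsigned long by multiplying with a repunit-style constant: ~0ul/255 is 0x0101...01, so multiplying by an 8-bit pixel stamps it into every byte, and likewise for the other depths. A quick userspace check of that trick, assuming a 64-bit unsigned long:

#include <stdio.h>

int main(void)
{
	unsigned long pixel8 = 0xab;
	unsigned long pixel16 = 0x1234;

	printf("%016lx\n", ~0ul / 255 * pixel8);	/* abababababababab */
	printf("%016lx\n", ~0ul / 0xffff * pixel16);	/* 1234123412341234 */
	return 0;
}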
diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
index 70fbc9a3e703..cf3fb61f4d5b 100644
--- a/drivers/virt/coco/sev-guest/sev-guest.c
+++ b/drivers/virt/coco/sev-guest/sev-guest.c
@@ -23,6 +23,7 @@
#include <linux/cleanup.h>
#include <linux/uuid.h>
#include <linux/configfs.h>
+#include <linux/mm.h>
#include <uapi/linux/sev-guest.h>
#include <uapi/linux/psp-sev.h>
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index 8a294b9cbcf6..56d0dbe62163 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -2950,8 +2950,8 @@ static int virtio_mem_probe(struct virtio_device *vdev)
mutex_init(&vm->hotplug_mutex);
INIT_LIST_HEAD(&vm->next);
spin_lock_init(&vm->removal_lock);
- hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- vm->retry_timer.function = virtio_mem_timer_expired;
+ hrtimer_setup(&vm->retry_timer, virtio_mem_timer_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
vm->in_kdump = is_kdump_kernel();
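Note: this hunk, and the watchdog ones below, are the same mechanical conversion: hrtimer_init() followed by a separate .function assignment becomes a single hrtimer_setup() call that takes the callback up front, so the timer can never be armed without one. A sketch of the before/after shape, with a hypothetical timer and callback:

static struct hrtimer demo_timer;

static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
{
	return HRTIMER_NORESTART;
}

static void demo_timer_init(void)
{
	/* previously:
	 *	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	 *	demo_timer.function = demo_timer_fn;
	 * now the callback is supplied at setup time:
	 */
	hrtimer_setup(&demo_timer, demo_timer_fn, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);
}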
diff --git a/drivers/watchdog/diag288_wdt.c b/drivers/watchdog/diag288_wdt.c
index 4631d0a3866a..76dffc89c641 100644
--- a/drivers/watchdog/diag288_wdt.c
+++ b/drivers/watchdog/diag288_wdt.c
@@ -27,6 +27,7 @@
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/watchdog.h>
+#include <asm/machine.h>
#include <asm/ebcdic.h>
#include <asm/diag.h>
#include <linux/io.h>
@@ -110,7 +111,7 @@ static int wdt_start(struct watchdog_device *dev)
int ret;
unsigned int func;
- if (MACHINE_IS_VM) {
+ if (machine_is_vm()) {
func = conceal_on ? (WDT_FUNC_INIT | WDT_FUNC_CONCEAL)
: WDT_FUNC_INIT;
ret = diag288_str(func, dev->timeout, wdt_cmd);
@@ -136,7 +137,7 @@ static int wdt_ping(struct watchdog_device *dev)
int ret;
unsigned int func;
- if (MACHINE_IS_VM) {
+ if (machine_is_vm()) {
/*
* It seems to be ok to z/VM to use the init function to
* retrigger the watchdog. On LPAR WDT_FUNC_CHANGE must
@@ -192,7 +193,7 @@ static int __init diag288_init(void)
watchdog_set_nowayout(&wdt_dev, nowayout_info);
- if (MACHINE_IS_VM) {
+ if (machine_is_vm()) {
cmd_buf = kmalloc(MAX_CMDLEN, GFP_KERNEL);
if (!cmd_buf) {
pr_err("The watchdog cannot be initialized\n");
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c
index 7a1096265f18..0820e35ad2e3 100644
--- a/drivers/watchdog/softdog.c
+++ b/drivers/watchdog/softdog.c
@@ -187,14 +187,12 @@ static int __init softdog_init(void)
watchdog_set_nowayout(&softdog_dev, nowayout);
watchdog_stop_on_reboot(&softdog_dev);
- hrtimer_init(&softdog_ticktock, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- softdog_ticktock.function = softdog_fire;
+ hrtimer_setup(&softdog_ticktock, softdog_fire, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
if (IS_ENABLED(CONFIG_SOFT_WATCHDOG_PRETIMEOUT)) {
softdog_info.options |= WDIOF_PRETIMEOUT;
- hrtimer_init(&softdog_preticktock, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- softdog_preticktock.function = softdog_pretimeout;
+ hrtimer_setup(&softdog_preticktock, softdog_pretimeout, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
if (soft_active_on_boot)
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 19698d87dc57..8369fd94fc1a 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -1051,8 +1051,8 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
}
kthread_init_work(&wd_data->work, watchdog_ping_work);
- hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
- wd_data->timer.function = watchdog_timer_expired;
+ hrtimer_setup(&wd_data->timer, watchdog_timer_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_HARD);
watchdog_hrtimer_pretimeout_init(wdd);
if (wdd->id == 0) {
diff --git a/drivers/watchdog/watchdog_hrtimer_pretimeout.c b/drivers/watchdog/watchdog_hrtimer_pretimeout.c
index 940b53718a91..fbc7eecd8b20 100644
--- a/drivers/watchdog/watchdog_hrtimer_pretimeout.c
+++ b/drivers/watchdog/watchdog_hrtimer_pretimeout.c
@@ -23,8 +23,8 @@ void watchdog_hrtimer_pretimeout_init(struct watchdog_device *wdd)
{
struct watchdog_core_data *wd_data = wdd->wd_data;
- hrtimer_init(&wd_data->pretimeout_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- wd_data->pretimeout_timer.function = watchdog_hrtimer_pretimeout;
+ hrtimer_setup(&wd_data->pretimeout_timer, watchdog_hrtimer_pretimeout, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
void watchdog_hrtimer_pretimeout_start(struct watchdog_device *wdd)
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index 416f231809cb..bfe07adb3e3a 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -43,6 +43,18 @@ static int xen_add_device(struct device *dev)
pci_mcfg_reserved = true;
}
#endif
+
+ if (pci_domain_nr(pci_dev->bus) >> 16) {
+ /*
+ * The hypercall interface is limited to 16bit PCI segment
+ * values, do not attempt to register devices with Xen in
+ * segments greater or equal than 0x10000.
+ */
+ dev_info(dev,
+ "not registering with Xen: invalid PCI segment\n");
+ return 0;
+ }
+
if (pci_seg_supported) {
DEFINE_RAW_FLEX(struct physdev_pci_device_add, add, optarr, 1);
@@ -149,6 +161,16 @@ static int xen_remove_device(struct device *dev)
int r;
struct pci_dev *pci_dev = to_pci_dev(dev);
+ if (pci_domain_nr(pci_dev->bus) >> 16) {
+ /*
+ * The hypercall interface is limited to 16bit PCI segment
+ * values.
+ */
+ dev_info(dev,
+ "not unregistering with Xen: invalid PCI segment\n");
+ return 0;
+ }
+
if (pci_seg_supported) {
struct physdev_pci_device device = {
.seg = pci_domain_nr(pci_dev->bus),
@@ -182,6 +204,16 @@ int xen_reset_device(const struct pci_dev *dev)
.flags = PCI_DEVICE_RESET_FLR,
};
+ if (pci_domain_nr(dev->bus) >> 16) {
+ /*
+ * The hypercall interface is limited to 16bit PCI segment
+ * values.
+ */
+ dev_info(&dev->dev,
+ "unable to notify Xen of device reset: invalid PCI segment\n");
+ return 0;
+ }
+
return HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_reset, &device);
}
EXPORT_SYMBOL_GPL(xen_reset_device);
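Note: all three xen/pci.c hunks add the same guard: the physdev hypercalls carry the PCI segment in a 16-bit field, so any domain number that does not fit in 16 bits is reported and skipped rather than silently truncated. The check itself is just a shift; a trivial userspace illustration:

#include <stdio.h>

static int segment_fits_hypercall(unsigned int domain_nr)
{
	/* the interface has a 16-bit segment field, so anything >= 0x10000
	 * cannot be represented */
	return !(domain_nr >> 16);
}

int main(void)
{
	printf("%d %d\n", segment_fits_hypercall(0xffff),
	       segment_fits_hypercall(0x10000));	/* 1 0 */
	return 0;
}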
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 544d3f9010b9..1db82da56db6 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -26,6 +26,8 @@
#define DRV_NAME "xen-platform-pci"
+#define PCI_DEVICE_ID_XEN_PLATFORM_XS61 0x0002
+
static unsigned long platform_mmio;
static unsigned long platform_mmio_alloc;
static unsigned long platform_mmiolen;
@@ -174,6 +176,8 @@ pci_out:
static const struct pci_device_id platform_pci_tbl[] = {
{PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM_XS61,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0,}
};
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index b616b7768c3b..5c2f829d5b0b 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -262,26 +262,6 @@ struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
return found_dev;
}
-struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
- struct pci_dev *dev)
-{
- struct pcistub_device *psdev;
- struct pci_dev *found_dev = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&pcistub_devices_lock, flags);
-
- list_for_each_entry(psdev, &pcistub_devices, dev_list) {
- if (psdev->dev == dev) {
- found_dev = pcistub_device_get_pci_dev(pdev, psdev);
- break;
- }
- }
-
- spin_unlock_irqrestore(&pcistub_devices_lock, flags);
- return found_dev;
-}
-
/*
* Called when:
* - XenBus state has been reconfigure (pci unplug). See xen_pcibk_remove_device
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index f9599ed2f2e2..b786c1f74f85 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -67,8 +67,6 @@ extern struct list_head xen_pcibk_quirks;
struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
int domain, int bus,
int slot, int func);
-struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
- struct pci_dev *dev);
void pcistub_put_pci_dev(struct pci_dev *dev);
static inline bool xen_pcibk_pv_support(void)
diff --git a/drivers/xen/xenfs/xensyms.c b/drivers/xen/xenfs/xensyms.c
index b799bc759c15..088b7f02c358 100644
--- a/drivers/xen/xenfs/xensyms.c
+++ b/drivers/xen/xenfs/xensyms.c
@@ -48,7 +48,7 @@ static int xensyms_next_sym(struct xensyms *xs)
return -ENOMEM;
set_xen_guest_handle(symdata->name, xs->name);
- symdata->symnum--; /* Rewind */
+ symdata->symnum = symnum; /* Rewind */
ret = HYPERVISOR_platform_op(&xs->op);
if (ret < 0)
@@ -78,7 +78,7 @@ static void *xensyms_next(struct seq_file *m, void *p, loff_t *pos)
{
struct xensyms *xs = m->private;
- xs->op.u.symdata.symnum = ++(*pos);
+ *pos = xs->op.u.symdata.symnum;
if (xensyms_next_sym(xs))
return NULL;
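Note: the xensyms fix treats the symnum reported back by the hypercall as the authoritative cursor: the retry path (taken when the name buffer must be regrown) restores the saved index instead of decrementing by one, and next() derives *pos from the hypercall result rather than incrementing it itself, so symbols are neither skipped nor repeated. A toy userspace sketch of that "producer returns the next cursor" pattern, with made-up names and sizes:

#include <stdio.h>

/* toy enumerator: on success fills *next with the cursor for the following
 * call; on a too-small buffer the caller must grow it and retry */
static int get_item(unsigned int cursor, unsigned int *next, int bufsize)
{
	if (bufsize < 8)
		return -1;
	*next = cursor + 1;
	return 0;
}

int main(void)
{
	unsigned int cursor = 5, next = 5;
	int bufsize = 4;

	while (get_item(cursor, &next, bufsize) < 0) {
		bufsize *= 2;
		next = cursor;	/* restore the saved cursor before retrying */
	}
	printf("resumed at %u, next %u\n", cursor, next);	/* 5, 6 */
	return 0;
}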